cinder-27.0.0/.coveragerc:

[run]
branch = True
source = cinder
omit = cinder/tests/*

[report]
ignore_errors = True
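The [run] and [report] sections above are the whole coverage.py configuration for the tree: branch coverage is enabled, measurement is limited to the cinder package, and the test tree itself is omitted. As a rough, hypothetical illustration only (this snippet is not part of the repository and assumes coverage.py 5.x or later), the same settings map onto the programmatic API like this:

    # Illustrative sketch: the .coveragerc options expressed as API calls.
    import coverage

    cov = coverage.Coverage(branch=True,              # [run] branch = True
                            source=["cinder"],        # [run] source = cinder
                            omit=["cinder/tests/*"])  # [run] omit
    cov.start()
    # ... run the unit test suite here ...
    cov.stop()
    cov.save()
    cov.report(ignore_errors=True)                    # [report] ignore_errors

In the gate this configuration is exercised indirectly by the cinder-code-coverage job defined in .zuul.yaml below (parent: openstack-tox-cover), which drives coverage through tox rather than calling the API directly.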
cinder-27.0.0/.pylintrc:

[MASTER]

# A comma-separated list of package or module names from where C extensions may
# be loaded. Extensions are loaded into the active Python interpreter and may
# run arbitrary code.
extension-pkg-whitelist=

# Add files or directories to the blacklist. They should be base names, not
# paths.
ignore=CVS,tests,test

# Add files or directories matching the regex patterns to the blacklist. The
# regex matches against base names, not paths.
ignore-patterns=

# Python code to execute, usually for sys.path manipulation such as
# pygtk.require().
#init-hook=

# Use multiple processes to speed up Pylint. Specifying 0 will auto-detect the
# number of processors available to use.
jobs=1

# Control the amount of potential inferred values when inferring a single
# object. This can help the performance when dealing with large functions or
# complex, nested conditions.
limit-inference-results=100

# List of plugins (as comma-separated values of python module names) to load,
# usually to register additional checkers.
load-plugins=

# Pickle collected data for later comparisons.
persistent=yes

# Specify a configuration file.
#rcfile=

# When enabled, pylint would attempt to guess common misconfiguration and emit
# user-friendly hints instead of false-positive error messages.
suggestion-mode=yes

# Allow loading of arbitrary C extensions. Extensions are imported into the
# active Python interpreter and may run arbitrary code.
unsafe-load-any-extension=no

[MESSAGES CONTROL]

# Only show warnings with the listed confidence levels. Leave empty to show
# all. Valid levels: HIGH, INFERENCE, INFERENCE_FAILURE, UNDEFINED.
confidence=

# Disable the message, report, category or checker with the given id(s). You
# can either give multiple identifiers separated by comma (,) or put this
# option multiple times (only on the command line, not in the configuration
# file where it should appear only once). You can also use "--disable=all" to
# disable everything first and then reenable specific checks. For example, if
# you want to run only the similarities checker, you can use "--disable=all
# --enable=similarities". If you want to run only the classes checker, but have
# no Warning level messages displayed, use "--disable=all --enable=classes
# --disable=W".
disable=
# "F" Fatal errors that prevent further processing
 import-error,
# "I" Informational noise
 locally-disabled,
 c-extension-no-member,
# "E" Error for important programming issues (likely bugs)
 access-member-before-definition, bad-super-call, no-member,
 no-method-argument, no-name-in-module, no-self-argument,
 no-value-for-parameter, unsubscriptable-object, method-hidden,
 not-callable, keyword-arg-before-vararg, too-many-function-args,
 unsupported-assignment-operation, not-an-iterable,
 unsupported-membership-test, unsupported-assignment-operation,
 raising-bad-type, bad-option-value, unexpected-keyword-arg,
 assignment-from-none, assignment-from-no-return,
# "W" Warnings for stylistic problems or minor programming issues
 exec-used, pointless-statement, unnecessary-lambda, abstract-method,
 arguments-differ, attribute-defined-outside-init, bad-builtin,
 bad-indentation, broad-except, deprecated-lambda, expression-not-assigned,
 fixme, global-statement, global-variable-not-assigned, no-init,
 non-parent-init-called, protected-access, redefined-builtin,
 redefined-outer-name, reimported, signature-differs, star-args,
 super-init-not-called, unpacking-non-sequence, unused-argument,
 unused-import, undefined-loop-variable, bad-staticmethod-argument,
 deprecated-method, useless-else-on-loop, lost-exception,
 pointless-string-statement, useless-super-delegation, deprecated-method,
 dangerous-default-value, wildcard-import, bad-staticmethod-argument,
 eval-used, blacklisted-name, pointless-statement, try-except-raise,
# "C" Coding convention violations
 bad-continuation, invalid-name, missing-docstring, old-style-class,
 superfluous-parens, wrong-import-position, wrong-import-order,
 ungrouped-imports, unused-variable, len-as-condition, cell-var-from-loop,
 singleton-comparison, misplaced-comparison-constant, unidiomatic-typecheck,
 consider-using-enumerate, bad-whitespace, line-too-long,
 useless-super-delegation, pointless-string-statement,
 unsupported-membership-test, bad-classmethod-argument,
 bad-mcs-classmethod-argument,
# "R" Refactor recommendations
 abstract-class-little-used, abstract-class-not-used, duplicate-code,
 interface-not-implemented, no-self-use, too-few-public-methods,
 too-many-ancestors, too-many-arguments, too-many-branches,
 too-many-instance-attributes, too-many-lines, too-many-locals,
 too-many-public-methods, too-many-return-statements, too-many-statements,
 too-many-nested-blocks, no-else-return, inconsistent-return-statements,
 simplifiable-if-statement, too-many-boolean-expressions, cyclic-import,
 redefined-argument-from-local, consider-using-ternary, literal-comparison,
 too-many-boolean-expressions, useless-object-inheritance,
 trailing-comma-tuple, useless-object-inheritance,
 consider-using-set-comprehension, consider-using-in, useless-return,
 chained-comparison

[REPORTS]

# Tells whether to display a full report or only the messages.
reports=no
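Messages that remain enabled can still be silenced for a single statement with an inline pragma; the locally-disabled entry above keeps pylint from emitting its informational notice every time that is done. A minimal, purely illustrative Python snippet (the function and the chosen message are assumptions, not code from this tree):

    def open_log(path):
        # Keep the handle open on purpose; suppress only this one
        # occurrence of the consider-using-with (R1732) recommendation.
        handle = open(path)  # pylint: disable=consider-using-with
        return handle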
[BASIC]

# Variable names can be 1 to 31 characters long, with lowercase and underscores
variable-rgx=[a-z_][a-z0-9_]{0,30}$

# Argument names can be 2 to 31 characters long, with lowercase and underscores
argument-rgx=[a-z_][a-z0-9_]{1,30}$

# Method names should be at least 3 characters long
# and be lowercased with underscores
method-rgx=([a-z_][a-z0-9_]{2,}|setUp|tearDown)$

# Module names matching neutron-* are ok (files in bin/)
module-rgx=(([a-z_][a-z0-9_]*)|([A-Z][a-zA-Z0-9]+)|(neutron-[a-z0-9_-]+))$

# Don't require docstrings on tests.
no-docstring-rgx=((__.*__)|([tT]est.*)|setUp|tearDown)$

mixin-class-rgx=(^(ManageResource)$|.*[Mm]ixin)

[FORMAT]

# Maximum number of characters on a single line.
max-line-length=79

[VARIABLES]

# List of additional names supposed to be defined in builtins. Remember that
# you should avoid defining new builtins when possible.
additional-builtins=_

[TYPECHECK]

# List of module names for which member attributes should not be checked
ignored-modules=six.moves,_MovedItems,alembic.context,alembic.op,
                alembic.config,pyxcli,storpool,oslo_privsep.capabilities,nvmet

signature-mutators=unittest.mock.patch,unittest.mock.patch.object,sqlalchemy.util._preloaded.dependencies

# This is for cinder.objects.*, and requests.packages.*, but due to
# https://github.com/PyCQA/pylint/issues/2498
# it doesn't seem that generated-members can be specified correctly.
# Clean this up later when pylint works correctly.
generated-members=objects,requests

cinder-27.0.0/.stestr.conf:

[DEFAULT]
test_path=${OS_TEST_PATH:-./cinder/tests/unit}
top_dir=./

cinder-27.0.0/.zuul.yaml:

- project: templates: - openstack-python3-jobs - openstack-python3-jobs-arm64 - publish-openstack-docs-pti - periodic-stable-jobs - check-requirements - integrated-gate-storage - release-notes-jobs-python3 check: jobs: - cinder-code-coverage: voting: false - cinder-mypy - cinder-tox-bandit-baseline: voting: false - openstack-tox-functional-py310: irrelevant-files: &functional-irrelevant-files - ^.*\.rst$ - ^cinder/locale/.*$ - ^cinder/tests/hacking/.*$ - ^cinder/tests/unit.*$ - ^doc/.*$ - ^releasenotes/.*$ - ^reno.yaml$ - openstack-tox-functional-py312: irrelevant-files: *functional-irrelevant-files - cinder-rally-task: voting: false irrelevant-files: *functional-irrelevant-files - openstack-tox-pylint: voting: false timeout: 5400 irrelevant-files: - ^.*\.rst$ - ^api-ref/.*$ - ^cinder/locale/.*$ - ^cinder/tests/hacking/.*$ - ^cinder/tests/unit.*$ - ^doc/.*$ - ^releasenotes/.*$ - ^reno.yaml$ - cinder-plugin-ceph-tempest: irrelevant-files: &gate-irrelevant-files - ^(test-|)requirements.txt$ - ^.*\.rst$ - ^api-ref/.*$ - ^cinder/cmd/status\.py$ - ^cinder/locale/.*$ - ^cinder/tests/functional.*$ - ^cinder/tests/hacking/.*$ - ^cinder/tests/unit.*$ - ^doc/.*$ - ^releasenotes/.*$ - ^reno.yaml$ - ^setup.cfg$ - ^tools/.*$ - ^tox.ini$ - cinder-plugin-ceph-tempest-mn-aa: voting: false irrelevant-files: *gate-irrelevant-files - cinder-tempest-plugin-lvm-lio-barbican: # NOTE: we use this as a canary job to make sure at least # one expensive tempest job is run on changes excluded by # the gate-irrelevant-files defined above irrelevant-files: - ^.*\.rst$ - ^api-ref/.*$ - ^cinder/cmd/status\.py$ - ^cinder/locale/.*$ - ^cinder/tests/functional.*$ - ^cinder/tests/hacking/.*$ - ^cinder/tests/unit.*$ - ^doc/.*$ - ^releasenotes/.*$ - ^reno.yaml$ - ^tools/.*$ - cinder-tempest-plugin-lvm-lio-barbican-fips: voting: false irrelevant-files: *gate-irrelevant-files - cinder-tempest-plugin-protection-functional: irrelevant-files: *gate-irrelevant-files - cinder-grenade-mn-sub-volbak: irrelevant-files: *gate-irrelevant-files - cinder-tempest-lvm-multibackend: voting: false irrelevant-files: *gate-irrelevant-files -
cinder-for-glance-optimized: voting: false irrelevant-files: *gate-irrelevant-files - devstack-plugin-nfs-tempest-full: irrelevant-files: *gate-irrelevant-files - devstack-plugin-nfs-tempest-full-fips: voting: false irrelevant-files: *gate-irrelevant-files - tempest-slow-py3: irrelevant-files: *gate-irrelevant-files - tempest-integrated-storage: irrelevant-files: *gate-irrelevant-files - grenade: irrelevant-files: *gate-irrelevant-files - grenade-skip-level: irrelevant-files: *gate-irrelevant-files - tempest-ipv6-only: irrelevant-files: *gate-irrelevant-files - openstacksdk-functional-devstack: irrelevant-files: *gate-irrelevant-files gate: jobs: - cinder-grenade-mn-sub-volbak: irrelevant-files: *gate-irrelevant-files - cinder-plugin-ceph-tempest: irrelevant-files: *gate-irrelevant-files - tempest-integrated-storage: irrelevant-files: *gate-irrelevant-files - grenade: irrelevant-files: *gate-irrelevant-files - tempest-ipv6-only: irrelevant-files: *gate-irrelevant-files - openstacksdk-functional-devstack: irrelevant-files: *gate-irrelevant-files experimental: jobs: - cinder-multibackend-matrix-migration: irrelevant-files: *gate-irrelevant-files - cinder-grenade-mn-sub-volschbak: irrelevant-files: *gate-irrelevant-files - cinder-grenade-mn-sub-bak: irrelevant-files: *gate-irrelevant-files - devstack-plugin-ceph-tempest-py3: irrelevant-files: *gate-irrelevant-files - tempest-pg-full: irrelevant-files: *gate-irrelevant-files - job: # Security testing for known issues name: cinder-tox-bandit-baseline parent: openstack-tox timeout: 2400 vars: tox_envlist: bandit-baseline required-projects: - openstack/requirements irrelevant-files: *gate-irrelevant-files - job: name: cinder-code-coverage parent: openstack-tox-cover timeout: 2400 irrelevant-files: - ^(test-|)requirements.txt$ - ^.*\.rst$ - ^api-ref/.*$ - ^cinder/cmd/status\.py$ - ^cinder/locale/.*$ - ^doc/.*$ - ^releasenotes/.*$ - ^reno.yaml$ - ^setup.cfg$ - ^tools/.*$ - ^tox.ini$ - job: name: cinder-rally-task parent: rally-task-cinder timeout: 7800 vars: devstack_localrc: OSPROFILER_COLLECTOR: redis devstack_plugins: osprofiler: https://opendev.org/openstack/osprofiler rally-openstack: https://opendev.org/openstack/rally-openstack rally_task: rally-jobs/cinder.yaml required-projects: - openstack/rally-openstack - openstack/osprofiler - job: name: cinder-plugin-ceph-tempest parent: devstack-plugin-ceph-tempest-py3 roles: - zuul: opendev.org/openstack/cinder-tempest-plugin vars: # FIXME: change I29b1af0a4034decad to tempest added image format tests that # cannot pass in this job because the image data takes a optimized path that # bypasses nova's checks. Until the nova team decides on a strategy to handle # this issue, we skip these tests. tempest_exclude_regex: (tempest.api.image.v2.test_images_formats.ImagesFormatTest.test_compute_rejects) devstack_localrc: CEPH_MIN_CLIENT_VERSION: "mimic" # NOTE: if jobs are having memory problems, may want # to turn this on (currently defaults to false): # MYSQL_REDUCE_MEMORY: true devstack_local_conf: post-config: $GLANCE_API_CONF: DEFAULT: do_secure_hash: False test-config: $TEMPEST_CONFIG: volume-feature-enabled: volume_revert: True timeout: 10800 - job: # this depends on some ceph admin setup which is not yet complete # TODO(alee) enable this test when ceph admin work is complete. 
name: cinder-plugin-ceph-tempest-fips parent: cinder-plugin-ceph-tempest nodeset: devstack-single-node-centos-9-stream pre-run: playbooks/enable-fips.yaml vars: configure_swap_size: 4096 nslookup_target: 'opendev.org' - job: name: cinder-plugin-ceph-tempest-mn-aa parent: devstack-plugin-ceph-multinode-tempest-py3 roles: - zuul: opendev.org/openstack/cinder-tempest-plugin vars: configure_swap_size: 4096 devstack_localrc: TEMPEST_VOLUME_REVERT_TO_SNAPSHOT: True # NOTE: if jobs are having memory problems, may want # to turn this on (currently defaults to false): # MYSQL_REDUCE_MEMORY: true devstack_local_conf: post-config: $CINDER_CONF: DEFAULT: cluster: ceph - job: name: cinder-grenade-mn-sub-bak parent: grenade-multinode description: | Cinder grenade multinode job where cinder-backup only runs on the subnode. It tests the new c-api, c-sch, c-vol (on the controller node) with the old c-bak (on the subnode). Former names for this job were: * cinder-grenade-dsvm-mn-sub-bak * legacy-grenade-dsvm-cinder-mn-sub-bak required-projects: - opendev.org/openstack/grenade - opendev.org/openstack/cinder vars: devstack_services: c-bak: false c-vol: true group-vars: subnode: devstack_services: c-bak: true c-vol: false - job: name: cinder-grenade-mn-sub-volbak parent: grenade-multinode description: | Cinder grenade multinode job where cinder-backup and cinder-volume only run on the subnode. It tests the new c-api, c-sch (on the controller node) with the old c-bak, c-vol (on the subnode). Former names for this job were: * cinder-grenade-dsvm-mn-sub-volbak * legacy-grenade-dsvm-cinder-mn-sub-volbak required-projects: - opendev.org/openstack/grenade - opendev.org/openstack/cinder vars: devstack_services: c-bak: false c-vol: false group-vars: subnode: devstack_services: c-bak: true c-vol: true - job: name: cinder-grenade-mn-sub-volschbak parent: grenade-multinode description: | Cinder grenade multinode job where cinder-backup, cinder-volume and cinder-scheduler only run on the subnode. It tests the new c-api (on the controller node) with the old c-bak, c-sch, c-vol (on the subnode). Former names for this job were: * cinder-grenade-dsvm-mn-sub-volschbak * legacy-grenade-dsvm-cinder-mn-sub-volschbak required-projects: - opendev.org/openstack/grenade - opendev.org/openstack/cinder vars: devstack_services: c-bak: false c-sch: false c-vol: false group-vars: subnode: devstack_services: c-bak: true c-sch: true c-vol: true - job: name: cinder-tempest-lvm-multibackend parent: devstack-tempest description: | Cinder tempest job based on LVM and multiple backends. Former names for this job were: * legacy-tempest-dsvm-lvm-multibackend timeout: 10800 required-projects: - opendev.org/openstack/cinder-tempest-plugin vars: tox_envlist: all tempest_test_regex: '(?!.*\[.*\bslow\b.*\])(^tempest\.(api|scenario)|(^cinder_tempest_plugin))' tempest_plugins: - cinder-tempest-plugin devstack_localrc: CINDER_ENABLED_BACKENDS: 'lvm:lvmdriver-1,lvm:lvmdriver-2' CINDER_VOLUME_CLEAR: none irrelevant-files: *gate-irrelevant-files - job: name: cinder-mypy parent: openstack-tox vars: tox_envlist: mypy tox_inline_comments: false - job: name: cinder-for-glance-optimized parent: cinder-tempest-plugin-basic description: | Configures glance with cinder as a backend for multiple glance cinder stores and with cinder configured to use the optimized workflow of moving image data directly in the backend. 
vars: devstack_localrc: USE_CINDER_FOR_GLANCE: True GLANCE_ENABLE_MULTIPLE_STORES: True CINDER_ENABLED_BACKENDS: lvm:lvmdriver-1 GLANCE_CINDER_DEFAULT_BACKEND: lvmdriver-1 GLANCE_SHOW_DIRECT_URL: True GLANCE_SHOW_MULTIPLE_LOCATIONS: True CINDER_ALLOWED_DIRECT_URL_SCHEMES: cinder CINDER_UPLOAD_OPTIMIZED: True CINDER_UPLOAD_INTERNAL_TENANT: True CINDER_USE_SERVICE_TOKEN: True tempest_test_regex: '(cinder_tempest_plugin|tempest.api.volume.test_volumes_actions)' - job: name: cinder-multibackend-matrix-migration parent: devstack-tempest description: | Run migration tests between several combinations of backends (LVM, Ceph, NFS) Former names for this job were: * legacy-tempest-dsvm-multibackend-matrix timeout: 10800 required-projects: - opendev.org/openstack/devstack-plugin-ceph - opendev.org/openstack/devstack-plugin-nfs run: playbooks/cinder-multibackend-matrix.yaml host-vars: controller: devstack_plugins: devstack-plugin-ceph: https://opendev.org/openstack/devstack-plugin-ceph devstack-plugin-nfs: https://opendev.org/openstack/devstack-plugin-nfs vars: devstack_localrc: CINDER_ENABLED_BACKENDS: lvm:lvm,nfs:nfs,ceph:ceph ENABLE_NFS_CINDER: true devstack_local_conf: test-config: $TEMPEST_CONFIG: volume: build_timeout: 900 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315576.0 cinder-27.0.0/AUTHORS0000664000175000017500000015426500000000000014154 0ustar00zuulzuul00000000000000= Abel Lopez Abhijeet Malawade Abhilash Divakaran Abhinav Srivastava Abhiram Moturi Abhishek Kekane Abhishek Kekane Abhishek Lekshmanan Abhishek Sharma Abhishek Shrivastava Accela Zhao Accela Zhao Adalberto Medeiros Adam Gandelman Adam Gandelman Adam Krpan Adam Spiers Adam Young Ade Lee Adelina Tuvenie Adriano Rosso Adrien Vergé Ajaya Agrawal Ajitha Robert Alan Alan Bishop Alan Jiang Alan Meadows Alberto Murillo Alberto Planas Alejandro Emanuel Paredes Aleksey Ruban Alessandro Pilotti Alessandro Pilotti Alex Deiter Alex Deiter Alex Holden Alex Meade Alex O'Rourke Alex O'Rourke AlexMuresan Alexander Bochkarev Alexander Deiter Alexander Gordeev Alexander Gorodnev Alexander Malashenko Alexander Maretskiy Alexandra Settle Alexandra Settle Alexandru Muresan Alexei Kornienko Alexey Khodos Alexey Yelistratov Alexis RIES Alfredo Moralejo Alon Marx Alon Zeltser Alyson Rosa Ameed Ashour Amit Oren Amit Saha Amit Zauber AmitKumarDas Amy Marrich (spotz) Ana Clara Cavalcante Anastasia Karpinska Andreas Jaeger Andreas Jaeger Andreas Karis Andreas Scheuring Andrei Andrei V. 
Ostapenko Andres Buraschi Andrew Bogott Andrew Forrest Andrew Kerr Andrey Kurilin Andrey Pavlov Andrey Volkov Andy Grover Aneesh Pachilangottil Angela Smith Angela Smith Angus Lees Anh Tran Anish Bhatt Ankit Agrawal Ann Kamyshnikova Anna Sortland Anthony Lee Anthony Lee Anthony Young Anton Arefiev Anton Frolov Antony Cleave Arata Notsu Archit Modi ArkadyKanevsky Armando Migliaccio Arnaud Morin Arne Wiebalck Arnon Yaari Arthur Nascimento Santos Arthur Outhenin-Chalandre Arun KV Aseel Awwad Aswad Rangnekar Atsushi Kawai Atsushi SAKAI Attila Fazekas Aviram Bar-Haim Avishay Traeger Avishay Traeger Avishay Traeger Bala Gopal Raj Bardia Keyoumarsi Bartek Zurawski Bartosz Rabiega Ben Nemec Ben Nemec Ben Swartzlander Ben Swartzlander Bertrand Lallau Bertrand Lallau Bertrand Lanson Bhaa Shakur Bharat Kumar Kobagana (BharatK) Bharat Kumar Kobagana Bharat Kumar Kobagana Bhavya Bansal Bi Shun Ke Bill Owen Bin Zhou Biser Milanov Bob Ball Bob Callaway Bob-OpenStack <295988511@qq.com> Boris Pavlovic Brant Knudson Brent Roskos Brian Rosmaita Brian Rosmaita Brian Rosmaita Brian Waldon Brianna Poulos Bridget McGinnis Bryan D. Payne Bryan Neumann Béla Vancsics CY Chiang Cady_Chen Cao ShuFeng Cao Shufeng Cao Xuan Hoang Carl Pecinovsky Carlos Goncalves Cedric Zhuang Cedric Zhuang Chandan Kumar Chandan Kumar Chang Bo Guo ChangBo Guo(gcb) Chao Zheng CZ Li Chaozhe.Chen Charles Short Chaynika Saikia Chet Burgess Chhavi Agarwal Chi Lo Chiawei Xie Chintha Govardhan Chmouel Boudjnah Chris Buccella Chris Friesen Chris M Chris Morrell Chris Morrell Chris Suttles Chris Yang Chris Yeoh Christian Berendt Christian Berendt Christian Rohmann Christoph Kassen Christophe Drevet-Droguet Christopher J Schaefer Christopher MacGown Chuck Fouts Chuck Short Chuck Short Cian O'Driscoll Ciara Stacke Cindy Pallares Clark Boylan ClaudiuNesa Clay Gerrard Clenimar Filemon Clinton Clinton Knight Colleen Murphy Corey Bryant Corey Wright Cory Stone Cory Wright Craig Vyvial Craige McWhirter Csaba Henk Curt Bruns Cyril Roelandt DTadrzak Dai Dang Van Daisuke Fujita Dan Prince Dan Radez Dan Smith Daniel Allegood Daniel Gollub Daniel Gonzalez Daniel Pawlik Daniel Pawlik Daniel Tadrzak Daniel Wilson Danny Al-Gaaf Danny Webb Dao Cong Tien Darren Birkett Davanum Srinivas Davanum Srinivas Dave Chen Dave McCowan David Hill David Medberry David Pineau David Rabel David Ripton David Rosales David Sariel David Vallee Delisle David White Deepak C Shetty Deepti Ramakrishna Deliang Fan DennyZhang Derek Chiang Dermot Tynan Derrick J. Wippler Desire Barine Diego Zamboni Diem Tran Dietmar Noll Digvijay Ukirde Dima Shulyak Dina Belova Dinesh Bhor Dinesh Bhor Dinesh Subhraveti Dirk Mueller Divya K Konoor Dmitriy Rabotjagov Dmitriy Rabotyagov Dmitry Borodaenko Dmitry Guryanov Dmitry Guryanov Dmitry Kudyukin Dolph Mathews Dominic Schlegel Dongcan Ye Dongsheng Yang Doug Hellmann Doug Hellmann Doug Schveninger Douglas Mendizábal Dr. Jens Harbott Drew Balfour Drew Fisher Drew Thorstensen Duan Jiong Duncan Thomas Duncan Thomas Dunrong Huang Earle F. Philhower, III Ed Balduf Ed Balduf Edmund Rhudy Eduardo Costa Edward Hope-Morley Edwin Wang Edwin Wang Eiichi Aikawa Einst Crazy Elad Zucker Elena Ezhova Eli Qiao Emilien Macchi Emilien Macchi Eoghan Glynn Eric Brown Eric Guo Eric Harney Eric M Gonzalez Eric Windisch Eric Young Eric Young Eric Young Erickson Santos Erik Johannes Erik Olof Gunnar Andersson Erik Zaadi Erlon Cruz Erlon R. Cruz Erlon R. Cruz Erlon R. 
Cruz Evgeny Antyshev Fabien Boucher Fabio Oliveira Falk Reimann Fei Long Wang Felipe Monteiro Felipe Rodrigues Felix Huettner FengJiankui Fengqian Gao Fergal Mc Carthy Fernando F. Silva Fernando Ferraz Fernando Ferraz Fernando Ferraz Flaper Fesp Flavio Percoco Florent Flament Florian Haas Forest Romain Francis Moorehead Francois Deppierraz Frederic Lepied Friedrich Hiekel Furuta Tomonori Fábio Oliveira GaoZqiang Gaozexu Gary Kotton Gary W. Smith Gaurang Tapase Gauvain Pocentek Geraint North Gerald McBrearty Gerard Garcia Ghanshyam Maan Ghanshyam Mann Ghanshyan Mann Ghe Rivero GirishChilukuri Giulio Fidente Glenn M. Gobeli Gloria Gu Gorka Eguileor Goutham Pacha Ravi Goutham Pacha Ravi Guan Qiang Guoqiang Ding Guy Kim Gyorgy Szombathelyi Gyorgy Szombathelyi Gábor Antal Ha Van Tu Hagen Finley Hahyun Hai-Xu Cheng Haiwei Xu Haleema khan Hamdy Khader Han Guangyu Hanxi Liu Hanxi_Liu Haomai Wang Harsh Mishra Harshada Mangesh Kakad Haruka Tanizawa Hayley Swimelar He Yongli Helen Walsh Hemna Hervé Beraud Hironori Shiina Hiroyuki Eguchi Huangsm Hui Cheng Hyeock Kwon Ian Denhardt Ian Govett Ian Wienand Ian Y. Choi Ibadulla Khan Igor Pugovkin Ihar Hrachyshka Ildiko Vancsa Ilya Shakhat Ilya Tyaptin Inhye Park Irina Mihai Isaac Beckman Ivan Anfimov Ivan Kolodyazhny Ivan Kolodyazhny Ivan Pchelintsev Ivan Pchelintsev Ivaylo Mitev Ivy Zhang Jack Lu Jacob Gregor Jacob M. Jacob James Carey James E. Blair James E. Blair James Page James Palmer Jamie Lennox Jamie Lennox Jan Hartkopf Jan Klare Jan-Eike Golenia Jasakov Artem Jason Ni Javeme Jay Conroy Jay Lau Jay Lee Jay Mehta Jay Payne Jay Rubenstein Jay S Bryant Jay S. Bryant Jay S. Bryant Jay S. Bryant Jay Wang Jayaanand Borra Jean Pierre Roquesalane Jean-Baptiste RANSY Jean-Baptiste Ransy Jean-Marc Saffroy Jean-Philippe Evrard Jean-Pierre Roquesalane Jeegn Chen Jeff Applewhite Jeffrey Zhang Jegor van Opdorp Jenny Shieh Jens Harbott Jeremy Liu Jeremy Stanley Jesper Schmitz Mouridsen Jess Egler Jesse Keating Jesse Pretorius Ji-Wei Jia Min Jichao Zhang Jim Branen Jimmy McCrory Jinru Yan Jiří Suchomel Joe Cropper Joe D'Andrea Joe Gordon Joe Gordon Joel Coffman Joel Friedly Johannes Kulik John Cates John Garbutt John Griffith John Griffith John McDonough Johnny Chou JohnnyChou Johnson Koil Raj Johnson koil raj Jon Bernard Jon Bernard Jordan Pittier Jordan Tardif JordanP Jorge Merlino Jorge Niedbalski Jose Castro Leon Jose Phillips Jose Porrua Joseph Glanville Joseph Vokt Josh Durgin Joshua Harlow Joshua Huber JuPing Juan Antonio Osorio Robles Juan Manuel Olle Juan Zuluaga Julia Varlamova Julien Danjou Jun Ishizaki Justin A Wilson KIYOHIRO ADACHI Kai Zhang Kaitlin Farr Kaitlin Farr Kallebe Monteiro Kamil Rykowski Karthik Prabhu Vinod Kartik Bommepally Kazufumi Noto Kazumasa Nomura Kedar Vidvans Keerthivasan Keerthivasan S Keigo Noha Keisuke Kuroki Ken'ichi Ohmichi Ken'ichi Ohmichi Kendall Nelson Kendall Nelson Kenji Yasui Kevin Carter Kevin Fox Khadija Kamran Kien Ha Koert van der Veer Konrad Gube Kourosh Vivan Kui Shi Kumar Prashant Kun Huang Kun Huang Kuo-tung Kao Kurt Martin Kurt Martin Kurt Taylor Kushal Kushal Wathore Lakhinder Walia Lakshmi Narayana Lance Bragstad Larry Matter LarryLiu Laura Sofia Enriquez Lee Lee Yarwood Lena Novokshonova Lenny Verkhovsky LeoCampelo LeopardMa Li Min Liu Liang Chen Liang Fang Lin Hua Cheng Lin Yang Lingxian Kong Liqin Dong LisaLi Liu Qing Liu Xinguo <295988511@qq.com> LiuNanke LiuSheng Liucheng Jiang Louie Kwan Lucas Alvares Gomes Lucas Oliveira Lucas de Oliveira Lucian Petrut Lucian Petrut Luciano Lo Giudice Lucio Seki Lucio 
Seki Luigi Toscano Luis A. Garcia Luis Pigueiras Luisa Amaral Luiz Gavioli Luiz Gavioli Lujin Lukáš Piwowarski Luong Anh Tuan Lynxzh MENJO, Takashi MORITA Kazutaka Maciej Szwed Madhuri Kumari Maksim Malchuk Mandell Degerness Manjeet Singh Bhatia Manojkiran Manojkiran MaoyangLiu Marc Koderer Marc Koderer Marc Methot Marc Methot Marga Millet Margarita Shakhova Marian Horban Mark Giles Mark Goddard Mark Korondi Mark McLoughlin Mark Sturdevant Mark T. Voelker Markus Zoeller Martin Kletzander Masaki Kanno Masayuki Igawa Masayuki Igawa Masayuki Igawa Matan Sabag Mate Lakat Mate Lakat Matheus Andrade Mathieu Gagné Matt Fischer Matt Riedemann Matt Riedemann Matt Smith Matthew Edmonds Matthew Thode Matthew Treinish Matthew Treinish Matus Brandys Maxim Nestratov Mehdi Abaakouk Mehdi Abaakouk Mehdi Abaakouk Meir Kriheli MelloCaique Michael Arndt Michael Basnight Michael Berlin Michael Dovgal Michael J Fork Michael Kerrin Michael Krotscheck Michael Latchmansingh Michael McAleer Michael McAleer Michael Price Michael Price Michael Rowden Michael Still Michal Arbet Michal Dulko Michal Jura Michał Dulko Michel Nederlof Midun Kumar Mike Bayer Mike Mason Mike Perez Mike Rooney Mike Rooney Mikhail Khodos Mikhail Khodos Mingyue Qian Minmin Ren Miriam Yumi Mitsuhiro SHIGEMATSU Mitsuhiro Tanino Mohammed Naser Monica Joshi MonicaJoshi Monty Taylor Morgan Fainberg Morgan Fainberg Moritz "WanzenBug" Wanzenböck MotoKen Mounika Sreeram Mudassir Latif Muhammad Mubeen Khan Mukul Patel Mykhailo Dovgal Nahim Alves de Souza Nahim Alves de Souza Nam Nguyen Hoai Naoki Saito Naoki Saito Nashwan Azhari Nate Potter Nathaniel Potter Navneet Singh Navneet Singh Neha Alhat Ngo Quoc Cuong Nguyen Hai Nguyen Hai Truong Nguyen Hung Phuong Nicholas Jones Nicolas Trangez Nikesh Nikesh Kumar Mahalka Nikesh Mahalka Niklas Schwarz Nikola Dipanov Nikolaj Starodubtsev Nikolay Sobolevskiy Nilesh Bhosale Nilesh Thathagar Nirmal Ranganathan Nishant Kumar Nitin Madhok Nolwenn Cauchois Olga Kopylova Olivier Pilotte Ollie Leahy Ollie Leahy OmarM Ondřej Nový OpenStack Release Bot Ory Jonay Ovidiu Poncea Pablo Caruana Pablo Colson Pablo Iranzo Gómez Pascal Wehrle Patrick East Paul Mathews Paul McMillan Pavel Boldin Pavel Glushchak Pavel Kirpichyov Pavlo Shchelokovskyy Pawel Kaminski Pedro Navarro Perez Peng Wang Pengfei Zhang Pete Zaitcev Peter Penchev Peter Pentchev Peter Wang Philipp Marek Pierre Riteau Pierre-André MOREY Pony Chou Pradeep Sathasivam Prajakta Swapnil Belapurkar Pranali Deore Pranali Deore PranaliD PranaliDeore PranaliDeore Premlata84277 Pádraig Brady Qian Gao Qian Min Chen Qin Zhao Qiu Yu Rafael Rivero Rafael Toschi Chiafarelli Rafael Weingärtner Rafi Khardalian Raghavendra Tilay Rahman LBL Rahman Muhammad Rahul Verma Raildo Mascena Rajat Dhasmana Rajesh Tailor Rajesh Tailor Rakesh H S Rakesh Jain Rakesh Mishra Ralf Haferkamp Ramy Asselin Ratnakaram Rajesh Raunak Kumar Ravi Edpuganti Ravi Jethani Ravi Shekhar Jethani Ray Chen Ray Chen Rebecca Finn Rich Hagarty Richard Hedlind Rick Bartra Rick Chen Rick Harris Rob Crittenden Robert Collins Robert Mizielski Rodrigo Barbieri Rodrigo Barbieri Rodrigo Barbieri Rohan Arora Rohan Kanade Rohit Karajgi Romain Chantereau Romain Hardouin Roman Bogorodskiy Roman Podolyaka Ronald Bradford Ronen Kat Rongze Zhu Rongze Zhu RongzeZhu Rui Yuan Dou Rushi Agrawal Rushil Chugh Russell Bryant Ryan LIANG Ryan Liang Ryan Liang Ryan Lucio Ryan McNair Ryan Rossiter Ryan Selden Ryosuke Mizuno Sachi King Sagar Waghmare Sai Kiran Saikumar Pulluri Sam Morrison Samantha Blanco Samuel Matzek Sandeep Pawar 
Sandeep Yadav Santhoshkumar Kolathur Sascha Peilicke Sascha Peilicke Sascha Peilicke Sasikanth Satish Venkatasubramanian Satyajeet Shahane Saverio Proto Scott DAngelo Scott Devoid Sean Chen Sean Chen Sean Dague Sean Dague Sean Dague Sean McCully Sean McGinnis Sean McGinnis Sean McGinnis Sean Mooney Sean Roberts Sebastian Jeuk Seif Lotfy Seiji Aguchi Sergey Gotliv Sergey Skripnick Sergey Vilgelm Sergey Vilgelm Sergio Cazzolato Serhii Rusin Shane Wang ShangXiao Shao Kai Li Shatadru Bandyopadhyay Shay Halsband Shay Halsband Sheel Rana Shilpa Jagannath Shlomi Avihou Shlomi Sasson Shogo Takazawa Shuangtai Tian Shunei Shiono ShunliZhou Shyama Venugopal Silvan Kaiser Simon Dodsley Simon Dodsley Simon Lorenz Simon O'Donovan Sivaramakrishna Garimella Skyler Berg Slade Baumann Slawek Kaplonski Sneha Rai Soffie Huang Sofia Enriquez Sonia Ghanekar Sorin Sbarnea Sreedhar Varma Stack Stefan Amann Stefan Nica Stephan Pampel Stephen Finucane Stephen Mulcahy Steve Kowalik Steve Noyes Steven Kaufer Stuart McLaren Stuart McLaren Subramanian Neelakantan Subramanian Neelakantan Sumit Shatwara Surya Ghatty Sushil Kumar Sven Anderson Sven Wegener Svetlana Shturm Swapnil Kulkarni Swapnil Nilangekar Swathi Hrishikesh Sylvain Baubeau Szymon Borkowski Szymon Wroblewski Szymon Wróblewski Tadas Ustinavičius Takahiro Shida Takashi Kajinami Takashi Kajinami Takashi Menjo Takashi NATSUME Takashi Natsume Takashi Natsume Takeaki Matsumoto Takeshi Nishikawa Tao Bai TaoBai Teruaki Ishizaki Thang Pham Thelo Gaultier Thiago Correa Thierry Carrez Thomas Bechtold Thomas Bechtold Thomas Goirand Tiago Pasqualini Tim Clark Timothy Okwii Tin Lam Tina Tina Tang Tingting Zha Tobias Urdin Tobias Urdin Tobias Urdin Toheeb Tom Barron Tom Barron Tom Barron Tom Cammann Tom Fifield Tom Fifield Tom Hancock Tom Patzig Tom Swanson Tomas Hancock TommyLike Tomoki Sekiyama Tomoki Sekiyama Tony Breeds Tony Saad Tony Xu Tovin Seven Tristan Cacqueray Trung Trinh Tzur Eliyahu TzurEliyahu Unana Okpoyo Unmesh Gurjar Unmesh Gurjar Vadim Kryvian Vahid Hashemian Valeriy Ponomaryov Van Hung Pham Vasanthi Thirumalai Vasyl Khomenko Vasyl Saienko Venkata Krishna Venkata Krishna Venkata Krishna Thumu VenkataKrishna Reddy Veronica Musso Victor A. Ying Victor Rodionov Victor Sergeyev Victor Stinner Victoria Martinez de la Cruz Vijay Ladani Vilobh Meshram Vincent Hou Vincent Hou Vinícius Angiolucci Reis Vipin Balachandran Viraj Hardikar Vishakha Agarwal Vishvananda Ishaya Vivek Agrawal Vivek Dhayaal Vivek Soni Vivek Soni Vlad Gusev Vladimir Popovski Vladislav Belogrudov Vladislav Belogrudov Vladislav Belogrudov Vladislav Kuzmin Vu Cong Tuan Walter A. Boring IV Walter A. Boring IV Walter A. 
Boring IV Wenhao Xu Wenhao Xu Wenjun Wang WenjunWang1992 <10191230@zte.com.cn> William Durairaj Wilson Liu Winicius Silva Woojay Poynter Wu Wenxiang Xavier Queralt Xi Yang Xi Yang Xiangfei Zhu Xiao Chen Xiaojun Liao Xiaoqin Li Xiaoqin Li XieYingYun XinXiaohui Xing Yang Xing Yang Xing Zhang Xingchao Yu Xinli Guan Xinliang Liu Xinyuan Huang Xu Ao Xu Qi Xuchu Jiang XueChendi YAMADA Hideki Yadiel Xuan(轩艳东) Yaguang Tang Yaguang Tang Yaguang Tang Yaguo Zhou YanLin Ren Yandong Xuan Yang Youseok Yang Yu YangLei Yasuaki Nagata YasunoriMaruyama Yejia Xu Yi Chun, Huang Yian Zong Yikun Jiang Yingxin Yong Huang Yosef Berman Yoshihide Matsumoto Yu Shengzuo Yu Zhang YuanHui Xu Yucong Feng Yug Suo Yuiko Takada Yuji Hagiwara Yuki Kasahara Yukihiro KAWADA Yun Mao Yuriy Nesenenko Yuriy Taraday Yuriy Zveryanskyy Yury Kulazhenkov Yury Kulazhenkov Yusuke Hayashi Yuzlikeev Eduard Zhang Fan Zhang Jinnan ZhangHongtao Zhao Liqiang Zhengguang--reset-author Zhenguo Niu Zhi Kun Liu Zhi Yan Liu ZhiQiang Fan Zhihai Song Zhiteng Huang Zhiteng Huang Zhiteng Huang ZhongShengping Zhongyue Luo Zhongyue Luo ZhuRongze Zohar Mamedov Zohar Mamedov Zoltan Arnold Nagy abhiram moturi abhiram_moturi abhishekkekane agireesh aimee.ukasick albertjone alexey-mr alonma amar7ibm amoturi anastasia-karpinska anastasiya-zhyrkevich andrei.perepiolkin ankitagrawal annegentle anthony.bellino apoorvad appsdesh april arsenc ashish.billore bailinzhang baiwenteng bhagyashris binean bingyan bolin.wu <1652124020@qq.com> caixiaoyu caoyuan caoyue cccqqqlll <406454833@qq.com> chadlung chaochin chenaidong1 cheneydc chenhb chenxiangui chenxing chenying chenzongliang chhagarw chiehhsuny chihyuwu clayg cuiyeliu czl389 daisy-ycguo dengzhaosen dhinesh diem_tran digvijay2016 dineshbhor dingd dongdongpei doubletao eduardBM enriquetaso erikzaadi felix23ma flelain fpxie fujioka yuuichi futaotao fuzk galstrom21 gaofei gengchc2 gfm gh159m ghanshyam ghanshyam git-harry gksk gtt116 guangpei.liu haailani haixin hallur, parashuram hamza hanchl <1922361860@qq.com> haobing1 happystacker hemna hgangwx hjy <821328772@qq.com> honjo-rikimaru-c6 howardlee huananhuawei huangtianhua hussainchachuliya huyang iain MacDonnell iberezovskiy imacdonn inori inspur-storage inspurericzhang int32bit iswarya_vakati ivyzhang j-griffith jakedahn jarbassaidai jayaanan jayaanand borra jayaanand.borra@netapp.com jbrogan jenny-shieh jeremy.jia jeremy.zhang jessewu jiamin jianghua wang jiansong jiaohaolin jking-6 john-griffith john.griffith8@gmail.com jolie jun xie junboli kan kato katarimanojkumar kedar-vidvans keystone keystone kongwei kongxiangyun kshimamu kushal lakshman leiyashuai leseb lg.yue liangjingtao lihaijing lijing lijunbo lijunjie lijunli1 likui limin0801 ling-yun lirenke lisali liu-sheng liucheng liudong liujiong liuke2 liuqing liusheng liushuobj liusu liuxichao liuxinguo liuyamin lixiaoy1 liyingjun liyingjun liyuanyuan ljhuang lkuchlan llg8212 lol <821328772@qq.com> loooosy lrqrun ls1175 luqitao lvdongbing lw-zte maaoyu malei manishd manishladdha mannuray maoshuai marcusvrn masahiro ikeda mattanShalev mayurindalkar melanie witt melissaml michael-mcaleer michael-mcaleer mikhail mouad benchchaoui msaravan neochin nidhimittalhada nikeshm nikeshmahalka ningwei nirajsingh nuritv odonos12 olegnest oorgeron openstack pallavi pangliye pawnesh.kumar pengyuesheng peter_wang peter_wang phenom pooja jadhav poojajadhav prajaktab pran1990 prashkre rackerjoe raghavendrat raghavendrat rajinir rajinir ramakris rick.chen ricolin ricolin root root root root root ruichen sanuptpm sarat inuguri sarat 
inuguri sathish-nagappan sathya-narayana saurabh savihou scott-dangelo scottda scottda scottda sdodsley shangxiaobj shanks.yang shaoxj sharathkacham shenjiatong shihanzhang silvacarloss skudriashev smartu3 sparkliu sreerammounika srushti stack stack stack stack stack suguangfeng sumit7990 sunyandi supriya-kotwal swapnil-nilangekar tanlin taylorh tianhui tkauthar tony-saad tonybrad tpsilva traghavendra tsekiyam tswanson tushargite96 ubaumann unicell ushen ustcdylan vdhakad venkatakrishnathumu venkatamahesh vinay_m vinita vitas.yuzhou vrushti walmart wang yong wang yong wangfaxin wanghao wanghao wanghong wanghongtaozz wanghongxu wanghui wangpeng wangqi wangqiangbj wangwei wangxiyuan wangxiyuan wangyu wangzhenyu whoami-rajat wingwj wu.shiming wuchongyao wuqiongdan wuxueyi19 wuyuting xgwang5843 xianming mao xiaolei hu xiaoxi_chen xiexs xing-yang xing-yang xqli-openstack xuanyandong xuleibj yangheng yanghuichan yanjun.fu yatin karel yehia-beyh yenai yfzhao yfzhao yixuan yixuan.zhang yixuanzhang yoan desbordes yogesh yogeshprasad yuanyue yuc yuhui_inspur yuriy_n yuval yuval brave yuval brave yuyafei zejian Zhuang zeng jia zengyingzhe zenkuro zhang.lei zhangbailin zhangboye zhangchao010 zhangchunlong1@huawei.com zhangdaolong zhangdebo1987 zhangguoqing zhangni zhangsong zhangxiaofan02 zhangxiaohan zhangyang zhangyanxian zhangyanzi zhangyi zhaochy zhaohua zhaoleilc <15247232416@163.com> zhaoqin zhaoyixin zheng yin zhengyao1 zhongjun zhouxinyong zhu.boxiang zhu.fanglei zhu.rong zhubx007 zhufl zhulingjie zhurong zhuzhubj ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/CONTRIBUTING.rst0000664000175000017500000000112300000000000015525 0ustar00zuulzuul00000000000000The source repository for this project can be found at: https://opendev.org/openstack/cinder Pull requests submitted through GitHub are not monitored. 
To start contributing to OpenStack, follow the steps in the contribution guide to set up and use Gerrit: https://docs.openstack.org/contributors/code-and-documentation/quick-start.html Bugs should be filed on Launchpad: https://bugs.launchpad.net/cinder For more specific information about contributing to this repository, see the cinder contributor guide: https://docs.openstack.org/cinder/latest/contributor/contributing.html ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315575.0 cinder-27.0.0/ChangeLog0000664000175000017500000172515500000000000014661 0ustar00zuulzuul00000000000000CHANGES ======= 27.0.0 ------ * [NetApp-ZAPI] Enabling snapshot creation for flexgroup pool * Add pyproject.toml file * [NetApp] Enabling total\_volumes capability support * [Pure Storage] Add support for retype to sync/trisync-replicated volume * [Pure Storage] Consistent hostnames across replicated backends * [Pure Storage] Fix volume reconnect error * Fix: optimize create volume for NFS * Add support for GET location API * Register glance user in keystoneauth plugin * [Pure Storage] Cinder manage quota breach deletion fix * [IBM SVf driver] Adding support for temporary volumegroup * Dell PowerMax: RDF consistency exempt (follow-up) * Dell PowerFlex: Improve secret handling * Remove remaining job with Ubuntu Jammy (22.04) * Dell PowerMax: NVMe/TCP suuport (follow-up) * NetApp NVMe namespace support for in-use expansion * Fix typo in release note * [NetApp]:Cinder support for self-signed transport * NetApp: NVMe namespace mapping fails during VM live migration * Dell PowerMax Driver: Added Support for NVMe/TCP * NetApp - Added ONTAP libs for ASAr2 platform * Added support for NetApp ASA r2 (All-Flash SAN Array r2) disaggregated * Delete image cache volumes when they're unusable * NetApp - Extended Consistency group support for NVMe/TCP driver * Dell PowerMax: RDF consistency exempt * HPE 3par - skip license check for new wsapi * NetApp - Fixed detach issue for multi-attached volume * [Pure Storage] Resolve EG1 arrays not reporting DRR * RBD: Fix issue with managing volume with type properties * Add support for glance new location APIs * Doc: Add doc for replication in OpenStack * Add testing for optimized volume upload * mypy: enable strict equality checking * mypy: Bump to 1.17.0 * mypy: Bump to 1.16.1 * RBD unit tests: Set cfg.rados\_connect\_timeout * Imported Translations from Zanata * Fix volume clone across cluster members * Resolve MovedIn20Warning * Update PowerMax driver doc support matrix for Caracal Dalmatian Epoxy * NetApp: iSCSI/FC detach operation fails when multiple initiators connected * Remove Python 3.9 support * [Pure Storage] Fix snapshot replication interval issue * NFS driver: Fix fail to resize NFS volume with snapshots * Fix flake8 warnings on flake8-import-order>0.19.0 * NetApp - Fixed Device busy error when multiple cinder volume clone created in parallel * Pin flake8-import-order<0.19.0 * Pin flake8\_import\_order version to <0.19.0 * [Pure Storage] Add FlashArray Volume Tags * [Pure Storage] Fix issue with VLAN LACP bond ports * Hitachi: Fix to have compatibility of GAD volumes * NetApp NVMe: Update NVMe support matrix update * HPE 3par: Ignore duplicate IP in iSCSI/vlan ip * [Pure Storage] Disable management of secure tenant volumes and snaps * cinder-manage: Use same timestamp for purging deleted rows * nfs: minimal mypy coverage * Disable glance secure hash in ceph job * Replace deprecated datetime.utcfromtimestamp() * NetApp NAS: 
Update NFS online volume extending support matrix update * [Pure Storage] Add capacity based backend QoS options * Remove tags from README * NVMe-oF Target: Fix incorrect check for initiator in connector data * Update python versions for testing * tox: Match doc8 ignore-path with flake8 exclude parameters * Add group resources for quota syncing * [docs] Extra spaces breaking rst->html rendering * NFS driver: Fix fail creating volume with multiple snapshots * tgt target: Provide unique scsi\_sn and scsi\_id * Allow quota sync to actually fix quota issues * [docs] update description of mv 3.46 * Clean up volume\_opts * zuul: cinder-plugin-ceph-tempest: raise swap size * [Pure Storage] Fix QoS setting for cloned volumes * Fix reimage with snapshot backed image * Pure Storage - bump version numbers for 2025.2 (Flamingo) * Imported Translations from Zanata * Update master for stable/2025.1 26.0.0.0rc1 ----------- * [Pure Storage] Add volume group support * Revert "[Pure Storage] Fix QoS setting for cloned volumes" * Implements cgroupsv2 * Lightbits update Qos doc's * [Pure Storage] Fix issue with LACP ports not being identified * [Pure Storage] Remove SafeMode PGs from Replication Pods * Deprecate iser\_opts * Add libpcre3-dev in bindep.txt for pcre.h * Dell PowerMax: Add PowerMax Laurel Release support * RBD: Correct DEBUG logging in QoS * [NetApp] Fix latent issues in unit tests * [NetApp] Certificate based authentication for NetApp drivers * [Pure Storage] Fix QoS setting for cloned volumes * Dell PowerMax: Enhanced the retry mechanism to verify the active snapshots * [NetApp] Sync mirror support for NetApp Backends * Dell PowerMax: Added exception handling after the masking view REST call * [Pure Storage] Manage Volume from GUI fails * Update the documentation regarding Lightbits' multi-tenancy functionality * lightbits: doc's "retype" support * Dell PowerMax: Add PowerMax Laurel Release support * Fujitsu Driver: Improve CLI function * unit tests: address TODO in base test case * docs: add supported db info * Dell PowerFlex: Add the 4.x support in document * HPE 3par: Add comment for cloned volumes * mypy: Support mypy 1.15.0 * lightbits: add qos support * Rally: Fix usage of cinderv3 service name * StorPool: Use os-brick instead of packages \`storpool\` and \`storpool.spopenstack\` * HPE 3par: Update the calculation of free\_capacity * Updating the Lightbits Cinder driver documentation regarding renaming * Driver assisted migration on retype when it's safe * Remove trailing white space in release note * RBD: Fix upload volume with different format * HPE 3par - Fix retype volume * Respond with HTTP 409 on resource conflict * Imported Translations from Zanata * [docs] Update REST API version history * HPE 3par: getWsApiVersion login/logout * Imported Translations from Zanata * HPE 3par iSCSI: getWsApiVersion now requires login * Add 'enforce\_multipath' in connection\_properties * NFS update volume attachment format during volume snapshot * Dell PowerMax: multi detach req caused race conditions * [Pure Storge] Ensure correct provisioned space value is used * test\_storpool.py: Assume volumes have a volume type * [Pure Storage] Enhance reported performance characteristics * Remove default override for config options policy\_file * reno: Update master for unmaintained/2023.1 * Remove unused httplib2 * Nimble: Report max\_oversubscription\_ratio via backend capabilities * Hitachi and OEM: masking REST API token on log * Huawei hypermetro: RECOVERYPOLICY typo * Fix type passed to write function 
during backup restoration * Fix "signature\_verified" metadata propagation to images * Always upload image volumes in a native thread * Fix a typo for assert\_called\_with * Pin upper version of mypy * Tests: Fix double mocking in test\_rbd * mypy: Print mypy version used * Imported Translations from Zanata * fix: typo in pure driver * Tests: Fix type error in volume encryption unit tests * Update gate jobs as per the 2025.1 cycle testing runtime * datacore: Mock is\_valid\_ipv6 to fix unit tests with netaddr >= 1.0.0 * HPE 3PAR: Add HPE Alletra MP related information * Fix mypy errors * tox: Drop envdir * Tests: Skip multiprocess test when using 1 cpu * StorPool: DRY volumeCreate() in create\_volume() * Hitachi: Support matrix fix * Make volume Glance metadata bulk update idempotent * Pure Storage - bump version numbers for 2025.1 (Epoxy) * Use builtin hashlib.md5 * Hitachi: fix to initialize a lock counter * nit: Add FIXME to image volume cache * Imported Translations from Zanata * trivial: Remove trailing spaces, mixed tabs/spaces * trivial: Enable E275 harder * Update master for stable/2024.2 * Fix remaining UUIDWarning 25.0.0.0rc1 ----------- * Hitachi: Support for QoS * Imported Translations from Zanata * Optimize rbd upload volume to image * Tests: Clean up RBD image unit tests * Imported Translations from Zanata * Add cinder-manage command to update service\_uuid * Fix pep8 issues in driver docs * Dell PowerStore driver: Add Cinder active-active support * Dell PowerStore: Added timeout into rest API call * Dell PowerStore driver: QoS support * Add API document for availability zone list * Create multiple snapshots from same volume * Fix "cinder-manage quota check" * Doc: Remove cinder-manage logs documentation * QemuImgInfo: Replace remaining usage of human format * [Pure Storage] Fix CG, FC WWN and NVME-TCP NSID bugs * Support hacking 7.0.0 * Skip image format detection tests for compute * StorPool: Fix typo * Hitachi: Prevent to delete a LDEV assigned to multi objects * Mock get\_all\_classes in setUp() * Remove hard coded class in FakeFilterScheduler * Use FakeHostManager for scheduler tests * Use FakeHostManager for weigher tests * Enhance FakeHostManager class * Do not call get\_all\_classes to get all classes * More thorough mock on a backup message test * Added support to Active/Active mode in ISCSI/FC drivers [NetApp ONTAP] * Keep new RBAC disable by default * HPE Nimble: Add replication * StorPool: declare the clone\_across\_pools capability * Add the clone\_across\_pools driver capability * Imported Translations from Zanata * Make Cinder reproducible * Pass with mypy 1.11.0 * reno: Update master for unmaintained/zed * Imported Translations from Zanata * add IPACL support to the Lightbits Cinder driver * RBD: Fix handling of RBD errors in get\_manageable\_volumes * Update lightos active active doc's * Hitachi: Fix to set correct object ID for LDEV nickname * Imported Translations from Zanata * Stop testing old release of cinder * CVE-2024-32498: Check for external qcow2 data file * Remove psutil requirement * Deprecate GlusterFS backup driver * Mark Quobyte Driver Unsupported * Tests: Fix misspelled scheduler unit test name * StorPool: create\_cloned\_volume() improvements * Drop six * Remove six from Datera driver * Implement revert\_to\_snapshot() for StorPool * Fujitsu Driver: Add support for revert to snapshot * Fujitsu Driver: Add parameter fujitsu\_use\_cli\_copy * Fujitsu Driver: Improve create snapshot * Fujitsu Driver: Support for update migrated volume * pylint: 
Skip two E1130 errors * trivial: Fix minor grammatical error in docs * HPE 3par: getWsApiVersion now requires login * Update docstrings for migration routines * StorPool: fix the retype volume flow * HPE 3par: PP - Return LUN ids from both arrays * Update documentation * StorPool: fix the "rename volume" unit test emulation * StorPool: drop copy\_image\_to\_volume() and copy\_volume\_to\_image() * [Pure Storage] Driver API version upgrade * StorPool driver: remove the obsolete backup\_volume() * Fix Inspur support matrix entry * Make default-types APIs compatible with V3.67 * Imported Translations from Zanata * Fix: [Ceph] Backup Driver Python3 Encoding Issue * Tests: Remove \_\_future\_\_ division * Remove "from \_\_future\_\_ import annotations" * Refactor get\_volume\_type\_extra\_specs * Remove six from STX drivers * Update docstrings for methods related to driver initialization * Fix fast8 tox env * Remove SQLAlchemy tips jobs * Tests: Require moto>=5.0.0 * Add wsgi module * ceph backup: Only warn about striping opts when necessary * Remove pytz dependency * Remove fallback for Python 2 * Hitachi: Stop frequently REST API request in test * Remove the duplicate code * Imported Translations from Zanata * Fujitsu Driver: Improve get volume information * Ceph: Add option to keep only last n snapshots per backup * [backup] [ceph] Catch ImageNotFound for incremental backup * Speed up starting cinder-backup * docs: Migrate docs from cinderclient to OSC (part 1) * Fix broken backup\_swift\_service\_auth=True * swift backup driver: Ignore 404 during object deletion * Fix snapshot status is always backing-up * Remove the rest of ietadm * [docs] update gerrit group info * Update ruff config * Update CI for Dalmatian * Remove cyclical import in dell\_emc powerstore driver * hacking: Reenable E275 * Support hacking 6.1.0 * hacking: Fix E501 errors * Fujitsu driver: Improve volume deletion * Mark nexenta\_encryption option as deprecated * PowerMax: Allow live migration without pool name * Dell PowerMax: Fix SnapVx unlink failure * [docs] Add recheck advice * HPE 3par: Unable to create clone of replicated vol * Imported Translations from Zanata * Ensure backup availability zone is populated if empty * Update master for stable/2024.1 24.0.0.0rc1 ----------- * reno: Update master for xena Unmaintained status * reno: Update master for Unmaintained branches * Deprecate Windows OS support * Dell EMC: Deprecate volume drivers for EOL products * Correct releasenote bug-1951250 * PowerStore Driver - Add a unit test for connection properties for NVMeOF connector * [NetApp] LUN space allocation support * Dell EMC: PowerMax - Configurable SRDF snapshots * Hitachi: Update driver document * Fix syntax in release note * Dell PowerFlex: Added timeout into rest API call * Dell PowerMax: Added timeout into rest API call * Tests: Support jsonschema 4.21 * Fix online data migration * gcs: Remove unused fallback to oauth2client * gcs: Remove logic for google-api-python-client < 1.6.0 * Run protection tests during gate check * Add the os-extend\_volume\_completion volume action * JovianDSS: Rework Open-E JovianDSS driver * Remove six from ProphetStor driver * Imported Translations from Zanata * reno: Update master for yoga Unmaintained status * Tests: Fix compat with moto>=5.0.0 * Remove six from netapp drivers * Recognize Dell PowerMax Unisphere 10.x (x>0) * Remove six from zonemanager module * Remove six from Nexenta drivers * Remove six from Synology driver * Remove six from Veritas drivers * Remove six from Hedvig 
driver * Dell PowerFlex driver: update support matrix in doc * Fix 'cinder-backup' service when Swift with TLS enabled * Remove six from Inspur driver * Remove six from Huawei drivers * Remove six from VMWare drivers * Remove six from test codes * Remove six from FusionStorage driver * Remove six from Zadara driver * Remove six from SandStone driver * Remove six from StorPool driver * Remove six from Infortrend driver * Remove six from HPE drivers * Remove six from Fujitsu drivers * doc: Rephrase customers by users * Replace CRLF by LF * Coerce booleans to integer values in paginate\_query * Add Cinder active-active support for Dell PowerFlex driver * Add GMR to cinder wsgi * Bump mypy to 1.7.0 * StorPool: drop \_attach\_volume() and \_detach\_volume() * Quota: Add backup related default limits * Remove consistencygroups quota entries * Fujitsu Driver: Update extend volume functionality * Ceph: Catch more failure conditions on volume backup * [Pure Storage] Enable sync repl volume creation during failover * Remove leftover nested quota DB fields from model * Clean old temporary tracking * DB: Set quota resource property length to 300 * Prevent table and column alter and drop * Handle missing volumes during cleanup of incomplete backups * Update python classifier in setup.cfg * Fix message confusion during backup restore * HPE 3par - Add ipv6 support * Fix error in cinder-manage quota sync cmd * Don't retry service update in report\_state * Skip sparse copy during volume reimage * Change log message in get\_qemu\_data * Pure Storage - bump version numbers for 2024.1 (Caracal) * mypy: Remove errant annotation * RBD: Use "RBD" capitalization in user-facing text * mypy: Cleanup "noqa: H301" comments * HPE 3par: Fix issue seen during retype/migrate * Tests: Fix invalid assert calls * Stop testing cinderlib * [docs] quotas apply to image-volume-cache owner * StorPool: cosmetic: comment headings instead of empty lines * RBD: Flattening of child volumes during deletion * pylint: Upgrade to 3.0 * Doc: Use more common form of volume create command * Hitachi: Fix exception when deleted volume is busy * Fix: Roll back volume status during reimage failure * [coordination] backend\_url should be secret * Add unit test for successful \_run\_ssh execution in Cisco driver * Remove six from nfs/remotefs drivers * Migrate cinder-sqlalchemy-2x job to py311 * UTs: Fix clearing of RPC exchange between tests * db: Set name for FK constraint * db: Remove erroneous primary key definitions * db: Silence alembic logging * Add job to test with SQLAlchemy master (2.x) * Doc: Fix incorrect QoS support for rbd driver * Fix invalid UUIDs newly detected * db: Use the same connection throughout test * db: Replace use of enginefacade in migrations * db: Replace use of Connection.execute * HPE XP and NEC V: Host group name is not correct * Revert "Driver assisted migration on retype when it's safe" * [Pure Storage] Unit tests and fixed variable name for Replication-Enabled Consistency Groups * Add cinder active-active support for Dell PowerMax driver * Tests: Make NEC tests faster * Remove importlib-metadata from requirements * Fix test\_nvmet\_driver tests * RBD: tpool.Proxy client object * db: Don't rely on branched connections * Implement add\_consumer, remove\_consumer KeyManager APIs * Pure: Report SAM-2 addressing mode for LUNs * [doc] Update markups of options in capacity-based-qos * [Pure Storage] Uniform Sync Replication disconnect * Hacking: Remove C306, C308 checks * Stop sharing tox envdir between pep8 and fast8 
* Imported Translations from Zanata * Update master for stable/2023.2 23.0.0.0rc1 ----------- * Reference - Documentation correction * Fujitsu Driver: Add QoS support * Imported Translations from Zanata * Tests: Quiet Fungible invalid UUID warnings * Tests: Save 30s on hbsd FC tests * Add default to read\_deleted in context's from\_dict * api-ref: Improve sort, pagination parameter docs * Ceph: Fix restoring old backups to a different backend * [Pure Storage] Replication-Enabled and Snapshot Consistency Groups * Use openstack-tox-py311 job * Do not ignore availability\_zone in backup creation * use binary psycopg2-binary * Increase size of volume image metadata values * Automate generation of qos api-ref samples * Imported Translations from Zanata * docs: add missing space * NetApp ONTAP: Added support to Active/Active mode in NFS driver * NetApp ONTAP: Fixed errors on failover-host operation with REST API * Register all auth options when deleting encryption key * [Pure Storage] Fix failure in replication failover * Imported Translations from Zanata * [SVf] : Enable support for mirror-pool option for replication volume-type * api-ref: Fix indentation, case * Scheduler: Remove unnecessary DB read * Yadro Tatlin Unified FC driver * Add Cinder driver for TOYOU NetStor TYDS * HPE: Fix error during retype of volume without comment * Automate generation of backups api-ref samples * Fix a regression in restoring to sparse volumes * [docs] Update REST API version history * PowerMax: Fix deadlock moving SGs * Cleanup image\_utils chown check * Backup: Chunked driver reduce copying * Automate generation of volume transfer api-ref samples * Imported Translations from Zanata * mypy: Add backup/rpcapi.py * DSM is unable to fetch lowercase WWNs in SC * Imported Translations from Zanata * Ceph: Fix restore backups to diff backend * NetApp ONTAP: Fix create FlexVol pool replica * HPE 3PAR: use vlan iscsi ips * PowerStore driver - documentation update * Automate generation of volume transfer api-ref samples * Automate generation of backups api-ref samples * Nimble: Enable thin provisioning as default * Remove six from qnap driver * mypy: Cover cinder/flow\_utils.py * Make lvm-lio-barbican a canary job * api-ref: Remove non-existent attribute * Fix glance metadata properties filtering * Remove six from kaminario driver * Revert "Add tempest integrated storage job to run on ubuntu focal" * zuul: Extend timeout for cinder-plugin-ceph-tempest * Tests: Provide filter arg to VolumeAttachmentNotFound() * Doc: Improve service token * Automate generation of backups api-ref samples * Remove six from dell\_emc drivers * Allow lightos driver to run as active-active * HPE 3PAR: Fix to use small QoS Latency value * Reject unsafe delete attachment calls * Correct multiattach documentation * Imported Translations from Zanata * Hitachi: Fix to use correct pool on secondary storage * Remove six from nimble volume driver * Fix Infinidat driver to inherit compression * [Pure Storage] Add check for new error message * Remove six from solidfire driver * Bump mypy to 1.2.0 * Powerstore: Move from distutils.version to packaging.version * [Pure Storage] Add TCP transport type to NVMe driver * HPE 3PAR: Few issues with new WSAPI (of 2023) * Experiment with ruff * Tests: Fix duplicate dict keys in Pure tests * Add fips check jobs * PowerMax driver - documentation update * Update functional jobs for 2023.2 * Run py311 unit tests (non-voting) * doc: Trivial typo fix * 3PAR: Error out if vol cannot be converted to base * Fix typo in HA 
contributor doc * Shut up pylint about win32\_disk\_size * Make paramiko import optional * Remove six from ibm\_storage drivers * Remove six from storwize driver * Fix wrong assertion methods * PowerFlex driver - documentation update * Remove six from GPFS driver * Pylint: Cleanup ds8k driver workaround * Require novaclient >= 18.2.0 * db: Fix up some API signatures, other style issues * Add note about MYSQL\_REDUCE\_MEMORY * DB: Align volumes\_service\_uuid index in model with migration * HPE: Fix keyerror seen during volume migration * Improve test\_execute\_root\_and\_helper * Pure Storage - bump version numbers for 2023.2 (Bobcat) * Update url of "Unity Replication White Paper" * db: Remove unnecessary 'configure' call * db: Remove the legacy 'migration\_version' table * db: Remove legacy migrations * Restore into sparse volumes * Set packages in setup.cfg * Fix: PowerMax test with flipping force flag * Make PowerMax tests stable * Imported Translations from Zanata * [docs] Update documentation for Infinidat driver * Bump pylint to 2.17.0 * doc: update setup.cfg file * Add Python 3.10 to setup.cfg metadata * RemoteFS: Fix messy string formatting * Update master for stable/2023.1 * RBD: Skip update\_features when features = 0 * Configure a storage node in cinder * test\_rbd\_iscsi: Make tests compatible with python 3.11 22.0.0.0rc1 ----------- * Bump mypy to 1.0 * Dell PowerFlex: Additionnal params for enabling self signed certificates * Improve resource listing efficiency * Remove multiatttach request parameter * Correct release note formatting * PowerMax Driver - Fix for force flag * Follow up: Hitachi and OEM: Update documents * Follow up: Hitachi: Change option name \`hitachi\_pool\` to \`hitachi\_pools\` * Hitachi: Fix key error when backend is down * Hitachi HBSD: Fix invalid "raise None" * Fix NetApp NFS driver to never spawn a native thread again * [SVf] Adding Support for --delete-volumes flag for delete volume group * Hitachi: support data deduplication and compression * Update IBM Storwize drivers * Add missing extend\_target driver method * LVM nvmet: Add support for multiple ip addresses * Ceph backup: Remove leftover unicode comment * Tests: Fix typo'd unit test name * Hitachi: add GAD volume support * Hitachi: add an option for host group name format * Add 3rd Party CI requirements list * [Pure Storage] Add new array status for replication capability * Hitachi: Update retype and support storage assisted migration * [SVf] As part of Flashcopy 2.0 adding config parameter to support volumegroup * Imported Translations from Zanata * Doc: Branding change for Spectrum Virtualize family * Require tooz>=2.8.0 * Update hacking to 5.0 series * Objects: Make OPTIONAL\_FIELDS a tuple * Followup: Correct typo in releasenote * Imported Translations from Zanata * Fix Migrations UTs using wrong DB * LVM nvmet: Add support for shared subsystems * LVM: terminate\_connection fails if no initiator * nvmeof: Support new connection properties * nvmet: Fix setup methods * Imported Translations from Zanata * Filter reserved image properties * Fix Infinidat driver to backup attached volume * Update to hacking 4.1.0 * NetApp: Add NVMe/TCP driver * Bump oslo.versionedobjects to 2.4.0+ * [Pure Storage] Add support for 3-site, trisync, replication * cinder-backup: use the same backup backend host for incremental backups * Yadro tatlin\_client: Fix bad message formatting * NVMe-TCP volume driver for Fungible Storage * [Pure Storage] Fix issue with loss of replicated array * Bump boto3 requirement to 
1.18.49 * Pylint: add nvmet to ignored-modules * Deadlock prevention support in synchronize * Add tools/coding\_checks.sh to pylint tox env * Use new get\_rpc\_client API from oslo.messaging * Tests: Fix double mock of Popen (test\_backup\_ceph) * remove six from cinder.transfer * Send the correct location URI to the Glance v2 API * [SVf] As part of Flashcopy 2.0 adding support for volumegroup snapshots * Dell PowerStore: Volume caching exception support * Tests: storwize: Work around bug in unit test * Tests: make hpe3par tests pass on py311 * Doc: IBM Spectrum Virtualize family [SVf] Volume driver document correction * tox: Remove [testenv] basepython setting * Handle external events in extend volume * Change functional job py39 to py310 * Imported Translations from Zanata * Storage node configuration * [SVf]: mkhost failure when volume and node are on different iogrp * Get ready for tox 4 * Infinidat: add storage assisted volume migration * [SVf] Optimize lsmdiskgrp calls in creation of replicated volumes * Fix Infinidat driver consistency groups feature * Hitachi: support new storages * Add install docs for LINSTOR driver dependencies * Check VMDK subformat against an allowed list * nit: correct comment in backup manager * S3 Backup: Warn if verify\_ssl is true with no cert file * S3 Backup: Remove list of compression algorithms * Dell PowerMax Driver - Add support for Unisphere for PowerMax 10.0 * Fix Infinidat driver multi-attach feature * Fix Infinidat driver generic volume migration * LVM-nvmet: Use nvmetcli as library instead of CLI * Add tempest integrated storage job to run on ubuntu focal * Imported Translations from Zanata * volume\_type\_access: don't validate project\_id as a uuid * JovianDSS: extend option description * Set backup status to error on VolumeNotFound * Correct help text of target configs * Remove IET iSCSI target * HPE: Cinder driver for HPE XP storage FC and iSCSI arrays * Doc: Fix way to update default quota value for a new project * Imported Translations from Zanata * Hitachi and OEM: Support multi pool * Imported Translations from Zanata * Remove reference to 'all-plugin' tox environment * Toyou: Remove allocated\_capacity\_gb calculation * Pure Storage - bump version numbers for Antelope * Update mypy to 0.981 * RBD: Default rbd\_secret\_uuid to the cluster FSID * Imported Translations from Zanata * Fix service token documentation * Remove unsupported options from cinder-manage quota command * Imported Translations from Zanata * Fix release note for bug 1957073 * Update metadata in setup.cfg * Clarify description of encrypted volume transfer mv * Clean up formatting in dplcommon.py * mypy: Correct return types for volumes/snapshots summary * Revert "PowerFlex driver - fix the display of the incorrect volume" * Ceph minimum client on cinder-plugin-ceph-tempest job enable * Switch to 2023.1 Python3 unit tests and generic template name * Update master for stable/zed * Imported Translations from Zanata 21.0.0.0rc1 ----------- * rbd: Fix snapshot delete when the source volume doesn't exist * Fix Infinidat driver to use TLS/SSL communication * lightos: parse urls with urlparse for ipv6 support * RBD backend QoS implementation * Infinidat: support for manage/unmanage API * PowerMax Driver - Fix for renaming GVG * Ceph backup: Close source file * Infinidat: add support for revert to snapshot operation * Dell EMC Unity driver in cinder * Imported Translations from Zanata * Fix a typo in cinder/volume/drivers/infinidat.py * Fix and unify capacity calculations * 
Support os-brick specific lock\_path * NetApp ONTAP: Add revert to snapshot functions on REST client * NetApp ONTAP: Add volume migration functions on REST client * Hitachi: fix to output resource lock message correctly * Imported Translations from Zanata * NetApp ONTAP: Add volume replication functions on REST client * Lightos - add lightos new volume states * NetApp NFS ONTAP: Deprecate Copy Offload Tool * NetApp NFS: Clone image using copy file operation * NetApp ONTAP: Add core functions on REST client * NetApp ONTAP: Add REST Client for ONTAP * image\_utils: Assume qemu-img >= 2.10.0 * HPE 3PAR: Fix umanaged volumes & snapshots missing * [SVf] As part of Flashcopy 2.0 adding support for volumegroup * Use the json format output of qemu-img info * backups: Use the most recent available backup for incrementals * Imported Translations from Zanata * [Pure Storage] Add replication support for NVMe driver * IBM FlashSystem driver using py3.5 incompatible code * Fix a deprecation warning about regex * Update nova microversion for volume backed instance rebuild * setup.cfg: [extras] for pip-installable driver requirement * api-ref: Add docs for clusters * db: Remove weird error handling code * db: Remove unnecessary engine facade decorator * Tests: Randomize tests * Tests: Fix linstor tests * Tests: Fix versions view * Tests: Fix PowerMax tests * Tests: Fix cmd tests * Tests: Fix NFS tests * Tests: Fix NetApp tests * Tests: Fix Pure test * Tests: Fix zone manager tests * Serialize message\_\* properties of RequestContext * Add support for transferring encrypted volumes * Fix volume caching in PowerFlex driver * Tests: Fix IBM XIV * Tests: Fix 3par * Tests: fix quobyte breaking other tests * Tests: Fix inspur unit test * Tests: Fix test runner never finishing * TOX: Document install\_command usage * Tests: Monkey patch before logs are loaded * Tests: Fix service stopping on cleanup * nit: correct storwize\_portset help text * Replace base64.encodestring with encodebytes * mypy: cmd/manage.py * mypy: cinder/cmd/[api,backup,scheduler,status,volume] * Update volume delete api-ref * [docs] Add info about releases * Inspur: : Eliminate timed delays in unit tests * Veritas: Eliminate timed delays in unit tests * Hitachi: Eliminate timed delays in unit tests * NetApp: Eliminate timed delays in unit tests * DataCore: Eliminate timed delays in unit tests * Doc: To update the portset feature details in IBM Spectrum Virtualize user guide * [SVf] : Fix the SVC code level for lsfcportsetmember call * PowerStore Driver - New format of connection properties for NVMeOF connector * Bugfix: Account for consumed space better * Address G004 in flake8-logging-format 0.7.4 * DEMC: Add support for trim/discard * PowerFlex driver - fix the display of the incorrect volume size on volume or snapshot creation * NetApp SolidFire: Fix RecursionError accessing undefined attributes * Remove forgotten variable \_backup\_db\_fields * Imported Translations from Zanata * Add Pure Storage NVMe-RoCE driver * Add NVMe/TCP support to Dell EMC PowerStore driver * add netapp copyoffload provider location * Add Cinder NFS driver for Dell PowerStore * Add additional transport type constants * Initial commit for Yadro Tatlin.UNIFIED driver * LVM driver: Remove "six" usage * Change cinder-mypy job to voting * mypy: work around mypy bug #13214 * Clear up \_reschedule() return value * Add doc8 check to docs builds * Tests: Quobyte: Remove six usage * Tests: test\_glance: Remove six usage * Tests: Datacore: Fix InvalidUUID warnings * Tests: 
RBD: Refactor mocks * Remove unused session parameter * Fix mypy job * Fix Infinidat driver to return all iSCSI portals * [SVf]: Incorrect portset value during driver intialization * NFS: Use Volume attr instead of dict compat for lock * Bump mypy version to 0.960 * tests: Address UserWarning in tests * DataCore: Add note about unique CHAP storage paths * HPE3PAR: Correct volume name in ERROR log * PowerStore driver - Request data validation fix * Move NFS job to voting * db: Remove irrelevant TODO * Remove return from rpc cast method * Add a check for virtual\_size at API layer * Update docs for Hitachi driver * Imported Translations from Zanata * mypy: cinder/api/common.py * Reintroduce DataCore driver * Fix flapping storage\_protocol in get-pools * Move bandit requirements to tox.ini * Bump moto version to support py3.9 * PowerMax Driver - Manage volume into correct storage group * Update docs for powervault driver compatability * Log when waiting to acquire coordinator lock * mypy: api\_utils * tests: Fix invalid test * db: Remove resolved TODO * db: Remove use of 'as\_scalar()' * db: Don't use strings to indicate relationship names * db: Don't use legacy calling style of select() * db: Don't use strings to indicate column, relationship paths * db: Don't pass strings to Connection.execute * Resolve invalid UUID warning * db: Pass case.whens as positionals, not a list * models: Remove implicit coercion of SELECT to scalar subquery * tests: Enable SQLAlchemy 2.0 deprecation warnings * Revert "Cleanup code duplication in cinder.cmd.backup module" * NetApp ONTAP: Fix type error exception in get\_volume\_state * [SVf] : Fix multiple SVC CLI calls for rc-relationship operations * Don't limit use of importlib-metadata to Python < 3.8 * db: Move comment * Remove redundant line from host\_manager.py * pylint: Add additional ignored-modules * ibm\_storage: Remove unused \_get\_os\_type method * PowerMax Driver - Check for moving to same target * Add image\_conversion\_disable config * Remove use of mock CONF object * [docs] Update PTL info in contributor guide * Imported Translations from Zanata * Groups: remove unneeded "status" variable * tox.ini: combine functional-py\* envs * Tests: Extend RBD deferred deletion interval * Report tri-state shared\_targets for NVMe volumes * PowerMax Docs - Known issues section * pylint: skip ManageResource Mixin * mypy: service.py * mypy: cinder/volume/volume\_types.py * docs: update release cycle tasks * Ceph: Remove unnecessary convert\_str() calls * Scheduler Evaluator: raise recursion limit * Use modern type annotation format for collections * Glance: remove \_extract\_attributes method * pylint: ibm\_storage: Fix E0601 used-before-assignment error * pylint: volume/api: Fix E0601 error * cmd/manage and coordination: Clean up exception handling * pylint: Remove invalid pylint disable in Nexenta driver * pylint: tidy up clean\_volume\_locks * pylint: Fix vmdk driver use before definition * pylint: ignore \_\_original\_module\_threading errors * [IBM DS8000] Fixed Detach for multi-attach volumes * Remove single-use test function * RBD: Fix \_show\_msg\_check\_clone\_v2\_api * mypy: annotate image/glance.py * Increase swap size to 4GB * Docs: Document clone\_image driver method * Doc: Improve name\_id documentation * Add releasenotes to drop python3.6|7 * mypy: annotate remotefs * Address pylint error * db: Remove unnecessary session management * Update Volume Delete rejection message * Tests: add microversion consistency unit tests * Seagate/Lenovo drivers: 
Update get\_driver\_options * backup/swift: Add support sending service user token * Handle the case when tempest fails * HPE 3PAR: In multi host env, fix multi-detach operation * Remove reference to removed nfs\_\*\_ratio options * Fix example of failed migrarion for LVM->RBD * Correct VolumeMigrationStatusField * Prevent temporary volume from being deleted accidentally * Add REIMAGE\_VOLUME message action * Drop python3.6 support in testing runtime * Run pylint tox env on all files by default * Remove contrib/block-box from zuul config * [SVf] Delete/Extend issue in reverse replication * Don't destroy existing backup by mistake on import * releasenotes: correct formatting error * Change key "cluster" to "cluster\_name" * Rename Dell EMC to Dell * [doc] update releasecycle tasks * Add review best practices section * [SVf]:Fix multiple lsvdisk calls for GMCV create volume operation * Imported Translations from Zanata * [doc] update driver review checklist * Add statement about CI for backports * Fix reported storage\_protocol * Fix cacheable capability * [SVf] Resize of GMCV volumes in group * [SVf]:Fix retype failure for replication volume-type * Remove reference to non-existing nfs\_disk\_util parameter * Added documentation about backup\_file\_size about memory usage * db: Remove unused API * db: Remove final users of 'get\_session' * db: Final cleanup for context-based enginefacade * db: Migrate 'purge\_deleted\_rows' to enginefacade * db: Migrate online upgrade helpers to enginefacade * db: Migrate "worker" APIs to enginefacade * db: Migrate "image volume cache" APIs to enginefacade * db: Migrate "driver initiator data" APIs to enginefacade * db: Migrate "message" APIs to enginefacade * db: Migrate "group snapshot" APIs to enginefacade * db: Migrate "cg snapshot" APIs to enginefacade * db: Migrate "group" APIs to enginefacade * db: Migrate "consistency group" APIs to enginefacade * db: Migrate "transfer" APIs to enginefacade * db: Migrate "backup", "backup metadata" APIs to enginefacade * db: Migrate "volume glance metadata" APIs to enginefacade * db: Migrate "volume type encryption" APIs to enginefacade * db: Migrate "qos specs" APIs to enginefacade * db: Migrate "volume type specs", "group type specs" APIs to enginefacade * db: Migrate "volume type", "group type" APIs to enginefacade * db: Migrate "snapshot metadata" APIs to enginefacade * db: Migrate "snapshot" APIs to enginefacade * db: Migrate "volume metadata" APIs to enginefacade * db: Migrate "volume", "volume attachment" APIs to enginefacade * db: Migrate "quota usage", "quota reservation" APIs to enginefacade * db: Migrate "quota class" APIs to enginefacade * db: Migrate "quota" APIs to enginefacade * db: Migrate "cluster" APIs to enginefacade * db: Migrate "service" APIs to enginefacade * db: Indicate functions that should not be converted yet * db: Drop support for MySQL 5.5 * Bump mypy version to 0.942 * Add info about code coverage job * Remove privsep/hscli * cmd/manage: Correct db\_sync() return * Tests: Add alembic to pylint ignores * Bump pylint to 2.13.4 * RBD: Use static methods where possible * RBD: Fix total\_capacity * Honor multipath config everywhere * Fix wrong attribute to find remote address * [docs] Update cinder-stable-maint description * Warn on driver detach errors * NetApp ONTAP: Fixed get\_ontap\_version * Remove unneeded volume\_types.get\_all\_types\_by\_group method * PowerMax Docs - Clarify Replication Group * Fix QOS computation * Change unsupported fileno() LOG to debug * Imported Translations 
from Zanata * Imported Translations from Zanata * image\_utils: Assume qemu-img supports --force-share * Remove extra volume\_type DB fetch on volume manage * [doc] Add info about backport policies * Stop removing .pyc files for unit test runs * Tests: Reduce time waiting in Hitachi HBSD tests * mypy: ceph backup driver * Prohibit volume manage to an encrypted volume type * Docs: fix small typo on contributor gerrit * Pure Storage - bump version numbers for Zed * Tests: Reduce time waiting in Hitachi HBSD tests * RBD: Remove last usage of "six" from RBD driver * Add Python3 zed unit tests * Update master for stable/yoga * Update doc8 ignore-path 20.0.0.0rc1 ----------- * Add yoga release note prelude * lightos - bugfix compression stats should be True * tests: Correct typo * mypy: set no\_implicit\_optional * db: Increase timeout for migration tests * db: Remove 'use\_slave' arguments * tests: Don't use mock.Mock as fake context * tests: Silence a number of warnings * tests: Restore - don't reset - warning filters * Doc: IBM FlashSystem family update in IBM Spectrum Virtualize user guide * Hitachi: Add port scheduler * Hitachi: Add Target Port Assignment for VSP Driver * Specify yoga os-brick in requirements * Replace distutils with packaging in 3rd party drivers * Hitachi: Support AIX as host OS type * Hitachi: bugfix and refactoring for add maintenance parameters * fix 'huawei dorado v6' FC initiator can be added to host * Fix deprecation warnings caused by invalid UUIDs * Tests: Remove typo'd assert calls from test\_attachments\_manager * Tests: Fix invalid assert\_called\_once calls in driver tests * Drivers: remove unused code * Add grenade-skip-level irrelevant-files config * Tests: Fix missing novaclient context arg in test\_host\_filters * In support matrix, move Nimble driver entries next to HPE driver * [SVf] Manage host attachment using portsets * Reset state robustification for volume os-reset\_status * Followup: Address review comments on re-image patch * Support volume re-image * Add a unit test to keep microversions.py up to date * Add missing microversion entry for 3.67 * Fix request id mess on Cinder-API * Only init message API in create volume when needed * Don't init backup API code for each create volume request * db: Preserve API signatures * db: Move utility methods to top of file * docs: Remove unnecessary 'blockstorage-' prefix * db: Enable auto-generation of database migrations * docs: Rework and move upgrades guide * docs: Add whereto for testing redirect rules * db: Add tests to ensure we keep migrations in sync * db: Resolve additional migration-model mismatches * db: Add missing foreign keys, indexes to models * Fix typo in release note * PureStorage FlashArray: Add active/active replication * Update 'cinder-specs-core' description * [SVf] update rccg name property to metadata * [Lightos] standardize connector usage * Remove usage of undefined CONF.quota\_metadata\_items * Solidfire: Clean up remaining usage of removed parameters * Use LOG.warning instead of deprecated LOG.warn * Remove the need for project\_id from API endpoints * [SVf] RevertToSnapshot for rep-volumes in group * Update new driver review checklist * [SVf] Fixed Detach for multi-attach volumes * docs: Update "Getting your patch merged" * Imported Translations from Zanata * mypy: policy.py * docs: Add docs for 'RateLimitingMiddleware' * Remove attach and detach volume driver methods * [SVf] Add cleanrate in volume-type extra specs * mypy: RBD driver * RBD driver: clean up \_trash\_purge 
exception handling * SF: Remove compat clone image code * Lightbits LightOS driver * PowerMax Driver - Retry on a symmetrix lock * Doc: IBM flashsystem driver update * db: Remove unnecessary timezone configuration * Remove various unused code * api-ref: include links on backups list and details * Add missing kwargs for spdk driver initialize\_connection function * Add cinder volume drivers for NEC V series Storage * Remove unused variable * PowerMax docs - Include a note in cinder backup section * Rework backup process to make it async * mypy: Allow mypy to pass with requests-packaged urllib3 * JovianDSS: fix default value in documentation * Hitachi: Make the parameters name variable for supporting OEM storages * Hitachi: Add maintenance parameters * Remove block-box files * Volume transfers: Remove duplicate policy check * Fix Typos from Review 819790 * Fix spacing in CapacityFilter debug log message * PowerMax Driver - Improve error handling around deletes * Sync rootwrap.conf from oslo.rootwrap * Support Cinder FC driver for TOYOU NetStor * Doc: Typos in Spectrum Virtulize family user guide * Imported Translations from Zanata * mypy: backup * db: Correct 'nullable' mismatches on models * db: Fix formatting of database models * Updating python testing as per Yoga testing runtime * NetApp: Support custom igroups * Fix log message error in capacity filter * mypy: annotate volume/api.py * [SVF]:Fix multiple lsiogrp,lsvdisk calls in Retype * Move nimble driver code to hpe folder * fix 'huawei dorado v6' create new volume from snapshot error * Improve no\_snapshot\_gb\_quota description * Run database migration tests * Fix a typo error in explanatory notes * PowerMax Driver - Re-use existing initiator group/host * replace deprecated pyparsing method * Update release cycle tasks * [SVF] Fix Retype issue of mirror volume * Add default types info in admin docs * api-ref: Add info regarding default types * Add info about \_\_DEFAULT\_\_ in install docs * [Pure Storage] Add volume metadata * DOC: Add cinder modules on initial configuration * Fix misspelled get\_time\_comparsion\_operators method name * RBD: Check for OSError before using errno * pylint: Update to 2.11.1 * [Pure Storage] Remove all REST version checks * Clarify conditional\_update return types * mypy: filter scheduler * RBD: Open RBD images read-only where possible * [Pure Storage] Add check for NVMe-FC capable array * Remove broken tempest-full-py3-opensuse15 job * Fixed copy-on-write mode in GPFS NFS driver * Correct type annotations for utils.retry() * Fix: Race between attachment and volume deletion * Increase min version of oslo.vmware to 3.10.0 * [ubuntu] Add tgt package for cinder storage * Imported Translations from Zanata * Signature of base method in class 'AccelBase' * PowerMax Driver - Empty port info response * PowerFlex: update supported versions in driver documentation * PowerMax Docs - Xena release notes * Fix typo in message policy deprecations * NetApp ONTAP: Fix sub-clone zapi call * NetApp ONTAP: Fix check QoS min support for SVM account * Pure Storage FlashArray - Version increment for Cinder drivers * Raise min version of ddt * NFS: Fix generic revert to snapshot flow * Update rbd warning message * [Pure Storage] Add missing DB values when performing create CG from CG snap * NetApp SolidFire: Fix request errors while Element OS upgrade * Use os-brick 5.0.1 * Fix a typo * NetApp Solidfire: Fix retype and name exception on migration * Cinder matrix - Removal of Snapshot Attachment * Fix: nfs format info 
limitation * [SVF] Fix rccg and relationship creation issues * Fix PowerFlex connector HTTPS certificate validation * Add reviewing section to release notes doc * Tests: Improve RBD trash unit tests * Seagate driver: fix get\_volume\_size() * Raise min of oslo.db to 11.0.0 * Fix cinder-manage db version * fixtures: Don't persist state in the Database fixture * Erroneous log message args location causes a misconception * Imported Translations from Zanata * Dell PowerVault: Fix "cinder manageable-list" * Dell PowerVault driver: Fix documentation typos * Specify xena os-brick series in requirements * Update SQLAlchemy related requirements * Raise min of oslo.policy to 3.8.1 * Add deprecation notices to deprecated policy rules * Raise minimum oslo.log to 4.5.0 * Add Python3 yoga unit tests * Update master for stable/xena * RBD: Call trash operation when plain deletion fails 19.0.0.0rc1 ----------- * Turn off inline comments for mypy zuul job * Correct "Add release note about xena policy changes" * Correct "Clean up policy matrix for Xena release" * mypy: Fix unused type: ignore in manager.py * Add release note about xena policy changes * Clean up policy matrix for Xena release * Add release note prelude for the Xena release * Update policies related to user visible extra specs * PowerMax Driver - Get manageable volumes fix * PowerMax Driver - Update unsupported retype combinations * PowerMax Driver - Fix for GVG snapshot delete * Expose volume\_attachments in Volume OVO * Delete attachment on remove\_export failure * Fix detach notification * Remove unused config imports/objects * Implement project personas for snapshot metadata * Implement Xena project personas for group snapshots * doc: RBD: add documentation to configure RBD mirroring * Implement project personas for backups API * Implement project personas for volume actions * Better workaround for policy file in PDF docs * Remove sample policy config from docs generation * [SVF] Fix extend issue for mirroring volumes * Implement Xena project personas for group\_actions * Add request method to test requests * [SVF]:Storwize HyperSwap snapshot clone is failing * PowerMax Driver - Port load balancing fix * Implement Xena personas for volume groups * Implement Xena project personas for volume types * Xena project personas for volume type access API * Implement project personas for snapshots * Implement secure RBAC for snapshot actions * Implement project personas for volume metadata API * Implement project personas for the limits API * Fix: Online migration for volume\_use\_quota\_online\_data\_migration * Implement project personas group\_types * Remove unused nested\_contexts method * Update project personas for default\_types * Implement project personas for volume API * Implement project personas for quotas and quota\_classes * Tests: Fix missing RBD delete\_volume test case * Implement project personas for volume transfers * Native multibackend-matrix Zuul v3 job * [SVF]:HyperSwap volume service status update * Hitachi: Add generic volume groups * Nimble: Add Alletra 6k related information * Make extension manager parameter optional * [SVF] Update rccg details for mirror volumes * JovianDSS: add multiattach and 16K block support * JovianDSS: fix code style and naming * NetApp ONTAP: Add option to report storage provisioned capacity * Netapp ONTAP: Add support to revert to snapshot * Clean up user visible extra specs release note * Implement user visible extra specs * JovianDSS: fix iscsi target recovery function * HPE 3PAR: Add HPE 
Alletra 9k related information * Remove extraneous space from error message * PowerMax Driver - refactoring feedback * Docs: Discourage using naked rechecks * NetApp ONTAP: Add storage assisted migration support * Implement project personas for attachments * Implement project personas for messages * Add infrastructure for testing new RBAC policies * Snapshot in-use volumes without force flag * Update mv version history document for 3.65 * [SVF] Manage GMCV volumes on separate pools * RBD: Don't flatten temporary resources * Update base policy file * Correct the rest api url of import\_record of backup * db: Remove 'db' argument from various managers * db: Remove 'db\_driver' option * db: Integrate alembic * db: Add all migrations to initial alembic migration * db: Add initial alembic migration * db: Add alembic boilerplate * db: Vendor 'oslo\_db.sqlalchemy.migration' * db: Move sqlalchemy-migrate migrations * Add user messages for backup operations * Test glance with cinder backend optimization * Expose volume & snapshot use\_quota field * Improve quota usage for temporary resources * Clear OVO history and compatibility * Fix notifications of migration temp volume * Change 'host' option from HostAddressOpt to StrOpt * Tests: Remove brick\_lvm mocks from all unit tests * Update snapshot api microversion docs and tests * [SVF]:Fix multiple lshost calls during attach * Modify erroneous annotation * Support Images API v2.11 * Log connection info returned from driver * [SVF]:Fix add volumes to GMCV group * [SVF]:Retype in-use hyperswap volume * mypy: update retry decorator in utils.py * [SVF] Fixed update\_rep\_properties for empty values * LVM: Retry lvextend commands on code 139 * HPE 3PAR: Reuse existing session * [SVF]:Fix create volume on drp * Add releasenote for schema validation fix * mypy: coordination.py * Doc: IBM Storages user documentation update * [SVF] Fix issue to get volume relationship details * mypy: continued manager, scheduler, rpcapi * conditional\_update(): disable false pylint error message * mypy: image cache * mypy: create\_volume flows * Fix cinder-manage clean\_locks command * PowerMax Driver - Fix for create snapshot * [SVF]: Fix Enable Replication for Storwize\_V5000E * Doc: Remove backup\_id from backup import url and parameter * PowerMax Driver - allow for None values in metadata * Add cinder permissions matrix * Remove 'enable\_v3\_api' option * Add cinder-manage command to remove file locks * Remove file locks once we delete a resource * [Pure Storage] Resolve missing provider\_id issue (PowerVC) * KumoScale Driver replicated volume missing portals attaches without raid * RBD: use correct stripe unit in clone operation * PowerMax Driver - Fix for legacy PowerMax OS around generations * Fix extra\_capabilities * [rbd] Fix create encrypted volume from snapshot * Allow removing NFS snapshots in error status * Replace deprecated tenacity.Retrying.call * Add openstacksdk-functional-devstack job in cinder gate * PowerMax Driver - Fix for group snapshot deletion 19.0.0.0b1 ---------- * Doc: api-ref doc update * [Pure Storage] Fix CG cloning crash when very long volume names * Run cinder-mypy job in the check * Add installation of mypy stubs packages * Mark QNAP driver as unsupported * db: Reorder initial migration * db: Use 'import sqlalchemy as sa' pattern * Tests: Fix InvalidUUID warnings * Reject bad img formats for uploaded encrypted vols * Drop lower-constraints jobs * NFS: Update connection info on online snap create * mypy: image\_utils * LVM: Simplify version 
fetching * Tests: Simplify LVM fake\_execute * [Pure Storage] Fix minimum SDK version required * Add backup\_swift\_create\_storage\_policy config opt * LVM: Retry lvdisplay and lvcreate calls on segfault * PowerMax Driver - Allow for volume uuid in manage volume * PowerMax Driver - Retype fix for replication * Use mv.LIMITS\_ADMIN\_FILTER in the code * Retry "lvs" call on segfault for \_get\_thin\_pool\_free\_space * streamline \_report\_driver\_status method * Fix: Schema validation for attachment create API * LVM: Use --readonly for lvdisplay in lv\_has\_snapshot * Fix typo in Dell EMC Unity driver documentation * Address new hacking enforcement * Update decode\_cipher doc text about encryption ciphers * Add libcgroup related packages in bindep.txt * PowerMax Driver - Allow for case mismatch in SGs * Move require\_driver\_initialized / log\_unsupp to volume\_utils * NetApp ONTAP: Fix QoS lost after moving volume * Update Block Storage API v2 api-ref * Remove Block Storage API v2 * [SVF] Support volume-resize to hyperswap volume * [SVF] Fixed add volumes to clone group issue * [Pure Storage] Ensure multiattach volumes are not disconnected early * [SVF]: Fix extend issue for a clone of rep-volume * [SVF]:Bulk create Hyperswap volume is failing * PowerMax Driver - QoS should not be set on parent storage group * Update volume api microversion doc and tests * zuul: configure irrelevant-files for the rally job * PowerMax Docs - Corrections to QoS section * Modify manner of retrieving volume\_ref * Update IRC info * Fix functional jobs * Doc: Remove incorrect note about encryption flag * Tests: Don't assert notifier not called * LVM: Added NVMe TCP support for the nvmet target * PowerMax Driver - Fix for create group from source * Fix best\_match() deprecation warning * Replace getargspec with getfullargspec * SQLA 1.4: Fix calculate\_resource\_count * SQLA 1.4: Fix conditional update Case * api-ref: Use key\_size 256 in API examples * Abort volume creation when encryption spec is invalid * remove the oslo\_utils.fnmatch * Tests: Fix oslo.i18n warning * zuul: fixes for the A/A job (nodeset, variables) * Fix PowerFlex volume type conversion * volume api: Remove unused get\_snapshot\_metadata\_value method * mypy: annotate volume\_utils / utils / exc * zuul: add mypy experimental job * Drop support for SQLite < 3.7 * image\_utils: Remove unused \_validate\_file\_format method * filter\_scheduler: Remove schedule() method * Remove references to 'sys.version\_info' * vmware: Use oslo.vmware's get\_moref\_value() * vmware tests: Support different moref backend representations * PowerMax Docs - Wallaby documentation * db: Compact migrations to Train * db: Remove 'cinder.database.migration\_backend' entrypoint * setup.cfg: Replace dashes with underscores * Tests: Add coverage for coordinator start/stop * Remove unused \_get\_non\_shared\_target\_hosts from cmd/manage * Unit test RBD clone depth calculation * api-ref: Fix "name" parameter * Update release note info * Add Python3 xena unit tests * Driver assisted migration on retype when it's safe * pylint: Fix migration E1120 no-value-for-parameter * pylint: Fix E0213 no-self-argument in tests * pylint: Fix E1120 no-value-for-parameter for mocks * Fix sporadic cleanup unit test failure * [NetApp] Fix iSCSI CHAP auth issue during volume attach * Fix instance locality scheduler filter * Pure Storage - bump version numbers for Xena * Remove uuid check/generate in service * Open local image files with "rb" mode * Bug fix for free space calculation * 
Reserve migrations for DB backports * PowerMax Driver - Fix for deleting replication group * Update to pylint 2.7.4 * Fix: show volume by name for non-admins * Modify/Move project validation methods to api\_utils * api-ref: add additional info for mv 3.64 * Remove unused \_db\_error code from cmd/manage.py * Set Wallaby maximum mv in REST API version history * Quota: Fix multiple race conditions * Prevent quota and reservations to go into negative * Update cinder manage quota commands * Fix quota usage duplicate entries * Remove nested quota leftovers * Quota: Fix until\_refresh config changes * Remove unused quota python classes * Add quota utils to cinder-manage * Update help text for backup compression option * Use os-brick 4.3.1 * LVM: Fix delete volume error due to lvs failure * Change snapshots type with volume retype * Fix automatic quota sync for temporary volumes * Remove unnecessary DB read * Remove unused code path in attachment\_delete * Update master for stable/wallaby 18.0.0.0rc1 ----------- * Use os-brick 4.3.0 * Cinder - Creating clone of encrypted volume fails * PowerMax Driver - Temporary snapshot enhancements * api-ref: Fix "id" parameter * NetApp ONTAP: Fix FlexGroup replication * Imported Translations from Zanata * Make availability zone type exception easier to understand * Update contributor doc with additional meetings * Support mTLS when calling the glance API * Follow up NetApp ONTAP FlexGroup feature * Backup manager: Synchronously call remove\_export\_snapshot * Backup manager: Synchronously call remove\_export * Fix PowerStore iSCSI targets filtering * Bug fix for revert to snapshot feature * Remove six from quota.py * JovianDSS: add certs and snapshot restore * Update code layout and missing Zadara features * doc: Remove cryptsetup reference for compute setup * Add explanations on safe delete * Fix volume OVO create method * Remove unnecessary save call * Fix old attach method * Fix automatic quota sync for migrating volumes * [SVF]: Volume name is not validated for host * Changed whitelist to allowlist in tox * [SVF]:Storwize hyperswap volume is not deleting * Resolve SADeprecationWarning for joinedload\_all * Always constraint dependencies in tox * Add support for consistency groups in the Nimble Storage driver * NFS: Fix for groups and cloning * Dropping explicit unicode literal * Add virtualenv requirement to tox.ini * NetApp ONTAP: Implement FlexGroup pool * Add support for RBD fast-diff feature for backups stored in Ceph * API validation: Use cinder\_host for services checks * Add iSCSI IPv6 support to Dell EMC XtremIO driver * Support format info in fs type drivers * PowerMax Driver - Release notes for 761643 and 767172 * Add Consistency Groups support in PowerStore driver * Add OpenStack volume replication v2.1 support in PowerStore driver * Remove six from cinder.tests.unit.objects * Fix invalid yaml in reno template * API validation: Add cinder\_host type to support ipv6 in manage * [SVF] Set volume IOPS based on volume size * Add QoS Suport for Pure Storage * Imported Translations from Zanata * NetApp ONTAP: Add support for dynamic Adaptive QoS policy group creation * NetApp ONTAP: Add support for QoS minimums specs * PowerMax Driver - Initiator group contents check * PowerMax Driver - Allowing for all types of boolean in extra specs * PowerMax Driver - Offline r1 promotion fixes * api-ref: Don't list HTTP 500 for backup create * [SVF]: Fixed host and group failback issues * Pure Storage: check volumename length does not exceed maximum * [DS8K]: 
Support revert to snapshot * [Pure] Add missing FC host personality support * Tests: Don't assert notifier not called * [Storwize] Provide IOPS based storage offering * doc: add specs repo maintenance tasks * PowerMax Driver - Extend replicated volume * Add warning message about slow volume backend * [doc] remove outdated package python-keystone * Hitachi: Trace REST API input/output logs * PowerMax Driver - Fix pylint errors in test\_common and provision * PowerMax Driver - U4P Failover SerialNumber KeyError fix * Hitachi: Use get\_volume\_stats in the base driver * Fix in api-ref * [Pure] Fix failing consistency group tempest tests * Resolve SAWarning SQLAlchemy warning * [PURE] support IPv6 / add parameter pure\_iscsi\_cidr\_list * Fix CI\_WIKI\_NAME entries * Use TOX\_CONSTRAINTS\_FILE * Drop policy check failures to DEBUG * Fusionstorage Cinder Driver Support OceanStor 100D Storage.(dsware) * image\_utils: Use QEMU\_IMG\_FORCE\_SHARE\_VERSION constant * [SVF] Update volume replication properties * smbfs: set VHD UUID using volume UUID * TOYOU: Abandon the target parameter and Report SAN driver options * Simplify composite check strings for project personas * Make sure we pass context objects directly to policy enforcement * Properly handle InvalidScope exceptions * Update secure RBAC check strings with descriptions * Tests: Improve RBD v2 clone API unit tests 18.0.0.0b1 ---------- * Add ports filtering support to Dell EMC XtremIO driver * Update to hacking 4.0.0 * HPE: Add Peer Persistence support for Primera backend * Use os-brick 4.2.0 * Move trace methods from utils to volume\_utils * Tests: test\_backup - specify volume\_type\_id for snapshots * Imported Translations from Zanata * Bump pylint to 2.6.0 * LVM: fix \_create\_vg * Move brick calls from cinder.utils to volume\_utils * vmware: Use cookiejar from oslo.vmware client directly * mypy: annotate volume manager * Add KIOXIA KumoScale NVMeOF driver * Fix irrelevant-files for two devstack jobs * doc: add rbd-iscsi-client info * Fix RBD\_OPERATION\_FEATURE\_CLONE\_PARENT in unittest * Require oslo.serialization 4.1.0 * image\_utils: Simplify fetch\_verify\_image * Update ceph driver docs, mentioning Hyper-V support * PowerMax Driver - Check for missing port group * Add encryption\_key\_id to volume and backup details * RBD: Pass bytes type for mon\_command inbuf * Change the CLI document for the extend-attached-volume * Correct format string in error message * 3PAR: Allow iSCSI driver to be enabled for Primera 4.2 onwards * Imported Translations from Zanata * Log information about the Ceph v2 clone API * created s3 cinder backup driver * Tests: Add another mock psutil in quobyte tests * Add ceph iscsi volume driver * PowerFlex documentation contains invalid paths * Add Cinder driver for TOYOU ACS5000 * Tests: Solidfire - Use UUIDs in object UUID fields * Remove NestedQuotaDriver * Label temporary files created by image\_utils * Update SolidFire Storage assisted migration in support-matrix * Pure: Add default value to pure\_host\_personality * Add driver for Dell EMC PowerVault ME Series * Imported Translations from Zanata * Introduces MV to add volume type ID in volumes details * HPMSA: Report SAN driver options * LVM: Support only LVM 2.02.107+ * LVM: Use --readonly for lvs * NetApp SolidFire: Refactor DuplicateSfVolumeNames exception * Tests: Fix rbd unit test failure due to ceph keyring file * Remove \_\_unicode\_\_() from CinderException * doc: update release cycle tasks * tox mypy: Call mypywrap.sh directly * Tests: Improve 
cascade delete coverage * pylint: run coding-checks.sh with bash * Hitachi: Wait until the volume can be deleted * Basic volume QoS doc * PowerMax Driver - Promotion RDF Group number fix * Add docs and update support matrix for Hitachi driver * Imported Translations from Zanata * [RBD] Fix snapshot backup name * Correct group:reset\_group\_snapshot\_status policy * Imported Translations from Zanata * [goal] Deprecate the JSON formatted policy file * RBD: Change rbd\_exclusive\_cinder\_pool's default * Imported Translations from Zanata * RBD: Retry delete if VolumeIsBusy in \_copy\_image\_to\_volume * Update requirements and lower-constraints * Add CHAP support to Dell EMC PowerStore driver * add openstack-python3-wallaby-jobs-arm64 job * [SVF]:Fix in change\_vdisk\_iogrp during retype * Exception type is not iterable so should change it to string type * [IBM DS8000]: Fix compatability issue in get\_host * Imported Translations from Zanata * PowerMax Driver - Fix assign SRP during promotion retype * Imported Translations from Zanata * Pure Storage FlashArray: Add active/active support * [SVF]: Fixed host and group failover issues * [SVF]:Fix clone fcmap not being deleted in cleanup * Fix volume rekey during clone * Pure Storage - bump version numbers for Wallaby * Tests: Move glance\_stubs to glance dir * Clarify 'supported' reinstatement policy * [IBM DS8000] Support volume name template * Update doc contributor doc * [Trivial]Add missing white space in the log message * Imported Translations from Zanata * Add Python3 wallaby unit tests * Replace md5 with oslo version * doc: restrict supported Ceph versions * Tests: Improve get\_qemu\_img\_version coverage * PowerMax Driver - Port status check * NetApp SolidFire: Fix duplicate volume when API response is lost * NetApp SolidFire: Fix error on cluster workload rebalancing * Add common RBAC personas to cinder/policies/base.py * Fix cinder-manage traceback * Imported Translations from Zanata * Delete TSM Backup driver * Update Development Environment documentation for Ubuntu * Doc:Storwize to Spectrum Virtualize Family * PowerMax Docs - Victoria new features and supported software * Imported Translations from Zanata * Fix unnecessary migration on retype * [Trivial]Fix unncessary "import xx as xx" renaming * Fix invalid asert\_called\_with statement * [doc]Fix an invalid url link in docs * Do not fail when depth is greater than rbd\_max\_clone\_depth * Imported Translations from Zanata * Bump minimum version of oslo.log * [SVF] RevertToSnapshot support for GM volumes * Make docs build parallel * [Trivial] Fix missing print format and missing white spaces * NetApp SolidFire: Fix clone and request timeout issues * Remove collections.abc backwards compatibility * Imported Translations from Zanata * Fix cinder-manage cluster remove raising NoSuchOptError * Add mypy tox env * Refactor some unit tests * Remove six of files under cinder/test/unit * Remove six of dir cinder/volume/\* * RBD: Run flatten in a different thread when cloning a volume * Import HTTPStatus instead of http\_client (policy tests) * Preparing for removal of six.reraise() * Remove six of dir cinder/image/\* * Nimble: Add support for revert to snapshot * Add Cinder driver for Open-E JovianDSS data storage * Remove six of dir cinder/tests/unit/api/\* * Remove six of dir cinder/tests/unit/backup/\* * Import HTTPStatus instead of http\_client * Remove six in files under cinder/\* * Remove six of dir cinder/tests/unit/volume&zonemanager/\* * Remove six of dir 
cinder/tests/unit/policies&scheduler&targets/\* * Remove six of dir cinder/tests/functional&hacking/\* * Remove six of dir cinder/backup/\* * Remove six of dir cinder/scheduler/\* * [IBM DS8K]: Fixed rest API issue to get bundle * Remove six of dir cinder/brick,cinder/common,cinder/interface,cinder/objects * Remove six of dir cinder/db/\* * Remove six of dir cinder/api/\* * Reno: Use customized template * Backup: Fix formatting errors * Critical fix for MSA 2060 and MSA 1060 * Fix service\_get\_log tests * Imported Translations from Zanata * doc: update Brocade FCZM documentation * Update new contributor doc * [SVF]:changes in create\_group\_from\_src for replicated groups * [Trivial]Add missing print format in log message * [SVF]:Reduce slowness by caching pool information * Imported Translations from Zanata * doc: update new driver merge deadline * Update master for stable/victoria * [SVF]:Fixed create\_flashcopy\_to\_consistgrp 17.0.0.0rc1 ----------- * Imported Translations from Zanata * Add prelude to victoria release notes * PowerMax Driver - Feedback on review 746486 * Add support for system and domain scoped tokens * RBD: cinderlib support for rbd\_keyring\_conf option * [SVF]: Support for retype operation on GM volumes * Make EM branch release notes static * Make EOL branch release notes static * Rollback volume status if backup service is unavailable * PowerMax Driver - Fix non-temporary snapshot delete * SPDK: Report info in top-level volume\_stats * Disallow extension of attached volumes for NFS & Quobyte drivers * Fix volume\_stats storage\_protocol usage in vol mgr * Remove install unnecessary packages * PowerMax Driver - Exception when multipath not enabled for metro * [storwize]:Fixed select\_io\_group issues * [storwize]:Fixed check\_flashcopy\_rate issues * Update HPMSA driver doc to include new HPE MSA models * Doc: Update storwize cinder driver configuration * [Storwize] Option to retain the auxiliary volume * Nimble multi-attach bad format changes * Don't create LOG if not logging * Remove oslo.versionedobjects extra install * Follow Up: Default type overrides * [goal] Fixing lower constraints for Ubuntu Focal * Default type overrides * NEC driver: fix a snapshot detach error * [api-ref]Add replication\_status(optional) to List groups with details response * PowerMax Driver - Migrate extra spec class fix * [Storwize]:Fix delete\_group\_snapshot cleanup issue * Fix a typo in the explanatory note * Imported Translations from Zanata * NetApp SolidFire: Enable driver IPv6 api request * [NetApp] Adding support for Adaptive QoS in NetApp driver * PowerMax Driver - Remove mandatory failover BID * Run l-c job on Bionic * Require os-brick >= 4.0.1 * PowerMax Driver - Feedback for migrate exception handling * PowerMax Driver - Force array and srp configuration * PowerMax Driver - Remove deprecated config options * Stop configuring install\_command in tox * Add support volume local cache * PowerMax Driver - Feedback on snap\_id feature * NetApp SolidFire: Add storage assisted migration support * Add release note for zstd compression * Pylint: pin isort to 4.3.21 * Fix volume retype with AZ * PowerMax Driver - Force add rep group volume * PowerMax Driver - Failover abilities promotion * PowerMax Driver - REST Iterator Expiration Fix * Handle oslo.messaging ping endpoint * Capacity based QoS doc * Add multiattach in Nimble driver * Fix: listing volumes with filters * Correcting the response status range in WsgiLimiterProxy * Remove Train online data migrations * Deprecate TSM 
Backup driver * Show cluster\_name in volume details * Mark Active-Active mode as supported * Add cinder-plugin-ceph-tempest-mn-aa job * PowerMax Driver - Failover abilities legacy improvements * PowerMax Driver - Replica rdfg suspend fix * Stop sending notifications to nonstandard pub id * RBD: remove rbd\_keyring\_conf option * NEC driver: fix live-migration failure with FC * Remove unnecessary releasenote * bindep: Track qemu-img dependency * PowerMax Driver - Replacing generations with snap\_ids * Modify default/delete volume type logic * PowerMax Driver - Prevent unmanage with snapvx * Normalize release note bug links * PowerMax Driver - Allowing for an empty group on a clone volume * Use resource\_backend for volumes and groups * PowerMax Driver - Failover group vol update fix * Change default glance\_num\_retries to 3 * Updating the release notes for PowerFlex Driver Rebrand * Brocade: Fix lookup UnboundLocalError * Doc note warning about retyping unencrypted/encrypted volume * Docs: Improve contributor's release notes * [vmware] ensure datastores exist while fetching stats * PowerMax Driver - Legacy volumes fail to live migrate * PowerMax Docs - Incorrect property set on volume group * Uncomment psycopg2 in test-requirements.txt * Update to oslo.privsep 2.3.0 * Fix lower-constraints conflicts * Fix: show volume transfer by name for non-admins * Add online extend support for Dell EMC PowerStore driver * Doc note warning about retyping an unencrypted/encrypted volume * Fix revert to snapshot for non admins * 3PAR: Set the right minimum client version * PowerMax Driver - Port Group & Port Load Balancing * Brocade: Fix AttributeError when raising exception * Brocade: Python 3 support * Add lsscsi to bindep * Rebranding of VxFlex OS driver to PowerFlex * Remove "cinder-manage shell" commands * drop use of pkg\_resources * Fixed an issue with creating a backup from snapshot with NFS volume driver * Race in Cinder backup manager * Add Cinder driver for Dell EMC PowerStore * Set cluster name for volume groups * Remove exception.Error class * Remove unused exceptions * Creating snapshot on NFS backend fails * zuul: collect cinderlib logs from tempest node(s) only * Support modern compression algorithms in cinder backup * Bump hacking version to 3.1.0 * PowerMax Driver - Volume Migrate Exception Handling * Correct new\_type type in VolumeManager retype method * Add Hitachi Block Storage Driver * PowerMax Driver - Changing from 91 to 92 REST endpoints * Correct a typo of the hpe driver * Imported Translations from Zanata * smbfs: pick up remotefs method signature change * Switch from unittest2 compat methods to py3 methods * Fix rekeying volume with legacy encryption provider * cinder:api-ref replace mention of "policy.json" * Kaminario: Fix unique\_fqdn\_network option * Don't show host\_name to non-admins * Remove unneeded arg from \_clean\_db (vol manager) * Move get\_volume\_stats impl to the base volume driver * Add non-voting code coverage job * Disable siblings for the cindelib functional tests * Default volume\_type set too early * Add cinder discard in nimble driver * NFS encrypted volume support * Add links to package metadata * Remove lxml deprecated methods * RBD: Cleanup temporary file during exception * tox: remove bash from whitelist\_externals * Make test-setup.sh compatible with mysql8 * PowerMax Driver - Create vol suspend fix & DeviceID check * Imported Translations from Zanata * PowerMax Driver - Ussuri Documentation * Backup: Limit number of concurent operations * Add 
generated doc output path to doc8 ignore list * Drop one more use of mock lib * Imported Translations from Zanata * PowerMax Driver - RDF State Validation Enhancements * Update Pure to support revert\_to\_snapshot * Stop to use the \_\_future\_\_ module * Tests: Mock out compute class for NfsDriverDoSetupTestCase * docs: fixes wrong config file reference * 3PAR: Fix live migration * bump pycodestyle to 2.6.0 * Bump taskflow requirement to 3.8.0 * Synology: Improve session expired error handling * Dell EMC Sc: Add support for whitelisting fault domains * NetApp ONTAP: Fix extend volume for iSCSI/FCP * Remove VxFlex OS credentials from connection\_properties * Add revert to snapshot support for Pure Storage drivers * Google backup support client 1.8.2 * 3PAR: Workaround SSH logging issue * Improve tenacity retry sleep mocking * PowerMax Driver - Array capabilities extend fix * Use 'visibility' instead of 'is\_public' when only use Glance API v2 * Correct formatting in release note * Creating image-volume cache on NFS backend fails * Imported Translations from Zanata * NetApp SolidFire: Fix bug on update cluster stats * Fix a misspelling error in QNAP driver * Fix leave volume mapped on attach failure * Switch to newer openstackdocstheme and reno versions * Add log if resource\_filters json does not exist * Cap jsonschema 3.2.0 as the minimal version * Remove reference to ThinLVMVolumeDrive * Fix a grammar error in explanatory notes * Add releasenote/ to doc8 ignore list * [SPDK] Add https protocol option to communicate with SPDK * Fix cross-project incremental backups * Update Nimble features in Support matrix * Fix Invalid() exception msg * tests: remove self.injected * Move make\_initiator\_target\_all2all\_map out of vutils * 3PAR - Fix renaming volume after migration * Imported Translations from Zanata * Fix nfs\_mount\_options description * Doc: Add notes on create\_cloned\_volume locking * Remove translation sections from setup.cfg * Raise lower constraints to meet our minimums * Fix hacking min version to 3.0.1 * Fix api-ref for GET snapshot response * Don't show host info to non-admins * Fix compliance tests * Make py3 default tox target flexible * Tests: Make tests less random * Fix dfs-sdk package name in extra reqs * Add a /healthcheck middleware * Fixed issues with Pure syncrep iSCSI CIDRs * Switch from retrying to tenacity * Fix typo on service cluster change method * Move macrosan unit tests to driver directory * PowerMax Driver - Concurrent live migrations failure * Fix outdated comment in rootwrap filter * Imported Translations from Zanata * Add missing context to function call * Update api-ref mv history file * Monkey patch original current\_thread \_active * Fix doc: s/cgroup-bin/cgroup-tools/ * Native Zuul v3 cinder-grenade jobs * Move unit test code under tests/unit/ * Make releasenotes build parallel * PowerMax Driver - U4P failover lock not released on exception * NetApp ONTAP NFS driver fail to flexclone glance image * Doc: Add max MV update step to cycle tasks * Reserve migrations for DB backports * Imported Translations from Zanata * NetApp SolidFire: Fix pylint issues * PowerMax Driver - Rep validation fix & Retype suspension fix * PowerMax Driver - Live migrate remove rep vol from sg * NetApp ONTAP: Fix iSCSI multiattach volume terminates connection * Nexenta unit tests: add mock for \_read\_mounts call * Stop invoking Python 2 * Imported Translations from Zanata * Switch to py38 and update version metadata * Add Python3 victoria unit tests * Update master for 
stable/ussuri 16.0.0.0rc1 ----------- * Nit: Update comment and release note on Quobyte driver bugfix * Add ussuri prelude to release notes * Add test coverage for manage\_snapshot\_flow * Remove XIV part from configuration docs * update typo and official brandname in RN * Update NetApp SolidFire Active/Active support in Support Matrix * Add release note for drivers unsupported in Train * Delete volume with additional removehostmappings parameter * Fix driver doc ordering do to file casing * Fix nits from RBD volume migration changes * Revert "Mark MacroSAN Driver Unsupported" * Add filter\_function and goodness\_function to pools * Updating docs for Datera driver * HPE 3PAR: Support duplicated FQDN in network * NetApp SolidFire: Fix retype to SolidFire * Add support for IBM GPFS Driver * PowerMax Driver - RDF status validation * Imported Translations from Zanata * add docs and release note for sandstone iscsi driver Partially-implements: bp add-sandstone-driver * Update HACKING document to match current checks * Fix service-get-log to respect server filters * Add glance image colocation feature for Virtuozzo driver * Cleanup py27 support * Correct description for encryption-type policies * Resolve deprecation of encryption policy target * PowerMax Driver - PowerMax Pools Fix * PowerMax Driver - Compression Change Bug Fix * Fix volume migration fails in the same ceph RBD pool * Fix TypeError when doing glance retry * Imported Translations from Zanata * Updating release notes for Datera driver * Fix arguments order inside assertEqual * PowerMax Driver - Detach RepConfig logging & Retype rename remote fix * Add tests for volume type encryption type policies * Fix missing print format in log messages * Reduce deprecation warnings * Update upgrade check removed drivers for ussuri * Prevent creation of ssh\_known\_hosts file in UT run * Handle py38 unit test changes * [DS8000] Update cinder driver configuration guide * Support Glance image data colocation * NetApp SolidFire: Add active/active replication * NetApp SolidFire: Fix failback failing after service restart * [Unity] Retype volume support * [Unity] Support consistency group replication * Add hacking check for 3rd party mock * Imported Translations from Zanata * Add sandstone iscsi driver * Mark Nimble Storage Driver supported * PowerMax Driver - Manage volume emulation check * PowerMax Driver - Deletion of group with volumes * PowerMax Driver - Replication Metadata Fix * Followup to address open review comments * Imported Translations from Zanata * validator: Replace InvalidInput exception w/ InvalidName * RBD: add support for revert-to-snapshot * Revert "Remove ProphetStor Flexvisor Driver" * PowerMax Driver - Version comparison correction * Adds support for min/max volume size on vol\_type * Revert "Remove the Virtuozzo Storage Driver" * Revert "Remove the Veritas Access Driver" * Correct ReST syntax * Add revert to snapshot support in VxFlex OS driver * Add support for volume migration in VxFlex OS driver * Add OpenStack volume replication v2.1 support in VxFlex OS driver * Add support for VxFlex OS 3.5 to VxFlex OS driver * Add features for add backup id to volume metadata * Remove HPE Lefthand Driver * PowerMax Driver - Limit replication devices * PowerMax Driver - SRDF Replication Fixes * PowerMax Driver - Update single underscores * Imported Translations from Zanata * Move release note to correct directory * Revert "Remove Nimble Storage Driver" * Drop unicode() hacking check * Fix revert snapshot issue * [Unity] Support create 
volume with tiering policy * Correct two typos in support-matrix.ini * Update hacking to 3.x release * Re-enable local hacking checks * Add test coverage for manage\_existing API revert * PowerMax Driver - Support of Multiple Replication * Change \_get\_volume\_size\_gb to \_get\_volume\_size\_bytes * Preserve request id in Cinder logs when creating boot volume * Allow creating volumes from snapshots during backups for Quobyte * API: os-reset\_status notification fix * Remove suds from requirements * RBD: Add missing driver options * SAN: Add missing driver options * Fix a spelling mistake * Do not rename rbd based volume after migration * PowerMax Driver - Allowing for default volume type in group * Excess availability\_zone judgment code * Vmware: Revert the volume stats revert * Update weekly meeting location * Imported Translations from Zanata * Add configuration item for infortrend document * PowerMax Driver - Legacy volume not found * Imported Translations from Zanata * PowerMax Driver - SRDF Enhancement * [Unity] Fix TypeError for test case test\_delete\_host\_wo\_lock * QNAP: Fix login on Python3 * Update the title of Inspur G2 storage driver * PowerMax Driver - Safeguarding retype to some in-use replicated modes * RBD: fix volume reference handling in clone logic * Tests: Remove Python 2 compat file\_spec code * Make cinder-plugin-ceph-tempest job voting * Update in-tree Datera Cinder driver * Always use the current volume URL in the Quobyte driver * Fix DriverFilter string evaluations * Ussuri contrib docs community goal * PowerMax Driver - Replication array serial check * Blacklist stestr 2.3.0 * PowerMax Driver - Print extend volume info * Port several legacy tempest jobs to Zuul v3 * Imported Translations from Zanata * Mark Huawei Fusionstorage Driver Supported * ChunkedBackupDriver: Freeing memory on restore * Revert "Remove Huawei FusionStorage Driver" * Support to query volume filter by updated\_at/created\_at * Cinder backup export broken * PowerMax Driver - Short host name and port group name override * Create backups via scheduler * Imported Translations from Zanata * Run cinder-plugin-ceph-tempest on py3 * Add new license scheme for Flashsystem9000 series 16.0.0.0b1 ---------- * Tell reno to ignore the kilo branch * Install all requirements in docs builds * Change path to query in cinderAPI V3-volume delete * 3PAR: Revert flag for Active/Active High Availability Support * Fix stestr command error in doc * Seagate driver: Add Basic auth header when logging in via HTTPS * Support volume transfer \`name\` filters * PowerMax Driver - Get Manageable Volumes Fix * Update api-ref * Fix volume unit test * NEC driver: fix migrate/retype an in-use volume * Update driver removal policy * Update reviewing doc with py3 usage guidelines * Add cryptsetup to bindep.txt * Skip cryptsetup password quality checking * Remove Dell EMC PS Series Driver * Configurable timeout of the QEMU img conversion * Handle retries in PowerMax unit tests * Add migration to make volume\_type\_id non nullable * Refactor README links * Support multiple stores of Glance * Fix some typos in docs * Add upgrade check for removed Veritas Access driver * RBD: catch argument exceptions when configuring multiattach * Bump paramiko requirement * Tests: hacking check tweaks * Hacking: Remove C304 check for LOG.audit * Update oslo.vmware version * Fix an issue in storwize unit test case * Mark MacroSAN Driver Unsupported * PowerMax Driver - Unisphere storage group/array tagging support * Add upgrade check for removed 
VZStorage driver * PowerMax Driver - RDF clean snapvx target fix * Remove the Veritas Access Driver * Mark Veritas CNFS Driver Unsupported * Remove the Virtuozzo Storage Driver * Make volume soft delete more thorough * Remove hacking check N325 * Add note that block-box is not supported * Enable flake8-logging-format extension * Clean up test requirements * Mark IET target driver deprecated * Imported Translations from Zanata * Fujitsu Driver: Change the calculation of TPP's capacity * Pure Storage - remove six due to Py2 support dropped * Fix: Create new cache entry when xtremio reaches snap limit * Update release notes for ibm storage * Introduce flake8-import-order extension * Huawei Cinder Driver Support Dorado V6 Storage.(iSCSI, FC) * Fix duplicated words issue like " should should " * Add missing parameters in log messages * Drop requirements for unsupported python versions * Raise hacking version to 2.0.0 * api-ref: give an example of volume['attachments'] * Fix trivial typo in comment * Fix KeyError exception when volume filter file does not exist * Correct typos * Doc: update storwize cinder driver configuration guide * Remove unused exception catch in revert * Add upgrade check for removed Sheepdog driver * Add upgrade check for removed Nimble driver * Add upgrade check for removed ProphetStor driver * Add upgrade check for removed Huawei driver * Remove Sheepdog Driver * Remove Nimble Storage Driver * Remove ProphetStor Flexvisor Driver * PowerMax Docs - corrections and improvements * doc: clarify usage of use\_multipath\_for\_image\_xfer parameter * Remove Huawei FusionStorage Driver * doc: fixes typo in multi backend configration doc * Update the community page * Fix: failed to create snapshot with DriverFilter * Fixed open tempfile.NamedTemporaryFile as text in Python3 * StorPool: move the config to the shared group * Move cinder grenade job to py3 and in cinder repo * Mark Brocade FC ZM driver unsupported * DS8k Cinder Driver support Python3 * Elaborate on terminate\_connection documentation * Publish backup capabilities to a scheduler * Readd reno to test-requirements * Removed unnecessary lines * Mark storwize driver supported * Fix pylint E1101 and E1135 issue * Remove Python 2 support from testing and gate jobs * PowerMax Driver - retype attached replication fix * PowerMax Driver - Volume group delete failure * PowerMax Driver - Unmanage Snapshot Delete Fix * Tests: Isolate rbd deferred deletion tests * Tests: Remove unused rbd mock\_driver\_configuration * Start README.rst with a better title * PowerMax Driver - Setting minimum Unisphere version to 9.1.0.5 * Imported Translations from Zanata * Modify help for update host command * Drop old neutron-grenade job * Fix tox 'bindep' environment * Convert PrettyTable usage to tabulate * remotefs: remove invalid "external" arg on lock method * RemoteFS: Use dest vol id instead of source id in snapshot temp name * Fix remotefs clone volume locking * Remove unnecessary saving of host and availability\_zone * Remove py2 mentions from contributor docs * SPDK drivers: Update RPC calls to match latest SPDK changes * NEC driver: fix a non-disruptive backup error * Use volume utils to clone encryption * Change the RPC parameter * Pure Storage - bump version numbers for Ussuri * iSCSI driver initialization should fail for Primera backend * Fix "is"/"is not" with a literal usage * update review site link * Deprecate rbd\_keyring\_conf option * NEC driver: fix an undefined variable * Pure Storage - Fix disconnect error in clustered 
environments * Increase cpu limit for image conversion * Fujitsu Driver: Multiple pools support * Reserve migrations for DB backports * PowerMax Driver - QoS calculation failure * NEC driver: replace deprecated xml library * Fix VxFlexOS documentation * Remove Oracle ZFSSA drivers * 3PAR: Add HPE Primera related information * [Trivial] Adjust log message and add missing ws between words * Fix source link * docs: update cinder system architecture page * Switch to opensuse-15 nodeset * Add volume type name and description check when update volume type * Switch to official Ussuri jobs * Remove unreferenced document table and image files * Remove warning that docs are auto generated * Add reference to LP bug in configuration doc readme * Remove duplicate lines in netapp driver documentation * Imported Translations from Zanata * NetApp SolidFire: Add options for replication mode * PowerMax Driver - ODE Capabilities Fix * LINSTOR driver update for LINSTOR v0.9.12 with REST API * Pure: Ensure generated volume name does not exceed 63 characters * NEC Driver : Code Refactoring * Tests: Fix retype unit test InvalidUUID warning * Fix: Online migrations for untyped volumes/snapshots * Automate generation of quotas api-ref samples * 3PAR: For Peer Persistence, add policy options in RCG * Automate generation of qos specs api-ref samples * Update master for stable/train 15.0.0.0rc1 ----------- * NetApp SolidFire: Fix replication * PowerMax Docs - Short host and port group name changes * Port check\_exec.py to Python 3 * PowerMax Docs - New features and supported software * Add "service token" documentation * Releasenote followup: Untyped to default volume type * PowerMax Driver - Volume Retype Replication fix * PowerMax Driver - Metro Volume Metadata change * Add "deletion policy" property to uploaded images * NEC Driver: Storage assist retype and a bugfix * Add doc page for accelerate image compression * Fix missing print format in log messages * HPE 3PAR: Updated supported operations for multiattach feature * Make sure stale image metadata is not used * PowerMax Driver - Unisphere version check * PowerMax Driver - Debug Metadata Fix * Untyped to Default Volume Type * NEC Driver: Support revert to snapshot * Move the constants to constants.py (for Fujitsu driver) * Unity: Add replication support * Delete unused key when rekeying volume * Leverage hw accelerator in image compression * Move hacking checks to tests dir * Fix pylint env for ancient versions of git * Tests: Fix test\_volume.py import ordering * Rekey volume on clone * 3PAR: Add Peer Persistence support * Denote max microversion for Train * Bump pylint to 2.3.0 * Pylint: use -j 0 arg * target/spdknvmf: Add max\_queue\_depth configuration parameter * NEC Driver: allow more than 4 iSCSI portals * Zadara VPSA: Move to API access key authentication * Fix up test\_snapshot notify tests * Continue renaming of volume\_utils (drivers) * Continue renaming volume\_utils (core) * Rename volume/utils.py to volume/volume\_utils.py * NEC Driver: Support multi-attach * Address reno nit for ZFSSA deprecation * StorPool: update the driver requirements * Mark the StorPool driver as supported again * Advertise some of the StorPool driver's capabilities * Mark HPE LeftHand driver as unsupported * Mark Virtuozzo Driver Unsupported * Fix online data migrations * Blacklist eventlet 0.25.0 * PowerMax Driver - Revert to Snapshot Fix * Fix potential NameError of rc\_id * Google backup: correct string encoding between py 2 and 3 * Mark Huawei Fusionstorage Driver 
Unsupported * Change PDF file name * [api-ref]Host name is not necessary to contain '@' * Mark IBM GPFS Driver Unsupported * Unsupport IBM DS8k and XIV Drivers * Mark Oracle ZFSSA Drivers Unsupported * nova: use EndpointNotFound from keystoneauth1 * Synology: Fix driver to be compatible with python3 * Added information how to use JsonFilter * PowerMax Driver - Volume & Snapshot Metadata * PowerMax driver - check cylinder count of source and target volumes * PowerMax Driver - SnapVX NoCopy Mode * PowerMax Driver - Miscellaneous improvements to delete * PowerMax Driver - Fix for CI on replication\_device config * Add cinder-specs link to readme.rst * Fix fujitsu's wrong call to pywbem * Dell EMC SC: Handle the mappings of multiattached volume * Mark IBM Flashsystem Drivers Unsupported * Mark the IBM Storwize driver unsupported * Add pdf documentation build in tox * docs: update new driver review page * Don't allow retype to encrypted+multiattach type * Infortrend: Mock sleep in tests * NEC Driver: Python3 compatibility * Log exception info when objects fail to init * Move some code out of utils.py * Support Incremental Backup Completion In RBD * Fix NFS volume retype with migrate * [api-ref]Fix response example file of update\_type * Fix tox docs failure * [api-ref]Fix values of service-status in list-hosts * Ignore hacking rule H101 * Fix LVM IPv6 target portals * PowerMax Driver - feedback on tag removal * Address review comments for MacroSAN driver * 3PAR: Add config for NSP single path attach * Revert "3PAR: Provide new option to specify NSP for single path attachments" * Enable mutate for cinder scheduler * Create Seagate driver from dothill driver * Hedvig: Migration to py37 * Fix "Fix upload volume to glance" * Tests: Combine similar encrypted volume tests w/ ddt * api-ref: add metadata parameter in the response of create-backup * Rollback the quota\_usages table when failed to create a incremental backup * Docs: Jenkins is dead, long live Zuul * Cisco FC Zone Manager Driver - Python3.x support * Update docs building * Docs: Make links more robust * PowerMax Driver - Train San REST Port Removal * PowerMax Driver - QoS Utils Move * Add contributor notes on cinder-status checks * Fixing 404's and broken links * Update drivers documentation * Tighten unit test dict assertions * Fix upload volume to glance * Refactor API utilities into api\_utils.py * SF remove deprecated sf\_allow\_template\_caching * Update Veritas Access in Support Matrix * Mark Storpool Driver Unsupported * Fix ceph: only close rbd image after snapshot iteration is finished * Fix issue of getting detail backups list info * Rsdlib changed providing\_pools interface * NetApp ONTAP: Fix JSON serialization error on EMS logs * Fix DetachedInstanceError for VolumeAttachment * Fix get\_driver\_options * Create extras for pypi install * Correcting typo in environment spelling * PowerMax Driver - Create Volume from SG Payload Change * Prevent double-attachment race in attachment\_reserve * Run 'tempest-ipv6-only' job in gate * 3PAR: Provide new option to specify NSP for single path attachments * Imported Translations from Zanata * Add MacroSAN cinder driver * Fix dothill multiattach support * QNAP: Avoid unnecessary sleeps * Update api-ref location * Never skip jobs when .zuul.yaml is changed * Remove outdated scripts from tools/ * [DOCFIX] Correct Cinder Backup Driver Option Value * Bump Pure Storage FC driver version number for Train * Re-add Infortrend Cinder volume driver * Replace "integrated-gate-py3" template with new 
"integrated-gate-storage" * Update NexentaStor5 driver * Compress images uploaded to Glance * HPE 3PAR - Fix detach of multiattach volumes * doc: Fix rbd driver marked support multiattach * Update support matrix entries for MSA and Lenovo arrays * Add policy sample file to gitignore * Add release note ignore for backport note * Move DotHill release note to correct location * Refactor use of encryption/image volume utils * Add context to cloning snapshots in remotefs driver * Cleanup api-ref sample files * Zadara VPSA: Fix driver force detach operation * Unity: force delete lun by default * Launch driver list generation with the same python interpreter * Blacklist sphinx 2.1.0 (autodoc bug) * Add support for VxFlex OS 3.0 to VxFlex OS driver * Add Active/Active HA to Support Matrix * Mark Datera driver unsupported * RBD: save and restore multiattach features * Add case: force delete snapshot * Add case: volume can't be reverted in in-use status * Fix assertion methods in a unit test * Replace deprecated with\_lockmode with with\_for\_update * Fix kwargs passed to exception to get better format of error message * SF: Handle qos values on extend volume * Fix volume type quota defaults in quotas tests * Fix :param: in docstring * Run cinderlib functional tests on Ceph job * Remove the Nexenta Edge Driver * Remove the Veritas HyperScale Driver * Remove the Tintri Driver * remove support for deprecated options in VxFlex OS driver * rename ScaleIO driver to VxFlex OS * NetApp SolidFire: Adding new fields to scheduler data * Add key-manager to sevice catalog * NFS: Retry on intermittent mount failure * Add Contributor Docs for Upgrade Checks * Rename RSD doc file to match title for sorting * Update QoS and add Accuracy Note * Remove the DRBDManage Driver * Add OS-SCH-HNT in extensions list * Cinder RSD Driver * Replace assert to raise AssertionError * VMware: Enable thin volume from image on VVol * PowerMax Driver - Metro ODE Support * PowerMax Driver - PowerMax Formatted Vols Fix * PowerMax driver - Rapid TDEV Deallocation * Remove unused CG volume manager constants * Enable osprofiler for rally job * Small doc updates: canonical URLs, rally doc, etc * Re-enable new pycodestyle errors * Mark Veritas Access Driver Unsupported * Mark Prophetstor Driver Unsupported * Mark the Nimble Driver Unsupported * PowerMax driver - locking improvements * PowerMax driver - clone improvements * PowerMax driver - add 'target\_array\_model' to metadata * VMware: Implement clone\_image * Docs: Add new driver review checklist * Revert "Implement volume capacity stats for VMware" * PowerMax driver - changing from 9.0 to 9.1 REST endpoints * Add rbd secret\_uuid in secondary config * Bump hacking version to 1.1.x * Update Python 3 test runtimes for Train * Fix formatting issues in PowerMax docs * PowerMax docs - known issues with metro * Make py37 unit tests voting * move Dell EMC SC driver exceptions * Add Upgrade Check for Drivers Removed in Stein * Add upgrade check for Windows iSCSI driver * move kaminario driver exceptions * Deprecate NestedDbQuotaDriver for nested quotas * move dothill exceptions * Mark the Sheepdog driver unsupported * move purestorage driver exceptions * move Hyperscale exceptions to driver * move storpool exception * Remove Sheepdog tests from zuul config * Drop use git.openstack.org for tox deps * Fix "connector=None" issue in Kaminario drivers * move cisco zonemanager exception * move vzstorage exceptions * move zadara exceptions * Bump bandit version * Move RBD driver exception to driver * 
LOG \_init\_vendor\_properites * HPE 3PAR-Added support for multiattach feature in hpe3par cinder driver * Pure Storage FlashArray iSCSI driver target CIDR support * [VNX] Fix test case issue * Remove BackupDriverWithVerify class * Cap sphinx for py2 to match global reqs * Handle collections.abc deprecations * move zfssa exception * Update NEC driver manual and support matrix * Correct typo in api-ref * Glob volume driver docs to auto sort * move netapp exception * move Nexenta exception * move NVMEOF exception to target file * move brocade zone manager exceptions * move 3par exception * move xtremio exceptions * Update doc for missed quotation marks * Remove driver requirements of removed drivers * Remove the repeated calls * Remove confusing annotation * VMware: Filter out datastore with 0 capacity * move solidfire driver exceptions * move smbfs exceptions * api-ref: mark migration\_status parameter as optional * Add python37 job * Removes python-linstor as a requirement for LINSTOR driver * Make sure we install cinder requirements during the correct tox phase * remove unused exceptions * move datera exception * move sheepdog exception * Use assertNotIn to check dict item * move Google backup exceptions * Removes drbdmanage as a requirement for drbdmanage driver * move synology driver exceptions * Remove unused exception * Move NVMET driver exceptions to driver file * [Trivial fix]Remove unnecessary slash * Do not use 'self' in classmethod * [Unity] Update doc for compressed volume support * Allow filtering on size for volume list * move GPFS driver exception to driver file * Compact DB migrations to Queens * Remove unneeded comment * Update Brocade FCZM driver's driver options * Fix invalid function name * Add user messages for some volume snapshot actions * Remove obsolete get\_backup\_driver functions * Fix swift backup driver crashing during service startup * Correct and optimize hpe3par tests * lvm: Only use initiators when comparing connector dicts * Update unit test debugging instructions for stestr * OpenDev Migration Patch * Start release cycle checklist documentation * Remove docs for --allow-multiattach * Add missing ws seperator between words * PowerMax docs - add information on new and existing functionality * 3PAR: Provide an option duing creation of volume from snapshot * NFS: Log exception when update\_migrated\_volume fails * Drop use of git.openstack.org * Fix missing print format error * Rollback the volume\_types table when failed to update quota\_usages * Uncap jsonschema * Fix cinder-manage args --max\_count in docs * Use proper casing in backup\_compression\_algorithm doc * Remove DataCore volume drivers * Introduce config to supplement periodic\_interval * VNX: add option vnx\_async\_migrate * add libpq-dev to build psycopg2 * Tests: Fix up test\_volume notify tests * Make backup\_compression\_algorithm case insensitive * Update the min version of tox * Update gate-irrelevant-files * Add upgrade check for presence of policy.json file * Allow reset-state to detach volume as per policy * Fix deprecation escape sequence errors under 3.6 * Document behavior of message.create * Reserve migrations for DB backports * VNX: delete the LUN from VNX backend * Add upgrade checker for backup driver path * Remove non-exception from EXCEPTION\_DETAIL\_MAPPINGS * Remove scanning upgrade checker from opts generation * Fix code-block JSON format issues * Replace HTTP numeric codes with constants * Remove doc for Nova option that doesn't exist * Unity: Add consistent group support * 
Add NetApp E-Series Driver to Removal List * Doc: Mark last Stein API microversion * Raise eventlet lower-constraint to 0.22.0 * Replace openstack.org git:// URLs with https:// * PowerMax Driver - In-Use Retype Path Fix * Run cinderlib functional tests on LVM-LIO job * Imported Translations from Zanata * Fix service\_uuid migration for volumes with no host * Update master for stable/stein * Synology: Add support for UC-Series model 14.0.0.0rc1 ----------- * Handling filters in Generalized Filtering API * Automate generation of volume transfer api-ref samples * [NetApp] Casting block\_count to int before calling ONTAP API * Fix Support Matrix - Pure does support Multiattach * Gate Fix: Bumping versions in lower-constraints.txt * Fix VolumeAttachment is not bound to a Session * Modify the configuration mode of FusionStorage Cinder Driver * Automate generation of snapshots api-ref samples * Refactored NexentaStor5 driver * Adds revert to snapshot feature to support matrix * PowerMax docs - changing cinder tags to powermax * Use excutils.save\_and\_reraise\_exception instead of reraise * PowerMax driver - test fixes for \_sanity\_port\_group\_check * NetApp SolidFire: Fix misbehavior on account creation * Add get\_driver\_options method * docs: mark rbd driver support for extending an in-use volume * Revert "Remove truncate from rootwrap filters" * Revert "Use native python truncate for privsep" * NetApp SolidFire: Fix multi-attach volume deletion * Update required version of taskflow * Update minimal acceptable glanceclient * Create volume attachments status check * Allow setting Pure host personality * Add project\_id in group snapshots list and show API * Use 'target\_helper' config opt in linstor driver * Update oslo.privsep minimum requirement * Use new target\_\* options in documentation * Remove py35 from setup.cfg * Add oslo.privsep to config-generator list * Fix typo in the comment * Use config-table for dynamic config documentation * Drop py35 jobs * VNX: update sg in cache * Disable etcd3 for lio-barbican job * Enable service capabilities update at less than 60 seconds * Extend timeout for database migration tests * Improve documentation of goodness/filter function * api-ref: add qos\_specs\_id to show default volume type response * Update support matrix of Huawei driver * Define single place for irrelevant-files * Fix Snapshot object metadata loading * Use native python truncate for privsep * Fix python3 compatibility of rbd get\_fsid * Remove truncate from rootwrap filters * Trim test list for lio-barbican job * Tests: Fix up migrate notify tests * Extend remove\_version\_from\_href support * Pylint: Filter out cinder.objects and requests E1101 * Fix version return incorrect when endpoint url end without / * Update section 'When do i need a new Microversion?' 
* docs: Add os-migrate\_volume\_completion api-ref * Doc8: Stop skipping D001: Line too long * Include .inc files in doc8 linting * NetApp SolidFire: Thin provisioning scheduler support * REST connector for Brocade zone driver * Fix for auth version change in Brcd HTTP * Remove drbdadm from filters * Replace 'lvconvert' from rootwrap * PowerMax Driver - Unmanage Snapshot Improvements * PowerMax driver - performance improvements * PowerMax Driver - Unit Test Refactoring * PowerMax driver - do an rdf\_group check earlier * Limit RBD discard to 32 bit chunks * Fix incorrect replication info for RBD * Remove LOCI publishing from the post pipeline * Add empty check before using zone driver * VNX Driver: delete\_hba() instead of remove\_hba() * PowerMax driver - handle special case where IG exists * PowerMax driver - move pagination code to request * PowerMax driver - do the portgroup check earlier * PowerMax driver - solve issue of concurrent snapvx operations * Compact DB migrations to Ocata * Automate generation of volume actions api-ref samples * Fix some miscapitalizations of VMware * RBD: add support for multiattach * PowerMax Driver - Storage-assisted in-use retype support * Use oslo.privsep for 'lvcreate' * Add Python 3.6 version in setup.cfg * Update install docs to match default NFS config * Update rbd thin-provisioning in support-matrix * Remove legacy-tempest-dsvm-full-drbd-devstack job * Drop nova-multiattach job * Declare multiattach support for HPE MSA * Remove 'tgtadm' from rootwrap * Huawei driver refactor(2/10) * Adding microversion in releasenote "project\_id response in groups" * Support transfer pagination * Switch tempest-slow to be run on python 3 * Remove LIO multiattach release note * Fix HPE3PAR not returning cached stats * Add project\_id in group list and show API * Return WWN for validation in Pure Storage drivers * PowerMax Driver - Replication Settings Fix * PowerMax driver - code cleanup and minor improvements * QNAP: Add support for QTS 4.4.0 * api-ref: Add cipher to update an encryption type interface * VMAX driver doc - clarify PowerMax rebrand release note * Fix keystone auth config in install doc * Fix the misspelling of "volumes" * Fix "import xxx as xxx" grammar * Add missing ws seperator between words * Add check\_encryption\_provider to volume utils * fast8 improvements * Huawei driver refactor(1/10) * Cleanup rootwrap filters * Fix gigabytes usage error when deleting snapshot group * Report msg error when cannot revert to snapshot * Fix for HPE MSA 2050 login failures * Pass image\_id to ImageDownloadFailed * Update FusionStorage Cinder Driver by using REST API * Move Fujitsu ETERNUS DX related file * Refresh the Service.service\_id after re-spawning children * Correct default policy\_file in the sample config * Remove 'lvrename' from rootwrap * LinstorDrv : Fix wrong key using when getting device path * Create new image volume cache entry when cloning fails * Refactor on policy in code * Add policy test for volume extend/retype/update\_ro policy * Add policy test for volume metadata policy * Add policy test for volume action policy * Add policy test for volume update/del policy * VMware: Implement retype * VMware: Release notes for vmware\_storage\_profile * Temporary solution for fixing gate test\_tpool\_size * Improve volume transfer records * PowerMax driver - changing from 8.4 to 9.0 REST endpoints * Handle 'is\_public' filter in volume-type list * Automate generation of volumes api-ref samples * Imported Translations from Zanata * Implement volume 
capacity stats for VMware * Switch ietadm to use olso.privsep * Enable some tests in the cinder.tests.unit.test\_cmd module for MacOS * Initiating Cinder Volume Manager with large number of volumes * Avoid using 'truncate' on Windows * PowerMax driver - rebranding VMAX to PowerMax * [Doc] Fix typos * Fix for CI information in SPDK volume driver * Add test case: extend non-exist volume * Reset snapshot status available * Add test case: retype volume when driver not initialized * Add test case: delete snapshot when driver not initialized * NetApp: Return all iSCSI targets-portals * Automate generation of API versions api-ref samples * NetApp Doc: Enhance 'netapp\_storage\_protocol' description * SPDK drivers documentation * Fixup some issues of Inspur AS13000 cinder driver * add resource filters to the included data\_files * LINBIT DRBDManage driver moves to maintenance mode * Fix Xtremio driver configuration ordering * Support deferred deletion in RBD * Remove those copy words occured twice times in dell-emc-vmax-driver.rst * Add new LINSTOR volume driver * Hedvig Cinder driver implementation * cinder-volume: Stop masking IOError different than ENOSPC * Add missing backup status in API doc * Using wrong parameter in test\_backup\_ceph * Move 'attach.end' notify to attachment\_complete * Mask passwords in utils.trace for func params * Fix CI failed: test\_get\_volume\_filter\_options\_using\_config * QNAP: Support QES FW on TDS series NAS * ScaleIO: Fix Extra Spec parameter name in driver doc * Remove support for NetApp E-Series systems * Adding SPDK volume driver * Delete related encryption provider when a volume type is deleting * Fix retype notifier test case (BackupNFSTestCase) * [api-ref] Fix the parameter of image metadata * Handle string status codes in logging * VMAX Driver - Failover Unisphere Support * Remove vgc-cluster * Remove 'ln' command from rootwrap filter * Add policy configuration howto * Remove umount from volume.filters * Refactor for Veritas iSCSI driver * Add x\_project\_id, accepted to transfers * Add policy test for volume create policy * Ignore migration 127 error on MariaDB * [Unity] Storage-assisted migration support * Use renamed template 'integrated-gate-py3' * Allow to use \_max qos option together with per\_gb * Driver reinitialization after failure * Correct the use of assertTrue * Revert "Synchronize all LVM operations" * Update IBM storwise svc driver doc * api-ref: volume\_type\_access should be array instead of object * Fix \_per\_gb\_min usage with \_per\_gb * Set message property in ImageDownloadFailed * Fix A/A 'resource\_backend' when scheduling volumes * Add irrelevant-files for integrated test jobs * Handle image download and conversion errors * Synchronize all LVM operations * Rename BackupNFSSwiftBasedTestCase * Cinder volume driver for Inspur AS13000 series * Add user\_id in backup list and show API * Fix permissions with NFS-backed snapshots and backups * VMware: Storage policy support * VMware: Add support for vStorageObject snapshots * Make policy config docs consistent * Revert "Ceph driver should respect the \`--incremental\` option for backups" * api-ref: add response body for set-image-metadata-for-a-volume * Revert "Move check\_encryption\_provider to volume utils" * VMAX Driver - VMAX OS Upgrade Bug * PowerMax driver doc - clarifying SE and Unisphere support * Doc: Remove unnecessary note * Extract copy\_image\_to\_volume into volume utils * Ensure image utils don't block greenthreads * Adding missing Volume states to VolumeStatus enum * 
Automate generation of api-ref samples * Add policy test for TENANT\_ATTRIBUTE\_POLICY * Imported Translations from Zanata * Add missing ws seperator between words * Change a function name of swift backup * Tests : notifier problem with backup * Change openstack-dev to openstack-discuss * Add secret=true to fixed\_key configuration parameter * Cleanup code duplication in cinder.cmd.backup module * Fix the problem of the scheduler status * Check Volume Status on attahcment create/update * Stop cleaning images to be deleted in remotefs driver * Retry on DBDeadLock affecting worker updates * Add test case: clone volume with bad size * Remove deprecated query\_volume\_filters config option * Correct default policy file * VMAX Driver - Place volume in SG as part of unmanage volume * PowerMax driver doc - removing support for VMAX2 * Ceph driver should respect the \`--incremental\` option for backups * api-ref: fix some issues in volumes interfaces * VMware: Check empty provider location * Ignore updated stable release note * [Doc] Fix status and parameters of vol connection api * Imported Translations from Zanata * sqlalchemy: GroupTypeProjects.deleted is boolean * An alternate way to fix retype notifier test case * api-ref: document encryption type provider choices * Imported Translations from Zanata * api-ref: volume summary is available from v3.12 * Revert "Remove mount from volume.filters" * Adding SPDK NVMe-oF target driver * doc: Change variable name DEFAULT\_API\_VERSION to \_MIN\_API\_VERSION * Block broken requests 2.20.0 * [api-ref] Fix the value of project\_id * NetApp SolidFire: Revert volume to snapshot * lvm: Avoid premature calls to terminate\_connection for muiltiattach vols * api-ref: fix succeed code and add field for update-backup * api-ref: add missing volumes\_links in list-volume interfaces * api-ref: Adding backup create response example * Add policy test for volume GET\_ALL\_POLICY * Imported Translations from Zanata * Set right attach mode after migration * api-ref: fix response example of list-backups * Make lower constraints CI works well * api-ref: fix response code and data type for volume interfaces * Imported Translations from Zanata * Improve/Clean api-ref parameters * [Trivial Fix] Correct spelling error of "bandwidth" * Add test coverage to manage existing flow * VMAX driver - allow for a clean system with no initiator groups * Drop simplejson usage * Remove Babel from requirements * Update docs landing page to follow guideline * api-ref: Correct response body type for show host details * Increase the length of resource property in quota\_usages * Remove DeprecationWarning of "decodestring()" * VMAX Driver - Fix for invalid device id length * Remove out-of-date configurations in setup.cfg * Add policy test for volume:force\_delete * add missing comma * Simplify the policy test case * Fix a typo in multiattach doc * Remove setup.py check from pep8 job * [Trivial] Fix typo in comment in delete\_volume * Update backup flow for CephBackupDriver * Imported Translations from Zanata * Handle rbd.OSError on broken RBD image * Fix non iSCSI attach serialization * Set user message on volume create failure * Imported Translations from Zanata * Raise the ImageTooBig exception when found it * Move tgt targets to privsep * Fix and Optimize : retype tests * hpe 3par driver initialization failure * api-ref: cleanup status conditions for backup/restore APIs * PY3: Ensure rados.Object.read/write use byte data * Forbidden to revert volume to a different size snapshot * Fix debug 
message for \_copy\_volume\_data * Remove scstadmin from rootwrap * [Trivial] Replace 'action' with 'command' in cinder-status doc * Allow using forward slashes in metadata * Fix unexpected behavior in \_clone\_image\_volume * Remove scsi\_id from rootwrap filter * Remove mount from volume.filters * Drop drv\_cfg from rootwrap filters * Remove touch command from volume.filters * Imported Translations from Zanata * cinder-manage online\_data\_migrations fixes * Handling unexpected python error "NoneType object is not iterable" * Base framework for cinder-status upgrade check * api-ref: add missing fields in volume group types * Huawei: Simplify wait\_for\_condition calls * Update sphinx extension logging * Restructure Huawei driver * VMAX: set faked max\_oversubscription\_ratio * Move cinder-manage page to cli doc subtree * api-ref: fix some inconsistencies in snapshot API * Add test-requirements to pylint reqs * Rename cinder.privsep.utils to cinder.privsep.path * Adding regression test for update\_group * DS8K: correct the usage for ssl method(SSLContext.wrap\_socket) * Fix: storage\_pools key in Huawei Driver * Imported Translations from Zanata * Fix for field type error * fix misspelling of available * Imported Translations from Zanata * Increment versioning with pbr instruction * Mark Veritas HyperScale Driver Unsupported * Cinder Backup: object\_count value incorrect * Extract volume image metadata into volume utils * Extract \_get\_image\_metadata into volume utils * Use tempest-pg-full * Remove the ITRI DISCO driver * NFS Backup: Avoid TypeError in os-brick when not configured * [minifix] Update reno formatting on Quobyte driver defaults change * api-ref: totalSnapshotsUsed in limits response should be integer * Add column only when it doesn't exist * VNX: Add constraints for async migration * Remove volume:get policy from test policy file * Replace openSUSE experimental check with newer version * Fix for cinder-manage db purge * Follow Up: Generalizing is\_replicated\_str to is\_boolean\_str * api-ref: Add response fields and response example for snapshot manage * api-ref: remove created\_at from response example of list-transfer * Handle drivers that do not support list manageable * Fix doc output examples * Fix: UnboundLocalError variable referenced before assignment * api-ref: Add response fields and response example for volume manage * api\_ref: total\_capacity and free\_capacity can be float * Stop using deprecated rpc\_backend option * Don't quote {posargs} in tox.ini * Imported Translations from Zanata * api-ref: qos and extra\_spec are optional in types view * Fix wrong uuid recognized when create group * api-ref: Fix namespace parameter for backend capabilities * Imported Translations from Zanata * SF ensure the correct volume is deleted * Mark the Nexenta Edge driver unsupported * Mark the Tintri driver unsupported * Remove source\_replica info from api-ref * Remove sg\_scan * Update unit test debug instructions * API-REF:os-quota-sets v2 API reference has the wrong parameters * VMAX docs - Replace serial\_number * Remove the HGST Flash Storage Driver * Add missing 'is\_public' volume type parameter * nimble storage: support for force detach * nimble storage: retype support * Use Tempest slow job to run all slow tests * api-ref: Change extensions updated type to updated\_at * api-ref: remove encryption from Show-an-encryption-type * hosts api ref: all fields should be of type string * Remove systool from rootwrap * Add tests for multiattach check in cinder.objects * Clean 
up the deprecated description in doc * Fix wrong NotFound in get\_by\_name\_or\_id * api-ref: add missing response body for groups * Fix CLI output examples * Fix multiattach set to false after retype * Update Dell EMC Driver's multi-attach Support * Fix backup driver configuration examples in the documetation * api-ref: add missing fields in volume types * api-ref: total/free\_capacity should be with postfix \_gb * api-ref: namespace in extensions is optional * Fix pylint warning * Exclude disabled API versions from listing * Enable split logging for cinder-keystoneclient interaction * Remove unecessary pass * VMAX Driver - Fix for manage volume if volume is part of SG * api-ref: clarify volume\_type param in volume create API * api-ref: fix req/resp params for v3 os-quota-class-sets * api-ref: mark name as optional in volume create API * Fix DRBD volume driver creating a 2-volume resource * ZFSSA iSCSI implement get\_manageable\_volumes() * Get rid of keys() usage * Imported Translations from Zanata * Fix bug of renaming volume with same name * Fix some inconsistencies in qos-specs api ref * Propose example volume protection tests * services api ref: fix field enum value and add missing field * ZFSSA iSCSI driver doc fix - allow\_rename auth/o * ZFSSA handle manage nonexistent volume * Adding variable for total gigabytes used by snapshot param * [Optimize] Use OVO when retrieve volume object * Add microversion history to api-ref * Dell PS Driver moves to maintenance mode * Remove udevadm from rootwrap * VMAX Rocky doc - version information * Fix some inconsistencies in messages api ref * Fix typos in volume api ref doc * Remove aoe-revalidate, aoe-discover, aoe-flush * VMAX Driver - Rollback for manage existing volume * ZFSSA volume driver REST client python3 fixes * Fix image volume cache max size and max count limits * Move hscli to privsep * Imported Translations from Zanata * 3PAR: Update Storage Driver docs * Imported Translations from Zanata * EMC ScaleIO driver does not honor create from snapshot volume size * Link to the in-tree driver support matrix from multiattach docs * Imported Translations from Zanata * api-ref: document no\_snapshots default for volume transfer API * api-ref: add preconditions for volume transfer APIs * Add comment to online data migration command * Rename devstack-plugin-ceph jobs * Fix wrong filter of backups in db api * Fix indentation in docs * Remove resolve\_hostname * Imported Translations from Zanata * Clean up bare raised pylint warning * Fix pylint warnings for "unnecessary not" * Update Zuul configuration * add python 3.6 unit test job * switch documentation job to new PTI * import zuul job settings from project-config * Ds8k: Mock evenlet.sleep calls * Fujitsu DX: Add retries for busy error * Ensure VNX unit tests don't sleep * Simplify running pylint * Tintri: Inherit tests from BaseDriverTestCase * NetApp SolidFire: Fix CG snapshot deletion * Imported Translations from Zanata * Modify Pure driver to configure PG/Pod names * Move check\_encryption\_provider to volume utils * Imported Translations from Zanata * Move release note to the releasenotes/notes dir * NetApp SolidFire: Fix force\_detach * fix misspelling of 'configuration' * Remove deprecated HPE Lefthand config opts * VMware: Fix revert-to-snapshot * VMware: Remove member initialization in properties * Move sphinx-feature-classification to docs reqs * Simplify hostname lookup * Imported Translations from Zanata * tintri: Remove \_resolve\_hostname * trival: Fix some spacing issues * 
Fix typo * Update doc for get-pools API * Avoid using dict.get() in assertions * Imported Translations from Zanata * Remove auth\_uri usage * 3PAR: Added retries on volume deletion * VMAX Driver - Initiator retrieval short hostname fix * SIO: Clean up padded volume comments * Add test case: create volume from source volume * Fix IPv6 for Cinder NetApp ONTAP drivers * Remove cinder-tox-compliance job * Correct the link of 'OpenStack Documentation Contributor Guide' * Fixed invalid number of arguments bug in ScaleIO driver * LVM: Disable multiattach for LIO iSCSI target * Replace assertRaisesRegexp with assertRaisesRegex * Fix deprecation warnings * VMAX doc - important known issue * Drop legacy backup service support * Changed default behaviour of nas\_secure\_file\_\* for Quobyte driver * Imported Translations from Zanata * api-ref: add docs for os-migrate\_volume API * Remove deprecated sio\_allow\_non\_padded\_thick\_volumes * Remove flashsystem\_multipath\_enabled opt * Dell EMC: Added excluded\_domain\_ips ListOpt to SC driver * Remove WindowsDriver mapping * Remove deprecated iSCSI target config opts * Imported Translations from Zanata * Remove os-image-create API extension * Make divisibility py3 compatible in DRBD driver * Xtremio: Remove unused constant * Raise HTTPUnsupportedMediaType when content type is unsupported * Add 'volume\_attachment' to volume expected attributes * ScaleIO: Deprecate sio\_allow\_non\_padded\_thick\_volumes * Make solidfire code py3-compatible * Imported Translations from Zanata * Add api-ref for mv 3.55 volume transfer * Fix support indicator for rbd extend attached * ScaleIO: Disable volume creation without padding * Add policy granularity to the encryption API * Fix api-ref title levels and index * Fix XTREMIO driver name * Add False check when do \_retype\_error * Add retry to LVM deactivation * [Docs] Update some links for Gerrit of Code Reviews * Optimizing code (wrap\_socket()) * Save the backup.service just before \_run\_backup * Make code py3-compatible (global callable()) * Update document for metadata show API * Reserve 3 migrations for DB backports * Imported Translations from Zanata * Imported Translations from Zanata * NetApp SolidFire: Fix NetApp SolidFire SSL option * Imported Translations from Zanata * Remove the CoprHD driver * Update reno for stable/rocky 13.0.0.0rc1 ----------- * Imported Translations from Zanata * Fix \_attachment\_reserve to not allow attaching an invalid status volume * Allow extra parameters in volume\_type\_encryption * Default functional tests to use v3 API * Add retries to delete a volume in the Nimble Driver * Add volume create schema enforcement unit tests * Fix one help description of rbd driver * Fix typo on Storwize release note * Note max API version for Rocky * Add sphinx-feature-classification based support matrix * VMAX driver - releasenote fix * Solidfire: Avoid UnboundLocalError * 3PAR: Update Storage Driver docs for Rocky release * VMAX Docs - SSL Support Revision * VMAX docs - Rocky features * VMAX docs - Restructure of content * VMAX Driver - Fix for get-pools and returned Service Levels * VMAX driver - minor metadata additions * VMAX driver - replace 'data=' with 'message=' * VMAX driver - pagination on get vols from SG list * Correct volume-transfers API endpoint * Improve messages api-ref description * Add ability to specify minimum value when using capacity based QoS * IBM XIV: fix issues for replication * Fix none-ascii char in doc 13.0.0.0b3 ---------- * solidfire: Enable SSL with requests * 
VMAX Driver - Fix for multiple clones failure * Fix indentation in docs * VSA: Concurrent request handling in attachment * NetApp ONTAP: Fix driver force detach operations * docs: Write high availability devref * Consume quota when importing backup resource * Update volume create schema to allow 'null' * nimble: Fix missing ssl support * INFINIDAT: add host.created\_by metadata key * Set bootable when creating volume from snapshot * Handle multiattach attribute when managing volumes * Update delete group snapshot API exception handling * tintri: Enable SSL with requests * Fix remotefs driver report wrong value * IBM XIV and DS8K: Update document for Rocky release: * Revert "Use Tempest scenario job to run all scenario tests" * Add release note for force delete MV fix * ActiveCluster support for Pure Storage drivers * Remove mox3 * XtremIO: support multiattach * NEC driver: Return non-random iscsi portal in initialize * Update microversion number with constants * [Unity] Compressed volume support * Don't require size when creating volume from snap * Fix some nits in transfer volume with snapshots * Add keystoneauth options to list\_opts * Update microversion for force delete volume parameter * VMware: set 'shared\_targets' as False * INFINIDAT: validate\_connector should validate by protocol * DS8K: correct the behavior for retype method * Non Windows per service lock for Backup service * QNAP: Add support for QES 2.1.0 * [doc] Use openstack client commands to replace cinder client * [doc] Use openstack client commands to replace cinder client * Use Tempest scenario job to run all scenario tests * Transfer snapshots with volumes * Update attachment create API document * NEC driver: loss of access after live-migration * Pure Storage FlashArray driver - enable multiattach support * VMAX driver - Block revert to snapshot for replicated volumes * VMAX driver - Add VMAX specific volume metadata to logs * Add ability to specify mode to attachment-create * use restore\_file instead of open volume when restoring * Unity: add multiattach support * Update api-ref and release notes for schema validation * Prepare Google Backup driver for latest libraries * Fix Port nvmet driver to use privsep * NetApp ONTAP iSCSI/FCP: Enable multiattach capability * Remove note about mox * Unity: add thick volume support * LVM: Enhance option descriptions * api-ref: Update notes on encryption key size * Ceph backup: set close\_fds on Popen * Use ensure\_tree instead of mkdir when creating paths * RemoteFS: Use summarize option for "du" * Remove hus-cmd from rootwrap volume.filters * Avoid os.getpgrp, breaking Windows c-backup support * Enable split logging for cinder-glanceclient interaction * Remove tee from volume.filters * Revert "Use os.mkdir instead of mkdir" * Change blockbox to block-box * Remove unused commands from volume.filters * Remove ssc command from volume.filters * NetApp: Deprecate E-Series drivers * Use os.mkdir instead of mkdir * Remove blockdev from volume.filters * Drop dmsetup from rootwrap * Remove mkdir from volume.filters * NetApp ONTAP: Remove NFS driver online volume extending support * Add policy in code documentation for os-set\_bootable API * Port nvmet driver to use privsep * DS8K: correct behavior and return value for terminate\_connection method * DS8K: correct the behavior for get\_pools method * Correct Pure Storage configuration documentation * INFINIDAT: change create\_child to create\_snapshot * Adds Overlay Volumes Created from Snapshots to Quobyte * Fix RBD incremental backup * 
Add blueprints and releasenotes link to README * Fix parameter description in volume and snapshot manage extension * Keep volume's status in 'creating' when creating from backup * RBD: support to get manageable snapshots * [NetApp ONTAP] Add filtering to API trace logging * [Logging] Enable filtering of trace\_api logs * Fix 'availability\_zones' attribute not recognized when creating volume * V3 jsonschema validation: Volumes * Enable running py36 unit tests * Handling type's multiattach extra specs in API task flow * NEC driver: Fix iscsi multipath initialize\_connection tests * NetApp ONTAP: Remove iSCSI/FC driver online volume extend support * Multipath: Update docs for backend drivers * Fix time convert error in testcases * NEC driver: Reuse iSCSI LUNs of detached volumes * Fix quota error when deleting temporary volume * Make scheduler check online\_extend\_support capability * Clean up unnecessary test params in test\_rbd * NetApp ONTAP NFS: Enable multiattach capability * Adds more validations in create\_snapshot * Add policy check for type show and type list * Fix vmware disk type changed when volume type is eagerZeroedThick * Remove report of allocated\_capacity\_gb from Inspur driver * Fix v3 sample files * Dell EMC: SC FC Driver wwns api response in lower * Add v3 api-ref for force delete snapshot * api-ref: Adds os-update\_readonly\_flag API * Fix prophetstor drivers report value * Fix invalid escape sequence warnings * Get rid of conditional rtslib\_fb imports * DataCore: get rid of pylint errors due to contextlib * Nimble: Fix tuple being passed for exception init * Solidfire: Fix pylint errors for class decorators * Infinidat: Get rid of pylint errors due to private lib * NetApp: Remove duplicate volume\_type kwarg * Increase pylint processes * Add v3 api-ref for updating snapshot status * Reject any filter whose key contains non ascii character * HPE3PAR: Fix pointing to backend in group failover * Remove 'message' from format argument from exception * Fix the usage of RBDVolumeProxy in getting manageable volumes * Storwize: get list of all volumes for ensure\_export * Keep ORM names matching their VO counterparts * Storwize:clone volume with iogrp value as expected * [XtremIO] Remove provisioning factor when reporting * DS8K: Enable multiattach support * Move blockbox DB volume to home directory * VMAX Driver - Incorrect SSL cert verification * VMAX docs - use config-table directive * VMAX driver - remove deprecated XML option * VMAX driver - Deleting snapshot that is source of multiple volumes fails * VMAX driver - Incorrect service level reporting * VMAX driver and docs - change san\_rest\_port to san\_api\_port * Imported Translations from Zanata * api-ref: Rephrase delete volume precondition * policy docs: Add volume summary under GET\_ALL\_POLICY entry * Storwize: add data reduction pool support * Allow running unit tests on macOS * Update macOS devref info * Fix tox -e docs failure * Add ansible helper to Linux dev environment info * Remove 'message' from format argument from exception * Remove 'message' from format argument from exception * Be explicit about tox python version * DS8K: correct the return value for initialize\_connection method * Add debug logs for weigher filters * Add documentation page for basic volume quality of service * VMAX: Address nits in list manageable code * Mark Dell EMC CoprHD Drivers unsupported * Add doc about backend\_default config * [doc]Modify failed hyperlinks * [api-ref] Fix the parameter metadata in v3 * Empty option value 
maybe cause Unity driver failed to initialize * Support image signature verification * Imported Translations from Zanata * trivial: Add min\_version: 3.1 to api\_ref for upload volume * Fix cinder-volume uses latest req-id for all log calls 13.0.0.0b2 ---------- * Mock log output from DataCore drivers * Add NexentaEdge drivers * VMware: Add file open mode to support python3 * Tests: Remove some ovo FutureWarnings about UUIDs * Add check to the flake8 job * Cloning image fails results duplicate cache entry * api-ref: Fix some params in volume types * Switch to oslo\_messaging.ConfFixture.transport\_url * Run pylint job under py3 * Disable failing backup tests in LIO/Barbican job * Remove kwargs 'message' when exception is created * Api-ref: Fix the incorrect parameter description * Replace os.makedirs to avoid process race * Implement privsep boilerplate in cinder * Default tox jobs to python3 * Fix group availability zone-backend host mismatch * Mark Disco driver unsupported * Mark the HGST Driver unsupported * Move driver config opts out of catchall page * RBD: support to report backend state * IBM XIV: Report backend state in service list * IBM DS8K: Report backend state * Add note about multiattach not supporting encryption to docs * Fix Dell EMC Xtremeio drivers report wrong value * Fix documentation error * Correct S-Series to DS-Series systems * Remove the deprecated config option * Fix GCS backup driver docs * Doc: Fix broken link (Nimble) * Imported Translations from Zanata * Add user messages for extend volume operation * Mark DataCore Drivers unsupported * V3 jsonschema validation: volume\_type\_encryption * ZFSSA handle non-existent snapshot * Update storage backends supported for Lenovo * Fix handling of 'cinder\_encryption\_key\_id' image metadata * Remove \_static folder in doc directory * Update "auth\_url" * Fix api-ref for backup export and import * Missing request parameters in update quotas * Trivial: Update pypi url to new url * V3 jsonschema validation: admin\_actions * [Api-ref] fix the type of service\_id in worker cleanup api * api-ref: Fix v3 API version details error * [Unity] Return logged-out initiators * Add cinder v2 experimental job * Fix cannot add a column with non-constant default * Update author in latex sphinx doc builds * Update the volume-encryption doc * Additional fix for hostname validation regex * doc: correct indentation from a note block * V3 jsonschema validation: scheduler\_hints * INFINIDAT: fixing extend mechanism * Storwize: update the function is\_volume\_hyperswap * Fix the argument for cinder-manage command * NEC driver: Faster clone status check * Fix cinder-volume setup in block-box * [Docs] Fixes documentation warning * Remove duplicate sample files * Fix option description in DRBD driver * Remove Ocata config option changes doc * VMAX driver - VMAX list manageable volumes and snapshots * VMAX driver - Enable multiattach support * Fix wrong usage of fields.BackupStatus.AVAILABLE * ScaleIO: Rebranding of product to VxFlex OS * Disable test\_volume\_backup\_restore for lio-barbican job * Support availability-zone type * Fix rbd driver json loading * Fix create backup API document * Add missing volume APIs * Imported Translations from Zanata * [Api-ref] fix incorrect host value in worker cleanup api * Fix NVMET minor log message issues * Fix google backup driver * V3 jsonschema validation: os-services * Update auth\_uri option to www\_authenticate\_uri * Huawei driver supports create\_group\_from\_src * Disallow multiattach for encrypted 
volumes * NetApp ONTAP iSCSI: Force exception on online extend * Fix hostname validation regex * Add snapshots to minimum driver interface * Add descriptions for auth\_strategy choices * RBD: remove redundant exception log to reduce noise * Log backup\_service.restore() exception * Add support for promoting a failed over backend * [Unity] Add support of removing empty host * V3 json schema validation: generic volume groups * Fix i18n issue in volume\_action and admin\_action * Support filter backend based on operation type * Remove stale pip-missing-reqs tox test * NEC driver: Delete max volumes per pool limit * [ci][rally] make the job inherit the right parent * Make config-table display list defaults in list format * api-ref: modify 'keys' description * QNAP: driver fails to detach while uploading volume to image * Remove static config option table for Infinidat * Remove zonemanager static config option tables * [VNX] Fix property set in configuration doc * Fix os-detach attachment\_id schema * Adding NVMET target for NVMeOF * DRBD: Remove cast to int in size calculation * Remove backup driver static config option tables * Add missing 'transfer' parameter in API document * Update HPE docs to use config-table directive * Skip running py36 tests on irrelevant files * Remove ZFSSA static config option tables * Remove Zadara static config option table * Remove Virtuozzo static config option table * Remove Tintri static config option table * Remove Synology static config option table * Remove SolidFire static config option table * Remove Quobyte static config option table * Replace Chinese punctuation with English punctuation * Replace cluster object's host with service\_topic\_queue * IBM XIV: enable FC zoning to all the ports * [doc] Add the possible status for attachment * Remove Nimble static config option table * Remove NEC static config option table * Remove static config option table for Lenovo * Remove Kaminario static config option table * Remove static config option table for DISCO driver * Add skip filter to config-table directive * Imported Translations from Zanata * Update auth\_url value in install docs * Fix doc warnings causing local failures * Updates overprovisioning dev-ref * Add reno to test-requirements * api-ref: modify 'has\_dependent\_backups' param's description * V3 jsonschema validation: Quota classes * V3 jsonschema validation: Quotas * Remove Fujitsu static config table * Update Dell EMC docs to use config-table directive * Update Datera docs to use config-table directive * Update Sheepdog docs to use config-table directive * Remove remaining block device driver config info * Update CoprHD docs to use config-table directive * GoodnessWeigher schedules non-type volumes * Add config table sphinx directive * Fix doc errors in nimble driver documentation * api-ref: Correct the cgsnapshot\_id parameter * Test running jobs under py36/bionic * Imported Translations from Zanata * Clean output of driver list generation * Fix QNap docstring format * Fix driver docstring RST formatting * Add VERSION and WIKI\_NAME to NEC driver * Remove deprecated lvm\_max\_over\_subscription\_ratio * Add Keystone v3 domain information to context * RBD: Handle ImageNotFound exception in \_get\_usage\_info correctly * QNAP: driver fails to create volume and snapshot in some case * RBD: add support for active/active replication * Dell EMC SC: Removed space from debug statement * Collect resource id from RequestSpec parameter * Add missing VolumeAttachStatus enum field * Clearup duplicate 
req\_version variable * Parent of RBD cloned volume marked as deleted should not be manageable * Fix backup/restore error for ceph rbd backend * Trivial: Update pypi url to new url * WinSMB volume driver doc: Add Volume backup support 13.0.0.0b1 ---------- * Dell EMC SC: Enabled multiattach in Storage Center Driver * Clarify sizing when creating vol from source * Improve API method logging * NetApp ONTAP: Set new sub-lun clone limit for ONTAP driver * Adding abstract class NVMeOF * V3 jsonschema validation: volume\_transfer * Imported Translations from Zanata * FC: refactor fczm utils decorators to functions * VMAX driver - Attaches/detaches after host assisted retype failing * V3 json schema validation: volume manage * V3 json schema validation: workers * RBD snapshot needs to be protected/unprotected after being managed/unmanaged * VMware: Extend volume after clone * Set sample\_default for 'host' option * Uncap eventlet * ZFSSA implement "Enhance iSCSI multipath support" * Rephrase for multiattach support * NetApp ONTAP: Fix export path used as volume name * Use InvalidConfigurationValue instead of InvalidInput * NEC docs: add new features and improve description * Update the HP MSA and Lenovo driver documentation * ZFSSA accept pool stats when cluster node stripped * Refresh volume when checking for conflicting attachments * nimble: update documentation for supportability * Doc: Add driver security notes to contributor doc * Move MV history under API Microversion contrib docs * V3 jsonschema validation: snapshot\_actions * Fix mocks for ZFS Storage Appliance unit tests * Storwize: Report backend state in service list * Add doc to pep8 check\_exec * VMAX docs - miscellaneous clarifications * [Optimise] Use ThreadGroup to manage periodic tasks * Update auth\_uri option to www\_authenticate\_uri * add lower-constraints job * Unignore .stestr.conf * VNX: fix performance in create/delete\_volume * Default pep8/fast8 to run under python 3 * Move testing info higher in contrib docs * Clean up doc configuration * Use sphinxcontrib.apidoc for module docs * Remove exec flag from doc files * V3 jsonschema validation: volume actions * fixs grammar problems and unify format in api-ref * Clarify volume migration CLI reference * Add delete snapshot preconditions * Handle migrating encryption key IDs in Backup table * Dell EMC SC: dell-storagecenter-driver docs updated * Doc: Add inspur driver doc to toctree * VMware: Adding NFS41 datastore as supported * Avoid second restart on offline upgrades * [Optimize] Validate configured scheduler filters when initialize * Remove extended attribute 'os-volume-replication' in Doc * Cleanup volume management CLI reference * Use check\_virtual\_size to do the size check * Unity: fail to detach lun when auto zone enabled * Implementation of Cinder driver over FC for Inspur InStorage * Dell EMC SC: volume model update return missing vol id * Adding image membership check to \_is\_image\_available * OVO: support query for disabled services * Imported Translations from Zanata * Fix doc format errors in module docstrings * Remove unused doc extensions * Allow extension of volumes with snapshots for VZstorage driver * 3PAR: Report backend state in service list * Storwize: self assign the SCSI lun id for volume attaching * ScaleIO: Prevent usage of unsafe volumes * Skip masking of command output over ssh * Move openstackdocstheme to extensions in api-ref * Unity: Enable ssl verification * Storwize: check flashcopy\_rate before mkfcmap * Reduce scope of the lock for image 
volume cache * Don't use custom password generating code * api-ref: Fix microversion for volume.backup\_id * Remove driver-specific notes from extend api-ref * Document more review processes * fix a typo: s/clent/client/ * Updated from global requirements * [sytle] use http code constant instead of int * V3 jsonschema validation: Clusters * VNX: fix options incorrect behavior * Storwize: modify hyperswap host\_site configuration * Support Qemu >= 2.10.0 in Quobyte driver * fix a typo in remotefs.py * Fix get\_max\_over\_subscription\_ratio docstring * Fix: Incorrect replication status on clusters * Fix: Including resources in None cluster * Follow the new PTI for document build [Updated] * Imported Translations from Zanata * NEC driver: Disallow access from the source node after live-migration * Fix parameter error in image\_utils * Remove inappropriate directory space check * Add max MV for Queens and fix formatting * INFINIDAT: set REST API client parameters * Mock local directory testing for image conversion * Imported Translations from Zanata * Fix vhd image conversion regression * Sync snapshot's encryption\_key\_id with volume's value * V3 jsonschema validation: types\_extra\_specs * DS8K: correct in-use replication vol status * Dell EMC SC: ISCSI initialize\_connection fixes * nimble: update documentation for supportability * Fix incorrect usage of assertEqual() method in RBD unit tests * Add missing 'target\_obj' when perform policy check * Fix policy in code docs for extended\_snapshot\_attributes * Updated from global requirements * Imported Translations from Zanata * Revert "Stop unnecessarily querying storage for stats" * Storwize: filter active wwpns for NPIV * Use soft authorize for 'extended\_snapshot\_attributes' policy * Add ISCSI driver for Veritas Access * Adding Glance method for listing image members and unit test * Updated from global requirements * QNAP Drivers - Move from httplib to requests * Support multiple processes on Cinder Backup * V3 jsonschema validation: volume\_image\_metadata * Updated from global requirements * Fix missing print format error * Fix api-ref response code title levels * Clean out config deprecation information * Fix API reference attach\_mode parameter default for attachments API * VMAX docs - prefix emc-vmax-driver.rst with 'dell-' * Enable slow tests for lvm-barbican job * VMAX driver - Replication failover performance improvement * V3 jsonschema validation: qos-specs * Updated from global requirements * fix spelling error with errormessage * [sytle] use http code constant instead of int * Remove ExceptionInParsingArguments * Avoid build system IP going into sample config * VMAX driver - Retype replicated volumes * use defusedxml to avoid XML attack * [VNX] Restore snapshot to volume * Update links in README * Updated from global requirements * Add documentation for capacity based quality of service * Make the LVM Barbican job vote * Fix typo errors * INFINIDAT: add support for multi-attach * Update doc name and path for dell emc vnx driver * Use rest\_status\_code for api-ref response codes * Add support for unmanaging snapshot of rbd driver * Stop unnecessarily querying storage for stats * Allow configuring tpool size * Add backup restoration cancellation support * Imported Translations from Zanata * NetApp: Add use-exact-size parameter when creating a LUN on ONTAP iSCSI * Storwize: update flashcopy\_rate range and add it to volume type * Add 'list\_volume' in Group show&list API Doc * Dell EMC SC: Added timeout options for SC driver * 
VMware: Option to specify datastore name regex * [api-ref] Fix 'volume\_id' for in the v3/volumes-v3-volumes-actions.inc * Updated from global requirements * Imported Translations from Zanata * Dell EMC SC: Error Deleting Consistency Group * Remove utils.read\_file\_as\_root() * 3PAR: Add \`force detach\` support * ScaleIO Driver: Document support for ScaleIO 2.5 * Move release note put in wrong location * Fix the testcase of test\_cmd * RBD: Add comment about multiattach support * Imported Translations from Zanata * RBD: Improve connection debug statement * Delete redundant code * Parse endpoint arguments in SolidFire * Api-ref:Add target volume preconditions for backup restore * Add functional-py35 gate job * Support cross AZ backups * Reserve 5 migrations for DB backports * Improve ChunkedBackupDriver hashlib calls * Fix allocated\_capacity\_gb race on create volume * Fix reporting old stats * Change order of installation to match previous * Log config options with oslo.config * RBD: Don't query Ceph on stats for exclusive pools * Imported Translations from Zanata * Enable hash randomization in unit tests * Tests: Fix backup validation with hash randomization * Add multiattach reporting to SolidFire * Tests: Fix 3par tests with hash randomization * Tests: Disable 2 netapp unit tests * Tests: Fix NetApp tests with hash randomization * Tests: Fix VMAX tests with hash randomization * Tests: Fix inspur tests with hash randomization * Fix find -delete race in tox * Use oslo\_db.sqlalchemy.test\_fixtures * Adds a Cache for Volumes Created from Snapshots with Quobyte * Add unit test coverage for backup chunkeddriver class * Add throughput limits that scale per-GB * Change 'hpe3par\_snap\_cpg' to 'hpe3par\_cpg\_snap' * Imported Translations from Zanata * Updated from global requirements * Imported Translations from Zanata * Cinder logs rabbitmq password on connection log * docs: Fix volume encryption link * Tests: Fix python version check * QNAP: Pass unit tests with hash randomization enabled * Provide a hint when performing a volume action can't find the method * Expand on mv 3.50 description * Only enforce multiattach policy for retype target * 3PAR: Monitor task of promoting a virtual copy * Correct attachment create success response code * api-ref: note that force detach is admin-only by default * Imported Translations from Zanata * Stop a connections leak when freeing an SSHPool * Remove deprecated fatal\_exception\_format\_errors option * VMAX driver - Concurrency issues involving replicated volumes * Remove legacy driver name mappings * Modify broken link * Admin guide documentation for volume multiattach * Remove deprecated service-to-driver mapping for backups * ScaleIO: Add documentation for containerized overcloud * Solving permission errors due to directory ownership on NFS * Imported Translations from Zanata * Improve logs on scheduler * Fix the note of API version in admin doc * Fix grammar error * Remove "cinder-manage logs" commands * Remove some unused parameters in parameters.yaml * Fix typos * Fix link error in 'Boot from volume' doc * Imported Translations from Zanata * fix misspelling of 'return' * Fix spelling mistakes * Fix typos in cinder * Enable multiattach capability for the zfssa driver * Update reno for stable/queens * Fix AZ not matching backend after migration 12.0.0.0rc1 ----------- * Use provider\_id for SolidFire Volume lookups * Deprecate Cinder Hosts API/Extension * Add Windows volume backup support * Fix json data format * Fix UnicodeDecodeError when decode 
API input * Api-ref: add 'all\_tenants' API query option * Make CinderException format errors fatal for tests * VMAX doc - Queens feature documentation * Revert consumed quota when failed to create group from source group * Imported Translations from Zanata * Zuul: Remove project name * Add python 3.5 in classifier * Correct lvm\_type default value in config doc * modify the http link to the https link * Add missing MV history for 3.50 * Move doc of report backend state to cli * Imported Translations from Zanata * Fix pylint error in DotHill drivers * Imported Translations from Zanata * Storwize: avoid ascii code error for ssh response stderr * Fix DuplicateOptError in fixed\_key migration code * Storwize: modify the self.\_helpers to backend\_helper * Update document for worker cleanup API * Add backup cancellation admin doc * 3PAR: fix cloning issue in create group from source * Fix link in boot from volume docs * docs: Use the encryption provider format constants * Remove unused parameters in v2/v3 parameters.yaml * Fix leftovers after backup abort * Updated from global requirements * Add policy check for create volume with multiattach * Dell EMC SC: Find volume folder API call fails on root folders * SMBFS: allow snapshot ro attach * SMBFS: fix creating volume from snapshot * Fix the typo in api\_conditional\_updates.rst * Fix wrong grammar * [VNX] \`initiator\_target\_map\` is None * Schedule request to scheduler when manage existing snapshot * Fix how backups handle encryption key IDs * Rename lio job to lio-barbican * Update Windows docs * 3PAR: Fixed sync period updation in rcg * Add 'metadata' in backup detail&show API document * Handle TZ change in iso8601 >=0.1.12 * Storwize: correct return value for update\_group * Fix cinder quota-usage error * Remove consistencygroups/api.py * Fix: request-id missing in volume action's response header * [Trivial] SMBFS: fix provisioning type usage 12.0.0.0b3 ---------- * Support fabric specific Cisco FC Zone Name * DS8K: Correct create volume behavior for CG * backup: set default swiftclient log level to WARN * Tests: Fix RBD invalid UUID warnings * api-ref: add multiattach considerations to retype docs * api-ref: update migration\_policy retype note about encrypted volumes * fix misspelling of 'password' * Netapp Ontap: Adds support for auto-max-over-subscription * Overprovisioning Improvements * HPE 3PAR: Update Storage Driver docs for Queens release * Update the description of quota's project id * Fix mis-named context object * Add debug logging around multiattach status changes * Fix multiattach policy names in release note * Add back support for the multiattach flag for volume create * Remove logging on Swift backup obj writer * Simplify keystone password assertions * Add policy check for complete attachment API action * [1/2] Fix mock for sleep mocks in volume drivers tests * fix wrong url link * Fix typo in test\_qnap * Add contributor notes on how to use pdb with tests * Run doc8 first * RBD: Support encrypted volumes * INFINIDAT: support force detach * Replace Chinese punctuation with English punctuation * nimble: update config doc to add missing extra-specs support * Add the nova-multiattach job to the check/gate queues * Remove extra spaces of the volume creation request body * V3 jsonschema validation: volume\_type\_access * Move disk space check to run when needed * Update Volume Encryption Documentation * Avoid use of deprecated commands in lenovo driver * Rename 'WindowsDriver' to 'WindowsISCSIDriver' * Fix readonly mode for 
new attachment API calls * Enable multiattach capability for lvm driver * 3PAR: fix volume migration for "in-use" volume * modify volume spelling errors * modify http to https * Fix api-ref for v3 volume group types * Fix combination of parameters for update APIs * Add enough notification for QoS * Unity: Add revert\_to\_snapshot support * Adapt cinder backup swift help test * Enable multiattach capability * Add multiattach policy * Updated from global requirements * Fix api-ref for v3 volume types * [VNX]Add \`force detach\` support * [Unity] Add \`force detach\` support * Storwize: fix an incorrect temporary parameter name * Update nfs driver doc for snapshot support * Storwize: use DLM locks * Fix BadRequest for 'ref' as string value * Remove leading and trailing spaces from parameters * Adds documentation for DataCore volume driver * Storwize: Use passed volume names in migrate * Fix glance create client unit tests * Updated from global requirements * Run backup-restore operations on native thread * Fix compression related tests * INFINIDAT: add missing connection\_info params * Update volume's 'updated\_at' when accept transfer * Tests: Fix Storpool tests hash randomization failure * Fix: Propagate OS global request ID * Fix Backup uses latest req-id for all log calls * Fix BadRequest for 'null' metadata and group\_specs * Fix api-ref for v3 Services * Delete duplicated save operation when do backup creating * Fix dictionary mistakes in docstring * Fix 'volume\_type' for in the api-ref * Remove the deprecated "giturl" option * Remove unused lib in test-rquirements.txt * VMAX driver - retype fix where workload is None * VMAX driver - QOS settings incorrectly applied on Default storage group * VMAX doc - Queens feature documentation * DS8K: block in-use volume to be added to consistency group * [Doc] Add replication group support in doc for Storwize * Fix version details API does not return 200 OK * Dell EMC SC: On None connector terminate\_connection unmaps all * Deprecate the allow-multiattach option to create * Improve multiattach error message * ScaleIO: Enable multiattach for volumes * Windows: allow multiple c-vol backends per service * Check create from image policy when image not none * Validate metadata before reserve quota * api-ref: fix the response parameter for multiattach * Docs: ibm\_storage\_driver: Fix volume\_driver path in documentation * Synology: Rename iSCSI target config option * Revert volume consumed capacity when rescheduling * 3PAR: fix create group from source functionality * Initialize osprofiler in WSGI application * VMAX driver - Multiple storage group creation attempts * Fix: validation error if the optional parameter's value is None * Storwize: correct in-use replication vol status * Unity: Attach Unity volume via IPv6 * Fix v3 api-ref for showing API details url error * VMware:Config option to disable lazy volume create * VMware: Support volume adapter type change * Tests: Fix XIV test failure with hash randomization * Block attempts to transfer encrypted volumes * Mock FixedIntervalLoopingCall for Huawei unit tests * [api-ref] Add missing fields to the API Reference * V3 jsonschema validation: Backups * Storwize: add hyperswap volume support * Handle InvalidVolume when creating a volume attachment * RBD: get manageable volumes * Fix reserve volume enforcing the wrong policy action * Fix api-ref for v3 volume types * Add descriptions of base policies * Do not explicitly list thin provisioning tools for RPM * Add driver-requirements entry for storpool * 
DS8K: correct LSS behavior for CG snapshots * V2/V3 json schema validation: snapshot manage * V2/V3 jsonschema validation: snapshots * VMAX driver - Fix AttributeError of dell emc driver * VMAX driver - Errors extending replicated volumes * VMAX driver - Fix error handling and checks for generic volume groups * VMAX driver - revert a volume to last snapshot * VMAX driver - Incorrect stats reporting * VMAX driver - support for manage/unmanage snapshot * VMAX driver - retypes failing when workload is missing * VMAX driver - Implement SRDF Metro feature * VMAX driver - Cannot extend volumes in a replication relationship * VMAX driver - Enable CHAP authentication for vmax backend * VMAX driver - Add async replication support * Validate volume status again when snapshot created * Use constants for cinder-volume * Reintroduce the StorPool driver * Report backend state in service list * Remove some no-use code about failover * Add instructions for the Error response codes * Fix incorrect description in api ref * V3 jsonschema validation: Volume-types * Update 'force' parameter for volume delete API * NEC driver: implement manage/unmanage functions * Adding all docs into index.rst of administration docs * Missing generalized\_filters in cinder administration doc * Updated from global requirements * Deprecate CG APIs * Bump up the API microversion in cinder.rc * Add microversion and volume status limit to revert api ref doc * HPE 3PAR - Implement Tiramisu feature on 3PAR * Enable fail back in SolidFire driver * StorageCenter: Fix volume mapping for API v3.1 * Update tgt iSCSI target on volume extend * Follow the new PTI for document build * NEC driver: add automatic configuration of SAN access control * Remove unused sf\_enable\_volume\_mapping conf option * Add admin ctxt to request qos\_specs in SolidFire * Remove In-repo cinder tempest plugin * V3 jsonschema validation: Group Snapshots * VNX: Fix cloning failure when system is busy * InStorage: fix problem when doing instance live migration * NFS: Fix nas\_secure auto mode permissions failure * Add online data migration routine for attachment\_specs * NetApp: Fix to support SVM scoped permissions * Dell EMC SC: API 4.0 incompatibilities * Trivialfix -- Fix spacing in docstring * VMAX doc - Queens feature documentation * Create group from snapshot-group failure leaves behind the volume * SMBFS: fix detecting if a volume is in-use * Raise PolicyNotAuthorized exception on consistency Group snapshot * Fix some typos in cinder doc * Updated from global requirements * Fix volume image metadata endpoints returning None * NetApp E-Series: Fix broken generic volume groups * Fix migration Invalid volume message * [api-ref] Fix http method for updating encryption type * Fix create encrypted volume from image * Storwize: disable creating volume with non cg\_snapshot group\_id * Rename iSCSI target config options to more general * 3PAR: Update CHAP on host record when volume is migrated to new compute host * Use method validate\_integer from oslo.utils * Add 'shared\_targets' only when it doesn't exist * Use defusedxml for XML parsing * Remove deprecated 'pybasedir' config option * Remove deprecated 'netapp\_eseries\_host\_type' config option * Remove deprecated HP 3PAR config options * Remove deprecated 'nas\_ip' config option * Fix indentation in docs * Fixes creation of mirrored volumes due to wrong type * Check for migrated UUID in SolidFire delete * Add thin provisioning package to install guide * Correct documented service upgrade order * VMAX doc - 
Queens feature documentation * Add service\_token for cinder-glance interaction * Add service\_token for cinder-nova interaction * Switch to oslo\_db retry decorator * Don't call driver.terminate\_connection if there is no connector * Store host connector in volume\_attachment.connector column * qemu-img info --force-share for NFS driver * NetApp ONTAP: Copy offload bugfix * Cleanup XtremIO IG cleanup note * SMBFS: remove deprecated config options * Storwize: fix group creation restrict rules * Restore\_backup: set error when volume status not matched * Keep v3 api-ref error response code 404 for reverting * Speed up DataCore unit tests * API ref: add host format to description * Fix discrepancy in api-ref for volume\_types APIs * Fix json formatting eror * V3 jsonschema validation: Group type specs * Don't raise 'NotAuthorized' inside when soft authorization * Change ssh\_utils parameter to correctly sends keepalive packets * Modify v3 api-ref error response codes for reverting * Removed gb quota decrement in grp snapshot delete * Add output of slowest tests to UT runs * Fix available space checking for image conversion * Deprecate cinder-manage logs commands * QNAP: Add support for QES 2.0.0 * Fix test case in test\_volume\_unicode.py * VMAX Doc - clarification and typo fixes * Updates Help Text on Quobyte Volume URL Option * V3 jsonschema validation: Volume metadata * Fix create from snapshot and source volume * VMware: Bump driver version * Check snapshot flag for test\_snapshot\_create\_volume\_description\_non\_ascii\_code * Updated from global requirements * Fix discrepancy in api-ref for create volume manage api 12.0.0.0b2 ---------- * Remove skip\_validation\_flag to False by default * XtremIO: optional clean IG with no mappings * Fix api-ref for v3 group type specs * TrivialFix: Delete word ‘I’ in admin/blockstorage-backup-disks.rst * Fix TypeError for workers cleanup api incase of invalid resource\_type value * DS8K: don't swap role after failover group * Updated from global requirements * DS8K: lss\_range\_for\_cg should take csv as well as range * Fix for volume from image snapshot free space issue * NEC driver: delete an unused configuration parameter * INFINIDAT: change CI\_WIKI\_NAME * V3 jsonschema validation: Group types * Add test for volume create with non-ascii characters * Add cg policies and clean up old policy handling * Migrate fixed\_key encryption keys to Barbican * Add shared\_targets and service\_uuid to volumes * Pass in the parameters multipath when migrating volume * Discrepancy in api-ref and code for create group type API * Imported Translations from Zanata * Remove extra test variable assignment * Tests: Cleanup utils.create\_volume fields * Imported Translations from Zanata * Discrepancy in api-ref and code for create group snapshot API * Tests: Fix Storwize hash randomization failure * added clarification in docs for usage of "volume\_clear\*" options * Disallow unmanaging encrypted volumes * Disallow managed volume to be managed again * Fix project\_domain\_name and user\_domain\_name in doc * Add support for enhanced features to the QNAP Cinder driver * Mock out CLI execution in NEC driver tests * handle no default\_cgsnapshot\_type * Add shared\_targets flag to Volumes * Allow purging of records less than 1 day old * Update the documentation links * Support create volume from backup * Fix 'KeyError' when 'with\_count' is not specified * Consolidate code that manages encryption keys * Use new oslo.db test cases * Move legacy-cinder-tox-\* jobs in-tree 
* Add cleanup to TestCase.flags() * HPE3PAR: Create FC vlun as host sees * Support create a volume from image snapshot * Filter valid FC initiator while FC attaching * Remove invalid parameters in the file testing.rst * SMBFS: manageable volumes * Fix retype migrating volume with rep\_status 'not-capable' * Adds DataCore Volume Drivers * Handle quota in 2 phases when managing resource * RBD: support driver-assisted volume migration * Update unity tests to use our test base * Update access control of show under hostAPI * Modify the wrong link in document * V3 jsonschema validation: Attachments * Add fixture to only emit DeprecationWarning once * Remove DB authorisation checking with quota API operations * Update oslo.context RequestContext args * Update bindep.txt for doc builds * nimble: handle unicode strings in volume create * Handle deprecation of inspect.getargspec * Add service\_uuid FK to volumes * Fix policy documentation for os-show\_image\_metadata endpoint * Fix group\_type\_id in api-ref * Imported Translations from Zanata * VMware: Backend driver for VStorageObject * NetApp ONTAP: Fix delayed snapshot deletion call * Suppress UT log messages * Try Fetching value from 'cluster' when raise exception * Fix v3 api-ref for restoring backup are wrongly described * Fix qos-spec-v2-qos-specs error * Add v3 api-ref for showing encryption specs item * Explicitly set expected log level for tests * Update api-ref to include volume\_image\_metadata * Remove ExceptionTestCase * V3 jsonschema validation: base schema framework * Check available capacity before creating resources * Remove setting of version/release from releasenotes * Test os-brick master in LIO gate job * Storwize: revert to snapshot support * Fix cinder-manage volume delete * api-ref: fix 203 error response codes to be 403 * Inspur Cinder iSCSI driver * [Trivial] set min max\_over\_subscription\_ratio to 1 * Add doc/source/\_static/cinder.policy.yaml.sample to .gitignore * Fix 'D005 No newline at end of file' Error * Fix controller install document indentation * Fix earlier backup records can't be restored * Fix several instances of chap secret being output to logs * NetApp cDot: Fix manage volumes * Fix resource count for os-host show * Correct gb,snapshot quota update in group snapshot * HPE 3PAR: Handle single path in FC configuration * Remove v1 API reference * NetApp ONTAP: Remove orphaned 7mode file * Prevent leaking encryption key when deleting volume * Schedule request to scheduler when create group from resource * Re-add QNAP Cinder volume driver * Add missing 'obj\_make\_compatible' in RequestSpec object * Tests: Rework volume deletion w/ invalid status test * set vg\_thin\_pool\_size to float type * Added mount fstype based validation of Quobyte mounts * HPE3PAR: Modify host & clear zone after detach * Use oslo\_db.sqlalchemy.enginefacade * Add .zuul.yaml with LIO job * Updated from global requirements * Unity: Remove redundant debug logging * Change default test log level * Handle assertRaisesRegexp Python 3 deprecation * Switch kaminario to use standard trace decorator * NetApp E-series: Fix provisioned\_capacity\_gb * Fix VolumeAttachment OVO Volume lazy loading * Use oslo\_log instead of logging * Remove deprecated keymgr code * VMAX doc - add manage and unmanage section * HPE 3PAR: Adding driver capability * Improve cinder revert-to-snapshot notifications * create\_volume: cleanup consistencygroup when driver exception * Improve deleting-missing-backup exception handling * SMBFS: add fixed image support * 
ScaleIO - Cleanup and enhance configuration doc * Remove unused print\_function imports * Schedule the request to scheduler when creating from snapshot/volume * Optimizes volume creation in the Quobyte Driver * ScaleIO: Update CI Wiki name * ScaleIO: adapt to moved connector constants * imageutils: allow passing subformat when converting * Unity: Fix duplicate hosts created with same name * Update setup instructions * Make test logging setup fixture disable future setup * XtremIO: extending volume in create\_volume\_from\_snapshot * Use total reserved quota value instead of partial * Tintri - Updated driver docs with new nfs option * Fix imageutils tests on windows * Fix api-ref for group snapshots API * HPE 3PAR: ISCSI/FC drivers – code refactoring * Storwize: Fix typo in log message * SMBFS: Enable reverting snapshots * DS8K: update replication\_status in generic group APIs * Disco driver: Fix the location to get the disco connector constant * Storwize: add backup snapshots support * Fix migate\_volume API with replication\_status ‘not-capable’ * Make service object UUID not nullable * Don't fail when deleting missing backup * ibm-storage: Fix create\_volume\_from\_snapshot * Fix incorrect use of assertFalse causing CI failed * [TrivialFix] Remove errant comma in capabilities policies * Tests: Fix coverage unit test failure * Dell EMC: Update PS and SC CI wiki names * Dell EMC PS: Fix Duplicate ACL records Issue * Completely remove mox from Cinder unittest * Support count info in List&Detail APIs * Remove doc/build before running docs job * VMware: Add profile ID to connection info * VMware: Revert to snapshot * VMware: Improve scalability of querying volumes * Fix manage\_existing API behaving wrongly with storage pools * NetApp ONTAP: Remove support for 7mode systems * Fix key\_manager API call * Revert "Tests: Fail if oslo.versionedobjects issues Invalid UUID warnings" * Imported Translations from Zanata * INFINIDAT: add support for overprovisioning * Clean up driver configuration reference * Fix migration 112 to use live\_data\_migration API * Enable using extra-specs for SF attributes * Add policy documentation and sample file * ScaleIO Driver - include snap usage in provisioned capacity * VMAX driver - Deprecate backend xml configuration * VMAX driver - Remove workload for next gen arrays * VMAX driver - Implement Tiramisu feature on VMAX 12.0.0.0b1 ---------- * Revert "Move vol\_db\_empty to NFS driver" * Change Install 'Tutorials' to 'Guides' * Add index for reservations on (deleted, uuid) * Redundant alias in import statement * RemoteFS: revert snapshot support * DS8K: support clone volume asynchronously * Move 'zoning\_mode' back to DEFAULT section * Add display\_name to solidfire volume attributes * FlashSystem: Add CMMVC6045E CLI error for multi-host mapping * Add v3 api-ref for updating specific extra specs for a volume type * Fix "import xx as xx" grammer * NEC driver: Never use target info in migration\_status * VMAX driver - detach volume shouldn't remove from volume groups * VMAX driver - Volume identifier issues * VMAX driver - concurrently deleting volumes can fail * Replace http with https for doc links in cinder * [policy in code] Add support for volume, volume type resources * DS8000 ConsistencyGroup failed to update group with volumes * ScaleIO Driver: Backup volume via snapshot * Run backup compression on native thread * Fix backup compression unit tests * Make stringify header work under python2 * Api-ref: Add missing api response for volume transfer * Switch 
from ostestr to stestr * Add v3 api-ref for showing all/specific extra specs for a volume type * Remove deprecated nova config options * Switch Rally Task To format V2 * Update configuration names in ITRI DISCO volume driver * Kaminario K2: Add non discovery iSCSI multipath * Fix backup-import error when the deleted record in the same db * Updated from global requirements * FlashSystems: permit snapshot/clone volumes larger than source * Deprecate SolidFire Image Caching feature * [policy in code] Add support for service, limits * [policy in code] Add support for qos and quota resources * Vzstorage: improvement of create\_cloned\_volume * [policy in code] Add support for group, g-snapshot resources * Add ability to specify backup driver via class name * Add documentation for API 'validate\_setup\_for\_nested\_quota\_use' * [policy in code] Add support for backup resource * VMAX driver - documentation updates around versions * Add Storwize replication group support * VNX: Fix issue when creating without type * Remove deprecated VD base classes which are not used now * Kaminario K2: Support duplicated FQDN in network * NEC driver: Fix exception in terminate\_connection with no connector * [policy in code] Add support for snapshot resource * Add v3 api-ref for deleting extra spec for a volume type * Switch base to the latest in doc link address * Replace the usage of some aliases in tempest * Update tenant to project in cli quota doc * ScaleIO Driver - adding cache and refactoring tests * Remove API check is\_valid\_body * Add .stestr.conf configuration * Do not load extendable routes for the Versions router * Remove newton-compat upgrade code on service startup * Remove Hitachi volume drivers * Remove Tegile volume driver * Remove ZTE volume driver * Remove X-IO volume driver * Remove Violin volume drivers * Add retries to LVM logical volume activation * Remove Reduxio volume driver * Remove QNAP volume driver * Cleanup some todo's for refresh() * Update BlockBox * Brocade: Add HTTP connection cleanup * Remove Infortrend drivers * VMAX driver - remove WLP stats * Remove deprecated heartbeat options * Dell EMC PS: Fix extend volume creating unmanaged snapsots * Remove deprecated osapi\_volume\_base\_URL * Clean up docs landing page * Dell EMC PS: Optimize parsing of capacity info from backend * Dell EMC PS: Fix over-subscription ratio stats * Doc: Move generalized\_filters to admin * Doc: Configuration: Remove some outdated Ceph info * Add ploop to parallels naming conversion * Generate create\_at date in usage in isoformat for backups/snapshots * Add uuid to services entries * Fix tempest test revet\_client typo * Fix Tempest Volume Revert test * Fix Reset a snapshot's status API Request Example * Replace DbMigrationError with DBMigrationError * Fix description for volume\_type object in API Ref * Mark Cisco FC ZM driver as unsupported * Fix v3 api-ref for updating extra specs of volume type * Add backups v3 views * Storwize: add NPIV support * Address importing directory issue * Stop overriding LVM overprovisioning ratio and deprecate * Clean up api-ref index page * Update provider parameter in sample json files * NetApp ONTAP: Fix reporting of provisioned\_capacity\_gb * Fix api-ref for reset group snapshot * Remove legacy driver name mappings * Api-ref: change 'tenant' to 'project' in v2 doc * [policy in code] Add support for message, worker, cluster resources * 3PAR: Cinder volume revert to snapshot support * Compact Newton database migrations * Add indexes to SQLAlchemy models * Updated 
from global requirements * [policy in code] Add support for attachment resource * Api-ref: fix v2/v3 hosts extension api doc * nimble: fix for parent volume delete * Check for outstanding attachments during reserve * Remove SCREEN\_LOGDIR from devstack install setting * nimble: update config doc for unicode unsupported * Use constants for microversion values * Add default configuration files to data\_files * HPE 3PAR: fix delete operation of replicated volume * Revert status to previous state on extend failure * Clear cached autogenerated docs before docs build * Switch to use key\_manager.backend * LVM: Activate thin snapshot before clone activation * [DOC] Update index page for Install tutorial * docs: Fix typo error in blockstorage-driver-filter-weighing.rst * Remove leftover API v1 cruft * Fix a migration issue of Huawei driver * DRBD: Remove unneeded list length checks * Add another hack from NOVA\_GROUP to generate\_cinder\_opts.py * Remove FalconStor volume drivers * RBD: get provisioned capacity using same connection * Add missing snapshot action in index.rst * [DOC] Remove duplicate build of module index * docs: link to the api-ref for 3.27/3.44 for the version history * api-ref: document microversions for the attachments API * Updated from global requirements * Update generate\_cinder\_opts path * Link to API version history from main index * Create custom assertTrue and assertFalse * Signal the max v3 microversion for mitaka * Adding project id check * Remove Glance v1 API support * Add tempest configuration for the LIO CI job * IBM XIV: Fix ordered use of a dict * Use newer location for iso8601 UTC * Remove Blockbridge volume driver * Remove Coho volume driver * Use conditional update for group update and delete * Implement keymgr list() method * Synology: Driver unable to be initialized * ibm\_storage - fix enable replication after disable * Fix wrong links in Cinder * Support az filter for snapshot * NetApp: Remove redundant QoS cleanup task * Fix test\_rpc\_consumer\_isolation for oslo.messaging 5.31.0 * Tests: Don't write files to /tmp/nec/ * vzstorage: return from \_create\_cloned\_volume * NetApp: Adds logging of ONTAP API version * Fix attachments on volume show when volume is attaching * Remove API v1 * Deprecate service-to-driver mapping for backups * VMware: Use vSphere template as snapshot format * Fix an compatibility issue after Huawei driver upgrade * VMware: Add 'managedBy' info * VMware: Remove unused variables * Remove glusterfs documentation * Fix use of outdated ternary operator * Remove unsupported BlockDeviceDriver * [Api-ref]: Add missing key/values in the json files * Update HPE 3PAR Storage Driver docs for Pike release * api-ref: Normalize response codes * api-ref: Remove unused parameter defs * Fix 'fix\_allocated\_quotas' doesn't work when validate setup * Updated from global requirements * Remove vestigate HUDSON\_PUBLISH\_DOCS reference * Dell EMC PS: Report total volumes on the backend * api-ref: Make v3 enclosing objects consistent * HPE Lefthand: fix volume manage operation * api-ref: Clarify os-host GET behavior * Fix python 3.6 escape char warnings in strings * Fix InvalidUUID warnings in unit tests * Fix installation instructions format * cleanup cg field when create image cache volume * Don't collect snapshots again when sync provider info * Fix python 3.6 escape char warnings in strings * Add contributor doc on assertEqual vs assertFalse * [Api-ref] Add api doc for attachment complete * Unhide errors from check\_uptodate.sh * Make sure that 
versions are applied as relative URLs * Don't lock whole project's resource when reserve/commit * VMAX driver - Pike documentation amendments * Correct contributor doc information * [Doc] Handle Sphinx warnings as errors * Making reservations before group creation * [DOC BLD FIX] Fix remote-code-block warnings * [DOC BLD FIX] Make opts sections lowercase * Reserve 5 migrations for DB backports * Hpe: reduce duplicate validation * Storwize: reduce duplicate validation * Use backup\_use\_temp\_snapshot setting for default * api-ref: Add group replication APIs * Update the documentation link * Fix allocated capacity report on non pool drivers * Modify some spelling mistakes in cinder * Correct module path in doc * TrivialFix for the docstring and json format * Remove duplicate variables in v3 * api-ref: Add backup import and export * Stop using internal \`oslo\_log.log.\_loggers\` dict * Removed unnecessary tearDown() methods * Updated from global requirements * HPE Lefthand: fix volume retype operation * Update HPE Lefthand/StoreVirtual Storage Driver doc for Pike release * Add missing PrettyTable requirement * Make cinder-manage service list to print full host * [DOC BLD FIX] Fix duplicate content warnings * Fix IntOpt type of VMAX config options * RBD: Fix stats reporting * NEC driver reads default\_backends * Cisco zonemanager: fix create multi-fabric zones * Update the documentation link * Api-ref: add 'os-services' v3 api doc * VStorage: make logging path configurable * Allow v3.0 volume metadata API calls * [Doc Bld Fix] Add links to man page contents * Ibm\_storage - fix create volume into group * Revert "Mark Virtuozzo driver as unsupported" * Updated from global requirements * Add input format check for qos\_spec update * IBM Storage: add QoS documentation * 3PAR driver-requirements name * Move config-generator to tools * Add releasenotes to check\_exec list * Ibm\_storage - fix delete group with replication * Ibm\_storage - fix failover\_replication * IBM storage: check pyxcli version on initialize * [Docs] Storwize:Update document for Pike release * [Api-ref] Add worker cleanup for service docs api * VMware: Add support for cloning attached volumes * Write maximum version of Pike * Do not fail if RBD image is broken * Save object after updating for attachment\_complete * RBD - volume create failed when volume need flatten * Change pure drivers default replication interval * ibm-storage: enable FC zonning to all ports * Make attachment\_update set status to attaching * 3PAR: Fixed image cache enabled capability * Get correct hypermetro remote lun ID while mapping * cinder incremental backup with ceph fails if last one deleted * SMBFS: enable thin provisioning support flag * Fix wrong links * Update reno for stable/pike 11.0.0.0rc1 ----------- * [Docs] Pull over Cinder service overview for install * [Docs] Add documentation contribution docs * [DOC FIX] Create missing index pages * Revert "Mark ETERNUS drivers as unsupported" * Fix bug of detaching failed while concurrently attach/detach * NetApp: block drivers fail when volume missing * Eliminate randomness in encode\_name&encode\_host\_name methods * Fix missing print format error * NetApp: Fix override of scheduler decision * INFINIDAT: add documentation for volume compression * INFINIDAT: add driver changelog * [api-ref] Add api-ref for os-quota-class-sets APIs * Pike release note cleanup * Replace dict.iteritems/iterkeys with dict.items/keys * [Api-ref] Add metatdata parameters to backup create/update * Load resource filter 
config file in testcase * Fix wrong param transfer in scheduler flows * [DOC BLD FIX] Fix user messages documentation * [DOC BLD FIX] Fix missing references in documentation * Make Cinder's Landing Page look like Keystone's * Fix message format for VolumeAttachmentNotFound exception * Clean up repl v1 volume creation from replication * Modify a little desc to satisfy the code * [Api-ref] Add force parameter to volume delete * Updated from global requirements * [DOC BLD FIX] Name files for inclusion properly * Revert "NetApp: Deprecate E-Series drivers" * VMAX driver - Error attaching failed over volumes * VMAX driver - None connector object in a terminate\_connection in Pike * VMAX driver - performance improvements in retype * VMAX driver - volume delete can fail * Remove duplicate variables * Fujitsu Driver: Fix exception in terminate\_connection with no connector * Infortrend mask password logging * VMAX driver - seamless upgrade from SMI-S to REST * VMAX driver - performance improvements * Fix host and cluster\_name filter for snapshot list * Refresh az cache if target az is not found in cache * Remove redundant mock rpc statement * Fix grammatical mistake, Changed character from "a" to "an" * VMware: Bump driver version * Make VolumeTypeExtraSpecsController policy more granular * Updated from global requirements * [DOC] VMware: Add doc for vmware:adapter\_type * [DOC] VMware: Update config table * Update resource\_filter to keep backward compatibility * Switch to using bool for filtering non-deleted volume attributes * Correct 'os-show\_image\_metadata' API document * Sort cinder-manage commands * VMAX driver - Pike documentation * Catch Castellan errors in create volume * Removed unnecessary setUp() call in unit test * Add API document for quota usage * Cloning a volume from a snapshot in thin lv * [DOC BLD FIX] Fix the numerous docstring probs in coprhd * NetApp: Fix SVM scoped user startup exception * Missing snapshot statuses * [DOC BLD FIX] Correct docstring errors in dell\_emc VMAX * Fix image download to volume for iso images * [DOC BLD FIX] Add missing files under configuration * VMAX driver - align VMAX QOS settings with front end * Fix layout of api-ref for group type specs * Fix failure for failing back the primary * Updated from global requirements * Revert "Handle concurrent volume creation request" * NetApp cDOT: Support generic groups for block 11.0.0.0b3 ---------- * Fix typo in message for volume creation failure * Update and replace http with https for doc links in Cinder * Add the backup metadata support releasenote * Update backend state when extending volume * Handle concurrent volume creation request * [DOC BLD FIX] Correct json code block in attachments * [DOC BLD FIX] Fix docstring error in fakes.rst * cinder backup sets incorrect parent\_id * Update URL home-page in documents according to document migration * Mock execute in unit test * Revert MySQL Cluster Support until an approach is worked out * [DOC BLD FIX] Add code:: blocks to manager.py * [DOC BLD FIX] Fix docstring problems in vzstorage driver * [DOC BLD FIX] Fix dosctring errors in veritas driver * [DOC BLD FIX] Correct docstring issues in Soldfire driver * Fix no feature to enable 'exclusive-lock' of image in ceph * [DOC BLD FIX] Fix docstring issues in hpmsa driver * Mark Tegile driver as unsupported * [DOC BLD FIX] Fix docstring errors in reduxio * Updated from global requirements * Add choices to backup\_swift\_auth * Support metadata for backup resource * VMware: Cache storage profile ID * Use OVO 
for test\_backup * Fix release notes link * VMware: Add volume adapter type extra-spec option * [DOC BLD FIX] Fix docstring errors in nexenta driver * Remove references to dothill driver options * [DOC BLD FIX] Fix netapp docstring issues * [DOC BLD FIX] Fix docstring issues in Lenovo driver * [DOC BLD FIX] Fix docstring errors in VNX driver * Add cinder backup service initialize check * Volume type with encryption comparison * [DOC BLD FIX] Fix docstring issues in ibm\_storage * NetApp cDOT: Support generic groups for file * [DOC BLD FIX] Fix docstring issues for Hitachi HNAS driver * Updated from global requirements * [DOC BLD FIX] Correct docstring errors in falconstor driver * [DOC BLD FIX] Fix docstring issues in dothill driver * [DOC BLD FIX] Correct dosctring errors in disco driver * Fix test\_scheduler\_stats error module calls * Add a check warning for quota update * Add VNX replication group support * [DOC BLD FIX] Fix docstring errors for VMAX driver * ibm\_storage - fix disable/delete replication * Updated from global requirements * Update group status * VMware: Optimize volume creation from image * [DOC BLD FIX] Fix formatting error in driver.py * LVM: Activate LV before creating exports * Disable merge snapshot to volume for thin LVM * Replace six.itervalues with dict.values() in cinder * Convert lenovo driver to use distributed lock manager * Do not delete group if group snapshot exists * ProphetStor: Support generic group * Updated from global requirements * VMAX driver - bootable volume errors * [DOC BLD FIX] Fix formatting errors in Dell/EMC PS * VMAX driver - Implement Generic Volume Group feature * [DOC BLD FIX] Fix docstring errors in Dell/EMC Unity * [DOC BLD FIX] correct formatting errors in datera driver * 3PAR: Get host from os-brick * Change Huawei driver's private data storing place * 3PAR: Modify update\_migrated\_volume code to driver * ZFSSA iSCSI delete volume with non-existent LUN * Make doc/source directory compliant with design in spec * Remove key\_manager.api\_class hack * Don't verify format with autodetect after image conversion * Add support for OS-Brick force disconnect * [api-ref] Update volume metadata for a specific key (v2) * [api-ref] Backups: change parameter 'backup\_id' to be mandatory * Fix 'connector' parameter in update attachments API * Enables MySQL Cluster Support for Cinder * Added CG capability to volume group in CoprHD * coordination: use tooz builtin heartbeat feature * HPE Lefthand: Fixed cloning operation * [DOC BLD FIX] Add missing glossaries * Updated from global requirements * VMware: Use temporary image * ScaleIO Driver: Updating config reference * Mark Falconstor drivers as unsupported * Mark Coho driver as unsupported * ZFSSA iSCSI volume driver multi-connect * Move config-reference to configuration * XIV\A9000: Added replication group support * [DOC BLD FIX] Fix code block issues * Kamiario: Fix over subscription reporting * VNX: ignore cg creation if group is not cg enabled * Updated from global requirements * Add missing api-ref documents for group type specs * [DOC BLD FIX] Remove todo:: directive from volume\_driver * [DOC BLD FIX] Fix docstring formatting in message\_field * ZFSSA: apply LUN specs when cloning snapshots * [DOC BLD FIX] Fix formatting in cinder.compute.nova * Migrate configuration-reference to Cinder repo * enabling nimble driver again * coordination: remove custom Lock object * SMBFS: deprecate provisioning ratio config options * [api-ref]Fix API Doc error in creating backup * ScaleIO Driver: Fix 
transposing of PD and SP in list of pools * Updated from global requirements * [DOC BLD FIX] Fix indentation in attachments.py * [DOC BLD FIX] Correct :returns: in db/api.py * Pop "consistencygroup" from volume object * Fix typo * NEC driver: Optimal path for non-disruptive backup * Ignore all .egg-info directories in doc8 check * INFINIDAT: support for volume compression * Update Documentation link * 3PAR: Add volume to consistency/generic group * DS8K: add replication consistency group support * DS8K driver: specify pool and lss in extra-specs * Update contents of attach\_detach\_conventions\_v2.rst * VMAX - Live Migration, replacing SMI-S with REST * VMAX driver - Replication, replacing SMI-S with REST * Dell EMC SC: Terminate\_connection chokes on None * Revert "Correct RBD Provision stats&fix a perf problem" * zfssaiscsi driver initiator group option processing * Make VolumeTypeExtraSpecsController policy more granular * Add ability to extend 'in-use' volume * ScaleIo Driver: refactor config options * Remove dothill driver options * Add insufficient space async error in create vol * Globalize regex objects * Add group to cluster when init host * Dell SC: Fix python 3.6 'async' keyword warning * Fix python 3.6 escape char warnings in strings * Add support for shared "backend\_defaults" config * Tintri: Fix config option access * INFINIDAT: Add QoS support * Fix SSHPool current\_size not work correctly * Tiramisu: Add groups param to failover\_host * Api-ref: Reorganize volumes versions * Keep consistent of naming convention * Migrate Cinder Scheduling CLI documentation * Migrate Cinder CLI Quota docs * Migrate volume management CLI documentation * Migrate the blockstorage admin-guide to Cinder * Restore ability to disable API versions * Fix errors preventing doc builds * VMAX driver - Compression, replacing SMI-S with REST * VMAX driver - QoS, replacing SMI-S with REST * Remove the unnecessary pv\_list assign during LVM object init * Fix some reST field lists in docstrings * VNX: wrong size of volume from image cache * Update attachment\_delete for multiple attachments * Fix v1 API api-ref link * Fix Log parameter * Add snapshot clean worker decorator * Remove periodic\_tasks method from base manager * Add params for API v3 snapshot-create * Modify the length of project\_id for Messages * Fix volume migration error with NEC driver * Fix the inappropriate parameter name * Switch to openstackdocstheme * VMware: Config option for default adapter type * Enable H904 check * LVM: Don't initialize VG with lvm\_type='auto' * Fix attachment\_delete for reserved case * Fix wrong usage of rbd.list\_snaps return value * XtremIO: fix fetching FC targets from X2 array * Revert "Don't use ignoreskipactivation for thin LVM" * Api-ref: Update doc for show backups * api-ref: Add parameter "offset" to list apis * Use GroupSnapshotStatus enum field * Add 'reserved' status to api-ref * Rename method 'delete' to 'delete\_backup' in backup drivers * Optimize: Replace 'fields.Enum' with 'object' * Unify host naming in readme file * Move driver options to "backend\_defaults" section * Use SnapshotStatus enum field * Fix scheduler\_host\_manager limitation * Improve Capacity filter logging * IBM Storage: fix create volume with --group-id * Add 'LUNType' configuration verification for Dorado array * Unity: temp snapshot for backup deleted twice * Use GroupStatus enum field * XtremIO: Detach all IGs for null connector * Remove dothill driver * Datera 2.4.0 driver update * Fix image cache db query when limits are 
enabled * Don't use ignoreskipactivation for thin LVM * Fix exception in dothill iSCSI driver's terminate\_connection * Updated from global requirements * Allow extension of volumes with snapshots for SMBfs driver * Windows iSCSI: use configured iSCSI portals * Add gmcv support in SVC driver * VMware: Fix volume cascade delete * Fix wording in warning message * Remove extra space in release note * Fix rally "volume\_type" parameter * Add custom keyring when initializing RBD connection * Avoid register extensions on the versions router * Use 'min\_version' to mark newly added parameters in DOC * Update log translation hacking rule * VNX: Add QoS support * remove extra characters "/" * Fix some doc issue * Add driver interface documentation for list manageable * [Api-ref] Update quota-set APIs ref * Api-ref: Add upload-to-image api-ref * Fix driver exception when cascade deleting volume after transferring * Rename DB Driver param for backup drivers to 'db' * IBM Storage: Fix for misidentification of QoS type * Unity: Use thin clone when cloning volume * Show user\_id when cinder snapshot-show * Tempest for revert-to-snapshot * Fix the default port value * SMBFS: enhance volume cloning * import installation guide pages from openstack-manuals * ScaleIO Driver: Fix for using scheduler specified pool * IBM Storage - Fix vol create from snapshot in CG * Replace OS\_AUTH\_TYPE with OS\_AUTH\_SYSTEM in rc * Windows: case insensitive path comparisons * Api-ref: add documentation for v2/v3 backup 'os-reset\_status' action * Add note about stop supporting of log translation * VNX: failed to create from image cache * Add policy granularity to the qos-specs API * Fix i18n issues * Allow extension of volumes with snapshots for Quobyte driver * Add params for v3 API volume-type-create * Use exception.CinderException instead of Exception * Performance: Remove useless snapshot object get * Remove redundant notification * Provide user friendly messages for db sync * [XIV] Fix exception message format issue * Enable some off-by-default checks * ScaleIO: Logging cleanup * Fix assertTrue in functional tests * Cinder volume revert to snapshot * Fix connection\_info field in VolumeAttachment object * Add v2 v3 response example in API doc * Fix bugs while integrated with Huawei Dorado array * Remove the redundant volume/snapshot check * Updated from global requirements * VNX: cg and generic group cleanup * Add IBM-Storage replication group support * VMAX driver - Retype, replacing SMI-S with REST * Storwize Support iSCSI host when FC volume is there * Add enable\_lazy in api/openstack/wsgi.py * [api-ref] Add snapshot metadata key api * [api-ref] Update volume metadata key api * Move vol\_db\_empty to NFS driver * Add project\_id admin filter to limits API * Add thin-provisioning-tools to bindep * Set default lvm\_type to auto * Fix migration completion for New Attach API's * Explicit user messages * api-ref: clarify retype docs around default policy permissions * Updated from global requirements * Tests: Disallow use of assertTrue(str) * [api-ref]Fix the incorrect description in volume API * Add the missing brace in api/v3/attachment * Remove duplicated line from \_do\_attachment\_delete * [api-ref]Fix the wrong description in volume API * ScaleIO - Make compatible with next SIO version * Add backup to cinder architecture page * Fix response code for update and delete API in volume attachments * IBM-Storage: group-create-from-src fail with error * Add rel note for create volume from enc. 
image * ignore error when remove non-exist vol from cg * Mock time.sleep for VMAXRestTest.test\_remove\_vol\_from\_sg\_failed * Remove deprecated oslo\_messaging.get\_transport * ScaleIO Driver: get manageable volumes * Mark ZTE driver as unsupported * Add CG capability to generic groups in GPFS driver * FusionStorage: use versioned objects * Clarify some details related blockbox deployment * IBM Storage- revert vol size capacity optimization * Send global\_request\_id to nova/glance when calls are made * Remove usage of CONF from DISCO driver * Add Metadata format check * Fix boolean opts defined as strings 11.0.0.0b2 ---------- * Allow Pure drivers to handle detach with no host * RemoteFS: enable image volume cache * Ibm storage: added new option for qos type * Fix api-ref for v3 volume and snapshot manage * Fix NoneType has no attribute get error * Api-ref: fix parameter 'volume\_id' info * Refactor update attachment's testcases * Api-ref: Add doc for update backup * Replace deprecated BaseException.message with six.text\_type * Use requests lib for Huawei array connection * Add libssl to bindep * Revert "Using assertFalse(A) instead of assertEqual(False, A)" * VMAX driver - Base functionality, replacing SMI-S with REST * Clarify create\_volume size debug message * INFINIDAT: add metadata to InfiniBox objects * Using assertFalse(A) instead of assertEqual(False, A) * Correct cinder dev doc typo * Mark Virtuozzo driver as unsupported * Dell EMC SC: Updated Created by Message * Add warning header to opts.py file * API/DB: Fix hash randomization test issues * cleanup: remove DEVSTACK\_CINDER\_SECURE\_DELETE * Mapping parallel and ploop * Replace assertTrue(isinstance()) with assertIsInstance() * Using wrong connector parameter in V3 attach * Support sort backup with name * [api-ref]Adds API documentation for 'os-set\_bootable' action * Updated from global requirements * Updated from global requirements * Fix support for AMI image conversion * Remove service filter for service list * Remove some unnecessary spaces * Minor fixes to microversion devref * VNX: fix issue when backing-up in-use volume * Introduce managing existing snapshot to rbd driver * Fix example code about api\_version in api\_m\_dev.rst * Add description about tox in dev\_env.rst * Return metadata in volume summary * Ceph: Fix delete backup with non-existent pool * Allow logging in tempest tests * Storwize: add mirrored volume support * Trivial fix typos * Refactor NEC volume driver * Remove unused \_detach\_snapshot method * Do proper cleanup if connect volume fails * Add an instance-uuid check on attachment\_reserve * GPFS: Fix forceful delete for consistency group * Periodic task to clean expired reservation * Update OVO devref * Add is\_replicated() to Volume and VolumeType OVOs * Make failover DB changes consistent * Revert "Use HostAddressOpt for opts that accept IP and hostnames" * Fix api-ref for POST backup for optional name * Fix typos in README.md * change consistencygroup\_id to group\_id * Fix typo * Add the missing "vol\_type\_id" param for qos association api-ref * Fix like filter related issues * Updated from global requirements * Extracted HTTP response codes to constants * Make "connector" param to be optional in force-detach api * LVM: update max\_over\_subscription\_ratio help message * Add Generic Volume Group Into Quota Management * Update SolidFire CI Wiki * Add a local bindep.txt override * NetApp DOT: Fix hash randomization test failures * Updated from global requirements * Update replication v2.1 
devref * INFINIDAT: add support for generic volume groups * INFINIDAT: add support for iSCSI * Rolling Upgrades: Fix Volume OVO * Rolling Upgrades: Fix Group OVO * OVO: Remove child\_versions from list classes * SMBFS: remove redundant check * SMBFS: switch to distributed locks * SMBFS: drop JSON file storing allocation data * SMBFS: report each share as a pool * Add RemoteFSPoolMixin * RemoteFS: pass volume object to '\_find\_share' * Add volume type filter to API Get-Pools * Updated from global requirements * SolidFire Generic Groups Support * OVO: Fix VolumeType create not bound to a Session * VMAX driver - Detaching volumes if part of two or more MVs * XtremIO: reduce number of backend calls * Extend Falconstor driver to utilize multiple FSS storage pools * Remove empty line in api\_version\_request * Updated from global requirements * Use the deprecated os\_privileged\_xxx settings * Add blockbox to Cinder project * Create volume from VHD format image always failed * api-ref cleanup: required flag for tenants * Updated from global requirements * Python3: Don't use Exception.message * GPFS: Handle unsupported operations with an exception * api-ref: add list manageable resource API * Check the volume metadata value is a string or not * Mark X-IO driver as unsupported * Mark Synology driver as unsupported * Mark Reduxio driver as unsupported * Mark Nimble driver as unsupported * Mark Infortrend drivers as unsupported * Mark ETERNUS drivers as unsupported * Remove BRCD primary\_switch\_wwn config option * Mock qemu calls in sheepdog tests * Add time.sleep mock for test\_lv\_deactivate\_timeout * Fix typo errors * ZFSSA: Allow clones to differ in size from parent * Convert marker format for listing manageable resources * vzstorage: use remotefs.VZStorageRemoteFSClient * OVO: Add reminder to add backporting code * vstorage: added vz:volume\_format vendor property * Fix bugs of GPFSNFS & GPFSRemote Driver initialization failure * Support 'LIKE' operator to filter resource * Add the missing parameters in the api-ref of ext-backups.inc * Updated from global requirements * INFINIDAT: add locking around initialize/terminate connection * Implementation of Cinder driver for Veritas Access * Add 'resource\_filters' API to support generalized filtering * Deprecate osapi\_volume\_base\_url duplicate option * Updated from global requirements * Tests: Fix Blockbridge hash randomization failure * Tests: Fix VMAX hash randomization failure * Tests: Fix Datera hash randomization failure * Tests: Fix XIV unit hash randomization failure * backup: handle device path represented by dir * Rolling Upgrades: Fix VolumeAttachment * Add API documentation for force-delete volume in v2 and v3 * coordination: remove un-used method is\_active() * Fix py27 gate - Remove Tooz usage for tests * Remove unused volume\_types field when creating group * INFINIDAT: delete host objects when detaching the last LUN * INFINIDAT: Use infinisdk for communication with storage array * Add information in notification of type's extra\_spec * NFS Backup: Fix overwritting backups * Add missing retype API documentation * ibm-storage: update version number and history * Fix async mirroring on XIV limited range backends * Rollback snapshot's status when backup failed * Correct sphinx source code syntax * Clean codes around backup APIs * Fix all\_tenants doesn't work for volume summary * Fix incorrect volume\_id request description in api-ref * Trivial fix typos * 3PAR: Added volume to vvset in online copy * Add documentation for invalid 
filter keys * Add service dynamic log change/query * Dell EMC: Moved SC driver under dell\_emc * [Trivial] SMBFS: fix passing image format using volume types * Fix typo * Correct typo of cinder doc * Fix swift auth\_url/url check * Fix typo in docstring * SMBFS: avoid unnecessary conversion when uploading volumes * Fix forceful deletion of Consistency Group for GPFS Driver * Refactor internal context testcases with ddt * Enable mutable config in Cinder * VMAX driver - Close ecom connections * Read XtremIO options from self.configuration * Read rbd options from self.configuration, not CONF * Fix backup\_api\_class doesn't work * Add support for generalized filtering on list APIs * Modernize the nova client in cinder * ScaleIO 1.32 deprecation notice * SMBFS: Use share mountpoint when fetching capacity info * Fix permission error in VNX driver * VMware: Stop setting dynamicType * VMware: Apply policy at vmdk level during retype * genopts: Fix python 3 compat, sort options * Remove snapshot['size'] references * Remove backup-restore methods in driver code * Fix change API volume create message * VMAX driver - Manage/Unmanage performance fix * Updated from global requirements * VMAX driver - Rollback error on Live Migration * Remove usage of parameter enforce\_type * Trivial fix typos while reading code * Re-arrange v3 API reference * Re-order api-ref home page index * Trivial fix PEP 8 coding style violation and typo * Move the releasenote to the right folder * Add missing testcases for migration scripts * 3PAR: Added Replication Feature In Retype Flow * Functional tests for group replication * Update attachment's status when attaching a readonly volume * ds8k: should verify REST version separately * Remove dependecy testrepository * Move the releasenote to the right folder * Remove non-scheduler volume extend support * Eternus driver: Move soft dependency check out of \_\_init\_\_ * Use cryptography instead of pycrypto * Tiramisu: replication group support * xiv: enable get fc targets by host * NFS: run qemu-img info as root * Give more time for LVM deactivation * Remove unused fallocate method/filter * Fix quota reserve for manage snapshot * Updated from global requirements * Delete limited\_by\_marker from api/common.py * LIO: Fix terminate\_connection AttributeError * Spelling error "paramenter" * validate\_integer doesn't check non int properly * Fix host check in is\_backend\_frozen * Refactor 'update\_group' method * Use cg object when invoking update cg method * Volume Type Encryption added to v2 api-ref * Fix accessing uninitialized variable bug * Don't return empty volume\_image\_metadata when list volume * GPFS: Enhance GPFS driver to work in container * Create indexes for foreign keys * NetApp: Refresh directory before waiting * Fix NoneType has no attribute get * Updated from global requirements * HPE 3PAR: Adds CG capability in generic volume groups(GVG) * Fix typo in comments * Check groups before deleting group type * Fix backup temp snapshot path on remote node * Changing releasenotes file present in review change 409128 * ProphetStor failed to create volume size larger than the snapshot * Add attachment API doc * Remove obsolete Liberty code * VMAX driver - Pre-zoned port group fix * Updated from global requirements * Improve help for periodic message\_reap\_interval * fix typo * Add 'connection\_info' to attachment object * Fix the filter when list volume with group * Filtering attachment doesn't support 'instance\_id' * Don't check thin provisioning when manage volumes * 
Clean up expired user messages * Dell EMC SC: Support generic groups * NetApp: Deprecate E-Series drivers * ScaleIO: Adding CG support to groups * attach\_volume first check for existing attachment * Eliminate deprecation warning in db * Add a release note for groups * Revert "Huawei: disable certificate verification" * Fix for Group API update to include check policy * Create volumes from encrypted images * Glance: attach volume encryption key id to image * Dell EMC SC: Raise on \_init\_volume create\_snapshot failure * qemu\_img\_info: Don't autodetect source format * Check "kernel\_id", "ramdisk\_id" for Glance v2 only * XIV: switch to generic volume group * Updated from global requirements * Reverts quobyte\_volume\_url option back to type StrOpt * Disables xattrs on Quobyte driver mounts * [BugFix] Add method policy in attachment APIs * Adds a bugfix release note regarding backing up with the Quobytedriver * HPE Lefthand: add CG capability to generic groups * qemu\_img\_info: report 'luks' images as 'raw' * Fix encryption key deletion error handling on volume delete * Fix rally job gate-rally-dsvm-cinder-ubuntu-xenial-nv * 3PAR: Enable HPE-3PAR Compression Feature * ProphstStor driver 'SSL' communication fails to check certificates * RemoteFS: fix volume cascade delete * Tests: Remove the redundant methods * Storwize: correct unreplicated vol status * recover image-volume size if extend volume failed * Fix config option volume\_backend\_name's invalid overridden value * VNX: add missing group snapshot pool stats * Optimize the link address 11.0.0.0b1 ---------- * Updated from global requirements * Remove unused references from config-generator * Synology: fix snapshot metadata loss * Missing volume status on block storage v2 API * Validate uuid parameters strictly for create volume API * ds8k: check lun exist or not when clone volume * DS8K driver should use DLM * DS8K driver: remove code for fake connector * [api-ref] Add backing-up to snapshot statuses * Use HostAddressOpt for opts that accept IP and hostnames * DS8K driver: change the design of CG * Add sem-ver flag so pbr generates correct version * Fixed wrongly catched multiple exception types * Storwize: add CG capability to generic groups * Fix attach issue for failed-over volume * Fix migrate api giving incorrect exception message * Reduce code duplication * Separate out routine for getting qemu\_img\_info * NEC driver doesn't provide link down portal info * Fix metadata api's raises TypeError * Remove 'verbose' config option initialization * Remove sha256file value from logs for incremental backups * [api-ref] Add cascade to volume delete * Add CG capability to generic groups in Huawei driver * Huawei: disable certificate verification * Hitachi: Mark HBSD and VSP as unsupported * HNAS: Mark NFS driver as unsupported * Support for HostAddress opt * Add an optional db session argument to cinder.db.sqlalchemy.api. quota\_allocated\_get\_all\_by\_project(). Then in quota\_reserve() pass the session that is fetched for that method so that a new session does not have to be made. 
This can avoid DB timeouts and retries when a lot of concurrent operations are hitting quota\_reserve() * HPE 3PAR: Handle manage and unmanage hosts present * VNX: allow specify pool name for replication * VMAX driver - Live Migration is dropping connection * [Trivial]Fix some incorrect annotations * Fix some format error in docstrings * Add bandit-baseline to tox.ini * Backup: Add choices to compression algorithm option * ScaleIO: Fixing support for SIO 1.3x * Updated from global requirements * Fix error status check when create backup * Remove mirror policy parameter from Huawei driver * Updated from global requirements * Implementation of cinder driver for Veritas HyperScale * Tolerate new SQLAlchemy==1.1.7 * [BugFix][Devref] Refactor attach/detach v2 devref * [BugFix]Cinder forgets to update 'deleted\_at' when deleting * Add more specific error message * Tests: mock psutil usage in quobyte tests * New test case for InfroTrend driver * Add unit test for brick/local\_dev/lvm.py: create\_volume * [api-ref] Update snapshot/volume's metadata api-ref * api-ref: fix rest method error in reset group and group-snapshot status * Fix the missing args bug while lvm creates volume failed * [BugFix] Add 'all\_tenants', 'project\_id' in attachment-list * Huawei driver supports front-end qos * Use the list iterator instead of six iterator * Add two response parameters in the api-ref of getting capabilities * NetApp E-Series CI name update * Don't use tempest special\_fields * Add \_\_init\_\_ for cinder.config module * Clean up index links to removed content * Make cinder-manage online migrations more verbose * SMBFS: enable Nova assisted volume snapshots * Remove deprecated 'host' option for cinder-volume * Sheepdog: fix conflict of temporary snapshot name * api-ref: Fix volume\_id parameter for the path * Fix 500 error when 'mountpoint' is not provided to os-attach * RemoteFS: prevent creation of encrypted volumes * HPE LeftHand: extend volume if larger than snapshot * Mark the Violin volume drivers as unsupported * Mark QNAP volume driver as unsupported * [Doc] Add documentation for messages API * Mark Blockbridge volume driver as unsupported * Delete the duplicate interface about the api version * Fix some reST field lists in docstrings * Remove XML description from media-types * Fix version numbers on media-types * Fix status\_code of "list versions" API * Adding new test case for InforTrend driver * Fixed inconsistent naming conventions * Extracted HTTP response codes to constants * Extracted HTTP response codes to constants * Extracted HTTP response codes to constants * Extracted HTTP response codes to constants * Extracted HTTP response codes to constants * replace references to cinder.openstack.org * [doc] Update doc index * Consistency groups API is not returning project\_id filter groups * Adding tests for api/v3/group\_specs.py * Fix docs failures caused by latest eventlet * VMware: Enforce min vCenter server version 5.5 * Fix cinder functional tests job * Adds missing return value to create\_cloned\_volume of Quobyte driver * Don't change volume's status when create backups from snapshots * Add missing and incorrect response and error codes in cinder api-ref * ZFSSA can avoid fetching img when creating vol from cache * Fix more invalid UUID warnings in test\_vmax file * ZFSSA cache luns are too small when images are precisely X Gi * Support sort snapshots with name * [Doc] Add missing parameter in v3 resource documentation * Remove log translations * Remove old oslo.messaging transport 
aliases * Remove unused CG code * Delete consistency group fails on xtremio driver * Remove hacking check for log translation * Add IOPS limits that scale per-GB * Update db from drivers in default groups methods * ScaleIO: Fixing warnings spotted by PyCharm and tox * Added gigabyte unit to values in exception message * Set access\_policy for messaging's dispatcher * Dell EMC SC: Return dict from delete\_consisgroup * Prevent duplicate entries in the image cache * Updated from global requirements * Fix ut error of volume deletion * IBM storage: switch to generic volume group * Fix lock contention in SolidFire driver * Revert "Remove v1 API reference" * VMAX driver - volumes part of CG not managed correctly * VMAX driver - consistency group remove empty values * Datera: extend volume if larger than snapshot * Tests: Fix InvalidUUID warning for 'qosId' * Add filter, sorter and pagination for group snapshot * Correct Extend lvm Volume * Fix create\_consistencygroup when using CG objects * Fix: SnapshotStatus missing in Cinder * Replace yaml.load() with yaml.safe\_load() * [Optimise] Refactor group snapshot testcases * Add API documentation for volume detach in v3 * Dell EMC SC: Added retry to \_init\_volume * Make QnapISCSIDriver unit tests faster * Adds API documentation for list and show hosts * Updated from global requirements * Fix 500 HTTP API error caused by rpcapi RemoteError exception * Update devref for OVO version bumping * Improve devref for API errors * Add a README in tests dir linked to devref html * Remove HNAS iSCSI driver * Replace yaml.load() with yaml.safe\_load() * [Optimise] Refactor ExtraSpecsOpsTestCase with ddt * ProphetStor driver create vol from snap with wrong size * Add driver-requirements.txt * Bump prlimit cpu time for qemu-img from 2 to 8 * Remove "list\_volume" from filter * Remove unreachable code * Remove duplicate key from dictionary * Fix 500 error if boolean is\_public is passed as string * Fix a typo * api-ref: v2 API does not mention GET volume metadata by key * Update guru doc to support file modification events * rbd: resize volume not cloned but full copy * Coho: resize volume if cloned image is larger * XtremIO: allow a cloned volume with larger size * Replace obsolete vanity openstack.org URLs * Fix column types in models.py * cors: update default configuration * Tests: Use six.moves.urllib urlencode * Switch to use stable data\_utils * VMAX driver - Change vendor\_name from EMC to Dell EMC * VMware: Refactor vmdk unit tests * api-ref cleanup: remove quotas user related calls * Fix generic group's unit testcase * Switch to decorators.idempotent\_id * Change the default value of used Glance API * Disallow modification of in use Volume Types * Hash password displays in debug log * Add debug log of retryable response on SolidFire * Filter on Active status in SolidFire List * Remove TODO related to Nova API version * Add per-backend availability zones * cinder-backup - coordination not initialized * Reuse API v2 for v1 calls * VMware: Remove redundant sanity check * api-ref: v2 update volume metadata description is wrong * Fix action on services raises TypeError * api-ref: v2 API does not mention DELETE for volume metadata * Set backup available after verify * Updated from global requirements * Fix size update error for snapshot-manage operation * Remove domains \*-log-\* from compile\_catalog * Fix typo error * Updated from global requirements * Validate display\_name/description attributes in API layer * Purge immediately on Delete in SolidFire Driver 
* Deprecate API v2 * Huawei driver supports specifying copy speed * Fix internal tempest tests * Reuse identical API v2 code for v1 * Remove v1 API reference * NEC: Fix format string in debug message * Fix ATTACH\_READONLY\_VOLUME message generation * Remove an outdated comment * Remove reserving quotas in c-vol's retype method * Enforce \_usage\_from\_snapshot to fetch info from deleted volume * VMAX driver - replace pickle with jsonutils * Switch NFS driver to use DLM locks * Zadara driver - move from httplib to requests * Remove Linux SMBFS driver * Unity driver: fail to disconnect volume * Remove unused logging import * create snapshot with generic group fail with XtremIO Driver * Fix attachments after attached migration * Fix volume retype with migration as non-admin * Fix multibackend-matrix confs * Adds attached migration to multibackend-matrix * VMAX driver - superfluous debug messages causing error * Fix SolidFire cgsnap clone bug * Compact Mitaka database migrations * Remove Ocata's data migrations * Add the backup variable structure in backup\_driver interface * Dell EMC SC: Return dict in delete\_cgsnapshot * ceph backup support for EXCLUSIVE\_LOCK and JOURNALING features * SolidFire: Handle connect\_volume() failure when create vol from image * Change nova\_catalog\_admin\_info to default to publicURL * Adds db\_online\_data\_migrations to cinder-manage manpage * VMware: Delete temporary snapshot after clone * doc: verify all rst files * Updated from global requirements * 3PAR: rally test create-volume-from-snapshot fails * VMAX Driver - disable initiator check by default * NetApp: Track SVM and Cluster scoped credentials * Fix Pep8 Failures for Python3 * Tests: Remove mocks of nonexistent methods (NetApp) * Cinder tox updates for Python3 * [4/4]Reset generic volume group and group snapshot statuses * Removes getfattr from Quobyte Cinder driver * Use ovo instead of dict * Fix duplicate lvs2 entry in rootwrap volume filter * Updated from global requirements * fix create\_consistencygroup in xiv * Output the driver info as json * Add maximum microversion for Ocata * Dell EMC Ps: Report discard support * Switch and/or to ternary operator * Fix "No documentation found in" errors in docs * VNX: Handle error during volume deletion * Unity driver: check array OE version * Dell EMC: Create independent volume on clone * Add quota limit check and enhance roll back for cg create * Storwize: correct the product key for replication * Fix Block Storage API reference nesting * VMAX driver - snapshot creation and immediate deletion fix * Drop version validation * Add filters support to get\_pools * [11/11] Refactor test\_volume file * [10/11] Refactor test\_volume file * [9/11] Refactor test\_volume file * [8/11] Refactor test\_volume file * [7/11] Refactor test\_volume file * [6/11] Refactor test\_volume file * [5/11] Refactor test\_volume file * [4/11] Refactor test\_volume file * [3/11] Refactor test\_volume file * [2/11] Refactor test\_volume file * [1/11] Refactor test\_volume file * Disable multiattach for all drivers * Remove support for py34 * Autoselection of IBM SVC/Storwize IO group * Remove unsupported NexentaEdge driver * Remove unsupported HPE XP driver * Remove unsupported CloudByte driver * VMAX driver - allow for multi volume types in Consistency Group * Minor cleanup on tools/test-setup.sh * Add Ocata sanity check migration * Use https instead of http for git.openstack.org * Update reno for stable/ocata 10.0.0.0rc1 ----------- * VMAX driver - widen locking around storage 
groups * Reserve 5 migrations for DB backports * Tests: Fix NetApp unit test failure w/ hash randomization * xiv create vol from replicated source fails * Don't use None value for sql query * Remove promote and reenable from policy * VMAX driver - copy state fix * VMAX driver - ignore service level and workload in xml * VMAX driver - Convert eval to ast.literal\_eval * VMAX driver - failover error fix * 3PAR: Remove un-necessary snapshot in online copy operation * Add SUPPORTED flag to Lenovo FC driver * Image utils: fix fetching available space on Windows * NetApp cDOT: Add check if copyoffload tool is available * Fix max\_age calculation for quota usage refresh * Fix 500 error while listing manageable volumes and snapshots * Add usage and limit info on quota limit errors * Prepare for using standard python tests * Convert ignoreskippedcluster check to be static * Nosec audit url open issue from Bandit * Clean up release notes for Ocata * xiv delete cg fails if removed on backend * VMware: Set vSphere opID prefix * Make get\_capabilities look for clusters * Resolve Brocade HTTPS connection error * Add SUPPORTED flag to Lenovo iSCSI driver * Add SUPPORTED flag to HP MSA driver * Hitachi VSP: Specify compute nodes and copy ports * ZTE: Added CI\_WIKI\_NAME * Fix notification short-circuit * Fix CopyVolumeToImageTestCase max recursion depth * HNAS: Cloned volume with different volume type * VMware: Use full clone if extend needed * Extracted HTTP response codes to constants * VMware: Use storage profile name in extra-spec * RBD:Move RBDVolume calls to a separate threads * Move release notes to correct location * Add cluster\_name to temporary volumes * Fix host assignment when clustered * 3PAR: Inconsistency in copied and source CG * Remove redundant log during initialize connection * VMAX driver - sync sv issue * Reduced the complexity of the main() method * Add descriptions to Pure drivers * Adds API documentation for update snapshot status * clean up replication volume on an xiv backend * Add create volume from image policy * Short-circuit notifications when not enabled * Stop get\_notifier mock in tests cleanup * VMAX driver - removed incorrect six.text\_type * Updated from global requirements * Rebrand and move EQLX driver to dell\_emc folder * Fix Live Migration for Nimble Storage Driver * create consistency group mishandles types * Infortrend: Stable generation of strings from dicts * VMAX driver - QoS key fix * VMAX driver - remove global mock of volume\_types * Switch to decorators.idempotent\_id * Set image\_owner before adding location to glance * Dell EMC SC: Delete doesn't account for migration * Zero out SolidFire capacity when unreachable * IBM driver update CI\_WIKI\_NAME 10.0.0.0b3 ---------- * Fix Qos for clone for Nimble Storage Driver * Add lock acquire/release debug messages * [api-ref]Fix the volume metadata description error * [api-ref]Add volumes/summary API doc * [api-ref]Change tenant to project * Fix double call to "qemu-img create" * Fix api-ref status parameter * Refactor volumes summary * Small change to microversion header on devref * Check if volume node has enough space for image operations * Revert "Mark the sheepdog driver as unsupported" * backup of active 3par ISCSI bootable volume fails * Open the source code of ibm\_storage driver * NFS snapshots * Tests: Fix hash unstable dict comparison * Tests: Fix mock for vzstorage remotefs * Fix the api-ref CI failure * Eager load projects when getting types by group * VMAX driver - rename and restructure driver * 
Updated from global requirements * Switch ManageableSnaphots & ManageableVolumes list to OVO * VMAX driver - storage group cleanup on error * Add assert\_min\_rpc\_version decorator * NetApp cDOT: Add NetApp Volume Encryption support * Add volume backup tempest tests * Make CappedVersionUnknown exception more useful * vzstorage: use resize instead of grow for ploop images * Add driver supported status to dict output format * Break migrate\_add\_message\_prefix into two queries * Remove useless FIXME * Fix some problem for attachment list * Don't drop the volume type's extra spec at DB layer * Switch to using generic groups with Pure driver * VMAX driver - Implement volume replication for VMAX * Brcd zonemanager: Fix unstable command generation * Dell SC: Logging asyncTask if XML returned * Test: Fix assert\_has\_calls dict order bug (QoS) * FalconStor: Drop the copy\_image\_to\_volume feature * Refactor backup RPC API test cases * Refactor scheduler RPC API test cases * Update the parameter check when create attachment * FalconStor: Restore option "san\_secondary\_ip" * Extracted HTTP response codes to constants * XtremIO: Add CG capability to generic groups * VMAX driver - Consistency Group automerge fix * VMAX driver - Support for compression on All Flash * Add new attachment APIS * Add lock around SolidFire clone-image * VMAX driver - Storage assisted volume migration * Dell SC: Retype fixes * Revert "Mark Huawei volume driver as unsupported" * Fix typos * Adds getfattr and mount to Cinder rootwrap volume.filters * Refactor volume RPC API test cases * Make notify\_service\_capabilities cluster aware * VNX: Add async migration support * VNX: Update sg cache if it already existed * Allow snapshots and volumes to have Null group values * Fix for FC Initiator API in Nimble Storage Driver * Revert "Mark Quobyte volume driver as unsupported" * Fix retype with migrate * VMware: Allow deletion of snapshot in error state * Move driver tests to tests/unit/volume/drivers * Prevent claiming and updating races on worker * Fix replication freeze mechanism * Move service and cluster creation in test to utils * Make Image Volume Cache cluster aware * Make Replication support Active-Active * Fix for live migration for Nimble Storage * NexentaStor5: Added extend method to NFS driver * Sync \`\`services\`\` SQLA model with what's in the DB * Make BackupManager not a SchedulerDependentManager * Optimise: Use sqlalchemy to build table sequence * Fix volume manage * Revert "Mark Tintri volume driver as unsupported" * Nosec subprocess with shell equal True from Bandit * Modify the spelling mistakes * Add volume to fake snapshot object * Clean up image tmp file if c-vol gets restarted * Modify the spelling mistakes * Prevent driver load failures from objects * Nosec Paramiko exec\_command() warning from Bandit * Replace deprecated pylint disable-msg with disable * DB: Optimize update methods * Updated from global requirements * Save model\_update as admin in create\_volume * Improvement to get group detail(Part 2) * Add api version history for 3.23 * Deprecate the Linux SMBFS driver * Storwize: do the correct update after failover * Unity Driver: Backup volume via snapshot * RBD: Fix RBD replication on volumes from image source * Dell SC: Change DRP search to reference type * vzstorage: fixed snapshot deletion in error state * Dell SC: Missing version history * Remove deprecated RequestBodySizeLimiter class * Allow triggering cleanup from API * [py35] image file should be open in binary mode * HNAS: Change 
snapshot names * Add prefix to user message event ids * Improve compatibility with novaclient 7.0.0 * Updated from global requirements * Unity: Add support to set IO ports in option * Fixes hpelefthandclient AttributeError * Mark the sheepdog driver as unsupported * Add volume type access operations notification * Remove inheritance from deprecated classes * FusionStorage: rename the CI\_WIKI\_NAME * Dell SC: Failback timeout extended * Tests: Remove more InvalidUUID warnings * Don't translate exceptions w/ no message * Add command information to cinder-manage.rst * Add CG capability to generic groups in VNX driver * RBD: Remove volume\_tmp\_dir option * Fix devref create\_volume doc formatting * VMware: Use versionutils in oslo.utils * Delete volume when rescheduling * VMware: Set backend UUID to volume UUID * Rename AddFCZone and RemoveFCZone according to PEP 8 * Cleanup limit view name processing * Datera 2.3 driver update * Skip CI link in list for infra supported drivers * Add manage/unmanage to DISCO driver * Updated from global requirements * Fix a few typos in storwize\_svc\_common.py * Doc: Fix build warnings for missing code-block lang * CoprHD: Handle ScaleIO CI failing with version-3.5 * Python3 common patterns * Enable DeprecationWarning in test environments * Add orm type check for paginate\_query * [api-ref]Add api ref for "volume type encryption" * Fix sphinx errors related to missing paths/modules * Cascade + force volume delete parameters * Adds metadata in search option for snapshot * Fix python integer interpretation in Py2 and Py3 * Fix Cisco Initiator zoning updates * Add QoS and Dedupe Support for Nimble Storage * Remove unused PasteAppNotFound exception * tgt: Remove existence check before delete * Only log insecure key manager warning once * Refactoring ITRI DISCO cinder volume driver * Convert 'parallels' format name to 'ploop' * Add psycopg2 and PyMySQL to test-requirements * Attach/Delete volume for tgt driver race condition fix * Correct RBD Provision stats&fix a perf problem * Avoid Forcing the Translation of Translatable Variables * Updated from global requirements * Deprecate the block\_device driver * [1/4]Reset generic volume group status * HNAS: Deprecate HNAS iSCSI driver * VMAX driver - PortGroup rollback error path scenario * Report multiattach capability for Hitachi VSP drivers * Tests: Split up NFS backup notification test * Set replication\_status automatically on retype * Set replication\_status on volume creation * Update replication property in capabilities * Cisco: can't add new zone when no zone is created before * VMware: Fix inventory folder after volume transfer * Use oslo.messaging topics for multibackend * Backup project attribute support * Cinder consistency group returning generic error message * Move VNX driver to dell\_emc folder * NexentaStor5: sessions and HTTPS support * Add more operations to cluster * Make workers ORM compatible with SQLAlchemy 1.1.4 * VMAX driver - MVs and SGs not reflecting correct protocol * Delete space to pass the check of pep8 * Fix logging traceback in service logs * Remove "service" from required\_import\_options * Add Rest API Support for Nimble Storage * Adds release note for 'Route extend\_volume' bugfix * Add 'unmanaging' state to volumes and snapshots * Fix service\_get mock method * Mark Nexenta Edge volume drivers as unsupported * Mark DotHill volume drivers as unsupported * leave the unreleased branch unspecified for release notes * Fix VMAX clone CG release note * Mark Tintri volume driver as 
unsupported * Mark Quobyte volume driver as unsupported * Mark Huawei volume driver as unsupported * Mark CloudByte volume driver as unsupported * Mark HPE XP volume driver as unsupported * RBD: Implement v2.1 replication * Updated from global requirements * Storwize: create vol fails near licensed limit * Add get\_all capability to volume\_attachments * Cluster volume group fix in lvm * Drop the judgement condition for FakeConnector * Rolling upgrade procedure documentation * Remove Scality backend driver * Use method is\_valid\_boolstr from oslo\_utils 10.0.0.0b2 ---------- * Updated from global requirements * Support new osprofiler API * [api-ref] Show more specific parameter of capabilities * Cosmetic changes to scheduler * Support A/A on Scheduler operations * Fix detach twice of snapshot * Add cinder volume drivers for NEC Storage M series * Remove authorisation checks at the DB level for quota operations * Fix condition parameter bug in routes * Add Apache 2.0 license to source file * VMAX driver - Attach and detach snapshot * Improve DB method naming consistency * Simplify calls to mock\_object * Make APIVersionRequest's null check more pythonic * Nexenta: Added ZFS cleanup in NexentaStor4 iSCSI driver * Move windows drivers to versioned objects * Correct reraising of exception * Fix warning when running \`tox -e docs\` * ibm\_storage driver: Added fczm decorators * Dell SC: Added Logging to \_swap\_credentials * Unreserve volume: fix log message for completion * Dell SC: Missing volume creation options * Hacking: Remove N333 oslo namespace import check * Adds Unity Cinder Driver * Added initial backend ISCSI driver for Reduxio * Switch default py3x to py35 * Remove run\_tests.sh wrapper * Do not manage VG with > 1 volume in Kaminario driver * HNAS: Add list manageable volume/snapshots * Move to hacking 0.12 * Modify variable's usage in Log Messages * Prohibit creating volume from source with dif encryptions * Route extend\_volume calls to scheduler * Remove deprecated volume\_clear shred option * Fix dos-style endlines * Add QNAP ES Storage Driver * Fix TypeError when execute cinder-volume-usage-audit * Log detailed attach/detach volumes info * Remove anyjson test dependency * EMC VMAX: Remove unused is\_in\_range() method * Non-WAN port filter issue in Kaminario iSCSI driver * Updated from global requirements * Introduce Hitachi VSP iSCSI driver * Remove Kaminario deprecated option * Introduce Hitachi VSP driver * Support A/A in delete actions and get\_capabilities * Change volume\_type dict to ovo * Dell SC: Incorrect exceptions being returned * Tests: Fix invalid UUID warnings * Block 3.1 message with c-sch RPCAPI pinned to 3.0 * Huawei: Mask unnecessary log print * Fix status after a VolumeDriverException * Fix replication\_status on InvalidReplicationTarget * Add host check on replication actions * Updated from global requirements * Revert "Swap volume type for migration started by retype" * LVM: collect more debug data when VG creation fails * SolidFire QoS scaled by volume size * Refactor api test\_common to use ddt library * New cinder driver to support INFINIDAT InfiniBox * Remove cgsnapshot\_id before snapshot.save * Add missing consistencygroup\_id in volume * Remove support for single backend config * Removed unused constant in v3/views/clusters.py * Add back policy check for CG * Fix secondary lvm cmds rootwrap filters * Update detach\_volume() with versionedobjects * Update attach\_volume() with versionedobjects * Calculate virtual free capacity and notify * NetApp 
cDOT driver fails to clone from NFS cache * Admin API policy enforcement contingent on is\_admin\_project * test: fix wrong usage of config option in test\_window * Fix wrong usage of config option gpfs\_images\_share\_mode * Refactor test\_volume\_actions to use ddt library * Correct the attributes of volume which created by clone a CG * Swap volume type for migration started by retype * Replace functions 'Dict.get' and 'del' with 'Dict.pop' * Reuse already existing groups in tempest plugin * Huawei: Mark password option as secret * Remove Unicode byte order mark * Add driver interface for groups * Remove emc folder * Always allow to create snapshot db record with snapshot-manage * [api-ref] Correct status code of backup delete * Add cg constraint when deleting a volume type * Fix response bootable parameter type in api document * Skip more parameters when import backup record * Dell SC: Reject thaw call when backend is failed over * Dell SC: Unable to locate LV primary after LV-AFO * Remove nosetests i18n fix * Remove temporary directory creation for BaseBackupTest * Add logging to FakeLoggingVolumeDriver * Remove empty debug\_opts * Add unsupported status to driver listing * Add is\_up property to service and cluster objects * HNAS: Rename hnas\_svcX\_volume\_type * Fix UnBoundLocalError in update\_consistencygroup * Support A/A in attach/detach operations * Remove 2 unnecessary assignments * Show team and repo badges on README * HPE3PAR: handle conflict in iscsi host create * Add dev doc for generic volume groups * Remove unused constant in hbsd\_horcm.py * Fix create\_group with group type name * Show provider\_id for admin * Fix online data migrations cmd in release notes * Fix UnableToFailOver exception message * Gate migration tests: Add Cinder tempest hook * Updated from global requirements * Create a dell\_emc folder for Dell EMC drivers * CG API changes for migrating CGs * [2/4]Reset group snapshot status * Update is\_up from Service OVO class to match ORM * Replace assertDictMatch with assertDictEqual method in tests * Move XtremIO driver to dell\_emc folder * Replace six.iteritems with dict.items() * Fix VMAX unit test timeout issue * Migrate consistency groups to groups * Move ScaleIO driver to dell\_emc folder * Modify the position of try-except * Fix names in online-data-migrations release note * Windows SMB: use os-brick remotefs client * Fix typo in test\_quotas.py * Skip test\_storwize\_get\_host\_from\_connector\_with\_lshost\_failure * Update v2 and v3 Volume actions documentation * Handle NotImplementedError for image cache cloning * Fix v2 volume create api-reference request parameters * VMware: VMDK driver performance improvements * VMAX driver - Create a CG from a source CG * Deprecate Hitachi Block Storage Driver * Add host check while create snapshot * Prevent Active-Active on drivers by default * Log traceback for validate\_connector exception * Fix "wrap functions with 2 blank lines" pep8 check * RBD: improve readability in over provisioning tests * Delete the redundant expression expected\_attrs 10.0.0.0b1 ---------- * Remove use of timeutils.set\_time\_override * Add generic code for online data migrations * VMAX driver - Ensure VMAX volume matches cinder db volume * Updated from global requirements * Pass OVO instance to copy\_volume\_to\_image * Drop unused parameter in class EntryCreateTask * Set EVENTLET\_NO\_GREENDNS for IPv6 and dnspython compat * Fix doc comments * Add multipath enhancement to Storwize iSCSI driver * Update replication doc * Add 
VolumeAttachStatus Enum * Dell SC: Live Volumes not cleaned up * Remove race conditions from transfer API * Convert backup\_device to OVO * Clean \_get\_cctxt method signatures * DB: Optimize volume\_update method * Fix typo error * Fix compatibility with decorator < 4.0.x * Fix config generator issue with OVO decorators * Dell SC: Minor cleanup * NetApp ONTAP: Deprecate 7-mode driver * Fix unit tests for LVM migration with os-brick 1.7.0 * Fix releasenotes job * Make c-vol use workers table for cleanup * Updated from global requirements * Dell SC: Log FO state more clearly * Add API unit tests for snapshot creation force values * Use VolumeDriverException for driver exceptions * Dell SC: Error creating snapshots with live volume * Remove GlusterFS volume driver * VMAX driver - Useful name convention for CG names * VMAX driver - Duplicate initiator group name error * To fix miscellaneous bugs in OVOs * Dell SC: Add init\_volume check and logging * RBD: prevent creation of encrypted volumes * Make divisible py3 compatible in vmware driver * Remove mox3 in test-requirement.txt * Fix typos in comment * VMAX driver - No support for creating CG from CG source * Updated from global requirements * NetApp Data ONTAP driver enhanced support logging * FalconStor: Fix creating snapshot failed * Dell SC: AsyncTask could return incomplete * VNX:Add more check on replication\_device keys * Skip unit-tests which use os.fdatasync on MacOS * Missing parameter for storage pools in Pure Storage driver * NetApp: Report shared blocks exhaustion * Allow entry created in VolumeAttachment table for 2nd and later volumes * storwize: get\_host\_from\_connector optimization * Huawei: Fix the rest query timeout problem * Updated from global requirements * Remove logging import unused * Add current status to error msg in reserve\_volume * Return HTTPBadRequest instead of HTTPNotFound * Removed extra xml api-ref files * Fix return values of consistencygroup actions in Pure storage driver * Update .coveragerc after the removal of openstack directory * Fixes ZFSSANFS driver on Solaris platform * VMware:Config option for http connection pool size * Updated from global requirements * Add generic reset-status method in admin actions' testcases * Updated from global requirements * Imported Translations from Zanata * Mark backup\_swift\_key option as secret * make Liberty incompatibility error more helpful * Add Fibre Channel support for Nimble Storage * Updated from global requirements * Adjust doc about threading * RemoteFS: Remove deprecated config options * Fix HNAS Driver KeyError exception in volume manage * NetApp NFS: Look for new file for >60s * Cleanup RCP API versioning * VMware: Fix deprecation warning * VMware:Remove redundant check for usable ESX hosts * Updated from global requirements * NetApp Data ONTAP headroom calculation error * Update synchronized decorator * VNX: use delete instead of remove * Stop using mox in unit/api/v2/ tests * Fix the issue that osprofiler opts not in cinder.conf.sample * Remove volid from attachments\_get\_by\_host|instance * Coho Data: New socket connections per request * Imported Translations from Zanata * Log message cleanup for volume-usage-audit * Imported Translations from Zanata * Make divisible py3 compatible in remotefs driver * Add 'cinder' to the 'allowed\_direct\_url\_schemes' * Imported Translations from Zanata * Add backup notification to cinder-volume-usage-audit * VMAX driver - Create initiator group in single call * Fix log translation marker in volume/utils.py * 
Fix policy file used for testing * Imported Translations from Zanata * Dell SC: Can incorrectly identify if it is failed over * Enable release notes translation * Remove deprecated cinder.middleware.sizelimit * Add VERSION to ZTE driver class * Imported Translations from Zanata * Switch from tempest-lib to tempest * VMAX driver - Misleading logging on portgroups * VMAX driver - Heat detach issue * Sort options in cinder.conf by module name * Fix typos in documentation * Move volume drivers unit tests to tests.unit.volume.driver module * Add version to ibm\_storage driver * Remove support for 2.x volume RPC API * Remove support for 2.x scheduler RPC API * Imported Translations from Zanata * Error message for image conversion failure * Add cleanable base object and cleanup request VO * Imported Translations from Zanata * Fix for Tegile driver failing to establish volume connection * Removing deprecated Dell EqualLogic config options * Hacking: Remove C305 contextlib.nested check * Fix typo: remove redundant 'that' * Removing cinder-all binary * Updated from global requirements * Remove old deprecated driver mappings * Imported Translations from Zanata * Nexenta: Use base VolumeDriver migrate\_volume * Add the note of maximum microversion * VNX: raise exception if no storops * Speed up kaminario's drivers tests * VMware: Skip setting vmdk UUID * Imported Translations from Zanata * Fix typo in emc\_vmax\_fast.py * Disable API v1 by default * RBD Thin Provisioning stats * Drop unused parameter in class ExtractSchedulerSpecTask * Add interface decorator to ibm\_storage driver * Imported Translations from Zanata * VMware: Remove FakeObject * Updated from global requirements * Fix typo in devref/api\_conditional\_updates.rst * Fix a typo in manager.py,test\_common.py and emc\_vmax\_utils.py * Re-use RBDImageMetadata and RBDVolumeIOWrapper from os-brick * Fix IndexError in volume manager * Remove ovo[fixtures] dependency from tox.ini * extract\_host: Handle empty host string input * Updated from global requirements * Hacking checks for H105 * Remove dead code - replication RPC APIs * Speed up ibm storwize svc driver's tests * Files with no code must be left completely empty * Add support for Infortrend GS Series products * Add 'replication\_targets' and 'display\_name' in API docs * Imported Translations from Zanata * OVO: OPTIONAL\_FIELDS should be moved inside the OVO * OPTIONAL\_FIELDS should be inside OVO object group\_snapshot * Moving the OPTIONAL\_FIELDS inside the OVO object group * Updated from global requirements * Cleanup lintstack exceptions related to objects * Imported Translations from Zanata * Remove placeholder tempest test * Hacking: remove check for "tests/unit/integrated" * Allow attribute lazy loading in VolumeType OVO * Provide cfg to ignore lvm descriptor leak warnings * Imported Translations from Zanata * Imported Translations from Zanata * Imported Translations from Zanata * Volume get: Check policy before fetching metadata * Fix a typo in scheduler manager "filter\_propterties" * Fix test case note * Fix project assignment in VolumeType OVO * Imported Translations from Zanata * Fix pep8 E501 line too long * Using assertIsNone() instead of assertIs(None) * Fix formatting in generate\_config\_opts * Reduce redundant call of image show * Limit memory & CPU when running qemu-img info * Updated from global requirements * Allow getting qos\_specs in db.volume\_type\_get * Fix missing string format specifiers * Deprecate LocalVD, SnapshotVD, CGVD, ExtendVD, TransferVD * Use 
keystoneauth1 for creating the keystone session * Add test\_get\_all\_by\_volume in objects/test\_backup * replace filter() to meet python3 * Add snapshot id validation during volume creation * Imported Translations from Zanata * Refactor of getting qemu-img version * Remove extra param in list\_manageable\_snapshots * Stop using mox in cinder/tests/unit * Modifies override logic for key\_manager * Remove unnecessary setUp * Imported Translations from Zanata * Sheepdog: fix clone failure * Read domain info from context when contacting nova * Compact Liberty database migrations * XtremIO: fix migration bug * Remove unnecessary setUp and tearDown * Imported Translations from Zanata * Don't attempt to escalate cinder-manage privileges * Remove duplicate keys from dictionary * Remove unnecessary setUp and tearDown * Add missing %s in print message * Create v3 API Reference * Reserve 5 migrations for Newton backports * Add os-detach to api-ref * Fix request\_spec conversion from o.vo to dict * Storwize: remove SCSI LUN ID for volume attaching * Stop using mox in unit/\*.py (2/2) * Imported Translations from Zanata * Huawei: Backend capabilies report optimization * Imported Translations from Zanata * Imported Translations from Zanata * Update reno for stable/newton * NetApp cDOT: Fix model update for cheesecake volumes 9.0.0.0rc1 ---------- * Fixed request/response parameters in doc * Add volume RPC API v3.0 * Add scheduler RPC API v3.0 * XtremIO: bump driver version to 1.0.8 * Imported Translations from Zanata * netapp image cache cleaning start thresholds does not work * Fix missing JSON response in API reference * Mark v1 as DEPRECATED in devref * Add xUnknownSliceID to retry list * Dell SC: ISCSI initialize\_connection fixes * Use IP in Kaminario locks and add/delete loggers * Add combined function get\_vol\_type\_by\_name\_or\_id * Imported Translations from Zanata * Remove unreachable return statement * Dell SC: Disable connectivity failures for new servers * Fix release notes formatting * Fix coerce good values in test\_fields.py * VMAX driver - Does not honor clone volume size * Updated from global requirements * Fix glance v2 image update * Unit test to use vol obj instead of dictionary * Imported Translations from Zanata * Remove few Kaminario release notes * Newton release notes cleanup * Use to\_policy\_values for enforcing policy * Fix logic in retype check for encryption changes * Added default for ScaleIO driver max\_over\_subscription * Save volume\_type/group\_type uuid into db when creating group * Imported Translations from Zanata * DRBD: Don't put clients everywhere * VMAX driver - Cleanup of Initiator group fails * Imported Translations from Zanata * fix the %{} when string formating * Fixed illegal syntax error code in ScaleIO rename volume * VMware: Send additional connection info * Imported Translations from Zanata * Have a default OPTIONAL\_FIELDS for persistent OVOs * Include OPTIONAL\_FIELDS in VolumeType OVO class * Remove sleep mocks in RBD unit tests * Fix RBD timeout * Fix creating typeless volumes with Mitaka's c-sch * Add missing release notes * Make sure get\_backup\_device result is an o.vo * [api-ref] Correct response code in Cinder API v2 * VMAX driver - failed rollback on VMAX3 when MV issue * Fix order of arguments in assertIs * Remove driver interface changes from reno devref * Imported Translations from Zanata * Doc: qos-specs is always and only "Administrators only" * Cleanup Newton release notes * Clean create\_snapshots\_in\_db method * standardize 
release note page ordering * Dell SC: Error attaching after LV-AFO * VMAX driver - Removal of iscsiadm from vmax cinder * VMAX Plugin - Target WWN change for redundancy * Orphan view and multipath issue in Kaminario driver * Imported Translations from Zanata * [api-ref] Correct response code in Cinder API v1 * Eqlx: Ignore missing snapshot on delete * NetApp cDOT: Fix reporting of replication capabilities * Mark Scality volume driver as unsupported * Trivial: Fix a trivial flake8 error * VMAX driver - Replace "SLO" tag with "ServiceLevel" tag * Register state\_path opts and add it to sample conf * Imported Translations from Zanata * Refactor volume status in managing vol * Updated from global requirements * VMAX driver - Won't delete pre-existing initiator groups * NetApp: Fix issue with busy snapshot deletion * Provide volume size for tempest volume creation * Nexenta: Added use of sessions for NexentaEdge drivers * Imported Translations from Zanata * RBD: Error when managing a nonexistent volume * Fixed indentation * XtremIO: fix bug deleting cgsnapshot * Raise NotImplementedError instead of NotImplemented * Imported Translations from Zanata * Support create group with group type name * Rename specific driver option with synology prefix * Remove doc for promote/reenable replica * Imported Translations from Zanata * Imported Translations from Zanata * LeftHand add entry point tracing * Remove host assignment in Snapshot OVO * Fix lazy loading cluster field from Service OVO * Fix manage existing MissingDependencies error * Do not pass unicode to str.isspace() when disabling a service * Correction in log massage format syntax * EMC VMAX - multi-attach failure to VMAX3 when SLO is omitted * add more credentials information to check\_is\_admin * VMAX driver - Retrieve volume from snapshot * Fix CapacityWeiger to accept None volume type * Imported Translations from Zanata * check quota per\_volume\_gigabytes for transfer-accept 9.0.0.0b3 --------- * Dell SC: Add dell\_server\_os configuration option * Add supported driver checks to Zone Manager * Fix backup unit tests imports for os-brick * Fix backup unit test with new os-brick * Stop using mox in unit/api/contrib/ tests (1/2) * Stop using mox in unit/volume/drivers/\*.py * ScaleIO over subscription support * Fix exceed of dd action when copying with path * Fix CapacityFilter to accept None volume type * Fix HNAS stats reporting * Imported Translations from Zanata * Stop using mox in unit/api/v3/ tests * Deleting volume metadata keys with a single request * Switch StrOpt to URIOpt for URL config options * Add Castellan release note * Add ability for ceph driver to report discard * Changes stats report for ScaleIO driver * Remove cinder/tests/unit/test\_misc.py * Imported Translations from Zanata * CoprHD: Handle create volume from CG snapshot error * Performance issue for VNX attache LUN * Config logABug feature for Cinder api-ref * Stop using mox in test\_wsgi, test\_nexenta, test\_netapp * Remove unused source file tests/unit/db/fakes.py * Fix an invalid function call in test\_consistencygroups.py * zfssaiscsi driver log messges need cleaned up * Fix typo in enable\_unsupported\_driver description * Mark GlusterFS driver as deprecated * Retype encrypted volumes * Fix DellStorageCenter docstrings * Fix driver interface checks for ZTE driver * Add CI\_WIKI\_NAME to ZTE Driver * Add CI\_WIKI\_NAME to X-IO Driver * Add CI\_WIKI\_NAME to Synology iSCSI Driver * Create encrypted volumes from images * Add encryptor attach/detach in utils * 
XtremIO: fix the returned value of CG actions * Stop using mox in unit/api/contrib/ tests (2/2) * Change assertTrue(isinstance()) with optimal assert * Stop using mox in unit/\*.py tests (1/2) * Use assertIn instead of assertTrue(A in B) * Remove unused function from unit/fake\_utils.py * Imported Translations from Zanata * Clean import in code * Remove unused config.CONF * Add tempest tests for Consistency Groups * HNAS: Add support for manage/unmanage snapshots in NFS driver * Reduce the runtime of drivers/ibm/test\_storwize\_svc * Improve TestCase.mock\_object method * Fix the password print in cinder-volume.log issue * VMAX Driver - Live Migration for VMAX3 * Remove self.\_\_dict\_\_ for formatting strings * Imported Translations from Zanata * CoprHD: Add missing key in ScaleIO driver * Avoid patch loopingcall in VNX * Add validation for the metadata properties * Removed RPC topic config options * Clean imports in code * Replace key manager with Castellan * Cleanup api-ref * Update api-ref path * Nexenta: NexentaStor4 NFS driver cache capacity and use sessions * Stop using /etc/cinder/cinder.conf for cmd tests * Use utils.validate\_dictionary\_string\_length * Stop using mox from unit/image/\*.py * Fix some typos in comments * Separate create and update rules for volume metadata * Updated from global requirements * TrivialFix: remove unnecessary VERSION\_COMPATIBILITY * When only .pyc left, the extended API can not be used * Recover volume status after remove\_export * Pure - warn when deleting destroyed snapshot * Fix quota rollback on retype failure * Pin RPC server's serializer to min obj version * Driver documentation cleanup * Host selection when creating backup * TrivialFix: Remove cfg import unused * Fix password masking result * Let setup.py compile\_catalog process all language files * Dell SC: Add exclude\_domain\_ip option * Add support for force backup for Nimble Storage * Retype issue in Kaminario Cinder drivers * TrivialFix: Remove logging import unused * Stop using mox in unit/volume/drivers/emc/\*.py tests * Stop using mox in unit/consistencygroup/\*.py tests * Stop using mox in unit/scheduler/\*.py tests * Stop using mox in unit/zonemanager/\*.py tests * Stop using mox in unit/backup/drivers/\*.py * Fix incorrect parameter in create\_key * Use constraints for api-ref environment * Dell SC: Live Volume Autofailover * Add new supported driver checks * Tests: Fix racy volume unit test * [Trivial] Refactor the using of dict.get() in the test assertion * vzstorage: fix create/delete snapshots * Sending ScaleIO volume id in attach and detach volume * Cleanup upgrade-related FIXME's * Using oslo\_config's min attribute * Add replication failback in Kaminario K2 drivers * Added config option to enable SSL * Group specs: Fix exception name * Dell SC: Failback replications limited to 5 at a time * VMware: Add volume name in vCenter to conn info * NetApp: Report multiattach as enabled * Attachment lost after migrating in-use volume * Fix typo in the huawei\_driver.py * Fix invalid services caused by enabled\_backends * Updated from global requirements * Change NFS driver to not throw exception without nfs shares file * Datera driver 2.2 update * Reduce the runtime of drivers/dell/test\_dellsc * Add release note to warn about os-brick lock dir * Updated from global requirements * Use min attribute from oslo\_config * Imported Translations from Zanata * Fix the interface to call update\_server\_volume * Dell SC: Break\_replication unhandled NoneType error * Volume Manage/Unmanage 
Support for IBM FlashSystem * Imported Translations from Zanata * Change a typo error in the releasenote * Fix reported driver's versions * Use OVOs to set errors in manage volume/snapshot * Use original volume OVO instance in create flow * Fix mysql binary comparison * VMware: Allow images in ova container * Add CI\_WIKI\_NAME to driver's devref * Improve Hitachi HNAS volume drivers log messages * Updated from global requirements * EMC VMAX - iSCSI Multipath support * Fix typo in cinder/tests/unit/image/test\_glance.py * Clean imports in code * Imported Translations from Zanata * Enhance api\_microversion doc * Add CI\_WIKI\_NAME to Tegile volume driver * Add get\_manageable\_\* methods to Pure drivers * Use constraints for all tox environments * Imported Translations from Zanata * Remove the using of dict.get() in assertIsNone * Revise Synology DSM storage driver * Refactor the usage of save\_and\_reraise\_exception * Add volumes/summary API support * Imported Translations from Zanata * Ignore case when comparing wwns in Pure FC driver * NetApp: Add Consistency Group Support for NFS * Replication, hard-coding and dict.name issues in K2 * Update homepage with developer document page * Add CI wiki page to driver listing * Imported Translations from Zanata * Refactor cinder.utils.\_copy\_volume\_with\_path * HNAS: Deprecating XML config file * Add functional-py35 to tox * Remove code duplication in enums * Concurrency issue in K2 iSCSI and FC Cinder drivers * VMAX Driver - QoS support for the VMAX3 * Mask out passwords when tracing * Remove debug logger translations * Imported Translations from Zanata * Fix volume creation with no volume type * Dell SC: delete\_live\_volume payload incorrect * Dell SC: Requests async header is invalid * Imported Translations from Zanata * Use object instead of string when reset backup * Prevent doc generation failure on OVO decorators * CI: Add CI\_WIKI\_NAME to all drivers * Imported Translations from Zanata * Make the volume-manage doc be more general * Add missing dependency on keystoneauth1 * Fix tox pip-missing-reqs * Revert "Remove Tegile volume driver" * HPE XP add entry point debug tracing * Remove resource\_common\_manage load warning * Updated from global requirements * Huawei: Check before add initiator * Huawei: Modify array LUNs naming rules * Added update-host for CGs in cinder-manage * Imported Translations from Zanata * Remove unused context parameter * Imported Translations from Zanata * QoS support for the Coho Data Cinder driver * Fix backup NFS share mount with default backup\_mount\_options * Add tracing to remotefs entry points * NetApp cDOT: Add cheesecake replication support * Don't use config option sqlite\_db * Improve snapshot handling in K2 * Fix volume retype from SolidFire * EMC VMAX - Oversubscription support * Ceph doesn't save a container name for volume backup * Updated from global requirements * Fix volume upload-to-image for vhd disk-format * Imported Translations from Zanata * Remove context object in oslo.log method * 3PAR add entry point debug tracing * Fail manage operations if service is down * Support retype in K2 cinder driver * Remove comma from end of policy.json * Prevent adding same OVO version twice to history * Removed a dead db function register\_models() * Handle snapshot delete conflict in 3par driver * Imported Translations from Zanata * Specify key algorithm and size for create\_key * Add functional-py34 to tox * Fix size exceed of dd action when clearing a volume * HNAS drivers refactoring * Use volume 
o.vo in get\_backup\_volume\_temp\_snapshot * Imported Translations from Zanata * Fix ImportError of fake\_driver * Add proxy config option to google backup driver * Add API Reference for groups * Add stochastic weight handler to scheduler * Fix Scheduler manager masking retype fail details * fixed misspelling in synology\_common.py * Updated from global requirements * Don't puke when exiting driver list * corrected the help string in coho.driver * Fix ImportError of fake\_driver * Storwize:create\_volume\_from\_snapshot with different size * Imported Translations from Zanata * Fixing pending comments for CoprHD Drivers * Huawei: Fixed problems caused by password expiration * Add metadata length check to volume-manage * Added unit test coverage to test\_limits.py for api v2 * Dell SC: Use Live Volume for replication * Remove race condition from lvextend * Switch request\_spec in create volume calls to ovo * Support replication in K2 * Support manage/unmanage and extra-specs in K2 * Update xiv\_ds8k driver name and description * Imported Translations from Zanata * Set VERSION in VNX cinder driver * ScaleIO ignore rename of external volumes * Managed snapshot should return ceiling of size in Huawei driver * Size in tintri driver should be converted to integer * Improvement to query cg detail(Part 1) * Fix documentation and remove dead code * VMware: Fix retype when storage policy is enabled * Validate name in qos-spec * Replace functions 'Dict.get' and 'del' with 'Dict.pop' * Updated from global requirements * NetApp: Report hybrid aggregates in volume stats * Storwize: Quote vdisk names to handle spaces * Add group\_type\_id in create\_group\_snapshot * Fixes ZFSSANFS driver * DRBD: Disk-options for new resources * Fix mistakes introduced with QoSSpecs object * Add backup update function (microversion) * VMware: Fix upload to image with glance v2 * Remove Tegile volume driver * Quobyte volume driver should use DLM * Add functional tests for nested quotas * XtremIO: Implement update\_migrated\_volume * NetApp: Refactor unit tests * Add driver list to doc build * Move fake\_driver to tests root dir * Map volume/snapshot manage extensions to v3 * Replace OpenStack LLC with OpenStack Foundation * VMAX Driver - SnapVX licensing checks for VMAX3 * LVM driver: list manageable volumes and snapshots * Sheepdog:fix the bug of failed to clone image * Move drivers unit tests to unit.volume.drivers directory * Docs: Correct i18n information * Fix db purge for quality\_of\_service\_specs FK constraint * Fix log message when service is disabled * Add interface documentation to driver devref * Remove duplicate code in functional tests * Docs: cleanup doc8 errors * Docs: remove todo list generation * Update doc README and remove old Makefile * Imported Translations from Zanata * Handle API NotFound exceptions at WSGI level * Add worker's DB operations * Add workers table * Modify API to include cluster related operations * Update manage with cluster related commands * Improve cinder-manage arg parsing * Add cluster job distribution * Update Versioned Objects with Cluster object * Add cluster table and related methods * Update OVO instance on destroy method call * Fix wrong declaration in Scheduler's base driver * Refactor sqlalchemy service methods * Improve api\_version decorator to avoid noqa * Imported Translations from Zanata * Add functional tests for groups * Tests: mock notifier in CG tests * Remove duplicated code in manage\_existing\_snapshot * Initialise oslo.privsep early in main * Imported 
Translations from Zanata * Add lock decorator to SolidFire clone\_image method * Dell: Fix docstring cut/paste error * NetApp: Support new parameter to cDOT clone API * Auto sync manager's RPC\_API\_VERSION * Fix CinderPersistentObject.refresh * Replace locks in remotefs and glusterfs backend drivers * Block Device Driver: report pools in volume stats * Add missing test case * Updated from global requirements * Invalid volume state when manage operation fails * Add group snapshots - APIs * Dell SC: Add secondary DSM support * Fix some typos * Remove generate\_driver\_list dependency on cur dir * Huawei: Support reporting disk type of pool * Fix 500 for 'qos-create' key or value > 255 characters * Add group snapshots - manager * Add group snapshots - db and objects * XtremIO: support of snapshot manage commands * Fix the group type filter * Fix policy check for group types * Differentiate thick and thin provisioning * Remove force option from create\_snapshots\_in\_db() * Set sleep time to 0 in Datera driver * Add generic volume groups * Huawei: Support backup snapshot * Fix assertEqual usage in test\_volume\_manage * Fix chunked backup driver interface name * Removed unused code from cinder.utils * 3par driver handles concurrent host create conflict * Fix typo in delete snapshot in Synology driver * Removed unused code from cinder.utils 9.0.0.0b2 --------- * Fixes consistency snapshot creation * [doc] Fix a non-alphabetical order bug * Trivial: Use fake.VOLUME\_ID instead of id * Violin Memory iSCSI storage for 7000 series AFA * update min tox version to 2.0 * Updated from global requirements * Switch \_create\_temp\_cloned\_volume in driver to VO * NetApp: Add aggregate capacity info to scheduler * Add strict Boolean checking for storage pools * Add Synology DSM storage driver * Use from\_environ to load context * Use context.from\_dict to determine available arguments * Add ZTE Block Storage Driver * iSCSI Target: remove confusing comment/log * Storwize: Fix exception ProcessExecutionError * Changing method \_from\_db\_object to classmethod * Allow admin project to operate on all quotas * Use default provisioning type from config * Delete \*.pyc files before running tox tests * Implementation for CoprHD Cinder Drivers * Image virtual size doesn't fit to volume size * Don't apply config fixture to auth\_token.CONF * Check flashcopy mapping before deleting volume * Send scaleio id to os-brick * Nexenta: Added HA support and enhance get\_volume\_stats() * Fix failure with "None" volume type in Pure drivers * Add backend driver for Zadara Storage VPSA * Add group type and group specs * Don't use shred for volume clearing * Add cinder backend driver for Huawei FusionStorage * Do not conjecture volume-id from iscsi\_name * VNX: New Cinder driver in Newton * Return vaild reason when unknown error occur * Moving the OPTIONAL\_FIELDS inside the OVO object * Support microversions on inherited Controllers * Replace use of mox with mock in test\_base\_filter * Replace 10 \*\* 9 with a constant units.G * Reduce logging level of API validation messages * Fix manage existing volume fails after object merged * Add strict Boolean checking for volume manage * Adds unit test coverage for consistencygroups.py * FalconStor: New Cinder driver in Newton * Move QualityOfServiceSpecs to use VersionedObject * Snapshot object has no attribute about size * Add return parameter info for manage\_existing\_get\_size * cinder-manage db purge has issues with foreign keys * Huawei: Use versionedObject * Resolve unittest 
hang issue * Fix some typos * Add logging when filtering returns nothing * Validate extra\_specs type * Just a few spelling modification. "occurred" * Remove white space between print and () * Log traceback for initialize connection failure * Remove unused LOG * Use assertEqual/Less/Greater/In/IsNone * Updated from global requirements * Add \_\_ne\_\_ built-in function * Fix consisgroup V3 interfaces * Add proper scheduler\_hint key to dev-ref * check the validity of metadata when update volume * Remove unused \_\_init\_\_ * EQLX: Extend of snapshot or clone failure * Log terminate\_connection exception with traceback * Replace use of mox with mock in test\_api\_urlmap * Add README in release notes directory * Fix broken link in HACKING.rst * Replace int with float to bring math.ceil into effect * EMC ScaleIO should return ceiling of volume size * Deprecate defining c-vol backends in DEFAULT * Dell SC: Do not set High Availability in async mode * Replace use of mox with mock in test\_ibm\_xiv\_ds8k * Remove translation from config options * Correct reraising of exception * EMC VMAX - locking SG for concurrent threads * Use elevated context to query DB in VolumeNumberWeigher * Huawei: Support huawei consistency group * Make divisible py3 compatible in nfs driver * Updated unit tests to use official unittest methods * Fix raise UnableToFailover call in SolidFire * Provide Kaminario K2 all-flash array FC driver * RBD: uppercase rbd\_opts for consistency * Add strict Boolean checking for volume create * Replace use of mox with mock in test\_brick\_lvm * Replace use of mox with mock in test\_lvm\_driver * Remove six.iter\* * Updated from global requirements * Add validation for container name * VMware: Fix create vol from streamOptimized image * Use True instead of 1 in while loop * Remove mock.patch.stopall call in test\_smbfs * XtremIO: fix test suite * Add strict Boolean checking for quota show * Correct unavailabe to unavailable * Huawei: Support iSCSI configuration in replication feature * Dell SC: Specify High Availability for create replication * Increment Pure Storage Driver Versions * Refactoring Kaminario iSCSI driver * Don't reuse pep8 env in compliance tox runs * Trivial Fix * NetApp: Finish SSC refactor by removing dead code * Implement unit test for HPE Lefthand driver * Added coverage report to tox.ini * Add strict Boolean checking for qos delete * Add validation for type extra\_specs * Replace mock with self.override\_config * fix cinder ceph backup driver padding error * Replace 1024\*1024\*1024 with units.Gi to express more accurately * Unexpected function is called in \_migrate\_volume\_generic * Violin: resize volume if cloned lun is larger * Make dict.keys() PY3 compatible * Move \_validate\_extra\_specs to cinder/utils * VMware: Refactor vmdk unit tests * Updated from global requirements * Move \_check\_metadata\_properties to cinder/utils * Remove deprecaterd mapping in manager.py * Capability lists in Cinder scheduler * Updated from global requirements * Nexenta: Add NBD driver for NexentaEdge * Add missing coordination=True on service start * tempest: Add Unicode volume name test * Adding Scaling QoS for ScaleIO driver * Updated from global requirements * Implement match-set type 3PAR FC VLUN creation * Fix pylint error in K2 iSCSI driver * Fix lock files littering working dir during tests * Fix TSM backup driver test mocks * Improve DB volume check in LVM manage volume * VMware: Honor vmware\_cluster\_name during retype * Revert "Revert "use utf8 as default charset for 
all tables in mysql db"" * Fix snapshot stuck in 'deleting' after reboot c-v * Update initialize\_connection to use versionedobjects * Updated from global requirements * NetApp: Replace SSC for cDOT block & file drivers * Revert "use utf8 as default charset for all tables in mysql db" * huawei-iscsi-multipath-support * use utf8 as default charset for all tables in mysql db * Implemented unit test cases for HPE LeftHand driver * add FIXME tag for extra argument 'topic' in scheduler rpc api * update coordination.Lock.release's docstring * Fix permissions error when configuring nfs backend * Use cinders test class in cloudbyte unit tests * Correct DB modifications from compaction * Add unit tests in cinder for HPE Lefthand driver * Provide Kaminario K2 all-flash array iSCSI driver * VMware:Persist storage policy ID in vmdk meta-data * Allow setting CG name or description to empty value * Support None value of extra\_specs in cinder-scheduler filter * Fix db purge for volume\_types FK constraint * ABC Metaclass for IBM FlashSystem * IBM FlashSystem: Add host check for iSCSI driver * vzstorage: add ploop volume format support * Use versionedobjects in remotefs.py * Move test from test\_glusterfs and test\_quobyte to test\_remotefs * Netmask needs corersion to string * image\_size\_m should get ceiling of image * Add unit tests in cinder component for 3PAR driver * Handle quota exceed exception * Replace locks in volume manager * Updated from global requirements * Start/Stop coordinator with Volume service * Tests: Remove notification\_driver config * Tests: Use mock for notifier tests * 3PAR: Fix delete volume when online clone * Add driver interface checks * Add test type clarification to devref * Backup and restore broken in zfssaiscsi driver * Windows SMBFS: fix VHD/x resize * Remove duplicated fake drivers * Cleanup volume\_type\_projects * Implement OVO class init mechanism * Add debug level logs on OVO backports * Stop assuming persistent OVOs are also DictCompat * Move OVO methods from CinderObject to Persistent * Remove remotable from OVOs * Use manifest to backport OVOs during upgrades * Nimble: Don't query all vols from DB in do\_setup() * Fix wrong usage of exception's attribute message * Add parameter info for manageable snapshots * Adds ACL, IP Pool, Multipath to Datera Driver * Dell SC: Retry unhandled exception REST Gets * Fixed \_create\_3par\_iscsi\_host(), added unit test for 3PAR * Set QoS on cloned volumes * VMware: Deprecate vCenter version 5.1 * Update manage\_existing to use volume object * VMware: Fix constants in unit tests * Updated from global requirements * EMC driver should return ceiling of volume size * Use oslo\_middleware sizelimit * migrate to os-api-ref * volume upload-to-image fails for iso disk-format * Huawei driver should return ceiling of volume size * Volume manage/unmanage support for Eqlx driver * NetApp: Log unexpected errors from backend * Add Auth Version 3 support in Swift Backup Driver * Add error messages to conditional updates devref * Add docstrings to CG and CG Snapshot filters * Virtual size should get ceiling of image * DRBD: Allow to set options for new resources * SolidFire driver should return ceiling of volume size * Clean up man page and remove version/date * 3PAR: Fix terminate\_connection when failed over * VMware: Remove unused methods * Fix MySQL DB error in Delete CG * Nimble driver should return ceiling of volume size * Updated from global requirements * Remove locks from Pure volume drivers * Give more fine grained access to 
DriverInitiatorData * Remove IN\_USE from ConsistencyGroupStatus enum * Compact Kilo database migrations * Bump to Nova v2.1 * Switch to using correct TaskFlow Failure class * Optimize service find in migration * NetApp NFS Cmode: Fix NotFound exception * Add check to limit maximum value of age\_in\_days * GPFS: Fix consistency group ERROR\_CREATE status * Remove the duplicated definition of \_add\_to\_threadpool * Imported Translations from Zanata * Coho data sanity fix in the driver unittest 9.0.0.0b1 --------- * Dell SC: Added support for failover\_host failback * Remove API races from consistency groups * Minimize cost of DB exists decorator checks * Pass volume\_type\_id in when deleting snapshot * Add unit test case in Nimble Driver * Fix attachment\_get\_by\_\* to return entire list * Remove API races from migrate and retype * SwiftBackupDriver: Dont add project\_id to authurl * Updated from global requirements * Catch GlanceMetadataNotFound in creating cgsnp * Updated from global requirements * List manageable volumes and snapshots * Updated from global requirements * Add pagination support to messages * gpfs: Add changed options for 'find' command in rootwrap filters * Huawei replication should use auto recovery policy * Dell SC: Switched REST calls to async where available * Move all backups related unit tests to backup directory * Move consistency groups tests to consistencygroup directory * Volume manage should parse volume size as float * Return BadRequest for invalid 4byte unicode character * NetApp: Add iterator handling to client library * Make IBM drivers return snapshot object for cg ops * Coho Data: Reconnect to cluster on broken pipe * Remove mox usage from test\_nexenta * Manage/unmanage snapshot in ScaleIO driver * 'display-name' is not used in snapshot\_manage * LeftHand Fix terminate\_connection when failed over * Storwize: terminate\_connetion NPIV perf improvement * Revert "Add hacking check to prevent assert\_called\_once" * Fix 3PAR cg display name issues * DRBD: ensure\_export() might not have 'provider\_location' * Fix image-volume cache to use volume object when evicting * Fix retype from non-repl to repl type in Pure drivers * Remove test\_conf.py * Dell SC: Changed rename volume REST API call * Add hacking check to prevent assert\_called\_once * Updated from global requirements * Huawei: Raise if no FC port found * VMware: Increase default task poll interval * VMware: Update volume config during unmanage * Drop use of invalid assert\_called\_once call * Fix AttributeError when obtaining 'name' attribute from 'snap' * Handle SSL termination proxies for version list * Call \_init\_volume\_driver in backup manager * Updated from global requirements * model\_update for temp volume or snapshot in backup * Add debug messages and comments for ceph backup * Avoid ascii code error for request body strings * Fail on disabled\_reasons with more than 255 chars * Windows iSCSI: properly handle config option * Remove unused logging import and LOG global var * Fix Brcd zone driver initiator zone update * Add SnapshotStatus enum field * Tests: Fix mock imports * NetApp: Remove type checks from manage workflow * ScaleIO actual volume size model update * Capacity weigher: Pass correct arg to super() * IBM Flashsystem: Add missing items to common * Prevent Multitable conditional updates * Remove support for 1.x volume RPC API * Remove inappropriate Copyright * RBD: include cluster name in connection info * Tests: Don't attempt connection for Coho driver init * 3PAR Clean up 
VLUN deletion on detach * Remove object to dictionary translation logic * Refactored the update method in api.py for vol obj * Tests: Use unique volume IDs in backup tests * Move LVM tests to test\_lvm\_driver.py * Move CG unit tests to test\_cg.py * Fix an error in CG object * Fix backup using temp snapshot code path * IBM FlashSystem: Cleanup host resource leaking * Updated from global requirements * Add sample config file to cinder docs * Make api\_microversion\_dev more explicit * Use check\_string\_length from oslo\_utils * Fix up RPC cleanup process * Add unit tests in cinder component for HPE drivers * Whitelist 'rm' in api-ref tox environment * gpfs: remove a race in deleting volumes * Fix taskflow parameters for manage\_existing * Correct misspelt words in msg in cinder * Implement CHAP Authentication for E-Series Driver * WADL to RST migration in cinder tree * Properly mock out image\_utils operations for tests * Dell Eqlx: Volume from snapshot now honors resize * Use example.com in unit tests * Extend remove\_version\_from\_href support * Fix return value of \_clone\_image\_volume * image: don't use is\_public with glance V2 API * Change provider\_id to StringField * Add missing SnapshotUnavailable to cinder/exception.py * NetApp: Rewrite user API discovery logic * Remove unnecessary v3 VolumeController.\_\_init\_\_ * Fix v3/volumes.py VolumeController super(...) parameters * DB: Optimize volume existence check * Reorder DB API require\_context decorators * Remove API races on extend and volume\_upload\_image * Manage existing volume with invalid host * Remove CG db access in Storwize driver * Specify a user-agent in Pure volume drivers * Dell SC: find\_volume failure in manage and retype * 3PAR driver failure SR License missing in array * Remove unnecessary created\_at from ORM Message * Fix uncaught NotFound exceptions * Remove support for 1.x Backup RPC API * Provide consistency to "Openstack-Api-Version" header * Change OVOs project\_id and user\_id to StringField * Use to\_utf8() instead of safe\_encode() in convert\_str() * Replace manually changing CONF options * remove extra quota commit of manage snapshot * Add replication v2.1 test cases * Move initiator\_data access to helper methods * Add ability to filter by volume\_glance\_metadata * Add Cheesecake APIs to policy.json file * Add upload\_image API role-based access policy * Add snapshot\_metadata operations to policy.json * Mark the v1 API as deprecated * Fix Lun ID 0 in HPE 3PAR driver * Added 'volume:get\_transfer' to policy.json file * NetApp: Decouple capacity volume stats collection * NetApp: Managing cDOT LUN by UUID fails * VMware: Use \_in\_use() to check for in-use volumes * Add devref for conditional updates * Fix gate-cinder-tox-db-functional job * Add ordering possibilities to conditional update * Add unmanage volume for RBD driver * Updated from global requirements * VMware: Support for paraVirtual image adapter type * Pass default executor to os-brick * NetApp: E-Series driver using invalid host-types * NetApp: Use Local Cache to Copy an Available Image * Add provider\_location to cloned volume * Failback will enable the host that has been frozen * Move and rename tempest\_tests to cinder/tests/tempest * Tests: lower case all fake uuid constants * Tests: Fail if oslo.versionedobjects issues Invalid UUID warnings * Removed extra line of code * Fix uuid warnings in various api contrib unit tests * Fix snapshot test uuid warnings in api.contrib * Fix volume test uuid warnings in api.contrib * Fix vol 
type/spec uuid warnings in api.contrib * Fix qos\_manage uuid warnings in api.contrib tests * Fix CG uuid warnings in api.contrib unit tests * Fix admin\_actions uuid warnings in api.contrib * Fix uuid warnings from api.contrib backup tests * Fix uuid warnings from test\_db\_api unit tests * User messages API for error cases * Imported Translations from Zanata * Imported Translations from Zanata * Assisted snapshot: use Keystone Session with Nova Client * Imported Translations from Zanata * fix invalid uuid in emc extremio unit tests * fix invalid uuid warnings from test\_cmd * Fix invalid uuids in tintri unit tests * Fix uuid warnings from db unit tests * Fix invalid uuids in EMC scaleio unit tests * Tests: Fix invalid UUIDs in EMC VNX tests * ibm flashsystem: Fixing hardcoded attribute * Fix 20 typos on devref * VMware: Remove TODO for port config option * Remove XML API * Microversion of Bootable filter in cinder list * init\_host offload to default to true * Imported Translations from Zanata * Updated from global requirements * 3PAR: Remove metadata that tracks the instance id * Updated from global requirements * Handle TFlow short comings in SF driver * Add failed\_over detection to SolidFire driver * Implement replicate\_volume & failover on SolidFire * Deprecate nas\_ip and change this to nas\_host * Imported Translations from Zanata * Implement Cluster Pairing for SolidFire Driver * Fix minor typos in the migration devref * Cleanup volume\_attachment in case of reset-state attach\_status * Updated from global requirements * Use messaging notifications transport instead of default * Fix doc and source documentation errors and warning * RBD: delete snapshots if missing in the backend * Coding style issue * Remove support for 1.x scheduler RPC API * Launchpad Answers no longer used * Fix RPC and Object version pinning on API * Fix service version pinning on start * Fix doc build if git is absent * Updated from global requirements * Tests: Define fake\_constants as constants * Doc: Remove incorrect run\_tests.sh documentation * fix invalid uuid warnings from test\_quota * Fix fake uuids in NetApp ESeries unit tests * Fix invalid uuids in sheepdog unit tests * Use utils.convert\_str to convert HTTP header values * Remove deprecated ISERTgtAdm * Set backup import volume id to real uuid * fix ConsistencyGroup volume\_type\_id field * Updated from global requirements * Storwize: Multiple management IP not raising exceptions * Fix ExtraSpecsOpsTestCase sub unit tests * check per\_volume\_gigabytes quota for cinder extend * Imported Translations from Zanata * Updated from global requirements * Make cinder-all binary deprecated * Add in-tree tempest tests * Update SolidFire driver to store cluster info * VMware: Reduce volume creation time * Fix typos in Cinder files * Fix driver.failover\_host call in manager.py * Remove "patch mock to raise for invalid assert calls" * Add test for checking object compatibilities * Update the Administrator guide links * Update \_usage\_from\_backup() method with versionedobjects * Dell SC: Updated to utilize provider\_id * Updated from global requirements * Sheepdog:make full use of all sheepdog nodes * Enable str2size() to handle long int for i386 * Imported Translations from Zanata * Hacking: Ignore tools/ for C303 * Pylint: refactor ignored message lists * Brocade FCZM: Fix LOGOUT\_PAGE reference * Fix volume retype failure with on-demand policy * Define context.roles with base class * Fix invalid error message of volume create * Stop using tpool with 
oslo\_db * Huawei: Do not do split if replication pair abnormal * Storwize: fix unmapped multi-attached volume failure * Cleanup DB schema after Mitaka * Fix uuid warnings from api.v2 unit tests * Dell SC: Checking volume size in create\_volume\_from\_snapshot * Fix uuid warnings from api.v1 unit tests * Fix api/v1 unit test inconsistencies * Add fake\_cgsnapshot for unittest use * Fix typos in Cinder files * http header value must be a string * python34 tests don't run without python3-dev * Re-enable image to volume with failure test * Dell Eqlx: Cloning volume does not honor new size * Storwize: fix deadlock and major lock granularity issue * Fix for NexentaEdge jsonrpc url formatting * Support oversubscription for IBM Storwize/SVC 8.0.0 ----- * Huawei: Fix getting admin\_metadata * Imported Translations from Zanata * NexentaStor4 iSCSI: convert blocksize to str * Conditionally restore display\_name * NexentaStor drivers: resize volume if cloned image is larger * rtstool surrounds IPv6 addresses by brackets * Imported Translations from Zanata * NexentaStor: Remove extra format string specifier * Fix typos in Cinder files * Imported Translations from Zanata * XtremIO handle errors in terminate\_connection: * Imported Translations from Zanata * Check hasattr before calling get in SolidFire * Huawei: Fix getting admin\_metadata * Remove unused utils.find\_config function * Sheepdog:optimization of connection error handling * DRBD: extend volume if larger than snapshot * Imported Translations from Zanata * Fix invalid uuids in rbd unit tests * Dont query image metadata for empty volume list * Remove entry\_points from setup.cfg * Don't suppress the path part of glance url * Fix meaningless quota exceeded exception info * Imported Translations from Zanata * Don't disallow quota deletion if allocated < 0 * Added more options while uploading volume as image * VMware: Support for non-default port * Remove PyMySQL and psycopg2 from test-requirements.txt * Imported Translations from Zanata * Provide user friendly message for FK failure * Fix misspelled word in help text * Datera: resize volume if cloned image is larger * Fix Brcd lookup service to use defined southbound protocol * Docs: Add genconfig to devref toc * Updated doc string * Imported Translations from Zanata * Report 2.0 as supported version of RPC APIs * Fix volume RPC API methods related to backups * Report 2.0 as supported version of RPC APIs * NexentaStor5 iscsi: resize volume if cloned image is larger * Imported Translations from Zanata * EMC VMAX - Operations and timeout issues * Imported Translations from Zanata * Imported Translations from Zanata * Fix the format problem of LOG in failover\_host * Dell SC: Error results moved to json * Dell SC: create\_cgsnapshot returning wrong structure * Re-add policy check for get\_snapshot * Enable functional test for volume creation * Remove bandit.yaml in favor of defaults * NexentaEdge: resize volume if cloned image is larger * Storwize: Set multiattach property per pool * Fix volume RPC API methods related to backups * Imported Translations from Zanata * Imported Translations from Zanata * Fix race condition when toggling SP * Dell SC: create\_cgsnapshot returning wrong structure * Hitachi drivers: resize volume if cloned image is larger * Huawei: Record and check LUN wwn * Mark deprecated options correctly * SMBFS: fix parsing volume type extra specs and metadata * Update reno for stable/mitaka * VMware: Bump driver version * VMware: Bump driver version * Imported Translations from 
Zanata * VMware: Refactor unit tests * Update .gitreview for stable/mitaka * Huawei: Record and check LUN wwn 8.0.0.0rc1 ---------- * Reserve 5 migrations for Mitaka backports * Add update\_host for backup in cinder-manager * Fix formatting in vol/snap delete API messages * Cleaned duplicate dictionary values * Fix compatibility mode of backup jobs scheduling * Add devref on rolling upgrades * 3PAR fix create\_cloned\_volume for larger size * VNX: Enhance migration start verification * Check volume\_id consistent when creating backup * Fix race condition when toggling SP * Huawei: Check when attach hypermetro volume * Huawei: Check the QoS status before we use * LeftHand: Add default SSH timeout and key values * Pass correct source\_id to \_handle\_bootable\_volume\_glance\_meta() * Alter wrong comment about param filters * Use get\_by\_args instead of get\_by\_host\_and\_topic * Fix format in cinder/volume/drivers/netapp/dataontap/block\_base.py * XIO: refactor \_send\_cmd to reduce the redundant * Revert "VNX: Set timeout for naviseccli" * NetApp E-Series: Volumes not added to consisgroup * Fix volume migration VolumeType exception * Fix retype failure when original has no volume type * Add backup RPC API v2.0 * IBM XIV/DS8K: Implement Replication v2.1 * Huawei: Implement v2.1 replication * Remove circular import to fix config generation * Wrong comment line in quotas.py * XtremIO handle errors in terminate\_connection: * Imported Translations from Zanata * VNX: Set timeout for naviseccli * VNX: Allow set migrate rate when migrating volumes * Fix ScaleIO driver does not honor clone size * Fix py34 error of indexing 'dict\_keys' object * Imported Translations from Zanata * Fix error message when running genconfig * Huawei: Creating hypermetro failed in the remote pool * Delete deprecated configuration in NFS tests * Imported Translations from Zanata * Emit notifications for volume retype * Fixup stats key for replication in init\_host\_with\_pc * EMC VMAX - SSl connection is not picking up values * Add the key 'replication' and set the correct 'replication\_status' * register the config generator default hook with the right name * Fix for Pure drivers not checking full client version * Fixup for Pure drivers cheesecake replication setup * Remove empty directories and unused files from unit tests * Cleanup 3PAR/LeftHand failover\_host exceptions * Use googleapiclient import instead of apiclient * Enable api.view.test\_versions unit tests * Allow clone volume with different size * DRBD driver: resize volume if cloned image is larger * Fix 500 error if 'offset' is out of range * Add volume RPC API v2.0 * Imported Translations from Zanata * typo: add a missing '}' in comment * Fix up failover\_host exceptions to preserve states * Add release note for delete volume with snaps * RBD: remove duplicate clone test * Disallow quota deletes if default under usage * EMC VMAX - SnapVX and other snapshot improvements * Move replication\_status update to init\_with\_rpc * Lazy load a project quotas's default values * Permit volume type operations for policy authorized users * SMBFS: fix parsing volume type extra specs and metadata * Host selection in backup service * Add volume\_type to volume object expected\_attrs * VNX: Update replication for v2.1 * Imported Translations from Zanata * Pass new volume size when cloning (blockbridge) * Storwize: Update replication to v2.1 * LeftHand: Create cloned volume didn't honor size * Switch failover-host from rpc call to cast * Dell SC: Active\_backend\_id wrong 
type * Show qos\_specs\_id based on policy * Remove remaining oslo-incubator code from Cinder * Updated from global requirements * Pass RBD order to clone call * Remove "sqlite\_clean\_db" option * Exclude test.py from sample conf * Fix invalid UUID warnings for test\_volume\_\* * Huawei: Check before delete host * rbd: Change capacity calculation from integer to float * Fix failure with rbd on slow ceph clusters * Remove those unnecessary statements "return True" * Imported Translations from Zanata * Run py34 tests with plain 'tox' command * Report versions in cinder-manager service list * Dell SC: create\_cloned\_volume didn't honor size * Cleanup Mitaka release notes * Dell SC: Incorrect values in REST API Login call * Moved CORS middleware configuration into oslo-config-generator * Add get\_volume\_stats for LoggingVolumeDriver * Correcting thin provisioning behavior * SMBFS: Fix initialize connection issues caused by in-use images * NetApp: volume resize using clone fails with QoS * VMware: Unit test refactoring * Fixes creating volume issue for multiple management IPs * Imported Translations from Zanata * Add volumes table definition when migrating to 67 * Trim 5s+ from storwize unit tests * Allow api\_version\_request.matches to accept a string * microversion header for legacy endpoints removed * Update quotas to handle domain acting as project * Continue volume delete on encryption key delete errors * Fix backup import * Unset executable bit in release note * DRBD: Policy-based waiting for completion * Block subtractive operations in DB migrations * Handle exceptions about snapshot in backup create * Replace logging with oslo\_log * Support https keystone CA checking in volume quotas * Imported Translations from Zanata * Fixup release notes for v2 -> v2.1 replication impls * support new HTTP microversion header * Fix for glance\_metadata during volume migration * Readd iscsi\_target table * Imported Translations from Zanata * Fix issue with Pure drivers delete\_snapshot exception handling * Add backend id to Pure Volume Driver trace logs * Don't fail on clearing 3PAR object volume key * Fix invalid uuid warnings in backup unit tests * Update quota\_utils with import for keystone\_auth * Fix invalid uuid warnings in test\_volume.py * Tintri image direct clone * Use get\_by\_args instead of host\_and\_topic * Remove a vol in error state from a CG * Fix call to Barbican Secrets create() * 3PAR use same LUN id for each export path * Fix oslo.service config generation * Update unittest for Storwize pool-aware-cinder-scheduler * Huawei: Check the real size before extend volume * Revert "Remove Cisco FC Zone Manager Driver" * Make query to quota usage table order preserved * Allow for Pure drivers to verify HTTPS requests * Fix volume filtering for quoted display name 8.0.0.0b3 --------- * Use openstack.org URLs in README * Add attach/detach doc to index * Don't run test\_volume.VolumeTestCase twice * Fixes running error for storwize \_run\_ssh * Dell SC: Support Replication V2.1 * Use OSprofiler options consolidated in lib itself * Fix test\_create\_volume\_flow test issue * Fix test isolation issues related to versions * Add missing requirements * Convert huawei ISCSIDriver unit tests to RFC5737 addrs * Changes in ScaleIO configurations options * Storwize/SVC: Clone between different size volumes * Huawei: Manage volume fails due to lower array version * Fix exception during service update * Huawei: Create snapshot have a log error * EMC VMAX - Limit SG and MV to 64 characters * Fix 
spelling mistake in docstring * EMC VMAX - Recreating SG when it has been deleted * VMAX-Replacing deprecated API EMCGetTargetEndpoints * 3PAR: Update replication to v2.1 * LeftHand: Update replication to v2.1 * Update Pure replication to cheesecake * Fixed logging for oslo versioned objects * Move replication volume manager warnings to info * Trim 50s from huawei ISCSIDriver unit tests * Copy unit tests for StandardLogging fixture from Nova * Add 'conf' param for TextGuruMeditation autorun setup * Use is\_int\_like method from oslo\_utils * Fix sshpool.remove code * Remove an useless and wrong call * Copy StandardLogging fixture from Nova * Add ability to failback for replication V2.1 * Storwize SVC multiple management IPs * Updating Datera DataFabric Driver to v2 of Datera DataFabric API * Capture warnings into logs * Return BadRequest for invalid unicode names (continued) * IBM Storwize with pool-aware-cinder-scheduler * Fix HTTP sessions left open in Brocade zone driver * Fix invalid uuid warnings in block device unit tests * Fix invalid uuid warnings in scheduler unit tests * Add necessary fields to volume creation * Add scheduler RPC API v2.0 * Clean up replication v2.1 (Cheesecake) RPC API * always use pip constraints * Remove unused columns from Service ORM model * CONF add suppress\_requests\_ssl\_warnings * Make nullable of fields in db model and object match * Remove unused pngmath sphinx extension * Delete volumes with snapshots * Clarify glance\_api\_insecure help text * NetApp: Fix SSH Client File Creation in Unit Test * Trim 12s from disco unit tests * Remove QoS settings from SolidFire attributes * EMC VMAX - get iscsi ip from port in existing MV * Misprint in policy.json * Re-enable -1 child limits for nested quotas * Support IP lookup of target portal DNS name * Updated from global requirements * Huawei: delete\_snapshot need not return any value * Match the ip more accurately in Huawei driver * Huawei: Consider bandwidth when selecting port * Add description when rename LUN in Huawei driver * Huawei: Code cleanup * Modify the number of params of hypermetro in HuaweiDriver * EMC VMAX - Changing PercentSynced to CopyState in isSynched * Add basic workflow of attach/detach to devref * Change frozen error messages from \_LE to \_ * Replication v2.1 (Cheesecake) * Change Fail to Failed in error messages * NetApp: Add Consistency Group support for E-Series * Return BadRequest for invalid Unicode names * Check for service existance in capabilities API * Enable all unit tests on Python 3.4 * Adding general notes about rolling upgrades * Move deprecation release notes to correct section * Tests: Don't assert on LOG.warn * Return all target\_wwpns for FC storwize * Fix error message in cinder type-update * Fix NoneType error in service\_get\_all * Add os-brick rootwrap filter for privsep * LeftHand: Updating minimum client version * NetApp: Implement CGs for ONTAP Drivers * Fix HNAS iSCSI driver attachment * NetApp: Support iSCSI CHAP Uni-directional Auth * Avoid hardcoding value from oslo library * cinder-api-microversions code * Port netapp dataontap driver to Python 3 * Scalable backup service - Liberty compatibility * Huawei: Log the IP when login fails * Huawei: Fix create volume with prefetch value error * Huawei: Don't fail when port group does not exist * Force target\_lun to be int type to make os-brick happy * Changed Nexenta default chunksize from 16k to 32k * Volume manage/unmanage support to ZFSSA drivers * Removed unreachable code * Disable multi-attach for RBD * 
Correct iscsi lun type * Fix invalid uuid warnings in object unit tests * Imported Translations from Zanata * Fixed help message for AllocatedCapacityWeigher * EMC VMAX - not cleaning up HW Resource WWPN initiators * Huawei: Fixed url range * Updated from global requirements * Test: use assert\_has\_calls() instead * Move anyjson to test-requirements * Huawei: Judgement failure when creating hypermetro * Remove useless get\_replication\_updates driver call * [LVM] Restore target config during ensure\_export * 3PAR get host by WWN now handles mixed cases * NetApp: E-Series remove snapshot limitations * Fix race condition in RemoteFS create\_snapshot\_online * Add SIGHUP handlers to reset RPC version pins * Handle OverQuota exception during volume transfer * Remove extra get\_active\_zoneset query from Brocade CLI * Tintri image cache cleanup * Huawei: Ensure the port is online * Rtstool sets wrong exception message on save * Test middleware test\_faults to Python 3 * Split out NestedQuotas into a separate driver * Revert changes to use ostestr * doc: Fix wrong description about adding RESTful API * Propoerly call oslo's Service stop and wait * Remove unused kill method from Service * Wait for periodic tasks to stop on exit * Dell: Failed vol create could leave dead volumes * Roll back reservations quota in RPC if necessary * Scaling backup service * EMC ScaleIO - fix bug in extend volume * VMware: manage\_existing for VMDK driver * VMware: manage\_existing\_get\_size for VMDK driver * Delete unuseful code in Huawei driver * Cleanup unused conf variables * Port infortrend driver to Python 3 * Disable Rally backup-related scenarios * Fix last Python 3 issues in zonemanager * Port hgst driver to Python 3 * Port API v1 and v2 to Python 3 * Port API contribs to Python 3 * Port test\_emc\_vnx to Python 3 * Fix service-list filter * Coho volume stats update * Sheepdog: Fix malformed image url format * Update quota when volume type renames * Add restore\_volume\_id in backup * Updated from global requirements * Support for consistency groups in ScaleIO driver * Use of metadata id instead of metadata name * Fix 'asert' typo in unit test * Fix dynamic import of CONF.volume\_api\_class * Filter & goodness functions in NetApp drivers * Manage/unmanage volume in ScaleIO driver * HNAS driver: Fix SSH and cluster\_admin\_ip0 bug * Remove duplicated code in volume manager and base driver * Don't use Mock.called\_once\_with that does not exist * EMC VMAX - necessary updates for CG changes * RBD: use versioned objects * Tests: Fix calls to non-existent assert methods * Updated from global requirements * Profiler: make it possible to run without loading osprofiler * Profiler: don't call trace\_cls if profiler is not enabled * Tests: Set volume listen port to test\_service\_listen\_port * Remove Cisco FC Zone Manager Driver * Add variable QoS to NetApp cDOT drivers * Move integrated tests to 'functional' directory * py3: Fix usage of JSON in API contrib * Port utils.safe\_minidom\_parse\_string() to Python 3 * Include allocated quota value in the quota reserve * Log stack trace for middleware faults * hacking: Fix false positive in C302 check * Port backup drivers to Python 3 * Storwize: Implement v2 replication * Volume create fails with FakeISCSIDriver * Adds support for configuring zoning in a virtual fabric * ScaleIO QoS Support * Zfssaiscsi driver should not use 'default' initiator group * Update db in CGSnapshot create * 3PAR: Create consistency group from source CG * Remove old client version checks 
from 3PAR driver * Improve logging to debug invalid "extra\_specs" entries * Huawei: Implement v2 replication (managed) * DRBD: Fix arguments for resize\_volume DBus API call * Port objects unit tests to Python 3 * Updated from global requirements * VNX: Add 'None' check in parsing provider location * NexentaStor5 iSCSI driver unit tests * LeftHand: Implement un/manage snapshot support * Updated from global requirements * EMC VMAX - Method not being called for V3 * Allow for eradicating Pure volumes on Cinder delete * HNAS driver: retry on Connection reset fix * Fix issue in hacking with underscore imports * DRBD: Compat for current DRBDmanage versions * Fix variable scope issue in try-except * Imported Translations from Zanata * Bootable filter for listening volumes from CLI * Clean uploading volume when c-vol restarts * mock time.sleep in Broadcom unit test * Don't create cgsnapshot if cg is empty * Added osprofiler headers to cors middleware * Imported Translations from Zanata * Fix the replication spelling in message * 3PAR fix driver to work with image cache * Updated from global requirements * Improve logging for volume detach * Remove useless unit tests mock in Huawei driver * Return updated volume type after updating * Remove 'external=True' in Huawei driver * Fix XtremIO multi cluster support * EMC VMAX - Fix for last volume in VMAX3 storage group * Filtering type extra-spec support to ZFSSA drivers * Zfssaiscsi driver should return target\_lun as int * Pin RPC and object version to lowest running * Report RPC and objects versions * Add missing RPC calls versions to rpcapi modules * Huawei: Balanced FC port selection when zoning * VNX: Replication V2 support(managed) * Adds HTTPS southbound connector for Brocade FC Zone Driver * Replication V2 for Pure Storage * Support ZeroMQ messaging driver in cinder * Remove redundant definition of 'deleted' * Fix update\_consistencygroup log info * Correct opt type for nexenta\_chunksize/blocksize * Huawei: Add manage/unmanage snapshot support * Remove deprecated options from NFS driver * Fixing HNAS XML parser * Replace exit() by sys.exit() * IBM XIV/DS8K: Implements Replication V2 * Support cinder\_img\_volume\_type in image metadata * Adds friendly zone name support * LeftHand: Implement v2 replication (unmanaged) * EMC VMAX - VMAX driver failing to remove zones * Remove access\_mode 'rw' setting in drivers * Tests: Strengthen assertFalse assertions * Fix laggard cisco FC zone client unit tests * Fix xtremio slow unit tests * Fix sluggish rbd unit tests * Fix torpid coordinator unit tests * Rework Storwize/SVC protocol to fix add\_vdisk\_copy * NetApp ONTAP - Reapply API tracing * Run flake8 also on cinder/common * Replace assertEqual(\*, None) with assertIsNone in tests * Wrap the method to calculate virtual free capacity * Add pagination support to consistency group 8.0.0.0b2 --------- * Fix NFS driver unit test docstring * Adding action to policy.json * fix NFS driver max\_over\_subscription\_ratio typo * Add pip-missing-reqs tox env * Add missing requirements * Added 'bootable volume' filter for non-admin user * Move wsgi to oslo\_service.wsgi * Set LVM driver default overprovisioning ratio to 1.0 * Tegile tests: Change volume size to int * VMware: Fix release notes * FlashSystem reports error in \_find\_host\_exhaustive() * Huawei: Refactor driver for the second time * Add cinder backup driver for Google Cloud Storage * NexentaStor 5 iSCSI backend driver * NexentaStor 5 NFS backend driver * DRBD: Rename "dres" to avoid confusion with 
"res" * EMC VMAX - Incorrect SG selected on an VMAX3 attach * Activate sparse copy for Netapp * Add Fujitsu ETERNUS DX Volume Driver (FC part) * Imported Translations from Zanata * XtremIO: FC initialize connection failed * Updated from global requirements * Disallow transferring volume in consistency group * Reduce use of eval() * Remote unused iscsi\_targets table * ITRI DISCO cinder driver * NetApp eseries: report max\_over\_subscription\_ratio correctly * Python 3: Replace reduce and xrange with six.moves * Infrastructure to use the DRBD transport for NOVA * NetApp ONTAP: Fix extending volume beyond lun geometry * Refactor Windows drivers using os-win * Base iSCSI initiator validation * Fix ChunkedBackupDriver \_create\_container * XtremIO: Set the location of a CA certificate * Huawei: Add manage/unmanage volume support * Remove DB calls from Pure Volume Driver CG methods * Dell SC: Adding logging to httpclient * LeftHand: Implement v2 replication (managed) * Enable trim/discard in SolidFire's driver * API Middleware fault: Log exception type * Re-add Nexenta drivers * Remove invalid NetApp QoS keys * Fix thin provisioning flags in NetApp drivers * Return BadRequest for invalid Unicode names * Handling Invalid argument iflag=direct in dd * Execute mount.nfs check as root * Report discard support for Dell SC connections * Add ConsistencyGroupStatus enum field * 3PAR: Adding volume checks to manage snapshot API * Added Keystone and RequestID headers to CORS middleware * Allow host and instance\_uuid at attach * Trival: Remove 'MANIFEST.in' * Imported Translations from Zanata * Add finish\_volume\_migration to volume object * Fix tox -e fast8 * Updated from global requirements * Removes the Violin 6000 FC and iSCSI drivers * Dell SC: Implements Replication V2 * Storwize: Add force flag on vdisk mapping call * Storwize/SVC: Volume manage using source-name * Enable consisgroups in SolidFire driver * Replace deprecated library function os.popen() with subprocess * Constant defined for sqlAlchemy VARCHAR & INTEGER * Fix image volume creation error * Cleanup in backup reset status * NetApp FC drivers should not set 'rw' access mode * Change minimum 3PAR API version for replication * Optimize 3PAR array ID retrieval * Add metadata aliases to Volume object * Add pagination support to volume type * Re-enabled hacking checks for H105 * Don’t log warnings for image cache when disabled * Fix grammatical mistake in defining articles * Scality SOFS: don't always read /proc/mounts twice * Add BackupStatus enum field * Updated from global requirements * Fix issue with flake8 check and full paths * Fix race conditions in migration 061 * Replace use of mox with mock in test\_nfs * Updated "deleted" column of volume\_type\_access * Add Fujitsu ETERNUS DX Volume Driver (again) * Add backref relationships to ConsistencyGroup obj * Misspelling in message * Fix some warnings about 'unused variable' for XIO * Updates consistency group for ibm svc driver * Add volume driver for Tegile IntelliFlash array * Fix to allow RBD delete an unprotected snapshot * Remove the deprecated ibmnas driver * Wrong usage of "a" * VMware: optimize in get\_cluster\_refs * Rebrand HP XP driver to now be HPE * Fix creating volume by snapshot for GPFS driver * Rename Huawei drivers * Check min config requirements for rbd driver * Remove API races from delete methods * Remove API races from attach and detach methods * Quota API is now compatible with keystone API v2 * Add pagination support to Qos specs * Remove name\_id when creating 
volume with cache enabled * Imported Translations from Zanata * Updated from global requirements * Enhance the stats reported from the Pure Volume Drivers * Allow replicated volumes to be recoverable * Imported Translations from Zanata * Retyping volume got error under max vol limit * Add config option to enable reporting discard * Storwize: Split up \_\_init\_\_ into separate files * Fix volume upload failure with glance\_api\_version=2 * EMC VMAX - Extend Volume for VMAX3 * XtremIO add support for create CG from CG src * Fix bugs caused by porting to python3 * Imported Translations from Zanata * Disable capabilities based on 3PAR licenses * Add empty initialize\_connection method to PureBaseVolumeDriver * Fix python 3.x import issues with pure.py * Override osapi\_volume\_listen\_port for test\_workers * Updated from global requirements * replace deprecated oslo\_messaging \_impl\_messaging * Remove downgrade migrations * Fix delete\_snapshot error case in Pure driver * 3PAR: Implement un/manage snapshot support * Error handling for invalid SLO/Workload combo * 3PAR: Implement v2 replication (unmanaged) * Add serial number to eseries ASUP payload * XtremIO: fix generic glance cache with XtremIO * VMware: Add support for VVOL datastores * Tooz locks * Small refactoring in test\_admin\_actions.py * Imported Translations from Zanata * Making NFS \_find\_share efficient * Change the format of some inconsistent docstring * Preserve request id in Cinder logs * Add volume\_extensions:quotas:delete to policy.json * Pass volume\_id in request\_spec for manage\_existing * VMware: Fix volume copy across vCenter datacenters * Imported Translations from Zanata * Modify VO so that obj.get always defaults to None * Replace use of mox with mock in test\_quotas * Replace use of mox with mock in test\_rbd * Skip check whether volume is local if it's None * Imported Translations from Zanata * Implement refresh() for cinder objects * EMC VMAX - get\_short\_host\_name not called in find\_device\_number * Move retype quota checks to API * Implement snapshots-related features for Block Device Driver * Refactor cinder.utils.is\_valid\_boolstr * Add synchronization in Block Device driver * Recalculate allocated value of parent project * Updated from global requirements * Volume migration: add 'name\_id' as valid skippable field * Fix invalid cache image-volume creation * Imported Translations from Zanata * Remove eventlet WSGI functionality * Deprecated tox -downloadcache option removed * Fix for showing default quotas to non-admin user * VNX: Fix failure in SnapCopy feature * Use wild card for passing env variable * Recognize extra fields in CinderObjectDictCompat * Imported Translations from Zanata * VNX: Fix issue in deleting cg/cgsnapshot * Add validation for volume\_type of volume object * Update Pure REST API supported version numbers in Pure driver * Updated from global requirements * Retype functionality in Tintri driver * Fix non-migration swap with error * Replace use of mox with mock in test\_solidfire * Check context before returning cached value * 3PAR: Implement v2 replication (managed) * Use Cinder API v2 for Rally scenarios * Check backup service before backup delete * Python 3: fix a lot of tests * EMC VMAX - Fix for randomly selecting a portgroup * Volume driver for Coho Data storage solutions * XtremIO: fix iscsi chap discovery bug * XtremIO: fix missing multiattach flag * Fix StrOpts with integer defaults * Dynamically Pull Out Option Sections * Use the catalog to retrieve 
glance\_api\_servers * Address potential races in SolidFire VAG * Update migrate\_volume API to use versionedobjects * Storwize: add config option to control flash copy rate * Remove version per M-1 release instructions * Use proper config option to connect to keystone * Robustify writing iscsi target persistence file 8.0.0.0b1 --------- * Adding devref about genconfig * LIO: Handle initiator IQNs as case insensitive * Fix dictionary key error * VMware: Replace mox with mock * VMware: Unit test refactoring (image to vol - 2/2) * VMware: Unit test refactoring (image to vol - 1/2) * Imported Translations from Zanata * Remove deprecated LVM ISCSI and ISER Drivers * Delete unused codes in rbd.retype * NetApp: E-Series fix JSONDecodeError on first add * Add Mitaka-1 release notes * NetApp: Refactor E-Series tests * xio: fix regression in authentication * Add some missing fields to Volume object * Imported Translations from Zanata * Add retype in lvm driver * Updated violin driver check for volume objects * Take into consideration races in XtremIOClient3 * Optimize "open" method with context manager * Updated from global requirements * Sheepdog: Optimization of error handling * Fix the bug of can't get the desired image info * Cleanup orphaned code in sqlalchemy API * Cleanup orphaned code from image and manager * Cleanup orphaned code from brick LVM * Cleanup orphaned code from ceph backup driver * Force releasenotes warnings to be treated as errors * Remove db access in VNX driver * Fix quotas issue during volume transfer * Declare multiattach is True in Dell Eqlx driver * Nexenta Edge iSCSI backend driver * RBD: Make snapshot\_delete more robust * Hacking Checks for assertTrue/IsNone() * Remove netaddr useless requirement * Improve metadata update operations * Add atomic conditional updates to objects * Revert "Add Scality SRB driver" * VMware: Validate extra spec opt vmware:clone\_type * Update list\_replication\_targets * Port zonemanager to Python 3 * Port key manager to Python 3 * Move oslo-incubator's scheduler module to cinder * Remove stubs and add resource cleanup * VMware: Skip unsupported datastore types * Port IBM storewize\_svc driver to Python 3 * Declare multiattach is True in RBD driver * XtremIO fix attach readonly bug * CG API should return volume type IDs * EMC VMAX - Change naming convention for MV and SG for FAST * Fix the bug of OSError when convert image * Don't build two tox envs for pep8(-constraints) * Add guidelines for release notes to devref * Imported Translations from Zanata * Imported Translations from Zanata * Updated from global requirements * Eager load columns in volume\_get\_active\_by\_window * Backup snapshots * LeftHand: Remove self.db assignment * Deprecate \*\_multipath\_enabled flag for IBM drivers * Fix debug output for cinder-volume-usage-audit * Add check\_uptodate.sh --checkopts to "pep8" * Move get\_by\_id to CinderObject * Imported Translations from Zanata * fast8: Skip git rm'd files * Manage existing: fix volume object saving * Fix swap\_volume for case without migration * Remove .mailmap file * Remove db access from 3PAR and LH cg functions * Additional VAG support for SolidFire * Fix InstanceLocalityFilter scheduler filter * Add a FakeGateDriver * Fix metadata retrieval in GPFS driver * Imported Translations from Zanata * VNX: Fix metadata get overriden issue * Added VAG support to SolidFire * Refactor HP LeftHand driver to now be HPE * Remove db access from XIV/DS8K CG functions * Do not use api-paste.ini osprofiler options * Remove 
duplicate keys from dictionary * Bad exception clauses order * Imported Translations from Zanata * Check specific driver enabled in create\_backup * get\_all\_snapshots: Fix log message typo * Updated from global requirements * ScaleIO extend volume round up capacity * Port HP 3PAR driver to Python 3 * Modify test\_hpe3par to support random hash * Imported Translations from Zanata * Fix ScaleIO driver provisioning key * Imported Translations from Zanata * Remove the HP CLIQ proxy driver * Retry on database deadlock on service\_update method * Downstream Fix for Genconfig * Correct assertDictMatch argument order * 3PAR Fix find\_existing\_vluns * Port xio driver to Python 3 * Remove kombu as a dependency for Cinder * Port EMC VMAX to Python 3 * Port EMC VNX CLI to Python 3 * Sheepdog: Fix a problem about multi backend * Imported Translations from Zanata * Update compression license check * py3: Fix error handling in prophetstor driver * Updated from global requirements * Refactor HP 3PAR drivers to now be HPE * add "unreleased" release notes page * Add os-win to requirements.txt * Update extend\_volume API to use versionedobjects * Update retype API to use versionedobjects * Update get/delete\_volume API to use versionedobjects * Update create\_volume API to use versionedobjects * Test for object version hash changes * Fix cinder objects unit test registration * CG driver function should not access db * Add test for snapshot filtering by project id * Imported Translations from Zanata * Use oslo\_config new type PortOpt for port options * Update CONTRIBUTING.md to CONTRIBUTING.rst * CG creation should be scheduled on backend level * Removal of deprecated NPIV option in Storwize * Fix ZFSSA drivers' local cache bugs * OpenStack typo * Change nfs to NFS in the help strings of nfs.py * Port zfssa driver to Python 3 * Port vzstorage to Python 3 * Port cinder.utils.monkey\_patch() to Python 3 * XtremIO fix create CG from src flow * Don't use default=None for config options * Imported Translations from Zanata * 3PAR drivers volume size conversion is incorrect * Port vmware datastore to Python 3 * Use Service object instead of DB API directly * Docstring fix in scheduler-stats * Add LC\_ALL=C to lvcreate, lvextend and pvresize * Port cinder.hacking to Python 3 * Port test\_tintri to Python 3 * Add reno for release notes management * Imported Translations from Zanata * Fix failure of unit test TestCinderAllCmd * Execute mount.nfs check with absolute path * Imported Translations from Zanata * Update minimum tox version to 1.8 * Update cinder-manage man to match current options * Replace warnings.warn in sqlalchemy-api * Replace warnings.warn in glusterfs * Using extra-specs in cloned vols for Nimble driver * SheepdogDriver: Improve get\_volume\_stats operation * Add retype logic in manage\_existing for VNX * Adds CORS support to Cinder * Fix calling delete\_zones method with a wrong argument * Return volume\_type extra specs based on policy * Revert "Handle correct exception raised by python-novaclient" * NetApp: Fix issue with updating E-Series password * NetApp: Fix issue with E-Series volume expand * Update register\_opts hacking check to allow tuples * Updated from global requirements * optimize the copy\_image\_to\_volume method of sheepdogdriver * Fix UsedLimitsController's authorizer to soft * Imported Translations from Zanata * Handle correct exception raised by python-novaclient * lvconvert missing from cinder volume.filters * Support insecure NAS security options in Quobyte * Brocade driver 
add\_zone optimization * Imported Translations from Zanata * Add missing cgsnapshot field to Snapshot object * Eager load snapshot\_metadata in \*snapshot\_get\_all * Case sensitivity problem in cinder scheduler * Add protocol to help of glance\_api\_servers option * SMBFS: Fix retrieving total allocated size * Make relationships in objects consistent * Imported Translations from Zanata * Remove the jointly loaded model in finish\_volume\_migration * Update docs to generate Guru Meditation Report * Add qos\_specs\_id to volume type show * Fix NoneType Attribute error * Support initialization state in Backup Manager * Imported Translations from Zanata * Add -constraints for CI jobs * Fix typos about 'target\_discovered' * NetApp: Cleanup if E-Series volume create fails * VMware: Unit test refactoring * Cleanup for cinder tests with CGSnapshot * Imported Translations from Zanata * VMware: Enforce min vCenter version * Add hypermetro support for Huawei driver * Updated from global requirements * devref doc: assorted fixes in "Unit Tests" * LIO: Let delete\_initiator succeed if iqn not found * CGSnapshot Object * Updates in consistency\_group in xiv/ds8k driver * Retype support for CloudByte iSCSI cinder driver * Add retries for Cisco FCZM client CLI \_cfg\_save * Updated from global requirements * Remove unused gettextutils from oslo-incubator * Wrong usage of "an" in the mesages: * NetApp: E-Series fix deletion of missing volume * Wrong usage of "an" in the mesages: an service * VMware: Relocate volume only during no disk space * Port Windows drivers to Python 3 * Use project id from volume when retyping volumes * Fix typo in LIO terminate\_connection error msg * Update the devref for volume migration * Implement update\_migrated\_volume for NFS driver * Only use LOG.exception in exception handler * Port API admin action tests to Python 3 * Port API types extra specs to Python 3 * Port API to Python 3 * Mark XML API as deprecated in Mitaka * windows: don't use LOG.exception if not logging an exception * Improve performance listing detail for volumes * Move CloneableVD to common functions * Fix updating only volume type is\_public * encryption\_api\_url requires a version * Cleanup/move code in Storwize Driver * Port WSGI tests to Python 3 * Fix method VolumeTypeList.get\_all * Use lvm\_conf\_file directory for LVM\_SYSTEM\_DIR value 7.0.0 ----- * Squashed commit of WebOb 1.5 and oslo.db fixes * Change default Exception code to 500 * Dell SC: Disable REST verify warnings * Update config format for replication\_devices * Fix log formatting for rbd driver * Fix Status-Line in HTTP response * Huawei driver handle volume exists error * Updated from global requirements * Tox fast8: use pep8 env dir * Move ssh\_utils tests to test\_ssh\_utils * Volume extend error does not catch exception * Fix test\_misc for WebOb 1.5 * ScaleIO driver: update\_migrated\_volume * Fix error string format for replication API calls * Port IBM flashsystem to Python 3 * Port ceph driver to Python 3 * Provide better debug log when 'cinder manage' fails * Remove references to Swift in chunked driver * Add insecure option for swift backup * ScaleIO: Fix protection\_domain\_id log message at init * Port test\_srb to Python 3 * Add fast format option for thick volume creation * Imported Translations from Zanata * Retype enhancement for EMC VNX cinder driver * Updated from global requirements * Verify volume is replication capable * Add device identifier to replication device * Port violin driver to Python 3 * Port EMC 
scaleio to Python 3 * Remove extra register\_opts() calls in netapp eseries * Add multi-initiator extra-spec for Nimble driver * Fix SolidFire target composition * Port targets test\_iet\_driver to Python 3 * Port image cache to Python 3 * py3: Run unit tests with ostestr on Python 3.4 * Add testresources and testscenarios used by oslo.db fixture * Clone cg support in VNX driver * Test\_backup\_swift: Don't leak notifications * test\_backup\_nfs: Set volume id per test * test\_backup\_swift: Set volume id per test * Add backup\_swift\_auth\_url to swift backup driver * Dell Eqlx: Support over subscription in thin provisioning * Hacking check for opt name registration * Add ability to set prefix in SolidFire Volume name * Updated from global requirements * Fix broken format string in vol mgr log * Detach volume on device validation failure * Convert Retry-After header parameter value to string * Fix capacity report error in Huawei driver * emc vmax driver: use integer division for Python 3 * VMAX Target iSCSI IP Address * Updated from global requirements * Delete a temporary volume in DB with admin context * Fix update quota of subprojects * Port test\_quobyte to Python 3 * Remove unused 'deprecated' option from auth\_strategy opt 7.0.0.0rc2 ---------- * Fix VMAX live migration problem * Imported Translations from Zanata * Port netapp SSC Cmode to Python 3 * Port test\_netapp to Python 3 * VMAX Truncate Storage Group Name * HNAS iSCSI manage does not work with spaces * Port scheduler host manager to Python 3 * Fix various Python 3 issues * Fix volume throttling to Python 3 * Ensure replication functions check driver status * Fix enable/disable\_replication raise InvalidVolume * Tests: print fake\_notifier queue upon mismatch * Cleanup orphaned code from cinder root directory * Image cache tests: use fake\_notifier * Implement extend\_volume method to Block Device driver * Small optimization in Block Device driver * DRBD: new option "drbdmanage\_devs\_on\_controller" * Obtain target authentication from database same as LIO target * Dell SC: cgsnapshot-delete doesn't actually delete * LVM: Make sparse\_copy\_volume private, use for capabilities * Dell SC: cgsnapshot-delete doesn't actually delete * Fix typo in cinder-config-generator.conf * Port test\_volume to Python 3 * Fix unreachable code pylint issues * Huawei driver add check before use a QoS * Report \*real\* free capacity in Huawei driver * Fix update Huawei driver issue * Fix Python 3 issues in wsgi * py3: Port pure driver test to Python 3 * GlusterFS: extend volume to the right path * Use pbr wsgi\_scripts to install Cinder WSGI entry point * Report \*real\* free capacity in Huawei driver * Imported Translations from Zanata * Fix Bad indentation pylint issues * Show image metadata * XtremIO fix remapping bug * Revert use of netapp\_lib from NetApp Drivers * Fix volume related operation in CloudByte driver * Add placeholder for migration backports in Liberty * Add placeholder for migration backports in Liberty * Revert use of netapp\_lib from NetApp Drivers * Remove the destination volume check in delete\_volume * Huawei driver add check before use a QoS * Fix VMAX live migration problem * Cleanup of Translations * Missing configuration opts from cinder.sample.conf * Use function capsulation in Huawei driver 7.0.0.0rc1 ---------- * Open Mitaka development * Create volume in cg enhancement in VNX driver * Remove duplicate keys from dictionary * Fix URL format in Huawei driver * Setup LVM\_SYSTEM\_DIR earlier in LVM.\_\_init() * Add
"fast8" tox env * Allow c-vol backends to start when some backends fail to load * Fix use of wrong storage pools for NetApp Drivers * VMware: Remove VMDK driver for ESX server * Use of ast for integers doesn't changes type * Make rpc\_client method private for VolumeCommands * Ignore Forbidden error on quotas-get for nested projects * Change ignore-errors to ignore\_errors * NetApp volume/snapshot delete performance fix * Replace soft\_delete in volume\_type\_access\_remove * Fix way of getting LUN id in Huawei driver * Fixing create CG from Cgsnapshot bug in VNX driver * Fix delete quota of subprojects * Dynamically create cinder.conf.sample * Updated from global requirements * Fix MITM vulnerability for Brocade FC SAN lookup * Imported Translations from Zanata * Fix cinder-all binary * NetApp: Fix volume extend with E-Series * Fix netapp\_enable\_multiattach default for E-Series * Check for None on service's updated\_at * Fix issue of volume after host-assisted migration * Attaching enhancement for EMC VNX driver * Tests: Split VolumeTestCase into separate classes * Local img-cache files ignored for image transfers * Snapmirror targets should not be reported as pools * Change check method for 'all\_tenants' * Create a page of drivers with stevedore.sphinxext * Enable certificate verification during image copy * Fix NetApp clone from glance failure * Storwize: Fix format string * Fix usage of novaclient * Check for empty attributes on SF volume * Fix volume lookups in SolidFire template caching * Don't rely on provider\_id for resource deletion * Fix Pure get pgroup volume snapshot name * Dothill fix options access * HPMSA fix access to common options * Lenovo driver fix access to common opts * Fixed missing log variable types * VMware: Fix invalid product name * Retrieve volume in update\_migrated\_volume * Swap the decorator order for PureFCDriver methods * Add ScaleIO Cinder driver commands * SolidFire provider\_id for snapshots on init * LeftHand Add update\_migrated\_volume to drivers * Huawei: fix multi REST-URLs bug * Improve coverage for snapshot\_get\_by\_host * LVM: add the exception handling to volume copy * Fix NetApp loop in clone of NFS backed images * Hacking log format arg check * backup init\_host cleanup exception handling * Making opt names consistent * Fix QoS keys not being available to scheduler * Add ConsistencyGroup object entries to linstack.py * Pass in snapshot refs for host on provider\_update * Filter hosts with pool in snapshot\_get\_by\_host * Fix typos in comments * Filter scheduler: Fix KeyError on invalid create request * Updated from global requirements * Return a tuple from SolidFire update\_provider\_info * Add unmanage default implementation to VolumeDriver * Correctly report multiattach in Pure drivers * Add manage\_existing and unmanage to BaseVD * Add migrate\_volume to BaseVD * Update update\_migrated\_volume in VNX driver * 3PAR Disable generic image volume cache * Add updated\_at into response of listing detail * Add os-brick's scsi\_id command to Cinder rootwrap * Fix order of arguments in assertEqual * Updated from global requirements * Error message in update\_migrated\_volume was incorrect * Remove empty rules from policies for API access * Fix HDS HNAS driver logging password as plain text * Add mechanism to update snapshot provider\_id * VMware: Remove global patching of open * VMware: Skip ESX hosts in maintenance mode * 3PAR Add update\_migrated\_volume to drivers * Updated from global requirements * Switch SVC driver to use lsportfc to 
determine FC target WWPNS * Use consolidated update for failover\_replication * VMware: Fix exception messages * Adds allow\_availability\_zone\_fallback option to Cinder * NetApp E-Series over-subscription support * ZFSSA driver to return project 'available' space 7.0.0.0b3 --------- * Get full volume model in Replication manager API’s * Fix problem of efficient volume copy for migration * Generic image-volume cache * Implement thin provisioning support for E-Series * Remove useless response checks in SolidFire driver * Sheepdog: Improve snapshot and clone operation * Fix the virtual port support in VNX driver * DotHill driver fix create\_cloned\_volume parent id * 3PAR Fix create\_cloned\_volume source volume id * Cloudbyte fix create\_cloned\_volume parent id * Scheduler-based over-subscription for NFS drivers * ScaleIO driver should use os-brick connector * Add instructions on how to deploy API under Apache * Sync volume versionedobject to ORM * Check before add lun to QoS in Huawei driver * Fix backup metadata import missing fields * Remove the unnecessary volume\_api.get(context, volume\_id) * Port image\_utils to Python 3 * Port volume transfer to Python 3 * Service object * Allow specified backend capabilities to be retrieved * Remove deprecated options * Add cinder.conf.sample to gitignore * Add delete\_snapshot and update\_snapshot rules * Handle KeyManager exception when deleting a volume * Fix a merge problem in VMAX driver * Don't require OpenSSL for unit tests * Add pagination to backups * Enhance FC zone support for Huawei driver * Add support for file I/O volume migration * Add debug logging before attaching volume in driver * Detect addition of executable files * Remove executable bits on files * NetApp DOT block driver over-subscription support * Cleanup for SnapshotObject * Add additional SSC extra specs to E-Series driver * Minor optimization * Adding delete-wait-loop for CloudByte Volumes * get\_replication\_updates call to driver is wrong * Earlier authority check for create volume API * Fix url in API response to get original * Efficient volume copy for generic volume migration * Volume status management during migration * Clean up line continuation in Storwize driver * LeftHand: Adding Consistency Group Support * 3PAR update driver to store stats * Remove driver.set\_execute() * Skip intermittent VMDK tests * Rework Scality SOFS driver to use RemoteFS class * Adds framework for get\_capabilities() feature * Implement AutoSupport for NetApp E-Series driver * Add retries to delete a volume in the RBD driver * Add support for volume groups and netapp\_raid\_type * Dell SC: init\_volume stale volume info fix * Validate filters in snapshot\*, backup\* in db.api * Fix volume copy for 'virtual' volumes in DotHill * Imported Translations from Transifex * Use version convert methods from oslo.utils * Implement manage/unmanage snapshot in Pure drivers * Reduce runtime of E-Series iSCSI tests * Cinder Nested Quota Driver * Add manage/unmanage volume support for Nimble * Python 3 incompatible expression fix * Local cache feature of Oracle ZFSSA drivers * Replace urllib.unquote with urllib.parse.unquote * Remove unused dependency discover * Update volume status AFTER terminate\_connection is done * Add unit test for backup get\_all * Incremental backup improvements for L * Sheepdog: improve create and delete operation * Implement function to manage/unmanage snapshots * Sheepdog: Add class for dog command executor * Don't eager load volume type specs on volume list * Filter out
extra-specs from type get for non-admin * Prevent that all backup objects are deleted * Add pagination to snapshots * Parameter osapi\_max\_limit is always used by default * Update NetApp Drivers to use netapp\_lib * Extend unit tests for backup get\_all * Fix nimble storage volume stats reporting * TemporaryImages to inspect image before conversion * Efficient image transfer for Glance cinder store * adds user\_id to check\_is\_admin * Fix backup list all\_tenants=0 filtering for admin * Add Cinder API wsgi application * Add consistency group tests to test\_volume\_rpcapi * Cinder replication V2 * force\_detach terminate\_connection needs connector * Assisted volume migration for Oracle ZFSSA drivers * Add https options and minor code changes * Fix bad except clauses order * Add volume retype support for Huawei driver * Fix URLs to admin-guide-cloud * Nested Quota Driver: Get Project Hierarchy * Check sio\_storage\_pools in check\_for\_setup\_error * Fix description for "Barbarism of editting a file" * Dell SC: Added logging for the find\_wwns functions * Add missing space to logged error in create volume * Cleaning up CONF.register\_opts() in compute/\_\_init\_\_.py * Update provider\_id column on SolidFire init * Add ability to update provider\_id during init * Fix \_LI() to \_LW() in LOG.warning message * Remove the method delete\_volume\_admin\_metadata * Support efficient non-disruptive volume backup in VNX * Validate value when user update quota * Add SolidFire svip to config options * Return multiple iSCSI portals in VNX Cinder driver * Avoid returning volume metadata in DotHill driver * Small cleanups in BaseVD/VolumeDriver * Port 3PAR drivers to use ABCMeta driver model * Updated from global requirements * Switch to the oslo\_utils.fileutils * Parse out SolidFire account from api response * Dell SC: Better exception handling in init\_conn * Port test\_nfs to Python 3 * Corrects the order of AssertEquals params in Quobyte tests * Adds the random option to cinder retry function * Extra specs may not be in volume types * VMware: Fix re-attach volume error for VC 5.1 * Remove duplicate keys from dictionary * LeftHand driver is ignoring reserved\_percentage * Update devref unit tests doc * Tests: Fix zfssa TestRestClientURL.test\_request * Test whether sample config generation works * Revert "mark oslo.vmware as optional dependency" * Register the volume\_opts config options in remotefs.py * Create CG needs extra specs * Configure space reservation on NetApp Data ONTAP * Dell SC: Fix error causing missed log message * Rename free\_virtual in capacity filter * Make migration's volume source deletion async * Add the ability to update type public status * Adds manage/unmanage methods for HNAS drivers * Update deprecated version of novaclient * Add version columns to services table * 3PAR: Adding Consistency Group Support * Remove unused function volume\_type\_encryption\_get * Refactor to remove duplicate code * Correct comment to be consistent with code * Allow 0 length name * Add volume migration support for Huawei driver * Cleanup for cinder tests with ConsistencyGroups * VMware: Change inventory folder hierarchy * Adapt SnapshotController to view builder * Add backup/restore methods to Sheepdog driver * Use min and max on IntOpt option types * Over subscription for HP 3PAR drivers * Allow CG without snapshot to be deleted * Tintri snapshot id * Add volume type support to Datera * Fix Pure create volume from cgsnapshot * Implement Clone CG in Pure Volume Drivers * Dell Eqlx: Use 
generic option ssh\_timeout * Make X-IO volume driver wait for delete volume to complete * Reduced file size to prevent timeout * Update SolidFire driver to pass newer flake8 * 3PAR: Adding performance metrics to volume status * Don't use context.elevated to get volume * Enable cinder-manage to remove services * VMware: Bump driver version * Fix backup init\_host volume cleanup * VMware: Deprecate vCenter version less than 5.1 * Updated from global requirements * Small clean up in volume object * Move import and export backup metadata to object * On Volume list only retrieve needed data from DB * Return volume name from backup\_restore * Switch Pure volume drivers to use Snapshot Objects * Don't return Exception when volume is detached * Use Requests HTTP library and URL safe names * Remove RetypeVD class, fix NFS driver retype * Fix Python 3 issues in Windows tests * Add objects.register\_all() to cinder-all command * GPFS volume encryption-at-rest support * VMware: Set virtual disk UUID to volume ID * Add oslo.vmware into test-requirements * Add multipath support to 3PAR iSCSI driver * Prevent volume already in CG to be added to another * LVM Thin Provisioning auto-detect * Fix HNAS iSCSI 32 targets limitation error * Remove unused fake objects in vmdk test module * VMware: Add volume ID in vCenter's volume config * Enhance PureISCSIDriver multipath support * Add unit test cases for the capacity scheduler * Fix argument order for assertEqual in tests * Fix order of parms in assertEqual for scheduler ut * VNX driver needs extra params for create cg from src * Prevent creating encrypted volume with image * EMC VMAX Create CG from CG Snapshot * mark oslo.vmware as optional dependency * ConsistencyGroup Object * Validate string, integer limit for input parameter * Validate name and description string * Handle missing temp volume and snapshot during cleanup * Updated from global requirements * Validate 'is\_public' when creating volume type * Remove StorPool Driver * Ignore InsecureReq warning in SolidFire Driver * Attach snapshot - driver only * Remove bad tests for the VMAX driver * Update authorization actions for services API * Fix missing parameters in driver CG interface * Remove incorrect URLs from jenkins.rst * Fix list comparison for empty list * Snap copy feature for EMC VNX Cinder driver * Tests: Fix os.path.exists mock (emc\_vnxdirect) * Add connector object to create\_export * Correct usage of assertEqual for boolean values * Remove unit test migration logging * Add support '--all-tenants' for cinder backup-list * Corrected order of parameters in docstring * Fix wrong exception usage in cinder exception classes * Fix RestURL to storage backend in Huawei driver * Sync scheduler module from oslo-incubator * VNX driver needs to return snapshot objects * Revert "Revert First version of Cinder driver for Quobyte" * Enhance unit tests for zfssa drivers * VMware: Remove unused constants * Fix volume limit exceeded exception * Refactor api.v2.volumes unit tests * Dell SC: Add check of current value on retype * Update snap-quota to unlimited in Nimble driver * Add more Rally scenarios to run * Updated from global requirements * Fix PEP476 & format message of Oracle ZFSSA drivers * Add SmartX support for Huawei driver * Enhance deletion efficiency when backup init host * Fix order of arguments in assertEqual * Add multiple pools support to VMAX driver * Fix status comparison for attached volume backup * Updated from global requirements * NetApp SSC job will be run periodically * RBD: use 
user-configured value for chunk size * Over subscription for HP LeftHand iSCSI driver * Use prefix for SolidFire template account * Fix multi-line docstrings to meet hacking rules * sqlalchemy exception kills FixedIntervalLoopingCall thread * VMware: Fix protocol in backend stats * Fix error message in cinder/api/v2/volumes.py * Fix concurrent attaches on HNAS iSCSI driver * GlusterFS: Using 'fallocate' instead of 'dd' * Fixing notify message of manage\_existing flow * Clone CG * Fix get default quota values for subprojects * Add deactivate step to extend\_lv * Fix exception on uploading a volume to image with glance v2 API * Set VERSION on the Nimble driver 7.0.0.0b2 --------- * Log which service is down * Move update\_migrated\_volume() to BaseVD * GlusterFS backup driver * Posix backup driver * Add mock cases for IBM FlashSystem * Add discard to connection properties * Remove deprecated config options for Liberty * RBD: use user-configured value for max\_clone\_depth * Updated from global requirements * Fix lvm manage existing volume * Add entry create and cast tasks to manage workflow * Fix cleanup\_temp\_volume\_snapshots for missing vol * Remove unused context parameter * Adding NFS support to the GPFS Driver * Remove deprecated SimpleScheduler * Fix doc string definitions * Port StorwizeSVCDriver to use ABCMeta driver model * Add extra spec capability for Nimble Cinder Driver * XtremIO support for iscsi discovery auth * Add bandit for security static analysis testing * typos(?) in create\_snapshots\_in\_db * Add multiple pools support for Huawei driver * Port XIVDS8K Driver to use ABCMeta driver model * Fix Python 3 issues in Hitachi HNAS tests * Port remotefs driver to Python 3 * Port IBM driver to Python 3 * Clean up volume\_types logging * NetApp ESeries: fix delete of non-existent volume * Refactoring of manager's create\_volume flow * Remove unused arguments from c-vol's create\_volume * Updated from global requirements * Add I/T mapping check for IBM FlashSystem * Remove simple scheduler which is deprecated since Juno * LVM: Support efficient data copy for LVM driver * Implement retype for Pure drivers * Dell SC: Add support for driver retype * EMC VMAX Modify CG * XtremIO volume driver consistency group support * Add Cinder internal tenant support * VMware:Replace vCenter calls with oslo.vmware calls * Rename filename from il8n.rst to i18n.rst * Non-disruptive backup * DRBD: Rename a constant to a better name * Remove resource lock operation for HBSD * Dell SC: Fix legacy bug, init\_conn bug and REST API bug * Dell SC: Fix Consistency Group issues * Add drivers list generator * Fix 033 add encryption unique key migration * Add CHAP support for Huawei driver * Move volume.api test to correct location * Remove logging statements from migrations * DRBD: Define a separate prefix for snapshots * Prevent missing Purity hosts from raising errors * Revert "Remove X-IO volume driver" * Filter cgsnapshots data on the DB side * Refactor Huawei Volume driver * Add volume\_attachment to volume usage notifications * Graceful shutdown WSGI/RPC server * Backups: allow name to be specified during restore * Set default policy for "volume:get" * Add iSCSI multipath support for Huawei driver * Fix 3PAR driver handling of existing VLUNs * Don’t log warnings in Pure initialize\_connection * scality: add export and name keys to os-initialize\_connection info * Add delete/update\_volume\_metadata policy rules * Remove "volume:services" rule from policy.json * Report capability of multiattach for 
FlashSystem * Handle volume not found on zfssa volume delete * Raise BadRequest for invalid replication status * Add unit tests for cinder.api.v2.volumes * Raise HTTP exception for backup not found * Port NetApp NFS drivers to use ABC driver model * Removing OpenvStorage for no CI * Remove unused serialize\_args method * Remove obsolete API from documentation * Tests: test\_volume mock conversion * Fix restore point if backup base is diff-format in ceph * Add white list support for target ports in VNX driver * Preserve mock side\_effect’s in test\_pure * StorPool: clean up the last uses of str.format() * Removing archaic references * Remove useless logging from unit tests * cinder list fails with 'name' sort key * Storwize\_svc\_npiv\_compatibility\_mode default value change * Remove unused parameter in PureFCDriver \_connect * Cleanup unused method fake\_get\_target * Set driver version in Sheepdog driver * Updated from global requirements * Fix saving tz aware datetimes in Versioned Objects * set/unset volume image metadata * Fix not implemented wording in update\_migrated\_volume * Add support for force-delete backups * Improve 3PAR driver VLUN creation and deletion * Remove hacking check N327 * Fix tests failing in gate * Fix properties extracting from image with glance api v2 * Support SMI-S provider v8.0.3 in VMAX driver * Add ability to override OpenStack privileged user auth url * VMEM v6000: Fix export verify routines * Port Tintri driver to ABC driver model * Fix block\_device driver to behave as documented * NetApp E-Series: Add debug tracing * Set encrypted key in connection\_info during initialize * Nested Quota: Set default values to subproject * Dell SC: Add support for ManageableVD * Fix NetApp cDOT driver use of Glance locations * Fix missing pool name in consistency group * NetApp ONTAP: Add debug tracing * Add tracing facility for drivers * Fix error message in Pure driver with correct text * Notify the transfer volume action in cinder * Storwize Driver zone removing * Dell SC: Add support for consistency groups * Remove duplicate volume.filters entry * Port NetApp E-Series iSCSI driver to ABC model * Fix getting out-of-date volume operation state issue for VNX * Separate FlashSystem FC and iSCSI common code * Update expected error message from lvs * Fix HBSD horcm driver with oslo.concurrency 2.1.0 * Remove X-IO volume driver * RemoteFS: Fix the offline snapshot delete operation * Implement the update\_migrated\_volume for the drivers * Avoid race condition at snapshot deletion stage * Fix Python 3 issues in cmd * Port image/glance.py to Python 3 * Switch to oslo.reports * Validate maximum limit for quota * Updated from global requirements * Fix block eventlet threads on rbd calls * RemoteFS: Reporting configured reserved\_percentage in \_update\_volume\_stats * GlusterFS: support extending a volume that has snapshots * Port dothill to Python 3 * Fix backup.rpcapi to pass object backup * Fix typo in solidfire driver option * Mock socket.gethostbyaddr in test\_v7000\_fcp * Replace missed basestring by six for python3 compatibility * Return 404 if volume type encryption is not found * Updated from global requirements * smbfs: fix invalid check for smbfs\_used\_ratio correctness * Remove lio\_initiator\_iqns config option * Move HDS drivers to Hitachi folder * Fix Python 3 issues in targets unit tests * Port drbdmanagedrv driver to Python 3 * Port test\_db\_api to Python 3 * Port hitachi driver to Python 3 * Fix getting wwpn information in infortrend driver for DS4000 * Do
not allow to modify access for public volume type * Add dependency check in RBD delete\_snapshot * Port huawei driver to Python 3 * XtremIO driver fix array snapshot problem * Fix cinder.conf.sample generation * Handle attachment of second volume * VMware: Create volume backing in specific clusters * Use versionutils from oslo.log * Correct overquota error message * Updated from global requirements * Fix timeout issue in EMC VNX driver unit test * Remove oslo logging from backup unit tests * Add notifications about snapshot.update.\* * Sync the latest fileutils module from oslo-incubator * Port NetApp DATAONTAP blocks drivers to ABC model * Fix 'no actual-pathname' NetApp API error * Use right oslo.service entry points * Use symbol for error code in VNX cinder driver * Storwize driver report capability for multiattach * Filter snapshots data on the DB side * Change generic NotFound to specific exception * Storwize: add the missing stops in the end of the messages * Ensure 'WSGIService' derives from oslo\_service base class * Switch to oslo.service * Fix library includes for config generator * Revert First version of Cinder driver for Quobyte * Fix cinder-manage volume delete cmd * Fix Python 3 issues in the blockbridge driver * Fix Python 3 issues in the swift backup driver * Fix Python 3 issues in ceph and rbd drivers * Fix Python 3 issues in backup * Remove generate\_glance\_url * Fix manage\_existing function in infortrend driver * Add unit tests for the capacity filter * Modify template account creation in SolidFire drvr * Tests: Fix assertRaisesRegexp deprecation warnings 7.0.0.0b1 --------- * Harden scheduler.rpcapi unit tests * Fix backups.rpcapi to pass objects over RPC * Fix weird change of volume status in re-scheduling * Fix tox -e py34 * Add exception catch in report\_state for DBError * Updated from global requirements * Dell SC: Enable use of Storage Profiles * Use elevated context for backup destroy * Fix Cinder Objects unit tests * rbd: add volume\_id to connection\_info in initialize\_connection * Fix Datera driver export call * Add iscsi\_target\_flags configuration option * Adds the Violin Memory V7000 series FC driver * Remove the hardcoded concurrency limit for ostestr * Revert "Disable backup progress notifications for unit tests" * Nested Quota : Create allocated column in cinder.quotas * Handle incorrect '--config-dir' param * Get updated volume status in begin\_detaching * Tests: Make fake\_notifier per-instance * Validate outermost request body element name consistently * Add missing argument to delete\_keys method * Port LeftHand driver to use ABCMeta driver model * Add Virtuozzo Storage Volume Driver * Disable profiler for unit tests * Use a hard-coded project\_id in racy cinder.tests.unit.test\_volume tests * Validate bool value using strutils.bool\_from\_string method * Incorrect exception caught in qos-specs create api * VMware: Remove unused methods * Scality SOFS: enhance how the remoteFS mount is detected * Backup object * Add missing Jinja2 to requirements.txt * Storwize: remove the useless method check\_copy\_ok * Update version for Liberty 7.0.0a0 ------- * ScaleIO: Fix broken format string * Sync 'report' from oslo-incubator * Ceph driver support retries on rados\_connect\_timeout * Dell SC Removed \_find\_domain and associated tests * LVM add multiattach flag capability * Add volume drivers for Infortrend Storage * XtremIO Volume driver requests, multipath * Updated from global requirements * Adds FC and ISCSI Cinder drivers for Lenovo Storage Arrays 
* Adds FC and ISCSI Cinder drivers for HPMSA Storage Arrays * Replace basestring with six.string\_types * Fix broken export commands on block\_device driver * Switch to oslo.policy 0.3.0 * Add config option to set max\_volume\_size\_limit * Fix LIO target helper when missing targetcli * Move DRBD tests into tests/unit * Volume driver for HP XP storage * Replace xrange() with six.moves.range() * Drop L suffix from long integers * Pass proxy environment variables to tox * Re-add DRBD driver * Refactor API create\_volume flow * Introduce Guru Meditation Reports into Cinder * Adds FC and ISCSI Cinder drivers for DotHill Storage Arrays * Get rid of oslo-incubator copy of middleware * SQL scripts should not manage transactions * Targets tests: Clean up long lines * Update 3PAR user config help strings * Disallow backing files when uploading volumes to image * Remove WritableLogger wrapper * Get StringIO from six for Python 3 compatibility * Fix Python 3 issues in utils * Update SolidFire to use target driver model * Wait until service thread is done on service stop * Add cinder volume driver for Blockbridge EPS * 3PAR enable multiattach capability reporting * Replace dit.itervalues() with dict.values() * Rewrite code merging two dictionaries * Replace dict(obj.iteritems() with dict(obj) * Replace dict.iteritems() with dict.items() * san driver: don't use relative Python import * Implement Cinder Volume driver for HGST Solutions * Volume manager should set filter\_function and goodness\_function * Tintri driver to manage existing backend storage objects * Replace it.next() with next(it) for py3 compat * Use six to fix imports on Python 3 * NetApp E-Series: Add Fibre Channel Support * NetApp E-Series: Refactor class structure for FC * NetApp E-Series driver: Remove caching logic * Use six.reraise() for Python 3 compatibility * Updated from global requirements * Add secondary account capability to SolidFire * Replace urllib and urllib2 with six.moves.urllib * Replace unicode with six.text\_type * Use correct rtslib namespace for newer versions * Dispose DB connections between backend proc starts * EMC ScaleIO Cinder Driver * RemoteFS: Fix doc for locked\_volume\_id\_operation * Re-integrate Oracle iSCSI Cinder driver * Dell SC: Expanded comments and update var names * Re-add the StorPool distributed storage driver * Add iSCSI protocol support for IBM FlashSystem * Fixes 3PAR snapshot failure with optional params * ConsistencyGroup: Return 400 instead of 500 for invalid body * Port remote\_fs driver to use new driver model * Make VNX Cinder Driver aware of VNX Pool Full Threshold * Add 'source-id' and 'source-name' support in VNX driver * Revert "Adds drivers for DotHill Storage Arrays." 
* Dell SC: Added support for alternate iscsi portals * Dell: Added verify cert option for REST calls * Handle ineffective backup compression * Prophetstor driver needs to return snapshot objects * Complete switch to snapshot objects * DriverFilter: don't check volume\_backend\_name * Add Pure Storage FibreChannel driver * Fix exception parameter name * Move Dothill tests out of root test directory * Fix remaining memory issues with nfs backup unit tests * Don't send heartbeats if Manager reports a problem * Changes in rally-jobs/README.rst * Removed explicit return from \_\_init\_\_ method * Return provider\_id in SolidFire model update * Deprecate the HPLeftHandISCSIDriver CLIQ driver * Allow provisioning to reach max oversubscription * Port ProphetStor driver to use ABCMeta driver model * Clean up unused exceptions * Refactor scheduler's create\_volume flow * Adds FC and ISCSI Cinder drivers for DotHill Storage Arrays * Bump SolidFire version number * Dell SC: update\_volume\_stats could use uninitialized vars * Disable backup progress notifications for unit tests * Tintri Cinder Volume driver * Fix assertRaisesRegexp deprecation warnings in UT * Refactor PureISCSIDriver into base and iSCSI classes * Add missing unit test for goodness weigher * Non-admin user to query volume filter by az * Fix cinder concurrency issues on rtstool * Use SolidFire snapshots for Cinder snapshots * Switch get\_all\_snapshots to use objects * rbd driver in cinder does not manage glance images multi-location * Notification with volume and snapshot metadata * Remove pretty\_tox and use ostestr * Add volume ID to fake volumes in Gluster tests * Fix capacity filter to allow oversubscription * EMC VMAX Manage/Unmanage Volume * Add chap support to CloudByte cinder driver * Multiple pools support enhancement in VNX cinder driver * Remove un-used import at test\_volume\_transfer.py * NetApp FC driver shims missing manage/unmanage * Updating cmd/manage.py get\_arg\_string() argument parser and adding unit test * Fix expression-not-assigned pylint issues * Add standard QoS spec support to cDOT drivers * Avoid LUN ID collisions in NetApp iSCSI drivers * VMware: insecure option should be exposed * Create iSCSI lio portals with right IPs and port * Create consistgroup from cgsnapshot support in VNX driver * Stop using deprecated timeutils.isotime() * Fix response when querying host detail by host name * Fix wrong response with version details * Display NOTIFICATIONS on assert failure * Brocade driver not parsing zone data correctly * Fix issues with extra specs in VMAX driver * Don't use dict.iterkeys() * Address 0x712d8e0e error in VNX Cinder Driver * Leverage dict comprehension in PEP-0274 * Add missing '-o' CLI option to VNX Cinder Driver * Validate name and description for volume type * Leave sqlalchemy convert to boolean to the DB SQL type to use * Switch from MySQL-python to PyMySQL * Add ability for drivers to copy data preserving sparseness * Remove HDS HUS iSCSI driver * Updated from global requirements * Use nfs\_oversub\_ratio when reporting pool capacity * LVM: Pass volume size in MiB to copy\_volume() during volume migration * LVM: Support efficient data copy using "dd" for create\_cloned\_volume * Fix a problem with FAST support in VMAX driver * Remove use of deprecated LOG.warn * Fix incorrect reraising of exceptions * Switch to oslo\_versionedobjects * Cinder os-force\_detach api returns 500 * Check volume\_backend in retype * Fix overwrite of params in SF image cache update * Dell SC driver honoring 
folder name after volume creation * Check type match on create from source/snap * Add patch for consistency group update in ProphetStor driver * Logging not using oslo.i18n guidelines (openstack) * Remove unused context parameter * Replace suds test dependency with suds-jurko * Fix missing translations for log messages * Remove Brick from cinder codebase * Follow i18n guidelines in LIO target * Windows SMBFS: Fix image resize errors during volume creation * Windows iSCSI: Add CHAP authentication support * NFS Backup: Correcting backup\_sha\_block\_size\_bytes help message * Fix common misspellings * GlusterFS: Renaming test case to test\_mount\_glusterfs * Add new exception to retryables in SolidFire driver * Convert mox to mock: tests/compute/test\_service.py * FlashSystem reports error while running tests with multi-thread * Dell: Added support for update\_migrated\_volume * Fix FakeISCSIDriver and FakeISERDriver * Add volume status to error messages in backup create flow * Bad link in API version details response * Fix xxx=\n pep8 errors in volume\_utils.py * Log command failure details before raising ISCSITargetDetachFailed * Eqlx: Fixes the retries on Network Connection Error * Rename Datera test to test\_datera for discovery * Allow rexports for targets with Datera * Add os-brick to cinder requirements.txt * Fix the KeyError in CloudByte iSCSI cinder driver * LIO: Enable iSER for IPv6 * LIO: Use rtslib property instead of private method * Fix missing translations for log messages * Cinder os-attach api returns 500 * cinder os-detach api returns 500 * HDS HNAS Driver fails when FS unhealthy * Logging not using oslo.i18n guidelines (zonemgr) * Fix broken add\_iscsi\_conn log message * Fix unit tests spam output * Preserve usage and reservations on quota deletion * Fix 'driver is uninitialize' typo * Removing sleep between when a command is sent and 'YES' is sent * Windows iSCSI: remove ensure\_export * tests: replace mox by mox3, clean out mox usage * Catch additional type conversion errors * Tests: Remove sleep from NFS tests * Port block\_device driver to use new driver model * VMware: Enable vCenter certificate verification * Fix typo in log messages and comments * Clean up failed clones in VMAX driver * Correct directories check for N327 hacking check * Fake out sleeps in unit tests * Fix range check for NFS used ratio * Move logging sample to use oslo\_log * Targets test refactoring * Revert state if attachment already exists * Add retry to lvm delete * Admin extends tenant's volume but change admin's quota * Drop use of 'oslo' namespace package * Add Multi-connection support to XIV * VNX Cinder driver Over Subscription Support * Fix namespace issue in generate\_sample.sh * Add hacking check for str and unicode in exceptions * Fix volume creation from image with allowed\_direct\_url\_schemes * Change default of option volume\_driver to LVMVolumeDriver * GlusterFS: Support over subscription in thin provisioning * Remove unnecessary checks for encrypted types * Add test case for volume\_encryption\_metadata\_get * Updated from global requirements * Port rbd driver to use new driver model * Don't truncate osapi\_volume\_link prefixes * Fixed issue with mismatched config in VMAX driver 2015.1.0 -------- * Add external genconfig calls * Create initiator id for VMAX iSCSI driver * Remove deprecated methods in VNX driver * Remove unused find\_attribute\_or\_element() * 3PAR don't log version numbers every stats update * Sync oslo service module * Add external genconfig calls * Enable use 
of filter\_function in PureISCIDriver * NetApp E-Series: Fix instance live-migration with attached volumes * Add resource tag to logging in volume.manager.py * VMware: Handle concurrent inventory folder create * Leverage timeutils, drop strtime() usage * GlusterFS: Using mount method in RemoteFsClient * Remove redundant code from VNX Cinder Driver * Remove force check from copy\_volume\_to\_image * Logging not using oslo.i18n guidelines (scheduler) * service child process normal SIGTERM exit * service child process normal SIGTERM exit * Move unit tests into dedicated directory * Dell SC driver calls out the wrong REST API version * Move RBD calls to a separate threads * Windows SMBFS: fix volume extend * Fix a wrong argument of create method * Fix tiny typo: compatability => compatibility * Reserve 5 migrations for Kilo backports * RBD: Add missing Ceph customized cluster name support * Standardize logging in volume.api.py * Release Import of Translations from Transifex * Fix fetch\_to\_volume\_format if vhd is requested * Windows: Improve vhdutils error messages * SMBFS: Add minimum qemu-img version requirement * VolMgr: reschedule only when filter\_properties has retry * Storwize driver should only report active wwpn port * update .gitreview for stable/kilo * Mask passwords with iscsiadm commands * Add support for customized cluster name * Updated from global requirements * SMBFS: Lock on a per-volume basis * Windows SMBFS: fix volume extend * Complete the doc/README.rst instructions to build docs * Verify all quotas before updating the database * Add locking to PureISCSIDriver around creating Purity Host objects * Include boot properties from glance v2 images * Add CA cert option to backups swift driver * Fix a wrong argument of create method * Add locking to PureISCSIDriver around creating Purity Host objects * Reworked Dell SC iSCSI target portal return * Fix LUN misalignment issue with NetApp iSCSI drivers * Remove the export creation during volume creation for migration * Fix assertEqual in test\_volume.py in correct order of params * VNX Cinder Driver should report 0 free\_capacity\_gb in some scenarios * Include boot properties from glance v2 images * Logging not using oslo.i18n guidelines (brick) * set default auth\_strategy to keystone 2015.1.0rc1 ----------- * Open Liberty development * Removed sleep before 'YES' is sent to confim an operation * Update openstack-common reference in openstack/common/README * GlusterFS: Returning provider location of volume from snapshot * Fixes snapshot creation failure in CloudByte driver * Delete the temporary volume if migration fails * Revert "Removing Windows drivers" * Correct cinder hacking check numbering * Add hacking check for print() statements * Rbd update volume stats in wrong way * Add missing copy\_volume\_to\_image method to Sheepdog driver * Partial Revert "Removing Huawei drivers" * Create initiator id if not exist in VMAX driver * Fixed encrypted property for 3PAR FC and iSCSI drivers * Partial Revert "Removing ZFSSA driver" * Mock wait\_for\_volume\_removal in test\_brick\_connector * Dell SC driver has insufficient iscsi logging * VMware: Skip vSAN for preallocated image download * Enable H238 hacking rule * Use six.text\_type instead of unicode * Fix ISCSIDriver initialized connection volume type * Fix multipath device discovery when UFN is enabled * Fix missing clone\_image API support for sheepdog driver * More error handling on EMC VNX migration failure * Set volume\_attachment to [] for the temporary volume creation * 
Add volume:update rule to policy.json * Fix always false condition in glance wrapper * Only use operational LIFs for iscsi target details * Revert "Removing Netapp FC drivers for no reported CI" * Get volume from db again before updating it's status * Catch more general exception in manager's create\_volume * Fix broken fetch\_to\_volume\_format log message * Tests: Fix v6000 test failure with random hash seed * Check volume status in detach db api * Fix wrong command for \_rescan\_multipath * Storwize: Replication status still active when primary copy is offline * VMware: Fix instance\_uuid access in volume retype * Logging not using oslo.i18n guidelines * Remove LP bug ref in remove\_iscsi\_device * Fix potential access to missing key * Brick: Fix race in removing iSCSI device * VMware: Improve invalid container error message * Fix the format of the system name in VMAX driver * Hitachi: Fix access to volume instance\_uuid * VMware: Fix ImageNotAuthorized during copy volume * Fix: Boot from image with HNAS iSCSI * SMBFS: Fix missing volume provider location * Enhance VNX Cinder volume creation logic * Properly use obj\_extra\_fields in objects * Create unit tests for volume objects * Fix incorrect invocation of \_add\_to\_threadpool * VMware: Fixed usage of volume instance\_uuid * Change volume and snapshot stuck creating to error * Imported Translations from Transifex * Fixed access to instance\_uuid in retype * Ensure initialize\_connection in targets pass multipath parameter * Eager load volume extra specs * Be safe with getting attachment * Added the missing attachment to detach\_volume * Make lio iSCSI changes persistent to avoid lost * Sort list of cinder\_object.changes * Move to hacking 0.10 * Syncing versionutils from oslo-incubator * Properly remove host object from ISE * Dell Storage Center API change fails init\_conn * Windows iSCSI: fix volume clone * Enable request-id in cinder API logs * Use cached values for stats on query failures for vmem drivers * The value of netapp\_storage\_protocol should default to none * Change leftover oslo.\* to oslo\_\* * Updated from global requirements * Fix: Synchronise Quobyte Share mounting * Fix typo in cinder/cinder/volume/drivers/emc\_vmax * Update file doc string for pure.py * update oslo policy to remove policy.d log spam * Fix QoSSpecManageApiTest to work in parallel 2015.1.0b3 ---------- * Remove chap secret DEBUG logging in PureISCSIDriver * Removing Windows drivers for no reported CI * Fix logging mistake in swift backup driver * Removing Zadara driver for no reported CI * Removing Huawei drivers for no reported CI * Removing Netapp FC drivers for no reported CI * Removing Fujitsu driver for no reported CI * Removing DRBD driver for no reported CI * Removing FusionIO driver for no reported CI * Removing Nexenta driver for no reported CI * Removing Symantec NFS driver for no reported CI * Removing StorPool driver for no reported CI * Removing ZFSSA driver for no reported CI * Make the 3PAR drivers honor the pool in create * Removing HP MSA driver for no reported CI * Removing Coraid driver for no reported CI * Add retry to create resource in Datera driver * Logging not using oslo.i18n guidelines * Tests: Fix az test failure when PYTHONHASHSEED != 0 * Change datetime.now() to timeutils.utcnow() from oslo\_utils * Fixes nits in check\_no\_contextlib\_nested * Fix logging to catch original exceptions and tracebacks * Remove error messages from multipath command output before parsing * Return updated volume object to the caller of 
\_attach\_volume() * Fix SAN generic driver ssh whitespaced commands * EMC: Fix use of "\_" as variable name * Reduce configured file size for nfs backup unit tests * tests: remove useless variable * Revert "Datera's export to avoid deprecated keys" * Don't override extra specs with config in VMAX * Check license before clone in VMAX driver * Fixing mount when state\_path is configured with a final '/' * Verify all quotas before updating the database * Update Violin REST client library name * Remove the reference of volume['instance\_uuid']in VNX driver * Increase LeftHand driver minimum client version * Decrement remaining retries after failed REST call * VMware: Fail immediately for images in a container * Make unused iscsi\_num\_targets, iser\_num\_targets configs as deprecated * Raise exception for invalid mock assert calls * Mocked utils.execute for broken tests * Huawei driver check before associating LUN to a LUN group * Windows: Fixes wintypes import issue in vhdutils * Fix typos in LVMVolumeDriver * Add minimum qemu-img version check functions * Implement IET target driver * Fix unit tests for multiattach patch * Fixed a concurrency issue in VMAX driver * Fix LVM thin pool creation race * Added provider\_id to volume and snapshot object * Fix ArgsAlreadyParsedError in emc\_vnx\_cli.py * Fix typo in log message * remotefs: Fix doc string for \_create\_snapshot * Fix a typo in sf\_template\_account\_name help * Move to the oslo.middleware library * Remove use of contextlib.nested * Remove strutils from oslo-incubator * Add waiting for the driver to SchedulerManager * Fix retype return value in volume driver base class * Fix retype arguments in volume driver base class * Fix sqlalchemy reuse in multi-backend children * Fix Cinder logs to show authentication error in RBD driver * Update hacking check for oslo\_log * Add is\_ready method to scheduler driver * Fix for inconsistent cinder-services state change * Fix HNAS iSCSI driver error on LUN creation * Datera driver looks for lun-0 instead lun-1 now * Use oslo.log instead of oslo-incubator * Remove the useless next link for volumes, transfers and backups * Unset auth token before trying to login to Datera * NFS backup driver * Sort snapshots in create CG from CG snapshot * Add multiattach capabilities injection * Tests: Harden fake\_notifier asserts * Error trying to delete snapshots on Hitachi driver * Remove global mocking from test\_pure.py * Allow scheduler to receive volume stats when starting service * VMware: Fix exception logging * Adjust Cinder to support FCP on System z systems * Refactor Swift backup driver and introduce chunking driver * Namespace updates for \_i18n and imageutils & fileutils * Dell FC driver inheritance order causing failures * Add volume multi attach support * Add project\_id to barbican keymgr wrapper * Fixes VNX NotImplementedError of unmanage * Replace assertEqual(True, \*) -> assertTrue(\*) * Update Datera's export to avoid deprecated keys * Improve error handling in refactored Tgt driver * Adds pool aware scheduling for HNAS drivers * PureISCSIDriver consistency group updates * HP lefthand driver filter and evalautor function * Fix the unicode encode error when create volume * Add consistency group support for XIV/DS8K cinder driver proxy * Don't fail target\_delete if ACL's don't exist * Change log level for no object attribute found * Add Manage/Unmanage support to NetApp NFS drivers * Use snapshot object in create\_volume flow * Fix "rtsllib" -> "rtslib" typos * Fix some issues with pool name 
sent to SVC * Fix allocated\_capacity tracking when rescheduling * HP 3par driver filter and evaluator function * Add support to incremental backups in cinder * Convert all eqlx tests from mox to mock * Fixed the order of mock decorators in VMAX driver * Adds SSH communication to HNAS drivers * Add CHAP support to PureISCSIDriver * Make objects behave more like our old dictionaries * Two choices for iscsi\_helper is missing * Update Datera's Authentication method * Simplify cxt test\_create\_export() unit test * CG Modification Support in EMC VNX Cinder Driver * Dell SC API change fails snapshot creation * Adding manage/unmanage support for LeftHand driver * More validation logic for VNX CG creation * Change default value of gpfs\_images\_share\_mode to None * Add DB table for driver specific data * Move oslo.messaging to the oslo\_messaging namespace * Create Consistency Group from CG Snapshot API * Modify Consistency Group API * Remove useless storage\_availability\_zone import * Failover to alternative iSCSI portals on login failure * Update volume type name for volume type API * Add config option to override url for versions * Snapshot and volume objects * Cinder objects base * Dell Storage Center Driver API bug * Password config options should be marked secret * Clear migration\_status from a destination volume if migration fails * RBD: Query volume features from ceph.conf * i18n Compatibility in VMAX driver * Correct a few changes in the VMAX driver * Fix HNAS driver parsing errors * RBD: remove non-layering support for antiquated versions * Fixed errors in docstrings in the VMAX driver * SMBFS: Fix retrieving the volume path and format * More validation logic for VNX CG creation * Add flash cache policy to 3PAR driver * Update v1 deprecation warnings * Fixes the import for Lefthand driver * NetApp eseries implementation for manage/unmanage * Replication status periodic task optimization * XtreamIO version 4.0 support * Change max\_over\_subscription\_ratio default value * Use Unique SCST Group names in SCST cinder helper driver * Add CHAP persistence to SCST target helper * Fix for infinity capacity reporting in EQL driver * Use iscsi\_helper instead of target\_helper on logs * Import only modules: H302 * Revert "Remove retry\_deactivation directive on lvremove" * Over subscription for Pure Storage iSCSI driver * Use oslo\_config choices support * Custom zone name prefix not being used by brcd driver * cinder-manage man update * GET volumes API sorting REST/volume/DB updates * GET volumes API sorting enhancements common utilities * FCZM fix reading of cinder config entries * Sync scheduler.filters module from oslo-incubator * Limit volume copy bandwidth per backend * Generic filter support for volume queries * Remove warnings for long vgs and lvs calls * Use subunit-trace to enable output during unit test runs * VMware: Relocate volume to compliant datastore * VMware:Use datastore selection logic in new module * VMware: Refactor initialize\_connection unit tests * Fix exceptions logging in TgtAdm * Sync 'versionutils' module from oslo-incubator * Sync 'threadgroup' from oslo-incubator * Update 'systemd' module from oslo-incubator * Sync 'service' module from oslo-incubator * Sync 'loopingcall' module from oslo-incubator * Sync the 'fileutils' module from oslo-incubator * Sync 'eventlet\_backdoor' module from oslo-incubator * Remove unused 'test' module from oslo-incubator * IBM GPFS Consistency Group Implementation * Fixed 3PAR driver load balancing during migration * NetApp 
E-series: Allow scheduling by disk * Make Interval and Retries Configurable for VMAX * Pass region name to Nova client * Remove retry\_deactivation directive on lvremove * Manage/unmanage impl for NetApp ONTAP iscsi driver * Fix argument order in assertEqual: tests/test\_service.py * Fix some message nits in the ZoneManager * Implement refresh as kwargs in get\_volume\_stats * Dell sc driver iscsi multipath enhancement * Tests: Fix cxt target tests opening files * LVM: Fix thin provisioning and mirrors stats reporting * Fix exception error on HNAS drivers * Fix comments style according to the Hacking Rules * Passing privileged user to create nova assisted snapshots * Fix return value inconsistency in VNX Driver * Fixed typo * Pool-aware scheduler support in EMC VNX Cinder driver * Fix extraneous comma that breaks docbook generation * Sync policy module from oslo-incubator * Dell Storage Center: Add retries to API calls * EQLX: Consolidate CHAP config options * Add support for chiscsi iscsi helper * Fix logging guideline violations in volume/api.py * Remove useless requirement on wsgiref * Snapshot of bootable volume goes in error state * Sync periodic\_task module from oslo-incubator * Tests: Don't require binding to port 4444 * Tests: Remove TestWSGIService.test\_reset\_pool\_size\_to\_default * Tests: Remove randomness from NFS mount tests * Change exception message in volume api * Refactoring for export functions in Target object * Add iSCSI SCST Target support to cinder * EMC VMAX driver Kilo update * Fix Scality SRB driver security concerns * Fixes total\_capacity\_gb value in CloudByte driver * EMC VNX Cinder Driver iSCSI multipath enhancement * Add dedup provisioning to 3PAR drivers * Provided requirements are meant to be immutable * Remove optional parameter from lun mapping call * quobyte: remove dependency to xattr * Don't fail target\_delete if target doesn't exist * Remove custom lazy loading * DRBD: Use correct function object after DBus disconnect * Split volume driver into ABC classes * Mock out the wait routine in the VMAX driver * Limit ram and disk used by ceph backup tests * Fix detach volume from host problem in VMAX driver * fix typo in config.py * Update hacking ignore list * VMware: Delay string interpolation in log messages * VMware: Integrate VMDK driver with oslo.vmware * Enhance iSCSI multipath support * Dell Storage Center Unit Test Updates for Kilo * Updated from global requirements * Update eqlx driver help text * Add extra library oslo.concurrency to oslo.config.generator.rc 2015.1.0b2 ---------- * Support over subscription in thin provisioning * Change oslo.\* to oslo\_\* * Lefthand driver fails to attach a cloned volume * Purge deleted rows * Make PureISCSIDriver iSCSI port discovery more flexible * EMC VNX Cinder Driver Update * Make storwize debug log more readable * Fixes the EQL driver CI tests AttributeError * Add manage/unmanage methods for Hitachi Block Storage Driver * RemoteFS: Use nas\_ip and nas\_share\_path options * Scality SOFS : Use ensure\_tree from fileutils * Tests: Don't sleep for looping calls (eqlx) * Enable use of an /etc/cinder/lvm.conf file * Roll back if VMAX masking view not created * Tests: Don't sleep for looping calls * Windows iSCSI driver: Fixes copy disk method exception handling * VMware: Fix missing target resource pool * Revert "Implement Huawei SDSHypervisor driver" * Remove the solaris volume driver * Fix SSHPoolTestCase to work in parallel * Drop deprecated namespace for oslo.rootwrap * Fixes attribute content checking 
* Imported Translations from Transifex * Support iSER driver within the ISCSITarget flow * HP3Par: Set snapCPG when managing existing volumes * Fixed misspelling in solidfire.py * Adds unit tests for HNAS backend * Failed to discovery when iscsi multipath and CHAP both enabled * Add retry for tgtadm update when tgt exists * Add completion logging for snapshots and volumes * Fix configratuion of rally jobs * Create SolidFire Template account on init * Updated from global requirements * Add debug message for lvremove after udev settle * IBM Storwize driver Consistency Group Implementation * Use get\_my\_ipv4 from oslo.utils * TgtAdm: Fix \_recreate\_backing\_lun logging * Revert "Create SolidFire Template account on init" * HP 3PAR modules have bad log messages * Remove useless and unused request\_utils * Create SolidFire Template account on init * Fetch\_to\_volume\_format calls copy\_volume using wrong parameter * Changed pvs separator from ':' to '|' to support names with ':' * Raise correct exception when validate\_connector failed * Add provisioned\_capacity * Move 3 Fujitsu ETERNUS DX related file * Add retry to lvm snapshot create * Add a generic retry decorator to cinder/utils * Use uuidutils from oslo.utils * Remove unnecessary method: \_ensure\_iscsi\_targets() in tgt.py * Raise correct exception if deleting of LIO iSCSI target is failed * Cleanup unused DB APIs, part I * Remove argparse from requirements * Update tests for Quobyte Cinder drv from mox->mock * Fixes a small issue in find\_autodoc\_modules.sh * Fix the eqlx driver to retry on ssh timeout * Add retrying lib from global requirements * Remove usage of taskflow 'utils.misc' module * Move oslo.serialization to oslo\_serialization namespace * HP 3PAR modules do not follow coding guidelines * Improve debug logging of Dell Storage Center driver * Fix \_usage\_from\_snapshot in volume.utils * VMware:Fix error creating vCenter inventory folder * New Cinder volume driver for openvstorage * Fix cinder-manage shell ipython * Shrink down customized logging listener * Prevent deleting volumes in a consistency group * Fix bug in rbd driver: the cloned volume size is wrong * Fix HNAS driver confusing error message (iSCSI driver) * Updated from global requirements * Ensure lazy translation is disabled properly * DRBD: remove a wrong comma, it creates a tuple * Move oslo.utils to oslo\_utils namespace * Make test\_create\_delete\_snapshot more robust * Add policy\_dirs conf fixture * DRBD: Log an error if libraries can't be loaded * Fix the iSER legacy usage in the new targets flow * Move oslo.config to oslo\_config namespace * Add support for manage/unmanage volume commands to PureISCSIDriver * Scality: Lock around SOFS mount to avoid a race * Set 'driver\_volume\_type' to 'gpfs' * Verify the instance's existance in the VMAX driver * Updated from global requirements * Switch the PureISCSIDriver over to using the purestorage pypi module * Fix zfssa driver volume attach to work with latest zfssa software * Updated from global requirements * Move oslo.db to oslo\_db namespace * Fix eqlx endless loop when server closes the connection * Increase unit test coverage in hacking test * Fixed server name being retained after detach in LeftHand * Fixes misspelled words in Cinder * Imported Translations from Transifex * Add mock for cinder-rtstool call in tests.targets.test\_lio\_driver * Skip LIO target unit tests until mocked * Fix LOG formatting in api initialize\_connection * TgtAdm: Don't change CHAP username/password on live migration * 
Deal with PEP-0476 certificate chaining checking * Add hacking check for oslo namespace usage * Remove locks from LeftHand driver * Fix bug in tgt conf for volume * Use is\_valid\_ipv6 from oslo.utils * Use lockutils.set\_defaults to set lock\_path in test * Fix bug in sheepdog driver: the wrong volume size * Add loopingcalls for Huawei storage system driver * Implement clone\_image caching on SolidFire * Add migration tests for PostgreSQL * Garbage Remains when Attached Volume is Migrated with NFS Driver * Update README.rst to current state * Remove unused variables from ensure\_export() * Fix incorrect usage of get\_flow in volume.manager * Fix iscsi\_write\_cache setting for iscsi targets * Add debug messaging for tgt already exists * Clean up QoSSpecManageApiTest setup * Add more rally benchmarks related to Cinder * Use cinder.utils.execute directly * Deal with tgt already exists errors * Fix drbd driver to load without 3'rd party libs * i18n fixes for PureISCSIDriver * cinder-rtstool: should use acl.node\_wwn * LVM: Add terminate\_connection call for Target Objects * Add an instance-locality filter * Adds cinder iscsi driver for CloudByte storage * Add driver filter and evaluator for scheduler * Remove import of private \_lazy module * Fix argument order in assertEqual: tests/test\_nfs.py * Fix the continuation line indent to pass flake8 * Capitalize the first letter in log messages * Fix argument order in assertEqual: tests/test\_glusterfs.py * Use assertRaisesRegexp() in test\_srb.py * The DRBD(manage) Cinder volume driver * Make ProphetStor drivers compliant with logging standards * Transition LVM Driver to use Target Objects * Replace oslo-incubator with oslo\_context * Create proxy volume driver * Fix handling of serialized data in filtering of volumes * Convert mox to mock: tests/test\_glusterfs.py * Remove check on db\_exc.DBError * Add specific docs build option to tox * Imported Translations from Transifex * Add a privileged user for OpenStack services * Add support to PureISCSIDriver for Consistency Groups * Expand the description of extra\_capabilities * Fix broken StorPool driver * Brick LVM: Remove self.lv\_list * Revert "Outputs the message about failing to bind * Replace the NetApp driver proxy layer with a proper factory * Quobyte Driver Exception Cleanup * Handle the volume not found case in the VMAX driver * Fix format errors in brick/iscsi LOG messages * Add unit tests for NetApp do\_setup methods * Outputs the message about failing to bind to IPv6 * NetApp E-series: Do not log passwords in requests * Set iet\_conf to nonexistent file in unit test * Fix issue with passing lists in filters * Rename oslo.concurrency to oslo\_concurrency * Add a provider\_id column to Volumes and Snapshots * Mock leaked \_execute() calls in driver tests * Sync request\_utils module from oslo-incubator * Sync periodic\_task module from oslo-incubator * Persist volume uuid on VMAX array * Fixed a problem in terminate\_connection in VMAX driver * Sync the latest middleware module from oslo-incubator * LVM: Volume is deleted unexpectedly during volume migration * RBD: use image\_conversion\_dir for image operations * Sync the latest loopingcall module from oslo-incubator * Sync install\_venv\_common from oslo-incubator * Sync latest imageutils from oslo-incubator * rtstool on Ubuntu installs in /usr/local/bin * encryption\_id needs to be non-nullable * Mock calls to rpm and dpkg from NetApp unit tests * Fix files in Cinder with execute bit set * Add error handling to \_connect function in 
PureISCSIDriver * Fix typo that escaped review in connector.py * Fix 3PAR host persona mapping to match WSAPI * Punctuation and Copyright changes * Make 3PAR drivers compliant with logging standards * Fixing 3PAR connection name cache error * Remove redundant args for clone\_image method * Add Oracle ZFSSA NFS Cinder Driver Support * Fix HNAS driver initialization 2015.1.0b1 ---------- * Make GPFS driver compliant with logging standards * Updated from global requirements * Fixed wait for job completion in VMAX driver * Logging updates to properly use ',' instead of '%' * Add support for Purity Protection Groups to PureISCSIDriver * Catch ImageNotFound exception when deleting rbd volume * Isolate Cinder Attach and Connect in Base Driver * Uncouple scheduler stats from volume creation * Fibrechannel and iSCSI for Violin Memory 6000 Series Arrays * Add Scality SRB driver * Update volume driver for Huawei storage system * Implement Huawei SDSHypervisor driver * Implement Huawei SDSHypervisor connector * Added volume type description for volume type API * Added UUID as primary key for Encryption model * Fix 3PAR driver hang on SSH calls * Delete default volume size 100M in drivers * Send the notifications to the Ceilometer for backup service * Add the StorPool block storage driver * Update global requirements * Remove commented out code from cinder/test.py * Fix HNAS driver confusing error message * Remove iscsi\_helper calls from base iscsi driver * Add unit test for commit 22abe9081 * Add Support for Dell Storage Center * Ensure that lun\_id is an int for NetApp Drivers * Symantec NFS cinder driver * DB migration tests * Convert mox to mock: tests/compute/test\_nova.py * Correct default service\_name for nova\_catalog\*\_info config option * FlashSystem Code Cleanup * FibreChannel drivers for NetApp Data ONTAP storage controllers * First version of Cinder driver for Quobyte USP * Fix use of invalid variable in tgt exists check * Remove an unused variable in volume/manager.py * Brick: fix bug in tgt conf for volume * Convert test\_image\_utils tests to mock * Report better capacity info for a limitless 3par cpg * VMware: Fix datastore selection with single host * Add support for backup encryption metadata * Improve use of temporary\_file and temporary\_dir * RemoteFS: Move Nova snapshot code into RemoteFSSnapDriver * Implementing the use of \_L’x’/i18n markers * Fixes intermittent NFS driver mount failure * Updated from global requirements * Use pbr entry\_points to setup the cinder scripts * ZFSSA iSCSI driver should support extra specs * Remove the cinder.conf.sample file * Fix for typo in Purity Host create/delete methods in PureISCSIDriver * Fix a clone volume problem in VMAX driver * Updated from global requirements * Fix 3PAR driver attach error when host name missing * NetApp fix vol migration unusability * Updated from global requirements * Allow HostState to handle empty capabilities * Inherit RequestContext from oslo * Imported Translations from Transifex * Workflow documentation is now in infra-manual * Remove the check\_uptodate conf checks * Improve unit tests for cinder/volume/utils.py * Remove lio\_initiator\_iqns * Bring cinder up-to-date with new oslo libraries * VMware: Add missing storage profile requirement * Use object.property instead of object.dump()['property'] * NetApp 7mode NFS driver doesn't honor netapp\_vfiler option * Revert "Fix Brocade FC SAN lookup MITM vulnerability" * Add ability to zfssa driver to create multiple initiator groups * Improve testing of 
cinder/utils.py * Fix rpc initialization of cinder-manager volume * Fix 3PAR drivers attempt to locate existing host * Volume type access extension * Remove driver compatibility in volume manager * Don't use \_execute directly in brick/iscsi * Deal with tgt already exists errors * Fix find\_autodoc\_modules.sh to support OSX * Raise exception if invalid IP is specified * Fix check\_ssh\_injection in cinder/utils * Fix \_get\_disk\_of\_partition edgecase in utils * Adding volume driver for X-IO ISE * Remove Python 2.6 backwards compatibility code * Imported Translations from Transifex * Get the 'consumer' in a correct way for retyping with qos-specs * PureISCSIDriver:Handle delete called on already deleted volume * Add limited retype support for rbd * Add iSCSI Target objects as independent objects * Remove Python 2.6 classifier * Implementing the use of \_L’x’/i18n markers * Match mock.patch decorator with appropriate param * Correct misspelled words * Brick LVM: LV not found logging and error handling * etc: replace NullHandler by Python one * Don't use module importutils from oslo-incubator * Removing locks from 3PAR FC and iSCSI drivers * Update rally job files * Fix calls to assert\_called\_once in unit tests * Refactoring to allow addition of NetApp FibreChannel drivers * Add ability to create volume from image by image name * Fix exception message formatting * VMware: Set target ESX host for backing VM clone * Create "image\_conversion\_dir" before creating temporary file * Convert the DateTime into ISO8601 format for Ceilometer * Imported Translations from Transifex * Remove module timeutils * NetApp NFS and iSCSI: move zapi client logic into modules * Context cleanup * ProphetStor with pool aware cinder scheduler * Updated from global requirements * Imported Translations from Transifex * Fix messages in EMC VMAX driver with no translation * Scality SOFS: implement volume backup and restore * Fixup regressions in PureISCSIDriver log statements * Implementing the use of \_L’x’/i18n markers * Remove module jsonutils * Sync policy from oslo-incubator * Don't use module excutils from oslo-incubator * Sync latest versionutils from oslo-incubator * GlusterFS: Lock on a per-volume basis * Defining the variable "tmp" before try block * PureISCSIDriver needs to disconnect hosts before deleting volumes * context.elevated() should use copy.deepcopy() * Added missing rules in policy.json * Fix message translations for MSA common class * Switch Cinder to use oslo.concurrency * Use oslo.utils * Remove code for deprecated extension path * Imported Translations from Transifex * Update prerequisite packages in development docs * Change CHAP secret default length * Implementing the use of \_L’x’/i18n markers * Switch to oslo.serialization * Fix typo in SolidFire xDBVersionMismatch label * Fix a problem in creating consistency group in ProphetStor driver * Updated from global requirements * Disable Cgsnapshot APIs by default * Invalid GlusterFS share format error * allow image\_id for imageRef in create volume API v2 * Changing PureISCSIDriver to use % string formatting instead of .format * Update cinder.conf.sample to fix max db conn retries * CiscoFCSanLookupService passing command as string * Documentation Bug fix committed * Add i18n \_LX tags for relevant log levels in EQLX driver * Bump Req timeout to 30 seconds in SolidFire Driver * Remove cinder/compute/aggregate\_states.py * Remove deprecation warnings relating to api-paste * Mock isfile in test\_ssh\_missing\_hosts\_key\_file * Implementing 
the use of \_L’x’/i18n markers * Scality driver:use self.configuration instead of CONF * Mock cinder.wsgi.Server in TestWSGIService * Explicitly close requests obj in SolidFire Driver * Remove Mock class monkey patching * Add volume attribute support to volume created using clone, snapshot * Stop stacktracing on QuotaErrors * Stop stacktracing on InvalidInput exceptions * Add automatic creation and deletion of Purity hosts for PureISCSIDriver * Mox -> Mock for test\_block\_device.py * Fix Brocade FC SAN lookup MITM vulnerability * Implementing the use of \_L’x’/i18n markers * Imported Translations from Transifex * Updated from global requirements * Fix the LV NotFound situation for thin-type LVM * Fix wrapper to work with barbicanclient 3.0.1 * Retry remove iscsi target * Adding support for 'source-id' in 3PAR manage * Remove test\_barbican from keymgr tests * Implementing the use of \_L’x’/i18n markers * Capture exception when delete a volume detached * Add cinder support for IBM FlashSystem * Use urllib.urlencode instead of dict\_to\_query\_str * Disable python-barbicanclient 3.0.0 version * Activate pep8 check that \_ is imported * LIO: Fix UnboundLocalError in ensure\_export * Amend unused variables to assist pylint testing * Brick LVM: Rename get\_all\_volumes, further optimize * Fix wrong arg number for \_fix\_id\_migration issue * Cleanly override config in tests * Add debug output indicating provider\_location * Use look up service for auto zoning * Fix for debugging c-vol in PyCharm * CiscoFCSanLookupSerive uses extra argument in init * Fix SolidFire inaccurate model on migrated vols * Eventlet green threads not released back to pool * Add ability to update migration info on backend * Reserve 5 migrations for backports * Verify the full interface of the context object * IBM Storwize: Improve error message * Imported Translations from Transifex * LioAdm: Delete initiator from targets on terminate\_connection * NFS Security Enhancements: allows secure NFS environment setup * Brick LVM: Optimize get\_volume * TgtAdm: Don't change CHAP username/password on live migration * Update volume-type's quota when extending volume * Cinder api service doesn't handle SIGHUP properly * Handle DBConnectionError instead of Exception * Remove outdated \_ as a builting from pylintrc * ProphetStor driver consistency group support * Turn on Flake-8 Complexity Checking * Log a warning when getting lvs and vgs takes longer than 60 seconds * Add client\_socket\_timeout option * IBM Storwize driver: Add local variable assignment to "ctxt" * Updated from global requirements * Multipath commands with error messages in stdout fail to parse * NetApp fix to set non default server port in api * Correct the message string 2014.2 ------ * Fix LVM iSCSI driver tgtadm CHAP authentication * Export cinder volumes only if the status is 'in-use' * Fix LVM iSCSI driver tgtadm CHAP authentication * Export cinder volumes only if the status is 'in-use' * Revert "Relocate volume to compliant datastore" * Remove vol\_type\_id cast to str * Move SolidFire driver from httplib to requests * check the configuration item glance\_num\_retries * VMware: Fix initialization of datastore selector * Imported Translations from Transifex * Fix exception handling on test\_delete\_nonexistent\_volume * check the configuration eqlx\_cli\_max\_retries * Revert "Relocate volume to compliant datastore" * Remove deprecated use of gettextutils import \_ * Fix NetApp AutoSupport Shortcomings * HP 3PAR: Don't ignore extra-specs snap\_cpg when 
missing cpg * 3PAR migrate without losing type settings * 3PAR with pool-aware-cinder-scheduler * Fix display name change during backup restore * gitignore /.\* * Fixes docstring typos (Cinder) 2014.2.rc2 ---------- * Remove useless sslutils from openstack.common * Truncate fail\_reason to column length * Fix eqlx CLI output parsing on bad input * Eqlx fix NoSuchOptError for volume\_name\_template on clone * VMware: Bump driver version * Updated translations * NetApp fix eseries unit test mock clean * Make sure device support Direct before setting * Make sure device support Direct before setting * Eseries warn if multipath is not set for img xfer * GlusterFS: Remove unneeded conf import * ZFSSA iSCSI vol create fails with vol type option * Handle eqlx SSH connection close on abort * ZFSSA iSCSI driver cannot add multple initiators to a group * Fix race condition in ISCSIConnector \_disconnect\_volume\_multipath\_iscsi * Deprecate / obsolete NetApp volume extra specs * IBM Storwize driver: Retype the volume with correct empty QoS * Fixed Typo from modfied to modified * Updated from global requirements * Sync latest processutils from oslo-incubator * Imported Translations from Transifex * Updated from global requirements * coraid: allow setting default repository * Sync latest processutils from oslo-incubator * Windows SMBFS: Handle volume\_name in \_qemu\_img\_info * Refuse invalid qcow2 backing files * Windows SMBFS: Handle volume\_name in \_qemu\_img\_info * Refuse invalid qcow2 backing files * Clarify InvalidInput exception when the size is missing * Handle eqlx SSH connection close on abort * Deprecate / obsolete NetApp volume extra specs * Fix race condition in ISCSIConnector \_disconnect\_volume\_multipath\_iscsi * ZFSSA iSCSI driver cannot add multple initiators to a group * ZFSSA iSCSI vol create fails with vol type option * Open Kilo development 2014.2.rc1 ---------- * Fix race condition in ISCSIConnector disconnect\_volume * Adds openSUSE support for developer documentation * IBM Storwize driver: Retype the volume with correct empty QoS * VMware:Unquote folder name for folder exists check * VMware: cinder-volume create\_session fail at retry * Fixing format for log messages * Update /etc/cinder/cinder.conf.sample for memcache * VMware: Relocate volume to compliant datastore * Fix parameter miss in test\_snapshot\_metadata test case * Failed to re-detach volume when volume detached * Imported Translations from Transifex * IBM Storwize:Failed to retype from non-type to replication enable * Fix unnecessary WSGI worker warning at API startup * Remove XenAPI driver * Add required spaces in log messages * Fix ssh\_host\_key\_file default in help and config.sample.conf * Downgrade 'infinite' and 'unknown' capacity in weigher * Remove unused py33 tox env * Add unit test to cinder cgsnapshot api * DB migration 25->24 failed when dropping column * Allow scheduler pool information to be retrieved * Increase the 3PAR hostname length * Timeout triggers failures running tempest for ZFSSA driver * NetApp fix for default host type in eseries * HP 3PAR drivers should not claim to have 'infinite' space * Add tests for consistency groups DB migration * Verify requested size in volume.api create * Typo "asscoiated" should be "associated" * NetApp fix eseries unit test mock clean * Updated from global requirements * Set socket options in correct way * HP 3PAR: Allow retype when the old snapshot CPG (3PAR pool) is None * NetApp fix for controller preferred path * VMware: Add storage profile related 
unit tests * Check replication status failed for non-replication * VMware: Implement retype for VMDK driver * VMware: Improve datastore selection logic * Sync latest strutils from oslo-incubator for mask\_password fix * Remove executable bits on various files * Fix a problem with 'volume list' when 'all\_tenants=0' * IBMNAS: Remove call to set r/w permissions to all * Updated from global requirements * Getting iscsi\_ip\_address from cinder.conf * Handle config file with newlines and whitespaces * Volume types need to be specified when creating CG * Stop using intersphinx * Netapp drivers support for pool-aware scheduling * coraid: fix snapshot deletion * SQL scripts should not manage transactions * Add reset-state function for backups * Add test case for volume\_types.py * Block sqlalchemy-migrate 0.9.2 * Destroy Datera export target after detach * EMC VNX Direct Driver Consistency Group support * Update oslo.config and oslo.messaging requirements * Fixes Windows Volume Driver upload volume fails * Log an error on nfs mount failure * Sync service.py and its dependencies to Cinder * HP 3PAR configurable ssh-host-key-policy * Fix confusing exception message in NetApp iscsi driver * Delete consistency group failed * Fixing leaking sessions in 3PAR on attach/detach * Add Windows SMB Volume Driver * Netapp: fix multiple copies of cinder-volume * Add SMB Volume Driver * Fix possible race condition for accept transfer * Imported Translations from Transifex * Mock glance client object in version unit tests * Revert iSCSI Target objects as independent objects * Use right body for test\_create\_missing\_specs\_name * remove object in wsgi LOG.info * Don't clear \_mounted\_shares list in remoteFS while updating * Some tcp configuration paramters are ignored * Add filter to volume transfer REST api * Fix help for running specified unit tests * Deprecate the V1 API * Set default pool value to system in gpfs driver * Fixes Cinder fails to upload volume to vhd image * Unit test for restore with different hosts 2014.2.b3 --------- * During a restore send the restore request to the right host * Add Datera driver for Cinder * warn against sorting requirements * VMware: Remove redundant extend disk API call * VMware: Implement backup/restore for VMDK driver * Update the HP 3PAR default persona * Fixed Typo - from hypens to hyphens * Fixed typo from 'the the' to 'the' * Fix running unit tests with coverage * Support Volume Backup Quota * Volume Replication implementation for IBM Storwize/SVC * Add Fujitsu ETERNUS DX support * Pool-aware Scheduler Support * Small typos * Add QoS support to IBM Storwize driver * Fix unnecessary snap of glance image, with non-raw images * Driver for Fusion-io ioControl Hybrid array * Make ssh-host-key-policy configurable * Add Cisco FC Zoning plugin to the FC ZoneManager * Typo * Ignore pylint error 'hashlib' has no shaxxx member * Update oslo policy and its dependencies * Avoid using the disk cache on volume initialisation * Introduce Hitachi storage volume driver * XtremIO cinder iSCSI & FC volume drivers for Juno * Consistency Groups * Add retype method to xiv/ds8k driver interface * Fixes terminate\_connection live migration issue * Fixing 3PAR excessive FC port usage * Sync latest processutils from oslo-incubator * Sync latest strutils from oslo-incubator * Mock processutils.execute properly in test\_ibmnas * VMware: Disable suds caching * Adds volume replication methods to xiv/ds8k driver interface * Pass an empty context to the notifier * Add Oracle ZFS Storage Appliance 
ISCSI Driver * Add support in Cinder for volume replication - driver approach * EMC VMAX Driver Juno Update * Fix duplicate teardown to allow tox upgrade * Revert test\_rootwrap\_filter to avoid python2.6 test failure * Improve Cinder API internal cache interface * Allow backup-to-swift to take swift URL from service catalogue * Integrate OSprofiler and Cinder * Fix variable name in api/v/snapshot.py * Honor volume:get policy * Extending IBMNAS driver to support NFS based GPFS storage system * GlusterFS: Use image\_utils for tempfile creation * Modify error code compatible with Mac OS * Cache snapshots in request for extension * Remove redundant temporary\_chown from IetAdm * Failed to initialize connection * Mock out image source file in image\_utils tests * Provide a quick way to run flake8 * Ignore No value passed for parameter 'dml' message * Create RemoteFSSnapDriver class * VMware: Handle exceptions raised by image update * Adds barbican keymgr wrapper * Imported Translations from Transifex * Catch vol not found in SolidFire transfer * Fix LOG string formatting in image\_utils * Change the froce delete volume flage to True * Update ref used for notifications * HP 3PAR manage\_existing with volume-type support * Add iSCSI Target objects as independent objects * Rewrite ionice command filter using ChainingRegExpFilter * Use abstract class for the backup driver interface * Put result in quotes * Fix exception handling in PureISCSIDriver * Catch DBDuplicateEntry instead of IntegrityError * Enable import group hacking rule * Actually encode the SolidFire json dump result * Sync latest oslo-incubator log for debug fixes * Enable F402 checks and fix violations * Prevent tenant viewing volumes owed by another * VMware: Check snapshot and rename backing * Fix bad indentation in netapp and san.hp volume drivers * Ignore HTTP\_PROXY during test requests * Issue one SQL statement per execute() call * Add ProphetStor DPL Storage server volume driver for Cinder * Add timer info for copy operations * Make manage.py usable * Enable H104, F841 hacking rule and fix violations * Adds CA cert file path option for glance connections * Enable Swift backup driver for auth 2.0 * Updated HACKING.rst so that it is accurate * Update help strings * Add hacking check for use of LOG.audit * Imported Translations from Transifex * Use oslo.i18n * Add CHAP support for 3PAR ISCSI * EMC: Fix minor issue in VNX driver and unit tests * fix a small typo in development.environment.rst * Do not translate debug messages * Fixing LeftHand live migration error * Improve regex for \_ import hacking check * General cleanup of unused objects * RPC client lazy initialization * Fix snapshot id for snapshot\_destroy * Use auth\_token from keystonemiddleware * Fixes wrong usage of mock.assert\_not\_called() * Fix error log level in restore-backup routine * Add retry\_on\_deadlock to db update methods * Fix unit test test\_import\_record\_with\_verify * Change the exception type for Storwize/SVC driver * VMware: Update default task\_poll\_interval value * Change logging level AUDIT to INFO * Fix solidfire accept\_transfer * VMware: Volume from non-streamOptimized image * Enable checks for E711, E712 and E713 * Add note that some checks are disabled on purpose * VMware:Disk type conversion during clone backing * VMware:Support for attaching disk to backing * Change 3PAR delete message when volume is busy * Move generate\_password into volume utils * Move SSHPool into ssh\_utils.py * Fixes migrate\_volume\_completion * Change corrupted 
spelling mistakes * EMC VNX Direct Driver Update for Juno * Storwize/SVC can not get the right host * Skip incompatible test on OSX * Have task/flow logging go to a separate log file * fix atom link in XML Version API * Update ref used for notifications * Fix glance metadata SQL query performance * Add return of updated object on update from DB * fixing the iSER transport protocol when using LVMISERDriver * Add hacking check for vim headers * Get updated model info on volume transfer * Introduce iSCSI driver for Pure Storage FlashArray * Further cleanup of reservations index * Sync log from oslo-incubator for isEnabledFor fix * Modify the index migration slightly for backport * Remove cinder-clear-rabbit-queues * Remove cinder-rpc-zmq-receiver * Remove reattach function in cinder-manage * Set python hash seed to 0 in tox.ini * HP 3PAR retype implementation * Add index for reservations on (deleted, expire) * Remove Hyper-V dependency in the Windows Cinder Volume Driver * Fix no handlers could be found issue * Add storageHost content to volume messages * Add hacking check for explicit import of \_ * Make manage/unmanage admin only * Avoid OSError in get\_blkdev\_major\_minor with network filesystems * VMware:Support for create disk and disk descriptor * Implement import/export for SolidFire Driver 2014.2.b2 --------- * Implements new 'bootable' option for manage existing volume * Add hacking test * Fixes Cinder volume upload to image on windows * Add explicit import of \_ to hp\_3par\_fc and iscsi * Adds storwize\_svc\_npiv\_compatibility\_mode flag to Storwize/SVC driver * Switch to use oslo.db * Add additional explicit imports of \_ where needed * Fix failure of source volume deletion in migrate\_volume\_completion * Remove hard coded reference from gettextutils.py * Enable lazy translation for Cinder * Explicitly import \_() in Cinder code * Fix performance issues with brocade zone driver * Don't leave snapshots on the floor * Add some log info for NoValidHost * Use immutable default values for args * Update cinder generate\_sample script * XIV volume manage/unmanage support * Add affinity/anti-affinity filters * Bump oslo.rootwrap to 1.3.0.0a1 for Cinder * Mock out time.sleep in storwize unit tests * Fix the section name in CONTRIBUTING.rst * Cinder-api service throws error on SIGHUP signal * Clean up base Volume Driver * Fixes EqualLogic volume live migration * Correct misspelled word * Remove definition of Python Source Code Encodings * Fixed some typos in the cinder codebase * Sync gettextutils.py from oslo-incubator * Use PyCrypto to generate randomness passwords * Remove $sqlite\_db from default database connection * Sync processutils and log from oslo * Configure write cache option of tgtd iscsi driver * Enhance docstring for iscsi\_helper * Updated from global requirements * Ensure FC ZoneManager is called * Remove cinder.context warning logging * sync periodic\_task fix from incubator * Slow down Storwize driver initialization * Updated from global requirements * Imported Translations from Transifex * Imported Translations from Transifex * volume\_image\_metadata missing from volume list * Correct lvm\_mirrors help message * Ceph rbd volume manage/unmanage support * Enable E121,E122,E123,E126,E128 hacking rules * Replace tearDown with addCleanup - Part 3 * Fix begin\_detach logic * Use (# of CPUs) osapi\_volume\_workers by default * Restore osapi\_volume\_workers config option * Fix host option isn't set when using multiple backend * Add optional\_args to fix Volume Import 
failure * 3PAR Only remove FC Zone on last volume detach * Ensure rbd connect exception is properly caught * Add cinder-manage cmd to update host column * Add cinder volume driver support for Nimble Storage * Sync processutils from oslo with deps * Synced jsonutils from oslo-incubator * Enable hacking rule E111,E112,E113 * Bump minimum hacking version to 0.9.2 * Only warn about deprecation warnings once * Fix dropped exception for create\_export in vol manager * Misuse of i18n log translation to output error message * Support Volume Num Weighter * Fix docstring for snapshots API * Don't use ModelBase.save() inside of transaction * Fix unsaved exception in backup/drivers * 3PAR volume manage/unmanage support * Add cgroups related commands to rootwrap filters * Use a task subclass instead of a functor + task wrapper * Fix BrcdFCSANlookupService to iterate multiple switches * GlusterFS: Handle deletion of snapshot with no backing file * Fixed data copy issue of volume/driver.py * Make rbd driver string encoding checks consistent * Remove check\_volume\_az\_zone functor and associated passing * Minor cleanups in test\_volume * Fix retyping attached volumes requiring migration * Update \_resize\_volume\_file() to support appropriate permissions * test\_storwize\_vdisk\_copy\_ops fails if green thread context switch * VMware:Fix params for copy-image-to-volume * VMware: Optional create backing parameters * Fixes cinder volume from snapshot on Windows * Fixes cinder volume create on Windows Server 2012 R2 * Fixes cinder volume from image on Windows * Use oslo-incubator module units * Attach log listeners to other engines * Adding filter options to backup list * Remove global conf settings from iscsi helper * Add genconfig tox job for sample config file generation * Fix nfs\_shares config file parsing of spaces * GlusterFS: Various unit test improvements * vmware: Force chunked transfer for upload-to-image * Sync the latest common db code from oslo * Fix order dependency of admin metadata * GlusterFS tests: Mock out compute, don't load novaclient * Updated from global requirements * debug level logs should not be translated * Implement extend volume in NFS driver * Fixes an issue with 'dd' bug from Illumos repo * Handle the case where az is disabled/removed * I/O rate limit for volume copy with qemu-img convert * I/O rate limit for volume copy with dd * glusterfs: Honor mount options when restarting cinder service * Add keyword argument missing at some exc.HTTPError subclass * Made provision for providing optional arguments * Removes unecessary call to rbd.Image * Add task/flow listener support around the volume api flow * Retry lvremove with ignore\_suspended\_devices * Allow reset-state on attach and migration fields * Implements HDS-Cinder HNAS Drivers * vmware: Fixes VMDK volume incompatibility issue * Remove unused oslo module and adjust opentstack-common.conf 2014.2.b1 --------- * Copy custom properties to image from volume * Add strip size support to rbd driver * Fix log capturing fixture * Fix Brocade FC driver's use of the fc\_fabric\_names * LIO: Don't add ACL for local initiator name * Delete image on upload-to-image failure * Ensure flushing of IO prior to removing FC device * Fixed the comment spelling error - voumes to volumes * Remove Quota Exception Stack Traces from Cinder Logs * Use os.urandom in volume transfer * Remove check\_{attach,detach} from volumes API * Make begin\_detaching fail if volume not "in-use" * hp\_lefthand\_rest\_proxy no handler for logger during tests * 3PAR 
volume detach with host in a host set * Update cinder.conf * Sync periodic\_task from oslo-incubator * Remove second get call to list/show volumes * Fix a message format error in migration cleanup * Add support for z/VM driver * Handle volumes no longer existing in resume delete * Fix ISER scan retry option * Only create volume with an active image * Updated from global requirements * Ensure metadata is saved before updating volume status * Add XML deserializer for qos\_manage delete\_keys API * Use error instead of warning to log mount exc * Allow host config to be overriden in backend * Remove all mostly untranslated PO files * Updated from global requirements * Remove create\_from\* functor jump table * SSHPool in utils should allow customized host key missing policy * Check whether O\_DIRECT is supported to iflag and oflag separately * Set volume usage audit period to not NoneType * BrcdFCSanLookupService should allow customize host key and policy * NetApp fix eseries concurrent vol map failure * NetApp fix attach fail for already mapped volume * Imported Translations from Transifex * Convert SolidFire Capacity response to GiB * eliminate the need for hplefthandclient in tests * Fix solaris\_execute in SolarisISCSIDriver * Fix for solidfire driver to use reserved\_percentage * Fix retyping volume that has volume type None * eliminate the need for hp3parclient in tests * Add missing methods to FakeISCSIDriver * Add mailmap entry * Fix wrong exception reference * Limit formatting routes when adding resources * Use oslo network utils function to set tcp\_keepalive * Properly initialize rpc in cinder-volume-usage-audit * Add exception handling for copy\_volume\_to\_image() * NetApp NFS: Do not reference dst\_img\_local before assignment * Remove explicit dependency on amqplib * Fixes an issue with 3PAR attach * Ensure that lun\_id is an int * Implement validate\_connector in FibreChannelDriver * Fix broken version responses * Fix double "the" in Cinder quota warning * CinderException args to strings when exceptions * Fixed 3PAR driver issue finding correct vlun * Storwize/SVC driver detach volume failed * Add disabled kwarg to service\_get\_all\_by\_topic * Add rally job * Improve consistency of help strings * Remove unused volume instance\_uuid methods * Cinder list does not filter admin metadata * Specify lld in tgt config backends * Replace tearDown with addCleanup - Part 2 * Keep volume available if retype fails due to quota * Remove unused 3PAR driver method * Fix bad indentation in tests * Add set-bootable command * Fix handling multiple WWPNs on preferred FC node * Fallback to None on missing Glance image attrs * Remove old driver mappings from Havana * Adjust sample config for keystoneclient 0.8.0 release * Remove unused reservation methods from db.api * Re-raise exceptions in upload-to-image * Update Cinder dev doc * vmware: Fix problems with VIM API retry logic * Create volume fail when image id is "" * Use cached db object in volume\_mig\_status ext * Add exception catch if Storwize/SVC driver failed when retyping * Replace tearDown with addCleanup - Part 5 * Replace tearDown with addCleanup - Part 4 * Enable flake8 H303,H304 checking * Storwize/SVC driver crashes when check volume copy status * Switch over to FixedIntervalLoopingCall * Correct metadata ordering issue in tests * driver.create/remove\_export() require elevated context * Inform about instance\_uuid in volume usage notification * Check for silent failure of tgtadm remove * GlusterFS: Delete active snapshot file on 
volume delete * Fixes HP LeftHand driver with Paramiko 1.13.0 * Fixes cinder error state volume delete on Windows * Added unit test cases for \_is\_share\_eligible in NFS driver * Log initialize\_connection error before remove\_export * Force detach should only be an admin api * Updated from global requirements * Change iogrp property when retyping for Storwize/SVC * Check that all po/pot files are valid * Allow deprecated volume update keys in v2 * \_translate\_from\_glance() can cause an unnecessary HTTP request * Adds ionice command permutations to rootwrap filters * Append nas\_opts to IBMNAS\_NFSDriver configuration * Enable flake8 F841 checking * GET details REST API next link missing 'details' * GlusterFS: Delete volume-.info file when volume is deleted * Fix Jenkins translation jobs * Fixes HostTestCase failures due to slow test run * Imported Translations from Transifex * Updated from global requirements * Fixes cinder volume delete on Windows * Fixes cinder volume attach on Windows * Open Juno development 2014.1.rc1 ---------- * Imported Translations from Transifex * Changes to correct name of missing NetApp license * NetApp cmode nfs: Fix QOS extra spec * NetApp cmode iscsi: Fix QOS extra spec * Fixes a problem in attach volume in EMC driver * Update config generator from OSLO * Pass the mirrorlog option as two arguments * Import request\_id middleware bug fix from oslo * Netapp iscsi: allow snapshots with unspecified block range * Serialize the notification payload * Disable oslo.messaging debug logs * Updated from global requirements * Update tests to use CONF.set\_override * Adds xiv\_chap to xiv/ds8k driver configuration * vmware: Use SessionIsActive to find stale session * init\_host should be called before RPC consumer is created * Add RequestContextSerializer for rpc notifications * Allow NetApp iSCSI driver to sub-clone large volumes * Can't force-create snapshot by an non-exist error volume * Remove rootwrap module * Simplify test force delete snapshot unit test * ceph backup driver: improve log messages * resolve KeyError for IBM Storwize/SVC driver * vmware: Remove deprecation warning from VC driver * Remove unused method from NetApp iscsi driver * vmware: Remove pbm\_default\_policy config option * VMware: Implement vmdk extend\_volume * Fix create\_export/remove\_export in driver.py * Imported Translations from Transifex * vmware:Ignore inaccessible/inMaintenance datastore * Ensure name is utf-8 when deleting rbd vol or snap * Use six.moves.urllib.parse instead of urlparse * Use the error\_out\_volume from flow common instead * Revert "Re-enable lazy translation" * Sync latest Oslo code for imageutils * Don't send untextified exc to webob * Imported Translations from Transifex * Updated from global requirements * Use debug level logging during unit tests * Sync log.py from oslo-incubator * Fixed some FCZM unit tests hacking issues * Add missing config values for vmwware\_vmdk test * cinder-rtstool imports a not existing module * get volumes with limit and filters does not work * Fixes cinder-volume service startup on Windows * Fixed nova VM live migration issue with 3PAR * Adding domain to context * Switch over to oslosphinx * Add libffi-dev to list of packages to install in dev env * VMware: Take the volume size from the user input * Fix exception message of CoraidESMConfigureError * vmware: Mark VMware ESX vmdk driver as deprecated * Fixes ssh-injection error while using chap authentication * Generate config samples for oslo.messaging * Add conversion types in 
some strings * Port to oslo.messaging * Updated from global requirements * get volumes API does not handle limit=0 * EMC SMI-S delete snapshot unit test takes too long * 3PAR: Support extend volume based on snapshot * Fixed spelling error - accomodate to accommodate * GPFS unit tests: increased coverage, uses mock * Clean Up EMC VNX Direct Driver in Cinder * gpfs driver: fix logging problems * Convert cinder utils tests to use mock * Include next link when default limit is reached * Re-enable lazy translation * Sync latest Oslo config code for i18n * Fix HP LeftHand Performance issue with AO * NetApp implementation for copy offload in clustered nfs driver 2014.1.b3 --------- * Remove str() from LOG.\* and exceptions * Storwize volume manage/unmanage support * Volume manage/unmanage support * Add user defined extra capabilities * remove \_check\_container\_exists from Swift backup driver * Add initiator\_target\_map for IBM Storwize/SVC * Fix HP LeftHand migration with snapshots * Updated from global requirements * Fix docstring ordering * Typo corrections for test files in cinder * vmware: PBM wsdl file configuration * vmware: default global pbm policy configuration * vmware: check datastore availability during create * vmware: Storage policy based volume placement * Add EMC VNX Direct Driver in Cinder * gpfs volume driver backup file access fixes * Check if snapshot is deleted cleanly * Restrict rootwrap find filter for IBM NAS and GPFS * Add initiator target map in EMC SMI-S FC driver * GlusterFS: Set permissions on qcow2 snapshot files * Make EMC SMI-S driver unit tests faster * change time.sleep to use loopingcall * Change RBD delete failure log level to warn * Updated from global requirements * Update Oslo wiki link in README * Add versioning output for the FC Zone Manager * Fix volume stats with multiple LeftHand clusters * Export and import backup service metadata * Don't clear host\_state\_map when scheduling * Add volume metadata backup suport to swift driver * Add optional ionice to volume clearing process * Quota delete operation in cinder * Restrict rootwrap find filter for NetAppNFS driver * GlusterFS: Increase snapshot delete job timeout to two hours * Segment LUN clones in NetApp iSCSI * updating testing readme with more current information * Remove unused variable * Python 3: replace "im\_self" by "\_\_self\_\_" * Update FibreChannel Zone Manager config * Change warning message in NetApp driver for vsadmin creds * 3PAR: Fix extend volume GiB to MiB * TSM backup driver changes to support file backup * Fix silly error in comment * 3PAR: Create volume from snapshot with larger size * Fix free\_capacity reporting in SolidFire driver * Fix test\_delete\_should\_not\_.. 
to assert something * Replace assertEqual(None, \*) with assertIsNone in tests * Replace tearDown with addCleanup * Use six.StringIO instead of StringIO.StringIO * Implement retype in IBM GPFS Driver and refactor * 3PAR: Delete missing snapshot stuck in error\_del * Added 3par initiator target map for FCZM * Fix race in test\_delete\_backup * Driver for IBM SONAS and Storwize V7000 Unified * Fix webob.exc.HTTPForbidden parameter miss * Add snapshot related data to limits api * Storwize/SVC: Change volume copy task to async * Fix FC connection handling in the storwize driver * Sync log.py from oslo * Replace httplib.HTTPSConnection in unittests * Add support for FC zone lifecycle management * Give a way to save why a service has been disabled * Remove old driver mapping deprecation * 3PAR: Backend assisted volume migrate * Add HP MSA Fiber Channel driver * Ensure return for db api functions * HP LeftHand Backend assisted volume migrate * Add support for qos\_specs feature to 3PAR drivers * Add x-openstack-request-id to cinder responses * Update 3PAR drivers to pass cert test * Remove unused function * Use len instead of for-loop to get the end index * Ensures NetApp iSCSI driver correctly compares int values for size * Sync request\_id, request\_utils for cinder * IBM XIV and DS8000 Driver reorganizing (IBM dir) * Sync oslo imageutils, strutils to cinder * GPFS: Implement volume backup and restore * Fix missing package dependency for requests * test\_volume unittest fails if ran only this module * Fix invalid facilities documented in rootwrap.conf * Use six.moves cStringIO instead of cStringIO * NetApp api fix structure conversion methods * Add support for backing up volume metadata * Imported Translations from Transifex * Replace assertEqual(None, \*) with assertIsNone in tests * Add encrypted flag to volumes * Implement retype in HP LeftHand driver * Cast the quota set values to integer before checking their validity * Remove rabbit\_notifier (Sync notifier with oslo d6e1ba7) * Remove dependent module py3kcompat * Add EMC SMI-S FC Driver in Cinder * Fix wrong example of "nova\_endpoint\_template" * NetApp eseries iscsi driver implementation * Update gpfs driver volume creation process * Deactivate LV before removing * VMware: changing volumeops tests from mox to mock * Remove unused exception * Add searchDepth when getClusterInfo called * Check type argument on create from source and snap * Rename Openstack to OpenStack * Removes use of timeutils.set\_time\_override * Removed unused context in \_extend\_snapshot method * Remove unused methods * Storwize/SVC: Check wwpn not None * Changes to cast variable as integer as XML API fails * Ceph backup driver tidyup * Move create\_, remove\_ and ensure\_export from drivers * New HP LeftHand array iSCSI driver * GlusterFS: Fix create/restore backup * Allow operators to customize max header size * Fixup persistence file not found on tgt remove * Remove tox locale overrides * Add method for unit tests to set logging level * Brick support for pNFS * Storwize/SVC: Fix races in host-related functions * Fix cinder-backup volume restore with ceph driver * Dont set error\_extending status on API extend call * Fix misspellings in cinder * Fixes cinder failed to create/restore a backup with NFS driver * Brick fix BrickException message formatting * lvm: unhandled exception when migrating volume * Implement retype in SolidFire driver * Validate the quota in the API layer for volume extend * Rename self.tgtadm to self.target\_helper * Fix LVM migrate\_volume 
tests * Brick connector fix for GlusterFS * Updated from global requirements * vmware: intermittent failure in test\_vmware\_vmdk * RBD unit test improvements * Move clear\_volume back to it's own method * Don't use shred for volume\_clear=zero * Nexenta iSCSI driver: fixed volume\_migration * Move clear\_volume method to volume.utils * Add update support to volume type encryption * LVM: log thin pool name and size upon creation * Remove create\_export from volume create * vmdk: To add missing time unit in driver option * Update SolidFire Driver to use cinder's units * Update cinder.conf.sample for new keystone client * LVM: remove redundant casts to float * On create\_volume flow get rid of host parameter * Imported Translations from Transifex * Allow spaces in host names in the storwize driver * Remove a catching exception during delete\_volume * Remove SSH code from 3PAR drivers * Remove unused task from manager create\_volume flow * Add support for special char in volume metadata * Brick LVM: Handle space info as numeric types * Set a sane default for state\_path * Fixes incorrect key in dictionary * Stop volume\_type\_encryption creation when in use * Revert initialize\_connection changes * Convert ceph backup unit tests from mox to mock * VolumeManager: initialize even if a volume can't be found * Add create\_iscsi\_target stub to TargetAdmin * 3PAR iSCSI volumes attach to single nsp * Extra\_spec containing '/' can't be deleted * LVM: Robustify skipactivation detection * Make sure report\_interval is less than service\_down\_time * Redundant check in os-migrate\_volume\_completion * Updated error messages for volume clone * Imported Translations from Transifex * Updated from global requirements * Fix up the way we do iqn variable in unit test * Catch new iscsi exception * Delete volume transfer in volume\_destroy function * Create structure of flows' packages * Fix docstring and remove unused variable * GlusterFS: Fix deadlock in volume clone * Enable multi-process for API service * Sync RPC module from Oslo * Sync common modules from Oslo * Sync py3kcompat, sslutils, versionutils from Oslo * Sync gettextutils from Oslo * Storwize driver cleanup * Add support for retype in Storwize/SVC driver * Add notifier events to cinder volume rename, reset-state 2014.1.b2 --------- * Convert RBD unit tests to use mock instead of mox * Fixed inconsistency in iqn * Update HACKING.rst with regard to mock usage * Remove unused method 'is\_key\_value\_present()' * Remove unused config options * Remove unused exceptions * Do not show quota of removed volume types in Default Quotas panel * Fix up calculating space info for mirrored volumes * Rename \_\_init\_\_.py to create\_volume.py * Use oslo.rootwrap library instead of local copy * Fix UnboundLocalError in TgtAdm.update\_iscsi\_target * Update host on driver retype * Remove unused variable in restore\_backup method * Ensure hostnames are converted to IP for comparison * Add Backup Service to 'cinder-all' script * Remove env from rootwrap filter * Allow user to specify audit period * Fix exception log msg in attach volume method * Fix import log\_handler error with publish\_errors set * Use a mirrored mirror log * Added missing accept\_transfer to FC * Register EMC config options globally * Fix os.getlogin() problem with no tty * Updates for version list to show correct references * Fix cross-import bug in cinder.db.sqlalchemy.api * Pull Bug #1263122 fix for service module from Oslo * Pull latest scheduler change from Oslo * Use loopingcall from 
openstack-common * Use a \*args pass-in instead of a list one * Remove unused variable in os-extend api * GlusterFS: Synchronize additional operations * Move driver initialization check into the method * Update cinder.conf.sample for keystoneclient change * Transfer creation doesn't support 'name' via xml * Change default policy for volume\_extension:volume\_tenant\_attribute * Print driver exception on retype * Drop Chance/SimpleScheduler Implementation * Fix sqlalchemy bug in transfer\_get\_all\_by\_project * Fix sheepdog copy\_image\_to\_volume method * NFS/GlusterFS: Skip incorrectly formatted shares * Remove unused message from iogrp\_data setup * Remove legacy config option 'connection\_type' * Modify default prefix for solidfire account * Add time\_type dictionary to test\_migrations * 3PAR: Raise Ex when del snapshot with depend vol * Add bool\_type dictionary to test\_migrations * Hiding postgresql password in connection string * Fixed a problem in iSCSI multipath * Fix the invalid argument of webob.exc.HTTPBadRequest * Add ability to modify volume type * Fix downgrade in 002\_quota\_class.py for MySQL * Removed deprecated config option hp3par\_domain * Fix Brick LVM test command parsing * Update V2 API to return detailed volume information on create * LVM: Fix "intialized" typo in warning msg * Imported Translations from Transifex * removed unused context in check\_\*\*\* methods * add 'force' verification in \_volume\_upload\_image * Raise max header size to accommodate large tokens * LVM: update iscsi target on volume attach * LVM: Activate Thin Pool LV upon initialization * GlusterFS: Use correct base argument when deleting attached snaps * Switch to Oslo's config generator * Removed copyright from empty files * Remove unused fake\_flags * Replace Simple/Chance Scheduler with FilterScheduler * Reduce the redundant variable declarations * Imported Translations from Transifex * Remove vim header * Redundant size check in volume restore api * Add AllocatedCapacityWeigher * Imported Translations from Transifex * Adding helpful URL links to README.rst and HACKING.rst * Handle terminate\_connection() exception in volume manager * Empty files shouldn't contain copyright nor license * Bugfix missing foreign key removal for mysql * Fix spelling errors * Imported Translations from Transifex * Add additional metadata as key-value pairs in 3PAR * Handle initialize\_connection() exception in volume manager * Output Strings of bin/\*.py should support i18n * Add qos\_specs support to solidfire driver * Service launcher method docstring corrected * Fix QoS information in initialize\_connection() result * Fix and enable gating on F401 * Only reverify backing lun when create backing lun * Set volume\_dd\_blocksize configurable on per-driver basis * Add exception logging if flow creation fails * Remove dynamic default on sf\_account\_prefix * make delete recovery messages debug level * Remove unused code from volume manager (reset\_stats) * Pylint fixes for GlusterFS driver * Pylint fixes for Brick iSCSI/LVM * 3PAR FC: add ability to add WWNs to host * Imported Translations from Transifex * Adjust import order according to PEP8 imports rule * Do not clone non-raw images in rbd backend * Adds unit tests for drivers.rbd.RBDImageIOWrapper * [Netapp/Nexenta] Move registration of config options * Fix and enable gating on H402 * LVM: Activate source snap LV when cloning from volume * Remove test that was no longer used for long * make help text more meaningful for cinder docs * Switch create volume 
commands to Taskflow 0.1.1 * Use mock for scheduler tests * Remove LANG=C from rootwrap invocations * Add the driver name to get stats log output * Remove hashbang (#!) at beginning of python modules * Fix KeyError while generating a WSGI response * Updated from global requirements * Lazy log the fixed\_key warnings * Add disabled\_reason field to services table * Catch TypeError when new\_size is None on extend * Sync matchmaker\_ring.py from oslo-incubator * Add RBD test for volume not existing during delete * Sync rpc fix from oslo-incubator * Returns thin pool free space calculated from actual usage * Brick LVM: Set C locale when gathering PV info * LVM migration: Check if name is equal to dest\_vg * Convert lvm\_mirrors to int * LVM migrate: Use keywords for the brick instance * LVM: Create thin pools of adequate size * GlusterFS: Remove glusterfs\_disk\_util option * Catch ImageBusy exception when deleting rbd volume * Adds lock for create from vol/snap to avoid race conditions * Fix docstring for snapshot\_metadata controller * Fixes case insensitive for resp body * VMDK:Using host mount info for datastore selection * Fixes case insensitive for resp body 2014.1.b1 --------- * All API controllers inherit from wsgi.Controller * delete.start/delete.end notification for hostless * Fix typo/misspelled words * Update hacking to hacking>=0.8.0,<0.9 * Add more logging to migrate\_volume\_completion * Use model\_query() in db.\*\*\*\*\*\_destroy * Change method name to test\_get\_volume\_stats * Adjust RBD delete log level * Bump to sqlalchemy-migrate 0.8.2 * Add unit tests for volume reserve and unreserve * Don't stop volume service for failed re-export operations * GlusterFS: Complete snapshot\_delete when info doesn't exist * Fix typo in cinder * Imported Translations from Transifex * Add attach/detach notifications * Removes dublicated assert from test\_migrations.py * Use assertAlmostEqual instead of failUnlessAlmostEqual in unit tests * Fixing check order for empty body in get\_body() * Updates .gitignore * Remove unused import and CinderNode sqlalchemy model * Fix suppressed exceptions for migration downgrade * Fix the wrong verification for 'readonly' * Parse out '@' in volume['host'] to do discovery * Add volume migration code to Nexenta iSCSI volume driver * Handle NotFound exception in snapshots API code * Add chance weigher to scheduler * Redundant body validation for volume\_upload\_image * Imported Translations from Transifex * Fix Storwize terminate\_connection with no host * Fix \_update\_volume\_stats typos * Remove the redundant judgment for 'restore' * Make volume\_glance\_metadata\_create compat with DB2 * GlusterFS: Set correct permissions for volume file created via clone * GlusterFS: Ensure Cinder can write to shares * The param 'readonly' is incorrect checked * Fix docstring for Snapshot model * Make sure report\_interval is less than service\_down\_time * Ensure 'status' in update\_snapshot\_status * Update openstack/common/periodic\_task * Initialize and terminate connection raise 500 err * Fix docstring for \_migrate\_volume\_completion * Migrate volume should check para "host" in request * Continue to delete volumes that DNE in rbd backend * Pull latest service module from Oslo * Add greenthread.sleep() to parent wait() * Fix ./run\_tests.sh -V --virtual-env-name * Pass the size when fetching image in xenapi driver * Remove unused code in test\_admin\_actions.py * Add support for extend volume in GPFS vol driver * Remove dead code from test\_get\_volume\_stats() * 
Remove suffixes from LVM brick test vgs output * Subclass vendor specific exceptions * Don't do glance v2 calls when config is set to v1 * LVM: Activate source LV before cloning from it * Add default quota class into DB during migration * To fix test\_get\_dss\_rp in test\_vmware\_vmdk.py * Fix typo in cinder.volume.API * NetApp fix for vsadmin role failure for ssc * Create snapshot throws 500 Internal Error * Fixes inappropriate error message * NetApp fix free space as zero during 1st vol stats update * Add valid check and unit tests on quota class * GlusterFS: Synchronize operations that manipulate qcow2 data * Check only our VG name when testing if VG exists * Update quota-set throw 500 error * Using HttpNfcLease to transfer vmdk files * Adds extend volume to Dell EqualLogic Driver * Remove the use of common.uuidutils.gen\_uuid * Imported Translations from Transifex * Do not allow bad keys while updating quota * Use cached volumes in REST API extensions * Enable object caching in cinder REST API requests * Nexenta iSCSI driver: extend volume stats of \_update\_volume\_stats * Fail when image is bigger than the volume * Update URL for global HACKING document and remove duplicate section * Retrieve volume image metadata using single query * Add call to retrieve image metadata for volumes in bulk * Do not remove volume silently if GPFS is unmounted * Report zero capacity if GPFS is unmounted * Nexenta NFS driver refactoring * RequestContext initialization failed in cinder * Nexenta: Remove snapshot after volume-clone deletion * Don't use deprecated module commands * Remove dup of LVMISCSIDriver in LVMISERDriver * Remove duplication of ISCSIDriver in ISERDriver * Support volume\_readonly\_update using XML format * Fix typo in test\_check\_ssh\_injection\_on error test * Remove lvm-thin pool\_size config option * Examine if GPFS is mounted before writing data * Imported Translations from Transifex * Remove unused db calls to fetch original metadata * replace u\2013 char with dash * Sync log from oslo * Add tests for LVM -cow clearing * clean up numeric expressions in test * Fixes typo in method name \_notify\_voloume\_type\_error * Allow spaces in quoted SSH command arguments * Use pipe between ceph backup diff export/import * Imported Translations from Transifex * Add missing space to num\_iser\_scan\_tries text * Add cinder.db.migration.db\_initial\_version() * remove rundundant lockfile requirement * Imported Translations from Transifex * Revert "Brick connector fix for NFS drivers" * Update my mailmap * GlusterFS: set correct filename when cloning volume * Handle NotFound exceptions in API * Unit test fails in pbuilder environment * Updated from global requirements * Check if dir exists before calling listdir * Rename "rtstool" to "cinder-rtstool", add dep * Downgrade target create failure mesg to warning * Nexenta iSCSI driver: Refactor create\_cloned\_volume * VMware: Registering vmdk opts in global space * Brick connector revised fix for NFS drivers * Nexenta drivers ignore "does not exist" exception * Add openstack/common/crypto from OSLO * Fix volume transfer href issue * Remove duplication of brick.iscsi in brick.iser * Drop auth\_token configs for api-paste.ini * NetApp unit test fail fix for http\_proxy * Revert "remove cinder-rtstool because of rtslib dep" * Let GPFS driver to rename snapshot with root permission * Imported Translations from Transifex * NetApp fix for 7mode iscsi volume stats * Brick connector fix for NFS drivers * NetApp fix ssc volume filtering inconsistency * 
Updated from global requirements * NetApp fix mirrored stats * NetApp fix for compression and dedup stats * Fix generate conf script can't handle multistropt * Add auth\_token settings to cinder.conf.sample * Add extend\_volume for Huawei drivers * Update openstack/common/notifier * Imported Translations from Transifex * Apply six for metaclass * Provide gettext \_ in missing locations * Nexenta NFS driver: caching for appliance volroot * Cinder extension to add used resources in absolute limits * Fix Huawei HVS driver AttributeError * Storwize: Fix iogrp availability check * Imported Translations from Transifex * Uses oslo.imageutils * Don't zero out thin provisioned LV's on delete * Fix lvm.extend\_volume to pass Gig suffix * Nexenta NFS volume driver folder auto sharing * FK lookup failures during migration * Initialize shares variables for RemoteFsDriver(s) * Fix indentation errors in drivers * Imported Translations from Transifex * Fix Huawei drivers to support other host OSs * Fix all occurences of H404 Hacking warning * Imported Translations from Transifex * VMware: Fixed upload-to-image for available volume * Refactor Nexenta iSCSI driver * Remove unused 'initiator' imports * Fix tests to work in debug mode * Updated from global requirements * Remove whitespace from cfg options * Remove option count from sample configuration * improves lvm version parsing for customised builds * Fix typo in cinder.volume.drivers.nexenta.\_\_init\_\_ * Remove obsolete redhat-eventlet.patch * long flashcopy operation may block volume service * Support Huawei driver upgrade from grizzly to havana * Imported Translations from Transifex * VMware: Disallow snapshot of attached volume * Clean up comparison assertions * Utilizes assertIsNone and assertIsNotNone * Nexenta volume drivers: refactor NexentaJSONProxy * remove unused methods in driver.Scheduler * Imported Translations from Transifex * Nexenta iSCSI driver fix \_lu\_exists * Ignore H803 from Hacking * Drop conf\_key\_mgr warning message! 
* VMware: Re-create session for RetrievePropertiesEx * use cinder utils.get\_root\_helper * Provide user with more information on quota fail * Cleanup and more tests for db.api methods * Fix broken solidfire create-snapshot * Clean CONF out of brick iser * Open Icehouse development * Imported Translations from Transifex * Add key manager implementation with static key * Remove need for CONF acces in brick iscsi * Quotas roll back failure of create volume task * Remove incorrect class in cinder.conf.sample * Fixes incorrect class path in logging\_sample.conf * Storwize SVC driver hostname can't start with number * After commiting quota we should avoid certain reverts * Remove CONF from brick remotefs * Pass through args and kwargs in brick connectors * Clean CONF out of brick initiator * Update Babel from Openstack Requirements * Disable lazy translation * Improve gpfs config flag help text readability * Check for backing lun on iscsi target create * usedevelop in tox * Fixes ceph backup import errors * Add XML response tests for qos specs manage ext * v2 api - return bootable attr value on volume list * Fixes backup with multiple volume backend * Dont retry if target creation succeeds * VMware ESX: Fixes vol clone & clone from snapshot * Create volume revert fails for non admin user * VMware: Usng RetrvProprtisEx & does multi ESX scan * Fix XML serializer for QoS Specs extension * Fix Huawei HVS driver attaching volume error * Add debug logging for targets * Add support for querying the quotas usage * Validate force\_host\_copy API param for migration * Imported Translations from Transifex * Update OpenStack Style Commandments link * Set vol driver initialized before deleting volumes * Add error logs for Huawei driver * Clean CONF out of brick exception * Fix translation of CinderExceptions in REST API * Allow upgrade from Grizzly with ThinLVMVolumeDriver * Use module units for some drivers * Get host group id when Huawei driver initializing * Fix mandatory and optional args for create\_volume * Pass correct args to vol\_rpc create\_volume calls * Fix processutils.execute errors on windows * Sync gettextutils from oslo * LVM volume\_clear: error on unexpected inputs * Revert "Fix volume\_rpcapi calls for chance/simple scheds" * Fix finish\_volume\_migration() on SQLAlchemy 0.8.x * VMware: Handles no datastores case * Fixes some typos in cinder * Update rootwrap with code from oslo * Specific /usr/local/bin/hus-cmd rootwrap filter * Allow v2 Volume API to create volume with type name * Imported Translations from Transifex * Fix volume\_rpcapi calls for chance/simple scheds * Require assisted\_volume\_snapshots from novaclient * Fix over-indent in compute/nova * Add sg\_scan filter to rootwrap * Add extend to reference LVM driver * Fix issues with failed lvremove * GlusterFS: Copy snap from correct source file * GlusterFS: Use image\_utils for qemu-img calls * Remove default root\_helper of sudo for remotefs * Add a retry to create\_iscsi\_target for LVM * Fix HP3PAR iSCSI path connection * Added mapper for update\_all on snapshot\_metadata * Add volume metadata to v2 * Enforce driver is initialized * Added mapper for snapshot\_metadata * Fix type change in bootable setting of volume view * Add logging to prior to raising exceptions * GPFS Driver missing clone depth limit for snapshots * remove VolumeNotFoundForInstance class * Sync gettextutils from oslo * Use built-in print() instead of print statement * Fixes vol restore discard final bytes unzeroed * Fixes call GlanceConnectionFailed in 
invalid ARG * Fixes call VolumeNotFound in the invalid argument * Soft delete tmp migration volume * Fix \_\_init\_\_ methods of brick initiator connectors * Fix secure delete for thick LVM snapshots * assertEquals is deprecated, use assertEqual * Storwize/SVC: Optional CHAP authentication * Fix huawei driver test issues * fix wrong desciption of monkey\_patch config * Allow display\_name for v2 snapshot-update * Pass down root\_helper in more cases * Set rootwrap\_config path to rootwrap.conf * Do not use qemu-img --backing-chain or --output=json * VMware driver: Fix for invalid datastore selection * Fixes ceph volume restore to larger image than source * Imported Translations from Transifex * nms.folder.create\_with\_opts not supported on Nexenta 3.1.4.2 * Use $state\_path/conversion for image\_conversion\_dir default * Improves the parsing way of ssh returns * Fixes the use of exception.InvalidInput with the wrong arguments * Remove unused exceptions * Fix client connection leaks in HP3PAR drivers * Add default\_availability\_zone configuration option to cinder * Imported Translations from Transifex * Turn db model object into a primitive object to avoid error * Catch generic exceptions * Add delete support for volume type encryption * Adds Dell EqualLogic volume driver for Cinder * Fixing UnicodeEncodeError against volume creating function * Fix deleting qos specs key * Move novaclient to requirements.txt * fix missing unit in log message * Add check for qemu-img to image\_utils fetch\_to\_raw * Changed header from LLC to Foundation based on trademark policies * Fixed erroneous force full copy in ceph backup driver * Call to\_primitive on volumes.rpcapi.create\_volume * Fix typo in cinder.tests.test\_create\_volume\_flow * Fix Qos Specs association corner case * Fixes pep8 violation in nova * Fix bug in Nexenta NFS driver \_do\_create\_volume * Restrict Volume type deletion with volumes assoc * Replace assertEquals with assertEqual - 2/2 * Check cinder-backup service before "backing-up" * Do not attempt vg.update on uninitialized vg * Replace assertEquals with assertEqual - 1/2 * Add support for LocalConnector type in brick * Remove unused/redundant methods in cinder/test.py * Fix error casting value to float in lvm.py * Fixes misuse of assertTrue in test scripts * Utilizes assertIsNotNone * Utilize assertIsInstance * Remove deprecated assert\_() usage * Fix brick remotefs dependency on cinder * Remove quota fetch race condition * Synchronize extend\_volume methods in 3PAR drivers * Added copy-on-write support for all RBD cloning 2013.2.b3 --------- * fix log string in conversion type * VMDK copy\_image\_to\_volume and copy\_volume\_to\_image * Validate VV Set exists in 3PAR drivers * This adds a README to brick * Fix tuple usage error * Fixes brick Nova pep8 violation for lvm.py * fix inconsistent i18n log message * QEMU-assisted-snapshots for GlusterFS volumes * Add view builder to QoS specs API extension * Add features to Zadara Storage Cinder driver * Use tempfile and cleanup in windows unit test * Adds Nexenta NFS driver * Set vg\_thin\_pool to pool name instead of pool\_path * Fixes cinder-volume service startup on Windows * extract 'limits.' 
to constant for ratelimiting logic * Send notifications when extending volume * Fix errors in volume usage audit script * New update\_snapshot\_status API * Add volume driver for Huawei HVS storage system * Increase test coverage for cinder.utils * Add Fibre Channel drivers for Huawei storage systems * Refactor huawei Dorado array iSCSI driver * Refactor Huawei iSCSI driver * Enable gating on F811 * Add support for Havana missing features in Windows driver * Add venv wrapper for check\_uptodate.sh * Clone volume with right size with SolidFire * Fixes bug to allow for encrypted volume deletion * Sync rpc fix from oslo-incubator * Move comment back to right place * copy\_image\_to\_volume for Nexenta volume driver * Fix pep8 violation in backup * Utilizes assertIn and assertNotIn * Implements APIs for VMDK driver * Remove \_create\_volume function from several tests * Don't need to init testr explicitly * Add missing LH SAN driver features for Havana * Multi storage backend support for Nexenta driver * Fix typo in bin/cinder-volume-usage-audit * Remove unused methods from cinder.utils * Increase test coverage for cinder.image.image\_utils * Add kwargs to create\_volume in tests/utils.py * Update the version for the FC and iSCSI driver * Pass MB size on copy\_volume\_data call copy\_volume * Adding Read-Only volume attaching support to Cinder * Add NFS/GlusterFS support to brick library * Pass db into driver as constructor's parameter * Modified 3PAR drives to support 3parclient 2.0.0 * Move create\_volume flow to a subfolder * Import order cleanup * Migrate manage script needs import of db session module * Migration for attached volumes * Add optimized volume migration to Storwize/SVC * Fix quota update validation for non-int types * Imported Translations from Transifex * Removes exception instance creation on execute() * Fix except in lvm.py * Add automated check of conf sample * Remove deprecated code from Nexenta Exception class * Sync up with global requirements * Extend volume for GlusterFS * Offline snapshots for GlusterFS volumes * Ensure that qpid connection is closed (from oslo) * Imported Translations from Transifex * Test WWNs with basestring * Imported Translations from Transifex * Remove print statement in db api test * Ignore stmf target must be offline exception * Sync execute() related exceptions with oslo * The DB migration shouldn't populate types table * Use a new rest client for every Coraid ESM command * Remove unused methods from LVM driver * Storwize/SVC: allow setting of I/O group * Implement QoS support for volumes * Move the frequently injection task to the base folder * Move root task class to base file * Backup driver for IBM Tivoli Storage manager (TSM) * Dont crash service if sf cluster isnt available * 3PAR driver add missing domain check on QOS cmd * Remove unused methods from cinder.utils * Refactor cinder/tests/test\_volume.py * Unified Volume Driver for IBM XIV and IBM DS8K * Adds brick helpers to cinder utils * Fix python 3 pep8 errors for print * Fix incorrect msgstr's to avoid translation errors * GPFS use clone\_image for creating volumes * 3PAR driver terminate connection host validation * Re-enable a lot of cinder scheduler tests * emit warning while running flake8 without virtual env * Set bootable flag for volume cloned from image * Remove unused methods from cinder.utils * Clean up the sqlalchemy migrate manage.py script * Allow to delete a volume in error\_extending status * Update Brick to use executor * flake8 H202 error in test\_image\_utils.py 
* Removes ssh\_execute in utils.py * Fix volume\_glance\_metadata deletion * Use system locale when Accept-Language header is not provided * Generic backup\_volume and restore\_backup functions * Relax policy so owner can access encryption info * Fix Fibre Channel attach for single WWN * Make the SolidFire driver api port configurable * Add accept\_transfer to solidfire driver * Added need info to accept\_transfer * Allow volume create from source unless in error status * Avoid serializing CinderExceptions before they are translated * Add root\_helper param to get\_connector\_properties * Standardize on ID for log messages * Reduce hidden effects of sqlalchemy objects * Removed need for domain in 3PAR drivers * Allow Cinder to call Nova client * Use FakeLoopingCall instead of the real one * Fix some pylint error in Coraid Driver * Storwize/SVC: More error logging * Remove strcmp\_const\_time * Refactor LVM driver to use Brick VG utility * Added missing import * Fixes SSH injection threat in 3PAR driver * Implement missing Coraid Driver functionality for Havana * Increase test coverage brick/initiator/connector * Fix SSH injection threat in 3PAR driver * refactor/unify driver version strings * Refactor Nexenta driver * Update Nexenta ISCSI volume driver authors * Extract ISCSI tries option into connector module * Externalize error messages in the v2 API * Add more asserts to the limiter unit tests to test the RateLimit * Replace os.unlink with delete\_if\_exists * No need to declare the exception conf * Add support for encrypted volumes * Add tests for cinder/brick/initiator/connector * Tidy up the SSH call to avoid injection attacks for HP's driver * Raise exception when Glance metadata not found * Interprete scoped key as nested tags * Adding the -online option to the 3PAR clone * Fixes some unseen flake8 violations * Fixes volume clone from volume * Fixes docstring formats in connector.py * Fixes files with wrong bitmode * Add unit tests for cinder/api/contrib/quotas * remove Brick deps on cinder.exception * Remove Brick iser dependency on cinder * Fix handling ImageUnacceptable in create\_volume * Use native methods for list manipulation * Fix signature of \_create\_volume() in ThinLVMVolumeDriver * Add H233 to ignores in tox.ini * Imported Translations from Transifex * Add support for volume cloning to Nexenta driver * Fix ratelimiting * GPFS support for various volume attributes * Upgrade Scality driver to match minimum features * Ignore purge\_props for v2 Glance api and fix upload * Add support for API message localization * 3PAR drivers creating incorrect comment data * Imported Translations from Transifex * Use utils.safe\_minidom\_parse\_string in extensions * Move resource usage sync functions to db backend * Imported Translations from Transifex * Refactoring of create\_volume to use taskflow * Add minimum features in HDS driver (for Havana & Icehouse) * Ignore stmf target must be offline exception * Added glance\_request\_timeout config option * Set lock\_path in tests * 3PAR volumes created from snaps failed to attach * Add test for brick.local\_dev.lvm * Imported Translations from Transifex * Remove Brick's iscsi dependency on cinder * Remove locals() from iser * Move volume\_clear and clear\_size opts up to driver * Imported Translations from Transifex * Set the concurrent connections on the 3PAR array * Create key manager interface * Remove usage of obsolete oslo.exception * Fixes create rbd volume from image v1 glance api * Imported Translations from Transifex * Remove 
Storage Manager from cinder-manage * Remove cinder.exception from Brick * Add bin directory to flake8 when not in venv * Add support for volume extension to Nexenta Systems volume driver * GPFS Verify min release level for mmclone command * Sync gettextutils from oslo * Add eclipse project files to .gitignore * Remove unnecessary metadata from the 3PAR drivers * Adding support for iSER transport protocol * NetApp fix clone image compatibility issue with ssc * Set bootable flag for volume serializer * Fix chown fail for nfs file without necessary permission * Add new persona value in the 3PAR driver * Update driver version to 1.1 * Fix NetApp iscsi drivers for cinder backup * Fix pep8 and pylint violation in Nexenta volume driver * Test tools barfs on reusage of 'id' attribute * Ignore "volume does not exist error" * Call get\_session() only when necessary * Fix volume\_create()/snapshot\_create() DB methods * Execute DB API methods in a single transaction * Improve DB API test coverage * Fix check for mount.nfs helper installation * Imported Translations from Transifex * Remove xen storage manager tables * Remove unused migration\_\* methods from db api * Factorize code between nfs.py and glusterfs.py * NetApp fix create vol different size than snapshot * LVM / Block Device Drivers: Fix duplicated flags * tox.ini: Change sitepackages to False * Tidy up the SSH call to avoid injection attacks in storwize\_svc * NetApp check for 7 mode controller version * Storwize/SVC: Use reserved percentage from conf * Imported Translations from Transifex * Pop out 'offset' and 'limit' before use for filter * Imported Translations from Transifex * Fix running of migrations tests by Jenkins gate * Update to latest oslo rootwrap * Make unicode-to-utf8 conversion universal in ceph backup driver * Add more info to delete error message * Update references with new Mailing List location * Allow connect by FC-only or iSCSI-only systems * NetApp NFS efficient clone\_image impl * Removed the dep on cinder.utils * Fix the multi-backend storge issue for ZMQ * NetApp storage service feature support * Imported Translations from Transifex * Create volume from snapshot must be in the same AZ as snapshot * Using volume name property instead of using template and id * Fix unit suffix and add no\_suffix option * GPFS stub calls to truncate and dd in unit tests * Storwize/SVC: Use VolumeDriver's copy vol<->image * Implements extend volume feature in HP 3PAR driver * use encode('utf8') instead of str() * Imported Translations from Transifex * Migration for detached volumes with no snaps * Fix cinder error for deprecated Netapp drivers * get\_snapshot should populate the snapshot metadata * Adding driver minimum features and volume stats to dev doc * Update RBD driver to be compliant with HACKING * GPFS convert glance image to raw only when needed * Fix oslo.config.cfg.NoSuchOptError when running individual tests * Fixes RBD driver docstring format issues * fix name 'update\_volume\_status' to 'update\_volume\_stats' * use 'exc\_info=1' instead of import traceback * Fix further Hacking 0.6.x warnings * Add create & attach times to SolidFire attributes * Implement extend volume for Storwize/SVC * Cleanup README.rst * Fix volumes search by metadata * Add test for volume status check when extending * 3PAR Driver modifications to support QOS * Make Storwize/SVC tests work without simulator * Revert hardening of Storwize/SVC SSH commands * Clone\_image method added image\_id as parameter * Added incremental backup support to Ceph 
backup driver * Sync gettextutils from oslo * Imported Translations from Transifex * Fix duplicate config options * Move copy\_volume function to volume/utils.py * Fixes default value of use\_default\_quota\_class * Imported Translations from Transifex * Delete snapshot metadata when snapshot is deleted * Tidy up the SSH call to avoid injection attacks in storwize\_svc * Fix extend\_volume error handling * Fixes race condition in LVMVolumeDriver create\_cloned\_volume method * Checks the volume\_clear flag and just return if it is none 2013.2.b2 --------- * Fixes Opt type of use\_multipath\_for\_image\_xfer * Fixes Opt types in cinder/backup/drivers/ceph.py * Fix indent in cincer/volume/configuration.py * Implement validate\_connector for Storwize/SVC * Fix error when QuotaUsage.updated\_at is NULL * Rename SolidFire driver for consistency * Add Brick Fibre Channel attach/detach support * Increase timeout period for clone volume * Be sure to check deleted types on quota update * CoraidDriver: Allow volumes in error state to be deleted * Adds multiple iSCSI port support to 3PAR * Implement extend volume functionality in Sheepdog * Mark methods used in class only with prefix "\_" * Add te field user\_id into the volume detailed information * Catch additional connect fail cases * Clean up Huawei tmp files from tests * Add flag argument to 'cinder-manage config list' * Imported Translations from Transifex * Add generic block device driver * Use base ISCSI driver to fulfill some driver requirements * Cleanup and make HACKING.rst DRYer * Clone\_image should return dict of vol properties, clone status * Update requirements from openstack/requirements * Refactor SSHPool.get() to use Pool.get() * Enable zero the snapshot when delete snapshot in LVMVolumeDriver * Fixes ceph-backup failure if original volume deleted * Implement extend volume functionality in Rbd * Handle errors raised by extend\_volume * Minor reorg for (array resource usage and backend options naming) * Check enabled backup service before rpc request * Fixed Ceph backup librbd segfault * Add support to import images into sheepdog volumes * Add tests for cinder/api/urlmap.py * remove improper assert usage * Enable setting blocksize on volumes * cinder.api: Replace 'locals()' with explicit values * Update upper bound of keystoneclient version * Fix missing volume\_name\_template flag * Change check-detach to reject more states * Implement extend volume functionality in SolidFire * Add unit tests for cinder/api/versions * Make String column creation compatible with SQLAlchemy 0.8 * Remove suds requirement * Add support for storing volumes on GPFS * Consist terminate\_connection function signature * SolidFire API RequestID is useless * Add ability to specify SolidFire API version * Refactor reschedule in exception handling of volume manager * Don't pass 'session' arg to public DB API methods * Add interface class for backup drivers * Prevent wrongly privilege escalation of a context * Move brick initiator tests to brick subdirectory * Fix extent size issue when creating thin pool * Sync install\_venv\_common from oslo * Fix a few Sphinx warnings * Ignore files created by Sphinx build * Use oslo.sphinx and remove local copy of doc theme * Add unit tests for cinder/api/contrib/volume\_actions * Scheduler should not select down volume managers * Add check for snapshot to Brick LVM * Fix typo 'Flase' -> 'False' * Rename cinder.flags to cinder.common.config * Add execute wrapper to brick LVM code * Imported Translations from Transifex * 
CoraidDriver: Create\_volume\_from\_snapshot of a different size * Make os-services API extension consistent * Imported Translations from Transifex * Removes 3PAR domain option from cinder config file * Skip brick\_initiator test in virtual environments * Added Cinder volume backup to Ceph support * Handle ECONNREFUSED exception in SolidFire driver * Add os-availability-zone extension * Run flake8 also on cinder/\*/openstack * Imported Translations from Transifex * Quotas by Volume Type * xenapi: implement xenserver image to volume * Save some more image attributes to volume\_glance\_metadata * Fix check\_for\_setup\_error for sheepdog driver * Add Brick iSCSI attach/detach * Added volume backup and restore to Ceph RBD driver * Fix service alive information in os-services extension * Calculate count for customized dd blocksize * Content-length missing in put\_object * Replace glance\_metadata check with bootable column * Imported Translations from Transifex * Avoid winning the useless use of cat award * Fix up trivial H103 license check mismatches * Register used CONF entries in cinder.api.common.py * Fix and enable gating on H401 * Do not raise NEW exceptions * cinder.[brick,db,image] Replace 'locals()' * Update kombu requirement * Remove usage of locals() for formatting from cinder.tests.\* * Adds create\_from\_volume test cases * Use list comprehensions when possible * NetApp:iSCSI drivers reserved percent need to change to 0 * Add support for swift user/key authentication * Refactor the backup method of SwiftBackupService * Imported Translations from Transifex * NetApp unified driver implementation * Add \_create\_volume to ThinLVMVolumeDriver * Add the project name into CinderKeystoneContext * Add build directory to flake8 ignore dirs * Add missing extend volume test (rpcapi) * fix error class path in logging sample * Modify check for volume-type-id to a get w/default * Don't perform retry\_execute in certain cases * Adding host attaching support to Cinder * Update attach status when instance id invalid * Fix and enable gating on H403 * Use Python 3.x compatible except construct * cinder.backup: Replace 'locals()' with explicit values * cinder/.: replace 'locals()' with explicit values * Editable default quota support for cinder * Imported Translations from Transifex * Use common.processutils.execute * Remove usage of locals() for formatting from cinder.volume.\* * cinder.schedule: Replace 'locals()' with explicit values * Imported Translations from Transifex * Remove the 'migrate' option from cinder-manage * Use Python 3.x compatible octal numbers * Use Python 3.x compatible except: construct * Update and add notifiers in create volume * Imported Translations from Transifex * Fix up the test framework * Raise an error if iSCSI is not supported * Remove usage of locals() for formatting from cinder.api.\* * Implement capability to extend existing volume * Replace utils.to\_bytes() with strutils.to\_bytes() * Flatten Volume from Snapshot * Imported Translations from Transifex * Replace FLAGS with cfg.CONF in volume * Replace FLAGS with cfg.CONF in other modules, unless tests * Elevate volume/snap "is busy" log message for volume/snap\_delete * Imported Translations from Transifex * Fixes 3PAR drivers terminate\_connection issue * Added policy check for backup operations * Update to the latest stevedore * Fix various Sphinx warnings * Fix some unittest cases failed on osx * Fix the after subscription size checks * Re-set default sql\_connection and sqlite\_db * Remove explicit distribute 
depend * Add missing exception from volume/api.py * Allow disabling ssl compression for glance client * Add availability zone checking in the api service * Add missing attributes to xml deserializer for volume request * Integrate oslo's periodic tasks * Fix LVM logging error * Remove direct call to utils.execute * Add policy checking for transfer create/accept * Replace FLAGS with cfg.CONF in tests * Replace FLAGS with cfg.CONF in api * Start using Pyflakes * Add the iscsi device check and exception processing * Minor Logic bug in NFS Driver * Imported Translations from Transifex * Fix 'undefined symbol conn' error * NFS drivers don't honor vm size with volume from an image * Add missing tests for backup\_\* methods * Replace functions in utils with oslo.fileutils * Remove E12 errors from tox.ini Flake ignores * Unset all stubs before running other cleanups * Fix config registration in cinder volume drivers * Elevate acceptors context on accept reserve udpate * Removing service\_\* options from authtoken * Add call to vol driver when accepting a transfer * Imported Translations from Transifex * Implement DB migration for volume transfer BP * Replace FLAGS with cfg.CONF in db * Add missing tests for iscsi\_\* methods * Log iSCSI target output on error * Re-write of the cinder-manage man page * Replace FLAGS with cfg.CONF in scheduler * python3: Introduce py33 to tox.ini * Fix AttributeError typo * Fix path for pylint Gate * Fixed method db.api.reservation\_expire * Handle IPv6 specifid glance servers gracefully * HDS Cinder Driver. Rev #1 * Imported Translations from Transifex * Add error reporting to generate\_sample.sh on import failure * Updating HACKING to disallow the use of locals() * Prevent force delete if the volume is attached * InvalidUUID can not be raised * Fix incorrect authorization rule in quota contrib api * Rename requires files to standard names * rbd: simplify configuration and use librbd and librados * Update 3PAR driver session management * Fix typos * Add testrepository to git ignores * Fix incorrect copyright * Add missing tests for cinder.db.api.quota\_ * Return 404 from delete of extra spec if not found * Fix incorrect status for volume clone from image * Imported Translations from Transifex * Support for NFS shares with spaces in path * Fixes 3PAR Host already exists error * Ensure that pbr>=0.5.10 is installed * Add missing tests for cinder.db.api * Remove execute permissions from test files * Migrate to Oslo DB code 2013.2.b1 --------- * Catch and report errors from copy image to volume * test\_glance.py: Stub out \_get\_member\_model as well * rbd: send ceph monitor addresses with connection info * Don't set signing\_dir by default * Remove cinder\_emc\_config.xml.sample * Update cloned volumes QoS settings * Fix 'Inheritance-based rule deprecated' log warning * Added '%' before snapshot variable * Hack run\_tests.sh to work with single tests again * Imported Translations from Transifex * Don't throw ValueError for invalid volume id * ModifyVolume attributes on Clone * Improve "service is down or disabled" warning message * Add "\_" builtin method for config generation * Replace custom skip\_ methods * Migrate base test class to testtools * Fix ownership transfer when cloning with SolidFire * Make NFS share selection more intelligent * Add common Oslo DB code to the source tree * Add the service\_state into test\_schedule\_happy\_day * Implement scheduler hints for API v2 * Update log.py and jsonutils.py from oslo-incubator * Added a test for bad limit param * 
Added test for nonnumerical limit param * Raise VolumeNotFound with correct volume\_id * Removes a broken link from the sidebar of Sphinx built pages * Imported Translations from Transifex * Support mount options for NFS/GlusterFS volumes * Hide v1/v2 version entities in API when disabled * Allow flake8 to run in venv * Imported Translations from Transifex * Imported Translations from Transifex * Convert to oslo strutils.bool\_from\_string * Update import of strutils from oslo * Add thin provisioning support checks * Update/Publish volume service updates on delete * RemoteFsDriver: copy\_image\_to\_volume and copy\_volume\_to\_image * Imported Translations from Transifex * solidfire: Make sure src\_uuid is passed correctly * Implement cloned volume for the RBD driver * Add .coveragerc to show proper coverage statistics. As in other openstack projects * NetApp server tunneling fix * Move iscsi helpers to brick directory * Fix up hacking ignores a bit * Hide lock\_prefix argument using synchronized\_with\_prefix() * Storwize/SVC: fix attach bug for live migration * Deprecating old dot path locations for Folsom configs * solidfire: Add ability to override account prefix * Fixes an get\_volume\_stats reporting issue * Increased unit test code coverage * Create an LVM utility to use for local storage * Add CINDER\_LOCALEDIR env variable * Remove gettext.install() from cinder/\_\_init\_\_.py * Use flake8 and hacking * Use pbr instead of openstack.common.setup * Change the type of "free\_capacity\_gb" to be float * Set default values for NFS/GlusterFS share\_config files * Add missing spaces to iscsi\_iotype help * Adds notifiers to both volumeTypes and volumeTypeExtraSpecs * Fix missing spaces in Huawei Logging * Add pylint-based lintstack test to tox environment * Remove outdated cinder test doc * Implement copy\_image\_to\_volume and copy\_volume\_to\_image on nfs backends * Update import of oslo's processutils * Fix ability to add custom volume\_backend\_name * Add db client packages to dev env setup doc * Check that volume is at least minDisk size * Remove old\_name from kwargs when using IET helper * Copy the RHEL6 eventlet workaround from Oslo * Remove setuptools-git as run time dependency * Fix LHN driver to allow backend name configuration * Deleting a backup removed the backup record from database * Remove \_path\_exists method * Encode username and password in config file * Clear volumes stuck in 'downloading' * Fixes 3PAR FC driver synchronization * Avoid using whitespace in test\_safe\_parse\_xml * Add stats reporting to Nexenta Driver * Remove duplicate method definition * iscsi: Add ability to specify or autodetect block vs fileio * Rename duplicate test method * Update to latest copy of OSLO incubator * Cinder wasn't filtering the backups returned to backup list API * cinder volume service keeps retrying even code exception * Add missing space to "volumes already consumed" message * Add capabilities reporting to ThinLVM driver * NetApp: Fix failing NetApp tests * Use VERSION var for volume\_stats version (Gluster/NFS) * Add parsing to extra-specs key check * Use a SSH pool to manage SSH connection * Remove Flags usage from cinder.volume.driver * new cinder.conf.sample and fix extract\_opts.py * fix default config option types * Fix incompatible Storwize/SVC commands * Fix backup manager formatting error * Add service list functionality cinder-manage * Clean up attach/detach tests * Reformat openstack-common.conf * Sync with oslo-incubator copy of setup.py * Don't hard code AUTH\_ 
into the swift backup url * Remove update\_volume\_status log message from NFS driver * Implement get\_volume\_stats for GlusterFS driver * Fixed a volume creation re-schedule error * Allow deletion of backups where the service is None * Fix cinder-manage backup list to work with uuids * leave re-scheduled volume status to creating * Prevent create volume from snapshot with bad size * Add du to rootwrap filters * Change format of some judgments * Remove InvalidPortRange exception * Add availability\_zone to the volume and snapshot notifications * Throw InvalidSnapshot for failed snap delete * remove deprecated assert\_unicode sqlalchemy attribute * Fix IBM copyright strings * REST session validity not checked in get\_volume\_info * Enforce exclusive options snapshot-id, source-volid and image-id * Add snapshot events to the cinder notification * getLogger should be called after logging is configured * Mark sql\_connection with secret flag * Sync lockutils from oslo-incubator stable/grizzly * Remove unused tools/rfc.sh * Add the volume and snapshot gigabytes together * Force deletes using tgt to workaround bug 1159948 * Fixed shared gigabytes quota resource * CoraidDriver: support users that are not admin * Fix quota updating when admin deletes common user's volume * Last driver sync for Folsom and Grizzly * Fix bug with 3PAR host entry in wrong domain * Snapshot reservation sync calls wrong resource * Fetch volume\_types by uuid and not by name in v2 * Use the local configuration in the nfs drivers * Fixed attach volume for EMC SMI-S iSCSI driver * Extend param2id() to work with uuids * Clean up started volume services in tests * CoraidDriver: do not call login from \_\_init\_\_ * CoraidDriver: typo in \_login exception handler * Fixes Cinder REST API /volumes issue * Add missing processutils for impl\_zmq in oslo rpc * Update Cinder's latest copy of OSLO grizzly stable * Remove the log spam generated by the NetApp driver unit tests * Speedup solidfire unit tests * Updates to OSAPI sizelimit middleware * Use OpenStack common project requires * Rename cinder-rtstool to rtstool * Make dd block size user configurable * remove cinder-rtstool because of rtslib dep * Add snapshots to the volume usage audit report * CoraidDriver: retrive volume info (improvement) * Remove AGPL rtslib pkg from pip-requires * Fix Storwize/SVC LUN allocation with holes * Remove references to FLAGS from volume/manager.py * Allow snapshot\_delete for NFS/GlusterFS drivers * Pull Oslo log fix to enable root logger initialization * Clean up exec\_dirs prefix from rootwrap conf * Fix typo in persona valid values * Use self.configuration to support the multi-backend case 2013.1.rc1 ---------- * Bump version for Grizzly RC1 cut * Count Snapshots towards volume/gigabyte quotas * Fix 3PAR driver hiding existing host error * Switch all uses of 422 response code to 400 * Implement get\_volume\_stats in NFS driver * cinder-manage does not print any version information * Fix ISCSIDriver rescan * Compression/tier capabilities for Storwize/SVC * Fixes dettach volumes in Windows cinder plugin * Fix \_migrate\_up in test\_migrations * Switch to final 1.1.0 oslo.config release * Adds a flag to set glance api version to call * Storwize/SVC driver fix for multibackend scenario * Fix bad request response code on extra\_specs create * Fix bugs for Huawei driver * Do not use prefix to lookup host in Storwize/SVC * update error log arguements in filter scheduler * Update oslo rpc libraries * Remove/update unused log arguements in manager * 
Removing flags in RBD in favor of configuration * LIO iSCSI initiator ACL auto-config * Fix a few bugs for LeftHand Grizzly * Update tox.ini to support RHEL 6.x * Fix volume capacity reporting * Pull newly merged Olso update for 'is' operator * Use nose and openstack nose plugin * Exit run\_tests with the result code of the test runner * Mark configuration option netapp\_password secret * Add get\_volume\_stats in the sheepdog driver * Switch to oslo.config * Fix calling setUp() method of superclass from tearDown method * Fix 3PAR drivers to work in multi-backend mode * Fixed copy image to volume and clone volume * Fixes issues found in /os-hosts API * Fix Storwize/SVC storage\_protocol reporting * sync oslo changes for setup / version * swift backup service checks version during restore * Add some useful log to filter scheduler * Elevate context for delete volume with no host * Improved fail\_reason for cinder-backup swift connection errors * Convert from using FLAGS directly in SF driver * Improve logging for volume operations via manager * Only use iscsi\_helper config option if using LVMISCSIDriver * Fix query filter in volume\_get\_active\_by\_window() * Changed to INFO level logging for main cinder-backup operations * NetApp: Clean up lock file left behind by unit tests * NetApp: Fix race condition in 7-mode iSCSI driver with DFM * update install\_venv\_common to handle bootstrapping * allow run\_tests.sh to report why it failed * Remove compat cfg wrapper * XenAPINFS: Fix Volume always uploaded as vhd/ovf * Fixed cinder-backup start errors seen with devstack * Cinder devref doc cleanups * Fix various exception paths * Implement metadata options for snapshots * Skip timestamp check if 'capabilities' is none * Fix stale volume list for NetApp 7-mode ISCSI driver * Implement a basic backup-volume-to-swift service * Better error handling around volume delete * Moved cinder\_emc\_config.xml.sample to emc folder * Uses tempdir module to create/delete xml file * Add HUAWEI volume driver in Cinder * XenAPINFS: Create volume from image (generic) * Bump the oslo-config version to address issues * Ensure volume exists before deleting * Add LIO configuration for iSCSI initiators * rbd: implement get\_volume\_stats() * Handle maxclonepervolume/node limits in SF driver * Use oslo-config-2013.1b3 * Fix syntax error in cinder-volume-usage-audit * HP 3PAR Fibre Channel Driver and iSCSI Updates * Fibre Channel base class for Cinder drivers * Update cinder-manage to use FLAGS.log\_dir * Add a safe\_minidom\_parse\_string function * Add a volume driver in Cinder for Scality SOFS * Fix create volume from image * XenAPINFS: fix capacity reporting * Update Storwize/SVC driver for Grizzly * Set rootwrap\_config in cinder.conf sample * Skip tests if cinder is not installed * Fix undef function call in test\_migrations for py26 * Fix PEP8 violation (again) * Update cinder-volume to enable multi volume support * Install rtslib when installing cinder * Sync latest cfg and log from oslo-incubator * Handle 'infinite' and 'unknown' capacity in CapacityWeigher * Add get\_cluster\_stats to SolidFire driver * NetApp: Fix for snapshot not deleted in error state * NetApp bug fix for multibackend scenario * Adding support for Coraid AoE SANs Appliances * Add an update option to run\_tests.sh * Update EMC SMI-S Driver * Add LIO iSCSI backend support using python-rtslib * Add GlusterFS volume driver * Create a RemoteFsDriver class * Fix ordering of function args * Add an ID to temporary volume snapshot object * Allow 
create\_volume() to retry when exception happened * Fixes the provisioning on selected volumes for NetApp 7 mode * rbd: update volume<->image copying * Fix PEP8 violation * Update snapshot rest api to be consistent with volumes * change display\_description to description in volumes * v2 volume/snapshot create will correctly give a 202 response * add postgresql opportunistic testing * make test\_databases instance variable * Move create\_cloned\_volume() to LVMVolumeDriver * Update to latest oslo-version code * Allow disabling of long-lived SSH connections * Don't require importing paramiko for error * Allow for specifying nfs mount options * rework migration 004 testing with real data * Allow tools/install\_venv\_common.py to be run from within the source directory * add data injection on migrations * sync database connect changes from nova * XenAPINFS: Copy volume to glance * XenAPINFS: Copy image from glance * Fix inability to delete volumes in error state for NetApp driver * Copy glance\_image\_metadata when cloning volumes * Add volume\_glance\_metadata to volume.api.get * Import Oslo's common rootwrap to Cinder * Mark password config options with secret * Fixes 'not in' operator usage * Skip tests if cinder is not installed * Fix provider\_location column add for PSQL * Update 3PAR driver * Fix the generalized copy\_image\_to\_volume operation * import tools/flakes from oslo * Add unit tests for ISCSIDriver.\_do\_iscsi\_discovery and ISCSIDriver.\_get\_iscsi\_properties * Fixes "is not" usage * Pull cfg module from Oslo and update cinder-manage accordingly * Set source volume to "in use" during clone * Update some Oslo Packages * Fix typo in cinder/db/api.py * Replace CRLF with unix-style "LF" * Allow volume back-end to report 'infinite' or 'unknown' as capacity * Wrap SolidFire size parameter in int * Use install\_venv\_common.py from oslo * Update osapi\_volume\_extension default * Generic iSCSI copy volume<->image * Implement LVM thin provisioning support * Check for installed cinder in filter tests * Fix hosts extension and enable its tests * Check for non-default volume name template * Get updated vol status in volume.api.reserve * Update EMC SMI-S iSCSI Driver * Clean up QTree when deleting volume on NetApp storage box * Fix NFS volume creation * Improve error message for missing NFS share config * ensure zeros are written out when clearing volumes * Fix error for extra specs update with empty body * Clean up IPV6 config checks in test\_wsgi * Add capability to update volume metadata * Fix sheepdog volume creation * Add LUN# to provider\_location in Nexenta driver * Check for configured IPV6 before running tests * New cinder.conf.sample format * Move iscsi flags back to driver.py * Snapshot support for XenAPINFS * support a configurable volume wiping method * Relax various version constraints * Support for SSL in wsgi.Server * Enhance wsgi to listen on ipv6 address * Factor out LVM code * Implement filter scheduler * Revert "Implement filter scheduler" * Update SolidFire Volume driver * Provide HP 3PAR array iSCSI driver * Fix CinderClient exception name in EMCISCSIDriver * Enable cinder exception format checking in tests * Update exceptions to pass correct kwargs * Add option to make exception format errors fatal * Implement filter scheduler * Use tempdir for lock\_path in tests * Upgrade WebOb to 1.2.3 * Make WebOb version specification more flexible * Fix cmds clearing in TargetAdminTestCase * Add missing library * use deleted = False, not 0 for update * Implement ability to 
Clone volumes in Cinder * Add pyflakes * Adds synchronization to attach volume * Add EMC Volume Driver in Cinder * Added extra-spec key scoping to the 3PAR drivers * Adding marker, pagination, sort key and sort direction to v2 api * Fix typo in image\_utils tempfile handling * Make the NetAppISCSIDriver.\_is\_clone\_done() method able to handle empty responses. Add unit tests to exercise this case * Make sure we don't double remove tmp on exception * Add service mgmt extension * Added the lockutils, fileutils, gettextutils * Fixes a Windows volume driver bug on disk export * Moving host admin extension with other extensions * Allow the lvm backed drivers to use mirrrors * CHAP support for IBM Storwize/SVC driver * Remove instance quota re-sync code * Add image metadata API extension * Raise NotImplemented for drivers that don't support images * Add \*.swp to gitignore * Support glance servers over https * Add commands used by NFS volume driver to rootwrap * Changing display\_name to name in v2 api * Make summary and detail view consistent with other projects * creating separate v1 and v2 stubs and general fakes * Make copy\_to\_volume a bit more useful * Delete type call in api needs update to use ID * Convert volume\_type id from int to uuid * Fixes the 3PAR drivers CPG validation * Rename Config osapi\_compute\_link\_prefix to osapi\_volume\_base\_URL * Fix exception when size is None * Ensure request\_spec can be serialized * attaching volumes will set instance\_uuid instantly * Revert changes to monkey\_patch * Improve provider\_location cleanup code for RBD * Fix import order to make it alphabetical * Fix None being passed into as\_int() * Use auth\_token middleware from keystoneclient * Provide i18n to those messages without \_() * Revert "use O\_DIRECT when copying from /dev/zero too" * Make pep8 checks a bit stricter * Unpin lxml requirements * use O\_DIRECT when copying from /dev/zero too * Add CONTRIBUTING file * Add the persistency to the volume created by iscsi IET * adding copy of v1 as v2 * Moving contrib to cinder.api * Moving api v1 implementation into v1 directory * Switching api to use base extension manager * moving all middleware code in cinder.api.middleware * Moving common api code into cinder.api * Cleaning up volume driver paths * Add volume bootable information to api response * Add XenAPINFSDriver * Add db table for Glance Metadata * Remove redundant db.volume\_update() in volume manager create\_volume() * Pin pep8 1.3.3 * Removes the xensm driver * Pass in correct volume\_ref to create\_from\_snapshot * NetApp direct to filer drivers for iscsi and nfs * Add hosts extension to Cinder * Remove unused python-daemon dependency * Make tox.ini run pep8/hacking checks on bin * Various pep8/HACKING fixes for Cinder * Volume RPC API Versioning * Remove gen\_uuid() * Remove obsolete use\_local\_volumes * Import order cleanup per HACKING * Remove unused volume API method - remove\_from\_compute() * Scheduler API clean up and refactor * Remove dm\_setup(remove) call in volume\_delete * Add ability to disable secure volume delete * Remove the zeroing out of the volume altogether * Add 'create\_volume' to scheduler RPC API * Fix run\_tests.sh ambiguous usage msg and behaviour for -x option * Add admin only action for force detach * Changes bit mode of zadara.py to 644 * Port openstack-common/uuidutils to Cinder * Fix 401 from auth\_token middleware * Splitting out volume drivers in driver.py * Minor optimization in create\_volume in HpSanISCSIDriver * Adding a SSH Connection 
Pool * Fixes 3par driver methods that were double locking * Return volume type name on volume create * pin sqlalchemy to the 0.7 series * Add VolumeTenantAttribute API extension * Log the body of an /action * Detect and fix issues caused by vol ID migration * Split out drivers in san.py * Add VolumeHostAttribute API extension * Add default volume type flag * Fix typo so setting volume\_tmp\_dir works * Rollback for resources during volume creation failure * Allow the user to update a volume's metadata * Add the generation of the username and password for iSCSI target * Update HACKING.rst and related changes from Nova/PEP8 * Add trove classifiers for PyPI * Ensure device node exists before wiping during volume deletion * Update volume and snapshot status on delete * Drop unused quota\_usage db methods * Drop duplicate sqlalchemy db api methods * Change output strings to i18ned * Adds support for Windows 2012 Storage Server blueprint windows2012driver https://blueprints.launchpad.net/cinder/+spec/windows2012driver * Update common * Fix incorrect class path for legacycinder formatter in logging\_sample.conf * Error message references incorrect variable * Loosen anyjson dependency to avoid clash with ceilometer * Configuration Options clean up * Fix typo in policy documentation * Add snapshot force delete admin action * Mock out sleep in some retry tests * Use policy based rule to define context.is\_admin * Sync openstack common and add policy * Fix typo in sample configuration file * Update distribute version in test requires * Revert explicit usage of tgt-adm --conf option * Fixes remove\_export for IetAdm * Add missing entries in setup, fix up pip-requires * Fix NetAppCmodeISCSIDriver.\_get\_lun\_handle() method * Remove unused code: check\_for\_export * Return 400 if create volume snapshot force parameter is invalid * Fix cinder-volume-usage-audit * Sync with nova change I135ed85a * Remove cinder gating hack * Set the default availability zone back to nova * Add lun number (0) to model\_update in HpSanDriver * Fixes to the SolarisISCSI Driver * Stop double logging to the console * Restore SIGPIPE default action for subprocesses * Replace builtin hash with MD5 to solve 32/64-bit issues * Correct IetAdm remove\_iscsi\_target * Add nova migrate\_version check to cinder import * Bump version to 2013.1 * Clean up db.volume\_create() * Fix volume deletion when device mapper is used * Update quota when deleting volume that failed to be scheduled * Sync a change to rpc from openstack-common * Add a resume delete on volume manager startup * Improve entity validation in volumes APIs * Add entity body validation helper * Should've added super().tearDown() in test\_iscsi * Fixes bug 1050135 * Fix FLAGS.volumes\_dir help message * Use tmpdir and avoid leaving test files behind * Sync log format changes from openstack-common * Update rpc from openstack-common * Add volume quota in volume/api.py and olume/manager.py * Fixes bug 1049446 * Revert "Don't zero out snapshot volume on snapshot\_delete" * Add update to volume and snapshot controllers * Nail the pip requirement at 1.1 * Clean up .gitignore * Prevent from bug #1008866 is reverted * rename nova.pot => cinder.pot, nova.po => cinder.po * Don't zero out snapshot volume on snapshot\_delete * Recent changes to SolidFire API changed iqn format * Remove unused utils.wrap\_exception * Sync notifier changes from openstack-common * Clean up some codes about compute in VolumeTestCase * Remove unused db api * Typo nova => cinder * Remove vpn\_ping function in 
cinder/utils.py * Update SolidFire driver to reflect IQN changes * Rename test\_nova\_rootwrap.py to test\_cinder\_rootwrap.py * Fixes potential bugs found by pylint * Handle missing 'provider\_location' in rm\_export * Specify the conf file when creating a volume * avoid the buffer cache when copying volumes * Fix Typo in LOG.error * Remove dependencies for netaddr * Filter volumes and snapshots by query string * Remove null\_kernel option * Remove default\_schedule\_zone * Remove memcached\_servers config option * Regenerate cinder.conf.sample * Sync improvements to config file generator tools * Sync misc changes from openstack-common * Sync zmq changes from openstack-common * Sync cfg changes from openstack-common * Fix xml metadata for volumes api in cinder * Fix bug where image size is incorrectly rejected * Several hacking compliance fixes * Remove Cheetah from pip-requires * Update dev docs \* Quick pass at implementing the basics for cinder dev docs \* Remove the N/A compute related stuff \* Clean up the architecture a bit to only show cinder related \* Remove various modules form TOC's that aren't applicable * Typo fix: nova => cinder * Move newly created NFS exceptions to standard location in exception.py Addresses bug 1037619 * Add admin actions extension * Removed unnecessary call to ensure\_export * Add cinder- prefix to all binaries * Make size optional when creating a volume from a snap * Fix creation of iscsi targets * Spelling: Persistant=>Persistent * Implement volume quota support in Cinder * Remove unused return values and commented out code from NFS driver * Remove unused flags * Fix PEP8 issues * Fix incorrect tgt-admin call in create\_iscsi\_target * Add 'detaching' to volume status * Typo fix in cinder: existant => existent * Make glance image service check base exception classes * Fix PEP8 issues * Remove unused exceptions from cinder/exception.py * Add nosehtmloutput as a test dependency * Migrate volume related quota info in db migration * Use event.listen() instead of deprecated listeners kwarg * Add declare for xiv driver in fake\_flags * Remove logging in volume tests * Call driver for attach/detach\_volume * Fix spelling typos * Remove unused function * blueprint zadara-volume-driver * Adding the volume notifications to cinder * add ability to clone images * Update SolidFire volume driver * Add proper support for deprecation messages * Remove utils.deprecated functions * Move volume size validation to api layer * Map internal exceptions in the nova style * Add driver for using files on a generic NFS server as virtual block devices Add NetApp-specific NFS virtual block driver * Implements bp migrate-nova-volumes-to-cinder * add get\_location method for images * rbd: implement create\_volume\_from\_snapshot * Replace deprecated client with python-glanceclient * Remove unused imports * Fix check\_for\_export() in non-exporting drivers * Adds new volume API extensions * Driver for IBM XIV storage * Fake requests in tests should be to v1 * Add C-mode driver for NetApp * storwize-svc: improved test coverage and fixes * Use setuptools-git * Add iscsiadm path for qauntal * Create unique volumes\_dir for testing * Remove redundant 'availability\_zone' config options * Straight port of the NetApp driver updates from nova-volume to cinder * Use volume driver specific execeptions * Admin users should be restricted from seeing all snapshots by default * Use openstack.common.notifier * Admin users should be restricted from seeing all volumes by default * Deprecate 
root\_helper in favor of rootwrap\_config * Send 'create volume from snapshot' to the proper host * Add persistent volumes for tgtd * Scheduler-clean-up * Include AUTHORS file in MANIFEST.in * Add authors for IBM Storwize and SVC driver * Driver for IBM Storwize and SVC storage * Remove unused instance\_name\_template flag * Allow XML payload for volume creation * Include volume\_metadata with object on vol create * Trim volume type representation * Port nova-rootwrap changes to cinder-rootwrap * Don't do PEP8 test for openstack-common code * Cleanup unused code in servce.py * Use openstack.common.setup * utils module is still being used by cinder-volume service * Remove unused fake memcache client * Remove unused check\_snapshots\_enabled * Use openstack.common.log for logging * Don't create volumes if an incorrect size was given * Use rpc from openstack-common * Add missing gettextutils from openstack-common * Use save\_and\_reraise\_exception() from common * Use openstack.common.cfg.CONF * Remove cinder.log usage from cinder.rpc * Remove cinder.context dependency from cinder.rpc * Localize rpc options to rpc code * Add version to scheduler rpc API * Sync cfg and iniparser from openstack-common * Use cfg's new global CONF object * Make use of openstack.common.jsonutils * Sync with latest version of openstack.common.cfg * Convert Cinder to use openstack-common jsonutils * Add missing ack to impl\_qpid * Move queue\_get\_for() from db to rpc * Add base support for rpc API versioning * Make kombu support optional for running unit tests * Stop using cinder.exception from cinder.rpc * Remove unused synchronization decorator * Remove 'cinder-manage config convert' * Use cfg's new behavior of reset() clearing overrides * Remove unused enabled\_apis flag * Remove some unused helper scripts * Remove unused wrap\_errors decorator * Remove unused get\_{id,version}\_from\_href() * Remove unused metadata serialization * Remove unused raise\_http\_conflict\_for\_instance\_invalid\_state() * Remove unused OverLimitFault * Remove old flagfile support * Misused and not used config options * Pass 'cinder' project into ConfigOpts * Sync to newer openstack.common.cfg * Convert Cinder to use openstack-common timeutils * Do not duplicate nova docs in cinder * Remove unused db api methods * Create single initial Cinder DB migration file * Updated HpSanISCSIDriver to use initialize/terminate methods * Pruned Authors file to active contributors (from nova-volumes) * Move nova-manage.rst to cinder-manage.rst * Add action extensions to support nova integration * Revert "Add action extensions to support nova integration." * Fix volume['id'] from integer to string * Add action extensions to support nova integration * Set pep8 version to 1.1 in test\_requires * Fix topics so that the do not collide with nova * Fix up coverage and jenkins test running * Remove instance Foreign Key in volumes table, replace with instance\_uuid * Align the tox.ini file * Removed cinder/api/openstack/compute and moved the relevant pieces under cinder/api/openstack/volume. 
Fixes bug 994177 * Initial fork out of Nova ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/HACKING.rst0000664000175000017500000000364400000000000014674 0ustar00zuulzuul00000000000000Cinder Style Commandments ========================= - Step 1: Read the OpenStack Style Commandments https://docs.openstack.org/hacking/latest/ - Step 2: Read on Cinder Specific Commandments ---------------------------- - [N322] Ensure default arguments are not mutable. - [N323] Add check for explicit import of _() to ensure proper translation. - [C301] timeutils.utcnow() from oslo_utils should be used instead of datetime.now(). - [C303] Ensure that there are no 'print()' statements are used in code that should be using LOG calls. - [C309] Unit tests should not perform logging. - [C310] Check for improper use of logging format arguments. - [C311] Check for proper naming and usage in option registration. - [C312] Validate that logs are not translated. - [C313] Check that assertTrue(value) is used and not assertEqual(True, value). - [C336] Must use a dict comprehension instead of a dict constructor with a sequence of key-value pairs. - [C337] Ensure the standard library mock modules is used and not the third party mock library that was needed for Python 2 support. - [C338] Log.warn is deprecated. Enforce use of LOG.warning. General ------- - Use 'raise' instead of 'raise e' to preserve original traceback or exception being reraised:: except Exception as e: ... raise e # BAD except Exception: ... raise # OKAY Creating Unit Tests ------------------- For every new feature, unit tests should be created that both test and (implicitly) document the usage of said feature. If submitting a patch for a bug that had no unit test, a new passing unit test should be added. If a submitted bug fix does have a unit test, be sure to add a new one that fails without the patch and passes with the patch. For more information on creating unit tests and utilizing the testing infrastructure in OpenStack Cinder, please see https://docs.openstack.org/cinder/latest/contributor/testing.html ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/LICENSE0000664000175000017500000002363700000000000014107 0ustar00zuulzuul00000000000000 Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. 
"Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. 
You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. 
In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315577.8431256 cinder-27.0.0/PKG-INFO0000644000175000017500000001336000000000000014165 0ustar00zuulzuul00000000000000Metadata-Version: 2.1 Name: cinder Version: 27.0.0 Summary: OpenStack Block Storage Home-page: https://docs.openstack.org/cinder/latest/ Author: OpenStack Author-email: openstack-discuss@lists.openstack.org Project-URL: Source, https://opendev.org/openstack/cinder Project-URL: Tracker, https://bugs.launchpad.net/cinder Classifier: Environment :: OpenStack Classifier: Intended Audience :: Information Technology Classifier: Intended Audience :: System Administrators Classifier: License :: OSI Approved :: Apache Software License Classifier: Operating System :: POSIX :: Linux Classifier: Programming Language :: Python Classifier: Programming Language :: Python :: Implementation :: CPython Classifier: Programming Language :: Python :: 3 :: Only Classifier: Programming Language :: Python :: 3 Classifier: Programming Language :: Python :: 3.10 Classifier: Programming Language :: Python :: 3.11 Classifier: Programming Language :: Python :: 3.12 Requires-Python: >=3.10 License-File: LICENSE Requires-Dist: pbr>=5.8.0 Requires-Dist: decorator>=4.4.2 Requires-Dist: eventlet!=0.32.0,>=0.30.1 Requires-Dist: greenlet>=0.4.16 Requires-Dist: iso8601>=0.1.12 Requires-Dist: jsonschema>=3.2.0 Requires-Dist: keystoneauth1>=4.2.1 Requires-Dist: keystonemiddleware>=9.1.0 Requires-Dist: lxml>=4.5.2 Requires-Dist: oslo.config>=8.3.2 Requires-Dist: oslo.concurrency>=4.5.0 Requires-Dist: oslo.context>=3.4.0 Requires-Dist: oslo.db>=11.0.0 Requires-Dist: oslo.log>=4.6.1 Requires-Dist: oslo.messaging>=14.6.0 Requires-Dist: oslo.middleware>=4.1.1 Requires-Dist: oslo.policy>=4.5.0 Requires-Dist: oslo.privsep>=2.6.2 Requires-Dist: oslo.reports>=3.2.0 Requires-Dist: oslo.rootwrap>=6.2.0 Requires-Dist: oslo.serialization>=4.2.0 Requires-Dist: oslo.service>=2.8.0 Requires-Dist: oslo.upgradecheck>=1.1.1 Requires-Dist: oslo.utils>=6.0.0 Requires-Dist: oslo.versionedobjects>=2.4.0 Requires-Dist: osprofiler>=3.4.0 Requires-Dist: packaging>=20.4 Requires-Dist: paramiko>=2.7.2 Requires-Dist: Paste>=3.4.3 Requires-Dist: 
PasteDeploy>=2.1.0 Requires-Dist: pyparsing>=2.4.7 Requires-Dist: python-barbicanclient>=5.0.1 Requires-Dist: python-glanceclient>=3.2.2 Requires-Dist: python-keystoneclient>=4.1.1 Requires-Dist: python-novaclient>=18.2.0 Requires-Dist: python-swiftclient>=3.10.1 Requires-Dist: requests>=2.25.1 Requires-Dist: Routes>=2.4.1 Requires-Dist: taskflow>=4.5.0 Requires-Dist: rtslib-fb>=2.1.74 Requires-Dist: SQLAlchemy>=1.4.23 Requires-Dist: stevedore>=3.2.2 Requires-Dist: tabulate>=0.8.7 Requires-Dist: tenacity>=6.3.1 Requires-Dist: WebOb>=1.8.6 Requires-Dist: oslo.i18n>=5.1.0 Requires-Dist: oslo.vmware>=3.10.0 Requires-Dist: os-brick>=6.10.0 Requires-Dist: os-win>=5.5.0 Requires-Dist: tooz>=2.8.0 Requires-Dist: google-api-python-client>=1.11.0 Requires-Dist: castellan>=3.7.0 Requires-Dist: cryptography>=3.1 Requires-Dist: cursive>=0.2.2 Requires-Dist: zstd>=1.4.5.1 Requires-Dist: boto3>=1.18.49 Requires-Dist: distro>=1.8.0 Requires-Dist: tzdata>=2022.4 Provides-Extra: all Requires-Dist: websocket-client>=1.3.2; extra == "all" Requires-Dist: pyOpenSSL>=17.5.0; extra == "all" Requires-Dist: storops>=0.5.10; extra == "all" Requires-Dist: pywbem>=0.7.0; extra == "all" Requires-Dist: python-3parclient>=4.2.10; extra == "all" Requires-Dist: krest>=1.3.0; extra == "all" Requires-Dist: infinisdk>=103.0.1; extra == "all" Requires-Dist: py-pure-client>=1.47.0; extra == "all" Requires-Dist: rsd-lib>=1.1.0; extra == "all" Requires-Dist: storpool>=7.1.0; extra == "all" Requires-Dist: storpool.spopenstack>=2.2.1; extra == "all" Requires-Dist: dfs-sdk>=1.2.25; extra == "all" Requires-Dist: rbd-iscsi-client>=0.1.8; extra == "all" Requires-Dist: python-linstor>=1.7.0; extra == "all" Requires-Dist: psutil>=5.7.2; extra == "all" Provides-Extra: datacore Requires-Dist: websocket-client>=1.3.2; extra == "datacore" Provides-Extra: powermax Requires-Dist: pyOpenSSL>=17.5.0; extra == "powermax" Provides-Extra: vnx Requires-Dist: storops>=0.5.10; extra == "vnx" Provides-Extra: unity Requires-Dist: storops>=0.5.10; extra == "unity" Provides-Extra: fujitsu Requires-Dist: pywbem>=0.7.0; extra == "fujitsu" Provides-Extra: hpe3par Requires-Dist: python-3parclient>=4.2.10; extra == "hpe3par" Provides-Extra: kaminario Requires-Dist: krest>=1.3.0; extra == "kaminario" Provides-Extra: ds8k Requires-Dist: pyOpenSSL>=17.5.0; extra == "ds8k" Provides-Extra: infinidat Requires-Dist: infinisdk>=103.0.1; extra == "infinidat" Provides-Extra: pure Requires-Dist: py-pure-client>=1.47.0; extra == "pure" Provides-Extra: rsd Requires-Dist: rsd-lib>=1.1.0; extra == "rsd" Provides-Extra: storpool Requires-Dist: storpool>=7.1.0; extra == "storpool" Requires-Dist: storpool.spopenstack>=2.2.1; extra == "storpool" Provides-Extra: datera Requires-Dist: dfs-sdk>=1.2.25; extra == "datera" Provides-Extra: rbd-iscsi Requires-Dist: rbd-iscsi-client>=0.1.8; extra == "rbd-iscsi" Provides-Extra: linstor Requires-Dist: python-linstor>=1.7.0; extra == "linstor" Provides-Extra: quobyte Requires-Dist: psutil>=5.7.2; extra == "quobyte" Provides-Extra: test Requires-Dist: hacking<7.1.0,>=7.0.0; extra == "test" Requires-Dist: flake8-import-order<0.19.0; extra == "test" Requires-Dist: flake8-logging-format>=0.6.0; extra == "test" Requires-Dist: stestr>=3.2.1; extra == "test" Requires-Dist: coverage>=5.5; extra == "test" Requires-Dist: ddt>=1.4.4; extra == "test" Requires-Dist: fixtures>=3.0.0; extra == "test" Requires-Dist: oslotest>=4.5.0; extra == "test" Requires-Dist: PyMySQL>=0.10.0; extra == "test" Requires-Dist: psycopg2-binary>=2.8.5; extra == "test" 
Requires-Dist: SQLAlchemy-Utils>=0.37.8; extra == "test" Requires-Dist: testtools>=2.4.0; extra == "test" Requires-Dist: doc8>=0.8.1; extra == "test" Requires-Dist: mypy<1.18.0,>=1.7.0; extra == "test" Requires-Dist: moto>=5.0.0; extra == "test" Requires-Dist: distro>=1.8.0; extra == "test" file: README.rst ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/README.rst0000664000175000017500000000172600000000000014564 0ustar00zuulzuul00000000000000================ OpenStack Cinder ================ OpenStack Cinder is a storage service for an open cloud computing service. You can learn more about Cinder at: * `Wiki `__ * `Developer Docs `__ * `Blueprints `__ * `Release notes `__ * `Design specifications `__ Getting Started --------------- If you'd like to run from the master branch, you can clone the git repo: git clone https://opendev.org/openstack/cinder If you'd like to contribute, please see the information in `CONTRIBUTING.rst `_ You can raise bugs on `Launchpad `__ Python client ------------- `Python Cinderclient `__ ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315576.8071156 cinder-27.0.0/api-ref/0000775000175000017500000000000000000000000014412 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315576.9031165 cinder-27.0.0/api-ref/source/0000775000175000017500000000000000000000000015712 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/api_microversion_history.rst0000664000175000017500000000010500000000000023571 0ustar00zuulzuul00000000000000.. include:: ../../cinder/api/openstack/rest_api_version_history.rst ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/conf.py0000664000175000017500000001512000000000000017210 0ustar00zuulzuul00000000000000# -*- coding: utf-8 -*- # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # # Cinder documentation build configuration file, created by # sphinx-quickstart on Sat May 1 15:17:47 2010. # # This file is execfile()d with the current directory set to # its containing dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. import os import sys # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. 
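# Note: with this conf.py located at api-ref/source/ (as in this tree), the
# inserts below put the repository root (the directory containing the
# ``cinder`` package), api-ref/, and api-ref/source/ on sys.path so that the
# Sphinx extensions and any documented modules can be imported at build time.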
sys.path.insert(0, os.path.abspath('../../')) sys.path.insert(0, os.path.abspath('../')) sys.path.insert(0, os.path.abspath('./')) # -- General configuration ---------------------------------------------------- # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones. extensions = [ 'os_api_ref', 'openstackdocstheme', ] # The suffix of source filenames. source_suffix = '.rst' # The encoding of source files. # # source_encoding = 'utf-8' # The master toctree document. master_doc = 'index' copyright = u'OpenStack Foundation' # openstackdocstheme options openstackdocs_repo_name = 'openstack/cinder' openstackdocs_bug_project = 'cinder' openstackdocs_bug_tag = 'api-ref' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. # # language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: # today = '' # Else, today_fmt is used as the format for a strftime call. # today_fmt = '%B %d, %Y' # The reST default role (used for this markup: `text`) to use # for all documents. # default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. # add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). add_module_names = False # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. show_authors = False # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'native' # -- Options for man page output ---------------------------------------------- # Grouping the document tree for man pages. # List of tuples 'sourcefile', 'target', u'title', u'Authors name', 'manual' # -- Options for HTML output -------------------------------------------------- # The theme to use for HTML and HTML Help pages. Major themes that come with # Sphinx are currently 'default' and 'sphinxdoc'. # html_theme = '_theme' html_theme = 'openstackdocs' html_theme_options = { "sidebar_mode": "toc", } # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. # html_theme_options = {} # Add any paths that contain custom themes here, relative to this directory. # html_theme_path = [] # The name for this set of Sphinx documents. If None, it defaults to # " v documentation". # html_title = None # A shorter title for the navigation bar. Default is the same as html_title. # html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. # html_logo = None # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. # html_favicon = None # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". # html_static_path = ['_static'] # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. # html_last_updated_fmt = '%b %d, %Y' # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. 
# html_use_smartypants = True # Custom sidebar templates, maps document names to template names. # html_sidebars = {} # Additional templates that should be rendered to pages, maps page names to # template names. # html_additional_pages = {} # If false, no module index is generated. # html_use_modindex = True # If false, no index is generated. # html_use_index = True # If true, the index is split into individual pages for each letter. # html_split_index = False # If true, links to the reST sources are added to the pages. # html_show_sourcelink = True # If true, an OpenSearch description file will be output, and all pages will # contain a tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. # html_use_opensearch = '' # If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml"). # html_file_suffix = '' # Output file base name for HTML help builder. htmlhelp_basename = 'cinderdoc' # -- Options for LaTeX output ------------------------------------------------- # The paper size ('letter' or 'a4'). # latex_paper_size = 'letter' # The font size ('10pt', '11pt' or '12pt'). # latex_font_size = '10pt' # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, author, documentclass # [howto/manual]). latex_documents = [ ('index', 'Cinder.tex', u'OpenStack Block Storage API Documentation', u'OpenStack Foundation', 'manual'), ] # The name of an image file (relative to this directory) to place at the top of # the title page. # latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. # latex_use_parts = False # Additional stuff for the LaTeX preamble. # latex_preamble = '' # Documents to append as an appendix to all manuals. # latex_appendices = [] # If false, no module index is generated. # latex_use_modindex = True ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/index.rst0000664000175000017500000000055700000000000017562 0ustar00zuulzuul00000000000000================= Block Storage API ================= Contents: API content can be searched using the :ref:`search`. Details for each microversion change can be found in the :doc:`REST API Version History ` documentation. .. toctree:: :hidden: api_microversion_history .. toctree:: :maxdepth: 2 v3/index v2/index ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/status.yaml0000664000175000017500000000303400000000000020121 0ustar00zuulzuul00000000000000 200: default: | Request was successful. 201: default: | Request has been fulfilled and new resource created. 202: default: | Request is accepted, but processing may take some time. 203: default: | Returned information is not full set, but a subset. 204: default: | Request fulfilled but service does not return anything. 300: default: | The resource corresponds to more than one representation. 400: default: | Some content in the request was invalid. 401: default: | User must authenticate before making a request. 403: default: | Policy does not allow current user to do this operation. 404: default: | The requested resource could not be found. 405: default: | Method is not valid for this endpoint and resource. 409: default: | This resource has an action in progress that would conflict with this request. 413: default: | This operation cannot be completed. 
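# The entries in this file are consumed by the os_api_ref extension's
# rest_status_code directive in the .inc files under v2/ and v3/. A sketch of
# how an error table referencing the 413 entry above would look in an .inc
# file (same pattern as the directives used elsewhere in this api-ref):
#
#   .. rest_status_code:: error ../status.yaml
#
#      - 413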
415: default: | The entity of the request is in a format not supported by the requested resource for the method. 500: default: | Something went wrong with the service which prevents it from fulfilling the request. 501: default: | The service does not have the functionality required to fulfill this request. 503: default: | The service cannot handle the request right now. ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315576.9111166 cinder-27.0.0/api-ref/source/v2/0000775000175000017500000000000000000000000016241 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v2/api-versions.inc0000664000175000017500000000077200000000000021361 0ustar00zuulzuul00000000000000.. -*- rst -*- API versions ============ List Api Versions ~~~~~~~~~~~~~~~~~ .. rest_method:: GET / Lists information for all Block Storage API versions. Response codes -------------- .. rest_status_code:: success ../status.yaml - 200 - 300 .. rest_status_code:: error ../status.yaml - 400 - 401 - 403 - 404 - 405 - 500 - 503 Response -------- **Example List Api Versions: JSON request** .. literalinclude:: ./samples/versions-response.json :language: javascript ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v2/availability-zones-v2.inc0000664000175000017500000000153500000000000023073 0ustar00zuulzuul00000000000000.. -*- rst -*- Availability zones (os-availability-zone) ========================================= List availability zone information. Get Availability Zone Information ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: GET /v2/{project_id}/os-availability-zone List availability zone information. Response codes -------------- .. rest_status_code:: success ../status.yaml - 200 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path Response Parameter ------------------ .. rest_parameters:: parameters.yaml - project_id: project_id - availabilityZoneInfo: availability_zone_info - zoneName: availability_zone_3 - zoneState: availability_zone_state - available: available Response Example ---------------- .. literalinclude:: ./samples/availability-zone-list-response.json :language: javascript ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v2/capabilities-v2.inc0000664000175000017500000000223500000000000021714 0ustar00zuulzuul00000000000000.. -*- rst -*- Capabilities for storage back ends (capabilities) ================================================= Shows capabilities for a storage back end. Show back-end capabilities ~~~~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: GET /v2/{project_id}/capabilities/{hostname} Shows capabilities for a storage back end on the host. The ``hostname`` takes the form of ``hostname@volume_backend_name``. Response codes -------------- .. rest_status_code:: success ../status.yaml - 200 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - hostname: hostname Response Parameters ------------------- .. 
rest_parameters:: parameters.yaml - pool_name: pool_name - description: description - volume_backend_name: volume_backend_name - namespace: namespace_1 - visibility: visibility - driver_version: driver_version - vendor_name: vendor_name - properties: properties - storage_protocol: storage_protocol - replication_targets: replication_targets - display_name: display_name Response Example ---------------- .. literalinclude:: ./samples/backend-capabilities-response.json :language: javascript ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v2/consistencygroups-v2.inc0000664000175000017500000001304000000000000023060 0ustar00zuulzuul00000000000000.. -*- rst -*- Consistency groups ================== Consistency groups enable you to create snapshots at the exact same point in time from multiple volumes. For example, a database might place its tables, logs, and configuration on separate volumes. To restore this database from a previous point in time, it makes sense to restore the logs, tables, and configuration together from the exact same point in time. Use the policy configuration file to grant permissions for these actions to limit roles. List consistency groups ~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: GET /v2/{project_id}/consistencygroups Lists consistency groups. Response codes -------------- .. rest_status_code:: success ../status.yaml - 200 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - all_tenants: all-tenants - sort_key: sort_key - sort_dir: sort_dir - limit: limit - marker: marker Response Parameters ------------------- .. rest_parameters:: parameters.yaml - id: id - name: name Response Example ---------------- .. literalinclude:: ./samples/consistency-groups-list-response.json :language: javascript Create consistency group ~~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: POST /v2/{project_id}/consistencygroups Creates a consistency group. Response codes -------------- .. rest_status_code:: success ../status.yaml - 202 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - description: description_6 - availability_zone: availability_zone - volume_types: volume_types_2 - name: name_15 Request Example --------------- .. literalinclude:: ./samples/consistency-group-create-request.json :language: javascript Response -------- .. rest_parameters:: parameters.yaml - status: status_1 - description: description_11 - availability_zone: availability_zone - created_at: created_at - volume_types: volume_types - name: name_15 - id: consistencygroup_id_1 Response Example ---------------- .. literalinclude:: ./samples/consistency-group-create-response.json :language: javascript Show consistency group details ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: GET /v2/{project_id}/consistencygroups/{consistencygroup_id} Shows details for a consistency group. Response codes -------------- .. rest_status_code:: success ../status.yaml - 200 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - consistencygroup_id: consistencygroup_id Response Parameters ------------------- .. rest_parameters:: parameters.yaml - status: status_1 - description: description - availability_zone: availability_zone - created_at: created_at - volume_types: volume_types - id: id - name: name Response Example ---------------- .. 
literalinclude:: ./samples/consistency-group-show-response.json :language: javascript Create consistency group from source ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: POST /v2/{project_id}/consistencygroups/create_from_src Creates a consistency group from source. Response codes -------------- .. rest_status_code:: success ../status.yaml - 202 Request ------- .. rest_parameters:: parameters.yaml - status: status_1 - user_id: user_id - description: description - cgsnapshot_id: cgsnapshot_id - source_cgid: source_cgid - project_id: project_id_path - name: name - project_id: project_id Request Example --------------- .. literalinclude:: ./samples/consistency-group-create-from-src-request.json :language: javascript Delete consistency group ~~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: POST /v2/{project_id}/consistencygroups/{consistencygroup_id}/delete Deletes a consistency group. Response codes -------------- .. rest_status_code:: success ../status.yaml - 202 Request ------- .. rest_parameters:: parameters.yaml - force: force - project_id: project_id_path - consistencygroup_id: consistencygroup_id Request Example --------------- .. literalinclude:: ./samples/consistency-group-delete-request.json :language: javascript List consistency groups with details ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: GET /v2/{project_id}/consistencygroups/detail Lists consistency groups with details. Response codes -------------- .. rest_status_code:: success ../status.yaml - 200 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - all_tenants: all-tenants - sort_key: sort_key - sort_dir: sort_dir - limit: limit - marker: marker Response Parameters ------------------- .. rest_parameters:: parameters.yaml - status: status_1 - description: description - availability_zone: availability_zone - created_at: created_at - volume_types: volume_types - id: id - name: name Response Example ---------------- .. literalinclude:: ./samples/consistency-groups-list-detailed-response.json :language: javascript Update consistency group ~~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: PUT /v2/{project_id}/consistencygroups/{consistencygroup_id}/update Updates a consistency group. Response codes -------------- .. rest_status_code:: success ../status.yaml - 202 Request ------- .. rest_parameters:: parameters.yaml - remove_volumes: remove_volumes - description: description - add_volumes: add_volumes - name: name - project_id: project_id_path - consistencygroup_id: consistencygroup_id Request Example --------------- .. literalinclude:: ./samples/consistency-group-update-request.json :language: javascript ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v2/ext-backups-actions-v2.inc0000664000175000017500000000302000000000000023140 0ustar00zuulzuul00000000000000.. -*- rst -*- Backup actions (backups, action) ================================ Force-deletes a backup and reset status for a backup. Force-delete backup ~~~~~~~~~~~~~~~~~~~ .. rest_method:: POST /v2/{project_id}/backups/{backup_id}/action Force-deletes a backup. Specify the ``os-force_delete`` action in the request body. This operation deletes the backup and any backup data. The backup driver returns the ``405`` status code if it does not support this operation. Response codes -------------- .. rest_status_code:: success ../status.yaml - 202 .. rest_status_code:: error ../status.yaml - 404 - 405 Request ------- .. 
rest_parameters:: parameters.yaml - os-force_delete: os-force_delete - project_id: project_id_path - backup_id: backup_id Request Example --------------- .. literalinclude:: ./samples/backup-force-delete-request.json :language: javascript Reset backup's status ~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: POST /v2/{project_id}/backups/{backup_id}/action Reset a backup's status. Specify the ``os-reset_status`` action in the request body. Response codes -------------- .. rest_status_code:: success ../status.yaml - 202 .. rest_status_code:: error ../status.yaml - 400 - 404 Request ------- .. rest_parameters:: parameters.yaml - status: status_7 - os-reset_status: os-reset_status - project_id: project_id_path - backup_id: backup_id Request Example --------------- .. literalinclude:: ./samples/backup-reset-status-request.json :language: javascript ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v2/ext-backups.inc0000664000175000017500000002043700000000000021170 0ustar00zuulzuul00000000000000.. -*- rst -*- Backups (backups) ================= A backup is a full copy of a volume stored in an external service. The service can be configured. The only supported service is Object Storage. A backup can subsequently be restored from the external service to either the same volume that the backup was originally taken from or to a new volume. When you create, list, or delete backups, these status values are possible: **Backup statuses** +-----------------+---------------------------------------------+ | Status | Description | +-----------------+---------------------------------------------+ | creating | The backup is being created. | +-----------------+---------------------------------------------+ | available | The backup is ready to restore to a volume. | +-----------------+---------------------------------------------+ | deleting | The backup is being deleted. | +-----------------+---------------------------------------------+ | error | A backup error occurred. | +-----------------+---------------------------------------------+ | restoring | The backup is being restored to a volume. | +-----------------+---------------------------------------------+ | error_deleting | An error occurred while deleting the backup.| +-----------------+---------------------------------------------+ If an error occurs, you can find more information about the error in the ``fail_reason`` field for the backup. List backups with details ~~~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: GET /v2/{project_id}/backups/detail Lists Block Storage backups, with details, to which the project has access. Response codes -------------- .. rest_status_code:: success ../status.yaml - 200 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - all_tenants: all-tenants - sort_key: sort_key - sort_dir: sort_dir - limit: limit - marker: marker Response Parameters ------------------- .. rest_parameters:: parameters.yaml - status: status_4 - object_count: object_count - fail_reason: fail_reason - description: description - links: links_1 - availability_zone: availability_zone - created_at: created_at - updated_at: updated_at - name: name_1 - has_dependent_backups: has_dependent_backups - volume_id: volume_id - container: container - backups: backups - size: size - id: id_1 - is_incremental: is_incremental - data_timestamp: data_timestamp - snapshot_id: snapshot_id_2 Response Example ---------------- .. 
literalinclude:: ./samples/backups-list-detailed-response.json :language: javascript Show backup details ~~~~~~~~~~~~~~~~~~~ .. rest_method:: GET /v2/{project_id}/backups/{backup_id} Shows details for a backup. Response codes -------------- .. rest_status_code:: success ../status.yaml - 200 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - backup_id: backup_id Response Parameters ------------------- .. rest_parameters:: parameters.yaml - status: status_4 - object_count: object_count - container: container - description: description - links: links_1 - availability_zone: availability_zone - created_at: created_at - updated_at: updated_at - name: name_1 - has_dependent_backups: has_dependent_backups - volume_id: volume_id - fail_reason: fail_reason - size: size - backup: backup - id: id_1 - is_incremental: is_incremental - data_timestamp: data_timestamp - snapshot_id: snapshot_id_2 Response Example ---------------- .. literalinclude:: ./samples/backup-show-response.json :language: javascript Delete backup ~~~~~~~~~~~~~ .. rest_method:: DELETE /v2/{project_id}/backups/{backup_id} Deletes a backup. Response codes -------------- .. rest_status_code:: success ../status.yaml - 202 .. rest_status_code:: error ../status.yaml - 400 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - backup_id: backup_id Restore backup ~~~~~~~~~~~~~~ .. rest_method:: POST /v2/{project_id}/backups/{backup_id}/restore Restores a Block Storage backup to an existing or new Block Storage volume. You must specify either the UUID or name of the volume. If you specify both the UUID and name, the UUID takes priority. If specifying ``volume_id`` the status of the volume must be ``available``. Response codes -------------- .. rest_status_code:: success ../status.yaml - 202 .. rest_status_code:: error ../status.yaml - 400 - 413 Request ------- .. rest_parameters:: parameters.yaml - restore: restore - name: name_1 - volume_id: volume_id - project_id: project_id_path - backup_id: backup_id Request Example --------------- .. literalinclude:: ./samples/backup-restore-request.json :language: javascript Response Parameters ------------------- .. rest_parameters:: parameters.yaml - restore: restore - backup_id: backup_id - volume_id: volume_id - volume_name: volume_name Response Example ---------------- .. literalinclude:: ./samples/backup-restore-response.json :language: javascript Create backup ~~~~~~~~~~~~~ .. rest_method:: POST /v2/{project_id}/backups Creates a Block Storage backup from a volume. The status of the volume must be ``available`` or if the ``force`` flag is used, backups of ``in-use`` volumes may also be created. Response codes -------------- .. rest_status_code:: success ../status.yaml - 202 .. rest_status_code:: error ../status.yaml - 400 - 500 Request ------- .. rest_parameters:: parameters.yaml - container: container - description: description - incremental: incremental - volume_id: volume_id - force: force - backup: backup - name: name_1 - project_id: project_id_path - snapshot_id: snapshot_id_2 Request Example --------------- .. literalinclude:: ./samples/backup-create-request.json :language: javascript Response Parameters ------------------- .. rest_parameters:: parameters.yaml - backup: backup - id: id_1 - links: links_1 - name: name_1 List backups ~~~~~~~~~~~~ .. rest_method:: GET /v2/{project_id}/backups Lists Block Storage backups to which the project has access. Response codes -------------- .. 
rest_status_code:: success ../status.yaml - 200 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - all_tenants: all-tenants - sort_key: sort_key - sort_dir: sort_dir - limit: limit - marker: marker Response Parameters ------------------- .. rest_parameters:: parameters.yaml - backups: backups - id: id_1 - links: links_1 - name: name_1 Response Example ---------------- .. literalinclude:: ./samples/backups-list-response.json :language: javascript Export backup ~~~~~~~~~~~~~ .. rest_method:: GET /v2/{project_id}/backups/{backup_id}/export_record Export information about a backup. Response codes -------------- .. rest_status_code:: success ../status.yaml - 200 .. rest_status_code:: error ../status.yaml - 400 - 404 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - backup_id: backup_id Response Parameters ------------------- .. rest_parameters:: parameters.yaml - backup-record: backup_record - backup_service: backup_service - backup_url: backup_url Response Example ---------------- .. literalinclude:: ./samples/backup-record-export-response.json :language: javascript Import backup ~~~~~~~~~~~~~ .. rest_method:: POST /v2/{project_id}/backups/import_record Import information about a backup. Response codes -------------- .. rest_status_code:: success ../status.yaml - 201 .. rest_status_code:: error ../status.yaml - 400 - 503 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - backup-record: backup_record - backup_service: backup_service - backup_url: backup_url Request Example --------------- .. literalinclude:: ./samples/backup-record-import-request.json :language: javascript Response Parameters ------------------- .. rest_parameters:: parameters.yaml - id: id_1 - links: links_1 - name: name_1 Response Example ---------------- .. literalinclude:: ./samples/backup-record-import-response.json ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v2/hosts.inc0000664000175000017500000000361600000000000020102 0ustar00zuulzuul00000000000000.. -*- rst -*- Hosts extension (os-hosts) ========================== Administrators only, depending on policy settings. Lists, shows hosts. List all hosts ~~~~~~~~~~~~~~ .. rest_method:: GET /v2/{admin_project_id}/os-hosts Lists all hosts summary info that is not disabled. Response codes -------------- .. rest_status_code:: success ../status.yaml - 200 .. rest_status_code:: error ../status.yaml - 400 - 401 - 403 Request ------- .. rest_parameters:: parameters.yaml - admin_project_id: admin_project_id Response Parameters ------------------- .. rest_parameters:: parameters.yaml - service-status: host_service_status - service: host_service - zone: availability_zone_3 - service-state: service_state - host_name: host_name_1 - last-update: updated_at Response Example ---------------- .. literalinclude:: ./samples/hosts-list-response.json :language: javascript Show Host Details ~~~~~~~~~~~~~~~~~ .. rest_method:: GET /v2/{admin_project_id}/os-hosts/{host_name} Shows volume and snapshot details for a cinder-volume host. *Note:* This API is meant specifically for cinder-volume hosts only. It is not valid against other Cinder service hosts or hosts where the cinder-volume service has been disabled. Response codes -------------- .. rest_status_code:: success ../status.yaml - 200 .. rest_status_code:: error ../status.yaml - 400 - 401 - 403 - 404 Request ------- .. 
rest_parameters:: parameters.yaml - admin_project_id: admin_project_id - host_name: hostname Response -------- .. rest_parameters:: parameters.yaml - volume_count: total_count_str - total_volume_gb: totalGigabytesUsedStr - total_snapshot_gb: totalSnapshotsUsedStr - project: project_id_2 - host: host_name_1 - snapshot_count: totalSnapshotsUsed **Example Show Host Details** .. literalinclude:: ./samples/hosts-get-response.json :language: javascript ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v2/index.rst0000664000175000017500000000317000000000000020103 0ustar00zuulzuul00000000000000:tocdepth: 2 ============================== Block Storage API V2 (REMOVED) ============================== .. note:: Version 2 of the Block Storage API was `deprecated in the Pike release `_ and was removed during the Xena development cycle. `This document is maintained for historical purposes only.` `Version 3 `_ of the Block Storage API was `introduced in the Mitaka release `_. Version 3.0, which is the default microversion at the ``/v3`` endpoint, was designed to be identical to version 2. Thus, scripts using the Block Storage API v2 should be adaptable to version 3 with minimal changes. .. rest_expand_all:: .. include:: api-versions.inc .. include:: availability-zones-v2.inc .. include:: ext-backups.inc .. include:: ext-backups-actions-v2.inc .. include:: capabilities-v2.inc .. include:: os-cgsnapshots-v2.inc .. include:: consistencygroups-v2.inc .. include:: hosts.inc .. include:: limits.inc .. include:: os-vol-pool-v2.inc .. include:: os-vol-transfer-v2.inc .. include:: qos-specs-v2-qos-specs.inc .. include:: quota-classes.inc .. include:: quota-sets.inc .. include:: volume-manage.inc .. include:: volume-type-access.inc .. include:: volumes-v2-extensions.inc .. include:: volumes-v2-snapshots.inc .. include:: volumes-v2-snapshots-actions.inc .. include:: volumes-v2-types.inc .. include:: volumes-v2-versions.inc .. include:: volumes-v2-volumes-actions.inc .. include:: volumes-v2-volumes.inc ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v2/limits.inc0000664000175000017500000000247600000000000020246 0ustar00zuulzuul00000000000000.. -*- rst -*- Limits (limits) =============== Shows absolute limits for a project. An absolute limit value of ``-1`` indicates that the absolute limit for the item is infinite. Show absolute limits ~~~~~~~~~~~~~~~~~~~~ .. rest_method:: GET /v2/{project_id}/limits Shows absolute limits for a project. An absolute limit value of ``-1`` indicates that the absolute limit for the item is infinite. Response codes -------------- .. rest_status_code:: success ../status.yaml - 200 .. rest_status_code:: error ../status.yaml - 403 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path Response Parameters ------------------- .. rest_parameters:: parameters.yaml - totalSnapshotsUsed: totalSnapshotsUsed - maxTotalBackups: maxTotalBackups - maxTotalVolumeGigabytes: maxTotalVolumeGigabytes - limits: limits - maxTotalSnapshots: maxTotalSnapshots - maxTotalBackupGigabytes: maxTotalBackupGigabytes - totalBackupGigabytesUsed: totalBackupGigabytesUsed - maxTotalVolumes: maxTotalVolumes - totalVolumesUsed: totalVolumesUsed - rate: rate - totalBackupsUsed: totalBackupsUsed - totalGigabytesUsed: totalGigabytesUsed - absolute: absolute Response Example ---------------- .. 
literalinclude:: ./samples/limits-show-response.json :language: javascript ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v2/os-cgsnapshots-v2.inc0000664000175000017500000000653000000000000022240 0ustar00zuulzuul00000000000000.. -*- rst -*- Consistency group snapshots =========================== Lists all, lists all with details, shows details for, creates, and deletes consistency group snapshots. Delete consistency group snapshot ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: DELETE /v2/{project_id}/cgsnapshots/{cgsnapshot_id} Deletes a consistency group snapshot. Response codes -------------- .. rest_status_code:: success ../status.yaml - 202 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - cgsnapshot_id: cgsnapshot_id_1 Show consistency group snapshot details ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: GET /v2/{project_id}/cgsnapshots/{cgsnapshot_id} Shows details for a consistency group snapshot. Response codes -------------- .. rest_status_code:: success ../status.yaml - 200 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - cgsnapshot_id: cgsnapshot_id_1 Response Parameters ------------------- .. rest_parameters:: parameters.yaml - status: status - description: description - created_at: created_at - consistencygroup_id: consistencygroup_id - id: id - name: name Response Example ---------------- .. literalinclude:: ./samples/cgsnapshots-show-response.json :language: javascript List consistency group snapshots with details ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: GET /v2/{project_id}/cgsnapshots/detail Lists all consistency group snapshots with details. Response codes -------------- .. rest_status_code:: success ../status.yaml - 200 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - all_tenants: all-tenants Response Parameters ------------------- .. rest_parameters:: parameters.yaml - status: status - description: description - created_at: created_at - consistencygroup_id: consistencygroup_id - id: id - name: name Response Example ---------------- .. literalinclude:: ./samples/cgsnapshots-list-detailed-response.json :language: javascript List consistency group snapshots ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: GET /v2/{project_id}/cgsnapshots Lists all consistency group snapshots. Response codes -------------- .. rest_status_code:: success ../status.yaml - 200 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - all_tenants: all-tenants Response Parameters ------------------- .. rest_parameters:: parameters.yaml - id: id - name: name Response Example ---------------- .. literalinclude:: ./samples/cgsnapshots-list-response.json :language: javascript Create consistency group snapshot ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: POST /v2/{project_id}/cgsnapshots Creates a consistency group snapshot. Response codes -------------- .. rest_status_code:: success ../status.yaml - 202 Request ------- .. rest_parameters:: parameters.yaml - name: name - project_id: project_id_path Request Example --------------- .. literalinclude:: ./samples/cgsnapshots-create-request.json :language: javascript Response Parameters ------------------- .. 
rest_parameters:: parameters.yaml - status: status - description: description - created_at: created_at - consistencygroup_id: consistencygroup_id - id: id - name: name ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v2/os-vol-pool-v2.inc0000664000175000017500000000205000000000000021444 0ustar00zuulzuul00000000000000.. -*- rst -*- Back-end storage pools ====================== Administrator only. Lists all back-end storage pools that are known to the scheduler service. List back-end storage pools ~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: GET /v2/{project_id}/scheduler-stats/get_pools Lists all back-end storage pools. Response codes -------------- .. rest_status_code:: success ../status.yaml - 200 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - detail: detail Response Parameters ------------------- .. rest_parameters:: parameters.yaml - updated: updated - QoS_support: QoS_support - name: name_16 - total_capacity_gb: total_capacity - volume_backend_name: volume_backend_name - capabilities: capabilities - free_capacity_gb: free_capacity - driver_version: driver_version - reserved_percentage: reserved_percentage - storage_protocol: storage_protocol Response Example ---------------- .. literalinclude:: ./samples/pools-list-detailed-response.json :language: javascript ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v2/os-vol-transfer-v2.inc0000664000175000017500000001035300000000000022324 0ustar00zuulzuul00000000000000.. -*- rst -*- Volume transfer =============== Transfers a volume from one user to another user. Accept volume transfer ~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: POST /v2/{project_id}/os-volume-transfer/{transfer_id}/accept Accepts a volume transfer. Response codes -------------- .. rest_status_code:: success ../status.yaml - 202 Request ------- .. rest_parameters:: parameters.yaml - auth_key: auth_key - transfer: transfer - transfer_id: transfer_id - project_id: project_id_path Request Example --------------- .. literalinclude:: ./samples/volume-transfer-accept-request.json :language: javascript Response Parameters ------------------- .. rest_parameters:: parameters.yaml - volume_id: volume_id - id: id - links: links - name: name Response Example ---------------- .. literalinclude:: ./samples/volume-transfer-accept-response.json :language: javascript Create volume transfer ~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: POST /v2/{project_id}/os-volume-transfer Creates a volume transfer. **Preconditions** * The volume ``status`` must be ``available`` * Transferring encrypted volumes is not supported * If the volume has snapshots, those snapshots must be ``available`` Response codes -------------- .. rest_status_code:: success ../status.yaml - 202 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - transfer: transfer - name: name - volume_id: volume_id - project_id: project_id_path Request Example --------------- .. literalinclude:: ./samples/volume-transfer-create-request.json :language: javascript Response Parameters ------------------- .. rest_parameters:: parameters.yaml - auth_key: auth_key - links: links - created_at: created_at - volume_id: volume_id - id: id - name: name Response Example ---------------- .. literalinclude:: ./samples/volume-transfer-create-response.json :language: javascript List volume transfers ~~~~~~~~~~~~~~~~~~~~~ .. 
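note::
   A minimal client-side sketch of this call (the endpoint host, project ID,
   and token are illustrative placeholders):

   .. code-block:: python

      import requests

      BASE = "http://controller:8776/v2"   # placeholder endpoint
      PROJECT_ID = "<project-id>"
      HEADERS = {"X-Auth-Token": "<token>"}

      resp = requests.get(
          f"{BASE}/{PROJECT_ID}/os-volume-transfer", headers=HEADERS)
      # The list is wrapped in a "transfers" key (assumption based on the
      # list response sample); each entry carries id, name, and volume_id.
      for transfer in resp.json()["transfers"]:
          print(transfer["id"], transfer["name"], transfer["volume_id"])

..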
rest_method:: GET /v2/{project_id}/os-volume-transfer Lists volume transfers. Response codes -------------- .. rest_status_code:: success ../status.yaml - 200 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - all_tenants: all-tenants Response Parameters ------------------- .. rest_parameters:: parameters.yaml - volume_id: volume_id - id: id - links: links - name: name Response Example ---------------- .. literalinclude:: ./samples/volume-transfers-list-response.json :language: javascript Show volume transfer details ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: GET /v2/{project_id}/os-volume-transfer/{transfer_id} Shows details for a volume transfer. Response codes -------------- .. rest_status_code:: success ../status.yaml - 200 Request ------- .. rest_parameters:: parameters.yaml - transfer_id: transfer_id - project_id: project_id_path Response Parameters ------------------- .. rest_parameters:: parameters.yaml - created_at: created_at - volume_id: volume_id - id: id - links: links - name: name Response Example ---------------- .. literalinclude:: ./samples/volume-transfer-show-response.json :language: javascript Delete volume transfer ~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: DELETE /v2/{project_id}/os-volume-transfer/{transfer_id} Deletes a volume transfer. Response codes -------------- .. rest_status_code:: success ../status.yaml - 202 Request ------- .. rest_parameters:: parameters.yaml - transfer_id: transfer_id - project_id: project_id_path List volume transfers, with details ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: GET /v2/{project_id}/os-volume-transfer/detail Lists volume transfers, with details. Response codes -------------- .. rest_status_code:: success ../status.yaml - 200 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - all_tenants: all-tenants Response Parameters ------------------- .. rest_parameters:: parameters.yaml - created_at: created_at - volume_id: volume_id - id: id - links: links - name: name Response Example ---------------- .. literalinclude:: ./samples/volume-transfers-list-detailed-response.json :language: javascript ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v2/parameters.yaml0000664000175000017500000012531300000000000021275 0ustar00zuulzuul00000000000000# variables in header x-openstack-request-id: description: > foo in: header required: false type: string # variables in path admin_project_id: description: | The UUID of the administrative project. in: path required: true type: string backup_id: description: | The UUID for a backup. in: path required: true type: string cascade: description: | Remove any snapshots along with the volume. Default=False. in: path required: false type: boolean cgsnapshot_id_1: description: | The ID of the consistency group snapshot. in: path required: true type: string encryption_id: description: | The ID of the encryption type. in: path required: true type: string hostname: description: | The name of the host that hosts the storage back end. in: path required: true type: string key_1: description: | The metadata key name for the metadata that you want to remove. in: path required: true type: string key_2: description: | The metadata key name for the metadata that you want to see. in: path required: true type: string key_3: description: | The metadata key name for the metadata that you want to update. 
in: path required: true type: string project_id_path: description: | The UUID of the project in a multi-tenancy cloud. in: path required: true type: string qos_id: description: | The ID of the QoS specification. in: path required: true type: string quota_class_name: description: The name of the quota class for which to set quotas. in: path required: true type: string quotas_project_id: description: | The UUID of the project in a multi-tenancy cloud. in: path required: true type: string transfer_id: description: | The unique identifier for a volume transfer. in: path required: false type: string vol_type_id: description: | The UUID for an existing volume type. in: path required: true type: string volume_id_path: description: | The UUID of the volume. in: path required: true type: string volume_type: description: | The ID of Volume Type to be accessed by project. in: path required: false type: string volume_type_access: description: | The ID of Volume Type to be accessed by project. in: path required: true type: string volume_type_id: description: | The UUID for an existing volume type. in: path required: false type: string # variables in query action: description: | The action. Valid values are "set" or "unset." in: query required: true type: string all-tenants: description: | Shows details for all projects. Admin only. in: query required: false type: string bootable_query: description: | Filters results by bootable status. Default=None. in: query required: false type: boolean detail: description: | Indicates whether to show pool details or only pool names in the response. Set to ``true`` to show pool details. Set to ``false`` to show only pool names. Default is ``false``. in: query required: false type: boolean image-id: description: | Creates volume from image ID. Default=None. in: query required: false type: string limit: description: | Requests a page size of items. Returns a number of items up to a limit value. Use the ``limit`` parameter to make an initial limited request and use the ID of the last-seen item from the response as the ``marker`` parameter value in a subsequent limited request. in: query required: false type: integer marker: description: | The ID of the last-seen item. Use the ``limit`` parameter to make an initial limited request and use the ID of the last-seen item from the response as the ``marker`` parameter value in a subsequent limited request. in: query required: false type: string name_volume: description: | Filters results by a name. Default=None. in: query required: false type: string offset: description: | Used in conjunction with ``limit`` to return a slice of items. ``offset`` is where to start in the list. in: query required: false type: integer sort: description: | Comma-separated list of sort keys and optional sort directions in the form of < key > [: < direction > ]. A valid direction is ``asc`` (ascending) or ``desc`` (descending). in: query required: false type: string sort_dir: description: | Sorts by one or more sets of attribute and sort direction combinations. If you omit the sort direction in a set, default is ``desc``. in: query required: false type: string sort_key: description: | Sorts by an attribute. A valid value is ``name``, ``status``, ``container_format``, ``disk_format``, ``size``, ``id``, ``created_at``, or ``updated_at``. Default is ``created_at``. The API uses the natural sorting direction of the ``sort_key`` attribute value. in: query required: false type: string usage: description: | Set to ``usage=true`` to show quota usage. 
Default is ``false``. in: query required: false type: boolean # variables in body absolute: description: | An ``absolute`` limits object. in: body required: true type: object add_volumes: description: | One or more volume UUIDs, separated by commas, to add to the volume consistency group. in: body required: false type: string alias: description: | The alias for the extension. For example, "FOXNSOX", "os- availability-zone", "os-extended-quotas", "os- share-unmanage" or "os-used-limits." in: body required: true type: string attach_status: description: | The volume attach status. in: body required: false type: string attachment_id: description: | The interface ID. in: body required: false type: string attachments: description: | Instance attachment information. If this volume is attached to a server instance, the attachments list includes the UUID of the attached server, an attachment UUID, the name of the attached host, if any, the volume UUID, the device, and the device UUID. Otherwise, this list is empty. in: body required: true type: array auth_key: description: | The authentication key for the volume transfer. in: body required: true type: string availability_zone: description: | The name of the availability zone. in: body required: false type: string availability_zone_3: description: | The availability zone name. in: body required: true type: string availability_zone_info: description: | The list of availability zone information. in: body required: true type: array availability_zone_state: description: | The current state of the availability zone. in: body required: true type: object available: description: | Whether the availability zone is available for use. in: body required: true type: boolean backup: description: | A ``backup`` object. in: body required: true type: object backup_gigabytes: description: | The size (GB) of backups that are allowed for each project. in: body required: true type: integer backup_record: description: | An object recording volume backup metadata, including ``backup_service`` and ``backup_url``. in: body required: true type: object backup_service: description: | The service used to perform the backup. in: body required: true type: string backup_url: description: | An identifier string to locate the backup. in: body required: true type: string backups: description: | A list of ``backup`` objects. in: body required: true type: array backups_number: description: | The number of backups that are allowed for each project. in: body required: true type: integer bootable: description: | Enables or disables the bootable attribute. You can boot an instance from a bootable volume. in: body required: true type: boolean bootable_response: description: | Enables or disables the bootable attribute. You can boot an instance from a bootable volume. in: body required: true type: string capabilities: description: | The capabilities for the back end. The value is either ``null`` or a string value that indicates the capabilities for each pool. For example, ``total_capacity_gb`` or ``QoS_support``. in: body required: true type: object cgsnapshot_id: description: | The UUID of the consistency group snapshot. in: body required: false type: string cipher: description: | The encryption algorithm or mode. For example, aes-xts-plain64. The default value is None. in: body required: false type: string connector: description: | The ``connector`` object. in: body required: false type: object consistencygroup_id: description: | The UUID of the consistency group. 
in: body required: true type: string consistencygroup_id_1: description: | The UUID of the consistency group. in: body required: false type: string consumer: description: | The consumer type. in: body required: false type: string consumer_1: description: | The consumer type. in: body required: true type: string container: description: | The container name or null. in: body required: false type: string control_location: description: | Notional service where encryption is performed. Valid values are "front-end" or "back-end". The default value is "front-end". in: body required: false type: string cores: description: | The number of instance cores that are allowed for each project. in: body required: true type: integer created_at: description: | The date and time when the resource was created. The date and time stamp format is `ISO 8601 `_: :: CCYY-MM-DDThh:mm:ss±hh:mm For example, ``2015-08-27T09:49:58-05:00``. The ``±hh:mm`` value, if included, is the time zone as an offset from UTC. in: body required: true type: string created_at_1: description: | Date and time when the volume was created. in: body required: true type: string data_timestamp: description: | The time when the data on the volume was first saved. If it is a backup from volume, it will be the same as ``created_at`` for a backup. If it is a backup from a snapshot, it will be the same as ``created_at`` for the snapshot. in: body required: true type: string deleted: description: | The resource is deleted or not. in: body required: true type: boolean deleted_at: description: | The date and time when the resource was deleted. The date and time stamp format is `ISO 8601 `_: :: CCYY-MM-DDThh:mm:ss±hh:mm For example, ``2015-08-27T09:49:58-05:00``. The ``±hh:mm`` value, if included, is the time zone as an offset from UTC. In the previous example, the offset value is ``-05:00``. If the ``deleted_at`` date and time stamp is not set, its value is ``null``. in: body required: true type: string description: description: | The backup description or null. in: body required: false type: string description_1: description: | The consistency group snapshot description. in: body required: true type: string description_10: description: | The capabilities description. in: body required: true type: string description_11: description: | The consistency group description. in: body required: false type: string description_2: description: | The description of the consistency group. in: body required: false type: string description_3: description: | The description of the consistency group. in: body required: true type: string description_4: description: | A description for the snapshot. Default is ``None``. in: body required: false type: string description_5: description: | The volume description. in: body required: false type: string description_6: description: | The consistency group description. in: body required: true type: string description_7: description: | The extension description. in: body required: true type: string description_8: description: | A description for the snapshot. in: body required: true type: string description_9: description: | The volume description. in: body required: true type: string display_name: description: | The name of volume backend capabilities. in: body required: true type: string driver_version: description: | The driver version. in: body required: true type: string encrypted: description: | If true, this volume is encrypted. in: body required: true type: boolean encryption: description: | The encryption information. 
in: body required: true type: object encryption_id_body: description: | The UUID of the encryption. in: body required: true type: string extra_specs: description: | A set of key and value pairs that contains the specifications for a volume type. in: body required: true type: object fail_reason: description: | If the backup failed, the reason for the failure. Otherwise, null. in: body required: true type: string fixed_ips: description: | The number of fixed IP addresses that are allowed for each project. Must be equal to or greater than the number of allowed instances. in: body required: true type: integer floating_ips: description: | The number of floating IP addresses that are allowed for each project. in: body required: true type: integer force: description: | Indicates whether to backup, even if the volume is attached. Default is ``false``. in: body required: false type: boolean force_1: description: | Indicates whether to snapshot, even if the volume is attached. Default is ``false``. in: body required: false type: boolean force_2: description: | If set to ``true``, forces deletion of a consistency group that has a registered volume. in: body required: false type: boolean free_capacity: description: | The amount of free capacity for the back-end volume, in GBs. A valid value is a string, such as ``unknown``, or a number (integer or floating point). in: body required: true type: string gigabytes: description: | The size (GB) of volumes and snapshots that are allowed for each project. in: body required: true type: integer gigabytes_for_type: description: | The size (GB) of volumes and snapshots that are allowed for each project and the specified volume type. in: body required: true type: integer groups_number: description: | The number of groups that are allowed for each project. in: body required: true type: integer has_dependent_backups: description: | If this value is ``true``, there are other backups depending on this backup. in: body required: false type: boolean host: description: | The OpenStack Block Storage host where the existing volume resides. in: body required: true type: string host_name: description: | The name of the attaching host. in: body required: false type: string host_name_1: description: | The name of the host that hosts the storage backend, may take the format of ``host@backend``. in: body required: true type: string host_service: description: | The name of the service which is running on the host. in: body required: true type: string host_service_status: description: | The status of the service. One of ``available`` or ``unavailable``. in: body required: true type: string id: description: | The UUID of the volume transfer. in: body required: true type: string id_1: description: | The UUID of the backup. in: body required: true type: string id_2: description: | The UUID of the consistency group snapshot. in: body required: true type: string id_3: description: | The generated ID for the QoS specification. in: body required: true type: string id_4: description: | The snapshot UUID. in: body required: true type: string id_5: description: | The UUID of the volume. in: body required: true type: string id_6: description: | The UUID of the consistency group. in: body required: true type: string id_7: description: | The ID for the quota set. in: body required: true type: integer imageRef: description: | The UUID of the image from which you want to create the volume. Required to create a bootable volume. 
in: body required: false type: string in_use: description: | The in use data size. Visible only if you set the ``usage=true`` query parameter. in: body required: false type: string incremental: description: | The backup mode. A valid value is ``true`` for incremental backup mode or ``false`` for full backup mode. Default is ``false``. in: body required: false type: boolean injected_file_content_bytes: description: | The number of bytes of content that are allowed for each injected file. in: body required: true type: integer injected_file_path_bytes: description: | The number of bytes that are allowed for each injected file path. in: body required: true type: integer injected_files: description: | The number of injected files that are allowed for each project. in: body required: true type: integer instance_uuid: description: | The UUID of the attaching instance. in: body required: false type: string instances: description: | The number of instances that are allowed for each project. in: body required: true type: integer is_incremental: description: | Indicates whether the backup mode is incremental. If this value is ``true``, the backup mode is incremental. If this value is ``false``, the backup mode is full. in: body required: false type: boolean is_public: description: Volume type which is accessible to the public. in: body required: false type: boolean key: description: | The metadata key name for the metadata that you want to remove. in: body required: true type: string key_pairs: description: | The number of key pairs that are allowed for each user. in: body required: true type: integer key_size: description: | Size of encryption key, in bits. For example, 128 or 256. The default value is None. in: body required: false type: integer keys: description: | List of Keys. in: body required: true type: array limits: description: | A list of ``limit`` objects. in: body required: true type: object links: description: | Links for the volume transfer. in: body required: true type: array links_1: description: | Links for the backup. in: body required: true type: array links_2: description: | The QoS specification links. in: body required: true type: array links_3: description: | The volume links. in: body required: true type: array links_4: description: | List of links related to the extension. in: body required: true type: array links_5: description: | List of links related to the extension. in: body required: true type: array links_vol_optional: description: | The volume links. in: body required: false type: array location: description: | Full URL to a service or server. format: uri in: body required: true type: string maxTotalBackupGigabytes: description: | The maximum total amount of backups, in gibibytes (GiB). in: body required: true type: integer maxTotalBackups: description: | The maximum number of backups. in: body required: true type: integer maxTotalGroups: description: | The maximum number of groups. in: body required: true type: integer maxTotalSnapshots: description: | The maximum number of snapshots. in: body required: true type: integer maxTotalSnapshotsOptional: description: | The maximum number of snapshots. in: body required: false type: integer maxTotalVolumeGigabytes: description: | The maximum total amount of volumes, in gibibytes (GiB). in: body required: true type: integer maxTotalVolumeGigabytesOptional: description: | The maximum total amount of volumes, in gibibytes (GiB). 
in: body required: true type: integer maxTotalVolumes: description: | The maximum number of volumes. in: body required: true type: integer maxTotalVolumesOptional: description: | The maximum number of volumes. in: body required: false type: integer meta: description: | The metadata key and value pair for the volume. in: body required: true type: object metadata: description: | One or more metadata key and value pairs for the snapshot, if any. in: body required: true type: object metadata_1: description: | A ``metadata`` object. Contains one or more metadata key and value pairs that are associated with the volume. in: body required: true type: object metadata_2: description: | One or more metadata key and value pairs that are associated with the volume. in: body required: false type: object metadata_3: description: | One or more metadata key and value pairs that are associated with the volume. in: body required: true type: object metadata_4: description: | One or more metadata key and value pairs to associate with the volume. in: body required: false type: string metadata_5: description: | The image metadata to add to the volume as a set of metadata key and value pairs. in: body required: true type: object metadata_6: description: | One or more metadata key and value pairs to associate with the volume. in: body required: false type: object metadata_7: description: | One or more metadata key and value pairs for the snapshot. in: body required: false type: object metadata_8: description: | The image metadata key value pairs. in: body required: true type: object metadata_items: description: | The number of metadata items that are allowed for each instance. in: body required: true type: integer migrate_force_host_copy: description: | If false (the default), rely on the volume backend driver to perform the migration, which might be optimized. If true, or the volume driver fails to migrate the volume itself, a generic host-based migration is performed. in: body required: false type: boolean migrate_host: description: | The target host for the volume migration. Host format is ``host@backend``. in: body required: true type: string migrate_lock_volume: description: | If true, migrating an ``available`` volume will change its status to ``maintenance`` preventing other operations from being performed on the volume such as attach, detach, retype, etc. in: body required: false type: boolean migration_completion_error: description: | Used to indicate if an error has occured elsewhere that requires clean up. in: body required: false type: boolean # NOTE(mriedem): We can update the migration_policy retype note about encrypted # in-use volumes not being supported once # https://bugzilla.redhat.com/show_bug.cgi?id=760547 is fixed. migration_policy: description: | Specify if the volume should be migrated when it is re-typed. Possible values are ``on-demand`` or ``never``. If not specified, the default is ``never``. .. note:: If the volume is attached to a server instance and will be migrated, then by default policy only users with the administrative role should attempt the retype operation. A retype which involves a migration to a new host for an *in-use* encrypted volume is not supported. in: body required: false type: string migration_status: description: | The volume migration status. Admin only. in: body required: false type: string mountpoint: description: | The attaching mount point. 
in: body required: true type: string multiattach_resp: description: | If true, this volume can attach to more than one instance. in: body required: true type: boolean name: description: | The name of the Volume Transfer. in: body required: true type: string name_1: description: | The backup name. in: body required: true type: string name_10: description: | The name of the extension. For example, "Fox In Socks." in: body required: true type: string name_11: description: | The name of the back-end volume. in: body required: true type: string name_12: description: | The name of the snapshot. in: body required: true type: string name_13: description: | The volume name. in: body required: true type: string name_14: description: | The name of the volume to which you want to restore a backup. in: body required: false type: string name_15: description: | The consistency group name. in: body required: false type: string name_16: description: | The name of the backend pool. in: body required: true type: string name_2: description: | The consistency group snapshot name. in: body required: true type: string name_3: description: | The name of the consistency group. in: body required: true type: string name_4: description: | The name of the QoS specification. in: body required: true type: string name_5: description: | The name of the snapshot. Default is ``None``. in: body required: false type: string name_6: description: | The volume transfer name. in: body required: false type: string name_7: description: | The name of the volume type. in: body required: true type: string name_9: description: | The consistency group name. in: body required: true type: string namespace: description: | Link associated to the extension. in: body required: true type: string namespace_1: description: | The storage namespace, such as ``OS::Storage::Capabilities::foo``. in: body required: true type: string new_size: description: | The new size of the volume, in gibibytes (GiB). in: body required: true type: integer new_type: description: | The new volume type that volume is changed with. in: body required: true type: string new_volume: description: | The UUID of the new volume. in: body required: true type: string object_count: description: | The number of objects in the backup. in: body required: true type: integer os-attach: description: | The ``os-attach`` action. in: body required: true type: object os-detach: description: | The ``os-detach`` action. in: body required: true type: object os-ext-snap-attr:progress: description: | A percentage value for the build progress. in: body required: true type: string os-ext-snap-attr:project_id: description: | The UUID of the owning project. in: body required: true type: string os-extend: description: | The ``os-extend`` action. in: body required: true type: object os-force_delete: description: | The ``os-force_delete`` action. in: body required: true type: string os-force_detach: description: | The ``os-force_detach`` action. in: body required: true type: object os-migrate_volume: description: | The ``os-migrate_volume`` action. in: body required: true type: object os-migrate_volume_completion: description: | The ``os-migrate_volume_completion`` action. in: body required: true type: object os-reset_status: description: | The ``os-reset_status`` action. in: body required: true type: object os-retype: description: | The ``os-retype`` action. in: body required: true type: object OS-SCH-HNT:scheduler_hints: description: | The dictionary of data to send to the scheduler. 
in: body required: false type: object os-set_bootable: description: | The ``os-set_bootable`` action. in: body required: true type: object os-set_image_metadata: description: | The ``os-set_image_metadata`` action. in: body required: true type: object os-show_image_metadata: description: | The ``os-show_image_metadata`` action. in: body require: true type: object os-unmanage: description: | The ``os-unmanage`` action. This action removes the specified volume from Cinder management. in: body required: true type: object os-unset_image_metadata: description: | The ``os-unset_image_metadata`` action. This action removes the key-value pairs from the image metadata. in: body required: true type: object os-vol-host-attr:host: description: | Current back-end of the volume. Host format is ``host@backend#pool``. in: body required: false type: string os-vol-mig-status-attr:migstat: description: | The status of this volume migration (None means that a migration is not currently in progress). in: body required: false type: string os-vol-mig-status-attr:name_id: description: | The volume ID that this volume name on the back- end is based on. in: body required: false type: string os-vol-tenant-attr:tenant_id: description: | The project ID which the volume belongs to. in: body required: true type: string per_volume_gigabytes: description: | The size (GB) of volumes in request that are allowed for each volume. in: body required: true type: integer perVolumeGigabytes: description: | The maximum amount of storage per volume, in gibibytes (GiB). in: body required: true type: integer pool_name: description: | The name of the storage pool. in: body required: true type: string project: description: | The ID of the project. Volume Type access to be added to this project ID. in: body required: true type: string project_id: description: | The UUID of the project. in: body required: true type: string project_id_1: description: | The Project ID having access to this volume type. in: body required: true type: string project_id_2: description: | The UUID of the project which the host resource belongs to. In the summary resource, the value is ``(total)``. in: body required: true type: string properties: description: | The backend volume capabilities list, which is consisted of cinder standard capabilities and vendor unique properties. in: body required: true type: object provider: description: | The class that provides encryption support. in: body required: true type: string provider_optional: description: | The class that provides encryption support. in: body required: false type: string qos_specs: description: | A ``qos_specs`` object. in: body required: true type: object QoS_support: description: | The quality of service (QoS) support. in: body required: true type: boolean quota_class_id: description: The name of the quota class set. in: body required: true type: string quota_set: description: | A ``quota_set`` object. in: body required: true type: object ram: description: | The amount of instance RAM in megabytes that are allowed for each project. in: body required: true type: integer rate: description: | Rate-limit volume copy bandwidth, used to mitigate slow down of data access from the instances. in: body required: true type: array ref: description: | A reference to the existing volume. The internal structure of this reference depends on the volume driver implementation. For details about the required elements in the structure, see the documentation for the volume driver. 
in: body required: true type: string ref_1: description: | A reference to the existing volume. The internal structure of this reference is dependent on the implementation of the volume driver, see the specific driver's documentation for details of the required elements in the structure. in: body required: true type: object remove_volumes: description: | One or more volume UUIDs, separated by commas, to remove from the volume consistency group. in: body required: false type: string replication_status: description: | The volume replication status. in: body required: true type: string replication_targets: description: | A list of volume backends used to replicate volumes on this backend. in: body required: true type: list reserved: description: | Reserved volume size. Visible only if you set the ``usage=true`` query parameter. in: body required: false type: integer reserved_percentage: description: | The percentage of the total capacity that is reserved for the internal use by the back end. in: body required: true type: integer restore: description: | A ``restore`` object. in: body required: true type: object security_group_rules: description: | The number of rules that are allowed for each security group. in: body required: false type: integer security_groups: description: | The number of security groups that are allowed for each project. in: body required: true type: integer service_state: description: | The state of the service. One of ``enabled`` or ``disabled``. in: body required: true type: string service_status: description: | The status of the service. One of ``enabled`` or ``disabled``. in: body required: true type: string size: description: | The size of the volume, in gibibytes (GiB). in: body required: true type: integer snapshot: description: | A partial representation of a snapshot used in the creation process. in: body required: true type: string snapshot_id: description: | To create a volume from an existing snapshot, specify the UUID of the volume snapshot. The volume is created in same availability zone and with same size as the snapshot. in: body required: false type: string snapshot_id_2: description: | The UUID of the source volume snapshot. in: body required: false type: string snapshots_number: description: | The number of snapshots that are allowed for each project. in: body required: true type: integer snapshots_number_for_type: description: | The number of snapshots that are allowed for each project and the specified volume type. in: body required: true type: integer source_cgid: description: | The UUID of the source consistency group. in: body required: false type: string source_volid: description: | The UUID of the source volume. The API creates a new volume with the same size as the source volume unless a larger size is requested. in: body required: false type: string specs: description: | A ``specs`` object. in: body required: true type: object status: description: | The ``status`` of the consistency group snapshot. in: body required: false type: string status_1: description: | The status of the consistency group. in: body required: true type: string status_2: description: | The status for the snapshot. in: body required: true type: string status_3: description: | The volume status. in: body required: true type: string status_4: description: | The backup status. Refer to Backup statuses table for the possible status value. in: body required: true type: string status_7: description: | The status for the backup. 
in: body required: true type: string storage_protocol: description: | The storage back end for the back-end volume. For example, ``iSCSI`` or ``FC``. in: body required: true type: string total_capacity: description: | The total capacity for the back-end volume, in GBs. A valid value is a string, such as ``unknown``, or a number (integer or floating point). in: body required: true type: string total_count: description: | Total number of volumes. in: body required: true type: integer total_count_str: description: | Total number of volumes. in: body required: true type: string totalBackupGigabytesUsed: description: | The total number of backups gibibytes (GiB) used. in: body required: true type: integer totalBackupsUsed: description: | The total number of backups used. in: body required: true type: integer totalGigabytesUsed: description: | The total number of gibibytes (GiB) used. in: body required: true type: integer totalGigabytesUsedStr: description: | The total number of gibibytes (GiB) used. in: body required: true type: string totalSnapshotsUsed: description: | The total number of snapshots used. in: body required: true type: integer totalSnapshotsUsedStr: description: | The total number of snapshots used. in: body required: true type: string totalVolumesUsed: description: | The total number of volumes used. in: body required: true type: integer transfer: description: | The volume transfer object. in: body required: true type: object updated: description: | The date and time stamp when the extension was last updated. in: body required: true type: string updated_1: description: | The date and time stamp when the API request was issued. in: body required: true type: string updated_at: description: | The date and time when the resource was updated. The date and time stamp format is `ISO 8601 `_: :: CCYY-MM-DDThh:mm:ss±hh:mm For example, ``2015-08-27T09:49:58-05:00``. The ``±hh:mm`` value, if included, is the time zone as an offset from UTC. In the previous example, the offset value is ``-05:00``. If the ``updated_at`` date and time stamp is not set, its value is ``null``. in: body required: true type: string user_id: description: | The UUID of the user. in: body required: true type: string vendor_name: description: | The name of the vendor. in: body required: true type: string visibility: description: | The volume type access. in: body required: true type: string volume: description: | A ``volume`` object. in: body required: true type: object volume_1: description: | A ``volume`` object. in: body required: true type: string volume_backend_name: description: | The name of the back-end volume. in: body required: true type: string volume_id: description: | The UUID of the volume. in: body required: true type: string volume_id_2: description: | The UUID of the volume that you want to back up. in: body required: true type: string volume_id_3: description: | To create a snapshot from an existing volume, specify the UUID of the existing volume. in: body required: true type: string volume_id_4: description: | The UUID of the volume from which the backup was created. in: body required: true type: string volume_id_5: description: | If the snapshot was created from a volume, the volume ID. in: body required: true type: string volume_id_6: description: | The UUID of the volume to which you want to restore a backup. in: body required: false type: string volume_image_metadata: description: | List of image metadata entries. 
Only included for volumes that were created from an image, or from a snapshot of a volume originally created from an image. in: body required: false type: object volume_name: description: | The volume name. in: body required: true type: string volume_name_optional: description: | The volume name. in: body required: false type: string volume_type_1: description: | A ``volume_type`` object. in: body required: true type: object volume_type_2: description: | The volume type (either name or ID). To create an environment with multiple-storage back ends, you must specify a volume type. Block Storage volume back ends are spawned as children to ``cinder- volume``, and they are keyed from a unique queue. They are named ``cinder- volume.HOST.BACKEND``. For example, ``cinder- volume.ubuntu.lvmdriver``. When a volume is created, the scheduler chooses an appropriate back end to handle the request based on the volume type. Default is ``None``. For information about how to use volume types to create multiple- storage back ends, see `Configure multiple-storage back ends `_. in: body required: false type: string volume_type_3: description: | The volume type. In an environment with multiple- storage back ends, the scheduler determines where to send the volume based on the volume type. For information about how to use volume types to create multiple- storage back ends, see `Configure multiple-storage back ends `_. in: body required: true type: string volume_type_4: description: | The associated volume type. in: body required: false type: string volume_type_5: description: | A list of ``volume_type`` objects. in: body required: true type: array volume_type_id_body: description: | The UUID of the volume type. in: body required: true type: string volume_types: description: | The list of volume types. In an environment with multiple-storage back ends, the scheduler determines where to send the volume based on the volume type. For information about how to use volume types to create multiple- storage back ends, see `Configure multiple-storage back ends `_. in: body required: true type: array volume_types_2: description: | The list of volume types separated by commas. In an environment with multiple-storage back ends, the scheduler determines where to send the volume based on the volume type. For information about how to use volume types to create multiple-storage back ends, see `Configure multiple-storage back ends `_. in: body required: true type: string volumes: description: | A list of ``volume`` objects. in: body required: true type: array volumes_number: description: | The number of volumes that are allowed for each project. in: body required: true type: integer volumes_number_for_type: description: | The number of volumes that are allowed for each project and the specified volume type. in: body required: true type: integer ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v2/qos-specs-v2-qos-specs.inc0000664000175000017500000001350600000000000023116 0ustar00zuulzuul00000000000000.. -*- rst -*- Quality of service (QoS) specifications (qos-specs) =================================================== Administrators only. Creates, lists, shows details for, associates, disassociates, sets keys, unsets keys, and deletes quality of service (QoS) specifications. Disassociate QoS specification from all associations ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. 
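note::
   A minimal client-side sketch of this call (the endpoint host, project ID,
   QoS specification ID, and token are illustrative placeholders):

   .. code-block:: python

      import requests

      BASE = "http://controller:8776/v2"   # placeholder endpoint
      PROJECT_ID = "<project-id>"
      QOS_ID = "<qos-spec-id>"
      HEADERS = {"X-Auth-Token": "<token>"}

      # Note that this API uses GET even though the call changes state.
      url = f"{BASE}/{PROJECT_ID}/qos-specs/{QOS_ID}/disassociate_all"
      resp = requests.get(url, headers=HEADERS)
      print(resp.status_code)  # 200 on success

..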
rest_method:: GET /v2/{project_id}/qos-specs/{qos_id}/disassociate_all Disassociates a QoS specification from all associations. Response codes -------------- .. rest_status_code:: success ../status.yaml - 200 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - qos_id: qos_id Unset keys in QoS specification ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: PUT /v2/{project_id}/qos-specs/{qos_id}/delete_keys Unsets keys in a QoS specification. Response codes -------------- .. rest_status_code:: success ../status.yaml - 200 Request ------- .. rest_parameters:: parameters.yaml - keys: keys - project_id: project_id_path - qos_id: qos_id Request Example --------------- .. literalinclude:: ./samples/qos-unset-request.json :language: javascript Response Example ---------------- There is no body content for the response of a successful PUT operation. Get all associations for QoS specification ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: GET /v2/{project_id}/qos-specs/{qos_id}/associations Lists all associations for a QoS specification. Response codes -------------- .. rest_status_code:: success ../status.yaml - 200 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - qos_id: qos_id Response Example ---------------- .. literalinclude:: ./samples/qos-show-response.json :language: javascript Associate QoS specification with volume type ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: GET /v2/{project_id}/qos-specs/{qos_id}/associate Associates a QoS specification with a volume type. Response codes -------------- .. rest_status_code:: success ../status.yaml - 200 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - qos_id: qos_id - vol_type_id: vol_type_id Disassociate QoS specification from volume type ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: GET /v2/{project_id}/qos-specs/{qos_id}/disassociate Disassociates a QoS specification from a volume type. Response codes -------------- .. rest_status_code:: success ../status.yaml - 200 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - qos_id: qos_id - vol_type_id: vol_type_id Show QoS specification details ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: GET /v2/{project_id}/qos-specs/{qos_id} Shows details for a QoS specification. Response codes -------------- .. rest_status_code:: success ../status.yaml - 200 .. rest_status_code:: error ../status.yaml - 400 - 401 - 403 - 404 - 405 - 413 - 503 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - qos_id: qos_id Response Parameters ------------------- .. rest_parameters:: parameters.yaml - name: name_4 - links: links_2 - id: id_3 - qos_specs: qos_specs - consumer: consumer - specs: specs Response Example ---------------- .. literalinclude:: ./samples/qos-show-response.json :language: javascript Set keys in QoS specification ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: PUT /v2/{project_id}/qos-specs/{qos_id} Sets keys in a QoS specification. Response codes -------------- .. rest_status_code:: success ../status.yaml - 200 Request ------- .. rest_parameters:: parameters.yaml - qos_specs: qos_specs - specs: specs - project_id: project_id_path - qos_id: qos_id Request Example --------------- .. literalinclude:: ./samples/qos-update-request.json :language: javascript Response Example ---------------- .. 
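note::
   The response sample below corresponds to an update such as the following
   sketch (the endpoint host, IDs, token, and the example spec key are
   illustrative placeholders):

   .. code-block:: python

      import requests

      BASE = "http://controller:8776/v2"   # placeholder endpoint
      PROJECT_ID = "<project-id>"
      QOS_ID = "<qos-spec-id>"
      HEADERS = {"X-Auth-Token": "<token>"}

      body = {"qos_specs": {"read_iops_sec": "1000"}}  # example key/value
      resp = requests.put(
          f"{BASE}/{PROJECT_ID}/qos-specs/{QOS_ID}",
          json=body, headers=HEADERS)
      print(resp.status_code)  # 200 on success

..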
literalinclude:: ./samples/qos-update-response.json :language: javascript Delete QoS specification ~~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: DELETE /v2/{project_id}/qos-specs/{qos_id} Deletes a QoS specification. Response codes -------------- .. rest_status_code:: success ../status.yaml - 202 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - qos_id: qos_id - force: force Create QoS specification ~~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: POST /v2/{project_id}/qos-specs Creates a QoS specification. Specify one or more key and value pairs in the request body. Response codes -------------- .. rest_status_code:: success ../status.yaml - 202 Request ------- .. rest_parameters:: parameters.yaml - qos_specs: qos_specs - consumer: consumer - name: name_4 - project_id: project_id_path Request Example --------------- .. literalinclude:: ./samples/qos-create-request.json :language: javascript Response Parameters ------------------- .. rest_parameters:: parameters.yaml - name: name_4 - links: links_2 - id: id_3 - qos_specs: qos_specs - consumer: consumer - specs: specs List QoS specs ~~~~~~~~~~~~~~ .. rest_method:: GET /v2/{project_id}/qos-specs Lists quality of service (QoS) specifications. Response codes -------------- .. rest_status_code:: success ../status.yaml - 200 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - sort_key: sort_key - sort_dir: sort_dir - limit: limit - marker: marker Response Parameters ------------------- .. rest_parameters:: parameters.yaml - specs: specs - qos_specs: qos_specs - consumer: consumer - id: id_3 - name: name_4 Response Example ---------------- .. literalinclude:: ./samples/qos-list-response.json :language: javascript ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v2/quota-classes.inc0000664000175000017500000000474500000000000021532 0ustar00zuulzuul00000000000000.. -*- rst -*- Quota class set extension (os-quota-class-sets) =============================================== Administrators only, depending on policy settings. Shows and updates quota classes for a project. Show quota classes ~~~~~~~~~~~~~~~~~~ .. rest_method:: GET /v2/{admin_project_id}/os-quota-class-sets/{quota_class_name} Shows quota class set for a project. If no specific value for the quota class resource exists, then the default value will be reported. Response codes -------------- .. rest_status_code:: success ../status.yaml - 200 .. rest_status_code:: error ../status.yaml - 403 - 404 Request ------- .. rest_parameters:: parameters.yaml - quota_class_name: quota_class_name - admin_project_id: admin_project_id Response Parameters ------------------- .. rest_parameters:: parameters.yaml - backup_gigabytes: maxTotalBackupGigabytes - backups: maxTotalBackups - gigabytes: maxTotalVolumeGigabytes - groups: maxTotalGroups - per_volume_gigabytes: perVolumeGigabytes - snapshots: maxTotalSnapshots - volumes: maxTotalVolumes - id: quota_class_id Response Example ---------------- .. literalinclude:: ./samples/quota-classes-show-response.json :language: javascript Update quota classes ~~~~~~~~~~~~~~~~~~~~ .. rest_method:: PUT /v2/{admin_project_id}/os-quota-class-sets/{quota_class_name} Updates quota class set for a project. If the ``quota_class_name`` key does not exist, then the API will create one. Response codes -------------- .. rest_status_code:: success ../status.yaml - 200 .. 
rest_status_code:: error ../status.yaml - 400 - 403 - 404 Request ------- .. rest_parameters:: parameters.yaml - admin_project_id: admin_project_id - quota_class_name: quota_class_name - gigabytes: maxTotalVolumeGigabytesOptional - snapshots: maxTotalSnapshotsOptional - volumes: maxTotalVolumesOptional - volume-type: volume_type Request Example --------------- .. literalinclude:: ./samples/quota-classes-update-request.json :language: javascript Response Parameters ------------------- .. rest_parameters:: parameters.yaml - backup_gigabytes: maxTotalBackupGigabytes - backups: maxTotalBackups - gigabytes: maxTotalVolumeGigabytes - groups: maxTotalGroups - per_volume_gigabytes: perVolumeGigabytes - snapshots: maxTotalSnapshots - volumes: maxTotalVolumes Response Example ---------------- .. literalinclude:: ./samples/quota-classes-update-response.json :language: javascript ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v2/quota-sets.inc0000664000175000017500000001016700000000000021046 0ustar00zuulzuul00000000000000.. -*- rst -*- Quota sets extension (os-quota-sets) ==================================== Administrators only, depending on policy settings. Shows, updates, and deletes quotas for a project. Show quotas ~~~~~~~~~~~ .. rest_method:: GET /v2/{admin_project_id}/os-quota-sets/{project_id} Shows quotas for a project. Response codes -------------- .. rest_status_code:: success ../status.yaml - 200 Request ------- .. rest_parameters:: parameters.yaml - project_id: quotas_project_id - admin_project_id: admin_project_id - usage: usage Response Parameters ------------------- .. rest_parameters:: parameters.yaml - quota_set: quota_set - id: project_id - volumes: volumes_number - volumes_{volume_type}: volumes_number_for_type - snapshots: snapshots_number - snapshots_{volume_type}: snapshots_number_for_type - backups: backups_number - groups: groups_number - per_volume_gigabytes: per_volume_gigabytes - gigabytes: gigabytes - gigabytes_{volume_type}: gigabytes_for_type - backup_gigabytes: backup_gigabytes Response Example ---------------- .. literalinclude:: ./samples/quotas-show-response.json :language: javascript Update quotas ~~~~~~~~~~~~~ .. rest_method:: PUT /v2/{admin_project_id}/os-quota-sets/{project_id} Updates quotas for a project. Response codes -------------- .. rest_status_code:: success ../status.yaml - 200 Request ------- .. rest_parameters:: parameters.yaml - admin_project_id: admin_project_id - project_id: quotas_project_id - quota_set: quota_set - volumes: volumes_number - volumes_{volume_type}: volumes_number_for_type - snapshots: snapshots_number - snapshots_{volume_type}: snapshots_number_for_type - backups: backups_number - groups: groups_number - per_volume_gigabytes: per_volume_gigabytes - gigabytes: gigabytes - gigabytes_{volume_type}: gigabytes_for_type - backup_gigabytes: backup_gigabytes Request Example --------------- .. literalinclude:: ./samples/quotas-update-request.json :language: javascript Response Parameters ------------------- .. rest_parameters:: parameters.yaml - quota_set: quota_set - volumes: volumes_number - volumes_{volume_type}: volumes_number_for_type - snapshots: snapshots_number - snapshots_{volume_type}: snapshots_number_for_type - backups: backups_number - groups: groups_number - per_volume_gigabytes: per_volume_gigabytes - gigabytes: gigabytes - gigabytes_{volume_type}: gigabytes_for_type - backup_gigabytes: backup_gigabytes Response Example ---------------- .. 
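note::
   The response sample below corresponds to an update such as the following
   sketch (the endpoint host, project IDs, token, and the quota values are
   illustrative placeholders):

   .. code-block:: python

      import requests

      BASE = "http://controller:8776/v2"        # placeholder endpoint
      ADMIN_PROJECT_ID = "<admin-project-id>"
      PROJECT_ID = "<target-project-id>"
      HEADERS = {"X-Auth-Token": "<token>"}

      body = {"quota_set": {"volumes": 10, "gigabytes": 1000}}  # example values
      resp = requests.put(
          f"{BASE}/{ADMIN_PROJECT_ID}/os-quota-sets/{PROJECT_ID}",
          json=body, headers=HEADERS)
      print(resp.json()["quota_set"]["volumes"])

..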
literalinclude:: ./samples/quotas-update-response.json :language: javascript Delete quotas ~~~~~~~~~~~~~ .. rest_method:: DELETE /v2/{admin_project_id}/os-quota-sets/{project_id} Deletes quotas for a project so the quotas revert to default values. Response codes -------------- .. rest_status_code:: success ../status.yaml - 200 Request ------- .. rest_parameters:: parameters.yaml - project_id: quotas_project_id - admin_project_id: admin_project_id Response Example ---------------- There is no body content for the response of a successful DELETE operation. Get default quotas ~~~~~~~~~~~~~~~~~~ .. rest_method:: GET /v2/{admin_project_id}/os-quota-sets/{project_id}/defaults Gets default quotas for a project. Response codes -------------- .. rest_status_code:: success ../status.yaml - 200 Request ------- .. rest_parameters:: parameters.yaml - project_id: quotas_project_id - admin_project_id: admin_project_id Response Parameters ------------------- .. rest_parameters:: parameters.yaml - quota_set: quota_set - id: project_id - volumes: volumes_number - volumes_{volume_type}: volumes_number_for_type - snapshots: snapshots_number - snapshots_{volume_type}: snapshots_number_for_type - backups: backups_number - groups: groups_number - per_volume_gigabytes: per_volume_gigabytes - gigabytes: gigabytes - gigabytes_{volume_type}: gigabytes_for_type - backup_gigabytes: backup_gigabytes Response Example ---------------- .. literalinclude:: ./samples/quotas-show-defaults-response.json :language: javascript ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1759315576.939117 cinder-27.0.0/api-ref/source/v2/samples/0000775000175000017500000000000000000000000017705 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v2/samples/availability-zone-list-response.json0000664000175000017500000000017100000000000027027 0ustar00zuulzuul00000000000000{ "availabilityZoneInfo": [{ "zoneState": { "available": true }, "zoneName": "nova" }] }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v2/samples/backend-capabilities-response.json0000664000175000017500000000204200000000000026450 0ustar00zuulzuul00000000000000{ "namespace": "OS::Storage::Capabilities::fake", "vendor_name": "OpenStack", "volume_backend_name": "lvmdriver-1", "pool_name": "pool", "driver_version": "2.0.0", "storage_protocol": "iSCSI", "display_name": "Capabilities of Cinder LVM driver", "description": "These are volume type options provided by Cinder LVM driver, blah, blah.", "visibility": "public", "replication_targets": [], "properties": { "compression": { "title": "Compression", "description": "Enables compression.", "type": "boolean" }, "qos": { "title": "QoS", "description": "Enables QoS.", "type": "boolean" }, "replication": { "title": "Replication", "description": "Enables replication.", "type": "boolean" }, "thin_provisioning": { "title": "Thin Provisioning", "description": "Sets thin provisioning.", "type": "boolean" } } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v2/samples/backup-create-request.json0000664000175000017500000000031000000000000024766 0ustar00zuulzuul00000000000000{ "backup": { "container": null, "description": null, "name": "backup001", "volume_id": "64f5d2fb-d836-4063-b7e2-544d5c1ff607", 
"incremental": true } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v2/samples/backup-create-response.json0000664000175000017500000000100400000000000025135 0ustar00zuulzuul00000000000000{ "backup": { "id": "deac8b8c-35c9-4c71-acaa-889c2d5d5c8e", "links": [ { "href": "http://localhost:8776/v2/c95fc3e4afe248a49a28828f286a7b38/backups/deac8b8c-35c9-4c71-acaa-889c2d5d5c8e", "rel": "self" }, { "href": "http://localhost:8776/c95fc3e4afe248a49a28828f286a7b38/backups/deac8b8c-35c9-4c71-acaa-889c2d5d5c8e", "rel": "bookmark" } ], "name": "backup001" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v2/samples/backup-force-delete-request.json0000664000175000017500000000003600000000000026066 0ustar00zuulzuul00000000000000{ "os-force_delete": {} } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v2/samples/backup-record-export-response.json0000664000175000017500000000017300000000000026475 0ustar00zuulzuul00000000000000{ "backup-record": { "backup_service": "cinder.backup.drivers.swift", "backup_url": "eyJzdGF0" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v2/samples/backup-record-import-request.json0000664000175000017500000000017300000000000026320 0ustar00zuulzuul00000000000000{ "backup-record": { "backup_service": "cinder.backup.drivers.swift", "backup_url": "eyJzdGF0" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v2/samples/backup-record-import-response.json0000664000175000017500000000077500000000000026476 0ustar00zuulzuul00000000000000{ "backup": { "id": "deac8b8c-35c9-4c71-acaa-889c2d5d5c8e", "links": [ { "href": "http://localhost:8776/v2/c95fc3e4afe248a49a28828f286a7b38/backups/deac8b8c-35c9-4c71-acaa-889c2d5d5c8e", "rel": "self" }, { "href": "http://localhost:8776/c95fc3e4afe248a49a28828f286a7b38/backups/deac8b8c-35c9-4c71-acaa-889c2d5d5c8e", "rel": "bookmark" } ], "name": null } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v2/samples/backup-reset-status-request.json0000664000175000017500000000010100000000000026164 0ustar00zuulzuul00000000000000{ "os-reset_status": { "status": "available" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v2/samples/backup-restore-request.json0000664000175000017500000000016100000000000025212 0ustar00zuulzuul00000000000000{ "restore": { "name": "vol-01", "volume_id": "64f5d2fb-d836-4063-b7e2-544d5c1ff607" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v2/samples/backup-restore-response.json0000664000175000017500000000022400000000000025360 0ustar00zuulzuul00000000000000{ "restore": { "backup_id": "2ef47aee-8844-490c-804d-2a8efe561c65", "volume_id": "795114e8-7489-40be-a978-83797f2c1dd3" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v2/samples/backup-show-response.json0000664000175000017500000000161700000000000024664 
0ustar00zuulzuul00000000000000{ "backup": { "availability_zone": "az1", "container": "volumebackups", "created_at": "2013-04-02T10:35:27.000000", "description": null, "fail_reason": null, "id": "2ef47aee-8844-490c-804d-2a8efe561c65", "links": [ { "href": "http://localhost:8776/v2/c95fc3e4afe248a49a28828f286a7b38/backups/2ef47aee-8844-490c-804d-2a8efe561c65", "rel": "self" }, { "href": "http://localhost:8776/c95fc3e4afe248a49a28828f286a7b38/backups/2ef47aee-8844-490c-804d-2a8efe561c65", "rel": "bookmark" } ], "name": "backup001", "object_count": 22, "size": 1, "status": "available", "volume_id": "e5185058-943a-4cb4-96d9-72c184c337d6", "is_incremental": true, "has_dependent_backups": false } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v2/samples/backups-list-detailed-response.json0000664000175000017500000000374600000000000026620 0ustar00zuulzuul00000000000000{ "backups": [ { "availability_zone": "az1", "container": "volumebackups", "created_at": "2013-04-02T10:35:27.000000", "description": null, "fail_reason": null, "id": "2ef47aee-8844-490c-804d-2a8efe561c65", "links": [ { "href": "http://localhost:8776/v2/c95fc3e4afe248a49a28828f286a7b38/backups/2ef47aee-8844-490c-804d-2a8efe561c65", "rel": "self" }, { "href": "http://localhost:8776/c95fc3e4afe248a49a28828f286a7b38/backups/2ef47aee-8844-490c-804d-2a8efe561c65", "rel": "bookmark" } ], "name": "backup001", "object_count": 22, "size": 1, "status": "available", "volume_id": "e5185058-943a-4cb4-96d9-72c184c337d6", "is_incremental": true, "has_dependent_backups": false }, { "availability_zone": "az1", "container": "volumebackups", "created_at": "2013-04-02T10:21:48.000000", "description": null, "fail_reason": null, "id": "4dbf0ec2-0b57-4669-9823-9f7c76f2b4f8", "links": [ { "href": "http://localhost:8776/v2/c95fc3e4afe248a49a28828f286a7b38/backups/4dbf0ec2-0b57-4669-9823-9f7c76f2b4f8", "rel": "self" }, { "href": "http://localhost:8776/c95fc3e4afe248a49a28828f286a7b38/backups/4dbf0ec2-0b57-4669-9823-9f7c76f2b4f8", "rel": "bookmark" } ], "name": "backup002", "object_count": 22, "size": 1, "status": "available", "volume_id": "e5185058-943a-4cb4-96d9-72c184c337d6", "is_incremental": true, "has_dependent_backups": false } ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v2/samples/backups-list-response.json0000664000175000017500000000217000000000000025035 0ustar00zuulzuul00000000000000{ "backups": [ { "id": "2ef47aee-8844-490c-804d-2a8efe561c65", "links": [ { "href": "http://localhost:8776/v2/c95fc3e4afe248a49a28828f286a7b38/backups/2ef47aee-8844-490c-804d-2a8efe561c65", "rel": "self" }, { "href": "http://localhost:8776/c95fc3e4afe248a49a28828f286a7b38/backups/2ef47aee-8844-490c-804d-2a8efe561c65", "rel": "bookmark" } ], "name": "backup001" }, { "id": "4dbf0ec2-0b57-4669-9823-9f7c76f2b4f8", "links": [ { "href": "http://localhost:8776/v2/c95fc3e4afe248a49a28828f286a7b38/backups/4dbf0ec2-0b57-4669-9823-9f7c76f2b4f8", "rel": "self" }, { "href": "http://localhost:8776/c95fc3e4afe248a49a28828f286a7b38/backups/4dbf0ec2-0b57-4669-9823-9f7c76f2b4f8", "rel": "bookmark" } ], "name": "backup002" } ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v2/samples/cgsnapshots-create-request.json0000664000175000017500000000051000000000000026057 0ustar00zuulzuul00000000000000{ "cgsnapshot": { 
"consistencygroup_id": "6f519a48-3183-46cf-a32f-41815f814546", "name": "firstcg", "description": "first consistency group", "user_id": "6f519a48-3183-46cf-a32f-41815f814444", "project_id": "6f519a48-3183-46cf-a32f-41815f815555", "status": "creating" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v2/samples/cgsnapshots-create-response.json0000664000175000017500000000015600000000000026233 0ustar00zuulzuul00000000000000{ "cgsnapshot": { "id": "6f519a48-3183-46cf-a32f-41815f816666", "name": "firstcg" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v2/samples/cgsnapshots-list-detailed-response.json0000664000175000017500000000125000000000000027510 0ustar00zuulzuul00000000000000{ "cgsnapshots": [ { "id": "6f519a48-3183-46cf-a32f-41815f813986", "consistencygroup_id": "6f519a48-3183-46cf-a32f-41815f814444", "status": "available", "created_at": "2015-09-16T09:28:52.000000", "name": "my-cg1", "description": "my first consistency group" }, { "id": "aed36625-a6d7-4681-ba59-c7ba3d18c148", "consistencygroup_id": "aed36625-a6d7-4681-ba59-c7ba3d18dddd", "status": "error", "created_at": "2015-09-16T09:31:15.000000", "name": "my-cg2", "description": "Edited description" } ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v2/samples/cgsnapshots-list-response.json0000664000175000017500000000036600000000000025746 0ustar00zuulzuul00000000000000{ "cgsnapshots": [ { "id": "6f519a48-3183-46cf-a32f-41815f813986", "name": "my-cg1" }, { "id": "aed36625-a6d7-4681-ba59-c7ba3d18c148", "name": "my-cg2" } ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v2/samples/cgsnapshots-show-response.json0000664000175000017500000000047400000000000025753 0ustar00zuulzuul00000000000000{ "cgsnapshot": { "id": "6f519a48-3183-46cf-a32f-41815f813986", "consistencygroup_id": "6f519a48-3183-46cf-a32f-41815f814444", "status": "available", "created_at": "2015-09-16T09:28:52.000000", "name": "my-cg1", "description": "my first consistency group" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v2/samples/consistency-group-create-from-src-request.json0000664000175000017500000000062000000000000030746 0ustar00zuulzuul00000000000000{ "consistencygroup-from-src": { "name": "firstcg", "description": "first consistency group", "cgsnapshot_id": "6f519a48-3183-46cf-a32f-41815f813986", "source_cgid": "6f519a48-3183-46cf-a32f-41815f814546", "user_id": "6f519a48-3183-46cf-a32f-41815f815555", "project_id": "6f519a48-3183-46cf-a32f-41815f814444", "status": "creating" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v2/samples/consistency-group-create-request.json0000664000175000017500000000027300000000000027224 0ustar00zuulzuul00000000000000{ "consistencygroup": { "name": "firstcg", "description": "first consistency group", "volume_types": "type1,type2", "availability_zone": "az0" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v2/samples/consistency-group-create-response.json0000664000175000017500000000050500000000000027370 
0ustar00zuulzuul00000000000000{ "consistencygroup": { "status": "error", "description": "first consistency group", "availability_zone": "az0", "created_at": "2016-08-19T19:32:19.000000", "volume_types": ["type1", "type2"], "id": "63d1a274-de38-4384-a97e-475306777027", "name": "firstcg" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v2/samples/consistency-group-delete-request.json0000664000175000017500000000007300000000000027221 0ustar00zuulzuul00000000000000{ "consistencygroup": { "force": false } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v2/samples/consistency-group-show-response.json0000664000175000017500000000053100000000000027104 0ustar00zuulzuul00000000000000{ "consistencygroup": { "id": "6f519a48-3183-46cf-a32f-41815f813986", "status": "available", "availability_zone": "az1", "created_at": "2015-09-16T09:28:52.000000", "name": "my-cg1", "description": "my first consistency group", "volume_types": [ "123456" ] } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v2/samples/consistency-group-update-request.json0000664000175000017500000000033200000000000027237 0ustar00zuulzuul00000000000000{ "consistencygroup": { "name": "my_cg", "description": "My consistency group", "add_volumes": "volume-uuid-1,volume-uuid-2", "remove_volumes": "volume-uuid-8,volume-uuid-9" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v2/samples/consistency-groups-list-detailed-response.json0000664000175000017500000000136400000000000031040 0ustar00zuulzuul00000000000000{ "consistencygroups": [ { "id": "6f519a48-3183-46cf-a32f-41815f813986", "status": "available", "availability_zone": "az1", "created_at": "2015-09-16T09:28:52.000000", "name": "my-cg1", "description": "my first consistency group", "volume_types": [ "123456" ] }, { "id": "aed36625-a6d7-4681-ba59-c7ba3d18c148", "status": "error", "availability_zone": "az2", "created_at": "2015-09-16T09:31:15.000000", "name": "my-cg2", "description": "Edited description", "volume_types": [ "234567" ] } ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v2/samples/consistency-groups-list-response.json0000664000175000017500000000037400000000000027267 0ustar00zuulzuul00000000000000{ "consistencygroups": [ { "id": "6f519a48-3183-46cf-a32f-41815f813986", "name": "my-cg1" }, { "id": "aed36625-a6d7-4681-ba59-c7ba3d18c148", "name": "my-cg2" } ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v2/samples/encryption-type-create-request.json0000664000175000017500000000023600000000000026701 0ustar00zuulzuul00000000000000{ "encryption":{ "key_size": 256, "provider": "luks", "control_location":"front-end", "cipher": "aes-xts-plain64" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v2/samples/encryption-type-create-response.json0000664000175000017500000000044300000000000027047 0ustar00zuulzuul00000000000000{ "encryption": { "volume_type_id": "2d29462d-76cb-417c-8a9f-fb23140f1577", "control_location": "front-end", "encryption_id": 
"81e069c6-7394-4856-8df7-3b237ca61f74", "key_size": 256, "provider": "luks", "cipher": "aes-xts-plain64" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v2/samples/encryption-type-show-response.json0000664000175000017500000000054700000000000026571 0ustar00zuulzuul00000000000000{ "volume_type_id": "2d29462d-76cb-417c-8a9f-fb23140f1577", "control_location": "front-end", "deleted": false, "created_at": "2016-12-28T02:32:25.000000", "updated_at": null, "encryption_id": "81e069c6-7394-4856-8df7-3b237ca61f74", "key_size": 256, "provider": "luks", "deleted_at": null, "cipher": "aes-xts-plain64" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v2/samples/encryption-type-update-request.json0000664000175000017500000000016700000000000026723 0ustar00zuulzuul00000000000000{ "encryption":{ "key_size": 64, "provider": "luks", "control_location":"back-end" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v2/samples/encryption-type-update-response.json0000664000175000017500000000016700000000000027071 0ustar00zuulzuul00000000000000{ "encryption":{ "key_size": 64, "provider": "luks", "control_location":"back-end" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v2/samples/extensions-list-response.json0000664000175000017500000002105300000000000025605 0ustar00zuulzuul00000000000000{ "extensions": [ { "updated": "2013-04-18T00:00:00+00:00", "name": "SchedulerHints", "links": [], "namespace": "https://docs.openstack.org/block-service/ext/scheduler-hints/api/v2", "alias": "OS-SCH-HNT", "description": "Pass arbitrary key/value pairs to the scheduler." }, { "updated": "2011-06-29T00:00:00+00:00", "name": "Hosts", "links": [], "namespace": "https://docs.openstack.org/volume/ext/hosts/api/v1.1", "alias": "os-hosts", "description": "Admin-only host administration." }, { "updated": "2011-11-03T00:00:00+00:00", "name": "VolumeTenantAttribute", "links": [], "namespace": "https://docs.openstack.org/volume/ext/volume_tenant_attribute/api/v1", "alias": "os-vol-tenant-attr", "description": "Expose the internal project_id as an attribute of a volume." }, { "updated": "2011-08-08T00:00:00+00:00", "name": "Quotas", "links": [], "namespace": "https://docs.openstack.org/volume/ext/quotas-sets/api/v1.1", "alias": "os-quota-sets", "description": "Quota management support." }, { "updated": "2011-08-24T00:00:00+00:00", "name": "TypesManage", "links": [], "namespace": "https://docs.openstack.org/volume/ext/types-manage/api/v1", "alias": "os-types-manage", "description": "Types manage support." }, { "updated": "2013-07-10T00:00:00+00:00", "name": "VolumeEncryptionMetadata", "links": [], "namespace": "https://docs.openstack.org/volume/ext/os-volume-encryption-metadata/api/v1", "alias": "os-volume-encryption-metadata", "description": "Volume encryption metadata retrieval support." }, { "updated": "2012-12-12T00:00:00+00:00", "name": "Backups", "links": [], "namespace": "https://docs.openstack.org/volume/ext/backups/api/v1", "alias": "backups", "description": "Backups support." 
}, { "updated": "2013-07-16T00:00:00+00:00", "name": "SnapshotActions", "links": [], "namespace": "https://docs.openstack.org/volume/ext/snapshot-actions/api/v1.1", "alias": "os-snapshot-actions", "description": "Enable snapshot manager actions." }, { "updated": "2012-05-31T00:00:00+00:00", "name": "VolumeActions", "links": [], "namespace": "https://docs.openstack.org/volume/ext/volume-actions/api/v1.1", "alias": "os-volume-actions", "description": "Enable volume actions\n " }, { "updated": "2013-10-03T00:00:00+00:00", "name": "UsedLimits", "links": [], "namespace": "https://docs.openstack.org/volume/ext/used-limits/api/v1.1", "alias": "os-used-limits", "description": "Provide data on limited resources that are being used." }, { "updated": "2012-05-31T00:00:00+00:00", "name": "VolumeUnmanage", "links": [], "namespace": "https://docs.openstack.org/volume/ext/volume-unmanage/api/v1.1", "alias": "os-volume-unmanage", "description": "Enable volume unmanage operation." }, { "updated": "2011-11-03T00:00:00+00:00", "name": "VolumeHostAttribute", "links": [], "namespace": "https://docs.openstack.org/volume/ext/volume_host_attribute/api/v1", "alias": "os-vol-host-attr", "description": "Expose host as an attribute of a volume." }, { "updated": "2013-07-01T00:00:00+00:00", "name": "VolumeTypeEncryption", "links": [], "namespace": "https://docs.openstack.org/volume/ext/volume-type-encryption/api/v1", "alias": "encryption", "description": "Encryption support for volume types." }, { "updated": "2013-06-27T00:00:00+00:00", "name": "AvailabilityZones", "links": [], "namespace": "https://docs.openstack.org/volume/ext/os-availability-zone/api/v1", "alias": "os-availability-zone", "description": "Describe Availability Zones." }, { "updated": "2013-08-02T00:00:00+00:00", "name": "Qos_specs_manage", "links": [], "namespace": "https://docs.openstack.org/volume/ext/qos-specs/api/v1", "alias": "qos-specs", "description": "QoS specs support." }, { "updated": "2011-08-24T00:00:00+00:00", "name": "TypesExtraSpecs", "links": [], "namespace": "https://docs.openstack.org/volume/ext/types-extra-specs/api/v1", "alias": "os-types-extra-specs", "description": "Type extra specs support." }, { "updated": "2013-08-08T00:00:00+00:00", "name": "VolumeMigStatusAttribute", "links": [], "namespace": "https://docs.openstack.org/volume/ext/volume_mig_status_attribute/api/v1", "alias": "os-vol-mig-status-attr", "description": "Expose migration_status as an attribute of a volume." }, { "updated": "2012-08-13T00:00:00+00:00", "name": "CreateVolumeExtension", "links": [], "namespace": "https://docs.openstack.org/volume/ext/image-create/api/v1", "alias": "os-image-create", "description": "Allow creating a volume from an image in the Create Volume API." }, { "updated": "2014-01-10T00:00:00-00:00", "name": "ExtendedServices", "links": [], "namespace": "https://docs.openstack.org/volume/ext/extended_services/api/v2", "alias": "os-extended-services", "description": "Extended services support." }, { "updated": "2012-06-19T00:00:00+00:00", "name": "ExtendedSnapshotAttributes", "links": [], "namespace": "https://docs.openstack.org/volume/ext/extended_snapshot_attributes/api/v1", "alias": "os-extended-snapshot-attributes", "description": "Extended SnapshotAttributes support." }, { "updated": "2012-12-07T00:00:00+00:00", "name": "VolumeImageMetadata", "links": [], "namespace": "https://docs.openstack.org/volume/ext/volume_image_metadata/api/v1", "alias": "os-vol-image-meta", "description": "Show image metadata associated with the volume." 
}, { "updated": "2012-03-12T00:00:00+00:00", "name": "QuotaClasses", "links": [], "namespace": "https://docs.openstack.org/volume/ext/quota-classes-sets/api/v1.1", "alias": "os-quota-class-sets", "description": "Quota classes management support." }, { "updated": "2013-05-29T00:00:00+00:00", "name": "VolumeTransfer", "links": [], "namespace": "https://docs.openstack.org/volume/ext/volume-transfer/api/v1.1", "alias": "os-volume-transfer", "description": "Volume transfer management support." }, { "updated": "2014-02-10T00:00:00+00:00", "name": "VolumeManage", "links": [], "namespace": "https://docs.openstack.org/volume/ext/os-volume-manage/api/v1", "alias": "os-volume-manage", "description": "Allows existing backend storage to be 'managed' by Cinder." }, { "updated": "2012-08-25T00:00:00+00:00", "name": "AdminActions", "links": [], "namespace": "https://docs.openstack.org/volume/ext/admin-actions/api/v1.1", "alias": "os-admin-actions", "description": "Enable admin actions." }, { "updated": "2012-10-28T00:00:00-00:00", "name": "Services", "links": [], "namespace": "https://docs.openstack.org/volume/ext/services/api/v2", "alias": "os-services", "description": "Services support." } ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v2/samples/host-attach-request.json0000664000175000017500000000007400000000000024506 0ustar00zuulzuul00000000000000{ "os-attach": { "host_name": "my_host" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v2/samples/hosts-get-response.json0000664000175000017500000000116600000000000024355 0ustar00zuulzuul00000000000000{ "host": [{ "resource": { "volume_count": "8", "total_volume_gb": "11", "total_snapshot_gb": "1", "project": "(total)", "host": "node1@rbd-sas", "snapshot_count": "1" } }, { "resource": { "volume_count": "8", "total_volume_gb": "11", "total_snapshot_gb": "1", "project": "f21a9c86d7114bf99c711f4874d80474", "host": "node1@rbd-sas", "snapshot_count": "1" } }] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v2/samples/hosts-list-response.json0000664000175000017500000000130700000000000024546 0ustar00zuulzuul00000000000000{ "hosts": [{ "service-status": "available", "service": "cinder-backup", "zone": "nova", "service-state": "enabled", "host_name": "node1", "last-update": "2017-03-09T21:38:41.000000" }, { "service-status": "available", "service": "cinder-scheduler", "zone": "nova", "service-state": "enabled", "host_name": "node1", "last-update": "2017-03-09T21:38:38.000000" }, { "service-status": "available", "service": "cinder-volume", "zone": "nova", "service-state": "enabled", "host_name": "node1@lvm", "last-update": "2017-03-09T21:38:35.000000" }] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v2/samples/image-metadata-show-request.json0000664000175000017500000000004500000000000026103 0ustar00zuulzuul00000000000000{ "os-show_image_metadata": {} } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v2/samples/image-metadata-show-response.json0000664000175000017500000000011600000000000026250 0ustar00zuulzuul00000000000000{ "metadata": { "key1": "value1", "key2": "value2" } 
}././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v2/samples/limits-show-response.json0000664000175000017500000000071500000000000024716 0ustar00zuulzuul00000000000000{ "limits": { "rate": [], "absolute": { "totalSnapshotsUsed": 0, "maxTotalBackups": 10, "maxTotalVolumeGigabytes": 1000, "maxTotalSnapshots": 10, "maxTotalBackupGigabytes": 1000, "totalBackupGigabytesUsed": 0, "maxTotalVolumes": 10, "totalVolumesUsed": 0, "totalBackupsUsed": 0, "totalGigabytesUsed": 0 } } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v2/samples/pools-list-detailed-response.json0000664000175000017500000000162700000000000026320 0ustar00zuulzuul00000000000000{ "pools": [ { "name": "pool1", "capabilities": { "updated": "2014-10-28T00:00:00-00:00", "total_capacity_gb": 1024, "free_capacity_gb": 100, "volume_backend_name": "pool1", "reserved_percentage": 0, "driver_version": "1.0.0", "storage_protocol": "iSCSI", "QoS_support": false } }, { "name": "pool2", "capabilities": { "updated": "2014-10-28T00:00:00-00:00", "total_capacity_gb": 512, "free_capacity_gb": 200, "volume_backend_name": "pool2", "reserved_percentage": 0, "driver_version": "1.0.1", "storage_protocol": "iSER", "QoS_support": true } } ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v2/samples/qos-create-request.json0000664000175000017500000000020000000000000024321 0ustar00zuulzuul00000000000000{ "qos_specs": { "availability": "100", "name": "reliability-spec", "numberOfFailures": "0" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v2/samples/qos-create-response.json0000664000175000017500000000117100000000000024477 0ustar00zuulzuul00000000000000{ "qos_specs": { "specs": { "numberOfFailures": "0", "availability": "100" }, "consumer": "back-end", "name": "reliability-spec", "id": "599ef437-1c99-42ec-9fc6-239d0519fef1" }, "links": [ { "href": "http://23.253.248.171:8776/v2/bab7d5c60cd041a0a36f7c4b6e1dd978/qos_specs/599ef437-1c99-42ec-9fc6-239d0519fef1", "rel": "self" }, { "href": "http://23.253.248.171:8776/bab7d5c60cd041a0a36f7c4b6e1dd978/qos_specs/599ef437-1c99-42ec-9fc6-239d0519fef1", "rel": "bookmark" } ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v2/samples/qos-list-response.json0000664000175000017500000000105500000000000024210 0ustar00zuulzuul00000000000000{ "qos_specs": [ { "specs": { "availability": "100", "numberOfFailures": "0" }, "consumer": "back-end", "name": "reliability-spec", "id": "0388d6c6-d5d4-42a3-b289-95205c50dd15" }, { "specs": { "delay": "0", "throughput": "100" }, "consumer": "back-end", "name": "performance-spec", "id": "ecfc6e2e-7117-44a4-8eec-f84d04f531a8" } ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v2/samples/qos-show-response.json0000664000175000017500000000117100000000000024214 0ustar00zuulzuul00000000000000{ "qos_specs": { "specs": { "availability": "100", "numberOfFailures": "0" }, "consumer": "back-end", "name": "reliability-spec", "id": "0388d6c6-d5d4-42a3-b289-95205c50dd15" }, "links": [ { "href": 
"http://23.253.228.211:8776/v2/e1cf63117ae74309a5bcc2002a23be8b/qos_specs/0388d6c6-d5d4-42a3-b289-95205c50dd15", "rel": "self" }, { "href": "http://23.253.228.211:8776/e1cf63117ae74309a5bcc2002a23be8b/qos_specs/0388d6c6-d5d4-42a3-b289-95205c50dd15", "rel": "bookmark" } ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v2/samples/qos-unset-request.json0000664000175000017500000000004700000000000024225 0ustar00zuulzuul00000000000000{ "keys": [ "key1" ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v2/samples/qos-update-request.json0000664000175000017500000000006200000000000024346 0ustar00zuulzuul00000000000000{ "qos_specs": { "delay": "1" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v2/samples/qos-update-response.json0000664000175000017500000000006200000000000024514 0ustar00zuulzuul00000000000000{ "qos_specs": { "delay": "1" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v2/samples/qos_show_response.json0000664000175000017500000000031000000000000024352 0ustar00zuulzuul00000000000000{ "qos_associations": [ { "association_type": "volume_type", "name": "reliability-type", "id": "a12983c2-83bd-4afa-be9f-ad796573ead6" } ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v2/samples/quota-classes-show-response.json0000664000175000017500000000046500000000000026203 0ustar00zuulzuul00000000000000{ "quota_class_set": { "per_volume_gigabytes": -1, "volumes_lvmdriver-1": -1, "groups": 10, "gigabytes": 1000, "backup_gigabytes": 1000, "snapshots": 10, "gigabytes_lvmdriver-1": -1, "volumes": 10, "snapshots_lvmdriver-1": -1, "backups": 10, "id": "default" } }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v2/samples/quota-classes-update-request.json0000664000175000017500000000015000000000000026326 0ustar00zuulzuul00000000000000{ "quota_class_set": { "volumes_lmv": 10, "gigabytes_lmv": 1000, "snapshots_lmv": 10 } }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v2/samples/quota-classes-update-response.json0000664000175000017500000000044000000000000026476 0ustar00zuulzuul00000000000000{ "quota_class_set": { "per_volume_gigabytes": -1, "volumes_lvmdriver-1": -1, "groups": 10, "gigabytes": 1000, "backup_gigabytes": 1000, "snapshots": 10, "gigabytes_lvmdriver-1": -1, "volumes": 10, "snapshots_lvmdriver-1": -1, "backups": 10 } }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v2/samples/quotas-show-defaults-response.json0000664000175000017500000000014400000000000026532 0ustar00zuulzuul00000000000000{ "quota_set": { "gigabytes": 5, "snapshots": 10, "volumes": 20 } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v2/samples/quotas-show-response.json0000664000175000017500000000014400000000000024725 0ustar00zuulzuul00000000000000{ "quota_set": { "gigabytes": 5, "snapshots": 10, "volumes": 20 } } 
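The quota sample payloads above and below correspond to the ``os-quota-sets`` requests documented earlier in this reference (show, update, delete, and defaults). As a minimal client-side sketch only — it is not part of the archive, and the endpoint URL, token, and project IDs are placeholder assumptions — the calls might look like:

.. code-block:: python

   # Hypothetical sketch of a client exercising the os-quota-sets API
   # documented above. CINDER, TOKEN and the project IDs are placeholders.
   import requests

   CINDER = "http://controller:8776/v2"                 # assumed endpoint
   TOKEN = "<keystone-token>"                           # assumed auth token
   ADMIN_PROJECT = "c95fc3e4afe248a49a28828f286a7b38"   # {admin_project_id}
   TARGET_PROJECT = "f21a9c86d7114bf99c711f4874d80474"  # {project_id}
   HEADERS = {"X-Auth-Token": TOKEN, "Content-Type": "application/json"}

   url = f"{CINDER}/{ADMIN_PROJECT}/os-quota-sets/{TARGET_PROJECT}"

   # GET: show current quotas (compare quotas-show-response.json)
   print(requests.get(url, headers=HEADERS).json())

   # PUT: update quotas using the quotas-update-request.json payload shape
   print(requests.put(url, json={"quota_set": {"snapshots": 45}},
                      headers=HEADERS).json())

   # GET .../defaults: default quotas (compare quotas-show-defaults-response.json)
   print(requests.get(url + "/defaults", headers=HEADERS).json())

   # DELETE: revert quotas to defaults; a successful response has no body
   requests.delete(url, headers=HEADERS)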
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v2/samples/quotas-update-request.json0000664000175000017500000000006500000000000025063 0ustar00zuulzuul00000000000000{ "quota_set": { "snapshots": 45 } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v2/samples/quotas-update-response.json0000664000175000017500000000006500000000000025231 0ustar00zuulzuul00000000000000{ "quota_set": { "snapshots": 45 } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v2/samples/quotas-user-show-detailed-response.json0000664000175000017500000000054700000000000027461 0ustar00zuulzuul00000000000000{ "quota_set": { "gigabytes": { "in_use": 100, "limit": -1, "reserved": 0 }, "snapshots": { "in_use": 12, "limit": -1, "reserved": 0 }, "volumes": { "in_use": 1, "limit": -1, "reserved": 0 } } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v2/samples/quotas-user-show-response.json0000664000175000017500000000014400000000000025701 0ustar00zuulzuul00000000000000{ "quota_set": { "gigabytes": 5, "snapshots": 10, "volumes": 20 } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v2/samples/snapshot-create-request.json0000664000175000017500000000026200000000000025366 0ustar00zuulzuul00000000000000{ "snapshot": { "name": "snap-001", "description": "Daily backup", "volume_id": "5aa119a8-d25b-45a7-8d1b-88e127885635", "force": true } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v2/samples/snapshot-create-response.json0000664000175000017500000000051600000000000025536 0ustar00zuulzuul00000000000000{ "snapshot": { "status": "creating", "description": "Daily backup", "created_at": "2013-02-25T03:56:53.081642", "metadata": {}, "volume_id": "5aa119a8-d25b-45a7-8d1b-88e127885635", "size": 1, "id": "ffa9bc5e-1172-4021-acaf-cdcd78a9584d", "name": "snap-001" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v2/samples/snapshot-metadata-create-request.json0000664000175000017500000000006000000000000027140 0ustar00zuulzuul00000000000000{ "metadata": { "key": "v2" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v2/samples/snapshot-metadata-create-response.json0000664000175000017500000000006000000000000027306 0ustar00zuulzuul00000000000000{ "metadata": { "key": "v2" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v2/samples/snapshot-metadata-show-response.json0000664000175000017500000000006300000000000027026 0ustar00zuulzuul00000000000000{ "metadata": { "name": "test" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v2/samples/snapshot-metadata-update-request.json0000664000175000017500000000006000000000000027157 0ustar00zuulzuul00000000000000{ "metadata": { "key": "v2" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 
xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v2/samples/snapshot-metadata-update-response.json0000664000175000017500000000006000000000000027325 0ustar00zuulzuul00000000000000{ "metadata": { "key": "v2" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v2/samples/snapshot-show-response.json0000664000175000017500000000102300000000000025245 0ustar00zuulzuul00000000000000{ "snapshot": { "status": "available", "os-extended-snapshot-attributes:progress": "100%", "description": "Daily backup", "created_at": "2013-02-25T04:13:17.000000", "metadata": {}, "volume_id": "5aa119a8-d25b-45a7-8d1b-88e127885635", "os-extended-snapshot-attributes:project_id": "0c2eba2c5af04d3f9e9d0d410b371fde", "size": 1, "id": "2bb856e1-b3d8-4432-a858-09e4ce939389", "name": "snap-001", "updated_at": "2013-03-11T07:24:57Z" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v2/samples/snapshot-status-reset-request.json0000664000175000017500000000010100000000000026556 0ustar00zuulzuul00000000000000{ "os-reset_status": { "status": "available" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v2/samples/snapshot-update-request.json0000664000175000017500000000016000000000000025402 0ustar00zuulzuul00000000000000{ "snapshot": { "name": "snap-002", "description": "This is yet, another snapshot." } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v2/samples/snapshot-update-response.json0000664000175000017500000000054000000000000025552 0ustar00zuulzuul00000000000000{ "snapshot": { "created_at": "2013-02-20T08:11:34.000000", "description": "This is yet, another snapshot", "name": "snap-002", "id": "4b502fcb-1f26-45f8-9fe5-3b9a0a52eaf2", "size": 1, "status": "available", "metadata": {}, "volume_id": "2402b902-0b7a-458c-9c07-7435a826f794" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v2/samples/snapshots-list-detailed-response.json0000664000175000017500000000156100000000000027203 0ustar00zuulzuul00000000000000{ "snapshots": [ { "status": "available", "metadata": { "name": "test" }, "os-extended-snapshot-attributes:progress": "100%", "name": "test-volume-snapshot", "volume_id": "173f7b48-c4c1-4e70-9acc-086b39073506", "os-extended-snapshot-attributes:project_id": "bab7d5c60cd041a0a36f7c4b6e1dd978", "created_at": "2015-11-29T02:25:51.000000", "size": 1, "id": "b1323cda-8e4b-41c1-afc5-2fc791809c8c", "description": "volume snapshot", "updated_at": "2015-12-11T07:24:57Z" } ], "snapshots_links": [ { "href": "https://10.43.176.164:8776/v3/d55fb90e300b436cb2714a17137be023/snapshots?limit=1&marker=2e0cd28e-d7a2-4cdb-87e6-cd37c417c06d", "rel": "next" } ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v2/samples/snapshots-list-response.json0000664000175000017500000000132300000000000025426 0ustar00zuulzuul00000000000000{ "snapshots": [ { "status": "available", "metadata": { "name": "test" }, "name": "test-volume-snapshot", "volume_id": "173f7b48-c4c1-4e70-9acc-086b39073506", "created_at": "2015-11-29T02:25:51.000000", "size": 1, "id": "b1323cda-8e4b-41c1-afc5-2fc791809c8c", 
"description": "volume snapshot", "updated_at": "2015-12-11T07:24:57Z" } ], "snapshots_links": [ { "href": "https://10.43.176.164:8776/v3/d55fb90e300b436cb2714a17137be023/snapshots?limit=1&marker=2e0cd28e-d7a2-4cdb-87e6-cd37c417c06d", "rel": "next" } ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v2/samples/user-quotas-show-response.json0000664000175000017500000000064300000000000025705 0ustar00zuulzuul00000000000000{ "quota_set": { "cores": 20, "fixed_ips": -1, "floating_ips": 10, "id": "fake_project", "injected_file_content_bytes": 10240, "injected_file_path_bytes": 255, "injected_files": 5, "instances": 10, "key_pairs": 100, "metadata_items": 128, "ram": 51200, "security_group_rules": 20, "security_groups": 10 } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v2/samples/user-quotas-update-request.json0000664000175000017500000000011300000000000026031 0ustar00zuulzuul00000000000000{ "quota_set": { "force": true, "instances": 9 } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v2/samples/user-quotas-update-response.json0000664000175000017500000000060400000000000026204 0ustar00zuulzuul00000000000000{ "quota_set": { "cores": 20, "floating_ips": 10, "fixed_ips": -1, "injected_file_content_bytes": 10240, "injected_file_path_bytes": 255, "injected_files": 5, "instances": 9, "key_pairs": 100, "metadata_items": 128, "ram": 51200, "security_group_rules": 20, "security_groups": 10 } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v2/samples/version-show-response.json0000664000175000017500000000140000000000000025072 0ustar00zuulzuul00000000000000{ "versions": [ { "status": "DEPRECATED", "updated": "2017-02-25T12:00:00Z", "media-types": [ { "base": "application/json", "type": "application/vnd.openstack.volume+json;version=2" } ], "id": "v2.0", "links": [ { "href": "https://docs.openstack.org/", "type": "text/html", "rel": "describedby" }, { "href": "http://23.253.248.171:8776/v2/", "rel": "self" } ], "min_version": "", "version": "" } ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v2/samples/versions-response.json0000664000175000017500000000275700000000000024317 0ustar00zuulzuul00000000000000{ "versions": [ { "status": "SUPPORTED", "updated": "2014-06-28T12:20:21Z", "links": [ { "href": "https://docs.openstack.org/", "type": "text/html", "rel": "describedby" }, { "href": "http://10.0.2.15:8776/v2/", "rel": "self" } ], "min_version": "", "version": "", "media-types": [ { "base": "application/json", "type": "application/vnd.openstack.volume+json;version=1" } ], "id": "v2.0" }, { "status": "CURRENT", "updated": "2016-02-08T12:20:21Z", "links": [ { "href": "https://docs.openstack.org/", "type": "text/html", "rel": "describedby" }, { "href": "http://10.0.2.15:8776/v3/", "rel": "self" } ], "min_version": "3.0", "version": "{Current_Max_Version}", "media-types": [ { "base": "application/json", "type": "application/vnd.openstack.volume+json;version=1" } ], "id": "v3.0" } ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 
cinder-27.0.0/api-ref/source/v2/samples/volume-attach-request.json0000664000175000017500000000017700000000000025044 0ustar00zuulzuul00000000000000{ "os-attach": { "instance_uuid": "95D9EF50-507D-11E5-B970-0800200C9A66", "mountpoint": "/dev/vdc" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v2/samples/volume-bootable-status-update-request.json0000664000175000017500000000007600000000000030166 0ustar00zuulzuul00000000000000{ "os-set_bootable": { "bootable": "True" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v2/samples/volume-create-request.json0000664000175000017500000000100300000000000025030 0ustar00zuulzuul00000000000000{ "volume": { "size": 10, "availability_zone": null, "source_volid": null, "description": null, "multiattach": false, "snapshot_id": null, "name": null, "imageRef": null, "volume_type": null, "metadata": {}, "consistencygroup_id": null }, "OS-SCH-HNT:scheduler_hints": { "same_host": [ "a0cf03a5-d921-4877-bb5c-86d26cf818e1", "8c19174f-4220-44f0-824a-cd1eeef10287" ] } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v2/samples/volume-create-response.json0000664000175000017500000000216200000000000025205 0ustar00zuulzuul00000000000000{ "volume": { "status": "creating", "migration_status": null, "user_id": "0eea4eabcf184061a3b6db1e0daaf010", "attachments": [], "links": [ { "href": "http://23.253.248.171:8776/v2/bab7d5c60cd041a0a36f7c4b6e1dd978/volumes/6edbc2f4-1507-44f8-ac0d-eed1d2608d38", "rel": "self" }, { "href": "http://23.253.248.171:8776/bab7d5c60cd041a0a36f7c4b6e1dd978/volumes/6edbc2f4-1507-44f8-ac0d-eed1d2608d38", "rel": "bookmark" } ], "availability_zone": "nova", "bootable": "false", "encrypted": false, "created_at": "2015-11-29T03:01:44.000000", "description": null, "updated_at": null, "volume_type": "lvmdriver-1", "name": "test-volume-attachments", "replication_status": "disabled", "consistencygroup_id": null, "source_volid": null, "snapshot_id": null, "multiattach": false, "metadata": {}, "id": "6edbc2f4-1507-44f8-ac0d-eed1d2608d38", "size": 2 } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v2/samples/volume-detach-request.json0000664000175000017500000000013600000000000025023 0ustar00zuulzuul00000000000000{ "os-detach": { "attachment_id": "d8777f54-84cf-4809-a679-468ffed56cf1" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v2/samples/volume-extend-request.json0000664000175000017500000000006300000000000025061 0ustar00zuulzuul00000000000000{ "os-extend": { "new_size": 3 } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v2/samples/volume-force-delete-request.json0000664000175000017500000000003500000000000026127 0ustar00zuulzuul00000000000000{ "os-force_delete": {} } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v2/samples/volume-force-detach-request.json0000664000175000017500000000027000000000000026116 0ustar00zuulzuul00000000000000{ "os-force_detach": { "attachment_id": "d8777f54-84cf-4809-a679-468ffed56cf1", "connector": { 
"initiator": "iqn.2012-07.org.fake:01" } } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v2/samples/volume-image-metadata-set-request.json0000664000175000017500000000042200000000000027222 0ustar00zuulzuul00000000000000{ "os-set_image_metadata": { "metadata": { "image_id": "521752a6-acf6-4b2d-bc7a-119f9148cd8c", "image_name": "image", "kernel_id": "155d900f-4e14-4e4c-a73d-069cbf4541e6", "ramdisk_id": "somedisk" } } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v2/samples/volume-image-metadata-unset-request.json0000664000175000017500000000010700000000000027565 0ustar00zuulzuul00000000000000{ "os-unset_image_metadata": { "key": "ramdisk_id" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v2/samples/volume-image-metadata-update-response.json0000664000175000017500000000063600000000000030066 0ustar00zuulzuul00000000000000{ "metadata": { "kernel_id": "6ff710d2-942b-4d6b-9168-8c9cc2404ab1", "container_format": "bare", "min_ram": "0", "ramdisk_id": "somedisk", "disk_format": "qcow2", "image_name": "image", "image_id": "5137a025-3c5f-43c1-bc64-5f41270040a5", "checksum": "f8ab98ff5e73ebab884d80c9dc9c7290", "min_disk": "0", "size": "13267968" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v2/samples/volume-manage-request.json0000664000175000017500000000066300000000000025030 0ustar00zuulzuul00000000000000{ "volume": { "host": "geraint-VirtualBox", "ref": { "source-name": "existingLV", "source-id": "1234" }, "name": "New Volume", "availability_zone": "az2", "description": "Volume imported from existingLV", "volume_type": null, "bootable": true, "metadata": { "key1": "value1", "key2": "value2" } } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v2/samples/volume-manage-response.json0000664000175000017500000000212500000000000025171 0ustar00zuulzuul00000000000000{ "volume": { "status": "creating", "user_id": "eae1472b5fc5496998a3d06550929e7e", "attachments": [], "links": [ { "href": "http://10.0.2.15:8776/v2/87c8522052ca4eed98bc672b4c1a3ddb/volumes/23cf872b-c781-4cd4-847d-5f2ec8cbd91c", "rel": "self" }, { "href": "http://10.0.2.15:8776/87c8522052ca4eed98bc672b4c1a3ddb/volumes/23cf872b-c781-4cd4-847d-5f2ec8cbd91c", "rel": "bookmark" } ], "availability_zone": "az2", "bootable": "false", "encrypted": "false", "created_at": "2014-07-18T00:12:54.000000", "description": "Volume imported from existingLV", "os-vol-tenant-attr:tenant_id": "87c8522052ca4eed98bc672b4c1a3ddb", "volume_type": null, "name": "New Volume", "source_volid": null, "snapshot_id": null, "metadata": { "key2": "value2", "key1": "value1" }, "id": "23cf872b-c781-4cd4-847d-5f2ec8cbd91c", "size": 0 } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v2/samples/volume-metadata-create-request.json0000664000175000017500000000007000000000000026611 0ustar00zuulzuul00000000000000{ "metadata": { "name": "metadata0" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 
cinder-27.0.0/api-ref/source/v2/samples/volume-metadata-create-response.json0000664000175000017500000000007000000000000026757 0ustar00zuulzuul00000000000000{ "metadata": { "name": "metadata0" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v2/samples/volume-metadata-show-key-response.json0000664000175000017500000000005700000000000027267 0ustar00zuulzuul00000000000000{ "meta": { "name": "test" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v2/samples/volume-metadata-show-response.json0000664000175000017500000000002700000000000026476 0ustar00zuulzuul00000000000000{ "metadata": {} } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v2/samples/volume-metadata-update-key-request.json0000664000175000017500000000005300000000000027417 0ustar00zuulzuul00000000000000{ "meta": { "name": "new_name" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v2/samples/volume-metadata-update-key-response.json0000664000175000017500000000005300000000000027565 0ustar00zuulzuul00000000000000{ "meta": { "name": "new_name" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v2/samples/volume-metadata-update-request.json0000664000175000017500000000007000000000000026630 0ustar00zuulzuul00000000000000{ "metadata": { "name": "metadata1" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v2/samples/volume-metadata-update-response.json0000664000175000017500000000007000000000000026776 0ustar00zuulzuul00000000000000{ "metadata": { "name": "metadata1" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v2/samples/volume-os-migrate_volume-request.json0000664000175000017500000000010000000000000027220 0ustar00zuulzuul00000000000000{ "os-migrate_volume": { "host": "node1@lvm" } }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v2/samples/volume-os-migrate_volume_completion-request.json0000664000175000017500000000020500000000000031457 0ustar00zuulzuul00000000000000{ "os-migrate_volume_completion": { "new_volume": "2b955850-f177-45f7-9f49-ecb2c256d161", "error": false } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v2/samples/volume-os-retype-request.json0000664000175000017500000000016100000000000025520 0ustar00zuulzuul00000000000000{ "os-retype": { "new_type": "dedup-tier-replicaton", "migration_policy": "never" } }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v2/samples/volume-show-response.json0000664000175000017500000000201400000000000024716 0ustar00zuulzuul00000000000000{ "volume": { "status": "available", "attachments": [], "links": [ { "href": "http://localhost:8776/v2/0c2eba2c5af04d3f9e9d0d410b371fde/volumes/5aa119a8-d25b-45a7-8d1b-88e127885635", "rel": "self" }, { "href": 
"http://localhost:8776/0c2eba2c5af04d3f9e9d0d410b371fde/volumes/5aa119a8-d25b-45a7-8d1b-88e127885635", "rel": "bookmark" } ], "availability_zone": "nova", "bootable": "false", "os-vol-host-attr:host": "ip-10-168-107-25", "source_volid": null, "snapshot_id": null, "id": "5aa119a8-d25b-45a7-8d1b-88e127885635", "description": "Super volume.", "name": "vol-002", "created_at": "2013-02-25T02:40:21.000000", "volume_type": "None", "os-vol-tenant-attr:tenant_id": "0c2eba2c5af04d3f9e9d0d410b371fde", "size": 1, "metadata": { "contents": "not junk" } } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v2/samples/volume-status-reset-request.json0000664000175000017500000000021700000000000026236 0ustar00zuulzuul00000000000000{ "os-reset_status": { "status": "available", "attach_status": "detached", "migration_status": "migrating" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v2/samples/volume-transfer-accept-request.json0000664000175000017500000000010100000000000026644 0ustar00zuulzuul00000000000000{ "accept": { "auth_key": "9266c59563c84664" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v2/samples/volume-transfer-accept-response.json0000664000175000017500000000072700000000000027030 0ustar00zuulzuul00000000000000{ "transfer": { "id": "cac5c677-73a9-4288-bb9c-b2ebfb547377", "name": "first volume transfer", "volume_id": "894623a6-e901-4312-aa06-4275e6321cce", "links": [ { "href": "http://localhost/v2/firstproject/volumes/1", "rel": "self" }, { "href": "http://localhost/firstproject/volumes/1", "rel": "bookmark" } ] } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v2/samples/volume-transfer-create-request.json0000664000175000017500000000017000000000000026656 0ustar00zuulzuul00000000000000{ "transfer": { "volume_id": "c86b9af4-151d-4ead-b62c-5fb967af0e37", "name": "first volume" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v2/samples/volume-transfer-create-response.json0000664000175000017500000000105200000000000027024 0ustar00zuulzuul00000000000000{ "transfer": { "id": "1a7059f5-8ed7-45b7-8d05-2811e5d09f24", "created_at": "2015-02-25T03:56:53.081642", "name": "first volume", "volume_id": "c86b9af4-151d-4ead-b62c-5fb967af0e37", "auth_key": "9266c59563c84664", "links": [ { "href": "http://localhost/v2/firstproject/volumes/3", "rel": "self" }, { "href": "http://localhost/firstproject/volumes/3", "rel": "bookmark" } ] } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v2/samples/volume-transfer-show-response.json0000664000175000017500000000101300000000000026536 0ustar00zuulzuul00000000000000{ "transfer": { "id": "cac5c677-73a9-4288-bb9c-b2ebfb547377", "created_at": "2015-02-25T03:56:53.081642", "name": "first volume transfer", "volume_id": "894623a6-e901-4312-aa06-4275e6321cce", "links": [ { "href": "http://localhost/v2/firstproject/volumes/1", "rel": "self" }, { "href": "http://localhost/firstproject/volumes/1", "rel": "bookmark" } ] } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 
cinder-27.0.0/api-ref/source/v2/samples/volume-transfers-list-detailed-response.json0000664000175000017500000000222500000000000030473 0ustar00zuulzuul00000000000000{ "transfers": [ { "id": "cac5c677-73a9-4288-bb9c-b2ebfb547377", "created_at": "2015-02-25T03:56:53.081642", "name": "first volume transfer", "volume_id": "894623a6-e901-4312-aa06-4275e6321cce", "links": [ { "href": "http://localhost/v2/firstproject/volumes/1", "rel": "self" }, { "href": "http://localhost/firstproject/volumes/1", "rel": "bookmark" } ] }, { "id": "f26c0dee-d20d-4e80-8dee-a8d91b9742a1", "created_at": "2015-03-25T03:56:53.081642", "name": "second volume transfer", "volume_id": "673db275-379f-41af-8371-e1652132b4c1", "links": [ { "href": "http://localhost/v2/firstproject/volumes/2", "rel": "self" }, { "href": "http://localhost/firstproject/volumes/2", "rel": "bookmark" } ] } ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v2/samples/volume-transfers-list-response.json0000664000175000017500000000204500000000000026722 0ustar00zuulzuul00000000000000{ "transfers": [ { "id": "cac5c677-73a9-4288-bb9c-b2ebfb547377", "name": "first volume transfer", "volume_id": "894623a6-e901-4312-aa06-4275e6321cce", "links": [ { "href": "http://localhost/v2/firstproject/volumes/1", "rel": "self" }, { "href": "http://localhost/firstproject/volumes/1", "rel": "bookmark" } ] }, { "id": "f26c0dee-d20d-4e80-8dee-a8d91b9742a1", "name": "second volume transfer", "volume_id": "673db275-379f-41af-8371-e1652132b4c1", "links": [ { "href": "http://localhost/v2/firstproject/volumes/2", "rel": "self" }, { "href": "http://localhost/firstproject/volumes/2", "rel": "bookmark" } ] } ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v2/samples/volume-type-access-add-request.json0000664000175000017500000000013200000000000026535 0ustar00zuulzuul00000000000000{ "addProjectAccess": { "project": "f270b245cb11498ca4031deb7e141cfa" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v2/samples/volume-type-access-delete-request.json0000664000175000017500000000013500000000000027252 0ustar00zuulzuul00000000000000{ "removeProjectAccess": { "project": "f270b245cb11498ca4031deb7e141cfa" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v2/samples/volume-type-access-list-response.json0000664000175000017500000000024100000000000027127 0ustar00zuulzuul00000000000000{ "volume_type_access": { "volume_type_id": "3c67e124-39ad-4ace-a507-8bb7bf510c26", "project_id": "f270b245cb11498ca4031deb7e141cfa" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v2/samples/volume-type-create-request.json0000664000175000017500000000034000000000000026012 0ustar00zuulzuul00000000000000{ "volume_type": { "name": "vol-type-001", "description": "volume type 0001", "os-volume-type-access:is_public": true, "extra_specs": { "capabilities": "gpu" } } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v2/samples/volume-type-show-request.json0000664000175000017500000000027200000000000025533 0ustar00zuulzuul00000000000000{ "volume_type": { "id": 
"289da7f8-6440-407c-9fb4-7db01ec49164", "name": "vol-type-001", "extra_specs": { "capabilities": "gpu" } } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v2/samples/volume-type-show-response.json0000664000175000017500000000040100000000000025673 0ustar00zuulzuul00000000000000{ "volume_type": { "id": "6685584b-1eac-4da6-b5c3-555430cf68ff", "name": "vol-type-001", "description": "volume type 001", "is_public": "true", "extra_specs": { "capabilities": "gpu" } } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v2/samples/volume-type-update-request.json0000664000175000017500000000031200000000000026030 0ustar00zuulzuul00000000000000{ "volume_type": { "name": "vol-type-001", "description": "volume type 0001", "is_public": true, "extra_specs": { "capabilities": "gpu" } } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v2/samples/volume-types-list-response.json0000664000175000017500000000054300000000000026060 0ustar00zuulzuul00000000000000{ "volume_types": [ { "extra_specs": { "capabilities": "gpu" }, "id": "6685584b-1eac-4da6-b5c3-555430cf68ff", "name": "SSD" }, { "extra_specs": {}, "id": "8eb69a46-df97-4e41-9586-9a40a7533803", "name": "SATA" } ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v2/samples/volume-unmanage-request.json0000664000175000017500000000003200000000000025361 0ustar00zuulzuul00000000000000{ "os-unmanage": {} } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v2/samples/volume-update-request.json0000664000175000017500000000015300000000000025054 0ustar00zuulzuul00000000000000{ "volume": { "name": "vol-003", "description": "This is yet, another volume." 
} } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v2/samples/volume-update-response.json0000664000175000017500000000223700000000000025227 0ustar00zuulzuul00000000000000{ "volume": { "status": "available", "migration_status": null, "user_id": "0eea4eabcf184061a3b6db1e0daaf010", "attachments": [], "links": [ { "href": "http://localhost:8776/v2/0c2eba2c5af04d3f9e9d0d410b371fde/volumes/5aa119a8-d25b-45a7-8d1b-88e127885635", "rel": "self" }, { "href": "http://localhost:8776/0c2eba2c5af04d3f9e9d0d410b371fde/volumes/5aa119a8-d25b-45a7-8d1b-88e127885635", "rel": "bookmark" } ], "availability_zone": "nova", "bootable": "false", "encrypted": false, "created_at": "2015-11-29T03:01:44.000000", "description": "This is yet, another volume.", "updated_at": null, "volume_type": "lvmdriver-1", "name": "vol-003", "replication_status": "disabled", "consistencygroup_id": null, "source_volid": null, "snapshot_id": null, "multiattach": false, "metadata": { "contents": "not junk" }, "id": "5aa119a8-d25b-45a7-8d1b-88e127885635", "size": 1 } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v2/samples/volumes-list-detailed-response.json0000664000175000017500000001021700000000000026651 0ustar00zuulzuul00000000000000{ "volumes": [ { "migration_status": null, "attachments": [ { "server_id": "f4fda93b-06e0-4743-8117-bc8bcecd651b", "attachment_id": "3b4db356-253d-4fab-bfa0-e3626c0b8405", "host_name": null, "volume_id": "6edbc2f4-1507-44f8-ac0d-eed1d2608d38", "device": "/dev/vdb", "id": "6edbc2f4-1507-44f8-ac0d-eed1d2608d38" } ], "links": [ { "href": "http://23.253.248.171:8776/v2/bab7d5c60cd041a0a36f7c4b6e1dd978/volumes/6edbc2f4-1507-44f8-ac0d-eed1d2608d38", "rel": "self" }, { "href": "http://23.253.248.171:8776/bab7d5c60cd041a0a36f7c4b6e1dd978/volumes/6edbc2f4-1507-44f8-ac0d-eed1d2608d38", "rel": "bookmark" } ], "availability_zone": "nova", "os-vol-host-attr:host": "difleming@lvmdriver-1#lvmdriver-1", "encrypted": false, "replication_status": "disabled", "snapshot_id": null, "id": "6edbc2f4-1507-44f8-ac0d-eed1d2608d38", "size": 2, "user_id": "32779452fcd34ae1a53a797ac8a1e064", "os-vol-tenant-attr:tenant_id": "bab7d5c60cd041a0a36f7c4b6e1dd978", "os-vol-mig-status-attr:migstat": null, "metadata": { "readonly": false, "attached_mode": "rw" }, "status": "in-use", "description": null, "multiattach": true, "source_volid": null, "consistencygroup_id": null, "os-vol-mig-status-attr:name_id": null, "name": "test-volume-attachments", "bootable": "false", "created_at": "2015-11-29T03:01:44.000000", "volume_type": "lvmdriver-1" }, { "migration_status": null, "attachments": [], "links": [ { "href": "http://23.253.248.171:8776/v2/bab7d5c60cd041a0a36f7c4b6e1dd978/volumes/173f7b48-c4c1-4e70-9acc-086b39073506", "rel": "self" }, { "href": "http://23.253.248.171:8776/bab7d5c60cd041a0a36f7c4b6e1dd978/volumes/173f7b48-c4c1-4e70-9acc-086b39073506", "rel": "bookmark" } ], "availability_zone": "nova", "os-vol-host-attr:host": "difleming@lvmdriver-1#lvmdriver-1", "encrypted": false, "replication_status": "disabled", "snapshot_id": null, "id": "173f7b48-c4c1-4e70-9acc-086b39073506", "size": 1, "user_id": "32779452fcd34ae1a53a797ac8a1e064", "os-vol-tenant-attr:tenant_id": "bab7d5c60cd041a0a36f7c4b6e1dd978", "os-vol-mig-status-attr:migstat": null, "metadata": {}, "status": "available", "volume_image_metadata": { "kernel_id": "8a55f5f1-78f7-4477-8168-977d8519342c", 
"checksum": "eb9139e4942121f22bbc2afc0400b2a4", "min_ram": "0", "ramdisk_id": "5f6bdf8a-92db-4988-865b-60bdd808d9ef", "disk_format": "ami", "image_name": "cirros-0.3.4-x86_64-uec", "image_id": "b48c53e1-9a96-4a5a-a630-2e74ec54ddcc", "container_format": "ami", "min_disk": "0", "size": "25165824" }, "description": "", "multiattach": false, "source_volid": null, "consistencygroup_id": null, "os-vol-mig-status-attr:name_id": null, "name": "test-volume", "bootable": "true", "created_at": "2015-11-29T02:25:18.000000", "volume_type": "lvmdriver-1" } ], "volumes_links": [{ "href": "https://158.69.65.111/volume/v2/4ad9f06ab8654e40befa59a2e7cac86d/volumes/detail?limit=1&marker=3b451d5d-9358-4a7e-a746-c6fd8b0e1462", "rel": "next" }] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v2/samples/volumes-list-response.json0000664000175000017500000000250100000000000025075 0ustar00zuulzuul00000000000000{ "volumes": [ { "id": "45baf976-c20a-4894-a7c3-c94b7376bf55", "links": [ { "href": "http://localhost:8776/v2/0c2eba2c5af04d3f9e9d0d410b371fde/volumes/45baf976-c20a-4894-a7c3-c94b7376bf55", "rel": "self" }, { "href": "http://localhost:8776/0c2eba2c5af04d3f9e9d0d410b371fde/volumes/45baf976-c20a-4894-a7c3-c94b7376bf55", "rel": "bookmark" } ], "name": "vol-004" }, { "id": "5aa119a8-d25b-45a7-8d1b-88e127885635", "links": [ { "href": "http://localhost:8776/v2/0c2eba2c5af04d3f9e9d0d410b371fde/volumes/5aa119a8-d25b-45a7-8d1b-88e127885635", "rel": "self" }, { "href": "http://localhost:8776/0c2eba2c5af04d3f9e9d0d410b371fde/volumes/5aa119a8-d25b-45a7-8d1b-88e127885635", "rel": "bookmark" } ], "name": "vol-003" } ], "volumes_links": [{ "href": "https://158.69.65.111/volume/v2/4ad9f06ab8654e40befa59a2e7cac86d/volumes/detail?limit=1&marker=3b451d5d-9358-4a7e-a746-c6fd8b0e1462", "rel": "next" }] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v2/volume-manage.inc0000664000175000017500000000410700000000000021473 0ustar00zuulzuul00000000000000.. -*- rst -*- Volume manage extension (os-volume-manage) ========================================== Creates volumes by using existing storage instead of allocating new storage. Manage existing volume ~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: POST /v2/{project_id}/os-volume-manage Creates a Block Storage volume by using existing storage rather than allocating new storage. The caller must specify a reference to an existing storage volume in the ref parameter in the request. Although each storage driver might interpret this reference differently, the driver should accept a reference structure that contains either a source-id or source-name element, if possible. The API chooses the size of the volume by rounding up the size of the existing storage volume to the next gibibyte (GiB). Response codes -------------- .. rest_status_code:: success ../status.yaml - 202 Request ------- .. rest_parameters:: parameters.yaml - description: description - availability_zone: availability_zone - bootable: bootable - volume_type: volume_type - name: name - volume: volume - host: host - ref: ref - metadata: metadata - project_id: project_id_path Request Example --------------- .. literalinclude:: ./samples/volume-manage-request.json :language: javascript Response -------- .. 
rest_parameters:: parameters.yaml - volume: volume - status: status_3 - migration_status: migration_status - user_id: user_id - attachments: attachments - links: links_3 - availability_zone: availability_zone - bootable: bootable_response - encrypted: encrypted - created_at: created_at - description: description_5 - updated_at: updated_at - volume_type: volume_type - name: name_13 - replication_status: replication_status - consistencygroup_id: consistencygroup_id - source_volid: source_volid - snapshot_id: snapshot_id - multiattach: multiattach_resp - metadata: metadata_1 - id: id_5 - size: size Response Example ---------------- .. literalinclude:: ./samples/volume-manage-response.json :language: javascript ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v2/volume-type-access.inc0000664000175000017500000000433100000000000022462 0ustar00zuulzuul00000000000000.. -*- rst -*- Volume type access (volumes) ============================ Private volume type access to project. By default, volumes types are public. To create a private volume type, set the ``is_public`` boolean field to ``false`` at volume type creation time. To control access to a private volume type, user needs to add a project to or remove a project from the volume type. Private volume types without projects are only accessible by users with the administrative role and context. Add private volume type access ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: POST /v2/{project_id}/types/{volume_type}/action Adds private volume type access to a project. Response codes -------------- .. rest_status_code:: success ../status.yaml - 202 Request ------- .. rest_parameters:: parameters.yaml - project: project - project_id: project_id_path - volume_type: volume_type_access Request Example --------------- .. literalinclude:: ./samples/volume-type-access-add-request.json :language: javascript Remove private volume type access ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: POST /v2/{project_id}/types/{volume_type}/action Removes private volume type access from a project. Response codes -------------- .. rest_status_code:: success ../status.yaml - 202 Request ------- .. rest_parameters:: parameters.yaml - project: project - project_id: project_id_path - volume_type: volume_type_access Request Example --------------- .. literalinclude:: ./samples/volume-type-access-delete-request.json :language: javascript List private volume type access details ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: GET /v2/{project_id}/types/{volume_type}/os-volume-type-access Lists project IDs that have access to private volume type. Response codes -------------- .. rest_status_code:: success ../status.yaml - 200 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - volume_type: volume_type_access Response Parameters ------------------- .. rest_parameters:: parameters.yaml - project_id: project_id Response Example ---------------- .. literalinclude:: ./samples/volume-type-access-list-response.json :language: javascript ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v2/volumes-v2-extensions.inc0000664000175000017500000000132200000000000023146 0ustar00zuulzuul00000000000000.. -*- rst -*- API extensions (extensions) =========================== List API extensions ~~~~~~~~~~~~~~~~~~~ .. 
rest_method:: GET /v2/{project_id}/extensions Lists Block Storage API extensions. Response codes -------------- .. rest_status_code:: success ../status.yaml - 200 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path Response Parameters ------------------- .. rest_parameters:: parameters.yaml - updated: updated_at - description: description - links: links - namespace: namespace - alias: alias - name: name Response Example ---------------- .. literalinclude:: ./samples/extensions-list-response.json :language: javascript ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v2/volumes-v2-snapshots-actions.inc0000664000175000017500000000133600000000000024434 0ustar00zuulzuul00000000000000.. -*- rst -*- Snapshot actions (snapshots, action) ==================================== Administrator only. Resets status for a snapshot. Reset snapshot's status ~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: POST /v2/{project_id}/snapshots/{snapshot_id}/action Resets the status. Specify the ``os-reset_status`` action in the request body. Response codes -------------- .. rest_status_code:: success ../status.yaml - 202 Request ------- .. rest_parameters:: parameters.yaml - status: status_2 - os-reset_status: os-reset_status - project_id: project_id_path - snapshot_id: snapshot_id Request Example --------------- .. literalinclude:: ./samples/snapshot-status-reset-request.json :language: javascript ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v2/volumes-v2-snapshots.inc0000664000175000017500000002071400000000000022777 0ustar00zuulzuul00000000000000.. -*- rst -*- Volume snapshots (snapshots) ============================ A snapshot is a point-in-time copy of the data that a volume contains. When you create, list, or delete snapshots, these status values are possible: **Snapshot statuses** +----------------+---------------------------------------------+ | Status | Description | +----------------+---------------------------------------------+ | creating | The snapshot is being created. | +----------------+---------------------------------------------+ | available | The snapshot is ready to use. | +----------------+---------------------------------------------+ | backing-up | The snapshot is being backed up. | +----------------+---------------------------------------------+ | deleting | The snapshot is being deleted. | +----------------+---------------------------------------------+ | error | A snapshot creation error occurred. | +----------------+---------------------------------------------+ | deleted | The snapshot has been deleted. | +----------------+---------------------------------------------+ | unmanaging | The snapshot is being unmanaged. | +----------------+---------------------------------------------+ | restoring | The snapshot is being restored to a volume. | +----------------+---------------------------------------------+ | error_deleting | A snapshot deletion error occurred. | +----------------+---------------------------------------------+ List snapshots with details ~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: GET /v2/{project_id}/snapshots/detail Lists all Block Storage snapshots, with details, that the project can access. Response codes -------------- .. rest_status_code:: success ../status.yaml - 200 Request ------- .. 
rest_parameters:: parameters.yaml - project_id: project_id_path - all_tenants: all-tenants Response Parameters ------------------- .. rest_parameters:: parameters.yaml - status: status_2 - os-extended-snapshot-attributes:progress: os-ext-snap-attr:progress - description: description - created_at: created_at - name: name - volume_id: volume_id_5 - os-extended-snapshot-attributes:project_id: os-ext-snap-attr:project_id - size: size - id: id_4 - metadata: metadata - updated_at: updated_at - snapshots_links: links_5 Response Example ---------------- .. literalinclude:: ./samples/snapshots-list-detailed-response.json :language: javascript Create snapshot ~~~~~~~~~~~~~~~ .. rest_method:: POST /v2/{project_id}/snapshots Creates a volume snapshot, which is a point-in-time, complete copy of a volume. You can create a volume from a snapshot. Response codes -------------- .. rest_status_code:: success ../status.yaml - 202 Request ------- .. rest_parameters:: parameters.yaml - snapshot: snapshot - volume_id: volume_id - force: force - description: description - name: name - project_id: project_id_path Request Example --------------- .. literalinclude:: ./samples/snapshot-create-request.json :language: javascript Response Parameters ------------------- .. rest_parameters:: parameters.yaml - status: status_2 - description: description - created_at: created_at - name: name - snapshot: snapshot - volume_id: volume_id_5 - metadata: metadata - id: id_4 - size: size List snapshots -------------- .. rest_method:: GET /v2/{project_id}/snapshots Lists all Block Storage snapshots, with summary information, that the project can access. Response codes -------------- .. rest_status_code:: success ../status.yaml - 200 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - all_tenants: all-tenants - sort_key: sort_key - sort_dir: sort_dir - limit: limit - offset: offset - marker: marker Response Parameters ------------------- .. rest_parameters:: parameters.yaml - status: status_2 - description: description - created_at: created_at - name: name - volume_id: volume_id_5 - metadata: metadata - id: id_4 - size: size - updated_at: updated_at - snapshots_links: links_5 Response Example ---------------- .. literalinclude:: ./samples/snapshots-list-response.json :language: javascript Show snapshot metadata ~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: GET /v2/{project_id}/snapshots/{snapshot_id}/metadata Shows metadata for a snapshot. Response codes -------------- .. rest_status_code:: success ../status.yaml - 200 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - snapshot_id: snapshot_id Response Parameters ------------------- .. rest_parameters:: parameters.yaml - metadata: metadata Response Example ---------------- .. literalinclude:: ./samples/snapshot-metadata-show-response.json :language: javascript Create snapshot metadata ~~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: POST /v2/{project_id}/snapshots/{snapshot_id}/metadata Updates metadata for a snapshot. Creates or replaces metadata items that match keys. Does not modify items that are not in the request. Response codes -------------- .. rest_status_code:: success ../status.yaml - 200 Request ------- .. rest_parameters:: parameters.yaml - metadata: metadata - project_id: project_id_path - snapshot_id: snapshot_id Request Example --------------- .. literalinclude:: ./samples/snapshot-metadata-create-request.json :language: javascript Response Example ---------------- .. 
literalinclude:: ./samples/snapshot-metadata-create-response.json :language: javascript Update snapshot metadata ~~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: PUT /v2/{project_id}/snapshots/{snapshot_id}/metadata Replaces all the snapshot's metadata with the key-value pairs in the request. Response codes -------------- .. rest_status_code:: success ../status.yaml - 200 Request ------- .. rest_parameters:: parameters.yaml - metadata: metadata - project_id: project_id_path - snapshot_id: snapshot_id Request Example --------------- .. literalinclude:: ./samples/snapshot-metadata-update-request.json :language: javascript Response Example ---------------- .. literalinclude:: ./samples/snapshot-metadata-update-response.json :language: javascript Show snapshot details ~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: GET /v2/{project_id}/snapshots/{snapshot_id} Shows details for a snapshot. Response codes -------------- .. rest_status_code:: success ../status.yaml - 200 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - snapshot_id: snapshot_id Response Parameters ------------------- .. rest_parameters:: parameters.yaml - status: status_2 - os-extended-snapshot-attributes:progress: os-ext-snap-attr:progress - description: description - created_at: created_at - name: name - snapshot: snapshot - volume_id: volume_id_5 - os-extended-snapshot-attributes:project_id: os-ext-snap-attr:project_id - size: size - id: id_4 - metadata: metadata - updated_at: updated_at Response Example ---------------- .. literalinclude:: ./samples/snapshot-show-response.json :language: javascript Update snapshot ~~~~~~~~~~~~~~~ .. rest_method:: PUT /v2/{project_id}/snapshots/{snapshot_id} Updates a snapshot. Response codes -------------- .. rest_status_code:: success ../status.yaml - 200 Request ------- .. rest_parameters:: parameters.yaml - snapshot: snapshot - description: description - name: name - project_id: project_id_path - snapshot_id: snapshot_id Request Example --------------- .. literalinclude:: ./samples/snapshot-update-request.json :language: javascript Response Parameters ------------------- .. rest_parameters:: parameters.yaml - status: status_2 - description: description - created_at: created_at - name: name - snapshot: snapshot - volume_id: volume_id_5 - metadata: metadata - id: id_4 - size: size Response Example ---------------- .. literalinclude:: ./samples/snapshot-update-response.json :language: javascript Delete snapshot ~~~~~~~~~~~~~~~ .. rest_method:: DELETE /v2/{project_id}/snapshots/{snapshot_id} Deletes a snapshot. Response codes -------------- .. rest_status_code:: success ../status.yaml - 202 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - snapshot_id: snapshot_id ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v2/volumes-v2-types.inc0000664000175000017500000002225100000000000022117 0ustar00zuulzuul00000000000000.. -*- rst -*- Volume types (types) ==================== Update volume type ~~~~~~~~~~~~~~~~~~ .. rest_method:: PUT /v2/{project_id}/types/{volume_type_id} Updates a volume type. To create an environment with multiple-storage back ends, you must specify a volume type. The API spawns Block Storage volume back ends as children to ``cinder-volume``, and keys them from a unique queue. The API names the back ends ``cinder-volume.HOST.BACKEND``. For example, ``cinder-volume.ubuntu.lvmdriver``. 
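
As a purely illustrative sketch (not one of the official request samples in this reference), the volume type update documented in this section can be driven with a plain HTTP ``PUT``. The endpoint, project ID, volume type ID and token below are placeholders; the request body mirrors ``volume-type-update-request.json``.

.. code-block:: python

    # Illustrative sketch of the "Update volume type" call documented in
    # this section.  The endpoint, project ID, volume type ID and token
    # are placeholders, not values taken from this reference.
    import requests

    ENDPOINT = "http://controller:8776/v2"       # Block Storage API endpoint
    PROJECT_ID = "<project-id>"
    VOLUME_TYPE_ID = "<volume-type-id>"
    TOKEN = "<keystone-token>"

    body = {
        "volume_type": {
            "name": "vol-type-001",
            "description": "volume type 0001",
            "is_public": True,
            "extra_specs": {"capabilities": "gpu"},
        }
    }

    resp = requests.put(
        f"{ENDPOINT}/{PROJECT_ID}/types/{VOLUME_TYPE_ID}",
        json=body,
        headers={"X-Auth-Token": TOKEN},
    )
    resp.raise_for_status()                      # expect 200 on success
    print(resp.json()["volume_type"]["name"])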
When you create a volume, the scheduler chooses an appropriate back end for the volume type to handle the request. For information about how to use volume types to create multiple- storage back ends, see `Configure multiple-storage back ends `_. Response codes -------------- .. rest_status_code:: success ../status.yaml - 200 Request ------- .. rest_parameters:: parameters.yaml - volume_type: volume_type_1 - volume_type_id: volume_type_id - project_id: project_id_path Request Example --------------- .. literalinclude:: ./samples/volume-type-update-request.json :language: javascript Response Parameters ------------------- .. rest_parameters:: parameters.yaml - is_public: is_public - extra_specs: extra_specs - description: description - volume_type: volume_type_1 - name: name Response Example ---------------- .. literalinclude:: ./samples/volume-type-show-response.json :language: javascript Update extra specs for a volume type ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: PUT /v2/{project_id}/types/{volume_type_id} Updates the extra specifications that are assigned to a volume type. Response codes -------------- .. rest_status_code:: success ../status.yaml - 200 Request ------- .. rest_parameters:: parameters.yaml - extra_specs: extra_specs - volume_type: volume_type_1 - volume_type_id: volume_type_id - project_id: project_id_path Request Example --------------- .. literalinclude:: ./samples/volume-type-update-request.json :language: javascript Response Parameters ------------------- .. rest_parameters:: parameters.yaml - is_public: is_public - extra_specs: extra_specs - description: description - volume_type: volume_type_1 - name: name Response Example ---------------- .. literalinclude:: ./samples/volume-type-show-response.json :language: javascript Show volume type details for v2 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: GET /v2/{project_id}/types/{volume_type_id} Shows details for a volume type. Response codes -------------- .. rest_status_code:: success ../status.yaml - 200 Request ------- .. rest_parameters:: parameters.yaml - volume_type_id: volume_type_id - project_id: project_id_path Response Parameters ------------------- .. rest_parameters:: parameters.yaml - is_public: is_public - extra_specs: extra_specs - description: description - volume_type: volume_type_1 - name: name Response Example ---------------- .. literalinclude:: ./samples/volume-type-show-response.json :language: javascript Delete volume type ~~~~~~~~~~~~~~~~~~ .. rest_method:: DELETE /v2/{project_id}/types/{volume_type_id} Deletes a volume type. Response codes -------------- .. rest_status_code:: success ../status.yaml - 202 Request ------- .. rest_parameters:: parameters.yaml - volume_type_id: volume_type_id - project_id: project_id_path List all volume types for v2 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: GET /v2/{project_id}/types Lists volume types. Response codes -------------- .. rest_status_code:: success ../status.yaml - 200 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - sort_key: sort_key - sort_dir: sort_dir - limit: limit - offset: offset - marker: marker Response Parameters ------------------- .. rest_parameters:: parameters.yaml - volume_types: volume_types - extra_specs: extra_specs - name: name - volume_type: volume_type Response Example ---------------- .. literalinclude:: ./samples/volume-types-list-response.json :language: javascript Create volume type for v2 ~~~~~~~~~~~~~~~~~~~~~~~~~ .. 
rest_method:: POST /v2/{project_id}/types Creates a volume type. To create an environment with multiple-storage back ends, you must specify a volume type. Block Storage volume back ends are spawned as children to ``cinder-volume``, and they are keyed from a unique queue. They are named ``cinder-volume.HOST.BACKEND``. For example, ``cinder-volume.ubuntu.lvmdriver``. When a volume is created, the scheduler chooses an appropriate back end to handle the request based on the volume type. For information about how to use volume types to create multiple- storage back ends, see `Configure multiple-storage back ends `_. Response codes -------------- .. rest_status_code:: success ../status.yaml - 200 Request ------- .. rest_parameters:: parameters.yaml - volume_type: volume_type_1 - project_id: project_id_path Request Example --------------- .. literalinclude:: ./samples/volume-type-create-request.json :language: javascript Response Parameters ------------------- .. rest_parameters:: parameters.yaml - is_public: is_public - extra_specs: extra_specs - description: description - volume_type: volume_type_1 - name: name Response Example ---------------- .. literalinclude:: ./samples/volume-type-show-response.json :language: javascript Show an encryption type for v2 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: GET /v2/{project_id}/types/{volume_type_id}/encryption Show an encryption type. To show an encryption type for an existing volume type. Response codes -------------- .. rest_status_code:: success ../status.yaml - 200 Request ------- .. rest_parameters:: parameters.yaml - volume_type_id: volume_type_id - project_id: project_id_path Response Parameters ------------------- .. rest_parameters:: parameters.yaml - volume_type_id: volume_type_id_body - encryption_id: encryption_id - encryption: encryption - key_size: key_size - provider: provider - control_location: control_location - cipher: cipher - deleted: deleted - created_at: created_at - updated_at: updated_at - deleted_at: deleted_at Response Example ---------------- .. literalinclude:: ./samples/encryption-type-show-response.json :language: javascript Delete an encryption type for v2 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: GET /v2/{project_id}/types/{volume_type_id}/encryption/{encryption_id} Delete an encryption type. To delete an encryption type for an existing volume type. Response codes -------------- .. rest_status_code:: success ../status.yaml - 202 Request ------- .. rest_parameters:: parameters.yaml - volume_type_id: volume_type_id - project_id: project_id_path - encryption_id: encryption_id Create an encryption type for v2 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: POST /v2/{project_id}/types/{volume_type_id}/encryption Creates an encryption type. To create an encryption type for an existing volume type. Response codes -------------- .. rest_status_code:: success ../status.yaml - 200 Request ------- .. rest_parameters:: parameters.yaml - volume_type_id: volume_type_id - project_id: project_id_path - encryption: encryption - key_size: key_size - provider: provider - control_location: control_location - cipher: cipher Request Example --------------- .. literalinclude:: ./samples/encryption-type-create-request.json :language: javascript Response Parameters ------------------- .. 
rest_parameters:: parameters.yaml - volume_type_id: volume_type_id_body - encryption_id: encryption_id - encryption: encryption - key_size: key_size - provider: provider - control_location: control_location - cipher: cipher Response Example ---------------- .. literalinclude:: ./samples/encryption-type-create-response.json :language: javascript Update an encryption type for v2 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: PUT /v2/{project_id}/types/{volume_type_id}/encryption/{encryption_id} Update an encryption type. To update an encryption type for an existing volume type. Response codes -------------- .. rest_status_code:: success ../status.yaml - 200 Request ------- .. rest_parameters:: parameters.yaml - volume_type_id: volume_type_id - project_id: project_id_path - encryption_id: encryption_id - encryption: encryption - key_size: key_size - provider: provider_optional - control_location: control_location - cipher: cipher Request Example --------------- .. literalinclude:: ./samples/encryption-type-update-request.json :language: javascript Response Parameters ------------------- .. rest_parameters:: parameters.yaml - encryption: encryption - key_size: key_size - provider: provider_optional - control_location: control_location - cipher: cipher Response Example ---------------- .. literalinclude:: ./samples/encryption-type-update-response.json :language: javascript ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v2/volumes-v2-versions.inc0000664000175000017500000000076400000000000022630 0ustar00zuulzuul00000000000000.. -*- rst -*- API version details =================== Show API v2 details ~~~~~~~~~~~~~~~~~~~ .. rest_method:: GET /v2/ Shows details for Block Storage API v2. Response codes -------------- .. rest_status_code:: success ../status.yaml - 200 Request ------- Response Parameters ------------------- .. rest_parameters:: parameters.yaml - location: location Response Example ---------------- .. literalinclude:: ./samples/version-show-response.json :language: javascript ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v2/volumes-v2-volumes-actions.inc0000664000175000017500000003240400000000000024104 0ustar00zuulzuul00000000000000.. -*- rst -*- Volume actions (volumes, action) ================================ Extends the size of, resets statuses for, sets image metadata for, and removes image metadata from a volume. Attaches a volume to a server, detaches a volume from a server, and removes a volume from Block Storage management without actually removing the back-end storage object associated with it. Extend volume size ~~~~~~~~~~~~~~~~~~ .. rest_method:: POST /v2/{project_id}/volumes/{volume_id}/action Extends the size of a volume to a requested size, in gibibytes (GiB). Specify the ``os-extend`` action in the request body. Preconditions - Volume status must be ``available``. - Sufficient amount of storage must exist to extend the volume. - The user quota must have sufficient volume storage. Troubleshooting - An ``error_extending`` volume status indicates that the request failed. Ensure that you meet the preconditions and retry the request. If the request fails again, investigate the storage back end. Response codes -------------- .. rest_status_code:: success ../status.yaml - 202 Request ------- .. 
rest_parameters:: parameters.yaml - os-extend: os-extend - new_size: new_size - project_id: project_id_path - volume_id: volume_id_path Request Example --------------- .. literalinclude:: ./samples/volume-extend-request.json :language: javascript Reset volume statuses ~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: POST /v2/{project_id}/volumes/{volume_id}/action Administrator only. Resets the status, attach status, and migration status for a volume. Specify the ``os-reset_status`` action in the request body. Response codes -------------- .. rest_status_code:: success ../status.yaml - 202 Request ------- .. rest_parameters:: parameters.yaml - status: status_3 - migration_status: migration_status - os-reset_status: os-reset_status - attach_status: attach_status - project_id: project_id_path - volume_id: volume_id_path Request Example --------------- .. literalinclude:: ./samples/volume-status-reset-request.json :language: javascript Set image metadata for volume ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: POST /v2/{project_id}/volumes/{volume_id}/action Sets the image metadata for a volume. Specify the ``os-set_image_metadata`` action in the request body. Response codes -------------- .. rest_status_code:: success ../status.yaml - 200 Request ------- .. rest_parameters:: parameters.yaml - os-set_image_metadata: os-set_image_metadata - metadata: metadata - project_id: project_id_path - volume_id: volume_id_path Request Example --------------- .. literalinclude:: ./samples/volume-image-metadata-set-request.json :language: javascript Response Parameters ------------------- .. rest_parameters:: parameters.yaml - metadata: metadata_8 Response Example ---------------- .. literalinclude:: ./samples/volume-image-metadata-update-response.json :language: javascript Remove image metadata from volume ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: POST /v2/{project_id}/volumes/{volume_id}/action Removes image metadata, by key, from a volume. Specify the ``os-unset_image_metadata`` action in the request body and the ``key`` for the metadata key and value pair that you want to remove. Response codes -------------- .. rest_status_code:: success ../status.yaml - 200 Request ------- .. rest_parameters:: parameters.yaml - os-unset_image_metadata: os-unset_image_metadata - key: key - project_id: project_id_path - volume_id: volume_id_path Request Example --------------- .. literalinclude:: ./samples/volume-image-metadata-unset-request.json :language: javascript Show image metadata for volume ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: POST /v2/{project_id}/volumes/{volume_id}/action Shows image metadata for a volume. Response codes -------------- .. rest_status_code:: success ../status.yaml - 200 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - volume_id: volume_id_path - os-show_image_metadata: os-show_image_metadata Request Example --------------- .. literalinclude:: ./samples/image-metadata-show-request.json :language: javascript Response Parameters ------------------- .. rest_parameters:: parameters.yaml - metadata: metadata_8 Response Example ---------------- .. literalinclude:: ./samples/image-metadata-show-response.json :language: javascript Attach volume to server ~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: POST /v2/{project_id}/volumes/{volume_id}/action Attaches a volume to a server. Specify the ``os-attach`` action in the request body. Preconditions - Volume status must be ``available``. - You should set ``instance_uuid`` or ``host_name``. 
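
As an illustration only (not part of the official samples), the following minimal Python sketch posts the ``os-attach`` action described above using the ``requests`` library; the endpoint URL, project ID, volume ID, server UUID and token are placeholders to be replaced with real values.

.. code-block:: python

    # Illustrative sketch: invoke the os-attach volume action directly.
    # All identifiers and the endpoint below are placeholders, not values
    # taken from this reference.
    import requests

    ENDPOINT = "http://controller:8776/v2"       # Block Storage API endpoint
    PROJECT_ID = "<project-id>"
    VOLUME_ID = "<volume-id>"                    # volume must be "available"
    TOKEN = "<keystone-token>"

    body = {
        "os-attach": {
            "instance_uuid": "<server-uuid>",    # alternatively, set "host_name"
            "mountpoint": "/dev/vdc",
        }
    }

    resp = requests.post(
        f"{ENDPOINT}/{PROJECT_ID}/volumes/{VOLUME_ID}/action",
        json=body,
        headers={"X-Auth-Token": TOKEN},
    )
    resp.raise_for_status()                      # expect 202 Accepted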
Response codes -------------- .. rest_status_code:: success ../status.yaml - 202 Request ------- .. rest_parameters:: parameters.yaml - instance_uuid: instance_uuid - mountpoint: mountpoint - host_name: host_name - os-attach: os-attach - project_id: project_id_path - volume_id: volume_id_path Request Example --------------- .. literalinclude:: ./samples/volume-attach-request.json :language: javascript Detach volume from a server ~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: POST /v2/{project_id}/volumes/{volume_id}/action Detaches a volume from a server. Specify the ``os-detach`` action in the request body. Preconditions - Volume status must be ``in-use``. Response codes -------------- .. rest_status_code:: success ../status.yaml - 202 Request ------- .. rest_parameters:: parameters.yaml - attachment_id: attachment_id - os-detach: os-detach - project_id: project_id_path - volume_id: volume_id_path Request Example --------------- .. literalinclude:: ./samples/volume-detach-request.json :language: javascript Unmanage volume ~~~~~~~~~~~~~~~ .. rest_method:: POST /v2/{project_id}/volumes/{volume_id}/action Removes a volume from Block Storage management without removing the back-end storage object that is associated with it. Specify the ``os-unmanage`` action in the request body. Preconditions - Volume status must be ``available``. Response codes -------------- .. rest_status_code:: success ../status.yaml - 202 Request ------- .. rest_parameters:: parameters.yaml - os-unmanage: os-unmanage - project_id: project_id_path - volume_id: volume_id_path Request Example --------------- .. literalinclude:: ./samples/volume-unmanage-request.json :language: javascript Force detach volume ~~~~~~~~~~~~~~~~~~~ .. rest_method:: POST /v2/{project_id}/volumes/{volume_id}/action Forces a volume to detach. Specify the ``os-force_detach`` action in the request body. Rolls back an unsuccessful detach operation after you disconnect the volume. Policy defaults enable only users with the administrative role to perform this operation. Cloud providers can change these permissions through the ``volume_extension:volume_admin_actions:force_detach`` rule in the policy configuration file. Response codes -------------- .. rest_status_code:: success ../status.yaml - 202 Request ------- .. rest_parameters:: parameters.yaml - connector: connector - attachment_id: attachment_id - os-force_detach: os-force_detach - project_id: project_id_path - volume_id: volume_id_path Request Example --------------- .. literalinclude:: ./samples/volume-force-detach-request.json :language: javascript Retype volume ~~~~~~~~~~~~~ .. rest_method:: POST /v2/{project_id}/volumes/{volume_id}/action Change type of existing volume. Specify the ``os-retype`` action in the request body. Change the volume type of existing volume, Cinder may migrate the volume to proper volume host according to the new volume type. Retyping an *in-use* volume from a multiattach-capable type to a non-multiattach-capable type, or vice-versa, is not supported. It is generally not recommended to retype an *in-use* multiattach volume if that volume has more than one active read/write attachment. Policy defaults enable only users with the administrative role or the owner of the volume to perform this operation. Cloud providers can change these permissions through the policy configuration file. Retyping an unencrypted volume to the same size encrypted volume will most likely fail. 
Even though the volume is the same size as the source volume, the encrypted volume needs to store additional encryption information overhead. This results in the new volume not being large enough to hold all data. Response codes -------------- .. rest_status_code:: success ../status.yaml - 202 Request ------- .. rest_parameters:: parameters.yaml - new_type: new_type - migration_policy: migration_policy - os-retype: os-retype - volume_id: volume_id_path - project_id: project_id_path Request Example --------------- .. literalinclude:: ./samples/volume-os-retype-request.json :language: javascript Migrate volume ~~~~~~~~~~~~~~ .. rest_method:: POST /v2/{project_id}/volumes/{volume_id}/action Specify the ``os-migrate_volume`` action in the request body. Migrates a volume to the specified host. It is generally not recommended to migrate an *in-use* multiattach volume if that volume has more than one active read/write attachment. Policy defaults enable only users with the administrative role to perform this operation. Cloud providers can change these permissions through the policy configuration file. **Preconditions** * The volume ``status`` must be ``available`` or ``in-use``. * The volume ``migration_status`` must be ``None``, ``deleting``, ``error``, or ``success``. * The volume ``replication_status`` must be ``None``, ``disabled`` or ``not-capable``. * The migration must happen to another host from which the volume currently resides. * The volume must not be a member of a group. * The volume must not have snapshots. **Asynchronous Postconditions** On success, the volume ``status`` will return to its original status of ``available`` or ``in-use`` and the ``migration_status`` will be ``success``. On failure, the ``migration_status`` will be ``error``. In the case of failure, if ``lock_volume`` was true and the volume was originally ``available`` when it was migrated, the ``status`` will go back to ``available``. Response codes -------------- .. rest_status_code:: success ../status.yaml - 202 Request ------- .. rest_parameters:: parameters.yaml - volume_id: volume_id_path - project_id: project_id_path - os-migrate_volume: os-migrate_volume - host: migrate_host - force_host_copy: migrate_force_host_copy - lock_volume: migrate_lock_volume Request Example --------------- .. literalinclude:: ./samples/volume-os-migrate_volume-request.json :language: javascript Complete migration of a volume ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: POST /v2/{project_id}/volumes/{volume_id}/action Specify the ``os-migrate_volume_completion`` action in the request body. Complete the migration of a volume, updating the new volume in the DB, returning the ``status`` of the new volume to that of the original volume and finally deleting the original volume. **Preconditions** * Both the original and new volume ``migration_status`` must be ``None`` or both must be set to a non ``None`` value. * Additionally when set the new volume ``migration_status`` must take the form of ``target:VOLUME_UUID`` where VOLUME_UUID is the original volume UUID. **Asynchronous Postconditions** On success, the volume ``status`` will return to its original status of ``available`` or ``in-use`` and the ``migration_status`` will be ``success``. On failure, the ``migration_status`` will be ``error``. Response codes -------------- .. rest_status_code:: success ../status.yaml - 202 Request ------- .. 
rest_parameters:: parameters.yaml - volume_id: volume_id_path - project_id: project_id_path - os-migrate_volume_completion: os-migrate_volume_completion - new_volume: new_volume - error: migration_completion_error Request Example --------------- .. literalinclude:: ./samples/volume-os-migrate_volume_completion-request.json :language: javascript Force delete volume ~~~~~~~~~~~~~~~~~~~ .. rest_method:: POST /v2/{project_id}/volumes/{volume_id}/action Attempts force-delete of volume, regardless of state. Specify the ``os-force_delete`` action in the request body. Response codes -------------- .. rest_status_code:: success ../status.yaml - 202 Request ------- .. rest_parameters:: parameters.yaml - os-force_delete: os-force_delete - project_id: project_id_path - volume_id: volume_id_path Request Example --------------- .. literalinclude:: ./samples/volume-force-delete-request.json :language: javascript Update volume bootable status ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: POST /v2/{project_id}/volumes/{volume_id}/action Update the bootable status for a volume, mark it as a bootable volume. Specify the ``os-set_bootable`` action in the request body. Response codes -------------- .. rest_status_code:: success ../status.yaml - 200 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - volume_id: volume_id_path - os-set_bootable: os-set_bootable - bootable: bootable Request Example --------------- .. literalinclude:: ./samples/volume-bootable-status-update-request.json :language: javascript ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v2/volumes-v2-volumes.inc0000664000175000017500000004012500000000000022445 0ustar00zuulzuul00000000000000.. -*- rst -*- Volumes (volumes) ================= A volume is a detachable block storage device similar to a USB hard drive. You can attach a volume to one instance at a time. The ``snapshot_id`` and ``source_volid`` parameters specify the ID of the snapshot or volume from which this volume originates. If the volume was not created from a snapshot or source volume, these values are null. When you create, list, update, or delete volumes, the possible status values are: **Volume statuses** +------------------+--------------------------------------------------------+ | Status | Description | +------------------+--------------------------------------------------------+ | creating | The volume is being created. | +------------------+--------------------------------------------------------+ | available | The volume is ready to attach to an instance. | +------------------+--------------------------------------------------------+ | attaching | The volume is attaching to an instance. | +------------------+--------------------------------------------------------+ | detaching | The volume is detaching from an instance. | +------------------+--------------------------------------------------------+ | in-use | The volume is attached to an instance. | +------------------+--------------------------------------------------------+ | maintenance | The volume is locked and being migrated. | +------------------+--------------------------------------------------------+ | deleting | The volume is being deleted. | +------------------+--------------------------------------------------------+ | awaiting-transfer| The volume is awaiting for transfer. 
| +------------------+--------------------------------------------------------+ | error | A volume creation error occurred. | +------------------+--------------------------------------------------------+ | error_deleting | A volume deletion error occurred. | +------------------+--------------------------------------------------------+ | backing-up | The volume is being backed up. | +------------------+--------------------------------------------------------+ | restoring-backup | A backup is being restored to the volume. | +------------------+--------------------------------------------------------+ | error_backing-up | A backup error occurred. | +------------------+--------------------------------------------------------+ | error_restoring | A backup restoration error occurred. | +------------------+--------------------------------------------------------+ | error_extending | An error occurred while attempting to extend a volume. | +------------------+--------------------------------------------------------+ | downloading | The volume is downloading an image. | +------------------+--------------------------------------------------------+ | uploading | The volume is being uploaded to an image. | +------------------+--------------------------------------------------------+ | retyping | The volume is changing type to another volume type. | +------------------+--------------------------------------------------------+ | extending | The volume is being extended. | +------------------+--------------------------------------------------------+ List volumes with details ~~~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: GET /v2/{project_id}/volumes/detail Lists all Block Storage volumes, with details, that the project can access. Response codes -------------- .. rest_status_code:: success ../status.yaml - 200 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - all_tenants: all-tenants - sort: sort - limit: limit - offset: offset - marker: marker Response Parameters ------------------- .. rest_parameters:: parameters.yaml - migration_status: migration_status - attachments: attachments - links: links - availability_zone: availability_zone - os-vol-host-attr:host: os-vol-host-attr:host - encrypted: encrypted - updated_at: updated_at - replication_status: replication_status - snapshot_id: snapshot_id - id: id - size: size - user_id: user_id - os-vol-tenant-attr:tenant_id: os-vol-tenant-attr:tenant_id - os-vol-mig-status-attr:migstat: os-vol-mig-status-attr:migstat - metadata: metadata - status: status_3 - volume_image_metadata: volume_image_metadata - description: description - multiattach: multiattach_resp - source_volid: source_volid - consistencygroup_id: consistencygroup_id - os-vol-mig-status-attr:name_id: os-vol-mig-status-attr:name_id - name: name - bootable: bootable_response - created_at: created_at - volumes: volumes - volume_type: volume_type - volumes_links: links_vol_optional Response Example ---------------- .. literalinclude:: ./samples/volumes-list-detailed-response.json :language: javascript Create volume ~~~~~~~~~~~~~ .. rest_method:: POST /v2/{project_id}/volumes Creates a volume. To create a bootable volume, include the UUID of the image from which you want to create the volume in the ``imageRef`` attribute in the request body. Preconditions - You must have enough volume storage quota remaining to create a volume of size requested. Asynchronous Postconditions - With correct permissions, you can see the volume status as ``available`` through API calls. 
- With correct access, you can see the created volume in the storage system that OpenStack Block Storage manages. Troubleshooting - If volume status remains ``creating`` or shows another error status, the request failed. Ensure you meet the preconditions then investigate the storage back end. - Volume is not created in the storage system that OpenStack Block Storage manages. - The storage node needs enough free storage space to match the size of the volume creation request. Response codes -------------- .. rest_status_code:: success ../status.yaml - 202 Request ------- .. rest_parameters:: parameters.yaml - size: size - description: description_9 - imageRef: imageRef - availability_zone: availability_zone - source_volid: source_volid - name: volume_name_optional - volume: volume - consistencygroup_id: consistencygroup_id_1 - volume_type: volume_type_2 - snapshot_id: snapshot_id - OS-SCH-HNT:scheduler_hints: OS-SCH-HNT:scheduler_hints - metadata: metadata_2 - project_id: project_id_path Request Example --------------- .. literalinclude:: ./samples/volume-create-request.json :language: javascript Response Parameters ------------------- .. rest_parameters:: parameters.yaml - migration_status: migration_status - attachments: attachments - links: links - availability_zone: availability_zone - encrypted: encrypted - updated_at: updated_at - replication_status: replication_status - snapshot_id: snapshot_id - id: id - size: size - user_id: user_id - metadata: metadata - status: status_3 - description: description - multiattach: multiattach_resp - source_volid: source_volid - volume: volume - consistencygroup_id: consistencygroup_id - name: name - bootable: bootable_response - created_at: created_at - volume_type: volume_type Response Example ---------------- .. literalinclude:: ./samples/volume-create-response.json :language: javascript List volumes ~~~~~~~~~~~~ .. rest_method:: GET /v2/{project_id}/volumes Lists summary information for all Block Storage volumes that the project can access. Response codes -------------- .. rest_status_code:: success ../status.yaml - 200 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - all_tenants: all-tenants - sort: sort - limit: limit - offset: offset - marker: marker Response Parameters ------------------- .. rest_parameters:: parameters.yaml - volumes: volumes - id: id - links: links - name: name - volumes_links: links_vol_optional Response Example ---------------- .. literalinclude:: ./samples/volumes-list-response.json :language: javascript Show volume details ~~~~~~~~~~~~~~~~~~~ .. rest_method:: GET /v2/{project_id}/volumes/{volume_id} Shows details for a volume. Preconditions - The volume must exist. Response codes -------------- .. rest_status_code:: success ../status.yaml - 200 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - volume_id: volume_id_path Response Parameters ------------------- .. 
rest_parameters:: parameters.yaml - migration_status: migration_status - attachments: attachments - links: links - availability_zone: availability_zone - os-vol-host-attr:host: os-vol-host-attr:host - encrypted: encrypted - updated_at: updated_at - replication_status: replication_status - snapshot_id: snapshot_id - id: id - size: size - user_id: user_id - os-vol-tenant-attr:tenant_id: os-vol-tenant-attr:tenant_id - os-vol-mig-status-attr:migstat: os-vol-mig-status-attr:migstat - metadata: metadata - status: status_3 - volume_image_metadata: volume_image_metadata - description: description - multiattach: multiattach_resp - source_volid: source_volid - volume: volume - consistencygroup_id: consistencygroup_id - os-vol-mig-status-attr:name_id: os-vol-mig-status-attr:name_id - name: name - bootable: bootable_response - created_at: created_at - volume_type: volume_type Response Example ---------------- .. literalinclude:: ./samples/volume-show-response.json :language: javascript Update volume ~~~~~~~~~~~~~ .. rest_method:: PUT /v2/{project_id}/volumes/{volume_id} Updates a volume. Response codes -------------- .. rest_status_code:: success ../status.yaml - 200 Request ------- .. rest_parameters:: parameters.yaml - volume: volume - description: description - name: name - metadata: metadata_2 - project_id: project_id_path - volume_id: volume_id_path Request Example --------------- .. literalinclude:: ./samples/volume-update-request.json :language: javascript Response Parameters ------------------- .. rest_parameters:: parameters.yaml - migration_status: migration_status - attachments: attachments - links: links - availability_zone: availability_zone - encrypted: encrypted - updated_at: updated_at - replication_status: replication_status - snapshot_id: snapshot_id - id: id - size: size - user_id: user_id - metadata: metadata_3 - status: status_3 - description: description - multiattach: multiattach_resp - source_volid: source_volid - volume: volume - consistencygroup_id: consistencygroup_id - name: name - bootable: bootable_response - created_at: created_at - volume_type: volume_type Response Example ---------------- .. literalinclude:: ./samples/volume-update-response.json :language: javascript Delete volume ~~~~~~~~~~~~~ .. rest_method:: DELETE /v2/{project_id}/volumes/{volume_id} Deletes a volume. Preconditions - Volume status must be ``available``, ``in-use``, ``error``, or ``error_restoring``. - You cannot already have a snapshot of the volume. - You cannot delete a volume that is in a migration. Asynchronous Postconditions - The volume is deleted in volume index. - The volume managed by OpenStack Block Storage is deleted in storage node. Troubleshooting - If volume status remains in ``deleting`` or becomes ``error_deleting`` the request failed. Ensure you meet the preconditions then investigate the storage back end. - The volume managed by OpenStack Block Storage is not deleted from the storage system. Response codes -------------- .. rest_status_code:: success ../status.yaml - 202 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - volume_id: volume_id_path - cascade: cascade Create volume metadata ~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: POST /v2/{project_id}/volumes/{volume_id}/metadata Creates or replaces metadata for a volume. Does not modify items that are not in the request. Response codes -------------- .. rest_status_code:: success ../status.yaml - 200 Request ------- .. 
rest_parameters:: parameters.yaml - metadata: metadata_3 - project_id: project_id_path - volume_id: volume_id_path Request Example --------------- .. literalinclude:: ./samples/volume-metadata-create-request.json :language: javascript Response Parameters ------------------- .. rest_parameters:: parameters.yaml - metadata: metadata_3 Response Example ---------------- .. literalinclude:: ./samples/volume-metadata-create-response.json :language: javascript Show volume metadata ~~~~~~~~~~~~~~~~~~~~ .. rest_method:: GET /v2/{project_id}/volumes/{volume_id}/metadata Shows metadata for a volume. Response codes -------------- .. rest_status_code:: success ../status.yaml - 200 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - volume_id: volume_id_path Response Parameters ------------------- .. rest_parameters:: parameters.yaml - metadata: metadata_3 Response Example ---------------- .. literalinclude:: ./samples/volume-metadata-show-response.json :language: javascript Update volume metadata ~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: PUT /v2/{project_id}/volumes/{volume_id}/metadata Replaces all the volume's metadata with the key-value pairs in the request. Response codes -------------- .. rest_status_code:: success ../status.yaml - 200 Request ------- .. rest_parameters:: parameters.yaml - metadata: metadata_3 - project_id: project_id_path - volume_id: volume_id_path Request Example --------------- .. literalinclude:: ./samples/volume-metadata-update-request.json :language: javascript Response Parameters ------------------- .. rest_parameters:: parameters.yaml - metadata: metadata_3 Response Example ---------------- .. literalinclude:: ./samples/volume-metadata-update-response.json :language: javascript Show volume metadata for a specific key ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: GET /v2/{project_id}/volumes/{volume_id}/metadata/{key} Shows metadata for a volume for a specific key. Response codes -------------- .. rest_status_code:: success ../status.yaml - 200 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - volume_id: volume_id_path - key: key_2 Response Parameters ------------------- .. rest_parameters:: parameters.yaml - meta: meta Response Example ---------------- .. literalinclude:: ./samples/volume-metadata-show-key-response.json :language: javascript Delete volume metadata ~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: DELETE /v2/{project_id}/volumes/{volume_id}/metadata/{key} Deletes metadata for a volume. Response codes -------------- .. rest_status_code:: success ../status.yaml - 200 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - volume_id: volume_id_path - key: key_1 Update volume metadata for a specific key ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: PUT /v2/{project_id}/volumes/{volume_id}/metadata/{key} Update metadata for a volume for a specific key. Response codes -------------- .. rest_status_code:: success ../status.yaml - 200 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - volume_id: volume_id_path - key: key_3 - meta: meta Request Example --------------- .. literalinclude:: ./samples/volume-metadata-update-key-request.json :language: javascript Response Parameters ------------------- .. rest_parameters:: parameters.yaml - meta: meta Response Example ---------------- .. 
literalinclude:: ./samples/volume-metadata-update-key-response.json :language: javascript ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1759315576.951117 cinder-27.0.0/api-ref/source/v3/0000775000175000017500000000000000000000000016242 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/api-versions.inc0000664000175000017500000000103400000000000021352 0ustar00zuulzuul00000000000000.. -*- rst -*- API versions ============ List All API Versions ~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: GET / Lists information for all Block Storage API versions. Response codes -------------- .. rest_status_code:: success ../status.yaml - 300 .. rest_status_code:: error ../status.yaml - 400 - 401 - 403 - 404 - 405 - 409 - 500 - 503 Request ------- Response -------- **Example List API Versions: JSON response** .. literalinclude:: ./samples/versions/versions-response.json :language: javascript ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/attachments.inc0000664000175000017500000002126500000000000021256 0ustar00zuulzuul00000000000000.. -*- rst -*- Attachments (attachments) ========================= Lists all, lists all with details, shows details for, creates, and deletes attachments. .. note:: Everything except for `Complete attachment` is new as of the 3.27 microversion. `Complete attachment` is new as of the 3.44 microversion. When you create, list, update, or delete an attachment, the possible status values are: **VolumeAttachment statuses** +------------------+--------------------------------------------------------+ | Status           | Description                                            | +------------------+--------------------------------------------------------+ | attached         | A volume is attached for the attachment.               | +------------------+--------------------------------------------------------+ | attaching        | A volume is attaching for the attachment.              | +------------------+--------------------------------------------------------+ | detached         | A volume is detached for the attachment.               | +------------------+--------------------------------------------------------+ | reserved         | A volume is reserved for the attachment.               | +------------------+--------------------------------------------------------+ | error_attaching  | An error occurred while attaching a volume.            | +------------------+--------------------------------------------------------+ | error_detaching  | An error occurred while detaching a volume.            | +------------------+--------------------------------------------------------+ | deleted          | The attachment is deleted.                             | +------------------+--------------------------------------------------------+ Delete attachment ~~~~~~~~~~~~~~~~~ .. rest_method:: DELETE /v3/{project_id}/attachments/{attachment_id} Deletes an attachment. For security reasons (see bug `#2004555 `_), the Block Storage API rejects REST API calls made manually by users with a 409 status code if there is a Nova instance currently using the attachment, which happens when all of the following conditions are met: - The attachment has an instance UUID - The VM exists in Nova - The instance has the volume attached - The attached volume in the instance is using the attachment Calls coming from other OpenStack services (like the Compute Service) are always accepted. Available starting in the 3.27 microversion. Response codes -------------- ..
rest_status_code:: success ../status.yaml - 200 .. rest_status_code:: error ../status.yaml - 400 - 404 - 409 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - attachment_id: attachment_id_path Show attachment details ~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: GET /v3/{project_id}/attachments/{attachment_id} Shows details for an attachment. Available starting in the 3.27 microversion. Response codes -------------- .. rest_status_code:: success ../status.yaml - 200 .. rest_status_code:: error ../status.yaml - 400 - 404 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - attachment_id: attachment_id_path Response Parameters ------------------- .. rest_parameters:: parameters.yaml - status: status_attachment - detached_at: detached_at - connection_info: connection_info - attached_at: attached_at - attach_mode: attach_mode_required - instance: instance_uuid_req - volume_id: volume_id_attachment - id: attachment_id_required Response Example ---------------- .. literalinclude:: ./samples/attachment-show-response.json :language: javascript List attachments with details ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: GET /v3/{project_id}/attachments/detail Lists all attachments with details. Since v3.31 if non-admin users specify invalid filters in the url, API will return bad request. Available starting in the 3.27 microversion. Response codes -------------- .. rest_status_code:: success ../status.yaml - 200 .. rest_status_code:: error ../status.yaml - 400 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - all_tenants: all-tenants - sort: sort - sort_key: sort_key - sort_dir: sort_dir - limit: limit - offset: offset - marker: marker Response Parameters ------------------- .. rest_parameters:: parameters.yaml - status: status_attachment - detached_at: detached_at - connection_info: connection_info - attached_at: attached_at - attach_mode: attach_mode_required - instance: instance_uuid_req - volume_id: volume_id_attachment - id: attachment_id_required Response Example ---------------- .. literalinclude:: ./samples/attachment-list-detailed-response.json :language: javascript List attachments ~~~~~~~~~~~~~~~~ .. rest_method:: GET /v3/{project_id}/attachments Lists all attachments, since v3.31 if non-admin users specify invalid filters in the url, API will return bad request. Available starting in the 3.27 microversion. Response codes -------------- .. rest_status_code:: success ../status.yaml - 200 .. rest_status_code:: error ../status.yaml - 400 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - all_tenants: all-tenants - sort: sort - sort_key: sort_key - sort_dir: sort_dir - limit: limit - offset: offset - marker: marker Response Parameters ------------------- .. rest_parameters:: parameters.yaml - status: status_attachment - instance: instance_uuid_req - volume_id: volume_id_attachment - id: attachment_id_required Response Example ---------------- .. literalinclude:: ./samples/attachment-list-response.json :language: javascript Create attachment ~~~~~~~~~~~~~~~~~ .. rest_method:: POST /v3/{project_id}/attachments Creates an attachment. Available starting in the 3.27 microversion. Response codes -------------- .. rest_status_code:: success ../status.yaml - 200 .. rest_status_code:: error ../status.yaml - 400 - 404 Request ------- .. 
rest_parameters:: parameters.yaml - project_id: project_id_path - attachment: attachment - instance_uuid: instance_uuid - connector: connector - volume_uuid: volume_id_attachment - mode: attach_mode Request Example --------------- .. literalinclude:: ./samples/attachment-create-request.json :language: javascript Response Parameters ------------------- .. rest_parameters:: parameters.yaml - attachment: attachment - status: status_attachment - detached_at: detached_at - connection_info: connection_info - attached_at: attached_at - attach_mode: attach_mode_required - instance: instance_uuid_req - volume_id: volume_id_attachment - id: attachment_id_required Response Example ---------------- .. literalinclude:: ./samples/attachment-create-response.json :language: javascript Update an attachment ~~~~~~~~~~~~~~~~~~~~ .. rest_method:: PUT /v3/{project_id}/attachments/{attachment_id} Updates a reserved attachment record with connector information and sets up the appropriate ``connection_info`` from the driver. Available starting in the 3.27 microversion. Response codes -------------- .. rest_status_code:: success ../status.yaml - 200 .. rest_status_code:: error ../status.yaml - 400 - 404 - 409 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - attachment_id: attachment_id_path - attachment: attachment - connector: connector_required Request Example --------------- .. literalinclude:: ./samples/attachment-update-request.json :language: javascript Response Parameters ------------------- .. rest_parameters:: parameters.yaml - attachment: attachment - status: status_attachment - detached_at: detached_at - connection_info: connection_info - attached_at: attached_at - attach_mode: attach_mode_required - instance: instance_uuid_req - volume_id: volume_id_attachment - id: attachment_id_required Response Example ---------------- .. literalinclude:: ./samples/attachment-update-response.json :language: javascript Complete attachment ~~~~~~~~~~~~~~~~~~~ .. rest_method:: POST /v3/{project_id}/attachments/{attachment_id}/action Completes an attachment for a cinder volume. Available starting in the 3.44 microversion. Response codes -------------- .. rest_status_code:: success ../status.yaml - 204 .. rest_status_code:: error ../status.yaml - 400 - 404 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - attachment_id: attachment_id_path Request Example --------------- .. literalinclude:: ./samples/attachment-complete.json :language: javascript ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/availability-zones-v3.inc0000664000175000017500000000154500000000000023076 0ustar00zuulzuul00000000000000.. -*- rst -*- Availability zones (os-availability-zone) ========================================= List availability zone information. List Availability Zone Information ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: GET /v3/{project_id}/os-availability-zone List availability zone information. Response codes -------------- .. rest_status_code:: success ../status.yaml - 200 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path Response Parameters ------------------- .. rest_parameters:: parameters.yaml - project_id: project_id - availabilityZoneInfo: availability_zone_info - zoneName: availability_zone_required - zoneState: availability_zone_state - available: available Response Example ---------------- ..
literalinclude:: ./samples/availability-zone-list-response.json :language: javascript ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/capabilities-v3.inc0000664000175000017500000000225700000000000021722 0ustar00zuulzuul00000000000000.. -*- rst -*- Capabilities for storage back ends (capabilities) ================================================= Shows capabilities for a storage back end. Show all back-end capabilities ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: GET /v3/{project_id}/capabilities/{hostname} Shows capabilities for a storage back end on the host. The ``hostname`` takes the form of ``hostname@volume_backend_name``. Response codes -------------- .. rest_status_code:: success ../status.yaml - 200 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - hostname: hostname Response Parameters ------------------- .. rest_parameters:: parameters.yaml - pool_name: pool_name - description: description_cap - volume_backend_name: volume_backend_name - namespace: namespace_storage - visibility: visibility - driver_version: driver_version - vendor_name: vendor_name - properties: properties - storage_protocol: storage_protocol - replication_targets: replication_targets - display_name: display_name Response Example ---------------- .. literalinclude:: ./samples/backend-capabilities-response.json :language: javascript ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/clusters.inc0000664000175000017500000001621700000000000020610 0ustar00zuulzuul00000000000000.. -*- rst -*- Clusters (clusters) =================== Administrator only. Lists all Cinder clusters, shows cluster details, and enables or disables a cluster. Each cinder service runs on a *host* computer (possibly multiple services on the same host; it depends on how you decide to deploy cinder). In order to support High Availability scenarios, services can be grouped into *clusters* where the same type of service (for example, cinder-volume) can run on different hosts so that if one host goes down, the service is still available on a different host. Since there's no point having these services sit idle while waiting for some other host to go down (which is also known as Active/Passive mode), grouping services into clusters also allows cinder to support Active/Active mode, in which all services in a cluster are doing work all the time. .. note:: Currently the only service that can be grouped into clusters is ``cinder-volume``. Clusters are determined by the deployment configuration; that's why there is no 'create-cluster' API call listed below. Once your services are up and running, however, you can use the following API requests to get information about your clusters and to update their status. Disable cluster ~~~~~~~~~~~~~~~ .. rest_method:: PUT /v3/{project_id}/clusters/disable Disables a cluster. Specify the cluster by its name and optionally the binary name in the request body. Available starting in the 3.7 microversion. Response codes -------------- .. rest_status_code:: success ../status.yaml - 200 .. rest_status_code:: error ../status.yaml - 400 - 404 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - name: cluster_name_required - binary: cluster_binary - disabled_reason: disabled_reason_body Request Example --------------- ..
literalinclude:: ./samples/clusters/v3.7/cluster-disable-request.json :language: javascript Response Parameters ------------------- .. rest_parameters:: parameters.yaml - cluster: cluster - name: cluster_name_resp - binary: cluster_binary_resp - state: cluster_state - status: cluster_status - replication_status: cluster_replication_status - disabled_reason: disabled_reason_body Response Example ---------------- .. literalinclude:: ./samples/clusters/v3.7/cluster-disable-response.json :language: javascript Enable cluster ~~~~~~~~~~~~~~ .. rest_method:: PUT /v3/{project_id}/clusters/enable Enables a cluster. Specify the cluster by its name and optionally the binary name in the request body. Available starting in the 3.7 microversion. Response codes -------------- .. rest_status_code:: success ../status.yaml - 200 .. rest_status_code:: error ../status.yaml - 400 - 404 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - name: cluster_name_required - binary: cluster_binary Request Example --------------- .. literalinclude:: ./samples/clusters/v3.7/cluster-enable-request.json :language: javascript Response Parameters ------------------- .. rest_parameters:: parameters.yaml - cluster: cluster - name: cluster_name_resp - binary: cluster_binary_resp - state: cluster_state - status: cluster_status - replication_status: cluster_replication_status - disabled_reason: disabled_reason_body Response Example ---------------- .. literalinclude:: ./samples/clusters/v3.7/cluster-enable-response.json :language: javascript Show cluster details ~~~~~~~~~~~~~~~~~~~~ .. rest_method:: GET /v3/{project_id}/clusters/{cluster_name} Shows details for a cluster by its name and optionally the binary name. Available starting in the 3.7 microversion. Response codes -------------- .. rest_status_code:: success ../status.yaml - 200 .. rest_status_code:: error ../status.yaml - 400 - 404 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - cluster_name: cluster_name_path - binary: cluster_binary_query Response Parameters ------------------- .. rest_parameters:: parameters.yaml - cluster: cluster - name: cluster_name_resp - binary: cluster_binary_resp - state: cluster_state - status: cluster_status - num_hosts: cluster_num_hosts - num_down_hosts: cluster_num_down_hosts - last_heartbeat: cluster_last_heartbeat - created_at: created_at - updated_at: updated_at - disabled_reason: disabled_reason_body - replication_status: cluster_replication_status - frozen: cluster_frozen - active_backend_id: cluster_active_backend_id Response Example ---------------- .. literalinclude:: ./samples/clusters/v3.7/cluster-show-response.json :language: javascript List clusters ~~~~~~~~~~~~~ .. rest_method:: GET /v3/{project_id}/clusters Lists all clusters. Available starting in the 3.7 microversion. Response codes -------------- .. rest_status_code:: success ../status.yaml - 200 .. rest_status_code:: error ../status.yaml - 400 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - name: cluster_name_query - binary: cluster_binary_query - is_up: cluster_is_up_query - disabled: cluster_disabled_query - num_hosts: cluster_num_hosts_query - num_down_hosts: cluster_num_down_hosts_query - replication_status: cluster_replication_status_query Response Parameters ------------------- .. 
rest_parameters:: parameters.yaml - clusters: clusters - name: cluster_name_resp - binary: cluster_binary_resp - state: cluster_state - status: cluster_status - replication_status: cluster_replication_status Response Example ---------------- .. literalinclude:: ./samples/clusters/v3.7/clusters-list-response.json :language: javascript List clusters with details ~~~~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: GET /v3/{project_id}/clusters/detail Lists all clusters with details. Available starting in the 3.7 microversion. Response codes -------------- .. rest_status_code:: success ../status.yaml - 200 .. rest_status_code:: error ../status.yaml - 400 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - name: cluster_name_query - binary: cluster_binary_query - is_up: cluster_is_up_query - disabled: cluster_disabled_query - num_hosts: cluster_num_hosts_query - num_down_hosts: cluster_num_down_hosts_query - replication_status: cluster_replication_status_query - frozen: cluster_frozen - active_backend_id: cluster_active_backend_id Response Parameters ------------------- .. rest_parameters:: parameters.yaml - clusters: clusters - name: cluster_name_resp - binary: cluster_binary_resp - state: cluster_state - status: cluster_status - num_hosts: cluster_num_hosts - num_down_hosts: cluster_num_down_hosts - last_heartbeat: cluster_last_heartbeat - created_at: created_at - updated_at: updated_at - disabled_reason: disabled_reason_body - replication_status: cluster_replication_status - frozen: cluster_frozen - active_backend_id: cluster_active_backend_id Response Example ---------------- .. literalinclude:: ./samples/clusters/v3.7/clusters-list-detailed-response.json :language: javascript ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/consistencygroups-v3.inc0000664000175000017500000001405200000000000023066 0ustar00zuulzuul00000000000000.. -*- rst -*- Consistency groups (DEPRECATED) =============================== Consistency groups enable you to create snapshots at the exact same point in time from multiple volumes. For example, a database might place its tables, logs, and configuration on separate volumes. To restore this database from a previous point in time, it makes sense to restore the logs, tables, and configuration together from the exact same point in time. Use the policy configuration file to grant permissions for these actions to limit roles. List project's consistency groups ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: GET /v3/{project_id}/consistencygroups Lists consistency groups. Response codes -------------- .. rest_status_code:: success ../status.yaml - 200 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - all_tenants: all-tenants - sort: sort - sort_key: sort_key - sort_dir: sort_dir - limit: limit - offset: offset - marker: marker Response Parameters ------------------- .. rest_parameters:: parameters.yaml - consistencygroups: consistencygroups - id: id - name: name Response Example ---------------- .. literalinclude:: ./samples/consistency-groups-list-response.json :language: javascript Create a consistency group ~~~~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: POST /v3/{project_id}/consistencygroups Creates a consistency group. Response codes -------------- .. rest_status_code:: success ../status.yaml - 202 Request ------- .. 
rest_parameters:: parameters.yaml - project_id: project_id_path - consistencygroup: consistencygroup - description: description_consis - availability_zone: availability_zone - volume_types: volume_types_commas - name: name_consis Request Example --------------- .. literalinclude:: ./samples/consistency-group-create-request.json :language: javascript Response -------- .. rest_parameters:: parameters.yaml - consistencygroup: consistencygroup - status: status_consis - description: description_cg - availability_zone: availability_zone - created_at: created_at - volume_types: volume_types - name: name_consis - id: consistencygroup_id Response Example ---------------- .. literalinclude:: ./samples/consistency-group-create-response.json :language: javascript Show a consistency group's details ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: GET /v3/{project_id}/consistencygroups/{consistencygroup_id} Shows details for a consistency group. Response codes -------------- .. rest_status_code:: success ../status.yaml - 200 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - consistencygroup_id: consistencygroup_id_path Response Parameters ------------------- .. rest_parameters:: parameters.yaml - status: status_consis - description: description_cg - availability_zone: availability_zone - created_at: created_at - volume_types: volume_types - id: id - name: name Response Example ---------------- .. literalinclude:: ./samples/consistency-group-show-response.json :language: javascript Create a consistency group from source ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: POST /v3/{project_id}/consistencygroups/create_from_src Creates a consistency group from source. Response codes -------------- .. rest_status_code:: success ../status.yaml - 202 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - consistencygroup-from-src: consistencygroup-from-src - status: status_consis - user_id: user_id - description: description_consis - cgsnapshot_id: cgsnapshot_id - source_cgid: source_cgid - project_id: project_id - name: name Request Example --------------- .. literalinclude:: ./samples/consistency-group-create-from-src-request.json :language: javascript Delete a consistency group ~~~~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: POST /v3/{project_id}/consistencygroups/{consistencygroup_id}/delete Deletes a consistency group. Response codes -------------- .. rest_status_code:: success ../status.yaml - 202 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - consistencygroup_id: consistencygroup_id_path - consistencygroup: consistencygroup - force: force Request Example --------------- .. literalinclude:: ./samples/consistency-group-delete-request.json :language: javascript List consistency groups and details ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: GET /v3/{project_id}/consistencygroups/detail Lists consistency groups with details. Response codes -------------- .. rest_status_code:: success ../status.yaml - 200 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - all_tenants: all-tenants - sort: sort - sort_key: sort_key - sort_dir: sort_dir - limit: limit - offset: offset - marker: marker Response Parameters ------------------- .. 
rest_parameters:: parameters.yaml - consistencygroups: consistencygroups - status: status_consis - description: description_cg - availability_zone: availability_zone - created_at: created_at - volume_types: volume_types - id: id - name: name Response Example ---------------- .. literalinclude:: ./samples/consistency-groups-list-detailed-response.json :language: javascript Update a consistency group ~~~~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: PUT /v3/{project_id}/consistencygroups/{consistencygroup_id}/update Updates a consistency group. Response codes -------------- .. rest_status_code:: success ../status.yaml - 202 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - consistencygroup_id: consistencygroup_id_path - consistencygroup: consistencygroup - remove_volumes: remove_volumes - description: description_consis - add_volumes: add_volumes - name: name Request Example --------------- .. literalinclude:: ./samples/consistency-group-update-request.json :language: javascript ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/default-types.inc0000664000175000017500000000643300000000000021531 0ustar00zuulzuul00000000000000.. -*- rst -*- Default Volume Types (default-types) ==================================== Manage a default volume type for individual projects. By default, a volume-create request that does not specify a volume-type will assign the configured system default volume type to the volume. You can override this behavior on a per-project basis by setting a different default volume type for any project. Available in microversion 3.62 or higher. NOTE: The default policy for list API is system admin so you would require a system scoped token to access it. To get a system scoped token, you need to run the following command: openstack --os-system-scope all --os-project-name='' token issue Create or update a default volume type ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: PUT /v3/default-types/{project-id} Create or update the default volume type for a project Response codes -------------- .. rest_status_code:: success ../status.yaml - 200 .. rest_status_code:: error ../status.yaml - 400 - 404 Request Parameters ------------------ .. rest_parameters:: parameters.yaml - project_id: project_id_path - volume_type: volume_type_name_or_id Request Example --------------- .. literalinclude:: ./samples/set-default-type-request.json :language: javascript Response Parameters ------------------- .. rest_parameters:: parameters.yaml - project_id: project_id - volume_type_id: volume_type_id Response Example ---------------- .. literalinclude:: ./samples/set-default-type-response.json :language: javascript Show a default volume type ~~~~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: GET /v3/default-types/{project-id} Show the default volume type for a project Response codes -------------- .. rest_status_code:: success ../status.yaml - 200 .. rest_status_code:: error ../status.yaml - 404 Request Parameters ------------------ .. rest_parameters:: parameters.yaml - project_id: project_id_path Response Parameters ------------------- .. rest_parameters:: parameters.yaml - project_id: project_id - volume_type_id: volume_type_id Response Example ---------------- .. literalinclude:: ./samples/get-default-type-response.json :language: javascript List default volume types ~~~~~~~~~~~~~~~~~~~~~~~~~ .. 
rest_method:: GET /v3/default-types/ Get a list of all default volume types. Response codes -------------- .. rest_status_code:: success ../status.yaml - 200 .. rest_status_code:: error ../status.yaml - 404 Response Parameters ------------------- .. rest_parameters:: parameters.yaml - project_id: project_id - volume_type_id: volume_type_id Response Example ---------------- .. literalinclude:: ./samples/get-default-types-response.json :language: javascript Delete a default volume type ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: DELETE /v3/default-types/{project-id} Unset the default volume type for a project. This operation does not do anything to the volume type itself. It simply removes the volume type from being the default volume type for the specified project. Response codes -------------- .. rest_status_code:: success ../status.yaml - 204 .. rest_status_code:: error ../status.yaml - 404 Request Parameters ------------------ .. rest_parameters:: parameters.yaml - project_id: project_id_path././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/ext-backups-actions-v3.inc0000664000175000017500000000306700000000000023155 0ustar00zuulzuul00000000000000.. -*- rst -*- Backup actions (backups, action) ================================ Force-deletes a backup and resets the status of a backup. Force-delete a backup ~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: POST /v3/{project_id}/backups/{backup_id}/action Force-deletes a backup. Specify the ``os-force_delete`` action in the request body. This operation deletes the backup and any backup data. The backup driver returns the ``405`` status code if it does not support this operation. Response codes -------------- .. rest_status_code:: success ../status.yaml - 202 .. rest_status_code:: error ../status.yaml - 404 - 405 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - backup_id: backup_id_required - os-force_delete: os-force_delete Request Example --------------- .. literalinclude:: ./samples/backup-force-delete-request.json :language: javascript Reset a backup's status ~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: POST /v3/{project_id}/backups/{backup_id}/action Resets a backup's status. Specify the ``os-reset_status`` action in the request body. Response codes -------------- .. rest_status_code:: success ../status.yaml - 202 .. rest_status_code:: error ../status.yaml - 400 - 404 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - backup_id: backup_id_required - os-reset_status: os-reset_status - status: status_backup_action Request Example --------------- .. literalinclude:: ./samples/backup-reset-status-request.json :language: javascript ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/ext-backups.inc0000664000175000017500000002600600000000000021167 0ustar00zuulzuul00000000000000.. -*- rst -*- Backups (backups) ================= A backup is a full copy of a volume stored in an external service. The service can be configured. The only supported service is Object Storage. A backup can subsequently be restored from the external service to either the same volume that the backup was originally taken from or to a new volume.
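As a quick orientation, a minimal backup-create request body might look like the following; the volume ID and name shown here are placeholders, and the full set of accepted fields is listed under *Create a backup* below:

.. code-block:: json

   {
       "backup": {
           "volume_id": "2402b902-0b7a-458c-9c07-7435a826f794",
           "name": "nightly-backup",
           "incremental": false,
           "force": false
       }
   }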
When you create, list, or delete backups, these status values are possible: **Backup statuses** +-----------------+---------------------------------------------+ | Status | Description | +-----------------+---------------------------------------------+ | creating | The backup is being created. | +-----------------+---------------------------------------------+ | available | The backup is ready to restore to a volume. | +-----------------+---------------------------------------------+ | deleting | The backup is being deleted. | +-----------------+---------------------------------------------+ | error | A backup error occurred. | +-----------------+---------------------------------------------+ | restoring | The backup is being restored to a volume. | +-----------------+---------------------------------------------+ | error_deleting | An error occurred while deleting the backup.| +-----------------+---------------------------------------------+ If an error occurs, you can find more information about the error in the ``fail_reason`` field for the backup. List backups with detail ~~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: GET /v3/{project_id}/backups/detail Lists Block Storage backups, with details, to which the project has access, since v3.31 if non-admin users specify invalid filters in the url, API will return bad request. Response codes -------------- .. rest_status_code:: success ../status.yaml - 200 .. rest_status_code:: error ../status.yaml - 400 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - all_tenants: all-tenants - sort: sort - sort_key: sort_key - sort_dir: sort_dir - limit: limit - offset: offset - marker: marker - with_count: with_count Response Parameters ------------------- .. rest_parameters:: parameters.yaml - backups: backups - status: status_backup - object_count: object_count - fail_reason: fail_reason - description: description - links: links_backup - availability_zone: availability_zone - created_at: created_at - updated_at: updated_at - name: name_backup - has_dependent_backups: has_dependent_backups - volume_id: volume_id - container: container - size: size - id: id_backup - is_incremental: is_incremental - data_timestamp: data_timestamp - snapshot_id: snapshot_id_source_vol - os-backup-project-attr:project_id: os-backup-project-attr:project_id - count: count - metadata: metadata_backup - user_id: user_id_backup - encryption_key_id: encryption_key_id - backup_links: backup_links_optional Response Example ---------------- .. literalinclude:: ./samples/backups/backups-list-detailed-response.json :language: javascript Show backup detail ~~~~~~~~~~~~~~~~~~ .. rest_method:: GET /v3/{project_id}/backups/{backup_id} Shows details for a backup. Response codes -------------- .. rest_status_code:: success ../status.yaml - 200 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - backup_id: backup_id_required Response Parameters ------------------- .. 
rest_parameters:: parameters.yaml - backup: backup - status: status_backup - object_count: object_count - container: container - description: description - links: links_backup - availability_zone: availability_zone - created_at: created_at - updated_at: updated_at - name: name_backup - has_dependent_backups: has_dependent_backups - volume_id: volume_id - fail_reason: fail_reason - size: size - id: id_backup - is_incremental: is_incremental - data_timestamp: data_timestamp - snapshot_id: snapshot_id_source_vol - os-backup-project-attr:project_id: os-backup-project-attr:project_id - metadata: metadata_backup - user_id: user_id_backup - encryption_key_id: encryption_key_id Response Example ---------------- .. literalinclude:: ./samples/backups/backup-show-response.json :language: javascript Delete a backup ~~~~~~~~~~~~~~~ .. rest_method:: DELETE /v3/{project_id}/backups/{backup_id} Deletes a backup. Response codes -------------- .. rest_status_code:: success ../status.yaml - 202 .. rest_status_code:: error ../status.yaml - 400 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - backup_id: backup_id_required Restore a backup ~~~~~~~~~~~~~~~~ .. rest_method:: POST /v3/{project_id}/backups/{backup_id}/restore Restores a Block Storage backup to an existing or new Block Storage volume. The ``name`` parameter takes effect only if a new volume is created. If a volume UUID is specified, the backup is restored to that volume. The specified volume has the following requirements: * the specified volume's status is ``available``. * the size of the specified volume must be equal to or greater than the size of the backup. If no existing volume UUID is provided, the backup is restored to a new volume matching the size and name of the originally backed up volume. In this case, if the name parameter is provided, it is used as the name of the new volume. Response codes -------------- .. rest_status_code:: success ../status.yaml - 202 .. rest_status_code:: error ../status.yaml - 400 - 413 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - backup_id: backup_id_required - restore: restore - name: volume_name_optional - volume_id: volume_id_restore Request Example --------------- .. literalinclude:: ./samples/backup-restore-request.json :language: javascript Response Parameters ------------------- .. rest_parameters:: parameters.yaml - restore: restore - backup_id: backup_id_required - volume_id: volume_id - volume_name: volume_name Response Example ---------------- .. literalinclude:: ./samples/backup-restore-response.json :language: javascript Create a backup ~~~~~~~~~~~~~~~ .. rest_method:: POST /v3/{project_id}/backups Creates a Block Storage backup from a volume or snapshot. The status of the volume must be ``available``, or, if the ``force`` flag is used, backups of ``in-use`` volumes may also be created. Response codes -------------- .. rest_status_code:: success ../status.yaml - 202 .. rest_status_code:: error ../status.yaml - 400 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - backup: backup - volume_id: volume_id_backup - container: container - description: description - incremental: incremental - force: force - name: name_optional - snapshot_id: snapshot_id_backup - metadata: metadata_backup - availability_zone: availability_zone_backup Request Example --------------- ..
literalinclude:: ./samples/backups/backup-create-request.json :language: javascript Response Parameters ------------------- .. rest_parameters:: parameters.yaml - backup: backup - id: id_backup - links: links_backup - name: name_backup - metadata: metadata_backup Response Example ---------------- .. literalinclude:: ./samples/backups/backup-create-response.json :language: javascript Update a backup ~~~~~~~~~~~~~~~ .. rest_method:: PUT /v3/{project_id}/backups/{backup_id} Update a Block Storage backup. This API is available since v3.9. Response codes -------------- .. rest_status_code:: success ../status.yaml - 200 .. rest_status_code:: error ../status.yaml - 400 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - backup_id: backup_id_required - backup: backup - description: description - name: name_optional - metadata: metadata_backup Request Example --------------- .. literalinclude:: ./samples/backups/v3.9/backup-update-request.json :language: javascript Response Parameters ------------------- .. rest_parameters:: parameters.yaml - backup: backup - id: id_backup - links: links_backup - name: name_backup - metadata: metadata_backup Response Example ---------------- .. literalinclude:: ./samples/backups/v3.9/backup-update-response.json :language: javascript List backups for project ~~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: GET /v3/{project_id}/backups Lists Block Storage backups to which the project has access, since v3.31 if non-admin users specify invalid filters in the url, API will return bad request. Response codes -------------- .. rest_status_code:: success ../status.yaml - 200 .. rest_status_code:: error ../status.yaml - 400 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - all_tenants: all-tenants - sort: sort - sort_key: sort_key - sort_dir: sort_dir - limit: limit - offset: offset - marker: marker - with_count: with_count Response Parameters ------------------- .. rest_parameters:: parameters.yaml - backups: backups - id: id_backup - links: links_backup - name: name_backup - count: count - backup_links: backup_links_optional Response Example ---------------- .. literalinclude:: ./samples/backups/backups-list-response.json :language: javascript Export a backup ~~~~~~~~~~~~~~~ .. rest_method:: GET /v3/{project_id}/backups/{backup_id}/export_record Export information about a backup. Response codes -------------- .. rest_status_code:: success ../status.yaml - 200 .. rest_status_code:: error ../status.yaml - 400 - 404 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - backup_id: backup_id_required Response Parameters ------------------- .. rest_parameters:: parameters.yaml - backup-record: backup_record - backup_service: backup_service - backup_url: backup_url Response Example ---------------- .. literalinclude:: ./samples/backup-record-export-response.json :language: javascript Import a backup ~~~~~~~~~~~~~~~ .. rest_method:: POST /v3/{project_id}/backups/import_record Import information about a backup. Response codes -------------- .. rest_status_code:: success ../status.yaml - 201 .. rest_status_code:: error ../status.yaml - 400 - 503 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - backup-record: backup_record - backup_service: backup_service - backup_url: backup_url Request Example --------------- .. literalinclude:: ./samples/backup-record-import-request.json :language: javascript Response Parameters ------------------- .. 
rest_parameters:: parameters.yaml - id: id_backup - links: links_backup - name: name_backup Response Example ---------------- .. literalinclude:: ./samples/backup-record-import-response.json ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/group-replication.inc0000664000175000017500000000525600000000000022410 0ustar00zuulzuul00000000000000.. -*- rst -*- Group replication (groups, action) ================================== Lists targets, enables, disables, and fails over group replication. Available since API microversion 3.38. List replication targets ~~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: POST /v3/{project_id}/groups/{group_id}/action Lists replication targets for a group. Response codes -------------- .. rest_status_code:: success ../status.yaml - 202 .. rest_status_code:: error ../status.yaml - 400 - 404 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - group_id: group_id_path Request Example --------------- .. literalinclude:: ./samples/group-replication-list-targets.json :language: javascript Response Parameters ------------------- .. rest_parameters:: parameters.yaml - backend_id: backend_id_target - unique_key: replication_targets_unique_key Response Example ---------------- .. literalinclude:: ./samples/group-replication-target.json :language: javascript Enable group replication ~~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: POST /v3/{project_id}/groups/{group_id}/action Enable replication for a group. Response codes -------------- .. rest_status_code:: success ../status.yaml - 202 .. rest_status_code:: error ../status.yaml - 400 - 404 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - group_id: group_id_path Request Example --------------- .. literalinclude:: ./samples/group-replication-enable.json :language: javascript Disable group replication ~~~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: POST /v3/{project_id}/groups/{group_id}/action Disable replication for a group. Response codes -------------- .. rest_status_code:: success ../status.yaml - 202 .. rest_status_code:: error ../status.yaml - 400 - 404 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - group_id: group_id_path Request Example --------------- .. literalinclude:: ./samples/group-replication-disable.json :language: javascript Failover replication ~~~~~~~~~~~~~~~~~~~~ .. rest_method:: POST /v3/{project_id}/groups/{group_id}/action Failover a replicated group. Response codes -------------- .. rest_status_code:: success ../status.yaml - 202 .. rest_status_code:: error ../status.yaml - 400 - 404 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - group_id: group_id_path - allow_attached_volume: allow_attached_volume - secondary_backend_id: backend_id_target Request Example --------------- .. literalinclude:: ./samples/group-replication-failover.json :language: javascript ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/group-snapshots.inc0000664000175000017500000001266100000000000022117 0ustar00zuulzuul00000000000000.. -*- rst -*- Group snapshots (group_snapshots) ================================= Lists all, lists all with details, shows details for, creates, and deletes group snapshots. Delete group snapshot ~~~~~~~~~~~~~~~~~~~~~ .. 
rest_method:: DELETE /v3/{project_id}/group_snapshots/{group_snapshot_id} Deletes a group snapshot. Response codes -------------- .. rest_status_code:: success ../status.yaml - 202 .. rest_status_code:: error ../status.yaml - 400 - 404 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - group_snapshot_id: group_snapshot_id_path Show group snapshot details ~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: GET /v3/{project_id}/group_snapshots/{group_snapshot_id} Shows details for a group snapshot. Response codes -------------- .. rest_status_code:: success ../status.yaml - 200 .. rest_status_code:: error ../status.yaml - 400 - 404 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - group_snapshot_id: group_snapshot_id_path Response Parameters ------------------- .. rest_parameters:: parameters.yaml - group_snapshot: group_snapshot - created_at: created_at - group_id: source_group_id_req - id: group_snapshot_id_req - name: name_group_snap_req - status: status_group_snap - description: description_group_snap_req - group_type_id: group_type_id - project_id: project_id_group_snapshot Response Example ---------------- .. literalinclude:: ./samples/group-snapshots-show-response.json :language: javascript List group snapshots with details ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: GET /v3/{project_id}/group_snapshots/detail Lists all group snapshots with details. Since v3.31 if non-admin users specify invalid filters in the url, API will return bad request. Response codes -------------- .. rest_status_code:: success ../status.yaml - 200 .. rest_status_code:: error ../status.yaml - 400 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - all_tenants: all-tenants - sort: sort_group_snapshot - sort_key: sort_key_group_snapshot - sort_dir: sort_dir_group_snapshot - limit: limit_group_snapshot - offset: offset_group_snapshot - marker: marker_group_snapshot Response Parameters ------------------- .. rest_parameters:: parameters.yaml - group_snapshots: group_snapshots - id: group_snapshot_id_req - name: name_group_snap_req - status: status_group_snap - description: description_group_snap_req - created_at: created_at - group_id: group_id - group_type_id: group_type_id - project_id: project_id_group_snapshot Response Example ---------------- .. literalinclude:: ./samples/group-snapshots-list-detailed-response.json :language: javascript List group snapshots ~~~~~~~~~~~~~~~~~~~~ .. rest_method:: GET /v3/{project_id}/group_snapshots Lists all group snapshots, since v3.31 if non-admin users specify invalid filters in the url, API will return bad request. Response codes -------------- .. rest_status_code:: success ../status.yaml - 200 .. rest_status_code:: error ../status.yaml - 400 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - all_tenants: all-tenants - sort: sort_group_snapshot - sort_key: sort_key_group_snapshot - sort_dir: sort_dir_group_snapshot - limit: limit_group_snapshot - offset: offset_group_snapshot - marker: marker_group_snapshot Response Parameters ------------------- .. rest_parameters:: parameters.yaml - group_snapshots: group_snapshots - id: group_snapshot_id_req - name: name_group_snap_req Response Example ---------------- .. literalinclude:: ./samples/group-snapshots-list-response.json :language: javascript Create group snapshot ~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: POST /v3/{project_id}/group_snapshots Creates a group snapshot. 
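A minimal illustrative request body for creating a group snapshot (the group ID, name, and description are placeholders; the authoritative sample is referenced under *Request Example* below) might look like:

.. code-block:: json

   {
       "group_snapshot": {
           "group_id": "6f519a48-3183-46cf-a32f-41815f813986",
           "name": "first-group-snapshot",
           "description": "Snapshot of the database volume group"
       }
   }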
Response codes -------------- .. rest_status_code:: success ../status.yaml - 202 .. rest_status_code:: error ../status.yaml - 400 - 404 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - group_snapshot: group_snapshot - name: name_group_snap - description: description_group_snap - group_id: group_id Request Example --------------- .. literalinclude:: ./samples/group-snapshots-create-request.json :language: javascript Response Parameters ------------------- .. rest_parameters:: parameters.yaml - group_snapshot: group_snapshot - id: group_snapshot_id_req - name: name_group_snap_req - group_type_id: group_type_id Response Example ---------------- .. literalinclude:: ./samples/group-snapshots-create-response.json :language: javascript Reset group snapshot status ~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: POST /v3/{project_id}/group_snapshots/{group_snapshot_id}/action Resets the status for a group snapshot. Specifies the ``reset_status`` action in the request body. Response codes -------------- .. rest_status_code:: success ../status.yaml - 202 .. rest_status_code:: error ../status.yaml - 400 - 404 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - group_snapshot_id: group_snapshot_id_path - reset_status: reset_status - status: status_group_snap Request Example --------------- .. literalinclude:: ./samples/group-snapshot-reset-status-request.json :language: javascript ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/group-type-specs.inc0000664000175000017500000000767600000000000022173 0ustar00zuulzuul00000000000000.. -*- rst -*- Group type specs (group_types, group_specs) =========================================== Create or update group specs for a group type ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: POST /v3/{project_id}/group_types/{group_type_id}/group_specs Creates group specs for a group type. If a specification key already exists in the group specs, this API updates that specification as well. Response codes -------------- .. rest_status_code:: success ../status.yaml - 202 .. rest_status_code:: error ../status.yaml - 400 - 403 - 404 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - group_type_id: group_type_id_path - group_specs: group_specs_req Request Example --------------- .. literalinclude:: ./samples/group-type-specs-create-request.json :language: javascript Response Parameters ------------------- .. rest_parameters:: parameters.yaml - group_specs: group_specs_req Response Example ---------------- .. literalinclude:: ./samples/group-type-specs-create-response.json :language: javascript List group specs for a group type ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: GET /v3/{project_id}/group_types/{group_type_id}/group_specs Lists all the group specs for a group type. Response codes -------------- .. rest_status_code:: success ../status.yaml - 200 .. rest_status_code:: error ../status.yaml - 403 - 404 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - group_type_id: group_type_id_path Response Parameters ------------------- .. rest_parameters:: parameters.yaml - group_specs: group_specs_req Response Example ---------------- .. literalinclude:: ./samples/group-type-specs-list-response.json :language: javascript Show one specific group spec for a group type ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ ..
rest_method:: GET /v3/{project_id}/group_types/{group_type_id}/group_specs/{spec_id} Shows a group spec for a group type. Response codes -------------- .. rest_status_code:: success ../status.yaml - 200 .. rest_status_code:: error ../status.yaml - 403 - 404 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - group_type_id: group_type_id_path - spec_id: spec_id Response Parameters ------------------- .. rest_parameters:: parameters.yaml - spec: spec_value Response Example ---------------- .. literalinclude:: ./samples/group-type-specs-show-response.json :language: javascript Update one specific group spec for a group type ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: PUT /v3/{project_id}/group_types/{group_type_id}/group_specs/{spec_id} Updates a group spec for a group type. Response codes -------------- .. rest_status_code:: success ../status.yaml - 200 .. rest_status_code:: error ../status.yaml - 403 - 404 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - group_type_id: group_type_id_path - spec_id: spec_id - spec: spec_value Request Example --------------- .. literalinclude:: ./samples/group-type-specs-update-request.json :language: javascript Response Parameters ------------------- .. rest_parameters:: parameters.yaml - spec: spec_value Response Example ---------------- .. literalinclude:: ./samples/group-type-specs-update-response.json :language: javascript Delete one specific group spec for a group type ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: DELETE /v3/{project_id}/group_types/{group_type_id}/group_specs/{spec_id} Deletes a group spec for a group type. Response codes -------------- .. rest_status_code:: success ../status.yaml - 202 .. rest_status_code:: error ../status.yaml - 403 - 404 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - group_type_id: group_type_id_path - spec_id: spec_id ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/group-types.inc0000664000175000017500000001213000000000000021230 0ustar00zuulzuul00000000000000.. -*- rst -*- Group types (group_types) ========================= To create a generic volume group, you must specify a group type. Update group type ~~~~~~~~~~~~~~~~~ .. rest_method:: PUT /v3/{project_id}/group_types/{group_type_id} Updates a group type. Response codes -------------- .. rest_status_code:: success ../status.yaml - 200 .. rest_status_code:: error ../status.yaml - 400 - 403 - 404 - 409 - 500 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - group_type_id: group_type_id_path - group_type: group_type - name: name_group - description: description_group_type_optional - is_public: is_public_group_type_optional Request Example --------------- .. literalinclude:: ./samples/group-type-update-request.json :language: javascript Response Parameters ------------------- .. rest_parameters:: parameters.yaml - group_type: group_type - id: group_type_id - is_public: is_public_group_type_required - group_specs: group_specs - description: description_group_type_required - name: name_group_type Response Example ---------------- .. literalinclude:: ./samples/group-type-show-response.json :language: javascript Show group type details ~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: GET /v3/{project_id}/group_types/{group_type_id} Shows details for a group type. Response codes -------------- ..
rest_status_code:: success ../status.yaml - 200 .. rest_status_code:: error ../status.yaml - 404 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - group_type_id: group_type_id_path Response Parameters ------------------- .. rest_parameters:: parameters.yaml - group_type: group_type - id: group_type_id - name: name_group_type - is_public: is_public_group_type_required - group_specs: group_specs - description: description_group_type_required Response Example ---------------- .. literalinclude:: ./samples/group-type-show-response.json :language: javascript Show default group type details ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: GET /v3/{project_id}/group_types/default Shows details for the default group type if configured. Response codes -------------- .. rest_status_code:: success ../status.yaml - 200 .. rest_status_code:: error ../status.yaml - 404 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path Response Parameters ------------------- .. rest_parameters:: parameters.yaml - group_type: group_type - id: group_type_id - name: name_group_type - is_public: is_public_group_type_required - group_specs: group_specs - description: description_group_type_required Response Example ---------------- .. literalinclude:: ./samples/group-type-default-response.json :language: javascript Delete group type ~~~~~~~~~~~~~~~~~ .. rest_method:: DELETE /v3/{project_id}/group_types/{group_type_id} Deletes a group type. Response codes -------------- .. rest_status_code:: success ../status.yaml - 202 .. rest_status_code:: error ../status.yaml - 400 - 403 - 404 Request ------- .. rest_parameters:: parameters.yaml - group_type_id: group_type_id_path - project_id: project_id_path List group types ~~~~~~~~~~~~~~~~ .. rest_method:: GET /v3/{project_id}/group_types Lists group types. Response codes -------------- .. rest_status_code:: success ../status.yaml - 200 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - sort: sort - sort_key: sort_key - sort_dir: sort_dir - limit: limit - offset: offset - marker: marker Response Parameters ------------------- .. rest_parameters:: parameters.yaml - group_types: group_types - id: group_type_id - group_specs: group_specs - name: name_group_type - is_public: is_public_group_type_required - description: description_group_type_required Response Example ---------------- .. literalinclude:: ./samples/group-types-list-response.json :language: javascript Create group type ~~~~~~~~~~~~~~~~~ .. rest_method:: POST /v3/{project_id}/group_types Creates a group type. Response codes -------------- .. rest_status_code:: success ../status.yaml - 202 .. rest_status_code:: error ../status.yaml - 400 - 403 - 404 - 409 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - group_type: group_type - name: name_group_type - description: description_group_type_optional - is_public: is_public_group_type_optional - group_specs: group_specs Request Example --------------- .. literalinclude:: ./samples/group-type-create-request.json :language: javascript Response Parameters ------------------- .. rest_parameters:: parameters.yaml - group_type: group_type - id: group_type_id - is_public: is_public_group_type_required - group_specs: group_specs - description: description_group_type_required - name: name_group_type Response Example ---------------- .. 
literalinclude:: ./samples/group-type-show-response.json :language: javascript ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/groups.inc0000664000175000017500000001670300000000000020263 0ustar00zuulzuul00000000000000.. -*- rst -*- Generic volume groups (groups) ============================== Generic volume groups enable you to create a group of volumes and manage them together. How are generic volume groups different from consistency groups? Currently, consistency groups in cinder only support consistent group snapshots. The consistency group construct cannot easily be extended to serve other purposes. A project may want to put volumes used in the same application together in a group so that it is easier to manage them together, and this group of volumes may or may not support consistent group snapshots. Generic volume groups were introduced to solve this problem. By decoupling the tight relationship between the group construct and the consistency concept, generic volume groups can be extended to support other features in the future. List groups ~~~~~~~~~~~ .. rest_method:: GET /v3/{project_id}/groups Lists groups. Since v3.31, if non-admin users specify invalid filters in the URL, the API will return a bad request. Response codes -------------- .. rest_status_code:: success ../status.yaml - 200 .. rest_status_code:: error ../status.yaml - 400 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - all_tenants: all-tenants - sort: sort - sort_key: sort_key - sort_dir: sort_dir - limit: limit - offset: offset - marker: marker Response Parameters ------------------- .. rest_parameters:: parameters.yaml - groups: groups - id: id - name: group_name Response Example ---------------- .. literalinclude:: ./samples/groups-list-response.json :language: javascript Create group ~~~~~~~~~~~~ .. rest_method:: POST /v3/{project_id}/groups Creates a group. Response codes -------------- .. rest_status_code:: success ../status.yaml - 202 .. rest_status_code:: error ../status.yaml - 400 - 404 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - group: group - description: description_group_false - availability_zone: availability_zone - group_type: group_type_id - volume_types: volume_types - name: group_name Request Example --------------- .. literalinclude:: ./samples/group-create-request.json :language: javascript Response Parameters ------------------- .. rest_parameters:: parameters.yaml - id: group_id_path - name: group_name Response Example ---------------- .. literalinclude:: ./samples/group-create-response.json :language: javascript Show group details ~~~~~~~~~~~~~~~~~~ .. rest_method:: GET /v3/{project_id}/groups/{group_id} Shows details for a group. Response codes -------------- .. rest_status_code:: success ../status.yaml - 200 .. rest_status_code:: error ../status.yaml - 404 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - group_id: group_id_path - list_volume: list_volume Response Parameters ------------------- ..
rest_parameters:: parameters.yaml - group: group - status: status_group - description: description_group_true - availability_zone: availability_zone - created_at: created_at - group_type: group_type_id - group_snapshot_id: group_snapshot_id - source_group_id: source_group_id - volume_types: volume_types - id: id - name: group_name - volumes: volume_ids - replication_status: group_replication_status - project_id: project_id_group Response Example ---------------- .. literalinclude:: ./samples/group-show-response.json :language: javascript Create group from source ~~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: POST /v3/{project_id}/groups/action Creates a group from source. Response codes -------------- .. rest_status_code:: success ../status.yaml - 202 .. rest_status_code:: error ../status.yaml - 400 - 404 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - create-from-src: create-from-src - description: description_group_false - group_snapshot_id: group_snapshot_id_req - source_group_id: source_group_id_req - name: group_name Request Example --------------- .. literalinclude:: ./samples/group-create-from-src-request.json :language: javascript Response Parameters ------------------- .. rest_parameters:: parameters.yaml - id: group_id_path - name: group_name Response Example ---------------- .. literalinclude:: ./samples/group-create-from-src-response.json :language: javascript Delete group ~~~~~~~~~~~~ .. rest_method:: POST /v3/{project_id}/groups/{group_id}/action Deletes a group. Response codes -------------- .. rest_status_code:: success ../status.yaml - 202 .. rest_status_code:: error ../status.yaml - 400 - 404 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - group_id: group_id_path - delete: delete - delete-volumes: delete-volumes Request Example --------------- .. literalinclude:: ./samples/group-delete-request.json :language: javascript List groups with details ~~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: GET /v3/{project_id}/groups/detail Lists groups with details. Since v3.31, if non-admin users specify invalid filters in the URL, the API will return a bad request. Response codes -------------- .. rest_status_code:: success ../status.yaml - 200 .. rest_status_code:: error ../status.yaml - 400 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - all_tenants: all-tenants - sort: sort - sort_key: sort_key - sort_dir: sort_dir - limit: limit - offset: offset - marker: marker - list_volume: list_volume Response Parameters ------------------- .. rest_parameters:: parameters.yaml - groups: groups - status: status_group - description: description_group_true - availability_zone: availability_zone - created_at: created_at - group_type: group_type_id - group_snapshot_id: group_snapshot_id - source_group_id: source_group_id - volume_types: volume_types - id: group_id_path - name: name - volumes: volume_ids - replication_status: group_replication_status - project_id: project_id_group Response Example ---------------- .. literalinclude:: ./samples/groups-list-detailed-response.json :language: javascript Update group ~~~~~~~~~~~~ .. rest_method:: PUT /v3/{project_id}/groups/{group_id} Updates a group. Response codes -------------- .. rest_status_code:: success ../status.yaml - 202 .. rest_status_code:: error ../status.yaml - 400 - 404 Request ------- ..
rest_parameters:: parameters.yaml - project_id: project_id_path - group_id: group_id_path - group: group - remove_volumes: remove_volumes - description: description_group_false - add_volumes: add_volumes - name: group_name Request Example --------------- .. literalinclude:: ./samples/group-update-request.json :language: javascript Reset group status ~~~~~~~~~~~~~~~~~~ .. rest_method:: POST /v3/{project_id}/groups/{group_id}/action Resets the status for a group. Specify the ``reset_status`` action in the request body. Response codes -------------- .. rest_status_code:: success ../status.yaml - 202 .. rest_status_code:: error ../status.yaml - 400 - 404 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - group_id: group_id_path - reset_status: reset_status - status: status Request Example --------------- .. literalinclude:: ./samples/group-reset-status-request.json :language: javascript ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/hosts.inc0000664000175000017500000000377600000000000020102 0ustar00zuulzuul00000000000000.. -*- rst -*- Hosts extension (os-hosts) ========================== Administrators only, depending on policy settings. Lists and shows hosts. List all hosts for a project ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: GET /v3/{admin_project_id}/os-hosts Lists summary info for all hosts that are not disabled. Response codes -------------- .. rest_status_code:: success ../status.yaml - 200 .. rest_status_code:: error ../status.yaml - 400 - 401 - 403 Request ------- .. rest_parameters:: parameters.yaml - admin_project_id: admin_project_id Response Parameters ------------------- .. rest_parameters:: parameters.yaml - hosts: hosts - service-status: host_service_status - service: host_service - zone: availability_zone_required - service-state: service_state - host_name: host_name_backend - last-update: updated_at Response Example ---------------- .. literalinclude:: ./samples/hosts-list-response.json :language: javascript Show Host Details for a project ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: GET /v3/{admin_project_id}/os-hosts/{host_name} Shows volume and snapshot details for a cinder-volume host. *Note:* This API is meant specifically for cinder-volume hosts only. It is not valid against other Cinder service hosts or hosts where the cinder-volume service has been disabled. Response codes -------------- .. rest_status_code:: success ../status.yaml - 200 .. rest_status_code:: error ../status.yaml - 400 - 401 - 403 - 404 Request ------- .. rest_parameters:: parameters.yaml - admin_project_id: admin_project_id - host_name: hostname Response -------- .. rest_parameters:: parameters.yaml - host: host - volume_count: total_count - total_volume_gb: totalGigabytesUsedStr - total_snapshot_gb: totalSnapGigabytesUsed - project: project_id_host - host: host_name_backend - snapshot_count: totalSnapshotsUsed Response Example ---------------- .. literalinclude:: ./samples/hosts-get-response.json :language: javascript ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/index.rst0000664000175000017500000000547300000000000020104 0ustar00zuulzuul00000000000000:tocdepth: 2 ============================== Block Storage API V3 (CURRENT) ============================== .. note:: The URL for most API methods includes a ``{project_id}`` placeholder that represents the caller's project ID.
As of v3.67, the project_id is optional in the URL, and the following are equivalent: * ``GET /v3/{project_id}/volumes`` * ``GET /v3/volumes`` In both instances, the actual project ID used by the API method is the one in the caller's keystone context. For that reason, including a project ID in the URL is redundant. The v3.67 microversion is only used as an indicator that the API accepts a URL without a ``{project_id}`` segment, and this applies to all requests regardless of the microversion in the request. For example, an API node serving v3.67 or greater will accept a URL without a ``{project_id}`` segment even if the request asks for v3.0. Likewise, it will accept a URL containing a ``{project_id}`` segment even if the request asks for v3.67. .. rest_expand_all:: .. First thing we want to see is the version discovery document. .. include:: api-versions.inc .. include:: volumes-v3-versions.inc .. Next top-level thing could be listing extensions available on this endpoint. .. include:: volumes-v3-extensions.inc .. To create a volume, I might need a volume type, so list those next. .. include:: volumes-v3-types.inc .. include:: volume-type-access.inc .. include:: default-types.inc .. Now my primary focus is on volumes and what I can do with them. .. include:: volumes-v3-volumes.inc .. include:: volumes-v3-volumes-actions.inc .. List the other random volume APIs in just alphabetical order. .. include:: volume-manage.inc .. include:: volumes-v3-snapshots.inc .. include:: volumes-v3-snapshots-actions.inc .. include:: snapshot-manage.inc .. include:: os-vol-transfer-v3.inc .. include:: vol-transfer-v3.inc .. Now the other random things in alphabetical order. .. include:: attachments.inc .. include:: availability-zones-v3.inc .. include:: os-vol-pool-v3.inc .. include:: ext-backups.inc .. include:: ext-backups-actions-v3.inc .. include:: capabilities-v3.inc .. include:: clusters.inc .. include:: consistencygroups-v3.inc .. include:: os-cgsnapshots-v3.inc .. include:: os-services.inc .. include:: groups.inc .. include:: group-replication.inc .. include:: group-snapshots.inc .. include:: group-types.inc .. include:: group-type-specs.inc .. include:: hosts.inc .. include:: limits.inc .. include:: messages.inc .. include:: resource-filters.inc .. include:: qos-specs-v3-qos-specs.inc .. quota-sets should arguably live closer to limits, but that would mess up our nice alphabetical ordering .. include:: quota-classes.inc .. include:: quota-sets.inc .. include:: worker-cleanup.inc .. valid values for boolean parameters. .. include:: valid-boolean-values.inc ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/limits.inc0000664000175000017500000000254100000000000020240 0ustar00zuulzuul00000000000000.. -*- rst -*- Limits (limits) =============== Shows absolute limits for a project. An absolute limit value of ``-1`` indicates that the absolute limit for the item is infinite. Show absolute limits for project ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: GET /v3/{project_id}/limits Shows absolute limits for a project. An absolute limit value of ``-1`` indicates that the absolute limit for the item is infinite. Response codes -------------- .. rest_status_code:: success ../status.yaml - 200 .. rest_status_code:: error ../status.yaml - 403 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path Response Parameters ------------------- .. 
rest_parameters:: parameters.yaml - limits: limits - rate: rate - absolute: absolute - totalSnapshotsUsed: totalSnapshotsUsed_int - maxTotalBackups: maxTotalBackups - maxTotalVolumeGigabytes: maxTotalVolumeGigabytes - maxTotalSnapshots: maxTotalSnapshots - maxTotalBackupGigabytes: maxTotalBackupGigabytes - totalBackupGigabytesUsed: totalBackupGigabytesUsed - maxTotalVolumes: maxTotalVolumes - totalVolumesUsed: totalVolumesUsed - totalBackupsUsed: totalBackupsUsed - totalGigabytesUsed: totalGigabytesUsed Response Example ---------------- .. literalinclude:: ./samples/limits/limits-show-response.json :language: javascript ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/messages.inc0000664000175000017500000000507600000000000020554 0ustar00zuulzuul00000000000000.. -*- rst -*- Messages (messages) =================== Lists all, shows, and deletes messages. These are error messages generated by failed operations as a way to find out what happened when an asynchronous operation failed. Delete message ~~~~~~~~~~~~~~ .. rest_method:: DELETE /v3/{project_id}/messages/{message_id} Deletes a message. Response codes -------------- .. rest_status_code:: success ../status.yaml - 204 .. rest_status_code:: error ../status.yaml - 400 - 404 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - message_id: message_id Show message details ~~~~~~~~~~~~~~~~~~~~ .. rest_method:: GET /v3/{project_id}/messages/{message_id} Shows details for a message. Response codes -------------- .. rest_status_code:: success ../status.yaml - 200 .. rest_status_code:: error ../status.yaml - 400 - 404 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - message_id: message_id Response Parameters ------------------- .. rest_parameters:: parameters.yaml - message: user_message - request_id: request_id - links: links_message - message_level: message_level - event_id: event_id - created_at: created_at - guaranteed_until: guaranteed_until - resource_uuid: resource_uuid - id: id_message - resource_type: resource_type - user_message: user_message Response Example ---------------- .. literalinclude:: ./samples/messages-show-response.json :language: javascript List messages ~~~~~~~~~~~~~ .. rest_method:: GET /v3/{project_id}/messages Lists all messages. Since v3.31, if non-admin users specify invalid filters in the URL, the API will return a bad request. Response codes -------------- .. rest_status_code:: success ../status.yaml - 200 .. rest_status_code:: error ../status.yaml - 400 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - sort: sort - sort_key: sort_key - sort_dir: sort_dir - limit: limit - offset: offset - marker: marker Response Parameters ------------------- .. rest_parameters:: parameters.yaml - messages: user_messages - request_id: request_id - links: links_message - message_level: message_level - event_id: event_id - created_at: created_at - guaranteed_until: guaranteed_until - resource_uuid: resource_uuid - id: id_message - resource_type: resource_type - user_message: user_message Response Example ---------------- .. literalinclude:: ./samples/messages-list-response.json :language: javascript ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/os-cgsnapshots-v3.inc0000664000175000017500000000733600000000000022245 0ustar00zuulzuul00000000000000..
-*- rst -*- Consistency group snapshots (DEPRECATED) ======================================== Lists all, lists all with details, shows details for, creates, and deletes consistency group snapshots. Delete a consistency group snapshot ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: DELETE /v3/{project_id}/cgsnapshots/{cgsnapshot_id} Deletes a consistency group snapshot. Response codes -------------- .. rest_status_code:: success ../status.yaml - 202 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - cgsnapshot_id: cgsnapshot_id_path Show consistency group snapshot detail ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: GET /v3/{project_id}/cgsnapshots/{cgsnapshot_id} Shows details for a consistency group snapshot. Response codes -------------- .. rest_status_code:: success ../status.yaml - 200 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - cgsnapshot_id: cgsnapshot_id_path Response Parameters ------------------- .. rest_parameters:: parameters.yaml - cgsnapshot: cgsnapshot - status: status - description: description_cg_snapshot_true - created_at: created_at - consistencygroup_id: consistencygroup_id_required - id: id - name: name_cgsnap Response Example ---------------- .. literalinclude:: ./samples/cgsnapshots-show-response.json :language: javascript List all consistency group snapshots with details ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: GET /v3/{project_id}/cgsnapshots/detail Lists all consistency group snapshots with details. Response codes -------------- .. rest_status_code:: success ../status.yaml - 200 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - all_tenants: all-tenants Response Parameters ------------------- .. rest_parameters:: parameters.yaml - cgsnapshots: cgsnapshots - status: status - description: description_cg_snapshot_true - created_at: created_at - consistencygroup_id: consistencygroup_id_required - id: id - name: name_cgsnap Response Example ---------------- .. literalinclude:: ./samples/cgsnapshots-list-detailed-response.json :language: javascript List all consistency group snapshots ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: GET /v3/{project_id}/cgsnapshots Lists all consistency group snapshots. Response codes -------------- .. rest_status_code:: success ../status.yaml - 200 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - all_tenants: all-tenants Response Parameters ------------------- .. rest_parameters:: parameters.yaml - cgsnapshots: cgsnapshots - id: id - name: name_cgsnap Response Example ---------------- .. literalinclude:: ./samples/cgsnapshots-list-response.json :language: javascript Create a consistency group snapshot ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: POST /v3/{project_id}/cgsnapshots Creates a consistency group snapshot. Response codes -------------- .. rest_status_code:: success ../status.yaml - 202 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - cgsnapshot: cgsnapshot - name: name_snap - consistencygroup_id: consistencygroup_id_required - description: description_cg_snapshot_false Request Example --------------- .. literalinclude:: ./samples/cgsnapshots-create-request.json :language: javascript Response Parameters ------------------- .. 
rest_parameters:: parameters.yaml - status: status - description: description_cg_snapshot_true - created_at: created_at - consistencygroup_id: consistencygroup_id_required - id: id - name: name_cgsnap ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/os-services.inc0000664000175000017500000002012400000000000021176 0ustar00zuulzuul00000000000000.. -*- rst -*- Services (os-services) ====================== Administrator only. Lists all Cinder services, enables or disables a Cinder service, freezes or thaws the specified cinder-volume host, and fails over a replicating cinder-volume host. List All Cinder Services ~~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: GET /v3/{project_id}/os-services Lists all Cinder services. Provides details on why any services were disabled. Response codes -------------- .. rest_status_code:: success ../status.yaml - 200 .. rest_status_code:: error ../status.yaml - 400 - 401 - 403 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - host: host_query - binary: service_binary_query Response Parameters ------------------- .. rest_parameters:: parameters.yaml - services: services - binary: binary_required - disabled_reason: disabled_reason_body_req - host: host_name_body_req - state: service_state_up_down - status: service_status - frozen: frozen - updated_at: updated - zone: availability_zone_required - cluster: cluster_cvol - replication_status: replication_status_cvol - active_backend_id: active_backend_id - backend_state: backend_state Response Example ---------------- .. literalinclude:: ./samples/services-list-response.json :language: javascript Disable a Cinder Service ~~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: PUT /v3/{project_id}/os-services/disable Disables a Cinder service. Specify the service by its host name and binary name. Response codes -------------- .. rest_status_code:: success ../status.yaml - 200 .. rest_status_code:: error ../status.yaml - 400 - 401 - 403 - 404 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - host: host_name_body_req - binary: binary_required Request Example --------------- .. literalinclude:: ./samples/services-disable-request.json :language: javascript Response Parameters ------------------- .. rest_parameters:: parameters.yaml - disabled: disabled_required - status: service_status - host: host_name_body_req - service: service_key - binary: binary_required Response Example ---------------- .. literalinclude:: ./samples/services-disable-response.json :language: javascript Log Disabled Cinder Service Information ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: PUT /v3/{project_id}/os-services/disable-log-reason Logs information to the Cinder service table about why a Cinder service was disabled. Specify the service by its host name and binary name. Response codes -------------- .. rest_status_code:: success ../status.yaml - 200 .. rest_status_code:: error ../status.yaml - 400 - 401 - 403 - 404 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - binary: binary_required - host: host_name_body_req - disabled_reason: disabled_reason_body Request Example --------------- .. literalinclude:: ./samples/services-disable-log-request.json :language: javascript Response -------- ..
rest_parameters:: parameters.yaml - disabled: disabled_required - status: service_status - host: host_name_body_req - service: service_key - binary: binary_required - disabled_reason: disabled_reason_body_req Response Example ---------------- .. literalinclude:: ./samples/services-disable-log-response.json :language: javascript Enable a Cinder Service ~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: PUT /v3/{project_id}/os-services/enable Enables a Cinder service. Specify the service by its host name and binary name. Response codes -------------- .. rest_status_code:: success ../status.yaml - 200 .. rest_status_code:: error ../status.yaml - 400 - 401 - 403 - 404 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - binary: binary_required - host: host_name_body_req Request Example --------------- .. literalinclude:: ./samples/services-enable-request.json :language: javascript Response Parameters ------------------- .. rest_parameters:: parameters.yaml - disabled: disabled_required - status: service_status - host: host_name_body_req - service: service_key - binary: binary_required - disabled_reason: disabled_reason_body_req Response Example ---------------- .. literalinclude:: ./samples/services-enable-response.json :language: javascript Get Current Log Levels for Cinder Services ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: PUT /v3/{project_id}/os-services/get-log Get current log levels for services, supported since v3.32. Filter the services by binary, server name and prefix for the log path. Response codes -------------- .. rest_status_code:: success ../status.yaml - 200 .. rest_status_code:: error ../status.yaml - 400 - 401 - 403 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - binary: binary - server: host_name_body - prefix: prefix Request Example --------------- .. literalinclude:: ./samples/services-get-log-request.json :language: javascript Response Parameters ------------------- .. rest_parameters:: parameters.yaml - log_levels: log_levels - binary: binary_required - host: host_name_body_req - levels: levels Response Example ---------------- .. literalinclude:: ./samples/services-get-log-response.json :language: javascript Set Log Levels of Cinder Services Dynamically ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: PUT /v3/{project_id}/os-services/set-log Set log levels of services dynamically, supported since v3.32. Filter the services by binary, server name and prefix for the log path. Response codes -------------- .. rest_status_code:: success ../status.yaml - 202 .. rest_status_code:: error ../status.yaml - 400 - 401 - 403 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - binary: binary - server: host_name_body - prefix: prefix - levels: levels_set Request Example --------------- .. literalinclude:: ./samples/services-set-log-request.json :language: javascript Freeze a Cinder Backend Host ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: PUT /v3/{project_id}/os-services/freeze Freeze and disable the specified cinder-volume host, and set ``Disabled Reason`` of Cinder service table to ``frozen``. Response codes -------------- .. rest_status_code:: success ../status.yaml - 200 .. rest_status_code:: error ../status.yaml - 400 - 401 - 403 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - host: host_name_body_req Request Example --------------- .. 
literalinclude:: ./samples/services-freeze-request.json :language: javascript Thaw a Cinder Backend Host ~~~~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: PUT /v3/{project_id}/os-services/thaw Thaw and enable the specified cinder-volume host, and clear the ``Disabled Reason`` in the Cinder service table. Response codes -------------- .. rest_status_code:: success ../status.yaml - 200 .. rest_status_code:: error ../status.yaml - 400 - 401 - 403 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - host: host_name_body_req Request Example --------------- .. literalinclude:: ./samples/services-thaw-request.json :language: javascript Failover a Cinder Backend Host ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: PUT /v3/{project_id}/os-services/failover_host Failover a replicating cinder-volume host. Since Cinder Volume API Version 3.26, you can use ``failover`` in the request URL instead of ``failover_host``, and the cluster name in the request body is supported. Response codes -------------- .. rest_status_code:: success ../status.yaml - 202 .. rest_status_code:: error ../status.yaml - 400 - 401 - 403 - 404 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - host: host_name_body_req - backend_id: backend_id - cluster: cluster_cvol Request Example --------------- .. literalinclude:: ./samples/services-failover-host-request.json :language: javascript ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/os-vol-pool-v3.inc0000664000175000017500000000234300000000000021453 0ustar00zuulzuul00000000000000.. -*- rst -*- Back-end storage pools ====================== Administrator only. Lists all back-end storage pools that are known to the scheduler service. List all back-end storage pools ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: GET /v3/{project_id}/scheduler-stats/get_pools Lists all back-end storage pools. Since v3.31, if non-admin users specify invalid filters in the URL, the API will return a bad request. Response codes -------------- .. rest_status_code:: success ../status.yaml - 200 .. rest_status_code:: error ../status.yaml - 400 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - detail: detail Response Parameters ------------------- .. rest_parameters:: parameters.yaml - pools: pools - updated: updated - QoS_support: QoS_support - name: name_backend_pool - total_capacity_gb: total_capacity - volume_backend_name: volume_backend_name - capabilities: capabilities - free_capacity_gb: free_capacity - driver_version: driver_version - reserved_percentage: reserved_percentage - storage_protocol: storage_protocol Response Example ---------------- .. literalinclude:: ./samples/pools-list-detailed-response.json :language: javascript ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/os-vol-transfer-v3.inc0000664000175000017500000001067300000000000022333 0ustar00zuulzuul00000000000000.. -*- rst -*- Volume transfer =============== Transfers a volume from one user to another user. Accept a volume transfer ~~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: POST /v3/{project_id}/os-volume-transfer/{transfer_id}/accept Accepts a volume transfer. Response codes -------------- .. rest_status_code:: success ../status.yaml - 202 Request ------- ..
rest_parameters:: parameters.yaml - project_id: project_id_path - transfer_id: transfer_id - auth_key: auth_key Request Example --------------- .. literalinclude:: ./samples/volume_transfer/volume-transfer-accept-request.json :language: javascript Response Parameters ------------------- .. rest_parameters:: parameters.yaml - transfer: transfer - volume_id: volume_id - id: transfer_obj_id - links: links - name: name Response Example ---------------- .. literalinclude:: ./samples/volume_transfer/volume-transfer-accept-response.json :language: javascript Create a volume transfer ~~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: POST /v3/{project_id}/os-volume-transfer Creates a volume transfer. **Preconditions** * The volume ``status`` must be ``available`` * Transferring encrypted volumes is not supported * If the volume has snapshots, those snapshots must be ``available`` Response codes -------------- .. rest_status_code:: success ../status.yaml - 202 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - transfer: transfer - name: name - volume_id: volume_id Request Example --------------- .. literalinclude:: ./samples/volume_transfer/volume-transfer-create-request.json :language: javascript Response Parameters ------------------- .. rest_parameters:: parameters.yaml - auth_key: auth_key - links: links - created_at: created_at - volume_id: volume_id - id: transfer_obj_id - name: name Response Example ---------------- .. literalinclude:: ./samples/volume_transfer/volume-transfer-create-response.json :language: javascript List volume transfers for a project ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: GET /v3/{project_id}/os-volume-transfer Lists volume transfers. Response codes -------------- .. rest_status_code:: success ../status.yaml - 200 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - all_tenants: all-tenants Response Parameters ------------------- .. rest_parameters:: parameters.yaml - volume_id: volume_id - id: transfer_obj_id - links: links - name: name Response Example ---------------- .. literalinclude:: ./samples/volume_transfer/volume-transfers-list-response.json :language: javascript Show volume transfer detail ~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: GET /v3/{project_id}/os-volume-transfer/{transfer_id} Shows details for a volume transfer. Response codes -------------- .. rest_status_code:: success ../status.yaml - 200 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - transfer_id: transfer_id Response Parameters ------------------- .. rest_parameters:: parameters.yaml - created_at: created_at - volume_id: volume_id - id: transfer_obj_id - links: links - name: name Response Example ---------------- .. literalinclude:: ./samples/volume_transfer/volume-transfer-show-response.json :language: javascript Delete a volume transfer ~~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: DELETE /v3/{project_id}/os-volume-transfer/{transfer_id} Deletes a volume transfer. Response codes -------------- .. rest_status_code:: success ../status.yaml - 202 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - transfer_id: transfer_id List volume transfers and details ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: GET /v3/{project_id}/os-volume-transfer/detail Lists volume transfers, with details. Response codes -------------- .. rest_status_code:: success ../status.yaml - 200 Request ------- .. 
rest_parameters:: parameters.yaml - project_id: project_id_path - all_tenants: all-tenants Response Parameters ------------------- .. rest_parameters:: parameters.yaml - transfers: transfers - created_at: created_at - volume_id: volume_id - id: transfer_obj_id - links: links - name: name Response Example ---------------- .. literalinclude:: ./samples/volume_transfer/volume-transfers-list-detailed-response.json :language: javascript ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/parameters.yaml0000664000175000017500000024532000000000000021277 0ustar00zuulzuul00000000000000# variables in header x-openstack-request-id: description: > foo in: header required: false type: string # variables in path admin_project_id: description: | The UUID of the administrative project. in: path required: true type: string attachment_id_path: description: | The ID of the attachment. in: path required: true type: string backup_id_required: description: | The UUID for a backup. in: path required: true type: string cgsnapshot_id_path: description: | The ID of the consistency group snapshot. in: path required: true type: string cluster_name_path: description: | The name of the cluster. in: path required: true type: string consistencygroup_id_path: description: | The ID of the consistency group. in: path required: true type: string encryption_id: description: | The ID of the encryption type. in: path required: true type: string group_id_path: description: | The ID of the group. in: path required: true type: string group_snapshot_id_path: description: | The ID of the group snapshot. in: path required: true type: string group_type_id_path: description: | The UUID for an existing group type. in: path required: true type: string hostname: description: | The name of the host that hosts the storage back end. in: path required: true type: string init_at: description: | The date and time when the resource was initiated. The date and time stamp format is `ISO 8601 `_: :: CCYY-MM-DDThh:mm:ss±hh:mm in: path required: true type: string key_encrypt_spec: description: | The key name of the encryption spec for a volume type. in: path required: true type: string key_extra_spec: description: | The key name of the extra spec for a volume type. in: path required: true type: string key_path: description: | The metadata key name for the metadata that you want to remove. in: path required: true type: string key_update: description: | The metadata key name for the metadata that you want to update. in: path required: true type: string key_view: description: | The metadata key name for the metadata that you want to see. in: path required: true type: string list_volume: description: | Show volume ids in this group. Default is False. in: path required: false type: string min_version: 3.25 message_id: description: | The UUID of the message. in: path required: true type: string project_id_path: description: | The UUID of the project in a multi-tenancy cloud. in: path required: true type: string qos_id: description: | The ID of the QoS specification. in: path required: true type: string quota_class_name: description: The name of the quota class for which to set quotas. in: path required: true type: string quotas_project_id: description: | The UUID of the project in a multi-tenancy cloud. in: path required: true type: string snapshot_id_path: description: | The UUID of the snapshot. 
in: path required: true type: string spec_id: description: | The id (key) of the group specification. in: path required: true type: string transfer_id: description: | The unique identifier for a volume transfer. in: path required: true type: string volume_id_path: description: | The UUID of the volume. in: path required: true type: string volume_type_id: description: | The UUID for an existing volume type. in: path required: true type: string volume_type_name_or_id: description: | The name or UUID for an existing volume type. in: path required: true type: string # variables in query action: description: | The action. Valid values are "set" or "unset." in: query required: true type: string all-tenants: description: | Shows details for all projects. Admin only. in: query required: false type: string bootable_query: description: | Filters results by bootable status. Default=None. in: query required: false type: boolean cascade: description: | Remove any snapshots along with the volume. Default=False. in: query required: false type: boolean cluster_active_backend_id_query: description: | Filter the cluster list result by the ID of the active backend. in: query required: false type: string cluster_binary_query: description: | Filter the cluster list result by binary name of the clustered services. One of ``cinder-api``, ``cinder-scheduler``, ``cinder-volume`` or ``cinder-backup``. in: query required: false type: string cluster_disabled_query: description: | Filter the cluster list result by status. in: query required: false type: boolean cluster_frozen_query: description: | Filter the cluster list result by whether it's frozen. in: query required: false type: boolean cluster_is_up_query: description: | Filter the cluster list result by state. in: query required: false type: boolean cluster_name_query: description: | Filter the cluster list result by cluster name. in: query required: false type: string cluster_num_down_hosts_query: description: | Filter the cluster list result by number of down hosts. in: query required: false type: integer cluster_num_hosts_query: description: | Filter the cluster list result by number of hosts. in: query required: false type: integer cluster_replication_status_query: description: | Filter the cluster list result by replication status. One of: ``enabled``, ``disabled``. in: query required: false type: string detail: description: | Indicates whether to show pool details or only pool names in the response. Set to ``true`` to show pool details. Set to ``false`` to show only pool names. Default is ``false``. in: query required: false type: boolean filter_consumes_quota: description: | Filters results by ``consumes_quota`` field. Resources that don't use quotas are usually temporary internal resources created to perform an operation. Default is to not filter by it. Filtering by this option may not always be possible in a cloud, see :ref:`List Resource Filters ` to determine whether this filter is available in your cloud. in: query required: false type: boolean min_version: 3.65 filter_created_at: description: | Filters results by a time that resources are created at with time comparison operators: gt/gte/eq/neq/lt/lte. The date and time stamp format is ISO 8601: CCYY-MM-DDThh:mm:ss±hh:mm. The ±hh:mm value, if included, returns the time zone as an offset from UTC. in: query required: false type: string min_version: 3.60 filter_updated_at: description: | Filters results by a time that resources are updated at with time comparison operators: gt/gte/eq/neq/lt/lte.
The date and time stamp format is ISO 8601: CCYY-MM-DDThh:mm:ss±hh:mm. The ±hh:mm value, if included, returns the time zone as an offset from UTC. in: query required: false type: string min_version: 3.60 force_del_qos: description: | To delete a QoS specification even if it is in- use, set to ``true``. Default is ``false``. in: query required: false type: boolean force_vol_del: description: | Indicates whether to force delete a volume even if the volume is in deleting or error_deleting. Default is ``false``. in: query required: false type: boolean min_version: 3.23 host_query: description: | Filter the service list result by host name of the service. in: query required: false type: string image-id: description: | Creates volume from image ID. Default=None. in: query required: false type: string is_public_volume_type_query: description: | Filter the volume type by public visibility. See :ref:`valid boolean values `. in: query required: false type: boolean limit: description: | Requests a page size of items. Returns a number of items up to a limit value. Use the ``limit`` parameter to make an initial limited request and use the ID of the last-seen item from the response as the ``marker`` parameter value in a subsequent limited request. in: query required: false type: integer limit_group_snapshot: description: | Requests a page size of items. Returns a number of items up to a limit value. Use the ``limit`` parameter to make an initial limited request and use the ID of the last-seen item from the response as the ``marker`` parameter value in a subsequent limited request. in: query required: false type: integer min_version: 3.29 limit_transfer: description: | Requests a page size of items. Returns a number of items up to a limit value. Use the ``limit`` parameter to make an initial limited request and use the ID of the last-seen item from the response as the ``marker`` parameter value in a subsequent limited request. in: query required: false type: integer min_version: 3.59 marker: description: | The ID of the last-seen item. Use the ``limit`` parameter to make an initial limited request and use the ID of the last-seen item from the response as the ``marker`` parameter value in a subsequent limited request. in: query required: false type: string marker_group_snapshot: description: | The ID of the last-seen item. Use the ``limit`` parameter to make an initial limited request and use the ID of the last-seen item from the response as the ``marker`` parameter value in a subsequent limited request. in: query required: false type: string min_version: 3.29 marker_transfer: description: | The ID of the last-seen item. Use the ``limit`` parameter to make an initial limited request and use the ID of the last-seen item from the response as the ``marker`` parameter value in a subsequent limited request. in: query required: false type: string min_version: 3.59 metadata_query: description: | Filters results by a metadata key and value pair. Default=None. in: query required: true type: object migration_status_query: description: | Filters results by a migration status. Default=None. Admin only. in: query required: false name_volume: description: | Filters results by a name. Default=None. in: query required: false type: string offset: description: | Used in conjunction with ``limit`` to return a slice of items. ``offset`` is where to start in the list. in: query required: false type: integer offset_group_snapshot: description: | Used in conjunction with ``limit`` to return a slice of items. 
``offset`` is where to start in the list. in: query required: false type: integer min_version: 3.29 offset_transfer: description: | Used in conjunction with ``limit`` to return a slice of items. ``offset`` is where to start in the list. in: query required: false type: integer min_version: 3.59 resource: description: | Filter for resource name. in: query required: false type: string service_binary_query: description: | Filter the service list result by binary name of the service. in: query required: false type: string sort: description: | Comma-separated list of sort keys and optional sort directions in the form of ``< key > [: < direction > ]``. A valid direction is ``asc`` (ascending) or ``desc`` (descending). in: query required: false type: string sort_dir: description: | Sorts by one or more sets of attribute and sort direction combinations. If you omit the sort direction in a set, default is ``desc``. Deprecated in favour of the combined ``sort`` parameter. in: query required: false type: string sort_dir_group_snapshot: description: | Sorts by one or more sets of attribute and sort direction combinations. If you omit the sort direction in a set, default is ``desc``. Deprecated in favour of the combined ``sort`` parameter. in: query required: false type: string min_version: 3.29 sort_dir_transfer: description: | Sorts by one or more sets of attribute and sort direction combinations. If you omit the sort direction in a set, default is ``desc``. Deprecated in favour of the combined ``sort`` parameter. in: query required: false type: string min_version: 3.59 sort_group_snapshot: description: | Comma-separated list of sort keys and optional sort directions in the form of ``< key > [: < direction > ]``. A valid direction is ``asc`` (ascending) or ``desc`` (descending). in: query required: false type: string min_version: 3.29 sort_key: description: | Sorts by an attribute. A valid value is ``name``, ``status``, ``container_format``, ``disk_format``, ``size``, ``id``, ``created_at``, or ``updated_at``. Default is ``created_at``. The API uses the natural sorting direction of the ``sort_key`` attribute value. Deprecated in favour of the combined ``sort`` parameter. in: query required: false type: string sort_key_group_snapshot: description: | Sorts by an attribute. A valid value is ``name``, ``status``, ``group_id``, ``group_type_id``, ``size``, ``id``, ``created_at``, or ``updated_at``. Default is ``created_at``. The API uses the natural sorting direction of the ``sort_key`` attribute value. Deprecated in favour of the combined ``sort`` parameter. in: query required: false type: string min_version: 3.29 sort_key_transfer: description: | Sorts by an attribute. Default is ``created_at``. The API uses the natural sorting direction of the ``sort_key`` attribute value. Deprecated in favour of the combined ``sort`` parameter. in: query required: false type: string min_version: 3.59 sort_transfer: description: | Comma-separated list of sort keys and optional sort directions in the form of ``< key > [: < direction > ]``. A valid direction is ``asc`` (ascending) or ``desc`` (descending). in: query required: false type: string min_version: 3.59 status_query: description: | Filters results by a status. Default=None. in: query required: false type: boolean usage: description: | Show project's quota usage information. Default is ``false``. in: query required: false type: boolean vol_type_id_query: description: | A volume type ID. 
in: query required: true type: string with_count: description: | Whether to show ``count`` in API response or not, default is ``False``. in: query required: false type: boolean min_version: 3.45 # variables in body absolute: description: | An ``absolute`` limits object. in: body required: true type: object accepted: description: | Records if this transfer was accepted or not. in: body required: false type: boolean min_version: 3.57 active_backend_id: description: | The ID of active storage backend. Only in ``cinder-volume`` service. in: body required: false type: string add_project_access: description: | Adds volume type access to a project. in: body required: true type: object add_volumes: description: | One or more volume UUIDs, separated by commas, to add to the volume group or consistency group. in: body required: false type: string alias: description: | The alias for the extension. For example, "FOXNSOX", "os- availability-zone", "os-extended-quotas", "os- share-unmanage" or "os-used-limits." in: body required: true type: string allow_attached_volume: description: | Whether to allow failover if any volumes are 'in-use'. See :ref:`valid boolean values ` in: body required: true type: boolean attach_mode: description: | The attach mode of attachment, acceptable values are read-only ('ro') and read-and-write ('rw'). in: body required: false type: string min_version: 3.54 attach_mode_required: description: | The attach mode of attachment, read-only ('ro') or read-and-write ('rw'), default is 'rw'. in: body required: true type: string attach_status: description: | The volume attach status. in: body required: false type: string attached_at: description: | The time when attachment is attached. in: body required: true type: string attachment: description: | An attachment object. in: body required: true type: object attachment_id: description: | The ID of the attachment. in: body required: false type: string attachment_id_required: description: | The ID of attachment. in: body required: true type: string attachments: description: | Instance attachment information. If this volume is attached to a server instance, the attachments list includes the UUID of the attached server, an attachment UUID, the name of the attached host, if any, the volume UUID, the device, and the device UUID. Otherwise, this list is empty. For example:: [ { 'server_id': '6c8cf6e0-4c8f-442f-9196-9679737feec6', 'attachment_id': '3dafcac4-1cb9-4b60-a227-d729baa10cf6', 'attached_at': '2019-09-30T19:30:34.000000', 'host_name': null, 'volume_id': '5d95d5ee-4bdd-4452-b9d7-d44ca10d3d53', 'device': '/dev/vda', 'id': '5d95d5ee-4bdd-4452-b9d7-d44ca10d3d53' } ] in: body required: true type: array auth_key: description: | The authentication key for the volume transfer. in: body required: true type: string availability_zone: description: | The name of the availability zone. in: body required: false type: string availability_zone_backup: description: | The backup availability zone key value pair. in: body required: false type: string min_version: 3.51 availability_zone_info: description: | The list of availability zone information. in: body required: true type: array availability_zone_required: description: | The availability zone name. in: body required: true type: string availability_zone_state: description: | The current state of the availability zone. in: body required: true type: object available: description: | Whether the availability zone is available for use. 
in: body required: true type: boolean backend_id: description: | ID of backend to failover to. Default is ``None``. in: body required: false type: string backend_id_target: description: | ID of failover target backend. in: body required: true type: string backend_state: description: | The state of storage backend. Only in ``cinder-volume`` service. in: body required: false type: string backup: description: | A ``backup`` object. in: body required: true type: object backup_gigabytes: description: | The size (GB) of backups that are allowed for each project. in: body required: true type: integer backup_gigabytes_usage: description: | The size (GB) usage information of backup for this project, including ``in_use``, ``limit`` and ``reserved`` attributes. in: body required: true type: object backup_id: description: | The UUID of the backup. in: body required: false type: string min_version: 3.47 backup_links_optional: description: | An array containing an object with the following fields: ``"rel"`` with the value ``"next"`` and ``href``, whose value is a link to the next page of backups. Only appears when there are more backups than are listed in the current response. in: body required: false type: array backup_record: description: | An object recording volume backup metadata, including ``backup_service`` and ``backup_url``. in: body required: true type: object backup_service: description: | The service used to perform the backup. in: body required: true type: string backup_url: description: | An identifier string to locate the backup. in: body required: true type: string backups: description: | A list of ``backup`` objects. in: body required: true type: array backups_number: description: | The number of backups that are allowed for each project. in: body required: true type: integer backups_number_usage: description: | The backup usage information for this project, including ``in_use``, ``limit`` and ``reserved`` attributes. in: body required: true type: object binary: description: | The binary name of the service. in: body required: false type: string binary_required: description: | The binary name of the service. in: body required: true type: string bootable: description: | Enables or disables the bootable attribute. You can boot an instance from a bootable volume. See :ref:`valid boolean values ` in: body required: false type: boolean bootable_required: description: | Enables or disables the bootable attribute. You can boot an instance from a bootable volume. See :ref:`valid boolean values ` in: body required: true type: boolean bootable_response: description: | Enables or disables the bootable attribute. You can boot an instance from a bootable volume. in: body required: true type: string capabilities: description: | The capabilities for the back end. The value is either ``null`` or a string value that indicates the capabilities for each pool. For example, ``total_capacity_gb`` or ``QoS_support``. in: body required: true type: object cgsnapshot: description: | A consistency group snapshot object. in: body required: true type: object cgsnapshot_id: description: | The UUID of the consistency group snapshot. in: body required: false type: string cgsnapshots: description: | A collection of ``cgsnapshot`` objects. in: body required: true type: object cinder_id: description: | The UUID of the resource in Cinder. in: body required: true type: string cipher: description: | The encryption algorithm or mode. For example, aes-xts-plain64. The default value is None. 
in: body required: false type: string cluster: description: | A cluster object. in: body required: true type: object cluster_active_backend_id: description: | The ID of active storage backend. Only in ``cinder-volume`` service. in: body required: false type: string min_version: 3.26 cluster_binary: description: | The binary name of the services in the cluster. in: body required: false type: string cluster_binary_resp: description: | The binary name of the services in the cluster. in: body required: true type: string cluster_cvol: description: | The cluster name. Only in ``cinder-volume`` service. in: body required: false type: string min_version: 3.7 cluster_frozen: description: | Whether the cluster is frozen or not. in: body required: false type: boolean min_version: 3.26 cluster_last_heartbeat: description: | The last periodic heartbeat received. The date and time stamp format is `ISO 8601 `_: :: CCYY-MM-DDThh:mm:ss±hh:mm For example, ``2015-08-27T09:49:58-05:00``. The ``±hh:mm`` value, if included, is the time zone as an offset from UTC. in: body required: true type: string cluster_mutex: description: | The OpenStack Block Storage cluster where the resource resides. Optional only if host field is provided. in: body required: false type: string cluster_name_required: description: | The name to identify the service cluster. in: body required: true type: string cluster_name_resp: description: | The name of the service cluster. in: body required: true type: string cluster_num_down_hosts: description: | The number of down hosts in the cluster. in: body required: true type: integer cluster_num_hosts: description: | The number of hosts in the cluster. in: body required: true type: integer cluster_replication_status: description: | The cluster replication status. Only included in responses if configured. One of: ``enabled`` or ``disabled``. in: body required: false type: string cluster_state: description: | The state of the cluster. One of ``up`` or ``down``. in: body required: true type: string cluster_status: description: | The status of the cluster. One of ``enabled`` or ``disabled``. in: body required: true type: string clusters: description: | A list of cluster objects. in: body required: true type: array connection_info: description: | The connection info used for server to connect the volume. in: body required: true type: object connector: description: | The ``connector`` object. in: body required: false type: object connector_required: description: | The ``connector`` object. The internal structure of connector depends on the volume driver implementation. For details about the required elements in the structure, see the documentation for the volume driver. in: body required: true type: object consistencygroup: description: | A consistency group. in: body required: true type: object consistencygroup-from-src: description: | The consistency group from source object. in: body required: true type: object consistencygroup_id: description: | The UUID of the consistency group. in: body required: false type: string consistencygroup_id_required: description: | The UUID of the consistency group. in: body required: true type: string consistencygroups: description: | A list of consistency groups. in: body required: true type: array consumer: description: | The consumer type. in: body required: false type: string consumes_quota: description: | Whether this resource consumes quota or not. 
Resources that are not counted for quota usage are usually temporary internal resources created to perform an operation. in: body required: false type: boolean min_version: 3.65 container: description: | The container name or null. in: body required: false type: string container_format: description: | Container format for the new image. Default is bare. in: body required: false type: string container_format_upload: description: | Container format for the new image. Default is bare. (Note: Volumes of an encrypted volume type must use a bare container format.) in: body required: false type: string control_location: description: | Notional service where encryption is performed. Valid values are "front-end" or "back-end". The default value is "front-end". in: body required: false type: string count: description: | The total count of requested resources before pagination is applied. in: body required: false type: integer min_version: 3.45 create-from-src: description: | The create from source action. in: body required: true type: object created_at: description: | The date and time when the resource was created. The date and time stamp format is `ISO 8601 `_: :: CCYY-MM-DDThh:mm:ss±hh:mm For example, ``2015-08-27T09:49:58-05:00``. The ``±hh:mm`` value, if included, is the time zone as an offset from UTC. in: body required: true type: string data_timestamp: description: | The time when the data on the volume was first saved. If it is a backup from a volume, it will be the same as ``created_at`` for the backup. If it is a backup from a snapshot, it will be the same as ``created_at`` for the snapshot. in: body required: true type: string delete: description: | The delete action. in: body required: true type: object delete-volumes: description: | If set to ``true``, allows deletion of a group as well as all volumes in the group. See :ref:`valid boolean values ` in: body required: false type: boolean deleted: description: | Whether the resource is deleted or not. in: body required: true type: boolean deleted_at: description: | The date and time when the resource was deleted. The date and time stamp format is `ISO 8601 `_: :: CCYY-MM-DDThh:mm:ss±hh:mm For example, ``2015-08-27T09:49:58-05:00``. The ``±hh:mm`` value, if included, is the time zone as an offset from UTC. In the previous example, the offset value is ``-05:00``. If the ``deleted_at`` date and time stamp is not set, its value is ``null``. in: body required: true type: string dependents: description: | Shows the dependents of the cluster. in: body required: false type: string description: description: | The backup description or null. in: body required: false type: string description_cap: description: | The capabilities description. in: body required: true type: string description_cg: description: | The consistency group description. in: body required: true type: string description_cg_snapshot_false: description: | The consistency group snapshot description. in: body required: false type: string description_cg_snapshot_true: description: | The consistency group snapshot description. in: body required: true type: string description_consis: description: | The consistency group description. in: body required: false type: string description_extn: description: | The extension description. in: body required: true type: string description_group_false: description: | The group description. in: body required: false type: string description_group_snap: description: | The group snapshot description.
in: body required: false type: string description_group_snap_req: description: | The group snapshot description. in: body required: true type: string description_group_true: description: | The group description. in: body required: true type: string description_group_type_optional: description: | The group type description. in: body required: false type: string description_group_type_required: description: | The group type description. in: body required: true type: string description_snap: description: | A description for the snapshot. Default is ``None``. in: body required: false type: string description_snap_req: description: | A description for the snapshot. in: body required: true type: string description_vol: description: | The volume description. in: body required: false type: string description_vol_req: description: | The volume description. in: body required: true type: string description_volume_type_optional: description: | The volume type description. in: body required: false type: string description_volume_type_required: description: | The volume type description. in: body required: true type: string desired_capacity: description: | The desired capacity of the cluster. in: body required: true type: string min_version: 3.7 destination_project_id: description: | Records the destination project_id after volume transfer. in: body required: false type: string min_version: 3.57 detached_at: description: | The time when the attachment is detached. in: body required: true type: string disabled: description: | Filter by disabled status. See :ref:`valid boolean values ` in: body required: false type: boolean disabled_reason_body: description: | The reason for disabling a resource. in: body required: false type: string disabled_reason_body_req: description: | The reason for disabling a resource. in: body required: true type: string disabled_required: description: | Whether the service is disabled or not. in: body required: true type: boolean disk_format: description: | Disk format for the new image. Default is raw. in: body required: false type: string disk_format_upload: description: | Disk format for the new image. Default is raw. (Note: volumes of an encrypted volume type can only be uploaded in raw format.) in: body required: false type: string display_name: description: | The name of volume backend capabilities. in: body required: true type: string domain: description: | The domain in use for the cluster. in: body required: true type: string driver_version: description: | The driver version. in: body required: true type: string encrypted: description: | If true, this volume is encrypted. in: body required: true type: boolean encryption: description: | The encryption information. in: body required: true type: object encryption_id_body: description: | The UUID of the encryption. in: body required: true type: string encryption_key_id: description: | The UUID of the encryption key. Only included for encrypted volumes. in: body required: false type: string min_version: 3.64 event_id: description: | The ID of the event for this message; this ID could eventually be translated into ``user_message``. in: body required: true type: string extend_completion_error: description: | Used to indicate that the extend operation has failed outside of cinder. in: body required: false type: boolean extra_info: description: | More information about the resource.
in: body required: true type: string extra_specs_volume_type_optional: description: | A key and value pair that contains additional specifications that are associated with the volume type. Examples include capabilities, capacity, compression, and so on, depending on the storage driver in use. in: body required: false type: object extra_specs_volume_type_required: description: | A set of key and value pairs that contains the specifications for a volume type. in: body required: true type: object fail_reason: description: | If the backup failed, the reason for the failure. Otherwise, null. in: body required: true type: string force: description: | Indicates whether to backup, even if the volume is attached. Default is ``false``. See :ref:`valid boolean values ` in: body required: false type: boolean force_snapshot: description: | Indicates whether to snapshot, even if the volume is attached. Default is ``false``. See :ref:`valid boolean values ` in: body required: false type: boolean force_upload_vol: description: | Enables or disables upload of a volume that is attached to an instance. Default=False. See :ref:`valid boolean values ` in: body required: false type: boolean free_capacity: description: | The amount of free capacity for the back-end volume, in GBs. A valid value is a string, such as ``unknown``, or a number (integer or floating point). in: body required: true type: string frozen: description: | The host is frozen or not. Only in ``cinder-volume`` service. in: body required: false type: boolean gigabytes: description: | The size (GB) of volumes and snapshots that are allowed for each project. in: body required: true type: integer gigabytes_for_type: description: | The size (GB) of volumes and snapshots that are allowed for each project and the specified volume type. in: body required: true type: integer gigabytes_for_type_usage: description: | The size (GB) usage information of volumes and snapshots for this project and this volume type, including ``in_use``, ``limit`` and ``reserved`` attributes. in: body required: true type: object gigabytes_usage: description: | The size (GB) usage information of volumes and snapshots for this project, including ``in_use``, ``limit`` and ``reserved`` attributes. in: body required: true type: object group: description: | A group object. in: body required: true type: object group_id: description: | The ID of the group. in: body required: true type: string group_id_optional: description: | The ID of the group. in: body required: false type: string min_version: 3.13 group_name: description: | The group name. in: body required: true type: string group_replication_status: description: | The group replication status. in: body required: false type: string min_version: 3.38 group_snapshot: description: | The group snapshot. in: body required: true type: object group_snapshot_id: description: | The ID of the group snapshot. in: body required: false type: string group_snapshot_id_3_14: description: | The ID of the group snapshot. in: body required: true type: string min_version: 3.14 group_snapshot_id_req: description: | The ID of the group snapshot. in: body required: true type: string group_snapshots: description: | A collection of group snapshots. in: body required: true type: array group_specs: description: | A set of key and value pairs that contains the specifications for a group type. in: body required: false type: object group_specs_req: description: | A set of key and value pairs that contains the specifications for a group type. 
in: body required: true type: object group_type: description: | A ``group_type`` object. in: body required: true type: object group_type_id: description: | The group type ID. in: body required: true type: string group_types: description: | The list of group types. in: body required: true type: array groups: description: | A collections of groups. in: body required: true type: array groups_number: description: | The number of groups that are allowed for each project. in: body required: true type: integer groups_number_usage: description: | The group usage information for this project, including ``in_use``, ``limit`` and ``reserved`` attributes. in: body required: true type: object guaranteed_until: description: | The expire time of the message, this message could be deleted after this time. in: body required: false type: string has_dependent_backups: description: | If this value is ``true``, there are other backups depending on this backup. in: body required: false type: boolean host: description: | The OpenStack Block Storage host where the existing volume resides. in: body required: true type: object host_mutex: description: | The OpenStack Block Storage host where the existing resource resides. Optional only if cluster field is provided. in: body required: false type: string host_name: description: | The name of the attaching host. in: body required: false type: string host_name_backend: description: | The name of the host that hosts the storage backend, may take the format of ``host@backend``. in: body required: true type: string host_name_body: description: | The name of the host. in: body required: false type: string host_name_body_req: description: | The name of the host. in: body required: true type: string host_service: description: | The name of the service which is running on the host. in: body required: true type: string host_service_status: description: | The status of the service. One of ``available`` or ``unavailable``. in: body required: true type: string hosts: description: | A OpenStack Block Storage host. in: body required: true type: object id: description: | The UUID of the object. in: body required: true type: string id_backup: description: | The UUID of the backup. in: body required: true type: string id_message: description: | The UUID for the message. in: body required: true type: string id_qos_spec: description: | The generated ID for the QoS specification. in: body required: true type: string id_snap: description: | The snapshot UUID. in: body required: true type: string id_vol: description: | The UUID of the volume. in: body required: true type: string image_id: description: | The uuid for the new image. in: body required: true type: string image_name: description: | The name for the new image. in: body required: true type: string imageRef: description: | The UUID of the image from which you want to create the volume. Required to create a bootable volume. **New in version 3.46**: Instead of directly consuming a zero-byte image that has been created by the Compute service when an instance snapshot was requested, the Block Storage service will use the ``snapshot_id`` contained in the ``block_device_mapping`` image property to locate the volume snapshot, and will use that to create the volume instead. in: body required: false type: string incremental: description: | The backup mode. A valid value is ``true`` for incremental backup mode or ``false`` for full backup mode. Default is ``false``. 
See :ref:`valid boolean values ` in: body required: false type: boolean instance_uuid: description: | The UUID of the attaching instance. in: body required: false type: string instance_uuid_req: description: | The UUID of the attaching instance. in: body required: true type: string is_incremental: description: | Indicates whether the backup mode is incremental. If this value is ``true``, the backup mode is incremental. If this value is ``false``, the backup mode is full. in: body required: false type: boolean is_public_group_type_optional: description: | Whether the group type is publicly visible. See :ref:`valid boolean values ` in: body required: false type: boolean is_public_group_type_required: description: | Whether the group type is publicly visible. in: body required: true type: boolean is_public_volume_type_optional: description: | Whether the volume type is publicly visible. See :ref:`valid boolean values ` in: body required: false type: boolean is_public_volume_type_required: description: | Whether the volume type is publicly visible. in: body required: true type: boolean is_up: description: | Filter by up/down status. See :ref:`valid boolean values ` in: body required: false type: boolean key: description: | The metadata key name for the metadata that you want to remove. in: body required: true type: string key_size: description: | Size of encryption key, in bits. This is usually 256. The default value is None. in: body required: false type: integer keys: description: | List of Keys. in: body required: true type: array last_heartbeat: description: | Find the operational latency between this server/cluster and the other members of the replica set. in: body required: false type: string levels: description: | The current log level that queried. in: body required: true type: object levels_set: description: | The log level to set, case insensitive, accepted values are ``INFO``, ``WARNING``, ``ERROR`` and ``DEBUG``. in: body required: true type: string limit_usage: description: | The limit data size. Visible only if you set the ``usage=true`` query parameter. in: body required: false type: integer limits: description: | A list of ``limit`` objects. in: body required: true type: object links: description: | Links for the volume transfer. in: body required: true type: array links_backup: description: | Links for the backup. in: body required: true type: array links_message: description: | Links for the message. in: body required: false type: array links_qos: description: | The QoS specification links. in: body required: true type: array links_res: description: | Links to the resources in question. in: body required: true type: array links_snap: description: | Links for the snapshot. in: body required: false type: array links_vol: description: | The volume links. in: body required: true type: array links_vol_optional: description: | The volume links. in: body required: false type: array location: description: | Full URL to a service or server. format: uri in: body required: true type: string log_levels: description: | The list of log levels. in: body required: true type: array manageable-snapshots: description: | A list of manageable snapshots. in: body required: true type: list manageable-volumes: description: | A list of manageable volumes. in: body required: true type: list max_size: description: | The maximum total size for the cluster. in: body required: true type: integer maxTotalBackupGigabytes: description: | The maximum total amount of backups, in gibibytes (GiB). 
in: body required: true type: integer maxTotalBackups: description: | The maximum number of backups. in: body required: true type: integer maxTotalGroups: description: | The maximum number of groups. in: body required: true type: integer maxTotalSnapshots: description: | The maximum number of snapshots. in: body required: true type: integer maxTotalSnapshotsOptional: description: | The maximum number of snapshots. in: body required: false type: integer maxTotalVolumeGigabytes: description: | The maximum total amount of volumes, in gibibytes (GiB). in: body required: true type: integer maxTotalVolumeGigabytesOptional: description: | The maximum total amount of volumes, in gibibytes (GiB). in: body required: false type: integer maxTotalVolumes: description: | The maximum number of volumes. in: body required: true type: integer maxTotalVolumesOptional: description: | The maximum number of volumes. in: body required: false type: integer media_types: description: | The `media types `_. It is an array of a fixed dict. .. note:: It is vestigial and provides no useful information. It will be deprecated and removed in the future. in: body required: true type: array message_level: description: | The level of the message; the only possible value is currently 'ERROR'. in: body required: true type: string meta: description: | The metadata key and value pair for the volume. in: body required: true type: object meta_snap: description: | The metadata key and value pair for the snapshot. in: body required: true type: object metadata: description: | One or more metadata key and value pairs for the snapshot, if any. in: body required: true type: object metadata_backup: description: | The backup metadata key value pairs. in: body required: false type: object min_version: 3.43 metadata_image: description: | The image metadata to add to the volume as a set of metadata key and value pairs. in: body required: true type: object metadata_snap: description: | One or more metadata key and value pairs for the snapshot. in: body required: false type: object metadata_vol: description: | One or more metadata key and value pairs to be associated with the new volume. in: body required: false type: object metadata_vol_assoc: description: | One or more metadata key and value pairs that are associated with the volume. in: body required: false type: object metadata_vol_assoc_req: description: | One or more metadata key and value pairs that are associated with the volume. in: body required: true type: object metadata_vol_obj: description: | A ``metadata`` object. Contains one or more metadata key and value pairs that are associated with the volume. in: body required: true type: object migrate_cluster: description: | The target cluster for the volume migration. Cluster format is ``cluster@backend``. Starting with microversion 3.16, either ``cluster`` or ``host`` must be specified. If ``host`` is specified and is part of a cluster, the cluster is used as the target for the migration. in: body required: false type: string min_version: 3.16 migrate_force_host_copy: description: | If false (the default), rely on the volume backend driver to perform the migration, which might be optimized. If true, or if the volume driver fails to migrate the volume itself, a generic host-based migration is performed. in: body required: false type: boolean migrate_host: description: | The target host for the volume migration. Host format is ``host@backend``. Required before microversion 3.16.
in: body required: false type: string migrate_lock_volume: description: | If true, migrating an ``available`` volume will change its status to ``maintenance``, preventing other operations from being performed on the volume, such as attach, detach, retype, etc. in: body required: false type: boolean migration_completion_error: description: | Used to indicate if an error has occurred elsewhere that requires cleanup. in: body required: false type: boolean # NOTE(mriedem): We can update the migration_policy retype note about encrypted # in-use volumes not being supported once # https://bugzilla.redhat.com/show_bug.cgi?id=760547 is fixed. migration_policy: description: | Specify if the volume should be migrated when it is re-typed. Possible values are ``on-demand`` or ``never``. If not specified, the default is ``never``. .. note:: If the volume is attached to a server instance and will be migrated, then by default policy only users with the administrative role should attempt the retype operation. A retype which involves a migration to a new host for an *in-use* encrypted volume is not supported. in: body required: false type: string migration_status: description: | The volume migration status. Admin only. in: body required: false type: string min_size: description: | The minimum total size for the cluster. in: body required: true type: integer mountpoint: description: | The attaching mount point. in: body required: true type: string multiattach: description: | Enables creating multiattach volumes. in: body required: false type: string multiattach_resp: description: | If true, this volume can attach to more than one instance. in: body required: true type: boolean name: description: | The name of the object. in: body required: false type: string name_backend_pool: description: | The name of the backend pool. in: body required: true type: string name_backup: description: | The backup name. in: body required: true type: string name_cgsnap: description: | The consistency group snapshot name. in: body required: true type: string name_consis: description: | The consistency group name. in: body required: false type: string name_group: description: | The group name. in: body required: false type: string name_group_snap: description: | The group snapshot name. in: body required: false type: string name_group_snap_req: description: | The group snapshot name. in: body required: true type: string name_group_type: description: | The group type name. in: body required: true type: string name_optional: description: | The name of the volume backup. in: body required: false type: string name_qos_spec: description: | The name of the QoS specification. in: body required: true type: string name_snap: description: | The name of the snapshot. Default is ``None``. in: body required: false type: string name_snap_req: description: | The name of the snapshot. in: body required: true type: string name_vol: description: | The volume name. in: body required: true type: string name_volume_type_optional: description: | The name of the volume type. in: body required: false type: string name_volume_type_required: description: | The name of the volume type. in: body required: true type: string namespace: description: | Link associated to the extension. in: body required: false type: string namespace_storage: description: | The storage namespace, such as ``OS::Storage::Capabilities::foo``. in: body required: true type: string new_size: description: | The new size of the volume, in gibibytes (GiB).
in: body required: true type: integer new_type: description: | The new volume type that the volume is changed to. in: body required: true type: string new_volume: description: | The UUID of the new volume. in: body required: true type: string no_snapshots: description: | Transfer volume without snapshots. Defaults to False if not specified. in: body required: false min_version: 3.55 type: boolean nodes: description: | A list of the UUIDs of node objects which are members of the current cluster. in: body required: false type: string num_down_hosts: description: | The number of hosts in the cluster that are down. in: body required: false type: integer num_hosts: description: | The total number of hosts in the cluster. in: body required: false type: integer object_count: description: | The number of objects in the backup. in: body required: true type: integer os-attach: description: | The ``os-attach`` action. in: body required: true type: object os-backup-project-attr:project_id: description: | The UUID of the owning project. in: body required: true type: string min_version: 3.18 os-begin_detaching: description: | The ``os-begin_detaching`` action. in: body required: true type: object os-detach: description: | The ``os-detach`` action. in: body required: true type: object os-ext-snap-attr:progress: description: | A percentage value for the build progress. in: body required: true type: string os-ext-snap-attr:project_id: description: | The UUID of the owning project. in: body required: true type: string os-extend: description: | The ``os-extend`` action. in: body required: true type: object os-extend_volume_completion: description: | The ``os-extend_volume_completion`` action. in: body required: true type: object os-force_delete: description: | The ``os-force_delete`` action. in: body required: true type: string os-force_detach: description: | The ``os-force_detach`` action. in: body required: true type: object os-initialize_connection: description: | The ``os-initialize_connection`` action. in: body required: true type: object os-migrate_volume: description: | The ``os-migrate_volume`` action. in: body required: true type: object os-migrate_volume_completion: description: | The ``os-migrate_volume_completion`` action. in: body required: true type: object os-reimage: description: | The ``os-reimage`` action. in: body required: true type: object min_version: 3.68 os-reserve: description: | The ``os-reserve`` action. in: body required: true type: object os-reset_status: description: | The ``os-reset_status`` action. in: body required: true type: object os-retype: description: | The ``os-retype`` action. in: body required: true type: object os-roll_detaching: description: | The ``os-roll_detaching`` action. in: body required: true type: object OS-SCH-HNT:scheduler_hints: description: | The dictionary of data to send to the scheduler. in: body required: false type: object os-set_bootable: description: | The ``os-set_bootable`` action. in: body required: true type: object os-set_image_metadata: description: | The ``os-set_image_metadata`` action. in: body required: true type: object os-show_image_metadata: description: | The ``os-show_image_metadata`` action. in: body required: true type: object os-terminate_connection: description: | The ``os-terminate_connection`` action. in: body required: true type: object os-unmanage: description: | The ``os-unmanage`` action. This action removes the specified volume from Cinder management. in: body required: true type: object os-unreserve: description: | The ``os-unreserve`` action.
in: body required: true type: object os-unset_image_metadata: description: | The ``os-unset_image_metadata`` action. This action removes the key-value pairs from the image metadata. in: body required: true type: object os-update_readonly_flag: description: | The ``os-update_readonly_flag`` action. This action enables or disables update of the volume to read-only access mode. in: body required: true type: object os-update_snapshot_status: description: | The ``os-update_snapshot_status`` action. in: body required: true type: object os-vol-host-attr:host: description: | Current back-end of the volume. Host format is ``host@backend#pool``. in: body required: false type: string os-vol-mig-status-attr:migstat: description: | The status of this volume migration (None means that a migration is not currently in progress). in: body required: false type: string os-vol-mig-status-attr:name_id: description: | The volume ID that this volume name on the back-end is based on. in: body required: false type: string os-vol-tenant-attr:tenant_id: description: | The project ID which the volume belongs to. in: body required: true type: string os-volume_upload_image: description: | The ``os-volume_upload_image`` action. This action uploads the specified volume to the image service. in: body required: true type: object per_volume_gigabytes: description: | The size (GB) of volumes that are allowed for each volume. in: body required: true type: integer per_volume_gigabytes_usage: description: | The size (GB) usage information for each volume, including ``in_use``, ``limit`` and ``reserved`` attributes. in: body required: true type: object policies: description: | A list of UUIDs of the policies attached to the current cluster. in: body required: false type: string pool_name: description: | The name of the storage pool. in: body required: true type: string pools: description: | List of storage pools. in: body required: true type: array prefix: description: | The prefix for the log path we are querying, for example ``cinder.`` or ``sqlalchemy.engine``. When not present, or when the empty string is passed, all log levels will be retrieved. in: body required: false type: string profile_id: description: | The UUID of the profile. in: body required: false type: string min_version: 3.7 profile_name: description: | The name of a profile object. The name must start with an ASCII letter and can contain ASCII letters, digits, underscores, periods, and hyphens, and its length must be less than 255 characters. in: body required: true type: string project: description: | The ID of the project. Volume Type access to be added to this project ID. in: body required: true type: string project_id: description: | The UUID of the project. in: body required: true type: string project_id_group: description: | The UUID of the volume group project. in: body required: false type: string min_version: 3.58 project_id_group_snapshot: description: | The UUID of the volume group snapshot project. in: body required: false type: string min_version: 3.58 project_id_host: description: | The project ID which the host resource belongs to. In the summary resource, the value is ``(total)``. in: body required: true type: string properties: description: | The backend volume capabilities list, which consists of cinder standard capabilities and vendor unique properties. in: body required: true type: object protected: description: | Whether the new image is protected. Default=False.
See :ref:`valid boolean values ` in: body required: false type: boolean min_version: 3.1 provider: # required response parameter (get/create) description: | The class that provides encryption support. in: body required: true type: string provider_id: description: | The provider ID for the volume. The value is either a string set by the driver or ``null`` if the driver doesn't use the field or if it hasn't created it yet. Only returned for administrators. in: body required: false type: string min_version: 3.21 provider_optional: # optional response parameter (update) description: | The class that provides encryption support. in: body required: false type: string provider_req: # required request parameter (create) description: | The class that provides encryption support. Choices are: * luks - relies on Linux Unified Key Setup (recommended) * plain - relies on dm-crypt in: body required: true type: string provider_req_optional: # optional request parameter (update) description: | The class that provides encryption support. Choices are: * luks - relies on Linux Unified Key Setup (recommended) * plain - relies on dm-crypt in: body required: false type: string qos_association_id: description: | The Qos association ID. in: body required: true type: string qos_association_name: description: | The QoS association name. in: body required: true type: string qos_association_type: description: | The QoS association type. in: body required: true type: string qos_associations: description: | A collection of ``QoS associations``. in: body required: true type: array qos_set_id: description: | The QoS set ID. in: body required: true type: string qos_specs: description: | A ``qos_specs`` object. in: body required: true type: object qos_specs_id: description: | The QoS specifications ID. in: body required: false type: string QoS_support: description: | The quality of service (QoS) support. in: body required: true type: boolean quota_class_id: description: The name of the quota class set. in: body required: true type: string quota_class_set: description: | A ``quota_class_set`` object. in: body required: true type: object quota_set: description: | A ``quota_set`` object. in: body required: true type: object rate: description: | Rate-limit volume copy bandwidth, used to mitigate slow down of data access from the instances. in: body required: true type: array readonly: description: | Enables or disables read-only access mode. This value can be True, true, False, false. in: body required: true type: boolean reason_not_safe: description: | The reason why the resource can't be managed. in: body required: true type: string ref: description: | A reference to the existing volume. The internal structure of this reference depends on the volume driver implementation. For details about the required elements in the structure, see the documentation for the volume driver. in: body required: true type: object reference: description: | Some information for the resource. in: body required: true type: object reimage_reserved: description: | Normally, volumes to be re-imaged are in ``available`` or ``error`` status. When ``true``, this parameter will allow a volume in the ``reserved`` status to be re-imaged. The ability to re-image a volume in ``reserved`` status may be restricted to administrators in some clouds. Default value is ``false``. in: body required: false type: boolean remove_project_access: description: | Removes volume type access from a project. 
in: body required: true type: object remove_volumes: description: | One or more volume UUIDs, separated by commas, to remove from the volume group or consistency group. in: body required: false type: string replication_status: description: | The volume replication status. in: body required: true type: string replication_status_cvol: description: | The volume service replication status. Only in ``cinder-volume`` service. in: body required: false type: string replication_targets: description: | A list of volume backends used to replicate volumes on this backend. in: body required: true type: list replication_targets_unique_key: description: | Vendor specific key-values. Only returned if administrator. in: body type: string request_id: description: | The id of the request during which the message was created. in: body required: true type: string reserved_percentage: description: | The percentage of the total capacity that is reserved for the internal use by the back end. in: body required: true type: integer reset_status: description: | The ``reset_status`` action. in: body required: true type: object resource_fil: description: | Resource which the filters will be applied to. in: body required: true type: string resource_filters: description: | The resource filter array. in: body required: true type: array resource_filters_coll: description: | A collection of resource filters. in: body required: true type: array resource_id: description: | The UUID of a resource to cleanup. in: body required: false type: string resource_type: description: | The resource type corresponding to ``resource_uuid``. in: body required: false type: string resource_uuid: description: | The UUID of the resource during whose operation the message was created. in: body required: false type: string restore: description: | A ``restore`` object. in: body required: true type: object revert: description: | The ``revert`` action. in: body required: true type: object safe_to_manage: description: | If the resource can be managed or not. in: body required: true type: boolean security_group_rules: description: | The number of rules that are allowed for each security group. in: body required: false type: integer security_groups: description: | The number of security groups that are allowed for each project. in: body required: true type: integer service_id: description: | UUID for the cleanup service. in: body required: false type: integer service_key: description: | The service name. Deprecated. Keeping service key for API compatibility. in: body required: true type: string service_state: description: | The state of the service. One of ``enabled`` or ``disabled``. in: body required: true type: string service_state_up_down: description: | The state of the service. One of ``up`` or ``down``. in: body required: true type: string service_status: description: | The status of the service. One of ``enabled`` or ``disabled``. in: body required: true type: string service_uuid: description: | A unique identifier that's used to indicate what node the volume-service for a particular volume is being serviced by. in: body required: true type: string min_version: 3.48 services: description: | A list of service objects. in: body required: true type: array shared_targets: description: | An indicator whether the back-end hosting the volume utilizes shared_targets or not. Default=True. 
in: body required: true type: boolean min_version: 3.48 max_version: 3.68 shared_targets_tristate: description: | An indicator whether the host connecting the volume should lock for the whole attach/detach process or not. ``true`` means lock only when the iSCSI initiator running on the host doesn't support manual scans, ``false`` means never use locks, and ``null`` means always use locks. Look at os-brick's ``guard_connection`` context manager. Default=True. in: body required: true type: boolean min_version: 3.69 size: description: | The size of the volume, in gibibytes (GiB). in: body required: true type: integer snapshot: description: | A partial representation of a snapshot used in the creation process. in: body required: true type: string snapshot_id: description: | To create a volume from an existing snapshot, specify the UUID of the volume snapshot. The volume is created in the same availability zone and with the same size as the snapshot. in: body required: false type: string snapshot_id_backup: description: | The UUID of the source snapshot that you want to back up. in: body required: false type: string snapshot_id_revert: description: | The UUID of the snapshot. The API reverts the volume to this snapshot. in: body required: true type: string snapshot_id_source_vol: description: | The UUID of the source volume snapshot. in: body required: false type: string snapshot_name: description: | The name of the snapshot. in: body required: false type: string snapshot_obj: description: | A ``snapshot`` object. in: body required: true type: object snapshot_progress: description: | A percentage value for snapshot build progress. in: body required: false type: string snapshots_number: description: | The number of snapshots that are allowed for each project. in: body required: true type: integer snapshots_number_for_type: description: | The number of snapshots that are allowed for each project and the specified volume type. in: body required: true type: integer snapshots_number_for_type_usage: description: | The snapshot usage information for this project and this volume type, including ``in_use``, ``limit`` and ``reserved`` attributes. in: body required: true type: object snapshots_number_usage: description: | The snapshot usage information for this project, including ``in_use``, ``limit`` and ``reserved`` attributes. in: body required: true type: object source-name: description: | The resource's name. in: body required: true type: string source_cgid: description: | The UUID of the source consistency group. in: body required: false type: string source_group_id: description: | The UUID of the source group. in: body required: false type: string source_group_id_req: description: | The UUID of the source group. in: body required: true type: string source_project_id: description: | Records the source project_id before volume transfer. in: body required: false type: string min_version: 3.57 source_reference: description: | The snapshot's origin volume information. in: body required: true type: object source_volid: description: | The UUID of the source volume. The API creates a new volume with the same size as the source volume unless a larger size is requested. in: body required: false type: string spec_value: description: | The value of the group specification corresponding to the specified key. in: body required: true type: string specs: description: | A ``specs`` object. in: body required: true type: object state: description: | The state of the cluster. One of ``up`` or ``down``.
in: body required: true type: string status: description: | The ``status`` of the consistency group snapshot. in: body required: false type: string status_attachment: description: | The status of the attachment. in: body required: true type: string status_backup: description: | The backup status. Refer to the Backup statuses table for the possible status values. in: body required: true type: string status_backup_action: description: | The status for the backup. in: body required: true type: string status_consis: description: | The status of the consistency group. in: body required: true type: string status_group: description: | The status of the generic group. in: body required: true type: string status_group_snap: description: | The ``status`` of the generic group snapshot. in: body required: true type: string status_reason: description: | The string representation of the reason why the object has transitioned to its current status. in: body required: false type: string status_snap: description: | The status for the snapshot. in: body required: true type: string status_vol: description: | The volume status. in: body required: true type: string storage_protocol: description: | The storage back end for the back-end volume. For example, ``iSCSI`` or ``FC``. in: body required: true type: string summary_metadata: description: | A dictionary of lists that contains all the volumes' metadata, classified by metadata key. in: body required: true type: object min_version: 3.36 timeout: description: | The default timeout value (in seconds) of cluster operations. in: body required: false type: integer min_version: 3.7 total_capacity: description: | The total capacity for the back-end volume, in GBs. A valid value is a string, such as ``unknown``, or a number (integer or floating point). in: body required: true type: string total_count: description: | Total number of volumes. in: body required: true type: string total_count_int: description: | Total number of volumes. in: body required: true type: integer total_size: description: | Total size of volumes in GB. in: body required: true type: integer totalBackupGigabytesUsed: description: | The total number of backup gibibytes (GiB) used. in: body required: true type: integer totalBackupsUsed: description: | The total number of backups used. in: body required: true type: integer totalGigabytesUsed: description: | The total number of gibibytes (GiB) used. in: body required: true type: integer totalGigabytesUsedStr: description: | The total number of gibibytes (GiB) used. in: body required: true type: string totalSnapGigabytesUsed: description: | The total number of gibibytes (GiB) used by snapshots. in: body required: true type: string totalSnapshotsUsed: description: | The total number of snapshots used. in: body required: true type: string totalSnapshotsUsed_int: description: | The total number of snapshots used. in: body required: true type: integer totalVolumesUsed: description: | The total number of volumes used. in: body required: true type: integer transfer: description: | The volume transfer object. in: body required: true type: object transfer_name: description: | The name of the volume transfer. in: body required: true type: string transfer_obj_id: description: | The UUID of the volume transfer. in: body required: true type: string transfers: description: | List of transfer details. in: body required: true type: array updated: description: | The date and time stamp when the extension was last updated.
in: body required: true type: string updated_at: description: | The date and time when the resource was updated. The date and time stamp format is `ISO 8601 `_: :: CCYY-MM-DDThh:mm:ss±hh:mm For example, ``2015-08-27T09:49:58-05:00``. The ``±hh:mm`` value, if included, is the time zone as an offset from UTC. In the previous example, the offset value is ``-05:00``. If the ``updated_at`` date and time stamp is not set, its value is ``null``. in: body required: true type: string user_id: description: | The UUID of the user. in: body required: true type: string user_id_backup: description: | The UUID of the project owner. in: body required: true type: string min_version: 3.56 user_id_min: description: | The UUID of the user. in: body required: true type: string min_version: 3.41 user_message: description: | The translated readable message corresponding to ``event_id``. in: body required: true type: string user_messages: description: | A collection of user messages. in: body required: true type: string vendor_name: description: | The name of the vendor. in: body required: true type: string version_id: in: body required: true description: | A common name for the version in question. Informative only, it has no real semantic meaning. type: string version_max: in: body required: true description: | If this version of the API supports microversions, the maximum microversion that is supported. This will be the empty string if microversions are not supported. type: string version_min: in: body required: true description: | If this version of the API supports microversions, the minimum microversion that is supported. This will be the empty string if microversions are not supported. type: string version_status: in: body required: true description: | The status of this API version. This can be one of: - ``CURRENT``: this is the preferred version of the API to use - ``DEPRECATED``: a deprecated version of the API that is slated for removal type: string version_updated: description: | This is a fixed string that API version updates. in: body required: true type: string visibility: description: | The volume type access. in: body required: true type: string visibility_min: description: | The visibility property of the new image. Default is private. in: body required: false type: string min_version: 3.1 volume: description: | A ``volume`` object. in: body required: true type: object volume-summary: description: | Dictionary of ``volume-summary`` objects. in: body required: true type: object volume_backend_name: description: | The name of the back-end volume. in: body required: true type: string volume_cluster_name: description: | The cluster name of volume backend. in: body required: false type: string min_version: 3.61 volume_id: description: | The UUID of the volume. in: body required: true type: string volume_id_attachment: description: | The UUID of the volume which the attachment belongs to. in: body required: true type: string volume_id_backup: description: | The UUID of the volume that you want to back up. in: body required: true type: string volume_id_restore: description: | The UUID of the volume to which you want to restore a backup. in: body required: false type: string volume_id_snap: description: | If the snapshot was created from a volume, the volume ID. in: body required: true type: string volume_ids: description: | A list of ``volume`` ids, available only when ``list_volume`` set true. 
in: body required: false type: array min_version: 3.25 volume_image_metadata: description: | List of image metadata entries. Only included for volumes that were created from an image, or from a snapshot of a volume originally created from an image. in: body required: false type: object volume_name: description: | The volume name. in: body required: true type: string volume_name_optional: description: | The volume name. in: body required: false type: string volume_type: description: | A ``volume_type`` object. in: body required: true type: object volume_type_access: description: | List of objects containing volume type to be accessed by project. in: body required: true type: array volume_type_detail: description: | The volume type (either name or ID). To create an environment with multiple-storage back ends, you must specify a volume type. Block Storage volume back ends are spawned as children to ``cinder- volume``, and they are keyed from a unique queue. They are named ``cinder- volume.HOST.BACKEND``. For example, ``cinder- volume.ubuntu.lvmdriver``. When a volume is created, the scheduler chooses an appropriate back end to handle the request based on the volume type. Default is ``None``. For information about how to use volume types to create multiple- storage back ends, see `Configure multiple-storage back ends `_. in: body required: false type: string volume_type_id_363: description: | The associated volume type ID for the volume. in: body required: true type: object min_version: 3.63 volume_type_id_body: description: | The UUID of the volume type. in: body required: true type: string volume_type_vol: description: | The associated volume type name for the volume. in: body required: true type: string volume_types: description: | The list of volume types. In an environment with multiple-storage back ends, the scheduler determines where to send the volume based on the volume type. For information about how to use volume types to create multiple- storage back ends, see `Configure multiple-storage back ends `_. in: body required: true type: array volume_types_commas: description: | The list of volume types separated by commas. In an environment with multiple-storage back ends, the scheduler determines where to send the volume based on the volume type. For information about how to use volume types to create multiple-storage back ends, see `Configure multiple-storage back ends `_. in: body required: true type: string volumes: description: | A list of ``volume`` objects. in: body required: true type: array volumes_number: description: | The number of volumes that are allowed for each project. in: body required: true type: integer volumes_number_for_type: description: | The number of volumes that are allowed for each project and the specified volume type. in: body required: true type: integer volumes_number_for_type_usage: description: | The volume usage information for this project and this volume type, including ``in_use``, ``limit`` and ``reserved`` attributes. in: body required: true type: object volumes_number_usage: description: | The volume usage information for this project, including ``in_use``, ``limit`` and ``reserved`` attributes. in: body required: true type: object ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/qos-specs-v3-qos-specs.inc0000664000175000017500000001452000000000000023115 0ustar00zuulzuul00000000000000.. 
-*- rst -*- Quality of service (QoS) specifications (qos-specs) =================================================== Administrators only, depending on policy settings. Creates, lists, shows details for, associates, disassociates, sets keys, unsets keys, and deletes quality of service (QoS) specifications. Disassociate a QoS specification from all associations ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: GET /v3/{project_id}/qos-specs/{qos_id}/disassociate_all Disassociates a QoS specification from all associations. Response codes -------------- .. rest_status_code:: success ../status.yaml - 202 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - qos_id: qos_id Unset keys in a QoS specification ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: PUT /v3/{project_id}/qos-specs/{qos_id}/delete_keys Unsets keys in a QoS specification. Response codes -------------- .. rest_status_code:: success ../status.yaml - 202 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - qos_id: qos_id - keys: keys Request Example --------------- .. literalinclude:: ./samples/qos/qos-unset-request.json :language: javascript Get all associations for a QoS specification ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: GET /v3/{project_id}/qos-specs/{qos_id}/associations Lists all associations for a QoS specification. Response codes -------------- .. rest_status_code:: success ../status.yaml - 200 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - qos_id: qos_id Response -------- .. rest_parameters:: parameters.yaml - qos_associations: qos_associations - type: qos_association_type - id: qos_association_id - name: qos_association_name Response Example ---------------- .. literalinclude:: ./samples/qos/qos_show_response.json :language: javascript Associate QoS specification with a volume type ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: GET /v3/{project_id}/qos-specs/{qos_id}/associate Associates a QoS specification with a volume type. Response codes -------------- .. rest_status_code:: success ../status.yaml - 202 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - qos_id: qos_id - vol_type_id: vol_type_id_query Disassociate QoS specification from a volume type ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: GET /v3/{project_id}/qos-specs/{qos_id}/disassociate Disassociates a QoS specification from a volume type. Response codes -------------- .. rest_status_code:: success ../status.yaml - 202 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - qos_id: qos_id - vol_type_id: vol_type_id_query Show a QoS specification details ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: GET /v3/{project_id}/qos-specs/{qos_id} Shows details for a QoS specification. Response codes -------------- .. rest_status_code:: success ../status.yaml - 200 .. rest_status_code:: error ../status.yaml - 400 - 401 - 403 - 404 - 405 - 413 - 503 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - qos_id: qos_id Response Parameters ------------------- .. rest_parameters:: parameters.yaml - qos_specs: qos_specs - specs: specs - consumer: consumer - name: name_qos_spec - id: id_qos_spec - links: links_qos Response Example ---------------- .. literalinclude:: ./samples/qos/qos-show-response.json :language: javascript Set keys in a QoS specification ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. 
rest_method:: PUT /v3/{project_id}/qos-specs/{qos_id} Sets keys in a QoS specification. Response codes -------------- .. rest_status_code:: success ../status.yaml - 200 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - qos_id: qos_id - qos_specs: qos_specs Request Example --------------- .. literalinclude:: ./samples/qos/qos-update-request.json :language: javascript Response -------- .. rest_parameters:: parameters.yaml - qos_specs: qos_specs Response Example ---------------- .. literalinclude:: ./samples/qos/qos-update-response.json :language: javascript Delete a QoS specification ~~~~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: DELETE /v3/{project_id}/qos-specs/{qos_id} Deletes a QoS specification. Response codes -------------- .. rest_status_code:: success ../status.yaml - 202 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - qos_id: qos_id - force: force_del_qos Create a QoS specification ~~~~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: POST /v3/{project_id}/qos-specs Creates a QoS specification. Specify one or more key and value pairs in the request body. Response codes -------------- .. rest_status_code:: success ../status.yaml - 202 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - qos_specs: qos_specs - name: name_qos_spec Request Example --------------- .. literalinclude:: ./samples/qos/qos-create-request.json :language: javascript Response Parameters ------------------- .. rest_parameters:: parameters.yaml - qos_specs: qos_specs - name: name_qos_spec - links: links_qos - id: id_qos_spec - consumer: consumer - specs: specs Response Example ---------------- .. literalinclude:: ./samples/qos/qos-create-response.json :language: javascript List QoS Specifications ~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: GET /v3/{project_id}/qos-specs Lists quality of service (QoS) specifications. Response codes -------------- .. rest_status_code:: success ../status.yaml - 200 .. rest_status_code:: error ../status.yaml - 300 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - sort: sort - sort_key: sort_key - sort_dir: sort_dir - limit: limit - offset: offset - marker: marker Response Parameters ------------------- .. rest_parameters:: parameters.yaml - qos_specs: qos_specs - specs: specs - consumer: consumer - id: id_qos_spec - name: name_qos_spec Response Example ---------------- .. literalinclude:: ./samples/qos/qos-list-response.json :language: javascript ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/quota-classes.inc0000664000175000017500000000612100000000000021521 0ustar00zuulzuul00000000000000.. -*- rst -*- Quota class set extension (os-quota-class-sets) =============================================== Administrators only, depending on policy settings. Shows and updates quota classes for a project. Show quota classes for a project ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: GET /v3/{admin_project_id}/os-quota-class-sets/{quota_class_name} Shows quota class set for a project. If no specific value for the quota class resource exists, then the default value will be reported. Response codes -------------- .. rest_status_code:: success ../status.yaml - 200 .. rest_status_code:: error ../status.yaml - 403 - 404 Request ------- .. rest_parameters:: parameters.yaml - quota_class_name: quota_class_name - admin_project_id: admin_project_id Response Parameters ------------------- .. 
rest_parameters:: parameters.yaml - quota_class_set: quota_class_set - backup_gigabytes: maxTotalBackupGigabytes - backups: maxTotalBackups - gigabytes: maxTotalVolumeGigabytes - gigabytes_{volume_type}: gigabytes_for_type - groups: maxTotalGroups - per_volume_gigabytes: per_volume_gigabytes - snapshots: maxTotalSnapshots - snapshots_{volume_type}: snapshots_number_for_type - volumes: maxTotalVolumes - volumes_{volume_type}: volumes_number_for_type - id: quota_class_id Response Example ---------------- .. literalinclude:: ./samples/quota_classes/quota-classes-show-response.json :language: javascript Update quota classes for a project ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: PUT /v3/{admin_project_id}/os-quota-class-sets/{quota_class_name} Updates quota class set for a project. If the ``quota_class_name`` key does not exist, then the API will create one. Response codes -------------- .. rest_status_code:: success ../status.yaml - 200 .. rest_status_code:: error ../status.yaml - 400 - 403 - 404 Request ------- .. rest_parameters:: parameters.yaml - admin_project_id: admin_project_id - quota_class_name: quota_class_name - gigabytes: maxTotalVolumeGigabytesOptional - gigabytes_{volume_type}: gigabytes_for_type - snapshots: maxTotalSnapshotsOptional - snapshots_{volume_type}: snapshots_number_for_type - volumes: maxTotalVolumesOptional - volumes_{volume_type}: volumes_number_for_type Request Example --------------- .. literalinclude:: ./samples/quota_classes/quota-classes-update-request.json :language: javascript Response Parameters ------------------- .. rest_parameters:: parameters.yaml - quota_class_set: quota_class_set - backup_gigabytes: maxTotalBackupGigabytes - backups: maxTotalBackups - gigabytes: maxTotalVolumeGigabytes - gigabytes_{volume_type}: gigabytes_for_type - groups: maxTotalGroups - per_volume_gigabytes: per_volume_gigabytes - snapshots: maxTotalSnapshots - snapshots_{volume_type}: snapshots_number_for_type - volumes: maxTotalVolumes - volumes_{volume_type}: volumes_number_for_type Response Example ---------------- .. literalinclude:: ./samples/quota_classes/quota-classes-update-response.json :language: javascript ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/quota-sets.inc0000664000175000017500000001242500000000000021046 0ustar00zuulzuul00000000000000.. -*- rst -*- Quota sets extension (os-quota-sets) ==================================== Administrators only, depending on policy settings. Shows, updates, and deletes quotas for a project. Show quotas for a project ~~~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: GET /v3/{admin_project_id}/os-quota-sets/{project_id} Shows quotas for a project. Response codes -------------- .. rest_status_code:: success ../status.yaml - 200 Request ------- .. rest_parameters:: parameters.yaml - admin_project_id: admin_project_id - project_id: quotas_project_id - usage: usage Response Parameters ------------------- .. rest_parameters:: parameters.yaml - quota_set: quota_set - id: project_id - volumes: volumes_number - volumes_{volume_type}: volumes_number_for_type - snapshots: snapshots_number - snapshots_{volume_type}: snapshots_number_for_type - backups: backups_number - groups: groups_number - per_volume_gigabytes: per_volume_gigabytes - gigabytes: gigabytes - gigabytes_{volume_type}: gigabytes_for_type - backup_gigabytes: backup_gigabytes Response Example ---------------- .. 
literalinclude:: ./samples/quota_sets/quotas-show-response.json :language: javascript Show quota usage for a project ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: GET /v3/{admin_project_id}/os-quota-sets/{project_id}?{usage}=True Shows quota usage for a project. Response codes -------------- .. rest_status_code:: success ../status.yaml - 200 Request ------- .. rest_parameters:: parameters.yaml - project_id: quotas_project_id - admin_project_id: admin_project_id - usage: usage Response Parameters ------------------- .. rest_parameters:: parameters.yaml - quota_set: quota_set - id: project_id - volumes: volumes_number_usage - volumes_{volume_type}: volumes_number_for_type_usage - snapshots: snapshots_number_usage - snapshots_{volume_type}: snapshots_number_for_type_usage - backups: backups_number_usage - groups: groups_number_usage - per_volume_gigabytes: per_volume_gigabytes_usage - gigabytes: gigabytes_usage - gigabytes_{volume_type}: gigabytes_for_type_usage - backup_gigabytes: backup_gigabytes_usage Response Example ---------------- .. literalinclude:: ./samples/quota_sets/quotas-show-usage-response.json :language: javascript Update quotas for a project ~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: PUT /v3/{admin_project_id}/os-quota-sets/{project_id} Updates quotas for a project. Response codes -------------- .. rest_status_code:: success ../status.yaml - 200 Request ------- .. rest_parameters:: parameters.yaml - admin_project_id: admin_project_id - project_id: quotas_project_id - quota_set: quota_set - volumes: volumes_number - volumes_{volume_type}: volumes_number_for_type - snapshots: snapshots_number - snapshots_{volume_type}: snapshots_number_for_type - backups: backups_number - groups: groups_number - per_volume_gigabytes: per_volume_gigabytes - gigabytes: gigabytes - gigabytes_{volume_type}: gigabytes_for_type - backup_gigabytes: backup_gigabytes Request Example --------------- .. literalinclude:: ./samples/quota_sets/quotas-update-request.json :language: javascript Response Parameters ------------------- .. rest_parameters:: parameters.yaml - quota_set: quota_set - volumes: volumes_number - volumes_{volume_type}: volumes_number_for_type - snapshots: snapshots_number - snapshots_{volume_type}: snapshots_number_for_type - backups: backups_number - groups: groups_number - per_volume_gigabytes: per_volume_gigabytes - gigabytes: gigabytes - gigabytes_{volume_type}: gigabytes_for_type - backup_gigabytes: backup_gigabytes Response Example ---------------- .. literalinclude:: ./samples/quota_sets/quotas-update-response.json :language: javascript Delete quotas for a project ~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: DELETE /v3/{admin_project_id}/os-quota-sets/{project_id} Deletes quotas for a project so the quotas revert to default values. Response codes -------------- .. rest_status_code:: success ../status.yaml - 200 Request ------- .. rest_parameters:: parameters.yaml - project_id: quotas_project_id - admin_project_id: admin_project_id Get default quotas for a project ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: GET /v3/{admin_project_id}/os-quota-sets/{project_id}/defaults Gets default quotas for a project. Response codes -------------- .. rest_status_code:: success ../status.yaml - 200 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - admin_project_id: admin_project_id Response Parameters ------------------- .. 
rest_parameters:: parameters.yaml - quota_set: quota_set - id: project_id - volumes: volumes_number - volumes_{volume_type}: volumes_number_for_type - snapshots: snapshots_number - snapshots_{volume_type}: snapshots_number_for_type - backups: backups_number - groups: groups_number - per_volume_gigabytes: per_volume_gigabytes - gigabytes: gigabytes - gigabytes_{volume_type}: gigabytes_for_type - backup_gigabytes: backup_gigabytes Response Example ---------------- .. literalinclude:: ./samples/quota_sets/quotas-show-defaults-response.json :language: javascript ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/resource-filters.inc0000664000175000017500000000144500000000000022236 0ustar00zuulzuul00000000000000.. -*- rst -*- .. _resource-filters: Resource Filters (resource_filters) =================================== Lists all resource filters, available since microversion 3.33. List resource filters ~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: GET /v3/{project_id}/resource_filters List filters. Response codes -------------- .. rest_status_code:: success ../status.yaml - 200 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - resource: resource Response Parameters ------------------- .. rest_parameters:: parameters.yaml - resource_filters: resource_filters_coll - filters: resource_filters - resource: resource_fil Response Example ---------------- .. literalinclude:: ./samples/resource-filters-list-response.json :language: javascript ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315576.9791172 cinder-27.0.0/api-ref/source/v3/samples/0000775000175000017500000000000000000000000017706 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/samples/attachment-complete.json0000664000175000017500000000003200000000000024532 0ustar00zuulzuul00000000000000{ "os-complete": {} } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/samples/attachment-create-request.json0000664000175000017500000000075000000000000025662 0ustar00zuulzuul00000000000000{ "attachment": { "instance_uuid": "462dcc2d-130d-4654-8db1-da0df2da6a0d", "connector": { "initiator": "iqn.1993-08.org.debian: 01: cad181614cec", "ip": "192.168.1.20", "platform": "x86_64", "host": "tempest-1", "os_type": "linux2", "multipath": false, "mountpoint": "/dev/vdb", "mode": "ro" }, "volume_uuid": "462dcc2d-130d-4654-8db1-da0df2da6a0d" } }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/samples/attachment-create-response.json0000664000175000017500000000062000000000000026024 0ustar00zuulzuul00000000000000{ "attachment": { "status": "attaching", "detached_at": "2015-09-16T09:28:52.000000", "connection_info": {}, "attached_at": "2015-09-16T09:28:52.000000", "attach_mode": "ro", "instance": "3b8b6631-1cf7-4fd7-9afb-c01e541as345", "volume_id": "462dcc2d-130d-4654-8db1-da0df2da6a0d", "id": "3b8b6631-1cf7-4fd7-9afb-c01e541a073c" } }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/samples/attachment-list-detailed-response.json0000664000175000017500000000070500000000000027311 0ustar00zuulzuul00000000000000{ 
"attachments": [ { "status": "attaching", "detached_at": "2015-09-16T09:28:52.000000", "connection_info": {}, "attached_at": "2015-09-16T09:28:52.000000", "attach_mode": "ro", "instance": "31c79baf-b59e-469c-979f-1df4ecb6eea7", "volume_id": "462dcc2d-130d-4654-8db1-da0df2da6a0d", "id": "3b8b6631-1cf7-4fd7-9afb-c01e541a073c" } ] }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/samples/attachment-list-response.json0000664000175000017500000000042000000000000025532 0ustar00zuulzuul00000000000000{ "attachments": [ { "status": "attaching", "instance": "31c79baf-b59e-469c-979f-1df4ecb6eea7", "id": "3b8b6631-1cf7-4fd7-9afb-c01e541a073c", "volume_id": "462dcc2d-130d-4654-8db1-da0df2da6a0d" } ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/samples/attachment-show-response.json0000664000175000017500000000062000000000000025541 0ustar00zuulzuul00000000000000{ "attachment": { "status": "attaching", "detached_at": "2015-09-16T09:28:52.000000", "connection_info": {}, "attached_at": "2015-09-16T09:28:52.000000", "attach_mode": "ro", "instance": "3b8b6631-1cf7-4fd7-9afb-c01e541as345", "volume_id": "462dcc2d-130d-4654-8db1-da0df2da6a0d", "id": "3b8b6631-1cf7-4fd7-9afb-c01e541a073c" } }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/samples/attachment-update-request.json0000664000175000017500000000055000000000000025677 0ustar00zuulzuul00000000000000{ "attachment": { "connector": { "initiator": "iqn.1993-08.org.debian: 01: cad181614cec", "ip": "192.168.1.20", "platform": "x86_64", "host": "tempest-1", "os_type": "linux2", "multipath": false, "mountpoint": "/dev/vdb", "mode": "ro" } } }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/samples/attachment-update-response.json0000664000175000017500000000062000000000000026043 0ustar00zuulzuul00000000000000{ "attachment": { "status": "attaching", "detached_at": "2015-09-16T09:28:52.000000", "connection_info": {}, "attached_at": "2015-09-16T09:28:52.000000", "attach_mode": "ro", "instance": "3b8b6631-1cf7-4fd7-9afb-c01e541as345", "volume_id": "462dcc2d-130d-4654-8db1-da0df2da6a0d", "id": "3b8b6631-1cf7-4fd7-9afb-c01e541a073c" } }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/samples/availability-zone-list-response.json0000664000175000017500000000020000000000000027021 0ustar00zuulzuul00000000000000{ "availabilityZoneInfo": [{ "zoneState": { "available": true }, "zoneName": "nova" }] }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/samples/backend-capabilities-response.json0000664000175000017500000000204200000000000026451 0ustar00zuulzuul00000000000000{ "namespace": "OS::Storage::Capabilities::fake", "vendor_name": "OpenStack", "volume_backend_name": "lvmdriver-1", "pool_name": "pool", "driver_version": "2.0.0", "storage_protocol": "iSCSI", "display_name": "Capabilities of Cinder LVM driver", "description": "These are volume type options provided by Cinder LVM driver, blah, blah.", "visibility": "public", "replication_targets": [], "properties": { "compression": { "title": "Compression", "description": "Enables 
compression.", "type": "boolean" }, "qos": { "title": "QoS", "description": "Enables QoS.", "type": "boolean" }, "replication": { "title": "Replication", "description": "Enables replication.", "type": "boolean" }, "thin_provisioning": { "title": "Thin Provisioning", "description": "Sets thin provisioning.", "type": "boolean" } } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/samples/backup-force-delete-request.json0000664000175000017500000000003600000000000026067 0ustar00zuulzuul00000000000000{ "os-force_delete": {} } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/samples/backup-record-export-response.json0000664000175000017500000000017300000000000026476 0ustar00zuulzuul00000000000000{ "backup-record": { "backup_service": "cinder.backup.drivers.swift", "backup_url": "eyJzdGF0" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/samples/backup-record-import-request.json0000664000175000017500000000017300000000000026321 0ustar00zuulzuul00000000000000{ "backup-record": { "backup_service": "cinder.backup.drivers.swift", "backup_url": "eyJzdGF0" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/samples/backup-record-import-response.json0000664000175000017500000000077500000000000026477 0ustar00zuulzuul00000000000000{ "backup": { "id": "deac8b8c-35c9-4c71-acaa-889c2d5d5c8e", "links": [ { "href": "http://localhost:8776/v3/c95fc3e4afe248a49a28828f286a7b38/backups/deac8b8c-35c9-4c71-acaa-889c2d5d5c8e", "rel": "self" }, { "href": "http://localhost:8776/c95fc3e4afe248a49a28828f286a7b38/backups/deac8b8c-35c9-4c71-acaa-889c2d5d5c8e", "rel": "bookmark" } ], "name": null } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/samples/backup-reset-status-request.json0000664000175000017500000000010100000000000026165 0ustar00zuulzuul00000000000000{ "os-reset_status": { "status": "available" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/samples/backup-restore-request.json0000664000175000017500000000016100000000000025213 0ustar00zuulzuul00000000000000{ "restore": { "name": "vol-01", "volume_id": "64f5d2fb-d836-4063-b7e2-544d5c1ff607" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/samples/backup-restore-response.json0000664000175000017500000000026700000000000025370 0ustar00zuulzuul00000000000000{ "restore": { "backup_id": "2ef47aee-8844-490c-804d-2a8efe561c65", "volume_id": "795114e8-7489-40be-a978-83797f2c1dd3", "volume_name": "volume01" } } ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315576.9831173 cinder-27.0.0/api-ref/source/v3/samples/backups/0000775000175000017500000000000000000000000021336 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/samples/backups/backup-create-request.json0000664000175000017500000000040600000000000026425 0ustar00zuulzuul00000000000000{ "backup": { "container": 
null, "description": "Test backup", "name": "backup001", "volume_id": "0aa67a0c-7339-4be6-b5d5-2afe21ca270c", "incremental": false, "snapshot_id": null, "force": false } }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/samples/backups/backup-create-response.json0000664000175000017500000000101500000000000026570 0ustar00zuulzuul00000000000000{ "backup": { "id": "b1f41f9b-741e-4992-a246-b97de7e6e87e", "links": [ { "href": "http://127.0.0.1:40797/v3/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/backups/b1f41f9b-741e-4992-a246-b97de7e6e87e", "rel": "self" }, { "href": "http://127.0.0.1:40797/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/backups/b1f41f9b-741e-4992-a246-b97de7e6e87e", "rel": "bookmark" } ], "name": "backup001" } }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/samples/backups/backup-show-response.json0000664000175000017500000000203600000000000026311 0ustar00zuulzuul00000000000000{ "backup": { "availability_zone": null, "container": null, "created_at": "2023-06-23T11:56:02.509831", "data_timestamp": "2023-06-23T11:56:02.509831", "description": "Test backup", "fail_reason": null, "has_dependent_backups": false, "id": "6a122f4b-d2f6-448f-aeb5-68bae5ff8358", "is_incremental": false, "links": [ { "href": "http://127.0.0.1:46627/v3/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/backups/6a122f4b-d2f6-448f-aeb5-68bae5ff8358", "rel": "self" }, { "href": "http://127.0.0.1:46627/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/backups/6a122f4b-d2f6-448f-aeb5-68bae5ff8358", "rel": "bookmark" } ], "name": "backup001", "object_count": 0, "size": 10, "snapshot_id": null, "status": "creating", "updated_at": "2023-06-23T11:56:02.512426", "volume_id": "49a784cf-b759-4594-acdf-5238ee50976b" } }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/samples/backups/backups-list-detailed-response.json0000664000175000017500000000223300000000000030237 0ustar00zuulzuul00000000000000{ "backups": [ { "availability_zone": null, "container": null, "created_at": "2023-07-10T13:23:21.178739", "data_timestamp": "2023-07-10T13:23:21.178739", "description": "Test backup", "fail_reason": null, "has_dependent_backups": false, "id": "7ab823f7-1174-4447-9a76-863ae2dcf372", "is_incremental": false, "links": [ { "href": "http://127.0.0.1:44197/v3/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/backups/7ab823f7-1174-4447-9a76-863ae2dcf372", "rel": "self" }, { "href": "http://127.0.0.1:44197/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/backups/7ab823f7-1174-4447-9a76-863ae2dcf372", "rel": "bookmark" } ], "name": "backup001", "object_count": 0, "size": 10, "snapshot_id": null, "status": "creating", "updated_at": "2023-07-10T13:23:21.189552", "volume_id": "9fc31617-303d-4b52-826e-b598cca40419" } ] }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/samples/backups/backups-list-response.json0000664000175000017500000000112200000000000026462 0ustar00zuulzuul00000000000000{ "backups": [ { "id": "c26d9897-cace-44cc-ad0f-3a0d0b6d1450", "links": [ { "href": "http://127.0.0.1:46803/v3/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/backups/c26d9897-cace-44cc-ad0f-3a0d0b6d1450", "rel": "self" }, { "href": "http://127.0.0.1:46803/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/backups/c26d9897-cace-44cc-ad0f-3a0d0b6d1450", "rel": "bookmark" } ], "name": 
"backup001" } ] }././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315576.9831173 cinder-27.0.0/api-ref/source/v3/samples/backups/v3.18/0000775000175000017500000000000000000000000022115 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/samples/backups/v3.18/backup-create-response.json0000664000175000017500000000101500000000000027347 0ustar00zuulzuul00000000000000{ "backup": { "id": "73c2b8d8-e658-4396-a804-e1960b9330f9", "links": [ { "href": "http://127.0.0.1:34439/v3/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/backups/73c2b8d8-e658-4396-a804-e1960b9330f9", "rel": "self" }, { "href": "http://127.0.0.1:34439/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/backups/73c2b8d8-e658-4396-a804-e1960b9330f9", "rel": "bookmark" } ], "name": "backup001" } }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/samples/backups/v3.18/backup-show-response.json0000664000175000017500000000216300000000000027071 0ustar00zuulzuul00000000000000{ "backup": { "availability_zone": null, "container": null, "created_at": "2023-06-23T11:56:06.577029", "data_timestamp": "2023-06-23T11:56:06.577029", "description": "Test backup", "fail_reason": null, "has_dependent_backups": false, "id": "606b1a40-65c3-40aa-aa35-bbaddf3b0cdc", "is_incremental": false, "links": [ { "href": "http://127.0.0.1:40731/v3/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/backups/606b1a40-65c3-40aa-aa35-bbaddf3b0cdc", "rel": "self" }, { "href": "http://127.0.0.1:40731/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/backups/606b1a40-65c3-40aa-aa35-bbaddf3b0cdc", "rel": "bookmark" } ], "name": "backup001", "object_count": 0, "os-backup-project-attr:project_id": "89afd400-b646-4bbc-b12b-c0a4d63e5bd3", "size": 10, "snapshot_id": null, "status": "creating", "updated_at": "2023-06-23T11:56:06.579796", "volume_id": "5c4f87bc-031c-455b-b936-bfedb85a1d24" } }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/samples/backups/v3.18/backups-list-detailed-response.json0000664000175000017500000000236400000000000031023 0ustar00zuulzuul00000000000000{ "backups": [ { "availability_zone": null, "container": null, "created_at": "2023-06-23T11:55:59.754975", "data_timestamp": "2023-06-23T11:55:59.754975", "description": "Test backup", "fail_reason": null, "has_dependent_backups": false, "id": "89881aac-2ce3-476b-bb0f-23c440a5e141", "is_incremental": false, "links": [ { "href": "http://127.0.0.1:37207/v3/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/backups/89881aac-2ce3-476b-bb0f-23c440a5e141", "rel": "self" }, { "href": "http://127.0.0.1:37207/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/backups/89881aac-2ce3-476b-bb0f-23c440a5e141", "rel": "bookmark" } ], "name": "backup001", "object_count": 0, "os-backup-project-attr:project_id": "89afd400-b646-4bbc-b12b-c0a4d63e5bd3", "size": 10, "snapshot_id": null, "status": "creating", "updated_at": "2023-06-23T11:55:59.759269", "volume_id": "66eda5bf-7163-4316-a0b5-afb14c43625b" } ] }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/samples/backups/v3.18/backups-list-response.json0000664000175000017500000000112200000000000027241 0ustar00zuulzuul00000000000000{ "backups": [ { "id": "782c1178-79b7-4caf-845b-c226cf288ca0", "links": [ { "href": 
"http://127.0.0.1:36723/v3/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/backups/782c1178-79b7-4caf-845b-c226cf288ca0", "rel": "self" }, { "href": "http://127.0.0.1:36723/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/backups/782c1178-79b7-4caf-845b-c226cf288ca0", "rel": "bookmark" } ], "name": "backup001" } ] }././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315576.9831173 cinder-27.0.0/api-ref/source/v3/samples/backups/v3.43/0000775000175000017500000000000000000000000022113 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/samples/backups/v3.43/backup-create-response.json0000664000175000017500000000101500000000000027345 0ustar00zuulzuul00000000000000{ "backup": { "id": "992835c9-4ea4-4433-aa1d-c8725c041af2", "links": [ { "href": "http://127.0.0.1:38909/v3/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/backups/992835c9-4ea4-4433-aa1d-c8725c041af2", "rel": "self" }, { "href": "http://127.0.0.1:38909/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/backups/992835c9-4ea4-4433-aa1d-c8725c041af2", "rel": "bookmark" } ], "name": "backup001" } }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/samples/backups/v3.43/backup-show-response.json0000664000175000017500000000221300000000000027063 0ustar00zuulzuul00000000000000{ "backup": { "availability_zone": null, "container": null, "created_at": "2023-06-23T11:56:04.957710", "data_timestamp": "2023-06-23T11:56:04.957710", "description": "Test backup", "fail_reason": null, "has_dependent_backups": false, "id": "bb512d13-a64c-4793-b153-939b8c9b638f", "is_incremental": false, "links": [ { "href": "http://127.0.0.1:45785/v3/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/backups/bb512d13-a64c-4793-b153-939b8c9b638f", "rel": "self" }, { "href": "http://127.0.0.1:45785/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/backups/bb512d13-a64c-4793-b153-939b8c9b638f", "rel": "bookmark" } ], "metadata": {}, "name": "backup001", "object_count": 0, "os-backup-project-attr:project_id": "89afd400-b646-4bbc-b12b-c0a4d63e5bd3", "size": 10, "snapshot_id": null, "status": "creating", "updated_at": "2023-06-23T11:56:04.960494", "volume_id": "f9f36c56-29a0-46a1-88c1-9bcf45fb271b" } }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/samples/backups/v3.43/backups-list-detailed-response.json0000664000175000017500000000242000000000000031012 0ustar00zuulzuul00000000000000{ "backups": [ { "availability_zone": null, "container": null, "created_at": "2023-06-23T11:55:56.808833", "data_timestamp": "2023-06-23T11:55:56.808833", "description": "Test backup", "fail_reason": null, "has_dependent_backups": false, "id": "cafabbef-cf1d-45a4-95c0-7395f30fd334", "is_incremental": false, "links": [ { "href": "http://127.0.0.1:34215/v3/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/backups/cafabbef-cf1d-45a4-95c0-7395f30fd334", "rel": "self" }, { "href": "http://127.0.0.1:34215/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/backups/cafabbef-cf1d-45a4-95c0-7395f30fd334", "rel": "bookmark" } ], "metadata": {}, "name": "backup001", "object_count": 0, "os-backup-project-attr:project_id": "89afd400-b646-4bbc-b12b-c0a4d63e5bd3", "size": 10, "snapshot_id": null, "status": "creating", "updated_at": "2023-06-23T11:55:56.811458", "volume_id": "aa4f5314-143f-4ad9-8677-17d52032f943" } ] 
}././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/samples/backups/v3.43/backups-list-response.json0000664000175000017500000000112200000000000027237 0ustar00zuulzuul00000000000000{ "backups": [ { "id": "6fea7c87-7c93-4670-b74d-97319d71f95a", "links": [ { "href": "http://127.0.0.1:46541/v3/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/backups/6fea7c87-7c93-4670-b74d-97319d71f95a", "rel": "self" }, { "href": "http://127.0.0.1:46541/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/backups/6fea7c87-7c93-4670-b74d-97319d71f95a", "rel": "bookmark" } ], "name": "backup001" } ] }././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315576.9831173 cinder-27.0.0/api-ref/source/v3/samples/backups/v3.45/0000775000175000017500000000000000000000000022115 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/samples/backups/v3.45/backup-create-response.json0000664000175000017500000000101500000000000027347 0ustar00zuulzuul00000000000000{ "backup": { "id": "ca97fe1d-8d8c-4b97-8439-f8bcfe5fe048", "links": [ { "href": "http://127.0.0.1:40345/v3/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/backups/ca97fe1d-8d8c-4b97-8439-f8bcfe5fe048", "rel": "self" }, { "href": "http://127.0.0.1:40345/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/backups/ca97fe1d-8d8c-4b97-8439-f8bcfe5fe048", "rel": "bookmark" } ], "name": "backup001" } }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/samples/backups/v3.45/backup-show-response.json0000664000175000017500000000221300000000000027065 0ustar00zuulzuul00000000000000{ "backup": { "availability_zone": null, "container": null, "created_at": "2023-06-23T11:56:07.334265", "data_timestamp": "2023-06-23T11:56:07.334265", "description": "Test backup", "fail_reason": null, "has_dependent_backups": false, "id": "5edd3373-8fae-4ae5-a63f-7282df75b2f8", "is_incremental": false, "links": [ { "href": "http://127.0.0.1:33005/v3/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/backups/5edd3373-8fae-4ae5-a63f-7282df75b2f8", "rel": "self" }, { "href": "http://127.0.0.1:33005/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/backups/5edd3373-8fae-4ae5-a63f-7282df75b2f8", "rel": "bookmark" } ], "metadata": {}, "name": "backup001", "object_count": 0, "os-backup-project-attr:project_id": "89afd400-b646-4bbc-b12b-c0a4d63e5bd3", "size": 10, "snapshot_id": null, "status": "creating", "updated_at": "2023-06-23T11:56:07.350705", "volume_id": "164476de-38ba-44a3-b00c-78624a5256ff" } }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/samples/backups/v3.45/backups-list-detailed-response.json0000664000175000017500000000244000000000000031016 0ustar00zuulzuul00000000000000{ "backups": [ { "availability_zone": null, "container": null, "created_at": "2023-06-23T11:55:59.217859", "data_timestamp": "2023-06-23T11:55:59.217859", "description": "Test backup", "fail_reason": null, "has_dependent_backups": false, "id": "3287d2a2-38fb-4a62-b9c4-d0faf601650c", "is_incremental": false, "links": [ { "href": "http://127.0.0.1:46657/v3/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/backups/3287d2a2-38fb-4a62-b9c4-d0faf601650c", "rel": "self" }, { "href": "http://127.0.0.1:46657/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/backups/3287d2a2-38fb-4a62-b9c4-d0faf601650c", "rel": 
"bookmark" } ], "metadata": {}, "name": "backup001", "object_count": 0, "os-backup-project-attr:project_id": "89afd400-b646-4bbc-b12b-c0a4d63e5bd3", "size": 10, "snapshot_id": null, "status": "creating", "updated_at": "2023-06-23T11:55:59.221858", "volume_id": "cc41abad-350c-45c2-a39b-82f3f891a954" } ], "count": 1 }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/samples/backups/v3.45/backups-list-response.json0000664000175000017500000000114200000000000027243 0ustar00zuulzuul00000000000000{ "backups": [ { "id": "0bc2fd0c-2727-440f-945e-97f653bf3cad", "links": [ { "href": "http://127.0.0.1:46279/v3/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/backups/0bc2fd0c-2727-440f-945e-97f653bf3cad", "rel": "self" }, { "href": "http://127.0.0.1:46279/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/backups/0bc2fd0c-2727-440f-945e-97f653bf3cad", "rel": "bookmark" } ], "name": "backup001" } ], "count": 1 }././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315576.9831173 cinder-27.0.0/api-ref/source/v3/samples/backups/v3.56/0000775000175000017500000000000000000000000022117 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/samples/backups/v3.56/backup-create-response.json0000664000175000017500000000101500000000000027351 0ustar00zuulzuul00000000000000{ "backup": { "id": "15b73866-f643-407d-9c53-377d9eb3e3fc", "links": [ { "href": "http://127.0.0.1:38593/v3/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/backups/15b73866-f643-407d-9c53-377d9eb3e3fc", "rel": "self" }, { "href": "http://127.0.0.1:38593/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/backups/15b73866-f643-407d-9c53-377d9eb3e3fc", "rel": "bookmark" } ], "name": "backup001" } }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/samples/backups/v3.56/backup-show-response.json0000664000175000017500000000230600000000000027072 0ustar00zuulzuul00000000000000{ "backup": { "availability_zone": null, "container": null, "created_at": "2023-06-23T11:56:08.691468", "data_timestamp": "2023-06-23T11:56:08.691468", "description": "Test backup", "fail_reason": null, "has_dependent_backups": false, "id": "3052c307-119e-4f78-960e-972078aa15a8", "is_incremental": false, "links": [ { "href": "http://127.0.0.1:38135/v3/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/backups/3052c307-119e-4f78-960e-972078aa15a8", "rel": "self" }, { "href": "http://127.0.0.1:38135/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/backups/3052c307-119e-4f78-960e-972078aa15a8", "rel": "bookmark" } ], "metadata": {}, "name": "backup001", "object_count": 0, "os-backup-project-attr:project_id": "89afd400-b646-4bbc-b12b-c0a4d63e5bd3", "size": 10, "snapshot_id": null, "status": "creating", "updated_at": "2023-06-23T11:56:08.693488", "user_id": "c853ca26-e8ea-4797-8a52-ee124a013d0e", "volume_id": "e763a78b-edc5-48fb-bbb3-fddc1062e27a" } }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/samples/backups/v3.56/backups-list-detailed-response.json0000664000175000017500000000253700000000000031027 0ustar00zuulzuul00000000000000{ "backups": [ { "availability_zone": null, "container": null, "created_at": "2023-06-23T11:56:02.012007", "data_timestamp": "2023-06-23T11:56:02.012007", "description": "Test backup", "fail_reason": null, 
"has_dependent_backups": false, "id": "72915888-cfcb-4f41-a416-bab824f3e8ba", "is_incremental": false, "links": [ { "href": "http://127.0.0.1:34501/v3/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/backups/72915888-cfcb-4f41-a416-bab824f3e8ba", "rel": "self" }, { "href": "http://127.0.0.1:34501/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/backups/72915888-cfcb-4f41-a416-bab824f3e8ba", "rel": "bookmark" } ], "metadata": {}, "name": "backup001", "object_count": 0, "os-backup-project-attr:project_id": "89afd400-b646-4bbc-b12b-c0a4d63e5bd3", "size": 10, "snapshot_id": null, "status": "creating", "updated_at": "2023-06-23T11:56:02.014872", "user_id": "c853ca26-e8ea-4797-8a52-ee124a013d0e", "volume_id": "9de8bbf6-015e-4ccd-a484-1c93acc85f60" } ], "count": 1 }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/samples/backups/v3.56/backups-list-response.json0000664000175000017500000000114200000000000027245 0ustar00zuulzuul00000000000000{ "backups": [ { "id": "eb129a85-fba3-4164-9a5e-9c3394b97810", "links": [ { "href": "http://127.0.0.1:40523/v3/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/backups/eb129a85-fba3-4164-9a5e-9c3394b97810", "rel": "self" }, { "href": "http://127.0.0.1:40523/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/backups/eb129a85-fba3-4164-9a5e-9c3394b97810", "rel": "bookmark" } ], "name": "backup001" } ], "count": 1 }././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315576.9871173 cinder-27.0.0/api-ref/source/v3/samples/backups/v3.9/0000775000175000017500000000000000000000000022035 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/samples/backups/v3.9/backup-create-response.json0000664000175000017500000000101500000000000027267 0ustar00zuulzuul00000000000000{ "backup": { "id": "41f7183c-a53d-4690-a7a9-b46f5bb1acbd", "links": [ { "href": "http://127.0.0.1:34865/v3/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/backups/41f7183c-a53d-4690-a7a9-b46f5bb1acbd", "rel": "self" }, { "href": "http://127.0.0.1:34865/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/backups/41f7183c-a53d-4690-a7a9-b46f5bb1acbd", "rel": "bookmark" } ], "name": "backup001" } }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/samples/backups/v3.9/backup-show-response.json0000664000175000017500000000203600000000000027010 0ustar00zuulzuul00000000000000{ "backup": { "availability_zone": null, "container": null, "created_at": "2023-06-23T11:56:09.563928", "data_timestamp": "2023-06-23T11:56:09.563928", "description": "Test backup", "fail_reason": null, "has_dependent_backups": false, "id": "3a6b5767-358c-4185-bcda-95b401fa3893", "is_incremental": false, "links": [ { "href": "http://127.0.0.1:36513/v3/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/backups/3a6b5767-358c-4185-bcda-95b401fa3893", "rel": "self" }, { "href": "http://127.0.0.1:36513/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/backups/3a6b5767-358c-4185-bcda-95b401fa3893", "rel": "bookmark" } ], "name": "backup001", "object_count": 0, "size": 10, "snapshot_id": null, "status": "creating", "updated_at": "2023-06-23T11:56:09.567593", "volume_id": "9cfc0bc4-cf52-45c2-b461-502ae375e2a7" } }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 
cinder-27.0.0/api-ref/source/v3/samples/backups/v3.9/backup-update-request.json0000664000175000017500000000013600000000000027143 0ustar00zuulzuul00000000000000{ "backup":{ "name":"backup001", "description": "this is a backup" } }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/samples/backups/v3.9/backup-update-response.json0000664000175000017500000000101500000000000027306 0ustar00zuulzuul00000000000000{ "backup": { "id": "06d5db4f-1f80-4a71-99a6-99368cfb8f8e", "links": [ { "href": "http://127.0.0.1:45187/v3/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/backups/06d5db4f-1f80-4a71-99a6-99368cfb8f8e", "rel": "self" }, { "href": "http://127.0.0.1:45187/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/backups/06d5db4f-1f80-4a71-99a6-99368cfb8f8e", "rel": "bookmark" } ], "name": "backup001" } }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/samples/backups/v3.9/backups-list-detailed-response.json0000664000175000017500000000223300000000000030736 0ustar00zuulzuul00000000000000{ "backups": [ { "availability_zone": null, "container": null, "created_at": "2023-06-23T11:56:04.395991", "data_timestamp": "2023-06-23T11:56:04.395991", "description": "Test backup", "fail_reason": null, "has_dependent_backups": false, "id": "a3469ffa-acb3-427d-b31f-1c93c96b009f", "is_incremental": false, "links": [ { "href": "http://127.0.0.1:43581/v3/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/backups/a3469ffa-acb3-427d-b31f-1c93c96b009f", "rel": "self" }, { "href": "http://127.0.0.1:43581/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/backups/a3469ffa-acb3-427d-b31f-1c93c96b009f", "rel": "bookmark" } ], "name": "backup001", "object_count": 0, "size": 10, "snapshot_id": null, "status": "creating", "updated_at": "2023-06-23T11:56:04.398251", "volume_id": "b894eba0-506d-4019-b7b2-8508605017ba" } ] }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/samples/backups/v3.9/backups-list-response.json0000664000175000017500000000112200000000000027161 0ustar00zuulzuul00000000000000{ "backups": [ { "id": "f08aaa97-3644-4e46-9e0b-7cddce86db9c", "links": [ { "href": "http://127.0.0.1:33071/v3/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/backups/f08aaa97-3644-4e46-9e0b-7cddce86db9c", "rel": "self" }, { "href": "http://127.0.0.1:33071/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/backups/f08aaa97-3644-4e46-9e0b-7cddce86db9c", "rel": "bookmark" } ], "name": "backup001" } ] }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/samples/backups-list-response.json0000664000175000017500000000104100000000000025032 0ustar00zuulzuul00000000000000{ "backups": [ { "id": "5e7a312e-af39-4fc0-8633-b8c2cdabb67d", "links": [{ "href": "https://158.69.65.111/volume/v3/ca730406ba3c40b0870e0bd431271736/backups/5e7a312e-af39-4fc0-8633-b8c2cdabb67d", "rel": "self" }, { "href": "https://158.69.65.111/volume/ca730406ba3c40b0870e0bd431271736/backups/5e7a312e-af39-4fc0-8633-b8c2cdabb67d", "rel": "bookmark" }], "name": "tempest-VolumesBackupsAdminTest-Backup-1385312480" } ], "count": 1 }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/samples/cgsnapshots-create-request.json0000664000175000017500000000031700000000000026065 0ustar00zuulzuul00000000000000{ "cgsnapshot": { 
"consistencygroup_id": "6f519a48-3183-46cf-a32f-41815f814546", "name": "firstcg", "description": "first consistency group", "status": "creating" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/samples/cgsnapshots-create-response.json0000664000175000017500000000015600000000000026234 0ustar00zuulzuul00000000000000{ "cgsnapshot": { "id": "6f519a48-3183-46cf-a32f-41815f816666", "name": "firstcg" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/samples/cgsnapshots-list-detailed-response.json0000664000175000017500000000125000000000000027511 0ustar00zuulzuul00000000000000{ "cgsnapshots": [ { "id": "6f519a48-3183-46cf-a32f-41815f813986", "consistencygroup_id": "6f519a48-3183-46cf-a32f-41815f814444", "status": "available", "created_at": "2015-09-16T09:28:52.000000", "name": "my-cg1", "description": "my first consistency group" }, { "id": "aed36625-a6d7-4681-ba59-c7ba3d18c148", "consistencygroup_id": "aed36625-a6d7-4681-ba59-c7ba3d18dddd", "status": "error", "created_at": "2015-09-16T09:31:15.000000", "name": "my-cg2", "description": "Edited description" } ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/samples/cgsnapshots-list-response.json0000664000175000017500000000036600000000000025747 0ustar00zuulzuul00000000000000{ "cgsnapshots": [ { "id": "6f519a48-3183-46cf-a32f-41815f813986", "name": "my-cg1" }, { "id": "aed36625-a6d7-4681-ba59-c7ba3d18c148", "name": "my-cg2" } ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/samples/cgsnapshots-show-response.json0000664000175000017500000000047400000000000025754 0ustar00zuulzuul00000000000000{ "cgsnapshot": { "id": "6f519a48-3183-46cf-a32f-41815f813986", "consistencygroup_id": "6f519a48-3183-46cf-a32f-41815f814444", "status": "available", "created_at": "2015-09-16T09:28:52.000000", "name": "my-cg1", "description": "my first consistency group" } } ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315576.8111157 cinder-27.0.0/api-ref/source/v3/samples/clusters/0000775000175000017500000000000000000000000021552 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315576.9871173 cinder-27.0.0/api-ref/source/v3/samples/clusters/v3.7/0000775000175000017500000000000000000000000022247 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/samples/clusters/v3.7/cluster-disable-request.json0000664000175000017500000000014400000000000027711 0ustar00zuulzuul00000000000000{ "name": "cluster_name", "binary": "cinder-volume", "disabled_reason": "for testing" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/samples/clusters/v3.7/cluster-disable-response.json0000664000175000017500000000034500000000000030062 0ustar00zuulzuul00000000000000{ "cluster": { "name": "cluster_name", "state": "up", "binary": "cinder-volume", "status": "disabled", "disabled_reason": "for testing", "replication_status": "disable" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 
xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/samples/clusters/v3.7/cluster-enable-request.json0000664000175000017500000000007600000000000027540 0ustar00zuulzuul00000000000000{ "name": "cluster_name", "binary": "cinder-volume" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/samples/clusters/v3.7/cluster-enable-response.json0000664000175000017500000000033200000000000027701 0ustar00zuulzuul00000000000000{ "cluster": { "name": "cluster_name", "state": "up", "binary": "cinder-volume", "status": "enabled", "disabled_reason": null, "replication_status": "enable" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/samples/clusters/v3.7/cluster-show-response.json0000664000175000017500000000074000000000000027436 0ustar00zuulzuul00000000000000{ "cluster": { "name": "cluster_name", "binary": "cinder-volume", "state": "up", "status": "enabled", "disabled_reason": null, "created_at": "2016-06-01T02:46:28", "updated_at": "2016-06-01T02:46:28", "num_down_hosts": 0, "num_hosts": 0, "last_heartbeat": "2016-06-01T02:46:28", "replication_status": "enable", "frozen": false, "active_backend_id": "replication1" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/samples/clusters/v3.7/clusters-list-detailed-response.json0000664000175000017500000000105100000000000031361 0ustar00zuulzuul00000000000000{ "clusters": [ { "name": "cluster_name", "binary": "cinder-volume", "state": "up", "status": "enabled", "disabled_reason": null, "created_at": "2016-06-01T02:46:28", "updated_at": "2016-06-01T02:46:28", "num_down_hosts": 0, "num_hosts": 0, "last_heartbeat": "2016-06-01T02:46:28", "replication_status": "enable", "frozen": false, "active_backend_id": "replication1" } ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/samples/clusters/v3.7/clusters-list-response.json0000664000175000017500000000034200000000000027612 0ustar00zuulzuul00000000000000{ "clusters": [ { "name": "cluster_name", "binary": "cinder-volume", "state": "up", "status": "enabled", "replication_status": "enable" } ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/samples/consistency-group-create-from-src-request.json0000664000175000017500000000062000000000000030747 0ustar00zuulzuul00000000000000{ "consistencygroup-from-src": { "name": "firstcg", "description": "first consistency group", "cgsnapshot_id": "6f519a48-3183-46cf-a32f-41815f813986", "source_cgid": "6f519a48-3183-46cf-a32f-41815f814546", "user_id": "6f519a48-3183-46cf-a32f-41815f815555", "project_id": "6f519a48-3183-46cf-a32f-41815f814444", "status": "creating" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/samples/consistency-group-create-request.json0000664000175000017500000000027300000000000027225 0ustar00zuulzuul00000000000000{ "consistencygroup": { "name": "firstcg", "description": "first consistency group", "volume_types": "type1,type2", "availability_zone": "az0" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 
cinder-27.0.0/api-ref/source/v3/samples/consistency-group-create-response.json0000664000175000017500000000050500000000000027371 0ustar00zuulzuul00000000000000{ "consistencygroup": { "status": "error", "description": "first consistency group", "availability_zone": "az0", "created_at": "2016-08-19T19:32:19.000000", "volume_types": ["type1", "type2"], "id": "63d1a274-de38-4384-a97e-475306777027", "name": "firstcg" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/samples/consistency-group-delete-request.json0000664000175000017500000000007300000000000027222 0ustar00zuulzuul00000000000000{ "consistencygroup": { "force": false } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/samples/consistency-group-show-response.json0000664000175000017500000000053100000000000027105 0ustar00zuulzuul00000000000000{ "consistencygroup": { "id": "6f519a48-3183-46cf-a32f-41815f813986", "status": "available", "availability_zone": "az1", "created_at": "2015-09-16T09:28:52.000000", "name": "my-cg1", "description": "my first consistency group", "volume_types": [ "123456" ] } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/samples/consistency-group-update-request.json0000664000175000017500000000033200000000000027240 0ustar00zuulzuul00000000000000{ "consistencygroup": { "name": "my_cg", "description": "My consistency group", "add_volumes": "volume-uuid-1,volume-uuid-2", "remove_volumes": "volume-uuid-8,volume-uuid-9" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/samples/consistency-groups-list-detailed-response.json0000664000175000017500000000136400000000000031041 0ustar00zuulzuul00000000000000{ "consistencygroups": [ { "id": "6f519a48-3183-46cf-a32f-41815f813986", "status": "available", "availability_zone": "az1", "created_at": "2015-09-16T09:28:52.000000", "name": "my-cg1", "description": "my first consistency group", "volume_types": [ "123456" ] }, { "id": "aed36625-a6d7-4681-ba59-c7ba3d18c148", "status": "error", "availability_zone": "az2", "created_at": "2015-09-16T09:31:15.000000", "name": "my-cg2", "description": "Edited description", "volume_types": [ "234567" ] } ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/samples/consistency-groups-list-response.json0000664000175000017500000000037400000000000027270 0ustar00zuulzuul00000000000000{ "consistencygroups": [ { "id": "6f519a48-3183-46cf-a32f-41815f813986", "name": "my-cg1" }, { "id": "aed36625-a6d7-4681-ba59-c7ba3d18c148", "name": "my-cg2" } ] } ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315576.9871173 cinder-27.0.0/api-ref/source/v3/samples/extensions/0000775000175000017500000000000000000000000022105 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/samples/extensions/extensions-list-response.json0000664000175000017500000001711100000000000030005 0ustar00zuulzuul00000000000000{ "extensions": [ { "alias": "os-hosts", "description": "Admin-only host administration.", "links": [], "name": "Hosts", "updated": 
"2011-06-29T00:00:00+00:00" }, { "alias": "os-vol-tenant-attr", "description": "Expose the internal project_id as an attribute of a volume.", "links": [], "name": "VolumeTenantAttribute", "updated": "2011-11-03T00:00:00+00:00" }, { "alias": "os-quota-sets", "description": "Quota management support.", "links": [], "name": "Quotas", "updated": "2011-08-08T00:00:00+00:00" }, { "alias": "os-availability-zone", "description": "Describe Availability Zones.", "links": [], "name": "AvailabilityZones", "updated": "2013-06-27T00:00:00+00:00" }, { "alias": "os-volume-encryption-metadata", "description": "Volume encryption metadata retrieval support.", "links": [], "name": "VolumeEncryptionMetadata", "updated": "2013-07-10T00:00:00+00:00" }, { "alias": "backups", "description": "Backups support.", "links": [], "name": "Backups", "updated": "2012-12-12T00:00:00+00:00" }, { "alias": "os-snapshot-actions", "description": "Enable snapshot manager actions.", "links": [], "name": "SnapshotActions", "updated": "2013-07-16T00:00:00+00:00" }, { "alias": "os-volume-actions", "description": "Enable volume actions.", "links": [], "name": "VolumeActions", "updated": "2012-05-31T00:00:00+00:00" }, { "alias": "os-snapshot-manage", "description": "Allows existing backend storage to be 'managed' by Cinder.", "links": [], "name": "SnapshotManage", "updated": "2014-12-31T00:00:00+00:00" }, { "alias": "os-volume-unmanage", "description": "Enable volume unmanage operation.", "links": [], "name": "VolumeUnmanage", "updated": "2012-05-31T00:00:00+00:00" }, { "alias": "consistencygroups", "description": "consistency groups support.", "links": [], "name": "Consistencygroups", "updated": "2014-08-18T00:00:00+00:00" }, { "alias": "os-vol-host-attr", "description": "Expose host as an attribute of a volume.", "links": [], "name": "VolumeHostAttribute", "updated": "2011-11-03T00:00:00+00:00" }, { "alias": "encryption", "description": "Encryption support for volume types.", "links": [], "name": "VolumeTypeEncryption", "updated": "2013-07-01T00:00:00+00:00" }, { "alias": "os-vol-image-meta", "description": "Show image metadata associated with the volume.", "links": [], "name": "VolumeImageMetadata", "updated": "2012-12-07T00:00:00+00:00" }, { "alias": "os-types-manage", "description": "Types manage support.", "links": [], "name": "TypesManage", "updated": "2011-08-24T00:00:00+00:00" }, { "alias": "capabilities", "description": "Capabilities support.", "links": [], "name": "Capabilities", "updated": "2015-08-31T00:00:00+00:00" }, { "alias": "cgsnapshots", "description": "cgsnapshots support.", "links": [], "name": "Cgsnapshots", "updated": "2014-08-18T00:00:00+00:00" }, { "alias": "os-types-extra-specs", "description": "Type extra specs support.", "links": [], "name": "TypesExtraSpecs", "updated": "2011-08-24T00:00:00+00:00" }, { "alias": "os-used-limits", "description": "Provide data on limited resources that are being used.", "links": [], "name": "UsedLimits", "updated": "2013-10-03T00:00:00+00:00" }, { "alias": "os-vol-mig-status-attr", "description": "Expose migration_status as an attribute of a volume.", "links": [], "name": "VolumeMigStatusAttribute", "updated": "2013-08-08T00:00:00+00:00" }, { "alias": "os-volume-type-access", "description": "Volume type access support.", "links": [], "name": "VolumeTypeAccess", "updated": "2014-06-26T00:00:00Z" }, { "alias": "os-extended-services", "description": "Extended services support.", "links": [], "name": "ExtendedServices", "updated": "2014-01-10T00:00:00-00:00" }, { "alias": 
"os-extended-snapshot-attributes", "description": "Extended SnapshotAttributes support.", "links": [], "name": "ExtendedSnapshotAttributes", "updated": "2012-06-19T00:00:00+00:00" }, { "alias": "os-snapshot-unmanage", "description": "Enable volume unmanage operation.", "links": [], "name": "SnapshotUnmanage", "updated": "2014-12-31T00:00:00+00:00" }, { "alias": "qos-specs", "description": "QoS specs support.", "links": [], "name": "Qos_specs_manage", "updated": "2013-08-02T00:00:00+00:00" }, { "alias": "os-quota-class-sets", "description": "Quota classes management support.", "links": [], "name": "QuotaClasses", "updated": "2012-03-12T00:00:00+00:00" }, { "alias": "os-volume-transfer", "description": "Volume transfer management support.", "links": [], "name": "VolumeTransfer", "updated": "2013-05-29T00:00:00+00:00" }, { "alias": "os-volume-manage", "description": "Allows existing backend storage to be 'managed' by Cinder.", "links": [], "name": "VolumeManage", "updated": "2014-02-10T00:00:00+00:00" }, { "alias": "os-admin-actions", "description": "Enable admin actions.", "links": [], "name": "AdminActions", "updated": "2012-08-25T00:00:00+00:00" }, { "alias": "os-services", "description": "Services support.", "links": [], "name": "Services", "updated": "2012-10-28T00:00:00-00:00" }, { "alias": "scheduler-stats", "description": "Scheduler stats support.", "links": [], "name": "Scheduler_stats", "updated": "2014-09-07T00:00:00+00:00" }, { "alias": "OS-SCH-HNT", "description": "Pass arbitrary key/value pairs to the scheduler.", "links": [], "name": "SchedulerHints", "updated": "2013-04-18T00:00:00+00:00" } ] }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/samples/get-default-type-response.json0000664000175000017500000000023600000000000025616 0ustar00zuulzuul00000000000000{ "default_type": { "project_id": "6685584b-1eac-4da6-b5c3-555430cf68ff", "volume_type_id": "40ec6e5e-c9bd-4170-8740-c1cd42d7eabb" } }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/samples/get-default-types-response.json0000664000175000017500000000050700000000000026002 0ustar00zuulzuul00000000000000{ "default_types": [ { "project_id": "6685584b-1eac-4da6-b5c3-555430cf68ff", "volume_type_id": "40ec6e5e-c9bd-4170-8740-c1cd42d7eabb" }, { "project_id": "dd46ea3e-6f3f-4e50-85fa-40c182e25d12", "volume_type_id": "9fb51b63-3cd4-493f-9380-53d8f0a04bd4" } ] }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/samples/group-create-from-src-request.json0000664000175000017500000000031500000000000026411 0ustar00zuulzuul00000000000000{ "create-from-src": { "name": "first_group", "description": "first group", "group_snapshot_id": "6f519a48-3183-46cf-a32f-41815f813986", "source_group_id": null } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/samples/group-create-from-src-response.json0000664000175000017500000000015500000000000026561 0ustar00zuulzuul00000000000000{ "group": { "id": "6f519a48-3183-46cf-a32f-41815f816666", "name": "first_group" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/samples/group-create-request.json0000664000175000017500000000051300000000000024663 
0ustar00zuulzuul00000000000000{ "group": { "name": "first_group", "description": "first group", "group_type": "29514915-5208-46ab-9ece-1cc4688ad0c1", "volume_types": [ "4e9e6d23-eed0-426d-b90a-28f87a94b6fe", "c4daaf47-c530-4901-b28e-f5f0a359c4e6" ], "availability_zone": "az0" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/samples/group-create-response.json0000664000175000017500000000015500000000000025033 0ustar00zuulzuul00000000000000{ "group": { "id": "6f519a48-3183-46cf-a32f-41815f816666", "name": "first_group" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/samples/group-delete-request.json0000664000175000017500000000007200000000000024662 0ustar00zuulzuul00000000000000{ "delete": { "delete-volumes": false } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/samples/group-replication-disable.json0000664000175000017500000000004200000000000025641 0ustar00zuulzuul00000000000000{ "disable_replication": {} } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/samples/group-replication-enable.json0000664000175000017500000000004100000000000025463 0ustar00zuulzuul00000000000000{ "enable_replication": {} } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/samples/group-replication-failover.json0000664000175000017500000000017500000000000026054 0ustar00zuulzuul00000000000000{ "failover_replication": { "allow_attached_volume": true, "secondary_backend_id": "vendor-id-1" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/samples/group-replication-list-targets.json0000664000175000017500000000004700000000000026665 0ustar00zuulzuul00000000000000{ "list_replication_targets": {} } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/samples/group-replication-target.json0000664000175000017500000000015300000000000025527 0ustar00zuulzuul00000000000000{ "replication_targets": { "backend_id": "vendor-id-1", "unique_key": "value1" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/samples/group-reset-status-request.json0000664000175000017500000000007500000000000026066 0ustar00zuulzuul00000000000000{ "reset_status": { "status": "available" } }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/samples/group-show-response.json0000664000175000017500000000113600000000000024550 0ustar00zuulzuul00000000000000{ "group": { "id": "6f519a48-3183-46cf-a32f-41815f813986", "status": "available", "availability_zone": "az1", "created_at": "2015-09-16T09:28:52.000000", "name": "first_group", "description": "my first group", "group_type": "29514915-5208-46ab-9ece-1cc4688ad0c1", "volume_types": [ "c4daaf47-c530-4901-b28e-f5f0a359c4e6" ], "volumes": ["a2cdf1ad-5497-4e57-bd7d-f573768f3d03"], "group_snapshot_id": null, "source_group_id": null, "project_id": "7ccf4863071f44aeb8f141f65780c51b" } } 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/samples/group-snapshot-reset-status-request.json0000664000175000017500000000007500000000000027723 0ustar00zuulzuul00000000000000{ "reset_status": { "status": "available" } }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/samples/group-snapshots-create-request.json0000664000175000017500000000026400000000000026706 0ustar00zuulzuul00000000000000{ "group_snapshot": { "group_id": "6f519a48-3183-46cf-a32f-41815f814546", "name": "first_group_snapshot", "description": "first group snapshot" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/samples/group-snapshots-create-response.json0000664000175000017500000000030000000000000027043 0ustar00zuulzuul00000000000000{ "group_snapshot": { "id": "6f519a48-3183-46cf-a32f-41815f816666", "name": "first_group_snapshot", "group_type_id": "58737af7-786b-48b7-ab7c-2447e74b0ef4" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/samples/group-snapshots-list-detailed-response.json0000664000175000017500000000166100000000000030337 0ustar00zuulzuul00000000000000{ "group_snapshots": [ { "id": "6f519a48-3183-46cf-a32f-41815f813986", "group_id": "6f519a48-3183-46cf-a32f-41815f814444", "status": "available", "created_at": "2015-09-16T09:28:52.000000", "name": "my_group_snapshot1", "description": "my first group snapshot", "group_type_id": "0ef094a2-d9fd-4c79-acfd-ac60a0506b7d", "project_id": "7ccf4863071f44aeb8f141f65780c51b" }, { "id": "aed36625-a6d7-4681-ba59-c7ba3d18c148", "group_id": "aed36625-a6d7-4681-ba59-c7ba3d18dddd", "status": "error", "created_at": "2015-09-16T09:31:15.000000", "name": "my_group_snapshot2", "description": "Edited description", "group_type_id": "7270c56e-6354-4528-8e8b-f54dee2232c8", "project_id": "7ccf4863071f44aeb8f141f65780c51b" } ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/samples/group-snapshots-list-response.json0000664000175000017500000000042200000000000026560 0ustar00zuulzuul00000000000000{ "group_snapshots": [ { "id": "6f519a48-3183-46cf-a32f-41815f813986", "name": "my_group_snapshot1" }, { "id": "aed36625-a6d7-4681-ba59-c7ba3d18c148", "name": "my_group_snapshot2" } ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/samples/group-snapshots-show-response.json0000664000175000017500000000067100000000000026573 0ustar00zuulzuul00000000000000{ "group_snapshot": { "id": "6f519a48-3183-46cf-a32f-41815f813986", "group_id": "6f519a48-3183-46cf-a32f-41815f814444", "status": "available", "created_at": "2015-09-16T09:28:52.000000", "name": "my_group_snapshot1", "description": "my first group snapshot", "group_type_id": "7270c56e-6354-4528-8e8b-f54dee2232c8", "project_id": "7ccf4863071f44aeb8f141f65780c51b" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/samples/group-type-create-request.json0000664000175000017500000000034400000000000025644 0ustar00zuulzuul00000000000000{ "group_type": { "name": "grp-type-001", "description": "group type 0001", 
"is_public": true, "group_specs": { "consistent_group_snapshot_enabled": " False" } } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/samples/group-type-default-response.json0000664000175000017500000000033100000000000026167 0ustar00zuulzuul00000000000000{ "group_type": { "id": "7270c56e-6354-4528-8e8b-f54dee2232c8", "name": "group-type-test", "description": "default group type", "is_public": true, "group_specs": {} } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/samples/group-type-show-response.json0000664000175000017500000000043100000000000025524 0ustar00zuulzuul00000000000000{ "group_type": { "id": "6685584b-1eac-4da6-b5c3-555430cf68ff", "name": "grp-type-001", "description": "group type 001", "is_public": true, "group_specs": { "consistent_group_snapshot_enabled": " False" } } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/samples/group-type-specs-create-request.json0000664000175000017500000000012200000000000026751 0ustar00zuulzuul00000000000000{ "group_specs": { "key1": "value1", "key2": "value2" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/samples/group-type-specs-create-response.json0000664000175000017500000000012200000000000027117 0ustar00zuulzuul00000000000000{ "group_specs": { "key1": "value1", "key2": "value2" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/samples/group-type-specs-list-response.json0000664000175000017500000000012200000000000026627 0ustar00zuulzuul00000000000000{ "group_specs": { "key1": "value1", "key2": "value2" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/samples/group-type-specs-show-response.json0000664000175000017500000000003100000000000026633 0ustar00zuulzuul00000000000000{ "key1": "value1" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/samples/group-type-specs-update-request.json0000664000175000017500000000003100000000000026767 0ustar00zuulzuul00000000000000{ "key1": "value1" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/samples/group-type-specs-update-response.json0000664000175000017500000000003100000000000027135 0ustar00zuulzuul00000000000000{ "key1": "value1" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/samples/group-type-update-request.json0000664000175000017500000000020200000000000025654 0ustar00zuulzuul00000000000000{ "group_type": { "name": "grp-type-001", "description": "group type 0001", "is_public": true } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/samples/group-types-list-response.json0000664000175000017500000000113300000000000025702 0ustar00zuulzuul00000000000000{ "group_types": [ { "is_public": true, "group_specs": { "consistent_group_snapshot_enabled": " False" }, "id": 
"6685584b-1eac-4da6-b5c3-555430cf68ff", "name": "group_type1", "description": "tempest-group-type-description-1261576824" }, { "is_public": true, "group_specs": {}, "id": "8eb69a46-df97-4e41-9586-9a40a7533803", "name": "group_type2", "description": "tempest-group-type-description-3927295731" } ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/samples/group-update-request.json0000664000175000017500000000030600000000000024702 0ustar00zuulzuul00000000000000{ "group": { "name": "my_group", "description": "My group", "add_volumes": "volume-uuid-1,volume-uuid-2", "remove_volumes": "volume-uuid-8,volume-uuid-9" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/samples/groups-list-detailed-response.json0000664000175000017500000000233100000000000026475 0ustar00zuulzuul00000000000000{ "groups": [ { "id": "6f519a48-3183-46cf-a32f-41815f813986", "status": "available", "availability_zone": "az1", "created_at": "2015-09-16T09:28:52.000000", "name": "my_group1", "description": "my first group", "group_type": "29514915-5208-46ab-9ece-1cc4688ad0c1", "volume_types": [ "4e9e6d23-eed0-426d-b90a-28f87a94b6fe", "a3d55d15-eeb1-4816-ada9-bf82decc09b3" ], "volumes": ["a2cdf1ad-5497-4e57-bd7d-f573768f3d03"], "project_id": "7ccf4863071f44aeb8f141f65780c51b" }, { "id": "aed36625-a6d7-4681-ba59-c7ba3d18c148", "status": "error", "availability_zone": "az2", "created_at": "2015-09-16T09:31:15.000000", "name": "my_group2", "description": "Edited description", "group_type": "f8645498-1323-47a2-9442-5c57724d2e3c", "volume_types": [ "c4daaf47-c530-4901-b28e-f5f0a359c4e6" ], "volumes": ["a2cdf1ad-5497-4e57-bd7d-f573768f3d03"], "project_id": "7ccf4863071f44aeb8f141f65780c51b" } ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/samples/groups-list-response.json0000664000175000017500000000036700000000000024733 0ustar00zuulzuul00000000000000{ "groups": [ { "id": "6f519a48-3183-46cf-a32f-41815f813986", "name": "my_group1" }, { "id": "aed36625-a6d7-4681-ba59-c7ba3d18c148", "name": "my_group2" } ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/samples/host-attach-request.json0000664000175000017500000000007400000000000024507 0ustar00zuulzuul00000000000000{ "os-attach": { "host_name": "my_host" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/samples/hosts-get-response.json0000664000175000017500000000116600000000000024356 0ustar00zuulzuul00000000000000{ "host": [{ "resource": { "volume_count": "8", "total_volume_gb": "11", "total_snapshot_gb": "1", "project": "(total)", "host": "node1@rbd-sas", "snapshot_count": "1" } }, { "resource": { "volume_count": "8", "total_volume_gb": "11", "total_snapshot_gb": "1", "project": "f21a9c86d7114bf99c711f4874d80474", "host": "node1@rbd-sas", "snapshot_count": "1" } }] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/samples/hosts-list-response.json0000664000175000017500000000130700000000000024547 0ustar00zuulzuul00000000000000{ "hosts": [{ "service-status": "available", "service": "cinder-backup", "zone": "nova", "service-state": "enabled", 
"host_name": "node1", "last-update": "2017-03-09T21:38:41.000000" }, { "service-status": "available", "service": "cinder-scheduler", "zone": "nova", "service-state": "enabled", "host_name": "node1", "last-update": "2017-03-09T21:38:38.000000" }, { "service-status": "available", "service": "cinder-volume", "zone": "nova", "service-state": "enabled", "host_name": "node1@lvm", "last-update": "2017-03-09T21:38:35.000000" }] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/samples/image-metadata-show-request.json0000664000175000017500000000004500000000000026104 0ustar00zuulzuul00000000000000{ "os-show_image_metadata": {} } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/samples/image-metadata-show-response.json0000664000175000017500000000011600000000000026251 0ustar00zuulzuul00000000000000{ "metadata": { "key1": "value1", "key2": "value2" } }././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315576.9871173 cinder-27.0.0/api-ref/source/v3/samples/limits/0000775000175000017500000000000000000000000021207 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/samples/limits/limits-show-response.json0000664000175000017500000000071500000000000026220 0ustar00zuulzuul00000000000000{ "limits": { "rate": [], "absolute": { "totalSnapshotsUsed": 0, "maxTotalBackups": 10, "maxTotalVolumeGigabytes": 1000, "maxTotalSnapshots": 10, "maxTotalBackupGigabytes": 1000, "totalBackupGigabytesUsed": 0, "maxTotalVolumes": 10, "totalVolumesUsed": 0, "totalBackupsUsed": 0, "totalGigabytesUsed": 0 } } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/samples/messages-list-response.json0000664000175000017500000000343100000000000025216 0ustar00zuulzuul00000000000000{ "messages": [{ "request_id": "req-c1216709-afba-4703-a1a3-22eda88f2f5a", "links": [ { "href": "http://localhost:8776/v3/cd609134301246f0a3faa9c3da22082e/messages/c506cd4b-9048-43bc-97ef-0d7dec369b42", "rel": "self" }, { "href": "http://localhost:8776/cd609134301246f0a3faa9c3da22082e/messages/c506cd4b-9048-43bc-97ef-0d7dec369b42", "rel": "bookmark" } ], "message_level": "ERROR", "event_id": "VOLUME_000002", "created_at": "2014-10-28T00:00:00-00:00", "guaranteed_until": "2014-10-28T00:00:00-00:00", "resource_uuid": "d5f6c517-c3e8-45fe-b994-b11118e4cacf", "id": "c506cd4b-9048-43bc-97ef-0d7dec369b42", "resource_type": "VOLUME", "user_message": "No storage could be allocated for this volume request." },{ "request_id": "req-c1216709-afba-4703-a1a3-22eda88f2f5a", "links": [ { "href": "http://localhost:8776/v3/cd609134301246f0a3faa9c3da22082e/messages/c506cd4b-9048-43bc-97ef-0d7dec369b42", "rel": "self" }, { "href": "http://localhost:8776/cd609134301246f0a3faa9c3da22082e/messages/c506cd4b-9048-43bc-97ef-0d7dec369b42", "rel": "bookmark" } ], "message_level": "ERROR", "event_id": "VOLUME_000002", "created_at": "2014-10-28T00:00:00-00:00", "guaranteed_until": "2014-10-28T00:00:00-00:00", "resource_uuid": "d5f6c517-c3e8-45fe-b994-b11118e4df4e", "id": "c506cd4b-9048-43bc-97ef-0d7dec36d5gt", "resource_type": "VOLUME", "user_message": "No storage could be allocated for this volume request." 
}] }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/samples/messages-show-response.json0000664000175000017500000000162400000000000025225 0ustar00zuulzuul00000000000000{ "message": { "request_id": "req-c1216709-afba-4703-a1a3-22eda88f2f5a", "links": [ { "href": "http://localhost:8776/v3/cd609134301246f0a3faa9c3da22082e/messages/c506cd4b-9048-43bc-97ef-0d7dec369b42", "rel": "self" }, { "href": "http://localhost:8776/cd609134301246f0a3faa9c3da22082e/messages/c506cd4b-9048-43bc-97ef-0d7dec369b42", "rel": "bookmark" } ], "message_level": "ERROR", "event_id": "VOLUME_000002", "created_at": "2014-10-28T00:00:00-00:00", "guaranteed_until": "2014-10-28T00:00:00-00:00", "resource_uuid": "d5f6c517-c3e8-45fe-b994-b11118e4cacf", "id": "c506cd4b-9048-43bc-97ef-0d7dec369b42", "resource_type": "VOLUME", "user_message": "No storage could be allocated for this volume request." } }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/samples/pools-list-detailed-response.json0000664000175000017500000000162700000000000026321 0ustar00zuulzuul00000000000000{ "pools": [ { "name": "pool1", "capabilities": { "updated": "2014-10-28T00:00:00-00:00", "total_capacity_gb": 1024, "free_capacity_gb": 100, "volume_backend_name": "pool1", "reserved_percentage": 0, "driver_version": "1.0.0", "storage_protocol": "iSCSI", "QoS_support": false } }, { "name": "pool2", "capabilities": { "updated": "2014-10-28T00:00:00-00:00", "total_capacity_gb": 512, "free_capacity_gb": 200, "volume_backend_name": "pool2", "reserved_percentage": 0, "driver_version": "1.0.1", "storage_protocol": "iSER", "QoS_support": true } } ] } ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315576.9911175 cinder-27.0.0/api-ref/source/v3/samples/qos/0000775000175000017500000000000000000000000020510 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/samples/qos/qos-create-request.json0000664000175000017500000000010000000000000025123 0ustar00zuulzuul00000000000000{ "qos_specs": { "name": "reliability-spec" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/samples/qos/qos-create-response.json0000664000175000017500000000106100000000000025300 0ustar00zuulzuul00000000000000{ "qos_specs": { "specs": {}, "consumer": "back-end", "name": "reliability-spec", "id": "599ef437-1c99-42ec-9fc6-239d0519fef1" }, "links": [ { "href": "http://23.253.248.171:8776/v3/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/qos-specs/599ef437-1c99-42ec-9fc6-239d0519fef1", "rel": "self" }, { "href": "http://23.253.248.171:8776/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/qos-specs/599ef437-1c99-42ec-9fc6-239d0519fef1", "rel": "bookmark" } ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/samples/qos/qos-list-response.json0000664000175000017500000000031600000000000025012 0ustar00zuulzuul00000000000000{ "qos_specs": [ { "consumer": "back-end", "id": "62c17294-2e52-4877-a01f-a30388749d9d", "name": "reliability-spec", "specs": {} } ] }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 
cinder-27.0.0/api-ref/source/v3/samples/qos/qos-show-response.json0000664000175000017500000000106100000000000025015 0ustar00zuulzuul00000000000000{ "qos_specs": { "specs": {}, "consumer": "back-end", "name": "reliability-spec", "id": "0388d6c6-d5d4-42a3-b289-95205c50dd15" }, "links": [ { "href": "http://23.253.228.211:8776/v3/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/qos-specs/0388d6c6-d5d4-42a3-b289-95205c50dd15", "rel": "self" }, { "href": "http://23.253.228.211:8776/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/qos-specs/0388d6c6-d5d4-42a3-b289-95205c50dd15", "rel": "bookmark" } ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/samples/qos/qos-unset-request.json0000664000175000017500000000004700000000000025030 0ustar00zuulzuul00000000000000{ "keys": [ "key1" ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/samples/qos/qos-update-request.json0000664000175000017500000000006200000000000025151 0ustar00zuulzuul00000000000000{ "qos_specs": { "delay": "1" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/samples/qos/qos-update-response.json0000664000175000017500000000006200000000000025317 0ustar00zuulzuul00000000000000{ "qos_specs": { "delay": "1" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/samples/qos/qos_show_response.json0000664000175000017500000000003600000000000025162 0ustar00zuulzuul00000000000000{ "qos_associations": [] }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/samples/quota-classes-show-response.json0000664000175000017500000000054500000000000026203 0ustar00zuulzuul00000000000000{ "quota_class_set": { "per_volume_gigabytes": -1, "volumes_lvmdriver-1": -1, "groups": 10, "gigabytes": 1000, "backup_gigabytes": 1000, "snapshots": 10, "gigabytes_lvmdriver-1": -1, "volumes": 10, "snapshots_lvmdriver-1": -1, "backups": 10, "id": "default" } }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/samples/quota-classes-update-request.json0000664000175000017500000000017000000000000026331 0ustar00zuulzuul00000000000000{ "quota_class_set": { "volumes_lmv": 10, "gigabytes_lmv": 1000, "snapshots_lmv": 10 } }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/samples/quota-classes-update-response.json0000664000175000017500000000051400000000000026501 0ustar00zuulzuul00000000000000{ "quota_class_set": { "per_volume_gigabytes": -1, "volumes_lvmdriver-1": -1, "groups": 10, "gigabytes": 1000, "backup_gigabytes": 1000, "snapshots": 10, "gigabytes_lvmdriver-1": -1, "volumes": 10, "snapshots_lvmdriver-1": -1, "backups": 10 } }././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315576.9911175 cinder-27.0.0/api-ref/source/v3/samples/quota_classes/0000775000175000017500000000000000000000000022554 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 
cinder-27.0.0/api-ref/source/v3/samples/quota_classes/quota-classes-show-response.json0000664000175000017500000000055000000000000031045 0ustar00zuulzuul00000000000000{ "quota_class_set": { "backup_gigabytes": 1000, "backups": 10, "gigabytes": 1000, "gigabytes___DEFAULT__": -1, "groups": 10, "id": "test_class", "per_volume_gigabytes": -1, "snapshots": 10, "snapshots___DEFAULT__": -1, "volumes": 10, "volumes___DEFAULT__": -1 } }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/samples/quota_classes/quota-classes-update-request.json0000664000175000017500000000015400000000000031201 0ustar00zuulzuul00000000000000{ "quota_class_set": { "volumes": 10, "gigabytes": 1000, "snapshots": 10 } }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/samples/quota_classes/quota-classes-update-response.json0000664000175000017500000000051400000000000031347 0ustar00zuulzuul00000000000000{ "quota_class_set": { "backup_gigabytes": 1000, "backups": 10, "gigabytes": 1000, "gigabytes___DEFAULT__": -1, "groups": 10, "per_volume_gigabytes": -1, "snapshots": 10, "snapshots___DEFAULT__": -1, "volumes": 10, "volumes___DEFAULT__": -1 } }././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315576.9911175 cinder-27.0.0/api-ref/source/v3/samples/quota_sets/0000775000175000017500000000000000000000000022075 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/samples/quota_sets/quotas-show-defaults-response.json0000664000175000017500000000054300000000000030725 0ustar00zuulzuul00000000000000{ "quota_set": { "backup_gigabytes": 1000, "backups": 10, "gigabytes": 1000, "gigabytes___DEFAULT__": -1, "groups": 10, "id": "fake_tenant", "per_volume_gigabytes": -1, "snapshots": 10, "snapshots___DEFAULT__": -1, "volumes": 10, "volumes___DEFAULT__": -1 } }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/samples/quota_sets/quotas-show-response.json0000664000175000017500000000054300000000000027120 0ustar00zuulzuul00000000000000{ "quota_set": { "backup_gigabytes": 1000, "backups": 10, "gigabytes": 1000, "gigabytes___DEFAULT__": -1, "groups": 10, "id": "fake_tenant", "per_volume_gigabytes": -1, "snapshots": 10, "snapshots___DEFAULT__": -1, "volumes": 10, "volumes___DEFAULT__": -1 } }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/samples/quota_sets/quotas-show-usage-response.json0000664000175000017500000000226500000000000030225 0ustar00zuulzuul00000000000000{ "quota_set": { "backup_gigabytes": { "in_use": 0, "limit": 1000, "reserved": 0 }, "backups": { "in_use": 0, "limit": 10, "reserved": 0 }, "gigabytes": { "in_use": 0, "limit": 1000, "reserved": 0 }, "gigabytes___DEFAULT__": { "in_use": 0, "limit": -1, "reserved": 0 }, "groups": { "in_use": 0, "limit": 10, "reserved": 0 }, "id": "fake_tenant", "per_volume_gigabytes": { "in_use": 0, "limit": -1, "reserved": 0 }, "snapshots": { "in_use": 0, "limit": 10, "reserved": 0 }, "snapshots___DEFAULT__": { "in_use": 0, "limit": -1, "reserved": 0 }, "volumes": { "in_use": 0, "limit": 10, "reserved": 0 }, "volumes___DEFAULT__": { "in_use": 0, "limit": -1, "reserved": 0 } } 
}././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/samples/quota_sets/quotas-update-request.json0000664000175000017500000000013500000000000027251 0ustar00zuulzuul00000000000000{ "quota_set":{ "groups": 11, "volumes": 5, "backups": 4 } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/samples/quota_sets/quotas-update-response.json0000664000175000017500000000050400000000000027417 0ustar00zuulzuul00000000000000{ "quota_set": { "backup_gigabytes": 1000, "backups": 4, "gigabytes": 1000, "gigabytes___DEFAULT__": -1, "groups": 11, "per_volume_gigabytes": -1, "snapshots": 10, "snapshots___DEFAULT__": -1, "volumes": 5, "volumes___DEFAULT__": -1 } }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/samples/resource-filters-list-response.json0000664000175000017500000000066100000000000026706 0ustar00zuulzuul00000000000000{ "resource_filters": [ { "filters": [ "name", "status", "image_metadata", "bootable", "migration_status" ], "resource": "volume" }, { "filters": [ "name", "status", "volume_id" ], "resource": "snapshot" } ] }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/samples/services-disable-log-request.json0000664000175000017500000000014500000000000026272 0ustar00zuulzuul00000000000000{ "binary": "cinder-volume", "host": "devstack@lvmdriver-1", "disabled_reason": "test" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/samples/services-disable-log-response.json0000664000175000017500000000025000000000000026435 0ustar00zuulzuul00000000000000{ "disabled": true, "status": "disabled", "host": "devstack@lvmdriver-1", "service": "", "binary": "cinder-volume", "disabled_reason": "test" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/samples/services-disable-request.json0000664000175000017500000000010600000000000025510 0ustar00zuulzuul00000000000000{ "binary": "cinder-volume", "host": "devstack@lvmdriver-1" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/samples/services-disable-response.json0000664000175000017500000000021100000000000025653 0ustar00zuulzuul00000000000000{ "disabled": true, "status": "disabled", "host": "devstack@lvmdriver-1", "service": "", "binary": "cinder-volume" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/samples/services-enable-request.json0000664000175000017500000000010600000000000025333 0ustar00zuulzuul00000000000000{ "binary": "cinder-volume", "host": "devstack@lvmdriver-1" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/samples/services-enable-response.json0000664000175000017500000000024600000000000025506 0ustar00zuulzuul00000000000000{ "disabled": false, "status": "enabled", "host": "devstack@lvmdriver-1", "service": "", "binary": "cinder-volume", "disabled_reason": null } ././@PaxHeader0000000000000000000000000000002600000000000011453 
xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/samples/services-failover-host-request.json0000664000175000017500000000007700000000000026676 0ustar00zuulzuul00000000000000{ "host": "devstack@lvmdriver-1", "backend_id": null } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/samples/services-freeze-request.json0000664000175000017500000000004300000000000025365 0ustar00zuulzuul00000000000000{ "host": "devstack@rbd-sas" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/samples/services-get-log-request.json0000664000175000017500000000014700000000000025450 0ustar00zuulzuul00000000000000{ "binary": "cinder-volume", "server": "devstack@lvmdriver-1", "prefix": "cinder.volume" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/samples/services-get-log-response.json0000664000175000017500000000107100000000000025613 0ustar00zuulzuul00000000000000{ "log_levels": [{ "binary": "cinder-api", "host": "devstack", "levels": { "cinder.volume.api": "DEBUG" } }, { "binary": "cinder-scheduler", "host": "devstack", "levels": { "cinder.volume.api": "DEBUG" } }, { "binary": "cinder-backup", "host": "devstack", "levels": {} }, { "binary": "cinder-volume", "host": "devstack@lvmdriver-1", "levels": { "cinder.volume.api": "DEBUG" } }] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/samples/services-list-response.json0000664000175000017500000000154300000000000025234 0ustar00zuulzuul00000000000000{ "services": [{ "status": "enabled", "binary": "cinder-scheduler", "zone": "nova", "state": "up", "updated_at": "2017-06-29T05:50:35.000000", "host": "devstack", "disabled_reason": null }, { "status": "enabled", "binary": "cinder-backup", "zone": "nova", "state": "up", "updated_at": "2017-06-29T05:50:42.000000", "host": "devstack", "disabled_reason": null }, { "status": "enabled", "binary": "cinder-volume", "zone": "nova", "frozen": false, "state": "up", "updated_at": "2017-06-29T05:50:39.000000", "cluster": null, "host": "devstack@lvmdriver-1", "replication_status": "disabled", "active_backend_id": null, "disabled_reason": null }] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/samples/services-set-log-request.json0000664000175000017500000000017500000000000025465 0ustar00zuulzuul00000000000000{ "binary": "cinder-volume", "server": "devstack@lvmdriver-1", "prefix": "cinder.volume", "level": "ERROR" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/samples/services-thaw-request.json0000664000175000017500000000004300000000000025050 0ustar00zuulzuul00000000000000{ "host": "devstack@rbd-sas" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/samples/set-default-type-request.json0000664000175000017500000000010400000000000025456 0ustar00zuulzuul00000000000000{ "default_type": { "volume_type": "lvm_backend" } }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 
cinder-27.0.0/api-ref/source/v3/samples/set-default-type-response.json0000664000175000017500000000023600000000000025632 0ustar00zuulzuul00000000000000{ "default_type": { "project_id": "6685584b-1eac-4da6-b5c3-555430cf68ff", "volume_type_id": "40ec6e5e-c9bd-4170-8740-c1cd42d7eabb" } }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/samples/snapshot-force-delete-request.json0000664000175000017500000000003600000000000026461 0ustar00zuulzuul00000000000000{ "os-force_delete": {} } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/samples/snapshot-manage-list-detail-response.json0000664000175000017500000000160200000000000027732 0ustar00zuulzuul00000000000000{ "manageable-snapshots": [ { "cinder_id": null, "reason_not_safe": null, "reference": { "source-name": "lvol0" }, "source_reference": { "source-name": "volume-7c064b34-1e4b-40bd-93ca-4ac5a973661b" }, "safe_to_manage": true, "size": 1, "extra_info": null }, { "cinder_id": "d0c84570-a01f-4579-9789-5e9f266587cd", "reason_not_safe": "already managed", "reference": { "source-name":"_snapshot-d0c84570-a01f-4579-9789-5e9f266587cd" }, "source_reference": { "source-name": "volume-7c064b34-1e4b-40bd-93ca-4ac5a973661b" }, "safe_to_manage": false, "size": 1, "extra_info": null } ] }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/samples/snapshot-manage-list-response.json0000664000175000017500000000121400000000000026471 0ustar00zuulzuul00000000000000{ "manageable-snapshots": [ { "source_reference": { "source-name": "volume-7c064b34-1e4b-40bd-93ca-4ac5a973661b" }, "safe_to_manage": true, "reference": { "source-name": "lvol0" }, "size": 1 }, { "source_reference": { "source-name": "volume-7c064b34-1e4b-40bd-93ca-4ac5a973661b" }, "safe_to_manage": false, "reference": { "source-name": "_snapshot-d0c84570-a01f-4579-9789-5e9f266587cd" }, "size": 1 } ] }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/samples/snapshot-status-reset-request.json0000664000175000017500000000010100000000000026557 0ustar00zuulzuul00000000000000{ "os-reset_status": { "status": "available" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/samples/snapshot-status-update-request.json0000664000175000017500000000014500000000000026727 0ustar00zuulzuul00000000000000{ "os-update_snapshot_status": { "status": "creating", "progress": "80%" } } ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315576.9911175 cinder-27.0.0/api-ref/source/v3/samples/snapshot_manage_extensions/0000775000175000017500000000000000000000000025334 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/samples/snapshot_manage_extensions/snapshot-manage-request.json0000664000175000017500000000034300000000000033002 0ustar00zuulzuul00000000000000{ "snapshot": { "description": null, "metadata": null, "ref": { "source-name": "lvol0" }, "name": null, "volume_id": "1df34919-aba7-4a1b-a614-3b409d71ac03" } }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 
cinder-27.0.0/api-ref/source/v3/samples/snapshot_manage_extensions/snapshot-manage-response.json0000664000175000017500000000100200000000000033141 0ustar00zuulzuul00000000000000{ "snapshot": { "created_at": "2018-09-26T03:45:03.893592", "description": "this is a new snapshot", "id": "b6314a71-9d3d-439a-861d-b790def0d693", "metadata": { "manage-snap-meta1": "value1", "manage-snap-meta2": "value2", "manage-snap-meta3": "value3" }, "name": "new_snapshot", "size": 1, "status": "creating", "updated_at": "null", "volume_id": "1df34919-aba7-4a1b-a614-3b409d71ac03" } }././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315576.9951174 cinder-27.0.0/api-ref/source/v3/samples/snapshots/0000775000175000017500000000000000000000000021730 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/samples/snapshots/snapshot-create-request.json0000664000175000017500000000035300000000000027412 0ustar00zuulzuul00000000000000{ "snapshot": { "name": "snap-001", "description": "Daily backup", "volume_id": "5aa119a8-d25b-45a7-8d1b-88e127885635", "force": true, "metadata": { "key": "v3" } } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/samples/snapshots/snapshot-create-response.json0000664000175000017500000000061400000000000027560 0ustar00zuulzuul00000000000000{ "snapshot": { "created_at": "2019-03-11T16:24:34.469003", "description": "Daily backup", "id": "b36476e5-d18b-47f9-ac69-4818cb43ee21", "metadata": { "key": "v3" }, "name": "snap-001", "size": 10, "status": "creating", "updated_at": null, "volume_id": "d291b81c-6e40-4525-8231-90aa1588121e" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/samples/snapshots/snapshot-metadata-create-request.json0000664000175000017500000000006000000000000031163 0ustar00zuulzuul00000000000000{ "metadata": { "key": "v3" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/samples/snapshots/snapshot-metadata-create-response.json0000664000175000017500000000006200000000000031333 0ustar00zuulzuul00000000000000{ "metadata": { "key": "value" } }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/samples/snapshots/snapshot-metadata-show-key-response.json0000664000175000017500000000005300000000000031636 0ustar00zuulzuul00000000000000{ "meta": { "key": "v3" } }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/samples/snapshots/snapshot-metadata-show-response.json0000664000175000017500000000005700000000000031054 0ustar00zuulzuul00000000000000{ "metadata": { "key": "v3" } }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/samples/snapshots/snapshot-metadata-update-key-request.json0000664000175000017500000000005200000000000031771 0ustar00zuulzuul00000000000000{ "meta": { "key": "new_value" } }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 
cinder-27.0.0/api-ref/source/v3/samples/snapshots/snapshot-metadata-update-key-response.json0000664000175000017500000000006200000000000032140 0ustar00zuulzuul00000000000000{ "meta": { "key": "new_value" } }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/samples/snapshots/snapshot-metadata-update-request.json0000664000175000017500000000007200000000000031205 0ustar00zuulzuul00000000000000{ "metadata": { "new_key": "new_value" } }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/samples/snapshots/snapshot-metadata-update-response.json0000664000175000017500000000007200000000000031353 0ustar00zuulzuul00000000000000{ "metadata": { "new_key": "new_value" } }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/samples/snapshots/snapshot-show-response.json0000664000175000017500000000104400000000000027273 0ustar00zuulzuul00000000000000{ "snapshot": { "created_at": "2019-03-12T04:42:00.809352", "description": "Daily backup", "id": "4a584cae-e4ce-429b-9154-d4c9eb8fda4c", "metadata": { "key": "v3" }, "name": "snap-001", "os-extended-snapshot-attributes:progress": "0%", "os-extended-snapshot-attributes:project_id": "89afd400-b646-4bbc-b12b-c0a4d63e5bd3", "size": 10, "status": "creating", "updated_at": null, "volume_id": "b72c48f1-64b7-4cd8-9745-b12e0be82d37" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/samples/snapshots/snapshot-update-request.json0000664000175000017500000000015700000000000027433 0ustar00zuulzuul00000000000000{ "snapshot": { "name": "snap-002", "description": "This is yet, another snapshot." 
} }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/samples/snapshots/snapshot-update-response.json0000664000175000017500000000063600000000000027603 0ustar00zuulzuul00000000000000{ "snapshot": { "created_at": "2019-03-12T04:53:53.426591", "description": "This is yet, another snapshot.", "id": "43666194-8e72-451a-b7bb-54fef763b2b8", "metadata": { "key": "v3" }, "name": "snap-002", "size": 10, "status": "creating", "updated_at": null, "volume_id": "070c942d-9909-42e9-a467-7a781f150c58" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/samples/snapshots/snapshots-list-detailed-response.json0000664000175000017500000000115500000000000031225 0ustar00zuulzuul00000000000000{ "snapshots": [ { "created_at": "2019-03-11T16:24:36.464445", "description": "Daily backup", "id": "d0083dc5-8795-4c1a-bc9c-74f70006c205", "metadata": { "key": "v3" }, "name": "snap-001", "os-extended-snapshot-attributes:progress": "0%", "os-extended-snapshot-attributes:project_id": "89afd400-b646-4bbc-b12b-c0a4d63e5bd3", "size": 10, "status": "creating", "updated_at": null, "volume_id": "7acd675e-4e06-4653-af9f-2ecd546342d6" } ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/samples/snapshots/snapshots-list-response.json0000664000175000017500000000071400000000000027454 0ustar00zuulzuul00000000000000{ "snapshots": [ { "created_at": "2019-03-11T16:29:08.973832", "description": "Daily backup", "id": "2c228773-50eb-422d-be7e-b5c6ced0c7a9", "metadata": { "key": "v3" }, "name": "snap-001", "size": 10, "status": "creating", "updated_at": null, "volume_id": "428ec041-b999-40d8-8a54-9e98b19406cc" } ] }././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315576.9951174 cinder-27.0.0/api-ref/source/v3/samples/snapshots/v3.14/0000775000175000017500000000000000000000000022503 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/samples/snapshots/v3.14/snapshot-create-response.json0000664000175000017500000000065700000000000030342 0ustar00zuulzuul00000000000000{ "snapshot": { "created_at": "2019-03-11T16:24:34.469003", "description": "Daily backup", "id": "b36476e5-d18b-47f9-ac69-4818cb43ee21", "metadata": { "key": "v3" }, "name": "snap-001", "size": 10, "status": "creating", "updated_at": null, "volume_id": "d291b81c-6e40-4525-8231-90aa1588121e", "group_snapshot_id": null } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/samples/snapshots/v3.14/snapshot-show-response.json0000664000175000017500000000110700000000000030046 0ustar00zuulzuul00000000000000{ "snapshot": { "created_at": "2019-03-12T04:42:00.809352", "description": "Daily backup", "id": "4a584cae-e4ce-429b-9154-d4c9eb8fda4c", "metadata": { "key": "v3" }, "name": "snap-001", "os-extended-snapshot-attributes:progress": "0%", "os-extended-snapshot-attributes:project_id": "89afd400-b646-4bbc-b12b-c0a4d63e5bd3", "size": 10, "status": "creating", "updated_at": null, "volume_id": "b72c48f1-64b7-4cd8-9745-b12e0be82d37", "group_snapshot_id": null } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 
cinder-27.0.0/api-ref/source/v3/samples/snapshots/v3.14/snapshot-update-response.json0000664000175000017500000000070100000000000030347 0ustar00zuulzuul00000000000000{ "snapshot": { "created_at": "2019-03-12T04:53:53.426591", "description": "This is yet, another snapshot.", "id": "43666194-8e72-451a-b7bb-54fef763b2b8", "metadata": { "key": "v3" }, "name": "snap-002", "size": 10, "status": "creating", "updated_at": null, "volume_id": "070c942d-9909-42e9-a467-7a781f150c58", "group_snapshot_id": null } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/samples/snapshots/v3.14/snapshots-list-detailed-response.json0000664000175000017500000000122400000000000031775 0ustar00zuulzuul00000000000000{ "snapshots": [ { "created_at": "2019-03-11T16:24:36.464445", "description": "Daily backup", "id": "d0083dc5-8795-4c1a-bc9c-74f70006c205", "metadata": { "key": "v3" }, "name": "snap-001", "os-extended-snapshot-attributes:progress": "0%", "os-extended-snapshot-attributes:project_id": "89afd400-b646-4bbc-b12b-c0a4d63e5bd3", "size": 10, "status": "creating", "updated_at": null, "volume_id": "7acd675e-4e06-4653-af9f-2ecd546342d6", "group_snapshot_id": null } ] } ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315576.9991174 cinder-27.0.0/api-ref/source/v3/samples/snapshots/v3.41/0000775000175000017500000000000000000000000022503 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/samples/snapshots/v3.41/snapshot-create-response.json0000664000175000017500000000075200000000000030336 0ustar00zuulzuul00000000000000{ "snapshot": { "created_at": "2019-03-11T16:24:34.469003", "description": "Daily backup", "id": "b36476e5-d18b-47f9-ac69-4818cb43ee21", "metadata": { "key": "v3" }, "name": "snap-001", "size": 10, "status": "creating", "updated_at": null, "volume_id": "d291b81c-6e40-4525-8231-90aa1588121e", "group_snapshot_id": null, "user_id": "c853ca26-e8ea-4797-8a52-ee124a013d0e" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/samples/snapshots/v3.41/snapshot-show-response.json0000664000175000017500000000120200000000000030042 0ustar00zuulzuul00000000000000{ "snapshot": { "created_at": "2019-03-12T04:42:00.809352", "description": "Daily backup", "id": "4a584cae-e4ce-429b-9154-d4c9eb8fda4c", "metadata": { "key": "v3" }, "name": "snap-001", "os-extended-snapshot-attributes:progress": "0%", "os-extended-snapshot-attributes:project_id": "89afd400-b646-4bbc-b12b-c0a4d63e5bd3", "size": 10, "status": "creating", "updated_at": null, "volume_id": "b72c48f1-64b7-4cd8-9745-b12e0be82d37", "group_snapshot_id": null, "user_id": "c853ca26-e8ea-4797-8a52-ee124a013d0e" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/samples/snapshots/v3.41/snapshot-update-response.json0000664000175000017500000000077400000000000030361 0ustar00zuulzuul00000000000000{ "snapshot": { "created_at": "2019-03-12T04:53:53.426591", "description": "This is yet, another snapshot.", "id": "43666194-8e72-451a-b7bb-54fef763b2b8", "metadata": { "key": "v3" }, "name": "snap-002", "size": 10, "status": "creating", "updated_at": null, "volume_id": "070c942d-9909-42e9-a467-7a781f150c58", "group_snapshot_id": null, "user_id": 
"c853ca26-e8ea-4797-8a52-ee124a013d0e" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/samples/snapshots/v3.41/snapshots-list-detailed-response.json0000664000175000017500000000132300000000000031775 0ustar00zuulzuul00000000000000{ "snapshots": [ { "created_at": "2019-03-11T16:24:36.464445", "description": "Daily backup", "id": "d0083dc5-8795-4c1a-bc9c-74f70006c205", "metadata": { "key": "v3" }, "name": "snap-001", "os-extended-snapshot-attributes:progress": "0%", "os-extended-snapshot-attributes:project_id": "89afd400-b646-4bbc-b12b-c0a4d63e5bd3", "size": 10, "status": "creating", "updated_at": null, "volume_id": "7acd675e-4e06-4653-af9f-2ecd546342d6", "group_snapshot_id": null, "user_id": "c853ca26-e8ea-4797-8a52-ee124a013d0e" } ] } ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315576.9991174 cinder-27.0.0/api-ref/source/v3/samples/snapshots/v3.65/0000775000175000017500000000000000000000000022511 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/samples/snapshots/v3.65/snapshot-create-response.json0000664000175000017500000000101200000000000030332 0ustar00zuulzuul00000000000000{ "snapshot": { "created_at": "2019-03-11T16:24:34.469003", "description": "Daily backup", "id": "b36476e5-d18b-47f9-ac69-4818cb43ee21", "metadata": { "key": "v3" }, "name": "snap-001", "size": 10, "status": "creating", "updated_at": null, "volume_id": "d291b81c-6e40-4525-8231-90aa1588121e", "group_snapshot_id": null, "user_id": "c853ca26-e8ea-4797-8a52-ee124a013d0e", "consumes_quota": true } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/samples/snapshots/v3.65/snapshot-show-response.json0000664000175000017500000000124200000000000030054 0ustar00zuulzuul00000000000000{ "snapshot": { "created_at": "2019-03-12T04:42:00.809352", "description": "Daily backup", "id": "4a584cae-e4ce-429b-9154-d4c9eb8fda4c", "metadata": { "key": "v3" }, "name": "snap-001", "os-extended-snapshot-attributes:progress": "0%", "os-extended-snapshot-attributes:project_id": "89afd400-b646-4bbc-b12b-c0a4d63e5bd3", "size": 10, "status": "creating", "updated_at": null, "volume_id": "b72c48f1-64b7-4cd8-9745-b12e0be82d37", "group_snapshot_id": null, "user_id": "c853ca26-e8ea-4797-8a52-ee124a013d0e", "consumes_quota": true } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/samples/snapshots/v3.65/snapshot-update-response.json0000664000175000017500000000103400000000000030355 0ustar00zuulzuul00000000000000{ "snapshot": { "created_at": "2019-03-12T04:53:53.426591", "description": "This is yet, another snapshot.", "id": "43666194-8e72-451a-b7bb-54fef763b2b8", "metadata": { "key": "v3" }, "name": "snap-002", "size": 10, "status": "creating", "updated_at": null, "volume_id": "070c942d-9909-42e9-a467-7a781f150c58", "group_snapshot_id": null, "user_id": "c853ca26-e8ea-4797-8a52-ee124a013d0e", "consumes_quota": true } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/samples/snapshots/v3.65/snapshots-list-detailed-response.json0000664000175000017500000000136700000000000032013 0ustar00zuulzuul00000000000000{ "snapshots": [ { 
"created_at": "2019-03-11T16:24:36.464445", "description": "Daily backup", "id": "d0083dc5-8795-4c1a-bc9c-74f70006c205", "metadata": { "key": "v3" }, "name": "snap-001", "os-extended-snapshot-attributes:progress": "0%", "os-extended-snapshot-attributes:project_id": "89afd400-b646-4bbc-b12b-c0a4d63e5bd3", "size": 10, "status": "creating", "updated_at": null, "volume_id": "7acd675e-4e06-4653-af9f-2ecd546342d6", "group_snapshot_id": null, "user_id": "c853ca26-e8ea-4797-8a52-ee124a013d0e", "consumes_quota": true } ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/samples/user-quotas-show-response.json0000664000175000017500000000064300000000000025706 0ustar00zuulzuul00000000000000{ "quota_set": { "cores": 20, "fixed_ips": -1, "floating_ips": 10, "id": "fake_project", "injected_file_content_bytes": 10240, "injected_file_path_bytes": 255, "injected_files": 5, "instances": 10, "key_pairs": 100, "metadata_items": 128, "ram": 51200, "security_group_rules": 20, "security_groups": 10 } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/samples/user-quotas-update-request.json0000664000175000017500000000011300000000000026032 0ustar00zuulzuul00000000000000{ "quota_set": { "force": true, "instances": 9 } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/samples/user-quotas-update-response.json0000664000175000017500000000060400000000000026205 0ustar00zuulzuul00000000000000{ "quota_set": { "cores": 20, "floating_ips": 10, "fixed_ips": -1, "injected_file_content_bytes": 10240, "injected_file_path_bytes": 255, "injected_files": 5, "instances": 9, "key_pairs": 100, "metadata_items": 128, "ram": 51200, "security_group_rules": 20, "security_groups": 10 } } ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315576.9991174 cinder-27.0.0/api-ref/source/v3/samples/versions/0000775000175000017500000000000000000000000021556 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/samples/versions/version-show-response.json0000664000175000017500000000140000000000000026743 0ustar00zuulzuul00000000000000{ "versions": [ { "id": "v3.0", "links": [ { "href": "https://docs.openstack.org/", "rel": "describedby", "type": "text/html" }, { "href": "http://127.0.0.1:44895/v3/", "rel": "self" } ], "media-types": [ { "base": "application/json", "type": "application/vnd.openstack.volume+json;version=3" } ], "min_version": "3.0", "status": "CURRENT", "updated": "2023-08-31T00:00:00Z", "version": "3.71" } ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/samples/versions/versions-response.json0000664000175000017500000000140000000000000026150 0ustar00zuulzuul00000000000000{ "versions": [ { "id": "v3.0", "links": [ { "href": "https://docs.openstack.org/", "rel": "describedby", "type": "text/html" }, { "href": "http://127.0.0.1:45697/v3/", "rel": "self" } ], "media-types": [ { "base": "application/json", "type": "application/vnd.openstack.volume+json;version=3" } ], "min_version": "3.0", "status": "CURRENT", "updated": "2022-08-31T00:00:00Z", "version": "3.71" } ] } 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/samples/volume-attach-request.json0000664000175000017500000000017700000000000025045 0ustar00zuulzuul00000000000000{ "os-attach": { "instance_uuid": "95D9EF50-507D-11E5-B970-0800200C9A66", "mountpoint": "/dev/vdc" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/samples/volume-begin-detaching-request.json0000664000175000017500000000004100000000000026577 0ustar00zuulzuul00000000000000{ "os-begin_detaching": {} } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/samples/volume-bootable-status-update-request.json0000664000175000017500000000007600000000000030167 0ustar00zuulzuul00000000000000{ "os-set_bootable": { "bootable": "True" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/samples/volume-detach-request.json0000664000175000017500000000013600000000000025024 0ustar00zuulzuul00000000000000{ "os-detach": { "attachment_id": "d8777f54-84cf-4809-a679-468ffed56cf1" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/samples/volume-extend-request.json0000664000175000017500000000006300000000000025062 0ustar00zuulzuul00000000000000{ "os-extend": { "new_size": 3 } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/samples/volume-force-delete-request.json0000664000175000017500000000003600000000000026131 0ustar00zuulzuul00000000000000{ "os-force_delete": {} } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/samples/volume-force-detach-request.json0000664000175000017500000000027000000000000026117 0ustar00zuulzuul00000000000000{ "os-force_detach": { "attachment_id": "d8777f54-84cf-4809-a679-468ffed56cf1", "connector": { "initiator": "iqn.2012-07.org.fake:01" } } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/samples/volume-image-metadata-set-request.json0000664000175000017500000000042200000000000027223 0ustar00zuulzuul00000000000000{ "os-set_image_metadata": { "metadata": { "image_id": "521752a6-acf6-4b2d-bc7a-119f9148cd8c", "image_name": "image", "kernel_id": "155d900f-4e14-4e4c-a73d-069cbf4541e6", "ramdisk_id": "somedisk" } } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/samples/volume-image-metadata-unset-request.json0000664000175000017500000000010700000000000027566 0ustar00zuulzuul00000000000000{ "os-unset_image_metadata": { "key": "ramdisk_id" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/samples/volume-image-metadata-update-response.json0000664000175000017500000000063600000000000030067 0ustar00zuulzuul00000000000000{ "metadata": { "kernel_id": "6ff710d2-942b-4d6b-9168-8c9cc2404ab1", "container_format": "bare", "min_ram": "0", "ramdisk_id": "somedisk", "disk_format": "qcow2", "image_name": "image", "image_id": 
"5137a025-3c5f-43c1-bc64-5f41270040a5", "checksum": "f8ab98ff5e73ebab884d80c9dc9c7290", "min_disk": "0", "size": "13267968" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/samples/volume-initialize-connection-request.json0000664000175000017500000000052500000000000030074 0ustar00zuulzuul00000000000000{ "os-initialize_connection": { "connector": { "platform":"x86_64", "host": "node2", "do_local_attach": false, "ip": "192.168.13.101", "os_type": "linux2", "multipath": false, "initiator": "iqn.1994-05.com.redhat:d16cbb5d31e5" } } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/samples/volume-manage-list-detail-response.json0000664000175000017500000000120200000000000027376 0ustar00zuulzuul00000000000000{ "manageable-volumes": [ { "cinder_id": "9ba5bb53-4a18-4b38-be06-992999da338d", "reason_not_safe": "already managed", "reference": { "source-name": "volume-9ba5bb53-4a18-4b38-be06-992999da338d" }, "safe_to_manage": false, "size": 1, "extra_info": null }, { "cinder_id": null, "reason_not_safe": null, "reference": { "source-name": "lvol0" }, "safe_to_manage": true, "size": 1, "extra_info": null } ] }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/samples/volume-manage-list-response.json0000664000175000017500000000061300000000000026143 0ustar00zuulzuul00000000000000{ "manageable-volumes": [ { "safe_to_manage": false, "reference": { "source-name": "volume-3a81fdac-e8ae-4e61-b6a2-2e14ff316f19" }, "size": 1 }, { "safe_to_manage": true, "reference": { "source-name": "lvol0" }, "size": 1 } ] }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/samples/volume-manage-request-cluster.json0000664000175000017500000000071100000000000026502 0ustar00zuulzuul00000000000000{ "volume": { "host": null, "cluster": "cluster@backend", "ref": { "source-name": "existingLV", "source-id": "1234" }, "name": "New Volume", "availability_zone": "az2", "description": "Volume imported from existingLV", "volume_type": null, "bootable": true, "metadata": { "key1": "value1", "key2": "value2" } } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/samples/volume-os-extend_volume_completion-request.json0000664000175000017500000000010600000000000031317 0ustar00zuulzuul00000000000000{ "os-extend_volume_completion": { "error": false } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/samples/volume-os-migrate_volume-request.json0000664000175000017500000000010000000000000027221 0ustar00zuulzuul00000000000000{ "os-migrate_volume": { "host": "node1@lvm" } }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/samples/volume-os-migrate_volume_completion-request.json0000664000175000017500000000020500000000000031460 0ustar00zuulzuul00000000000000{ "os-migrate_volume_completion": { "new_volume": "2b955850-f177-45f7-9f49-ecb2c256d161", "error": false } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 
cinder-27.0.0/api-ref/source/v3/samples/volume-os-reimage-request.json0000664000175000017500000000017400000000000025626 0ustar00zuulzuul00000000000000{ "os-reimage": { "image_id": "71543ced-a8af-45b6-a5c4-a46282108a90", "reimage_reserved": false } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/samples/volume-os-retype-request.json0000664000175000017500000000015600000000000025525 0ustar00zuulzuul00000000000000{ "os-retype": { "new_type": "dedup-tier-replicaton", "migration_policy": "never" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/samples/volume-readonly-update-request.json0000664000175000017500000000010400000000000026664 0ustar00zuulzuul00000000000000{ "os-update_readonly_flag": { "readonly": true } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/samples/volume-reserve-request.json0000664000175000017500000000002700000000000025246 0ustar00zuulzuul00000000000000{ "os-reserve": {} } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/samples/volume-revert-to-snapshot-request.json0000664000175000017500000000012700000000000027360 0ustar00zuulzuul00000000000000{ "revert": { "snapshot_id": "5aa119a8-d25b-45a7-8d1b-88e127885635" } }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/samples/volume-roll-detaching-request.json0000664000175000017500000000003700000000000026470 0ustar00zuulzuul00000000000000{ "os-roll_detaching": {} } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/samples/volume-status-reset-request.json0000664000175000017500000000021700000000000026237 0ustar00zuulzuul00000000000000{ "os-reset_status": { "status": "available", "attach_status": "detached", "migration_status": "migrating" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/samples/volume-terminate-connection-request.json0000664000175000017500000000052500000000000027723 0ustar00zuulzuul00000000000000{ "os-terminate_connection": { "connector": { "platform": "x86_64", "host": "node2", "do_local_attach": false, "ip": "192.168.13.101", "os_type": "linux2", "multipath": false, "initiator": "iqn.1994-05.com.redhat:d16cbb5d31e5" } } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/samples/volume-type-access-delete-request.json0000664000175000017500000000013500000000000027253 0ustar00zuulzuul00000000000000{ "removeProjectAccess": { "project": "f270b245cb11498ca4031deb7e141cfa" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/samples/volume-unmanage-request.json0000664000175000017500000000003200000000000025362 0ustar00zuulzuul00000000000000{ "os-unmanage": {} } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/samples/volume-unreserve-request.json0000664000175000017500000000003200000000000025605 
0ustar00zuulzuul00000000000000{ "os-unreserve":{} } ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315576.9991174 cinder-27.0.0/api-ref/source/v3/samples/volume_actions/0000775000175000017500000000000000000000000022735 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/samples/volume_actions/volume-upload-to-image-request.json0000664000175000017500000000033400000000000031607 0ustar00zuulzuul00000000000000{ "os-volume_upload_image":{ "image_name": "test", "force": false, "disk_format": "raw", "container_format": "bare", "visibility": "private", "protected": false } }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/samples/volume_actions/volume-upload-to-image-response.json0000664000175000017500000000073700000000000031764 0ustar00zuulzuul00000000000000{ "os-volume_upload_image": { "container_format": "bare", "disk_format": "raw", "display_description": null, "id": "3a81fdac-e8ae-4e61-b6a2-2e14ff316f19", "image_id": "de75b74e-7f0d-4b59-a263-bd87bfc313bd", "image_name": "test", "protected": false, "size": 1, "status": "uploading", "updated_at": "2017-06-05T08:44:28.000000", "visibility": "private", "volume_type": null } }././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315576.9991174 cinder-27.0.0/api-ref/source/v3/samples/volume_manage_extensions/0000775000175000017500000000000000000000000025004 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/samples/volume_manage_extensions/volume-manage-request.json0000664000175000017500000000066200000000000032126 0ustar00zuulzuul00000000000000{ "volume": { "host": "geraint-VirtualBox", "ref": { "source-name": "existingLV", "source-id": "1234" }, "name": "New Volume", "availability_zone": "az2", "description": "Volume imported from existingLV", "volume_type": null, "bootable": true, "metadata": { "key1": "value1", "key2": "value2" } } }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/samples/volume_manage_extensions/volume-manage-response.json0000664000175000017500000000213200000000000032266 0ustar00zuulzuul00000000000000{ "volume": { "attachments": [], "availability_zone": "az2", "bootable": "false", "created_at": "2014-07-18T00:12:54.000000", "description": "Volume imported from existingLV", "encrypted": "false", "id": "23cf872b-c781-4cd4-847d-5f2ec8cbd91c", "links": [ { "href": "http://10.0.2.15:8776/v3/87c8522052ca4eed98bc672b4c1a3ddb/volumes/23cf872b-c781-4cd4-847d-5f2ec8cbd91c", "rel": "self" }, { "href": "http://10.0.2.15:8776/87c8522052ca4eed98bc672b4c1a3ddb/volumes/23cf872b-c781-4cd4-847d-5f2ec8cbd91c", "rel": "bookmark" } ], "metadata": { "key1": "value1", "key2": "value2" }, "name": "New Volume", "os-vol-tenant-attr:tenant_id": "87c8522052ca4eed98bc672b4c1a3ddb", "size": 0, "snapshot_id": "null", "source_volid": "null", "status": "creating", "user_id": "eae1472b5fc5496998a3d06550929e7e", "volume_type": "null" } }././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315577.0031176 cinder-27.0.0/api-ref/source/v3/samples/volume_transfer/0000775000175000017500000000000000000000000023121 
5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/samples/volume_transfer/volume-transfer-accept-request.json0000664000175000017500000000010100000000000032060 0ustar00zuulzuul00000000000000{ "accept": { "auth_key": "9266c59563c84664" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/samples/volume_transfer/volume-transfer-accept-response.json0000664000175000017500000000114500000000000032237 0ustar00zuulzuul00000000000000{ "transfer": { "id": "0a840aa1-8f8f-4042-86d7-09d8ca755272", "links": [ { "href": "http://127.0.0.1:46057/v3/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/os-volume-transfer/0a840aa1-8f8f-4042-86d7-09d8ca755272", "rel": "self" }, { "href": "http://127.0.0.1:46057/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/os-volume-transfer/0a840aa1-8f8f-4042-86d7-09d8ca755272", "rel": "bookmark" } ], "name": "first volume", "volume_id": "e56dee53-e565-40f4-9c6b-b983f74a2aa5" } }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/samples/volume_transfer/volume-transfer-create-request.json0000664000175000017500000000017000000000000032072 0ustar00zuulzuul00000000000000{ "transfer": { "volume_id": "c86b9af4-151d-4ead-b62c-5fb967af0e37", "name": "first volume" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/samples/volume_transfer/volume-transfer-create-response.json0000664000175000017500000000130100000000000032235 0ustar00zuulzuul00000000000000{ "transfer": { "auth_key": "dbccabcdbad19e07", "created_at": "2019-03-20T09:29:46.743632", "id": "3d26db0c-69cd-42e4-ae42-7552759ab361", "links": [ { "href": "http://127.0.0.1:40345/v3/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/os-volume-transfer/3d26db0c-69cd-42e4-ae42-7552759ab361", "rel": "self" }, { "href": "http://127.0.0.1:40345/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/os-volume-transfer/3d26db0c-69cd-42e4-ae42-7552759ab361", "rel": "bookmark" } ], "name": "first volume", "volume_id": "59fe2097-931b-4ceb-b74b-f862ff3b6277" } }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/samples/volume_transfer/volume-transfer-show-response.json0000664000175000017500000000123100000000000031754 0ustar00zuulzuul00000000000000{ "transfer": { "created_at": "2019-03-20T09:29:48.732953", "id": "5055b9c2-527b-47ef-bdd6-62e1130f511f", "links": [ { "href": "http://127.0.0.1:41845/v3/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/os-volume-transfer/5055b9c2-527b-47ef-bdd6-62e1130f511f", "rel": "self" }, { "href": "http://127.0.0.1:41845/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/os-volume-transfer/5055b9c2-527b-47ef-bdd6-62e1130f511f", "rel": "bookmark" } ], "name": "first volume", "volume_id": "8cdd62be-4bea-4b7c-bb53-c0b5424ee2af" } }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/samples/volume_transfer/volume-transfers-list-detailed-response.json0000664000175000017500000000134600000000000033712 0ustar00zuulzuul00000000000000{ "transfers": [ { "created_at": "2019-03-20T09:29:52.758407", "id": "1b3f7d49-8fd8-41b8-b2a5-859c5fe71a20", "links": [ { "href": 
"http://127.0.0.1:37479/v3/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/os-volume-transfer/1b3f7d49-8fd8-41b8-b2a5-859c5fe71a20", "rel": "self" }, { "href": "http://127.0.0.1:37479/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/os-volume-transfer/1b3f7d49-8fd8-41b8-b2a5-859c5fe71a20", "rel": "bookmark" } ], "name": "first volume", "volume_id": "acb5a860-3f17-4c35-9484-394a12dd7dfc" } ] }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/samples/volume_transfer/volume-transfers-list-response.json0000664000175000017500000000125600000000000032141 0ustar00zuulzuul00000000000000{ "transfers": [ { "id": "a0f13fb9-904c-41c8-8c2e-495cac61a78f", "links": [ { "href": "http://127.0.0.1:45017/v3/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/os-volume-transfer/a0f13fb9-904c-41c8-8c2e-495cac61a78f", "rel": "self" }, { "href": "http://127.0.0.1:45017/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/os-volume-transfer/a0f13fb9-904c-41c8-8c2e-495cac61a78f", "rel": "bookmark" } ], "name": "first volume", "volume_id": "e72d7454-0234-4e3e-99e9-560d1ff79a71" } ] }././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315577.0031176 cinder-27.0.0/api-ref/source/v3/samples/volume_transfers/0000775000175000017500000000000000000000000023304 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315577.0031176 cinder-27.0.0/api-ref/source/v3/samples/volume_transfers/v3.55/0000775000175000017500000000000000000000000024064 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/samples/volume_transfers/v3.55/volume-transfers-create-request.json0000664000175000017500000000017000000000000033220 0ustar00zuulzuul00000000000000{ "transfer": { "volume_id": "1bb4acc9-9fa4-4b4d-8992-3259b69c8372", "name": "first volume" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/samples/volume_transfers/v3.55/volume-transfers-create-response.json0000664000175000017500000000134000000000000033366 0ustar00zuulzuul00000000000000{ "transfer": { "auth_key": "19244092a5352ebb", "created_at": "2023-06-12T21:21:38.394873", "id": "33907fea-976f-4d67-8867-b5382f84eb8c", "links": [ { "href": "http://127.0.0.1:45183/v3/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/os-volume-transfer/33907fea-976f-4d67-8867-b5382f84eb8c", "rel": "self" }, { "href": "http://127.0.0.1:45183/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/os-volume-transfer/33907fea-976f-4d67-8867-b5382f84eb8c", "rel": "bookmark" } ], "name": "first volume", "no_snapshots": false, "volume_id": "31024287-e368-4b2c-85a4-880b3b6fc8b0" } }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/samples/volume_transfers/v3.55/volume-transfers-show-response.json0000664000175000017500000000127000000000000033105 0ustar00zuulzuul00000000000000{ "transfer": { "created_at": "2023-06-22T08:28:14.618343", "id": "16b47e50-ab70-4781-bc01-cdcc01ca264a", "links": [ { "href": "http://127.0.0.1:38399/v3/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/os-volume-transfer/16b47e50-ab70-4781-bc01-cdcc01ca264a", "rel": "self" }, { "href": "http://127.0.0.1:38399/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/os-volume-transfer/16b47e50-ab70-4781-bc01-cdcc01ca264a", "rel": "bookmark" } ], "name": 
"first volume", "no_snapshots": false, "volume_id": "a67e4027-4a83-4b80-a2d5-5b49650ac28c" } }././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315577.0031176 cinder-27.0.0/api-ref/source/v3/samples/volume_transfers/v3.57/0000775000175000017500000000000000000000000024066 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/samples/volume_transfers/v3.57/volume-transfers-create-request.json0000664000175000017500000000022700000000000033225 0ustar00zuulzuul00000000000000{ "transfer": { "volume_id": "80d68197-b67e-4c8e-bbb9-030b2581f921", "name": "first volume", "no_snapshots": false } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/samples/volume_transfers/v3.57/volume-transfers-create-response.json0000664000175000017500000000155000000000000033373 0ustar00zuulzuul00000000000000{ "transfer": { "accepted": false, "auth_key": "e2cb02466324813c", "created_at": "2023-06-12T21:21:38.392033", "destination_project_id": null, "id": "94bae1a0-83fb-496c-9cd2-800d8237ab0d", "links": [ { "href": "http://127.0.0.1:45193/v3/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/os-volume-transfer/94bae1a0-83fb-496c-9cd2-800d8237ab0d", "rel": "self" }, { "href": "http://127.0.0.1:45193/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/os-volume-transfer/94bae1a0-83fb-496c-9cd2-800d8237ab0d", "rel": "bookmark" } ], "name": "first volume", "no_snapshots": false, "source_project_id": "89afd400-b646-4bbc-b12b-c0a4d63e5bd3", "volume_id": "202eead8-3c82-41e1-914f-83638a063be9" } }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/samples/volume_transfers/v3.57/volume-transfers-show-response.json0000664000175000017500000000150000000000000033103 0ustar00zuulzuul00000000000000{ "transfer": { "accepted": false, "created_at": "2023-06-22T08:28:17.647081", "destination_project_id": null, "id": "3d79fbda-8d9c-4da3-a016-e5612fcb7f65", "links": [ { "href": "http://127.0.0.1:34593/v3/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/os-volume-transfer/3d79fbda-8d9c-4da3-a016-e5612fcb7f65", "rel": "self" }, { "href": "http://127.0.0.1:34593/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/os-volume-transfer/3d79fbda-8d9c-4da3-a016-e5612fcb7f65", "rel": "bookmark" } ], "name": "first volume", "no_snapshots": false, "source_project_id": "89afd400-b646-4bbc-b12b-c0a4d63e5bd3", "volume_id": "7e31e409-2a7a-4ea6-aa0b-bc7be056fc57" } }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/samples/volume_transfers/volume-transfers-accept-request.json0000664000175000017500000000010000000000000032425 0ustar00zuulzuul00000000000000{ "accept": { "auth_key": "f318375a4400391e" } }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/samples/volume_transfers/volume-transfers-accept-response.json0000664000175000017500000000114500000000000032605 0ustar00zuulzuul00000000000000{ "transfer": { "id": "9e395d6d-5138-423c-a63c-7b62c6265fa1", "links": [ { "href": "http://127.0.0.1:39369/v3/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/os-volume-transfer/9e395d6d-5138-423c-a63c-7b62c6265fa1", "rel": "self" }, { "href": 
"http://127.0.0.1:39369/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/os-volume-transfer/9e395d6d-5138-423c-a63c-7b62c6265fa1", "rel": "bookmark" } ], "name": "first volume", "volume_id": "8d19f929-f1da-4a76-acad-9ed17da0981e" } }././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315577.0071175 cinder-27.0.0/api-ref/source/v3/samples/volume_type/0000775000175000017500000000000000000000000022256 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/samples/volume_type/encryption-type-create-request.json0000664000175000017500000000023600000000000031252 0ustar00zuulzuul00000000000000{ "encryption":{ "key_size": 256, "provider": "luks", "control_location":"front-end", "cipher": "aes-xts-plain64" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/samples/volume_type/encryption-type-create-response.json0000664000175000017500000000044300000000000031420 0ustar00zuulzuul00000000000000{ "encryption": { "volume_type_id": "2d29462d-76cb-417c-8a9f-fb23140f1577", "control_location": "front-end", "encryption_id": "81e069c6-7394-4856-8df7-3b237ca61f74", "key_size": 256, "provider": "luks", "cipher": "aes-xts-plain64" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/samples/volume_type/encryption-type-show-response.json0000664000175000017500000000054700000000000031142 0ustar00zuulzuul00000000000000{ "volume_type_id": "2d29462d-76cb-417c-8a9f-fb23140f1577", "control_location": "front-end", "deleted": false, "created_at": "2016-12-28T02:32:25.000000", "updated_at": null, "encryption_id": "81e069c6-7394-4856-8df7-3b237ca61f74", "key_size": 256, "provider": "luks", "deleted_at": null, "cipher": "aes-xts-plain64" } ././@PaxHeader0000000000000000000000000000020500000000000011452 xustar0000000000000000111 path=cinder-27.0.0/api-ref/source/v3/samples/volume_type/encryption-type-specific-specs-show-response.json 22 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/samples/volume_type/encryption-type-specific-specs-show-response.jso0000664000175000017500000000004400000000000033652 0ustar00zuulzuul00000000000000{ "cipher": "aes-xts-plain64" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/samples/volume_type/encryption-type-update-request.json0000664000175000017500000000023400000000000031267 0ustar00zuulzuul00000000000000{ "encryption":{ "key_size": 64, "provider": "luks", "control_location":"back-end", "cipher": "aes-xts-plain64" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/samples/volume_type/encryption-type-update-response.json0000664000175000017500000000023400000000000031435 0ustar00zuulzuul00000000000000{ "encryption":{ "key_size": 64, "provider": "luks", "control_location":"back-end", "cipher": "aes-xts-plain64" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/samples/volume_type/volume-type-access-add-request.json0000664000175000017500000000013100000000000031105 0ustar00zuulzuul00000000000000{ "addProjectAccess": { "project": "6f70656e737461636b20342065766572" } 
}././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/samples/volume_type/volume-type-access-list-response.json0000664000175000017500000000027400000000000031506 0ustar00zuulzuul00000000000000{ "volume_type_access": [ { "project_id": "6f70656e737461636b20342065766572", "volume_type_id": "a5082c24-2a27-43a4-b48e-fcec1240e36b" } ] }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/samples/volume_type/volume-type-all-extra-specs-show-response.json0000664000175000017500000000007500000000000033255 0ustar00zuulzuul00000000000000{ "extra_specs": { "capabilities": "gpu" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/samples/volume_type/volume-type-create-request.json0000664000175000017500000000034000000000000030363 0ustar00zuulzuul00000000000000{ "volume_type": { "name": "vol-type-001", "description": "volume type 0001", "os-volume-type-access:is_public": true, "extra_specs": { "capabilities": "gpu" } } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/samples/volume_type/volume-type-create-response.json0000664000175000017500000000046100000000000030535 0ustar00zuulzuul00000000000000{ "volume_type": { "name": "vol-type-001", "extra_specs": { "capabilities": "gpu" }, "os-volume-type-access:is_public": true, "is_public": true, "id": "6d0ff92a-0007-4780-9ece-acfe5876966a", "description": "volume type 0001" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/samples/volume_type/volume-type-default-response.json0000664000175000017500000000043600000000000030720 0ustar00zuulzuul00000000000000{ "volume_type": { "id": "6685584b-1eac-4da6-b5c3-555430cf68ff", "qos_specs_id": null, "name": "vol-type-001", "description": "volume type 0001", "is_public": true, "extra_specs": { "capabilities": "gpu" } } } ././@PaxHeader0000000000000000000000000000020600000000000011453 xustar0000000000000000112 path=cinder-27.0.0/api-ref/source/v3/samples/volume_type/volume-type-extra-specs-create-update-request.json 22 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/samples/volume_type/volume-type-extra-specs-create-update-request.js0000664000175000017500000000012200000000000033540 0ustar00zuulzuul00000000000000{ "extra_specs": { "key1": "value1", "key2": "value2" } } ././@PaxHeader0000000000000000000000000000020700000000000011454 xustar0000000000000000113 path=cinder-27.0.0/api-ref/source/v3/samples/volume_type/volume-type-extra-specs-create-update-response.json 22 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/samples/volume_type/volume-type-extra-specs-create-update-response.j0000664000175000017500000000012200000000000033523 0ustar00zuulzuul00000000000000{ "extra_specs": { "key1": "value1", "key2": "value2" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/samples/volume_type/volume-type-show-response.json0000664000175000017500000000051700000000000030254 0ustar00zuulzuul00000000000000{ "volume_type": { "id": "6685584b-1eac-4da6-b5c3-555430cf68ff", "qos_specs_id": null, "name": "vol-type-001", "description": "volume type 0001", "os-volume-type-access:is_public": true, "is_public": 
true, "extra_specs": { "capabilities": "gpu" } } } ././@PaxHeader0000000000000000000000000000020700000000000011454 xustar0000000000000000113 path=cinder-27.0.0/api-ref/source/v3/samples/volume_type/volume-type-specific-extra-specs-show-response.json 22 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/samples/volume_type/volume-type-specific-extra-specs-show-response.j0000664000175000017500000000003600000000000033547 0ustar00zuulzuul00000000000000{ "capabilities": "gpu" } ././@PaxHeader0000000000000000000000000000021000000000000011446 xustar0000000000000000114 path=cinder-27.0.0/api-ref/source/v3/samples/volume_type/volume-type-specific-extra-specs-update-request.json 22 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/samples/volume_type/volume-type-specific-extra-specs-update-request.0000664000175000017500000000003100000000000033524 0ustar00zuulzuul00000000000000{ "key1": "value1" } ././@PaxHeader0000000000000000000000000000021100000000000011447 xustar0000000000000000115 path=cinder-27.0.0/api-ref/source/v3/samples/volume_type/volume-type-specific-extra-specs-update-response.json 22 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/samples/volume_type/volume-type-specific-extra-specs-update-response0000664000175000017500000000003100000000000033614 0ustar00zuulzuul00000000000000{ "key1": "value1" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/samples/volume_type/volume-type-update-request.json0000664000175000017500000000020400000000000030401 0ustar00zuulzuul00000000000000{ "volume_type": { "name": "vol-type-001", "description": "volume type 0001", "is_public": true } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/samples/volume_type/volume-type-update-response.json0000664000175000017500000000040000000000000030545 0ustar00zuulzuul00000000000000{ "volume_type": { "id": "6685584b-1eac-4da6-b5c3-555430cf68ff", "name": "vol-type-001", "description": "volume type 0001", "is_public": true, "extra_specs": { "capabilities": "gpu" } } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/samples/volume_type/volume-types-list-response.json0000664000175000017500000000205000000000000030424 0ustar00zuulzuul00000000000000{ "volume_types": [ { "description": "volume type 0002", "extra_specs": { "capabilities": "gpu" }, "id": "ef512777-6552-4013-82f0-57a96e5804b7", "is_public": true, "name": "vol-type-002", "os-volume-type-access:is_public": true, "qos_specs_id": null }, { "description": "volume type 0001", "extra_specs": { "capabilities": "gpu" }, "id": "18947ff2-ad57-42b2-9350-34262e530203", "is_public": true, "name": "vol-type-001", "os-volume-type-access:is_public": true, "qos_specs_id": null }, { "description": "Default Volume Type", "extra_specs": {}, "id": "7a56b996-b73f-4233-9f00-dd6a68b49b27", "is_public": true, "name": "__DEFAULT__", "os-volume-type-access:is_public": true, "qos_specs_id": null } ] }././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315577.0111175 cinder-27.0.0/api-ref/source/v3/samples/volumes/0000775000175000017500000000000000000000000021400 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315577.0151176 
cinder-27.0.0/api-ref/source/v3/samples/volumes/v3.13/0000775000175000017500000000000000000000000022152 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/samples/volumes/v3.13/volume-create-response.json0000664000175000017500000000216600000000000027456 0ustar00zuulzuul00000000000000{ "volume": { "attachments": [], "availability_zone": "nova", "bootable": "false", "consistencygroup_id": null, "created_at": "2018-11-28T06:21:12.715987", "description": null, "encrypted": false, "id": "2b955850-f177-45f7-9f49-ecb2c256d161", "links": [ { "href": "http://127.0.0.1:33951/v3/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/volumes/2b955850-f177-45f7-9f49-ecb2c256d161", "rel": "self" }, { "href": "http://127.0.0.1:33951/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/volumes/2b955850-f177-45f7-9f49-ecb2c256d161", "rel": "bookmark" } ], "metadata": {}, "migration_status": null, "multiattach": false, "name": null, "replication_status": null, "size": 10, "snapshot_id": null, "source_volid": null, "status": "creating", "updated_at": null, "user_id": "c853ca26-e8ea-4797-8a52-ee124a013d0e", "volume_type": "__DEFAULT__", "group_id": null } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/samples/volumes/v3.13/volume-show-response.json0000664000175000017500000000251500000000000027171 0ustar00zuulzuul00000000000000{ "volume": { "attachments": [], "availability_zone": "nova", "bootable": "false", "consistencygroup_id": null, "created_at": "2018-11-29T06:50:07.770785", "description": null, "encrypted": false, "id": "f7223234-1afc-4d19-bfa3-d19deb6235ef", "links": [ { "href": "http://127.0.0.1:45839/v3/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/volumes/f7223234-1afc-4d19-bfa3-d19deb6235ef", "rel": "self" }, { "href": "http://127.0.0.1:45839/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/volumes/f7223234-1afc-4d19-bfa3-d19deb6235ef", "rel": "bookmark" } ], "metadata": {}, "migration_status": null, "multiattach": false, "name": null, "os-vol-host-attr:host": null, "os-vol-mig-status-attr:migstat": null, "os-vol-mig-status-attr:name_id": null, "os-vol-tenant-attr:tenant_id": "89afd400-b646-4bbc-b12b-c0a4d63e5bd3", "replication_status": null, "size": 10, "snapshot_id": null, "source_volid": null, "status": "creating", "updated_at": null, "user_id": "c853ca26-e8ea-4797-8a52-ee124a013d0e", "volume_type": "__DEFAULT__", "group_id": null } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/samples/volumes/v3.13/volume-update-response.json0000664000175000017500000000227600000000000027477 0ustar00zuulzuul00000000000000{ "volume": { "attachments": [], "availability_zone": "nova", "bootable": "false", "consistencygroup_id": null, "created_at": "2018-11-29T06:59:23.679903", "description": "This is yet, another volume.", "encrypted": false, "id": "8b2459d1-0059-4e14-a89f-dfa73a452af6", "links": [ { "href": "http://127.0.0.1:41467/v3/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/volumes/8b2459d1-0059-4e14-a89f-dfa73a452af6", "rel": "self" }, { "href": "http://127.0.0.1:41467/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/volumes/8b2459d1-0059-4e14-a89f-dfa73a452af6", "rel": "bookmark" } ], "metadata": { "name": "metadata0" }, "migration_status": null, "multiattach": false, "name": "vol-003", "replication_status": null, "size": 10, "snapshot_id": null, "source_volid": null, "status": 
"creating", "updated_at": null, "user_id": "c853ca26-e8ea-4797-8a52-ee124a013d0e", "volume_type": "__DEFAULT__", "group_id": null } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/samples/volumes/v3.13/volumes-list-detailed-response.json0000664000175000017500000000275600000000000031127 0ustar00zuulzuul00000000000000{ "volumes": [ { "attachments": [], "availability_zone": "nova", "bootable": "false", "consistencygroup_id": null, "created_at": "2018-11-28T06:25:15.288987", "description": null, "encrypted": false, "id": "cb49b381-9012-40cb-b8ee-80c19a4801b5", "links": [ { "href": "http://127.0.0.1:43543/v3/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/volumes/cb49b381-9012-40cb-b8ee-80c19a4801b5", "rel": "self" }, { "href": "http://127.0.0.1:43543/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/volumes/cb49b381-9012-40cb-b8ee-80c19a4801b5", "rel": "bookmark" } ], "metadata": {}, "migration_status": null, "multiattach": false, "name": null, "os-vol-host-attr:host": null, "os-vol-mig-status-attr:migstat": null, "os-vol-mig-status-attr:name_id": null, "os-vol-tenant-attr:tenant_id": "89afd400-b646-4bbc-b12b-c0a4d63e5bd3", "replication_status": null, "size": 10, "snapshot_id": null, "source_volid": null, "status": "creating", "updated_at": null, "user_id": "c853ca26-e8ea-4797-8a52-ee124a013d0e", "volume_type": "__DEFAULT__", "group_id": null } ] } ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315577.0151176 cinder-27.0.0/api-ref/source/v3/samples/volumes/v3.21/0000775000175000017500000000000000000000000022151 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/samples/volumes/v3.21/volume-create-response.json0000664000175000017500000000222300000000000027447 0ustar00zuulzuul00000000000000{ "volume": { "attachments": [], "availability_zone": "nova", "bootable": "false", "consistencygroup_id": null, "created_at": "2018-11-28T06:21:12.715987", "description": null, "encrypted": false, "id": "2b955850-f177-45f7-9f49-ecb2c256d161", "links": [ { "href": "http://127.0.0.1:33951/v3/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/volumes/2b955850-f177-45f7-9f49-ecb2c256d161", "rel": "self" }, { "href": "http://127.0.0.1:33951/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/volumes/2b955850-f177-45f7-9f49-ecb2c256d161", "rel": "bookmark" } ], "metadata": {}, "migration_status": null, "multiattach": false, "name": null, "replication_status": null, "size": 10, "snapshot_id": null, "source_volid": null, "status": "creating", "updated_at": null, "user_id": "c853ca26-e8ea-4797-8a52-ee124a013d0e", "volume_type": "__DEFAULT__", "group_id": null, "provider_id": null } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/samples/volumes/v3.21/volume-show-response.json0000664000175000017500000000255200000000000027171 0ustar00zuulzuul00000000000000{ "volume": { "attachments": [], "availability_zone": "nova", "bootable": "false", "consistencygroup_id": null, "created_at": "2018-11-29T06:50:07.770785", "description": null, "encrypted": false, "id": "f7223234-1afc-4d19-bfa3-d19deb6235ef", "links": [ { "href": "http://127.0.0.1:45839/v3/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/volumes/f7223234-1afc-4d19-bfa3-d19deb6235ef", "rel": "self" }, { "href": 
"http://127.0.0.1:45839/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/volumes/f7223234-1afc-4d19-bfa3-d19deb6235ef", "rel": "bookmark" } ], "metadata": {}, "migration_status": null, "multiattach": false, "name": null, "os-vol-host-attr:host": null, "os-vol-mig-status-attr:migstat": null, "os-vol-mig-status-attr:name_id": null, "os-vol-tenant-attr:tenant_id": "89afd400-b646-4bbc-b12b-c0a4d63e5bd3", "replication_status": null, "size": 10, "snapshot_id": null, "source_volid": null, "status": "creating", "updated_at": null, "user_id": "c853ca26-e8ea-4797-8a52-ee124a013d0e", "volume_type": "__DEFAULT__", "group_id": null, "provider_id": null } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/samples/volumes/v3.21/volume-update-response.json0000664000175000017500000000233300000000000027470 0ustar00zuulzuul00000000000000{ "volume": { "attachments": [], "availability_zone": "nova", "bootable": "false", "consistencygroup_id": null, "created_at": "2018-11-29T06:59:23.679903", "description": "This is yet, another volume.", "encrypted": false, "id": "8b2459d1-0059-4e14-a89f-dfa73a452af6", "links": [ { "href": "http://127.0.0.1:41467/v3/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/volumes/8b2459d1-0059-4e14-a89f-dfa73a452af6", "rel": "self" }, { "href": "http://127.0.0.1:41467/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/volumes/8b2459d1-0059-4e14-a89f-dfa73a452af6", "rel": "bookmark" } ], "metadata": { "name": "metadata0" }, "migration_status": null, "multiattach": false, "name": "vol-003", "replication_status": null, "size": 10, "snapshot_id": null, "source_volid": null, "status": "creating", "updated_at": null, "user_id": "c853ca26-e8ea-4797-8a52-ee124a013d0e", "volume_type": "__DEFAULT__", "group_id": null, "provider_id": null } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/samples/volumes/v3.21/volumes-list-detailed-response.json0000664000175000017500000000301700000000000031115 0ustar00zuulzuul00000000000000{ "volumes": [ { "attachments": [], "availability_zone": "nova", "bootable": "false", "consistencygroup_id": null, "created_at": "2018-11-28T06:25:15.288987", "description": null, "encrypted": false, "id": "cb49b381-9012-40cb-b8ee-80c19a4801b5", "links": [ { "href": "http://127.0.0.1:43543/v3/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/volumes/cb49b381-9012-40cb-b8ee-80c19a4801b5", "rel": "self" }, { "href": "http://127.0.0.1:43543/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/volumes/cb49b381-9012-40cb-b8ee-80c19a4801b5", "rel": "bookmark" } ], "metadata": {}, "migration_status": null, "multiattach": false, "name": null, "os-vol-host-attr:host": null, "os-vol-mig-status-attr:migstat": null, "os-vol-mig-status-attr:name_id": null, "os-vol-tenant-attr:tenant_id": "89afd400-b646-4bbc-b12b-c0a4d63e5bd3", "replication_status": null, "size": 10, "snapshot_id": null, "source_volid": null, "status": "creating", "updated_at": null, "user_id": "c853ca26-e8ea-4797-8a52-ee124a013d0e", "volume_type": "__DEFAULT__", "group_id": null, "provider_id": null } ] } ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315577.0151176 cinder-27.0.0/api-ref/source/v3/samples/volumes/v3.48/0000775000175000017500000000000000000000000022162 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 
cinder-27.0.0/api-ref/source/v3/samples/volumes/v3.48/volume-create-response.json0000664000175000017500000000232100000000000027457 0ustar00zuulzuul00000000000000{ "volume": { "attachments": [], "availability_zone": "nova", "bootable": "false", "consistencygroup_id": null, "created_at": "2018-11-28T06:21:12.715987", "description": null, "encrypted": false, "id": "2b955850-f177-45f7-9f49-ecb2c256d161", "links": [ { "href": "http://127.0.0.1:33951/v3/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/volumes/2b955850-f177-45f7-9f49-ecb2c256d161", "rel": "self" }, { "href": "http://127.0.0.1:33951/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/volumes/2b955850-f177-45f7-9f49-ecb2c256d161", "rel": "bookmark" } ], "metadata": {}, "migration_status": null, "multiattach": false, "name": null, "replication_status": null, "size": 10, "snapshot_id": null, "source_volid": null, "status": "creating", "updated_at": null, "user_id": "c853ca26-e8ea-4797-8a52-ee124a013d0e", "volume_type": "__DEFAULT__", "group_id": null, "provider_id": null, "service_uuid": null, "shared_targets": true } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/samples/volumes/v3.48/volume-show-response.json0000664000175000017500000000265000000000000027201 0ustar00zuulzuul00000000000000{ "volume": { "attachments": [], "availability_zone": "nova", "bootable": "false", "consistencygroup_id": null, "created_at": "2018-11-29T06:50:07.770785", "description": null, "encrypted": false, "id": "f7223234-1afc-4d19-bfa3-d19deb6235ef", "links": [ { "href": "http://127.0.0.1:45839/v3/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/volumes/f7223234-1afc-4d19-bfa3-d19deb6235ef", "rel": "self" }, { "href": "http://127.0.0.1:45839/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/volumes/f7223234-1afc-4d19-bfa3-d19deb6235ef", "rel": "bookmark" } ], "metadata": {}, "migration_status": null, "multiattach": false, "name": null, "os-vol-host-attr:host": null, "os-vol-mig-status-attr:migstat": null, "os-vol-mig-status-attr:name_id": null, "os-vol-tenant-attr:tenant_id": "89afd400-b646-4bbc-b12b-c0a4d63e5bd3", "replication_status": null, "size": 10, "snapshot_id": null, "source_volid": null, "status": "creating", "updated_at": null, "user_id": "c853ca26-e8ea-4797-8a52-ee124a013d0e", "volume_type": "__DEFAULT__", "provider_id": null, "group_id": null, "service_uuid": null, "shared_targets": true } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/samples/volumes/v3.48/volume-update-response.json0000664000175000017500000000243100000000000027500 0ustar00zuulzuul00000000000000{ "volume": { "attachments": [], "availability_zone": "nova", "bootable": "false", "consistencygroup_id": null, "created_at": "2018-11-29T06:59:23.679903", "description": "This is yet, another volume.", "encrypted": false, "id": "8b2459d1-0059-4e14-a89f-dfa73a452af6", "links": [ { "href": "http://127.0.0.1:41467/v3/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/volumes/8b2459d1-0059-4e14-a89f-dfa73a452af6", "rel": "self" }, { "href": "http://127.0.0.1:41467/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/volumes/8b2459d1-0059-4e14-a89f-dfa73a452af6", "rel": "bookmark" } ], "metadata": { "name": "metadata0" }, "migration_status": null, "multiattach": false, "name": "vol-003", "replication_status": null, "size": 10, "snapshot_id": null, "source_volid": null, "status": "creating", "updated_at": null, "user_id": "c853ca26-e8ea-4797-8a52-ee124a013d0e", "volume_type": 
"__DEFAULT__", "group_id": null, "provider_id": null, "service_uuid": null, "shared_targets": true } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/samples/volumes/v3.48/volumes-list-detailed-response.json0000664000175000017500000000312500000000000031126 0ustar00zuulzuul00000000000000{ "volumes": [ { "attachments": [], "availability_zone": "nova", "bootable": "false", "consistencygroup_id": null, "created_at": "2018-11-28T06:25:15.288987", "description": null, "encrypted": false, "id": "cb49b381-9012-40cb-b8ee-80c19a4801b5", "links": [ { "href": "http://127.0.0.1:43543/v3/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/volumes/cb49b381-9012-40cb-b8ee-80c19a4801b5", "rel": "self" }, { "href": "http://127.0.0.1:43543/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/volumes/cb49b381-9012-40cb-b8ee-80c19a4801b5", "rel": "bookmark" } ], "metadata": {}, "migration_status": null, "multiattach": false, "name": null, "os-vol-host-attr:host": null, "os-vol-mig-status-attr:migstat": null, "os-vol-mig-status-attr:name_id": null, "os-vol-tenant-attr:tenant_id": "89afd400-b646-4bbc-b12b-c0a4d63e5bd3", "replication_status": null, "size": 10, "snapshot_id": null, "source_volid": null, "status": "creating", "updated_at": null, "user_id": "c853ca26-e8ea-4797-8a52-ee124a013d0e", "volume_type": "__DEFAULT__", "provider_id": null, "group_id": null, "service_uuid": null, "shared_targets": true } ] } ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315577.0151176 cinder-27.0.0/api-ref/source/v3/samples/volumes/v3.61/0000775000175000017500000000000000000000000022155 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/samples/volumes/v3.61/volume-create-response.json0000664000175000017500000000235700000000000027463 0ustar00zuulzuul00000000000000{ "volume": { "attachments": [], "availability_zone": "nova", "bootable": "false", "consistencygroup_id": null, "created_at": "2018-11-28T06:21:12.715987", "description": null, "encrypted": false, "id": "2b955850-f177-45f7-9f49-ecb2c256d161", "links": [ { "href": "http://127.0.0.1:33951/v3/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/volumes/2b955850-f177-45f7-9f49-ecb2c256d161", "rel": "self" }, { "href": "http://127.0.0.1:33951/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/volumes/2b955850-f177-45f7-9f49-ecb2c256d161", "rel": "bookmark" } ], "metadata": {}, "migration_status": null, "multiattach": false, "name": null, "replication_status": null, "size": 10, "snapshot_id": null, "source_volid": null, "status": "creating", "updated_at": null, "user_id": "c853ca26-e8ea-4797-8a52-ee124a013d0e", "volume_type": "__DEFAULT__", "group_id": null, "provider_id": null, "service_uuid": null, "shared_targets": true, "cluster_name": null } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/samples/volumes/v3.61/volume-show-response.json0000664000175000017500000000270600000000000027176 0ustar00zuulzuul00000000000000{ "volume": { "attachments": [], "availability_zone": "nova", "bootable": "false", "consistencygroup_id": null, "created_at": "2018-11-29T06:50:07.770785", "description": null, "encrypted": false, "id": "f7223234-1afc-4d19-bfa3-d19deb6235ef", "links": [ { "href": 
"http://127.0.0.1:45839/v3/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/volumes/f7223234-1afc-4d19-bfa3-d19deb6235ef", "rel": "self" }, { "href": "http://127.0.0.1:45839/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/volumes/f7223234-1afc-4d19-bfa3-d19deb6235ef", "rel": "bookmark" } ], "metadata": {}, "migration_status": null, "multiattach": false, "name": null, "os-vol-host-attr:host": null, "os-vol-mig-status-attr:migstat": null, "os-vol-mig-status-attr:name_id": null, "os-vol-tenant-attr:tenant_id": "89afd400-b646-4bbc-b12b-c0a4d63e5bd3", "replication_status": null, "size": 10, "snapshot_id": null, "source_volid": null, "status": "creating", "updated_at": null, "user_id": "c853ca26-e8ea-4797-8a52-ee124a013d0e", "volume_type": "__DEFAULT__", "provider_id": null, "group_id": null, "service_uuid": null, "shared_targets": true, "cluster_name": null } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/samples/volumes/v3.61/volume-update-response.json0000664000175000017500000000246700000000000027504 0ustar00zuulzuul00000000000000{ "volume": { "attachments": [], "availability_zone": "nova", "bootable": "false", "consistencygroup_id": null, "created_at": "2018-11-29T06:59:23.679903", "description": "This is yet, another volume.", "encrypted": false, "id": "8b2459d1-0059-4e14-a89f-dfa73a452af6", "links": [ { "href": "http://127.0.0.1:41467/v3/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/volumes/8b2459d1-0059-4e14-a89f-dfa73a452af6", "rel": "self" }, { "href": "http://127.0.0.1:41467/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/volumes/8b2459d1-0059-4e14-a89f-dfa73a452af6", "rel": "bookmark" } ], "metadata": { "name": "metadata0" }, "migration_status": null, "multiattach": false, "name": "vol-003", "replication_status": null, "size": 10, "snapshot_id": null, "source_volid": null, "status": "creating", "updated_at": null, "user_id": "c853ca26-e8ea-4797-8a52-ee124a013d0e", "volume_type": "__DEFAULT__", "group_id": null, "provider_id": null, "service_uuid": null, "shared_targets": true, "cluster_name": null } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/samples/volumes/v3.61/volumes-list-detailed-response.json0000664000175000017500000000316700000000000031127 0ustar00zuulzuul00000000000000{ "volumes": [ { "attachments": [], "availability_zone": "nova", "bootable": "false", "consistencygroup_id": null, "created_at": "2018-11-28T06:25:15.288987", "description": null, "encrypted": false, "id": "cb49b381-9012-40cb-b8ee-80c19a4801b5", "links": [ { "href": "http://127.0.0.1:43543/v3/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/volumes/cb49b381-9012-40cb-b8ee-80c19a4801b5", "rel": "self" }, { "href": "http://127.0.0.1:43543/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/volumes/cb49b381-9012-40cb-b8ee-80c19a4801b5", "rel": "bookmark" } ], "metadata": {}, "migration_status": null, "multiattach": false, "name": null, "os-vol-host-attr:host": null, "os-vol-mig-status-attr:migstat": null, "os-vol-mig-status-attr:name_id": null, "os-vol-tenant-attr:tenant_id": "89afd400-b646-4bbc-b12b-c0a4d63e5bd3", "replication_status": null, "size": 10, "snapshot_id": null, "source_volid": null, "status": "creating", "updated_at": null, "user_id": "c853ca26-e8ea-4797-8a52-ee124a013d0e", "volume_type": "__DEFAULT__", "provider_id": null, "group_id": null, "service_uuid": null, "shared_targets": true, "cluster_name": null } ] } ././@PaxHeader0000000000000000000000000000003400000000000011452 
xustar000000000000000028 mtime=1759315577.0191176 cinder-27.0.0/api-ref/source/v3/samples/volumes/v3.63/0000775000175000017500000000000000000000000022157 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/samples/volumes/v3.63/volume-create-response.json0000664000175000017500000000246100000000000027461 0ustar00zuulzuul00000000000000{ "volume": { "attachments": [], "availability_zone": "nova", "bootable": "false", "consistencygroup_id": null, "created_at": "2018-11-28T06:21:12.715987", "description": null, "encrypted": false, "id": "2b955850-f177-45f7-9f49-ecb2c256d161", "links": [ { "href": "http://127.0.0.1:33951/v3/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/volumes/2b955850-f177-45f7-9f49-ecb2c256d161", "rel": "self" }, { "href": "http://127.0.0.1:33951/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/volumes/2b955850-f177-45f7-9f49-ecb2c256d161", "rel": "bookmark" } ], "metadata": {}, "migration_status": null, "multiattach": false, "name": null, "replication_status": null, "size": 10, "snapshot_id": null, "source_volid": null, "status": "creating", "updated_at": null, "user_id": "c853ca26-e8ea-4797-8a52-ee124a013d0e", "volume_type": "__DEFAULT__", "group_id": null, "provider_id": null, "service_uuid": null, "shared_targets": true, "cluster_name": null, "volume_type_id": "5fed9d7c-401d-46e2-8e80-f30c70cb7e1d" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/samples/volumes/v3.63/volume-show-response.json0000664000175000017500000000301000000000000027165 0ustar00zuulzuul00000000000000{ "volume": { "attachments": [], "availability_zone": "nova", "bootable": "false", "consistencygroup_id": null, "created_at": "2018-11-29T06:50:07.770785", "description": null, "encrypted": false, "id": "f7223234-1afc-4d19-bfa3-d19deb6235ef", "links": [ { "href": "http://127.0.0.1:45839/v3/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/volumes/f7223234-1afc-4d19-bfa3-d19deb6235ef", "rel": "self" }, { "href": "http://127.0.0.1:45839/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/volumes/f7223234-1afc-4d19-bfa3-d19deb6235ef", "rel": "bookmark" } ], "metadata": {}, "migration_status": null, "multiattach": false, "name": null, "os-vol-host-attr:host": null, "os-vol-mig-status-attr:migstat": null, "os-vol-mig-status-attr:name_id": null, "os-vol-tenant-attr:tenant_id": "89afd400-b646-4bbc-b12b-c0a4d63e5bd3", "replication_status": null, "size": 10, "snapshot_id": null, "source_volid": null, "status": "creating", "updated_at": null, "user_id": "c853ca26-e8ea-4797-8a52-ee124a013d0e", "volume_type": "__DEFAULT__", "provider_id": null, "group_id": null, "service_uuid": null, "shared_targets": true, "cluster_name": null, "volume_type_id": "5fed9d7c-401d-46e2-8e80-f30c70cb7e1d" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/samples/volumes/v3.63/volume-update-response.json0000664000175000017500000000257100000000000027502 0ustar00zuulzuul00000000000000{ "volume": { "attachments": [], "availability_zone": "nova", "bootable": "false", "consistencygroup_id": null, "created_at": "2018-11-29T06:59:23.679903", "description": "This is yet, another volume.", "encrypted": false, "id": "8b2459d1-0059-4e14-a89f-dfa73a452af6", "links": [ { "href": "http://127.0.0.1:41467/v3/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/volumes/8b2459d1-0059-4e14-a89f-dfa73a452af6", "rel": 
"self" }, { "href": "http://127.0.0.1:41467/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/volumes/8b2459d1-0059-4e14-a89f-dfa73a452af6", "rel": "bookmark" } ], "metadata": { "name": "metadata0" }, "migration_status": null, "multiattach": false, "name": "vol-003", "replication_status": null, "size": 10, "snapshot_id": null, "source_volid": null, "status": "creating", "updated_at": null, "user_id": "c853ca26-e8ea-4797-8a52-ee124a013d0e", "volume_type": "__DEFAULT__", "group_id": null, "provider_id": null, "service_uuid": null, "shared_targets": true, "cluster_name": null, "volume_type_id": "5fed9d7c-401d-46e2-8e80-f30c70cb7e1d" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/samples/volumes/v3.63/volumes-list-detailed-response.json0000664000175000017500000000327500000000000031131 0ustar00zuulzuul00000000000000{ "volumes": [ { "attachments": [], "availability_zone": "nova", "bootable": "false", "consistencygroup_id": null, "created_at": "2018-11-28T06:25:15.288987", "description": null, "encrypted": false, "id": "cb49b381-9012-40cb-b8ee-80c19a4801b5", "links": [ { "href": "http://127.0.0.1:43543/v3/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/volumes/cb49b381-9012-40cb-b8ee-80c19a4801b5", "rel": "self" }, { "href": "http://127.0.0.1:43543/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/volumes/cb49b381-9012-40cb-b8ee-80c19a4801b5", "rel": "bookmark" } ], "metadata": {}, "migration_status": null, "multiattach": false, "name": null, "os-vol-host-attr:host": null, "os-vol-mig-status-attr:migstat": null, "os-vol-mig-status-attr:name_id": null, "os-vol-tenant-attr:tenant_id": "89afd400-b646-4bbc-b12b-c0a4d63e5bd3", "replication_status": null, "size": 10, "snapshot_id": null, "source_volid": null, "status": "creating", "updated_at": null, "user_id": "c853ca26-e8ea-4797-8a52-ee124a013d0e", "volume_type": "__DEFAULT__", "provider_id": null, "group_id": null, "service_uuid": null, "shared_targets": true, "cluster_name": null, "volume_type_id": "5fed9d7c-401d-46e2-8e80-f30c70cb7e1d" } ] } ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315577.0191176 cinder-27.0.0/api-ref/source/v3/samples/volumes/v3.65/0000775000175000017500000000000000000000000022161 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/samples/volumes/v3.65/volume-create-response.json0000664000175000017500000000252100000000000027460 0ustar00zuulzuul00000000000000{ "volume": { "attachments": [], "availability_zone": "nova", "bootable": "false", "consistencygroup_id": null, "created_at": "2018-11-28T06:21:12.715987", "description": null, "encrypted": false, "id": "2b955850-f177-45f7-9f49-ecb2c256d161", "links": [ { "href": "http://127.0.0.1:33951/v3/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/volumes/2b955850-f177-45f7-9f49-ecb2c256d161", "rel": "self" }, { "href": "http://127.0.0.1:33951/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/volumes/2b955850-f177-45f7-9f49-ecb2c256d161", "rel": "bookmark" } ], "metadata": {}, "migration_status": null, "multiattach": false, "name": null, "replication_status": null, "size": 10, "snapshot_id": null, "source_volid": null, "status": "creating", "updated_at": null, "user_id": "c853ca26-e8ea-4797-8a52-ee124a013d0e", "volume_type": "__DEFAULT__", "group_id": null, "provider_id": null, "service_uuid": null, "shared_targets": true, "cluster_name": null, "volume_type_id": 
"5fed9d7c-401d-46e2-8e80-f30c70cb7e1d", "consumes_quota": true } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/samples/volumes/v3.65/volume-show-response.json0000664000175000017500000000305000000000000027173 0ustar00zuulzuul00000000000000{ "volume": { "attachments": [], "availability_zone": "nova", "bootable": "false", "consistencygroup_id": null, "created_at": "2018-11-29T06:50:07.770785", "description": null, "encrypted": false, "id": "f7223234-1afc-4d19-bfa3-d19deb6235ef", "links": [ { "href": "http://127.0.0.1:45839/v3/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/volumes/f7223234-1afc-4d19-bfa3-d19deb6235ef", "rel": "self" }, { "href": "http://127.0.0.1:45839/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/volumes/f7223234-1afc-4d19-bfa3-d19deb6235ef", "rel": "bookmark" } ], "metadata": {}, "migration_status": null, "multiattach": false, "name": null, "os-vol-host-attr:host": null, "os-vol-mig-status-attr:migstat": null, "os-vol-mig-status-attr:name_id": null, "os-vol-tenant-attr:tenant_id": "89afd400-b646-4bbc-b12b-c0a4d63e5bd3", "replication_status": null, "size": 10, "snapshot_id": null, "source_volid": null, "status": "creating", "updated_at": null, "user_id": "c853ca26-e8ea-4797-8a52-ee124a013d0e", "volume_type": "__DEFAULT__", "provider_id": null, "group_id": null, "service_uuid": null, "shared_targets": true, "cluster_name": null, "volume_type_id": "5fed9d7c-401d-46e2-8e80-f30c70cb7e1d", "consumes_quota": true } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/samples/volumes/v3.65/volume-update-response.json0000664000175000017500000000263100000000000027501 0ustar00zuulzuul00000000000000{ "volume": { "attachments": [], "availability_zone": "nova", "bootable": "false", "consistencygroup_id": null, "created_at": "2018-11-29T06:59:23.679903", "description": "This is yet, another volume.", "encrypted": false, "id": "8b2459d1-0059-4e14-a89f-dfa73a452af6", "links": [ { "href": "http://127.0.0.1:41467/v3/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/volumes/8b2459d1-0059-4e14-a89f-dfa73a452af6", "rel": "self" }, { "href": "http://127.0.0.1:41467/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/volumes/8b2459d1-0059-4e14-a89f-dfa73a452af6", "rel": "bookmark" } ], "metadata": { "name": "metadata0" }, "migration_status": null, "multiattach": false, "name": "vol-003", "replication_status": null, "size": 10, "snapshot_id": null, "source_volid": null, "status": "creating", "updated_at": null, "user_id": "c853ca26-e8ea-4797-8a52-ee124a013d0e", "volume_type": "__DEFAULT__", "group_id": null, "provider_id": null, "service_uuid": null, "shared_targets": true, "cluster_name": null, "volume_type_id": "5fed9d7c-401d-46e2-8e80-f30c70cb7e1d", "consumes_quota": true } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/samples/volumes/v3.65/volumes-list-detailed-response.json0000664000175000017500000000334100000000000031125 0ustar00zuulzuul00000000000000{ "volumes": [ { "attachments": [], "availability_zone": "nova", "bootable": "false", "consistencygroup_id": null, "created_at": "2018-11-28T06:25:15.288987", "description": null, "encrypted": false, "id": "cb49b381-9012-40cb-b8ee-80c19a4801b5", "links": [ { "href": "http://127.0.0.1:43543/v3/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/volumes/cb49b381-9012-40cb-b8ee-80c19a4801b5", "rel": "self" }, { "href": 
"http://127.0.0.1:43543/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/volumes/cb49b381-9012-40cb-b8ee-80c19a4801b5", "rel": "bookmark" } ], "metadata": {}, "migration_status": null, "multiattach": false, "name": null, "os-vol-host-attr:host": null, "os-vol-mig-status-attr:migstat": null, "os-vol-mig-status-attr:name_id": null, "os-vol-tenant-attr:tenant_id": "89afd400-b646-4bbc-b12b-c0a4d63e5bd3", "replication_status": null, "size": 10, "snapshot_id": null, "source_volid": null, "status": "creating", "updated_at": null, "user_id": "c853ca26-e8ea-4797-8a52-ee124a013d0e", "volume_type": "__DEFAULT__", "volume_type_id": "5fed9d7c-401d-46e2-8e80-f30c70cb7e1d", "provider_id": null, "group_id": null, "service_uuid": null, "shared_targets": true, "cluster_name": null, "consumes_quota": true } ] } ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315577.0191176 cinder-27.0.0/api-ref/source/v3/samples/volumes/v3.69/0000775000175000017500000000000000000000000022165 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/samples/volumes/v3.69/volume-create-response.json0000664000175000017500000000252100000000000027464 0ustar00zuulzuul00000000000000{ "volume": { "attachments": [], "availability_zone": "nova", "bootable": "false", "consistencygroup_id": null, "created_at": "2018-11-28T06:21:12.715987", "description": null, "encrypted": false, "id": "2b955850-f177-45f7-9f49-ecb2c256d161", "links": [ { "href": "http://127.0.0.1:33951/v3/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/volumes/2b955850-f177-45f7-9f49-ecb2c256d161", "rel": "self" }, { "href": "http://127.0.0.1:33951/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/volumes/2b955850-f177-45f7-9f49-ecb2c256d161", "rel": "bookmark" } ], "metadata": {}, "migration_status": null, "multiattach": false, "name": null, "replication_status": null, "size": 10, "snapshot_id": null, "source_volid": null, "status": "creating", "updated_at": null, "user_id": "c853ca26-e8ea-4797-8a52-ee124a013d0e", "volume_type": "__DEFAULT__", "group_id": null, "provider_id": null, "service_uuid": null, "shared_targets": null, "cluster_name": null, "volume_type_id": "5fed9d7c-401d-46e2-8e80-f30c70cb7e1d", "consumes_quota": true } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/samples/volumes/v3.69/volume-show-response.json0000664000175000017500000000305000000000000027177 0ustar00zuulzuul00000000000000{ "volume": { "attachments": [], "availability_zone": "nova", "bootable": "false", "consistencygroup_id": null, "created_at": "2018-11-29T06:50:07.770785", "description": null, "encrypted": false, "id": "f7223234-1afc-4d19-bfa3-d19deb6235ef", "links": [ { "href": "http://127.0.0.1:45839/v3/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/volumes/f7223234-1afc-4d19-bfa3-d19deb6235ef", "rel": "self" }, { "href": "http://127.0.0.1:45839/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/volumes/f7223234-1afc-4d19-bfa3-d19deb6235ef", "rel": "bookmark" } ], "metadata": {}, "migration_status": null, "multiattach": false, "name": null, "os-vol-host-attr:host": null, "os-vol-mig-status-attr:migstat": null, "os-vol-mig-status-attr:name_id": null, "os-vol-tenant-attr:tenant_id": "89afd400-b646-4bbc-b12b-c0a4d63e5bd3", "replication_status": null, "size": 10, "snapshot_id": null, "source_volid": null, "status": "creating", "updated_at": null, "user_id": "c853ca26-e8ea-4797-8a52-ee124a013d0e", 
"volume_type": "__DEFAULT__", "provider_id": null, "group_id": null, "service_uuid": null, "shared_targets": null, "cluster_name": null, "volume_type_id": "5fed9d7c-401d-46e2-8e80-f30c70cb7e1d", "consumes_quota": true } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/samples/volumes/v3.69/volume-update-response.json0000664000175000017500000000263100000000000027505 0ustar00zuulzuul00000000000000{ "volume": { "attachments": [], "availability_zone": "nova", "bootable": "false", "consistencygroup_id": null, "created_at": "2018-11-29T06:59:23.679903", "description": "This is yet, another volume.", "encrypted": false, "id": "8b2459d1-0059-4e14-a89f-dfa73a452af6", "links": [ { "href": "http://127.0.0.1:41467/v3/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/volumes/8b2459d1-0059-4e14-a89f-dfa73a452af6", "rel": "self" }, { "href": "http://127.0.0.1:41467/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/volumes/8b2459d1-0059-4e14-a89f-dfa73a452af6", "rel": "bookmark" } ], "metadata": { "name": "metadata0" }, "migration_status": null, "multiattach": false, "name": "vol-003", "replication_status": null, "size": 10, "snapshot_id": null, "source_volid": null, "status": "creating", "updated_at": null, "user_id": "c853ca26-e8ea-4797-8a52-ee124a013d0e", "volume_type": "__DEFAULT__", "group_id": null, "provider_id": null, "service_uuid": null, "shared_targets": null, "cluster_name": null, "volume_type_id": "5fed9d7c-401d-46e2-8e80-f30c70cb7e1d", "consumes_quota": true } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/samples/volumes/v3.69/volumes-list-detailed-response.json0000664000175000017500000000334100000000000031131 0ustar00zuulzuul00000000000000{ "volumes": [ { "attachments": [], "availability_zone": "nova", "bootable": "false", "consistencygroup_id": null, "created_at": "2018-11-28T06:25:15.288987", "description": null, "encrypted": false, "id": "cb49b381-9012-40cb-b8ee-80c19a4801b5", "links": [ { "href": "http://127.0.0.1:43543/v3/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/volumes/cb49b381-9012-40cb-b8ee-80c19a4801b5", "rel": "self" }, { "href": "http://127.0.0.1:43543/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/volumes/cb49b381-9012-40cb-b8ee-80c19a4801b5", "rel": "bookmark" } ], "metadata": {}, "migration_status": null, "multiattach": false, "name": null, "os-vol-host-attr:host": null, "os-vol-mig-status-attr:migstat": null, "os-vol-mig-status-attr:name_id": null, "os-vol-tenant-attr:tenant_id": "89afd400-b646-4bbc-b12b-c0a4d63e5bd3", "replication_status": null, "size": 10, "snapshot_id": null, "source_volid": null, "status": "creating", "updated_at": null, "user_id": "c853ca26-e8ea-4797-8a52-ee124a013d0e", "volume_type": "__DEFAULT__", "volume_type_id": "5fed9d7c-401d-46e2-8e80-f30c70cb7e1d", "provider_id": null, "group_id": null, "service_uuid": null, "shared_targets": null, "cluster_name": null, "consumes_quota": true } ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/samples/volumes/volume-create-request.json0000664000175000017500000000103500000000000026530 0ustar00zuulzuul00000000000000{ "volume": { "size": 10, "availability_zone": null, "source_volid": null, "description": null, "multiattach": false, "snapshot_id": null, "backup_id": null, "name": null, "imageRef": null, "volume_type": null, "metadata": {}, "consistencygroup_id": null }, 
"OS-SCH-HNT:scheduler_hints": { "same_host": [ "a0cf03a5-d921-4877-bb5c-86d26cf818e1", "8c19174f-4220-44f0-824a-cd1eeef10287" ] } }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/samples/volumes/volume-create-response.json0000664000175000017500000000213400000000000026677 0ustar00zuulzuul00000000000000{ "volume": { "attachments": [], "availability_zone": "nova", "bootable": "false", "consistencygroup_id": null, "created_at": "2018-11-28T06:21:12.715987", "description": null, "encrypted": false, "id": "2b955850-f177-45f7-9f49-ecb2c256d161", "links": [ { "href": "http://127.0.0.1:33951/v3/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/volumes/2b955850-f177-45f7-9f49-ecb2c256d161", "rel": "self" }, { "href": "http://127.0.0.1:33951/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/volumes/2b955850-f177-45f7-9f49-ecb2c256d161", "rel": "bookmark" } ], "metadata": {}, "migration_status": null, "multiattach": false, "name": null, "replication_status": null, "size": 10, "snapshot_id": null, "source_volid": null, "status": "creating", "updated_at": null, "user_id": "c853ca26-e8ea-4797-8a52-ee124a013d0e", "volume_type": "__DEFAULT__" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/samples/volumes/volume-metadata-create-request.json0000664000175000017500000000006700000000000030312 0ustar00zuulzuul00000000000000{ "metadata": { "name": "metadata0" } }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/samples/volumes/volume-metadata-create-response.json0000664000175000017500000000006700000000000030460 0ustar00zuulzuul00000000000000{ "metadata": { "name": "metadata0" } }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/samples/volumes/volume-metadata-show-key-response.json0000664000175000017500000000006300000000000030757 0ustar00zuulzuul00000000000000{ "meta": { "name": "metadata1" } }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/samples/volumes/volume-metadata-show-response.json0000664000175000017500000000002600000000000030170 0ustar00zuulzuul00000000000000{ "metadata": {} }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/samples/volumes/volume-metadata-update-key-request.json0000664000175000017500000000006200000000000031112 0ustar00zuulzuul00000000000000{ "meta": { "name": "new_name" } }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/samples/volumes/volume-metadata-update-key-response.json0000664000175000017500000000006200000000000031260 0ustar00zuulzuul00000000000000{ "meta": { "name": "new_name" } }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/samples/volumes/volume-metadata-update-request.json0000664000175000017500000000006700000000000030331 0ustar00zuulzuul00000000000000{ "metadata": { "name": "metadata1" } }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 
cinder-27.0.0/api-ref/source/v3/samples/volumes/volume-metadata-update-response.json0000664000175000017500000000006700000000000030477 0ustar00zuulzuul00000000000000{ "metadata": { "name": "metadata1" } }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/samples/volumes/volume-show-response.json0000664000175000017500000000246300000000000026421 0ustar00zuulzuul00000000000000{ "volume": { "attachments": [], "availability_zone": "nova", "bootable": "false", "consistencygroup_id": null, "created_at": "2018-11-29T06:50:07.770785", "description": null, "encrypted": false, "id": "f7223234-1afc-4d19-bfa3-d19deb6235ef", "links": [ { "href": "http://127.0.0.1:45839/v3/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/volumes/f7223234-1afc-4d19-bfa3-d19deb6235ef", "rel": "self" }, { "href": "http://127.0.0.1:45839/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/volumes/f7223234-1afc-4d19-bfa3-d19deb6235ef", "rel": "bookmark" } ], "metadata": {}, "migration_status": null, "multiattach": false, "name": null, "os-vol-host-attr:host": null, "os-vol-mig-status-attr:migstat": null, "os-vol-mig-status-attr:name_id": null, "os-vol-tenant-attr:tenant_id": "89afd400-b646-4bbc-b12b-c0a4d63e5bd3", "replication_status": null, "size": 10, "snapshot_id": null, "source_volid": null, "status": "creating", "updated_at": null, "user_id": "c853ca26-e8ea-4797-8a52-ee124a013d0e", "volume_type": "__DEFAULT__" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/samples/volumes/volume-update-request.json0000664000175000017500000000025300000000000026550 0ustar00zuulzuul00000000000000{ "volume": { "name": "vol-003", "description": "This is yet, another volume.", "metadata": { "name": "metadata0" } } }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/samples/volumes/volume-update-response.json0000664000175000017500000000224400000000000026720 0ustar00zuulzuul00000000000000{ "volume": { "attachments": [], "availability_zone": "nova", "bootable": "false", "consistencygroup_id": null, "created_at": "2018-11-29T06:59:23.679903", "description": "This is yet, another volume.", "encrypted": false, "id": "8b2459d1-0059-4e14-a89f-dfa73a452af6", "links": [ { "href": "http://127.0.0.1:41467/v3/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/volumes/8b2459d1-0059-4e14-a89f-dfa73a452af6", "rel": "self" }, { "href": "http://127.0.0.1:41467/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/volumes/8b2459d1-0059-4e14-a89f-dfa73a452af6", "rel": "bookmark" } ], "metadata": { "name": "metadata0" }, "migration_status": null, "multiattach": false, "name": "vol-003", "replication_status": null, "size": 10, "snapshot_id": null, "source_volid": null, "status": "creating", "updated_at": null, "user_id": "c853ca26-e8ea-4797-8a52-ee124a013d0e", "volume_type": "__DEFAULT__" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/samples/volumes/volumes-list-detailed-response.json0000664000175000017500000000272000000000000030344 0ustar00zuulzuul00000000000000{ "volumes": [ { "attachments": [], "availability_zone": "nova", "bootable": "false", "consistencygroup_id": null, "created_at": "2018-11-28T06:25:15.288987", "description": null, "encrypted": false, "id": "cb49b381-9012-40cb-b8ee-80c19a4801b5", "links": [ { "href": 
"http://127.0.0.1:43543/v3/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/volumes/cb49b381-9012-40cb-b8ee-80c19a4801b5", "rel": "self" }, { "href": "http://127.0.0.1:43543/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/volumes/cb49b381-9012-40cb-b8ee-80c19a4801b5", "rel": "bookmark" } ], "metadata": {}, "migration_status": null, "multiattach": false, "name": null, "os-vol-host-attr:host": null, "os-vol-mig-status-attr:migstat": null, "os-vol-mig-status-attr:name_id": null, "os-vol-tenant-attr:tenant_id": "89afd400-b646-4bbc-b12b-c0a4d63e5bd3", "replication_status": null, "size": 10, "snapshot_id": null, "source_volid": null, "status": "creating", "updated_at": null, "user_id": "c853ca26-e8ea-4797-8a52-ee124a013d0e", "volume_type": "__DEFAULT__" } ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/samples/volumes/volumes-list-response.json0000664000175000017500000000111300000000000026566 0ustar00zuulzuul00000000000000{ "volumes": [ { "id": "efa54464-8fab-47cd-a05a-be3e6b396188", "links": [ { "href": "http://127.0.0.1:37097/v3/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/volumes/efa54464-8fab-47cd-a05a-be3e6b396188", "rel": "self" }, { "href": "http://127.0.0.1:37097/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/volumes/efa54464-8fab-47cd-a05a-be3e6b396188", "rel": "bookmark" } ], "name": null } ] }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/samples/volumes/volumes-list-summary-response.json0000664000175000017500000000027500000000000030271 0ustar00zuulzuul00000000000000{ "volume-summary": { "total_size": 4, "total_count": 4, "metadata": { "key1": ["value1", "value2"], "key2": ["value2"] } } }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/samples/worker-cleanup-request.json0000664000175000017500000000036500000000000025231 0ustar00zuulzuul00000000000000{ "cluster_name": "test", "disabled": true, "host": "host1@lvmdriver", "service_id": 1, "is_up": true, "binary": "cinder-volume", "resource_id": "b122f668-d15a-40f8-af21-38d218796ab8", "resource_type": "Volume" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/samples/worker-cleanup-response.json0000664000175000017500000000031500000000000025372 0ustar00zuulzuul00000000000000{ "cleaning": [ { "id": 1, "host": "host1@lvmdriver", "binary": "cinder-volume", "cluster_name": "test" } ], "unavailable": [] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/snapshot-manage.inc0000664000175000017500000000723000000000000022024 0ustar00zuulzuul00000000000000.. -*- rst -*- Snapshot manage extension (manageable_snapshots) ================================================ Creates or lists snapshots by using existing storage instead of allocating new storage. Manage an existing snapshot ~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: POST /v3/{project_id}/manageable_snapshots Creates a snapshot by using existing storage rather than allocating new storage. The caller must specify a reference to an existing storage volume in the ref parameter in the request. Although each storage driver might interpret this reference differently, the driver should accept a reference structure that contains either a source-id or source-name element, if possible. 
The API chooses the size of the snapshot by rounding up the size of the existing snapshot to the next gibibyte (GiB). Response codes -------------- .. rest_status_code:: success ../status.yaml - 202 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - snapshot: snapshot_obj - description: description_snap - metadata: metadata_snap - name: name_snap - ref: ref - volume_id: volume_id Request Example --------------- .. literalinclude:: ./samples/snapshot_manage_extensions/snapshot-manage-request.json :language: javascript Response -------- .. rest_parameters:: parameters.yaml - snapshot: snapshot_obj - status: status_snap - size: size - metadata: metadata_snap - name: name_snap - volume_id: volume_id - created_at: created_at - description: description_snap_req - id: id - updated_at: updated_at Response Example ---------------- .. literalinclude:: ./samples/snapshot_manage_extensions/snapshot-manage-response.json :language: javascript List summary of snapshots available to manage ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: GET /v3/{project_id}/manageable_snapshots Search a volume backend and list summary of snapshots which are available to manage. Response codes -------------- .. rest_status_code:: success ../status.yaml - 200 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - sort: sort - sort_key: sort_key - sort_dir: sort_dir - offset: offset - limit: limit - marker: marker - host: host_query Response -------- .. rest_parameters:: parameters.yaml - manageable-snapshots: manageable-snapshots - source_reference: source_reference - safe_to_manage: safe_to_manage - reference: reference - source-name: source-name - size: size Response Example ---------------- .. literalinclude:: ./samples/snapshot-manage-list-response.json :language: javascript List detail of snapshots available to manage ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: GET /v3/{project_id}/manageable_snapshots/detail Search a volume backend and list detail of snapshots which are available to manage. Response codes -------------- .. rest_status_code:: success ../status.yaml - 200 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - sort: sort - sort_key: sort_key - sort_dir: sort_dir - offset: offset - limit: limit - marker: marker - host: host_query Response -------- .. rest_parameters:: parameters.yaml - manageable-snapshots: manageable-snapshots - cinder_id: cinder_id - source_reference: source_reference - safe_to_manage: safe_to_manage - reason_not_safe: reason_not_safe - reference: reference - source-name: source-name - size: size - extra_info: extra_info Response Example ---------------- .. literalinclude:: ./samples/snapshot-manage-list-detail-response.json :language: javascript ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/valid-boolean-values.inc0000664000175000017500000000064200000000000022750 0ustar00zuulzuul00000000000000.. -*- rst -*- .. _valid-boolean-values: Valid boolean values ==================== Following is the list of valid values for boolean parameters. 
[True, ‘True’, ‘TRUE’, ‘true’, ‘1’, ‘ON’, ‘On’, ‘on’, ‘YES’, ‘Yes’, ‘yes’, ‘y’, ‘t’, False, ‘False’, ‘FALSE’, ‘false’, ‘0’, ‘OFF’, ‘Off’, ‘off’, ‘NO’, ‘No’, ‘no’, ‘n’, ‘f’] ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/vol-transfer-v3.inc0000664000175000017500000001242600000000000021712 0ustar00zuulzuul00000000000000.. -*- rst -*- Volume transfers (volume-transfers) (3.55 or later) =================================================== Transfers a volume from one user to another user. This is the new transfer APIs with microversion 3.55. Accept a volume transfer ~~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: POST /v3/{project_id}/volume-transfers/{transfer_id}/accept Accepts a volume transfer. Response codes -------------- .. rest_status_code:: success ../status.yaml - 202 .. rest_status_code:: error ../status.yaml - 400 - 413 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - transfer_id: transfer_id - auth_key: auth_key Request Example --------------- .. literalinclude:: ./samples/volume_transfers/volume-transfers-accept-request.json :language: javascript Response Parameters ------------------- .. rest_parameters:: parameters.yaml - transfer: transfer - volume_id: volume_id - id: id - links: links - name: transfer_name Response Example ---------------- .. literalinclude:: ./samples/volume_transfers/volume-transfers-accept-response.json :language: javascript Create a volume transfer ~~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: POST /v3/{project_id}/volume-transfers Creates a volume transfer. **Preconditions** * The volume ``status`` must be ``available`` * Transferring encrypted volumes is not supported * If the volume has snapshots, those snapshots must be ``available`` unless ``no_snapshots=True`` Response codes -------------- .. rest_status_code:: success ../status.yaml - 202 .. rest_status_code:: error ../status.yaml - 400 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - transfer: transfer - name: name - volume_id: volume_id - no_snapshots: no_snapshots Request Example --------------- .. literalinclude:: ./samples/volume_transfers/v3.57/volume-transfers-create-request.json :language: javascript Response Parameters ------------------- .. rest_parameters:: parameters.yaml - auth_key: auth_key - links: links - created_at: created_at - volume_id: volume_id - id: id - name: name - destination_project_id: destination_project_id - source_project_id: source_project_id - accepted: accepted - no_snapshots: no_snapshots Response Example ---------------- .. literalinclude:: ./samples/volume_transfers/v3.57/volume-transfers-create-response.json :language: javascript List volume transfers for a project ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: GET /v3/{project_id}/volume-transfers Lists volume transfers. Response codes -------------- .. rest_status_code:: success ../status.yaml - 200 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - all_tenants: all-tenants - sort: sort_transfer - sort_key: sort_key_transfer - sort_dir: sort_dir_transfer - limit: limit_transfer - offset: offset_transfer - marker: marker_transfer Response Parameters ------------------- .. rest_parameters:: parameters.yaml - volume_id: volume_id - id: id - links: links - name: name Response Example ---------------- .. 
literalinclude:: ./samples/volume_transfer/volume-transfers-list-response.json :language: javascript Show volume transfer detail ~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: GET /v3/{project_id}/volume-transfers/{transfer_id} Shows details for a volume transfer. Response codes -------------- .. rest_status_code:: success ../status.yaml - 200 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - transfer_id: transfer_id Response Parameters ------------------- .. rest_parameters:: parameters.yaml - created_at: created_at - volume_id: volume_id - id: id - links: links - name: name - destination_project_id: destination_project_id - source_project_id: source_project_id - accepted: accepted - no_snapshots: no_snapshots Response Example ---------------- .. literalinclude:: ./samples/volume_transfers/v3.57/volume-transfers-show-response.json :language: javascript Delete a volume transfer ~~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: DELETE /v3/{project_id}/volume-transfers/{transfer_id} Deletes a volume transfer. Response codes -------------- .. rest_status_code:: success ../status.yaml - 202 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - transfer_id: transfer_id List volume transfers and details ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: GET /v3/{project_id}/volume-transfers/detail Lists volume transfers, with details. Response codes -------------- .. rest_status_code:: success ../status.yaml - 200 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - all_tenants: all-tenants Response Parameters ------------------- .. rest_parameters:: parameters.yaml - transfers: transfers - created_at: created_at - volume_id: volume_id - id: id - links: links - name: name - destination_project_id: destination_project_id - source_project_id: source_project_id - accepted: accepted Response Example ---------------- .. literalinclude:: ./samples/volume_transfer/volume-transfers-list-detailed-response.json :language: javascript ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/volume-manage.inc0000664000175000017500000001066400000000000021501 0ustar00zuulzuul00000000000000.. -*- rst -*- Volume manage extension (manageable_volumes) ============================================ Creates or lists volumes by using existing storage instead of allocating new storage. Manage an existing volume ~~~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: POST /v3/{project_id}/manageable_volumes Creates a Block Storage volume by using existing storage rather than allocating new storage. The caller must specify a reference to an existing storage volume in the ref parameter in the request. Although each storage driver might interpret this reference differently, the driver should accept a reference structure that contains either a source-id or source-name element, if possible. The API chooses the size of the volume by rounding up the size of the existing storage volume to the next gibibyte (GiB). You cannot manage a volume to an encrypted volume type. Prior to microversion 3.16 host field was required, with the possibility of defining the cluster it is no longer required, but we must have either a host or a cluster field but we cannot have them both with values. Response codes -------------- .. rest_status_code:: success ../status.yaml - 202 Request ------- .. 
rest_parameters:: parameters.yaml - project_id: project_id_path - volume: volume - description: description_vol - availability_zone: availability_zone - bootable: bootable - volume_type: name_volume_type_optional - name: volume_name_optional - host: host_mutex - cluster: cluster_mutex - ref: ref - metadata: metadata_vol Request Example --------------- .. literalinclude:: ./samples/volume_manage_extensions/volume-manage-request.json :language: javascript .. literalinclude:: ./samples/volume-manage-request-cluster.json :language: javascript Response -------- .. rest_parameters:: parameters.yaml - volume: volume - status: status_vol - migration_status: migration_status - user_id: user_id - attachments: attachments - links: links_vol - availability_zone: availability_zone - bootable: bootable_response - encrypted: encrypted - created_at: created_at - description: description_vol - updated_at: updated_at - volume_type: volume_type - name: name_vol - replication_status: replication_status - consistencygroup_id: consistencygroup_id_required - source_volid: source_volid - snapshot_id: snapshot_id - multiattach: multiattach_resp - metadata: metadata_vol_obj - id: id_vol - size: size Response Example ---------------- .. literalinclude:: ./samples/volume_manage_extensions/volume-manage-response.json :language: javascript List summary of volumes available to manage ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: GET /v3/{project_id}/manageable_volumes Search a volume backend and list summary of volumes which are available to manage. Response codes -------------- .. rest_status_code:: success ../status.yaml - 200 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - sort: sort - sort_key: sort_key - sort_dir: sort_dir - offset: offset - limit: limit - marker: marker - host: hostname Response -------- .. rest_parameters:: parameters.yaml - manageable-volumes: manageable-volumes - safe_to_manage: safe_to_manage - reference: reference - source-name: source-name - size: size Response Example ---------------- .. literalinclude:: ./samples/volume-manage-list-response.json :language: javascript List detail of volumes available to manage ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: GET /v3/{project_id}/manageable_volumes/detail Search a volume backend and list detail of volumes which are available to manage. Response codes -------------- .. rest_status_code:: success ../status.yaml - 200 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - sort: sort - sort_key: sort_key - sort_dir: sort_dir - offset: offset - limit: limit - marker: marker - host: host_query Response -------- .. rest_parameters:: parameters.yaml - manageable-volumes: manageable-volumes - cinder_id: cinder_id - safe_to_manage: safe_to_manage - reason_not_safe: reason_not_safe - reference: reference - source-name: source-name - size: size - extra_info: extra_info Response Example ---------------- .. literalinclude:: ./samples/volume-manage-list-detail-response.json :language: javascript ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/volume-type-access.inc0000664000175000017500000000501400000000000022462 0ustar00zuulzuul00000000000000.. -*- rst -*- Volume type access (types, action) (types, os-volume-type-access) ================================================================= Private volume type access to project. By default, volumes types are public. 
To create a private volume type, set the ``is_public`` boolean field to ``false`` at volume type creation time. To control access to a private volume type, user needs to add a project to or remove a project from the volume type. Private volume types without projects are only accessible by users with the administrative role and context. Add private volume type access to project ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: POST /v3/{project_id}/types/{volume_type}/action Adds private volume type access to a project. Response codes -------------- .. rest_status_code:: success ../status.yaml - 202 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - volume_type: volume_type_id - addProjectAccess: add_project_access - project: project Request Example --------------- .. literalinclude:: ./samples/volume_type/volume-type-access-add-request.json :language: javascript Remove private volume type access from project ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: POST /v3/{project_id}/types/{volume_type}/action Removes private volume type access from a project. Response codes -------------- .. rest_status_code:: success ../status.yaml - 202 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - volume_type: volume_type_id - removeProjectAccess: remove_project_access - project: project Request Example --------------- .. literalinclude:: ./samples/volume-type-access-delete-request.json :language: javascript List private volume type access detail ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: GET /v3/{project_id}/types/{volume_type}/os-volume-type-access Lists project IDs that have access to private volume type. Response codes -------------- .. rest_status_code:: success ../status.yaml - 200 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - volume_type: volume_type_id Response Parameters ------------------- .. rest_parameters:: parameters.yaml - volume_type_access: volume_type_access - project_id: project_id - volume_type_id: volume_type_id_body Response Example ---------------- .. literalinclude:: ./samples/volume_type/volume-type-access-list-response.json :language: javascript ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/volumes-v3-extensions.inc0000664000175000017500000000137700000000000023162 0ustar00zuulzuul00000000000000.. -*- rst -*- API extensions (extensions) =========================== List Known API extensions ~~~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: GET /v3/{project_id}/extensions Lists Block Storage API extensions. Response codes -------------- .. rest_status_code:: success ../status.yaml - 200 .. rest_status_code:: error ../status.yaml - 300 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path Response Parameters ------------------- .. rest_parameters:: parameters.yaml - updated: updated_at - description: description_extn - links: links - alias: alias - name: name Response Example ---------------- .. literalinclude:: ./samples/extensions/extensions-list-response.json :language: javascript ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/volumes-v3-snapshots-actions.inc0000664000175000017500000000407200000000000024436 0ustar00zuulzuul00000000000000.. 
-*- rst -*- Snapshot actions (snapshots, action) ==================================== Administrator only, depending on policy settings. Resets, updates status for a snapshot. Reset a snapshot's status ~~~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: POST /v3/{project_id}/snapshots/{snapshot_id}/action Resets the status. Specify the ``os-reset_status`` action in the request body. Response codes -------------- .. rest_status_code:: success ../status.yaml - 202 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - snapshot_id: snapshot_id_path - os-reset_status: os-reset_status - status: status_snap Request Example --------------- .. literalinclude:: ./samples/snapshot-status-reset-request.json :language: javascript Update status of a snapshot ~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: POST /v3/{project_id}/snapshots/{snapshot_id}/action Update fields related to the status of a snapshot. Specify the ``os-update_snapshot_status`` action in the request body. Response codes -------------- .. rest_status_code:: success ../status.yaml - 202 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - snapshot_id: snapshot_id_path - os-update_snapshot_status: os-update_snapshot_status - status: status_snap - progress: snapshot_progress Request Example --------------- .. literalinclude:: ./samples/snapshot-status-update-request.json :language: javascript Force delete a snapshot ~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: POST /v3/{project_id}/snapshots/{snapshot_id}/action Attempts to force delete a snapshot, regardless of state. Specify the ``os-force_delete`` action in the request body. Response codes -------------- .. rest_status_code:: success ../status.yaml - 202 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - snapshot_id: snapshot_id_path - os-force_delete: os-force_delete Request Example --------------- .. literalinclude:: ./samples/snapshot-force-delete-request.json :language: javascript ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/volumes-v3-snapshots.inc0000664000175000017500000003131200000000000022775 0ustar00zuulzuul00000000000000.. -*- rst -*- Volume snapshots (snapshots) ============================ A snapshot is a point-in-time copy of the data that a volume contains. When you create, list, or delete snapshots, these status values are possible: **Snapshot statuses** +----------------+---------------------------------------------+ | Status | Description | +----------------+---------------------------------------------+ | creating | The snapshot is being created. | +----------------+---------------------------------------------+ | available | The snapshot is ready to use. | +----------------+---------------------------------------------+ | backing-up | The snapshot is being backed up. | +----------------+---------------------------------------------+ | deleting | The snapshot is being deleted. | +----------------+---------------------------------------------+ | error | A snapshot creation error occurred. | +----------------+---------------------------------------------+ | deleted | The snapshot has been deleted. | +----------------+---------------------------------------------+ | unmanaging | The snapshot is being unmanaged. | +----------------+---------------------------------------------+ | restoring | The snapshot is being restored to a volume. 
| +----------------+---------------------------------------------+ | error_deleting | A snapshot deletion error occurred. | +----------------+---------------------------------------------+ List snapshots and details ~~~~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: GET /v3/{project_id}/snapshots/detail Lists all Block Storage snapshots, with details, that the project can access, since v3.31 if non-admin users specify invalid filters in the url, API will return bad request. Response codes -------------- .. rest_status_code:: success ../status.yaml - 200 .. rest_status_code:: error ../status.yaml - 400 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - all_tenants: all-tenants - sort: sort - sort_key: sort_key - sort_dir: sort_dir - limit: limit - offset: offset - marker: marker - with_count: with_count - consumes_quota: filter_consumes_quota Response Parameters ------------------- .. rest_parameters:: parameters.yaml - status: status_snap - os-extended-snapshot-attributes:progress: os-ext-snap-attr:progress - description: description_snap_req - created_at: created_at - name: name - user_id: user_id_min - volume_id: volume_id_snap - os-extended-snapshot-attributes:project_id: os-ext-snap-attr:project_id - size: size - id: id_snap - metadata: metadata - count: count - updated_at: updated_at - snapshots_links: links_snap - group_snapshot_id: group_snapshot_id_3_14 - consumes_quota: consumes_quota Response Example (v3.65) ------------------------ .. literalinclude:: ./samples/snapshots/v3.65/snapshots-list-detailed-response.json :language: javascript Create a snapshot ~~~~~~~~~~~~~~~~~ .. rest_method:: POST /v3/{project_id}/snapshots Creates a volume snapshot, which is a point-in-time, complete copy of a volume. You can create a volume from a snapshot. Prior to API version 3.66, a 'force' flag was required to create a snapshot of an in-use volume, but this is no longer the case. From API version 3.66, the 'force' flag is invalid when passed in a volume snapshot request. (For backward compatibility, however, a 'force' flag with a value evaluating to True is silently ignored.) Response codes -------------- .. rest_status_code:: success ../status.yaml - 202 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - snapshot: snapshot_obj - volume_id: volume_id - name: name_snap_req - description: description_snap - force: force_snapshot - metadata: metadata_snap Request Example --------------- .. literalinclude:: ./samples/snapshots/snapshot-create-request.json :language: javascript Response Parameters ------------------- .. rest_parameters:: parameters.yaml - status: status_snap - description: description_snap_req - created_at: created_at - name: name_snap_req - snapshot: snapshot_obj - user_id: user_id_min - volume_id: volume_id_snap - metadata: metadata - id: id_snap - size: size - updated_at: updated_at - group_snapshot_id: group_snapshot_id_3_14 - consumes_quota: consumes_quota Response Example (v3.65) ------------------------ .. literalinclude:: ./samples/snapshots/v3.65/snapshot-create-response.json :language: javascript List accessible snapshots ~~~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: GET /v3/{project_id}/snapshots Lists all Block Storage snapshots, with summary information, that the project can access, since v3.31 if non-admin users specify invalid filters in the url, API will return bad request. Response codes -------------- .. rest_status_code:: success ../status.yaml - 200 .. 
rest_status_code:: error ../status.yaml - 400 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - all_tenants: all-tenants - sort: sort - sort_key: sort_key - sort_dir: sort_dir - limit: limit - offset: offset - marker: marker - consumes_quota: filter_consumes_quota - with_count: with_count Response Parameters ------------------- .. rest_parameters:: parameters.yaml - status: status_snap - description: description_snap_req - created_at: created_at - name: name - volume_id: volume_id_snap - metadata: metadata - id: id_snap - size: size - count: count - updated_at: updated_at - snapshots_links: links_snap Response Example ---------------- .. literalinclude:: ./samples/snapshots/snapshots-list-response.json :language: javascript Show a snapshot's metadata ~~~~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: GET /v3/{project_id}/snapshots/{snapshot_id}/metadata Shows metadata for a snapshot. Response codes -------------- .. rest_status_code:: success ../status.yaml - 200 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - snapshot_id: snapshot_id_path Response Parameters ------------------- .. rest_parameters:: parameters.yaml - metadata: metadata Response Example ---------------- .. literalinclude:: ./samples/snapshots/snapshot-metadata-show-response.json :language: javascript Create a snapshot's metadata ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: POST /v3/{project_id}/snapshots/{snapshot_id}/metadata Updates metadata for a snapshot. Creates or replaces metadata items that match keys. Does not modify items that are not in the request. Response codes -------------- .. rest_status_code:: success ../status.yaml - 200 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - snapshot_id: snapshot_id_path - metadata: metadata Request Example --------------- .. literalinclude:: ./samples/snapshots/snapshot-metadata-create-request.json :language: javascript Response Parameters ------------------- .. rest_parameters:: parameters.yaml - metadata: metadata Response Example ---------------- .. literalinclude:: ./samples/snapshots/snapshot-metadata-create-response.json :language: javascript Update a snapshot's metadata ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: PUT /v3/{project_id}/snapshots/{snapshot_id}/metadata Replaces all the snapshot's metadata with the key-value pairs in the request. Response codes -------------- .. rest_status_code:: success ../status.yaml - 200 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - snapshot_id: snapshot_id_path - metadata: metadata Request Example --------------- .. literalinclude:: ./samples/snapshots/snapshot-metadata-update-request.json :language: javascript Response Parameters ------------------- .. rest_parameters:: parameters.yaml - metadata: metadata Response Example ---------------- .. literalinclude:: ./samples/snapshots/snapshot-metadata-update-response.json :language: javascript Show a snapshot's details ~~~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: GET /v3/{project_id}/snapshots/{snapshot_id} Shows details for a snapshot. Response codes -------------- .. rest_status_code:: success ../status.yaml - 200 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - snapshot_id: snapshot_id_path Response Parameters ------------------- .. 
rest_parameters:: parameters.yaml - status: status_snap - os-extended-snapshot-attributes:progress: os-ext-snap-attr:progress - description: description_snap_req - created_at: created_at - name: name - snapshot: snapshot_obj - user_id: user_id_min - volume_id: volume_id_snap - os-extended-snapshot-attributes:project_id: os-ext-snap-attr:project_id - size: size - id: id_snap - metadata: metadata - updated_at: updated_at - group_snapshot_id: group_snapshot_id_3_14 - consumes_quota: consumes_quota Response Example (v3.65) ------------------------ .. literalinclude:: ./samples/snapshots/v3.65/snapshot-show-response.json :language: javascript Update a snapshot ~~~~~~~~~~~~~~~~~ .. rest_method:: PUT /v3/{project_id}/snapshots/{snapshot_id} Updates a snapshot. Response codes -------------- .. rest_status_code:: success ../status.yaml - 200 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - snapshot_id: snapshot_id_path - snapshot: snapshot_obj - description: description_snap - name: snapshot_name Request Example --------------- .. literalinclude:: ./samples/snapshots/snapshot-update-request.json :language: javascript Response Parameters ------------------- .. rest_parameters:: parameters.yaml - status: status_snap - description: description_snap_req - created_at: created_at - name: name - snapshot: snapshot_obj - id: id_snap - size: size - volume_id: volume_id_snap - user_id: user_id_min - metadata: metadata - group_snapshot_id: group_snapshot_id_3_14 - consumes_quota: consumes_quota Response Example (v3.65) ------------------------ .. literalinclude:: ./samples/snapshots/v3.65/snapshot-update-response.json :language: javascript Delete a snapshot ~~~~~~~~~~~~~~~~~ .. rest_method:: DELETE /v3/{project_id}/snapshots/{snapshot_id} Deletes a snapshot. Preconditions: - Snapshot status must be ``available`` or ``error`` Response codes -------------- .. rest_status_code:: success ../status.yaml - 202 .. rest_status_code:: error ../status.yaml - 400 - 403 - 404 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - snapshot_id: snapshot_id_path Show a snapshot's metadata for a specific key ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: GET /v3/{project_id}/snapshot/{snapshot_id}/metadata/{key} Shows metadata for a snapshot for a specific key. Response codes -------------- .. rest_status_code:: success ../status.yaml - 200 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - snapshot_id: snapshot_id_path - key: key_view Response Parameters ------------------- .. rest_parameters:: parameters.yaml - meta: meta_snap Response Example ---------------- .. literalinclude:: ./samples/snapshots/snapshot-metadata-show-key-response.json :language: javascript Delete a snapshot's metadata ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: DELETE /v3/{project_id}/snapshots/{snapshot_id}/metadata/{key} Deletes metadata for a snapshot. Response codes -------------- .. rest_status_code:: success ../status.yaml - 200 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - snapshot_id: snapshot_id_path - key: key_path Update a snapshot's metadata for a specific key ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: PUT /v3/{project_id}/snapshots/{snapshot_id}/metadata/{key} Update metadata for a snapshot for a specific key. Response codes -------------- .. rest_status_code:: success ../status.yaml - 200 Request ------- .. 
rest_parameters:: parameters.yaml - project_id: project_id_path - snapshot_id: snapshot_id_path - key: key_update - meta: meta_snap Request Example --------------- .. literalinclude:: ./samples/snapshots/snapshot-metadata-update-key-request.json :language: javascript Response Parameters ------------------- .. rest_parameters:: parameters.yaml - meta: meta_snap Response Example ---------------- .. literalinclude:: ./samples/snapshots/snapshot-metadata-update-key-response.json :language: javascript ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/volumes-v3-types.inc0000664000175000017500000003713500000000000022130 0ustar00zuulzuul00000000000000.. -*- rst -*- Volume types (types) ==================== To create an environment with multiple-storage back ends, you must specify a volume type. The API spawns Block Storage volume back ends as children to ``cinder-volume``, and keys them from a unique queue. The API names the back ends ``cinder-volume.HOST.BACKEND``. For example, ``cinder-volume.ubuntu.lvmdriver``. When you create a volume, the scheduler chooses an appropriate back end for the volume type to handle the request. For information about how to use volume types to create multiple- storage back ends, see `Configure multiple-storage back ends `_. Update a volume type ~~~~~~~~~~~~~~~~~~~~ .. rest_method:: PUT /v3/{project_id}/types/{volume_type_id} Updates a volume type. Response codes -------------- .. rest_status_code:: success ../status.yaml - 200 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - volume_type_id: volume_type_id - volume_type: volume_type - name: name_volume_type_optional - description: description_volume_type_optional - is_public: is_public_volume_type_optional Request Example --------------- .. literalinclude:: ./samples/volume_type/volume-type-update-request.json :language: javascript Response Parameters ------------------- .. rest_parameters:: parameters.yaml - volume_type: volume_type - is_public: is_public_volume_type_required - extra_specs: extra_specs_volume_type_optional - description: description_volume_type_required - name: name_volume_type_required - id: volume_type_id Response Example ---------------- .. literalinclude:: ./samples/volume_type/volume-type-update-response.json :language: javascript Create or update extra specs for volume type ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: POST /v3/{project_id}/types/{volume_type_id}/extra_specs Adds new extra specifications to a volume type, or updates the extra specifications that are assigned to a volume type. Response codes -------------- .. rest_status_code:: success ../status.yaml - 200 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - volume_type_id: volume_type_id - extra_specs: extra_specs_volume_type_required Request Example --------------- .. literalinclude:: ./samples/volume_type/volume-type-extra-specs-create-update-request.json :language: javascript Response Parameters ------------------- .. rest_parameters:: parameters.yaml - extra_specs: extra_specs_volume_type_required Response Example ---------------- .. literalinclude:: ./samples/volume_type/volume-type-extra-specs-create-update-response.json :language: javascript Show all extra specifications for volume type ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. 
rest_method:: GET /v3/{project_id}/types/{volume_type_id}/extra_specs Shows all extra specifications assigned to a volume type. Response codes -------------- .. rest_status_code:: success ../status.yaml - 200 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - volume_type_id: volume_type_id Response Parameters ------------------- .. rest_parameters:: parameters.yaml - extra_specs: extra_specs_volume_type_required Response Example ---------------- .. literalinclude:: ./samples/volume_type/volume-type-all-extra-specs-show-response.json :language: javascript Show extra specification for volume type ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: GET /v3/{project_id}/types/{volume_type_id}/extra_specs/{key} Shows the specific extra specification assigned to a volume type. Response codes -------------- .. rest_status_code:: success ../status.yaml - 200 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - volume_type_id: volume_type_id - key: key_extra_spec Response Example ---------------- .. literalinclude:: ./samples/volume_type/volume-type-specific-extra-specs-show-response.json :language: javascript Update extra specification for volume type ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: PUT /v3/{project_id}/types/{volume_type_id}/extra_specs/{key} Update the specific extra specification assigned to a volume type. Response codes -------------- .. rest_status_code:: success ../status.yaml - 200 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - volume_type_id: volume_type_id - key: key_extra_spec Request Example --------------- .. literalinclude:: ./samples/volume_type/volume-type-specific-extra-specs-update-request.json :language: javascript Response Example ---------------- .. literalinclude:: ./samples/volume_type/volume-type-specific-extra-specs-update-response.json :language: javascript Delete extra specification for volume type ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: DELETE /v3/{project_id}/types/{volume_type_id}/extra_specs/{key} Deletes the specific extra specification assigned to a volume type. Response codes -------------- .. rest_status_code:: success ../status.yaml - 202 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - volume_type_id: volume_type_id - key: key_extra_spec Show volume type detail ~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: GET /v3/{project_id}/types/{volume_type_id} Shows details for a volume type. Response codes -------------- .. rest_status_code:: success ../status.yaml - 200 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - volume_type_id: volume_type_id Response Parameters ------------------- .. rest_parameters:: parameters.yaml - volume_type: volume_type - is_public: is_public_volume_type_required - extra_specs: extra_specs_volume_type_optional - description: description_volume_type_required - name: name_volume_type_required - id: volume_type_id_body - os-volume-type-access:is_public: is_public_volume_type_required - qos_specs_id: qos_specs_id Response Example ---------------- .. literalinclude:: ./samples/volume_type/volume-type-show-response.json :language: javascript Show default volume type ~~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: GET /v3/{project_id}/types/default Shows details for the default volume type, that is, the volume type that will be used in the `Create a volume`_ request if you do not specify one. 
This could be one of the following: - Your project's default volume type *(since microversion 3.62)* - The installation's default volume type as configured by the operator Response codes -------------- .. rest_status_code:: success ../status.yaml - 200 .. rest_status_code:: error ../status.yaml - 404 - 500 Error conditions ---------------- It is only possible to receive a 404 (Not Found) response in pre-Train versions of the Block Storage service, as a configured default volume type has been required since the Train release. If you receive a 500 (Internal Error Response), then the default volume type has not been configured correctly by the operator. Please contact your cloud provider. * When the default volume type is misconfigured, requests to `Create a volume`_ that do not include a volume type will fail. * The workaround is to include a volume type in your request. You can `List all volume types`_ to determine a volume type to use. Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path Response Parameters ------------------- .. rest_parameters:: parameters.yaml - volume_type: volume_type - is_public: is_public_volume_type_required - extra_specs: extra_specs_volume_type_optional - description: description_volume_type_required - name: name_volume_type_required - qos_specs_id: qos_specs_id Response Example ---------------- .. literalinclude:: ./samples/volume_type/volume-type-default-response.json :language: javascript Delete a volume type ~~~~~~~~~~~~~~~~~~~~ .. rest_method:: DELETE /v3/{project_id}/types/{volume_type_id} Deletes a volume type. *Note to operators:* Since the Train release, untyped volumes are not allowed, and a configured default volume type is required in each deployment. An attempt to delete the configured default volume type will fail. Response codes -------------- .. rest_status_code:: success ../status.yaml - 202 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - volume_type_id: volume_type_id List all volume types ~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: GET /v3/{project_id}/types Lists volume types. To determine which of these is the default type that will be used if you do not specify one in the `Create a volume`_ request, use the `Show default volume type`_ request. *Note to users:* There may be a volume type named ``__DEFAULT__`` in the list. Try not to use this volume type, unless necessary or instructed by the operator, in a `Create a volume`_ request. If you wish to create a volume of *your* default volume type, simply omit the ``volume_type`` parameter in your `Create a volume`_ request. *Note to operators:* The ``__DEFAULT__`` volume type was introduced in the Train release as a placeholder to prevent the creation of untyped volumes. Under the proper conditions, it may be removed from your deployment. Consult the Default Volume Types section in `Cinder Administration Guide `_ for details. Response codes -------------- .. rest_status_code:: success ../status.yaml - 200 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - is_public: is_public_volume_type_query - sort: sort - sort_key: sort_key - sort_dir: sort_dir - limit: limit - offset: offset - marker: marker Response Parameters ------------------- .. 
rest_parameters:: parameters.yaml - volume_types: volume_types - extra_specs: extra_specs_volume_type_optional - name: name_volume_type_required - is_public: is_public_volume_type_required - description: description_volume_type_required - id: volume_type_id_body - os-volume-type-access:is_public: is_public_volume_type_required - qos_specs_id: qos_specs_id Response Example ---------------- .. literalinclude:: ./samples/volume_type/volume-types-list-response.json :language: javascript Create a volume type ~~~~~~~~~~~~~~~~~~~~ .. rest_method:: POST /v3/{project_id}/types Creates a volume type. Response codes -------------- .. rest_status_code:: success ../status.yaml - 200 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - volume_type: volume_type - name: name_volume_type_required - os-volume-type-access:is_public: is_public_volume_type_optional - description: description_volume_type_optional - extra_specs: extra_specs_volume_type_optional Request Example --------------- .. literalinclude:: ./samples/volume_type/volume-type-create-request.json :language: javascript Response Parameters ------------------- .. rest_parameters:: parameters.yaml - volume_type: volume_type - is_public: is_public_volume_type_required - extra_specs: extra_specs_volume_type_optional - description: description_volume_type_required - name: name_volume_type_required - id: volume_type_id_body - os-volume-type-access:is_public: is_public_volume_type_required Response Example ---------------- .. literalinclude:: ./samples/volume_type/volume-type-create-response.json :language: javascript Show an encryption type ~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: GET /v3/{project_id}/types/{volume_type_id}/encryption To show an encryption type for an existing volume type. Response codes -------------- .. rest_status_code:: success ../status.yaml - 200 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - volume_type_id: volume_type_id Response Parameters ------------------- .. rest_parameters:: parameters.yaml - volume_type_id: volume_type_id_body - encryption_id: encryption_id_body - key_size: key_size - provider: provider - control_location: control_location - cipher: cipher - deleted: deleted - created_at: created_at - updated_at: updated_at - deleted_at: deleted_at Response Example ---------------- .. literalinclude:: ./samples/volume_type/encryption-type-show-response.json :language: javascript Show encryption specs item ~~~~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: GET /v3/{project_id}/types/{volume_type_id}/encryption/{key} To show encryption specs item for an existing volume type. Response codes -------------- .. rest_status_code:: success ../status.yaml - 200 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - volume_type_id: volume_type_id - key: key_encrypt_spec Response Example ---------------- .. literalinclude:: ./samples/volume_type/encryption-type-specific-specs-show-response.json :language: javascript Delete an encryption type ~~~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: DELETE /v3/{project_id}/types/{volume_type_id}/encryption/{encryption_id} To delete an encryption type for an existing volume type. Response codes -------------- .. rest_status_code:: success ../status.yaml - 202 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - volume_type_id: volume_type_id - encryption_id: encryption_id Create an encryption type ~~~~~~~~~~~~~~~~~~~~~~~~~ .. 
rest_method:: POST /v3/{project_id}/types/{volume_type_id}/encryption To create an encryption type for an existing volume type. Response codes -------------- .. rest_status_code:: success ../status.yaml - 200 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - volume_type_id: volume_type_id - encryption: encryption - key_size: key_size - provider: provider_req - control_location: control_location - cipher: cipher Request Example --------------- .. literalinclude:: ./samples/volume_type/encryption-type-create-request.json :language: javascript Response Parameters ------------------- .. rest_parameters:: parameters.yaml - encryption: encryption - volume_type_id: volume_type_id_body - encryption_id: encryption_id_body - key_size: key_size - provider: provider - control_location: control_location - cipher: cipher Response Example ---------------- .. literalinclude:: ./samples/volume_type/encryption-type-create-response.json :language: javascript Update an encryption type ~~~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: PUT /v3/{project_id}/types/{volume_type_id}/encryption/{encryption_id} To update an encryption type for an existing volume type. Response codes -------------- .. rest_status_code:: success ../status.yaml - 200 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - volume_type_id: volume_type_id - encryption_id: encryption_id - encryption: encryption - key_size: key_size - provider: provider_req_optional - control_location: control_location - cipher: cipher Request Example --------------- .. literalinclude:: ./samples/volume_type/encryption-type-update-request.json :language: javascript Response Parameters ------------------- .. rest_parameters:: parameters.yaml - encryption: encryption - key_size: key_size - provider: provider_optional - control_location: control_location - cipher: cipher Response Example ---------------- .. literalinclude:: ./samples/volume_type/encryption-type-update-response.json :language: javascript ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/volumes-v3-versions.inc0000664000175000017500000000132100000000000022620 0ustar00zuulzuul00000000000000.. -*- rst -*- API version details =================== Show API v3 details ~~~~~~~~~~~~~~~~~~~ .. rest_method:: GET /v3/ Shows details for Block Storage API v3. Response codes -------------- .. rest_status_code:: success ../status.yaml - 200 .. rest_status_code:: error ../status.yaml - 403 Request ------- Response Parameters ------------------- .. rest_parameters:: parameters.yaml - status: version_status - updated: version_updated - links: links_res - min_version: version_min - version: version_max - media-types: media_types - id: version_id Response Example ---------------- .. literalinclude:: ./samples/versions/version-show-response.json :language: javascript ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/volumes-v3-volumes-actions.inc0000664000175000017500000006214000000000000024106 0ustar00zuulzuul00000000000000.. -*- rst -*- Volume actions (volumes, action) ================================ Extends the size of, resets statuses for, sets image metadata for, and removes image metadata from a volume. 
Attaches a volume to a server, detaches a volume from a server, and removes a volume from Block Storage management without actually removing the back-end storage object associated with it. Extend a volume size ~~~~~~~~~~~~~~~~~~~~ .. rest_method:: POST /v3/{project_id}/volumes/{volume_id}/action Extends the size of a volume to a requested size, in gibibytes (GiB). Specify the ``os-extend`` action in the request body. Preconditions - Prior to microversion ``3.42`` the volume status must be ``available``. Starting with microversion ``3.42``, attached volumes with status ``in-use`` may be able to be extended depending on policy and backend volume and compute driver constraints in the cloud. Note that ``reserved`` is not a valid state for extend. - Sufficient amount of storage must exist to extend the volume. - The user quota must have sufficient volume storage. Postconditions - If the request is processed successfully, the volume status will change to ``extending`` while the volume size is being extended. - Upon successful completion of the extend operation, the volume status will go back to its original value. - Starting with microversion ``3.42``, when extending the size of an attached volume, the Block Storage service will notify the Compute service that an attached volume has been extended. The Compute service will asynchronously process the volume size change for the related server instance. This can be monitored using the ``GET /servers/{server_id}/os-instance-actions`` API in the Compute service. Troubleshooting - An ``error_extending`` volume status indicates that the request failed. Ensure that you meet the preconditions and retry the request. If the request fails again, investigate the storage back end. Response codes -------------- .. rest_status_code:: success ../status.yaml - 202 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - volume_id: volume_id_path - os-extend: os-extend - new_size: new_size Request Example --------------- .. literalinclude:: ./samples/volume-extend-request.json :language: javascript Complete extending a volume ~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: POST /v3/{project_id}/volumes/{volume_id}/action Specify the ``os-extend_volume_completion`` action in the request body. Complete extending an attached volume that has been left in status ``extending`` after notifying the compute agent. Depending on the value of the ``error`` parameter, the extend operation will be either rolled back or finalized. **Preconditions** * The volume must have the status ``extending``. * The volume's admin metadata must contain a set of keys indicating that Cinder was waiting for external feedback on the success of the operation. **Asynchronous Postconditions** If the ``error`` parameter is ``false`` or missing, and the extend operation was successfully finalized, the volume status will be ``in-use``. Otherwise, the volume status will be ``error_extending``. Response codes -------------- .. rest_status_code:: success ../status.yaml - 202 Request ------- .. rest_parameters:: parameters.yaml - volume_id: volume_id_path - project_id: project_id_path - os-extend_volume_completion: os-extend_volume_completion - error: extend_completion_error Request Example --------------- .. literalinclude:: ./samples/volume-os-extend_volume_completion-request.json :language: javascript Reset a volume's statuses ~~~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: POST /v3/{project_id}/volumes/{volume_id}/action Administrator only. 
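The following is an illustrative sketch, not an official sample, of invoking this action with the Python ``requests`` library; the endpoint URL, token, and volume UUID are deployment-specific placeholders, and the body fields correspond to the Request parameters documented below.

.. code-block:: python

   # Illustrative sketch only: reset a volume's status via os-reset_status.
   # CINDER_URL, PROJECT_ID, VOLUME_ID and the token are placeholders.
   import requests

   CINDER_URL = "http://controller:8776/v3"   # volumev3 endpoint (assumed)
   PROJECT_ID = "..."                         # your project ID (placeholder)
   VOLUME_ID = "..."                          # volume UUID (placeholder)
   HEADERS = {"X-Auth-Token": "..."}          # token with an admin role

   body = {"os-reset_status": {"status": "available",
                               "attach_status": "detached"}}
   resp = requests.post(
       f"{CINDER_URL}/{PROJECT_ID}/volumes/{VOLUME_ID}/action",
       json=body, headers=HEADERS)
   resp.raise_for_status()   # 202 Accepted indicates the reset was accepted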
Resets the status, attach status, revert to snapshot, and migration status for a volume. Specify the ``os-reset_status`` action in the request body. Response codes -------------- .. rest_status_code:: success ../status.yaml - 202 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - volume_id: volume_id_path - os-reset_status: os-reset_status - status: status_vol - migration_status: migration_status - attach_status: attach_status Request Example --------------- .. literalinclude:: ./samples/volume-status-reset-request.json :language: javascript Revert volume to snapshot ~~~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: POST /v3/{project_id}/volumes/{volume_id}/action Reverts a volume to its latest snapshot. This API only supports reverting a detached volume, and the volume status must be ``available``. Available since API microversion ``3.40``. Response codes -------------- .. rest_status_code:: success ../status.yaml - 202 .. rest_status_code:: error ../status.yaml - 400 - 403 - 404 - 409 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - volume_id: volume_id_path - revert: revert - snapshot_id: snapshot_id_revert Request Example --------------- .. literalinclude:: ./samples/volume-revert-to-snapshot-request.json :language: javascript Set image metadata for a volume ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: POST /v3/{project_id}/volumes/{volume_id}/action Sets the image metadata for a volume. Specify the ``os-set_image_metadata`` action in the request body. Response codes -------------- .. rest_status_code:: success ../status.yaml - 200 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - volume_id: volume_id_path - os-set_image_metadata: os-set_image_metadata - metadata: metadata_image Request Example --------------- .. literalinclude:: ./samples/volume-image-metadata-set-request.json :language: javascript Response Parameters ------------------- .. rest_parameters:: parameters.yaml - metadata: metadata_image Response Example ---------------- .. literalinclude:: ./samples/volume-image-metadata-update-response.json :language: javascript Remove image metadata from a volume ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: POST /v3/{project_id}/volumes/{volume_id}/action Removes image metadata, by key, from a volume. Specify the ``os-unset_image_metadata`` action in the request body and the ``key`` for the metadata key and value pair that you want to remove. Response codes -------------- .. rest_status_code:: success ../status.yaml - 200 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - volume_id: volume_id_path - os-unset_image_metadata: os-unset_image_metadata - key: key Request Example --------------- .. literalinclude:: ./samples/volume-image-metadata-unset-request.json :language: javascript Show image metadata for a volume ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: POST /v3/{project_id}/volumes/{volume_id}/action Shows image metadata for a volume. Response codes -------------- .. rest_status_code:: success ../status.yaml - 200 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - volume_id: volume_id_path - os-show_image_metadata: os-show_image_metadata Request Example --------------- .. literalinclude:: ./samples/image-metadata-show-request.json :language: javascript Response Parameters ------------------- .. rest_parameters:: parameters.yaml - metadata: metadata_image Response Example ---------------- ..
literalinclude:: ./samples/image-metadata-show-response.json :language: javascript Attach volume to a server ~~~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: POST /v3/{project_id}/volumes/{volume_id}/action Attaches a volume to a server. Specify the ``os-attach`` action in the request body. Preconditions - Volume status must be ``available``. - You should set ``instance_uuid`` or ``host_name``. Response codes -------------- .. rest_status_code:: success ../status.yaml - 202 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - volume_id: volume_id_path - os-attach: os-attach - instance_uuid: instance_uuid - mountpoint: mountpoint - host_name: host_name Request Example --------------- .. literalinclude:: ./samples/volume-attach-request.json :language: javascript Detach volume from server ~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: POST /v3/{project_id}/volumes/{volume_id}/action Detaches a volume from a server. Specify the ``os-detach`` action in the request body. Preconditions - Volume status must be ``in-use``. For security reasons (see bug `#2004555 `_), regardless of the policy defaults, the Block Storage API rejects REST API calls manually made from users with a 409 status code if completing the request could pose a risk, which happens if all of these happen: - The request comes from a user - There's an instance uuid in provided attachment or in the volume's attachment - VM exists in Nova - Instance has the volume attached - Attached volume in instance is using the attachment Calls coming from other OpenStack services (like the Compute Service) are always accepted. Response codes -------------- .. rest_status_code:: success ../status.yaml - 202 .. rest_status_code:: error ../status.yaml - 409 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - volume_id: volume_id_path - os-detach: os-detach - attachment_id: attachment_id Request Example --------------- .. literalinclude:: ./samples/volume-detach-request.json :language: javascript Unmanage a volume ~~~~~~~~~~~~~~~~~ .. rest_method:: POST /v3/{project_id}/volumes/{volume_id}/action Removes a volume from Block Storage management without removing the back-end storage object that is associated with it. Specify the ``os-unmanage`` action in the request body. Preconditions - Volume status must be ``available``. Response codes -------------- .. rest_status_code:: success ../status.yaml - 202 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - volume_id: volume_id_path - os-unmanage: os-unmanage Request Example --------------- .. literalinclude:: ./samples/volume-unmanage-request.json :language: javascript Force detach a volume ~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: POST /v3/{project_id}/volumes/{volume_id}/action Forces a volume to detach. Specify the ``os-force_detach`` action in the request body. Rolls back an unsuccessful detach operation after you disconnect the volume. Policy defaults enable only users with the administrative role to perform this operation. Cloud providers can change these permissions through the ``volume_extension:volume_admin_actions:force_detach`` rule in the policy configuration file. 
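As an illustrative sketch, not an official sample, the action can be invoked as follows with the Python ``requests`` library; the endpoint URL, token, UUIDs, and connector contents are placeholders that depend on your deployment and the volume's transport protocol.

.. code-block:: python

   # Illustrative sketch only: force-detach a volume. All identifiers and
   # the connector contents below are placeholders.
   import requests

   CINDER_URL = "http://controller:8776/v3"   # volumev3 endpoint (assumed)
   PROJECT_ID = "..."                         # your project ID (placeholder)
   VOLUME_ID = "..."                          # volume UUID (placeholder)
   HEADERS = {"X-Auth-Token": "..."}          # token with an admin role

   body = {
       "os-force_detach": {
           "attachment_id": "...",            # attachment UUID (placeholder)
           "connector": {"initiator": "iqn.2015-06.org.example:01"},
       }
   }
   resp = requests.post(
       f"{CINDER_URL}/{PROJECT_ID}/volumes/{VOLUME_ID}/action",
       json=body, headers=HEADERS)
   resp.raise_for_status()   # 202 Accepted on success; 409 if the request
                             # is rejected for the security reasons below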
For security reasons (see bug `#2004555 `_), regardless of the policy defaults, the Block Storage API rejects REST API calls manually made from users with a 409 status code if completing the request could pose a risk, which happens if all of these happen: - The request comes from a user - There's an instance uuid in provided attachment or in the volume's attachment - VM exists in Nova - Instance has the volume attached - Attached volume in instance is using the attachment Calls coming from other OpenStack services (like the Compute Service) are always accepted. Response codes -------------- .. rest_status_code:: success ../status.yaml - 202 .. rest_status_code:: error ../status.yaml - 409 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - volume_id: volume_id_path - os-force_detach: os-force_detach - attachment_id: attachment_id - connector: connector Request Example --------------- .. literalinclude:: ./samples/volume-force-detach-request.json :language: javascript Retype a volume ~~~~~~~~~~~~~~~ .. rest_method:: POST /v3/{project_id}/volumes/{volume_id}/action Change type of existing volume. Specify the ``os-retype`` action in the request body. Change the volume type of existing volume, Cinder may migrate the volume to proper volume host according to the new volume type. Retyping an *in-use* volume from a multiattach-capable type to a non-multiattach-capable type, or vice-versa, is not supported. It is generally not recommended to retype an *in-use* multiattach volume if that volume has more than one active read/write attachment. Policy defaults enable only users with the administrative role or the owner of the volume to perform this operation. Cloud providers can change these permissions through the policy configuration file. Retyping an unencrypted volume to the same size encrypted volume will most likely fail. Even though the volume is the same size as the source volume, the encrypted volume needs to store additional encryption information overhead. This results in the new volume not being large enough to hold all data. Response codes -------------- .. rest_status_code:: success ../status.yaml - 202 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - volume_id: volume_id_path - os-retype: os-retype - new_type: new_type - migration_policy: migration_policy Request Example --------------- .. literalinclude:: ./samples/volume-os-retype-request.json :language: javascript Migrate a volume ~~~~~~~~~~~~~~~~ .. rest_method:: POST /v3/{project_id}/volumes/{volume_id}/action Specify the ``os-migrate_volume`` action in the request body. Migrates a volume to the specified host. Starting with the `3.16 microversion`_ a cluster can be specified instead of a host. It is generally not recommended to migrate an *in-use* multiattach volume if that volume has more than one active read/write attachment. Policy defaults enable only users with the administrative role to perform this operation. Cloud providers can change these permissions through the policy configuration file. .. _3.16 microversion: https://docs.openstack.org/cinder/latest/contributor/api_microversion_history.html#id15 **Preconditions** * The volume ``status`` must be ``available`` or ``in-use``. * The volume ``migration_status`` must be ``None``, ``deleting``, ``error``, or ``success``. * The volume ``replication_status`` must be ``None``, ``disabled`` or ``not-capable``. * The migration must happen to another host (or cluster) from which the volume currently resides. 
* The volume must not be a member of a group. * The volume must not have snapshots. **Asynchronous Postconditions** On success, the volume ``status`` will return to its original status of ``available`` or ``in-use`` and the ``migration_status`` will be ``success``. On failure, the ``migration_status`` will be ``error``. In the case of failure, if ``lock_volume`` was true and the volume was originally ``available`` when it was migrated, the ``status`` will go back to ``available``. Response codes -------------- .. rest_status_code:: success ../status.yaml - 202 Request ------- .. rest_parameters:: parameters.yaml - volume_id: volume_id_path - project_id: project_id_path - os-migrate_volume: os-migrate_volume - host: migrate_host - force_host_copy: migrate_force_host_copy - lock_volume: migrate_lock_volume - cluster: migrate_cluster Request Example --------------- .. literalinclude:: ./samples/volume-os-migrate_volume-request.json :language: javascript Complete migration of a volume ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: POST /v3/{project_id}/volumes/{volume_id}/action Specify the ``os-migrate_volume_completion`` action in the request body. Complete the migration of a volume, updating the new volume in the DB, returning the ``status`` of the new volume to that of the original volume and finally deleting the original volume. **Preconditions** * Both the original and new volume ``migration_status`` must be ``None`` or both must be set to a non ``None`` value. * Additionally when set the new volume ``migration_status`` must take the form of ``target:VOLUME_UUID`` where VOLUME_UUID is the original volume UUID. **Asynchronous Postconditions** On success, the volume ``status`` will return to its original status of ``available`` or ``in-use`` and the ``migration_status`` will be ``success``. On failure, the ``migration_status`` will be ``error``. Response codes -------------- .. rest_status_code:: success ../status.yaml - 202 Request ------- .. rest_parameters:: parameters.yaml - volume_id: volume_id_path - project_id: project_id_path - os-migrate_volume_completion: os-migrate_volume_completion - new_volume: new_volume - error: migration_completion_error Request Example --------------- .. literalinclude:: ./samples/volume-os-migrate_volume_completion-request.json :language: javascript Force delete a volume ~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: POST /v3/{project_id}/volumes/{volume_id}/action Attempts force-delete of volume, regardless of state. Specify the ``os-force_delete`` action in the request body. Response codes -------------- .. rest_status_code:: success ../status.yaml - 202 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - volume_id: volume_id_path - os-force_delete: os-force_delete Request Example --------------- .. literalinclude:: ./samples/volume-force-delete-request.json :language: javascript Update a volume's bootable status ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: POST /v3/{project_id}/volumes/{volume_id}/action Update the bootable status for a volume, mark it as a bootable volume. Specify the ``os-set_bootable`` action in the request body. Response codes -------------- .. rest_status_code:: success ../status.yaml - 200 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - volume_id: volume_id_path - os-set_bootable: os-set_bootable - bootable: bootable_required Request Example --------------- .. 
literalinclude:: ./samples/volume-bootable-status-update-request.json :language: javascript Upload volume to image ~~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: POST /v3/{project_id}/volumes/{volume_id}/action Uploads the specified volume to the image service. Response codes -------------- .. rest_status_code:: success ../status.yaml - 202 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - volume_id: volume_id_path - os-volume_upload_image: os-volume_upload_image - image_name: image_name - force: force_upload_vol - disk_format: disk_format_upload - container_format: container_format_upload - visibility: visibility_min - protected: protected Request Example --------------- .. literalinclude:: ./samples/volume_actions/volume-upload-to-image-request.json :language: javascript Response Parameters ------------------- .. rest_parameters:: parameters.yaml - os-volume_upload_image: os-volume_upload_image - status: status_vol - image_name: image_name - disk_format: disk_format - container_format: container_format - visibility: visibility_min - protected: protected - updated_at: updated_at - image_id: image_id - display_description: description_vol_req - id: id_vol - size: size - volume_type: volume_type_vol Response Example ---------------- .. literalinclude:: ./samples/volume_actions/volume-upload-to-image-response.json :language: javascript Reserve volume ~~~~~~~~~~~~~~ .. rest_method:: POST /v3/{project_id}/volumes/{volume_id}/action Marks a volume as reserved. Specify the ``os-reserve`` action in the request body. Preconditions - Volume status must be ``available``. Response codes -------------- .. rest_status_code:: success ../status.yaml - 202 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - volume_id: volume_id_path - os-reserve: os-reserve Request Example --------------- .. literalinclude:: ./samples/volume-reserve-request.json :language: javascript Unmark volume as reserved ~~~~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: POST /v3/{project_id}/volumes/{volume_id}/action Unmarks a volume as reserved. Specify the ``os-unreserve`` action in the request body. Response codes -------------- .. rest_status_code:: success ../status.yaml - 202 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - volume_id: volume_id_path - os-unreserve: os-unreserve Request Example --------------- .. literalinclude:: ./samples/volume-unreserve-request.json :language: javascript Update volume status to detaching ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: POST /v3/{project_id}/volumes/{volume_id}/action Updates the volume status to ``detaching``. Specify the ``os-begin_detaching`` action in the request body. Response codes -------------- .. rest_status_code:: success ../status.yaml - 202 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - volume_id: volume_id_path - os-begin_detaching: os-begin_detaching Request Example --------------- .. literalinclude:: ./samples/volume-begin-detaching-request.json :language: javascript Roll back volume status to in-use ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: POST /v3/{project_id}/volumes/{volume_id}/action Rolls back the volume status to ``in-use``. Specify the ``os-roll_detaching`` action in the request body. Response codes -------------- .. rest_status_code:: success ../status.yaml - 202 Request ------- ..
rest_parameters:: parameters.yaml - project_id: project_id_path - volume_id: volume_id_path - os-roll_detaching: os-roll_detaching Request Example --------------- .. literalinclude:: ./samples/volume-roll-detaching-request.json :language: javascript Terminate volume attachment ~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: POST /v3/{project_id}/volumes/{volume_id}/action Terminate volume attachment. Specify the ``os-terminate_connection`` action in the request body. Preconditions - Volume status must be ``in-use``. For security reasons (see bug `#2004555 `_), regardless of the policy defaults, the Block Storage API rejects REST API calls manually made from users with a 409 status code if completing the request could pose a risk, which happens if all of these happen: - The request comes from a user - There's an instance uuid in the volume's attachment - VM exists in Nova - Instance has the volume attached - Attached volume in instance is using the attachment Calls coming from other OpenStack services (like the Compute Service) are always accepted. Response codes -------------- .. rest_status_code:: success ../status.yaml - 202 .. rest_status_code:: error ../status.yaml - 409 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - volume_id: volume_id_path - os-terminate_connection: os-terminate_connection - connector: connector_required Request Example --------------- .. literalinclude:: ./samples/volume-terminate-connection-request.json :language: javascript Initialize volume attachment ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: POST /v3/{project_id}/volumes/{volume_id}/action Initialize volume attachment. Specify the ``os-initialize_connection`` action in the request body. Response codes -------------- .. rest_status_code:: success ../status.yaml - 200 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - volume_id: volume_id_path - os-initialize_connection: os-initialize_connection - connector: connector_required Request Example --------------- .. literalinclude:: ./samples/volume-initialize-connection-request.json :language: javascript Updates volume read-only access-mode flag ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: POST /v3/{project_id}/volumes/{volume_id}/action Enables or disables update of volume to read-only access mode. Specify the ``os-update_readonly_flag`` action in the request body. Response codes -------------- .. rest_status_code:: success ../status.yaml - 202 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - volume_id: volume_id_path - os-update_readonly_flag: os-update_readonly_flag - readonly: readonly Request Example --------------- .. literalinclude:: ./samples/volume-readonly-update-request.json :language: javascript Reimage a volume ~~~~~~~~~~~~~~~~ .. rest_method:: POST /v3/{project_id}/volumes/{volume_id}/action Re-image a volume with a specific image. Specify the ``os-reimage`` action in the request body. A volume in ``available`` or ``error`` status can be re-imaged directly. To re-image a volume in ``reserved`` status, you must include the ``reimage_reserved`` parameter set to ``true``. .. note:: Image signature verification is currently unsupported when re-imaging a volume. Response codes -------------- .. rest_status_code:: success ../status.yaml - 202 Request ------- .. 
rest_parameters:: parameters.yaml - project_id: project_id_path - volume_id: volume_id_path - image_id: image_id - reimage_reserved: reimage_reserved - os-reimage: os-reimage Request Example --------------- .. literalinclude:: ./samples/volume-os-reimage-request.json :language: javascript ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/volumes-v3-volumes.inc0000664000175000017500000005062500000000000022455 0ustar00zuulzuul00000000000000.. -*- rst -*- Volumes (volumes) ================= A volume is a detachable block storage device similar to a USB hard drive. You can attach a volume to an instance, and if the volume is of an appropriate volume type, a volume can be attached to multiple instances. The ``snapshot_id`` and ``source_volid`` parameters specify the ID of the snapshot or volume from which this volume originates. If the volume was not created from a snapshot or source volume, these values are null. When you create, list, update, or delete volumes, the possible status values are: **Volume statuses** +------------------+--------------------------------------------------------+ | Status | Description | +------------------+--------------------------------------------------------+ | creating | The volume is being created. | +------------------+--------------------------------------------------------+ | available | The volume is ready to attach to an instance. | +------------------+--------------------------------------------------------+ | reserved | The volume is reserved for attaching or shelved. | +------------------+--------------------------------------------------------+ | attaching | The volume is attaching to an instance. | +------------------+--------------------------------------------------------+ | detaching | The volume is detaching from an instance. | +------------------+--------------------------------------------------------+ | in-use | The volume is attached to an instance. | +------------------+--------------------------------------------------------+ | maintenance | The volume is locked and being migrated. | +------------------+--------------------------------------------------------+ | deleting | The volume is being deleted. | +------------------+--------------------------------------------------------+ | awaiting-transfer| The volume is awaiting for transfer. | +------------------+--------------------------------------------------------+ | error | A volume creation error occurred. | +------------------+--------------------------------------------------------+ | error_deleting | A volume deletion error occurred. | +------------------+--------------------------------------------------------+ | backing-up | The volume is being backed up. | +------------------+--------------------------------------------------------+ | restoring-backup | A backup is being restored to the volume. | +------------------+--------------------------------------------------------+ | error_backing-up | A backup error occurred. | +------------------+--------------------------------------------------------+ | error_restoring | A backup restoration error occurred. | +------------------+--------------------------------------------------------+ | error_extending | An error occurred while attempting to extend a volume. | +------------------+--------------------------------------------------------+ | downloading | The volume is downloading an image. 
| +------------------+--------------------------------------------------------+ | uploading | The volume is being uploaded to an image. | +------------------+--------------------------------------------------------+ | retyping | The volume is changing type to another volume type. | +------------------+--------------------------------------------------------+ | extending | The volume is being extended. | +------------------+--------------------------------------------------------+ List accessible volumes with details ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: GET /v3/{project_id}/volumes/detail Lists all Block Storage volumes, with details, that the project can access, since v3.31 if non-admin users specify invalid filters in the url, API will return bad request. Response codes -------------- .. rest_status_code:: success ../status.yaml - 200 .. rest_status_code:: error ../status.yaml - 400 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - all_tenants: all-tenants - sort: sort - sort_key: sort_key - sort_dir: sort_dir - limit: limit - offset: offset - marker: marker - with_count: with_count - created_at: filter_created_at - updated_at: filter_updated_at - consumes_quota: filter_consumes_quota Response Parameters ------------------- .. rest_parameters:: parameters.yaml - migration_status: migration_status - attachments: attachments - links: links_vol - availability_zone: availability_zone - os-vol-host-attr:host: os-vol-host-attr:host - encrypted: encrypted - encryption_key_id: encryption_key_id - updated_at: updated_at - replication_status: replication_status - snapshot_id: snapshot_id - id: id_vol - size: size - user_id: user_id - os-vol-tenant-attr:tenant_id: os-vol-tenant-attr:tenant_id - os-vol-mig-status-attr:migstat: os-vol-mig-status-attr:migstat - metadata: metadata_vol_obj - status: status_vol - volume_image_metadata: volume_image_metadata - description: description_vol_req - multiattach: multiattach_resp - source_volid: source_volid - consistencygroup_id: consistencygroup_id_required - os-vol-mig-status-attr:name_id: os-vol-mig-status-attr:name_id - name: name_vol - bootable: bootable_response - created_at: created_at - volumes: volumes - volume_type: volume_type_vol - volume_type_id: volume_type_id_363 - group_id: group_id_optional - volumes_links: links_vol_optional - provider_id: provider_id - service_uuid: service_uuid - shared_targets: shared_targets - shared_targets: shared_targets_tristate - cluster_name: volume_cluster_name - consumes_quota: consumes_quota - count: count Response Example (v3.65) ------------------------ .. literalinclude:: ./samples/volumes/v3.65/volumes-list-detailed-response.json :language: javascript Create a volume ~~~~~~~~~~~~~~~ .. rest_method:: POST /v3/{project_id}/volumes Creates a volume. To create a bootable volume, include the UUID of the image from which you want to create the volume in the ``imageRef`` attribute in the request body. Since the Train release, every volume must have a volume type. It is **optional** to specify a volume type as part of your `Create a volume` request. If you do not specify one, a default volume type will be supplied for you. This type may vary according to what project you are in and how the operator has configured the Block Storage service. Use the `Show default volume type`_ request to determine your effective default volume type. Preconditions - You must have enough volume storage quota remaining to create a volume of size requested. 
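The following illustrative sketch, not an official sample, shows a minimal create request and a simple way to observe the asynchronous postconditions described next; the endpoint URL, token, and field values are placeholders.

.. code-block:: python

   # Illustrative sketch only: create a 10 GiB volume and poll until it
   # leaves the 'creating' state. Endpoint, token and values are placeholders.
   import time
   import requests

   CINDER_URL = "http://controller:8776/v3"   # volumev3 endpoint (assumed)
   PROJECT_ID = "..."                         # your project ID (placeholder)
   HEADERS = {"X-Auth-Token": "..."}          # a valid Keystone token

   body = {"volume": {"size": 10, "name": "example-volume"}}
   vol = requests.post(f"{CINDER_URL}/{PROJECT_ID}/volumes",
                       json=body, headers=HEADERS).json()["volume"]

   while vol["status"] == "creating":         # creation is asynchronous (202)
       time.sleep(2)
       vol = requests.get(f"{CINDER_URL}/{PROJECT_ID}/volumes/{vol['id']}",
                          headers=HEADERS).json()["volume"]
   print(vol["status"])                       # 'available' on success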
Asynchronous Postconditions - With correct permissions, you can see the volume status as ``available`` through API calls. - With correct access, you can see the created volume in the storage system that OpenStack Block Storage manages. Troubleshooting - If volume status remains ``creating`` or shows another error status, the request failed. Ensure you meet the preconditions then investigate the storage back end. - Volume is not created in the storage system that OpenStack Block Storage manages. - The storage node needs enough free storage space to match the size of the volume creation request. Response codes -------------- .. rest_status_code:: success ../status.yaml - 202 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - volume: volume - size: size - availability_zone: availability_zone - source_volid: source_volid - description: description_vol - snapshot_id: snapshot_id - backup_id: backup_id - name: volume_name_optional - imageRef: imageRef - volume_type: volume_type_detail - metadata: metadata_vol - consistencygroup_id: consistencygroup_id_required - OS-SCH-HNT:scheduler_hints: OS-SCH-HNT:scheduler_hints Request Example --------------- .. literalinclude:: ./samples/volumes/volume-create-request.json :language: javascript Response Parameters ------------------- .. rest_parameters:: parameters.yaml - migration_status: migration_status - attachments: attachments - links: links_vol - availability_zone: availability_zone - encrypted: encrypted - updated_at: updated_at - replication_status: replication_status - snapshot_id: snapshot_id - id: id_vol - size: size - user_id: user_id - metadata: metadata_vol_obj - status: status_vol - description: description_vol_req - multiattach: multiattach_resp - source_volid: source_volid - volume: volume - consistencygroup_id: consistencygroup_id_required - name: name_vol - bootable: bootable_response - created_at: created_at - volume_type: volume_type_vol - volume_type_id: volume_type_id_363 - group_id: group_id_optional - provider_id: provider_id - service_uuid: service_uuid - shared_targets: shared_targets - shared_targets: shared_targets_tristate - cluster_name: volume_cluster_name - consumes_quota: consumes_quota Response Example (v3.65) ------------------------ .. literalinclude:: ./samples/volumes/v3.65/volume-create-response.json :language: javascript List accessible volumes ~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: GET /v3/{project_id}/volumes Lists summary information for all Block Storage volumes that the project can access, since v3.31 if non-admin users specify invalid filters in the url, API will return bad request. Response codes -------------- .. rest_status_code:: success ../status.yaml - 200 .. rest_status_code:: error ../status.yaml - 400 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - all_tenants: all-tenants - sort: sort - sort_key: sort_key - sort_dir: sort_dir - limit: limit - offset: offset - marker: marker - with_count: with_count - created_at: filter_created_at - consumes_quota: filter_consumes_quota - updated_at: filter_updated_at Response Parameters ------------------- .. rest_parameters:: parameters.yaml - volumes: volumes - id: id_vol - links: links_vol - name: name_vol - volumes_links: links_vol_optional - count: count Response Example ---------------- .. literalinclude:: ./samples/volumes/volumes-list-response.json :language: javascript Show a volume's details ~~~~~~~~~~~~~~~~~~~~~~~ .. 
rest_method:: GET /v3/{project_id}/volumes/{volume_id} Shows details for a volume. Preconditions - The volume must exist. Response codes -------------- .. rest_status_code:: success ../status.yaml - 200 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - volume_id: volume_id_path Response Parameters ------------------- .. rest_parameters:: parameters.yaml - migration_status: migration_status - attachments: attachments - links: links_vol - availability_zone: availability_zone - os-vol-host-attr:host: os-vol-host-attr:host - encrypted: encrypted - encryption_key_id: encryption_key_id - updated_at: updated_at - replication_status: replication_status - snapshot_id: snapshot_id - id: id_vol - size: size - user_id: user_id - os-vol-tenant-attr:tenant_id: os-vol-tenant-attr:tenant_id - os-vol-mig-status-attr:migstat: os-vol-mig-status-attr:migstat - metadata: metadata_vol_obj - status: status_vol - volume_image_metadata: volume_image_metadata - description: description_vol_req - multiattach: multiattach_resp - source_volid: source_volid - volume: volume - consistencygroup_id: consistencygroup_id_required - os-vol-mig-status-attr:name_id: os-vol-mig-status-attr:name_id - name: name_vol - bootable: bootable_response - created_at: created_at - volume_type: volume_type_vol - volume_type_id: volume_type_id_363 - service_uuid: service_uuid - shared_targets: shared_targets - shared_targets: shared_targets_tristate - cluster_name: volume_cluster_name - provider_id: provider_id - group_id: group_id_optional - consumes_quota: consumes_quota Response Example (v3.65) ------------------------ .. literalinclude:: ./samples/volumes/v3.65/volume-show-response.json :language: javascript Update a volume ~~~~~~~~~~~~~~~ .. rest_method:: PUT /v3/{project_id}/volumes/{volume_id} Updates a volume. Response codes -------------- .. rest_status_code:: success ../status.yaml - 200 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - volume_id: volume_id_path - volume: volume - description: description_vol - name: volume_name_optional - metadata: metadata_vol_assoc Request Example --------------- .. literalinclude:: ./samples/volumes/volume-update-request.json :language: javascript Response Parameters ------------------- .. rest_parameters:: parameters.yaml - migration_status: migration_status - attachments: attachments - links: links_vol - availability_zone: availability_zone - encrypted: encrypted - updated_at: updated_at - replication_status: replication_status - snapshot_id: snapshot_id - id: id_vol - size: size - user_id: user_id - metadata: metadata_vol_obj - status: status_vol - description: description_vol_req - multiattach: multiattach_resp - source_volid: source_volid - volume: volume - consistencygroup_id: consistencygroup_id_required - name: name_vol - bootable: bootable_response - created_at: created_at - volume_type: volume_type_vol - volume_type_id: volume_type_id_363 - group_id: group_id_optional - provider_id: provider_id - service_uuid: service_uuid - shared_targets: shared_targets - shared_targets: shared_targets_tristate - cluster_name: volume_cluster_name - consumes_quota: consumes_quota Response Example (v3.65) ------------------------ .. literalinclude:: ./samples/volumes/v3.65/volume-update-response.json :language: javascript Delete a volume ~~~~~~~~~~~~~~~ .. rest_method:: DELETE /v3/{project_id}/volumes/{volume_id} Deletes a volume. 
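An illustrative sketch of this call, not an official sample, follows; it includes the ``cascade`` query parameter described in the preconditions below, and the endpoint URL, token, and volume UUID are placeholders.

.. code-block:: python

   # Illustrative sketch only: delete a volume and cascade the delete to its
   # snapshots. Endpoint, token and UUID are placeholders.
   import requests

   CINDER_URL = "http://controller:8776/v3"   # volumev3 endpoint (assumed)
   PROJECT_ID = "..."                         # your project ID (placeholder)
   VOLUME_ID = "..."                          # volume UUID (placeholder)
   HEADERS = {"X-Auth-Token": "..."}          # a valid Keystone token

   resp = requests.delete(
       f"{CINDER_URL}/{PROJECT_ID}/volumes/{VOLUME_ID}",
       params={"cascade": "true"},            # optional; also delete snapshots
       headers=HEADERS)
   resp.raise_for_status()                    # expect 202 Accepted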
Preconditions - Volume status must be ``available``, ``in-use``, ``error``, ``error_restoring``, ``error_extending``, ``error_managing``, and must not be ``migrating``, ``attached``, ``awaiting-transfer``, belong to a group, have snapshots or be disassociated from snapshots after volume transfer. - The ``cascade`` option can be passed in the request if you want all snapshots of this volume to be deleted automatically, which should allow the volume deletion to succeed. - You cannot delete a volume that is in a migration. Asynchronous Postconditions - The volume is deleted in volume index. - The volume managed by OpenStack Block Storage is deleted in storage node. Troubleshooting - If volume status remains in ``deleting`` or becomes ``error_deleting`` the request failed. Ensure you meet the preconditions then investigate the storage back end. - The volume managed by OpenStack Block Storage is not deleted from the storage system. Response codes -------------- .. rest_status_code:: success ../status.yaml - 202 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - volume_id: volume_id_path - cascade: cascade - force: force_vol_del Create metadata for volume ~~~~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: POST /v3/{project_id}/volumes/{volume_id}/metadata Creates or replaces metadata for a volume. Does not modify items that are not in the request. Response codes -------------- .. rest_status_code:: success ../status.yaml - 200 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - volume_id: volume_id_path - metadata: metadata_vol_assoc_req Request Example --------------- .. literalinclude:: ./samples/volumes/volume-metadata-create-request.json :language: javascript Response Parameters ------------------- .. rest_parameters:: parameters.yaml - metadata: metadata_vol_assoc_req Response Example ---------------- .. literalinclude:: ./samples/volumes/volume-metadata-create-response.json :language: javascript Show a volume's metadata ~~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: GET /v3/{project_id}/volumes/{volume_id}/metadata Shows metadata for a volume. Response codes -------------- .. rest_status_code:: success ../status.yaml - 200 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - volume_id: volume_id_path Response Parameters ------------------- .. rest_parameters:: parameters.yaml - metadata: metadata_vol_assoc_req Response Example ---------------- .. literalinclude:: ./samples/volumes/volume-metadata-show-response.json :language: javascript Update a volume's metadata ~~~~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: PUT /v3/{project_id}/volumes/{volume_id}/metadata Replaces all the volume's metadata with the key-value pairs in the request. Response codes -------------- .. rest_status_code:: success ../status.yaml - 200 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - volume_id: volume_id_path - metadata: metadata_vol_assoc_req Request Example --------------- .. literalinclude:: ./samples/volumes/volume-metadata-update-request.json :language: javascript Response Parameters ------------------- .. rest_parameters:: parameters.yaml - metadata: metadata_vol_assoc_req Response Example ---------------- .. literalinclude:: ./samples/volumes/volume-metadata-update-response.json :language: javascript Show a volume's metadata for a specific key ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. 
rest_method:: GET /v3/{project_id}/volumes/{volume_id}/metadata/{key} Shows metadata for a volume for a specific key. Response codes -------------- .. rest_status_code:: success ../status.yaml - 200 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - volume_id: volume_id_path - key: key_view Response Parameters ------------------- .. rest_parameters:: parameters.yaml - meta: meta Response Example ---------------- .. literalinclude:: ./samples/volumes/volume-metadata-show-key-response.json :language: javascript Delete a volume's metadata ~~~~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: DELETE /v3/{project_id}/volumes/{volume_id}/metadata/{key} Deletes metadata for a volume. Response codes -------------- .. rest_status_code:: success ../status.yaml - 200 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - volume_id: volume_id_path - key: key_path Update a volume's metadata for a specific key ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. rest_method:: PUT /v3/{project_id}/volumes/{volume_id}/metadata/{key} Update metadata for a volume for a specific key. Response codes -------------- .. rest_status_code:: success ../status.yaml - 200 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - volume_id: volume_id_path - key: key_update - meta: meta Request Example --------------- .. literalinclude:: ./samples/volumes/volume-metadata-update-key-request.json :language: javascript Response Parameters ------------------- .. rest_parameters:: parameters.yaml - meta: meta Response Example ---------------- .. literalinclude:: ./samples/volumes/volume-metadata-update-key-response.json :language: javascript Get volumes summary ~~~~~~~~~~~~~~~~~~~ .. rest_method:: GET /v3/{project_id}/volumes/summary Display volumes summary with total number of volumes and total size in GB. Available since API microversion 3.12. Response codes -------------- .. rest_status_code:: success ../status.yaml - 200 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - all_tenants: all-tenants Response Parameters ------------------- .. rest_parameters:: parameters.yaml - volume-summary: volume-summary - total_size: total_size - total_count: total_count_int - metadata: summary_metadata Response Example ---------------- .. literalinclude:: ./samples/volumes/volumes-list-summary-response.json :language: javascript ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/api-ref/source/v3/worker-cleanup.inc0000664000175000017500000000207200000000000021674 0ustar00zuulzuul00000000000000.. -*- rst -*- Workers (workers) ================= Cleanup services ~~~~~~~~~~~~~~~~ .. rest_method:: POST v3/{project_id}/workers/cleanup Request cleanup of services with optional filtering. This API is only available with microversion 3.24 or later. Response codes -------------- .. rest_status_code:: success ../status.yaml - 202 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id_path - cluster_name: cluster_mutex - service_id: service_id - host: host_service - binary: binary_required - is-up: is_up - disabled: disabled - resource-id: resource_id - resource-type: resource_type Request Example --------------- .. literalinclude:: ./samples/worker-cleanup-request.json :language: javascript Response Parameters ------------------- .. 
rest_parameters:: parameters.yaml - host: host_service - binary: binary_required - id: service_id - cluster_name: cluster_mutex Response Example ---------------- .. literalinclude:: ./samples/worker-cleanup-response.json :language: javascript ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/bindep.txt0000664000175000017500000000351600000000000015076 0ustar00zuulzuul00000000000000# This is a cross-platform list tracking distribution packages needed for # install and tests; # see https://docs.openstack.org/infra/bindep/ for additional information. build-essential [platform:dpkg test] gcc [platform:rpm test] # gettext and graphviz are needed by doc builds only. For transition, # have them in both doc and test. # TODO(jaegerandi): Remove test once infra scripts are updated. gettext [!platform:suse doc test] gettext-runtime [platform:suse doc test] graphviz [doc test] libffi-dev [platform:dpkg] libffi-devel [platform:redhat] libffi48-devel [platform:suse] virtual/libffi [platform:gentoo] libssl-dev [platform:dpkg] openssl-devel [platform:rpm !platform:suse] libopenssl-devel [platform:suse !platform:rpm] locales [platform:debian] mariadb [platform:rpm] mariadb-server [platform:redhat platform:debian] mariadb-devel [platform:redhat] libmariadb-dev-compat [platform:debian] libmysqlclient-dev [platform:ubuntu] libmysqlclient-devel [platform:suse] mysql-client [platform:dpkg !platform:debian] mysql-server [platform:dpkg !platform:debian] postgresql postgresql-client [platform:dpkg] postgresql-devel [platform:rpm] postgresql-server [platform:rpm] python3-devel [platform:rpm test] libpq-dev [platform:dpkg] thin-provisioning-tools [platform:debian] libxml2-dev [platform:dpkg test] libpcre3-dev [platform:dpkg test] libxslt-devel [platform:rpm test] libxslt1-dev [platform:dpkg test] cryptsetup [platform:rpm] cryptsetup-bin [platform:dpkg] # Cinder uses lsscsi via os-brick. Due to bindep usage in devstack and # elsewhere, we add it here to make sure it is picked up and available in # os-brick tests. Net result is the same that lsscsi will be installed for any # cinder installation. lsscsi qemu-img [platform:redhat] qemu-tools [platform:suse] qemu-utils [platform:dpkg] libcgroup-tools [platform:rpm] cgroup-tools [platform:dpkg] ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315577.0231178 cinder-27.0.0/cinder/0000775000175000017500000000000000000000000014333 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/__init__.py0000664000175000017500000000140000000000000016437 0ustar00zuulzuul00000000000000# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""Root Cinder module.""" import os # Ensure compatibility issues are covered with pythondsn os.environ['EVENTLET_NO_GREENDNS'] = 'yes' # Make sure eventlet is loaded import eventlet # noqa ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315577.0271177 cinder-27.0.0/cinder/api/0000775000175000017500000000000000000000000015104 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/api/__init__.py0000664000175000017500000000342000000000000017214 0ustar00zuulzuul00000000000000# Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg from oslo_log import log as logging import paste.urlmap CONF = cfg.CONF LOG = logging.getLogger(__name__) def root_app_factory(loader, global_conf, **local_conf): # To support upgrades from previous api-paste config files, we need # to check for and remove any legacy references to the v1 or v2 API if '/v1' in local_conf: LOG.warning('The v1 API has been removed and is no longer ' 'available. Client applications should be ' 'using v3, which is currently the only supported ' 'version of the Block Storage API.') del local_conf['/v1'] if '/v2' in local_conf: LOG.warning('The v2 API has been removed and is no longer available. ' 'Client applications must now use the v3 API only. ' 'The \'enable_v2_api\' option has been removed and is ' 'ignored in the cinder.conf file.') del local_conf['/v2'] return paste.urlmap.urlmap_factory(loader, global_conf, **local_conf) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/api/api_utils.py0000664000175000017500000002311600000000000017452 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import typing from typing import Any, Generator, Iterable, Optional, Union from keystoneauth1 import exceptions as ks_exc from keystoneauth1 import identity from keystoneauth1 import loading as ka_loading from keystoneclient import client from oslo_config import cfg from oslo_log import log as logging from oslo_utils import strutils import webob from webob import exc from cinder import exception from cinder.i18n import _ if typing.TYPE_CHECKING: from cinder import context CONF = cfg.CONF CONF.import_group('keystone_authtoken', 'keystonemiddleware.auth_token.__init__') LOG = logging.getLogger(__name__) def _parse_is_public(is_public: Optional[str]) -> Optional[bool]: """Parse is_public into something usable. * True: List public volume types only * False: List private volume types only * None: List both public and private volume types """ if is_public is None: # preserve default value of showing only public types return True elif is_none_string(is_public): return None else: try: return strutils.bool_from_string(is_public, strict=True) except ValueError: msg = _('Invalid is_public filter [%s]') % is_public raise exc.HTTPBadRequest(explanation=msg) def is_none_string(val: Any) -> bool: """Check if a string represents a None value.""" if not isinstance(val, str): return False return val.lower() == 'none' def remove_invalid_filter_options( context: 'context.RequestContext', filters: dict, allowed_search_options: Iterable[str]) -> None: """Remove search options that are not valid for non-admin API/context.""" if context.is_admin: # Allow all options return # Otherwise, strip out all unknown options unknown_options = [opt for opt in filters if opt not in allowed_search_options] bad_options = ", ".join(unknown_options) LOG.debug("Removing options '%s' from query.", bad_options) for opt in unknown_options: del filters[opt] _visible_admin_metadata_keys = ['readonly', 'attached_mode'] def add_visible_admin_metadata(volume) -> None: """Add user-visible admin metadata to regular metadata. Extracts the admin metadata keys that are to be made visible to non-administrators, and adds them to the regular metadata structure for the passed-in volume. """ visible_admin_meta = {} if volume.get('volume_admin_metadata'): if isinstance(volume['volume_admin_metadata'], dict): volume_admin_metadata = volume['volume_admin_metadata'] for key in volume_admin_metadata: if key in _visible_admin_metadata_keys: visible_admin_meta[key] = volume_admin_metadata[key] else: for item in volume['volume_admin_metadata']: if item['key'] in _visible_admin_metadata_keys: visible_admin_meta[item['key']] = item['value'] # avoid circular ref when volume is a Volume instance elif (volume.get('admin_metadata') and isinstance(volume.get('admin_metadata'), dict)): for key in _visible_admin_metadata_keys: if key in volume['admin_metadata'].keys(): visible_admin_meta[key] = volume['admin_metadata'][key] if not visible_admin_meta: return # NOTE(zhiyan): update visible administration metadata to # volume metadata, administration metadata will rewrite existing key. 
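# A volume may expose its metadata either as a list of {'key': ..., 'value': ...} rows under 'volume_metadata' or as a plain dict under 'metadata'; both shapes are handled below when merging in the visible admin metadata.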
if volume.get('volume_metadata'): orig_meta = list(volume.get('volume_metadata')) for item in orig_meta: if item['key'] in visible_admin_meta.keys(): item['value'] = visible_admin_meta.pop(item['key']) for key, value in visible_admin_meta.items(): orig_meta.append({'key': key, 'value': value}) volume['volume_metadata'] = orig_meta # avoid circular ref when vol is a Volume instance elif (volume.get('metadata') and isinstance(volume.get('metadata'), dict)): volume['metadata'].update(visible_admin_meta) else: volume['metadata'] = visible_admin_meta def validate_integer(value: int, name: str, min_value: Optional[int] = None, max_value: Optional[int] = None) -> int: """Make sure that value is a valid integer, potentially within range. :param value: the value of the integer :param name: the name of the integer :param min_value: the min value of the integer :param max_value: the max value of the integer :returns: integer """ try: value = strutils.validate_integer(value, name, min_value, max_value) return value except ValueError as e: raise webob.exc.HTTPBadRequest(explanation=str(e)) def walk_class_hierarchy(clazz: type, encountered: Optional[list[type]] = None) -> \ Generator[type, None, None]: """Walk class hierarchy, yielding most derived classes first.""" if not encountered: encountered = [] for subclass in clazz.__subclasses__(): if subclass not in encountered: encountered.append(subclass) # drill down to leaves first for subsubclass in walk_class_hierarchy(subclass, encountered): yield subsubclass yield subclass def _keystone_client(context: 'context.RequestContext', version: tuple[int, int] = (3, 0)) -> client.Client: """Creates and returns an instance of a generic keystone client. :param context: The request context :param version: version of Keystone to request :return: keystoneclient.client.Client object """ if context.system_scope is not None: auth_plugin = identity.Token( auth_url=CONF.keystone_authtoken.auth_url, token=context.auth_token, system_scope=context.system_scope ) elif context.domain_id is not None: auth_plugin = identity.Token( auth_url=CONF.keystone_authtoken.auth_url, token=context.auth_token, domain_id=context.domain_id ) elif context.project_id is not None: auth_plugin = identity.Token( auth_url=CONF.keystone_authtoken.auth_url, token=context.auth_token, project_id=context.project_id ) else: # We're dealing with an unscoped token from keystone that doesn't # carry any authoritative power outside of the user simply proving # they know their own password. This token isn't associated with any # authorization target (e.g., system, domain, or project).
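# In that case, fall back to the auth plugin already attached to the request context.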
auth_plugin = context.get_auth_plugin() client_session = ka_loading.session.Session().load_from_options( auth=auth_plugin, insecure=CONF.keystone_authtoken.insecure, cacert=CONF.keystone_authtoken.cafile, key=CONF.keystone_authtoken.keyfile, cert=CONF.keystone_authtoken.certfile, split_loggers=CONF.service_user.split_loggers) return client.Client(auth_url=CONF.keystone_authtoken.auth_url, session=client_session, version=version) class GenericProjectInfo(object): """Abstraction layer for Keystone V2 and V3 project objects""" def __init__(self, project_id: str, project_keystone_api_version: str, domain_id: Optional[str] = None, name: Optional[str] = None, description: Optional[str] = None): self.id = project_id self.keystone_api_version = project_keystone_api_version self.domain_id = domain_id self.name = name self.description = description def get_project(context: 'context.RequestContext', project_id: str) -> GenericProjectInfo: """Method to verify project exists in keystone""" keystone = _keystone_client(context) generic_project = GenericProjectInfo(project_id, keystone.version) project = keystone.projects.get(project_id) generic_project.domain_id = project.domain_id generic_project.name = project.name generic_project.description = project.description return generic_project def validate_project_and_authorize(context: 'context.RequestContext', project_id: str, policy_check: str, validate_only: bool = False) -> None: target_project: Union[GenericProjectInfo, dict] try: target_project = get_project(context, project_id) if not validate_only: target_project = {'project_id': target_project.id} context.authorize(policy_check, target=target_project) except ks_exc.http.NotFound: explanation = _("Project with id %s not found." % project_id) raise exc.HTTPNotFound(explanation=explanation) except exception.NotAuthorized: explanation = _("You are not authorized to perform this " "operation.") raise exc.HTTPForbidden(explanation=explanation) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/api/common.py0000664000175000017500000005053000000000000016751 0ustar00zuulzuul00000000000000# Copyright 2010 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import enum import json import os import re import typing from typing import Any, Iterable, Optional, Union import urllib from oslo_config import cfg from oslo_log import log as logging import webob from cinder.api import api_utils from cinder.api import microversions as mv from cinder.common import constants from cinder import exception from cinder.i18n import _ if typing.TYPE_CHECKING: from cinder import context api_common_opts = [ cfg.IntOpt('osapi_max_limit', default=1000, help='The maximum number of items that a collection ' 'resource returns in a single response'), cfg.StrOpt('resource_query_filters_file', default='/etc/cinder/resource_filters.json', help="Json file indicating user visible filter " "parameters for list queries."), ] CONF = cfg.CONF CONF.import_opt('public_endpoint', 'cinder.api.views.versions') CONF.register_opts(api_common_opts) LOG = logging.getLogger(__name__) _FILTERS_COLLECTION = None ATTRIBUTE_CONVERTERS = {'name~': 'display_name~', 'description~': 'display_description~', 'consumes_quota': 'use_quota'} METADATA_TYPES = enum.Enum('METADATA_TYPES', 'user image') def get_pagination_params(params: dict, max_limit: Optional[int] = None) -> tuple: """Return marker, limit, offset tuple from request. :param params: `wsgi.Request`'s GET dictionary, possibly containing 'marker', 'limit', and 'offset' variables. 'marker' is the id of the last element the client has seen, 'limit' is the maximum number of items to return and 'offset' is the number of items to skip from the marker or from the first element. If 'limit' is not specified, or > max_limit, we default to max_limit. Negative values for either offset or limit will cause exc.HTTPBadRequest() exceptions to be raised. If no offset is present we'll default to 0 and if no marker is present we'll default to None. :max_limit: Max value 'limit' return value can take :returns: Tuple (marker, limit, offset) """ max_limit = max_limit or CONF.osapi_max_limit limit = _get_limit_param(params, max_limit) marker = _get_marker_param(params) offset = _get_offset_param(params) return marker, limit, offset def _get_limit_param(params: dict, max_limit: Optional[int] = None) -> int: """Extract integer limit from request's dictionary or fail. Defaults to max_limit if not present and returns max_limit if present 'limit' is greater than max_limit. """ max_limit = max_limit or CONF.osapi_max_limit try: limit = int(params.pop('limit', max_limit)) except ValueError: msg = _('limit param must be an integer') raise webob.exc.HTTPBadRequest(explanation=msg) if limit < 0: msg = _('limit param must be positive') raise webob.exc.HTTPBadRequest(explanation=msg) limit = min(limit, max_limit) return limit def _get_marker_param(params: dict[str, Any]) -> Optional[str]: """Extract marker id from request's dictionary (defaults to None).""" return params.pop('marker', None) def _get_offset_param(params: dict[str, Any]) -> int: """Extract offset id from request's dictionary (defaults to 0) or fail.""" offset = params.pop('offset', 0) return api_utils.validate_integer(offset, 'offset', 0, constants.DB_MAX_INT) def limited(items: list, request: webob.Request, max_limit: Optional[int] = None) -> list: """Return a slice of items according to requested offset and limit. :param items: A sliceable entity :param request: ``wsgi.Request`` possibly containing 'offset' and 'limit' GET variables. 'offset' is where to start in the list, and 'limit' is the maximum number of items to return. If 'limit' is not specified, 0, or > max_limit, we default to max_limit. 
Negative values for either offset or limit will cause exc.HTTPBadRequest() exceptions to be raised. :kwarg max_limit: The maximum number of items to return from 'items' """ max_limit = max_limit or CONF.osapi_max_limit marker, limit, offset = get_pagination_params(request.GET.copy(), max_limit) range_end = offset + (limit or max_limit) return items[offset:range_end] def get_sort_params(params: dict, default_key: str = 'created_at', default_dir: str = 'desc') -> tuple[list[str], list[str]]: """Retrieves sort keys/directions parameters. Processes the parameters to create a list of sort keys and sort directions that correspond to either the 'sort' parameter or the 'sort_key' and 'sort_dir' parameter values. The value of the 'sort' parameter is a comma- separated list of sort keys, each key is optionally appended with ':<dir>'. Note that the 'sort_key' and 'sort_dir' parameters are deprecated in kilo and an exception is raised if they are supplied with the 'sort' parameter. The sort parameters are removed from the request parameters by this function. :param params: webob.multidict of request parameters (from cinder.api.openstack.wsgi.Request.params) :param default_key: default sort key value, added to the list if no sort keys are supplied :param default_dir: default sort dir value, added to the list if the corresponding key does not have a direction specified :returns: list of sort keys, list of sort dirs :raise webob.exc.HTTPBadRequest: If both 'sort' and either 'sort_key' or 'sort_dir' are supplied parameters """ if 'sort' in params and ('sort_key' in params or 'sort_dir' in params): msg = _("The 'sort_key' and 'sort_dir' parameters are deprecated and " "cannot be used with the 'sort' parameter.") raise webob.exc.HTTPBadRequest(explanation=msg) sort_keys = [] sort_dirs = [] if 'sort' in params: for sort in params.pop('sort').strip().split(','): sort_key, _sep, sort_dir = sort.partition(':') if not sort_dir: sort_dir = default_dir sort_keys.append(sort_key.strip()) sort_dirs.append(sort_dir.strip()) else: sort_key = params.pop('sort_key', default_key) sort_dir = params.pop('sort_dir', default_dir) sort_keys.append(sort_key.strip()) sort_dirs.append(sort_dir.strip()) return sort_keys, sort_dirs def get_request_url(request: webob.Request) -> str: url = request.application_url headers = request.headers forwarded = headers.get('X-Forwarded-Host') if forwarded: url_parts = list(urllib.parse.urlsplit(url)) url_parts[1] = re.split(r',\s?', forwarded)[-1] url = urllib.parse.urlunsplit(url_parts).rstrip('/') return url def remove_version_from_href(href: str) -> str: """Removes the first API version from the href.
Given: 'http://cinder.example.com/v1.1/123' Returns: 'http://cinder.example.com/123' Given: 'http://cinder.example.com/v1.1' Returns: 'http://cinder.example.com' Given: 'http://cinder.example.com/volume/drivers/v1.1/flashsystem' Returns: 'http://cinder.example.com/volume/drivers/flashsystem' """ parsed_url: Union[list[str], urllib.parse.SplitResult] parsed_url = urllib.parse.urlsplit(href) url_parts = parsed_url.path.split('/') # NOTE: this should match vX.X or vX expression = re.compile(r'^v([0-9]+|[0-9]+\.[0-9]+)(/.*|$)') for x in range(len(url_parts)): if expression.match(url_parts[x]): del url_parts[x] break new_path = '/'.join(url_parts) if new_path == parsed_url.path: msg = 'href %s does not contain version' % href LOG.debug(msg) raise ValueError(msg) parsed_url = list(parsed_url) parsed_url[2] = new_path return urllib.parse.urlunsplit(parsed_url) class ViewBuilder(object): """Model API responses as dictionaries.""" _collection_name: Optional[str] = None def _get_project_id_in_url(self, request: webob.Request) -> str: project_id = request.environ["cinder.context"].project_id if project_id and ("/v3/%s" % project_id in request.url): # project_ids are not mandatory within v3 URLs, but links need # to include them if the request does. return project_id return '' def _get_links(self, request: webob.Request, identifier: str) -> list[dict[str, str]]: return [{"rel": "self", "href": self._get_href_link(request, identifier), }, {"rel": "bookmark", "href": self._get_bookmark_link(request, identifier), }] def _get_next_link(self, request: webob.Request, identifier: str, collection_name: str) -> str: """Return href string with proper limit and marker params.""" params = request.params.copy() params["marker"] = identifier prefix = self._update_link_prefix(get_request_url(request), CONF.public_endpoint) url = os.path.join(prefix, self._get_project_id_in_url(request), collection_name) return "%s?%s" % (url, urllib.parse.urlencode(params)) def _get_href_link(self, request: webob.Request, identifier: str) -> str: """Return an href string pointing to this object.""" prefix = self._update_link_prefix(get_request_url(request), CONF.public_endpoint) assert self._collection_name is not None return os.path.join(prefix, self._get_project_id_in_url(request), self._collection_name, str(identifier)) def _get_bookmark_link(self, request: webob.Request, identifier: str) -> str: """Create a URL that refers to a specific resource.""" base_url = remove_version_from_href(get_request_url(request)) base_url = self._update_link_prefix(base_url, CONF.public_endpoint) assert self._collection_name is not None return os.path.join(base_url, self._get_project_id_in_url(request), self._collection_name, str(identifier)) def _get_collection_links(self, request: webob.Request, items: list, collection_name: str, item_count: Optional[int] = None, id_key: str = "uuid") -> list[dict]: """Retrieve 'next' link, if applicable. The next link is included if we are returning as many items as we can, given the restrictions of limit optional request parameter and osapi_max_limit configuration parameter as long as we are returning some elements. So we return next link if: 1) 'limit' param is specified and equal to the number of items. 2) 'limit' param is NOT specified and the number of items is equal to CONF.osapi_max_limit. 
:param request: API request :param items: List of collection items :param collection_name: Name of collection, used to generate the next link for a pagination query :param item_count: Length of the list of the original collection items :param id_key: Attribute key used to retrieve the unique ID, used to generate the next link marker for a pagination query :returns: links """ item_count = item_count or len(items) limit = _get_limit_param(request.GET.copy()) if len(items) and limit <= item_count: return self._generate_next_link(items, id_key, request, collection_name) return [] def _generate_next_link(self, items: list, id_key: str, request: webob.Request, collection_name: str) -> list[dict]: links = [] last_item = items[-1] if id_key in last_item: last_item_id = last_item[id_key] else: last_item_id = last_item["id"] links.append({ "rel": "next", "href": self._get_next_link(request, last_item_id, collection_name), }) return links def _update_link_prefix(self, orig_url: str, prefix: Optional[str]) -> str: if not prefix: return orig_url url_parts = list(urllib.parse.urlsplit(orig_url)) prefix_parts = list(urllib.parse.urlsplit(prefix)) url_parts[0:2] = prefix_parts[0:2] url_parts[2] = prefix_parts[2] + url_parts[2] return urllib.parse.urlunsplit(url_parts).rstrip('/') def get_cluster_host(req: webob.Request, params: dict, cluster_version=None) -> tuple[Optional[str], Optional[str]]: """Get cluster and host from the parameters. This method checks the presence of cluster and host parameters and returns them depending on the cluster_version. If cluster_version is False we will never return the cluster_name and we will require the presence of the host parameter. If cluster_version is None we will always check for the presence of the cluster parameter, and if cluster_version is a string with a version we will only check for the presence of the parameter if the version of the request is not less than it. In both cases we will require one and only one parameter, host or cluster. """ if (cluster_version is not False and req.api_version_request.matches(cluster_version)): cluster_name = params.get('cluster') msg = _('One and only one of cluster and host must be set.') else: cluster_name = None msg = _('Host field is missing.') host = params.get('host') if bool(cluster_name) == bool(host): raise exception.InvalidInput(reason=msg) return cluster_name, host def _initialize_filters() -> None: global _FILTERS_COLLECTION if _FILTERS_COLLECTION: return if not os.path.exists(CONF.resource_query_filters_file): LOG.error( "resource query filters file does not exist: %s", CONF.resource_query_filters_file) return with open(CONF.resource_query_filters_file, 'r') as filters_file: _FILTERS_COLLECTION = json.load(filters_file) def get_enabled_resource_filters(resource: Optional[str] = None) -> dict[str, Any]: """Get list of configured/allowed filters for the specified resource. This method checks resource_query_filters_file and returns dictionary which contains the specified resource and its allowed filters: .. code-block:: json { "resource": ["filter1", "filter2", "filter3"] } if resource is not specified, all of the configuration will be returned, and if the resource is not found, empty dict will be returned. 
""" try: _initialize_filters() assert _FILTERS_COLLECTION is not None if not resource: return _FILTERS_COLLECTION else: return {resource: _FILTERS_COLLECTION[resource]} except Exception: LOG.debug("Failed to collect resource %s's filters.", resource) return {} def get_time_comparison_operators() -> tuple[str, ...]: """Get time comparison operators. This method returns tuple which contains the allowed comparison operators. """ return ("gt", "gte", "eq", "neq", "lt", "lte") def convert_filter_attributes(filters, resource): for key in filters.copy().keys(): if resource in ['volume', 'backup', 'snapshot'] and key in ATTRIBUTE_CONVERTERS.keys(): filters[ATTRIBUTE_CONVERTERS[key]] = filters[key] filters.pop(key) def reject_invalid_filters(context: 'context.RequestContext', filters, resource: str, enable_like_filter: bool = False): invalid_filters = [] for key in filters.copy().keys(): try: # Only ASCII characters can be valid filter keys, # in PY2/3, the key can be either unicode or string. if isinstance(key, str): key.encode('ascii') else: key.decode('ascii') except (UnicodeEncodeError, UnicodeDecodeError): raise webob.exc.HTTPBadRequest( explanation=_('Filter keys can only contain ' 'ASCII characters.')) if context.is_admin and resource not in ['pool']: # Allow all options except resource is pool # pool API is only available for admin return # Check the configured filters against those passed in resource configured_filters: Iterable configured_filters = get_enabled_resource_filters(resource) if configured_filters: configured_filters = configured_filters[resource] else: configured_filters = [] for key in filters.copy().keys(): if not enable_like_filter: if key not in configured_filters: invalid_filters.append(key) else: # If 'key~' is configured, both 'key' and 'key~' are valid. if not (key in configured_filters or "%s~" % key in configured_filters): invalid_filters.append(key) if invalid_filters: if 'all_tenants' in invalid_filters: # NOTE: this is a special case: the cinderclient always adds # 'all_tenants', so we don't want to hold that against a non-admin # user and we silently ignore it. See Bug #1917574. invalid_filters.remove('all_tenants') filters.pop('all_tenants') if len(invalid_filters) == 0: return raise webob.exc.HTTPBadRequest( explanation=_('Invalid filters %s are found in query ' 'options.') % ','.join(invalid_filters)) def process_general_filtering(resource): def wrapper(process_non_general_filtering): def _decorator(*args, **kwargs): req_version = kwargs.get('req_version') filters = kwargs.get('filters') ctxt = kwargs.get('context') ctxt = typing.cast('context.RequestContext', ctxt) assert req_version is not None if req_version.matches(mv.RESOURCE_FILTER): support_like = False if req_version.matches(mv.LIKE_FILTER): support_like = True reject_invalid_filters(ctxt, filters, resource, support_like) convert_filter_attributes(filters, resource) else: process_non_general_filtering(*args, **kwargs) return _decorator return wrapper ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315577.0351179 cinder-27.0.0/cinder/api/contrib/0000775000175000017500000000000000000000000016544 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/api/contrib/__init__.py0000664000175000017500000000230500000000000020655 0ustar00zuulzuul00000000000000# Copyright 2011 Justin Santa Barbara # All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Contrib contains extensions that are shipped with cinder. It can't be called 'extensions' because that causes namespacing problems. """ from oslo_config import cfg from oslo_log import log as logging from cinder.api import extensions CONF = cfg.CONF LOG = logging.getLogger(__name__) def standard_extensions(ext_mgr): extensions.load_standard_extensions(ext_mgr, LOG, __path__, __package__) def select_extensions(ext_mgr): extensions.load_standard_extensions(ext_mgr, LOG, __path__, __package__, CONF.osapi_volume_ext_list) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/api/contrib/admin_actions.py0000664000175000017500000003616000000000000021734 0ustar00zuulzuul00000000000000# Copyright 2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from http import HTTPStatus from oslo_log import log as logging import oslo_messaging as messaging from oslo_utils import strutils import webob from cinder.api import common from cinder.api import extensions from cinder.api import microversions as mv from cinder.api.openstack import wsgi from cinder.api.schemas import admin_actions from cinder.api import validation from cinder import backup from cinder import db from cinder import exception from cinder.i18n import _ from cinder import objects from cinder import volume from cinder.volume import volume_utils LOG = logging.getLogger(__name__) class AdminController(wsgi.Controller): """Abstract base class for AdminControllers.""" collection = None # api collection to extend # FIXME(clayg): this will be hard to keep up-to-date # Concrete classes can expand or over-ride def __init__(self, *args, **kwargs): super(AdminController, self).__init__(*args, **kwargs) # singular name of the resource self.resource_name = self.collection.rstrip('s') self.volume_api = volume.API() self.backup_api = backup.API() def _update(self, *args, **kwargs): raise NotImplementedError() def _get(self, *args, **kwargs): raise NotImplementedError() def _delete(self, *args, **kwargs): raise NotImplementedError() def validate_update(self, req, body): raise NotImplementedError() def _notify_reset_status(self, context, id, message): raise NotImplementedError() def authorize(self, context, action_name, target_obj=None): context.authorize( 'volume_extension:%(resource)s_admin_actions:%(action)s' % {'resource': self.resource_name, 'action': action_name}, target_obj=target_obj) def _remove_worker(self, context, id): # Remove the cleanup worker from the DB when we change a resource # status since it renders useless the entry. res = db.worker_destroy(context, resource_type=self.collection.title(), resource_id=id) if res: LOG.debug('Worker entry for %s with id %s has been deleted.', self.collection, id) @wsgi.response(HTTPStatus.ACCEPTED) @wsgi.action('os-reset_status') def _reset_status(self, req, id, body): """Reset status on the resource.""" def _clean_volume_attachment(context, id): attachments = ( db.volume_attachment_get_all_by_volume_id(context, id)) for attachment in attachments: db.volume_detached(context.elevated(), id, attachment.id) db.volume_admin_metadata_delete(context.elevated(), id, 'attached_mode') context = req.environ['cinder.context'] update = self.validate_update(req, body=body) msg = "Updating %(resource)s '%(id)s' with '%(update)r'" LOG.debug(msg, {'resource': self.resource_name, 'id': id, 'update': update}) self._notify_reset_status(context, id, 'reset_status.start') # Not found exception will be handled at the wsgi level self._update(context, id, update) self._remove_worker(context, id) if update.get('attach_status') == 'detached': _clean_volume_attachment(context, id) self._notify_reset_status(context, id, 'reset_status.end') @wsgi.response(HTTPStatus.ACCEPTED) @wsgi.action('os-force_delete') def _force_delete(self, req, id, body): """Delete a resource, bypassing the check that it must be available.""" context = req.environ['cinder.context'] # Not found exception will be handled at the wsgi level resource = self._get(context, id) self.authorize(context, 'force_delete', target_obj=resource) self._delete(context, resource, force=True) class VolumeAdminController(AdminController): """AdminController for Volumes.""" collection = 'volumes' def _notify_reset_status(self, context, id, message): volume = objects.Volume.get_by_id(context, id) 
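# 'message' is the notification event suffix passed by the reset-status handlers, e.g. 'reset_status.start' or 'reset_status.end'.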
volume_utils.notify_about_volume_usage(context, volume, message) def _update(self, *args, **kwargs): db.volume_update(*args, **kwargs) def _get(self, *args, **kwargs): return self.volume_api.get(*args, **kwargs) def _delete(self, *args, **kwargs): return self.volume_api.delete(*args, **kwargs) @validation.schema(admin_actions.reset) def validate_update(self, req, body): update = {} body = body['os-reset_status'] status = body.get('status', None) attach_status = body.get('attach_status', None) migration_status = body.get('migration_status', None) if status: update['status'] = status.lower() if attach_status: update['attach_status'] = attach_status.lower() if migration_status: update['migration_status'] = migration_status.lower() if update['migration_status'] == 'none': update['migration_status'] = None return update @wsgi.response(HTTPStatus.ACCEPTED) @wsgi.action('os-reset_status') def _reset_status(self, req, id, body): """Reset status on the volume.""" def _clean_volume_attachment(context, id): attachments = ( db.volume_attachment_get_all_by_volume_id(context, id)) for attachment in attachments: db.volume_detached(context.elevated(), id, attachment.id) db.volume_admin_metadata_delete(context.elevated(), id, 'attached_mode') # any exceptions raised will be handled at the wsgi level update = self.validate_update(req, body=body) context = req.environ['cinder.context'] volume = objects.Volume.get_by_id(context, id) self.authorize(context, 'reset_status', target_obj=volume) # at this point, we still don't know if we're going to # reset the volume's state. Need to check what the caller # is requesting first. if update.get('status') in ('deleting', 'error_deleting', 'detaching'): msg = _("Cannot reset-state to %s") % update.get('status') raise webob.exc.HTTPBadRequest(explanation=msg) if update.get('status') == 'in-use': attachments = ( db.volume_attachment_get_all_by_volume_id(context, id)) if not attachments: msg = _("Cannot reset-state to in-use " "because volume does not have any attachments.") raise webob.exc.HTTPBadRequest(explanation=msg) msg = "Updating %(resource)s '%(id)s' with '%(update)r'" LOG.debug(msg, {'resource': self.resource_name, 'id': id, 'update': update}) self._notify_reset_status(context, id, 'reset_status.start') self._update(context, id, update) self._remove_worker(context, id) if update.get('attach_status') == 'detached': _clean_volume_attachment(context, id) self._notify_reset_status(context, id, 'reset_status.end') @wsgi.response(HTTPStatus.ACCEPTED) @wsgi.action('os-force_detach') @validation.schema(admin_actions.force_detach) def _force_detach(self, req, id, body): """Roll back a bad detach after the volume has been disconnected.""" context = req.environ['cinder.context'] # Not found exception will be handled at the wsgi level volume = self._get(context, id) self.authorize(context, 'force_detach', target_obj=volume) connector = body['os-force_detach'].get('connector', None) try: self.volume_api.terminate_connection(context, volume, connector) except exception.VolumeBackendAPIException: msg = _("Unable to terminate volume connection from backend.") raise webob.exc.HTTPInternalServerError(explanation=msg) attachment_id = body['os-force_detach'].get('attachment_id', None) try: self.volume_api.detach(context, volume, attachment_id) except messaging.RemoteError as error: if error.exc_type in ['VolumeAttachmentNotFound', 'InvalidVolume']: msg = _("Error force detaching volume - %(err_type)s: " "%(err_msg)s") % {'err_type': error.exc_type, 'err_msg': error.value} raise
webob.exc.HTTPBadRequest(explanation=msg) else: # There are also few cases where force-detach call could fail # due to db or volume driver errors. These errors shouldn't # be exposed to the user and in such cases it should raise # 500 error. raise @wsgi.response(HTTPStatus.ACCEPTED) @wsgi.action('os-migrate_volume') @validation.schema(admin_actions.migrate_volume, mv.BASE_VERSION, mv.get_prior_version(mv.VOLUME_MIGRATE_CLUSTER)) @validation.schema(admin_actions.migrate_volume_v316, mv.VOLUME_MIGRATE_CLUSTER) def _migrate_volume(self, req, id, body): """Migrate a volume to the specified host.""" context = req.environ['cinder.context'] # Not found exception will be handled at the wsgi level volume = self._get(context, id) self.authorize(context, 'migrate_volume', target_obj=volume) params = body['os-migrate_volume'] cluster_name, host = common.get_cluster_host(req, params, mv.VOLUME_MIGRATE_CLUSTER) force_host_copy = strutils.bool_from_string(params.get( 'force_host_copy', False), strict=True) lock_volume = strutils.bool_from_string(params.get( 'lock_volume', False), strict=True) self.volume_api.migrate_volume(context, volume, host, cluster_name, force_host_copy, lock_volume) @wsgi.action('os-migrate_volume_completion') @validation.schema(admin_actions.migrate_volume_completion) def _migrate_volume_completion(self, req, id, body): """Complete an in-progress migration.""" context = req.environ['cinder.context'] # Not found exception will be handled at the wsgi level volume = self._get(context, id) self.authorize(context, 'migrate_volume_completion', target_obj=volume) params = body['os-migrate_volume_completion'] new_volume_id = params['new_volume'] # Not found exception will be handled at the wsgi level new_volume = self._get(context, new_volume_id) error = params.get('error', False) ret = self.volume_api.migrate_volume_completion(context, volume, new_volume, error) return {'save_volume_id': ret} @wsgi.response(HTTPStatus.ACCEPTED) @wsgi.action('os-extend_volume_completion') @validation.schema(admin_actions.extend_volume_completion) def _extend_volume_completion(self, req, id, body): """Complete an in-progress extend operation.""" context = req.environ['cinder.context'] # Not found exception will be handled at the wsgi level volume = self._get(context, id) self.authorize(context, 'extend_volume_completion', target_obj=volume) params = body['os-extend_volume_completion'] error = params.get('error', False) self.volume_api.extend_volume_completion(context, volume, error) class SnapshotAdminController(AdminController): """AdminController for Snapshots.""" collection = 'snapshots' def _notify_reset_status(self, context, id, message): snapshot = objects.Snapshot.get_by_id(context, id) volume_utils.notify_about_snapshot_usage(context, snapshot, message) @validation.schema(admin_actions.reset_status_snapshot) def validate_update(self, req, body): status = body['os-reset_status']['status'] update = {'status': status.lower()} return update def _update(self, *args, **kwargs): context = args[0] snapshot_id = args[1] fields = args[2] snapshot = objects.Snapshot.get_by_id(context, snapshot_id) self.authorize(context, 'reset_status', target_obj=snapshot) snapshot.update(fields) snapshot.save() def _get(self, *args, **kwargs): return self.volume_api.get_snapshot(*args, **kwargs) def _delete(self, *args, **kwargs): return self.volume_api.delete_snapshot(*args, **kwargs) class BackupAdminController(AdminController): """AdminController for Backups.""" collection = 'backups' def _notify_reset_status(self, 
context, id, message): backup = objects.Backup.get_by_id(context, id) volume_utils.notify_about_backup_usage(context, backup, message) def _get(self, *args, **kwargs): return self.backup_api.get(*args, **kwargs) def _delete(self, *args, **kwargs): return self.backup_api.delete(*args, **kwargs) @wsgi.response(HTTPStatus.ACCEPTED) @wsgi.action('os-reset_status') @validation.schema(admin_actions.reset_status_backup) def _reset_status(self, req, id, body): """Reset status on the resource.""" context = req.environ['cinder.context'] status = body['os-reset_status']['status'] update = {'status': status.lower()} msg = "Updating %(resource)s '%(id)s' with '%(update)r'" LOG.debug(msg, {'resource': self.resource_name, 'id': id, 'update': update}) self._notify_reset_status(context, id, 'reset_status.start') # Not found exception will be handled at the wsgi level self.backup_api.reset_status(context=context, backup_id=id, status=update['status']) class Admin_actions(extensions.ExtensionDescriptor): """Enable admin actions.""" name = "AdminActions" alias = "os-admin-actions" updated = "2012-08-25T00:00:00+00:00" def get_controller_extensions(self): exts = [] for class_ in (VolumeAdminController, SnapshotAdminController, BackupAdminController): controller = class_() extension = extensions.ControllerExtension( self, class_.collection, controller) exts.append(extension) return exts ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/api/contrib/availability_zones.py0000664000175000017500000000320400000000000023005 0ustar00zuulzuul00000000000000# Copyright (c) 2013 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from cinder.api import extensions from cinder.api.openstack import wsgi import cinder.api.views.availability_zones import cinder.exception import cinder.volume.api class Controller(wsgi.Controller): _view_builder_class = cinder.api.views.availability_zones.ViewBuilder def __init__(self, *args, **kwargs): super(Controller, self).__init__(*args, **kwargs) self.volume_api = cinder.volume.api.API() def index(self, req): """Describe all known availability zones.""" azs = self.volume_api.list_availability_zones() return self._view_builder.list(req, azs) class Availability_zones(extensions.ExtensionDescriptor): """Describe Availability Zones.""" name = 'AvailabilityZones' alias = 'os-availability-zone' updated = '2013-06-27T00:00:00+00:00' def get_resources(self): controller = Controller() res = extensions.ResourceExtension(Availability_zones.alias, controller) return [res] ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/api/contrib/backups.py0000664000175000017500000002724400000000000020557 0ustar00zuulzuul00000000000000# Copyright (C) 2012 Hewlett-Packard Development Company, L.P. # Copyright (c) 2014 TrilioData, Inc # Copyright (c) 2015 EMC Corporation # All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """The backups api.""" from http import HTTPStatus from oslo_log import log as logging from oslo_utils import strutils from webob import exc from cinder.api import api_utils from cinder.api import common from cinder.api import extensions from cinder.api import microversions as mv from cinder.api.openstack import wsgi from cinder.api.schemas import backups as backup from cinder.api import validation from cinder.api.views import backups as backup_views from cinder import backup as backupAPI from cinder import exception from cinder import utils from cinder import volume as volumeAPI LOG = logging.getLogger(__name__) class BackupsController(wsgi.Controller): """The Backups API controller for the OpenStack API.""" _view_builder_class = backup_views.ViewBuilder def __init__(self): self.backup_api = backupAPI.API() self.volume_api = volumeAPI.API() super(BackupsController, self).__init__() def show(self, req, id): """Return data about the given backup.""" LOG.debug('Show backup with id: %s.', id) context = req.environ['cinder.context'] # Not found exception will be handled at the wsgi level backup = self.backup_api.get(context, backup_id=id) req.cache_db_backup(backup) return self._view_builder.detail(req, backup) @wsgi.response(HTTPStatus.ACCEPTED) def delete(self, req, id): """Delete a backup.""" context = req.environ['cinder.context'] LOG.info('Delete backup with id: %s.', id) try: backup = self.backup_api.get(context, id) self.backup_api.delete(context, backup) # Not found exception will be handled at the wsgi level except exception.InvalidBackup as error: raise exc.HTTPBadRequest(explanation=error.msg) def index(self, req): """Returns a summary list of backups.""" return self._get_backups(req, is_detail=False) def detail(self, req): """Returns a detailed list of backups.""" return self._get_backups(req, is_detail=True) @staticmethod def _get_backup_filter_options(): """Return volume search options allowed by non-admin.""" return ('name', 'status', 'volume_id') @common.process_general_filtering('backup') def _process_backup_filtering(self, context=None, filters=None, req_version=None): api_utils.remove_invalid_filter_options( context, filters, self._get_backup_filter_options()) def _convert_sort_name(self, req_version, sort_keys): """Convert sort key "name" to "display_name". 
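In this base controller the conversion is a no-op; API-version-aware subclasses may override it.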
""" pass def _get_backups(self, req, is_detail): """Returns a list of backups, transformed through view builder.""" context = req.environ['cinder.context'] filters = req.params.copy() req_version = req.api_version_request marker, limit, offset = common.get_pagination_params(filters) sort_keys, sort_dirs = common.get_sort_params(filters) show_count = False if req_version.matches( mv.SUPPORT_COUNT_INFO) and 'with_count' in filters: show_count = utils.get_bool_param('with_count', filters) filters.pop('with_count') self._convert_sort_name(req_version, sort_keys) self._process_backup_filtering(context=context, filters=filters, req_version=req_version) if 'name' in filters: filters['display_name'] = filters.pop('name') backups = self.backup_api.get_all(context, search_opts=filters.copy(), marker=marker, limit=limit, offset=offset, sort_keys=sort_keys, sort_dirs=sort_dirs, ) total_count = None if show_count: total_count = self.volume_api.calculate_resource_count( context, 'backup', filters) req.cache_db_backups(backups.objects) if is_detail: backups = self._view_builder.detail_list(req, backups.objects, total_count) else: backups = self._view_builder.summary_list(req, backups.objects, total_count) return backups # TODO(frankm): Add some checks here including # - whether requested volume_id exists so we can return some errors # immediately # - maybe also do validation of swift container name @wsgi.response(HTTPStatus.ACCEPTED) @validation.schema(backup.create, mv.BASE_VERSION, mv.get_prior_version(mv.BACKUP_METADATA)) @validation.schema(backup.create_backup_v343, mv.BACKUP_METADATA, mv.get_prior_version(mv.BACKUP_AZ)) @validation.schema(backup.create_backup_v351, mv.BACKUP_AZ) def create(self, req, body): """Create a new backup.""" LOG.debug('Creating new backup %s', body) context = req.environ['cinder.context'] req_version = req.api_version_request backup = body['backup'] container = backup.get('container', None) volume_id = backup['volume_id'] self.validate_name_and_description(backup, check_length=False) name = backup.get('name', None) description = backup.get('description', None) incremental = strutils.bool_from_string(backup.get( 'incremental', False), strict=True) force = strutils.bool_from_string(backup.get( 'force', False), strict=True) snapshot_id = backup.get('snapshot_id', None) metadata = backup.get('metadata', None) if req_version.matches( mv.BACKUP_METADATA) else None if req_version.matches(mv.BACKUP_AZ): availability_zone = backup.get('availability_zone', None) else: availability_zone = None az_text = ' in az %s' % availability_zone if availability_zone else '' LOG.info("Creating backup of volume %(volume_id)s in container" " %(container)s%(az)s", {'volume_id': volume_id, 'container': container, 'az': az_text}, context=context) try: new_backup = self.backup_api.create(context, name, description, volume_id, container, incremental, availability_zone, force, snapshot_id, metadata) except (exception.InvalidVolume, exception.InvalidSnapshot, exception.InvalidVolumeMetadata, exception.InvalidVolumeMetadataSize) as error: raise exc.HTTPBadRequest(explanation=error.msg) # Other not found exceptions will be handled at the wsgi level except exception.ServiceNotFound as error: raise exc.HTTPServiceUnavailable(explanation=error.msg) retval = self._view_builder.summary(req, dict(new_backup)) return retval @wsgi.response(HTTPStatus.ACCEPTED) @validation.schema(backup.restore) def restore(self, req, id, body): """Restore an existing backup to a volume.""" LOG.debug('Restoring backup %(backup_id)s 
(%(body)s)', {'backup_id': id, 'body': body}) context = req.environ['cinder.context'] restore = body['restore'] volume_id = restore.get('volume_id', None) name = restore.get('name', None) LOG.info("Restoring backup %(backup_id)s to volume %(volume_id)s.", {'backup_id': id, 'volume_id': volume_id}, context=context) try: new_restore = self.backup_api.restore(context, backup_id=id, volume_id=volume_id, name=name) # Not found exception will be handled at the wsgi level except (exception.InvalidInput, exception.InvalidVolume, exception.InvalidBackup) as error: raise exc.HTTPBadRequest(explanation=error.msg) except (exception.VolumeSizeExceedsAvailableQuota, exception.VolumeLimitExceeded) as error: raise exc.HTTPRequestEntityTooLarge( explanation=error.msg, headers={'Retry-After': '0'}) retval = self._view_builder.restore_summary( req, dict(new_restore)) return retval def export_record(self, req, id): """Export a backup.""" LOG.debug('Export record for backup %s.', id) context = req.environ['cinder.context'] try: backup_info = self.backup_api.export_record(context, id) # Not found exception will be handled at the wsgi level except exception.InvalidBackup as error: raise exc.HTTPBadRequest(explanation=error.msg) retval = self._view_builder.export_summary( req, dict(backup_info)) LOG.debug('Exported record output: %s.', retval) return retval @wsgi.response(HTTPStatus.CREATED) @validation.schema(backup.import_record) def import_record(self, req, body): """Import a backup.""" LOG.debug('Importing record from %s.', body) context = req.environ['cinder.context'] import_data = body['backup-record'] backup_service = import_data['backup_service'] backup_url = import_data['backup_url'] LOG.debug('Importing backup using %(service)s and url %(url)s.', {'service': backup_service, 'url': backup_url}) try: new_backup = self.backup_api.import_record(context, backup_service, backup_url) except exception.InvalidBackup as error: raise exc.HTTPBadRequest(explanation=error.msg) # Other Not found exceptions will be handled at the wsgi level except exception.ServiceNotFound as error: raise exc.HTTPServiceUnavailable(explanation=error.msg) retval = self._view_builder.summary(req, dict(new_backup)) LOG.debug('Import record output: %s.', retval) return retval class Backups(extensions.ExtensionDescriptor): """Backups support.""" name = 'Backups' alias = 'backups' updated = '2012-12-12T00:00:00+00:00' def get_resources(self): resources = [] res = extensions.ResourceExtension( Backups.alias, BackupsController(), collection_actions={'detail': 'GET', 'import_record': 'POST'}, member_actions={'restore': 'POST', 'export_record': 'GET', 'action': 'POST'}) resources.append(res) return resources ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/api/contrib/capabilities.py0000664000175000017500000000514000000000000021547 0ustar00zuulzuul00000000000000# Copyright (c) 2015 Hitachi Data Systems, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
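# A minimal client-side sketch of the request bodies handled by the
# BackupsController above: restore() reads body['restore'] (volume_id, name)
# and import_record() reads body['backup-record'] (backup_service,
# backup_url).  The URL paths are inferred from the ResourceExtension routing
# shown above; cinder_url is assumed to already include the project-scoped
# /v3/{project_id} prefix, the token and ids are placeholders, and the
# python-requests library is assumed to be available.
import requests


def restore_backup(cinder_url, token, backup_id, volume_id=None, name=None):
    """POST /backups/{backup_id}/restore"""
    restore = {}
    if volume_id:
        restore['volume_id'] = volume_id
    if name:
        restore['name'] = name
    resp = requests.post('%s/backups/%s/restore' % (cinder_url, backup_id),
                         json={'restore': restore},
                         headers={'X-Auth-Token': token})
    resp.raise_for_status()
    return resp.json()


def import_backup_record(cinder_url, token, backup_service, backup_url):
    """POST /backups/import_record"""
    body = {'backup-record': {'backup_service': backup_service,
                              'backup_url': backup_url}}
    resp = requests.post('%s/backups/import_record' % cinder_url,
                         json=body,
                         headers={'X-Auth-Token': token})
    resp.raise_for_status()
    return resp.json()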
import oslo_messaging from cinder.api import extensions from cinder.api.openstack import wsgi from cinder.api.views import capabilities as capabilities_view from cinder.common import constants from cinder import exception from cinder.i18n import _ from cinder import objects from cinder.policies import capabilities as policy from cinder.volume import rpcapi class CapabilitiesController(wsgi.Controller): """The Capabilities controller for the OpenStack API.""" _view_builder_class = capabilities_view.ViewBuilder def __init__(self): # FIXME(jdg): Is it kosher that this just # skips the volume.api and goes straight to RPC # from here? self.volume_api = rpcapi.VolumeAPI() super(CapabilitiesController, self).__init__() def show(self, req, id): """Return capabilities list of given backend.""" context = req.environ['cinder.context'] context.authorize(policy.CAPABILITIES_POLICY) filters = {'host_or_cluster': id, 'binary': constants.VOLUME_BINARY} services = objects.ServiceList.get_all(context, filters) if not services: msg = (_("Can't find service: %s") % id) raise exception.NotFound(msg) topic = services[0].service_topic_queue try: capabilities = self.volume_api.get_capabilities(context, topic, False) except oslo_messaging.MessagingTimeout: raise exception.RPCTimeout(service=topic) return self._view_builder.summary(req, capabilities, topic) class Capabilities(extensions.ExtensionDescriptor): """Capabilities support.""" name = "Capabilities" alias = "capabilities" updated = "2015-08-31T00:00:00+00:00" def get_resources(self): resources = [] res = extensions.ResourceExtension( Capabilities.alias, CapabilitiesController()) resources.append(res) return resources ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/api/contrib/cgsnapshots.py0000664000175000017500000001430400000000000021454 0ustar00zuulzuul00000000000000# Copyright (C) 2012 - 2014 EMC Corporation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """The cgsnapshots api.""" from http import HTTPStatus from oslo_log import log as logging from oslo_log import versionutils import webob from webob import exc from cinder.api import common from cinder.api import extensions from cinder.api.openstack import wsgi from cinder.api.views import cgsnapshots as cgsnapshot_views from cinder import exception from cinder import group as group_api from cinder.i18n import _ LOG = logging.getLogger(__name__) DEPRECATE_CGSNAP_API_MSG = ("Consistency Group Snapshot APIs are deprecated. 
" "Use Generic Volume Group Snapshot APIs instead.") class CgsnapshotsController(wsgi.Controller): """The cgsnapshots API controller for the OpenStack API.""" _view_builder_class = cgsnapshot_views.ViewBuilder def __init__(self): self.group_snapshot_api = group_api.API() super(CgsnapshotsController, self).__init__() def show(self, req, id): """Return data about the given cgsnapshot.""" versionutils.report_deprecated_feature(LOG, DEPRECATE_CGSNAP_API_MSG) LOG.debug('show called for member %s', id) context = req.environ['cinder.context'] # Not found exception will be handled at the wsgi level cgsnapshot = self._get_cgsnapshot(context, id) return self._view_builder.detail(req, cgsnapshot) def delete(self, req, id): """Delete a cgsnapshot.""" versionutils.report_deprecated_feature(LOG, DEPRECATE_CGSNAP_API_MSG) LOG.debug('delete called for member %s', id) context = req.environ['cinder.context'] LOG.info('Delete cgsnapshot with id: %s', id) try: cgsnapshot = self._get_cgsnapshot(context, id) self.group_snapshot_api.delete_group_snapshot(context, cgsnapshot) except exception.InvalidGroupSnapshot as e: raise exc.HTTPBadRequest(explanation=str(e)) except (exception.GroupSnapshotNotFound, exception.PolicyNotAuthorized): # Exceptions will be handled at the wsgi level raise except Exception: msg = _('Failed to delete the cgsnapshot') raise exc.HTTPBadRequest(explanation=msg) return webob.Response(status_int=HTTPStatus.ACCEPTED) def index(self, req): """Returns a summary list of cgsnapshots.""" versionutils.report_deprecated_feature(LOG, DEPRECATE_CGSNAP_API_MSG) return self._get_cgsnapshots(req, is_detail=False) def detail(self, req): """Returns a detailed list of cgsnapshots.""" versionutils.report_deprecated_feature(LOG, DEPRECATE_CGSNAP_API_MSG) return self._get_cgsnapshots(req, is_detail=True) def _get_cg(self, context, id): # Not found exception will be handled at the wsgi level consistencygroup = self.group_snapshot_api.get(context, group_id=id) return consistencygroup def _get_cgsnapshot(self, context, id): # Not found exception will be handled at the wsgi level cgsnapshot = self.group_snapshot_api.get_group_snapshot( context, group_snapshot_id=id) return cgsnapshot def _get_cgsnapshots(self, req, is_detail): """Returns a list of cgsnapshots, transformed through view builder.""" context = req.environ['cinder.context'] grp_snapshots = self.group_snapshot_api.get_all_group_snapshots( context) grpsnap_limited_list = common.limited(grp_snapshots, req) if is_detail: grp_snapshots = self._view_builder.detail_list( req, grpsnap_limited_list) else: grp_snapshots = self._view_builder.summary_list( req, grpsnap_limited_list) return grp_snapshots @wsgi.response(HTTPStatus.ACCEPTED) def create(self, req, body): """Create a new cgsnapshot.""" versionutils.report_deprecated_feature(LOG, DEPRECATE_CGSNAP_API_MSG) LOG.debug('Creating new cgsnapshot %s', body) self.assert_valid_body(body, 'cgsnapshot') context = req.environ['cinder.context'] cgsnapshot = body['cgsnapshot'] self.validate_name_and_description(cgsnapshot) try: group_id = cgsnapshot['consistencygroup_id'] except KeyError: msg = _("'consistencygroup_id' must be specified") raise exc.HTTPBadRequest(explanation=msg) # Not found exception will be handled at the wsgi level group = self._get_cg(context, group_id) name = cgsnapshot.get('name', None) description = cgsnapshot.get('description', None) LOG.info("Creating cgsnapshot %(name)s.", {'name': name}, context=context) try: new_cgsnapshot = self.group_snapshot_api.create_group_snapshot( context, 
group, name, description) # Not found exception will be handled at the wsgi level except (exception.InvalidGroup, exception.InvalidGroupSnapshot, exception.InvalidVolume) as error: raise exc.HTTPBadRequest(explanation=error.msg) retval = self._view_builder.summary(req, new_cgsnapshot) return retval class Cgsnapshots(extensions.ExtensionDescriptor): """cgsnapshots support.""" name = 'Cgsnapshots' alias = 'cgsnapshots' updated = '2014-08-18T00:00:00+00:00' def get_resources(self): resources = [] res = extensions.ResourceExtension( Cgsnapshots.alias, CgsnapshotsController(), collection_actions={'detail': 'GET'}) resources.append(res) return resources ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/api/contrib/consistencygroups.py0000664000175000017500000003166300000000000022730 0ustar00zuulzuul00000000000000# Copyright (C) 2012 - 2014 EMC Corporation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """The consistencygroups api.""" from http import HTTPStatus from oslo_log import log as logging from oslo_log import versionutils from oslo_utils import strutils import webob from webob import exc from cinder.api import common from cinder.api import extensions from cinder.api.openstack import wsgi from cinder.api.views import consistencygroups as consistencygroup_views from cinder import exception from cinder import group as group_api from cinder.i18n import _ from cinder.policies import group_actions as gp_action_policy from cinder.policies import groups as group_policy from cinder.volume import group_types LOG = logging.getLogger(__name__) DEPRECATE_CG_API_MSG = ("Consistency Group APIs are deprecated. 
" "Use Generic Volume Group APIs instead.") class ConsistencyGroupsController(wsgi.Controller): """The ConsistencyGroups API controller for the OpenStack API.""" _view_builder_class = consistencygroup_views.ViewBuilder def __init__(self): self.group_api = group_api.API() super(ConsistencyGroupsController, self).__init__() def show(self, req, id): """Return data about the given consistency group.""" versionutils.report_deprecated_feature(LOG, DEPRECATE_CG_API_MSG) LOG.debug('show called for member %s', id) context = req.environ['cinder.context'] # Not found exception will be handled at the wsgi level consistencygroup = self._get(context, id) return self._view_builder.detail(req, consistencygroup) def delete(self, req, id, body): """Delete a consistency group.""" versionutils.report_deprecated_feature(LOG, DEPRECATE_CG_API_MSG) LOG.debug('delete called for member %s', id) context = req.environ['cinder.context'] force = False if body: self.assert_valid_body(body, 'consistencygroup') cg_body = body['consistencygroup'] try: force = strutils.bool_from_string(cg_body.get('force', False), strict=True) except ValueError: msg = _("Invalid value '%s' for force.") % force raise exc.HTTPBadRequest(explanation=msg) LOG.info('Delete consistency group with id: %s', id) try: group = self._get(context, id) context.authorize(gp_action_policy.DELETE_POLICY, target_obj=group) self.group_api.delete(context, group, force) # Not found exception will be handled at the wsgi level except exception.InvalidConsistencyGroup as error: raise exc.HTTPBadRequest(explanation=error.msg) return webob.Response(status_int=HTTPStatus.ACCEPTED) def index(self, req): """Returns a summary list of consistency groups.""" versionutils.report_deprecated_feature(LOG, DEPRECATE_CG_API_MSG) return self._get_consistencygroups(req, is_detail=False) def detail(self, req): """Returns a detailed list of consistency groups.""" versionutils.report_deprecated_feature(LOG, DEPRECATE_CG_API_MSG) return self._get_consistencygroups(req, is_detail=True) def _get(self, context, id): # Not found exception will be handled at the wsgi level consistencygroup = self.group_api.get(context, group_id=id) return consistencygroup def _get_cgsnapshot(self, context, id): # Not found exception will be handled at the wsgi level cgsnapshot = self.group_api.get_group_snapshot( context, group_snapshot_id=id) return cgsnapshot def _get_consistencygroups(self, req, is_detail): """Returns a list of consistency groups through view builder.""" context = req.environ['cinder.context'] context.authorize(group_policy.GET_ALL_POLICY) filters = req.params.copy() # make another copy of filters, since it is being modified in # consistencygroup_api while getting consistencygroups marker, limit, offset = common.get_pagination_params(filters) sort_keys, sort_dirs = common.get_sort_params(filters) groups = self.group_api.get_all( context, filters=filters, marker=marker, limit=limit, offset=offset, sort_keys=sort_keys, sort_dirs=sort_dirs) if is_detail: groups = self._view_builder.detail_list(req, groups) else: groups = self._view_builder.summary_list(req, groups) return groups @wsgi.response(HTTPStatus.ACCEPTED) def create(self, req, body): """Create a new consistency group.""" versionutils.report_deprecated_feature(LOG, DEPRECATE_CG_API_MSG) LOG.debug('Creating new consistency group %s', body) self.assert_valid_body(body, 'consistencygroup') context = req.environ['cinder.context'] context.authorize(group_policy.CREATE_POLICY) consistencygroup = body['consistencygroup'] 
self.validate_name_and_description(consistencygroup) name = consistencygroup.get('name', None) description = consistencygroup.get('description', None) volume_types = consistencygroup.get('volume_types', None) if not volume_types: msg = _("volume_types must be provided to create " "consistency group %(name)s.") % {'name': name} raise exc.HTTPBadRequest(explanation=msg) volume_types = volume_types.rstrip(',').split(',') availability_zone = consistencygroup.get('availability_zone', None) group_type = group_types.get_default_cgsnapshot_type() if not group_type: msg = (_('Group type %s not found. Rerun migration script to ' 'create the default cgsnapshot type.') % group_types.DEFAULT_CGSNAPSHOT_TYPE) raise exc.HTTPBadRequest(explanation=msg) LOG.info("Creating consistency group %(name)s.", {'name': name}) try: new_consistencygroup = self.group_api.create( context, name, description, group_type['id'], volume_types, availability_zone=availability_zone) except (exception.InvalidConsistencyGroup, exception.InvalidGroup, exception.InvalidVolumeType, exception.ObjectActionError) as error: raise exc.HTTPBadRequest(explanation=error.msg) except exception.NotFound: # Not found exception will be handled at the wsgi level raise retval = self._view_builder.summary(req, new_consistencygroup) return retval @wsgi.response(HTTPStatus.ACCEPTED) def create_from_src(self, req, body): """Create a new consistency group from a source. The source can be a CG snapshot or a CG. Note that this does not require volume_types as the "create" API above. """ versionutils.report_deprecated_feature(LOG, DEPRECATE_CG_API_MSG) LOG.debug('Creating new consistency group %s.', body) self.assert_valid_body(body, 'consistencygroup-from-src') context = req.environ['cinder.context'] context.authorize(group_policy.CREATE_POLICY) consistencygroup = body['consistencygroup-from-src'] self.validate_name_and_description(consistencygroup) name = consistencygroup.get('name', None) description = consistencygroup.get('description', None) cgsnapshot_id = consistencygroup.get('cgsnapshot_id', None) source_cgid = consistencygroup.get('source_cgid', None) if not cgsnapshot_id and not source_cgid: msg = _("Either 'cgsnapshot_id' or 'source_cgid' must be " "provided to create consistency group %(name)s " "from source.") % {'name': name} raise exc.HTTPBadRequest(explanation=msg) if cgsnapshot_id and source_cgid: msg = _("Cannot provide both 'cgsnapshot_id' and 'source_cgid' " "to create consistency group %(name)s from " "source.") % {'name': name} raise exc.HTTPBadRequest(explanation=msg) if cgsnapshot_id: LOG.info("Creating consistency group %(name)s from " "cgsnapshot %(snap)s.", {'name': name, 'snap': cgsnapshot_id}) elif source_cgid: LOG.info("Creating consistency group %(name)s from " "source consistency group %(source_cgid)s.", {'name': name, 'source_cgid': source_cgid}) try: if source_cgid: self._get(context, source_cgid) if cgsnapshot_id: self._get_cgsnapshot(context, cgsnapshot_id) new_group = self.group_api.create_from_src( context, name, description, cgsnapshot_id, source_cgid) except exception.NotFound: # Not found exception will be handled at the wsgi level raise except exception.CinderException as error: raise exc.HTTPBadRequest(explanation=error.msg) retval = self._view_builder.summary(req, new_group) return retval def _check_update_parameters(self, name, description, add_volumes, remove_volumes): if not (name or description or add_volumes or remove_volumes): msg = _("Name, description, add_volumes, and remove_volumes " "can not be all 
empty in the request body.") raise exc.HTTPBadRequest(explanation=msg) def _update(self, context, group, name, description, add_volumes, remove_volumes, allow_empty=False): LOG.info("Updating consistency group %(id)s with name %(name)s " "description: %(description)s add_volumes: " "%(add_volumes)s remove_volumes: %(remove_volumes)s.", {'id': group.id, 'name': name, 'description': description, 'add_volumes': add_volumes, 'remove_volumes': remove_volumes}) self.group_api.update(context, group, name, description, add_volumes, remove_volumes) def update(self, req, id, body): """Update the consistency group. Expected format of the input parameter 'body': .. code-block:: json { "consistencygroup": { "name": "my_cg", "description": "My consistency group", "add_volumes": "volume-uuid-1,volume-uuid-2,...", "remove_volumes": "volume-uuid-8,volume-uuid-9,..." } } """ versionutils.report_deprecated_feature(LOG, DEPRECATE_CG_API_MSG) LOG.debug('Update called for consistency group %s.', id) if not body: msg = _("Missing request body.") raise exc.HTTPBadRequest(explanation=msg) self.assert_valid_body(body, 'consistencygroup') context = req.environ['cinder.context'] group = self._get(context, id) context.authorize(group_policy.UPDATE_POLICY, target_obj=group) consistencygroup = body.get('consistencygroup', None) self.validate_name_and_description(consistencygroup) name = consistencygroup.get('name', None) description = consistencygroup.get('description', None) add_volumes = consistencygroup.get('add_volumes', None) remove_volumes = consistencygroup.get('remove_volumes', None) self._check_update_parameters(name, description, add_volumes, remove_volumes) self._update(context, group, name, description, add_volumes, remove_volumes) return webob.Response(status_int=HTTPStatus.ACCEPTED) class Consistencygroups(extensions.ExtensionDescriptor): """consistency groups support.""" name = 'Consistencygroups' alias = 'consistencygroups' updated = '2014-08-18T00:00:00+00:00' def get_resources(self): resources = [] res = extensions.ResourceExtension( Consistencygroups.alias, ConsistencyGroupsController(), collection_actions={'detail': 'GET', 'create_from_src': 'POST'}, member_actions={'delete': 'POST', 'update': 'PUT'}) resources.append(res) return resources ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/api/contrib/extended_services.py0000664000175000017500000000151000000000000022616 0ustar00zuulzuul00000000000000# Copyright 2014 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
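# A minimal client-side sketch of the consistency group update call defined
# by the ConsistencyGroupsController above (member action 'update' routed as
# PUT /consistencygroups/{id}/update, body format shown in the update()
# docstring).  cinder_url is assumed to include the project-scoped prefix;
# the token and uuids are placeholders and python-requests is assumed.
import requests


def update_consistencygroup(cinder_url, token, group_id, name=None,
                            description=None, add_volumes=None,
                            remove_volumes=None):
    # add_volumes / remove_volumes are comma-separated uuid strings, as in
    # the update() docstring above; unset fields are omitted from the body.
    fields = {'name': name, 'description': description,
              'add_volumes': add_volumes, 'remove_volumes': remove_volumes}
    body = {'consistencygroup': {k: v for k, v in fields.items()
                                 if v is not None}}
    resp = requests.put('%s/consistencygroups/%s/update'
                        % (cinder_url, group_id),
                        json=body,
                        headers={'X-Auth-Token': token})
    resp.raise_for_status()
    return resp.status_code  # 202 ACCEPTED on success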
from cinder.api import extensions class Extended_services(extensions.ExtensionDescriptor): """Extended services support.""" name = "ExtendedServices" alias = "os-extended-services" updated = "2014-01-10T00:00:00-00:00" ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/api/contrib/extended_snapshot_attributes.py0000664000175000017500000000437300000000000025112 0ustar00zuulzuul00000000000000# Copyright 2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """The Extended Snapshot Attributes API extension.""" from cinder.api import extensions from cinder.api.openstack import wsgi from cinder.policies import snapshots as policy class ExtendedSnapshotAttributesController(wsgi.Controller): def _extend_snapshot(self, req, resp_snap): db_snap = req.get_db_snapshot(resp_snap['id']) for attr in ['project_id', 'progress']: key = "%s:%s" % (Extended_snapshot_attributes.alias, attr) resp_snap[key] = db_snap[attr] @wsgi.extends def show(self, req, resp_obj, id): context = req.environ['cinder.context'] if context.authorize(policy.EXTEND_ATTRIBUTE, fatal=False): # Attach our slave template to the response object snapshot = resp_obj.obj['snapshot'] self._extend_snapshot(req, snapshot) @wsgi.extends def detail(self, req, resp_obj): context = req.environ['cinder.context'] if context.authorize(policy.EXTEND_ATTRIBUTE, fatal=False): # Attach our slave template to the response object for snapshot in list(resp_obj.obj['snapshots']): self._extend_snapshot(req, snapshot) class Extended_snapshot_attributes(extensions.ExtensionDescriptor): """Extended SnapshotAttributes support.""" name = "ExtendedSnapshotAttributes" alias = "os-extended-snapshot-attributes" updated = "2012-06-19T00:00:00+00:00" def get_controller_extensions(self): controller = ExtendedSnapshotAttributesController() extension = extensions.ControllerExtension(self, 'snapshots', controller) return [extension] ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/api/contrib/hosts.py0000664000175000017500000002040100000000000020253 0ustar00zuulzuul00000000000000# Copyright (c) 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
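# A minimal sketch of the key naming used by
# ExtendedSnapshotAttributesController above: each extended attribute is
# prefixed with the extension alias, e.g.
# 'os-extended-snapshot-attributes:progress'.  The snapshot dicts below are
# made-up placeholders, not real API output.
ALIAS = 'os-extended-snapshot-attributes'


def extend_snapshot_dict(resp_snap, db_snap):
    """Copy project_id/progress into the response, mirroring the logic above."""
    for attr in ('project_id', 'progress'):
        resp_snap['%s:%s' % (ALIAS, attr)] = db_snap[attr]
    return resp_snap


example = extend_snapshot_dict(
    {'id': 'snapshot-uuid', 'status': 'available'},
    {'project_id': 'project-uuid', 'progress': '100%'})
# example == {'id': 'snapshot-uuid', 'status': 'available',
#             'os-extended-snapshot-attributes:project_id': 'project-uuid',
#             'os-extended-snapshot-attributes:progress': '100%'}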
"""The hosts admin extension.""" from oslo_config import cfg from oslo_log import log as logging from oslo_log import versionutils from oslo_utils import timeutils import webob.exc from cinder.api import extensions from cinder.api.openstack import wsgi from cinder.common import constants from cinder import db from cinder import exception from cinder.i18n import _ from cinder import objects from cinder.policies import hosts as policy from cinder.volume import api as volume_api CONF = cfg.CONF LOG = logging.getLogger(__name__) def _list_hosts(req, service=None): """Returns a summary list of hosts.""" curr_time = timeutils.utcnow(with_timezone=True) context = req.environ['cinder.context'] filters = {'disabled': False} services = objects.ServiceList.get_all(context, filters) zone = '' if 'zone' in req.GET: zone = req.GET['zone'] if zone: services = [s for s in services if s['availability_zone'] == zone] hosts = [] for host in services: delta = curr_time - (host.updated_at or host.created_at) alive = abs(delta.total_seconds()) <= CONF.service_down_time status = "available" if alive else "unavailable" active = 'enabled' if host.disabled: active = 'disabled' LOG.debug('status, active and update: %s, %s, %s', status, active, host.updated_at) updated_at = host.updated_at if updated_at: updated_at = timeutils.normalize_time(updated_at) hosts.append({'host_name': host.host, 'service': host.topic, 'zone': host.availability_zone, 'service-status': status, 'service-state': active, 'last-update': updated_at, }) if service: hosts = [host for host in hosts if host['service'] == service] return hosts def check_host(fn): """Makes sure that the host exists.""" def wrapped(self, req, id, service=None, *args, **kwargs): listed_hosts = _list_hosts(req, service) hosts = [h["host_name"] for h in listed_hosts] if id in hosts: return fn(self, req, id, *args, **kwargs) raise exception.HostNotFound(host=id) return wrapped class HostController(wsgi.Controller): """The Hosts API controller for the OpenStack API.""" def __init__(self): self.api = volume_api.HostAPI() super(HostController, self).__init__() versionutils.report_deprecated_feature( LOG, "The Host API is deprecated and will be " "be removed in a future version.") def index(self, req): context = req.environ['cinder.context'] context.authorize(policy.MANAGE_POLICY) return {'hosts': _list_hosts(req)} @check_host def update(self, req, id, body): context = req.environ['cinder.context'] context.authorize(policy.MANAGE_POLICY) update_values = {} for raw_key, raw_val in body.items(): key = raw_key.lower().strip() val = raw_val.lower().strip() if key == "status": if val in ("enable", "disable"): update_values['status'] = val.startswith("enable") else: explanation = _("Invalid status: '%s'") % raw_val raise webob.exc.HTTPBadRequest(explanation=explanation) else: explanation = _("Invalid update setting: '%s'") % raw_key raise webob.exc.HTTPBadRequest(explanation=explanation) update_setters = {'status': self._set_enabled_status} result = {} for key, value in update_values.items(): result.update(update_setters[key](req, id, value)) return result def _set_enabled_status(self, req, host, enabled): """Sets the specified host's ability to accept new volumes.""" context = req.environ['cinder.context'] state = "enabled" if enabled else "disabled" LOG.info("Setting host %(host)s to %(state)s.", {'host': host, 'state': state}) result = self.api.set_host_enabled(context, host=host, enabled=enabled) if result not in ("enabled", "disabled"): # An error message was returned raise 
webob.exc.HTTPBadRequest(explanation=result) return {"host": host, "status": result} def show(self, req, id): """Shows the volume usage info given by hosts. :param req: security context :param id: hostname :returns: dict -- the host resources dictionary. ex.:: {'host': [{'resource': D},..]} D: {'host': 'hostname','project': 'admin', 'volume_count': 1, 'total_volume_gb': 2048} """ host = id context = req.environ['cinder.context'] context.authorize(policy.MANAGE_POLICY) # Not found exception will be handled at the wsgi level host_ref = objects.Service.get_by_host_and_topic( context, host, constants.VOLUME_TOPIC) # Getting total available/used resource on a host. volume_refs = db.volume_get_all_by_host(context, host_ref.host) (count, vol_sum) = db.volume_data_get_for_host(context, host_ref.host) snap_count_total = 0 snap_sum_total = 0 resources = [{'resource': {'host': host, 'project': '(total)', 'volume_count': str(count), 'total_volume_gb': str(vol_sum), 'snapshot_count': str(snap_count_total), 'total_snapshot_gb': str(snap_sum_total)}}] project_ids = [v['project_id'] for v in volume_refs] project_ids = list(set(project_ids)) for project_id in project_ids: (count, vol_sum) = db.volume_data_get_for_project( context, project_id, host=host_ref.host) (snap_count, snap_sum) = ( objects.Snapshot.snapshot_data_get_for_project( context, project_id, host=host_ref.host)) resources.append( {'resource': {'host': host, 'project': project_id, 'volume_count': str(count), 'total_volume_gb': str(vol_sum), 'snapshot_count': str(snap_count), 'total_snapshot_gb': str(snap_sum)}}) snap_count_total += int(snap_count) snap_sum_total += int(snap_sum) resources[0]['resource']['snapshot_count'] = str(snap_count_total) resources[0]['resource']['total_snapshot_gb'] = str(snap_sum_total) return {"host": resources} class Hosts(extensions.ExtensionDescriptor): """Admin-only host administration.""" name = "Hosts" alias = "os-hosts" updated = "2011-06-29T00:00:00+00:00" def get_resources(self): resources = [extensions.ResourceExtension('os-hosts', HostController(), collection_actions={ 'update': 'PUT'}, member_actions={ 'startup': 'GET', 'shutdown': 'GET', 'reboot': 'GET'})] return resources ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/api/contrib/qos_specs_manage.py0000664000175000017500000004223000000000000022426 0ustar00zuulzuul00000000000000# Copyright (c) 2013 eBay Inc. # Copyright (c) 2013 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
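# A minimal sketch of two details of the hosts API above: (1) the liveness
# check used by _list_hosts(), where a service is "available" if its last
# heartbeat is within CONF.service_down_time seconds, and (2) the only body
# accepted by HostController.update(), {"status": "enable"} or
# {"status": "disable"}.  The service_down_time=60 default here is assumed
# for the example, and the timestamps are placeholders.
import datetime


def host_service_status(updated_at, now, service_down_time=60):
    delta = now - updated_at
    alive = abs(delta.total_seconds()) <= service_down_time
    return 'available' if alive else 'unavailable'


now = datetime.datetime(2025, 1, 1, 12, 0, 0)
print(host_service_status(now - datetime.timedelta(seconds=30), now))   # available
print(host_service_status(now - datetime.timedelta(seconds=300), now))  # unavailable

EXAMPLE_HOST_UPDATE_BODY = {'status': 'disable'}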
"""The QoS specs extension""" from http import HTTPStatus from oslo_log import log as logging from oslo_utils import timeutils import webob from cinder.api import api_utils from cinder.api import common from cinder.api import extensions from cinder.api.openstack import wsgi from cinder.api.schemas import qos_specs as qos_specs_schema from cinder.api import validation from cinder.api.views import qos_specs as view_qos_specs from cinder import exception from cinder.i18n import _ from cinder.policies import qos_specs as policy from cinder import rpc from cinder import utils from cinder.volume import qos_specs LOG = logging.getLogger(__name__) def _check_specs(context, specs_id): # Not found exception will be handled at the wsgi level qos_specs.get_qos_specs(context, specs_id) class QoSSpecsController(wsgi.Controller): """The volume type extra specs API controller for the OpenStack API.""" _view_builder_class = view_qos_specs.ViewBuilder @staticmethod @utils.if_notifications_enabled def _notify_qos_specs_error(context, method, payload): rpc.get_notifier('QoSSpecs').error(context, method, payload) def index(self, req): """Returns the list of qos_specs.""" context = req.environ['cinder.context'] context.authorize(policy.GET_ALL_POLICY) params = req.params.copy() marker, limit, offset = common.get_pagination_params(params) sort_keys, sort_dirs = common.get_sort_params(params) filters = params allowed_search_options = ('id', 'name', 'consumer') api_utils.remove_invalid_filter_options(context, filters, allowed_search_options) specs = qos_specs.get_all_specs(context, filters=filters, marker=marker, limit=limit, offset=offset, sort_keys=sort_keys, sort_dirs=sort_dirs) return self._view_builder.summary_list(req, specs) @validation.schema(qos_specs_schema.create) def create(self, req, body=None): context = req.environ['cinder.context'] context.authorize(policy.CREATE_POLICY) specs = body['qos_specs'] name = specs.pop('name', None) name = name.strip() try: spec = qos_specs.create(context, name, specs) notifier_info = dict(name=name, created_at=spec.created_at, specs=specs) rpc.get_notifier('QoSSpecs').info(context, 'qos_specs.create', notifier_info) except exception.InvalidQoSSpecs as err: notifier_err = dict(name=name, error_message=err) self._notify_qos_specs_error(context, 'qos_specs.create', notifier_err) raise webob.exc.HTTPBadRequest(explanation=str(err)) except exception.QoSSpecsExists as err: notifier_err = dict(name=name, error_message=err) self._notify_qos_specs_error(context, 'qos_specs.create', notifier_err) raise webob.exc.HTTPConflict(explanation=str(err)) except exception.QoSSpecsCreateFailed as err: notifier_err = dict(name=name, error_message=err) self._notify_qos_specs_error(context, 'qos_specs.create', notifier_err) raise webob.exc.HTTPInternalServerError( explanation=str(err)) return self._view_builder.detail(req, spec) @validation.schema(qos_specs_schema.set) def update(self, req, id, body=None): context = req.environ['cinder.context'] context.authorize(policy.UPDATE_POLICY) specs = body['qos_specs'] try: spec = qos_specs.get_qos_specs(context, id) qos_specs.update(context, id, specs) notifier_info = dict(id=id, created_at=spec.created_at, updated_at=timeutils.utcnow(), specs=specs) rpc.get_notifier('QoSSpecs').info(context, 'qos_specs.update', notifier_info) except (exception.QoSSpecsNotFound, exception.InvalidQoSSpecs) as err: notifier_err = dict(id=id, error_message=err) self._notify_qos_specs_error(context, 'qos_specs.update', notifier_err) # Not found exception will be handled 
at the wsgi level raise except exception.QoSSpecsUpdateFailed as err: notifier_err = dict(id=id, error_message=err) self._notify_qos_specs_error(context, 'qos_specs.update', notifier_err) raise webob.exc.HTTPInternalServerError( explanation=str(err)) return body def show(self, req, id): """Return a single qos spec item.""" context = req.environ['cinder.context'] context.authorize(policy.GET_POLICY) # Not found exception will be handled at the wsgi level spec = qos_specs.get_qos_specs(context, id) return self._view_builder.detail(req, spec) def delete(self, req, id): """Deletes an existing qos specs.""" context = req.environ['cinder.context'] context.authorize(policy.DELETE_POLICY) # Convert string to bool type in strict manner force = utils.get_bool_param('force', req.params) LOG.debug("Delete qos_spec: %(id)s, force: %(force)s", {'id': id, 'force': force}) try: spec = qos_specs.get_qos_specs(context, id) qos_specs.delete(context, id, force) notifier_info = dict(id=id, created_at=spec.created_at, deleted_at=timeutils.utcnow()) rpc.get_notifier('QoSSpecs').info(context, 'qos_specs.delete', notifier_info) except exception.QoSSpecsNotFound as err: notifier_err = dict(id=id, error_message=err) self._notify_qos_specs_error(context, 'qos_specs.delete', notifier_err) # Not found exception will be handled at the wsgi level raise except exception.QoSSpecsInUse as err: notifier_err = dict(id=id, error_message=err) self._notify_qos_specs_error(context, 'qos_specs.delete', notifier_err) if force: msg = _('Failed to disassociate qos specs.') raise webob.exc.HTTPInternalServerError(explanation=msg) msg = _('Qos specs still in use.') raise webob.exc.HTTPBadRequest(explanation=msg) return webob.Response(status_int=HTTPStatus.ACCEPTED) @validation.schema(qos_specs_schema.unset) def delete_keys(self, req, id, body): """Deletes specified keys in qos specs.""" context = req.environ['cinder.context'] context.authorize(policy.DELETE_POLICY) keys = body['keys'] LOG.debug("Delete_key spec: %(id)s, keys: %(keys)s", {'id': id, 'keys': keys}) try: qos_specs.delete_keys(context, id, keys) spec = qos_specs.get_qos_specs(context, id) notifier_info = dict(id=id, created_at=spec.created_at, updated_at=spec.updated_at) rpc.get_notifier('QoSSpecs').info(context, 'qos_specs.delete_keys', notifier_info) except exception.NotFound as err: notifier_err = dict(id=id, error_message=err) self._notify_qos_specs_error(context, 'qos_specs.delete_keys', notifier_err) # Not found exception will be handled at the wsgi level raise return webob.Response(status_int=HTTPStatus.ACCEPTED) def associations(self, req, id): """List all associations of given qos specs.""" context = req.environ['cinder.context'] context.authorize(policy.GET_ALL_POLICY) LOG.debug("Get associations for qos_spec id: %s", id) try: spec = qos_specs.get_qos_specs(context, id) associates = qos_specs.get_associations(context, id) notifier_info = dict(id=id, created_at=spec.created_at) rpc.get_notifier('QoSSpecs').info(context, 'qos_specs.associations', notifier_info) except exception.QoSSpecsNotFound as err: notifier_err = dict(id=id, error_message=err) self._notify_qos_specs_error(context, 'qos_specs.associations', notifier_err) # Not found exception will be handled at the wsgi level raise except exception.CinderException as err: notifier_err = dict(id=id, error_message=err) self._notify_qos_specs_error(context, 'qos_specs.associations', notifier_err) raise webob.exc.HTTPInternalServerError( explanation=str(err)) return self._view_builder.associations(req, associates) 
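# A minimal sketch of two calls defined by this controller: create() expects
# {'qos_specs': {...}}, pops 'name', and passes every remaining key through
# as a spec; associate() (defined just below) is a GET member action taking
# the volume type in the 'vol_type_id' query parameter.  The names and uuids
# are placeholder assumptions.
EXAMPLE_QOS_SPECS_CREATE_BODY = {
    'qos_specs': {
        'name': 'gold-tier',          # popped by create()
        'consumer': 'back-end',       # remaining keys are passed as specs
        'read_iops_sec': '2000',
        'write_iops_sec': '1000',
    }
}
# GET /qos-specs/{qos_id}/associate?vol_type_id={volume_type_uuid}
EXAMPLE_ASSOCIATE_QUERY = {'vol_type_id': 'volume-type-uuid'}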
def associate(self, req, id): """Associate a qos specs with a volume type.""" context = req.environ['cinder.context'] context.authorize(policy.UPDATE_POLICY) type_id = req.params.get('vol_type_id', None) if not type_id: msg = _('Volume Type id must not be None.') notifier_err = dict(id=id, error_message=msg) self._notify_qos_specs_error(context, 'qos_specs.delete', notifier_err) raise webob.exc.HTTPBadRequest(explanation=msg) LOG.debug("Associate qos_spec: %(id)s with type: %(type_id)s", {'id': id, 'type_id': type_id}) try: spec = qos_specs.get_qos_specs(context, id) qos_specs.associate_qos_with_type(context, id, type_id) notifier_info = dict(id=id, type_id=type_id, created_at=spec.created_at) rpc.get_notifier('QoSSpecs').info(context, 'qos_specs.associate', notifier_info) except exception.NotFound as err: notifier_err = dict(id=id, error_message=err) self._notify_qos_specs_error(context, 'qos_specs.associate', notifier_err) # Not found exception will be handled at the wsgi level raise except exception.InvalidVolumeType as err: notifier_err = dict(id=id, error_message=err) self._notify_qos_specs_error(context, 'qos_specs.associate', notifier_err) self._notify_qos_specs_error(context, 'qos_specs.associate', notifier_err) raise webob.exc.HTTPBadRequest(explanation=str(err)) except exception.QoSSpecsAssociateFailed as err: notifier_err = dict(id=id, error_message=err) self._notify_qos_specs_error(context, 'qos_specs.associate', notifier_err) raise webob.exc.HTTPInternalServerError( explanation=str(err)) return webob.Response(status_int=HTTPStatus.ACCEPTED) def disassociate(self, req, id): """Disassociate a qos specs from a volume type.""" context = req.environ['cinder.context'] context.authorize(policy.UPDATE_POLICY) type_id = req.params.get('vol_type_id', None) if not type_id: msg = _('Volume Type id must not be None.') notifier_err = dict(id=id, error_message=msg) self._notify_qos_specs_error(context, 'qos_specs.delete', notifier_err) raise webob.exc.HTTPBadRequest(explanation=msg) LOG.debug("Disassociate qos_spec: %(id)s from type: %(type_id)s", {'id': id, 'type_id': type_id}) try: spec = qos_specs.get_qos_specs(context, id) qos_specs.disassociate_qos_specs(context, id, type_id) notifier_info = dict(id=id, type_id=type_id, created_at=spec.created_at) rpc.get_notifier('QoSSpecs').info(context, 'qos_specs.disassociate', notifier_info) except exception.NotFound as err: notifier_err = dict(id=id, error_message=err) self._notify_qos_specs_error(context, 'qos_specs.disassociate', notifier_err) # Not found exception will be handled at the wsgi level raise except exception.QoSSpecsDisassociateFailed as err: notifier_err = dict(id=id, error_message=err) self._notify_qos_specs_error(context, 'qos_specs.disassociate', notifier_err) raise webob.exc.HTTPInternalServerError( explanation=str(err)) return webob.Response(status_int=HTTPStatus.ACCEPTED) def disassociate_all(self, req, id): """Disassociate a qos specs from all volume types.""" context = req.environ['cinder.context'] context.authorize(policy.UPDATE_POLICY) LOG.debug("Disassociate qos_spec: %s from all.", id) try: spec = qos_specs.get_qos_specs(context, id) qos_specs.disassociate_all(context, id) notifier_info = dict(id=id, created_at=spec.created_at) rpc.get_notifier('QoSSpecs').info(context, 'qos_specs.disassociate_all', notifier_info) except exception.QoSSpecsNotFound as err: notifier_err = dict(id=id, error_message=err) self._notify_qos_specs_error(context, 'qos_specs.disassociate_all', notifier_err) # Not found exception will be handled 
at the wsgi level raise except exception.QoSSpecsDisassociateFailed as err: notifier_err = dict(id=id, error_message=err) self._notify_qos_specs_error(context, 'qos_specs.disassociate_all', notifier_err) raise webob.exc.HTTPInternalServerError( explanation=str(err)) return webob.Response(status_int=HTTPStatus.ACCEPTED) class Qos_specs_manage(extensions.ExtensionDescriptor): """QoS specs support.""" name = "Qos_specs_manage" alias = "qos-specs" updated = "2013-08-02T00:00:00+00:00" def get_resources(self): resources = [] res = extensions.ResourceExtension( Qos_specs_manage.alias, QoSSpecsController(), member_actions={"associations": "GET", "associate": "GET", "disassociate": "GET", "disassociate_all": "GET", "delete_keys": "PUT"}) resources.append(res) return resources ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/api/contrib/quota_classes.py0000664000175000017500000000612700000000000021772 0ustar00zuulzuul00000000000000# Copyright 2012 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import webob from cinder.api import extensions from cinder.api.openstack import wsgi from cinder.api.schemas import quota_classes as quota_class from cinder.api import validation from cinder import db from cinder import exception from cinder.policies import quota_class as policy from cinder import quota QUOTAS = quota.QUOTAS GROUP_QUOTAS = quota.GROUP_QUOTAS class QuotaClassSetsController(wsgi.Controller): def _format_quota_set(self, quota_class, quota_set): """Convert the quota object to a result dict.""" quota_set['id'] = str(quota_class) return dict(quota_class_set=quota_set) def show(self, req, id): context = req.environ['cinder.context'] context.authorize(policy.GET_POLICY) try: db.sqlalchemy.api.authorize_quota_class_context(context, id) except exception.NotAuthorized: raise webob.exc.HTTPForbidden() quota_set = QUOTAS.get_class_quotas(context, id) group_quota_set = GROUP_QUOTAS.get_class_quotas(context, id) quota_set.update(group_quota_set) return self._format_quota_set(id, quota_set) @validation.schema(quota_class.update_quota_class) def update(self, req, id, body): context = req.environ['cinder.context'] context.authorize(policy.UPDATE_POLICY) self.validate_string_length(id, 'quota_class_name', min_length=1, max_length=255) quota_class = id for key, value in body['quota_class_set'].items(): try: db.quota_class_update(context, quota_class, key, value) except exception.QuotaClassNotFound: db.quota_class_create(context, quota_class, key, value) except exception.AdminRequired: raise webob.exc.HTTPForbidden() quota_set = QUOTAS.get_class_quotas(context, quota_class) group_quota_set = GROUP_QUOTAS.get_class_quotas(context, quota_class) quota_set.update(group_quota_set) return {'quota_class_set': quota_set} class Quota_classes(extensions.ExtensionDescriptor): """Quota classes management support.""" name = "QuotaClasses" alias = "os-quota-class-sets" updated = "2012-03-12T00:00:00+00:00" def 
get_resources(self): resources = [] res = extensions.ResourceExtension('os-quota-class-sets', QuotaClassSetsController()) resources.append(res) return resources ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/api/contrib/quotas.py0000664000175000017500000001550200000000000020435 0ustar00zuulzuul00000000000000# Copyright 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import webob from cinder.api import extensions from cinder.api.openstack import wsgi from cinder.api.schemas import quotas from cinder.api import validation from cinder import db from cinder import exception from cinder.i18n import _ from cinder.policies import quotas as policy from cinder import quota from cinder import utils QUOTAS = quota.QUOTAS GROUP_QUOTAS = quota.GROUP_QUOTAS NON_QUOTA_KEYS = quota.NON_QUOTA_KEYS class QuotaSetsController(wsgi.Controller): def _format_quota_set(self, project_id, quota_set): """Convert the quota object to a result dict.""" quota_set['id'] = str(project_id) return dict(quota_set=quota_set) def _validate_existing_resource(self, key, value, quota_values): # -1 limit will always be greater than the existing value if key == 'per_volume_gigabytes' or value == -1: return v = quota_values.get(key, {}) used = (v.get('in_use', 0) + v.get('reserved', 0)) if value < used: msg = (_("Quota %(key)s limit must be equal or greater than " "existing resources. 
Current usage is %(usage)s " "and the requested limit is %(limit)s.") % {'key': key, 'usage': used, 'limit': value}) raise webob.exc.HTTPBadRequest(explanation=msg) def _get_quotas(self, context, id, usages=False): values = QUOTAS.get_project_quotas(context, id, usages=usages) group_values = GROUP_QUOTAS.get_project_quotas(context, id, usages=usages) values.update(group_values) if usages: return values else: return {k: v['limit'] for k, v in values.items()} def show(self, req, id): """Show quota for a particular tenant :param req: request :param id: target project id that needs to be shown """ context = req.environ['cinder.context'] params = req.params target_project_id = id context.authorize(policy.SHOW_POLICY, target={'project_id': target_project_id}) if not hasattr(params, '__call__') and 'usage' in params: usage = utils.get_bool_param('usage', params) else: usage = False quotas = self._get_quotas(context, target_project_id, usage) return self._format_quota_set(target_project_id, quotas) @validation.schema(quotas.update_quota) def update(self, req, id, body): """Update Quota for a particular tenant :param req: request :param id: target project id that needs to be updated :param body: key, value pair that will be applied to the resources if the update succeeds """ context = req.environ['cinder.context'] target_project_id = id context.authorize(policy.UPDATE_POLICY, target={'project_id': target_project_id}) self.validate_string_length(id, 'quota_set_name', min_length=1, max_length=255) # NOTE(ankit): Pass #1 - In this loop for body['quota_set'].keys(), # we validate the quota limits to ensure that we can bail out if # any of the items in the set is bad. Meanwhile we validate value # to ensure that the value can't be lower than number of existing # resources. quota_values = QUOTAS.get_project_quotas(context, target_project_id, defaults=False) group_quota_values = GROUP_QUOTAS.get_project_quotas(context, target_project_id, defaults=False) quota_values.update(group_quota_values) valid_quotas = {} reservations = [] for key in body['quota_set'].keys(): if key in NON_QUOTA_KEYS: continue self._validate_existing_resource(key, body['quota_set'][key], quota_values) valid_quotas[key] = body['quota_set'][key] # NOTE(ankit): Pass #2 - At this point we know that all the keys and # values are valid and we can iterate and update them all in one shot # without having to worry about rolling back etc as we have done # the validation up front in the 2 loops above. for key, value in valid_quotas.items(): try: db.quota_update(context, target_project_id, key, value) except exception.ProjectQuotaNotFound: db.quota_create(context, target_project_id, key, value) except exception.AdminRequired: raise webob.exc.HTTPForbidden() if reservations: db.reservation_commit(context, reservations) return {'quota_set': self._get_quotas(context, target_project_id)} def _get_quota_usage(self, quota_obj): return (quota_obj.get('in_use', 0) + quota_obj.get('reserved', 0)) def defaults(self, req, id): context = req.environ['cinder.context'] context.authorize(policy.SHOW_POLICY, target={'project_id': id}) defaults = QUOTAS.get_defaults(context, project_id=id) group_defaults = GROUP_QUOTAS.get_defaults(context, project_id=id) defaults.update(group_defaults) return self._format_quota_set(id, defaults) def delete(self, req, id): """Delete Quota for a particular tenant. 
:param req: request :param id: target project id that needs to be deleted """ context = req.environ['cinder.context'] context.authorize(policy.DELETE_POLICY, target={'project_id': id}) db.quota_destroy_by_project(context, id) class Quotas(extensions.ExtensionDescriptor): """Quota management support.""" name = "Quotas" alias = "os-quota-sets" updated = "2011-08-08T00:00:00+00:00" def get_resources(self): resources = [] res = extensions.ResourceExtension( 'os-quota-sets', QuotaSetsController(), member_actions={'defaults': 'GET'}) resources.append(res) return resources ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/api/contrib/resource_common_manage.py0000664000175000017500000000503200000000000023625 0ustar00zuulzuul00000000000000# Copyright (c) 2016 Stratoscale, Ltd. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import oslo_messaging as messaging from cinder.api import common from cinder.api import microversions as mv from cinder import exception from cinder.i18n import _ def get_manageable_resources(req, is_detail, function_get_manageable, view_builder): context = req.environ['cinder.context'] params = req.params.copy() cluster_name, host = common.get_cluster_host( req, params, mv.MANAGE_EXISTING_CLUSTER) marker, limit, offset = common.get_pagination_params(params) sort_keys, sort_dirs = common.get_sort_params(params, default_key='reference') # These parameters are generally validated at the DB layer, but in this # case sorting is not done by the DB valid_sort_keys = ('reference', 'size') invalid_keys = [key for key in sort_keys if key not in valid_sort_keys] if invalid_keys: msg = _("Invalid sort keys passed: %s") % ', '.join(invalid_keys) raise exception.InvalidParameterValue(err=msg) valid_sort_dirs = ('asc', 'desc') invalid_dirs = [d for d in sort_dirs if d not in valid_sort_dirs] if invalid_dirs: msg = _("Invalid sort dirs passed: %s") % ', '.join(invalid_dirs) raise exception.InvalidParameterValue(err=msg) try: resources = function_get_manageable(context, host, cluster_name, marker=marker, limit=limit, offset=offset, sort_keys=sort_keys, sort_dirs=sort_dirs) except messaging.RemoteError as err: if err.exc_type == "InvalidInput": raise exception.InvalidInput(err.value) raise resource_count = len(resources) if is_detail: resources = view_builder.detail_list(req, resources, resource_count) else: resources = view_builder.summary_list(req, resources, resource_count) return resources ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/api/contrib/scheduler_hints.py0000664000175000017500000000335400000000000022306 0ustar00zuulzuul00000000000000# Copyright 2013 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from cinder.api import extensions from cinder.api.schemas import scheduler_hints from cinder.api import validation def create(req, body): attr = 'OS-SCH-HNT:scheduler_hints' if body.get(attr) is not None: scheduler_hints_body = dict.fromkeys((attr,), body.get(attr)) @validation.schema(scheduler_hints.create) def _validate_scheduler_hints(req=None, body=None): # TODO(pooja_jadhav): The scheduler hints schema validation # should be moved to v3 volume schema directly and this module # should be deleted at the time of deletion of v2 version code. pass _validate_scheduler_hints(req=req, body=scheduler_hints_body) body['volume']['scheduler_hints'] = scheduler_hints_body.get(attr) return body # NOTE: This class is added to include "OS-SCH-HNT" in the list extensions # response and "OS-SCH-HNT" is still not loaded as a standard extension. class Scheduler_hints(extensions.ExtensionDescriptor): """Pass arbitrary key/value pairs to the scheduler.""" name = "SchedulerHints" alias = "OS-SCH-HNT" updated = "2013-04-18T00:00:00+00:00" ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/api/contrib/scheduler_stats.py0000664000175000017500000000531600000000000022317 0ustar00zuulzuul00000000000000# Copyright (c) 2014 eBay Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
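# A minimal sketch of how the scheduler-hints helper above is used: a volume
# create body may carry an 'OS-SCH-HNT:scheduler_hints' attribute, which
# create(req, body) validates and copies into
# body['volume']['scheduler_hints'].  The hint name and uuid below are
# placeholder assumptions chosen for illustration.
EXAMPLE_VOLUME_CREATE_BODY_WITH_HINTS = {
    'volume': {
        'size': 10,
        'name': 'hinted-volume',
    },
    'OS-SCH-HNT:scheduler_hints': {
        'same_host': ['volume-uuid-1'],
    },
}
# After cinder.api.contrib.scheduler_hints.create(req, body) runs, the hints
# end up under EXAMPLE_VOLUME_CREATE_BODY_WITH_HINTS['volume']['scheduler_hints'].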
"""The Scheduler Stats extension""" from cinder.api import common from cinder.api import extensions from cinder.api import microversions as mv from cinder.api.openstack import wsgi from cinder.api.views import scheduler_stats as scheduler_stats_view from cinder.policies import scheduler_stats as policy from cinder.scheduler import rpcapi from cinder import utils class SchedulerStatsController(wsgi.Controller): """The Scheduler Stats controller for the OpenStack API.""" _view_builder_class = scheduler_stats_view.ViewBuilder def __init__(self): self.scheduler_api = rpcapi.SchedulerAPI() super(SchedulerStatsController, self).__init__() @common.process_general_filtering('pool') def _process_pool_filtering(self, context=None, filters=None, req_version=None): if not req_version.matches(mv.POOL_FILTER): filters.clear() def get_pools(self, req): """List all active pools in scheduler.""" context = req.environ['cinder.context'] context.authorize(policy.GET_POOL_POLICY) detail = utils.get_bool_param('detail', req.params) req_version = req.api_version_request filters = req.params.copy() filters.pop('detail', None) self._process_pool_filtering(context=context, filters=filters, req_version=req_version) if not req_version.matches(mv.POOL_TYPE_FILTER): filters.pop('volume_type', None) pools = self.scheduler_api.get_pools(context, filters=filters) return self._view_builder.pools(req, pools, detail) class Scheduler_stats(extensions.ExtensionDescriptor): """Scheduler stats support.""" name = "Scheduler_stats" alias = "scheduler-stats" updated = "2014-09-07T00:00:00+00:00" def get_resources(self): resources = [] res = extensions.ResourceExtension( Scheduler_stats.alias, SchedulerStatsController(), collection_actions={"get_pools": "GET"}) resources.append(res) return resources ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/api/contrib/services.py0000664000175000017500000003047200000000000020747 0ustar00zuulzuul00000000000000# Copyright 2012 IBM Corp. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from http import HTTPStatus from oslo_config import cfg from oslo_log import log as logging from oslo_utils import timeutils import webob.exc from cinder.api import common from cinder.api import extensions from cinder.api import microversions as mv from cinder.api.openstack import wsgi from cinder.api.schemas import services as os_services from cinder.api import validation from cinder.backup import rpcapi as backup_rpcapi from cinder.common import constants from cinder import exception from cinder.i18n import _ from cinder import objects from cinder.policies import services as policy from cinder.scheduler import rpcapi as scheduler_rpcapi from cinder import utils from cinder import volume from cinder.volume import rpcapi as volume_rpcapi from cinder.volume import volume_utils CONF = cfg.CONF LOG = logging.getLogger(__name__) class ServiceController(wsgi.Controller): def __init__(self, ext_mgr=None): self.ext_mgr = ext_mgr super(ServiceController, self).__init__() self.volume_api = volume.API() self.rpc_apis = { constants.SCHEDULER_BINARY: scheduler_rpcapi.SchedulerAPI(), constants.VOLUME_BINARY: volume_rpcapi.VolumeAPI(), constants.BACKUP_BINARY: backup_rpcapi.BackupAPI(), } def index(self, req): """Return a list of all running services. Filter by host & service name. """ context = req.environ['cinder.context'] context.authorize(policy.GET_ALL_POLICY) detailed = self.ext_mgr.is_loaded('os-extended-services') now = timeutils.utcnow(with_timezone=True) filters = {} if 'host' in req.GET: filters['host'] = req.GET['host'] if 'binary' in req.GET: filters['binary'] = req.GET['binary'] services = objects.ServiceList.get_all(context, filters) # Get backend state from scheduler if req.api_version_request.matches(mv.BACKEND_STATE_REPORT): backend_state_map = {} scheduler_api = self.rpc_apis[constants.SCHEDULER_BINARY] pools = scheduler_api.get_pools(context) for pool in pools: backend_name = volume_utils.extract_host(pool.get("name")) back_state = pool.get('capabilities', {}).get('backend_state', 'up') backend_state_map[backend_name] = back_state svcs = [] for svc in services: updated_at = svc.updated_at delta = now - (svc.updated_at or svc.created_at) delta_sec = delta.total_seconds() if svc.modified_at: delta_mod = now - svc.modified_at if abs(delta_sec) >= abs(delta_mod.total_seconds()): updated_at = svc.modified_at alive = abs(delta_sec) <= CONF.service_down_time art = "up" if alive else "down" active = 'enabled' if svc.disabled: active = 'disabled' if updated_at: updated_at = timeutils.normalize_time(updated_at) ret_fields = {'binary': svc.binary, 'host': svc.host, 'zone': svc.availability_zone, 'status': active, 'state': art, 'updated_at': updated_at} if (req.api_version_request.matches(mv.BACKEND_STATE_REPORT) and svc.binary == constants.VOLUME_BINARY): ret_fields['backend_state'] = backend_state_map.get(svc.host) # On CLUSTER_SUPPORT we added cluster support if req.api_version_request.matches(mv.CLUSTER_SUPPORT): ret_fields['cluster'] = svc.cluster_name if detailed: ret_fields['disabled_reason'] = svc.disabled_reason if svc.binary == constants.VOLUME_BINARY: ret_fields['replication_status'] = svc.replication_status ret_fields['active_backend_id'] = svc.active_backend_id ret_fields['frozen'] = svc.frozen svcs.append(ret_fields) return {'services': svcs} def _volume_api_proxy(self, fun, *args): try: return fun(*args) except exception.ServiceNotFound as ex: raise exception.InvalidInput(ex.msg) @validation.schema(os_services.freeze_and_thaw) def _freeze(self, req, context, body): cluster_name, 
host = common.get_cluster_host( req, body, mv.REPLICATION_CLUSTER) return self._volume_api_proxy(self.volume_api.freeze_host, context, host, cluster_name) @validation.schema(os_services.freeze_and_thaw) def _thaw(self, req, context, body): cluster_name, host = common.get_cluster_host( req, body, mv.REPLICATION_CLUSTER) return self._volume_api_proxy(self.volume_api.thaw_host, context, host, cluster_name) @validation.schema(os_services.failover_host) def _failover(self, req, context, clustered, body): # We set version to None to always get the cluster name from the body, # to False when we don't want to get it, and REPLICATION_CLUSTER when # we only want it if the requested version is REPLICATION_CLUSTER or # higher. version = mv.REPLICATION_CLUSTER if clustered else False cluster_name, host = common.get_cluster_host(req, body, version) self._volume_api_proxy(self.volume_api.failover, context, host, cluster_name, body.get('backend_id')) return webob.Response(status_int=HTTPStatus.ACCEPTED) def _log_params_binaries_services(self, context, body): """Get binaries and services referred by given log set/get request.""" query_filters = {'is_up': True} binary = body.get('binary') binaries = [] if binary in ('*', None, ''): binaries = constants.LOG_BINARIES elif binary == constants.API_BINARY: return [binary], [] elif binary in constants.LOG_BINARIES: binaries = [binary] query_filters['binary'] = binary server = body.get('server') if server: query_filters['host_or_cluster'] = server services = objects.ServiceList.get_all(context, filters=query_filters) return binaries, services @validation.schema(os_services.set_log) def _set_log(self, req, context, body): """Set log levels of services dynamically.""" prefix = body.get('prefix') level = body.get('level') binaries, services = self._log_params_binaries_services(context, body) log_req = objects.LogLevel(context, prefix=prefix, level=level) if constants.API_BINARY in binaries: utils.set_log_levels(prefix, level) for service in services: self.rpc_apis[service.binary].set_log_levels(context, service, log_req) return webob.Response(status_int=HTTPStatus.ACCEPTED) @validation.schema(os_services.get_log) def _get_log(self, req, context, body): """Get current log levels for services.""" prefix = body.get('prefix') binaries, services = self._log_params_binaries_services(context, body) result = [] log_req = objects.LogLevel(context, prefix=prefix) # Avoid showing constants if 'server' is set. server_filter = body.get('server') if not server_filter or server_filter == CONF.host: if constants.API_BINARY in binaries: levels = utils.get_log_levels(prefix) result.append({'host': CONF.host, 'binary': constants.API_BINARY, 'levels': levels}) for service in services: levels = self.rpc_apis[service.binary].get_log_levels(context, service, log_req) result.append({'host': service.host, 'binary': service.binary, 'levels': {le.prefix: le.level for le in levels}}) return {'log_levels': result} @validation.schema(os_services.disable_log_reason) def _disabled_log_reason(self, req, body): reason = body.get('disabled_reason') disabled = True status = "disabled" return reason, disabled, status @validation.schema(os_services.enable_and_disable) def _enable(self, req, body): disabled = False status = "enabled" return disabled, status @validation.schema(os_services.enable_and_disable) def _disable(self, req, body): disabled = True status = "disabled" return disabled, status def update(self, req, id, body): """Enable/Disable scheduling for a service. 
Includes Freeze/Thaw which sends call down to drivers and allows volume.manager for the specified host to disable the service rather than accessing the service directly in this API layer. """ context = req.environ['cinder.context'] context.authorize(policy.UPDATE_POLICY) support_dynamic_log = req.api_version_request.matches(mv.LOG_LEVEL) ext_loaded = self.ext_mgr.is_loaded('os-extended-services') ret_val = {} if id == "enable": disabled, status = self._enable(req, body=body) elif id == "disable": disabled, status = self._disable(req, body=body) elif id == "disable-log-reason" and ext_loaded: disabled_reason, disabled, status = ( self._disabled_log_reason(req, body=body)) ret_val['disabled_reason'] = disabled_reason elif id == "freeze": return self._freeze(req, context, body=body) elif id == "thaw": return self._thaw(req, context, body=body) elif id == "failover_host": return self._failover(req, context, False, body=body) elif (req.api_version_request.matches(mv.REPLICATION_CLUSTER) and id == 'failover'): return self._failover(req, context, True, body=body) elif support_dynamic_log and id == 'set-log': return self._set_log(req, context, body=body) elif support_dynamic_log and id == 'get-log': return self._get_log(req, context, body=body) else: raise exception.InvalidInput(reason=_("Unknown action")) host = common.get_cluster_host(req, body, False)[1] ret_val['disabled'] = disabled # NOTE(uni): deprecating service request key, binary takes precedence # Still keeping service key here for API compatibility sake. service = body.get('service', '') binary = body.get('binary', '') binary_key = binary or service # Not found exception will be handled at the wsgi level svc = objects.Service.get_by_args(context, host, binary_key) svc.disabled = ret_val['disabled'] if 'disabled_reason' in ret_val: svc.disabled_reason = ret_val['disabled_reason'] svc.save() ret_val.update({'host': host, 'service': service, 'binary': binary, 'status': status}) return ret_val class Services(extensions.ExtensionDescriptor): """Services support.""" name = "Services" alias = "os-services" updated = "2012-10-28T00:00:00-00:00" def get_resources(self): resources = [] controller = ServiceController(self.ext_mgr) resource = extensions.ResourceExtension('os-services', controller) resources.append(resource) return resources ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/api/contrib/snapshot_actions.py0000664000175000017500000000766500000000000022513 0ustar00zuulzuul00000000000000# Copyright 2013, Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
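# --- Illustrative sketch (not part of the Cinder source) ---
# Dependency-free restatement of how ServiceController.index() above derives
# the 'state' and 'updated_at' fields for each service: the newer of
# updated_at/modified_at is reported, and the service counts as "up" when
# its last heartbeat is within service_down_time seconds of now
# (CONF.service_down_time in the real module).

import datetime

def service_state(now, updated_at, created_at, modified_at, service_down_time):
    reported = updated_at
    delta = now - (updated_at or created_at)
    delta_sec = delta.total_seconds()
    if modified_at:
        delta_mod = now - modified_at
        if abs(delta_sec) >= abs(delta_mod.total_seconds()):
            reported = modified_at
    alive = abs(delta_sec) <= service_down_time
    return ('up' if alive else 'down'), reported

_now = datetime.datetime(2024, 1, 1, 12, 0, 0)
_recent = _now - datetime.timedelta(seconds=10)
assert service_state(_now, _recent, _recent, None, 60)[0] == 'up'
assert service_state(_now, _now - datetime.timedelta(hours=1),
                     _recent, None, 60)[0] == 'down'
# --- end sketch ---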
from http import HTTPStatus from oslo_log import log as logging import webob from cinder.api import extensions from cinder.api.openstack import wsgi from cinder.api.schemas import snapshot_actions from cinder.api import validation from cinder.i18n import _ from cinder import objects from cinder.objects import fields from cinder.policies import snapshot_actions as policy LOG = logging.getLogger(__name__) class SnapshotActionsController(wsgi.Controller): def __init__(self, *args, **kwargs): super(SnapshotActionsController, self).__init__(*args, **kwargs) LOG.debug("SnapshotActionsController initialized") @wsgi.action('os-update_snapshot_status') @validation.schema(snapshot_actions.update_snapshot_status) def _update_snapshot_status(self, req, id, body): """Update database fields related to status of a snapshot. Intended for creation of snapshots, so snapshot state must start as 'creating' and be changed to 'available', 'creating', or 'error'. """ context = req.environ['cinder.context'] LOG.debug("body: %s", body) status = body['os-update_snapshot_status']['status'] # Allowed state transitions status_map = {fields.SnapshotStatus.CREATING: [fields.SnapshotStatus.CREATING, fields.SnapshotStatus.AVAILABLE, fields.SnapshotStatus.ERROR], fields.SnapshotStatus.DELETING: [fields.SnapshotStatus.DELETING, fields.SnapshotStatus.ERROR_DELETING]} current_snapshot = objects.Snapshot.get_by_id(context, id) context.authorize(policy.UPDATE_STATUS_POLICY, target_obj=current_snapshot) if current_snapshot.status not in status_map: msg = _("Snapshot status %(cur)s not allowed for " "update_snapshot_status") % { 'cur': current_snapshot.status} raise webob.exc.HTTPBadRequest(explanation=msg) if status not in status_map[current_snapshot.status]: msg = _("Provided snapshot status %(provided)s not allowed for " "snapshot with status %(current)s.") % \ {'provided': status, 'current': current_snapshot.status} raise webob.exc.HTTPBadRequest(explanation=msg) update_dict = {'id': id, 'status': status} progress = body['os-update_snapshot_status'].get('progress', None) if progress: update_dict.update({'progress': progress}) LOG.info("Updating snapshot %(id)s with info %(dict)s", {'id': id, 'dict': update_dict}) current_snapshot.update(update_dict) current_snapshot.save() return webob.Response(status_int=HTTPStatus.ACCEPTED) class Snapshot_actions(extensions.ExtensionDescriptor): """Enable snapshot manager actions.""" name = "SnapshotActions" alias = "os-snapshot-actions" updated = "2013-07-16T00:00:00+00:00" def get_controller_extensions(self): controller = SnapshotActionsController() extension = extensions.ControllerExtension(self, 'snapshots', controller) return [extension] ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/api/contrib/snapshot_manage.py0000664000175000017500000001264400000000000022274 0ustar00zuulzuul00000000000000# Copyright 2015 Huawei Technologies Co., Ltd. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
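# --- Illustrative sketch (not part of the Cinder source) ---
# A stand-alone version of the state-transition check performed by
# _update_snapshot_status() above, using plain strings in place of the
# cinder.objects.fields.SnapshotStatus constants.

ALLOWED_TRANSITIONS = {
    'creating': ['creating', 'available', 'error'],
    'deleting': ['deleting', 'error_deleting'],
}

def can_update_status(current, requested):
    """True if a snapshot in 'current' may be moved to 'requested'."""
    return requested in ALLOWED_TRANSITIONS.get(current, [])

assert can_update_status('creating', 'available')
assert not can_update_status('available', 'error')   # not an allowed source state
assert not can_update_status('creating', 'deleting')
# --- end sketch ---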
from http import HTTPStatus from oslo_log import log as logging from cinder.api.contrib import resource_common_manage from cinder.api import extensions from cinder.api.openstack import wsgi from cinder.api.schemas import snapshot_manage from cinder.api import validation from cinder.api.views import manageable_snapshots as list_manageable_view from cinder.api.views import snapshots as snapshot_views from cinder.policies import manageable_snapshots as policy from cinder import volume as cinder_volume LOG = logging.getLogger(__name__) class SnapshotManageController(wsgi.Controller): """The /os-snapshot-manage controller for the OpenStack API.""" _view_builder_class = snapshot_views.ViewBuilder def __init__(self, *args, **kwargs): super(SnapshotManageController, self).__init__(*args, **kwargs) self.volume_api = cinder_volume.API() self._list_manageable_view = list_manageable_view.ViewBuilder() @wsgi.response(HTTPStatus.ACCEPTED) @validation.schema(snapshot_manage.create) def create(self, req, body): """Instruct Cinder to manage a storage snapshot object. Manages an existing backend storage snapshot object (e.g. a Linux logical volume or a SAN disk) by creating the Cinder objects required to manage it, and possibly renaming the backend storage snapshot object (driver dependent). From an API perspective, this operation behaves very much like a snapshot creation operation. Required HTTP Body: .. code-block:: json { "snapshot": { "volume_id": "", "ref": "" } } See the appropriate Cinder drivers' implementations of the manage_snapshot method to find out the accepted format of 'ref'. For example,in LVM driver, it will be the logic volume name of snapshot which you want to manage. This API call will return with an error if any of the above elements are missing from the request, or if the 'volume_id' element refers to a cinder volume that could not be found. The snapshot will later enter the error state if it is discovered that 'ref' is bad. Optional elements to 'snapshot' are:: name A name for the new snapshot. description A description for the new snapshot. metadata Key/value pairs to be associated with the new snapshot. 
""" context = req.environ['cinder.context'] snapshot = body['snapshot'] # Check whether volume exists volume_id = snapshot['volume_id'] # Not found exception will be handled at the wsgi level volume = self.volume_api.get(context, volume_id) context.authorize(policy.MANAGE_POLICY, target_obj=volume) LOG.debug('Manage snapshot request body: %s', body) snapshot_parameters = {} snapshot_parameters['metadata'] = snapshot.get('metadata', None) snapshot_parameters['description'] = snapshot.get('description', None) snapshot_parameters['name'] = snapshot.get('name') # Not found exception will be handled at the wsgi level new_snapshot = self.volume_api.manage_existing_snapshot( context, snapshot['ref'], volume, **snapshot_parameters) return self._view_builder.detail(req, new_snapshot) @wsgi.extends def index(self, req): """Returns a summary list of snapshots available to manage.""" context = req.environ['cinder.context'] context.authorize(policy.LIST_MANAGEABLE_POLICY) return resource_common_manage.get_manageable_resources( req, False, self.volume_api.get_manageable_snapshots, self._list_manageable_view) @wsgi.extends def detail(self, req): """Returns a detailed list of snapshots available to manage.""" context = req.environ['cinder.context'] context.authorize(policy.LIST_MANAGEABLE_POLICY) return resource_common_manage.get_manageable_resources( req, True, self.volume_api.get_manageable_snapshots, self._list_manageable_view) class Snapshot_manage(extensions.ExtensionDescriptor): """Allows existing backend storage to be 'managed' by Cinder.""" name = 'SnapshotManage' alias = 'os-snapshot-manage' updated = '2014-12-31T00:00:00+00:00' def get_resources(self): controller = SnapshotManageController() return [extensions.ResourceExtension(Snapshot_manage.alias, controller, collection_actions= {'detail': 'GET'})] ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/api/contrib/snapshot_unmanage.py0000664000175000017500000000535200000000000022635 0ustar00zuulzuul00000000000000# Copyright 2015 Huawei Technologies Co., Ltd. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from http import HTTPStatus from oslo_log import log as logging import webob from webob import exc from cinder.api import extensions from cinder.api.openstack import wsgi from cinder import exception from cinder.policies import manageable_snapshots as policy from cinder import volume LOG = logging.getLogger(__name__) class SnapshotUnmanageController(wsgi.Controller): def __init__(self, *args, **kwargs): super(SnapshotUnmanageController, self).__init__(*args, **kwargs) self.volume_api = volume.API() @wsgi.response(HTTPStatus.ACCEPTED) @wsgi.action('os-unmanage') def unmanage(self, req, id, body): """Stop managing a snapshot. This action is very much like a delete, except that a different method (unmanage) is called on the Cinder driver. This has the effect of removing the snapshot from Cinder management without actually removing the backend storage object associated with it. 
There are no required parameters. A Not Found error is returned if the specified snapshot does not exist. """ context = req.environ['cinder.context'] LOG.info("Unmanage snapshot with id: %s", id) try: snapshot = self.volume_api.get_snapshot(context, id) context.authorize(policy.UNMANAGE_POLICY, target_obj=snapshot) self.volume_api.delete_snapshot(context, snapshot, unmanage_only=True) # Not found exception will be handled at the wsgi level except exception.InvalidSnapshot as ex: raise exc.HTTPBadRequest(explanation=ex.msg) return webob.Response(status_int=HTTPStatus.ACCEPTED) class Snapshot_unmanage(extensions.ExtensionDescriptor): """Enable volume unmanage operation.""" name = "SnapshotUnmanage" alias = "os-snapshot-unmanage" updated = "2014-12-31T00:00:00+00:00" def get_controller_extensions(self): controller = SnapshotUnmanageController() extension = extensions.ControllerExtension(self, 'snapshots', controller) return [extension] ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/api/contrib/types_extra_specs.py0000664000175000017500000002007600000000000022667 0ustar00zuulzuul00000000000000# Copyright (c) 2011 Zadara Storage Inc. # Copyright (c) 2011 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """The volume types extra specs extension""" from http import HTTPStatus import webob from cinder.api import extensions from cinder.api.openstack import wsgi from cinder.api.schemas import types_extra_specs from cinder.api import validation from cinder import context as ctxt from cinder import db from cinder import exception from cinder.i18n import _ from cinder.image import image_utils from cinder.policies import type_extra_specs as policy from cinder import rpc from cinder.volume import volume_types class VolumeTypeExtraSpecsController(wsgi.Controller): """The volume type extra specs API controller for the OpenStack API.""" def _get_extra_specs(self, context, type_id): extra_specs = db.volume_type_extra_specs_get(context, type_id) if context.authorize(policy.READ_SENSITIVE_POLICY, fatal=False): specs_dict = extra_specs else: # Limit the response to contain only user visible specs. 
specs_dict = {} for uv_spec in policy.USER_VISIBLE_EXTRA_SPECS: if uv_spec in extra_specs: specs_dict[uv_spec] = extra_specs[uv_spec] return dict(extra_specs=specs_dict) def _check_type(self, context, type_id): # Not found exception will be handled at the wsgi level volume_types.get_volume_type(context, type_id) def index(self, req, type_id): """Returns the list of extra specs for a given volume type.""" context = req.environ['cinder.context'] context.authorize(policy.GET_ALL_POLICY) self._check_type(context, type_id) return self._get_extra_specs(context, type_id) def _allow_update(self, context, type_id): vols = db.volume_get_all( ctxt.get_admin_context(), limit=1, filters={'volume_type_id': type_id}) if len(vols): expl = _('Volume Type is currently in use.') raise webob.exc.HTTPBadRequest(explanation=expl) return def _check_cacheable(self, specs, type_id): extra_specs = volume_types.get_volume_type_extra_specs(type_id) multiattach = extra_specs.get('multiattach') cacheable = extra_specs.get('cacheable') isTrue = ' True' if (specs.get('multiattach') == isTrue and cacheable == isTrue) or ( specs.get('cacheable') == isTrue and multiattach == isTrue) or (specs.get('cacheable') == isTrue and specs.get('multiattach') == isTrue): expl = _('cacheable cannot be set with multiattach.') raise webob.exc.HTTPBadRequest(explanation=expl) return @validation.schema(types_extra_specs.create) def create(self, req, type_id, body): context = req.environ['cinder.context'] context.authorize(policy.CREATE_POLICY) self._allow_update(context, type_id) self._check_type(context, type_id) specs = body['extra_specs'] if 'image_service:store_id' in specs: image_service_store_id = specs['image_service:store_id'] image_utils.validate_stores_id(context, image_service_store_id) # Check if multiattach be set with cacheable self._check_cacheable(specs, type_id) db.volume_type_extra_specs_update_or_create(context, type_id, specs) # Get created_at and updated_at for notification volume_type = volume_types.get_volume_type(context, type_id) notifier_info = dict(type_id=type_id, specs=specs, created_at=volume_type['created_at'], updated_at=volume_type['updated_at']) notifier = rpc.get_notifier('volumeTypeExtraSpecs') notifier.info(context, 'volume_type_extra_specs.create', notifier_info) return body @validation.schema(types_extra_specs.update) def update(self, req, type_id, id, body): context = req.environ['cinder.context'] context.authorize(policy.UPDATE_POLICY) self._allow_update(context, type_id) self._check_type(context, type_id) if id not in body: expl = _('Request body and URI mismatch') raise webob.exc.HTTPBadRequest(explanation=expl) if 'image_service:store_id' in body: image_service_store_id = body['image_service:store_id'] image_utils.validate_stores_id(context, image_service_store_id) if 'extra_specs' in body: specs = body['extra_specs'] # Check if multiattach be set with cacheable self._check_cacheable(specs, type_id) db.volume_type_extra_specs_update_or_create(context, type_id, body) # Get created_at and updated_at for notification volume_type = volume_types.get_volume_type(context, type_id) notifier_info = dict(type_id=type_id, id=id, created_at=volume_type['created_at'], updated_at=volume_type['updated_at']) notifier = rpc.get_notifier('volumeTypeExtraSpecs') notifier.info(context, 'volume_type_extra_specs.update', notifier_info) return body def show(self, req, type_id, id): """Return a single extra spec item.""" context = req.environ['cinder.context'] context.authorize(policy.GET_POLICY) 
self._check_type(context, type_id) specs = self._get_extra_specs(context, type_id) if id in specs['extra_specs']: return {id: specs['extra_specs'][id]} else: raise exception.VolumeTypeExtraSpecsNotFound( volume_type_id=type_id, extra_specs_key=id) @wsgi.response(HTTPStatus.ACCEPTED) def delete(self, req, type_id, id): """Deletes an existing extra spec.""" context = req.environ['cinder.context'] self._check_type(context, type_id) context.authorize(policy.DELETE_POLICY) self._allow_update(context, type_id) # Not found exception will be handled at the wsgi level db.volume_type_extra_specs_delete(context, type_id, id) # Get created_at and updated_at for notification volume_type = volume_types.get_volume_type(context, type_id) notifier_info = dict(type_id=type_id, id=id, created_at=volume_type['created_at'], updated_at=volume_type['updated_at'], deleted_at=volume_type['deleted_at']) notifier = rpc.get_notifier('volumeTypeExtraSpecs') notifier.info(context, 'volume_type_extra_specs.delete', notifier_info) class Types_extra_specs(extensions.ExtensionDescriptor): """Type extra specs support.""" name = "TypesExtraSpecs" alias = "os-types-extra-specs" updated = "2011-08-24T00:00:00+00:00" def get_resources(self): resources = [] res = extensions.ResourceExtension('extra_specs', VolumeTypeExtraSpecsController(), parent=dict(member_name='type', collection_name='types') ) resources.append(res) return resources ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/api/contrib/types_manage.py0000664000175000017500000001712500000000000021600 0ustar00zuulzuul00000000000000# Copyright (c) 2011 Zadara Storage Inc. # Copyright (c) 2011 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
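# --- Illustrative sketch (not part of the Cinder source) ---
# Stand-alone restatement of _check_cacheable() above: a volume type may not
# end up with both 'multiattach' and 'cacheable' enabled, whether the
# conflicting value is already stored on the type or arrives in the new
# specs.  TRUE below is a placeholder for whatever string form the real
# module compares boolean extra specs against.

TRUE = 'True'  # placeholder value; see the real module for the exact string

def conflicts(new_specs, existing_specs):
    multiattach = existing_specs.get('multiattach')
    cacheable = existing_specs.get('cacheable')
    return ((new_specs.get('multiattach') == TRUE and cacheable == TRUE) or
            (new_specs.get('cacheable') == TRUE and multiattach == TRUE) or
            (new_specs.get('cacheable') == TRUE and
             new_specs.get('multiattach') == TRUE))

assert conflicts({'cacheable': TRUE}, {'multiattach': TRUE})
assert conflicts({'cacheable': TRUE, 'multiattach': TRUE}, {})
assert not conflicts({'cacheable': TRUE}, {})
# --- end sketch ---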
"""The volume types manage extension.""" from http import HTTPStatus from oslo_utils import strutils import webob from cinder.api import extensions from cinder.api.openstack import wsgi from cinder.api.schemas import volume_types as volume_types_schema from cinder.api import validation from cinder.api.views import types as views_types from cinder import exception from cinder.i18n import _ from cinder.policies import volume_type as policy from cinder import rpc from cinder import utils from cinder.volume import volume_types class VolumeTypesManageController(wsgi.Controller): """The volume types API controller for the OpenStack API.""" _view_builder_class = views_types.ViewBuilder @utils.if_notifications_enabled def _notify_volume_type_error(self, context, method, err, volume_type=None, id=None, name=None): payload = dict( volume_types=volume_type, name=name, id=id, error_message=err) rpc.get_notifier('volumeType').error(context, method, payload) @utils.if_notifications_enabled def _notify_volume_type_info(self, context, method, volume_type): payload = dict(volume_types=volume_type) rpc.get_notifier('volumeType').info(context, method, payload) @wsgi.action("create") @validation.schema(volume_types_schema.create) def _create(self, req, body): """Creates a new volume type.""" context = req.environ['cinder.context'] context.authorize(policy.CREATE_POLICY) vol_type = body['volume_type'] name = vol_type['name'] description = vol_type.get('description') specs = vol_type.get('extra_specs', {}) is_public = vol_type.get('os-volume-type-access:is_public', True) is_public = strutils.bool_from_string(is_public, strict=True) try: volume_types.create(context, name, specs, is_public, description=description) vol_type = volume_types.get_volume_type_by_name(context, name) req.cache_resource(vol_type, name='types') self._notify_volume_type_info( context, 'volume_type.create', vol_type) except exception.VolumeTypeExists as err: self._notify_volume_type_error( context, 'volume_type.create', err, volume_type=vol_type) raise webob.exc.HTTPConflict(explanation=str(err)) except exception.VolumeTypeNotFoundByName as err: self._notify_volume_type_error( context, 'volume_type.create', err, name=name) # Not found exception will be handled at the wsgi level raise return self._view_builder.show(req, vol_type) @wsgi.action("update") @validation.schema(volume_types_schema.update) def _update(self, req, id, body): # Update description for a given volume type. context = req.environ['cinder.context'] context.authorize(policy.UPDATE_POLICY) vol_type = body['volume_type'] description = vol_type.get('description') name = vol_type.get('name') is_public = vol_type.get('is_public') if is_public is not None: is_public = strutils.bool_from_string(is_public, strict=True) # If name specified, name can not be empty. if name and len(name.strip()) == 0: msg = _("Volume type name can not be empty.") raise webob.exc.HTTPBadRequest(explanation=msg) # Name, description and is_public can not be None. # Specify one of them, or a combination thereof. 
if name is None and description is None and is_public is None: msg = _("Specify volume type name, description, is_public or " "a combination thereof.") raise webob.exc.HTTPBadRequest(explanation=msg) try: volume_types.update(context, id, name, description, is_public=is_public) # Get the updated vol_type = volume_types.get_volume_type(context, id) req.cache_resource(vol_type, name='types') self._notify_volume_type_info( context, 'volume_type.update', vol_type) except exception.VolumeTypeNotFound as err: self._notify_volume_type_error( context, 'volume_type.update', err, id=id) # Not found exception will be handled at the wsgi level raise except exception.VolumeTypeExists as err: self._notify_volume_type_error( context, 'volume_type.update', err, volume_type=vol_type) raise webob.exc.HTTPConflict(explanation=str(err)) except exception.VolumeTypeUpdateFailed as err: self._notify_volume_type_error( context, 'volume_type.update', err, volume_type=vol_type) raise webob.exc.HTTPInternalServerError( explanation=str(err)) return self._view_builder.show(req, vol_type) @wsgi.action("delete") def _delete(self, req, id): """Deletes an existing volume type.""" context = req.environ['cinder.context'] context.authorize(policy.DELETE_POLICY) try: vol_type = volume_types.get_volume_type(context, id) volume_types.destroy(context, vol_type['id']) self._notify_volume_type_info( context, 'volume_type.delete', vol_type) except exception.VolumeTypeInUse as err: self._notify_volume_type_error( context, 'volume_type.delete', err, volume_type=vol_type) msg = _('Target volume type is still in use.') raise webob.exc.HTTPBadRequest(explanation=msg) except exception.VolumeTypeNotFound as err: self._notify_volume_type_error( context, 'volume_type.delete', err, id=id) # Not found exception will be handled at the wsgi level raise except (exception.VolumeTypeDeletionError, exception.VolumeTypeDefaultDeletionError) as err: self._notify_volume_type_error( context, 'volume_type.delete', err, volume_type=vol_type) raise webob.exc.HTTPBadRequest(explanation=err.msg) except exception.VolumeTypeDefaultMisconfiguredError as err: self._notify_volume_type_error( context, 'volume_type.delete', err, volume_type=vol_type) raise webob.exc.HTTPInternalServerError(explanation=err.msg) return webob.Response(status_int=HTTPStatus.ACCEPTED) class Types_manage(extensions.ExtensionDescriptor): """Types manage support.""" name = "TypesManage" alias = "os-types-manage" updated = "2011-08-24T00:00:00+00:00" def get_controller_extensions(self): controller = VolumeTypesManageController() extension = extensions.ControllerExtension(self, 'types', controller) return [extension] ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/api/contrib/used_limits.py0000664000175000017500000000527600000000000021451 0ustar00zuulzuul00000000000000# Copyright 2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
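# --- Illustrative sketch (not part of the Cinder source) ---
# The argument checks performed by VolumeTypesManageController._update()
# above, extracted into a stand-alone helper: an update must supply at least
# one of name, description or is_public, and a supplied name may not be
# blank.  Returns an error message, or None when the input is acceptable.

def validate_type_update(name=None, description=None, is_public=None):
    if name and len(name.strip()) == 0:
        return "Volume type name can not be empty."
    if name is None and description is None and is_public is None:
        return ("Specify volume type name, description, is_public or "
                "a combination thereof.")
    return None

assert validate_type_update(name='fast') is None
assert validate_type_update(name='   ') is not None
assert validate_type_update() is not None
# --- end sketch ---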
from cinder.api import extensions from cinder.api import microversions as mv from cinder.api.openstack import wsgi from cinder.policies import limits as policy from cinder import quota QUOTAS = quota.QUOTAS class UsedLimitsController(wsgi.Controller): @wsgi.extends def index(self, req, resp_obj): context = req.environ['cinder.context'] if context.authorize( policy.EXTEND_LIMIT_ATTRIBUTE_POLICY, fatal=False): params = req.params.copy() req_version = req.api_version_request # TODO(wangxiyuan): Support "tenant_id" here to keep the backwards # compatibility. Remove it once we drop all support for "tenant". if (req_version.matches(None, mv.GROUP_REPLICATION) or not context.is_admin): params.pop('project_id', None) params.pop('tenant_id', None) project_id = params.get( 'project_id', params.get('tenant_id', context.project_id)) quotas = QUOTAS.get_project_quotas(context, project_id, usages=True) quota_map = { 'totalVolumesUsed': 'volumes', 'totalGigabytesUsed': 'gigabytes', 'totalSnapshotsUsed': 'snapshots', 'totalBackupsUsed': 'backups', 'totalBackupGigabytesUsed': 'backup_gigabytes' } used_limits = {} for display_name, single_quota in quota_map.items(): if single_quota in quotas: used_limits[display_name] = quotas[single_quota]['in_use'] resp_obj.obj['limits']['absolute'].update(used_limits) class Used_limits(extensions.ExtensionDescriptor): """Provide data on limited resources that are being used.""" name = "UsedLimits" alias = 'os-used-limits' updated = "2013-10-03T00:00:00+00:00" def get_controller_extensions(self): controller = UsedLimitsController() extension = extensions.ControllerExtension(self, 'limits', controller) return [extension] ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/api/contrib/volume_actions.py0000664000175000017500000004220300000000000022146 0ustar00zuulzuul00000000000000# Copyright 2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
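# --- Illustrative sketch (not part of the Cinder source) ---
# How UsedLimitsController.index() above maps Cinder quota usage onto the
# 'absolute' limits keys returned by the limits API.  The quotas argument
# mirrors the shape returned by QUOTAS.get_project_quotas(..., usages=True);
# the sample data below is made up for the example.

QUOTA_MAP = {
    'totalVolumesUsed': 'volumes',
    'totalGigabytesUsed': 'gigabytes',
    'totalSnapshotsUsed': 'snapshots',
    'totalBackupsUsed': 'backups',
    'totalBackupGigabytesUsed': 'backup_gigabytes',
}

def used_limits(quotas):
    return {display: quotas[name]['in_use']
            for display, name in QUOTA_MAP.items() if name in quotas}

_quotas = {'volumes': {'limit': 10, 'in_use': 3},
           'gigabytes': {'limit': 1000, 'in_use': 120}}
assert used_limits(_quotas) == {'totalVolumesUsed': 3,
                                'totalGigabytesUsed': 120}
# --- end sketch ---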
from http import HTTPStatus from castellan import key_manager from oslo_config import cfg import oslo_messaging as messaging from oslo_utils import strutils import webob from cinder.api import extensions from cinder.api import microversions as mv from cinder.api.openstack import wsgi from cinder.api.schemas import volume_actions as volume_action from cinder.api import validation from cinder import exception from cinder.i18n import _ from cinder.image import glance from cinder.policies import volume_actions as policy from cinder import volume from cinder.volume import volume_utils CONF = cfg.CONF class VolumeActionsController(wsgi.Controller): def __init__(self, *args, **kwargs): super(VolumeActionsController, self).__init__(*args, **kwargs) self._key_mgr = None self.volume_api = volume.API() @property def _key_manager(self): # Allows for lazy initialization of the key manager if self._key_mgr is None: self._key_mgr = key_manager.API(CONF) return self._key_mgr @wsgi.response(HTTPStatus.ACCEPTED) @wsgi.action('os-attach') @validation.schema(volume_action.attach) def _attach(self, req, id, body): """Add attachment metadata.""" context = req.environ['cinder.context'] # Not found exception will be handled at the wsgi level volume = self.volume_api.get(context, id) # instance UUID is an option now instance_uuid = None if 'instance_uuid' in body['os-attach']: instance_uuid = body['os-attach']['instance_uuid'] host_name = None # Keep API backward compatibility if 'host_name' in body['os-attach']: host_name = body['os-attach']['host_name'] mountpoint = body['os-attach']['mountpoint'] mode = body['os-attach'].get('mode', 'rw') try: self.volume_api.attach(context, volume, instance_uuid, host_name, mountpoint, mode) except messaging.RemoteError as error: if error.exc_type in ['InvalidVolume', 'InvalidUUID', 'InvalidVolumeAttachMode']: msg = _("Error attaching volume - %(err_type)s: " "%(err_msg)s") % { 'err_type': error.exc_type, 'err_msg': error.value} raise webob.exc.HTTPBadRequest(explanation=msg) else: # There are also few cases where attach call could fail due to # db or volume driver errors. These errors shouldn't be exposed # to the user and in such cases it should raise 500 error. raise @wsgi.response(HTTPStatus.ACCEPTED) @wsgi.action('os-detach') @validation.schema(volume_action.detach) def _detach(self, req, id, body): """Clear attachment metadata.""" context = req.environ['cinder.context'] # Not found exception will be handled at the wsgi level volume = self.volume_api.get(context, id) attachment_id = None attachment_id = body['os-detach'].get('attachment_id', None) try: self.volume_api.detach(context, volume, attachment_id) except messaging.RemoteError as error: if error.exc_type in ['VolumeAttachmentNotFound', 'InvalidVolume']: msg = _("Error detaching volume - %(err_type)s: " "%(err_msg)s") % { 'err_type': error.exc_type, 'err_msg': error.value} raise webob.exc.HTTPBadRequest(explanation=msg) else: # There are also few cases where detach call could fail due to # db or volume driver errors. These errors shouldn't be exposed # to the user and in such cases it should raise 500 error. 
raise @wsgi.response(HTTPStatus.ACCEPTED) @wsgi.action('os-reserve') def _reserve(self, req, id, body): """Mark volume as reserved.""" context = req.environ['cinder.context'] # Not found exception will be handled at the wsgi level volume = self.volume_api.get(context, id) self.volume_api.reserve_volume(context, volume) @wsgi.response(HTTPStatus.ACCEPTED) @wsgi.action('os-unreserve') def _unreserve(self, req, id, body): """Unmark volume as reserved.""" context = req.environ['cinder.context'] # Not found exception will be handled at the wsgi level volume = self.volume_api.get(context, id) self.volume_api.unreserve_volume(context, volume) @wsgi.response(HTTPStatus.ACCEPTED) @wsgi.action('os-begin_detaching') def _begin_detaching(self, req, id, body): """Update volume status to 'detaching'.""" context = req.environ['cinder.context'] # Not found exception will be handled at the wsgi level volume = self.volume_api.get(context, id) self.volume_api.begin_detaching(context, volume) @wsgi.response(HTTPStatus.ACCEPTED) @wsgi.action('os-roll_detaching') def _roll_detaching(self, req, id, body): """Roll back volume status to 'in-use'.""" context = req.environ['cinder.context'] # Not found exception will be handled at the wsgi level volume = self.volume_api.get(context, id) self.volume_api.roll_detaching(context, volume) @wsgi.action('os-initialize_connection') @validation.schema(volume_action.initialize_connection) def _initialize_connection(self, req, id, body): """Initialize volume attachment.""" context = req.environ['cinder.context'] # Not found exception will be handled at the wsgi level volume = self.volume_api.get(context, id) connector = body['os-initialize_connection']['connector'] try: info = self.volume_api.initialize_connection(context, volume, connector) except exception.InvalidInput as err: raise webob.exc.HTTPBadRequest( explanation=err.msg) except exception.VolumeBackendAPIException: msg = _("Unable to fetch connection information from backend.") raise webob.exc.HTTPInternalServerError(explanation=msg) except messaging.RemoteError as error: if error.exc_type == 'InvalidInput': raise exception.InvalidInput(reason=error.value) raise info['enforce_multipath'] = connector.get('enforce_multipath', False) return {'connection_info': info} @wsgi.response(HTTPStatus.ACCEPTED) @wsgi.action('os-terminate_connection') @validation.schema(volume_action.terminate_connection) def _terminate_connection(self, req, id, body): """Terminate volume attachment.""" context = req.environ['cinder.context'] # Not found exception will be handled at the wsgi level volume = self.volume_api.get(context, id) connector = body['os-terminate_connection']['connector'] try: self.volume_api.terminate_connection(context, volume, connector) except exception.VolumeBackendAPIException: msg = _("Unable to terminate volume connection from backend.") raise webob.exc.HTTPInternalServerError(explanation=msg) @wsgi.response(HTTPStatus.ACCEPTED) @wsgi.action('os-volume_upload_image') @validation.schema(volume_action.volume_upload_image, mv.BASE_VERSION, mv.get_prior_version(mv.UPLOAD_IMAGE_PARAMS)) @validation.schema(volume_action.volume_upload_image_v31, mv.UPLOAD_IMAGE_PARAMS) def _volume_upload_image(self, req, id, body): """Uploads the specified volume to image service.""" context = req.environ['cinder.context'] params = body['os-volume_upload_image'] req_version = req.api_version_request force = params.get('force', 'False') force = strutils.bool_from_string(force, strict=True) # Not found exception will be handled at the wsgi 
level volume = self.volume_api.get(context, id) context.authorize(policy.UPLOAD_IMAGE_POLICY) disk_format = params.get("disk_format", "raw") image_metadata = {"container_format": params.get( "container_format", "bare"), "disk_format": disk_format, "name": params["image_name"]} if volume.encryption_key_id: # encrypted volumes cannot be converted on upload if (image_metadata['disk_format'] != 'raw' or image_metadata['container_format'] != 'bare'): msg = _("An encrypted volume uploaded as an image must use " "'raw' disk_format and 'bare' container_format, " "which are the defaults for these options.") raise webob.exc.HTTPBadRequest(explanation=msg) # Clone volume encryption key: the current key cannot # be reused because it will be deleted when the volume is # deleted. encryption_key_id = volume_utils.clone_encryption_key( context, self._key_manager, volume.encryption_key_id) image_metadata['cinder_encryption_key_id'] = encryption_key_id image_metadata['cinder_encryption_key_deletion_policy'] = \ 'on_image_deletion' if req_version >= mv.get_api_version( mv.UPLOAD_IMAGE_PARAMS): image_metadata['visibility'] = params.get('visibility', 'private') image_metadata['protected'] = strutils.bool_from_string( params.get('protected', 'False'), strict=True) if image_metadata['visibility'] == 'public': context.authorize(policy.UPLOAD_PUBLIC_POLICY) try: response = self.volume_api.copy_volume_to_image(context, volume, image_metadata, force) except exception.InvalidVolume as error: raise webob.exc.HTTPBadRequest(explanation=error.msg) except ValueError as error: raise webob.exc.HTTPBadRequest(explanation=str(error)) except messaging.RemoteError as error: msg = "%(err_type)s: %(err_msg)s" % {'err_type': error.exc_type, 'err_msg': error.value} raise webob.exc.HTTPBadRequest(explanation=msg) except Exception as error: raise webob.exc.HTTPBadRequest(explanation=str(error)) return {'os-volume_upload_image': response} @wsgi.response(HTTPStatus.ACCEPTED) @wsgi.action('os-extend') @validation.schema(volume_action.extend) def _extend(self, req, id, body): """Extend size of volume.""" context = req.environ['cinder.context'] req_version = req.api_version_request # Not found exception will be handled at the wsgi level volume = self.volume_api.get(context, id) size = int(body['os-extend']['new_size']) try: if (req_version.matches(mv.VOLUME_EXTEND_INUSE) and volume.status in ['in-use']): self.volume_api.extend_attached_volume(context, volume, size) else: self.volume_api.extend(context, volume, size) except exception.InvalidVolume as error: raise webob.exc.HTTPBadRequest(explanation=error.msg) @wsgi.response(HTTPStatus.ACCEPTED) @wsgi.action('os-update_readonly_flag') @validation.schema(volume_action.volume_readonly_update) def _volume_readonly_update(self, req, id, body): """Update volume readonly flag.""" context = req.environ['cinder.context'] # Not found exception will be handled at the wsgi level volume = self.volume_api.get(context, id) readonly_flag = body['os-update_readonly_flag']['readonly'] readonly_flag = strutils.bool_from_string(readonly_flag, strict=True) self.volume_api.update_readonly_flag(context, volume, readonly_flag) @wsgi.response(HTTPStatus.ACCEPTED) @wsgi.action('os-retype') @validation.schema(volume_action.retype) def _retype(self, req, id, body): """Change type of existing volume.""" context = req.environ['cinder.context'] volume = self.volume_api.get(context, id) new_type = body['os-retype']['new_type'] policy = body['os-retype'].get('migration_policy') self.volume_api.retype(context, volume, 
new_type, policy) @wsgi.response(HTTPStatus.OK) @wsgi.action('os-set_bootable') @validation.schema(volume_action.set_bootable) def _set_bootable(self, req, id, body): """Update bootable status of a volume.""" context = req.environ['cinder.context'] # Not found exception will be handled at the wsgi level volume = self.volume_api.get(context, id) bootable = strutils.bool_from_string( body['os-set_bootable']['bootable'], strict=True) update_dict = {'bootable': bootable} self.volume_api.update(context, volume, update_dict) def _get_image_snapshot_and_check_size(self, context, image_uuid, volume_size): image_snapshot = None if image_uuid: image_service = glance.get_default_image_service() image_meta = image_service.show(context, image_uuid) if image_meta is not None: bdms = image_meta.get('properties', {}).get( 'block_device_mapping', []) if bdms: boot_bdm = [bdm for bdm in bdms if ( bdm.get('source_type') == 'snapshot' and bdm.get('boot_index') == 0)] if boot_bdm: try: # validate size image_snap_size = boot_bdm[0].get('volume_size') if image_snap_size > volume_size: msg = (_( "Volume size must be greater than the " "image size. (Image: %(img_size)s, " "Volume: %(vol_size)s).") % { 'img_size': image_snap_size, 'vol_size': volume_size}) raise webob.exc.HTTPBadRequest(explanation=msg) image_snapshot = self.volume_api.get_snapshot( context, boot_bdm[0].get('snapshot_id')) except exception.NotFound: explanation = _( 'Nova specific image is found, but boot ' 'volume snapshot id:%s not found.' ) % boot_bdm[0].get('snapshot_id') raise webob.exc.HTTPNotFound( explanation=explanation) return image_snapshot @wsgi.Controller.api_version(mv.SUPPORT_REIMAGE_VOLUME) @wsgi.response(HTTPStatus.ACCEPTED) @wsgi.action('os-reimage') @validation.schema(volume_action.reimage, mv.SUPPORT_REIMAGE_VOLUME) def _reimage(self, req, id, body): """Re-image a volume with specific image.""" context = req.environ['cinder.context'] # Not found exception will be handled at the wsgi level volume = self.volume_api.get(context, id) params = body['os-reimage'] reimage_reserved = params.get('reimage_reserved', 'False') reimage_reserved = strutils.bool_from_string(reimage_reserved, strict=True) image_id = params['image_id'] image_snap = self._get_image_snapshot_and_check_size( context, image_id, volume.size) try: self.volume_api.reimage(context, volume, image_id, reimage_reserved, image_snap) except exception.InvalidVolume as error: raise webob.exc.HTTPBadRequest(explanation=error.msg) class Volume_actions(extensions.ExtensionDescriptor): """Enable volume actions.""" name = "VolumeActions" alias = "os-volume-actions" updated = "2012-05-31T00:00:00+00:00" def get_controller_extensions(self): controller = VolumeActionsController() extension = extensions.ControllerExtension(self, 'volumes', controller) return [extension] ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/api/contrib/volume_encryption_metadata.py0000664000175000017500000000422200000000000024537 0ustar00zuulzuul00000000000000# Copyright (c) 2013 The Johns Hopkins University/Applied Physics Laboratory # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """The volume encryption metadata extension.""" from cinder.api import extensions from cinder.api.openstack import wsgi from cinder import db from cinder import objects from cinder.policies import volumes as policy class VolumeEncryptionMetadataController(wsgi.Controller): """The volume encryption metadata API extension.""" def index(self, req, volume_id): """Returns the encryption metadata for a given volume.""" context = req.environ['cinder.context'] volume = objects.Volume.get_by_id(context, volume_id) context.authorize(policy.ENCRYPTION_METADATA_POLICY, target_obj=volume) return db.volume_encryption_metadata_get(context, volume_id) def show(self, req, volume_id, id): """Return a single encryption item.""" encryption_item = self.index(req, volume_id) if encryption_item is not None: return encryption_item[id] else: return None class Volume_encryption_metadata(extensions.ExtensionDescriptor): """Volume encryption metadata retrieval support.""" name = "VolumeEncryptionMetadata" alias = "os-volume-encryption-metadata" updated = "2013-07-10T00:00:00+00:00" def get_resources(self): resources = [] res = extensions.ResourceExtension( 'encryption', VolumeEncryptionMetadataController(), parent=dict(member_name='volume', collection_name='volumes')) resources.append(res) return resources ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/api/contrib/volume_host_attribute.py0000664000175000017500000000370300000000000023550 0ustar00zuulzuul00000000000000# Copyright 2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
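# --- Illustrative sketch (not part of the Cinder source) ---
# The lookup pattern used by VolumeEncryptionMetadataController above:
# index() returns the whole encryption-metadata mapping for a volume and
# show() simply picks one item out of it.  Here the database call is
# replaced by a hypothetical in-memory dict so the flow can be exercised
# stand-alone; the field values are made up.

_FAKE_DB = {
    'vol-1': {'encryption_key_id': 'key-uuid',
              'cipher': 'aes-xts-plain64',
              'key_size': 256,
              'provider': 'luks'},
}

def index(volume_id):
    return _FAKE_DB.get(volume_id)

def show(volume_id, item):
    metadata = index(volume_id)
    if metadata is not None:
        return metadata[item]
    return None

assert show('vol-1', 'cipher') == 'aes-xts-plain64'
assert show('vol-2', 'cipher') is None
# --- end sketch ---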
from cinder.api import extensions from cinder.api.openstack import wsgi from cinder.policies import volumes as policy class VolumeHostAttributeController(wsgi.Controller): def _add_volume_host_attribute(self, req, resp_volume): db_volume = req.get_db_volume(resp_volume['id']) key = "%s:host" % Volume_host_attribute.alias resp_volume[key] = db_volume['host'] @wsgi.extends def show(self, req, resp_obj, id): context = req.environ['cinder.context'] if context.authorize(policy.HOST_ATTRIBUTE_POLICY, fatal=False): volume = resp_obj.obj['volume'] self._add_volume_host_attribute(req, volume) @wsgi.extends def detail(self, req, resp_obj): context = req.environ['cinder.context'] if context.authorize(policy.HOST_ATTRIBUTE_POLICY, fatal=False): for vol in list(resp_obj.obj['volumes']): self._add_volume_host_attribute(req, vol) class Volume_host_attribute(extensions.ExtensionDescriptor): """Expose host as an attribute of a volume.""" name = "VolumeHostAttribute" alias = "os-vol-host-attr" updated = "2011-11-03T00:00:00+00:00" def get_controller_extensions(self): controller = VolumeHostAttributeController() extension = extensions.ControllerExtension(self, 'volumes', controller) return [extension] ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/api/contrib/volume_image_metadata.py0000664000175000017500000001474600000000000023443 0ustar00zuulzuul00000000000000# Copyright 2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """The Volume Image Metadata API extension.""" from http import HTTPStatus from oslo_log import log as logging import webob from cinder.api import common from cinder.api import extensions from cinder.api.openstack import wsgi from cinder.api.schemas import volume_image_metadata from cinder.api import validation from cinder import exception from cinder.i18n import _ from cinder import objects from cinder.policies import volume_metadata as policy from cinder import volume LOG = logging.getLogger(__name__) class VolumeImageMetadataController(wsgi.Controller): def __init__(self, *args, **kwargs): super(VolumeImageMetadataController, self).__init__(*args, **kwargs) self.volume_api = volume.API() def _get_image_metadata(self, context, volume_id): # Not found exception will be handled at the wsgi level volume = self.volume_api.get(context, volume_id) meta = self.volume_api.get_volume_image_metadata(context, volume) return (volume, meta) def _add_image_metadata(self, context, resp_volume_list, image_metas=None): """Appends the image metadata to each of the given volume. :param context: the request context :param resp_volume_list: the response volume list :param image_metas: The image metadata to append, if None is provided it will be retrieved from the database. An empty dict means there is no metadata and it should not be retrieved from the db. 
""" vol_id_list = [] for vol in resp_volume_list: vol_id_list.append(vol['id']) if image_metas is None: try: image_metas = self.volume_api.get_list_volumes_image_metadata( context, vol_id_list) except Exception as e: LOG.debug('Get image metadata error: %s', e) return if image_metas: for vol in resp_volume_list: image_meta = image_metas.get(vol['id']) if image_meta: vol['volume_image_metadata'] = dict(image_meta) @wsgi.extends def show(self, req, resp_obj, id): context = req.environ['cinder.context'] if context.authorize(policy.IMAGE_METADATA_SHOW_POLICY, fatal=False): self._add_image_metadata(context, [resp_obj.obj['volume']]) @wsgi.extends def detail(self, req, resp_obj): context = req.environ['cinder.context'] if context.authorize(policy.IMAGE_METADATA_SHOW_POLICY, fatal=False): # Just get the image metadata of those volumes in response. volumes = list(resp_obj.obj.get('volumes', [])) if volumes: self._add_image_metadata(context, volumes) @wsgi.action("os-set_image_metadata") @validation.schema(volume_image_metadata.set_image_metadata) def create(self, req, id, body): context = req.environ['cinder.context'] volume = objects.Volume.get_by_id(context, id) if context.authorize(policy.IMAGE_METADATA_SET_POLICY, target_obj=volume): metadata = body['os-set_image_metadata']['metadata'] new_metadata = self._update_volume_image_metadata(context, id, metadata, delete=False) return {'metadata': new_metadata} def _update_volume_image_metadata(self, context, volume_id, metadata, delete=False): try: volume = self.volume_api.get(context, volume_id) return self.volume_api.update_volume_metadata( context, volume, metadata, delete=False, meta_type=common.METADATA_TYPES.image) # Not found exception will be handled at the wsgi level except (ValueError, AttributeError): msg = _("Malformed request body.") raise webob.exc.HTTPBadRequest(explanation=msg) except exception.InvalidVolumeMetadata as error: raise webob.exc.HTTPBadRequest(explanation=error.msg) except exception.InvalidVolumeMetadataSize as error: raise webob.exc.HTTPRequestEntityTooLarge(explanation=error.msg) @wsgi.action("os-show_image_metadata") def index(self, req, id, body): context = req.environ['cinder.context'] return {'metadata': self._get_image_metadata(context, id)[1]} @wsgi.action("os-unset_image_metadata") @validation.schema(volume_image_metadata.unset_image_metadata) def delete(self, req, id, body): """Deletes an existing image metadata.""" context = req.environ['cinder.context'] volume = objects.Volume.get_by_id(context, id) if context.authorize(policy.IMAGE_METADATA_REMOVE_POLICY, target_obj=volume): key = body['os-unset_image_metadata']['key'] vol, metadata = self._get_image_metadata(context, id) if key not in metadata: raise exception.GlanceMetadataNotFound(id=id) self.volume_api.delete_volume_metadata( context, vol, key, meta_type=common.METADATA_TYPES.image) return webob.Response(status_int=HTTPStatus.OK) class Volume_image_metadata(extensions.ExtensionDescriptor): """Show image metadata associated with the volume.""" name = "VolumeImageMetadata" alias = "os-vol-image-meta" updated = "2012-12-07T00:00:00+00:00" def get_controller_extensions(self): controller = VolumeImageMetadataController() extension = extensions.ControllerExtension(self, 'volumes', controller) return [extension] ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/api/contrib/volume_manage.py0000664000175000017500000001736600000000000021752 0ustar00zuulzuul00000000000000# Copyright 2014 
IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from http import HTTPStatus from oslo_log import log as logging from oslo_utils import strutils import webob from cinder.api import api_utils from cinder.api import common from cinder.api.contrib import resource_common_manage from cinder.api import extensions from cinder.api import microversions as mv from cinder.api.openstack import wsgi from cinder.api.schemas import volume_manage from cinder.api.v2.views import volumes as volume_views from cinder.api import validation from cinder.api.views import manageable_volumes as list_manageable_view from cinder import exception from cinder.i18n import _ from cinder.policies import manageable_volumes as policy from cinder import volume as cinder_volume from cinder.volume import volume_types LOG = logging.getLogger(__name__) class VolumeManageController(wsgi.Controller): """The /os-volume-manage controller for the OpenStack API.""" _view_builder_class = volume_views.ViewBuilder def __init__(self, *args, **kwargs): super(VolumeManageController, self).__init__(*args, **kwargs) self.volume_api = cinder_volume.API() self._list_manageable_view = list_manageable_view.ViewBuilder() @wsgi.response(HTTPStatus.ACCEPTED) @validation.schema(volume_manage.volume_manage_create, mv.BASE_VERSION, mv.get_prior_version(mv.VOLUME_MIGRATE_CLUSTER)) @validation.schema(volume_manage.volume_manage_create_v316, mv.VOLUME_MIGRATE_CLUSTER) def create(self, req, body): """Instruct Cinder to manage a storage object. Manages an existing backend storage object (e.g. a Linux logical volume or a SAN disk) by creating the Cinder objects required to manage it, and possibly renaming the backend storage object (driver dependent) From an API perspective, this operation behaves very much like a volume creation operation, except that properties such as image, snapshot and volume references don't make sense, because we are taking an existing storage object into Cinder management. Required HTTP Body: .. code-block:: json { "volume": { "host": "", "cluster": "", "ref": "" } } See the appropriate Cinder drivers' implementations of the manage_volume method to find out the accepted format of 'ref'. This API call will return with an error if any of the above elements are missing from the request, or if the 'host' element refers to a cinder host that is not registered. The volume will later enter the error state if it is discovered that 'ref' is bad. Optional elements to 'volume' are:: name A name for the new volume. description A description for the new volume. volume_type ID or name of a volume type to associate with the new Cinder volume. Does not necessarily guarantee that the managed volume will have the properties described in the volume_type. The driver may choose to fail if it identifies that the specified volume_type is not compatible with the backend storage object. metadata Key/value pairs to be associated with the new volume. availability_zone The availability zone to associate with the new volume. 
bootable If set to True, marks the volume as bootable. """ context = req.environ['cinder.context'] context.authorize(policy.MANAGE_POLICY) volume = body['volume'] cluster_name, host = common.get_cluster_host( req, volume, mv.VOLUME_MIGRATE_CLUSTER) LOG.debug('Manage volume request body: %s', body) kwargs = {} req_volume_type = volume.get('volume_type', None) if req_volume_type: try: kwargs['volume_type'] = volume_types.get_by_name_or_id( context, req_volume_type) except exception.VolumeTypeNotFound: msg = _("Cannot find requested '%s' " "volume type") % req_volume_type raise exception.InvalidVolumeType(reason=msg) else: kwargs['volume_type'] = {} if volume.get('name'): kwargs['name'] = volume.get('name').strip() if volume.get('description'): kwargs['description'] = volume.get('description').strip() kwargs['metadata'] = volume.get('metadata', None) kwargs['availability_zone'] = volume.get('availability_zone', None) bootable = volume.get('bootable', False) kwargs['bootable'] = strutils.bool_from_string(bootable, strict=True) try: new_volume = self.volume_api.manage_existing(context, host, cluster_name, volume['ref'], **kwargs) except exception.ServiceNotFound: msg = _("%(name)s '%(value)s' not found") % { 'name': 'Host' if host else 'Cluster', 'value': host or cluster_name} raise exception.ServiceUnavailable(message=msg) except exception.VolumeTypeDefaultMisconfiguredError as err: raise webob.exc.HTTPInternalServerError(explanation=err.msg) api_utils.add_visible_admin_metadata(new_volume) return self._view_builder.detail(req, new_volume) @wsgi.extends def index(self, req): """Returns a summary list of volumes available to manage.""" context = req.environ['cinder.context'] context.authorize(policy.LIST_MANAGEABLE_POLICY) return resource_common_manage.get_manageable_resources( req, False, self.volume_api.get_manageable_volumes, self._list_manageable_view) @wsgi.extends def detail(self, req): """Returns a detailed list of volumes available to manage.""" context = req.environ['cinder.context'] context.authorize(policy.LIST_MANAGEABLE_POLICY) return resource_common_manage.get_manageable_resources( req, True, self.volume_api.get_manageable_volumes, self._list_manageable_view) class Volume_manage(extensions.ExtensionDescriptor): """Allows existing backend storage to be 'managed' by Cinder.""" name = 'VolumeManage' alias = 'os-volume-manage' updated = '2014-02-10T00:00:00+00:00' def get_resources(self): controller = VolumeManageController() res = extensions.ResourceExtension(Volume_manage.alias, controller, collection_actions= {'detail': 'GET'}) return [res] ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/api/contrib/volume_mig_status_attribute.py0000664000175000017500000000413000000000000024745 0ustar00zuulzuul00000000000000# Copyright 2013 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
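# Illustrative sketch (hypothetical values): when the migration-status policy
# check passes, the controller below annotates each volume in the response
# with two keys built from the extension alias, populated from the volume's
# 'migration_status' and '_name_id' database columns:
#
#   "os-vol-mig-status-attr:migstat": "migrating",
#   "os-vol-mig-status-attr:name_id": "<volume-name-id or null>"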
from cinder.api import extensions from cinder.api.openstack import wsgi from cinder.policies import volumes as policy class VolumeMigStatusAttributeController(wsgi.Controller): def _add_volume_mig_status_attribute(self, req, resp_volume): db_volume = req.get_db_volume(resp_volume['id']) key = "%s:migstat" % Volume_mig_status_attribute.alias resp_volume[key] = db_volume['migration_status'] key = "%s:name_id" % Volume_mig_status_attribute.alias resp_volume[key] = db_volume['_name_id'] @wsgi.extends def show(self, req, resp_obj, id): context = req.environ['cinder.context'] if context.authorize(policy.MIG_ATTRIBUTE_POLICY, fatal=False): self._add_volume_mig_status_attribute(req, resp_obj.obj['volume']) @wsgi.extends def detail(self, req, resp_obj): context = req.environ['cinder.context'] if context.authorize(policy.MIG_ATTRIBUTE_POLICY, fatal=False): for vol in list(resp_obj.obj['volumes']): self._add_volume_mig_status_attribute(req, vol) class Volume_mig_status_attribute(extensions.ExtensionDescriptor): """Expose migration_status as an attribute of a volume.""" name = "VolumeMigStatusAttribute" alias = "os-vol-mig-status-attr" updated = "2013-08-08T00:00:00+00:00" def get_controller_extensions(self): controller = VolumeMigStatusAttributeController() extension = extensions.ControllerExtension(self, 'volumes', controller) return [extension] ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/api/contrib/volume_tenant_attribute.py0000664000175000017500000000376700000000000024076 0ustar00zuulzuul00000000000000# Copyright 2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
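# Illustrative sketch (hypothetical value): when the tenant-attribute policy
# check passes, the controller below exposes the owning project of each
# volume in the response as:
#
#   "os-vol-tenant-attr:tenant_id": "<project-uuid>"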
from cinder.api import extensions from cinder.api.openstack import wsgi from cinder.policies import volumes as policy class VolumeTenantAttributeController(wsgi.Controller): def _add_volume_tenant_attribute(self, req, resp_volume): db_volume = req.get_db_volume(resp_volume['id']) key = "%s:tenant_id" % Volume_tenant_attribute.alias resp_volume[key] = db_volume['project_id'] @wsgi.extends def show(self, req, resp_obj, id): context = req.environ['cinder.context'] if context.authorize(policy.TENANT_ATTRIBUTE_POLICY, fatal=False): volume = resp_obj.obj['volume'] self._add_volume_tenant_attribute(req, volume) @wsgi.extends def detail(self, req, resp_obj): context = req.environ['cinder.context'] if context.authorize(policy.TENANT_ATTRIBUTE_POLICY, fatal=False): for vol in list(resp_obj.obj['volumes']): self._add_volume_tenant_attribute(req, vol) class Volume_tenant_attribute(extensions.ExtensionDescriptor): """Expose the internal project_id as an attribute of a volume.""" name = "VolumeTenantAttribute" alias = "os-vol-tenant-attr" updated = "2011-11-03T00:00:00+00:00" def get_controller_extensions(self): controller = VolumeTenantAttributeController() extension = extensions.ControllerExtension(self, 'volumes', controller) return [extension] ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/api/contrib/volume_transfer.py0000664000175000017500000001365500000000000022343 0ustar00zuulzuul00000000000000# Copyright 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
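# Rough usage sketch, assuming the usual block-storage endpoint layout; the
# URL prefix and all values are illustrative placeholders. A transfer is
# created against the collection registered below and later accepted through
# the 'accept' member action using the auth_key returned by the create call:
#
#   POST /v3/<project_id>/os-volume-transfer
#        {"transfer": {"volume_id": "<volume-uuid>", "name": "my-transfer"}}
#   POST /v3/<project_id>/os-volume-transfer/<transfer_id>/accept
#        {"accept": {"auth_key": "<auth_key from the create response>"}}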
from http import HTTPStatus from oslo_log import log as logging import webob from webob import exc from cinder.api import common from cinder.api import extensions from cinder.api.openstack import wsgi from cinder.api.schemas import volume_transfer from cinder.api import validation from cinder.api.views import transfers as transfer_view from cinder import exception from cinder import transfer as transferAPI LOG = logging.getLogger(__name__) class VolumeTransferController(wsgi.Controller): """The Volume Transfer API controller for the OpenStack API.""" _view_builder_class = transfer_view.ViewBuilder def __init__(self): self.transfer_api = transferAPI.API() super(VolumeTransferController, self).__init__() def show(self, req, id): """Return data about active transfers.""" context = req.environ['cinder.context'] # Not found exception will be handled at the wsgi level transfer = self.transfer_api.get(context, transfer_id=id) return self._view_builder.detail(req, transfer) def index(self, req): """Returns a summary list of transfers.""" return self._get_transfers(req, is_detail=False) def detail(self, req): """Returns a detailed list of transfers.""" return self._get_transfers(req, is_detail=True) def _get_transfers(self, req, is_detail): """Returns a list of transfers, transformed through view builder.""" context = req.environ['cinder.context'] filters = req.params.copy() LOG.debug('Listing volume transfers') if 'name' in filters: filters['display_name'] = filters.pop('name') transfers = self.transfer_api.get_all(context, filters=filters, sort_keys=['created_at', 'id'], sort_dirs=['asc', 'asc']) transfer_count = len(transfers) limited_list = common.limited(transfers, req) if is_detail: transfers = self._view_builder.detail_list(req, limited_list, transfer_count) else: transfers = self._view_builder.summary_list(req, limited_list, transfer_count) return transfers @wsgi.response(HTTPStatus.ACCEPTED) @validation.schema(volume_transfer.create) def create(self, req, body): """Create a new volume transfer.""" LOG.debug('Creating new volume transfer %s', body) context = req.environ['cinder.context'] transfer = body['transfer'] volume_id = transfer['volume_id'] name = transfer.get('name', None) if name is not None: name = name.strip() LOG.info("Creating transfer of volume %s", volume_id) try: new_transfer = self.transfer_api.create(context, volume_id, name, no_snapshots=False) # Not found exception will be handled at the wsgi level except exception.InvalidVolume as error: raise exc.HTTPBadRequest(explanation=error.msg) transfer = self._view_builder.create(req, dict(new_transfer)) return transfer @wsgi.response(HTTPStatus.ACCEPTED) @validation.schema(volume_transfer.accept) def accept(self, req, id, body): """Accept a new volume transfer.""" transfer_id = id LOG.debug('Accepting volume transfer %s', transfer_id) context = req.environ['cinder.context'] accept = body['accept'] auth_key = accept['auth_key'] LOG.info("Accepting transfer %s", transfer_id) try: accepted_transfer = self.transfer_api.accept(context, transfer_id, auth_key) except exception.VolumeSizeExceedsAvailableQuota as error: raise exc.HTTPRequestEntityTooLarge( explanation=error.msg, headers={'Retry-After': '0'}) except exception.InvalidVolume as error: raise exc.HTTPBadRequest(explanation=error.msg) transfer = \ self._view_builder.summary(req, dict(accepted_transfer)) return transfer def delete(self, req, id): """Delete a transfer.""" context = req.environ['cinder.context'] LOG.info("Delete transfer with id: %s", id) # Not found exception 
will be handled at the wsgi level self.transfer_api.delete(context, transfer_id=id) return webob.Response(status_int=HTTPStatus.ACCEPTED) class Volume_transfer(extensions.ExtensionDescriptor): """Volume transfer management support.""" name = "VolumeTransfer" alias = "os-volume-transfer" updated = "2013-05-29T00:00:00+00:00" def get_resources(self): resources = [] res = extensions.ResourceExtension(Volume_transfer.alias, VolumeTransferController(), collection_actions={'detail': 'GET'}, member_actions={'accept': 'POST'}) resources.append(res) return resources ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/api/contrib/volume_type_access.py0000664000175000017500000001310700000000000023011 0ustar00zuulzuul00000000000000# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """The volume type access extension.""" from http import HTTPStatus import webob from cinder.api import extensions from cinder.api.openstack import wsgi from cinder.api.schemas import volume_type_access from cinder.api import validation from cinder import exception from cinder.i18n import _ from cinder.policies import volume_access as policy from cinder.volume import volume_types def _marshall_volume_type_access(vol_type): rval = [] for project_id in vol_type['projects']: rval.append({'volume_type_id': vol_type['id'], 'project_id': project_id}) return {'volume_type_access': rval} class VolumeTypeAccessController(object): """The volume type access API controller for the OpenStack API.""" def index(self, req, type_id): context = req.environ['cinder.context'] context.authorize(policy.TYPE_ACCESS_WHO_POLICY) # Not found exception will be handled at the wsgi level vol_type = volume_types.get_volume_type( context, type_id, expected_fields=['projects']) if vol_type['is_public']: expl = _("Access list not available for public volume types.") raise exception.VolumeTypeAccessNotFound(message=expl) return _marshall_volume_type_access(vol_type) class VolumeTypeActionController(wsgi.Controller): """The volume type access API controller for the OpenStack API.""" def _extend_vol_type(self, vol_type_rval, vol_type_ref): if vol_type_ref: key = "%s:is_public" % (Volume_type_access.alias) vol_type_rval[key] = vol_type_ref.get('is_public', True) @wsgi.extends def show(self, req, resp_obj, id): context = req.environ['cinder.context'] if context.authorize(policy.TYPE_ACCESS_POLICY, fatal=False): vol_type = req.cached_resource_by_id(id, name='types') self._extend_vol_type(resp_obj.obj['volume_type'], vol_type) @wsgi.extends def index(self, req, resp_obj): context = req.environ['cinder.context'] if context.authorize(policy.TYPE_ACCESS_POLICY, fatal=False): for vol_type_rval in list(resp_obj.obj['volume_types']): type_id = vol_type_rval['id'] vol_type = req.cached_resource_by_id(type_id, name='types') self._extend_vol_type(vol_type_rval, vol_type) # TODO: remove this, there is no /types/detail call for this to extend @wsgi.extends def detail(self, req, resp_obj): context = 
req.environ['cinder.context'] if context.authorize(policy.TYPE_ACCESS_POLICY, fatal=False): for vol_type_rval in list(resp_obj.obj['volume_types']): type_id = vol_type_rval['id'] vol_type = req.cached_resource_by_id(type_id, name='types') self._extend_vol_type(vol_type_rval, vol_type) @wsgi.extends(action='create') def create(self, req, body, resp_obj): context = req.environ['cinder.context'] if context.authorize(policy.TYPE_ACCESS_POLICY, fatal=False): type_id = resp_obj.obj['volume_type']['id'] vol_type = req.cached_resource_by_id(type_id, name='types') self._extend_vol_type(resp_obj.obj['volume_type'], vol_type) @wsgi.action('addProjectAccess') @validation.schema(volume_type_access.add_project_access) def _addProjectAccess(self, req, id, body): context = req.environ['cinder.context'] context.authorize(policy.ADD_PROJECT_POLICY) project = body['addProjectAccess']['project'] try: volume_types.add_volume_type_access(context, id, project) # Not found exception will be handled at the wsgi level except exception.VolumeTypeAccessExists as err: raise webob.exc.HTTPConflict(explanation=str(err)) return webob.Response(status_int=HTTPStatus.ACCEPTED) @wsgi.action('removeProjectAccess') @validation.schema(volume_type_access.remove_project_access) def _removeProjectAccess(self, req, id, body): context = req.environ['cinder.context'] context.authorize(policy.REMOVE_PROJECT_POLICY) project = body['removeProjectAccess']['project'] # Not found exception will be handled at the wsgi level volume_types.remove_volume_type_access(context, id, project) return webob.Response(status_int=HTTPStatus.ACCEPTED) class Volume_type_access(extensions.ExtensionDescriptor): """Volume type access support.""" name = "VolumeTypeAccess" alias = "os-volume-type-access" updated = "2014-06-26T00:00:00Z" def get_resources(self): resources = [] res = extensions.ResourceExtension( Volume_type_access.alias, VolumeTypeAccessController(), parent=dict(member_name='type', collection_name='types')) resources.append(res) return resources def get_controller_extensions(self): controller = VolumeTypeActionController() extension = extensions.ControllerExtension(self, 'types', controller) return [extension] ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/api/contrib/volume_type_encryption.py0000664000175000017500000001453200000000000023745 0ustar00zuulzuul00000000000000# Copyright (c) 2013 The Johns Hopkins University/Applied Physics Laboratory # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
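# Rough usage sketch, assuming the usual block-storage endpoint layout; the
# URL and all field values are illustrative, and any field other than
# 'key_size' should be checked against the request schema in
# cinder.api.schemas.volume_type_encryption. Encryption specs are created on
# a volume type roughly like:
#
#   POST /v3/<project_id>/types/<type_id>/encryption
#        {"encryption": {"provider": "luks", "cipher": "aes-xts-plain64",
#                        "key_size": 256, "control_location": "front-end"}}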
"""The volume types encryption extension.""" from http import HTTPStatus import webob from cinder.api import extensions from cinder.api.openstack import wsgi from cinder.api.schemas import volume_type_encryption from cinder.api import validation from cinder import db from cinder import exception from cinder.i18n import _ from cinder.policies import volume_type as policy from cinder import rpc from cinder.volume import volume_types class VolumeTypeEncryptionController(wsgi.Controller): """The volume type encryption API controller for the OpenStack API.""" def _get_volume_type_encryption(self, context, type_id): encryption_ref = db.volume_type_encryption_get(context, type_id) encryption_specs = {} if not encryption_ref: return encryption_specs for key, value in encryption_ref.items(): encryption_specs[key] = value return encryption_specs def _check_type(self, context, type_id): # Not found exception will be handled at the wsgi level volume_types.get_volume_type(context, type_id) def _encrypted_type_in_use(self, context, volume_type_id): volume_list = db.volume_type_encryption_volume_get(context, volume_type_id) # If there is at least one volume in the list # returned, this type is in use by a volume. if len(volume_list) > 0: return True else: return False def index(self, req, type_id): """Returns the encryption specs for a given volume type.""" context = req.environ['cinder.context'] context.authorize(policy.GET_ENCRYPTION_POLICY) self._check_type(context, type_id) return self._get_volume_type_encryption(context, type_id) @validation.schema(volume_type_encryption.create) def create(self, req, type_id, body): """Create encryption specs for an existing volume type.""" context = req.environ['cinder.context'] context.authorize(policy.CREATE_ENCRYPTION_POLICY) key_size = body['encryption'].get('key_size') if key_size is not None: body['encryption']['key_size'] = int(key_size) if self._encrypted_type_in_use(context, type_id): expl = _('Cannot create encryption specs. Volume type in use.') raise webob.exc.HTTPBadRequest(explanation=expl) self._check_type(context, type_id) encryption_specs = self._get_volume_type_encryption(context, type_id) if encryption_specs: raise exception.VolumeTypeEncryptionExists(type_id=type_id) encryption_specs = body['encryption'] db.volume_type_encryption_create(context, type_id, encryption_specs) notifier_info = dict(type_id=type_id, specs=encryption_specs) notifier = rpc.get_notifier('volumeTypeEncryption') notifier.info(context, 'volume_type_encryption.create', notifier_info) return body @validation.schema(volume_type_encryption.update) def update(self, req, type_id, id, body): """Update encryption specs for a given volume type.""" context = req.environ['cinder.context'] context.authorize(policy.UPDATE_ENCRYPTION_POLICY) key_size = body['encryption'].get('key_size') if key_size is not None: body['encryption']['key_size'] = int(key_size) self._check_type(context, type_id) if self._encrypted_type_in_use(context, type_id): expl = _('Cannot update encryption specs. 
Volume type in use.') raise webob.exc.HTTPBadRequest(explanation=expl) encryption_specs = body['encryption'] db.volume_type_encryption_update(context, type_id, encryption_specs) notifier_info = dict(type_id=type_id, id=id) notifier = rpc.get_notifier('volumeTypeEncryption') notifier.info(context, 'volume_type_encryption.update', notifier_info) return body def show(self, req, type_id, id): """Return a single encryption item.""" context = req.environ['cinder.context'] context.authorize(policy.GET_ENCRYPTION_POLICY) self._check_type(context, type_id) encryption_specs = self._get_volume_type_encryption(context, type_id) if id not in encryption_specs: raise exception.VolumeTypeEncryptionNotFound(type_id=type_id) return {id: encryption_specs[id]} def delete(self, req, type_id, id): """Delete encryption specs for a given volume type.""" context = req.environ['cinder.context'] context.authorize(policy.DELETE_ENCRYPTION_POLICY) if self._encrypted_type_in_use(context, type_id): expl = _('Cannot delete encryption specs. Volume type in use.') raise webob.exc.HTTPBadRequest(explanation=expl) else: # Not found exception will be handled at the wsgi level db.volume_type_encryption_delete(context, type_id) return webob.Response(status_int=HTTPStatus.ACCEPTED) class Volume_type_encryption(extensions.ExtensionDescriptor): """Encryption support for volume types.""" name = "VolumeTypeEncryption" alias = "encryption" updated = "2013-07-01T00:00:00+00:00" def get_resources(self): resources = [] res = extensions.ResourceExtension( Volume_type_encryption.alias, VolumeTypeEncryptionController(), parent=dict(member_name='type', collection_name='types')) resources.append(res) return resources def get_controller_extensions(self): controller = VolumeTypeEncryptionController() extension = extensions.ControllerExtension(self, 'types', controller) return [extension] ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/api/contrib/volume_unmanage.py0000664000175000017500000000476400000000000022313 0ustar00zuulzuul00000000000000# Copyright 2014 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from http import HTTPStatus from oslo_log import log as logging import webob from cinder.api import extensions from cinder.api.openstack import wsgi from cinder.policies import manageable_volumes as policy from cinder import volume LOG = logging.getLogger(__name__) class VolumeUnmanageController(wsgi.Controller): def __init__(self, *args, **kwargs): super(VolumeUnmanageController, self).__init__(*args, **kwargs) self.volume_api = volume.API() @wsgi.response(HTTPStatus.ACCEPTED) @wsgi.action('os-unmanage') def unmanage(self, req, id, body): """Stop managing a volume. This action is very much like a delete, except that a different method (unmanage) is called on the Cinder driver. This has the effect of removing the volume from Cinder management without actually removing the backend storage object associated with it. There are no required parameters. 
A Not Found error is returned if the specified volume does not exist. A Bad Request error is returned if the specified volume is still attached to an instance. """ context = req.environ['cinder.context'] LOG.info("Unmanage volume with id: %s", id) # Not found exception will be handled at the wsgi level vol = self.volume_api.get(context, id) context.authorize(policy.UNMANAGE_POLICY, target_obj=vol) self.volume_api.delete(context, vol, unmanage_only=True) return webob.Response(status_int=HTTPStatus.ACCEPTED) class Volume_unmanage(extensions.ExtensionDescriptor): """Enable volume unmanage operation.""" name = "VolumeUnmanage" alias = "os-volume-unmanage" updated = "2012-05-31T00:00:00+00:00" def get_controller_extensions(self): controller = VolumeUnmanageController() extension = extensions.ControllerExtension(self, 'volumes', controller) return [extension] ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/api/extensions.py0000664000175000017500000002553600000000000017670 0ustar00zuulzuul00000000000000# Copyright 2011 OpenStack Foundation # Copyright 2011 Justin Santa Barbara # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import os from oslo_config import cfg from oslo_log import log as logging from oslo_utils import importutils import webob.dec import webob.exc import cinder.api.openstack from cinder.api.openstack import wsgi from cinder import exception import cinder.policy CONF = cfg.CONF LOG = logging.getLogger(__name__) FILES_TO_SKIP = ['resource_common_manage.py'] class ExtensionDescriptor(object): """Base class that defines the contract for extensions. Note that you don't have to derive from this class to have a valid extension; it is purely a convenience. """ # The name of the extension, e.g., 'Fox In Socks' name = None # The alias for the extension, e.g., 'FOXNSOX' alias = None # The timestamp when the extension was last updated, e.g., # '2011-01-22T13:25:27-06:00' updated = None def __init__(self, ext_mgr): """Register extension with the extension manager.""" ext_mgr.register(self) self.ext_mgr = ext_mgr def get_resources(self): """List of extensions.ResourceExtension extension objects. Resources define new nouns, and are accessible through URLs. """ resources = [] return resources def get_controller_extensions(self): """List of extensions.ControllerExtension extension objects. Controller extensions are used to extend existing controllers. 
""" controller_exts = [] return controller_exts class ExtensionsResource(wsgi.Resource): def __init__(self, extension_manager): self.extension_manager = extension_manager super(ExtensionsResource, self).__init__(None) def _translate(self, ext): ext_data = {} ext_data['name'] = ext.name ext_data['alias'] = ext.alias ext_data['description'] = ext.__doc__ ext_data['updated'] = ext.updated ext_data['links'] = [] # TODO(dprince): implement extension links return ext_data def index(self, req): extensions = [] for _alias, ext in self.extension_manager.extensions.items(): extensions.append(self._translate(ext)) return dict(extensions=extensions) def show(self, req, id): try: # NOTE(dprince): the extensions alias is used as the 'id' for show ext = self.extension_manager.extensions[id] except KeyError: raise webob.exc.HTTPNotFound() return dict(extension=self._translate(ext)) def delete(self, req, id): raise webob.exc.HTTPNotFound() def create(self, req): raise webob.exc.HTTPNotFound() class ExtensionManager(object): """Load extensions from the configured extension path. See cinder/tests/api/extensions/foxinsocks/extension.py for an example extension implementation. """ def __init__(self): LOG.debug('Initializing extension manager.') self.cls_list = CONF.osapi_volume_extension self.extensions = {} self._load_extensions() def is_loaded(self, alias): return alias in self.extensions def register(self, ext): # Do nothing if the extension doesn't check out if not self._check_extension(ext): return alias = ext.alias LOG.debug('Loaded extension: %s', alias) if alias in self.extensions: raise exception.CinderException( "Found duplicate extension: %s" % alias) self.extensions[alias] = ext def get_resources(self): """Returns a list of ResourceExtension objects.""" resources = [] resources.append(ResourceExtension('extensions', ExtensionsResource(self))) for ext in self.extensions.values(): try: resources.extend(ext.get_resources()) except AttributeError: # NOTE(dprince): Extension aren't required to have resource # extensions pass return resources def get_controller_extensions(self): """Returns a list of ControllerExtension objects.""" controller_exts = [] for ext in self.extensions.values(): try: get_ext_method = ext.get_controller_extensions except AttributeError: # NOTE(Vek): Extensions aren't required to have # controller extensions continue controller_exts.extend(get_ext_method()) return controller_exts def _check_extension(self, extension): """Checks for required methods in extension objects.""" try: LOG.debug('Ext name: %s', extension.name) LOG.debug('Ext alias: %s', extension.alias) LOG.debug('Ext description: %s', ' '.join(extension.__doc__.strip().split())) LOG.debug('Ext updated: %s', extension.updated) except AttributeError: LOG.exception("Exception loading extension.") return False return True def load_extension(self, ext_factory): """Execute an extension factory. Loads an extension. The 'ext_factory' is the name of a callable that will be imported and called with one argument--the extension manager. The factory callable is expected to call the register() method at least once. 
""" LOG.debug("Loading extension %s", ext_factory) # Load the factory factory = importutils.import_class(ext_factory) # Call it LOG.debug("Calling extension factory %s", ext_factory) factory(self) def _load_extensions(self): """Load extensions specified on the command line.""" extensions = list(self.cls_list) for ext_factory in extensions: try: self.load_extension(ext_factory) except Exception as exc: LOG.warning('Failed to load extension %(ext_factory)s: ' '%(exc)s', {'ext_factory': ext_factory, 'exc': exc}) class ControllerExtension(object): """Extend core controllers of cinder OpenStack API. Provide a way to extend existing cinder OpenStack API core controllers. """ def __init__(self, extension, collection, controller): self.extension = extension self.collection = collection self.controller = controller class ResourceExtension(object): """Add top level resources to the OpenStack API in cinder.""" def __init__(self, collection, controller, parent=None, collection_actions=None, member_actions=None, custom_routes_fn=None): if not collection_actions: collection_actions = {} if not member_actions: member_actions = {} self.collection = collection self.controller = controller self.parent = parent self.collection_actions = collection_actions self.member_actions = member_actions self.custom_routes_fn = custom_routes_fn def load_standard_extensions(ext_mgr, logger, path, package, ext_list=None): """Registers all standard API extensions.""" # Walk through all the modules in our directory... our_dir = path[0] for dirpath, dirnames, filenames in os.walk(our_dir): # Compute the relative package name from the dirpath relpath = os.path.relpath(dirpath, our_dir) if relpath == '.': relpkg = '' else: relpkg = '.%s' % '.'.join(relpath.split(os.sep)) # Now, consider each file in turn, only considering .py and .pyc files for fname in filenames: root, ext = os.path.splitext(fname) # Skip __init__ and anything that's not .py and .pyc if ((ext not in ('.py', '.pyc')) or root == '__init__' or fname in FILES_TO_SKIP): continue # If .pyc and .py both exist, skip .pyc if ext == '.pyc' and ((root + '.py') in filenames): continue # Try loading it classname = "%s%s" % (root[0].upper(), root[1:]) classpath = ("%s%s.%s.%s" % (package, relpkg, root, classname)) if ext_list is not None and classname not in ext_list: logger.debug("Skipping extension: %s", classpath) continue try: ext_mgr.load_extension(classpath) except Exception as exc: logger.warning('Failed to load extension %(classpath)s: ' '%(exc)s', {'classpath': classpath, 'exc': exc}) # Now, let's consider any subdirectories we may have... subdirs = [] for dname in dirnames: # Skip it if it does not have __init__.py if not os.path.exists(os.path.join(dirpath, dname, '__init__.py')): continue # If it has extension(), delegate... ext_name = ("%s%s.%s.extension" % (package, relpkg, dname)) try: ext = importutils.import_class(ext_name) except ImportError: # extension() doesn't exist on it, so we'll explore # the directory for ourselves subdirs.append(dname) else: try: ext(ext_mgr) except Exception as exc: logger.warning('Failed to load extension ' '%(ext_name)s: %(exc)s', {'ext_name': ext_name, 'exc': exc}) # Update the list of directories we'll explore... 
dirnames[:] = subdirs def extension_authorizer(api_name, extension_name): def authorize(context, target=None, action=None): if target is None: target = {'project_id': context.project_id, 'user_id': context.user_id} if action is None: act = '%s_extension:%s' % (api_name, extension_name) else: act = '%s_extension:%s:%s' % (api_name, extension_name, action) cinder.policy.authorize(context, act, target) return authorize ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/api/microversions.py0000664000175000017500000001164000000000000020362 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """API Microversion definitions. All new microversions should have a constant added here to be used throughout the code instead of the specific version number. Until patches land, it's common to end up with merge conflicts with other microversion changes. Merge conflicts will be easier to handle via the microversion constants defined here as the version number will only need to be changed in a single location. Actual version numbers should be used: * In this file * In cinder/api/openstack/rest_api_version_history.rst * In cinder/api/openstack/api_version_request.py * In release notes describing the new functionality * In updates to api-ref Nearly all microversion changes should include changes to all of those locations. Make sure to add relevant documentation, and make sure that documentation includes the final version number used. """ from cinder.api.openstack import api_version_request as api_version from cinder import exception # Add new constants here for each new microversion. 
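# For example, a hypothetical future microversion would get its own constant
# appended after the existing ones below, e.g.:
#
#   EXAMPLE_NEW_FEATURE = '3.72'   # hypothetical, not a real microversion
#
# and code would then reference mv.EXAMPLE_NEW_FEATURE rather than the
# literal version string.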
BASE_VERSION = '3.0' UPLOAD_IMAGE_PARAMS = '3.1' VOLUME_LIST_BOOTABLE = '3.2' MESSAGES = '3.3' VOLUME_LIST_GLANCE_METADATA = '3.4' MESSAGES_PAGINATION = '3.5' CG_UPDATE_BLANK_PROPERTIES = '3.6' CLUSTER_SUPPORT = '3.7' MANAGE_EXISTING_LIST = '3.8' BACKUP_UPDATE = '3.9' VOLUME_LIST_GROUP = '3.10' GROUP_TYPE = '3.11' VOLUME_SUMMARY = '3.12' GROUP_VOLUME = '3.13' GROUP_SNAPSHOTS = '3.14' ETAGS = '3.15' VOLUME_MIGRATE_CLUSTER = '3.16' MANAGE_EXISTING_CLUSTER = '3.17' BACKUP_PROJECT = '3.18' GROUP_SNAPSHOT_RESET_STATUS = '3.19' GROUP_VOLUME_RESET_STATUS = '3.20' VOLUME_DETAIL_PROVIDER_ID = '3.21' SNAPSHOT_LIST_METADATA_FILTER = '3.22' VOLUME_DELETE_FORCE = '3.23' WORKERS_CLEANUP = '3.24' GROUP_VOLUME_LIST = '3.25' REPLICATION_CLUSTER = '3.26' NEW_ATTACH = '3.27' POOL_FILTER = '3.28' GROUP_SNAPSHOT_PAGINATION = '3.29' SNAPSHOT_SORT = '3.30' RESOURCE_FILTER = '3.31' LOG_LEVEL = '3.32' RESOURCE_FILTER_CONFIG = '3.33' LIKE_FILTER = '3.34' POOL_TYPE_FILTER = '3.35' VOLUME_SUMMARY_METADATA = '3.36' BACKUP_SORT_NAME = '3.37' GROUP_REPLICATION = '3.38' LIMITS_ADMIN_FILTER = '3.39' VOLUME_REVERT = '3.40' SNAPSHOT_LIST_USER_ID = '3.41' VOLUME_EXTEND_INUSE = '3.42' BACKUP_METADATA = '3.43' NEW_ATTACH_COMPLETION = '3.44' SUPPORT_COUNT_INFO = '3.45' SUPPORT_NOVA_IMAGE = '3.46' VOLUME_CREATE_FROM_BACKUP = '3.47' VOLUME_SHARED_TARGETS_AND_SERVICE_FIELDS = '3.48' BACKEND_STATE_REPORT = '3.49' MULTIATTACH_VOLUMES = '3.50' BACKUP_AZ = '3.51' SUPPORT_VOLUME_TYPE_FILTER = '3.52' SUPPORT_VOLUME_SCHEMA_CHANGES = '3.53' ATTACHMENT_CREATE_MODE_ARG = '3.54' TRANSFER_WITH_SNAPSHOTS = '3.55' BACKUP_PROJECT_USER_ID = '3.56' TRANSFER_WITH_HISTORY = '3.57' GROUP_GROUPSNAPSHOT_PROJECT_ID = '3.58' SUPPORT_TRANSFER_PAGINATION = '3.59' VOLUME_TIME_COMPARISON_FILTER = '3.60' VOLUME_CLUSTER_NAME = '3.61' DEFAULT_TYPE_OVERRIDES = '3.62' VOLUME_TYPE_ID_IN_VOLUME_DETAIL = '3.63' ENCRYPTION_KEY_ID_IN_DETAILS = '3.64' USE_QUOTA = '3.65' SNAPSHOT_IN_USE = '3.66' PROJECT_ID_OPTIONAL_IN_URL = '3.67' SUPPORT_REIMAGE_VOLUME = '3.68' SHARED_TARGETS_TRISTATE = '3.69' TRANSFER_ENCRYPTED_VOLUME = '3.70' EXTEND_VOLUME_COMPLETION = '3.71' def get_mv_header(version): """Gets a formatted HTTP microversion header. :param version: The microversion needed. :return: A tuple containing the microversion header with the requested version value. """ return {'OpenStack-API-Version': 'volume %s' % version} def get_api_version(version): """Gets a ``APIVersionRequest`` instance. :param version: The microversion needed. :return: The ``APIVersionRequest`` instance. """ return api_version.APIVersionRequest(version) def get_prior_version(version): """Gets the microversion before the given version. Mostly useful for testing boundaries. This gets the microversion defined just prior to the given version. :param version: The version of interest. :return: The version just prior to the given version. """ parts = version.split('.') if len(parts) != 2 or parts[0] != '3': raise exception.InvalidInput(reason='Version %s is not a valid ' 'microversion format.' % version) minor = int(parts[1]) - 1 if minor < 0: # What's your problem? Are you trying to be difficult? 
minor = 0 return '%s.%s' % (parts[0], minor) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315577.0391178 cinder-27.0.0/cinder/api/middleware/0000775000175000017500000000000000000000000017221 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/api/middleware/__init__.py0000664000175000017500000000000000000000000021320 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/api/middleware/auth.py0000664000175000017500000001400300000000000020532 0ustar00zuulzuul00000000000000# Copyright 2010 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Common Auth Middleware. """ from http import HTTPStatus import os from oslo_config import cfg from oslo_log import log as logging from oslo_serialization import jsonutils import webob.dec import webob.exc from cinder.api.openstack import wsgi from cinder import context from cinder.i18n import _ from cinder.wsgi import common as base_wsgi use_forwarded_for_opt = cfg.BoolOpt( 'use_forwarded_for', default=False, help='Treat X-Forwarded-For as the canonical remote address. 
' 'Only enable this if you have a sanitizing proxy.') CONF = cfg.CONF CONF.register_opt(use_forwarded_for_opt) LOG = logging.getLogger(__name__) def pipeline_factory(loader, global_conf, **local_conf): """A paste pipeline replica that keys off of auth_strategy.""" pipeline = local_conf[CONF.auth_strategy] if not CONF.api_rate_limit: limit_name = CONF.auth_strategy + '_nolimit' pipeline = local_conf.get(limit_name, pipeline) pipeline = pipeline.split() filters = [loader.get_filter(n) for n in pipeline[:-1]] app = loader.get_app(pipeline[-1]) filters.reverse() for filter in filters: app = filter(app) return app def _set_request_context(req, **kwargs): """Sets request context based on parameters and request.""" remote_address = getattr(req, 'remote_addr', '127.0.0.1') service_catalog = None if req.headers.get('X_SERVICE_CATALOG') is not None: try: catalog_header = req.headers.get('X_SERVICE_CATALOG') service_catalog = jsonutils.loads(catalog_header) except ValueError: raise webob.exc.HTTPInternalServerError( explanation=_('Invalid service catalog json.')) if CONF.use_forwarded_for: remote_address = req.headers.get('X-Forwarded-For', remote_address) kwargs.setdefault('remote_address', remote_address) kwargs.setdefault('service_catalog', service_catalog) # Preserve the timestamp set by the RequestId middleware kwargs['timestamp'] = getattr(req.environ.get('cinder.context'), 'timestamp', None) # request ID and global ID are present in the environment req.environ ctx = context.RequestContext.from_environ(req.environ, **kwargs) req.environ['cinder.context'] = ctx return ctx class InjectContext(base_wsgi.Middleware): """Add a 'cinder.context' to WSGI environ.""" def __init__(self, context, *args, **kwargs): self.context = context super(InjectContext, self).__init__(*args, **kwargs) @webob.dec.wsgify(RequestClass=base_wsgi.Request) def __call__(self, req): req.environ['cinder.context'] = self.context return self.application class CinderKeystoneContext(base_wsgi.Middleware): """Make a request context from keystone headers.""" ENV_OVERWRITES = { 'X_PROJECT_DOMAIN_ID': 'project_domain_id', 'X_PROJECT_DOMAIN_NAME': 'project_domain_name', 'X_USER_DOMAIN_ID': 'user_domain_id', 'X_USER_DOMAIN_NAME': 'user_domain_name', } @webob.dec.wsgify(RequestClass=base_wsgi.Request) def __call__(self, req): params = {'project_name': req.headers.get('X_TENANT_NAME')} for env_name, param_name in self.ENV_OVERWRITES.items(): if req.environ.get(env_name): params[param_name] = req.environ[env_name] ctx = _set_request_context(req, **params) if ctx.user_id is None: LOG.debug("Neither X_USER_ID nor X_USER found in request") return webob.exc.HTTPUnauthorized() return self.application class NoAuthMiddlewareBase(base_wsgi.Middleware): """Return a fake token if one isn't specified.""" def base_call(self, req, project_id_in_path=False): if 'X-Auth-Token' not in req.headers: user_id = req.headers.get('X-Auth-User', 'admin') project_id = req.headers.get('X-Auth-Project-Id', 'admin') if project_id_in_path: os_url = os.path.join(req.url.rstrip('/'), project_id) else: os_url = req.url.rstrip('/') res = webob.Response() # NOTE(vish): This is expecting and returning Auth(1.1), whereas # keystone uses 2.0 auth. We should probably allow # 2.0 auth here as well. 
res.headers['X-Auth-Token'] = '%s:%s' % (user_id, project_id) res.headers['X-Server-Management-Url'] = os_url res.content_type = 'text/plain' res.status_int = HTTPStatus.NO_CONTENT return res token = req.headers['X-Auth-Token'] user_id, _sep, project_id = token.partition(':') project_id = project_id or user_id _set_request_context(req, user_id=user_id, project_id=project_id, is_admin=True) return self.application class NoAuthMiddleware(NoAuthMiddlewareBase): """Return a fake token if one isn't specified. Sets project_id in URLs. """ @webob.dec.wsgify(RequestClass=wsgi.Request) def __call__(self, req): return self.base_call(req) class NoAuthMiddlewareIncludeProjectID(NoAuthMiddlewareBase): """Return a fake token if one isn't specified. Does not set project_id in URLs. """ @webob.dec.wsgify(RequestClass=wsgi.Request) def __call__(self, req): return self.base_call(req, project_id_in_path=True) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/api/middleware/fault.py0000664000175000017500000000672200000000000020715 0ustar00zuulzuul00000000000000# Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from http import HTTPStatus from oslo_log import log as logging import webob.dec import webob.exc from cinder.api import api_utils from cinder.api.openstack import wsgi from cinder import exception from cinder.i18n import _ from cinder.wsgi import common as base_wsgi LOG = logging.getLogger(__name__) class FaultWrapper(base_wsgi.Middleware): """Calls down the middleware stack, making exceptions into faults.""" _status_to_type = {} @staticmethod def status_to_type(status): if not FaultWrapper._status_to_type: for clazz in api_utils.walk_class_hierarchy(webob.exc.HTTPError): FaultWrapper._status_to_type[clazz.code] = clazz return FaultWrapper._status_to_type.get( status, webob.exc.HTTPInternalServerError)() def _error(self, inner, req): if isinstance(inner, UnicodeDecodeError): msg = _("Error decoding your request. Either the URL or the " "request body contained characters that could not be " "decoded by Cinder.") return wsgi.Fault(webob.exc.HTTPBadRequest(explanation=msg)) if not isinstance(inner, exception.QuotaError): LOG.exception("Caught error: %(type)s %(error)s", {'type': type(inner), 'error': inner}) safe = getattr(inner, 'safe', False) headers = getattr(inner, 'headers', None) status = getattr(inner, 'code', HTTPStatus.INTERNAL_SERVER_ERROR) if status is None: status = HTTPStatus.INTERNAL_SERVER_ERROR msg_dict = dict(url=req.url, status=status) LOG.info("%(url)s returned with HTTP %(status)s", msg_dict) outer = self.status_to_type(status) if headers: outer.headers = headers # NOTE(johannes): We leave the explanation empty here on # purpose. It could possibly have sensitive information # that should not be returned back to the user. 
See # bugs 868360 and 874472 # NOTE(eglynn): However, it would be over-conservative and # inconsistent with the EC2 API to hide every exception, # including those that are safe to expose, see bug 1021373 if safe: msg = (inner.msg if isinstance(inner, exception.CinderException) else str(inner)) params = {'exception': inner.__class__.__name__, 'explanation': msg} outer.explanation = _('%(exception)s: %(explanation)s') % params return wsgi.Fault(outer) @webob.dec.wsgify(RequestClass=wsgi.Request) def __call__(self, req): try: return req.get_response(self.application) except Exception as ex: return self._error(ex, req) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/api/middleware/request_id.py0000664000175000017500000000604200000000000021741 0ustar00zuulzuul00000000000000# Copyright 2022 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_log import log as logging from oslo_middleware import request_id as oslo_req_id from oslo_utils import timeutils from cinder import context as cinder_context LOG = logging.getLogger(__name__) class RequestId(oslo_req_id.RequestId): def _context_setter(self, environ, *args, **kwargs): """Wrapper to set a temporary context. It is necessary to replace the previous request's context, but at this point when we are generating the new request ID we don't have the keystone info, so use a placeholder with the information we have: - global_request_id ==> Extracted from the headers by the parent class - request_id => Generated by the parent class __call__ method - timestamp => The moment Cinder API starts processing the request This middleware needs to be the first in ALL the pipelines for things to work as expected, otherwise we'll have the following issues: - Logs from other filters reuse a context from a previous request, presenting the wrong request id and project and user info, and then after the request passes the auth filter the request id will change to the right one. We'll see this when enabling debug mode in the keystonemiddleware module. - Requests that don't require authorization (/ and /healthcheck) won't return a request ID in the headers. """ # Replace previous request's context with all the info we have now placeholder_ctx = cinder_context.RequestContext( request_id=environ[oslo_req_id.ENV_REQUEST_ID], global_request_id=environ.get(oslo_req_id.GLOBAL_REQ_ID), timestamp=timeutils.utcnow(), ) # Only update environ, oslo_context local store was updated # automatically when instantiating the request context. environ['cinder.context'] = placeholder_ctx # Have a timestamped log with the start of the pipeline processing. LOG.debug('RequestId filter calling following filter/app') return self._application(environ, *args, **kwargs) def __init__(self, *args, **kwargs): # Parent __call__ method creates the request id and makes the call to # the chained app one after the other, so we need a wrapper on the app # to set the context. 
super().__init__(*args, **kwargs) self._application = self.application self.application = self._context_setter ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315577.0391178 cinder-27.0.0/cinder/api/openstack/0000775000175000017500000000000000000000000017073 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/api/openstack/__init__.py0000664000175000017500000001472700000000000021217 0ustar00zuulzuul00000000000000# Copyright (c) 2013 OpenStack Foundation # # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ WSGI middleware for OpenStack API controllers. """ from oslo_config import cfg from oslo_log import log as logging from oslo_service import wsgi as base_wsgi import routes from cinder.api.openstack import wsgi from cinder.i18n import _ openstack_api_opts = [ cfg.StrOpt('project_id_regex', default=r"[0-9a-f\-]+", help=r'The validation regex for project_ids used in urls. ' r'This defaults to [0-9a-f\\-]+ if not set, ' r'which matches normal uuids created by keystone.'), ] CONF = cfg.CONF CONF.register_opts(openstack_api_opts) LOG = logging.getLogger(__name__) class APIMapper(routes.Mapper): def routematch(self, url=None, environ=None): if url == "": result = self._match("", environ) return result[0], result[1] return routes.Mapper.routematch(self, url, environ) def connect(self, *args, **kwargs): # NOTE(inhye): Default the format part of a route to only accept json # so it doesn't eat all characters after a '.' # in the url. kwargs.setdefault('requirements', {}) if not kwargs['requirements'].get('format'): kwargs['requirements']['format'] = 'json' return routes.Mapper.connect(self, *args, **kwargs) class ProjectMapper(APIMapper): def resource(self, member_name, collection_name, **kwargs): """Base resource path handler This method is compatible with resource paths that include a project_id and those that don't. Including project_id in the URLs was a legacy API requirement; and making API requests against such endpoints won't work for users that don't belong to a particular project. """ # NOTE: project_id parameter is only valid if its hex or hex + dashes # (note, integers are a subset of this). This is required to handle # our overlapping routes issues. project_id_regex = CONF.project_id_regex project_id_token = '{project_id:%s}' % project_id_regex if 'parent_resource' not in kwargs: kwargs['path_prefix'] = '%s/' % project_id_token else: parent_resource = kwargs['parent_resource'] p_collection = parent_resource['collection_name'] p_member = parent_resource['member_name'] kwargs['path_prefix'] = '%s/%s/:%s_id' % (project_id_token, p_collection, p_member) routes.Mapper.resource(self, member_name, collection_name, **kwargs) # Add additional routes without project_id. 
if 'parent_resource' not in kwargs: del kwargs['path_prefix'] else: parent_resource = kwargs['parent_resource'] p_collection = parent_resource['collection_name'] p_member = parent_resource['member_name'] kwargs['path_prefix'] = '%s/:%s_id' % (p_collection, p_member) routes.Mapper.resource(self, member_name, collection_name, **kwargs) class APIRouter(base_wsgi.Router): """Routes requests on the API to the appropriate controller and method.""" ExtensionManager = None # override in subclasses @classmethod def factory(cls, global_config, **local_config): """Simple paste factory, :class:`cinder.wsgi.Router` doesn't have.""" return cls() def __init__(self, ext_mgr=None): if ext_mgr is None: if self.ExtensionManager: ext_mgr = self.ExtensionManager() else: raise Exception(_("Must specify an ExtensionManager class")) mapper = ProjectMapper() self.resources = {} self._setup_routes(mapper, ext_mgr) self._setup_ext_routes(mapper, ext_mgr) self._setup_extensions(ext_mgr) super(APIRouter, self).__init__(mapper) def _setup_ext_routes(self, mapper, ext_mgr): for resource in ext_mgr.get_resources(): LOG.debug('Extended resource: %s', resource.collection) wsgi_resource = wsgi.Resource(resource.controller) self.resources[resource.collection] = wsgi_resource kargs = dict( controller=wsgi_resource, collection=resource.collection_actions, member=resource.member_actions) if resource.parent: kargs['parent_resource'] = resource.parent mapper.resource(resource.collection, resource.collection, **kargs) if resource.custom_routes_fn: resource.custom_routes_fn(mapper, wsgi_resource) def _setup_extensions(self, ext_mgr): for extension in ext_mgr.get_controller_extensions(): collection = extension.collection controller = extension.controller if collection not in self.resources: LOG.warning('Extension %(ext_name)s: Cannot extend ' 'resource %(collection)s: No such resource', {'ext_name': extension.extension.name, 'collection': collection}) continue LOG.debug('Extension %(ext_name)s extending resource: ' '%(collection)s', {'ext_name': extension.extension.name, 'collection': collection}) resource = self.resources[collection] resource.register_actions(controller) resource.register_extensions(controller) def _setup_routes(self, mapper, ext_mgr): raise NotImplementedError ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/api/openstack/api_version_request.py0000664000175000017500000003030100000000000023530 0ustar00zuulzuul00000000000000# Copyright 2014 IBM Corp. # Copyright 2015 Clinton Knight # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import re from cinder.api.openstack import versioned_method from cinder import exception from cinder.i18n import _ from cinder import utils # Define the minimum and maximum version of the API across all of the # REST API. The format of the version is: # X.Y where: # # - X will only be changed if a significant backwards incompatible API # change is made which affects the API as whole. 
That is, something # that is only very very rarely incremented. # # - Y when you make any change to the API. Note that this includes # semantic changes which may not affect the input or output formats or # even originate in the API code layer. We are not distinguishing # between backwards compatible and backwards incompatible changes in # the versioning system. It must be made clear in the documentation as # to what is a backwards compatible change and what is a backwards # incompatible one. # # You must update the API version history string below with a one or # two line description as well as update rest_api_version_history.rst REST_API_VERSION_HISTORY = """ REST API Version History: * 3.0 - Includes all V2 APIs and extensions. V1 API is still supported. * 3.0 - Versions API updated to reflect beginning of microversions epoch. * 3.1 - Adds visibility and protected to _volume_upload_image parameters. * 3.2 - Bootable filters in volume GET call no longer treats all values passed to it as true. * 3.3 - Add user messages APIs. * 3.4 - Adds glance_metadata filter to list/detail volumes in _get_volumes. * 3.5 - Add pagination support to messages API. * 3.6 - Allows to set empty description and empty name for consistency group in consisgroup-update operation. * 3.7 - Add cluster API and cluster_name field to service list API * 3.8 - Adds resources from volume_manage and snapshot_manage extensions. * 3.9 - Add backup update interface. * 3.10 - Add group_id filter to list/detail volumes in _get_volumes. * 3.11 - Add group types and group specs API. * 3.12 - Add volumes summary API. * 3.13 - Add generic volume groups API. * 3.14 - Add group snapshot and create group from src APIs. * 3.15 - Inject the response's `Etag` header to avoid the lost update problem with volume metadata. * 3.16 - Migrate volume now supports cluster * 3.17 - Getting manageable volumes and snapshots now accepts cluster. * 3.18 - Add backup project attribute. * 3.19 - Add API reset status actions 'reset_status' to group snapshot. * 3.20 - Add API reset status actions 'reset_status' to generic volume group. * 3.21 - Show provider_id in detailed view of a volume for admin. * 3.22 - Add filtering based on metadata for snapshot listing. * 3.23 - Allow passing force parameter to volume delete. * 3.24 - Add workers/cleanup endpoint. * 3.25 - Add ``volumes`` field to group list/detail and group show. * 3.26 - Add failover action and cluster listings accept new filters and return new data. * 3.27 - Add attachment API * 3.28 - Add filters support to get_pools * 3.29 - Add filter, sorter and pagination support in group snapshot. * 3.30 - Support sort snapshots with "name". * 3.31 - Add support for configure resource query filters. * 3.32 - Add set-log and get-log service actions. * 3.33 - Add ``resource_filters`` API to retrieve configured resource filters. * 3.34 - Add like filter support in ``volume``, ``backup``, ``snapshot``, ``message``, ``attachment``, ``group`` and ``group-snapshot`` list APIs. * 3.35 - Add ``volume-type`` filter to Get-Pools API. * 3.36 - Add metadata to volumes/summary response body. * 3.37 - Support sort backup by "name". * 3.38 - Add replication group API (Tiramisu). * 3.39 - Add ``project_id`` admin filters support to limits. * 3.40 - Add volume revert to its latest snapshot support. * 3.41 - Add ``user_id`` field to snapshot list/detail and snapshot show. * 3.42 - Add ability to extend 'in-use' volume. 
User should be aware of the whole environment before using this feature because it's dependent on several external factors below: 1. nova-compute version - needs to be the latest for Pike. 2. only the libvirt compute driver supports this currently. 3. only iscsi and fibre channel volume types are supported on the nova side currently. Administrator can disable this ability by updating the 'volume:extend_attached_volume' policy rule. Extend in reserved state is intentionally NOT allowed. * 3.43 - Support backup CRUD with metadata. * 3.44 - Add attachment-complete. * 3.45 - Add ``count`` field to volume, backup and snapshot list and detail APIs. * 3.46 - Support create volume by Nova specific image (0 size image). * 3.47 - Support create volume from backup. * 3.48 - Add ``shared_targets`` and ``service_uuid`` fields to volume. * 3.49 - Support report backend storage state in service list. * 3.50 - Add multiattach capability * 3.51 - Add support for cross AZ backups. * 3.52 - ``RESKEY:availability_zones`` is a reserved spec key for AZ volume type, and filter volume type by ``extra_specs`` is supported now. * 3.53 - Add schema validation support for request body using jsonschema for V2/V3 volume APIs. 1. Modified create volume API to accept only parameters which are documented in the api-ref otherwise it will return 400 error. 2. Update volume API expects user to pass at least one valid parameter in the request body in order to update the volume. Also, additional parameters will not be allowed. * 3.54 - Add ``mode`` argument to attachment-create. * 3.55 - Support transfer volume with snapshots * 3.56 - Add ``user_id`` attribute to response body of list backup with detail and show backup detail APIs. * 3.57 - Add 'source_project_id', 'destination_project_id', 'accepted' to transfer. * 3.58 - Add ``project_id`` attribute to response body of list groups with detail, list group snapshots with detail, show group detail and show group snapshot detail APIs. * 3.59 - Support volume transfer pagination. * 3.60 - Support filtering on the "updated_at" and "created_at" fields with time comparison operators for the volume summary list ("GET /v3/{project_id}/volumes") and volume detail list ("GET /v3/{project_id}/volumes/detail") requests. * 3.61 - Add ``cluster_name`` attribute to response body of volume details for admin. * 3.62 - Default volume type overrides * 3.63 - Include volume type ID in the volume details JSON response. Before this microversion (MV), Cinder returns only the volume type name in the volume details. This MV affects the volume detail list ("GET /v3/{project_id}/volumes/detail") and volume-show ("GET /v3/{project_id}/volumes/{volume_id}") calls. * 3.64 - Include 'encryption_key_id' in volume and backup details * 3.65 - Include 'consumes_quota' in volume and snapshot details - Accept 'consumes_quota' filter in volume and snapshot list operation. * 3.66 - Allow snapshotting in-use volumes without force flag. * 3.67 - API URLs no longer need to include a project_id parameter. * 3.68 - Support re-image volume * 3.69 - Allow null value for shared_targets * 3.70 - Support encrypted volume transfers * 3.71 - Support 'os-extend_volume_completion' volume action """ # The minimum and maximum versions of the API supported # The default api version request is defined to be the # minimum version of the API supported. _MIN_API_VERSION = "3.0" _MAX_API_VERSION = "3.71" UPDATED = "2023-08-31T00:00:00Z" # NOTE(cyeoh): min and max versions declared as functions so we can # mock them for unittests. 
Do not use the constants directly anywhere # else. def min_api_version(): return APIVersionRequest(_MIN_API_VERSION) def max_api_version(): return APIVersionRequest(_MAX_API_VERSION) class APIVersionRequest(utils.ComparableMixin): """This class represents an API Version Request. This class includes convenience methods for manipulation and comparison of version numbers as needed to implement API microversions. """ def __init__(self, version_string=None, experimental=False): """Create an API version request object.""" self._ver_major = None self._ver_minor = None if version_string is not None: match = re.match(r"^([1-9]\d*)\.([1-9]\d*|0)$", version_string) if match: self._ver_major = int(match.group(1)) self._ver_minor = int(match.group(2)) else: raise exception.InvalidAPIVersionString(version=version_string) def __str__(self): """Debug/Logging representation of object.""" return ("API Version Request Major: %(major)s, Minor: %(minor)s" % {'major': self._ver_major, 'minor': self._ver_minor}) def __bool__(self): return (self._ver_major or self._ver_minor) is not None __nonzero__ = __bool__ def _cmpkey(self): """Return the value used by ComparableMixin for rich comparisons.""" return self._ver_major, self._ver_minor def matches_versioned_method(self, method): """Compares this version to that of a versioned method.""" if type(method) is not versioned_method.VersionedMethod: msg = _('An API version request must be compared ' 'to a VersionedMethod object.') raise exception.InvalidParameterValue(err=msg) return self.matches(method.start_version, method.end_version, method.experimental) def matches(self, min_version, max_version=None, experimental=False): """Compares this version to the specified min/max range. Returns whether the version object represents a version greater than or equal to the minimum version and less than or equal to the maximum version. If min_version is null then there is no minimum limit. If max_version is null then there is no maximum limit. If self is null then raise ValueError. :param min_version: Minimum acceptable version. :param max_version: Maximum acceptable version. :param experimental: Whether to match experimental APIs. :returns: boolean """ if not self: raise ValueError if isinstance(min_version, str): min_version = APIVersionRequest(version_string=min_version) if isinstance(max_version, str): max_version = APIVersionRequest(version_string=max_version) if not min_version and not max_version: return True if not max_version: return min_version <= self if not min_version: return self <= max_version return min_version <= self <= max_version def get_string(self): """Returns a string representation of this object. If this method is used to create an APIVersionRequest, the resulting object will be an equivalent request. """ if not self: raise ValueError return ("%(major)s.%(minor)s" % {'major': self._ver_major, 'minor': self._ver_minor}) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/api/openstack/rest_api_version_history.rst0000664000175000017500000004005200000000000024762 0ustar00zuulzuul00000000000000REST API Version History ======================== This documents the changes made to the REST API with every microversion change. The description for each version should be a verbose one which has enough information to be suitable for use in user documentation. 
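The matching rules described throughout this history are implemented by the
``APIVersionRequest`` class shown above. The following is an illustrative
sketch only (assuming a development checkout where the ``cinder`` package is
importable); it is not part of the documented REST API:

.. code-block:: python

    # Exercises APIVersionRequest.matches() from
    # cinder/api/openstack/api_version_request.py.
    from cinder.api.openstack.api_version_request import APIVersionRequest

    requested = APIVersionRequest('3.50')

    # A request for 3.50 satisfies a method available from 3.27 onwards
    # (no upper bound) ...
    assert requested.matches('3.27', None)

    # ... but not one that was only supported up to 3.42.
    assert not requested.matches('3.0', '3.42')

    # Strings that do not match "X.Y" (e.g. '3' or '3.01') raise
    # InvalidAPIVersionString when constructing an APIVersionRequest.
    print(requested.get_string())  # prints '3.50'
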
3.0 (Maximum in Mitaka) ----------------------- The 3.0 Cinder API includes all v2 core APIs existing prior to the introduction of microversions. The /v3 URL is used to call 3.0 APIs. This is the initial version of the Cinder API which supports microversions. A user can specify a header in the API request:: OpenStack-API-Version: volume where ```` is any valid api version for this API. If no version is specified then the API will behave as if version 3.0 was requested. The only API change in version 3.0 is versions, i.e. GET http://localhost:8786/, which now returns information about 3.0 and later versions and their respective /v3 endpoints. All other 3.0 APIs are functionally identical to version 2.0. 3.1 --- Added the parameters ``protected`` and ``visibility`` to _volume_upload_image requests. 3.2 --- Change in return value of 'GET API request' for fetching cinder volume list on the basis of 'bootable' status of volume as filter. Before V3.2, 'GET API request' to fetch volume list returns non-bootable volumes if bootable filter value is any of the false or False. For any other value provided to this filter, it always returns bootable volume list. But in V3.2, this behavior is updated. In V3.2, bootable volume list will be returned for any of the 'T/True/1/true' bootable filter values only. Non-bootable volume list will be returned for any of 'F/False/0/false' bootable filter values. But for any other values passed for bootable filter, it will return "Invalid input received: bootable={filter value}' error. 3.3 --- Added /messages API. 3.4 --- Added the filter parameters ``glance_metadata`` to list/detail volumes requests. 3.5 --- Added pagination support to /messages API 3.6 --- Allowed to set empty description and empty name for consistency group in consisgroup-update operation. 3.7 --- Added ``cluster_name`` field to service list/detail. Added /clusters endpoint to list/show/update clusters. Show endpoint requires the cluster name and optionally the binary as a URL parameter (default is "cinder-volume"). Returns: .. code-block:: json { "cluster": { "created_at": "", "disabled_reason": null, "last_heartbeat": "", "name": "cluster_name", "num_down_hosts": 4, "num_hosts": 2, "state": "up", "status": "enabled", "updated_at": "" } } Update endpoint allows enabling and disabling a cluster in a similar way to service's update endpoint, but in the body we must specify the name and optionally the binary ("cinder-volume" is the default) and the disabled reason. Returns: .. code-block:: json { "cluster": { "name": "cluster_name", "state": "up", "status": "enabled", "disabled_reason": null } } Index and detail accept filtering by `name`, `binary`, `disabled`, `num_hosts` , `num_down_hosts`, and up/down status (`is_up`) as URL parameters. Index endpoint returns: .. code-block:: json { "clusters": [ { "name": "cluster_name", "state": "up", "status": "enabled" } ] } Detail endpoint returns: .. code-block:: json { "clusters": [ { "created_at": "", "disabled_reason": null, "last_heartbeat": "", "name": "cluster_name", "num_down_hosts": 4, "num_hosts": 2, "state": "up", "status": "enabled", "updated_at": "" } ] } 3.8 --- Adds the following resources that were previously in extensions: - os-volume-manage => /v3//manageable_volumes - os-snapshot-manage => /v3//manageable_snapshots 3.9 --- Added backup update interface to change name and description. Returns: .. 
code-block:: json { "backup": { "id": "backup_id", "name": "backup_name", "links": "backup_link" } } 3.10 ---- Added the filter parameters ``group_id`` to list/detail volumes requests. 3.11 ---- Added group types and group specs APIs. 3.12 ---- Added volumes/summary API. 3.13 ---- Added create/delete/update/list/show APIs for generic volume groups. 3.14 ---- Added group snapshots and create group from src APIs. 3.15 (Maximum in Newton) ------------------------ Added injecting the response's `Etag` header to avoid the lost update problem with volume metadata. 3.16 ---- os-migrate_volume now accepts ``cluster`` parameter when we want to migrate a volume to a cluster. If we pass the ``host`` parameter for a volume that is in a cluster, the request will be sent to the cluster as if we had requested that specific cluster. Only ``host`` or ``cluster`` can be provided. Creating a managed volume also supports the cluster parameter. 3.17 ---- os-snapshot-manage and os-volume-manage now support ``cluster`` parameter on listings (summary and detailed). Both location parameters, ``cluster`` and ``host`` are exclusive and only one should be provided. 3.18 ---- Added backup project attribute. 3.19 ---- Added reset status actions 'reset_status' to group snapshot. 3.20 ---- Added reset status actions 'reset_status' to generic volume group. 3.21 ---- Show provider_id in detailed view of a volume for admin. 3.22 ---- Added support to filter snapshot list based on metadata of snapshot. 3.23 ---- Allow passing force parameter to volume delete. 3.24 ---- New API endpoint /workers/cleanup allows triggering cleanup for cinder-volume services. Meant for cleaning ongoing operations from failed nodes. The cleanup will be performed by other services belonging to the same cluster, so at least one of them must be up to be able to do the cleanup. Cleanup cannot be triggered during a cloud upgrade. If no arguments are provided cleanup will try to issue a clean message for all nodes that are down, but we can restrict which nodes we want to be cleaned using parameters ``service_id``, ``cluster_name``, ``host``, ``binary``, and ``disabled``. Cleaning specific resources is also possible using ``resource_type`` and ``resource_id`` parameters. We can even force cleanup on nodes that are up with ``is_up``, but that's not recommended and should only used if you know what you are doing. For example if you know a specific cinder-volume is down even though it's still not being reported as down when listing the services and you know the cluster has at least another service to do the cleanup. API will return a dictionary with 2 lists, one with services that have been issued a cleanup request (``cleaning`` key) and the other with services that cannot be cleaned right now because there is no alternative service to do the cleanup in that cluster (``unavailable`` key). Data returned for each service element in these two lists consist of the ``id``, ``host``, ``binary``, and ``cluster_name``. These are not the services that will be performing the cleanup, but the services that will be cleaned up or couldn't be cleaned up. 3.25 ---- Add ``volumes`` field to group list/detail and group show. 3.26 ---- - New ``failover`` action equivalent to ``failover_host``, but accepting ``cluster`` parameter as well as the ``host`` cluster that ``failover_host`` accepts. - ``freeze`` and ``thaw`` actions accept ``cluster`` parameter. 
- Cluster listing accepts ``replication_status``, ``frozen`` and ``active_backend_id`` as filters, and returns additional fields for each cluster: ``replication_status``, ``frozen``, ``active_backend_id``. 3.27 (Maximum in Ocata) ----------------------- Added new attachment APIs. See the `API reference `__ for details. 3.28 ---- Add filters support to get_pools 3.29 ---- Add filter, sorter and pagination support in group snapshot. 3.30 ---- Support sort snapshots with "name". 3.31 ---- Add support for configure resource query filters. 3.32 ---- Added ``set-log`` and ``get-log`` service actions. 3.33 ---- Add ``resource_filters`` API to retrieve configured resource filters. 3.34 ---- Add like filter support in ``volume``, ``backup``, ``snapshot``, ``message``, ``attachment``, ``group`` and ``group-snapshot`` list APIs. 3.35 ---- Add ``volume-type`` filter to Get-Pools API. 3.36 ---- Add metadata to volumes/summary response body. 3.37 ---- Support sort backup by "name". 3.38 ---- Added enable_replication/disable_replication/failover_replication/ list_replication_targets for replication groups (Tiramisu). 3.39 ---- Add ``project_id`` admin filters support to limits. 3.40 ---- Add volume revert to its latest snapshot support. 3.41 ---- Add ``user_id`` field to snapshot list/detail and snapshot show. 3.42 ---- Add ability to extend 'in-use' volume. User should be aware of the whole environment before using this feature because it's dependent on several external factors below: 1. nova-compute version - needs to be the latest for Pike. 2. only the libvirt compute driver supports this currently. 3. only iscsi and fibre channel volume types are supported on the nova side currently. Administrator can disable this ability by updating the ``volume:extend_attached_volume`` policy rule. Extend of a reserved Volume is NOT allowed. 3.43 (Maximum in Pike) ---------------------- Support backup CRUD with metadata. 3.44 ---- Support attachment completion. See the `API reference `__ for details. 3.45 ---- Add ``count`` field to volume, backup and snapshot list and detail APIs. 3.46 ---- Modify the behavior of the volume-create (``POST /v3/volumes``) call when passing an ``imageRef`` in the request body. Prior to this microversion, the image was simply downloaded and written to the volume. However, when a volume is attached to a server, it is possible to use the Compute API server ``createImage`` action to create an instance snapshot of the volume. This is a zero-byte image in the Image Service that has a ``block_device_mapping`` image property whose value contains ``snapshot`` as the ``source_type`` and a ``snapshot_id`` reference to a volume snapshot in the Block Storage service. From microversion 3.46 and later, when a volume-create request is made referring to such an image, instead of using the image to create a volume, the snapshot it references will be used. .. note:: Due to changes to cinder to handle image-related CVEs, making a volume-create call with an imageRef referring to a nova instance snapshot specifying a microversion less than 3.46 may create a volume in ``error`` status. This occurs when the ``disk_format`` property of the image is something other than ``raw``, because for non-raw formats, even an image containing no data will consist of more than zero bytes, and thus the image is rejected as being of a different format than is claimed. 3.47 ---- Support create volume from backup. 3.48 ---- Add ``shared_targets`` and ``service_uuid`` fields to volume. 
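As an illustration of the fields added in 3.48, a client can request volume
details at microversion 3.48 or later and read the new attributes. This is a
minimal sketch using the ``requests`` library; the endpoint URL, project id,
volume id and token below are placeholder assumptions, and error handling is
omitted:

.. code-block:: python

    # Illustrative sketch only: all values below are placeholders.
    import requests

    url = ('https://controller/volume/v3/PROJECT_ID/volumes/VOLUME_ID')
    headers = {
        'X-Auth-Token': 'TOKEN',                 # assumed Keystone token
        'OpenStack-API-Version': 'volume 3.48',  # request microversion 3.48
    }

    volume = requests.get(url, headers=headers).json()['volume']

    # New in 3.48: whether the backend shares targets between volumes, and
    # the UUID of the service that manages the volume.
    print(volume.get('shared_targets'), volume.get('service_uuid'))
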
3.49 ---- Support report backend storage state in service list. 3.50 (Maximum in Queens) ------------------------ Services supporting this microversion are capable of volume multiattach. This version does not need to be requested when creating the volume, but can be used as a way to query if the capability exists in the Cinder service. 3.51 ---- Add support for cross AZ backups. 3.52 ---- ``RESKEY:availability_zones`` is a reserved spec key for AZ volume type, and filter volume type by ``extra_specs`` is supported now. 3.53 ---- Schema validation support has been added using jsonschema for V2/V3 volume APIs. - Create volume API Before 3.53, create volume API used to accept any invalid parameters in the request body like the ones below were passed by python-cinderclient. 1. user_id 2. project_id 3. status 4. attach_status But in 3.53, this behavior is updated. If user passes any invalid parameters to the API which are not documented in api-ref, then it will raise badRequest error. - Update volume API Before 3.53, even if user doesn't pass any valid parameters in the request body, the volume was updated. But in 3.53, user will need to pass at least one valid parameter in the request body otherwise it will return 400 error. 3.54 ---- Add ``mode`` argument to attachment-create. 3.55 (Maximum in Rocky) ----------------------- Support ability to transfer snapshots along with their parent volume. 3.56 ---- Add ``user_id`` attribute to response body of list backup with detail and show backup detail APIs. 3.57 ---- Expanded volume transfer record details by adding ``source_project_id``, ``destination_project_id`` and ``accepted`` fields to ``transfer`` table and related api (create/show/list detail transfer APIs) responses. 3.58 ---- Add ``project_id`` attribute to response body of list groups with detail, list group snapshots with detail, show group detail and show group snapshot detail APIs. 3.59 (Maximum in Stein and Train) --------------------------------- Support volume transfer pagination. 3.60 (Maximum in Ussuri) ------------------------ Users may apply time comparison filters to the volume summary list and volume detail list requests by using the ``created_at`` or ``updated_at`` fields. Time must be expressed in ISO 8601 format. 3.61 ---- Add ``cluster_name`` attribute to response body of volume details for admin in Active/Active HA mode. 3.62 (Maximum in Victoria) -------------------------- Add support for set, get, and unset a default volume type for a specific project. Setting this default overrides the configured default_volume_type value. 3.63 ---- Includes volume type ID in the volume-show and volume-detail-list JSON responses. Before this microversion, Cinder returns only the volume type name in the volume details. 3.64 (Maximum in Wallaby) ------------------------- Include the ``encryption_key_id`` in volume and backup details when the associated volume is encrypted. 3.65 ---- Include a ``consumes_quota`` field in volume and snapshot details to indicate whether the resource is consuming quota or not. Also, accept a ``consumes_quota`` filter, which takes a boolean value, in the volume and snapshot list requests. (The default listing behavior is not to use this filter.) 3.66 (Maximum in Xena) ---------------------- Volume snapshots of in-use volumes can be created without the 'force' flag. Although the 'force' flag is now considered invalid when passed in a volume snapshot request, for backward compatibility, the 'force' flag with a value evaluating to True is silently ignored. 
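To illustrate the 3.66 behaviour, the sketch below creates a snapshot of an
attached ("in-use") volume without passing the ``force`` flag. The URL, ids
and token are placeholder assumptions, and the request body follows the
snapshot-create schema documented in the api-ref:

.. code-block:: python

    # Illustrative sketch only: all values below are placeholders.
    import requests

    url = 'https://controller/volume/v3/PROJECT_ID/snapshots'
    headers = {
        'X-Auth-Token': 'TOKEN',                 # assumed Keystone token
        'OpenStack-API-Version': 'volume 3.66',  # in-use snapshots need no force
    }
    body = {
        'snapshot': {
            'volume_id': 'VOLUME_ID',     # volume may be attached (in-use)
            'name': 'pre-upgrade-snap',
            # No 'force' key: from 3.66 it is unnecessary; if sent with a
            # true-like value it is silently ignored for compatibility.
        }
    }

    resp = requests.post(url, json=body, headers=headers)
    print(resp.status_code, resp.json())
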
3.67 ---- API URLs no longer need a "project_id" argument in them. For example, the API route: ``https://$(controller)s/volume/v3/$(project_id)s/volumes`` is equivalent to ``https://$(controller)s/volume/v3/volumes``. When interacting with the cinder service as system or domain scoped users, a project_id should not be specified in the API path. 3.68 (Maximum in Yoga) ---------------------- Support ability to re-image a volume with a specific image. Specify the ``os-reimage`` action in the request body. 3.69 ---- Volume field ``shared_targets`` is a tristate boolean value now, with the following meanings: - ``true``: Do os-brick locking when host iSCSI initiator doesn't support manual scans. - ``false``: Never do locking. - ``null``: Forced locking regardless of the iSCSI initiator. 3.70 (Maximum in Zed, 2023.1 and 2023.2) ---------------------------------------- Add the ability to transfer encrypted volumes and their snapshots. The feature removes a prior restriction on transferring encrypted volumes. Otherwise, the API request and response schema are unchanged. 3.71 (Maximum in 2024.1 and 2024.2) ----------------------------------- Add the ``os-extend_volume_completion`` volume action, which Nova can use to notify Cinder of success and error when handling a ``volume-extended`` external server event. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/api/openstack/versioned_method.py0000664000175000017500000000317100000000000023005 0ustar00zuulzuul00000000000000# Copyright 2014 IBM Corp. # Copyright 2015 Clinton Knight # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from cinder import utils class VersionedMethod(utils.ComparableMixin): def __init__(self, name, start_version, end_version, experimental, func): """Versioning information for a single method. Minimum and maximums are inclusive. :param name: Name of the method :param start_version: Minimum acceptable version :param end_version: Maximum acceptable_version :param func: Method to call """ self.name = name self.start_version = start_version self.end_version = end_version self.experimental = experimental self.func = func def __str__(self): args = { 'name': self.name, 'start': self.start_version, 'end': self.end_version } return ("Version Method %(name)s: min: %(start)s, max: %(end)s" % args) def _cmpkey(self): """Return the value used by ComparableMixin for rich comparisons.""" return self.start_version ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/api/openstack/wsgi.py0000664000175000017500000014547000000000000020431 0ustar00zuulzuul00000000000000# Copyright 2011 OpenStack Foundation # Copyright 2013 IBM Corp. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from collections import abc import functools from http import HTTPStatus import inspect import math import time from oslo_log import log as logging from oslo_serialization import jsonutils from oslo_utils import encodeutils from oslo_utils import excutils from oslo_utils import strutils import webob import webob.exc from cinder.api.openstack import api_version_request as api_version from cinder.api.openstack import versioned_method from cinder import exception from cinder import i18n i18n.enable_lazy() from cinder.i18n import _ from cinder import utils from cinder.wsgi import common as wsgi LOG = logging.getLogger(__name__) SUPPORTED_CONTENT_TYPES = ( 'application/json', 'application/vnd.openstack.volume+json', ) _MEDIA_TYPE_MAP = { 'application/vnd.openstack.volume+json': 'json', 'application/json': 'json', } # name of attribute to keep version method information VER_METHOD_ATTR = 'versioned_methods' # Name of header used by clients to request a specific version # of the REST API API_VERSION_REQUEST_HEADER = 'OpenStack-API-Version' VOLUME_SERVICE = 'volume' class Request(webob.Request): """Add some OpenStack API-specific logic to the base webob.Request.""" def __init__(self, *args, **kwargs): super(Request, self).__init__(*args, **kwargs) self._resource_cache = {} if not hasattr(self, 'api_version_request'): self.api_version_request = api_version.APIVersionRequest() def cache_resource(self, resource_to_cache, id_attribute='id', name=None): """Cache the given resource. Allow API methods to cache objects, such as results from a DB query, to be used by API extensions within the same API request. The resource_to_cache can be a list or an individual resource, but ultimately resources are cached individually using the given id_attribute. Different resources types might need to be cached during the same request, they can be cached using the name parameter. For example: Controller 1: request.cache_resource(db_volumes, 'volumes') request.cache_resource(db_volume_types, 'types') Controller 2: db_volumes = request.cached_resource('volumes') db_type_1 = request.cached_resource_by_id('1', 'types') If no name is given, a default name will be used for the resource. An instance of this class only lives for the lifetime of a single API request, so there's no need to implement full cache management. """ if not isinstance(resource_to_cache, list): resource_to_cache = [resource_to_cache] if not name: name = self.path cached_resources = self._resource_cache.setdefault(name, {}) for resource in resource_to_cache: cached_resources[resource[id_attribute]] = resource def cached_resource(self, name=None): """Get the cached resources cached under the given resource name. Allow an API extension to get previously stored objects within the same API request. Note that the object data will be slightly stale. 
:returns: a dict of id_attribute to the resource from the cached resources, an empty map if an empty collection was cached, or None if nothing has been cached yet under this name """ if not name: name = self.path if name not in self._resource_cache: # Nothing has been cached for this key yet return None return self._resource_cache[name] def cached_resource_by_id(self, resource_id, name=None): """Get a resource by ID cached under the given resource name. Allow an API extension to get a previously stored object within the same API request. This is basically a convenience method to lookup by ID on the dictionary of all cached resources. Note that the object data will be slightly stale. :returns: the cached resource or None if the item is not in the cache """ resources = self.cached_resource(name) if not resources: # Nothing has been cached yet for this key yet return None return resources.get(resource_id) def cache_db_items(self, key, items, item_key='id'): """Get cached database items. Allow API methods to store objects from a DB query to be used by API extensions within the same API request. An instance of this class only lives for the lifetime of a single API request, so there's no need to implement full cache management. """ self.cache_resource(items, item_key, key) def get_db_items(self, key): """Get database items. Allow an API extension to get previously stored objects within the same API request. Note that the object data will be slightly stale. """ return self.cached_resource(key) def get_db_item(self, key, item_key): """Get database item. Allow an API extension to get a previously stored object within the same API request. Note that the object data will be slightly stale. """ return self.get_db_items(key).get(item_key) def cache_db_volumes(self, volumes): # NOTE(mgagne) Cache it twice for backward compatibility reasons self.cache_db_items('volumes', volumes, 'id') self.cache_db_items(self.path, volumes, 'id') def cache_db_volume(self, volume): # NOTE(mgagne) Cache it twice for backward compatibility reasons self.cache_db_items('volumes', [volume], 'id') self.cache_db_items(self.path, [volume], 'id') def get_db_volumes(self): return (self.get_db_items('volumes') or self.get_db_items(self.path)) def get_db_volume(self, volume_id): return (self.get_db_item('volumes', volume_id) or self.get_db_item(self.path, volume_id)) def cache_db_volume_types(self, volume_types): self.cache_db_items('volume_types', volume_types, 'id') def cache_db_volume_type(self, volume_type): self.cache_db_items('volume_types', [volume_type], 'id') def get_db_volume_types(self): return self.get_db_items('volume_types') def get_db_volume_type(self, volume_type_id): return self.get_db_item('volume_types', volume_type_id) def cache_db_snapshots(self, snapshots): self.cache_db_items('snapshots', snapshots, 'id') def cache_db_snapshot(self, snapshot): self.cache_db_items('snapshots', [snapshot], 'id') def get_db_snapshots(self): return self.get_db_items('snapshots') def get_db_snapshot(self, snapshot_id): return self.get_db_item('snapshots', snapshot_id) def cache_db_backups(self, backups): self.cache_db_items('backups', backups, 'id') def cache_db_backup(self, backup): self.cache_db_items('backups', [backup], 'id') def get_db_backups(self): return self.get_db_items('backups') def get_db_backup(self, backup_id): return self.get_db_item('backups', backup_id) def best_match_content_type(self): """Determine the requested response content-type.""" if 'cinder.best_content_type' not in self.environ: # Calculate the best 
MIME type content_type = None # Check URL path suffix parts = self.path.rsplit('.', 1) if len(parts) > 1: possible_type = 'application/' + parts[1] if possible_type in SUPPORTED_CONTENT_TYPES: content_type = possible_type if not content_type: best_matches = self.accept.acceptable_offers( SUPPORTED_CONTENT_TYPES) if best_matches: content_type = best_matches[0][0] self.environ['cinder.best_content_type'] = (content_type or 'application/json') return self.environ['cinder.best_content_type'] def get_content_type(self): """Determine content type of the request body. Does not do any body introspection, only checks header """ if "Content-Type" not in self.headers: return None allowed_types = SUPPORTED_CONTENT_TYPES content_type = self.content_type if content_type not in allowed_types: raise exception.InvalidContentType(content_type=content_type) return content_type def best_match_language(self): """Determines best available locale from the Accept-Language header. :returns: the best language match or None if the 'Accept-Language' header was not available in the request. """ if not self.accept_language: return None all_languages = i18n.get_available_languages() # NOTE: To decide the default behavior, 'default' is preferred over # 'default_tag' because that is return as it is when no match. This is # also little tricky that 'default' value cannot be None. At least one # of default_tag or default must be supplied as an argument to the # method, to define the defaulting behavior. So passing a sentinal # value to return None from this function. best_match = self.accept_language.lookup(all_languages, default='fake') if best_match == 'fake': return None return best_match def set_api_version_request(self, url): """Set API version request based on the request header information.""" if API_VERSION_REQUEST_HEADER in self.headers: hdr_string = self.headers[API_VERSION_REQUEST_HEADER] # 'latest' is a special keyword which is equivalent to requesting # the maximum version of the API supported hdr_string_list = hdr_string.split(",") volume_version = None for hdr in hdr_string_list: if VOLUME_SERVICE in hdr: service, volume_version = hdr.split() break if not volume_version: raise exception.VersionNotFoundForAPIMethod( version=volume_version) if volume_version == 'latest': self.api_version_request = api_version.max_api_version() else: self.api_version_request = api_version.APIVersionRequest( volume_version) # Check that the version requested is within the global # minimum/maximum of supported API versions if not self.api_version_request.matches( api_version.min_api_version(), api_version.max_api_version()): raise exception.InvalidGlobalAPIVersion( req_ver=self.api_version_request.get_string(), min_ver=api_version.min_api_version().get_string(), max_ver=api_version.max_api_version().get_string()) else: self.api_version_request = api_version.APIVersionRequest( api_version._MIN_API_VERSION) class ActionDispatcher(object): """Maps method name to local methods through action name.""" def dispatch(self, *args, **kwargs): """Find and call local method.""" action = kwargs.pop('action', 'default') action_method = getattr(self, str(action), self.default) return action_method(*args, **kwargs) def default(self, data): raise NotImplementedError() class TextDeserializer(ActionDispatcher): """Default request body deserialization.""" def deserialize(self, datastring, action='default'): return self.dispatch(datastring, action=action) def default(self, datastring): return {} class JSONDeserializer(TextDeserializer): def 
_from_json(self, datastring): try: return jsonutils.loads(datastring) except ValueError: msg = _("cannot understand JSON") raise exception.MalformedRequestBody(reason=msg) def default(self, datastring): return {'body': self._from_json(datastring)} class DictSerializer(ActionDispatcher): """Default request body serialization.""" def serialize(self, data, action='default'): return self.dispatch(data, action=action) def default(self, data): return "" class JSONDictSerializer(DictSerializer): """Default JSON request body serialization.""" def default(self, data): return jsonutils.dump_as_bytes(data) def serializers(**serializers): """Attaches serializers to a method. This decorator associates a dictionary of serializers with a method. Note that the function attributes are directly manipulated; the method is not wrapped. """ def decorator(func): if not hasattr(func, 'wsgi_serializers'): func.wsgi_serializers = {} func.wsgi_serializers.update(serializers) return func return decorator def deserializers(**deserializers): """Attaches deserializers to a method. This decorator associates a dictionary of deserializers with a method. Note that the function attributes are directly manipulated; the method is not wrapped. """ def decorator(func): if not hasattr(func, 'wsgi_deserializers'): func.wsgi_deserializers = {} func.wsgi_deserializers.update(deserializers) return func return decorator def response(code): """Attaches response code to a method. This decorator associates a response code with a method. Note that the function attributes are directly manipulated; the method is not wrapped. """ def decorator(func): func.wsgi_code = code return func return decorator class ResponseObject(object): """Bundles a response object with appropriate serializers. Object that app methods may return in order to bind alternate serializers with a response object to be serialized. Its use is optional. """ def __init__(self, obj, code=None, headers=None, **serializers): """Binds serializers with an object. Takes keyword arguments akin to the @serializer() decorator for specifying serializers. Serializers specified will be given preference over default serializers or method-specific serializers on return. """ self.obj = obj self.serializers = serializers self._default_code = HTTPStatus.OK self._code = code self._headers = headers or {} self.serializer = None self.media_type = None def __getitem__(self, key): """Retrieves a header with the given name.""" return self._headers[key.lower()] def __setitem__(self, key, value): """Sets a header with the given name to the given value.""" self._headers[key.lower()] = value def __delitem__(self, key): """Deletes the header with the given name.""" del self._headers[key.lower()] def _bind_method_serializers(self, meth_serializers): """Binds method serializers with the response object. Binds the method serializers with the response object. Serializers specified to the constructor will take precedence over serializers specified to this method. :param meth_serializers: A dictionary with keys mapping to response types and values containing serializer objects. """ # We can't use update because that would be the wrong # precedence for mtype, serializer in meth_serializers.items(): self.serializers.setdefault(mtype, serializer) def get_serializer(self, content_type, default_serializers=None): """Returns the serializer for the wrapped object. Returns the serializer for the wrapped object subject to the indicated content type. 
If no serializer matching the content type is attached, an appropriate serializer drawn from the default serializers will be used. If no appropriate serializer is available, raises InvalidContentType. """ default_serializers = default_serializers or {} try: mtype = _MEDIA_TYPE_MAP.get(content_type, content_type) if mtype in self.serializers: return mtype, self.serializers[mtype] else: return mtype, default_serializers[mtype] except (KeyError, TypeError): raise exception.InvalidContentType(content_type=content_type) def preserialize(self, content_type, default_serializers=None): """Prepares the serializer that will be used to serialize. Determines the serializer that will be used and prepares an instance of it for later call. This allows the serializer to be accessed by extensions for, e.g., template extension. """ mtype, serializer = self.get_serializer(content_type, default_serializers) self.media_type = mtype self.serializer = serializer() def attach(self, **kwargs): """Attach slave templates to serializers.""" if self.media_type in kwargs: self.serializer.attach(kwargs[self.media_type]) def serialize(self, request, content_type, default_serializers=None): """Serializes the wrapped object. Utility method for serializing the wrapped object. Returns a webob.Response object. """ if self.serializer: serializer = self.serializer else: _mtype, _serializer = self.get_serializer(content_type, default_serializers) serializer = _serializer() response = webob.Response() response.status_int = self.code for hdr, value in self._headers.items(): response.headers[hdr] = str(value) response.headers['Content-Type'] = str(content_type) if self.obj is not None: body = serializer.serialize(self.obj) if isinstance(body, str): body = body.encode('utf-8') response.body = body return response @property def code(self): """Retrieve the response status.""" return self._code or self._default_code @property def headers(self): """Retrieve the headers.""" return self._headers.copy() def action_peek_json(body): """Determine action to invoke.""" try: decoded = jsonutils.loads(body) except ValueError: msg = _("cannot understand JSON") raise exception.MalformedRequestBody(reason=msg) # Make sure there's exactly one key... if len(decoded) != 1: msg = _("too many body keys") raise exception.MalformedRequestBody(reason=msg) # Return the action and the decoded body... return list(decoded.keys())[0] class ResourceExceptionHandler(object): """Context manager to handle Resource exceptions. Used when processing exceptions generated by API implementation methods (or their extensions). Converts most exceptions to Fault exceptions, with the appropriate logging. 
""" def __enter__(self): return None def __exit__(self, ex_type, ex_value, ex_traceback): if not ex_value: return True if isinstance(ex_value, exception.NotAuthorized): msg = str(ex_value) raise Fault(webob.exc.HTTPForbidden(explanation=msg)) elif isinstance(ex_value, exception.VersionNotFoundForAPIMethod): raise elif isinstance(ex_value, (exception.Invalid, exception.NotFound)): raise Fault(exception.ConvertedException( code=ex_value.code, explanation=str(ex_value))) elif isinstance(ex_value, TypeError): LOG.exception('Exception handling resource:') raise Fault(webob.exc.HTTPBadRequest()) elif isinstance(ex_value, Fault): LOG.info("Fault thrown: %s", ex_value) raise elif isinstance(ex_value, webob.exc.HTTPException): LOG.info("HTTP exception thrown: %s", ex_value) raise Fault(ex_value) # We didn't handle the exception return False class Resource(wsgi.Application): """WSGI app that handles (de)serialization and controller dispatch. WSGI app that reads routing information supplied by RoutesMiddleware and calls the requested action method upon its controller. All controller action methods must accept a 'req' argument, which is the incoming wsgi.Request. If the operation is a PUT or POST, the controller method must also accept a 'body' argument (the deserialized request body). They may raise a webob.exc exception or return a dict, which will be serialized by requested content type. Exceptions derived from webob.exc.HTTPException will be automatically wrapped in Fault() to provide API friendly error responses. """ support_api_request_version = True def __init__(self, controller, action_peek=None, **deserializers): """Initialize Resource. :param controller: object that implement methods created by routes lib :param action_peek: dictionary of routines for peeking into an action request body to determine the desired action """ self.controller = controller default_deserializers = dict(json=JSONDeserializer) default_deserializers.update(deserializers) self.default_deserializers = default_deserializers self.default_serializers = dict(json=JSONDictSerializer) self.action_peek = dict(json=action_peek_json) self.action_peek.update(action_peek or {}) # Copy over the actions dictionary self.wsgi_actions = {} if controller: self.register_actions(controller) # Save a mapping of extensions self.wsgi_extensions = {} self.wsgi_action_extensions = {} def register_actions(self, controller): """Registers controller actions with this resource.""" actions = getattr(controller, 'wsgi_actions', {}) for key, method_name in actions.items(): self.wsgi_actions[key] = getattr(controller, method_name) def register_extensions(self, controller): """Registers controller extensions with this resource.""" extensions = getattr(controller, 'wsgi_extensions', []) for method_name, action_name in extensions: # Look up the extending method extension = getattr(controller, method_name) if action_name: # Extending an action... 
if action_name not in self.wsgi_action_extensions: self.wsgi_action_extensions[action_name] = [] self.wsgi_action_extensions[action_name].append(extension) else: # Extending a regular method if method_name not in self.wsgi_extensions: self.wsgi_extensions[method_name] = [] self.wsgi_extensions[method_name].append(extension) def get_action_args(self, request_environment): """Parse dictionary created by routes library.""" # NOTE(Vek): Check for get_action_args() override in the # controller if hasattr(self.controller, 'get_action_args'): return self.controller.get_action_args(request_environment) try: args = request_environment['wsgiorg.routing_args'][1].copy() except (KeyError, IndexError, AttributeError): return {} try: del args['controller'] except KeyError: pass try: del args['format'] except KeyError: pass return args def get_body(self, request): if len(request.body) == 0: LOG.debug("Empty body provided in request") return None, '' content_type = request.get_content_type() if not content_type: LOG.debug("No Content-Type provided in request") return None, '' return content_type, request.body def deserialize(self, meth, content_type, body): meth_deserializers = getattr(meth, 'wsgi_deserializers', {}) try: mtype = _MEDIA_TYPE_MAP.get(content_type, content_type) if mtype in meth_deserializers: deserializer = meth_deserializers[mtype] else: deserializer = self.default_deserializers[mtype] except (KeyError, TypeError): raise exception.InvalidContentType(content_type=content_type) return deserializer().deserialize(body) def pre_process_extensions(self, extensions, request, action_args): # List of callables for post-processing extensions post = [] for ext in extensions: if inspect.isgeneratorfunction(ext): response = None # If it's a generator function, the part before the # yield is the preprocessing stage try: with ResourceExceptionHandler(): gen = ext(req=request, **action_args) response = next(gen) except Fault as ex: response = ex # We had a response... if response: return response, [] # No response, queue up generator for post-processing post.append(gen) else: # Regular functions only perform post-processing post.append(ext) # Run post-processing in the reverse order return None, reversed(post) def post_process_extensions(self, extensions, resp_obj, request, action_args): for ext in extensions: response = None if inspect.isgenerator(ext): # If it's a generator, run the second half of # processing try: with ResourceExceptionHandler(): response = ext.send(resp_obj) except StopIteration: # Normal exit of generator continue except Fault as ex: response = ex else: # Regular functions get post-processing... try: with ResourceExceptionHandler(): response = ext(req=request, resp_obj=resp_obj, **action_args) except exception.VersionNotFoundForAPIMethod: # If an attached extension (@wsgi.extends) for the # method has no version match its not an error. We # just don't run the extends code continue except Fault as ex: response = ex # We had a response... 
if response: return response return None @webob.dec.wsgify(RequestClass=Request) def __call__(self, request): """WSGI method that controls (de)serialization and method dispatch.""" LOG.info("%(method)s %(url)s", {"method": request.method, "url": request.url}) if self.support_api_request_version: # Set the version of the API requested based on the header try: request.set_api_version_request(request.url) except exception.InvalidAPIVersionString as e: return Fault(webob.exc.HTTPBadRequest( explanation=str(e))) except exception.InvalidGlobalAPIVersion as e: return Fault(webob.exc.HTTPNotAcceptable( explanation=str(e))) # Identify the action, its arguments, and the requested # content type action_args = self.get_action_args(request.environ) action = action_args.pop('action', None) # NOTE(sdague): we filter out InvalidContentTypes early so we # know everything is good from here on out. try: content_type, body = self.get_body(request) accept = request.best_match_content_type() except exception.InvalidContentType: msg = _("Unsupported Content-Type") return Fault(webob.exc.HTTPUnsupportedMediaType(explanation=msg)) # NOTE(Vek): Splitting the function up this way allows for # auditing by external tools that wrap the existing # function. If we try to audit __call__(), we can # run into troubles due to the @webob.dec.wsgify() # decorator. return self._process_stack(request, action, action_args, content_type, body, accept) def _process_stack(self, request, action, action_args, content_type, body, accept): """Implement the processing stack.""" # Get the implementing method try: meth, extensions = self.get_method(request, action, content_type, body) except (AttributeError, TypeError): return Fault(webob.exc.HTTPNotFound()) except KeyError as ex: msg = (_("There is no such action: %s. Verify the request body " "and Content-Type header and try again.") % ex.args[0]) return Fault(webob.exc.HTTPBadRequest(explanation=msg)) except exception.MalformedRequestBody: msg = _("Malformed request body") return Fault(webob.exc.HTTPBadRequest(explanation=msg)) if body: decoded_body = encodeutils.safe_decode(body, errors='ignore') msg = ("Action: '%(action)s', calling method: %(meth)s, body: " "%(body)s") % {'action': action, 'body': decoded_body, 'meth': meth.__name__} LOG.debug(strutils.mask_password(msg)) else: LOG.debug("Calling method '%(meth)s'", {'meth': meth.__name__}) # Now, deserialize the request body... 
try: if content_type: contents = self.deserialize(meth, content_type, body) else: contents = {} except exception.InvalidContentType: msg = _("Unsupported Content-Type") return Fault(webob.exc.HTTPBadRequest(explanation=msg)) except exception.MalformedRequestBody: msg = _("Malformed request body") return Fault(webob.exc.HTTPBadRequest(explanation=msg)) # Update the action args action_args.update(contents) project_id = action_args.pop("project_id", None) context = request.environ.get('cinder.context') if (context and project_id and (project_id != context.project_id)): msg = _("Malformed request url") return Fault(webob.exc.HTTPBadRequest(explanation=msg)) # Run pre-processing extensions response, post = self.pre_process_extensions(extensions, request, action_args) if not response: try: with ResourceExceptionHandler(): action_result = self.dispatch(meth, request, action_args) except Fault as ex: response = ex if not response: # No exceptions; convert action_result into a # ResponseObject resp_obj = None if isinstance(action_result, dict) or action_result is None: resp_obj = ResponseObject(action_result) elif isinstance(action_result, ResponseObject): resp_obj = action_result else: response = action_result # Run post-processing extensions if resp_obj: _set_request_id_header(request, resp_obj) # Do a preserialize to set up the response object serializers = getattr(meth, 'wsgi_serializers', {}) resp_obj._bind_method_serializers(serializers) if hasattr(meth, 'wsgi_code'): resp_obj._default_code = meth.wsgi_code resp_obj.preserialize(accept, self.default_serializers) # Process post-processing extensions response = self.post_process_extensions(post, resp_obj, request, action_args) if resp_obj and not response: response = resp_obj.serialize(request, accept, self.default_serializers) try: msg_dict = dict(url=request.url, status=response.status_int) msg = "%(url)s returned with HTTP %(status)s" except AttributeError as e: msg_dict = dict(url=request.url, e=e) msg = "%(url)s returned a fault: %(e)s" LOG.info(msg, msg_dict) if hasattr(response, 'headers'): for hdr, val in response.headers.items(): # Headers must be utf-8 strings val = utils.convert_str(val) response.headers[hdr] = val if (request.api_version_request and not _is_legacy_endpoint(request)): response.headers[API_VERSION_REQUEST_HEADER] = ( VOLUME_SERVICE + ' ' + request.api_version_request.get_string()) response.headers['Vary'] = API_VERSION_REQUEST_HEADER return response def get_method(self, request, action, content_type, body): """Look up the action-specific method and its extensions.""" # Look up the method try: if not self.controller: meth = getattr(self, action) else: meth = getattr(self.controller, action) except AttributeError as e: with excutils.save_and_reraise_exception(e) as ctxt: if (not self.wsgi_actions or action not in ['action', 'create', 'delete', 'update']): LOG.exception('Get method error.') else: ctxt.reraise = False else: return meth, self.wsgi_extensions.get(action, []) if action == 'action': # OK, it's an action; figure out which action... 
mtype = _MEDIA_TYPE_MAP.get(content_type) action_name = self.action_peek[mtype](body) LOG.debug("Action body: %s", body) else: action_name = action # Look up the action method return (self.wsgi_actions[action_name], self.wsgi_action_extensions.get(action_name, [])) def dispatch(self, method, request, action_args): """Dispatch a call to the action-specific method.""" try: return method(req=request, **action_args) except exception.VersionNotFoundForAPIMethod: # We deliberately don't return any message information # about the exception to the user so it looks as if # the method is simply not implemented. return Fault(webob.exc.HTTPNotFound()) def action(name): """Mark a function as an action. The given name will be taken as the action key in the body. This is also overloaded to allow extensions to provide non-extending definitions of create and delete operations. """ def decorator(func): func.wsgi_action = name return func return decorator def extends(*args, **kwargs): """Indicate a function extends an operation. Can be used as either:: @extends def index(...): pass or as:: @extends(action='resize') def _action_resize(...): pass """ def decorator(func): # Store enough information to find what we're extending func.wsgi_extends = (func.__name__, kwargs.get('action')) return func # If we have positional arguments, call the decorator if args: return decorator(*args) # OK, return the decorator instead return decorator class ControllerMetaclass(type): """Controller metaclass. This metaclass automates the task of assembling a dictionary mapping action keys to method names. """ def __new__(mcs, name, bases, cls_dict): """Adds the wsgi_actions dictionary to the class.""" # Find all actions actions = {} extensions = [] # NOTE(geguileo): We'll keep a list of versioned methods that have been # added by the new metaclass (dictionary in attribute VER_METHOD_ATTR # on Controller class) and all the versioned methods from the different # base classes so we can consolidate them. versioned_methods = [] # NOTE(cyeoh): This resets the VER_METHOD_ATTR attribute # between API controller class creations. This allows us # to use a class decorator on the API methods that doesn't # require naming explicitly what method is being versioned as # it can be implicit based on the method decorated. It is a bit # ugly. 
if bases != (object,) and VER_METHOD_ATTR in vars(Controller): # Get the versioned methods that this metaclass creation has added # to the Controller class versioned_methods.append(getattr(Controller, VER_METHOD_ATTR)) # Remove them so next metaclass has a clean start delattr(Controller, VER_METHOD_ATTR) # start with wsgi actions from base classes for base in bases: actions.update(getattr(base, 'wsgi_actions', {})) # Get the versioned methods that this base has if VER_METHOD_ATTR in vars(base): versioned_methods.append(getattr(base, VER_METHOD_ATTR)) for key, value in cls_dict.items(): if not isinstance(value, abc.Callable): continue if getattr(value, 'wsgi_action', None): actions[value.wsgi_action] = key elif getattr(value, 'wsgi_extends', None): extensions.append(value.wsgi_extends) # Add the actions and extensions to the class dict cls_dict['wsgi_actions'] = actions cls_dict['wsgi_extensions'] = extensions if versioned_methods: cls_dict[VER_METHOD_ATTR] = mcs.consolidate_vers(versioned_methods) return super(ControllerMetaclass, mcs).__new__(mcs, name, bases, cls_dict) @staticmethod def consolidate_vers(versioned_methods): """Consolidates a list of versioned methods dictionaries.""" if not versioned_methods: return {} result = versioned_methods.pop(0) for base_methods in versioned_methods: for name, methods in base_methods.items(): method_list = result.setdefault(name, []) method_list.extend(methods) method_list.sort(reverse=True) return result class Controller(object, metaclass=ControllerMetaclass): """Default controller.""" _view_builder_class = None def __init__(self, view_builder=None): """Initialize controller with a view builder instance.""" if view_builder: self._view_builder = view_builder elif self._view_builder_class: self._view_builder = self._view_builder_class() else: self._view_builder = None def __getattribute__(self, key): def version_select(*args, **kwargs): """Select and call the matching version of the specified method. Look for the method which matches the name supplied and version constraints and calls it with the supplied arguments. :returns: Returns the result of the method called :raises VersionNotFoundForAPIMethod: if there is no method which matches the name and version constraints """ # The first arg to all versioned methods is always the request # object. The version for the request is attached to the # request object if len(args) == 0: version_request = kwargs['req'].api_version_request else: version_request = args[0].api_version_request func_list = self.versioned_methods[key] for func in func_list: if version_request.matches_versioned_method(func): # Update the version_select wrapper function so # other decorator attributes like wsgi.response # are still respected. functools.update_wrapper(version_select, func.func) return func.func(self, *args, **kwargs) # No version match raise exception.VersionNotFoundForAPIMethod( version=version_request) try: version_meth_dict = object.__getattribute__(self, VER_METHOD_ATTR) except AttributeError: # No versioning on this class return object.__getattribute__(self, key) if (version_meth_dict and key in object.__getattribute__(self, VER_METHOD_ATTR)): return version_select return object.__getattribute__(self, key) # NOTE(cyeoh): This decorator MUST appear first (the outermost # decorator) on an API method for it to work correctly @classmethod def api_version(cls, min_ver, max_ver=None, experimental=False): """Decorator for versioning API methods. 
Add the decorator to any method which takes a request object as the first parameter and belongs to a class which inherits from wsgi.Controller. :param min_ver: string representing minimum version :param max_ver: optional string representing maximum version """ def decorator(f): obj_min_ver = api_version.APIVersionRequest(min_ver) if max_ver: obj_max_ver = api_version.APIVersionRequest(max_ver) else: obj_max_ver = api_version.APIVersionRequest() # Add to list of versioned methods registered func_name = f.__name__ new_func = versioned_method.VersionedMethod( func_name, obj_min_ver, obj_max_ver, experimental, f) func_dict = getattr(cls, VER_METHOD_ATTR, {}) if not func_dict: setattr(cls, VER_METHOD_ATTR, func_dict) func_list = func_dict.get(func_name, []) if not func_list: func_dict[func_name] = func_list func_list.append(new_func) # Ensure the list is sorted by minimum version (reversed) # so later when we work through the list in order we find # the method which has the latest version which supports # the version requested. # TODO(cyeoh): Add check to ensure that there are no overlapping # ranges of valid versions as that is ambiguous func_list.sort(reverse=True) # NOTE(geguileo): To avoid PEP8 errors when defining multiple # microversions of the same method in the same class we add the # api_version decorator to the function so it can be used instead, # thus preventing method redefinition errors. f.api_version = cls.api_version return f return decorator @staticmethod def assert_valid_body(body, entity_name): fail_msg = _( "Missing required element '%s' in request body.") % entity_name if not (body and entity_name in body): raise webob.exc.HTTPBadRequest(explanation=fail_msg) def is_dict(d): try: d.get(None) return True except AttributeError: return False if not is_dict(body[entity_name]): raise webob.exc.HTTPBadRequest(explanation=fail_msg) @staticmethod def validate_name_and_description(body, check_length=True): for attribute in ['name', 'description', 'display_name', 'display_description']: value = body.get(attribute) if value is not None: if isinstance(value, str): body[attribute] = value.strip() if check_length: try: utils.check_string_length(body[attribute], attribute, min_length=0, max_length=255) except exception.InvalidInput as error: raise webob.exc.HTTPBadRequest(explanation=error.msg) @staticmethod def validate_string_length(value, entity_name, min_length=0, max_length=None, remove_whitespaces=False): """Check the length of specified string. 
:param value: the value of the string :param entity_name: the name of the string :param min_length: the min_length of the string :param max_length: the max_length of the string :param remove_whitespaces: True if trimming whitespaces is needed else False """ if isinstance(value, str) and remove_whitespaces: value = value.strip() try: utils.check_string_length(value, entity_name, min_length=min_length, max_length=max_length) except exception.InvalidInput as error: raise webob.exc.HTTPBadRequest(explanation=error.msg) class Fault(webob.exc.HTTPException): """Wrap webob.exc.HTTPException to provide API friendly response.""" _fault_names = {HTTPStatus.BAD_REQUEST: "badRequest", HTTPStatus.UNAUTHORIZED: "unauthorized", HTTPStatus.FORBIDDEN: "forbidden", HTTPStatus.NOT_FOUND: "itemNotFound", HTTPStatus.METHOD_NOT_ALLOWED: "badMethod", HTTPStatus.CONFLICT: "conflictingRequest", HTTPStatus.REQUEST_ENTITY_TOO_LARGE: "overLimit", HTTPStatus.UNSUPPORTED_MEDIA_TYPE: "badMediaType", HTTPStatus.NOT_IMPLEMENTED: "notImplemented", HTTPStatus.SERVICE_UNAVAILABLE: "serviceUnavailable"} def __init__(self, exception): """Create a Fault for the given webob.exc.exception.""" self.wrapped_exc = exception self.status_int = exception.status_int @webob.dec.wsgify(RequestClass=Request) def __call__(self, req): """Generate a WSGI response based on the exception passed to ctor.""" # Replace the body with fault details. locale = req.best_match_language() code = self.wrapped_exc.status_int fault_name = self._fault_names.get(code, "computeFault") explanation = self.wrapped_exc.explanation fault_data = { fault_name: { 'code': code, 'message': i18n.translate(explanation, locale)}} if code == HTTPStatus.REQUEST_ENTITY_TOO_LARGE: retry = self.wrapped_exc.headers.get('Retry-After', None) if retry: fault_data[fault_name]['retryAfter'] = retry if req.api_version_request and not _is_legacy_endpoint(req): self.wrapped_exc.headers[API_VERSION_REQUEST_HEADER] = ( VOLUME_SERVICE + ' ' + req.api_version_request.get_string()) self.wrapped_exc.headers['Vary'] = API_VERSION_REQUEST_HEADER content_type = req.best_match_content_type() serializer = { 'application/json': JSONDictSerializer(), }[content_type] body = serializer.serialize(fault_data) if isinstance(body, str): body = body.encode('utf-8') self.wrapped_exc.body = body self.wrapped_exc.content_type = content_type _set_request_id_header(req, self.wrapped_exc.headers) return self.wrapped_exc def __str__(self): return self.wrapped_exc.__str__() def _set_request_id_header(req, headers): context = req.environ.get('cinder.context') if context: headers['x-compute-request-id'] = context.request_id def _is_legacy_endpoint(request): version_str = request.api_version_request.get_string() return '1.0' in version_str or '2.0' in version_str class OverLimitFault(webob.exc.HTTPException): """Rate-limited request response.""" def __init__(self, message, details, retry_time): """Initialize new `OverLimitFault` with relevant information.""" hdrs = OverLimitFault._retry_after(retry_time) self.wrapped_exc = webob.exc.HTTPRequestEntityTooLarge(headers=hdrs) self.content = { "overLimitFault": { "code": self.wrapped_exc.status_int, "message": message, "details": details, }, } @staticmethod def _retry_after(retry_time): delay = int(math.ceil(retry_time - time.time())) retry_after = delay if delay > 0 else 0 headers = {'Retry-After': '%d' % retry_after} return headers @webob.dec.wsgify(RequestClass=Request) def __call__(self, request): """Serializes the wrapped exception conforming to our error format.""" 
content_type = request.best_match_content_type() def translate(msg): locale = request.best_match_language() return i18n.translate(msg, locale) self.content['overLimitFault']['message'] = \ translate(self.content['overLimitFault']['message']) self.content['overLimitFault']['details'] = \ translate(self.content['overLimitFault']['details']) serializer = { 'application/json': JSONDictSerializer(), }[content_type] content = serializer.serialize(self.content) self.wrapped_exc.body = content return self.wrapped_exc ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1759315577.047118 cinder-27.0.0/cinder/api/schemas/0000775000175000017500000000000000000000000016527 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/api/schemas/__init__.py0000664000175000017500000000000000000000000020626 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/api/schemas/admin_actions.py0000664000175000017500000001063200000000000021713 0ustar00zuulzuul00000000000000# Copyright (C) 2018 NTT DATA # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Schema for V3 admin_actions API. 
""" import copy from cinder.api.validation import parameter_types reset = { 'type': 'object', 'properties': { 'os-reset_status': { 'type': 'object', 'format': 'validate_volume_reset_body', 'properties': { 'status': {'type': ['string', 'null'], 'format': 'volume_status'}, 'attach_status': {'type': ['string', 'null'], 'format': 'volume_attach_status'}, 'migration_status': {'type': ['string', 'null'], 'format': 'volume_migration_status'}, }, 'additionalProperties': False, }, }, 'required': ['os-reset_status'], 'additionalProperties': False, } force_detach = { 'type': 'object', 'properties': { 'os-force_detach': { 'type': 'object', 'properties': { 'connector': {'type': ['string', 'object', 'null']}, 'attachment_id': {'type': ['string', 'null']} }, 'additionalProperties': False, }, }, 'required': ['os-force_detach'], 'additionalProperties': False, } migrate_volume = { 'type': 'object', 'properties': { 'os-migrate_volume': { 'type': 'object', 'properties': { 'host': {'type': 'string', 'maxLength': 255}, 'force_host_copy': parameter_types.boolean, 'lock_volume': parameter_types.boolean, }, 'required': ['host'], 'additionalProperties': False, }, }, 'required': ['os-migrate_volume'], 'additionalProperties': False, } migrate_volume_v316 = { 'type': 'object', 'properties': { 'os-migrate_volume': { 'type': 'object', 'properties': { 'host': {'type': ['string', 'null'], 'maxLength': 255}, 'force_host_copy': parameter_types.boolean, 'lock_volume': parameter_types.boolean, 'cluster': parameter_types.name_allow_zero_min_length, }, 'additionalProperties': False, }, }, 'required': ['os-migrate_volume'], 'additionalProperties': False, } migrate_volume_completion = { 'type': 'object', 'properties': { 'os-migrate_volume_completion': { 'type': 'object', 'properties': { 'new_volume': parameter_types.uuid, 'error': {'type': ['string', 'null', 'boolean']}, }, 'required': ['new_volume'], 'additionalProperties': False, }, }, 'required': ['os-migrate_volume_completion'], 'additionalProperties': False, } extend_volume_completion = { 'type': 'object', 'properties': { 'os-extend_volume_completion': { 'type': 'object', 'properties': { 'error': {'type': ['string', 'null', 'boolean']}, }, 'additionalProperties': False, }, }, 'required': ['os-extend_volume_completion'], 'additionalProperties': False, } reset_status_backup = { 'type': 'object', 'properties': { 'os-reset_status': { 'type': 'object', 'properties': { 'status': {'type': 'string', 'format': 'backup_status'}, }, 'required': ['status'], 'additionalProperties': False, }, }, 'required': ['os-reset_status'], 'additionalProperties': False, } reset_status_snapshot = copy.deepcopy(reset_status_backup) reset_status_snapshot['properties']['os-reset_status'][ 'properties']['status']['format'] = 'snapshot_status' ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/api/schemas/attachments.py0000664000175000017500000000342300000000000021416 0ustar00zuulzuul00000000000000# Copyright (C) 2017 NTT DATA # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. """ Schema for V3 Attachments API. """ import copy from cinder.api.validation import parameter_types create = { 'type': 'object', 'properties': { 'type': 'object', 'attachment': { 'type': 'object', 'properties': { 'instance_uuid': parameter_types.uuid, 'connector': {'type': ['object', 'null']}, 'volume_uuid': parameter_types.uuid, }, 'required': ['volume_uuid'], 'additionalProperties': False, }, }, 'required': ['attachment'], 'additionalProperties': False, } update = { 'type': 'object', 'properties': { 'type': 'object', 'attachment': { 'type': 'object', 'properties': { 'connector': {'type': 'object', 'minProperties': 1}, }, 'required': ['connector'], 'additionalProperties': False, }, }, 'required': ['attachment'], 'additionalProperties': False, } create_v354 = copy.deepcopy(create) create_v354['properties']['attachment']['properties']['mode'] = ( {'type': 'string', 'enum': ['rw', 'ro']}) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/api/schemas/backups.py0000664000175000017500000000640300000000000020534 0ustar00zuulzuul00000000000000# Copyright (C) 2017 NTT DATA # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Schema for V3 Backups API. 
""" import copy from cinder.api.validation import parameter_types create = { 'type': 'object', 'properties': { 'type': 'object', 'backup': { 'type': 'object', 'properties': { 'volume_id': parameter_types.uuid, 'container': parameter_types.container, 'description': parameter_types.description, 'incremental': parameter_types.boolean, 'force': parameter_types.boolean, 'name': parameter_types.name_allow_zero_min_length, 'snapshot_id': parameter_types.uuid_allow_null, }, 'required': ['volume_id'], 'additionalProperties': False, }, }, 'required': ['backup'], 'additionalProperties': False, } create_backup_v343 = copy.deepcopy(create) create_backup_v343['properties']['backup']['properties'][ 'metadata'] = parameter_types.metadata_allows_null create_backup_v351 = copy.deepcopy(create_backup_v343) create_backup_v351['properties']['backup']['properties'][ 'availability_zone'] = parameter_types.nullable_string update = { 'type': 'object', 'properties': { 'type': 'object', 'backup': { 'type': ['object', 'null'], 'properties': { 'name': parameter_types.name_allow_zero_min_length, 'description': parameter_types.description, }, 'additionalProperties': False, }, }, 'required': ['backup'], 'additionalProperties': False, } update_backup_v343 = copy.deepcopy(update) update_backup_v343['properties']['backup']['properties'][ 'metadata'] = parameter_types.extra_specs restore = { 'type': 'object', 'properties': { 'type': 'object', 'restore': { 'type': ['object', 'null'], 'properties': { 'name': parameter_types.name_allow_zero_min_length, 'volume_id': parameter_types.uuid_allow_null }, 'additionalProperties': False, }, }, 'required': ['restore'], 'additionalProperties': False, } import_record = { 'type': 'object', 'properties': { 'type': 'object', 'backup-record': { 'type': 'object', 'properties': { 'backup_service': parameter_types.backup_service, 'backup_url': parameter_types.backup_url }, 'required': ['backup_service', 'backup_url'], 'additionalProperties': False, }, }, 'required': ['backup-record'], 'additionalProperties': False, } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/api/schemas/clusters.py0000664000175000017500000000236400000000000020752 0ustar00zuulzuul00000000000000# Copyright (C) 2018 NTT DATA # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Schema for V3 Clusters API. 
""" from cinder.api.validation import parameter_types disable_cluster = { 'type': 'object', 'properties': { 'name': parameter_types.name, 'binary': parameter_types.nullable_string, 'disabled_reason': { 'type': ['string', 'null'], 'format': 'disabled_reason' } }, 'required': ['name'], 'additionalProperties': False, } enable_cluster = { 'type': 'object', 'properties': { 'name': parameter_types.name, 'binary': parameter_types.nullable_string }, 'required': ['name'], 'additionalProperties': False, } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/api/schemas/default_types.py0000664000175000017500000000202000000000000021743 0ustar00zuulzuul00000000000000# Copyright 2020 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Schema for V3 Default types API. """ from cinder.api.validation import parameter_types create_or_update = { 'type': 'object', 'properties': { 'default_type': { 'type': 'object', 'properties': { 'volume_type': parameter_types.name, }, 'required': ['volume_type'], 'additionalProperties': False, }, } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/api/schemas/group_snapshots.py0000664000175000017500000000321000000000000022333 0ustar00zuulzuul00000000000000# Copyright (C) 2017 NTT DATA # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Schema for V3 Group Snapshots API. """ from cinder.api.validation import parameter_types create = { 'type': 'object', 'properties': { 'type': 'object', 'group_snapshot': { 'type': 'object', 'properties': { 'group_id': parameter_types.uuid, 'name': parameter_types.name_allow_zero_min_length, 'description': parameter_types.description, }, 'required': ['group_id'], 'additionalProperties': False, }, }, 'required': ['group_snapshot'], 'additionalProperties': False, } reset_status = { 'type': 'object', 'properties': { 'type': 'object', 'reset_status': { 'type': 'object', 'properties': { 'status': parameter_types.group_snapshot_status, }, 'required': ['status'], 'additionalProperties': False, }, }, 'required': ['reset_status'], 'additionalProperties': False, } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/api/schemas/group_specs.py0000664000175000017500000000234500000000000021436 0ustar00zuulzuul00000000000000# Copyright (C) 2017 NTT DATA # All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import copy group_specs_with_no_spaces_key_and_value_null = { 'type': 'object', 'patternProperties': { '^[a-zA-Z0-9-_:.]{1,255}$': { 'type': ['string', 'null'], 'maxLength': 255 } }, 'additionalProperties': False } create = { 'type': 'object', 'properties': { 'type': 'object', 'group_specs': group_specs_with_no_spaces_key_and_value_null, }, 'required': ['group_specs'], 'additionalProperties': False, } update = copy.deepcopy(group_specs_with_no_spaces_key_and_value_null) update.update({ 'minProperties': 1, 'maxProperties': 1 }) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/api/schemas/group_types.py0000664000175000017500000000336300000000000021466 0ustar00zuulzuul00000000000000# Copyright (C) 2017 NTT DATA # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Schema for V3 Group types API. """ from cinder.api.validation import parameter_types create = { 'type': 'object', 'properties': { 'type': 'object', 'group_type': { 'type': 'object', 'properties': { 'name': parameter_types.name, 'description': parameter_types.description, 'is_public': parameter_types.boolean, 'group_specs': parameter_types.metadata_allows_null, }, 'required': ['name'], 'additionalProperties': False, }, }, 'required': ['group_type'], 'additionalProperties': False, } update = { 'type': 'object', 'properties': { 'type': 'object', 'group_type': { 'type': 'object', 'properties': { 'name': parameter_types.name_allow_zero_min_length, 'description': parameter_types.description, 'is_public': parameter_types.boolean, }, 'additionalProperties': False, }, }, 'required': ['group_type'], 'additionalProperties': False, } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/api/schemas/groups.py0000664000175000017500000001153400000000000020424 0ustar00zuulzuul00000000000000# Copyright (C) 2018 NTT DATA # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. """ Schema for V3 Generic Volume Groups API. """ from cinder.api.validation import parameter_types create = { 'type': 'object', 'properties': { 'group': { 'type': 'object', 'properties': { 'description': parameter_types.description, 'group_type': { 'type': 'string', 'format': 'group_type' }, 'name': parameter_types.name_allow_zero_min_length, 'volume_types': { 'type': 'array', 'minItems': 1, 'items': { 'type': 'string', 'maxLength': 255, }, 'uniqueItems': True }, 'availability_zone': { 'type': ['string', 'null'], 'format': 'availability_zone' }, }, 'required': ['group_type', 'volume_types'], 'additionalProperties': False, }, }, 'required': ['group'], 'additionalProperties': False, } create_from_source = { 'type': 'object', 'properties': { 'create-from-src': { 'type': 'object', 'properties': { 'description': parameter_types.description, 'name': parameter_types.name_allow_zero_min_length, 'source_group_id': parameter_types.uuid, 'group_snapshot_id': parameter_types.uuid, }, 'oneOf': [ {'required': ['group_snapshot_id']}, {'required': ['source_group_id']} ], 'additionalProperties': False, }, }, 'required': ['create-from-src'], 'additionalProperties': False, } delete = { 'type': 'object', 'properties': { 'delete': { 'type': 'object', 'properties': { 'delete-volumes': parameter_types.boolean, }, 'additionalProperties': False, }, }, 'required': ['delete'], 'additionalProperties': False, } reset_status = { 'type': 'object', 'properties': { 'reset_status': { 'type': 'object', 'properties': { 'status': { 'type': 'string', 'format': 'group_status' }, }, 'required': ['status'], 'additionalProperties': False, }, }, 'required': ['reset_status'], 'additionalProperties': False, } update = { 'type': 'object', 'properties': { 'group': { 'type': 'object', 'properties': { 'description': parameter_types.description, 'name': parameter_types.name_allow_zero_min_length, 'add_volumes': parameter_types.description, 'remove_volumes': parameter_types.description, }, 'anyOf': [ {'required': ['name']}, {'required': ['description']}, {'required': ['add_volumes']}, {'required': ['remove_volumes']}, ], 'additionalProperties': False, }, }, 'required': ['group'], 'additionalProperties': False, } failover_replication = { 'type': 'object', 'properties': { 'failover_replication': { 'type': 'object', 'properties': { 'allow_attached_volume': parameter_types.boolean, 'secondary_backend_id': parameter_types.nullable_string, }, 'additionalProperties': False, }, }, 'required': ['failover_replication'], 'additionalProperties': False, } list_replication = { 'type': 'object', 'properties': { 'list_replication_targets': {'type': 'object'} }, 'required': ['list_replication_targets'], 'additionalProperties': False, } enable_replication = { 'type': 'object', 'properties': { 'enable_replication': {'type': 'object'} }, 'required': ['enable_replication'], 'additionalProperties': False, } disable_replication = { 'type': 'object', 'properties': { 'disable_replication': {'type': 'object'} }, 'required': ['disable_replication'], 'additionalProperties': False, } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/api/schemas/qos_specs.py0000664000175000017500000000320200000000000021075 0ustar00zuulzuul00000000000000# Copyright 2017 NTT DATA # All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from cinder.api.validation import parameter_types create = { 'type': 'object', 'properties': { 'type': 'object', 'qos_specs': { 'type': 'object', 'properties': { 'name': { 'type': 'string', 'format': 'name_skip_leading_trailing_spaces' }, }, 'required': ['name'], 'additionalProperties': True, }, }, 'required': ['qos_specs'], 'additionalProperties': False, } set = { 'type': 'object', 'properties': { 'qos_specs': parameter_types.extra_specs_with_null }, 'required': ['qos_specs'], 'additionalProperties': False, } unset = { 'type': 'object', 'properties': { 'keys': { 'type': 'array', 'items': { 'type': 'string', 'minLength': 1, 'maxLength': 255 }, 'uniqueItems': True }, }, 'required': ['keys'], 'additionalProperties': False, } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/api/schemas/quota_classes.py0000664000175000017500000000167200000000000021755 0ustar00zuulzuul00000000000000# Copyright (C) 2017 NTT DATA # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Schema for V3 Quota classes API. """ from cinder.api.validation import parameter_types update_quota_class = { 'type': 'object', 'properties': { 'type': 'object', 'quota_class_set': parameter_types.quota_class_set }, 'required': ['quota_class_set'], 'additionalProperties': False, } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/api/schemas/quotas.py0000664000175000017500000000160700000000000020421 0ustar00zuulzuul00000000000000# Copyright (C) 2018 NTT DATA # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Schema for V3 Quotas API. 
""" update_quota = { 'type': 'object', 'properties': { 'quota_set': {'type': 'object', 'minProperties': 1, 'format': 'quota_set'} }, 'required': ['quota_set'], 'additionalProperties': False, } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/api/schemas/scheduler_hints.py0000664000175000017500000000601700000000000022270 0ustar00zuulzuul00000000000000# Copyright (C) 2018 NTT DATA # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Schema for V3 scheduler_hints API. """ from cinder.api.validation import parameter_types create = { 'type': 'object', 'properties': { 'OS-SCH-HNT:scheduler_hints': { 'type': ['object', 'null'], 'properties': { 'local_to_instance': parameter_types.optional_uuid, 'different_host': { # NOTE: The value of 'different_host' is the set of volume # uuids where a new volume is scheduled on a different # host. A user can specify one volume as string parameter # and should specify multiple volumes as array parameter # instead. 'oneOf': [ { 'type': 'string', 'format': 'uuid' }, { 'type': 'array', 'items': parameter_types.uuid, 'uniqueItems': True, } ] }, 'same_host': { # NOTE: The value of 'same_host' is the set of volume # uuids where a new volume is scheduled on the same host. # A user can specify one volume as string parameter and # should specify multiple volumes as array parameter # instead. 'oneOf': [ { 'type': 'string', 'format': 'uuid' }, { 'type': 'array', 'items': parameter_types.uuid, 'uniqueItems': True, } ] }, 'query': { # NOTE: The value of 'query' is converted to dict data with # jsonutils.loads() and used for filtering hosts. 'type': ['string', 'object'], }, }, # NOTE: As this Mail: # http://lists.openstack.org/pipermail/openstack-dev/2015-June/067996.html # pointed out the limit the scheduler-hints in the API is # problematic. So relax it. 'additionalProperties': True }, }, } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/api/schemas/services.py0000664000175000017500000000450100000000000020724 0ustar00zuulzuul00000000000000# Copyright 2018 NTT DATA # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
import copy from cinder.api.validation import parameter_types enable_and_disable = { 'type': 'object', 'properties': { 'binary': {'type': 'string', 'minLength': 1, 'maxLength': 255}, 'host': parameter_types.cinder_host, 'cluster': parameter_types.nullable_string, 'service': {'type': 'string', 'minLength': 1, 'maxLength': 255}, }, 'anyOf': [ {'required': ['binary']}, {'required': ['service']} ], 'additionalProperties': False, } disable_log_reason = copy.deepcopy(enable_and_disable) disable_log_reason['properties'][ 'disabled_reason'] = {'type': 'string', 'minLength': 1, 'maxLength': 255, 'format': 'disabled_reason'} set_log = { 'type': 'object', 'properties': { 'binary': parameter_types.binary, 'server': parameter_types.nullable_string, 'prefix': parameter_types.nullable_string, 'level': {'type': ['string', 'null'], 'format': 'level'} }, 'additionalProperties': False, } get_log = { 'type': 'object', 'properties': { 'binary': parameter_types.binary, 'server': parameter_types.nullable_string, 'prefix': parameter_types.nullable_string, }, 'additionalProperties': False, } freeze_and_thaw = { 'type': 'object', 'properties': { 'cluster': parameter_types.nullable_string, 'host': parameter_types.cinder_host, }, 'additionalProperties': False, } failover_host = { 'type': 'object', 'properties': { 'host': parameter_types.cinder_host, 'backend_id': parameter_types.nullable_string, 'cluster': parameter_types.nullable_string, }, 'additionalProperties': False, } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/api/schemas/snapshot_actions.py0000664000175000017500000000215600000000000022464 0ustar00zuulzuul00000000000000# Copyright (C) 2018 NTT DATA # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Schema for V3 snapshot actions API. """ update_snapshot_status = { 'type': 'object', 'properties': { 'os-update_snapshot_status': { 'type': 'object', 'properties': { 'status': {'type': 'string'}, 'progress': {'format': 'progress'}, }, 'required': ['status'], 'additionalProperties': False, }, }, 'required': ['os-update_snapshot_status'], 'additionalProperties': False, } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/api/schemas/snapshot_manage.py0000664000175000017500000000254300000000000022254 0ustar00zuulzuul00000000000000# Copyright (C) 2017 NTT DATA # All Rights Reserved. # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Schema for V3 snapshot_manage API. 
""" from cinder.api.validation import parameter_types create = { 'type': 'object', 'properties': { 'type': 'object', 'snapshot': { 'type': 'object', 'properties': { "description": parameter_types.description, "metadata": parameter_types.metadata_allows_null, "name": parameter_types.name_allow_zero_min_length, "volume_id": parameter_types.uuid, "ref": {'type': ['object', 'null', 'string']}, }, 'required': ['ref', 'volume_id'], 'additionalProperties': False, }, }, 'required': ['snapshot'], 'additionalProperties': False, } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/api/schemas/snapshots.py0000664000175000017500000000430100000000000021121 0ustar00zuulzuul00000000000000# Copyright (C) 2017 NTT DATA # All Rights Reserved. # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Schema for V3 Snapshots API. """ from cinder.api.validation import parameter_types create = { 'type': 'object', 'properties': { 'type': 'object', 'snapshot': { 'type': 'object', 'properties': { 'name': parameter_types.name_allow_zero_min_length, 'display_name': parameter_types.name_allow_zero_min_length, 'description': parameter_types.description, 'volume_id': parameter_types.uuid_allow_null, 'force': parameter_types.boolean, 'metadata': parameter_types.metadata_allows_null, }, 'required': ['volume_id'], 'additionalProperties': False, }, }, 'required': ['snapshot'], 'additionalProperties': False, } update = { 'type': 'object', 'properties': { 'type': 'object', 'snapshot': { 'type': 'object', 'properties': { 'name': parameter_types.name_allow_zero_min_length, 'description': parameter_types.description, 'display_name': parameter_types.name_allow_zero_min_length, 'display_description': parameter_types.description, }, 'additionalProperties': False, 'anyOf': [ {'required': ['name']}, {'required': ['description']}, {'required': ['display_name']}, {'required': ['display_description']} ] }, }, 'required': ['snapshot'], 'additionalProperties': False, } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/api/schemas/types_extra_specs.py0000664000175000017500000000206600000000000022651 0ustar00zuulzuul00000000000000# Copyright (C) 2018 NTT DATA # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Schema for V3 types_extra_specs API. 
""" import copy from cinder.api.validation import parameter_types create = { 'type': 'object', 'properties': { 'extra_specs': parameter_types.extra_specs_with_no_spaces_key }, 'required': ['extra_specs'], 'additionalProperties': False, } update = copy.deepcopy(parameter_types.extra_specs_with_no_spaces_key) update.update({ 'minProperties': 1, 'maxProperties': 1 }) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/api/schemas/volume_actions.py0000664000175000017500000001373000000000000022134 0ustar00zuulzuul00000000000000# Copyright (C) 2018 NTT DATA # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Schema for V3 volume_actions API. """ import copy from cinder.api.validation import parameter_types container_format = parameter_types.description extend = { 'type': 'object', 'properties': { 'os-extend': { 'type': 'object', 'properties': { 'new_size': parameter_types.volume_size, }, 'required': ['new_size'], 'additionalProperties': False, }, }, 'required': ['os-extend'], 'additionalProperties': False, } attach = { 'type': 'object', 'properties': { 'os-attach': { 'type': 'object', 'properties': { 'instance_uuid': parameter_types.uuid, 'mountpoint': { 'type': 'string', 'minLength': 1, 'maxLength': 255 }, 'host_name': {'type': 'string', 'maxLength': 255}, 'mode': {'type': 'string', 'enum': ['rw', 'ro']} }, 'required': ['mountpoint'], 'anyOf': [{'required': ['instance_uuid']}, {'required': ['host_name']}], 'additionalProperties': False, }, }, 'required': ['os-attach'], 'additionalProperties': False, } detach = { 'type': 'object', 'properties': { 'os-detach': { 'type': ['object', 'null'], 'properties': { # NOTE(mriedem): This allows null for backward compatibility. 
'attachment_id': parameter_types.uuid_allow_null, }, 'additionalProperties': False, }, }, 'required': ['os-detach'], 'additionalProperties': False, } retype = { 'type': 'object', 'properties': { 'os-retype': { 'type': 'object', 'properties': { 'new_type': {'type': 'string'}, 'migration_policy': { 'type': ['string', 'null'], 'enum': ['on-demand', 'never']}, }, 'required': ['new_type'], 'additionalProperties': False, }, }, 'required': ['os-retype'], 'additionalProperties': False, } set_bootable = { 'type': 'object', 'properties': { 'os-set_bootable': { 'type': 'object', 'properties': { 'bootable': parameter_types.boolean }, 'required': ['bootable'], 'additionalProperties': False, }, }, 'required': ['os-set_bootable'], 'additionalProperties': False, } volume_upload_image = { 'type': 'object', 'properties': { 'os-volume_upload_image': { 'type': 'object', 'properties': { 'image_name': { 'type': 'string', 'minLength': 1, 'maxLength': 255 }, 'force': parameter_types.boolean, 'disk_format': { 'type': 'string', 'enum': ['raw', 'vmdk', 'vdi', 'qcow2', 'vhd', 'vhdx', 'ploop'] }, 'container_format': container_format }, 'required': ['image_name'], 'additionalProperties': False, }, }, 'required': ['os-volume_upload_image'], 'additionalProperties': False, } volume_upload_image_v31 = copy.deepcopy(volume_upload_image) volume_upload_image_v31['properties']['os-volume_upload_image']['properties'][ 'visibility'] = {'type': 'string', 'enum': ['community', 'public', 'private', 'shared']} volume_upload_image_v31['properties']['os-volume_upload_image']['properties'][ 'protected'] = parameter_types.boolean initialize_connection = { 'type': 'object', 'properties': { 'os-initialize_connection': { 'type': 'object', 'properties': { 'connector': {'type': ['object', 'string']}, }, 'required': ['connector'], 'additionalProperties': False, }, }, 'required': ['os-initialize_connection'], 'additionalProperties': False, } terminate_connection = { 'type': 'object', 'properties': { 'os-terminate_connection': { 'type': 'object', 'properties': { 'connector': {'type': ['string', 'object', 'null']}, }, 'required': ['connector'], 'additionalProperties': False, }, }, 'required': ['os-terminate_connection'], 'additionalProperties': False, } volume_readonly_update = { 'type': 'object', 'properties': { 'os-update_readonly_flag': { 'type': 'object', 'properties': { 'readonly': parameter_types.boolean }, 'required': ['readonly'], 'additionalProperties': False, }, }, 'required': ['os-update_readonly_flag'], 'additionalProperties': False, } reimage = { 'type': 'object', 'properties': { 'os-reimage': { 'type': 'object', 'properties': { 'image_id': parameter_types.uuid, 'reimage_reserved': parameter_types.boolean, }, 'required': ['image_id'], 'additionalProperties': False, }, }, 'required': ['os-reimage'], 'additionalProperties': False, } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/api/schemas/volume_image_metadata.py0000664000175000017500000000312700000000000023415 0ustar00zuulzuul00000000000000# Copyright (C) 2018 NTT DATA # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Schema for V3 volume image metadata API. """ from cinder.api.validation import parameter_types set_image_metadata = { 'type': 'object', 'properties': { 'os-set_image_metadata': { 'type': 'object', 'properties': { 'metadata': parameter_types.image_metadata, }, 'required': ['metadata'], 'additionalProperties': False, }, }, 'required': ['os-set_image_metadata'], 'additionalProperties': False, } unset_image_metadata = { 'type': 'object', 'properties': { 'os-unset_image_metadata': { 'type': 'object', 'properties': { 'key': {'type': 'string', 'minLength': 1, 'maxLength': 255}, }, 'required': ['key'], 'additionalProperties': False, }, }, 'required': ['os-unset_image_metadata'], 'additionalProperties': False, } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/api/schemas/volume_manage.py0000664000175000017500000000342400000000000021723 0ustar00zuulzuul00000000000000# Copyright (C) 2018 NTT DATA # All Rights Reserved. # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Schema for V3 volume manage API. """ import copy from cinder.api.validation import parameter_types volume_manage_create = { 'type': 'object', 'properties': { 'volume': { 'type': 'object', 'properties': { "description": parameter_types.description, "availability_zone": parameter_types. name_allow_zero_min_length, "bootable": parameter_types.boolean, "volume_type": parameter_types.name_allow_zero_min_length, "name": parameter_types.name_allow_zero_min_length, "host": parameter_types.cinder_host, "ref": {'type': ['object', 'string']}, "metadata": parameter_types.metadata_allows_null, }, 'required': ['ref'], 'additionalProperties': False, }, }, 'required': ['volume'], 'additionalProperties': False, } volume_manage_create_v316 = copy.deepcopy(volume_manage_create) volume_manage_create_v316['properties']['volume']['properties'][ 'cluster'] = {'type': ['string', 'null'], 'minLength': 0, 'maxLength': 255} ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/api/schemas/volume_metadata.py0000664000175000017500000000243000000000000022247 0ustar00zuulzuul00000000000000# Copyright (C) 2017 NTT DATA # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Schema for V3 Volume metadata API. """ import copy from cinder.api.validation import parameter_types metadata_restricted_properties = copy.deepcopy(parameter_types.extra_specs) metadata_restricted_properties.update({ 'minProperties': 1, 'maxProperties': 1 }) create = { 'type': 'object', 'properties': { 'type': 'object', 'metadata': parameter_types.extra_specs, }, 'required': ['metadata'], 'additionalProperties': False, } update = { 'type': 'object', 'properties': { 'type': 'object', 'meta': metadata_restricted_properties, }, 'required': ['meta'], 'additionalProperties': False, } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/api/schemas/volume_transfer.py0000664000175000017500000000446600000000000022326 0ustar00zuulzuul00000000000000# Copyright (C) 2018 NTT DATA # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Schema for V3 volume transfer API. """ from cinder.api.validation import parameter_types create = { 'type': 'object', 'properties': { 'transfer': { 'type': 'object', 'properties': { 'volume_id': parameter_types.uuid, 'name': {'oneOf': [{'type': 'string', 'format': "name_skip_leading_trailing_spaces"}, {'type': 'null'}]}, }, 'required': ['volume_id'], 'additionalProperties': False, }, }, 'required': ['transfer'], 'additionalProperties': False, } accept = { 'type': 'object', 'properties': { 'accept': { 'type': 'object', 'properties': { 'auth_key': {'type': ['string', 'integer']}, }, 'required': ['auth_key'], 'additionalProperties': False, }, }, 'required': ['accept'], 'additionalProperties': False, } create_v355 = { 'type': 'object', 'properties': { 'transfer': { 'type': 'object', 'properties': { 'volume_id': parameter_types.uuid, 'name': {'oneOf': [{'type': 'string', 'format': "name_skip_leading_trailing_spaces"}, {'type': 'null'}]}, 'no_snapshots': parameter_types.boolean }, 'required': ['volume_id'], 'additionalProperties': False, }, }, 'required': ['transfer'], 'additionalProperties': False, } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/api/schemas/volume_type_access.py0000664000175000017500000000314700000000000022777 0ustar00zuulzuul00000000000000# Copyright (C) 2017 NTT DATA # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Schema for V3 volume type access API. """ add_project_access = { 'type': 'object', 'properties': { 'type': 'object', 'addProjectAccess': { 'type': 'object', 'properties': { 'project': {'type': 'string', 'minLength': 1, 'maxLength': 255}, }, 'required': ['project'], 'additionalProperties': False, }, }, 'required': ['addProjectAccess'], 'additionalProperties': False, } remove_project_access = { 'type': 'object', 'properties': { 'type': 'object', 'removeProjectAccess': { 'type': 'object', 'properties': { 'project': {'type': 'string', 'minLength': 1, 'maxLength': 255}, }, 'required': ['project'], 'additionalProperties': False, }, }, 'required': ['removeProjectAccess'], 'additionalProperties': False, } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/api/schemas/volume_type_encryption.py0000664000175000017500000000275600000000000023735 0ustar00zuulzuul00000000000000# Copyright (C) 2018 NTT DATA # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Schema for V3 volume type encryption API.""" import copy from cinder.api.validation import parameter_types create = { 'type': 'object', 'properties': { 'encryption': { 'type': 'object', 'properties': { 'key_size': parameter_types.key_size, 'provider': {'type': 'string', 'minLength': 0, 'maxLength': 255}, 'control_location': {'enum': ['front-end', 'back-end']}, 'cipher': {'type': ['string', 'null'], 'minLength': 0, 'maxLength': 255}, }, 'required': ['provider', 'control_location'], 'additionalProperties': True, }, }, 'required': ['encryption'], 'additionalProperties': False, } update = copy.deepcopy(create) update['properties']['encryption']['required'] = [] ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/api/schemas/volume_types.py0000664000175000017500000000332200000000000021634 0ustar00zuulzuul00000000000000# Copyright 2017 NTT DATA # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
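# NOTE: Illustrative sketch, not part of the original module.  Schemas in this
# package are plain JSON Schema dictionaries; controllers elsewhere in this
# tree (for example cinder/api/v2/snapshots.py) attach them to request
# handlers with the ``cinder.api.validation.schema`` decorator, roughly like:
#
#     from cinder.api.schemas import volume_types as volume_types_schema
#     from cinder.api import validation
#
#     class ExampleTypesController(wsgi.Controller):   # hypothetical name
#         @validation.schema(volume_types_schema.create)
#         def create(self, req, body):
#             # body has already been validated against the ``create``
#             # schema below before this method runs.
#             ...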
from cinder.api.validation import parameter_types create = { 'type': 'object', 'properties': { 'type': 'object', 'volume_type': { 'type': 'object', 'properties': { 'name': parameter_types.name, 'description': parameter_types.description, 'extra_specs': parameter_types.extra_specs_with_null, 'os-volume-type-access:is_public': parameter_types.boolean, }, 'required': ['name'], 'additionalProperties': False, }, }, 'required': ['volume_type'], 'additionalProperties': False, } update = { 'type': 'object', 'properties': { 'type': 'object', 'volume_type': { 'type': 'object', 'properties': { 'name': parameter_types.update_name, 'description': parameter_types.description, 'is_public': parameter_types.boolean, }, 'additionalProperties': False, }, }, 'required': ['volume_type'], 'additionalProperties': False, } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/api/schemas/volumes.py0000664000175000017500000001147500000000000020603 0ustar00zuulzuul00000000000000# Copyright (C) 2018 NTT DATA # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Schema for V3 Volumes API. """ import copy from cinder.api.validation import parameter_types create = { 'type': 'object', 'properties': { 'volume': { 'type': 'object', 'properties': { 'name': {'type': ['string', 'null'], 'format': 'name_non_mandatory_remove_white_spaces'}, 'description': { 'type': ['string', 'null'], 'format': 'description_non_mandatory_remove_white_spaces'}, 'display_name': { 'type': ['string', 'null'], 'format': 'name_non_mandatory_remove_white_spaces'}, 'display_description': { 'type': ['string', 'null'], 'format': 'description_non_mandatory_remove_white_spaces'}, # volume_type accepts 'id' as well as 'name' so do lazy schema # validation for it. 'volume_type': parameter_types.name_allow_zero_min_length, 'metadata': parameter_types.metadata_allows_null, 'snapshot_id': parameter_types.optional_uuid, 'source_volid': parameter_types.optional_uuid, 'consistencygroup_id': parameter_types.optional_uuid, 'size': parameter_types.volume_size_allows_null, 'availability_zone': parameter_types.availability_zone, # The functionality to create a multiattach volume by the # multiattach parameter is removed. # We accept the parameter but raise a BadRequest stating the # "new way" of creating multiattach volumes i.e. with a # multiattach volume type so users using the "old way" # have ease of moving into the new functionality. 
'multiattach': parameter_types.optional_boolean, 'image_id': {'type': ['string', 'null'], 'minLength': 0, 'maxLength': 255}, 'imageRef': {'type': ['string', 'null'], 'minLength': 0, 'maxLength': 255}, }, 'additionalProperties': True, }, 'OS-SCH-HNT:scheduler_hints': { 'type': ['object', 'null'] }, }, 'required': ['volume'], 'additionalProperties': False, } create_volume_v313 = copy.deepcopy(create) create_volume_v313['properties']['volume']['properties'][ 'group_id'] = {'type': ['string', 'null'], 'minLength': 0, 'maxLength': 255} create_volume_v347 = copy.deepcopy(create_volume_v313) create_volume_v347['properties']['volume']['properties'][ 'backup_id'] = parameter_types.optional_uuid create_volume_v353 = copy.deepcopy(create_volume_v347) create_volume_v353['properties']['volume']['additionalProperties'] = False update = { 'type': 'object', 'properties': { 'volume': { 'type': 'object', 'properties': { # The 'name' and 'description' are required to be compatible # with v2. 'name': { 'type': ['string', 'null'], 'format': 'name_non_mandatory_remove_white_spaces'}, 'description': { 'type': ['string', 'null'], 'format': 'description_non_mandatory_remove_white_spaces'}, 'display_name': { 'type': ['string', 'null'], 'format': 'name_non_mandatory_remove_white_spaces'}, 'display_description': { 'type': ['string', 'null'], 'format': 'description_non_mandatory_remove_white_spaces'}, 'metadata': parameter_types.extra_specs, }, 'additionalProperties': False, }, }, 'required': ['volume'], 'additionalProperties': False, } update_volume_v353 = copy.deepcopy(update) update_volume_v353['properties']['volume']['anyOf'] = [ {'required': ['name']}, {'required': ['description']}, {'required': ['display_name']}, {'required': ['display_description']}, {'required': ['metadata']}] ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/api/schemas/workers.py0000664000175000017500000000230400000000000020574 0ustar00zuulzuul00000000000000# Copyright (C) 2017 NTT DATA # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Schema for V3 Workers API. """ from cinder.api.validation import parameter_types cleanup = { 'type': 'object', 'properties': { 'cluster_name': parameter_types.hostname, 'disabled': parameter_types.boolean, 'host': parameter_types.hostname, 'is_up': parameter_types.boolean, 'binary': {'enum': ['cinder-volume', 'cinder-scheduler']}, 'resource_id': parameter_types.optional_uuid, 'resource_type': parameter_types.resource_type, 'service_id': parameter_types.service_id, }, 'additionalProperties': False, } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/api/urlmap.py0000664000175000017500000002400400000000000016756 0ustar00zuulzuul00000000000000# Copyright 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import re from urllib.request import parse_http_list import paste.urlmap from cinder.api.openstack import wsgi _quoted_string_re = r'"[^"\\]*(?:\\.[^"\\]*)*"' _option_header_piece_re = re.compile( r';\s*([^\s;=]+|%s)\s*' r'(?:=\s*([^;]+|%s))?\s*' % (_quoted_string_re, _quoted_string_re)) def unquote_header_value(value): """Unquotes a header value. This does not use the real unquoting but what browsers are actually using for quoting. :param value: the header value to unquote. """ if value and value[0] == value[-1] == '"': # this is not the real unquoting, but fixing this so that the # RFC is met will result in bugs with internet explorer and # probably some other browsers as well. IE for example is # uploading files with "C:\foo\bar.txt" as filename value = value[1:-1] return value def parse_list_header(value): """Parse lists as described by RFC 2068 Section 2. In particular, parse comma-separated lists where the elements of the list may include quoted-strings. A quoted-string could contain a comma. A non-quoted string could have quotes in the middle. Quotes are removed automatically after parsing. The return value is a standard :class:`list`: >>> parse_list_header('token, "quoted value"') ['token', 'quoted value'] :param value: a string with a list header. :return: :class:`list` """ result = [] for item in parse_http_list(value): if item[:1] == item[-1:] == '"': item = unquote_header_value(item[1:-1]) result.append(item) return result def parse_options_header(value): """Parse 'Content-Type'-like header into a tuple. Parse a ``Content-Type`` like header into a tuple with the content type and the options: >>> parse_options_header('Content-Type: text/html; mimetype=text/html') ('Content-Type:', {'mimetype': 'text/html'}) :param value: the header to parse. :return: (str, options) """ def _tokenize(string): for match in _option_header_piece_re.finditer(string): key, value = match.groups() key = unquote_header_value(key) if value is not None: value = unquote_header_value(value) yield key, value if not value: return '', {} parts = _tokenize(';' + value) name = next(parts)[0] extra = dict(parts) return name, extra class Accept(object): def __init__(self, value): self._content_types = [parse_options_header(v) for v in parse_list_header(value)] def best_match(self, supported_content_types): # FIXME: Should we have a more sophisticated matching algorithm that # takes into account the version as well? 
best_quality = -1 best_content_type = None best_params = {} best_match = '*/*' for content_type in supported_content_types: for content_mask, params in self._content_types: try: quality = float(params.get('q', 1)) except ValueError: continue if quality < best_quality: continue elif best_quality == quality: if best_match.count('*') <= content_mask.count('*'): continue if self._match_mask(content_mask, content_type): best_quality = quality best_content_type = content_type best_params = params best_match = content_mask return best_content_type, best_params def content_type_params(self, best_content_type): """Find parameters in Accept header for given content type.""" for content_type, params in self._content_types: if best_content_type == content_type: return params return {} def _match_mask(self, mask, content_type): if '*' not in mask: return content_type == mask if mask == '*/*': return True mask_major = mask[:-2] content_type_major = content_type.split('/', 1)[0] return content_type_major == mask_major def urlmap_factory(loader, global_conf, **local_conf): if 'not_found_app' in local_conf: not_found_app = local_conf.pop('not_found_app') else: not_found_app = global_conf.get('not_found_app') if not_found_app: not_found_app = loader.get_app(not_found_app, global_conf=global_conf) urlmap = URLMap(not_found_app=not_found_app) for path, app_name in local_conf.items(): path = paste.urlmap.parse_path_expression(path) app = loader.get_app(app_name, global_conf=global_conf) urlmap[path] = app return urlmap class URLMap(paste.urlmap.URLMap): def _match(self, host, port, path_info): """Find longest match for a given URL path.""" for (domain, app_url), app in self.applications: if domain and domain != host and domain != host + ':' + port: continue if (path_info == app_url or path_info.startswith(app_url + '/')): return app, app_url return None, None def _set_script_name(self, app, app_url): def wrap(environ, start_response): environ['SCRIPT_NAME'] += app_url return app(environ, start_response) return wrap def _munge_path(self, app, path_info, app_url): def wrap(environ, start_response): environ['SCRIPT_NAME'] += app_url environ['PATH_INFO'] = path_info[len(app_url):] return app(environ, start_response) return wrap def _path_strategy(self, host, port, path_info): """Check path suffix for MIME type and path prefix for API version.""" mime_type = app = app_url = None parts = path_info.rsplit('.', 1) if len(parts) > 1: possible_type = 'application/' + parts[1] if possible_type in wsgi.SUPPORTED_CONTENT_TYPES: mime_type = possible_type parts = path_info.split('/') if len(parts) > 1: possible_app, possible_app_url = self._match(host, port, path_info) # Don't use prefix if it ends up matching default if possible_app and possible_app_url: app_url = possible_app_url app = self._munge_path(possible_app, path_info, app_url) return mime_type, app, app_url def _content_type_strategy(self, host, port, environ): """Check Content-Type header for API version.""" app = None params = parse_options_header(environ.get('CONTENT_TYPE', ''))[1] if 'version' in params: app, app_url = self._match(host, port, '/v' + params['version']) if app: app = self._set_script_name(app, app_url) return app def _accept_strategy(self, host, port, environ, supported_content_types): """Check Accept header for best matching MIME type and API version.""" accept = Accept(environ.get('HTTP_ACCEPT', '')) app = None # Find the best match in the Accept header mime_type, params = accept.best_match(supported_content_types) if 'version' in params: app, 
app_url = self._match(host, port, '/v' + params['version']) if app: app = self._set_script_name(app, app_url) return mime_type, app def __call__(self, environ, start_response): host = environ.get('HTTP_HOST', environ.get('SERVER_NAME')).lower() if ':' in host: host, port = host.split(':', 1) else: if environ['wsgi.url_scheme'] == 'http': port = '80' else: port = '443' path_info = environ['PATH_INFO'] path_info = self.normalize_url(path_info, False)[1] # The MIME type for the response is determined in one of two ways: # 1) URL path suffix (eg /servers/detail.json) # 2) Accept header (eg application/json;q=0.8) # The API version is determined in one of three ways: # 1) URL path prefix (eg /v1.1/tenant/servers/detail) # 2) Content-Type header (eg application/json;version=1.1) # 3) Accept header (eg application/json;q=0.8;version=1.1) supported_content_types = list(wsgi.SUPPORTED_CONTENT_TYPES) mime_type, app, app_url = self._path_strategy(host, port, path_info) if not app: app = self._content_type_strategy(host, port, environ) if not mime_type or not app: possible_mime_type, possible_app = self._accept_strategy( host, port, environ, supported_content_types) if possible_mime_type and not mime_type: mime_type = possible_mime_type if possible_app and not app: app = possible_app if not mime_type: mime_type = 'application/json' if not app: # Didn't match a particular version, probably matches default app, app_url = self._match(host, port, path_info) if app: app = self._munge_path(app, path_info, app_url) if app: environ['cinder.best_content_type'] = mime_type return app(environ, start_response) environ['paste.urlmap_object'] = self return self.not_found_application(environ, start_response) ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1759315577.047118 cinder-27.0.0/cinder/api/v2/0000775000175000017500000000000000000000000015433 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/api/v2/__init__.py0000664000175000017500000000000000000000000017532 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/api/v2/limits.py0000664000175000017500000003331200000000000017310 0ustar00zuulzuul00000000000000# Copyright 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Module dedicated functions/classes dealing with rate limiting requests. """ import collections import copy from http import client as http_client import math import re import time from oslo_serialization import jsonutils from oslo_utils import importutils import webob.dec import webob.exc from cinder.api.openstack import wsgi from cinder.api.views import limits as limits_views from cinder.i18n import _ from cinder import quota from cinder.wsgi import common as base_wsgi QUOTAS = quota.QUOTAS LIMITS_PREFIX = "limits." 
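# NOTE: Illustrative example only, not part of the original module.  A limits
# configuration string, as accepted by Limiter.parse_limits() further down in
# this module, is a semicolon-separated list of parenthesized groups, e.g.:
#
#     "(POST, *, .*, 10, MINUTE);(GET, *changes-since*, .*changes-since.*, 3, MINUTE)"
#
# Each group yields a Limit(verb, uri, regex, value, unit) entry comparable
# to the ones listed in DEFAULT_LIMITS below.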
# Convenience constants for the limits dictionary passed to Limiter(). PER_SECOND = 1 PER_MINUTE = 60 PER_HOUR = 60 * 60 PER_DAY = 60 * 60 * 24 class LimitsController(wsgi.Controller): """Controller for accessing limits in the OpenStack API.""" def index(self, req): """Return all global and rate limit information.""" context = req.environ['cinder.context'] quotas = QUOTAS.get_project_quotas(context, context.project_id, usages=False) abs_limits = {k: v['limit'] for k, v in quotas.items()} rate_limits = req.environ.get("cinder.limits", []) builder = self._get_view_builder(req) return builder.build(rate_limits, abs_limits) def _get_view_builder(self, req): return limits_views.ViewBuilder() def create_resource(): return wsgi.Resource(LimitsController()) class Limit(object): """Stores information about a limit for HTTP requests.""" UNITS = { 1: "SECOND", 60: "MINUTE", 60 * 60: "HOUR", 60 * 60 * 24: "DAY", } UNIT_MAP = {v: k for k, v in UNITS.items()} def __init__(self, verb, uri, regex, value, unit): """Initialize a new `Limit`. @param verb: HTTP verb (POST, PUT, etc.) @param uri: Human-readable URI @param regex: Regular expression format for this limit @param value: Integer number of requests which can be made @param unit: Unit of measure for the value parameter """ self.verb = verb self.uri = uri self.regex = regex self.value = int(value) self.unit = unit self.unit_string = self.display_unit().lower() self.remaining = int(value) if value <= 0: raise ValueError("Limit value must be > 0") self.last_request = None self.next_request = None self.water_level = 0 self.capacity = self.unit self.request_value = float(self.capacity) / float(self.value) msg = (_("Only %(value)s %(verb)s request(s) can be " "made to %(uri)s every %(unit_string)s.") % {'value': self.value, 'verb': self.verb, 'uri': self.uri, 'unit_string': self.unit_string}) self.error_message = msg def __call__(self, verb, url): """Represent a call to this limit from a relevant request. @param verb: string http verb (POST, GET, etc.) @param url: string URL """ if self.verb != verb or not re.match(self.regex, url): return now = self._get_time() if self.last_request is None: self.last_request = now leak_value = now - self.last_request self.water_level -= leak_value self.water_level = max(self.water_level, 0) self.water_level += self.request_value difference = self.water_level - self.capacity self.last_request = now if difference > 0: self.water_level -= self.request_value self.next_request = now + difference return difference cap = self.capacity water = self.water_level val = self.value self.remaining = math.floor(((cap - water) / cap) * val) self.next_request = now def _get_time(self): """Retrieve the current time. Broken out for testability.""" return time.time() def display_unit(self): """Display the string name of the unit.""" return self.UNITS.get(self.unit, "UNKNOWN") def display(self): """Return a useful representation of this class.""" return { "verb": self.verb, "URI": self.uri, "regex": self.regex, "value": self.value, "remaining": int(self.remaining), "unit": self.display_unit(), "resetTime": int(self.next_request or self._get_time()), } # "Limit" format is a dictionary with the HTTP verb, human-readable URI, # a regular-expression to match, value and unit of measure (PER_DAY, etc.) 
DEFAULT_LIMITS = [ Limit("POST", "*", ".*", 10, PER_MINUTE), Limit("POST", "*/servers", "^/servers", 50, PER_DAY), Limit("PUT", "*", ".*", 10, PER_MINUTE), Limit("GET", "*changes-since*", ".*changes-since.*", 3, PER_MINUTE), Limit("DELETE", "*", ".*", 100, PER_MINUTE), ] class RateLimitingMiddleware(base_wsgi.Middleware): """Rate-limits requests passing through this middleware. All limit information is stored in memory for this implementation. """ def __init__(self, application, limits=None, limiter=None, **kwargs): """Initialize class, wrap WSGI app, and set up given limits. :param application: WSGI application to wrap :param limits: String describing limits :param limiter: String identifying class for representing limits Other parameters are passed to the constructor for the limiter. """ base_wsgi.Middleware.__init__(self, application) # Select the limiter class if limiter is None: limiter = Limiter else: limiter = importutils.import_class(limiter) # Parse the limits, if any are provided if limits is not None: limits = limiter.parse_limits(limits) self._limiter = limiter(limits or DEFAULT_LIMITS, **kwargs) @webob.dec.wsgify(RequestClass=wsgi.Request) def __call__(self, req): """Represents a single call through this middleware. We should record the request if we have a limit relevant to it. If no limit is relevant to the request, ignore it. If the request should be rate limited, return a fault telling the user they are over the limit and need to retry later. """ verb = req.method url = req.url context = req.environ.get("cinder.context") if context: username = context.user_id else: username = None delay, error = self._limiter.check_for_delay(verb, url, username) if delay: msg = _("This request was rate-limited.") retry = time.time() + delay return wsgi.OverLimitFault(msg, error, retry) req.environ["cinder.limits"] = self._limiter.get_limits(username) return self.application class Limiter(object): """Rate-limit checking class which handles limits in memory.""" def __init__(self, limits, **kwargs): """Initialize the new `Limiter`. @param limits: List of `Limit` objects """ self.limits = copy.deepcopy(limits) self.levels = collections.defaultdict(lambda: copy.deepcopy(limits)) # Pick up any per-user limit information for key, value in kwargs.items(): if key.startswith(LIMITS_PREFIX): username = key[len(LIMITS_PREFIX):] self.levels[username] = self.parse_limits(value) def get_limits(self, username=None): """Return the limits for a given user.""" return [limit.display() for limit in self.levels[username]] def check_for_delay(self, verb, url, username=None): """Check the given verb/user/user triplet for limit. @return: Tuple of delay (in seconds) and error message (or None, None) """ delays = [] for limit in self.levels[username]: delay = limit(verb, url) if delay: delays.append((delay, limit.error_message)) if delays: delays.sort() return delays[0] return None, None # Note: This method gets called before the class is instantiated, # so this must be either a static method or a class method. It is # used to develop a list of limits to feed to the constructor. We # put this in the class so that subclasses can override the # default limit parsing. @staticmethod def parse_limits(limits): """Convert a string into a list of Limit instances. 
This implementation expects a semicolon-separated sequence of parenthesized groups, where each group contains a comma-separated sequence consisting of HTTP method, user-readable URI, a URI reg-exp, an integer number of requests which can be made, and a unit of measure. Valid values for the latter are "SECOND", "MINUTE", "HOUR", and "DAY". @return: List of Limit instances. """ # Handle empty limit strings limits = limits.strip() if not limits: return [] # Split up the limits by semicolon result = [] for group in limits.split(';'): group = group.strip() if group[:1] != '(' or group[-1:] != ')': raise ValueError("Limit rules must be surrounded by " "parentheses") group = group[1:-1] # Extract the Limit arguments args = [a.strip() for a in group.split(',')] if len(args) != 5: raise ValueError("Limit rules must contain the following " "arguments: verb, uri, regex, value, unit") # Pull out the arguments verb, uri, regex, value, unit = args # Upper-case the verb verb = verb.upper() # Convert value--raises ValueError if it's not integer value = int(value) # Convert unit unit = unit.upper() if unit not in Limit.UNIT_MAP: raise ValueError("Invalid units specified") unit = Limit.UNIT_MAP[unit] # Build a limit result.append(Limit(verb, uri, regex, value, unit)) return result class WsgiLimiter(object): """Rate-limit checking from a WSGI application. Uses an in-memory `Limiter`. To use, POST ``/`` with JSON data such as:: { "verb" : GET, "path" : "/servers" } and receive a 204 No Content, or a 403 Forbidden with an X-Wait-Seconds header containing the number of seconds to wait before the action would succeed. """ def __init__(self, limits=None): """Initialize the new `WsgiLimiter`. @param limits: List of `Limit` objects """ self._limiter = Limiter(limits or DEFAULT_LIMITS) @webob.dec.wsgify(RequestClass=wsgi.Request) def __call__(self, request): """Handles a call to this application. Returns 204 if the request is acceptable to the limiter, else a 403 is returned with a relevant header indicating when the request *will* succeed. """ if request.method != "POST": raise webob.exc.HTTPMethodNotAllowed() try: info = dict(jsonutils.loads(request.body)) except ValueError: raise webob.exc.HTTPBadRequest() username = request.path_info_pop() verb = info.get("verb") path = info.get("path") delay, error = self._limiter.check_for_delay(verb, path, username) if delay: headers = {"X-Wait-Seconds": "%.2f" % delay} return webob.exc.HTTPForbidden(headers=headers, explanation=error) else: return webob.exc.HTTPNoContent() class WsgiLimiterProxy(object): """Rate-limit requests based on answers from a remote source.""" def __init__(self, limiter_address): """Initialize the new `WsgiLimiterProxy`. @param limiter_address: IP/port combination of where to request limit """ self.limiter_address = limiter_address def check_for_delay(self, verb, path, username=None): body = jsonutils.dump_as_bytes({"verb": verb, "path": path}) headers = {"Content-Type": "application/json"} conn = http_client.HTTPConnection(self.limiter_address) if username: conn.request("POST", "/%s" % (username), body, headers) else: conn.request("POST", "/", body, headers) resp = conn.getresponse() if (resp.status >= 200) and (resp.status < 300): return None, None return resp.getheader("X-Wait-Seconds"), resp.read() or None # Note: This method gets called before the class is instantiated, # so this must be either a static method or a class method. It is # used to develop a list of limits to feed to the constructor. 
# This implementation returns an empty list, since all limit # decisions are made by a remote server. @staticmethod def parse_limits(limits): """Ignore a limits string--simply doesn't apply for the limit proxy. @return: Empty list. """ return [] ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/api/v2/snapshots.py0000664000175000017500000001504700000000000020036 0ustar00zuulzuul00000000000000# Copyright 2011 Justin Santa Barbara # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """The volumes snapshots api.""" from http import HTTPStatus from oslo_log import log as logging from oslo_utils import strutils import webob from cinder.api import api_utils from cinder.api import common from cinder.api.openstack import wsgi from cinder.api.schemas import snapshots as snapshot from cinder.api import validation from cinder.api.views import snapshots as snapshot_views from cinder import volume from cinder.volume import volume_utils LOG = logging.getLogger(__name__) class SnapshotsController(wsgi.Controller): """The Snapshots API controller for the OpenStack API.""" _view_builder_class = snapshot_views.ViewBuilder def __init__(self, ext_mgr=None): self.volume_api = volume.API() self.ext_mgr = ext_mgr super(SnapshotsController, self).__init__() def show(self, req, id): """Return data about the given snapshot.""" context = req.environ['cinder.context'] # Not found exception will be handled at the wsgi level snapshot = self.volume_api.get_snapshot(context, id) req.cache_db_snapshot(snapshot) return self._view_builder.detail(req, snapshot) def delete(self, req, id): """Delete a snapshot.""" context = req.environ['cinder.context'] LOG.info("Delete snapshot with id: %s", id) # Not found exception will be handled at the wsgi level snapshot = self.volume_api.get_snapshot(context, id) self.volume_api.delete_snapshot(context, snapshot) return webob.Response(status_int=HTTPStatus.ACCEPTED) def index(self, req): """Returns a summary list of snapshots.""" return self._items(req, is_detail=False) def detail(self, req): """Returns a detailed list of snapshots.""" return self._items(req, is_detail=True) def _items(self, req, is_detail=True): """Returns a list of snapshots, transformed through view builder.""" context = req.environ['cinder.context'] # Pop out non search_opts and create local variables search_opts = req.GET.copy() sort_keys, sort_dirs = common.get_sort_params(search_opts) marker, limit, offset = common.get_pagination_params(search_opts) # Filter out invalid options allowed_search_options = ('status', 'volume_id', 'name') api_utils.remove_invalid_filter_options(context, search_opts, allowed_search_options) # NOTE(thingee): v2 API allows name instead of display_name if 'name' in search_opts: search_opts['display_name'] = search_opts.pop('name') snapshots = self.volume_api.get_all_snapshots(context, search_opts=search_opts, marker=marker, limit=limit, sort_keys=sort_keys, sort_dirs=sort_dirs, offset=offset) 
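        # Cache the fetched snapshot objects on the request so later code can
        # reuse them, then render the response through the view builder as
        # either a summary or a detailed list.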
req.cache_db_snapshots(snapshots.objects) if is_detail: snapshots = self._view_builder.detail_list(req, snapshots.objects) else: snapshots = self._view_builder.summary_list(req, snapshots.objects) return snapshots @wsgi.response(HTTPStatus.ACCEPTED) @validation.schema(snapshot.create) def create(self, req, body): """Creates a new snapshot.""" kwargs = {} context = req.environ['cinder.context'] snapshot = body['snapshot'] kwargs['metadata'] = snapshot.get('metadata', None) volume_id = snapshot['volume_id'] volume = self.volume_api.get(context, volume_id) force = snapshot.get('force', False) force = strutils.bool_from_string(force, strict=True) LOG.info("Create snapshot from volume %s", volume_id) self.validate_name_and_description(snapshot, check_length=False) # NOTE(thingee): v2 API allows name instead of display_name if 'name' in snapshot: snapshot['display_name'] = snapshot.pop('name') if force: new_snapshot = self.volume_api.create_snapshot_force( context, volume, snapshot.get('display_name'), snapshot.get('description'), **kwargs) else: new_snapshot = self.volume_api.create_snapshot( context, volume, snapshot.get('display_name'), snapshot.get('description'), **kwargs) req.cache_db_snapshot(new_snapshot) return self._view_builder.detail(req, new_snapshot) @validation.schema(snapshot.update) def update(self, req, id, body): """Update a snapshot.""" context = req.environ['cinder.context'] snapshot_body = body['snapshot'] self.validate_name_and_description(snapshot_body, check_length=False) if 'name' in snapshot_body: snapshot_body['display_name'] = snapshot_body.pop('name') if 'description' in snapshot_body: snapshot_body['display_description'] = snapshot_body.pop( 'description') # Not found exception will be handled at the wsgi level snapshot = self.volume_api.get_snapshot(context, id) volume_utils.notify_about_snapshot_usage(context, snapshot, 'update.start') self.volume_api.update_snapshot(context, snapshot, snapshot_body) snapshot.update(snapshot_body) req.cache_db_snapshot(snapshot) volume_utils.notify_about_snapshot_usage(context, snapshot, 'update.end') return self._view_builder.detail(req, snapshot) def create_resource(ext_mgr): return wsgi.Resource(SnapshotsController(ext_mgr)) ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1759315577.047118 cinder-27.0.0/cinder/api/v2/views/0000775000175000017500000000000000000000000016570 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/api/v2/views/__init__.py0000664000175000017500000000000000000000000020667 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/api/v2/views/volumes.py0000664000175000017500000001577200000000000020650 0ustar00zuulzuul00000000000000# Copyright 2012 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
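# NOTE: Illustrative sketch, not part of the original module.  The ViewBuilder
# defined below turns volume objects into response dictionaries; summary()
# produces roughly
#
#     {'volume': {'id': ..., 'name': ..., 'links': [...]}}
#
# while detail() adds fields such as status, size, availability_zone,
# metadata, attachments and, for admin contexts, migration_status.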
from cinder.api import common from cinder import group as group_api from cinder.objects import fields from cinder.volume import group_types class ViewBuilder(common.ViewBuilder): """Model a server API response as a python dictionary.""" _collection_name = "volumes" def __init__(self): """Initialize view builder.""" super(ViewBuilder, self).__init__() def summary_list(self, request, volumes, volume_count=None): """Show a list of volumes without many details.""" return self._list_view(self.summary, request, volumes, volume_count) def detail_list(self, request, volumes, volume_count=None): """Detailed view of a list of volumes.""" return self._list_view(self.detail, request, volumes, volume_count, self._collection_name + '/detail') def summary(self, request, volume): """Generic, non-detailed view of a volume.""" return { 'volume': { 'id': volume['id'], 'name': volume['display_name'], 'links': self._get_links(request, volume['id']), }, } def _get_volume_status(self, volume): # NOTE(wanghao): for fixing bug 1504007, we introduce 'managing', # 'error_managing' and 'error_managing_deleting' status into managing # process, but still expose 'creating' and 'error' and 'deleting' # status to user for API compatibility. status_map = { 'managing': 'creating', 'error_managing': 'error', 'error_managing_deleting': 'deleting', } vol_status = volume.get('status') return status_map.get(vol_status, vol_status) def detail(self, request, volume): """Detailed view of a single volume.""" volume_ref = { 'volume': { 'id': volume.get('id'), 'status': self._get_volume_status(volume), 'size': volume.get('size'), 'availability_zone': volume.get('availability_zone'), 'created_at': volume.get('created_at'), 'updated_at': volume.get('updated_at'), 'name': volume.get('display_name'), 'description': volume.get('display_description'), 'volume_type': self._get_volume_type(request, volume), 'snapshot_id': volume.get('snapshot_id'), 'source_volid': volume.get('source_volid'), 'metadata': self._get_volume_metadata(volume), 'links': self._get_links(request, volume['id']), 'user_id': volume.get('user_id'), 'bootable': str(volume.get('bootable')).lower(), 'encrypted': self._is_volume_encrypted(volume), 'replication_status': volume.get('replication_status'), 'consistencygroup_id': volume.get('consistencygroup_id'), 'multiattach': volume.get('multiattach'), } } ctxt = request.environ['cinder.context'] attachments = self._get_attachments(volume, ctxt.is_admin) volume_ref['volume']['attachments'] = attachments if ctxt.is_admin: volume_ref['volume']['migration_status'] = ( volume.get('migration_status')) # NOTE(xyang): Display group_id as consistencygroup_id in detailed # view of the volume if group is converted from cg. 
group_id = volume.get('group_id') if group_id is not None: # Not found exception will be handled at the wsgi level grp = group_api.API().get(ctxt, group_id) cgsnap_type = group_types.get_default_cgsnapshot_type() if grp.group_type_id == cgsnap_type['id']: volume_ref['volume']['consistencygroup_id'] = group_id return volume_ref def _is_volume_encrypted(self, volume): """Determine if volume is encrypted.""" return volume.get('encryption_key_id') is not None def _get_attachments(self, volume, is_admin): """Retrieve the attachments of the volume object.""" attachments = [] for attachment in volume.volume_attachment: if (attachment.get('attach_status') == fields.VolumeAttachStatus.ATTACHED): a = {'id': attachment.get('volume_id'), 'attachment_id': attachment.get('id'), 'volume_id': attachment.get('volume_id'), 'server_id': attachment.get('instance_uuid'), 'host_name': attachment.get('attached_host'), 'device': attachment.get('mountpoint'), 'attached_at': attachment.get('attach_time'), } if not is_admin: a['host_name'] = None attachments.append(a) return attachments def _get_volume_metadata(self, volume): """Retrieve the metadata of the volume object.""" return volume.metadata def _get_volume_type(self, request, volume): """Retrieve the type of the volume object.""" if volume['volume_type_id'] and volume.get('volume_type'): return volume['volume_type']['name'] else: return volume['volume_type_id'] def _list_view(self, func, request, volumes, volume_count, coll_name=_collection_name): """Provide a view for a list of volumes. :param func: Function used to format the volume data :param request: API request :param volumes: List of volumes in dictionary format :param volume_count: Length of the original list of volumes :param coll_name: Name of collection, used to generate the next link for a pagination query :returns: Volume data in dictionary format """ volumes_list = [func(request, volume)['volume'] for volume in volumes] volumes_links = self._get_collection_links(request, volumes, coll_name, volume_count) volumes_dict = dict(volumes=volumes_list) if volumes_links: volumes_dict['volumes_links'] = volumes_links return volumes_dict ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/api/v2/volume_metadata.py0000664000175000017500000001302400000000000021154 0ustar00zuulzuul00000000000000# Copyright 2013 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
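# NOTE: Illustrative request bodies, a sketch that is not part of the
# original module.  The handlers below validate input against
# cinder.api.schemas.volume_metadata, so well-formed bodies look roughly like:
#
#     {"metadata": {"purpose": "scratch", "tier": "gold"}}   # create / update_all
#     {"meta": {"purpose": "archive"}}                        # update (single key)
#
# The keys shown ("purpose", "tier") are made-up examples; update() also
# requires that the single key in "meta" match the id in the request URI.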
from http import HTTPStatus import webob from cinder.api import common from cinder.api.openstack import wsgi from cinder.api.schemas import volume_metadata as metadata from cinder.api import validation from cinder import exception from cinder.i18n import _ from cinder import volume class Controller(wsgi.Controller): """The volume metadata API controller for the OpenStack API.""" def __init__(self): self.volume_api = volume.API() super(Controller, self).__init__() def _get_metadata(self, context, volume_id): # The metadata is at the second position of the tuple returned # from _get_volume_and_metadata return self._get_volume_and_metadata(context, volume_id)[1] def _get_volume_and_metadata(self, context, volume_id): # Not found exception will be handled at the wsgi level volume = self.volume_api.get(context, volume_id) meta = self.volume_api.get_volume_metadata(context, volume) return (volume, meta) def index(self, req, volume_id): """Returns the list of metadata for a given volume.""" context = req.environ['cinder.context'] return {'metadata': self._get_metadata(context, volume_id)} @validation.schema(metadata.create) def create(self, req, volume_id, body): context = req.environ['cinder.context'] metadata = body['metadata'] new_metadata = self._update_volume_metadata(context, volume_id, metadata, delete=False, use_create=True) return {'metadata': new_metadata} @validation.schema(metadata.update) def update(self, req, volume_id, id, body): meta_item = body['meta'] if id not in meta_item: expl = _('Request body and URI mismatch') raise webob.exc.HTTPBadRequest(explanation=expl) if len(meta_item) > 1: expl = _('Request body contains too many items') raise webob.exc.HTTPBadRequest(explanation=expl) context = req.environ['cinder.context'] self._update_volume_metadata(context, volume_id, meta_item, delete=False) return {'meta': meta_item} @validation.schema(metadata.create) def update_all(self, req, volume_id, body): metadata = body['metadata'] context = req.environ['cinder.context'] new_metadata = self._update_volume_metadata(context, volume_id, metadata, delete=True) return {'metadata': new_metadata} def _update_volume_metadata(self, context, volume_id, metadata, delete=False, use_create=False): try: volume = self.volume_api.get(context, volume_id) if use_create: return self.volume_api.create_volume_metadata(context, volume, metadata) else: return self.volume_api.update_volume_metadata( context, volume, metadata, delete, meta_type=common.METADATA_TYPES.user) # Not found exception will be handled at the wsgi level except (ValueError, AttributeError): msg = _("Malformed request body") raise webob.exc.HTTPBadRequest(explanation=msg) except exception.InvalidVolumeMetadata as error: raise webob.exc.HTTPBadRequest(explanation=error.msg) except exception.InvalidVolumeMetadataSize as error: raise webob.exc.HTTPRequestEntityTooLarge(explanation=error.msg) def show(self, req, volume_id, id): """Return a single metadata item.""" context = req.environ['cinder.context'] data = self._get_metadata(context, volume_id) try: return {'meta': {id: data[id]}} except KeyError: raise exception.VolumeMetadataNotFound(volume_id=volume_id, metadata_key=id) def delete(self, req, volume_id, id): """Deletes an existing metadata.""" context = req.environ['cinder.context'] volume, metadata = self._get_volume_and_metadata(context, volume_id) if id not in metadata: raise exception.VolumeMetadataNotFound(volume_id=volume_id, metadata_key=id) # Not found exception will be handled at the wsgi level 
self.volume_api.delete_volume_metadata( context, volume, id, meta_type=common.METADATA_TYPES.user) return webob.Response(status_int=HTTPStatus.OK) def create_resource(): return wsgi.Resource(Controller()) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/api/v2/volumes.py0000664000175000017500000003002100000000000017473 0ustar00zuulzuul00000000000000# Copyright 2011 Justin Santa Barbara # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """The volumes api.""" from http import HTTPStatus from oslo_config import cfg from oslo_log import log as logging from oslo_utils import uuidutils import webob from webob import exc from cinder.api import api_utils from cinder.api import common from cinder.api.contrib import scheduler_hints from cinder.api import microversions as mv from cinder.api.openstack import wsgi from cinder.api.schemas import volumes from cinder.api.v2.views import volumes as volume_views from cinder.api import validation from cinder import exception from cinder import group as group_api from cinder.i18n import _ from cinder.image import glance from cinder import objects from cinder import utils from cinder import volume as cinder_volume from cinder.volume import volume_utils CONF = cfg.CONF LOG = logging.getLogger(__name__) class VolumeController(wsgi.Controller): """The Volumes API controller for the OpenStack API.""" _view_builder_class = volume_views.ViewBuilder def __init__(self, ext_mgr): self.volume_api = cinder_volume.API() self.group_api = group_api.API() self.ext_mgr = ext_mgr super(VolumeController, self).__init__() def show(self, req, id): """Return data about the given volume.""" context = req.environ['cinder.context'] # Not found exception will be handled at the wsgi level vol = self.volume_api.get(context, id, viewable_admin_meta=True) req.cache_db_volume(vol) api_utils.add_visible_admin_metadata(vol) return self._view_builder.detail(req, vol) def delete(self, req, id): """Delete a volume.""" context = req.environ['cinder.context'] cascade = utils.get_bool_param('cascade', req.params) LOG.info("Delete volume with id: %s", id) # Not found exception will be handled at the wsgi level volume = self.volume_api.get(context, id) self.volume_api.delete(context, volume, cascade=cascade) return webob.Response(status_int=HTTPStatus.ACCEPTED) def index(self, req): """Returns a summary list of volumes.""" return self._get_volumes(req, is_detail=False) def detail(self, req): """Returns a detailed list of volumes.""" return self._get_volumes(req, is_detail=True) def _get_volumes(self, req, is_detail): """Returns a list of volumes, transformed through view builder.""" context = req.environ['cinder.context'] params = req.params.copy() marker, limit, offset = common.get_pagination_params(params) sort_keys, sort_dirs = common.get_sort_params(params) filters = params # NOTE(wanghao): Always removing glance_metadata since we support it # only in API version >= VOLUME_LIST_GLANCE_METADATA. 
filters.pop('glance_metadata', None) api_utils.remove_invalid_filter_options( context, filters, self._get_volume_filter_options()) # NOTE(thingee): v2 API allows name instead of display_name if 'name' in sort_keys: sort_keys[sort_keys.index('name')] = 'display_name' if 'name' in filters: filters['display_name'] = filters.pop('name') self.volume_api.check_volume_filters(filters) volumes = self.volume_api.get_all(context, marker, limit, sort_keys=sort_keys, sort_dirs=sort_dirs, filters=filters, viewable_admin_meta=True, offset=offset) for volume in volumes: api_utils.add_visible_admin_metadata(volume) req.cache_db_volumes(volumes.objects) if is_detail: volumes = self._view_builder.detail_list(req, volumes) else: volumes = self._view_builder.summary_list(req, volumes) return volumes def _image_uuid_from_ref(self, image_ref, context): # If the image ref was generated by nova api, strip image_ref # down to an id. image_uuid = None try: image_uuid = image_ref.split('/').pop() except AttributeError: msg = _("Invalid imageRef provided.") raise exc.HTTPBadRequest(explanation=msg) image_service = glance.get_default_image_service() # First see if this is an actual image ID if uuidutils.is_uuid_like(image_uuid): try: image = image_service.show(context, image_uuid) if 'id' in image: return image['id'] except Exception: # Pass and see if there is a matching image name pass # Could not find by ID, check if it is an image name try: params = {'filters': {'name': image_ref}} images = list(image_service.detail(context, **params)) if len(images) > 1: msg = _("Multiple matches found for '%s', use an ID to be more" " specific.") % image_ref raise exc.HTTPConflict(explanation=msg) for img in images: return img['id'] except exc.HTTPConflict: raise except Exception: # Pass the other exception and let default not found error # handling take care of it pass msg = _("Invalid image identifier or unable to " "access requested image.") raise exc.HTTPBadRequest(explanation=msg) # NOTE: using mv.BASE_VERSION (which is 3.0) is a bit nonstandard, # but this class is no longer consumed by the v2 API, though it is # a superclass of cinder.api.v3.volumes. Although create() is # overridden in the subclass, I didn't want to remove it from # here until we are sure that the v3 unit tests for create() test # everything that the v2 unit tests covered. @wsgi.response(HTTPStatus.ACCEPTED) @validation.schema(volumes.create, mv.BASE_VERSION) def create(self, req, body): """Creates a new volume.""" LOG.debug('Create volume request body: %s', body) context = req.environ['cinder.context'] # NOTE (pooja_jadhav) To fix bug 1774155, scheduler hints is not # loaded as a standard extension. If user passes # OS-SCH-HNT:scheduler_hints in the request body, then it will be # validated in the create method and this method will add # scheduler_hints in body['volume']. 
body = scheduler_hints.create(req, body) volume = body['volume'] kwargs = {} self.validate_name_and_description(volume, check_length=False) # NOTE(thingee): v2 API allows name instead of display_name if 'name' in volume: volume['display_name'] = volume.pop('name') # NOTE(thingee): v2 API allows description instead of # display_description if 'description' in volume: volume['display_description'] = volume.pop('description') if 'image_id' in volume: volume['imageRef'] = volume.pop('image_id') req_volume_type = volume.get('volume_type', None) if req_volume_type: # Not found exception will be handled at the wsgi level kwargs['volume_type'] = ( objects.VolumeType.get_by_name_or_id(context, req_volume_type)) kwargs['metadata'] = volume.get('metadata', None) snapshot_id = volume.get('snapshot_id') if snapshot_id is not None: # Not found exception will be handled at the wsgi level kwargs['snapshot'] = self.volume_api.get_snapshot(context, snapshot_id) else: kwargs['snapshot'] = None source_volid = volume.get('source_volid') if source_volid is not None: # Not found exception will be handled at the wsgi level kwargs['source_volume'] = \ self.volume_api.get_volume(context, source_volid) else: kwargs['source_volume'] = None kwargs['group'] = None kwargs['consistencygroup'] = None consistencygroup_id = volume.get('consistencygroup_id') if consistencygroup_id is not None: # Not found exception will be handled at the wsgi level kwargs['group'] = self.group_api.get(context, consistencygroup_id) size = volume.get('size', None) if size is None and kwargs['snapshot'] is not None: size = kwargs['snapshot']['volume_size'] elif size is None and kwargs['source_volume'] is not None: size = kwargs['source_volume']['size'] LOG.info("Create volume of %s GB", size) image_ref = volume.get('imageRef') if image_ref is not None: image_uuid = self._image_uuid_from_ref(image_ref, context) kwargs['image_id'] = image_uuid kwargs['availability_zone'] = volume.get('availability_zone', None) kwargs['scheduler_hints'] = volume.get('scheduler_hints', None) try: new_volume = self.volume_api.create( context, size, volume.get('display_name'), volume.get('display_description'), **kwargs) except exception.VolumeTypeDefaultMisconfiguredError as err: raise webob.exc.HTTPInternalServerError(explanation=err.msg) retval = self._view_builder.detail(req, new_volume) return retval def _get_volume_filter_options(self): """Return volume search options allowed by non-admin.""" return common.get_enabled_resource_filters('volume').get('volume', []) # NOTE: see NOTE for create(), above @validation.schema(volumes.update, mv.BASE_VERSION, mv.get_prior_version(mv.SUPPORT_VOLUME_SCHEMA_CHANGES)) @validation.schema(volumes.update_volume_v353, mv.SUPPORT_VOLUME_SCHEMA_CHANGES) def update(self, req, id, body): """Update a volume.""" context = req.environ['cinder.context'] update_dict = body['volume'] self.validate_name_and_description(update_dict, check_length=False) # NOTE(thingee): v2 API allows name instead of display_name if 'name' in update_dict: update_dict['display_name'] = update_dict.pop('name') # NOTE(thingee): v2 API allows description instead of # display_description if 'description' in update_dict: update_dict['display_description'] = update_dict.pop('description') # Not found and Invalid exceptions will be handled at the wsgi level try: volume = self.volume_api.get(context, id, viewable_admin_meta=True) volume_utils.notify_about_volume_usage(context, volume, 'update.start') self.volume_api.update(context, volume, update_dict) except 
exception.InvalidVolumeMetadataSize as error: raise webob.exc.HTTPRequestEntityTooLarge(explanation=error.msg) volume.update(update_dict) api_utils.add_visible_admin_metadata(volume) volume_utils.notify_about_volume_usage(context, volume, 'update.end') return self._view_builder.detail(req, volume) def create_resource(ext_mgr): return wsgi.Resource(VolumeController(ext_mgr)) ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1759315577.055118 cinder-27.0.0/cinder/api/v3/0000775000175000017500000000000000000000000015434 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/api/v3/__init__.py0000664000175000017500000000000000000000000017533 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/api/v3/attachments.py0000664000175000017500000003055000000000000020324 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """The volumes attachments API.""" from http import HTTPStatus from oslo_log import log as logging import webob from cinder.api import api_utils from cinder.api import common from cinder.api import microversions as mv from cinder.api.openstack import wsgi from cinder.api.schemas import attachments as attachment from cinder.api.v3.views import attachments as attachment_views from cinder.api import validation from cinder import context as cinder_context from cinder import exception from cinder.i18n import _ from cinder import objects from cinder.objects import fields from cinder.policies import attachments as attachment_policy from cinder.volume import api as volume_api from cinder.volume import volume_utils LOG = logging.getLogger(__name__) class AttachmentsController(wsgi.Controller): """The Attachments API controller for the OpenStack API.""" _view_builder_class = attachment_views.ViewBuilder allowed_filters = {'volume_id', 'status', 'instance_id', 'attach_status'} def __init__(self, ext_mgr=None): """Initialize controller class.""" self.volume_api = volume_api.API() self.ext_mgr = ext_mgr super(AttachmentsController, self).__init__() @wsgi.Controller.api_version(mv.NEW_ATTACH) def show(self, req, id): """Return data about the given attachment.""" context = req.environ['cinder.context'] attachment = objects.VolumeAttachment.get_by_id(context, id) volume = objects.Volume.get_by_id(cinder_context.get_admin_context(), attachment.volume_id) if volume.admin_metadata and 'format' in volume.admin_metadata: attachment.connection_info['format'] = ( volume.admin_metadata['format']) return attachment_views.ViewBuilder.detail(attachment) @wsgi.Controller.api_version(mv.NEW_ATTACH) def index(self, req): """Return a summary list of attachments.""" attachments = self._items(req) return attachment_views.ViewBuilder.list(attachments) @wsgi.Controller.api_version(mv.NEW_ATTACH) def detail(self, req): """Return a detailed list of 
attachments.""" attachments = self._items(req) return attachment_views.ViewBuilder.list(attachments, detail=True) @common.process_general_filtering('attachment') def _process_attachment_filtering(self, context=None, filters=None, req_version=None): api_utils.remove_invalid_filter_options(context, filters, self.allowed_filters) def _items(self, req): """Return a list of attachments, transformed through view builder.""" context = req.environ['cinder.context'] req_version = req.api_version_request # Pop out non search_opts and create local variables search_opts = req.GET.copy() sort_keys, sort_dirs = common.get_sort_params(search_opts) marker, limit, offset = common.get_pagination_params(search_opts) self._process_attachment_filtering(context=context, filters=search_opts, req_version=req_version) if search_opts.get('instance_id', None): search_opts['instance_uuid'] = search_opts.pop('instance_id', None) if context.is_admin and 'all_tenants' in search_opts: del search_opts['all_tenants'] return objects.VolumeAttachmentList.get_all( context, search_opts=search_opts, marker=marker, limit=limit, offset=offset, sort_keys=sort_keys, sort_direction=sort_dirs) else: return objects.VolumeAttachmentList.get_all_by_project( context, context.project_id, search_opts=search_opts, marker=marker, limit=limit, offset=offset, sort_keys=sort_keys, sort_direction=sort_dirs) @wsgi.Controller.api_version(mv.NEW_ATTACH) @wsgi.response(HTTPStatus.OK) @validation.schema(attachment.create, mv.NEW_ATTACH, mv.get_prior_version(mv.ATTACHMENT_CREATE_MODE_ARG)) @validation.schema(attachment.create_v354, mv.ATTACHMENT_CREATE_MODE_ARG) def create(self, req, body): """Create an attachment. This method can be used to create an empty attachment (reserve) or to create and initialize a volume attachment based on the provided input parameters. If the caller does not yet have the connector information but needs to reserve an attachment for the volume (ie Nova BootFromVolume) the create can be called with just the volume-uuid and the server identifier. This will reserve an attachment, mark the volume as reserved and prevent any new attachment_create calls from being made until the attachment is updated (completed). The alternative is that the connection can be reserved and initialized all at once with a single call if the caller has all of the required information (connector data) at the time of the call. NOTE: In Nova terms server == instance, the server_id parameter referenced below is the UUID of the Instance, for non-nova consumers this can be a server UUID or some other arbitrary unique identifier. Starting from microversion 3.54, we can pass the attach mode as argument in the request body. Expected format of the input parameter 'body': .. code-block:: json { "attachment": { "volume_uuid": "volume-uuid", "instance_uuid": "null|nova-server-uuid", "connector": "null|", "mode": "null|rw|ro" } } Example connector: .. code-block:: json { "connector": { "initiator": "iqn.1993-08.org.debian:01:cad181614cec", "ip": "192.168.1.20", "platform": "x86_64", "host": "tempest-1", "os_type": "linux2", "multipath": false, "mountpoint": "/dev/vdb", "mode": "null|rw|ro" } } NOTE all that's required for a reserve is volume_uuid and an instance_uuid. 
returns: A summary view of the attachment object """ context = req.environ['cinder.context'] instance_uuid = body['attachment'].get('instance_uuid') volume_uuid = body['attachment']['volume_uuid'] volume_ref = objects.Volume.get_by_id( context, volume_uuid) args = {'connector': body['attachment'].get('connector', None)} if req.api_version_request.matches(mv.ATTACHMENT_CREATE_MODE_ARG): # We check for attach_mode here and default to `null` # if nothing's provided. This seems odd to not just # set `rw`, BUT we want to keep compatability with # setting the mode via the connector for now, so we # use `null` as an identifier to distinguish that case args['attach_mode'] = body['attachment'].get('mode', 'null') err_msg = None try: attachment_ref = ( self.volume_api.attachment_create(context, volume_ref, instance_uuid, **args)) except (exception.NotAuthorized, exception.InvalidVolume): raise except exception.CinderException as ex: err_msg = _( "Unable to create attachment for volume (%s).") % ex.msg LOG.exception(err_msg) except Exception: err_msg = _("Unable to create attachment for volume.") LOG.exception(err_msg) finally: if err_msg: raise webob.exc.HTTPInternalServerError(explanation=err_msg) return attachment_views.ViewBuilder.detail(attachment_ref) @wsgi.Controller.api_version(mv.NEW_ATTACH) @validation.schema(attachment.update) def update(self, req, id, body): """Update an attachment record. Update a reserved attachment record with connector information and set up the appropriate connection_info from the driver. Expected format of the input parameter 'body': .. code:: json { "attachment": { "connector": { "initiator": "iqn.1993-08.org.debian:01:cad181614cec", "ip": "192.168.1.20", "platform": "x86_64", "host": "tempest-1", "os_type": "linux2", "multipath": false, "mountpoint": "/dev/vdb", "mode": "None|rw|ro" } } } """ context = req.environ['cinder.context'] attachment_ref = ( objects.VolumeAttachment.get_by_id(context, id)) connector = body['attachment']['connector'] err_msg = None try: attachment_ref = ( self.volume_api.attachment_update(context, attachment_ref, connector)) except (exception.NotAuthorized, exception.Invalid): raise except exception.CinderException as ex: err_msg = ( _("Unable to update attachment (%s).") % ex.msg) LOG.exception(err_msg) except Exception: err_msg = _("Unable to update the attachment.") LOG.exception(err_msg) finally: if err_msg: raise webob.exc.HTTPInternalServerError(explanation=err_msg) # TODO(jdg): Test this out some more, do we want to return and object # or a dict? return attachment_views.ViewBuilder.detail(attachment_ref) @wsgi.Controller.api_version(mv.NEW_ATTACH) def delete(self, req, id): """Delete an attachment. Disconnects/Deletes the specified attachment, returns a list of any known shared attachment-id's for the effected backend device. 
returns: A summary list of any attachments sharing this connection """ context = req.environ['cinder.context'] attachment = objects.VolumeAttachment.get_by_id(context, id) attachments = self.volume_api.attachment_delete(context, attachment) return attachment_views.ViewBuilder.list(attachments) @wsgi.response(HTTPStatus.NO_CONTENT) @wsgi.Controller.api_version(mv.NEW_ATTACH_COMPLETION) @wsgi.action('os-complete') def complete(self, req, id, body): """Mark a volume attachment process as completed (in-use).""" context = req.environ['cinder.context'] attachment_ref = ( objects.VolumeAttachment.get_by_id(context, id)) volume_ref = objects.Volume.get_by_id( context, attachment_ref.volume_id) context.authorize(attachment_policy.COMPLETE_POLICY, target_obj=attachment_ref) attachment_ref.update( {'attach_status': fields.VolumeAttachStatus.ATTACHED}) attachment_ref.save() volume_ref.update({'status': 'in-use', 'attach_status': 'attached'}) volume_ref.save() volume_utils.notify_about_volume_usage(context, volume_ref, "attach.end") def create_resource(ext_mgr): """Create the wsgi resource for this controller.""" return wsgi.Resource(AttachmentsController(ext_mgr)) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/api/v3/backups.py0000664000175000017500000001075400000000000017445 0ustar00zuulzuul00000000000000# Copyright (c) 2016 Intel, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""The backups V3 API.""" from oslo_log import log as logging from cinder.api.contrib import backups as backups_v2 from cinder.api import microversions as mv from cinder.api.openstack import wsgi from cinder.api.schemas import backups as backup from cinder.api.v3.views import backups as backup_views from cinder.api import validation from cinder.policies import backups as policy LOG = logging.getLogger(__name__) class BackupsController(backups_v2.BackupsController): """The backups API controller for the OpenStack API V3.""" _view_builder_class = backup_views.ViewBuilder @wsgi.Controller.api_version(mv.BACKUP_UPDATE) @validation.schema(backup.update, mv.BACKUP_UPDATE, mv.get_prior_version(mv.BACKUP_METADATA)) @validation.schema(backup.update_backup_v343, mv.BACKUP_METADATA) def update(self, req, id, body): """Update a backup.""" context = req.environ['cinder.context'] req_version = req.api_version_request backup_update = body['backup'] self.validate_name_and_description(backup_update, check_length=False) update_dict = {} if 'name' in backup_update: update_dict['display_name'] = backup_update.pop('name') if 'description' in backup_update: update_dict['display_description'] = ( backup_update.pop('description')) if (req_version.matches( mv.BACKUP_METADATA) and 'metadata' in backup_update): update_dict['metadata'] = backup_update.pop('metadata') new_backup = self.backup_api.update(context, id, update_dict) return self._view_builder.summary(req, new_backup) def _add_backup_project_attribute(self, req, backup): db_backup = req.get_db_backup(backup['id']) key = "os-backup-project-attr:project_id" backup[key] = db_backup['project_id'] def _add_backup_user_attribute(self, req, backup): db_backup = req.get_db_backup(backup['id']) key = "user_id" backup[key] = db_backup['user_id'] def show(self, req, id): """Return data about the given backup.""" LOG.debug('Show backup with id %s.', id) context = req.environ['cinder.context'] req_version = req.api_version_request # Not found exception will be handled at the wsgi level backup = self.backup_api.get(context, backup_id=id) req.cache_db_backup(backup) resp_backup = self._view_builder.detail(req, backup) if req_version.matches(mv.BACKUP_PROJECT): if context.authorize(policy.BACKUP_ATTRIBUTES_POLICY, fatal=False): self._add_backup_project_attribute(req, resp_backup['backup']) if req_version.matches(mv.BACKUP_PROJECT_USER_ID): if context.authorize(policy.BACKUP_ATTRIBUTES_POLICY, fatal=False): self._add_backup_user_attribute(req, resp_backup['backup']) return resp_backup def detail(self, req): resp_backup = super(BackupsController, self).detail(req) context = req.environ['cinder.context'] req_version = req.api_version_request if req_version.matches(mv.BACKUP_PROJECT): if context.authorize(policy.BACKUP_ATTRIBUTES_POLICY, fatal=False): for bak in resp_backup['backups']: self._add_backup_project_attribute(req, bak) if req_version.matches(mv.BACKUP_PROJECT_USER_ID): if context.authorize(policy.BACKUP_ATTRIBUTES_POLICY, fatal=False): for bak in resp_backup['backups']: self._add_backup_user_attribute(req, bak) return resp_backup def _convert_sort_name(self, req_version, sort_keys): if req_version.matches(mv.BACKUP_SORT_NAME) and 'name' in sort_keys: sort_keys[sort_keys.index('name')] = 'display_name' def create_resource(): return wsgi.Resource(BackupsController()) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/api/v3/clusters.py0000664000175000017500000001307100000000000017654 
0ustar00zuulzuul00000000000000# Copyright (c) 2016 Red Hat Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from cinder.api import microversions as mv from cinder.api.openstack import wsgi from cinder.api.schemas import clusters as cluster from cinder.api.v3.views import clusters as clusters_view from cinder.api import validation from cinder.common import constants from cinder import exception from cinder.i18n import _ from cinder import objects from cinder.policies import clusters as policy from cinder import utils class ClusterController(wsgi.Controller): allowed_list_keys = {'name', 'binary', 'is_up', 'disabled', 'num_hosts', 'num_down_hosts', 'binary', 'replication_status', 'frozen', 'active_backend_id'} replication_fields = {'replication_status', 'frozen', 'active_backend_id'} @wsgi.Controller.api_version(mv.CLUSTER_SUPPORT) def show(self, req, id, binary=constants.VOLUME_BINARY): """Return data for a given cluster name with optional binary.""" # Let the wsgi middleware convert NotAuthorized exceptions context = req.environ['cinder.context'] context.authorize(policy.GET_POLICY) # Let the wsgi middleware convert NotFound exceptions cluster = objects.Cluster.get_by_id(context, None, binary=binary, name=id, services_summary=True) replication_data = req.api_version_request.matches( mv.REPLICATION_CLUSTER) return clusters_view.ViewBuilder.detail(cluster, replication_data) @wsgi.Controller.api_version(mv.CLUSTER_SUPPORT) def index(self, req): """Return a non detailed list of all existing clusters. Filter by is_up, disabled, num_hosts, and num_down_hosts. """ return self._get_clusters(req, detail=False) @wsgi.Controller.api_version(mv.CLUSTER_SUPPORT) def detail(self, req): """Return a detailed list of all existing clusters. Filter by is_up, disabled, num_hosts, and num_down_hosts. 
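        For example (an illustrative request; the project id and values are
        placeholders): ``GET /v3/{project_id}/clusters/detail?is_up=true&disabled=false``
        lists only clusters that are up and enabled. Boolean filters such as
        ``is_up`` and ``disabled`` accept the usual true/false spellings and
        are parsed with get_bool_param() below.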
""" return self._get_clusters(req, detail=True) def _get_clusters(self, req, detail): # Let the wsgi middleware convert NotAuthorized exceptions context = req.environ['cinder.context'] context.authorize(policy.GET_ALL_POLICY) replication_data = req.api_version_request.matches( mv.REPLICATION_CLUSTER) filters = dict(req.GET) allowed = self.allowed_list_keys if not replication_data: allowed = allowed.difference(self.replication_fields) # Check filters are valid if not allowed.issuperset(filters): invalid_keys = set(filters).difference(allowed) msg = _('Invalid filter keys: %s') % ', '.join(invalid_keys) raise exception.InvalidInput(reason=msg) # Check boolean values for bool_key in ('disabled', 'is_up'): if bool_key in filters: filters[bool_key] = utils.get_bool_param(bool_key, req.GET) # For detailed view we need the services summary information filters['services_summary'] = detail clusters = objects.ClusterList.get_all(context, **filters) return clusters_view.ViewBuilder.list(clusters, detail, replication_data) @wsgi.Controller.api_version(mv.CLUSTER_SUPPORT) def update(self, req, id, body): """Enable/Disable scheduling for a cluster.""" # NOTE(geguileo): This method tries to be consistent with services # update endpoint API. # Let the wsgi middleware convert NotAuthorized exceptions context = req.environ['cinder.context'] context.authorize(policy.UPDATE_POLICY) if id not in ('enable', 'disable'): raise exception.NotFound(message=_("Unknown action")) disabled = id != 'enable' disabled_reason = self._disable_cluster( req, body=body) if disabled else self._enable_cluster( req, body=body) name = body['name'] binary = body.get('binary', constants.VOLUME_BINARY) # Let wsgi handle NotFound exception cluster = objects.Cluster.get_by_id(context, None, binary=binary, name=name) cluster.disabled = disabled cluster.disabled_reason = disabled_reason cluster.save() # We return summary data plus the disabled reason replication_data = req.api_version_request.matches( mv.REPLICATION_CLUSTER) ret_val = clusters_view.ViewBuilder.summary(cluster, replication_data) ret_val['cluster']['disabled_reason'] = disabled_reason return ret_val @validation.schema(cluster.disable_cluster) def _disable_cluster(self, req, body): reason = body.get('disabled_reason') if reason: reason = reason.strip() return reason @validation.schema(cluster.enable_cluster) def _enable_cluster(self, req, body): pass def create_resource(): return wsgi.Resource(ClusterController()) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/api/v3/consistencygroups.py0000664000175000017500000000744700000000000021623 0ustar00zuulzuul00000000000000# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""The consistencygroups V3 API.""" from http import HTTPStatus from oslo_log import log as logging import webob from webob import exc from cinder.api.contrib import consistencygroups as cg_v2 from cinder.api import microversions as mv from cinder.api.openstack import wsgi from cinder.i18n import _ from cinder.policies import groups as group_policy LOG = logging.getLogger(__name__) class ConsistencyGroupsController(cg_v2.ConsistencyGroupsController): """The ConsistencyGroups API controller for the OpenStack API V3.""" def _check_update_parameters_v3(self, req, name, description, add_volumes, remove_volumes): allow_empty = req.api_version_request.matches( mv.CG_UPDATE_BLANK_PROPERTIES, None) if allow_empty: if (name is None and description is None and not add_volumes and not remove_volumes): msg = _("Must specify one or more of the following keys to " "update: name, description, " "add_volumes, remove_volumes.") raise exc.HTTPBadRequest(explanation=msg) else: if not (name or description or add_volumes or remove_volumes): msg = _("Name, description, add_volumes, and remove_volumes " "can not be all empty in the request body.") raise exc.HTTPBadRequest(explanation=msg) return allow_empty def update(self, req, id, body): """Update the consistency group. Expected format of the input parameter 'body': .. code-block:: json { "consistencygroup": { "name": "my_cg", "description": "My consistency group", "add_volumes": "volume-uuid-1,volume-uuid-2,...", "remove_volumes": "volume-uuid-8,volume-uuid-9,..." } } """ LOG.debug('Update called for consistency group %s.', id) if not body: msg = _("Missing request body.") raise exc.HTTPBadRequest(explanation=msg) self.assert_valid_body(body, 'consistencygroup') context = req.environ['cinder.context'] group = self._get(context, id) context.authorize(group_policy.UPDATE_POLICY, target_obj=group) consistencygroup = body.get('consistencygroup', None) self.validate_name_and_description(consistencygroup) name = consistencygroup.get('name', None) description = consistencygroup.get('description', None) add_volumes = consistencygroup.get('add_volumes', None) remove_volumes = consistencygroup.get('remove_volumes', None) allow_empty = self._check_update_parameters_v3(req, name, description, add_volumes, remove_volumes) self._update(context, group, name, description, add_volumes, remove_volumes, allow_empty) return webob.Response(status_int=HTTPStatus.ACCEPTED) def create_resource(): return wsgi.Resource(ConsistencyGroupsController()) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/api/v3/default_types.py0000664000175000017500000000777400000000000020675 0ustar00zuulzuul00000000000000# Copyright 2020 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""The resource filters api.""" from http import HTTPStatus from webob import exc from cinder.api import api_utils as utils from cinder.api import microversions as mv from cinder.api.openstack import wsgi from cinder.api.schemas import default_types from cinder.api.v3.views import default_types as default_types_view from cinder.api import validation from cinder import db from cinder import exception from cinder.i18n import _ from cinder import objects from cinder.policies import default_types as policy class DefaultTypesController(wsgi.Controller): """The Default types API controller for the OpenStack API.""" _view_builder_class = default_types_view.ViewBuilder @wsgi.response(HTTPStatus.OK) @wsgi.Controller.api_version(mv.DEFAULT_TYPE_OVERRIDES) @validation.schema(default_types.create_or_update) def create_update(self, req, id, body): """Set a default volume type for the specified project.""" context = req.environ['cinder.context'] project_id = id volume_type_id = body['default_type']['volume_type'] utils.validate_project_and_authorize(context, project_id, policy.CREATE_UPDATE_POLICY) try: volume_type_id = objects.VolumeType.get_by_name_or_id( context, volume_type_id).id except exception.VolumeTypeNotFound as e: raise exc.HTTPBadRequest(explanation=e.msg) default_type = db.project_default_volume_type_set( context, volume_type_id, project_id) return self._view_builder.create(default_type) @wsgi.response(HTTPStatus.OK) @wsgi.Controller.api_version(mv.DEFAULT_TYPE_OVERRIDES) def detail(self, req, id): """Return detail of a default type.""" context = req.environ['cinder.context'] project_id = id utils.validate_project_and_authorize(context, project_id, policy.GET_POLICY) default_type = db.project_default_volume_type_get(context, project_id) if not default_type: raise exception.VolumeTypeProjectDefaultNotFound( project_id=project_id) return self._view_builder.detail(default_type) @wsgi.response(HTTPStatus.OK) @wsgi.Controller.api_version(mv.DEFAULT_TYPE_OVERRIDES) def index(self, req): """Return a list of default types.""" context = req.environ['cinder.context'] try: context.authorize(policy.GET_ALL_POLICY) except exception.NotAuthorized: explanation = _("You are not authorized to perform this " "operation.") raise exc.HTTPForbidden(explanation=explanation) default_types = db.project_default_volume_type_get(context) return self._view_builder.index(default_types) @wsgi.response(HTTPStatus.NO_CONTENT) @wsgi.Controller.api_version(mv.DEFAULT_TYPE_OVERRIDES) def delete(self, req, id): """Unset a default volume type for a project.""" context = req.environ['cinder.context'] project_id = id utils.validate_project_and_authorize(context, project_id, policy.DELETE_POLICY) db.project_default_volume_type_unset(context, id) def create_resource(): """Create the wsgi resource for this controller.""" return wsgi.Resource(DefaultTypesController()) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/api/v3/group_snapshots.py0000664000175000017500000002210400000000000021243 0ustar00zuulzuul00000000000000# Copyright (C) 2016 EMC Corporation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """The group_snapshots API.""" from http import HTTPStatus from oslo_log import log as logging import webob from webob import exc from cinder.api import common from cinder.api import microversions as mv from cinder.api.openstack import wsgi from cinder.api.schemas import group_snapshots as snapshot from cinder.api.v3.views import group_snapshots as group_snapshot_views from cinder.api import validation from cinder import exception from cinder import group as group_api from cinder.i18n import _ from cinder import rpc from cinder.volume import group_types LOG = logging.getLogger(__name__) class GroupSnapshotsController(wsgi.Controller): """The group_snapshots API controller for the OpenStack API.""" _view_builder_class = group_snapshot_views.ViewBuilder def __init__(self): self.group_snapshot_api = group_api.API() super(GroupSnapshotsController, self).__init__() def _check_default_cgsnapshot_type(self, group_type_id): if group_types.is_default_cgsnapshot_type(group_type_id): msg = (_("Group_type %(group_type)s is reserved for migrating " "CGs to groups. Migrated group snapshots can only be " "operated by CG snapshot APIs.") % {'group_type': group_type_id}) raise exc.HTTPBadRequest(explanation=msg) @wsgi.Controller.api_version(mv.GROUP_SNAPSHOTS) def show(self, req, id): """Return data about the given group_snapshot.""" LOG.debug('show called for member %s', id) context = req.environ['cinder.context'] group_snapshot = self.group_snapshot_api.get_group_snapshot( context, group_snapshot_id=id) self._check_default_cgsnapshot_type(group_snapshot.group_type_id) return self._view_builder.detail(req, group_snapshot) @wsgi.Controller.api_version(mv.GROUP_SNAPSHOTS) def delete(self, req, id): """Delete a group_snapshot.""" LOG.debug('delete called for member %s', id) context = req.environ['cinder.context'] LOG.info('Delete group_snapshot with id: %s', id, context=context) try: group_snapshot = self.group_snapshot_api.get_group_snapshot( context, group_snapshot_id=id) self._check_default_cgsnapshot_type(group_snapshot.group_type_id) self.group_snapshot_api.delete_group_snapshot(context, group_snapshot) except exception.InvalidGroupSnapshot as e: raise exc.HTTPBadRequest(explanation=str(e)) except (exception.GroupSnapshotNotFound, exception.PolicyNotAuthorized): # Not found exception will be handled at the wsgi level raise except Exception: msg = _("Error occurred when deleting group snapshot %s.") % id LOG.exception(msg) raise exc.HTTPBadRequest(explanation=msg) return webob.Response(status_int=HTTPStatus.ACCEPTED) @wsgi.Controller.api_version(mv.GROUP_SNAPSHOTS) def index(self, req): """Returns a summary list of group_snapshots.""" return self._get_group_snapshots(req, is_detail=False) @wsgi.Controller.api_version(mv.GROUP_SNAPSHOTS) def detail(self, req): """Returns a detailed list of group_snapshots.""" return self._get_group_snapshots(req, is_detail=True) def _get_group_snapshots(self, req, is_detail): """Returns a list of group_snapshots through view builder.""" context = req.environ['cinder.context'] req_version = req.api_version_request filters = marker = limit = offset = sort_keys = sort_dirs = 
None if req_version.matches(mv.GROUP_SNAPSHOT_PAGINATION): filters = req.params.copy() marker, limit, offset = common.get_pagination_params(filters) sort_keys, sort_dirs = common.get_sort_params(filters) if req_version.matches(mv.RESOURCE_FILTER): support_like = (True if req_version.matches( mv.LIKE_FILTER) else False) common.reject_invalid_filters(context, filters, 'group_snapshot', support_like) group_snapshots = self.group_snapshot_api.get_all_group_snapshots( context, filters=filters, marker=marker, limit=limit, offset=offset, sort_keys=sort_keys, sort_dirs=sort_dirs) if is_detail: group_snapshots = self._view_builder.detail_list(req, group_snapshots) else: group_snapshots = self._view_builder.summary_list(req, group_snapshots) new_group_snapshots = [] for grp_snap in group_snapshots['group_snapshots']: try: # Only show group snapshots not migrated from CG snapshots self._check_default_cgsnapshot_type(grp_snap['group_type_id']) if not is_detail: grp_snap.pop('group_type_id', None) new_group_snapshots.append(grp_snap) except exc.HTTPBadRequest: # Skip migrated group snapshot pass group_snapshots['group_snapshots'] = new_group_snapshots return group_snapshots @wsgi.Controller.api_version(mv.GROUP_SNAPSHOTS) @wsgi.response(HTTPStatus.ACCEPTED) @validation.schema(snapshot.create) def create(self, req, body): """Create a new group_snapshot.""" LOG.debug('Creating new group_snapshot %s', body) context = req.environ['cinder.context'] group_snapshot = body['group_snapshot'] group_id = group_snapshot['group_id'] group = self.group_snapshot_api.get(context, group_id) self._check_default_cgsnapshot_type(group.group_type_id) name = group_snapshot.get('name', None) description = group_snapshot.get('description', None) LOG.info("Creating group_snapshot %(name)s.", {'name': name}, context=context) try: new_group_snapshot = self.group_snapshot_api.create_group_snapshot( context, group, name, description) except (exception.InvalidGroup, exception.InvalidGroupSnapshot, exception.InvalidVolume) as error: raise exc.HTTPBadRequest(explanation=error.msg) retval = self._view_builder.summary(req, new_group_snapshot) return retval @wsgi.Controller.api_version(mv.GROUP_SNAPSHOT_RESET_STATUS) @wsgi.action("reset_status") @validation.schema(snapshot.reset_status) def reset_status(self, req, id, body): return self._reset_status(req, id, body) def _reset_status(self, req, id, body): """Reset status on group snapshots""" context = req.environ['cinder.context'] status = body['reset_status']['status'].lower() LOG.debug("Updating group '%(id)s' with " "'%(update)s'", {'id': id, 'update': status}) try: notifier = rpc.get_notifier('groupSnapshotStatusUpdate') notifier.info(context, 'groupsnapshots.reset_status.start', {'id': id, 'update': status}) gsnapshot = self.group_snapshot_api.get_group_snapshot(context, id) self.group_snapshot_api.reset_group_snapshot_status(context, gsnapshot, status) notifier.info(context, 'groupsnapshots.reset_status.end', {'id': id, 'update': status}) except exception.GroupSnapshotNotFound as error: # Not found exception will be handled at the wsgi level notifier.error(context, 'groupsnapshots.reset_status', {'error_message': error.msg, 'id': id}) raise except exception.InvalidGroupSnapshotStatus as error: notifier.error(context, 'groupsnapshots.reset_status', {'error_message': error.msg, 'id': id}) raise exc.HTTPBadRequest(explanation=error.msg) return webob.Response(status_int=HTTPStatus.ACCEPTED) def create_resource(): return wsgi.Resource(GroupSnapshotsController()) 
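# A minimal client-side sketch of the reset_status action handled by
# GroupSnapshotsController above. The endpoint path, token handling and the
# microversion value are assumptions for illustration; only the body shape
# ({'reset_status': {'status': ...}}) comes from the handler in this module.
import requests


def reset_group_snapshot_status(base_url, token, project_id, snapshot_id,
                                status='available'):
    """POST the reset_status action for a group snapshot."""
    url = '%s/v3/%s/group_snapshots/%s/action' % (
        base_url, project_id, snapshot_id)
    headers = {
        'X-Auth-Token': token,
        # Placeholder microversion; GROUP_SNAPSHOT_RESET_STATUS maps to a
        # specific 3.x value that is not shown in this module.
        'OpenStack-API-Version': 'volume 3.19',
    }
    resp = requests.post(url, json={'reset_status': {'status': status}},
                         headers=headers)
    resp.raise_for_status()  # the controller returns 202 Accepted on success
    return resp.status_code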
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/api/v3/group_specs.py0000664000175000017500000001210700000000000020340 0ustar00zuulzuul00000000000000# Copyright (c) 2016 EMC Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """The group types specs controller""" from http import HTTPStatus import webob from cinder.api import microversions as mv from cinder.api.openstack import wsgi from cinder.api.schemas import group_specs from cinder.api import validation from cinder import db from cinder import exception from cinder.i18n import _ from cinder.policies import group_types as policy from cinder import rpc from cinder.volume import group_types class GroupTypeSpecsController(wsgi.Controller): """The group type specs API controller for the OpenStack API.""" def _get_group_specs(self, context, group_type_id): group_specs = db.group_type_specs_get(context, group_type_id) specs_dict = {} for key, value in group_specs.items(): specs_dict[key] = value return dict(group_specs=specs_dict) def _check_type(self, context, group_type_id): try: group_types.get_group_type(context, group_type_id) except exception.GroupTypeNotFound as ex: raise webob.exc.HTTPNotFound(explanation=ex.msg) @wsgi.Controller.api_version(mv.GROUP_TYPE) def index(self, req, group_type_id): """Returns the list of group specs for a given group type.""" context = req.environ['cinder.context'] context.authorize(policy.SPEC_GET_ALL_POLICY) self._check_type(context, group_type_id) return self._get_group_specs(context, group_type_id) @wsgi.Controller.api_version(mv.GROUP_TYPE) @wsgi.response(HTTPStatus.ACCEPTED) @validation.schema(group_specs.create) def create(self, req, group_type_id, body): context = req.environ['cinder.context'] context.authorize(policy.SPEC_CREATE_POLICY) self._check_type(context, group_type_id) specs = body['group_specs'] db.group_type_specs_update_or_create(context, group_type_id, specs) notifier_info = dict(type_id=group_type_id, specs=specs) notifier = rpc.get_notifier('groupTypeSpecs') notifier.info(context, 'group_type_specs.create', notifier_info) return body @wsgi.Controller.api_version(mv.GROUP_TYPE) @validation.schema(group_specs.update) def update(self, req, group_type_id, id, body): context = req.environ['cinder.context'] context.authorize(policy.SPEC_UPDATE_POLICY) self._check_type(context, group_type_id) if id not in body: expl = _('Request body and URI mismatch') raise webob.exc.HTTPBadRequest(explanation=expl) db.group_type_specs_update_or_create(context, group_type_id, body) notifier_info = dict(type_id=group_type_id, id=id) notifier = rpc.get_notifier('groupTypeSpecs') notifier.info(context, 'group_type_specs.update', notifier_info) return body @wsgi.Controller.api_version(mv.GROUP_TYPE) def show(self, req, group_type_id, id): """Return a single extra spec item.""" context = req.environ['cinder.context'] context.authorize(policy.SPEC_GET_POLICY) self._check_type(context, group_type_id) specs = self._get_group_specs(context, 
group_type_id) if id in specs['group_specs']: return {id: specs['group_specs'][id]} else: msg = _("Group Type %(type_id)s has no extra spec with key " "%(id)s.") % ({'type_id': group_type_id, 'id': id}) raise webob.exc.HTTPNotFound(explanation=msg) @wsgi.Controller.api_version(mv.GROUP_TYPE) def delete(self, req, group_type_id, id): """Deletes an existing group spec.""" context = req.environ['cinder.context'] context.authorize(policy.SPEC_DELETE_POLICY) self._check_type(context, group_type_id) try: db.group_type_specs_delete(context, group_type_id, id) except exception.GroupTypeSpecsNotFound as error: raise webob.exc.HTTPNotFound(explanation=error.msg) notifier_info = dict(type_id=group_type_id, id=id) notifier = rpc.get_notifier('groupTypeSpecs') notifier.info(context, 'group_type_specs.delete', notifier_info) return webob.Response(status_int=HTTPStatus.ACCEPTED) def create_resource(): return wsgi.Resource(GroupTypeSpecsController()) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/api/v3/group_types.py0000664000175000017500000002247300000000000020376 0ustar00zuulzuul00000000000000# Copyright (c) 2016 EMC Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """The group type & group type specs controller.""" from http import HTTPStatus from oslo_utils import strutils import webob from webob import exc from cinder.api import api_utils from cinder.api import common from cinder.api import microversions as mv from cinder.api.openstack import wsgi from cinder.api.schemas import group_types as group_type from cinder.api.v3.views import group_types as views_types from cinder.api import validation from cinder import exception from cinder.i18n import _ from cinder.policies import group_types as policy from cinder import rpc from cinder import utils from cinder.volume import group_types class GroupTypesController(wsgi.Controller): """The group types API controller for the OpenStack API.""" _view_builder_class = views_types.ViewBuilder @utils.if_notifications_enabled def _notify_group_type_error(self, context, method, err, group_type=None, id=None, name=None): payload = dict( group_types=group_type, name=name, id=id, error_message=err) rpc.get_notifier('groupType').error(context, method, payload) @utils.if_notifications_enabled def _notify_group_type_info(self, context, method, group_type): payload = dict(group_types=group_type) rpc.get_notifier('groupType').info(context, method, payload) @wsgi.Controller.api_version(mv.GROUP_TYPE) @wsgi.response(HTTPStatus.ACCEPTED) @validation.schema(group_type.create) def create(self, req, body): """Creates a new group type.""" context = req.environ['cinder.context'] context.authorize(policy.CREATE_POLICY) grp_type = body['group_type'] name = grp_type['name'] description = grp_type.get('description') specs = grp_type.get('group_specs', {}) is_public = strutils.bool_from_string(grp_type.get('is_public', True), strict=True) try: group_types.create(context, name, specs, is_public, 
description=description) grp_type = group_types.get_group_type_by_name(context, name) req.cache_resource(grp_type, name='group_types') self._notify_group_type_info( context, 'group_type.create', grp_type) except exception.GroupTypeExists as err: self._notify_group_type_error( context, 'group_type.create', err, group_type=grp_type) raise webob.exc.HTTPConflict(explanation=str(err)) except exception.GroupTypeNotFoundByName as err: self._notify_group_type_error( context, 'group_type.create', err, name=name) raise webob.exc.HTTPNotFound(explanation=err.msg) return self._view_builder.show(req, grp_type) @wsgi.Controller.api_version(mv.GROUP_TYPE) @validation.schema(group_type.update) def update(self, req, id, body): # Update description for a given group type. context = req.environ['cinder.context'] context.authorize(policy.UPDATE_POLICY) grp_type = body['group_type'] description = grp_type.get('description') name = grp_type.get('name') is_public = grp_type.get('is_public') if is_public is not None: is_public = strutils.bool_from_string(is_public, strict=True) # If name specified, name can not be empty. if name and len(name.strip()) == 0: msg = _("Group type name can not be empty.") raise webob.exc.HTTPBadRequest(explanation=msg) # Name, description and is_public can not be None. # Specify one of them, or a combination thereof. if name is None and description is None and is_public is None: msg = _("Specify group type name, description or " "a combination thereof.") raise webob.exc.HTTPBadRequest(explanation=msg) try: group_types.update(context, id, name, description, is_public=is_public) # Get the updated grp_type = group_types.get_group_type(context, id) req.cache_resource(grp_type, name='group_types') self._notify_group_type_info( context, 'group_type.update', grp_type) except exception.GroupTypeNotFound as err: self._notify_group_type_error( context, 'group_type.update', err, id=id) raise webob.exc.HTTPNotFound(explanation=str(err)) except exception.GroupTypeExists as err: self._notify_group_type_error( context, 'group_type.update', err, group_type=grp_type) raise webob.exc.HTTPConflict(explanation=str(err)) except exception.GroupTypeUpdateFailed as err: self._notify_group_type_error( context, 'group_type.update', err, group_type=grp_type) raise webob.exc.HTTPInternalServerError( explanation=str(err)) return self._view_builder.show(req, grp_type) @wsgi.Controller.api_version(mv.GROUP_TYPE) def delete(self, req, id): """Deletes an existing group type.""" context = req.environ['cinder.context'] context.authorize(policy.DELETE_POLICY) try: grp_type = group_types.get_group_type(context, id) group_types.destroy(context, grp_type['id']) self._notify_group_type_info( context, 'group_type.delete', grp_type) except exception.GroupTypeInUse as err: self._notify_group_type_error( context, 'group_type.delete', err, group_type=grp_type) msg = _('Target group type is still in use.') raise webob.exc.HTTPBadRequest(explanation=msg) except exception.GroupTypeNotFound as err: self._notify_group_type_error( context, 'group_type.delete', err, id=id) raise webob.exc.HTTPNotFound(explanation=err.msg) return webob.Response(status_int=HTTPStatus.ACCEPTED) @wsgi.Controller.api_version(mv.GROUP_TYPE) def index(self, req): """Returns the list of group types.""" limited_types = self._get_group_types(req) req.cache_resource(limited_types, name='group_types') return self._view_builder.index(req, limited_types) @wsgi.Controller.api_version(mv.GROUP_TYPE) def show(self, req, id): """Return a single group type item.""" 
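        # The literal id 'default' returns the configured default group type
        # (404 if none is configured); any other id is looked up normally.
        # A request such as GET /v3/{project_id}/group_types/default would hit
        # that branch (the path shown is an assumption based on the usual
        # group_types route and is given only for illustration).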
context = req.environ['cinder.context'] # get default group type if id is not None and id == 'default': grp_type = group_types.get_default_group_type() if not grp_type: msg = _("Default group type can not be found.") raise exc.HTTPNotFound(explanation=msg) req.cache_resource(grp_type, name='group_types') else: try: grp_type = group_types.get_group_type(context, id) req.cache_resource(grp_type, name='group_types') except exception.GroupTypeNotFound as error: raise exc.HTTPNotFound(explanation=error.msg) return self._view_builder.show(req, grp_type) def _get_group_types(self, req): """Helper function that returns a list of type dicts.""" params = req.params.copy() marker, limit, offset = common.get_pagination_params(params) sort_keys, sort_dirs = common.get_sort_params(params) filters = {} context = req.environ['cinder.context'] if context.is_admin: # Only admin has query access to all group types filters['is_public'] = api_utils._parse_is_public( req.params.get('is_public', None)) else: filters['is_public'] = True api_utils.remove_invalid_filter_options( context, filters, self._get_grp_type_filter_options()) limited_types = group_types.get_all_group_types(context, filters=filters, marker=marker, limit=limit, sort_keys=sort_keys, sort_dirs=sort_dirs, offset=offset, list_result=True) return limited_types def _get_grp_type_filter_options(self): """Return group type search options allowed by non-admin.""" return ['is_public'] def create_resource(): return wsgi.Resource(GroupTypesController()) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/api/v3/groups.py0000664000175000017500000004026600000000000017335 0ustar00zuulzuul00000000000000# Copyright (c) 2016 EMC Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """The groups controller.""" from http import HTTPStatus from oslo_log import log as logging from oslo_utils import strutils from oslo_utils import uuidutils import webob from webob import exc from cinder.api import common from cinder.api import microversions as mv from cinder.api.openstack import wsgi from cinder.api.schemas import groups as group from cinder.api.v3.views import groups as views_groups from cinder.api import validation from cinder import exception from cinder import group as group_api from cinder.i18n import _ from cinder import rpc from cinder.volume import group_types LOG = logging.getLogger(__name__) class GroupsController(wsgi.Controller): """The groups API controller for the OpenStack API.""" _view_builder_class = views_groups.ViewBuilder def __init__(self): self.group_api = group_api.API() super(GroupsController, self).__init__() def _check_default_cgsnapshot_type(self, group_type_id): if group_types.is_default_cgsnapshot_type(group_type_id): msg = _("Group_type %(group_type)s is reserved for migrating " "CGs to groups. 
Migrated group can only be operated by " "CG APIs.") % {'group_type': group_type_id} raise exc.HTTPBadRequest(explanation=msg) @wsgi.Controller.api_version(mv.GROUP_VOLUME) def show(self, req, id): """Return data about the given group.""" LOG.debug('show called for member %s', id) context = req.environ['cinder.context'] # Not found exception will be handled at the wsgi level group = self.group_api.get( context, group_id=id) self._check_default_cgsnapshot_type(group.group_type_id) return self._view_builder.detail(req, group) @wsgi.Controller.api_version(mv.GROUP_VOLUME_RESET_STATUS) @wsgi.action("reset_status") @validation.schema(group.reset_status) def reset_status(self, req, id, body): return self._reset_status(req, id, body) def _reset_status(self, req, id, body): """Reset status on generic group.""" context = req.environ['cinder.context'] status = body['reset_status']['status'].lower() LOG.debug("Updating group '%(id)s' with " "'%(update)s'", {'id': id, 'update': status}) try: notifier = rpc.get_notifier('groupStatusUpdate') notifier.info(context, 'groups.reset_status.start', {'id': id, 'update': status}) group = self.group_api.get(context, id) self.group_api.reset_status(context, group, status) notifier.info(context, 'groups.reset_status.end', {'id': id, 'update': status}) except exception.GroupNotFound as error: # Not found exception will be handled at the wsgi level notifier.error(context, 'groups.reset_status', {'error_message': error.msg, 'id': id}) raise except exception.InvalidGroupStatus as error: notifier.error(context, 'groups.reset_status', {'error_message': error.msg, 'id': id}) raise exc.HTTPBadRequest(explanation=error.msg) return webob.Response(status_int=HTTPStatus.ACCEPTED) @wsgi.Controller.api_version(mv.GROUP_VOLUME) @wsgi.action("delete") @validation.schema(group.delete) def delete_group(self, req, id, body): return self._delete(req, id, body) def _delete(self, req, id, body): """Delete a group.""" LOG.debug('delete called for group %s', id) context = req.environ['cinder.context'] grp_body = body['delete'] del_vol = strutils.bool_from_string(grp_body.get( 'delete-volumes', False)) LOG.info('Delete group with id: %s', id, context=context) try: group = self.group_api.get(context, id) self._check_default_cgsnapshot_type(group.group_type_id) self.group_api.delete(context, group, del_vol) except exception.GroupNotFound: # Not found exception will be handled at the wsgi level raise except exception.InvalidGroup as error: raise exc.HTTPBadRequest(explanation=error.msg) return webob.Response(status_int=HTTPStatus.ACCEPTED) @wsgi.Controller.api_version(mv.GROUP_VOLUME) def index(self, req): """Returns a summary list of groups.""" return self._get_groups(req, is_detail=False) @wsgi.Controller.api_version(mv.GROUP_VOLUME) def detail(self, req): """Returns a detailed list of groups.""" return self._get_groups(req, is_detail=True) def _get_groups(self, req, is_detail): """Returns a list of groups through view builder.""" context = req.environ['cinder.context'] filters = req.params.copy() api_version = req.api_version_request marker, limit, offset = common.get_pagination_params(filters) sort_keys, sort_dirs = common.get_sort_params(filters) filters.pop('list_volume', None) if api_version.matches(mv.RESOURCE_FILTER): support_like = (True if api_version.matches( mv.LIKE_FILTER) else False) common.reject_invalid_filters(context, filters, 'group', support_like) groups = self.group_api.get_all( context, filters=filters, marker=marker, limit=limit, offset=offset, sort_keys=sort_keys, 
sort_dirs=sort_dirs) new_groups = [] for grp in groups: try: # Only show groups not migrated from CGs self._check_default_cgsnapshot_type(grp.group_type_id) new_groups.append(grp) except exc.HTTPBadRequest: # Skip migrated group pass if is_detail: groups = self._view_builder.detail_list( req, new_groups) else: groups = self._view_builder.summary_list( req, new_groups) return groups @wsgi.Controller.api_version(mv.GROUP_VOLUME) @wsgi.response(HTTPStatus.ACCEPTED) @validation.schema(group.create) def create(self, req, body): """Create a new group.""" LOG.debug('Creating new group %s', body) context = req.environ['cinder.context'] group = body['group'] name = group.get('name') description = group.get('description') if name: name = name.strip() if description: description = description.strip() group_type = group['group_type'] if not uuidutils.is_uuid_like(group_type): req_group_type = group_types.get_group_type_by_name(context, group_type) group_type = req_group_type['id'] self._check_default_cgsnapshot_type(group_type) volume_types = group['volume_types'] availability_zone = group.get('availability_zone') LOG.info("Creating group %(name)s.", {'name': name}, context=context) try: new_group = self.group_api.create( context, name, description, group_type, volume_types, availability_zone=availability_zone) except (exception.Invalid, exception.ObjectActionError) as error: raise exc.HTTPBadRequest(explanation=error.msg) except exception.NotFound: # Not found exception will be handled at the wsgi level raise retval = self._view_builder.summary(req, new_group) return retval @wsgi.Controller.api_version(mv.GROUP_SNAPSHOTS) @wsgi.action("create-from-src") @wsgi.response(HTTPStatus.ACCEPTED) @validation.schema(group.create_from_source) def create_from_src(self, req, body): """Create a new group from a source. The source can be a group snapshot or a group. Note that this does not require group_type and volume_types as the "create" API above. 
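        For example, creating a group from a group snapshot could look like
        this (a sketch; the UUID is a placeholder):

        .. code-block:: json

            {
                "create-from-src": {
                    "name": "my_group",
                    "description": "My group",
                    "group_snapshot_id": "6f519a48-3183-46cf-a32f-41815f813986"
                }
            }

        Exactly one of ``group_snapshot_id`` or ``source_group_id`` should be
        supplied; the group type is inherited from that source.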
""" LOG.debug('Creating new group %s.', body) context = req.environ['cinder.context'] group = body['create-from-src'] name = group.get('name') description = group.get('description') if name: name = name.strip() if description: description = description.strip() group_snapshot_id = group.get('group_snapshot_id', None) source_group_id = group.get('source_group_id', None) group_type_id = None if group_snapshot_id: LOG.info("Creating group %(name)s from group_snapshot " "%(snap)s.", {'name': name, 'snap': group_snapshot_id}, context=context) grp_snap = self.group_api.get_group_snapshot(context, group_snapshot_id) group_type_id = grp_snap.group_type_id elif source_group_id: LOG.info("Creating group %(name)s from " "source group %(source_group_id)s.", {'name': name, 'source_group_id': source_group_id}, context=context) source_group = self.group_api.get(context, source_group_id) group_type_id = source_group.group_type_id self._check_default_cgsnapshot_type(group_type_id) try: new_group = self.group_api.create_from_src( context, name, description, group_snapshot_id, source_group_id) except exception.InvalidGroup as error: raise exc.HTTPBadRequest(explanation=error.msg) except (exception.GroupNotFound, exception.GroupSnapshotNotFound): # Not found exception will be handled at the wsgi level raise except exception.CinderException as error: raise exc.HTTPBadRequest(explanation=error.msg) retval = self._view_builder.summary(req, new_group) return retval @wsgi.Controller.api_version(mv.GROUP_VOLUME) @validation.schema(group.update) def update(self, req, id, body): """Update the group. Expected format of the input parameter 'body': .. code-block:: json { "group": { "name": "my_group", "description": "My group", "add_volumes": "volume-uuid-1,volume-uuid-2,...", "remove_volumes": "volume-uuid-8,volume-uuid-9,..." 
} } """ LOG.debug('Update called for group %s.', id) context = req.environ['cinder.context'] group = body['group'] name = group.get('name') description = group.get('description') if name: name = name.strip() if description: description = description.strip() add_volumes = group.get('add_volumes') remove_volumes = group.get('remove_volumes') LOG.info("Updating group %(id)s with name %(name)s " "description: %(description)s add_volumes: " "%(add_volumes)s remove_volumes: %(remove_volumes)s.", {'id': id, 'name': name, 'description': description, 'add_volumes': add_volumes, 'remove_volumes': remove_volumes}, context=context) try: group = self.group_api.get(context, id) self._check_default_cgsnapshot_type(group.group_type_id) self.group_api.update( context, group, name, description, add_volumes, remove_volumes) except exception.GroupNotFound: # Not found exception will be handled at the wsgi level raise except exception.InvalidGroup as error: raise exc.HTTPBadRequest(explanation=error.msg) return webob.Response(status_int=HTTPStatus.ACCEPTED) @wsgi.Controller.api_version(mv.GROUP_REPLICATION) @wsgi.action("enable_replication") @validation.schema(group.enable_replication) def enable_replication(self, req, id, body): """Enables replications for a group.""" context = req.environ['cinder.context'] LOG.info('Enable replication group with id: %s.', id, context=context) try: group = self.group_api.get(context, id) self.group_api.enable_replication(context, group) # Not found exception will be handled at the wsgi level except (exception.InvalidGroup, exception.InvalidGroupType, exception.InvalidVolume, exception.InvalidVolumeType) as error: raise exc.HTTPBadRequest(explanation=error.msg) return webob.Response(status_int=HTTPStatus.ACCEPTED) @wsgi.Controller.api_version(mv.GROUP_REPLICATION) @wsgi.action("disable_replication") @validation.schema(group.disable_replication) def disable_replication(self, req, id, body): """Disables replications for a group.""" context = req.environ['cinder.context'] LOG.info('Disable replication group with id: %s.', id, context=context) try: group = self.group_api.get(context, id) self.group_api.disable_replication(context, group) # Not found exception will be handled at the wsgi level except (exception.InvalidGroup, exception.InvalidGroupType, exception.InvalidVolume, exception.InvalidVolumeType) as error: raise exc.HTTPBadRequest(explanation=error.msg) return webob.Response(status_int=HTTPStatus.ACCEPTED) @wsgi.Controller.api_version(mv.GROUP_REPLICATION) @wsgi.action("failover_replication") @validation.schema(group.failover_replication) def failover_replication(self, req, id, body): """Fails over replications for a group.""" context = req.environ['cinder.context'] grp_body = body['failover_replication'] allow_attached = strutils.bool_from_string( grp_body.get('allow_attached_volume', False)) secondary_backend_id = grp_body.get('secondary_backend_id') LOG.info('Failover replication group with id: %s.', id, context=context) try: group = self.group_api.get(context, id) self.group_api.failover_replication(context, group, allow_attached, secondary_backend_id) # Not found exception will be handled at the wsgi level except (exception.InvalidGroup, exception.InvalidGroupType, exception.InvalidVolume, exception.InvalidVolumeType) as error: raise exc.HTTPBadRequest(explanation=error.msg) return webob.Response(status_int=HTTPStatus.ACCEPTED) @wsgi.Controller.api_version(mv.GROUP_REPLICATION) @wsgi.action("list_replication_targets") @validation.schema(group.list_replication) def 
list_replication_targets(self, req, id, body): """List replication targets for a group.""" context = req.environ['cinder.context'] LOG.info('List replication targets for group with id: %s.', id, context=context) # Not found exception will be handled at the wsgi level group = self.group_api.get(context, id) replication_targets = self.group_api.list_replication_targets( context, group) return replication_targets def create_resource(): return wsgi.Resource(GroupsController()) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/api/v3/limits.py0000664000175000017500000000415400000000000017313 0ustar00zuulzuul00000000000000# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """The limits V3 api.""" from cinder.api import microversions as mv from cinder.api.openstack import wsgi from cinder.api.v2 import limits as limits_v2 from cinder.api.views import limits as limits_views from cinder import quota QUOTAS = quota.QUOTAS class LimitsController(limits_v2.LimitsController): """Controller for accessing limits in the OpenStack API.""" def index(self, req): """Return all global and rate limit information.""" context = req.environ['cinder.context'] params = req.params.copy() req_version = req.api_version_request # TODO(wangxiyuan): Support "tenant_id" here to keep the backwards # compatibility. Remove it once we drop all support for "tenant". if (req_version.matches(None, mv.get_prior_version(mv.LIMITS_ADMIN_FILTER)) or not context.is_admin): params.pop('project_id', None) params.pop('tenant_id', None) project_id = params.get( 'project_id', params.get('tenant_id', context.project_id)) quotas = QUOTAS.get_project_quotas(context, project_id, usages=False) abs_limits = {k: v['limit'] for k, v in quotas.items()} rate_limits = req.environ.get("cinder.limits", []) builder = self._get_view_builder(req) return builder.build(rate_limits, abs_limits) def _get_view_builder(self, req): return limits_views.ViewBuilder() def create_resource(): return wsgi.Resource(LimitsController()) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/api/v3/messages.py0000664000175000017500000001036500000000000017622 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
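# Example of how ``_build_user_message`` below assembles the ``user_message``
# field (illustrative sketch; the literal values are placeholders):
#
#   message['action_id'] is None and message['event_id'] is set
#       -> user_message = defined_messages.get_message_text(event_id)
#   message['action_id'] is set
#       -> user_message = "<translated action>:<translated detail>"
#
# i.e. newer messages are rendered as "action:detail" pairs, while legacy
# messages fall back to the text registered for their event id.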
"""The messages API.""" from http import HTTPStatus import webob from cinder.api import common from cinder.api import microversions as mv from cinder.api.openstack import wsgi from cinder.api.v3.views import messages as messages_view from cinder.message import api as message_api from cinder.message import defined_messages from cinder.message import message_field from cinder.policies import messages as policy class MessagesController(wsgi.Controller): """The User Messages API controller for the OpenStack API.""" _view_builder_class = messages_view.ViewBuilder def __init__(self, ext_mgr): self.message_api = message_api.API() self.ext_mgr = ext_mgr super(MessagesController, self).__init__() def _build_user_message(self, message): # NOTE(tommylikehu): if the `action_id` is empty, we use 'event_id' # to translate the user message. if message is None: return if message['action_id'] is None and message['event_id'] is not None: message['user_message'] = defined_messages.get_message_text( message['event_id']) else: message['user_message'] = "%s:%s" % ( message_field.translate_action(message['action_id']), message_field.translate_detail(message['detail_id'])) @wsgi.Controller.api_version(mv.MESSAGES) def show(self, req, id): """Return the given message.""" context = req.environ['cinder.context'] # Not found exception will be handled at the wsgi level message = self.message_api.get(context, id) context.authorize(policy.GET_POLICY, target_obj=message) self._build_user_message(message) return self._view_builder.detail(req, message) @wsgi.Controller.api_version(mv.MESSAGES) def delete(self, req, id): """Delete a message.""" context = req.environ['cinder.context'] # Not found exception will be handled at the wsgi level message = self.message_api.get(context, id) context.authorize(policy.DELETE_POLICY, target_obj=message) self.message_api.delete(context, id) return webob.Response(status_int=HTTPStatus.NO_CONTENT) @wsgi.Controller.api_version(mv.MESSAGES) def index(self, req): """Returns a list of messages, transformed through view builder.""" context = req.environ['cinder.context'] api_version = req.api_version_request context.authorize(policy.GET_ALL_POLICY) filters = None marker = None limit = None offset = None sort_keys = None sort_dirs = None if api_version.matches(mv.MESSAGES_PAGINATION): filters = req.params.copy() marker, limit, offset = common.get_pagination_params(filters) sort_keys, sort_dirs = common.get_sort_params(filters) if api_version.matches(mv.RESOURCE_FILTER): support_like = (True if api_version.matches( mv.LIKE_FILTER) else False) common.reject_invalid_filters(context, filters, 'message', support_like) messages = self.message_api.get_all(context, filters=filters, marker=marker, limit=limit, offset=offset, sort_keys=sort_keys, sort_dirs=sort_dirs) for message in messages: self._build_user_message(message) messages = self._view_builder.index(req, messages) return messages def create_resource(ext_mgr): return wsgi.Resource(MessagesController(ext_mgr)) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/api/v3/resource_common_manage.py0000664000175000017500000000701500000000000022520 0ustar00zuulzuul00000000000000# Copyright (c) 2016 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from cinder.api import common from cinder.api import extensions from cinder.api import microversions as mv from cinder.api.openstack import wsgi from cinder import exception from cinder.i18n import _ class ManageResource(object): """Mixin class for v3 of ManageVolume and ManageSnapshot. It requires that any class inheriting from this one has `volume_api` and `_list_manageable_view` attributes. """ VALID_SORT_KEYS = {'reference', 'size'} VALID_SORT_DIRS = {'asc', 'desc'} def _set_resource_type(self, resource): self._authorizer = extensions.extension_authorizer(resource, 'list_manageable') self.get_manageable = getattr(self.volume_api, 'get_manageable_%ss' % resource) def _ensure_min_version(self, req, allowed_version): version = req.api_version_request if not version.matches(allowed_version, None): raise exception.VersionNotFoundForAPIMethod(version=version) def _get_resources(self, req, is_detail): self._ensure_min_version(req, mv.MANAGE_EXISTING_LIST) context = req.environ['cinder.context'] self._authorizer(context) params = req.params.copy() cluster_name, host = common.get_cluster_host( req, params, mv.MANAGE_EXISTING_CLUSTER) marker, limit, offset = common.get_pagination_params(params) sort_keys, sort_dirs = common.get_sort_params(params, default_key='reference') # These parameters are generally validated at the DB layer, but in this # case sorting is not done by the DB invalid_keys = set(sort_keys).difference(self.VALID_SORT_KEYS) if invalid_keys: msg = _("Invalid sort keys passed: %s") % ', '.join(invalid_keys) raise exception.InvalidParameterValue(err=msg) invalid_dirs = set(sort_dirs).difference(self.VALID_SORT_DIRS) if invalid_dirs: msg = _("Invalid sort dirs passed: %s") % ', '.join(invalid_dirs) raise exception.InvalidParameterValue(err=msg) resources = self.get_manageable(context, host, cluster_name, marker=marker, limit=limit, offset=offset, sort_keys=sort_keys, sort_dirs=sort_dirs) view_builder = getattr(self._list_manageable_view, 'detail_list' if is_detail else 'summary_list') return view_builder(req, resources, len(resources)) @wsgi.extends def index(self, req): """Returns a summary list of volumes available to manage.""" return self._get_resources(req, False) @wsgi.extends def detail(self, req): """Returns a detailed list of volumes available to manage.""" return self._get_resources(req, True) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/api/v3/resource_filters.py0000664000175000017500000000306500000000000021371 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
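# Example exchange served by the controller below (illustrative sketch; the
# filter names shown are placeholders for whatever is configured):
#
#   GET /v3/{project_id}/resource_filters?resource=volume
#
#   {"resource_filters": [
#       {"resource": "volume",
#        "filters": ["name", "status", "metadata"]}]}
#
# The controller only reads the configured filters via
# common.get_enabled_resource_filters() and formats them with the view
# builder; there is no create/update/delete for this resource.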
"""The resource filters api.""" from cinder.api import common from cinder.api import microversions as mv from cinder.api.openstack import wsgi from cinder.api.v3.views import resource_filters as filter_views class ResourceFiltersController(wsgi.Controller): """The resource filter API controller for the OpenStack API.""" _view_builder_class = filter_views.ViewBuilder def __init__(self, ext_mgr=None): """Initialize controller class.""" self.ext_mgr = ext_mgr super(ResourceFiltersController, self).__init__() @wsgi.Controller.api_version(mv.RESOURCE_FILTER_CONFIG) def index(self, req): """Return a list of resource filters.""" resource = req.params.get('resource', None) filters = common.get_enabled_resource_filters(resource=resource) return filter_views.ViewBuilder.list(filters) def create_resource(ext_mgr): """Create the wsgi resource for this controller.""" return wsgi.Resource(ResourceFiltersController(ext_mgr)) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/api/v3/router.py0000664000175000017500000002505600000000000017336 0ustar00zuulzuul00000000000000# Copyright 2011 OpenStack Foundation # Copyright 2011 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ WSGI middleware for OpenStack Volume API. 
""" from cinder.api import extensions import cinder.api.openstack from cinder.api.v3 import attachments from cinder.api.v3 import backups from cinder.api.v3 import clusters from cinder.api.v3 import consistencygroups from cinder.api.v3 import default_types from cinder.api.v3 import group_snapshots from cinder.api.v3 import group_specs from cinder.api.v3 import group_types from cinder.api.v3 import groups from cinder.api.v3 import limits from cinder.api.v3 import messages from cinder.api.v3 import resource_filters from cinder.api.v3 import snapshot_manage from cinder.api.v3 import snapshot_metadata from cinder.api.v3 import snapshots from cinder.api.v3 import types from cinder.api.v3 import volume_manage from cinder.api.v3 import volume_metadata from cinder.api.v3 import volume_transfer from cinder.api.v3 import volumes from cinder.api.v3 import workers from cinder.api import versions class APIRouter(cinder.api.openstack.APIRouter): """Routes requests on the API to the appropriate controller and method.""" ExtensionManager = extensions.ExtensionManager def _setup_routes(self, mapper, ext_mgr): self.resources['versions'] = versions.create_resource() mapper.connect("versions", "/", controller=self.resources['versions'], action='index') mapper.redirect("", "/") self.resources['volumes'] = volumes.create_resource(ext_mgr) mapper.resource("volume", "volumes", controller=self.resources['volumes'], collection={'detail': 'GET', 'summary': 'GET'}, member={'action': 'POST'}) self.resources['messages'] = messages.create_resource(ext_mgr) mapper.resource("message", "messages", controller=self.resources['messages'], collection={'detail': 'GET'}) self.resources['clusters'] = clusters.create_resource() mapper.resource('cluster', 'clusters', controller=self.resources['clusters'], collection={'detail': 'GET'}) self.resources['types'] = types.create_resource() mapper.resource("type", "types", controller=self.resources['types'], member={'action': 'POST'}) self.resources['group_types'] = group_types.create_resource() mapper.resource("group_type", "group_types", controller=self.resources['group_types'], member={'action': 'POST'}) self.resources['group_specs'] = group_specs.create_resource() mapper.resource("group_spec", "group_specs", controller=self.resources['group_specs'], parent_resource=dict(member_name='group_type', collection_name='group_types')) self.resources['groups'] = groups.create_resource() mapper.resource("group", "groups", controller=self.resources['groups'], collection={'detail': 'GET'}, member={'action': 'POST'}) for path_prefix in ['/{project_id}', '']: # project_id is optional mapper.connect("groups", "%s/groups/{id}/action" % path_prefix, controller=self.resources["groups"], action="action", conditions={"method": ["POST"]}) mapper.connect("groups/action", "%s/groups/action" % path_prefix, controller=self.resources["groups"], action="action", conditions={"method": ["POST"]}) self.resources['group_snapshots'] = group_snapshots.create_resource() mapper.resource("group_snapshot", "group_snapshots", controller=self.resources['group_snapshots'], collection={'detail': 'GET'}, member={'action': 'POST'}) for path_prefix in ['/{project_id}', '']: # project_id is optional mapper.connect("group_snapshots", "%s/group_snapshots/{id}/action" % path_prefix, controller=self.resources["group_snapshots"], action="action", conditions={"method": ["POST"]}) self.resources['snapshots'] = snapshots.create_resource(ext_mgr) mapper.resource("snapshot", "snapshots", controller=self.resources['snapshots'], 
collection={'detail': 'GET'}, member={'action': 'POST'}) self.resources['limits'] = limits.create_resource() mapper.resource("limit", "limits", controller=self.resources['limits']) self.resources['snapshot_metadata'] = \ snapshot_metadata.create_resource() snapshot_metadata_controller = self.resources['snapshot_metadata'] mapper.resource("snapshot_metadata", "metadata", controller=snapshot_metadata_controller, parent_resource=dict(member_name='snapshot', collection_name='snapshots')) for path_prefix in ['/{project_id}', '']: # project_id is optional mapper.connect("metadata", "%s/snapshots/{snapshot_id}/metadata" % path_prefix, controller=snapshot_metadata_controller, action='update_all', conditions={"method": ['PUT']}) self.resources['volume_metadata'] = volume_metadata.create_resource() volume_metadata_controller = self.resources['volume_metadata'] mapper.resource("volume_metadata", "metadata", controller=volume_metadata_controller, parent_resource=dict(member_name='volume', collection_name='volumes')) for path_prefix in ['/{project_id}', '']: # project_id is optional mapper.connect("metadata", "%s/volumes/{volume_id}/metadata" % path_prefix, controller=volume_metadata_controller, action='update_all', conditions={"method": ['PUT']}) self.resources['consistencygroups'] = ( consistencygroups.create_resource()) mapper.resource("consistencygroup", "consistencygroups", controller=self.resources['consistencygroups'], collection={'detail': 'GET'}, member={'action': 'POST'}) self.resources['manageable_volumes'] = volume_manage.create_resource() mapper.resource("manageable_volume", "manageable_volumes", controller=self.resources['manageable_volumes'], collection={'detail': 'GET'}) self.resources['manageable_snapshots'] = \ snapshot_manage.create_resource() mapper.resource("manageable_snapshot", "manageable_snapshots", controller=self.resources['manageable_snapshots'], collection={'detail': 'GET'}) self.resources['backups'] = ( backups.create_resource()) mapper.resource("backup", "backups", controller=self.resources['backups'], collection={'detail': 'GET'}) self.resources['attachments'] = attachments.create_resource(ext_mgr) mapper.resource("attachment", "attachments", controller=self.resources['attachments'], collection={'detail': 'GET', 'summary': 'GET'}, member={'action': 'POST'}) self.resources['workers'] = workers.create_resource() mapper.resource('worker', 'workers', controller=self.resources['workers'], collection={'cleanup': 'POST'}) self.resources['resource_filters'] = resource_filters.create_resource( ext_mgr) mapper.resource('resource_filter', 'resource_filters', controller=self.resources['resource_filters']) self.resources['volume_transfers'] = ( volume_transfer.create_resource()) mapper.resource("volume-transfer", "volume-transfers", controller=self.resources['volume_transfers'], collection={'detail': 'GET'}, member={'accept': 'POST'}) self.resources['default_types'] = default_types.create_resource() for path_prefix in ['/{project_id}', '']: # project_id is optional mapper.connect( "default-types", "%s/default-types/{id}" % path_prefix, controller=self.resources['default_types'], action='create_update', conditions={"method": ['PUT']}) mapper.connect( "default-types", "%s/default-types" % path_prefix, controller=self.resources['default_types'], action='index', conditions={"method": ['GET']}) mapper.connect( "default-types", "%s/default-types/{id}" % path_prefix, controller=self.resources['default_types'], action='detail', conditions={"method": ['GET']}) mapper.connect( "default-types", 
"%s/default-types/{id}" % path_prefix, controller=self.resources['default_types'], action='delete', conditions={"method": ['DELETE']}) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/api/v3/snapshot_manage.py0000664000175000017500000000262100000000000021156 0ustar00zuulzuul00000000000000# Copyright (c) 2016 Stratoscale, Ltd. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from http import HTTPStatus from cinder.api.contrib import snapshot_manage as snapshot_manage_v2 from cinder.api import microversions as mv from cinder.api.openstack import wsgi from cinder.api.v3 import resource_common_manage as common class SnapshotManageController(common.ManageResource, snapshot_manage_v2.SnapshotManageController): def __init__(self, *args, **kwargs): super(SnapshotManageController, self).__init__(*args, **kwargs) self._set_resource_type('snapshot') @wsgi.response(HTTPStatus.ACCEPTED) def create(self, req, body): self._ensure_min_version(req, mv.MANAGE_EXISTING_LIST) return super(SnapshotManageController, self).create(req, body=body) def create_resource(): return wsgi.Resource(SnapshotManageController()) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/api/v3/snapshot_metadata.py0000664000175000017500000001245500000000000021514 0ustar00zuulzuul00000000000000# Copyright 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from http import HTTPStatus import webob from webob import exc from cinder.api.openstack import wsgi from cinder import exception from cinder.i18n import _ from cinder import volume class Controller(wsgi.Controller): """The snapshot metadata API controller for the OpenStack API.""" def __init__(self): self.volume_api = volume.API() super(Controller, self).__init__() def _get_metadata(self, context, snapshot_id): return self._get_snapshot_and_metadata(context, snapshot_id)[1] def _get_snapshot_and_metadata(self, context, snapshot_id): # Not found exception will be handled at the wsgi level snapshot = self.volume_api.get_snapshot(context, snapshot_id) meta = self.volume_api.get_snapshot_metadata(context, snapshot) return snapshot, meta def index(self, req, snapshot_id): """Returns the list of metadata for a given snapshot.""" context = req.environ['cinder.context'] return {'metadata': self._get_metadata(context, snapshot_id)} def create(self, req, snapshot_id, body): self.assert_valid_body(body, 'metadata') context = req.environ['cinder.context'] metadata = body['metadata'] new_metadata = self._update_snapshot_metadata(context, snapshot_id, metadata, delete=False) return {'metadata': new_metadata} def update(self, req, snapshot_id, id, body): self.assert_valid_body(body, 'meta') meta_item = body['meta'] if id not in meta_item: expl = _('Request body and URI mismatch') raise exc.HTTPBadRequest(explanation=expl) if len(meta_item) > 1: expl = _('Request body contains too many items') raise exc.HTTPBadRequest(explanation=expl) context = req.environ['cinder.context'] self._update_snapshot_metadata(context, snapshot_id, meta_item, delete=False) return {'meta': meta_item} def update_all(self, req, snapshot_id, body): self.assert_valid_body(body, 'metadata') context = req.environ['cinder.context'] metadata = body['metadata'] new_metadata = self._update_snapshot_metadata(context, snapshot_id, metadata, delete=True) return {'metadata': new_metadata} def _update_snapshot_metadata(self, context, snapshot_id, metadata, delete=False): try: snapshot = self.volume_api.get_snapshot(context, snapshot_id) return self.volume_api.update_snapshot_metadata(context, snapshot, metadata, delete) # Not found exception will be handled at the wsgi level except (ValueError, AttributeError): msg = _("Malformed request body") raise exc.HTTPBadRequest(explanation=msg) except exception.InvalidVolumeMetadata as error: raise exc.HTTPBadRequest(explanation=error.msg) except exception.InvalidVolumeMetadataSize as error: raise exc.HTTPRequestEntityTooLarge(explanation=error.msg) def show(self, req, snapshot_id, id): """Return a single metadata item.""" context = req.environ['cinder.context'] data = self._get_metadata(context, snapshot_id) try: return {'meta': {id: data[id]}} except KeyError: raise exception.SnapshotMetadataNotFound(snapshot_id=snapshot_id, metadata_key=id) def delete(self, req, snapshot_id, id): """Deletes an existing metadata.""" context = req.environ['cinder.context'] snapshot, metadata = self._get_snapshot_and_metadata(context, snapshot_id) if id not in metadata: raise exception.SnapshotMetadataNotFound(snapshot_id=snapshot_id, metadata_key=id) self.volume_api.delete_snapshot_metadata(context, snapshot, id) return webob.Response(status_int=HTTPStatus.OK) def create_resource(): return wsgi.Resource(Controller()) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/api/v3/snapshots.py0000664000175000017500000001721000000000000020031 
0ustar00zuulzuul00000000000000# Copyright 2016 EMC Corporation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """The volumes snapshots V3 API.""" import ast from http import HTTPStatus from oslo_log import log as logging from oslo_utils import strutils from webob import exc from cinder.api import api_utils from cinder.api import common from cinder.api import microversions as mv from cinder.api.openstack import wsgi from cinder.api.schemas import snapshots as snapshot from cinder.api.v2 import snapshots as snapshots_v2 from cinder.api.v3.views import snapshots as snapshot_views from cinder.api import validation from cinder import utils LOG = logging.getLogger(__name__) SNAPSHOT_IN_USE_FLAG_MSG = ( f"Since microversion {mv.SNAPSHOT_IN_USE} the 'force' flag is " "invalid for this request. For backward compatability, however, when " "the 'force' flag is passed with a value evaluating to True, it is " "silently ignored.") class SnapshotsController(snapshots_v2.SnapshotsController): """The Snapshots API controller for the OpenStack API.""" _view_builder_class = snapshot_views.ViewBuilder def _get_snapshot_filter_options(self): """returns tuple of valid filter options""" return 'status', 'volume_id', 'name', 'metadata' def _format_snapshot_filter_options(self, search_opts): """Convert valid filter options to correct expected format""" # Get the dict object out of queried metadata # convert metadata query value from string to dict if 'metadata' in search_opts.keys(): try: search_opts['metadata'] = ast.literal_eval( search_opts['metadata']) except (ValueError, SyntaxError): LOG.debug('Could not evaluate value %s, assuming string', search_opts['metadata']) if 'use_quota' in search_opts: search_opts['use_quota'] = utils.get_bool_param('use_quota', search_opts) MV_ADDED_FILTERS = ( (mv.get_prior_version(mv.SNAPSHOT_LIST_METADATA_FILTER), 'metadata'), # REST API receives consumes_quota, but process_general_filtering # transforms it into use_quota (mv.get_prior_version(mv.USE_QUOTA), 'use_quota'), ) @common.process_general_filtering('snapshot') def _process_snapshot_filtering(self, context=None, filters=None, req_version=None): """Formats allowed filters""" for version, field in self.MV_ADDED_FILTERS: if req_version.matches(None, version): filters.pop(field, None) # Filter out invalid options allowed_search_options = self._get_snapshot_filter_options() api_utils.remove_invalid_filter_options(context, filters, allowed_search_options) def _items(self, req, is_detail=True): """Returns a list of snapshots, transformed through view builder.""" context = req.environ['cinder.context'] # Pop out non search_opts and create local variables search_opts = req.GET.copy() sort_keys, sort_dirs = common.get_sort_params(search_opts) marker, limit, offset = common.get_pagination_params(search_opts) req_version = req.api_version_request show_count = False if req_version.matches( mv.SUPPORT_COUNT_INFO) and 'with_count' in search_opts: show_count = utils.get_bool_param('with_count', search_opts) 
search_opts.pop('with_count') # process filters self._process_snapshot_filtering(context=context, filters=search_opts, req_version=req_version) # process snapshot filters to appropriate formats if required self._format_snapshot_filter_options(search_opts) req_version = req.api_version_request if req_version.matches(mv.SNAPSHOT_SORT, None) and 'name' in sort_keys: sort_keys[sort_keys.index('name')] = 'display_name' # NOTE(thingee): v3 API allows name instead of display_name if 'name' in search_opts: search_opts['display_name'] = search_opts.pop('name') snapshots = self.volume_api.get_all_snapshots( context, search_opts=search_opts.copy(), marker=marker, limit=limit, sort_keys=sort_keys, sort_dirs=sort_dirs, offset=offset) total_count = None if show_count: total_count = self.volume_api.calculate_resource_count( context, 'snapshot', search_opts) req.cache_db_snapshots(snapshots.objects) if is_detail: snapshots = self._view_builder.detail_list(req, snapshots.objects, total_count) else: snapshots = self._view_builder.summary_list(req, snapshots.objects, total_count) return snapshots @wsgi.response(HTTPStatus.ACCEPTED) @validation.schema(snapshot.create) def create(self, req, body): """Creates a new snapshot.""" kwargs = {} context = req.environ['cinder.context'] snapshot = body['snapshot'] kwargs['metadata'] = snapshot.get('metadata', None) volume_id = snapshot['volume_id'] volume = self.volume_api.get(context, volume_id) req_version = req.api_version_request force_flag = snapshot.get('force') force = False if force_flag is not None: # note: this won't raise because it passed schema validation force = strutils.bool_from_string(force_flag, strict=True) if req_version.matches(mv.SNAPSHOT_IN_USE): # strictly speaking, the 'force' flag is invalid for # mv.SNAPSHOT_IN_USE, but we silently ignore a True # value for backward compatibility if force is False: raise exc.HTTPBadRequest( explanation=SNAPSHOT_IN_USE_FLAG_MSG) LOG.info("Create snapshot from volume %s", volume_id) self.validate_name_and_description(snapshot, check_length=False) if 'name' in snapshot: snapshot['display_name'] = snapshot.pop('name') if force: new_snapshot = self.volume_api.create_snapshot_force( context, volume, snapshot.get('display_name'), snapshot.get('description'), **kwargs) else: if req_version.matches(mv.SNAPSHOT_IN_USE): kwargs['allow_in_use'] = True new_snapshot = self.volume_api.create_snapshot( context, volume, snapshot.get('display_name'), snapshot.get('description'), **kwargs) req.cache_db_snapshot(new_snapshot) return self._view_builder.detail(req, new_snapshot) def create_resource(ext_mgr): return wsgi.Resource(SnapshotsController(ext_mgr)) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/api/v3/types.py0000664000175000017500000001310400000000000017151 0ustar00zuulzuul00000000000000# Copyright (c) 2011 Zadara Storage Inc. # Copyright (c) 2011 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
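# Example of extra-spec filtering handled below (illustrative sketch; the
# spec key and value are placeholders):
#
#   GET /v3/{project_id}/types?extra_specs={'multiattach':'<is> True'}
#
# The query value is literal-eval'd into a dict before it is passed to the
# type listing; for callers without the "read sensitive extra specs" policy,
# any key outside the user-visible allow list makes the result an empty list
# rather than an inaccurate one.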
"""The volume type & volume types extra specs extension.""" import ast from oslo_log import log as logging from cinder.api import api_utils from cinder.api import common from cinder.api import microversions as mv from cinder.api.openstack import wsgi from cinder.api.v3.views import types as views_types from cinder import exception from cinder.i18n import _ from cinder.policies import type_extra_specs as extra_specs_policy from cinder.policies import volume_type as type_policy from cinder.volume import volume_types LOG = logging.getLogger(__name__) class VolumeTypesController(wsgi.Controller): """The volume types API controller for the OpenStack API.""" _view_builder_class = views_types.ViewBuilder def index(self, req): """Returns the list of volume types.""" context = req.environ['cinder.context'] context.authorize(type_policy.GET_ALL_POLICY) limited_types = self._get_volume_types(req) req.cache_resource(limited_types, name='types') return self._view_builder.index(req, limited_types) def show(self, req, id): """Return a single volume type item.""" context = req.environ['cinder.context'] # get default volume type if id is not None and id == 'default': vol_type = volume_types.get_default_volume_type(context) if not vol_type: msg = _("Default volume type can not be found.") raise exception.VolumeTypeNotFound(message=msg) req.cache_resource(vol_type, name='types') else: # Not found exception will be handled at wsgi level vol_type = volume_types.get_volume_type(context, id) req.cache_resource(vol_type, name='types') context.authorize(type_policy.GET_POLICY, target_obj=vol_type) return self._view_builder.show(req, vol_type) @common.process_general_filtering('volume_type') def _process_volume_type_filtering(self, context=None, filters=None, req_version=None): api_utils.remove_invalid_filter_options( context, filters, self._get_vol_type_filter_options()) def _get_volume_types(self, req): """Helper function that returns a list of type dicts.""" params = req.params.copy() marker, limit, offset = common.get_pagination_params(params) sort_keys, sort_dirs = common.get_sort_params(params) filters = params context = req.environ['cinder.context'] req_version = req.api_version_request if req_version.matches(mv.SUPPORT_VOLUME_TYPE_FILTER): self._process_volume_type_filtering(context=context, filters=filters, req_version=req_version) else: api_utils.remove_invalid_filter_options( context, filters, self._get_vol_type_filter_options()) if context.is_admin: # Only admin has query access to all volume types filters['is_public'] = api_utils._parse_is_public( req.params.get('is_public', None)) else: filters['is_public'] = True if 'extra_specs' in filters: try: filters['extra_specs'] = ast.literal_eval( filters['extra_specs']) except (ValueError, SyntaxError): LOG.debug('Could not evaluate "extra_specs" %s, assuming ' 'dictionary string.', filters['extra_specs']) # Do not allow sensitive extra specs to be used in a filter if # the context only allows access to user visible extra specs. # Removing the filter would yield inaccurate results, so an # empty result is returned because as far as an unauthorized # user goes, the list of volume-types meeting their filtering # criteria is empty. 
if not context.authorize(extra_specs_policy.READ_SENSITIVE_POLICY, fatal=False): for k in filters['extra_specs'].keys(): if k not in extra_specs_policy.USER_VISIBLE_EXTRA_SPECS: return [] limited_types = volume_types.get_all_types(context, filters=filters, marker=marker, limit=limit, sort_keys=sort_keys, sort_dirs=sort_dirs, offset=offset, list_result=True) return limited_types def _get_vol_type_filter_options(self): """Return volume type search options allowed by non-admin.""" return ['is_public'] def create_resource(): return wsgi.Resource(VolumeTypesController()) ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1759315577.055118 cinder-27.0.0/cinder/api/v3/views/0000775000175000017500000000000000000000000016571 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/api/v3/views/__init__.py0000664000175000017500000000000000000000000020670 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/api/v3/views/attachments.py0000664000175000017500000000400300000000000021453 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_utils import timeutils class ViewBuilder(object): """Model an attachment API response as a python dictionary.""" _collection_name = "attachments" @staticmethod def _normalize(date): if date: return timeutils.normalize_time(date) return '' @classmethod def detail(cls, attachment, flat=False): """Detailed view of an attachment.""" result = cls.summary(attachment, flat=True) result.update( attached_at=cls._normalize(attachment.attach_time), detached_at=cls._normalize(attachment.detach_time), attach_mode=attachment.attach_mode, connection_info=attachment.connection_info) if flat: return result return {'attachment': result} @staticmethod def summary(attachment, flat=False): """Non detailed view of an attachment.""" result = { 'id': attachment.id, 'status': attachment.attach_status, 'instance': attachment.instance_uuid, 'volume_id': attachment.volume_id, } if flat: return result return {'attachment': result} @classmethod def list(cls, attachments, detail=False): """Build a view of a list of attachments.""" func = cls.detail if detail else cls.summary return {'attachments': [func(attachment, flat=True) for attachment in attachments]} ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/api/v3/views/backups.py0000664000175000017500000000324000000000000020572 0ustar00zuulzuul00000000000000# Copyright 2017 FiberHome Telecommunication Technologies CO.,LTD # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from cinder.api import microversions as mv from cinder.api.views import backups as views_v2 from cinder.common import constants as cinder_constants class ViewBuilder(views_v2.ViewBuilder): """Model a backups API V3 response as a python dictionary.""" def detail(self, request, backup): """Detailed view of a single backup.""" backup_ref = super(ViewBuilder, self).detail(request, backup) # Add metadata if min version is greater than or equal to # BACKUP_METADATA. req_version = request.api_version_request if req_version.matches(mv.BACKUP_METADATA): backup_ref['backup']['metadata'] = backup.metadata if req_version.matches(mv.ENCRYPTION_KEY_ID_IN_DETAILS, None): encryption_key_id = backup.get('encryption_key_id', None) if (encryption_key_id and encryption_key_id != cinder_constants.FIXED_KEY_ID): backup_ref['backup']['encryption_key_id'] = encryption_key_id return backup_ref ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/api/v3/views/clusters.py0000664000175000017500000000507700000000000021020 0ustar00zuulzuul00000000000000# Copyright (c) 2016 Red Hat Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
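# Example of the summary payload built below (illustrative values):
#
#   {"cluster": {"name": "cluster1@backend1",
#                "binary": "cinder-volume",
#                "state": "up",
#                "status": "enabled"}}
#
# Replication-related fields (``replication_status`` here, plus ``frozen``
# and ``active_backend_id`` in ``detail``) are stripped unless the caller
# asked for replication data.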
from oslo_utils import timeutils class ViewBuilder(object): """Map Cluster into dicts for API responses.""" @staticmethod def _normalize(date): if date: return timeutils.normalize_time(date) return '' @classmethod def detail(cls, cluster, replication_data=False, flat=False): """Detailed view of a cluster.""" result = cls.summary(cluster, flat=True) result.update( num_hosts=cluster.num_hosts, num_down_hosts=cluster.num_down_hosts, last_heartbeat=cls._normalize(cluster.last_heartbeat), created_at=cls._normalize(cluster.created_at), updated_at=cls._normalize(cluster.updated_at), disabled_reason=cluster.disabled_reason, replication_status=cluster.replication_status, frozen=cluster.frozen, active_backend_id=cluster.active_backend_id, ) if not replication_data: for field in ('replication_status', 'frozen', 'active_backend_id'): del result[field] if flat: return result return {'cluster': result} @staticmethod def summary(cluster, replication_data=False, flat=False): """Generic, non-detailed view of a cluster.""" result = { 'name': cluster.name, 'binary': cluster.binary, 'state': 'up' if cluster.is_up else 'down', 'status': 'disabled' if cluster.disabled else 'enabled', 'replication_status': cluster.replication_status, } if not replication_data: del result['replication_status'] if flat: return result return {'cluster': result} @classmethod def list(cls, clusters, detail=False, replication_data=False): func = cls.detail if detail else cls.summary return {'clusters': [func(n, replication_data, flat=True) for n in clusters]} ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/api/v3/views/default_types.py0000664000175000017500000000436500000000000022023 0ustar00zuulzuul00000000000000# Copyright 2020 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. class ViewBuilder(object): """Model default type API response as a python dictionary.""" _collection_name = "default_types" def _convert_to_dict(self, default): return {'project_id': default.project_id, 'volume_type_id': default.volume_type_id} def create(self, default_type): """Detailed view of a default type when set.""" return {'default_type': self._convert_to_dict(default_type)} def index(self, default_types): """Build a view of a list of default types. .. code-block:: json {"default_types": [ { "project_id": "248592b4-a6da-4c4c-abe0-9d8dbe0b74b4", "volume_type_id": "7152eb1e-aef0-4bcd-a3ab-46b7ef17e2e6" }, { "project_id": "1234567-4c4c-abcd-abe0-1a2b3c4d5e6ff", "volume_type_id": "5e3b298a-f1fc-4d32-9828-0d720da81ddd" } ] } """ default_types_view = [] for default_type in default_types: default_types_view.append(self._convert_to_dict(default_type)) return {'default_types': default_types_view} def detail(self, default_type): """Build a view of a default type. .. 
code-block:: json {"default_type": { "project_id": "248592b4-a6da-4c4c-abe0-9d8dbe0b74b4", "volume_type_id": "6bd1de9a-b8b5-4c43-a597-00170ab06b50" } } """ return {'default_type': self._convert_to_dict(default_type)} ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/api/v3/views/group_snapshots.py0000664000175000017500000000662100000000000022406 0ustar00zuulzuul00000000000000# Copyright (C) 2016 EMC Corporation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from cinder.api import common from cinder.api import microversions as mv from cinder.policies import group_snapshots as policy class ViewBuilder(common.ViewBuilder): """Model group_snapshot API responses as a python dictionary.""" _collection_name = "group_snapshots" def __init__(self): """Initialize view builder.""" super(ViewBuilder, self).__init__() def summary_list(self, request, group_snapshots): """Show a list of group_snapshots without many details.""" return self._list_view(self.summary, request, group_snapshots) def detail_list(self, request, group_snapshots): """Detailed view of a list of group_snapshots .""" return self._list_view(self.detail, request, group_snapshots) def summary(self, request, group_snapshot): """Generic, non-detailed view of a group_snapshot.""" return { 'group_snapshot': { 'id': group_snapshot.id, 'name': group_snapshot.name, # NOTE(xyang): group_type_id is added for migrating CGs # to generic volume groups 'group_type_id': group_snapshot.group_type_id, } } def detail(self, request, group_snapshot): """Detailed view of a single group_snapshot.""" group_snapshot_ref = { 'group_snapshot': { 'id': group_snapshot.id, 'group_id': group_snapshot.group_id, 'group_type_id': group_snapshot.group_type_id, 'status': group_snapshot.status, 'created_at': group_snapshot.created_at, 'name': group_snapshot.name, 'description': group_snapshot.description } } req_version = request.api_version_request context = request.environ['cinder.context'] if req_version.matches(mv.GROUP_GROUPSNAPSHOT_PROJECT_ID, None): if context.authorize(policy.GROUP_SNAPSHOT_ATTRIBUTES_POLICY, fatal=False): group_snapshot_ref['group_snapshot']['project_id'] = ( group_snapshot.project_id) return group_snapshot_ref def _list_view(self, func, request, group_snapshots): """Provide a view for a list of group_snapshots.""" group_snapshots_list = [func(request, group_snapshot)['group_snapshot'] for group_snapshot in group_snapshots] group_snapshot_links = self._get_collection_links( request, group_snapshots_list, self._collection_name) group_snapshots_dict = dict(group_snapshots=group_snapshots_list) if group_snapshot_links: group_snapshots_dict['group_snapshot_links'] = group_snapshot_links return group_snapshots_dict ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/api/v3/views/group_types.py0000664000175000017500000000354600000000000021533 0ustar00zuulzuul00000000000000# Copyright 2016 
EMC Corporation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from cinder.api import common from cinder.policies import group_types as policy class ViewBuilder(common.ViewBuilder): def show(self, request, group_type, brief=False): """Trim away extraneous group type attributes.""" context = request.environ['cinder.context'] trimmed = dict(id=group_type.get('id'), name=group_type.get('name'), description=group_type.get('description'), is_public=group_type.get('is_public')) if context.authorize(policy.SHOW_ACCESS_POLICY, fatal=False): trimmed['group_specs'] = group_type.get('group_specs') return trimmed if brief else dict(group_type=trimmed) def index(self, request, group_types): """Index over trimmed group types.""" group_types_list = [self.show(request, group_type, True) for group_type in group_types] group_type_links = self._get_collection_links(request, group_types, 'group_types') group_types_dict = dict(group_types=group_types_list) if group_type_links: group_types_dict['group_type_links'] = group_type_links return group_types_dict ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/api/v3/views/groups.py0000664000175000017500000000754000000000000020470 0ustar00zuulzuul00000000000000# Copyright (C) 2016 EMC Corporation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
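# Example of the detailed group payload built below (illustrative values;
# later microversions add group_snapshot_id, source_group_id, volumes,
# replication_status and project_id on top of this):
#
#   {"group": {"id": "<group uuid>",
#              "status": "available",
#              "availability_zone": "nova",
#              "created_at": "<timestamp>",
#              "name": "my_group",
#              "description": "My group",
#              "group_type": "<group type uuid>",
#              "volume_types": ["<volume type uuid>"]}}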
from cinder.api import common from cinder.api import microversions as mv from cinder.policies import groups as policy from cinder import utils class ViewBuilder(common.ViewBuilder): """Model group API responses as a python dictionary.""" _collection_name = "groups" def __init__(self): """Initialize view builder.""" super(ViewBuilder, self).__init__() def summary_list(self, request, groups): """Show a list of groups without many details.""" return self._list_view(self.summary, request, groups) def detail_list(self, request, groups): """Detailed view of a list of groups .""" return self._list_view(self.detail, request, groups) def summary(self, request, group): """Generic, non-detailed view of a group.""" return { 'group': { 'id': group.id, 'name': group.name } } def detail(self, request, group): """Detailed view of a single group.""" context = request.environ['cinder.context'] group_ref = { 'group': { 'id': group.id, 'status': group.status, 'availability_zone': group.availability_zone, 'created_at': group.created_at, 'name': group.name, 'description': group.description, 'group_type': group.group_type_id, 'volume_types': [v_type.id for v_type in group.volume_types], } } req_version = request.api_version_request # Add group_snapshot_id and source_group_id if min version is greater # than or equal to GROUP_SNAPSHOTS. if req_version.matches(mv.GROUP_SNAPSHOTS, None): group_ref['group']['group_snapshot_id'] = group.group_snapshot_id group_ref['group']['source_group_id'] = group.source_group_id # Add volumes if min version is greater than or equal to # GROUP_VOLUME_LIST. if req_version.matches(mv.GROUP_VOLUME_LIST, None): if utils.get_bool_param('list_volume', request.params): group_ref['group']['volumes'] = [volume.id for volume in group.volumes] # Add replication_status if min version is greater than or equal # to GROUP_REPLICATION. if req_version.matches(mv.GROUP_REPLICATION, None): group_ref['group']['replication_status'] = group.replication_status if req_version.matches(mv.GROUP_GROUPSNAPSHOT_PROJECT_ID, None): if context.authorize(policy.GROUP_ATTRIBUTES_POLICY, fatal=False): group_ref['group']['project_id'] = group.project_id return group_ref def _list_view(self, func, request, groups): """Provide a view for a list of groups.""" groups_list = [ func(request, group)['group'] for group in groups] grp_links = self._get_collection_links(request, groups, self._collection_name) groups_dict = dict(groups=groups_list) if grp_links: groups_dict['group_links'] = grp_links return groups_dict ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/api/v3/views/messages.py0000664000175000017500000000546200000000000020761 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
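# Note for readers of ``detail()`` below: the expiry stored as ``expires_at``
# on the message object is exposed to API callers as ``guaranteed_until``.
# Example payload (illustrative values):
#
#   {"message": {"id": "<message uuid>",
#                "event_id": "<event id>",
#                "user_message": "<translated text>",
#                "message_level": "ERROR",
#                "guaranteed_until": "<timestamp>",
#                "request_id": "req-<uuid>",
#                "links": [...]}}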
from cinder.api import common class ViewBuilder(common.ViewBuilder): """Model a server API response as a python dictionary.""" _collection_name = "messages" def index(self, request, messages, message_count=None): """Show a list of messages.""" return self._list_view(self.detail, request, messages, message_count) def detail(self, request, message): """Detailed view of a single message.""" message_ref = { 'id': message.get('id'), 'event_id': message.get('event_id'), 'user_message': message.get('user_message'), 'message_level': message.get('message_level'), 'created_at': message.get('created_at'), 'guaranteed_until': message.get('expires_at'), 'request_id': message.get('request_id'), 'links': self._get_links(request, message['id']), } if message.get('resource_type'): message_ref['resource_type'] = message.get('resource_type') if message.get('resource_uuid'): message_ref['resource_uuid'] = message.get('resource_uuid') return {'message': message_ref} def _list_view(self, func, request, messages, message_count=None, coll_name=_collection_name): """Provide a view for a list of messages. :param func: Function used to format the message data :param request: API request :param messages: List of messages in dictionary format :param message_count: Length of the original list of messages :param coll_name: Name of collection, used to generate the next link for a pagination query :returns: message data in dictionary format """ messages_list = [func(request, message)['message'] for message in messages] messages_links = self._get_collection_links(request, messages, coll_name, message_count) messages_dict = dict(messages=messages_list) if messages_links: messages_dict['messages_links'] = messages_links return messages_dict ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/api/v3/views/resource_filters.py0000664000175000017500000000224400000000000022524 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. class ViewBuilder(object): """Model an resource filters API response as a python dictionary.""" _collection_name = "resource_filters" @classmethod def list(cls, filters): """Build a view of a list of resource filters. .. code-block:: json { "resource_filters": [{ "resource": "resource_1", "filters": ["filter1", "filter2", "filter3"] }] } """ return {'resource_filters': [{ 'resource': fil[0], 'filters': fil[1]} for fil in filters.items()]} ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/api/v3/views/snapshots.py0000664000175000017500000000305600000000000021171 0ustar00zuulzuul00000000000000# Copyright 2016 EMC Corporation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from cinder.api import microversions as mv from cinder.api.views import snapshots as views_v2 class ViewBuilder(views_v2.ViewBuilder): """Model a snapshots API V3 response as a python dictionary.""" def detail(self, request, snapshot): """Detailed view of a single snapshot.""" snapshot_ref = super(ViewBuilder, self).detail(request, snapshot) req_version = request.api_version_request # Add group_snapshot_id if min version is greater than or equal # to GROUP_SNAPSHOTS. snap = snapshot_ref['snapshot'] if req_version.matches(mv.GROUP_SNAPSHOTS, None): snap['group_snapshot_id'] = snapshot.get('group_snapshot_id') if req_version.matches(mv.SNAPSHOT_LIST_USER_ID, None): snap['user_id'] = snapshot.get('user_id') if req_version.matches(mv.USE_QUOTA): snap['consumes_quota'] = snapshot.get('use_quota') return snapshot_ref ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/api/v3/views/types.py0000664000175000017500000000516600000000000020317 0ustar00zuulzuul00000000000000# Copyright 2012 Red Hat, Inc. # Copyright 2015 Intel Corporation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from cinder.api import common from cinder.policies import type_extra_specs as extra_specs_policy from cinder.policies import volume_type as policy class ViewBuilder(common.ViewBuilder): def show(self, request, volume_type, brief=False): """Trim away extraneous volume type attributes.""" context = request.environ['cinder.context'] trimmed = dict(id=volume_type.get('id'), name=volume_type.get('name'), is_public=volume_type.get('is_public'), description=volume_type.get('description')) if context.authorize(policy.EXTRA_SPEC_POLICY, fatal=False): extra_specs = volume_type.get('extra_specs', {}) if context.authorize(extra_specs_policy.READ_SENSITIVE_POLICY, fatal=False): trimmed_specs = extra_specs else: # Limit the response to contain only user visible specs. 
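                # NOTE(editor): callers that pass the EXTRA_SPEC_POLICY check
                # but not READ_SENSITIVE_POLICY only receive the allow-listed
                # keys in USER_VISIBLE_EXTRA_SPECS; all other extra specs are
                # dropped from the response.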
trimmed_specs = {} for uv_spec in extra_specs_policy.USER_VISIBLE_EXTRA_SPECS: if uv_spec in extra_specs: trimmed_specs[uv_spec] = extra_specs[uv_spec] trimmed['extra_specs'] = trimmed_specs if context.authorize(policy.QOS_POLICY, fatal=False): trimmed['qos_specs_id'] = volume_type.get('qos_specs_id') return trimmed if brief else dict(volume_type=trimmed) def index(self, request, volume_types): """Index over trimmed volume types.""" volume_types_list = [self.show(request, volume_type, True) for volume_type in volume_types] volume_type_links = self._get_collection_links(request, volume_types, 'types') volume_types_dict = dict(volume_types=volume_types_list) if volume_type_links: volume_types_dict['volume_type_links'] = volume_type_links return volume_types_dict ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/api/v3/views/volumes.py0000664000175000017500000001265300000000000020644 0ustar00zuulzuul00000000000000# Copyright 2016 EMC Corporation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from cinder.api import microversions as mv from cinder.api.v2.views import volumes as views_v2 from cinder.common import constants as cinder_constants class ViewBuilder(views_v2.ViewBuilder): """Model a volumes API V3 response as a python dictionary.""" _collection_name = "volumes" def quick_summary(self, volume_count, volume_size, all_distinct_metadata=None): """View of volumes summary. It includes number of volumes, size of volumes and all distinct metadata of volumes. """ summary = { 'volume-summary': { 'total_count': volume_count, 'total_size': volume_size } } if all_distinct_metadata is not None: summary['volume-summary']['metadata'] = all_distinct_metadata return summary def detail(self, request, volume): """Detailed view of a single volume.""" volume_ref = super(ViewBuilder, self).detail(request, volume) req_version = request.api_version_request # Add group_id if min version is greater than or equal to GROUP_VOLUME. if req_version.matches(mv.GROUP_VOLUME, None): volume_ref['volume']['group_id'] = volume.get('group_id') # Add provider_id if min version is greater than or equal to # VOLUME_DETAIL_PROVIDER_ID for admin. 
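        # NOTE(editor): both conditions below must hold; non-admin callers
        # never see provider_id regardless of the requested microversion.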
if (request.environ['cinder.context'].is_admin and req_version.matches(mv.VOLUME_DETAIL_PROVIDER_ID, None)): volume_ref['volume']['provider_id'] = volume.get('provider_id') if req_version.matches( mv.VOLUME_SHARED_TARGETS_AND_SERVICE_FIELDS, None): # For microversion 3.69 or higher it is acceptable to be null # but for earlier versions we convert None to True shared = volume.get('shared_targets', False) if (not req_version.matches(mv.SHARED_TARGETS_TRISTATE, None) and shared is None): shared = True volume_ref['volume']['shared_targets'] = shared volume_ref['volume']['service_uuid'] = volume.get( 'service_uuid', None) if (request.environ['cinder.context'].is_admin and req_version.matches( mv.VOLUME_CLUSTER_NAME, None)): volume_ref['volume']['cluster_name'] = volume.get( 'cluster_name', None) if req_version.matches(mv.VOLUME_TYPE_ID_IN_VOLUME_DETAIL, None): volume_ref[ 'volume']["volume_type_id"] = volume['volume_type'].get('id') if req_version.matches(mv.ENCRYPTION_KEY_ID_IN_DETAILS, None): encryption_key_id = volume.get('encryption_key_id', None) if (encryption_key_id and encryption_key_id != cinder_constants.FIXED_KEY_ID): volume_ref['volume']['encryption_key_id'] = encryption_key_id if req_version.matches(mv.USE_QUOTA): volume_ref['volume']['consumes_quota'] = volume.get('use_quota') return volume_ref def _list_view(self, func, request, volumes, volume_count, coll_name=_collection_name): """Provide a view for a list of volumes. :param func: Function used to format the volume data :param request: API request :param volumes: List of volumes in dictionary format :param volume_count: Length of the original list of volumes :param coll_name: Name of collection, used to generate the next link for a pagination query :returns: Volume data in dictionary format """ volumes_list = [func(request, volume)['volume'] for volume in volumes] volumes_links = self._get_collection_links(request, volumes, coll_name, volume_count) volumes_dict = {"volumes": volumes_list} if volumes_links: volumes_dict['volumes_links'] = volumes_links req_version = request.api_version_request if req_version.matches( mv.SUPPORT_COUNT_INFO, None) and volume_count is not None: volumes_dict['count'] = volume_count return volumes_dict def _get_volume_type(self, request, volume): """Returns the volume type of the volume. Retrieves the volume type name for microversion 3.63. Otherwise, it uses the default implementation from super. """ req_version = request.api_version_request if req_version.matches(mv.VOLUME_TYPE_ID_IN_VOLUME_DETAIL): if volume.get('volume_type'): return volume['volume_type']['name'] return None return super(ViewBuilder, self)._get_volume_type(request, volume) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/api/v3/views/workers.py0000664000175000017500000000164600000000000020646 0ustar00zuulzuul00000000000000# Copyright (c) 2016 Red Hat Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
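# NOTE(editor): illustrative only.  service_list() below maps service-like
# objects straight into plain dicts; the workers cleanup API uses it to
# report which services accepted the cleanup and which were unavailable.
# With made-up values the result looks like:
#
#     [{'id': 1, 'host': 'node-1', 'binary': 'cinder-volume',
#       'cluster_name': None}]
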
class ViewBuilder(object): """Map Cluster into dicts for API responses.""" _collection_name = 'workers' @classmethod def service_list(cls, services): return [{'id': s.id, 'host': s.host, 'binary': s.binary, 'cluster_name': s.cluster_name} for s in services] ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/api/v3/volume_manage.py0000664000175000017500000000257500000000000020636 0ustar00zuulzuul00000000000000# Copyright (c) 2016 Stratoscale, Ltd. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from http import HTTPStatus from cinder.api.contrib import volume_manage as volume_manage_v2 from cinder.api import microversions as mv from cinder.api.openstack import wsgi from cinder.api.v3 import resource_common_manage as common class VolumeManageController(common.ManageResource, volume_manage_v2.VolumeManageController): def __init__(self, *args, **kwargs): super(VolumeManageController, self).__init__(*args, **kwargs) self._set_resource_type('volume') @wsgi.response(HTTPStatus.ACCEPTED) def create(self, req, body): self._ensure_min_version(req, mv.MANAGE_EXISTING_LIST) return super(VolumeManageController, self).create(req, body=body) def create_resource(): return wsgi.Resource(VolumeManageController()) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/api/v3/volume_metadata.py0000664000175000017500000000577200000000000021170 0ustar00zuulzuul00000000000000# Copyright 2016 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
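# NOTE(editor): non-normative summary of the controller below.  From
# microversion ETAGS onward, index() returns an Etag header computed as the
# md5 of the JSON-serialized {"metadata": ...} body, and update()/
# update_all() honour If-Match: a request whose etag no longer matches gets
# 412 Precondition Failed.  A client therefore GETs the metadata, remembers
# the Etag, sends the change with "If-Match: <etag>" and retries on 412.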
"""The volume metadata V3 api.""" import hashlib from http import HTTPStatus from oslo_serialization import jsonutils import webob from cinder.api import microversions as mv from cinder.api.openstack import wsgi from cinder.api.schemas import volume_metadata as metadata from cinder.api.v2 import volume_metadata as volume_meta_v2 from cinder.api import validation class Controller(volume_meta_v2.Controller): """The volume metadata API controller for the OpenStack API.""" def _validate_etag(self, req, volume_id): if not req.if_match: return True context = req.environ['cinder.context'] metadata = self._get_metadata(context, volume_id) data = jsonutils.dumps({"metadata": metadata}) data = data.encode('utf-8') checksum = hashlib.md5(data, usedforsecurity=False).hexdigest() return checksum in req.if_match.etags @wsgi.extends def index(self, req, volume_id): req_version = req.api_version_request metadata = super(Controller, self).index(req, volume_id) if req_version.matches(mv.ETAGS): data = jsonutils.dumps(metadata) data = data.encode('utf-8') resp = webob.Response() resp.headers['Etag'] = hashlib.md5( data, usedforsecurity=False).hexdigest() resp.body = data return resp return metadata @wsgi.extends @validation.schema(metadata.update) def update(self, req, volume_id, id, body): req_version = req.api_version_request if req_version.matches(mv.ETAGS): if not self._validate_etag(req, volume_id): return webob.Response( status_int=HTTPStatus.PRECONDITION_FAILED) return super(Controller, self).update(req, volume_id, id, body=body) @wsgi.extends @validation.schema(metadata.create) def update_all(self, req, volume_id, body): req_version = req.api_version_request if req_version.matches(mv.ETAGS): if not self._validate_etag(req, volume_id): return webob.Response( status_int=HTTPStatus.PRECONDITION_FAILED) return super(Controller, self).update_all(req, volume_id, body=body) def create_resource(): return wsgi.Resource(Controller()) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/api/v3/volume_transfer.py0000664000175000017500000001131400000000000021221 0ustar00zuulzuul00000000000000# Copyright 2018 FiberHome Telecommunication Technologies CO.,LTD # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from http import HTTPStatus from oslo_log import log as logging from oslo_utils import strutils from webob import exc from cinder.api import common from cinder.api.contrib import volume_transfer as volume_transfer_v2 from cinder.api import microversions as mv from cinder.api.openstack import wsgi from cinder.api.schemas import volume_transfer from cinder.api import validation from cinder import exception LOG = logging.getLogger(__name__) class VolumeTransferController(volume_transfer_v2.VolumeTransferController): """The transfer API controller for the OpenStack API V3.""" def _get_transfers(self, req, is_detail): """Returns a list of transfers, transformed through view builder.""" context = req.environ['cinder.context'] req_version = req.api_version_request params = req.params.copy() marker = limit = offset = None if req_version.matches(mv.SUPPORT_TRANSFER_PAGINATION): marker, limit, offset = common.get_pagination_params(params) sort_keys, sort_dirs = common.get_sort_params(params) else: # NOTE(yikun): After microversion SUPPORT_TRANSFER_PAGINATION, # transfers list api use the ['created_at'], ['asc'] # as default order, but we should keep the compatible in here. sort_keys, sort_dirs = ['created_at', 'id'], ['asc', 'asc'] filters = params if 'name' in filters: filters['display_name'] = filters.pop('name') LOG.debug('Listing volume transfers') transfers = self.transfer_api.get_all(context, marker=marker, limit=limit, sort_keys=sort_keys, sort_dirs=sort_dirs, filters=filters, offset=offset) transfer_count = len(transfers) limited_list = common.limited(transfers, req) if is_detail: transfers = self._view_builder.detail_list(req, limited_list, transfer_count) else: transfers = self._view_builder.summary_list(req, limited_list, transfer_count) return transfers def index(self, req): """Returns a summary list of transfers.""" return self._get_transfers(req, is_detail=False) def detail(self, req): """Returns a detailed list of transfers.""" return self._get_transfers(req, is_detail=True) @wsgi.response(HTTPStatus.ACCEPTED) @validation.schema(volume_transfer.create, mv.BASE_VERSION, mv.get_prior_version(mv.TRANSFER_WITH_SNAPSHOTS)) @validation.schema(volume_transfer.create_v355, mv.TRANSFER_WITH_SNAPSHOTS) def create(self, req, body): """Create a new volume transfer.""" LOG.debug('Creating new volume transfer %s', body) context = req.environ['cinder.context'] transfer = body['transfer'] volume_id = transfer['volume_id'] name = transfer.get('name', None) if name is not None: name = name.strip() no_snapshots = strutils.bool_from_string(transfer.get('no_snapshots', False)) req_version = req.api_version_request allow_encrypted = req_version.matches(mv.TRANSFER_ENCRYPTED_VOLUME) LOG.info("Creating transfer of volume %s", volume_id) try: new_transfer = self.transfer_api.create( context, volume_id, name, no_snapshots=no_snapshots, allow_encrypted=allow_encrypted) # Not found exception will be handled at the wsgi level except exception.Invalid as error: raise exc.HTTPBadRequest(explanation=error.msg) transfer = self._view_builder.create(req, dict(new_transfer)) return transfer def create_resource(): return wsgi.Resource(VolumeTransferController()) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/api/v3/volumes.py0000664000175000017500000004200500000000000017501 0ustar00zuulzuul00000000000000# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """The volumes V3 api.""" from http import HTTPStatus from oslo_log import log as logging from oslo_utils import timeutils import webob from webob import exc from cinder.api import api_utils from cinder.api import common from cinder.api.contrib import scheduler_hints from cinder.api import microversions as mv from cinder.api.openstack import wsgi from cinder.api.schemas import volumes from cinder.api.v2 import volumes as volumes_v2 from cinder.api.v3.views import volumes as volume_views_v3 from cinder.api import validation from cinder.backup import api as backup_api from cinder import exception from cinder import group as group_api from cinder.i18n import _ from cinder.image import glance from cinder import objects from cinder.policies import volumes as policy from cinder import utils LOG = logging.getLogger(__name__) class VolumeController(volumes_v2.VolumeController): """The Volumes API controller for the OpenStack API V3.""" _view_builder_class = volume_views_v3.ViewBuilder def __init__(self, ext_mgr=None): self.group_api = group_api.API() self.backup_api = backup_api.API() super(VolumeController, self).__init__(ext_mgr) def delete(self, req, id): """Delete a volume.""" context = req.environ['cinder.context'] req_version = req.api_version_request cascade = utils.get_bool_param('cascade', req.params) force = False params = "" if req_version.matches(mv.VOLUME_DELETE_FORCE): force = utils.get_bool_param('force', req.params) if cascade or force: params = "(cascade: %(c)s, force: %(f)s)" % {'c': cascade, 'f': force} LOG.info("Delete volume with id: %(id)s %(params)s", {'id': id, 'params': params}, context=context) volume = self.volume_api.get(context, id) if force: context.authorize(policy.FORCE_DELETE_POLICY, target_obj=volume) self.volume_api.delete(context, volume, cascade=cascade, force=force) return webob.Response(status_int=HTTPStatus.ACCEPTED) MV_ADDED_FILTERS = ( (mv.get_prior_version(mv.VOLUME_LIST_GLANCE_METADATA), 'glance_metadata'), (mv.get_prior_version(mv.VOLUME_LIST_GROUP), 'group_id'), (mv.get_prior_version(mv.VOLUME_TIME_COMPARISON_FILTER), 'created_at'), (mv.get_prior_version(mv.VOLUME_TIME_COMPARISON_FILTER), 'updated_at'), # REST API receives consumes_quota, but process_general_filtering # transforms it into use_quota (mv.get_prior_version(mv.USE_QUOTA), 'use_quota'), ) @common.process_general_filtering('volume') def _process_volume_filtering(self, context=None, filters=None, req_version=None): for version, field in self.MV_ADDED_FILTERS: if req_version.matches(None, version): filters.pop(field, None) api_utils.remove_invalid_filter_options( context, filters, self._get_volume_filter_options()) def _handle_time_comparison_filters(self, filters): for time_comparison_filter in ['created_at', 'updated_at']: if time_comparison_filter in filters: time_filter_dict = {} comparison_units = filters[time_comparison_filter].split(',') operators = common.get_time_comparison_operators() for comparison_unit in comparison_units: try: operator_and_time = comparison_unit.split(":") comparison_operator = operator_and_time[0] time = '' for time_str in operator_and_time[1:-1]: time += time_str 
+ ":" time += operator_and_time[-1] if comparison_operator not in operators: msg = _( 'Invalid %s operator') % comparison_operator raise exc.HTTPBadRequest(explanation=msg) except IndexError: msg = _('Invalid %s value') % time_comparison_filter raise exc.HTTPBadRequest(explanation=msg) try: parsed_time = timeutils.parse_isotime(time) except ValueError: msg = _('Invalid %s value') % time raise exc.HTTPBadRequest(explanation=msg) time_filter_dict[comparison_operator] = parsed_time filters[time_comparison_filter] = time_filter_dict def _get_volumes(self, req, is_detail): """Returns a list of volumes, transformed through view builder.""" context = req.environ['cinder.context'] req_version = req.api_version_request params = req.params.copy() marker, limit, offset = common.get_pagination_params(params) sort_keys, sort_dirs = common.get_sort_params(params) filters = params show_count = False if req_version.matches( mv.SUPPORT_COUNT_INFO) and 'with_count' in filters: show_count = utils.get_bool_param('with_count', filters) filters.pop('with_count') self._process_volume_filtering(context=context, filters=filters, req_version=req_version) # NOTE: it's 'name' in the REST API, but 'display_name' in the # database layer, so we need to do this translation if 'name' in sort_keys: sort_keys[sort_keys.index('name')] = 'display_name' if 'name' in filters: filters['display_name'] = filters.pop('name') if 'use_quota' in filters: filters['use_quota'] = utils.get_bool_param('use_quota', filters) self._handle_time_comparison_filters(filters) strict = req.api_version_request.matches( mv.VOLUME_LIST_BOOTABLE, None) self.volume_api.check_volume_filters(filters, strict) volumes = self.volume_api.get_all(context, marker, limit, sort_keys=sort_keys, sort_dirs=sort_dirs, filters=filters.copy(), viewable_admin_meta=True, offset=offset) total_count = None if show_count: total_count = self.volume_api.calculate_resource_count( context, 'volume', filters) for volume in volumes: api_utils.add_visible_admin_metadata(volume) req.cache_db_volumes(volumes.objects) if is_detail: volumes = self._view_builder.detail_list( req, volumes, total_count) else: volumes = self._view_builder.summary_list( req, volumes, total_count) return volumes @wsgi.Controller.api_version(mv.VOLUME_SUMMARY) def summary(self, req): """Return summary of volumes.""" view_builder_v3 = volume_views_v3.ViewBuilder() context = req.environ['cinder.context'] filters = req.params.copy() api_utils.remove_invalid_filter_options( context, filters, self._get_volume_filter_options()) num_vols, sum_size, metadata = self.volume_api.get_volume_summary( context, filters=filters) req_version = req.api_version_request if req_version.matches(mv.VOLUME_SUMMARY_METADATA): all_distinct_metadata = metadata else: all_distinct_metadata = None return view_builder_v3.quick_summary(num_vols, int(sum_size), all_distinct_metadata) @wsgi.response(HTTPStatus.ACCEPTED) @wsgi.Controller.api_version(mv.VOLUME_REVERT) @wsgi.action('revert') def revert(self, req, id, body): """revert a volume to a snapshot""" context = req.environ['cinder.context'] self.assert_valid_body(body, 'revert') snapshot_id = body['revert'].get('snapshot_id') volume = self.volume_api.get_volume(context, id) try: l_snap = volume.get_latest_snapshot() except exception.VolumeSnapshotNotFound: msg = _("Volume %s doesn't have any snapshots.") raise exc.HTTPBadRequest(explanation=msg % volume.id) # Ensure volume and snapshot match. 
if snapshot_id is None or snapshot_id != l_snap.id: msg = _("Specified snapshot %(s_id)s is None or not " "the latest one of volume %(v_id)s.") raise exc.HTTPBadRequest(explanation=msg % {'s_id': snapshot_id, 'v_id': volume.id}) if volume.size != l_snap.volume_size: msg = _("Can't revert volume %(v_id)s to its latest snapshot " "%(s_id)s. The volume size must be equal to the snapshot " "size.") raise exc.HTTPBadRequest(explanation=msg % {'s_id': snapshot_id, 'v_id': volume.id}) try: msg = 'Reverting volume %(v_id)s to snapshot %(s_id)s.' LOG.info(msg, {'v_id': volume.id, 's_id': l_snap.id}) self.volume_api.revert_to_snapshot(context, volume, l_snap) except (exception.InvalidVolume, exception.InvalidSnapshot) as e: raise exc.HTTPConflict(explanation=str(e)) def _get_image_snapshot(self, context, image_uuid): image_snapshot = None if image_uuid: image_service = glance.get_default_image_service() image_meta = image_service.show(context, image_uuid) if image_meta is not None: bdms = image_meta.get('properties', {}).get( 'block_device_mapping', []) if bdms: boot_bdm = [bdm for bdm in bdms if ( bdm.get('source_type') == 'snapshot' and bdm.get('boot_index') == 0)] if boot_bdm: try: image_snapshot = self.volume_api.get_snapshot( context, boot_bdm[0].get('snapshot_id')) return image_snapshot except exception.NotFound: explanation = _( 'Nova specific image is found, but boot ' 'volume snapshot id:%s not found.' ) % boot_bdm[0].get('snapshot_id') raise exc.HTTPNotFound(explanation=explanation) return image_snapshot @wsgi.response(HTTPStatus.ACCEPTED) @validation.schema(volumes.create, mv.BASE_VERSION, mv.get_prior_version(mv.GROUP_VOLUME)) @validation.schema(volumes.create_volume_v313, mv.GROUP_VOLUME, mv.get_prior_version(mv.VOLUME_CREATE_FROM_BACKUP)) @validation.schema(volumes.create_volume_v347, mv.VOLUME_CREATE_FROM_BACKUP, mv.get_prior_version(mv.SUPPORT_VOLUME_SCHEMA_CHANGES)) @validation.schema(volumes.create_volume_v353, mv.SUPPORT_VOLUME_SCHEMA_CHANGES) def create(self, req, body): """Creates a new volume. :param req: the request :param body: the request body :returns: dict -- the new volume dictionary :raises HTTPNotFound, HTTPBadRequest: """ LOG.debug('Create volume request body: %s', body) context = req.environ['cinder.context'] req_version = req.api_version_request # NOTE (pooja_jadhav) To fix bug 1774155, scheduler hints is not # loaded as a standard extension. If user passes # OS-SCH-HNT:scheduler_hints in the request body, then it will be # validated in the create method and this method will add # scheduler_hints in body['volume']. 
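        # NOTE(editor): illustrative request fragment only; the available
        # hint names depend on the enabled scheduler filters:
        #   {"volume": {...},
        #    "OS-SCH-HNT:scheduler_hints": {"same_host": "<volume-uuid>"}}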
body = scheduler_hints.create(req, body) volume = body['volume'] kwargs = {} self.validate_name_and_description(volume, check_length=False) # NOTE: it's 'name'/'description' in the REST API, but # 'display_name'/display_description' in the database layer, # so we need to do this translation if 'name' in volume: volume['display_name'] = volume.pop('name') if 'description' in volume: volume['display_description'] = volume.pop('description') if 'image_id' in volume: volume['imageRef'] = volume.pop('image_id') req_volume_type = volume.get('volume_type', None) if req_volume_type: # Not found exception will be handled at the wsgi level kwargs['volume_type'] = ( objects.VolumeType.get_by_name_or_id(context, req_volume_type)) kwargs['metadata'] = volume.get('metadata', None) snapshot_id = volume.get('snapshot_id') if snapshot_id is not None: # Not found exception will be handled at the wsgi level kwargs['snapshot'] = self.volume_api.get_snapshot(context, snapshot_id) else: kwargs['snapshot'] = None source_volid = volume.get('source_volid') if source_volid is not None: # Not found exception will be handled at the wsgi level kwargs['source_volume'] = ( self.volume_api.get_volume(context, source_volid)) else: kwargs['source_volume'] = None kwargs['group'] = None kwargs['consistencygroup'] = None consistencygroup_id = volume.get('consistencygroup_id') if consistencygroup_id is not None: # Not found exception will be handled at the wsgi level kwargs['group'] = self.group_api.get(context, consistencygroup_id) # Get group_id if volume is in a group. group_id = volume.get('group_id') if group_id is not None: # Not found exception will be handled at the wsgi level kwargs['group'] = self.group_api.get(context, group_id) image_ref = volume.get('imageRef') if image_ref is not None: image_uuid = self._image_uuid_from_ref(image_ref, context) image_snapshot = self._get_image_snapshot(context, image_uuid) if (req_version.matches(mv.get_api_version( mv.SUPPORT_NOVA_IMAGE)) and image_snapshot): kwargs['snapshot'] = image_snapshot else: kwargs['image_id'] = image_uuid backup_id = volume.get('backup_id') if backup_id: kwargs['backup'] = self.backup_api.get(context, backup_id=backup_id) size = volume.get('size', None) if size is None and kwargs['snapshot'] is not None: size = kwargs['snapshot']['volume_size'] elif size is None and kwargs['source_volume'] is not None: size = kwargs['source_volume']['size'] elif size is None and kwargs.get('backup') is not None: size = kwargs['backup']['size'] LOG.info("Create volume of %s GB", size) kwargs['availability_zone'] = volume.get('availability_zone', None) kwargs['scheduler_hints'] = volume.get('scheduler_hints', None) multiattach = utils.get_bool_param('multiattach', volume) if multiattach: msg = _("multiattach parameter has been removed. The default " "behavior is to use multiattach enabled volume types. 
" "Contact your administrator to create a multiattach " "enabled volume type and use it to create multiattach " "volumes.") raise exc.HTTPBadRequest(explanation=msg) try: new_volume = self.volume_api.create( context, size, volume.get('display_name'), volume.get('display_description'), **kwargs) except exception.VolumeTypeDefaultMisconfiguredError as err: raise exc.HTTPInternalServerError(explanation=err.msg) retval = self._view_builder.detail(req, new_volume) return retval def create_resource(ext_mgr): return wsgi.Resource(VolumeController(ext_mgr)) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/api/v3/workers.py0000664000175000017500000001066600000000000017513 0ustar00zuulzuul00000000000000# Copyright (c) 2016 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from http import HTTPStatus from oslo_utils import strutils from oslo_utils import timeutils from cinder.api import microversions as mv from cinder.api.openstack import wsgi from cinder.api.schemas import workers from cinder.api.v3.views import workers as workers_view from cinder.api import validation from cinder import db from cinder import exception from cinder.i18n import _ from cinder import objects from cinder.objects import cleanable from cinder.policies import workers as policy from cinder.scheduler import rpcapi as sch_rpc from cinder import utils class WorkerController(wsgi.Controller): def __init__(self, *args, **kwargs): self.sch_api = sch_rpc.SchedulerAPI() @wsgi.Controller.api_version(mv.WORKERS_CLEANUP) @wsgi.response(HTTPStatus.ACCEPTED) @validation.schema(workers.cleanup) def cleanup(self, req, body=None): """Do the cleanup on resources from a specific service/host/node.""" # Let the wsgi middleware convert NotAuthorized exceptions ctxt = req.environ['cinder.context'] ctxt.authorize(policy.CLEAN_POLICY) body = body or {} for boolean in ('disabled', 'is_up'): if body.get(boolean) is not None: body[boolean] = strutils.bool_from_string(body[boolean]) resource_type = body.get('resource_type') if resource_type: resource_type = resource_type.title() types = cleanable.CinderCleanableObject.cleanable_resource_types if resource_type not in types: valid_types = utils.build_or_str(types) msg = _('Resource type %(resource_type)s not valid,' ' must be %(valid_types)s') msg = msg % {"resource_type": resource_type, "valid_types": valid_types} raise exception.InvalidInput(reason=msg) body['resource_type'] = resource_type resource_id = body.get('resource_id') if resource_id: # If we have the resource type but we don't have where it is # located, we get it from the DB to limit the distribution of the # request by the scheduler, otherwise it will be distributed to all # the services. 
location_keys = {'service_id', 'cluster_name', 'host'} if not location_keys.intersection(body): workers = db.worker_get_all(ctxt, resource_id=resource_id, binary=body.get('binary'), resource_type=resource_type) if len(workers) == 0: msg = (_('There is no resource with UUID %s pending ' 'cleanup.'), resource_id) raise exception.InvalidInput(reason=msg) if len(workers) > 1: msg = (_('There are multiple resources with UUID %s ' 'pending cleanup. Please be more specific.'), resource_id) raise exception.InvalidInput(reason=msg) worker = workers[0] body.update(service_id=worker.service_id, resource_type=worker.resource_type) body['until'] = timeutils.utcnow() # NOTE(geguileo): If is_up is not specified in the request # CleanupRequest's default will be used (False) cleanup_request = objects.CleanupRequest(**body) cleaning, unavailable = self.sch_api.work_cleanup(ctxt, cleanup_request) return { 'cleaning': workers_view.ViewBuilder.service_list(cleaning), 'unavailable': workers_view.ViewBuilder.service_list(unavailable), } def create_resource(): return wsgi.Resource(WorkerController()) ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1759315577.059118 cinder-27.0.0/cinder/api/validation/0000775000175000017500000000000000000000000017236 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/api/validation/__init__.py0000664000175000017500000000443100000000000021351 0ustar00zuulzuul00000000000000# Copyright (C) 2017 NTT DATA # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Request Body validating middleware. """ import functools from cinder.api.openstack import api_version_request as api_version from cinder.api.validation import validators def schema(request_body_schema, min_version=None, max_version=None): """Register a schema to validate request body. Registered schema will be used for validating request body just before API method executing. :param dict request_body_schema: a schema to validate request body :param min_version: A string of two numerals. X.Y indicating the minimum version of the JSON-Schema to validate against. :param max_version: A string of two numerals. X.Y indicating the maximum version of the JSON-Schema to validate against. """ def add_validator(func): @functools.wraps(func) def wrapper(*args, **kwargs): min_ver = api_version.APIVersionRequest(min_version) max_ver = api_version.APIVersionRequest(max_version) if 'req' in kwargs: ver = kwargs['req'].api_version_request else: ver = args[1].api_version_request if ver.matches(min_ver, max_ver): # Only validate against the schema if it lies within # the version range specified. Note that if both min # and max are not specified the validator will always # be run. 
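                # NOTE(editor): the decorated method must receive the request
                # body as the keyword argument 'body'; the schema below is
                # applied to kwargs['body'] only.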
schema_validator = validators._SchemaValidator( request_body_schema) schema_validator.validate(kwargs['body']) return func(*args, **kwargs) return wrapper return add_validator ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/api/validation/parameter_types.py0000664000175000017500000002016200000000000023015 0ustar00zuulzuul00000000000000# Copyright (C) 2017 NTT DATA # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Common parameter types for validating request Body. """ import copy import re import unicodedata from cinder.common import constants def _is_printable(char): """determine if a unicode code point is printable. This checks if the character is either "other" (mostly control codes), or a non-horizontal space. All characters that don't match those criteria are considered printable; that is: letters; combining marks; numbers; punctuation; symbols; (horizontal) space separators. """ category = unicodedata.category(char) return (not category.startswith("C") and (not category.startswith("Z") or category == "Zs")) def _get_all_chars(): for i in range(0xFFFF): yield chr(i) # build a regex that matches all printable characters. This allows # spaces in the middle of the name. Also note that the regexp below # deliberately allows the empty string. This is so only the constraint # which enforces a minimum length for the name is triggered when an # empty string is tested. Otherwise it is not deterministic which # constraint fails and this causes issues for some unittests when # PYTHONHASHSEED is set randomly. def _build_regex_range(ws=True, invert=False, exclude=None): """Build a range regex for a set of characters in utf8. This builds a valid range regex for characters in utf8 by iterating the entire space and building up a set of x-y ranges for all the characters we find which are valid. :param ws: should we include whitespace in this range. :param exclude: any characters we want to exclude :param invert: invert the logic The inversion is useful when we want to generate a set of ranges which is everything that's not a certain class. For instance, produce all the non printable characters as a set of ranges. """ if exclude is None: exclude = [] regex = "" # are we currently in a range in_range = False # last character we found, for closing ranges last = None # last character we added to the regex, this lets us know that we # already have B in the range, which means we don't need to close # it out with B-B. While the later seems to work, it's kind of bad form. last_added = None def valid_char(char): if char in exclude: result = False elif ws: result = _is_printable(char) else: # Zs is the unicode class for space characters, of which # there are about 10 in this range. result = (_is_printable(char) and unicodedata.category(char) != "Zs") if invert is True: return not result return result # iterate through the entire character range. 
in_ for c in _get_all_chars(): if valid_char(c): if not in_range: regex += re.escape(c) last_added = c in_range = True else: if in_range and last != last_added: regex += "-" + re.escape(last) in_range = False last = c else: if in_range: regex += "-" + re.escape(c) return regex valid_description_regex_base = '^[%s]*$' valid_description_regex = valid_description_regex_base % ( _build_regex_range()) name = { 'type': 'string', 'minLength': 1, 'maxLength': 255, 'format': 'name' } update_name = { 'type': ['string', 'null'], 'minLength': 1, 'maxLength': 255 } description = { 'type': ['string', 'null'], 'minLength': 0, 'maxLength': 255, 'pattern': valid_description_regex, } boolean = { 'type': ['boolean', 'string'], 'enum': [True, 'True', 'TRUE', 'true', '1', 'ON', 'On', 'on', 'YES', 'Yes', 'yes', 'y', 't', False, 'False', 'FALSE', 'false', '0', 'OFF', 'Off', 'off', 'NO', 'No', 'no', 'n', 'f'], } uuid = { 'type': 'string', 'format': 'uuid' } extra_specs = { 'type': 'object', 'patternProperties': { '^[a-zA-Z0-9-_:. /]{1,255}$': { 'type': 'string', 'maxLength': 255 } }, 'additionalProperties': False } image_metadata = { 'type': 'object', 'patternProperties': { '^[a-zA-Z0-9-_:. /]{1,255}$': { 'type': 'string', 'format': 'mysql_text' } }, 'additionalProperties': False } extra_specs_with_no_spaces_key = { 'type': 'object', 'patternProperties': { '^[a-zA-Z0-9-_:.]{1,255}$': { 'type': ['string', 'null'], 'minLength': 0, 'maxLength': 255 } }, 'additionalProperties': False } group_snapshot_status = { 'type': 'string', 'format': 'group_snapshot_status' } extra_specs_with_null = copy.deepcopy(extra_specs) extra_specs_with_null['patternProperties'][ '^[a-zA-Z0-9-_:. /]{1,255}$']['type'] = ['string', 'null'] name_allow_zero_min_length = { 'type': ['string', 'null'], 'minLength': 0, 'maxLength': 255 } uuid_allow_null = { 'oneOf': [uuid, {'type': 'null'}] } metadata_allows_null = copy.deepcopy(extra_specs) metadata_allows_null['type'] = ['object', 'null'] container = { 'type': ['string', 'null'], 'minLength': 0, 'maxLength': 255} backup_url = {'type': 'string', 'minLength': 1, 'format': 'base64'} backup_service = {'type': 'string', 'minLength': 0, 'maxLength': 255} nullable_string = { 'type': ('string', 'null'), 'minLength': 0, 'maxLength': 255 } volume_size = { 'type': ['integer', 'string'], 'pattern': '^[0-9]+$', 'minimum': 1, 'maximum': constants.DB_MAX_INT } volume_size_allows_null = copy.deepcopy(volume_size) volume_size_allows_null['type'] += ['null'] hostname = { 'type': ['string', 'null'], 'minLength': 1, 'maxLength': 255, # NOTE: 'host' is defined in "services" table, and that # means a hostname. The hostname grammar in RFC952 does # not allow for underscores in hostnames. However, this # schema allows them, because it sometimes occurs in # real systems. As it is a cinder host, not a hostname, # and due to some driver needs, colons and forward slashes # were also included in the regex. 'pattern': '^[a-zA-Z0-9-._#@:/+]*$' } cinder_host = { # A string that represents a cinder host. 
# Examples: # hostname # hostname.domain # hostname.domain@backend # hostname.domain@backend#pool # hostname.domain@backend#[dead:beef::cafe]:/complex_ipv6_pool_w_share 'type': ['string', 'null'], 'minLength': 1, 'maxLength': 255, 'pattern': r'^[a-zA-Z0-9-._#@:/+\[\]]*$' # hostname plus brackets } resource_type = {'type': ['string', 'null'], 'minLength': 0, 'maxLength': 40} service_id = { 'type': ['integer', 'string', 'null'], 'pattern': '^[0-9]*$', 'maxLength': 11 } optional_uuid = {'oneOf': [{'type': 'null'}, {'type': 'string', 'format': 'uuid'}]} quota_class_set = { 'type': 'object', 'format': 'quota_class_set', 'patternProperties': { '^[a-zA-Z0-9-_:. ]{1,300}$': { 'type': ['integer', 'string'], 'pattern': '^[0-9]*$', 'minimum': -1, 'minLength': 1, 'maximum': constants.DB_MAX_INT } }, 'additionalProperties': False } binary = { 'type': 'string', 'enum': [binary for binary in constants.LOG_BINARIES + ('', '*')] } key_size = {'type': ['string', 'integer', 'null'], 'minimum': 0, 'maximum': constants.DB_MAX_INT, 'format': 'key_size'} availability_zone = { 'type': ['string', 'null'], 'minLength': 1, 'maxLength': 255 } optional_boolean = {'oneOf': [{'type': 'null'}, boolean]} ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/api/validation/validators.py0000664000175000017500000004463500000000000021774 0ustar00zuulzuul00000000000000# Copyright (C) 2017 NTT DATA # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Internal implementation of request Body validating middleware. """ import re import jsonschema from jsonschema import exceptions as jsonschema_exc from oslo_serialization import base64 from oslo_utils import strutils from oslo_utils import timeutils from oslo_utils import uuidutils import webob.exc from cinder.api import api_utils from cinder import db from cinder import exception from cinder.i18n import _ from cinder.objects import fields as c_fields from cinder import quota from cinder import utils QUOTAS = quota.QUOTAS GROUP_QUOTAS = quota.GROUP_QUOTAS NON_QUOTA_KEYS = quota.NON_QUOTA_KEYS def _soft_validate_additional_properties( validator, additional_properties_value, param_value, schema): """Validator function. If there are not any properties on the param_value that are not specified in the schema, this will return without any effect. If there are any such extra properties, they will be handled as follows: - if the validator passed to the method is not of type "object", this method will return without any effect. - if the 'additional_properties_value' parameter is True, this method will return without any effect. - if the schema has an additionalProperties value of True, the extra properties on the param_value will not be touched. - if the schema has an additionalProperties value of False and there aren't patternProperties specified, the extra properties will be stripped from the param_value. 
- if the schema has an additionalProperties value of False and there are patternProperties specified, the extra properties will not be touched and raise validation error if pattern doesn't match. """ if (not validator.is_type(param_value, "object") or additional_properties_value): return properties = schema.get("properties", {}) patterns = "|".join(schema.get("patternProperties", {})) extra_properties = set() for prop in param_value: if prop not in properties: if patterns: if not re.search(patterns, prop): extra_properties.add(prop) else: extra_properties.add(prop) if not extra_properties: return if patterns: error = "Additional properties are not allowed (%s %s unexpected)" if len(extra_properties) == 1: verb = "was" else: verb = "were" yield jsonschema_exc.ValidationError( error % (", ".join(repr(extra) for extra in extra_properties), verb)) else: for prop in extra_properties: del param_value[prop] def _validate_string_length(value, entity_name, mandatory=False, min_length=0, max_length=None, remove_whitespaces=False): """Check the length of specified string. :param value: the value of the string :param entity_name: the name of the string :mandatory: string is mandatory or not :param min_length: the min_length of the string :param max_length: the max_length of the string :param remove_whitespaces: True if trimming whitespaces is needed else False """ if not mandatory and not value: return True if mandatory and not value: msg = _("The '%s' can not be None.") % entity_name raise webob.exc.HTTPBadRequest(explanation=msg) if remove_whitespaces: value = value.strip() utils.check_string_length(value, entity_name, min_length=min_length, max_length=max_length) @jsonschema.FormatChecker.cls_checks('date-time') def _validate_datetime_format(param_value): try: timeutils.parse_isotime(param_value) except ValueError: return False else: return True @jsonschema.FormatChecker.cls_checks('name', exception.InvalidName) def _validate_name(param_value): if not param_value: msg = _("The 'name' can not be None.") raise exception.InvalidName(reason=msg) elif len(param_value.strip()) == 0: msg = _("The 'name' can not be empty.") raise exception.InvalidName(reason=msg) return True @jsonschema.FormatChecker.cls_checks('name_skip_leading_trailing_spaces', exception.InvalidName) def _validate_name_skip_leading_trailing_spaces(param_value): if not param_value: msg = _("The 'name' can not be None.") raise exception.InvalidName(reason=msg) param_value = param_value.strip() if len(param_value) == 0: msg = _("The 'name' can not be empty.") raise exception.InvalidName(reason=msg) elif len(param_value) > 255: msg = _("The 'name' can not be greater than 255 characters.") raise exception.InvalidName(reason=msg) return True @jsonschema.FormatChecker.cls_checks('uuid') def _validate_uuid_format(instance): return uuidutils.is_uuid_like(instance) @jsonschema.FormatChecker.cls_checks('group_snapshot_status') def _validate_status(param_value): if len(param_value.strip()) == 0: msg = _("The 'status' can not be empty.") raise exception.InvalidGroupSnapshotStatus(reason=msg) elif param_value.lower() not in c_fields.GroupSnapshotStatus.ALL: msg = _("Group snapshot status: %(status)s is invalid, " "valid statuses are: " "%(valid)s.") % {'status': param_value, 'valid': c_fields.GroupSnapshotStatus.ALL} raise exception.InvalidGroupSnapshotStatus(reason=msg) return True @jsonschema.FormatChecker.cls_checks('progress') def _validate_progress(progress): if progress: try: integer = int(progress[:-1]) except ValueError: msg = _('progress 
must be an integer percentage') raise exception.InvalidInput(reason=msg) if integer < 0 or integer > 100 or progress[-1] != '%': msg = _('progress must be an integer percentage between' ' 0 and 100') raise exception.InvalidInput(reason=msg) return True @jsonschema.FormatChecker.cls_checks('base64') def _validate_base64_format(instance): try: if isinstance(instance, str): instance = instance.encode('utf-8') base64.decode_as_bytes(instance) except TypeError: # The name must be string type. If instance isn't string type, the # TypeError will be raised at here. return False return True @jsonschema.FormatChecker.cls_checks('disabled_reason', exception.InvalidInput) def _validate_disabled_reason(param_value): _validate_string_length(param_value, 'disabled_reason', mandatory=False, min_length=1, max_length=255, remove_whitespaces=True) return True @jsonschema.FormatChecker.cls_checks( 'name_non_mandatory_remove_white_spaces') def _validate_name_non_mandatory_remove_white_spaces(param_value): _validate_string_length(param_value, 'name', mandatory=False, min_length=0, max_length=255, remove_whitespaces=True) return True @jsonschema.FormatChecker.cls_checks( 'description_non_mandatory_remove_white_spaces') def _validate_description_non_mandatory_remove_white_spaces(param_value): _validate_string_length(param_value, 'description', mandatory=False, min_length=0, max_length=255, remove_whitespaces=True) return True @jsonschema.FormatChecker.cls_checks('quota_set') def _validate_quota_set(quota_set): bad_keys = [] for key, value in quota_set.items(): if (key not in QUOTAS and key not in GROUP_QUOTAS and key not in NON_QUOTA_KEYS): bad_keys.append(key) continue if key in NON_QUOTA_KEYS: continue api_utils.validate_integer(value, key, min_value=-1, max_value=db.MAX_INT) if len(bad_keys) > 0: msg = _("Bad key(s) in quota set: %s") % ", ".join(bad_keys) raise exception.InvalidInput(reason=msg) return True @jsonschema.FormatChecker.cls_checks('quota_class_set') def _validate_quota_class_set(instance): bad_keys = [] for key in instance: if key not in QUOTAS and key not in GROUP_QUOTAS: bad_keys.append(key) if len(bad_keys) > 0: msg = _("Bad key(s) in quota class set: %s") % ", ".join(bad_keys) raise exception.InvalidInput(reason=msg) return True @jsonschema.FormatChecker.cls_checks( 'group_status', webob.exc.HTTPBadRequest) def _validate_group_status(param_value): if param_value is None: msg = _("The 'status' can not be None.") raise webob.exc.HTTPBadRequest(explanation=msg) if len(param_value.strip()) == 0: msg = _("The 'status' can not be empty.") raise exception.InvalidGroupStatus(reason=msg) if param_value.lower() not in c_fields.GroupSnapshotStatus.ALL: msg = _("Group status: %(status)s is invalid, valid status " "are: %(valid)s.") % {'status': param_value, 'valid': c_fields.GroupStatus.ALL} raise exception.InvalidGroupStatus(reason=msg) return True @jsonschema.FormatChecker.cls_checks('availability_zone') def _validate_availability_zone(param_value): if param_value is None: return True _validate_string_length(param_value, "availability_zone", mandatory=True, min_length=1, max_length=255, remove_whitespaces=True) return True @jsonschema.FormatChecker.cls_checks( 'group_type', (webob.exc.HTTPBadRequest, exception.InvalidInput)) def _validate_group_type(param_value): _validate_string_length(param_value, 'group_type', mandatory=True, min_length=1, max_length=255, remove_whitespaces=True) return True @jsonschema.FormatChecker.cls_checks('level') def _validate_log_level(level): utils.get_log_method(level) 
return True @jsonschema.FormatChecker.cls_checks('validate_volume_reset_body') def _validate_volume_reset_body(instance): status = instance.get('status') attach_status = instance.get('attach_status') migration_status = instance.get('migration_status') if not status and not attach_status and not migration_status: msg = _("Must specify 'status', 'attach_status' or 'migration_status'" " for update.") raise exception.InvalidParameterValue(err=msg) return True @jsonschema.FormatChecker.cls_checks('volume_status') def _validate_volume_status(param_value): if param_value and param_value.lower() not in c_fields.VolumeStatus.ALL: msg = _("Volume status: %(status)s is invalid, " "valid statuses are: " "%(valid)s.") % {'status': param_value, 'valid': c_fields.VolumeStatus.ALL} raise exception.InvalidParameterValue(err=msg) return True @jsonschema.FormatChecker.cls_checks('volume_attach_status') def _validate_volume_attach_status(param_value): valid_attach_status = [c_fields.VolumeAttachStatus.ATTACHED, c_fields.VolumeAttachStatus.DETACHED] if param_value and param_value.lower() not in valid_attach_status: msg = _("Volume attach status: %(status)s is invalid, " "valid statuses are: " "%(valid)s.") % {'status': param_value, 'valid': valid_attach_status} raise exception.InvalidParameterValue(err=msg) return True @jsonschema.FormatChecker.cls_checks('volume_migration_status') def _validate_volume_migration_status(param_value): if param_value and ( param_value.lower() not in c_fields.VolumeMigrationStatus.ALL): msg = _("Volume migration status: %(status)s is invalid, " "valid statuses are: " "%(valid)s.") % {'status': param_value, 'valid': c_fields.VolumeMigrationStatus.ALL} raise exception.InvalidParameterValue(err=msg) return True @jsonschema.FormatChecker.cls_checks('snapshot_status') def _validate_snapshot_status(param_value): if not param_value or ( param_value.lower() not in c_fields.SnapshotStatus.ALL): msg = _("Snapshot status: %(status)s is invalid, " "valid statuses are: " "%(valid)s.") % {'status': param_value, 'valid': c_fields.SnapshotStatus.ALL} raise exception.InvalidParameterValue(err=msg) return True @jsonschema.FormatChecker.cls_checks('backup_status') def _validate_backup_status(param_value): valid_status = [c_fields.BackupStatus.AVAILABLE, c_fields.BackupStatus.ERROR] if not param_value or ( param_value.lower() not in valid_status): msg = _("Backup status: %(status)s is invalid, " "valid statuses are: " "%(valid)s.") % {'status': param_value, 'valid': valid_status} raise exception.InvalidParameterValue(err=msg) return True @jsonschema.FormatChecker.cls_checks('key_size') def _validate_key_size(param_value): if param_value is not None: if not strutils.is_int_like(param_value): raise exception.InvalidInput(reason=( _('key_size must be an integer.'))) return True @jsonschema.FormatChecker.cls_checks('mysql_text') def _validate_mysql_text(param_value): length = len(param_value.encode('utf8')) if length > 65535: return False return True class FormatChecker(jsonschema.FormatChecker): """A FormatChecker can output the message from cause exception We need understandable validation errors messages for users. When a custom checker has an exception, the FormatChecker will output a readable message provided by the checker. """ def check(self, param_value, format): """Check whether the param_value conforms to the given format. 
:argument param_value: the param_value to check :type: any primitive type (str, number, bool) :argument str format: the format that param_value should conform to :raises: :exc:`FormatError` if param_value does not conform to format """ if format not in self.checkers: return # For safety reasons custom checkers can be registered with # allowed exception types. Anything else will fall into the # default formatter. func, raises = self.checkers[format] result, cause = None, None try: result = func(param_value) except raises as e: cause = e if not result: msg = "%r is not a %r" % (param_value, format) raise jsonschema_exc.FormatError(msg, cause=cause) class _SchemaValidator(object): """A validator class This class is changed from Draft4Validator to validate minimum/maximum value of a string number(e.g. '10'). This changes can be removed when we tighten up the API definition and the XML conversion. Also FormatCheckers are added for checking data formats which would be passed through cinder api commonly. """ validator = None validator_org = jsonschema.Draft4Validator def __init__(self, schema, relax_additional_properties=False): validators = { 'minimum': self._validate_minimum, 'maximum': self._validate_maximum, } if relax_additional_properties: validators[ 'additionalProperties'] = _soft_validate_additional_properties validator_cls = jsonschema.validators.extend(self.validator_org, validators) format_checker = FormatChecker() self.validator = validator_cls(schema, format_checker=format_checker) def validate(self, *args, **kwargs): try: self.validator.validate(*args, **kwargs) except jsonschema.ValidationError as ex: if isinstance(ex.cause, exception.InvalidName): detail = ex.cause.msg elif len(ex.path) > 0: detail = _("Invalid input for field/attribute %(path)s." " Value: %(value)s. %(message)s") % { 'path': ex.path.pop(), 'value': ex.instance, 'message': ex.message } else: detail = ex.message raise exception.ValidationError(detail=detail) except TypeError as ex: # NOTE: If passing non string value to patternProperties parameter, # TypeError happens. Here is for catching the TypeError. detail = str(ex) raise exception.ValidationError(detail=detail) def _number_from_str(self, param_value): try: value = int(param_value) except (ValueError, TypeError): try: value = float(param_value) except (ValueError, TypeError): return None return value def _validate_minimum(self, validator, minimum, param_value, schema): param_value = self._number_from_str(param_value) if param_value is None: return return self.validator_org.VALIDATORS['minimum'](validator, minimum, param_value, schema) def _validate_maximum(self, validator, maximum, param_value, schema): param_value = self._number_from_str(param_value) if param_value is None: return return self.validator_org.VALIDATORS['maximum'](validator, maximum, param_value, schema) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/api/versions.py0000664000175000017500000000631500000000000017333 0ustar00zuulzuul00000000000000# Copyright 2010 OpenStack Foundation # Copyright 2015 Clinton Knight # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import copy from http import HTTPStatus from oslo_config import cfg from cinder.api import extensions from cinder.api import openstack from cinder.api.openstack import api_version_request from cinder.api.openstack import wsgi from cinder.api.views import versions as views_versions CONF = cfg.CONF _LINKS = [{ "rel": "describedby", "type": "text/html", "href": "https://docs.openstack.org/", }] _KNOWN_VERSIONS = { "v3.0": { "id": "v3.0", "status": "CURRENT", "version": api_version_request._MAX_API_VERSION, "min_version": api_version_request._MIN_API_VERSION, "updated": api_version_request.UPDATED, "links": _LINKS, "media-types": [{ "base": "application/json", "type": "application/vnd.openstack.volume+json;version=3", }] }, } class Versions(openstack.APIRouter): """Route versions requests.""" ExtensionManager = extensions.ExtensionManager def _setup_routes(self, mapper, ext_mgr): self.resources['versions'] = create_resource() mapper.connect('versions', '/', controller=self.resources['versions'], action='all') mapper.redirect('', '/') def _setup_ext_routes(self, mapper, ext_mgr): # NOTE(mriedem): The version router doesn't care about extensions. pass # NOTE (jose-castro-leon): Avoid to register extensions # on the versions router, the versions router does not offer # resources to be extended. def _setup_extensions(self, ext_mgr): pass class VersionsController(wsgi.Controller): def __init__(self): super(VersionsController, self).__init__(None) @wsgi.Controller.api_version('3.0') def index(self, req): # pylint: disable=E0102 """Return versions supported after the start of microversions.""" builder = views_versions.get_view_builder(req) known_versions = copy.deepcopy(_KNOWN_VERSIONS) return builder.build_versions(known_versions) # NOTE (cknight): Calling the versions API without # /v3 in the URL will lead to this unversioned # method, which should always return info about all # available versions. @wsgi.response(HTTPStatus.MULTIPLE_CHOICES) def all(self, req): """Return all known and enabled versions.""" builder = views_versions.get_view_builder(req) known_versions = copy.deepcopy(_KNOWN_VERSIONS) return builder.build_versions(known_versions) def create_resource(): return wsgi.Resource(VersionsController()) ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1759315577.059118 cinder-27.0.0/cinder/api/views/0000775000175000017500000000000000000000000016241 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/api/views/__init__.py0000664000175000017500000000000000000000000020340 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/api/views/availability_zones.py0000664000175000017500000000207300000000000022505 0ustar00zuulzuul00000000000000# Copyright (c) 2013 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import cinder.api.common class ViewBuilder(cinder.api.common.ViewBuilder): """Map cinder.volumes.api list_availability_zones response into dicts.""" def list(self, request, availability_zones): def fmt(az): return { 'zoneName': az['name'], 'zoneState': {'available': az['available']}, } return {'availabilityZoneInfo': [fmt(az) for az in availability_zones]} ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/api/views/backups.py0000664000175000017500000001021500000000000020242 0ustar00zuulzuul00000000000000# Copyright (C) 2012 Hewlett-Packard Development Company, L.P. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from cinder.api import common class ViewBuilder(common.ViewBuilder): """Model backup API responses as a python dictionary.""" _collection_name = "backups" def __init__(self): """Initialize view builder.""" super(ViewBuilder, self).__init__() def summary_list(self, request, backups, backup_count=None): """Show a list of backups without many details.""" return self._list_view(self.summary, request, backups, backup_count) def detail_list(self, request, backups, backup_count=None): """Detailed view of a list of backups .""" return self._list_view(self.detail, request, backups, backup_count, self._collection_name + '/detail') def summary(self, request, backup): """Generic, non-detailed view of a backup.""" return { 'backup': { 'id': backup['id'], 'name': backup['display_name'], 'links': self._get_links(request, backup['id']), }, } def restore_summary(self, request, restore): """Generic, non-detailed view of a restore.""" return { 'restore': { 'backup_id': restore['backup_id'], 'volume_id': restore['volume_id'], 'volume_name': restore['volume_name'], }, } def detail(self, request, backup): """Detailed view of a single backup.""" backup_dict = { 'backup': { 'id': backup.get('id'), 'status': backup.get('status'), 'size': backup.get('size'), 'object_count': backup.get('object_count'), 'availability_zone': backup.get('availability_zone'), 'container': backup.get('container'), 'created_at': backup.get('created_at'), 'updated_at': backup.get('updated_at'), 'name': backup.get('display_name'), 'description': backup.get('display_description'), 'fail_reason': backup.get('fail_reason'), 'volume_id': backup.get('volume_id'), 'links': self._get_links(request, backup['id']), 'is_incremental': backup.is_incremental, 'has_dependent_backups': backup.has_dependent_backups, 'snapshot_id': backup.snapshot_id, 'data_timestamp': backup.data_timestamp, } } return backup_dict def _list_view(self, func, request, backups, 
backup_count, coll_name=_collection_name): """Provide a view for a list of backups.""" backups_list = [func(request, backup)['backup'] for backup in backups] backups_links = self._get_collection_links(request, backups, coll_name, backup_count) backups_dict = dict(backups=backups_list) if backups_links: backups_dict['backups_links'] = backups_links if backup_count is not None: backups_dict['count'] = backup_count return backups_dict def export_summary(self, request, export): """Generic view of an export.""" return { 'backup-record': { 'backup_service': export['backup_service'], 'backup_url': export['backup_url'], }, } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/api/views/capabilities.py0000664000175000017500000000410400000000000021243 0ustar00zuulzuul00000000000000# Copyright (c) 2015 Hitachi Data Systems, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from cinder.api import common class ViewBuilder(common.ViewBuilder): """Model capabilities API responses as a python dictionary.""" _collection_name = "capabilities" def __init__(self): """Initialize view builder.""" super(ViewBuilder, self).__init__() def summary(self, request, capabilities, id): """Summary view of a backend capabilities.""" # Internally storage_protocol can be a list with all the variants (eg. # FC, fibre_channel), but we return a single value to users. The first # value is the preferred variant. storage_protocol = capabilities.get('storage_protocol') if isinstance(storage_protocol, list): storage_protocol = storage_protocol[0] return { 'namespace': 'OS::Storage::Capabilities::%s' % id, 'vendor_name': capabilities.get('vendor_name'), 'volume_backend_name': capabilities.get('volume_backend_name'), 'pool_name': capabilities.get('pool_name'), 'driver_version': capabilities.get('driver_version'), 'storage_protocol': storage_protocol, 'display_name': capabilities.get('display_name'), 'description': capabilities.get('description'), 'visibility': capabilities.get('visibility'), 'replication_targets': capabilities.get('replication_targets', []), 'properties': capabilities.get('properties'), } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/api/views/cgsnapshots.py0000664000175000017500000000501700000000000021152 0ustar00zuulzuul00000000000000# Copyright (C) 2012 - 2014 EMC Corporation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
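# NOTE: illustrative annotation, not part of the original module. The
# detail() view below first tries cgsnapshot.consistencygroup_id and falls
# back to cgsnapshot.group_id (or None) before building a response shaped
# roughly like (field values are placeholders, not real data):
#
#     {'cgsnapshot': {'id': ..., 'consistencygroup_id': ...,
#                     'status': ..., 'created_at': ...,
#                     'name': ..., 'description': ...}}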
from cinder.api import common class ViewBuilder(common.ViewBuilder): """Model cgsnapshot API responses as a python dictionary.""" _collection_name = "cgsnapshots" def __init__(self): """Initialize view builder.""" super(ViewBuilder, self).__init__() def summary_list(self, request, cgsnapshots): """Show a list of cgsnapshots without many details.""" return self._list_view(self.summary, request, cgsnapshots) def detail_list(self, request, cgsnapshots): """Detailed view of a list of cgsnapshots .""" return self._list_view(self.detail, request, cgsnapshots) def summary(self, request, cgsnapshot): """Generic, non-detailed view of a cgsnapshot.""" return { 'cgsnapshot': { 'id': cgsnapshot.id, 'name': cgsnapshot.name } } def detail(self, request, cgsnapshot): """Detailed view of a single cgsnapshot.""" try: group_id = cgsnapshot.consistencygroup_id except AttributeError: try: group_id = cgsnapshot.group_id except AttributeError: group_id = None else: group_id = None return { 'cgsnapshot': { 'id': cgsnapshot.id, 'consistencygroup_id': group_id, 'status': cgsnapshot.status, 'created_at': cgsnapshot.created_at, 'name': cgsnapshot.name, 'description': cgsnapshot.description } } def _list_view(self, func, request, cgsnapshots): """Provide a view for a list of cgsnapshots.""" cgsnapshots_list = [func(request, cgsnapshot)['cgsnapshot'] for cgsnapshot in cgsnapshots] cgsnapshots_dict = dict(cgsnapshots=cgsnapshots_list) return cgsnapshots_dict ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/api/views/consistencygroups.py0000664000175000017500000000636700000000000022430 0ustar00zuulzuul00000000000000# Copyright (C) 2012 - 2014 EMC Corporation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
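# NOTE: illustrative annotation, not part of the original module. In the
# detail() view below, 'volume_types' is normally derived by splitting the
# comma-separated volume_type_id string and dropping empty entries, e.g.
# (values assumed for illustration):
#
#     "aaa,bbb," -> ["aaa", "bbb"]
#
# If the object has no volume_type_id attribute, the view falls back to the
# ids of the related volume_types objects, or to an empty list.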
from cinder.api import common class ViewBuilder(common.ViewBuilder): """Model consistencygroup API responses as a python dictionary.""" _collection_name = "consistencygroups" def __init__(self): """Initialize view builder.""" super(ViewBuilder, self).__init__() def summary_list(self, request, consistencygroups): """Show a list of consistency groups without many details.""" return self._list_view(self.summary, request, consistencygroups) def detail_list(self, request, consistencygroups): """Detailed view of a list of consistency groups .""" return self._list_view(self.detail, request, consistencygroups) def summary(self, request, consistencygroup): """Generic, non-detailed view of a consistency group.""" return { 'consistencygroup': { 'id': consistencygroup.id, 'name': consistencygroup.name } } def detail(self, request, consistencygroup): """Detailed view of a single consistency group.""" try: volume_types = (consistencygroup.volume_type_id.split(",") if consistencygroup.volume_type_id else []) volume_types = [type_id for type_id in volume_types if type_id] except AttributeError: try: volume_types = [v_type.id for v_type in consistencygroup.volume_types] except AttributeError: volume_types = [] return { 'consistencygroup': { 'id': consistencygroup.id, 'status': consistencygroup.status, 'availability_zone': consistencygroup.availability_zone, 'created_at': consistencygroup.created_at, 'name': consistencygroup.name, 'description': consistencygroup.description, 'volume_types': volume_types, } } def _list_view(self, func, request, consistencygroups): """Provide a view for a list of consistency groups.""" consistencygroups_list = [ func(request, consistencygroup)['consistencygroup'] for consistencygroup in consistencygroups] cg_links = self._get_collection_links(request, consistencygroups, self._collection_name) consistencygroups_dict = dict(consistencygroups=consistencygroups_list) if cg_links: consistencygroups_dict['consistencygroup_links'] = cg_links return consistencygroups_dict ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/api/views/limits.py0000664000175000017500000000612200000000000020115 0ustar00zuulzuul00000000000000# Copyright 2010-2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import datetime class ViewBuilder(object): """OpenStack API base limits view builder.""" def build(self, rate_limits, absolute_limits): rate_limits = self._build_rate_limits(rate_limits) absolute_limits = self._build_absolute_limits(absolute_limits) output = { "limits": { "rate": rate_limits, "absolute": absolute_limits, }, } return output def _build_absolute_limits(self, absolute_limits): """Builder for absolute limits absolute_limits should be given as a dict of limits. For example: {"ram": 512, "gigabytes": 1024}. 
""" limit_names = { "gigabytes": ["maxTotalVolumeGigabytes"], "backup_gigabytes": ["maxTotalBackupGigabytes"], "volumes": ["maxTotalVolumes"], "snapshots": ["maxTotalSnapshots"], "backups": ["maxTotalBackups"], } limits = {} for name, value in absolute_limits.items(): if name in limit_names and value is not None: for name in limit_names[name]: limits[name] = value return limits def _build_rate_limits(self, rate_limits): limits = [] for rate_limit in rate_limits: _rate_limit_key = None _rate_limit = self._build_rate_limit(rate_limit) # check for existing key for limit in limits: if (limit["uri"] == rate_limit["URI"] and limit["regex"] == rate_limit["regex"]): _rate_limit_key = limit break # ensure we have a key if we didn't find one if not _rate_limit_key: _rate_limit_key = { "uri": rate_limit["URI"], "regex": rate_limit["regex"], "limit": [], } limits.append(_rate_limit_key) _rate_limit_key["limit"].append(_rate_limit) return limits def _build_rate_limit(self, rate_limit): next_avail = datetime.datetime.fromtimestamp( rate_limit["resetTime"], tz=datetime.timezone.utc) return { "verb": rate_limit["verb"], "value": rate_limit["value"], "remaining": int(rate_limit["remaining"]), "unit": rate_limit["unit"], "next-available": next_avail.replace(tzinfo=None).isoformat(), } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/api/views/manageable_snapshots.py0000664000175000017500000000434000000000000022772 0ustar00zuulzuul00000000000000# Copyright (c) 2016 Stratoscale, Ltd. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from cinder.api import common class ViewBuilder(common.ViewBuilder): """Model manageable snapshot responses as a python dictionary.""" _collection_name = "os-snapshot-manage" def summary_list(self, request, snapshots, count): """Show a list of manageable snapshots without many details.""" return self._list_view(self.summary, request, snapshots, count) def detail_list(self, request, snapshots, count): """Detailed view of a list of manageable snapshots.""" return self._list_view(self.detail, request, snapshots, count) def summary(self, request, snapshot): """Generic, non-detailed view of a manageable snapshot description.""" return { 'reference': snapshot['reference'], 'size': snapshot['size'], 'safe_to_manage': snapshot['safe_to_manage'], 'source_reference': snapshot['source_reference'] } def detail(self, request, snapshot): """Detailed view of a manageable snapshot description.""" return { 'reference': snapshot['reference'], 'size': snapshot['size'], 'safe_to_manage': snapshot['safe_to_manage'], 'reason_not_safe': snapshot['reason_not_safe'], 'extra_info': snapshot['extra_info'], 'cinder_id': snapshot['cinder_id'], 'source_reference': snapshot['source_reference'] } def _list_view(self, func, request, snapshots, count): """Provide a view for a list of manageable snapshots.""" snap_list = [func(request, snapshot) for snapshot in snapshots] return {"manageable-snapshots": snap_list} ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/api/views/manageable_volumes.py0000664000175000017500000000406600000000000022447 0ustar00zuulzuul00000000000000# Copyright (c) 2016 Stratoscale, Ltd. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from cinder.api import common class ViewBuilder(common.ViewBuilder): """Model manageable volume responses as a python dictionary.""" _collection_name = "os-volume-manage" def summary_list(self, request, volumes, count): """Show a list of manageable volumes without many details.""" return self._list_view(self.summary, request, volumes, count) def detail_list(self, request, volumes, count): """Detailed view of a list of manageable volumes.""" return self._list_view(self.detail, request, volumes, count) def summary(self, request, volume): """Generic, non-detailed view of a manageable volume description.""" return { 'reference': volume['reference'], 'size': volume['size'], 'safe_to_manage': volume['safe_to_manage'] } def detail(self, request, volume): """Detailed view of a manageable volume description.""" return { 'reference': volume['reference'], 'size': volume['size'], 'safe_to_manage': volume['safe_to_manage'], 'reason_not_safe': volume['reason_not_safe'], 'cinder_id': volume['cinder_id'], 'extra_info': volume['extra_info'] } def _list_view(self, func, request, volumes, count): """Provide a view for a list of manageable volumes.""" vol_list = [func(request, volume) for volume in volumes] return {"manageable-volumes": vol_list} ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/api/views/qos_specs.py0000664000175000017500000000463200000000000020617 0ustar00zuulzuul00000000000000# Copyright (C) 2013 eBay Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
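# NOTE: illustrative annotation, not part of the original module. In the view
# builder below, summary() simply delegates to detail(), and detail() returns
# the spec under a 'qos_specs' key with the pagination 'links' kept outside
# of it, roughly (shape only, values assumed):
#
#     {'qos_specs': {'id': ..., 'name': ..., 'consumer': ..., 'specs': {...}},
#      'links': [...]}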
from cinder.api import common class ViewBuilder(common.ViewBuilder): """Model QoS specs API responses as a python dictionary.""" _collection_name = "qos-specs" def __init__(self): """Initialize view builder.""" super(ViewBuilder, self).__init__() def summary_list(self, request, qos_specs, qos_count=None): """Show a list of qos_specs without many details.""" return self._list_view(self.detail, request, qos_specs, qos_count) def summary(self, request, qos_spec): """Generic, non-detailed view of a qos_specs.""" return self.detail(request, qos_spec) def detail(self, request, qos_spec): """Detailed view of a single qos_spec.""" # TODO(zhiteng) Add associations to detailed view return { 'qos_specs': { 'id': qos_spec.id, 'name': qos_spec.name, 'consumer': qos_spec.consumer, 'specs': qos_spec.specs, }, 'links': self._get_links(request, qos_spec.id), } def associations(self, request, associates): """View of qos specs associations.""" return { 'qos_associations': associates } def _list_view(self, func, request, qos_specs, qos_count=None): """Provide a view for a list of qos_specs.""" specs_list = [func(request, specs)['qos_specs'] for specs in qos_specs] specs_links = self._get_collection_links(request, qos_specs, self._collection_name, qos_count) specs_dict = dict(qos_specs=specs_list) if specs_links: specs_dict['qos_specs_links'] = specs_links return specs_dict ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/api/views/scheduler_stats.py0000664000175000017500000000423600000000000022014 0ustar00zuulzuul00000000000000# Copyright (C) 2014 eBay Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from cinder.api import common class ViewBuilder(common.ViewBuilder): """Model scheduler-stats API responses as a python dictionary.""" _collection_name = "scheduler-stats" def __init__(self): """Initialize view builder.""" super(ViewBuilder, self).__init__() def summary(self, request, pool): """Summary view of a single pool.""" return { 'pool': { 'name': pool.get('name'), } } def detail(self, request, pool): """Detailed view of a single pool.""" # Internally storage_protocol can be a list with all the variants (eg. # FC, fibre_channel), but we return a single value to users. The first # value is the preferred variant. 
capabilities = pool.get('capabilities') if capabilities: protocol = capabilities.get('storage_protocol') if isinstance(protocol, list): capabilities = capabilities.copy() capabilities['storage_protocol'] = protocol[0] return { 'pool': { 'name': pool.get('name'), 'capabilities': capabilities, } } def pools(self, request, pools, detail): """Detailed/Summary view of a list of pools seen by scheduler.""" if detail: plist = [self.detail(request, pool)['pool'] for pool in pools] else: plist = [self.summary(request, pool)['pool'] for pool in pools] pools_dict = dict(pools=plist) return pools_dict ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/api/views/snapshots.py0000664000175000017500000000616300000000000020643 0ustar00zuulzuul00000000000000# Copyright (c) 2015 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from cinder.api import common class ViewBuilder(common.ViewBuilder): """Model snapshot API responses as a python dictionary.""" _collection_name = "snapshots" def __init__(self): """Initialize view builder.""" super(ViewBuilder, self).__init__() def summary_list(self, request, snapshots, snapshot_count=None): """Show a list of snapshots without many details.""" return self._list_view(self.summary, request, snapshots, snapshot_count) def detail_list(self, request, snapshots, snapshot_count=None): """Detailed view of a list of snapshots.""" return self._list_view(self.detail, request, snapshots, snapshot_count, coll_name=self._collection_name + '/detail') def summary(self, request, snapshot): """Generic, non-detailed view of a snapshot.""" if isinstance(snapshot.metadata, dict): metadata = snapshot.metadata else: metadata = {} return { 'snapshot': { 'id': snapshot.id, 'created_at': snapshot.created_at, 'updated_at': snapshot.updated_at, 'name': snapshot.display_name, 'description': snapshot.display_description, 'volume_id': snapshot.volume_id, 'status': snapshot.status, 'size': snapshot.volume_size, 'metadata': metadata, } } def detail(self, request, snapshot): """Detailed view of a single snapshot.""" # NOTE(geguileo): No additional data at the moment return self.summary(request, snapshot) def _list_view(self, func, request, snapshots, snapshot_count, coll_name=_collection_name): """Provide a view for a list of snapshots.""" snapshots_list = [func(request, snapshot)['snapshot'] for snapshot in snapshots] snapshots_links = self._get_collection_links(request, snapshots, coll_name, snapshot_count) snapshots_dict = {self._collection_name: snapshots_list} if snapshots_links: snapshots_dict[self._collection_name + '_links'] = snapshots_links if snapshot_count is not None: snapshots_dict['count'] = snapshot_count return snapshots_dict ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/api/views/transfers.py0000664000175000017500000001133000000000000020620 0ustar00zuulzuul00000000000000# Copyright (C) 2012 Hewlett-Packard 
Development Company, L.P. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from cinder.api import common from cinder.api import microversions as mv class ViewBuilder(common.ViewBuilder): """Model transfer API responses as a python dictionary.""" _collection_name = "os-volume-transfer" def __init__(self): """Initialize view builder.""" super(ViewBuilder, self).__init__() def summary_list(self, request, transfers, origin_transfer_count): """Show a list of transfers without many details.""" return self._list_view(self.summary, request, transfers, origin_transfer_count) def detail_list(self, request, transfers, origin_transfer_count): """Detailed view of a list of transfers .""" return self._list_view(self.detail, request, transfers, origin_transfer_count) def summary(self, request, transfer): """Generic, non-detailed view of a transfer.""" return { 'transfer': { 'id': transfer['id'], 'volume_id': transfer.get('volume_id'), 'name': transfer['display_name'], 'links': self._get_links(request, transfer['id']), }, } def detail(self, request, transfer): """Detailed view of a single transfer.""" detail_body = { 'transfer': { 'id': transfer.get('id'), 'created_at': transfer.get('created_at'), 'name': transfer.get('display_name'), 'volume_id': transfer.get('volume_id'), 'links': self._get_links(request, transfer['id']) } } req_version = request.api_version_request if req_version.matches(mv.TRANSFER_WITH_SNAPSHOTS): detail_body['transfer'].update({'no_snapshots': transfer.get('no_snapshots')}) if req_version.matches(mv.TRANSFER_WITH_HISTORY): transfer_history = { 'destination_project_id': transfer['destination_project_id'], 'source_project_id': transfer['source_project_id'], 'accepted': transfer['accepted'] } detail_body['transfer'].update(transfer_history) return detail_body def create(self, request, transfer): """Detailed view of a single transfer when created.""" create_body = { 'transfer': { 'id': transfer.get('id'), 'created_at': transfer.get('created_at'), 'name': transfer.get('display_name'), 'volume_id': transfer.get('volume_id'), 'auth_key': transfer.get('auth_key'), 'links': self._get_links(request, transfer['id']) } } req_version = request.api_version_request if req_version.matches(mv.TRANSFER_WITH_SNAPSHOTS): create_body['transfer'].update({'no_snapshots': transfer.get('no_snapshots')}) if req_version.matches(mv.TRANSFER_WITH_HISTORY): transfer_history = { 'destination_project_id': transfer['destination_project_id'], 'source_project_id': transfer['source_project_id'], 'accepted': transfer['accepted'] } create_body['transfer'].update(transfer_history) return create_body def _list_view(self, func, request, transfers, origin_transfer_count): """Provide a view for a list of transfers.""" transfers_list = [func(request, transfer)['transfer'] for transfer in transfers] transfers_links = self._get_collection_links(request, transfers, self._collection_name, origin_transfer_count) transfers_dict = dict(transfers=transfers_list) if transfers_links: transfers_dict['transfers_links'] = 
transfers_links return transfers_dict ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/api/views/types.py0000664000175000017500000000265200000000000017764 0ustar00zuulzuul00000000000000# Copyright 2012 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from cinder.api import common class ViewBuilder(common.ViewBuilder): def show(self, request, volume_type, brief=False): """Trim away extraneous volume type attributes.""" trimmed = dict(id=volume_type.get('id'), name=volume_type.get('name'), is_public=volume_type.get('is_public'), extra_specs=volume_type.get('extra_specs'), description=volume_type.get('description')) return trimmed if brief else dict(volume_type=trimmed) def index(self, request, volume_types): """Index over trimmed volume types.""" volume_types_list = [self.show(request, volume_type, True) for volume_type in volume_types] return dict(volume_types=volume_types_list) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/api/views/versions.py0000664000175000017500000000546000000000000020470 0ustar00zuulzuul00000000000000# Copyright 2010-2011 OpenStack Foundation # Copyright 2015 Clinton Knight # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import copy import re import urllib from oslo_config import cfg versions_opts = [ cfg.StrOpt('public_endpoint', help="Public url to use for versions endpoint. The default " "is None, which will use the request's host_url " "attribute to populate the URL base. If Cinder is " "operating behind a proxy, you will want to change " "this to represent the proxy's URL."), ] CONF = cfg.CONF CONF.register_opts(versions_opts) def get_view_builder(req): base_url = CONF.public_endpoint or req.application_url return ViewBuilder(base_url) class ViewBuilder(object): def __init__(self, base_url): """Initialize ViewBuilder. 
:param base_url: url of the root wsgi application """ self.base_url = base_url def build_versions(self, versions): views = [self._build_version(versions[key]) for key in sorted(list(versions.keys()))] return dict(versions=views) def _build_version(self, version): view = copy.deepcopy(version) view['links'] = self._build_links(version) return view def _build_links(self, version_data): """Generate a container of links that refer to the provided version.""" links = copy.deepcopy(version_data.get('links', {})) version_num = version_data["id"].split('.')[0] links.append({'rel': 'self', 'href': self._generate_href(version=version_num)}) return links def _generate_href(self, version='v3', path=None): """Create a URL that refers to a specific version_number.""" base_url = self._get_base_url_without_version() # Always add '/' to base_url end for urljoin href url base_url = base_url.rstrip('/') + '/' rel_version = version.lstrip('/') href = urllib.parse.urljoin(base_url, rel_version).rstrip('/') + '/' if path: href += path.lstrip('/') return href def _get_base_url_without_version(self): """Get the base URL with out the /v3 suffix.""" return re.sub('v[1-9]+/?$', '', self.base_url) ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1759315577.063118 cinder-27.0.0/cinder/backup/0000775000175000017500000000000000000000000015600 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/backup/__init__.py0000664000175000017500000000200200000000000017703 0ustar00zuulzuul00000000000000# Copyright (C) 2012 Hewlett-Packard Development Company, L.P. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # Importing full names to not pollute the namespace and cause possible # collisions with use of 'from cinder.backup import ' elsewhere. from oslo_utils import importutils from cinder.common import config CONF = config.CONF def API(*args, **kwargs): class_name = CONF.backup_api_class return importutils.import_object(class_name, *args, **kwargs) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/backup/api.py0000664000175000017500000006534500000000000016740 0ustar00zuulzuul00000000000000# Copyright (C) 2012 Hewlett-Packard Development Company, L.P. # Copyright (c) 2014 TrilioData, Inc # Copyright (c) 2015 EMC Corporation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
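# NOTE: illustrative annotation, not part of the original module. Callers
# normally reach the API class defined here through the factory in
# cinder/backup/__init__.py above, which instantiates whatever class
# CONF.backup_api_class names (this module's API class in a stock setup),
# e.g. (sketch only):
#
#     from cinder import backup
#     backup_api = backup.API()
#     backup_api.create(ctxt, name, description, volume_id, container)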
"""Handles all requests relating to the volume backups service.""" from datetime import datetime import random from typing import Optional from zoneinfo import ZoneInfo from eventlet import greenthread from oslo_config import cfg from oslo_log import log as logging from oslo_utils import excutils from oslo_utils import strutils from cinder.backup import rpcapi as backup_rpcapi from cinder.common import constants from cinder import context from cinder.db import base from cinder import exception from cinder.i18n import _ from cinder import objects from cinder.objects import fields from cinder.policies import backup_actions as backup_action_policy from cinder.policies import backups as policy import cinder.policy from cinder import quota from cinder import quota_utils from cinder.scheduler import rpcapi as scheduler_rpcapi import cinder.volume backup_opts = [ cfg.BoolOpt('backup_use_same_host', default=False, help='Backup services use same backend.') ] CONF = cfg.CONF CONF.register_opts(backup_opts) LOG = logging.getLogger(__name__) QUOTAS = quota.QUOTAS IMPORT_VOLUME_ID = '00000000-0000-0000-0000-000000000000' class API(base.Base): """API for interacting with the volume backup manager.""" def __init__(self): self.backup_rpcapi = backup_rpcapi.BackupAPI() self.scheduler_rpcapi = scheduler_rpcapi.SchedulerAPI() self.volume_api = cinder.volume.API() super().__init__() def get(self, context: context.RequestContext, backup_id: str) -> 'objects.Backup': backup = objects.Backup.get_by_id(context, backup_id) context.authorize(policy.GET_POLICY, target_obj=backup) return backup def _check_support_to_force_delete(self, context: context.RequestContext, backup_host: str) -> bool: result = self.backup_rpcapi.check_support_to_force_delete(context, backup_host) return result def delete(self, context: context.RequestContext, backup: 'objects.Backup', force: bool = False) -> None: """Make the RPC call to delete a volume backup. Call backup manager to execute backup delete or force delete operation. :param context: running context :param backup: the dict of backup that is got from DB. :param force: indicate force delete or not :raises InvalidBackup: :raises BackupDriverException: :raises ServiceNotFound: """ context.authorize(policy.DELETE_POLICY, target_obj=backup) if not force and backup.status not in [fields.BackupStatus.AVAILABLE, fields.BackupStatus.ERROR]: msg = _('Backup status must be available or error') raise exception.InvalidBackup(reason=msg) if force and not self._check_support_to_force_delete(context, backup.host): msg = _('force delete') raise exception.NotSupportedOperation(operation=msg) # Don't allow backup to be deleted if there are incremental # backups dependent on it. 
deltas = self.get_all(context, search_opts={'parent_id': backup.id}) if deltas and len(deltas): msg = _('Incremental backups exist for this backup.') raise exception.InvalidBackup(reason=msg) backup.status = fields.BackupStatus.DELETING backup.host = self._get_available_backup_service_host( backup.host, backup.availability_zone) backup.save() self.backup_rpcapi.delete_backup(context, backup) def get_all(self, context: context.RequestContext, search_opts: Optional[dict] = None, marker: Optional[str] = None, limit: Optional[int] = None, offset: Optional[int] = None, sort_keys: Optional[list[str]] = None, sort_dirs: Optional[list[str]] = None) -> 'objects.BackupList': context.authorize(policy.GET_ALL_POLICY) search_opts = search_opts or {} all_tenants = search_opts.pop('all_tenants', '0') if not strutils.is_valid_boolstr(all_tenants): msg = _("all_tenants must be a boolean, got '%s'.") % all_tenants raise exception.InvalidParameterValue(err=msg) if context.is_admin and strutils.bool_from_string(all_tenants): backups = objects.BackupList.get_all(context, search_opts, marker, limit, offset, sort_keys, sort_dirs) else: backups = objects.BackupList.get_all_by_project( context, context.project_id, search_opts, marker, limit, offset, sort_keys, sort_dirs ) return backups def _az_matched(self, service: 'objects.Service', availability_zone: str) -> bool: return ((not availability_zone) or service.availability_zone == availability_zone) def _is_backup_service_enabled(self, availability_zone: str, host: str) -> bool: """Check if there is a backup service available.""" topic = constants.BACKUP_TOPIC ctxt = context.get_admin_context() services = objects.ServiceList.get_all_by_topic( ctxt, topic, disabled=False) for srv in services: if (self._az_matched(srv, availability_zone) and srv.host == host and srv.is_up): return True return False def _get_any_available_backup_service( self, availability_zone: str) -> Optional[str]: """Get an available backup service host. Get an available backup service host in the specified availability zone. """ services = [srv for srv in self._list_backup_services()] random.shuffle(services) # Get the next running service with matching availability zone. idx = 0 while idx < len(services): srv = services[idx] if self._az_matched(srv, availability_zone) and srv.is_up: return srv.host idx = idx + 1 return None def get_available_backup_service_host(self, host: str, az: str) -> str: return self._get_available_backup_service_host(host, az) def _get_available_backup_service_host(self, host: str, az: str) -> str: """Return an appropriate backup service host.""" backup_host = None if not host or not CONF.backup_use_same_host: backup_host = self._get_any_available_backup_service(az) elif self._is_backup_service_enabled(az, host): backup_host = host if not backup_host: raise exception.ServiceNotFound(service_id='cinder-backup') return backup_host def _list_backup_services(self) -> list['objects.Service']: """List all enabled backup services. :returns: list -- hosts for services that are enabled for backup. 
""" topic = constants.BACKUP_TOPIC ctxt = context.get_admin_context() services = objects.ServiceList.get_all_by_topic( ctxt, topic, disabled=False) return services def _list_backup_hosts(self) -> list: services = self._list_backup_services() return [srv.host for srv in services if not srv.disabled and srv.is_up] def create(self, context: context.RequestContext, name: Optional[str], description: Optional[str], volume_id: str, container: str, incremental: bool = False, availability_zone: Optional[str] = None, force: bool = False, snapshot_id: Optional[str] = None, metadata: Optional[dict] = None) -> 'objects.Backup': """Make the RPC call to create a volume backup.""" volume = self.volume_api.get(context, volume_id) context.authorize(policy.CREATE_POLICY, target_obj=volume) snapshot = None if snapshot_id: snapshot = self.volume_api.get_snapshot(context, snapshot_id) if volume_id != snapshot.volume_id: msg = (_('Volume %(vol1)s does not match with ' 'snapshot.volume_id %(vol2)s.') % {'vol1': volume_id, 'vol2': snapshot.volume_id}) raise exception.InvalidVolume(reason=msg) if snapshot['status'] not in ["available"]: msg = (_('Snapshot to be backed up must be available, ' 'but the current status is "%s".') % snapshot['status']) raise exception.InvalidSnapshot(reason=msg) elif volume['status'] not in ["available", "in-use"]: msg = (_('Volume to be backed up must be available ' 'or in-use, but the current status is "%s".') % volume['status']) raise exception.InvalidVolume(reason=msg) elif volume['status'] in ["in-use"] and not force: msg = _('Backing up an in-use volume must use ' 'the force flag.') raise exception.InvalidVolume(reason=msg) previous_status = volume['status'] # Reserve a quota before setting volume status and backup status try: reserve_opts = {'backups': 1, 'backup_gigabytes': volume['size']} reservations = QUOTAS.reserve(context, **reserve_opts) except exception.OverQuota as e: quota_utils.process_reserve_over_quota( context, e, resource='backups', size=volume.size) # Find the latest backup and use it as the parent backup to do an # incremental backup. latest_backup = None latest_host = None if incremental: backups = objects.BackupList.get_all_by_volume( context, volume_id, volume['project_id'], filters={'project_id': context.project_id}) if backups.objects: # NOTE(xyang): The 'data_timestamp' field records the time # when the data on the volume was first saved. If it is # a backup from volume, 'data_timestamp' will be the same # as 'created_at' for a backup. If it is a backup from a # snapshot, 'data_timestamp' will be the same as # 'created_at' for a snapshot. # If not backing up from snapshot, the backup with the latest # 'data_timestamp' will be the parent; If backing up from # snapshot, the backup with the latest 'data_timestamp' will # be chosen only if 'data_timestamp' is earlier than the # 'created_at' timestamp of the snapshot; Otherwise, the # backup will not be chosen as the parent. # For example, a volume has a backup taken at 8:00, then # a snapshot taken at 8:10, and then a backup at 8:20. # When taking an incremental backup of the snapshot, the # parent should be the backup at 8:00, not 8:20, and the # 'data_timestamp' of this new backup will be 8:10. 
latest_backup = max( backups.objects, key=lambda x: x['data_timestamp'] if (x['status'] == fields.BackupStatus.AVAILABLE and ( not snapshot or (snapshot and x['data_timestamp'] < snapshot['created_at']))) else datetime(1, 1, 1, 1, 1, 1, tzinfo=ZoneInfo('UTC'))) else: QUOTAS.rollback(context, reservations) msg = _('No backups available to do an incremental backup.') raise exception.InvalidBackup(reason=msg) parent_id = None parent = None if latest_backup: parent = latest_backup parent_id = latest_backup.id if 'posix' in latest_backup.service: # The posix driver needs to schedule incremental backups # on the same host as the last backup, otherwise there's # nothing to base the incremental backup on. latest_host = latest_backup.host if latest_backup['status'] != fields.BackupStatus.AVAILABLE: QUOTAS.rollback(context, reservations) msg = _('No backups available to do an incremental backup.') raise exception.InvalidBackup(reason=msg) data_timestamp = None if snapshot_id: snapshot = objects.Snapshot.get_by_id(context, snapshot_id) data_timestamp = snapshot.created_at self.db.snapshot_update( context, snapshot_id, {'status': fields.SnapshotStatus.BACKING_UP}) else: self.db.volume_update(context, volume_id, {'status': 'backing-up', 'previous_status': previous_status}) kwargs = { 'user_id': context.user_id, 'project_id': context.project_id, 'display_name': name, 'display_description': description, 'volume_id': volume_id, 'status': fields.BackupStatus.CREATING, 'container': container, 'parent_id': parent_id, 'size': volume['size'], 'snapshot_id': snapshot_id, 'data_timestamp': data_timestamp, 'parent': parent, 'host': latest_host, 'metadata': metadata or {}, 'availability_zone': availability_zone } try: backup = objects.Backup(context=context, **kwargs) except Exception: with excutils.save_and_reraise_exception(): QUOTAS.rollback(context, reservations) try: backup.create() if not snapshot_id: backup.data_timestamp = backup.created_at backup.save() QUOTAS.commit(context, reservations) except Exception: with excutils.save_and_reraise_exception(): try: if 'id' in backup: backup.destroy() finally: QUOTAS.rollback(context, reservations) self.scheduler_rpcapi.create_backup(context, backup) return backup def restore(self, context: context.RequestContext, backup_id: str, volume_id: Optional[str] = None, name: Optional[str] = None) -> dict: """Make the RPC call to restore a volume backup.""" backup = self.get(context, backup_id) context.authorize(policy.RESTORE_POLICY, target_obj=backup) if backup['status'] != fields.BackupStatus.AVAILABLE: msg = _('Backup status must be available') raise exception.InvalidBackup(reason=msg) size = backup['size'] if size is None: msg = _('Backup to be restored has invalid size') raise exception.InvalidBackup(reason=msg) # Create a volume if none specified. 
If a volume is specified check # it is large enough for the backup if volume_id is None: if name is None: name = 'restore_backup_%s' % backup_id description = 'auto-created_from_restore_from_backup' LOG.info("Creating volume of %(size)s GB for restore of " "backup %(backup_id)s.", {'size': size, 'backup_id': backup_id}) volume = self.volume_api.create(context, size, name, description) volume_is_new = True volume_id = volume['id'] while True: volume = self.volume_api.get(context, volume_id) if volume['status'] != 'creating': break greenthread.sleep(1) if volume['status'] == "error": msg = (_('Error while creating volume %(volume_id)s ' 'for restoring backup %(backup_id)s.') % {'volume_id': volume_id, 'backup_id': backup_id}) raise exception.InvalidVolume(reason=msg) else: volume = self.volume_api.get(context, volume_id) volume_is_new = False if volume['status'] != "available": msg = _('Volume to be restored to must be available') raise exception.InvalidVolume(reason=msg) LOG.debug('Checking backup size %(bs)s against volume size %(vs)s', {'bs': size, 'vs': volume['size']}) if size > volume['size']: msg = (_('volume size %(volume_size)d is too small to restore ' 'backup of size %(size)d.') % {'volume_size': volume['size'], 'size': size}) raise exception.InvalidVolume(reason=msg) LOG.info("Overwriting volume %(volume_id)s with restore of " "backup %(backup_id)s", {'volume_id': volume_id, 'backup_id': backup_id}) # Setting the status here rather than setting at start and unrolling # for each error condition, it should be a very small window backup.host = self._get_available_backup_service_host( backup.host, backup.availability_zone) backup.status = fields.BackupStatus.RESTORING backup.restore_volume_id = volume.id backup.save() self.db.volume_update(context, volume_id, {'status': 'restoring-backup'}) self.backup_rpcapi.restore_backup(context, backup.host, backup, volume_id, volume_is_new) d = {'backup_id': backup_id, 'volume_id': volume_id, 'volume_name': volume['display_name'], } return d def reset_status(self, context: context.RequestContext, backup_id: str, status: str) -> None: """Make the RPC call to reset a volume backup's status. Call backup manager to execute backup status reset operation. :param context: running context :param backup_id: which backup's status to be reset :param status: backup's status to be reset :raises InvalidBackup: """ # get backup info backup = self.get(context, backup_id) context.authorize( backup_action_policy.BASE_POLICY_NAME % "reset_status", target_obj=backup) backup.host = self._get_available_backup_service_host( backup.host, backup.availability_zone) backup.save() # send to manager to do reset operation self.backup_rpcapi.reset_status(ctxt=context, backup=backup, status=status) def export_record(self, context: context.RequestContext, backup_id: str) -> dict: """Make the RPC call to export a volume backup. Call backup manager to execute backup export. 
:param context: running context :param backup_id: backup id to export :returns: dictionary -- a description of how to import the backup :returns: contains 'backup_url' and 'backup_service' :raises InvalidBackup: """ backup = self.get(context, backup_id) context.authorize(policy.EXPORT_POLICY, target_obj=backup) if backup['status'] != fields.BackupStatus.AVAILABLE: msg = (_('Backup status must be available and not %s.') % backup['status']) raise exception.InvalidBackup(reason=msg) LOG.debug("Calling RPCAPI with context: " "%(ctx)s, host: %(host)s, backup: %(id)s.", {'ctx': context, 'host': backup['host'], 'id': backup['id']}) backup.host = self._get_available_backup_service_host( backup.host, backup.availability_zone) backup.save() export_data = self.backup_rpcapi.export_record(context, backup) return export_data def _get_import_backup(self, context: context.RequestContext, backup_url: str) -> 'objects.Backup': """Prepare database backup record for import. This method decodes provided backup_url and expects to find the id of the backup in there. Then checks the DB for the presence of this backup record and if it finds it and is not deleted it will raise an exception because the record cannot be created or used. If the record is in deleted status then we must be trying to recover this record, so we'll reuse it. If the record doesn't already exist we create it with provided id. :param context: running context :param backup_url: backup description to be used by the backup driver :return: BackupImport object :raises InvalidBackup: :raises InvalidInput: """ reservations = None backup = None # Deserialize string backup record into a dictionary backup_record = objects.Backup.decode_record(backup_url) # ID is a required field since it's what links incremental backups if 'id' not in backup_record: msg = _('Provided backup record is missing an id') raise exception.InvalidInput(reason=msg) # Since we use size to reserve&commit quota, size is another required # field. if 'size' not in backup_record: msg = _('Provided backup record is missing size attribute') raise exception.InvalidInput(reason=msg) # Try to get the backup with that ID in all projects even among # deleted entries (we reuse soft-deleted backups). 
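# A BackupNotFound here is expected for a brand new import; in that case no existing record is revived and a fresh BackupImport object is created further below.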
try: backup = objects.BackupImport.get_by_id( context.elevated(read_deleted='yes'), backup_record['id'], project_only=False) # If record exists and it's not deleted we cannot proceed # with the import if backup.status != fields.BackupStatus.DELETED: msg = _('Backup already exists in database.') raise exception.InvalidBackup(reason=msg) except exception.BackupNotFound: pass # Check that we're under limit by reserving quota try: reserve_opts = {'backups': 1, 'backup_gigabytes': backup_record['size']} reservations = QUOTAS.reserve(context, **reserve_opts) except exception.OverQuota as e: quota_utils.process_reserve_over_quota( context, e, resource='backups', size=backup_record['size']) kwargs = { 'user_id': context.user_id, 'project_id': context.project_id, 'volume_id': IMPORT_VOLUME_ID, 'status': fields.BackupStatus.CREATING, 'deleted_at': None, 'deleted': False, 'metadata': {} } try: if backup: # "revive" the soft-deleted backup record retrieved earlier backup.update(kwargs) backup.save() else: # create a new backup with the specified ID backup = objects.BackupImport(context=context, id=backup_record['id'], **kwargs) backup.create() QUOTAS.commit(context, reservations) except Exception: with excutils.save_and_reraise_exception(): try: if backup and 'id' in backup: backup.destroy() finally: QUOTAS.rollback(context, reservations) return backup def import_record(self, context: context.RequestContext, backup_service: str, backup_url: str) -> 'objects.Backup': """Make the RPC call to import a volume backup. :param context: running context :param backup_service: backup service name :param backup_url: backup description to be used by the backup driver :raises InvalidBackup: :raises ServiceNotFound: :raises InvalidInput: """ context.authorize(policy.IMPORT_POLICY) # NOTE(ronenkat): since we don't have a backup-scheduler # we need to find a host that support the backup service # that was used to create the backup. # We send it to the first backup service host, and the backup manager # on that host will forward it to other hosts on the hosts list if it # cannot support correct service itself. hosts = self._list_backup_hosts() if len(hosts) == 0: raise exception.ServiceNotFound(service_id=backup_service) # Get Backup object that will be used to import this backup record backup = self._get_import_backup(context, backup_url) first_host = hosts.pop() self.backup_rpcapi.import_record(context, first_host, backup, backup_service, backup_url, hosts) return backup def update(self, context: context.RequestContext, backup_id: str, fields: list) -> 'objects.Service': backup = self.get(context, backup_id) context.authorize(policy.UPDATE_POLICY, target_obj=backup) backup.update(fields) backup.save() return backup ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/backup/chunkeddriver.py0000664000175000017500000011265300000000000021017 0ustar00zuulzuul00000000000000# Copyright (C) 2012 Hewlett-Packard Development Company, L.P. # Copyright (c) 2014 TrilioData, Inc # Copyright (c) 2015 EMC Corporation # Copyright (C) 2015 Kevin Fox # Copyright (C) 2015 Tom Barron # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Generic base class to implement metadata, compression and chunked data operations """ import abc import hashlib import json import os import sys import eventlet from oslo_config import cfg from oslo_log import log as logging from oslo_service import loopingcall from oslo_utils import excutils from oslo_utils import units from cinder.backup import driver from cinder import exception from cinder.i18n import _ from cinder import objects from cinder.objects import fields from cinder.volume import volume_utils if sys.platform == 'win32': from os_win import utilsfactory as os_win_utilsfactory else: os_win_utilsfactory = None LOG = logging.getLogger(__name__) backup_opts = [ cfg.StrOpt('backup_compression_algorithm', default='zlib', ignore_case=True, choices=[('none', 'Do not use compression'), ('off', "Same as 'none'"), ('no', "Same as 'none'"), ('zlib', 'Use the Deflate compression algorithm'), ('gzip', "Same as 'zlib'"), ('bz2', 'Use Burrows-Wheeler transform compression'), ('bzip2', "Same as 'bz2'"), ('zstd', 'Use the Zstandard compression algorithm')], help="Compression algorithm for backups ('none' to disable)"), ] CONF = cfg.CONF CONF.register_opts(backup_opts) def _write_nonzero(volume_file, volume_offset, content): """Write non-zero parts of `content` into `volume_file`.""" chunk_length = 1024 * 1024 content = memoryview(content) for chunk_offset in range(0, len(content), chunk_length): chunk_end = chunk_offset + chunk_length chunk = content[chunk_offset:chunk_end] # The len(chunk) may be smaller than chunk_length. It's okay. if not volume_utils.is_all_zero(chunk): volume_file.seek(volume_offset + chunk_offset) volume_file.write(chunk.tobytes()) def _write_volume(volume_is_new, volume_file, volume_offset, content): if volume_is_new: _write_nonzero(volume_file, volume_offset, content) else: volume_file.seek(volume_offset) volume_file.write(content) # Object writer and reader returned by inheriting classes must not have any # logging calls, as well as the compression libraries, as eventlet has a bug # (https://github.com/eventlet/eventlet/issues/432) that would result in # failures. class ChunkedBackupDriver(driver.BackupDriver, metaclass=abc.ABCMeta): """Abstract chunked backup driver. Implements common functionality for backup drivers that store volume data in multiple "chunks" in a backup repository when the size of the backed up cinder volume exceeds the size of a backup repository "chunk." Provides abstract methods to be implemented in concrete chunking drivers. """ DRIVER_VERSION = '1.0.0' DRIVER_VERSION_MAPPING = {'1.0.0': '_restore_v1'} def _get_compressor(self, algorithm): try: if algorithm.lower() in ('none', 'off', 'no'): return None if algorithm.lower() in ('zlib', 'gzip'): import zlib as compressor result = compressor elif algorithm.lower() in ('bz2', 'bzip2'): import bz2 as compressor result = compressor elif algorithm.lower() == 'zstd': import zstd as compressor result = compressor else: result = None if result: # NOTE(geguileo): Compression/Decompression starves # greenthreads so we use a native thread instead. 
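# The tpool.Proxy wrapper dispatches the compressor's compress()/decompress() calls to a native OS thread pool instead of running them on the eventlet hub.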
return eventlet.tpool.Proxy(result) except ImportError: pass err = _('unsupported compression algorithm: %s') % algorithm raise ValueError(err) def __init__( self, context, chunk_size_bytes, sha_block_size_bytes, backup_default_container, enable_progress_timer, ): super(ChunkedBackupDriver, self).__init__(context) self.chunk_size_bytes = chunk_size_bytes self.sha_block_size_bytes = sha_block_size_bytes self.backup_default_container = backup_default_container self.enable_progress_timer = enable_progress_timer self.backup_timer_interval = CONF.backup_timer_interval self.data_block_num = CONF.backup_object_number_per_notification self.az = CONF.storage_availability_zone self.backup_compression_algorithm = CONF.backup_compression_algorithm self.compressor = \ self._get_compressor(CONF.backup_compression_algorithm) self.support_force_delete = True if sys.platform == 'win32' and self.chunk_size_bytes % 4096: # The chunk size must be a multiple of the sector size. In order # to fail out early and avoid attaching the disks, we'll just # enforce the chunk size to be a multiple of 4096. err = _("Invalid chunk size. It must be a multiple of 4096.") raise exception.InvalidConfigurationValue(message=err) def _get_object_writer(self, container, object_name, extra_metadata=None): """Return writer proxy-wrapped to execute methods in native thread.""" writer = self.get_object_writer(container, object_name, extra_metadata) return eventlet.tpool.Proxy(writer) def _get_object_reader(self, container, object_name, extra_metadata=None): """Return reader proxy-wrapped to execute methods in native thread.""" reader = self.get_object_reader(container, object_name, extra_metadata) return eventlet.tpool.Proxy(reader) # To create your own "chunked" backup driver, implement the following # abstract methods. @abc.abstractmethod def put_container(self, container): """Create the container if needed. No failure if it pre-exists.""" return @abc.abstractmethod def get_container_entries(self, container, prefix): """Get container entry names.""" return @abc.abstractmethod def get_object_writer(self, container, object_name, extra_metadata=None): """Returns a writer object which stores the chunk data. The object returned should be a context handler that can be used in a "with" context. The object writer methods must not have any logging calls, as eventlet has a bug (https://github.com/eventlet/eventlet/issues/432) that would result in failures. """ return @abc.abstractmethod def get_object_reader(self, container, object_name, extra_metadata=None): """Returns a reader object for the backed up chunk. The object reader methods must not have any logging calls, as eventlet has a bug (https://github.com/eventlet/eventlet/issues/432) that would result in failures. """ return @abc.abstractmethod def delete_object(self, container, object_name): """Delete object from container.""" return @abc.abstractmethod def _generate_object_name_prefix(self, backup): return @abc.abstractmethod def update_container_name(self, backup, container): """Allow sub-classes to override container name. This method exists so that sub-classes can override the container name as it comes in to the driver in the backup object. Implementations should return None if no change to the container name is desired. """ return @abc.abstractmethod def get_extra_metadata(self, backup, volume): """Return extra metadata to use in prepare_backup. 
This method allows for collection of extra metadata in prepare_backup() which will be passed to get_object_reader() and get_object_writer(). Subclass extensions can use this extra information to optimize data transfers. Return a json serializable object. """ return def _create_container(self, backup): # Container's name will be decided by the driver (returned by method # update_container_name), if no change is required by the driver then # we'll use the one the backup object already has, but if it doesn't # have one backup_default_container will be used. new_container = self.update_container_name(backup, backup.container) if new_container: # If the driver is not really changing the name we don't want to # dirty the field in the object and save it to the DB with the same # value. if new_container != backup.container: backup.container = new_container elif backup.container is None: backup.container = self.backup_default_container LOG.debug('_create_container started, container: %(container)s,' 'backup: %(backup_id)s.', {'container': backup.container, 'backup_id': backup.id}) backup.save() self.put_container(backup.container) return backup.container def _generate_object_names(self, backup): prefix = backup['service_metadata'] object_names = self.get_container_entries(backup['container'], prefix) LOG.debug('generated object list: %s.', object_names) return object_names def _metadata_filename(self, backup): object_name = backup['service_metadata'] filename = '%s_metadata' % object_name return filename def _sha256_filename(self, backup): object_name = backup['service_metadata'] filename = '%s_sha256file' % object_name return filename def _write_metadata(self, backup, volume_id, container, object_list, volume_meta, extra_metadata=None): filename = self._metadata_filename(backup) LOG.debug('_write_metadata started, container name: %(container)s,' ' metadata filename: %(filename)s.', {'container': container, 'filename': filename}) metadata = {} metadata['version'] = self.DRIVER_VERSION metadata['backup_id'] = backup['id'] metadata['volume_id'] = volume_id metadata['backup_name'] = backup['display_name'] metadata['backup_description'] = backup['display_description'] metadata['created_at'] = str(backup['created_at']) metadata['objects'] = object_list metadata['parent_id'] = backup['parent_id'] metadata['volume_meta'] = volume_meta if extra_metadata: metadata['extra_metadata'] = extra_metadata metadata_json = json.dumps(metadata, sort_keys=True, indent=2) metadata_json = metadata_json.encode('utf-8') with self._get_object_writer(container, filename) as writer: writer.write(metadata_json) LOG.debug('_write_metadata finished. 
Metadata: %s.', metadata_json) def _write_sha256file(self, backup, volume_id, container, sha256_list): filename = self._sha256_filename(backup) LOG.debug('_write_sha256file started, container name: %(container)s,' ' sha256file filename: %(filename)s.', {'container': container, 'filename': filename}) sha256file = {} sha256file['version'] = self.DRIVER_VERSION sha256file['backup_id'] = backup['id'] sha256file['volume_id'] = volume_id sha256file['backup_name'] = backup['display_name'] sha256file['backup_description'] = backup['display_description'] sha256file['created_at'] = str(backup['created_at']) sha256file['chunk_size'] = self.sha_block_size_bytes sha256file['sha256s'] = sha256_list sha256file_json = json.dumps(sha256file, sort_keys=True, indent=2) sha256file_json = sha256file_json.encode('utf-8') with self._get_object_writer(container, filename) as writer: writer.write(sha256file_json) LOG.debug('_write_sha256file finished.') def _read_metadata(self, backup): container = backup['container'] filename = self._metadata_filename(backup) LOG.debug('_read_metadata started, container name: %(container)s, ' 'metadata filename: %(filename)s.', {'container': container, 'filename': filename}) with self._get_object_reader(container, filename) as reader: metadata_json = reader.read() metadata_json = metadata_json.decode('utf-8') metadata = json.loads(metadata_json) LOG.debug('_read_metadata finished. Metadata: %s.', metadata_json) return metadata def _read_sha256file(self, backup): container = backup['container'] filename = self._sha256_filename(backup) LOG.debug('_read_sha256file started, container name: %(container)s, ' 'sha256 filename: %(filename)s.', {'container': container, 'filename': filename}) with self._get_object_reader(container, filename) as reader: sha256file_json = reader.read() sha256file_json = sha256file_json.decode('utf-8') sha256file = json.loads(sha256file_json) LOG.debug('_read_sha256file finished.') return sha256file def _prepare_backup(self, backup): """Prepare the backup process and return the backup metadata.""" volume = self.db.volume_get(self.context, backup.volume_id) if volume['size'] <= 0: err = _('volume size %d is invalid.') % volume['size'] raise exception.InvalidVolume(reason=err) container = self._create_container(backup) object_prefix = self._generate_object_name_prefix(backup) backup.service_metadata = object_prefix backup.save() volume_size_bytes = volume['size'] * units.Gi availability_zone = self.az LOG.debug('starting backup of volume: %(volume_id)s,' ' volume size: %(volume_size_bytes)d, object names' ' prefix %(object_prefix)s, availability zone:' ' %(availability_zone)s', { 'volume_id': backup.volume_id, 'volume_size_bytes': volume_size_bytes, 'object_prefix': object_prefix, 'availability_zone': availability_zone, }) object_meta = {'id': 1, 'list': [], 'prefix': object_prefix, 'volume_meta': None} object_sha256 = {'id': 1, 'sha256s': [], 'prefix': object_prefix} extra_metadata = self.get_extra_metadata(backup, volume) if extra_metadata is not None: object_meta['extra_metadata'] = extra_metadata return (object_meta, object_sha256, extra_metadata, container, volume_size_bytes) def _backup_chunk(self, backup, container, data, data_offset, object_meta, extra_metadata): """Backup data chunk based on the object metadata and offset.""" object_prefix = object_meta['prefix'] object_list = object_meta['list'] object_id = object_meta['id'] object_name = '%s-%05d' % (object_prefix, object_id) obj = {} obj[object_name] = {} obj[object_name]['offset'] = data_offset 
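# Record where this chunk belongs in the volume and how many (uncompressed) bytes it covers; this per-object bookkeeping is written out later as part of the backup's metadata object.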
obj[object_name]['length'] = len(data) LOG.debug('Backing up chunk of data from volume.') algorithm, output_data = self._prepare_output_data(data) obj[object_name]['compression'] = algorithm LOG.debug('About to put_object') with self._get_object_writer( container, object_name, extra_metadata=extra_metadata ) as writer: writer.write(output_data) md5 = eventlet.tpool.execute( hashlib.md5, data, usedforsecurity=False).hexdigest() obj[object_name]['md5'] = md5 LOG.debug('backup MD5 for %(object_name)s: %(md5)s', {'object_name': object_name, 'md5': md5}) object_list.append(obj) object_id += 1 object_meta['list'] = object_list object_meta['id'] = object_id LOG.debug('Calling eventlet.sleep(0)') eventlet.sleep(0) def _prepare_output_data(self, data): if self.compressor is None: return 'none', data data_size_bytes = len(data) # Execute compression in native thread so it doesn't prevent # cooperative greenthread switching. compressed_data = self.compressor.compress(data) comp_size_bytes = len(compressed_data) algorithm = CONF.backup_compression_algorithm.lower() if comp_size_bytes >= data_size_bytes: LOG.debug('Compression of this chunk was ineffective: ' 'original length: %(data_size_bytes)d, ' 'compressed length: %(compressed_size_bytes)d. ' 'Using original data for this chunk.', {'data_size_bytes': data_size_bytes, 'compressed_size_bytes': comp_size_bytes, }) return 'none', data LOG.debug('Compressed %(data_size_bytes)d bytes of data ' 'to %(comp_size_bytes)d bytes using %(algorithm)s.', {'data_size_bytes': data_size_bytes, 'comp_size_bytes': comp_size_bytes, 'algorithm': algorithm, }) return algorithm, compressed_data def _finalize_backup(self, backup, container, object_meta, object_sha256): """Write the backup's metadata to the backup repository.""" object_list = object_meta['list'] object_id = object_meta['id'] volume_meta = object_meta['volume_meta'] sha256_list = object_sha256['sha256s'] extra_metadata = object_meta.get('extra_metadata') self._write_sha256file(backup, backup.volume_id, container, sha256_list) self._write_metadata(backup, backup.volume_id, container, object_list, volume_meta, extra_metadata) # NOTE(whoami-rajat) : The object_id variable is used to name # the backup objects and hence differs from the object_count # variable, therefore the increment of object_id value in the last # iteration of _backup_chunk() method shouldn't be reflected in the # object_count variable. backup.object_count = object_id - 1 backup.save() LOG.debug('backup %s finished.', backup['id']) def _backup_metadata(self, backup, object_meta): """Backup volume metadata. NOTE(dosaboy): the metadata we are backing up is obtained from a versioned api so we should not alter it in any way here. We must also be sure that the service that will perform the restore is compatible with version used. 
""" json_meta = self.get_metadata(backup['volume_id']) if not json_meta: LOG.debug("No volume metadata to backup.") return object_meta["volume_meta"] = json_meta def _send_progress_end(self, context, backup, object_meta): object_meta['backup_percent'] = 100 volume_utils.notify_about_backup_usage(context, backup, "createprogress", extra_usage_info= object_meta) def _send_progress_notification(self, context, backup, object_meta, total_block_sent_num, total_volume_size): backup_percent = total_block_sent_num * 100 / total_volume_size object_meta['backup_percent'] = backup_percent volume_utils.notify_about_backup_usage(context, backup, "createprogress", extra_usage_info= object_meta) def _get_win32_phys_disk_size(self, disk_path): win32_diskutils = os_win_utilsfactory.get_diskutils() disk_number = win32_diskutils.get_device_number_from_device_name( disk_path) return win32_diskutils.get_disk_size(disk_number) def _calculate_sha(self, data): """Calculate SHA256 of a data chunk. This method cannot log anything as it is called on a native thread. """ # NOTE(geguileo): Using memoryview to avoid data copying when slicing # for the sha256 call. chunk = memoryview(data) shalist = [] off = 0 datalen = len(chunk) while off < datalen: chunk_end = min(datalen, off + self.sha_block_size_bytes) block = chunk[off:chunk_end] sha = hashlib.sha256(block).hexdigest() shalist.append(sha) off += self.sha_block_size_bytes return shalist def backup(self, backup, volume_file, backup_metadata=True): """Backup the given volume. If backup['parent_id'] is given, then an incremental backup is performed. """ if self.chunk_size_bytes % self.sha_block_size_bytes: err = _('Chunk size is not multiple of ' 'block size for creating hash.') raise exception.InvalidBackup(reason=err) # Read the shafile of the parent backup if backup['parent_id'] # is given. parent_backup_shafile = None parent_backup = None if backup.parent_id: parent_backup = objects.Backup.get_by_id(self.context, backup.parent_id) parent_backup_shafile = self._read_sha256file(parent_backup) parent_backup_shalist = parent_backup_shafile['sha256s'] if (parent_backup_shafile['chunk_size'] != self.sha_block_size_bytes): err = (_('Hash block size has changed since the last ' 'backup. New hash block size: %(new)s. Old hash ' 'block size: %(old)s. Do a full backup.') % {'old': parent_backup_shafile['chunk_size'], 'new': self.sha_block_size_bytes}) raise exception.InvalidBackup(reason=err) # If the volume size increased since the last backup, fail # the incremental backup and ask user to do a full backup. if backup.size > parent_backup.size: err = _('Volume size increased since the last ' 'backup. Do a full backup.') raise exception.InvalidBackup(reason=err) win32_disk_size = None if sys.platform == 'win32': # When dealing with Windows physical disks, we need the exact # size of the disk. Attempting to read passed this boundary will # lead to an IOError exception. At the same time, we cannot # seek to the end of file. win32_disk_size = self._get_win32_phys_disk_size(volume_file.name) (object_meta, object_sha256, extra_metadata, container, volume_size_bytes) = self._prepare_backup(backup) counter = 0 total_block_sent_num = 0 # There are two mechanisms to send the progress notification. # 1. The notifications are periodically sent in a certain interval. # 2. The notifications are sent after a certain number of chunks. # Both of them are working simultaneously during the volume backup, # when "chunked" backup drivers are deployed. 
def _notify_progress(): self._send_progress_notification(self.context, backup, object_meta, total_block_sent_num, volume_size_bytes) timer = loopingcall.FixedIntervalLoopingCall( _notify_progress) if self.enable_progress_timer: timer.start(interval=self.backup_timer_interval) sha256_list = object_sha256['sha256s'] shaindex = 0 is_backup_canceled = False while True: # First of all, we check the status of this backup. If it # has been changed to delete or has been deleted, we cancel the # backup process to do forcing delete. with backup.as_read_deleted(): backup.refresh() if backup.status in (fields.BackupStatus.DELETING, fields.BackupStatus.DELETED): is_backup_canceled = True # To avoid the chunk left when deletion complete, need to # clean up the object of chunk again. self.delete_backup(backup) LOG.debug('Cancel the backup process of %s.', backup.id) break data_offset = volume_file.tell() if win32_disk_size is not None: read_bytes = min(self.chunk_size_bytes, win32_disk_size - data_offset) else: read_bytes = self.chunk_size_bytes data = volume_file.read(read_bytes) if data == b'': break # Calculate new shas with the datablock. shalist = eventlet.tpool.execute(self._calculate_sha, data) sha256_list.extend(shalist) # If parent_backup is not None, that means an incremental # backup will be performed. if parent_backup: # Find the extent that needs to be backed up. extent_off = -1 for idx, sha in enumerate(shalist): if sha != parent_backup_shalist[shaindex]: if extent_off == -1: # Start of new extent. extent_off = idx * self.sha_block_size_bytes else: if extent_off != -1: # We've reached the end of extent. extent_end = idx * self.sha_block_size_bytes segment = data[extent_off:extent_end] self._backup_chunk(backup, container, segment, data_offset + extent_off, object_meta, extra_metadata) extent_off = -1 shaindex += 1 # The last extent extends to the end of data buffer. if extent_off != -1: extent_end = len(data) segment = data[extent_off:extent_end] self._backup_chunk(backup, container, segment, data_offset + extent_off, object_meta, extra_metadata) extent_off = -1 else: # Do a full backup. self._backup_chunk(backup, container, data, data_offset, object_meta, extra_metadata) # Notifications total_block_sent_num += self.data_block_num counter += 1 if counter == self.data_block_num: # Send the notification to Ceilometer when the chunk # number reaches the data_block_num. The backup percentage # is put in the metadata as the extra information. self._send_progress_notification(self.context, backup, object_meta, total_block_sent_num, volume_size_bytes) # Reset the counter counter = 0 # Stop the timer. timer.stop() # If backup has been cancelled we have nothing more to do # but timer.stop(). if is_backup_canceled: return # All the data have been sent, the backup_percent reaches 100. self._send_progress_end(self.context, backup, object_meta) object_sha256['sha256s'] = sha256_list if backup_metadata: try: self._backup_metadata(backup, object_meta) # Whatever goes wrong, we want to log, cleanup, and re-raise. except Exception: with excutils.save_and_reraise_exception(): LOG.exception("Backup volume metadata failed.") self.delete_backup(backup) self._finalize_backup(backup, container, object_meta, object_sha256) def _restore_v1(self, backup, volume_id, metadata, volume_file, volume_is_new, requested_backup): """Restore a v1 volume backup. 
Raises BackupRestoreCancel on any requested_backup status change, we ignore the backup parameter for this check since that's only the current data source from the list of backup sources. """ backup_id = backup['id'] LOG.debug('v1 volume backup restore of %s started.', backup_id) extra_metadata = metadata.get('extra_metadata') container = backup['container'] metadata_objects = metadata['objects'] metadata_object_names = [] for obj in metadata_objects: metadata_object_names.extend(obj.keys()) LOG.debug('metadata_object_names = %s.', metadata_object_names) prune_list = [self._metadata_filename(backup), self._sha256_filename(backup)] object_names = [object_name for object_name in self._generate_object_names(backup) if object_name not in prune_list] if sorted(object_names) != sorted(metadata_object_names): err = _('restore_backup aborted, actual object list ' 'does not match object list stored in metadata.') raise exception.InvalidBackup(reason=err) for metadata_object in metadata_objects: # Abort when status changes to error, available, or anything else with requested_backup.as_read_deleted(): requested_backup.refresh() if requested_backup.status != fields.BackupStatus.RESTORING: raise exception.BackupRestoreCancel(back_id=backup.id, vol_id=volume_id) object_name, obj = list(metadata_object.items())[0] LOG.debug('restoring object. backup: %(backup_id)s, ' 'container: %(container)s, object name: ' '%(object_name)s, volume: %(volume_id)s.', { 'backup_id': backup_id, 'container': container, 'object_name': object_name, 'volume_id': volume_id, }) with self._get_object_reader( container, object_name, extra_metadata=extra_metadata) as reader: body = reader.read() compression_algorithm = metadata_object[object_name]['compression'] decompressor = self._get_compressor(compression_algorithm) if decompressor is not None: LOG.debug('decompressing data using %s algorithm', compression_algorithm) decompressed = decompressor.decompress(body) body = None # Allow Python to free it _write_volume(volume_is_new, volume_file, obj['offset'], decompressed) decompressed = None # Allow Python to free it else: _write_volume(volume_is_new, volume_file, obj['offset'], body) body = None # Allow Python to free it # force flush every write to avoid long blocking write on close volume_file.flush() # Be tolerant to IO implementations that do not support fileno() try: fileno = volume_file.fileno() except IOError: LOG.debug("volume_file does not support fileno() so skipping " "fsync()") else: os.fsync(fileno) # Restoring a backup to a volume can take some time. Yield so other # threads can run, allowing for among other things the service # status to be updated eventlet.sleep(0) LOG.debug('v1 volume backup restore of %s finished.', backup_id) def restore(self, backup, volume_id, volume_file, volume_is_new): """Restore the given volume backup from backup repository. Raises BackupRestoreCancel on any backup status change. 
""" backup_id = backup['id'] container = backup['container'] object_prefix = backup['service_metadata'] LOG.debug('starting restore of backup %(object_prefix)s ' 'container: %(container)s, ' 'to %(new)s volume %(volume_id)s, ' 'backup: %(backup_id)s.', { 'object_prefix': object_prefix, 'container': container, 'volume_id': volume_id, 'backup_id': backup_id, 'new': 'new' if volume_is_new else 'existing', }) metadata = self._read_metadata(backup) metadata_version = metadata['version'] LOG.debug('Restoring backup version %s', metadata_version) try: restore_func = getattr(self, self.DRIVER_VERSION_MAPPING.get( metadata_version)) except TypeError: err = (_('No support to restore backup version %s') % metadata_version) raise exception.InvalidBackup(reason=err) # Build a list of backups based on parent_id. A full backup # will be the last one in the list. backup_list = [] backup_list.append(backup) current_backup = backup while current_backup.parent_id: prev_backup = objects.Backup.get_by_id(self.context, current_backup.parent_id) backup_list.append(prev_backup) current_backup = prev_backup # Do a full restore first, then layer the incremental backups # on top of it in order. index = len(backup_list) - 1 while index >= 0: backup1 = backup_list[index] index = index - 1 metadata = self._read_metadata(backup1) restore_func(backup1, volume_id, metadata, volume_file, volume_is_new, backup) volume_meta = metadata.get('volume_meta', None) try: if volume_meta: self.put_metadata(volume_id, volume_meta) else: LOG.debug("No volume metadata in this backup.") except exception.BackupMetadataUnsupportedVersion: msg = _("Metadata restore failed due to incompatible version.") LOG.error(msg) raise exception.BackupOperationError(msg) LOG.debug('restore %(backup_id)s to %(volume_id)s finished.', {'backup_id': backup_id, 'volume_id': volume_id}) def delete_backup(self, backup): """Delete the given backup.""" container = backup['container'] object_prefix = backup['service_metadata'] LOG.debug('delete started, backup: %(id)s, container: %(cont)s, ' 'prefix: %(pre)s.', {'id': backup['id'], 'cont': container, 'pre': object_prefix}) if container is not None and object_prefix is not None: object_names = [] try: object_names = self._generate_object_names(backup) except Exception: LOG.warning('Error while listing objects, continuing' ' with delete.') for object_name in object_names: self.delete_object(container, object_name) LOG.debug('deleted object: %(object_name)s' ' in container: %(container)s.', { 'object_name': object_name, 'container': container }) # Deleting a backup's objects can take some time. # Yield so other threads can run eventlet.sleep(0) LOG.debug('delete %s finished.', backup['id']) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/backup/driver.py0000664000175000017500000004275600000000000017463 0ustar00zuulzuul00000000000000# Copyright (C) 2013 Deutsche Telekom AG # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""Base class for all backup drivers.""" import abc from oslo_config import cfg from oslo_log import log as logging from oslo_serialization import jsonutils from cinder.db import base from cinder import exception from cinder.i18n import _ backup_opts = [ cfg.IntOpt('backup_metadata_version', default=2, help='Backup metadata version to be used when backing up ' 'volume metadata. If this number is bumped, make sure the ' 'service doing the restore supports the new version.'), cfg.IntOpt('backup_object_number_per_notification', default=10, help='The number of chunks or objects, for which one ' 'Ceilometer notification will be sent'), cfg.IntOpt('backup_timer_interval', default=120, help='Interval, in seconds, between two progress notifications ' 'reporting the backup status'), ] CONF = cfg.CONF CONF.register_opts(backup_opts) LOG = logging.getLogger(__name__) class BackupMetadataAPI(base.Base): TYPE_TAG_VOL_BASE_META = 'volume-base-metadata' TYPE_TAG_VOL_META = 'volume-metadata' TYPE_TAG_VOL_GLANCE_META = 'volume-glance-metadata' def __init__(self, context): super().__init__() self.context = context self._key_mgr = None @staticmethod def _is_serializable(value): """Returns True if value is serializable.""" try: jsonutils.dumps(value) except TypeError: LOG.info("Value with type=%s is not serializable", type(value)) return False return True def _save_vol_base_meta(self, container, volume_id): """Save base volume metadata to container. This will fetch all fields from the db Volume object for volume_id and save them in the provided container dictionary. """ type_tag = self.TYPE_TAG_VOL_BASE_META LOG.debug("Getting metadata type '%s'", type_tag) meta = self.db.volume_get(self.context, volume_id) if meta: container[type_tag] = {} for key, value in meta: # Exclude fields that are "not JSON serializable" if not self._is_serializable(value): LOG.info("Unable to serialize field '%s' - excluding " "from backup", key) continue # NOTE(abishop): The backup manager is now responsible for # ensuring a copy of the volume's encryption key ID is # retained in case the volume is deleted. Yes, this means # the backup's volume base metadata now stores the volume's # original encryption key ID, which affects how things are # handled when backups are restored. The backup manager # handles this, too. container[type_tag][key] = value LOG.debug("Completed fetching metadata type '%s'", type_tag) else: LOG.debug("No metadata type '%s' available", type_tag) def _save_vol_meta(self, container, volume_id): """Save volume metadata to container. This will fetch all fields from the db VolumeMetadata object for volume_id and save them in the provided container dictionary. """ type_tag = self.TYPE_TAG_VOL_META LOG.debug("Getting metadata type '%s'", type_tag) meta = self.db.volume_metadata_get(self.context, volume_id) if meta: container[type_tag] = {} for entry in meta: # Exclude fields that are "not JSON serializable" if not self._is_serializable(meta[entry]): LOG.info("Unable to serialize field '%s' - excluding " "from backup", entry) continue container[type_tag][entry] = meta[entry] LOG.debug("Completed fetching metadata type '%s'", type_tag) else: LOG.debug("No metadata type '%s' available", type_tag) def _save_vol_glance_meta(self, container, volume_id): """Save volume Glance metadata to container. This will fetch all fields from the db VolumeGlanceMetadata object for volume_id and save them in the provided container dictionary. 
""" type_tag = self.TYPE_TAG_VOL_GLANCE_META LOG.debug("Getting metadata type '%s'", type_tag) try: meta = self.db.volume_glance_metadata_get(self.context, volume_id) if meta: container[type_tag] = {} for entry in meta: # Exclude fields that are "not JSON serializable" if not self._is_serializable(entry.value): LOG.info("Unable to serialize field '%s' - " "excluding from backup", entry) continue container[type_tag][entry.key] = entry.value LOG.debug("Completed fetching metadata type '%s'", type_tag) except exception.GlanceMetadataNotFound: LOG.debug("No metadata type '%s' available", type_tag) @staticmethod def _filter(metadata, fields, excludes=None): """Returns set of metadata restricted to required fields. If fields is empty list, the full set is returned. :param metadata: master set of metadata :param fields: list of fields we want to extract :param excludes: fields to be excluded :returns: filtered metadata """ if not fields: return metadata if not excludes: excludes = [] subset = {} for field in fields: if field in metadata and field not in excludes: subset[field] = metadata[field] else: LOG.debug("Excluding field '%s'", field) return subset def _restore_vol_base_meta(self, metadata, volume_id, fields): """Restore values to Volume object for provided fields.""" LOG.debug("Restoring volume base metadata") excludes = [] # Ignore unencrypted backups. key = 'encryption_key_id' if key in fields and key in metadata and metadata[key] is not None: self._restore_vol_encryption_meta(volume_id, metadata['volume_type_id']) # NOTE(dosaboy): if the target volume looks like it was auto-created # as part of this restore operation and we have a name to restore # then apply the name to the target volume. However, if that target # volume already existed and it has a name or we do not have a name to # restore, then ignore this key. This is intended to be a less drastic # solution than commit 7ee80f7. key = 'display_name' if key in fields and key in metadata: target_vol = self.db.volume_get(self.context, volume_id) name = target_vol.get(key, '') if (not metadata.get(key) or name and not name.startswith('restore_backup_')): excludes.append(key) excludes.append('display_description') metadata = self._filter(metadata, fields, excludes=excludes) self.db.volume_update(self.context, volume_id, metadata) def _restore_vol_encryption_meta(self, volume_id, src_volume_type_id): """Restores the volume_type_id for encryption if needed. Only allow restoration of an encrypted backup if the destination volume has the same volume type as the source volume. Otherwise encryption will not work. If volume types are already the same, no action is needed. """ dest_vol = self.db.volume_get(self.context, volume_id) if dest_vol['volume_type_id'] != src_volume_type_id: LOG.debug("Volume type id's do not match.") # If the volume types do not match, and the destination volume # does not have a volume type, force the destination volume # to have the encrypted volume type, provided it still exists. if dest_vol['volume_type_id'] is None: try: self.db.volume_type_get( self.context, src_volume_type_id) except exception.VolumeTypeNotFound: LOG.debug("Volume type of source volume has been " "deleted. Encrypted backup restore has " "failed.") msg = _("The source volume type '%s' is not " "available.") % (src_volume_type_id) raise exception.EncryptedBackupOperationFailed(msg) # Update dest volume with src volume's volume_type_id. 
LOG.debug("The volume type of the destination volume " "will become the volume type of the source " "volume.") self.db.volume_update(self.context, volume_id, {'volume_type_id': src_volume_type_id}) else: # Volume type id's do not match, and destination volume # has a volume type. Throw exception. LOG.warning("Destination volume type is different from " "source volume type for an encrypted volume. " "Encrypted backup restore has failed.") msg = (_("The source volume type '%(src)s' is different " "than the destination volume type '%(dest)s'.") % {'src': src_volume_type_id, 'dest': dest_vol['volume_type_id']}) raise exception.EncryptedBackupOperationFailed(msg) def _restore_vol_meta(self, metadata, volume_id, fields): """Restore values to VolumeMetadata object for provided fields.""" LOG.debug("Restoring volume metadata") metadata = self._filter(metadata, fields) self.db.volume_metadata_update(self.context, volume_id, metadata, True) def _restore_vol_glance_meta(self, metadata, volume_id, fields): """Restore values to VolumeGlanceMetadata object for provided fields. First delete any existing metadata then save new values. """ LOG.debug("Restoring volume glance metadata") metadata = self._filter(metadata, fields) self.db.volume_glance_metadata_delete_by_volume(self.context, volume_id) for key, value in metadata.items(): self.db.volume_glance_metadata_create(self.context, volume_id, key, value) # Now mark the volume as bootable self.db.volume_update(self.context, volume_id, {'bootable': True}) def _v1_restore_factory(self): """All metadata is backed up but we selectively restore. Returns a dictionary of the form: {: (, )} Empty field list indicates that all backed up fields should be restored. """ return {self.TYPE_TAG_VOL_BASE_META: (self._restore_vol_base_meta, ['display_name', 'display_description']), self.TYPE_TAG_VOL_META: (self._restore_vol_meta, []), self.TYPE_TAG_VOL_GLANCE_META: (self._restore_vol_glance_meta, [])} def _v2_restore_factory(self): """All metadata is backed up but we selectively restore. Returns a dictionary of the form: {: (, )} Empty field list indicates that all backed up fields should be restored. """ return {self.TYPE_TAG_VOL_BASE_META: (self._restore_vol_base_meta, ['display_name', 'display_description', 'encryption_key_id']), self.TYPE_TAG_VOL_META: (self._restore_vol_meta, []), self.TYPE_TAG_VOL_GLANCE_META: (self._restore_vol_glance_meta, [])} def get(self, volume_id): """Get volume metadata. Returns a json-encoded dict containing all metadata and the restore version i.e. the version used to decide what actually gets restored from this container when doing a backup restore. """ container = {'version': CONF.backup_metadata_version} self._save_vol_base_meta(container, volume_id) self._save_vol_meta(container, volume_id) self._save_vol_glance_meta(container, volume_id) if container: return jsonutils.dumps(container) else: return None def put(self, volume_id, json_metadata): """Restore volume metadata to a volume. The json container should contain a version that is supported here. 
""" meta_container = jsonutils.loads(json_metadata) version = meta_container['version'] if version == 1: factory = self._v1_restore_factory() elif version == 2: factory = self._v2_restore_factory() else: msg = (_("Unsupported backup metadata version (%s)") % (version)) raise exception.BackupMetadataUnsupportedVersion(msg) for type in factory: func = factory[type][0] fields = factory[type][1] if type in meta_container: func(meta_container[type], volume_id, fields) else: LOG.debug("No metadata of type '%s' to restore", type) class BackupDriver(base.Base, metaclass=abc.ABCMeta): def __init__(self, context): super().__init__() self.context = context self.backup_meta_api = BackupMetadataAPI(context) # This flag indicates if backup driver supports force # deletion. So it should be set to True if the driver that inherits # from BackupDriver supports the force deletion function. self.support_force_delete = False def get_metadata(self, volume_id): return self.backup_meta_api.get(volume_id) def put_metadata(self, volume_id, json_metadata): self.backup_meta_api.put(volume_id, json_metadata) @abc.abstractmethod def backup(self, backup, volume_file, backup_metadata=False): """Start a backup of a specified volume. Some I/O operations may block greenthreads, so in order to prevent starvation parameter volume_file will be a proxy that will execute all methods in native threads, so the method implementation doesn't need to worry about that.. """ return @abc.abstractmethod def restore(self, backup, volume_id, volume_file, volume_is_new): """Restore a saved backup. Some I/O operations may block greenthreads, so in order to prevent starvation parameter volume_file will be a proxy that will execute all methods in native threads, so the method implementation doesn't need to worry about that.. May raise BackupRestoreCancel to indicate that the restoration of a volume has been aborted by changing the backup status. """ return @abc.abstractmethod def delete_backup(self, backup): """Delete a saved backup.""" return def export_record(self, backup): """Export driver specific backup record information. If backup backend needs additional driver specific information to import backup record back into the system it must overwrite this method and return it here as a dictionary so it can be serialized into a string. Default backup driver implementation has no extra information. :param backup: backup object to export :returns: driver_info - dictionary with extra information """ return {} def import_record(self, backup, driver_info): """Import driver specific backup record information. If backup backend needs additional driver specific information to import backup record back into the system it must overwrite this method since it will be called with the extra information that was provided by export_record when exporting the backup. Default backup driver implementation does nothing since it didn't export any specific data in export_record. :param backup: backup object to export :param driver_info: dictionary with driver specific backup record information :returns: nothing """ return def check_for_setup_error(self): """Method for checking if backup backend is successfully installed. Refer to :obj:`cinder.interface.backup_driver.BackupDriver.check_for_setup_error` for additional information. 
""" return ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1759315577.063118 cinder-27.0.0/cinder/backup/drivers/0000775000175000017500000000000000000000000017256 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/backup/drivers/__init__.py0000664000175000017500000000000000000000000021355 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/backup/drivers/ceph.py0000664000175000017500000020251000000000000020547 0ustar00zuulzuul00000000000000# Copyright 2013 Canonical Ltd. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Ceph Backup Service Implementation. This driver supports backing up volumes of any type to a Ceph object store. It is also capable of detecting whether the volume to be backed up is a Ceph RBD volume and, if so, attempts to perform incremental/differential backups. Support is also included for the following in the case of a source volume being a Ceph RBD volume: * backing up within the same Ceph pool (not recommended) * backing up between different Ceph pools * backing up between different Ceph clusters At the time of writing, differential backup support in Ceph/librbd was quite new so this driver accounts for this by first attempting differential backup and falling back to full backup/copy if the former fails. It is recommended that you upgrade to Ceph Dumpling (>= v0.67) or above to get the best results. If incremental backups are used, multiple backups of the same volume are stored as snapshots so that minimal space is consumed in the object store and restoring the volume takes a far reduced amount of time compared to a full copy. Note that Cinder supports restoring to a new volume or the original volume the backup was taken from. For the latter case, a full copy is enforced since this was deemed the safest action to take. It is therefore recommended to always restore to a new volume (default). 
""" import fcntl import json import os import re import subprocess import tempfile import textwrap import time from typing import Dict, List, Optional, Tuple import eventlet from os_brick.initiator import linuxrbd from oslo_config import cfg from oslo_log import log as logging from oslo_utils import excutils from oslo_utils import units from cinder.backup import driver from cinder import exception from cinder.i18n import _ from cinder import interface from cinder.message import api as message_api from cinder.message import message_field from cinder import objects from cinder import utils import cinder.volume.drivers.rbd as rbd_driver from cinder.volume import volume_utils try: import rados import rbd except ImportError: rados = None rbd = None LOG = logging.getLogger(__name__) service_opts = [ cfg.StrOpt('backup_ceph_conf', default='/etc/ceph/ceph.conf', help='Ceph configuration file to use.'), cfg.StrOpt('backup_ceph_user', default='cinder', help='The Ceph user to connect with. Default here is to use ' 'the same user as for Cinder volumes. If not using cephx ' 'this should be set to None.'), cfg.IntOpt('backup_ceph_chunk_size', default=(units.Mi * 128), help='The chunk size, in bytes, that a backup is broken into ' 'before transfer to the Ceph object store.'), cfg.StrOpt('backup_ceph_pool', default='backups', help='The Ceph pool where volume backups are stored.'), cfg.IntOpt('backup_ceph_stripe_unit', default=0, help='RBD stripe unit to use when creating a backup image.'), cfg.IntOpt('backup_ceph_stripe_count', default=0, help='RBD stripe count to use when creating a backup image.'), cfg.BoolOpt('backup_ceph_image_journals', default=False, help='If True, apply JOURNALING and EXCLUSIVE_LOCK feature ' 'bits to the backup RBD objects to allow mirroring'), cfg.IntOpt('backup_ceph_max_snapshots', default=0, help=textwrap.dedent("""\ Number of the most recent snapshots to keep. 0 indicates to keep an unlimited number of snapshots. Configuring this option can save disk space by only keeping a limited number of snapshots on the source volume storage. However, if a user deletes all incremental backups which still have snapshots on the source storage, the next incremental backup will automatically become a full backup as no common snapshot exists anymore. """)), cfg.BoolOpt('restore_discard_excess_bytes', default=True, help='If True, always discard excess bytes when restoring ' 'volumes i.e. pad with zeroes.') ] CONF = cfg.CONF CONF.register_opts(service_opts) class VolumeMetadataBackup(object): def __init__(self, client: 'rados.Rados', backup_id: str): self._client: 'rados.Rados' = client self._backup_id: str = backup_id @property def name(self) -> str: return "backup.%s.meta" % self._backup_id @property def exists(self) -> bool: meta_obj = eventlet.tpool.Proxy(rados.Object(self._client.ioctx, self.name)) return self._exists(meta_obj) def _exists(self, obj) -> bool: try: obj.stat() except rados.ObjectNotFound: return False else: return True def set(self, json_meta: str) -> None: """Write JSON metadata to a new object. This should only be called once per backup. Raises VolumeMetadataBackupExists if the object already exists. """ meta_obj = eventlet.tpool.Proxy(rados.Object(self._client.ioctx, self.name)) if self._exists(meta_obj): msg = _("Metadata backup object '%s' already exists") % self.name raise exception.VolumeMetadataBackupExists(msg) meta_obj.write(json_meta.encode('utf-8')) def get(self) -> Optional[str]: """Get metadata backup object. Returns None if the object does not exist. 
""" meta_obj = eventlet.tpool.Proxy(rados.Object(self._client.ioctx, self.name)) if not self._exists(meta_obj): LOG.debug("Metadata backup object %s does not exist", self.name) return None return meta_obj.read().decode('utf-8') def remove_if_exists(self) -> None: meta_obj = eventlet.tpool.Proxy(rados.Object(self._client.ioctx, self.name)) try: meta_obj.remove() except rados.ObjectNotFound: LOG.debug("Metadata backup object '%s' not found - ignoring", self.name) @interface.backupdriver class CephBackupDriver(driver.BackupDriver): """Backup Cinder volumes to Ceph Object Store. This class enables backing up Cinder volumes to a Ceph object store. Backups may be stored in their own pool or even cluster. Store location is defined by the Ceph conf file and service config options supplied. If the source volume is itself an RBD volume, the backup will be performed using incremental differential backups which *should* give a performance gain. """ def __init__(self, context, execute=None): super().__init__(context) self.rbd = rbd self.rados = rados self.chunk_size = CONF.backup_ceph_chunk_size self._execute = execute or utils.execute self.rbd_stripe_count = 0 self.rbd_stripe_unit = 0 if self._supports_stripingv2: self.rbd_stripe_unit = CONF.backup_ceph_stripe_unit self.rbd_stripe_count = CONF.backup_ceph_stripe_count elif (CONF.backup_ceph_stripe_unit != 0 or CONF.backup_ceph_stripe_count != 0): LOG.info("RBD striping not supported - ignoring configuration " "settings for rbd striping.") self._ceph_backup_user = CONF.backup_ceph_user self._ceph_backup_pool = CONF.backup_ceph_pool self._ceph_backup_conf = CONF.backup_ceph_conf self.message_api = message_api.API() @staticmethod def get_driver_options() -> list: return service_opts @staticmethod def _validate_string_args(*args: str) -> bool: """Ensure all args are non-None and non-empty.""" return all(args) @staticmethod def _ceph_args(user: str, conf: Optional[str] = None, pool: Optional[str] = None) -> List[str]: """Create default ceph args for executing rbd commands. If no --conf is provided, rbd will look in the default locations e.g. /etc/ceph/ceph.conf """ # Make sure user arg is valid since rbd command may not fail if # invalid/no user provided, resulting in unexpected behaviour. 
if not CephBackupDriver._validate_string_args(user): raise exception.BackupInvalidCephArgs(_("invalid user '%s'") % user) args = ['--id', user] if conf: args.extend(['--conf', conf]) if pool: args.extend(['--pool', pool]) return args @property def _supports_layering(self) -> bool: """Determine if copy-on-write is supported by our version of librbd.""" return hasattr(self.rbd, 'RBD_FEATURE_LAYERING') @property def _supports_stripingv2(self) -> bool: """Determine if striping is supported by our version of librbd.""" return hasattr(self.rbd, 'RBD_FEATURE_STRIPINGV2') @property def _supports_exclusive_lock(self) -> bool: """Determine if exclusive-lock is supported by librbd.""" return hasattr(self.rbd, 'RBD_FEATURE_EXCLUSIVE_LOCK') @property def _supports_journaling(self) -> bool: """Determine if journaling is supported by our version of librbd.""" return hasattr(self.rbd, 'RBD_FEATURE_JOURNALING') @property def _supports_fast_diff(self) -> bool: """Determine if fast-diff is supported by our version of librbd.""" return hasattr(self.rbd, 'RBD_FEATURE_FAST_DIFF') def _get_rbd_support(self) -> Tuple[bool, int]: """Determine RBD features supported by our version of librbd.""" old_format = True features = 0 if self._supports_layering: old_format = False features |= self.rbd.RBD_FEATURE_LAYERING if self._supports_stripingv2: old_format = False features |= self.rbd.RBD_FEATURE_STRIPINGV2 if CONF.backup_ceph_image_journals: LOG.debug("RBD journaling supported by backend and requested " "via config. Enabling it together with " "exclusive-lock") old_format = False features |= (self.rbd.RBD_FEATURE_EXCLUSIVE_LOCK | self.rbd.RBD_FEATURE_JOURNALING) # NOTE(christian_rohmann): Check for fast-diff support and enable it if self._supports_fast_diff: LOG.debug("RBD also supports fast-diff, enabling it " "together with exclusive-lock and object-map") old_format = False features |= (self.rbd.RBD_FEATURE_EXCLUSIVE_LOCK | self.rbd.RBD_FEATURE_OBJECT_MAP | self.rbd.RBD_FEATURE_FAST_DIFF) return (old_format, features) def check_for_setup_error(self) -> None: """Returns an error if prerequisites aren't met.""" if rados is None or rbd is None: msg = _('rados and rbd python libraries not found') raise exception.BackupDriverException(reason=msg) for attr in ['backup_ceph_user', 'backup_ceph_pool', 'backup_ceph_conf']: val = getattr(CONF, attr) if not val: raise exception.InvalidConfigurationValue(option=attr, value=val) # NOTE: Checking connection to ceph # RADOSClient __init__ method invokes _connect_to_rados # so no need to check for self.rados.Error here. 
with rbd_driver.RADOSClient(self, self._ceph_backup_pool): pass # NOTE(christian_rohmann): Check features required for journaling if CONF.backup_ceph_image_journals: if not self._supports_exclusive_lock and self._supports_journaling: LOG.error("RBD journaling not supported - unable to " "support per image mirroring in backup pool") raise exception.BackupInvalidCephArgs( _("Image Journaling set but RBD backend does " "not support journaling") ) def _connect_to_rados(self, pool: Optional[str] = None) -> Tuple['rados.Rados', 'rados.Ioctx']: """Establish connection to the backup Ceph cluster.""" client = eventlet.tpool.Proxy(self.rados.Rados( rados_id=self._ceph_backup_user, conffile=self._ceph_backup_conf)) try: client.connect() pool_to_open = pool or self._ceph_backup_pool ioctx = client.open_ioctx(pool_to_open) return client, ioctx except self.rados.Error: # shutdown cannot raise an exception client.shutdown() raise @staticmethod def _disconnect_from_rados(client: 'rados.Rados', ioctx: 'rados.Ioctx') -> None: """Terminate connection with the backup Ceph cluster.""" # closing an ioctx cannot raise an exception ioctx.close() client.shutdown() @staticmethod def _format_base_name(service_metadata: str) -> str: base_name = json.loads(service_metadata)["base"] return base_name @staticmethod def _get_backup_base_name( volume_id: str, backup: Optional['objects.Backup'] = None) -> str: """Return name of base image used for backup. Incremental backups use a new base name so we support old and new style format. """ if not backup: return "volume-%s.backup.base" % volume_id if backup.service_metadata: return CephBackupDriver._format_base_name(backup.service_metadata) # 'parent' field will only be present in incremental backups. This is # filled by cinder-api if backup.parent: # Old backups don't have the base name in the service_metadata, # so we use the default RBD backup base if backup.parent.service_metadata: service_metadata = backup.parent.service_metadata base_name = CephBackupDriver._format_base_name( service_metadata) else: base_name = "volume-%s.backup.base" % volume_id return base_name return "volume-%s.backup.%s" % (volume_id, backup.id) def _discard_bytes(self, volume: linuxrbd.RBDVolumeIOWrapper, offset: int, length: int) -> None: """Trim length bytes from offset. If the volume is an rbd do a discard() otherwise assume it is a file and pad with zeroes. 
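        For example (figures illustrative), trimming 5 GiB from an RBD-backed
        volume results in two discard() calls of (2 GiB - 1) bytes plus one
        call for the remainder, since discards are issued in chunks of at
        most 2 GiB - 1 bytes; for a non-RBD file the same length is instead
        overwritten with zero-filled chunks of ``chunk_size`` bytes.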
""" if length: LOG.debug("Discarding %(length)s bytes from offset %(offset)s", {'length': length, 'offset': offset}) if self._file_is_rbd(volume): limit = 2 * units.Gi - 1 chunks = int(length / limit) for chunk in range(0, chunks): eventlet.tpool.Proxy(volume.rbd_image).discard( offset + chunk * limit, limit) rem = int(length % limit) if rem: eventlet.tpool.Proxy(volume.rbd_image).discard( offset + chunks * limit, rem) else: zeroes = bytearray(self.chunk_size) chunks = int(length / self.chunk_size) for chunk in range(0, chunks): LOG.debug("Writing zeroes chunk %d", chunk) volume.write(zeroes) volume.flush() rem = int(length % self.chunk_size) if rem: zeroes = bytearray(rem) volume.write(zeroes) volume.flush() def _transfer_data(self, src: linuxrbd.RBDVolumeIOWrapper, src_name: str, dest: linuxrbd.RBDVolumeIOWrapper, dest_name: str, length: int, discard_zeros: bool = False) -> None: """Transfer data between files (Python IO objects).""" LOG.debug("Transferring data between '%(src)s' and '%(dest)s'", {'src': src_name, 'dest': dest_name}) chunks = int(length / self.chunk_size) LOG.debug("%(chunks)s chunks of %(bytes)s bytes to be transferred", {'chunks': chunks, 'bytes': self.chunk_size}) for chunk in range(0, chunks): before = time.time() data = src.read(self.chunk_size) # If we have reach end of source, discard any extraneous bytes from # destination volume if trim is enabled and stop writing. if data == b'': if CONF.restore_discard_excess_bytes: self._discard_bytes(dest, dest.tell(), length - dest.tell()) return if (discard_zeros and volume_utils.is_all_zero(data)): action = "Discarded" else: dest.write(data) dest.flush() action = "Transferred" delta = (time.time() - before) rate = (self.chunk_size / delta) / 1024 LOG.debug("%(action)s chunk %(chunk)s of %(chunks)s (%(rate)dK/s)", {'action': action, 'chunk': chunk + 1, 'chunks': chunks, 'rate': rate}) rem = int(length % self.chunk_size) if rem: LOG.debug("Transferring remaining %s bytes", rem) data = src.read(rem) if data == b'': if CONF.restore_discard_excess_bytes: self._discard_bytes(dest, dest.tell(), rem) else: dest.write(data) dest.flush() def _create_base_image(self, name: str, size: int, rados_client: 'rados.Rados') -> None: """Create a base backup image. This will be the base image used for storing differential exports. """ LOG.debug("Creating base image '%s'", name) old_format, features = self._get_rbd_support() eventlet.tpool.Proxy(self.rbd.RBD()).create( ioctx=rados_client.ioctx, name=name, size=size, old_format=old_format, features=features, stripe_unit=self.rbd_stripe_unit, stripe_count=self.rbd_stripe_count) def _delete_backup_snapshot(self, rados_client: 'rados.Rados', base_name: Optional[str], backup_id: str) -> Tuple[Optional[str], int]: """Delete snapshot associated with this backup if one exists. A backup should have at most ONE associated snapshot. This is required before attempting to delete the base image. The snapshot on the original volume can be left as it will be purged when the volume is deleted. Returns tuple(deleted_snap_name, num_of_remaining_snaps). 
""" remaining_snaps = 0 base_rbd = eventlet.tpool.Proxy(self.rbd.Image(rados_client.ioctx, base_name)) try: snap_name = self._get_backup_snap_name(base_rbd, base_name, backup_id) if snap_name: LOG.debug("Deleting backup snapshot='%s'", snap_name) base_rbd.remove_snap(snap_name) else: LOG.debug("No backup snapshot to delete") # Now check whether any snapshots remain on the base image backup_snaps = self.get_backup_snaps(base_rbd) if backup_snaps: remaining_snaps = len(backup_snaps) finally: base_rbd.close() return snap_name, remaining_snaps def _try_delete_base_image(self, backup: 'objects.Backup', base_name: Optional[str] = None) -> None: """Try to delete backup RBD image. If the rbd image is a base image for incremental backups, it may have snapshots. Delete the snapshot associated with backup_id and if the image has no more snapshots, delete it. Otherwise return. If no base name is provided try normal (full) format then diff format image name. If a base name is provided but does not exist, ImageNotFound will be raised. If the image is busy, a number of retries will be performed if ImageBusy is received, after which the exception will be propagated to the caller. """ retries = 3 delay = 5 try_diff_format = False volume_id = backup.volume_id if base_name is None: try_diff_format = True base_name = self._get_backup_base_name(volume_id, backup=backup) LOG.debug("Trying diff format basename='%(basename)s' for " "backup base image of volume %(volume)s.", {'basename': base_name, 'volume': volume_id}) with eventlet.tpool.Proxy(rbd_driver.RADOSClient(self, backup.container)) as client: rbd_exists, base_name = \ self._rbd_image_exists(base_name, volume_id, client, try_diff_format=try_diff_format) if not rbd_exists: raise self.rbd.ImageNotFound(_("image %s not found") % base_name) while retries >= 0: # First delete associated snapshot from base image (if exists) snap, rem = self._delete_backup_snapshot(client, base_name, backup.id) if rem: LOG.info( "Backup base image of volume %(volume)s still " "has %(snapshots)s snapshots so skipping base " "image delete.", {'snapshots': rem, 'volume': volume_id}) return LOG.info("Deleting backup base image='%(basename)s' of " "volume %(volume)s.", {'basename': base_name, 'volume': volume_id}) # Delete base if no more snapshots try: eventlet.tpool.Proxy(self.rbd.RBD()).remove( client.ioctx, base_name) except self.rbd.ImageBusy: # Allow a retry if the image is busy if retries > 0: LOG.info("Backup image of volume %(volume)s is " "busy, retrying %(retries)s more time(s) " "in %(delay)ss.", {'retries': retries, 'delay': delay, 'volume': volume_id}) else: LOG.error("Max retries reached deleting backup " "%(basename)s image of volume %(volume)s.", {'volume': volume_id, 'basename': base_name}) raise else: LOG.debug("Base backup image='%(basename)s' of volume " "%(volume)s deleted.", {'basename': base_name, 'volume': volume_id}) retries = 0 finally: retries -= 1 # Since we have deleted the base image we can delete the source # volume backup snapshot. 
src_name = volume_id if src_name in eventlet.tpool.Proxy( self.rbd.RBD()).list(client.ioctx): LOG.debug("Deleting source volume snapshot '%(snapshot)s' " "for backup %(basename)s.", {'snapshot': snap, 'basename': base_name}) src_rbd = eventlet.tpool.Proxy(self.rbd.Image(client.ioctx, src_name)) try: src_rbd.remove_snap(snap) finally: src_rbd.close() def _piped_execute(self, cmd1: list, cmd2: list) -> Tuple[int, bytes]: """Pipe output of cmd1 into cmd2.""" LOG.debug("Piping cmd1='%s' into...", ' '.join(cmd1)) LOG.debug("cmd2='%s'", ' '.join(cmd2)) with tempfile.TemporaryFile() as errfile: try: p1 = subprocess.Popen(cmd1, stdout=subprocess.PIPE, stderr=errfile, close_fds=True) except OSError as e: LOG.error("Pipe1 failed - %s ", e) raise # NOTE(dosaboy): ensure that the pipe is blocking. This is to work # around the case where evenlet.green.subprocess is used which # seems to use a non-blocking pipe. assert p1.stdout is not None flags = fcntl.fcntl(p1.stdout, fcntl.F_GETFL) & (~os.O_NONBLOCK) fcntl.fcntl(p1.stdout, fcntl.F_SETFL, flags) try: p2 = subprocess.Popen(cmd2, stdin=p1.stdout, stdout=subprocess.PIPE, stderr=errfile, close_fds=True) except OSError as e: LOG.error("Pipe2 failed - %s ", e) raise p1.stdout.close() p2.communicate() p1.wait() errfile.seek(0) px_stderr = errfile.read() return p1.returncode or p2.returncode, px_stderr def _rbd_diff_transfer(self, src_name: str, src_pool: str, dest_name: str, dest_pool: str, src_user: str, src_conf: Optional[str], dest_user: str, dest_conf: Optional[str], src_snap: Optional[str] = None, from_snap: Optional[str] = None) -> None: """Copy only extents changed between two points. If no snapshot is provided, the diff extents will be all those changed since the rbd volume/base was created, otherwise it will be those changed since the snapshot was created. """ LOG.debug("Performing differential transfer from '%(src)s' to " "'%(dest)s'", {'src': src_name, 'dest': dest_name}) # NOTE(dosaboy): Need to be tolerant of clusters/clients that do # not support these operations since at the time of writing they # were very new. 
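        # Illustrative shape of the pipeline assembled below (placeholders
        # hypothetical):
        #   rbd export-diff --id <src_user> --conf <src_conf> --pool <src_pool>
        #       [--from-snap <from_snap>] <src_pool>/<src_name>[@<src_snap>] -
        #   | rbd import-diff --id <dest_user> --conf <dest_conf>
        #       --pool <dest_pool> - <dest_pool>/<dest_name>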
src_ceph_args = self._ceph_args(src_user, src_conf, pool=src_pool) dest_ceph_args = self._ceph_args(dest_user, dest_conf, pool=dest_pool) cmd1 = ['rbd', 'export-diff'] + src_ceph_args if from_snap is not None: cmd1.extend(['--from-snap', from_snap]) if src_snap: path = "%s/%s@%s" % (src_pool, src_name, src_snap) else: path = "%s/%s" % (src_pool, src_name) cmd1.extend([path, '-']) cmd2 = ['rbd', 'import-diff'] + dest_ceph_args rbd_path = "%s/%s" % (dest_pool, dest_name) cmd2.extend(['-', rbd_path]) ret, stderr = self._piped_execute(cmd1, cmd2) if ret: msg = (_("RBD diff op failed - (ret=%(ret)s stderr=%(stderr)s)") % {'ret': ret, 'stderr': stderr}) LOG.info(msg) raise exception.BackupRBDOperationFailed(msg) def _rbd_image_exists( self, name: str, volume_id: str, client: 'rados.Rados', try_diff_format: Optional[bool] = False) -> Tuple[bool, str]: """Return tuple (exists, name).""" rbds = eventlet.tpool.Proxy(self.rbd.RBD()).list(client.ioctx) if name not in rbds: LOG.debug("Image '%s' not found - trying diff format name", name) if try_diff_format: name = CephBackupDriver._get_backup_base_name(volume_id) if name not in rbds: LOG.debug("Diff format image '%s' not found", name) return False, name else: return False, name return True, name def _snap_exists(self, base_name: str, snap_name: str, client: 'rados.Rados') -> bool: """Return True if snapshot exists in base image.""" base_rbd = eventlet.tpool.Proxy(self.rbd.Image(client.ioctx, base_name, read_only=True)) try: snaps = base_rbd.list_snaps() if snaps is None: return False for snap in snaps: if snap['name'] == snap_name: return True finally: base_rbd.close() return False def _full_rbd_backup(self, container: str, base_name: str, length: int) -> Tuple[Optional[str], bool]: """Create the base_image for a full RBD backup.""" with eventlet.tpool.Proxy(rbd_driver.RADOSClient(self, container)) as client: self._create_base_image(base_name, length, client) # Now we just need to return from_snap=None and image_created=True, if # there is some exception in making backup snapshot, will clean up the # base image. 
return None, True def _incremental_rbd_backup( self, backup: 'objects.Backup', base_name: str, length: int, source_rbd_image, volume_id: str) -> Tuple[Optional[str], bool]: """Select the last snapshot for a RBD incremental backup.""" container = backup.container last_incr = backup.parent_id LOG.debug("Trying to perform an incremental backup with container: " "%(container)s, base_name: %(base)s, source RBD image: " "%(source)s, volume ID %(volume)s and last incremental " "backup ID: %(incr)s.", {'container': container, 'base': base_name, 'source': source_rbd_image, 'volume': volume_id, 'incr': last_incr, }) with eventlet.tpool.Proxy(rbd_driver.RADOSClient(self, container)) as client: try: base_rbd = eventlet.tpool.Proxy( self.rbd.Image(client.ioctx, base_name, read_only=True)) except rbd.ImageNotFound: msg = (_( "Can't find base name image %(base)s.") % {'base': base_name}) LOG.error(msg) raise exception.BackupRBDOperationFailed(msg) try: from_snap = self._get_backup_snap_name(base_rbd, base_name, last_incr) if from_snap is None: msg = (_( "Can't find snapshot from parent %(incr)s and " "base name image %(base)s.") % {'incr': last_incr, 'base': base_name}) LOG.error(msg) raise exception.BackupRBDOperationFailed(msg) finally: base_rbd.close() return from_snap, False def _backup_rbd(self, backup: 'objects.Backup', volume_file: linuxrbd.RBDVolumeIOWrapper, volume_name: str, length: int) -> Dict[str, str]: """Create an incremental or full backup from an RBD image.""" rbd_user = volume_file.rbd_user rbd_pool = volume_file.rbd_pool rbd_conf = volume_file.rbd_conf source_rbd_image = eventlet.tpool.Proxy(volume_file.rbd_image) volume_id = backup.volume_id base_name = self._get_backup_base_name(volume_id, backup=backup) snaps_to_keep = CONF.backup_ceph_max_snapshots # If backup.parent_id is None performs full RBD backup if backup.parent_id is None: from_snap, image_created = self._full_rbd_backup(backup.container, base_name, length) # Otherwise performs incremental rbd backup else: # Check if there is at least one snapshot to base an incremental # backup on. If not, we cannot perform an incremental backup and # fall back to full backup. no_source_snaps = snaps_to_keep > 0 and \ self._get_backup_snap_name( source_rbd_image, base_name, backup.parent_id) is None # If true, force full backup if no_source_snaps: # Unset parent so we get a new backup base name backup.parent = None # The backup will be a full one, so it has no parent ID. # This will mark the backup as a full backup in the database. backup.parent_id = None backup.save() base_name = self.\ _get_backup_base_name(volume_id, backup=backup) LOG.info("Incremental backup was requested, but there are no " "snapshots present to use as base, " "forcing full backup.") self.message_api.create( context=self.context, action=message_field.Action.BACKUP_CREATE, resource_uuid=volume_id, detail=message_field.Detail. 
INCREMENTAL_BACKUP_FORCES_FULL_BACKUP, level="WARNING" ) from_snap, image_created = self._full_rbd_backup( backup.container, base_name, length) else: # Incremental backup rbd_img = source_rbd_image from_snap, image_created = \ self._incremental_rbd_backup(backup, base_name, length, rbd_img, volume_id) LOG.debug("Using --from-snap '%(snap)s' for incremental backup of " "volume %(volume)s.", {'snap': from_snap, 'volume': volume_id}) # Snapshot source volume so that we have a new point-in-time new_snap = self._get_new_snap_name(backup.id) LOG.debug("Creating backup snapshot='%s'", new_snap) source_rbd_image.create_snap(new_snap) # Attempt differential backup. If this fails, perhaps because librbd # or Ceph cluster version does not support it, do a full backup # instead. # # TODO(dosaboy): find a way to determine if the operation is supported # rather than brute force approach. try: before = time.time() self._rbd_diff_transfer(volume_name, rbd_pool, base_name, backup.container, src_user=rbd_user, src_conf=rbd_conf, dest_user=self._ceph_backup_user, dest_conf=self._ceph_backup_conf, src_snap=new_snap, from_snap=from_snap) LOG.debug("Differential backup transfer completed in %.4fs", (time.time() - before)) # only keep last n snapshots and delete older ones if snaps_to_keep > 0: self._remove_last_snapshots(source_rbd_image, snaps_to_keep) else: LOG.debug("Not deleting any snapshots because " "all should be kept") except exception.BackupRBDOperationFailed: with excutils.save_and_reraise_exception(): LOG.debug("Differential backup transfer failed") # Clean up if image was created as part of this operation if image_created: self._try_delete_base_image(backup, base_name=base_name) # Delete snapshot LOG.debug("Deleting diff backup snapshot='%(snapshot)s' of " "source volume='%(volume)s'.", {'snapshot': new_snap, 'volume': volume_id}) source_rbd_image.remove_snap(new_snap) return {'service_metadata': '{"base": "%s"}' % base_name} def _remove_last_snapshots(self, source_rbd_image, snaps_to_keep: int): # only keep last n snapshots and delete older ones for the source # image provided snap_list = [] try: snap_list = self.get_backup_snaps(source_rbd_image) except Exception as e: LOG.debug( "Failed to get snapshot list for %s: %s", source_rbd_image, e ) remaining_snaps = len(snap_list) LOG.debug("Snapshot list: %s", snap_list) if remaining_snaps > snaps_to_keep: snaps_to_delete = remaining_snaps - snaps_to_keep LOG.debug( "There are %s snapshots and %s should be kept, " "deleting the oldest %s snapshots", remaining_snaps, snaps_to_keep, snaps_to_delete, ) for i in range(snaps_to_delete): LOG.debug("Deleting snapshot %s", snap_list[i]) try: source_rbd_image.remove_snap(snap_list[i]["name"]) except Exception as e: LOG.debug( "Failed to delete snapshot %s: %s", snap_list[i], e ) else: LOG.debug( "There are %s snapshots and %s should be kept, " "not deleting any snapshots", remaining_snaps, snaps_to_keep, ) @staticmethod def _file_is_rbd(volume_file: linuxrbd.RBDVolumeIOWrapper) -> bool: """Returns True if the volume_file is actually an RBD image.""" return hasattr(volume_file, 'rbd_image') def _full_backup(self, backup: 'objects.Backup', src_volume: linuxrbd.RBDVolumeIOWrapper, src_name: str, length: int) -> None: """Perform a full backup of src volume. First creates a base backup image in our backup location then performs an chunked copy of all data from source volume to a new backup rbd image. 
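        For example (figures illustrative), with the default
        ``backup_ceph_chunk_size`` of 128 MiB a 1 GiB source volume is copied
        to the new backup image in 8 sequential chunks.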
""" volume_id = backup.volume_id if backup.snapshot_id: backup_name = self._get_backup_base_name(volume_id) else: backup_name = self._get_backup_base_name(volume_id, backup=backup) with eventlet.tpool.Proxy(rbd_driver.RADOSClient(self, backup.container)) as client: # First create base backup image old_format, features = self._get_rbd_support() LOG.debug("Creating backup base image='%(name)s' for volume " "%(volume)s.", {'name': backup_name, 'volume': volume_id}) eventlet.tpool.Proxy(self.rbd.RBD()).create( ioctx=client.ioctx, name=backup_name, size=length, old_format=old_format, features=features, stripe_unit=self.rbd_stripe_unit, stripe_count=self.rbd_stripe_count) LOG.debug("Copying data from volume %s.", volume_id) dest_rbd = eventlet.tpool.Proxy(self.rbd.Image(client.ioctx, backup_name)) meta_io_proxy = None try: rbd_meta = linuxrbd.RBDImageMetadata(dest_rbd, backup.container, self._ceph_backup_user, self._ceph_backup_conf) rbd_fd = linuxrbd.RBDVolumeIOWrapper(rbd_meta) meta_io_proxy = eventlet.tpool.Proxy(rbd_fd) self._transfer_data(src_volume, src_name, meta_io_proxy, backup_name, length) finally: # Closing the wrapper will close the image as well if meta_io_proxy: meta_io_proxy.close() else: dest_rbd.close() @staticmethod def backup_snapshot_name_pattern() -> str: """Returns the pattern used to match backup snapshots. It is essential that snapshots created for purposes other than backups do not have this name format. """ return r"^backup\.([a-z0-9\-]+?)\.snap\.(.+)$" @classmethod def get_backup_snaps(cls, rbd_image: 'rbd.Image', sort: bool = False) -> List[dict]: """Get all backup snapshots for the given rbd image. NOTE: this call is made public since these snapshots must be deleted before the base volume can be deleted. """ snaps = rbd_image.list_snaps() backup_snaps = [] for snap in snaps: search_key = cls.backup_snapshot_name_pattern() result = re.search(search_key, snap['name']) if result: backup_snaps.append({'name': result.group(0), 'backup_id': result.group(1), 'timestamp': result.group(2)}) if sort: # Sort into ascending order of timestamp backup_snaps.sort(key=lambda x: x['timestamp'], reverse=True) return backup_snaps def _get_new_snap_name(self, backup_id: str) -> str: return "backup.%s.snap.%s" % (backup_id, time.time()) def _get_backup_snap_name(self, rbd_image: 'rbd.Image', name: Optional[str], backup_id: str): """Return the name of the snapshot associated with backup_id. The rbd image provided must be the base image used for an incremental backup. A backup is only allowed ONE associated snapshot. If more are found, exception.BackupOperationError is raised. """ snaps = self.get_backup_snaps(rbd_image) LOG.debug("Looking for snapshot of backup base '%s'", name) if not snaps: LOG.debug("Backup base '%s' has no snapshots", name) return None snaps = [snap['name'] for snap in snaps if snap['backup_id'] == backup_id] if not snaps: LOG.debug("Backup '%s' has no snapshot", backup_id) return None if len(snaps) > 1: msg = (_("Backup should only have one snapshot but instead has %s") % len(snaps)) raise exception.BackupOperationError(msg) LOG.debug("Found snapshot '%s'", snaps[0]) return snaps[0] def _get_volume_size_bytes(self, volume: 'objects.Volume') -> int: """Return the size in bytes of the given volume. Raises exception.InvalidParameterValue if volume size is 0. 
""" if int(volume['size']) == 0: errmsg = _("Need non-zero volume size") raise exception.InvalidParameterValue(errmsg) return int(volume['size']) * units.Gi def _backup_metadata(self, backup: 'objects.Backup') -> None: """Backup volume metadata. NOTE(dosaboy): the metadata we are backing up is obtained from a versioned api so we should not alter it in any way here. We must also be sure that the service that will perform the restore is compatible with version used. """ json_meta = self.get_metadata(backup.volume_id) if not json_meta: LOG.debug("No metadata to backup for volume %s.", backup.volume_id) return LOG.debug("Backing up metadata for volume %s.", backup.volume_id) try: with eventlet.tpool.Proxy(rbd_driver.RADOSClient(self, backup.container)) as client: vol_meta_backup = VolumeMetadataBackup(client, backup.id) vol_meta_backup.set(json_meta) except exception.VolumeMetadataBackupExists as e: msg = (_("Failed to backup volume metadata - %s") % e) raise exception.BackupOperationError(msg) def backup(self, backup: 'objects.Backup', volume_file: linuxrbd.RBDVolumeIOWrapper, backup_metadata: bool = True) -> dict: """Backup volume and metadata (if available) to Ceph object store. If the source volume is an RBD we will attempt to do an incremental/differential backup, otherwise a full copy is performed. If this fails we will attempt to fall back to full copy. """ volume = self.db.volume_get(self.context, backup.volume_id) updates = {} if not backup.container: backup.container = self._ceph_backup_pool backup.save() LOG.debug("Starting backup of volume='%s'.", volume.id) # Ensure we are at the beginning of the volume volume_file.seek(0) length = self._get_volume_size_bytes(volume) if backup.snapshot_id: do_full_backup = True elif self._file_is_rbd(volume_file): # If volume an RBD, attempt incremental or full backup. do_full_backup = False LOG.debug("Volume file is RBD: attempting optimized backup") try: updates = self._backup_rbd(backup, volume_file, volume.name, length) except exception.BackupRBDOperationFailed: with excutils.save_and_reraise_exception(): self.delete_backup(backup) else: if backup.parent_id: LOG.debug("Volume file is NOT RBD: can't perform " "incremental backup.") raise exception.BackupRBDOperationFailed LOG.debug("Volume file is NOT RBD: will do full backup.") do_full_backup = True if do_full_backup: try: self._full_backup(backup, volume_file, volume.name, length) except exception.BackupOperationError: with excutils.save_and_reraise_exception(): self.delete_backup(backup) if backup_metadata: try: self._backup_metadata(backup) except exception.BackupOperationError: with excutils.save_and_reraise_exception(): # Cleanup. self.delete_backup(backup) LOG.debug("Backup '%(backup_id)s' of volume %(volume_id)s finished.", {'backup_id': backup.id, 'volume_id': volume.id}) return updates def _full_restore(self, backup: 'objects.Backup', dest_file, dest_name: str, length: int, volume_is_new: bool, src_snap=None) -> None: """Restore volume using full copy i.e. all extents. This will result in all extents being copied from source to destination. :param backup: Backup object describing the backup to be restored. :param dest_file: File object of the destination volume. :param dest_name: Name of the destination volume. :param length: Size of the destination volume in bytes. :param volume_is_new: True if the destination volume is new. :param src_snap: A string, the name of the restore point snapshot, optional, used for incremental backups or RBD backup. 
""" with eventlet.tpool.Proxy(rbd_driver.RADOSClient(self, backup.container)) as client: # In case of snapshot_id, the old base name format is used: # volume-.backup.base # Otherwise, the new base name format is used: # volume-.backup- # Should match the base name format in _full_backup() if backup.snapshot_id: backup_name = self._get_backup_base_name(backup.volume_id) else: backup_name = self._get_backup_base_name(backup.volume_id, backup=backup) try: # Retrieve backup volume _src = src_snap src_rbd = eventlet.tpool.Proxy(self.rbd.Image(client.ioctx, backup_name, snapshot=_src, read_only=True)) except rbd.ImageNotFound: # Check for another base name as a fallback mechanism, in case # the backup image is not found under the expected name. # The main reason behind having two different base name formats # is due to a change in the naming convention at some point in # the history of the Cinder project. # This approach ensures backward compatibility and makes it # possible to restore older backups that were created before # the change. tried_name = backup_name if backup.snapshot_id: backup_name = self._get_backup_base_name(backup.volume_id, backup=backup) else: backup_name = self._get_backup_base_name(backup.volume_id) msg = (_("Backup %(backup_id)s of volume %(volume_id)s" " not found with name %(tried_name)s," " trying a legacy name %(next_name)s.") % {'backup_id': backup.id, 'volume_id': backup.volume_id, 'tried_name': tried_name, 'next_name': backup_name}) LOG.info(msg) src_rbd = eventlet.tpool.Proxy(self.rbd.Image( client.ioctx, backup_name, snapshot=_src, read_only=True)) try: rbd_meta = linuxrbd.RBDImageMetadata(src_rbd, backup.container, self._ceph_backup_user, self._ceph_backup_conf) rbd_fd = linuxrbd.RBDVolumeIOWrapper(rbd_meta) self._transfer_data(eventlet.tpool.Proxy(rbd_fd), backup_name, dest_file, dest_name, length, discard_zeros=volume_is_new) finally: src_rbd.close() def _check_restore_vol_size(self, backup: 'objects.Backup', restore_vol, restore_length: int, src_pool) -> None: """Ensure that the restore volume is the correct size. If the restore volume was bigger than the backup, the diff restore will shrink it to the size of the original backup so we need to post-process and resize it back to its expected size. 
""" backup_base = self._get_backup_base_name(backup.volume_id, backup=backup) with eventlet.tpool.Proxy(rbd_driver.RADOSClient(self, backup.container)) as client: adjust_size = 0 base_image = eventlet.tpool.Proxy(self.rbd.Image(client.ioctx, backup_base, read_only=True)) try: if restore_length != base_image.size(): adjust_size = restore_length finally: base_image.close() if adjust_size: LOG.debug("Adjusting restore vol size") restore_vol.rbd_image.resize(adjust_size) def _diff_restore_rbd(self, backup: 'objects.Backup', restore_file, restore_name: str, restore_point: Optional[str], restore_length: int) -> None: """Attempt restore rbd volume from backup using diff transfer.""" rbd_user = restore_file.rbd_user rbd_pool = restore_file.rbd_pool rbd_conf = restore_file.rbd_conf base_name = self._get_backup_base_name(backup.volume_id, backup=backup) LOG.debug("Attempting incremental restore from base='%(base)s' " "snap='%(snap)s'", {'base': base_name, 'snap': restore_point}) before = time.time() try: self._rbd_diff_transfer(base_name, backup.container, restore_name, rbd_pool, src_user=self._ceph_backup_user, src_conf=self._ceph_backup_conf, dest_user=rbd_user, dest_conf=rbd_conf, src_snap=restore_point) except exception.BackupRBDOperationFailed: LOG.exception("Differential restore failed, trying full restore") raise # If the volume we are restoring to is larger than the backup volume, # we will need to resize it after the diff import since import-diff # appears to shrink the target rbd volume to the size of the original # backup volume. self._check_restore_vol_size(backup, restore_file, restore_length, rbd_pool) LOG.debug("Restore transfer completed in %.4fs", (time.time() - before)) def _get_restore_point(self, base_name: str, backup_id: str) -> Optional[str]: """Get restore point snapshot name for incremental backup. If the backup was not incremental (determined by the fact that the base has no snapshots/restore points), None is returned. Otherwise, the restore point associated with backup_id is returned. """ with eventlet.tpool.Proxy(rbd_driver.RADOSClient(self, self._ceph_backup_pool)) as client: base_rbd = eventlet.tpool.Proxy(self.rbd.Image(client.ioctx, base_name, read_only=True)) try: restore_point = self._get_backup_snap_name(base_rbd, base_name, backup_id) finally: base_rbd.close() return restore_point @staticmethod def _rbd_has_extents(rbd_volume) -> bool: """Check whether the given rbd volume has extents. Return True if has extents, otherwise False. """ extents = [] def iter_cb(offset, length, exists): if exists: extents.append(length) rbd_volume.diff_iterate(0, rbd_volume.size(), None, iter_cb) if extents: LOG.debug("RBD has %s extents", sum(extents)) return True return False def _diff_restore_allowed(self, base_name: str, backup: 'objects.Backup', volume: 'objects.Volume', volume_file: linuxrbd.RBDVolumeIOWrapper, rados_client: 'rados.Rados' ) -> Tuple[bool, Optional[str]]: """Determine if differential restore is possible and restore point. Determine whether a differential restore is possible/allowed, and find out the restore point if backup base is diff-format. In order for a differential restore to be performed we need: * destination volume must be RBD * destination volume must have zero extents * backup base image must exist * backup must have a restore point * target volume is different from source volume of backup Returns True if differential restore is allowed, False otherwise. Return the restore point if back base is diff-format. 
""" # NOTE(dosaboy): base_name here must be diff format. rbd_exists, base_name = self._rbd_image_exists(base_name, backup.volume_id, rados_client) if not rbd_exists: return False, None # Get the restore point. If no restore point is found, we assume # that the backup was not performed using diff/incremental methods # so we enforce full copy. restore_point = self._get_restore_point(base_name, backup.id) if restore_point: if self._file_is_rbd(volume_file): LOG.debug("Volume file is RBD.") # If the volume we are restoring to is the volume the backup # was made from, force a full restore since a diff will not # work in this case. if volume.id == backup.volume_id: LOG.debug("Destination volume is same as backup source " "volume %s - forcing full copy.", volume.id) return False, restore_point # If the destination volume has extents we cannot allow a diff # restore. if self._rbd_has_extents(volume_file.rbd_image): # We return the restore point so that a full copy is done # from snapshot. LOG.debug("Destination has extents - forcing full copy") return False, restore_point return True, restore_point else: LOG.debug("Volume file is NOT RBD.") else: LOG.info("No restore point found for backup='%(backup)s' of " "volume %(volume)s although base image is found - " "forcing full copy.", {'backup': backup.id, 'volume': backup.volume_id}) return False, restore_point def _restore_volume(self, backup: 'objects.Backup', volume: 'objects.Volume', volume_file: linuxrbd.RBDVolumeIOWrapper, volume_is_new: bool) -> None: """Restore volume from backup using diff transfer if possible. Attempts a differential restore and reverts to full copy if diff fails. """ length = int(volume.size) * units.Gi if backup.service_metadata: base_name = self._get_backup_base_name(backup.volume_id, backup) else: base_name = self._get_backup_base_name(backup.volume_id) with eventlet.tpool.Proxy(rbd_driver.RADOSClient( self, backup.container)) as client: diff_allowed, restore_point = \ self._diff_restore_allowed(base_name, backup, volume, volume_file, client) do_full_restore = True if diff_allowed: # Attempt diff try: LOG.debug("Attempting differential restore.") self._diff_restore_rbd(backup, volume_file, volume.name, restore_point, length) do_full_restore = False except exception.BackupRBDOperationFailed: LOG.debug("Forcing full restore to volume %s.", volume.id) if do_full_restore: # Otherwise full copy LOG.debug("Running full restore.") self._full_restore(backup, volume_file, volume.name, length, volume_is_new, src_snap=restore_point) def _restore_metadata(self, backup: 'objects.Backup', volume_id: str) -> None: """Restore volume metadata from backup. If this backup has associated metadata, save it to the restore target otherwise do nothing. """ try: with eventlet.tpool.Proxy(rbd_driver.RADOSClient(self)) as client: meta_bak = VolumeMetadataBackup(client, backup.id) meta = meta_bak.get() if meta is not None: self.put_metadata(volume_id, meta) else: LOG.debug("Volume %s has no backed up metadata.", backup.volume_id) except exception.BackupMetadataUnsupportedVersion: msg = _("Metadata restore failed due to incompatible version") raise exception.BackupOperationError(msg) def restore(self, backup: 'objects.Backup', volume_id: str, volume_file: linuxrbd.RBDVolumeIOWrapper, volume_is_new: bool) -> None: """Restore volume from backup in Ceph object store. If volume metadata is available this will also be restored. 
""" target_volume = self.db.volume_get(self.context, volume_id) LOG.debug('Starting restore from Ceph backup=%(src)s to ' 'volume=%(dest)s new=%(new)s', {'src': backup.id, 'dest': target_volume.name, 'new': volume_is_new}) try: self._restore_volume(backup, target_volume, volume_file, volume_is_new) # Be tolerant of IO implementations that do not support fileno() try: fileno = volume_file.fileno() except IOError: LOG.debug("Restore target I/O object does not support " "fileno() - skipping call to fsync().") else: os.fsync(fileno) self._restore_metadata(backup, volume_id) LOG.debug('Restore to volume %s finished successfully.', volume_id) except exception.BackupOperationError as e: LOG.error('Restore to volume %(volume)s finished with error - ' '%(error)s.', {'error': e, 'volume': volume_id}) raise def delete_backup(self, backup: 'objects.Backup') -> None: """Delete the given backup from Ceph object store.""" LOG.debug('Delete started for backup=%s', backup.id) delete_failed = False has_pool = True try: self._try_delete_base_image(backup) except self.rbd.ImageNotFound: LOG.warning( "RBD image for backup %(backup)s of volume %(volume)s " "not found. Deleting backup metadata.", {'backup': backup.id, 'volume': backup.volume_id}) delete_failed = True except self.rados.ObjectNotFound: LOG.warning("The pool %(pool)s doesn't exist.", {'pool': backup.container}) delete_failed = True has_pool = False if has_pool: with eventlet.tpool.Proxy(rbd_driver.RADOSClient( self, backup.container)) as client: VolumeMetadataBackup(client, backup.id).remove_if_exists() if delete_failed: LOG.info("Delete of backup '%(backup)s' for volume '%(volume)s' " "finished with warning.", {'backup': backup.id, 'volume': backup.volume_id}) else: LOG.debug("Delete of backup '%(backup)s' for volume " "'%(volume)s' finished.", {'backup': backup.id, 'volume': backup.volume_id}) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/backup/drivers/gcs.py0000664000175000017500000003613600000000000020415 0ustar00zuulzuul00000000000000# Copyright (C) 2012 Hewlett-Packard Development Company, L.P. # Copyright (c) 2014 TrilioData, Inc # Copyright (c) 2015 EMC Corporation # Copyright (C) 2015 Kevin Fox # Copyright (C) 2015 Tom Barron # Copyright (C) 2016 Vedams Inc. # Copyright (C) 2016 Google Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Implementation of a backup service using Google Cloud Storage(GCS) Google Cloud Storage json apis are used for backup operations. Authentication and authorization are based on OAuth2.0. Server-centric flow is used for authentication. 
""" import base64 import hashlib import io import os try: from google.auth import exceptions as gexceptions from google.oauth2 import service_account import google_auth_httplib2 except ImportError: service_account = google_auth_httplib2 = gexceptions = None from googleapiclient import discovery from googleapiclient import errors from googleapiclient import http from oslo_config import cfg from oslo_log import log as logging from oslo_utils import timeutils from cinder.backup import chunkeddriver from cinder import exception from cinder.i18n import _ from cinder import interface LOG = logging.getLogger(__name__) gcsbackup_service_opts = [ cfg.StrOpt('backup_gcs_bucket', help='The GCS bucket to use.'), cfg.IntOpt('backup_gcs_object_size', default=52428800, help='The size in bytes of GCS backup objects.'), cfg.IntOpt('backup_gcs_block_size', default=32768, help='The size in bytes that changes are tracked ' 'for incremental backups. backup_gcs_object_size ' 'has to be multiple of backup_gcs_block_size.'), cfg.IntOpt('backup_gcs_reader_chunk_size', default=2097152, help='GCS object will be downloaded in chunks of bytes.'), cfg.IntOpt('backup_gcs_writer_chunk_size', default=2097152, help='GCS object will be uploaded in chunks of bytes. ' 'Pass in a value of -1 if the file ' 'is to be uploaded as a single chunk.'), cfg.IntOpt('backup_gcs_num_retries', default=3, help='Number of times to retry.'), cfg.ListOpt('backup_gcs_retry_error_codes', default=['429'], help='List of GCS error codes.'), cfg.StrOpt('backup_gcs_bucket_location', default='US', help='Location of GCS bucket.'), cfg.StrOpt('backup_gcs_storage_class', default='NEARLINE', help='Storage class of GCS bucket.'), cfg.StrOpt('backup_gcs_credential_file', help='Absolute path of GCS service account credential file.'), cfg.StrOpt('backup_gcs_project_id', help='Owner project id for GCS bucket.'), cfg.StrOpt('backup_gcs_user_agent', default='gcscinder', help='Http user-agent string for gcs api.'), cfg.BoolOpt('backup_gcs_enable_progress_timer', default=True, help='Enable or Disable the timer to send the periodic ' 'progress notifications to Ceilometer when backing ' 'up the volume to the GCS backend storage. 
The ' 'default value is True to enable the timer.'), cfg.URIOpt('backup_gcs_proxy_url', help='URL for http proxy access.', secret=True), ] CONF = cfg.CONF CONF.register_opts(gcsbackup_service_opts) OAUTH_EXCEPTIONS = None # Google Cloud Storage(GCS) backup driver class GCSConnectionFailure(exception.BackupDriverException): message = _("Google Cloud Storage connection failure: %(reason)s") class GCSApiFailure(exception.BackupDriverException): message = _("Google Cloud Storage api failure: %(reason)s") class GCSOAuth2Failure(exception.BackupDriverException): message = _("Google Cloud Storage oauth2 failure: %(reason)s") def gcs_logger(func): def func_wrapper(self, *args, **kwargs): try: return func(self, *args, **kwargs) except errors.Error as err: raise GCSApiFailure(reason=err) except OAUTH_EXCEPTIONS as err: raise GCSOAuth2Failure(reason=err) except Exception as err: raise GCSConnectionFailure(reason=err) return func_wrapper @interface.backupdriver class GoogleBackupDriver(chunkeddriver.ChunkedBackupDriver): """Provides backup, restore and delete of backup objects within GCS.""" def __init__(self, context): global OAUTH_EXCEPTIONS backup_bucket = CONF.backup_gcs_bucket self.gcs_project_id = CONF.backup_gcs_project_id chunk_size_bytes = CONF.backup_gcs_object_size sha_block_size_bytes = CONF.backup_gcs_block_size enable_progress_timer = CONF.backup_gcs_enable_progress_timer super().__init__( context, chunk_size_bytes, sha_block_size_bytes, backup_bucket, enable_progress_timer, ) self.reader_chunk_size = CONF.backup_gcs_reader_chunk_size self.writer_chunk_size = CONF.backup_gcs_writer_chunk_size self.bucket_location = CONF.backup_gcs_bucket_location self.storage_class = CONF.backup_gcs_storage_class self.num_retries = CONF.backup_gcs_num_retries # Set or overwrite environmental proxy variables for httplib2 since # it's the only mechanism supported when using googleapiclient with # google-auth if CONF.backup_gcs_proxy_url: os.environ['http_proxy'] = CONF.backup_gcs_proxy_url backup_credential = CONF.backup_gcs_credential_file # service_account is imported if all required libraries are available if service_account: creds = service_account.Credentials.from_service_account_file( backup_credential) OAUTH_EXCEPTIONS = (gexceptions.RefreshError, gexceptions.DefaultCredentialsError) else: # NOTE(tkajinam): google-api-python-client is now in requirements # and google-auth-httplib2 is its dependency. So # this error should not be raised now. 
But it's # kept now in case the client library is moved to # extra dependencies msg = _('google-api-python-client not found') raise exception.BackupDriverException(reason=msg) self.conn = discovery.build('storage', 'v1', # Avoid log error on oauth2client >= 4.0.0 cache_discovery=False, credentials=creds) self.resumable = self.writer_chunk_size != -1 @staticmethod def get_driver_options(): return gcsbackup_service_opts def check_for_setup_error(self): required_options = ('backup_gcs_bucket', 'backup_gcs_credential_file', 'backup_gcs_project_id') for opt in required_options: val = getattr(CONF, opt, None) if not val: raise exception.InvalidConfigurationValue(option=opt, value=val) @gcs_logger def put_container(self, bucket): """Create the bucket if not exists.""" buckets = self.conn.buckets().list( project=self.gcs_project_id, prefix=bucket, fields="items(name)").execute( num_retries=self.num_retries).get('items', []) if not any(b.get('name') == bucket for b in buckets): self.conn.buckets().insert( project=self.gcs_project_id, body={'name': bucket, 'location': self.bucket_location, 'storageClass': self.storage_class}).execute( num_retries=self.num_retries) @gcs_logger def get_container_entries(self, bucket, prefix): """Get bucket entry names.""" obj_list_dict = self.conn.objects().list( bucket=bucket, fields="items(name)", prefix=prefix).execute(num_retries=self.num_retries).get( 'items', []) return [obj_dict.get('name') for obj_dict in obj_list_dict] def get_object_writer(self, bucket, object_name, extra_metadata=None): """Return a writer object. Returns a writer object that stores a chunk of volume data in a GCS object store. """ return GoogleObjectWriter(bucket, object_name, self.conn, self.writer_chunk_size, self.num_retries, self.resumable) def get_object_reader(self, bucket, object_name, extra_metadata=None): """Return reader object. Returns a reader object that retrieves a chunk of backed-up volume data from a GCS object store. """ return GoogleObjectReader(bucket, object_name, self.conn, self.reader_chunk_size, self.num_retries) @gcs_logger def delete_object(self, bucket, object_name): """Deletes a backup object from a GCS object store.""" self.conn.objects().delete( bucket=bucket, object=object_name).execute(num_retries=self.num_retries) def _generate_object_name_prefix(self, backup): """Generates a GCS backup object name prefix. prefix = volume_volid/timestamp/az_saz_backup_bakid volid is volume id. timestamp is time in UTC with format of YearMonthDateHourMinuteSecond. saz is storage_availability_zone. bakid is backup id for volid. 
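        For example (identifiers hypothetical), for volume id ``vol1``,
        backup id ``bak1``, availability zone ``nova`` and a UTC timestamp of
        2024-01-02 03:04:05, the generated prefix is
        ``volume_vol1/20240102030405/az_nova_backup_bak1``.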
""" az = 'az_%s' % self.az backup_name = '%s_backup_%s' % (az, backup.id) volume = 'volume_%s' % (backup.volume_id) timestamp = timeutils.utcnow().strftime("%Y%m%d%H%M%S") prefix = volume + '/' + timestamp + '/' + backup_name LOG.debug('generate_object_name_prefix: %s', prefix) return prefix def update_container_name(self, backup, bucket): """Use the bucket name as provided - don't update.""" return def get_extra_metadata(self, backup, volume): """GCS driver does not use any extra metadata.""" return class GoogleObjectWriter(object): def __init__(self, bucket, object_name, conn, writer_chunk_size, num_retries, resumable): self.bucket = bucket self.object_name = object_name self.conn = conn self.data = bytearray() self.chunk_size = writer_chunk_size self.num_retries = num_retries self.resumable = resumable def __enter__(self): return self def __exit__(self, exc_type, exc_value, traceback): self.close() def write(self, data): self.data += data @gcs_logger def close(self): media = http.MediaIoBaseUpload(io.BytesIO(self.data), 'application/octet-stream', chunksize=self.chunk_size, resumable=self.resumable) resp = self.conn.objects().insert( bucket=self.bucket, name=self.object_name, body={}, media_body=media).execute(num_retries=self.num_retries) etag = resp['md5Hash'] md5 = hashlib.md5(self.data, usedforsecurity=False).digest() md5 = md5.encode('utf-8') etag = bytes(etag, 'utf-8') md5 = base64.b64encode(md5) if etag != md5: err = _('MD5 of object: %(object_name)s before: ' '%(md5)s and after: %(etag)s is not same.') % { 'object_name': self.object_name, 'md5': md5, 'etag': etag, } raise exception.InvalidBackup(reason=err) else: LOG.debug('MD5 before: %(md5)s and after: %(etag)s ' 'writing object: %(object_name)s in GCS.', {'etag': etag, 'md5': md5, 'object_name': self.object_name, }) return md5 class GoogleObjectReader(object): def __init__(self, bucket, object_name, conn, reader_chunk_size, num_retries): self.bucket = bucket self.object_name = object_name self.conn = conn self.chunk_size = reader_chunk_size self.num_retries = num_retries def __enter__(self): return self def __exit__(self, exc_type, exc_value, traceback): pass @gcs_logger def read(self): req = self.conn.objects().get_media( bucket=self.bucket, object=self.object_name) fh = io.BytesIO() downloader = GoogleMediaIoBaseDownload( fh, req, chunksize=self.chunk_size) done = False while not done: status, done = downloader.next_chunk(num_retries=self.num_retries) LOG.debug('GCS Object download Complete.') return fh.getvalue() class GoogleMediaIoBaseDownload(http.MediaIoBaseDownload): @http.util.positional(1) def next_chunk(self, num_retries=None): error_codes = CONF.backup_gcs_retry_error_codes headers = {'range': 'bytes=%d-%d' % (self._progress, self._progress + self._chunksize)} gcs_http = self._request.http for retry_num in range(num_retries + 1): if retry_num > 0: self._sleep(self._rand() * 2 ** retry_num) resp, content = gcs_http.request(self._uri, headers=headers) if resp.status < 500 and (str(resp.status) not in error_codes): break if resp.status in [200, 206]: if 'content-location' in resp and ( resp['content-location'] != self._uri): self._uri = resp['content-location'] self._progress += len(content) self._fd.write(content) if 'content-range' in resp: content_range = resp['content-range'] length = content_range.rsplit('/', 1)[1] self._total_size = int(length) elif 'content-length' in resp: self._total_size = int(resp['content-length']) if self._progress == self._total_size: self._done = True return 
(http.MediaDownloadProgress(self._progress, self._total_size), self._done) else: raise http.HttpError(resp, content, uri=self._uri) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/backup/drivers/glusterfs.py0000664000175000017500000000724700000000000021660 0ustar00zuulzuul00000000000000# Copyright (c) 2015 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Implementation of a backup service that uses GlusterFS as the backend.""" import os import stat from os_brick.remotefs import remotefs as remotefs_brick from oslo_concurrency import processutils as putils from oslo_config import cfg from oslo_log import log as logging from oslo_log import versionutils from cinder.backup.drivers import posix from cinder import exception from cinder import interface from cinder import utils glusterfsbackup_service_opts = [ cfg.StrOpt('glusterfs_backup_mount_point', default='$state_path/backup_mount', help='Base dir containing mount point for gluster share.'), cfg.StrOpt('glusterfs_backup_share', help='GlusterFS share in ' ': format. ' 'Eg: 1.2.3.4:backup_vol'), ] LOG = logging.getLogger(__name__) CONF = cfg.CONF CONF.register_opts(glusterfsbackup_service_opts) @interface.backupdriver class GlusterfsBackupDriver(posix.PosixBackupDriver): """Provides backup, restore and delete using GlusterFS repository.""" def __init__(self, context): self.backup_mount_point_base = CONF.glusterfs_backup_mount_point self.backup_share = CONF.glusterfs_backup_share self._execute = putils.execute self._root_helper = utils.get_root_helper() backup_path = self._init_backup_repo_path() super().__init__(context, backup_path=backup_path) @staticmethod def get_driver_options(): return glusterfsbackup_service_opts def check_for_setup_error(self): """Raises error if any required configuration flag is missing.""" versionutils.report_deprecated_feature( LOG, "The Cinder GlusterFS Backup Driver is deprecated and will be " "removed in the 2025.1 release.") required_flags = ['glusterfs_backup_share'] for flag in required_flags: val = getattr(CONF, flag, None) if not val: raise exception.InvalidConfigurationValue(option=flag, value=val) def _init_backup_repo_path(self): remotefsclient = remotefs_brick.RemoteFsClient( 'glusterfs', self._root_helper, glusterfs_mount_point_base=self.backup_mount_point_base) remotefsclient.mount(self.backup_share) # Ensure we can write to this share mount_path = remotefsclient.get_mount_point(self.backup_share) group_id = os.getegid() current_group_id = utils.get_file_gid(mount_path) current_mode = utils.get_file_mode(mount_path) if group_id != current_group_id: cmd = ['chgrp', group_id, mount_path] self._execute(*cmd, root_helper=self._root_helper, run_as_root=True) if not (current_mode & stat.S_IWGRP): cmd = ['chmod', 'g+w', mount_path] self._execute(*cmd, root_helper=self._root_helper, run_as_root=True) return mount_path ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 
mtime=1759315527.0 cinder-27.0.0/cinder/backup/drivers/nfs.py0000664000175000017500000001035600000000000020423 0ustar00zuulzuul00000000000000# Copyright (C) 2015 Tom Barron # Copyright (C) 2015 Kevin Fox # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Implementation of a backup service that uses NFS storage as the backend.""" import os import stat from os_brick import exception as brick_exception from os_brick.remotefs import remotefs as remotefs_brick from oslo_concurrency import processutils as putils from oslo_config import cfg from oslo_log import log as logging from cinder.backup.drivers import posix from cinder import exception from cinder import interface from cinder import utils LOG = logging.getLogger(__name__) nfsbackup_service_opts = [ cfg.StrOpt('backup_mount_point_base', default='$state_path/backup_mount', help='Base dir containing mount point for NFS share.'), cfg.StrOpt('backup_share', help='NFS share in hostname:path, ipv4addr:path, ' 'or "[ipv6addr]:path" format.'), cfg.StrOpt('backup_mount_options', help=('Mount options passed to the NFS client. See NFS ' 'man page for details.')), cfg.IntOpt('backup_mount_attempts', min=1, default=3, help='The number of attempts to mount NFS shares before ' 'raising an error.'), ] CONF = cfg.CONF CONF.register_opts(nfsbackup_service_opts) @interface.backupdriver class NFSBackupDriver(posix.PosixBackupDriver): """Provides backup, restore and delete using NFS supplied repository.""" def __init__(self, context): self.backup_mount_point_base = CONF.backup_mount_point_base self.backup_share = CONF.backup_share self.mount_options = CONF.backup_mount_options self._execute = putils.execute self._root_helper = utils.get_root_helper() backup_path = self._init_backup_repo_path() LOG.debug("Using NFS backup repository: %s", backup_path) super().__init__(context, backup_path=backup_path) def check_for_setup_error(self): """Raises error if any required configuration flag is missing.""" required_flags = ['backup_share'] for flag in required_flags: val = getattr(CONF, flag, None) if not val: raise exception.InvalidConfigurationValue(option=flag, value=val) def _init_backup_repo_path(self): if self.backup_share is None: LOG.info("_init_backup_repo_path: " "backup_share is not set in configuration") return remotefsclient = remotefs_brick.RemoteFsClient( 'nfs', self._root_helper, nfs_mount_point_base=self.backup_mount_point_base, nfs_mount_options=self.mount_options) @utils.retry( (brick_exception.BrickException, putils.ProcessExecutionError), retries=CONF.backup_mount_attempts) def mount(): remotefsclient.mount(self.backup_share) mount() # Ensure we can write to this share mount_path = remotefsclient.get_mount_point(self.backup_share) group_id = os.getegid() current_group_id = utils.get_file_gid(mount_path) current_mode = utils.get_file_mode(mount_path) if group_id != current_group_id: cmd = ['chgrp', '-R', group_id, mount_path] self._execute(*cmd, root_helper=self._root_helper, run_as_root=True) if not (current_mode & stat.S_IWGRP): cmd = 
['chmod', '-R', 'g+w', mount_path] self._execute(*cmd, root_helper=self._root_helper, run_as_root=True) return mount_path ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/backup/drivers/posix.py0000664000175000017500000001346600000000000021004 0ustar00zuulzuul00000000000000# Copyright (C) 2015 Tom Barron # Copyright (C) 2015 Kevin Fox # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Implementation of a backup service that uses a posix filesystem as the backend.""" import errno import os import os.path import stat from oslo_config import cfg from oslo_log import log as logging from oslo_utils import timeutils from cinder.backup import chunkeddriver from cinder import exception from cinder import interface LOG = logging.getLogger(__name__) SHA_SIZE = 32768 # Multiple of SHA_SIZE, close to a characteristic OS max file system size. BACKUP_FILE_SIZE = 61035 * 32768 posixbackup_service_opts = [ cfg.IntOpt('backup_file_size', default=BACKUP_FILE_SIZE, help='The maximum size in bytes of the files used to hold ' 'backups. If the volume being backed up exceeds this ' 'size, then it will be backed up into multiple files. ' 'backup_file_size also determines the buffer size ' 'used to build backup files, so should be scaled ' 'according to available RAM and number of workers. ' 'backup_file_size must be a multiple of ' 'backup_sha_block_size_bytes.'), cfg.IntOpt('backup_sha_block_size_bytes', default=SHA_SIZE, help='The size in bytes that changes are tracked ' 'for incremental backups. backup_file_size has ' 'to be multiple of backup_sha_block_size_bytes.'), cfg.BoolOpt('backup_enable_progress_timer', default=True, help='Enable or Disable the timer to send the periodic ' 'progress notifications to Ceilometer when backing ' 'up the volume to the backend storage. 
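# --- Quick, illustrative sanity check of the posix driver defaults defined
# above: backup_file_size must be an exact multiple of
# backup_sha_block_size_bytes, and the shipped default is 61035 SHA blocks.
SHA_SIZE = 32768
BACKUP_FILE_SIZE = 61035 * 32768
assert BACKUP_FILE_SIZE % SHA_SIZE == 0          # exact multiple, as required
assert BACKUP_FILE_SIZE // SHA_SIZE == 61035     # 61035 blocks of 32 KiB each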
The ' 'default value is True to enable the timer.'), cfg.StrOpt('backup_posix_path', default='$state_path/backup', help='Path specifying where to store backups.'), cfg.StrOpt('backup_container', help='Custom directory to use for backups.'), ] CONF = cfg.CONF CONF.register_opts(posixbackup_service_opts) @interface.backupdriver class PosixBackupDriver(chunkeddriver.ChunkedBackupDriver): """Provides backup, restore and delete using a Posix file system.""" def __init__(self, context, backup_path=None): chunk_size_bytes = CONF.backup_file_size sha_block_size_bytes = CONF.backup_sha_block_size_bytes backup_default_container = CONF.backup_container enable_progress_timer = CONF.backup_enable_progress_timer super().__init__( context, chunk_size_bytes, sha_block_size_bytes, backup_default_container, enable_progress_timer, ) self.backup_path = backup_path if not backup_path: self.backup_path = CONF.backup_posix_path if not self.backup_path: raise exception.ConfigNotFound(path='backup_path') LOG.debug("Using backup repository: %s", self.backup_path) @staticmethod def get_driver_options(): return posixbackup_service_opts def update_container_name(self, backup, container): if container is not None: return container id = backup['id'] return os.path.join(id[0:2], id[2:4], id) def put_container(self, container): path = os.path.join(self.backup_path, container) if not os.path.exists(path): os.makedirs(path) permissions = ( stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR | stat.S_IRGRP | stat.S_IWGRP | stat.S_IXGRP) os.chmod(path, permissions) def get_container_entries(self, container, prefix): path = os.path.join(self.backup_path, container) return [i for i in os.listdir(path) if i.startswith(prefix)] def get_object_writer(self, container, object_name, extra_metadata=None): path = os.path.join(self.backup_path, container, object_name) f = open(path, 'wb') permissions = ( stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IWGRP) os.chmod(path, permissions) return f def get_object_reader(self, container, object_name, extra_metadata=None): path = os.path.join(self.backup_path, container, object_name) return open(path, 'rb') def delete_object(self, container, object_name): # TODO(tbarron): clean up the container path if it is empty path = os.path.join(self.backup_path, container, object_name) try: os.remove(path) except OSError as e: # Ignore exception if path does not exist. if e.errno != errno.ENOENT: raise def _generate_object_name_prefix(self, backup): timestamp = timeutils.utcnow().strftime("%Y%m%d%H%M%S") prefix = 'volume_%s_%s_backup_%s' % (backup.volume_id, timestamp, backup.id) LOG.debug('_generate_object_name_prefix: %s', prefix) return prefix def get_extra_metadata(self, backup, volume): return None ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/backup/drivers/s3.py0000664000175000017500000003743600000000000020172 0ustar00zuulzuul00000000000000# Copyright (C) 2020 leafcloud b.v. # Copyright (C) 2020 FUJITSU LIMITED # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
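# --- Illustrative sketch of the on-disk layout produced by
# PosixBackupDriver.update_container_name above: when no container is given,
# objects are grouped under <id[0:2]>/<id[2:4]>/<id> below backup_posix_path.
# The UUID below is a made-up example value.
import os

backup_id = '6f3c9a12-7d4e-4b1a-9c7b-1a2b3c4d5e6f'
container = os.path.join(backup_id[0:2], backup_id[2:4], backup_id)
# container == '6f/3c/6f3c9a12-7d4e-4b1a-9c7b-1a2b3c4d5e6f'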
See the # License for the specific language governing permissions and limitations # under the License. """Implementation of a backup service that uses S3 as the backend **Related Flags** :backup_s3_endpoint_url: The url where the S3 server is listening. (default: None) :backup_s3_store_bucket: The S3 bucket to be used to store the Cinder backup data. (default: volumebackups) :backup_s3_store_access_key: The S3 query token access key. (default: None) :backup_s3_store_secret_key: The S3 query token secret key. (default: None) :backup_s3_sse_customer_key: The SSECustomerKey. backup_s3_sse_customer_algorithm must be set at the same time to enable SSE. (default: None) :backup_s3_sse_customer_algorithm: The SSECustomerAlgorithm. backup_s3_sse_customer_key must be set at the same time to enable SSE. (default: None) :backup_s3_object_size: The size in bytes of S3 backup objects. (default: 52428800) :backup_s3_block_size: The size in bytes that changes are tracked for incremental backups. backup_s3_object_size has to be multiple of backup_s3_block_size. (default: 32768). :backup_s3_md5_validation: Enable or Disable md5 validation in the s3 backend. (default: True) :backup_s3_http_proxy: Address or host for the http proxy server. (default: '') :backup_s3_https_proxy: Address or host for the https proxy server. (default: '') :backup_s3_timeout: The time in seconds till a timeout exception is thrown. (default: 60) :backup_s3_max_pool_connections: The maximum number of connections to keep in a connection pool. (default: 10) :backup_s3_retry_max_attempts: An integer representing the maximum number of retry attempts that will be made on a single request. (default: 4) :backup_s3_retry_mode: A string representing the type of retry mode. e.g: legacy, standard, adaptive. (default: legacy) :backup_s3_verify_ssl: Enable or Disable ssl verify. (default: True) :backup_s3_ca_cert_file: A filename of the CA cert bundle to use. (default: None) :backup_s3_enable_progress_timer: Enable or Disable the timer to send the periodic progress notifications to Ceilometer when backing up the volume to the S3 backend storage. (default: True) :backup_compression_algorithm: Compression algorithm to use for volume backups. """ import base64 import functools import hashlib import io import itertools as it import socket import boto3 from botocore.config import Config from botocore import exceptions as boto_exc from botocore.vendored.requests.packages.urllib3 import exceptions as \ urrlib_exc from oslo_config import cfg from oslo_log import log as logging from oslo_utils import timeutils from cinder.backup import chunkeddriver from cinder import exception from cinder.i18n import _ from cinder import interface LOG = logging.getLogger(__name__) s3backup_service_opts = [ cfg.StrOpt('backup_s3_endpoint_url', help=_('The url where the S3 server is listening.')), cfg.StrOpt('backup_s3_store_access_key', secret=True, help=_('The S3 query token access key.')), cfg.StrOpt('backup_s3_store_secret_key', secret=True, help=_('The S3 query token secret key.')), cfg.StrOpt('backup_s3_store_bucket', default='volumebackups', help=_('The S3 bucket to be used ' 'to store the Cinder backup data.')), cfg.IntOpt('backup_s3_object_size', default=52428800, help='The size in bytes of S3 backup objects'), cfg.IntOpt('backup_s3_block_size', default=32768, help='The size in bytes that changes are tracked ' 'for incremental backups. 
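# --- Illustrative cinder.conf fragment (all values are placeholders, not
# real credentials) showing how the options registered in this file are
# typically wired up together with the backup_driver option from
# cinder.backup.manager:
#
#   [DEFAULT]
#   backup_driver = cinder.backup.drivers.s3.S3BackupDriver
#   backup_s3_endpoint_url = https://s3.example.com
#   backup_s3_store_access_key = <access key>
#   backup_s3_store_secret_key = <secret key>
#   backup_s3_store_bucket = volumebackups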
backup_s3_object_size ' 'has to be multiple of backup_s3_block_size.'), cfg.BoolOpt('backup_s3_enable_progress_timer', default=True, help='Enable or Disable the timer to send the periodic ' 'progress notifications to Ceilometer when backing ' 'up the volume to the S3 backend storage. The ' 'default value is True to enable the timer.'), cfg.StrOpt('backup_s3_http_proxy', default='', help='Address or host for the http proxy server.'), cfg.StrOpt('backup_s3_https_proxy', default='', help='Address or host for the https proxy server.'), cfg.FloatOpt('backup_s3_timeout', default=60, help='The time in seconds till ' 'a timeout exception is thrown.'), cfg.IntOpt('backup_s3_max_pool_connections', default=10, help='The maximum number of connections ' 'to keep in a connection pool.'), cfg.IntOpt('backup_s3_retry_max_attempts', default=4, help='An integer representing the maximum number of ' 'retry attempts that will be made on a single request.'), cfg.StrOpt('backup_s3_retry_mode', default='legacy', help='A string representing the type of retry mode. ' 'e.g: legacy, standard, adaptive'), cfg.BoolOpt('backup_s3_verify_ssl', default=True, help='Enable or Disable ssl verify.'), cfg.StrOpt('backup_s3_ca_cert_file', default=None, help='path/to/cert/bundle.pem ' '- A filename of the CA cert bundle to use.'), cfg.BoolOpt('backup_s3_md5_validation', default=True, help='Enable or Disable md5 validation in the s3 backend.'), cfg.StrOpt('backup_s3_sse_customer_key', default=None, secret=True, help='The SSECustomerKey. backup_s3_sse_customer_algorithm ' 'must be set at the same time to enable SSE.'), cfg.StrOpt('backup_s3_sse_customer_algorithm', default=None, help='The SSECustomerAlgorithm. backup_s3_sse_customer_key ' 'must be set at the same time to enable SSE.') ] CONF = cfg.CONF CONF.register_opts(s3backup_service_opts) CONF.import_opt('backup_compression_algorithm', 'cinder.backup.chunkeddriver') class S3ConnectionFailure(exception.BackupDriverException): message = _("S3 connection failure: %(reason)s") class S3ClientError(exception.BackupDriverException): message = _("S3 client error: %(reason)s") def _wrap_exception(func): @functools.wraps(func) def func_wrapper(*args, **kwargs): try: return func(*args, **kwargs) except boto_exc.ClientError as err: raise S3ClientError(reason=err) except Exception as err: raise S3ConnectionFailure(reason=err) return func_wrapper @interface.backupdriver class S3BackupDriver(chunkeddriver.ChunkedBackupDriver): """Provides backup, restore and delete of backup objects within S3.""" def __init__(self, context): chunk_size_bytes = CONF.backup_s3_object_size sha_block_size_bytes = CONF.backup_s3_block_size backup_bucket = CONF.backup_s3_store_bucket enable_progress_timer = CONF.backup_s3_enable_progress_timer super().__init__( context, chunk_size_bytes, sha_block_size_bytes, backup_bucket, enable_progress_timer, ) config_args = dict( connect_timeout=CONF.backup_s3_timeout, read_timeout=CONF.backup_s3_timeout, max_pool_connections=CONF.backup_s3_max_pool_connections, retries={ 'max_attempts': CONF.backup_s3_retry_max_attempts, 'mode': CONF.backup_s3_retry_mode}) if CONF.backup_s3_http_proxy: config_args['proxies'] = {'http': CONF.backup_s3_http_proxy} if CONF.backup_s3_https_proxy: config_args.setdefault('proxies', {}).update( {'https': CONF.backup_s3_https_proxy}) conn_args = { 'aws_access_key_id': CONF.backup_s3_store_access_key, 'aws_secret_access_key': CONF.backup_s3_store_secret_key, 'endpoint_url': CONF.backup_s3_endpoint_url, 'config': Config(**config_args)} if 
CONF.backup_s3_verify_ssl: conn_args['verify'] = CONF.backup_s3_ca_cert_file if CONF.backup_s3_ca_cert_file is None: LOG.warning('backup_s3_verify_ssl is True but no cert file ' 'was provided') else: conn_args['verify'] = False self.conn = boto3.client('s3', **conn_args) @staticmethod def get_driver_options(): backup_opts = [CONF._opts['backup_compression_algorithm']['opt']] return s3backup_service_opts + backup_opts @_wrap_exception def put_container(self, bucket): """Create the bucket if not exists.""" try: self.conn.head_bucket(Bucket=bucket) except boto_exc.ClientError as e: # NOTE: If it was a 404 error, then the bucket does not exist. error_code = e.response['Error']['Code'] if error_code != '404': raise self.conn.create_bucket(Bucket=bucket) @_wrap_exception def get_container_entries(self, bucket, prefix): """Get bucket entry names.""" paginator = self.conn.get_paginator('list_objects_v2') page_iterator = paginator.paginate(Bucket=bucket, Prefix=prefix) result = [obj_dict.get('Key') for obj_dict in it.chain.from_iterable( page.get('Contents') for page in page_iterator)] return result def get_object_writer(self, bucket, object_name, extra_metadata=None): """Return a writer object. Returns a writer object that stores a chunk of volume data in a S3 object store. """ return S3ObjectWriter(bucket, object_name, self.conn) def get_object_reader(self, bucket, object_name, extra_metadata=None): """Return reader object. Returns a reader object that retrieves a chunk of backed-up volume data from a S3 object store. """ return S3ObjectReader(bucket, object_name, self.conn) @_wrap_exception def delete_object(self, bucket, object_name): """Deletes a backup object from a S3 object store.""" self.conn.delete_object( Bucket=bucket, Key=object_name) def _generate_object_name_prefix(self, backup): """Generates a S3 backup object name prefix. prefix = volume_volid/timestamp/az_saz_backup_bakid volid is volume id. timestamp is time in UTC with format of YearMonthDateHourMinuteSecond. saz is storage_availability_zone. bakid is backup id for volid. """ az = 'az_%s' % self.az backup_name = '%s_backup_%s' % (az, backup.id) volume = 'volume_%s' % (backup.volume_id) timestamp = timeutils.utcnow().strftime("%Y%m%d%H%M%S") prefix = volume + '/' + timestamp + '/' + backup_name LOG.debug('generate_object_name_prefix: %s', prefix) return prefix def update_container_name(self, backup, container): """Use the bucket name as provided - don't update.""" return def get_extra_metadata(self, backup, volume): """S3 driver does not use any extra metadata.""" return def check_for_setup_error(self): required_options = ('backup_s3_endpoint_url', 'backup_s3_store_access_key', 'backup_s3_store_secret_key') for opt in required_options: val = getattr(CONF, opt, None) if not val: raise exception.InvalidConfigurationValue(option=opt, value=val) if ((not CONF.backup_s3_sse_customer_algorithm) != (not CONF.backup_s3_sse_customer_key)): LOG.warning("Both the backup_s3_sse_customer_algorithm and " "backup_s3_sse_customer_key options must be set " "to enable SSE. 
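# --- Minimal, illustrative sketch of the boto3 client construction that
# S3BackupDriver.__init__ above performs from its configuration options.
# The endpoint and credentials are placeholders; proxy and SSE settings are
# omitted for brevity.
import boto3
from botocore.config import Config

config = Config(
    connect_timeout=60,                      # backup_s3_timeout
    read_timeout=60,                         # backup_s3_timeout
    max_pool_connections=10,                 # backup_s3_max_pool_connections
    retries={'max_attempts': 4,              # backup_s3_retry_max_attempts
             'mode': 'legacy'},              # backup_s3_retry_mode
)
client = boto3.client(
    's3',
    aws_access_key_id='<access key>',        # backup_s3_store_access_key
    aws_secret_access_key='<secret key>',    # backup_s3_store_secret_key
    endpoint_url='https://s3.example.com',   # backup_s3_endpoint_url
    config=config,
    # verify=None uses the default CA bundle; a path selects
    # backup_s3_ca_cert_file, and False disables verification entirely.
    verify=None,
)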
SSE is disabled.") try: self.conn.list_buckets() except Exception: LOG.exception("Cannot list s3 buckets during backup " "driver initialization.") raise class S3ObjectWriter(object): def __init__(self, bucket, object_name, conn): self.bucket = bucket self.object_name = object_name self.conn = conn self.data = bytearray() def __enter__(self): return self def __exit__(self, exc_type, exc_value, traceback): self.close() def write(self, data): self.data += data @_wrap_exception def close(self): reader = io.BytesIO(self.data) contentmd5 = base64.b64encode( hashlib.md5(self.data, usedforsecurity=False).digest()).decode('utf-8') put_args = {'Bucket': self.bucket, 'Body': reader, 'Key': self.object_name, 'ContentLength': len(self.data)} if CONF.backup_s3_md5_validation: put_args['ContentMD5'] = contentmd5 if (CONF.backup_s3_sse_customer_algorithm and CONF.backup_s3_sse_customer_key): put_args.update( SSECustomerAlgorithm=CONF.backup_s3_sse_customer_algorithm, SSECustomerKey=CONF.backup_s3_sse_customer_key) self.conn.put_object(**put_args) return contentmd5 class S3ObjectReader(object): def __init__(self, bucket, object_name, conn): self.bucket = bucket self.object_name = object_name self.conn = conn def __enter__(self): return self def __exit__(self, exc_type, exc_value, traceback): pass @_wrap_exception def read(self): get_args = {'Bucket': self.bucket, 'Key': self.object_name} if (CONF.backup_s3_sse_customer_algorithm and CONF.backup_s3_sse_customer_key): get_args.update( SSECustomerAlgorithm=CONF.backup_s3_sse_customer_algorithm, SSECustomerKey=CONF.backup_s3_sse_customer_key) # NOTE: these retries account for errors that occur when streaming # down the data from s3 (i.e. socket errors and read timeouts that # occur after recieving an OK response from s3). Other retryable # exceptions such as throttling errors and 5xx errors are already # retried by botocore. last_exception = None for i in range(CONF.backup_s3_retry_max_attempts): try: resp = self.conn.get_object(**get_args) return resp.get('Body').read() except (socket.timeout, socket.error, urrlib_exc.ReadTimeoutError, boto_exc.IncompleteReadError) as e: last_exception = e continue raise S3ClientError(reason=last_exception) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/backup/drivers/swift.py0000664000175000017500000005167300000000000021000 0ustar00zuulzuul00000000000000# Copyright (C) 2012 Hewlett-Packard Development Company, L.P. # Copyright (c) 2014 TrilioData, Inc # Copyright (c) 2015 EMC Corporation # Copyright (C) 2015 Kevin Fox # Copyright (C) 2015 Tom Barron # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Implementation of a backup service that uses Swift as the backend **Related Flags** :backup_swift_url: The URL of the Swift endpoint (default: None, use catalog). :backup_swift_auth_url: The URL of the Keystone endpoint for authentication (default: None, use catalog). 
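# --- Illustrative computation of the ContentMD5 header that
# S3ObjectWriter.close above sends when backup_s3_md5_validation is enabled
# (the driver additionally passes usedforsecurity=False to hashlib.md5).
# The payload is a made-up example.
import base64
import hashlib

data = b'example chunk of backup data'
content_md5 = base64.b64encode(hashlib.md5(data).digest()).decode('utf-8')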
:swift_catalog_info: Info to match when looking for swift in the service ' catalog. :keystone_catalog_info: Info to match when looking for keystone in the service catalog. :backup_swift_object_size: The size in bytes of the Swift objects used for volume backups (default: 52428800). :backup_swift_retry_attempts: The number of retries to make for Swift operations (default: 10). :backup_swift_retry_backoff: The backoff time in seconds between retrying failed Swift operations (default: 10). :backup_compression_algorithm: Compression algorithm to use for volume backups. Supported options are: None (to disable), zlib and bz2 (default: zlib) :backup_swift_ca_cert_file: The location of the CA certificate file to use for swift client requests (default: None) :backup_swift_auth_insecure: If true, bypass verification of server's certificate for SSL connections (default: False) """ import hashlib import io import socket from oslo_config import cfg from oslo_log import log as logging from oslo_utils import timeutils from swiftclient import client as swift from swiftclient import exceptions as swift_exc from cinder.backup import chunkeddriver from cinder import exception from cinder.i18n import _ from cinder import interface from cinder import service_auth from cinder.utils import retry LOG = logging.getLogger(__name__) swiftbackup_service_opts = [ cfg.URIOpt('backup_swift_url', help='The URL of the Swift endpoint'), cfg.URIOpt('backup_swift_auth_url', help='The URL of the Keystone endpoint'), cfg.StrOpt('swift_catalog_info', default='object-store:swift:publicURL', help='Info to match when looking for swift in the service ' 'catalog. Format is: separated values of the form: ' ':: - ' 'Only used if backup_swift_url is unset'), cfg.StrOpt('keystone_catalog_info', default='identity:Identity Service:publicURL', help='Info to match when looking for keystone in the service ' 'catalog. Format is: separated values of the form: ' ':: - ' 'Only used if backup_swift_auth_url is unset'), cfg.StrOpt('backup_swift_auth', default='per_user', choices=['per_user', 'single_user'], help='Swift authentication mechanism (per_user or ' 'single_user).'), cfg.StrOpt('backup_swift_auth_version', default='1', help='Swift authentication version. Specify "1" for auth 1.0' ', or "2" for auth 2.0 or "3" for auth 3.0'), cfg.StrOpt('backup_swift_tenant', help='Swift tenant/account name. Required when connecting' ' to an auth 2.0 system'), cfg.StrOpt('backup_swift_user_domain', default=None, help='Swift user domain name. Required when connecting' ' to an auth 3.0 system'), cfg.StrOpt('backup_swift_project_domain', default=None, help='Swift project domain name. Required when connecting' ' to an auth 3.0 system'), cfg.StrOpt('backup_swift_project', default=None, help='Swift project/account name. Required when connecting' ' to an auth 3.0 system'), cfg.StrOpt('backup_swift_user', help='Swift user name'), cfg.StrOpt('backup_swift_key', secret=True, help='Swift key for authentication'), cfg.StrOpt('backup_swift_container', default='volumebackups', help='The default Swift container to use'), cfg.StrOpt('backup_swift_create_storage_policy', default=None, help='The storage policy to use when creating the Swift ' 'container. If the container already exists the ' 'storage policy cannot be enforced'), cfg.IntOpt('backup_swift_object_size', default=52428800, help='The size in bytes of Swift backup objects'), cfg.IntOpt('backup_swift_block_size', default=32768, help='The size in bytes that changes are tracked ' 'for incremental backups. 
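# --- Illustrative parsing of the swift_catalog_info / keystone_catalog_info
# format documented above (':'-separated service_type, service_name and
# endpoint_type), matching the split that SwiftBackupDriver.initialize()
# performs before walking the service catalog.
info = 'object-store:swift:publicURL'        # default swift_catalog_info
service_type, service_name, endpoint_type = info.split(':')
# service_type == 'object-store', endpoint_type == 'publicURL'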
backup_swift_object_size ' 'has to be multiple of backup_swift_block_size.'), cfg.IntOpt('backup_swift_retry_attempts', default=3, help='The number of retries to make for Swift operations'), cfg.IntOpt('backup_swift_retry_backoff', default=2, help='The backoff time in seconds between Swift retries'), cfg.BoolOpt('backup_swift_enable_progress_timer', default=True, help='Enable or Disable the timer to send the periodic ' 'progress notifications to Ceilometer when backing ' 'up the volume to the Swift backend storage. The ' 'default value is True to enable the timer.'), cfg.StrOpt('backup_swift_ca_cert_file', help='Location of the CA certificate file to use for swift ' 'client requests.'), cfg.BoolOpt('backup_swift_auth_insecure', default=False, help='Bypass verification of server certificate when ' 'making SSL connection to Swift.'), cfg.BoolOpt('backup_swift_service_auth', default=False, help='Send a X-Service-Token header with service auth ' 'credentials. If enabled you also must set the ' 'service_user group and enable send_service_user_token.'), ] CONF = cfg.CONF CONF.register_opts(swiftbackup_service_opts) @interface.backupdriver class SwiftBackupDriver(chunkeddriver.ChunkedBackupDriver): """Provides backup, restore and delete of backup objects within Swift.""" def __init__(self, context): chunk_size_bytes = CONF.backup_swift_object_size sha_block_size_bytes = CONF.backup_swift_block_size backup_default_container = CONF.backup_swift_container enable_progress_timer = CONF.backup_swift_enable_progress_timer super().__init__( context, chunk_size_bytes, sha_block_size_bytes, backup_default_container, enable_progress_timer, ) # Do not intialize the instance created when the backup service # starts up. The context will be missing information to do things # like fetching endpoints from the service catalog. 
if context and context.user_id: self.initialize() @staticmethod def get_driver_options(): return swiftbackup_service_opts @retry(Exception, retries=CONF.backup_swift_retry_attempts, backoff_rate=CONF.backup_swift_retry_backoff) def _headers(self, headers=None): """Add service token to headers if its enabled""" if not CONF.backup_swift_service_auth: return headers result = headers or {} sa_plugin = service_auth.get_service_auth_plugin() if sa_plugin is not None: sa_session = service_auth.get_service_session() result['X-Service-Token'] = sa_plugin.get_token(session=sa_session) return result def initialize(self): self.swift_attempts = CONF.backup_swift_retry_attempts self.swift_backoff = CONF.backup_swift_retry_backoff self.backup_swift_auth_insecure = CONF.backup_swift_auth_insecure if CONF.backup_swift_auth == 'single_user': if CONF.backup_swift_user is None: LOG.error("single_user auth mode enabled, " "but %(param)s not set", {'param': 'backup_swift_user'}) raise exception.ParameterNotFound(param='backup_swift_user') if CONF.backup_swift_auth_url is None: self.auth_url = None info = CONF.keystone_catalog_info try: service_type, service_name, endpoint_type = info.split(':') except ValueError: raise exception.BackupDriverException(_( "Failed to parse the configuration option " "'keystone_catalog_info', must be in the form " "::")) for entry in self.context.service_catalog: if entry.get('type') == service_type: # It is assumed that service_types are unique within # the service catalog, so once the correct one is found # it is safe to break out of the loop self.auth_url = entry.get( 'endpoints')[0].get(endpoint_type) break else: self.auth_url = CONF.backup_swift_auth_url if self.auth_url is None: raise exception.BackupDriverException(_( "Could not determine which Keystone endpoint to use. 
This " "can either be set in the service catalog or with the " "cinder.conf config option 'backup_swift_auth_url'.")) LOG.debug("Using auth URL %s", self.auth_url) LOG.debug('Connect to %s in "%s" mode', CONF.backup_swift_auth_url, CONF.backup_swift_auth) os_options = {} if CONF.backup_swift_user_domain is not None: os_options['user_domain_name'] = CONF.backup_swift_user_domain if CONF.backup_swift_project_domain is not None: os_options['project_domain_name'] = ( CONF.backup_swift_project_domain ) if CONF.backup_swift_project is not None: os_options['project_name'] = CONF.backup_swift_project self.conn = swift.Connection( authurl=self.auth_url, auth_version=CONF.backup_swift_auth_version, tenant_name=CONF.backup_swift_tenant, user=CONF.backup_swift_user, key=CONF.backup_swift_key, os_options=os_options, retries=self.swift_attempts, starting_backoff=self.swift_backoff, insecure=self.backup_swift_auth_insecure, cacert=CONF.backup_swift_ca_cert_file) else: if CONF.backup_swift_url is None: self.swift_url = None info = CONF.swift_catalog_info try: service_type, service_name, endpoint_type = info.split(':') except ValueError: raise exception.BackupDriverException(_( "Failed to parse the configuration option " "'swift_catalog_info', must be in the form " "::")) for entry in self.context.service_catalog: if entry.get('type') == service_type: # It is assumed that service_types are unique within # the service catalog, so once the correct one is found # it is safe to break out of the loop self.swift_url = entry.get( 'endpoints')[0].get(endpoint_type) break else: self.swift_url = '%s%s' % (CONF.backup_swift_url, self.context.project_id) if self.swift_url is None: raise exception.BackupDriverException(_( "Could not determine which Swift endpoint to use. This " "can either be set in the service catalog or with the " "cinder.conf config option 'backup_swift_url'.")) LOG.debug("Using swift URL %s", self.swift_url) LOG.debug('Connect to %s in "%s" mode', CONF.backup_swift_url, CONF.backup_swift_auth) self.conn = swift.Connection(retries=self.swift_attempts, preauthurl=self.swift_url, preauthtoken=self.context.auth_token, starting_backoff=self.swift_backoff, insecure=( self.backup_swift_auth_insecure), cacert=CONF.backup_swift_ca_cert_file) class SwiftObjectWriter(object): def __init__(self, container, object_name, conn, headers_func=None): self.container = container self.object_name = object_name self.conn = conn self.data = bytearray() self.headers_func = headers_func def __enter__(self): return self def __exit__(self, exc_type, exc_value, traceback): self.close() def write(self, data): self.data += data def close(self): reader = io.BytesIO(self.data) try: headers = self.headers_func() if self.headers_func else None etag = self.conn.put_object(self.container, self.object_name, reader, content_length=len(self.data), headers=headers) except socket.error as err: raise exception.SwiftConnectionFailed(reason=err) md5 = hashlib.md5(self.data, usedforsecurity=False).hexdigest() if etag != md5: err = _('error writing object to swift, MD5 of object in ' 'swift %(etag)s is not the same as MD5 of object sent ' 'to swift %(md5)s') % {'etag': etag, 'md5': md5} raise exception.InvalidBackup(reason=err) return md5 class SwiftObjectReader(object): def __init__(self, container, object_name, conn, headers_func=None): self.container = container self.object_name = object_name self.conn = conn self.headers_func = headers_func def __enter__(self): return self def __exit__(self, exc_type, exc_value, traceback): pass def 
read(self): try: headers = self.headers_func() if self.headers_func else None (_resp, body) = self.conn.get_object(self.container, self.object_name, headers=headers) except socket.error as err: raise exception.SwiftConnectionFailed(reason=err) return body def put_container(self, container): """Create the container if needed. Check if the container exist by issuing a HEAD request, if the container does not exist we create it. We cannot enforce a new storage policy on an existing container. """ try: self.conn.head_container(container, headers=self._headers()) except swift_exc.ClientException as e: if e.http_status == 404: try: storage_policy = CONF.backup_swift_create_storage_policy headers = ({'X-Storage-Policy': storage_policy} if storage_policy else None) self.conn.put_container(container, headers=self._headers(headers)) except socket.error as err: raise exception.SwiftConnectionFailed(reason=err) return LOG.warning("Failed to HEAD container to determine if it " "exists and should be created.") raise exception.SwiftConnectionFailed(reason=e) except socket.error as err: raise exception.SwiftConnectionFailed(reason=err) def get_container_entries(self, container, prefix): """Get container entry names""" try: headers = self._headers() swift_objects = self.conn.get_container(container, prefix=prefix, full_listing=True, headers=headers)[1] except socket.error as err: raise exception.SwiftConnectionFailed(reason=err) swift_object_names = [swift_obj['name'] for swift_obj in swift_objects] return swift_object_names def get_object_writer(self, container, object_name, extra_metadata=None): """Return a writer object. Returns a writer object that stores a chunk of volume data in a Swift object store. """ return self.SwiftObjectWriter(container, object_name, self.conn, self._headers) def get_object_reader(self, container, object_name, extra_metadata=None): """Return reader object. Returns a reader object that retrieves a chunk of backed-up volume data from a Swift object store. """ return self.SwiftObjectReader(container, object_name, self.conn, self._headers) def delete_object(self, container, object_name): """Deletes a backup object from a Swift object store.""" try: self.conn.delete_object(container, object_name, headers=self._headers()) except socket.error as err: raise exception.SwiftConnectionFailed(reason=err) except swift_exc.ClientException as err: if err.http_status != 404: raise def _generate_object_name_prefix(self, backup): """Generates a Swift backup object name prefix.""" az = 'az_%s' % self.az backup_name = '%s_backup_%s' % (az, backup['id']) volume = 'volume_%s' % (backup['volume_id']) timestamp = timeutils.utcnow().strftime("%Y%m%d%H%M%S") prefix = volume + '/' + timestamp + '/' + backup_name LOG.debug('generate_object_name_prefix: %s', prefix) return prefix def update_container_name(self, backup, container): """Use the container name as provided - don't update.""" return container def get_extra_metadata(self, backup, volume): """Swift driver does not use any extra metadata.""" return None def check_for_setup_error(self): # Here we are trying to connect to swift backend service # without any additional parameters. # At the moment of execution we don't have any user data # After just trying to do easiest operations, that will show # that we've configured swift backup driver in right way if not CONF.backup_swift_url: LOG.warning("We will use endpoints from keystone. 
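# --- Illustrative object name prefix as produced by
# _generate_object_name_prefix above:
# volume_<volume_id>/<timestamp>/az_<az>_backup_<backup_id>.
# The IDs and availability zone are made-up values, and datetime is used
# here as a stand-in for oslo_utils.timeutils.
from datetime import datetime, timezone

volume_id = 'c1d2e3f4-0000-4aaa-bbbb-123456789abc'
backup_id = 'b9e8d7c6-0000-4ccc-dddd-abcdef123456'
az = 'nova'
timestamp = datetime.now(timezone.utc).strftime("%Y%m%d%H%M%S")
prefix = 'volume_%s/%s/az_%s_backup_%s' % (volume_id, timestamp, az, backup_id)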
It is " "possible we could have problems because of it.") return conn = swift.Connection(retries=CONF.backup_swift_retry_attempts, preauthurl=CONF.backup_swift_url, cacert=CONF.backup_swift_ca_cert_file) try: conn.get_capabilities() # TODO(e0ne) catch less general exception except Exception: LOG.exception("Can not get Swift capabilities during backup " "driver initialization.") raise ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/backup/manager.py0000664000175000017500000016150000000000000017567 0ustar00zuulzuul00000000000000# Copyright (C) 2012 Hewlett-Packard Development Company, L.P. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Backup manager manages volume backups. Volume Backups are full copies of persistent volumes stored in a backup store e.g. an object store or any other backup store if and when support is added. They are usable without the original object being available. A volume backup can be restored to the original volume it was created from or any other available volume with a minimum size of the original volume. Volume backups can be created, restored, deleted and listed. **Related Flags** :backup_manager: The module name of a class derived from :class:`manager.Manager` (default: :class:`cinder.backup.manager.Manager`). """ import contextlib import os import typing from castellan import key_manager from eventlet import tpool from oslo_config import cfg from oslo_log import log as logging import oslo_messaging as messaging from oslo_service import loopingcall from oslo_service import periodic_task from oslo_utils import excutils from oslo_utils import importutils from oslo_utils import timeutils from cinder.backup import rpcapi as backup_rpcapi from cinder import context from cinder import exception from cinder.i18n import _ from cinder.keymgr import migration as key_migration from cinder import manager from cinder.message import api as message_api from cinder.message import message_field from cinder import objects from cinder.objects import fields from cinder import quota from cinder import utils from cinder.volume import rpcapi as volume_rpcapi from cinder.volume import volume_utils LOG = logging.getLogger(__name__) backup_manager_opts = [ cfg.StrOpt('backup_driver', default='cinder.backup.drivers.swift.SwiftBackupDriver', help='Driver to use for backups.',), cfg.IntOpt('backup_driver_init_check_interval', default=60, min=5, help='Time in seconds between checks to see if the backup ' 'driver has been successfully initialized, any time ' 'the driver is restarted.'), cfg.IntOpt('backup_driver_stats_polling_interval', default=60, min=10, deprecated_name='backup_driver_status_check_interval', help='Time in seconds between checks of the backup driver ' 'status. If does not report as working, it is ' 'restarted.'), cfg.BoolOpt('backup_service_inithost_offload', default=True, help='Offload pending backup delete during ' 'backup service startup. 
If false, the backup service ' 'will remain down until all pending backups are ' 'deleted.',), cfg.IntOpt('backup_native_threads_pool_size', default=60, min=20, help='Size of the native threads pool for the backups. ' 'Most backup drivers rely heavily on this, it can be ' 'decreased for specific drivers that don\'t.'), ] CONF = cfg.CONF CONF.register_opts(backup_manager_opts) CONF.import_opt('use_multipath_for_image_xfer', 'cinder.volume.driver') CONF.import_opt('num_volume_device_scan_tries', 'cinder.volume.driver') QUOTAS = quota.QUOTAS MAPPING = { # Module name "google" conflicts with google library namespace inside the # driver when it imports google.auth 'cinder.backup.drivers.google.GoogleBackupDriver': 'cinder.backup.drivers.gcs.GoogleBackupDriver', } SERVICE_PGRP = '' if os.name == 'nt' else os.getpgrp() # TODO(geguileo): Once Eventlet issue #432 gets fixed we can just tpool.execute # the whole call to the driver's backup and restore methods instead of proxy # wrapping the device_file and having the drivers also proxy wrap their # writes/reads and the compression/decompression calls. # (https://github.com/eventlet/eventlet/issues/432) class BackupManager(manager.SchedulerDependentManager): """Manages backup of block storage devices.""" RPC_API_VERSION = backup_rpcapi.BackupAPI.RPC_API_VERSION target = messaging.Target(version=RPC_API_VERSION) def __init__(self, *args, **kwargs): self.az = CONF.storage_availability_zone self.backup_rpcapi = backup_rpcapi.BackupAPI() self.volume_rpcapi = volume_rpcapi.VolumeAPI() super(BackupManager, self).__init__(*args, **kwargs) self.is_initialized = False self._set_tpool_size(CONF.backup_native_threads_pool_size) self._process_number = kwargs.get('process_number', 1) self._semaphore = kwargs.get('semaphore', contextlib.suppress()) self.driver_name = CONF.backup_driver if self.driver_name in MAPPING: new_name = MAPPING[self.driver_name] LOG.warning('Backup driver path %s is deprecated, update your ' 'configuration to the new path %s', self.driver_name, new_name) self.driver_name = new_name self.service = importutils.import_class(self.driver_name) self.message_api = message_api.API() @typing.no_type_check def init_host(self, **kwargs): """Run initialization needed for a standalone service.""" ctxt = context.get_admin_context() self.setup_backup_backend(ctxt) try: self._cleanup_incomplete_backup_operations(ctxt) except Exception: # Don't block startup of the backup service. LOG.exception("Problem cleaning incomplete backup operations.") # Migrate any ConfKeyManager keys based on fixed_key to the currently # configured key manager. 
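# --- Illustrative sketch of how BackupManager.__init__ above resolves the
# configured backup driver class, including the MAPPING-based rename of the
# deprecated google driver path. Actually resolving the class requires
# cinder to be importable; the driver_name value is just an example.
from oslo_utils import importutils

MAPPING = {
    'cinder.backup.drivers.google.GoogleBackupDriver':
        'cinder.backup.drivers.gcs.GoogleBackupDriver',
}
driver_name = 'cinder.backup.drivers.google.GoogleBackupDriver'
driver_name = MAPPING.get(driver_name, driver_name)
driver_cls = importutils.import_class(driver_name)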
backups = objects.BackupList.get_all_by_host(ctxt, self.host) self._add_to_threadpool(key_migration.migrate_fixed_key, backups=backups) self.publish_service_capabilities(ctxt) def _setup_backup_driver(self, ctxt): backup_service = self.service(context=ctxt) backup_service.check_for_setup_error() self.is_initialized = True raise loopingcall.LoopingCallDone() def setup_backup_backend(self, ctxt): try: init_loop = loopingcall.FixedIntervalLoopingCall( self._setup_backup_driver, ctxt) init_loop.start(interval=CONF.backup_driver_init_check_interval) except loopingcall.LoopingCallDone: LOG.info("Backup driver was successfully initialized.") except Exception: LOG.exception("Failed to initialize driver.", resource={'type': 'driver', 'id': self.__class__.__name__}) def reset(self): super(BackupManager, self).reset() self.backup_rpcapi = backup_rpcapi.BackupAPI() self.volume_rpcapi = volume_rpcapi.VolumeAPI() @utils.synchronized('cleanup_incomplete_backups_%s' % SERVICE_PGRP, external=True, delay=0.1) def _cleanup_incomplete_backup_operations(self, ctxt): # Only the first launched process should do the cleanup, the others # have waited on the lock for the first one to finish the cleanup and # can now continue with the start process. if self._process_number != 1: LOG.debug("Process #%s %sskips cleanup.", self._process_number, '(pgid=%s) ' % SERVICE_PGRP if SERVICE_PGRP else '') return LOG.info("Cleaning up incomplete backup operations.") # TODO(smulcahy) implement full resume of backup and restore # operations on restart (rather than simply resetting). # We only need to deal with the backups that aren't complete. # N.B. NULL status is possible and we consider it incomplete. incomplete_status = list(fields.BackupStatus.ALL) incomplete_status.remove(fields.BackupStatus.AVAILABLE) incomplete_status.append(None) backups = objects.BackupList.get_all( ctxt, filters={'host': self.host, 'status': incomplete_status}) for backup in backups: try: self._cleanup_one_backup(ctxt, backup) except Exception: LOG.exception("Problem cleaning up backup %(bkup)s.", {'bkup': backup['id']}) try: self._cleanup_temp_volumes_snapshots_for_one_backup(ctxt, backup) except Exception: LOG.exception("Problem cleaning temp volumes and " "snapshots for backup %(bkup)s.", {'bkup': backup['id']}) def _cleanup_one_volume(self, ctxt, volume_id): try: volume = objects.Volume.get_by_id(ctxt, volume_id) except exception.VolumeNotFound: LOG.info('Volume %s does not exist anymore. Ignoring.', volume_id) return if volume['status'] == 'backing-up': self._detach_all_attachments(ctxt, volume) LOG.info('Resetting volume %(vol_id)s to previous ' 'status %(status)s (was backing-up).', {'vol_id': volume['id'], 'status': volume['previous_status']}) self.db.volume_update(ctxt, volume['id'], {'status': volume['previous_status']}) elif volume['status'] == 'restoring-backup': self._detach_all_attachments(ctxt, volume) LOG.info('Setting volume %s to error_restoring ' '(was restoring-backup).', volume['id']) self.db.volume_update(ctxt, volume['id'], {'status': 'error_restoring'}) def _cleanup_one_snapshot(self, ctxt, snapshot_id): try: snapshot = objects.Snapshot.get_by_id(ctxt, snapshot_id) except exception.SnapshotNotFound: LOG.info('Snapshot %s does not exist anymore. 
Ignoring.', snapshot_id) return if snapshot['status'] == 'backing-up': LOG.info('Resetting snapshot %(snap_id)s to previous ' 'status %(status)s (was backing-up).', {'snap_id': snapshot['id'], 'status': fields.SnapshotStatus.AVAILABLE}) snapshot.status = fields.SnapshotStatus.AVAILABLE snapshot.save() def _cleanup_one_backup(self, ctxt, backup): if backup['status'] == fields.BackupStatus.CREATING: LOG.info('Resetting backup %s to error (was creating).', backup['id']) self._cleanup_one_volume(ctxt, backup.volume_id) if backup.snapshot_id: self._cleanup_one_snapshot(ctxt, backup.snapshot_id) err = 'incomplete backup reset on manager restart' volume_utils.update_backup_error(backup, err) elif backup['status'] == fields.BackupStatus.RESTORING: LOG.info('Resetting backup %s to ' 'available (was restoring).', backup['id']) self._cleanup_one_volume(ctxt, backup.restore_volume_id) if backup.snapshot_id: self._cleanup_one_snapshot(ctxt, backup.snapshot_id) backup.status = fields.BackupStatus.AVAILABLE backup.save() elif backup['status'] == fields.BackupStatus.DELETING: # Don't resume deleting the backup of an encrypted volume. The # admin context won't be sufficient to delete the backup's copy # of the encryption key ID (a real user context is required). if backup.encryption_key_id is None: LOG.info('Resuming delete on backup: %s.', backup.id) if CONF.backup_service_inithost_offload: # Offload all the pending backup delete operations to the # threadpool to prevent the main backup service thread # from being blocked. self._add_to_threadpool(self.delete_backup, ctxt, backup) else: # Delete backups sequentially self.delete_backup(ctxt, backup) else: LOG.info('Unable to resume deleting backup of an encrypted ' 'volume, resetting backup %s to error_deleting ' '(was deleting).', backup.id) backup.status = fields.BackupStatus.ERROR_DELETING backup.save() def _detach_all_attachments(self, ctxt, volume): attachments = volume['volume_attachment'] or [] for attachment in attachments: if (attachment['attached_host'] == self.host and attachment['instance_uuid'] is None): try: rpcapi = self.volume_rpcapi rpcapi.detach_volume(ctxt, volume, attachment['id']) except Exception: LOG.exception("Detach attachment %(attach_id)s failed.", {'attach_id': attachment['id']}, resource=volume) def _delete_temp_volume(self, ctxt, backup): try: temp_volume = objects.Volume.get_by_id( ctxt, backup.temp_volume_id) self.volume_rpcapi.delete_volume(ctxt, temp_volume) except exception.VolumeNotFound: LOG.debug("Could not find temp volume %(vol)s to clean up " "for backup %(backup)s.", {'vol': backup.temp_volume_id, 'backup': backup.id}) backup.temp_volume_id = None backup.save() def _delete_temp_snapshot(self, ctxt, backup): try: temp_snapshot = objects.Snapshot.get_by_id( ctxt, backup.temp_snapshot_id) # We may want to consider routing those calls through the # cinder API. temp_snapshot.status = fields.SnapshotStatus.DELETING temp_snapshot.save() self.volume_rpcapi.delete_snapshot(ctxt, temp_snapshot) except exception.SnapshotNotFound: LOG.debug("Could not find temp snapshot %(snap)s to clean " "up for backup %(backup)s.", {'snap': backup.temp_snapshot_id, 'backup': backup.id}) backup.temp_snapshot_id = None backup.save() def _cleanup_temp_volumes_snapshots_for_one_backup(self, ctxt, backup): # NOTE(xyang): If the service crashes or gets restarted during the # backup operation, there could be temporary volumes or snapshots # that are not deleted. 
Make sure any temporary volumes or snapshots # create by the backup job are deleted when service is started. if (backup.temp_volume_id and backup.status == fields.BackupStatus.ERROR): self._delete_temp_volume(ctxt, backup) if (backup.temp_snapshot_id and backup.status == fields.BackupStatus.ERROR): self._delete_temp_snapshot(ctxt, backup) def _cleanup_temp_volumes_snapshots_when_backup_created( self, ctxt, backup): # Delete temp volumes or snapshots when backup creation is completed. if backup.temp_volume_id: self._delete_temp_volume(ctxt, backup) if backup.temp_snapshot_id: self._delete_temp_snapshot(ctxt, backup) @utils.limit_operations def create_backup(self, context, backup): """Create volume backups using configured backup service.""" volume_id = backup.volume_id snapshot_id = backup.snapshot_id volume = objects.Volume.get_by_id(context, volume_id) snapshot = objects.Snapshot.get_by_id( context, snapshot_id) if snapshot_id else None previous_status = volume.get('previous_status', None) context.message_resource_id = backup.id context.message_resource_type = message_field.Resource.VOLUME_BACKUP context.message_action = message_field.Action.BACKUP_CREATE if snapshot_id: log_message = ('Create backup started, backup: %(backup_id)s ' 'volume: %(volume_id)s snapshot: %(snapshot_id)s.' % {'backup_id': backup.id, 'volume_id': volume_id, 'snapshot_id': snapshot_id}) else: log_message = ('Create backup started, backup: %(backup_id)s ' 'volume: %(volume_id)s.' % {'backup_id': backup.id, 'volume_id': volume_id}) LOG.info(log_message) self._notify_about_backup_usage(context, backup, "create.start") expected_status = "backing-up" if snapshot: actual_status = snapshot['status'] if actual_status != expected_status: err = _('Create backup aborted, expected snapshot status ' '%(expected_status)s but got %(actual_status)s.') % { 'expected_status': expected_status, 'actual_status': actual_status, } volume_utils.update_backup_error(backup, err) raise exception.InvalidSnapshot(reason=err) else: actual_status = volume['status'] if actual_status != expected_status: err = _('Create backup aborted, expected volume status ' '%(expected_status)s but got %(actual_status)s.') % { 'expected_status': expected_status, 'actual_status': actual_status, } volume_utils.update_backup_error(backup, err) raise exception.InvalidVolume(reason=err) expected_status = fields.BackupStatus.CREATING actual_status = backup.status if actual_status != expected_status: err = _('Create backup aborted, expected backup status ' '%(expected_status)s but got %(actual_status)s.') % { 'expected_status': expected_status, 'actual_status': actual_status, } volume_utils.update_backup_error(backup, err) self.message_api.create_from_request_context( context, detail=message_field.Detail.BACKUP_INVALID_STATE) raise exception.InvalidBackup(reason=err) try: if not self.is_working(): err = _('Create backup aborted due to backup service is down.') volume_utils.update_backup_error(backup, err) self.message_api.create_from_request_context( context, detail=message_field.Detail.BACKUP_SERVICE_DOWN) raise exception.InvalidBackup(reason=err) if not backup.availability_zone: backup.availability_zone = self.az backup.service = self.driver_name backup.save() # Start backup, then continue_backup, then finish_backup self._start_backup(context, backup, volume) except Exception as err: with excutils.save_and_reraise_exception(): if snapshot_id: assert snapshot is not None snapshot.status = fields.SnapshotStatus.AVAILABLE snapshot.save() else: try: 
self.db.volume_update( context, volume_id, {'status': previous_status, 'previous_status': 'error_backing-up'}) except exception.VolumeNotFound: # If the volume was deleted we cannot update its # status but we still want to set the backup to error. pass volume_utils.update_backup_error(backup, str(err)) def _start_backup(self, context, backup, volume): """This starts the backup process. First we have to get the backup device from the volume manager. This can take a long time to complete. Once the volume manager is done creating/getting the backup device, then we get a callback to complete the process of backing up the volume. """ # Save a copy of the encryption key ID in case the volume is deleted. if (volume.encryption_key_id is not None and backup.encryption_key_id is None): backup.encryption_key_id = volume_utils.clone_encryption_key( context, key_manager.API(CONF), volume.encryption_key_id) backup.save() # This is an async call to the volume manager. We will get a # callback from the volume manager to continue once it's done. LOG.info("Call Volume Manager to get_backup_device for %s", backup) self.volume_rpcapi.get_backup_device(context, backup, volume) def continue_backup(self, context, backup, backup_device): """This is the callback from the volume manager to continue.""" message_created = False volume_id = backup.volume_id volume = objects.Volume.get_by_id(context, volume_id) snapshot_id = backup.snapshot_id snapshot = objects.Snapshot.get_by_id( context, snapshot_id) if snapshot_id else None previous_status = volume.get('previous_status', None) backup_service = self.service(context) properties = volume_utils.brick_get_connector_properties( CONF.use_multipath_for_image_xfer, enforce_multipath=False) updates = {} try: try: attach_info = self._attach_device(context, backup_device.device_obj, properties, backup_device.is_snapshot) except Exception: with excutils.save_and_reraise_exception(): if not message_created: message_created = True self.message_api.create_from_request_context( context, detail=message_field.Detail.ATTACH_ERROR) try: device_path = attach_info['device']['path'] if (isinstance(device_path, str) and not os.path.isdir(device_path)): if backup_device.secure_enabled: with open(device_path, 'rb') as device_file: updates = backup_service.backup( backup, tpool.Proxy(device_file)) else: with utils.temporary_chown(device_path): with open(device_path, 'rb') as device_file: updates = backup_service.backup( backup, tpool.Proxy(device_file)) # device_path is already file-like so no need to open it else: updates = backup_service.backup(backup, tpool.Proxy(device_path)) except Exception: with excutils.save_and_reraise_exception(): if not message_created: message_created = True self.message_api.create_from_request_context( context, detail= message_field.Detail.BACKUP_CREATE_DRIVER_ERROR) finally: try: self._detach_device(context, attach_info, backup_device.device_obj, properties, backup_device.is_snapshot, force=True, ignore_errors=True) except Exception: with excutils.save_and_reraise_exception(): if not message_created: message_created = True self.message_api.create_from_request_context( context, detail= message_field.Detail.DETACH_ERROR) except Exception as err: with excutils.save_and_reraise_exception(): if snapshot: snapshot.status = fields.SnapshotStatus.AVAILABLE snapshot.save() else: self.db.volume_update( context, volume_id, {'status': previous_status, 'previous_status': 'error_backing-up'}) volume_utils.update_backup_error(backup, str(err)) finally: with 
backup.as_read_deleted(): backup.refresh() try: self._cleanup_temp_volumes_snapshots_when_backup_created( context, backup) except Exception: with excutils.save_and_reraise_exception(): if not message_created: self.message_api.create_from_request_context( context, detail= message_field.Detail.BACKUP_CREATE_CLEANUP_ERROR) self._finish_backup(context, backup, volume, updates) def _finish_backup(self, context, backup, volume, updates): volume_id = backup.volume_id snapshot_id = backup.snapshot_id previous_status = volume.get('previous_status', None) # Restore the original status. if snapshot_id: self.db.snapshot_update( context, snapshot_id, {'status': fields.SnapshotStatus.AVAILABLE}) else: self.db.volume_update(context, volume_id, {'status': previous_status, 'previous_status': 'backing-up'}) # continue_backup method above updated the status for the backup, so # it will reflect latest status, even if it is deleted completion_msg = 'finished' if backup.status in (fields.BackupStatus.DELETING, fields.BackupStatus.DELETED): completion_msg = 'aborted' else: backup.status = fields.BackupStatus.AVAILABLE backup.size = volume['size'] if updates: backup.update(updates) backup.save() # Handle the num_dependent_backups of parent backup when child # backup has created successfully. if backup.parent_id: parent_backup = objects.Backup.get_by_id(context, backup.parent_id) parent_backup.num_dependent_backups += 1 parent_backup.save() LOG.info('Create backup %s. backup: %s.', completion_msg, backup.id) self._notify_about_backup_usage(context, backup, "create.end") def _is_our_backup(self, backup): # Accept strings and Service OVO if not isinstance(backup, str): backup = backup.service if not backup: return True # TODO(tommylikehu): We upgraded the 'driver_name' from module # to class name, so we use 'in' here to match two namings, # this can be replaced with equal sign during next # release (Rocky). if self.driver_name.startswith(backup): return True # We support renaming of drivers, so check old names as well for key, value in MAPPING.items(): if key.startswith(backup) and self.driver_name.startswith(value): return True return False @utils.limit_operations def restore_backup(self, context, backup, volume_id, volume_is_new): """Restore volume backups from configured backup service. :param context: RequestContext for the restore operation :param backup: Backup that we're restoring :param volume_id: The ID of the volume into which we're restoring :param volume_is_new: The volume does not have stale data, so sparse backups can be restored as such. 
""" context.message_resource_id = backup.id context.message_resource_type = message_field.Resource.VOLUME_BACKUP context.message_action = message_field.Action.BACKUP_RESTORE LOG.info('Restore backup started, backup: %(backup_id)s ' 'volume: %(volume_id)s.', {'backup_id': backup.id, 'volume_id': volume_id}) volume = objects.Volume.get_by_id(context, volume_id) self._notify_about_backup_usage(context, backup, "restore.start") expected_status = [fields.VolumeStatus.RESTORING_BACKUP, fields.VolumeStatus.CREATING] volume_previous_status = volume['status'] if volume_previous_status not in expected_status: err = (_('Restore backup aborted, expected volume status ' '%(expected_status)s but got %(actual_status)s.') % {'expected_status': ','.join(expected_status), 'actual_status': volume_previous_status}) backup.status = fields.BackupStatus.AVAILABLE backup.save() self.db.volume_update( context, volume_id, {'status': (fields.VolumeStatus.ERROR if volume_previous_status == fields.VolumeStatus.CREATING else fields.VolumeStatus.ERROR_RESTORING)}) self.message_api.create( context, action=message_field.Action.BACKUP_RESTORE, resource_type=message_field.Resource.VOLUME_BACKUP, resource_uuid=volume.id, detail=message_field.Detail.VOLUME_INVALID_STATE) raise exception.InvalidVolume(reason=err) expected_status = fields.BackupStatus.RESTORING actual_status = backup['status'] if actual_status != expected_status: err = (_('Restore backup aborted: expected backup status ' '%(expected_status)s but got %(actual_status)s.') % {'expected_status': expected_status, 'actual_status': actual_status}) volume_utils.update_backup_error(backup, err) self.db.volume_update(context, volume_id, {'status': fields.VolumeStatus.ERROR}) self.message_api.create_from_request_context( context, detail=message_field.Detail.BACKUP_INVALID_STATE) raise exception.InvalidBackup(reason=err) if volume['size'] > backup['size']: LOG.info('Volume: %(vol_id)s, size: %(vol_size)d is ' 'larger than backup: %(backup_id)s, ' 'size: %(backup_size)d, continuing with restore.', {'vol_id': volume['id'], 'vol_size': volume['size'], 'backup_id': backup['id'], 'backup_size': backup['size']}) if not self._is_our_backup(backup): err = _('Restore backup aborted, the backup service currently' ' configured [%(configured_service)s] is not the' ' backup service that was used to create this' ' backup [%(backup_service)s].') % { 'configured_service': self.driver_name, 'backup_service': backup.service, } backup.status = fields.BackupStatus.AVAILABLE backup.save() self.db.volume_update(context, volume_id, {'status': fields.VolumeStatus.ERROR}) raise exception.InvalidBackup(reason=err) canceled = False try: self._run_restore(context, backup, volume, volume_is_new) except exception.BackupRestoreCancel: canceled = True except Exception: with excutils.save_and_reraise_exception(): self.db.volume_update( context, volume_id, {'status': (fields.VolumeStatus.ERROR if actual_status == fields.VolumeStatus.CREATING else fields.VolumeStatus.ERROR_RESTORING)}) backup.status = fields.BackupStatus.AVAILABLE backup.save() if canceled: volume.status = fields.VolumeStatus.ERROR else: volume.status = fields.VolumeStatus.AVAILABLE # NOTE(tommylikehu): If previous status is 'creating', this is # just a new created volume and we need update the 'launched_at' # attribute as well. 
if volume_previous_status == fields.VolumeStatus.CREATING: volume['launched_at'] = timeutils.utcnow() old_src_backup_id = self.db.volume_metadata_get( context, volume_id).get("src_backup_id", None) if backup.volume_id != volume.id or ( old_src_backup_id and old_src_backup_id != backup.id): self.db.volume_metadata_update( context, volume.id, {'src_backup_id': backup.id}, False) volume.save() backup.status = fields.BackupStatus.AVAILABLE backup.save() LOG.info('%(result)s restoring backup %(backup_id)s to volume ' '%(volume_id)s.', {'result': 'Canceled' if canceled else 'Finished', 'backup_id': backup.id, 'volume_id': volume_id}) self._notify_about_backup_usage(context, backup, "restore.end") def _run_restore(self, context, backup, volume, volume_is_new): message_created = False orig_key_id = volume.encryption_key_id backup_service = self.service(context) properties = volume_utils.brick_get_connector_properties( CONF.use_multipath_for_image_xfer, enforce_multipath=False) secure_enabled = ( self.volume_rpcapi.secure_file_operations_enabled(context, volume)) try: attach_info = self._attach_device(context, volume, properties) except Exception: self.message_api.create_from_request_context( context, detail=message_field.Detail.ATTACH_ERROR) raise # NOTE(geguileo): Not all I/O disk operations properly do greenthread # context switching and may end up blocking the greenthread, so we go # with native threads proxy-wrapping the device file object. try: device_path = attach_info['device']['path'] open_mode = 'rb+' if os.name == 'nt' else 'wb' if (isinstance(device_path, str) and not os.path.isdir(device_path)): if secure_enabled: with open(device_path, open_mode) as device_file: backup_service.restore(backup, volume.id, tpool.Proxy(device_file), volume_is_new) else: with utils.temporary_chown(device_path): with open(device_path, open_mode) as device_file: backup_service.restore(backup, volume.id, tpool.Proxy(device_file), volume_is_new) # device_path is already file-like so no need to open it else: backup_service.restore(backup, volume.id, tpool.Proxy(device_path), volume_is_new) except exception.BackupRestoreCancel: raise except Exception: LOG.exception('Restoring backup %(backup_id)s to volume ' '%(volume_id)s failed.', {'backup_id': backup.id, 'volume_id': volume.id}) # We set message_create to True before creating the # message because if the message create call fails # and is catched by the base/outer exception handler # then we will end up storing a wrong message message_created = True self.message_api.create_from_request_context( context, detail=message_field.Detail.BACKUP_RESTORE_ERROR) raise finally: try: self._detach_device(context, attach_info, volume, properties, force=True) except Exception: if not message_created: self.message_api.create_from_request_context( context, detail=message_field.Detail.DETACH_ERROR) raise # Regardless of whether the restore was successful, do some # housekeeping to ensure the restored volume's encryption key ID is # unique, and any previous key ID is deleted. Start by fetching fresh # info on the restored volume. 
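# NOTE: Illustrative aside, not part of the original flow. The housekeeping
# below rotates the restored volume's encryption key: any stale key ID left on
# the volume is deleted and a fresh clone of the backup's key is created, so
# no two volumes (or a volume and its backup) ever share a key ID. A minimal
# doctest-style sketch of that clone-then-delete pattern, using a plain dict
# as a hypothetical stand-in for the castellan key manager:
#
#     >>> import uuid
#     >>> keys = {'backup-key': 'secret', 'old-volume-key': 'secret'}
#     >>> def clone(key_id):
#     ...     new_id = str(uuid.uuid4())
#     ...     keys[new_id] = keys[key_id]
#     ...     return new_id
#     >>> keys.pop('old-volume-key')            # delete the stale original key
#     'secret'
#     >>> new_volume_key = clone('backup-key')  # unique key for the volume
#     >>> new_volume_key != 'backup-key'
#     True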
restored_volume = objects.Volume.get_by_id(context, volume.id) restored_key_id = restored_volume.encryption_key_id if restored_key_id != orig_key_id: LOG.info('Updating encryption key ID for volume %(volume_id)s ' 'from backup %(backup_id)s.', {'volume_id': volume.id, 'backup_id': backup.id}) key_mgr = key_manager.API(CONF) if orig_key_id: LOG.debug('Deleting original volume encryption key ID.') volume_utils.delete_encryption_key(context, key_mgr, orig_key_id) if backup.encryption_key_id is None: # This backup predates the current code that stores the cloned # key ID in the backup database. Fortunately, the key ID # restored from the backup data _is_ a clone of the original # volume's key ID, so grab it. LOG.debug('Gleaning backup encryption key ID from metadata.') backup.encryption_key_id = restored_key_id backup.save() # Clone the key ID again to ensure every restored volume has # a unique key ID. The volume's key ID should not be the same # as the backup.encryption_key_id (the copy made when the backup # was first created). new_key_id = volume_utils.clone_encryption_key( context, key_mgr, backup.encryption_key_id) restored_volume.encryption_key_id = new_key_id restored_volume.save() else: LOG.debug('Encryption key ID for volume %(volume_id)s already ' 'matches encryption key ID in backup %(backup_id)s.', {'volume_id': volume.id, 'backup_id': backup.id}) def delete_backup(self, context, backup): """Delete volume backup from configured backup service.""" LOG.info('Delete backup started, backup: %s.', backup.id) self._notify_about_backup_usage(context, backup, "delete.start") context.message_resource_id = backup.id context.message_resource_type = message_field.Resource.VOLUME_BACKUP context.message_action = message_field.Action.BACKUP_DELETE expected_status = fields.BackupStatus.DELETING actual_status = backup.status if actual_status != expected_status: err = _('Delete_backup aborted, expected backup status ' '%(expected_status)s but got %(actual_status)s.') \ % {'expected_status': expected_status, 'actual_status': actual_status} volume_utils.update_backup_error(backup, err) self.message_api.create_from_request_context( context, detail=message_field.Detail.BACKUP_INVALID_STATE) raise exception.InvalidBackup(reason=err) if backup.service and not self.is_working(): err = _('Delete backup is aborted due to backup service is down.') status = fields.BackupStatus.ERROR_DELETING volume_utils.update_backup_error(backup, err, status) self.message_api.create_from_request_context( context, detail=message_field.Detail.BACKUP_SERVICE_DOWN) raise exception.InvalidBackup(reason=err) if not self._is_our_backup(backup): err = _('Delete backup aborted, the backup service currently' ' configured [%(configured_service)s] is not the' ' backup service that was used to create this' ' backup [%(backup_service)s].')\ % {'configured_service': self.driver_name, 'backup_service': backup.service} volume_utils.update_backup_error(backup, err) raise exception.InvalidBackup(reason=err) if backup.service: try: backup_service = self.service(context) backup_service.delete_backup(backup) except Exception as err: with excutils.save_and_reraise_exception(): volume_utils.update_backup_error(backup, str(err)) self.message_api.create_from_request_context( context, detail=message_field.Detail.BACKUP_DELETE_DRIVER_ERROR) # Get reservations try: reserve_opts = { 'backups': -1, 'backup_gigabytes': -backup.size, } reservations = QUOTAS.reserve(context, project_id=backup.project_id, **reserve_opts) except Exception: reservations = None 
LOG.exception("Failed to update usages deleting backup") if backup.encryption_key_id is not None: volume_utils.delete_encryption_key(context, key_manager.API(CONF), backup.encryption_key_id) backup.encryption_key_id = None backup.save() backup.destroy() # If this backup is incremental backup, handle the # num_dependent_backups of parent backup if backup.parent_id: parent_backup = objects.Backup.get_by_id(context, backup.parent_id) if parent_backup.has_dependent_backups: parent_backup.num_dependent_backups -= 1 parent_backup.save() # Commit the reservations if reservations: QUOTAS.commit(context, reservations, project_id=backup.project_id) LOG.info('Delete backup finished, backup %s deleted.', backup.id) self._notify_about_backup_usage(context, backup, "delete.end") def _notify_about_backup_usage(self, context, backup, event_suffix, extra_usage_info=None): volume_utils.notify_about_backup_usage( context, backup, event_suffix, extra_usage_info=extra_usage_info, host=self.host) def export_record(self, context, backup): """Export all volume backup metadata details to allow clean import. Export backup metadata so it could be re-imported into the database without any prerequisite in the backup database. :param context: running context :param backup: backup object to export :returns: backup_record - a description of how to import the backup :returns: contains 'backup_url' - how to import the backup, and :returns: 'backup_service' describing the needed driver. :raises InvalidBackup: """ LOG.info('Export record started, backup: %s.', backup.id) expected_status = fields.BackupStatus.AVAILABLE actual_status = backup.status if actual_status != expected_status: err = (_('Export backup aborted, expected backup status ' '%(expected_status)s but got %(actual_status)s.') % {'expected_status': expected_status, 'actual_status': actual_status}) raise exception.InvalidBackup(reason=err) backup_record = {'backup_service': backup.service} if not self._is_our_backup(backup): err = (_('Export record aborted, the backup service currently ' 'configured [%(configured_service)s] is not the ' 'backup service that was used to create this ' 'backup [%(backup_service)s].') % {'configured_service': self.driver_name, 'backup_service': backup.service}) raise exception.InvalidBackup(reason=err) # Call driver to create backup description string try: backup_service = self.service(context) driver_info = backup_service.export_record(backup) backup_url = backup.encode_record(driver_info=driver_info) backup_record['backup_url'] = backup_url except Exception as err: msg = str(err) raise exception.InvalidBackup(reason=msg) LOG.info('Export record finished, backup %s exported.', backup.id) return backup_record def import_record(self, context, backup, backup_service, backup_url, backup_hosts): """Import all volume backup metadata details to the backup db. :param context: running context :param backup: The new backup object for the import :param backup_service: The needed backup driver for import :param backup_url: An identifier string to locate the backup :param backup_hosts: Potential hosts to execute the import :raises InvalidBackup: :raises ServiceNotFound: """ LOG.info('Import record started, backup_url: %s.', backup_url) # Can we import this backup? if not self._is_our_backup(backup_service): # No, are there additional potential backup hosts in the list? 
if len(backup_hosts) > 0: # try the next host on the list, maybe he can import first_host = backup_hosts.pop() self.backup_rpcapi.import_record(context, first_host, backup, backup_service, backup_url, backup_hosts) else: # empty list - we are the last host on the list, fail err = _('Import record failed, cannot find backup ' 'service to perform the import. Request service ' '%(service)s.') % {'service': backup_service} volume_utils.update_backup_error(backup, err) raise exception.ServiceNotFound(service_id=backup_service) else: # Yes... try: # Deserialize backup record information backup_options = backup.decode_record(backup_url) # Extract driver specific info and pass it to the driver driver_options = backup_options.pop('driver_info', {}) backup_service = self.service(context) backup_service.import_record(backup, driver_options) except Exception as err: msg = str(err) volume_utils.update_backup_error(backup, msg) raise exception.InvalidBackup(reason=msg) required_import_options = { 'display_name', 'display_description', 'container', 'size', 'service_metadata', 'object_count', 'id' } # Check for missing fields in imported data missing_opts = required_import_options - set(backup_options) if missing_opts: msg = (_('Driver successfully decoded imported backup data, ' 'but there are missing fields (%s).') % ', '.join(missing_opts)) volume_utils.update_backup_error(backup, msg) raise exception.InvalidBackup(reason=msg) # Confirm the ID from the record in the DB is the right one backup_id = backup_options['id'] if backup_id != backup.id: msg = (_('Trying to import backup metadata from id %(meta_id)s' ' into backup %(id)s.') % {'meta_id': backup_id, 'id': backup.id}) volume_utils.update_backup_error(backup, msg) raise exception.InvalidBackup(reason=msg) # Overwrite some fields backup_options['service'] = self.driver_name backup_options['availability_zone'] = self.az backup_options['host'] = self.host # Remove some values which are not actual fields and some that # were set by the API node for key in ('name', 'user_id', 'project_id', 'deleted_at', 'deleted', 'fail_reason', 'status'): backup_options.pop(key, None) # Update the database backup.update(backup_options) backup.save() # Update the backup's status backup.update({"status": fields.BackupStatus.AVAILABLE}) backup.save() LOG.info('Import record id %s metadata from driver ' 'finished.', backup.id) def reset_status(self, context, backup, status): """Reset volume backup status. :param context: running context :param backup: The backup object for reset status operation :param status: The status to be set :raises InvalidBackup: :raises AttributeError: """ LOG.info('Reset backup status started, backup_id: ' '%(backup_id)s, status: %(status)s.', {'backup_id': backup.id, 'status': status}) LOG.info('Backup service: %s.', backup.service) if not self._is_our_backup(backup): err = _('Reset backup status aborted, the backup service' ' currently configured [%(configured_service)s] ' 'is not the backup service that was used to create' ' this backup [%(backup_service)s].') % \ {'configured_service': self.driver_name, 'backup_service': backup.service} raise exception.InvalidBackup(reason=err) if backup.service is not None: backup.status = status backup.save() # Needs to clean temporary volumes and snapshots. 
try: self._cleanup_temp_volumes_snapshots_for_one_backup( context, backup) except Exception: LOG.exception("Problem cleaning temp volumes and " "snapshots for backup %(bkup)s.", {'bkup': backup.id}) volume_utils.notify_about_backup_usage(context, backup, 'reset_status.end') def check_support_to_force_delete(self, context): """Check if the backup driver supports force delete operation. :param context: running context """ backup_service = self.service(context) return backup_service.support_force_delete def _attach_device(self, ctxt, backup_device, properties, is_snapshot=False): """Attach backup device.""" if not is_snapshot: return self._attach_volume(ctxt, backup_device, properties) else: return self._attach_snapshot(ctxt, backup_device, properties) def _attach_volume(self, context, volume, properties): """Attach a volume.""" try: conn = self.volume_rpcapi.initialize_connection(context, volume, properties) return self._connect_device(conn) except Exception: with excutils.save_and_reraise_exception(): try: self.volume_rpcapi.terminate_connection(context, volume, properties, force=True) except Exception: LOG.warning("Failed to terminate the connection " "of volume %(volume_id)s, but it is " "acceptable.", {'volume_id': volume.id}) def _attach_snapshot(self, ctxt, snapshot, properties): """Attach a snapshot.""" try: conn = self.volume_rpcapi.initialize_connection_snapshot( ctxt, snapshot, properties) return self._connect_device(conn) except Exception: with excutils.save_and_reraise_exception(): try: self.volume_rpcapi.terminate_connection_snapshot( ctxt, snapshot, properties, force=True) except Exception: LOG.warning("Failed to terminate the connection " "of snapshot %(snapshot_id)s, but it is " "acceptable.", {'snapshot_id': snapshot.id}) def _connect_device(self, conn): """Establish connection to device.""" use_multipath = CONF.use_multipath_for_image_xfer device_scan_attempts = CONF.num_volume_device_scan_tries protocol = conn['driver_volume_type'] connector = volume_utils.brick_get_connector( protocol, use_multipath=use_multipath, device_scan_attempts=device_scan_attempts, conn=conn, expect_raw_disk=True) vol_handle = connector.connect_volume(conn['data']) return {'conn': conn, 'device': vol_handle, 'connector': connector} def _detach_device(self, ctxt, attach_info, device, properties, is_snapshot=False, force=False, ignore_errors=False): """Disconnect the volume or snapshot from the host. 
""" connector = attach_info['connector'] connector.disconnect_volume(attach_info['conn']['data'], attach_info['device'], force=force, ignore_errors=ignore_errors) rpcapi = self.volume_rpcapi if not is_snapshot: rpcapi.terminate_connection(ctxt, device, properties, force=force) rpcapi.remove_export(ctxt, device, sync=True) else: rpcapi.terminate_connection_snapshot(ctxt, device, properties, force=force) rpcapi.remove_export_snapshot(ctxt, device, sync=True) def is_working(self): return self.is_initialized @periodic_task.periodic_task( spacing=CONF.backup_driver_stats_polling_interval) def publish_service_capabilities(self, context): """Collect driver status and then publish.""" self._report_driver_status(context) self._publish_service_capabilities(context) def _report_driver_status(self, context): backup_stats = { 'backend_state': self.is_working(), 'driver_name': self.driver_name, 'availability_zone': self.az } self.update_service_capabilities(backup_stats) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/backup/rpcapi.py0000664000175000017500000001233400000000000017433 0ustar00zuulzuul00000000000000# Copyright (C) 2012 Hewlett-Packard Development Company, L.P. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Client side of the volume backup RPC API. """ from oslo_log import log as logging from cinder.common import constants from cinder import rpc LOG = logging.getLogger(__name__) class BackupAPI(rpc.RPCAPI): """Client side of the volume rpc API. API version history: .. code-block:: none 1.0 - Initial version. 1.1 - Changed methods to accept backup objects instead of IDs. 1.2 - A version that got in by mistake (without breaking anything). 1.3 - Dummy version bump to mark start of having cinder-backup service decoupled from cinder-volume. ... Mitaka supports messaging 1.3. Any changes to existing methods in 1.x after this point should be done so that they can handle version cap set to 1.3. 
2.0 - Remove 1.x compatibility 2.1 - Adds set_log_levels and get_log_levels 2.2 - Adds publish_service_capabilities 2.3 - Adds continue_backup call 2.4 - Add the volume_is_new flag to the restore_backup method """ RPC_API_VERSION = '2.4' RPC_DEFAULT_VERSION = '2.0' TOPIC = constants.BACKUP_TOPIC BINARY = 'cinder-backup' def create_backup(self, ctxt, backup): LOG.debug("create_backup in rpcapi backup_id %s", backup.id) cctxt = self._get_cctxt(server=backup.host) cctxt.cast(ctxt, 'create_backup', backup=backup) def continue_backup(self, ctxt, backup, backup_device): LOG.debug("continue_backup in rpcapi backup_id %s", backup.id) cctxt = self._get_cctxt(server=backup.host) cctxt.cast(ctxt, 'continue_backup', backup=backup, backup_device=backup_device) def restore_backup(self, ctxt, backup_host, backup, volume_id, volume_is_new): LOG.debug("restore_backup in rpcapi backup_id %s", backup.id) cctxt = self._get_cctxt(server=backup_host) if self.client.can_send_version('2.4'): cctxt.cast(ctxt, 'restore_backup', backup=backup, volume_id=volume_id, volume_is_new=volume_is_new) else: cctxt.cast(ctxt, 'restore_backup', backup=backup, volume_id=volume_id) def delete_backup(self, ctxt, backup): LOG.debug("delete_backup rpcapi backup_id %s", backup.id) cctxt = self._get_cctxt(server=backup.host) cctxt.cast(ctxt, 'delete_backup', backup=backup) def export_record(self, ctxt, backup) -> dict: LOG.debug("export_record in rpcapi backup_id %(id)s " "on host %(host)s.", {'id': backup.id, 'host': backup.host}) cctxt = self._get_cctxt(server=backup.host) return cctxt.call(ctxt, 'export_record', backup=backup) def import_record(self, ctxt, host, backup, backup_service, backup_url, backup_hosts) -> None: LOG.debug("import_record rpcapi backup id %(id)s " "on host %(host)s for backup_url %(url)s.", {'id': backup.id, 'host': host, 'url': backup_url}) cctxt = self._get_cctxt(server=host) cctxt.cast(ctxt, 'import_record', backup=backup, backup_service=backup_service, backup_url=backup_url, backup_hosts=backup_hosts) def reset_status(self, ctxt, backup, status): LOG.debug("reset_status in rpcapi backup_id %(id)s " "on host %(host)s.", {'id': backup.id, 'host': backup.host}) cctxt = self._get_cctxt(server=backup.host) cctxt.cast(ctxt, 'reset_status', backup=backup, status=status) def check_support_to_force_delete(self, ctxt, host) -> bool: LOG.debug("Check if backup driver supports force delete " "on host %(host)s.", {'host': host}) cctxt = self._get_cctxt(server=host) return cctxt.call(ctxt, 'check_support_to_force_delete') @rpc.assert_min_rpc_version('2.1') def set_log_levels(self, context, service, log_request): cctxt = self._get_cctxt(server=service.host, version='2.1') cctxt.cast(context, 'set_log_levels', log_request=log_request) @rpc.assert_min_rpc_version('2.1') def get_log_levels(self, context, service, log_request): cctxt = self._get_cctxt(server=service.host, version='2.1') return cctxt.call(context, 'get_log_levels', log_request=log_request) @rpc.assert_min_rpc_version('2.2') def publish_service_capabilities(self, ctxt): cctxt = self._get_cctxt(version='2.2', fanout=True) cctxt.cast(ctxt, 'publish_service_capabilities') ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1759315577.063118 cinder-27.0.0/cinder/brick/0000775000175000017500000000000000000000000015425 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 
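# ---------------------------------------------------------------------------
# NOTE: Illustrative sketch, not part of the cinder tree. BackupAPI above only
# passes the new ``volume_is_new`` argument to restore_backup when the
# negotiated RPC version is at least 2.4, and falls back to the pre-2.4
# signature otherwise. The standalone example below mimics that control flow
# with a hypothetical FakeClient standing in for the oslo.messaging client;
# only the version-gating pattern is meant to match, not the real transport.


class FakeClient:
    """Hypothetical client that only knows its pinned (capped) RPC version."""

    def __init__(self, pinned_version):
        self.pinned = tuple(int(p) for p in pinned_version.split('.'))

    def can_send_version(self, version):
        return tuple(int(p) for p in version.split('.')) <= self.pinned

    def cast(self, method, **kwargs):
        print('cast %s(%s)' % (method, ', '.join(sorted(kwargs))))


def restore_backup_compat(client, backup, volume_id, volume_is_new):
    # Only send the new argument when every backup service understands it.
    if client.can_send_version('2.4'):
        client.cast('restore_backup', backup=backup,
                    volume_id=volume_id, volume_is_new=volume_is_new)
    else:
        client.cast('restore_backup', backup=backup, volume_id=volume_id)


restore_backup_compat(FakeClient('2.3'), 'bak-1', 'vol-1', True)  # old form
restore_backup_compat(FakeClient('2.4'), 'bak-1', 'vol-1', True)  # new form
# ---------------------------------------------------------------------------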
cinder-27.0.0/cinder/brick/README.txt0000664000175000017500000000026100000000000017122 0ustar00zuulzuul00000000000000Brick has been migrated to a new standalone pypi library called os-brick. We are leaving the local_dev directory here for the time being until we can migrate it to a new home. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/brick/__init__.py0000664000175000017500000000000000000000000017524 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315577.0671182 cinder-27.0.0/cinder/brick/local_dev/0000775000175000017500000000000000000000000017355 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/brick/local_dev/__init__.py0000664000175000017500000000000000000000000021454 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/brick/local_dev/lvm.py0000664000175000017500000010330400000000000020526 0ustar00zuulzuul00000000000000# Copyright 2013 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ LVM class for performing LVM operations. """ import math import os import re from os_brick import executor from oslo_concurrency import processutils as putils from oslo_log import log as logging from oslo_utils import excutils from cinder import exception import cinder.privsep.lvm from cinder import utils LOG = logging.getLogger(__name__) MINIMUM_LVM_VERSION = (2, 2, 107) class LVM(executor.Executor): """LVM object to enable various LVM related operations.""" LVM_CMD_PREFIX = ['env', 'LC_ALL=C'] _supports_pvs_ignoreskippedcluster = None def __init__(self, vg_name, root_helper, create_vg=False, physical_volumes=None, lvm_type='default', executor=putils.execute, lvm_conf=None, suppress_fd_warn=False): """Initialize the LVM object. The LVM object is based on an LVM VolumeGroup, one instantiation for each VolumeGroup you have/use. 
:param vg_name: Name of existing VG or VG to create :param root_helper: Execution root_helper method to use :param create_vg: Indicates the VG doesn't exist and we want to create it :param physical_volumes: List of PVs to build VG on :param lvm_type: VG and Volume type (default, or thin) :param executor: Execute method to use, None uses common/processutils :param suppress_fd_warn: Add suppress FD Warn to LVM env """ super(LVM, self).__init__(execute=executor, root_helper=root_helper) self.vg_name = vg_name self.pv_list = [] self.vg_size = 0.0 self.vg_free_space = 0.0 self.vg_lv_count = 0 self.vg_uuid = None self.vg_thin_pool = None self.vg_thin_pool_size = 0.0 self.vg_thin_pool_free_space = 0.0 self._supports_snapshot_lv_activation = None self._supports_lvchange_ignoreskipactivation = None self.vg_provisioned_capacity = 0.0 if lvm_type not in ['default', 'thin']: raise exception.Invalid('lvm_type must be "default" or "thin"') # Ensure LVM_SYSTEM_DIR has been added to LVM.LVM_CMD_PREFIX # before the first LVM command is executed, and use the directory # where the specified lvm_conf file is located as the value. # NOTE(jdg): We use the temp var here because LVM_CMD_PREFIX is a # class global and if you use append here, you'll literally just keep # appending values to the global. _lvm_cmd_prefix = ['env', 'LC_ALL=C'] if lvm_conf and os.path.isfile(lvm_conf): lvm_sys_dir = os.path.dirname(lvm_conf) _lvm_cmd_prefix.append('LVM_SYSTEM_DIR=' + lvm_sys_dir) if suppress_fd_warn: _lvm_cmd_prefix.append('LVM_SUPPRESS_FD_WARNINGS=1') LVM.LVM_CMD_PREFIX = _lvm_cmd_prefix lvm_version = LVM.get_lvm_version(root_helper) if LVM.get_lvm_version(root_helper) < MINIMUM_LVM_VERSION: LOG.warning("LVM version %(current)s is lower than the minimum " "supported version: %(supported)s", {'current': lvm_version, 'supported': MINIMUM_LVM_VERSION}) if create_vg and physical_volumes is not None: try: self._create_vg(physical_volumes) except putils.ProcessExecutionError as err: LOG.exception('Error creating Volume Group') LOG.error('Cmd :%s', err.cmd) LOG.error('StdOut :%s', err.stdout) LOG.error('StdErr :%s', err.stderr) raise exception.VolumeGroupCreationFailed(vg_name=self.vg_name) if self._vg_exists() is False: LOG.error('Unable to locate Volume Group %s', vg_name) raise exception.VolumeGroupNotFound(vg_name=vg_name) # NOTE: we assume that the VG has been activated outside of Cinder if lvm_type == 'thin': pool_name = "%s-pool" % self.vg_name if self.get_volume(pool_name) is None: try: self.create_thin_pool(pool_name) except putils.ProcessExecutionError: # Maybe we just lost the race against another copy of # this driver being in init in parallel - e.g. # cinder-volume and cinder-backup starting in parallel if self.get_volume(pool_name) is None: raise self.vg_thin_pool = pool_name self.activate_lv(self.vg_thin_pool) self.pv_list = self.get_all_physical_volumes(root_helper, vg_name) def _vg_exists(self): """Simple check to see if VG exists. 
:returns: True if vg specified in object exists, else False """ exists = False cmd = LVM.LVM_CMD_PREFIX + ['vgs', '--noheadings', '-o', 'name', self.vg_name] (out, _err) = self._execute(*cmd, root_helper=self._root_helper, run_as_root=True) if out is not None: volume_groups = out.split() if self.vg_name in volume_groups: exists = True return exists def _create_vg(self, pv_list): cinder.privsep.lvm.create_vg(self.vg_name, pv_list) @utils.retry(retry=utils.retry_if_exit_code, retry_param=139, interval=0.5, backoff_rate=0.5) def _run_lvm_command(self, cmd_arg_list: list, root_helper: str = None, run_as_root: bool = True) -> tuple: """Run LVM commands with a retry on code 139 to work around LVM bugs. Refer to LP bug 1901783, LP bug 1932188. """ if not root_helper: root_helper = self._root_helper (out, err) = self._execute(*cmd_arg_list, root_helper=root_helper, run_as_root=run_as_root) return (out, err) def _get_thin_pool_free_space(self, vg_name, thin_pool_name): """Returns available thin pool free space. :param vg_name: the vg where the pool is placed :param thin_pool_name: the thin pool to gather info for :returns: Free space in GB (float), calculated using data_percent """ cmd = LVM.LVM_CMD_PREFIX +\ ['lvs', '--noheadings', '--unit=g', '-o', 'size,data_percent', '--separator', ':', '--nosuffix'] # NOTE(gfidente): data_percent only applies to some types of LV so we # make sure to append the actual thin pool name cmd.append("/dev/%s/%s" % (vg_name, thin_pool_name)) free_space = 0.0 try: (out, err) = self._run_lvm_command(cmd) if out is not None: out = out.strip() data = out.split(':') pool_size = float(data[0]) data_percent = float(data[1]) consumed_space = pool_size / 100 * data_percent free_space = pool_size - consumed_space free_space = round(free_space, 2) # Need noqa due to a false error about the 'err' variable being unused # even though it is used in the logging. Possibly related to # https://github.com/PyCQA/pyflakes/issues/378. except putils.ProcessExecutionError as err: # noqa LOG.exception('Error querying thin pool about data_percent') LOG.error('Cmd :%s', err.cmd) LOG.error('StdOut :%s', err.stdout) LOG.error('StdErr :%s', err.stderr) return free_space @staticmethod def get_lvm_version(root_helper): """Static method to get LVM version from system. :param root_helper: root_helper to use for execute :returns: version 3-tuple """ cmd = LVM.LVM_CMD_PREFIX + ['lvm', 'version'] (out, _err) = putils.execute(*cmd) lines = out.split('\n') for line in lines: if 'LVM version' in line: version_list = line.split() # NOTE(gfidente): version is formatted as follows: # major.minor.patchlevel(library API version)[-customisation] version = version_list[2] version_filter = r"(\d+)\.(\d+)\.(\d+).*" r = re.search(version_filter, version) version_tuple = tuple(map(int, r.group(1, 2, 3))) return version_tuple @staticmethod def supports_thin_provisioning(root_helper): """Static method to check for thin LVM support on a system. :param root_helper: root_helper to use for execute :returns: True if supported, False otherwise """ return LVM.get_lvm_version(root_helper) >= (2, 2, 95) @property def supports_snapshot_lv_activation(self): """Property indicating whether snap activation changes are supported. Check for LVM version >= 2.02.91. 
(LVM2 git: e8a40f6 Allow to activate snapshot) :returns: True/False indicating support """ if self._supports_snapshot_lv_activation is not None: return self._supports_snapshot_lv_activation self._supports_snapshot_lv_activation = ( self.get_lvm_version(self._root_helper) >= (2, 2, 91)) return self._supports_snapshot_lv_activation @property def supports_lvchange_ignoreskipactivation(self): """Property indicating whether lvchange can ignore skip activation. Check for LVM version >= 2.02.99. (LVM2 git: ab789c1bc add --ignoreactivationskip to lvchange) """ if self._supports_lvchange_ignoreskipactivation is not None: return self._supports_lvchange_ignoreskipactivation self._supports_lvchange_ignoreskipactivation = ( self.get_lvm_version(self._root_helper) >= (2, 2, 99)) return self._supports_lvchange_ignoreskipactivation @staticmethod def supports_pvs_ignoreskippedcluster(root_helper): """Property indicating whether pvs supports --ignoreskippedcluster Check for LVM version >= 2.02.103. (LVM2 git: baf95bbff cmdline: Add --ignoreskippedcluster. """ if LVM._supports_pvs_ignoreskippedcluster is not None: return LVM._supports_pvs_ignoreskippedcluster LVM._supports_pvs_ignoreskippedcluster = ( LVM.get_lvm_version(root_helper) >= (2, 2, 103)) return LVM._supports_pvs_ignoreskippedcluster @staticmethod @utils.retry(retry=utils.retry_if_exit_code, retry_param=139, interval=0.5, backoff_rate=0.5) # Bug#1901783 def get_lv_info(root_helper, vg_name=None, lv_name=None): """Retrieve info about LVs (all, in a VG, or a single LV). :param root_helper: root_helper to use for execute :param vg_name: optional, gathers info for only the specified VG :param lv_name: optional, gathers info for only the specified LV :returns: List of Dictionaries with LV info """ cmd = LVM.LVM_CMD_PREFIX + ['lvs', '--noheadings', '--unit=g', '-o', 'vg_name,name,size', '--nosuffix', '--readonly'] if lv_name is not None and vg_name is not None: cmd.append("%s/%s" % (vg_name, lv_name)) elif vg_name is not None: cmd.append(vg_name) try: (out, _err) = putils.execute(*cmd, root_helper=root_helper, run_as_root=True) except putils.ProcessExecutionError as err: with excutils.save_and_reraise_exception(reraise=True) as ctx: if "not found" in err.stderr or "Failed to find" in err.stderr: ctx.reraise = False LOG.info("Logical Volume not found when querying " "LVM info. (vg_name=%(vg)s, lv_name=%(lv)s", {'vg': vg_name, 'lv': lv_name}) out = None lv_list = [] if out is not None: volumes = out.split() iterator = zip(*[iter(volumes)] * 3) # pylint: disable=E1101 for vg, name, size in iterator: lv_list.append({"vg": vg, "name": name, "size": size}) return lv_list def get_volumes(self, lv_name=None): """Get all LV's associated with this instantiation (VG). :returns: List of Dictionaries with LV info """ return self.get_lv_info(self._root_helper, self.vg_name, lv_name) def get_volume(self, name): """Get reference object of volume specified by name. :returns: dict representation of Logical Volume if exists """ ref_list = self.get_volumes(name) for r in ref_list: if r['name'] == name: return r return None @staticmethod def get_all_physical_volumes(root_helper, vg_name=None): """Static method to get all PVs on a system. 
:param root_helper: root_helper to use for execute :param vg_name: optional, gathers info for only the specified VG :returns: List of Dictionaries with PV info """ field_sep = '|' cmd = LVM.LVM_CMD_PREFIX + ['pvs', '--noheadings', '--unit=g', '-o', 'vg_name,name,size,free', '--separator', field_sep, '--nosuffix'] if LVM.supports_pvs_ignoreskippedcluster(root_helper): cmd.append('--ignoreskippedcluster') (out, _err) = putils.execute(*cmd, root_helper=root_helper, run_as_root=True) pvs = out.split() if vg_name is not None: pvs = [pv for pv in pvs if vg_name == pv.split(field_sep)[0]] pv_list = [] for pv in pvs: fields = pv.split(field_sep) pv_list.append({'vg': fields[0], 'name': fields[1], 'size': float(fields[2]), 'available': float(fields[3])}) return pv_list @staticmethod def get_all_volume_groups(root_helper, vg_name=None): """Static method to get all VGs on a system. :param root_helper: root_helper to use for execute :param vg_name: optional, gathers info for only the specified VG :returns: List of Dictionaries with VG info """ cmd = LVM.LVM_CMD_PREFIX + ['vgs', '--noheadings', '--unit=g', '-o', 'name,size,free,lv_count,uuid', '--separator', ':', '--nosuffix'] if vg_name is not None: cmd.append(vg_name) (out, _err) = putils.execute(*cmd, root_helper=root_helper, run_as_root=True) vg_list = [] if out is not None: vgs = out.split() for vg in vgs: fields = vg.split(':') vg_list.append({'name': fields[0], 'size': float(fields[1]), 'available': float(fields[2]), 'lv_count': int(fields[3]), 'uuid': fields[4]}) return vg_list def update_volume_group_info(self): """Update VG info for this instantiation. Used to update member fields of object and provide a dict of info for caller. :returns: Dictionaries of VG info """ vg_list = self.get_all_volume_groups(self._root_helper, self.vg_name) if len(vg_list) != 1: LOG.error('Unable to find VG: %s', self.vg_name) raise exception.VolumeGroupNotFound(vg_name=self.vg_name) self.vg_size = float(vg_list[0]['size']) self.vg_free_space = float(vg_list[0]['available']) self.vg_lv_count = int(vg_list[0]['lv_count']) self.vg_uuid = vg_list[0]['uuid'] total_vols_size = 0.0 if self.vg_thin_pool is not None: # NOTE(xyang): If providing only self.vg_name, # get_lv_info will output info on the thin pool and all # individual volumes. # get_lv_info(self._root_helper, 'stack-vg') # sudo lvs --noheadings --unit=g -o vg_name,name,size # --nosuffix stack-vg # stack-vg stack-pool 9.51 # stack-vg volume-13380d16-54c3-4979-9d22-172082dbc1a1 1.00 # stack-vg volume-629e13ab-7759-46a5-b155-ee1eb20ca892 1.00 # stack-vg volume-e3e6281c-51ee-464c-b1a7-db6c0854622c 1.00 # # If providing both self.vg_name and self.vg_thin_pool, # get_lv_info will output only info on the thin pool, but not # individual volumes. # get_lv_info(self._root_helper, 'stack-vg', 'stack-pool') # sudo lvs --noheadings --unit=g -o vg_name,name,size # --nosuffix stack-vg/stack-pool # stack-vg stack-pool 9.51 # # We need info on both the thin pool and the volumes, # therefore we should provide only self.vg_name, but not # self.vg_thin_pool here. for lv in self.get_lv_info(self._root_helper, self.vg_name): lvsize = lv['size'] # get_lv_info runs "lvs" command with "--nosuffix". # This removes "g" from "1.00g" and only outputs "1.00". # Running "lvs" command without "--nosuffix" will output # "1.00g" if "g" is the unit. # Remove the unit if it is in lv['size']. 
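# NOTE: Tiny illustrative example, added here only to make the comment above
# concrete: when "lvs" is run without "--nosuffix" the size comes back as,
# e.g., "1.00g", and the trailing unit letter has to be dropped before the
# value can be converted to float, which is what the check just below does.
#
#     >>> lvsize = '1.00g'
#     >>> if not lvsize[-1].isdigit():
#     ...     lvsize = lvsize[:-1]
#     >>> float(lvsize)
#     1.0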
if not lv['size'][-1].isdigit(): lvsize = lvsize[:-1] if lv['name'] == self.vg_thin_pool: self.vg_thin_pool_size = float(lvsize) tpfs = self._get_thin_pool_free_space(self.vg_name, self.vg_thin_pool) self.vg_thin_pool_free_space = tpfs else: total_vols_size = total_vols_size + float(lvsize) total_vols_size = round(total_vols_size, 2) self.vg_provisioned_capacity = total_vols_size def _calculate_thin_pool_size(self): """Calculates the correct size for a thin pool. Ideally we would use 100% of the containing volume group and be done. But the 100%VG notation to lvcreate is not implemented and thus cannot be used. See https://bugzilla.redhat.com/show_bug.cgi?id=998347 Further, some amount of free space must remain in the volume group for metadata for the contained logical volumes. The exact amount depends on how much volume sharing you expect. :returns: An lvcreate-ready string for the number of calculated bytes. """ # make sure volume group information is current self.update_volume_group_info() # leave 5% free for metadata return "%sg" % (self.vg_free_space * 0.95) def create_thin_pool(self, name=None, size_str=None): """Creates a thin provisioning pool for this VG. The syntax here is slightly different than the default lvcreate -T, so we'll just write a custom cmd here and do it. :param name: Name to use for pool, default is "-pool" :param size_str: Size to allocate for pool, default is entire VG :returns: The size string passed to the lvcreate command """ if not self.supports_thin_provisioning(self._root_helper): LOG.error('Requested to setup thin provisioning, ' 'however current LVM version does not ' 'support it.') return None if name is None: name = '%s-pool' % self.vg_name vg_pool_name = '%s/%s' % (self.vg_name, name) if not size_str: size_str = self._calculate_thin_pool_size() cmd = LVM.LVM_CMD_PREFIX + ['lvcreate', '-T', '-L', size_str, vg_pool_name] LOG.debug("Creating thin pool '%(pool)s' with size %(size)s of " "total %(free)sg", {'pool': vg_pool_name, 'size': size_str, 'free': self.vg_free_space}) self._run_lvm_command(cmd) self.vg_thin_pool = name return size_str def create_volume(self, name, size_str, lv_type='default', mirror_count=0): """Creates a logical volume on the object's VG. :param name: Name to use when creating Logical Volume :param size_str: Size to use when creating Logical Volume :param lv_type: Type of Volume (default or thin) :param mirror_count: Use LVM mirroring with specified count """ if lv_type == 'thin': pool_path = '%s/%s' % (self.vg_name, self.vg_thin_pool) cmd = LVM.LVM_CMD_PREFIX + ['lvcreate', '-T', '-V', size_str, '-n', name, pool_path] else: cmd = LVM.LVM_CMD_PREFIX + ['lvcreate', '-n', name, self.vg_name, '-L', size_str] if mirror_count > 0: cmd.extend(['--type=mirror', '-m', mirror_count, '--nosync', '--mirrorlog', 'mirrored']) terras = int(size_str[:-1]) / 1024.0 if terras >= 1.5: rsize = int(2 ** math.ceil(math.log(terras) / math.log(2))) # NOTE(vish): Next power of two for region size. See: # http://red.ht/U2BPOD cmd.extend(['-R', str(rsize)]) try: self._run_lvm_command(cmd) except putils.ProcessExecutionError as err: LOG.exception('Error creating Volume') LOG.error('Cmd :%s', err.cmd) LOG.error('StdOut :%s', err.stdout) LOG.error('StdErr :%s', err.stderr) LOG.error('Current state: %s', self.get_all_volume_groups(self._root_helper)) raise @utils.retry(putils.ProcessExecutionError) def create_lv_snapshot(self, name, source_lv_name, lv_type='default'): """Creates a snapshot of a logical volume. 
:param name: Name to assign to new snapshot :param source_lv_name: Name of Logical Volume to snapshot :param lv_type: Type of LV (default or thin) """ source_lvref = self.get_volume(source_lv_name) if source_lvref is None: LOG.error("Trying to create snapshot by non-existent LV: %s", source_lv_name) raise exception.VolumeDeviceNotFound(device=source_lv_name) cmd = LVM.LVM_CMD_PREFIX + ['lvcreate', '--name', name, '--snapshot', '%s/%s' % (self.vg_name, source_lv_name)] if lv_type != 'thin': size = source_lvref['size'] cmd.extend(['-L', '%sg' % (size)]) try: self._run_lvm_command(cmd) except putils.ProcessExecutionError as err: LOG.exception('Error creating snapshot') LOG.error('Cmd :%s', err.cmd) LOG.error('StdOut :%s', err.stdout) LOG.error('StdErr :%s', err.stderr) raise def _mangle_lv_name(self, name): # Linux LVM reserves name that starts with snapshot, so that # such volume name can't be created. Mangle it. if not name.startswith('snapshot'): return name return '_' + name def _lv_is_active(self, name): cmd = LVM.LVM_CMD_PREFIX + ['lvdisplay', '--noheading', '-C', '-o', 'Attr', '%s/%s' % (self.vg_name, name)] out, _err = self._run_lvm_command(cmd) if out: out = out.strip() if (out[4] == 'a'): return True return False @utils.retry(exception.VolumeNotDeactivated, retries=1, interval=2) def deactivate_lv(self, name): lv_path = self.vg_name + '/' + self._mangle_lv_name(name) cmd = ['lvchange', '-a', 'n'] cmd.append(lv_path) try: self._execute(*cmd, root_helper=self._root_helper, run_as_root=True) except putils.ProcessExecutionError as err: LOG.exception('Error deactivating LV, retry may be possible') LOG.error('Cmd :%s', err.cmd) LOG.error('StdOut :%s', err.stdout) LOG.error('StdErr :%s', err.stderr) raise # Wait until lv is deactivated to return in # order to prevent a race condition. self._wait_for_volume_deactivation(name) @utils.retry(retry_param=exception.VolumeNotDeactivated, retries=5, backoff_rate=2) def _wait_for_volume_deactivation(self, name): LOG.debug("Checking to see if volume %s has been deactivated.", name) if self._lv_is_active(name): LOG.debug("Volume %s is still active.", name) raise exception.VolumeNotDeactivated(name=name) else: LOG.debug("Volume %s has been deactivated.", name) @utils.retry(putils.ProcessExecutionError, retries=5, backoff_rate=2) def activate_lv(self, name, is_snapshot=False, permanent=False): """Ensure that logical volume/snapshot logical volume is activated. :param name: Name of LV to activate :param is_snapshot: whether LV is a snapshot :param permanent: whether we should drop skipactivation flag :raises putils.ProcessExecutionError: """ # This is a no-op if requested for a snapshot on a version # of LVM that doesn't support snapshot activation. # (Assume snapshot LV is always active.) if is_snapshot and not self.supports_snapshot_lv_activation: return lv_path = self.vg_name + '/' + self._mangle_lv_name(name) # Must pass --yes to activate both the snap LV and its origin LV. # Otherwise lvchange asks if you would like to do this interactively, # and fails. cmd = ['lvchange', '-a', 'y', '--yes'] if self.supports_lvchange_ignoreskipactivation: # If permanent=True is specified, drop the skipactivation flag in # order to make this LV automatically activated after next reboot. 
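# NOTE: Illustrative aside (not part of the original code). Assuming an LVM
# new enough to support --ignoreactivationskip, the flag selection below
# yields one of the following command lines for a hypothetical "vg0/lv0":
#
#     >>> def activate_cmd(lv_path, permanent):
#     ...     cmd = ['lvchange', '-a', 'y', '--yes']
#     ...     cmd += ['-k', 'n'] if permanent else ['-K']
#     ...     return cmd + [lv_path]
#     >>> ' '.join(activate_cmd('vg0/lv0', permanent=False))
#     'lvchange -a y --yes -K vg0/lv0'
#     >>> ' '.join(activate_cmd('vg0/lv0', permanent=True))
#     'lvchange -a y --yes -k n vg0/lv0'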
if permanent: cmd += ['-k', 'n'] else: cmd.append('-K') cmd.append(lv_path) try: self._execute(*cmd, root_helper=self._root_helper, run_as_root=True) except putils.ProcessExecutionError as err: LOG.exception('Error activating LV') LOG.error('Cmd :%s', err.cmd) LOG.error('StdOut :%s', err.stdout) LOG.error('StdErr :%s', err.stderr) raise @utils.retry(putils.ProcessExecutionError) def delete(self, name): """Delete logical volume or snapshot. :param name: Name of LV to delete """ def run_udevadm_settle(): cinder.privsep.lvm.udevadm_settle() # LV removal seems to be a race with other writers or udev in # some cases (see LP #1270192), so we enable retry deactivation LVM_CONFIG = 'activation { retry_deactivation = 1} ' try: self._execute( 'lvremove', '--config', LVM_CONFIG, '-f', '%s/%s' % (self.vg_name, name), root_helper=self._root_helper, run_as_root=True) except putils.ProcessExecutionError as err: LOG.debug('Error reported running lvremove: CMD: %(command)s, ' 'RESPONSE: %(response)s', {'command': err.cmd, 'response': err.stderr}) LOG.debug('Attempting udev settle and retry of lvremove...') run_udevadm_settle() # The previous failing lvremove -f might leave behind # suspended devices; when lvmetad is not available, any # further lvm command will block forever. # Therefore we need to skip suspended devices on retry. LVM_CONFIG += 'devices { ignore_suspended_devices = 1}' self._execute( 'lvremove', '--config', LVM_CONFIG, '-f', '%s/%s' % (self.vg_name, name), root_helper=self._root_helper, run_as_root=True) LOG.debug('Successfully deleted volume: %s after ' 'udev settle.', name) def revert(self, snapshot_name): """Revert an LV to snapshot. :param snapshot_name: Name of snapshot to revert """ try: cinder.privsep.lvm.lvconvert(self.vg_name, snapshot_name) except putils.ProcessExecutionError as err: LOG.exception('Error Revert Volume') LOG.error('Cmd :%s', err.cmd) LOG.error('StdOut :%s', err.stdout) LOG.error('StdErr :%s', err.stderr) raise def lv_has_snapshot(self, name): cmd = LVM.LVM_CMD_PREFIX + ['lvdisplay', '--noheading', '-C', '-o', 'Attr', '--readonly', '%s/%s' % (self.vg_name, name)] out, _err = self._run_lvm_command(cmd) if out: out = out.strip() if (out[0] == 'o') or (out[0] == 'O'): return True return False def lv_is_snapshot(self, name): """Return True if LV is a snapshot, False otherwise.""" cmd = LVM.LVM_CMD_PREFIX + ['lvdisplay', '--noheading', '-C', '-o', 'Attr', '%s/%s' % (self.vg_name, name)] out, _err = self._run_lvm_command(cmd) out = out.strip() if out: if (out[0] == 's'): return True return False def lv_is_open(self, name): """Return True if LV is currently open, False otherwise.""" cmd = LVM.LVM_CMD_PREFIX + ['lvdisplay', '--noheading', '-C', '-o', 'Attr', '%s/%s' % (self.vg_name, name)] out, _err = self._run_lvm_command(cmd) out = out.strip() if out: if (out[5] == 'o'): return True return False def lv_get_origin(self, name): """Return the origin of an LV that is a snapshot, None otherwise.""" cmd = LVM.LVM_CMD_PREFIX + ['lvdisplay', '--noheading', '-C', '-o', 'Origin', '%s/%s' % (self.vg_name, name)] out, _err = self._run_lvm_command(cmd) out = out.strip() if out: return out return None def extend_volume(self, lv_name, new_size): """Extend the size of an existing volume.""" # Volumes with snaps have attributes 'o' or 'O' and will be # deactivated, but Thin Volumes with snaps have attribute 'V' # and won't be deactivated because the lv_has_snapshot method looks # for 'o' or 'O' has_snapshot = self.lv_has_snapshot(lv_name) if has_snapshot: self.deactivate_lv(lv_name) try: 
cmd = LVM.LVM_CMD_PREFIX + ['lvextend', '-L', new_size, '%s/%s' % (self.vg_name, lv_name)] self._run_lvm_command(cmd) except putils.ProcessExecutionError as err: LOG.exception('Error extending Volume') LOG.error('Cmd :%s', err.cmd) LOG.error('StdOut :%s', err.stdout) LOG.error('StdErr :%s', err.stderr) raise if has_snapshot: self.activate_lv(lv_name) def vg_mirror_free_space(self, mirror_count): free_capacity = 0.0 disks = [] for pv in self.pv_list: disks.append(float(pv['available'])) while True: disks = sorted([a for a in disks if a > 0.0], reverse=True) if len(disks) <= mirror_count: break # consume the smallest disk disk = disks[-1] disks = disks[:-1] # match extents for each mirror on the largest disks for index in list(range(mirror_count)): disks[index] -= disk free_capacity += disk return free_capacity def vg_mirror_size(self, mirror_count): return (self.vg_free_space / (mirror_count + 1)) def rename_volume(self, lv_name, new_name): """Change the name of an existing volume.""" try: cinder.privsep.lvm.lvrename(self.vg_name, lv_name, new_name) except putils.ProcessExecutionError as err: LOG.exception('Error renaming logical volume') LOG.error('Cmd :%s', err.cmd) LOG.error('StdOut :%s', err.stdout) LOG.error('StdErr :%s', err.stderr) raise ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315577.0671182 cinder-27.0.0/cinder/cmd/0000775000175000017500000000000000000000000015076 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/cmd/__init__.py0000664000175000017500000000000000000000000017175 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/cmd/api.py0000664000175000017500000000442300000000000016224 0ustar00zuulzuul00000000000000#!/usr/bin/env python # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Starter script for Cinder OS API.""" import logging as python_logging import sys import eventlet # noqa eventlet.monkey_patch() # Monkey patch the original current_thread to use the up-to-date _active # global variable. 
See https://bugs.launchpad.net/bugs/1863021 and # https://github.com/eventlet/eventlet/issues/592 import __original_module_threading as orig_threading # pylint: disable=E0401 import threading # noqa orig_threading.current_thread.__globals__['_active'] = \ threading._active # type: ignore from oslo_config import cfg from oslo_log import log as logging from oslo_reports import guru_meditation_report as gmr from oslo_reports import opts as gmr_opts from cinder import i18n # noqa i18n.enable_lazy() # Need to register global_opts from cinder.common import config from cinder import coordination from cinder import objects from cinder import rpc from cinder import service from cinder import utils from cinder import version CONF = cfg.CONF def main() -> None: objects.register_all() gmr_opts.set_defaults(CONF) CONF(sys.argv[1:], project='cinder', version=version.version_string()) config.set_middleware_defaults() logging.setup(CONF, "cinder") python_logging.captureWarnings(True) utils.monkey_patch() gmr.TextGuruMeditation.setup_autorun(version, conf=CONF) coordination.COORDINATOR.start() rpc.init(CONF) launcher = service.process_launcher() server = service.WSGIService('osapi_volume') launcher.launch_service(server, workers=server.workers) launcher.wait() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/cmd/backup.py0000664000175000017500000001260300000000000016717 0ustar00zuulzuul00000000000000#!/usr/bin/env python # Copyright (C) 2012 Hewlett-Packard Development Company, L.P. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Starter script for Cinder Volume Backup.""" import logging as python_logging import shlex import sys # NOTE: Monkey patching must go before OSLO.log import, otherwise OSLO.context # will not use greenthread thread local and all greenthreads will share the # same context. It's also a good idea to monkey patch everything before # loading multiprocessing import eventlet eventlet.monkey_patch() # Monkey patch the original current_thread to use the up-to-date _active # global variable. 
See https://bugs.launchpad.net/bugs/1863021 and # https://github.com/eventlet/eventlet/issues/592 import __original_module_threading as orig_threading # pylint: disable=E0401 import threading # noqa orig_threading.current_thread.__globals__['_active'] = \ threading._active # type: ignore import typing from typing import Union import os_brick from oslo_concurrency import processutils from oslo_config import cfg from oslo_log import log as logging from oslo_privsep import priv_context from oslo_reports import guru_meditation_report as gmr from oslo_reports import opts as gmr_opts # Need to register global_opts from cinder.common import config # noqa from cinder.db import api as session from cinder import i18n i18n.enable_lazy() from cinder import objects from cinder import service from cinder import utils from cinder import version if typing.TYPE_CHECKING: import oslo_service CONF = cfg.CONF backup_cmd_opts = [ cfg.IntOpt('backup_workers', default=1, min=1, max=processutils.get_worker_count(), sample_default=8, help='Number of backup processes to launch. ' 'Improves performance with concurrent backups.'), cfg.IntOpt('backup_max_operations', default=15, min=0, help='Maximum number of concurrent memory heavy operations: ' 'backup and restore. Value of 0 means unlimited'), ] CONF.register_opts(backup_cmd_opts) LOG = None # NOTE: The default backup driver uses swift and performs read/write # operations in a thread. swiftclient will log requests and responses at DEBUG # level, which can cause a thread switch and break the backup operation. So we # set a default log level of WARN for swiftclient and boto to try and avoid # this issue. _EXTRA_DEFAULT_LOG_LEVELS = ['swiftclient=WARN', 'botocore=WARN'] def _launch_backup_process(launcher: 'oslo_service.ProcessLauncher', num_process: int, _semaphore: Union[eventlet.semaphore.Semaphore, utils.Semaphore]) -> None: try: server = service.Service.create(binary='cinder-backup', coordination=True, service_name='backup', process_number=num_process + 1, semaphore=_semaphore) except Exception: assert LOG is not None LOG.exception('Backup service %s failed to start.', CONF.host) sys.exit(1) else: # Dispose of the whole DB connection pool here before # starting another process. Otherwise we run into cases where # child processes share DB connections which results in errors. 
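# ---------------------------------------------------------------------------
# Illustrative sketch (not part of Cinder): the dispose-before-fork pattern
# the comment above describes, shown with plain SQLAlchemy and
# multiprocessing.  Names such as make_engine() are hypothetical; Cinder's
# real code goes through cinder.db.api.dispose_engine() instead.
import multiprocessing

import sqlalchemy


def make_engine():
    # An in-memory SQLite database keeps the example self-contained.
    return sqlalchemy.create_engine('sqlite://')


def child_work():
    # The child creates its own engine, and therefore its own connection
    # pool, instead of inheriting pooled connections from the parent.
    engine = make_engine()
    with engine.connect() as conn:
        conn.execute(sqlalchemy.text('SELECT 1'))


if __name__ == '__main__':
    parent_engine = make_engine()
    # Drop every pooled connection so a forked worker never ends up sharing
    # a live DB connection (and its socket) with the parent process.
    parent_engine.dispose()
    worker = multiprocessing.Process(target=child_work)
    worker.start()
    worker.join()
# ---------------------------------------------------------------------------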
session.dispose_engine() launcher.launch_service(server) def main() -> None: objects.register_all() gmr_opts.set_defaults(CONF) CONF(sys.argv[1:], project='cinder', version=version.version_string()) logging.set_defaults( default_log_levels=logging.get_default_log_levels() + _EXTRA_DEFAULT_LOG_LEVELS) logging.setup(CONF, "cinder") python_logging.captureWarnings(True) priv_context.init(root_helper=shlex.split(utils.get_root_helper())) utils.monkey_patch() gmr.TextGuruMeditation.setup_autorun(version, conf=CONF) os_brick.setup(CONF) global LOG LOG = logging.getLogger(__name__) semaphore = utils.semaphore_factory(CONF.backup_max_operations, CONF.backup_workers) if CONF.backup_workers > 1: LOG.info('Backup running with %s processes.', CONF.backup_workers) launcher = service.get_launcher() for i in range(CONF.backup_workers): _launch_backup_process(launcher, i, semaphore) launcher.wait() else: LOG.info('Backup running in single process mode.') server = service.Service.create(binary='cinder-backup', coordination=True, service_name='backup', process_number=1, semaphore=semaphore) service.serve(server) service.wait() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/cmd/manage.py0000664000175000017500000012726200000000000016712 0ustar00zuulzuul00000000000000#!/usr/bin/env python # Copyright (c) 2011 X.commerce, a business unit of eBay Inc. # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # Interactive shell based on Django: # # Copyright (c) 2005, the Lawrence Journal-World # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, # this list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # # 3. Neither the name of Django nor the names of its contributors may be # used to endorse or promote products derived from this software without # specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. """CLI interface for cinder management.""" import collections import collections.abc as collections_abc import errno import glob import itertools import logging as python_logging import os import re import sys import time import typing from typing import Any, Callable, Optional, Tuple, Union from oslo_config import cfg from oslo_db import exception as db_exc from oslo_log import log as logging from oslo_utils import timeutils import tabulate # Need to register global_opts from cinder.backup import rpcapi as backup_rpcapi from cinder.common import config # noqa from cinder import context from cinder import db from cinder.db import migration as db_migration from cinder.db.sqlalchemy import api as db_api from cinder.db.sqlalchemy import models from cinder import exception from cinder.i18n import _ from cinder import objects from cinder.objects import base as ovo_base from cinder import quota from cinder import rpc from cinder.scheduler import rpcapi as scheduler_rpcapi from cinder import version from cinder.volume import rpcapi as volume_rpcapi from cinder.volume import volume_utils CONF = cfg.CONF LOG = logging.getLogger(__name__) RPC_VERSIONS = { 'cinder-scheduler': scheduler_rpcapi.SchedulerAPI.RPC_API_VERSION, 'cinder-volume': volume_rpcapi.VolumeAPI.RPC_API_VERSION, 'cinder-backup': backup_rpcapi.BackupAPI.RPC_API_VERSION, } OVO_VERSION = ovo_base.OBJ_VERSIONS.get_current() # Decorators for actions @typing.no_type_check def args(*args, **kwargs): args = list(args) if not args[0].startswith('-') and '-' in args[0]: kwargs.setdefault('metavar', args[0]) args[0] = args[0].replace('-', '_') def _decorator(func): func.__dict__.setdefault('args', []).insert(0, (args, kwargs)) return func return _decorator class HostCommands(object): """List hosts.""" @args('zone', nargs='?', default=None, help='Availability Zone (default: %(default)s)') def list(self, zone: Optional[str] = None) -> None: """Show a list of all physical hosts. Can be filtered by zone. args: [zone] """ print(_("%(host)-25s\t%(zone)-15s") % {'host': 'host', 'zone': 'zone'}) ctxt = context.get_admin_context() services = objects.ServiceList.get_all(ctxt) if zone: services = [s for s in services if s.availability_zone == zone] hosts: list[dict[str, Any]] = [] for srv in services: if not [h for h in hosts if h['host'] == srv['host']]: hosts.append(srv) for h in hosts: print(_("%(host)-25s\t%(availability_zone)-15s") % {'host': h['host'], 'availability_zone': h['availability_zone']}) class DbCommands(object): """Class for managing the database.""" # NOTE: Online migrations cannot depend on having Cinder services running. # Migrations can be called during Fast-Forward Upgrades without having any # Cinder services up. # NOTE: Online migrations must be removed at the beginning of the next # release to the one they've been introduced. 
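# ---------------------------------------------------------------------------
# Illustrative sketch (hypothetical, standalone): the calling convention the
# NOTE above describes.  An online data migration receives a context and a
# batch size and returns (rows_matched, rows_migrated); cinder-manage calls
# it repeatedly in batches until nothing is left.  The names below are
# invented for the example only.
class _FakeContext(object):
    remaining = 5


def demo_migration(ctxt, max_count):
    # Pretend some rows still need migrating and handle at most max_count
    # of them per call.
    found = ctxt.remaining
    done = min(found, max_count)
    ctxt.remaining = found - done
    return found, done


if __name__ == '__main__':
    ctxt = _FakeContext()
    migrated = 0
    while True:
        found, done = demo_migration(ctxt, max_count=2)
        migrated += done
        if not found or not done:
            break
    print('migrated %d rows in batches of 2' % migrated)
# ---------------------------------------------------------------------------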
A comment with the release # a migration is introduced and the one where it must be removed must # preceed any element of the "online_migrations" tuple, like this: # # Added in Queens remove in Rocky # db.service_uuids_online_data_migration, online_migrations: Tuple[Callable[[context.RequestContext, int], Tuple[int, int]], ...] = ( # TODO: (D Release) Remove next line and this comment db.remove_temporary_admin_metadata_data_migration, ) def __init__(self): pass @args('version', nargs='?', default=None, type=int, help='Database version') @args('--bump-versions', dest='bump_versions', default=False, action='store_true', help='Update RPC and Objects versions when doing offline upgrades, ' 'with this we no longer need to restart the services twice ' 'after the upgrade to prevent ServiceTooOld exceptions.') def sync(self, version: Optional[int] = None, bump_versions: bool = False) -> None: """Sync the database up to the most recent version.""" if version is not None and version > db.MAX_INT: print(_('Version should be less than or equal to ' '%(max_version)d.') % {'max_version': db.MAX_INT}) sys.exit(1) try: db_migration.db_sync(version) except db_exc.DBMigrationError as ex: print("Error during database migration: %s" % ex) sys.exit(1) try: if bump_versions: ctxt = context.get_admin_context() services = objects.ServiceList.get_all(ctxt) for service in services: rpc_version = RPC_VERSIONS[service.binary] if (service.rpc_current_version != rpc_version or service.object_current_version != OVO_VERSION): service.rpc_current_version = rpc_version service.object_current_version = OVO_VERSION service.save() except Exception as ex: print(_('Error during service version bump: %s') % ex) sys.exit(2) def version(self) -> None: """Print the current database version.""" print(db_migration.db_version()) @args('age_in_days', type=int, help='Purge deleted rows older than age in days') def purge(self, age_in_days: int) -> None: """Purge deleted rows older than a given age from cinder tables.""" age_in_days = int(age_in_days) if age_in_days < 0: print(_("Must supply a positive value for age")) sys.exit(1) if age_in_days >= (int(time.time()) / 86400): print(_("Maximum age is count of days since epoch.")) sys.exit(1) ctxt = context.get_admin_context() try: db.purge_deleted_rows(ctxt, age_in_days) except db_exc.DBReferenceError: print(_("Purge command failed, check cinder-manage " "logs for more details.")) sys.exit(1) def _run_migration(self, ctxt: context.RequestContext, max_count: int) -> Tuple[dict, bool]: ran = 0 exceptions = False migrations = {} for migration_meth in self.online_migrations: count = max_count - ran try: found, done = migration_meth(ctxt, count) except Exception: msg = (_("Error attempting to run %(method)s") % {'method': migration_meth.__name__}) print(msg) LOG.exception(msg) exceptions = True found = done = 0 name = migration_meth.__name__ if found: print(_('%(found)i rows matched query %(meth)s, %(done)i ' 'migrated') % {'found': found, 'meth': name, 'done': done}) migrations[name] = found, done if max_count is not None: ran += done if ran >= max_count: break return migrations, exceptions @args('--max_count', metavar='', dest='max_count', type=int, help='Maximum number of objects to consider.') def online_data_migrations(self, max_count: Optional[int] = None) -> None: """Perform online data migrations for the release in batches.""" ctxt = context.get_admin_context() if max_count is not None: unlimited = False if max_count < 1: print(_('Must supply a positive value for max_count.')) 
sys.exit(127) else: unlimited = True max_count = 50 print(_('Running batches of %i until complete.') % max_count) ran = None exceptions = False migration_info: dict[str, Any] = {} while ran is None or ran != 0: migrations, exceptions = self._run_migration(ctxt, max_count) ran = 0 for name in migrations: migration_info.setdefault(name, (0, 0)) migration_info[name] = ( max(migration_info[name][0], migrations[name][0]), migration_info[name][1] + migrations[name][1], ) ran += migrations[name][1] if not unlimited: break headers = ["{}".format(_('Migration')), "{}".format(_('Total Needed')), "{}".format(_('Completed')), ] rows = [] for name in sorted(migration_info.keys()): info = migration_info[name] rows.append([name, info[0], info[1]]) print(tabulate.tabulate(rows, headers=headers, tablefmt='psql')) # NOTE(imacdonn): In the "unlimited" case, the loop above will only # terminate when all possible migrations have been effected. If we're # still getting exceptions, there's a problem that requires # intervention. In the max-count case, exceptions are only considered # fatal if no work was done by any other migrations ("not ran"), # because otherwise work may still remain to be done, and that work # may resolve dependencies for the failing migrations. if exceptions and (unlimited or not ran): print(_("Some migrations failed unexpectedly. Check log for " "details.")) sys.exit(2) sys.exit(1 if ran else 0) @args('--enable-replication', action='store_true', default=False, help='Set replication status to enabled (default: %(default)s).') @args('--active-backend-id', default=None, help='Change the active backend ID (default: %(default)s).') @args('--backend-host', required=True, help='The backend host name.') def reset_active_backend(self, enable_replication: bool, active_backend_id: Optional[str], backend_host: str) -> None: """Reset the active backend for a host.""" ctxt = context.get_admin_context() try: db.reset_active_backend(ctxt, enable_replication, active_backend_id, backend_host) except db_exc.DBReferenceError: print(_("Failed to reset active backend for host %s, " "check cinder-manage logs for more details.") % backend_host) sys.exit(1) class QuotaCommands(object): """Class for managing quota issues.""" def __init__(self): pass @args('--project-id', default=None, help=('The ID of the project where we want to check the quotas ' '(defaults to all projects).')) def check(self, project_id: Optional[str]) -> None: """Check if quotas and reservations are correct This action checks quotas and reservations, for a specific project or for all projects, to see if they are out of sync. The check will also look for duplicated entries. One way to use this check in combination with the sync action is to run the check for all projects, take note of those that are out of sync, and then sync them one by one at intervals to reduce stress on the DB. """ result = self._check_sync(project_id, do_fix=False) if result: sys.exit(1) @args('--project-id', default=None, help=('The ID of the project where we want to sync the quotas ' '(defaults to all projects).')) def sync(self, project_id: Optional[str]) -> None: """Fix quotas and reservations This action refreshes existing quota usage and reservation count for a specific project or for all projects. The refresh will also remove duplicated entries. This operation is best executed when Cinder is not running, but it can be run with cinder services running as well. 
A different transaction is used for each project's quota sync, so an action failure will only rollback the current project's changes. """ self._check_sync(project_id, do_fix=True) @db_api.main_context_manager.reader def _get_quota_projects(self, ctxt: context.RequestContext, project_id: Optional[str]) -> list[str]: """Get project ids that have quota_usage entries.""" if project_id: model = models.QuotaUsage # If the project does not exist if not ctxt.session.query( db_api.sql.exists() .where( db_api.and_( model.project_id == project_id, ~model.deleted, ), ) ).scalar(): print( 'Project id %s has no quota usage. Nothing to do.' % project_id, ) return [] return [project_id] projects = db_api.get_projects(ctxt, models.QuotaUsage, read_deleted="no") project_ids = [row.project_id for row in projects] return project_ids def _get_usages(self, ctxt: context.RequestContext, resources, project_id: str) -> list: """Get data necessary to check out of sync quota usage. Returns a list of QuotaUsage instances for the specific project """ usages = db_api.model_query( ctxt, db_api.models.QuotaUsage, read_deleted="no", ).filter_by(project_id=project_id).with_for_update().all() return usages def _get_reservations(self, ctxt: context.RequestContext, project_id: str, usage_id: str) -> list: """Get reservations for a given project and usage id.""" reservations = ( db_api.model_query( ctxt, models.Reservation, read_deleted="no", ) .filter_by(project_id=project_id, usage_id=usage_id) .with_for_update() .all() ) return reservations def _check_duplicates(self, ctxt: context.RequestContext, usages, do_fix: bool) -> tuple[list, bool]: """Look for duplicated quota used entries (bug#1484343) If we have duplicates and we are fixing them, then we reassign the reservations of the usage we are removing. """ resources = collections.defaultdict(list) for usage in usages: resources[usage.resource].append(usage) duplicates_found = False result = [] for resource_usages in resources.values(): keep_usage = resource_usages[0] if len(resource_usages) > 1: duplicates_found = True print('\t%s: %s duplicated usage entries - ' % (keep_usage.resource, len(resource_usages) - 1), end='') if do_fix: # Each of the duplicates can have reservations reassigned = 0 for usage in resource_usages[1:]: reservations = self._get_reservations( ctxt, usage.project_id, usage.id, ) reassigned += len(reservations) for reservation in reservations: reservation.usage_id = keep_usage.id keep_usage.in_use += usage.in_use keep_usage.reserved += usage.reserved usage.delete(ctxt.session) print('duplicates removed & %s reservations reassigned' % reassigned) else: print('ignored') result.append(keep_usage) return result, duplicates_found def _check_sync(self, project_id: Optional[str], do_fix: bool) -> bool: """Check the quotas and reservations optionally fixing them.""" ctxt = context.get_admin_context() # Get the quota usage types and their sync methods resources = quota.QUOTAS.resources resources.update(quota.GROUP_QUOTAS.resources) # Get all project ids that have quota usage. Method doesn't lock # projects, since newly added projects should not be out of sync and # projects removed will just turn nothing on the quota usage. 
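# ---------------------------------------------------------------------------
# Illustrative sketch (hypothetical, standalone): the essence of the per-
# project consistency check performed below.  Stored usage rows are compared
# against usage recomputed from the actual resources; differences are
# reported and, when do_fix is set, corrected.  The dictionaries stand in
# for the real QuotaUsage rows and sync callbacks.
def check_project(saved_usage, actual_usage, do_fix=False):
    out_of_sync = False
    for resource, saved in saved_usage.items():
        actual = actual_usage.get(resource, 0)
        if saved != actual:
            out_of_sync = True
            print('\t%s: invalid usage saved=%s actual=%s' %
                  (resource, saved, actual))
            if do_fix:
                saved_usage[resource] = actual
    return out_of_sync


if __name__ == '__main__':
    saved = {'volumes': 12, 'gigabytes': 100}
    actual = {'volumes': 10, 'gigabytes': 100}
    print('discrepancy found:', check_project(saved, actual, do_fix=True))
# ---------------------------------------------------------------------------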
projects = self._get_quota_projects(ctxt, project_id) discrepancy = False for project in projects: discrepancy &= self._check_project_sync( ctxt, project, do_fix, resources, ) print('Action successfully completed') return discrepancy @db_api.main_context_manager.writer def _check_project_sync(self, ctxt: context.RequestContext, project: str, do_fix: bool, resources) -> bool: print('Processing quota usage for project %s' % project) discrepancy = False action_msg = ' - fixed' if do_fix else '' # NOTE: It's important to always get the quota first and then the # reservations to prevent deadlocks with quota commit and rollback from # running Cinder services. # We only want to sync existing quota usage rows usages = self._get_usages(ctxt, resources, project) # Check for duplicated entries (bug#1484343) usages, duplicates_found = self._check_duplicates( ctxt, usages, do_fix, ) if duplicates_found: discrepancy = True # Check quota and reservations for usage in usages: resource_name = usage.resource # Get the correct value for this quota usage resource updates = db_api._get_sync_updates( ctxt, project, resources, resource_name, ) in_use = updates[resource_name] if in_use != usage.in_use: print( '\t%s: invalid usage saved=%s actual=%s%s' % (resource_name, usage.in_use, in_use, action_msg) ) discrepancy = True if do_fix: usage.in_use = in_use reservations = self._get_reservations( ctxt, project, usage.id, ) num_reservations = sum( r.delta for r in reservations if r.delta > 0 ) if num_reservations != usage.reserved: print( '\t%s: invalid reserved saved=%s actual=%s%s' % ( resource_name, usage.reserved, num_reservations, action_msg, ) ) discrepancy = True if do_fix: usage.reserved = num_reservations return discrepancy class VersionCommands(object): """Class for exposing the codebase version.""" def __init__(self): pass def list(self): print(version.version_string()) def __call__(self): self.list() class VolumeCommands(object): """Methods for dealing with a cloud in an odd state.""" @args('volume_id', help='Volume ID to be deleted') def delete(self, volume_id: str) -> None: """Delete a volume, bypassing the check that it must be available.""" ctxt = context.get_admin_context() volume = objects.Volume.get_by_id(ctxt, volume_id) host = volume_utils.extract_host(volume.host) if volume.host else None if not host: print(_("Volume not yet assigned to host.")) print(_("Deleting volume from database and skipping rpc.")) volume.destroy() return if volume.status == 'in-use': print(_("Volume is in-use.")) print(_("Detach volume from instance and then try again.")) return rpc.init(CONF) rpcapi = volume_rpcapi.VolumeAPI() rpcapi.delete_volume(ctxt, volume) @args('--currenthost', required=True, help='Existing volume host name in ' 'the format host@backend#pool') @args('--newhost', required=True, help='New volume host name in the ' 'format host@backend#pool') def update_host(self, currenthost: str, newhost: str) -> None: """Modify the host name associated with a volume. Particularly to recover from cases where one has moved their Cinder Volume node, or modified their backend_name in a multi-backend config. """ ctxt = context.get_admin_context() volumes = db.volume_get_all_by_host(ctxt, currenthost) for v in volumes: db.volume_update(ctxt, v['id'], {'host': newhost}) def update_service(self): """Modify the service uuid associated with a volume. 
In certain upgrade cases, we create new cinder services and delete the records of old ones, however, the volumes created with old service still contain the service uuid of the old services. """ ctxt = context.get_admin_context() db.volume_update_all_by_service(ctxt) class ConfigCommands(object): """Class for exposing the flags defined by flag_file(s).""" def __init__(self): pass @args('param', nargs='?', default=None, help='Configuration parameter to display (default: %(default)s)') def list(self, param: Optional[str] = None) -> None: """List parameters configured for cinder. Lists all parameters configured for cinder unless an optional argument is specified. If the parameter is specified we only print the requested parameter. If the parameter is not found an appropriate error is produced by .get*(). """ param = param and param.strip() if param: print('%s = %s' % (param, CONF.get(param))) else: for key, value in CONF.items(): print('%s = %s' % (key, value)) class BackupCommands(object): """Methods for managing backups.""" def list(self) -> None: """List all backups. List all backups (including ones in progress) and the host on which the backup operation is running. """ ctxt = context.get_admin_context() backups = objects.BackupList.get_all(ctxt) hdr = "%-32s\t%-32s\t%-32s\t%-24s\t%-24s\t%-12s\t%-12s\t%-12s\t%-12s" print(hdr % (_('ID'), _('User ID'), _('Project ID'), _('Host'), _('Name'), _('Container'), _('Status'), _('Size'), _('Object Count'))) res = "%-32s\t%-32s\t%-32s\t%-24s\t%-24s\t%-12s\t%-12s\t%-12d\t%-12d" for backup in backups: object_count = 0 if backup['object_count'] is not None: object_count = backup['object_count'] print(res % (backup['id'], backup['user_id'], backup['project_id'], backup['host'], backup['display_name'], backup['container'], backup['status'], backup['size'], object_count)) @args('--currenthost', required=True, help='Existing backup host name') @args('--newhost', required=True, help='New backup host name') def update_backup_host(self, currenthost: str, newhost: str) -> None: """Modify the host name associated with a backup. Particularly to recover from cases where one has moved their Cinder Backup node, and not set backup_use_same_backend. 
""" ctxt = context.get_admin_context() backups = objects.BackupList.get_all_by_host(ctxt, currenthost) for bk in backups: bk.host = newhost bk.save() class BaseCommand(object): @staticmethod def _normalize_time(time_field): return time_field and timeutils.normalize_time(time_field) @staticmethod def _state_repr(is_up): return ':-)' if is_up else 'XXX' class ServiceCommands(BaseCommand): """Methods for managing services.""" def list(self): """Show a list of all cinder services.""" ctxt = context.get_admin_context() services = objects.ServiceList.get_all(ctxt) print_format = "%-16s %-36s %-16s %-10s %-5s %-20s %-12s %-15s %-36s" print(print_format % (_('Binary'), _('Host'), _('Zone'), _('Status'), _('State'), _('Updated At'), _('RPC Version'), _('Object Version'), _('Cluster'))) for svc in services: art = self._state_repr(svc.is_up) status = 'disabled' if svc.disabled else 'enabled' updated_at = self._normalize_time(svc.updated_at) rpc_version = svc.rpc_current_version object_version = svc.object_current_version cluster = svc.cluster_name or '' print(print_format % (svc.binary, svc.host, svc.availability_zone, status, art, updated_at, rpc_version, object_version, cluster)) @args('binary', type=str, help='Service to delete from the host.') @args('host_name', type=str, help='Host from which to remove the service.') def remove(self, binary: str, host_name: str) -> Optional[int]: """Completely removes a service.""" ctxt = context.get_admin_context() try: svc = objects.Service.get_by_args(ctxt, host_name, binary) svc.destroy() except exception.ServiceNotFound as e: print(_("Host not found. Failed to remove %(service)s" " on %(host)s.") % {'service': binary, 'host': host_name}) print(u"%s" % e.args) return 2 print(_("Service %(service)s on host %(host)s removed.") % {'service': binary, 'host': host_name}) return None class ClusterCommands(BaseCommand): """Methods for managing clusters.""" def list(self) -> None: """Show a list of all cinder services.""" ctxt = context.get_admin_context() clusters = objects.ClusterList.get_all(ctxt, services_summary=True) print_format = "%-36s %-16s %-10s %-5s %-20s %-7s %-12s %-20s" print(print_format % (_('Name'), _('Binary'), _('Status'), _('State'), _('Heartbeat'), _('Hosts'), _('Down Hosts'), _('Updated At'))) for cluster in clusters: art = self._state_repr(cluster.is_up) status = 'disabled' if cluster.disabled else 'enabled' heartbeat = self._normalize_time(cluster.last_heartbeat) updated_at = self._normalize_time(cluster.updated_at) print(print_format % (cluster.name, cluster.binary, status, art, heartbeat, cluster.num_hosts, cluster.num_down_hosts, updated_at)) @args('--recursive', action='store_true', default=False, help='Delete associated hosts.') @args('binary', type=str, help='Service to delete from the cluster.') @args('cluster-name', type=str, help='Cluster to delete.') def remove(self, recursive: bool, binary: str, cluster_name: str) -> Optional[int]: """Completely removes a cluster.""" ctxt = context.get_admin_context() try: cluster = objects.Cluster.get_by_id(ctxt, None, name=cluster_name, binary=binary, get_services=recursive) except exception.ClusterNotFound: print(_("Couldn't remove cluster %s because it doesn't exist.") % cluster_name) return 2 if recursive: for service in cluster.services: service.destroy() try: cluster.destroy() except exception.ClusterHasHosts: print(_("Couldn't remove cluster %s because it still has hosts.") % cluster_name) return 2 msg = _('Cluster %s successfully removed.') % cluster_name if recursive: msg = (_('%(msg)s 
And %(num)s services from the cluster were also ' 'removed.') % {'msg': msg, 'num': len(cluster.services)}) print(msg) return None @args('--full-rename', dest='partial', action='store_false', default=True, help='Do full cluster rename instead of just replacing provided ' 'current cluster name and preserving backend and/or pool info.') @args('current', help='Current cluster name.') @args('new', help='New cluster name.') def rename(self, partial: bool, current: Optional[str], new: Optional[str]) -> Optional[int]: """Rename cluster name for Volumes and Consistency Groups. Useful when you want to rename a cluster, particularly when the backend_name has been modified in a multi-backend config or we have moved from a single backend to multi-backend. """ ctxt = context.get_admin_context() # Convert empty strings to None current = current or None new = new or None # Update Volumes num_vols = objects.VolumeList.include_in_cluster( ctxt, new, partial_rename=partial, cluster_name=current) # Update Consistency Groups num_cgs = objects.ConsistencyGroupList.include_in_cluster( ctxt, new, partial_rename=partial, cluster_name=current) if num_vols or num_cgs: msg = _('Successfully renamed %(num_vols)s volumes and ' '%(num_cgs)s consistency groups from cluster %(current)s ' 'to %(new)s') print(msg % {'num_vols': num_vols, 'num_cgs': num_cgs, 'new': new, 'current': current}) else: msg = _('No volumes or consistency groups exist in cluster ' '%(current)s.') print(msg % {'current': current}) return 2 return None class ConsistencyGroupCommands(object): """Methods for managing consistency groups.""" @args('--currenthost', required=True, help='Existing CG host name') @args('--newhost', required=True, help='New CG host name') def update_cg_host(self, currenthost: str, newhost: str) -> None: """Modify the host name associated with a Consistency Group. Particularly to recover from cases where one has moved a host from single backend to multi-backend, or changed the host configuration option, or modified the backend_name in a multi-backend config. 
""" ctxt = context.get_admin_context() groups = objects.ConsistencyGroupList.get_all( ctxt, {'host': currenthost}) for gr in groups: gr.host = newhost gr.save() class UtilCommands(object): """Generic utils.""" @staticmethod def _get_resources_locks() -> Tuple[collections.defaultdict, collections.defaultdict, collections.defaultdict]: """Get all vol/snap/backup file lock paths.""" backup_locks_prefix = 'cinder-cleanup_incomplete_backups_' oslo_dir = os.path.abspath(cfg.CONF.oslo_concurrency.lock_path) filenames = glob.glob(os.path.join(oslo_dir, 'cinder-*')) backend_url = cfg.CONF.coordination.backend_url if backend_url.startswith('file://'): tooz_dir = os.path.abspath(backend_url[7:]) if tooz_dir != oslo_dir: filenames += glob.glob(os.path.join(tooz_dir, 'cinder-*')) volumes: collections.defaultdict = collections.defaultdict(list) snapshots: collections.defaultdict = collections.defaultdict(list) backups = collections.defaultdict(list) matcher = re.compile('.*?([0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-' '[0-9a-f]{4}-[0-9a-f]{12}).*?', re.IGNORECASE) for filename in filenames: basename = os.path.basename(filename) match = matcher.match(basename) if match: dest = snapshots if 'snapshot' in basename else volumes res_id = match.group(1) dest[res_id].append(filename) elif basename.startswith(backup_locks_prefix): pgrp = basename[34:] backups[pgrp].append(filename) return volumes, snapshots, backups def _exclude_running_backups(self, backups: dict) -> None: """Remove backup entries from the dict for running backup services.""" for backup_pgrp in list(backups.keys()): # The PGRP is the same as the PID of the parent process, so we know # the lock could be in use if the process is running and it's the # cinder-backup command (the PID could have been reused). cmdline_file = os.path.join('/proc', backup_pgrp, 'cmdline') try: with open(cmdline_file, 'r') as f: if 'cinder-backup' in f.read(): del backups[backup_pgrp] except FileNotFoundError: continue except Exception: # Unexpected error, leaving the lock file just in case del backups[backup_pgrp] @args('--services-offline', dest='online', action='store_false', default=True, help='All locks can be deleted as Cinder services are not running.') def clean_locks(self, online: bool) -> None: """Clean file locks for vols, snaps, and backups on the current host. Should be run on any host where we are running a Cinder service (API, Scheduler, Volume, Backup) and can be run with the Cinder services running or stopped. If the services are running it will check existing resources in the Cinder database in order to know which resources are still available (it's not safe to remove their file locks) and will only remove the file locks for the resources that are no longer present. Deleting locks while the services are offline is faster as there's no need to check the database. For backups, the way to know if we can remove the startup lock is by checking if the PGRP in the file name is currently running cinder-backup. Default assumes that services are online, must pass ``--services-offline`` to specify that they are offline. Doesn't clean DLM locks (except when using file locks), as those don't leave lock leftovers. 
""" self.ctxt = context.get_admin_context() # Find volume and snapshots ids, and backups PGRP based on the existing # file locks volumes: Union[collections.defaultdict, dict] snapshots: Union[collections.defaultdict, dict] volumes, snapshots, backups = self._get_resources_locks() # If services are online we cannot delete locks for existing resources if online: # We don't want to delete file locks for existing resources volumes = {vol_id: files for vol_id, files in volumes.items() if not objects.Volume.exists(self.ctxt, vol_id)} snapshots = {snap_id: files for snap_id, files in snapshots.items() if not objects.Snapshot.exists(self.ctxt, snap_id)} self._exclude_running_backups(backups) def _err(filename: str, exc: Exception) -> None: print('Failed to cleanup lock %(name)s: %(exc)s', {'name': filename, 'exc': exc}) # Now clean for filenames in itertools.chain(volumes.values(), snapshots.values(), backups.values()): for filename in filenames: try: os.remove(filename) except OSError as exc: if (exc.errno != errno.ENOENT): _err(filename, exc) except Exception as exc: _err(filename, exc) CATEGORIES = { 'backup': BackupCommands, 'config': ConfigCommands, 'cluster': ClusterCommands, 'cg': ConsistencyGroupCommands, 'db': DbCommands, 'host': HostCommands, 'quota': QuotaCommands, 'service': ServiceCommands, 'version': VersionCommands, 'volume': VolumeCommands, 'util': UtilCommands, } def methods_of(obj) -> list: """Return non-private methods from an object. Get all callable methods of an object that don't start with underscore :return: a list of tuples of the form (method_name, method) """ result = [] for i in dir(obj): if (isinstance(getattr(obj, i), collections_abc.Callable) and # type: ignore not i.startswith('_')): result.append((i, getattr(obj, i))) return result def missing_action(help_func: Callable) -> Callable: def wrapped(): help_func() exit(2) return wrapped def add_command_parsers(subparsers): for category in sorted(CATEGORIES): command_object = CATEGORIES[category]() parser = subparsers.add_parser(category) parser.set_defaults(command_object=command_object) parser.set_defaults(action_fn=missing_action(parser.print_help)) category_subparsers = parser.add_subparsers(dest='action') for (action, action_fn) in methods_of(command_object): parser = category_subparsers.add_parser(action) action_kwargs: list = [] for args, kwargs in getattr(action_fn, 'args', []): parser.add_argument(*args, **kwargs) parser.set_defaults(action_fn=action_fn) parser.set_defaults(action_kwargs=action_kwargs) category_opt = cfg.SubCommandOpt('category', title='Command categories', handler=add_command_parsers) def get_arg_string(args): if args[0] == '-': # (Note)zhiteng: args starts with FLAGS.oparser.prefix_chars # is optional args. Notice that cfg module takes care of # actual ArgParser so prefix_chars is always '-'. 
if args[1] == '-': # This is long optional arg args = args[2:] else: args = args[1:] # pylint: disable=E1136 # We convert dashes to underscores so we can have cleaner optional arg # names if args: args = args.replace('-', '_') return args def fetch_func_args(func): fn_kwargs = {} for args, kwargs in getattr(func, 'args', []): # Argparser `dest` configuration option takes precedence for the name arg = kwargs.get('dest') or get_arg_string(args[0]) fn_kwargs[arg] = getattr(CONF.category, arg) return fn_kwargs def main(): objects.register_all() """Parse options and call the appropriate class/method.""" CONF.register_cli_opt(category_opt) script_name = sys.argv[0] if len(sys.argv) < 2: print(_("\nOpenStack Cinder version: %(version)s\n") % {'version': version.version_string()}) print(script_name + " category action []") print(_("Available categories:")) for category in CATEGORIES: print(_("\t%s") % category) sys.exit(2) try: CONF(sys.argv[1:], project='cinder', version=version.version_string()) logging.setup(CONF, "cinder") python_logging.captureWarnings(True) except cfg.ConfigDirNotFoundError as details: print(_("Invalid directory: %s") % details) sys.exit(2) except cfg.ConfigFilesNotFoundError as e: cfg_files = e.config_files print(_("Failed to read configuration file(s): %s") % cfg_files) sys.exit(2) fn = CONF.category.action_fn fn_kwargs = fetch_func_args(fn) fn(**fn_kwargs) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/cmd/rtstool.py0000664000175000017500000002501400000000000017160 0ustar00zuulzuul00000000000000#!/usr/bin/env python # Copyright 2012 - 2013 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os import sys import rtslib_fb from cinder import i18n from cinder.i18n import _ i18n.enable_lazy() class RtstoolError(Exception): pass class RtstoolImportError(RtstoolError): pass def create(backing_device, name, userid, password, iser_enabled, initiator_iqns=None, portals_ips=None, portals_port=3260): # List of IPS that will not raise an error when they fail binding. # Originally we will fail on all binding errors. 
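# ---------------------------------------------------------------------------
# Illustrative sketch (hypothetical, standalone): the "allowed to fail"
# binding pattern used below for iSCSI portals.  Binding is attempted on
# every address, but a failure is only fatal when the address is not in the
# allow-fail set (here, the IPv6 wildcard).  Plain sockets stand in for
# rtslib_fb.NetworkPortal.
import socket


def bind_portals(addresses, port, allow_fail=('::',)):
    bound = []
    for addr in addresses:
        family = socket.AF_INET6 if ':' in addr else socket.AF_INET
        try:
            sock = socket.socket(family, socket.SOCK_STREAM)
            sock.bind((addr, port))
        except OSError as exc:
            if addr not in allow_fail:
                raise
            print('Warning: could not bind %s: %s' % (addr, exc))
        else:
            bound.append(sock)
    return bound


if __name__ == '__main__':
    # Port 0 asks the kernel for any free port, keeping the demo harmless.
    for sock in bind_portals(['0.0.0.0', '::'], 0):
        sock.close()
# ---------------------------------------------------------------------------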
ips_allow_fail = () try: rtsroot = rtslib_fb.root.RTSRoot() except rtslib_fb.utils.RTSLibError: print(_('Ensure that configfs is mounted at /sys/kernel/config.')) raise # Look to see if BlockStorageObject already exists for x in rtsroot.storage_objects: if x.name == name: # Already exists, use this one return so_new = rtslib_fb.BlockStorageObject(name=name, dev=backing_device) target_new = rtslib_fb.Target(rtslib_fb.FabricModule('iscsi'), name, 'create') tpg_new = rtslib_fb.TPG(target_new, mode='create') tpg_new.set_attribute('authentication', '1') lun_new = rtslib_fb.LUN(tpg_new, storage_object=so_new) if initiator_iqns: initiator_iqns = initiator_iqns.strip(' ') for i in initiator_iqns.split(','): acl_new = rtslib_fb.NodeACL(tpg_new, i, mode='create') acl_new.chap_userid = userid acl_new.chap_password = password rtslib_fb.MappedLUN(acl_new, lun_new.lun, lun_new.lun) tpg_new.enable = 1 # If no ips are given we'll bind to all IPv4 and v6 if not portals_ips: portals_ips = ('0.0.0.0', '[::0]') # TODO(emh): Binding to IPv6 fails sometimes -- let pass for now. ips_allow_fail = ('[::0]',) for ip in portals_ips: try: # rtslib expects IPv6 addresses to be surrounded by brackets portal = rtslib_fb.NetworkPortal(tpg_new, _canonicalize_ip(ip), portals_port, mode='any') except rtslib_fb.utils.RTSLibError: raise_exc = ip not in ips_allow_fail msg_type = 'Error' if raise_exc else 'Warning' print(_('%(msg_type)s: creating NetworkPortal: ensure port ' '%(port)d on ip %(ip)s is not in use by another service.') % {'msg_type': msg_type, 'port': portals_port, 'ip': ip}) if raise_exc: raise else: try: if iser_enabled == 'True': portal.iser = True except rtslib_fb.utils.RTSLibError: print(_('Error enabling iSER for NetworkPortal: please ensure ' 'that RDMA is supported on your iSCSI port %(port)d ' 'on ip %(ip)s.') % {'port': portals_port, 'ip': ip}) raise def _lookup_target(target_iqn, initiator_iqn): try: rtsroot = rtslib_fb.root.RTSRoot() except rtslib_fb.utils.RTSLibError: print(_('Ensure that configfs is mounted at /sys/kernel/config.')) raise # Look for the target for t in rtsroot.targets: if t.wwn == target_iqn: return t raise RtstoolError(_('Could not find target %s') % target_iqn) def add_initiator(target_iqn, initiator_iqn, userid, password): target = _lookup_target(target_iqn, initiator_iqn) tpg = next(target.tpgs) # get the first one for acl in tpg.node_acls: # See if this ACL configuration already exists if acl.node_wwn.lower() == initiator_iqn.lower(): # No further action required return acl_new = rtslib_fb.NodeACL(tpg, initiator_iqn, mode='create') acl_new.chap_userid = userid acl_new.chap_password = password rtslib_fb.MappedLUN(acl_new, 0, tpg_lun=0) def delete_initiator(target_iqn, initiator_iqn): target = _lookup_target(target_iqn, initiator_iqn) tpg = next(target.tpgs) # get the first one for acl in tpg.node_acls: if acl.node_wwn.lower() == initiator_iqn.lower(): acl.delete() return print(_('delete_initiator: %s ACL not found. Continuing.') % initiator_iqn) # Return successfully. 
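# ---------------------------------------------------------------------------
# Illustrative restatement (standalone) of the _canonicalize_ip() helper
# defined further down: rtslib expects IPv6 addresses to be wrapped in
# brackets, while IPv4 addresses and already-bracketed values pass through
# unchanged.
def canonicalize_ip(ip):
    if ip.startswith('[') or '.' in ip:
        return ip
    return '[' + ip + ']'


if __name__ == '__main__':
    print(canonicalize_ip('192.168.0.1'))   # unchanged
    print(canonicalize_ip('::1'))           # -> [::1]
    print(canonicalize_ip('[::1]'))         # already bracketed
# ---------------------------------------------------------------------------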
def get_targets(): rtsroot = rtslib_fb.root.RTSRoot() for x in rtsroot.targets: print(x.wwn) def delete(iqn): rtsroot = rtslib_fb.root.RTSRoot() for x in rtsroot.targets: if x.wwn == iqn: x.delete() break for x in rtsroot.storage_objects: if x.name == iqn: x.delete() break def verify_rtslib(): for member in ['BlockStorageObject', 'FabricModule', 'LUN', 'MappedLUN', 'NetworkPortal', 'NodeACL', 'root', 'Target', 'TPG']: if not hasattr(rtslib_fb, member): raise RtstoolImportError(_("rtslib_fb is missing member %s: You " "may need a newer python-rtslib-fb.") % member) def usage(): print("Usage:") print(sys.argv[0] + " create [device] [name] [userid] [password] [iser_enabled]" + " [-a] [-pPORT]") print(sys.argv[0] + " add-initiator [target_iqn] [userid] [password] [initiator_iqn]") print(sys.argv[0] + " delete-initiator [target_iqn] [initiator_iqn]") print(sys.argv[0] + " get-targets") print(sys.argv[0] + " delete [iqn]") print(sys.argv[0] + " verify") print(sys.argv[0] + " save [path_to_file]") sys.exit(1) def save_to_file(destination_file): rtsroot = rtslib_fb.root.RTSRoot() try: # If default destination use rtslib default save file if not destination_file: destination_file = rtslib_fb.root.default_save_file path_to_file = os.path.dirname(destination_file) # NOTE(geguileo): With default file we ensure path exists and # create it if doesn't. # Cinder's LIO target helper runs this as root, so it will have no # problem creating directory /etc/target. # If run manually from the command line without being root you will # get an error, same as when creating and removing targets. if not os.path.exists(path_to_file): os.makedirs(path_to_file, 0o755) except OSError as exc: raise RtstoolError(_('targetcli not installed and could not create ' 'default directory (%(default_path)s): %(exc)s') % {'default_path': path_to_file, 'exc': exc}) try: rtsroot.save_to_file(destination_file) except (OSError, IOError) as exc: raise RtstoolError(_('Could not save configuration to %(file_path)s: ' '%(exc)s') % {'file_path': destination_file, 'exc': exc}) def restore_from_file(configuration_file): rtsroot = rtslib_fb.root.RTSRoot() # If configuration file is None, use rtslib default save file. if not configuration_file: configuration_file = rtslib_fb.root.default_save_file try: rtsroot.restore_from_file(configuration_file) except (OSError, IOError) as exc: raise RtstoolError(_('Could not restore configuration file ' '%(file_path)s: %(exc)s'), {'file_path': configuration_file, 'exc': exc}) def parse_optional_create(argv): optional_args = {} for arg in argv: if arg.startswith('-a'): ips = [ip for ip in arg[2:].split(',') if ip] if not ips: usage() optional_args['portals_ips'] = ips elif arg.startswith('-p'): try: optional_args['portals_port'] = int(arg[2:]) except ValueError: usage() else: optional_args['initiator_iqns'] = arg return optional_args def _canonicalize_ip(ip): if ip.startswith('[') or "." 
in ip: return ip return "[" + ip + "]" def main(argv=None): if argv is None: argv = sys.argv if len(argv) < 2: usage() if argv[1] == 'create': if len(argv) < 7: usage() if len(argv) > 10: usage() backing_device = argv[2] name = argv[3] userid = argv[4] password = argv[5] iser_enabled = argv[6] if len(argv) > 7: optional_args = parse_optional_create(argv[7:]) else: optional_args = {} create(backing_device, name, userid, password, iser_enabled, **optional_args) elif argv[1] == 'add-initiator': if len(argv) < 6: usage() target_iqn = argv[2] userid = argv[3] password = argv[4] initiator_iqn = argv[5] add_initiator(target_iqn, initiator_iqn, userid, password) elif argv[1] == 'delete-initiator': if len(argv) < 4: usage() target_iqn = argv[2] initiator_iqn = argv[3] delete_initiator(target_iqn, initiator_iqn) elif argv[1] == 'get-targets': get_targets() elif argv[1] == 'delete': if len(argv) < 3: usage() iqn = argv[2] delete(iqn) elif argv[1] == 'verify': # This is used to verify that this script can be called by cinder, # and that rtslib_fb is new enough to work. verify_rtslib() return 0 elif argv[1] == 'save': if len(argv) > 3: usage() destination_file = argv[2] if len(argv) > 2 else None save_to_file(destination_file) return 0 elif argv[1] == 'restore': if len(argv) > 3: usage() configuration_file = argv[2] if len(argv) > 2 else None restore_from_file(configuration_file) return 0 else: usage() return 0 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/cmd/scheduler.py0000664000175000017500000000407200000000000017431 0ustar00zuulzuul00000000000000#!/usr/bin/env python # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Starter script for Cinder Scheduler.""" import logging as python_logging import sys import eventlet eventlet.monkey_patch() # Monkey patch the original current_thread to use the up-to-date _active # global variable. 
See https://bugs.launchpad.net/bugs/1863021 and # https://github.com/eventlet/eventlet/issues/592 import __original_module_threading as orig_threading # pylint: disable=E0401 import threading # noqa orig_threading.current_thread.__globals__['_active'] = \ threading._active # type: ignore from oslo_config import cfg from oslo_log import log as logging from oslo_reports import guru_meditation_report as gmr from oslo_reports import opts as gmr_opts # Need to register global_opts from cinder.common import config # noqa from cinder import i18n i18n.enable_lazy() from cinder import objects from cinder import service from cinder import utils from cinder import version CONF = cfg.CONF def main() -> None: objects.register_all() gmr_opts.set_defaults(CONF) CONF(sys.argv[1:], project='cinder', version=version.version_string()) logging.setup(CONF, "cinder") python_logging.captureWarnings(True) utils.monkey_patch() gmr.TextGuruMeditation.setup_autorun(version, conf=CONF) server = service.Service.create(binary='cinder-scheduler') service.serve(server) service.wait() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/cmd/status.py0000664000175000017500000002706200000000000017002 0ustar00zuulzuul00000000000000# Copyright 2018 Huawei Technologies Co., Ltd. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """CLI interface for cinder status commands.""" import os import sys from oslo_config import cfg from oslo_upgradecheck import upgradecheck as uc from cinder import context from cinder import db from cinder import exception from cinder import objects from cinder.policy import DEFAULT_POLICY_FILENAME # Need to import service to load config from cinder import service # noqa # We must first register Cinder's objects. Otherwise # we cannot import the volume manager. objects.register_all() import cinder.volume.manager as volume_manager CONF = cfg.CONF SUCCESS = uc.Code.SUCCESS FAILURE = uc.Code.FAILURE WARNING = uc.Code.WARNING REMOVED_DRVRS = [ "coprhd", "drbdmanage", "disco", "hgst", "hpe_lefthand", "sheepdog", "zfssa", ] def _get_enabled_drivers() -> list[str]: """Returns a list of volume_driver entries""" volume_drivers = [] if CONF.enabled_backends: backend: str for backend in filter(None, CONF.enabled_backends): # Each backend group needs to be registered first CONF.register_opts(volume_manager.volume_backend_opts, group=backend) volume_driver = CONF[backend]['volume_driver'] volume_drivers.append(volume_driver) return volume_drivers class Checks(uc.UpgradeCommands): """Upgrade checks to run.""" def __init__(self, *args, **kwargs): super(Checks, self).__init__(*args, **kwargs) self.context = context.get_admin_context() def _file_exists(self, path: str) -> bool: """Helper for mocking check of os.path.exists.""" return os.path.exists(path) def _check_backup_module(self) -> uc.Result: """Checks for the use of backup driver module paths. 
The use of backup modules for setting backup_driver was deprecated and we now only allow the full driver path. This checks that there are not any remaining settings using the old method. """ # We import here to avoid conf loading order issues with cinder.service # above. import cinder.backup.manager # noqa backup_driver = CONF.backup_driver # Easy check in that a class name will have mixed casing if backup_driver == backup_driver.lower(): return uc.Result( FAILURE, 'Backup driver configuration requires the full path to the ' 'driver, but current setting is using only the module path.') return uc.Result(SUCCESS) def _check_policy_file(self) -> uc.Result: """Checks if a policy.json file is present. With the switch to policy-in-code, policy files should be policy.yaml and should only be present if overriding default policy. Just checks and warns if the old file is present to make sure they are aware it is not being used. """ # make sure we know where to look for the policy file config_dir = CONF.find_file('cinder.conf') if not config_dir: return uc.Result( WARNING, 'Cannot locate your cinder configuration directory. ' 'Please re-run using the --config-dir option.') policy_file = CONF.oslo_policy.policy_file json_file = os.path.join(os.path.dirname(config_dir), 'policy.json') if policy_file == DEFAULT_POLICY_FILENAME: # Default is being used, check for old json file if self._file_exists(json_file): return uc.Result( WARNING, 'policy.json file is present. Make sure any changes from ' 'the default policies are present in a policy.yaml file ' 'instead. If you really intend to use a policy.json file, ' 'make sure that its absolute path is set as the value of ' "the 'policy_file' configuration option in the " '[oslo_policy] section of your cinder.conf file.') else: # They have configured a custom policy file. It is OK if it does # not exist, but we should check and warn about it while we're # checking. if not policy_file.startswith('/'): # policy_file is relative to config_dir policy_file = os.path.join(os.path.dirname(config_dir), policy_file) if not self._file_exists(policy_file): return uc.Result( WARNING, "Configured policy file '%s' does not exist. This may be " "expected, but default policies will be used until any " "desired overrides are added to the configured file." % policy_file) return uc.Result(SUCCESS) def _check_periodic_interval(self) -> uc.Result: """Checks for non-default use of periodic_interval. Some new configuration options have been introduced to supplement periodic_interval, which was being used for multiple, possibly conflicting purposes. If a non-default value for periodic_interval is configured, warn the operator to review whether one of the new options is better suited for the periodic task(s) being tuned. """ periodic_interval = CONF.periodic_interval if periodic_interval != 60: return uc.Result( WARNING, "Detected non-default value for the 'periodic_interval' " "option. New configuration options have been introduced to " "replace the use of 'periodic_interval' for some purposes. " "Please consult the 'Upgrade' section of the Train release " "notes for more information.") return uc.Result(SUCCESS) def _check_nested_quota(self) -> uc.Result: """Checks for the use of the nested quota driver. The NestedDbQuotaDriver is deprecated in the Train release and is removed in Wallaby release to prepare for upcoming unified limits changes. """ # We import here to avoid conf loading order issues with cinder.service # above. 
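# ---------------------------------------------------------------------------
# Illustrative sketch (hypothetical, standalone): the overall shape of the
# checks in this module -- each check returns a result code plus an optional
# detail message, and a runner reports the most severe code it saw.  Plain
# integers stand in for oslo_upgradecheck's Code values.
SUCCESS, WARNING, FAILURE = 0, 1, 2


def check_policy_file():
    return SUCCESS, None


def check_removed_drivers():
    return FAILURE, 'a removed driver is still configured; migrate its data'


CHECKS = (
    ('Use of Policy File', check_policy_file),
    ('Removed Drivers', check_removed_drivers),
)


def run_checks():
    worst = SUCCESS
    for name, check in CHECKS:
        code, detail = check()
        worst = max(worst, code)
        print('%-20s -> %s %s' % (name, code, detail or ''))
    return worst


if __name__ == '__main__':
    raise SystemExit(run_checks())
# ---------------------------------------------------------------------------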
import cinder.quota # noqa quota_driver = CONF.quota_driver if quota_driver == 'cinder.quota.NestedDbQuotaDriver': return uc.Result( FAILURE, 'The NestedDbQuotaDriver was deprecated in Train release ' 'and is removed in Wallaby release.') return uc.Result(SUCCESS) def _check_legacy_windows_config(self) -> uc.Result: """Checks to ensure that the Windows driver path is properly updated. The WindowsDriver was renamed in the Queens release to WindowsISCSIDriver to avoid confusion with the SMB driver. The backwards compatibility for this has now been removed, so any cinder.conf settings still using cinder.volume.drivers.windows.windows.WindowsDriver must now be updated to use cinder.volume.drivers.windows.iscsi.WindowsISCSIDriver. """ for volume_driver in _get_enabled_drivers(): if (volume_driver == "cinder.volume.drivers.windows.windows.WindowsDriver"): return uc.Result( FAILURE, 'Setting volume_driver to ' 'cinder.volume.drivers.windows.windows.WindowsDriver ' 'is no longer supported. Please update to use ' 'cinder.volume.drivers.windows.iscsi.WindowsISCSIDriver ' 'in cinder.conf.') return uc.Result(SUCCESS) def _check_removed_drivers(self) -> uc.Result: """Checks to ensure that no removed drivers are configured. Checks start with drivers removed in the Stein release. """ removed_drivers = [] for volume_driver in _get_enabled_drivers(): for removed_driver in REMOVED_DRVRS: if removed_driver in volume_driver: removed_drivers.append(volume_driver) if removed_drivers: if len(removed_drivers) > 1: return uc.Result( FAILURE, 'The following drivers, which no longer exist, were found ' 'configured in your cinder.conf file:\n%s.\n' 'These drivers have been removed and all data should ' 'be migrated off of the associated backends before ' 'upgrading Cinder.' % ",\n".join(removed_drivers)) else: return uc.Result( FAILURE, 'Found driver %s configured in your cinder.conf file. ' 'This driver has been removed and all data should ' 'be migrated off of this backend before upgrading ' 'Cinder.' % removed_drivers[0]) return uc.Result(SUCCESS) def _check_service_uuid(self) -> uc.Result: try: db.service_get_by_uuid(self.context, None) except exception.ServiceNotFound: volumes = db.volume_get_all(self.context, limit=1, filters={'service_uuid': None}) if not volumes: return uc.Result(SUCCESS) return uc.Result( FAILURE, 'Services and volumes must have a service UUID. Please fix this ' 'issue by running Queens online data migrations.') def _check_attachment_specs(self): if db.attachment_specs_exist(self.context): return uc.Result( FAILURE, 'There should be no more AttachmentSpecs in the system. ' 'Please fix this issue by running Queens online data ' 'migrations.') return uc.Result(SUCCESS) _upgrade_checks = ( # added in Stein ('Backup Driver Path', _check_backup_module), ('Use of Policy File', _check_policy_file), ('Windows Driver Path', _check_legacy_windows_config), ('Removed Drivers', _check_removed_drivers), # added in Train ('Periodic Interval Use', _check_periodic_interval), ('Service UUIDs', _check_service_uuid), ('Attachment specs', _check_attachment_specs), # added in Wallaby ('Use of Nested Quota Driver', _check_nested_quota), ) def main(): # TODO(rosmaita): need to do this because we suggest using the # --config-dir option, and if the user gives a bogus value, we # get a stacktrace. 
Needs to be fixed in oslo_upgradecheck try: return uc.main(CONF, 'cinder', Checks()) except cfg.ConfigDirNotFoundError: return ('ERROR: cannot read the cinder configuration directory.\n' 'Please re-run using the --config-dir option ' 'with a valid cinder configuration directory.') if __name__ == '__main__': sys.exit(main()) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/cmd/volume.py0000664000175000017500000001710700000000000016765 0ustar00zuulzuul00000000000000#!/usr/bin/env python # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Starter script for Cinder Volume.""" import logging as python_logging import os import re import shlex import sys import eventlet import eventlet.tpool # Monkey patching must go before the oslo.log import, otherwise # oslo.context will not use greenthread thread local and all greenthreads # will share the same context. if os.name == 'nt': # eventlet monkey patching the os module causes subprocess.Popen to fail # on Windows when using pipes due to missing non-blocking IO support. eventlet.monkey_patch(os=False) else: eventlet.monkey_patch() # Monkey patch the original current_thread to use the up-to-date _active # global variable. See https://bugs.launchpad.net/bugs/1863021 and # https://github.com/eventlet/eventlet/issues/592 import __original_module_threading as orig_threading # pylint: disable=E0401 import threading # noqa orig_threading.current_thread.__globals__['_active'] = \ threading._active # type: ignore import typing import os_brick from oslo_config import cfg from oslo_log import log as logging from oslo_privsep import priv_context from oslo_reports import guru_meditation_report as gmr from oslo_reports import opts as gmr_opts # Need to register global_opts from cinder.common import config # noqa from cinder.common import constants from cinder.db import api as session from cinder import exception from cinder import i18n i18n.enable_lazy() from cinder.i18n import _ from cinder import objects from cinder import service from cinder import utils from cinder import version if typing.TYPE_CHECKING: import oslo_service CONF = cfg.CONF host_opt = cfg.StrOpt('backend_host', help='Backend override of host value.') CONF.register_cli_opt(host_opt) backend_name_opt = cfg.StrOpt( 'backend_name', help='NOTE: For Windows internal use only. The name of the backend to be ' 'managed by this process. It must be one of the backends defined ' 'using the "enabled_backends" option. Note that normally, this ' 'should not be used directly. Cinder uses it internally in order to ' 'spawn subprocesses on Windows.') CONF.register_cli_opt(backend_name_opt) cluster_opt = cfg.StrOpt('cluster', default=None, help='Name of this cluster. 
Used to group volume ' 'hosts that share the same backend ' 'configurations to work in HA Active-Active ' 'mode.') CONF.register_opt(cluster_opt) LOG = None service_started = False def _launch_service(launcher: 'oslo_service.ProcessLauncher', backend: str) -> None: CONF.register_opt(host_opt, group=backend) backend_host = getattr(CONF, backend).backend_host host = "%s@%s" % (backend_host or CONF.host, backend) # We also want to set cluster to None on empty strings, and we # ignore leading and trailing spaces. cluster = CONF.cluster and CONF.cluster.strip() cluster = (cluster or None) and '%s@%s' % (cluster, backend) try: server = service.Service.create(host=host, service_name=backend, binary=constants.VOLUME_BINARY, coordination=True, cluster=cluster) except Exception: assert LOG is not None LOG.exception('Volume service %s failed to start.', host) else: # Dispose of the whole DB connection pool here before # starting another process. Otherwise we run into cases where # child processes share DB connections which results in errors. session.dispose_engine() launcher.launch_service(server) _notify_service_started() def _ensure_service_started() -> None: if not service_started: assert LOG is not None LOG.error('No volume service(s) started successfully, terminating.') sys.exit(1) def _notify_service_started() -> None: global service_started service_started = True def _launch_services_win32() -> None: if CONF.backend_name and CONF.backend_name not in CONF.enabled_backends: msg = _('The explicitly passed backend name "%(backend_name)s" is not ' 'among the enabled backends: %(enabled_backends)s.') raise exception.InvalidInput( reason=msg % dict(backend_name=CONF.backend_name, enabled_backends=CONF.enabled_backends)) # We'll avoid spawning a subprocess if a single backend is requested. single_backend_name = (CONF.enabled_backends[0] if len(CONF.enabled_backends) == 1 else CONF.backend_name) if single_backend_name: launcher = service.get_launcher() _launch_service(launcher, single_backend_name) elif CONF.enabled_backends: # We're using the 'backend_name' argument, requesting a certain backend # and constructing the service object within the child process. launcher = service.WindowsProcessLauncher() py_script_re = re.compile(r'.*\.py\w?$') backend: str for backend in filter(None, CONF.enabled_backends): cmd = sys.argv + ['--backend_name=%s' % backend] # Recent setuptools versions will trim '-script.py' and '.exe' # extensions from sys.argv[0]. if py_script_re.match(sys.argv[0]): cmd = [sys.executable] + cmd launcher.add_process(cmd) _notify_service_started() _ensure_service_started() launcher.wait() def _launch_services_posix() -> None: launcher = service.get_launcher() backend: str for backend in filter(None, CONF.enabled_backends): _launch_service(launcher, backend) _ensure_service_started() launcher.wait() def main() -> None: objects.register_all() gmr_opts.set_defaults(CONF) CONF(sys.argv[1:], project='cinder', version=version.version_string()) logging.setup(CONF, "cinder") python_logging.captureWarnings(True) priv_context.init(root_helper=shlex.split(utils.get_root_helper())) utils.monkey_patch() gmr.TextGuruMeditation.setup_autorun(version, conf=CONF) os_brick.setup(CONF) global LOG LOG = logging.getLogger(__name__) if not CONF.enabled_backends: LOG.error('Configuration for cinder-volume does not specify ' '"enabled_backends". 
Using DEFAULT section to configure ' 'drivers is not supported since Ocata.') sys.exit(1) if os.name == 'nt': # We cannot use oslo.service to spawn multiple services on Windows. # It relies on forking, which is not available on Windows. # Furthermore, service objects are unmarshallable objects that are # passed to subprocesses. _launch_services_win32() else: _launch_services_posix() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/cmd/volume_usage_audit.py0000664000175000017500000002355500000000000021343 0ustar00zuulzuul00000000000000#!/usr/bin/env python # Copyright (c) 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Cron script to generate usage notifications for volumes existing during the audit period. Together with the notifications generated by volumes create/delete/resize, over that time period, this allows an external system consuming usage notification feeds to calculate volume usage for each tenant. Time periods are specified as 'hour', 'month', 'day' or 'year' - `hour` - previous hour. If run at 9:07am, will generate usage for 8-9am. - `month` - previous month. If the script is run April 1, it will generate usages for March 1 through March 31. - `day` - previous day. if run on July 4th, it generates usages for July 3rd. - `year` - previous year. If run on Jan 1, it generates usages for Jan 1 through Dec 31 of the previous year. 
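As a concrete example (illustrative; the console script name and scheduling
depend on how Cinder is installed), a deployment using the default monthly
audit period might run this from cron shortly after each period closes:

    5 0 1 * * cinder-volume-usage-audit --send_actions

The --start_time and --end_time options (format 'YYYY-MM-DD HH:MM:SS') can be
supplied to audit an arbitrary window instead of the last completed period.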
""" import datetime import sys import iso8601 from oslo_config import cfg from oslo_log import log as logging from cinder import i18n # noqa i18n.enable_lazy() from cinder import context from cinder.i18n import _ from cinder import objects from cinder import rpc from cinder import utils from cinder import version import cinder.volume.volume_utils CONF = cfg.CONF script_opts = [ cfg.StrOpt('start_time', help="If this option is specified then the start time " "specified is used instead of the start time of the " "last completed audit period."), cfg.StrOpt('end_time', help="If this option is specified then the end time " "specified is used instead of the end time of the " "last completed audit period."), cfg.BoolOpt('send_actions', default=False, help="Send the volume and snapshot create and delete " "notifications generated in the specified period."), ] CONF.register_cli_opts(script_opts) def _time_error(LOG, begin, end): if CONF.start_time: begin = datetime.datetime.strptime(CONF.start_time, "%Y-%m-%d %H:%M:%S") if CONF.end_time: end = datetime.datetime.strptime(CONF.end_time, "%Y-%m-%d %H:%M:%S") begin = begin.replace(tzinfo=iso8601.UTC) end = end.replace(tzinfo=iso8601.UTC) if end <= begin: msg = _("The end time (%(end)s) must be after the start " "time (%(start)s).") % {'start': begin, 'end': end} LOG.error(msg) sys.exit(-1) return begin, end def _vol_notify_usage(LOG, volume_ref, extra_info, admin_context): """volume_ref notify usage""" try: LOG.debug("Send exists notification for " "<%(extra_info)s>", {'volume_id': volume_ref.id, 'project_id': volume_ref.project_id, 'extra_info': extra_info}) cinder.volume.volume_utils.notify_about_volume_usage( admin_context, volume_ref, 'exists', extra_usage_info=extra_info) except Exception as exc_msg: LOG.error("Exists volume notification failed: %s", exc_msg, resource=volume_ref) def _snap_notify_usage(LOG, snapshot_ref, extra_info, admin_context): """snapshot_ref notify usage""" try: LOG.debug("Send notification for " " <%(extra_info)s>", {'snapshot_id': snapshot_ref.id, 'project_id': snapshot_ref.project_id, 'extra_info': extra_info}) cinder.volume.volume_utils.notify_about_snapshot_usage( admin_context, snapshot_ref, 'exists', extra_info) except Exception as exc_msg: LOG.error("Exists snapshot notification failed: %s", exc_msg, resource=snapshot_ref) def _backup_notify_usage(LOG, backup_ref, extra_info, admin_context): """backup_ref notify usage""" try: cinder.volume.volume_utils.notify_about_backup_usage( admin_context, backup_ref, 'exists', extra_info) LOG.debug("Sent notification for " " <%(extra_info)s>", {'backup_id': backup_ref.id, 'project_id': backup_ref.project_id, 'extra_info': extra_info}) except Exception as exc_msg: LOG.error("Exists backups notification failed: %s", exc_msg) def _create_action(obj_ref, admin_context, LOG, notify_about_usage, type_id_str, type_name): try: local_extra_info = { 'audit_period_beginning': str(obj_ref.created_at), 'audit_period_ending': str(obj_ref.created_at), } LOG.debug("Send create notification for <%(type_id_str)s: %(_id)s> " " <%(extra_info)s>", {'type_id_str': type_id_str, '_id': obj_ref.id, 'project_id': obj_ref.project_id, 'extra_info': local_extra_info}) notify_about_usage(admin_context, obj_ref, 'create.start', extra_usage_info=local_extra_info) notify_about_usage(admin_context, obj_ref, 'create.end', extra_usage_info=local_extra_info) except Exception as exc_msg: LOG.error("Create %(type)s notification failed: %(exc_msg)s", {'type': type_name, 'exc_msg': exc_msg}, resource=obj_ref) def 
_delete_action(obj_ref, admin_context, LOG, notify_about_usage, type_id_str, type_name): try: local_extra_info = { 'audit_period_beginning': str(obj_ref.deleted_at), 'audit_period_ending': str(obj_ref.deleted_at), } LOG.debug("Send delete notification for <%(type_id_str)s: %(_id)s> " " <%(extra_info)s>", {'type_id_str': type_id_str, '_id': obj_ref.id, 'project_id': obj_ref.project_id, 'extra_info': local_extra_info}) notify_about_usage(admin_context, obj_ref, 'delete.start', extra_usage_info=local_extra_info) notify_about_usage(admin_context, obj_ref, 'delete.end', extra_usage_info=local_extra_info) except Exception as exc_msg: LOG.error("Delete %(type)s notification failed: %(exc_msg)s", {'type': type_name, 'exc_msg': exc_msg}, resource=obj_ref) def _obj_ref_action(_notify_usage, LOG, obj_ref, extra_info, admin_context, begin, end, notify_about_usage, type_id_str, type_name): _notify_usage(LOG, obj_ref, extra_info, admin_context) if CONF.send_actions: if begin < obj_ref.created_at < end: _create_action(obj_ref, admin_context, LOG, notify_about_usage, type_id_str, type_name) if obj_ref.deleted_at and begin < obj_ref.deleted_at < end: _delete_action(obj_ref, admin_context, LOG, notify_about_usage, type_id_str, type_name) def main(): objects.register_all() admin_context = context.get_admin_context() CONF(sys.argv[1:], project='cinder', version=version.version_string()) logging.setup(CONF, "cinder") LOG = logging.getLogger("cinder") rpc.init(CONF) begin, end = utils.last_completed_audit_period() begin, end = _time_error(LOG, begin, end) LOG.info("Starting volume usage audit") LOG.info("Creating usages for %(begin_period)s until %(end_period)s", {"begin_period": begin, "end_period": end}) extra_info = { 'audit_period_beginning': str(begin), 'audit_period_ending': str(end), } volumes = objects.VolumeList.get_all_active_by_window(admin_context, begin, end) LOG.info("Found %d volumes", len(volumes)) for volume_ref in volumes: _obj_ref_action(_vol_notify_usage, LOG, volume_ref, extra_info, admin_context, begin, end, cinder.volume.volume_utils.notify_about_volume_usage, "volume_id", "volume") snapshots = objects.SnapshotList.get_all_active_by_window(admin_context, begin, end) LOG.info("Found %d snapshots", len(snapshots)) for snapshot_ref in snapshots: _obj_ref_action(_snap_notify_usage, LOG, snapshot_ref, extra_info, admin_context, begin, end, cinder.volume.volume_utils.notify_about_snapshot_usage, "snapshot_id", "snapshot") backups = objects.BackupList.get_all_active_by_window(admin_context, begin, end) LOG.info("Found %d backups", len(backups)) for backup_ref in backups: _obj_ref_action(_backup_notify_usage, LOG, backup_ref, extra_info, admin_context, begin, end, cinder.volume.volume_utils.notify_about_backup_usage, "backup_id", "backup") LOG.info("Volume usage audit completed") ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315577.0671182 cinder-27.0.0/cinder/common/0000775000175000017500000000000000000000000015623 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/common/__init__.py0000664000175000017500000000000000000000000017722 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/common/config.py0000664000175000017500000002533200000000000017447 0ustar00zuulzuul00000000000000# Copyright 2010 United States Government as represented 
by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # Copyright 2012 Red Hat, Inc. # Copyright 2013 NTT corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Command-line flag library. Emulates gflags by wrapping cfg.ConfigOpts. The idea is to move fully to cfg eventually, and this wrapper is a stepping stone. """ import socket from oslo_config import cfg from oslo_log import log as logging from oslo_middleware import cors from oslo_policy import opts as policy_opts from oslo_utils import netutils CONF = cfg.CONF logging.register_options(CONF) core_opts = [ cfg.StrOpt('state_path', default='/var/lib/cinder', help="Top-level directory for maintaining cinder's state"), ] CONF.register_cli_opts(core_opts) api_opts = [ cfg.BoolOpt('api_rate_limit', default=True, help='Enables or disables rate limit of the API.'), cfg.StrOpt('group_api_class', default='cinder.group.api.API', help='The full class name of the group API class'), cfg.ListOpt('osapi_volume_ext_list', default=[], help='Specify list of extensions to load when using osapi_' 'volume_extension option with cinder.api.contrib.' 'select_extensions'), cfg.MultiStrOpt('osapi_volume_extension', default=['cinder.api.contrib.standard_extensions'], help='osapi volume extension to load'), cfg.StrOpt('volume_api_class', default='cinder.volume.api.API', help='The full class name of the volume API class to use'), ] global_opts = [ cfg.HostAddressOpt('my_ip', sample_default='', default=netutils.get_my_ipv4(), help='IP address of this host'), cfg.StrOpt('volume_manager', default='cinder.volume.manager.VolumeManager', help='Full class name for the Manager for volume'), cfg.StrOpt('scheduler_manager', default='cinder.scheduler.manager.SchedulerManager', help='Full class name for the Manager for scheduler'), cfg.StrOpt('host', sample_default='localhost', default=socket.gethostname(), help='Name of this node. This can be an opaque ' 'identifier. It is not necessarily a host name, ' 'FQDN, or IP address.'), # NOTE(vish): default to nova for compatibility with nova installs cfg.StrOpt('storage_availability_zone', default='nova', help='Availability zone of this node. Can be overridden per ' 'volume backend with the option ' '"backend_availability_zone".'), cfg.StrOpt('default_availability_zone', help='Default availability zone for new volumes. If not set, ' 'the storage_availability_zone option value is used as ' 'the default for new volumes.'), cfg.BoolOpt('allow_availability_zone_fallback', default=False, help='If the requested Cinder availability zone is ' 'unavailable, fall back to the value of ' 'default_availability_zone, then ' 'storage_availability_zone, instead of failing.'), cfg.StrOpt('default_volume_type', default='__DEFAULT__', required=True, help='Default volume type to use'), cfg.StrOpt('default_group_type', help='Default group type to use'), cfg.StrOpt('volume_usage_audit_period', default='month', help='Time period for which to generate volume usages. 
' 'The options are hour, day, month, or year.'), cfg.StrOpt('rootwrap_config', default='/etc/cinder/rootwrap.conf', help='Path to the rootwrap configuration file to use for ' 'running commands as root'), cfg.BoolOpt('monkey_patch', default=False, help='Enable monkey patching'), cfg.ListOpt('monkey_patch_modules', default=[], help='List of modules/decorators to monkey patch'), cfg.IntOpt('service_down_time', default=60, help='Maximum time since last check-in for a service to be ' 'considered up'), cfg.ListOpt('enabled_backends', help='A list of backend names to use. These backend names ' 'should be backed by a unique [CONFIG] group ' 'with its options'), cfg.BoolOpt('no_snapshot_gb_quota', default=False, help="Whether snapshots sizes count against global and per " "volume type gigabyte quotas. By default snapshots' " "sizes are counted."), cfg.StrOpt('transfer_api_class', default='cinder.transfer.api.API', help='The full class name of the volume transfer API class'), cfg.StrOpt('consistencygroup_api_class', default='cinder.consistencygroup.api.API', help='The full class name of the consistencygroup API class'), cfg.BoolOpt('split_loggers', default=False, help='Log requests to multiple loggers.') ] auth_opts = [ cfg.StrOpt('auth_strategy', default='keystone', choices=[('noauth', 'Do not perform authentication'), ('noauth_include_project_id', 'Do not perform authentication, and include a' ' project_id in API URLs'), ('keystone', 'Authenticate using keystone')], help='The strategy to use for auth. Supports noauth,' ' noauth_include_project_id or keystone.'), ] backup_opts = [ cfg.StrOpt('backup_api_class', default='cinder.backup.api.API', help='The full class name of the volume backup API class'), cfg.StrOpt('backup_manager', default='cinder.backup.manager.BackupManager', help='Full class name for the Manager for volume backup'), ] image_opts = [ cfg.ListOpt('glance_api_servers', default=None, help='A list of the URLs of glance API servers available to ' 'cinder ([http[s]://][hostname|ip]:port). If protocol ' 'is not specified it defaults to http.'), cfg.IntOpt('glance_num_retries', min=0, default=3, help='Number retries when downloading an image from glance'), cfg.BoolOpt('glance_api_insecure', default=False, help='Allow to perform insecure SSL (https) requests to ' 'glance (https will be used but cert validation will ' 'not be performed).'), cfg.BoolOpt('glance_api_ssl_compression', default=False, help='Enables or disables negotiation of SSL layer ' 'compression. In some cases disabling compression ' 'can improve data throughput, such as when high ' 'network bandwidth is available and you use ' 'compressed image formats like qcow2.'), cfg.StrOpt('glance_ca_certificates_file', help='Location of ca certificates file to use for glance ' 'client requests.'), cfg.StrOpt('glance_certfile', help='Location of certificate file to use for glance ' 'client requests.'), cfg.StrOpt('glance_keyfile', help='Location of certificate key file to use for glance ' 'client requests.'), cfg.IntOpt('glance_request_timeout', help='http/https timeout value for glance operations. If no ' 'value (None) is supplied here, the glanceclient default ' 'value is used.'), ] compression_opts = [ cfg.StrOpt('compression_format', default='gzip', choices=[('gzip', 'GNUzip format')], help='Image compression format on image upload'), cfg.BoolOpt('allow_compression_on_image_upload', default=False, help='The strategy to use for image compression on upload. 
' 'Default is disallow compression.'), ] CONF.register_opts(api_opts) CONF.register_opts(core_opts) CONF.register_opts(auth_opts) CONF.register_opts(backup_opts) CONF.register_opts(image_opts) CONF.register_opts(global_opts) CONF.register_opts(compression_opts) def set_middleware_defaults(): """Update default configuration options for oslo.middleware.""" cors.set_defaults( allow_headers=['X-Auth-Token', 'X-Identity-Status', 'X-Roles', 'X-Service-Catalog', 'X-User-Id', 'X-Tenant-Id', 'X-OpenStack-Request-ID', 'X-Trace-Info', 'X-Trace-HMAC', 'OpenStack-API-Version'], expose_headers=['X-Auth-Token', 'X-Subject-Token', 'X-Service-Token', 'X-OpenStack-Request-ID', 'OpenStack-API-Version'], allow_methods=['GET', 'PUT', 'POST', 'DELETE', 'PATCH', 'HEAD'] ) def set_external_library_defaults(): """Set default configuration options for external openstack libraries.""" # This function is required so that our settings will override the defaults # set by the libraries when the Cinder config files are generated. This # function is declared as an entry point for oslo.config.opts.defaults in # setup.cfg. set_middleware_defaults() policy_opts.set_defaults(CONF, enforce_scope=False, enforce_new_defaults=False) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/common/constants.py0000664000175000017500000000367500000000000020224 0ustar00zuulzuul00000000000000# Copyright 2016 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
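# NOTE: illustrative usage only -- backends report one of the storage
# protocol constants below, and code that needs to treat the different
# spellings alike checks membership in the *_VARIANTS lists, e.g.:
#
#     from cinder.common import constants
#     if storage_protocol in constants.ISCSI_VARIANTS:
#         ...
#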
# The maximum value a signed INT type may have DB_MAX_INT = 0x7FFFFFFF # The cinder services binaries and topics' names API_BINARY = "cinder-api" SCHEDULER_BINARY = "cinder-scheduler" VOLUME_BINARY = "cinder-volume" BACKUP_BINARY = "cinder-backup" SCHEDULER_TOPIC = SCHEDULER_BINARY VOLUME_TOPIC = VOLUME_BINARY BACKUP_TOPIC = BACKUP_BINARY LOG_BINARIES = (SCHEDULER_BINARY, VOLUME_BINARY, BACKUP_BINARY, API_BINARY) # The encryption key ID used by the legacy fixed-key ConfKeyMgr FIXED_KEY_ID = '00000000-0000-0000-0000-000000000000' # Storage protocol constants CEPH = 'ceph' DRBD = 'DRBD' FC = 'FC' FC_VARIANT_1 = 'fibre_channel' FC_VARIANT_2 = 'fc' FILE = 'file' ISCSI = 'iSCSI' ISCSI_VARIANT = 'iscsi' ISER = 'iSER' LIGHTOS = 'lightos' NFS = 'NFS' NFS_VARIANT = 'nfs' NVMEOF = 'NVMe-oF' NVMEOF_VARIANT_1 = 'NVMeOF' NVMEOF_VARIANT_2 = 'nvmeof' NVMEOF_ROCE = 'NVMe-RoCE' NVMEOF_FC = 'NVMe-FC' NVMEOF_TCP = 'NVMe-TCP' SCALEIO = 'scaleio' SCSI = 'SCSI' STORPOOL = 'storpool' VMDK = 'vmdk' VSTORAGE = 'vstorageobject' # These must be strings, because there are places that check specific type ISCSI_VARIANTS = [ISCSI, ISCSI_VARIANT] FC_VARIANTS = [FC, FC_VARIANT_1, FC_VARIANT_2] NFS_VARIANTS = [NFS, NFS_VARIANT] NVMEOF_VARIANTS = [NVMEOF, NVMEOF_VARIANT_1, NVMEOF_VARIANT_2] CACHEABLE_PROTOCOLS = FC_VARIANTS + ISCSI_VARIANTS + NVMEOF_VARIANTS ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/common/sqlalchemyutils.py0000664000175000017500000001535200000000000021426 0ustar00zuulzuul00000000000000# Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # Copyright 2010-2011 OpenStack Foundation # Copyright 2012 Justin Santa Barbara # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Implementation of paginate query.""" import datetime from oslo_log import log as logging import sqlalchemy import sqlalchemy.sql as sa_sql from sqlalchemy.sql import type_api from cinder.db import api from cinder import exception from cinder.i18n import _ LOG = logging.getLogger(__name__) _TYPE_SCHEMA = { 'datetime': datetime.datetime(1900, 1, 1), 'big_integer': 0, 'integer': 0, 'string': '', 'boolean': False, } def _get_default_column_value(model, column_name): """Return the default value of the columns from DB table. In postgreDB case, if no right default values are being set, an psycopg2.DataError will be thrown. """ attr = getattr(model, column_name) # Return the default value directly if the model contains. Otherwise return # a default value which is not None. if attr.default and isinstance(attr.default, type_api.TypeEngine): return attr.default.arg attr_type = attr.type return _TYPE_SCHEMA[attr_type.__visit_name__] # TODO(wangxiyuan): Use oslo_db.sqlalchemy.utils.paginate_query once it is # stable and afforded by the minimum version in requirement.txt. 
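# The paginate_query() helper defined below is typically called from the
# DB API layer roughly as follows (illustrative sketch; the model and the
# query construction are placeholders, not part of this module):
#
#     query = model_query(context, models.Volume)
#     query = paginate_query(query, models.Volume, limit=100,
#                            sort_keys=['created_at', 'id'],
#                            marker=marker_volume,
#                            sort_dirs=['desc', 'desc'])
#
# Including 'id' in sort_keys keeps the ordering total, so the marker row
# identifies an unambiguous position in the result set.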
# copied from glance/db/sqlalchemy/api.py def paginate_query(query, model, limit, sort_keys, marker=None, sort_dir=None, sort_dirs=None, offset=None): """Returns a query with sorting / pagination criteria added. Pagination works by requiring a unique sort_key, specified by sort_keys. (If sort_keys is not unique, then we risk looping through values.) We use the last row in the previous page as the 'marker' for pagination. So we must return values that follow the passed marker in the order. With a single-valued sort_key, this would be easy: sort_key > X. With a compound-values sort_key, (k1, k2, k3) we must do this to repeat the lexicographical ordering: (k1 > X1) or (k1 == X1 && k2 > X2) or (k1 == X1 && k2 == X2 && k3 > X3) We also have to cope with different sort_directions. Typically, the id of the last row is used as the client-facing pagination marker, then the actual marker object must be fetched from the db and passed in to us as marker. :param query: the query object to which we should add paging/sorting :param model: the ORM model class :param limit: maximum number of items to return :param sort_keys: array of attributes by which results should be sorted :param marker: the last item of the previous page; we returns the next results after this value. :param sort_dir: direction in which results should be sorted (asc, desc) :param sort_dirs: per-column array of sort_dirs, corresponding to sort_keys :param offset: the number of items to skip from the marker or from the first element. :rtype: sqlalchemy.orm.query.Query :return: The query with sorting/pagination added. """ if 'id' not in sort_keys: # TODO(justinsb): If this ever gives a false-positive, check # the actual primary key, rather than assuming its id LOG.warning('Id not in sort_keys; is sort_keys unique?') if sort_dir and sort_dirs: raise AssertionError('Both sort_dir and sort_dirs specified.') # Default the sort direction to ascending if sort_dirs is None and sort_dir is None: sort_dir = 'asc' # Ensure a per-column sort direction if sort_dirs is None: sort_dirs = [sort_dir for _sort_key in sort_keys] if len(sort_dirs) != len(sort_keys): raise AssertionError( 'sort_dirs length is not equal to sort_keys length.') # Add sorting for current_sort_key, current_sort_dir in zip(sort_keys, sort_dirs): sort_dir_func = { 'asc': sqlalchemy.asc, 'desc': sqlalchemy.desc, }[current_sort_dir] try: sort_key_attr = getattr(model, current_sort_key) except AttributeError: raise exception.InvalidInput(reason='Invalid sort key') if not api.is_orm_value(sort_key_attr): raise exception.InvalidInput(reason='Invalid sort key') query = query.order_by(sort_dir_func(sort_key_attr)) # Add pagination if marker is not None: marker_values = [] for sort_key in sort_keys: v = getattr(marker, sort_key) if v is None: v = _get_default_column_value(model, sort_key) marker_values.append(v) # Build up an array of sort criteria as in the docstring criteria_list = [] for i in range(0, len(sort_keys)): crit_attrs = [] for j in range(0, i): model_attr = getattr(model, sort_keys[j]) default = _get_default_column_value(model, sort_keys[j]) attr = sa_sql.expression.case( *[(model_attr.isnot(None), model_attr)], else_=default, ) crit_attrs.append((attr == marker_values[j])) model_attr = getattr(model, sort_keys[i]) default = _get_default_column_value(model, sort_keys[i]) attr = sa_sql.expression.case( *[(model_attr.isnot(None), model_attr)], else_=default, ) if isinstance(model_attr.type, sqlalchemy.Boolean): marker_values[i] = int(marker_values[i]) if sort_dirs[i] == 
'desc': crit_attrs.append((attr < marker_values[i])) elif sort_dirs[i] == 'asc': crit_attrs.append((attr > marker_values[i])) else: raise ValueError(_("Unknown sort direction, " "must be 'desc' or 'asc'")) criteria = sqlalchemy.sql.and_(*crit_attrs) criteria_list.append(criteria) f = sqlalchemy.sql.or_(*criteria_list) query = query.filter(f) if limit is not None: query = query.limit(limit) if offset: query = query.offset(offset) return query ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1759315577.071118 cinder-27.0.0/cinder/compute/0000775000175000017500000000000000000000000016007 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/compute/__init__.py0000664000175000017500000000207200000000000020121 0ustar00zuulzuul00000000000000# Copyright 2013 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg from oslo_utils import importutils compute_opts = [ cfg.StrOpt('compute_api_class', default='cinder.compute.nova.API', help='The full class name of the ' 'compute API class to use'), ] CONF = cfg.CONF CONF.register_opts(compute_opts) def API(): compute_api_class = CONF.compute_api_class cls = importutils.import_class(compute_api_class) return cls() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/compute/nova.py0000664000175000017500000002230700000000000017330 0ustar00zuulzuul00000000000000# Copyright 2013 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Handles all requests to Nova. """ from keystoneauth1 import exceptions as ks_exc from keystoneauth1 import identity from keystoneauth1 import loading as ks_loading from novaclient import api_versions from novaclient import client as nova_client from novaclient import exceptions as nova_exceptions from oslo_config import cfg from oslo_log import log as logging from requests import exceptions as request_exceptions from cinder.db import base from cinder import exception from cinder.message import api as message_api from cinder.message import message_field from cinder import service_auth nova_opts = [ cfg.StrOpt('region_name', help='Name of nova region to use. Useful if keystone manages ' 'more than one region.'), cfg.StrOpt('interface', default='public', choices=['public', 'admin', 'internal'], help='Type of the nova endpoint to use. 
This endpoint will ' 'be looked up in the keystone catalog and should be ' 'one of public, internal or admin.'), cfg.StrOpt('token_auth_url', help='The authentication URL for the nova connection when ' 'using the current user''s token'), ] NOVA_GROUP = 'nova' CONF = cfg.CONF nova_session_opts = ks_loading.get_session_conf_options() nova_auth_opts = ks_loading.get_auth_common_conf_options() CONF.register_opts(nova_opts, group=NOVA_GROUP) CONF.register_opts(nova_session_opts, group=NOVA_GROUP) CONF.register_opts(nova_auth_opts, group=NOVA_GROUP) LOG = logging.getLogger(__name__) NOVA_API_VERSION = "2.1" nova_extensions = [ext for ext in nova_client.discover_extensions(NOVA_API_VERSION) if ext.name in ("assisted_volume_snapshots", "list_extensions", "server_external_events")] def _get_identity_endpoint_from_sc(context): # Search for the identity endpoint in the service catalog for service in context.service_catalog: if service.get('type') != 'identity': continue for endpoint in service['endpoints']: if (not CONF[NOVA_GROUP].region_name or endpoint.get('region') == CONF[NOVA_GROUP].region_name): return endpoint.get(CONF[NOVA_GROUP].interface + 'URL') raise ks_exc.EndpointNotFound() def novaclient(context, privileged_user=False, timeout=None, api_version=None): """Returns a Nova client @param privileged_user: If True, use the account from configuration (requires 'auth_type' and the other usual Keystone authentication options to be set in the [nova] section) @param timeout: Number of seconds to wait for an answer before raising a Timeout exception (None to disable) @param api_version: api version of nova """ if privileged_user and CONF[NOVA_GROUP].auth_type: LOG.debug('Creating Keystone auth plugin from conf') n_auth = ks_loading.load_auth_from_conf_options(CONF, NOVA_GROUP) else: if CONF[NOVA_GROUP].token_auth_url: url = CONF[NOVA_GROUP].token_auth_url else: url = _get_identity_endpoint_from_sc(context) LOG.debug('Creating Keystone token plugin using URL: %s', url) n_auth = identity.Token(auth_url=url, token=context.auth_token, project_name=context.project_name, project_domain_id=context.project_domain_id) if CONF.auth_strategy == 'keystone': n_auth = service_auth.get_auth_plugin(context, auth=n_auth) keystone_session = ks_loading.load_session_from_conf_options( CONF, NOVA_GROUP, auth=n_auth) c = nova_client.Client( api_versions.APIVersion(api_version or NOVA_API_VERSION), session=keystone_session, insecure=CONF[NOVA_GROUP].insecure, timeout=timeout, region_name=CONF[NOVA_GROUP].region_name, endpoint_type=CONF[NOVA_GROUP].interface, cacert=CONF[NOVA_GROUP].cafile, global_request_id=context.global_id, extensions=nova_extensions) return c class API(base.Base): """API for interacting with novaclient.""" NotFound = nova_exceptions.NotFound def __init__(self): self.message_api = message_api.API() super().__init__() def _get_volume_extended_event(self, server_id, volume_id): return {'name': 'volume-extended', 'server_uuid': server_id, 'tag': volume_id} def _get_volume_reimaged_event(self, server_id, volume_id): return {'name': 'volume-reimaged', 'server_uuid': server_id, 'tag': volume_id} def _send_events(self, context, events, api_version=None): nova = novaclient(context, privileged_user=True, api_version=api_version) try: response = nova.server_external_events.create(events) except nova_exceptions.NotFound: LOG.warning('Nova returned NotFound for events: %s.', events) return False except Exception: LOG.exception('Failed to notify nova on events: %s.', events) return False else: if not 
isinstance(response, list): LOG.error('Error response returned from nova: %s.', response) return False response_error = False for event in response: code = event.get('code') if code is None: response_error = True continue if code != 200: LOG.warning( 'Nova event: %s returned with failed status.', event) else: LOG.info('Nova event response: %s.', event) if response_error: LOG.error('Error response returned from nova: %s.', response) return False return True def update_server_volume(self, context, server_id, src_volid, new_volume_id): nova = novaclient(context, privileged_user=True) nova.volumes.update_server_volume(server_id, src_volid, new_volume_id) def create_volume_snapshot(self, context, volume_id, create_info): nova = novaclient(context, privileged_user=True) # pylint: disable=E1101 nova.assisted_volume_snapshots.create( volume_id, create_info=create_info) def delete_volume_snapshot(self, context, snapshot_id, delete_info): nova = novaclient(context, privileged_user=True) # pylint: disable=E1101 nova.assisted_volume_snapshots.delete( snapshot_id, delete_info=delete_info) def get_server(self, context, server_id, privileged_user=False, timeout=None): try: return novaclient(context, privileged_user=privileged_user, timeout=timeout).servers.get(server_id) except nova_exceptions.NotFound: raise exception.ServerNotFound(uuid=server_id) except request_exceptions.Timeout: raise exception.APITimeout(service='Nova') def extend_volume(self, context, server_ids, volume_id): api_version = '2.51' events = [self._get_volume_extended_event(server_id, volume_id) for server_id in server_ids] result = self._send_events(context, events, api_version=api_version) if not result: self.message_api.create( context, message_field.Action.EXTEND_VOLUME, resource_uuid=volume_id, detail=message_field.Detail.NOTIFY_COMPUTE_SERVICE_FAILED) return result def reimage_volume(self, context, server_ids, volume_id): api_version = '2.93' events = [self._get_volume_reimaged_event(server_id, volume_id) for server_id in server_ids] result = self._send_events(context, events, api_version=api_version) if not result: self.message_api.create( context, message_field.Action.REIMAGE_VOLUME, resource_uuid=volume_id, detail=message_field.Detail.REIMAGE_VOLUME_FAILED) return result @staticmethod def get_server_volume(context, server_id, volume_id): # Use microversion that includes attachment_id nova = novaclient(context, api_version='2.89') return nova.volumes.get_server_volume(server_id, volume_id) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/context.py0000664000175000017500000003057100000000000016377 0ustar00zuulzuul00000000000000# Copyright 2011 OpenStack Foundation # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""RequestContext: context for requests that persist through all of cinder.""" import copy from typing import Any, Optional from keystoneauth1.access import service_catalog as ksa_service_catalog from keystoneauth1 import plugin from oslo_config import cfg from oslo_context import context from oslo_db.sqlalchemy import enginefacade from oslo_log import log as logging from oslo_utils import timeutils from cinder import exception from cinder.i18n import _ from cinder.objects import base as objects_base from cinder import policy context_opts = [ cfg.StrOpt('cinder_internal_tenant_project_id', help='ID of the project which will be used as the Cinder ' 'internal tenant.'), cfg.StrOpt('cinder_internal_tenant_user_id', help='ID of the user to be used in volume operations as the ' 'Cinder internal tenant.'), ] CONF = cfg.CONF CONF.register_opts(context_opts) LOG = logging.getLogger(__name__) class _ContextAuthPlugin(plugin.BaseAuthPlugin): """A keystoneauth auth plugin that uses the values from the Context. Ideally we would use the plugin provided by auth_token middleware however this plugin isn't serialized yet so we construct one from the serialized auth data. """ def __init__(self, auth_token, sc): super(_ContextAuthPlugin, self).__init__() self.auth_token = auth_token self.service_catalog = ksa_service_catalog.ServiceCatalogV2(sc) def get_token(self, *args, **kwargs): return self.auth_token def get_endpoint(self, session, service_type=None, interface=None, region_name=None, service_name=None, **kwargs): return self.service_catalog.url_for(service_type=service_type, service_name=service_name, interface=interface, region_name=region_name) @enginefacade.transaction_context_provider class RequestContext(context.RequestContext): """Security context and request information. Represents the user taking a given action within the system. """ def __init__(self, user_id: Optional[str] = None, project_id: Optional[str] = None, is_admin: Optional[bool] = None, read_deleted: Optional[str] = "no", project_name: Optional[str] = None, remote_address: Optional[str] = None, timestamp=None, quota_class=None, service_catalog: Optional[dict] = None, user_auth_plugin=None, message_resource_id = None, message_resource_type = None, message_action = None, **kwargs): """Initialize RequestContext. :param read_deleted: 'no' indicates deleted records are hidden, 'yes' indicates deleted records are visible, 'only' indicates that *only* deleted records are visible. :param overwrite: Set to False to ensure that the greenthread local copy of the index is not overwritten. """ # NOTE(smcginnis): To keep it compatible for code using positional # args, explicityly set user_id and project_id in kwargs. 
kwargs.setdefault('user_id', user_id) kwargs.setdefault('project_id', project_id) super(RequestContext, self).__init__(is_admin=is_admin, **kwargs) self.project_name = project_name self.read_deleted = read_deleted self.remote_address = remote_address if not timestamp: timestamp = timeutils.utcnow() elif isinstance(timestamp, str): timestamp = timeutils.parse_isotime(timestamp) self.timestamp = timestamp self.quota_class = quota_class self.message_resource_id = message_resource_id self.message_resource_type = message_resource_type self.message_action = message_action if service_catalog: # Only include required parts of service_catalog self.service_catalog = [s for s in service_catalog if s.get('type') in ('identity', 'compute', 'object-store', 'image', 'key-manager')] else: # if list is empty or none self.service_catalog = [] # We need to have RequestContext attributes defined # when policy.check_is_admin invokes request logging # to make it loggable. self.is_admin: Optional[bool] if self.is_admin is None: self.is_admin = policy.check_is_admin(self) elif self.is_admin and 'admin' not in self.roles: self.roles.append('admin') self.user_auth_plugin = user_auth_plugin def get_auth_plugin(self): if self.user_auth_plugin: return self.user_auth_plugin else: return _ContextAuthPlugin(self.auth_token, self.service_catalog) def _get_read_deleted(self) -> str: return self._read_deleted def _set_read_deleted(self, read_deleted: str) -> None: if read_deleted not in ('no', 'yes', 'only'): raise ValueError(_("read_deleted can only be one of 'no', " "'yes' or 'only', not %r") % read_deleted) self._read_deleted = read_deleted def _del_read_deleted(self) -> None: del self._read_deleted read_deleted = property(_get_read_deleted, _set_read_deleted, _del_read_deleted) def to_dict(self) -> dict[str, Any]: result = super(RequestContext, self).to_dict() result['user_id'] = self.user_id result['project_id'] = self.project_id result['project_name'] = self.project_name result['domain_id'] = self.domain_id result['read_deleted'] = self.read_deleted result['remote_address'] = self.remote_address result['timestamp'] = self.timestamp.isoformat() result['quota_class'] = self.quota_class result['service_catalog'] = self.service_catalog result['request_id'] = self.request_id result['message_resource_id'] = self.message_resource_id result['message_resource_type'] = self.message_resource_type result['message_action'] = self.message_action return result @classmethod def from_dict(cls, values: dict) -> 'RequestContext': return cls(user_id=values.get('user_id'), project_id=values.get('project_id'), project_name=values.get('project_name'), domain_id=values.get('domain_id'), read_deleted=values.get('read_deleted', 'no'), remote_address=values.get('remote_address'), timestamp=values.get('timestamp'), quota_class=values.get('quota_class'), service_catalog=values.get('service_catalog'), request_id=values.get('request_id'), global_request_id=values.get('global_request_id'), is_admin=values.get('is_admin'), roles=values.get('roles'), auth_token=values.get('auth_token'), user_domain_id=values.get('user_domain_id'), project_domain_id=values.get('project_domain_id'), message_resource_id = values.get('message_resource_id'), message_resource_type = values.get('message_resource_type'), message_action = values.get('message_action') ) def authorize(self, action: str, target: Optional[dict] = None, target_obj: Optional[dict] = None, fatal: bool = True): """Verify that the given action is valid on the target in this context. 
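For example, an API handler that restricts an operation to the resource
owner's project might call (illustrative action name and target)::

    context.authorize('volume:update',
                      target={'project_id': volume.project_id})

This raises cinder.exception.NotAuthorized if the policy check fails and
``fatal`` is True, and simply returns False if ``fatal`` is False.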
:param action: string representing the action to be checked. :param target: dictionary representing the object of the action for object creation this should be a dictionary representing the location of the object e.g. ``{'project_id': context.project_id}``. If None, then this default target will be considered: {'project_id': self.project_id, 'user_id': self.user_id} :param target_obj: dictionary representing the object which will be used to update target. :param fatal: if False, will return False when an exception.PolicyNotAuthorized occurs. :raises cinder.exception.NotAuthorized: if verification fails and fatal is True. :return: returns a non-False value (not necessarily "True") if authorized and False if not authorized and fatal is False. """ if target is None: target = {'project_id': self.project_id, 'user_id': self.user_id} if isinstance(target_obj, objects_base.CinderObject): # Turn object into dict so target.update can work target.update( target_obj.obj_to_primitive()['versioned_object.data'] or {}) # Ensure 'project_id' and 'user_id' attributes are captured. # Some objects (e.g. attachments) have a project_id attribute # that isn't present in the dict. The try/except wrappers avoid # lazy-load issues when the attribute doesn't exist. try: target['project_id'] = target_obj.project_id except Exception: pass try: target['user_id'] = target_obj.user_id except Exception: pass else: target.update(target_obj or {}) return policy.authorize(self, action, target, do_raise=fatal, exc=exception.PolicyNotAuthorized) def to_policy_values(self) -> dict: policy = super(RequestContext, self).to_policy_values() policy['is_admin'] = self.is_admin return policy def elevated(self, read_deleted: Optional[str] = None, overwrite: bool = False) -> 'RequestContext': """Return a version of this context with admin flag set.""" context = self.deepcopy() context.is_admin = True if 'admin' not in context.roles: context.roles.append('admin') if read_deleted is not None: context.read_deleted = read_deleted return context def deepcopy(self) -> 'RequestContext': return copy.deepcopy(self) def get_admin_context(read_deleted: Optional[str] = "no") -> RequestContext: return RequestContext(user_id=None, project_id=None, is_admin=True, read_deleted=read_deleted, overwrite=False) def get_internal_tenant_context() -> Optional[RequestContext]: """Build and return the Cinder internal tenant context object This request context will only work for internal Cinder operations. It will not be able to make requests to remote services. To do so it will need to use the keystone client to get an auth_token. """ project_id = CONF.cinder_internal_tenant_project_id user_id = CONF.cinder_internal_tenant_user_id if project_id and user_id: return RequestContext(user_id=user_id, project_id=project_id, is_admin=True, overwrite=False) else: LOG.warning('Unable to get internal tenant context: Missing ' 'required config parameters.') return None ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/coordination.py0000664000175000017500000002006200000000000017375 0ustar00zuulzuul00000000000000# Copyright 2015 Intel # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Coordination and locking utilities.""" import errno import glob import inspect import os import re import sys from typing import Callable, Optional import uuid import decorator from oslo_config import cfg from oslo_log import log from oslo_utils import timeutils from tooz import coordination from cinder import exception from cinder.i18n import _ from cinder import utils LOG = log.getLogger(__name__) coordination_opts = [ cfg.StrOpt('backend_url', secret=True, default='file://$state_path', help='The backend URL to use for distributed coordination.'), ] CONF = cfg.CONF CONF.register_opts(coordination_opts, group='coordination') class Coordinator(object): """Tooz coordination wrapper. Coordination member id is created from concatenated `prefix` and `agent_id` parameters. :param str agent_id: Agent identifier :param str prefix: Used to provide member identifier with a meaningful prefix. """ def __init__(self, agent_id: Optional[str] = None, prefix: str = ''): self.coordinator = None self.agent_id = agent_id or str(uuid.uuid4()) self.started = False self.prefix = prefix self._file_path = None def _get_file_path(self, backend_url): if backend_url.startswith('file://'): path = backend_url[7:] # Copied from TooZ's _normalize_path to get the same path they use if sys.platform == 'win32': path = re.sub(r'\\(?=\w:\\)', '', os.path.normpath(path)) return os.path.abspath(os.path.join(path, self.prefix)) return None def start(self) -> None: if self.started: return backend_url = cfg.CONF.coordination.backend_url # NOTE(bluex): Tooz expects member_id as a byte string. member_id = (self.prefix + self.agent_id).encode('ascii') self.coordinator = coordination.get_coordinator(backend_url, member_id) assert self.coordinator is not None self.coordinator.start(start_heart=True) self._file_path = self._get_file_path(backend_url) self.started = True def stop(self) -> None: """Disconnect from coordination backend and stop heartbeat.""" if self.started: if self.coordinator is not None: self.coordinator.stop() self.coordinator = None self.started = False def get_lock(self, name: str): """Return a Tooz backend lock. :param str name: The lock name that is used to identify it across all nodes. """ # NOTE(bluex): Tooz expects lock name as a byte string. lock_name = (self.prefix + name).encode('ascii') if self.coordinator is not None: return self.coordinator.get_lock(lock_name) else: raise exception.LockCreationFailed(_('Coordinator uninitialized.')) def remove_lock(self, glob_name): # Most locks clean up on release, but not the file lock, so we manually # clean them. 
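# The glob is appended to the backend's file path (which already ends with
# this coordinator's prefix), so callers pass a pattern matching every lock
# name they created for a resource, e.g. (illustrative pattern):
#
#     synchronized_remove('a1b2c3d4-*')
#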
def _err(file_name: str, exc: Exception) -> None: LOG.warning('Failed to cleanup lock %(name)s: %(exc)s', {'name': file_name, 'exc': exc}) if self._file_path: files = glob.glob(self._file_path + glob_name) for file_name in files: try: os.remove(file_name) except OSError as exc: if (exc.errno != errno.ENOENT): _err(file_name, exc) except Exception as exc: _err(file_name, exc) COORDINATOR = Coordinator(prefix='cinder-') def synchronized_remove(glob_name, coordinator=COORDINATOR): coordinator.remove_lock(glob_name) def __acquire(lock, blocking, f_name): """Acquire a lock and return the time when it was acquired.""" t1 = timeutils.now() name = utils.convert_str(lock.name) LOG.debug('Acquiring lock "%s" by "%s"', name, f_name) lock.acquire(blocking) t2 = timeutils.now() LOG.debug('Lock "%s" acquired by "%s" :: waited %0.3fs', name, f_name, t2 - t1) return t2 def __release(lock, acquired_time, f_name): """Release a lock ignoring exceptions.""" name = utils.convert_str(lock.name) try: lock.release() held = timeutils.now() - acquired_time LOG.debug('Lock "%s" released by "%s" :: held %0.3fs', name, f_name, held) except Exception as e: LOG.error('Failed to release lock "%s": %s', name, e) def synchronized(*lock_names: str, blocking: bool = True, coordinator: Coordinator = COORDINATOR) -> Callable: """Synchronization decorator. :param str lock_names: Arbitrary number of Lock names. :param blocking: If True, blocks until the lock is acquired. If False, raises exception when not acquired. Otherwise, the value is used as a timeout value and if lock is not acquired after this number of seconds exception is raised. This is a keyword only argument. :param coordinator: Coordinator class to use when creating lock. Defaults to the global coordinator. This is a keyword only argument. :raises tooz.coordination.LockAcquireFailed: if lock is not acquired Decorating a method like so:: @synchronized('mylock') def foo(self, *args): ... ensures that only one process will execute the foo method at a time. Different methods can share the same lock:: @synchronized('mylock') def foo(self, *args): ... @synchronized('mylock') def bar(self, *args): ... This way only one of either foo or bar can be executing at a time. Lock name can be formatted using Python format string syntax:: @synchronized('{f_name}-{vol.id}-{snap[name]}') def foo(self, vol, snap): ... Multiple locks can be requested simultaneously and the decorator will reorder the names by rendered lock names to prevent potential deadlocks. @synchronized('{f_name}-{vol.id}-{snap[name]}', '{f_name}-{vol.id}.delete') def foo(self, vol, snap): ... Available field names are: decorated function parameters and `f_name` as a decorated function name. """ @decorator.decorator def _synchronized(f, *a, **k) -> Callable: call_args = inspect.getcallargs(f, *a, **k) call_args['f_name'] = f.__name__ # Prevent deadlocks not duplicating and sorting them by name to always # acquire them in the same order. 
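# For example, decorating a method with
#     @synchronized('{f_name}-{vol.id}', '{f_name}-{vol.id}.delete')
# and calling foo(self, vol) with vol.id == 'abc' renders the templates to
# ['foo-abc', 'foo-abc.delete']; the sorted, de-duplicated list built below
# determines the acquisition order.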
names = sorted(set([name.format(**call_args) for name in lock_names])) locks = [coordinator.get_lock(name) for name in names] acquired_times = [] f_name = f.__name__ t1 = timeutils.now() try: if len(locks) > 1: # Don't pollute logs for single locks LOG.debug('Acquiring %s locks by %s', len(locks), f_name) for lock in locks: acquired_times.append(__acquire(lock, blocking, f_name)) if len(locks) > 1: t = timeutils.now() - t1 LOG.debug('Acquired %s locks by %s in %0.3fs', len(locks), f_name, t) return f(*a, **k) finally: for lock, acquired_time in zip(locks, acquired_times): __release(lock, acquired_time, f_name) return _synchronized ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1759315577.071118 cinder-27.0.0/cinder/db/0000775000175000017500000000000000000000000014720 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/db/__init__.py0000664000175000017500000000144200000000000017032 0ustar00zuulzuul00000000000000# Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ DB abstraction for Cinder """ from cinder.db.api import * # noqa ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/db/alembic.ini0000664000175000017500000000400600000000000017015 0ustar00zuulzuul00000000000000# A generic, single database configuration. [alembic] # path to migration scripts script_location = %(here)s/migrations # template used to generate migration files # file_template = %%(rev)s_%%(slug)s # timezone to use when rendering the date # within the migration file as well as the filename. # string value is passed to dateutil.tz.gettz() # leave blank for localtime # timezone = # max length of characters to apply to the # "slug" field # truncate_slug_length = 40 # set to 'true' to run the environment during # the 'revision' command, regardless of autogenerate # revision_environment = false # set to 'true' to allow .pyc and .pyo files without # a source .py file to be detected as revisions in the # versions/ directory # sourceless = false # version location specification; this defaults # to cinder/db/sqlalchemy/migrations/versions. When using multiple version # directories, initial revisions must be specified with --version-path # version_locations = %(here)s/bar %(here)s/bat cinder/db/sqlalchemy/migrations/versions # the output encoding used when revision files # are written from script.py.mako # output_encoding = utf-8 sqlalchemy.url = sqlite:///cinder.db [post_write_hooks] # post_write_hooks defines scripts or Python functions that are run # on newly generated revision scripts. 
See the documentation for further # detail and examples # format using "black" - use the console_scripts runner, against the "black" entrypoint # hooks=black # black.type=console_scripts # black.entrypoint=black # black.options=-l 79 # Logging configuration [loggers] keys = root,sqlalchemy,alembic [handlers] keys = console [formatters] keys = generic [logger_root] level = WARN handlers = console qualname = [logger_sqlalchemy] level = WARN handlers = qualname = sqlalchemy.engine [logger_alembic] level = INFO handlers = qualname = alembic [handler_console] class = StreamHandler args = (sys.stderr,) level = NOTSET formatter = generic [formatter_generic] format = %(levelname)-5.5s [%(name)s] %(message)s datefmt = %H:%M:%S ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/db/api.py0000664000175000017500000021312100000000000016043 0ustar00zuulzuul00000000000000# Copyright (c) 2011 X.commerce, a business unit of eBay Inc. # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Defines interface for DB access. Functions in this module are imported into the cinder.db namespace. Call these functions from cinder.db namespace, not the cinder.db.api namespace. All functions in this module return objects that implement a dictionary-like interface. Currently, many of these objects are sqlalchemy objects that implement a dictionary interface. However, a future goal is to have all of these objects be simple dictionaries. **Related Flags** :connection: string specifying the sqlalchemy connection to use, like: `sqlite:///var/lib/cinder/cinder.sqlite`. 
:enable_new_services: when adding a new service to the database, is it in the pool of available hardware (Default: True) """ from oslo_config import cfg from oslo_db import api as oslo_db_api from oslo_db import options as db_options from cinder.api import common from cinder.common import constants from cinder.i18n import _ db_opts = [ cfg.BoolOpt('enable_new_services', default=True, help='Services to be added to the available pool on create'), cfg.StrOpt('volume_name_template', default='volume-%s', help='Template string to be used to generate volume names'), cfg.StrOpt('snapshot_name_template', default='snapshot-%s', help='Template string to be used to generate snapshot names'), ] backup_opts = [ cfg.StrOpt('backup_name_template', default='backup-%s', help='Template string to be used to generate backup names'), ] CONF = cfg.CONF CONF.register_opts(db_opts) CONF.register_opts(backup_opts) db_options.set_defaults(CONF) _BACKEND_MAPPING = {'sqlalchemy': 'cinder.db.sqlalchemy.api'} IMPL = oslo_db_api.DBAPI.from_config(conf=CONF, backend_mapping=_BACKEND_MAPPING, lazy=True) # The maximum value a signed INT type may have MAX_INT = constants.DB_MAX_INT ################### def dispose_engine(): """Force the engine to establish new connections.""" # FIXME(jdg): When using sqlite if we do the dispose # we seem to lose our DB here. Adding this check # means we don't do the dispose, but we keep our sqlite DB # This likely isn't the best way to handle this if 'sqlite' not in IMPL.get_engine().name: return IMPL.dispose_engine() else: return ################### class Condition(object): """Class for normal condition values for conditional_update.""" def __init__(self, value, field=None): self.value = value # Field is optional and can be passed when getting the filter self.field = field def get_filter(self, model, field=None): return IMPL.condition_db_filter( model, self._get_field(field), self.value, ) def _get_field(self, field=None): # We must have a defined field on initialization or when called field = field or self.field if not field: raise ValueError(_('Condition has no field.')) return field class Not(Condition): """Class for negated condition values for conditional_update. By default NULL values will be treated like Python treats None instead of how SQL treats it. So for example when values are (1, 2) it will evaluate to True when we have value 3 or NULL, instead of only with 3 like SQL does. 
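    For illustration (the model and statuses are only an example), the class
    is typically combined with conditional_update like this::

        db.conditional_update(context, models.Volume,
                              {'status': 'maintenance'},
                              {'status': db.Not(['in-use', 'attaching'])})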
""" def __init__(self, value, field=None, auto_none=True): super(Not, self).__init__(value, field) self.auto_none = auto_none def get_filter(self, model, field=None): # If implementation has a specific method use it if hasattr(IMPL, 'condition_not_db_filter'): return IMPL.condition_not_db_filter(model, self._get_field(field), self.value, self.auto_none) # Otherwise non negated object must adming ~ operator for not return ~super(Not, self).get_filter(model, field) class Case(object): """Class for conditional value selection for conditional_update.""" def __init__(self, whens, value=None, else_=None): self.whens = whens self.value = value self.else_ = else_ ################### def resource_exists(context, model, resource_id): return IMPL.resource_exists(context, model, resource_id) def get_model_for_versioned_object(versioned_object): return IMPL.get_model_for_versioned_object(versioned_object) def get_by_id(context, model, id, *args, **kwargs): return IMPL.get_by_id(context, model, id, *args, **kwargs) ################### def is_orm_value(obj): """Check if object is an ORM field.""" return IMPL.is_orm_value(obj) def conditional_update( context, model, values, expected_values, filters=None, include_deleted='no', project_only=False, order=None, ): """Compare-and-swap conditional update. Update will only occur in the DB if conditions are met. We have 4 different condition types we can use in expected_values: - Equality: {'status': 'available'} - Inequality: {'status': vol_obj.Not('deleting')} - In range: {'status': ['available', 'error'] - Not in range: {'status': vol_obj.Not(['in-use', 'attaching']) Method accepts additional filters, which are basically anything that can be passed to a sqlalchemy query's filter method, for example: .. code-block:: python [~sql.exists().where(models.Volume.id == models.Snapshot.volume_id)] We can select values based on conditions using Case objects in the 'values' argument. For example: .. code-block:: python has_snapshot_filter = sql.exists().where( models.Snapshot.volume_id == models.Volume.id ) case_values = db.Case( [(has_snapshot_filter, 'has-snapshot')], else_='no-snapshot' ) db.conditional_update( context, models.Volume, {'status': case_values}, {'status': 'available'}, ) And we can use DB fields for example to store previous status in the corresponding field even though we don't know which value is in the db from those we allowed: .. code-block:: python db.conditional_update( context, models.Volume, {'status': 'deleting', 'previous_status': models.Volume.status}, {'status': ('available', 'error')}, ) :param values: Dictionary of key-values to update in the DB. :param expected_values: Dictionary of conditions that must be met for the update to be executed. :param filters: Iterable with additional filters. :param include_deleted: Should the update include deleted items, this is equivalent to read_deleted. :param project_only: Should the query be limited to context's project. :param order: Specific order of fields in which to update the values :returns: Boolean indicating whether db rows were updated. """ return IMPL.conditional_update( context, model, values, expected_values, filters, include_deleted, project_only, order, ) ################### def service_destroy(context, service_id): """Destroy the service or raise if it does not exist.""" return IMPL.service_destroy(context, service_id) def service_get(context, service_id=None, backend_match_level=None, **filters): """Get a service that matches the criteria. 
A possible filter is is_up=True and it will filter nodes that are down. :param service_id: Id of the service. :param filters: Filters for the query in the form of key/value. :param backend_match_level: 'pool', 'backend', or 'host' for host and cluster filters (as defined in _filter_host method) :raise ServiceNotFound: If service doesn't exist. """ return IMPL.service_get(context, service_id, backend_match_level, **filters) def service_get_all(context, backend_match_level=None, **filters): """Get all services that match the criteria. A possible filter is is_up=True and it will filter nodes that are down, as well as host_or_cluster, which lets you look for services using both of these properties. :param filters: Filters for the query in the form of key/value arguments. :param backend_match_level: 'pool', 'backend', or 'host' for host and cluster filters (as defined in _filter_host method) """ return IMPL.service_get_all(context, backend_match_level, **filters) def service_create(context, values): """Create a service from the values dictionary.""" return IMPL.service_create(context, values) def service_update(context, service_id, values, retry=True): """Set the given properties on a service and update it. Raises NotFound if service does not exist. """ return IMPL.service_update(context, service_id, values, retry) def service_get_by_uuid(context, service_uuid): """Get a service by its uuid. Return Service ref or raise if it does not exist. """ return IMPL.service_get_by_uuid(context, service_uuid) ############### def is_backend_frozen(context, host, cluster_name): """Check if a storage backend is frozen based on host and cluster_name.""" return IMPL.is_backend_frozen(context, host, cluster_name) ############### def cluster_get(context, id=None, is_up=None, get_services=False, services_summary=False, read_deleted='no', name_match_level=None, **filters): """Get a cluster that matches the criteria. :param id: Id of the cluster. :param is_up: Boolean value to filter based on the cluster's up status. :param get_services: If we want to load all services from this cluster. :param services_summary: If we want to load num_hosts and num_down_hosts fields. :param read_deleted: Filtering based on delete status. Default value is "no". :param name_match_level: 'pool', 'backend', or 'host' for name filter (as defined in _filter_host method) :param filters: Field based filters in the form of key/value. :raise ClusterNotFound: If cluster doesn't exist. """ return IMPL.cluster_get(context, id, is_up, get_services, services_summary, read_deleted, name_match_level, **filters) def cluster_get_all(context, is_up=None, get_services=False, services_summary=False, read_deleted='no', name_match_level=None, **filters): """Get all clusters that match the criteria. :param is_up: Boolean value to filter based on the cluster's up status. :param get_services: If we want to load all services from this cluster. :param services_summary: If we want to load num_hosts and num_down_hosts fields. :param read_deleted: Filtering based on delete status. Default value is "no". :param name_match_level: 'pool', 'backend', or 'host' for name filter (as defined in _filter_host method) :param filters: Field based filters in the form of key/value.
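    A minimal usage sketch (filter values are illustrative)::

        # Summaries for every cluster that currently has services up.
        clusters = db.cluster_get_all(context, is_up=True,
                                      services_summary=True,
                                      binary='cinder-volume')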
""" return IMPL.cluster_get_all(context, is_up, get_services, services_summary, read_deleted, name_match_level, **filters) def cluster_create(context, values): """Create a cluster from the values dictionary.""" return IMPL.cluster_create(context, values) def cluster_update(context, cluster_id, values): """Set the given properties on an cluster and update it. Raises ClusterNotFound if cluster does not exist. """ return IMPL.cluster_update(context, cluster_id, values) def cluster_destroy(context, cluster_id): """Destroy the cluster or raise if it does not exist or has hosts. :raise ClusterNotFound: If cluster doesn't exist. """ return IMPL.cluster_destroy(context, cluster_id) ############### def volume_attach(context, values): """Attach a volume.""" return IMPL.volume_attach(context, values) def volume_attached( context, attachment_id, instance_uuid, host_name, mountpoint, attach_mode='rw', mark_attached=True, ): """Ensure that a volume is set as attached.""" return IMPL.volume_attached( context, attachment_id, instance_uuid, host_name, mountpoint, attach_mode, mark_attached, ) def volume_create(context, values): """Create a volume from the values dictionary.""" return IMPL.volume_create(context, values) def volume_data_get_for_host(context, host, count_only=False): """Get (volume_count, gigabytes) for project.""" return IMPL.volume_data_get_for_host(context, host, count_only) def volume_data_get_for_project(context, project_id, host=None): """Get (volume_count, gigabytes) for project.""" return IMPL.volume_data_get_for_project(context, project_id, host=host) def volume_destroy(context, volume_id): """Destroy the volume or raise if it does not exist.""" return IMPL.volume_destroy(context, volume_id) def volume_detached(context, volume_id, attachment_id): """Ensure that a volume is set as detached.""" return IMPL.volume_detached(context, volume_id, attachment_id) def volume_get(context, volume_id): """Get a volume or raise if it does not exist.""" return IMPL.volume_get(context, volume_id) def volume_get_all(context, marker=None, limit=None, sort_keys=None, sort_dirs=None, filters=None, offset=None): """Get all volumes.""" return IMPL.volume_get_all(context, marker, limit, sort_keys=sort_keys, sort_dirs=sort_dirs, filters=filters, offset=offset) def calculate_resource_count(context, resource_type, filters): return IMPL.calculate_resource_count(context, resource_type, filters) def volume_get_all_by_host(context, host, filters=None): """Get all volumes belonging to a host.""" return IMPL.volume_get_all_by_host(context, host, filters=filters) def volume_update_all_by_service(context): """Update all volumes associated with an old service.""" return IMPL.volume_update_all_by_service(context) def volume_get_all_by_group(context, group_id, filters=None): """Get all volumes belonging to a consistency group.""" return IMPL.volume_get_all_by_group(context, group_id, filters=filters) def volume_get_all_by_generic_group(context, group_id, filters=None): """Get all volumes belonging to a generic volume group.""" return IMPL.volume_get_all_by_generic_group(context, group_id, filters=filters) def volume_get_all_by_project(context, project_id, marker, limit, sort_keys=None, sort_dirs=None, filters=None, offset=None): """Get all volumes belonging to a project.""" return IMPL.volume_get_all_by_project(context, project_id, marker, limit, sort_keys=sort_keys, sort_dirs=sort_dirs, filters=filters, offset=offset) def get_volume_summary(context, project_only, filters=None): """Get volume summary.""" return 
IMPL.get_volume_summary(context, project_only, filters) def volume_update(context, volume_id, values): """Set the given properties on a volume and update it. Raises NotFound if volume does not exist. """ return IMPL.volume_update(context, volume_id, values) def volumes_update(context, values_list): """Set the given properties on a list of volumes and update them. Raises NotFound if a volume does not exist. """ return IMPL.volumes_update(context, values_list) def volume_include_in_cluster(context, cluster, partial_rename=True, **filters): """Include all volumes matching the filters into a cluster. When partial_rename is set we will not set the cluster_name with cluster parameter value directly, we'll replace provided cluster_name or host filter value with cluster instead. This is useful when we want to replace just the cluster name but leave the backend and pool information as it is. If we are using cluster_name to filter, we'll use that same DB field to replace the cluster value and leave the rest as it is. Likewise if we use the host to filter. Returns the number of volumes that have been changed. """ return IMPL.volume_include_in_cluster(context, cluster, partial_rename, **filters) def volume_attachment_update(context, attachment_id, values): return IMPL.volume_attachment_update(context, attachment_id, values) def volume_attachment_get(context, attachment_id): return IMPL.volume_attachment_get(context, attachment_id) def volume_attachment_get_all_by_volume_id(context, volume_id): return IMPL.volume_attachment_get_all_by_volume_id(context, volume_id) def volume_attachment_get_all_by_host(context, host, filters=None): # FIXME(jdg): Not using filters return IMPL.volume_attachment_get_all_by_host(context, host) def volume_attachment_get_all_by_instance_uuid(context, instance_uuid, filters=None): # FIXME(jdg): Not using filters return IMPL.volume_attachment_get_all_by_instance_uuid(context, instance_uuid) def volume_attachment_get_all(context, filters=None, marker=None, limit=None, offset=None, sort_keys=None, sort_dirs=None): return IMPL.volume_attachment_get_all(context, filters, marker, limit, offset, sort_keys, sort_dirs) def volume_attachment_get_all_by_project(context, project_id, filters=None, marker=None, limit=None, offset=None, sort_keys=None, sort_dirs=None): return IMPL.volume_attachment_get_all_by_project(context, project_id, filters, marker, limit, offset, sort_keys, sort_dirs) def attachment_destroy(context, attachment_id): """Destroy the attachment or raise if it does not exist.""" return IMPL.attachment_destroy(context, attachment_id) def volume_update_status_based_on_attachment(context, volume_id): """Update volume status according to attached instance id""" return IMPL.volume_update_status_based_on_attachment(context, volume_id) def volume_has_snapshots_filter(): return IMPL.volume_has_snapshots_filter() def volume_has_undeletable_snapshots_filter(): return IMPL.volume_has_undeletable_snapshots_filter() def volume_has_snapshots_in_a_cgsnapshot_filter(): return IMPL.volume_has_snapshots_in_a_cgsnapshot_filter() def volume_has_attachments_filter(): return IMPL.volume_has_attachments_filter() def volume_qos_allows_retype(new_vol_type): return IMPL.volume_qos_allows_retype(new_vol_type) def volume_has_other_project_snp_filter(): return IMPL.volume_has_other_project_snp_filter() #################### def snapshot_create(context, values): """Create a snapshot from the values dictionary.""" return IMPL.snapshot_create(context, values) def snapshot_destroy(context, snapshot_id): 
"""Destroy the snapshot or raise if it does not exist.""" return IMPL.snapshot_destroy(context, snapshot_id) def snapshot_get(context, snapshot_id): """Get a snapshot or raise if it does not exist.""" return IMPL.snapshot_get(context, snapshot_id) def snapshot_get_all(context, filters=None, marker=None, limit=None, sort_keys=None, sort_dirs=None, offset=None): """Get all snapshots.""" return IMPL.snapshot_get_all(context, filters, marker, limit, sort_keys, sort_dirs, offset) def snapshot_get_all_by_project(context, project_id, filters=None, marker=None, limit=None, sort_keys=None, sort_dirs=None, offset=None): """Get all snapshots belonging to a project.""" return IMPL.snapshot_get_all_by_project(context, project_id, filters, marker, limit, sort_keys, sort_dirs, offset) def snapshot_get_all_by_host(context, host, filters=None): """Get all snapshots belonging to a host. :param host: Include include snapshots only for specified host. :param filters: Filters for the query in the form of key/value. """ return IMPL.snapshot_get_all_by_host(context, host, filters) def snapshot_get_all_for_cgsnapshot(context, cgsnapshot_id): """Get all snapshots belonging to a cgsnapshot.""" return IMPL.snapshot_get_all_for_cgsnapshot(context, cgsnapshot_id) def snapshot_get_all_for_group_snapshot(context, group_snapshot_id): """Get all snapshots belonging to a group snapshot.""" return IMPL.snapshot_get_all_for_group_snapshot(context, group_snapshot_id) def snapshot_get_all_for_volume(context, volume_id): """Get all snapshots for a volume.""" return IMPL.snapshot_get_all_for_volume(context, volume_id) def snapshot_get_latest_for_volume(context, volume_id): """Get latest snapshot for a volume""" return IMPL.snapshot_get_latest_for_volume(context, volume_id) def snapshot_update(context, snapshot_id, values): """Set the given properties on an snapshot and update it. Raises NotFound if snapshot does not exist. """ return IMPL.snapshot_update(context, snapshot_id, values) def snapshot_data_get_for_project(context, project_id, volume_type_id=None, host=None): """Get count and gigabytes used for snapshots for specified project.""" return IMPL.snapshot_data_get_for_project(context, project_id, volume_type_id, host=host) def snapshot_get_all_active_by_window(context, begin, end=None, project_id=None): """Get all the snapshots inside the window. Specifying a project_id will filter for a certain project. 
""" return IMPL.snapshot_get_all_active_by_window(context, begin, end, project_id) def get_snapshot_summary(context, project_only, filters=None): """Get snapshot summary.""" return IMPL.get_snapshot_summary(context, project_only, filters) #################### def snapshot_metadata_get(context, snapshot_id): """Get all metadata for a snapshot.""" return IMPL.snapshot_metadata_get(context, snapshot_id) def snapshot_metadata_delete(context, snapshot_id, key): """Delete the given metadata item.""" return IMPL.snapshot_metadata_delete(context, snapshot_id, key) def snapshot_metadata_update(context, snapshot_id, metadata, delete): """Update metadata if it exists, otherwise create it.""" return IMPL.snapshot_metadata_update(context, snapshot_id, metadata, delete) #################### def volume_metadata_get(context, volume_id): """Get all metadata for a volume.""" return IMPL.volume_metadata_get(context, volume_id) def volume_metadata_delete(context, volume_id, key, meta_type=common.METADATA_TYPES.user): """Delete the given metadata item.""" return IMPL.volume_metadata_delete(context, volume_id, key, meta_type) def volume_metadata_update(context, volume_id, metadata, delete, meta_type=common.METADATA_TYPES.user): """Update metadata if it exists, otherwise create it.""" return IMPL.volume_metadata_update(context, volume_id, metadata, delete, meta_type) ################## def volume_admin_metadata_get(context, volume_id): """Get all administration metadata for a volume.""" return IMPL.volume_admin_metadata_get(context, volume_id) def volume_admin_metadata_delete(context, volume_id, key): """Delete the given metadata item.""" return IMPL.volume_admin_metadata_delete(context, volume_id, key) def volume_admin_metadata_update(context, volume_id, metadata, delete, add=True, update=True): """Update metadata if it exists, otherwise create it.""" return IMPL.volume_admin_metadata_update(context, volume_id, metadata, delete, add, update) ################## def volume_type_create(context, values, projects=None): """Create a new volume type.""" return IMPL.volume_type_create(context, values, projects) def volume_type_update(context, volume_type_id, values): return IMPL.volume_type_update(context, volume_type_id, values) def volume_type_get_all(context, inactive=False, filters=None, marker=None, limit=None, sort_keys=None, sort_dirs=None, offset=None, list_result=False): """Get all volume types. :param context: context to query under :param inactive: Include inactive volume types to the result set :param filters: Filters for the query in the form of key/value. :param marker: the last item of the previous page, used to determine the next page of results to return :param limit: maximum number of items to return :param sort_keys: list of attributes by which results should be sorted, paired with corresponding item in sort_dirs :param sort_dirs: list of directions in which results should be sorted, paired with corresponding item in sort_keys :param list_result: For compatibility, if list_result = True, return a list instead of dict. 
:is_public: Filter volume types based on visibility: * **True**: List public volume types only * **False**: List private volume types only * **None**: List both public and private volume types :returns: list/dict of matching volume types """ return IMPL.volume_type_get_all(context, inactive, filters, marker=marker, limit=limit, sort_keys=sort_keys, sort_dirs=sort_dirs, offset=offset, list_result=list_result) def volume_type_get(context, id, inactive=False, expected_fields=None): """Get volume type by id. :param context: context to query under :param id: Volume type id to get. :param inactive: Consider inactive volume types when searching :param expected_fields: Return those additional fields. Supported fields are: projects. :returns: volume type """ return IMPL.volume_type_get(context, id, inactive, expected_fields) def volume_type_get_by_name(context, name): """Get volume type by name.""" return IMPL.volume_type_get_by_name(context, name) def volume_types_get_by_name_or_id(context, volume_type_list): """Get volume types by name or id.""" return IMPL.volume_types_get_by_name_or_id(context, volume_type_list) def volume_type_qos_associations_get(context, qos_specs_id, inactive=False): """Get volume types that are associated with specific qos specs.""" return IMPL.volume_type_qos_associations_get(context, qos_specs_id, inactive) def volume_type_qos_associate(context, type_id, qos_specs_id): """Associate a volume type with specific qos specs.""" return IMPL.volume_type_qos_associate(context, type_id, qos_specs_id) def volume_type_qos_disassociate(context, qos_specs_id, type_id): """Disassociate a volume type from specific qos specs.""" return IMPL.volume_type_qos_disassociate(context, qos_specs_id, type_id) def volume_type_qos_disassociate_all(context, qos_specs_id): """Disassociate all volume types from specific qos specs.""" return IMPL.volume_type_qos_disassociate_all(context, qos_specs_id) def volume_type_qos_specs_get(context, type_id): """Get all qos specs for given volume type.""" return IMPL.volume_type_qos_specs_get(context, type_id) def volume_type_destroy(context, type_id): """Delete a volume type.""" return IMPL.volume_type_destroy(context, type_id) def volume_get_all_active_by_window(context, begin, end=None, project_id=None): """Get all the volumes inside the window. Specifying a project_id will filter for a certain project. 
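    A usage sketch with hypothetical dates and project::

        begin = datetime.datetime(2024, 1, 1)
        end = datetime.datetime(2024, 2, 1)
        volumes = db.volume_get_all_active_by_window(context, begin, end,
                                                     project_id='demo')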
""" return IMPL.volume_get_all_active_by_window(context, begin, end, project_id) def volume_type_access_get_all(context, type_id): """Get all volume type access of a volume type.""" return IMPL.volume_type_access_get_all(context, type_id) def volume_type_access_add(context, type_id, project_id): """Add volume type access for project.""" return IMPL.volume_type_access_add(context, type_id, project_id) def volume_type_access_remove(context, type_id, project_id): """Remove volume type access for project.""" return IMPL.volume_type_access_remove(context, type_id, project_id) def project_default_volume_type_set(context, volume_type_id, project_id): """Set default volume type for a project""" return IMPL.project_default_volume_type_set(context, volume_type_id, project_id) def project_default_volume_type_get(context, project_id=None): """Get default volume type for a project""" return IMPL.project_default_volume_type_get(context, project_id) def project_default_volume_type_unset(context, project_id): """Unset default volume type for a project (hard delete)""" return IMPL.project_default_volume_type_unset(context, project_id) def get_all_projects_with_default_type(context, volume_type_id): """Get all the projects associated with a default type""" return IMPL.get_all_projects_with_default_type(context, volume_type_id) #################### def group_type_create(context, values, projects=None): """Create a new group type.""" return IMPL.group_type_create(context, values, projects) def group_type_update(context, group_type_id, values): return IMPL.group_type_update(context, group_type_id, values) def group_type_get_all(context, inactive=False, filters=None, marker=None, limit=None, sort_keys=None, sort_dirs=None, offset=None, list_result=False): """Get all group types. :param context: context to query under :param inactive: Include inactive group types to the result set :param filters: Filters for the query in the form of key/value. :param marker: the last item of the previous page, used to determine the next page of results to return :param limit: maximum number of items to return :param sort_keys: list of attributes by which results should be sorted, paired with corresponding item in sort_dirs :param sort_dirs: list of directions in which results should be sorted, paired with corresponding item in sort_keys :param list_result: For compatibility, if list_result = True, return a list instead of dict. :is_public: Filter group types based on visibility: * **True**: List public group types only * **False**: List private group types only * **None**: List both public and private group types :returns: list/dict of matching group types """ return IMPL.group_type_get_all(context, inactive, filters, marker=marker, limit=limit, sort_keys=sort_keys, sort_dirs=sort_dirs, offset=offset, list_result=list_result) def group_type_get(context, id, inactive=False, expected_fields=None): """Get group type by id. :param context: context to query under :param id: Group type id to get. :param inactive: Consider inactive group types when searching :param expected_fields: Return those additional fields. Supported fields are: projects. 
:returns: group type """ return IMPL.group_type_get(context, id, inactive, expected_fields) def group_type_get_by_name(context, name): """Get group type by name.""" return IMPL.group_type_get_by_name(context, name) def group_types_get_by_name_or_id(context, group_type_list): """Get group types by name or id.""" return IMPL.group_types_get_by_name_or_id(context, group_type_list) def group_type_destroy(context, type_id): """Delete a group type.""" return IMPL.group_type_destroy(context, type_id) def group_type_access_get_all(context, type_id): """Get all group type access of a group type.""" return IMPL.group_type_access_get_all(context, type_id) def group_type_access_add(context, type_id, project_id): """Add group type access for project.""" return IMPL.group_type_access_add(context, type_id, project_id) def group_type_access_remove(context, type_id, project_id): """Remove group type access for project.""" return IMPL.group_type_access_remove(context, type_id, project_id) def volume_type_get_all_by_group(context, group_id): """Get all volumes in a group.""" return IMPL.volume_type_get_all_by_group(context, group_id) #################### def volume_type_extra_specs_get(context, volume_type_id): """Get all extra specs for a volume type.""" return IMPL.volume_type_extra_specs_get(context, volume_type_id) def volume_type_extra_specs_delete(context, volume_type_id, key): """Delete the given extra specs item.""" return IMPL.volume_type_extra_specs_delete(context, volume_type_id, key) def volume_type_extra_specs_update_or_create( context, volume_type_id, extra_specs, ): """Create or update volume type extra specs. This adds or modifies the key/value pairs specified in the extra specs dict argument. """ return IMPL.volume_type_extra_specs_update_or_create( context, volume_type_id, extra_specs, ) ################### def group_type_specs_get(context, group_type_id): """Get all group specs for a group type.""" return IMPL.group_type_specs_get(context, group_type_id) def group_type_specs_delete(context, group_type_id, key): """Delete the given group specs item.""" return IMPL.group_type_specs_delete(context, group_type_id, key) def group_type_specs_update_or_create(context, group_type_id, group_specs): """Create or update group type specs. This adds or modifies the key/value pairs specified in the group specs dict argument. 
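    For example, enabling consistent group snapshots on a group type (the
    spec key is shown for illustration only)::

        db.group_type_specs_update_or_create(
            context, group_type_id,
            {'consistent_group_snapshot_enabled': '<is> True'})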
""" return IMPL.group_type_specs_update_or_create(context, group_type_id, group_specs) ################### def volume_type_encryption_get(context, volume_type_id): return IMPL.volume_type_encryption_get(context, volume_type_id) def volume_type_encryption_delete(context, volume_type_id): return IMPL.volume_type_encryption_delete(context, volume_type_id) def volume_type_encryption_create(context, volume_type_id, values): return IMPL.volume_type_encryption_create(context, volume_type_id, values) def volume_type_encryption_update(context, volume_type_id, values): return IMPL.volume_type_encryption_update(context, volume_type_id, values) def volume_type_encryption_volume_get(context, volume_type_id): return IMPL.volume_type_encryption_volume_get(context, volume_type_id) def volume_encryption_metadata_get(context, volume_id): return IMPL.volume_encryption_metadata_get(context, volume_id) ################### def qos_specs_create(context, values): """Create a qos_specs.""" return IMPL.qos_specs_create(context, values) def qos_specs_get(context, qos_specs_id, inactive=False): """Get all specification for a given qos_specs.""" return IMPL.qos_specs_get(context, qos_specs_id, inactive) def qos_specs_get_all(context, filters=None, marker=None, limit=None, offset=None, sort_keys=None, sort_dirs=None): """Get all qos_specs.""" return IMPL.qos_specs_get_all(context, filters=filters, marker=marker, limit=limit, offset=offset, sort_keys=sort_keys, sort_dirs=sort_dirs) def qos_specs_get_by_name(context, name, inactive=False): """Get all specification for a given qos_specs.""" return IMPL.qos_specs_get_by_name(context, name, inactive) def qos_specs_associations_get(context, qos_specs_id): """Get all associated volume types for a given qos_specs.""" return IMPL.qos_specs_associations_get(context, qos_specs_id) def qos_specs_associate(context, qos_specs_id, type_id): """Associate qos_specs from volume type.""" return IMPL.qos_specs_associate(context, qos_specs_id, type_id) def qos_specs_disassociate(context, qos_specs_id, type_id): """Disassociate qos_specs from volume type.""" return IMPL.qos_specs_disassociate(context, qos_specs_id, type_id) def qos_specs_disassociate_all(context, qos_specs_id): """Disassociate qos_specs from all entities.""" return IMPL.qos_specs_disassociate_all(context, qos_specs_id) def qos_specs_delete(context, qos_specs_id): """Delete the qos_specs.""" return IMPL.qos_specs_delete(context, qos_specs_id) def qos_specs_item_delete(context, qos_specs_id, key): """Delete specified key in the qos_specs.""" return IMPL.qos_specs_item_delete(context, qos_specs_id, key) def qos_specs_update(context, qos_specs_id, values): """Update qos specs. This adds or modifies the key/value pairs specified in the specs dict argument for a given qos_specs. 
""" return IMPL.qos_specs_update(context, qos_specs_id, values) ################### def volume_glance_metadata_create(context, volume_id, key, value): """Update the Glance metadata for the specified volume.""" return IMPL.volume_glance_metadata_create(context, volume_id, key, value) def volume_glance_metadata_bulk_create(context, volume_id, metadata): """Add Glance metadata for specified volume (multiple pairs).""" return IMPL.volume_glance_metadata_bulk_create(context, volume_id, metadata) def volume_glance_metadata_get_all(context): """Return the glance metadata for all volumes.""" return IMPL.volume_glance_metadata_get_all(context) def volume_glance_metadata_get(context, volume_id): """Return the glance metadata for a volume.""" return IMPL.volume_glance_metadata_get(context, volume_id) def volume_glance_metadata_list_get(context, volume_id_list): """Return the glance metadata for a volume list.""" return IMPL.volume_glance_metadata_list_get(context, volume_id_list) def volume_snapshot_glance_metadata_get(context, snapshot_id): """Return the Glance metadata for the specified snapshot.""" return IMPL.volume_snapshot_glance_metadata_get(context, snapshot_id) def volume_glance_metadata_copy_to_snapshot(context, snapshot_id, volume_id): """Update the Glance metadata for a snapshot. This will copy all of the key:value pairs from the originating volume, to ensure that a volume created from the snapshot will retain the original metadata. """ return IMPL.volume_glance_metadata_copy_to_snapshot(context, snapshot_id, volume_id) def volume_glance_metadata_copy_to_volume(context, volume_id, snapshot_id): """Update the Glance metadata from a volume (created from a snapshot). This will copy all of the key:value pairs from the originating snapshot, to ensure that the Glance metadata from the original volume is retained. """ return IMPL.volume_glance_metadata_copy_to_volume(context, volume_id, snapshot_id) def volume_glance_metadata_delete_by_volume(context, volume_id): """Delete the glance metadata for a volume.""" return IMPL.volume_glance_metadata_delete_by_volume(context, volume_id) def volume_glance_metadata_delete_by_snapshot(context, snapshot_id): """Delete the glance metadata for a snapshot.""" return IMPL.volume_glance_metadata_delete_by_snapshot(context, snapshot_id) def volume_glance_metadata_copy_from_volume_to_volume(context, src_volume_id, volume_id): """Update the Glance metadata for a volume. Update the Glance metadata for a volume by copying all of the key:value pairs from the originating volume. This is so that a volume created from the volume (clone) will retain the original metadata. 
""" return IMPL.volume_glance_metadata_copy_from_volume_to_volume( context, src_volume_id, volume_id) ################### def quota_create(context, project_id, resource, limit): """Create a quota for the given project and resource.""" return IMPL.quota_create(context, project_id, resource, limit) def quota_get(context, project_id, resource): """Retrieve a quota or raise if it does not exist.""" return IMPL.quota_get(context, project_id, resource) def quota_get_all_by_project(context, project_id): """Retrieve all quotas associated with a given project.""" return IMPL.quota_get_all_by_project(context, project_id) def quota_update(context, project_id, resource, limit): """Update a quota or raise if it does not exist.""" return IMPL.quota_update(context, project_id, resource, limit) def quota_update_resource(context, old_res, new_res): """Update resource of quotas.""" return IMPL.quota_update_resource(context, old_res, new_res) def quota_destroy(context, project_id, resource): """Destroy the quota or raise if it does not exist.""" return IMPL.quota_destroy(context, project_id, resource) ################### def quota_class_create(context, class_name, resource, limit): """Create a quota class for the given name and resource.""" return IMPL.quota_class_create(context, class_name, resource, limit) def quota_class_get(context, class_name, resource): """Retrieve a quota class or raise if it does not exist.""" return IMPL.quota_class_get(context, class_name, resource) def quota_class_get_defaults(context): """Retrieve all default quotas.""" return IMPL.quota_class_get_defaults(context) def quota_class_get_all_by_name(context, class_name): """Retrieve all quotas associated with a given quota class.""" return IMPL.quota_class_get_all_by_name(context, class_name) def quota_class_update(context, class_name, resource, limit): """Update a quota class or raise if it does not exist.""" return IMPL.quota_class_update(context, class_name, resource, limit) def quota_class_update_resource(context, old_res, new_res): """Update resource name in quota_class.""" return IMPL.quota_class_update_resource(context, old_res, new_res) def quota_class_destroy(context, class_name, resource): """Destroy the quota class or raise if it does not exist.""" return IMPL.quota_class_destroy(context, class_name, resource) def quota_class_destroy_all_by_name(context, class_name): """Destroy all quotas associated with a given quota class.""" return IMPL.quota_class_destroy_all_by_name(context, class_name) ################### def quota_usage_get(context, project_id, resource): """Retrieve a quota usage or raise if it does not exist.""" return IMPL.quota_usage_get(context, project_id, resource) def quota_usage_get_all_by_project(context, project_id): """Retrieve all usage associated with a given resource.""" return IMPL.quota_usage_get_all_by_project(context, project_id) ################### def quota_reserve(context, resources, quotas, deltas, expire, until_refresh, max_age, project_id=None): """Check quotas and create appropriate reservations.""" return IMPL.quota_reserve(context, resources, quotas, deltas, expire, until_refresh, max_age, project_id=project_id) def reservation_commit(context, reservations, project_id=None): """Commit quota reservations.""" return IMPL.reservation_commit(context, reservations, project_id=project_id) def reservation_rollback(context, reservations, project_id=None): """Roll back quota reservations.""" return IMPL.reservation_rollback(context, reservations, project_id=project_id) def 
quota_destroy_by_project(context, project_id): """Destroy all quotas associated with a given project.""" return IMPL.quota_destroy_by_project(context, project_id) def reservation_expire(context): """Roll back any expired reservations.""" return IMPL.reservation_expire(context) def quota_usage_update_resource(context, old_res, new_res): """Update resource field in quota_usages.""" return IMPL.quota_usage_update_resource(context, old_res, new_res) ################### def backup_get(context, backup_id, read_deleted=None, project_only=True): """Get a backup or raise if it does not exist.""" return IMPL.backup_get(context, backup_id, read_deleted, project_only) def backup_get_all(context, filters=None, marker=None, limit=None, offset=None, sort_keys=None, sort_dirs=None): """Get all backups.""" return IMPL.backup_get_all(context, filters=filters, marker=marker, limit=limit, offset=offset, sort_keys=sort_keys, sort_dirs=sort_dirs) def backup_get_all_by_host(context, host): """Get all backups belonging to a host.""" return IMPL.backup_get_all_by_host(context, host) def backup_create(context, values): """Create a backup from the values dictionary.""" return IMPL.backup_create(context, values) def backup_metadata_get(context, backup_id): return IMPL.backup_metadata_get(context, backup_id) def backup_metadata_update(context, backup_id, metadata, delete): return IMPL.backup_metadata_update(context, backup_id, metadata, delete) def backup_get_all_by_project(context, project_id, filters=None, marker=None, limit=None, offset=None, sort_keys=None, sort_dirs=None): """Get all backups belonging to a project.""" return IMPL.backup_get_all_by_project(context, project_id, filters=filters, marker=marker, limit=limit, offset=offset, sort_keys=sort_keys, sort_dirs=sort_dirs) def backup_get_all_by_volume(context, volume_id, vol_project_id, filters=None): """Get all backups belonging to a volume.""" return IMPL.backup_get_all_by_volume(context, volume_id, vol_project_id, filters=filters) def backup_get_all_active_by_window(context, begin, end=None, project_id=None): """Get all the backups inside the window. Specifying a project_id will filter for a certain project. """ return IMPL.backup_get_all_active_by_window(context, begin, end, project_id) def backup_update(context, backup_id, values): """Set the given properties on a backup and update it. Raises NotFound if backup does not exist. 
""" return IMPL.backup_update(context, backup_id, values) def backup_destroy(context, backup_id): """Destroy the backup or raise if it does not exist.""" return IMPL.backup_destroy(context, backup_id) ################### def transfer_get(context, transfer_id): """Get a volume transfer record or raise if it does not exist.""" return IMPL.transfer_get(context, transfer_id) def transfer_get_all(context, marker=None, limit=None, sort_keys=None, sort_dirs=None, filters=None, offset=None): """Get all volume transfer records.""" return IMPL.transfer_get_all(context, marker=marker, limit=limit, sort_keys=sort_keys, sort_dirs=sort_dirs, filters=filters, offset=offset) def transfer_get_all_by_project(context, project_id, marker=None, limit=None, sort_keys=None, sort_dirs=None, filters=None, offset=None): """Get all volume transfer records for specified project.""" return IMPL.transfer_get_all_by_project(context, project_id, marker=marker, limit=limit, sort_keys=sort_keys, sort_dirs=sort_dirs, filters=filters, offset=offset) def transfer_create(context, values): """Create an entry in the transfers table.""" return IMPL.transfer_create(context, values) def transfer_destroy(context, transfer_id): """Destroy a record in the volume transfer table.""" return IMPL.transfer_destroy(context, transfer_id) def transfer_accept(context, transfer_id, user_id, project_id, no_snapshots=False): """Accept a volume transfer.""" return IMPL.transfer_accept(context, transfer_id, user_id, project_id, no_snapshots=no_snapshots) ################### def consistencygroup_get(context, consistencygroup_id): """Get a consistencygroup or raise if it does not exist.""" return IMPL.consistencygroup_get(context, consistencygroup_id) def consistencygroup_get_all(context, filters=None, marker=None, limit=None, offset=None, sort_keys=None, sort_dirs=None): """Get all consistencygroups.""" return IMPL.consistencygroup_get_all(context, filters=filters, marker=marker, limit=limit, offset=offset, sort_keys=sort_keys, sort_dirs=sort_dirs) def consistencygroup_create(context, values, cg_snap_id=None, cg_id=None): """Create a consistencygroup from the values dictionary.""" return IMPL.consistencygroup_create(context, values, cg_snap_id, cg_id) def consistencygroup_get_all_by_project(context, project_id, filters=None, marker=None, limit=None, offset=None, sort_keys=None, sort_dirs=None): """Get all consistencygroups belonging to a project.""" return IMPL.consistencygroup_get_all_by_project(context, project_id, filters=filters, marker=marker, limit=limit, offset=offset, sort_keys=sort_keys, sort_dirs=sort_dirs) def consistencygroup_update(context, consistencygroup_id, values): """Set the given properties on a consistencygroup and update it. Raises NotFound if consistencygroup does not exist. """ return IMPL.consistencygroup_update(context, consistencygroup_id, values) def consistencygroup_destroy(context, consistencygroup_id): """Destroy the consistencygroup or raise if it does not exist.""" return IMPL.consistencygroup_destroy(context, consistencygroup_id) def cg_has_cgsnapshot_filter(): """Return a filter that checks if a CG has CG Snapshots.""" return IMPL.cg_has_cgsnapshot_filter() def cg_has_volumes_filter(attached_or_with_snapshots=False): """Return a filter to check if a CG has volumes. When attached_or_with_snapshots parameter is given a True value only attached volumes or those with snapshots will be considered. 
""" return IMPL.cg_has_volumes_filter(attached_or_with_snapshots) def cg_creating_from_src(cg_id=None, cgsnapshot_id=None): """Return a filter to check if a CG is being used as creation source. Returned filter is meant to be used in the Conditional Update mechanism and checks if provided CG ID or CG Snapshot ID is currently being used to create another CG. This filter will not include CGs that have used the ID but have already finished their creation (status is no longer creating). Filter uses a subquery that allows it to be used on updates to the consistencygroups table. """ return IMPL.cg_creating_from_src(cg_id, cgsnapshot_id) def consistencygroup_include_in_cluster(context, cluster, partial_rename=True, **filters): """Include all consistency groups matching the filters into a cluster. When partial_rename is set we will not set the cluster_name with cluster parameter value directly, we'll replace provided cluster_name or host filter value with cluster instead. This is useful when we want to replace just the cluster name but leave the backend and pool information as it is. If we are using cluster_name to filter, we'll use that same DB field to replace the cluster value and leave the rest as it is. Likewise if we use the host to filter. Returns the number of consistency groups that have been changed. """ return IMPL.consistencygroup_include_in_cluster(context, cluster, partial_rename, **filters) def group_include_in_cluster(context, cluster, partial_rename=True, **filters): """Include all generic groups matching the filters into a cluster. When partial_rename is set we will not set the cluster_name with cluster parameter value directly, we'll replace provided cluster_name or host filter value with cluster instead. This is useful when we want to replace just the cluster name but leave the backend and pool information as it is. If we are using cluster_name to filter, we'll use that same DB field to replace the cluster value and leave the rest as it is. Likewise if we use the host to filter. Returns the number of generic groups that have been changed. """ return IMPL.group_include_in_cluster(context, cluster, partial_rename, **filters) ################### def group_get(context, group_id): """Get a group or raise if it does not exist.""" return IMPL.group_get(context, group_id) def group_get_all(context, filters=None, marker=None, limit=None, offset=None, sort_keys=None, sort_dirs=None): """Get all groups.""" return IMPL.group_get_all(context, filters=filters, marker=marker, limit=limit, offset=offset, sort_keys=sort_keys, sort_dirs=sort_dirs) def group_create( context, values, group_snapshot_id=None, source_group_id=None, ): """Create a group from the values dictionary.""" return IMPL.group_create( context, values, group_snapshot_id, source_group_id, ) def group_get_all_by_project(context, project_id, filters=None, marker=None, limit=None, offset=None, sort_keys=None, sort_dirs=None): """Get all groups belonging to a project.""" return IMPL.group_get_all_by_project(context, project_id, filters=filters, marker=marker, limit=limit, offset=offset, sort_keys=sort_keys, sort_dirs=sort_dirs) def group_update(context, group_id, values): """Set the given properties on a group and update it. Raises NotFound if group does not exist. 
""" return IMPL.group_update(context, group_id, values) def group_destroy(context, group_id): """Destroy the group or raise if it does not exist.""" return IMPL.group_destroy(context, group_id) def group_has_group_snapshot_filter(): """Return a filter that checks if a Group has Group Snapshots.""" return IMPL.group_has_group_snapshot_filter() def group_has_volumes_filter(attached_or_with_snapshots=False): """Return a filter to check if a Group has volumes. When attached_or_with_snapshots parameter is given a True value only attached volumes or those with snapshots will be considered. """ return IMPL.group_has_volumes_filter(attached_or_with_snapshots) def group_creating_from_src(group_id=None, group_snapshot_id=None): """Return a filter to check if a Group is being used as creation source. Returned filter is meant to be used in the Conditional Update mechanism and checks if provided Group ID or Group Snapshot ID is currently being used to create another Group. This filter will not include Groups that have used the ID but have already finished their creation (status is no longer creating). Filter uses a subquery that allows it to be used on updates to the groups table. """ return IMPL.group_creating_from_src(group_id, group_snapshot_id) def group_volume_type_mapping_create(context, group_id, volume_type_id): """Create a group volume_type mapping entry.""" return IMPL.group_volume_type_mapping_create(context, group_id, volume_type_id) ################### def cgsnapshot_get(context, cgsnapshot_id): """Get a cgsnapshot or raise if it does not exist.""" return IMPL.cgsnapshot_get(context, cgsnapshot_id) def cgsnapshot_get_all(context, filters=None): """Get all cgsnapshots.""" return IMPL.cgsnapshot_get_all(context, filters) def cgsnapshot_create(context, values): """Create a cgsnapshot from the values dictionary.""" return IMPL.cgsnapshot_create(context, values) def cgsnapshot_get_all_by_group(context, group_id, filters=None): """Get all cgsnapshots belonging to a consistency group.""" return IMPL.cgsnapshot_get_all_by_group(context, group_id, filters) def cgsnapshot_get_all_by_project(context, project_id, filters=None): """Get all cgsnapshots belonging to a project.""" return IMPL.cgsnapshot_get_all_by_project(context, project_id, filters) def cgsnapshot_update(context, cgsnapshot_id, values): """Set the given properties on a cgsnapshot and update it. Raises NotFound if cgsnapshot does not exist. 
""" return IMPL.cgsnapshot_update(context, cgsnapshot_id, values) def cgsnapshot_destroy(context, cgsnapshot_id): """Destroy the cgsnapshot or raise if it does not exist.""" return IMPL.cgsnapshot_destroy(context, cgsnapshot_id) def cgsnapshot_creating_from_src(): """Get a filter that checks if a CGSnapshot is being created from a CG.""" return IMPL.cgsnapshot_creating_from_src() ################### def group_snapshot_get(context, group_snapshot_id): """Get a group snapshot or raise if it does not exist.""" return IMPL.group_snapshot_get(context, group_snapshot_id) def group_snapshot_get_all(context, filters=None, marker=None, limit=None, offset=None, sort_keys=None, sort_dirs=None): """Get all group snapshots.""" return IMPL.group_snapshot_get_all(context, filters, marker, limit, offset, sort_keys, sort_dirs) def group_snapshot_create(context, values): """Create a group snapshot from the values dictionary.""" return IMPL.group_snapshot_create(context, values) def group_snapshot_get_all_by_group(context, group_id, filters=None, marker=None, limit=None, offset=None, sort_keys=None, sort_dirs=None): """Get all group snapshots belonging to a group.""" return IMPL.group_snapshot_get_all_by_group(context, group_id, filters, marker, limit, offset, sort_keys, sort_dirs) def group_snapshot_get_all_by_project(context, project_id, filters=None, marker=None, limit=None, offset=None, sort_keys=None, sort_dirs=None): """Get all group snapshots belonging to a project.""" return IMPL.group_snapshot_get_all_by_project(context, project_id, filters, marker, limit, offset, sort_keys, sort_dirs) def group_snapshot_update(context, group_snapshot_id, values): """Set the given properties on a group snapshot and update it. Raises NotFound if group snapshot does not exist. """ return IMPL.group_snapshot_update(context, group_snapshot_id, values) def group_snapshot_destroy(context, group_snapshot_id): """Destroy the group snapshot or raise if it does not exist.""" return IMPL.group_snapshot_destroy(context, group_snapshot_id) def group_snapshot_creating_from_src(): """Get a filter to check if a grp snapshot is being created from a grp.""" return IMPL.group_snapshot_creating_from_src() ################### def purge_deleted_rows(context, age_in_days): """Purge deleted rows older than given age from cinder tables Raises InvalidParameterValue if age_in_days is incorrect. :returns: number of deleted rows """ return IMPL.purge_deleted_rows(context, age_in_days=age_in_days) def get_booleans_for_table(table_name): return IMPL.get_booleans_for_table(table_name) ################### def reset_active_backend(context, enable_replication, active_backend_id, backend_host): """Reset the active backend for a host.""" return IMPL.reset_active_backend(context, enable_replication, active_backend_id, backend_host) ################### def driver_initiator_data_insert_by_key(context, initiator, namespace, key, value): """Updates DriverInitiatorData entry. Sets the value for the specified key within the namespace. 
""" return IMPL.driver_initiator_data_insert_by_key(context, initiator, namespace, key, value) def driver_initiator_data_get(context, initiator, namespace): """Query for an DriverInitiatorData that has the specified key""" return IMPL.driver_initiator_data_get(context, initiator, namespace) ################### def image_volume_cache_create(context, host, cluster_name, image_id, image_updated_at, volume_id, size): """Create a new image volume cache entry.""" return IMPL.image_volume_cache_create(context, host, cluster_name, image_id, image_updated_at, volume_id, size) def image_volume_cache_delete(context, volume_id): """Delete an image volume cache entry specified by volume id.""" return IMPL.image_volume_cache_delete(context, volume_id) def image_volume_cache_get_and_update_last_used(context, image_id, **filters): """Query for an image volume cache entry.""" return IMPL.image_volume_cache_get_and_update_last_used(context, image_id, **filters) def image_volume_cache_get_by_volume_id(context, volume_id): """Query to see if a volume id is an image-volume contained in the cache""" return IMPL.image_volume_cache_get_by_volume_id(context, volume_id) def image_volume_cache_get_all(context, **filters): """Query for all image volume cache entry for a host.""" return IMPL.image_volume_cache_get_all(context, **filters) def image_volume_cache_include_in_cluster(context, cluster, partial_rename=True, **filters): """Include in cluster image volume cache entries matching the filters. When partial_rename is set we will not set the cluster_name with cluster parameter value directly, we'll replace provided cluster_name or host filter value with cluster instead. This is useful when we want to replace just the cluster name but leave the backend and pool information as it is. If we are using cluster_name to filter, we'll use that same DB field to replace the cluster value and leave the rest as it is. Likewise if we use the host to filter. Returns the number of volumes that have been changed. 
""" return IMPL.image_volume_cache_include_in_cluster( context, cluster, partial_rename, **filters) ################### def message_get(context, message_id): """Return a message with the specified ID.""" return IMPL.message_get(context, message_id) def message_get_all(context, filters=None, marker=None, limit=None, offset=None, sort_keys=None, sort_dirs=None): return IMPL.message_get_all(context, filters=filters, marker=marker, limit=limit, offset=offset, sort_keys=sort_keys, sort_dirs=sort_dirs) def message_create(context, values): """Creates a new message with the specified values.""" return IMPL.message_create(context, values) def message_destroy(context, message_id): """Deletes message with the specified ID.""" return IMPL.message_destroy(context, message_id) def cleanup_expired_messages(context): """Soft delete expired messages""" return IMPL.cleanup_expired_messages(context) ################### def worker_create(context, **values): """Create a worker entry from optional arguments.""" return IMPL.worker_create(context, **values) def worker_get(context, **filters): """Get a worker or raise exception if it does not exist.""" return IMPL.worker_get(context, **filters) def worker_get_all(context, until=None, db_filters=None, **filters): """Get all workers that match given criteria.""" return IMPL.worker_get_all(context, until=until, db_filters=db_filters, **filters) def worker_update(context, id, filters=None, orm_worker=None, **values): """Update a worker with given values.""" return IMPL.worker_update(context, id, filters=filters, orm_worker=orm_worker, **values) def worker_claim_for_cleanup(context, claimer_id, orm_worker): """Soft delete a worker, change the service_id and update the worker.""" return IMPL.worker_claim_for_cleanup(context, claimer_id, orm_worker) def worker_destroy(context, **filters): """Delete a worker (no soft delete).""" return IMPL.worker_destroy(context, **filters) ################### def attachment_specs_exist(context): """Check if there are attachment specs left.""" return IMPL.attachment_specs_exist(context) def attachment_specs_get(context, attachment_id): """DEPRECATED: Get all specs for an attachment.""" return IMPL.attachment_specs_get(context, attachment_id) def attachment_specs_delete(context, attachment_id, key): """DEPRECATED: Delete the given attachment specs item.""" return IMPL.attachment_specs_delete(context, attachment_id, key) def attachment_specs_update_or_create(context, attachment_id, specs): """DEPRECATED: Create or update attachment specs. This adds or modifies the key/value pairs specified in the attachment specs dict argument. """ return IMPL.attachment_specs_update_or_create(context, attachment_id, specs) ################### # TODO: (D Release) remove method and this comment def remove_temporary_admin_metadata_data_migration(context, max_count): return IMPL.remove_temporary_admin_metadata_data_migration( context, max_count) def get_projects(context, model, read_deleted="no"): return IMPL.get_projects(context, model, read_deleted=read_deleted) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/db/base.py0000664000175000017500000000175200000000000016211 0ustar00zuulzuul00000000000000# Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Base class for classes that need modular database access.""" import cinder.db class Base(object): """DB driver is injected in the init method.""" def __init__(self): super().__init__() self.db = cinder.db self.db.dispose_engine() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/db/migration.py0000664000175000017500000000727000000000000017271 0ustar00zuulzuul00000000000000# Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Database setup and migration commands.""" import os from alembic import command as alembic_api from alembic import config as alembic_config from alembic import migration as alembic_migration from oslo_config import cfg from oslo_db import options from oslo_log import log as logging from cinder.db.sqlalchemy import api as db_api options.set_defaults(cfg.CONF) LOG = logging.getLogger(__name__) def _find_alembic_conf(): """Get the project's alembic configuration :returns: An instance of ``alembic.config.Config`` """ path = os.path.join( os.path.abspath(os.path.dirname(__file__)), 'alembic.ini') config = alembic_config.Config(os.path.abspath(path)) # we don't want to use the logger configuration from the file, which is # only really intended for the CLI # https://stackoverflow.com/a/42691781/613428 config.attributes['configure_logger'] = False return config def _upgrade_alembic(engine, config, version): # re-use the connection rather than creating a new one with engine.begin() as connection: config.attributes['connection'] = connection alembic_api.upgrade(config, version or 'head') def db_version(): """Get database version.""" engine = db_api.get_engine() with engine.connect() as conn: m_context = alembic_migration.MigrationContext.configure(conn) return m_context.get_current_revision() def db_sync(version=None, engine=None): """Migrate the database to `version` or the most recent version. We're currently straddling two migration systems, sqlalchemy-migrate and alembic. This handles both by ensuring we switch from one to the other at the appropriate moment. 
""" # if the user requested a specific version, check if it's an integer: if # so, we're almost certainly in sqlalchemy-migrate land and won't support # that if version is not None and version.isdigit(): raise ValueError( 'You requested an sqlalchemy-migrate database version; this is ' 'no longer supported' ) if engine is None: engine = db_api.get_engine() config = _find_alembic_conf() # discard the URL encoded in alembic.ini in favour of the URL configured # for the engine by the database fixtures, casting from # 'sqlalchemy.engine.url.URL' to str in the process. This returns a # RFC-1738 quoted URL, which means that a password like "foo@" will be # turned into "foo%40". This in turns causes a problem for # set_main_option() because that uses ConfigParser.set, which (by design) # uses *python* interpolation to write the string out ... where "%" is the # special python interpolation character! Avoid this mismatch by quoting # all %'s for the set below. engine_url = str(engine.url).replace('%', '%%') config.set_main_option('sqlalchemy.url', str(engine_url)) LOG.info('Applying migration(s)') _upgrade_alembic(engine, config, version) LOG.info('Migration(s) applied') ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1759315577.071118 cinder-27.0.0/cinder/db/migrations/0000775000175000017500000000000000000000000017074 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/db/migrations/__init__.py0000664000175000017500000000000000000000000021173 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/db/migrations/env.py0000664000175000017500000000645600000000000020251 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from logging.config import fileConfig from alembic import context from sqlalchemy import engine_from_config from sqlalchemy import pool from cinder.db.sqlalchemy import models # this is the Alembic Config object, which provides # access to the values within the .ini file in use. config = context.config # Interpret the config file for Python logging unless we're told not to. # This line sets up loggers basically. if config.attributes.get('configure_logger', True): fileConfig(config.config_file_name) target_metadata = models.BASE.metadata def include_name(name, type_, parent_names): # if there are any columns or tables that should be excluded from # auto-generation, include them here return True def run_migrations_offline(): """Run migrations in 'offline' mode. This configures the context with just a URL and not an Engine, though an Engine is acceptable here as well. By skipping the Engine creation we don't even need a DBAPI to be available. Calls to context.execute() here emit the given string to the script output. 
""" url = config.get_main_option("sqlalchemy.url") context.configure( url=url, target_metadata=target_metadata, include_name=include_name, literal_binds=True, dialect_opts={"paramstyle": "named"}, ) with context.begin_transaction(): context.run_migrations() def run_migrations_online(): """Run migrations in 'online' mode. In this scenario we need to create an Engine and associate a connection with the context. This is modified from the default based on the below, since we want to share an engine when unit testing so in-memory database testing actually works. https://alembic.sqlalchemy.org/en/latest/cookbook.html#connection-sharing """ connectable = config.attributes.get('connection', None) if connectable is None: # only create Engine if we don't have a Connection from the outside connectable = engine_from_config( config.get_section(config.config_ini_section), prefix="sqlalchemy.", poolclass=pool.NullPool, ) with connectable.connect() as connection: context.configure( connection=connection, target_metadata=target_metadata, include_name=include_name, ) with context.begin_transaction(): context.run_migrations() else: context.configure( connection=connectable, target_metadata=target_metadata, include_name=include_name, ) with context.begin_transaction(): context.run_migrations() if context.is_offline_mode(): run_migrations_offline() else: run_migrations_online() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/db/migrations/script.py.mako0000664000175000017500000000172000000000000021700 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """${message} Revision ID: ${up_revision} Revises: ${down_revision | comma,n} Create Date: ${create_date} """ from alembic import op import sqlalchemy as sa ${imports if imports else ""} # revision identifiers, used by Alembic. revision = ${repr(up_revision)} down_revision = ${repr(down_revision)} branch_labels = ${repr(branch_labels)} depends_on = ${repr(depends_on)} def upgrade(): ${upgrades if upgrades else "pass"} ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1759315577.075118 cinder-27.0.0/cinder/db/migrations/versions/0000775000175000017500000000000000000000000020744 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/db/migrations/versions/89aa6f9639f9_drop_legacy_migrate_version_table.py0000664000175000017500000000210500000000000032100 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. """Drop legacy migrate_version table Revision ID: 89aa6f9639f9 Revises: daa98075b90d Create Date: 2023-02-17 12:41:37.940769 """ from alembic import op from sqlalchemy.engine import reflection # revision identifiers, used by Alembic. revision = '89aa6f9639f9' down_revision = 'daa98075b90d' branch_labels = None depends_on = None def upgrade(): conn = op.get_bind() inspector = reflection.Inspector.from_engine(conn) tables = inspector.get_table_names() if 'migrate_version' in tables: op.drop_table('migrate_version') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/db/migrations/versions/921e1a36b076_initial.py0000664000175000017500000011736200000000000024513 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Initial migration. Revision ID: 921e1a36b076 Revises: Create Date: 2020-11-02 11:27:29.952490 """ import datetime import uuid from alembic import op from oslo_config import cfg from oslo_utils import timeutils import sqlalchemy as sa from sqlalchemy.dialects import mysql from sqlalchemy.sql import expression from cinder.volume import group_types as volume_group_types from cinder.volume import volume_types # revision identifiers, used by Alembic. revision = '921e1a36b076' down_revision = None branch_labels = None depends_on = None # Get default values via config. The defaults will either # come from the default values set in the quota option # configuration or via cinder.conf if the user has configured # default values for quotas there. 
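# Illustrative example (editor's note, not part of the original migration):
# if the operator's cinder.conf contains, typically in the [DEFAULT] section,
#
#   quota_volumes = 20
#   quota_snapshots = 20
#   quota_gigabytes = 2000
#
# those values are made available through the import_opt() calls below and
# become the hard limits seeded into the 'default' quota class by the
# bulk_insert further down in upgrade().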
CONF = cfg.CONF CONF.import_opt('quota_volumes', 'cinder.quota') CONF.import_opt('quota_snapshots', 'cinder.quota') CONF.import_opt('quota_gigabytes', 'cinder.quota') CLASS_NAME = 'default' CREATED_AT = datetime.datetime.now() # noqa def upgrade(): connection = op.get_bind() op.create_table( 'services', sa.Column('created_at', sa.DateTime), sa.Column('updated_at', sa.DateTime), sa.Column('deleted_at', sa.DateTime), sa.Column('deleted', sa.Boolean), sa.Column('id', sa.Integer, primary_key=True, nullable=False), sa.Column('host', sa.String(255)), sa.Column('binary', sa.String(255)), sa.Column('topic', sa.String(255)), sa.Column('report_count', sa.Integer, nullable=False), sa.Column('disabled', sa.Boolean), sa.Column('availability_zone', sa.String(255)), sa.Column('disabled_reason', sa.String(255)), sa.Column('modified_at', sa.DateTime(timezone=False)), sa.Column('rpc_current_version', sa.String(36)), sa.Column('object_current_version', sa.String(36)), sa.Column('replication_status', sa.String(36), default='not-capable'), sa.Column('frozen', sa.Boolean, default=False), sa.Column('active_backend_id', sa.String(255)), sa.Column('cluster_name', sa.String(255), nullable=True), sa.Column('uuid', sa.String(36), nullable=True), sa.Index('services_uuid_idx', 'uuid', unique=True), mysql_engine='InnoDB', mysql_charset='utf8', ) op.create_table( 'consistencygroups', sa.Column('created_at', sa.DateTime(timezone=False)), sa.Column('updated_at', sa.DateTime(timezone=False)), sa.Column('deleted_at', sa.DateTime(timezone=False)), sa.Column('deleted', sa.Boolean(create_constraint=True, name=None)), sa.Column('id', sa.String(36), primary_key=True, nullable=False), sa.Column('user_id', sa.String(255)), sa.Column('project_id', sa.String(255)), sa.Column('host', sa.String(255)), sa.Column('availability_zone', sa.String(255)), sa.Column('name', sa.String(255)), sa.Column('description', sa.String(255)), sa.Column('volume_type_id', sa.String(255)), sa.Column('status', sa.String(255)), sa.Column('cgsnapshot_id', sa.String(36)), sa.Column('source_cgid', sa.String(36)), sa.Column('cluster_name', sa.String(255), nullable=True), mysql_engine='InnoDB', mysql_charset='utf8', ) op.create_table( 'cgsnapshots', sa.Column('created_at', sa.DateTime(timezone=False)), sa.Column('updated_at', sa.DateTime(timezone=False)), sa.Column('deleted_at', sa.DateTime(timezone=False)), sa.Column('deleted', sa.Boolean(create_constraint=True, name=None)), sa.Column('id', sa.String(36), primary_key=True, nullable=False), sa.Column( 'consistencygroup_id', sa.String(36), sa.ForeignKey('consistencygroups.id'), nullable=False, index=True, ), sa.Column('user_id', sa.String(255)), sa.Column('project_id', sa.String(255)), sa.Column('name', sa.String(255)), sa.Column('description', sa.String(255)), sa.Column('status', sa.String(255)), mysql_engine='InnoDB', mysql_charset='utf8', ) op.create_table( 'groups', sa.Column('created_at', sa.DateTime(timezone=False)), sa.Column('updated_at', sa.DateTime(timezone=False)), sa.Column('deleted_at', sa.DateTime(timezone=False)), sa.Column('deleted', sa.Boolean), sa.Column('id', sa.String(36), primary_key=True, nullable=False), sa.Column('user_id', sa.String(length=255)), sa.Column('project_id', sa.String(length=255)), sa.Column('cluster_name', sa.String(255)), sa.Column('host', sa.String(length=255)), sa.Column('availability_zone', sa.String(length=255)), sa.Column('name', sa.String(length=255)), sa.Column('description', sa.String(length=255)), sa.Column('group_type_id', sa.String(length=36)), sa.Column('status', 
sa.String(length=255)), sa.Column('group_snapshot_id', sa.String(36)), sa.Column('source_group_id', sa.String(36)), sa.Column('replication_status', sa.String(255)), mysql_engine='InnoDB', mysql_charset='utf8', ) op.create_table( 'group_snapshots', sa.Column('created_at', sa.DateTime(timezone=False)), sa.Column('updated_at', sa.DateTime(timezone=False)), sa.Column('deleted_at', sa.DateTime(timezone=False)), sa.Column('deleted', sa.Boolean(create_constraint=True, name=None)), sa.Column('id', sa.String(36), primary_key=True), sa.Column( 'group_id', sa.String(36), sa.ForeignKey('groups.id'), nullable=False, index=True, ), sa.Column('user_id', sa.String(length=255)), sa.Column('project_id', sa.String(length=255)), sa.Column('name', sa.String(length=255)), sa.Column('description', sa.String(length=255)), sa.Column('status', sa.String(length=255)), sa.Column('group_type_id', sa.String(length=36)), mysql_engine='InnoDB', mysql_charset='utf8', ) op.create_table( 'volumes', sa.Column('created_at', sa.DateTime), sa.Column('updated_at', sa.DateTime), sa.Column('deleted_at', sa.DateTime), sa.Column('deleted', sa.Boolean), sa.Column('id', sa.String(36), primary_key=True, nullable=False), sa.Column('ec2_id', sa.String(255)), sa.Column('user_id', sa.String(255)), sa.Column('project_id', sa.String(255)), sa.Column('host', sa.String(255)), sa.Column('size', sa.Integer), sa.Column('availability_zone', sa.String(255)), sa.Column('status', sa.String(255)), sa.Column('attach_status', sa.String(255)), sa.Column('scheduled_at', sa.DateTime), sa.Column('launched_at', sa.DateTime), sa.Column('terminated_at', sa.DateTime), sa.Column('display_name', sa.String(255)), sa.Column('display_description', sa.String(255)), sa.Column('provider_location', sa.String(256)), sa.Column('provider_auth', sa.String(256)), sa.Column('snapshot_id', sa.String(36)), sa.Column('volume_type_id', sa.String(36), nullable=False), sa.Column('source_volid', sa.String(36)), sa.Column('bootable', sa.Boolean), sa.Column('provider_geometry', sa.String(255)), sa.Column('_name_id', sa.String(36)), sa.Column('encryption_key_id', sa.String(36)), sa.Column('migration_status', sa.String(255)), sa.Column('replication_status', sa.String(255)), sa.Column('replication_extended_status', sa.String(255)), sa.Column('replication_driver_data', sa.String(255)), sa.Column( 'consistencygroup_id', sa.String(36), sa.ForeignKey('consistencygroups.id'), index=True, ), sa.Column('provider_id', sa.String(255)), sa.Column('multiattach', sa.Boolean), sa.Column('previous_status', sa.String(255)), sa.Column('cluster_name', sa.String(255), nullable=True), sa.Column( 'group_id', sa.String(36), sa.ForeignKey('groups.id'), index=True, ), sa.Column( 'service_uuid', sa.String(36), sa.ForeignKey('services.uuid'), nullable=True, ), sa.Column('shared_targets', sa.Boolean, default=True), sa.Column('use_quota', sa.Boolean, nullable=True), sa.Index('volumes_service_uuid_idx', 'service_uuid', 'deleted'), mysql_engine='InnoDB', mysql_charset='utf8', ) op.create_table( 'volume_attachment', sa.Column('created_at', sa.DateTime), sa.Column('updated_at', sa.DateTime), sa.Column('deleted_at', sa.DateTime), sa.Column('deleted', sa.Boolean), sa.Column('id', sa.String(36), primary_key=True, nullable=False), sa.Column( 'volume_id', sa.String(36), sa.ForeignKey('volumes.id'), nullable=False, index=True, ), sa.Column('attached_host', sa.String(255)), sa.Column('instance_uuid', sa.String(36)), sa.Column('mountpoint', sa.String(255)), sa.Column('attach_time', sa.DateTime), sa.Column('detach_time', 
sa.DateTime), sa.Column('attach_mode', sa.String(36)), sa.Column('attach_status', sa.String(255)), sa.Column('connection_info', sa.Text), sa.Column('connector', sa.Text), mysql_engine='InnoDB', mysql_charset='utf8', ) op.create_table( 'attachment_specs', sa.Column('created_at', sa.DateTime(timezone=False)), sa.Column('updated_at', sa.DateTime(timezone=False)), sa.Column('deleted_at', sa.DateTime(timezone=False)), sa.Column('deleted', sa.Boolean(), default=False), sa.Column('id', sa.Integer, primary_key=True, nullable=False), sa.Column( 'attachment_id', sa.String(36), sa.ForeignKey('volume_attachment.id'), nullable=False, index=True, ), sa.Column('key', sa.String(255)), sa.Column('value', sa.String(255)), mysql_engine='InnoDB', mysql_charset='utf8', ) op.create_table( 'snapshots', sa.Column('created_at', sa.DateTime), sa.Column('updated_at', sa.DateTime), sa.Column('deleted_at', sa.DateTime), sa.Column('deleted', sa.Boolean), sa.Column('id', sa.String(36), primary_key=True, nullable=False), sa.Column( 'volume_id', sa.String(36), sa.ForeignKey('volumes.id', name='snapshots_volume_id_fkey'), nullable=False, index=True, ), sa.Column('user_id', sa.String(255)), sa.Column('project_id', sa.String(255)), sa.Column('status', sa.String(255)), sa.Column('progress', sa.String(255)), sa.Column('volume_size', sa.Integer), sa.Column('scheduled_at', sa.DateTime), sa.Column('display_name', sa.String(255)), sa.Column('display_description', sa.String(255)), sa.Column('provider_location', sa.String(255)), sa.Column('encryption_key_id', sa.String(36)), sa.Column('volume_type_id', sa.String(36), nullable=False), sa.Column( 'cgsnapshot_id', sa.String(36), sa.ForeignKey('cgsnapshots.id'), index=True, ), sa.Column('provider_id', sa.String(255)), sa.Column('provider_auth', sa.String(255)), sa.Column( 'group_snapshot_id', sa.String(36), sa.ForeignKey('group_snapshots.id'), index=True, ), sa.Column('use_quota', sa.Boolean, nullable=True), mysql_engine='InnoDB', mysql_charset='utf8', ) op.create_table( 'snapshot_metadata', sa.Column('created_at', sa.DateTime), sa.Column('updated_at', sa.DateTime), sa.Column('deleted_at', sa.DateTime), sa.Column('deleted', sa.Boolean), sa.Column('id', sa.Integer, primary_key=True, nullable=False), sa.Column( 'snapshot_id', sa.String(36), sa.ForeignKey('snapshots.id'), nullable=False, index=True, ), sa.Column('key', sa.String(255)), sa.Column('value', sa.String(255)), mysql_engine='InnoDB', mysql_charset='utf8', ) op.create_table( 'quality_of_service_specs', sa.Column('created_at', sa.DateTime(timezone=False)), sa.Column('updated_at', sa.DateTime(timezone=False)), sa.Column('deleted_at', sa.DateTime(timezone=False)), sa.Column('deleted', sa.Boolean(create_constraint=True, name=None)), sa.Column('id', sa.String(36), primary_key=True, nullable=False), sa.Column( 'specs_id', sa.String(36), sa.ForeignKey('quality_of_service_specs.id'), index=True, ), sa.Column('key', sa.String(255)), sa.Column('value', sa.String(255)), mysql_engine='InnoDB', mysql_charset='utf8', ) volume_types_table = op.create_table( 'volume_types', sa.Column('created_at', sa.DateTime), sa.Column('updated_at', sa.DateTime), sa.Column('deleted_at', sa.DateTime), sa.Column('deleted', sa.Boolean), sa.Column('id', sa.String(36), primary_key=True, nullable=False), sa.Column('name', sa.String(255)), sa.Column( 'qos_specs_id', sa.String(36), sa.ForeignKey('quality_of_service_specs.id'), index=True, ), sa.Column('is_public', sa.Boolean), sa.Column('description', sa.String(255)), mysql_engine='InnoDB', mysql_charset='utf8', ) 
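    # Editor's note (not in the original migration): the volume_type_projects
    # table created next appears to back project-scoped access to private
    # volume types (is_public=False). The UniqueConstraint on
    # (volume_type_id, project_id, deleted) follows Cinder's soft-delete
    # pattern; 'deleted' is an integer here, presumably so a previously
    # removed access entry can be re-created without violating the constraint.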
op.create_table( 'volume_type_projects', sa.Column('id', sa.Integer, primary_key=True, nullable=False), sa.Column('created_at', sa.DateTime), sa.Column('updated_at', sa.DateTime), sa.Column('deleted_at', sa.DateTime), sa.Column( 'volume_type_id', sa.String(36), sa.ForeignKey('volume_types.id') ), sa.Column('project_id', sa.String(255)), sa.Column('deleted', sa.Integer), sa.UniqueConstraint('volume_type_id', 'project_id', 'deleted'), mysql_engine='InnoDB', mysql_charset='utf8', ) op.create_table( 'volume_metadata', sa.Column('created_at', sa.DateTime), sa.Column('updated_at', sa.DateTime), sa.Column('deleted_at', sa.DateTime), sa.Column('deleted', sa.Boolean), sa.Column('id', sa.Integer, primary_key=True, nullable=False), sa.Column( 'volume_id', sa.String(36), sa.ForeignKey('volumes.id'), nullable=False, index=True, ), sa.Column('key', sa.String(255)), sa.Column('value', sa.String(255)), mysql_engine='InnoDB', mysql_charset='utf8', ) op.create_table( 'volume_type_extra_specs', sa.Column('created_at', sa.DateTime), sa.Column('updated_at', sa.DateTime), sa.Column('deleted_at', sa.DateTime), sa.Column('deleted', sa.Boolean), sa.Column('id', sa.Integer, primary_key=True, nullable=False), sa.Column( 'volume_type_id', sa.String(36), sa.ForeignKey( 'volume_types.id', name='volume_type_extra_specs_ibfk_1', ), nullable=False, index=True, ), sa.Column('key', sa.String(255)), sa.Column('value', sa.String(255)), mysql_engine='InnoDB', mysql_charset='utf8', ) op.create_table( 'quotas', sa.Column('id', sa.Integer, primary_key=True, nullable=False), sa.Column('created_at', sa.DateTime), sa.Column('updated_at', sa.DateTime), sa.Column('deleted_at', sa.DateTime), sa.Column('deleted', sa.Boolean), sa.Column('project_id', sa.String(255)), sa.Column('resource', sa.String(255), nullable=False), sa.Column('hard_limit', sa.Integer), sa.Column('allocated', sa.Integer, default=0), mysql_engine='InnoDB', mysql_charset='utf8', ) quota_classes_table = op.create_table( 'quota_classes', sa.Column('created_at', sa.DateTime(timezone=False)), sa.Column('updated_at', sa.DateTime(timezone=False)), sa.Column('deleted_at', sa.DateTime(timezone=False)), sa.Column('deleted', sa.Boolean(create_constraint=True, name=None)), sa.Column('id', sa.Integer(), primary_key=True), sa.Column('class_name', sa.String(255), index=True), sa.Column('resource', sa.String(255)), sa.Column('hard_limit', sa.Integer(), nullable=True), mysql_engine='InnoDB', mysql_charset='utf8', ) op.create_table( 'quota_usages', sa.Column('created_at', sa.DateTime(timezone=False)), sa.Column('updated_at', sa.DateTime(timezone=False)), sa.Column('deleted_at', sa.DateTime(timezone=False)), sa.Column('deleted', sa.Boolean(create_constraint=True, name=None)), sa.Column('id', sa.Integer(), primary_key=True), sa.Column('project_id', sa.String(255), index=True), sa.Column('resource', sa.String(255)), sa.Column('in_use', sa.Integer(), nullable=False), sa.Column('reserved', sa.Integer(), nullable=False), sa.Column('until_refresh', sa.Integer(), nullable=True), sa.Column('race_preventer', sa.Boolean, nullable=True), sa.Index('quota_usage_project_resource_idx', 'project_id', 'resource'), sa.UniqueConstraint('project_id', 'resource', 'race_preventer'), mysql_engine='InnoDB', mysql_charset='utf8', ) op.create_table( 'reservations', sa.Column('created_at', sa.DateTime(timezone=False)), sa.Column('updated_at', sa.DateTime(timezone=False)), sa.Column('deleted_at', sa.DateTime(timezone=False)), sa.Column('deleted', sa.Boolean(create_constraint=True, name=None)), sa.Column('id', 
sa.Integer(), primary_key=True), sa.Column('uuid', sa.String(36), nullable=False), sa.Column( 'usage_id', sa.Integer(), sa.ForeignKey('quota_usages.id'), nullable=True, index=True, ), sa.Column('project_id', sa.String(255), index=True), sa.Column('resource', sa.String(255)), sa.Column('delta', sa.Integer(), nullable=False), sa.Column('expire', sa.DateTime(timezone=False)), sa.Column( 'allocated_id', sa.Integer, sa.ForeignKey('quotas.id'), nullable=True, index=True, ), sa.Index('reservations_deleted_expire_idx', 'deleted', 'expire'), sa.Index('reservations_deleted_uuid_idx', 'deleted', 'uuid'), mysql_engine='InnoDB', mysql_charset='utf8', ) op.create_table( 'volume_glance_metadata', sa.Column('created_at', sa.DateTime(timezone=False)), sa.Column('updated_at', sa.DateTime(timezone=False)), sa.Column('deleted_at', sa.DateTime(timezone=False)), sa.Column('deleted', sa.Boolean(create_constraint=True, name=None)), sa.Column('id', sa.Integer(), primary_key=True, nullable=False), sa.Column( 'volume_id', sa.String(36), sa.ForeignKey('volumes.id'), index=True, ), sa.Column( 'snapshot_id', sa.String(36), sa.ForeignKey('snapshots.id'), index=True, ), sa.Column('key', sa.String(255)), sa.Column('value', sa.Text), mysql_engine='InnoDB', mysql_charset='utf8', ) op.create_table( 'backups', sa.Column('created_at', sa.DateTime(timezone=False)), sa.Column('updated_at', sa.DateTime(timezone=False)), sa.Column('deleted_at', sa.DateTime(timezone=False)), sa.Column('deleted', sa.Boolean(create_constraint=True, name=None)), sa.Column('id', sa.String(36), primary_key=True, nullable=False), sa.Column('volume_id', sa.String(36), nullable=False), sa.Column('user_id', sa.String(255)), sa.Column('project_id', sa.String(255)), sa.Column('host', sa.String(255)), sa.Column('availability_zone', sa.String(255)), sa.Column('display_name', sa.String(255)), sa.Column('display_description', sa.String(255)), sa.Column('container', sa.String(255)), sa.Column('status', sa.String(255)), sa.Column('fail_reason', sa.String(255)), sa.Column('service_metadata', sa.String(255)), sa.Column('service', sa.String(255)), sa.Column('size', sa.Integer()), sa.Column('object_count', sa.Integer()), sa.Column('parent_id', sa.String(36)), sa.Column('temp_volume_id', sa.String(36)), sa.Column('temp_snapshot_id', sa.String(36)), sa.Column('num_dependent_backups', sa.Integer, default=0), sa.Column('snapshot_id', sa.String(36)), sa.Column('data_timestamp', sa.DateTime), sa.Column('restore_volume_id', sa.String(36)), sa.Column('encryption_key_id', sa.String(36)), mysql_engine='InnoDB', mysql_charset='utf8', ) op.create_table( 'backup_metadata', sa.Column('created_at', sa.DateTime(timezone=False)), sa.Column('updated_at', sa.DateTime(timezone=False)), sa.Column('deleted_at', sa.DateTime(timezone=False)), sa.Column('deleted', sa.Boolean(), default=False), sa.Column('id', sa.Integer, primary_key=True, nullable=False), sa.Column( 'backup_id', sa.String(36), sa.ForeignKey('backups.id'), nullable=False, index=True, ), sa.Column('key', sa.String(255)), sa.Column('value', sa.String(255)), mysql_engine='InnoDB', mysql_charset='utf8', ) op.create_table( 'transfers', sa.Column('created_at', sa.DateTime(timezone=False)), sa.Column('updated_at', sa.DateTime(timezone=False)), sa.Column('deleted_at', sa.DateTime(timezone=False)), sa.Column('deleted', sa.Boolean), sa.Column('id', sa.String(36), primary_key=True, nullable=False), sa.Column( 'volume_id', sa.String(36), sa.ForeignKey('volumes.id'), nullable=False, index=True, ), sa.Column('display_name', sa.String(255)), 
sa.Column('salt', sa.String(255)), sa.Column('crypt_hash', sa.String(255)), sa.Column('expires_at', sa.DateTime(timezone=False)), sa.Column('no_snapshots', sa.Boolean, default=False), sa.Column('source_project_id', sa.String(255), nullable=True), sa.Column('destination_project_id', sa.String(255), nullable=True), sa.Column('accepted', sa.Boolean, default=False), mysql_engine='InnoDB', mysql_charset='utf8', ) # Sqlite needs to handle nullable differently is_nullable = connection.engine.name == 'sqlite' op.create_table( 'encryption', sa.Column('created_at', sa.DateTime(timezone=False)), sa.Column('updated_at', sa.DateTime(timezone=False)), sa.Column('deleted_at', sa.DateTime(timezone=False)), sa.Column('deleted', sa.Boolean(create_constraint=True, name=None)), sa.Column('cipher', sa.String(255)), sa.Column('control_location', sa.String(255), nullable=is_nullable), sa.Column('key_size', sa.Integer), sa.Column('provider', sa.String(255), nullable=is_nullable), # NOTE(joel-coffman): The volume_type_id must be unique or else the # referenced volume type becomes ambiguous. That is, specifying the # volume type is not sufficient to identify a particular encryption # scheme unless each volume type is associated with at most one # encryption scheme. sa.Column('volume_type_id', sa.String(36), nullable=False), # NOTE (smcginnis): nullable=True triggers this to not set a default # value, but since it's a primary key the resulting schema will end up # still being NOT NULL. This is avoiding a case in MySQL where it will # otherwise set this to NOT NULL DEFAULT ''. May be harmless, but # inconsistent with previous schema. sa.Column( 'encryption_id', sa.String(36), primary_key=True, nullable=True, ), mysql_engine='InnoDB', mysql_charset='utf8', ) op.create_table( 'volume_admin_metadata', sa.Column('created_at', sa.DateTime), sa.Column('updated_at', sa.DateTime), sa.Column('deleted_at', sa.DateTime), sa.Column('deleted', sa.Boolean), sa.Column('id', sa.Integer, primary_key=True, nullable=False), sa.Column( 'volume_id', sa.String(36), sa.ForeignKey('volumes.id'), nullable=False, index=True, ), sa.Column('key', sa.String(255)), sa.Column('value', sa.String(255)), mysql_engine='InnoDB', mysql_charset='utf8', ) op.create_table( 'driver_initiator_data', sa.Column('created_at', sa.DateTime(timezone=False)), sa.Column('updated_at', sa.DateTime(timezone=False)), sa.Column('id', sa.Integer, primary_key=True, nullable=False), sa.Column('initiator', sa.String(255), index=True, nullable=False), sa.Column('namespace', sa.String(255), nullable=False), sa.Column('key', sa.String(255), nullable=False), sa.Column('value', sa.String(255)), sa.UniqueConstraint('initiator', 'namespace', 'key'), mysql_engine='InnoDB', mysql_charset='utf8', ) op.create_table( 'image_volume_cache_entries', sa.Column('image_updated_at', sa.DateTime(timezone=False)), sa.Column('id', sa.Integer, primary_key=True, nullable=False), sa.Column('host', sa.String(255), index=True, nullable=False), sa.Column('image_id', sa.String(36), index=True, nullable=False), sa.Column('volume_id', sa.String(36), nullable=False), sa.Column('size', sa.Integer, nullable=False), sa.Column('last_used', sa.DateTime, nullable=False), sa.Column('cluster_name', sa.String(255)), mysql_engine='InnoDB', mysql_charset='utf8', ) op.create_table( 'messages', sa.Column('id', sa.String(36), primary_key=True, nullable=False), sa.Column('project_id', sa.String(255), nullable=False), sa.Column('request_id', sa.String(255)), sa.Column('resource_type', sa.String(36)), 
sa.Column('resource_uuid', sa.String(255), nullable=True), sa.Column('event_id', sa.String(255), nullable=False), sa.Column('message_level', sa.String(255), nullable=False), sa.Column('created_at', sa.DateTime(timezone=False)), sa.Column('updated_at', sa.DateTime(timezone=False)), sa.Column('deleted_at', sa.DateTime(timezone=False)), sa.Column('deleted', sa.Boolean), sa.Column('expires_at', sa.DateTime(timezone=False), index=True), sa.Column('detail_id', sa.String(10), nullable=True), sa.Column('action_id', sa.String(10), nullable=True), mysql_engine='InnoDB', mysql_charset='utf8', ) op.create_table( 'clusters', sa.Column('created_at', sa.DateTime(timezone=False)), sa.Column('updated_at', sa.DateTime(timezone=False)), sa.Column('deleted_at', sa.DateTime(timezone=False)), sa.Column('deleted', sa.Boolean(), default=False), sa.Column('id', sa.Integer, primary_key=True, nullable=False), sa.Column('name', sa.String(255), nullable=False), sa.Column('binary', sa.String(255), nullable=False), sa.Column('disabled', sa.Boolean(), default=False), sa.Column('disabled_reason', sa.String(255)), sa.Column('race_preventer', sa.Integer, nullable=False, default=0), sa.Column( 'replication_status', sa.String(length=36), default='not-capable', ), sa.Column('active_backend_id', sa.String(length=255)), sa.Column( 'frozen', sa.Boolean, nullable=False, default=False, server_default=expression.false(), ), # To remove potential races on creation we have a constraint set on # name and race_preventer fields, and we set value on creation to 0, so # 2 clusters with the same name will fail this constraint. On deletion # we change this field to the same value as the id which will be unique # and will not conflict with the creation of another cluster with the # same name. sa.UniqueConstraint('name', 'binary', 'race_preventer'), mysql_engine='InnoDB', mysql_charset='utf8', ) workers_table = op.create_table( 'workers', sa.Column('created_at', sa.DateTime(timezone=False)), sa.Column('updated_at', sa.DateTime(timezone=False)), sa.Column('deleted_at', sa.DateTime(timezone=False)), sa.Column('deleted', sa.Boolean(), default=False), sa.Column('id', sa.Integer, primary_key=True), sa.Column('resource_type', sa.String(40), nullable=False), sa.Column('resource_id', sa.String(36), nullable=False), sa.Column('status', sa.String(255), nullable=False), sa.Column( 'service_id', sa.Integer, sa.ForeignKey('services.id'), nullable=True, index=True, ), sa.Column( 'race_preventer', sa.Integer, nullable=False, default=0, server_default=sa.text('0'), ), sa.UniqueConstraint('resource_type', 'resource_id'), mysql_engine='InnoDB', mysql_charset='utf8', ) group_types_table = op.create_table( 'group_types', sa.Column('id', sa.String(36), primary_key=True, nullable=False), sa.Column('name', sa.String(255), nullable=False), sa.Column('description', sa.String(255)), sa.Column('created_at', sa.DateTime(timezone=False)), sa.Column('updated_at', sa.DateTime(timezone=False)), sa.Column('deleted_at', sa.DateTime(timezone=False)), sa.Column('deleted', sa.Boolean), sa.Column('is_public', sa.Boolean), mysql_engine='InnoDB', mysql_charset='utf8', ) group_type_specs_table = op.create_table( 'group_type_specs', sa.Column('id', sa.Integer, primary_key=True, nullable=False), sa.Column('key', sa.String(255)), sa.Column('value', sa.String(255)), sa.Column( 'group_type_id', sa.String(36), sa.ForeignKey('group_types.id'), nullable=False, index=True, ), sa.Column('created_at', sa.DateTime(timezone=False)), sa.Column('updated_at', sa.DateTime(timezone=False)), 
sa.Column('deleted_at', sa.DateTime(timezone=False)), sa.Column('deleted', sa.Boolean), mysql_engine='InnoDB', mysql_charset='utf8', ) op.create_table( 'group_type_projects', sa.Column('id', sa.Integer, primary_key=True, nullable=False), sa.Column('created_at', sa.DateTime), sa.Column('updated_at', sa.DateTime), sa.Column('deleted_at', sa.DateTime), sa.Column( 'group_type_id', sa.String(36), sa.ForeignKey('group_types.id') ), sa.Column('project_id', sa.String(length=255)), sa.Column('deleted', sa.Boolean(create_constraint=True, name=None)), sa.UniqueConstraint('group_type_id', 'project_id', 'deleted'), mysql_engine='InnoDB', mysql_charset='utf8', ) op.create_table( 'group_volume_type_mapping', sa.Column('created_at', sa.DateTime), sa.Column('updated_at', sa.DateTime), sa.Column('deleted_at', sa.DateTime), sa.Column('deleted', sa.Boolean), sa.Column('id', sa.Integer, primary_key=True, nullable=False), sa.Column( 'volume_type_id', sa.String(36), sa.ForeignKey('volume_types.id'), nullable=False, index=True, ), sa.Column( 'group_id', sa.String(36), sa.ForeignKey('groups.id'), nullable=False, index=True, ), mysql_engine='InnoDB', mysql_charset='utf8', ) op.create_table( 'default_volume_types', sa.Column('created_at', sa.DateTime), sa.Column('updated_at', sa.DateTime), sa.Column('deleted_at', sa.DateTime), sa.Column( 'volume_type_id', sa.String(36), sa.ForeignKey('volume_types.id'), index=True, ), sa.Column( 'project_id', sa.String(length=255), primary_key=True, nullable=False, ), sa.Column('deleted', sa.Boolean(create_constraint=True, name=None)), mysql_engine='InnoDB', mysql_charset='utf8', ) if connection.engine.name == "mysql": tables = [ "consistencygroups", "cgsnapshots", "snapshots", "snapshot_metadata", "quality_of_service_specs", "volume_types", "volume_type_projects", "volumes", "volume_attachment", "quotas", "services", "volume_metadata", "volume_type_extra_specs", "quota_classes", "quota_usages", "reservations", "volume_glance_metadata", "backups", "backup_metadata", "transfers", "encryption", "volume_admin_metadata", "driver_initiator_data", "image_volume_cache_entries", ] op.execute("SET foreign_key_checks = 0") for table in tables: op.execute( "ALTER TABLE %s CONVERT TO CHARACTER SET utf8" % table ) op.execute("SET foreign_key_checks = 1") op.execute( "ALTER DATABASE %s DEFAULT CHARACTER SET utf8" % connection.engine.url.database ) op.execute("ALTER TABLE %s Engine=InnoDB" % table) # This is only necessary for mysql, and since the table is not in use this # will only be a schema update. if connection.engine.name.startswith('mysql'): try: with op.batch_alter_table('workers') as batch_op: batch_op.alter_column( 'updated_at', type_=mysql.DATETIME(fsp=6) ) except Exception: # MySQL v5.5 or earlier don't support sub-second resolution so we # may have cleanup races in Active-Active configurations, that's # why upgrading is recommended in that case. # Code in Cinder is capable of working with 5.5, so for 5.5 there's # no problem pass # Increase the resource column size to the quota_usages table. # # The resource value is constructed from (prefix + volume_type_name), # but the length of volume_type_name is limited to 255, if we add a # prefix such as 'volumes_' or 'gigabytes_' to volume_type_name it # will exceed the db length limit. 
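    # Illustrative note (editor's addition, not in the original migration):
    # per-type quota resources are built as prefix + volume_type_name, so a
    # volume type named 'a' * 250 yields the resource name
    # 'gigabytes_' + ('a' * 250), which is 260 characters long and would not
    # fit in the previous String(255) column, hence the widening to
    # String(300) below.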
try: with op.batch_alter_table('quota_usages') as batch_op: batch_op.alter_column('resource', type_=sa.String(300)) except Exception: # On MariaDB, max length varies depending on the version and the InnoDB # page size [1], so it is possible to have error 1071 ('Specified key # was too long; max key length is 767 bytes"). Since this migration is # to resolve a corner case, deployments with those DB versions won't be # covered. # [1]: https://mariadb.com/kb/en/library/innodb-limitations/#page-sizes if not connection.engine.name.startswith('mysql'): raise op.bulk_insert( quota_classes_table, [ # Set default quota class values { 'created_at': CREATED_AT, 'class_name': CLASS_NAME, 'resource': 'volumes', 'hard_limit': CONF.quota_volumes, 'deleted': False, }, { 'created_at': CREATED_AT, 'class_name': CLASS_NAME, 'resource': 'snapshots', 'hard_limit': CONF.quota_snapshots, 'deleted': False, }, # Set default gigabytes { 'created_at': CREATED_AT, 'class_name': CLASS_NAME, 'resource': 'gigabytes', 'hard_limit': CONF.quota_gigabytes, 'deleted': False, }, { 'created_at': CREATED_AT, 'class_name': CLASS_NAME, 'resource': 'per_volume_gigabytes', 'hard_limit': -1, 'deleted': False, }, { 'created_at': CREATED_AT, 'class_name': CLASS_NAME, 'resource': 'groups', 'hard_limit': CONF.quota_groups, 'deleted': False, }, ], ) # TODO(geguileo): Once we remove support for MySQL 5.5 we have to create # an upgrade migration to remove this row. # Set workers table sub-second support sentinel now = timeutils.utcnow().replace(microsecond=123) op.bulk_insert( workers_table, [ { 'created_at': now, 'updated_at': now, 'deleted': False, 'resource_type': 'SENTINEL', 'resource_id': 'SUB-SECOND', 'status': 'OK', }, ], ) # Create default group type now = timeutils.utcnow() grp_type_id = "%s" % uuid.uuid4() op.bulk_insert( group_types_table, [ { 'id': grp_type_id, 'name': volume_group_types.DEFAULT_CGSNAPSHOT_TYPE, 'description': 'Default group type for migrating cgsnapshot', 'created_at': now, 'updated_at': now, 'deleted': False, 'is_public': True, }, ], ) op.bulk_insert( group_type_specs_table, [ { 'key': 'consistent_group_snapshot_enabled', 'value': ' True', 'group_type_id': grp_type_id, 'created_at': now, 'updated_at': now, 'deleted': False, }, ], ) # Create default volume type op.bulk_insert( volume_types_table, [ { 'id': str(uuid.uuid4()), 'name': volume_types.DEFAULT_VOLUME_TYPE, 'description': 'Default Volume Type', 'created_at': now, 'updated_at': now, 'deleted': False, 'is_public': True, }, ], ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/db/migrations/versions/9ab1b092a404_make_use_quota_non_nullable.py0000664000175000017500000000375200000000000030661 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Make use_quota non nullable Revision ID: 9ab1b092a404 Revises: b8660621f1b9 Create Date: 2021-10-22 16:23:17.080934 """ from alembic import op import sqlalchemy as sa # revision identifiers, used by Alembic. 
revision = '9ab1b092a404' down_revision = 'b8660621f1b9' branch_labels = None depends_on = None def upgrade(): # It's safe to set them as non nullable because when we run db sync on this # release the online migrations from the previous release must already have # been run. connection = op.get_bind() # SQLite doesn't support dropping/altering tables, so we use a workaround if connection.engine.name == 'sqlite': with op.batch_alter_table('volumes') as batch_op: batch_op.alter_column('use_quota', existing_type=sa.BOOLEAN, nullable=False, server_default=sa.true()) with op.batch_alter_table('snapshots') as batch_op: batch_op.alter_column('use_quota', existing_type=sa.BOOLEAN, nullable=False, server_default=sa.true()) else: op.alter_column('volumes', 'use_quota', existing_type=sa.BOOLEAN, nullable=False, server_default=sa.true()) op.alter_column('snapshots', 'use_quota', existing_type=sa.BOOLEAN, nullable=False, server_default=sa.true()) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/db/migrations/versions/9c74c1c6971f_quota_add_backup_defaults_in_quota_class.py0000664000175000017500000000400000000000000033403 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Quota: Add backup defaults in quota class Revision ID: 9c74c1c6971f Revises: b7b88f50aab5 Create Date: 2021-11-10 12:17:06.713239 """ from datetime import datetime from alembic import op from oslo_config import cfg import sqlalchemy as sa from cinder.db.sqlalchemy import models # revision identifiers, used by Alembic. revision = '9c74c1c6971f' down_revision = 'b7b88f50aab5' branch_labels = None depends_on = None def _create_default(bind, resource, hard_limit): session = sa.orm.Session(bind=bind) class_name = 'default' created_at = datetime.now() # noqa with session.begin(): if session.query(sa.sql.exists() .where( sa.and_( ~models.QuotaClass.deleted, models.QuotaClass.class_name == class_name, models.QuotaClass.resource == resource)))\ .scalar(): return quota_class = models.QuotaClass(created_at=created_at, class_name=class_name, resource=resource, hard_limit=hard_limit, deleted=False) session.add(quota_class) def upgrade(): bind = op.get_bind() _create_default(bind, 'backups', cfg.CONF.quota_backups) _create_default(bind, 'backup_gigabytes', cfg.CONF.quota_backup_gigabytes) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/db/migrations/versions/__init__.py0000664000175000017500000000000000000000000023043 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/db/migrations/versions/b7b88f50aab5_remove_quota_consistencygroups.py0000664000175000017500000000245500000000000031671 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Remove quota consistencygroups Revision ID: b7b88f50aab5 Revises: 9ab1b092a404 Create Date: 2021-11-10 11:54:50.123389 """ from alembic import op from sqlalchemy import orm from cinder.db.sqlalchemy import models # revision identifiers, used by Alembic. revision = 'b7b88f50aab5' down_revision = '9ab1b092a404' branch_labels = None depends_on = None def upgrade(): bind = op.get_bind() session = orm.Session(bind=bind) with session.begin(): for model in (models.QuotaClass, models.Quota, models.QuotaUsage, models.Reservation): session.query(model)\ .filter_by(deleted=False, resource='consistencygroups')\ .update(model.delete_values()) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/db/migrations/versions/b8660621f1b9_update_reservations_resource.py0000664000175000017500000000524600000000000031062 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Update reservations resource Revision ID: b8660621f1b9 Revises: 89aa6f9639f9 Create Date: 2021-10-27 17:25:16.790525 """ from alembic import op from oslo_log import log as logging import sqlalchemy as sa LOG = logging.getLogger(__name__) # revision identifiers, used by Alembic. revision = 'b8660621f1b9' down_revision = '89aa6f9639f9' branch_labels = None depends_on = None def upgrade(): connection = op.get_bind() for table_name in ('quotas', 'quota_classes', 'reservations'): table = sa.Table(table_name, sa.MetaData(), autoload_with=connection) col = table.c.resource # SQLite doesn't support altering tables, so we use a workaround if connection.engine.name == 'sqlite': with op.batch_alter_table(table_name) as batch_op: batch_op.alter_column('resource', existing_type=col.type, type_=sa.String(length=300)) else: # MySQL ALTER needs to have existing_type, existing_server_default, # and existing_nullable or it will do who-knows-what try: op.alter_column(table_name, 'resource', existing_type=col.type, existing_nullable=col.nullable, existing_server_default=col.server_default, type_=sa.String(length=300)) except Exception: # On MariaDB, max length varies depending on the version and # the InnoDB page size [1], so it is possible to have error # 1071 ('Specified key was too long; max key length is 767 # bytes"). Since this migration is to resolve a corner case, # deployments with those DB versions won't be covered. 
# [1]: https://mariadb.com/kb/en/library/innodb-limitations/#page-sizes # noqa if not connection.engine.name == 'mysql': raise LOG.warning('Error in migration %s, Cinder still affected by ' 'bug #1948962', revision) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/db/migrations/versions/c92a3e68beed_make_shared_targets_nullable.py0000664000175000017500000000323200000000000031234 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Make shared_targets nullable Revision ID: c92a3e68beed Revises: 921e1a36b076 Create Date: 2022-03-23 21:30:18.585830 """ from alembic import op import sqlalchemy as sa # revision identifiers, used by Alembic. revision = 'c92a3e68beed' down_revision = '921e1a36b076' branch_labels = None depends_on = None def upgrade(): connection = op.get_bind() # Preserve existing type, be it boolean or tinyint treated as boolean table = sa.Table('volumes', sa.MetaData(), autoload_with=connection) existing_type = table.c.shared_targets.type # SQLite doesn't support altering tables, so we use a workaround if connection.engine.name == 'sqlite': with op.batch_alter_table('volumes') as batch_op: batch_op.alter_column('shared_targets', existing_type=existing_type, type_=sa.Boolean(), nullable=True) else: op.alter_column('volumes', 'shared_targets', existing_type=existing_type, type_=sa.Boolean(), nullable=True) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/db/migrations/versions/daa98075b90d_add_resource_indexes.py0000664000175000017500000000353100000000000027377 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Add resource indexes Revision ID: daa98075b90d Revises: c92a3e68beed Create Date: 2021-11-26 10:26:41.883072 """ from alembic import op from oslo_db.sqlalchemy import utils from oslo_log import log as logging LOG = logging.getLogger(__name__) # revision identifiers, used by Alembic. 
revision = 'daa98075b90d' down_revision = 'c92a3e68beed' branch_labels = None depends_on = None INDEXES = ( ('groups', 'groups_deleted_project_id_idx', ('deleted', 'project_id')), ('group_snapshots', 'group_snapshots_deleted_project_id_idx', ('deleted', 'project_id')), ('volumes', 'volumes_deleted_project_id_idx', ('deleted', 'project_id')), ('volumes', 'volumes_deleted_host_idx', ('deleted', 'host')), ('backups', 'backups_deleted_project_id_idx', ('deleted', 'project_id')), ('snapshots', 'snapshots_deleted_project_id_idx', ('deleted', 'project_id')), ) def upgrade(): conn = op.get_bind() is_mysql = conn.dialect.name == 'mysql' for table, idx_name, fields in INDEXES: # Skip creation in mysql if it already has the index if is_mysql and utils.index_exists(conn, table, idx_name): LOG.info('Skipping index %s, already exists', idx_name) else: op.create_index(idx_name, table, fields) ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1759315577.075118 cinder-27.0.0/cinder/db/sqlalchemy/0000775000175000017500000000000000000000000017062 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/db/sqlalchemy/__init__.py0000664000175000017500000000000000000000000021161 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/db/sqlalchemy/api.py0000664000175000017500000100373300000000000020214 0ustar00zuulzuul00000000000000# Copyright (c) 2011 X.commerce, a business unit of eBay Inc. # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # Copyright 2014 IBM Corp. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Defines interface for DB access. Functions in this module are imported into the cinder.db namespace. Call these functions from cinder.db namespace, not the cinder.db.api namespace. All functions in this module return objects that implement a dictionary-like interface. Currently, many of these objects are sqlalchemy objects that implement a dictionary interface. However, a future goal is to have all of these objects be simple dictionaries. **Related Flags** :connection: string specifying the sqlalchemy connection to use, like: `sqlite:///var/lib/cinder/cinder.sqlite`. 
:enable_new_services: when adding a new service to the database, is it in the pool of available hardware (Default: True) """ import collections from collections import abc import datetime as dt import functools import itertools import re import sys import uuid from oslo_config import cfg from oslo_db import api as oslo_db_api from oslo_db import exception as db_exc from oslo_db import options from oslo_db.sqlalchemy import enginefacade from oslo_log import log as logging from oslo_utils import importutils from oslo_utils import timeutils from oslo_utils import uuidutils osprofiler_sqlalchemy = importutils.try_import('osprofiler.sqlalchemy') import sqlalchemy as sa from sqlalchemy import MetaData from sqlalchemy import or_, and_ from sqlalchemy.orm import joinedload, undefer_group, load_only from sqlalchemy.orm import RelationshipProperty from sqlalchemy import sql from sqlalchemy.sql.expression import bindparam from sqlalchemy.sql.expression import desc from sqlalchemy.sql.expression import true from sqlalchemy.sql import func from sqlalchemy.sql import sqltypes from cinder.api import common from cinder.common import sqlalchemyutils from cinder import db from cinder.db.sqlalchemy import models from cinder import exception from cinder.i18n import _ from cinder import objects from cinder.objects import fields from cinder import utils from cinder.volume import volume_utils CONF = cfg.CONF LOG = logging.getLogger(__name__) # Map with cases where attach status differs from volume status ATTACH_STATUS_MAP = {'attached': 'in-use', 'detached': 'available'} options.set_defaults(CONF, connection='sqlite:///$state_path/cinder.sqlite') main_context_manager = enginefacade.transaction_context() def get_engine(): return main_context_manager.writer.get_engine() def dispose_engine(): get_engine().dispose() _DEFAULT_QUOTA_NAME = 'default' def get_backend(): """The backend is this module itself.""" return sys.modules[__name__] def is_admin_context(context): """Indicates if the request context is an administrator.""" if not context: raise exception.CinderException( 'Use of empty request context is deprecated' ) return context.is_admin def is_user_context(context): """Indicates if the request context is a normal user.""" if not context: return False if context.is_admin: return False if not context.user_id or not context.project_id: return False return True def authorize_project_context(context, project_id): """Ensures a request has permission to access the given project.""" if is_user_context(context): if not context.project_id: raise exception.NotAuthorized() elif context.project_id != project_id: raise exception.NotAuthorized() def authorize_user_context(context, user_id): """Ensures a request has permission to access the given user.""" if is_user_context(context): if not context.user_id: raise exception.NotAuthorized() elif context.user_id != user_id: raise exception.NotAuthorized() def authorize_quota_class_context(context, class_name): """Ensures a request has permission to access the given quota class.""" if is_user_context(context): if not context.quota_class: raise exception.NotAuthorized() elif context.quota_class != class_name: raise exception.NotAuthorized() def require_admin_context(f): """Decorator to require admin request context. The first argument to the wrapped function must be the context. 
""" @functools.wraps(f) def wrapper(*args, **kwargs): if not is_admin_context(args[0]): raise exception.AdminRequired() return f(*args, **kwargs) return wrapper def require_context(f): """Decorator to require *any* user or admin context. This does no authorization for user or project access matching, see :py:func:`authorize_project_context` and :py:func:`authorize_user_context`. The first argument to the wrapped function must be the context. """ @functools.wraps(f) def wrapper(*args, **kwargs): if not is_admin_context(args[0]) and not is_user_context(args[0]): raise exception.NotAuthorized() return f(*args, **kwargs) return wrapper ################### @require_context @main_context_manager.reader def resource_exists(context, model, resource_id): conditions = [model.id == resource_id] # Match non deleted resources by the id if 'no' == context.read_deleted: conditions.append(~model.deleted) # If the context is not admin we limit it to the context's project if is_user_context(context) and hasattr(model, 'project_id'): conditions.append(model.project_id == context.project_id) query = context.session.query(sql.exists().where(and_(*conditions))) return query.scalar() def require_volume_exists(f): """Decorator to require the specified volume to exist. Requires the wrapped function to use context and volume_id as their first two arguments. """ @functools.wraps(f) def wrapper(context, volume_id, *args, **kwargs): if not resource_exists(context, models.Volume, volume_id): raise exception.VolumeNotFound(volume_id=volume_id) return f(context, volume_id, *args, **kwargs) return wrapper def require_snapshot_exists(f): """Decorator to require the specified snapshot to exist. Requires the wrapped function to use context and snapshot_id as their first two arguments. """ @functools.wraps(f) def wrapper(context, snapshot_id, *args, **kwargs): if not resource_exists(context, models.Snapshot, snapshot_id): raise exception.SnapshotNotFound(snapshot_id=snapshot_id) return f(context, snapshot_id, *args, **kwargs) return wrapper def require_backup_exists(f): """Decorator to require the specified snapshot to exist. Requires the wrapped function to use context and backup_id as their first two arguments. """ @functools.wraps(f) def wrapper(context, backup_id, *args, **kwargs): if not resource_exists(context, models.Backup, backup_id): raise exception.BackupNotFound(backup_id=backup_id) return f(context, backup_id, *args, **kwargs) return wrapper def require_qos_specs_exists(f): """Decorator to require the specified QoS speces exist. Requires the wrapped function to use context and qos_specs_id as their first two arguments. """ @functools.wraps(f) def wrapper(context, qos_specs_id, *args, **kwargs): if not resource_exists( context, models.QualityOfServiceSpecs, qos_specs_id, ): raise exception.QoSSpecsNotFound(specs_id=qos_specs_id) return f(context, qos_specs_id, *args, **kwargs) return wrapper def handle_db_data_error(f): @functools.wraps(f) def wrapper(*args, **kwargs): try: return f(*args, **kwargs) except db_exc.DBDataError: msg = _('Error writing field to database') LOG.exception(msg) raise exception.Invalid(msg) return wrapper def model_query(context, model, *args, **kwargs): """Query helper that accounts for context's `read_deleted` field. :param context: A request context to query under :param model: Model to query. Must be a subclass of ModelBase. :param args: Arguments to query. If None - model is used. :param read_deleted: if present, overrides context's read_deleted field. 
:param project_only: if present and context is user-type, then restrict query to match the context's project_id. """ read_deleted = kwargs.get('read_deleted') or context.read_deleted project_only = kwargs.get('project_only') query = context.session.query(model, *args) if read_deleted == 'no': query = query.filter_by(deleted=False) elif read_deleted == 'yes': pass # omit the filter to include deleted and active elif read_deleted == 'only': query = query.filter_by(deleted=True) elif read_deleted == 'int_no': query = query.filter_by(deleted=0) else: msg = _("Unrecognized read_deleted value '%s'") raise Exception(msg % read_deleted) if project_only and is_user_context(context): if model is models.VolumeAttachment: # NOTE(dulek): In case of VolumeAttachment, we need to join # `project_id` through `volume` relationship. query = query.filter( models.Volume.project_id == context.project_id ) else: query = query.filter_by(project_id=context.project_id) return query ################### def get_model_for_versioned_object(versioned_object): if isinstance(versioned_object, str): model_name = versioned_object else: model_name = versioned_object.obj_name() if model_name == 'BackupImport': return models.Backup return getattr(models, model_name) def _get_get_method(model): # Exceptions to model to get methods, in general method names are a simple # conversion changing ORM name from camel case to snake format and adding # _get to the string GET_EXCEPTIONS = { models.ConsistencyGroup: consistencygroup_get, models.VolumeType: _volume_type_get_full, models.QualityOfServiceSpecs: qos_specs_get, models.GroupType: _group_type_get_full, models.CGSnapshot: cgsnapshot_get, } if model in GET_EXCEPTIONS: return GET_EXCEPTIONS[model] # General conversion # Convert camel cased model name to snake format s = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', model.__name__) # Get method must be snake formatted model name concatenated with _get method_name = re.sub('([a-z0-9])([A-Z])', r'\1_\2', s).lower() + '_get' return globals().get(method_name) _GET_METHODS = {} @require_context @main_context_manager.reader def get_by_id(context, model, id, *args, **kwargs): # Add get method to cache dictionary if it's not already there if not _GET_METHODS.get(model): _GET_METHODS[model] = _get_get_method(model) return _GET_METHODS[model](context, id, *args, **kwargs) def condition_db_filter(model, field, value): """Create matching filter. If value is an iterable other than a string, any of the values is a valid match (OR), so we'll use SQL IN operator. If it's not an iterator == operator will be used. """ orm_field = getattr(model, field) # For values that must match and are iterables we use IN if isinstance(value, abc.Iterable) and not isinstance(value, str): # We cannot use in_ when one of the values is None if None not in value: return orm_field.in_(value) return or_(orm_field == v for v in value) # For values that must match and are not iterables we use == return orm_field == value def condition_not_db_filter(model, field, value, auto_none=True): """Create non matching filter. If value is an iterable other than a string, any of the values is a valid match (OR), so we'll use SQL IN operator. If it's not an iterator == operator will be used. If auto_none is True then we'll consider NULL values as different as well, like we do in Python and not like SQL does. 
""" result = ~condition_db_filter(model, field, value) # pylint: disable=E1130 if auto_none and ( ( isinstance(value, abc.Iterable) and not isinstance(value, str) and None not in value ) or (value is not None) ): orm_field = getattr(model, field) result = or_(result, orm_field.is_(None)) return result def is_orm_value(obj): """Check if object is an ORM field or expression.""" return isinstance( obj, ( sa.orm.attributes.InstrumentedAttribute, sa.sql.expression.ColumnElement, ), ) def _check_is_not_multitable(values, model): """Check that we don't try to do multitable updates. Since PostgreSQL doesn't support multitable updates we want to always fail if we have such a query in our code, even if with MySQL it would work. """ used_models = set() for field in values: if isinstance(field, sa.orm.attributes.InstrumentedAttribute): used_models.add(field.class_) elif isinstance(field, str): used_models.add(model) else: raise exception.ProgrammingError( reason=( 'DB Conditional update - Unknown field type, must be ' 'string or ORM field.' ), ) if len(used_models) > 1: raise exception.ProgrammingError( reason=( 'DB Conditional update - Error in query, multitable ' 'updates are not supported.' ), ) @require_context @oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True) def _conditional_update( context, model, values, expected_values, filters=None, include_deleted='no', project_only=False, order=None, ): """Compare-and-swap conditional update SQLAlchemy implementation.""" _check_is_not_multitable(values, model) # Provided filters will become part of the where clause where_conds = list(filters) if filters else [] # Build where conditions with operators ==, !=, NOT IN and IN for field, condition in expected_values.items(): if not isinstance(condition, db.Condition): condition = db.Condition(condition, field) where_conds.append(condition.get_filter(model, field)) # Create the query with the where clause query = model_query( context, model, read_deleted=include_deleted, project_only=project_only ).filter(*where_conds) # NOTE(geguileo): Some DBs' update method are order dependent, and they # behave differently depending on the order of the values, example on a # volume with 'available' status: # UPDATE volumes SET previous_status=status, status='reyping' # WHERE id='44f284f9-877d-4fce-9eb4-67a052410054'; # Will result in a volume with 'retyping' status and 'available' # previous_status both on SQLite and MariaDB, but # UPDATE volumes SET status='retyping', previous_status=status # WHERE id='44f284f9-877d-4fce-9eb4-67a052410054'; # Will yield the same result in SQLite but will result in a volume with # status and previous_status set to 'retyping' in MariaDB, which is not # what we want, so order must be taken into consideration. 
# Order for the update will be: # 1- Order specified in argument order # 2- Values that refer to other ORM field (simple and using operations, # like size + 10) # 3- Values that use Case clause (since they may be using fields as well) # 4- All other values order = list(order) if order else tuple() orm_field_list = [] case_list = [] unordered_list = [] for key, value in values.items(): if isinstance(value, db.Case): value = sa.case( *value.whens, value=value.value, else_=value.else_, ) if key in order: # pylint: disable=E1137; ("order" is known to be a list, here) order[order.index(key)] = (key, value) continue # NOTE(geguileo): Check Case first since it's a type of orm value if isinstance(value, sql.elements.Case): value_list = case_list elif is_orm_value(value): value_list = orm_field_list else: value_list = unordered_list value_list.append((key, value)) update_args = {'synchronize_session': False} # If we don't have to enforce any kind of order just pass along the values # dictionary since it will be a little more efficient. if order or orm_field_list or case_list: # If we are doing an update with ordered parameters, we need to add # remaining values to the list values = itertools.chain( order, orm_field_list, case_list, unordered_list ) # And we have to tell SQLAlchemy that we want to preserve the order update_args['update_args'] = {'preserve_parameter_order': True} # Return True if we were able to change any DB entry, False otherwise result = query.update(values, **update_args) return 0 != result @require_context @main_context_manager.writer def conditional_update( context, model, values, expected_values, filters=None, include_deleted='no', project_only=False, order=None, ): """Compare-and-swap conditional update SQLAlchemy implementation.""" return _conditional_update( context, model, values, expected_values, filters=filters, include_deleted=include_deleted, project_only=project_only, order=order, ) ################### def _sync_volumes( context, project_id, volume_type_id=None, volume_type_name=None, ): volumes, _ = _volume_data_get_for_project( context, project_id, volume_type_id=volume_type_id, ) key = 'volumes' if volume_type_name: key += '_' + volume_type_name return {key: volumes} def _sync_snapshots( context, project_id, volume_type_id=None, volume_type_name=None, ): snapshots, _ = _snapshot_data_get_for_project( context, project_id, volume_type_id=volume_type_id, ) key = 'snapshots' if volume_type_name: key += '_' + volume_type_name return {key: snapshots} def _sync_backups( context, project_id, volume_type_id=None, volume_type_name=None, ): backups, _ = _backup_data_get_for_project( context, project_id, volume_type_id=volume_type_id, ) key = 'backups' return {key: backups} def _sync_gigabytes( context, project_id, volume_type_id=None, volume_type_name=None, ): _, vol_gigs = _volume_data_get_for_project( context, project_id, volume_type_id=volume_type_id, ) key = 'gigabytes' if volume_type_name: key += '_' + volume_type_name if CONF.no_snapshot_gb_quota: return {key: vol_gigs} _, snap_gigs = _snapshot_data_get_for_project( context, project_id, volume_type_id=volume_type_id, ) return {key: vol_gigs + snap_gigs} def _sync_backup_gigabytes( context, project_id, volume_type_id=None, volume_type_name=None, ): key = 'backup_gigabytes' _, backup_gigs = _backup_data_get_for_project( context, project_id, volume_type_id=volume_type_id, ) return {key: backup_gigs} def _sync_groups( context, project_id, volume_type_id=None, volume_type_name=None, ): _, groups = 
_group_data_get_for_project(context, project_id) key = 'groups' return {key: groups} QUOTA_SYNC_FUNCTIONS = { '_sync_volumes': _sync_volumes, '_sync_snapshots': _sync_snapshots, '_sync_gigabytes': _sync_gigabytes, '_sync_backups': _sync_backups, '_sync_backup_gigabytes': _sync_backup_gigabytes, '_sync_groups': _sync_groups, } ################### def _clean_filters(filters): return {k: v for k, v in filters.items() if v is not None} def _filter_host(field, value, match_level=None): """Generate a filter condition for host and cluster fields. Levels are: - 'pool': Will search for an exact match - 'backend': Will search for exact match and value#* - 'host'; Will search for exact match, value@* and value#* If no level is provided we'll determine it based on the value we want to match: - 'pool': If '#' is present in value - 'backend': If '@' is present in value and '#' is not present - 'host': In any other case :param field: ORM field. Ex: objects.Volume.model.host :param value: String to compare with :param match_level: 'pool', 'backend', or 'host' """ # If we don't set level we'll try to determine it automatically. LIKE # operations are expensive, so we try to reduce them to the minimum. if match_level is None: if '#' in value: match_level = 'pool' elif '@' in value: match_level = 'backend' else: match_level = 'host' # Mysql is not doing case sensitive filtering, so we force it conn_str = CONF.database.connection if conn_str.startswith('mysql') and conn_str[5] in ['+', ':']: cmp_value = func.binary(value) like_op = 'LIKE BINARY' else: cmp_value = value like_op = 'LIKE' conditions = [field == cmp_value] if match_level != 'pool': conditions.append(field.op(like_op)(value + '#%')) if match_level == 'host': conditions.append(field.op(like_op)(value + '@%')) return or_(*conditions) def _filter_time_comparison(field, time_filter_dict): """Generate a filter condition for time comparison operators""" conditions = [] for operator in time_filter_dict: filter_time = timeutils.normalize_time(time_filter_dict[operator]) if operator == 'gt': conditions.append(field.op('>')(filter_time)) elif operator == 'gte': conditions.append(field.op('>=')(filter_time)) if operator == 'eq': conditions.append(field.op('=')(filter_time)) elif operator == 'neq': conditions.append(field.op('!=')(filter_time)) if operator == 'lt': conditions.append(field.op('<')(filter_time)) elif operator == 'lte': conditions.append(field.op('<=')(filter_time)) return or_(*conditions) def _clustered_bool_field_filter(query, field_name, filter_value): # Now that we have clusters, a service is disabled/frozen if the service # doesn't belong to a cluster or if it belongs to a cluster and the cluster # itself is disabled/frozen. 
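# Hedged sketch of the predicate assembled below (shape only, not the exact
# SQL that SQLAlchemy will emit): a service counts as <field_name> when
#
#     services.cluster_name IS NULL AND services.<field_name>
#   OR
#     services.cluster_name IS NOT NULL AND EXISTS (
#         SELECT 1 FROM clusters
#          WHERE clusters.name = services.cluster_name
#            AND clusters.binary = services.binary
#            AND NOT clusters.deleted
#            AND clusters.<field_name>)
#
# and the whole expression is negated when filter_value is falsy.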
if filter_value is not None: query_filter = or_( and_( models.Service.cluster_name.is_(None), getattr(models.Service, field_name), ), and_( models.Service.cluster_name.isnot(None), sql.exists().where( and_( models.Cluster.name == models.Service.cluster_name, models.Cluster.binary == models.Service.binary, ~models.Cluster.deleted, getattr(models.Cluster, field_name), ) ), ), ) if not filter_value: query_filter = ~query_filter # pylint: disable=E1130 query = query.filter(query_filter) return query def _service_query( context, read_deleted='no', host=None, cluster_name=None, is_up=None, host_or_cluster=None, backend_match_level=None, disabled=None, frozen=None, **filters, ): filters = _clean_filters(filters) if filters and not is_valid_model_filters(models.Service, filters): return None query = model_query(context, models.Service, read_deleted=read_deleted) # Host and cluster are particular cases of filters, because we must # retrieve not only exact matches (single backend configuration), but also # match those that have the backend defined (multi backend configuration). if host: query = query.filter( _filter_host(models.Service.host, host, backend_match_level) ) if cluster_name: query = query.filter( _filter_host( models.Service.cluster_name, cluster_name, backend_match_level ) ) if host_or_cluster: query = query.filter( or_( _filter_host( models.Service.host, host_or_cluster, backend_match_level ), _filter_host( models.Service.cluster_name, host_or_cluster, backend_match_level, ), ) ) query = _clustered_bool_field_filter(query, 'disabled', disabled) query = _clustered_bool_field_filter(query, 'frozen', frozen) if filters: query = query.filter_by(**filters) if is_up is not None: date_limit = utils.service_expired_time() svc = models.Service filter_ = or_( and_(svc.created_at.isnot(None), svc.created_at >= date_limit), and_(svc.updated_at.isnot(None), svc.updated_at >= date_limit), ) query = query.filter(filter_ == is_up) return query @require_admin_context @main_context_manager.writer def service_destroy(context, service_id): query = _service_query(context, id=service_id) updated_values = models.Service.delete_values() if not query.update(updated_values): raise exception.ServiceNotFound(service_id=service_id) return updated_values @require_admin_context @main_context_manager.reader def service_get(context, service_id=None, backend_match_level=None, **filters): """Get a service that matches the criteria. A possible filter is is_up=True and it will filter nodes that are down. :param service_id: Id of the service. :param filters: Filters for the query in the form of key/value. :param backend_match_level: 'pool', 'backend', or 'host' for host and cluster filters (as defined in _filter_host method) :raise ServiceNotFound: If service doesn't exist. """ query = _service_query( context, backend_match_level=backend_match_level, id=service_id, **filters, ) service = None if not query else query.first() if not service: serv_id = service_id or filters.get('topic') or filters.get('binary') raise exception.ServiceNotFound( service_id=serv_id, host=filters.get('host') ) return service @require_admin_context @main_context_manager.reader def service_get_all(context, backend_match_level=None, **filters): """Get all services that match the criteria. A possible filter is is_up=True and it will filter nodes that are down. :param filters: Filters for the query in the form of key/value. 
:param backend_match_level: 'pool', 'backend', or 'host' for host and cluster filters (as defined in _filter_host method) """ query = _service_query( context, backend_match_level=backend_match_level, **filters ) return [] if not query else query.all() @require_admin_context @main_context_manager.reader def service_get_by_uuid(context, service_uuid): query = model_query(context, models.Service).filter_by(uuid=service_uuid) result = query.first() if not result: raise exception.ServiceNotFound(service_id=service_uuid) return result @require_admin_context @main_context_manager.writer def service_create(context, values): service_ref = models.Service() service_ref.update(values) if not CONF.enable_new_services: service_ref.disabled = True service_ref.save(context.session) return service_ref @require_admin_context @main_context_manager.writer def service_update(context, service_id, values, retry=True): def _service_update(context, service_id, values): query = _service_query(context, id=service_id) if 'disabled' in values: entity = query.column_descriptions[0]['entity'] values = values.copy() values['modified_at'] = values.get('modified_at', timeutils.utcnow()) values['updated_at'] = values.get('updated_at', entity.updated_at) result = query.update(values) if not result: raise exception.ServiceNotFound(service_id=service_id) @oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True) def _service_update_retry(context, service_id, values): _service_update(context, service_id, values) if retry: _service_update_retry(context, service_id, values) else: _service_update(context, service_id, values) ################### @require_admin_context @main_context_manager.reader def is_backend_frozen(context, host, cluster_name): """Check if a storage backend is frozen based on host and cluster_name.""" if cluster_name: model = models.Cluster conditions = [model.name == volume_utils.extract_host(cluster_name)] else: model = models.Service conditions = [model.host == volume_utils.extract_host(host)] conditions.extend((~model.deleted, model.frozen)) query = context.session.query(sql.exists().where(and_(*conditions))) frozen = query.scalar() return frozen ################### def _cluster_query( context, is_up=None, get_services=False, services_summary=False, read_deleted='no', name_match_level=None, name=None, **filters, ): filters = _clean_filters(filters) if filters and not is_valid_model_filters(models.Cluster, filters): return None query = model_query(context, models.Cluster, read_deleted=read_deleted) # Cluster is a special case of filter, because we must match exact match # as well as hosts that specify the backend if name: query = query.filter( _filter_host(models.Cluster.name, name, name_match_level) ) if filters: query = query.filter_by(**filters) if services_summary: query = query.options(undefer_group('services_summary')) # We bind the expiration time to now (as it changes with each query) # and is required by num_down_hosts query = query.params(expired=utils.service_expired_time()) elif 'num_down_hosts' in filters: query = query.params(expired=utils.service_expired_time()) if get_services: query = query.options(joinedload(models.Cluster.services)) if is_up is not None: date_limit = utils.service_expired_time() filter_ = and_( models.Cluster.last_heartbeat.isnot(None), models.Cluster.last_heartbeat >= date_limit, ) query = query.filter(filter_ == is_up) return query @require_admin_context @main_context_manager.reader def cluster_get( context, id=None, is_up=None, get_services=False, 
services_summary=False, read_deleted='no', name_match_level=None, **filters, ): """Get a cluster that matches the criteria. :param id: Id of the cluster. :param is_up: Boolean value to filter based on the cluster's up status. :param get_services: If we want to load all services from this cluster. :param services_summary: If we want to load num_hosts and num_down_hosts fields. :param read_deleted: Filtering based on delete status. Default value is "no". :param filters: Field based filters in the form of key/value. :param name_match_level: 'pool', 'backend', or 'host' for name filter (as defined in _filter_host method) :raise ClusterNotFound: If cluster doesn't exist. """ query = _cluster_query( context, is_up, get_services, services_summary, read_deleted, name_match_level, id=id, **filters, ) cluster = None if not query else query.first() if not cluster: cluster_id = id or str(filters) raise exception.ClusterNotFound(id=cluster_id) return cluster @require_admin_context @main_context_manager.reader def cluster_get_all( context, is_up=None, get_services=False, services_summary=False, read_deleted='no', name_match_level=None, **filters, ): """Get all clusters that match the criteria. :param is_up: Boolean value to filter based on the cluster's up status. :param get_services: If we want to load all services from this cluster. :param services_summary: If we want to load num_hosts and num_down_hosts fields. :param read_deleted: Filtering based on delete status. Default value is "no". :param name_match_level: 'pool', 'backend', or 'host' for name filter (as defined in _filter_host method) :param filters: Field based filters in the form of key/value. """ query = _cluster_query( context, is_up, get_services, services_summary, read_deleted, name_match_level, **filters, ) return [] if not query else query.all() @require_admin_context @main_context_manager.writer def cluster_create(context, values): """Create a cluster from the values dictionary.""" cluster_ref = models.Cluster() cluster_ref.update(values) # Provided disabled value takes precedence if values.get('disabled') is None: cluster_ref.disabled = not CONF.enable_new_services try: cluster_ref.save(context.session) # We mark that newly created cluster has no hosts to prevent # problems at the OVO level cluster_ref.last_heartbeat = None return cluster_ref # If we had a race condition (another non deleted cluster exists with the # same name) raise Duplicate exception. except db_exc.DBDuplicateEntry: raise exception.ClusterExists(name=values.get('name')) @require_admin_context @oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True) @main_context_manager.writer def cluster_update(context, cluster_id, values): """Set the given properties on an cluster and update it. Raises ClusterNotFound if cluster does not exist. """ query = _cluster_query(context, id=cluster_id) result = query.update(values) if not result: raise exception.ClusterNotFound(id=cluster_id) @require_admin_context @main_context_manager.writer def cluster_destroy(context, cluster_id): """Destroy the cluster or raise if it does not exist or has hosts.""" query = _cluster_query(context, id=cluster_id) query = query.filter(models.Cluster.num_hosts == 0) # If the update doesn't succeed we don't know if it's because the # cluster doesn't exist or because it has hosts. 
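# In other words: the query above already filters on num_hosts == 0, so a
# zero-row update below is re-checked with cluster_get(); if the cluster is
# missing that call raises ClusterNotFound for us, and if it succeeds the only
# remaining explanation is that the cluster still has hosts, hence
# ClusterHasHosts.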
result = query.update( models.Cluster.delete_values(), synchronize_session=False ) if not result: # This will fail if the cluster doesn't exist raising the right # exception cluster_get(context, id=cluster_id) # If it doesn't fail, then the problem is that there are hosts raise exception.ClusterHasHosts(id=cluster_id) ################### def _metadata_refs(metadata_dict, meta_class): metadata_refs = [] if metadata_dict: for k, v in metadata_dict.items(): metadata_ref = meta_class() metadata_ref['key'] = k metadata_ref['value'] = v metadata_refs.append(metadata_ref) return metadata_refs def _dict_with_extra_specs_if_authorized(context, inst_type_query): """Convert type query result to dict with extra_spec and rate_limit. Takes a volume type query returned by sqlalchemy and returns it as a dictionary, converting the extra_specs entry from a list of dicts. NOTE the contents of extra-specs are admin readable only. If the context passed in for this request is not admin then we will return an empty extra-specs dict rather than providing the admin only details. Example response with admin context: 'extra_specs' : [{'key': 'k1', 'value': 'v1', ...}, ...] to a single dict: 'extra_specs' : {'k1': 'v1'} """ inst_type_dict = dict(inst_type_query) extra_specs = { x['key']: x['value'] for x in inst_type_query['extra_specs'] } inst_type_dict['extra_specs'] = extra_specs return inst_type_dict ################### def _dict_with_group_specs_if_authorized(context, inst_type_query): """Convert group type query result to dict with spec and rate_limit. Takes a group type query returned by sqlalchemy and returns it as a dictionary, converting the extra_specs entry from a list of dicts. NOTE the contents of extra-specs are admin readable only. If the context passed in for this request is not admin then we will return an empty extra-specs dict rather than providing the admin only details. Example response with admin context: 'group_specs' : [{'key': 'k1', 'value': 'v1', ...}, ...] 
to a single dict: 'group_specs' : {'k1': 'v1'} """ inst_type_dict = dict(inst_type_query) if not is_admin_context(context): del inst_type_dict['group_specs'] else: group_specs = { x['key']: x['value'] for x in inst_type_query['group_specs'] } inst_type_dict['group_specs'] = group_specs return inst_type_dict ################### def _quota_get(context, project_id, resource): result = ( model_query(context, models.Quota, read_deleted="no") .filter_by(project_id=project_id) .filter_by(resource=resource) .first() ) if not result: raise exception.ProjectQuotaNotFound(project_id=project_id) return result @require_context @main_context_manager.reader def quota_get(context, project_id, resource): return _quota_get(context, project_id, resource) @require_context @main_context_manager.reader def quota_get_all_by_project(context, project_id): rows = ( model_query(context, models.Quota, read_deleted="no") .filter_by(project_id=project_id) .all() ) result = {'project_id': project_id} for row in rows: result[row.resource] = row.hard_limit return result @require_context @main_context_manager.writer def quota_create(context, project_id, resource, limit): quota_ref = models.Quota() quota_ref.project_id = project_id quota_ref.resource = resource quota_ref.hard_limit = limit quota_ref.save(context.session) return quota_ref @require_context @main_context_manager.writer def quota_update(context, project_id, resource, limit): quota_ref = _quota_get(context, project_id, resource) quota_ref.hard_limit = limit quota_ref.save(context.session) return quota_ref @require_context @main_context_manager.writer def quota_update_resource(context, old_res, new_res): quotas = ( model_query(context, models.Quota, read_deleted='no') .filter_by(resource=old_res) .all() ) for quota in quotas: quota.resource = new_res quota.save(context.session) return quota @require_admin_context @main_context_manager.writer def quota_destroy(context, project_id, resource): quota_ref = _quota_get(context, project_id, resource) return quota_ref.delete(context.session) ################### def _quota_class_get(context, class_name, resource): result = ( model_query(context, models.QuotaClass, read_deleted="no") .filter_by(class_name=class_name) .filter_by(resource=resource) .first() ) if not result: raise exception.QuotaClassNotFound(class_name=class_name) return result @require_context @main_context_manager.reader def quota_class_get(context, class_name, resource): return _quota_class_get(context, class_name, resource) @require_context @main_context_manager.reader def quota_class_get_defaults(context): rows = ( model_query(context, models.QuotaClass, read_deleted="no") .filter_by(class_name=_DEFAULT_QUOTA_NAME) .all() ) result = {'class_name': _DEFAULT_QUOTA_NAME} for row in rows: result[row.resource] = row.hard_limit return result @require_context @main_context_manager.reader def quota_class_get_all_by_name(context, class_name): rows = ( model_query(context, models.QuotaClass, read_deleted="no") .filter_by(class_name=class_name) .all() ) result = {'class_name': class_name} for row in rows: result[row.resource] = row.hard_limit return result @require_context @main_context_manager.reader def _quota_class_get_all_by_resource(context, resource): result = ( model_query(context, models.QuotaClass, read_deleted="no") .filter_by(resource=resource) .all() ) return result @handle_db_data_error @require_context @main_context_manager.writer def quota_class_create(context, class_name, resource, limit): quota_class_ref = models.QuotaClass() 
quota_class_ref.class_name = class_name quota_class_ref.resource = resource quota_class_ref.hard_limit = limit quota_class_ref.save(context.session) return quota_class_ref @require_context @main_context_manager.writer def quota_class_update(context, class_name, resource, limit): quota_class_ref = _quota_class_get(context, class_name, resource) quota_class_ref.hard_limit = limit quota_class_ref.save(context.session) return quota_class_ref @require_context @main_context_manager.writer def quota_class_update_resource(context, old_res, new_res): quota_class_list = _quota_class_get_all_by_resource(context, old_res) for quota_class in quota_class_list: quota_class.resource = new_res quota_class.save(context.session) @require_context @main_context_manager.writer def quota_class_destroy(context, class_name, resource): quota_class_ref = _quota_class_get(context, class_name, resource) return quota_class_ref.delete(context.session) @require_context @main_context_manager.writer def quota_class_destroy_all_by_name(context, class_name): quota_classes = ( model_query(context, models.QuotaClass, read_deleted="no") .filter_by(class_name=class_name) .all() ) for quota_class_ref in quota_classes: quota_class_ref.delete(context.session) ################### @require_context @main_context_manager.reader def quota_usage_get(context, project_id, resource): result = ( model_query(context, models.QuotaUsage, read_deleted="no") .filter_by(project_id=project_id) .filter_by(resource=resource) .first() ) if not result: raise exception.QuotaUsageNotFound(project_id=project_id) return result @require_context @main_context_manager.reader def quota_usage_get_all_by_project(context, project_id): rows = ( model_query(context, models.QuotaUsage, read_deleted="no") .filter_by(project_id=project_id) .all() ) result = {'project_id': project_id} for row in rows: result[row.resource] = dict(in_use=row.in_use, reserved=row.reserved) return result def _quota_usage_create( context, project_id, resource, in_use, reserved, until_refresh, ): quota_usage_ref = models.QuotaUsage() quota_usage_ref.project_id = project_id quota_usage_ref.resource = resource quota_usage_ref.in_use = in_use quota_usage_ref.reserved = reserved quota_usage_ref.until_refresh = until_refresh quota_usage_ref.save(context.session) return quota_usage_ref def _reservation_create( context, uuid, usage, project_id, resource, delta, expire, ): usage_id = usage['id'] if usage else None reservation_ref = models.Reservation() reservation_ref.uuid = uuid reservation_ref.usage_id = usage_id reservation_ref.project_id = project_id reservation_ref.resource = resource reservation_ref.delta = delta reservation_ref.expire = expire reservation_ref.save(context.session) return reservation_ref # NOTE(johannes): The quota code uses SQL locking to ensure races don't # cause under or over counting of resources. To avoid deadlocks, this # code always acquires the lock on quota_usages before acquiring the lock # on reservations. 
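# Illustrative only (a hedged sketch, not an existing code path in this
# module): any new helper that needs rows from both tables should keep that
# same ordering, e.g.
#
#     usages = _get_quota_usages(context, project_id)         # lock quota_usages
#     rsvs = _quota_reservations(context, reservation_uuids)  # then reservations
#
# Mixing the order between callers is exactly the pattern that produces the
# deadlocks the note above warns about.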
def _get_quota_usages(context, project_id, resources=None): # Broken out for testability query = model_query( context, models.QuotaUsage, read_deleted="no" ).filter_by(project_id=project_id) if resources: query = query.filter(models.QuotaUsage.resource.in_(list(resources))) rows = query.order_by(models.QuotaUsage.id.asc()).with_for_update().all() return {row.resource: row for row in rows} def _get_quota_usages_by_resource(context, resource): rows = ( model_query(context, models.QuotaUsage, deleted="no") .filter_by(resource=resource) .order_by(models.QuotaUsage.id.asc()) .with_for_update() .all() ) return rows @require_context @oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True) @main_context_manager.writer def quota_usage_update_resource(context, old_res, new_res): usages = _get_quota_usages_by_resource(context, old_res) for usage in usages: usage.resource = new_res usage.until_refresh = 1 def _get_sync_updates(ctxt, project_id, resources, resource_name): """Return usage for a specific resource. Resources are volumes, gigabytes, backups, snapshots, and also volumes_ snapshots_ for each volume type. """ # Grab the sync routine sync = QUOTA_SYNC_FUNCTIONS[resources[resource_name].sync] # VolumeTypeResource includes the id and name of the resource. volume_type_id = getattr(resources[resource_name], 'volume_type_id', None) volume_type_name = getattr( resources[resource_name], 'volume_type_name', None ) updates = sync( ctxt, project_id, volume_type_id=volume_type_id, volume_type_name=volume_type_name, ) return updates def _is_duplicate(exc): """Check if an exception is caused by a unique constraint failure.""" return isinstance(exc, db_exc.DBDuplicateEntry) @require_context @oslo_db_api.wrap_db_retry( max_retries=5, retry_on_deadlock=True, exception_checker=_is_duplicate ) @main_context_manager.writer def quota_reserve( context, resources, quotas, deltas, expire, until_refresh, max_age, project_id=None, ): elevated = context.elevated() if project_id is None: project_id = context.project_id # Loop until we can lock all the resource rows we'll be modifying while True: # Get the current usages and lock existing rows usages = _get_quota_usages( context, project_id, resources=deltas.keys() ) missing = [res for res in deltas if res not in usages] # If we have successfully locked all the rows we can continue. # SELECT ... FOR UPDATE used in _get_quota usages cannot lock # non-existing rows, so there can be races with other requests # trying to create those rows. if not missing: break # Create missing rows calculating current values instead of # assuming there are no used resources as admins may have been # using this mechanism to force quota usage refresh. for resource in missing: updates = _get_sync_updates( elevated, project_id, resources, resource, ) _quota_usage_create( elevated, project_id, resource, updates[resource], 0, until_refresh or None, ) # NOTE: When doing the commit there can be a race condition with # other service instances or thread that are also creating the # same rows and in that case this will raise either a Deadlock # exception (when multiple transactions were creating the same rows # and the DB failed to acquire the row lock on the non-first # transaction) or a DBDuplicateEntry exception if some other # transaction created the row between us doing the # _get_quota_usages and here. In both cases this transaction will # be rolled back and the wrap_db_retry decorator will retry. # Commit new rows to the DB. 
context.session.commit() # Start a new session before trying to lock all the rows again. By # trying to get all the locks in a loop we can protect us against # admins directly deleting DB rows. context.session.begin() # Handle usage refresh for resource in deltas.keys(): # Do we need to refresh the usage? refresh = False if usages[resource].in_use < 0: # If we created the entry right now we want to refresh. # Negative in_use count indicates a desync, so try to # heal from that... refresh = True elif usages[resource].until_refresh is not None: usages[resource].until_refresh -= 1 if usages[resource].until_refresh <= 0: refresh = True elif ( max_age and usages[resource].updated_at is not None and ( ( timeutils.utcnow() - usages[resource].updated_at ).total_seconds() >= max_age ) ): refresh = True # OK, refresh the usage if refresh: updates = _get_sync_updates( elevated, project_id, resources, resource, ) # Updates will always contain a single resource usage matching # the resource variable. usages[resource].in_use = updates[resource] usages[resource].until_refresh = until_refresh or None # There are 3 cases where we want to update "until_refresh" in the # DB: when we enabled it, when we disabled it, and when we changed # to a value lower than the current remaining value. else: res_until = usages[resource].until_refresh if (res_until is None and until_refresh) or ( (res_until or 0) > (until_refresh or 0) ): usages[resource].until_refresh = until_refresh or None # Check for deltas that would go negative unders = [ r for r, delta in deltas.items() if delta < 0 and delta + usages[r].in_use < 0 ] # TODO(mc_nair): Should ignore/zero alloc if using non-nested driver # Now, let's check the quotas # NOTE(Vek): We're only concerned about positive increments. # If a project has gone over quota, we want them to # be able to reduce their usage without any # problems. overs = [ r for r, delta in deltas.items() if quotas[r] >= 0 and delta >= 0 and quotas[r] < delta + usages[r].total ] # NOTE(Vek): The quota check needs to be in the transaction, # but the transaction doesn't fail just because # we're over quota, so the OverQuota raise is # outside the transaction. If we did the raise # here, our usage updates would be discarded, but # they're not invalidated by being over-quota. # Create the reservations if not overs: reservations = [] for resource, delta in deltas.items(): usage = usages[resource] reservation = _reservation_create( elevated, str(uuid.uuid4()), usage, project_id, resource, delta, expire, ) reservations.append(reservation.uuid) # Also update the reserved quantity # NOTE(Vek): Again, we are only concerned here about # positive increments. Here, though, we're # worried about the following scenario: # # 1) User initiates resize down. # 2) User allocates a new instance. # 3) Resize down fails or is reverted. # 4) User is now over quota. # # To prevent this, we only update the # reserved value if the delta is positive. 
if delta > 0: usages[resource].reserved += delta if unders: LOG.warning( "Reservation would make usage less than 0 for the " "following resources, so on commit they will be " "limited to prevent going below 0: %s", unders, ) if overs: usages = { k: dict(in_use=v.in_use, reserved=v.reserved) for k, v in usages.items() } raise exception.OverQuota( overs=sorted(overs), quotas=quotas, usages=usages ) return reservations def _quota_reservations(context, reservations): """Return the relevant reservations.""" # Get the listed reservations return ( model_query(context, models.Reservation, read_deleted="no") .filter(models.Reservation.uuid.in_(reservations)) .with_for_update() .all() ) def _get_reservation_resources(context, reservation_ids): """Return the relevant resources by reservations.""" reservations = ( model_query(context, models.Reservation, read_deleted="no") .options(load_only(models.Reservation.resource)) .filter(models.Reservation.uuid.in_(reservation_ids)) .all() ) return {r.resource for r in reservations} def _dict_with_usage_id(usages): return {row.id: row for row in usages.values()} @require_context @oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True) @main_context_manager.writer def reservation_commit(context, reservations, project_id=None): # NOTE: There's a potential race condition window with # reservation_expire, since _get_reservation_resources does not lock # the rows, but we won't fix it because: # - Minuscule chance of happening, since quota expiration is usually # very high # - Solution could create a DB lock on rolling upgrades since we need # to reverse the order of locking the rows. usages = _get_quota_usages( context, project_id, resources=_get_reservation_resources(context, reservations), ) usages = _dict_with_usage_id(usages) for reservation in _quota_reservations(context, reservations): usage = usages[reservation.usage_id] delta = reservation.delta if delta >= 0: usage.reserved -= min(delta, usage.reserved) # For negative deltas make sure we never go into negative usage elif -delta > usage.in_use: delta = -usage.in_use usage.in_use += delta reservation.delete(context.session) @require_context @oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True) @main_context_manager.writer def reservation_rollback(context, reservations, project_id=None): # NOTE: There's a potential race condition window with # reservation_expire, since _get_reservation_resources does not lock # the rows, but we won't fix it because: # - Minuscule chance of happening, since quota expiration is usually # very high # - Solution could create a DB lock on rolling upgrades since we need # to reverse the order of locking the rows. usages = _get_quota_usages( context, project_id, resources=_get_reservation_resources(context, reservations), ) usages = _dict_with_usage_id(usages) for reservation in _quota_reservations(context, reservations): usage = usages[reservation.usage_id] if reservation.delta >= 0: usage.reserved -= min(reservation.delta, usage.reserved) reservation.delete(context.session) @require_context def quota_destroy_by_project(context, project_id): """Destroy all limit quotas associated with a project. Leaves usage and reservation quotas intact. """ quota_destroy_all_by_project(context, project_id, only_quotas=True) # TODO(stephenfin): No one is using this except 'quota_destroy_by_project' # above, so the only_quotas=False path could be removed. 
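# For reference (mirrors the caller defined just above):
# quota_destroy_by_project() reduces to
#
#     quota_destroy_all_by_project(context, project_id, only_quotas=True)
#
# so within this module only the limit-quota branch of the function below is
# ever exercised.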
@require_context @oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True) @main_context_manager.writer def quota_destroy_all_by_project(context, project_id, only_quotas=False): """Destroy all quotas associated with a project. This includes limit quotas, usage quotas and reservation quotas. Optionally can only remove limit quotas and leave other types as they are. :param context: The request context, for access checks. :param project_id: The ID of the project being deleted. :param only_quotas: Only delete limit quotas, leave other types intact. """ model_query(context, models.Quota).filter_by(project_id=project_id).update( models.Quota.delete_values() ) if only_quotas: return model_query(context, models.QuotaUsage, read_deleted="no").filter_by( project_id=project_id ).update(models.QuotaUsage.delete_values()) model_query(context, models.Reservation, read_deleted="no").filter_by( project_id=project_id ).update(models.Reservation.delete_values()) @require_admin_context @oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True) @main_context_manager.writer def reservation_expire(context): current_time = timeutils.utcnow() results = ( model_query(context, models.Reservation, read_deleted="no") .filter(models.Reservation.expire < current_time) .with_for_update() .all() ) if results: for reservation in results: if reservation.delta >= 0: reservation.usage.reserved -= min( reservation.delta, reservation.usage.reserved, ) reservation.usage.save(context.session) reservation.delete(context.session) ################### @require_admin_context @main_context_manager.writer def volume_attach(context, values): volume_attachment_ref = models.VolumeAttachment() if not values.get('id'): values['id'] = str(uuid.uuid4()) volume_attachment_ref.update(values) volume_attachment_ref.save(context.session) return _attachment_get(context, values['id']) @require_admin_context @main_context_manager.writer def volume_attached( context, attachment_id, instance_uuid, host_name, mountpoint, attach_mode='rw', mark_attached=True, ): """This method updates a volume attachment entry. This function saves the information related to a particular attachment for a volume. It also updates the volume record to mark the volume as attached or attaching. The mark_attached argument is a boolean, when set to True, we mark the volume as 'in-use' and the 'attachment' as 'attached', if False, we use 'attaching' for both of these status settings. 
""" attach_status = fields.VolumeAttachStatus.ATTACHED volume_status = 'in-use' if not mark_attached: attach_status = fields.VolumeAttachStatus.ATTACHING volume_status = 'attaching' if instance_uuid and not uuidutils.is_uuid_like(instance_uuid): raise exception.InvalidUUID(uuid=instance_uuid) volume_attachment_ref = _attachment_get(context, attachment_id) updated_values = { 'mountpoint': mountpoint, 'attach_status': attach_status, 'instance_uuid': instance_uuid, 'attached_host': host_name, 'attach_time': timeutils.utcnow(), 'attach_mode': attach_mode, 'updated_at': volume_attachment_ref.updated_at, } volume_attachment_ref.update(updated_values) volume_attachment_ref.save(context.session) del updated_values['updated_at'] volume_ref = _volume_get(context, volume_attachment_ref['volume_id']) volume_ref['status'] = volume_status volume_ref['attach_status'] = attach_status volume_ref.save(context.session) return volume_ref, updated_values @handle_db_data_error @require_context @oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True) @main_context_manager.writer def volume_create(context, values): values['volume_metadata'] = _metadata_refs( values.get('metadata'), models.VolumeMetadata, ) if is_admin_context(context): values['volume_admin_metadata'] = _metadata_refs( values.get('admin_metadata'), models.VolumeAdminMetadata, ) elif values.get('volume_admin_metadata'): del values['volume_admin_metadata'] volume_ref = models.Volume() if not values.get('id'): values['id'] = str(uuid.uuid4()) volume_ref.update(values) context.session.add(volume_ref) return _volume_get(context, values['id']) def get_booleans_for_table(table_name): booleans = set() table = getattr(models, table_name.capitalize()) if hasattr(table, '__table__'): columns = table.__table__.columns for column in columns: if isinstance(column.type, sqltypes.Boolean): booleans.add(column.name) return booleans @require_admin_context @main_context_manager.reader def volume_data_get_for_host(context, host, count_only=False): host_attr = models.Volume.host conditions = [host_attr == host, host_attr.op('LIKE')(host + '#%')] if count_only: result = ( model_query( context, func.count(models.Volume.id), read_deleted="no" ) .filter(or_(*conditions)) .first() ) return result[0] or 0 else: result = ( model_query( context, func.count(models.Volume.id), func.sum(models.Volume.size), read_deleted="no", ) .filter(or_(*conditions)) .first() ) # NOTE(vish): convert None to 0 return (result[0] or 0, result[1] or 0) @require_admin_context def _volume_data_get_for_project( context, project_id, volume_type_id=None, host=None, skip_internal=True, ): model = models.Volume query = model_query( context, func.count(model.id), func.sum(model.size), read_deleted="no" ).filter_by(project_id=project_id) # By default we skip temporary resources creted for internal usage and # migration destination volumes. 
if skip_internal: query = query.filter(model.use_quota) if host: query = query.filter(_filter_host(model.host, host)) if volume_type_id: query = query.filter_by(volume_type_id=volume_type_id) result = query.first() # NOTE(vish): convert None to 0 return result[0] or 0, result[1] or 0 @require_admin_context @main_context_manager.reader def volume_data_get_for_project(context, project_id, host=None): return _volume_data_get_for_project( context, project_id, host=host, skip_internal=False, ) VOLUME_DEPENDENT_MODELS = frozenset( [ models.VolumeMetadata, models.VolumeAdminMetadata, models.Snapshot, models.Transfer, models.VolumeGlanceMetadata, models.VolumeAttachment, ] ) @require_admin_context @oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True) @main_context_manager.writer def volume_destroy(context, volume_id): now = timeutils.utcnow() updated_values = { 'status': 'deleted', 'deleted': True, 'deleted_at': now, 'migration_status': None, } query = model_query(context, models.Volume).filter_by(id=volume_id) entity = query.column_descriptions[0]['entity'] updated_values['updated_at'] = entity.updated_at query.update(updated_values) for model in VOLUME_DEPENDENT_MODELS: query = model_query(context, model).filter_by(volume_id=volume_id) entity = query.column_descriptions[0]['entity'] query.update( { 'deleted': True, 'deleted_at': now, 'updated_at': entity.updated_at, } ) del updated_values['updated_at'] return updated_values def _include_in_cluster(context, cluster, model, partial_rename, filters): """Generic include in cluster method. When we include resources in a cluster we have to be careful to preserve the addressing sections that have not been provided. That's why we allow partial_renaming, so we can preserve the backend and pool if we are only providing host/cluster level information, and preserve pool information if we only provide backend level information. For example when we include a host in a cluster we receive calls with filters like {'host': 'localhost@lvmdriver-1'} and cluster with something like 'mycluster@lvmdriver-1'. Since in the DB the resources will have the host field set to something like 'localhost@lvmdriver-1#lvmdriver-1' we want to include original pool in the new cluster_name. So we want to store in cluster_name value 'mycluster@lvmdriver-1#lvmdriver-1'. """ filters = _clean_filters(filters) if filters and not is_valid_model_filters(model, filters): return None query = context.session.query(model) if hasattr(model, 'deleted'): query = query.filter_by(deleted=False) # cluster_name and host are special filter cases for field in {'cluster_name', 'host'}.intersection(filters): value = filters.pop(field) # We do a special backend filter query = query.filter(_filter_host(getattr(model, field), value)) # If we want to do a partial rename and we haven't set the cluster # already, the value we want to set is a SQL replace of existing field # value. 
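# Worked example (illustrative, reusing the values from the docstring above):
# with filters {'host': 'localhost@lvmdriver-1'} and
# cluster = 'mycluster@lvmdriver-1', func.replace() rewrites an existing host
# of 'localhost@lvmdriver-1#lvmdriver-1' into a cluster_name of
# 'mycluster@lvmdriver-1#lvmdriver-1', keeping the '#lvmdriver-1' pool suffix
# that the caller never specified.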
if partial_rename and isinstance(cluster, str): cluster = func.replace(getattr(model, field), value, cluster) query = query.filter_by(**filters) result = query.update({'cluster_name': cluster}, synchronize_session=False) return result @require_admin_context @main_context_manager.writer def volume_include_in_cluster( context, cluster, partial_rename=True, **filters ): """Include all volumes matching the filters into a cluster.""" return _include_in_cluster( context, cluster, models.Volume, partial_rename, filters ) def _get_statuses_from_attachments(context, volume_id): """Get volume status and attach_status based on existing attachments.""" # NOTE: Current implementation ignores attachments on error attaching, # since they will not have been used by any consumer because os-brick's # connect_volume has not been called yet. This leads to cases where a # volume will be in 'available' state yet have attachments. # If we sort status of attachments alphabetically, ignoring errors, the # first element will be the attachment status for the volume: # attached > attaching > detaching > reserved attach_status = ( context.session.query(models.VolumeAttachment.attach_status) .filter_by(deleted=False) .filter_by(volume_id=volume_id) .filter(~models.VolumeAttachment.attach_status.startswith('error_')) .order_by(models.VolumeAttachment.attach_status.asc()) .limit(1) .scalar() ) # No volume attachment records means the volume is detached. attach_status = attach_status or 'detached' # Check cases where volume status is different from attach status, and # default to the same value if it's not one of those cases. status = ATTACH_STATUS_MAP.get(attach_status, attach_status) return status, attach_status @require_admin_context @main_context_manager.writer def volume_detached(context, volume_id, attachment_id): """Delete an attachment and update the volume accordingly. After marking the attachment as detached the method will decide the status and attach_status values for the volume based on the current status and the remaining attachments and their status. Volume status may be changed to: in-use, attaching, detaching, reserved, or available. Volume attach_status will be changed to one of: attached, attaching, detaching, reserved, or detached. """ # NOTE(jdg): This is a funky band-aid for the earlier attempts at # multiattach, it's a bummer because these things aren't really being used # but at the same time we don't want to break them until we work out the # new proposal for multi-attach # Only load basic volume info necessary to check various status and use # the volume row as a lock with the for_update. volume = _volume_get( context, volume_id, joined_load=False, for_update=True, ) try: attachment = _attachment_get(context, attachment_id) attachment_updates = attachment.delete(context.session) except exception.VolumeAttachmentNotFound: attachment_updates = None status, attach_status = _get_statuses_from_attachments(context, volume_id) volume_updates = { 'updated_at': volume.updated_at, 'attach_status': attach_status, } # Hide volume status update to available on volume migration or upload, # as status is updated later on those flows. 
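# Reading the condition below (roughly): the freshly computed status is
# skipped only when the volume ended up fully 'detached' AND it is still
# mid-migration (migration_status set but not 'success'/'error') or
# mid-upload ('uploading'); those flows set the final status themselves.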
if ( attach_status != 'detached' or (not volume.migration_status and volume.status != 'uploading') or volume.migration_status in ('success', 'error') ): volume_updates['status'] = status volume.update(volume_updates) volume.save(context.session) del volume_updates['updated_at'] return volume_updates, attachment_updates def _process_model_like_filter(model, query, filters): """Applies regex expression filtering to a query. :param model: model to apply filters to :param query: query to apply filters to :param filters: dictionary of filters with regex values :returns: the updated query. """ if query is None: return query for key in sorted(filters): column_attr = getattr(model, key) if 'property' == type(column_attr).__name__: continue value = filters[key] if not (isinstance(value, (str, int))): continue query = query.filter(column_attr.op('LIKE')(u'%%%s%%' % value)) return query def apply_like_filters(model): def decorator_filters(process_exact_filters): def _decorator(query, filters): exact_filters = filters.copy() regex_filters = {} for key, value in filters.items(): # NOTE(tommylikehu): For inexact match, the filter keys # are in the format of 'key~=value' if key.endswith('~'): exact_filters.pop(key) regex_filters[key.rstrip('~')] = value query = process_exact_filters(query, exact_filters) return _process_model_like_filter(model, query, regex_filters) return _decorator return decorator_filters @require_context def _volume_get_query(context, project_only=False, joined_load=True): """Get the query to retrieve the volume. :param context: the context used to run the method _volume_get_query :param project_only: the boolean used to decide whether to query the volume in the current project or all projects :param joined_load: the boolean used to decide whether the query loads the other models, which join the volume model in the database. 
Currently, the False value for this parameter is specially for the case of updating database during volume migration :returns: updated query or None """ if not joined_load: return model_query(context, models.Volume, project_only=project_only) if is_admin_context(context): return ( model_query( context, models.Volume, project_only=project_only, ) .options(joinedload(models.Volume.volume_metadata)) .options(joinedload(models.Volume.volume_admin_metadata)) .options(joinedload(models.Volume.volume_type)) .options(joinedload(models.Volume.volume_attachment)) .options(joinedload(models.Volume.consistencygroup)) .options(joinedload(models.Volume.group)) ) else: return ( model_query( context, models.Volume, project_only=project_only, ) .options(joinedload(models.Volume.volume_metadata)) .options(joinedload(models.Volume.volume_type)) .options(joinedload(models.Volume.volume_attachment)) .options(joinedload(models.Volume.consistencygroup)) .options(joinedload(models.Volume.group)) ) @require_context def _volume_get(context, volume_id, joined_load=True, for_update=False): result = _volume_get_query( context, project_only=True, joined_load=joined_load, ) if joined_load: result = result.options( joinedload(models.Volume.volume_type).joinedload( models.VolumeType.extra_specs ) ) if for_update: result = result.with_for_update() result = result.filter_by(id=volume_id).first() if not result: raise exception.VolumeNotFound(volume_id=volume_id) return result def _attachment_get_all( context, filters=None, marker=None, limit=None, offset=None, sort_keys=None, sort_dirs=None, ): if filters and not is_valid_model_filters( models.VolumeAttachment, filters, exclude_list=['project_id'], ): return [] # Generate the paginate query query = _generate_paginate_query( context, marker, limit, sort_keys, sort_dirs, filters, offset, models.VolumeAttachment, ) if query is None: return [] return query.all() def _attachment_get( context, attachment_id, read_deleted=False, project_only=True, ): result = ( model_query( context, models.VolumeAttachment, read_deleted=read_deleted ) .filter_by(id=attachment_id) .options(joinedload(models.VolumeAttachment.volume)) .first() ) if not result: raise exception.VolumeAttachmentNotFound( filter='attachment_id = %s' % attachment_id, ) return result def _attachment_get_query(context, project_only=False): return model_query( context, models.VolumeAttachment, project_only=project_only, ).options(joinedload(models.VolumeAttachment.volume)) @apply_like_filters(model=models.VolumeAttachment) def _process_attachment_filters(query, filters): if filters: project_id = filters.pop('project_id', None) # Ensure that filters' keys exist on the model if not is_valid_model_filters(models.VolumeAttachment, filters): return if project_id: volume = models.Volume query = query.filter( volume.id == models.VolumeAttachment.volume_id, volume.project_id == project_id, ) query = query.filter_by(**filters) return query @require_admin_context @main_context_manager.reader def volume_attachment_get_all( context, filters=None, marker=None, limit=None, offset=None, sort_keys=None, sort_dirs=None, ): """Retrieve all Attachment records with filter and pagination options.""" return _attachment_get_all( context, filters, marker, limit, offset, sort_keys, sort_dirs ) @require_context @main_context_manager.reader def volume_attachment_get_all_by_volume_id(context, volume_id): result = ( model_query(context, models.VolumeAttachment) .filter_by(volume_id=volume_id) .filter( models.VolumeAttachment.attach_status != 
fields.VolumeAttachStatus.DETACHED ) .options(joinedload(models.VolumeAttachment.volume)) .all() ) return result # FIXME(jdg): Not using filters @require_context @main_context_manager.reader def volume_attachment_get_all_by_host(context, host, filters=None): result = ( model_query(context, models.VolumeAttachment) .filter_by(attached_host=host) .filter( models.VolumeAttachment.attach_status != fields.VolumeAttachStatus.DETACHED ) .options(joinedload(models.VolumeAttachment.volume)) .all() ) return result @require_context @main_context_manager.reader def volume_attachment_get(context, attachment_id): """Fetch the specified attachment record.""" return _attachment_get(context, attachment_id) # FIXME(jdg): Not using filters @require_context @main_context_manager.reader def volume_attachment_get_all_by_instance_uuid( context, instance_uuid, filters=None, ): """Fetch all attachment records associated with the specified instance.""" result = ( model_query(context, models.VolumeAttachment) .filter_by(instance_uuid=instance_uuid) .filter( models.VolumeAttachment.attach_status != fields.VolumeAttachStatus.DETACHED ) .options(joinedload(models.VolumeAttachment.volume)) .all() ) return result @require_context @main_context_manager.reader def volume_attachment_get_all_by_project( context, project_id, filters=None, marker=None, limit=None, offset=None, sort_keys=None, sort_dirs=None, ): """Retrieve all Attachment records for specific project.""" authorize_project_context(context, project_id) if not filters: filters = {} else: filters = filters.copy() filters['project_id'] = project_id return _attachment_get_all( context, filters, marker, limit, offset, sort_keys, sort_dirs, ) @require_admin_context @oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True) @main_context_manager.writer def attachment_destroy(context, attachment_id): """Destroy the specified attachment record.""" utcnow = timeutils.utcnow() query = model_query(context, models.VolumeAttachment).filter_by( id=attachment_id ) entity = query.column_descriptions[0]['entity'] updated_values = { 'attach_status': fields.VolumeAttachStatus.DELETED, 'deleted': True, 'deleted_at': utcnow, 'updated_at': entity.updated_at, } query.update(updated_values) query = model_query(context, models.AttachmentSpecs).filter_by( attachment_id=attachment_id ) entity = query.column_descriptions[0]['entity'] query.update( { 'deleted': True, 'deleted_at': utcnow, 'updated_at': entity.updated_at, } ) del updated_values['updated_at'] return updated_values @require_context @main_context_manager.writer def attachment_specs_exist(context): query = model_query(context, models.AttachmentSpecs, read_deleted='no') return bool(query.first()) def _attachment_specs_query(context, attachment_id): return model_query( context, models.AttachmentSpecs, read_deleted="no" ).filter_by(attachment_id=attachment_id) @require_context @main_context_manager.reader def attachment_specs_get(context, attachment_id): """DEPRECATED: Fetch the attachment_specs for the specified attachment.""" rows = _attachment_specs_query(context, attachment_id).all() result = {row['key']: row['value'] for row in rows} return result @require_context @main_context_manager.writer def attachment_specs_delete(context, attachment_id, key): """DEPRECATED: Delete attachment_specs for the specified attachment.""" _attachment_specs_get_item(context, attachment_id, key) query = _attachment_specs_query(context, attachment_id).filter_by(key=key) entity = query.column_descriptions[0]['entity'] query.update( { 'deleted': 
True, 'deleted_at': timeutils.utcnow(), 'updated_at': entity.updated_at, } ) @require_context def _attachment_specs_get_item(context, attachment_id, key): result = ( _attachment_specs_query(context, attachment_id) .filter_by(key=key) .first() ) if not result: raise exception.AttachmentSpecsNotFound( specs_key=key, attachment_id=attachment_id, ) return result @handle_db_data_error @require_context @main_context_manager.writer def attachment_specs_update_or_create(context, attachment_id, specs): """DEPRECATED: Update attachment_specs for the specified attachment.""" spec_ref = None for key, value in specs.items(): try: spec_ref = _attachment_specs_get_item(context, attachment_id, key) except exception.AttachmentSpecsNotFound: spec_ref = models.AttachmentSpecs() spec_ref.update( { "key": key, "value": value, "attachment_id": attachment_id, "deleted": False, } ) spec_ref.save(context.session) return specs @require_context @main_context_manager.reader def volume_get(context, volume_id): return _volume_get(context, volume_id) @require_admin_context @main_context_manager.reader def volume_get_all( context, marker=None, limit=None, sort_keys=None, sort_dirs=None, filters=None, offset=None, ): """Retrieves all volumes. If no sort parameters are specified then the returned volumes are sorted first by the 'created_at' key and then by the 'id' key in descending order. :param context: context to query under :param marker: the last item of the previous page, used to determine the next page of results to return :param limit: maximum number of items to return :param sort_keys: list of attributes by which results should be sorted, paired with corresponding item in sort_dirs :param sort_dirs: list of directions in which results should be sorted, paired with corresponding item in sort_keys :param filters: dictionary of filters; values that are in lists, tuples, or sets cause an 'IN' operation, while exact matching is used for other values, see _process_volume_filters function for more information :returns: list of matching volumes """ # Generate the query query = _generate_paginate_query( context, marker, limit, sort_keys, sort_dirs, filters, offset, models.Volume, ) # No volumes would match, return empty list if query is None: return [] return query.all() @require_context @main_context_manager.reader def get_volume_summary(context, project_only, filters=None): """Retrieves all volumes summary. 
:param context: context to query under :param project_only: limit summary to project volumes :param filters: dictionary of filters; values that are in lists, tuples, or sets cause an 'IN' operation, while exact matching is used for other values, see _process_volume_filters function for more information :returns: volume summary """ if not (project_only or is_admin_context(context)): raise exception.AdminRequired() query = model_query( context, func.count(models.Volume.id), func.sum(models.Volume.size), read_deleted="no", ) if project_only: query = query.filter_by(project_id=context.project_id) if filters: query = _process_volume_filters(query, filters) if query is None: return [] result = query.first() query_metadata = model_query( context, models.VolumeMetadata.key, models.VolumeMetadata.value, read_deleted="no", ) if project_only: query_metadata = query_metadata.join( models.Volume, models.Volume.id == models.VolumeMetadata.volume_id ).filter_by(project_id=context.project_id) result_metadata = query_metadata.distinct().all() result_metadata_list = collections.defaultdict(list) for key, value in result_metadata: result_metadata_list[key].append(value) return (result[0] or 0, result[1] or 0, result_metadata_list) @require_admin_context @main_context_manager.reader def volume_get_all_by_host(context, host, filters=None): """Retrieves all volumes hosted on a host. :param context: context to query under :param host: host for all volumes being retrieved :param filters: dictionary of filters; values that are in lists, tuples, or sets cause an 'IN' operation, while exact matching is used for other values, see _process_volume_filters function for more information :returns: list of matching volumes """ # As a side effect of the introduction of pool-aware scheduler, # newly created volumes will have pool information appended to # 'host' field of a volume record. So a volume record in DB can # now be either form below: # Host # Host#Pool if host and isinstance(host, str): host_attr = getattr(models.Volume, 'host') conditions = [host_attr == host, host_attr.op('LIKE')(host + '#%')] query = _volume_get_query(context).filter(or_(*conditions)) if filters: query = _process_volume_filters(query, filters) # No volumes would match, return empty list if query is None: return [] return query.all() elif not host: return [] @require_context @main_context_manager.reader def volume_get_all_by_group(context, group_id, filters=None): """Retrieves all volumes associated with the group_id. :param context: context to query under :param group_id: consistency group ID for all volumes being retrieved :param filters: dictionary of filters; values that are in lists, tuples, or sets cause an 'IN' operation, while exact matching is used for other values, see _process_volume_filters function for more information :returns: list of matching volumes """ query = _volume_get_query(context).filter_by(consistencygroup_id=group_id) if filters: query = _process_volume_filters(query, filters) # No volumes would match, return empty list if query is None: return [] return query.all() @require_admin_context @main_context_manager.writer def volume_update_all_by_service(context): """Ensure volumes have the correct service_uuid value for their host. In some deployment tools, when performing an upgrade, all service records are recreated including c-vol service which gets a new record in the services table, though its host name is constant. Later we then delete the old service record. 
As a consequence, the volumes have the right host name but the service UUID needs to be updated to the ID of the new service record. :param context: context to query under """ # Get all cinder-volume services services = service_get_all(context, binary='cinder-volume') for service in services: query = model_query(context, models.Volume) query = query.filter( _filter_host( models.Volume.host, service.host), models.Volume.service_uuid != service.uuid) query.update( {"service_uuid": service.uuid}, synchronize_session=False) @require_context @main_context_manager.reader def volume_get_all_by_generic_group(context, group_id, filters=None): """Retrieves all volumes associated with the group_id. :param context: context to query under :param group_id: group ID for all volumes being retrieved :param filters: dictionary of filters; values that are in lists, tuples, or sets cause an 'IN' operation, while exact matching is used for other values, see _process_volume_filters function for more information :returns: list of matching volumes """ query = _volume_get_query(context).filter_by(group_id=group_id) if filters: query = _process_volume_filters(query, filters) # No volumes would match, return empty list if query is None: return [] return query.all() @require_context @main_context_manager.reader def volume_get_all_by_project( context, project_id, marker, limit, sort_keys=None, sort_dirs=None, filters=None, offset=None, ): """Retrieves all volumes in a project. If no sort parameters are specified then the returned volumes are sorted first by the 'created_at' key and then by the 'id' key in descending order. :param context: context to query under :param project_id: project for all volumes being retrieved :param marker: the last item of the previous page, used to determine the next page of results to return :param limit: maximum number of items to return :param sort_keys: list of attributes by which results should be sorted, paired with corresponding item in sort_dirs :param sort_dirs: list of directions in which results should be sorted, paired with corresponding item in sort_keys :param filters: dictionary of filters; values that are in lists, tuples, or sets cause an 'IN' operation, while exact matching is used for other values, see _process_volume_filters function for more information :returns: list of matching volumes """ authorize_project_context(context, project_id) # Add in the project filter without modifying the given filters filters = filters.copy() if filters else {} filters['project_id'] = project_id # Generate the query query = _generate_paginate_query( context, marker, limit, sort_keys, sort_dirs, filters, offset, models.Volume, ) # No volumes would match, return empty list if query is None: return [] return query.all() def _generate_paginate_query( context, marker, limit, sort_keys, sort_dirs, filters, offset=None, paginate_type=models.Volume, ): """Generate the query to include the filters and the paginate options. Returns a query with sorting / pagination criteria added or None if the given filters will not yield any results. :param context: context to query under :param marker: the last item of the previous page; we return the next results after this value.
:param limit: maximum number of items to return :param sort_keys: list of attributes by which results should be sorted, paired with corresponding item in sort_dirs :param sort_dirs: list of directions in which results should be sorted, paired with corresponding item in sort_keys :param filters: dictionary of filters; values that are in lists, tuples, or sets cause an 'IN' operation, while exact matching is used for other values, see _process_volume_filters function for more information :param offset: number of items to skip :param paginate_type: type of pagination to generate :returns: updated query or None """ get_query, process_filters, get = PAGINATION_HELPERS[paginate_type] sort_keys, sort_dirs = process_sort_params( sort_keys, sort_dirs, default_dir='desc', ) query = get_query(context) if filters: query = process_filters(query, filters) if query is None: return None marker_object = None if marker is not None: marker_object = get(context, marker) return sqlalchemyutils.paginate_query( query, paginate_type, limit, sort_keys, marker=marker_object, sort_dirs=sort_dirs, offset=offset, ) @main_context_manager.reader def calculate_resource_count(context, resource_type, filters): """Calculate total count with filters applied""" if resource_type not in CALCULATE_COUNT_HELPERS.keys(): msg = _("Model %s doesn't support counting resource.") raise exception.InvalidInput(reason=msg % resource_type) get_query, process_filters = CALCULATE_COUNT_HELPERS[resource_type] query = get_query(context, joined_load=False) if filters: query = process_filters(query, filters) if query is None: return 0 return query.with_entities(func.count()).scalar() @apply_like_filters(model=models.Volume) def _process_volume_filters(query, filters): """Common filter processing for Volume queries. Filter values that are in lists, tuples, or sets cause an 'IN' operator to be used, while exact matching ('==' operator) is used for other values. A filter key/value of 'no_migration_targets'=True causes volumes with either a NULL 'migration_status' or a 'migration_status' that does not start with 'target:' to be retrieved. A 'metadata' filter key must correspond to a dictionary value of metadata key-value pairs. 
:param query: Model query to use :param filters: dictionary of filters :returns: updated query or None """ filters = filters.copy() # 'no_migration_targets' is unique, must be either NULL or # not start with 'target:' if filters.get('no_migration_targets', False): filters.pop('no_migration_targets') try: column_attr = getattr(models.Volume, 'migration_status') conditions = [ column_attr == None, # noqa column_attr.op('NOT LIKE')('target:%'), ] query = query.filter(or_(*conditions)) except AttributeError: LOG.debug("'migration_status' column could not be found.") return None host = filters.pop('host', None) if host: query = query.filter(_filter_host(models.Volume.host, host)) cluster_name = filters.pop('cluster_name', None) if cluster_name: query = query.filter( _filter_host(models.Volume.cluster_name, cluster_name), ) for time_comparison_filter in ['created_at', 'updated_at']: if filters.get(time_comparison_filter, None): time_filter_dict = filters.pop(time_comparison_filter) try: query = query.filter( _filter_time_comparison( getattr(models.Volume, time_comparison_filter), time_filter_dict, ), ) except AttributeError: LOG.debug( "%s column could not be found.", time_comparison_filter, ) return None # Apply exact match filters for everything else, ensure that the # filter value exists on the model for key in filters.keys(): # metadata/glance_metadata is unique, must be a dict if key in ('metadata', 'glance_metadata'): if not isinstance(filters[key], dict): LOG.debug("'%s' filter value is not valid.", key) return None continue try: column_attr = getattr(models.Volume, key) # Do not allow relationship properties since those require # schema specific knowledge prop = getattr(column_attr, 'property') if isinstance(prop, RelationshipProperty): LOG.debug( "'%s' filter key is not valid, it maps to a relationship.", key, ) return None except AttributeError: LOG.debug("'%s' filter key is not valid.", key) return None # Holds the simple exact matches filter_dict = {} # Iterate over all filters, special case the filter if necessary for key, value in filters.items(): if key == 'metadata': # model.VolumeMetadata defines the backref to Volumes as # 'volume_metadata' or 'volume_admin_metadata', use those as # column attribute keys col_attr = getattr(models.Volume, 'volume_metadata') col_ad_attr = getattr(models.Volume, 'volume_admin_metadata') for k, v in value.items(): query = query.filter( or_( col_attr.any(key=k, value=v), col_ad_attr.any(key=k, value=v), ) ) elif key == 'glance_metadata': # use models.Volume.volume_glance_metadata as column attribute key. col_gl_attr = models.Volume.volume_glance_metadata for k, v in value.items(): query = query.filter(col_gl_attr.any(key=k, value=v)) elif isinstance(value, (list, tuple, set, frozenset)): # Looking for values in a list; apply to query directly column_attr = getattr(models.Volume, key) query = query.filter(column_attr.in_(value)) else: # OK, simple exact match; save for later filter_dict[key] = value # Apply simple exact matches if filter_dict: query = query.filter_by(**filter_dict) return query def process_sort_params( sort_keys, sort_dirs, default_keys=None, default_dir='asc', ): """Process the sort parameters to include default keys. Creates a list of sort keys and a list of sort directions. Adds the default keys to the end of the list if they are not already included. 
When adding the default keys to the sort keys list, the associated direction is: 1) The first element in the 'sort_dirs' list (if specified), else 2) 'default_dir' value (Note that 'asc' is the default value since this is the default in sqlalchemy.utils.paginate_query) :param sort_keys: List of sort keys to include in the processed list :param sort_dirs: List of sort directions to include in the processed list :param default_keys: List of sort keys that need to be included in the processed list, they are added at the end of the list if not already specified. :param default_dir: Sort direction associated with each of the default keys that are not supplied, used when they are added to the processed list :returns: list of sort keys, list of sort directions :raise exception.InvalidInput: If more sort directions than sort keys are specified or if an invalid sort direction is specified """ if default_keys is None: default_keys = ['created_at', 'id'] # Determine direction to use for when adding default keys if sort_dirs and len(sort_dirs): default_dir_value = sort_dirs[0] else: default_dir_value = default_dir # Create list of keys (do not modify the input list) if sort_keys: result_keys = list(sort_keys) else: result_keys = [] # If a list of directions is not provided, use the default sort direction # for all provided keys. if sort_dirs: result_dirs = [] # Verify sort direction for sort_dir in sort_dirs: if sort_dir not in ('asc', 'desc'): msg = _("Unknown sort direction, must be 'desc' or 'asc'.") raise exception.InvalidInput(reason=msg) result_dirs.append(sort_dir) else: result_dirs = [default_dir_value for _sort_key in result_keys] # Ensure that the key and direction length match while len(result_dirs) < len(result_keys): result_dirs.append(default_dir_value) # Unless more direction are specified, which is an error if len(result_dirs) > len(result_keys): msg = _("Sort direction array size exceeds sort key array size.") raise exception.InvalidInput(reason=msg) # Ensure defaults are included for key in default_keys: if key not in result_keys: result_keys.append(key) result_dirs.append(default_dir_value) return result_keys, result_dirs @handle_db_data_error @require_context @main_context_manager.writer def volume_update(context, volume_id, values): metadata = values.get('metadata') if metadata is not None: _volume_user_metadata_update( context, volume_id, values.pop('metadata'), delete=True, ) admin_metadata = values.get('admin_metadata') if is_admin_context(context) and admin_metadata is not None: _volume_admin_metadata_update( context, volume_id, values.pop('admin_metadata'), delete=True, ) query = _volume_get_query(context, joined_load=False) result = query.filter_by(id=volume_id).update(values) if not result: raise exception.VolumeNotFound(volume_id=volume_id) @handle_db_data_error @require_context @main_context_manager.writer def volumes_update(context, values_list): volume_refs = [] for values in values_list: volume_id = values['id'] values.pop('id') metadata = values.get('metadata') if metadata is not None: _volume_user_metadata_update( context, volume_id, values.pop('metadata'), delete=True, ) admin_metadata = values.get('admin_metadata') if is_admin_context(context) and admin_metadata is not None: _volume_admin_metadata_update( context, volume_id, values.pop('admin_metadata'), delete=True, ) volume_ref = _volume_get(context, volume_id) volume_ref.update(values) volume_refs.append(volume_ref) return volume_refs @require_context @main_context_manager.writer def 
volume_attachment_update(context, attachment_id, values): query = model_query(context, models.VolumeAttachment) result = query.filter_by(id=attachment_id).update(values) if not result: raise exception.VolumeAttachmentNotFound( filter='attachment_id = ' + attachment_id ) @main_context_manager.writer def volume_update_status_based_on_attachment(context, volume_id): """Update volume status based on attachment. Get volume and check if 'volume_attachment' parameter is present in volume. If 'volume_attachment' is None then set volume status to 'available' else set volume status to 'in-use'. :param context: context to query under :param volume_id: id of volume to be updated :returns: updated volume """ volume_ref = _volume_get(context, volume_id) # We need to get and update volume using same session because # there is possibility that instance is deleted between the 'get' # and 'update' volume call. if not volume_ref['volume_attachment']: volume_ref.update({'status': 'available'}) else: volume_ref.update({'status': 'in-use'}) return volume_ref def volume_has_snapshots_filter(): return sql.exists().where( and_( models.Volume.id == models.Snapshot.volume_id, ~models.Snapshot.deleted, ) ) def volume_has_undeletable_snapshots_filter(): deletable_statuses = ['available', 'error'] return sql.exists().where( and_( models.Volume.id == models.Snapshot.volume_id, ~models.Snapshot.deleted, or_( models.Snapshot.cgsnapshot_id != None, # noqa: != None models.Snapshot.status.notin_(deletable_statuses), ), or_( models.Snapshot.group_snapshot_id != None, # noqa: != None models.Snapshot.status.notin_(deletable_statuses), ), ) ) def volume_has_snapshots_in_a_cgsnapshot_filter(): return sql.exists().where( and_( models.Volume.id == models.Snapshot.volume_id, models.Snapshot.cgsnapshot_id.isnot(None), ) ) def volume_has_attachments_filter(): return sql.exists().where( and_( models.Volume.id == models.VolumeAttachment.volume_id, models.VolumeAttachment.attach_status != fields.VolumeAttachStatus.DETACHED, ~models.VolumeAttachment.deleted, ) ) def volume_qos_allows_retype(new_vol_type): """Filter to check that qos allows retyping the volume to new_vol_type. Returned sqlalchemy filter will evaluate to True when volume's status is available or when it's 'in-use' but the qos in new_vol_type is the same as the qos of the volume or when neither of the 2 volume_types has a consumer spec key that specifies anything other than the back-end. """ # Query to get the qos of the volume type new_vol_type q = ( sql.select(models.VolumeType.qos_specs_id) .where( and_( ~models.VolumeType.deleted, models.VolumeType.id == new_vol_type, ) ) .scalar_subquery() ) # Construct the filter to check qos when volume is 'in-use' return or_( # If volume is available models.Volume.status == 'available', # Or both volume types have the same qos specs sql.exists().where( and_( ~models.VolumeType.deleted, models.VolumeType.id == models.Volume.volume_type_id, models.VolumeType.qos_specs_id == q, ) ), # Or they are different specs but they are handled by the backend or # it is not specified. The way SQL evaluates value != 'back-end' # makes it result in False not only for 'back-end' values but for # NULL as well, and with the double negation we ensure that we only # allow QoS with 'consumer' values of 'back-end' and NULL.
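# In other words, the two NOT EXISTS subqueries below assert that neither
# the volume's current type nor new_vol_type carries a 'consumer' spec
# whose value is anything other than 'back-end' (a missing consumer spec,
# i.e. NULL, also passes).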
and_( ~sql.exists().where( and_( ~models.VolumeType.deleted, models.VolumeType.id == models.Volume.volume_type_id, ( models.VolumeType.qos_specs_id == models.QualityOfServiceSpecs.specs_id ), models.QualityOfServiceSpecs.key == 'consumer', models.QualityOfServiceSpecs.value != 'back-end', ) ), ~sql.exists().where( and_( ~models.VolumeType.deleted, models.VolumeType.id == new_vol_type, ( models.VolumeType.qos_specs_id == models.QualityOfServiceSpecs.specs_id ), models.QualityOfServiceSpecs.key == 'consumer', models.QualityOfServiceSpecs.value != 'back-end', ) ), ), ) def volume_has_other_project_snp_filter(): return sql.exists().where( and_( models.Volume.id == models.Snapshot.volume_id, models.Volume.project_id != models.Snapshot.project_id, ) ) #################### def _volume_x_metadata_get_query(context, volume_id, model): return model_query(context, model, read_deleted="no").filter_by( volume_id=volume_id ) def _volume_x_metadata_get(context, volume_id, model): rows = _volume_x_metadata_get_query(context, volume_id, model).all() result = {} for row in rows: result[row['key']] = row['value'] return result def _volume_x_metadata_get_item(context, volume_id, key, model, notfound_exec): result = ( _volume_x_metadata_get_query(context, volume_id, model) .filter_by(key=key) .first() ) if not result: if model is models.VolumeGlanceMetadata: raise notfound_exec(id=volume_id) else: raise notfound_exec(metadata_key=key, volume_id=volume_id) return result def _volume_x_metadata_update( context, volume_id, metadata, delete, model, add=True, update=True ): metadata = metadata.copy() # Set existing metadata to deleted if delete argument is True. This is # committed immediately to the DB if delete: expected_values = {'volume_id': volume_id} # We don't want to delete keys we are going to update if metadata: expected_values['key'] = db.Not(metadata.keys()) _conditional_update( context, model, {'deleted': True, 'deleted_at': timeutils.utcnow()}, expected_values, ) # Get existing metadata db_meta = _volume_x_metadata_get_query(context, volume_id, model).all() save = [] skip = [] # We only want to send changed metadata. 
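# Sketch of the flow that follows: rows whose value actually changed are
# collected in 'save' for one bulk_save_objects() call, unchanged rows go
# to 'skip', and any keys still left in 'metadata' afterwards are created
# as brand new rows (when add=True).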
for row in db_meta: if row.key in metadata: value = metadata.pop(row.key) if row.value != value and update: # ORM objects will not be saved until we do the bulk save row.value = value save.append(row) continue skip.append(row) # We also want to save non-existent metadata if add: save.extend( model(key=key, value=value, volume_id=volume_id) for key, value in metadata.items() ) # Do a bulk save if save: context.session.bulk_save_objects(save, update_changed_only=True) # Construct result dictionary with current metadata save.extend(skip) result = {row['key']: row['value'] for row in save} return result def _volume_user_metadata_get_query(context, volume_id): return _volume_x_metadata_get_query( context, volume_id, models.VolumeMetadata ) def _volume_image_metadata_get_query(context, volume_id): return _volume_x_metadata_get_query( context, volume_id, models.VolumeGlanceMetadata ) @require_context def _volume_user_metadata_get(context, volume_id): return _volume_x_metadata_get(context, volume_id, models.VolumeMetadata) @require_context def _volume_user_metadata_get_item(context, volume_id, key): return _volume_x_metadata_get_item( context, volume_id, key, models.VolumeMetadata, exception.VolumeMetadataNotFound, ) @require_context @require_volume_exists def _volume_user_metadata_update(context, volume_id, metadata, delete): return _volume_x_metadata_update( context, volume_id, metadata, delete, models.VolumeMetadata ) @require_context @require_volume_exists def _volume_image_metadata_update(context, volume_id, metadata, delete): return _volume_x_metadata_update( context, volume_id, metadata, delete, models.VolumeGlanceMetadata ) @require_context def _volume_glance_metadata_key_to_id(context, volume_id, key): db_data = volume_glance_metadata_get(context, volume_id) metadata = { meta_entry.key: meta_entry.id for meta_entry in db_data if meta_entry.key == key } metadata_id = metadata[key] return metadata_id @require_context @require_volume_exists @main_context_manager.reader def volume_metadata_get(context, volume_id): return _volume_user_metadata_get(context, volume_id) @require_context @require_volume_exists @oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True) @main_context_manager.writer def volume_metadata_delete( context, volume_id, key, meta_type=common.METADATA_TYPES.user, ): if meta_type == common.METADATA_TYPES.user: query = _volume_user_metadata_get_query(context, volume_id).filter_by( key=key ) entity = query.column_descriptions[0]['entity'] query.update( { 'deleted': True, 'deleted_at': timeutils.utcnow(), 'updated_at': entity.updated_at, } ) elif meta_type == common.METADATA_TYPES.image: metadata_id = _volume_glance_metadata_key_to_id( context, volume_id, key ) query = _volume_image_metadata_get_query(context, volume_id).filter_by( id=metadata_id ) entity = query.column_descriptions[0]['entity'] query.update( { 'deleted': True, 'deleted_at': timeutils.utcnow(), 'updated_at': entity.updated_at, } ) else: raise exception.InvalidMetadataType( metadata_type=meta_type, id=volume_id ) @require_context @handle_db_data_error @oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True) @main_context_manager.writer def volume_metadata_update( context, volume_id, metadata, delete, meta_type=common.METADATA_TYPES.user, ): if meta_type == common.METADATA_TYPES.user: return _volume_user_metadata_update( context, volume_id, metadata, delete ) elif meta_type == common.METADATA_TYPES.image: return _volume_image_metadata_update( context, volume_id, metadata, delete ) else: raise 
exception.InvalidMetadataType( metadata_type=meta_type, id=volume_id ) ################### def _volume_admin_metadata_get_query(context, volume_id): return _volume_x_metadata_get_query( context, volume_id, models.VolumeAdminMetadata ) @require_admin_context @require_volume_exists def _volume_admin_metadata_get(context, volume_id): return _volume_x_metadata_get( context, volume_id, models.VolumeAdminMetadata ) @require_admin_context @require_volume_exists def _volume_admin_metadata_update( context, volume_id, metadata, delete, add=True, update=True ): return _volume_x_metadata_update( context, volume_id, metadata, delete, models.VolumeAdminMetadata, add=add, update=update, ) @require_admin_context @main_context_manager.reader def volume_admin_metadata_get(context, volume_id): return _volume_admin_metadata_get(context, volume_id) @require_admin_context @require_volume_exists @oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True) @main_context_manager.writer def volume_admin_metadata_delete(context, volume_id, key): query = _volume_admin_metadata_get_query(context, volume_id).filter_by( key=key ) entity = query.column_descriptions[0]['entity'] query.update( { 'deleted': True, 'deleted_at': timeutils.utcnow(), 'updated_at': entity.updated_at, } ) @require_admin_context @oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True) @main_context_manager.writer def volume_admin_metadata_update( context, volume_id, metadata, delete, add=True, update=True ): return _volume_admin_metadata_update( context, volume_id, metadata, delete, add=add, update=update ) ################### @require_context @handle_db_data_error @main_context_manager.writer def snapshot_create(context, values): values['snapshot_metadata'] = _metadata_refs( values.get('metadata'), models.SnapshotMetadata ) if not values.get('id'): values['id'] = str(uuid.uuid4()) snapshot_ref = models.Snapshot() snapshot_ref.update(values) context.session.add(snapshot_ref) return _snapshot_get(context, values['id']) @require_admin_context @oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True) @main_context_manager.writer def snapshot_destroy(context, snapshot_id): utcnow = timeutils.utcnow() query = model_query(context, models.Snapshot).filter_by(id=snapshot_id) entity = query.column_descriptions[0]['entity'] updated_values = { 'status': 'deleted', 'deleted': True, 'deleted_at': utcnow, 'updated_at': entity.updated_at, } query.update(updated_values) query = model_query(context, models.SnapshotMetadata).filter_by( snapshot_id=snapshot_id ) entity = query.column_descriptions[0]['entity'] query.update( { 'deleted': True, 'deleted_at': utcnow, 'updated_at': entity.updated_at, } ) del updated_values['updated_at'] return updated_values @require_context def _snapshot_get(context, snapshot_id): result = ( model_query(context, models.Snapshot, project_only=True) .options(joinedload(models.Snapshot.volume)) .options(joinedload(models.Snapshot.snapshot_metadata)) .filter_by(id=snapshot_id) .first() ) if not result: raise exception.SnapshotNotFound(snapshot_id=snapshot_id) return result @require_context @main_context_manager.reader def snapshot_get(context, snapshot_id): return _snapshot_get(context, snapshot_id) @require_admin_context @main_context_manager.reader def snapshot_get_all( context, filters=None, marker=None, limit=None, sort_keys=None, sort_dirs=None, offset=None, ): """Retrieves all snapshots. 
If no sorting parameters are specified then returned snapshots are sorted first by the 'created_at' key and then by the 'id' key in descending order. :param context: context to query under :param filters: dictionary of filters; will do exact matching on values. Special keys host and cluster_name refer to the volume. :param marker: the last item of the previous page, used to determine the next page of results to return :param limit: maximum number of items to return :param sort_keys: list of attributes by which results should be sorted, paired with corresponding item in sort_dirs :param sort_dirs: list of directions in which results should be sorted, paired with corresponding item in sort_keys :returns: list of matching snapshots """ if filters and not is_valid_model_filters( models.Snapshot, filters, exclude_list=('host', 'cluster_name', 'availability_zone'), ): return [] query = _generate_paginate_query( context, marker, limit, sort_keys, sort_dirs, filters, offset, models.Snapshot, ) # No snapshots would match, return empty list if not query: return [] return query.all() def _snaps_get_query( context, project_only=False, joined_load=True, ): query = model_query(context, models.Snapshot, project_only=project_only) if joined_load: query = query.options(joinedload(models.Snapshot.snapshot_metadata)) return query @apply_like_filters(model=models.Snapshot) def _process_snaps_filters(query, filters): if filters: filters = filters.copy() exclude_list = ('host', 'cluster_name', 'availability_zone') # Ensure that filters' keys exist on the model or are metadata for key in filters.keys(): # Ensure that if a metadata filter is requested # its value is a dictionary if key == 'metadata': if not isinstance(filters[key], dict): LOG.debug("Metadata filter value is not valid dictionary") return None continue if key in exclude_list: continue # for keys in filter other than metadata and exclude_list # ensure that the keys are in the Snapshot model try: column_attr = getattr(models.Snapshot, key) prop = getattr(column_attr, 'property') if isinstance(prop, RelationshipProperty): LOG.debug( "'%s' key is not valid, it maps to a relationship.", key, ) return None except AttributeError: LOG.debug("'%s' filter key is not valid.", key) return None # filter handling for host and cluster name host = filters.pop('host', None) cluster = filters.pop('cluster_name', None) az = filters.pop('availability_zone', None) if host or cluster or az: query = query.join(models.Snapshot.volume) vol_field = models.Volume if host: query = query.filter(_filter_host(vol_field.host, host)) if cluster: query = query.filter(_filter_host(vol_field.cluster_name, cluster)) if az: query = query.filter_by(availability_zone=az) filters_dict = {} LOG.debug("Building query based on filter") for key, value in filters.items(): if key == 'metadata': col_attr = getattr(models.Snapshot, 'snapshot_metadata') for k, v in value.items(): query = query.filter(col_attr.any(key=k, value=v)) else: filters_dict[key] = value # Apply exact matches if filters_dict: query = query.filter_by(**filters_dict) return query @require_context @main_context_manager.reader def snapshot_get_all_for_volume(context, volume_id): return ( model_query( context, models.Snapshot, read_deleted='no', project_only=True ) .filter_by(volume_id=volume_id) .options(joinedload(models.Snapshot.snapshot_metadata)) .all() ) @require_context @main_context_manager.reader def snapshot_get_latest_for_volume(context, volume_id): result = ( model_query( context, models.Snapshot,
read_deleted='no', project_only=True ) .filter_by(volume_id=volume_id) .options(joinedload(models.Snapshot.snapshot_metadata)) .order_by(desc(models.Snapshot.created_at)) .first() ) if not result: raise exception.VolumeSnapshotNotFound(volume_id=volume_id) return result @require_context @main_context_manager.reader def snapshot_get_all_by_host(context, host, filters=None): if filters and not is_valid_model_filters(models.Snapshot, filters): return [] query = model_query( context, models.Snapshot, read_deleted='no', project_only=True ) if filters: query = query.filter_by(**filters) # As a side effect of the introduction of pool-aware scheduler, # newly created volumes will have pool information appended to # 'host' field of a volume record. So a volume record in DB can # now be either form below: # Host # Host#Pool if host and isinstance(host, str): host_attr = getattr(models.Volume, 'host') conditions = [host_attr == host, host_attr.op('LIKE')(host + '#%')] query = ( query.join(models.Snapshot.volume) .filter(or_(*conditions)) .options(joinedload(models.Snapshot.snapshot_metadata)) ) return query.all() elif not host: return [] @require_context @main_context_manager.reader def snapshot_get_all_for_cgsnapshot(context, cgsnapshot_id): return ( model_query( context, models.Snapshot, read_deleted='no', project_only=True ) .filter_by(cgsnapshot_id=cgsnapshot_id) .options(joinedload(models.Snapshot.volume)) .options(joinedload(models.Snapshot.snapshot_metadata)) .all() ) @require_context @main_context_manager.reader def snapshot_get_all_for_group_snapshot(context, group_snapshot_id): return ( model_query( context, models.Snapshot, read_deleted='no', project_only=True ) .filter_by(group_snapshot_id=group_snapshot_id) .options(joinedload(models.Snapshot.volume)) .options(joinedload(models.Snapshot.snapshot_metadata)) .all() ) @require_context @main_context_manager.reader def snapshot_get_all_by_project( context, project_id, filters=None, marker=None, limit=None, sort_keys=None, sort_dirs=None, offset=None, ): """Retrieves all snapshots in a project. If no sorting parameters are specified then returned snapshots are sorted first by the 'created_at' key and then by the 'id' key in descending order. 
:param context: context to query under :param project_id: project for all snapshots being retrieved :param filters: dictionary of filters; will do exact matching on values :param marker: the last item of the previous page, used to determine the next page of results to return :param limit: maximum number of items to return :param sort_keys: list of attributes by which results should be sorted, paired with corresponding item in sort_dirs :param sort_dirs: list of directions in which results should be sorted, paired with corresponding item in sort_keys :returns: list of matching snapshots """ if filters and not is_valid_model_filters( models.Snapshot, filters, exclude_list=('host', 'cluster_name', 'availability_zone'), ): return [] authorize_project_context(context, project_id) # Add project_id to filters filters = filters.copy() if filters else {} filters['project_id'] = project_id query = _generate_paginate_query( context, marker, limit, sort_keys, sort_dirs, filters, offset, models.Snapshot, ) # No snapshots would match, return empty list if not query: return [] query = query.options(joinedload(models.Snapshot.snapshot_metadata)) return query.all() @require_context def _snapshot_data_get_for_project( context, project_id, volume_type_id=None, host=None, skip_internal=True, ): authorize_project_context(context, project_id) query = model_query( context, func.count(models.Snapshot.id), func.sum(models.Snapshot.volume_size), read_deleted="no", ) if skip_internal: query = query.filter(models.Snapshot.use_quota) if volume_type_id or host: query = query.join(models.Snapshot.volume) if volume_type_id: query = query.filter( models.Volume.volume_type_id == volume_type_id ) if host: query = query.filter(_filter_host(models.Volume.host, host)) result = query.filter(models.Snapshot.project_id == project_id).first() # NOTE(vish): convert None to 0 return (result[0] or 0, result[1] or 0) @require_context @main_context_manager.reader def snapshot_data_get_for_project( context, project_id, volume_type_id=None, host=None ): # This method doesn't support filtering temporary resources (use_quota # field) and defaults to returning all snapshots because all callers (quota # sync methods and os-host API extension) require all the snapshots. return _snapshot_data_get_for_project( context, project_id, volume_type_id, host=host, skip_internal=False ) @require_context @main_context_manager.reader def snapshot_get_all_active_by_window( context, begin, end=None, project_id=None ): """Return snapshots that were active during window.""" query = model_query(context, models.Snapshot, read_deleted="yes") query = query.filter( or_( models.Snapshot.deleted_at == None, # noqa models.Snapshot.deleted_at > begin, ) ) query = query.options(joinedload(models.Snapshot.volume)) query = query.options(joinedload(models.Snapshot.snapshot_metadata)) if end: query = query.filter(models.Snapshot.created_at < end) if project_id: query = query.filter_by(project_id=project_id) return query.all() @handle_db_data_error @require_context @main_context_manager.writer def snapshot_update(context, snapshot_id, values): query = model_query(context, models.Snapshot, project_only=True) result = query.filter_by(id=snapshot_id).update(values) if not result: raise exception.SnapshotNotFound(snapshot_id=snapshot_id) @require_context @main_context_manager.reader def get_snapshot_summary(context, project_only, filters=None): """Retrieves all snapshots summary. 
:param context: context to query under :param project_only: limit summary to snapshots :param filters: dictionary of filters; values that are in lists, tuples, or sets cause an 'IN' operation, while exact matching is used for other values, see _process_snaps_filters function for more information :returns: snapshots summary """ if not (project_only or is_admin_context(context)): raise exception.AdminRequired() query = model_query( context, func.count(models.Snapshot.id), func.sum(models.Snapshot.volume_size), read_deleted="no", ) if project_only: query = query.filter_by(project_id=context.project_id) if filters: query = _process_snaps_filters(query, filters) if query is None: return [] result = query.first() return result[0] or 0, result[1] or 0 #################### def _snapshot_metadata_get_query(context, snapshot_id): return model_query( context, models.SnapshotMetadata, read_deleted="no" ).filter_by(snapshot_id=snapshot_id) @require_context def _snapshot_metadata_get(context, snapshot_id): rows = _snapshot_metadata_get_query(context, snapshot_id).all() result = {} for row in rows: result[row['key']] = row['value'] return result @require_context @require_snapshot_exists @main_context_manager.reader def snapshot_metadata_get(context, snapshot_id): return _snapshot_metadata_get(context, snapshot_id) @require_context @require_snapshot_exists @oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True) @main_context_manager.writer def snapshot_metadata_delete(context, snapshot_id, key): query = _snapshot_metadata_get_query(context, snapshot_id).filter_by( key=key ) entity = query.column_descriptions[0]['entity'] query.update( { 'deleted': True, 'deleted_at': timeutils.utcnow(), 'updated_at': entity.updated_at, } ) @require_context def _snapshot_metadata_get_item(context, snapshot_id, key): result = ( _snapshot_metadata_get_query(context, snapshot_id) .filter_by(key=key) .first() ) if not result: raise exception.SnapshotMetadataNotFound( metadata_key=key, snapshot_id=snapshot_id ) return result @require_context @require_snapshot_exists @handle_db_data_error @oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True) @main_context_manager.writer def snapshot_metadata_update(context, snapshot_id, metadata, delete): # Set existing metadata to deleted if delete argument is True if delete: original_metadata = _snapshot_metadata_get(context, snapshot_id) for meta_key, meta_value in original_metadata.items(): if meta_key not in metadata: meta_ref = _snapshot_metadata_get_item( context, snapshot_id, meta_key, ) meta_ref.update( { 'deleted': True, 'deleted_at': timeutils.utcnow(), } ) meta_ref.save(context.session) meta_ref = None # Now update all existing items with new values, or create new meta # objects for meta_key, meta_value in metadata.items(): # update the value whether it exists or not item = {"value": meta_value} try: meta_ref = _snapshot_metadata_get_item( context, snapshot_id, meta_key ) except exception.SnapshotMetadataNotFound: meta_ref = models.SnapshotMetadata() item.update({"key": meta_key, "snapshot_id": snapshot_id}) meta_ref.update(item) meta_ref.save(context.session) return _snapshot_metadata_get(context, snapshot_id) ################### @handle_db_data_error @require_admin_context @main_context_manager.writer def volume_type_create(context, values, projects=None): """Create a new volume type. 
In order to pass in extra specs, the values dict should contain a 'extra_specs' key/value pair: {'extra_specs' : {'k1': 'v1', 'k2': 'v2', ...}} """ if not values.get('id'): values['id'] = str(uuid.uuid4()) projects = projects or [] orm_projects = [] try: _volume_type_get_by_name(context, values['name']) raise exception.VolumeTypeExists(id=values['name']) except exception.VolumeTypeNotFoundByName: pass try: _volume_type_get(context, values['id']) raise exception.VolumeTypeExists(id=values['id']) except exception.VolumeTypeNotFound: pass try: values['extra_specs'] = _metadata_refs( values.get('extra_specs'), models.VolumeTypeExtraSpecs, ) volume_type_ref = models.VolumeType() volume_type_ref.update(values) context.session.add(volume_type_ref) except Exception as e: raise db_exc.DBError(e) for project in set(projects): access_ref = models.VolumeTypeProjects() access_ref.update( { "volume_type_id": volume_type_ref.id, "project_id": project, } ) access_ref.save(context.session) orm_projects.append(access_ref) volume_type_ref.projects = orm_projects return volume_type_ref @handle_db_data_error @require_admin_context @main_context_manager.writer def group_type_create(context, values, projects=None): """Create a new group type. In order to pass in group specs, the values dict should contain a 'group_specs' key/value pair: {'group_specs' : {'k1': 'v1', 'k2': 'v2', ...}} """ if not values.get('id'): values['id'] = str(uuid.uuid4()) projects = projects or [] try: _group_type_get_by_name(context, values['name']) raise exception.GroupTypeExists(id=values['name']) except exception.GroupTypeNotFoundByName: pass try: _group_type_get(context, values['id']) raise exception.GroupTypeExists(id=values['id']) except exception.GroupTypeNotFound: pass try: values['group_specs'] = _metadata_refs( values.get('group_specs'), models.GroupTypeSpecs ) group_type_ref = models.GroupType() group_type_ref.update(values) context.session.add(group_type_ref) except Exception as e: raise db_exc.DBError(e) for project in set(projects): access_ref = models.GroupTypeProjects() access_ref.update( { "group_type_id": group_type_ref.id, "project_id": project, } ) access_ref.save(context.session) return group_type_ref def _volume_type_get_query(context, read_deleted='no', expected_fields=None): expected_fields = expected_fields or [] query = model_query( context, models.VolumeType, read_deleted=read_deleted ).options(joinedload(models.VolumeType.extra_specs)) for expected in expected_fields: query = query.options(joinedload(getattr(models.VolumeType, expected))) if not context.is_admin: the_filter = [models.VolumeType.is_public == true()] projects_attr = getattr(models.VolumeType, 'projects') the_filter.extend([projects_attr.any(project_id=context.project_id)]) query = query.filter(or_(*the_filter)) return query def _group_type_get_query(context, read_deleted='no', expected_fields=None): expected_fields = expected_fields or [] query = model_query( context, models.GroupType, read_deleted=read_deleted ).options(joinedload(models.GroupType.group_specs)) if 'projects' in expected_fields: query = query.options(joinedload(models.GroupType.projects)) if not context.is_admin: the_filter = [models.GroupType.is_public == true()] projects_attr = models.GroupType.projects the_filter.extend([projects_attr.any(project_id=context.project_id)]) query = query.filter(or_(*the_filter)) return query def _process_volume_types_filters(query, filters): context = filters.pop('context', None) if 'is_public' in filters and filters['is_public'] is not None: 
the_filter = [models.VolumeType.is_public == filters['is_public']] if filters['is_public'] and context.project_id is not None: projects_attr = getattr(models.VolumeType, 'projects') the_filter.extend( [projects_attr.any(project_id=context.project_id, deleted=0)] ) if len(the_filter) > 1: query = query.filter(or_(*the_filter)) else: query = query.filter(the_filter[0]) if 'is_public' in filters: del filters['is_public'] if filters: # Ensure that filters' keys exist on the model if not is_valid_model_filters(models.VolumeType, filters): return if filters.get('extra_specs') is not None: the_filter = [] searchdict = filters.pop('extra_specs') extra_specs = getattr(models.VolumeType, 'extra_specs') for k, v in searchdict.items(): # NOTE(tommylikehu): We will use 'LIKE' operator for # 'availability_zones' extra spec as it always store the # AZ list info within the format: "az1, az2,...." if k == 'RESKEY:availability_zones': the_filter.extend( [ extra_specs.any( models.VolumeTypeExtraSpecs.value.like( u'%%%s%%' % v ), key=k, deleted=False, ) ] ) else: the_filter.extend( [extra_specs.any(key=k, value=v, deleted=False)] ) if len(the_filter) > 1: query = query.filter(and_(*the_filter)) else: query = query.filter(the_filter[0]) query = query.filter_by(**filters) return query def _process_group_types_filters(query, filters): context = filters.pop('context', None) if 'is_public' in filters and filters['is_public'] is not None: the_filter = [models.GroupType.is_public == filters['is_public']] if filters['is_public'] and context.project_id is not None: projects_attr = getattr(models.GroupType, 'projects') the_filter.extend( [ projects_attr.any( project_id=context.project_id, deleted=False ) ] ) if len(the_filter) > 1: query = query.filter(or_(*the_filter)) else: query = query.filter(the_filter[0]) if 'is_public' in filters: del filters['is_public'] if filters: # Ensure that filters' keys exist on the model if not is_valid_model_filters(models.GroupType, filters): return if filters.get('group_specs') is not None: the_filter = [] searchdict = filters.pop('group_specs') group_specs = getattr(models.GroupType, 'group_specs') for k, v in searchdict.items(): the_filter.extend( [group_specs.any(key=k, value=v, deleted=False)] ) if len(the_filter) > 1: query = query.filter(and_(*the_filter)) else: query = query.filter(the_filter[0]) query = query.filter_by(**filters) return query @handle_db_data_error @require_admin_context def _type_update(context, type_id, values, is_group): if is_group: model = models.GroupType exists_exc = exception.GroupTypeExists else: model = models.VolumeType exists_exc = exception.VolumeTypeExists # No description change if values['description'] is None: del values['description'] # No is_public change if values['is_public'] is None: del values['is_public'] # No name change if values['name'] is None: del values['name'] else: # Group type name is unique. If change to a name that belongs to # a different group_type, it should be prevented. 
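            # The check below is an EXISTS query; roughly (illustrative SQL
            # for the volume type case):
            #   SELECT EXISTS (SELECT 1 FROM volume_types
            #                  WHERE name = :new_name
            #                    AND id != :type_id
            #                    AND NOT deleted)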
conditions = and_( model.name == values['name'], model.id != type_id, ~model.deleted ) query = context.session.query(sql.exists().where(conditions)) if query.scalar(): raise exists_exc(id=values['name']) query = model_query(context, model, project_only=True) result = query.filter_by(id=type_id).update(values) if not result: if is_group: raise exception.GroupTypeNotFound(group_type_id=type_id) else: raise exception.VolumeTypeNotFound(volume_type_id=type_id) @main_context_manager.writer def volume_type_update(context, volume_type_id, values): _type_update(context, volume_type_id, values, is_group=False) @main_context_manager.writer def group_type_update(context, group_type_id, values): _type_update(context, group_type_id, values, is_group=True) @require_context @main_context_manager.reader def volume_type_get_all( context, inactive=False, filters=None, marker=None, limit=None, sort_keys=None, sort_dirs=None, offset=None, list_result=False, ): """Returns a dict describing all volume_types with name as key. If no sort parameters are specified then the returned volume types are sorted first by the 'created_at' key and then by the 'id' key in descending order. :param context: context to query under :param marker: the last item of the previous page, used to determine the next page of results to return :param limit: maximum number of items to return :param sort_keys: list of attributes by which results should be sorted, paired with corresponding item in sort_dirs :param sort_dirs: list of directions in which results should be sorted, paired with corresponding item in sort_keys :param filters: dictionary of filters; values that are in lists, tuples, or sets cause an 'IN' operation, while exact matching is used for other values, see _process_volume_type_filters function for more information :param list_result: For compatibility, if list_result = True, return a list instead of dict. :returns: list/dict of matching volume types """ # Add context for _process_volume_types_filters filters = filters or {} filters['context'] = context # Generate the query query = _generate_paginate_query( context, marker, limit, sort_keys, sort_dirs, filters, offset, models.VolumeType, ) # No volume types would match, return empty dict or list if query is None: if list_result: return [] return {} rows = query.all() if list_result: result = [ _dict_with_extra_specs_if_authorized(context, row) for row in rows ] return result result = { row['name']: _dict_with_extra_specs_if_authorized(context, row) for row in rows } return result @require_context @main_context_manager.reader def group_type_get_all( context, inactive=False, filters=None, marker=None, limit=None, sort_keys=None, sort_dirs=None, offset=None, list_result=False, ): """Returns a dict describing all group_types with name as key. If no sort parameters are specified then the returned group types are sorted first by the 'created_at' key and then by the 'id' key in descending order. 
:param context: context to query under :param marker: the last item of the previous page, used to determine the next page of results to return :param limit: maximum number of items to return :param sort_keys: list of attributes by which results should be sorted, paired with corresponding item in sort_dirs :param sort_dirs: list of directions in which results should be sorted, paired with corresponding item in sort_keys :param filters: dictionary of filters; values that are in lists, tuples, or sets cause an 'IN' operation, while exact matching is used for other values, see _process_volume_type_filters function for more information :param list_result: For compatibility, if list_result = True, return a list instead of dict. :returns: list/dict of matching group types """ # Add context for _process_group_types_filters filters = filters or {} filters['context'] = context # Generate the query query = _generate_paginate_query( context, marker, limit, sort_keys, sort_dirs, filters, offset, models.GroupType, ) # No group types would match, return empty dict or list if query is None: if list_result: return [] return {} rows = query.all() if list_result: result = [ _dict_with_group_specs_if_authorized(context, row) for row in rows ] return result result = { row['name']: _dict_with_group_specs_if_authorized(context, row) for row in rows } return result def _volume_type_get_id_from_volume_type_query(context, id): return model_query( context, models.VolumeType.id, read_deleted="no", base_model=models.VolumeType, ).filter_by(id=id) def _group_type_get_id_from_group_type_query(context, id): return model_query( context, models.GroupType.id, read_deleted="no", base_model=models.GroupType, ).filter_by(id=id) def _volume_type_get_id_from_volume_type(context, id): result = _volume_type_get_id_from_volume_type_query(context, id).first() if not result: raise exception.VolumeTypeNotFound(volume_type_id=id) return result[0] def _group_type_get_id_from_group_type(context, id): result = _group_type_get_id_from_group_type_query(context, id).first() if not result: raise exception.GroupTypeNotFound(group_type_id=id) return result[0] def _volume_type_get_db_object( context, id, inactive=False, expected_fields=None ): read_deleted = "yes" if inactive else "no" result = ( _volume_type_get_query(context, read_deleted, expected_fields) .filter_by(id=id) .first() ) return result def _group_type_get_db_object( context, id, inactive=False, expected_fields=None, ): read_deleted = "yes" if inactive else "no" result = ( _group_type_get_query(context, read_deleted, expected_fields) .filter_by(id=id) .first() ) return result @require_context def _volume_type_get(context, id, inactive=False, expected_fields=None): expected_fields = expected_fields or [] result = _volume_type_get_db_object( context, id, inactive, expected_fields, ) if not result: raise exception.VolumeTypeNotFound(volume_type_id=id) vtype = _dict_with_extra_specs_if_authorized(context, result) if 'projects' in expected_fields: vtype['projects'] = [p['project_id'] for p in result['projects']] if 'qos_specs' in expected_fields: vtype['qos_specs'] = result.qos_specs return vtype @require_context def _group_type_get(context, id, inactive=False, expected_fields=None): expected_fields = expected_fields or [] result = _group_type_get_db_object(context, id, inactive, expected_fields) if not result: raise exception.GroupTypeNotFound(group_type_id=id) gtype = _dict_with_group_specs_if_authorized(context, result) if 'projects' in expected_fields: gtype['projects'] = 
[p['project_id'] for p in result['projects']] return gtype @require_context @main_context_manager.reader def volume_type_get(context, id, inactive=False, expected_fields=None): """Get volume type by id. :param context: context to query under :param id: Volume type id to get. :param inactive: Consider inactive volume types when searching :param expected_fields: Return those additional fields. Supported fields are: projects. :returns: volume type """ return _volume_type_get( context, id, inactive=inactive, expected_fields=expected_fields ) @require_context @main_context_manager.reader def group_type_get(context, id, inactive=False, expected_fields=None): """Return a dict describing specific group_type.""" return _group_type_get( context, id, inactive=inactive, expected_fields=expected_fields ) def _volume_type_get_full(context, id): """Return dict for a specific volume_type with extra_specs and projects.""" return _volume_type_get( context, id, inactive=False, expected_fields=('extra_specs', 'projects'), ) def _group_type_get_full(context, id): """Return dict for a specific group_type with group_specs and projects.""" return _group_type_get( context, id, inactive=False, expected_fields=('group_specs', 'projects'), ) @require_context def _volume_type_ref_get(context, id, inactive=False): read_deleted = "yes" if inactive else "no" result = ( model_query(context, models.VolumeType, read_deleted=read_deleted) .options(joinedload(models.VolumeType.extra_specs)) .filter_by(id=id) .first() ) if not result: raise exception.VolumeTypeNotFound(volume_type_id=id) return result @require_context def _group_type_ref_get(context, id, inactive=False): read_deleted = "yes" if inactive else "no" result = ( model_query(context, models.GroupType, read_deleted=read_deleted) .options(joinedload(models.GroupType.group_specs)) .filter_by(id=id) .first() ) if not result: raise exception.GroupTypeNotFound(group_type_id=id) return result @require_context def _volume_type_get_by_name(context, name): result = ( model_query(context, models.VolumeType) .options(joinedload(models.VolumeType.extra_specs)) .filter_by(name=name) .first() ) if not result: raise exception.VolumeTypeNotFoundByName(volume_type_name=name) return _dict_with_extra_specs_if_authorized(context, result) @require_context def _group_type_get_by_name(context, name): result = ( model_query(context, models.GroupType) .options(joinedload(models.GroupType.group_specs)) .filter_by(name=name) .first() ) if not result: raise exception.GroupTypeNotFoundByName(group_type_name=name) return _dict_with_group_specs_if_authorized(context, result) @require_context @main_context_manager.reader def volume_type_get_by_name(context, name): """Return a dict describing specific volume_type.""" return _volume_type_get_by_name(context, name) @require_context @main_context_manager.reader def group_type_get_by_name(context, name): """Return a dict describing specific group_type.""" return _group_type_get_by_name(context, name) @require_context @main_context_manager.reader def volume_types_get_by_name_or_id(context, volume_type_list): """Return a dict describing specific volume_type.""" req_volume_types = [] for vol_t in volume_type_list: if not uuidutils.is_uuid_like(vol_t): vol_type = _volume_type_get_by_name(context, vol_t) else: try: vol_type = _volume_type_get(context, vol_t) except exception.VolumeTypeNotFound: # check again if we get this volume type by uuid-like name try: vol_type = _volume_type_get_by_name(context, vol_t) except exception.VolumeTypeNotFoundByName: raise 
exception.VolumeTypeNotFound(volume_type_id=vol_t) req_volume_types.append(vol_type) return req_volume_types @require_context @main_context_manager.reader def group_types_get_by_name_or_id(context, group_type_list): """Return a dict describing specific group_type.""" req_group_types = [] for grp_t in group_type_list: if not uuidutils.is_uuid_like(grp_t): grp_type = _group_type_get_by_name(context, grp_t) else: grp_type = _group_type_get(context, grp_t) req_group_types.append(grp_type) return req_group_types @require_admin_context @require_qos_specs_exists @main_context_manager.reader def volume_type_qos_associations_get(context, qos_specs_id, inactive=False): read_deleted = "yes" if inactive else "no" vts = ( model_query(context, models.VolumeType, read_deleted=read_deleted) .options(joinedload(models.VolumeType.extra_specs)) .options(joinedload(models.VolumeType.projects)) .filter_by(qos_specs_id=qos_specs_id) .all() ) return vts @require_admin_context @main_context_manager.writer def volume_type_qos_associate(context, type_id, qos_specs_id): _volume_type_get(context, type_id) context.session.query(models.VolumeType).filter_by(id=type_id).update( {'qos_specs_id': qos_specs_id, 'updated_at': timeutils.utcnow()} ) @require_admin_context @main_context_manager.writer def volume_type_qos_disassociate(context, qos_specs_id, type_id): """Disassociate volume type from qos specs.""" _volume_type_get(context, type_id) context.session.query(models.VolumeType).filter_by(id=type_id).filter_by( qos_specs_id=qos_specs_id ).update({'qos_specs_id': None, 'updated_at': timeutils.utcnow()}) @require_admin_context @main_context_manager.writer def volume_type_qos_disassociate_all(context, qos_specs_id): """Disassociate all volume types associated with specified qos specs.""" context.session.query(models.VolumeType).filter_by( qos_specs_id=qos_specs_id ).update({'qos_specs_id': None, 'updated_at': timeutils.utcnow()}) @require_admin_context @main_context_manager.reader def volume_type_qos_specs_get(context, type_id): """Return all qos specs for given volume type. 
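    If the volume type has no qos specs associated, the 'qos_specs' value
    in the returned dict is None.
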
result looks like: { 'qos_specs': { 'id': 'qos-specs-id', 'name': 'qos_specs_name', 'consumer': 'Consumer', 'specs': { 'key1': 'value1', 'key2': 'value2', 'key3': 'value3' } } } """ _volume_type_get(context, type_id, context.session) row = ( context.session.query(models.VolumeType) .options(joinedload(models.VolumeType.qos_specs)) .filter_by(id=type_id) .first() ) # row.qos_specs is a list of QualityOfServiceSpecs ref specs = _dict_with_qos_specs(row.qos_specs) if not specs: # turn empty list to None specs = None else: specs = specs[0] return {'qos_specs': specs} @require_admin_context @oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True) @main_context_manager.writer def volume_type_destroy(context, type_id): utcnow = timeutils.utcnow() vol_types = volume_type_get_all(context) if len(vol_types) <= 1: raise exception.VolumeTypeDeletionError(volume_type_id=type_id) _volume_type_get(context, type_id) results = ( model_query(context, models.Volume) .filter_by(volume_type_id=type_id) .all() ) group_count = ( model_query( context, models.GroupVolumeTypeMapping, read_deleted="no", ) .filter_by(volume_type_id=type_id) .count() ) cg_count = ( model_query( context, models.ConsistencyGroup, ) .filter(models.ConsistencyGroup.volume_type_id.contains(type_id)) .count() ) if results or group_count or cg_count: LOG.error('VolumeType %s deletion failed, VolumeType in use.', type_id) raise exception.VolumeTypeInUse(volume_type_id=type_id) query = model_query(context, models.VolumeType).filter_by(id=type_id) entity = query.column_descriptions[0]['entity'] updated_values = { 'deleted': True, 'deleted_at': utcnow, 'updated_at': entity.updated_at, } query.update(updated_values) query = model_query( context, models.VolumeTypeExtraSpecs, ).filter_by(volume_type_id=type_id) entity = query.column_descriptions[0]['entity'] query.update( { 'deleted': True, 'deleted_at': utcnow, 'updated_at': entity.updated_at, } ) query = model_query(context, models.Encryption).filter_by( volume_type_id=type_id ) entity = query.column_descriptions[0]['entity'] query.update( { 'deleted': True, 'deleted_at': utcnow, 'updated_at': entity.updated_at, } ) model_query( context, models.VolumeTypeProjects, read_deleted="int_no" ).filter_by(volume_type_id=type_id).soft_delete(synchronize_session=False) del updated_values['updated_at'] return updated_values @require_admin_context @oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True) @main_context_manager.writer def group_type_destroy(context, type_id): _group_type_get(context, type_id) results = ( model_query(context, models.Group) .filter_by(group_type_id=type_id) .all() ) if results: LOG.error('GroupType %s deletion failed, GroupType in use.', type_id) raise exception.GroupTypeInUse(group_type_id=type_id) query = model_query(context, models.GroupType).filter_by(id=type_id) entity = query.column_descriptions[0]['entity'] query.update( { 'deleted': True, 'deleted_at': timeutils.utcnow(), 'updated_at': entity.updated_at, } ) query = model_query(context, models.GroupTypeSpecs).filter_by( group_type_id=type_id ) entity = query.column_descriptions[0]['entity'] query.update( { 'deleted': True, 'deleted_at': timeutils.utcnow(), 'updated_at': entity.updated_at, } ) @require_context @main_context_manager.reader def volume_get_all_active_by_window(context, begin, end=None, project_id=None): """Return volumes that were active during window.""" query = model_query(context, models.Volume, read_deleted="yes") query = query.filter( or_( models.Volume.deleted_at == None, # noqa 
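            # i.e. the volume was never deleted, or was deleted after the
            # window opened.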
models.Volume.deleted_at > begin, ) ) if end: query = query.filter(models.Volume.created_at < end) if project_id: query = query.filter_by(project_id=project_id) query = ( query.options(joinedload(models.Volume.volume_metadata)) .options(joinedload(models.Volume.volume_type)) .options(joinedload(models.Volume.volume_attachment)) .options(joinedload(models.Volume.consistencygroup)) .options(joinedload(models.Volume.group)) ) if is_admin_context(context): query = query.options(joinedload(models.Volume.volume_admin_metadata)) return query.all() def _volume_type_access_query(context): return model_query( context, models.VolumeTypeProjects, read_deleted="int_no" ) def _group_type_access_query(context): return model_query(context, models.GroupTypeProjects, read_deleted="no") @require_admin_context @main_context_manager.reader def volume_type_access_get_all(context, type_id): volume_type_id = _volume_type_get_id_from_volume_type(context, type_id) return ( _volume_type_access_query(context) .filter_by(volume_type_id=volume_type_id) .all() ) @require_admin_context @main_context_manager.reader def group_type_access_get_all(context, type_id): group_type_id = _group_type_get_id_from_group_type(context, type_id) return ( _group_type_access_query(context) .filter_by(group_type_id=group_type_id) .all() ) def _group_volume_type_mapping_query(context): return model_query( context, models.GroupVolumeTypeMapping, read_deleted="no" ) @require_admin_context @main_context_manager.reader def volume_type_get_all_by_group(context, group_id): # Generic volume group mappings = ( _group_volume_type_mapping_query(context) .filter_by(group_id=group_id) .all() ) volume_type_ids = [mapping.volume_type_id for mapping in mappings] query = ( model_query(context, models.VolumeType, read_deleted='no') .filter(models.VolumeType.id.in_(volume_type_ids)) .options(joinedload(models.VolumeType.extra_specs)) .options(joinedload(models.VolumeType.projects)) .all() ) return query def _group_volume_type_mapping_get_all_by_group_volume_type( context, group_id, volume_type_id ): mappings = ( _group_volume_type_mapping_query(context) .filter_by(group_id=group_id) .filter_by(volume_type_id=volume_type_id) .all() ) return mappings @require_admin_context @main_context_manager.writer def volume_type_access_add(context, type_id, project_id): """Add given tenant to the volume type access list.""" volume_type_id = _volume_type_get_id_from_volume_type(context, type_id) access_ref = models.VolumeTypeProjects() access_ref.update( {"volume_type_id": volume_type_id, "project_id": project_id} ) try: access_ref.save(context.session) except db_exc.DBDuplicateEntry: raise exception.VolumeTypeAccessExists( volume_type_id=type_id, project_id=project_id ) return access_ref @require_admin_context @main_context_manager.writer def group_type_access_add(context, type_id, project_id): """Add given tenant to the group type access list.""" group_type_id = _group_type_get_id_from_group_type(context, type_id) access_ref = models.GroupTypeProjects() access_ref.update( {"group_type_id": group_type_id, "project_id": project_id} ) try: access_ref.save(context.session) except db_exc.DBDuplicateEntry: raise exception.GroupTypeAccessExists( group_type_id=type_id, project_id=project_id ) return access_ref @require_admin_context @main_context_manager.writer def volume_type_access_remove(context, type_id, project_id): """Remove given tenant from the volume type access list.""" volume_type_id = _volume_type_get_id_from_volume_type(context, type_id) count = ( 
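        # soft_delete() returns the number of rows it flagged as deleted;
        # zero means no matching access entry exists for the project, which
        # is reported as VolumeTypeAccessNotFound below.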
_volume_type_access_query(context) .filter_by(volume_type_id=volume_type_id) .filter_by(project_id=project_id) .soft_delete(synchronize_session=False) ) if count == 0: raise exception.VolumeTypeAccessNotFound( volume_type_id=type_id, project_id=project_id ) def _project_default_volume_type_get(context, project_id=None): """Get default volume type(s) for a project(s) If a project id is passed, it returns default type for that particular project else returns default volume types for all projects """ if project_id: return ( model_query(context, models.DefaultVolumeTypes) .filter_by(project_id=project_id) .first() ) return model_query(context, models.DefaultVolumeTypes).all() @main_context_manager.reader def project_default_volume_type_get(context, project_id=None): """Get default volume type(s) for a project(s) If a project id is passed, it returns default type for that particular project else returns default volume types for all projects """ return _project_default_volume_type_get(context, project_id) @main_context_manager.writer def project_default_volume_type_set(context, volume_type_id, project_id): """Set default volume type for a project""" update_default = _project_default_volume_type_get(context, project_id) if update_default: LOG.info("Updating default type for project %s", project_id) update_default.volume_type_id = volume_type_id return update_default access_ref = models.DefaultVolumeTypes( volume_type_id=volume_type_id, project_id=project_id ) access_ref.save(context.session) return access_ref @main_context_manager.reader def get_all_projects_with_default_type(context, volume_type_id): """Get all projects with volume_type_id as their default type""" return ( model_query(context, models.DefaultVolumeTypes) .filter_by(volume_type_id=volume_type_id) .all() ) @main_context_manager.writer def project_default_volume_type_unset(context, project_id): """Unset default volume type for a project (hard delete)""" model_query(context, models.DefaultVolumeTypes).filter_by( project_id=project_id ).delete() @require_admin_context @main_context_manager.writer def group_type_access_remove(context, type_id, project_id): """Remove given tenant from the group type access list.""" group_type_id = _group_type_get_id_from_group_type(context, type_id) count = ( _group_type_access_query(context) .filter_by(group_type_id=group_type_id) .filter_by(project_id=project_id) .soft_delete(synchronize_session=False) ) if count == 0: raise exception.GroupTypeAccessNotFound( group_type_id=type_id, project_id=project_id ) #################### def _volume_type_extra_specs_query(context, volume_type_id): return model_query( context, models.VolumeTypeExtraSpecs, read_deleted="no", ).filter_by(volume_type_id=volume_type_id) @require_context @main_context_manager.reader def volume_type_extra_specs_get(context, volume_type_id): rows = _volume_type_extra_specs_query(context, volume_type_id).all() result = {} for row in rows: result[row['key']] = row['value'] return result @require_context @main_context_manager.writer def volume_type_extra_specs_delete(context, volume_type_id, key): _volume_type_extra_specs_get_item(context, volume_type_id, key) query = _volume_type_extra_specs_query( context, volume_type_id, ).filter_by(key=key) entity = query.column_descriptions[0]['entity'] query.update( { 'deleted': True, 'deleted_at': timeutils.utcnow(), 'updated_at': entity.updated_at, }, ) @require_context def _volume_type_extra_specs_get_item(context, volume_type_id, key): result = ( _volume_type_extra_specs_query(context, 
volume_type_id) .filter_by(key=key) .first() ) if not result: raise exception.VolumeTypeExtraSpecsNotFound( extra_specs_key=key, volume_type_id=volume_type_id, ) return result @handle_db_data_error @require_context @main_context_manager.writer def volume_type_extra_specs_update_or_create( context, volume_type_id, extra_specs, ): spec_ref = None for key, value in extra_specs.items(): try: spec_ref = _volume_type_extra_specs_get_item( context, volume_type_id, key, ) except exception.VolumeTypeExtraSpecsNotFound: spec_ref = models.VolumeTypeExtraSpecs() spec_ref.update( { "key": key, "value": value, "volume_type_id": volume_type_id, "deleted": False, }, ) spec_ref.save(context.session) return extra_specs #################### def _group_type_specs_query(context, group_type_id): return model_query( context, models.GroupTypeSpecs, read_deleted="no", ).filter_by(group_type_id=group_type_id) @require_context @main_context_manager.reader def group_type_specs_get(context, group_type_id): rows = _group_type_specs_query(context, group_type_id).all() result = {} for row in rows: result[row['key']] = row['value'] return result @require_context @main_context_manager.writer def group_type_specs_delete(context, group_type_id, key): _group_type_specs_get_item(context, group_type_id, key) query = _group_type_specs_query(context, group_type_id).filter_by(key=key) entity = query.column_descriptions[0]['entity'] query.update( { 'deleted': True, 'deleted_at': timeutils.utcnow(), 'updated_at': entity.updated_at, }, ) @require_context def _group_type_specs_get_item(context, group_type_id, key): result = ( _group_type_specs_query(context, group_type_id) .filter_by(key=key) .first() ) if not result: raise exception.GroupTypeSpecsNotFound( group_specs_key=key, group_type_id=group_type_id, ) return result @handle_db_data_error @require_context @main_context_manager.writer def group_type_specs_update_or_create(context, group_type_id, group_specs): spec_ref = None for key, value in group_specs.items(): try: spec_ref = _group_type_specs_get_item(context, group_type_id, key) except exception.GroupTypeSpecsNotFound: spec_ref = models.GroupTypeSpecs() spec_ref.update( { "key": key, "value": value, "group_type_id": group_type_id, "deleted": False, }, ) spec_ref.save(context.session) return group_specs #################### @require_admin_context @main_context_manager.writer def qos_specs_create(context, values): """Create a new QoS specs. :param values dictionary that contains specifications for QoS Expected format of the input parameter: .. 
code-block:: json { 'name': 'Name', 'consumer': 'front-end', 'specs': { 'total_iops_sec': 1000, 'total_bytes_sec': 1024000 } } """ specs_id = str(uuid.uuid4()) try: _qos_specs_get_all_by_name(context, values['name']) raise exception.QoSSpecsExists(specs_id=values['name']) except exception.QoSSpecsNotFound: pass try: # Insert a root entry for QoS specs specs_root = models.QualityOfServiceSpecs() root = {'id': specs_id} # 'QoS_Specs_Name' is an internal reserved key to store # the name of QoS specs root['key'] = 'QoS_Specs_Name' root['value'] = values['name'] LOG.debug("DB qos_specs_create(): root %s", root) specs_root.update(root) specs_root.save(context.session) # Save 'consumer' value directly as it will not be in # values['specs'] and so we avoid modifying/copying passed in dict consumer = { 'key': 'consumer', 'value': values['consumer'], 'specs_id': specs_id, 'id': str(uuid.uuid4()), } cons_entry = models.QualityOfServiceSpecs() cons_entry.update(consumer) cons_entry.save(context.session) # Insert all specification entries for QoS specs for k, v in values.get('specs', {}).items(): item = {'key': k, 'value': v, 'specs_id': specs_id} item['id'] = str(uuid.uuid4()) spec_entry = models.QualityOfServiceSpecs() spec_entry.update(item) spec_entry.save(context.session) except db_exc.DBDataError: msg = _('Error writing field to database') LOG.exception(msg) raise exception.Invalid(msg) except Exception as e: raise db_exc.DBError(e) return {'id': specs_root.id, 'name': specs_root.value} @require_admin_context def _qos_specs_get_all_by_name(context, name, inactive=False): read_deleted = 'yes' if inactive else 'no' results = ( model_query( context, models.QualityOfServiceSpecs, read_deleted=read_deleted, ) .filter_by(key='QoS_Specs_Name') .filter_by(value=name) .options(joinedload(models.QualityOfServiceSpecs.specs)) .all() ) if not results: raise exception.QoSSpecsNotFound(specs_id=name) return results @require_admin_context def _qos_specs_get_all_ref(context, qos_specs_id, inactive=False): read_deleted = 'yes' if inactive else 'no' result = ( model_query( context, models.QualityOfServiceSpecs, read_deleted=read_deleted, ) .filter_by(id=qos_specs_id) .options(joinedload(models.QualityOfServiceSpecs.specs)) .all() ) if not result: raise exception.QoSSpecsNotFound(specs_id=qos_specs_id) return result def _dict_with_children_specs(specs): """Convert specs list to a dict.""" result = {} update_time = None for spec in specs: # Skip deleted keys if not spec['deleted']: # Add update time to specs list, in order to get the keyword # 'updated_at' in specs info when printing logs. if not update_time and spec['updated_at']: update_time = spec['updated_at'] elif update_time and spec['updated_at']: if (update_time - spec['updated_at']).total_seconds() < 0: update_time = spec['updated_at'] result.update({spec['key']: spec['value']}) if update_time: result.update({'updated_at': update_time}) return result def _dict_with_qos_specs(rows): """Convert qos specs query results to list. Qos specs query results are a list of quality_of_service_specs refs, some are root entry of a qos specs (key == 'QoS_Specs_Name') and the rest are children entry, a.k.a detailed specs for a qos specs. This function converts query results to a dict using spec name as key. """ result = [] for row in rows: if row['key'] == 'QoS_Specs_Name': # Add create time for member, in order to get the keyword # 'created_at' in the specs info when printing logs. 
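                # Each qos specs is stored as one root row (key
                # 'QoS_Specs_Name') plus child rows for 'consumer' and the
                # individual spec keys; see qos_specs_create() above.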
member = { 'name': row['value'], 'id': row['id'], 'created_at': row['created_at'], } if row.specs: spec_dict = _dict_with_children_specs(row.specs) member['consumer'] = spec_dict.pop('consumer') if spec_dict.get('updated_at'): member['updated_at'] = spec_dict.pop('updated_at') member.update({'specs': spec_dict}) result.append(member) return result @require_admin_context @main_context_manager.reader def qos_specs_get(context, qos_specs_id, inactive=False): rows = _qos_specs_get_all_ref(context, qos_specs_id, inactive) return _dict_with_qos_specs(rows)[0] @require_admin_context @main_context_manager.reader def qos_specs_get_all( context, filters=None, marker=None, limit=None, offset=None, sort_keys=None, sort_dirs=None, ): """Returns a list of all qos_specs. Results is like: [{ 'id': SPECS-UUID, 'name': 'qos_spec-1', 'consumer': 'back-end', 'specs': { 'key1': 'value1', 'key2': 'value2', ... } }, { 'id': SPECS-UUID, 'name': 'qos_spec-2', 'consumer': 'front-end', 'specs': { 'key1': 'value1', 'key2': 'value2', ... } }, ] """ # Generate the query query = _generate_paginate_query( context, marker, limit, sort_keys, sort_dirs, filters, offset, models.QualityOfServiceSpecs, ) # No Qos specs would match, return empty list if query is None: return [] rows = query.all() return _dict_with_qos_specs(rows) @require_admin_context def _qos_specs_get_query(context): rows = ( model_query( context, models.QualityOfServiceSpecs, read_deleted='no', ) .options(joinedload(models.QualityOfServiceSpecs.specs)) .filter_by(key='QoS_Specs_Name') ) return rows def _process_qos_specs_filters(query, filters): if filters: # Ensure that filters' keys exist on the model if not is_valid_model_filters(models.QualityOfServiceSpecs, filters): return query = query.filter_by(**filters) return query @require_admin_context def _qos_specs_get(context, qos_spec_id): result = ( model_query(context, models.QualityOfServiceSpecs, read_deleted='no') .filter_by(id=qos_spec_id) .filter_by(key='QoS_Specs_Name') .first() ) if not result: raise exception.QoSSpecsNotFound(specs_id=qos_spec_id) return result @require_admin_context @main_context_manager.reader def qos_specs_get_by_name(context, name, inactive=False): rows = _qos_specs_get_all_by_name(context, name, inactive) return _dict_with_qos_specs(rows)[0] @require_admin_context @main_context_manager.reader def qos_specs_associations_get(context, qos_specs_id): """Return all entities associated with specified qos specs. For now, the only entity that is possible to associate with a qos specs is volume type, so this is just a wrapper of volume_type_qos_associations_get(). But it's possible to extend qos specs association to other entities, such as volumes, sometime in future. """ return volume_type_qos_associations_get(context, qos_specs_id) @require_admin_context @main_context_manager.writer def qos_specs_associate(context, qos_specs_id, type_id): """Associate volume type from specified qos specs.""" return volume_type_qos_associate(context, type_id, qos_specs_id) @require_admin_context @main_context_manager.writer def qos_specs_disassociate(context, qos_specs_id, type_id): """Disassociate volume type from specified qos specs.""" return volume_type_qos_disassociate(context, qos_specs_id, type_id) @require_admin_context @main_context_manager.writer def qos_specs_disassociate_all(context, qos_specs_id): """Disassociate all entities associated with specified qos specs. 
For now, the only entity that is possible to associate with a qos specs is volume type, so this is just a wrapper of volume_type_qos_disassociate_all(). But it's possible to extend qos specs association to other entities, such as volumes, sometime in future. """ return volume_type_qos_disassociate_all(context, qos_specs_id) @require_admin_context @main_context_manager.writer def qos_specs_item_delete(context, qos_specs_id, key): query = ( context.session.query(models.QualityOfServiceSpecs) .filter(models.QualityOfServiceSpecs.key == key) .filter(models.QualityOfServiceSpecs.specs_id == qos_specs_id) ) entity = query.column_descriptions[0]['entity'] query.update( { 'deleted': True, 'deleted_at': timeutils.utcnow(), 'updated_at': entity.updated_at, } ) @require_admin_context @main_context_manager.writer def qos_specs_delete(context, qos_specs_id): _qos_specs_get_all_ref(context, qos_specs_id) query = context.session.query(models.QualityOfServiceSpecs).filter( or_( models.QualityOfServiceSpecs.id == qos_specs_id, models.QualityOfServiceSpecs.specs_id == qos_specs_id, ) ) entity = query.column_descriptions[0]['entity'] updated_values = { 'deleted': True, 'deleted_at': timeutils.utcnow(), 'updated_at': entity.updated_at, } query.update(updated_values) del updated_values['updated_at'] return updated_values @require_admin_context def _qos_specs_get_item(context, qos_specs_id, key): result = ( model_query(context, models.QualityOfServiceSpecs) .filter(models.QualityOfServiceSpecs.key == key) .filter(models.QualityOfServiceSpecs.specs_id == qos_specs_id) .first() ) if not result: raise exception.QoSSpecsKeyNotFound( specs_key=key, specs_id=qos_specs_id ) return result @handle_db_data_error @require_admin_context @require_qos_specs_exists @main_context_manager.writer def qos_specs_update(context, qos_specs_id, values): """Make updates to an existing qos specs. Perform add, update or delete key/values to a qos specs. 
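    The values dict may carry a 'consumer' key and/or a 'specs' dict of
    key/value pairs to add or update, for example (illustrative):

    .. code-block:: python

        {
            'consumer': 'back-end',
            'specs': {
                'total_iops_sec': 500,
            },
        }
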
""" specs = values.get('specs', {}) if 'consumer' in values: # Massage consumer to the right place for DB and copy specs # before updating so we don't modify dict for caller specs = specs.copy() specs['consumer'] = values['consumer'] spec_ref = None for key in specs.keys(): try: spec_ref = _qos_specs_get_item(context, qos_specs_id, key) except exception.QoSSpecsKeyNotFound: spec_ref = models.QualityOfServiceSpecs() id = None if spec_ref.get('id', None): id = spec_ref['id'] else: id = str(uuid.uuid4()) value = { 'id': id, 'key': key, 'value': specs[key], 'specs_id': qos_specs_id, 'deleted': False, } LOG.debug('qos_specs_update() value: %s', value) spec_ref.update(value) spec_ref.save(context.session) return specs #################### @require_context def _volume_type_encryption_get(context, volume_type_id): return ( model_query( context, models.Encryption, read_deleted="no", ) .filter_by(volume_type_id=volume_type_id) .first() ) @require_context @main_context_manager.reader def volume_type_encryption_get(context, volume_type_id): return _volume_type_encryption_get(context, volume_type_id) @require_admin_context @main_context_manager.writer def volume_type_encryption_delete(context, volume_type_id): encryption = _volume_type_encryption_get(context, volume_type_id) if not encryption: raise exception.VolumeTypeEncryptionNotFound(type_id=volume_type_id) encryption.update( { 'deleted': True, 'deleted_at': timeutils.utcnow(), 'updated_at': encryption.updated_at, }, ) @handle_db_data_error @require_admin_context @main_context_manager.writer def volume_type_encryption_create(context, volume_type_id, values): encryption = models.Encryption() if 'volume_type_id' not in values: values['volume_type_id'] = volume_type_id if 'encryption_id' not in values: values['encryption_id'] = str(uuid.uuid4()) encryption.update(values) context.session.add(encryption) return encryption @handle_db_data_error @require_admin_context @main_context_manager.writer def volume_type_encryption_update(context, volume_type_id, values): query = model_query(context, models.Encryption) result = query.filter_by(volume_type_id=volume_type_id).update(values) if not result: raise exception.VolumeTypeEncryptionNotFound(type_id=volume_type_id) @main_context_manager.reader def volume_type_encryption_volume_get(context, volume_type_id): volume_list = ( _volume_get_query(context, project_only=False) .filter_by(volume_type_id=volume_type_id) .all() ) return volume_list @require_context @main_context_manager.reader def volume_encryption_metadata_get(context, volume_id): """Return the encryption metadata for a given volume.""" volume_ref = _volume_get(context, volume_id) encryption_ref = _volume_type_encryption_get( context, volume_ref['volume_type_id'], ) values = { 'encryption_key_id': volume_ref['encryption_key_id'], } if encryption_ref: for key in ['control_location', 'cipher', 'key_size', 'provider']: values[key] = encryption_ref[key] return values #################### @require_context def _volume_glance_metadata_get_all(context): query = model_query(context, models.VolumeGlanceMetadata) if is_user_context(context): query = query.filter( models.Volume.id == models.VolumeGlanceMetadata.volume_id, models.Volume.project_id == context.project_id, ) return query.all() @require_context @main_context_manager.reader def volume_glance_metadata_get_all(context): """Return the Glance metadata for all volumes.""" return _volume_glance_metadata_get_all(context) @require_context @main_context_manager.reader def volume_glance_metadata_list_get(context, 
volume_id_list): """Return the glance metadata for a volume list.""" query = model_query(context, models.VolumeGlanceMetadata) query = query.filter( models.VolumeGlanceMetadata.volume_id.in_(volume_id_list) ) return query.all() @require_context @require_volume_exists def _volume_glance_metadata_get(context, volume_id): rows = ( model_query(context, models.VolumeGlanceMetadata) .filter_by(volume_id=volume_id) .filter_by(deleted=False) .all() ) if not rows: raise exception.GlanceMetadataNotFound(id=volume_id) return rows @require_context @main_context_manager.reader def volume_glance_metadata_get(context, volume_id): """Return the Glance metadata for the specified volume.""" return _volume_glance_metadata_get(context, volume_id) @require_context @require_snapshot_exists def _volume_snapshot_glance_metadata_get(context, snapshot_id): rows = ( model_query(context, models.VolumeGlanceMetadata) .filter_by(snapshot_id=snapshot_id) .filter_by(deleted=False) .all() ) if not rows: raise exception.GlanceMetadataNotFound(id=snapshot_id) return rows @require_context @main_context_manager.reader def volume_snapshot_glance_metadata_get(context, snapshot_id): """Return the Glance metadata for the specified snapshot.""" return _volume_snapshot_glance_metadata_get(context, snapshot_id) @require_context @require_volume_exists @main_context_manager.writer def volume_glance_metadata_create(context, volume_id, key, value): """Update the Glance metadata for a volume by adding a new key:value pair. This API does not support changing the value of a key once it has been created. """ rows = ( context.session.query(models.VolumeGlanceMetadata) .filter_by(volume_id=volume_id) .filter_by(key=key) .filter_by(deleted=False) .all() ) if len(rows) > 0: vol_glance_metadata = rows[0] if vol_glance_metadata.value == str(value): return raise exception.GlanceMetadataExists(key=key, volume_id=volume_id) vol_glance_metadata = models.VolumeGlanceMetadata() vol_glance_metadata.volume_id = volume_id vol_glance_metadata.key = key vol_glance_metadata.value = str(value) context.session.add(vol_glance_metadata) return @require_context @require_volume_exists @main_context_manager.writer def volume_glance_metadata_bulk_create(context, volume_id, metadata): """Update the Glance metadata for a volume by adding new key:value pairs. This API does not support changing the value of a key once it has been created. """ for key, value in metadata.items(): rows = ( context.session.query(models.VolumeGlanceMetadata) .filter_by(volume_id=volume_id) .filter_by(key=key) .filter_by(deleted=False) .all() ) if len(rows) > 0: vol_glance_metadata = rows[0] if vol_glance_metadata.value == str(value): continue raise exception.GlanceMetadataExists(key=key, volume_id=volume_id) vol_glance_metadata = models.VolumeGlanceMetadata() vol_glance_metadata.volume_id = volume_id vol_glance_metadata.key = key vol_glance_metadata.value = str(value) context.session.add(vol_glance_metadata) @require_context @require_snapshot_exists @main_context_manager.writer def volume_glance_metadata_copy_to_snapshot(context, snapshot_id, volume_id): """Update the Glance metadata for a snapshot. This copies all of the key:value pairs from the originating volume, to ensure that a volume created from the snapshot will retain the original metadata. 
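    Each pair is written as a new VolumeGlanceMetadata row keyed by the
    snapshot id; the volume's own rows are left untouched.
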
""" metadata = _volume_glance_metadata_get(context, volume_id) for meta in metadata: vol_glance_metadata = models.VolumeGlanceMetadata() vol_glance_metadata.snapshot_id = snapshot_id vol_glance_metadata.key = meta['key'] vol_glance_metadata.value = meta['value'] vol_glance_metadata.save(context.session) @require_context @main_context_manager.writer def volume_glance_metadata_copy_from_volume_to_volume( context, src_volume_id, volume_id, ): """Update the Glance metadata for a volume. This copies all of the key:value pairs from the originating volume, to ensure that a volume created from the volume (clone) will retain the original metadata. """ metadata = _volume_glance_metadata_get(context, src_volume_id) for meta in metadata: vol_glance_metadata = models.VolumeGlanceMetadata() vol_glance_metadata.volume_id = volume_id vol_glance_metadata.key = meta['key'] vol_glance_metadata.value = meta['value'] vol_glance_metadata.save(context.session) @require_context @require_volume_exists @main_context_manager.writer def volume_glance_metadata_copy_to_volume(context, volume_id, snapshot_id): """Update Glance metadata from a volume. Update the Glance metadata from a volume (created from a snapshot) by copying all of the key:value pairs from the originating snapshot. This is so that the Glance metadata from the original volume is retained. """ metadata = _volume_snapshot_glance_metadata_get(context, snapshot_id) for meta in metadata: vol_glance_metadata = models.VolumeGlanceMetadata() vol_glance_metadata.volume_id = volume_id vol_glance_metadata.key = meta['key'] vol_glance_metadata.value = meta['value'] vol_glance_metadata.save(context.session) @require_context @main_context_manager.writer def volume_glance_metadata_delete_by_volume(context, volume_id): query = model_query( context, models.VolumeGlanceMetadata, read_deleted='no', ).filter_by(volume_id=volume_id) entity = query.column_descriptions[0]['entity'] query.update( { 'deleted': True, 'deleted_at': timeutils.utcnow(), 'updated_at': entity.updated_at, }, ) @require_context @main_context_manager.writer def volume_glance_metadata_delete_by_snapshot(context, snapshot_id): query = model_query( context, models.VolumeGlanceMetadata, read_deleted='no', ).filter_by(snapshot_id=snapshot_id) entity = query.column_descriptions[0]['entity'] query.update( { 'deleted': True, 'deleted_at': timeutils.utcnow(), 'updated_at': entity.updated_at, }, ) ############################### @require_admin_context def _backup_data_get_for_project(context, project_id, volume_type_id=None): query = model_query( context, func.count(models.Backup.id), func.sum(models.Backup.size), read_deleted="no", ).filter_by(project_id=project_id) if volume_type_id: query = query.filter_by(volume_type_id=volume_type_id) result = query.first() # NOTE(vish): convert None to 0 return result[0] or 0, result[1] or 0 @require_context @main_context_manager.reader def backup_get(context, backup_id, read_deleted=None, project_only=True): return _backup_get( context, backup_id, read_deleted=read_deleted, project_only=project_only, ) def _backup_get( context, backup_id, read_deleted=None, project_only=True, ): result = ( model_query( context, models.Backup, project_only=project_only, read_deleted=read_deleted, ) .options(joinedload(models.Backup.backup_metadata)) .filter_by(id=backup_id) .first() ) if not result: raise exception.BackupNotFound(backup_id=backup_id) return result def _backup_get_all( context, filters=None, marker=None, limit=None, offset=None, sort_keys=None, sort_dirs=None, ): if filters 
and not is_valid_model_filters(models.Backup, filters): return [] # Generate the paginate query query = _generate_paginate_query( context, marker, limit, sort_keys, sort_dirs, filters, offset, models.Backup, ) if query is None: return [] return query.all() def _backups_get_query(context, project_only=False, joined_load=True): query = model_query(context, models.Backup, project_only=project_only) if joined_load: query = query.options(joinedload(models.Backup.backup_metadata)) return query @apply_like_filters(model=models.Backup) def _process_backups_filters(query, filters): if filters: # Ensure that filters' keys exist on the model if not is_valid_model_filters(models.Backup, filters): return filters_dict = {} for key, value in filters.items(): if key == 'metadata': col_attr = getattr(models.Backup, 'backup_metadata') for k, v in value.items(): query = query.filter(col_attr.any(key=k, value=v)) elif isinstance(value, (list, tuple, set, frozenset)): orm_field = getattr(models.Backup, key) query = query.filter(or_(orm_field == v for v in value)) else: filters_dict[key] = value # Apply exact matches if filters_dict: query = query.filter_by(**filters_dict) return query @require_admin_context @main_context_manager.reader def backup_get_all( context, filters=None, marker=None, limit=None, offset=None, sort_keys=None, sort_dirs=None, ): return _backup_get_all( context, filters, marker, limit, offset, sort_keys, sort_dirs ) @require_admin_context @main_context_manager.reader def backup_get_all_by_host(context, host): return ( model_query(context, models.Backup) .options(joinedload(models.Backup.backup_metadata)) .filter_by(host=host) .all() ) @require_context @main_context_manager.reader def backup_get_all_by_project( context, project_id, filters=None, marker=None, limit=None, offset=None, sort_keys=None, sort_dirs=None, ): authorize_project_context(context, project_id) if not filters: filters = {} else: filters = filters.copy() filters['project_id'] = project_id return _backup_get_all( context, filters, marker, limit, offset, sort_keys, sort_dirs ) @require_context @main_context_manager.reader def backup_get_all_by_volume(context, volume_id, vol_project_id, filters=None): authorize_project_context(context, vol_project_id) if not filters: filters = {} else: filters = filters.copy() filters['volume_id'] = volume_id return _backup_get_all(context, filters) @require_context @main_context_manager.reader def backup_get_all_active_by_window(context, begin, end=None, project_id=None): """Return backups that were active during window.""" query = model_query(context, models.Backup, read_deleted="yes").options( joinedload(models.Backup.backup_metadata) ) query = query.filter( or_( models.Backup.deleted_at == None, # noqa models.Backup.deleted_at > begin, ) ) if end: query = query.filter(models.Backup.created_at < end) if project_id: query = query.filter_by(project_id=project_id) return query.all() @handle_db_data_error @require_context @main_context_manager.writer def backup_create(context, values): values['backup_metadata'] = _metadata_refs( values.get('metadata'), models.BackupMetadata ) if not values.get('id'): values['id'] = str(uuid.uuid4()) backup_ref = models.Backup() backup_ref.update(values) context.session.add(backup_ref) return _backup_get(context, values['id']) @handle_db_data_error @require_context @main_context_manager.writer def backup_update(context, backup_id, values): if 'fail_reason' in values: values = values.copy() values['fail_reason'] = (values['fail_reason'] or '')[:255] query = 
model_query(context, models.Backup, read_deleted="yes") result = query.filter_by(id=backup_id).update(values) if not result: raise exception.BackupNotFound(backup_id=backup_id) @require_admin_context @main_context_manager.writer def backup_destroy(context, backup_id): utcnow = timeutils.utcnow() updated_values = { 'status': fields.BackupStatus.DELETED, 'deleted': True, 'deleted_at': utcnow, } query = model_query(context, models.Backup).filter_by(id=backup_id) entity = query.column_descriptions[0]['entity'] updated_values['updated_at'] = entity.updated_at query.update(updated_values) query = model_query( context, models.BackupMetadata, ).filter_by(backup_id=backup_id) entity = query.column_descriptions[0]['entity'] query.update( { 'deleted': True, 'deleted_at': utcnow, 'updated_at': entity.updated_at, } ) del updated_values['updated_at'] return updated_values def _backup_metadata_get_query(context, backup_id): return model_query( context, models.BackupMetadata, read_deleted="no" ).filter_by(backup_id=backup_id) @require_context def _backup_metadata_get(context, backup_id): rows = _backup_metadata_get_query(context, backup_id).all() result = {} for row in rows: result[row['key']] = row['value'] return result @require_context @require_backup_exists @main_context_manager.reader def backup_metadata_get(context, backup_id): return _backup_metadata_get(context, backup_id) @require_context def _backup_metadata_get_item(context, backup_id, key): result = ( _backup_metadata_get_query(context, backup_id) .filter_by(key=key) .first() ) if not result: raise exception.BackupMetadataNotFound( metadata_key=key, backup_id=backup_id ) return result @require_context @require_backup_exists @handle_db_data_error @oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True) @main_context_manager.writer def backup_metadata_update(context, backup_id, metadata, delete): # Set existing metadata to deleted if delete argument is True if delete: original_metadata = _backup_metadata_get(context, backup_id) for meta_key, meta_value in original_metadata.items(): if meta_key not in metadata: meta_ref = _backup_metadata_get_item( context, backup_id, meta_key ) meta_ref.update( {'deleted': True, 'deleted_at': timeutils.utcnow()} ) meta_ref.save(context.session) meta_ref = None # Now update all existing items with new values, or create new meta # objects for meta_key, meta_value in metadata.items(): # update the value whether it exists or not item = {"value": meta_value} try: meta_ref = _backup_metadata_get_item(context, backup_id, meta_key) except exception.BackupMetadataNotFound: meta_ref = models.BackupMetadata() item.update({"key": meta_key, "backup_id": backup_id}) meta_ref.update(item) meta_ref.save(context.session) return backup_metadata_get(context, backup_id) ############################### @require_context def _transfer_get(context, transfer_id): query = model_query( context, models.Transfer, ).filter_by(id=transfer_id) if not is_admin_context(context): volume = models.Volume query = query.filter( models.Transfer.volume_id == volume.id, volume.project_id == context.project_id, ) result = query.first() if not result: raise exception.TransferNotFound(transfer_id=transfer_id) return result @require_context @main_context_manager.reader def transfer_get(context, transfer_id): return _transfer_get(context, transfer_id) def _process_transfer_filters(query, filters): if filters: project_id = filters.pop('project_id', None) # Ensure that filters' keys exist on the model if not is_valid_model_filters(models.Transfer, 
filters): return if project_id: volume = models.Volume query = query.filter( volume.id == models.Transfer.volume_id, volume.project_id == project_id, ) query = query.filter_by(**filters) return query def _translate_transfers(transfers): fields = ( 'id', 'volume_id', 'display_name', 'created_at', 'deleted', 'no_snapshots', 'source_project_id', 'destination_project_id', 'accepted', ) return [{k: transfer[k] for k in fields} for transfer in transfers] def _transfer_get_all( context, marker=None, limit=None, sort_keys=None, sort_dirs=None, filters=None, offset=None, ): # Generate the query query = _generate_paginate_query( context, marker, limit, sort_keys, sort_dirs, filters, offset, models.Transfer, ) if query is None: return [] return _translate_transfers(query.all()) @require_admin_context @main_context_manager.reader def transfer_get_all( context, marker=None, limit=None, sort_keys=None, sort_dirs=None, filters=None, offset=None, ): return _transfer_get_all( context, marker=marker, limit=limit, sort_keys=sort_keys, sort_dirs=sort_dirs, filters=filters, offset=offset, ) def _transfer_get_query(context, project_only=False): return model_query(context, models.Transfer, project_only=project_only) @require_context @main_context_manager.reader def transfer_get_all_by_project( context, project_id, marker=None, limit=None, sort_keys=None, sort_dirs=None, filters=None, offset=None, ): authorize_project_context(context, project_id) filters = filters.copy() if filters else {} filters['project_id'] = project_id return _transfer_get_all( context, marker=marker, limit=limit, sort_keys=sort_keys, sort_dirs=sort_dirs, filters=filters, offset=offset, ) @require_context @handle_db_data_error @main_context_manager.writer def transfer_create(context, values): if not values.get('id'): values['id'] = str(uuid.uuid4()) transfer_id = values['id'] volume_id = values['volume_id'] expected = {'id': volume_id, 'status': 'available'} update = {'status': 'awaiting-transfer'} if not _conditional_update(context, models.Volume, update, expected): msg = _( 'Transfer %(transfer_id)s: Volume id %(volume_id)s ' 'expected in available state.' ) % {'transfer_id': transfer_id, 'volume_id': volume_id} LOG.error(msg) raise exception.InvalidVolume(reason=msg) transfer = models.Transfer() transfer.update(values) context.session.add(transfer) return transfer @require_context @oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True) @main_context_manager.writer def transfer_destroy(context, transfer_id): utcnow = timeutils.utcnow() volume_id = _transfer_get(context, transfer_id)['volume_id'] expected = {'id': volume_id, 'status': 'awaiting-transfer'} update = {'status': 'available'} if not _conditional_update(context, models.Volume, update, expected): # If the volume state is not 'awaiting-transfer' don't change it, # but we can still mark the transfer record as deleted. msg = _( 'Transfer %(transfer_id)s: Volume expected in ' 'awaiting-transfer state.' 
) % {'transfer_id': transfer_id} LOG.error(msg) query = model_query(context, models.Transfer).filter_by(id=transfer_id) entity = query.column_descriptions[0]['entity'] updated_values = { 'deleted': True, 'deleted_at': utcnow, 'updated_at': entity.updated_at, } query.update(updated_values) del updated_values['updated_at'] return updated_values def _roll_back_transferred_volume_and_snapshots( context, volume_id, old_user_id, old_project_id, transffered_snapshots ): expected = {'id': volume_id, 'status': 'available'} update = { 'status': 'awaiting-transfer', 'user_id': old_user_id, 'project_id': old_project_id, 'updated_at': timeutils.utcnow(), } if not _conditional_update(context, models.Volume, update, expected): LOG.warning( 'Volume: %(volume_id)s is not in the expected available ' 'status. Rolling it back.', {'volume_id': volume_id}, ) return for snapshot_id in transffered_snapshots: LOG.info( 'Beginning to roll back transferred snapshots: %s', snapshot_id ) expected = {'id': snapshot_id, 'status': 'available'} update = { 'user_id': old_user_id, 'project_id': old_project_id, 'updated_at': timeutils.utcnow(), } if not _conditional_update(context, models.Snapshot, update, expected): LOG.warning( 'Snapshot: %(snapshot_id)s is not in the expected ' 'available state. Rolling it back.', {'snapshot_id': snapshot_id}, ) return @require_context @main_context_manager.writer def transfer_accept( context, transfer_id, user_id, project_id, no_snapshots=False ): volume_id = _transfer_get(context, transfer_id)['volume_id'] expected = {'id': volume_id, 'status': 'awaiting-transfer'} update = { 'status': 'available', 'user_id': user_id, 'project_id': project_id, 'updated_at': timeutils.utcnow(), } if not _conditional_update(context, models.Volume, update, expected): msg = _( 'Transfer %(transfer_id)s: Volume id %(volume_id)s ' 'expected in awaiting-transfer state.' ) % {'transfer_id': transfer_id, 'volume_id': volume_id} LOG.error(msg) raise exception.InvalidVolume(reason=msg) # Update snapshots for transfer snapshots with volume. if not no_snapshots: snapshots = snapshot_get_all_for_volume(context, volume_id) transferred_snapshots = [] for snapshot in snapshots: LOG.info('Begin to transfer snapshot: %s', snapshot['id']) old_user_id = snapshot['user_id'] old_project_id = snapshot['project_id'] expected = {'id': snapshot['id'], 'status': 'available'} update = { 'user_id': user_id, 'project_id': project_id, 'updated_at': timeutils.utcnow(), } if not _conditional_update( context, models.Snapshot, update, expected ): msg = _( 'Transfer %(transfer_id)s: Snapshot ' '%(snapshot_id)s is not in the expected ' 'available state.' 
) % {'transfer_id': transfer_id, 'snapshot_id': snapshot['id']} LOG.error(msg) _roll_back_transferred_volume_and_snapshots( context, volume_id, old_user_id, old_project_id, transferred_snapshots, ) raise exception.InvalidSnapshot(reason=msg) transferred_snapshots.append(snapshot['id']) query = context.session.query(models.Transfer).filter_by(id=transfer_id) entity = query.column_descriptions[0]['entity'] query.update( { 'deleted': True, 'deleted_at': timeutils.utcnow(), 'updated_at': entity.updated_at, 'destination_project_id': project_id, 'accepted': True, } ) ############################### @require_admin_context def _consistencygroup_data_get_for_project(context, project_id): query = model_query( context, func.count(models.ConsistencyGroup.id), read_deleted="no", ).filter_by(project_id=project_id) result = query.first() return (0, result[0] or 0) @require_context def _consistencygroup_get(context, consistencygroup_id): result = ( model_query( context, models.ConsistencyGroup, project_only=True, ) .filter_by(id=consistencygroup_id) .first() ) if not result: raise exception.ConsistencyGroupNotFound( consistencygroup_id=consistencygroup_id ) return result @require_context @main_context_manager.reader def consistencygroup_get(context, consistencygroup_id): return _consistencygroup_get(context, consistencygroup_id) def _consistencygroups_get_query(context, project_only=False): return model_query( context, models.ConsistencyGroup, project_only=project_only, ) def _process_consistencygroups_filters(query, filters): if filters: # Ensure that filters' keys exist on the model if not is_valid_model_filters(models.ConsistencyGroup, filters): return query = query.filter_by(**filters) return query def _consistencygroup_get_all( context, filters=None, marker=None, limit=None, offset=None, sort_keys=None, sort_dirs=None, ): if filters and not is_valid_model_filters( models.ConsistencyGroup, filters ): return [] # Generate the paginate query query = _generate_paginate_query( context, marker, limit, sort_keys, sort_dirs, filters, offset, models.ConsistencyGroup, ) if query is None: return [] return query.all() @require_admin_context @main_context_manager.reader def consistencygroup_get_all( context, filters=None, marker=None, limit=None, offset=None, sort_keys=None, sort_dirs=None, ): """Retrieves all consistency groups. If no sort parameters are specified then the returned cgs are sorted first by the 'created_at' key and then by the 'id' key in descending order. :param context: context to query under :param marker: the last item of the previous page, used to determine the next page of results to return :param limit: maximum number of items to return :param sort_keys: list of attributes by which results should be sorted, paired with corresponding item in sort_dirs :param sort_dirs: list of directions in which results should be sorted, paired with corresponding item in sort_keys :param filters: Filters for the query in the form of key/value. :returns: list of matching consistency groups """ return _consistencygroup_get_all( context, filters, marker, limit, offset, sort_keys, sort_dirs ) @require_context @main_context_manager.reader def consistencygroup_get_all_by_project( context, project_id, filters=None, marker=None, limit=None, offset=None, sort_keys=None, sort_dirs=None, ): """Retrieves all consistency groups in a project. If no sort parameters are specified then the returned cgs are sorted first by the 'created_at' key and then by the 'id' key in descending order. 
:param context: context to query under :param marker: the last item of the previous page, used to determine the next page of results to return :param limit: maximum number of items to return :param sort_keys: list of attributes by which results should be sorted, paired with corresponding item in sort_dirs :param sort_dirs: list of directions in which results should be sorted, paired with corresponding item in sort_keys :param filters: Filters for the query in the form of key/value. :returns: list of matching consistency groups """ authorize_project_context(context, project_id) if not filters: filters = {} else: filters = filters.copy() filters['project_id'] = project_id return _consistencygroup_get_all( context, filters, marker, limit, offset, sort_keys, sort_dirs ) @handle_db_data_error @require_context @main_context_manager.writer def consistencygroup_create(context, values, cg_snap_id=None, cg_id=None): cg_model = models.ConsistencyGroup values = values.copy() if not values.get('id'): values['id'] = str(uuid.uuid4()) if cg_snap_id: conditions = [ cg_model.id == models.CGSnapshot.consistencygroup_id, models.CGSnapshot.id == cg_snap_id, ] elif cg_id: conditions = [cg_model.id == cg_id] else: conditions = None if conditions: # We don't want duplicated field values names = ['volume_type_id', 'availability_zone', 'host', 'cluster_name'] for name in names: values.pop(name, None) fields = [getattr(cg_model, name) for name in names] fields.extend(bindparam(k, v) for k, v in values.items()) sel = context.session.query(*fields).filter(*conditions) names.extend(values.keys()) insert_stmt = cg_model.__table__.insert().from_select(names, sel) result = context.session.execute(insert_stmt) # If we couldn't insert the row because of the conditions raise # the right exception if not result.rowcount: if cg_id: raise exception.ConsistencyGroupNotFound( consistencygroup_id=cg_id ) raise exception.CgSnapshotNotFound(cgsnapshot_id=cg_snap_id) else: consistencygroup = cg_model() consistencygroup.update(values) context.session.add(consistencygroup) return _consistencygroup_get(context, values['id']) @handle_db_data_error @require_context @main_context_manager.writer def consistencygroup_update(context, consistencygroup_id, values): query = model_query(context, models.ConsistencyGroup, project_only=True) result = query.filter_by(id=consistencygroup_id).update(values) if not result: raise exception.ConsistencyGroupNotFound( consistencygroup_id=consistencygroup_id ) @require_admin_context @main_context_manager.writer def consistencygroup_destroy(context, consistencygroup_id): utcnow = timeutils.utcnow() query = model_query( context, models.ConsistencyGroup, ).filter_by(id=consistencygroup_id) entity = query.column_descriptions[0]['entity'] updated_values = { 'status': fields.ConsistencyGroupStatus.DELETED, 'deleted': True, 'deleted_at': utcnow, 'updated_at': entity.updated_at, } query.update(updated_values) del updated_values['updated_at'] return updated_values def cg_has_cgsnapshot_filter(): """Return a filter that checks if a CG has CG Snapshots.""" return sql.exists().where( and_( models.CGSnapshot.consistencygroup_id == models.ConsistencyGroup.id, ~models.CGSnapshot.deleted, ) ) def cg_has_volumes_filter(attached_or_with_snapshots=False): """Return a filter to check if a CG has volumes. When attached_or_with_snapshots parameter is given a True value only attached volumes or those with snapshots will be considered. 
""" query = sql.exists().where( and_( models.Volume.consistencygroup_id == models.ConsistencyGroup.id, ~models.Volume.deleted, ) ) if attached_or_with_snapshots: query = query.where( or_( models.Volume.attach_status == 'attached', sql.exists().where( and_( models.Volume.id == models.Snapshot.volume_id, ~models.Snapshot.deleted, ) ), ) ) return query def cg_creating_from_src(cg_id=None, cgsnapshot_id=None): """Return a filter to check if a CG is being used as creation source. Returned filter is meant to be used in the Conditional Update mechanism and checks if provided CG ID or CG Snapshot ID is currently being used to create another CG. This filter will not include CGs that have used the ID but have already finished their creation (status is no longer creating). Filter uses a subquery that allows it to be used on updates to the consistencygroups table. """ # NOTE(geguileo): As explained in devref api_conditional_updates we use a # subquery to trick MySQL into using the same table in the update and the # where clause. subq = ( sql.select(models.ConsistencyGroup) .where( and_( ~models.ConsistencyGroup.deleted, models.ConsistencyGroup.status == 'creating', ) ) .alias('cg2') ) if cg_id: match_id = subq.c.source_cgid == cg_id elif cgsnapshot_id: match_id = subq.c.cgsnapshot_id == cgsnapshot_id else: msg = _( 'cg_creating_from_src must be called with cg_id or ' 'cgsnapshot_id parameter.' ) raise exception.ProgrammingError(reason=msg) return sql.exists([subq]).where(match_id) @require_admin_context @main_context_manager.writer def consistencygroup_include_in_cluster( context, cluster, partial_rename=True, **filters ): """Include all consistency groups matching the filters into a cluster.""" return _include_in_cluster( context, cluster, models.ConsistencyGroup, partial_rename, filters, ) ############################### @require_admin_context def _group_data_get_for_project(context, project_id): query = model_query( context, func.count(models.Group.id), read_deleted="no", ).filter_by(project_id=project_id) result = query.first() return (0, result[0] or 0) @require_context def _group_get(context, group_id): result = ( model_query(context, models.Group, project_only=True) .filter_by(id=group_id) .first() ) if not result: raise exception.GroupNotFound(group_id=group_id) return result @require_context @main_context_manager.reader def group_get(context, group_id): return _group_get(context, group_id) def _groups_get_query(context, project_only=False): return model_query(context, models.Group, project_only=project_only) def _group_snapshot_get_query(context, project_only=False): return model_query( context, models.GroupSnapshot, project_only=project_only, ) @apply_like_filters(model=models.Group) def _process_groups_filters(query, filters): if filters: # NOTE(xyang): backend_match_level needs to be handled before # is_valid_model_filters is called as it is not a column name # in the db. backend_match_level = filters.pop('backend_match_level', 'backend') # host is a valid filter. Filter the query by host and # backend_match_level first. 
host = filters.pop('host', None) if host: query = query.filter( _filter_host( models.Group.host, host, match_level=backend_match_level ) ) # Ensure that filters' keys exist on the model if not is_valid_model_filters(models.Group, filters): return query = query.filter_by(**filters) return query @apply_like_filters(model=models.GroupSnapshot) def _process_group_snapshot_filters(query, filters): if filters: # Ensure that filters' keys exist on the model if not is_valid_model_filters(models.GroupSnapshot, filters): return query = query.filter_by(**filters) return query def _group_get_all( context, filters=None, marker=None, limit=None, offset=None, sort_keys=None, sort_dirs=None, ): # No need to call is_valid_model_filters here. It is called # in _process_group_filters when _generate_paginate_query # is called below. # Generate the paginate query query = _generate_paginate_query( context, marker, limit, sort_keys, sort_dirs, filters, offset, models.Group, ) return query.all() if query else [] @require_admin_context @main_context_manager.reader def group_get_all( context, filters=None, marker=None, limit=None, offset=None, sort_keys=None, sort_dirs=None, ): """Retrieves all groups. If no sort parameters are specified then the returned groups are sorted first by the 'created_at' key and then by the 'id' key in descending order. :param context: context to query under :param marker: the last item of the previous page, used to determine the next page of results to return :param limit: maximum number of items to return :param sort_keys: list of attributes by which results should be sorted, paired with corresponding item in sort_dirs :param sort_dirs: list of directions in which results should be sorted, paired with corresponding item in sort_keys :param filters: Filters for the query in the form of key/value. :returns: list of matching groups """ return _group_get_all( context, filters, marker, limit, offset, sort_keys, sort_dirs ) @require_context @main_context_manager.reader def group_get_all_by_project( context, project_id, filters=None, marker=None, limit=None, offset=None, sort_keys=None, sort_dirs=None, ): """Retrieves all groups in a project. If no sort parameters are specified then the returned groups are sorted first by the 'created_at' key and then by the 'id' key in descending order. :param context: context to query under :param marker: the last item of the previous page, used to determine the next page of results to return :param limit: maximum number of items to return :param sort_keys: list of attributes by which results should be sorted, paired with corresponding item in sort_dirs :param sort_dirs: list of directions in which results should be sorted, paired with corresponding item in sort_keys :param filters: Filters for the query in the form of key/value. 
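    :param project_id: project for all groups being retrieved
    :param offset: number of items to skip over before collecting results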
:returns: list of matching groups """ authorize_project_context(context, project_id) if not filters: filters = {} else: filters = filters.copy() filters['project_id'] = project_id return _group_get_all( context, filters, marker, limit, offset, sort_keys, sort_dirs ) @handle_db_data_error @require_context @main_context_manager.writer def group_create( context, values, group_snapshot_id=None, source_group_id=None ): group_model = models.Group values = values.copy() if not values.get('id'): values['id'] = str(uuid.uuid4()) if group_snapshot_id: conditions = [ group_model.id == models.GroupSnapshot.group_id, models.GroupSnapshot.id == group_snapshot_id, ] elif source_group_id: conditions = [group_model.id == source_group_id] else: conditions = None if conditions: # We don't want duplicated field values values.pop('group_type_id', None) values.pop('availability_zone', None) values.pop('host', None) values.pop('cluster_name', None) # NOTE(xyang): Save volume_type_ids to update later. volume_type_ids = values.pop('volume_type_ids', []) sel = context.session.query( group_model.group_type_id, group_model.availability_zone, group_model.host, group_model.cluster_name, *(bindparam(k, v) for k, v in values.items()), ).filter(*conditions) names = ['group_type_id', 'availability_zone', 'host', 'cluster_name'] names.extend(values.keys()) insert_stmt = group_model.__table__.insert().from_select(names, sel) result = context.session.execute(insert_stmt) # If we couldn't insert the row because of the conditions raise # the right exception if not result.rowcount: if source_group_id: raise exception.GroupNotFound(group_id=source_group_id) raise exception.GroupSnapshotNotFound( group_snapshot_id=group_snapshot_id ) for item in volume_type_ids: mapping = models.GroupVolumeTypeMapping() mapping['volume_type_id'] = item mapping['group_id'] = values['id'] context.session.add(mapping) else: for item in values.get('volume_type_ids') or []: mapping = models.GroupVolumeTypeMapping() mapping['volume_type_id'] = item mapping['group_id'] = values['id'] context.session.add(mapping) group = group_model() group.update(values) context.session.add(group) return _group_get(context, values['id']) @handle_db_data_error @require_context @main_context_manager.writer def group_volume_type_mapping_create(context, group_id, volume_type_id): """Add group volume_type mapping entry.""" # Verify group exists _group_get(context, group_id) # Verify volume type exists _volume_type_get_id_from_volume_type(context, volume_type_id) existing = _group_volume_type_mapping_get_all_by_group_volume_type( context, group_id, volume_type_id ) if existing: raise exception.GroupVolumeTypeMappingExists( group_id=group_id, volume_type_id=volume_type_id ) mapping = models.GroupVolumeTypeMapping() mapping.update({"group_id": group_id, "volume_type_id": volume_type_id}) try: mapping.save(context.session) except db_exc.DBDuplicateEntry: raise exception.GroupVolumeTypeMappingExists( group_id=group_id, volume_type_id=volume_type_id ) return mapping @handle_db_data_error @require_context @main_context_manager.writer def group_update(context, group_id, values): query = model_query(context, models.Group, project_only=True) result = query.filter_by(id=group_id).update(values) if not result: raise exception.GroupNotFound(group_id=group_id) @require_admin_context @main_context_manager.writer def group_destroy(context, group_id): query = model_query(context, models.Group).filter_by(id=group_id) entity = query.column_descriptions[0]['entity'] query.update( { 'status': 
fields.GroupStatus.DELETED, 'deleted': True, 'deleted_at': timeutils.utcnow(), 'updated_at': entity.updated_at, } ) query = context.session.query( models.GroupVolumeTypeMapping, ).filter_by(group_id=group_id) entity = query.column_descriptions[0]['entity'] query.update( { 'deleted': True, 'deleted_at': timeutils.utcnow(), 'updated_at': entity.updated_at, } ) def group_has_group_snapshot_filter(): return sql.exists().where( and_( models.GroupSnapshot.group_id == models.Group.id, ~models.GroupSnapshot.deleted, ) ) def group_has_volumes_filter(attached_or_with_snapshots=False): query = sql.exists().where( and_(models.Volume.group_id == models.Group.id, ~models.Volume.deleted) ) if attached_or_with_snapshots: query = query.where( or_( models.Volume.attach_status == 'attached', sql.exists().where( and_( models.Volume.id == models.Snapshot.volume_id, ~models.Snapshot.deleted, ) ), ) ) return query def group_creating_from_src(group_id=None, group_snapshot_id=None): # NOTE(geguileo): As explained in devref api_conditional_updates we use a # subquery to trick MySQL into using the same table in the update and the # where clause. subq = ( sql.select(models.Group) .where(and_(~models.Group.deleted, models.Group.status == 'creating')) .alias('group2') ) if group_id: match_id = subq.c.source_group_id == group_id elif group_snapshot_id: match_id = subq.c.group_snapshot_id == group_snapshot_id else: msg = _( 'group_creating_from_src must be called with group_id or ' 'group_snapshot_id parameter.' ) raise exception.ProgrammingError(reason=msg) return sql.exists(subq).where(match_id) @require_admin_context @main_context_manager.writer def group_include_in_cluster(context, cluster, partial_rename=True, **filters): """Include all generic groups matching the filters into a cluster.""" return _include_in_cluster( context, cluster, models.Group, partial_rename, filters ) ############################### @require_context def _cgsnapshot_get(context, cgsnapshot_id): result = ( model_query(context, models.CGSnapshot, project_only=True) .filter_by(id=cgsnapshot_id) .first() ) if not result: raise exception.CgSnapshotNotFound(cgsnapshot_id=cgsnapshot_id) return result @require_context @main_context_manager.reader def cgsnapshot_get(context, cgsnapshot_id): return _cgsnapshot_get(context, cgsnapshot_id) def is_valid_model_filters(model, filters, exclude_list=None): """Return True if filter values exist on the model :param model: a Cinder model :param filters: dictionary of filters """ for key in filters.keys(): if exclude_list and key in exclude_list: continue if key == 'metadata': if not isinstance(filters[key], dict): LOG.debug("Metadata filter value is not valid dictionary") return False continue try: key = key.rstrip('~') getattr(model, key) except AttributeError: LOG.debug("'%s' filter key is not valid.", key) return False return True def _cgsnapshot_get_all(context, project_id=None, group_id=None, filters=None): query = model_query(context, models.CGSnapshot) if filters: if not is_valid_model_filters(models.CGSnapshot, filters): return [] query = query.filter_by(**filters) if project_id: query = query.filter_by(project_id=project_id) if group_id: query = query.filter_by(consistencygroup_id=group_id) return query.all() @require_admin_context @main_context_manager.reader def cgsnapshot_get_all(context, filters=None): return _cgsnapshot_get_all(context, filters=filters) @require_admin_context @main_context_manager.reader def cgsnapshot_get_all_by_group(context, group_id, filters=None): return 
_cgsnapshot_get_all(context, group_id=group_id, filters=filters) @require_context @main_context_manager.reader def cgsnapshot_get_all_by_project(context, project_id, filters=None): authorize_project_context(context, project_id) return _cgsnapshot_get_all(context, project_id=project_id, filters=filters) @handle_db_data_error @require_context @main_context_manager.writer def cgsnapshot_create(context, values): if not values.get('id'): values['id'] = str(uuid.uuid4()) cg_id = values.get('consistencygroup_id') model = models.CGSnapshot if cg_id: # There has to exist at least 1 volume in the CG and the CG cannot # be updating the composing volumes or being created. conditions = [ sql.exists().where( and_( ~models.Volume.deleted, models.Volume.consistencygroup_id == cg_id, ), ), ~models.ConsistencyGroup.deleted, models.ConsistencyGroup.id == cg_id, ~models.ConsistencyGroup.status.in_(('creating', 'updating')), ] # NOTE(geguileo): We build a "fake" from_select clause instead of # using transaction isolation on the session because we would need # SERIALIZABLE level and that would have a considerable performance # penalty. binds = (bindparam(k, v) for k, v in values.items()) sel = context.session.query(*binds).filter(*conditions) insert_stmt = model.__table__.insert().from_select( values.keys(), sel, ) result = context.session.execute(insert_stmt) # If we couldn't insert the row because of the conditions raise # the right exception if not result.rowcount: msg = _( "Source CG cannot be empty or in 'creating' or " "'updating' state. No cgsnapshot will be created." ) raise exception.InvalidConsistencyGroup(reason=msg) else: cgsnapshot = model() cgsnapshot.update(values) context.session.add(cgsnapshot) return _cgsnapshot_get(context, values['id']) @require_context @handle_db_data_error @main_context_manager.writer def cgsnapshot_update(context, cgsnapshot_id, values): query = model_query(context, models.CGSnapshot, project_only=True) result = query.filter_by(id=cgsnapshot_id).update(values) if not result: raise exception.CgSnapshotNotFound(cgsnapshot_id=cgsnapshot_id) @require_admin_context @main_context_manager.writer def cgsnapshot_destroy(context, cgsnapshot_id): query = model_query(context, models.CGSnapshot).filter_by(id=cgsnapshot_id) entity = query.column_descriptions[0]['entity'] updated_values = { 'status': 'deleted', 'deleted': True, 'deleted_at': timeutils.utcnow(), 'updated_at': entity.updated_at, } query.update(updated_values) del updated_values['updated_at'] return updated_values def cgsnapshot_creating_from_src(): """Get a filter that checks if a CGSnapshot is being created from a CG.""" return sql.exists().where( and_( models.CGSnapshot.consistencygroup_id == models.ConsistencyGroup.id, ~models.CGSnapshot.deleted, models.CGSnapshot.status == 'creating', ) ) ############################### @require_context def _group_snapshot_get(context, group_snapshot_id): result = ( model_query(context, models.GroupSnapshot, project_only=True) .filter_by(id=group_snapshot_id) .first() ) if not result: raise exception.GroupSnapshotNotFound( group_snapshot_id=group_snapshot_id ) return result @require_context @main_context_manager.reader def group_snapshot_get(context, group_snapshot_id): return _group_snapshot_get(context, group_snapshot_id) def _group_snapshot_get_all( context, filters=None, marker=None, limit=None, offset=None, sort_keys=None, sort_dirs=None, ): if filters and not is_valid_model_filters( models.GroupSnapshot, filters, ): return [] # Generate the paginate query query = 
_generate_paginate_query( context, marker, limit, sort_keys, sort_dirs, filters, offset, models.GroupSnapshot, ) return query.all() if query else [] @require_admin_context @main_context_manager.reader def group_snapshot_get_all( context, filters=None, marker=None, limit=None, offset=None, sort_keys=None, sort_dirs=None, ): return _group_snapshot_get_all( context, filters, marker, limit, offset, sort_keys, sort_dirs, ) @require_admin_context @main_context_manager.reader def group_snapshot_get_all_by_group( context, group_id, filters=None, marker=None, limit=None, offset=None, sort_keys=None, sort_dirs=None, ): if filters is None: filters = {} if group_id: filters['group_id'] = group_id return _group_snapshot_get_all( context, filters, marker, limit, offset, sort_keys, sort_dirs ) @require_context @main_context_manager.reader def group_snapshot_get_all_by_project( context, project_id, filters=None, marker=None, limit=None, offset=None, sort_keys=None, sort_dirs=None, ): authorize_project_context(context, project_id) if filters is None: filters = {} if project_id: filters['project_id'] = project_id return _group_snapshot_get_all( context, filters, marker, limit, offset, sort_keys, sort_dirs ) @handle_db_data_error @require_context @main_context_manager.writer def group_snapshot_create(context, values): if not values.get('id'): values['id'] = str(uuid.uuid4()) group_id = values.get('group_id') model = models.GroupSnapshot if group_id: # There has to exist at least 1 volume in the group and the group # cannot be updating the composing volumes or being created. conditions = [ sql.exists().where( and_( ~models.Volume.deleted, models.Volume.group_id == group_id ) ), ~models.Group.deleted, models.Group.id == group_id, ~models.Group.status.in_(('creating', 'updating')), ] # NOTE(geguileo): We build a "fake" from_select clause instead of # using transaction isolation on the session because we would need # SERIALIZABLE level and that would have a considerable performance # penalty. binds = (bindparam(k, v) for k, v in values.items()) sel = context.session.query(*binds).filter(*conditions) insert_stmt = model.__table__.insert().from_select(values.keys(), sel) result = context.session.execute(insert_stmt) # If we couldn't insert the row because of the conditions raise # the right exception if not result.rowcount: msg = _( "Source group cannot be empty or in 'creating' or " "'updating' state. No group snapshot will be created." 
) raise exception.InvalidGroup(reason=msg) else: group_snapshot = model() group_snapshot.update(values) context.session.add(group_snapshot) return _group_snapshot_get(context, values['id']) @require_context @handle_db_data_error @main_context_manager.writer def group_snapshot_update(context, group_snapshot_id, values): result = ( model_query(context, models.GroupSnapshot, project_only=True) .filter_by(id=group_snapshot_id) .first() ) if not result: raise exception.GroupSnapshotNotFound( _("No group snapshot with id %s") % group_snapshot_id ) result.update(values) result.save(context.session) return result @require_admin_context @main_context_manager.writer def group_snapshot_destroy(context, group_snapshot_id): query = model_query(context, models.GroupSnapshot).filter_by( id=group_snapshot_id ) entity = query.column_descriptions[0]['entity'] updated_values = { 'status': 'deleted', 'deleted': True, 'deleted_at': timeutils.utcnow(), 'updated_at': entity.updated_at, } query.update(updated_values) del updated_values['updated_at'] return updated_values def group_snapshot_creating_from_src(): """Get a filter to check if a grp snapshot is being created from a grp.""" return sql.exists().where( and_( models.GroupSnapshot.group_id == models.Group.id, ~models.GroupSnapshot.deleted, models.GroupSnapshot.status == 'creating', ) ) ############################### @require_admin_context @main_context_manager.writer def purge_deleted_rows(context, age_in_days): """Purge deleted rows older than age from cinder tables.""" try: age_in_days = int(age_in_days) except ValueError: msg = _('Invalid value for age, %(age)s') LOG.exception(msg, {'age': age_in_days}) raise exception.InvalidParameterValue(msg % {'age': age_in_days}) engine = get_engine() metadata = MetaData() metadata.reflect(engine) deleted_age = timeutils.utcnow() - dt.timedelta(days=age_in_days) for table in reversed(metadata.sorted_tables): if 'deleted' not in table.columns.keys(): continue LOG.info( 'Purging deleted rows older than age=%(age)d days ' 'from table=%(table)s', {'age': age_in_days, 'table': table}, ) try: # Delete child records first from quality_of_service_specs # table to avoid FK constraints if str(table) == 'quality_of_service_specs': context.session.query(models.QualityOfServiceSpecs).filter( and_( models.QualityOfServiceSpecs.specs_id.isnot(None), models.QualityOfServiceSpecs.deleted.is_(True), models.QualityOfServiceSpecs.deleted_at < deleted_age, ) ).delete() result = context.session.execute( table.delete().where( and_( table.columns.deleted.is_(True), table.c.deleted_at < deleted_age, ) ) ) except db_exc.DBReferenceError as ex: LOG.error( 'DBError detected when purging from %(tablename)s: %(error)s.', {'tablename': table, 'error': ex}, ) raise rows_purged = result.rowcount if rows_purged != 0: LOG.info( 'Deleted %(row)d rows from table=%(table)s', {'row': rows_purged, 'table': table}, ) ############################### @require_admin_context @main_context_manager.writer def reset_active_backend( context, enable_replication, active_backend_id, backend_host, ): service = objects.Service.get_by_host_and_topic( context, backend_host, 'cinder-volume', disabled=True, ) if not service.frozen: raise exception.ServiceUnavailable( 'Service for host %(host)s must first be frozen.' 
% {'host': backend_host}, ) actions = { 'disabled': False, 'disabled_reason': '', 'active_backend_id': None, 'replication_status': 'enabled', } expectations = { 'frozen': True, 'disabled': True, } if service.is_clustered: service.cluster.conditional_update(actions, expectations) service.cluster.reset_service_replication() else: service.conditional_update(actions, expectations) ############################### def _translate_messages(messages): return [_translate_message(message) for message in messages] def _translate_message(message): """Translate the Message model to a dict.""" return { 'id': message['id'], 'project_id': message['project_id'], 'request_id': message['request_id'], 'resource_type': message['resource_type'], 'resource_uuid': message.get('resource_uuid'), 'event_id': message['event_id'], 'detail_id': message['detail_id'], 'action_id': message['action_id'], 'message_level': message['message_level'], 'created_at': message['created_at'], 'expires_at': message.get('expires_at'), } def _message_get(context, message_id): query = model_query( context, models.Message, read_deleted="no", project_only="yes", ) result = query.filter_by(id=message_id).first() if not result: raise exception.MessageNotFound(message_id=message_id) return result @require_context @main_context_manager.reader def message_get(context, message_id): result = _message_get(context, message_id) return _translate_message(result) @require_context @main_context_manager.reader def message_get_all( context, filters=None, marker=None, limit=None, offset=None, sort_keys=None, sort_dirs=None, ): """Retrieves all messages. If no sort parameters are specified then the returned messages are sorted first by the 'created_at' key and then by the 'id' key in descending order. :param context: context to query under :param marker: the last item of the previous page, used to determine the next page of results to return :param limit: maximum number of items to return :param sort_keys: list of attributes by which results should be sorted, paired with corresponding item in sort_dirs :param sort_dirs: list of directions in which results should be sorted, paired with corresponding item in sort_keys :param filters: dictionary of filters; values that are in lists, tuples, or sets cause an 'IN' operation, while exact matching is used for other values, see _process_messages_filters function for more information :returns: list of matching messages """ # Generate the paginate query query = _generate_paginate_query( context, marker, limit, sort_keys, sort_dirs, filters, offset, models.Message, ) if query is None: return [] results = query.all() return _translate_messages(results) @apply_like_filters(model=models.Message) def _process_messages_filters(query, filters): if filters: # Ensure that filters' keys exist on the model if not is_valid_model_filters(models.Message, filters): return None query = query.filter_by(**filters) return query def _messages_get_query(context, project_only=False): return model_query(context, models.Message, project_only=project_only) @require_context @main_context_manager.writer def message_create(context, values): message_ref = models.Message() if not values.get('id'): values['id'] = str(uuid.uuid4()) message_ref.update(values) context.session.add(message_ref) @require_admin_context @main_context_manager.writer def message_destroy(context, message_id): now = timeutils.utcnow() query = model_query(context, models.Message).filter_by(id=message_id) entity = query.column_descriptions[0]['entity'] updated_values = { 
'deleted': True, 'deleted_at': now, 'updated_at': entity.updated_at, } query.update(updated_values) del updated_values['updated_at'] return updated_values @require_admin_context @main_context_manager.writer def cleanup_expired_messages(context): now = timeutils.utcnow() # NOTE(tommylikehu): Directly delete the expired # messages here. return ( context.session.query(models.Message) .filter(models.Message.expires_at < now) .delete() ) ############################### @require_context @main_context_manager.writer def driver_initiator_data_insert_by_key( context, initiator, namespace, key, value, ): data = models.DriverInitiatorData() data.initiator = initiator data.namespace = namespace data.key = key data.value = value try: data.save(context.session) except db_exc.DBDuplicateEntry: raise exception.DriverInitiatorDataExists( initiator=initiator, namespace=namespace, key=key, ) return data @require_context @main_context_manager.reader def driver_initiator_data_get(context, initiator, namespace): return ( context.session.query(models.DriverInitiatorData) .filter_by(initiator=initiator) .filter_by(namespace=namespace) .all() ) ############################### @require_context @main_context_manager.writer def image_volume_cache_create( context, host, cluster_name, image_id, image_updated_at, volume_id, size, ): cache_entry = models.ImageVolumeCacheEntry() cache_entry.host = host cache_entry.cluster_name = cluster_name cache_entry.image_id = image_id cache_entry.image_updated_at = image_updated_at cache_entry.volume_id = volume_id cache_entry.size = size context.session.add(cache_entry) return cache_entry @require_context @main_context_manager.writer def image_volume_cache_delete(context, volume_id): context.session.query( models.ImageVolumeCacheEntry, ).filter_by(volume_id=volume_id).delete() @require_context @main_context_manager.writer def image_volume_cache_get_and_update_last_used(context, image_id, **filters): filters = _clean_filters(filters) entry = ( context.session.query(models.ImageVolumeCacheEntry) .filter_by(image_id=image_id) .filter_by(**filters) .order_by(desc(models.ImageVolumeCacheEntry.last_used)) .first() ) if entry: entry.last_used = timeutils.utcnow() entry.save(context.session) return entry @require_context @main_context_manager.reader def image_volume_cache_get_by_volume_id(context, volume_id): return ( context.session.query(models.ImageVolumeCacheEntry) .filter_by(volume_id=volume_id) .first() ) @require_context @main_context_manager.reader def image_volume_cache_get_all(context, **filters): filters = _clean_filters(filters) return ( context.session.query(models.ImageVolumeCacheEntry) .filter_by(**filters) .order_by(desc(models.ImageVolumeCacheEntry.last_used)) .all() ) @require_admin_context @main_context_manager.writer def image_volume_cache_include_in_cluster( context, cluster, partial_rename=True, **filters, ): """Include all volumes matching the filters into a cluster.""" filters = _clean_filters(filters) return _include_in_cluster( context, cluster, models.ImageVolumeCacheEntry, partial_rename, filters, ) ################### def _worker_query( context, until=None, db_filters=None, ignore_sentinel=True, **filters, ): # Remove all filters based on the workers table that are set to None filters = _clean_filters(filters) if filters and not is_valid_model_filters(models.Worker, filters): return None query = model_query(context, models.Worker) # TODO: Once we stop creating the SENTINEL entry in the database (which # was only needed to support MySQL 5.5), we can drop this. 
Probably in the # A release or later if ignore_sentinel: # We don't want to retrieve the workers sentinel query = query.filter(models.Worker.resource_type != 'SENTINEL') if until: db_filters = list(db_filters) if db_filters else [] # Since we set updated_at at creation time we don't need to check # created_at field. db_filters.append(models.Worker.updated_at <= until) if db_filters: query = query.filter(and_(*db_filters)) if filters: query = query.filter_by(**filters) return query def _worker_set_updated_at_field(values): updated_at = values.get('updated_at', timeutils.utcnow()) if isinstance(updated_at, str): return values['updated_at'] = updated_at @require_context @oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True) @main_context_manager.writer def worker_create(context, **values): """Create a worker entry from optional arguments.""" _worker_set_updated_at_field(values) worker = models.Worker(**values) try: worker.save(context.session) except db_exc.DBDuplicateEntry: raise exception.WorkerExists( type=values.get('resource_type'), id=values.get('resource_id'), ) return worker @require_context @main_context_manager.reader def worker_get(context, **filters): """Get a worker or raise exception if it does not exist.""" query = _worker_query(context, **filters) worker = query.first() if query else None if not worker: raise exception.WorkerNotFound(**filters) return worker @require_context @main_context_manager.reader def worker_get_all(context, until=None, db_filters=None, **filters): """Get all workers that match given criteria.""" query = _worker_query( context, until=until, db_filters=db_filters, **filters, ) return query.all() if query else [] def _orm_worker_update(worker, values): if not worker: return for key, value in values.items(): setattr(worker, key, value) @require_context @oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True) @main_context_manager.writer def worker_update(context, id, filters=None, orm_worker=None, **values): """Update a worker with given values.""" filters = filters or {} query = _worker_query(context, id=id, **filters) # If we want to update the orm_worker and we don't set the update_at field # we set it here instead of letting SQLAlchemy do it to be able to update # the orm_worker. _worker_set_updated_at_field(values) reference = orm_worker or models.Worker values['race_preventer'] = reference.race_preventer + 1 result = query.update(values) if not result: raise exception.WorkerNotFound(id=id, **filters) _orm_worker_update(orm_worker, values) return result @require_context @oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True) @main_context_manager.writer def worker_claim_for_cleanup(context, claimer_id, orm_worker): """Claim a worker entry for cleanup.""" # We set updated_at value so we are sure we update the DB entry even if the # service_id is the same in the DB, thus flagging the claim. 
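    # NOTE: this is an optimistic-concurrency claim: the query below also
    # filters on the status, service_id and race_preventer the caller last
    # read (and on updated_at being no newer than that read), so the UPDATE
    # only succeeds when no other host or thread has claimed or modified the
    # worker row in the meantime.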
values = { 'service_id': claimer_id, 'race_preventer': orm_worker.race_preventer + 1, 'updated_at': timeutils.utcnow(), } _worker_set_updated_at_field(values) # We only update the worker entry if it hasn't been claimed by other host # or thread query = _worker_query( context, status=orm_worker.status, service_id=orm_worker.service_id, race_preventer=orm_worker.race_preventer, until=orm_worker.updated_at, id=orm_worker.id, ) result = query.update(values, synchronize_session=False) if result: _orm_worker_update(orm_worker, values) return result @require_context @main_context_manager.writer def worker_destroy(context, **filters): """Delete a worker (no soft delete).""" query = _worker_query(context, **filters) return query.delete() ############################### # TODO: (D Release) remove method and this comment @enginefacade.writer def remove_temporary_admin_metadata_data_migration(context, max_count): admin_meta_table = models.VolumeAdminMetadata query = model_query(context, admin_meta_table.id).filter_by(key='temporary') total = query.count() ids_query = query.limit(max_count).subquery() update_args = {'synchronize_session': False} # We cannot use limit with update or delete so create a new query updated = model_query(context, admin_meta_table).\ filter(admin_meta_table.id.in_(ids_query)).\ update(admin_meta_table.delete_values(), **update_args) return total, updated ############################### PAGINATION_HELPERS = { models.Volume: (_volume_get_query, _process_volume_filters, _volume_get), models.Snapshot: (_snaps_get_query, _process_snaps_filters, _snapshot_get), models.Backup: (_backups_get_query, _process_backups_filters, _backup_get), models.QualityOfServiceSpecs: ( _qos_specs_get_query, _process_qos_specs_filters, _qos_specs_get, ), models.VolumeType: ( _volume_type_get_query, _process_volume_types_filters, _volume_type_get_db_object, ), models.ConsistencyGroup: ( _consistencygroups_get_query, _process_consistencygroups_filters, _consistencygroup_get, ), models.Message: ( _messages_get_query, _process_messages_filters, _message_get, ), models.GroupType: ( _group_type_get_query, _process_group_types_filters, _group_type_get_db_object, ), models.Group: (_groups_get_query, _process_groups_filters, _group_get), models.GroupSnapshot: ( _group_snapshot_get_query, _process_group_snapshot_filters, _group_snapshot_get, ), models.VolumeAttachment: ( _attachment_get_query, _process_attachment_filters, _attachment_get, ), models.Transfer: ( _transfer_get_query, _process_transfer_filters, _transfer_get, ), } CALCULATE_COUNT_HELPERS = { 'volume': (_volume_get_query, _process_volume_filters), 'snapshot': (_snaps_get_query, _process_snaps_filters), 'backup': (_backups_get_query, _process_backups_filters), } def get_projects(context, model, read_deleted="no"): return model_query(context, model, read_deleted=read_deleted).\ with_entities(sa.Column('project_id')).distinct().all() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/db/sqlalchemy/models.py0000664000175000017500000012020700000000000020721 0ustar00zuulzuul00000000000000# Copyright (c) 2011 X.commerce, a business unit of eBay Inc. # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # Copyright 2011 Piston Cloud Computing, Inc. # All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ SQLAlchemy models for cinder data. """ from oslo_config import cfg from oslo_db.sqlalchemy import models from oslo_utils import timeutils import sqlalchemy as sa # imports needed for cinderlib from sqlalchemy import Column, String, Text # noqa: F401 from sqlalchemy import func from sqlalchemy import schema from sqlalchemy.sql import expression from sqlalchemy.orm import backref, column_property, declarative_base, \ relationship, validates CONF = cfg.CONF BASE = declarative_base() class CinderBase(models.TimestampMixin, models.ModelBase): """Base class for Cinder Models.""" __table_args__ = {'mysql_engine': 'InnoDB'} # TODO(rpodolyaka): reuse models.SoftDeleteMixin in the next stage # of implementing of BP db-cleanup deleted_at = sa.Column(sa.DateTime) deleted = sa.Column(sa.Boolean, default=False) metadata = None @staticmethod def delete_values(): return {'deleted': True, 'deleted_at': timeutils.utcnow()} def delete(self, session): """Delete this object.""" updated_values = self.delete_values() updated_values['updated_at'] = self.updated_at self.update(updated_values) self.save(session=session) del updated_values['updated_at'] return updated_values class Service(BASE, CinderBase): """Represents a running service on a host.""" __tablename__ = 'services' __table_args__ = ( sa.Index('services_uuid_idx', 'uuid', unique=True), CinderBase.__table_args__, ) id = sa.Column(sa.Integer, primary_key=True) uuid = sa.Column(sa.String(36), nullable=True) cluster_name = sa.Column(sa.String(255), nullable=True) host = sa.Column(sa.String(255)) # , sa.ForeignKey('hosts.id')) binary = sa.Column(sa.String(255)) # We want to overwrite default updated_at definition so we timestamp at # creation as well, so we only need to check updated_at for the heartbeat updated_at = sa.Column( sa.DateTime, default=timeutils.utcnow, onupdate=timeutils.utcnow ) topic = sa.Column(sa.String(255)) report_count = sa.Column(sa.Integer, nullable=False, default=0) disabled = sa.Column(sa.Boolean, default=False) availability_zone = sa.Column(sa.String(255), default='cinder') disabled_reason = sa.Column(sa.String(255)) # adding column modified_at to contain timestamp # for manual enable/disable of cinder services # updated_at column will now contain timestamps for # periodic updates modified_at = sa.Column(sa.DateTime) # Version columns to support rolling upgrade. These report the max RPC API # and objects versions that the manager of the service is able to support. 
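    # Illustrative example values only: rpc_current_version = '3.17',
    # object_current_version = '1.38'; during a rolling upgrade other services
    # can pin themselves to the minimum versions reported here until every
    # service has been upgraded.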
rpc_current_version = sa.Column(sa.String(36)) object_current_version = sa.Column(sa.String(36)) # replication_status can be: enabled, disabled, not-capable, error, # failed-over or not-configured replication_status = sa.Column(sa.String(36), default="not-capable") active_backend_id = sa.Column(sa.String(255)) # TODO(stephenfin): Add nullable=False frozen = sa.Column(sa.Boolean, default=False) cluster = relationship( 'Cluster', backref='services', foreign_keys=cluster_name, primaryjoin='and_(' 'Service.cluster_name == Cluster.name,' 'Service.deleted == False)', ) class Cluster(BASE, CinderBase): """Represents a cluster of hosts.""" __tablename__ = 'clusters' # To remove potential races on creation we have a constraint set on name # and race_preventer fields, and we set value on creation to 0, so 2 # clusters with the same name will fail this constraint. On deletion we # change this field to the same value as the id which will be unique and # will not conflict with the creation of another cluster with the same # name. __table_args__ = ( sa.UniqueConstraint('name', 'binary', 'race_preventer'), CinderBase.__table_args__, ) id = sa.Column(sa.Integer, primary_key=True) # NOTE(geguileo): Name is constructed in the same way that Server.host but # using cluster configuration option instead of host. name = sa.Column(sa.String(255), nullable=False) binary = sa.Column(sa.String(255), nullable=False) disabled = sa.Column(sa.Boolean, default=False) disabled_reason = sa.Column(sa.String(255)) race_preventer = sa.Column(sa.Integer, nullable=False, default=0) replication_status = sa.Column(sa.String(36), default="not-capable") active_backend_id = sa.Column(sa.String(255)) frozen = sa.Column( sa.Boolean, nullable=False, default=False, server_default=expression.false(), ) # Last heartbeat reported by any of the services of this cluster. This is # not deferred since we always want to load this field. 
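    # NOTE: implemented as a correlated scalar subquery
    # (max(services.updated_at) over this cluster's non-deleted services), so
    # the value is computed by the database whenever a Cluster row is loaded
    # instead of being stored in the clusters table.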
last_heartbeat = column_property( sa.select(func.max(Service.updated_at)) .where(sa.and_(Service.cluster_name == name, ~Service.deleted)) .correlate_except(Service) .scalar_subquery(), deferred=False, ) # Number of existing services for this cluster num_hosts = column_property( sa.select(func.count(Service.id)) .where(sa.and_(Service.cluster_name == name, ~Service.deleted)) .correlate_except(Service) .scalar_subquery(), group='services_summary', deferred=True, ) # Number of services that are down for this cluster num_down_hosts = column_property( sa.select(func.count(Service.id)) .where( sa.and_( Service.cluster_name == name, ~Service.deleted, Service.updated_at < sa.bindparam('expired'), ) ) .correlate_except(Service) .scalar_subquery(), group='services_summary', deferred=True, ) @staticmethod def delete_values(): return { 'race_preventer': Cluster.id, 'deleted': True, 'deleted_at': timeutils.utcnow(), } class ConsistencyGroup(BASE, CinderBase): """Represents a consistencygroup.""" __tablename__ = 'consistencygroups' id = sa.Column(sa.String(36), primary_key=True) # TODO(stephenfin): Add nullable=False user_id = sa.Column(sa.String(255)) project_id = sa.Column(sa.String(255)) cluster_name = sa.Column(sa.String(255), nullable=True) host = sa.Column(sa.String(255)) availability_zone = sa.Column(sa.String(255)) name = sa.Column(sa.String(255)) description = sa.Column(sa.String(255)) volume_type_id = sa.Column(sa.String(255)) status = sa.Column(sa.String(255)) cgsnapshot_id = sa.Column(sa.String(36)) source_cgid = sa.Column(sa.String(36)) class Group(BASE, CinderBase): """Represents a generic volume group.""" __tablename__ = 'groups' __table_args__ = ( # Speed up normal listings sa.Index('groups_deleted_project_id_idx', 'deleted', 'project_id'), CinderBase.__table_args__, ) id = sa.Column(sa.String(36), primary_key=True) # TODO(stephenfin): Add nullable=False user_id = sa.Column(sa.String(255)) project_id = sa.Column(sa.String(255)) cluster_name = sa.Column(sa.String(255)) host = sa.Column(sa.String(255)) availability_zone = sa.Column(sa.String(255)) name = sa.Column(sa.String(255)) description = sa.Column(sa.String(255)) status = sa.Column(sa.String(255)) group_type_id = sa.Column(sa.String(36)) group_snapshot_id = sa.Column(sa.String(36)) source_group_id = sa.Column(sa.String(36)) replication_status = sa.Column(sa.String(255)) class CGSnapshot(BASE, CinderBase): """Represents a cgsnapshot.""" __tablename__ = 'cgsnapshots' id = sa.Column(sa.String(36), primary_key=True) consistencygroup_id = sa.Column( sa.String(36), sa.ForeignKey('consistencygroups.id'), nullable=False, index=True, ) # TODO(stephenfin): Add nullable=False user_id = sa.Column(sa.String(255)) project_id = sa.Column(sa.String(255)) name = sa.Column(sa.String(255)) description = sa.Column(sa.String(255)) status = sa.Column(sa.String(255)) consistencygroup = relationship( ConsistencyGroup, backref="cgsnapshots", foreign_keys=consistencygroup_id, primaryjoin='CGSnapshot.consistencygroup_id == ConsistencyGroup.id', ) class GroupSnapshot(BASE, CinderBase): """Represents a group snapshot.""" __tablename__ = 'group_snapshots' __table_args__ = ( # Speed up normal listings sa.Index('group_snapshots_deleted_project_id_idx', 'deleted', 'project_id'), CinderBase.__table_args__, ) id = sa.Column(sa.String(36), primary_key=True) group_id = sa.Column( 'group_id', sa.String(36), sa.ForeignKey('groups.id'), nullable=False, index=True, ) user_id = sa.Column(sa.String(255)) project_id = sa.Column(sa.String(255)) name = sa.Column(sa.String(255)) 
description = sa.Column(sa.String(255)) status = sa.Column(sa.String(255)) group_type_id = sa.Column(sa.String(36)) group = relationship( Group, backref="group_snapshots", foreign_keys=group_id, primaryjoin='GroupSnapshot.group_id == Group.id', ) class Volume(BASE, CinderBase): """Represents a block storage device that can be attached to a vm.""" __tablename__ = 'volumes' __table_args__ = ( sa.Index('volumes_service_uuid_idx', 'service_uuid', 'deleted'), # Speed up normal listings sa.Index('volumes_deleted_project_id_idx', 'deleted', 'project_id'), # Speed up service start, create volume from image when using direct # urls, host REST API, and the cinder-manage update host cmd sa.Index('volumes_deleted_host_idx', 'deleted', 'host'), CinderBase.__table_args__, ) id = sa.Column(sa.String(36), primary_key=True) _name_id = sa.Column(sa.String(36)) # Don't access/modify this directly! use_quota = Column( sa.Boolean, nullable=False, default=True, server_default=sa.true(), doc='Ignore volume in quota usage', ) @property def name_id(self): return self.id if not self._name_id else self._name_id @name_id.setter def name_id(self, value): self._name_id = value @property def name(self): return CONF.volume_name_template % self.name_id ec2_id = sa.Column(sa.String(255)) user_id = sa.Column(sa.String(255)) project_id = sa.Column(sa.String(255)) snapshot_id = sa.Column(sa.String(36)) cluster_name = sa.Column(sa.String(255), nullable=True) host = sa.Column(sa.String(255)) # , sa.ForeignKey('hosts.id')) size = sa.Column(sa.Integer) availability_zone = sa.Column(sa.String(255)) # TODO(vish): foreign key? status = sa.Column(sa.String(255)) # TODO(vish): enum? attach_status = sa.Column(sa.String(255)) # TODO(vish): enum migration_status = sa.Column(sa.String(255)) scheduled_at = sa.Column(sa.DateTime) launched_at = sa.Column(sa.DateTime) terminated_at = sa.Column(sa.DateTime) display_name = sa.Column(sa.String(255)) display_description = sa.Column(sa.String(255)) provider_location = sa.Column(sa.String(256)) provider_auth = sa.Column(sa.String(256)) provider_geometry = sa.Column(sa.String(255)) provider_id = sa.Column(sa.String(255)) volume_type_id = sa.Column(sa.String(36), nullable=False) source_volid = sa.Column(sa.String(36)) encryption_key_id = sa.Column(sa.String(36)) consistencygroup_id = sa.Column( sa.String(36), sa.ForeignKey('consistencygroups.id'), index=True, ) group_id = sa.Column( 'group_id', sa.String(36), sa.ForeignKey('groups.id'), index=True, ) bootable = sa.Column(sa.Boolean, default=False) multiattach = sa.Column(sa.Boolean, default=False) replication_status = sa.Column(sa.String(255)) replication_extended_status = sa.Column(sa.String(255)) replication_driver_data = sa.Column(sa.String(255)) previous_status = sa.Column(sa.String(255)) consistencygroup = relationship( ConsistencyGroup, backref="volumes", foreign_keys=consistencygroup_id, primaryjoin='Volume.consistencygroup_id == ConsistencyGroup.id', ) group = relationship( Group, backref="volumes", foreign_keys=group_id, primaryjoin='Volume.group_id == Group.id', ) service_uuid = sa.Column( sa.String(36), sa.ForeignKey('services.uuid'), nullable=True, ) service = relationship( Service, backref="volumes", foreign_keys=service_uuid, primaryjoin='Volume.service_uuid == Service.uuid', ) # True => Do locking when iSCSI initiator doesn't support manual scan # False => Never do locking # None => Forced locking regardless of the iSCSI initiator # make an FK of service? 
shared_targets = sa.Column(sa.Boolean, nullable=True, default=True) class VolumeMetadata(BASE, CinderBase): """Represents a metadata key/value pair for a volume.""" __tablename__ = 'volume_metadata' id = sa.Column(sa.Integer, primary_key=True) key = sa.Column(sa.String(255)) value = sa.Column(sa.String(255)) volume_id = sa.Column( sa.String(36), sa.ForeignKey('volumes.id'), nullable=False, index=True ) volume = relationship( Volume, backref="volume_metadata", foreign_keys=volume_id, primaryjoin='and_(' 'VolumeMetadata.volume_id == Volume.id,' 'VolumeMetadata.deleted == False)', ) class VolumeAdminMetadata(BASE, CinderBase): """Represents an administrator metadata key/value pair for a volume.""" __tablename__ = 'volume_admin_metadata' id = sa.Column(sa.Integer, primary_key=True) key = sa.Column(sa.String(255)) value = sa.Column(sa.String(255)) volume_id = sa.Column( sa.String(36), sa.ForeignKey('volumes.id'), nullable=False, index=True ) volume = relationship( Volume, backref="volume_admin_metadata", foreign_keys=volume_id, primaryjoin='and_(' 'VolumeAdminMetadata.volume_id == Volume.id,' 'VolumeAdminMetadata.deleted == False)', ) class VolumeAttachment(BASE, CinderBase): """Represents a volume attachment for a vm.""" __tablename__ = 'volume_attachment' id = sa.Column(sa.String(36), primary_key=True) volume_id = sa.Column( sa.String(36), sa.ForeignKey('volumes.id'), nullable=False, index=True ) volume = relationship( Volume, backref="volume_attachment", foreign_keys=volume_id, primaryjoin='and_(' 'VolumeAttachment.volume_id == Volume.id,' 'VolumeAttachment.deleted == False)', ) instance_uuid = sa.Column(sa.String(36)) attached_host = sa.Column(sa.String(255)) mountpoint = sa.Column(sa.String(255)) attach_time = sa.Column(sa.DateTime) detach_time = sa.Column(sa.DateTime) attach_status = sa.Column(sa.String(255)) attach_mode = sa.Column(sa.String(36)) connection_info = sa.Column(sa.Text) # Stores a serialized json dict of host connector information from brick. 
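    # Illustrative example of the serialized connector data (the exact keys
    # depend on the os-brick connector in use and are an assumption here,
    # not taken from this file):
    #   {"initiator": "iqn.1993-08.org.debian:01:abc123",
    #    "host": "compute-1", "ip": "192.0.2.10", "multipath": false}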
connector = sa.Column(sa.Text) @staticmethod def delete_values(): now = timeutils.utcnow() return {'deleted': True, 'deleted_at': now, 'attach_status': 'detached', 'detach_time': now} class VolumeType(BASE, CinderBase): """Represent possible volume_types of volumes offered.""" __tablename__ = "volume_types" id = sa.Column(sa.String(36), primary_key=True) name = sa.Column(sa.String(255)) description = sa.Column(sa.String(255)) # A reference to qos_specs entity qos_specs_id = sa.Column( sa.String(36), sa.ForeignKey('quality_of_service_specs.id'), index=True ) is_public = sa.Column(sa.Boolean, default=True) volumes = relationship( Volume, backref=backref('volume_type', uselist=False), foreign_keys=id, primaryjoin='and_(' 'Volume.volume_type_id == VolumeType.id, ' 'VolumeType.deleted == False)', ) class GroupType(BASE, CinderBase): """Represent possible group_types of groups offered.""" __tablename__ = "group_types" id = sa.Column(sa.String(36), primary_key=True) name = sa.Column(sa.String(255), nullable=False) description = sa.Column(sa.String(255)) is_public = sa.Column(sa.Boolean, default=True) groups = relationship( Group, backref=backref('group_type', uselist=False), foreign_keys=id, primaryjoin='and_(' 'Group.group_type_id == GroupType.id, ' 'GroupType.deleted == False)', ) class GroupVolumeTypeMapping(BASE, CinderBase): """Represent mapping between groups and volume_types.""" __tablename__ = "group_volume_type_mapping" id = sa.Column(sa.Integer, primary_key=True, nullable=False) volume_type_id = sa.Column( sa.String(36), sa.ForeignKey('volume_types.id'), nullable=False, index=True, ) group_id = sa.Column( sa.String(36), sa.ForeignKey('groups.id'), nullable=False, index=True ) group = relationship( Group, backref="volume_types", foreign_keys=group_id, primaryjoin='and_(' 'GroupVolumeTypeMapping.group_id == Group.id,' 'GroupVolumeTypeMapping.deleted == False)', ) class VolumeTypeProjects(BASE, CinderBase): """Represent projects associated volume_types.""" __tablename__ = "volume_type_projects" __table_args__ = ( schema.UniqueConstraint( "volume_type_id", "project_id", "deleted", ), CinderBase.__table_args__, ) id = sa.Column(sa.Integer, primary_key=True) # TODO(stephenfin): Add nullable=False volume_type_id = sa.Column( sa.String(36), sa.ForeignKey('volume_types.id'), ) project_id = sa.Column(sa.String(255)) deleted = sa.Column(sa.Integer, default=0) volume_type = relationship( VolumeType, backref="projects", foreign_keys=volume_type_id, primaryjoin='and_(' 'VolumeTypeProjects.volume_type_id == VolumeType.id,' 'VolumeTypeProjects.deleted == 0)', ) class GroupTypeProjects(BASE, CinderBase): """Represent projects associated group_types.""" __tablename__ = "group_type_projects" __table_args__ = ( sa.UniqueConstraint('group_type_id', 'project_id', 'deleted'), CinderBase.__table_args__, ) id = sa.Column(sa.Integer, primary_key=True) # TODO(stephenfin): Add nullable=False group_type_id = sa.Column( sa.String(36), sa.ForeignKey('group_types.id'), ) project_id = sa.Column(sa.String(255)) group_type = relationship( GroupType, backref="projects", foreign_keys=group_type_id, primaryjoin='and_(' 'GroupTypeProjects.group_type_id == GroupType.id,' 'GroupTypeProjects.deleted == False)', ) class VolumeTypeExtraSpecs(BASE, CinderBase): """Represents additional specs as key/value pairs for a volume_type.""" __tablename__ = 'volume_type_extra_specs' id = sa.Column(sa.Integer, primary_key=True) key = sa.Column(sa.String(255)) value = sa.Column(sa.String(255)) volume_type_id = sa.Column( sa.String(36), 
sa.ForeignKey( 'volume_types.id', name='volume_type_extra_specs_ibfk_1', ), nullable=False, index=True, ) volume_type = relationship( VolumeType, backref="extra_specs", foreign_keys=volume_type_id, primaryjoin='and_(' 'VolumeTypeExtraSpecs.volume_type_id == VolumeType.id,' 'VolumeTypeExtraSpecs.deleted == False)', ) class GroupTypeSpecs(BASE, CinderBase): """Represents additional specs as key/value pairs for a group_type.""" __tablename__ = 'group_type_specs' id = sa.Column(sa.Integer, primary_key=True) key = sa.Column(sa.String(255)) value = sa.Column(sa.String(255)) group_type_id = sa.Column( sa.String(36), sa.ForeignKey('group_types.id'), nullable=False, index=True, ) group_type = relationship( GroupType, backref="group_specs", foreign_keys=group_type_id, primaryjoin='and_(' 'GroupTypeSpecs.group_type_id == GroupType.id,' 'GroupTypeSpecs.deleted == False)', ) class DefaultVolumeTypes(BASE, CinderBase): """Represent projects associated volume_types.""" __tablename__ = "default_volume_types" volume_type_id = sa.Column( sa.String(36), sa.ForeignKey('volume_types.id'), index=True, ) project_id = sa.Column(sa.String(255), primary_key=True) volume_type = relationship( VolumeType, foreign_keys=volume_type_id, primaryjoin='DefaultVolumeTypes.volume_type_id == VolumeType.id', ) class QualityOfServiceSpecs(BASE, CinderBase): """Represents QoS specs as key/value pairs. QoS specs is standalone entity that can be associated/disassociated with volume types (one to many relation). Adjacency list relationship pattern is used in this model in order to represent following hierarchical data with in flat table, e.g, following structure: .. code-block:: none qos-specs-1 'Rate-Limit' | +------> consumer = 'front-end' +------> total_bytes_sec = 1048576 +------> total_iops_sec = 500 qos-specs-2 'QoS_Level1' | +------> consumer = 'back-end' +------> max-iops = 1000 +------> min-iops = 200 is represented by: id specs_id key value ------ -------- ------------- ----- UUID-1 NULL QoSSpec_Name Rate-Limit UUID-2 UUID-1 consumer front-end UUID-3 UUID-1 total_bytes_sec 1048576 UUID-4 UUID-1 total_iops_sec 500 UUID-5 NULL QoSSpec_Name QoS_Level1 UUID-6 UUID-5 consumer back-end UUID-7 UUID-5 max-iops 1000 UUID-8 UUID-5 min-iops 200 """ __tablename__ = 'quality_of_service_specs' id = sa.Column(sa.String(36), primary_key=True) specs_id = sa.Column(sa.String(36), sa.ForeignKey(id), index=True) key = sa.Column(sa.String(255)) value = sa.Column(sa.String(255)) specs = relationship( "QualityOfServiceSpecs", cascade="all, delete-orphan", backref=backref("qos_spec", remote_side=id), ) vol_types = relationship( VolumeType, backref=backref('qos_specs'), foreign_keys=id, primaryjoin='and_(' 'or_(VolumeType.qos_specs_id == ' 'QualityOfServiceSpecs.id,' 'VolumeType.qos_specs_id == ' 'QualityOfServiceSpecs.specs_id),' 'QualityOfServiceSpecs.deleted == False)', ) class VolumeGlanceMetadata(BASE, CinderBase): """Glance metadata for a bootable volume.""" __tablename__ = 'volume_glance_metadata' id = sa.Column(sa.Integer, primary_key=True, nullable=False) volume_id = sa.Column( sa.String(36), sa.ForeignKey('volumes.id'), index=True ) snapshot_id = sa.Column( sa.String(36), sa.ForeignKey('snapshots.id'), index=True ) key = sa.Column(sa.String(255)) value = sa.Column(sa.Text) volume = relationship( Volume, backref="volume_glance_metadata", foreign_keys=volume_id, primaryjoin='and_(' 'VolumeGlanceMetadata.volume_id == Volume.id,' 'VolumeGlanceMetadata.deleted == False)', ) class Quota(BASE, CinderBase): """Represents a single quota override 
for a project. If there is no row for a given project id and resource, then the default for the quota class is used. If there is no row for a given quota class and resource, then the default for the deployment is used. If the row is present but the hard limit is Null, then the resource is unlimited. """ __tablename__ = 'quotas' id = sa.Column(sa.Integer, primary_key=True) # TODO(stephenfin): Add index=True project_id = sa.Column(sa.String(255)) resource = sa.Column(sa.String(300), nullable=False) hard_limit = sa.Column(sa.Integer, nullable=True) class QuotaClass(BASE, CinderBase): """Represents a single quota override for a quota class. If there is no row for a given quota class and resource, then the default for the deployment is used. If the row is present but the hard limit is Null, then the resource is unlimited. """ __tablename__ = 'quota_classes' id = sa.Column(sa.Integer, primary_key=True) class_name = sa.Column(sa.String(255), index=True) resource = sa.Column(sa.String(300)) hard_limit = sa.Column(sa.Integer, nullable=True) class QuotaUsage(BASE, CinderBase): """Represents the current usage for a given resource.""" __tablename__ = 'quota_usages' # NOTE: project_id and resource are not enough as unique constraint since # we do soft deletes and there could be duplicated entries, so we add the # race_preventer field. __table_args__ = ( sa.Index('quota_usage_project_resource_idx', 'project_id', 'resource'), sa.UniqueConstraint('project_id', 'resource', 'race_preventer'), CinderBase.__table_args__, ) id = sa.Column(sa.Integer, primary_key=True) project_id = sa.Column(sa.String(255), index=True) # TODO(stephenfin): Add index=True resource = sa.Column(sa.String(300)) in_use = sa.Column(sa.Integer, nullable=False) reserved = sa.Column(sa.Integer, nullable=False) @property def total(self): return self.in_use + self.reserved until_refresh = sa.Column(sa.Integer, nullable=True) # To prevent races during creation on quota_reserve method race_preventer = sa.Column(sa.Boolean, nullable=True, default=True) @staticmethod def delete_values(): res = CinderBase.delete_values() res['race_preventer'] = None return res class Reservation(BASE, CinderBase): """Represents a resource reservation for quotas.""" __tablename__ = 'reservations' __table_args__ = ( sa.Index('reservations_deleted_expire_idx', 'deleted', 'expire'), sa.Index('reservations_deleted_uuid_idx', 'deleted', 'uuid'), CinderBase.__table_args__, ) id = sa.Column(sa.Integer, primary_key=True) uuid = sa.Column(sa.String(36), nullable=False) usage_id = sa.Column( sa.Integer, sa.ForeignKey('quota_usages.id'), nullable=True, index=True ) project_id = sa.Column(sa.String(255), index=True) resource = sa.Column(sa.String(300)) delta = sa.Column(sa.Integer, nullable=False) # TODO(stephenfin): Add nullable=False expire = sa.Column(sa.DateTime) usage = relationship( "QuotaUsage", foreign_keys=usage_id, primaryjoin='and_(Reservation.usage_id == QuotaUsage.id,' 'QuotaUsage.deleted == False)', ) class Snapshot(BASE, CinderBase): """Represents a snapshot of volume.""" __tablename__ = 'snapshots' __table_args__ = ( # Speed up normal listings sa.Index('snapshots_deleted_project_id_idx', 'deleted', 'project_id'), CinderBase.__table_args__, ) id = sa.Column(sa.String(36), primary_key=True) use_quota = Column( sa.Boolean, nullable=False, default=True, server_default=sa.true(), doc='Ignore volume in quota usage', ) @property def name(self): return CONF.snapshot_name_template % self.id @property def volume_name(self): return self.volume.name # pylint: 
disable=E1101 user_id = sa.Column(sa.String(255)) project_id = sa.Column(sa.String(255)) volume_id = sa.Column( sa.String(36), sa.ForeignKey('volumes.id', name='snapshots_volume_id_fkey'), nullable=False, index=True, ) cgsnapshot_id = sa.Column( sa.String(36), sa.ForeignKey('cgsnapshots.id'), index=True, ) group_snapshot_id = sa.Column( sa.String(36), sa.ForeignKey('group_snapshots.id'), index=True, ) status = sa.Column(sa.String(255)) progress = sa.Column(sa.String(255)) volume_size = sa.Column(sa.Integer) scheduled_at = sa.Column(sa.DateTime) display_name = sa.Column(sa.String(255)) display_description = sa.Column(sa.String(255)) encryption_key_id = sa.Column(sa.String(36)) volume_type_id = sa.Column(sa.String(36), nullable=False) provider_location = sa.Column(sa.String(255)) provider_id = sa.Column(sa.String(255)) provider_auth = sa.Column(sa.String(255)) volume = relationship( Volume, backref="snapshots", foreign_keys=volume_id, primaryjoin='Snapshot.volume_id == Volume.id', ) cgsnapshot = relationship( CGSnapshot, backref="snapshots", foreign_keys=cgsnapshot_id, primaryjoin='Snapshot.cgsnapshot_id == CGSnapshot.id', ) group_snapshot = relationship( GroupSnapshot, backref="snapshots", foreign_keys=group_snapshot_id, primaryjoin='Snapshot.group_snapshot_id == GroupSnapshot.id', ) class SnapshotMetadata(BASE, CinderBase): """Represents a metadata key/value pair for a snapshot.""" __tablename__ = 'snapshot_metadata' id = sa.Column(sa.Integer, primary_key=True) key = sa.Column(sa.String(255)) value = sa.Column(sa.String(255)) snapshot_id = sa.Column( sa.String(36), sa.ForeignKey('snapshots.id'), nullable=False, index=True, ) snapshot = relationship( Snapshot, backref="snapshot_metadata", foreign_keys=snapshot_id, primaryjoin='and_(' 'SnapshotMetadata.snapshot_id == Snapshot.id,' 'SnapshotMetadata.deleted == False)', ) class Backup(BASE, CinderBase): """Represents a backup of a volume to Swift.""" __tablename__ = 'backups' __table_args__ = ( # Speed up normal listings sa.Index('backups_deleted_project_id_idx', 'deleted', 'project_id'), CinderBase.__table_args__, ) id = sa.Column(sa.String(36), primary_key=True) # Backups don't have use_quota field since we don't have temporary backups @property def name(self): return CONF.backup_name_template % self.id # TODO(stephenfin): Add nullable=False user_id = sa.Column(sa.String(255)) project_id = sa.Column(sa.String(255)) volume_id = sa.Column(sa.String(36), nullable=False) host = sa.Column(sa.String(255)) availability_zone = sa.Column(sa.String(255)) display_name = sa.Column(sa.String(255)) display_description = sa.Column(sa.String(255)) container = sa.Column(sa.String(255)) parent_id = sa.Column(sa.String(36)) status = sa.Column(sa.String(255)) fail_reason = sa.Column(sa.String(255)) service_metadata = sa.Column(sa.String(255)) service = sa.Column(sa.String(255)) size = sa.Column(sa.Integer) object_count = sa.Column(sa.Integer) temp_volume_id = sa.Column(sa.String(36)) temp_snapshot_id = sa.Column(sa.String(36)) num_dependent_backups = sa.Column(sa.Integer) snapshot_id = sa.Column(sa.String(36)) data_timestamp = sa.Column(sa.DateTime) restore_volume_id = sa.Column(sa.String(36)) encryption_key_id = sa.Column(sa.String(36)) @validates('fail_reason') def validate_fail_reason(self, key, fail_reason): return fail_reason and fail_reason[:255] or '' class BackupMetadata(BASE, CinderBase): """Represents a metadata key/value pair for a backup.""" __tablename__ = 'backup_metadata' id = sa.Column(sa.Integer, primary_key=True) key = 
sa.Column(sa.String(255)) value = sa.Column(sa.String(255)) backup_id = sa.Column( sa.String(36), sa.ForeignKey('backups.id'), nullable=False, index=True ) backup = relationship( Backup, backref="backup_metadata", foreign_keys=backup_id, primaryjoin='and_(' 'BackupMetadata.backup_id == Backup.id,' 'BackupMetadata.deleted == False)', ) class Encryption(BASE, CinderBase): """Represents encryption requirement for a volume type. Encryption here is a set of performance characteristics describing cipher, provider, and key_size for a certain volume type. """ __tablename__ = 'encryption' # NOTE (smcginnis): nullable=True triggers this to not set a default # value, but since it's a primary key the resulting schema will end up # still being NOT NULL. This is avoiding a case in MySQL where it will # otherwise set this to NOT NULL DEFAULT ''. May be harmless, but # inconsistent with previous schema. encryption_id = sa.Column( sa.String(36), primary_key=True, nullable=True, ) cipher = sa.Column(sa.String(255)) key_size = sa.Column(sa.Integer) provider = sa.Column(sa.String(255)) control_location = sa.Column(sa.String(255)) # NOTE(joel-coffman): The volume_type_id must be unique or else the # referenced volume type becomes ambiguous. That is, specifying the # volume type is not sufficient to identify a particular encryption # scheme unless each volume type is associated with at most one # encryption scheme. # TODO(stephenfin): Make this a foreign key volume_type_id = sa.Column(sa.String(36), nullable=False) volume_type = relationship( VolumeType, backref="encryption", foreign_keys=volume_type_id, primaryjoin='and_(' 'Encryption.volume_type_id == VolumeType.id,' 'Encryption.deleted == False)', ) class Transfer(BASE, CinderBase): """Represents a volume transfer request.""" __tablename__ = 'transfers' id = sa.Column(sa.String(36), primary_key=True) volume_id = sa.Column( sa.String(36), sa.ForeignKey('volumes.id'), nullable=False, index=True, ) display_name = sa.Column(sa.String(255)) salt = sa.Column(sa.String(255)) crypt_hash = sa.Column(sa.String(255)) expires_at = sa.Column(sa.DateTime) no_snapshots = sa.Column(sa.Boolean, default=False) source_project_id = sa.Column(sa.String(255), nullable=True) destination_project_id = sa.Column(sa.String(255), nullable=True) accepted = sa.Column(sa.Boolean, default=False) volume = relationship( Volume, backref="transfer", foreign_keys=volume_id, primaryjoin='and_(' 'Transfer.volume_id == Volume.id,' 'Transfer.deleted == False)', ) class DriverInitiatorData(BASE, models.TimestampMixin, models.ModelBase): """Represents private key-value pair specific an initiator for drivers""" __tablename__ = 'driver_initiator_data' __table_args__ = ( schema.UniqueConstraint("initiator", "namespace", "key"), CinderBase.__table_args__, ) id = sa.Column(sa.Integer, primary_key=True, nullable=False) initiator = sa.Column(sa.String(255), index=True, nullable=False) namespace = sa.Column(sa.String(255), nullable=False) key = sa.Column(sa.String(255), nullable=False) value = sa.Column(sa.String(255)) class Message(BASE, CinderBase): """Represents a message""" __tablename__ = 'messages' id = sa.Column(sa.String(36), primary_key=True, nullable=False) project_id = sa.Column(sa.String(255), nullable=False) # Info/Error/Warning. message_level = sa.Column(sa.String(255), nullable=False) request_id = sa.Column(sa.String(255), nullable=True) resource_type = sa.Column(sa.String(36)) # The UUID of the related resource. 
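    # e.g. resource_type='VOLUME' paired with the affected volume's UUID in
    # resource_uuid below (illustrative pairing; the actual constants are
    # defined elsewhere in cinder.message).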
resource_uuid = sa.Column(sa.String(255), nullable=True) # Operation specific event ID. event_id = sa.Column(sa.String(255), nullable=False) # Message detail ID. detail_id = sa.Column(sa.String(10), nullable=True) # Operation specific action. action_id = sa.Column(sa.String(10), nullable=True) # After this time the message may no longer exist expires_at = sa.Column(sa.DateTime, nullable=True, index=True) class ImageVolumeCacheEntry(BASE, models.ModelBase): """Represents an image volume cache entry""" __tablename__ = 'image_volume_cache_entries' id = sa.Column(sa.Integer, primary_key=True, nullable=False) host = sa.Column(sa.String(255), index=True, nullable=False) cluster_name = sa.Column(sa.String(255), nullable=True) image_id = sa.Column(sa.String(36), index=True, nullable=False) # TODO(stephenfin): Add nullable=False image_updated_at = sa.Column(sa.DateTime) volume_id = sa.Column(sa.String(36), nullable=False) size = sa.Column(sa.Integer, nullable=False) last_used = sa.Column( sa.DateTime, nullable=False, default=lambda: timeutils.utcnow(), ) class Worker(BASE, CinderBase): """Represents all resources that are being worked on by a node.""" __tablename__ = 'workers' __table_args__ = ( schema.UniqueConstraint('resource_type', 'resource_id'), CinderBase.__table_args__, ) # We want to overwrite default updated_at definition so we timestamp at # creation as well updated_at = sa.Column( sa.DateTime, default=timeutils.utcnow, onupdate=timeutils.utcnow ) # Id added for convenience and speed on some operations id = sa.Column(sa.Integer, primary_key=True, autoincrement=True) # Type of the resource we are working on (Volume, Snapshot, Backup) it must # match the Versioned Object class name. resource_type = sa.Column(sa.String(40), nullable=False) # UUID of the resource we are working on resource_id = sa.Column(sa.String(36), nullable=False) # Status that should be cleaned on service failure status = sa.Column(sa.String(255), nullable=False) # Service that is currently processing the operation service_id = sa.Column( sa.Integer, sa.ForeignKey('services.id'), nullable=True, index=True, ) # To prevent claiming and updating races race_preventer = sa.Column( sa.Integer, nullable=False, default=0, server_default=sa.text('0'), ) # This is a flag we don't need to store in the DB as it is only used when # we are doing the cleanup to let decorators know cleaning = False service = relationship( 'Service', backref="workers", foreign_keys=service_id, primaryjoin='Worker.service_id == Service.id', ) class AttachmentSpecs(BASE, CinderBase): """Represents attachment specs as k/v pairs for a volume_attachment. DO NOT USE - NOTHING SHOULD WRITE NEW DATA TO THIS TABLE The volume_attachment.connector column should be used instead. 
""" __tablename__ = 'attachment_specs' id = sa.Column(sa.Integer, primary_key=True) key = sa.Column(sa.String(255)) value = sa.Column(sa.String(255)) attachment_id = sa.Column( sa.String(36), sa.ForeignKey('volume_attachment.id'), nullable=False, index=True, ) volume_attachment = relationship( VolumeAttachment, backref="attachment_specs", foreign_keys=attachment_id, primaryjoin='and_(' 'AttachmentSpecs.attachment_id == VolumeAttachment.id,' 'AttachmentSpecs.deleted == False)', ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/exception.py0000664000175000017500000007712400000000000016716 0ustar00zuulzuul00000000000000# Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Cinder base exception handling. Includes decorator for re-raising Cinder-type exceptions. SHOULD include dedicated exception logging. """ from typing import Optional, Union from oslo_log import log as logging from oslo_versionedobjects import exception as obj_exc import webob.exc from webob.util import status_generic_reasons from webob.util import status_reasons from cinder.i18n import _ LOG = logging.getLogger(__name__) class ConvertedException(webob.exc.WSGIHTTPException): def __init__(self, code: int = 500, title: str = "", explanation: str = ""): self.code = code # There is a strict rule about constructing status line for HTTP: # '...Status-Line, consisting of the protocol version followed by a # numeric status code and its associated textual phrase, with each # element separated by SP characters' # (http://www.faqs.org/rfcs/rfc2616.html) # 'code' and 'title' can not be empty because they correspond # to numeric status code and its associated text if title: self.title = title else: try: self.title = status_reasons[self.code] except KeyError: generic_code = self.code // 100 self.title = status_generic_reasons[generic_code] self.explanation = explanation super(ConvertedException, self).__init__() class CinderException(Exception): """Base Cinder Exception To correctly use this class, inherit from it and define a 'message' property. That message will get printf'd with the keyword arguments provided to the constructor. """ message = _("An unknown exception occurred.") code = 500 headers: dict = {} safe = False def __init__(self, message: Optional[Union[str, tuple]] = None, **kwargs): self.kwargs = kwargs self.kwargs['message'] = message if 'code' not in self.kwargs: try: self.kwargs['code'] = self.code except AttributeError: pass for k, v in self.kwargs.items(): if isinstance(v, Exception): # NOTE(tommylikehu): If this is a cinder exception it will # return the msg object, so we won't be preventing # translations. 
self.kwargs[k] = str(v) if self._should_format(): try: message = self.message % kwargs except Exception: # NOTE(melwitt): This is done in a separate method so it can be # monkey-patched during testing to make it a hard failure. self._log_exception() message = self.message elif isinstance(message, Exception): # NOTE(tommylikehu): If this is a cinder exception it will # return the msg object, so we won't be preventing # translations. message = str(message) # NOTE(luisg): We put the actual message in 'msg' so that we can access # it, because if we try to access the message via 'message' it will be # overshadowed by the class' message attribute self.msg = message super(CinderException, self).__init__(message) # Oslo.messaging use the argument 'message' to rebuild exception # directly at the rpc client side, therefore we should not use it # in our keyword arguments, otherwise, the rebuild process will fail # with duplicate keyword exception. self.kwargs.pop('message', None) def _log_exception(self) -> None: # kwargs doesn't match a variable in the message # log the issue and the kwargs LOG.exception('Exception in string format operation:') for name, value in self.kwargs.items(): LOG.error("%(name)s: %(value)s", {'name': name, 'value': value}) def _should_format(self) -> bool: return self.kwargs['message'] is None or '%(message)' in self.message class VolumeBackendAPIException(CinderException): message = _("Bad or unexpected response from the storage volume " "backend API: %(data)s") class VolumeDriverException(CinderException): message = _("Volume driver reported an error: %(message)s") class BackupDriverException(CinderException): message = _("Backup driver reported an error: %(reason)s") class BackupRestoreCancel(CinderException): message = _("Canceled backup %(back_id)s restore on volume %(vol_id)s") class GlanceConnectionFailed(CinderException): message = _("Connection to glance failed: %(reason)s") class ProgrammingError(CinderException): message = _('Programming error in Cinder: %(reason)s') class NotAuthorized(CinderException): message = _("Not authorized.") code = 403 class AdminRequired(NotAuthorized): message = _("User does not have admin privileges") class PolicyNotAuthorized(NotAuthorized): message = _("Policy doesn't allow %(action)s to be performed.") class ImageNotAuthorized(CinderException): message = _("Not authorized for image %(image_id)s.") class DriverNotInitialized(CinderException): message = _("Volume driver not ready.") class Invalid(CinderException): message = _("Unacceptable parameters.") code = 400 class InvalidSnapshot(Invalid): message = _("Invalid snapshot: %(reason)s") class InvalidVolumeAttachMode(Invalid): message = _("Invalid attaching mode '%(mode)s' for " "volume %(volume_id)s.") class VolumeAttached(Invalid): message = _("Volume %(volume_id)s is still attached, detach volume first.") class InvalidResults(Invalid): message = _("The results are invalid.") class InvalidInput(Invalid): message = _("Invalid input received: %(reason)s") class InvalidAvailabilityZone(Invalid): message = _("Availability zone '%(az)s' is invalid.") class InvalidTypeAvailabilityZones(Invalid): message = _("Volume type is only supported in these availability zones: " "%(az)s") class InvalidVolumeType(Invalid): message = _("Invalid volume type: %(reason)s") class InvalidGroupType(Invalid): message = _("Invalid group type: %(reason)s") class InvalidVolume(Invalid): message = _("Invalid volume: %(reason)s") class ResourceConflict(Invalid): message = _("Resource conflict: %(reason)s") code 
= 409 class InvalidContentType(Invalid): message = _("Invalid content type %(content_type)s.") class InvalidHost(Invalid): message = _("Invalid host: %(reason)s") # Cannot be templated as the error syntax varies. # msg needs to be constructed when raised. class InvalidParameterValue(Invalid): message = "%(err)s" class InvalidAuthKey(Invalid): message = _("Invalid auth key: %(reason)s") class InvalidConfigurationValue(Invalid): message = _('Value "%(value)s" is not valid for ' 'configuration option "%(option)s"') class ServiceUnavailable(Invalid): message = _("Service is unavailable at this time.") class UnavailableDuringUpgrade(Invalid): message = _('Cannot perform %(action)s during system upgrade.') class ImageUnacceptable(Invalid): message = _("Image %(image_id)s is unacceptable: %(reason)s") class ImageTooBig(Invalid): message = _("Image %(image_id)s size exceeded available " "disk space: %(reason)s") class DeviceUnavailable(Invalid): message = _("The device in the path %(path)s is unavailable: %(reason)s") class SnapshotUnavailable(VolumeBackendAPIException): message = _("The snapshot is unavailable: %(data)s") class InvalidUUID(Invalid): message = _("Expected a UUID but received %(uuid)s.") class InvalidAPIVersionString(Invalid): message = _("API Version String %(version)s is of invalid format. Must " "be of format MajorNum.MinorNum.") class VersionNotFoundForAPIMethod(Invalid): message = _("API version %(version)s is not supported on this method.") class InvalidGlobalAPIVersion(Invalid): message = _("Version %(req_ver)s is not supported by the API. Minimum " "is %(min_ver)s and maximum is %(max_ver)s.") class ValidationError(Invalid): message = "%(detail)s" class APIException(CinderException): message = _("Error while requesting %(service)s API.") def __init__(self, message=None, **kwargs): if 'service' not in kwargs: kwargs['service'] = 'unknown' super(APIException, self).__init__(message, **kwargs) class APITimeout(APIException): message = _("Timeout while requesting %(service)s API.") class RPCTimeout(CinderException): message = _("Timeout while requesting capabilities from backend " "%(service)s.") code = 502 class Duplicate(CinderException): pass class NotFound(CinderException): message = _("Resource could not be found.") code = 404 safe = True class GlanceStoreNotFound(NotFound): message = _("Store %(store_id)s not enabled in glance.") class GlanceStoreReadOnly(Invalid): message = _("Store %(store_id)s is read-only in glance.") class VolumeNotFound(NotFound): message = _("Volume %(volume_id)s could not be found.") class MessageNotFound(NotFound): message = _("Message %(message_id)s could not be found.") class VolumeAttachmentNotFound(NotFound): message = _("Volume attachment could not be found with " "filter: %(filter)s.") class VolumeMetadataNotFound(NotFound): message = _("Volume %(volume_id)s has no metadata with " "key %(metadata_key)s.") class InvalidVolumeMetadata(Invalid): message = _("Invalid metadata: %(reason)s") class InvalidVolumeMetadataSize(Invalid): message = _("Invalid metadata size: %(reason)s") class SnapshotMetadataNotFound(NotFound): message = _("Snapshot %(snapshot_id)s has no metadata with " "key %(metadata_key)s.") class VolumeTypeNotFound(NotFound): message = _("Volume type %(volume_type_id)s could not be found.") class VolumeTypeNotFoundByName(VolumeTypeNotFound): message = _("Volume type with name %(volume_type_name)s " "could not be found.") class VolumeTypeAccessNotFound(NotFound): message = _("Volume type access not found for %(volume_type_id)s / " 
"%(project_id)s combination.") class VolumeTypeExtraSpecsNotFound(NotFound): message = _("Volume Type %(volume_type_id)s has no extra specs with " "key %(extra_specs_key)s.") class VolumeTypeInUse(CinderException): message = _("Volume Type %(volume_type_id)s deletion is not allowed with " "volumes present with the type.") class VolumeTypeDeletionError(Invalid): message = _("The volume type %(volume_type_id)s is the only currently " "defined volume type and cannot be deleted.") class VolumeTypeDefaultDeletionError(Invalid): message = _("The volume type %(volume_type_id)s is a default volume " "type and cannot be deleted.") class VolumeTypeDefaultMisconfiguredError(CinderException): message = _("The request cannot be fulfilled as the default volume type " "%(volume_type_name)s cannot be found.") class VolumeTypeProjectDefaultNotFound(NotFound): message = _("Default type for project %(project_id)s not found.") class GroupTypeNotFound(NotFound): message = _("Group type %(group_type_id)s could not be found.") class GroupTypeNotFoundByName(GroupTypeNotFound): message = _("Group type with name %(group_type_name)s " "could not be found.") class GroupTypeAccessNotFound(NotFound): message = _("Group type access not found for %(group_type_id)s / " "%(project_id)s combination.") class GroupTypeSpecsNotFound(NotFound): message = _("Group Type %(group_type_id)s has no specs with " "key %(group_specs_key)s.") class GroupTypeInUse(CinderException): message = _("Group Type %(group_type_id)s deletion is not allowed with " "groups present with the type.") class SnapshotNotFound(NotFound): message = _("Snapshot %(snapshot_id)s could not be found.") class ServerNotFound(NotFound): message = _("Instance %(uuid)s could not be found.") class VolumeSnapshotNotFound(NotFound): message = _("No snapshots found for volume %(volume_id)s.") class VolumeIsBusy(CinderException): message = _("deleting volume %(volume_name)s that has snapshot") class SnapshotIsBusy(CinderException): message = _("deleting snapshot %(snapshot_name)s that has " "dependent volumes") class InvalidImageRef(Invalid): message = _("Invalid image href %(image_href)s.") class InvalidSignatureImage(Invalid): message = _("Signature metadata is incomplete for image: " "%(image_id)s.") class ImageSignatureVerificationException(CinderException): message = _("Failed to verify image signature, reason: %(reason)s.") class ImageNotFound(NotFound): message = _("Image %(image_id)s could not be found.") class ServiceNotFound(NotFound): def __init__(self, message=None, **kwargs): if not message: if kwargs.get('host', None): self.message = _("Service %(service_id)s could not be " "found on host %(host)s.") else: self.message = _("Service %(service_id)s could not be found.") super(ServiceNotFound, self).__init__(message, **kwargs) class ServiceTooOld(Invalid): message = _("Service is too old to fulfil this request.") class WorkerNotFound(NotFound): message = _("Worker with %s could not be found.") def __init__(self, message=None, **kwargs): keys_list = ('{0}=%({0})s'.format(key) for key in kwargs) placeholder = ', '.join(keys_list) self.message = self.message % placeholder super(WorkerNotFound, self).__init__(message, **kwargs) class WorkerExists(Duplicate): message = _("Worker for %(type)s %(id)s already exists.") class CleanableInUse(Invalid): message = _('%(type)s with id %(id)s is already being cleaned up or ' 'another host has taken over it.') class ClusterNotFound(NotFound): message = _('Cluster %(id)s could not be found.') class ClusterHasHosts(Invalid): 
message = _("Cluster %(id)s still has hosts.") class ClusterExists(Duplicate): message = _("Cluster %(name)s already exists.") class HostNotFound(NotFound): message = _("Host %(host)s could not be found.") class SchedulerHostFilterNotFound(NotFound): message = _("Scheduler Host Filter %(filter_name)s could not be found.") class SchedulerHostWeigherNotFound(NotFound): message = _("Scheduler Host Weigher %(weigher_name)s could not be found.") class InvalidReservationExpiration(Invalid): message = _("Invalid reservation expiration %(expire)s.") class InvalidQuotaValue(Invalid): message = _("Change would make usage less than 0 for the following " "resources: %(unders)s") class QuotaNotFound(NotFound): message = _("Quota could not be found") class QuotaResourceUnknown(QuotaNotFound): message = _("Unknown quota resources %(unknown)s.") class ProjectQuotaNotFound(QuotaNotFound): message = _("Quota for project %(project_id)s could not be found.") class QuotaClassNotFound(QuotaNotFound): message = _("Quota class %(class_name)s could not be found.") class QuotaUsageNotFound(QuotaNotFound): message = _("Quota usage for project %(project_id)s could not be found.") class OverQuota(CinderException): message = _("Quota exceeded for resources: %(overs)s") class FileNotFound(NotFound): message = _("File %(file_path)s could not be found.") class VolumeTypeExists(Duplicate): message = _("Volume Type %(id)s already exists.") class VolumeTypeAccessExists(Duplicate): message = _("Volume type access for %(volume_type_id)s / " "%(project_id)s combination already exists.") class VolumeTypeEncryptionExists(Invalid): message = _("Volume type encryption for type %(type_id)s already exists.") class VolumeTypeEncryptionNotFound(NotFound): message = _("Volume type encryption for type %(type_id)s does not exist.") class GroupTypeExists(Duplicate): message = _("Group Type %(id)s already exists.") class GroupTypeAccessExists(Duplicate): message = _("Group type access for %(group_type_id)s / " "%(project_id)s combination already exists.") class GroupVolumeTypeMappingExists(Duplicate): message = _("Group volume type mapping for %(group_id)s / " "%(volume_type_id)s combination already exists.") class MalformedRequestBody(CinderException): message = _("Malformed message body: %(reason)s") class ConfigNotFound(NotFound): message = _("Could not find config at %(path)s") class ParameterNotFound(NotFound): message = _("Could not find parameter %(param)s") class NoValidBackend(CinderException): message = _("No valid backend was found. %(reason)s") class QuotaError(CinderException): message = _("Quota exceeded: code=%(code)s") code = 413 headers = {'Retry-After': '0'} safe = True class VolumeSizeExceedsAvailableQuota(QuotaError): message = _("Requested volume or snapshot exceeds allowed %(name)s " "quota. Requested %(requested)sG, quota is %(quota)sG and " "%(consumed)sG has been consumed.") def __init__(self, message=None, **kwargs): kwargs.setdefault('name', 'gigabytes') super(VolumeSizeExceedsAvailableQuota, self).__init__( message, **kwargs) class VolumeSizeExceedsLimit(QuotaError): message = _("Requested volume size %(size)dG is larger than " "maximum allowed limit %(limit)dG.") class VolumeBackupSizeExceedsAvailableQuota(QuotaError): message = _("Requested backup exceeds allowed Backup gigabytes " "quota. 
Requested %(requested)sG, quota is %(quota)sG and " "%(consumed)sG has been consumed.") class VolumeLimitExceeded(QuotaError): message = _("Maximum number of volumes allowed (%(allowed)d) exceeded for " "quota '%(name)s'.") def __init__(self, message=None, **kwargs): kwargs.setdefault('name', 'volumes') super(VolumeLimitExceeded, self).__init__(message, **kwargs) class SnapshotLimitExceeded(QuotaError): message = _("Maximum number of snapshots allowed (%(allowed)d) exceeded") class UnexpectedOverQuota(QuotaError): message = _("Unexpected over quota on %(name)s.") class BackupLimitExceeded(QuotaError): message = _("Maximum number of backups allowed (%(allowed)d) exceeded") class ImageLimitExceeded(QuotaError): message = _("Image quota exceeded") class VolumeTypeCreateFailed(CinderException): message = _("Cannot create volume_type with " "name %(name)s and specs %(extra_specs)s") class VolumeTypeUpdateFailed(CinderException): message = _("Cannot update volume_type %(id)s") class GroupTypeCreateFailed(CinderException): message = _("Cannot create group_type with " "name %(name)s and specs %(group_specs)s") class GroupTypeUpdateFailed(CinderException): message = _("Cannot update group_type %(id)s") class GroupLimitExceeded(QuotaError): message = _("Maximum number of groups allowed (%(allowed)d) exceeded") class UnknownCmd(VolumeDriverException): message = _("Unknown or unsupported command %(cmd)s") class MalformedResponse(VolumeDriverException): message = _("Malformed response to command %(cmd)s: %(reason)s") class FailedCmdWithDump(VolumeDriverException): message = _("Operation failed with status=%(status)s. Full dump: %(data)s") class InvalidConnectorException(VolumeDriverException): message = _("Connector doesn't have required information: %(missing)s") class GlanceMetadataExists(Invalid): message = _("Glance metadata cannot be updated, key %(key)s" " exists for volume id %(volume_id)s") class GlanceMetadataNotFound(NotFound): message = _("Glance metadata for volume/snapshot %(id)s cannot be found.") class ImageDownloadFailed(CinderException): message = _("Failed to download image %(image_href)s, reason: %(reason)s") class ExportFailure(Invalid): message = _("Failed to export for volume: %(reason)s") class RemoveExportException(VolumeDriverException): message = _("Failed to remove export for volume %(volume)s: %(reason)s") class MetadataUpdateFailure(Invalid): message = _("Failed to update metadata for volume: %(reason)s") class MetadataCopyFailure(Invalid): message = _("Failed to copy metadata to volume: %(reason)s") class InvalidMetadataType(Invalid): message = _("The type of metadata: %(metadata_type)s for volume/snapshot " "%(id)s is invalid.") class ImageCopyFailure(Invalid): message = _("Failed to copy image to volume: %(reason)s") class BackupInvalidCephArgs(BackupDriverException): message = _("Invalid Ceph args provided for backup rbd operation") class BackupOperationError(Invalid): message = _("An error has occurred during backup operation") class BackupMetadataUnsupportedVersion(BackupDriverException): message = _("Unsupported backup metadata version requested") class BackupMetadataNotFound(NotFound): message = _("Backup %(backup_id)s has no metadata with " "key %(metadata_key)s.") class VolumeMetadataBackupExists(BackupDriverException): message = _("Metadata backup already exists for this volume") class BackupRBDOperationFailed(BackupDriverException): message = _("Backup RBD operation failed") class EncryptedBackupOperationFailed(BackupDriverException): message = _("Backup 
operation of an encrypted volume failed.") class BackupNotFound(NotFound): message = _("Backup %(backup_id)s could not be found.") class InvalidBackup(Invalid): message = _("Invalid backup: %(reason)s") class SwiftConnectionFailed(BackupDriverException): message = _("Connection to swift failed: %(reason)s") class TransferNotFound(NotFound): message = _("Transfer %(transfer_id)s could not be found.") class VolumeMigrationFailed(CinderException): message = _("Volume migration failed: %(reason)s") class SSHInjectionThreat(CinderException): message = _("SSH command injection detected: %(command)s") class QoSSpecsExists(Duplicate): message = _("QoS Specs %(specs_id)s already exists.") class QoSSpecsCreateFailed(CinderException): message = _("Failed to create qos_specs: " "%(name)s with specs %(qos_specs)s.") class QoSSpecsUpdateFailed(CinderException): message = _("Failed to update qos_specs: " "%(specs_id)s with specs %(qos_specs)s.") class QoSSpecsNotFound(NotFound): message = _("No such QoS spec %(specs_id)s.") class QoSSpecsAssociateFailed(CinderException): message = _("Failed to associate qos_specs: " "%(specs_id)s with type %(type_id)s.") class QoSSpecsDisassociateFailed(CinderException): message = _("Failed to disassociate qos_specs: " "%(specs_id)s with type %(type_id)s.") class QoSSpecsKeyNotFound(NotFound): message = _("QoS spec %(specs_id)s has no spec with " "key %(specs_key)s.") class InvalidQoSSpecs(Invalid): message = _("Invalid qos specs: %(reason)s") class QoSSpecsInUse(CinderException): message = _("QoS Specs %(specs_id)s is still associated with entities.") class KeyManagerError(CinderException): message = _("key manager error: %(reason)s") class ManageExistingInvalidReference(CinderException): message = _("Manage existing volume failed due to invalid backend " "reference %(existing_ref)s: %(reason)s") class ManageExistingAlreadyManaged(CinderException): message = _("Unable to manage existing volume. " "Volume %(volume_ref)s already managed.") class InvalidReplicationTarget(Invalid): message = _("Invalid Replication Target: %(reason)s") class UnableToFailOver(CinderException): message = _("Unable to failover to replication target: %(reason)s).") class ReplicationError(CinderException): message = _("Volume %(volume_id)s replication " "error: %(reason)s") class ReplicationGroupError(CinderException): message = _("Group %(group_id)s replication " "error: %(reason)s.") class ManageExistingVolumeTypeMismatch(CinderException): message = _("Manage existing volume failed due to volume type mismatch: " "%(reason)s") class ExtendVolumeError(CinderException): message = _("Error extending volume: %(reason)s") class EvaluatorParseException(Exception): message = _("Error during evaluator parsing: %(reason)s") class LockCreationFailed(CinderException): message = _('Unable to create lock. Coordination backend not started.') OrphanedObjectError = obj_exc.OrphanedObjectError ObjectActionError = obj_exc.ObjectActionError class CappedVersionUnknown(CinderException): message = _("Unrecoverable Error: Versioned Objects in DB are capped to " "unknown version %(version)s. Most likely your environment " "contains only new services and you're trying to start an " "older one. 
Use `cinder-manage service list` to check that " "and upgrade this service.") class VolumeGroupNotFound(CinderException): message = _('Unable to find Volume Group: %(vg_name)s') class VolumeGroupCreationFailed(CinderException): message = _('Failed to create Volume Group: %(vg_name)s') class VolumeNotDeactivated(CinderException): message = _('Volume %(name)s was not deactivated in time.') class VolumeDeviceNotFound(CinderException): message = _('Volume device not found at %(device)s.') # RemoteFS drivers class RemoteFSException(VolumeDriverException): message = _("Unknown RemoteFS exception") class RemoteFSConcurrentRequest(RemoteFSException): message = _("A concurrent, possibly contradictory, request " "has been made.") class RemoteFSNoSharesMounted(RemoteFSException): message = _("No mounted shares found") class RemoteFSNoSuitableShareFound(RemoteFSException): message = _("There is no share which can host %(volume_size)sG") class RemoteFSInvalidBackingFile(VolumeDriverException): message = _("File %(path)s has invalid backing file %(backing_file)s.") # NFS driver class NfsException(RemoteFSException): message = _("Unknown NFS exception") class NfsNoSharesMounted(RemoteFSNoSharesMounted): message = _("No mounted NFS shares found") class NfsNoSuitableShareFound(RemoteFSNoSuitableShareFound): message = _("There is no share which can host %(volume_size)sG") # Fibre Channel Zone Manager class ZoneManagerException(CinderException): message = _("Fibre Channel connection control failure: %(reason)s") class FCZoneDriverException(CinderException): message = _("Fibre Channel Zone operation failed: %(reason)s") class FCSanLookupServiceException(CinderException): message = _("Fibre Channel SAN Lookup failure: %(reason)s") class ZoneManagerNotInitialized(CinderException): message = _("Fibre Channel Zone Manager not initialized") # ConsistencyGroup class ConsistencyGroupNotFound(NotFound): message = _("ConsistencyGroup %(consistencygroup_id)s could not be found.") class InvalidConsistencyGroup(Invalid): message = _("Invalid ConsistencyGroup: %(reason)s") # Group class GroupNotFound(NotFound): message = _("Group %(group_id)s could not be found.") class InvalidGroup(Invalid): message = _("Invalid Group: %(reason)s") class InvalidGroupStatus(Invalid): message = _("Invalid Group Status: %(reason)s") # CgSnapshot class CgSnapshotNotFound(NotFound): message = _("CgSnapshot %(cgsnapshot_id)s could not be found.") class InvalidCgSnapshot(Invalid): message = _("Invalid CgSnapshot: %(reason)s") # GroupSnapshot class GroupSnapshotNotFound(NotFound): message = _("GroupSnapshot %(group_snapshot_id)s could not be found.") class InvalidGroupSnapshot(Invalid): message = _("Invalid GroupSnapshot: %(reason)s") class InvalidGroupSnapshotStatus(Invalid): message = _("Invalid GroupSnapshot Status: %(reason)s") # Target drivers class ISCSITargetCreateFailed(CinderException): message = _("Failed to create iscsi target for volume %(volume_id)s.") class ISCSITargetRemoveFailed(CinderException): message = _("Failed to remove iscsi target for volume %(volume_id)s.") class ISCSITargetAttachFailed(CinderException): message = _("Failed to attach iSCSI target for volume %(volume_id)s.") class ISCSITargetDetachFailed(CinderException): message = _("Failed to detach iSCSI target for volume %(volume_id)s.") class TargetUpdateFailed(CinderException): message = _("Failed to update target for volume %(volume_id)s.") class ISCSITargetHelperCommandFailed(CinderException): message = "%(error_message)s" class 
BadHTTPResponseStatus(VolumeDriverException): message = _("Bad HTTP response status %(status)s") class BadResetResourceStatus(CinderException): message = _("Bad reset resource status : %(reason)s") class MetadataAbsent(CinderException): message = _("There is no metadata in DB object.") class NotSupportedOperation(Invalid): message = _("Operation not supported: %(operation)s.") code = 405 class AttachmentSpecsNotFound(NotFound): message = _("Attachment %(attachment_id)s has no " "key %(specs_key)s.") class InvalidName(Invalid): message = _("An invalid 'name' value was provided. %(reason)s") class ServiceUserTokenNoAuth(CinderException): message = _("The [service_user] send_service_user_token option was " "requested, but no service auth could be loaded. Please check " "the [service_user] configuration section.") class RekeyNotSupported(CinderException): message = _("Rekey not supported.") class ImageCompressionNotAllowed(CinderException): message = _("Image compression upload disallowed, but container_format " "is compressed") class ImageConversionNotAllowed(CinderException): message = _("Image Conversion disallowed for image %(image_id)s: " "%(reason)s") class CinderAcceleratorError(CinderException): message = _("Cinder accelerator %(accelerator)s encountered an error " "while compressing/decompressing image.\n" "Command %(cmd)s execution failed.\n" "%(description)s\n" "Reason: %(reason)s") class SnapshotLimitReached(CinderException): message = _("Exceeded the configured limit of " "%(set_limit)s snapshots per volume.") class DriverInitiatorDataExists(Duplicate): message = _( "Driver initiator data for initiator '%(initiator)s' and backend " "'%(namespace)s' with key '%(key)s' already exists." ) class RequirementMissing(CinderException): message = _('Requirement %(req)s is not installed.') class ConflictNovaUsingAttachment(CinderException): message = _("Detach volume from instance %(instance_id)s using the " "Compute API") code = 409 safe = True ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/flow_utils.py0000664000175000017500000000640400000000000017100 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import os from typing import Any, Optional from oslo_log import log as logging # For more information please visit: https://wiki.openstack.org/wiki/TaskFlow from taskflow import formatters from taskflow.listeners import base from taskflow.listeners import logging as logging_listener from taskflow import task from cinder import exception LOG = logging.getLogger(__name__) def _make_task_name(cls, addons: Optional[list[str]] = None) -> str: """Makes a pretty name for a task class.""" base_name = ".".join([cls.__module__, cls.__name__]) extra = '' if addons: extra = ';%s' % (", ".join([str(a) for a in addons])) return base_name + extra class CinderTask(task.Task): """The root task class for all cinder tasks. 
It automatically names the given task using the module and class that implement the given task as the task name. """ def __init__(self, addons: Optional[list[str]] = None, **kwargs: Any) -> None: super(CinderTask, self).__init__(self.make_name(addons), **kwargs) @classmethod def make_name(cls, addons: Optional[list[str]] = None) -> str: return _make_task_name(cls, addons) class SpecialFormatter(formatters.FailureFormatter): #: Exception is an excepted case, don't include traceback in log if fails. _NO_TRACE_EXCEPTIONS = (exception.InvalidInput, exception.QuotaError) def __init__(self, engine): super(SpecialFormatter, self).__init__(engine) def format(self, fail, atom_matcher): if fail.check(*self._NO_TRACE_EXCEPTIONS) is not None: exc_info = None exc_details = '%s%s' % (os.linesep, fail.pformat(traceback=False)) return (exc_info, exc_details) else: return super(SpecialFormatter, self).format(fail, atom_matcher) class DynamicLogListener(logging_listener.DynamicLoggingListener): """This is used to attach to taskflow engines while they are running. It provides a bunch of useful features that expose the actions happening inside a taskflow engine, which can be useful for developers for debugging, for operations folks for monitoring and tracking of the resource actions and more... """ def __init__(self, engine, task_listen_for=base.DEFAULT_LISTEN_FOR, flow_listen_for=base.DEFAULT_LISTEN_FOR, retry_listen_for=base.DEFAULT_LISTEN_FOR, logger=LOG): super(DynamicLogListener, self).__init__( engine, task_listen_for=task_listen_for, flow_listen_for=flow_listen_for, retry_listen_for=retry_listen_for, log=logger, fail_formatter=SpecialFormatter(engine)) ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1759315577.075118 cinder-27.0.0/cinder/group/0000775000175000017500000000000000000000000015467 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/group/__init__.py0000664000175000017500000000164100000000000017602 0ustar00zuulzuul00000000000000# Copyright (C) 2016 EMC Corporation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # Importing full names to not pollute the namespace and cause possible # collisions with use of 'from cinder.transfer import ' elsewhere. from oslo_utils import importutils from cinder.common import config CONF = config.CONF API = importutils.import_class( CONF.group_api_class) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/group/api.py0000664000175000017500000014612300000000000016621 0ustar00zuulzuul00000000000000# Copyright (C) 2016 EMC Corporation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Handles all requests relating to groups. """ from oslo_config import cfg from oslo_log import log as logging from oslo_utils import excutils from oslo_utils import timeutils from oslo_utils import uuidutils from cinder import db from cinder.db import base from cinder import exception from cinder.i18n import _ from cinder import objects from cinder.objects import fields as c_fields from cinder.policies import group_actions as gp_action_policy from cinder.policies import group_snapshot_actions as gsnap_action_policy from cinder.policies import group_snapshots as gsnap_policy from cinder.policies import groups as group_policy from cinder import quota from cinder import quota_utils from cinder.scheduler import rpcapi as scheduler_rpcapi from cinder.volume import api as volume_api from cinder.volume import rpcapi as volume_rpcapi from cinder.volume import volume_types from cinder.volume import volume_utils CONF = cfg.CONF LOG = logging.getLogger(__name__) GROUP_QUOTAS = quota.GROUP_QUOTAS VALID_REMOVE_VOL_FROM_GROUP_STATUS = ( 'available', 'in-use', 'error', 'error_deleting') VALID_ADD_VOL_TO_GROUP_STATUS = ( 'available', 'in-use') class API(base.Base): """API for interacting with the volume manager for groups.""" def __init__(self): self.scheduler_rpcapi = scheduler_rpcapi.SchedulerAPI() self.volume_rpcapi = volume_rpcapi.VolumeAPI() self.volume_api = volume_api.API() super().__init__() def _extract_availability_zone(self, availability_zone): raw_zones = self.volume_api.list_availability_zones(enable_cache=True) availability_zones = set([az['name'] for az in raw_zones]) if CONF.storage_availability_zone: availability_zones.add(CONF.storage_availability_zone) if availability_zone is None: if CONF.default_availability_zone: availability_zone = CONF.default_availability_zone else: # For backwards compatibility use the storage_availability_zone availability_zone = CONF.storage_availability_zone if availability_zone not in availability_zones: if CONF.allow_availability_zone_fallback: original_az = availability_zone availability_zone = ( CONF.default_availability_zone or CONF.storage_availability_zone) LOG.warning("Availability zone '%(s_az)s' not found, falling " "back to '%(s_fallback_az)s'.", {'s_az': original_az, 's_fallback_az': availability_zone}) else: msg = _("Availability zone '%(s_az)s' is invalid.") msg = msg % {'s_az': availability_zone} raise exception.InvalidInput(reason=msg) return availability_zone def _update_volumes_host(self, context, group): volumes = objects.VolumeList.get_all_by_generic_group(context, group.id) for vol in volumes: # Update the host field for the volume. vol.host = group.host vol.cluster_name = group.cluster_name vol.save() def create(self, context, name, description, group_type, volume_types, availability_zone=None): context.authorize(group_policy.CREATE_POLICY) req_volume_types = [] # NOTE: Admin context is required to get extra_specs of volume_types. 
req_volume_types = (self.db.volume_types_get_by_name_or_id( context.elevated(), volume_types)) if not uuidutils.is_uuid_like(group_type): req_group_type = self.db.group_type_get_by_name(context, group_type) else: try: req_group_type = self.db.group_type_get(context, group_type) except exception.GroupTypeNotFound: # check again if we get this group type by uuid-like name try: req_group_type = self.db.group_type_get_by_name( context, group_type) except exception.GroupTypeNotFoundByName: raise exception.GroupTypeNotFound(group_type_id=group_type) availability_zone = self._extract_availability_zone(availability_zone) kwargs = {'user_id': context.user_id, 'project_id': context.project_id, 'availability_zone': availability_zone, 'status': c_fields.GroupStatus.CREATING, 'name': name, 'description': description, 'volume_type_ids': [t['id'] for t in req_volume_types], 'group_type_id': req_group_type['id'], 'replication_status': c_fields.ReplicationStatus.DISABLED} try: reservations = GROUP_QUOTAS.reserve(context, project_id=context.project_id, groups=1) except exception.OverQuota as e: quota_utils.process_reserve_over_quota(context, e, resource='groups') group = None try: group = objects.Group(context=context, **kwargs) group.create() except Exception: with excutils.save_and_reraise_exception(): LOG.error("Error occurred when creating group" " %s.", name) GROUP_QUOTAS.rollback(context, reservations) request_spec_list = [] filter_properties_list = [] for req_volume_type in req_volume_types: request_spec = {'volume_type': req_volume_type.copy(), 'group_id': group.id} filter_properties = {} request_spec_list.append(request_spec) filter_properties_list.append(filter_properties) group_spec = {'group_type': req_group_type.copy(), 'group_id': group.id} group_filter_properties = {} # Update quota for groups GROUP_QUOTAS.commit(context, reservations) self._cast_create_group(context, group, group_spec, request_spec_list, group_filter_properties, filter_properties_list) return group def create_from_src(self, context, name, description=None, group_snapshot_id=None, source_group_id=None): context.authorize(group_policy.CREATE_POLICY) # Populate group_type_id and volume_type_ids group_type_id = None volume_type_ids = [] size = 0 if group_snapshot_id: grp_snap = self.get_group_snapshot(context, group_snapshot_id) group_type_id = grp_snap.group_type_id grp_snap_src_grp = self.get(context, grp_snap.group_id) volume_type_ids = [vt.id for vt in grp_snap_src_grp.volume_types] snapshots = objects.SnapshotList.get_all_for_group_snapshot( context, group_snapshot_id) size = sum(s.volume.size for s in snapshots) elif source_group_id: source_group = self.get(context, source_group_id) group_type_id = source_group.group_type_id volume_type_ids = [vt.id for vt in source_group.volume_types] source_vols = objects.VolumeList.get_all_by_generic_group( context, source_group.id) size = sum(v.size for v in source_vols) kwargs = { 'user_id': context.user_id, 'project_id': context.project_id, 'status': c_fields.GroupStatus.CREATING, 'name': name, 'description': description, 'group_snapshot_id': group_snapshot_id, 'source_group_id': source_group_id, 'group_type_id': group_type_id, 'volume_type_ids': volume_type_ids, 'replication_status': c_fields.ReplicationStatus.DISABLED } try: reservations = GROUP_QUOTAS.reserve(context, project_id=context.project_id, groups=1) except exception.OverQuota as e: quota_utils.process_reserve_over_quota(context, e, resource='groups') group = None try: group = objects.Group(context=context, **kwargs) 
group.create(group_snapshot_id=group_snapshot_id, source_group_id=source_group_id) except exception.GroupNotFound: with excutils.save_and_reraise_exception(): LOG.error("Source Group %(source_group)s not found when " "creating group %(group)s from source.", {'group': name, 'source_group': source_group_id}) GROUP_QUOTAS.rollback(context, reservations) except exception.GroupSnapshotNotFound: with excutils.save_and_reraise_exception(): LOG.error("Group snapshot %(group_snap)s not found when " "creating group %(group)s from source.", {'group': name, 'group_snap': group_snapshot_id}) GROUP_QUOTAS.rollback(context, reservations) except Exception: with excutils.save_and_reraise_exception(): LOG.error("Error occurred when creating group" " %(group)s from group_snapshot %(grp_snap)s.", {'group': name, 'grp_snap': group_snapshot_id}) GROUP_QUOTAS.rollback(context, reservations) # Update quota for groups GROUP_QUOTAS.commit(context, reservations) # NOTE(tommylikehu): We wrap the size inside of the attribute # 'volume_properties' as scheduler's filter logic are all designed # based on this attribute. kwargs = {'group_id': group.id, 'volume_properties': objects.VolumeProperties(size=size)} host = group.resource_backend if not host or not self.scheduler_rpcapi.validate_host_capacity( context, host, objects.RequestSpec(**kwargs)): msg = _("No valid host to create group %s.") % group.id LOG.error(msg) raise exception.InvalidGroup(reason=msg) group.assert_not_frozen() if group_snapshot_id: self._create_group_from_group_snapshot(context, group, group_snapshot_id) elif source_group_id: self._create_group_from_source_group(context, group, source_group_id) return group def _create_group_from_group_snapshot(self, context, group, group_snapshot_id): try: group_snapshot = objects.GroupSnapshot.get_by_id( context, group_snapshot_id) snapshots = objects.SnapshotList.get_all_for_group_snapshot( context, group_snapshot.id) if not snapshots: msg = _("Group snapshot is empty. No group will be created.") raise exception.InvalidGroup(reason=msg) for snapshot in snapshots: kwargs = {} kwargs['availability_zone'] = group.availability_zone kwargs['group_snapshot'] = group_snapshot kwargs['group'] = group kwargs['snapshot'] = snapshot volume_type_id = snapshot.volume_type_id if volume_type_id: kwargs['volume_type'] = ( objects.VolumeType.get_by_name_or_id( context, volume_type_id)) # Create group volume_type mapping entries try: db.group_volume_type_mapping_create(context, group.id, volume_type_id) except exception.GroupVolumeTypeMappingExists: # Only need to create one group volume_type mapping # entry for the same combination, skipping. LOG.info("A mapping entry already exists for group" " %(grp)s and volume type %(vol_type)s. " "Do not need to create again.", {'grp': group.id, 'vol_type': volume_type_id}) # Since group snapshot is passed in, the following call will # create a db entry for the volume, but will not call the # volume manager to create a real volume in the backend yet. # If error happens, taskflow will handle rollback of quota # and removal of volume entry in the db. 
try: self.volume_api.create(context, snapshot.volume_size, None, None, **kwargs) except exception.CinderException: with excutils.save_and_reraise_exception(): LOG.error("Error occurred when creating volume " "entry from snapshot in the process of " "creating group %(group)s " "from group snapshot %(group_snap)s.", {'group': group.id, 'group_snap': group_snapshot.id}) except Exception: with excutils.save_and_reraise_exception(): try: volumes = objects.VolumeList.get_all_by_generic_group( context, group.id) for vol in volumes: # NOTE(tommylikehu): `delete` is used here in order to # revert consumed quota. self.volume_api.delete(context, vol) group.destroy() finally: LOG.error("Error occurred when creating group " "%(group)s from group snapshot %(group_snap)s.", {'group': group.id, 'group_snap': group_snapshot.id}) self._update_volumes_host(context, group) self.volume_rpcapi.create_group_from_src( context, group, group_snapshot) def _create_group_from_source_group(self, context, group, source_group_id): try: source_group = objects.Group.get_by_id(context, source_group_id) source_vols = objects.VolumeList.get_all_by_generic_group( context, source_group.id) if not source_vols: msg = _("Source Group is empty. No group " "will be created.") raise exception.InvalidGroup(reason=msg) for source_vol in source_vols: kwargs = {} kwargs['availability_zone'] = group.availability_zone kwargs['source_group'] = source_group kwargs['group'] = group kwargs['source_volume'] = source_vol volume_type_id = source_vol.volume_type_id if volume_type_id: kwargs['volume_type'] = ( objects.VolumeType.get_by_name_or_id( context, volume_type_id)) # Create group volume_type mapping entries try: db.group_volume_type_mapping_create(context, group.id, volume_type_id) except exception.GroupVolumeTypeMappingExists: # Only need to create one group volume_type mapping # entry for the same combination, skipping. LOG.info("A mapping entry already exists for group" " %(grp)s and volume type %(vol_type)s. " "Do not need to create again.", {'grp': group.id, 'vol_type': volume_type_id}) # Since source_group is passed in, the following call will # create a db entry for the volume, but will not call the # volume manager to create a real volume in the backend yet. # If error happens, taskflow will handle rollback of quota # and removal of volume entry in the db. try: self.volume_api.create(context, source_vol.size, None, None, **kwargs) except exception.CinderException: with excutils.save_and_reraise_exception(): LOG.error("Error occurred when creating cloned " "volume in the process of creating " "group %(group)s from " "source group %(source_group)s.", {'group': group.id, 'source_group': source_group.id}) except Exception: with excutils.save_and_reraise_exception(): try: volumes = objects.VolumeList.get_all_by_generic_group( context, group.id) for vol in volumes: # NOTE(tommylikehu): `delete` is used here in order to # revert consumed quota. 
self.volume_api.delete(context, vol) group.destroy() finally: LOG.error("Error occurred when creating " "group %(group)s from source group " "%(source_group)s.", {'group': group.id, 'source_group': source_group.id}) self._update_volumes_host(context, group) self.volume_rpcapi.create_group_from_src(context, group, None, source_group) def _cast_create_group(self, context, group, group_spec, request_spec_list, group_filter_properties, filter_properties_list): try: for request_spec in request_spec_list: volume_type = request_spec.get('volume_type') volume_type_id = None if volume_type: volume_type_id = volume_type.get('id') specs = {} if volume_type_id: qos_specs = volume_types.get_volume_type_qos_specs( volume_type_id) specs = qos_specs['qos_specs'] if not specs: # to make sure we don't pass empty dict specs = None volume_properties = { 'size': 0, # Need to populate size for the scheduler 'user_id': context.user_id, 'project_id': context.project_id, 'status': 'creating', 'attach_status': 'detached', 'encryption_key_id': request_spec.get('encryption_key_id'), 'display_description': request_spec.get('description'), 'display_name': request_spec.get('name'), 'volume_type_id': volume_type_id, 'group_type_id': group.group_type_id, 'availability_zone': group.availability_zone } request_spec['volume_properties'] = volume_properties request_spec['qos_specs'] = specs group_properties = { 'size': 0, # Need to populate size for the scheduler 'user_id': context.user_id, 'project_id': context.project_id, 'status': 'creating', 'display_description': group_spec.get('description'), 'display_name': group_spec.get('name'), 'group_type_id': group.group_type_id, } group_spec['volume_properties'] = group_properties group_spec['qos_specs'] = None except Exception: with excutils.save_and_reraise_exception(): try: group.destroy() finally: LOG.error("Error occurred when building request spec " "list for group %s.", group.id) # Cast to the scheduler and let it handle whatever is needed # to select the target host for this group. self.scheduler_rpcapi.create_group( context, group, group_spec=group_spec, request_spec_list=request_spec_list, group_filter_properties=group_filter_properties, filter_properties_list=filter_properties_list) def update_quota(self, context, group, num, project_id=None): reserve_opts = {'groups': num} try: reservations = GROUP_QUOTAS.reserve(context, project_id=project_id, **reserve_opts) if reservations: GROUP_QUOTAS.commit(context, reservations) except Exception as e: with excutils.save_and_reraise_exception(): try: group.destroy() if isinstance(e, exception.OverQuota): quota_utils.process_reserve_over_quota( context, e, resource='groups') finally: LOG.error("Failed to update quota for group %s.", group.id) def delete(self, context, group, delete_volumes=False): context.authorize(gp_action_policy.DELETE_POLICY, target_obj=group) if not group.host: self.update_quota(context, group, -1, group.project_id) LOG.debug("No host for group %s. Deleting from " "the database.", group.id) group.destroy() return group.assert_not_frozen() if not delete_volumes and group.status not in ( [c_fields.GroupStatus.AVAILABLE, c_fields.GroupStatus.ERROR]): msg = _("Group status must be available or error, " "but current status is: %s") % group.status raise exception.InvalidGroup(reason=msg) # NOTE(tommylikehu): Admin context is required to load group snapshots. 
with group.obj_as_admin(): if group.group_snapshots: raise exception.InvalidGroup( reason=_("Group has existing snapshots.")) # TODO(smcginnis): Add conditional update handling for volumes # Should probably utilize the volume_api.delete code to handle # cascade snapshot deletion and force delete. volumes = self.db.volume_get_all_by_generic_group(context.elevated(), group.id) if volumes and not delete_volumes: msg = (_("Group %s still contains volumes. " "The delete-volumes flag is required to delete it.") % group.id) LOG.error(msg) raise exception.InvalidGroup(reason=msg) volumes_model_update = [] for volume in volumes: if volume['attach_status'] == "attached": msg = _("Volume in group %s is attached. " "Need to detach first.") % group.id LOG.error(msg) raise exception.InvalidGroup(reason=msg) snapshots = objects.SnapshotList.get_all_for_volume(context, volume['id']) if snapshots: msg = _("Volume in group still has " "dependent snapshots.") LOG.error(msg) raise exception.InvalidGroup(reason=msg) volumes_model_update.append({'id': volume['id'], 'status': 'deleting'}) self.db.volumes_update(context, volumes_model_update) if delete_volumes: # We're overloading the term "delete_volumes" somewhat to also # mean to delete the group regardless of the state. expected = {} else: expected = {'status': (c_fields.GroupStatus.AVAILABLE, c_fields.GroupStatus.ERROR)} filters = [~db.group_has_group_snapshot_filter(), ~db.group_has_volumes_filter( attached_or_with_snapshots=delete_volumes), ~db.group_creating_from_src(group_id=group.id)] values = {'status': c_fields.GroupStatus.DELETING} if not group.conditional_update(values, expected, filters): if delete_volumes: reason = _('Group status must be available or error and must ' 'not have dependent group snapshots') else: reason = _('Group must not have attached volumes, volumes ' 'with snapshots, or dependent group snapshots') msg = _('Cannot delete group %(id)s. %(reason)s, and ' 'it cannot be the source for an ongoing group or group ' 'snapshot creation.') % { 'id': group.id, 'reason': reason} raise exception.InvalidGroup(reason=msg) self.volume_rpcapi.delete_group(context, group) def update(self, context, group, name, description, add_volumes, remove_volumes): """Update group.""" context.authorize(group_policy.UPDATE_POLICY, target_obj=group) # Validate name. if name == group.name: name = None # Validate description. if description == group.description: description = None add_volumes_list = [] remove_volumes_list = [] if add_volumes: add_volumes = add_volumes.strip(',') add_volumes_list = add_volumes.split(',') if remove_volumes: remove_volumes = remove_volumes.strip(',') remove_volumes_list = remove_volumes.split(',') invalid_uuids = [] for uuid in add_volumes_list: if uuid in remove_volumes_list: invalid_uuids.append(uuid) if invalid_uuids: msg = _("UUIDs %s are in both add and remove volume " "list.") % invalid_uuids raise exception.InvalidVolume(reason=msg) volumes = self.db.volume_get_all_by_generic_group(context, group.id) # Validate volumes in add_volumes and remove_volumes. 
add_volumes_new = "" remove_volumes_new = "" if add_volumes_list: add_volumes_new = self._validate_add_volumes( context, volumes, add_volumes_list, group) if remove_volumes_list: remove_volumes_new = self._validate_remove_volumes( volumes, remove_volumes_list, group) if (name is None and description is None and not add_volumes_new and not remove_volumes_new): msg = (_("Cannot update group %(group_id)s " "because no valid name, description, add_volumes, " "or remove_volumes were provided.") % {'group_id': group.id}) raise exception.InvalidGroup(reason=msg) expected = {} fields = {'updated_at': timeutils.utcnow()} # Update name and description in db now. No need to # to send them over through an RPC call. if name is not None: fields['name'] = name if description is not None: fields['description'] = description if not add_volumes_new and not remove_volumes_new: # Only update name or description. Set status to available. fields['status'] = c_fields.GroupStatus.AVAILABLE else: expected['status'] = c_fields.GroupStatus.AVAILABLE fields['status'] = c_fields.GroupStatus.UPDATING if not group.conditional_update(fields, expected): msg = _("Group status must be available.") raise exception.InvalidGroup(reason=msg) # Do an RPC call only if the update request includes # adding/removing volumes. add_volumes_new and remove_volumes_new # are strings of volume UUIDs separated by commas with no spaces # in between. if add_volumes_new or remove_volumes_new: self.volume_rpcapi.update_group( context, group, add_volumes=add_volumes_new, remove_volumes=remove_volumes_new) def _validate_remove_volumes(self, volumes, remove_volumes_list, group): # Validate volumes in remove_volumes. remove_volumes_new = "" for volume in volumes: if volume['id'] in remove_volumes_list: if volume['status'] not in VALID_REMOVE_VOL_FROM_GROUP_STATUS: msg = (_("Cannot remove volume %(volume_id)s from " "group %(group_id)s because volume " "is in an invalid state: %(status)s. Valid " "states are: %(valid)s.") % {'volume_id': volume['id'], 'group_id': group.id, 'status': volume['status'], 'valid': VALID_REMOVE_VOL_FROM_GROUP_STATUS}) raise exception.InvalidVolume(reason=msg) # Volume currently in group. It will be removed from group. if remove_volumes_new: remove_volumes_new += "," remove_volumes_new += volume['id'] for rem_vol in remove_volumes_list: if rem_vol not in remove_volumes_new: msg = (_("Cannot remove volume %(volume_id)s from " "group %(group_id)s because it " "is not in the group.") % {'volume_id': rem_vol, 'group_id': group.id}) raise exception.InvalidVolume(reason=msg) return remove_volumes_new def _validate_add_volumes(self, context, volumes, add_volumes_list, group): add_volumes_new = "" for volume in volumes: if volume['id'] in add_volumes_list: # Volume already in group. Remove from add_volumes. add_volumes_list.remove(volume['id']) for add_vol in add_volumes_list: try: add_vol_ref = objects.Volume.get_by_id(context, add_vol) except exception.VolumeNotFound: msg = (_("Cannot add volume %(volume_id)s to " "group %(group_id)s because volume cannot be " "found.") % {'volume_id': add_vol, 'group_id': group.id}) raise exception.InvalidVolume(reason=msg) orig_group = add_vol_ref.group_id if orig_group: # If volume to be added is already in the group to be updated, # it should have been removed from the add_volumes_list in the # beginning of this function. If we are here, it means it is # in a different group. 
msg = (_("Cannot add volume %(volume_id)s to group " "%(group_id)s because it is already in " "group %(orig_group)s.") % {'volume_id': add_vol_ref.id, 'group_id': group.id, 'orig_group': orig_group}) raise exception.InvalidVolume(reason=msg) if add_vol_ref: if add_vol_ref.project_id != group.project_id: msg = (_("Cannot add volume %(volume_id)s to group " "%(group_id)s as they belong to different " "projects.") % {'volume_id': add_vol_ref.id, 'group_id': group.id}) raise exception.InvalidVolume(reason=msg) add_vol_type_id = add_vol_ref.volume_type_id if not add_vol_type_id: msg = (_("Cannot add volume %(volume_id)s to group " "%(group_id)s because it has no volume " "type.") % {'volume_id': add_vol_ref.id, 'group_id': group.id}) raise exception.InvalidVolume(reason=msg) vol_type_ids = [v_type.id for v_type in group.volume_types] if add_vol_type_id not in vol_type_ids: msg = (_("Cannot add volume %(volume_id)s to group " "%(group_id)s because volume type " "%(volume_type)s is not supported by the " "group.") % {'volume_id': add_vol_ref.id, 'group_id': group.id, 'volume_type': add_vol_type_id}) raise exception.InvalidVolume(reason=msg) if (add_vol_ref.status not in VALID_ADD_VOL_TO_GROUP_STATUS): msg = (_("Cannot add volume %(volume_id)s to group " "%(group_id)s because volume is in an " "invalid state: %(status)s. Valid states are: " "%(valid)s.") % {'volume_id': add_vol_ref.id, 'group_id': group.id, 'status': add_vol_ref.status, 'valid': VALID_ADD_VOL_TO_GROUP_STATUS}) raise exception.InvalidVolume(reason=msg) # group.resource_backend and add_vol_ref.resource_backend are # in this format like 'host@backend#pool' in a non-HA # deployment and will contain cluster_name in # A/A HA deployment. vol_host = volume_utils.extract_host( add_vol_ref.resource_backend) group_host = volume_utils.extract_host(group.resource_backend) if group_host != vol_host: raise exception.InvalidVolume( reason=_("Volume is not local to this node.")) # Volume exists. It will be added to CG. 
if add_volumes_new: add_volumes_new += "," add_volumes_new += add_vol_ref.id else: msg = (_("Cannot add volume %(volume_id)s to group " "%(group_id)s because volume does not exist.") % {'volume_id': add_vol_ref.id, 'group_id': group.id}) raise exception.InvalidVolume(reason=msg) return add_volumes_new def get(self, context, group_id): group = objects.Group.get_by_id(context, group_id) context.authorize(group_policy.GET_POLICY, target_obj=group) return group def get_all(self, context, filters=None, marker=None, limit=None, offset=None, sort_keys=None, sort_dirs=None): context.authorize(group_policy.GET_ALL_POLICY) if filters is None: filters = {} if filters: LOG.debug("Searching by: %s", filters) if (context.is_admin and 'all_tenants' in filters): del filters['all_tenants'] groups = objects.GroupList.get_all( context, filters=filters, marker=marker, limit=limit, offset=offset, sort_keys=sort_keys, sort_dirs=sort_dirs) else: groups = objects.GroupList.get_all_by_project( context, context.project_id, filters=filters, marker=marker, limit=limit, offset=offset, sort_keys=sort_keys, sort_dirs=sort_dirs) return groups def reset_status(self, context, group, status): """Reset status of generic group""" context.authorize(gp_action_policy.RESET_STATUS, target_obj=group) field = {'updated_at': timeutils.utcnow(), 'status': status} group.update(field) group.save() def create_group_snapshot(self, context, group, name, description): context.authorize(gsnap_policy.CREATE_POLICY, target_obj=group) group.assert_not_frozen() options = {'group_id': group.id, 'user_id': context.user_id, 'project_id': context.project_id, 'status': "creating", 'name': name, 'description': description, 'group_type_id': group.group_type_id} group_snapshot = None group_snapshot_id = None try: group_snapshot = objects.GroupSnapshot(context, **options) group_snapshot.create() group_snapshot_id = group_snapshot.id snap_name = group_snapshot.name snap_desc = group_snapshot.description with group.obj_as_admin(): self.volume_api.create_snapshots_in_db( context, group.volumes, snap_name, snap_desc, None, group_snapshot_id) except Exception: with excutils.save_and_reraise_exception(): try: # If the group_snapshot has been created if group_snapshot.obj_attr_is_set('id'): group_snapshot.destroy() finally: LOG.error("Error occurred when creating group_snapshot" " %s.", group_snapshot_id) self.volume_rpcapi.create_group_snapshot(context, group_snapshot) return group_snapshot def delete_group_snapshot(self, context, group_snapshot, force=False): context.authorize(gsnap_policy.DELETE_POLICY, target_obj=group_snapshot) group_snapshot.assert_not_frozen() values = {'status': 'deleting'} expected = {'status': ('available', 'error')} filters = [~db.group_creating_from_src( group_snapshot_id=group_snapshot.id)] res = group_snapshot.conditional_update(values, expected, filters) if not res: msg = _('GroupSnapshot status must be available or error, and no ' 'Group can be currently using it as source for its ' 'creation.') raise exception.InvalidGroupSnapshot(reason=msg) snapshots = objects.SnapshotList.get_all_for_group_snapshot( context, group_snapshot.id) # TODO(xyang): Add a new db API to update all snapshots statuses # in one db API call. 
for snap in snapshots: snap.status = c_fields.SnapshotStatus.DELETING snap.save() self.volume_rpcapi.delete_group_snapshot(context.elevated(), group_snapshot) def update_group_snapshot(self, context, group_snapshot, fields): context.authorize(gsnap_policy.UPDATE_POLICY, target_obj=group_snapshot) group_snapshot.update(fields) group_snapshot.save() def get_group_snapshot(self, context, group_snapshot_id): group_snapshot = objects.GroupSnapshot.get_by_id(context, group_snapshot_id) context.authorize(gsnap_policy.GET_POLICY, target_obj=group_snapshot) return group_snapshot def get_all_group_snapshots(self, context, filters=None, marker=None, limit=None, offset=None, sort_keys=None, sort_dirs=None): context.authorize(gsnap_policy.GET_ALL_POLICY) filters = filters or {} if context.is_admin and 'all_tenants' in filters: # Need to remove all_tenants to pass the filtering below. del filters['all_tenants'] group_snapshots = objects.GroupSnapshotList.get_all( context, filters=filters, marker=marker, limit=limit, offset=offset, sort_keys=sort_keys, sort_dirs=sort_dirs) else: group_snapshots = objects.GroupSnapshotList.get_all_by_project( context.elevated(), context.project_id, filters=filters, marker=marker, limit=limit, offset=offset, sort_keys=sort_keys, sort_dirs=sort_dirs) return group_snapshots def reset_group_snapshot_status(self, context, gsnapshot, status): """Reset status of group snapshot""" context.authorize(gsnap_action_policy.RESET_STATUS, target_obj=gsnapshot) field = {'updated_at': timeutils.utcnow(), 'status': status} gsnapshot.update(field) gsnapshot.save() def _check_type(self, group): if not group.is_replicated: msg = _("Group %s is not a replication group type.") % group.id LOG.error(msg) raise exception.InvalidGroupType(reason=msg) for vol_type in group.volume_types: if not volume_utils.is_replicated_spec(vol_type.extra_specs): msg = _("Volume type %s does not have 'replication_enabled' " "spec key set to ' True'.") % vol_type.id LOG.error(msg) raise exception.InvalidVolumeType(reason=msg) # Replication group API (Tiramisu) def enable_replication(self, context, group): context.authorize(gp_action_policy.ENABLE_REP, target_obj=group) self._check_type(group) valid_status = [c_fields.GroupStatus.AVAILABLE] if group.status not in valid_status: params = {'valid': valid_status, 'current': group.status, 'id': group.id} msg = _("Group %(id)s status must be %(valid)s, " "but current status is: %(current)s. " "Cannot enable replication.") % params LOG.error(msg) raise exception.InvalidGroup(reason=msg) valid_rep_status = [c_fields.ReplicationStatus.DISABLED, c_fields.ReplicationStatus.ENABLED] if group.replication_status not in valid_rep_status: params = {'valid': valid_rep_status, 'current': group.replication_status, 'id': group.id} msg = _("Group %(id)s replication status must be %(valid)s, " "but current status is: %(current)s. " "Cannot enable replication.") % params LOG.error(msg) raise exception.InvalidGroup(reason=msg) volumes = objects.VolumeList.get_all_by_generic_group( context.elevated(), group.id) valid_status = ['available', 'in-use'] for vol in volumes: if vol.status not in valid_status: params = {'valid': valid_status, 'current': vol.status, 'id': vol.id} msg = _("Volume %(id)s status must be %(valid)s, " "but current status is: %(current)s. " "Cannot enable replication.") % params LOG.error(msg) raise exception.InvalidVolume(reason=msg) # replication_status could be set to enabled when volume is # created and the mirror is built. 
if vol.replication_status not in valid_rep_status: params = {'valid': valid_rep_status, 'current': vol.replication_status, 'id': vol.id} msg = _("Volume %(id)s replication status must be %(valid)s, " "but current status is: %(current)s. " "Cannot enable replication.") % params LOG.error(msg) raise exception.InvalidVolume(reason=msg) vol.replication_status = c_fields.ReplicationStatus.ENABLING vol.save() group.replication_status = c_fields.ReplicationStatus.ENABLING group.save() self.volume_rpcapi.enable_replication(context, group) def disable_replication(self, context, group): context.authorize(gp_action_policy.DISABLE_REP, target_obj=group) self._check_type(group) valid_status = [c_fields.GroupStatus.AVAILABLE, c_fields.GroupStatus.ERROR] if group.status not in valid_status: params = {'valid': valid_status, 'current': group.status, 'id': group.id} msg = _("Group %(id)s status must be %(valid)s, " "but current status is: %(current)s. " "Cannot disable replication.") % params LOG.error(msg) raise exception.InvalidGroup(reason=msg) valid_rep_status = [c_fields.ReplicationStatus.ENABLED, c_fields.ReplicationStatus.ERROR] if group.replication_status not in valid_rep_status: params = {'valid': valid_rep_status, 'current': group.replication_status, 'id': group.id} msg = _("Group %(id)s replication status must be %(valid)s, " "but current status is: %(current)s. " "Cannot disable replication.") % params LOG.error(msg) raise exception.InvalidGroup(reason=msg) volumes = objects.VolumeList.get_all_by_generic_group( context.elevated(), group.id) for vol in volumes: if vol.replication_status not in valid_rep_status: params = {'valid': valid_rep_status, 'current': vol.replication_status, 'id': vol.id} msg = _("Volume %(id)s replication status must be %(valid)s, " "but current status is: %(current)s. " "Cannot disable replication.") % params LOG.error(msg) raise exception.InvalidVolume(reason=msg) vol.replication_status = c_fields.ReplicationStatus.DISABLING vol.save() group.replication_status = c_fields.ReplicationStatus.DISABLING group.save() self.volume_rpcapi.disable_replication(context, group) def failover_replication(self, context, group, allow_attached_volume=False, secondary_backend_id=None): context.authorize(gp_action_policy.FAILOVER_REP, target_obj=group) self._check_type(group) valid_status = [c_fields.GroupStatus.AVAILABLE] if group.status not in valid_status: params = {'valid': valid_status, 'current': group.status, 'id': group.id} msg = _("Group %(id)s status must be %(valid)s, " "but current status is: %(current)s. " "Cannot failover replication.") % params LOG.error(msg) raise exception.InvalidGroup(reason=msg) valid_rep_status = [c_fields.ReplicationStatus.ENABLED, c_fields.ReplicationStatus.FAILED_OVER] if group.replication_status not in valid_rep_status: params = {'valid': valid_rep_status, 'current': group.replication_status, 'id': group.id} msg = _("Group %(id)s replication status must be %(valid)s, " "but current status is: %(current)s. " "Cannot failover replication.") % params LOG.error(msg) raise exception.InvalidGroup(reason=msg) volumes = objects.VolumeList.get_all_by_generic_group( context.elevated(), group.id) valid_status = ['available', 'in-use'] for vol in volumes: if vol.status not in valid_status: params = {'valid': valid_status, 'current': vol.status, 'id': vol.id} msg = _("Volume %(id)s status must be %(valid)s, " "but current status is: %(current)s. 
" "Cannot failover replication.") % params LOG.error(msg) raise exception.InvalidVolume(reason=msg) if vol.status == 'in-use' and not allow_attached_volume: msg = _("Volume %s is attached but allow_attached_volume flag " "is False. Cannot failover replication.") % vol.id LOG.error(msg) raise exception.InvalidVolume(reason=msg) if vol.replication_status not in valid_rep_status: params = {'valid': valid_rep_status, 'current': vol.replication_status, 'id': vol.id} msg = _("Volume %(id)s replication status must be %(valid)s, " "but current status is: %(current)s. " "Cannot failover replication.") % params LOG.error(msg) raise exception.InvalidVolume(reason=msg) vol.replication_status = c_fields.ReplicationStatus.FAILING_OVER vol.save() group.replication_status = c_fields.ReplicationStatus.FAILING_OVER group.save() self.volume_rpcapi.failover_replication(context, group, allow_attached_volume, secondary_backend_id) def list_replication_targets(self, context, group): context.authorize(gp_action_policy.LIST_REP, target_obj=group) self._check_type(group) return self.volume_rpcapi.list_replication_targets(context, group) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/i18n.py0000664000175000017500000000212300000000000015462 0ustar00zuulzuul00000000000000# Copyright 2014 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """oslo.i18n integration module. See https://docs.openstack.org/oslo.i18n/latest/user/index.html . """ import oslo_i18n as i18n DOMAIN = 'cinder' _translators = i18n.TranslatorFactory(domain=DOMAIN) # The primary translation function using the well-known name "_" _ = _translators.primary def enable_lazy(enable=True): return i18n.enable_lazy(enable) def translate(value, user_locale=None): return i18n.translate(value, user_locale) def get_available_languages(): return i18n.get_available_languages(DOMAIN) ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1759315577.075118 cinder-27.0.0/cinder/image/0000775000175000017500000000000000000000000015415 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/image/__init__.py0000664000175000017500000000000000000000000017514 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/image/accelerator.py0000664000175000017500000000564100000000000020261 0ustar00zuulzuul00000000000000# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. import abc from oslo_config import cfg from oslo_utils import importutils from cinder import exception from cinder.i18n import _ CONF = cfg.CONF # NOTE(ZhengMa): The order of the option is improtant, accelerators # are looked by this list order # Be careful to edit it _ACCEL_PATH_PREFERENCE_ORDER_LIST = [ 'cinder.image.accelerators.qat.AccelQAT', 'cinder.image.accelerators.gzip.AccelGZIP', ] class AccelBase(object, metaclass=abc.ABCMeta): def __init__(self): return @abc.abstractmethod def is_accel_exist(self): return @abc.abstractmethod def compress_img(self, src, dest, run_as_root): return @abc.abstractmethod def decompress_img(self, src, dest, run_as_root): return class ImageAccel(object): def __init__(self, src, dest): self.src = src self.dest = dest self.compression_format = CONF.compression_format if self.compression_format == 'gzip': self._accel_engine_path = _ACCEL_PATH_PREFERENCE_ORDER_LIST else: self._accel_engine_path = None self.engine = self._get_engine() def _get_engine(self, *args, **kwargs): if self._accel_engine_path: for accel in self._accel_engine_path: engine_cls = importutils.import_class(accel) eng = engine_cls(*args, **kwargs) if eng.is_accel_exist(): return eng ex_msg = _("No valid accelerator") raise exception.CinderException(ex_msg) def is_engine_ready(self): if not self.engine: return False if not self.engine.is_accel_exist(): return False return True def compress_img(self, run_as_root): if not self.is_engine_ready(): return self.engine.compress_img(self.src, self.dest, run_as_root) def decompress_img(self, run_as_root): if not self.is_engine_ready(): return self.engine.decompress_img(self.src, self.dest, run_as_root) def is_gzip_compressed(image_file): # The first two bytes of a gzip file are: 1f 8b GZIP_MAGIC_BYTES = b'\x1f\x8b' with open(image_file, 'rb') as f: return f.read(2) == GZIP_MAGIC_BYTES ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315577.0791183 cinder-27.0.0/cinder/image/accelerators/0000775000175000017500000000000000000000000020064 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/image/accelerators/__init__.py0000664000175000017500000000000000000000000022163 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/image/accelerators/gzip.py0000664000175000017500000001004500000000000021407 0ustar00zuulzuul00000000000000# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
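# ---------------------------------------------------------------------------
# Illustrative sketch, not part of the cinder tree: one way a caller could use
# the ImageAccel helper and the module-level is_gzip_compressed() check defined
# in cinder/image/accelerator.py above. The function name and both paths are
# hypothetical; ImageAccel() raises CinderException when no usable accelerator
# (qzip or gzip) is found, so construction is guarded here.
from cinder import exception as cinder_exception
from cinder.image import accelerator


def maybe_decompress(downloaded_path, converted_path):
    # Only bother when the file actually starts with the gzip magic bytes.
    if not accelerator.is_gzip_compressed(downloaded_path):
        return downloaded_path
    try:
        accel = accelerator.ImageAccel(downloaded_path, converted_path)
    except cinder_exception.CinderException:
        # Neither qzip nor gzip is available on this host; leave the file alone.
        return downloaded_path
    accel.decompress_img(run_as_root=True)
    return converted_path
# ---------------------------------------------------------------------------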
from oslo_concurrency import processutils from oslo_log import log as logging from cinder import exception from cinder.i18n import _ from cinder.image import accelerator from cinder import utils LOG = logging.getLogger(__name__) class AccelGZIP(accelerator.AccelBase): def is_accel_exist(self): cmd = ['which', 'gzip'] try: utils.execute(*cmd) except processutils.ProcessExecutionError: LOG.error("GZIP package is not installed.") return False return True # NOTE(ZhengMa): Gzip compresses a file in-place and adds a .gz # extension to the filename, so we rename the compressed file back # to the name Cinder expects it to have. # (Cinder expects to have A to upload) # Follow these steps: # 1. compress A to A.gz (gzip_out_file is A.gz) # 2. mv A.gz to A (gzip_out_file to dest) def compress_img(self, src, dest, run_as_root): try: gzip_compress_cmd = ['gzip', '-k', src] utils.execute(*gzip_compress_cmd, run_as_root=run_as_root) except processutils.ProcessExecutionError as ex: raise exception.CinderAcceleratorError( accelerator='GZIP', description=_("Volume compression failed while " "uploading to glance. GZIP compression " "command failed."), cmd=gzip_compress_cmd, reason=ex.stderr) try: gzip_output_filename = src + '.gz' mv_cmd = ['mv', gzip_output_filename, dest] utils.execute(*mv_cmd, run_as_root=run_as_root) except processutils.ProcessExecutionError as ex: fnames = {'i_fname': gzip_output_filename, 'o_fname': dest} raise exception.CinderAcceleratorError( accelerator='GZIP', description = _("Failed to rename %(i_fname)s " "to %(o_fname)s") % fnames, cmd=mv_cmd, reason=ex.stderr) # NOTE(ZhengMa): Gzip can only decompresses a file with a .gz # extension to the filename, so we rename the original file so # that it can be accepted by Gzip. # Follow these steps: # 1. mv A to A.gz (gzip_in_file is A.gz) # 2. decompress A.gz to A (gzip_in_file to dest) def decompress_img(self, src, dest, run_as_root): try: gzip_input_filename = dest + '.gz' mv_cmd = ['mv', src, gzip_input_filename] utils.execute(*mv_cmd, run_as_root=run_as_root) except processutils.ProcessExecutionError as ex: fnames = {'i_fname': src, 'o_fname': gzip_input_filename} raise exception.CinderAcceleratorError( accelerator='GZIP', description = _("Failed to rename %(i_fname)s " "to %(o_fname)s") % fnames, cmd=mv_cmd, reason=ex.stderr) try: gzip_decompress_cmd = ['gzip', '-d', gzip_input_filename] utils.execute(*gzip_decompress_cmd, run_as_root=run_as_root) except processutils.ProcessExecutionError as ex: raise exception.CinderAcceleratorError( accelerator='GZIP', description = _("Image decompression failed while " "downloading from glance. GZIP " "decompression command failed."), cmd=gzip_decompress_cmd, reason=ex.stderr) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/image/accelerators/qat.py0000664000175000017500000001004100000000000021217 0ustar00zuulzuul00000000000000# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from oslo_concurrency import processutils from oslo_log import log as logging from cinder import exception from cinder.i18n import _ from cinder.image import accelerator from cinder import utils LOG = logging.getLogger(__name__) class AccelQAT(accelerator.AccelBase): def is_accel_exist(self): cmd = ['which', 'qzip'] try: utils.execute(*cmd) except processutils.ProcessExecutionError: LOG.error("QATzip package is not installed.") return False return True # NOTE(ZhengMa): QATzip compresses a file in-place and adds a .gz # extension to the filename, so we rename the compressed file back # to the name Cinder expects it to have. # (Cinder expects to have A to upload) # Follow these steps: # 1. compress A to A.gz (src to qat_out_file) # 2. mv A.gz to A (qat_out_file to dest) def compress_img(self, src, dest, run_as_root): try: qat_compress_cmd = ['qzip', '-k', src, '-o', dest] utils.execute(*qat_compress_cmd, run_as_root=run_as_root) except processutils.ProcessExecutionError as ex: raise exception.CinderAcceleratorError( accelerator='QAT', description=_("Volume compression failed while " "uploading to glance. QAT compression " "command failed."), cmd=qat_compress_cmd, reason=ex.stderr) try: qat_output_filename = src + '.gz' mv_cmd = ['mv', qat_output_filename, dest] utils.execute(*mv_cmd, run_as_root=run_as_root) except processutils.ProcessExecutionError as ex: fnames = {'i_fname': qat_output_filename, 'o_fname': dest} raise exception.CinderAcceleratorError( accelerator='QAT', description = _("Failed to rename %(i_fname)s " "to %(o_fname)s") % fnames, cmd=mv_cmd, reason=ex.stderr) # NOTE(ZhengMa): QATzip can only decompresses a file with a .gz # extension to the filename, so we rename the original file so # that it can be accepted by QATzip. # Follow these steps: # 1. mv A to A.gz (qat_in_file is A.gz) # 2. decompress A.gz to A (qat_in_file to dest) def decompress_img(self, src, dest, run_as_root): try: qat_input_filename = dest + '.gz' mv_cmd = ['mv', src, qat_input_filename] utils.execute(*mv_cmd, run_as_root=run_as_root) except processutils.ProcessExecutionError as ex: fnames = {'i_fname': src, 'o_fname': qat_input_filename} raise exception.CinderAcceleratorError( accelerator='QAT', description = _("Failed to rename %(i_fname)s " "to %(o_fname)s") % fnames, cmd=mv_cmd, reason=ex.stderr) try: qat_decompress_cmd = ['qzip', '-d', qat_input_filename] utils.execute(*qat_decompress_cmd, run_as_root=run_as_root) except processutils.ProcessExecutionError as ex: raise exception.CinderAcceleratorError( accelerator='QAT', description = _("Image decompression failed while " "downloading from glance. QAT " "decompression command failed."), cmd=qat_decompress_cmd, reason=ex.stderr) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/image/cache.py0000664000175000017500000002657600000000000017052 0ustar00zuulzuul00000000000000# Copyright (C) 2015 Pure Storage, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
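# ---------------------------------------------------------------------------
# Illustrative sketch, not part of the cinder tree: the two-step
# "compress, then rename" flow that the NOTE(ZhengMa) comments in the GZIP and
# QAT accelerators above describe, written as a standalone helper around the
# plain gzip binary. The function names and paths are hypothetical, and this
# uses subprocess directly, whereas cinder goes through cinder.utils.execute()
# so the commands can be run via rootwrap.
import subprocess


def compress_to(src, dest):
    # Step 1: 'gzip -k src' keeps src and writes src.gz alongside it.
    subprocess.run(['gzip', '-k', src], check=True)
    # Step 2: rename src.gz to the name the caller asked for.
    subprocess.run(['mv', src + '.gz', dest], check=True)


def decompress_to(src, dest):
    # Step 1: gzip insists on a .gz suffix, so rename the input first.
    subprocess.run(['mv', src, dest + '.gz'], check=True)
    # Step 2: 'gzip -d' strips the suffix again, leaving dest behind.
    subprocess.run(['gzip', '-d', dest + '.gz'], check=True)
# ---------------------------------------------------------------------------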
from typing import Optional from zoneinfo import ZoneInfo from oslo_config import cfg from oslo_log import log as logging from oslo_utils import timeutils from cinder import context from cinder import objects from cinder import rpc from cinder import utils CONF = cfg.CONF LOG = logging.getLogger(__name__) class ImageVolumeCache(object): def __init__(self, db, volume_api, max_cache_size_gb: int = 0, max_cache_size_count: int = 0, clone_across_pools: bool = False): self.db = db self.volume_api = volume_api self.max_cache_size_gb = int(max_cache_size_gb) self.max_cache_size_count = int(max_cache_size_count) self.clone_across_pools = bool(clone_across_pools) self.notifier = rpc.get_notifier('volume', CONF.host) def get_by_image_volume(self, context: context.RequestContext, volume_id: str): return self.db.image_volume_cache_get_by_volume_id(context, volume_id) def evict(self, context: context.RequestContext, cache_entry: dict) -> None: LOG.debug('Evicting image cache entry: %(entry)s.', {'entry': self._entry_to_str(cache_entry)}) self.db.image_volume_cache_delete(context, cache_entry['volume_id']) self._notify_cache_eviction(context, cache_entry['image_id'], cache_entry['host']) def _get_query_filters(self, volume_ref: objects.Volume) -> dict: if volume_ref.is_clustered: return {'cluster_name': volume_ref.cluster_name} if not self.clone_across_pools: return {'host': volume_ref.host} # FIXME(whoami-rajat): If we have two cinder backends pointing to # two different storage arrays, this logic will allow the operation # to proceed to clone across two storage arrays which will fail # eventually. We should at least filter with the hostname in the # given host value hostname@backend#pool. return {} def get_entry(self, context: context.RequestContext, volume_ref: objects.Volume, image_id: str, image_meta: dict) -> Optional[dict]: cache_entry = self.db.image_volume_cache_get_and_update_last_used( context, image_id, **self._get_query_filters(volume_ref) ) if cache_entry: LOG.debug('Found image-volume cache entry: %(entry)s.', {'entry': self._entry_to_str(cache_entry)}) if self._should_update_entry(cache_entry, image_meta): msg = 'Deleting image-volume cache entry that is out-dated' self.delete_cached_volume(context, cache_entry, msg) cache_entry = None if cache_entry: self._notify_cache_hit(context, cache_entry['image_id'], cache_entry['host']) else: self._notify_cache_miss(context, image_id, volume_ref['host']) return cache_entry def create_cache_entry(self, context: context.RequestContext, volume_ref: objects.Volume, image_id: str, image_meta: dict) -> dict: """Create a new cache entry for an image. This assumes that the volume described by volume_ref has already been created and is in an available state. """ LOG.debug('Creating new image-volume cache entry for image ' '%(image_id)s on %(service)s', {'image_id': image_id, 'service': volume_ref.service_topic_queue}) # When we are creating an image from a volume the updated_at field # will be a unicode representation of the datetime. In that case # we just need to parse it into one. If it is an actual datetime # we want to just grab it as a UTC naive datetime. 
image_updated_at = image_meta['updated_at'] if isinstance(image_updated_at, str): image_updated_at = timeutils.parse_strtime(image_updated_at) else: image_updated_at = image_updated_at.astimezone(ZoneInfo('UTC')) cache_entry = self.db.image_volume_cache_create( context, volume_ref.host, volume_ref.cluster_name, image_id, image_updated_at.replace(tzinfo=None), volume_ref.id, volume_ref.size ) LOG.debug('New image-volume cache entry created: %(entry)s.', {'entry': self._entry_to_str(cache_entry)}) return cache_entry def delete_cached_volume(self, context: context.RequestContext, cache_entry: dict, msg: str) -> None: """Delete a volume and remove cache entry.""" LOG.debug('%(msg)s: entry %(entry)s.', {'msg': msg, 'entry': self._entry_to_str(cache_entry)}) volume = objects.Volume.get_by_id(context, cache_entry['volume_id']) # Delete will evict the cache entry. self.volume_api.delete(context, volume) def ensure_space(self, context: context.RequestContext, volume: objects.Volume) -> bool: """Makes room for a volume cache entry. Returns True if successful, false otherwise. """ # Check to see if the cache is actually limited. if self.max_cache_size_gb == 0 and self.max_cache_size_count == 0: return True # Make sure that we can potentially fit the image in the cache # and bail out before evicting everything else to try and make # room for it. if (self.max_cache_size_gb != 0 and volume.size > self.max_cache_size_gb): return False # Assume the entries are ordered by most recently used to least used. entries = self.db.image_volume_cache_get_all( context, **self._get_query_filters(volume)) current_count = len(entries) current_size = 0 for entry in entries: current_size += entry['size'] # Add values for the entry we intend to create. current_size += volume.size current_count += 1 LOG.debug('Image-volume cache for %(service)s current_size (GB) = ' '%(size_gb)s (max = %(max_gb)s), current count = %(count)s ' '(max = %(max_count)s).', {'service': volume.service_topic_queue, 'size_gb': current_size, 'max_gb': self.max_cache_size_gb, 'count': current_count, 'max_count': self.max_cache_size_count}) while (((current_size > self.max_cache_size_gb and self.max_cache_size_gb > 0) or (current_count > self.max_cache_size_count and self.max_cache_size_count > 0)) and len(entries)): entry = entries.pop() msg = 'Deleting image-volume cache entry to reclaim space' self.delete_cached_volume(context, entry, msg) current_size -= entry['size'] current_count -= 1 LOG.debug('Image-volume cache for %(service)s new size (GB) = ' '%(size_gb)s, new count = %(count)s.', {'service': volume.service_topic_queue, 'size_gb': current_size, 'count': current_count}) # It is only possible to not free up enough gb, we will always be able # to free enough count. This is because 0 means unlimited which means # it is guaranteed to be >0 if limited, and we can always delete down # to 0. 
if self.max_cache_size_gb > 0: if current_size > self.max_cache_size_gb > 0: LOG.warning('Image-volume cache for %(service)s does ' 'not have enough space (GB).', {'service': volume.service_topic_queue}) return False return True @utils.if_notifications_enabled def _notify_cache_hit(self, context: context.RequestContext, image_id: str, host: str) -> None: self._notify_cache_action(context, image_id, host, 'hit') @utils.if_notifications_enabled def _notify_cache_miss(self, context: context.RequestContext, image_id: str, host: str) -> None: self._notify_cache_action(context, image_id, host, 'miss') @utils.if_notifications_enabled def _notify_cache_eviction(self, context: context.RequestContext, image_id: str, host: str) -> None: self._notify_cache_action(context, image_id, host, 'evict') @utils.if_notifications_enabled def _notify_cache_action(self, context: context.RequestContext, image_id: str, host: str, action: str) -> None: data = { 'image_id': image_id, 'host': host, } LOG.debug('ImageVolumeCache notification: action=%(action)s' ' data=%(data)s.', {'action': action, 'data': data}) self.notifier.info(context, 'image_volume_cache.%s' % action, data) def _should_update_entry(self, cache_entry: dict, image_meta: dict) -> bool: """Ensure that the cache entry image data is still valid.""" image_updated_utc = (image_meta['updated_at'] .astimezone(ZoneInfo('UTC'))) cache_updated_utc = (cache_entry['image_updated_at'] .replace(tzinfo=ZoneInfo('UTC'))) LOG.debug('Image-volume cache entry image_update_at = %(entry_utc)s, ' 'requested image updated_at = %(image_utc)s.', {'entry_utc': str(cache_updated_utc), 'image_utc': str(image_updated_utc)}) return image_updated_utc != cache_updated_utc def _entry_to_str(self, cache_entry: dict) -> str: return str({ 'id': cache_entry['id'], 'image_id': cache_entry['image_id'], 'volume_id': cache_entry['volume_id'], 'host': cache_entry['host'], 'size': cache_entry['size'], 'image_updated_at': cache_entry['image_updated_at'], 'last_used': cache_entry['last_used'], }) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/image/format_inspector.py0000664000175000017500000010402300000000000021345 0ustar00zuulzuul00000000000000# Copyright 2020 Red Hat, Inc # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ This is a python implementation of virtual disk format inspection routines gathered from various public specification documents, as well as qemu disk driver code. It attempts to store and parse the minimum amount of data required, and in a streaming-friendly manner to collect metadata about complex-format images. """ import struct from oslo_log import log as logging LOG = logging.getLogger(__name__) def chunked_reader(fileobj, chunk_size=512): while True: chunk = fileobj.read(chunk_size) if not chunk: break yield chunk class CaptureRegion(object): """Represents a region of a file we want to capture. 
A region of a file we want to capture requires a byte offset into the file and a length. This is expected to be used by a data processing loop, calling capture() with the most recently-read chunk. This class handles the task of grabbing the desired region of data across potentially multiple fractional and unaligned reads. :param offset: Byte offset into the file starting the region :param length: The length of the region """ def __init__(self, offset, length): self.offset = offset self.length = length self.data = b'' @property def complete(self): """Returns True when we have captured the desired data.""" return self.length == len(self.data) def capture(self, chunk, current_position): """Process a chunk of data. This should be called for each chunk in the read loop, at least until complete returns True. :param chunk: A chunk of bytes in the file :param current_position: The position of the file processed by the read loop so far. Note that this will be the position in the file *after* the chunk being presented. """ read_start = current_position - len(chunk) if (read_start <= self.offset <= current_position or self.offset <= read_start <= (self.offset + self.length)): if read_start < self.offset: lead_gap = self.offset - read_start else: lead_gap = 0 self.data += chunk[lead_gap:] self.data = self.data[:self.length] class ImageFormatError(Exception): """An unrecoverable image format error that aborts the process.""" pass class TraceDisabled(object): """A logger-like thing that swallows tracing when we do not want it.""" def debug(self, *a, **k): pass info = debug warning = debug error = debug class FileInspector(object): """A stream-based disk image inspector. This base class works on raw images and is subclassed for more complex types. It is to be presented with the file to be examined one chunk at a time, during read processing and will only store as much data as necessary to determine required attributes of the file. """ def __init__(self, tracing=False): self._total_count = 0 # NOTE(danms): The logging in here is extremely verbose for a reason, # but should never really be enabled at that level at runtime. To # retain all that work and assist in future debug, we have a separate # debug flag that can be passed from a manual tool to turn it on. if tracing: self._log = logging.getLogger(str(self)) else: self._log = TraceDisabled() self._capture_regions = {} def _capture(self, chunk, only=None): for name, region in self._capture_regions.items(): if only and name not in only: continue if not region.complete: region.capture(chunk, self._total_count) def eat_chunk(self, chunk): """Call this to present chunks of the file to the inspector.""" pre_regions = set(self._capture_regions.keys()) # Increment our position-in-file counter self._total_count += len(chunk) # Run through the regions we know of to see if they want this # data self._capture(chunk) # Let the format do some post-read processing of the stream self.post_process() # Check to see if the post-read processing added new regions # which may require the current chunk. new_regions = set(self._capture_regions.keys()) - pre_regions if new_regions: self._capture(chunk, only=new_regions) def post_process(self): """Post-read hook to process what has been read so far. This will be called after each chunk is read and potentially captured by the defined regions. If any regions are defined by this call, those regions will be presented with the current chunk in case it is within one of the new regions. 
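# Illustrative sketch (not part of this module): using the chunked_reader()
# generator and CaptureRegion defined here to collect a small byte range
# while streaming a file, without holding the whole file in memory. The
# path, offset and length are placeholders.
def example_capture_range(path, offset=40, length=8):
    region = CaptureRegion(offset, length)
    position = 0
    with open(path, 'rb') as f:
        for chunk in chunked_reader(f, chunk_size=512):
            # capture() expects the position *after* the current chunk
            position += len(chunk)
            region.capture(chunk, position)
            if region.complete:
                break
    return region.data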
""" pass def region(self, name): """Get a CaptureRegion by name.""" return self._capture_regions[name] def new_region(self, name, region): """Add a new CaptureRegion by name.""" if self.has_region(name): # This is a bug, we tried to add the same region twice raise ImageFormatError('Inspector re-added region %s' % name) self._capture_regions[name] = region def has_region(self, name): """Returns True if named region has been defined.""" return name in self._capture_regions @property def format_match(self): """Returns True if the file appears to be the expected format.""" return True @property def virtual_size(self): """Returns the virtual size of the disk image, or zero if unknown.""" return self._total_count @property def actual_size(self): """Returns the total size of the file. This is usually smaller than virtual_size. NOTE: this will only be accurate if the entire file is read and processed. """ return self._total_count @property def complete(self): """Returns True if we have all the information needed.""" return all(r.complete for r in self._capture_regions.values()) def __str__(self): """The string name of this file format.""" return 'raw' @property def context_info(self): """Return info on amount of data held in memory for auditing. This is a dict of region:sizeinbytes items that the inspector uses to examine the file. """ return {name: len(region.data) for name, region in self._capture_regions.items()} @classmethod def from_file(cls, filename): """Read as much of a file as necessary to complete inspection. NOTE: Because we only read as much of the file as necessary, the actual_size property will not reflect the size of the file, but the amount of data we read before we satisfied the inspector. Raises ImageFormatError if we cannot parse the file. """ inspector = cls() with open(filename, 'rb') as f: for chunk in chunked_reader(f): inspector.eat_chunk(chunk) if inspector.complete: # No need to eat any more data break if not inspector.complete or not inspector.format_match: raise ImageFormatError('File is not in requested format') return inspector def safety_check(self): """Perform some checks to determine if this file is safe. Returns True if safe, False otherwise. It may raise ImageFormatError if safety cannot be guaranteed because of parsing or other errors. """ return True # The qcow2 format consists of a big-endian 72-byte header, of which # only a small portion has information we care about: # # Dec Hex Name # 0 0x00 Magic 4-bytes 'QFI\xfb' # 4 0x04 Version (uint32_t, should always be 2 for modern files) # . . . # 8 0x08 Backing file offset (uint64_t) # 24 0x18 Size in bytes (unint64_t) # . . . # 72 0x48 Incompatible features bitfield (6 bytes) # # https://gitlab.com/qemu-project/qemu/-/blob/master/docs/interop/qcow2.txt class QcowInspector(FileInspector): """QEMU QCOW2 Format This should only require about 32 bytes of the beginning of the file to determine the virtual size, and 104 bytes to perform the safety check. 
""" BF_OFFSET = 0x08 BF_OFFSET_LEN = 8 I_FEATURES = 0x48 I_FEATURES_LEN = 8 I_FEATURES_DATAFILE_BIT = 3 I_FEATURES_MAX_BIT = 4 def __init__(self, *a, **k): super(QcowInspector, self).__init__(*a, **k) self.new_region('header', CaptureRegion(0, 512)) def _qcow_header_data(self): magic, version, bf_offset, bf_sz, cluster_bits, size = ( struct.unpack('>4sIQIIQ', self.region('header').data[:32])) return magic, size @property def has_header(self): return self.region('header').complete @property def virtual_size(self): if not self.region('header').complete: return 0 if not self.format_match: return 0 magic, size = self._qcow_header_data() return size @property def format_match(self): if not self.region('header').complete: return False magic, size = self._qcow_header_data() return magic == b'QFI\xFB' @property def has_backing_file(self): if not self.region('header').complete: return None if not self.format_match: return False bf_offset_bytes = self.region('header').data[ self.BF_OFFSET:self.BF_OFFSET + self.BF_OFFSET_LEN] # nonzero means "has a backing file" bf_offset, = struct.unpack('>Q', bf_offset_bytes) return bf_offset != 0 @property def has_unknown_features(self): if not self.region('header').complete: return None if not self.format_match: return False i_features = self.region('header').data[ self.I_FEATURES:self.I_FEATURES + self.I_FEATURES_LEN] # This is the maximum byte number we should expect any bits to be set max_byte = self.I_FEATURES_MAX_BIT // 8 # The flag bytes are in big-endian ordering, so if we process # them in index-order, they're reversed for i, byte_num in enumerate(reversed(range(self.I_FEATURES_LEN))): if byte_num == max_byte: # If we're in the max-allowed byte, allow any bits less than # the maximum-known feature flag bit to be set allow_mask = ((1 << self.I_FEATURES_MAX_BIT) - 1) elif byte_num > max_byte: # If we're above the byte with the maximum known feature flag # bit, then we expect all zeroes allow_mask = 0x0 else: # Any earlier-than-the-maximum byte can have any of the flag # bits set allow_mask = 0xFF if i_features[i] & ~allow_mask: LOG.warning('Found unknown feature bit in byte %i: %s/%s', byte_num, bin(i_features[byte_num] & ~allow_mask), bin(allow_mask)) return True return False @property def has_data_file(self): if not self.region('header').complete: return None if not self.format_match: return False i_features = self.region('header').data[ self.I_FEATURES:self.I_FEATURES + self.I_FEATURES_LEN] # First byte of bitfield, which is i_features[7] byte = self.I_FEATURES_LEN - 1 - self.I_FEATURES_DATAFILE_BIT // 8 # Third bit of bitfield, which is 0x04 bit = 1 << (self.I_FEATURES_DATAFILE_BIT - 1 % 8) return bool(i_features[byte] & bit) def __str__(self): return 'qcow2' def safety_check(self): return (not self.has_backing_file and not self.has_data_file and not self.has_unknown_features) def safety_check_allow_backing_file(self): return (not self.has_data_file and not self.has_unknown_features) class QEDInspector(FileInspector): def __init__(self, tracing=False): super().__init__(tracing) self.new_region('header', CaptureRegion(0, 512)) @property def format_match(self): if not self.region('header').complete: return False return self.region('header').data.startswith(b'QED\x00') def safety_check(self): # QED format is not supported by anyone, but we want to detect it # and mark it as just always unsafe. 
return False # The VHD (or VPC as QEMU calls it) format consists of a big-endian # 512-byte "footer" at the beginning of the file with various # information, most of which does not matter to us: # # Dec Hex Name # 0 0x00 Magic string (8-bytes, always 'conectix') # 40 0x28 Disk size (uint64_t) # # https://github.com/qemu/qemu/blob/master/block/vpc.c class VHDInspector(FileInspector): """Connectix/MS VPC VHD Format This should only require about 512 bytes of the beginning of the file to determine the virtual size. """ def __init__(self, *a, **k): super(VHDInspector, self).__init__(*a, **k) self.new_region('header', CaptureRegion(0, 512)) @property def format_match(self): return self.region('header').data.startswith(b'conectix') @property def virtual_size(self): if not self.region('header').complete: return 0 if not self.format_match: return 0 return struct.unpack('>Q', self.region('header').data[40:48])[0] def __str__(self): return 'vhd' # The VHDX format consists of a complex dynamic little-endian # structure with multiple regions of metadata and data, linked by # offsets with in the file (and within regions), identified by MSFT # GUID strings. The header is a 320KiB structure, only a few pieces of # which we actually need to capture and interpret: # # Dec Hex Name # 0 0x00000 Identity (Technically 9-bytes, padded to 64KiB, the first # 8 bytes of which are 'vhdxfile') # 196608 0x30000 The Region table (64KiB of a 32-byte header, followed # by up to 2047 36-byte region table entry structures) # # The region table header includes two items we need to read and parse, # which are: # # 196608 0x30000 4-byte signature ('regi') # 196616 0x30008 Entry count (uint32-t) # # The region table entries follow the region table header immediately # and are identified by a 16-byte GUID, and provide an offset of the # start of that region. We care about the "metadata region", identified # by the METAREGION class variable. The region table entry is (offsets # from the beginning of the entry, since it could be in multiple places): # # 0 0x00000 16-byte MSFT GUID # 16 0x00010 Offset of the actual metadata region (uint64_t) # # When we find the METAREGION table entry, we need to grab that offset # and start examining the region structure at that point. That # consists of a metadata table of structures, which point to places in # the data in an unstructured space that follows. The header is # (offsets relative to the region start): # # 0 0x00000 8-byte signature ('metadata') # . . . # 16 0x00010 2-byte entry count (up to 2047 entries max) # # This header is followed by the specified number of metadata entry # structures, identified by GUID: # # 0 0x00000 16-byte MSFT GUID # 16 0x00010 4-byte offset (uint32_t, relative to the beginning of # the metadata region) # # We need to find the "Virtual Disk Size" metadata item, identified by # the GUID in the VIRTUAL_DISK_SIZE class variable, grab the offset, # add it to the offset of the metadata region, and examine that 8-byte # chunk of data that follows. # # The "Virtual Disk Size" is a naked uint64_t which contains the size # of the virtual disk, and is our ultimate target here. # # https://docs.microsoft.com/en-us/openspecs/windows_protocols/ms-vhdx/83e061f8-f6e2-4de1-91bd-5d518a43d477 class VHDXInspector(FileInspector): """MS VHDX Format This requires some complex parsing of the stream. 
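# Illustrative sketch (not part of this module): the VHD ("conectix") footer
# check and virtual-size read performed by VHDInspector above, applied to a
# 512-byte buffer taken from the start of the file.
import struct

def example_vhd_virtual_size(footer: bytes) -> int:
    if not footer.startswith(b'conectix'):
        raise ValueError('not a VHD image')
    # disk size is a big-endian uint64 at byte offset 40
    return struct.unpack('>Q', footer[40:48])[0]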
The first 256KiB of the image is stored to get the header and region information, and then we capture the first metadata region to read those records, find the location of the virtual size data and parse it. This needs to store the metadata table entries up until the VDS record, which may consist of up to 2047 32-byte entries at max. Finally, it must store a chunk of data at the offset of the actual VDS uint64. """ METAREGION = '8B7CA206-4790-4B9A-B8FE-575F050F886E' VIRTUAL_DISK_SIZE = '2FA54224-CD1B-4876-B211-5DBED83BF4B8' VHDX_METADATA_TABLE_MAX_SIZE = 32 * 2048 # From qemu def __init__(self, *a, **k): super(VHDXInspector, self).__init__(*a, **k) self.new_region('ident', CaptureRegion(0, 32)) self.new_region('header', CaptureRegion(192 * 1024, 64 * 1024)) def post_process(self): # After reading a chunk, we may have the following conditions: # # 1. We may have just completed the header region, and if so, # we need to immediately read and calculate the location of # the metadata region, as it may be starting in the same # read we just did. # 2. We may have just completed the metadata region, and if so, # we need to immediately calculate the location of the # "virtual disk size" record, as it may be starting in the # same read we just did. if self.region('header').complete and not self.has_region('metadata'): region = self._find_meta_region() if region: self.new_region('metadata', region) elif self.has_region('metadata') and not self.has_region('vds'): region = self._find_meta_entry(self.VIRTUAL_DISK_SIZE) if region: self.new_region('vds', region) @property def format_match(self): return self.region('ident').data.startswith(b'vhdxfile') @staticmethod def _guid(buf): """Format a MSFT GUID from the 16-byte input buffer.""" guid_format = '= 2048: raise ImageFormatError('Region count is %i (limit 2047)' % count) # Process the regions until we find the metadata one; grab the # offset and return self._log.debug('Region entry first is %x', region_entry_first) self._log.debug('Region entries %i', count) meta_offset = 0 for i in range(0, count): entry_start = region_entry_first + (i * 32) entry_end = entry_start + 32 entry = self.region('header').data[entry_start:entry_end] self._log.debug('Entry offset is %x', entry_start) # GUID is the first 16 bytes guid = self._guid(entry[:16]) if guid == self.METAREGION: # This entry is the metadata region entry meta_offset, meta_len, meta_req = struct.unpack( '= 2048: raise ImageFormatError( 'Metadata item count is %i (limit 2047)' % count) for i in range(0, count): entry_offset = 32 + (i * 32) guid = self._guid(meta_buffer[entry_offset:entry_offset + 16]) if guid == desired_guid: # Found the item we are looking for by id. # Stop our region from capturing item_offset, item_length, _reserved = struct.unpack( ':: - ' 'Only used if glance_api_servers are not provided.'), ] glance_core_properties_opts = [ cfg.ListOpt('glance_core_properties', default=['checksum', 'container_format', 'disk_format', 'image_name', 'image_id', 'min_disk', 'min_ram', 'name', 'size'], help='Default core properties of image') ] CONF = cfg.CONF CONF.register_opts(image_opts) CONF.register_opts(glance_core_properties_opts) # Register keystoneauth options to create service user # to talk to glance. 
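# Illustrative usage sketch (not part of this module) for the
# _parse_image_ref() helper defined just below. The hostname is
# hypothetical; the image id reuses the example form shown later in
# get_remote_image_service().
def example_parse_image_ref():
    href = ('https://glance.example.com:9292/v2/images/'
            'b8b2c6f7-7345-4e2f-afa2-eedaba9cbbe3')
    image_id, netloc, use_ssl = _parse_image_ref(href)
    assert image_id == 'b8b2c6f7-7345-4e2f-afa2-eedaba9cbbe3'
    assert netloc == 'glance.example.com:9292'
    assert use_ssl is True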
GLANCE_GROUP = 'glance' glance_session_opts = ks_loading.get_session_conf_options() glance_auth_opts = ks_loading.get_auth_common_conf_options() CONF.register_opts(glance_session_opts, group=GLANCE_GROUP) CONF.register_opts(glance_auth_opts, group=GLANCE_GROUP) _SESSION = None LOG = logging.getLogger(__name__) def _parse_image_ref(image_href: str) -> tuple[str, str, bool]: """Parse an image href into composite parts. :param image_href: href of an image :returns: a tuple of the form (image_id, netloc, use_ssl) :raises ValueError: """ url = urllib.parse.urlparse(image_href) netloc = url.netloc image_id = url.path.split('/')[-1] use_ssl = (url.scheme == 'https') return (image_id, netloc, use_ssl) def _create_glance_client( context: context.RequestContext, netloc: str, use_ssl: bool, privileged_user: bool = False) -> glanceclient.Client: """Instantiate a new glanceclient.Client object.""" params = {'global_request_id': context.global_id} g_auth = None if privileged_user and CONF[GLANCE_GROUP].auth_type: LOG.debug('Creating Keystone auth plugin from conf') g_auth = ks_loading.load_auth_from_conf_options(CONF, GLANCE_GROUP) if use_ssl and CONF.auth_strategy == 'noauth': params = {'insecure': CONF.glance_api_insecure, 'cacert': CONF.glance_ca_certificates_file, 'timeout': CONF.glance_request_timeout, 'split_loggers': CONF.split_loggers } if CONF.auth_strategy == 'keystone': global _SESSION if not _SESSION: config_options = {'insecure': CONF.glance_api_insecure, 'cacert': CONF.glance_ca_certificates_file, 'timeout': CONF.glance_request_timeout, 'cert': CONF.glance_certfile, 'key': CONF.glance_keyfile, 'split_loggers': CONF.split_loggers } _SESSION = ks_session.Session().load_from_options(**config_options) auth = service_auth.get_auth_plugin(context, auth=g_auth) params['auth'] = auth params['session'] = _SESSION scheme = 'https' if use_ssl else 'http' endpoint = '%s://%s' % (scheme, netloc) return glanceclient.Client('2', endpoint, **params) def get_api_servers(context: context.RequestContext) -> Iterable: """Return Iterable over shuffled api servers. Shuffle a list of glance_api_servers and return an iterator that will cycle through the list, looping around to the beginning if necessary. If CONF.glance_api_servers is None then they will be retrieved from the catalog. 
""" api_servers = [] api_servers_info = [] if CONF.glance_api_servers is None: info = CONF.glance_catalog_info try: service_type, service_name, endpoint_type = info.split(':') except ValueError: raise exception.InvalidConfigurationValue(_( "Failed to parse the configuration option " "'glance_catalog_info', must be in the form " "::")) for entry in context.service_catalog: if entry.get('type') == service_type: api_servers.append( entry.get('endpoints')[0].get(endpoint_type)) else: for api_server in CONF.glance_api_servers: api_servers.append(api_server) for api_server in api_servers: if '//' not in api_server: api_server = 'http://' + api_server url = urllib.parse.urlparse(api_server) netloc = url.netloc + url.path use_ssl = (url.scheme == 'https') api_servers_info.append((netloc, use_ssl)) random.shuffle(api_servers_info) return itertools.cycle(api_servers_info) class GlanceClientWrapper(object): """Glance client wrapper class that implements retries.""" def __init__(self, context: Optional[context.RequestContext] = None, netloc: Optional[str] = None, use_ssl: bool = False, privileged_user: bool = False): self.client: Optional[glanceclient.Client] if netloc is not None: assert context is not None self.client = self._create_static_client(context, netloc, use_ssl, privileged_user) else: self.client = None self.api_servers: Optional[Iterable] = None def _create_static_client( self, context: context.RequestContext, netloc: str, use_ssl: bool, privileged_user: bool = False) -> glanceclient.Client: """Create a client that we'll use for every call.""" self.netloc = netloc self.use_ssl = use_ssl return _create_glance_client(context, self.netloc, self.use_ssl, privileged_user) def _create_onetime_client( self, context: context.RequestContext, privileged_user: bool = False) -> glanceclient.Client: """Create a client that will be used for one call.""" if self.api_servers is None: self.api_servers = get_api_servers(context) self.netloc, self.use_ssl = next(self.api_servers) # type: ignore return _create_glance_client(context, self.netloc, self.use_ssl, privileged_user) def call(self, context: context.RequestContext, method: str, *args: Any, **kwargs: Any) -> Any: """Call a glance client method. If we get a connection error, retry the request according to CONF.glance_num_retries. 
""" retry_excs = (glanceclient.exc.ServiceUnavailable, glanceclient.exc.InvalidEndpoint, glanceclient.exc.CommunicationError) num_attempts = 1 + CONF.glance_num_retries glance_controller = kwargs.pop('controller', 'images') store_id = kwargs.pop('store_id', None) base_image_ref = kwargs.pop('base_image_ref', None) privileged_user = kwargs.pop('privileged_user', False) for attempt in range(1, num_attempts + 1): client = self.client or self._create_onetime_client( context, privileged_user) keys = ('x-image-meta-store', 'x-openstack-base-image-ref',) values = (store_id, base_image_ref,) headers = {k: v for (k, v) in zip(keys, values) if v is not None} if headers: client.http_client.additional_headers = headers try: controller = getattr(client, glance_controller) return getattr(controller, method)(*args, **kwargs) except retry_excs as e: netloc = self.netloc extra = "retrying" error_msg = _("Error contacting glance server " "'%(netloc)s' for '%(method)s', " "%(extra)s.") if attempt == num_attempts: extra = 'done trying' LOG.exception(error_msg, {'netloc': netloc, 'method': method, 'extra': extra}) raise exception.GlanceConnectionFailed(reason=e) LOG.exception(error_msg, {'netloc': netloc, 'method': method, 'extra': extra}) time.sleep(1) except glanceclient.exc.HTTPOverLimit as e: raise exception.ImageLimitExceeded(e) class GlanceImageService(object): """Provides storage and retrieval of disk image objects within Glance.""" def __init__(self, client: Optional[Any] = None): self._client = client or GlanceClientWrapper() self._image_schema: Optional[glanceclient.v2.schemas.Schema] = None self.temp_images: Optional[image_utils.TemporaryImages] = None def detail(self, context: context.RequestContext, **kwargs: str) -> list[dict]: """Calls out to Glance for a list of detailed image information.""" params = self._extract_query_params(kwargs) try: images = self._client.call(context, 'list', **params) except Exception: _reraise_translated_exception() _images = [] for image in images: if self._is_image_available(context, image): _images.append(self._translate_from_glance(context, image)) return _images def _extract_query_params(self, params: dict) -> dict[str, Any]: _params = {} accepted_params = ('filters', 'marker', 'limit', 'sort_key', 'sort_dir') for param in accepted_params: if param in params: _params[param] = params.get(param) return _params def list_members(self, context: context.RequestContext, image_id: str) -> list[dict]: """Returns a list of dicts with image member data.""" try: return self._client.call(context, 'list', controller='image_members', image_id=image_id) except Exception: _reraise_translated_image_exception(image_id) def get_stores(self, context: context.RequestContext): """Returns a list of dicts with stores information.""" try: return self._client.call(context, 'get_stores_info') except Exception: _reraise_translated_exception() def show(self, context: context.RequestContext, image_id: str) -> dict[str, Any]: """Returns a dict with image data for the given opaque image id.""" try: image = self._client.call(context, 'get', image_id) except Exception: _reraise_translated_image_exception(image_id) if not self._is_image_available(context, image): raise exception.ImageNotFound(image_id=image_id) base_image_meta = self._translate_from_glance(context, image) return base_image_meta def get_location(self, context: context.RequestContext, image_id: str) -> tuple[Optional[str], Any]: """Get backend storage location url. 
Returns a tuple containing the direct url and locations representing the backend storage location, or (None, None) if these attributes are not shown by Glance. """ # direct_url is returned by v2 api client = GlanceClientWrapper() # The ``get_image_locations`` API was added to address # OSSN-0065, however to keep backward compatibility, # we need to try with the old ``get`` call if we are using # an older version of glance. # TODO: Remove the ``get`` API call when 2024.1 trasitions # to unmaintained. (``get_image_locations`` was added in 2024.2). image_meta = {} try_methods = ('get_image_locations', 'get') for method in try_methods: try: # NOTE(gmaan): Glance get_image_locations API policy rule is # default to 'service' role so cinder needs to load the auth # plugin from the keystoneauth which has the 'service' role. privileged_user = False if method == 'get_image_locations': privileged_user = True image_meta = client.call(context, method, image_id, privileged_user=privileged_user) if image_meta: break except glanceclient.exc.HTTPNotImplemented: LOG.debug('Glance method %s not available', method) except glanceclient.exc.HTTPForbidden: # In an upgrade scenario, if the operator hasn't configured # the [glance] section in the cinder configuration file, we # will fail on the new location GET API policy check. LOG.warning('Glance method %s is available but a dedicated ' '[glance] section is required in the cinder ' 'configuration file to allow service-to-service ' 'communication.', method) except Exception: _reraise_translated_image_exception(image_id) if not self._is_image_available(context, image_meta): raise exception.ImageNotFound(image_id=image_id) locations: list[Any] = [] # Since both (old and new) APIs return RequestIdProxy object, # to differentiate, we check the 'direct_url' property and if # it exists then it's the old API (otherwise new) direct_url = getattr(image_meta, 'direct_url', None) if direct_url: # Old format locations = getattr(image_meta, 'locations', []) else: # New format (iterator) # Verify that all location entries are in dict format # Example: {'url': , 'metadata': {'store': }} locations = [meta for meta in image_meta if isinstance(meta, dict)] # some glance stores like nfs only meta data # is stored and returned as locations. # so composite of two needs to be returned. # direct_url will be None when using new location APIs return (direct_url, locations) def add_location(self, context: context.RequestContext, image_id: str, url: str, metadata: dict) -> dict: """Add a backend location url to an image. Returns a dict containing image metadata on success. """ client = GlanceClientWrapper() # The ``add_image_location`` API was added to address # OSSN-0065, however to keep backward compatibility, # we need to try with the old ``add_location`` call # if we are using an older version of glance. # TODO: Remove the ``add_location`` API call when 2024.1 # trasitions to unmaintained. (``add_image_location`` # was added in 2024.2). try_methods = ('add_image_location', 'add_location') for method in try_methods: try: # NOTE(gmaan): Glance add_image_location API policy rule is # default to 'service' role so cinder needs to load the auth # plugin from the keystoneauth which has the 'service' role. 
if method == 'add_image_location': privileged_user = True else: privileged_user = False return client.call(context, method, image_id, url, metadata, privileged_user=privileged_user) except glanceclient.exc.HTTPNotImplemented: LOG.debug('Glance method %s not available', method) except Exception: _reraise_translated_image_exception(image_id) # If both method return HTTPNotImplemented exception raise exception.ProgrammingError( reason='unwarranted assumption about available glanceclient ' 'methods.') def download(self, context: context.RequestContext, image_id: str, data=None): """Calls out to Glance for data and writes data.""" if data and 'file' in CONF.allowed_direct_url_schemes: direct_url, locations = self.get_location(context, image_id) urls = [direct_url] + [loc.get('url') for loc in locations or []] for url in urls: if url is None: continue parsed_url = urllib.parse.urlparse(url) if parsed_url.scheme == "file": # a system call to cp could have significant performance # advantages, however we do not have the path to files at # this point in the abstraction. with open(parsed_url.path, "rb") as f: shutil.copyfileobj(f, data) return try: image_chunks = self._client.call(context, 'data', image_id) except Exception: _reraise_translated_image_exception(image_id) if image_chunks is None: raise exception.ImageDownloadFailed( image_href=image_id, reason=_('image contains no data.')) if not data: return image_chunks else: for chunk in image_chunks: data.write(chunk) def create(self, context: context.RequestContext, image_meta: dict[str, Any], data=None) -> dict[str, Any]: """Store the image data and return the new image object.""" sent_service_image_meta = self._translate_to_glance(image_meta) if data: sent_service_image_meta['data'] = data recv_service_image_meta = self._client.call(context, 'create', **sent_service_image_meta) return self._translate_from_glance(context, recv_service_image_meta) def update(self, context: context.RequestContext, image_id: str, image_meta: dict, data=None, purge_props: bool = True, store_id: Optional[str] = None, base_image_ref: Optional[str] = None) -> dict: """Modify the given image with the new data.""" # For v2, _translate_to_glance stores custom properties in image meta # directly. We need the custom properties to identify properties to # remove if purge_props is True. Save the custom properties before # translate. if purge_props: props_to_update = image_meta.get('properties', {}).keys() image_meta = self._translate_to_glance(image_meta) # NOTE(bcwaldon): id is not an editable field, but it is likely to be # passed in by calling code. Let's be nice and ignore it. image_meta.pop('id', None) kwargs = {} if store_id: kwargs['store_id'] = store_id if base_image_ref: kwargs['base_image_ref'] = base_image_ref try: if data: self._client.call(context, 'upload', image_id, data, **kwargs) if image_meta: if purge_props: # Properties to remove are those not specified in # input properties. cur_image_meta = self.show(context, image_id) cur_props = cur_image_meta['properties'].keys() remove_props = list(set(cur_props) - set(props_to_update)) image_meta['remove_props'] = remove_props image_meta = self._client.call(context, 'update', image_id, **image_meta) else: image_meta = self._client.call(context, 'get', image_id) except Exception: _reraise_translated_image_exception(image_id) else: return self._translate_from_glance(context, image_meta) def delete(self, context: context.RequestContext, image_id: str) -> bool: """Delete the given image. 
:raises ImageNotFound: if the image does not exist. :raises NotAuthorized: if the user is not an owner. """ try: self._client.call(context, 'delete', image_id) except glanceclient.exc.NotFound: raise exception.ImageNotFound(image_id=image_id) return True def _translate_from_glance(self, context: context.RequestContext, image: dict[str, Any]) -> dict: """Get image metadata from glance image. Extract metadata from image and convert it's properties to type cinder expected. :param image: glance image object :return: image metadata dictionary """ if self._image_schema is None: self._image_schema = self._client.call(context, 'get', controller='schemas', schema_name='image') assert self._image_schema is not None # NOTE(aarefiev): get base image property, store image 'schema' # is redundant, so ignore it. image_meta: dict = { key: getattr(image, key) for key in image.keys() if self._image_schema.is_base_property(key) is True and key != 'schema'} # Process 'cinder_encryption_key_id' as a metadata key if 'cinder_encryption_key_id' in image.keys(): image_meta['cinder_encryption_key_id'] = \ image['cinder_encryption_key_id'] # NOTE(aarefiev): nova is expected that all image properties # (custom or defined in schema-image.json) stores in # 'properties' key. image_meta['properties'] = { key: getattr(image, key) for key in image.keys() if self._image_schema.is_base_property(key) is False} image_meta = _convert_timestamps_to_datetimes(image_meta) image_meta = _convert_from_string(image_meta) return image_meta @staticmethod def _translate_to_glance(image_meta: dict[str, Any]) -> dict[str, Any]: image_meta = _convert_to_string(image_meta) image_meta = _remove_read_only(image_meta) # NOTE(tsekiyama): From the Image API v2, custom properties must # be stored in image_meta directly, instead of the 'properties' key. properties = image_meta.get('properties') if properties: image_meta.update(properties) del image_meta['properties'] return image_meta def _is_image_available(self, context: context.RequestContext, image) -> bool: """Check image availability. This check is needed in case Nova and Glance are deployed without authentication turned on. """ # The presence of an auth token implies this is an authenticated # request and we need not handle the noauth use-case. 
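# Illustrative sketch (not part of this module) of the Image API v2 property
# flattening performed by _translate_to_glance() above: custom properties
# move from the 'properties' dict into the top-level image metadata.
def example_flatten_properties(image_meta: dict) -> dict:
    meta = dict(image_meta)
    props = meta.pop('properties', None)
    if props:
        meta.update(props)
    return meta

# example_flatten_properties({'name': 'vol-img', 'properties': {'foo': 'bar'}})
# -> {'name': 'vol-img', 'foo': 'bar'}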
if hasattr(context, 'auth_token') and context.auth_token: return True if context.is_admin: return True if (getattr(image, 'is_public', False) or getattr(image, 'visibility', 'private') == 'public'): return True properties = image.properties if context.project_id and ('owner_id' in properties): return str(properties['owner_id']) == str(context.project_id) if context.project_id and ('project_id' in properties): return str(properties['project_id']) == str(context.project_id) if image.visibility == 'shared': for member in self.list_members(context, image.id): if (context.project_id == member['member_id'] and member['status'] == 'accepted'): return True try: user_id = properties['user_id'] except KeyError: return False return str(user_id) == str(context.user_id) def _convert_timestamps_to_datetimes(image_meta: dict) -> dict: """Returns image with timestamp fields converted to datetime objects.""" for attr in ['created_at', 'updated_at', 'deleted_at']: if image_meta.get(attr): image_meta[attr] = timeutils.parse_isotime(image_meta[attr]) return image_meta # NOTE(bcwaldon): used to store non-string data in glance metadata def _json_loads(properties: dict, attr: str) -> None: prop = properties[attr] if isinstance(prop, str): properties[attr] = jsonutils.loads(prop) def _json_dumps(properties: dict, attr: str) -> None: prop = properties[attr] if not isinstance(prop, str): properties[attr] = jsonutils.dumps(prop) _CONVERT_PROPS = ('block_device_mapping', 'mappings') def _convert(method: Callable[[dict, str], Optional[dict]], metadata: dict) -> dict: metadata = copy.deepcopy(metadata) properties = metadata.get('properties') if properties: for attr in _CONVERT_PROPS: if attr in properties: method(properties, attr) return metadata def _convert_from_string(metadata: dict) -> dict: return _convert(_json_loads, metadata) def _convert_to_string(metadata: dict) -> dict: return _convert(_json_dumps, metadata) def _remove_read_only(image_meta: dict) -> dict: IMAGE_ATTRIBUTES = ['status', 'updated_at', 'created_at', 'deleted_at'] output = copy.deepcopy(image_meta) for attr in IMAGE_ATTRIBUTES: if attr in output: del output[attr] return output def _reraise_translated_image_exception(image_id: str) -> NoReturn: """Transform the exception for the image but keep its traceback intact.""" _exc_type, exc_value, exc_trace = sys.exc_info() assert exc_value is not None new_exc = _translate_image_exception(image_id, exc_value) raise new_exc.with_traceback(exc_trace) def _reraise_translated_exception() -> NoReturn: """Transform the exception but keep its traceback intact.""" _exc_type, exc_value, exc_trace = sys.exc_info() assert exc_value is not None new_exc = _translate_plain_exception(exc_value) raise new_exc.with_traceback(exc_trace) def _translate_image_exception(image_id: str, exc_value: BaseException) -> BaseException: if isinstance(exc_value, (glanceclient.exc.Forbidden, glanceclient.exc.Unauthorized)): return exception.ImageNotAuthorized(image_id=image_id) if isinstance(exc_value, glanceclient.exc.NotFound): return exception.ImageNotFound(image_id=image_id) if isinstance(exc_value, glanceclient.exc.BadRequest): return exception.Invalid(exc_value) return exc_value def _translate_plain_exception(exc_value: BaseException) -> BaseException: if isinstance(exc_value, (glanceclient.exc.Forbidden, glanceclient.exc.Unauthorized)): return exception.NotAuthorized(exc_value) if isinstance(exc_value, glanceclient.exc.NotFound): return exception.NotFound(exc_value) if isinstance(exc_value, glanceclient.exc.BadRequest): return 
exception.Invalid(exc_value) return exc_value def get_remote_image_service(context: context.RequestContext, image_href) -> tuple[GlanceImageService, str]: """Create an image_service and parse the id from the given image_href. The image_href param can be an href of the form 'http://example.com:9292/v1/images/b8b2c6f7-7345-4e2f-afa2-eedaba9cbbe3', or just an id such as 'b8b2c6f7-7345-4e2f-afa2-eedaba9cbbe3'. If the image_href is a standalone id, then the default image service is returned. :param image_href: href that describes the location of an image :returns: a tuple of the form (image_service, image_id) """ # NOTE(bcwaldon): If image_href doesn't look like a URI, assume its a # standalone image ID if '/' not in str(image_href): image_service = get_default_image_service() return image_service, image_href try: (image_id, glance_netloc, use_ssl) = _parse_image_ref(image_href) glance_client = GlanceClientWrapper(context=context, netloc=glance_netloc, use_ssl=use_ssl) except ValueError: raise exception.InvalidImageRef(image_href=image_href) image_service = GlanceImageService(client=glance_client) return image_service, image_id def get_default_image_service() -> GlanceImageService: return GlanceImageService() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/image/image_utils.py0000664000175000017500000016664400000000000020312 0ustar00zuulzuul00000000000000# Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # Copyright (c) 2010 Citrix Systems, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Helper methods to deal with images. 
This is essentially a copy from nova.virt.images.py Some slight modifications, but at some point we should look at maybe pushing this up to Oslo """ import contextlib import errno import io import math import os import re import shutil import tempfile from typing import ContextManager, Generator, Optional import cryptography from cursive import exception as cursive_exception from cursive import signature_utils from eventlet import tpool from oslo_concurrency import processutils from oslo_config import cfg from oslo_log import log as logging from oslo_utils import fileutils from oslo_utils import imageutils from oslo_utils import timeutils from oslo_utils import units from cinder import context from cinder import exception from cinder.i18n import _ from cinder.image import accelerator from cinder.image import glance import cinder.privsep.format_inspector from cinder import utils from cinder.volume import throttling from cinder.volume import volume_utils LOG = logging.getLogger(__name__) image_opts = [ cfg.StrOpt('image_conversion_dir', default='$state_path/conversion', help='Directory used for temporary storage ' 'during image conversion'), cfg.BoolOpt('image_compress_on_upload', default=True, help='When possible, compress images uploaded ' 'to the image service'), cfg.IntOpt('image_conversion_cpu_limit', default=60, help='CPU time limit in seconds to convert the image'), cfg.IntOpt('image_conversion_address_space_limit', default=1, help='Address space limit in gigabytes to convert the image'), cfg.BoolOpt('image_conversion_disable', default=False, help='Disallow image conversion when creating a volume from ' 'an image and when uploading a volume as an image. Image ' 'conversion consumes a large amount of system resources and ' 'can cause performance problems on the cinder-volume node. ' 'When set True, this option disables image conversion.'), cfg.ListOpt('vmdk_allowed_types', default=['streamOptimized', 'monolithicSparse'], help='A list of strings describing the VMDK createType ' 'subformats that are allowed. We recommend that you only ' 'include single-file-with-sparse-header variants to avoid ' 'potential host file exposure when processing named extents ' 'when an image is converted to raw format as it is written ' 'to a volume. If this list is empty, no VMDK images are ' 'allowed.'), cfg.ListOpt('reserved_image_namespaces', help='List of reserved image namespaces that should be ' 'filtered out when uploading a volume as an image back ' 'to Glance. When a volume is created from an image, ' 'Cinder stores the image properties as volume ' 'image metadata, and if the volume is later uploaded as ' 'an image, Cinder will add these properties when it ' 'creates the image in Glance. This can cause problems ' 'for image metadata that are in namespaces that glance ' 'reserves for itself, or when properties (such as an ' 'image signature) cannot apply to the new image, or when ' 'an operator has configured glance property protections ' 'to make some image properties read-only. 
Cinder will ' '*always* filter out image metadata in the namespaces ' '`os_glance`, `img_signature` and `signature_verified`; ' 'this configuration option allows operators to specify ' '*additional* namespaces to be excluded.', default=[]), ] CONF = cfg.CONF CONF.register_opts(image_opts) QEMU_IMG_LIMITS = processutils.ProcessLimits( cpu_time=CONF.image_conversion_cpu_limit, address_space=CONF.image_conversion_address_space_limit * units.Gi) QEMU_IMG_FORMAT_MAP = { # Convert formats of Glance images to how they are processed with qemu-img. 'iso': 'raw', 'vhd': 'vpc', 'ploop': 'parallels', } QEMU_IMG_FORMAT_MAP_INV = {v: k for k, v in QEMU_IMG_FORMAT_MAP.items()} QEMU_IMG_VERSION = None COMPRESSIBLE_IMAGE_FORMATS = ('qcow2',) GLANCE_RESERVED_NAMESPACES = ["os_glance", "img_signature", "signature_verified"] def validate_stores_id(context: context.RequestContext, image_service_store_id: str) -> None: image_service = glance.get_default_image_service() stores_info = image_service.get_stores(context)['stores'] for info in stores_info: if image_service_store_id == info['id']: if info.get('read-only') == "true": raise exception.GlanceStoreReadOnly( store_id=image_service_store_id) return raise exception.GlanceStoreNotFound(store_id=image_service_store_id) def fixup_disk_format(disk_format: str) -> str: """Return the format to be provided to qemu-img convert.""" return QEMU_IMG_FORMAT_MAP.get(disk_format, disk_format) def from_qemu_img_disk_format(disk_format: str) -> str: """Return the conventional format derived from qemu-img format.""" return QEMU_IMG_FORMAT_MAP_INV.get(disk_format, disk_format) def qemu_img_info( path: str, run_as_root: bool = True, force_share: bool = False, allow_qcow2_backing_file: bool = False) -> imageutils.QemuImgInfo: """Return an object containing the parsed output from qemu-img info.""" format_name = cinder.privsep.format_inspector.get_format_if_safe( path=path, allow_qcow2_backing_file=allow_qcow2_backing_file) if format_name is None: LOG.warning('Image/Volume %s failed safety check', path) # NOTE(danms): This is the same exception as would be raised # by qemu_img_info() if the disk format was unreadable or # otherwise unsuitable. raise exception.Invalid( reason=_('Image/Volume failed safety check')) cmd = ['env', 'LC_ALL=C', 'qemu-img', 'info', '-f', format_name, '--output=json'] if force_share: cmd.append('--force-share') cmd.append(path) if os.name == 'nt': cmd = cmd[2:] out, _err = utils.execute(*cmd, run_as_root=run_as_root, prlimit=QEMU_IMG_LIMITS) info = imageutils.QemuImgInfo(out, format='json') # FIXME: figure out a more elegant way to do this if info.file_format == 'raw': # The format_inspector will detect a luks image as 'raw', and then when # we call qemu-img info -f raw above, we don't get any of the luks # format-specific info (some of which is used in the create_volume # flow). So we need to check if this is really a luks container. # (We didn't have to do this in the past because we called # qemu-img info without -f.) cmd = ['env', 'LC_ALL=C', 'qemu-img', 'info', '-f', 'luks', '--output=json'] if force_share: cmd.append('--force-share') cmd.append(path) if os.name == 'nt': cmd = cmd[2:] try: out, _err = utils.execute(*cmd, run_as_root=run_as_root, prlimit=QEMU_IMG_LIMITS) info = imageutils.QemuImgInfo(out, format='json') except processutils.ProcessExecutionError: # we'll just use the info object we already got earlier pass # From Cinder's point of view, any 'luks' formatted images # should be treated as 'raw'. 
(This changes the file_format, but # not any of the format-specific information.) if info.file_format == 'luks': info.file_format = 'raw' return info def get_qemu_img_version() -> Optional[list[int]]: """The qemu-img version will be cached until the process is restarted.""" global QEMU_IMG_VERSION if QEMU_IMG_VERSION is not None: return QEMU_IMG_VERSION info = utils.execute('qemu-img', '--version', check_exit_code=False)[0] pattern = r"qemu-img version ([0-9\.]*)" version = re.match(pattern, info) if not version: LOG.warning("qemu-img is not installed.") return None QEMU_IMG_VERSION = _get_version_from_string(version.groups()[0]) return QEMU_IMG_VERSION def _get_qemu_convert_luks_cmd(src: str, dest: str, out_format: str, src_format: Optional[str] = None, out_subformat: Optional[str] = None, cache_mode: Optional[str] = None, prefix: Optional[tuple] = None, cipher_spec: Optional[dict] = None, passphrase_file: Optional[str] = None, src_passphrase_file: Optional[str] = None, disable_sparse: bool = False) -> list[str]: cmd = ['qemu-img', 'convert'] if prefix: cmd = list(prefix) + cmd if cache_mode: cmd += ('-t', cache_mode) if disable_sparse: cmd += ('-S', '0') obj1 = ['--object', 'secret,id=sec1,format=raw,file=%s' % src_passphrase_file] obj2 = ['--object', 'secret,id=sec2,format=raw,file=%s' % passphrase_file] src_opts = 'encrypt.format=luks,encrypt.key-secret=sec1,' \ 'file.filename=%s' % src image_opts = ['--image-opts', src_opts] output_opts = ['-O', 'luks', '-o', 'key-secret=sec2', dest] command = cmd + obj1 + obj2 + image_opts + output_opts return command def _get_qemu_convert_cmd(src: str, dest: str, out_format: str, src_format: Optional[str] = None, out_subformat: Optional[str] = None, cache_mode: Optional[str] = None, prefix: Optional[tuple] = None, cipher_spec: Optional[dict] = None, passphrase_file: Optional[str] = None, compress: bool = False, src_passphrase_file: Optional[str] = None, disable_sparse: bool = False) -> list[str]: if src_passphrase_file is not None: if passphrase_file is None: message = _("Can't create unencrypted volume %(format)s " "from an encrypted source volume." ) % {'format': out_format} LOG.error(message) # TODO(enriquetaso): handle encrypted->unencrypted raise exception.NotSupportedOperation(operation=message) return _get_qemu_convert_luks_cmd( src, dest, out_format, src_format=src_format, out_subformat=out_subformat, cache_mode=cache_mode, prefix=None, cipher_spec=cipher_spec, passphrase_file=passphrase_file, src_passphrase_file=src_passphrase_file, disable_sparse=disable_sparse) if out_format == 'vhd': # qemu-img still uses the legacy vpc name out_format = 'vpc' cmd = ['qemu-img', 'convert', '-O', out_format] if prefix: cmd = list(prefix) + cmd if cache_mode: cmd += ('-t', cache_mode) if disable_sparse: cmd += ('-S', '0') if CONF.image_compress_on_upload and compress: if out_format in COMPRESSIBLE_IMAGE_FORMATS: cmd += ('-c',) if out_subformat: cmd += ('-o', 'subformat=%s' % out_subformat) # AMI images can be raw or qcow2 but qemu-img doesn't accept "ami" as # an image format, so we use automatic detection. # TODO(geguileo): This fixes unencrypted AMI image case, but we need to # fix the encrypted case. 
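# Illustrative sketch (not part of this module): the simplest command line
# assembled by _get_qemu_convert_cmd() above, with no encryption, throttling,
# compression or subformat options. File names are placeholders.
def example_basic_convert_cmd(src, dest, out_format, src_format=None):
    cmd = ['qemu-img', 'convert', '-O', out_format]
    if src_format:
        # pin the source format to prevent auto-detection
        cmd += ['-f', src_format]
    return cmd + [src, dest]

# example_basic_convert_cmd('disk.qcow2', 'disk.raw', 'raw', 'qcow2') ->
# ['qemu-img', 'convert', '-O', 'raw', '-f', 'qcow2', 'disk.qcow2', 'disk.raw']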
if (src_format or '').lower() not in ('', 'ami'): assert src_format is not None cmd += ['-f', src_format] # prevent detection of format # NOTE(lyarwood): When converting to LUKS add the cipher spec if present # and create a secret for the passphrase, written to a temp file if out_format == 'luks': if cipher_spec: cmd += ('-o', 'cipher-alg=%s,cipher-mode=%s,ivgen-alg=%s' % (cipher_spec['cipher_alg'], cipher_spec['cipher_mode'], cipher_spec['ivgen_alg'])) cmd += ('--object', 'secret,id=luks_sec,format=raw,file=%s' % passphrase_file, '-o', 'key-secret=luks_sec') cmd += [src, dest] return cmd def _get_version_from_string(version_string: str) -> list[int]: return [int(x) for x in version_string.split('.')] def check_qemu_img_version(minimum_version: str) -> None: qemu_version = get_qemu_img_version() if (qemu_version is None or qemu_version < _get_version_from_string(minimum_version)): current_version: Optional[str] if qemu_version: current_version = '.'.join((str(element) for element in qemu_version)) else: current_version = None _msg = _('qemu-img %(minimum_version)s or later is required by ' 'this volume driver. Current qemu-img version: ' '%(current_version)s') % {'minimum_version': minimum_version, 'current_version': current_version} raise exception.VolumeBackendAPIException(data=_msg) def _convert_image( prefix: tuple, source: str, dest: str, out_format: str, out_subformat: Optional[str] = None, src_format: Optional[str] = None, run_as_root: bool = True, cipher_spec: Optional[dict] = None, passphrase_file: Optional[str] = None, compress: bool = False, src_passphrase_file: Optional[str] = None, disable_sparse: bool = False, src_img_info: Optional[imageutils.QemuImgInfo] = None) -> None: """Convert image to other format. NOTE: If the qemu-img convert command fails and this function raises an exception, a non-empty dest file may be left in the filesystem. It is the responsibility of the caller to decide what to do with this file. :param prefix: command prefix, i.e. 
cgexec for throttling :param source: source filename :param dest: destination filename :param out_format: output image format of qemu-img :param out_subformat: output image subformat :param src_format: source image format :param run_as_root: run qemu-img as root :param cipher_spec: encryption details :param passphrase_file: filename containing luks passphrase :param compress: compress w/ qemu-img when possible (best effort) :param src_passphrase_file: filename containing source volume's luks passphrase :param src_img_info: a imageutils.QemuImgInfo object from this image, or None """ # Check whether O_DIRECT is supported and set '-t none' if it is # This is needed to ensure that all data hit the device before # it gets unmapped remotely from the host for some backends # Reference Bug: #1363016 # NOTE(jdg): In the case of file devices qemu does the # flush properly and more efficiently than would be done # setting O_DIRECT, so check for that and skip the # setting for non BLK devs cache_mode: Optional[str] if (utils.is_blk_device(dest) and volume_utils.check_for_odirect_support(source, dest, 'oflag=direct')): cache_mode = 'none' else: # use default cache_mode = None cmd = _get_qemu_convert_cmd(source, dest, out_format=out_format, src_format=src_format, out_subformat=out_subformat, cache_mode=cache_mode, prefix=prefix, cipher_spec=cipher_spec, passphrase_file=passphrase_file, compress=compress, src_passphrase_file=src_passphrase_file, disable_sparse=disable_sparse) start_time = timeutils.utcnow() # If there is not enough space on the conversion partition, include # the partitions's name in the error message. try: utils.execute(*cmd, run_as_root=run_as_root) except processutils.ProcessExecutionError as ex: if "No space left" in ex.stderr and CONF.image_conversion_dir in dest: conversion_dir = CONF.image_conversion_dir while not os.path.ismount(conversion_dir): conversion_dir = os.path.dirname(conversion_dir) message = _("Insufficient free space on %(location)s for image " "conversion.") % {'location': conversion_dir} LOG.error(message) raise duration = timeutils.delta_seconds(start_time, timeutils.utcnow()) # NOTE(jdg): use a default of 1, mostly for unit test, but in # some incredible event this is 0 (cirros image?) don't barf if duration < 1: duration = 1 image_info = src_img_info if not image_info: try: image_info = qemu_img_info(source, run_as_root=run_as_root) except Exception: # NOTE: at this point, the image conversion has already # happened, and all that's left is some performance logging. # So ignoring an exception from qemu_img_info here is not a # security risk. I'm afraid that if we are too strict here # we will cause a regression, given that the converted image # source could be cinder glance_store, Glance, or one of # cinder's own backend drivers. The nfs driver knows # to pass in a src_img_info object, but others may not. # # We are catching the most general Exception here for a # similar reason: the image conversion has already happened. # If the conversion raised a ProcessExecutionError, we would # never have reached this point. But a PEE now is meaningless, # so we ignore it. pass if not image_info or image_info.virtual_size is None: msg = ("The image was successfully converted, but image size " "is unavailable. 
src %(src)s, dest %(dest)s") LOG.info(msg, {"src": source, "dest": dest}) return image_size = image_info.virtual_size fsz_mb = image_size / units.Mi mbps = (fsz_mb / duration) msg = ("Image conversion details: src %(src)s, size %(sz).2f MB, " "duration %(duration).2f sec, destination %(dest)s") LOG.debug(msg, {"src": source, "sz": fsz_mb, "duration": duration, "dest": dest}) msg = "Converted %(sz).2f MB image at %(mbps).2f MB/s" LOG.info(msg, {"sz": fsz_mb, "mbps": mbps}) def convert_image(source: str, dest: str, out_format: str, out_subformat: Optional[str] = None, src_format: Optional[str] = None, run_as_root: bool = True, throttle=None, cipher_spec: Optional[dict] = None, passphrase_file: Optional[str] = None, compress: bool = False, src_passphrase_file: Optional[str] = None, image_id: Optional[str] = None, data: Optional[imageutils.QemuImgInfo] = None, disable_sparse: bool = False) -> None: """Convert image to other format. NOTE: If the qemu-img convert command fails and this function raises an exception, a non-empty dest file may be left in the filesystem. It is the responsibility of the caller to decide what to do with this file. :param source: source filename :param dest: destination filename :param out_format: output image format of qemu-img :param out_subformat: output image subformat :param src_format: source image format (use image_utils.fixup_disk_format() to translate from a Glance format to one recognizable by qemu_img) :param run_as_root: run qemu-img as root :param throttle: a cinder.throttling.Throttle object, or None :param cipher_spec: encryption details :param passphrase_file: filename containing luks passphrase :param compress: compress w/ qemu-img when possible (best effort) :param src_passphrase_file: filename containing source volume's luks passphrase :param image_id: the image ID if this is a Glance image, or None :param data: a imageutils.QemuImgInfo object from this image, or None :raises ImageUnacceptable: when the image fails some format checks :raises ProcessExecutionError: when something goes wrong during conversion """ check_image_format(source, src_format, image_id, data, run_as_root) if not throttle: throttle = throttling.Throttle.get_default() with throttle.subcommand(source, dest) as throttle_cmd: _convert_image(tuple(throttle_cmd['prefix']), source, dest, out_format, out_subformat=out_subformat, src_format=src_format, run_as_root=run_as_root, cipher_spec=cipher_spec, passphrase_file=passphrase_file, compress=compress, src_passphrase_file=src_passphrase_file, disable_sparse=disable_sparse, src_img_info=data) def resize_image(source: str, size: int, run_as_root: bool = False, file_format: Optional[str] = None) -> None: """Changes the virtual size of the image.""" cmd: tuple[str, ...] if file_format: cmd = ('qemu-img', 'resize', '-f', file_format, source, '%sG' % size) else: cmd = ('qemu-img', 'resize', source, '%sG' % size) utils.execute(*cmd, run_as_root=run_as_root) def _verify_image(img_file: io.RawIOBase, verifier) -> None: # This methods must be called from a native thread, as the file I/O may # not yield to other greenthread in some cases, and since the update and # verify operations are CPU bound there would not be any yielding either, # which could lead to thread starvation. 
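# Illustrative sketch (not part of this module) of the throughput figure
# logged after image conversion and fetch above: size in MiB divided by the
# elapsed time, with the duration clamped to at least one second.
def example_transfer_rate_mb_s(size_bytes, duration_seconds):
    duration = max(duration_seconds, 1)
    fsz_mb = size_bytes / (1024 * 1024)
    return fsz_mb / duration

# example_transfer_rate_mb_s(512 * 1024 * 1024, 4.0) -> 128.0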
while True: chunk = img_file.read(1024) if not chunk: break verifier.update(chunk) verifier.verify() def verify_glance_image_signature(context: context.RequestContext, image_service: glance.GlanceImageService, image_id: str, path: str) -> bool: verifier = None image_meta = image_service.show(context, image_id) image_properties = image_meta.get('properties', {}) img_signature = image_properties.get('img_signature') img_sig_hash_method = image_properties.get('img_signature_hash_method') img_sig_cert_uuid = image_properties.get('img_signature_certificate_uuid') img_sig_key_type = image_properties.get('img_signature_key_type') if all(m is None for m in [img_signature, img_sig_cert_uuid, img_sig_hash_method, img_sig_key_type]): # NOTE(tommylikehu): We won't verify the image signature # if none of the signature metadata presents. return False if any(m is None for m in [img_signature, img_sig_cert_uuid, img_sig_hash_method, img_sig_key_type]): LOG.error('Image signature metadata for image %s is ' 'incomplete.', image_id) raise exception.InvalidSignatureImage(image_id=image_id) try: verifier = signature_utils.get_verifier( context=context, img_signature_certificate_uuid=img_sig_cert_uuid, img_signature_hash_method=img_sig_hash_method, img_signature=img_signature, img_signature_key_type=img_sig_key_type, ) except cursive_exception.SignatureVerificationError: message = _('Failed to get verifier for image: %s') % image_id LOG.error(message) raise exception.ImageSignatureVerificationException( reason=message) if verifier: with fileutils.remove_path_on_error(path): with open(path, "rb") as tem_file: try: tpool.execute(_verify_image, tem_file, verifier) LOG.info('Image signature verification succeeded ' 'for image: %s', image_id) return True except cryptography.exceptions.InvalidSignature: message = _('Image signature verification ' 'failed for image: %s') % image_id LOG.error(message) raise exception.ImageSignatureVerificationException( reason=message) except Exception as ex: message = _('Failed to verify signature for ' 'image: %(image)s due to ' 'error: %(error)s ') % {'image': image_id, 'error': ex} LOG.error(message) raise exception.ImageSignatureVerificationException( reason=message) return False def fetch(context: context.RequestContext, image_service: glance.GlanceImageService, image_id: str, path: str, _user_id, _project_id) -> None: # TODO(vish): Improve context handling and add owner and auth data # when it is added to glance. Right now there is no # auth checking in glance, so we assume that access was # checked before we got here. start_time = timeutils.utcnow() with fileutils.remove_path_on_error(path): with open(path, "wb") as image_file: try: image_service.download(context, image_id, tpool.Proxy(image_file)) except IOError as e: if e.errno == errno.ENOSPC: params = {'path': os.path.dirname(path), 'image': image_id} reason = _("No space left in image_conversion_dir " "path (%(path)s) while fetching " "image %(image)s.") % params LOG.exception(reason) raise exception.ImageTooBig(image_id=image_id, reason=reason) reason = ("IOError: %(errno)s %(strerror)s" % {'errno': e.errno, 'strerror': e.strerror}) LOG.error(reason) raise exception.ImageDownloadFailed(image_href=image_id, reason=reason) duration = timeutils.delta_seconds(start_time, timeutils.utcnow()) # NOTE(jdg): use a default of 1, mostly for unit test, but in # some incredible event this is 0 (cirros image?) 
don't barf if duration < 1: duration = 1 fsz_mb = os.stat(image_file.name).st_size / units.Mi mbps = (fsz_mb / duration) msg = ("Image fetch details: dest %(dest)s, size %(sz).2f MB, " "duration %(duration).2f sec") LOG.debug(msg, {"dest": image_file.name, "sz": fsz_mb, "duration": duration}) msg = "Image download %(sz).2f MB at %(mbps).2f MB/s" LOG.info(msg, {"sz": fsz_mb, "mbps": mbps}) def get_qemu_data(image_id: str, has_meta: bool, disk_format_raw: bool, dest: str, run_as_root: bool, force_share: bool = False) -> imageutils.QemuImgInfo: # We may be on a system that doesn't have qemu-img installed. That # is ok if we are working with a RAW image. This logic checks to see # if qemu-img is installed. If not we make sure the image is RAW and # throw an exception if not. Otherwise we stop before needing # qemu-img. Systems with qemu-img will always progress through the # whole function. try: # Use the empty tmp file to make sure qemu_img_info works. data = qemu_img_info(dest, run_as_root=run_as_root, force_share=force_share) # There are a lot of cases that can cause a process execution # error, but until we do more work to separate out the various # cases we'll keep the general catch here except processutils.ProcessExecutionError: data = None if has_meta: if not disk_format_raw: raise exception.ImageUnacceptable( reason=_("qemu-img is not installed and image is not of " "type RAW. Only RAW images can be used if " "qemu-img is not installed."), image_id=image_id) else: raise exception.ImageUnacceptable( reason=_("qemu-img is not installed and the disk " "format is not specified. Only RAW images " "can be used if qemu-img is not installed."), image_id=image_id) return data def check_qcow2_image(image_id: str, data: imageutils.QemuImgInfo) -> None: """Check some rules about qcow2 images. Does not check for a backing_file, because cinder has some legitimate use cases for qcow2 backing files. Makes sure the image: - does not have a data_file :param image_id: the image id :param data: an imageutils.QemuImgInfo object :raises ImageUnacceptable: when the image fails the check """ try: data_file = data.format_specific['data'].get('data-file') except (KeyError, TypeError): LOG.debug('Unexpected response from qemu-img info when processing ' 'image %s: missing format-specific info for a qcow2 image', image_id) msg = _('Cannot determine format-specific information') raise exception.ImageUnacceptable(image_id=image_id, reason=msg) if data_file: LOG.warning("Refusing to process qcow2 file with data-file '%s'", data_file) msg = _('A qcow2 format image is not allowed to have a data file') raise exception.ImageUnacceptable(image_id=image_id, reason=msg) def check_vmdk_image(image_id: str, data: imageutils.QemuImgInfo) -> None: """Check some rules about VMDK images. Make sure the VMDK subformat (the "createType" in vmware docs) is one that we allow as determined by the 'vmdk_allowed_types' configuration option. The default set includes only types that do not reference files outside the VMDK file, which can otherwise be used in exploits to expose host information. 
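    For illustration, 'qemu-img info --output=json' reports this value inside
    the image's format-specific data, roughly (a monolithicSparse image is
    used purely as an example)::

        "format-specific": {"type": "vmdk",
                            "data": {"create-type": "monolithicSparse", ...}}

    and it is that create-type value which is compared against the allowed
    set.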
:param image_id: the image id :param data: an imageutils.QemuImgInfo object :raises ImageUnacceptable: when the VMDK createType is not in the allowed list """ allowed_types = CONF.vmdk_allowed_types if not len(allowed_types): msg = _('Image is a VMDK, but no VMDK createType is allowed') raise exception.ImageUnacceptable(image_id=image_id, reason=msg) try: create_type = data.format_specific['data']['create-type'] except KeyError: msg = _('Unable to determine VMDK createType') raise exception.ImageUnacceptable(image_id=image_id, reason=msg) except TypeError: msg = _('Unable to determine VMDK createType as no format-specific ' 'information is available') raise exception.ImageUnacceptable(image_id=image_id, reason=msg) if create_type not in allowed_types: LOG.warning('Refusing to process VMDK file with createType of %r ' 'which is not in allowed set of: %s', create_type, ','.join(allowed_types)) msg = _('Invalid VMDK create-type specified') raise exception.ImageUnacceptable(image_id=image_id, reason=msg) def check_image_format(source: str, src_format: Optional[str] = None, image_id: Optional[str] = None, data: Optional[imageutils.QemuImgInfo] = None, run_as_root: bool = True) -> None: """Do some image format checks. Verifies that the src_format matches what qemu-img thinks the image format is, and does some vmdk subformat checks. See Bug #1996188. - Does not check for a qcow2 backing file. - Will make a call out to qemu_img if data is None. :param source: filename of the image to check :param src_format: source image format recognized by qemu_img, or None :param image_id: the image ID if this is a Glance image, or None :param data: a imageutils.QemuImgInfo object from this image, or None :param run_as_root: when 'data' is None, call 'qemu-img info' as root :raises ImageUnacceptable: when the image fails some format checks :raises ProcessExecutionError: if 'qemu-img info' fails """ if image_id is None: image_id = 'internal image' if data is None: data = qemu_img_info(source, run_as_root=run_as_root) if data.file_format is None: raise exception.ImageUnacceptable( reason=_("'qemu-img info' parsing failed."), image_id=image_id) if src_format is not None: if src_format.lower() == 'ami': # qemu-img doesn't recognize AMI format; see change Icde4c0f936ce. # We also use lower() here (though nowhere else) to be consistent # with that change. pass elif data.file_format != src_format: LOG.debug("Rejecting image %(image_id)s due to format mismatch. " "src_format: '%(src)s', but qemu-img info reports: " "'%(qemu)s'", {'image_id': image_id, 'src': src_format, 'qemu': data.file_format}) msg = _("The image format was claimed to be '%(src)s' but the " "image data appears to be in a different format.") raise exception.ImageUnacceptable( image_id=image_id, reason=(msg % {'src': src_format})) if data.file_format == 'vmdk': check_vmdk_image(image_id, data) if data.file_format == 'qcow2': check_qcow2_image(image_id, data) def fetch_verify_image(context: context.RequestContext, image_service: glance.GlanceImageService, image_id: str, dest: str) -> None: fetch(context, image_service, image_id, dest, None, None) image_meta = image_service.show(context, image_id) with fileutils.remove_path_on_error(dest): has_meta = False if not image_meta else True try: format_raw = True if image_meta['disk_format'] == 'raw' else False except TypeError: format_raw = False data = get_qemu_data(image_id, has_meta, format_raw, dest, True) # We can only really do verification of the image if we have # qemu data to use. 
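    # The checks below reject an image whose format cannot be parsed, any
    # image that references a backing file, and qcow2/vmdk images that fail
    # their format-specific rules.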
# NOTE: We won't have data if qemu_img is not installed *and* the # disk_format recorded in Glance is raw (otherwise an ImageUnacceptable # would have been raised already). So this isn't as bad as it looks. if data is not None: fmt = data.file_format if fmt is None: raise exception.ImageUnacceptable( reason=_("'qemu-img info' parsing failed."), image_id=image_id) backing_file = data.backing_file if backing_file is not None: raise exception.ImageUnacceptable( image_id=image_id, reason=(_("fmt=%(fmt)s backed by: %(backing_file)s") % {'fmt': fmt, 'backing_file': backing_file})) # a VMDK can have a backing file, but we have to check for # it differently if fmt == 'vmdk': check_vmdk_image(image_id, data) # Bug #2059809: a qcow2 can have a data file that's similar # to a backing file and is also unacceptable if fmt == 'qcow2': check_qcow2_image(image_id, data) def fetch_to_vhd(context: context.RequestContext, image_service: glance.GlanceImageService, image_id: str, dest: str, blocksize: int, volume_subformat: Optional[str] = None, user_id: Optional[str] = None, project_id: Optional[str] = None, run_as_root: bool = True, disable_sparse: bool = False) -> None: fetch_to_volume_format(context, image_service, image_id, dest, 'vpc', blocksize, volume_subformat=volume_subformat, user_id=user_id, project_id=project_id, run_as_root=run_as_root, disable_sparse=disable_sparse) def fetch_to_raw(context: context.RequestContext, image_service: glance.GlanceImageService, image_id: str, dest: str, blocksize: int, user_id: Optional[str] = None, project_id: Optional[str] = None, size: Optional[int] = None, run_as_root: bool = True, disable_sparse: bool = False) -> None: fetch_to_volume_format(context, image_service, image_id, dest, 'raw', blocksize, user_id=user_id, project_id=project_id, size=size, run_as_root=run_as_root, disable_sparse=disable_sparse) def check_image_conversion_disable(disk_format, volume_format, image_id, upload=False): if CONF.image_conversion_disable and disk_format != volume_format: if upload: msg = _("Image conversion is disabled. The image disk_format " "you have requested is '%(disk_format)s', but your " "volume can only be uploaded in the format " "'%(volume_format)s'.") else: msg = _("Image conversion is disabled. The volume type you have " "requested requires that the image it is being created " "from be in '%(volume_format)s' format, but the image " "you are using has the disk_format property " "'%(disk_format)s'. 
You must use an image with the " "disk_format property '%(volume_format)s' to create a " "volume of this type.") raise exception.ImageConversionNotAllowed( reason=msg % {'disk_format': disk_format, 'volume_format': volume_format}, image_id=image_id) def fetch_to_volume_format(context: context.RequestContext, image_service: glance.GlanceImageService, image_id: str, dest: str, volume_format: str, blocksize: int, volume_subformat: Optional[str] = None, user_id: Optional[str] = None, project_id: Optional[str] = None, size: Optional[int] = None, run_as_root: bool = True, disable_sparse: bool = False) -> None: qemu_img = True image_meta = image_service.show(context, image_id) check_image_conversion_disable( image_meta['disk_format'], volume_format, image_id, upload=False) allow_image_compression = CONF.allow_compression_on_image_upload if image_meta and (image_meta.get('container_format') == 'compressed'): if allow_image_compression is False: compression_param = {'container_format': image_meta.get('container_format')} raise exception.ImageUnacceptable( image_id=image_id, reason=_("Image compression disallowed, " "but container_format is " "%(container_format)s.") % compression_param) # NOTE(avishay): I'm not crazy about creating temp files which may be # large and cause disk full errors which would confuse users. # Unfortunately it seems that you can't pipe to 'qemu-img convert' because # it seeks. Maybe we can think of something for a future version. with temporary_file(prefix='image_download_%s_' % image_id) as tmp: has_meta = False if not image_meta else True try: format_raw = True if image_meta['disk_format'] == 'raw' else False except TypeError: format_raw = False # Probe using the empty tmp file to see if qemu-img is available. # If it's not, and the disk_format recorded in Glance is not 'raw', # this will raise ImageUnacceptable data = get_qemu_data(image_id, has_meta, format_raw, tmp, run_as_root) if data is None: qemu_img = False tmp_images = TemporaryImages.for_image_service(image_service) tmp_image = tmp_images.get(context, image_id) if tmp_image: tmp = tmp_image else: fetch(context, image_service, image_id, tmp, user_id, project_id) # NOTE(ZhengMa): This is used to do image decompression on image # downloading with 'compressed' container_format. It is a # transparent level between original image downloaded from # Glance and Cinder image service. So the source file path is # the same with destination file path. if image_meta.get('container_format') == 'compressed': LOG.debug("Found image with compressed container format") if not accelerator.is_gzip_compressed(tmp): raise exception.ImageUnacceptable( image_id=image_id, reason=_("Unsupported compressed image format found. " "Only gzip is supported currently")) accel = accelerator.ImageAccel(tmp, tmp) accel.decompress_img(run_as_root=run_as_root) if is_xenserver_format(image_meta): replace_xenserver_image_with_coalesced_vhd(tmp) if not qemu_img: # qemu-img is not installed but we do have a RAW image. As a # result we only need to copy the image to the destination and then # return. 
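        # volume_utils.copy_volume() performs a plain block copy; the length
        # is taken from the Glance image metadata and rounded up to whole
        # megabytes.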
LOG.debug('Copying image from %(tmp)s to volume %(dest)s - ' 'size: %(size)s', {'tmp': tmp, 'dest': dest, 'size': image_meta['size']}) image_size_m = math.ceil(float(image_meta['size']) / units.Mi) volume_utils.copy_volume(tmp, dest, image_size_m, blocksize) return data = qemu_img_info(tmp, run_as_root=run_as_root) # NOTE(xqueralt): If the image virtual size doesn't fit in the # requested volume there is no point on resizing it because it will # generate an unusable image. if size is not None: check_virtual_size(data.virtual_size, size, image_id) fmt = data.file_format if fmt is None: raise exception.ImageUnacceptable( reason=_("'qemu-img info' parsing failed."), image_id=image_id) backing_file = data.backing_file if backing_file is not None: raise exception.ImageUnacceptable( image_id=image_id, reason=_("fmt=%(fmt)s backed by:%(backing_file)s") % {'fmt': fmt, 'backing_file': backing_file, }) # NOTE(jdg): I'm using qemu-img convert to write # to the volume regardless if it *needs* conversion or not # TODO(avishay): We can speed this up by checking if the image is raw # and if so, writing directly to the device. However, we need to keep # check via 'qemu-img info' that what we copied was in fact a raw # image and not a different format with a backing file, which may be # malicious. # FIXME: revisit the above 2 comments. We already have an exception # above for RAW format images when qemu-img is not available, and I'm # pretty sure that the backing file exploit only happens when # converting from some format that supports a backing file TO raw ... # a bit-for-bit copy of a qcow2 with backing file will copy the backing # file *reference* but not its *content*. disk_format = fixup_disk_format(image_meta['disk_format']) LOG.debug("%s was %s, converting to %s", image_id, fmt, volume_format) convert_image(tmp, dest, volume_format, out_subformat=volume_subformat, src_format=disk_format, run_as_root=run_as_root, image_id=image_id, data=data, disable_sparse=disable_sparse) @contextlib.contextmanager def chown_if_needed(volume_path: str) -> Generator[None, None, None]: if os.name == 'nt' or os.access(volume_path, os.R_OK): yield else: with utils.temporary_chown(volume_path): yield def upload_volume(context: context.RequestContext, image_service: glance.GlanceImageService, image_meta: dict, volume_path: str, volume_fd = None, volume_format: str = 'raw', run_as_root: bool = True, compress: bool = True, store_id: Optional[str] = None, base_image_ref: Optional[str] = None) -> None: # NOTE: You probably want to use volume_utils.upload_volume(), # not this function. 
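    # Overall flow: if the volume is already in the image's requested
    # disk_format, stream it to the image service as-is; otherwise convert it
    # to a temporary file, verify the converted format, optionally compress
    # it (when container_format is 'compressed'), and upload the result.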
image_id = image_meta['id'] check_image_conversion_disable( image_meta['disk_format'], volume_format, image_id, upload=True) if image_meta.get('container_format') != 'compressed': if (image_meta['disk_format'] == volume_format): LOG.debug("%s was %s, no need to convert to %s", image_id, volume_format, image_meta['disk_format']) if volume_fd is not None: image_service.update(context, image_id, {}, tpool.Proxy(volume_fd), store_id=store_id, base_image_ref=base_image_ref) else: with chown_if_needed(volume_path): with open(volume_path, 'rb') as image_file: image_service.update(context, image_id, {}, tpool.Proxy(image_file), store_id=store_id, base_image_ref=base_image_ref) return with temporary_file(prefix='vol_upload_') as tmp: LOG.debug("%s was %s, converting to %s", image_id, volume_format, image_meta['disk_format']) data = qemu_img_info(volume_path, run_as_root=run_as_root) backing_file = data.backing_file fmt = data.file_format if backing_file is not None: # Disallow backing files as a security measure. # This prevents a user from writing an image header into a raw # volume with a backing file pointing to data they wish to # access. raise exception.ImageUnacceptable( image_id=image_id, reason=_("fmt=%(fmt)s backed by:%(backing_file)s") % {'fmt': fmt, 'backing_file': backing_file}) out_format = fixup_disk_format(image_meta['disk_format']) convert_image(volume_path, tmp, out_format, run_as_root=run_as_root, compress=compress, image_id=image_id, data=data) data = qemu_img_info(tmp, run_as_root=run_as_root) if data.file_format != out_format: raise exception.ImageUnacceptable( image_id=image_id, reason=_("Converted to %(f1)s, but format is now %(f2)s") % {'f1': out_format, 'f2': data.file_format}) # NOTE(ZhengMa): This is used to do image compression on image # uploading with 'compressed' container_format. # Compress file 'tmp' in-place if image_meta.get('container_format') == 'compressed': LOG.debug("Container_format set to 'compressed', compressing " "image before uploading.") accel = accelerator.ImageAccel(tmp, tmp) accel.compress_img(run_as_root=run_as_root) with open(tmp, 'rb') as image_file: image_service.update(context, image_id, {}, tpool.Proxy(image_file), store_id=store_id, base_image_ref=base_image_ref) def check_virtual_size(virtual_size: float, volume_size: int, image_id: str) -> int: virtual_size = int(math.ceil(float(virtual_size) / units.Gi)) if virtual_size > volume_size: params = {'image_size': virtual_size, 'volume_size': volume_size} reason = _("Image virtual size is %(image_size)dGB" " and doesn't fit in a volume of size" " %(volume_size)dGB.") % params raise exception.ImageUnacceptable(image_id=image_id, reason=reason) return virtual_size def check_available_space(dest: str, image_size: int, image_id: str) -> None: if not os.path.isdir(dest): dest = os.path.dirname(dest) free_space = shutil.disk_usage(dest).free if free_space <= image_size: msg = ('There is no space on %(dest_dir)s to convert image. ' 'Requested: %(image_size)s, available: %(free_space)s.' 
) % {'dest_dir': dest, 'image_size': image_size, 'free_space': free_space} raise exception.ImageTooBig(image_id=image_id, reason=msg) def is_xenserver_format(image_meta: dict) -> bool: return ( image_meta['disk_format'] == 'vhd' and image_meta['container_format'] == 'ovf' ) def set_vhd_parent(vhd_path: str, parentpath: str) -> None: utils.execute('vhd-util', 'modify', '-n', vhd_path, '-p', parentpath) def extract_targz(archive_name: str, target: str) -> None: utils.execute('tar', '-xzf', archive_name, '-C', target) def fix_vhd_chain(vhd_chain: list[str]) -> None: for child, parent in zip(vhd_chain[:-1], vhd_chain[1:]): set_vhd_parent(child, parent) def get_vhd_size(vhd_path: str) -> int: out, _err = utils.execute('vhd-util', 'query', '-n', vhd_path, '-v') return int(out) def resize_vhd(vhd_path: str, size: int, journal: str) -> None: utils.execute( 'vhd-util', 'resize', '-n', vhd_path, '-s', '%d' % size, '-j', journal) def coalesce_vhd(vhd_path: str) -> None: utils.execute( 'vhd-util', 'coalesce', '-n', vhd_path) def create_temporary_file(*args: str, **kwargs: str) -> str: fileutils.ensure_tree(CONF.image_conversion_dir) fd, tmp = tempfile.mkstemp(dir=CONF.image_conversion_dir, *args, **kwargs) # type: ignore os.close(fd) return tmp def cleanup_temporary_file(backend_name: str) -> None: temp_dir = CONF.image_conversion_dir if (not temp_dir or not os.path.exists(temp_dir)): LOG.debug("Configuration image_conversion_dir is None or the path " "doesn't exist.") return try: # TODO(wanghao): Consider using os.scandir for better performance in # future when cinder only supports Python version 3.5+. files = os.listdir(CONF.image_conversion_dir) # NOTE(wanghao): For multi-backend case, if one backend was slow # starting but another backend is up and doing an image conversion, # init_host should only clean the tmp files which belongs to its # backend. 
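    # Temporary conversion files carry the backend name as a suffix, so only
    # files ending in this backend's name are removed here.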
for tmp_file in files: if tmp_file.endswith(backend_name): path = os.path.join(temp_dir, tmp_file) os.remove(path) except OSError as e: LOG.warning("Exception caught while clearing temporary image " "files: %s", e) @contextlib.contextmanager def temporary_file(*args: str, **kwargs) -> Generator[str, None, None]: tmp = None try: tmp = create_temporary_file(*args, **kwargs) yield tmp finally: if tmp: fileutils.delete_if_exists(tmp) def temporary_dir() -> ContextManager[str]: fileutils.ensure_tree(CONF.image_conversion_dir) return utils.tempdir(dir=CONF.image_conversion_dir) def coalesce_chain(vhd_chain: list[str]) -> str: for child, parent in zip(vhd_chain[:-1], vhd_chain[1:]): with temporary_dir() as directory_for_journal: size = get_vhd_size(child) journal_file = os.path.join( directory_for_journal, 'vhd-util-resize-journal') resize_vhd(parent, size, journal_file) coalesce_vhd(child) return vhd_chain[-1] def discover_vhd_chain(directory: str) -> list[str]: counter = 0 chain = [] while True: fpath = os.path.join(directory, '%d.vhd' % counter) if os.path.exists(fpath): chain.append(fpath) else: break counter += 1 return chain def replace_xenserver_image_with_coalesced_vhd(image_file: str) -> None: with temporary_dir() as tempdir: extract_targz(image_file, tempdir) chain = discover_vhd_chain(tempdir) fix_vhd_chain(chain) coalesced = coalesce_chain(chain) fileutils.delete_if_exists(image_file) os.rename(coalesced, image_file) def decode_cipher(cipher_spec: str, key_size: int) -> dict[str, str]: """Decode a dm-crypt style cipher specification string The assumed format being cipher-chainmode-ivmode, similar to that documented under linux/Documentation/admin-guide/device-mapper/dm-crypt.txt in the kernel source tree. Cinder does not support the [:keycount] or [:ivopts] options. """ try: cipher_alg, cipher_mode, ivgen_alg = cipher_spec.split('-') except ValueError: raise exception.InvalidVolumeType( reason="Invalid cipher field in encryption type") cipher_alg = cipher_alg + '-' + str(key_size) return {'cipher_alg': cipher_alg, 'cipher_mode': cipher_mode, 'ivgen_alg': ivgen_alg} class TemporaryImages(object): """Manage temporarily downloaded images to avoid downloading it twice. In the 'with TemporaryImages.fetch(image_service, ctx, image_id) as tmp' clause, 'tmp' can be used as the downloaded image path. In addition, image_utils.fetch() will use the pre-fetched image by the TemporaryImages. This is useful to inspect image contents before conversion. 
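    A minimal usage sketch (image_service, ctx and image_id are assumed to be
    supplied by the caller)::

        with TemporaryImages.fetch(image_service, ctx, image_id) as tmp:
            data = qemu_img_info(tmp)
            # while inside this block, fetch_to_volume_format() for the same
            # image and user reuses 'tmp' instead of downloading it again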
""" def __init__(self, image_service: glance.GlanceImageService): self.temporary_images: dict[str, dict] = {} self.image_service = image_service image_service.temp_images = self @staticmethod def for_image_service( image_service: glance.GlanceImageService) -> 'TemporaryImages': instance = image_service.temp_images if instance: return instance return TemporaryImages(image_service) @classmethod @contextlib.contextmanager def fetch(cls, image_service: glance.GlanceImageService, context: context.RequestContext, image_id: str, suffix: Optional[str] = '') -> Generator[str, None, None]: tmp_images = cls.for_image_service(image_service).temporary_images with temporary_file(prefix='image_fetch_%s_' % image_id, suffix=suffix) as tmp: fetch_verify_image(context, image_service, image_id, tmp) user = context.user_id if not tmp_images.get(user): tmp_images[user] = {} tmp_images[user][image_id] = tmp LOG.debug("Temporary image %(id)s is fetched for user %(user)s.", {'id': image_id, 'user': user}) yield tmp del tmp_images[user][image_id] LOG.debug("Temporary image %(id)s for user %(user)s is deleted.", {'id': image_id, 'user': user}) def get(self, context: context.RequestContext, image_id: str): user = context.user_id if not self.temporary_images.get(user): return None return self.temporary_images[user].get(image_id) def _filter_out_metadata(metadata, filter_keys): new_metadata = {} for k, v in metadata.items(): if any(k.startswith(filter_key) for filter_key in filter_keys): continue new_metadata[k] = v return new_metadata def filter_out_reserved_namespaces_metadata( metadata: Optional[dict[str, str]]) -> dict[str, str]: reserved_name_spaces = GLANCE_RESERVED_NAMESPACES.copy() if CONF.reserved_image_namespaces: for image_namespace in CONF.reserved_image_namespaces: if image_namespace not in reserved_name_spaces: reserved_name_spaces.append(image_namespace) if not metadata: LOG.debug("No metadata to be filtered.") return {} new_metadata = _filter_out_metadata(metadata, reserved_name_spaces) # NOTE(ganso): handle adjustment of metadata structure performed by # the cinder.volume.api.API._merge_volume_image_meta() method if 'properties' in new_metadata: new_metadata['properties'] = _filter_out_metadata( metadata['properties'], reserved_name_spaces) LOG.debug("The metadata set [%s] was filtered using the reserved name " "spaces [%s], and the result is [%s].", metadata, reserved_name_spaces, new_metadata) return new_metadata ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315577.0791183 cinder-27.0.0/cinder/interface/0000775000175000017500000000000000000000000016273 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/interface/__init__.py0000664000175000017500000000211700000000000020405 0ustar00zuulzuul00000000000000# Copyright 2016 Dell Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
# _volume_register = [] _backup_register = [] _fczm_register = [] def volumedriver(cls): """Decorator for concrete volume driver implementations.""" _volume_register.append(cls) return cls def backupdriver(cls): """Decorator for concrete backup driver implementations.""" _backup_register.append(cls) return cls def fczmdriver(cls): """Decorator for concrete fibre channel zone manager drivers.""" _fczm_register.append(cls) return cls ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/interface/backup_chunked_driver.py0000664000175000017500000000562700000000000023200 0ustar00zuulzuul00000000000000# Copyright 2016 Dell Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # """ Backup driver with 'chunked' backup operations. """ from cinder.interface import backup_driver class BackupChunkedDriver(backup_driver.BackupDriver): """Backup driver that supports 'chunked' backups.""" def put_container(self, container): """Create the container if needed. No failure if it pre-exists. :param container: The container to write into. """ def get_container_entries(self, container, prefix): """Get container entry names. :param container: The container from which to get entries. :param prefix: The prefix used to match entries. """ def get_object_writer(self, container, object_name, extra_metadata=None): """Returns a writer which stores the chunk data in backup repository. :param container: The container to write to. :param object_name: The object name to write. :param extra_metadata: Extra metadata to be included. :returns: A context handler that can be used in a "with" context. """ def get_object_reader(self, container, object_name, extra_metadata=None): """Returns a reader object for the backed up chunk. :param container: The container to read from. :param object_name: The object name to read. :param extra_metadata: Extra metadata to be included. """ def delete_object(self, container, object_name): """Delete object from container. :param container: The container to modify. :param object_name: The object name to delete. """ def update_container_name(self, backup, container): """Allows sub-classes to override container name. This method exists so that sub-classes can override the container name as it comes in to the driver in the backup object. Implementations should return None if no change to the container name is desired. """ def get_extra_metadata(self, backup, volume): """Return extra metadata to use in prepare_backup. This method allows for collection of extra metadata in prepare_backup() which will be passed to get_object_reader() and get_object_writer(). Subclass extensions can use this extra information to optimize data transfers. 
:returns: json serializable object """ ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/interface/backup_driver.py0000664000175000017500000001313100000000000021464 0ustar00zuulzuul00000000000000# Copyright 2016 Dell Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # """ Core backup driver interface. All backup drivers should support this interface as a bare minimum. """ from cinder.interface import base class BackupDriver(base.CinderInterface): """Backup driver required interface.""" def get_metadata(self, volume_id): """Get volume metadata. Returns a json-encoded dict containing all metadata and the restore version i.e. the version used to decide what actually gets restored from this container when doing a backup restore. Typically best to use py:class:`BackupMetadataAPI` for this. :param volume_id: The ID of the volume. :returns: json-encoded dict of metadata. """ def put_metadata(self, volume_id, json_metadata): """Set volume metadata. Typically best to use py:class:`BackupMetadataAPI` for this. :param volume_id: The ID of the volume. :param json_metadata: The json-encoded dict of metadata. """ def backup(self, backup, volume_file, backup_metadata=False): """Start a backup of a specified volume. If backup['parent_id'] is given, then an incremental backup should be performed. If the parent backup is of different size, a full backup should be performed to ensure all data is included. :param backup: The backup information. :param volume_file: The volume or file to write the backup to. :param backup_metadata: Whether to include volume metadata in the backup. The variable structure of backup in the following format:: { 'id': id, 'availability_zone': availability_zone, 'service': driver_name, 'user_id': context.user_id, 'project_id': context.project_id, 'display_name': name, 'display_description': description, 'volume_id': volume_id, 'status': fields.BackupStatus.CREATING, 'container': container, 'parent_id': parent_id, 'size': size, 'host': host, 'snapshot_id': snapshot_id, 'data_timestamp': data_timestamp, } service: backup driver parent_id: parent backup id size: equal to volume size data_timestamp: backup creation time """ def restore(self, backup, volume_id, volume_file): """Restore volume from a backup. :param backup: The backup information. :param volume_id: The volume to be restored. :param volume_file: The volume or file to read the data from. """ def delete_backup(self, backup): """Delete a backup from the backup store. :param backup: The backup to be deleted. """ def export_record(self, backup): """Export driver specific backup record information. If backup backend needs additional driver specific information to import backup record back into the system it must override this method and return it as a dictionary so it can be serialized into a string. Default backup driver implementation has no extra information. 
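        A purely hypothetical illustration: a driver that keeps its backups in
        an object store might return something like
        {'container': backup.container, 'object_prefix': 'backup-1234'}; the
        keys are entirely driver defined.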
:param backup: backup object to export :returns: driver_info - dictionary with extra information """ def import_record(self, backup, driver_info): """Import driver specific backup record information. If backup backend needs additional driver specific information to import backup record back into the system it must override this method since it will be called with the extra information that was provided by export_record when exporting the backup. Default backup driver implementation does nothing since it didn't export any specific data in export_record. :param backup: backup object to export :param driver_info: dictionary with driver specific backup record information :returns: None """ def check_for_setup_error(self): """Method for checking if backup backend is successfully installed. Depends on storage backend limitations and driver implementation this method could check if all needed config options are configurated well or try to connect to the storage to verify driver can do it without any issues. A dummy default is provided. This method can be omitted from driver. :returns: None :raises InvalidConfigurationValue: raise this if you detect a problem during a configuration check :raises BackupDriverException: raise this or one of its more specific subclasses if you detect setup problems other than invalid configuration :raises Exception: refrain from raising generic exceptions, although we catch them for the benefit of legacy code """ ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/interface/base.py0000664000175000017500000000636700000000000017573 0ustar00zuulzuul00000000000000# Copyright 2016 Dell Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # import abc import inspect def _get_arg_count(method): """Get the number of positional parameters for a method. :param method: The method to check. :returns: The number of positional parameters for the method. """ if not method: return 0 arg_spec = inspect.getfullargspec(method) return len(arg_spec[0]) def _get_method_info(cls): """Get all methods defined in a class. Note: This will only return public methods and their associated arg count. :param cls: The class to inspect. :returns: `Dict` of method names with a tuple of the method and their arg counts. """ result = {} methods = inspect.getmembers(cls, inspect.ismethod) for (name, method) in methods: if name.startswith('_'): # Skip non-public methods continue result[name] = (method, _get_arg_count(method)) return result class CinderInterface(object, metaclass=abc.ABCMeta): """Interface base class for Cinder. Cinder interfaces should inherit from this class to support indirect inheritance evaluation. This can be used to validate compliance to an interface without requiring that the class actually be inherited from the same base class. 
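    For example, a driver class that defines every public method of a given
    interface is reported as a subclass of that interface by issubclass() and
    isinstance() even though it does not inherit from it, because
    __subclasshook__ below compares the public method names of the two
    classes rather than their inheritance chains.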
""" _method_cache = None @classmethod def _get_methods(cls): if not cls._method_cache: cls._method_cache = _get_method_info(cls) return cls._method_cache @classmethod def __subclasshook__(cls, other_cls): """Custom class inheritance evaluation. :param cls: The CinderInterface to check against. :param other_cls: The class to be checked if it implements our interface. """ interface_methods = cls._get_methods() driver_methods = _get_method_info(other_cls) interface_keys = interface_methods.keys() driver_keys = driver_methods.keys() matching_count = len(set(interface_keys) & set(driver_keys)) if matching_count != len(interface_keys): # Missing some methods, does not implement this interface or is # missing something. return NotImplemented # TODO(smcginnis) Add method signature checking. # We know all methods are there, now make sure they look right. # Unfortunately the methods can be obfuscated by certain decorators, # so we need to find a better way to pull out the real method # signatures. # driver_methods[method_name][0].func_closure.cell_contents works # for most cases but not all. # AST might work instead of using introspect. return True ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/interface/fczm_driver.py0000664000175000017500000000475000000000000021165 0ustar00zuulzuul00000000000000# Copyright 2016 Dell Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # """ Core fibre channel zone manager driver interface. All fczm drivers should support this interface as a bare minimum. """ from cinder.interface import base class FibreChannelZoneManagerDriver(base.CinderInterface): """FCZM driver required interface.""" def add_connection(self, fabric, initiator_target_map, host_name=None, storage_system=None): """Add a new initiator<>target connection. All implementing drivers should provide concrete implementation for this API. :param fabric: Fabric name from cinder.conf file :param initiator_target_map: Mapping of initiator to list of targets .. code-block:: python Example initiator_target_map: { '10008c7cff523b01': ['20240002ac000a50', '20240002ac000a40'] } Note that WWPN can be in lower or upper case and can be ':' separated strings. """ def delete_connection(self, fabric, initiator_target_map, host_name=None, storage_system=None): """Delete an initiator<>target connection. :param fabric: Fabric name from cinder.conf file :param initiator_target_map: Mapping of initiator to list of targets .. code-block:: python Example initiator_target_map: { '10008c7cff523b01': ['20240002ac000a50', '20240002ac000a40'] } Note that WWPN can be in lower or upper case and can be ':' separated strings. """ def get_san_context(self, target_wwn_list): """Get SAN context for end devices. :param target_wwn_list: Mapping of initiator to list of targets Example initiator_target_map: ['20240002ac000a50', '20240002ac000a40'] Note that WWPN can be in lower or upper case and can be ':' separated strings. 
""" ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/interface/util.py0000664000175000017500000000500500000000000017622 0ustar00zuulzuul00000000000000# Copyright 2016 Dell Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # import fnmatch import inspect import os from cinder import interface def _ensure_loaded(start_path): """Loads everything in a given path. This will make sure all classes have been loaded and therefore all decorators have registered class. :param start_path: The starting path to load. """ for root, folder, files in os.walk(start_path): for phile in fnmatch.filter(files, '*.py'): path = os.path.join(root, phile) try: __import__( path.replace('/', '.')[:-3], globals(), locals()) except Exception: # Really don't care here pass def get_volume_drivers(): """Get a list of all volume drivers.""" _ensure_loaded('cinder/volume/drivers') return [DriverInfo(x) for x in interface._volume_register] def get_backup_drivers(): """Get a list of all backup drivers.""" _ensure_loaded('cinder/backup/drivers') return [DriverInfo(x) for x in interface._backup_register] def get_fczm_drivers(): """Get a list of all fczm drivers.""" _ensure_loaded('cinder/zonemanager/drivers') return [DriverInfo(x) for x in interface._fczm_register] class DriverInfo(object): """Information about driver implementations.""" def __init__(self, cls): self.cls = cls self.desc = cls.__doc__ self.class_name = cls.__name__ self.class_fqn = '{}.{}'.format(inspect.getmodule(cls).__name__, self.class_name) self.version = getattr(cls, 'VERSION', None) self.ci_wiki_name = getattr(cls, 'CI_WIKI_NAME', None) self.supported = getattr(cls, 'SUPPORTED', True) self.driver_options = cls.get_driver_options() def __str__(self): return self.class_name def __repr__(self): return self.class_fqn def __hash__(self): return hash(self.class_fqn) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/interface/volume_consistencygroup_driver.py0000664000175000017500000002623300000000000025233 0ustar00zuulzuul00000000000000# Copyright 2016 Dell Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # """ Consistency group volume driver interface. 
""" from cinder.interface import base class VolumeConsistencyGroupDriver(base.CinderInterface): """Interface for drivers that support consistency groups.""" def create_consistencygroup(self, context, group): """Creates a consistencygroup. :param context: the context of the caller. :param group: the dictionary of the consistency group to be created. :returns: model_update model_update will be in this format: {'status': xxx, ......}. If the status in model_update is 'error', the manager will throw an exception and it will be caught in the try-except block in the manager. If the driver throws an exception, the manager will also catch it in the try-except block. The group status in the db will be changed to 'error'. For a successful operation, the driver can either build the model_update and return it or return None. The group status will be set to 'available'. """ def create_consistencygroup_from_src(self, context, group, volumes, cgsnapshot=None, snapshots=None, source_cg=None, source_vols=None): """Creates a consistencygroup from source. :param context: the context of the caller. :param group: the dictionary of the consistency group to be created. :param volumes: a list of volume dictionaries in the group. :param cgsnapshot: the dictionary of the cgsnapshot as source. :param snapshots: a list of snapshot dictionaries in the cgsnapshot. :param source_cg: the dictionary of a consistency group as source. :param source_vols: a list of volume dictionaries in the source_cg. :returns: model_update, volumes_model_update The source can be cgsnapshot or a source cg. param volumes is retrieved directly from the db. It is a list of cinder.db.sqlalchemy.models.Volume to be precise. It cannot be assigned to volumes_model_update. volumes_model_update is a list of dictionaries. It has to be built by the driver. An entry will be in this format: {'id': xxx, 'status': xxx, ......}. model_update will be in this format: {'status': xxx, ......}. To be consistent with other volume operations, the manager will assume the operation is successful if no exception is thrown by the driver. For a successful operation, the driver can either build the model_update and volumes_model_update and return them or return None, None. """ def delete_consistencygroup(self, context, group, volumes): """Deletes a consistency group. :param context: the context of the caller. :param group: the dictionary of the consistency group to be deleted. :param volumes: a list of volume dictionaries in the group. :returns: model_update, volumes_model_update param volumes is retrieved directly from the db. It is a list of cinder.db.sqlalchemy.models.Volume to be precise. It cannot be assigned to volumes_model_update. volumes_model_update is a list of dictionaries. It has to be built by the driver. An entry will be in this format: {'id': xxx, 'status': xxx, ......}. model_update will be in this format: {'status': xxx, ......}. The driver should populate volumes_model_update and model_update and return them. The manager will check volumes_model_update and update db accordingly for each volume. If the driver successfully deleted some volumes but failed to delete others, it should set statuses of the volumes accordingly so that the manager can update db correctly. If the status in any entry of volumes_model_update is 'error_deleting' or 'error', the status in model_update will be set to the same if it is not already 'error_deleting' or 'error'. 
If the status in model_update is 'error_deleting' or 'error', the manager will raise an exception and the status of the group will be set to 'error' in the db. If volumes_model_update is not returned by the driver, the manager will set the status of every volume in the group to 'error' in the except block. If the driver raises an exception during the operation, it will be caught by the try-except block in the manager. The statuses of the group and all volumes in it will be set to 'error'. For a successful operation, the driver can either build the model_update and volumes_model_update and return them or return None, None. The statuses of the group and all volumes will be set to 'deleted' after the manager deletes them from db. """ def update_consistencygroup(self, context, group, add_volumes=None, remove_volumes=None): """Updates a consistency group. :param context: the context of the caller. :param group: the dictionary of the consistency group to be updated. :param add_volumes: a list of volume dictionaries to be added. :param remove_volumes: a list of volume dictionaries to be removed. :returns: model_update, add_volumes_update, remove_volumes_update model_update is a dictionary that the driver wants the manager to update upon a successful return. If None is returned, the manager will set the status to 'available'. add_volumes_update and remove_volumes_update are lists of dictionaries that the driver wants the manager to update upon a successful return. Note that each entry requires a {'id': xxx} so that the correct volume entry can be updated. If None is returned, the volume will remain its original status. Also note that you cannot directly assign add_volumes to add_volumes_update as add_volumes is a list of cinder.db.sqlalchemy.models.Volume objects and cannot be used for db update directly. Same with remove_volumes. If the driver throws an exception, the status of the group as well as those of the volumes to be added/removed will be set to 'error'. """ def create_cgsnapshot(self, context, cgsnapshot, snapshots): """Creates a cgsnapshot. :param context: the context of the caller. :param cgsnapshot: the dictionary of the cgsnapshot to be created. :param snapshots: a list of snapshot dictionaries in the cgsnapshot. :returns: model_update, snapshots_model_update param snapshots is retrieved directly from the db. It is a list of cinder.db.sqlalchemy.models.Snapshot to be precise. It cannot be assigned to snapshots_model_update. snapshots_model_update is a list of dictionaries. It has to be built by the driver. An entry will be in this format: {'id': xxx, 'status': xxx, ......}. model_update will be in this format: {'status': xxx, ......}. The driver should populate snapshots_model_update and model_update and return them. The manager will check snapshots_model_update and update db accordingly for each snapshot. If the driver successfully deleted some snapshots but failed to delete others, it should set statuses of the snapshots accordingly so that the manager can update db correctly. If the status in any entry of snapshots_model_update is 'error', the status in model_update will be set to the same if it is not already 'error'. If the status in model_update is 'error', the manager will raise an exception and the status of cgsnapshot will be set to 'error' in the db. If snapshots_model_update is not returned by the driver, the manager will set the status of every snapshot to 'error' in the except block. 
If the driver raises an exception during the operation, it will be caught by the try-except block in the manager and the statuses of cgsnapshot and all snapshots will be set to 'error'. For a successful operation, the driver can either build the model_update and snapshots_model_update and return them or return None, None. The statuses of cgsnapshot and all snapshots will be set to 'available' at the end of the manager function. """ def delete_cgsnapshot(self, context, cgsnapshot, snapshots): """Deletes a cgsnapshot. :param context: the context of the caller. :param cgsnapshot: the dictionary of the cgsnapshot to be deleted. :param snapshots: a list of snapshot dictionaries in the cgsnapshot. :returns: model_update, snapshots_model_update param snapshots is retrieved directly from the db. It is a list of cinder.db.sqlalchemy.models.Snapshot to be precise. It cannot be assigned to snapshots_model_update. snapshots_model_update is a list of dictionaries. It has to be built by the driver. An entry will be in this format: {'id': xxx, 'status': xxx, ......}. model_update will be in this format: {'status': xxx, ......}. The driver should populate snapshots_model_update and model_update and return them. The manager will check snapshots_model_update and update db accordingly for each snapshot. If the driver successfully deleted some snapshots but failed to delete others, it should set statuses of the snapshots accordingly so that the manager can update db correctly. If the status in any entry of snapshots_model_update is 'error_deleting' or 'error', the status in model_update will be set to the same if it is not already 'error_deleting' or 'error'. If the status in model_update is 'error_deleting' or 'error', the manager will raise an exception and the status of cgsnapshot will be set to 'error' in the db. If snapshots_model_update is not returned by the driver, the manager will set the status of every snapshot to 'error' in the except block. If the driver raises an exception during the operation, it will be caught by the try-except block in the manager and the statuses of cgsnapshot and all snapshots will be set to 'error'. For a successful operation, the driver can either build the model_update and snapshots_model_update and return them or return None, None. The statuses of cgsnapshot and all snapshots will be set to 'deleted' after the manager deletes them from db. """ ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/interface/volume_driver.py0000664000175000017500000005435700000000000021545 0ustar00zuulzuul00000000000000# Copyright 2016 Dell Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # """ Core backend volume driver interface. All backend drivers should support this interface as a bare minimum, but some methods (marked as optional in their description) can rely on the default implementation. 
""" from cinder.interface import base class VolumeDriverCore(base.CinderInterface): """Core backend driver required interface.""" def do_setup(self, context): """Any initialization the volume driver needs to do while starting. Called once by the manager after the driver is loaded. Can be used to set up clients, check licenses, set up protocol specific helpers, etc. If you choose to raise an exception here, the setup is considered failed already and the check_for_setup_error() will not be called. :param context: The admin context of type context.RequestContext. :raises InvalidConfigurationValue: raise this if you detect a problem during a configuration check :raises VolumeDriverException: raise this or one of its more specific subclasses if you detect setup problems other than invalid configuration """ def check_for_setup_error(self): """Validate there are no issues with the driver configuration. Called after do_setup(). Driver initialization can occur there or in this call, but must be complete by the time this returns. If this method raises an exception, the driver will be left in an "uninitialized" state by the volume manager, which means that it will not be sent requests for volume operations. This method typically checks things like whether the configured credentials can be used to log in the storage backend, and whether any external dependencies are present and working. :raises VolumeBackendAPIException: in case of setup error. :raises InvalidConfigurationValue: raise this if you detect a problem during a configuration check """ def get_volume_stats(self, refresh=False): """Collects volume backend stats. The get_volume_stats method is used by the volume manager to collect information from the driver instance related to information about the driver, available and used space, and driver/backend capabilities. stats are stored in 'self._stats' field, which could be updated in '_update_volume_stats' method. It returns a dict with the following required fields: * volume_backend_name This is an identifier for the backend taken from cinder.conf. Useful when using multi-backend. * vendor_name Vendor/author of the driver who serves as the contact for the driver's development and support. * driver_version The driver version is logged at cinder-volume startup and is useful for tying volume service logs to a specific release of the code. There are currently no rules for how or when this is updated, but it tends to follow typical major.minor.revision ideas. * storage_protocol The protocol used to connect to the storage, this should be a short string such as: "iSCSI", "FC", "NFS", "ceph", etc. Available protocols are present in cinder.common.constants and they must be used instead of string literals. Variant values only exist for older drivers that were already reporting those values. New drivers must use non variant versions. In some cases this may be the same value as the driver_volume_type returned by the initialize_connection method, but they are not the same thing, since this one is meant to be used by the scheduler, while the latter is the os-brick connector identifier used in the factory method. * total_capacity_gb The total capacity in gigabytes (GiB) of the storage backend being used to store Cinder volumes. Use keyword 'unknown' if the backend cannot report the value or 'infinite' if there is no upper limit. But, it is recommended to report real values as the Cinder scheduler assigns lowest weight to any storage backend reporting 'unknown' or 'infinite'. 
* free_capacity_gb The free capacity in gigabytes (GiB). Use keyword 'unknown' if the backend cannot report the value or 'infinite' if there is no upper limit. But, it is recommended to report real values as the Cinder scheduler assigns lowest weight to any storage backend reporting 'unknown' or 'infinite'. And the following optional fields: * reserved_percentage (integer) Percentage of backend capacity which is not used by the scheduler. * location_info (string) Driver-specific information used by the driver and storage backend to correlate Cinder volumes and backend LUNs/files. * QoS_support (Boolean) Whether the backend supports quality of service. * provisioned_capacity_gb The total provisioned capacity on the storage backend, in gigabytes (GiB), including space consumed by any user other than Cinder itself. * max_over_subscription_ratio The maximum amount a backend can be over subscribed. * thin_provisioning_support (Boolean) Whether the backend is capable of allocating thinly provisioned volumes. * thick_provisioning_support (Boolean) Whether the backend is capable of allocating thick provisioned volumes. (Typically True.) * total_volumes (integer) Total number of volumes on the storage backend. This can be used in custom driver filter functions. * filter_function (string) A custom function used by the scheduler to determine whether a volume should be allocated to this backend or not. Example: capabilities.total_volumes < 10 * goodness_function (string) Similar to filter_function, but used to weigh multiple volume backends. Example: capabilities.capacity_utilization < 0.6 ? 100 : 25 * multiattach (Boolean) Whether the backend supports multiattach or not. Defaults to False. * sparse_copy_volume (Boolean) Whether copies performed by the volume manager for operations such as migration should attempt to preserve sparseness. * online_extend_support (Boolean) Whether the backend supports in-use volume extend or not. Defaults to True. * clone_across_pools (Boolean) Whether the backend supports cloning a volume across different pools. Defaults to False. The returned dict may also contain a list, "pools", which has a similar dict for each pool being used with the backend. :param refresh: Whether to discard any cached values and force a full refresh of stats. :returns: dict of appropriate values (see above). """ def create_volume(self, volume): """Create a new volume on the backend. This method is responsible only for storage allocation on the backend. It should not export a LUN or actually make this storage available for use, this is done in a later call. TODO(smcginnis): Add example data structure of volume object. :param volume: Volume object containing specifics to create. :returns: (Optional) dict of database updates for the new volume. :raises VolumeBackendAPIException: if creation failed. """ def delete_volume(self, volume): """Delete a volume from the backend. If the driver can talk to the backend and detects that the volume is no longer present, this call should succeed and allow Cinder to complete the process of deleting the volume. It is imperative that this operation ensures that the data from the deleted volume cannot leak into new volumes when they are created, as new volumes are likely to belong to a different tenant/project. :param volume: The volume to delete. :raises VolumeIsBusy: if the volume is still attached or has snapshots. VolumeBackendAPIException on error. 
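For example (illustrative only; the backend client and its not-found exception are
hypothetical), drivers commonly treat an object that is already gone on the backend
as a successful delete::

    def delete_volume(self, volume):
        try:
            self._client.delete_lun(volume.name)
        except BackendObjectNotFound:  # hypothetical backend error
            # Already gone on the backend: let Cinder finish removing the
            # volume from its database instead of failing the request.
            pass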
""" def initialize_connection(self, volume, connector, initiator_data=None): """Allow connection to connector and return connection info. :param volume: The volume to be attached. :param connector: Dictionary containing information about what is being connected to. :param initiator_data: (Optional) A dictionary of driver_initiator_data objects with key-value pairs that have been saved for this initiator by a driver in previous initialize_connection calls. :returns: A dictionary of connection information. This can optionally include a "initiator_updates" field. The "initiator_updates" field must be a dictionary containing a "set_values" and/or "remove_values" field. The "set_values" field must be a dictionary of key-value pairs to be set/updated in the db. The "remove_values" field must be a list of keys, previously set with "set_values", that will be deleted from the db. May be called multiple times to get connection information after a volume has already been attached. """ def terminate_connection(self, volume, connector): """Remove access to a volume. Note: If ``connector`` is ``None``, then all connections to the volume should be terminated. :param volume: The volume to remove. :param connector: The Dictionary containing information about the connection. This is optional when doing a force-detach and can be None. """ def clone_image(self, context, volume, image_location, image_meta, image_service): """Create a volume efficiently from an existing image. Drivers that, always or under some circumstances, can efficiently create a volume from a Glance image can implement this method to be given a chance to try to do the volume creation as efficiently as possible. If the driver cannot do it efficiently on a specific call it can return ``(None, False)`` to let Cinder try other mechanisms. **This method is optional** and most drivers won't need to implement it and can leverage the default driver implementation that returns ``(None, False)`` to indicate that this optimization is not possible on this driver. Examples where drivers can do this optimization: - When images are stored on the same storage system and the driver can locate them and efficiently create a volume. For example the RBD driver can efficiently create a volume if the image is stored on the same Ceph cluster and the image format is ``raw``. Another example is the GPFS driver. - When volumes are locally accessible and accessing them that way is more efficient than going through the remote connection mechanism. For example in the GPFS driver if the cloning feature doesn't work it will copy the file without using os-brick to connect to the volume. :param context: Security/policy info for the request. :param volume: The volume to create, as an OVO instance. Drivers should use attributes to access its values instead of using the dictionary compatibility interface it provides. :param image_location: Tuple with (``direct_url``, ``locations``) from the `image metadata fields. `_ ``direct_url``, when present, is a string whose format depends on the image service's external storage in use. Any, or both, tuple positions can be None, depending on the image service configuration. ``locations``, when present, is a list of dictionaries where the value of the ``url`` key contains the direct urls (including the one from ``direct_url``). :param image_meta: Dictionary containing `information about the image `_, including basic attributes and custom properties. 
Some transformations have been applied, such as converting timestamps (from ``created_at``, ``updated_at``, and ``deleted_at``) to datetimes, and deserializing JSON values from ``block_device_mapping`` and ``mappings`` keys if present. Base properties, as per the image's schema, will be stored on the base dictionary and the rest will be stored under the ``properties`` key. An important field to check in this method is the ``disk_format`` (e.g. raw, qcow2). :param image_service: The image service to use (``GlanceImageService`` instance). Can fetch image data directly using it. :returns: Tuple of (model_update, boolean) where the boolean specifies whether the clone occurred. """ def copy_image_to_volume(self, context, volume, image_service, image_id, disable_sparse=False): """Fetch the image from image_service and write it to the volume. :param context: Security/policy info for the request. :param volume: The volume to create. :param image_service: The image service to use. :param image_id: The image identifier. :param disable_sparse: Enable or disable sparse copy. Default=False. :returns: Model updates. """ def copy_volume_to_image(self, context, volume, image_service, image_meta): """Copy the volume to the specified image. :param context: Security/policy info for the request. :param volume: The volume to copy. :param image_service: The image service to use. :param image_meta: Information about the image. :returns: Model updates. """ def before_volume_copy(self, context, src_vol, dest_vol, remote=None): """Driver-specific actions executed before copying a volume. This method will be called before _copy_volume_data during volume migration. :param context: Context :param src_volume: Source volume in the copy operation. :param dest_volume: Destination volume in the copy operation. :param remote: Whether the copy operation is local. :returns: There is no return value for this method. """ def after_volume_copy(self, context, src_vol, dest_vol, remote=None): """Driver-specific actions executed after copying a volume. This method will be called after _copy_volume_data during volume migration. :param context: Context :param src_volume: Source volume in the copy operation. :param dest_volume: Destination volume in the copy operation. :param remote: Whether the copy operation is local. :returns: There is no return value for this method. """ def extend_volume(self, volume, new_size): """Extend the size of a volume. :param volume: The volume to extend. :param new_size: The new desired size of the volume. Note that if the volume backend doesn't support extending an in-use volume, the driver should report online_extend_support=False. """ def migrate_volume(self, context, volume, host): """Migrate the volume to the specified host. :param context: Context :param volume: A dictionary describing the volume to migrate :param host: A dictionary describing the host to migrate to, where host['host'] is its name, and host['capabilities'] is a dictionary of its reported capabilities. :returns: Tuple of (model_update, boolean) where the boolean specifies whether the migration occurred. """ def update_migrated_volume(self, context, volume, new_volume, original_volume_status): """Return model update for migrated volume. Each driver implementing this method needs to be responsible for the values of _name_id and provider_location. If None is returned or either key is not set, it means the volume table does not need to change the value(s) for the key(s). 
The return format is {"_name_id": value, "provider_location": value}. :param context: Context :param volume: The original volume that was migrated to this backend :param new_volume: The migration volume object that was created on this backend as part of the migration process :param original_volume_status: The status of the original volume :returns: model_update to update DB with any needed changes """ def retype(self, context, volume, new_type, diff, host): """Change the type of a volume. This operation occurs on the same backend and the return value indicates whether it was successful. If migration is required to satisfy a retype, that will be handled by the volume manager. :param context: Context :param volume: The volume to retype :param new_type: The target type for the volume :param diff: The differences between the two types :param host: The host that contains this volume :returns: Tuple of (boolean, model_update) where the boolean specifies whether the retype occurred. """ def create_snapshot(self, snapshot): """Creates a snapshot. :param snapshot: Information for the snapshot to be created. """ def delete_snapshot(self, snapshot): """Deletes a snapshot. :param snapshot: The snapshot to delete. """ def create_volume_from_snapshot(self, volume, snapshot): """Creates a volume from a snapshot. If volume_type extra specs includes 'replication: True' the driver needs to create a volume replica (secondary), and setup replication between the newly created volume and the secondary volume. An optional larger size for the new volume can be specified. Drivers should check this value and create or expand the new volume to match. :param volume: The volume to be created. :param snapshot: The snapshot from which to create the volume. :returns: A dict of database updates for the new volume. """ def set_initialized(self): """Mark driver as initialized. Do not implement this in a driver. Rely on the default implementation. """ def initialized(self): """Getter for driver's initialized status. Do not implement this in a driver. Rely on the default implementation. """ def supported(self): """Getter for driver's supported status. Do not implement this in a driver. Rely on the default implementation. """ def set_throttle(self): """Hook for initialization of cinder.volume.throttle. This has not been necessary to re-implement or override in any drivers thus far. The generic implementation does nothing unless explicitly enabled. """ def init_capabilities(self): """Fetch and merge capabilities of the driver. Do not override this, implement _init_vendor_properties instead. """ def _init_vendor_properties(self): """Create a dictionary of vendor unique properties. Compose a dictionary by calling ``self._set_property``. Select a prefix from the vendor, product, or device name. Prefix must match the part of property name before colon (:). :returns tuple (properties: dict, prefix: str) """ def update_provider_info(self, volumes, snapshots): """Get provider info updates from driver. This retrieves a list of volumes and a list of snapshots that changed their providers thanks to the initialization of the host, so that Cinder can update this information in the volume database. This is only implemented by drivers where such migration is possible. 
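For illustration (hypothetical identifiers), a driver that discovered a new provider
ID for a single volume and no snapshot changes could return, using the format spelled
out by the fields below::

    # 'volume' here stands for one of the Volume objects passed in.
    volume_updates = [{'id': volume.id, 'provider_id': 'backend-lun-42'}]
    snapshot_updates = []
    return volume_updates, snapshot_updates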
:param volumes: List of Cinder volumes to check for updates :param snapshots: List of Cinder snapshots to check for updates :returns: tuple (volume_updates, snapshot_updates) where volume updates {'id': uuid, provider_id: } and snapshot updates {'id': uuid, provider_id: } """ ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/interface/volume_group_driver.py0000664000175000017500000002521200000000000022745 0ustar00zuulzuul00000000000000# Copyright (c) 2017 Dell Inc. or its subsidiaries. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # """ Generic volume group volume driver interface. """ from cinder.interface import base class VolumeGroupDriver(base.CinderInterface): """Interface for drivers that support groups.""" def create_group(self, context, group): """Creates a group. :param context: the context of the caller. :param group: the Group object to be created. :returns: model_update model_update will be in this format: {'status': xxx, ......}. If the status in model_update is 'error', the manager will throw an exception and it will be caught in the try-except block in the manager. If the driver throws an exception, the manager will also catch it in the try-except block. The group status in the db will be changed to 'error'. For a successful operation, the driver can either build the model_update and return it or return None. The group status will be set to 'available'. """ def create_group_from_src(self, context, group, volumes, group_snapshot=None, snapshots=None, source_group=None, source_vols=None): """Creates a group from source. :param context: the context of the caller. :param group: the Group object to be created. :param volumes: a list of Volume objects in the group. :param group_snapshot: the GroupSnapshot object as source. :param snapshots: a list of Snapshot objects in the group_snapshot. :param source_group: a Group object as source. :param source_vols: a list of Volume objects in the source_group. :returns: model_update, volumes_model_update The source can be group_snapshot or a source group. param volumes is a list of objects retrieved from the db. It cannot be assigned to volumes_model_update. volumes_model_update is a list of dictionaries. It has to be built by the driver. An entry will be in this format: {'id': xxx, 'status': xxx, ......}. model_update will be in this format: {'status': xxx, ......}. To be consistent with other volume operations, the manager will assume the operation is successful if no exception is thrown by the driver. For a successful operation, the driver can either build the model_update and volumes_model_update and return them or return None, None. """ def delete_group(self, context, group, volumes): """Deletes a group. :param context: the context of the caller. :param group: the Group object to be deleted. :param volumes: a list of Volume objects in the group. :returns: model_update, volumes_model_update param volumes is a list of objects retrieved from the db. 
It cannot be assigned to volumes_model_update. volumes_model_update is a list of dictionaries. It has to be built by the driver. An entry will be in this format: {'id': xxx, 'status': xxx, ......}. model_update will be in this format: {'status': xxx, ......}. The driver should populate volumes_model_update and model_update and return them. The manager will check volumes_model_update and update db accordingly for each volume. If the driver successfully deleted some volumes but failed to delete others, it should set statuses of the volumes accordingly so that the manager can update db correctly. If the status in any entry of volumes_model_update is 'error_deleting' or 'error', the status in model_update will be set to the same if it is not already 'error_deleting' or 'error'. If the status in model_update is 'error_deleting' or 'error', the manager will raise an exception and the status of the group will be set to 'error' in the db. If volumes_model_update is not returned by the driver, the manager will set the status of every volume in the group to 'error' in the except block. If the driver raises an exception during the operation, it will be caught by the try-except block in the manager. The statuses of the group and all volumes in it will be set to 'error'. For a successful operation, the driver can either build the model_update and volumes_model_update and return them or return None, None. The statuses of the group and all volumes will be set to 'deleted' after the manager deletes them from db. """ def update_group(self, context, group, add_volumes=None, remove_volumes=None): """Updates a group. :param context: the context of the caller. :param group: the Group object to be updated. :param add_volumes: a list of Volume objects to be added. :param remove_volumes: a list of Volume objects to be removed. :returns: model_update, add_volumes_update, remove_volumes_update model_update is a dictionary that the driver wants the manager to update upon a successful return. If None is returned, the manager will set the status to 'available'. add_volumes_update and remove_volumes_update are lists of dictionaries that the driver wants the manager to update upon a successful return. Note that each entry requires a {'id': xxx} so that the correct volume entry can be updated. If None is returned, the volume will remain its original status. Also note that you cannot directly assign add_volumes to add_volumes_update as add_volumes is a list of volume objects and cannot be used for db update directly. Same with remove_volumes. If the driver throws an exception, the status of the group as well as those of the volumes to be added/removed will be set to 'error'. """ def create_group_snapshot(self, context, group_snapshot, snapshots): """Creates a group_snapshot. :param context: the context of the caller. :param group_snapshot: the GroupSnapshot object to be created. :param snapshots: a list of Snapshot objects in the group_snapshot. :returns: model_update, snapshots_model_update param snapshots is a list of Snapshot objects. It cannot be assigned to snapshots_model_update. snapshots_model_update is a list of dictionaries. It has to be built by the driver. An entry will be in this format: {'id': xxx, 'status': xxx, ......}. model_update will be in this format: {'status': xxx, ......}. The driver should populate snapshots_model_update and model_update and return them. The manager will check snapshots_model_update and update db accordingly for each snapshot. 
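For illustration only (hypothetical values), a driver for which every snapshot
succeeded might build and return::

    model_update = {'status': 'available'}
    snapshots_model_update = [{'id': snap.id, 'status': 'available'}
                              for snap in snapshots]
    return model_update, snapshots_model_update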
If the driver successfully deleted some snapshots but failed to delete others, it should set statuses of the snapshots accordingly so that the manager can update db correctly. If the status in any entry of snapshots_model_update is 'error', the status in model_update will be set to the same if it is not already 'error'. If the status in model_update is 'error', the manager will raise an exception and the status of group_snapshot will be set to 'error' in the db. If snapshots_model_update is not returned by the driver, the manager will set the status of every snapshot to 'error' in the except block. If the driver raises an exception during the operation, it will be caught by the try-except block in the manager and the statuses of group_snapshot and all snapshots will be set to 'error'. For a successful operation, the driver can either build the model_update and snapshots_model_update and return them or return None, None. The statuses of group_snapshot and all snapshots will be set to 'available' at the end of the manager function. """ def delete_group_snapshot(self, context, group_snapshot, snapshots): """Deletes a group_snapshot. :param context: the context of the caller. :param group_snapshot: the GroupSnapshot object to be deleted. :param snapshots: a list of Snapshot objects in the group_snapshot. :returns: model_update, snapshots_model_update param snapshots is a list of objects. It cannot be assigned to snapshots_model_update. snapshots_model_update is a list of of dictionaries. It has to be built by the driver. An entry will be in this format: {'id': xxx, 'status': xxx, ......}. model_update will be in this format: {'status': xxx, ......}. The driver should populate snapshots_model_update and model_update and return them. The manager will check snapshots_model_update and update db accordingly for each snapshot. If the driver successfully deleted some snapshots but failed to delete others, it should set statuses of the snapshots accordingly so that the manager can update db correctly. If the status in any entry of snapshots_model_update is 'error_deleting' or 'error', the status in model_update will be set to the same if it is not already 'error_deleting' or 'error'. If the status in model_update is 'error_deleting' or 'error', the manager will raise an exception and the status of group_snapshot will be set to 'error' in the db. If snapshots_model_update is not returned by the driver, the manager will set the status of every snapshot to 'error' in the except block. If the driver raises an exception during the operation, it will be caught by the try-except block in the manager and the statuses of group_snapshot and all snapshots will be set to 'error'. For a successful operation, the driver can either build the model_update and snapshots_model_update and return them or return None, None. The statuses of group_snapshot and all snapshots will be set to 'deleted' after the manager deletes them from db. """ ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/interface/volume_manageable_driver.py0000664000175000017500000001711000000000000023663 0ustar00zuulzuul00000000000000# Copyright 2016 Dell Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # """ Manage/unmanage existing volume driver interface. """ from cinder.interface import base class VolumeManagementDriver(base.CinderInterface): """Interface for drivers that support managing existing volumes.""" def manage_existing(self, volume, existing_ref): """Brings an existing backend storage object under Cinder management. existing_ref is passed straight through from the API request's manage_existing_ref value, and it is up to the driver how this should be interpreted. It should be sufficient to identify a storage object that the driver should somehow associate with the newly-created cinder volume structure. There are two ways to do this: 1. Rename the backend storage object so that it matches the, volume['name'] which is how drivers traditionally map between a cinder volume and the associated backend storage object. 2. Place some metadata on the volume, or somewhere in the backend, that allows other driver requests (e.g. delete, clone, attach, detach...) to locate the backend storage object when required. If the existing_ref doesn't make sense, or doesn't refer to an existing backend storage object, raise a ManageExistingInvalidReference exception. The volume may have a volume_type, and the driver can inspect that and compare against the properties of the referenced backend storage object. If they are incompatible, raise a ManageExistingVolumeTypeMismatch, specifying a reason for the failure. :param volume: Cinder volume to manage :param existing_ref: Dictionary with keys 'source-id', 'source-name' with driver-specific values to identify a backend storage object. :raises ManageExistingInvalidReference: If the existing_ref doesn't make sense, or doesn't refer to an existing backend storage object. :raises ManageExistingVolumeTypeMismatch: If there is a mismatch between the volume type and the properties of the existing backend storage object. """ def manage_existing_get_size(self, volume, existing_ref): """Return size of volume to be managed by manage_existing. When calculating the size, round up to the next GB. :param volume: Cinder volume to manage :param existing_ref: Dictionary with keys 'source-id', 'source-name' with driver-specific values to identify a backend storage object. :raises ManageExistingInvalidReference: If the existing_ref doesn't make sense, or doesn't refer to an existing backend storage object. """ def unmanage(self, volume): """Removes the specified volume from Cinder management. Does not delete the underlying backend storage object. For most drivers, this will not need to do anything. However, some drivers might use this call as an opportunity to clean up any Cinder-specific configuration that they have associated with the backend storage object. :param volume: Cinder volume to unmanage """ class VolumeListManageableDriver(VolumeManagementDriver): """Interface to support listing manageable snapshots and volumes.""" def get_manageable_volumes(self, cinder_volumes, marker, limit, offset, sort_keys, sort_dirs): """List volumes on the backend available for management by Cinder. 
Returns a list of dictionaries, each specifying a volume in the host, with the following keys: - reference (dictionary): The reference for a volume, which can be passed to "manage_existing". - size (int): The size of the volume according to the storage backend, rounded up to the nearest GB. - safe_to_manage (boolean): Whether or not this volume is safe to manage according to the storage backend. For example, is the volume in use or invalid for any reason. - reason_not_safe (string): If safe_to_manage is False, the reason why. - cinder_id (string): If already managed, provide the Cinder ID. - extra_info (string): Any extra information to return to the user :param cinder_volumes: A list of volumes in this host that Cinder currently manages, used to determine if a volume is manageable or not. :param marker: The last item of the previous page; we return the next results after this value (after sorting) :param limit: Maximum number of items to return :param offset: Number of items to skip after marker :param sort_keys: List of keys to sort results by (valid keys are 'identifier' and 'size') :param sort_dirs: List of directions to sort by, corresponding to sort_keys (valid directions are 'asc' and 'desc') """ def get_manageable_snapshots(self, cinder_snapshots, marker, limit, offset, sort_keys, sort_dirs): """List snapshots on the backend available for management by Cinder. Returns a list of dictionaries, each specifying a snapshot in the host, with the following keys: - reference (dictionary): The reference for a snapshot, which can be passed to "manage_existing_snapshot". - size (int): The size of the snapshot according to the storage backend, rounded up to the nearest GB. - safe_to_manage (boolean): Whether or not this snapshot is safe to manage according to the storage backend. For example, is the snapshot in use or invalid for any reason. - reason_not_safe (string): If safe_to_manage is False, the reason why. - cinder_id (string): If already managed, provide the Cinder ID. - extra_info (string): Any extra information to return to the user - source_reference (string): Similar to "reference", but for the snapshot's source volume. :param cinder_snapshots: A list of snapshots in this host that Cinder currently manages, used to determine if a snapshot is manageable or not. :param marker: The last item of the previous page; we return the next results after this value (after sorting) :param limit: Maximum number of items to return :param offset: Number of items to skip after marker :param sort_keys: List of keys to sort results by (valid keys are 'identifier' and 'size') :param sort_dirs: List of directions to sort by, corresponding to sort_keys (valid directions are 'asc' and 'desc') """ ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/interface/volume_snapshot_revert.py0000664000175000017500000000245000000000000023463 0ustar00zuulzuul00000000000000# Copyright 2018 Huawei # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. # """ Revert to snapshot capable volume driver interface. """ from cinder.interface import base class VolumeSnapshotRevertDriver(base.CinderInterface): """Interface for drivers that support revert to snapshot.""" def revert_to_snapshot(self, context, volume, snapshot): """Revert volume to snapshot. Note: the revert process should not change the volume's current size, that means if the driver shrank the volume during the process, it should extend the volume internally. :param context: the context of the caller. :param volume: The volume to be reverted. :param snapshot: The snapshot used for reverting. """ ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/interface/volume_snapshotmanagement_driver.py0000664000175000017500000000645700000000000025517 0ustar00zuulzuul00000000000000# Copyright 2016 Dell Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # """ Manage/unmanage existing volume snapshots driver interface. """ from cinder.interface import base class VolumeSnapshotManagementDriver(base.CinderInterface): """Interface for drivers that support managing existing snapshots.""" def manage_existing_snapshot(self, snapshot, existing_ref): """Brings an existing backend storage object under Cinder management. existing_ref is passed straight through from the API request's manage_existing_ref value, and it is up to the driver how this should be interpreted. It should be sufficient to identify a storage object that the driver should somehow associate with the newly-created cinder snapshot structure. There are two ways to do this: 1. Rename the backend storage object so that it matches the snapshot['name'] which is how drivers traditionally map between a cinder snapshot and the associated backend storage object. 2. Place some metadata on the snapshot, or somewhere in the backend, that allows other driver requests (e.g. delete) to locate the backend storage object when required. :param snapshot: The snapshot to manage. :param existing_ref: Dictionary with keys 'source-id', 'source-name' with driver-specific values to identify a backend storage object. :raises ManageExistingInvalidReference: If the existing_ref doesn't make sense, or doesn't refer to an existing backend storage object. """ def manage_existing_snapshot_get_size(self, snapshot, existing_ref): """Return size of snapshot to be managed by manage_existing. When calculating the size, round up to the next GB. :param snapshot: The snapshot to manage. :param existing_ref: Dictionary with keys 'source-id', 'source-name' with driver-specific values to identify a backend storage object. :raises ManageExistingInvalidReference: If the existing_ref doesn't make sense, or doesn't refer to an existing backend storage object. """ def unmanage_snapshot(self, snapshot): """Removes the specified snapshot from Cinder management. 
Does not delete the underlying backend storage object. For most drivers, this will not need to do anything. However, some drivers might use this call as an opportunity to clean up any Cinder-specific configuration that they have associated with the backend storage object. :param snapshot: The snapshot to unmanage. """ ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315577.0791183 cinder-27.0.0/cinder/keymgr/0000775000175000017500000000000000000000000015631 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/keymgr/__init__.py0000664000175000017500000000144300000000000017744 0ustar00zuulzuul00000000000000# Copyright (c) 2013 The Johns Hopkins University/Applied Physics Laboratory # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from castellan import options as castellan_opts from oslo_config import cfg CONF = cfg.CONF castellan_opts.set_defaults(CONF) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/keymgr/conf_key_mgr.py0000664000175000017500000001462500000000000020655 0ustar00zuulzuul00000000000000# Copyright (c) 2013 The Johns Hopkins University/Applied Physics Laboratory # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ An implementation of a key manager that reads its key from the project's configuration options. This key manager implementation provides limited security, assuming that the key remains secret. Using the volume encryption feature as an example, encryption provides protection against a lost or stolen disk, assuming that the configuration file that contains the key is not stored on the disk. Encryption also protects the confidentiality of data as it is transmitted via iSCSI from the compute host to the storage host (again assuming that an attacker who intercepts the data does not know the secret key). Because this implementation uses a single, fixed key, it proffers no protection once that key is compromised. In particular, different volumes encrypted with a key provided by this key manager actually share the same encryption key so *any* volume can be decrypted once the fixed key is known. 
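For illustration, the key material is simply the hex-decoded value of the
``[key_manager] fixed_key`` configuration option, so every volume encrypted through
this manager shares it (the key value below is a hypothetical example, not a
recommendation)::

    import binascii

    from castellan.common.objects import symmetric_key

    hex_key = 'deadbeef' * 8                  # hypothetical 256-bit fixed_key
    key_bytes = binascii.unhexlify(hex_key)
    key = symmetric_key.SymmetricKey('AES', len(key_bytes) * 8, key_bytes)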
""" import binascii from castellan.common.objects import symmetric_key from castellan.key_manager import key_manager from oslo_config import cfg from oslo_log import log as logging from cinder import exception from cinder.i18n import _ key_mgr_opts = [ cfg.StrOpt('fixed_key', secret=True, help='Fixed key returned by key manager, specified in hex', deprecated_group='keymgr'), ] CONF = cfg.CONF CONF.register_opts(key_mgr_opts, group='key_manager') LOG = logging.getLogger(__name__) class ConfKeyManager(key_manager.KeyManager): """Key Manager that supports one key defined by the fixed_key conf option. This key manager implementation supports all the methods specified by the key manager interface. This implementation creates a single key in response to all invocations of create_key. Side effects (e.g., raising exceptions) for each method are handled as specified by the key manager interface. """ warning_logged = False def __init__(self, configuration): if not ConfKeyManager.warning_logged: LOG.warning('This key manager is insecure and is not ' 'recommended for production deployments') ConfKeyManager.warning_logged = True super(ConfKeyManager, self).__init__(configuration) self.conf = configuration self.conf.register_opts(key_mgr_opts, group='key_manager') self.key_id = '00000000-0000-0000-0000-000000000000' def _get_key(self): if self.conf.key_manager.fixed_key is None: raise ValueError(_('config option key_manager.fixed_key is not ' 'defined')) hex_key = self.conf.key_manager.fixed_key key_bytes = bytes(binascii.unhexlify(hex_key)) return symmetric_key.SymmetricKey('AES', len(key_bytes) * 8, key_bytes) def create_key(self, context, **kwargs): """Creates a symmetric key. This implementation returns a UUID for the key read from the configuration file. A NotAuthorized exception is raised if the specified context is None. """ if context is None: raise exception.NotAuthorized() return self.key_id def create_key_pair(self, context, **kwargs): raise NotImplementedError( "ConfKeyManager does not support asymmetric keys") def store(self, context, managed_object, **kwargs): """Stores (i.e., registers) a key with the key manager.""" if context is None: raise exception.NotAuthorized() if managed_object != self._get_key(): raise exception.KeyManagerError( reason="cannot store arbitrary keys") return self.key_id def get(self, context, managed_object_id): """Retrieves the key identified by the specified id. This implementation returns the key that is associated with the specified UUID. A NotAuthorized exception is raised if the specified context is None; a KeyError is raised if the UUID is invalid. """ if context is None: raise exception.NotAuthorized() if managed_object_id != self.key_id: raise KeyError(str(managed_object_id) + " != " + str(self.key_id)) return self._get_key() def delete(self, context, managed_object_id): """Represents deleting the key. Because the ConfKeyManager has only one key, which is read from the configuration file, the key is not actually deleted when this is called. 
""" if context is None: raise exception.NotAuthorized() if managed_object_id != self.key_id: raise exception.KeyManagerError( reason="cannot delete non-existent key") LOG.warning("Not deleting key %s", managed_object_id) def add_consumer(self, context, managed_object_id, consumer_data): raise NotImplementedError( 'ConfKeyManager does not implement adding consumers' ) def remove_consumer(self, context, managed_object_id, consumer_data): raise NotImplementedError( 'ConfKeyManager does not implement removing consumers' ) def list(self, context, object_type=None, metadata_only=False): """Retrieves a list of managed objects that match the criteria. Note: Required abstract method starting with Castellan 0.13.0 :param context: Contains information of the user and the environment for the request. :param object_type: The type of object to retrieve. :param metadata_only: Whether secret data should be included. :raises NotAuthorized: If no user context. """ if context is None: raise exception.NotAuthorized() return [] ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/keymgr/migration.py0000664000175000017500000002004000000000000020170 0ustar00zuulzuul00000000000000# Copyright 2017 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import binascii import itertools from barbicanclient import client as barbican_client from castellan import options as castellan_options from keystoneauth1 import loading as ks_loading from keystoneauth1 import session as ks_session from oslo_config import cfg from oslo_log import log as logging from cinder import context from cinder import coordination from cinder import objects from cinder.volume import volume_migration LOG = logging.getLogger(__name__) CONF = cfg.CONF MAX_KEY_MIGRATION_ERRORS = 3 class KeyMigrator(object): def __init__(self, conf): self.conf = conf self.admin_context = context.get_admin_context() self.fixed_key_id = '00000000-0000-0000-0000-000000000000' self.fixed_key_bytes = None self.fixed_key_length = None def handle_key_migration(self, volumes, backups): castellan_options.set_defaults(self.conf) try: self.conf.import_opt(name='fixed_key', module_str='cinder.keymgr.conf_key_mgr', group='key_manager') except cfg.DuplicateOptError: pass fixed_key = self.conf.key_manager.fixed_key backend = self.conf.key_manager.backend or '' backend = backend.split('.')[-1] if backend == 'ConfKeyManager': LOG.info("Not migrating encryption keys because the " "ConfKeyManager is still in use.") elif not fixed_key: LOG.info("Not migrating encryption keys because the " "ConfKeyManager's fixed_key is not in use.") elif backend != 'barbican' and backend != 'BarbicanKeyManager': # Note: There are two ways of specifying the Barbican backend. # The long-hand method contains the "BarbicanKeyManager" class # name, and the short-hand method is just "barbican" with no # module path prefix. 
LOG.warning("Not migrating encryption keys because migration to " "the '%s' key_manager backend is not supported.", backend) self._log_migration_status() elif not volumes and not backups: LOG.info("Not migrating encryption keys because there are no " "volumes or backups associated with this host.") self._log_migration_status() else: self.fixed_key_bytes = bytes(binascii.unhexlify(fixed_key)) self.fixed_key_length = len(self.fixed_key_bytes) * 8 self._migrate_keys(volumes, backups) self._log_migration_status() def _migrate_keys(self, volumes, backups): LOG.info("Starting migration of ConfKeyManager keys.") # Establish a Barbican client session that will be used for the entire # key migration process. Use cinder's own service credentials. try: ks_loading.register_auth_conf_options(self.conf, 'keystone_authtoken') auth = ks_loading.load_auth_from_conf_options(self.conf, 'keystone_authtoken') sess = ks_session.Session(auth=auth) self.barbican = barbican_client.Client(session=sess) except Exception as e: LOG.error("Aborting encryption key migration due to " "error creating Barbican client: %s", e) return errors = 0 for item in itertools.chain(volumes, backups): try: self._migrate_encryption_key(item) except Exception as e: LOG.error("Error migrating encryption key: %s", e) # NOTE(abishop): There really shouldn't be any soft errors, so # if an error occurs migrating one key then chances are they # will all fail. This avoids filling the log with the same # error in situations where there are many keys to migrate. errors += 1 if errors > MAX_KEY_MIGRATION_ERRORS: LOG.error("Aborting encryption key migration " "(too many errors).") break @coordination.synchronized('{item.id}-{f_name}') def _migrate_encryption_key(self, item): if item.encryption_key_id == self.fixed_key_id: self._update_encryption_key_id(item) def _get_barbican_key_id(self, user_id): # Create a Barbican secret using the same fixed_key algorithm. secret = self.barbican.secrets.create(algorithm='AES', bit_length=self.fixed_key_length, secret_type='symmetric', mode=None, payload=self.fixed_key_bytes) secret_ref = secret.store() # Create a Barbican ACL so the user can access the secret. 
acl = self.barbican.acls.create(entity_ref=secret_ref, users=[user_id]) acl.submit() _, _, encryption_key_id = secret_ref.rpartition('/') return encryption_key_id def _update_encryption_key_id(self, item): LOG.info("Migrating %(item_type)s %(item_id)s encryption key " "to Barbican", {'item_type': type(item).__name__, 'item_id': item.id}) encryption_key_id = self._get_barbican_key_id(item.user_id) item.encryption_key_id = encryption_key_id item.save() allowTypes = (volume_migration.VolumeMigration, objects.volume.Volume) if isinstance(item, allowTypes): snapshots = objects.snapshot.SnapshotList.get_all_for_volume( self.admin_context, item.id) for snapshot in snapshots: snapshot.encryption_key_id = encryption_key_id snapshot.save() def _log_migration_status(self): volumes_to_migrate = len(objects.volume.VolumeList.get_all( context=self.admin_context, filters={'encryption_key_id': self.fixed_key_id})) if volumes_to_migrate == 0: LOG.info("No volumes are using the ConfKeyManager's " "encryption_key_id.") else: LOG.warning("There are still %d volume(s) using the " "ConfKeyManager's all-zeros encryption key ID.", volumes_to_migrate) backups_to_migrate = len(objects.backup.BackupList.get_all( context=self.admin_context, filters={'encryption_key_id': self.fixed_key_id})) if backups_to_migrate == 0: # Old backups may exist that were created prior to when the # encryption_key_id is stored in the backup table. It's not # easy to tell whether the backup was of an encrypted volume, # in which case an all-zeros encryption key ID might be present # in the backup's metadata. LOG.info("No backups are known to be using the ConfKeyManager's " "encryption_key_id.") else: LOG.warning("There are still %d backups(s) using the " "ConfKeyManager's all-zeros encryption key ID.", backups_to_migrate) def migrate_fixed_key(volumes=None, backups=None, conf=CONF): volumes = volumes or [] backups = backups or [] KeyMigrator(conf).handle_key_migration(volumes, backups) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/keymgr/transfer.py0000664000175000017500000001055400000000000020034 0ustar00zuulzuul00000000000000# Copyright 2022 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from castellan.common.credentials import keystone_password from castellan.common import exception as castellan_exception from castellan import key_manager as castellan_key_manager from oslo_config import cfg from oslo_log import log as logging from oslo_utils import excutils from cinder import context from cinder import objects LOG = logging.getLogger(__name__) CONF = cfg.CONF class KeyTransfer(object): def __init__(self, conf: cfg.ConfigOpts): self.conf = conf self._service_context = keystone_password.KeystonePassword( password=conf.keystone_authtoken.password, auth_url=conf.keystone_authtoken.auth_url, username=conf.keystone_authtoken.username, user_domain_name=conf.keystone_authtoken.user_domain_name, project_name=conf.keystone_authtoken.project_name, project_domain_name=conf.keystone_authtoken.project_domain_name) @property def service_context(self): """Returns the cinder service's context.""" return self._service_context def transfer_key(self, volume: objects.volume.Volume, src_context: context.RequestContext, dst_context: context.RequestContext) -> None: """Transfer the key from the src_context to the dst_context.""" key_manager = castellan_key_manager.API(self.conf) old_encryption_key_id = volume.encryption_key_id secret = key_manager.get(src_context, old_encryption_key_id) try: new_encryption_key_id = key_manager.store(dst_context, secret) except castellan_exception.KeyManagerError: with excutils.save_and_reraise_exception(): LOG.error("Failed to transfer the encryption key. This is " "likely because the cinder service lacks the " "privilege to create secrets.") volume.encryption_key_id = new_encryption_key_id volume.save() snapshots = objects.snapshot.SnapshotList.get_all_for_volume( context.get_admin_context(), volume.id) for snapshot in snapshots: snapshot.encryption_key_id = new_encryption_key_id snapshot.save() key_manager.delete(src_context, old_encryption_key_id) def transfer_create(context: context.RequestContext, volume: objects.volume.Volume, conf: cfg.ConfigOpts = CONF) -> None: """Transfer the key from the owner to the cinder service.""" LOG.info("Initiating transfer of encryption key for volume %s", volume.id) key_transfer = KeyTransfer(conf) key_transfer.transfer_key(volume, src_context=context, dst_context=key_transfer.service_context) def transfer_accept(context: context.RequestContext, volume: objects.volume.Volume, conf: cfg.ConfigOpts = CONF) -> None: """Transfer the key from the cinder service to the recipient.""" LOG.info("Accepting transfer of encryption key for volume %s", volume.id) key_transfer = KeyTransfer(conf) key_transfer.transfer_key(volume, src_context=key_transfer.service_context, dst_context=context) def transfer_delete(context: context.RequestContext, volume: objects.volume.Volume, conf: cfg.ConfigOpts = CONF) -> None: """Transfer the key from the cinder service back to the owner.""" LOG.info("Cancelling transfer of encryption key for volume %s", volume.id) key_transfer = KeyTransfer(conf) key_transfer.transfer_key(volume, src_context=key_transfer.service_context, dst_context=context) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315576.8271158 cinder-27.0.0/cinder/locale/0000775000175000017500000000000000000000000015572 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315576.8271158 cinder-27.0.0/cinder/locale/ja/0000775000175000017500000000000000000000000016164 
5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315577.0831182 cinder-27.0.0/cinder/locale/ja/LC_MESSAGES/0000775000175000017500000000000000000000000017751 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/locale/ja/LC_MESSAGES/cinder.po0000664000175000017500000073635500000000000021600 0ustar00zuulzuul00000000000000# Translations template for cinder. # Copyright (C) 2015 ORGANIZATION # This file is distributed under the same license as the cinder project. # # Translators: # FIRST AUTHOR , 2011 # Ray Akimoto , 2015 # Ryo Fujita , 2013 # Tomoyuki KATO , 2013 # Andreas Jaeger , 2016. #zanata # Yoshiki Eguchi , 2016. #zanata msgid "" msgstr "" "Project-Id-Version: cinder VERSION\n" "Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" "POT-Creation-Date: 2025-07-07 22:42+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2016-10-11 03:43+0000\n" "Last-Translator: Yoshiki Eguchi \n" "Language: ja\n" "Plural-Forms: nplurals=1; plural=0;\n" "Generated-By: Babel 2.0\n" "X-Generator: Zanata 4.3.3\n" "Language-Team: Japanese\n" #, python-format msgid "\t%s" msgstr "\t%s" #, python-format msgid "" "\n" "OpenStack Cinder version: %(version)s\n" msgstr "" "\n" "OpenStack Cinder バージョン: %(version)s\n" #, python-format msgid " but size is now %d" msgstr "しかし、現在のサイズは %d です" msgid " or " msgstr "または" #, python-format msgid "" "%(driver)s manage_existing cannot manage a volume connected to hosts. Please " "disconnect this volume from existing hosts before importing" msgstr "" "%(driver)s manage_existing がホストに接続したボリュームを管理できません。イン" "ポート前に既存のホストからこのボリュームの接続を解除してください。" #, python-format msgid "" "%(err)s\n" "result: %(res)s." msgstr "" "%(err)s\n" "結果: %(res)s。" #, python-format msgid "%(exception)s: %(explanation)s" msgstr "%(exception)s: %(explanation)s" #, python-format msgid "" "%(fun)s: Failed with unexpected CLI output.\n" " Command: %(cmd)s\n" " stdout: %(out)s\n" " stderr: %(err)s" msgstr "" "%(fun)s: 予期しない CLI 出力により失敗しました。\n" "コマンド: %(cmd)s\n" " stdout: %(out)s\n" " stderr: %(err)s" #, python-format msgid "%(host)-25s\t%(availability_zone)-15s" msgstr "%(host)-25s\t%(availability_zone)-15s" #, python-format msgid "%(host)-25s\t%(zone)-15s" msgstr "%(host)-25s\t%(zone)-15s" #, python-format msgid "" "%(message)s\n" "Status Code: %(_status)s\n" "Body: %(_body)s" msgstr "" "%(message)s\n" "ステータスコード: %(_status)s\n" "本体: %(_body)s" #, python-format msgid "%(msg)s And %(num)s services from the cluster were also removed." msgstr "クラスターの %(msg)s と %(num)s サービスも削除されました。" #, python-format msgid "" "%(msg_type)s: creating NetworkPortal: ensure port %(port)d on ip %(ip)s is " "not in use by another service." msgstr "" "NetworkPortal の作成に関する %(msg_type)s: 他のサービスが IP %(ip)s 上のポー" "ト %(port)d を使用していないことを確認してください。" #, python-format msgid "%(name)s cannot be all spaces." msgstr "%(name)s は全て空白にはできません。" #, python-format msgid "%(new_size)s < current size %(size)s" msgstr "%(new_size)s < 現在のサイズ %(size)s" #, python-format msgid "%(reason)s" msgstr "%(reason)s" #, python-format msgid "" "%(type)s with id %(id)s is already being cleaned up or another host has " "taken over it." msgstr "" " %(type)s のid %(id)s はすでにクリーンアップされているか、他のホストに引き" "継がれています。" #, python-format msgid "" "%(worker_name)s value of %(workers)d is invalid, must be greater than 0." 
msgstr "" "%(workers)d の %(worker_name)s 値が無効です。0 より大きい値にしなければなりま" "せん。" #, python-format msgid "%s \"data\" is not in result." msgstr "結果内に %s \"data\" がありません。" #, python-format msgid "%s assigned" msgstr "割り当てられた %s" #, python-format msgid "" "%s cannot be accessed. Verify that GPFS is active and file system is mounted." msgstr "" "%s にアクセスできません。GPFS がアクティブであること、およびファイルシステム" "がマウントされていることを確認してください。" #, python-format msgid "%s cannot be resized using clone operation as it contains no blocks." msgstr "" "%s はブロックを含んでいないため、複製操作を使用してサイズ変更できません。" #, python-format msgid "" "%s cannot be resized using clone operation as it is hosted on compressed " "volume" msgstr "" "%s は圧縮ボリューム上でホストされているため、複製操作を使用してサイズ変更する" "ことはできません" #, python-format msgid "%s changed" msgstr "変更された %s" #, python-format msgid "%s configuration option is not set." msgstr "%s の設定オプションが設定されていません。" #, python-format msgid "%s does not exist." msgstr "%s は存在しません。" #, python-format msgid "%s is not a directory." msgstr "%s はディレクトリーではありません。" #, python-format msgid "%s is not installed" msgstr "%s がインストールされていません。" #, python-format msgid "%s is not installed." msgstr "%s がインストールされていません。" #, python-format msgid "%s is not set" msgstr "%s が設定されていません" #, python-format msgid "%s is not set and is required for the replication device to be valid." msgstr "" "%s が設定されていません。これはレプリケーションデバイスを有効にするために必要" "です。" #, python-format msgid "%s is not set." msgstr "%s が設定されていません。" #, python-format msgid "%s must be a valid raw or qcow2 image." msgstr "%s は有効な raw または qcow2 イメージでなければなりません。" #, python-format msgid "%s must be an absolute path." msgstr "%s は絶対パスである必要があります。" #, python-format msgid "%s not set." msgstr "%s が設定されていません。" #, python-format msgid "'%(key)s = %(value)s'" msgstr "'%(key)s = %(value)s'" #, python-format msgid "" "'%(prot)s' is invalid for flashsystem_connection_protocol in config file. " "valid value(s) are %(enabled)s." msgstr "" "設定ファイルの flashsystem_connection_protocol で '%(prot)s' は無効です。有効" "な値は %(enabled)s です。" msgid "'active' must be present when writing snap_info." msgstr "snap_info の書き込み時には 'active' が存在しなければなりません。" msgid "'consistencygroup_id' must be specified" msgstr "'consistencygroup_id' を指定する必要があります。" msgid "'qemu-img info' parsing failed." msgstr "'qemu-img info' の解析に失敗しました。" msgid "'success' not found" msgstr "'success' が見つかりません。" msgid "400 Bad Request" msgstr "413 Request entity too large" msgid "401 Unauthorized Error" msgstr "401 Unauthorized エラー" msgid "404 Not Found Error" msgstr "404 Not Found エラー" msgid "413 Request entity too large" msgstr "413 Request entity too large" msgid "A concurrent, possibly contradictory, request has been made." msgstr "矛盾する可能性のある同時実行リクエストが行われました。 " msgid "A readonly volume must be attached as readonly." msgstr "読み取り専用ボリュームは、読み取り専用として接続する必要があります。" msgid "A valid secondary target MUST be specified in order to failover." msgstr "" "フェイルオーバーを行うために、有効なセカンダリーターゲットを指定する必要があ" "ります。" #, python-format msgid "" "API Version String %(version)s is of invalid format. Must be of format " "MajorNum.MinorNum." msgstr "" "API バージョンの文字列 %(version)s が無効な形式です。MajorNum.MinorNum の形式" "である必要があります。" #, python-format msgid "API response: %s" msgstr "API 応答: %s" #, python-format msgid "API version %(version)s is not supported on this method." msgstr "このメソッドでは API バージョン %(version)s はサポートされていません。" msgid "Access list not available for public volume types." msgstr "パブリックボリュームタイプではアクセスリストを使用できません。" msgid "Activate or deactivate QoS error." msgstr "QoS のアクティブ化またはアクティブ化解除のエラー。" msgid "Activate snapshot error." 
msgstr "スナップショットのアクティブ化のエラー。" msgid "Add FC port to host error." msgstr "ホストへの FC ポート追加のエラー。" msgid "Add fc initiator to array error." msgstr "アレイへの FC イニシエーター追加のエラー。" msgid "Add hypermetro to metrogroup error." msgstr "metrogroup への hypermetro 追加エラー。" msgid "Add initiator to array error." msgstr "アレイへのイニシエーター追加のエラー。" msgid "Add lun to cache error." msgstr "キャッシュへの LUN 追加のエラー。" msgid "Add lun to partition error." msgstr "パーティションへの LUN 追加のエラー。" msgid "Add mapping view error." msgstr "マッピングビュー追加のエラー。" msgid "Add new host error." msgstr "新規ホスト追加のエラー。" msgid "Add port to port group error." msgstr "ポートグループへのポート追加のエラー。" #, python-format msgid "" "All the specified storage pools to be managed do not exist. Please check " "your configuration. Non-existent pools: %s" msgstr "" "管理対象となる指定されたすべてのストレージプールが存在しません。設定を確認し" "てください。存在しないプール: %s" msgid "An API version request must be compared to a VersionedMethod object." msgstr "" "API バージョンのリクエストは VersionedMethod オブジェクトと比較する必要があり" "ます。" msgid "An error has occurred during backup operation" msgstr "バックアップ操作中にエラーが発生しました。" #, python-format msgid "" "An error occurred during the LUNcopy operation. LUNcopy name: " "%(luncopyname)s. LUNcopy status: %(luncopystatus)s. LUNcopy state: " "%(luncopystate)s." msgstr "" "LUNcopy 操作中にエラーが発生しました。LUNcopy 名: %(luncopyname)s。LUNcopy 状" "況: %(luncopystatus)s。LUNcopy 状態: %(luncopystate)s。" msgid "An unknown error occurred." msgstr "不明なエラーが発生しました。" msgid "An unknown exception occurred." msgstr "不明な例外が発生しました。" msgid "Append port group description error." msgstr "ポートグループの説明追加のエラー。" #, python-format msgid "" "Applying the zones and cfgs to the switch failed (error code=%(err_code)s " "error msg=%(err_msg)s." msgstr "" "スイッチへのゾーンおよび cfgs の適用が失敗しました (エラーコード " "=%(err_code)s エラーメッセージ =%(err_msg)s。" #, python-format msgid "Array does not exist or is offline. Current status of array is %s." msgstr "" "アレイが存在しないかオフラインになっています。現在のアレイの状態は %s です。" msgid "Associate host to hostgroup error." msgstr "ホストグループへのホストの関連付けのエラー。" msgid "Associate host to mapping view error." msgstr "マッピングビューへのホストの関連付けのエラー。" msgid "Associate initiator to host error." msgstr "ホストへのイニシエーターの関連付けのエラー。" msgid "Associate lun to QoS error." msgstr "QoS への LUN の関連付けのエラー。" msgid "Associate lun to lungroup error." msgstr "LUN グループへの LUN の関連付けのエラー。" msgid "Associate lungroup to mapping view error." msgstr "マッピングビューへの LUN グループの関連付けのエラー。" msgid "Associate portgroup to mapping view error." msgstr "マッピングビューへのポートグループの関連付けのエラー。" #, python-format msgid "Async error: Unable to retrieve %(obj)s method %(method)s result" msgstr "" "非同期エラー: オブジェクト %(obj)s 、メソッド %(method)s の結果を取得でき" "ません。" msgid "At least one valid iSCSI IP address must be set." msgstr "有効な iSCSI IP アドレスを 1 つ以上設定する必要があります。" #, python-format msgid "Attempt to transfer %s with invalid auth key." msgstr "無効な認証キーを使用して %s を転送しようとしています。" #, python-format msgid "Attribute: %s not found." msgstr "属性 %s が見つかりません。" #, python-format msgid "Authentication failed, verify the switch credentials, error code %s." msgstr "" "認証が失敗しました。スイッチのクレデンシャルを検証してください。エラーコード " "%s。" #, python-format msgid "Availability zone '%(s_az)s' is invalid." msgstr "アベイラビリティーゾーン '%(s_az)s' は無効です。" msgid "Available categories:" msgstr "使用可能カテゴリー:" #, python-format msgid "Backend doesn't exist (%(backend)s)" msgstr "バックエンドが存在しません(%(backend)s)" msgid "Backend storage did not configure fiber channel target." msgstr "" "バックエンドストレージによってファイバーチャネルターゲットは設定されませんで" "した。" msgid "Backing up an in-use volume must use the force flag." 
msgstr "" "使用中のボリュームのバックアップを行う際は、force フラグを使用する必要があり" "ます。" #, python-format msgid "Backup %(backup_id)s could not be found." msgstr "バックアップ %(backup_id)s が見つかりませんでした。" msgid "Backup RBD operation failed" msgstr "バックアップ RBD 操作が失敗しました。" msgid "Backup already exists in database." msgstr "データベースのバックアップが既に存在しています。" msgid "Backup operation of an encrypted volume failed." msgstr "暗号化ボリュームのバックアップ操作が失敗しました。" #, python-format msgid "Backup should only have one snapshot but instead has %s" msgstr "" "バックアップに含まれるスナップショットは 1 つのみでなければなりませんが、%s " "個含まれています" msgid "Backup status must be available" msgstr "バックアップの状態は「使用可能」でなければなりません。" #, python-format msgid "Backup status must be available and not %s." msgstr "バックアップの状態は %s ではなく「使用可能」でなければなりません。" msgid "Backup status must be available or error" msgstr "バックアップの状態は「使用可能」または「エラー」でなければなりません。" msgid "Backup to be restored has invalid size" msgstr "復元するバックアップのサイズが無効です。" #, python-format msgid "Bad HTTP response status %(status)s" msgstr "不正な HTTP レスポンスステータス : %(status)s" #, python-format msgid "Bad key(s) in quota set: %s" msgstr "無効なキーがクォータセット 内にあります: %s" #, python-format msgid "" "Bad or unexpected response from the storage volume backend API: %(data)s" msgstr "" "ストレージボリュームバックエンド API からの不正な応答または想定しない応答: " "%(data)s" msgid "Bad response from Datera API" msgstr "Datera API からの正しくない応答" msgid "Bad response from SolidFire API" msgstr "SolidFire API からの正しくない応答" #, python-format msgid "Bad response from XMS, %s" msgstr "XMS からの正しくない応答、%s" msgid "Binary" msgstr "バイナリー" msgid "Blank components" msgstr "空白コンポーネント" #, python-format msgid "Brocade Fibre Channel Zoning CLI error: %(reason)s" msgstr "BrocadeファイバーチャネルゾーニングCLIエラー:%(reason)s" #, python-format msgid "Brocade Fibre Channel Zoning HTTP error: %(reason)s" msgstr "BrocadeファイバーチャネルゾーニングHTTPエラー:%(reason)s" msgid "CHAP secret should be 12-16 bytes." msgstr "CHAP 秘密は 12 バイトから 16 バイトである必要があります。" #, python-format msgid "" "CLI Exception output:\n" " command: %(cmd)s\n" " stdout: %(out)s\n" " stderr: %(err)s" msgstr "" "CLI 例外出力:\n" "コマンド: %(cmd)s\n" " stdout: %(out)s\n" " stderr: %(err)s" #, python-format msgid "" "CLI Exception output:\n" " command: %(cmd)s\n" " stdout: %(out)s\n" " stderr: %(err)s." msgstr "" "CLI 例外出力:\n" "コマンド: %(cmd)s\n" " stdout: %(out)s\n" " stderr: %(err)s。" msgid "" "CMMVC6071E The VDisk-to-host mapping was not created because the VDisk is " "already mapped to a host.\n" "\"" msgstr "" "CMMVC6071E VDisk は既にホストにマッピングされているため、VDisk からホストへの" "マッピングは作成されませんでした。\n" "\"" #, python-format msgid "CPG (%s) doesn't exist on array" msgstr "CPG (%s) がアレイ上に存在しません" msgid "Can not add FC port to host." msgstr "ホストに FC ポートを追加できません。" #, python-format msgid "Can not get pool info. pool: %s" msgstr "プール情報を取得できません。プール: %s" msgid "" "Can not set tiering policy for a deduplicated volume. Set the tiering policy " "on the pool where the deduplicated volume locates." msgstr "" "重複排除されたボリュームには階層化ポリシーを設定できません。重複排除されたボ" "リュームのあるプールに対して階層化ポリシーを設定します。" #, python-format msgid "Can not translate %s to integer." msgstr "%s を整数に変換できません。" msgid "Can't decode backup record." msgstr "バックアップレコードを復号化できません。" #, python-format msgid "Can't extend replication volume, volume: %(id)s" msgstr "レプリケーションボリュームを拡張できません。ボリューム: %(id)s" msgid "Can't find LUN on the array, please check the source-name or source-id." msgstr "" "アレイで LUN を見つけることができません。source-name または source-id を確認" "してください。" #, python-format msgid "Can't find cache name on the array, cache name is: %(name)s." 
msgstr "" "アレイでキャッシュ名を見つけることができません。キャッシュ名は %(name)s で" "す。" #, python-format msgid "Can't find partition name on the array, partition name is: %(name)s." msgstr "" "アレイでパーティション名を見つけることができません。パーティション名は " "%(name)s です。" #, python-format msgid "Can't find service: %s" msgstr "サービスが見つかりません: %s" msgid "" "Can't find snapshot on array, please check the source-name or source-id." msgstr "" "アレイでスナップショットを見つけることができません。source-name または " "source-id を確認してください。" msgid "Can't find the same host id from arrays." msgstr "アレイから同一のホスト ID が見つかりません。" msgid "Can't find valid IP from rest, please check it on storage." msgstr "" "rest で有効な IP を見つけることができません。ストーレジ上で確認を行ってくださ" "い。" #, python-format msgid "Can't get volume id. Volume name: %s." msgstr "ボリューム ID を取得できません。ボリューム名: %s。" #, python-format msgid "Can't import LUN %(lun_id)s to Cinder. LUN type mismatched." msgstr "" "Cinder に LUN %(lun_id)s をインポートできません。LUN タイプが一致しません。" #, python-format msgid "Can't import LUN %s to Cinder. Already exists in a HyperMetroPair." msgstr "" "Cinder に LUN %s をインポートできません。すでに HyperMetroPair 内に存在しま" "す。" #, python-format msgid "Can't import LUN %s to Cinder. Already exists in a LUN copy task." msgstr "" "Cinder に LUN %s をインポートできません。すでに LUN コピータスク内に存在しま" "す。" #, python-format msgid "Can't import LUN %s to Cinder. Already exists in a LUN group." msgstr "" "Cinder に LUN %s をインポートできません。すでに LUN グループ内に存在します。" #, python-format msgid "Can't import LUN %s to Cinder. Already exists in a LUN mirror." msgstr "" "Cinder に LUN %s をインポートできません。すでに LUN ミラー内に存在します。" #, python-format msgid "Can't import LUN %s to Cinder. Already exists in a SplitMirror." msgstr "" "Cinder に LUN %s をインポートできません。既に SplitMirror 内に存在します。" #, python-format msgid "Can't import LUN %s to Cinder. Already exists in a migration task." msgstr "" "Cinder に LUN %s をインポートできません。すでにマイグレーションタスク内に存在" "します。" #, python-format msgid "" "Can't import LUN %s to Cinder. Already exists in a remote replication task." msgstr "" "Cinder に LUN %s をインポートできません。すでにリモートのレプリケーションタス" "ク内に存在します。" #, python-format msgid "Can't import LUN %s to Cinder. LUN status is not normal." msgstr "" "Cinder にスナップショット%s をインポートできません。LUN 状態が正常ではありま" "せん。" #, python-format msgid "Can't import snapshot %s to Cinder. Snapshot doesn't belong to volume." msgstr "" "Cinder にスナップショット%s をインポートできません。スナップショットはボ" "リュームに属していません。" #, python-format msgid "Can't import snapshot %s to Cinder. Snapshot is exposed to initiator." msgstr "" "Cinder にスナップショット%s をインポートできません。スナップショットはイニシ" "エーターに公開されています。" #, python-format msgid "" "Can't import snapshot %s to Cinder. Snapshot status is not normal or running " "status is not online." msgstr "" "Cinder にスナップショット %s をインポートできません。スナップショットの状態が" "正常ではないか、実行状態がオンラインではありません。" msgid "Can't parse backup record." msgstr "バックアップレコードを解析できません。" #, python-format msgid "Can't support cache on the array, cache name is: %(name)s." msgstr "アレイでキャッシュをサポートできません。キャッシュ名は %(name)s です。" #, python-format msgid "Can't support partition on the array, partition name is: %(name)s." msgstr "" "アレイでパーティションがサポートできません、パーティション名は %(name)s で" "す。" msgid "Can't support qos on the array." msgstr "アレイで QoS がサポートできません。" msgid "Can't support tier on the array." msgstr "アレイでティアがサポートできません。" #, python-format msgid "" "Cannot add volume %(volume_id)s to group %(group_id)s because it has no " "volume type." msgstr "" "ボリューム %(volume_id)s にはボリュームタイプがないため、このボリュームをグ" "ループ %(group_id)s に追加できません。" #, python-format msgid "" "Cannot add volume %(volume_id)s to group %(group_id)s because it is already " "in group %(orig_group)s." 
msgstr "" "ボリューム %(volume_id)s が既にグループ %(orig_group)s 内に存在するため、この" "ボリュームをグループ %(group_id)s に追加することはできません。" #, python-format msgid "" "Cannot add volume %(volume_id)s to group %(group_id)s because volume cannot " "be found." msgstr "" "ボリューム %(volume_id)s は見つからないため、グループ %(group_id)s に追加でき" "ません。" #, python-format msgid "" "Cannot add volume %(volume_id)s to group %(group_id)s because volume does " "not exist." msgstr "" "ボリューム %(volume_id)s は存在しないため、グループ %(group_id)s に追加できま" "せん。" #, python-format msgid "" "Cannot add volume %(volume_id)s to group %(group_id)s because volume is in " "an invalid state: %(status)s. Valid states are: %(valid)s." msgstr "" "ボリューム %(volume_id)s は無効な状態 %(status)s であるため、グルー" "プ%(group_id)s に追加できません。有効な状態は %(valid)s です。" #, python-format msgid "" "Cannot add volume %(volume_id)s to group %(group_id)s because volume type " "%(volume_type)s is not supported by the group." msgstr "" "ボリュームタイプ %(volume_type)s はグループ %(group_id)s ではサポートされてい" "ないため、ボリューム %(volume_id)s をこのグループに追加できません。" msgid "Cannot change VF context in the session." msgstr "VF コンテキストをセッション内で変更できません。" #, python-format msgid "" "Cannot change VF context, specified VF is not available in the manageable VF " "list %(vf_list)s." msgstr "" "VF コンテキストを変更できません。指定された VF は管理可能な VF リスト " "%(vf_list)s で使用可能ではありません。" msgid "Cannot create encryption specs. Volume type in use." msgstr "暗号化仕様を作成できません。ボリュームタイプは使用中です。" #, python-format msgid "" "Cannot create group %(group)s because snapshot %(snap)s is not in a valid " "state. Valid states are: %(valid)s." msgstr "" "スナップショット %(snap)s は有効な状態ではないため、グループ%(group)s を作成" "できません。有効な状態は %(valid)s です。" #, python-format msgid "" "Cannot create group %(group)s because source volume %(source_vol)s is not in " "a valid state. Valid states are: %(valid)s." msgstr "" "ソースボリューム %(source_vol)s が有効な状態にないため、グループ %(group)s を" "作成できません。有効な状態は %(valid)s です。" #, python-format msgid "Cannot create group_type with name %(name)s and specs %(group_specs)s" msgstr "" "名前 %(name)s および仕様 %(group_specs)s を使用して group_type を作成できませ" "ん。" #, python-format msgid "" "Cannot create image of disk format: %s. Only vmdk disk format is accepted." msgstr "" "ディスク形式 %s のイメージを作成できません。vmdk ディスク形式のみが受け入れら" "れます。" #, python-format msgid "Cannot create volume of size %s: not multiple of 8GB." msgstr "サイズが %s のボリュームを作成できません: 8GB の倍数ではありません。" #, python-format msgid "Cannot create volume_type with name %(name)s and specs %(extra_specs)s" msgstr "" "名前 %(name)s および仕様 %(extra_specs)s を使用して volume_type を作成できま" "せん。" msgid "Cannot delete encryption specs. Volume type in use." msgstr "暗号化仕様を削除できません。ボリュームタイプは使用中です。" msgid "Cannot find migration task." msgstr "マイグレーションタスクを見つけることができません。" #, python-format msgid "Cannot get mcs_id by channel id: %(channel_id)s." msgstr "チャンネル ID %(channel_id)s によって mcs_id を取得できません。" #, python-format msgid "" "Cannot provide both 'cgsnapshot_id' and 'source_cgid' to create consistency " "group %(name)s from source." msgstr "" "ソースから整合性グループ %(name)s を作成するために、'cgsnapshot_id' または " "'source_cgid' の両方を提供することができません。" msgid "Cannot register resource" msgstr "リソースを登録できません。" msgid "Cannot register resources" msgstr "リソースを登録できません。" #, python-format msgid "" "Cannot remove volume %(volume_id)s from group %(group_id)s because it is not " "in the group." msgstr "" "ボリューム %(volume_id)s はグループ %(group_id)s 内に存在しないため、このグ" "ループから削除できません。" #, python-format msgid "" "Cannot remove volume %(volume_id)s from group %(group_id)s because volume is " "in an invalid state: %(status)s. Valid states are: %(valid)s." 
msgstr "" "ボリューム %(volume_id)s は無効な状態 %(status)s であるため、グループ " "%(group_id)s から削除できません。有効な状態は %(valid)s です。" #, python-format msgid "Cannot retype from HPE3PARDriver to %s." msgstr "HPE3PARDriver から %s にタイプ変更することはできません。" msgid "Cannot retype from one 3PAR array to another." msgstr "3PAR アレイから別のアレイにタイプ変更することはできません。" msgid "Cannot retype to a CPG in a different domain." msgstr "別のドメインの CPG にタイプ変更できません。" msgid "Cannot retype to a snap CPG in a different domain." msgstr "別のドメインのスナップ CPG にタイプ変更できません。" msgid "Cannot save group_snapshots changes in group object update." msgstr "" "グループオブジェクトの更新で、 group_snapshots の変更を保存できません。" msgid "Cannot save volume_types changes in group object update." msgstr "グループオブジェクトの更新で、 volume_types の変更を保存できません。" msgid "Cannot save volumes changes in group object update." msgstr "グループオブジェクトの更新で、 ボリュームの変更を保存できません。" msgid "Cannot update encryption specs. Volume type in use." msgstr "暗号化仕様を更新できません。ボリュームタイプは使用中です。" #, python-format msgid "" "Cannot update group %(group_id)s because no valid name, description, " "add_volumes, or remove_volumes were provided." msgstr "" "有効な名前、説明、add_volumes 、または remove_volumes が指定されなかったた" "め、グループ %(group_id)s を更新できません。" #, python-format msgid "Cannot update group_type %(id)s" msgstr "group_type %(id)s を更新できません。" #, python-format msgid "Cannot update volume_type %(id)s" msgstr "volume_type %(id)s を更新できません。" #, python-format msgid "CgSnapshot %(cgsnapshot_id)s could not be found." msgstr "CgSnapshot %(cgsnapshot_id)s が見つかりませんでした。" msgid "Change hostlun id error." msgstr "hostlun ID 変更のエラー。" msgid "Change lun priority error." msgstr "LUN 優先順位変更のエラー。" msgid "Change lun smarttier policy error." msgstr "LUN smarttier ポリシー変更のエラー。" #, python-format msgid "" "Change would make usage less than 0 for the following resources: %(unders)s" msgstr "変更によって、次のリソースの使用量が 0 未満になります: %(unders)s" msgid "Check hostgroup associate error." msgstr "ホストグループ関連付けの確認のエラー。" msgid "Check initiator added to array error." msgstr "アレイに追加されたイニシエーターの確認のエラー。" msgid "Check initiator associated to host error." msgstr "ホストに関連付けられたイニシエーターの確認のエラー。" msgid "Check lungroup associate error." msgstr "LUN グループ関連付けの確認のエラー。" msgid "Check portgroup associate error." msgstr "ポートグループ関連付けの確認のエラー。" msgid "Chunk size is not multiple of block size for creating hash." msgstr "" "チャンクサイズが、ハッシュを作成するためのブロックサイズの倍数ではありませ" "ん。" #, python-format msgid "Cisco Fibre Channel Zoning CLI error: %(reason)s" msgstr "CiscoファイバーチャネルゾーニングCLIエラー:%(reason)s" #, python-format msgid "" "Clone type '%(clone_type)s' is invalid; valid values are: '%(full_clone)s' " "and '%(linked_clone)s'." msgstr "" "クローンタイプ '%(clone_type)s' は無効です。有効な値は '%(full_clone)s' およ" "び '%(linked_clone)s' です。" msgid "Cluster" msgstr "クラスター" #, python-format msgid "Cluster %(id)s could not be found." msgstr "クラスター %(id)s が見つかりませんでした。" #, python-format msgid "Cluster %(id)s still has hosts." msgstr "クラスター %(id)s はまだホストを持っています。" #, python-format msgid "Cluster %(name)s already exists." msgstr "クラスター %(name)s は既に存在します。" #, python-format msgid "Cluster %s successfully removed." msgstr "クラスター %s は正常に削除されました。" #, python-format msgid "CommandLineHelper._wait_for_condition: %s timeout." msgstr "CommandLineHelper._wait_for_condition: %s タイムアウト。" msgid "Compression Enabler is not installed. Can not create compressed volume." msgstr "" "圧縮イネーブラーがインストールされていません。圧縮されたボリュームを作成でき" "ません。" #, python-format msgid "Compute cluster: %(cluster)s not found." msgstr "コンピュートクラスター: %(cluster)s が見つかりません。" msgid "Condition has no field." 
msgstr "条件にフィールドがありません。" msgid "Configuration error: dell_sc_ssn not set." msgstr "設定エラー: dell_sc_ssn not が設定されていません。" msgid "Configuration is not found." msgstr "設定が見つかりません。" #, python-format msgid "Configuration value %s is not set." msgstr "構成値 %s が設定されていません。" #, python-format msgid "" "Conflicting QoS specifications in volume type %s: when QoS spec is " "associated to volume type, legacy \"netapp:qos_policy_group\" is not allowed " "in the volume type extra specs." msgstr "" "ボリューム種別 %s に競合する QoS 仕様があります。QoS 仕様がボリューム種別に関" "連付けられている場合、レガシーの \"netapp:qos_policy_group\" はボリューム種別" "の追加仕様で許可されません。" #, python-format msgid "Connection to glance failed: %(reason)s" msgstr "Glance との接続に失敗しました: %(reason)s" #, python-format msgid "Connection to swift failed: %(reason)s" msgstr "Swift との接続に失敗しました: %(reason)s" #, python-format msgid "Connector does not provide: %s" msgstr "コネクターが %s を提供しません。" #, python-format msgid "Connector doesn't have required information: %(missing)s" msgstr "コネクターは必要な情報を持っていません: %(missing)s" #, python-format msgid "ConsistencyGroup %(consistencygroup_id)s could not be found." msgstr "ConsistencyGroup %(consistencygroup_id)s が見つかりませんでした。" msgid "Container" msgstr "コンテナー" msgid "Container size smaller than required file size." msgstr "コンテナーサイズが必要なファイルサイズを下回っています。" #, python-format msgid "Converted to %(f1)s, but format is now %(f2)s" msgstr "%(f1)s に変換されましたが、現在の形式は %(f2)s です" #, python-format msgid "Converted to %(vol_format)s, but format is now %(file_format)s" msgstr "%(vol_format)s に変換されましたが、現在の形式は %(file_format)s です" #, python-format msgid "Converted to raw, but format is now %s" msgstr "ローに変換されましたが、現在の形式は %s です" #, python-format msgid "Converted to raw, but format is now %s." msgstr "ローに変換されましたが、現在の形式は %s です。" msgid "Coordinator uninitialized." msgstr "初期化されていないコーディネーター。" #, python-format msgid "" "Copy volume task failed: convert_to_base_volume: id=%(id)s, " "status=%(status)s." msgstr "" "ボリュームのコピータスクが失敗しました: convert_to_base_volume: id=%(id)s、" "status=%(status)s。" #, python-format msgid "" "Copy volume task failed: create_cloned_volume id=%(id)s, status=%(status)s." msgstr "" "ボリュームタスクのコピーが失敗しました: create_cloned_volume id=%(id)s、状態 " "=%(status)s。" #, python-format msgid "Copying metadata from %(src_type)s %(src_id)s to %(vol_id)s." msgstr "" "%(src_type)s %(src_id)s から %(vol_id)s にメタデータをコピーしています。" #, python-format msgid "Could not complete failover: %s" msgstr "フェイルオーバーが完了しませんでした: %s" msgid "" "Could not determine which Keystone endpoint to use. This can either be set " "in the service catalog or with the cinder.conf config option " "'backup_swift_auth_url'." msgstr "" "どの Swift のエンドポイントを使用すべきか決定できませんでした。これを設定する" "には、サービスカタログまたは cinder.conf config のオプションである " "'backup_swift_auth_url' を使用します。" msgid "" "Could not determine which Swift endpoint to use. This can either be set in " "the service catalog or with the cinder.conf config option 'backup_swift_url'." msgstr "" "どの Swift のエンドポイントを使用すべきか決定できませんでした。これを設定する" "には、サービスカタログまたは cinder.conf config のオプションである " "'backup_swift_url' を使用します。" #, python-format msgid "Could not find GPFS cluster id: %s." msgstr "GPFS クラスター ID が見つかりませんでした: %s。" #, python-format msgid "Could not find GPFS file system device: %s." 
msgstr "GPFS ファイルシステムデバイスが見つかりませんでした: %s。" #, python-format msgid "Could not find config at %(path)s" msgstr "%(path)s で config が見つかりませんでした。" #, python-format msgid "Could not find iSCSI export for volume %s" msgstr "ボリューム %s の iSCSI エクスポートが見つかりませんでした。" #, python-format msgid "Could not find key in output of command %(cmd)s: %(out)s." msgstr "" "コマンド %(cmd)s: %(out)sの出力でキーを見つけることができませんでした。" #, python-format msgid "Could not find parameter %(param)s" msgstr "パラメーター %(param)s が見つかりませんでした。" #, python-format msgid "Could not find target %s" msgstr "ターゲット %s が見つかりませんでした。" #, python-format msgid "Could not find unique snapshot %(snap)s on volume %(vol)s." msgstr "" "ボリューム %(vol)s で一意のスナップショット %(snap)s を見つけることができませ" "んでした。" msgid "Could not get system name." msgstr "システム名を取得できませんでした。" #, python-format msgid "" "Could not read information for snapshot %(name)s. Code: %(code)s. Reason: " "%(reason)s" msgstr "" "スナップショット %(name)s の情報を読み取ることができませんでした。コード: " "%(code)s。理由: %(reason)s" #, python-format msgid "Could not restore configuration file %(file_path)s: %(exc)s" msgstr "設定ファイル %(file_path)s をリストアできませんでした: %(exc)s" #, python-format msgid "Could not save configuration to %(file_path)s: %(exc)s" msgstr "設定を %(file_path)s に保存できませんでした: %(exc)s" #, python-format msgid "Could not start consistency group snapshot %s." msgstr "整合性グループのスナップショット %s を開始できませんでした。" #, python-format msgid "Couldn't find ORM model for Persistent Versioned Object %s." msgstr "" "バージョンが設定された永続オブジェクト %s 用のORMモデルが見つかりません。" #, python-format msgid "Couldn't remove cluster %s because it doesn't exist." msgstr "クラスター %s は存在しないため削除できませんでした。" #, python-format msgid "Couldn't remove cluster %s because it still has hosts." msgstr "クラスター %s はまだホストを持っているため削除できませんでした。" #, python-format msgid "Counter %s not found" msgstr "カウンター %s が見つかりません" msgid "Create QoS policy error." msgstr "QoS ポリシー作成のエラー。" #, python-format msgid "" "Create backup aborted, expected backup status %(expected_status)s but got " "%(actual_status)s." msgstr "" "バックアップの作成が中止しました。予期していたバックアップの状態は " "%(expected_status)s ですが、%(actual_status)s を受け取りました。" #, python-format msgid "" "Create backup aborted, expected volume status %(expected_status)s but got " "%(actual_status)s." msgstr "" "バックアップの作成が中止しました。予期していたボリュームの状態は " "%(expected_status)s ですが、%(actual_status)s を受け取りました。" msgid "Create group failed." msgstr "グループの作成に失敗しました。" msgid "Create hostgroup error." msgstr "ホストグループ作成のエラー。" #, python-format msgid "Create hypermetro error. %s." msgstr "hypermetro 作成のエラー。%s。" msgid "Create lun error." msgstr "LUN 作成のエラー。" msgid "Create lun migration error." msgstr "LUN マイグレーション作成のエラー。" msgid "Create luncopy error." msgstr "LUN コピー作成のエラー。" msgid "Create lungroup error." msgstr "LUN グループ作成のエラー。" msgid "Create manager volume flow failed." msgstr "マネージャーボリュームフローの作成が失敗しました。" msgid "Create port group error." msgstr "ポートグループ作成のエラー。" msgid "Create replication error." msgstr "レプリケーション作成のエラー。" #, python-format msgid "Create replication pair failed. Error: %s." msgstr "レプリケーションペアの作成が失敗しました。エラー: %s。" msgid "Create snapshot error." msgstr "スナップショット作成のエラー。" #, python-format msgid "Create volume error. Because %s." msgstr "ボリューム作成のエラー。理由 %s。" msgid "Create volume failed." msgstr "ボリュームの作成に失敗しました。" #, python-format msgid "" "Creating and activating zone set failed: (Zone set=%(cfg_name)s " "error=%(err)s)." msgstr "" "ゾーンセットの作成およびアクティブ化に失敗しました: (Zone set=%(cfg_name)s " "error=%(err)s)。" #, python-format msgid "" "Creating and activating zone set failed: (Zone set=%(zoneset)s " "error=%(err)s)." 
msgstr "" "ゾーンセットの作成およびアクティブ化に失敗しました: (Zone set=%(zoneset)s " "error=%(err)s)。" #, python-format msgid "" "Dedup is a valid provisioning type, but requires WSAPI version " "'%(dedup_version)s' version '%(version)s' is installed." msgstr "" "Dedup は有効なプロビジョニングタイプですが、WSAPI バージョン" "「%(dedup_version)s」バージョン「%(version)s」がインストールされていることを" "必要としています。" msgid "" "Deduplication Enabler is not installed. Can not create deduplicated volume." msgstr "" "重複排除イネーブラーがインストールされていません。重複排除されたボリュームを" "作成できません。" msgid "Default group type can not be found." msgstr "デフォルトのグループ種別が見つかりません。" #, python-format msgid "" "Default quota for resource: %(res)s is set by the default quota flag: " "quota_%(res)s, it is now deprecated. Please use the default quota class for " "default quota." msgstr "" "リソース %(res)s のデフォルトのクォータは、デフォルトのクォータフラグ: " "quota_%(res)s によって設定されていますが、これは現在推奨されていません。デ" "フォルトのクォータにデフォルトのクォータクラスを使用してください。" msgid "Default volume type can not be found." msgstr "デフォルトのボリュームタイプが見つかりません。" msgid "Delete LUNcopy error." msgstr "LUN コピー削除のエラー。" msgid "Delete QoS policy error." msgstr "QoS ポリシー削除のエラー。" msgid "Delete associated lun from lungroup error." msgstr "LUN グループからの関連付けされた LUN 削除のエラー。" #, python-format msgid "" "Delete backup aborted, the backup service currently configured " "[%(configured_service)s] is not the backup service that was used to create " "this backup [%(backup_service)s]." msgstr "" "バックアップの削除が中止しました。現在構成されているバックアップサービス " "[%(configured_service)s] は、このバックアップの作成に使用されたバックアップ" "サービス [%(backup_service)s] ではありません。" msgid "Delete group failed." msgstr "グループの削除に失敗しました。" msgid "Delete hostgroup error." msgstr "ホストグループ削除のエラー。" msgid "Delete hostgroup from mapping view error." msgstr "マッピングビューからのホストグループ削除のエラー。" msgid "Delete hypermetro from metrogroup error." msgstr "metrogroup からの hypermetro 削除エラー。" msgid "Delete hypermetro group error." msgstr "hypermetro グループ削除エラー。" msgid "Delete lun error." msgstr "LUN 削除のエラー。" msgid "Delete lun migration error." msgstr "LUN マイグレーション削除のエラー。" msgid "Delete lungroup error." msgstr "LUN グループ削除のエラー。" msgid "Delete lungroup from mapping view error." msgstr "マッピングビューからの LUN グループ削除のエラー。" msgid "Delete mapping view error." msgstr "マッピングビュー削除のエラー。" msgid "Delete port group error." msgstr "ポートグループ削除のエラー。" msgid "Delete portgroup from mapping view error." msgstr "マッピングビューからのポートグループ削除のエラー。" msgid "Delete snapshot error." msgstr "スナップショット削除のエラー。" #, python-format msgid "Delete snapshot of volume not supported in state: %s." msgstr "状態 %s でのボリュームのスナップショット削除はサポートされていません。" #, python-format msgid "" "Delete_backup aborted, expected backup status %(expected_status)s but got " "%(actual_status)s." msgstr "" "バックアップの削除が中止しました。予期していたバックアップの状態は " "%(expected_status)s ですが、%(actual_status)s を受け取りました。" msgid "Deleting volume from database and skipping rpc." msgstr "データベースからボリュームを作成中。rpc をスキップします。" #, python-format msgid "Deleting volume metadata is not allowed for volumes in %s status." msgstr "" "ボリュームの状態が %s である場合は、ボリュームメタデータの削除は許可されませ" "ん。" #, python-format msgid "Deleting zones failed: (command=%(cmd)s error=%(err)s)." msgstr "ゾーンの削除に失敗しました: (command=%(cmd)s error=%(err)s)。" msgid "Dell API 2.1 or later required for Consistency Group support" msgstr "整合性グループをサポートするには Dell API 2.1 以降が必要です" msgid "" "Dell Cinder driver configuration error replication not supported with direct " "connect." 
msgstr "" "直接接続では Dell Cinder ドライバーの設定エラーの複製を行うことはできません。" #, python-format msgid "Dell Cinder driver configuration error replication_device %s not found" msgstr "" "Dell Cinder ドライバーの設定エラー の replication_device %s が見つかりませ" "ん。" #, python-format msgid "Destination has migration_status %(stat)s, expected %(exp)s." msgstr "" "宛先の migration_status は %(stat)s ですが、予期されたのは %(exp)s です。" msgid "Destination volume not mid-migration." msgstr "宛先ボリュームはマイグレーション中ではありません" msgid "" "Detach volume failed: More than one attachment, but no attachment_id " "provided." msgstr "" "ボリュームの切り離しが失敗しました: 2 つ以上の接続が存在するものの、" "attachment_id が提供されていません。" msgid "Detach volume from instance and then try again." msgstr "ボリュームをインスタンスから切り離して、再試行してください。" #, python-format msgid "Did not find expected column in %(fun)s: %(hdr)s." msgstr "予期された列が %(fun)s で見つかりませんでした: %(hdr)s" #, python-format msgid "Did not find the expected key %(key)s in %(fun)s: %(raw)s." msgstr "予期されるキー %(key)s が %(fun)s: %(raw)s で見つかりませんでした。" msgid "Down Hosts" msgstr "停止中のホスト" #, python-format msgid "" "Downlevel GPFS Cluster Detected. GPFS Clone feature not enabled in cluster " "daemon level %(cur)s - must be at least at level %(min)s." msgstr "" "ダウンレベル GPFS クラスターが検出されました。クラスターデーモンレベル " "%(cur)s で GPFS 複製フィーチャーが有効になっていません。レベル %(min)s 以上は" "必要です。" #, python-format msgid "Driver initialize connection failed (error: %(err)s)." msgstr "ドライバーの初期化接続に失敗しました (エラー: %(err)s)。" msgid "Driver must implement initialize_connection" msgstr "ドライバーは initialize_connection を実装する必要があります。" #, python-format msgid "" "Driver successfully decoded imported backup data, but there are missing " "fields (%s)." msgstr "" "ドライバーがインポートされたバックアップデータを正常に復号化しましたが、欠け" "ているフィールド (%s) があります。" #, python-format msgid "" "Either 'cgsnapshot_id' or 'source_cgid' must be provided to create " "consistency group %(name)s from source." msgstr "" "ソースから整合性グループ %(name)s を作成するには、'cgsnapshot_id' または " "'source_cgid' を指定する必要があります。" #, python-format msgid "" "Either SLO: %(slo)s or workload %(workload)s is invalid. Examine previous " "error statement for valid values." msgstr "" "SLO %(slo)s またはワークロード %(workload)s のいずれかが無効です。以前のエ" "ラーステートメントで有効な値を調べてください。" msgid "Enables QoS." msgstr "QoS を有効化します。" msgid "Enables compression." msgstr "圧縮を有効化します。" msgid "Enables replication." msgstr "レプリケーションを有効化します。" msgid "Ensure that configfs is mounted at /sys/kernel/config." msgstr "" "configfs が /sys/kernel/config でマウントされていることを確認してください。" msgid "Error connecting to ceph cluster." msgstr "ceph クラスターへの接続エラーです。" #, python-format msgid "Error connecting via ssh: %s" msgstr "ssh を介した接続中にエラーが発生しました: %s" msgid "Error deleting replay profile." msgstr "リプレープロファイルの削除でエラーが発生しました。" #, python-format msgid "Error deleting volume %(ssn)s: %(volume)s" msgstr "ボリューム %(ssn)s の削除でエラーが発生しました: %(volume)s" #, python-format msgid "Error during evaluator parsing: %(reason)s" msgstr "エバリュエーター構文解析中にエラーが発生しました: %(reason)s" #, python-format msgid "" "Error enabling iSER for NetworkPortal: please ensure that RDMA is supported " "on your iSCSI port %(port)d on ip %(ip)s." msgstr "" "NetworkPortal の iSER の有効化に関するエラー: IP %(ip)s 上の iSCSI ポート " "%(port)d で RDMA がサポートされていることを確認してください。" #, python-format msgid "Error encountered during cleanup of a failed attach: %(ex)s" msgstr "失敗した接続のクリーンアップ中にエラーが検出されました: %(ex)s" #, python-format msgid "Error executing command via ssh: %s" msgstr "ssh を介したコマンドの実行エラー: %s" #, python-format msgid "Error extending volume: %(reason)s" msgstr "ボリュームの拡張エラーです: %(reason)s" #, python-format msgid "Error finding %(name)s." 
msgstr "%(name)s の検索中にエラーが発生しました。" #, python-format msgid "Error in SolidFire API response: data=%(data)s" msgstr "SolidFire API 応答にエラーがあります: data=%(data)s" msgid "Error not a KeyError." msgstr "KeyError ではなくエラーです。" msgid "Error not a TypeError." msgstr "TypeError ではなくエラーです。" #, python-format msgid "Error occurred when creating group_snapshot %s." msgstr "group_snapshot %s を作成中にエラーが発生しました。" #, python-format msgid "Error occurred when deleting group snapshot %s." msgstr "グループスナップショット %s を削除中にエラーが発生しました。" #, python-format msgid "Error occurred when deleting group_snapshot %s." msgstr "group_snapshot %s を削除中にエラーが発生しました。" #, python-format msgid "Error occurred when updating group %s." msgstr "グループ %s を更新中にエラーが発生しました。" msgid "Error retrieving volume size" msgstr "ボリュームサイズの抽出でエラーが発生しました" #, python-format msgid "Error while authenticating with switch: %s." msgstr "スイッチによる認証中にエラーが発生しました: %s。" #, python-format msgid "Error while changing VF context %s." msgstr "VF コンテキスト %s の変更中にエラーが発生しました。" #, python-format msgid "Error while checking the firmware version %s." msgstr "ファームウェアバージョン %s の検査中にエラーが発生しました。" #, python-format msgid "Error while checking transaction status: %s" msgstr "トランザクション状態の検査中にエラーが発生しました: %s" #, python-format msgid "Error while checking whether VF is available for management %s." msgstr "VF が管理 %s に対して使用可能かどうかを検査中にエラーが発生しました。" #, python-format msgid "" "Error while connecting the switch %(switch_id)s with protocol %(protocol)s. " "Error: %(error)s." msgstr "" "プロトコル %(protocol)s を指定したスイッチ %(switch_id)s の接続中にエラーが発" "生しました。エラー: %(error)s。" #, python-format msgid "Error while creating authentication token: %s" msgstr "認証トークンの作成中にエラーが発生しました: %s" #, python-format msgid "Error while getting data via ssh: (command=%(cmd)s error=%(err)s)." msgstr "" "ssh を介してデータを取得中にエラーが発生しました: (command=%(cmd)s " "error=%(err)s)。" #, python-format msgid "Error while getting nvp value: %s." msgstr "nvp 値の取得中にエラーが発生しました: %s。" #, python-format msgid "Error while getting session information %s." msgstr "セッション情報 %s の取得中にエラーが発生しました。" #, python-format msgid "Error while parsing the data: %s." msgstr "データの解析中にエラーが発生しました: %s。" #, python-format msgid "Error while querying page %(url)s on the switch, reason %(error)s." msgstr "" "スイッチでのページ %(url)s の照会中にエラーが発生しました。理由 %(error)s。" #, python-format msgid "" "Error while removing the zones and cfgs in the zone string: %(description)s." msgstr "" "ゾーン文字列でのゾーンおよび cfgs の削除中にエラーが発生しました: " "%(description)s。" #, python-format msgid "Error while requesting %(service)s API." msgstr "%(service)s API の要求中にエラーが発生しました。" #, python-format msgid "Error while running zoning CLI: (command=%(cmd)s error=%(err)s)." msgstr "" "ゾーニング CLI の実行中にエラーが発生しました: (command=%(cmd)s " "error=%(err)s)。" #, python-format msgid "" "Error while updating the new zones and cfgs in the zone string. Error " "%(description)s." msgstr "" "ゾーン文字列での新規ゾーンおよび cfgs の更新中にエラーが発生しました。エラー " "%(description)s。" msgid "Error writing field to database" msgstr "データベースへのフィールドの書き込みに失敗しました" msgid "Exceeded the limit of snapshots per volume" msgstr "ボリュームごとのスナップショットの制限を超えました。" #, python-format msgid "Exception in _select_ds_for_volume: %s." msgstr "_select_ds_for_volume で例外が発生しました: %s。" #, python-format msgid "Exception while forming the zone string: %s." 
msgstr "ゾーン文字列の形成中に例外が発生しました: %s。" #, python-format msgid "Exception: %s" msgstr "例外: %s" #, python-format msgid "Expected higher file exists for snapshot %s" msgstr "" "スナップショット %s には、上位のファイルが存在することが期待されています。" #, python-format msgid "Expected integer for node_count, svcinfo lsiogrp returned: %(node)s." msgstr "" "node_count に対して整数が予期され、svcinfo lsiogrp が返されました: %(node)s。" #, python-format msgid "Expected no output from CLI command %(cmd)s, got %(out)s." msgstr "" "CLI コマンド %(cmd)s からの出力がないことが予期されます。%(out)s を受け取りま" "す。" #, python-format msgid "" "Expected single vdisk returned from lsvdisk when filtering on vdisk_UID. " "%(count)s were returned." msgstr "" "vdisk_UID でフィルタリングする場合、lsvdisk から 1 つの vdisk が返されること" "が予期されます。%(count)s が返されました。" #, python-format msgid "Expected volume size was %d" msgstr "予期されたボリュームサイズは %d でした。" #, python-format msgid "" "Export backup aborted, expected backup status %(expected_status)s but got " "%(actual_status)s." msgstr "" "バックアップのエクスポートが中止しました。予期していたバックアップの状態は " "%(expected_status)s ですが、%(actual_status)s を受け取りました。" #, python-format msgid "" "Export record aborted, the backup service currently configured " "[%(configured_service)s] is not the backup service that was used to create " "this backup [%(backup_service)s]." msgstr "" "レコードのエクスポートが中止しました。現在構成されているバックアップサービス " "[%(configured_service)s] は、このバックアップの作成に使用されたバックアップ" "サービス [%(backup_service)s] ではありません。" msgid "Extend volume error." msgstr "ボリューム拡張のエラー。" msgid "Extend volume not implemented" msgstr "ボリュームの拡張が実装されていません。" msgid "" "FAST VP Enabler is not installed. Can not set tiering policy for the volume." msgstr "" "FAST VP イネーブラーがインストールされていません。ボリュームの階層化ポリシー" "を設定できません。" msgid "FC is the protocol but wwpns are not supplied by OpenStack." msgstr "FC がプロトコルですが、wwpns が OpenStack によって提供されていません。" #, python-format msgid "Faield to unassign %(volume)s" msgstr "%(volume)s の割り当て解除に失敗しました" #, python-format msgid "Failed adding connection for fabric=%(fabric)s: Error: %(err)s" msgstr "ファブリック %(fabric)s の接続の追加に失敗しました。エラー: %(err)s" #, python-format msgid "Failed getting active zone set from fabric %s." msgstr "ファブリック %s からのアクティブなゾーンセットの取得に失敗しました。" #, python-format msgid "Failed getting details for pool %s." msgstr "プール %s の詳細の取得に失敗しました。" #, python-format msgid "Failed removing connection for fabric=%(fabric)s: Error: %(err)s" msgstr "ファブリック %(fabric)s の接続の削除に失敗しました。エラー: %(err)s" #, python-format msgid "Failed to Extend Volume %(volname)s" msgstr "ボリューム %(volname)s を拡張できませんでした。" #, python-format msgid "Failed to Login to 3PAR (%(url)s) because %(err)s" msgstr "3PAR (%(url)s) へのログインに失敗しました。理由: %(err)s" msgid "Failed to _get_node_uuid." msgstr "_get_node_uuid が失敗しました。" msgid "Failed to access active zoning configuration." msgstr "アクティブなゾーニング設定へのアクセスに失敗しました。" #, python-format msgid "Failed to access zoneset status:%s" msgstr "ゾーンセットステータスにアクセスできませんでした: %s" msgid "Failed to add or update zoning configuration." msgstr "ゾーニング設定の追加または更新に失敗しました。" msgid "Failed to add zoning configuration." msgstr "ゾーニング設定の追加に失敗しました。" #, python-format msgid "Failed to associate qos_specs: %(specs_id)s with type %(type_id)s." msgstr "" "qos_specs %(specs_id)s をタイプ %(type_id)s に関連付けることができませんでし" "た。" #, python-format msgid "Failed to attach iSCSI target for volume %(volume_id)s." 
msgstr "ボリューム %(volume_id)s の iSCSI ターゲットの接続に失敗しました。" #, python-format msgid "Failed to backup volume metadata - %s" msgstr "ボリュームメタデータのバックアップに失敗しました: %s" #, python-format msgid "" "Failed to backup volume metadata - Metadata backup object 'backup.%s.meta' " "already exists" msgstr "" "ボリュームメタデータのバックアップに失敗しました - メタデータバックアップオブ" "ジェクト 'backup.%s.meta' は既に存在します" #, python-format msgid "Failed to connect to %(vendor_name)s Array %(host)s: %(err)s" msgstr "%(vendor_name)s 配列 %(host)s への接続に失敗しました: %(err)s" msgid "Failed to connect to Dell REST API" msgstr "Dell REST API への接続に失敗しました" #, python-format msgid "Failed to copy image to volume: %(reason)s" msgstr "イメージをボリュームにコピーできませんでした: %(reason)s" #, python-format msgid "Failed to copy metadata to volume: %(reason)s" msgstr "メタデータをボリュームにコピーできませんでした: %(reason)s" msgid "" "Failed to copy volume to image as image quota has been met. Please delete " "images or have your limit increased, then try again." msgstr "" "イメージのクォータに到達したため、ボリュームのイメージへのコピーが失敗しまし" "た。イメージを削除するか、上限値を増やして再試行してください。" msgid "Failed to copy volume, destination device unavailable." msgstr "ボリュームのコピーに失敗しました。宛先デバイスが使用できません。" msgid "Failed to copy volume, source device unavailable." msgstr "ボリュームのコピーに失敗しました。ソースデバイスが使用できません。" #, python-format msgid "Failed to create IG, %s" msgstr "IG を作成できませんでした。%s" #, python-format msgid "Failed to create Volume Group: %(vg_name)s" msgstr "ボリュームグループを作成できませんでした: %(vg_name)s" msgid "Failed to create api volume flow." msgstr "API ボリュームフローの作成に失敗しました。" #, python-format msgid "Failed to create cg snapshot %(id)s due to %(reason)s." msgstr "%(reason)s が原因で cg スナップショット %(id)s の作成に失敗しました。" #, python-format msgid "Failed to create consistency group %(id)s due to %(reason)s." msgstr "%(reason)s が原因で整合性グループ %(id)s の作成に失敗しました。" #, python-format msgid "Failed to create consistency group %(id)s:%(ret)s." msgstr "整合性グループ %(id)s の作成に失敗しました: %(ret)s。" #, python-format msgid "Failed to create consistency group: %(cgid)s. Error: %(excmsg)s." msgstr "整合性グループ %(cgid)s の作成に失敗しました。エラー: %(excmsg)s。" #, python-format msgid "" "Failed to create host: %(name)s. Please check if it exists on the array." msgstr "" "ホスト: %(name)s の作成に失敗しました。このホストがアレイに存在しているかどう" "か確認してください。" #, python-format msgid "Failed to create hostgroup: %(name)s. Check if it exists on the array." msgstr "" "ホストグループ: %(name)s の作成に失敗しました。このホストグループがアレイに存" "在しているかどうか確認してください。" msgid "Failed to create iqn." msgstr "iqn の作成に失敗しました。" #, python-format msgid "Failed to create iscsi target for volume %(volume_id)s." msgstr "ボリューム %(volume_id)s の iSCSI ターゲットの作成に失敗しました。" msgid "Failed to create manage existing flow." msgstr "既存の管理フローの作成に失敗しました。" msgid "Failed to create manage_existing flow." msgstr "manage_existing フローの作成に失敗しました。" msgid "Failed to create map." msgstr "マップの作成に失敗しました。" msgid "Failed to create partition." msgstr "パーティションの作成に失敗しました。" #, python-format msgid "Failed to create qos_specs: %(name)s with specs %(qos_specs)s." msgstr "" "仕様 %(qos_specs)s を使用して qos_specs %(name)s を作成することができませんで" "した。" msgid "Failed to create replica." msgstr "複製の作成に失敗しました。" msgid "Failed to create scheduler manager volume flow" msgstr "スケジューラーマネージャーのボリュームフローを作成できませんでした" #, python-format msgid "Failed to create snapshot %s" msgstr "スナップショット %s の作成に失敗しました。" msgid "Failed to create snapshot." msgstr "スナップショットの作成に失敗しました。" #, python-format msgid "Failed to create south bound connector for %s." 
msgstr "%s のサウスバウンドコネクターの作成に失敗しました。" #, python-format msgid "Failed to create thin pool, error message was: %s" msgstr "シンプールの作成に失敗しました。エラーメッセージ: %s" #, python-format msgid "Failed to create volume %s" msgstr "ボリューム %s の作成に失敗しました。" #, python-format msgid "Failed to delete cgsnapshot %(id)s due to %(reason)s." msgstr "%(reason)s が原因で cgsnapshot %(id)s の削除に失敗しました。" #, python-format msgid "Failed to delete consistency group %(id)s due to %(reason)s." msgstr "%(reason)s が原因で整合性グループ %(id)s の削除に失敗しました。" #, python-format msgid "" "Failed to delete fileset for consistency group %(cgname)s. Error: %(excmsg)s." msgstr "" "整合性グループ %(cgname)s のファイルセットの削除に失敗しました。エラー: " "%(excmsg)s。" msgid "Failed to delete iqn." msgstr "iqn の削除に失敗しました。" msgid "Failed to delete map." msgstr "マップの削除に失敗しました。" msgid "Failed to delete partition." msgstr "パーティションの削除に失敗しました。" msgid "Failed to delete replica." msgstr "複製の削除に失敗しました。" #, python-format msgid "Failed to delete snapshot %s" msgstr "スナップショット %s の削除に失敗しました。" msgid "Failed to delete snapshot." msgstr "スナップショットの削除に失敗しました。" #, python-format msgid "Failed to detach iSCSI target for volume %(volume_id)s." msgstr "ボリューム %(volume_id)s の iSCSI ターゲットの切り離しに失敗しました。" msgid "Failed to disassociate qos specs." msgstr "qos 仕様の関連付けを解除できませんでした。" #, python-format msgid "Failed to disassociate qos_specs: %(specs_id)s with type %(type_id)s." msgstr "" "qos_specs %(specs_id)s とタイプ %(type_id)s の関連付けを解除できませんでし" "た。" msgid "Failed to execute common command." msgstr "共通のコマンドの実行に失敗しました。" #, python-format msgid "Failed to export for volume: %(reason)s" msgstr "ボリュームのエクスポートに失敗しました: %(reason)s" #, python-format msgid "Failed to extend volume %(name)s, Error msg: %(msg)s." msgstr "ボリューム %(name)s の拡張に失敗しました。エラーメッセージ: %(msg)s。" msgid "Failed to find QoSnode" msgstr "QoSNode が見つかりません。" msgid "Failed to find Storage Center" msgstr "ストレージセンターが見つかりませんでした" msgid "Failed to find account for volume." msgstr "ボリュームのアカウントが見つかりませんでした。" #, python-format msgid "Failed to find available FC targets for %s." msgstr "%s で利用可能な FCターゲットを見つけることに失敗しました。" #, python-format msgid "Failed to find available iSCSI targets for %s." msgstr "%s で利用可能な iSCSI ターゲットを見つけることに失敗しました。" #, python-format msgid "Failed to find fileset for path %(path)s, command output: %(cmdout)s." msgstr "" "パス %(path)s のファイルセットが見つかりませんでした。コマンド出力: " "%(cmdout)s。" #, python-format msgid "Failed to find group snapshot named: %s" msgstr "%s という名前のグループスナップショットが見つかりませんでした。" #, python-format msgid "Failed to find host %s." msgstr "ホスト %s を見つけることに失敗しました。" #, python-format msgid "Failed to get LUN target details for the LUN %s" msgstr "LUN %s の LUN ターゲット詳細の取得に失敗しました" #, python-format msgid "Failed to get LUN target list for the LUN %s" msgstr "LUN %s の LUN ターゲットリストを取得できませんでした" #, python-format msgid "Failed to get Partition ID for volume %(volume_id)s." msgstr "ボリューム %(volume_id)s のパーティション ID の取得に失敗しました。" #, python-format msgid "Failed to get Raid Snapshot ID from snapshot: %(snapshot_id)s." msgstr "" "スナップショット: %(snapshot_id)s からの RAID スナップショット ID の取得に失" "敗しました。" msgid "Failed to get SplitMirror." msgstr "SplitMirror の取得に失敗しました。" #, python-format msgid "Failed to get all associations of qos specs %s" msgstr "qos 仕様 %s のすべての関連付けは取得できませんでした。" msgid "Failed to get channel info." msgstr "チャンネル情報の取得に失敗しました。" #, python-format msgid "Failed to get code level (%s)." msgstr "コードレベル (%s) を取得できませんでした。" msgid "Failed to get device info." msgstr "デバイス情報の取得に失敗しました。" #, python-format msgid "Failed to get domain because CPG (%s) doesn't exist on array." 
msgstr "CPG (%s) がアレイ上に存在しないため、ドメインを取得できませんでした。" msgid "Failed to get iqn info." msgstr "iqn 情報の取得に失敗しました。" msgid "Failed to get license info." msgstr "ライセンス情報の取得に失敗しました。" msgid "Failed to get lv info." msgstr "Iv 情報の取得に失敗しました。" msgid "Failed to get map info." msgstr "マップ情報の取得に失敗しました。" msgid "Failed to get migration task." msgstr "マイグレーションタスクの取得に失敗しました。" msgid "Failed to get name server info." msgstr "ネームサーバー情報の取得に失敗しました。" msgid "Failed to get network info." msgstr "ネットワーク情報の取得に失敗しました。" #, python-format msgid "Failed to get new part id in new pool: %(pool_id)s." msgstr "新規プール: %(pool_id)s での新規パート ID の取得に失敗しました。" msgid "Failed to get partition info." msgstr "パーティション情報の取得に失敗しました。" msgid "Failed to get replica info." msgstr "レプリカ情報の取得に失敗しました。" msgid "Failed to get show fcns database info." msgstr "fcns データベース情報の表示に失敗しました。" msgid "Failed to get snapshot info." msgstr "スナップショット情報の取得に失敗しました。" #, python-format msgid "Failed to get target IQN for the LUN %s" msgstr "LUN %s のターゲット IQN の取得に失敗しました" msgid "Failed to get target LUN of SplitMirror." msgstr "SplitMirror のターゲット LUN の取得に失敗しました。" #, python-format msgid "Failed to get target portal for the LUN %s" msgstr "LUN %s のターゲットポータルの取得に失敗しました" #, python-format msgid "Failed to get target_id of target [%s]" msgstr "ターゲット [%s] の target_id の取得に失敗しました。" msgid "Failed to get targets" msgstr "ターゲットを取得できませんでした" msgid "Failed to get wwn info." msgstr "wwn 情報の取得に失敗しました。" #, python-format msgid "Failed to link fileset for the share %(cgname)s. Error: %(excmsg)s." msgstr "" "共有 %(cgname)s のファイルセットへのリンクに失敗しました。エラー: " "%(excmsg)s。" #, python-format msgid "Failed to log on %s Array (invalid login?)." msgstr "%s 配列へのログインに失敗しました (無効なログイン?)。" msgid "Failed to login with all rest URLs." msgstr "すべての rest URL のログインに失敗しました。" msgid "Failed to manage api volume flow." msgstr "API ボリュームフローの管理に失敗しました。" #, python-format msgid "" "Failed to manage existing %(type)s %(name)s, because reported size %(size)s " "was not a floating-point number." msgstr "" "報告されたサイズ %(size)s が浮動小数点値でないため、既存の %(type)s %(name)s " "の管理に失敗しました。" #, python-format msgid "" "Failed to manage existing volume %(name)s, because of error in getting " "volume size." msgstr "" "ボリュームサイズ取得エラーのため、既存ボリューム %(name)s の管理に失敗しまし" "た。" #, python-format msgid "" "Failed to manage existing volume %(name)s, because rename operation failed: " "Error msg: %(msg)s." msgstr "" "名前変更操作が失敗したため、既存ボリューム %(name)s の管理に失敗しました: エ" "ラーメッセージ: %(msg)s。" #, python-format msgid "" "Failed to manage existing volume %(name)s, because reported size %(size)s " "was not a floating-point number." msgstr "" "変更されたサイズ %(size)s が浮動小数点数ではなかったため、既存ボリューム " "%(name)s の管理に失敗しました。" #, python-format msgid "" "Failed to manage existing volume due to the pool of the volume to be managed " "does not match the backend pool. Pool of the volume to be managed is " "%(vdisk_pool)s. Pool of the backend is %(backend_pool)s." msgstr "" "管理対象となるボリュームのプールがバックエンドプールと一致しないことが原因" "で、既存のボリュームの管理に失敗しました。管理対象となるボリュームのプールは " "%(vdisk_pool)s です。バックエンドのプールは %(backend_pool)s です。" msgid "" "Failed to manage existing volume due to the volume to be managed is " "compress, but the volume type chosen is not compress." msgstr "" "管理対象となるボリュームは compress ですが、選択されたボリューム種別は " "compress でないことが原因で、既存のボリュームの管理に失敗しました。" msgid "" "Failed to manage existing volume due to the volume to be managed is not " "compress, but the volume type chosen is compress." 
msgstr "" "管理対象となるボリュームは compress ではありませんが、選択されたボリューム種" "別は compress であることが原因で、既存のボリュームの管理に失敗しました。" msgid "" "Failed to manage existing volume due to the volume to be managed is not in a " "valid I/O group." msgstr "" "管理対象となるボリュームが有効な入出力グループになかったことが原因で、既存の" "ボリュームの管理に失敗しました。" msgid "" "Failed to manage existing volume due to the volume to be managed is thick, " "but the volume type chosen is thin." msgstr "" "管理対象となるボリュームは thick ですが、選択されたボリューム種別は thin であ" "ることが原因で、既存のボリュームの管理に失敗しました。" msgid "" "Failed to manage existing volume due to the volume to be managed is thin, " "but the volume type chosen is thick." msgstr "" "管理対象となるボリュームは thin ですが、選択されたボリューム種別は thick であ" "ることが原因で、既存のボリュームの管理に失敗しました。" #, python-format msgid "Failed to migrate volume %(src)s." msgstr "ボリューム %(src)s のマイグレーションに失敗しました。" #, python-format msgid "" "Failed to migrate volume between source vol %(src)s and dest vol %(dst)s." msgstr "" "ソースボリューム %(src)s と宛先ボリューム %(dst)s の間のボリュームのマイグ" "レーションが失敗しました。" #, python-format msgid "" "Failed to parse CLI output:\n" " command: %(cmd)s\n" " stdout: %(out)s\n" " stderr: %(err)s." msgstr "" "CLI 出力の解析に失敗しました:\n" "コマンド: %(cmd)s\n" " stdout: %(out)s\n" " stderr: %(err)s。" msgid "" "Failed to parse the configuration option 'glance_catalog_info', must be in " "the form ::" msgstr "" "構成オプション 'glance_catalog_info' の解析に失敗しました。:" ": という形式でなければなりません。" msgid "" "Failed to parse the configuration option 'keystone_catalog_info', must be in " "the form ::" msgstr "" "設定オプションの 'keystone_catalog_info' を解析できませんでした。本オプション" "は、:: の形式を持つ必要がありま" "す。" msgid "" "Failed to parse the configuration option 'swift_catalog_info', must be in " "the form ::" msgstr "" "構成オプション 'swift_catalog_info' の解析に失敗しました。:" ": という形式でなければなりません" #, python-format msgid "Failed to read configuration file(s): %s" msgstr "設定ファイルの読み込みに失敗しました: %s " #, python-format msgid "Failed to remove export for volume %(volume)s: %(reason)s" msgstr "ボリューム %(volume)s のエクスポートを削除できませんでした: %(reason)s" #, python-format msgid "Failed to remove iscsi target for volume %(volume_id)s." msgstr "ボリューム %(volume_id)s の iSCSI ターゲットの削除に失敗しました。" #, python-format msgid "" "Failed to rename logical volume %(name)s, error message was: %(err_msg)s" msgstr "" "論理ボリューム %(name)s の名前変更に失敗しました。エラーメッセージ: " "%(err_msg)s" #, python-format msgid "Failed to retrieve active zoning configuration %s" msgstr "アクティブなゾーニング構成 %s の取得に失敗しました" #, python-format msgid "Failed to retrieve attachments for volume %(name)s" msgstr "ボリューム %(name)s の接続を取得できません。" #, python-format msgid "" "Failed to set CHAP authentication for target IQN %(iqn)s. Details: %(ex)s" msgstr "ターゲット IQN %(iqn)s の CHAP 認証の設定に失敗しました。詳細: %(ex)s" #, python-format msgid "Failed to set QoS for existing volume %(name)s, Error msg: %(msg)s." msgstr "" "既存のボリューム %(name)s 用の QoS の設定に失敗しました。エラーメッセージ: " "%(msg)s。" msgid "Failed to set attribute 'Incoming user' for SCST target." msgstr "SCST ターゲットの「着信ユーザー」属性の設定に失敗しました。" msgid "Failed to set partition." msgstr "パーティションの設定に失敗しました。" #, python-format msgid "" "Failed to set permissions for the consistency group %(cgname)s. Error: " "%(excmsg)s." msgstr "" "整合性グループ %(cgname)s に対する許可の設定に失敗しました。エラー: " "%(excmsg)s。" #, python-format msgid "" "Failed to unlink fileset for consistency group %(cgname)s. Error: %(excmsg)s." 
msgstr "" "整合性グループ %(cgname)s のファイルセットのリンク解除に失敗しました。エ" "ラー: %(excmsg)s。" #, python-format msgid "Failed to update metadata for volume: %(reason)s" msgstr "ボリュームのメタデータの更新に失敗しました: %(reason)s" msgid "Failed to update or delete zoning configuration" msgstr "ゾーニング構成の更新または削除に失敗しました" msgid "Failed to update or delete zoning configuration." msgstr "ゾーニング設定の更新または削除に失敗しました。" #, python-format msgid "Failed to update qos_specs: %(specs_id)s with specs %(qos_specs)s." msgstr "" "仕様 %(qos_specs)s を使用して qos_specs %(specs_id)s を更新することができませ" "んでした。" msgid "Failed to update quota usage while retyping volume." msgstr "ボリュームのタイプを変更中にクォータの使用量を更新できませんでした。" msgid "Failed to update snapshot." msgstr "スナップショットの更新に失敗しました。" #, python-format msgid "" "Failed updating volume %(vol_id)s metadata using the provided %(src_type)s " "%(src_id)s metadata" msgstr "" "指定の %(src_type)s %(src_id)s メタデータを使用してボリューム %(vol_id)s メタ" "データを更新することができませんでした" msgid "Failover requested on non replicated backend." msgstr "" "複製されていないバックエンド上でフェイルオーバーがリクエストされました。" #, python-format msgid "Failure getting LUN info for %s." msgstr "%s の LUN 情報の取得中に障害が発生しました。" #, python-format msgid "Failure moving new cloned LUN to %s." msgstr "複製された新規 LUN を %s へ移動中に障害が発生しました。" #, python-format msgid "Failure staging LUN %s to tmp." msgstr "LUN %s を一時 lun へステージング中に障害が発生しました。" #, python-format msgid "Fexvisor failed to add volume %(id)s due to %(reason)s." msgstr "" "Fexvisor は %(reason)s が原因でボリューム %(id)s の追加に失敗しました。" #, python-format msgid "" "Fexvisor failed to join the volume %(vol)s in the group %(group)s due to " "%(ret)s." msgstr "" "Fexvisor は %(ret)s が原因でグループ %(group)s でのボリューム %(vol)s の結合" "に失敗しました。" #, python-format msgid "" "Fexvisor failed to remove the volume %(vol)s in the group %(group)s due to " "%(ret)s." msgstr "" "Fexvisor は %(ret)s が原因で グループ %(group)s でのボリューム %(vol)s の削" "除に失敗しました。" #, python-format msgid "Fexvisor failed to remove volume %(id)s due to %(reason)s." msgstr "" "Fexvisor は %(reason)s が原因でボリューム %(id)s の削除に失敗しました。" #, python-format msgid "Fibre Channel SAN Lookup failure: %(reason)s" msgstr "ファイバーチャネル SAN ルックアップ障害: %(reason)s" msgid "Fibre Channel Zone Manager not initialized" msgstr "ファイバーチャネルゾーンマネージャーが初期化されていません。" #, python-format msgid "Fibre Channel Zone operation failed: %(reason)s" msgstr "ファイバーチャネルゾーン操作が失敗しました: %(reason)s" #, python-format msgid "Fibre Channel connection control failure: %(reason)s" msgstr "ファイバーチャネル接続制御障害: %(reason)s" #, python-format msgid "File %(file_path)s could not be found." msgstr "ファイル %(file_path)s が見つかりませんでした。" #, python-format msgid "File already exists at %s." msgstr "ファイルは %s に既に存在します。" #, python-format msgid "File already exists at: %s" msgstr "ファイルは既に存在します: %s" msgid "Find host in hostgroup error." msgstr "ホストグループでのホスト検索のエラー。" msgid "Find host lun id error." msgstr "ホスト LUN ID 検索のエラー。" msgid "Find lun group from mapping view error." msgstr "マッピングビューからの LUN グループ検索のエラー。" msgid "Find mapping view error." msgstr "マッピングビュー検索のエラー。" msgid "Find obj number error." msgstr "オブジェクト番号検索のエラー。" msgid "Find portgroup error." msgstr "ポートグループ検索のエラー。" msgid "Find portgroup from mapping view error." msgstr "マッピングビューからのポートグループ検索のエラー。" #, python-format msgid "" "Flash Cache Policy requires WSAPI version '%(fcache_version)s' version " "'%(version)s' is installed." msgstr "" "Flash キャッシュポリシーは、WSAPI バージョン「%(fcache_version)s」バージョン" "「%(version)s」がインストールされていることを必要としています。" #, python-format msgid "Flexvisor assign volume failed.:%(id)s:%(status)s." 
msgstr "" "Flexvisor によるボリュームの割り当てが失敗しました: %(id)s:%(status)s。" #, python-format msgid "Flexvisor assign volume failed:%(id)s:%(status)s." msgstr "" "Flexvisor によるボリュームの割り当てが失敗しました: %(id)s:%(status)s。" #, python-format msgid "" "Flexvisor could not find volume %(id)s snapshot in the group %(vgid)s " "snapshot %(vgsid)s." msgstr "" "Flexvisor はグループ %(vgid)s スナップショット %(vgsid)s でボリューム %(id)s " "スナップショットを見つけられませんでした。" #, python-format msgid "Flexvisor create volume failed.:%(volumeid)s:%(status)s." msgstr "" "Flexvisor によるボリュームの作成が失敗しました: %(volumeid)s:%(status)s。" #, python-format msgid "Flexvisor failed deleting volume %(id)s: %(status)s." msgstr "Flexvisor はボリューム %(id)s の削除に失敗しました: %(status)s。" #, python-format msgid "Flexvisor failed to add volume %(id)s to group %(cgid)s." msgstr "" "Flexvisor はボリューム %(id)s をグループ %(cgid)s に追加できませんでした。" #, python-format msgid "" "Flexvisor failed to assign volume %(id)s due to unable to query status by " "event id." msgstr "" "Flexvisor は、イベント ID でステータスを照会できないことが原因でボリューム " "%(id)s を割り当てることに失敗しました。" #, python-format msgid "Flexvisor failed to assign volume %(id)s: %(status)s." msgstr "Flexvisor はボリューム %(id)s の割り当てに失敗しました: %(status)s。" #, python-format msgid "Flexvisor failed to assign volume %(volume)s iqn %(iqn)s." msgstr "" "Flexvisor はボリューム %(volume)s iqn %(iqn)s の割り当てに失敗しました。" #, python-format msgid "Flexvisor failed to clone volume %(id)s: %(status)s." msgstr "Flexvisor はボリューム %(id)s の複製に失敗しました: %(status)s。" #, python-format msgid "Flexvisor failed to clone volume (failed to get event) %(id)s." msgstr "" "Flexvisor はボリューム %(id)s の複製に失敗しました (イベントの取得に失敗しま" "した)。" #, python-format msgid "Flexvisor failed to create snapshot for volume %(id)s: %(status)s." msgstr "" "Flexvisor はボリューム %(id)s のスナップショットを作成することに失敗しまし" "た: %(status)s。" #, python-format msgid "" "Flexvisor failed to create snapshot for volume (failed to get event) %(id)s." msgstr "" "Flexvisor はボリューム %(id)s のスナップショットを作成することに失敗しました " "(イベントの取得に失敗しました)。" #, python-format msgid "Flexvisor failed to create volume %(id)s in the group %(vgid)s." msgstr "" "Flexvisor はグループ %(vgid)s 内でボリューム %(id)s を作成できませんでした。" #, python-format msgid "Flexvisor failed to create volume %(volume)s: %(status)s." msgstr "Flexvisor はボリューム %(volume)s の作成に失敗しました: %(status)s。" #, python-format msgid "Flexvisor failed to create volume (get event) %s." msgstr "Flexvisor はボリューム %s の作成 (イベントの取得) に失敗しました。" #, python-format msgid "Flexvisor failed to create volume from snapshot %(id)s: %(status)s." msgstr "" "Flexvisor はスナップショット %(id)s からのボリュームの作成に失敗しました: " "%(status)s。" #, python-format msgid "Flexvisor failed to create volume from snapshot %(id)s:%(status)s." msgstr "" "Flexvisor はスナップショット %(id)s からのボリュームの作成に失敗しました: " "%(status)s。" #, python-format msgid "" "Flexvisor failed to create volume from snapshot (failed to get event) %(id)s." msgstr "" "Flexvisor はスナップショット %(id)s からのボリュームの作成に失敗しました (イ" "ベントの取得に失敗しました)。" #, python-format msgid "Flexvisor failed to delete snapshot %(id)s: %(status)s." msgstr "Flexvisor はスナップショット %(id)s の削除に失敗しました: %(status)s。" #, python-format msgid "Flexvisor failed to delete snapshot (failed to get event) %(id)s." msgstr "" "Flexvisor はスナップショット %(id)s の削除に失敗しました (イベントの取得に失" "敗しました)。" #, python-format msgid "Flexvisor failed to delete volume %(id)s: %(status)s." msgstr "Flexvisor はボリューム %(id)s の削除に失敗しました: %(status)s。" #, python-format msgid "Flexvisor failed to extend volume %(id)s: %(status)s." 
msgstr "Flexvisor はボリューム %(id)s の拡張に失敗しました: %(status)s。" #, python-format msgid "Flexvisor failed to extend volume %(id)s:%(status)s." msgstr "Flexvisor はボリューム %(id)s の拡張に失敗しました: %(status)s。" #, python-format msgid "Flexvisor failed to extend volume (failed to get event) %(id)s." msgstr "" "Flexvisor はボリューム %(id)s の拡張に失敗しました (イベントの取得に失敗しま" "した)。" #, python-format msgid "Flexvisor failed to get pool info %(id)s: %(status)s." msgstr "Flexvisor はプール情報 %(id)s の取得に失敗しました: %(status)s。" #, python-format msgid "" "Flexvisor failed to get snapshot id of volume %(id)s from group %(vgid)s." msgstr "" "Flexvisor はグループ %(vgid)s からボリューム %(id)s のスナップショット ID を" "取得できませんでした。" #, python-format msgid "Flexvisor failed to remove volume %(id)s from group %(cgid)s." msgstr "" "Flexvisor はグループ %(cgid)s からのボリューム %(id)s の削除に失敗しました。" #, python-format msgid "Flexvisor failed to spawn volume from snapshot %(id)s:%(status)s." msgstr "" "Flexvisor はスナップショット %(id)s からのボリュームの作成に失敗しました: " "%(status)s。" #, python-format msgid "" "Flexvisor failed to spawn volume from snapshot (failed to get event) %(id)s." msgstr "" "Flexvisor はスナップショット %(id)s からのボリュームの作成に失敗しました (イ" "ベントの取得に失敗しました)。" #, python-format msgid "Flexvisor failed to unassign volume %(id)s: %(status)s." msgstr "" "Flexvisor はボリューム %(id)s の割り当て解除に失敗しました: %(status)s。" #, python-format msgid "Flexvisor failed to unassign volume (get event) %(id)s." msgstr "" "Flexvisor はボリューム %(id)s の割り当て解除 (イベントの取得) に失敗しまし" "た。" #, python-format msgid "Flexvisor failed to unassign volume:%(id)s:%(status)s." msgstr "Flexvisor はボリューム %(id)s の割り当て解除に失敗しました: %(status)s" #, python-format msgid "Flexvisor unable to find the source volume %(id)s info." msgstr "Flexvisor はソースボリューム %(id)s 情報を検出できません。" #, python-format msgid "Flexvisor unassign volume failed:%(id)s:%(status)s." msgstr "" "Flexvisor によるボリュームの割り当て解除が失敗しました: %(id)s:%(status)s。" #, python-format msgid "Flexvisor volume %(id)s failed to join group %(vgid)s." msgstr "Flexvisor ボリューム %(id)s はグループ %(vgid)s の結合に失敗しました。" #, python-format msgid "Folder %s does not exist in Nexenta Store appliance" msgstr "フォルダー %s は Nexenta Store アプライアンスに存在しません" #, python-format msgid "GPFS is not running, state: %s." msgstr "GPFS が実行されていません。状態: %s。" msgid "Get FC ports by port group error." msgstr "ポートグループによる FC ポート取得のエラー。" msgid "Get FC ports from array error." msgstr "アレイからの FC ポート取得のエラー。" msgid "Get FC target wwpn error." msgstr "FC ターゲット wwpn 取得のエラー。" msgid "Get HyperMetroPair error." msgstr "HyperMetroPair 取得のエラー。" msgid "Get LUN group by view error." msgstr "ビューによる LUN グループ取得のエラー。" msgid "Get LUNcopy information error." msgstr "LUN コピーの情報取得のエラー。" msgid "Get QoS id by lun id error." msgstr "LUN による QoS ID 取得のエラー。" msgid "Get QoS information error." msgstr "QoS 情報取得のエラー。 " msgid "Get QoS policy error." msgstr "QoS ポリシー取得のエラー。" msgid "Get SplitMirror error." msgstr "SplitMirror 取得のエラー。" msgid "Get active client failed." msgstr "アクティブなクライアントの取得が失敗しました。" msgid "Get array info error." msgstr "アレイ情報取得のエラー。" msgid "Get cache by name error." msgstr "名前によるキャッシュ取得のエラー。" msgid "Get connected free FC wwn error." msgstr "空き FC wwn 接続のエラー。" msgid "Get engines error." msgstr "エンジン取得のエラー。" msgid "Get host initiators info failed." msgstr "ホストイニシエーター情報の取得が失敗しました。" msgid "Get hostgroup information error." msgstr "ホストグループの情報取得のエラー。" msgid "Get hypermetro group by id error." msgstr "idによる hypermetro グループの取得エラー。" msgid "Get hypermetro group by name error." msgstr "名前による hypermetro グループの取得エラー。" msgid "Get hypermetro group error." 
msgstr "hypermetro グループ取得エラー。" msgid "" "Get iSCSI port info error, please check the target IP configured in huawei " "conf file." msgstr "" "iSCSI ポート情報取得のエラー。huawei 設定ファイルに設定されたターゲット IP を" "確認してください。" msgid "Get iSCSI port information error." msgstr "iSCSI ポート情報取得のエラー。" msgid "Get iSCSI target port error." msgstr "iSCSI ターゲットポート取得のエラー。" msgid "Get lun id by name error." msgstr "名前による LUN ID 取得のエラー。" msgid "Get lun migration task error." msgstr "LUN マイグレーションタスク取得のエラー。" msgid "Get lungroup id by lun id error." msgstr "LUN ID による LUN グループ ID 取得のエラー。" msgid "Get lungroup information error." msgstr "LUN グループの情報取得のエラー。" msgid "Get manageable snapshots not implemented." msgstr "管理可能スナップショットの取得は実装されていません。" msgid "Get manageable volumes not implemented." msgstr "管理可能ボリュームの取得は実装されていません。" msgid "Get migration task error." msgstr "マイグレーションタスク取得のエラー。" msgid "Get pair failed." msgstr "ペアの取得が失敗しました。" msgid "Get partition by name error." msgstr "名前によるパーティション取得のエラー。" msgid "Get partition by partition id error." msgstr "パーティション ID によるパーティション取得のエラー。" msgid "Get port group by view error." msgstr "ビューによるポートグループ取得のエラー。" msgid "Get port group error." msgstr "ポートグループ取得のエラー。" msgid "Get port groups by port error." msgstr "ポートによるポートグループ取得のエラー。" msgid "Get ports by port group error." msgstr "ポートグループによるポート取得のエラー。" msgid "Get remote device info failed." msgstr "リモートデバイス情報の取得が失敗しました。" msgid "Get remote devices error." msgstr "リモートデバイス取得のエラー。" msgid "Get smartcache by cache id error." msgstr "キャッシュ ID によるスマートキャッシュ取得のエラー。" msgid "Get snapshot error." msgstr "スナップショット取得のエラー。" msgid "Get snapshot id error." msgstr "スナップショット ID 取得のエラー。" msgid "Get target IP error." msgstr "ターゲット IP 取得のエラー。" msgid "Get target LUN of SplitMirror error." msgstr "SplitMirror のターゲット LUN 取得のエラー。" msgid "Get views by port group error." msgstr "ポートグループによるビュー取得のエラー。" msgid "Get volume by name error." msgstr "名前によるボリューム取得のエラー。" msgid "Get volume error." msgstr "ボリューム取得のエラー。" #, python-format msgid "" "Glance metadata cannot be updated, key %(key)s exists for volume id " "%(volume_id)s" msgstr "" "Glance メタデータを更新できません。ボリューム ID %(volume_id)s に対するキー " "%(key)s が存在します" #, python-format msgid "Glance metadata for volume/snapshot %(id)s cannot be found." msgstr "" "ボリューム/スナップショット %(id)s の Glance メタデータが見つかりません。" #, python-format msgid "Google Cloud Storage api failure: %(reason)s" msgstr "Google Cloud Storage の API エラー: %(reason)s" #, python-format msgid "Google Cloud Storage connection failure: %(reason)s" msgstr "Google Cloud Storage の接続エラー: %(reason)s" #, python-format msgid "Google Cloud Storage oauth2 failure: %(reason)s" msgstr "Google Cloud Storage の oauth2 エラー: %(reason)s" #, python-format msgid "Group %(group_id)s could not be found." msgstr "グループ %(group_id)s が見つかりませんでした。" #, python-format msgid "" "Group %s still contains volumes. The delete-volumes flag is required to " "delete it." msgstr "" "グループ %s にはまだボリュームがあります。これを削除するには delete-volumes " "フラグが必要です。" #, python-format msgid "" "Group Type %(group_type_id)s deletion is not allowed with groups present " "with the type." msgstr "" "グループ種別 %(group_type_id)s を持つグループでは、そのグループ種別は削除でき" "ません。" #, python-format msgid "Group Type %(group_type_id)s has no specs with key %(group_specs_key)s." msgstr "" "グループ種別 %(group_type_id)s にはキー %(group_specs_key)s を持つスペックは" "ありません。" #, python-format msgid "Group Type %(id)s already exists." msgstr "グループ種別 %(id)s は既に存在します。" #, python-format msgid "Group Type %(type_id)s has no extra spec with key %(id)s." 
msgstr "" "グループ種別 %(type_id)s には、キー %(id)s に関する追加の仕様がありません。" msgid "Group snapshot is empty. No group will be created." msgstr "グループスナップショット が空です。グループは作成されません。" #, python-format msgid "Group status must be available or error, but current status is: %s" msgstr "" "グループの状態は「使用可能」または「エラー」でなければなりませんが、現在の状" "態は %s です。" #, python-format msgid "Group type %(group_type_id)s could not be found." msgstr "グループ種別 %(group_type_id)s が見つかりませんでした。" #, python-format msgid "" "Group type access for %(group_type_id)s / %(project_id)s combination already " "exists." msgstr "" "%(group_type_id)s / %(project_id)s の組み合わせのグループ種別アクセスは既に存" "在します。" #, python-format msgid "" "Group type access not found for %(group_type_id)s / %(project_id)s " "combination." msgstr "" "%(group_type_id)s / %(project_id)s の組み合わせのグループ種別アクセスが見つか" "りません。" msgid "Group type name can not be empty." msgstr "グループ種別名を空にすることはできません" #, python-format msgid "Group type with name %(group_type_name)s could not be found." msgstr "名前 %(group_type_name)s を持つグループ種別が見つかりませんでした。" #, python-format msgid "" "Group volume type mapping for %(group_id)s / %(volume_type_id)s combination " "already exists." msgstr "" " %(group_id)s / %(volume_type_id)s の組み合わせのグループボリューム種別のマッ" "ピングはすでに存在します。" #, python-format msgid "GroupSnapshot %(group_snapshot_id)s could not be found." msgstr "" "グループスナップショット %(group_snapshot_id)s は見つかりませんでした。" msgid "" "GroupSnapshot status must be available or error, and no Group can be " "currently using it as source for its creation." msgstr "" "GroupSnapshot の状態は「使用可能」または「エラー」でなければなりません。また" "これをソースとして用いてグループを作成することは現在できません。" #, python-format msgid "HTTP exit code: [%(code)s]" msgstr "HTTP 終了コード : [%(code)s]" #, python-format msgid "" "Hash block size has changed since the last backup. New hash block size: " "%(new)s. Old hash block size: %(old)s. Do a full backup." msgstr "" "最後のバックアップ以降にハッシュブロックサイズが変更されました。新規ハッシュ" "ブロックサイズ: %(new)s。旧ハッシュブロックサイズ: %(old)s。フルバックアップ" "を実行してください。" msgid "Heartbeat" msgstr "ハートビート" #, python-format msgid "Hint \"%s\" not supported." msgstr "ヒント「%s」はサポートされていません。" msgid "Host" msgstr "ホスト" #, python-format msgid "Host %(host)s could not be found." msgstr "ホスト %(host)s が見つかりませんでした。" #, python-format msgid "Host %s has no FC initiators" msgstr "ホスト %s に FC イニシエーターがありません" msgid "Host attach volume failed!" msgstr "ホストへのボリュームの接続に失敗しました。" #, python-format msgid "Host not found. Failed to remove %(service)s on %(host)s." msgstr "" "ホストが見つかりません。%(host)s 上で %(service)s の削除に失敗しました。" msgid "Hosts" msgstr "ホスト" msgid "Hypermetro and Replication can not be used in the same volume_type." msgstr "Hypermetro とレプリケーションは、同一の volume_type で使用できません。" msgid "ID" msgstr "ID" msgid "" "If compression is set to True, rsize must also be set (not equal to -1)." msgstr "" "圧縮が True に設定される場合、rsize も (not equal to -1) に設定しなければなり" "ません。" msgid "If nofmtdisk is set to True, rsize must also be set to -1." msgstr "" "nofmtdisk が True に設定される場合、rsize も -1 に設定しなければなりません。" #, python-format msgid "" "Illegal value '%(prot)s' specified for flashsystem_connection_protocol: " "valid value(s) are %(enabled)s." msgstr "" "正しくない値 '%(prot)s' が flashsystem_connection_protocol に指定されていま" "す。有効な値は %(enabled)s です。" msgid "" "Illegal value specified for storwize_svc_vol_grainsize: set to either 32, " "64, 128, or 256." msgstr "" "正しくない値が storwize_svc_vol_grainsize に指定されています。32、64、128、" "256 のいずれかに設定してください。" #, python-format msgid "Image %(image_id)s could not be found." 
msgstr "イメージ %(image_id)s が見つかりませんでした。" #, python-format msgid "Image %(image_id)s is not active." msgstr "イメージ %(image_id)s はアクティブではありません。" #, python-format msgid "Image %(image_id)s is unacceptable: %(reason)s" msgstr "イメージ %(image_id)s は受け入れられません: %(reason)s" msgid "Image location not present." msgstr "イメージロケーションが存在しません。" msgid "Image quota exceeded" msgstr "イメージのクォータを超えました。" #, python-format msgid "" "Image virtual size is %(image_size)dGB and doesn't fit in a volume of size " "%(volume_size)dGB." msgstr "" "イメージの仮想サイズは %(image_size)dGB であり、サイズ %(volume_size)dGB のボ" "リュームに適合しません。" msgid "Incremental backups exist for this backup." msgstr "このバックアップには増分バックアップが存在します。" #, python-format msgid "" "Infortrend CLI exception: %(err)s Param: %(param)s (Return Code: %(rc)s) " "(Output: %(out)s)" msgstr "" "Infortrend CLI の例外: %(err)s。パラメーター: %(param)s (戻りコード: %(rc)s) " "(出力: %(out)s)" msgid "Initiators of host cannot be empty." msgstr "ホストのイニシエーターは空にできません。" msgid "Input volumes or snapshots are invalid." msgstr "入力ボリュームまたはスナップショットが無効です。" msgid "Input volumes or source volumes are invalid." msgstr "入力ボリュームまたはソースボリュームが無効です。" #, python-format msgid "Instance %(uuid)s could not be found." msgstr "インスタンス %(uuid)s が見つかりませんでした。" #, python-format msgid "Invalid 3PAR Domain: %(err)s" msgstr "無効な 3PAR ドメイン: %(err)s" msgid "Invalid ALUA value. ALUA value must be 1 or 0." msgstr "無効な ALUA 値。ALUA 値は、1 または 0 でなければなりません。" msgid "Invalid Ceph args provided for backup rbd operation" msgstr "バックアップ RBD 操作に指定された Ceph 引数が無効です。" #, python-format msgid "Invalid CgSnapshot: %(reason)s" msgstr "無効な CgSnapshot: %(reason)s" #, python-format msgid "Invalid ConsistencyGroup: %(reason)s" msgstr "無効な ConsistencyGroup: %(reason)s" #, python-format msgid "Invalid Group: %(reason)s" msgstr "無効なグループ: %(reason)s" #, python-format msgid "Invalid GroupSnapshot: %(reason)s" msgstr "無効なグループスナップショット: %(reason)s" #, python-format msgid "Invalid IP address format: '%s'" msgstr "無効な IP アドレスの形式: '%s'" #, python-format msgid "" "Invalid QoS specification detected while getting QoS policy for volume %s" msgstr "" "ボリューム %s の QoS ポリシーの取得中に 無効な QoS 仕様が検出されました" #, python-format msgid "Invalid Replication Target: %(reason)s" msgstr "無効なレプリケーションターゲット: %(reason)s" #, python-format msgid "" "Invalid Virtuozzo Storage share specification: %r. Must be: [MDS1[," "MDS2],...:/][:PASSWORD]." msgstr "" "Virtuozzo Storage のシェアの指定が無効です: %r。[MDS1[,MDS2],...:/][:PASSWORD] である必要があります。" #, python-format msgid "Invalid XtremIO version %(cur)s, version %(min)s or up is required" msgstr "" "XtremIO バージョン %(cur)s は無効です。バージョン %(min)s 以上が必要です" #, python-format msgid "Invalid attaching mode '%(mode)s' for volume %(volume_id)s." msgstr "接続モード '%(mode)s' はボリューム %(volume_id)s には無効です。" #, python-format msgid "Invalid attachment info for volume %(name)s: %(reason)s" msgstr "ボリューム %(name)s の接続が無効です : %(reason)s" #, python-format msgid "Invalid auth key: %(reason)s" msgstr "認証キーが無効です: %(reason)s" #, python-format msgid "Invalid backup: %(reason)s" msgstr "無効なバックアップ: %(reason)s" #, python-format msgid "Invalid connection initialization response of volume %(name)s" msgstr "ボリューム %(name)s の接続初期化応答が無効です" #, python-format msgid "" "Invalid connection initialization response of volume %(name)s: %(output)s" msgstr "ボリューム %(name)s の接続初期化応答が無効です: %(output)s" #, python-format msgid "Invalid content type %(content_type)s." 
msgstr "無効なコンテンツタイプ %(content_type)s。" #, python-format msgid "Invalid directory: %s" msgstr "無効なディレクトリー: %s" #, python-format msgid "Invalid disk adapter type: %(invalid_type)s." msgstr "ディスクアダプタータイプが無効です: %(invalid_type)s。" #, python-format msgid "Invalid disk backing: %s." msgstr "ディスクバッキングが無効です: %s。" #, python-format msgid "Invalid disk type: %(disk_type)s." msgstr "ディスクタイプが無効です: %(disk_type)s。" #, python-format msgid "Invalid disk type: %s." msgstr "ディスクタイプが無効です: %s。" #, python-format msgid "Invalid filter keys: %s" msgstr "無効なフィルターキー : %s" #, python-format msgid "Invalid group type: %(reason)s" msgstr "無効なグループ種別: %(reason)s" #, python-format msgid "Invalid host: %(reason)s" msgstr "無効なホスト: %(reason)s" #, python-format msgid "" "Invalid hpe3parclient version found (%(found)s). Version %(minimum)s or " "greater required. Run \"pip install --upgrade python-3parclient\" to upgrade " "the hpe3parclient." msgstr "" "無効な hpe3parclient バージョンが見つかりました (%(found)s)。バージョン " "%(minimum)s 以上が必要です。 \"pip install --upgrade python-3parclient\" を実" "行して hpe3parclient をアップグレードしてください。" #, python-format msgid "Invalid image href %(image_href)s." msgstr "無効なイメージ href %(image_href)s。" msgid "Invalid image identifier or unable to access requested image." msgstr "イメージ ID が無効か、要求されたイメージにアクセスできません。" msgid "Invalid imageRef provided." msgstr "無効な imageRef が指定されました。" msgid "Invalid input" msgstr "無効な入力" #, python-format msgid "Invalid input received: %(reason)s" msgstr "無効な入力を受信しました: %(reason)s" #, python-format msgid "Invalid is_public filter [%s]" msgstr "無効な is_public フィルター [%s]" #, python-format msgid "Invalid lun type %s is configured." msgstr "無効な LUN タイプ %s が設定されています。" #, python-format msgid "Invalid metadata size: %(reason)s" msgstr "無効なメタデータサイズ: %(reason)s" #, python-format msgid "Invalid metadata: %(reason)s" msgstr "メタデータが無効です: %(reason)s" #, python-format msgid "Invalid mount point base: %s" msgstr "無効なマウントポイントベース: %s" #, python-format msgid "Invalid mount point base: %s." msgstr "無効なマウントポイントベース: %s" #, python-format msgid "Invalid new snapCPG name for retype. new_snap_cpg='%s'." msgstr "新規 snapCPG 名がタイプ変更には無効です。new_snap_cpg='%s'。" #, python-format msgid "Invalid qos specs: %(reason)s" msgstr "qos 仕様が無効です: %(reason)s" #, python-format msgid "Invalid reservation expiration %(expire)s." msgstr "予約の有効期限 %(expire)s が無効です。" #, python-format msgid "Invalid secondary id %s." msgstr "無効なセカンダリー ID %s。" msgid "Invalid service catalog json." msgstr "無効なサービスカタログ JSON。" #, python-format msgid "Invalid snapshot: %(reason)s" msgstr "無効なスナップショット: %(reason)s" #, python-format msgid "Invalid sort dirs passed: %s" msgstr "無効なソート方向が渡されました : %s" #, python-format msgid "Invalid sort keys passed: %s" msgstr "無効なソートキーが渡されました : %s" #, python-format msgid "Invalid status: '%s'" msgstr "無効な状態: '%s'" #, python-format msgid "Invalid storage pool %s requested. Retype failed." msgstr "無効なストレージプール %s が要求されました。再入力は失敗しました。" #, python-format msgid "Invalid storage pool %s specificed." msgstr "無効なストレージプール %s が指定されました。" #, python-format msgid "Invalid update setting: '%s'" msgstr "無効な更新設定: '%s'" #, python-format msgid "Invalid value '%s' for force." msgstr "force の値 '%s' は無効です。" msgid "Invalid value for 'scheduler_max_attempts', must be >=1" msgstr "'scheduler_max_attempts' の値が無効です。1 以上でなければなりません" msgid "Invalid value for NetApp configuration option netapp_host_type." msgstr "NetApp の設定オプション netapp_host_type の値が無効です。" msgid "Invalid value for NetApp configuration option netapp_lun_ostype." 
msgstr "NetApp の設定オプション netapp_lun_ostype の値が無効です。" #, python-format msgid "Invalid value for age, %(age)s" msgstr "年齢 %(age)s の値が無効です" #, python-format msgid "" "Invalid volume size provided for create request: %s (size argument must be " "an integer (or string representation of an integer) and greater than zero)." msgstr "" "作成要求に指定されたボリュームサイズ %s は無効です (size 引数は整数(または整" "数の文字列表記) でなければならず、またゼロより大きくなければなりません)。" #, python-format msgid "Invalid volume type: %(reason)s" msgstr "無効なボリューム種別: %(reason)s" #, python-format msgid "Invalid volume: %(reason)s" msgstr "無効なボリューム: %(reason)s" #, python-format msgid "Invalid volume_type passed: %s." msgstr "無効な volume_type が渡されました: %s。" #, python-format msgid "" "Invalid volume_type provided: %s (requested type is not compatible; either " "match source volume, or omit type argument)." msgstr "" "無効な volume_type が指定されました: %s (要求されるタイプはこれと互換性があり" "ません。ソースボリュームと合致するか、 タイプの引数を排除する必要がありま" "す)。" #, python-format msgid "" "Invalid volume_type provided: %s (requested type is not compatible; " "recommend omitting the type argument)." msgstr "" "無効な volume_type が指定されました: %s (要求されるタイプはこれと互換性があり" "ません。タイプの引数を排除することを推奨します)。" #, python-format msgid "" "Invalid volume_type provided: %s (requested type must be supported by this " "consistency group)." msgstr "" "無効な volume_type %s が指定されました (要求するタイプは、この整合性グループ" "でサポートされていなければなりません)。" #, python-format msgid "" "Invalid volume_type provided: %s (requested type must be supported by this " "group)." msgstr "" "無効な volume_type %s が指定されました (要求するタイプは、このグループでサ" "ポートされていなければなりません)。" #, python-format msgid "Invalid wwpns format %(wwpns)s" msgstr "wwpn 形式 %(wwpns)s は無効です" msgid "Issue encountered waiting for job." msgstr "ジョブの待機中に問題が発生しました。" msgid "Issue encountered waiting for synchronization." msgstr "同期の待機中に問題が発生しました。" msgid "" "Issuing a fail-over failed because replication is not properly configured." msgstr "" "レプリケーションが適切に設定されていないため、fail-over の発行が失敗しまし" "た。" #, python-format msgid "Kaminario retryable exception: %(reason)s" msgstr "再試行可能な Kaminario 例外: %(reason)s" #, python-format msgid "KaminarioCinderDriver failure: %(reason)s" msgstr "Kaminario Cinder ドライバー障害です: %(reason)s" msgid "LUN doesn't exist." msgstr "LUN が存在しません。" msgid "LUN map overflow on every channel." msgstr "すべてのチャンネルでの LUN マップのオーバーフロー。" #, python-format msgid "LUN not found by UUID: %(uuid)s." msgstr "UUID %(uuid)s の LUN が見つかりません。" #, python-format msgid "LUN not found with given ref %s." msgstr "指定された参照 %s を持つ LUN が見つかりません。" #, python-format msgid "LUN number is out of bound on channel id: %(ch_id)s." msgstr "LUN 番号がチャンネル ID: %(ch_id)s の境界を越えています。" msgid "License is unavailable." msgstr "ライセンスが使用できません。" #, python-format msgid "Linked clone of source volume not supported in state: %s." msgstr "" "状態 %s でのソースボリュームのリンクされた複製はサポートされていません。" msgid "Login failed." msgstr "ログインに失敗しました。" msgid "Logout session error." msgstr "ログアウトセッションのエラー。" msgid "" "Lookup service not configured. Config option for fc_san_lookup_service needs " "to specify a concrete implementation of the lookup service." msgstr "" "ルックアップサービスが構成されていません。fc_san_lookup_service の構成オプ" "ションはルックアップサービスの具体的実装の指定を必要とします。" msgid "Lun migration error." msgstr "LUN マイグレーションのエラー。" #, python-format msgid "" "MD5 of object: %(object_name)s before: %(md5)s and after: %(etag)s is not " "same." 
msgstr "" "オブジェクトの MD5: %(md5)s の前と %(etag)s の後の %(object_name)s が同じで" "はありません。" #, python-format msgid "Malformed fcns output string: %s" msgstr "fcns 出力ストリングの形式が誤っています: %s" #, python-format msgid "Malformed message body: %(reason)s" msgstr "メッセージ本体の形式に誤りがあります: %(reason)s" #, python-format msgid "Malformed nameserver string: %s" msgstr "誤った形式のネームサーバー文字列: %s" msgid "Malformed request body" msgstr "誤った形式の要求本体" msgid "Malformed request body." msgstr "誤った形式のリクエスト本文。" msgid "Malformed request url" msgstr "誤った形式の要求 URL" #, python-format msgid "Malformed response to command %(cmd)s: %(reason)s" msgstr "コマンド %(cmd)s への応答の形式が誤っています: %(reason)s" #, python-format msgid "Malformed show fcns database string: %s" msgstr "fcns データベース文字列の形式が誤っています: %s" #, python-format msgid "" "Malformed zone configuration: (switch=%(switch)s " "zone_config=%(zone_config)s)." msgstr "" "誤った形式のゾーン構成: (switch=%(switch)s zone_config=%(zone_config)s)。" #, python-format msgid "Malformed zone status: (switch=%(switch)s zone_config=%(zone_config)s)." msgstr "" "ゾーンステータスの形式が誤っています: (switch=%(switch)s " "zone_config=%(zone_config)s)" msgid "Manage existing get size requires 'id'." msgstr "既存の get サイズを管理するには 'id' が必要です。" msgid "Manage existing snapshot not implemented." msgstr "既存のスナップショットの管理が実装されていません。" #, python-format msgid "" "Manage existing volume failed due to invalid backend reference " "%(existing_ref)s: %(reason)s" msgstr "" "無効なバックエンド参照 %(existing_ref)s のため、既存ボリュームの管理に失敗し" "ました: %(reason)s" #, python-format msgid "Manage existing volume failed due to volume type mismatch: %(reason)s" msgstr "" "ボリューム種別の不一致のため、既存ボリュームの管理に失敗しました: %(reason)s" msgid "Manage existing volume not implemented." msgstr "既存ボリュームの管理は実装されていません。" msgid "Manage existing volume requires 'source-id'." msgstr "既存のボリュームを管理するには 'source-id' が必要です。" msgid "Managing of snapshots to failed-over volumes is not allowed." msgstr "" "フェイルオーバーされたボリュームへのスナップショットを管理対象にすることは許" "可されません。" msgid "Map info is None due to array version not supporting hypermetro." msgstr "" "アレイのバージョンが hypermetro をサポートしないことが原因で、マップ情報があ" "りません。" msgid "Max read bandwidth setting for volume qos, use 0 for unlimited" msgstr "" "ボリューム QoS の最大読み出し帯域幅設定。無制限にする場合は0を指定します。" msgid "Max read iops setting for volume qos, use 0 for unlimited" msgstr "ボリューム QoS の読み出し iops 設定。無制限にする場合は0を指定します。" msgid "Max total bandwidth setting for volume qos, use 0 for unlimited" msgstr "" "ボリューム QoS のトータルの最大帯域幅設定。無制限にする場合は0を指定します。" msgid "Max total iops setting for volume qos, use 0 for unlimited" msgstr "" "ボリューム QoS のトータルの iops 設定。無制限にする場合は0を指定します。" msgid "Max write bandwidth setting for volume qos, use 0 for unlimited" msgstr "" "ボリューム QoS の最大書き込み帯域幅設定。無制限にする場合は0を指定します。" msgid "Max write iops setting for volume qos, use 0 for unlimited" msgstr "ボリューム QoS の書き込み iops 設定。無制限にする場合は0を指定します。" msgid "Maximum age is count of days since epoch." msgstr "最長存続時間は、エポック以降の日数です。" #, python-format msgid "Maximum number of backups allowed (%(allowed)d) exceeded" msgstr "バックアップの許容最大数 (%(allowed)d) を超えました。" #, python-format msgid "Maximum number of snapshots allowed (%(allowed)d) exceeded" msgstr "スナップショットの許容最大数 (%(allowed)d) を超えました。" #, python-format msgid "" "Maximum number of volumes allowed (%(allowed)d) exceeded for quota " "'%(name)s'." msgstr "" "許容されるボリュームの最大数 (%(allowed)d) がクォータ '%(name)s' を超えまし" "た。" #, python-format msgid "May specify only one of %s" msgstr "指定できる %s は 1 つのみです" #, python-format msgid "Message %(message_id)s could not be found." 
msgstr "メッセージ %(message_id)s が見つかりませんでした。" msgid "Metadata backup already exists for this volume" msgstr "このボリュームのメタデータバックアップは既に存在します。" #, python-format msgid "Metadata backup object '%s' already exists" msgstr "メタデータのバックアップオブジェクト '%s' は既に存在します" #, python-format msgid "Metadata property key %s greater than 255 characters." msgstr "メタデータプロパティーのキー %s の文字数が255文字を超えています。" #, python-format msgid "Metadata property key %s value greater than 255 characters." msgstr "メタデータプロパティーのキー %s の値の文字数が255文字を超えています。" msgid "Metadata restore failed due to incompatible version" msgstr "バージョンの非互換のため、メタデータのリストアに失敗しました" msgid "Metadata restore failed due to incompatible version." msgstr "バージョンの非互換のため、メタデータのリストアに失敗しました。" #, python-format msgid "Method %(method)s is not defined" msgstr "メソッド %(method)s は定義されていません。" msgid "Missing Fibre Channel SAN configuration param - fc_fabric_names" msgstr "" "Fibre Channel の SAN 構成パラメーターの fc_fabric_names が欠落しています" #, python-format msgid "Missing attributes list for API %s." msgstr "API %s の属性リストがありません。" msgid "Missing record count for NetApp iterator API invocation." msgstr "NetApp のイテレーター API 起動記録カウントがありません。" msgid "Missing request body." msgstr "要求本体がありません。" #, python-format msgid "Missing required element '%s' in request body." msgstr "リクエストの本文に必要な要素 '%s' がありません。" #, python-format msgid "Multiple copies of volume %s found." msgstr "ボリューム %s の複数のコピーが見つかりました。" #, python-format msgid "Multiple matches found for '%s', use an ID to be more specific." msgstr "" "'%s' に関して複数の一致が見つかりました。ID を使用して絞り込んでください。" msgid "Multiple profiles found." msgstr "複数のプロファイルが見つかりました。" msgid "Must implement a fallback schedule" msgstr "フォールバックスケジューラーを実装する必要があります。" msgid "Must implement schedule_create_group" msgstr "schedule_create_group の実装が必要です。" msgid "Must implement schedule_create_volume" msgstr "schedule_create_volume を実装する必要があります。" msgid "Must implement schedule_get_pools" msgstr "schedule_get_pools を実装する必要があります" msgid "Must pass wwpn or host to lsfabric." msgstr "wwpn またはホストを lsfabric に渡す必要があります。" msgid "" "Must specify 'status', 'attach_status' or 'migration_status' for update." msgstr "" "更新のために 'status'、'attach_status'、または 'migration_status' を指定する" "必要があります。" #, python-format msgid "Must specify a valid persona %(valid)s,value '%(persona)s' is invalid." msgstr "" "有効な個人 %(valid)s を指定する必要があります。値 '%(persona)s' は無効です。" #, python-format msgid "" "Must specify a valid provisioning type %(valid)s, value '%(prov)s' is " "invalid." msgstr "" "有効なプロビジョニングタイプ %(valid)s を指定する必要があります。値 " "'%(prov)s' は無効です。" msgid "Must specify an ExtensionManager class" msgstr "ExtensionManager クラスを指定する必要があります" msgid "" "Must specify one or more of the following keys to update: name, description, " "add_volumes, remove_volumes." msgstr "" "更新を行うには、次のキーを一つ以上指定する必要があります : 名前、説明、 " "add_volumes 、 remove_volumes" msgid "Must specify snapshot source-name or source-id." msgstr "" "スナップショットの source-name または source-id を指定する必要があります。" msgid "Must specify source-name or source-id." msgstr "ソース名またはソース ID を指定する必要があります。" msgid "Must supply a positive value for age" msgstr "年齢には正の値を提供する必要があります" #, python-format msgid "" "NAS config '%(name)s=%(value)s' invalid. Must be 'auto', 'true', or 'false'" msgstr "" "NAS 構成「%(name)s=%(value)s」は無効です。「auto」、「true」、「false」のいず" "れかでなければなりません。" #, python-format msgid "NFS config file at %(config)s doesn't exist" msgstr "NFS 構成ファイルが %(config)s に存在しません。" #, python-format msgid "NFS file %s not discovered." msgstr "NFS ファイル %s は検出されていません。" msgid "NFS file could not be discovered." 
msgstr "NFS ファイルを検出できませんでした。" msgid "NaElement name cannot be null." msgstr "NaElement 名は NULL にできません。" msgid "Name" msgstr "名前" msgid "" "Name, description, add_volumes, and remove_volumes can not be all empty in " "the request body." msgstr "" "要求本体で、名前、説明、add_volumes、および remove_volumes をすべて空にするこ" "とはできません。" msgid "Need non-zero volume size" msgstr "ゼロでないボリュームサイズが必要です" msgid "NetApp Cinder Driver exception." msgstr "NetApp Cinder Driver 例外です。" #, python-format msgid "" "New size for extend must be greater than current size. (current: %(size)s, " "extended: %(new_size)s)." msgstr "" "拡張用に指定する新しいサイズは、現行サイズより大きくなければなりません。(現" "行: %(size)s、拡張用: %(new_size)s)。" #, python-format msgid "" "New size should be bigger than the real size from backend storage. realsize: " "%(oldsize)s, newsize: %(newsize)s." msgstr "" "新規サイズはバックエンドストレージの実サイズよりも大きくなければなりません。" "実サイズ: %(oldsize)s、新規サイズ: %(newsize)s。" msgid "New volume type not specified in request_spec." msgstr "新規のボリュームタイプが要求仕様に指定されていません。" msgid "Nimble Cinder Driver exception" msgstr "Nimble Cinder ドライバー例外" #, python-format msgid "No FC port can be used for LUN %s." msgstr "LUN %s に利用できる FC ポートがありません。" msgid "No FC port connected to fabric." msgstr "ファブリックに接続された FC ポートはありません。" msgid "No VF ID is defined in the configuration file." msgstr "設定ファイルに VF ID が定義されていません。" msgid "No active iSCSI portals with supplied iSCSI IPs" msgstr "提供された iSCSI IP の iSCSI ポータルがアクティブではありません。" msgid "No backups available to do an incremental backup." msgstr "増分バックアップを実行するために使用可能なバックアップがありません。" #, python-format msgid "No cloned LUN named %s found on the filer" msgstr "複製された %s という名前の LUN はファイラーで見つかりません" msgid "No config node found." msgstr "設定ノードが見つかりません。" #, python-format msgid "No element by given name %s." msgstr "指定された名前 %s の要素はありません。" #, python-format msgid "No file found with %s as backing file." msgstr "バッキングファイルとして %s を持つファイルが見つかりません。" #, python-format msgid "No group snapshot with id %s" msgstr "id %s のグループスナップショットは存在しません。" msgid "No iSCSI-enabled ports on target array." msgstr "ターゲット配列に iSCSI に対応するポートがありません。" msgid "No initiator connected to fabric." msgstr "ファブリックに接続されたイニシエーターはありません。" #, python-format msgid "No initiator group found for initiator %s" msgstr "イニシエーター %s のイニシエーターグループが見つかりません。" #, python-format msgid "No interface found on cluster for ip %s" msgstr "ip %s のクラスター上にインターフェースが見つかりませんでした" msgid "No ip address found." msgstr "IP アドレスが見つかりません。" msgid "No mounted NFS shares found" msgstr "マウントされた NFS 共有が見つかりません" msgid "No mounted Virtuozzo Storage shares found" msgstr "マウントされた Virtuozzo Storage 共有が見つかりません" msgid "No mounted shares found" msgstr "マウントされた共有が見つかりません" #, python-format msgid "No node found in I/O group %(gid)s for volume %(vol)s." msgstr "ボリューム %(vol)s の入出力グループ %(gid)s でノードが見つかりません。" msgid "" "No pools are available for provisioning volumes. Ensure that the " "configuration option netapp_pool_name_search_pattern is set correctly." msgstr "" "ボリュームのプロビジョニングに使用できるプールがありません。設定オプション " "netapp_pool_name_search_pattern が正しく設定されていることを確認してくださ" "い。" #, python-format msgid "No snap found with %s as backing file." msgstr "バッキングファイルとして %s を持つスナップが見つかりません。" msgid "" "No storage could be allocated for this volume request. You may be able to " "try another size or volume type." msgstr "" "このボリュームのリクエストに対して、ストレージを割り当てられませんでした。サ" "イズやボリューム種別を変更して試すことができるかもしれません。" #, python-format msgid "No such QoS spec %(specs_id)s." 
msgstr "そのような QoS 仕様 %(specs_id)s は存在しません。" msgid "No suitable discovery ip found" msgstr "適切なディスカバリー ip が見つかりません。" msgid "No suitable host was found to failover." msgstr "フェイルオーバーに適したホストが見つかりませんでした。" #, python-format msgid "No support to restore backup version %s" msgstr "バックアップバージョン %s をリストアすることができません" msgid "No valid ports." msgstr "有効なポートがありません。" #, python-format msgid "No vdisk with the ID specified by ref %s." msgstr "参照 %s によって指定された ID を持つ vdisk がありません。" #, python-format msgid "No vdisk with the UID specified by ref %s." msgstr "参照 %s によって指定された UID を持つ vdisk がありません。" #, python-format msgid "" "No volume on cluster with vserver %(vserver)s and junction path %(junction)s " msgstr "" "仮想サーバー %(vserver)s および接合パス %(junction)s を含むボリュームはクラス" "ターにありません" #, python-format msgid "No volumes or consistency groups exist in cluster %(current)s." msgstr "クラスター %(current)s にはボリュームも整合性グループも存在しません。" msgid "No wwpns found in host connector." msgstr "ホストコネクターで wwpn が見つかりませんでした。" msgid "Non-getter API passed to API test method." msgstr "API テストメソッドに非ゲッター API が渡されました。" msgid "Not a valid value for NaElement." msgstr "NaElement に無効な値です。" #, python-format msgid "Not authorized for image %(image_id)s." msgstr "イメージ %(image_id)s では許可されません。" msgid "Not authorized." msgstr "許可されていません。" #, python-format msgid "Not enough space on backend (%(backend)s)" msgstr "バックエンド容量が不十分です (%(backend)s)" msgid "Nova returned \"error\" status while creating snapshot." msgstr "" "スナップショットの作成時に Nova から「エラー」ステータスが返されました。" msgid "Object Count" msgstr "オブジェクト数" msgid "Object Version" msgstr "オブジェクトのバージョン" msgid "Object is not a NetApp LUN." msgstr "オブジェクトは NetApp LUN ではありません。" #, python-format msgid "" "Only %(value)s %(verb)s request(s) can be made to %(uri)s every " "%(unit_string)s." msgstr "" "%(uri)s に対して実行できる要求は、%(unit_string)s につき %(value)s %(verb)s " "要求に限られます。" msgid "Only volumes managed by OpenStack can be unmanaged." msgstr "非管理に設定できるのは、OpenStack が管理するボリュームのみです。" #, python-format msgid "Operation failed with status=%(status)s. Full dump: %(data)s" msgstr "状態=%(status)s で操作が失敗しました。フルダンプ: %(data)s" #, python-format msgid "Operation not supported: %(operation)s." msgstr "操作はサポートされていません: %(operation)s." msgid "Option gpfs_images_dir is not set correctly." msgstr "gpfs_images_dir オプションが正しく設定されていません。" msgid "Option gpfs_mount_point_base is not set correctly." msgstr "gpfs_mount_point_base オプションが正しく設定されていません。" #, python-format msgid "Originating %(res)s %(prop)s must be one of '%(vals)s' values" msgstr "" "作成元の %(res)s %(prop)s は '%(vals)s' 値のいずれかでなければなりません。" msgid "Param [identifier] is invalid." msgstr "パラメーター [identifier] が無効です。" msgid "Param [lun_name] is invalid." msgstr "パラメーター [lun_name] が無効です。" msgid "Param [snapshot_uuid] is invalid." msgstr "パラメーター [snapshot_uuid] が無効です。" #, python-format msgid "ParseException: %s" msgstr "ParseException: %s" msgid "Password has expired or has been reset, please change the password." msgstr "" "パスワードの期限が切れているか、リセットされました。パスワードを変更してくだ" "さい。" msgid "" "Password or SSH private key is required for authentication: set either " "san_password or san_private_key option." msgstr "" "認証にはパスワードまたは SSH 秘密鍵が必要です: san_password または " "san_private_key オプションを設定してください。" msgid "Path to REST server's certificate must be specified." msgstr "REST サーバーの証明書へのパスを指定しなければなりません。" #, python-format msgid "Please create %(pool_list)s pool in advance!" msgstr "事前に %(pool_list)s プールを作成してください。" #, python-format msgid "Policy doesn't allow %(action)s to be performed." 
msgstr "ポリシーは %(action)s の実行を許可していません。" msgid "Pool is not available in the volume host field." msgstr "プールがボリュームホストフィールドにありません。" msgid "Pool is not available in the volume host fields." msgstr "プールがボリュームホストフィールドにありません。" #, python-format msgid "Pool with name %(pool_name)s wasn't found in domain %(domain_id)s." msgstr "" "名前が %(pool_name)s のプールがドメイン %(domain_id)s で見つかりませんでし" "た。" msgid "Pools name is not set." msgstr "プール名が設定されていません。" #, python-format msgid "Programming error in Cinder: %(reason)s" msgstr "Cinder でのプログラミングエラー : %(reason)s" msgid "Project ID" msgstr "プロジェクト ID" msgid "Protection Group not ready." msgstr "保護グループの準備ができていません。" #, python-format msgid "" "Protocol %(storage_protocol)s is not supported for storage family " "%(storage_family)s." msgstr "" "ストレージファミリー %(storage_family)s ではプロトコル %(storage_protocol)s " "はサポートされません。" msgid "Provided backup record is missing an id" msgstr "提供されたバックアップレコードに ID がありません" #, python-format msgid "" "Provided snapshot status %(provided)s not allowed for snapshot with status " "%(current)s." msgstr "" "指定されたスナップショットステータス %(provided)s は、ステータスが " "%(current)s となっているスナップショットには許可されません。" #, python-format msgid "Pure Storage Cinder driver failure: %(reason)s" msgstr "Pure Storage Cinder ドライバー障害です: %(reason)s" msgid "Purge command failed, check cinder-manage logs for more details." msgstr "Purge コマンドが失敗しました。詳細はログを確認して下さい。" #, python-format msgid "QoS Specs %(specs_id)s already exists." msgstr "QoS 仕様 %(specs_id)s は既に存在します。" #, python-format msgid "QoS Specs %(specs_id)s is still associated with entities." msgstr "QoS 仕様 %(specs_id)s はまだエンティティーと関連付けられています。" #, python-format msgid "QoS spec %(specs_id)s has no spec with key %(specs_key)s." msgstr "" "QoS 仕様 %(specs_id)s には、キー %(specs_key)s を持つ仕様はありません。" msgid "Qos specs still in use." msgstr "Qos 仕様はまだ使用中です。" msgid "Query resource pool error." msgstr "リソースプール照会のエラー。" #, python-format msgid "Quota class %(class_name)s could not be found." msgstr "クォータクラス %(class_name)s が見つかりませんでした。" msgid "Quota could not be found" msgstr "クォータが見つかりませんでした。" #, python-format msgid "Quota exceeded for resources: %(overs)s" msgstr "リソースのクォータを超過しました: %(overs)s" #, python-format msgid "Quota exceeded: code=%(code)s" msgstr "クォータを超過しました: code=%(code)s" #, python-format msgid "Quota for project %(project_id)s could not be found." msgstr "プロジェクト %(project_id)s のクォータが見つかりませんでした。" #, python-format msgid "Quota usage for project %(project_id)s could not be found." msgstr "プロジェクト %(project_id)s のクォータ使用率が見つかりませんでした。" #, python-format msgid "RBD diff op failed - (ret=%(ret)s stderr=%(stderr)s)" msgstr "RBD diff 操作が失敗しました: (ret=%(ret)s stderr=%(stderr)s)" msgid "REST Async Error: Command not accepted." msgstr "REST 非同期エラー: コマンドが許可されていません。" msgid "RPC Version" msgstr "RPC のバージョン" msgid "Reference must be for an unmanaged snapshot." msgstr "参照は非管理対象のスナップショットに対するものでなければなりません。" msgid "Reference must be for an unmanaged virtual volume." msgstr "非管理対象の仮想ボリュームに対する参照でなければなりません。" msgid "Reference must contain source-id or source-name element." msgstr "" "参照には source-id または source-name の要素が含まれていなければなりません。" msgid "Reference must contain source-id or source-name key." msgstr "" "参照には source-id または source-name キーが含まれていなければなりません。" msgid "Reference must contain source-id or source-name." msgstr "参照には source-id または source-name が含まれていなければなりません。" msgid "Reference must contain source-id." msgstr "参照には source-id が含まれていなければなりません。" msgid "Reference must contain source-name element." 
msgstr "参照には source-name 要素が含まれていなければなりません。" msgid "Reference must contain source-name or source-id." msgstr "参照には source-name または source-id が含まれていなければなりません。" msgid "Reference must contain source-name." msgstr "参照には source-name が含まれていなければなりません。" #, python-format msgid "" "Refusing to migrate volume ID: %(id)s. Please check your configuration " "because source and destination are the same Volume Group: %(name)s." msgstr "" "ボリューム ID %(id)s のマイグレーションを拒否中。ソースと宛先が同じボリューム" "グループ %(name)s であるため、構成を確認してください。" msgid "Remote pool cannot be found." msgstr "リモートプールが見つかりません。" msgid "Remove CHAP error." msgstr "CHAP 削除のエラー。" msgid "Remove fc from host error." msgstr "ホストからの FC 削除のエラー。" msgid "Remove host from array error." msgstr "アレイからのホスト削除のエラー。" msgid "Remove host from hostgroup error." msgstr "ホストグループからのホスト削除のエラー。" msgid "Remove iscsi from host error." msgstr "ホストからの iSCSI 削除のエラー。" msgid "Remove lun from QoS error." msgstr "QoS からの LUN 削除のエラー。" msgid "Remove lun from cache error." msgstr "キャッシュからの LUN 削除のエラー。" msgid "Remove lun from partition error." msgstr "パーティションからの LUN 削除のエラー" msgid "Remove port from port group error." msgstr "ポートグループからのポート削除のエラー。" msgid "Remove volume export failed." msgstr "ボリュームのエクスポートの削除に失敗しました。" msgid "Rename lun on array error." msgstr "アレイでの LUN 名前変更のエラー。" msgid "Rename snapshot on array error." msgstr "アレイでのスナップショット名前変更のエラー。" #, python-format msgid "Replication %(name)s to %(ssn)s failed." msgstr "%(name)s の %(ssn)s へのレプリケーションが失敗しました。" msgid "" "Replication is configured, but no MirrorView/S enabler installed on VNX." msgstr "" "レプリケーションが設定されていますが、 VNX にMirrorView/S イネーブラーがイン" "ストールされていません。" msgid "Replication not allowed yet." msgstr "まだレプリケーションを行うことはできません。" msgid "" "Replication setup failure: replication has been enabled but no replication " "target has been specified for this backend." msgstr "" "レプリケーションセットアップ失敗: レプリケーションが有効になっていますが、レ" "プリケーションターゲットがこのバックエンド用に指定されていません。" msgid "" "Replication setup failure: replication:livevolume has been enabled but more " "than one replication target has been specified for this backend." msgstr "" "レプリケーションセットアップ失敗: ライブボリュームが有効になっていますが、1つ" "以上のレプリケーションターゲットがこのバックエンド用に指定されています。" msgid "Request body and URI mismatch" msgstr "要求本体と URI の不一致" msgid "Request body contains too many items" msgstr "要求本体に含まれる項目が多すぎます" #, python-format msgid "" "Requested backup exceeds allowed Backup gigabytes quota. Requested " "%(requested)sG, quota is %(quota)sG and %(consumed)sG has been consumed." msgstr "" "要求されたバックアップが許容バックアップクォータ (ギガバイト) を超えていま" "す。要求量 %(requested)s G、クォータ %(quota)s G、消費量 %(consumed)s. G。" #, python-format msgid "" "Requested volume or snapshot exceeds allowed %(name)s quota. Requested " "%(requested)sG, quota is %(quota)sG and %(consumed)sG has been consumed." msgstr "" "スナップショットにリクエストされたボリュームが許容される %(name)s のクォータ" "を超えています。%(requested)sG がリクエストされ、%(quota)sG のクォータが設定" "され、%(consumed)sG が使用されています。" msgid "Required configuration not found" msgstr "必要な構成が見つかりません" #, python-format msgid "" "Reset backup status aborted, the backup service currently configured " "[%(configured_service)s] is not the backup service that was used to create " "this backup [%(backup_service)s]." msgstr "" "バックアップの状態のリセットを中止しました。現在構成されているバックアップ" "サービス [%(configured_service)s] は、このバックアップの作成に使用されたバッ" "クアップサービス [%(backup_service)s] ではありません。" #, python-format msgid "Resizing clone %s failed." msgstr "複製 %s のリサイズが失敗しました。" msgid "Resizing image file failed." msgstr "イメージファイルのサイズ変更が失敗しました。" msgid "Resource could not be found." 
msgstr "リソースが見つかりませんでした。" msgid "Resource not ready." msgstr "リソースが作動不能です。" msgid "RestURL is not configured." msgstr "Rest URL は設定されていません。" #, python-format msgid "" "Restore backup aborted, expected volume status %(expected_status)s but got " "%(actual_status)s." msgstr "" "バックアップのリストアが中止しました。予期していたボリュームの状態は " "%(expected_status)s ですが、%(actual_status)s を受け取りました。" #, python-format msgid "" "Restore backup aborted, the backup service currently configured " "[%(configured_service)s] is not the backup service that was used to create " "this backup [%(backup_service)s]." msgstr "" "バックアップのリストアが中止しました。現在構成されているバックアップサービス " "[%(configured_service)s] は、このバックアップの作成に使用されたバックアップ" "サービス [%(backup_service)s] ではありません。" #, python-format msgid "" "Restore backup aborted: expected backup status %(expected_status)s but got " "%(actual_status)s." msgstr "" "バックアップのリストアが中止しました。予期していたバックアップの状態は " "%(expected_status)s ですが、%(actual_status)s を受け取りました。" #, python-format msgid "" "Retrieved a different amount of SolidFire volumes for the provided Cinder " "snapshots. Retrieved: %(ret)s Desired: %(des)s" msgstr "" "指定された Cinder スナップショットについて異なる量の SolidFire ボリュームを検" "出しました。%(ret)s を検出しましたが、%(des)s を期待していました" #, python-format msgid "" "Retrieved a different amount of SolidFire volumes for the provided Cinder " "volumes. Retrieved: %(ret)s Desired: %(des)s" msgstr "" "指定された Cinder ボリュームについて異なる量の SolidFire ボリュームを検出しま" "した。%(ret)s を検出しましたが、%(des)s を期待していました。" #, python-format msgid "Retry count exceeded for command: %s" msgstr "コマンドの再試行回数を超過しました: %s" msgid "Retryable Dell Exception encountered" msgstr "再試行可能な Dell 例外が発生しました" msgid "Retryable Pure Storage Exception encountered" msgstr "再試行可能な Pure Storage 例外が発生しました" msgid "Retryable SolidFire Exception encountered" msgstr "再試行可能な SolidFire 例外が発生しました" msgid "" "Retype needs volume to be in available or in-use state, not be part of an " "active migration or a consistency group, requested type has to be different " "that the one from the volume, and for in-use volumes front-end qos specs " "cannot change." msgstr "" "タイプ変更では、ボリュームが利用可能か使用中の状態であること、アクティブマイ" "グレーションや整合性グループの一部ではないこと、要求したタイプが現在のボ" "リュームの一つと異なることが必要です。また、使用中のボリュームのフロントエン" "ド qos 仕様は変更できません。" msgid "Retype requires migration but is not allowed." msgstr "タイプ変更するにはマイグレーションが必要ですが、許可されていません。" #, python-format msgid "" "Running Cinder with a VMware vCenter version less than %s is not allowed." msgstr "" "Cinder をバージョン %s 以前の VMware vCenter と共に実行することは許可されてい" "ません。" msgid "SAN product is not configured." msgstr "SAN 製品は設定されていません。" #, python-format msgid "SMBFS config file at %(config)s doesn't exist." msgstr "%(config)s の SMBFS 構成ファイルは存在しません。" msgid "SMBFS config file not set (smbfs_shares_config)." msgstr "SMBFS 構成ファイルが設定されていません (smbfs_shares_config)。" #, python-format msgid "SSH command injection detected: %(command)s" msgstr "SSH コマンド注入が検出されました: %(command)s" #, python-format msgid "Scheduler Host Filter %(filter_name)s could not be found." msgstr "" "スケジューラーホストフィルター %(filter_name)s が見つかりませんでした。" #, python-format msgid "Scheduler Host Weigher %(weigher_name)s could not be found." msgstr "スケジューラーホスト Weigher %(weigher_name)s が見つかりませんでした。" #, python-format msgid "" "Secondary id can not be the same as primary array, backend_id = " "%(secondary)s." msgstr "" "セカンダリー ID はプライマリー配列と同じであってはいけません。backend_id は " "%(secondary)s です。" #, python-format msgid "Service %(service)s on host %(host)s removed." 
msgstr "ホスト %(host)s 上のサービス %(service)s を削除しました。" #, python-format msgid "Service %(service_id)s could not be found on host %(host)s." msgstr "ホスト %(host)s でサービス%(service_id)s が見つかりませんでした。" #, python-format msgid "Service %(service_id)s could not be found." msgstr "サービス %(service_id)s が見つかりませんでした。" msgid "Service is too old to fulfil this request." msgstr "サービスが古すぎるため、このリクエストに対応できません。" msgid "Service is unavailable at this time." msgstr "現在サービスは使用できません。" msgid "Session might have expired." msgstr "セッションが期限切れになった可能性があります。" msgid "Set pair secondary access error." msgstr "ペアのセカンダリーアクセス設定のエラー。" msgid "Sets thin provisioning." msgstr "シンプロビジョニングを設定します。" #, python-format msgid "" "Share %s ignored due to invalid format. Must be of form address:/export. " "Please check the nas_host and nas_share_path settings." msgstr "" "形式が無効であるため、共有 %s は無視されました。address:/export 形式でなけれ" "ばなりません。nas_host および nas_share_path の設定を確認してください。" msgid "Size" msgstr "サイズ" #, python-format msgid "Size for volume: %s not found, cannot secure delete." msgstr "ボリュームのサイズ %s が見つかりません、セキュアな削除ができません。" #, python-format msgid "" "Size of specified image %(image_size)sGB is larger than volume size " "%(volume_size)sGB." msgstr "" "指定されたイメージのサイズ %(image_size)s GB がボリュームサイズ " "%(volume_size)s GB を上回っています。" msgid "SnapMirror features require Data ONTAP 8.2 or later." msgstr "SnapMirror 機能を利用するには Data ONTAP 8.2 以上が必要です。" msgid "SnapMirror relationship is not quiesced." msgstr "SnapMirror relationship は静止されていません。" #, python-format msgid "" "Snapshot %(id)s has been asked to be deleted while waiting for it to become " "available. Perhaps a concurrent request was made." msgstr "" "スナップショット ID %(id)s が使用可能になるよう待機している途中に削除対象に" "指定されました。同時実行リクエストが行われた可能性があります。" #, python-format msgid "" "Snapshot %(id)s was found in state %(state)s rather than 'deleting' during " "cascade delete." msgstr "" "カスケードの削除中に、「削除中」ではなく %(state)s の状態でスナップショット " "%(id)s が見つかりました。" #, python-format msgid "Snapshot %(snapshot_id)s could not be found." msgstr "スナップショット %(snapshot_id)s が見つかりませんでした。" #, python-format msgid "Snapshot %(snapshot_id)s has no metadata with key %(metadata_key)s." msgstr "" "スナップショット %(snapshot_id)s にはキー %(metadata_key)s を持つメタデータは" "ありません。" #, python-format msgid "Snapshot %s must not be part of a group." msgstr "スナップショット %s はグループの一部であってはなりません。" #, python-format msgid "Snapshot '%s' doesn't exist on array." msgstr "スナップショット '%s' はアレイに存在しません。" msgid "Snapshot already managed." msgstr "スナップショットはすでに管理されています。" msgid "Snapshot cannot be created while volume is migrating." msgstr "" "ボリュームのマイグレーション中にスナップショットを作成することはできません。" msgid "Snapshot has a temporary snapshot that can't be deleted at this time." msgstr "" "スナップショットには今回削除できない一時的なスナップショットが含まれていま" "す。" msgid "Snapshot has children and cannot be deleted!" msgstr "スナップショットには子が含まれており、削除できません。" msgid "Snapshot of secondary replica is not allowed." msgstr "2 次レプリカのスナップショットは許可されません。" #, python-format msgid "Snapshot of volume not supported in state: %s." msgstr "状態 %s でのボリュームのスナップショットはサポートされていません。" #, python-format msgid "Snapshot status %(cur)s not allowed for update_snapshot_status" msgstr "" "スナップショットの状態 %(cur)s は update_snapshot_status には許可されません。" #, python-format msgid "" "Snapshot to be backed up must be available, but the current status is \"%s\"." msgstr "" "バックアップ対象のスナップショットが利用可能である必要がありますが、現在の状" "態は \"%s\" です。" #, python-format msgid "Snapshot: %s search failed in K2." 
msgstr "K2 でスナップショット %s の検索に失敗しました。" msgid "SolidFire Cinder Driver exception" msgstr "SolidFire Cinder Driver 例外" msgid "Sort direction array size exceeds sort key array size." msgstr "ソート方向の配列サイズがソートキーの配列サイズを超えています。" msgid "" "Source CG cannot be empty or in 'creating' or 'updating' state. No " "cgsnapshot will be created." msgstr "" "ソース CG が空もしくは 'creating' や 'updating' の状態ではいけません。 " "cgsnapshot は作成されません。" msgid "Source Group is empty. No group will be created." msgstr "ソースグループが空です。グループは作成されません。" msgid "" "Source group cannot be empty or in 'creating' or 'updating' state. No group " "snapshot will be created." msgstr "" "ソース グループ が空もしくは 'creating' や 'updating' の状態ではいけませ" "ん。 グループスナップショット は作成されません。" msgid "Source host details not found." msgstr "ソースホスト詳細が見つかりません" msgid "Source volume device ID is required." msgstr "ソースボリュームのデバイス ID が必要です。" msgid "Source volume not mid-migration." msgstr "ソースボリュームはマイグレーション中ではありません" msgid "Specified logical volume does not exist." msgstr "指定された論理ボリュームは存在しません。" msgid "" "Specifies number of replicas for each volume. Can only be increased once " "volume is created" msgstr "" "各ボリュームで作成するレプリカ数を指定します。ボリュームが一度作成されると、" "値は増やすことのみ可能です。" msgid "Specify a password or private_key" msgstr "パスワードまたは private_key を指定してください。" msgid "Specify group type name, description or a combination thereof." msgstr "グループ種別の名前、説明、またはこれらの組み合わせを指定してください。" msgid "Specify san_password or san_private_key" msgstr "san_password または san_private_key を指定してください" msgid "" "Specify volume type name, description, is_public or a combination thereof." msgstr "" "ボリュームタイプの名前、説明、is_public、またはこれらの組み合わせを指定してく" "ださい。" msgid "Split pair error." msgstr "ペア分割のエラー。" msgid "Split replication failed." msgstr "レプリケーションの分割が失敗しました。" msgid "Start LUNcopy error." msgstr "LUN コピー開始のエラー。" msgid "State" msgstr "状態" #, python-format msgid "State of node is wrong. Current state is %s." msgstr "ノードの状態が誤っています。現在の状態は %s です。" msgid "Status" msgstr "ステータス" msgid "Stop snapshot error." msgstr "スナップショット停止のエラー。" #, python-format msgid "Storage family %s is not supported." msgstr "ストレージファミリー %s はサポートされていません。" msgid "Storage pool is not configured." msgstr "ストレージプールが設定されていません。" #, python-format msgid "Storage profile: %(storage_profile)s not found." msgstr "ストレージプロファイル %(storage_profile)s が見つかりません。" #, python-format msgid "" "Successfully renamed %(num_vols)s volumes and %(num_cgs)s consistency groups " "from cluster %(current)s to %(new)s" msgstr "" "%(num_vols)s ボリュームと %(num_cgs)s 整合性グループの、クラスター " "%(current)s から %(new)s への名前変更が正常に行われました。" msgid "Switch over pair error." msgstr "ペア切り替えのエラー。" msgid "Sync pair error." msgstr "ペア同期のエラー。" #, python-format msgid "Synology driver authentication failed: %(reason)s." msgstr "Synology ドライバーの認証が失敗しました: %(reason)s" msgid "System does not support compression." msgstr "システムは圧縮をサポートしません。" msgid "System is busy, retry operation." msgstr "システムがビジー状態です。再試行してください。" msgid "Target group type is still in use." msgstr "ターゲットグループ種別はまだ使用中です。" msgid "Target volume type is still in use." msgstr "ターゲットボリュームタイプはまだ使用中です。" msgid "Terminate connection failed" msgstr "接続を強制終了できませんでした。" msgid "Terminate connection unable to connect to backend." msgstr "バックエンドに接続できない接続を強制終了します。" #, python-format msgid "Terminate volume connection failed: %(err)s" msgstr "ボリューム接続の終了に失敗しました: %(err)s" msgid "" "The 'sort_key' and 'sort_dir' parameters are deprecated and cannot be used " "with the 'sort' parameter." 
msgstr "" "'sort_key' パラメーターおよび 'sort_dir' パラメーターは非推奨であり、'sort' " "パラメーターと併用することはできません。" msgid "The CG does not exist on array." msgstr "CG はアレイに存在しません。" #, python-format msgid "" "The GPFS filesystem %(fs)s is not at the required release level. Current " "level is %(cur)s, must be at least %(min)s." msgstr "" "GPFS ファイルシステム %(fs)s は必要なリリースレベルに達していません。現在のレ" "ベルは %(cur)s です。%(min)s 以上は必要です。" msgid "The copy should be primary or secondary" msgstr "コピーは 1 次または 2 次であることが必要です" msgid "The decorated method must accept either a volume or a snapshot object" msgstr "" "装飾されたメソッドは、ボリュームとスナップショットオブジェクトのいずれもを受" "け付けることができません。" msgid "The decorated method must accept image_meta." msgstr "装飾されたメソッドは、 image_meta を受け入れる必要があります。" #, python-format msgid "The device in the path %(path)s is unavailable: %(reason)s" msgstr "パス %(path)s のデバイスは使用不可です: %(reason)s" msgid "The domain_name config in cinder.conf is wrong." msgstr "cinder.conf 内の domain_name 設定が誤っています。" #, python-format msgid "The end time (%(end)s) must be after the start time (%(start)s)." msgstr "終了時刻 (%(end)s) は開始時刻 (%(start)s) より後でなければなりません。" #, python-format msgid "The failed-over volume could not be deleted: %s" msgstr "フェイルオーバーされたボリュームを削除することができませんでした: %s" msgid "" "The host is not ready to be failed back. Please resynchronize the volumes " "and resume replication on the 3PAR backends." msgstr "" "ホストはフェイルバックを行う準備ができていません。3PAR バックエンドでボリュー" "ムを再同期し、レプリケーションを再開してください。" #, python-format msgid "" "The imported lun is in pool %(lun_pool)s which is not managed by the host " "%(host)s." msgstr "" "インポートした LUN はホスト %(host)s に管理されていないプール %(lun_pool)s に" "あります。" msgid "The method update_migrated_volume is not implemented." msgstr "update_migrated_volume メソッドが実装されていません。" #, python-format msgid "The provided snapshot '%s' is not a snapshot of the provided volume." msgstr "" "指定されたスナップショット '%s' は指定されたボリュームのスナップショットでは" "ありません。" msgid "" "The replication mode was not configured correctly in the volume type " "extra_specs. If replication:mode is periodic, replication:sync_period must " "also be specified and be between 300 and 31622400 seconds." msgstr "" "ボリューム種別の extra_specs でレプリケーションモードが正しく構成されていませ" "ん。replication:mode が periodic の場合、replication:sync_period must も 300 " "秒から 31622400 秒の間に設定しなければなりません。" msgid "The results are invalid." msgstr "結果が無効です。" msgid "The snapshot cannot be created when the volume is in error status." msgstr "" "ボリュームの状態が「エラー」である場合は、スナップショットを作成できません。" msgid "The snapshot cannot be created when the volume is in maintenance mode." msgstr "" "ボリュームがメンテナンスモードの場合は、スナップショットを作成できません。" #, python-format msgid "The snapshot is unavailable: %(data)s" msgstr "スナップショットは使用できません : %(data)s" #, python-format msgid "" "The source volume type '%(src)s' is different than the destination volume " "type '%(dest)s'." msgstr "" "ソースのボリュームタイプ '%(src)s' が宛先のボリュームタイプ '%(dest)s' と異な" "ります。" #, python-format msgid "The source volume type '%s' is not available." msgstr "ソースボリュームタイプ '%s' は使用できません。" #, python-format msgid "The specified LUN does not belong to the given pool: %s." msgstr "指定された LUN は指定のプールに属していません: %s。" msgid "The specified vdisk is mapped to a host." msgstr "指定された vdisk はホストにマッピングされています。" #, python-format msgid "" "The storage device does not support %(prot)s. Please configure the device to " "support %(prot)s or switch to a driver using a different protocol." 
msgstr "" "ストレージデバイスは %(prot)s をサポートしません。デバイスが %(prot)s をサ" "ポートするように設定するか、別のプロトコルを使用するドライバーに切り替えてく" "ださい。" #, python-format msgid "" "The type of metadata: %(metadata_type)s for volume/snapshot %(id)s is " "invalid." msgstr "" "メタデータのタイプ: ボリューム/スナップショット %(id)s の %(metadata_type)s " "が無効です。" #, python-format msgid "The value %(value)s for key %(key)s in extra specs is invalid." msgstr "追加スペックのキー %(key)s の値 %(value)s が無効です。" msgid "The volume cannot accept transfer in maintenance mode." msgstr "メンテナンスモードではボリュームを転送できません。" msgid "The volume cannot be attached in maintenance mode." msgstr "メンテナンスモードではボリュームを追加できません。" msgid "The volume cannot be detached in maintenance mode." msgstr "メンテナンスモードではボリュームを切り離すことができません。" msgid "The volume cannot be updated during maintenance." msgstr "メンテナンス中にはボリュームを更新することはできません。" msgid "The volume connection cannot be initialized in maintenance mode." msgstr "メンテナンスモードではボリューム接続を初期化できません。" msgid "The volume driver requires the iSCSI initiator name in the connector." msgstr "" "ボリュームドライバーには、コネクター内の iSCSI イニシエーター名が必要です。" msgid "" "The volume is currently busy on the 3PAR and cannot be deleted at this time. " "You can try again later." msgstr "" "ボリュームは 3PAR 上で現在使用中のため、この時点では削除できません。後で再試" "行できます。" #, python-format msgid "" "The volume to be managed is a %(provision)s LUN and the tiering setting is " "%(tier)s. This doesn't match with the type %(type)s." msgstr "" "管理対象となるボリュームは %(provision)s LUN で、ティアリングの設定は " "%(tier)s です。これはタイプ %(type)s と適合していません。" msgid "There are no valid ESX hosts." msgstr "有効な ESX ホストがありません。" msgid "There are no valid datastores." msgstr "有効なデータストアがありません。" msgid "There is no metadata in DB object." msgstr "DB オブジェクトの中にメタデータがありません。" #, python-format msgid "There is no share which can host %(volume_size)sG" msgstr "%(volume_size)sG をホストできる共有がありません" msgid "There is no virtual disk device." msgstr "仮想ディスクデバイスがありません。" #, python-format msgid "There was an error adding the volume to the remote copy group: %s." msgstr "" "リモートコピーグループへのボリュームの追加中にエラーが発生しました: %s。" #, python-format msgid "There was an error creating the cgsnapshot: %s" msgstr "cgsnapshot の作成中にエラーが発生しました: %s" #, python-format msgid "There was an error creating the remote copy group: %s." msgstr "リモートコピーグループの作成中にエラーが発生しました: %s。" #, python-format msgid "" "There was an error setting the sync period for the remote copy group: %s." msgstr "リモートコピーグループの同期期間の設定中にエラーが発生しました: %s。" #, python-format msgid "" "There was an error setting up a remote copy group on the 3PAR arrays: " "('%s'). The volume will not be recognized as replication type." msgstr "" "3PAR アレイでのリモートコピーグループのセットアップ中にエラーが発生しました: " "('%s')。ボリュームはレプリケーションタイプとして認識されません。" #, python-format msgid "There was an error starting remote copy: %s." msgstr "リモートコピーの開始中にエラーが発生しました: %s。" #, python-format msgid "There's no NFS config file configured (%s)" msgstr "NFS 構成ファイルが構成されていません (%s)" #, python-format msgid "" "There's no Quobyte volume configured (%s). Example: quobyte:///" "" msgstr "" "設定済みの Quobyte ボリューム (%s) が存在しません。 例: quobyte:///" "" msgid "Thin provisioning not supported on this version of LVM." msgstr "" "このバージョンの LVM ではシンプロビジョニングはサポートされていません。" msgid "ThinProvisioning Enabler is not installed. Can not create thin volume." msgstr "" "シンプロビジョニングイネーブラーがインストールされていません。シンボリューム" "を作成できません。" msgid "This request was rate-limited." msgstr "このリクエストは一定時間内の実行回数に制限があります。" #, python-format msgid "" "This system platform (%s) is not supported. This driver supports only Win32 " "platforms." 
msgstr "" "このシステムプラットフォーム (%s) はサポートされていません。このドライバー" "は、Win32 プラットフォームのみサポートします。" #, python-format msgid "Timed out while waiting for Nova update for creation of snapshot %s." msgstr "" "スナップショット %s を作成するために Nova の更新を待機している間にタイムアウ" "トになりました。" #, python-format msgid "" "Timed out while waiting for Nova update for deletion of snapshot %(id)s." msgstr "" "スナップショット %(id)s を削除するために Nova の更新を待機している間にタイム" "アウトになりました。" #, python-format msgid "Timeout waiting for %(condition_name)s in wait_until." msgstr "wait_until 中の %(condition_name)s のタイムアウトを待っています。" #, python-format msgid "Timeout while requesting %(service)s API." msgstr "%(service)s API の要求中にタイムアウトになりました。" #, python-format msgid "Timeout while requesting capabilities from backend %(service)s." msgstr "" "バックエンドの %(service)s から機能をリクエストする際にタイムアウトが発生しま" "した。" #, python-format msgid "Transfer %(transfer_id)s could not be found." msgstr "転送 %(transfer_id)s が見つかりませんでした。" #, python-format msgid "" "Trying to import backup metadata from id %(meta_id)s into backup %(id)s." msgstr "" "ID %(meta_id)s からバックアップ %(id)s にバックアップのメタデータをインポート" "しようとしています。" #, python-format msgid "" "Tune volume task stopped before it was done: volume_name=%(volume_name)s, " "task-status=%(status)s." msgstr "" "ボリュームタスクの調整が完了前に停止しました: volume_name=%(volume_name)s、" "task-status=%(status)s。" #, python-format msgid "" "Type %(type_id)s is already associated with another qos specs: " "%(qos_specs_id)s" msgstr "" "タイプ %(type_id)s は既に別の qos 仕様 %(qos_specs_id)s に関連付けられていま" "す。" msgid "Type access modification is not applicable to public group type." msgstr "" "パブリックなグループタイプでは、タイプアクセスの変更を行うことはできません。" msgid "Type access modification is not applicable to public volume type." msgstr "" "パブリックなボリュームタイプでは、タイプアクセスの変更を行うことはできませ" "ん。" msgid "Type cannot be converted into NaElement." msgstr "タイプは NaElement に変換できません。" #, python-format msgid "UUIDs %s are in both add and remove volume list." msgstr "UUID %s が、ボリュームの追加リストと削除リストの両方に存在します。" msgid "Unable to access the backend storage via file handle." msgstr "ファイルハンドル経由でバックエンドストレージにアクセスできません。" #, python-format msgid "Unable to access the backend storage via the path %(path)s." msgstr "パス %(path)s を介してバックエンドストレージにアクセスできません。" #, python-format msgid "Unable to complete failover of %s." msgstr "%s のフェイルオーバーを完了できません。" msgid "Unable to connect or find connection to host" msgstr "ホストに接続できないか、ホストへの接続が見つかりません。" msgid "Unable to create lock. Coordination backend not started." msgstr "" "ロックを作成できません。コーディネーションバックエンドがスタートしていませ" "ん。" #, python-format msgid "Unable to create server object for initiator %(name)s" msgstr "イニシエーター %(name)s 用にサーバーオブジェクトを作成できません。" #, python-format msgid "Unable to create volume %(name)s from %(snap)s." msgstr "%(snap)s からボリューム %(name)s を作成できません" #, python-format msgid "Unable to create volume %(name)s from %(vol)s." msgstr "%(vol)s からボリューム %(name)s を作成できません。" #, python-format msgid "Unable to create volume %s" msgstr "ボリューム %s を作成できません。" msgid "Unable to create volume. Backend down." msgstr "ボリュームを作成できません。バックエンドがダウンしています。" #, python-format msgid "Unable to delete snapshot %(id)s, status: %(status)s." msgstr "スナップショット %(id)s を削除できません。状態: %(status)s。" msgid "" "Unable to detach volume. Volume status must be 'in-use' and attach_status " "must be 'attached' to detach." msgstr "" "ボリュームを切り離すことができません。切り離すには、ボリュームの状態が「使用" "中」で、attach_status が「接続済み」でなければなりません。" #, python-format msgid "" "Unable to determine secondary_array from supplied secondary: %(secondary)s." 
msgstr "" "提供されたセカンダリー配列から secondary_array を検出できません: " "%(secondary)s" msgid "Unable to determine system id." msgstr "システム ID を判別できません。" msgid "Unable to determine system name." msgstr "システム名を判別できません。" #, python-format msgid "Unable to establish the partnership with the Storwize cluster %s." msgstr "Storwize クラスター %s とのパートナーシップを確立できません。" #, python-format msgid "Unable to extend volume %s" msgstr "ボリューム %s を拡張できません。" msgid "" "Unable to failback to \"default\", this can only be done after a failover " "has completed." msgstr "" "デフォルトにフェイルバックすることできません。フェイルバックができるのは、" "フェイルオーバーの完了後に限られます。" msgid "Unable to failback. Backend is misconfigured." msgstr "フェイルバックを行えません。バックエンドの設定に誤りがあります。" msgid "Unable to fetch connection information from backend." msgstr "バックエンドから接続情報を取り出すことができません。" #, python-format msgid "Unable to fetch connection information from backend: %(err)s" msgstr "バックエンドから接続情報を取り出すことができません: %(err)s" msgid "Unable to find K2peer in source K2:" msgstr "ソース K2 内で K2peer が見つかりません。" #, python-format msgid "Unable to find Volume Group: %(vg_name)s" msgstr "ボリュームグループが見つかりません: %(vg_name)s" msgid "Unable to find any active VPSA controller" msgstr "アクティブな VPSA コントローラーが見つかりません。" msgid "Unable to find failover target, no secondary targets configured." msgstr "" "フェイルオーバーのターゲットが見つかりません。セカンダリーターゲットが設定さ" "れていません。" msgid "Unable to find iSCSI mappings." msgstr "iSCSI のマッピングが見つかりません。" #, python-format msgid "Unable to find server object for initiator %(name)s" msgstr "イニシエーター %(name)s 用のサーバーオブジェクトが見つかりません。" #, python-format msgid "Unable to find ssh_hosts_key_file: %s" msgstr "ssh_hosts_key_file が見つかりません: %s" #, python-format msgid "Unable to find volume %s" msgstr "ボリューム %s が見つかりません。" #, python-format msgid "Unable to find volume: %s from K2." msgstr "K2でボリューム %s が見つかりません。" msgid "Unable to get FC target wwpns from K2." msgstr "K2 から FC ターゲット wwpn を取得できませんでした。" msgid "Unable to get ISCSI IP address from K2." msgstr "K2 から iSCSI IP アドレスを取得できませんでした。" #, python-format msgid "Unable to get a block device for file '%s'" msgstr "ファイル '%s' のブロックデバイスを取得できません。" #, python-format msgid "" "Unable to get configuration information necessary to create a volume: " "%(errorMessage)s." msgstr "ボリュームの作成に必要な設定情報を取得できません: %(errorMessage)s。" msgid "Unable to get size of manage volume." msgstr "管理ボリュームのサイズを取得できません。" #, python-format msgid "Unable to get stats for backend_name: %s" msgstr "backend_name の統計情報を取得できません: %s" msgid "Unable to get target iqn from K2." msgstr "K2 から ターゲット iqn を取得できませんでした。" msgid "Unable to import 'krest' python module." msgstr "'krest' python モジュールをインポートできません。" #, python-format msgid "Unable to locate an SVM that is managing the IP address '%s'" msgstr "IP アドレス「%s」を管理している SVM が見つかりません" #, python-format msgid "Unable to locate specified replay profiles %s " msgstr "指定されたリプレープロファイル %s を特定できません" #, python-format msgid "" "Unable to manage existing volume. Volume %(volume_ref)s already managed." msgstr "" "既に管理されているボリュームが存在するため、ボリューム %(volume_ref)s の管理" "に失敗しました。" #, python-format msgid "Unable to manage volume %s" msgstr "ボリューム %s を管理できません" msgid "Unable to map volume" msgstr "ボリュームをマッピングできません" msgid "Unable to map volume." msgstr "ボリュームのマッピングができません。" msgid "Unable to parse attributes." msgstr "属性を解析できません。" msgid "" "Unable to re-use a host that is not managed by Cinder with " "use_chap_auth=True," msgstr "" "use_chap_auth=True が指定されている、Cinder で管理されていないホストを再使用" "することはできません。" msgid "Unable to re-use host with unknown CHAP credentials configured." 
msgstr "" "不明な CHAP 資格情報が構成されているホストを再使用することはできません。" #, python-format msgid "Unable to rename volume %(existing)s to %(newname)s" msgstr "ボリューム %(existing)s の名前を %(newname)s に変更できません。" #, python-format msgid "" "Unable to retype %(specname)s, expected to receive current and requested " "%(spectype)s values. Value received: %(spec)s" msgstr "" "%(specname)s を再入力できません。要求した最新の %(spectype)s の値を受信するこ" "とを予期していたものの、%(spec)s の値を受信しました。" #, python-format msgid "" "Unable to retype: A copy of volume %s exists. Retyping would exceed the " "limit of 2 copies." msgstr "" "種別変更ができません。ボリューム %s のコピーが存在します。タイプ変更を行う" "と、コピー数 2 という制限を超えます。" #, python-format msgid "" "Unable to retype: Current action needs volume-copy, it is not allowed when " "new type is replication. Volume = %s" msgstr "" "種別変更ができません: 現行アクションにはボリュームコピーが必要ですが、新しい" "タイプが複製の場合は許可されません。ボリューム = %s" #, python-format msgid "" "Unable to set up mirror mode replication for %(vol)s. Exception: %(err)s." msgstr "" "%(vol)s のミラーモードレプリケーションをセットアップできません。例外: " "%(err)s。" msgid "Unable to terminate volume connection from backend." msgstr "バックエンドからのボリューム接続を終了することができません。" #, python-format msgid "Unable to terminate volume connection: %(err)s" msgstr "ボリューム接続を終了することができません: %(err)s" msgid "Unacceptable parameters." msgstr "受け入れられないパラメーター。" #, python-format msgid "" "Unexecpted mapping status %(status)s for mapping %(id)s. Attributes: " "%(attr)s." msgstr "" " マッピング %(id)s の予期されないマッピング状態 %(status)s。属性: %(attr)s。" #, python-format msgid "" "Unexpected CLI response: header/row mismatch. header: %(header)s, row: " "%(row)s." msgstr "" "予期しない CLI 応答: ヘッダー/行の不一致。ヘッダー: %(header)s、行: %(row)s。" #, python-format msgid "Unexpected output. Expected [%(expected)s] but received [%(output)s]" msgstr "" "予期しない出力。[%(expected)s] が予期されましたが、[%(output)s] を受け取りま" "した" #, python-format msgid "Unexpected over quota on %(name)s." msgstr "%(name)s で、予期せずクォータを超過しました。" msgid "Unexpected response from Nimble API" msgstr "Nimble API からの予期しない応答" msgid "Unexpected status code" msgstr "予期しないステータスコード" #, python-format msgid "" "Unexpected status code from the switch %(switch_id)s with protocol " "%(protocol)s for url %(page)s. Error: %(error)s" msgstr "" "URL %(page)s 用にプロトコル %(protocol)s を指定したスイッチ%(switch_id)s から" "返された予期しないステータスコード。エラー: %(error)s" msgid "Unknown NFS exception" msgstr "不明な NFS 例外" msgid "Unknown RemoteFS exception" msgstr "不明な RemoteFS 例外" msgid "Unknown SMBFS exception." msgstr "不明な SMBFS 例外。" msgid "Unknown Virtuozzo Storage exception" msgstr "Virtuozzo Storage で不明な例外が発生しました" msgid "Unknown action" msgstr "不明なアクション" #, python-format msgid "Unknown or unsupported command %(cmd)s" msgstr "不明またはサポートされないコマンド (%(cmd)s) です。" #, python-format msgid "Unknown protocol: %(protocol)s." msgstr "不明なプロトコル: %(protocol)s。" #, python-format msgid "Unknown quota resources %(unknown)s." msgstr "不明なクォータリソース %(unknown)s。" msgid "Unknown sort direction, must be 'desc' or 'asc'" msgstr "ソート方向が不明です。'desc' または 'asc' でなければなりません" msgid "Unknown sort direction, must be 'desc' or 'asc'." msgstr "ソート方向が不明です。'desc' または 'asc' でなければなりません。" msgid "Unmanage and cascade delete options are mutually exclusive." msgstr "削除オプションの非管理とカスケーディングを同時に行うことはできません。" msgid "Unmanage volume not implemented." msgstr "ボリュームの非管理が実装されていません。" msgid "Unmanaging of snapshots from failed-over volumes is not allowed." 
msgstr "" "フェイルオーバーされたボリュームからのスナップショットを非管理対象にすること" "は許可されません。" #, python-format msgid "Unrecognized backing format: %s" msgstr "認識されないバッキングフォーマット: %s" #, python-format msgid "Unrecognized read_deleted value '%s'" msgstr "認識されない read_deleted 値 '%s'" msgid "Unsupported Content-Type" msgstr "サポートされない Content-Type" #, python-format msgid "Unsupported backup metadata version (%s)" msgstr "サポートされないバックアップのメタデータバージョン (%s)" msgid "Unsupported backup metadata version requested" msgstr "サポートされないバックアップメタデータバージョンが要求されました。" #, python-format msgid "" "Unsupported firmware on switch %s. Make sure switch is running firmware v6.4 " "or higher" msgstr "" "スイッチ %s でサポートされないファームウェアです。スイッチでファームウェア " "v6.4 以上が実行されていることを確認してください" #, python-format msgid "Unsupported volume format %s" msgstr "ボリューム形式はサポートされていません: %s " #, python-format msgid "Unsupported volume format: %s " msgstr "ボリューム形式はサポートされていません: %s " msgid "Update QoS policy error." msgstr "QoS ポリシー更新のエラー。" msgid "Updated At" msgstr "最終更新" #, python-format msgid "Updating volume metadata is not allowed for volumes in %s status." msgstr "" "ボリュームの状態が %s である場合は、ボリュームメタデータの更新は許可されませ" "ん。" msgid "Upload to glance of attached volume is not supported." msgstr "" "接続されたボリュームの glance へのアップロードはサポートされていません。" msgid "Use ALUA to associate initiator to host error." msgstr "ALUA を使用したホストへのイニシエーターの関連付けのエラー。" msgid "" "Use CHAP to associate initiator to host error. Please check the CHAP " "username and password." msgstr "" "CHAP を使用したホストへのイニシエーターの関連付けのエラー。CHAP のユーザー名" "とパスワードを確認してください。" msgid "User ID" msgstr "ユーザー ID" msgid "User does not have admin privileges" msgstr "ユーザーに管理者特権がありません。" msgid "User not permitted to query Data ONTAP volumes." msgstr "ユーザーには Data ONTAP ボリュームの照会が許可されていません。" msgid "UserName is not configured." msgstr "ユーザー名は設定されていません。" msgid "UserPassword is not configured." msgstr "ユーザーパスワードは設定されていません。" msgid "VF is not enabled." msgstr "VF は有効になっていません。" msgid "VNX Cinder driver does not support multiple replication targets." msgstr "" "VNX Cinder ドライバーは複数のレプリケーションターゲットをサポートしていませ" "ん。" #, python-format msgid "VV Set %s does not exist." msgstr "VV セット %s は存在しません。" #, python-format msgid "Valid consumer of QoS specs are: %s" msgstr "QoS 仕様の有効なコンシューマー: %s" #, python-format msgid "Validate volume connection failed (error: %(err)s)." msgstr "ボリューム接続の検証に失敗しました (エラー: %(err)s)." #, python-format msgid "" "Value \"%(value)s\" is not valid for configuration option \"%(option)s\"" msgstr "構成オプション \"%(option)s\" の値 \"%(value)s\" は無効です。" #, python-format msgid "Vdisk %(name)s not involved in mapping %(src)s -> %(tgt)s." msgstr "%(src)s から %(tgt)s へのマッピングに関連しない Vdisk %(name)s。" #, python-format msgid "" "Version %(req_ver)s is not supported by the API. Minimum is %(min_ver)s and " "maximum is %(max_ver)s." msgstr "" "バージョン %(req_ver)s はこのAPIではサポートされていません。最大値は " "%(min_ver)s で、最小値は %(max_ver)s です。" #, python-format msgid "VersionedObject %s cannot retrieve object by id." msgstr "VersionedObject %s が ID ごとのオブジェクトを抽出できません。" #, python-format msgid "VersionedObject %s does not support conditional update." msgstr "VersionedObject %s が条件の変更を行うことができません。" #, python-format msgid "Virtual volume '%s' doesn't exist on array." msgstr "仮想ボリューム '%s' がアレイに存在しません。" #, python-format msgid "Volume %(deviceID)s not found." msgstr "ボリューム %(deviceID)s が見つかりません。" #, python-format msgid "Volume %(name)s could not be found. It might be already deleted" msgstr "" "ボリューム %(name)s が見つかりませんでした。すでに削除されている可能性があり" "ます。" #, python-format msgid "Volume %(name)s was created in VNX, but in %(state)s state." 
msgstr "ボリューム %(name)s は VNX で作成されましたが、%(state)s 状態です。" #, python-format msgid "Volume %(name)s was not deactivated in time." msgstr "ボリューム %(name)s は時間内に非アクティブになりませんでした。" #, python-format msgid "Volume %(vol)s could not be created in pool %(pool)s." msgstr "ボリューム %(vol)s をプール %(pool)s に作成できませんでした。" #, python-format msgid "Volume %(vol1)s does not match with snapshot.volume_id %(vol2)s." msgstr "ボリューム%(vol1)s が snapshot.volume_id %(vol2)s と一致しません。" #, python-format msgid "Volume %(vol_id)s status must be %(statuses)s" msgstr "ボリューム %(vol_id)s の状態は %(statuses)s でなければいけません。" #, python-format msgid "" "Volume %(vol_id)s status must be available to update readonly flag, but " "current status is: %(vol_status)s." msgstr "" "読み取り専用フラグを更新するには、ボリューム %(vol_id)s の状態が「使用可能」" "でなければなりませんが、現在の状態は %(vol_status)s です。" #, python-format msgid "Volume %(volume_id)s could not be found." msgstr "ボリューム %(volume_id)s が見つかりませんでした。" #, python-format msgid "Volume %(volume_id)s has no metadata with key %(metadata_key)s." msgstr "" "ボリューム %(volume_id)s にはキー %(metadata_key)s を持つメタデータはありませ" "ん。" #, python-format msgid "Volume %(volume_id)s is still attached, detach volume first." msgstr "" "ボリューム %(volume_id)s はまだ接続されています。最初にボリュームを切り離して" "ください。" #, python-format msgid "Volume %(volume_id)s replication error: %(reason)s" msgstr "ボリューム %(volume_id)s 複製エラー: %(reason)s" #, python-format msgid "Volume %s could not be created from source volume." msgstr "ボリューム %s をソースボリュームから作成できませんでした。" #, python-format msgid "Volume %s could not be created on shares." msgstr "共有上でボリューム %s を作成できませんでした。" #, python-format msgid "Volume %s could not be created." msgstr "ボリューム %s を作成できませんでした。" #, python-format msgid "Volume %s does not exist in Nexenta SA" msgstr "ボリューム %s は Nexenta SA に存在しません" #, python-format msgid "Volume %s does not exist in Nexenta Store appliance" msgstr "ボリューム %s は Nexenta Store アプライアンスに存在しません" #, python-format msgid "Volume %s does not exist on the array." msgstr "ボリューム %s does はアレイに存在しません。" #, python-format msgid "Volume %s does not have provider_location specified, skipping." msgstr "" "ボリューム %s で provider_location が指定されていません。スキップします。" #, python-format msgid "Volume %s doesn't exist on array." msgstr "ボリューム %s がアレイに存在しません。" #, python-format msgid "" "Volume %s is online. Set volume to offline for managing using OpenStack." msgstr "" "ボリューム %s がオンラインです。OpenStack を使用して管理するために、ボリュー" "ムをオフラインに設定してください。" #, python-format msgid "Volume %s must not be part of a consistency group." msgstr "ボリューム %s は整合性グループの一部であってはなりません。" #, python-format msgid "Volume %s not found" msgstr "ボリューム %s が見つかりません。" #, python-format msgid "Volume %s not found." msgstr "ボリューム %s が見つかりません。" #, python-format msgid "Volume %s: Error trying to extend volume" msgstr "ボリューム %s: ボリュームの拡張を試行中にエラーが発生しました" #, python-format msgid "Volume (%s) already exists on array" msgstr "ボリューム (%s) はすでにアレイ上に存在します" #, python-format msgid "Volume (%s) already exists on array." msgstr "ボリューム (%s) は既にアレイ上にあります。" #, python-format msgid "Volume Group %s does not exist" msgstr "ボリュームグループ %s は存在しません。" #, python-format msgid "Volume Type %(id)s already exists." msgstr "ボリューム種別 %(id)s は既に存在します。" #, python-format msgid "" "Volume Type %(volume_type_id)s deletion is not allowed with volumes present " "with the type." msgstr "" "ボリューム種別 %(volume_type_id)s を持つボリュームでは、そのボリューム種別は" "削除できません。" #, python-format msgid "" "Volume Type %(volume_type_id)s has no extra specs with key " "%(extra_specs_key)s." 
msgstr "" "ボリューム種別 %(volume_type_id)s にはキー %(extra_specs_key)s を持つ追加の仕" "様はありません。" msgid "Volume Type id must not be None." msgstr "ボリュームタイプ ID を None に設定することはできません。" msgid "Volume already managed." msgstr "ボリュームはすでに管理されています。" msgid "Volume by this name already exists" msgstr "この名前のボリュームは既に存在します" msgid "Volume create failed while extracting volume ref." msgstr "ボリューム参照の抽出中にボリュームの作成に失敗しました。" #, python-format msgid "Volume device file path %s does not exist." msgstr "ボリュームデバイスのファイルパス %s が存在しません。" #, python-format msgid "Volume device not found at %(device)s." msgstr "%(device)s でボリュームデバイスが見つかりません。" #, python-format msgid "Volume driver %s not initialized." msgstr "ボリュームドライバー %s が初期化されていません。" msgid "Volume driver not ready." msgstr "ボリュームドライバーが準備できていません。" #, python-format msgid "Volume driver reported an error: %(message)s" msgstr "ボリュームドライバーがエラーを報告しました: %(message)s" msgid "Volume has a temporary snapshot that can't be deleted at this time." msgstr "" "ボリュームには今回削除できない一時的なスナップショットが含まれています。" msgid "Volume has children and cannot be deleted!" msgstr "ボリュームには子が含まれており、削除できません。" #, python-format msgid "Volume in group %s is attached. Need to detach first." msgstr "グループ %s のボリュームが接続されています。まず切り離してください。" msgid "Volume in group still has dependent snapshots." msgstr "グループ内のボリュームには、まだ従属スナップショットがあります。" #, python-format msgid "Volume is attached to a server. (%s)" msgstr "ボリュームがサーバーに追加されています (%s)。" msgid "Volume is in-use." msgstr "ボリュームが使用中です。" msgid "Volume is not available." msgstr "ボリュームが利用できません。" msgid "Volume is not local to this node." msgstr "ボリュームがこのノードに対してローカルではありません。" msgid "Volume manage failed." msgstr "ボリュームの管理に失敗しました。" msgid "" "Volume manage identifier must contain either source-id or source-name " "element." msgstr "" "ボリューム管理識別子には、要素 source-id か source-name のいずれかが含まれて" "いる必要があります。" msgid "" "Volume manage identifier with source-id is only supported with clustered " "Data ONTAP." msgstr "" "source-id を含むボリューム管理識別子は、クラスタリングされた Data ONTAP での" "みサポートされています。" #, python-format msgid "Volume migration failed: %(reason)s" msgstr "ボリュームマイグレーションが失敗しました: %(reason)s" msgid "Volume must be in the same availability zone as the snapshot" msgstr "ボリュームはスナップショットと同じ可用性ゾーンになければなりません" msgid "Volume must be in the same availability zone as the source volume" msgstr "ボリュームはソースボリュームと同じ可用性ゾーンになければなりません" msgid "Volume must not be replicated." msgstr "ボリュームを複製することはできません。" msgid "Volume must not have snapshots." msgstr "ボリュームにスナップショットがあってはなりません。" #, python-format msgid "Volume not found for instance %(instance_id)s." msgstr "インスタンス %(instance_id)s のボリュームが見つかりませんでした。" msgid "Volume not found on configured storage backend." msgstr "ボリュームが構成済みストレージバックエンドに見つかりません。" msgid "Volume not found." msgstr "ボリュームが見つかりません。" msgid "Volume not unique." msgstr "ボリュームが一意ではありません。" msgid "Volume not yet assigned to host." msgstr "ボリュームがまだホストに割り当てられていません。" msgid "Volume should have agent-type set as None." msgstr "ボリュームには agent-type として None を設定する必要があります。" #, python-format msgid "" "Volume size %(volume_size)sGB cannot be smaller than the image minDisk size " "%(min_disk)sGB." msgstr "" "ボリュームサイズ %(volume_size)s GB をイメージの minDisk サイズ %(min_disk)s " "GB より小さくすることはできません。" #, python-format msgid "Volume size '%(size)s' must be an integer and greater than 0" msgstr "" "ボリュームサイズ '%(size)s' は、整数であり、かつ 0 より大きくなければなりませ" "ん" #, python-format msgid "" "Volume size '%(size)s'GB cannot be smaller than original volume size " "%(source_size)sGB. They must be >= original volume size." 
msgstr "" "ボリュームサイズ \"%(size)s\" GB を元のボリュームサイズ %(source_size)s GB よ" "り小さくすることはできません。このサイズは元のボリュームサイズ以上でなければ" "なりません。" #, python-format msgid "" "Volume size '%(size)s'GB cannot be smaller than the snapshot size " "%(snap_size)sGB. They must be >= original snapshot size." msgstr "" "ボリュームサイズ「%(size)s」GB をスナップショットサイズ %(snap_size)s GB より" "小さくすることはできません。このサイズは元のスナップショットサイズ以上でなけ" "ればなりません。" msgid "Volume size increased since the last backup. Do a full backup." msgstr "" "最後のバックアップ以降にボリュームサイズが増加しました。フルバックアップを実" "行してください。" msgid "Volume size must be a multiple of 1 GB." msgstr "ボリュームサイズは 1 GB の倍数である必要があります。" msgid "Volume size must multiple of 1 GB." msgstr "ボリュームサイズは 1 GB の倍数である必要があります" msgid "Volume status must be 'available'." msgstr "ボリュームの状態は「使用可能」でなければなりません。" #, python-format msgid "Volume status must be available for snapshot %(id)s. (is %(status)s)" msgstr "" "スナップショット %(id)s に関しては、ボリュームの状態が「利用可能」でなければ" "いけません。 (現在は %(status)s です)" msgid "Volume to Initiator Group mapping already exists" msgstr "ボリュームからイニシエーターグループへのマッピングは既に存在します。" #, python-format msgid "" "Volume to be backed up must be available or in-use, but the current status " "is \"%s\"." msgstr "" "バックアップ対象のボリュームが利用可能か使用中である必要がありますが、現在の" "状態は \"%s\" です。" msgid "Volume to be restored to must be available" msgstr "復元するボリュームは「使用可能」でなければなりません。" #, python-format msgid "Volume type %(volume_type_id)s could not be found." msgstr "ボリューム種別 %(volume_type_id)s が見つかりませんでした。" #, python-format msgid "Volume type ID '%s' is invalid." msgstr "ボリュームタイプ ID '%s' は無効です。" #, python-format msgid "" "Volume type access for %(volume_type_id)s / %(project_id)s combination " "already exists." msgstr "" "%(volume_type_id)s / %(project_id)s の組み合わせのボリューム種別アクセスは既" "に存在します。" #, python-format msgid "" "Volume type access not found for %(volume_type_id)s / %(project_id)s " "combination." msgstr "" "%(volume_type_id)s / %(project_id)s の組み合わせのボリューム種別アクセスが見" "つかりません。" #, python-format msgid "Volume type encryption for type %(type_id)s already exists." msgstr "タイプ %(type_id)s のボリューム種別暗号化は既に存在します。" #, python-format msgid "Volume type encryption for type %(type_id)s does not exist." msgstr "タイプ %(type_id)s に対するボリューム種別暗号化は存在しません。" msgid "Volume type name can not be empty." msgstr "ボリューム種別名を空にすることはできません" #, python-format msgid "Volume type with name %(volume_type_name)s could not be found." msgstr "名前 %(volume_type_name)s を持つボリューム種別が見つかりませんでした。" msgid "" "Volumes/account exceeded on both primary and secondary SolidFire accounts." msgstr "" " プライマリーとセカンダリーの SolidFire アカウント上で、ボリュームとアカウン" "トの数量が超過しました。" #, python-format msgid "" "VzStorage config 'vzstorage_used_ratio' invalid. Must be > 0 and <= 1.0: %s." msgstr "" "VzStorage の設定の 'vzstorage_used_ratio' が無効です。0 より大きく 1.0 以下で" "ある必要があります: %s。" #, python-format msgid "VzStorage config file at %(config)s doesn't exist." msgstr "%(config)s の VzStorage のコンフィグファイルが存在しません" #, python-format msgid "Wait synchronize failed. Running status: %s." msgstr "同期の待機が失敗しました。実行状態: %s。" msgid "We should not do switch over on primary array." msgstr "プライマリーアレイで切り替えを行ってはなりません。" #, python-format msgid "Worker for %(type)s %(id)s already exists." msgstr "%(type)s のワーカー %(id)s はすでに存在します。" #, python-format msgid "Worker with %s could not be found." msgstr "%s の ID を持つワーカーを見つけることができませんでした。" msgid "XtremIO not initialized correctly, no clusters found" msgstr "XtremIO は正しく初期化されていません。クラスターが見つかりません" msgid "You must implement __call__" msgstr "__call__ を実装しなければなりません" msgid "" "You must install hpe3parclient before using 3PAR drivers. 
Run \"pip install " "python-3parclient\" to install the hpe3parclient." msgstr "" "3PAR ドライバーを使用するには hpe3parclient をインストールしておく必要があり" "ます。 \"pip install python-3parclient\" を実行して hpe3parclient をインス" "トールしてください。" #, python-format msgid "ZeroDivisionError: %s" msgstr "ZeroDivisionError: %s" msgid "Zone" msgstr "ゾーン" #, python-format msgid "Zoning Policy: %s, not recognized" msgstr "ゾーニングポリシー %s は認識されていません" #, python-format msgid "" "[%(group)s] Invalid %(protocol)s ports %(port)s specified for io_port_list." msgstr "" "[%(group)s] 無効な %(protocol)s ポート %(port)s が io_port_list に指定されま" "した。" #, python-format msgid "_create_and_copy_vdisk_data: Failed to get attributes for vdisk %s." msgstr "_create_and_copy_vdisk_data: vdisk %s の属性を取得できませんでした。" msgid "_create_host failed to return the host name." msgstr "_create_host でホスト名を返すことができませんでした。" msgid "" "_create_host: Can not translate host name. Host name is not unicode or " "string." msgstr "" "_create_host: ホスト名を変換できません。ホスト名は Unicode でもバイト文字列で" "もありません。" msgid "_create_host: No connector ports." msgstr "_create_host: コネクターポートがありません。" msgid "_create_local_cloned_volume, Replication Service not found." msgstr "" "_create_local_cloned_volume、レプリケーションサービスが見つかりません。" #, python-format msgid "" "_create_local_cloned_volume, volumename: %(volumename)s, sourcevolumename: " "%(sourcevolumename)s, source volume instance: %(source_volume)s, target " "volume instance: %(target_volume)s, Return code: %(rc)lu, Error: " "%(errordesc)s." msgstr "" "_create_local_cloned_volume、ボリューム名: %(volumename)s、ソースボリューム" "名: %(sourcevolumename)s、ソースボリュームインスタンス: %(source_volume)s、" "ターゲットボリュームインスタンス: %(target_volume)s、戻りコード: %(rc)lu、エ" "ラー: %(errordesc)s。" #, python-format msgid "" "_create_vdisk %(name)s - did not find success message in CLI output.\n" " stdout: %(out)s\n" " stderr: %(err)s" msgstr "" "_create_vdisk %(name)s - 成功メッセージが CLI 出力内に見つかりませんでし" "た。\n" " stdout: %(out)s\n" " stderr: %(err)s" msgid "_delete_copysession, Cannot find Replication Service" msgstr "" "_delete_copysession、レプリケーションサービスを見つけることができません" #, python-format msgid "" "_delete_copysession, copy session type is undefined! copy session: " "%(cpsession)s, copy type: %(copytype)s." msgstr "" "_delete_copysession、コピーセッションのタイプが定義されていません。コピーセッ" "ション: %(cpsession)s、コピータイプ: %(copytype)s。" #, python-format msgid "" "_delete_copysession, copysession: %(cpsession)s, operation: %(operation)s, " "Return code: %(rc)lu, Error: %(errordesc)s." msgstr "" "_delete_copysession、コピーセッション: %(cpsession)s、操作: %(operation)s、戻" "りコード: %(rc)lu、エラー: %(errordesc)s。" #, python-format msgid "" "_delete_volume, volumename: %(volumename)s, Return code: %(rc)lu, Error: " "%(errordesc)s." msgstr "" "_delete_volume、ボリューム名: %(volumename)s、戻りコード: %(rc)lu、エラー: " "%(errordesc)s。" #, python-format msgid "" "_delete_volume, volumename: %(volumename)s, Storage Configuration Service " "not found." msgstr "" "_delete_volume、ボリューム名: %(volumename)s、ストレージ設定サービスが見つか" "りません。" #, python-format msgid "" "_exec_eternus_service, classname: %(classname)s, InvokeMethod, cannot " "connect to ETERNUS." msgstr "" "_exec_eternus_service、クラス名: %(classname)s、InvokeMethod、ETERNUS に接続" "できません。" msgid "_extend_volume_op: Extending a volume with snapshots is not supported." msgstr "" "_extend_volume_op: スナップショットを持つボリュームの拡張はサポートされていま" "せん。" #, python-format msgid "" "_find_affinity_group, connector: %(connector)s, Associators: " "FUJITSU_AuthorizedTarget, cannot connect to ETERNUS." 
msgstr "" "_find_affinity_group、コネクター: %(connector)s、アソシエーター: " "FUJITSU_AuthorizedTarget を ETERNUS に接続できません。" #, python-format msgid "" "_find_affinity_group, connector: %(connector)s, EnumerateInstanceNames, " "cannot connect to ETERNUS." msgstr "" "_find_affinity_group、コネクター: %(connector)s、EnumerateInstanceNames を " "ETERNUS に接続できません。" #, python-format msgid "" "_find_affinity_group,connector: %(connector)s,AssocNames: " "FUJITSU_ProtocolControllerForUnit, cannot connect to ETERNUS." msgstr "" "_find_affinity_group、コネクター: %(connector)s、AssocNames: " "FUJITSU_ProtocolControllerForUnit を ETERNUS に接続できません。" #, python-format msgid "" "_find_copysession, ReferenceNames, vol_instance: %(vol_instance_path)s, " "Cannot connect to ETERNUS." msgstr "" "_find_copysession、ReferenceNames、vol_instance: %(vol_instance_path)s、" "ETERNUS に接続できません。" #, python-format msgid "" "_find_eternus_service, classname: %(classname)s, EnumerateInstanceNames, " "cannot connect to ETERNUS." msgstr "" "_find_eternus_service、クラス名: %(classname)s、EnumerateInstanceNames、" "ETERNUS に接続できません。" #, python-format msgid "_find_initiator_names, connector: %(connector)s, initiator not found." msgstr "" "_find_initiator_names、コネクター: %(connector)s、イニシエーターが見つかりま" "せん。" #, python-format msgid "" "_find_pool, eternus_pool:%(eternus_pool)s, EnumerateInstances, cannot " "connect to ETERNUS." msgstr "" "_find_pool、eternus_pool:%(eternus_pool)s、EnumerateInstances、ETERNUS に接続" "できません。" msgid "_get_async_url: Invalid URL." msgstr "_get_async_url: 正しくないURLです。" #, python-format msgid "" "_get_drvcfg, filename: %(filename)s, tagname: %(tagname)s, data is None!! " "Please edit driver configuration file and correct." msgstr "" "_get_drvcfg、ファイル名: %(filename)s、tagname: %(tagname)s、データがありませ" "ん。ドライバー設定ファイルを編集して修正してください。" #, python-format msgid "" "_get_eternus_connection, filename: %(filename)s, ip: %(ip)s, port: %(port)s, " "user: %(user)s, passwd: ****, url: %(url)s, FAILED!!." msgstr "" "_get_eternus_connection、ファイル名: %(filename)s、ip: %(ip)s、ポート: " "%(port)s、ユーザー: %(user)s、パスワード: ****、url: %(url)s、失敗しました。" #, python-format msgid "" "_get_eternus_iscsi_properties, iscsiip list: %(iscsiip_list)s, iqn not found." msgstr "" "_get_eternus_iscsi_properties、iscsiip list: %(iscsiip_list)s、iqn が見つかり" "ません。" #, python-format msgid "" "_get_eternus_iscsi_properties, iscsiip: %(iscsiip)s, AssociatorNames: " "CIM_BindsTo, cannot connect to ETERNUS." msgstr "" "_get_eternus_iscsi_properties、iscsiip: %(iscsiip)s、AssociatorNames: " "CIM_BindsTo を ETERNUS に接続できません。" #, python-format msgid "" "_get_eternus_iscsi_properties, iscsiip: %(iscsiip)s, EnumerateInstanceNames, " "cannot connect to ETERNUS." msgstr "" "_get_eternus_iscsi_properties、iscsiip: %(iscsiip)s、EnumerateInstanceNames " "を ETERNUS に接続できません。" #, python-format msgid "" "_get_eternus_iscsi_properties, iscsiip: %(iscsiip)s, GetInstance, cannot " "connect to ETERNUS." msgstr "" "_get_eternus_iscsi_properties、iscsiip: %(iscsiip)s、GetInstance を ETERNUS " "に接続できません。" #, python-format msgid "" "_get_hdr_dic: attribute headers and values do not match.\n" " Headers: %(header)s\n" " Values: %(row)s." msgstr "" "_get_hdr_dic: 属性のヘッダーと値が適合していません。\n" " ヘッダー: %(header)s\n" " 値: %(row)s。" msgid "_get_host_from_connector failed to return the host name for connector." msgstr "" "_get_host_from_connector がコネクターのホスト名を返すことができませんでした。" #, python-format msgid "" "_get_mapdata_fc, getting host-affinity from aglist/vol_instance failed, " "affinitygroup: %(ag)s, ReferenceNames, cannot connect to ETERNUS." 
msgstr "" "_get_mapdata_fc、aglist/vol_instance からの host-affinity の取得が失敗しまし" "た。affinitygroup: %(ag)s、ReferenceNames を ETERNUS に接続できません。" #, python-format msgid "" "_get_mapdata_fc, getting host-affinity instance failed, volmap: %(volmap)s, " "GetInstance, cannot connect to ETERNUS." msgstr "" "_get_mapdata_fc、host-affinity インスタンスの取得が失敗しました。volmap: " "%(volmap)s、GetInstance、ETERNUS に接続できません。" msgid "" "_get_mapdata_iscsi, Associators: FUJITSU_SAPAvailableForElement, cannot " "connect to ETERNUS." msgstr "" "_get_mapdata_iscsi、アソシエーター: FUJITSU_SAPAvailableForElement、ETERNUS " "に接続できません。" #, python-format msgid "" "_get_mapdata_iscsi, affinitygroup: %(ag)s, ReferenceNames, cannot connect to " "ETERNUS." msgstr "" "_get_mapdata_iscsi、affinitygroup: %(ag)s, ReferenceNames、ETERNUS に接続でき" "ません。" #, python-format msgid "" "_get_mapdata_iscsi, vol_instance: %(vol_instance)s, ReferenceNames: " "CIM_ProtocolControllerForUnit, cannot connect to ETERNUS." msgstr "" "_get_mapdata_iscsi、vol_instance: %(vol_instance)s、ReferenceNames: " "CIM_ProtocolControllerForUnit、ETERNUS に接続できません。" #, python-format msgid "" "_get_mapdata_iscsi, volmap: %(volmap)s, GetInstance, cannot connect to " "ETERNUS." msgstr "" "_get_mapdata_iscsi、volmap: %(volmap)s、GetInstance、ETERNUS に接続できませ" "ん。" msgid "_get_target_port, EnumerateInstances, cannot connect to ETERNUS." msgstr "_get_target_port、EnumerateInstances を ETERNUS に接続できません。" #, python-format msgid "_get_target_port, protcol: %(protocol)s, target_port not found." msgstr "" "_get_target_port、プロトコル: %(protocol)s、target_port が見つかりません。" #, python-format msgid "_get_unmanaged_replay: Cannot find snapshot named %s" msgstr "_get_unmanaged_replay: %s という名前のスナップショットが見つかりません" #, python-format msgid "_get_unmanaged_replay: Cannot find volume id %s" msgstr "_get_unmanaged_replay: ボリューム ID %s が見つかりません。" msgid "_get_unmanaged_replay: Must specify source-name." msgstr "_get_unmanaged_replay: ソース名を指定する必要があります。" msgid "" "_get_vdisk_map_properties: Could not get FC connection information for the " "host-volume connection. Is the host configured properly for FC connections?" msgstr "" "_get_vdisk_map_properties: ホストとボリュームの接続について FC 接続情報を取得" "できませんでした。ホストが FC 接続用に正しく構成されていますか。" #, python-format msgid "" "_get_vdisk_map_properties: No node found in I/O group %(gid)s for volume " "%(vol)s." msgstr "" "_get_vdisk_map_properties: ボリューム %(vol)s の入出力グループ %(gid)s でノー" "ドが見つかりませんでした。" #, python-format msgid "" "_map_lun, vol_instance.path:%(vol)s, volumename: %(volumename)s, volume_uid: " "%(uid)s, initiator: %(initiator)s, target: %(tgt)s, aglist: %(aglist)s, " "Storage Configuration Service not found." msgstr "" "_map_lun、vol_instance.path:%(vol)s、ボリューム名: %(volumename)s、" "volume_uid: %(uid)s、イニシエーター: %(initiator)s、ターゲット: %(tgt)s、" "aglist: %(aglist)s、ストレージ設定サービスが見つかりません。" #, python-format msgid "" "_unmap_lun, vol_instance.path: %(volume)s, volumename: %(volumename)s, " "volume_uid: %(uid)s, aglist: %(aglist)s, Controller Configuration Service " "not found." msgstr "" "_unmap_lun、vol_instance.path: %(volume)s、ボリューム名: %(volumename)s、" "volume_uid: %(uid)s、aglist: %(aglist)s、コントローラー設定サービスが見つかり" "ません。" #, python-format msgid "" "_unmap_lun, volumename: %(volumename)s, volume_uid: %(volume_uid)s, " "AffinityGroup: %(ag)s, Return code: %(rc)lu, Error: %(errordesc)s." 
msgstr "" "_unmap_lun、ボリューム名 %(volumename)s、volume_uid: %(volume_uid)s、" "AffinityGroup: %(ag)s、戻りコード: %(rc)lu、エラー: %(errordesc)s。" #, python-format msgid "" "_unmap_lun,vol_instance.path: %(volume)s, AssociatorNames: " "CIM_ProtocolControllerForUnit, cannot connect to ETERNUS." msgstr "" "_unmap_lun、vol_instance.path: %(volume)s、AssociatorNames: " "CIM_ProtocolControllerForUnit を ETERNUS に接続できません。" msgid "_update_volume_stats: Could not get storage pool data." msgstr "_update_volume_stats: ストレージプールデータを取得できませんでした。" #, python-format msgid "" "_wait_for_copy_complete, cpsession: %(cpsession)s, copysession state is " "BROKEN." msgstr "" "_wait_for_copy_complete、cpsession: %(cpsession)s、コピーセッション状態は " "BROKEN です。" msgid "action_locked not found" msgstr "action_locked が見つかりません。" #, python-format msgid "" "add_vdisk_copy failed: A copy of volume %s exists. Adding another copy would " "exceed the limit of 2 copies." msgstr "" "add_vdisk_copy が失敗しました。ボリューム %s のコピーが存在します。別のコピー" "を追加すると、コピー数 2 という制限を超えます。" msgid "add_vdisk_copy started without a vdisk copy in the expected pool." msgstr "" "期待されたプール内の vdisk コピーなしで add_vdisk_copy が開始されました。" #, python-format msgid "all_tenants must be a boolean, got '%s'." msgstr "all_tenants はブール値である必要がありますが、'%s' が得られました。" msgid "already created" msgstr "既に作成済み" msgid "already_created" msgstr "already_created" #, python-format msgid "attribute %s not lazy-loadable" msgstr "属性 %s は遅延ロードできません" msgid "being attached by different mode" msgstr "別のモードで接続しています。" #, python-format msgid "build_ini_targ_map fails. %s" msgstr "build_ini_targ_map が失敗しました。 %s" #, python-format msgid "can't find lun-map, ig:%(ig)s vol:%(vol)s" msgstr "lun-map を見つけることができません。ig:%(ig)s vol:%(vol)s" msgid "can't find the volume to extend" msgstr "拡張するボリュームが見つかりません" msgid "can't handle both name and index in req" msgstr "req にある名前とインデックスはどちらも処理できません" msgid "cannot understand JSON" msgstr "JSON を解釈できません" msgid "" "cg_creating_from_src must be called with cg_id or cgsnapshot_id parameter." msgstr "" "cg_creating_from_src は cg_id または cgsnapshot_id パラメーターと共に呼び出す" "必要があります。" msgid "cgsnapshot assigned" msgstr "割り当てられた cgsnapshot" msgid "cgsnapshot changed" msgstr "変更された cgsnapshot" msgid "cgsnapshots assigned" msgstr "割り当てられた cgsnapshot" msgid "cgsnapshots changed" msgstr "変更された cgsnapshot" msgid "" "check_for_setup_error: Password or SSH private key is required for " "authentication: set either san_password or san_private_key option." msgstr "" "check_for_setup_error: 認証にはパスワードまたは SSH 秘密鍵が必要です: " "san_password または san_private_key オプションを設定してください。" msgid "check_for_setup_error: Unable to determine system id." msgstr "check_for_setup_error: システム ID を判別できません。" msgid "check_for_setup_error: Unable to determine system name." msgstr "check_for_setup_error: システム名を判別できません。" msgid "check_hypermetro_exist error." msgstr "check_hypermetro_exist エラー。" msgid "cluster assigned" msgstr "割り当てられたクラスター" msgid "cluster changed" msgstr "変更されたクラスター" msgid "config option key_manager.fixed_key is not defined" msgstr "設定オプション key_manager.fixed_key は定義されていません。" msgid "consistencygroup assigned" msgstr "割り当てられた整合性グループ" msgid "consistencygroup changed" msgstr "変更された整合性グループ" msgid "create hypermetro group error." msgstr "hypermetro グループ作成エラー。" msgid "create_cloned_volume, Source Volume does not exist in ETERNUS." msgstr "create_cloned_volume、ETERNUS にソースボリュームが存在しません。" #, python-format msgid "" "create_cloned_volume, target volume instancename: %(volume_instancename)s, " "Get Instance Failed." 
msgstr "" "create_cloned_volume、ターゲットボリュームインスタンス名: " "%(volume_instancename)s、インスタンスの取得が失敗しました。" #, python-format msgid "" "create_cloned_volume: source volume %(src_vol)s size is %(src_size)dGB and " "doesn't fit in target volume %(tgt_vol)s of size %(tgt_size)dGB." msgstr "" "create_cloned_volume: ソースボリューム %(src_vol)s のサイズは %(src_size)dGB " "で、サイズ %(tgt_size)dGBand のターゲットボリューム %(tgt_vol)s に適合しませ" "ん。" msgid "" "create_consistencygroup_from_src only supports a cgsnapshot source or a " "consistency group source. Multiple sources cannot be used." msgstr "" "create_consistencygroup_from_src は 1 つの cgsnapshot ソースまたは整合性グ" "ループソースのみをサポートします。複数ソースは使用できません。" #, python-format msgid "create_copy: Source vdisk %(src)s (%(src_id)s) does not exist." msgstr "create_copy: ソース vdisk %(src)s (%(src_id)s) は存在しません。" #, python-format msgid "create_copy: Source vdisk %(src)s does not exist." msgstr "create_copy: ソース vdisk %(src)s は存在しません。" msgid "create_host: Host name is not unicode or string." msgstr "create_host: ホスト名は Unicode でもバイト文字列でもありません。" msgid "create_host: No initiators or wwpns supplied." msgstr "create_host: イニシエーターも wwpn も指定されていません。" msgid "create_hypermetro_pair error." msgstr "create_hypermetro_pair エラー。" #, python-format msgid "" "create_snapshot, volumename: %(s_volumename)s, source volume not found on " "ETERNUS." msgstr "" "create_snapshot、ボリューム名: %(s_volumename)s、ETERNUS でソースボリュームが" "見つかりません。" #, python-format msgid "" "create_snapshot: Volume status must be \"available\" or \"in-use\" for " "snapshot. The invalid status is %s." msgstr "" "create_snapshot: スナップショットのボリュームの状態は「使用可能」または「使用" "中」でなければなりません。無効な状態は %s です。" msgid "create_snapshot: get source volume failed." msgstr "create_snapshot: ソースボリュームの取得に失敗しました。" msgid "create_volume_from_snapshot, Source Volume does not exist in ETERNUS." msgstr "" "create_volume_from_snapshot、ETERNUS にソースボリュームが存在しません。" #, python-format msgid "" "create_volume_from_snapshot, target volume instancename: " "%(volume_instancename)s, Get Instance Failed." msgstr "" "create_volume_from_snapshot、ターゲットボリュームインスタンス名: " "%(volume_instancename)s、インスタンスの取得が失敗しました。" #, python-format msgid "create_volume_from_snapshot: Snapshot %(name)s does not exist." msgstr "" "create_volume_from_snapshot: スナップショット %(name)s は存在しません。" #, python-format msgid "" "create_volume_from_snapshot: Snapshot status must be \"available\" for " "creating volume. The invalid status is: %s." msgstr "" "create_volume_from_snapshot: ボリュームを作成するには、スナップショットの状態" "が「使用可能」でなければなりません。無効な状態は %s です。" #, python-format msgid "" "create_volume_from_snapshot: snapshot %(snapshot_name)s size is " "%(snapshot_size)dGB and doesn't fit in target volume %(volume_name)s of size " "%(volume_size)dGB." msgstr "" "create_volume_from_snapshot: スナップショット %(snapshot_name)s のサイズは " "%(snapshot_size)dGB で、ターゲットボリューム %(volume_name)s のサイズ " "%(volume_size)dGB に適合しません。" msgid "data not found" msgstr "データが見つかりません。" msgid "delete_hypermetro error." msgstr "delete_hypermetro エラー。" #, python-format msgid "delete_initiator: %s ACL not found. Continuing." msgstr "delete_initiator: %s ACL が見つかりません。処理を続行します。" msgid "delete_replication error." msgstr "delete_replication エラー。" #, python-format msgid "deleting snapshot %(snapshot_name)s that has dependent volumes" msgstr "従属ボリュームを持つスナップショット %(snapshot_name)s の削除中" #, python-format msgid "deleting volume %(volume_name)s that has snapshot" msgstr "スナップショットを含むボリューム %(volume_name)s の削除中" msgid "do_setup: No configured nodes." 
msgstr "do_setup: 構成されたノードがありません。" #, python-format msgid "" "error writing object to swift, MD5 of object in swift %(etag)s is not the " "same as MD5 of object sent to swift %(md5)s" msgstr "" "Swift へのオブジェクトの書き込み中にエラーが発生しました。Swift 内のオブジェ" "クトの MD5 %(etag)s が Swift に送信されたオブジェクトの MD5 %(md5)s と同じで" "はありません" #, python-format msgid "" "extend_volume, volume: %(volume)s, volumename: %(volumename)s, eternus_pool: " "%(eternus_pool)s, Storage Configuration Service not found." msgstr "" "extend_volume、ボリューム: %(volume)s、ボリューム名: %(volumename)s、" "eternus_pool: %(eternus_pool)s、ストレージ設定サービスが見つかりません。" #, python-format msgid "" "extend_volume, volumename: %(volumename)s, Return code: %(rc)lu, Error: " "%(errordesc)s, PoolType: %(pooltype)s." msgstr "" "extend_volume、ボリューム名: %(volumename)s、戻りコード: %(rc)lu、エラー: " "%(errordesc)s、プールタイプ: %(pooltype)s。" msgid "fake" msgstr "偽" #, python-format msgid "fmt=%(fmt)s backed by: %(backing_file)s" msgstr "fmt=%(fmt)s の基盤: %(backing_file)s" #, python-format msgid "fmt=%(fmt)s backed by:%(backing_file)s" msgstr "fmt=%(fmt)s は %(backing_file)s でサポートされています" msgid "force delete" msgstr "強制削除" msgid "get_hyper_domain_id error." msgstr "get_hyper_domain_id エラー。" msgid "get_hypermetro_by_id error." msgstr "get_hypermetro_by_id エラー。" #, python-format msgid "" "get_iscsi_params: Failed to get target IP for initiator %(ini)s, please " "check config file." msgstr "" "get_iscsi_params: イニシエーター %(ini)s のターゲット IP の取得に失敗しまし" "た。設定ファイルを確認してください。" #, python-format msgid "" "get_iscsi_params: No valid port in portgroup. portgroup_id: %(id)s, please " "check it on storage." msgstr "" "get_iscsi_params: 有効なポートがポートグループ内に見つかりません。ストレージ" "上で確認を行って下さい。 portgroup_id: %(id)s" #, python-format msgid "get_pool: Failed to get attributes for volume %s" msgstr "get_pool: ボリューム %s の属性の取得に失敗しました。" msgid "glance_metadata changed" msgstr "変更された glance_metadata" #, python-format msgid "" "gpfs_images_share_mode is set to copy_on_write, but %(vol)s and %(img)s " "belong to different file systems." msgstr "" "gpfs_images_share_mode は copy_on_write に設定されていますが、%(vol)s と " "%(img)s は異なるファイルシステムに属しています。" #, python-format msgid "" "gpfs_images_share_mode is set to copy_on_write, but %(vol)s and %(img)s " "belong to different filesets." msgstr "" "gpfs_images_share_mode は copy_on_write に設定されていますが、%(vol)s と " "%(img)s は異なるファイルセットに属しています。" msgid "group assigned" msgstr "割り当てられたグループ" msgid "group changed" msgstr "変更されたグループ" #, python-format msgid "group-%s" msgstr "グループ - %s" msgid "" "group_creating_from_src must be called with group_id or group_snapshot_id " "parameter." msgstr "" "group_creating_from_src は group_id または group_snapshot_id パラメーターと共" "に呼び出す必要があります。" msgid "group_snapshot assigned" msgstr "割り当てられた group_snapshot " msgid "group_snapshot changed" msgstr "変更された group_snapshot " msgid "group_snapshots assigned" msgstr "割り当てられたグループスナップショット" msgid "group_type_id cannot be None" msgstr "group_type_id を None に設定することはできません。" msgid "id cannot be None" msgstr "ID を None にすることはできません。" #, python-format msgid "image %s not found" msgstr "イメージ %s が見つかりません" #, python-format msgid "initialize_connection, volume: %(volume)s, Volume not found." msgstr "" "initialize_connection、ボリューム: %(volume)s、ボリュームが見つかりません。" #, python-format msgid "initialize_connection: Failed to get attributes for volume %s." msgstr "initialize_connection: ボリューム %s の属性の取得に失敗しました。" #, python-format msgid "initialize_connection: Missing volume attribute for volume %s." 
msgstr "" "initialize_connection: ボリューム %s のボリューム属性が欠落しています。" #, python-format msgid "" "initialize_connection: No node found in I/O group %(gid)s for volume %(vol)s." msgstr "" "initialize_connection: ボリューム %(vol)s の入出力グループ %(gid)s でノードが" "見つかりませんでした。" #, python-format msgid "initialize_connection: vdisk %s is not defined." msgstr "initialize_connection: vdisk %s が定義されていません。" #, python-format msgid "invalid user '%s'" msgstr "ユーザー '%s' は無効です" #, python-format msgid "iscsi portal, %s, not found" msgstr "iscsi ポータル %s が見つかりません" #, python-format msgid "key manager error: %(reason)s" msgstr "鍵マネージャーのエラー: %(reason)s" msgid "limit param must be an integer" msgstr "limit パラメーターは整数でなければなりません。" msgid "limit param must be positive" msgstr "limit パラメーターは正でなければなりません。" msgid "lun info not found" msgstr "LUN 情報が見つかりません。" msgid "manage_existing requires a 'name' key to identify an existing volume." msgstr "" "既存のボリュームを特定するには、manage_existing で 'name' キーが必要です。" #, python-format msgid "" "manage_existing_snapshot: Error managing existing replay %(ss)s on volume " "%(vol)s" msgstr "" "manage_existing_snapshot: ボリューム %(vol)s 上で既存のリプレー %(ss)s の管理" "でエラーが発生しました。" #, python-format msgid "marker not found: %s" msgstr "マーカーが見つかりません: %s" #, python-format msgid "mdiskgrp missing quotes %s" msgstr "mdiskgrp に引用符 %s がありません" #, python-format msgid "mkfs failed on volume %(vol)s, error message was: %(err)s." msgstr "" "ボリューム %(vol)s 上で mkfs が失敗しました。エラーメッセージ: %(err)s。" #, python-format msgid "" "mkvdiskhostmap error:\n" " command: %(cmd)s\n" " lun: %(lun)s\n" " result_lun: %(result_lun)s" msgstr "" "mkvdiskhostmap エラー:\n" " コマンド: %(cmd)s\n" " lun: %(lun)s\n" " result_lun: %(result_lun)s" msgid "mock" msgstr "モック" msgid "name cannot be None" msgstr "名前を None に設定することはできません。" msgid "no data found" msgstr "データが見つかりません。" msgid "no error code found" msgstr "エラーコードが見つかりません。" msgid "no readonly found" msgstr "読み取り専用が見つかりません。" #, python-format msgid "obj missing quotes %s" msgstr "obj に引用符 %s がありません" msgid "open_access_enabled is not off." msgstr "open_access_enabled がオフになっていません。" #, python-format msgid "pool [%s] is not writable" msgstr "プール [%s] は書き込み不可です。" msgid "progress must be an integer percentage" msgstr "進行状況は整数のパーセンテージでなければなりません。" #, python-format msgid "" "qemu-img %(minimum_version)s or later is required by this volume driver. " "Current qemu-img version: %(current_version)s" msgstr "" "qemu-img %(minimum_version)s 以降がこのボリュームドライバーに必要です。現在" "の qemu-img バージョン: %(current_version)s" msgid "" "qemu-img is not installed and the disk format is not specified. Only RAW " "images can be used if qemu-img is not installed." msgstr "" "qemu-img がインストールされておらず、ディスク形式が指定されていません。qemu-" "img がインストールされていない場合は、RAW イメージのみが使用可能です。" msgid "rados and rbd python libraries not found" msgstr "" "rados python ライブラリーおよび rbd python ライブラリーが見つかりません。" #, python-format msgid "read_deleted can only be one of 'no', 'yes' or 'only', not %r" msgstr "" "read_deleted には 'no', 'yes', 'only' のいずれかのみを指定できます。%r は指定" "できません" #, python-format msgid "replication_device %s is not set." msgstr "replication_device %s が設定されていません。" #, python-format msgid "replication_failover failed. %s not found." msgstr "replication_failover が失敗しました。%s が見つかりません。" msgid "replication_failover failed. Backend not configured for failover" msgstr "" "replication_failover が失敗しました。バックエンドがフェイルオーバーのために設" "定されていません。" msgid "" "restore_backup aborted, actual object list does not match object list stored " "in metadata." 
msgstr "" "restore_backup が打ち切られました。実際のオブジェクトリストが、メタデータ内に" "保管されているオブジェクトリストと一致しません。" #, python-format msgid "rtslib_fb is missing member %s: You may need a newer python-rtslib-fb." msgstr "" "rtslib_fb にメンバー %s がありません。より新しい python-rtslib-fb が必要かも" "しれません。" msgid "san_ip is not set." msgstr "san_ip が設定されていません。" msgid "san_ip must be set" msgstr "san_ip を設定する必要があります" msgid "" "san_login and/or san_password is not set for Datera driver in the cinder." "conf. Set this information and start the cinder-volume service again." msgstr "" "san_login と san_password のいずれかまたは両方が cinder.conf の Dateraドライ" "バーに設定されていません。この情報を設定して、cinder-volume サービスを再開し" "てください。" msgid "serve() can only be called once" msgstr "serve() は一度しか呼び出せません。" msgid "size not found" msgstr "サイズが見つかりません。" msgid "snapshot info not found" msgstr "スナップショット情報が見つかりません。" #, python-format msgid "snapshot-%s" msgstr "スナップショット: %s" msgid "snapshots assigned" msgstr "割り当てられたスナップショット" msgid "snapshots changed" msgstr "変更されたスナップショット" msgid "source-name cannot be empty." msgstr "source-name は空にできません。" msgid "source-name format should be: 'vmdk_path@vm_inventory_path'." msgstr "" "source-name 形式は 'vmdk_path@vm_inventory_path' でなければなりません。" msgid "specs must be a dictionary." msgstr "スペックはディクショナリーである必要があります。" #, python-format msgid "status must be %s and" msgstr "状態は %s である必要があります" msgid "status must be available" msgstr "状態は「使用可能」でなければなりません。" msgid "status not found" msgstr "ステータスが見つかりません。" msgid "stop hypermetro group error." msgstr "hypermetro グループ停止エラー。" msgid "stop_hypermetro error." msgstr "stop_hypermetro エラー。" msgid "storops Python library is not installed." msgstr "strops Python ライブラリーがインストールされていません。" msgid "sync hypermetro group error." msgstr "hypermetro グループ同期エラー。" msgid "sync_hypermetro error." msgstr "sync_hypermetro エラー。" #, python-format msgid "target=%(target)s, lun=%(lun)s" msgstr "target=%(target)s, lun=%(lun)s" #, python-format msgid "" "targetcli not installed and could not create default directory " "(%(default_path)s): %(exc)s" msgstr "" "targetcli がインストールされておらず、デフォルトのディレクトリー " "(%(default_path)s) を作成できませんでした: %(exc)s" msgid "terminate_connection: Failed to get host name from connector." msgstr "terminate_connection: コネクターからホスト名を取得できませんでした。" msgid "too many body keys" msgstr "本体キーが多すぎます" #, python-format msgid "trg_id is invalid: %d." msgstr "trg_id が無効です: %d" #, python-format msgid "unmanage_snapshot: Cannot find snapshot named %s" msgstr "unmanage_snapshot: %s という名前のスナップショットが見つかりません。" #, python-format msgid "unmanage_snapshot: Cannot find volume id %s" msgstr "unmanage_snapshot: ボリューム ID %s が見つかりません。" #, python-format msgid "unrecognized argument %s" msgstr "認識されない引数 %s" #, python-format msgid "unsupported compression algorithm: %s" msgstr "サポートされない圧縮アルゴリズム: %s" msgid "uuid not found" msgstr "UUID が見つかりません。" msgid "valid iqn needed for show_target" msgstr "show_target に必要とされる有効な iqn" #, python-format msgid "vdisk %s is not defined." msgstr "vdisk %s が定義されていません。" msgid "volume assigned" msgstr "割り当てられたボリューム" msgid "volume changed" msgstr "変更されたボリューム" #, python-format msgid "" "volume size %(volume_size)d is too small to restore backup of size %(size)d." msgstr "" "ボリュームサイズ %(volume_size)d は、サイズ %(size)d のバックアップを復元する" "には小さすぎます。" #, python-format msgid "volume size %d is invalid." msgstr "ボリュームサイズ %d は無効です。" msgid "" "volume_type must be provided when creating a volume in a consistency group." msgstr "" "ボリュームを整合性グループに作成する場合は、volume_type を指定する必要があり" "ます。" msgid "volume_type must be provided when creating a volume in a group." 
msgstr "" "ボリュームをグループに作成する場合は、volume_type を指定する必要があります。" msgid "volume_type_id cannot be None" msgstr "volume_type_id を None に設定することはできません。" msgid "volume_types assigned" msgstr "割り当てられたボリュームタイプ" #, python-format msgid "volume_types must be provided to create consistency group %(name)s." msgstr "" "整合性グループ %(name)s を作成するには、volume_types を指定する必要がありま" "す。" msgid "volumes assigned" msgstr "割り当てられたボリューム" msgid "volumes changed" msgstr "変更されたボリューム" ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315576.8271158 cinder-27.0.0/cinder/locale/zh_CN/0000775000175000017500000000000000000000000016573 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315577.0831182 cinder-27.0.0/cinder/locale/zh_CN/LC_MESSAGES/0000775000175000017500000000000000000000000020360 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/locale/zh_CN/LC_MESSAGES/cinder.po0000664000175000017500000054631400000000000022201 0ustar00zuulzuul00000000000000# Translations template for cinder. # Copyright (C) 2015 ORGANIZATION # This file is distributed under the same license as the cinder project. # # Translators: # FIRST AUTHOR , 2011 # Kai Zhang , 2013 # Kai Zhang , 2013 # openstack , 2013 # Shuwen SUN , 2014 # Tom Fifield , 2013 # 颜海峰 , 2014 # Yu Zhang, 2014 # 颜海峰 , 2014 # Andreas Jaeger , 2016. #zanata # Eric Lei <1165970798@qq.com>, 2016. #zanata # howard lee , 2016. #zanata # Research and Development Center UnitedStack , 2022. #zanata msgid "" msgstr "" "Project-Id-Version: cinder VERSION\n" "Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" "POT-Creation-Date: 2025-07-07 22:42+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2022-04-11 10:27+0000\n" "Last-Translator: Research and Development Center UnitedStack " "\n" "Language: zh_CN\n" "Plural-Forms: nplurals=1; plural=0;\n" "Generated-By: Babel 2.0\n" "X-Generator: Zanata 4.3.3\n" "Language-Team: Chinese (China)\n" #, python-format msgid "\t%s" msgstr "\t%s" #, python-format msgid "" "\n" "OpenStack Cinder version: %(version)s\n" msgstr "" "\n" "OpenStack Cinder 版本:%(version)s\n" #, python-format msgid " but size is now %d" msgstr "但现在大小为 %d" msgid " or " msgstr "或者" #, python-format msgid "" "%(driver)s manage_existing cannot manage a volume connected to hosts. Please " "disconnect this volume from existing hosts before importing" msgstr "" "%(driver)s manage_existing 无法管理已连接至主机的卷。在进行导入之前,请从现有" "主机断开与此卷的连接" #, python-format msgid "" "%(err)s\n" "result: %(res)s." msgstr "" "%(err)s\n" "结果:%(res)s。" #, python-format msgid "%(exception)s: %(explanation)s" msgstr "发生异常 %(exception)s:原因 %(explanation)s" #, python-format msgid "" "%(fun)s: Failed with unexpected CLI output.\n" " Command: %(cmd)s\n" " stdout: %(out)s\n" " stderr: %(err)s" msgstr "" "%(fun)s:失败,产生了意外 CLI 输出。\n" "命令:%(cmd)s\n" "标准输出:%(out)s\n" "标准错误:%(err)s" #, python-format msgid "%(host)-25s\t%(availability_zone)-15s" msgstr "%(host)-25s\t%(availability_zone)-15s" #, python-format msgid "%(host)-25s\t%(zone)-15s" msgstr "%(host)-25s\t%(zone)-15s" #, python-format msgid "" "%(message)s\n" "Status Code: %(_status)s\n" "Body: %(_body)s" msgstr "" "%(message)s\n" "状态码: %(_status)s\n" "主体: %(_body)s" #, python-format msgid "%(msg)s And %(num)s services from the cluster were also removed." 
msgstr "源于集群的 %(msg)s 和 %(num)s 服务也被删除。" #, python-format msgid "" "%(msg_type)s: creating NetworkPortal: ensure port %(port)d on ip %(ip)s is " "not in use by another service." msgstr "" "%(msg_type)s:创建 NetworkPortal:请确保 IP %(ip)s 上的端口 %(port)d未被另一" "项服务使用。" #, python-format msgid "%(name)s cannot be all spaces." msgstr "%(name)s不能是所有空间" #, python-format msgid "%(new_size)s < current size %(size)s" msgstr "%(new_size)s < 当前大小 %(size)s" #, python-format msgid "%(reason)s" msgstr "%(reason)s" #, python-format msgid "" "%(worker_name)s value of %(workers)d is invalid, must be greater than 0." msgstr "%(workers)d 的 %(worker_name)s 值无效,必须大于 0。" #, python-format msgid "%s \"data\" is not in result." msgstr "结果中没有 %s “数据”。" #, python-format msgid "%s assigned" msgstr "%s 已分配" #, python-format msgid "" "%s cannot be accessed. Verify that GPFS is active and file system is mounted." msgstr "%s 无法访问。请验证 GPFS 是否处于活动状态并且文件系统是否已安装。" #, python-format msgid "%s cannot be resized using clone operation as it contains no blocks." msgstr "%s 无法使用克隆操作来调整大小,因为它未包含任何块。" #, python-format msgid "" "%s cannot be resized using clone operation as it is hosted on compressed " "volume" msgstr "%s 无法使用克隆操作来调整大小,因为它托管于压缩卷上" #, python-format msgid "%s changed" msgstr "%s 已更改" #, python-format msgid "%s configuration option is not set." msgstr "未设置 %s 配置选项。" #, python-format msgid "%s does not exist." msgstr "%s 不存在。" #, python-format msgid "%s is not a directory." msgstr "%s 不是一个目录。" #, python-format msgid "%s is not installed" msgstr "未安装 %s" #, python-format msgid "%s is not installed." msgstr "未安装 %s。" #, python-format msgid "%s is not set" msgstr "未设置 %s " #, python-format msgid "%s is not set and is required for the replication device to be valid." msgstr "未设置 %s,它是使复制设备生效所必需的。" #, python-format msgid "%s is not set." msgstr "未设置 %s。" #, python-format msgid "%s must be a valid raw or qcow2 image." msgstr "%s 必须为有效的 raw 映像或 qcow2 映像。" #, python-format msgid "%s must be an absolute path." msgstr "%s 必须为绝对路径。" #, python-format msgid "%s not set." msgstr "未设置 %s。" #, python-format msgid "'%(key)s = %(value)s'" msgstr "'%(key)s = %(value)s'" #, python-format msgid "" "'%(prot)s' is invalid for flashsystem_connection_protocol in config file. " "valid value(s) are %(enabled)s." msgstr "" "对于配置文件中的 flashsystem_connection_protocol,“%(prot)s”无效。有效值为 " "%(enabled)s。" msgid "'active' must be present when writing snap_info." msgstr "写入 snap_info 时,状态必须为“活动”。" msgid "'consistencygroup_id' must be specified" msgstr "必须指定“consistencygroup_id”" msgid "'qemu-img info' parsing failed." msgstr "'qemu-img info'解析失败" msgid "400 Bad Request" msgstr "400 错误请求" msgid "401 Unauthorized Error" msgstr "401 未授权错误" msgid "404 Not Found Error" msgstr "404 资源未找到错误" msgid "413 Request entity too large" msgstr "413 请求实体过大" msgid "A concurrent, possibly contradictory, request has been made." msgstr "发出了并行的可能对立的请求。" msgid "A readonly volume must be attached as readonly." msgstr "只读卷必须以只读方式进行挂载。" msgid "A valid secondary target MUST be specified in order to failover." msgstr "必须指定有效辅助以进行故障转移。" #, python-format msgid "" "API Version String %(version)s is of invalid format. Must be of format " "MajorNum.MinorNum." msgstr "" "API 版本字符串 %(version)s 为无效格式。必须为以下格式:MajorNum.MinorNum。" #, python-format msgid "API response: %s" msgstr "API 响应:%s" #, python-format msgid "API version %(version)s is not supported on this method." msgstr "API 版本 %(version)s 在此方法上不受支持。" msgid "Access list not available for public volume types." 
msgstr "对于公用卷类型,未提供访问列表。" msgid "Activate or deactivate QoS error." msgstr "激活或者取消激活 QoS 时发生错误。" msgid "Activate snapshot error." msgstr "激活快照时发生错误。" #, python-format msgid "Activating zone set failed: (Zone set=%(cfg_name)s error=%(err)s)." msgstr "激活区域集失败(区域集为 %(cfg_name)s,发生的错误为 %(err)s)。" msgid "Add FC port to host error." msgstr "将 FC 端口添加至主机时发生错误。" msgid "Add fc initiator to array error." msgstr "将 FC 启动程序添加至阵列时发生错误。" msgid "Add initiator to array error." msgstr "将启动程序添加至阵列时发生错误。" msgid "Add lun to cache error." msgstr "将 LUN 添加至高速缓存时发生错误。" msgid "Add lun to partition error." msgstr "将 LUN 添加至分区时发生错误。" msgid "Add mapping view error." msgstr "添加映射视图时发生错误。" msgid "Add new host error." msgstr "添加新主机时发生错误。" msgid "Add port to port group error." msgstr "向端口组添加端口时出错。" #, python-format msgid "" "All the specified storage pools to be managed do not exist. Please check " "your configuration. Non-existent pools: %s" msgstr "所指定的要管理的所有存储池都不存在。请检查配置。不存在的池:%s" msgid "An API version request must be compared to a VersionedMethod object." msgstr "必须将 API 版本请求与 VersionedMethod 对象进行比较。" msgid "An error has occurred during backup operation" msgstr "在备份过程中出现一个错误" #, python-format msgid "" "An error occurred during the LUNcopy operation. LUNcopy name: " "%(luncopyname)s. LUNcopy status: %(luncopystatus)s. LUNcopy state: " "%(luncopystate)s." msgstr "" "在 LUNcopy 操作期间发生错误。LUNcopy 名称为 %(luncopyname)s。LUNcopy 状态为 " "%(luncopystatus)s。LUNcopy 状态为 %(luncopystate)s。" msgid "An unknown error occurred." msgstr "发生未知错误。" msgid "An unknown exception occurred." msgstr "发生未知异常。" msgid "Append port group description error." msgstr "附加端口组描述时出错。" #, python-format msgid "" "Applying the zones and cfgs to the switch failed (error code=%(err_code)s " "error msg=%(err_msg)s." msgstr "" "对交换机应用 zones 和 cfgs 失败(错误代码为 %(err_code)s,错误消息为 " "%(err_msg)s)。" #, python-format msgid "Array does not exist or is offline. Current status of array is %s." msgstr "阵列不存在或者处于脱机状态。阵列的当前状态为 %s。" msgid "Associate host to hostgroup error." msgstr "使主机与主机组关联时发生错误。" msgid "Associate host to mapping view error." msgstr "使主机与映射视图关联时发生错误。" msgid "Associate initiator to host error." msgstr "使启动程序与主机相关联时发生错误。" msgid "Associate lun to QoS error." msgstr "将 LUN 关联至 QoS 时出错。" msgid "Associate lun to lungroup error." msgstr "使 LUN 与 LUN 组关联时发生错误。" msgid "Associate lungroup to mapping view error." msgstr "使 LUN 组与映射视图关联时发生错误。" msgid "Associate portgroup to mapping view error." msgstr "使端口组与映射视图关联时发生错误。" msgid "At least one valid iSCSI IP address must be set." msgstr "必须至少设置一个有效 iSCSI IP 地址。" #, python-format msgid "Attempt to transfer %s with invalid auth key." msgstr "请尝试使用有效的认证密钥传输 %s。" #, python-format msgid "Attribute: %s not found." msgstr "属性: %s 未找到" #, python-format msgid "Authentication failed, verify the switch credentials, error code %s." msgstr "认证失败,请验证交换机凭证,错误代码:%s。" #, python-format msgid "Availability zone '%(s_az)s' is invalid." msgstr "可用性区域“%(s_az)s”无效。" msgid "Available categories:" msgstr "可用的类别:" #, python-format msgid "Backend doesn't exist (%(backend)s)" msgstr "后端不存在 (%(backend)s)" msgid "Backend storage did not configure fiber channel target." msgstr "后端存储器未配置光纤通道目标。" msgid "Backing up an in-use volume must use the force flag." msgstr "备份一个正在使用的卷时必须使用强制标志。" #, python-format msgid "Backup %(backup_id)s could not be found." msgstr "找不到备份 %(backup_id)s。" msgid "Backup RBD operation failed" msgstr "备份RBD操作失败" msgid "Backup already exists in database." 
msgstr "数据库中已存在备份。" #, python-format msgid "Backup driver reported an error: %(reason)s" msgstr "备份驱动程序已报告错误:%(reason)s" msgid "Backup operation of an encrypted volume failed." msgstr "已加密卷的备份操作失败。" #, python-format msgid "Backup should only have one snapshot but instead has %s" msgstr "备份应该仅具有一个快照,但是具有 %s 个快照" msgid "Backup status must be available" msgstr "备份状态必须为“可用”" #, python-format msgid "Backup status must be available and not %s." msgstr "备份状态必须为“可用”,不能是 %s。" msgid "Backup status must be available or error" msgstr "备份状态必须为“可用”或“错误”" msgid "Backup to be restored has invalid size" msgstr "要复原的备份具有无效大小" #, python-format msgid "Bad HTTP response status %(status)s" msgstr "错误的HTTP响应状态 %(status)s" #, python-format msgid "Bad key(s) in quota set: %s" msgstr "配额集中的键不正确:%s" #, python-format msgid "" "Bad or unexpected response from the storage volume backend API: %(data)s" msgstr "从存储卷后端 API 返回了不正确或意外的响应:%(data)s" msgid "Bad response from Datera API" msgstr "来自 Datera API 的响应不正确" msgid "Bad response from SolidFire API" msgstr "来自SolidFire API的错误响应" #, python-format msgid "Bad response from XMS, %s" msgstr "来自 XMS 的响应不正确,%s" msgid "Binary" msgstr "二进制" msgid "Blank components" msgstr "空组件" #, python-format msgid "Brocade Fibre Channel Zoning CLI error: %(reason)s" msgstr "Brocade 光纤通道分区 CLI 错误:%(reason)s" #, python-format msgid "Brocade Fibre Channel Zoning HTTP error: %(reason)s" msgstr "Brocade 光纤通道分区 HTTP 错误:%(reason)s" msgid "CHAP secret should be 12-16 bytes." msgstr "CHAP 密钥应为 12 到 16 个字节。" #, python-format msgid "" "CLI Exception output:\n" " command: %(cmd)s\n" " stdout: %(out)s\n" " stderr: %(err)s" msgstr "" "CLI 异常输出:\n" "命令:%(cmd)s\n" "标准输出:%(out)s\n" "标准错误:%(err)s" #, python-format msgid "" "CLI Exception output:\n" " command: %(cmd)s\n" " stdout: %(out)s\n" " stderr: %(err)s." msgstr "" "CLI 异常输出:\n" "命令:%(cmd)s\n" "标准输出:%(out)s\n" "标准错误:%(err)s。" msgid "" "CMMVC6071E The VDisk-to-host mapping was not created because the VDisk is " "already mapped to a host.\n" "\"" msgstr "" "CMMVC6071E 未创建 VDisk 至主机的映射,因为该 VDisk 已映射至主机。\n" "\"" #, python-format msgid "CPG (%s) doesn't exist on array" msgstr "数组中不存在 CPG (%s)" msgid "Can not add FC port to host." msgstr "无法将 FC 端口添加至主机。" #, python-format msgid "Can not get pool info. pool: %s" msgstr "无法获取池信息。池:%s" #, python-format msgid "Can not translate %s to integer." msgstr "无法把 %s 转换成整数" msgid "Can't decode backup record." msgstr "无法将备份记录解码。" #, python-format msgid "Can't extend replication volume, volume: %(id)s" msgstr "无法扩展复制卷,卷:%(id)s" msgid "Can't find LUN on the array, please check the source-name or source-id." msgstr "在阵列上找不到 LUN,请检查 source-name 或 source-id。" #, python-format msgid "Can't find cache name on the array, cache name is: %(name)s." msgstr "在阵列上找不到高速缓存名称,高速缓存名称为 %(name)s。" #, python-format msgid "Can't find partition name on the array, partition name is: %(name)s." msgstr "在阵列上找不到分区名称,分区名称为 %(name)s。" #, python-format msgid "Can't find service: %s" msgstr "找不到以下服务:%s" msgid "" "Can't find snapshot on array, please check the source-name or source-id." msgstr "在阵列上找不到快照,请检查 source-name 或 source-id。" msgid "Can't find the same host id from arrays." msgstr "在阵列中找不到同一主机标识。" #, python-format msgid "Can't get volume id. Volume name: %s." msgstr "无法获取卷标识,卷名:%s。" #, python-format msgid "Can't import LUN %(lun_id)s to Cinder. LUN type mismatched." msgstr "无法将 LUN %(lun_id)s 导入至 Cinder。LUN 类型不匹配。" #, python-format msgid "Can't import LUN %s to Cinder. Already exists in a HyperMetroPair." 
msgstr "无法将 LUN %s 导入至 Cinder。它在 HyperMetroPair 中已存在。" #, python-format msgid "Can't import LUN %s to Cinder. Already exists in a LUN copy task." msgstr "无法将 LUN %s 导入至 Cinder。它在 LUN 复制任务中已存在。" #, python-format msgid "Can't import LUN %s to Cinder. Already exists in a LUN group." msgstr "无法将 LUN %s 导入至 Cinder。它在 LUN 组中已存在。" #, python-format msgid "Can't import LUN %s to Cinder. Already exists in a LUN mirror." msgstr "无法将 LUN %s 导入至 Cinder。它在 LUN 镜像中已存在。" #, python-format msgid "Can't import LUN %s to Cinder. Already exists in a SplitMirror." msgstr "无法将 LUN %s 导入至 Cinder。它在 SplitMirror 中已存在。" #, python-format msgid "Can't import LUN %s to Cinder. Already exists in a migration task." msgstr "无法将 LUN %s 导入至 Cinder。它在迁移任务中已存在。" #, python-format msgid "" "Can't import LUN %s to Cinder. Already exists in a remote replication task." msgstr "无法将 LUN %s 导入至 Cinder。它在远程复制任务中已存在。" #, python-format msgid "Can't import LUN %s to Cinder. LUN status is not normal." msgstr "无法将 LUN %s 导入至 Cinder。LUN 状态异常。" #, python-format msgid "Can't import snapshot %s to Cinder. Snapshot doesn't belong to volume." msgstr "无法将快照 %s 导入至 Cinder。快照不属于卷。" #, python-format msgid "Can't import snapshot %s to Cinder. Snapshot is exposed to initiator." msgstr "无法将快照 %s 导入至 Cinder。快照已展示给启动程序。" #, python-format msgid "" "Can't import snapshot %s to Cinder. Snapshot status is not normal or running " "status is not online." msgstr "无法将 LUN %s 导入至 Cinder。快照状态异常或运行状态并非“在线”。" msgid "Can't parse backup record." msgstr "无法解析备份记录。" #, python-format msgid "Canceled backup %(back_id)s restore on volume %(vol_id)s" msgstr "已取消从卷 %(vol_id)s 恢复备份 %(back_id)s" #, python-format msgid "" "Cannot add volume %(volume_id)s to group %(group_id)s because it has no " "volume type." msgstr "无法将卷 %(volume_id)s 添加至组 %(group_id)s,因为它没有任何卷类型。" #, python-format msgid "" "Cannot add volume %(volume_id)s to group %(group_id)s because it is already " "in group %(orig_group)s." msgstr "" "无法将卷 %(volume_id)s 添加至组 %(group_id)s,因为它已经存在于组 " "%(orig_group)s 中。" #, python-format msgid "" "Cannot add volume %(volume_id)s to group %(group_id)s because volume cannot " "be found." msgstr "无法将卷 %(volume_id)s 添加至组 %(group_id)s,因为找不到该卷。" #, python-format msgid "" "Cannot add volume %(volume_id)s to group %(group_id)s because volume does " "not exist." msgstr "无法将卷 %(volume_id)s 添加至组 %(group_id)s,因为该卷不存在。" #, python-format msgid "" "Cannot add volume %(volume_id)s to group %(group_id)s because volume is in " "an invalid state: %(status)s. Valid states are: %(valid)s." msgstr "" "无法将卷 %(volume_id)s 添加至组 %(group_id)s,因为该卷处于无效状态:" "%(status)s。以下是有效状态:%(valid)s。" #, python-format msgid "" "Cannot add volume %(volume_id)s to group %(group_id)s because volume type " "%(volume_type)s is not supported by the group." msgstr "" "无法将卷 %(volume_id)s 添加至组 %(group_id)s,因为该组不支持卷类型 " "%(volume_type)s。" msgid "Cannot change VF context in the session." msgstr "无法更改会话中的 VF 上下文。" #, python-format msgid "" "Cannot change VF context, specified VF is not available in the manageable VF " "list %(vf_list)s." msgstr "无法更改 VF 上下文,指定的 VF 在管理 VF 列表 %(vf_list)s 中不可用。" msgid "Cannot create encryption specs. Volume type in use." msgstr "无法创建加密规范。卷类型在使用中。" #, python-format msgid "" "Cannot create group %(group)s because snapshot %(snap)s is not in a valid " "state. Valid states are: %(valid)s." msgstr "" "无法创建组 %(group)s,因为快照 %(snap)s 未处于有效状态。以下是有效状态:" "%(valid)s。" #, python-format msgid "" "Cannot create group %(group)s because source volume %(source_vol)s is not in " "a valid state. 
Valid states are: %(valid)s." msgstr "" "无法创建组 %(group)s,因为源卷 %(source_vol)s 未处于有效状态。有效状态为 " "%(valid)s。" #, python-format msgid "Cannot create group_type with name %(name)s and specs %(group_specs)s" msgstr "无法创建名称为 %(name)s 且规格为 %(group_specs)s 的组类型。" #, python-format msgid "" "Cannot create image of disk format: %s. Only vmdk disk format is accepted." msgstr "无法创建磁盘格式为 %s 映像。仅接受 vmdk 磁盘格式。" #, python-format msgid "Cannot create volume of size %s: not multiple of 8GB." msgstr "无法创建大小为 %s 的卷:该大小不是 8GB 的倍数。" #, python-format msgid "Cannot create volume_type with name %(name)s and specs %(extra_specs)s" msgstr "无法创建名称为 %(name)s 规格为 %(extra_specs)s 的卷类型。" msgid "Cannot delete encryption specs. Volume type in use." msgstr "无法删除加密规范。卷类型在使用中。" msgid "Cannot find migration task." msgstr "找不到迁移任务。" #, python-format msgid "Cannot get mcs_id by channel id: %(channel_id)s." msgstr "无法通过通道标识 %(channel_id)s 获取 mcs_id。" #, python-format msgid "" "Cannot provide both 'cgsnapshot_id' and 'source_cgid' to create consistency " "group %(name)s from source." msgstr "" "无法同时提供“cgsnapshot_id”和“source_cgid”以从源创建一致性组 %(name)s。" msgid "Cannot register resource" msgstr "无法注册资源" msgid "Cannot register resources" msgstr "无法注册多个资源" #, python-format msgid "" "Cannot remove volume %(volume_id)s from group %(group_id)s because it is not " "in the group." msgstr "无法从组 %(group_id)s 移除卷 %(volume_id)s,因为它不在该组中。" #, python-format msgid "" "Cannot remove volume %(volume_id)s from group %(group_id)s because volume is " "in an invalid state: %(status)s. Valid states are: %(valid)s." msgstr "" "无法从组 %(group_id)s 移除卷 %(volume_id)s,因为该卷处于无效状态:" "%(status)s。以下是有效状态:%(valid)s。" #, python-format msgid "Cannot retype from HPE3PARDriver to %s." msgstr "无法将 HPE3PARDriver 转型为 %s。" msgid "Cannot retype from one 3PAR array to another." msgstr "一个 3PAR 阵列无法通过 retype 操作变为另一个阵列。" msgid "Cannot retype to a CPG in a different domain." msgstr "无法执行 retype 操作,以变为另一个域中的 CPG。" msgid "Cannot retype to a snap CPG in a different domain." msgstr "无法执行 retype 操作,以变为另一个域中的 SNAP CPG。" msgid "Cannot save group_snapshots changes in group object update." msgstr "组对象更新时无法保存组快照变更。" msgid "Cannot save volume_types changes in group object update." msgstr "组对象更新时无法保存卷类型变更。" msgid "Cannot save volumes changes in group object update." msgstr "组对象更新时无法保存卷变更。" msgid "Cannot update encryption specs. Volume type in use." msgstr "无法更新加密规范。卷类型在使用中。" #, python-format msgid "" "Cannot update group %(group_id)s because no valid name, description, " "add_volumes, or remove_volumes were provided." msgstr "" "无法更新组 %(group_id)s,因为未提供任何有效名称、描述、add_volumes 或 " "remove_volumes。" #, python-format msgid "Cannot update group_type %(id)s" msgstr "无法更新组类型 %(id)s。" #, python-format msgid "Cannot update volume_type %(id)s" msgstr "无法更新 volume_type %(id)s" #, python-format msgid "CgSnapshot %(cgsnapshot_id)s could not be found." msgstr "找不到 Cg 快照 %(cgsnapshot_id)s。" msgid "Change hostlun id error." msgstr "更改 hostlun 标识时出错。" msgid "Change lun priority error." msgstr "更改 LUN 优先级时发生错误。" msgid "Change lun smarttier policy error." msgstr "更改 LUN smarttier 策略时发生错误。" #, python-format msgid "" "Change would make usage less than 0 for the following resources: %(unders)s" msgstr "对于下列资源,更改将导致使用量小于 0:%(unders)s" msgid "Check hostgroup associate error." msgstr "检查主机组关联时发生错误。" msgid "Check initiator added to array error." msgstr "检查已添加至阵列的启动程序时发生错误。" msgid "Check initiator associated to host error." msgstr "检查与主机相关联的启动程序时发生错误。" msgid "Check lungroup associate error." 
msgstr "检查 LUN 组关联时发生错误。" msgid "Check portgroup associate error." msgstr "检查端口组关联时发生错误。" msgid "Chunk size is not multiple of block size for creating hash." msgstr "区块大小不是用于创建散列的块大小的倍数。" #, python-format msgid "Cisco Fibre Channel Zoning CLI error: %(reason)s" msgstr "Cisco 光纤通道分区 CLI 错误:%(reason)s" #, python-format msgid "" "Clone type '%(clone_type)s' is invalid; valid values are: '%(full_clone)s' " "and '%(linked_clone)s'." msgstr "" "克隆“%(clone_type)s”无效;有效值为:“%(full_clone)s”和“%(linked_clone)s”。" msgid "Cluster" msgstr "集群" #, python-format msgid "Cluster %(id)s could not be found." msgstr "无法找到标识为 %(id)s 的集群。" #, python-format msgid "Cluster %(id)s still has hosts." msgstr "集群 %(id)s 仍存在主机。" #, python-format msgid "Cluster %(name)s already exists." msgstr "集群 %(name)s 已存在。" #, python-format msgid "Cluster %s successfully removed." msgstr "成功删除集群 %s。" #, python-format msgid "CommandLineHelper._wait_for_condition: %s timeout." msgstr "CommandLineHelper._wait_for_condition:%s 超时。" msgid "Compression Enabler is not installed. Can not create compressed volume." msgstr "未安装压缩启用程序。无法创建压缩卷。" #, python-format msgid "Compute cluster: %(cluster)s not found." msgstr "找不到计算集群 %(cluster)s。" msgid "Condition has no field." msgstr "条件没有任何字段。" msgid "Configuration error: dell_sc_ssn not set." msgstr "配置错误:未设置 dell_sc_ssn。" msgid "Configuration is not found." msgstr "找不到配置。" #, python-format msgid "Configuration value %s is not set." msgstr "未设置配置值 %s。" #, python-format msgid "" "Conflicting QoS specifications in volume type %s: when QoS spec is " "associated to volume type, legacy \"netapp:qos_policy_group\" is not allowed " "in the volume type extra specs." msgstr "" "卷类型 %s 中存在冲突的 QoS 规范:当 QoS 规范与卷类型相关联时,不允许卷类型额" "外规范中存在旧的“netapp:qos_policy_group”。" #, python-format msgid "Connection to glance failed: %(reason)s" msgstr "连接glance失败: %(reason)s" #, python-format msgid "Connection to swift failed: %(reason)s" msgstr "连接 Swift 失败:%(reason)s" #, python-format msgid "Connector does not provide: %s" msgstr "连接器未提供:%s" #, python-format msgid "Connector doesn't have required information: %(missing)s" msgstr "连接器没有必需信息:%(missing)s" #, python-format msgid "ConsistencyGroup %(consistencygroup_id)s could not be found." msgstr "找不到一致性组 %(consistencygroup_id)s。" msgid "Container" msgstr "容器" msgid "Container size smaller than required file size." msgstr "容器大小小于所需文件大小。" #, python-format msgid "Converted to %(f1)s, but format is now %(f2)s" msgstr "已转换为 %(f1)s,但现在格式为 %(f2)s" #, python-format msgid "Converted to %(vol_format)s, but format is now %(file_format)s" msgstr "已转换为 %(vol_format)s,但现在格式为 %(file_format)s" #, python-format msgid "Converted to raw, but format is now %s" msgstr "转化为裸格式,但目前格式是 %s" #, python-format msgid "Converted to raw, but format is now %s." msgstr "已转换为原始文件,但现在格式为 %s。" msgid "Coordinator uninitialized." msgstr "协调程序未初始化。" #, python-format msgid "" "Copy volume task failed: convert_to_base_volume: id=%(id)s, " "status=%(status)s." msgstr "" "“复制卷”任务失败:convert_to_base_volume:id=%(id)s,status=%(status)s。" #, python-format msgid "" "Copy volume task failed: create_cloned_volume id=%(id)s, status=%(status)s." msgstr "复制卷任务失败:create_cloned_volume id=%(id)s,status=%(status)s。" #, python-format msgid "Copying metadata from %(src_type)s %(src_id)s to %(vol_id)s." msgstr "正在将元数据从 %(src_type)s %(src_id)s 复制到 %(vol_id)s。" msgid "" "Could not determine which Keystone endpoint to use. This can either be set " "in the service catalog or with the cinder.conf config option " "'backup_swift_auth_url'." 
msgstr "" "无法确定要使用的 Keystone 端点。可在服务目录中设置此项,也可使用 cinder.conf " "配置选项 “backup_swift_auth_url”设置此项。" msgid "" "Could not determine which Swift endpoint to use. This can either be set in " "the service catalog or with the cinder.conf config option 'backup_swift_url'." msgstr "" "无法确定要使用的 Swift 端点。可在服务目录中设置此项,也可使用 cinder.conf 配" "置选项 “backup_swift_url”设置此项。" #, python-format msgid "Could not find GPFS cluster id: %s." msgstr "找不到 GPFS 集群标识:%s。" #, python-format msgid "Could not find GPFS file system device: %s." msgstr "找不到 GPFS 文件系统设备:%s。" #, python-format msgid "Could not find config at %(path)s" msgstr "在 %(path)s 找不到配置文件。" #, python-format msgid "Could not find iSCSI export for volume %s" msgstr "对于卷 %s,找不到 iSCSI 导出" #, python-format msgid "Could not find key in output of command %(cmd)s: %(out)s." msgstr "在命令 %(cmd)s 的输出 %(out)s 中找不到键。" #, python-format msgid "Could not find parameter %(param)s" msgstr "找不到参数 %(param)s" #, python-format msgid "Could not find target %s" msgstr "找不到目标 %s" #, python-format msgid "Could not find unique snapshot %(snap)s on volume %(vol)s." msgstr "在卷 %(vol)s 上找不到唯一快照 %(snap)s。" msgid "Could not get system name." msgstr "未能获取系统名称。" #, python-format msgid "" "Could not read information for snapshot %(name)s. Code: %(code)s. Reason: " "%(reason)s" msgstr "无法读取快照 %(name)s 的信息。代码:%(code)s。原因:%(reason)s" #, python-format msgid "Could not restore configuration file %(file_path)s: %(exc)s" msgstr "无法复原配置文件 %(file_path)s:%(exc)s" #, python-format msgid "Could not save configuration to %(file_path)s: %(exc)s" msgstr "未能将配置保存到 %(file_path)s:%(exc)s" #, python-format msgid "Could not start consistency group snapshot %s." msgstr "无法启动一致性组快照 %s。" #, python-format msgid "Couldn't find ORM model for Persistent Versioned Object %s." msgstr "无法为持久版本对象%s找到ORM模型。" #, python-format msgid "Couldn't remove cluster %s because it doesn't exist." msgstr "无法删除集群 %s,因为它不存在。" #, python-format msgid "Couldn't remove cluster %s because it still has hosts." msgstr "无法删除集群 %s,因为它仍存在主机。" #, python-format msgid "Counter %s not found" msgstr "找不到计数器 %s" msgid "Create QoS policy error." msgstr "创建 QoS 策略时发生错误。" #, python-format msgid "" "Create backup aborted, expected backup status %(expected_status)s but got " "%(actual_status)s." msgstr "" "备份创建已异常中止,需要的备份状态为 %(expected_status)s,但实际为 " "%(actual_status)s。" #, python-format msgid "" "Create backup aborted, expected volume status %(expected_status)s but got " "%(actual_status)s." msgstr "" "备份创建已异常中止,需要的卷状态为 %(expected_status)s,但实际为 " "%(actual_status)s。" msgid "Create group failed." msgstr "创建组失败。" msgid "Create hostgroup error." msgstr "创建主机组时发生错误。" #, python-format msgid "Create hypermetro error. %s." msgstr "创建 hypermetro 错误。%s。" msgid "Create lun error." msgstr "创建 LUN 时出错。" msgid "Create lun migration error." msgstr "创建 LUN 迁移时发生错误。" msgid "Create luncopy error." msgstr "创建 LUNcopy 时发生错误。" msgid "Create lungroup error." msgstr "创建 LUN 组时发生错误。" msgid "Create manager volume flow failed." msgstr "创建管理器卷流失败。" msgid "Create port group error." msgstr "创建端口组时出错。" msgid "Create replication error." msgstr "创建复制错误。" #, python-format msgid "Create replication pair failed. Error: %s." msgstr "创建复制对失败。错误:%s。" msgid "Create snapshot error." msgstr "创建快照时发生错误。" #, python-format msgid "Create volume error. Because %s." msgstr "创建卷错误。因为 %s。" msgid "Create volume failed." msgstr "创建卷失败。" #, python-format msgid "" "Creating and activating zone set failed: (Zone set=%(cfg_name)s " "error=%(err)s)." 
msgstr "创建并激活区域集失败(区域集为 %(cfg_name)s,发生的错误为 %(err)s)。" #, python-format msgid "" "Creating and activating zone set failed: (Zone set=%(zoneset)s " "error=%(err)s)." msgstr "创建并激活区域集失败(区域集为 %(zoneset)s,发生的错误为 %(err)s)。" #, python-format msgid "" "Dedup is a valid provisioning type, but requires WSAPI version " "'%(dedup_version)s' version '%(version)s' is installed." msgstr "" "去重是有效的供应类型,但是要求安装了 WSAPI 版本“%(dedup_version)s”版" "本“%(version)s”。" msgid "Default group type can not be found." msgstr "找不到缺省组类型。" #, python-format msgid "" "Default quota for resource: %(res)s is set by the default quota flag: " "quota_%(res)s, it is now deprecated. Please use the default quota class for " "default quota." msgstr "" "资源 %(res)s 的缺省配额由缺省配额标记 quota_%(res)s 设置,现在不推荐使用。请" "对缺省配额使用缺省配额类。 " msgid "Default volume type can not be found." msgstr "找不到缺省卷类型。" msgid "Delete LUNcopy error." msgstr "删除 LUNcopy 时发生错误。" msgid "Delete QoS policy error." msgstr "删除 QoS 策略时发生错误。" msgid "Delete associated lun from lungroup error." msgstr "从 LUN 组中删除相关联的 LUN 时发生错误。" #, python-format msgid "" "Delete backup aborted, the backup service currently configured " "[%(configured_service)s] is not the backup service that was used to create " "this backup [%(backup_service)s]." msgstr "" "备份删除已异常中止,当前配置的备份服务 [%(configured_service)s] 不是已用来创" "建此备份的备份服务 [%(backup_service)s]。" msgid "Delete group failed." msgstr "删除组失败。" msgid "Delete hostgroup error." msgstr "删除主机组时发生错误。" msgid "Delete hostgroup from mapping view error." msgstr "从映射视图删除主机组时发生错误。" msgid "Delete lun error." msgstr "删除 LUN 时发生错误。" msgid "Delete lun migration error." msgstr "删除 LUN 迁移时发生错误。" msgid "Delete lungroup error." msgstr "删除 LUN 组时发生错误。" msgid "Delete lungroup from mapping view error." msgstr "从映射视图删除 LUN 组时发生错误。" msgid "Delete mapping view error." msgstr "删除映射视图时发生错误。" msgid "Delete port group error." msgstr "删除端口组时出错。" msgid "Delete portgroup from mapping view error." msgstr "从映射视图删除端口组时发生错误。" msgid "Delete snapshot error." msgstr "删除快照时发生错误。" #, python-format msgid "Delete snapshot of volume not supported in state: %s." msgstr "不支持对处于以下状态的卷删除快照:%s。" #, python-format msgid "" "Delete_backup aborted, expected backup status %(expected_status)s but got " "%(actual_status)s." msgstr "" "Delete_backup 已异常中止,需要的备份状态为 %(expected_status)s,但实际为 " "%(actual_status)s。" msgid "Deleting volume from database and skipping rpc." msgstr "正在从数据库删除卷并跳过 RPC。" #, python-format msgid "Deleting volume metadata is not allowed for volumes in %s status." msgstr "当卷状态为 %s 时,不允许删除该卷的元数据。" #, python-format msgid "Deleting zones failed: (command=%(cmd)s error=%(err)s)." msgstr "删除区域失败:(命令为 %(cmd)s,发生的错误为 %(err)s)。" msgid "Dell API 2.1 or later required for Consistency Group support" msgstr "要提供“一致性组”支持,需要 Dell API 2.1 或更高版本" msgid "" "Dell Cinder driver configuration error replication not supported with direct " "connect." msgstr "直接连接不支持 Dell Cinder 驱动程序配置错误复制。" #, python-format msgid "Dell Cinder driver configuration error replication_device %s not found" msgstr "找不到 Dell Cinder 驱动程序配置错误 replication_device %s" #, python-format msgid "Destination has migration_status %(stat)s, expected %(exp)s." msgstr "目标具有 migration_status %(stat)s,原应为 %(exp)s。" msgid "Destination volume not mid-migration." msgstr "目标卷未在迁移中。" msgid "" "Detach volume failed: More than one attachment, but no attachment_id " "provided." msgstr "拆离卷失败:存在多个连接,但是未提供 attachment_id。" msgid "Detach volume from instance and then try again." 
msgstr "请断开卷与实例的连接,然后再次进行尝试。" #, python-format msgid "Did not find expected column in %(fun)s: %(hdr)s." msgstr "%(fun)s 中找不到需要的列:%(hdr)s。" #, python-format msgid "Did not find the expected key %(key)s in %(fun)s: %(raw)s." msgstr "在 %(fun)s 中找不到期望的键 %(key)s:%(raw)s。" msgid "Down Hosts" msgstr "关闭主机" #, python-format msgid "" "Downlevel GPFS Cluster Detected. GPFS Clone feature not enabled in cluster " "daemon level %(cur)s - must be at least at level %(min)s." msgstr "" "检测到下层 GPFS 集群。在集群守护程序级别 %(cur)s 中未启用“GPFS 克隆”功能 - 必" "须至少处于级别 %(min)s。" #, python-format msgid "Driver initialize connection failed (error: %(err)s)." msgstr "驱动程序初始化连接失败(错误:%(err)s)。" msgid "Driver must implement initialize_connection" msgstr "驱动程序必须实现 initialize_connection" #, python-format msgid "" "Driver successfully decoded imported backup data, but there are missing " "fields (%s)." msgstr "驱动程序已成功将所导入的备份数据解码,但是缺少字段 (%s)。" #, python-format msgid "" "Either 'cgsnapshot_id' or 'source_cgid' must be provided to create " "consistency group %(name)s from source." msgstr "" "必须提供“cgsnapshot_id”或者“source_cgid”,以从源创建一致性组 %(name)s。" #, python-format msgid "" "Either SLO: %(slo)s or workload %(workload)s is invalid. Examine previous " "error statement for valid values." msgstr "" "SLO %(slo)s 或工作负载 %(workload)s 无效。请查看先前的错误说明以了解有效值。" msgid "Enables QoS." msgstr "启用 QoS。" msgid "Enables compression." msgstr "启用压缩。" msgid "Enables replication." msgstr "启用复制。" msgid "Ensure that configfs is mounted at /sys/kernel/config." msgstr "请确保 configfs 安装在 /sys/kernel/config 处。" msgid "Error connecting to ceph cluster." msgstr "连接至 ceph 集群时出错。" #, python-format msgid "Error connecting via ssh: %s" msgstr "通过 ssh 进行连接时出错:%s" msgid "Error deleting replay profile." msgstr "删除重放概要文件时出错。" #, python-format msgid "Error deleting volume %(ssn)s: %(volume)s" msgstr "删除卷 %(ssn)s 时出错:%(volume)s " #, python-format msgid "Error during evaluator parsing: %(reason)s" msgstr "在评估程序解析期间,发生错误:%(reason)s" #, python-format msgid "" "Error enabling iSER for NetworkPortal: please ensure that RDMA is supported " "on your iSCSI port %(port)d on ip %(ip)s." msgstr "" "为 NetworkPortal 启用 iSER 时出错:请确保 RDMA 在 IP %(ip)s 上的 iSCSI 端口 " "%(port)d 中受支持。" #, python-format msgid "Error encountered during cleanup of a failed attach: %(ex)s" msgstr "在清除失败的连接期间遇到错误:%(ex)s" #, python-format msgid "Error executing command via ssh: %s" msgstr "通过 ssh 执行命令时发生错误:%s" #, python-format msgid "Error extending volume: %(reason)s" msgstr "扩展卷时出错:%(reason)s" #, python-format msgid "Error finding %(name)s." msgstr "查找 %(name)s 时出错。" #, python-format msgid "Error in SolidFire API response: data=%(data)s" msgstr "SolidFire API响应里发生错误:data=%(data)s" msgid "Error not a KeyError." msgstr "错误并非 KeyError。" msgid "Error not a TypeError." msgstr "错误并非 TypeError。" #, python-format msgid "Error occurred when creating group_snapshot %s." msgstr "创建 group_snapshot %s 时发生了错误。" #, python-format msgid "Error occurred when deleting group snapshot %s." msgstr "删除组快照%s 时出现错误。" #, python-format msgid "Error occurred when deleting group_snapshot %s." msgstr "删除 group_snapshot %s 时发生了错误。" #, python-format msgid "Error occurred when updating group %s." msgstr "更新组 %s 时发生了错误。" msgid "Error retrieving volume size" msgstr "检索卷大小时出错" #, python-format msgid "Error while authenticating with switch: %s." msgstr "向交换机认证时出错:%s。" #, python-format msgid "Error while changing VF context %s." msgstr "更改 VF 上下文 %s 时出错。" #, python-format msgid "Error while checking the firmware version %s." 
msgstr "检查固件版本 %s 时出错。" #, python-format msgid "Error while checking transaction status: %s" msgstr "检查事务状态时发生错误:%s" #, python-format msgid "Error while checking whether VF is available for management %s." msgstr "检查 VF 对管理 %s 是否可用时出错。" #, python-format msgid "" "Error while connecting the switch %(switch_id)s with protocol %(protocol)s. " "Error: %(error)s." msgstr "" "连接带有协议 %(protocol)s 的交换机 %(switch_id)s 时出错。错误:%(error)s。" #, python-format msgid "Error while creating authentication token: %s" msgstr "创建认证令牌时出错:%s" #, python-format msgid "Error while getting data via ssh: (command=%(cmd)s error=%(err)s)." msgstr "" "通过 ssh 获取数据时发生错误:(命令为 %(cmd)s,发生的错误为 %(err)s)。" #, python-format msgid "Error while getting nvp value: %s." msgstr "获取 nvp 值时出错:%s。" #, python-format msgid "Error while getting session information %s." msgstr "获取会话信息 %s 时出错。" #, python-format msgid "Error while parsing the data: %s." msgstr "解析数据时出错:%s。" #, python-format msgid "Error while querying page %(url)s on the switch, reason %(error)s." msgstr "在交换机上查询页面 %(url)s 时出错,原因:%(error)s。" #, python-format msgid "" "Error while removing the zones and cfgs in the zone string: %(description)s." msgstr "移除区域字符串中的 zones 和 cgfs 时出错:%(description)s。" #, python-format msgid "Error while requesting %(service)s API." msgstr "请求 %(service)s API 时出错。" #, python-format msgid "Error while running zoning CLI: (command=%(cmd)s error=%(err)s)." msgstr "运行分区 CLI 时发生错误:(命令为 %(cmd)s,发生的错误为 %(err)s)。" #, python-format msgid "" "Error while updating the new zones and cfgs in the zone string. Error " "%(description)s." msgstr "更新区域字符串中的新 zones 和 cgfs 时出错。错误:%(description)s。" #, python-format msgid "" "Error while updating the zones in the zone string. Error %(description)s." msgstr "更新区域字符串中的 zones 时出错。错误:%(description)s。" msgid "Error writing field to database" msgstr "将字段写至数据库时出错。" msgid "Exceeded the limit of snapshots per volume" msgstr "超出每个卷的快照数限制" #, python-format msgid "Exception in _select_ds_for_volume: %s." msgstr "_select_ds_for_volume %s 中发生异常。" #, python-format msgid "Exception while forming the zone string: %s." msgstr "构建区域字符串时发生异常:%s。" #, python-format msgid "Exception: %s" msgstr "异常:%s" #, python-format msgid "Expected integer for node_count, svcinfo lsiogrp returned: %(node)s." msgstr "期望 node_count 的值为整数,已返回 svcinfo lsiogrp:%(node)s。" #, python-format msgid "Expected no output from CLI command %(cmd)s, got %(out)s." msgstr "期望 CLI 命令 %(cmd)s 没有任何输出,但是获得了 %(out)s。" #, python-format msgid "" "Expected single vdisk returned from lsvdisk when filtering on vdisk_UID. " "%(count)s were returned." msgstr "" "在 vdisk_UID 上进行过滤时,从 lsvdisk 返回了所需的单个 vdisk。返回了 " "%(count)s。" #, python-format msgid "Expected volume size was %d" msgstr "需要的卷大小为 %d" #, python-format msgid "" "Export backup aborted, expected backup status %(expected_status)s but got " "%(actual_status)s." msgstr "" "备份导出已异常中止,需要的备份状态为 %(expected_status)s,但实际为 " "%(actual_status)s。" #, python-format msgid "" "Export record aborted, the backup service currently configured " "[%(configured_service)s] is not the backup service that was used to create " "this backup [%(backup_service)s]." msgstr "" "记录导出已异常中止,当前配置的备份服务 [%(configured_service)s] 不是已用来创" "建此备份的备份服务 [%(backup_service)s]。" msgid "Extend volume error." msgstr "扩展卷时发生错误。" msgid "Extend volume not implemented" msgstr "扩展卷未实现" msgid "FC is the protocol but wwpns are not supplied by OpenStack." 
msgstr "FC 为协议,但 OpenStack 未提供 wwpns。" #, python-format msgid "Faield to unassign %(volume)s" msgstr "无法取消分配 %(volume)s" #, python-format msgid "Failed adding connection for fabric=%(fabric)s: Error: %(err)s" msgstr "对于光纤网 %(fabric)s,未能添加连接:发生错误:%(err)s" #, python-format msgid "Failed getting active zone set from fabric %s." msgstr "通过光纤网络 %s 获取活动区域集失败。" #, python-format msgid "Failed getting details for pool %s." msgstr "获取池 %s 的详细信息失败。" #, python-format msgid "Failed removing connection for fabric=%(fabric)s: Error: %(err)s" msgstr "对于光纤网 %(fabric)s,未能移除连接:发生错误:%(err)s" #, python-format msgid "Failed to Extend Volume %(volname)s" msgstr "未能扩展卷 %(volname)s" #, python-format msgid "Failed to Login to 3PAR (%(url)s) because %(err)s" msgstr "未能登录到 3PAR (%(url)s),因为存在 %(err)s" msgid "Failed to access active zoning configuration." msgstr "未能访问活动分区配置。" #, python-format msgid "Failed to access zoneset status:%s" msgstr "未能访问区域集状态:%s" msgid "Failed to add or update zoning configuration." msgstr "未能添加或更新分区配置。" msgid "Failed to add zoning configuration." msgstr "未能添加分区配置。" #, python-format msgid "Failed to associate qos_specs: %(specs_id)s with type %(type_id)s." msgstr "未能使 qos_specs %(specs_id)s 与类型 %(type_id)s 关联。" #, python-format msgid "Failed to attach iSCSI target for volume %(volume_id)s." msgstr "未能针对卷 %(volume_id)s 连接 iSCSI 目标。" #, python-format msgid "Failed to backup volume metadata - %s" msgstr "未能备份卷元数据 - %s" #, python-format msgid "" "Failed to backup volume metadata - Metadata backup object 'backup.%s.meta' " "already exists" msgstr "未能备份卷元数据 - 元数据备份对象“backup.%s.meta”已存在" #, python-format msgid "Failed to connect to %(vendor_name)s Array %(host)s: %(err)s" msgstr "未能连接至 %(vendor_name)s 阵列 %(host)s:%(err)s" msgid "Failed to connect to Dell REST API" msgstr "无法连接至 Dell REST API" #, python-format msgid "Failed to copy image to volume: %(reason)s" msgstr "未能将映像复制到卷:%(reason)s" #, python-format msgid "Failed to copy metadata to volume: %(reason)s" msgstr "未能复制元数据到卷:%(reason)s" msgid "" "Failed to copy volume to image as image quota has been met. Please delete " "images or have your limit increased, then try again." msgstr "" "由于镜像配额限制,将卷拷贝到镜像中的操作失败。请删除镜像或增加镜像配额,然后" "再尝试。" msgid "Failed to copy volume, destination device unavailable." msgstr "未能复制卷,目标设备不可用。" msgid "Failed to copy volume, source device unavailable." msgstr "未能复制卷,源设备不可用。" #, python-format msgid "Failed to create IG, %s" msgstr "未能创建映像 %s" #, python-format msgid "Failed to create Volume Group: %(vg_name)s" msgstr "未能创建卷组: %(vg_name)s" msgid "Failed to create api volume flow." msgstr "未能创建 api 卷流。" #, python-format msgid "Failed to create cg snapshot %(id)s due to %(reason)s." msgstr "由于 %(reason)s,未能创建 cg 快照 %(id)s。" #, python-format msgid "Failed to create consistency group %(id)s due to %(reason)s." msgstr "由于 %(reason)s,未能创建一致性组 %(id)s。" #, python-format msgid "Failed to create consistency group %(id)s:%(ret)s." msgstr "未能创建一致性组 %(id)s:%(ret)s。" #, python-format msgid "Failed to create consistency group: %(cgid)s. Error: %(excmsg)s." msgstr "未能创建一致性组:%(cgid)s。错误为 %(excmsg)s。" #, python-format msgid "" "Failed to create host: %(name)s. Please check if it exists on the array." msgstr "未能创建主机:%(name)s。请检查它在阵列上是否存在。" #, python-format msgid "Failed to create hostgroup: %(name)s. Check if it exists on the array." msgstr "未能创建主机组:%(name)s。请检查它在阵列上是否存在。" msgid "Failed to create iqn." msgstr "未能创建 IQN。" #, python-format msgid "Failed to create iscsi target for volume %(volume_id)s." 
msgstr "未能针对卷 %(volume_id)s 创建 iscsi 目标。" msgid "Failed to create manage existing flow." msgstr "未能创建 manage_existing 流。" msgid "Failed to create manage_existing flow." msgstr "未能创建 manage_existing 流。" msgid "Failed to create map." msgstr "未能创建映射。" msgid "Failed to create partition." msgstr "未能创建分区。" #, python-format msgid "Failed to create qos_specs: %(name)s with specs %(qos_specs)s." msgstr "未能通过规范 %(qos_specs)s 创建 qos_specs:%(name)s。" msgid "Failed to create replica." msgstr "未能创建副本。" msgid "Failed to create scheduler manager volume flow" msgstr "未能创建调度程序管理器卷流" #, python-format msgid "Failed to create snapshot %s" msgstr "未能创建快照 %s" msgid "Failed to create snapshot." msgstr "未能创建快照。" #, python-format msgid "Failed to create south bound connector for %s." msgstr "无法为 %s 创建南向连接器。" #, python-format msgid "Failed to create thin pool, error message was: %s" msgstr "未能创建瘦池,错误消息如下:%s" #, python-format msgid "Failed to create volume %s" msgstr "未能创建卷 %s" #, python-format msgid "Failed to delete cgsnapshot %(id)s due to %(reason)s." msgstr "由于 %(reason)s,未能删除 cg 快照 %(id)s。" #, python-format msgid "Failed to delete consistency group %(id)s due to %(reason)s." msgstr "由于 %(reason)s,未能删除一致性组 %(id)s。" #, python-format msgid "" "Failed to delete fileset for consistency group %(cgname)s. Error: %(excmsg)s." msgstr "未能针对一致性组 %(cgname)s 删除文件集。错误为 %(excmsg)s。" msgid "Failed to delete iqn." msgstr "未能删除 IQN。" msgid "Failed to delete map." msgstr "未能删除映射。" msgid "Failed to delete partition." msgstr "未能删除分区。" msgid "Failed to delete replica." msgstr "未能删除副本。" #, python-format msgid "Failed to delete snapshot %s" msgstr "未能删除快照 %s" msgid "Failed to delete snapshot." msgstr "未能删除快照。" #, python-format msgid "Failed to detach iSCSI target for volume %(volume_id)s." msgstr "未能针对卷 %(volume_id)s 与 iSCSI 目标断开连接。" msgid "Failed to disassociate qos specs." msgstr "未能取消关联 Qos 规范。" #, python-format msgid "Failed to disassociate qos_specs: %(specs_id)s with type %(type_id)s." msgstr "未能使 qos_specs %(specs_id)s 与类型 %(type_id)s 取消关联。" msgid "Failed to execute common command." msgstr "未能执行常见命令。" #, python-format msgid "Failed to export for volume: %(reason)s" msgstr "输出卷失败:%(reason)s" #, python-format msgid "Failed to extend volume %(name)s, Error msg: %(msg)s." msgstr "无法扩展卷 %(name)s,错误消息:%(msg)s。" msgid "Failed to find QoSnode" msgstr "找不到 QoSnode" msgid "Failed to find Storage Center" msgstr "找不到存储中心" msgid "Failed to find account for volume." msgstr "未能查找卷的帐户。" #, python-format msgid "Failed to find fileset for path %(path)s, command output: %(cmdout)s." msgstr "对于路径 %(path)s,未能找到文件集,命令输出:%(cmdout)s。" #, python-format msgid "Failed to find group snapshot named: %s" msgstr "找不到名为 %s 的组快照" #, python-format msgid "Failed to find host %s." msgstr "未能找到主机 %s。" #, python-format msgid "Failed to get LUN target details for the LUN %s" msgstr "未能获取 LUN %s 的 LUN 目标详细信息" #, python-format msgid "Failed to get LUN target list for the LUN %s" msgstr "未能获取 LUN %s 的 LUN 目标列表" #, python-format msgid "Failed to get Partition ID for volume %(volume_id)s." msgstr "未能获取卷 %(volume_id)s 的分区标识。" #, python-format msgid "Failed to get Raid Snapshot ID from snapshot: %(snapshot_id)s." msgstr "未能从快照 %(snapshot_id)s 获取 RAID 快照标识。" msgid "Failed to get SplitMirror." msgstr "无法获取 SplitMirror。" #, python-format msgid "Failed to get all associations of qos specs %s" msgstr "未能获取 qos 规范 %s 的所有关联" msgid "Failed to get channel info." msgstr "未能获取通道信息。" #, python-format msgid "Failed to get code level (%s)." 
msgstr "未能获取代码级别 (%s)。" msgid "Failed to get device info." msgstr "未能获取设备信息。" #, python-format msgid "Failed to get domain because CPG (%s) doesn't exist on array." msgstr "未能获取域,因为阵列上不存在 CPG (%s)。" msgid "Failed to get iqn info." msgstr "未能获取 IQN 信息。" msgid "Failed to get license info." msgstr "未能获取许可证信息。" msgid "Failed to get lv info." msgstr "未能获取 lv 信息。" msgid "Failed to get map info." msgstr "未能获取映射信息。" msgid "Failed to get migration task." msgstr "无法获取迁移任务。" msgid "Failed to get name server info." msgstr "未能获取名称服务器信息。" msgid "Failed to get network info." msgstr "未能获取网络信息。" #, python-format msgid "Failed to get new part id in new pool: %(pool_id)s." msgstr "未能在新池 %(pool_id)s 中获取新的部件标识。" msgid "Failed to get partition info." msgstr "未能获取分区信息。" msgid "Failed to get replica info." msgstr "未能获取副本信息。" msgid "Failed to get show fcns database info." msgstr "未能获取显示 fcns 数据库信息。" msgid "Failed to get snapshot info." msgstr "未能获取快照信息。" #, python-format msgid "Failed to get target IQN for the LUN %s" msgstr "未能获取 LUN %s 的目标 IQN" msgid "Failed to get target LUN of SplitMirror." msgstr "无法获取 SplitMirror 的目标 LUN。" #, python-format msgid "Failed to get target portal for the LUN %s" msgstr "未能获取 LUN %s 的目标门户网站" msgid "Failed to get targets" msgstr "未能获取目标" msgid "Failed to get wwn info." msgstr "未能获取 WWN 信息。" #, python-format msgid "Failed to link fileset for the share %(cgname)s. Error: %(excmsg)s." msgstr "未能针对共享项 %(cgname)s 链接文件集。错误为 %(excmsg)s。" #, python-format msgid "Failed to log on %s Array (invalid login?)." msgstr "未能登录到 %s 阵列(无效登录?)。" msgid "Failed to login with all rest URLs." msgstr "未能使用所有 REST URL 进行登录。" msgid "Failed to manage api volume flow." msgstr "未能管理 API 卷流。" #, python-format msgid "" "Failed to manage existing %(type)s %(name)s, because reported size %(size)s " "was not a floating-point number." msgstr "未能管理现有 %(type)s %(name)s,因为所报告的大小 %(size)s不是浮点数。" #, python-format msgid "" "Failed to manage existing volume %(name)s, because of error in getting " "volume size." msgstr "未能管理现有卷 %(name)s,因为获取卷大小时出错。" #, python-format msgid "" "Failed to manage existing volume %(name)s, because rename operation failed: " "Error msg: %(msg)s." msgstr "未能管理现有卷 %(name)s,因为重命名操作失败:错误消息为 %(msg)s。" #, python-format msgid "" "Failed to manage existing volume %(name)s, because reported size %(size)s " "was not a floating-point number." msgstr "未能管理现有卷 %(name)s,因为已报告的大小 %(size)s 不是浮点数。" #, python-format msgid "" "Failed to manage existing volume due to the pool of the volume to be managed " "does not match the backend pool. Pool of the volume to be managed is " "%(vdisk_pool)s. Pool of the backend is %(backend_pool)s." msgstr "" "无法管理现有卷,因为要管理的卷的池与后端池不匹配。要管理的卷的池为 " "%(vdisk_pool)s。后端的池为 %(backend_pool)s。" msgid "" "Failed to manage existing volume due to the volume to be managed is " "compress, but the volume type chosen is not compress." msgstr "无法管理现有卷,要管理的卷为压缩卷,但所选卷类型并非压缩卷。" msgid "" "Failed to manage existing volume due to the volume to be managed is not " "compress, but the volume type chosen is compress." msgstr "无法管理现有卷,要管理的卷并非压缩卷,但所选卷类型为压缩卷。" msgid "" "Failed to manage existing volume due to the volume to be managed is not in a " "valid I/O group." msgstr "无法管理现有卷,因为要管理的卷未包含在有效 I/O 组中。" msgid "" "Failed to manage existing volume due to the volume to be managed is thick, " "but the volume type chosen is thin." msgstr "无法管理现有卷,因为要管理的卷为厚卷,但所选卷类型为薄卷。" msgid "" "Failed to manage existing volume due to the volume to be managed is thin, " "but the volume type chosen is thick." 
msgstr "无法管理现有卷,要管理的卷为薄卷,但所选卷类型为厚卷。" #, python-format msgid "" "Failed to parse CLI output:\n" " command: %(cmd)s\n" " stdout: %(out)s\n" " stderr: %(err)s." msgstr "" "未能解析 CLI 输出:\n" "命令:%(cmd)s\n" "标准输出:%(out)s\n" "标准错误:%(err)s。" msgid "" "Failed to parse the configuration option 'glance_catalog_info', must be in " "the form ::" msgstr "" "解析配置选项“glance_catalog_info”失败,必须为以下格式::" ":" msgid "" "Failed to parse the configuration option 'keystone_catalog_info', must be in " "the form ::" msgstr "" "解析配置选项“swift_catalog_info”失败,必须为以下格式::" ":" msgid "" "Failed to parse the configuration option 'swift_catalog_info', must be in " "the form ::" msgstr "" "解析配置选项“swift_catalog_info”失败,必须为以下格式::" ":" #, python-format msgid "Failed to remove export for volume %(volume)s: %(reason)s" msgstr "未能针对卷 %(volume)s 移除导出:%(reason)s" #, python-format msgid "Failed to remove iscsi target for volume %(volume_id)s." msgstr "未能针对卷 %(volume_id)s 除去 iscsi 目标。" #, python-format msgid "" "Failed to rename logical volume %(name)s, error message was: %(err_msg)s" msgstr "未能重命名逻辑卷 %(name)s,错误消息如下:%(err_msg)s" #, python-format msgid "Failed to retrieve active zoning configuration %s" msgstr "无法检索处于活动状态的分区配置 %s" #, python-format msgid "Failed to retrieve attachments for volume %(name)s" msgstr "获取卷 %(name)s 的挂载信息失败。" #, python-format msgid "" "Failed to set CHAP authentication for target IQN %(iqn)s. Details: %(ex)s" msgstr "无法为目标 IQN %(iqn)s 设置 CHAP 认证。详细信息:%(ex)s" #, python-format msgid "Failed to set QoS for existing volume %(name)s, Error msg: %(msg)s." msgstr "未能对现有卷 %(name)s 设置 QoS,错误消息:%(msg)s。" msgid "Failed to set attribute 'Incoming user' for SCST target." msgstr "未能对 SCST 目标设置属性“新用户”。" msgid "Failed to set partition." msgstr "未能设置分区。" #, python-format msgid "" "Failed to set permissions for the consistency group %(cgname)s. Error: " "%(excmsg)s." msgstr "未能针对一致性组 %(cgname)s 设置许可权。错误为 %(excmsg)s。" #, python-format msgid "" "Failed to unlink fileset for consistency group %(cgname)s. Error: %(excmsg)s." msgstr "未能针对一致性组 %(cgname)s 取消链接文件集。错误为 %(excmsg)s。" #, python-format msgid "Failed to update metadata for volume: %(reason)s" msgstr "未能更新卷的元数据:%(reason)s" msgid "Failed to update or delete zoning configuration" msgstr "未能更新或删除分区配置" msgid "Failed to update or delete zoning configuration." msgstr "无法更新或删除分区配置。" #, python-format msgid "Failed to update qos_specs: %(specs_id)s with specs %(qos_specs)s." msgstr "未能通过规范 %(qos_specs)s 更新 qos_specs:%(specs_id)s。" msgid "Failed to update quota usage while retyping volume." msgstr "对卷进行转型时,更新配额使用率失败" msgid "Failed to update snapshot." msgstr "无法更新快照。" #, python-format msgid "" "Failed updating volume %(vol_id)s metadata using the provided %(src_type)s " "%(src_id)s metadata" msgstr "未能使用提供的 %(src_type)s %(src_id)s 元数据更新卷 %(vol_id)s 元数据" #, python-format msgid "Failure getting LUN info for %s." msgstr "针对 %s 获取 LUN 信息时发生故障。" #, python-format msgid "Failure moving new cloned LUN to %s." msgstr "将新克隆的 LUN 移至 %s 时发生故障。" #, python-format msgid "Failure staging LUN %s to tmp." msgstr "将 LUN %s 登台至临时文件夹时发生故障。" #, python-format msgid "Fexvisor failed to add volume %(id)s due to %(reason)s." msgstr "由于 %(reason)s,Fexvisor 未能添加卷 %(id)s。" #, python-format msgid "" "Fexvisor failed to join the volume %(vol)s in the group %(group)s due to " "%(ret)s." msgstr "由于 %(ret)s,Fexvisor 未能将卷 %(vol)s加入组 %(group)s。" #, python-format msgid "" "Fexvisor failed to remove the volume %(vol)s in the group %(group)s due to " "%(ret)s." 
msgstr "由于 %(ret)s,Fexvisor 未能移除组 %(group)s中的卷 %(vol)s。" #, python-format msgid "Fexvisor failed to remove volume %(id)s due to %(reason)s." msgstr "由于 %(reason)s,Fexvisor 未能移除卷 %(id)s。" #, python-format msgid "Fibre Channel SAN Lookup failure: %(reason)s" msgstr "光纤通道 SAN 查找失败:%(reason)s" msgid "Fibre Channel Zone Manager not initialized" msgstr "光纤通道区域管理器未初始化。" #, python-format msgid "Fibre Channel Zone operation failed: %(reason)s" msgstr "“光纤通道区域”操作失败:%(reason)s" #, python-format msgid "Fibre Channel connection control failure: %(reason)s" msgstr "光纤通道连接控制失败:%(reason)s" #, python-format msgid "File %(file_path)s could not be found." msgstr "找不到文件 %(file_path)s。" #, python-format msgid "File already exists at %s." msgstr "%s 处已存在文件。" #, python-format msgid "File already exists at: %s" msgstr "在以下位置处,已存在文件:%s" msgid "Find host in hostgroup error." msgstr "在主机组中查找主机时发生错误。" msgid "Find host lun id error." msgstr "查找主机 LUN 标识时发生错误。" msgid "Find lun group from mapping view error." msgstr "从映射视图查找 LUN 组时发生错误。" msgid "Find mapping view error." msgstr "查找映射视图时发生错误。" msgid "Find portgroup error." msgstr "查找端口组时发生错误。" msgid "Find portgroup from mapping view error." msgstr "从映射视图查找端口组时发生错误。" #, python-format msgid "" "Flash Cache Policy requires WSAPI version '%(fcache_version)s' version " "'%(version)s' is installed." msgstr "" "闪存高速缓存策略要求安装了 WSAPI 版本“%(fcache_version)s”版本“%(version)s”。" #, python-format msgid "Flexvisor assign volume failed.:%(id)s:%(status)s." msgstr "Flexvisor 分配卷失败:%(id)s:%(status)s。" #, python-format msgid "Flexvisor assign volume failed:%(id)s:%(status)s." msgstr "Flexvisor 分配卷失败:%(id)s:%(status)s。" #, python-format msgid "" "Flexvisor could not find volume %(id)s snapshot in the group %(vgid)s " "snapshot %(vgsid)s." msgstr "Flexvisor 在组 %(vgid)s 快照 %(vgsid)s 中找不到卷 %(id)s 快照。" #, python-format msgid "Flexvisor create volume failed.:%(volumeid)s:%(status)s." msgstr "Flexvisor 创建卷失败:%(volumeid)s:%(status)s。" #, python-format msgid "Flexvisor failed deleting volume %(id)s: %(status)s." msgstr "Flexvisor 无法删除卷 %(id)s:%(status)s。" #, python-format msgid "Flexvisor failed to add volume %(id)s to group %(cgid)s." msgstr "Flexvisor 未能将卷 %(id)s 添加至组 %(cgid)s。" #, python-format msgid "" "Flexvisor failed to assign volume %(id)s due to unable to query status by " "event id." msgstr "Flexvisor 无法分配卷 %(id)s,因为无法按事件标识查询状态。 " #, python-format msgid "Flexvisor failed to assign volume %(id)s: %(status)s." msgstr "Flexvisor 无法分配卷 %(id)s:%(status)s。" #, python-format msgid "Flexvisor failed to assign volume %(volume)s iqn %(iqn)s." msgstr "Flexvisor 未能分配卷 %(volume)s iqn %(iqn)s。" #, python-format msgid "Flexvisor failed to clone volume %(id)s: %(status)s." msgstr "Flexvisor 无法克隆卷 %(id)s:%(status)s。" #, python-format msgid "Flexvisor failed to clone volume (failed to get event) %(id)s." msgstr "Flexvisor 无法克隆卷(无法获取事件)%(id)s。" #, python-format msgid "Flexvisor failed to create snapshot for volume %(id)s: %(status)s." msgstr "Flexvisor 无法对卷 %(id)s 创建快照:%(status)s。" #, python-format msgid "" "Flexvisor failed to create snapshot for volume (failed to get event) %(id)s." msgstr "Flexvisor 无法对卷 %(id)s 创建快照(无法获取事件)。 " #, python-format msgid "Flexvisor failed to create volume %(id)s in the group %(vgid)s." msgstr "Flexvisor 未能在组 %(vgid)s 中创建卷 %(id)s。" #, python-format msgid "Flexvisor failed to create volume %(volume)s: %(status)s." msgstr "Flexvisor 无法创建卷 %(volume)s:%(status)s。" #, python-format msgid "Flexvisor failed to create volume (get event) %s." 
msgstr "Flexvisor 无法创建卷(获取事件)%s。" #, python-format msgid "Flexvisor failed to create volume from snapshot %(id)s: %(status)s." msgstr "Flexvisor 未能从快照 %(id)s 创建卷:%(status)s。" #, python-format msgid "Flexvisor failed to create volume from snapshot %(id)s:%(status)s." msgstr "Flexvisor 无法从快照 %(id)s 创建卷:%(status)s。" #, python-format msgid "" "Flexvisor failed to create volume from snapshot (failed to get event) %(id)s." msgstr "Flexvisor 无法从快照创建卷(无法获取事件)%(id)s。 " #, python-format msgid "Flexvisor failed to delete snapshot %(id)s: %(status)s." msgstr "Flexvisor 无法删除快照 %(id)s:%(status)s。" #, python-format msgid "Flexvisor failed to delete snapshot (failed to get event) %(id)s." msgstr "Flexvisor 无法删除快照(无法获取事件)%(id)s。" #, python-format msgid "Flexvisor failed to delete volume %(id)s: %(status)s." msgstr "Flexvisor 无法删除卷 %(id)s:%(status)s。" #, python-format msgid "Flexvisor failed to extend volume %(id)s: %(status)s." msgstr "Flexvisor 未能扩展卷 %(id)s:%(status)s。" #, python-format msgid "Flexvisor failed to extend volume %(id)s:%(status)s." msgstr "Flexvisor 无法扩展卷 %(id)s:%(status)s。" #, python-format msgid "Flexvisor failed to extend volume (failed to get event) %(id)s." msgstr "Flexvisor 无法扩展卷(无法获取事件)%(id)s。" #, python-format msgid "Flexvisor failed to get pool info %(id)s: %(status)s." msgstr "Flexvisor 无法获取池信息 %(id)s:%(status)s。" #, python-format msgid "" "Flexvisor failed to get snapshot id of volume %(id)s from group %(vgid)s." msgstr "Flexvisor 未能从组 %(vgid)s 获取卷 %(id)s 的快照标识。" #, python-format msgid "Flexvisor failed to remove volume %(id)s from group %(cgid)s." msgstr "Flexvisor 未能从组 %(cgid)s 中移除卷 %(id)s。" #, python-format msgid "Flexvisor failed to spawn volume from snapshot %(id)s:%(status)s." msgstr "Flexvisor 无法从快照 %(id)s 衍生卷:%(status)s。" #, python-format msgid "" "Flexvisor failed to spawn volume from snapshot (failed to get event) %(id)s." msgstr "Flexvisor 无法从快照衍生卷(无法获取事件)%(id)s。 " #, python-format msgid "Flexvisor failed to unassign volume %(id)s: %(status)s." msgstr "Flexvisor 无法取消分配卷 %(id)s:%(status)s。" #, python-format msgid "Flexvisor failed to unassign volume (get event) %(id)s." msgstr "Flexvisor 无法取消分配卷(获取事件)%(id)s。" #, python-format msgid "Flexvisor failed to unassign volume:%(id)s:%(status)s." msgstr "Flexvisor 未能取消分配卷 %(id)s:%(status)s。" #, python-format msgid "Flexvisor unable to find the source volume %(id)s info." msgstr "Flexvisor 找不到源卷 %(id)s 信息。" #, python-format msgid "Flexvisor unassign volume failed:%(id)s:%(status)s." msgstr "Flexvisor 取消分配卷失败:%(id)s:%(status)s。" #, python-format msgid "Flexvisor volume %(id)s failed to join group %(vgid)s." msgstr "Flexvisor 卷 %(id)s 未能加入组 %(vgid)s。" #, python-format msgid "Folder %s does not exist in Nexenta Store appliance" msgstr "文件夹 %s 在 Nexenta 存储设备中不存在" #, python-format msgid "GPFS is not running, state: %s." msgstr "GPFS 没有在运行,状态:%s。" msgid "Get FC ports by port group error." msgstr "按端口组获取 FC 端口时出错。" msgid "Get FC ports from array error." msgstr "从阵列中获取 FC 端口时发生错误。" msgid "Get FC target wwpn error." msgstr "获取 FC 目标 WWPN 时发生错误。" msgid "Get HyperMetroPair error." msgstr "获取 HyperMetroPair 时出错。" msgid "Get LUN group by view error." msgstr "按视图获取 LUN 组时出错。" msgid "Get LUNcopy information error." msgstr "获取 LUNcopy 信息时发生错误。" msgid "Get QoS id by lun id error." msgstr "通过 LUN 标识获取 QoS 标识时发生错误。" msgid "Get QoS information error." msgstr "获取 QoS 信息时发生错误。" msgid "Get QoS policy error." msgstr "获取 QoS 策略时发生错误。" msgid "Get SplitMirror error." msgstr "获取 SplitMirror 时出错。" msgid "Get active client failed." 
msgstr "获取活动客户机失败。" msgid "Get array info error." msgstr "获取阵列信息时出错。" msgid "Get cache by name error." msgstr "按名称获取高速缓存时发生错误。" msgid "Get connected free FC wwn error." msgstr "获取已连接的空闲 FC wwn 时发生错误。" msgid "Get engines error." msgstr "获取引擎时出错。" msgid "Get host initiators info failed." msgstr "获取主机启动程序信息失败。" msgid "Get hostgroup information error." msgstr "获取主机组信息时发生错误。" msgid "" "Get iSCSI port info error, please check the target IP configured in huawei " "conf file." msgstr "" "获取 iSCSI 端口信息时发生错误,请检查 huawei conf 文件中所配置的目标 IP。" msgid "Get iSCSI port information error." msgstr "获取 iSCSI 端口信息时发生错误。" msgid "Get iSCSI target port error." msgstr "获取 iSCSI 目标端口时发生错误。" msgid "Get lun id by name error." msgstr "通过名称获取 LUN 标识时出错。" msgid "Get lun migration task error." msgstr "获取 LUN 迁移任务时发生错误。" msgid "Get lungroup id by lun id error." msgstr "通过 LUN 标识获取 LUN 组标识时发生错误。" msgid "Get lungroup information error." msgstr "获取 LUN 组信息时发生错误。" msgid "Get manageable snapshots not implemented." msgstr "获取易管理快照的功能未实现。" msgid "Get manageable volumes not implemented." msgstr "获取易管理卷的功能未实现。" msgid "Get migration task error." msgstr "获取迁移任务时出错。" msgid "Get pair failed." msgstr "获取对失败。" msgid "Get partition by name error." msgstr "按名称获取分区时发生错误。" msgid "Get partition by partition id error." msgstr "按分区标识获取分区时发生错误。" msgid "Get port group by view error." msgstr "按视图获取端口组时出错。" msgid "Get port group error." msgstr "获取端口组时出错。" msgid "Get port groups by port error." msgstr "按端口获取端口组时出错。" msgid "Get ports by port group error." msgstr "按端口组获取端口时出错。" msgid "Get remote device info failed." msgstr "获取远程设备信息失败。" msgid "Get remote devices error." msgstr "获取远程设备时出错。" msgid "Get smartcache by cache id error." msgstr "按高速缓存标识获取 smartcache 时发生错误。" msgid "Get snapshot error." msgstr "获取快照时出错。" msgid "Get snapshot id error." msgstr "获取快照标识时发生错误。" msgid "Get target IP error." msgstr "获取目标 IP 时发生错误。" msgid "Get target LUN of SplitMirror error." msgstr "获取 SplitMirror 的目标 LUN 时出错。" msgid "Get views by port group error." msgstr "按端口组获取视图时出错。" msgid "Get volume by name error." msgstr "按名称获取卷时发生错误。" msgid "Get volume error." msgstr "获取卷时发生错误。" #, python-format msgid "" "Glance metadata cannot be updated, key %(key)s exists for volume id " "%(volume_id)s" msgstr "无法更新 Glance 元数据,对于卷标识 %(volume_id)s,键 %(key)s 存在" #, python-format msgid "Glance metadata for volume/snapshot %(id)s cannot be found." msgstr "Glance中无法找到卷/镜像 %(id)s 的元数据" #, python-format msgid "Google Cloud Storage api failure: %(reason)s" msgstr "Google 云存储器 API 故障:%(reason)s" #, python-format msgid "Google Cloud Storage connection failure: %(reason)s" msgstr "Google 云存储器连接故障:%(reason)s" #, python-format msgid "Google Cloud Storage oauth2 failure: %(reason)s" msgstr "Google 云存储器 oauth2 故障:%(reason)s" #, python-format msgid "Group %(group_id)s could not be found." msgstr "无法找到名为 %(group_id)s 的组。" #, python-format msgid "" "Group %s still contains volumes. The delete-volumes flag is required to " "delete it." msgstr "组 %s 仍然包含卷。需要 delete-volumes 标记,以将其删除。" #, python-format msgid "" "Group Type %(group_type_id)s deletion is not allowed with groups present " "with the type." msgstr "当存在类型为 %(group_type_id)s 的组时,不允许删除该组类型。" #, python-format msgid "Group Type %(group_type_id)s has no specs with key %(group_specs_key)s." msgstr "组类型 %(group_type_id)s 没有键 %(group_specs_key)s 对应的规格说明。" #, python-format msgid "Group Type %(id)s already exists." msgstr "组类型 %(id)s 已存在。" #, python-format msgid "Group Type %(type_id)s has no extra spec with key %(id)s." 
msgstr "组类型%(type_id)s没有与键%(id)s对应的额外规格。" msgid "Group snapshot is empty. No group will be created." msgstr "组快照为空。将不会创建任何组。" #, python-format msgid "Group status must be available or error, but current status is: %s" msgstr "组状态必须为“available”或“error”,但当前状态为:%s" #, python-format msgid "Group type %(group_type_id)s could not be found." msgstr "组类型 %(group_type_id)s 无法找到。" #, python-format msgid "" "Group type access for %(group_type_id)s / %(project_id)s combination already " "exists." msgstr "已存在针对 %(group_type_id)s / %(project_id)s 组合的组类型权限。" #, python-format msgid "" "Group type access not found for %(group_type_id)s / %(project_id)s " "combination." msgstr "使用 %(group_type_id)s / %(project_id)s 组合无法访问组类型。" msgid "Group type name can not be empty." msgstr "组类型名称不能为空。" #, python-format msgid "Group type with name %(group_type_name)s could not be found." msgstr "名为 %(group_type_name)s 的组类型无法找到。" #, python-format msgid "" "Group volume type mapping for %(group_id)s / %(volume_type_id)s combination " "already exists." msgstr "已存在映射 %(group_id)s / %(volume_type_id)s 组合的卷组类型。" #, python-format msgid "GroupSnapshot %(group_snapshot_id)s could not be found." msgstr "无法找到组快照 %(group_snapshot_id)s。" msgid "" "GroupSnapshot status must be available or error, and no Group can be " "currently using it as source for its creation." msgstr "" "组快照状态必须为“avaliable”或“error”,而且当前没有任何组可以使用该快照作为源" "来创建。" #, python-format msgid "HTTP exit code: [%(code)s]" msgstr "HTTP退出码:[%(code)s]" #, python-format msgid "" "Hash block size has changed since the last backup. New hash block size: " "%(new)s. Old hash block size: %(old)s. Do a full backup." msgstr "" "自从最近一次备份以来,散列块大小已更改。新的散列块大小:%(new)s。旧的散列块大" "小:%(old)s。请执行完全备份。" msgid "Heartbeat" msgstr "心跳" #, python-format msgid "Hint \"%s\" not supported." msgstr "提示“%s”不受支持。" msgid "Host" msgstr "主机" #, python-format msgid "Host %(host)s could not be found." msgstr "主机 %(host)s 没有找到。" #, python-format msgid "Host %s has no FC initiators" msgstr "主机 %s 没有 FC 启动程序" #, python-format msgid "Host not found. Failed to remove %(service)s on %(host)s." msgstr "找不到主机。未能在 %(host)s 上移除 %(service)s。" msgid "Hosts" msgstr "主机" msgid "Hypermetro and Replication can not be used in the same volume_type." msgstr "Hypermetro 和复制不能用于同一 volume_type。" msgid "ID" msgstr "ID" msgid "" "If compression is set to True, rsize must also be set (not equal to -1)." msgstr "如果 compression 设置为 True,那么还必须设置 rsize(不等于 -1)。" msgid "If nofmtdisk is set to True, rsize must also be set to -1." msgstr "如果 nofmtdisk 设置为 True,rsize 必须也设置为 -1。" #, python-format msgid "" "Illegal value '%(prot)s' specified for flashsystem_connection_protocol: " "valid value(s) are %(enabled)s." msgstr "" "为 flashsystem_connection_protocol 指定的值“%(prot)s”非法:有效值为 " "%(enabled)s。" msgid "" "Illegal value specified for storwize_svc_vol_grainsize: set to either 32, " "64, 128, or 256." msgstr "" "为 storwize_svc_vol_grainsize 指定了非法值:请将值设置为 32、64、128 或 256。" #, python-format msgid "Image %(image_id)s could not be found." msgstr "找不到映像 %(image_id)s。" #, python-format msgid "Image %(image_id)s is not active." msgstr "映像 %(image_id)s 处于不活动状态。" #, python-format msgid "Image %(image_id)s is unacceptable: %(reason)s" msgstr "映像 %(image_id)s 无法接受,原因是: %(reason)s" msgid "Image location not present." msgstr "映像位置不存在。" msgid "Image quota exceeded" msgstr "镜像超出配额。" #, python-format msgid "" "Image virtual size is %(image_size)dGB and doesn't fit in a volume of size " "%(volume_size)dGB." 
msgstr "" "映像虚拟大小为 %(image_size)dGB,在大小为 %(volume_size)dGB 的卷中将无法容" "纳。" msgid "Incremental backups exist for this backup." msgstr "对于此备份,存在增量备份。" #, python-format msgid "" "Infortrend CLI exception: %(err)s Param: %(param)s (Return Code: %(rc)s) " "(Output: %(out)s)" msgstr "" "Infortrend CLI 异常:%(err)s 参数:%(param)s(返回码:%(rc)s)(输出:" "%(out)s)" msgid "Input volumes or snapshots are invalid." msgstr "输入卷或快照无效。" msgid "Input volumes or source volumes are invalid." msgstr "输入卷或源卷无效。" #, python-format msgid "Instance %(uuid)s could not be found." msgstr "找不到实例 %(uuid)s。" #, python-format msgid "Invalid 3PAR Domain: %(err)s" msgstr "3PAR 域无效:%(err)s" msgid "Invalid ALUA value. ALUA value must be 1 or 0." msgstr "ALUA 值无效。ALUA 值必须为 1 或 0。" msgid "Invalid Ceph args provided for backup rbd operation" msgstr "为备份rbd操作提供的Ceph参数无效" #, python-format msgid "Invalid CgSnapshot: %(reason)s" msgstr "Cg 快照无效:%(reason)s" #, python-format msgid "Invalid ConsistencyGroup: %(reason)s" msgstr "一致性组无效:%(reason)s" #, python-format msgid "Invalid Group: %(reason)s" msgstr "无效的组: %(reason)s" #, python-format msgid "Invalid GroupSnapshot: %(reason)s" msgstr "无效的组快照: %(reason)s" #, python-format msgid "Invalid IP address format: '%s'" msgstr "IP 地址格式“%s”无效" #, python-format msgid "" "Invalid QoS specification detected while getting QoS policy for volume %s" msgstr "获取卷 %s 的 QoS 策略时,检测到无效 QoS 规范" #, python-format msgid "Invalid Replication Target: %(reason)s" msgstr "无效复制目标:%(reason)s" #, python-format msgid "" "Invalid Virtuozzo Storage share specification: %r. Must be: [MDS1[," "MDS2],...:/][:PASSWORD]." msgstr "" "无效 Virtuozzo 存储器共享规范:%r。必须为 [MDS1[,MDS2],...:/][:" "PASSWORD]。" #, python-format msgid "Invalid XtremIO version %(cur)s, version %(min)s or up is required" msgstr "XtremIO V%(cur)s 无效,需要 V%(min)s 或更高版本" #, python-format msgid "Invalid attaching mode '%(mode)s' for volume %(volume_id)s." msgstr "加载模式 '%(mode)s' 对于卷 %(volume_id)s 无效。" #, python-format msgid "Invalid attachment info for volume %(name)s: %(reason)s" msgstr "卷 %(name)s 的无效挂载信息: %(reason)s" #, python-format msgid "Invalid auth key: %(reason)s" msgstr "认证密钥无效:%(reason)s" #, python-format msgid "Invalid backup: %(reason)s" msgstr "备份无效:%(reason)s" #, python-format msgid "Invalid connection initialization response of volume %(name)s" msgstr "卷 %(name)s 的连接初始化响应无效" #, python-format msgid "" "Invalid connection initialization response of volume %(name)s: %(output)s" msgstr "卷 %(name)s 的连接初始化响应无效:%(output)s" #, python-format msgid "Invalid content type %(content_type)s." msgstr "无效的内容类型 %(content_type)s。" #, python-format msgid "Invalid directory: %s" msgstr "无效目录:%s" #, python-format msgid "Invalid disk adapter type: %(invalid_type)s." msgstr "无效磁盘适配器类型:%(invalid_type)s。" #, python-format msgid "Invalid disk backing: %s." msgstr "无效磁盘备份:%s。" #, python-format msgid "Invalid disk type: %(disk_type)s." msgstr "无效磁盘类型:%(disk_type)s。" #, python-format msgid "Invalid disk type: %s." msgstr "无效磁盘类型:%s。" #, python-format msgid "Invalid filter keys: %s" msgstr "无效的筛选键:%s" #, python-format msgid "Invalid group type: %(reason)s" msgstr "无效的组类型:%(reason)s" #, python-format msgid "Invalid host: %(reason)s" msgstr "主机无效:%(reason)s" #, python-format msgid "" "Invalid hpe3parclient version found (%(found)s). Version %(minimum)s or " "greater required. Run \"pip install --upgrade python-3parclient\" to upgrade " "the hpe3parclient." 
msgstr "" "发现无效 hpe3parclient 版本 (%(found)s)。需要版本 %(minimum)s 或更高版本。请" "运行“pip install --upgrade python-3parclient”以升级 hpe3parclient。" #, python-format msgid "Invalid image href %(image_href)s." msgstr "无效映像 href %(image_href)s。" msgid "Invalid image identifier or unable to access requested image." msgstr "映像标识无效,或无法访问所请求映像。" msgid "Invalid imageRef provided." msgstr "提供了无效的imageRef。" msgid "Invalid input" msgstr "输入无效" #, python-format msgid "Invalid input received: %(reason)s" msgstr "输入无效: %(reason)s" #, python-format msgid "Invalid is_public filter [%s]" msgstr "is_public 过滤器 [%s] 无效" #, python-format msgid "Invalid lun type %s is configured." msgstr "配置了无效 LUN 类型 %s。" #, python-format msgid "Invalid metadata size: %(reason)s" msgstr "元数据大小无效: %(reason)s" #, python-format msgid "Invalid metadata: %(reason)s" msgstr "元数据无效: %(reason)s" #, python-format msgid "Invalid mount point base: %s" msgstr "安装点基准无效:%s" #, python-format msgid "Invalid mount point base: %s." msgstr "安装点基准无效:%s。" #, python-format msgid "Invalid new snapCPG name for retype. new_snap_cpg='%s'." msgstr "新 snapCPG 名称对执行 retype 操作无效。new_snap_cpg='%s'。" #, python-format msgid "Invalid qos specs: %(reason)s" msgstr "qos 规范无效:%(reason)s" #, python-format msgid "Invalid reservation expiration %(expire)s." msgstr "预留到期 %(expire)s 无效。" #, python-format msgid "Invalid secondary id %s." msgstr "无效辅助标识 %s。" msgid "Invalid service catalog json." msgstr "服务目录 json 无效。" #, python-format msgid "Invalid snapshot: %(reason)s" msgstr "快照无效: %(reason)s" #, python-format msgid "Invalid sort dirs passed: %s" msgstr "传递的无效排序目录:%s" #, python-format msgid "Invalid sort keys passed: %s" msgstr "传递的无效排序码:%s" #, python-format msgid "Invalid status: '%s'" msgstr "无效的状态:'%s'" #, python-format msgid "Invalid storage pool %s requested. Retype failed." msgstr "请求的存储池 %s 无效。转型失败。" #, python-format msgid "Invalid storage pool %s specificed." msgstr "指定的存储池 %s 无效。" #, python-format msgid "Invalid update setting: '%s'" msgstr "无效的更新设置:'%s'" #, python-format msgid "Invalid value '%s' for force." msgstr "值“%s”对于 force 无效。" msgid "Invalid value for 'scheduler_max_attempts', must be >=1" msgstr "值对于“scheduler_max_attempts”无效,必须 >= 1" msgid "Invalid value for NetApp configuration option netapp_host_type." msgstr "NetApp 配置选项 netapp_host_type 的值无效。" msgid "Invalid value for NetApp configuration option netapp_lun_ostype." msgstr "NetApp 配置选项 netapp_lun_ostype 的值无效。" #, python-format msgid "Invalid value for age, %(age)s" msgstr "age 的值 %(age)s 无效" #, python-format msgid "" "Invalid volume size provided for create request: %s (size argument must be " "an integer (or string representation of an integer) and greater than zero)." msgstr "" "针对创建请求提供的以下卷大小无效:%s(自变量 size 必须是整数(也可以是整数的" "字符串表示法)并且大于零)。" #, python-format msgid "Invalid volume type: %(reason)s" msgstr "卷类型无效:%(reason)s" #, python-format msgid "Invalid volume: %(reason)s" msgstr "卷无效: %(reason)s" #, python-format msgid "Invalid volume_type passed: %s." msgstr "已传递的 volume_type 无效:%s。" #, python-format msgid "" "Invalid volume_type provided: %s (requested type is not compatible; either " "match source volume, or omit type argument)." msgstr "" "所提供的以下 volume_type 无效:%s(所请求的类型不兼容;要么与源卷相匹配,要么" "省略类型参数)。" #, python-format msgid "" "Invalid volume_type provided: %s (requested type is not compatible; " "recommend omitting the type argument)." 
msgstr "所提供的 volume_type %s 无效(所请求的类型不兼容;建议省略类型参数)。" #, python-format msgid "" "Invalid volume_type provided: %s (requested type must be supported by this " "consistency group)." msgstr "提供的以下 volume_type 无效:%s(所请求类型必须受此一致性组支持)。" #, python-format msgid "" "Invalid volume_type provided: %s (requested type must be supported by this " "group)." msgstr "提供的以下 volume_type 无效:%s(所请求类型必须被该组支持)。" #, python-format msgid "Invalid wwpns format %(wwpns)s" msgstr "无效 WWPN 格式 %(wwpns)s" msgid "Issue encountered waiting for job." msgstr "等待作业时遇到问题。" msgid "Issue encountered waiting for synchronization." msgstr "等待同步时遇到问题。" msgid "" "Issuing a fail-over failed because replication is not properly configured." msgstr "发出故障转移失败,因为未正确配置复制。" #, python-format msgid "Kaminario retryable exception: %(reason)s" msgstr "Kaminario可重试异常:%(reason)s" #, python-format msgid "KaminarioCinderDriver failure: %(reason)s" msgstr "Kaminario Cinder 驱动程序故障:%(reason)s" msgid "LUN map overflow on every channel." msgstr "LUN 映射在每个通道上溢出。" #, python-format msgid "LUN not found by UUID: %(uuid)s." msgstr "无法通过UUID:%(uuid)s找到LUN。" #, python-format msgid "LUN not found with given ref %s." msgstr "找不到具有给定引用 %s 的 LUN。" #, python-format msgid "LUN number is out of bound on channel id: %(ch_id)s." msgstr "LUN 号超出了通道标识 %(ch_id)s 的范围。" msgid "License is unavailable." msgstr "许可证不可用。" #, python-format msgid "Linked clone of source volume not supported in state: %s." msgstr "不支持处于以下状态的源卷的已链接克隆:%s。" msgid "Logout session error." msgstr "注销会话错误。" msgid "" "Lookup service not configured. Config option for fc_san_lookup_service needs " "to specify a concrete implementation of the lookup service." msgstr "" "未配置查找服务。fc_san_lookup_service 的配置选项需要指定查找服务的具体实现。" msgid "Lun migration error." msgstr "Lun 迁移错误。" #, python-format msgid "" "MD5 of object: %(object_name)s before: %(md5)s and after: %(etag)s is not " "same." msgstr "对象 %(object_name)s 的 MD5 在 %(md5)s 之前和 %(etag)s 之后不相同。" #, python-format msgid "Malformed fcns output string: %s" msgstr "以下 fcns 输出字符串的格式不正确:%s" #, python-format msgid "Malformed message body: %(reason)s" msgstr "错误格式的消息体: %(reason)s" #, python-format msgid "Malformed nameserver string: %s" msgstr "以下名称服务器字符串的格式不正确:%s" msgid "Malformed request body" msgstr "错误格式的请求主体" msgid "Malformed request body." msgstr "请求主体的格式不正确。" msgid "Malformed request url" msgstr "错误格式的请求url" #, python-format msgid "Malformed response to command %(cmd)s: %(reason)s" msgstr "对命令 %(cmd)s 的响应的格式不正确:%(reason)s" #, python-format msgid "Malformed show fcns database string: %s" msgstr "以下显示 fcns 数据库字符串的格式不正确:%s" #, python-format msgid "" "Malformed zone configuration: (switch=%(switch)s " "zone_config=%(zone_config)s)." msgstr "" "区域配置的格式不正确:(switch=%(switch)s zone_config=%(zone_config)s)。" #, python-format msgid "Malformed zone status: (switch=%(switch)s zone_config=%(zone_config)s)." msgstr "" "区域状态的格式不正确:(交换机为 %(switch)s,zone_config 为 " "%(zone_config)s)。" msgid "Manage existing get size requires 'id'." msgstr "管理现有 get 大小需要“id”。" msgid "Manage existing snapshot not implemented." msgstr "未实现对现有快照的管理。" #, python-format msgid "" "Manage existing volume failed due to invalid backend reference " "%(existing_ref)s: %(reason)s" msgstr "由于后端引用 %(existing_ref)s 无效,管理现有卷失败:%(reason)s" #, python-format msgid "Manage existing volume failed due to volume type mismatch: %(reason)s" msgstr "由于卷类型不匹配,管理现有卷失败:%(reason)s" msgid "Manage existing volume not implemented." msgstr "未实现对现有卷的管理。" msgid "Manage existing volume requires 'source-id'." 
msgstr "管理现有卷将需要“source-id”。" msgid "Managing of snapshots to failed-over volumes is not allowed." msgstr "不允许管理到达已故障转移的卷的快照。" msgid "Map info is None due to array version not supporting hypermetro." msgstr "无映射信息,因为阵列版本不支持 hypermetro。" msgid "Max read iops setting for volume qos, use 0 for unlimited" msgstr "为卷的qos设置最大读iops,0表示无限制。" msgid "Max total iops setting for volume qos, use 0 for unlimited" msgstr "为卷的qos设置最大总量iops,0表示无限制。" msgid "Max write iops setting for volume qos, use 0 for unlimited" msgstr "为卷的qos设置最大写iops,0表示无限制。" msgid "Maximum age is count of days since epoch." msgstr "最大年龄是自新纪元开始计算的天数。" #, python-format msgid "Maximum number of backups allowed (%(allowed)d) exceeded" msgstr "已超过允许的最大备份数 (%(allowed)d)" #, python-format msgid "Maximum number of groups allowed (%(allowed)d) exceeded" msgstr "已超过允许的最大卷组数 (%(allowed)d)" #, python-format msgid "Maximum number of snapshots allowed (%(allowed)d) exceeded" msgstr "已超过允许的最大快照数 (%(allowed)d)" #, python-format msgid "" "Maximum number of volumes allowed (%(allowed)d) exceeded for quota " "'%(name)s'." msgstr "" "已超过允许的最大卷数 (%(allowed)d),当前请求超过了 '%(name)s' 的配额限制。" #, python-format msgid "May specify only one of %s" msgstr "只能指定 %s 中的一个" #, python-format msgid "Message %(message_id)s could not be found." msgstr "信息 %(message_id)s 无法找到。" msgid "Metadata backup already exists for this volume" msgstr "对于此卷,已存在元数据备份" #, python-format msgid "Metadata backup object '%s' already exists" msgstr "元数据备份对象“%s”已存在" #, python-format msgid "Metadata property key %s greater than 255 characters." msgstr "元数据属性关键字%s超过255个字符。" #, python-format msgid "Metadata property key %s value greater than 255 characters." msgstr "元数据属性关键字%s值超过255个字符。" msgid "Metadata restore failed due to incompatible version" msgstr "由于版本不兼容,元数据复原失败" msgid "Metadata restore failed due to incompatible version." msgstr "由于版本不兼容,元数据复原失败。" #, python-format msgid "Method %(method)s is not defined" msgstr "方法 %(method)s 未被定义" msgid "Missing Fibre Channel SAN configuration param - fc_fabric_names" msgstr "缺少光纤通道 SAN 配置参数 - fc_fabric_names" msgid "Missing request body." msgstr "缺少请求主体。" #, python-format msgid "Missing required element '%s' in request body." msgstr "请求主体中缺少必需元素“%s”。" #, python-format msgid "Multiple copies of volume %s found." msgstr "找到了卷 %s 的多个副本。" #, python-format msgid "Multiple matches found for '%s', use an ID to be more specific." msgstr "对于“%s”,找到多个匹配项,请使用标识以更具体地进行查找。" msgid "Multiple profiles found." msgstr "找到了多个概要文件。" msgid "Must implement a fallback schedule" msgstr "必须实现一个回滚 schedule" msgid "Must implement schedule_create_group" msgstr "必须实现 schedule_create_group" msgid "Must implement schedule_create_volume" msgstr "必须实现 schedule_create_volume" msgid "Must implement schedule_get_pools" msgstr "必须实现 schedule_get_pools" msgid "Must pass wwpn or host to lsfabric." msgstr "必须将 wwpn 或 host 传递给 lsfabric。" msgid "" "Must specify 'status', 'attach_status' or 'migration_status' for update." msgstr "必须指定“status”、“attach_status”或“migration_status”以进行更新。" #, python-format msgid "Must specify a valid persona %(valid)s,value '%(persona)s' is invalid." msgstr "必须指定有效角色 %(valid)s,值“%(persona)s”无效。" #, python-format msgid "" "Must specify a valid provisioning type %(valid)s, value '%(prov)s' is " "invalid." msgstr "指定有效供应类型 %(valid)s,值“%(prov)s”无效。" msgid "Must specify an ExtensionManager class" msgstr "必须明确一个ExtensionManager类" msgid "" "Must specify one or more of the following keys to update: name, description, " "add_volumes, remove_volumes." 
msgstr "更新时必须指定如下的一个或多个键:名称,描述,添加卷,删除卷。" msgid "Must specify snapshot source-name or source-id." msgstr "必须指定快照 source-name 或 source-id。" msgid "Must specify source-name or source-id." msgstr "必须指定 source-name 或 source-id。" msgid "Must supply a positive value for age" msgstr "必须为 age 提供正值" #, python-format msgid "" "NAS config '%(name)s=%(value)s' invalid. Must be 'auto', 'true', or 'false'" msgstr "NAS 配置“%(name)s=%(value)s”无效。必须为“auto”、“true”或“false”" #, python-format msgid "NFS config file at %(config)s doesn't exist" msgstr " %(config)s 处不存在 NFS 配置文件" #, python-format msgid "NFS file %s not discovered." msgstr "未发现 NFS 文件 %s。" msgid "NFS file could not be discovered." msgstr "未能发现 NFS 文件。" msgid "NaElement name cannot be null." msgstr "NaElement 名称不能为空。" msgid "Name" msgstr "名称" msgid "" "Name, description, add_volumes, and remove_volumes can not be all empty in " "the request body." msgstr "在请求主体中,名称、描述、add_volumes 和 remove_volumes 不能全部为空。" msgid "Need non-zero volume size" msgstr "需要非零卷大小" msgid "NetApp Cinder Driver exception." msgstr "发生“NetApp Cinder 驱动程序”异常。" #, python-format msgid "" "New size for extend must be greater than current size. (current: %(size)s, " "extended: %(new_size)s)." msgstr "" "用于扩展的新大小必须大于当前大小。(当前:%(size)s,已扩展:%(new_size)s)。" #, python-format msgid "" "New size should be bigger than the real size from backend storage. realsize: " "%(oldsize)s, newsize: %(newsize)s." msgstr "" "新大小应该大于后端存储器中的实际大小。realsize:%(oldsize)s,newsize:" "%(newsize)s。" msgid "New volume type not specified in request_spec." msgstr "在 request_spec 中,未指定新的卷类型。" msgid "Nimble Cinder Driver exception" msgstr "Nimble Cinder 驱动程序异常" msgid "No FC port connected to fabric." msgstr "没有任何 FC 端口连接至光纤网络。" msgid "No VF ID is defined in the configuration file." msgstr "未在配置文件中定义 VF 标识。" msgid "No active iSCSI portals with supplied iSCSI IPs" msgstr "不存在具有所提供 iSCSI IP 的活动 iSCSI 门户网站" msgid "No backups available to do an incremental backup." msgstr "没有任何备份可用于执行增量备份。" #, python-format msgid "No cloned LUN named %s found on the filer" msgstr "在文件管理器上,找不到名为 %s 的已克隆 LUN" msgid "No config node found." msgstr "找不到配置节点。" #, python-format msgid "No element by given name %s." msgstr "没有具备给定名称 %s 的元素。" #, python-format msgid "No file found with %s as backing file." msgstr "在将 %s 作为支持文件的情况下,找不到任何文件。" #, python-format msgid "No group snapshot with id %s" msgstr "不存在任何具有标识 %s 的组快照" msgid "No iSCSI-enabled ports on target array." msgstr "目标阵列上没有可支持 iSCSI 的端口。" msgid "No initiator connected to fabric." msgstr "没有任何启动程序连接至光纤网络。" #, python-format msgid "No initiator group found for initiator %s" msgstr "找不到对应启动程序 %s 的启动程序组" #, python-format msgid "No interface found on cluster for ip %s" msgstr "集群中找不到 IP %s 的接口" msgid "No ip address found." msgstr "找不到 IP 地址。" msgid "No mounted NFS shares found" msgstr "找不到任何已安装的 NFS 共享项" msgid "No mounted Virtuozzo Storage shares found" msgstr "找不到任何已安装的 Virtuozzo 存储器共享项" msgid "No mounted shares found" msgstr "找不到任何已安装的共享项" #, python-format msgid "No node found in I/O group %(gid)s for volume %(vol)s." msgstr "在卷 %(vol)s 的 I/O 组 %(gid)s 中找不到节点。" msgid "" "No pools are available for provisioning volumes. Ensure that the " "configuration option netapp_pool_name_search_pattern is set correctly." msgstr "" "没有池可用于提供卷。请确保正确设置了 netapp_pool_name_search_pattern 配置选" "项。" #, python-format msgid "No snap found with %s as backing file." msgstr "在将 %s 作为支持文件的情况下,找不到任何 snap。" msgid "" "No storage could be allocated for this volume request. You may be able to " "try another size or volume type." 
msgstr "无法为该卷请求分配存储。可以尝试其他大小或卷类型。" #, python-format msgid "No such QoS spec %(specs_id)s." msgstr "不存在任何此类 QoS 规范 %(specs_id)s。" msgid "No suitable discovery ip found" msgstr "找不到合适的发现 IP" #, python-format msgid "No support to restore backup version %s" msgstr "不支持复原备份版本 %s" #, python-format msgid "No valid backend was found. %(reason)s" msgstr "找不到有效后端,原因是 %(reason)s" #, python-format msgid "No vdisk with the UID specified by ref %s." msgstr "没有具备引用 %s 指定的 UID 的 vdisk。" #, python-format msgid "" "No volume on cluster with vserver %(vserver)s and junction path %(junction)s " msgstr "集群上不存在任何具有 vserver %(vserver)s 和结点路径 %(junction)s 的卷" #, python-format msgid "No volumes or consistency groups exist in cluster %(current)s." msgstr "集群 %(current)s 中不存在卷或者一致性组。" msgid "Not a valid value for NaElement." msgstr "此值对 NaElement 无效。" #, python-format msgid "Not authorized for image %(image_id)s." msgstr "未针对映像 %(image_id)s 授权。" msgid "Not authorized." msgstr "未授权。" #, python-format msgid "Not enough space on backend (%(backend)s)" msgstr "后端 (%(backend)s) 上没有足够的空间" msgid "Nova returned \"error\" status while creating snapshot." msgstr "在创建快照时,Nova 返回了“错误”状态。" msgid "Object Count" msgstr "对象计数" msgid "Object Version" msgstr "对象版本" msgid "Object is not a NetApp LUN." msgstr "对象不是 NetApp LUN。" #, python-format msgid "" "Only %(value)s %(verb)s request(s) can be made to %(uri)s every " "%(unit_string)s." msgstr "" "只能有 %(value)s 个 %(verb)s 请求发送给 %(uri)s 限定是每一个 " "%(unit_string)s。" msgid "Only volumes managed by OpenStack can be unmanaged." msgstr "只有 OpenStack 管理的卷才能为非受管卷。" #, python-format msgid "Operation failed with status=%(status)s. Full dump: %(data)s" msgstr "操作失败,并且 status=%(status)s。完全转储:%(data)s" #, python-format msgid "Operation not supported: %(operation)s." msgstr "操作 %(operation)s 不受支持。" msgid "Option gpfs_images_dir is not set correctly." msgstr "选项 gpfs_images_dir 未正确设置。" msgid "Option gpfs_mount_point_base is not set correctly." msgstr "选项 gpfs_mount_point_base 未正确设置。" #, python-format msgid "Originating %(res)s %(prop)s must be one of '%(vals)s' values" msgstr "始发 %(res)s %(prop)s 必须为其中一个“%(vals)s”值" #, python-format msgid "ParseException: %s" msgstr "ParseException:%s" msgid "" "Password or SSH private key is required for authentication: set either " "san_password or san_private_key option." msgstr "" "进行认证需要密码或 SSH 专用密钥:请设置 san_password 或 san_private_key 选" "项。" msgid "Path to REST server's certificate must be specified." msgstr "必须指定 REST 服务器的证书的路径。" #, python-format msgid "Please create %(pool_list)s pool in advance!" msgstr "请提前创建 %(pool_list)s 池!" #, python-format msgid "Policy doesn't allow %(action)s to be performed." msgstr "政策不允许 %(action)s 被执行。" msgid "Pool is not available in the volume host field." msgstr "在卷主机字段中,未提供池。" msgid "Pool is not available in the volume host fields." msgstr "在卷主机字段中,未提供池。" #, python-format msgid "Pool with name %(pool_name)s wasn't found in domain %(domain_id)s." msgstr "在域 %(domain_id)s 中找不到名称为 %(pool_name)s 的池。" msgid "Pools name is not set." msgstr "未设置池名称。" #, python-format msgid "Programming error in Cinder: %(reason)s" msgstr "Cinder程序错误:%(reason)s" msgid "Project ID" msgstr "项目ID" msgid "Protection Group not ready." msgstr "保护组未就绪。" #, python-format msgid "" "Protocol %(storage_protocol)s is not supported for storage family " "%(storage_family)s." 
msgstr "存储器系列 %(storage_family)s 不支持协议 %(storage_protocol)s。" msgid "Provided backup record is missing an id" msgstr "所提供的备份记录缺少标识" #, python-format msgid "" "Provided snapshot status %(provided)s not allowed for snapshot with status " "%(current)s." msgstr "对于状态为 %(current)s 的快照,不允许提供的快照状态 %(provided)s。" #, python-format msgid "Pure Storage Cinder driver failure: %(reason)s" msgstr "Pure Storage Cinder 驱动程序故障:%(reason)s" msgid "Purge command failed, check cinder-manage logs for more details." msgstr "Pure命令执行失败,更多详细信息请查看cinder-manage日志。" #, python-format msgid "QoS Specs %(specs_id)s already exists." msgstr "QoS 规范 %(specs_id)s 已存在。" #, python-format msgid "QoS Specs %(specs_id)s is still associated with entities." msgstr "QoS 规范 %(specs_id)s 仍然与实体关联。" #, python-format msgid "QoS spec %(specs_id)s has no spec with key %(specs_key)s." msgstr "QoS 规范 %(specs_id)s 没有任何具有键 %(specs_key)s 的规范。" msgid "Qos specs still in use." msgstr "Qos 规范仍在使用中。" msgid "Query resource pool error." msgstr "查询资源池时发生错误。" #, python-format msgid "Quota class %(class_name)s could not be found." msgstr "找不到配额类 %(class_name)s。" msgid "Quota could not be found" msgstr "配额没有找到。" #, python-format msgid "Quota exceeded for resources: %(overs)s" msgstr "对于资源,已超过配额:%(overs)s" #, python-format msgid "Quota exceeded: code=%(code)s" msgstr "配额用尽:code=%(code)s" #, python-format msgid "Quota for project %(project_id)s could not be found." msgstr "没有为项目 %(project_id)s 找到配额。" #, python-format msgid "Quota usage for project %(project_id)s could not be found." msgstr "找不到项目 %(project_id)s 的配额使用量。" #, python-format msgid "RBD diff op failed - (ret=%(ret)s stderr=%(stderr)s)" msgstr "RBD 差集操作失败 - (ret=%(ret)s stderr=%(stderr)s)" msgid "RPC Version" msgstr "RPC 版本" msgid "Reference must be for an unmanaged snapshot." msgstr "引用必须针对非受管快照。" msgid "Reference must be for an unmanaged virtual volume." msgstr "引用必须对应非受管虚拟卷。" msgid "Reference must contain source-id or source-name element." msgstr "引用必须包含 source-id 或 source-name 元素。" msgid "Reference must contain source-id or source-name key." msgstr "引用必须包含 source-id 或 source-name 键。" msgid "Reference must contain source-id or source-name." msgstr "引用必须包含 source-id 或 source-name。" msgid "Reference must contain source-id." msgstr "引用必须包含 source-id。" msgid "Reference must contain source-name element." msgstr "引用必须包含 source-name 元素。" msgid "Reference must contain source-name or source-id." msgstr "引用必须包含 source-name 或 source-id。" msgid "Reference must contain source-name." msgstr "引用必须包含源名称。" #, python-format msgid "" "Refusing to migrate volume ID: %(id)s. Please check your configuration " "because source and destination are the same Volume Group: %(name)s." msgstr "" "正在拒绝迁移卷标识:%(id)s。请检查配置因为源和目标是同一卷组:%(name)s。" msgid "Remote pool cannot be found." msgstr "找不到远程池。" msgid "Remove CHAP error." msgstr "移除 CHAP 时发生错误。" msgid "Remove fc from host error." msgstr "从主机中移除 FC 时发生错误。" msgid "Remove host from array error." msgstr "从阵列中移除主机时发生错误。" msgid "Remove host from hostgroup error." msgstr "从主机组中移除主机时发生错误。" msgid "Remove iscsi from host error." msgstr "从主机中移除 iSCSI 时发生错误。" msgid "Remove lun from QoS error." msgstr "从 QoS 移除 LUN 时出错。" msgid "Remove lun from cache error." msgstr "从高速缓存移除 LUN 时发生错误。" msgid "Remove lun from partition error." msgstr "从分区移除 LUN 时发生错误。" msgid "Remove port from port group error." msgstr "从端口组移除端口时出错。" msgid "Remove volume export failed." msgstr "除去卷导出失败。" msgid "Rename lun on array error." msgstr "在阵列上重命名 LUN 时发生错误。" msgid "Rename snapshot on array error." 
msgstr "在阵列上重命名快照时出错。" #, python-format msgid "Replication %(name)s to %(ssn)s failed." msgstr "将 %(name)s 复制到 %(ssn)s 失败。" msgid "Replication not allowed yet." msgstr "尚不允许复制。" msgid "Request body and URI mismatch" msgstr "请求主体和URI不匹配" msgid "Request body contains too many items" msgstr "请求主体包含太多items" #, python-format msgid "" "Requested backup exceeds allowed Backup gigabytes quota. Requested " "%(requested)sG, quota is %(quota)sG and %(consumed)sG has been consumed." msgstr "" "所请求备份超过允许的备份千兆字节配额。已请求 %(requested)sG,配额为 " "%(quota)sG,并且已耗用 %(consumed)sG。" #, python-format msgid "" "Requested volume or snapshot exceeds allowed %(name)s quota. Requested " "%(requested)sG, quota is %(quota)sG and %(consumed)sG has been consumed." msgstr "" "所请求的卷或快照超过允许的 %(name)s 配额。已请求 %(requested)sG,配额为 " "%(quota)sG,已耗用 %(consumed)sG。" msgid "Required configuration not found" msgstr "找不到必需的配置选项" #, python-format msgid "" "Reset backup status aborted, the backup service currently configured " "[%(configured_service)s] is not the backup service that was used to create " "this backup [%(backup_service)s]." msgstr "" "重置备份状态已异常中止,当前配置的备份服务 [%(configured_service)s] 不是已用" "来创建此备份的备份服务 [%(backup_service)s]。" #, python-format msgid "Resizing clone %s failed." msgstr "调整克隆 %s 的大小失败。" msgid "Resizing image file failed." msgstr "对映像文件调整大小失败。" msgid "Resource could not be found." msgstr "资源没有找到。" msgid "Resource not ready." msgstr "资源未就绪。" msgid "RestURL is not configured." msgstr "未配置 RestURL。" #, python-format msgid "" "Restore backup aborted, expected volume status %(expected_status)s but got " "%(actual_status)s." msgstr "" "备份复原已异常中止,需要的卷状态为 %(expected_status)s,但获得的是 " "%(actual_status)s。" #, python-format msgid "" "Restore backup aborted, the backup service currently configured " "[%(configured_service)s] is not the backup service that was used to create " "this backup [%(backup_service)s]." msgstr "" "备份复原已异常中止,当前配置的备份服务 [%(configured_service)s] 不是已用来创" "建此备份的备份服务 [%(backup_service)s]。" #, python-format msgid "" "Restore backup aborted: expected backup status %(expected_status)s but got " "%(actual_status)s." msgstr "" "备份复原已异常中止:需要的备份状态为 %(expected_status)s,但获得的是 " "%(actual_status)s。" #, python-format msgid "" "Retrieved a different amount of SolidFire volumes for the provided Cinder " "snapshots. Retrieved: %(ret)s Desired: %(des)s" msgstr "" "检索到所提供 Cinder 快照的不同 SolidFire 卷量。已检索到:%(ret)s 期望:" "%(des)s" #, python-format msgid "" "Retrieved a different amount of SolidFire volumes for the provided Cinder " "volumes. Retrieved: %(ret)s Desired: %(des)s" msgstr "" "检索到所提供 Cinder 卷的不同 SolidFire 卷量。已检索到:%(ret)s 期望:%(des)s" #, python-format msgid "Retry count exceeded for command: %s" msgstr "对于命令,超过重试次数:%s" msgid "Retryable Dell Exception encountered" msgstr "遇到可重试的Dell异常。" msgid "Retryable Pure Storage Exception encountered" msgstr "遇到可重试的Pure Storage异常。" msgid "Retryable SolidFire Exception encountered" msgstr "遇到可重试的 SolidFire 异常" msgid "Retype requires migration but is not allowed." msgstr "转型需要迁移,但是不允许。" #, python-format msgid "" "Running Cinder with a VMware vCenter version less than %s is not allowed." msgstr "不允许运行 VMware vCenter 版本低于 %s 的 Cinder。" msgid "SAN product is not configured." msgstr "未配置 SAN 产品。" #, python-format msgid "SMBFS config file at %(config)s doesn't exist." msgstr "%(config)s 处不存在 SMBFS 配置文件。" msgid "SMBFS config file not set (smbfs_shares_config)." 
msgstr "SMBFS 配置文件未设置 (smbfs_shares_config)。" #, python-format msgid "SSH command injection detected: %(command)s" msgstr "检测到 SSH 命令注入:%(command)s" #, python-format msgid "Scheduler Host Filter %(filter_name)s could not be found." msgstr "调度器主机过滤器 %(filter_name)s 没有找到。" #, python-format msgid "Scheduler Host Weigher %(weigher_name)s could not be found." msgstr "找不到调度程序主机衡量器 %(weigher_name)s。" #, python-format msgid "" "Secondary id can not be the same as primary array, backend_id = " "%(secondary)s." msgstr "辅助标识不能与主阵列相同,backend_id = %(secondary)s。" #, python-format msgid "Service %(service)s on host %(host)s removed." msgstr "已移除主机 %(host)s 上的服务 %(service)s。" #, python-format msgid "Service %(service_id)s could not be found on host %(host)s." msgstr "在主机 %(host)s 上找不到服务 %(service_id)s。" #, python-format msgid "Service %(service_id)s could not be found." msgstr "服务 %(service_id)s 没有找到。" msgid "Service is too old to fulfil this request." msgstr "服务太旧,无法实现此请求。" msgid "Service is unavailable at this time." msgstr "该时刻服务无法使用。" msgid "Set pair secondary access error." msgstr "设置对辅助访问时出错。" msgid "Sets thin provisioning." msgstr "设置自动精简配置。" #, python-format msgid "" "Share %s ignored due to invalid format. Must be of form address:/export. " "Please check the nas_host and nas_share_path settings." msgstr "" "由于格式无效,已忽略共享项 %s。格式必须为 address:/export。请检查 nas_host " "和 nas_share_path 设置。" msgid "Size" msgstr "配置" #, python-format msgid "Size for volume: %s not found, cannot secure delete." msgstr "找不到卷 %s 的大小,无法进行安全删除。" #, python-format msgid "" "Size of specified image %(image_size)sGB is larger than volume size " "%(volume_size)sGB." msgstr "所指定映像的大小 %(image_size)sGB 大于卷大小 %(volume_size)sGB。" #, python-format msgid "" "Snapshot %(id)s has been asked to be deleted while waiting for it to become " "available. Perhaps a concurrent request was made." msgstr "在等待快照 %(id)s 变为可用时请求删除该快照。可能发出了并行请求。" #, python-format msgid "" "Snapshot %(id)s was found in state %(state)s rather than 'deleting' during " "cascade delete." msgstr "" "级联删除期间,发现快照 %(id)s 处于 %(state)s 状态而不是“deleting”状态。" #, python-format msgid "Snapshot %(snapshot_id)s could not be found." msgstr "快照 %(snapshot_id)s 没有找到。" #, python-format msgid "Snapshot %(snapshot_id)s has no metadata with key %(metadata_key)s." msgstr "快照 %(snapshot_id)s 没有任何具有键 %(metadata_key)s 的元数据。" #, python-format msgid "Snapshot %s must not be part of a group." msgstr "快照 %s 不能属于某个组。" #, python-format msgid "Snapshot '%s' doesn't exist on array." msgstr "快照“%s”在阵列上不存在。" msgid "Snapshot already managed." msgstr "快照已管理。" msgid "Snapshot cannot be created while volume is migrating." msgstr "无法在迁移卷时创建快照。" msgid "Snapshot of secondary replica is not allowed." msgstr "不允许获取辅助副本的快照。" #, python-format msgid "Snapshot of volume not supported in state: %s." msgstr "不支持对处于以下状态的卷生成快照:%s。" #, python-format msgid "Snapshot status %(cur)s not allowed for update_snapshot_status" msgstr "对于 update_snapshot_status,不允许快照状态 %(cur)s" #, python-format msgid "" "Snapshot to be backed up must be available, but the current status is \"%s\"." msgstr "要备份的快照必须可用,但当前状态为“%s”。" msgid "SolidFire Cinder Driver exception" msgstr "发生“SolidFire Cinder 驱动程序”异常" msgid "Sort direction array size exceeds sort key array size." msgstr "排序方向阵列大小超过排序键阵列大小。" msgid "" "Source CG cannot be empty or in 'creating' or 'updating' state. No " "cgsnapshot will be created." msgstr "" "源CG不能为空,也不能是“creating”或“updating”状态。将不会创建cgsnapshot。" msgid "Source Group is empty. No group will be created." 
msgstr "源组为空。将不会创建任何组。" msgid "" "Source group cannot be empty or in 'creating' or 'updating' state. No group " "snapshot will be created." msgstr "源组不能为空,也不能是“creating”或“updating”状态。将不会创建组快照。" msgid "Source host details not found." msgstr "找不到源主机详细信息。" msgid "Source volume device ID is required." msgstr "需要源卷设备标识。" msgid "Source volume not mid-migration." msgstr "源卷未在迁移中。" msgid "Specified logical volume does not exist." msgstr "所指定的逻辑卷不存在。" msgid "" "Specifies number of replicas for each volume. Can only be increased once " "volume is created" msgstr "为每一个卷指定副本的数量。一旦卷被创建,其副本数量只能被增加。" msgid "Specify a password or private_key" msgstr "请指定密码或 private_key" msgid "Specify group type name, description or a combination thereof." msgstr "在其中指定组类型的名称、描述或者两者的组合。" msgid "Specify san_password or san_private_key" msgstr "指定san_password或者san_private_key" msgid "" "Specify volume type name, description, is_public or a combination thereof." msgstr "指定卷类型名称、描述、is_public 或它们的组合。" msgid "Split pair error." msgstr "拆分对时出错。" msgid "Split replication failed." msgstr "拆分复制失败。" msgid "Start LUNcopy error." msgstr "启动 LUNcopy 时发生错误。" msgid "State" msgstr "状态" #, python-format msgid "State of node is wrong. Current state is %s." msgstr "节点的状态错误。当前状态为 %s。" msgid "Status" msgstr "状态" msgid "Stop snapshot error." msgstr "停止快照时发生错误。" #, python-format msgid "Storage family %s is not supported." msgstr "不支持存储器系列 %s。" msgid "Storage pool is not configured." msgstr "未配置存储池。" #, python-format msgid "Storage profile: %(storage_profile)s not found." msgstr "找不到存储器概要文件 %(storage_profile)s。" #, python-format msgid "" "Successfully renamed %(num_vols)s volumes and %(num_cgs)s consistency groups " "from cluster %(current)s to %(new)s" msgstr "" "成功重命名%(num_vols)s卷和 %(num_cgs)s一致性组从集群%(current)s到%(new)s" msgid "Switch over pair error." msgstr "切换对时出错。" msgid "Sync pair error." msgstr "同步对时出错。" #, python-format msgid "Synology driver authentication failed: %(reason)s." msgstr "Synology驱动认证失败:%(reason)s。" msgid "System does not support compression." msgstr "系统不支持压缩。" msgid "System is busy, retry operation." msgstr "系统繁忙,请重试操作。" msgid "Target group type is still in use." msgstr "目标组类型仍在使用中。" msgid "Target volume type is still in use." msgstr "目标卷类型仍在使用中。" msgid "Terminate connection failed" msgstr "终止连接发生故障" msgid "Terminate connection unable to connect to backend." msgstr "终止连接无法连接至后端。" #, python-format msgid "Terminate volume connection failed: %(err)s" msgstr "终止卷连接失败:%(err)s" msgid "" "The 'sort_key' and 'sort_dir' parameters are deprecated and cannot be used " "with the 'sort' parameter." msgstr "" "“sort_key”和“sort_dir”参数已建议不要使用,并且不能与“sort”参数配合使用。" #, python-format msgid "" "The GPFS filesystem %(fs)s is not at the required release level. Current " "level is %(cur)s, must be at least %(min)s." msgstr "" "GPFS 文件系统 %(fs)s 未处于所要求的发行版级别。当前级别为 %(cur)s,而要求的级" "别必须至少为 %(min)s。" msgid "The copy should be primary or secondary" msgstr "副本应为主副本或者辅助副本" msgid "The decorated method must accept either a volume or a snapshot object" msgstr "已装饰的方法必须接受卷或快照对象" msgid "The decorated method must accept image_meta." msgstr "已装饰的方法必须接受 image_meta。" #, python-format msgid "The device in the path %(path)s is unavailable: %(reason)s" msgstr "路径%(path)s 指向的设备不可用:%(reason)s" #, python-format msgid "The end time (%(end)s) must be after the start time (%(start)s)." 
msgstr "结束时间 (%(end)s) 必须在开始时间 (%(start)s) 之后。" #, python-format msgid "The failed-over volume could not be deleted: %s" msgstr "无法删除已故障转移的卷:%s" msgid "" "The host is not ready to be failed back. Please resynchronize the volumes " "and resume replication on the 3PAR backends." msgstr "主机未准备好故障返回。请重新同步卷并在 3PAR 后端上继续进行复制。" msgid "The method update_migrated_volume is not implemented." msgstr "未实现方法 update_migrated_volume。" #, python-format msgid "The provided snapshot '%s' is not a snapshot of the provided volume." msgstr "所提供快照“%s”并非所提供卷的快照。" msgid "" "The replication mode was not configured correctly in the volume type " "extra_specs. If replication:mode is periodic, replication:sync_period must " "also be specified and be between 300 and 31622400 seconds." msgstr "" "未在卷类型 extra_specs 中正确配置复制方式。如果 replication:mode 为 " "periodic,那么必须同时指定 replication:sync_period 并且周期必须介于 300 秒到 " "31622400 秒之间。" msgid "The results are invalid." msgstr "结果无效。" msgid "The snapshot cannot be created when the volume is in error status." msgstr "当卷处于错误状态时,无法创建快照。" msgid "The snapshot cannot be created when the volume is in maintenance mode." msgstr "当卷处于维护方式时,无法创建快照。" #, python-format msgid "The snapshot is unavailable: %(data)s" msgstr "快照不可用:%(data)s" #, python-format msgid "" "The source volume type '%(src)s' is different than the destination volume " "type '%(dest)s'." msgstr "原卷类型'%(src)s'与目标卷'%(dest)s'不一致。" #, python-format msgid "The source volume type '%s' is not available." msgstr "原卷类型'%s'不可用。" #, python-format msgid "The specified LUN does not belong to the given pool: %s." msgstr "指定的 LUN 不属于给定池:%s。" msgid "The specified vdisk is mapped to a host." msgstr "指定的 vdisk 已映射到主机。" #, python-format msgid "" "The storage device does not support %(prot)s. Please configure the device to " "support %(prot)s or switch to a driver using a different protocol." msgstr "" "存储器设备不支持 %(prot)s。请配置该设备以支持 %(prot)s 或切换至使用另一协议的" "驱动程序。" #, python-format msgid "" "The type of metadata: %(metadata_type)s for volume/snapshot %(id)s is " "invalid." msgstr "卷/快照 %(id)s 的元数据类型 %(metadata_type)s无效。" msgid "The volume cannot accept transfer in maintenance mode." msgstr "在维护方式下卷无法接受传输。" msgid "The volume cannot be attached in maintenance mode." msgstr "在维护方式下无法连接卷。" msgid "The volume cannot be detached in maintenance mode." msgstr "在维护方式下无法拆离卷。" msgid "The volume cannot be updated during maintenance." msgstr "维护期间无法更新卷。" msgid "The volume connection cannot be initialized in maintenance mode." msgstr "在维护方式下无法初始化卷连接。" msgid "The volume driver requires the iSCSI initiator name in the connector." msgstr "卷驱动程序在连接器中需要 iSCSI 发起方名称。" msgid "" "The volume is currently busy on the 3PAR and cannot be deleted at this time. " "You can try again later." msgstr "该卷当前在 3PAR 上繁忙,此时无法删除。可稍后重试。" msgid "There are no valid ESX hosts." msgstr "不存在有效的 ESX 主机。" msgid "There are no valid datastores." msgstr "不存在任何有效数据存储器。" msgid "There is no metadata in DB object." msgstr "数据库对象中没有元数据。" #, python-format msgid "There is no share which can host %(volume_size)sG" msgstr "不存在任何可主管 %(volume_size)sG 的共享项" msgid "There is no virtual disk device." msgstr "不存在任何虚拟盘设备。" #, python-format msgid "There was an error adding the volume to the remote copy group: %s." msgstr "将该卷添加至远程复制组时发生了错误:%s。" #, python-format msgid "There was an error creating the cgsnapshot: %s" msgstr "创建 cgsnapshot 时发生错误:%s" #, python-format msgid "There was an error creating the remote copy group: %s." 
msgstr "创建远程复制组时发生了错误:%s。" #, python-format msgid "" "There was an error setting the sync period for the remote copy group: %s." msgstr "为远程复制组设置同步周期时发生了错误:%s。" #, python-format msgid "" "There was an error setting up a remote copy group on the 3PAR arrays: " "('%s'). The volume will not be recognized as replication type." msgstr "" "在 3PAR 阵列上设置远程复制组时发生了错误:(“%s”)。该卷未被识别为复制类型。" #, python-format msgid "There was an error starting remote copy: %s." msgstr "启动远程复制时发生了错误:%s。" #, python-format msgid "There's no NFS config file configured (%s)" msgstr "未配置 NFS 配置文件 (%s)" #, python-format msgid "" "There's no Quobyte volume configured (%s). Example: quobyte:///" "" msgstr "未配置 Quobyte 卷 (%s)。示例:quobyte:///" msgid "Thin provisioning not supported on this version of LVM." msgstr "在此版本的 LVM 上,不支持瘦供应。" msgid "This request was rate-limited." msgstr "这个请求受到频率限制。" #, python-format msgid "" "This system platform (%s) is not supported. This driver supports only Win32 " "platforms." msgstr "此系统平台 (%s) 不受支持。此驱动程序仅支持 Win32 平台。" #, python-format msgid "Timed out while waiting for Nova update for creation of snapshot %s." msgstr "等待 Nova 更新(以便创建快照 %s)时超时。" #, python-format msgid "" "Timed out while waiting for Nova update for deletion of snapshot %(id)s." msgstr "等待 Nova 更新(以便删除快照 %(id)s)时超时。" #, python-format msgid "Timeout while requesting %(service)s API." msgstr "请求 %(service)s API 时超时。" #, python-format msgid "Timeout while requesting capabilities from backend %(service)s." msgstr "从后端请求 %(service)s 功能时超时。" #, python-format msgid "Transfer %(transfer_id)s could not be found." msgstr "无法找到转换器%(transfer_id)s" #, python-format msgid "" "Trying to import backup metadata from id %(meta_id)s into backup %(id)s." msgstr "正在尝试将备份元数据从标识 %(meta_id)s 导入到备份 %(id)s。" #, python-format msgid "" "Tune volume task stopped before it was done: volume_name=%(volume_name)s, " "task-status=%(status)s." msgstr "" "调整卷任务未完成就已停止:volume_name=%(volume_name)s, task-" "status=%(status)s。" #, python-format msgid "" "Type %(type_id)s is already associated with another qos specs: " "%(qos_specs_id)s" msgstr "已使类型 %(type_id)s 与另一 qos 规范 %(qos_specs_id)s 关联" msgid "Type access modification is not applicable to public group type." msgstr "类型访问修改不适用于公共组类型。" msgid "Type access modification is not applicable to public volume type." msgstr "类型访问修改不适用于公共卷类型。" msgid "Type cannot be converted into NaElement." msgstr "此类型不能转换为 NaElement。" #, python-format msgid "UUIDs %s are in both add and remove volume list." msgstr "UUID %s 同时位于“添加卷”和“移除卷”列表中。" msgid "Unable to access the backend storage via file handle." msgstr "通过文件句柄无法访问后端存储器。" #, python-format msgid "Unable to access the backend storage via the path %(path)s." msgstr "无法通过路径 %(path)s 访问后端存储器。" #, python-format msgid "Unable to complete failover of %s." msgstr "无法完成 %s 的故障转移。" msgid "Unable to connect or find connection to host" msgstr "无法连接至主机,或找不到与主机的连接" msgid "Unable to create lock. Coordination backend not started." msgstr "无法创建锁定。协调后端未启动。" #, python-format msgid "Unable to create server object for initiator %(name)s" msgstr "无法为发起者 %(name)s 创建服务器对象" #, python-format msgid "Unable to create volume %(name)s from %(snap)s." msgstr "无法通过 %(snap)s 创建卷 %(name)s。" #, python-format msgid "Unable to create volume %(name)s from %(vol)s." msgstr "无法通过 %(vol)s 创建卷 %(name)s。" #, python-format msgid "Unable to create volume %s" msgstr "无法创建卷 %s" msgid "Unable to create volume. Backend down." msgstr "无法创建卷。后端已关闭。" #, python-format msgid "Unable to delete snapshot %(id)s, status: %(status)s." 
msgstr "无法删除快照 %(id)s,状态:%(status)s。" msgid "" "Unable to detach volume. Volume status must be 'in-use' and attach_status " "must be 'attached' to detach." msgstr "" "无法拆离卷。卷状态必须为“in-use”,并且 attach_status 必须为“attached”才能拆" "离。" #, python-format msgid "" "Unable to determine secondary_array from supplied secondary: %(secondary)s." msgstr "无法根据所提供辅助项来确定 secondary_array:%(secondary)s。" msgid "Unable to determine system id." msgstr "无法确定系统标识。" msgid "Unable to determine system name." msgstr "无法确定系统名称。" #, python-format msgid "Unable to establish the partnership with the Storwize cluster %s." msgstr "无法建立与 Storwize 集群 %s 的伙伴关系。" #, python-format msgid "Unable to extend volume %s" msgstr "无法扩展卷 %s" msgid "" "Unable to failback to \"default\", this can only be done after a failover " "has completed." msgstr "无法故障返回至“default”,此操作只能在故障转换完成后进行。" msgid "Unable to fetch connection information from backend." msgstr "无法从后端访存连接信息。" #, python-format msgid "Unable to fetch connection information from backend: %(err)s" msgstr "无法从后端访存连接信息:%(err)s" #, python-format msgid "Unable to find Volume Group: %(vg_name)s" msgstr "找不到卷组: %(vg_name)s" msgid "Unable to find any active VPSA controller" msgstr "无法找到任何活跃的VPSA控制器。" msgid "Unable to find failover target, no secondary targets configured." msgstr "找不到故障转移目标,未配置辅助目标。" msgid "Unable to find iSCSI mappings." msgstr "找不到 iSCSI 映射。" #, python-format msgid "Unable to find server object for initiator %(name)s" msgstr "无法找到发起者为 %(name)s 的服务器对象。" #, python-format msgid "Unable to find ssh_hosts_key_file: %s" msgstr "找不到 ssh_hosts_key_file:%s" #, python-format msgid "Unable to find volume %s" msgstr "找不到卷 %s" #, python-format msgid "Unable to get a block device for file '%s'" msgstr "无法获取对应文件“%s”的块设备" #, python-format msgid "" "Unable to get configuration information necessary to create a volume: " "%(errorMessage)s." msgstr "无法获取创建卷所需要的配置信息:%(errorMessage)s。" #, python-format msgid "Unable to get stats for backend_name: %s" msgstr "无法获取 backend_name %s 的统计信息" #, python-format msgid "Unable to locate an SVM that is managing the IP address '%s'" msgstr "无法找到正在管理 IP 地址“%s”的 SVM" #, python-format msgid "Unable to locate specified replay profiles %s " msgstr "找不到指定重放概要文件 %s " #, python-format msgid "" "Unable to manage existing volume. Volume %(volume_ref)s already managed." msgstr "无法管理现有卷。已管理卷 %(volume_ref)s。" #, python-format msgid "Unable to manage volume %s" msgstr "无法管理卷 %s" msgid "Unable to map volume" msgstr "无法映射卷" msgid "Unable to map volume." msgstr "无法映射卷。" msgid "Unable to parse attributes." msgstr "无法解析属性。" msgid "" "Unable to re-use a host that is not managed by Cinder with " "use_chap_auth=True," msgstr "无法在 use_chap_auth=True 的情况下复用并非由 Cinder 管理的主机," msgid "Unable to re-use host with unknown CHAP credentials configured." msgstr "无法在配置了未知 CHAP 凭证的情况下复用主机。" #, python-format msgid "Unable to rename volume %(existing)s to %(newname)s" msgstr "无法将卷 %(existing)s 重命名为 %(newname)s" #, python-format msgid "" "Unable to retype %(specname)s, expected to receive current and requested " "%(spectype)s values. Value received: %(spec)s" msgstr "" "无法对 %(specname)s 进行转型,需要接收当前的和请求的 %(spectype)s 值。接收到" "的值:%(spec)s" #, python-format msgid "" "Unable to retype: A copy of volume %s exists. Retyping would exceed the " "limit of 2 copies." msgstr "" "无法执行 retype:卷 %s 的副本已存在。执行 retype 将超过2 个副本的限制。" #, python-format msgid "" "Unable to retype: Current action needs volume-copy, it is not allowed when " "new type is replication. 
Volume = %s" msgstr "无法转型:当前操作需要卷拷贝,当新类型为复制时,不允许卷拷贝。卷为 %s" #, python-format msgid "" "Unable to set up mirror mode replication for %(vol)s. Exception: %(err)s." msgstr "无法对 %(vol)s 设置镜像方式复制。异常:%(err)s。" msgid "Unable to terminate volume connection from backend." msgstr "无法从后端终止卷连接。" #, python-format msgid "Unable to terminate volume connection: %(err)s" msgstr "无法终止卷连接:%(err)s" msgid "Unacceptable parameters." msgstr "无法接受的参数。" #, python-format msgid "" "Unexecpted mapping status %(status)s for mapping %(id)s. Attributes: " "%(attr)s." msgstr "映射 %(id)s 的意外映射状态 %(status)s。属性:%(attr)s。" #, python-format msgid "" "Unexpected CLI response: header/row mismatch. header: %(header)s, row: " "%(row)s." msgstr "出现意外 CLI 响应:头/行不匹配。头:%(header)s,行:%(row)s。" #, python-format msgid "Unexpected output. Expected [%(expected)s] but received [%(output)s]" msgstr "意外输出。需要 [%(expected)s],但接收到 [%(output)s]" #, python-format msgid "Unexpected over quota on %(name)s." msgstr "%(name)s 超过预期配额。" msgid "Unexpected response from Nimble API" msgstr "来自 Nimble API 的意外响应" msgid "Unexpected status code" msgstr "意外的状态码" #, python-format msgid "" "Unexpected status code from the switch %(switch_id)s with protocol " "%(protocol)s for url %(page)s. Error: %(error)s" msgstr "" "对于 url %(page)s,带有协议 %(protocol)s 的交换机 %(switch_id)s 发出意外状态" "码。错误:%(error)s" msgid "Unknown NFS exception" msgstr "NFS 异常未知" msgid "Unknown RemoteFS exception" msgstr "RemoteFS 异常未知" msgid "Unknown SMBFS exception." msgstr "SMBFS 异常未知。" msgid "Unknown Virtuozzo Storage exception" msgstr "未知 Virtuozzo 存储器异常" msgid "Unknown action" msgstr "操作未知" #, python-format msgid "Unknown or unsupported command %(cmd)s" msgstr "命令 %(cmd)s 未知或不受支持" #, python-format msgid "Unknown protocol: %(protocol)s." msgstr "未知协议:%(protocol)s。" #, python-format msgid "Unknown quota resources %(unknown)s." msgstr "配额资源 %(unknown)s 未知。" msgid "Unknown sort direction, must be 'desc' or 'asc'" msgstr "排序方向未知,必须为“降序”或“升序”" msgid "Unknown sort direction, must be 'desc' or 'asc'." msgstr "排序方向未知,必须为“降序”或“升序”。" msgid "Unmanage and cascade delete options are mutually exclusive." msgstr "非管理选项与级联删除选项互斥。" msgid "Unmanage volume not implemented." msgstr "未实现非管理卷。" msgid "Unmanaging of snapshots from failed-over volumes is not allowed." msgstr "不允许取消管理来自已故障转移的卷的快照。" #, python-format msgid "Unrecognized backing format: %s" msgstr "无法识别支持格式:%s" #, python-format msgid "Unrecognized read_deleted value '%s'" msgstr "无法识别的 read_deleted 取值”%s“" msgid "Unsupported Content-Type" msgstr "不支持的Content-Type" #, python-format msgid "Unsupported backup metadata version (%s)" msgstr "不支持备份元数据版本 (%s)" msgid "Unsupported backup metadata version requested" msgstr "不支持请求的备份元数据版本" #, python-format msgid "" "Unsupported firmware on switch %s. Make sure switch is running firmware v6.4 " "or higher" msgstr "" "在交换机 %s 上存在不受支持的固件。请确保交换机正在运行固件 V6.4 或更高版本" #, python-format msgid "Unsupported volume format %s" msgstr "不支持的卷格式:%s" #, python-format msgid "Unsupported volume format: %s " msgstr "以下卷格式不受支持:%s " msgid "Update QoS policy error." msgstr "更新 QoS 策略时发生错误。" msgid "Updated At" msgstr "已更新于" #, python-format msgid "Updating volume metadata is not allowed for volumes in %s status." msgstr "当卷状态为 %s 时,不允许更新该卷的元数据。" msgid "Upload to glance of attached volume is not supported." msgstr "不支持上载至所连接卷的 Glance。" msgid "Use ALUA to associate initiator to host error." msgstr "使用 ALUA 使启动程序与主机相关联时发生错误。" msgid "" "Use CHAP to associate initiator to host error. Please check the CHAP " "username and password." 
msgstr "使用 CHAP 使启动程序与主机相关联时发生错误。请检查 CHAP 用户名和密码。" msgid "User ID" msgstr "用户ID" msgid "User does not have admin privileges" msgstr "用户没有管理员权限" msgid "UserName is not configured." msgstr "未配置 UserName。" msgid "UserPassword is not configured." msgstr "未配置 UserPassword。" msgid "VF is not enabled." msgstr "未启用 VF。" #, python-format msgid "VV Set %s does not exist." msgstr "VV 集 %s 不存在。" #, python-format msgid "Valid consumer of QoS specs are: %s" msgstr "以下是 QoS 规范的有效使用者:%s" #, python-format msgid "Validate volume connection failed (error: %(err)s)." msgstr "验证卷连接失败(错误:%(err)s)。" #, python-format msgid "" "Value \"%(value)s\" is not valid for configuration option \"%(option)s\"" msgstr "值“%(value)s”对于配置选项“%(option)s”无效" #, python-format msgid "Vdisk %(name)s not involved in mapping %(src)s -> %(tgt)s." msgstr "从 %(src)s 到 %(tgt)s 的映射中未涉及到 Vdisk %(name)s。" #, python-format msgid "" "Version %(req_ver)s is not supported by the API. Minimum is %(min_ver)s and " "maximum is %(max_ver)s." msgstr "" "此 API 不支持版本 %(req_ver)s。最低版本为 %(min_ver)s,最高版本为 " "%(max_ver)s。" #, python-format msgid "VersionedObject %s cannot retrieve object by id." msgstr "VersionedObject %s 无法按标识检索对象。" #, python-format msgid "VersionedObject %s does not support conditional update." msgstr "VersionedObject %s 不支持带条件更新。" #, python-format msgid "Virtual volume '%s' doesn't exist on array." msgstr "虚拟卷“%s”在阵列上不存在。" #, python-format msgid "Volume %(deviceID)s not found." msgstr "找不到卷 %(deviceID)s。" #, python-format msgid "Volume %(name)s could not be found. It might be already deleted" msgstr "找不到卷 %(name)s。该卷可能已被删除" #, python-format msgid "Volume %(name)s was created in VNX, but in %(state)s state." msgstr "在 VNX 中创建了卷 %(name)s,但此卷处于 %(state)s 状态。" #, python-format msgid "Volume %(name)s was not deactivated in time." msgstr "卷 %(name)s 没有被及时释放。" #, python-format msgid "Volume %(vol)s could not be created in pool %(pool)s." msgstr "未能在池 %(pool)s 中创建卷 %(vol)s。" #, python-format msgid "Volume %(vol1)s does not match with snapshot.volume_id %(vol2)s." msgstr "卷 %(vol1)s 与 snapshot.volume_id %(vol2)s 不匹配。" #, python-format msgid "Volume %(vol_id)s status must be %(statuses)s" msgstr "卷 %(vol_id)s 的状态必须为 %(statuses)s" #, python-format msgid "" "Volume %(vol_id)s status must be available to update readonly flag, but " "current status is: %(vol_status)s." msgstr "" "卷 %(vol_id)s 状态必须为“可用”,才能更新只读标记,但当前状态为:" "%(vol_status)s。" #, python-format msgid "Volume %(volume_id)s could not be found." msgstr "卷 %(volume_id)s 没有找到。" #, python-format msgid "Volume %(volume_id)s has no metadata with key %(metadata_key)s." msgstr "卷 %(volume_id)s 没有含键 %(metadata_key)s 的元数据。" #, python-format msgid "Volume %(volume_id)s is still attached, detach volume first." msgstr "卷 %(volume_id)s 仍然处于连接状态,请先从卷断开连接。" #, python-format msgid "Volume %(volume_id)s replication error: %(reason)s" msgstr "卷 %(volume_id)s 复制错误:%(reason)s" #, python-format msgid "Volume %s could not be created from source volume." msgstr "未能从源卷创建卷 %s。" #, python-format msgid "Volume %s could not be created on shares." msgstr "在共享项上,未能创建卷 %s。" #, python-format msgid "Volume %s could not be created." msgstr "未能创建卷 %s。" #, python-format msgid "Volume %s does not exist in Nexenta SA" msgstr "卷 %s 在 Nexenta SA 中不存在" #, python-format msgid "Volume %s does not exist in Nexenta Store appliance" msgstr "卷 %s 在 Nexenta 存储设备中不存在" #, python-format msgid "Volume %s does not exist on the array." msgstr "卷 %s 在阵列上不存在。" #, python-format msgid "Volume %s does not have provider_location specified, skipping." 
msgstr "卷 %s 没有指定 provider_location,正在跳过。" #, python-format msgid "Volume %s doesn't exist on array." msgstr "卷 %s 在阵列上不存在。" #, python-format msgid "" "Volume %s is online. Set volume to offline for managing using OpenStack." msgstr "卷 %s 已联机。将该卷设置为脱机以便使用 OpenStack 进行管理。" #, python-format msgid "Volume %s must not be part of a consistency group." msgstr "卷 %s 不得是一致性组的一部分。" #, python-format msgid "Volume %s not found" msgstr "找不到卷 %s" #, python-format msgid "Volume %s not found." msgstr "找不到卷 %s。" #, python-format msgid "Volume %s: Error trying to extend volume" msgstr "卷 %s:尝试扩展卷时出错" #, python-format msgid "Volume (%s) already exists on array" msgstr "数组中已存在卷 (%s)" #, python-format msgid "Volume (%s) already exists on array." msgstr "阵列上已存在卷 (%s)。" #, python-format msgid "Volume Group %s does not exist" msgstr "卷组 %s 不存在" #, python-format msgid "Volume Type %(id)s already exists." msgstr "卷类型 %(id)s 已存在。" #, python-format msgid "" "Volume Type %(volume_type_id)s deletion is not allowed with volumes present " "with the type." msgstr "当存在类型为 %(volume_type_id)s 的卷时,不允许删除该卷类型。" #, python-format msgid "" "Volume Type %(volume_type_id)s has no extra specs with key " "%(extra_specs_key)s." msgstr "卷类型 %(volume_type_id)s 没有额外说明键 %(extra_specs_key)s 。" msgid "Volume Type id must not be None." msgstr "卷类型不能为空。" msgid "Volume already managed." msgstr "卷已管理。" msgid "Volume by this name already exists" msgstr "使用此名称的卷已存在" msgid "Volume create failed while extracting volume ref." msgstr "抽取卷引用时创建卷失败。" #, python-format msgid "Volume device file path %s does not exist." msgstr "卷设备文件路径 %s 不存在。" #, python-format msgid "Volume device not found at %(device)s." msgstr "在 %(device)s 上找不到卷设备。" #, python-format msgid "Volume driver %s not initialized." msgstr "卷驱动程序 %s 未初始化。" msgid "Volume driver not ready." msgstr "卷驱动未准备好。" #, python-format msgid "Volume driver reported an error: %(message)s" msgstr "卷驱动程序已报告错误:%(message)s" msgid "Volume has a temporary snapshot that can't be deleted at this time." msgstr "卷具有此时不能删除的临时快照。" msgid "Volume has children and cannot be deleted!" msgstr "卷具有子代,不能删除!" #, python-format msgid "Volume in group %s is attached. Need to detach first." msgstr "已连接组 %s 中的卷。需要先拆离。" msgid "Volume in group still has dependent snapshots." msgstr "组中的卷仍然具有从属快照。" #, python-format msgid "Volume is attached to a server. (%s)" msgstr "卷已连接至服务器。(%s)" msgid "Volume is in-use." msgstr "卷在使用中。" msgid "Volume is not available." msgstr "卷不可用。" msgid "Volume is not local to this node." msgstr "该卷不是此节点的本地卷。" msgid "Volume manage failed." msgstr "管理卷失败。" #, python-format msgid "Volume migration failed: %(reason)s" msgstr "卷迁移失败:%(reason)s" msgid "Volume must be in the same availability zone as the snapshot" msgstr "卷必须与快照位于同一可用性区域中" msgid "Volume must be in the same availability zone as the source volume" msgstr "卷必须与源卷位于同一可用性区域中" msgid "Volume must not be replicated." msgstr "不得复制卷。" msgid "Volume must not have snapshots." msgstr "卷不能具有快照。" #, python-format msgid "Volume not found for instance %(instance_id)s." msgstr "没有为实例 %(instance_id)s 找到卷。" msgid "Volume not found on configured storage backend." msgstr "在已配置的存储器后端上找不到卷。" msgid "Volume not found." msgstr "找不到卷。" msgid "Volume not unique." msgstr "卷并非唯一。" msgid "Volume not yet assigned to host." msgstr "卷尚未分配给主机。" msgid "Volume should have agent-type set as None." msgstr "卷应该将 agent-type 设置为“无”。" #, python-format msgid "" "Volume size %(volume_size)sGB cannot be smaller than the image minDisk size " "%(min_disk)sGB." 
msgstr "卷大小 %(volume_size)sGB 不能小于映像 minDisk 大小 %(min_disk)sGB。" #, python-format msgid "Volume size '%(size)s' must be an integer and greater than 0" msgstr "卷大小“%(size)s”必须为正整数" #, python-format msgid "" "Volume size '%(size)s'GB cannot be smaller than original volume size " "%(source_size)sGB. They must be >= original volume size." msgstr "" "卷大小“%(size)s”GB 不能小于原始卷大小 %(source_size)sGB。它们必须不小于原始卷" "大小。" #, python-format msgid "" "Volume size '%(size)s'GB cannot be smaller than the snapshot size " "%(snap_size)sGB. They must be >= original snapshot size." msgstr "" "卷大小“%(size)s”GB 不能小于快照大小 %(snap_size)sGB。它们必须不小于原始快照大" "小。" msgid "Volume size increased since the last backup. Do a full backup." msgstr "自从最近一次备份以来,卷大小已增加。请执行完全备份。" msgid "Volume size must be a multiple of 1 GB." msgstr "卷大小必须为 1 GB 的倍数。" msgid "Volume size must multiple of 1 GB." msgstr "卷大小必须是 1 GB 的倍数。" msgid "Volume status must be 'available'." msgstr "卷状态必须为“可用”。" #, python-format msgid "Volume status must be available for snapshot %(id)s. (is %(status)s)" msgstr "对于快照 %(id)s,卷状态必须为“available”。(卷状态现在为 %(status)s)" msgid "Volume to Initiator Group mapping already exists" msgstr "卷至发起方组的映射已存在" #, python-format msgid "" "Volume to be backed up must be available or in-use, but the current status " "is \"%s\"." msgstr "要备份的卷必须可用或者正在使用,但是当前状态为“%s”。" msgid "Volume to be restored to must be available" msgstr "要复原至的卷必须可用" #, python-format msgid "Volume type %(volume_type_id)s could not be found." msgstr "卷类型 %(volume_type_id)s 没有找到。" #, python-format msgid "Volume type ID '%s' is invalid." msgstr "卷类型标识“%s”无效。" #, python-format msgid "" "Volume type access for %(volume_type_id)s / %(project_id)s combination " "already exists." msgstr "已存在针对 %(volume_type_id)s / %(project_id)s 组合的卷类型访问权限。" #, python-format msgid "" "Volume type access not found for %(volume_type_id)s / %(project_id)s " "combination." msgstr "对于 %(volume_type_id)s / %(project_id)s 组合,找不到卷类型访问权限。" #, python-format msgid "Volume type encryption for type %(type_id)s already exists." msgstr "类型 %(type_id)s 的卷类型加密已存在。" #, python-format msgid "Volume type encryption for type %(type_id)s does not exist." msgstr "类型 %(type_id)s 的卷类型加密不存在。" msgid "Volume type name can not be empty." msgstr "卷类型名称不能为 空." #, python-format msgid "Volume type with name %(volume_type_name)s could not be found." msgstr "名为 %(volume_type_name)s 的卷类型没有找到。" msgid "" "Volumes/account exceeded on both primary and secondary SolidFire accounts." msgstr "卷/帐户同时超出主 SolidFire 帐户和辅助 SolidFire 帐户的限制。" #, python-format msgid "" "VzStorage config 'vzstorage_used_ratio' invalid. Must be > 0 and <= 1.0: %s." msgstr "" "VzStorage 配置“vzstorage_used_ratio”无效。必须大于 0 并且小于或等于 1.0:%s。" #, python-format msgid "VzStorage config file at %(config)s doesn't exist." msgstr "%(config)s 处不存在 VzStorage 配置文件。" #, python-format msgid "Wait synchronize failed. Running status: %s." msgstr "等待同步失败。运行状态:%s。" msgid "We should not do switch over on primary array." msgstr "不应在主阵列上切换。" #, python-format msgid "Worker for %(type)s %(id)s already exists." msgstr "标识为%(type)s %(id)s的生产者已经存在。" #, python-format msgid "Worker with %s could not be found." msgstr "无法找到标识为 %s 的生产者。" msgid "XtremIO not initialized correctly, no clusters found" msgstr "XtremIO 未正确初始化,找不到任何集群" msgid "You must implement __call__" msgstr "你必须执行 __call__" msgid "" "You must install hpe3parclient before using 3PAR drivers. Run \"pip install " "python-3parclient\" to install the hpe3parclient." 
msgstr "" "使用 3PAR 驱动程序之前,必须安装 hpe3parclient。运行“pip install " "python-3parclient”以安装 hpe3parclient。" #, python-format msgid "ZeroDivisionError: %s" msgstr "ZeroDivisionError:%s" msgid "Zone" msgstr "域" #, python-format msgid "Zoning Policy: %s, not recognized" msgstr "分区策略:%s,无法识别" #, python-format msgid "_create_and_copy_vdisk_data: Failed to get attributes for vdisk %s." msgstr "_create_and_copy_vdisk_data:未能获取 vdisk %s 的属性。" msgid "_create_host failed to return the host name." msgstr "_create_host 未能返回主机名。" msgid "" "_create_host: Can not translate host name. Host name is not unicode or " "string." msgstr "_create_host:无法翻译主机名。主机名不是 Unicode或字符串。" msgid "_create_host: No connector ports." msgstr "_create_host:不存在任何连接器端口。" msgid "_create_local_cloned_volume, Replication Service not found." msgstr "_create_local_cloned_volume,找不到复制服务。" #, python-format msgid "" "_create_local_cloned_volume, volumename: %(volumename)s, sourcevolumename: " "%(sourcevolumename)s, source volume instance: %(source_volume)s, target " "volume instance: %(target_volume)s, Return code: %(rc)lu, Error: " "%(errordesc)s." msgstr "" "_create_local_cloned_volume,volumenam:%(volumename)s,sourcevolumename:" "%(sourcevolumename)s,源卷实例:%(source_volume)s,目标卷实例:" "%(target_volume)s,返回码:%(rc)lu,错误:%(errordesc)s。" #, python-format msgid "" "_create_vdisk %(name)s - did not find success message in CLI output.\n" " stdout: %(out)s\n" " stderr: %(err)s" msgstr "" "_create_vdisk %(name)s - 找不到 CLI 输出形式的成功消息。\n" "标准输出:%(out)s\n" "标准错误:%(err)s" msgid "_delete_copysession, Cannot find Replication Service" msgstr "_delete_copysession,找不到复制服务" #, python-format msgid "" "_delete_copysession, copy session type is undefined! copy session: " "%(cpsession)s, copy type: %(copytype)s." msgstr "" "_delete_copysession,未定义复制会话类型!复制会话:%(cpsession)s,复制类型:" "%(copytype)s。" #, python-format msgid "" "_delete_copysession, copysession: %(cpsession)s, operation: %(operation)s, " "Return code: %(rc)lu, Error: %(errordesc)s." msgstr "" "_delete_copysession,copysession:%(cpsession)s,操作:%(operation)s,返回" "码:%(rc)lu,错误:%(errordesc)s。" #, python-format msgid "" "_delete_volume, volumename: %(volumename)s, Return code: %(rc)lu, Error: " "%(errordesc)s." msgstr "" "_delete_volume,volumename:%(volumename)s,返回码:%(rc)lu,错误:" "%(errordesc)s。" #, python-format msgid "" "_delete_volume, volumename: %(volumename)s, Storage Configuration Service " "not found." msgstr "_delete_volume,volumename:%(volumename)s,找不到存储器配置服务。" #, python-format msgid "" "_exec_eternus_service, classname: %(classname)s, InvokeMethod, cannot " "connect to ETERNUS." msgstr "" "_exec_eternus_service,classname:%(classname)s,InvokeMethod,无法连接至 " "ETERNUS。" msgid "_extend_volume_op: Extending a volume with snapshots is not supported." msgstr "_extend_volume_op:不支持扩展带有快照的卷。" #, python-format msgid "" "_find_affinity_group, connector: %(connector)s, Associators: " "FUJITSU_AuthorizedTarget, cannot connect to ETERNUS." msgstr "" "_find_affinity_group,连接器:%(connector)s,关联者:" "FUJITSU_AuthorizedTarget,无法连接至 ETERNUS。" #, python-format msgid "" "_find_affinity_group, connector: %(connector)s, EnumerateInstanceNames, " "cannot connect to ETERNUS." msgstr "" "_find_affinity_group,连接器:%(connector)s,EnumerateInstanceNames,无法连接" "至 ETERNUS。" #, python-format msgid "" "_find_affinity_group,connector: %(connector)s,AssocNames: " "FUJITSU_ProtocolControllerForUnit, cannot connect to ETERNUS." 
msgstr "" "_find_affinity_group,连接器:%(connector)s,AssocNames: " "FUJITSU_ProtocolControllerForUnit,无法连接至 ETERNUS。" #, python-format msgid "" "_find_copysession, ReferenceNames, vol_instance: %(vol_instance_path)s, " "Cannot connect to ETERNUS." msgstr "" "_find_copysession,ReferenceNames,vol_instance:%(vol_instance_path)s,无法" "连接至 ETERNUS。" #, python-format msgid "" "_find_eternus_service, classname: %(classname)s, EnumerateInstanceNames, " "cannot connect to ETERNUS." msgstr "" "_find_eternus_service,classname:%(classname)s,EnumerateInstanceNames,无法" "连接至 ETERNUS。" #, python-format msgid "_find_initiator_names, connector: %(connector)s, initiator not found." msgstr "_find_initiator_names,连接器:%(connector)s,找不到启动程序。" #, python-format msgid "" "_find_pool, eternus_pool:%(eternus_pool)s, EnumerateInstances, cannot " "connect to ETERNUS." msgstr "" "_find_pool,eternus_pool:%(eternus_pool)s,EnumerateInstances,无法连接至 " "ETERNUS。" msgid "_get_async_url: Invalid URL." msgstr "_get_async_url: 无效的 URL." #, python-format msgid "" "_get_drvcfg, filename: %(filename)s, tagname: %(tagname)s, data is None!! " "Please edit driver configuration file and correct." msgstr "" "_get_drvcfg,filename:%(filename)s,tagname:%(tagname)s,没有数据!请编辑驱" "动配置文件并更正。" #, python-format msgid "" "_get_eternus_connection, filename: %(filename)s, ip: %(ip)s, port: %(port)s, " "user: %(user)s, passwd: ****, url: %(url)s, FAILED!!." msgstr "" "_get_eternus_connection,filename:%(filename)s,ip:%(ip)s,端口:%(port)s," "用户:%(user)s,密码:****,URL:%(url)s,失败!" #, python-format msgid "" "_get_eternus_iscsi_properties, iscsiip list: %(iscsiip_list)s, iqn not found." msgstr "" "_get_eternus_iscsi_properties,iscsiip 列表:%(iscsiip_list)s,找不到 iqn。" #, python-format msgid "" "_get_eternus_iscsi_properties, iscsiip: %(iscsiip)s, AssociatorNames: " "CIM_BindsTo, cannot connect to ETERNUS." msgstr "" "_get_eternus_iscsi_properties,iscsiip:%(iscsiip)s,AssociatorName:" "CIM_BindsTo,无法连接至 ETERNUS。" #, python-format msgid "" "_get_eternus_iscsi_properties, iscsiip: %(iscsiip)s, EnumerateInstanceNames, " "cannot connect to ETERNUS." msgstr "" "_get_eternus_iscsi_properties,iscsiip:%(iscsiip)s,EnumerateInstanceNames," "无法连接至 ETERNUS。" #, python-format msgid "" "_get_eternus_iscsi_properties, iscsiip: %(iscsiip)s, GetInstance, cannot " "connect to ETERNUS." msgstr "" "_get_eternus_iscsi_properties,iscsiip:%(iscsiip)s,GetInstance,无法连接至 " "ETERNUS。" #, python-format msgid "" "_get_hdr_dic: attribute headers and values do not match.\n" " Headers: %(header)s\n" " Values: %(row)s." msgstr "" "_get_hdr_dic:属性头和值不匹配。\n" "头为 %(header)s\n" "值为 %(row)s。" msgid "_get_host_from_connector failed to return the host name for connector." msgstr "_get_host_from_connector 未能返回连接器的主机名。" #, python-format msgid "" "_get_mapdata_fc, getting host-affinity from aglist/vol_instance failed, " "affinitygroup: %(ag)s, ReferenceNames, cannot connect to ETERNUS." msgstr "" "_get_mapdata_fc,从 aglist/vol_instance 获取主机亲缘关系失败,affinitygroup:" "%(ag)s,ReferenceNames,无法连接至 ETERNUS。" #, python-format msgid "" "_get_mapdata_fc, getting host-affinity instance failed, volmap: %(volmap)s, " "GetInstance, cannot connect to ETERNUS." msgstr "" "_get_mapdata_fc,获取主机亲缘关系实例失败,volmap:%(volmap)s,GetInstance," "无法连接至 ETERNUS。" msgid "" "_get_mapdata_iscsi, Associators: FUJITSU_SAPAvailableForElement, cannot " "connect to ETERNUS." 
msgstr "" "_get_mapdata_iscsi,关联者:FUJITSU_SAPAvailableForElement,无法连接至 " "ETERNUS。" #, python-format msgid "" "_get_mapdata_iscsi, affinitygroup: %(ag)s, ReferenceNames, cannot connect to " "ETERNUS." msgstr "" "_get_mapdata_iscsi,affinitygroup:%(ag)s,ReferenceNames,无法连接至 " "ETERNUS。" #, python-format msgid "" "_get_mapdata_iscsi, vol_instance: %(vol_instance)s, ReferenceNames: " "CIM_ProtocolControllerForUnit, cannot connect to ETERNUS." msgstr "" "_get_mapdata_iscsi,vol_instance:%(vol_instance)s,ReferenceNames: " "CIM_ProtocolControllerForUnit,无法连接至 ETERNUS。" #, python-format msgid "" "_get_mapdata_iscsi, volmap: %(volmap)s, GetInstance, cannot connect to " "ETERNUS." msgstr "" "_get_mapdata_iscsi,volmap:%(volmap)s,GetInstance,无法连接至 ETERNUS。" msgid "_get_target_port, EnumerateInstances, cannot connect to ETERNUS." msgstr "_get_target_port,EnumerateInstances,无法连接至 ETERNUS。" #, python-format msgid "_get_target_port, protcol: %(protocol)s, target_port not found." msgstr "_get_target_port,协议:%(protocol)s,找不到 target_port。" #, python-format msgid "_get_unmanaged_replay: Cannot find snapshot named %s" msgstr "_get_unmanaged_replay:找不到名为 %s 的快照" #, python-format msgid "_get_unmanaged_replay: Cannot find volume id %s" msgstr "_get_unmanaged_replay:找不到卷标识 %s" msgid "_get_unmanaged_replay: Must specify source-name." msgstr "_get_unmanaged_replay:必须指定 source-name。" msgid "" "_get_vdisk_map_properties: Could not get FC connection information for the " "host-volume connection. Is the host configured properly for FC connections?" msgstr "" "_get_vdisk_map_properties:对于主机/卷连接,未能获取 FC 连接信息。已针对 FC " "连接正确配置主机吗?" #, python-format msgid "" "_get_vdisk_map_properties: No node found in I/O group %(gid)s for volume " "%(vol)s." msgstr "" "_get_vdisk_map_properties:在卷 %(vol)s 的 I/O 组 %(gid)s 中找不到任何节点。" #, python-format msgid "" "_map_lun, vol_instance.path:%(vol)s, volumename: %(volumename)s, volume_uid: " "%(uid)s, initiator: %(initiator)s, target: %(tgt)s, aglist: %(aglist)s, " "Storage Configuration Service not found." msgstr "" "_map_lun,vol_instance.path:%(vol)s,volumename:%(volumename)s," "volume_uid:%(uid)s,启动程序:%(initiator)s,目标:%(tgt)s,aglist:" "%(aglist)s,找不到存储器配置服务。" #, python-format msgid "" "_unmap_lun, vol_instance.path: %(volume)s, volumename: %(volumename)s, " "volume_uid: %(uid)s, aglist: %(aglist)s, Controller Configuration Service " "not found." msgstr "" "_unmap_lun,vol_instance.path:%(volume)s,volumename:%(volumename)s," "volume_uid:%(uid)s,aglist:%(aglist)s,找不到控制器配置服务。" #, python-format msgid "" "_unmap_lun, volumename: %(volumename)s, volume_uid: %(volume_uid)s, " "AffinityGroup: %(ag)s, Return code: %(rc)lu, Error: %(errordesc)s." msgstr "" "_unmap_lun,volumename:%(volumename)s,volume_uid:%(volume_uid)s," "AffinityGroup:%(ag)s,返回码:%(rc)lu,错误:%(errordesc)s。" #, python-format msgid "" "_unmap_lun,vol_instance.path: %(volume)s, AssociatorNames: " "CIM_ProtocolControllerForUnit, cannot connect to ETERNUS." msgstr "" "_unmap_lun,vol_instance.path:%(volume)s,AssociatorName:" "CIM_ProtocolControllerForUnit,无法连接至 ETERNUS。" msgid "_update_volume_stats: Could not get storage pool data." msgstr "_update_volume_stats:未能获取存储池数据。" #, python-format msgid "" "_wait_for_copy_complete, cpsession: %(cpsession)s, copysession state is " "BROKEN." msgstr "" "_wait_for_copy_complete,cpsession:%(cpsession)s,copysession 状态为 " "BROKEN。" #, python-format msgid "" "add_vdisk_copy failed: A copy of volume %s exists. Adding another copy would " "exceed the limit of 2 copies." 
msgstr "" "add_vdisk_copy 失败:卷 %s 的副本已存在。添加另一个副本将超过 2 个副本的限" "制。" msgid "add_vdisk_copy started without a vdisk copy in the expected pool." msgstr "在所需池中没有 vdisk 副本的情况下,add_vdisk_copy 已开始。" #, python-format msgid "all_tenants must be a boolean, got '%s'." msgstr "all_tenants 必须为布尔值,但是获得了“%s”。" msgid "already created" msgstr "已创建" msgid "already_created" msgstr "already_created" #, python-format msgid "attribute %s not lazy-loadable" msgstr "属性 %s 不可延迟装入" msgid "being attached by different mode" msgstr "正在通过另一方式连接" #, python-format msgid "can't find lun-map, ig:%(ig)s vol:%(vol)s" msgstr "找不到 LUN 映射,ig:%(ig)s 卷:%(vol)s" msgid "can't find the volume to extend" msgstr "找不到要扩展的卷" msgid "can't handle both name and index in req" msgstr "无法同时处理请求中的名称和索引" msgid "cannot understand JSON" msgstr "无法理解JSON" msgid "" "cg_creating_from_src must be called with cg_id or cgsnapshot_id parameter." msgstr "cg_creating_from_src必须通过cg_id or cgsnapshot_id参数调用。" msgid "cgsnapshot assigned" msgstr "已分配 cgsnapshot" msgid "cgsnapshot changed" msgstr "已更改 cgsnapshot" msgid "cgsnapshots assigned" msgstr "已分配 cgsnapshot" msgid "cgsnapshots changed" msgstr "已更改 cgsnapshot" msgid "" "check_for_setup_error: Password or SSH private key is required for " "authentication: set either san_password or san_private_key option." msgstr "" "check_for_setup_error:认证需要密码或 SSH 专用密钥:请设置 san_password 或 " "san_private_key 选项。" msgid "check_for_setup_error: Unable to determine system id." msgstr "check_for_setup_error:无法确定系统标识。" msgid "check_for_setup_error: Unable to determine system name." msgstr "check_for_setup_error:无法确定系统名称。" msgid "check_hypermetro_exist error." msgstr "check_hypermetro_exist 错误。" msgid "cluster assigned" msgstr "已分配集群" msgid "cluster changed" msgstr "已更改集群" msgid "config option key_manager.fixed_key is not defined" msgstr "配置选项 key_manager.fixed_key 为定义。" msgid "consistencygroup assigned" msgstr "已分配 consistencygroup" msgid "consistencygroup changed" msgstr "已更改 consistencygroup" msgid "create_cloned_volume, Source Volume does not exist in ETERNUS." msgstr "create_cloned_volume,源卷在 ETERNUS 中不存在。" #, python-format msgid "" "create_cloned_volume, target volume instancename: %(volume_instancename)s, " "Get Instance Failed." msgstr "" "create_cloned_volume,目标卷实例名:%(volume_instancename)s,获取实例失败。" #, python-format msgid "" "create_cloned_volume: source volume %(src_vol)s size is %(src_size)dGB and " "doesn't fit in target volume %(tgt_vol)s of size %(tgt_size)dGB." msgstr "" "create_cloned_volume:源卷 %(src_vol)s 大小为 %(src_size)dGB,无法拟合大小为 " "%(tgt_size)dGB 的目标卷 %(tgt_vol)s。" msgid "" "create_consistencygroup_from_src only supports a cgsnapshot source or a " "consistency group source. Multiple sources cannot be used." msgstr "" "create_consistencygroup_from_src 仅支持 cgsnapshot 源或一致性组源。不能使用多" "个源。" #, python-format msgid "create_copy: Source vdisk %(src)s (%(src_id)s) does not exist." msgstr "create_copy:源 vdisk %(src)s (%(src_id)s) 不存在。" #, python-format msgid "create_copy: Source vdisk %(src)s does not exist." msgstr "create_copy:源 vdisk %(src)s 不存在。" msgid "create_host: Host name is not unicode or string." msgstr "create_host:主机名不是 Unicode 或字符串。" msgid "create_host: No initiators or wwpns supplied." msgstr "create_host:未提供任何发起方或 wwpn。" msgid "create_hypermetro_pair error." msgstr "create_hypermetro_pair 错误。" #, python-format msgid "" "create_snapshot, volumename: %(s_volumename)s, source volume not found on " "ETERNUS." 
msgstr "" "create_snapshot,volumename:%(s_volumename)s,在 ETERNUS 上找不到源卷。" #, python-format msgid "" "create_snapshot: Volume status must be \"available\" or \"in-use\" for " "snapshot. The invalid status is %s." msgstr "" "create_snapshot:对于快照,卷状态必须为“available”或“in-use”。无效状态为 %s。" msgid "create_snapshot: get source volume failed." msgstr "create_snapshot:获取源卷失败。" msgid "create_volume_from_snapshot, Source Volume does not exist in ETERNUS." msgstr "create_volume_from_snapshot,源卷在 ETERNUS 中不存在。" #, python-format msgid "" "create_volume_from_snapshot, target volume instancename: " "%(volume_instancename)s, Get Instance Failed." msgstr "" "create_volume_from_snapshot,目标卷实例名:%(volume_instancename)s,获取实例" "失败。" #, python-format msgid "create_volume_from_snapshot: Snapshot %(name)s does not exist." msgstr "create_volume_from_snapshot:快照 %(name)s 不存在。" #, python-format msgid "" "create_volume_from_snapshot: Snapshot status must be \"available\" for " "creating volume. The invalid status is: %s." msgstr "" "create_volume_from_snapshot:快照状态必须为“可用”,以便创建卷。无效状态为 " "%s。" msgid "delete_hypermetro error." msgstr "delete_hypermetro 错误。" #, python-format msgid "delete_initiator: %s ACL not found. Continuing." msgstr "找不到 delete_initiator: %s ACL。正在继续。" msgid "delete_replication error." msgstr "delete_replication 错误。" #, python-format msgid "deleting snapshot %(snapshot_name)s that has dependent volumes" msgstr "正在删除具有从属卷的快照 %(snapshot_name)s" #, python-format msgid "deleting volume %(volume_name)s that has snapshot" msgstr "正在删除有快照的卷 %(volume_name)s" msgid "do_setup: No configured nodes." msgstr "do_setup:不存在任何已配置的节点。" #, python-format msgid "" "error writing object to swift, MD5 of object in swift %(etag)s is not the " "same as MD5 of object sent to swift %(md5)s" msgstr "" "将对象写入 swift 时出错,swift %(etag)s 中对象的 MD5 与发送至 swift %(md5)s " "的对象的 MD5 不同" #, python-format msgid "" "extend_volume, volume: %(volume)s, volumename: %(volumename)s, eternus_pool: " "%(eternus_pool)s, Storage Configuration Service not found." msgstr "" "extend_volume,卷:%(volume)s, volumename:%(volumename)s,eternus_pool:" "%(eternus_pool)s,找不到存储器配置服务。" #, python-format msgid "" "extend_volume, volumename: %(volumename)s, Return code: %(rc)lu, Error: " "%(errordesc)s, PoolType: %(pooltype)s." msgstr "" "extend_volume,volumename:%(volumename)s,返回码:%(rc)lu,错误:" "%(errordesc)s,池类型:%(pooltype)s。" msgid "fake" msgstr "fake" #, python-format msgid "fmt=%(fmt)s backed by: %(backing_file)s" msgstr "fmt=%(fmt)s 由 %(backing_file)s 支持" #, python-format msgid "fmt=%(fmt)s backed by:%(backing_file)s" msgstr "fmt=%(fmt)s 受以下项支持:%(backing_file)s" msgid "force delete" msgstr "强制删除" msgid "get_hyper_domain_id error." msgstr "get_hyper_domain_id 错误。" msgid "get_hypermetro_by_id error." msgstr "get_hypermetro_by_id 错误。" #, python-format msgid "" "get_iscsi_params: Failed to get target IP for initiator %(ini)s, please " "check config file." msgstr "get_iscsi_params:未能获取发起方 %(ini)s 的目标 IP,请检查配置文件。" #, python-format msgid "get_pool: Failed to get attributes for volume %s" msgstr "get_pool:无法获取卷 %s 的属性" msgid "glance_metadata changed" msgstr "已更改 glance_metadata" #, python-format msgid "" "gpfs_images_share_mode is set to copy_on_write, but %(vol)s and %(img)s " "belong to different file systems." msgstr "" "gpfs_images_share_mode 已设置为 copy_on_write,但 %(vol)s 和 %(img)s 属于不同" "文件系统。" #, python-format msgid "" "gpfs_images_share_mode is set to copy_on_write, but %(vol)s and %(img)s " "belong to different filesets." 
msgstr "" "gpfs_images_share_mode 已设置为 copy_on_write,但 %(vol)s 和 %(img)s 属于不同" "文件集。" msgid "group assigned" msgstr "已分配组" msgid "group changed" msgstr "已更改组" #, python-format msgid "group-%s" msgstr "组 %s" msgid "" "group_creating_from_src must be called with group_id or group_snapshot_id " "parameter." msgstr "" "group_creating_from_src必须被调用,参数为group_id或者group_snapshot_id。" msgid "group_snapshot assigned" msgstr "已分配组快照" msgid "group_snapshot changed" msgstr "已更改组快照" msgid "group_snapshots assigned" msgstr "已分配组快照" msgid "group_type_id cannot be None" msgstr "group_type_id 不能为None" msgid "id cannot be None" msgstr "id不能是None" #, python-format msgid "image %s not found" msgstr "找不到映像 %s " #, python-format msgid "initialize_connection, volume: %(volume)s, Volume not found." msgstr "initialize_connection,卷:%(volume)s,找不到卷。" #, python-format msgid "initialize_connection: Failed to get attributes for volume %s." msgstr "initialize_connection:未能获取卷 %s 的属性。" #, python-format msgid "initialize_connection: Missing volume attribute for volume %s." msgstr "initialize_connection:缺少卷 %s 的卷属性。" #, python-format msgid "" "initialize_connection: No node found in I/O group %(gid)s for volume %(vol)s." msgstr "initialize_connection:在卷 %(vol)s 的 I/O 组 %(gid)s 中找不到节点。" #, python-format msgid "initialize_connection: vdisk %s is not defined." msgstr "initialize_connection:未定义 vdisk %s。" #, python-format msgid "invalid user '%s'" msgstr "用户 '%s' 无效" #, python-format msgid "iscsi portal, %s, not found" msgstr "找不到 iscsi 门户网站 %s" #, python-format msgid "key manager error: %(reason)s" msgstr "发生密钥管理器错误:%(reason)s" msgid "limit param must be an integer" msgstr "limit 参数必须是整数" msgid "limit param must be positive" msgstr "limit参数必须是正数" msgid "manage_existing requires a 'name' key to identify an existing volume." msgstr "manage_existing 需要“name”键以标识现有卷。" #, python-format msgid "" "manage_existing_snapshot: Error managing existing replay %(ss)s on volume " "%(vol)s" msgstr "manage_existing_snapshot:管理卷 %(vol)s 上的现有重放 %(ss)s 时出错" #, python-format msgid "marker not found: %s" msgstr "没有找到标记: %s" #, python-format msgid "mdiskgrp missing quotes %s" msgstr "Mdisk 组缺少引号 %s" #, python-format msgid "mkfs failed on volume %(vol)s, error message was: %(err)s." msgstr "mkfs 在卷 %(vol)s 上发生故障,错误消息如下:%(err)s。" msgid "mock" msgstr "mock" msgid "name cannot be None" msgstr "name不能是None" #, python-format msgid "obj missing quotes %s" msgstr "对象缺少引号 %s" msgid "open_access_enabled is not off." msgstr "open_access_enabled 未关闭。" msgid "progress must be an integer percentage" msgstr "进度必须为整数百分比" #, python-format msgid "" "qemu-img %(minimum_version)s or later is required by this volume driver. " "Current qemu-img version: %(current_version)s" msgstr "" "此卷驱动程序需要 qemu-img %(minimum_version)s 或更高版本。当前 qemu-img 版" "本:%(current_version)s" msgid "" "qemu-img is not installed and the disk format is not specified. Only RAW " "images can be used if qemu-img is not installed." msgstr "" "qemu-img 未安装,并且磁盘格式未指定。仅当 qemu-img 未安装时,才能使用原始映" "像。" msgid "rados and rbd python libraries not found" msgstr "找不到 rados 和 rbd python 库" #, python-format msgid "read_deleted can only be one of 'no', 'yes' or 'only', not %r" msgstr "read_deleted 只能是“no”、“yes”或“only”其中一项,而不能是 %r" #, python-format msgid "replication_failover failed. %s not found." msgstr "replication_failover 失败。找不到 %s。" msgid "replication_failover failed. 
Backend not configured for failover" msgstr "replication_failover 失败。未配置后端,无法进行故障转移" msgid "" "restore_backup aborted, actual object list does not match object list stored " "in metadata." msgstr "" "restore_backup 已异常中止,实际的对象列表与存储在元数据中的对象列表不匹配。" #, python-format msgid "rtslib_fb is missing member %s: You may need a newer python-rtslib-fb." msgstr "rtslib_fb 缺少成员 %s:您可能需要较新的 python-rtslib-fb。" msgid "san_ip is not set." msgstr "未设置 san_ip。" msgid "san_ip must be set" msgstr "san_ip必须设置" msgid "" "san_login and/or san_password is not set for Datera driver in the cinder." "conf. Set this information and start the cinder-volume service again." msgstr "" "没有在 cinder.conf 中为 Datera 驱动程序设置 san_login 和/或 san_password。请" "设置此信息并再次启动 cinder-volume服务。" msgid "serve() can only be called once" msgstr "serve() 只能调用一次" #, python-format msgid "snapshot-%s" msgstr "快照 - %s" msgid "snapshots assigned" msgstr "已更改快照" msgid "snapshots changed" msgstr "已更改快照" msgid "source-name cannot be empty." msgstr "source-name 不能为空。" msgid "source-name format should be: 'vmdk_path@vm_inventory_path'." msgstr "source-name 格式应为“vmdk_path@vm_inventory_path”。" msgid "specs must be a dictionary." msgstr "规格说明必须是字典。" #, python-format msgid "status must be %s and" msgstr "状态必须为 %s,并且" msgid "status must be available" msgstr "状态必须可用" msgid "stop_hypermetro error." msgstr "stop_hypermetro 错误。" msgid "sync_hypermetro error." msgstr "sync_hypermetro 错误。" #, python-format msgid "target=%(target)s, lun=%(lun)s" msgstr "目标=%(target)s, lun=%(lun)s" #, python-format msgid "" "targetcli not installed and could not create default directory " "(%(default_path)s): %(exc)s" msgstr "未安装 targetcli,并且未能创建缺省目录(%(default_path)s):%(exc)s" msgid "terminate_connection: Failed to get host name from connector." msgstr "terminate_connection:未能从连接器获取主机名。" msgid "too many body keys" msgstr "过多主体密钥" #, python-format msgid "unmanage_snapshot: Cannot find snapshot named %s" msgstr "unmanage_snapshot:找不到名为 %s 的快照" #, python-format msgid "unmanage_snapshot: Cannot find volume id %s" msgstr "unmanage_snapshot:找不到卷标识 %s" #, python-format msgid "unrecognized argument %s" msgstr "无法识别自变量 %s" #, python-format msgid "unsupported compression algorithm: %s" msgstr "以下压缩算法不受支持:%s" msgid "valid iqn needed for show_target" msgstr "show_target 需要有效 iqn" #, python-format msgid "vdisk %s is not defined." msgstr "未定义 vdisk %s。" msgid "volume assigned" msgstr "卷已分配" msgid "volume changed" msgstr "卷已更改" #, python-format msgid "" "volume size %(volume_size)d is too small to restore backup of size %(size)d." msgstr "卷大小 %(volume_size)d 太小,无法复原大小为 %(size)d 的备份。" #, python-format msgid "volume size %d is invalid." msgstr "卷大小 %d 无效。" msgid "" "volume_type must be provided when creating a volume in a consistency group." msgstr "在一致性组中创建卷时,必须提供 volume_type。" msgid "volume_type must be provided when creating a volume in a group." msgstr "在组中创建卷时,必须提供 volume_type。" msgid "volume_type_id cannot be None" msgstr "volume_type_id 不能为“无”" msgid "volume_types assigned" msgstr "已分配卷类型" #, python-format msgid "volume_types must be provided to create consistency group %(name)s." 
msgstr "必须提供 volume_types,才能创建一致性组 %(name)s。" msgid "volumes assigned" msgstr "已分配卷" msgid "volumes changed" msgstr "已更改卷" ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/manager.py0000664000175000017500000003155700000000000016332 0ustar00zuulzuul00000000000000# Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Base Manager class. Managers are responsible for a certain aspect of the system. It is a logical grouping of code relating to a portion of the system. In general other components should be using the manager to make changes to the components that it is responsible for. For example, other components that need to deal with volumes in some way, should do so by calling methods on the VolumeManager instead of directly changing fields in the database. This allows us to keep all of the code relating to volumes in the same place. We have adopted a basic strategy of Smart managers and dumb data, which means rather than attaching methods to data objects, components should call manager methods that act on the data. Methods on managers that can be executed locally should be called directly. If a particular method must execute on a remote host, this should be done via rpc to the service that wraps the manager Managers should be responsible for most of the db access, and non-implementation specific data. Anything implementation specific that can't be generalized should be done by the Driver. In general, we prefer to have one manager with multiple drivers for different implementations, but sometimes it makes sense to have multiple managers. You can think of it this way: Abstract different overall strategies at the manager level(FlatNetwork vs VlanNetwork), and different implementations at the driver level(LinuxNetDriver vs CiscoNetDriver). Managers will often provide methods for initial setup of a host or periodic tasks to a wrapping service. This module provides Manager, a base class for managers. """ from eventlet import greenpool from eventlet import tpool from oslo_config import cfg import oslo_config.types from oslo_log import log as logging import oslo_messaging as messaging from oslo_service import periodic_task from oslo_utils import timeutils from cinder import context from cinder import db from cinder.db import base from cinder import exception from cinder import objects from cinder import rpc from cinder.scheduler import rpcapi as scheduler_rpcapi from cinder import utils CONF = cfg.CONF LOG = logging.getLogger(__name__) class PeriodicTasks(periodic_task.PeriodicTasks): def __init__(self): super(PeriodicTasks, self).__init__(CONF) class Manager(base.Base, PeriodicTasks): # Set RPC API version to 1.0 by default. 
RPC_API_VERSION = '1.0' target = messaging.Target(version=RPC_API_VERSION) def __init__( self, host: oslo_config.types.HostAddress = None, cluster=None, **_kwargs, ): if not host: host = CONF.host self.host: oslo_config.types.HostAddress = host self.cluster = cluster self.additional_endpoints: list = [] self.availability_zone = CONF.storage_availability_zone super().__init__() def _set_tpool_size(self, nthreads: int) -> None: # NOTE(geguileo): Until PR #472 is merged we have to be very careful # not to call "tpool.execute" before calling this method. tpool.set_num_threads(nthreads) @property def service_topic_queue(self): return self.cluster or self.host def init_host(self, service_id, added_to_cluster=None): """Handle initialization if this is a standalone service. A hook point for services to execute tasks before the services are made available (i.e. showing up on RPC and starting to accept RPC calls) to other components. Child classes should override this method. :param service_id: ID of the service where the manager is running. :param added_to_cluster: True when a host's cluster configuration has changed from not being defined or being '' to any other value and the DB service record reflects this new value. """ pass def init_host_with_rpc(self): """A hook for service to do jobs after RPC is ready. Like init_host(), this method is a hook where services get a chance to execute tasks that *need* RPC. Child classes should override this method. """ pass def is_working(self): """Method indicating if service is working correctly. This method is supposed to be overridden by subclasses and return if manager is working correctly. """ return True def reset(self): """Method executed when SIGHUP is caught by the process. We're utilizing it to reset RPC API version pins to avoid restart of the service when rolling upgrade is completed. """ LOG.info('Resetting cached RPC version pins.') rpc.LAST_OBJ_VERSIONS = {} rpc.LAST_RPC_VERSIONS = {} def set_log_levels(self, context, log_request): utils.set_log_levels(log_request.prefix, log_request.level) def get_log_levels(self, context, log_request): levels = utils.get_log_levels(log_request.prefix) log_levels = [objects.LogLevel(context, prefix=prefix, level=level) for prefix, level in levels.items()] return objects.LogLevelList(context, objects=log_levels) class ThreadPoolManager(Manager): def __init__(self, *args, **kwargs): self._tp = greenpool.GreenPool() super(ThreadPoolManager, self).__init__(*args, **kwargs) def _add_to_threadpool(self, func, *args, **kwargs): self._tp.spawn_n(func, *args, **kwargs) class SchedulerDependentManager(ThreadPoolManager): """Periodically send capability updates to the Scheduler services. Services that need to update the Scheduler of their capabilities should derive from this class. Otherwise they can derive from manager.Manager directly. Updates are only sent after update_service_capabilities is called with non-None values. 
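    For illustration only (all names below are hypothetical), a derived
    manager looks roughly like this; the reported capabilities are remembered
    locally and pushed to the scheduler the next time
    _publish_service_capabilities() runs, typically from a periodic task in
    the concrete service:

        class ExampleVolumeManager(SchedulerDependentManager):
            def _report_driver_status(self, context):
                stats = {'volume_backend_name': 'example',
                         'total_capacity_gb': 100,
                         'free_capacity_gb': 40}
                self.update_service_capabilities(stats)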
""" def __init__( self, host=None, service_name='undefined', cluster=None, *args, **kwargs, ): self.last_capabilities = None self.service_name = service_name self.scheduler_rpcapi = scheduler_rpcapi.SchedulerAPI() super().__init__(host, cluster=cluster, *args, **kwargs) def update_service_capabilities(self, capabilities): """Remember these capabilities to send on next periodic update.""" self.last_capabilities = capabilities def _publish_service_capabilities(self, context): """Pass data back to the scheduler at a periodic interval.""" if self.last_capabilities: LOG.debug('Notifying Schedulers of capabilities ...') self.scheduler_rpcapi.update_service_capabilities( context, self.service_name, self.host, self.last_capabilities, self.cluster) try: self.scheduler_rpcapi.notify_service_capabilities( context, self.service_name, self.service_topic_queue, self.last_capabilities) except exception.ServiceTooOld as e: # This means we have Newton's c-sch in the deployment, so # rpcapi cannot send the message. We can safely ignore the # error. Log it because it shouldn't happen after upgrade. msg = ("Failed to notify about cinder-volume service " "capabilities for host %(host)s. This is normal " "during a live upgrade. Error: %(e)s") LOG.warning(msg, {'host': self.host, 'e': e}) def reset(self): super(SchedulerDependentManager, self).reset() self.scheduler_rpcapi = scheduler_rpcapi.SchedulerAPI() class CleanableManager(object): def do_cleanup(self, context: context.RequestContext, cleanup_request: objects.CleanupRequest) -> None: LOG.info('Initiating service %s cleanup', cleanup_request.service_id) # If the 'until' field in the cleanup request is not set, we default to # this very moment. until = cleanup_request.until or timeutils.utcnow() keep_entry: bool = False to_clean = db.worker_get_all( context, resource_type=cleanup_request.resource_type, resource_id=cleanup_request.resource_id, service_id=cleanup_request.service_id, until=until) for clean in to_clean: original_service_id = clean.service_id original_time = clean.updated_at # Try to do a soft delete to mark the entry as being cleaned up # by us (setting service id to our service id). res = db.worker_claim_for_cleanup(context, claimer_id=self.service_id, orm_worker=clean) # Claim may fail if entry is being cleaned by another service, has # been removed (finished cleaning) by another service or the user # started a new cleanable operation. # In any of these cases we don't have to do cleanup or remove the # worker entry. 
if not res: continue # Try to get versioned object for resource we have to cleanup try: vo_cls = getattr(objects, clean.resource_type) vo = vo_cls.get_by_id(context, clean.resource_id) # Set the worker DB entry in the VO and mark it as being a # clean operation clean.cleaning = True vo.worker = clean except exception.NotFound: LOG.debug('Skipping cleanup for non existent %(type)s %(id)s.', {'type': clean.resource_type, 'id': clean.resource_id}) else: # Resource status should match if vo.status != clean.status: LOG.debug('Skipping cleanup for mismatching work on ' '%(type)s %(id)s: %(exp_sts)s <> %(found_sts)s.', {'type': clean.resource_type, 'id': clean.resource_id, 'exp_sts': clean.status, 'found_sts': vo.status}) else: LOG.info('Cleaning %(type)s with id %(id)s and status ' '%(status)s', {'type': clean.resource_type, 'id': clean.resource_id, 'status': clean.status}, resource=vo) try: # Some cleanup jobs are performed asynchronously, so # we don't delete the worker entry, they'll take care # of it keep_entry = self._do_cleanup(context, vo) except Exception: LOG.exception('Could not perform cleanup.') # Return the worker DB entry to the original service db.worker_update(context, clean.id, service_id=original_service_id, updated_at=original_time) continue # The resource either didn't exist or was properly cleaned, either # way we can remove the entry from the worker table if the cleanup # method doesn't want to keep the entry (for example for delayed # deletion). if not keep_entry and not db.worker_destroy(context, id=clean.id): LOG.warning('Could not remove worker entry %s.', clean.id) LOG.info('Service %s cleanup completed.', cleanup_request.service_id) def _do_cleanup(self, ctxt: context.RequestContext, vo_resource) -> bool: return False def init_host(self, service_id, added_to_cluster=None, **kwargs): ctxt = context.get_admin_context() self.service_id = service_id cleanup_request = objects.CleanupRequest(service_id=service_id) self.do_cleanup(ctxt, cleanup_request) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315577.0831182 cinder-27.0.0/cinder/message/0000775000175000017500000000000000000000000015757 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/message/__init__.py0000664000175000017500000000000000000000000020056 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/message/api.py0000664000175000017500000001703000000000000017103 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Handles all requests related to user facing messages. 
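For illustration only: a typical caller builds one API instance and records a
message when an asynchronous operation fails.  The context and volume names
below are assumed to exist in the caller and are not defined here:

    from cinder.message import api as message_api
    from cinder.message import message_field

    message_api.API().create(
        context,
        action=message_field.Action.ATTACH_VOLUME,
        resource_uuid=volume.id,
        detail=message_field.Detail.ATTACH_ERROR)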
""" import datetime from oslo_config import cfg from oslo_log import log as logging from oslo_utils import timeutils from cinder.db import base from cinder.message import message_field messages_opts = [ cfg.IntOpt('message_ttl', default=2592000, help='message minimum life in seconds.'), cfg.IntOpt('message_reap_interval', default=86400, help='interval between periodic task runs to clean expired ' 'messages in seconds.') ] CONF = cfg.CONF CONF.register_opts(messages_opts) LOG = logging.getLogger(__name__) class API(base.Base): """API for handling user messages. Cinder Messages describe the outcome of a user action using predefined fields that are members of objects defined in the cinder.message.message_field package. They are intended to be exposed to end users. Their primary purpose is to provide end users with a means of discovering what went wrong when an asynchronous action in the Volume REST API (for which they've already received a 2xx response) fails. Messages contain an 'expires_at' field based on the creation time plus the value of the 'message_ttl' configuration option. They are periodically reaped by a task of the SchedulerManager class whose periodicity is given by the 'message_reap_interval' configuration option. """ def create(self, context, action, resource_type=message_field.Resource.VOLUME, resource_uuid=None, exception=None, detail=None, level="ERROR"): """Create a message record with the specified information. :param context: current context object :param action: a message_field.Action field describing what was taking place when this message was created :param resource_type: a message_field.Resource field describing the resource this message applies to. Default is message_field.Resource.VOLUME :param resource_uuid: the resource ID if this message applies to an existing resource. Default is None :param exception: if an exception has occurred, you can pass it in and it will be translated into an appropriate message detail ID (possibly message_field.Detail.UNKNOWN_ERROR). The message in the exception itself is ignored in order not to expose sensitive information to end users. Default is None :param detail: a message_field.Detail field describing the event the message is about. Default is None, in which case message_field.Detail.UNKNOWN_ERROR will be used for the message unless an exception in the message_field.EXCEPTION_DETAIL_MAPPINGS is passed; in that case the message_field.Detail field that's mapped to the exception is used. :param level: a string describing the severity of the message. Suggested values are 'INFO', 'ERROR', 'WARNING'. Default is 'ERROR'. """ LOG.info("Creating message record for request_id = %s", context.request_id) # Updates expiry time for message as per message_ttl config. 
expires_at = (timeutils.utcnow() + datetime.timedelta( seconds=CONF.message_ttl)) detail_id = message_field.translate_detail_id(exception, detail) message_record = {'project_id': context.project_id, 'request_id': context.request_id, 'resource_type': resource_type, 'resource_uuid': resource_uuid, 'action_id': action[0] if action else '', 'message_level': level, 'event_id': "VOLUME_%s_%s_%s" % (resource_type, action[0], detail_id), 'detail_id': detail_id, 'expires_at': expires_at} try: self.db.message_create(context, message_record) except Exception: LOG.exception("Failed to create message record " "for request_id %s", context.request_id) def create_from_request_context(self, context, exception=None, detail=None, level="ERROR"): """Create a message record with the specified information. :param context: current context object which we must have populated with the message_action, message_resource_type and message_resource_id fields :param exception: if an exception has occurred, you can pass it in and it will be translated into an appropriate message detail ID (possibly message_field.Detail.UNKNOWN_ERROR). The message in the exception itself is ignored in order not to expose sensitive information to end users. Default is None :param detail: a message_field.Detail field describing the event the message is about. Default is None, in which case message_field.Detail.UNKNOWN_ERROR will be used for the message unless an exception in the message_field.EXCEPTION_DETAIL_MAPPINGS is passed; in that case the message_field.Detail field that's mapped to the exception is used. :param level: a string describing the severity of the message. Suggested values are 'INFO', 'ERROR', 'WARNING'. Default is 'ERROR'. """ self.create(context=context, action=context.message_action, resource_type=context.message_resource_type, resource_uuid=context.message_resource_id, exception=exception, detail=detail, level=level) def get(self, context, id): """Return message with the specified id.""" return self.db.message_get(context, id) def get_all(self, context, filters=None, marker=None, limit=None, offset=None, sort_keys=None, sort_dirs=None): """Return all messages for the given context.""" filters = filters or {} messages = self.db.message_get_all(context, filters=filters, marker=marker, limit=limit, offset=offset, sort_keys=sort_keys, sort_dirs=sort_dirs) return messages def delete(self, context, id): """Delete message with the specified id.""" ctx = context.elevated() return self.db.message_destroy(ctx, id) def cleanup_expired_messages(self, context): ctx = context.elevated() count = self.db.cleanup_expired_messages(ctx) LOG.info("Deleted %s expired messages.", count) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/message/defined_messages.py0000664000175000017500000000367700000000000021633 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Event ID and user visible message mapping. 
Event IDs are used to look up the message to be displayed for an API Message object. All defined messages should be appropriate for any API user to see and not contain any sensitive information. A good rule-of-thumb is to be very general in error messages unless the issue is due to a bad user action, then be specific. """ from cinder.i18n import _ class EventIds(object): UNKNOWN_ERROR = 'VOLUME_000001' UNABLE_TO_ALLOCATE = 'VOLUME_000002' ATTACH_READONLY_VOLUME = 'VOLUME_000003' IMAGE_FROM_VOLUME_OVER_QUOTA = 'VOLUME_000004' UNMANAGE_ENCRYPTED_VOLUME_UNSUPPORTED = 'VOLUME_000005' event_id_message_map = { EventIds.UNKNOWN_ERROR: _("An unknown error occurred."), EventIds.UNABLE_TO_ALLOCATE: _( "No storage could be allocated for this volume " "request. You may be able to try another size or" " volume type."), EventIds.ATTACH_READONLY_VOLUME: _( "A readonly volume must be attached as readonly."), EventIds.IMAGE_FROM_VOLUME_OVER_QUOTA: _( "Failed to copy volume to image as image quota has been met. Please " "delete images or have your limit increased, then try again."), EventIds.UNMANAGE_ENCRYPTED_VOLUME_UNSUPPORTED: _( "Unmanaging encrypted volumes is not supported."), } def get_message_text(event_id): return event_id_message_map[event_id] ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/message/message_field.py0000664000175000017500000002023600000000000021123 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Message Resource, Action, Detail and user visible message. Use Resource, Action and Detail's combination to indicate the Event in the format of: EVENT: VOLUME_RESOURCE_ACTION_DETAIL Also, use exception-to-detail mapping to decrease the workload of classifying event in cinder's task code. 
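For example (illustrative, using the fields defined below): a failed attach
on a volume combines Resource.VOLUME, Action.ATTACH_VOLUME ('002') and
Detail.ATTACH_ERROR ('021'), which the message API stores as the event id

    VOLUME_VOLUME_002_021

and which translate_action('002') and translate_detail('021') turn back into
the user visible strings 'attach volume' and 'Failed to attach volume.'.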
""" from cinder.i18n import _ class Resource(object): VOLUME = 'VOLUME' VOLUME_SNAPSHOT = 'VOLUME_SNAPSHOT' VOLUME_BACKUP = 'VOLUME_BACKUP' class Action(object): SCHEDULE_ALLOCATE_VOLUME = ('001', _('schedule allocate volume')) ATTACH_VOLUME = ('002', _('attach volume')) COPY_VOLUME_TO_IMAGE = ('003', _('copy volume to image')) UPDATE_ATTACHMENT = ('004', _('update attachment')) COPY_IMAGE_TO_VOLUME = ('005', _('copy image to volume')) UNMANAGE_VOLUME = ('006', _('unmanage volume')) EXTEND_VOLUME = ('007', _('extend volume')) CREATE_VOLUME_FROM_BACKEND = ('008', _('create volume from backend storage')) SNAPSHOT_CREATE = ('009', _('create snapshot')) SNAPSHOT_DELETE = ('010', _('delete snapshot')) SNAPSHOT_UPDATE = ('011', _('update snapshot')) SNAPSHOT_METADATA_UPDATE = ('012', _('update snapshot metadata')) BACKUP_CREATE = ('013', _('create backup')) BACKUP_DELETE = ('014', _('delete backup')) BACKUP_RESTORE = ('015', _('restore backup')) REIMAGE_VOLUME = ('016', _('reimage volume')) ALL = (SCHEDULE_ALLOCATE_VOLUME, ATTACH_VOLUME, COPY_VOLUME_TO_IMAGE, UPDATE_ATTACHMENT, COPY_IMAGE_TO_VOLUME, UNMANAGE_VOLUME, EXTEND_VOLUME, CREATE_VOLUME_FROM_BACKEND, SNAPSHOT_CREATE, SNAPSHOT_DELETE, SNAPSHOT_UPDATE, SNAPSHOT_METADATA_UPDATE, BACKUP_CREATE, BACKUP_DELETE, BACKUP_RESTORE, REIMAGE_VOLUME, ) class Detail(object): UNKNOWN_ERROR = ('001', _('An unknown error occurred.')) DRIVER_NOT_INITIALIZED = ('002', _('Driver is not initialized at present.')) NO_BACKEND_AVAILABLE = ('003', _('Could not find any available ' 'weighted backend.')) FAILED_TO_UPLOAD_VOLUME = ('004', _("Failed to upload volume to image " "at backend.")) VOLUME_ATTACH_MODE_INVALID = ('005', _("Volume's attach mode is invalid.")) QUOTA_EXCEED = ('006', _("Not enough quota resource for operation.")) NOT_ENOUGH_SPACE_FOR_IMAGE = ('007', _("Image used for creating volume exceeds " "available space.")) UNMANAGE_ENC_NOT_SUPPORTED = ( '008', _("Unmanaging encrypted volumes is not supported.")) NOTIFY_COMPUTE_SERVICE_FAILED = ( '009', _("Compute service failed to extend volume.")) DRIVER_FAILED_EXTEND = ( '010', _("Volume Driver failed to extend volume.")) SIGNATURE_VERIFICATION_FAILED = ( '011', _("Image signature verification failed.")) DRIVER_FAILED_CREATE = ( '012', _('Driver failed to create the volume.')) SNAPSHOT_CREATE_ERROR = ('013', _("Snapshot failed to create.")) SNAPSHOT_UPDATE_METADATA_FAILED = ( '014', _("Volume snapshot update metadata failed.")) SNAPSHOT_IS_BUSY = ('015', _("Snapshot is busy.")) SNAPSHOT_DELETE_ERROR = ('016', _("Snapshot failed to delete.")) BACKUP_INVALID_STATE = ('017', _("Backup status is invalid.")) BACKUP_SERVICE_DOWN = ('018', _("Backup service is down.")) BACKUP_CREATE_DEVICE_ERROR = ( '019', _("Failed to get backup device from the volume service.")) BACKUP_CREATE_DRIVER_ERROR = ( '020', ("Backup driver failed to create backup.")) ATTACH_ERROR = ('021', _("Failed to attach volume.")) DETACH_ERROR = ('022', _("Failed to detach volume.")) BACKUP_CREATE_CLEANUP_ERROR = ( '023', _("Cleanup of temporary volume/snapshot failed.")) BACKUP_SCHEDULE_ERROR = ( '024', ("Backup failed to schedule. 
Service not found for creating backup.")) BACKUP_DELETE_DRIVER_ERROR = ( '025', _("Backup driver failed to delete backup.")) BACKUP_RESTORE_ERROR = ( '026', _("Backup driver failed to restore backup.")) VOLUME_INVALID_STATE = ('027', _("Volume status is invalid.")) REIMAGE_VOLUME_FAILED = ( '028', _("Compute service failed to reimage volume.")) IMAGE_FORMAT_UNACCEPTABLE = ( '029', _("The image disk format must be the same as the volume format for " "the volume type you are requesting.")) INCREMENTAL_BACKUP_FORCES_FULL_BACKUP = ( '030', _("Incremental backup not possible, forcing full backup.")) ALL = (UNKNOWN_ERROR, DRIVER_NOT_INITIALIZED, NO_BACKEND_AVAILABLE, FAILED_TO_UPLOAD_VOLUME, VOLUME_ATTACH_MODE_INVALID, QUOTA_EXCEED, NOT_ENOUGH_SPACE_FOR_IMAGE, UNMANAGE_ENC_NOT_SUPPORTED, NOTIFY_COMPUTE_SERVICE_FAILED, DRIVER_FAILED_EXTEND, SIGNATURE_VERIFICATION_FAILED, DRIVER_FAILED_CREATE, SNAPSHOT_CREATE_ERROR, SNAPSHOT_UPDATE_METADATA_FAILED, SNAPSHOT_IS_BUSY, SNAPSHOT_DELETE_ERROR, BACKUP_INVALID_STATE, BACKUP_SERVICE_DOWN, BACKUP_CREATE_DEVICE_ERROR, BACKUP_CREATE_DRIVER_ERROR, ATTACH_ERROR, DETACH_ERROR, BACKUP_CREATE_CLEANUP_ERROR, BACKUP_SCHEDULE_ERROR, BACKUP_DELETE_DRIVER_ERROR, BACKUP_RESTORE_ERROR, VOLUME_INVALID_STATE, REIMAGE_VOLUME_FAILED, IMAGE_FORMAT_UNACCEPTABLE, INCREMENTAL_BACKUP_FORCES_FULL_BACKUP, ) # Exception and detail mappings EXCEPTION_DETAIL_MAPPINGS = { DRIVER_NOT_INITIALIZED: ['DriverNotInitialized'], NO_BACKEND_AVAILABLE: ['NoValidBackend'], VOLUME_ATTACH_MODE_INVALID: ['InvalidVolumeAttachMode'], QUOTA_EXCEED: ['ImageLimitExceeded', 'BackupLimitExceeded', 'SnapshotLimitExceeded'], NOT_ENOUGH_SPACE_FOR_IMAGE: ['ImageTooBig'], SNAPSHOT_IS_BUSY: ['SnapshotIsBusy'], } def translate_action(action_id): action_message = next((action[1] for action in Action.ALL if action[0] == action_id), None) return action_message or 'unknown action' def translate_detail(detail_id): detail_message = next((action[1] for action in Detail.ALL if action[0] == detail_id), None) return detail_message or Detail.UNKNOWN_ERROR[1] def translate_detail_id(exception, detail): """Get a detail_id to use for a message. If exception is in the EXCEPTION_DETAIL_MAPPINGS, returns the detail_id of the mapped Detail field. If exception is not in the mapping or is None, returns the detail_id of the passed-in Detail field. Otherwise, returns the detail_id of Detail.UNKNOWN_ERROR. :param exception: an Exception (or None) :param detail: a message_field.Detail field (or None) :returns: string :returns: the detail_id of a message_field.Detail field """ if exception is not None and isinstance(exception, Exception): for key, value in Detail.EXCEPTION_DETAIL_MAPPINGS.items(): if exception.__class__.__name__ in value: return key[0] if detail in Detail.ALL: return detail[0] return Detail.UNKNOWN_ERROR[0] ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315577.0871184 cinder-27.0.0/cinder/objects/0000775000175000017500000000000000000000000015764 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/objects/__init__.py0000664000175000017500000000374000000000000020101 0ustar00zuulzuul00000000000000# Copyright 2015 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
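# Illustrative example (not part of this package) of how
# cinder.message.message_field.translate_detail_id(), defined above, resolves
# a detail id: a mapped exception class name wins over an explicit detail, an
# unmapped exception falls back to the detail argument, and anything else
# resolves to Detail.UNKNOWN_ERROR ('001').  The local SnapshotIsBusy class is
# a hypothetical stand-in that merely reuses a mapped class name.

from cinder.message import message_field as mf

class SnapshotIsBusy(Exception):
    pass

assert mf.translate_detail_id(SnapshotIsBusy(), mf.Detail.ATTACH_ERROR) == '015'
assert mf.translate_detail_id(ValueError(), mf.Detail.ATTACH_ERROR) == '021'
assert mf.translate_detail_id(None, None) == '001'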
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # NOTE(comstud): You may scratch your head as you see code that imports # this module and then accesses attributes for objects such as Instance, # etc, yet you do not see these attributes in here. Never fear, there is # a little bit of magic. When objects are registered, an attribute is set # on this module automatically, pointing to the newest/latest version of # the object. def register_all(): # NOTE(danms): You must make sure your object gets imported in this # function in order for it to be registered by services that may # need to receive it via RPC. __import__('cinder.objects.backup') # NOTE(geguileo): Don't include cleanable to prevent circular imports __import__('cinder.objects.cleanup_request') __import__('cinder.objects.cgsnapshot') __import__('cinder.objects.cluster') __import__('cinder.objects.consistencygroup') __import__('cinder.objects.qos_specs') __import__('cinder.objects.request_spec') __import__('cinder.objects.service') __import__('cinder.objects.snapshot') __import__('cinder.objects.volume') __import__('cinder.objects.volume_attachment') __import__('cinder.objects.volume_type') __import__('cinder.objects.group_type') __import__('cinder.objects.group') __import__('cinder.objects.group_snapshot') __import__('cinder.objects.manageableresources') __import__('cinder.objects.dynamic_log') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/objects/backup.py0000664000175000017500000003400500000000000017605 0ustar00zuulzuul00000000000000# Copyright 2015 Intel Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg from oslo_serialization import base64 from oslo_serialization import jsonutils from oslo_versionedobjects import fields from cinder import context from cinder import db from cinder import exception from cinder.i18n import _ from cinder import objects from cinder.objects import base from cinder.objects import fields as c_fields CONF = cfg.CONF @base.CinderObjectRegistry.register class Backup(base.CinderPersistentObject, base.CinderObject, base.CinderObjectDictCompat, base.CinderComparableObject): # Version 1.0: Initial version # Version 1.1: Add new field num_dependent_backups and extra fields # is_incremental and has_dependent_backups. # Version 1.2: Add new field snapshot_id and data_timestamp. 
# Version 1.3: Changed 'status' field to use BackupStatusField # Version 1.4: Add restore_volume_id # Version 1.5: Add metadata # Version 1.6: Add encryption_key_id # Version 1.7: Add parent VERSION = '1.7' OPTIONAL_FIELDS = ('metadata', 'parent') # NOTE: When adding a field obj_make_compatible needs to be updated fields = { 'id': fields.UUIDField(), 'user_id': fields.StringField(), 'project_id': fields.StringField(), 'volume_id': fields.UUIDField(), 'host': fields.StringField(nullable=True), 'availability_zone': fields.StringField(nullable=True), 'container': fields.StringField(nullable=True), 'parent_id': fields.StringField(nullable=True), 'parent': fields.ObjectField('Backup', nullable=True), 'status': c_fields.BackupStatusField(nullable=True), 'fail_reason': fields.StringField(nullable=True), 'size': fields.IntegerField(nullable=True), 'display_name': fields.StringField(nullable=True), 'display_description': fields.StringField(nullable=True), # NOTE(dulek): Metadata field is used to store any strings by backup # drivers, that's why it can't be DictOfStringsField. 'service_metadata': fields.StringField(nullable=True), 'service': fields.StringField(nullable=True), 'object_count': fields.IntegerField(nullable=True), 'temp_volume_id': fields.StringField(nullable=True), 'temp_snapshot_id': fields.StringField(nullable=True), 'num_dependent_backups': fields.IntegerField(nullable=True), 'snapshot_id': fields.StringField(nullable=True), 'data_timestamp': fields.DateTimeField(nullable=True), 'restore_volume_id': fields.StringField(nullable=True), 'metadata': fields.DictOfStringsField(nullable=True), 'encryption_key_id': fields.StringField(nullable=True), } obj_extra_fields = ['name', 'is_incremental', 'has_dependent_backups'] def __init__(self, *args, **kwargs): super(Backup, self).__init__(*args, **kwargs) self._orig_metadata = {} self._reset_metadata_tracking() def _reset_metadata_tracking(self, fields=None): if fields is None or 'metadata' in fields: self._orig_metadata = (dict(self.metadata) if self.obj_attr_is_set('metadata') else {}) @classmethod def _get_expected_attrs(cls, context, *args, **kwargs): return 'metadata', @property def name(self): return CONF.backup_name_template % self.id @property def is_incremental(self) -> bool: return bool(self.parent_id) @property def has_dependent_backups(self) -> bool: return bool(self.num_dependent_backups) @classmethod def _from_db_object(cls, context: context.RequestContext, backup, db_backup, expected_attrs=None) -> 'Backup': if expected_attrs is None: expected_attrs = [] for name, field in backup.fields.items(): if name in cls.OPTIONAL_FIELDS: continue value = db_backup.get(name) if isinstance(field, fields.IntegerField): value = value if value is not None else 0 backup[name] = value if 'metadata' in expected_attrs: metadata = db_backup.get('backup_metadata') if metadata is None: raise exception.MetadataAbsent() backup.metadata = {item['key']: item['value'] for item in metadata} backup._context = context backup.obj_reset_changes() return backup def obj_reset_changes(self, fields=None): super(Backup, self).obj_reset_changes(fields) self._reset_metadata_tracking(fields=fields) def obj_load_attr(self, attrname): if attrname not in self.OPTIONAL_FIELDS: raise exception.ObjectActionError( action='obj_load_attr', reason=_('attribute %s not lazy-loadable') % attrname) if not self._context: raise exception.OrphanedObjectError(method='obj_load_attr', objtype=self.obj_name()) if attrname == 'parent': if self.parent_id: self.parent = 
self.get_by_id(self._context, self.parent_id) else: self.parent = None self.obj_reset_changes(fields=[attrname]) def obj_what_changed(self): changes = super(Backup, self).obj_what_changed() if hasattr(self, 'metadata') and self.metadata != self._orig_metadata: changes.add('metadata') return changes def create(self) -> None: if self.obj_attr_is_set('id'): raise exception.ObjectActionError(action='create', reason='already created') updates = self.cinder_obj_get_changes() db_backup = db.backup_create(self._context, updates) self._from_db_object(self._context, self, db_backup) def save(self) -> None: updates = self.cinder_obj_get_changes() if updates: if 'metadata' in updates: metadata = updates.pop('metadata', None) self.metadata = db.backup_metadata_update(self._context, self.id, metadata, True) updates.pop('parent', None) db.backup_update(self._context, self.id, updates) self.obj_reset_changes() def destroy(self) -> None: with self.obj_as_admin(): updated_values = db.backup_destroy(self._context, self.id) self.update(updated_values) self.obj_reset_changes(updated_values.keys()) @staticmethod def decode_record(backup_url) -> dict: """Deserialize backup metadata from string into a dictionary. :raises InvalidInput: """ try: return jsonutils.loads(base64.decode_as_text(backup_url)) except TypeError: msg = _("Can't decode backup record.") except ValueError: msg = _("Can't parse backup record.") raise exception.InvalidInput(reason=msg) def encode_record(self, **kwargs) -> str: """Serialize backup object, with optional extra info, into a string.""" # We don't want to export extra fields and we want to force lazy # loading, so we can't use dict(self) or self.obj_to_primitive record = {name: field.to_primitive(self, name, getattr(self, name)) for name, field in self.fields.items() if name != 'parent'} # We must update kwargs instead of record to ensure we don't overwrite # "real" data from the backup kwargs.update(record) retval = jsonutils.dump_as_bytes(kwargs) return base64.encode_as_text(retval) @base.CinderObjectRegistry.register class BackupList(base.ObjectListBase, base.CinderObject): VERSION = '1.0' fields = { 'objects': fields.ListOfObjectsField('Backup'), } @classmethod def get_all(cls, context: context.RequestContext, filters=None, marker=None, limit=None, offset=None, sort_keys=None, sort_dirs=None) -> 'BackupList': backups = db.backup_get_all(context, filters, marker, limit, offset, sort_keys, sort_dirs) expected_attrs = Backup._get_expected_attrs(context) return base.obj_make_list(context, cls(context), objects.Backup, backups, expected_attrs=expected_attrs) @classmethod def get_all_by_host(cls, context: context.RequestContext, host: str) -> 'BackupList': backups = db.backup_get_all_by_host(context, host) expected_attrs = Backup._get_expected_attrs(context) return base.obj_make_list(context, cls(context), objects.Backup, backups, expected_attrs=expected_attrs) @classmethod def get_all_by_project(cls, context, project_id, filters=None, marker=None, limit=None, offset=None, sort_keys=None, sort_dirs=None): backups = db.backup_get_all_by_project(context, project_id, filters, marker, limit, offset, sort_keys, sort_dirs) expected_attrs = Backup._get_expected_attrs(context) return base.obj_make_list(context, cls(context), objects.Backup, backups, expected_attrs=expected_attrs) @classmethod def get_all_by_volume( cls, context: context.RequestContext, volume_id: str, vol_project_id: str, filters=None) -> 'BackupList': backups = db.backup_get_all_by_volume( context, volume_id, vol_project_id, 
filters) expected_attrs = Backup._get_expected_attrs(context) return base.obj_make_list(context, cls(context), objects.Backup, backups, expected_attrs=expected_attrs) @classmethod def get_all_active_by_window(cls, context, begin, end): backups = db.backup_get_all_active_by_window(context, begin, end) expected_attrs = Backup._get_expected_attrs(context) return base.obj_make_list(context, cls(context), objects.Backup, backups, expected_attrs=expected_attrs) @base.CinderObjectRegistry.register class BackupImport(Backup): """Special object for Backup Imports. This class should not be used for anything but Backup creation when importing backups to the DB. On creation it allows to specify the ID for the backup, since it's the reference used in parent_id it is imperative that this is preserved. Backup Import objects get promoted to standard Backups when the import is completed. """ def create(self): updates = self.cinder_obj_get_changes() db_backup = db.backup_create(self._context, updates) self._from_db_object(self._context, self, db_backup) @base.CinderObjectRegistry.register class BackupDeviceInfo(base.CinderObject, base.CinderObjectDictCompat, base.CinderComparableObject): # Version 1.0: Initial version VERSION = '1.0' fields = { 'volume': fields.ObjectField('Volume', nullable=True), 'snapshot': fields.ObjectField('Snapshot', nullable=True), 'secure_enabled': fields.BooleanField(default=False), } obj_extra_fields = ['is_snapshot', 'device_obj'] @property def is_snapshot(self): if self.obj_attr_is_set('snapshot') == self.obj_attr_is_set('volume'): msg = _("Either snapshot or volume field should be set.") raise exception.ProgrammingError(message=msg) return self.obj_attr_is_set('snapshot') @property def device_obj(self): return self.snapshot if self.is_snapshot else self.volume # FIXME(sborkows): This should go away in early O as we stop supporting # backward compatibility with M. @classmethod def from_primitive(cls, primitive, context, expected_attrs=None): backup_device = BackupDeviceInfo() if primitive['is_snapshot']: if isinstance(primitive['backup_device'], objects.Snapshot): backup_device.snapshot = primitive['backup_device'] else: backup_device.snapshot = objects.Snapshot._from_db_object( context, objects.Snapshot(), primitive['backup_device'], expected_attrs=expected_attrs) else: if isinstance(primitive['backup_device'], objects.Volume): backup_device.volume = primitive['backup_device'] else: backup_device.volume = objects.Volume._from_db_object( context, objects.Volume(), primitive['backup_device'], expected_attrs=expected_attrs) backup_device.secure_enabled = primitive['secure_enabled'] return backup_device # FIXME(sborkows): This should go away in early O as we stop supporting # backward compatibility with M. def to_primitive(self, context): backup_device = (db.snapshot_get(context, self.snapshot.id) if self.is_snapshot else db.volume_get(context, self.volume.id)) primitive = {'backup_device': backup_device, 'secure_enabled': self.secure_enabled, 'is_snapshot': self.is_snapshot} return primitive ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/objects/base.py0000664000175000017500000005623700000000000017265 0ustar00zuulzuul00000000000000# Copyright 2015 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Cinder common internal object model""" from collections import abc import contextlib import datetime from oslo_log import log as logging from oslo_utils import versionutils from oslo_versionedobjects import base from oslo_versionedobjects import fields from cinder import db from cinder import exception from cinder.i18n import _ from cinder import objects LOG = logging.getLogger('object') obj_make_list = base.obj_make_list class CinderObjectVersionsHistory(dict): """Helper class that maintains objects version history. Current state of object versions is aggregated in a single version number that explicitly identifies a set of object versions. That way a service is able to report what objects it supports using a single string and all the newer services will know exactly what that mean for a single object. """ def __init__(self): super(CinderObjectVersionsHistory, self).__init__() # NOTE(dulek): This is our pre-history and a starting point - Liberty. # We want Mitaka to be able to talk to Liberty services, so we need to # handle backporting to these objects versions (although I don't expect # we've made a lot of incompatible changes inside the objects). # # If an object doesn't exist in Liberty, RPC API compatibility layer # shouldn't send it or convert it to a dictionary. # # Please note that we do not need to add similar entires for each # release. Liberty is here just for historical reasons. self.versions = ['1.38'] self['1.38'] = { 'Backup': '1.7', 'BackupDeviceInfo': '1.0', 'BackupImport': '1.7', 'BackupList': '1.0', 'CleanupRequest': '1.0', 'CGSnapshot': '1.1', 'CGSnapshotList': '1.0', 'Cluster': '1.1', 'ClusterList': '1.0', 'ConsistencyGroup': '1.4', 'ConsistencyGroupList': '1.1', 'Group': '1.2', 'GroupList': '1.0', 'GroupSnapshot': '1.0', 'GroupSnapshotList': '1.0', 'GroupType': '1.0', 'GroupTypeList': '1.0', 'LogLevel': '1.0', 'LogLevelList': '1.0', 'ManageableSnapshot': '1.0', 'ManageableSnapshotList': '1.0', 'ManageableVolume': '1.0', 'ManageableVolumeList': '1.0', 'QualityOfServiceSpecs': '1.0', 'QualityOfServiceSpecsList': '1.0', 'RequestSpec': '1.5', 'Service': '1.6', 'ServiceList': '1.1', 'Snapshot': '1.5', 'SnapshotList': '1.0', 'Volume': '1.8', 'VolumeAttachment': '1.3', 'VolumeAttachmentList': '1.1', 'VolumeList': '1.1', 'VolumeProperties': '1.1', 'VolumeType': '1.3', 'VolumeTypeList': '1.1', } def get_current(self): return self.versions[-1] def get_current_versions(self): return self[self.get_current()] def add(self, ver, updates): if ver in self.versions: msg = 'Version %s already exists in history.' % ver raise exception.ProgrammingError(reason=msg) self[ver] = self[self.get_current()].copy() self.versions.append(ver) self[ver].update(updates) OBJ_VERSIONS = CinderObjectVersionsHistory() # NOTE(dulek): You should add a new version here each time you bump a version # of any object. As a second parameter you need to specify only what changed. # On each release we should drop backward compatibility with -2 release, since # rolling upgrades only needs to support compatibility with previous release. # So if we are in N release we can remove history from L and earlier. 
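# To make the mechanics concrete (editor's illustrative sketch; the version
# numbers below are hypothetical and not part of the recorded history): each
# add() call copies the mapping of the current entry, appends the new version
# to the history and overlays only the objects that changed, so
#
#     OBJ_VERSIONS.add('1.40', {'Backup': '1.8'})
#
# would make get_current() return '1.40' and get_current_versions() return the
# same mapping as the previous latest entry with only Backup bumped to '1.8'.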
# Example of how to keep track of this: # # TODO: (T release) remove up to next TODO (was added in R release) and # # update CinderObjectVersionsHistory # OBJ_VERSIONS.add('1.34', {'VolumeAttachment': '1.3'}) # OBJ_VERSIONS.add('1.35', {'Backup': '1.6', 'BackupImport': '1.6'}) # # # TODO: (U release) remove up to next TODO (was added in S release) and # # update CinderObjectVersionsHistory # OBJ_VERSIONS.add('1.36', {'RequestSpec': '1.4'}) # OBJ_VERSIONS.add('1.37', {'RequestSpec': '1.5'}) # OBJ_VERSIONS.add('1.38', {'Backup': '1.7', 'BackupImport': '1.7'}) # When we reach T release we remove versions 1.34 and 1.35 and update __init__ # method in CinderObjectVerseionsHistory to bump VolumeAttachment to 1.3, # Backup to 1.6 and BackupImport to 1.6, and changing the versions list to # '1.35' and the self[''] = { to self['1.35'] = { # TODO: (Z release) remove up to next TODO and update # CinderObjectVersionsHistory (was added in X release) OBJ_VERSIONS.add('1.39', {'Volume': '1.9', 'Snapshot': '1.6'}) class CinderObjectRegistry(base.VersionedObjectRegistry): def registration_hook(self, cls, index): """Hook called when registering a class. This method takes care of adding the class to cinder.objects namespace. Should registering class have a method called cinder_ovo_cls_init it will be called to support class initialization. This is convenient for all persistent classes that need to register their models. """ setattr(objects, cls.obj_name(), cls) # If registering class has a callable initialization method, call it. if isinstance(getattr(cls, 'cinder_ovo_cls_init', None), abc.Callable): cls.cinder_ovo_cls_init() class CinderObject(base.VersionedObject): # NOTE(thangp): OBJ_PROJECT_NAMESPACE needs to be set so that nova, # cinder, and other objects can exist on the same bus and be distinguished # from one another. OBJ_PROJECT_NAMESPACE = 'cinder' def cinder_obj_get_changes(self): """Returns a dict of changed fields with tz unaware datetimes. Any timezone aware datetime field will be converted to UTC timezone and returned as timezone unaware datetime. This will allow us to pass these fields directly to a db update method as they can't have timezone information. """ # Get dirtied/changed fields changes = self.obj_get_changes() # Look for datetime objects that contain timezone information for k, v in changes.items(): if isinstance(v, datetime.datetime) and v.tzinfo: # Remove timezone information and adjust the time according to # the timezone information's offset. changes[k] = v.replace(tzinfo=None) - v.utcoffset() # Return modified dict return changes def obj_make_compatible(self, primitive, target_version): _log_backport(self, target_version) super(CinderObject, self).obj_make_compatible(primitive, target_version) def __contains__(self, name): # We're using obj_extra_fields to provide aliases for some fields while # in transition period. This override is to make these aliases pass # "'foo' in obj" tests. return name in self.obj_extra_fields or super(CinderObject, self).__contains__(name) class CinderObjectDictCompat(base.VersionedObjectDictCompat): """Mix-in to provide dictionary key access compat. If an object needs to support attribute access using dictionary items instead of object attributes, inherit from this class. This should only be used as a temporary measure until all callers are converted to use modern attribute access. NOTE(berrange) This class will eventually be deleted. """ def get(self, key, value=base._NotSpecifiedSentinel): """For backwards-compatibility with dict-based objects. 
NOTE(danms): May be removed in the future. """ if key not in self.obj_fields: # NOTE(jdg): There are a number of places where we rely on the # old dictionary version and do a get(xxx, None). # The following preserves that compatibility but in # the future we'll remove this shim altogether so don't # rely on it. LOG.debug('Cinder object %(object_name)s has no ' 'attribute named: %(attribute_name)s', {'object_name': self.__class__.__name__, 'attribute_name': key}) return None if (value != base._NotSpecifiedSentinel and key not in self.obj_extra_fields and not self.obj_attr_is_set(key)): return value else: try: return getattr(self, key) except (exception.ObjectActionError, NotImplementedError): # Exception when haven't set a value for non-lazy # loadable attribute, but to mimic typical dict 'get' # behavior we should still return None return None class CinderPersistentObject(object): """Mixin class for Persistent objects. This adds the fields that we use in common for all persistent objects. """ OPTIONAL_FIELDS = () Not = db.Not Case = db.Case fields = { 'created_at': fields.DateTimeField(nullable=True), 'updated_at': fields.DateTimeField(nullable=True), 'deleted_at': fields.DateTimeField(nullable=True), 'deleted': fields.BooleanField(default=False, nullable=True), } @classmethod def cinder_ovo_cls_init(cls): """This method is called on OVO registration and sets the DB model.""" # Persistent Versioned Objects Classes should have a DB model, and if # they don't, then we have a problem and we must raise an exception on # registration. try: cls.model = db.get_model_for_versioned_object(cls) except (ImportError, AttributeError): msg = _("Couldn't find ORM model for Persistent Versioned " "Object %s.") % cls.obj_name() LOG.exception("Failed to initialize object.") raise exception.ProgrammingError(reason=msg) @contextlib.contextmanager def obj_as_admin(self): """Context manager to make an object call as an admin. This temporarily modifies the context embedded in an object to be elevated() and restores it after the call completes. Example usage: with obj.obj_as_admin(): obj.save() """ if self._context is None: raise exception.OrphanedObjectError(method='obj_as_admin', objtype=self.obj_name()) original_context = self._context self._context = self._context.elevated() try: yield finally: self._context = original_context @contextlib.contextmanager def as_read_deleted(self, mode='yes'): """Context manager to make OVO with modified read deleted context. This temporarily modifies the context embedded in an object to have a different `read_deleted` parameter. Parameter mode accepts most of the same parameters as our `model_query` DB method. We support 'yes', 'no', and 'only'. usage: with obj.as_read_deleted(): obj.refresh() if obj.status = 'deleted': ... 
""" if self._context is None: raise exception.OrphanedObjectError(method='as_read_deleted', objtype=self.obj_name()) original_mode = self._context.read_deleted self._context.read_deleted = mode try: yield finally: self._context.read_deleted = original_mode @classmethod def _get_expected_attrs(cls, context, *args, **kwargs): return None @classmethod def get_by_id(cls, context, id, *args, **kwargs): # To get by id we need to have a model and for the model to # have an id field if 'id' not in cls.fields: msg = (_('VersionedObject %s cannot retrieve object by id.') % (cls.obj_name())) raise NotImplementedError(msg) orm_obj = db.get_by_id(context, cls.model, id, *args, **kwargs) # We pass parameters because fields to expect may depend on them expected_attrs = cls._get_expected_attrs(context, *args, **kwargs) kargs = {} if expected_attrs: kargs = {'expected_attrs': expected_attrs} return cls._from_db_object(context, cls(context), orm_obj, **kargs) def update_single_status_where(self, new_status, expected_status, filters=()): values = {'status': new_status} expected_status = {'status': expected_status} return self.conditional_update(values, expected_status, filters) def conditional_update(self, values, expected_values=None, filters=(), save_all=False, reflect_changes=True, order=None): """Compare-and-swap update. A conditional object update that, unlike normal update, will SAVE the contents of the update to the DB. Update will only occur in the DB and the object if conditions are met. If no expected_values are passed in we will default to make sure that all fields have not been changed in the DB. Since we cannot know the original value in the DB for dirty fields in the object those will be excluded. We have 4 different condition types we can use in expected_values: - Equality: {'status': 'available'} - Inequality: {'status': vol_obj.Not('deleting')} - In range: {'status': ['available', 'error'] - Not in range: {'status': vol_obj.Not(['in-use', 'attaching']) Method accepts additional filters, which are basically anything that can be passed to a sqlalchemy query's filter method, for example: .. code-block:: python [~sql.exists().where(models.Volume.id == models.Snapshot.volume_id)] We can select values based on conditions using Case objects in the 'values' argument. For example: .. code-block:: python has_snapshot_filter = sql.exists().where( models.Snapshot.volume_id == models.Volume.id) case_values = volume.Case([(has_snapshot_filter, 'has-snapshot')], else_='no-snapshot') volume.conditional_update({'status': case_values}, {'status': 'available'})) And we can use DB fields using model class attribute for example to store previous status in the corresponding field even though we don't know which value is in the db from those we allowed: .. code-block:: python volume.conditional_update({'status': 'deleting', 'previous_status': volume.model.status}, {'status': ('available', 'error')}) :param values: Dictionary of key-values to update in the DB. :param expected_values: Dictionary of conditions that must be met for the update to be executed. :param filters: Iterable with additional filters :param save_all: Object may have changes that are not in the DB, this will say whether we want those changes saved as well. :param reflect_changes: If we want changes made in the database to be reflected in the versioned object. This may mean in some cases that we have to reload the object from the database. 
:param order: Specific order of fields in which to update the values :returns: Boolean indicating whether db rows were updated. It will be False if we couldn't update the DB and True if we could. """ if 'id' not in self.fields: msg = (_('VersionedObject %s does not support conditional update.') % (self.obj_name())) raise NotImplementedError(msg) # If no conditions are set we will require object in DB to be unchanged if expected_values is None: changes = self.obj_what_changed() expected = {key: getattr(self, key) for key in self.fields.keys() if self.obj_attr_is_set(key) and key not in changes and key not in self.OPTIONAL_FIELDS} else: # Set the id in expected_values to limit conditional update to only # change this object expected = expected_values.copy() expected['id'] = self.id # If we want to save any additional changes the object has besides the # ones referred in values if save_all: changes = self.cinder_obj_get_changes() changes.update(values) values = changes result = db.conditional_update(self._context, self.model, values, expected, filters, order=order) # If we were able to update the DB then we need to update this object # as well to reflect new DB contents and clear the object's dirty flags # for those fields. if result and reflect_changes: # If we have used a Case, a db field or an expression in values we # don't know which value was used, so we need to read the object # back from the DB if any(isinstance(v, self.Case) or db.is_orm_value(v) for v in values.values()): # Read back object from DB obj = type(self).get_by_id(self._context, self.id) db_values = obj.obj_to_primitive()['versioned_object.data'] # Only update fields were changes were requested values = {field: db_values[field] for field, value in values.items()} # NOTE(geguileo): We don't use update method because our objects # will eventually move away from VersionedObjectDictCompat for key, value in values.items(): setattr(self, key, value) self.obj_reset_changes(values.keys()) return result def refresh(self): # To refresh we need to have a model and for the model to have an id # field if 'id' not in self.fields: msg = (_('VersionedObject %s cannot retrieve object by id.') % (self.obj_name())) raise NotImplementedError(msg) current = self.get_by_id(self._context, self.id) # Copy contents retrieved from the DB into self my_data = vars(self) my_data.clear() my_data.update(vars(current)) @classmethod def exists(cls, context, id_): return db.resource_exists(context, cls.model, id_) class CinderComparableObject(base.ComparableVersionedObject): def __eq__(self, obj): if hasattr(obj, 'obj_to_primitive'): return self.obj_to_primitive() == obj.obj_to_primitive() return False def __ne__(self, other): return not self.__eq__(other) class ObjectListBase(base.ObjectListBase): def obj_make_compatible(self, primitive, target_version): _log_backport(self, target_version) super(ObjectListBase, self).obj_make_compatible(primitive, target_version) class ClusteredObject(object): @property def service_topic_queue(self): return self.cluster_name or self.host @property def is_clustered(self): return bool(self.cluster_name) def assert_not_frozen(self): ctxt = self._context.elevated() if db.is_backend_frozen(ctxt, self.host, self.cluster_name): msg = _('Modification operations are not allowed on frozen ' 'storage backends.') raise exception.InvalidInput(reason=msg) # The object's resource backend depends on whether it's clustered. 
resource_backend = service_topic_queue class CinderObjectSerializer(base.VersionedObjectSerializer): OBJ_BASE_CLASS = CinderObject def __init__(self, version_cap=None): super(CinderObjectSerializer, self).__init__() self.version_cap = version_cap # NOTE(geguileo): During upgrades we will use a manifest to ensure that # all objects are properly backported. This allows us to properly # backport child objects to the right version even if parent version # has not been bumped. if not version_cap or version_cap == OBJ_VERSIONS.get_current(): self.manifest = None else: if version_cap not in OBJ_VERSIONS: raise exception.CappedVersionUnknown(version=version_cap) self.manifest = OBJ_VERSIONS[version_cap] def _get_capped_obj_version(self, obj): objname = obj.obj_name() version_dict = OBJ_VERSIONS.get(self.version_cap, {}) version_cap = version_dict.get(objname, None) if version_cap: cap_tuple = versionutils.convert_version_to_tuple(version_cap) obj_tuple = versionutils.convert_version_to_tuple(obj.VERSION) if cap_tuple > obj_tuple: # NOTE(dulek): Do not set version cap to be higher than actual # object version as we don't support "forwardporting" of # objects. If service will receive an object that's too old it # should handle it explicitly. version_cap = None return version_cap def serialize_entity(self, context, entity): if isinstance(entity, (tuple, list, set, dict)): entity = self._process_iterable(context, self.serialize_entity, entity) elif (hasattr(entity, 'obj_to_primitive') and isinstance(entity.obj_to_primitive, abc.Callable)): # NOTE(dulek): Backport outgoing object to the capped version. backport_ver = self._get_capped_obj_version(entity) entity = entity.obj_to_primitive(backport_ver, self.manifest) return entity def _log_backport(ovo, target_version): """Log backported versioned objects.""" if target_version and target_version != ovo.VERSION: LOG.debug('Backporting %(obj_name)s from version %(src_vers)s ' 'to version %(dst_vers)s', {'obj_name': ovo.obj_name(), 'src_vers': ovo.VERSION, 'dst_vers': target_version}) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/objects/cgsnapshot.py0000664000175000017500000001517400000000000020517 0ustar00zuulzuul00000000000000# Copyright 2015 Intel Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
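# NOTE(editor): illustrative sketch referring back to CinderObjectSerializer in
# cinder/objects/base.py above; it is not part of this module, and the ctxt and
# volume variables are hypothetical. With a version_cap set, outgoing objects
# are backported to the versions recorded for that history entry before being
# serialized:
#
#     serializer = base.CinderObjectSerializer(version_cap='1.38')
#     primitive = serializer.serialize_entity(ctxt, volume)
#     # volume goes on the wire as Volume 1.8 (the version pinned by the
#     # '1.38' history entry) even though the current Volume version is 1.9.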
from oslo_versionedobjects import fields from cinder import db from cinder import exception from cinder.i18n import _ from cinder import objects from cinder.objects import base @base.CinderObjectRegistry.register class CGSnapshot(base.CinderPersistentObject, base.CinderObject, base.CinderObjectDictCompat, base.ClusteredObject): # Version 1.0: Initial version # Version 1.1: Added from_group_snapshot VERSION = '1.1' OPTIONAL_FIELDS = ('consistencygroup', 'snapshots') # NOTE: When adding a field obj_make_compatible needs to be updated fields = { 'id': fields.UUIDField(), 'consistencygroup_id': fields.UUIDField(nullable=True), 'project_id': fields.StringField(), 'user_id': fields.StringField(), 'name': fields.StringField(nullable=True), 'description': fields.StringField(nullable=True), 'status': fields.StringField(nullable=True), 'consistencygroup': fields.ObjectField('ConsistencyGroup', nullable=True), 'snapshots': fields.ObjectField('SnapshotList', nullable=True), } @property def host(self): return self.consistencygroup.host @property def cluster_name(self): return self.consistencygroup.cluster_name @classmethod def _from_db_object(cls, context, cgsnapshot, db_cgsnapshots, expected_attrs=None): expected_attrs = expected_attrs or [] for name, field in cgsnapshot.fields.items(): if name in cls.OPTIONAL_FIELDS: continue value = db_cgsnapshots.get(name) setattr(cgsnapshot, name, value) if 'consistencygroup' in expected_attrs: consistencygroup = objects.ConsistencyGroup(context) consistencygroup._from_db_object(context, consistencygroup, db_cgsnapshots[ 'consistencygroup']) cgsnapshot.consistencygroup = consistencygroup if 'snapshots' in expected_attrs: snapshots = base.obj_make_list( context, objects.SnapshotsList(context), objects.Snapshots, db_cgsnapshots['snapshots']) cgsnapshot.snapshots = snapshots cgsnapshot._context = context cgsnapshot.obj_reset_changes() return cgsnapshot def create(self): if self.obj_attr_is_set('id'): raise exception.ObjectActionError(action='create', reason=_('already_created')) updates = self.cinder_obj_get_changes() if 'consistencygroup' in updates: raise exception.ObjectActionError( action='create', reason=_('consistencygroup assigned')) db_cgsnapshots = db.cgsnapshot_create(self._context, updates) self._from_db_object(self._context, self, db_cgsnapshots) def from_group_snapshot(self, group_snapshot): """Convert a generic volume group object to a cg object.""" self.id = group_snapshot.id self.consistencygroup_id = group_snapshot.group_id self.user_id = group_snapshot.user_id self.project_id = group_snapshot.project_id self.name = group_snapshot.name self.description = group_snapshot.description self.status = group_snapshot.status def obj_load_attr(self, attrname): if attrname not in self.OPTIONAL_FIELDS: raise exception.ObjectActionError( action='obj_load_attr', reason=_('attribute %s not lazy-loadable') % attrname) if not self._context: raise exception.OrphanedObjectError(method='obj_load_attr', objtype=self.obj_name()) if attrname == 'consistencygroup': self.consistencygroup = objects.ConsistencyGroup.get_by_id( self._context, self.consistencygroup_id) if attrname == 'snapshots': self.snapshots = objects.SnapshotList.get_all_for_cgsnapshot( self._context, self.id) self.obj_reset_changes(fields=[attrname]) def save(self): updates = self.cinder_obj_get_changes() if updates: if 'consistencygroup' in updates: raise exception.ObjectActionError( action='save', reason=_('consistencygroup changed')) if 'snapshots' in updates: raise exception.ObjectActionError( 
action='save', reason=_('snapshots changed')) db.cgsnapshot_update(self._context, self.id, updates) self.obj_reset_changes() def destroy(self): with self.obj_as_admin(): updated_values = db.cgsnapshot_destroy(self._context, self.id) self.update(updated_values) self.obj_reset_changes(updated_values.keys()) @base.CinderObjectRegistry.register class CGSnapshotList(base.ObjectListBase, base.CinderObject): VERSION = '1.0' fields = { 'objects': fields.ListOfObjectsField('CGSnapshot') } @classmethod def get_all(cls, context, filters=None): cgsnapshots = db.cgsnapshot_get_all(context, filters) return base.obj_make_list(context, cls(context), objects.CGSnapshot, cgsnapshots) @classmethod def get_all_by_project(cls, context, project_id, filters=None): cgsnapshots = db.cgsnapshot_get_all_by_project(context, project_id, filters) return base.obj_make_list(context, cls(context), objects.CGSnapshot, cgsnapshots) @classmethod def get_all_by_group(cls, context, group_id, filters=None): cgsnapshots = db.cgsnapshot_get_all_by_group(context, group_id, filters) return base.obj_make_list(context, cls(context), objects.CGSnapshot, cgsnapshots) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/objects/cleanable.py0000664000175000017500000002315000000000000020245 0ustar00zuulzuul00000000000000# Copyright (c) 2016 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import collections.abc as collections import inspect import decorator from oslo_utils import versionutils from cinder import db from cinder import exception from cinder.objects import base from cinder import service from cinder.volume import rpcapi as vol_rpcapi class CinderCleanableObject(base.CinderPersistentObject): """Base class for cleanable OVO resources. All cleanable objects must have a host property/attribute. """ worker = None cleanable_resource_types = set() @classmethod def get_rpc_api(cls): # By default assume all resources are handled by c-vol services return vol_rpcapi.VolumeAPI @classmethod def cinder_ovo_cls_init(cls): """Called on OVO registration, sets set of cleanable resources.""" # First call persistent object method to store the DB model super(CinderCleanableObject, cls).cinder_ovo_cls_init() # Add this class to the set of resources cls.cleanable_resource_types.add(cls.obj_name()) @classmethod def get_pinned_version(cls): # We pin the version by the last service that gets updated, which is # c-vol or c-bak min_obj_vers_str = cls.get_rpc_api().determine_obj_version_cap() # Get current pinned down version for this object version = base.OBJ_VERSIONS[min_obj_vers_str][cls.__name__] return versionutils.convert_version_to_int(version) @staticmethod def _is_cleanable(status, obj_version): """Check if a specific status for a specific OBJ version is cleanable. Each CinderCleanableObject class should implement this method and return True for cleanable status for versions equal or higher to the ones where the functionality was added. 
:returns: Whether to create a workers DB entry or not :param obj_version: Min object version running in the cloud or None if current version. :type obj_version: float """ return False def is_cleanable(self, pinned=False): """Check if cleanable VO status is cleanable. :param pinned: If we should check against pinned version or current version. :type pinned: bool :returns: Whether this needs a workers DB entry or not """ if pinned: obj_version = self.get_pinned_version() else: obj_version = None return self._is_cleanable(self.status, obj_version) def create_worker(self, pinned=True): """Create a worker entry at the API.""" # This method is mostly called from the rpc layer, therefore it checks # if it's cleanable given current pinned version. if not self.is_cleanable(pinned): return False resource_type = self.__class__.__name__ entry_in_db = False # This will only loop on very rare race conditions while not entry_in_db: try: # On the common case there won't be an entry in the DB, that's # why we try to create first. db.worker_create(self._context, status=self.status, resource_type=resource_type, resource_id=self.id) entry_in_db = True except exception.WorkerExists: try: db.worker_update(self._context, None, filters={'resource_type': resource_type, 'resource_id': self.id}, service_id=None, status=self.status) entry_in_db = True except exception.WorkerNotFound: pass return entry_in_db def set_worker(self): worker = self.worker service_id = service.Service.service_id resource_type = self.__class__.__name__ if worker: if worker.cleaning: return else: try: worker = db.worker_get(self._context, resource_type=resource_type, resource_id=self.id) except exception.WorkerNotFound: # If the call didn't come from an RPC call we still have to # create the entry in the DB. try: self.worker = db.worker_create(self._context, status=self.status, resource_type=resource_type, resource_id=self.id, service_id=service_id) return except exception.WorkerExists: # If 2 cleanable operations are competing for this resource # and the other one created the entry first that one won raise exception.CleanableInUse(type=resource_type, id=self.id) # If we have to claim this work or if the status has changed we have # to update DB. if (worker.service_id != service_id or worker.status != self.status): try: db.worker_update( self._context, worker.id, filters={'service_id': worker.service_id, 'status': worker.status, 'race_preventer': worker.race_preventer, 'updated_at': worker.updated_at}, service_id=service_id, status=self.status, orm_worker=worker) except exception.WorkerNotFound: self.worker = None raise exception.CleanableInUse(type=self.__class__.__name__, id=self.id) self.worker = worker def unset_worker(self): if self.worker: db.worker_destroy(self._context, id=self.worker.id, status=self.worker.status, service_id=self.worker.service_id) self.worker = None # NOTE(geguileo): To be compatible with decorate v3.4.x and v4.0.x decorate = staticmethod(getattr(decorator, 'decorate', lambda f, w: decorator.decorator(w, f))) @staticmethod def set_workers(*decorator_args): """Decorator that adds worker DB rows for cleanable versioned objects. By default will take care of all cleanable objects, but we can limit which objects we want by passing the name of the arguments we want to be added. 
""" def _decorator(f): def wrapper(f, *args, **kwargs): if decorator_args: call_args = inspect.getcallargs(f, *args, **kwargs) candidates = [call_args[obj] for obj in decorator_args] else: candidates = list(args) candidates.extend(kwargs.values()) cleanables = [cand for cand in candidates if (isinstance(cand, CinderCleanableObject) and cand.is_cleanable(pinned=False))] try: # Create the entries in the workers table for cleanable in cleanables: cleanable.set_worker() # Call the function result = f(*args, **kwargs) finally: # Remove entries from the workers table for cleanable in cleanables: # NOTE(geguileo): We check that the status has changed # to avoid removing the worker entry when we finished # the operation due to an unexpected exception and also # when this process stops because the main process has # stopped. if (cleanable.worker and cleanable.status != cleanable.worker.status): try: cleanable.unset_worker() except Exception: pass return result return CinderCleanableObject.decorate(f, wrapper) # If we don't have optional decorator arguments the argument in # decorator_args is the function we have to decorate if len(decorator_args) == 1 and isinstance( decorator_args[0], collections.Callable): function = decorator_args[0] decorator_args = None return _decorator(function) return _decorator def refresh(self): # We want to keep the worker entry on refresh worker = self.worker super(CinderCleanableObject, self).refresh() self.worker = worker ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/objects/cleanup_request.py0000664000175000017500000000400000000000000021527 0ustar00zuulzuul00000000000000# Copyright (c) 2016 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from oslo_versionedobjects import fields from cinder.objects import base @base.CinderObjectRegistry.register class CleanupRequest(base.CinderObject, base.ClusteredObject): """Versioned Object to send cleanup requests.""" # Version 1.0: Initial version VERSION = '1.0' # NOTE: When adding a field obj_make_compatible needs to be updated fields = { 'service_id': fields.IntegerField(nullable=True), 'cluster_name': fields.StringField(nullable=True), 'host': fields.StringField(nullable=True), 'binary': fields.StringField(nullable=True), 'is_up': fields.BooleanField(default=False, nullable=True), 'disabled': fields.BooleanField(nullable=True), 'resource_id': fields.UUIDField(nullable=True), 'resource_type': fields.StringField(nullable=True), 'until': fields.DateTimeField(nullable=True), } def __init__(self, context=None, **kwargs): super(CleanupRequest, self).__init__(**kwargs) # Set non initialized fields with default or None values for field_name in self.fields: if not self.obj_attr_is_set(field_name): field = self.fields[field_name] if field.default != fields.UnspecifiedDefault: setattr(self, field_name, field.default) elif field.nullable: setattr(self, field_name, None) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/objects/cluster.py0000664000175000017500000002124700000000000020025 0ustar00zuulzuul00000000000000# Copyright (c) 2016 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_versionedobjects import fields from cinder import db from cinder import exception from cinder.i18n import _ from cinder import objects from cinder.objects import base from cinder.objects import fields as c_fields from cinder import utils @base.CinderObjectRegistry.register class Cluster(base.CinderPersistentObject, base.CinderObject, base.CinderComparableObject): """Cluster Versioned Object. Method get_by_id supports as additional named arguments: - get_services: If we want to load all services from this cluster. - services_summary: If we want to load num_nodes and num_down_nodes fields. - is_up: Boolean value to filter based on the cluster's up status. - read_deleted: Filtering based on delete status. Default value "no". - Any other cluster field will be used as a filter. 
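For example, an illustrative call (the ctxt variable and the name and binary
values below are hypothetical):

    cluster = objects.Cluster.get_by_id(ctxt, None,
                                        name='cluster1@lvmdriver-1',
                                        binary='cinder-volume',
                                        services_summary=True)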
""" # Version 1.0: Initial version # Version 1.1: Add replication fields VERSION = '1.1' OPTIONAL_FIELDS = ('num_hosts', 'num_down_hosts', 'services') # NOTE: When adding a field obj_make_compatible needs to be updated fields = { 'id': fields.IntegerField(), 'name': fields.StringField(nullable=False), 'binary': fields.StringField(nullable=False), 'disabled': fields.BooleanField(default=False, nullable=True), 'disabled_reason': fields.StringField(nullable=True), 'num_hosts': fields.IntegerField(default=0, read_only=True), 'num_down_hosts': fields.IntegerField(default=0, read_only=True), 'last_heartbeat': fields.DateTimeField(nullable=True, read_only=True), 'services': fields.ObjectField('ServiceList', nullable=True, read_only=True), # Replication properties 'replication_status': c_fields.ReplicationStatusField(nullable=True), 'frozen': fields.BooleanField(default=False), 'active_backend_id': fields.StringField(nullable=True), # Don't add race_preventer field, as it's a DB layer internal mechanism # piece to prevent races and should not be touched by other layers. } @classmethod def _get_expected_attrs(cls, context, *args, **kwargs): """Return expected attributes when getting a cluster. Expected attributes depend on whether we are retrieving all related services as well as if we are getting the services summary. """ expected_attrs = [] if kwargs.get('get_services'): expected_attrs.append('services') if kwargs.get('services_summary'): expected_attrs.extend(('num_hosts', 'num_down_hosts')) return expected_attrs @staticmethod def _from_db_object(context, cluster, db_cluster, expected_attrs=None): """Fill cluster OVO fields from cluster ORM instance.""" expected_attrs = expected_attrs or tuple() for name, field in cluster.fields.items(): # The only field that cannot be assigned using setattr is services, # because it is an ObjectField. So we don't assign the value if # it's a non expected optional field or if it's services field. if ((name in Cluster.OPTIONAL_FIELDS and name not in expected_attrs) or name == 'services'): continue value = getattr(db_cluster, name) setattr(cluster, name, value) cluster._context = context if 'services' in expected_attrs: cluster.services = base.obj_make_list( context, objects.ServiceList(context), objects.Service, db_cluster.services) cluster.obj_reset_changes() return cluster def obj_load_attr(self, attrname): """Lazy load services attribute.""" # NOTE(geguileo): We only allow lazy loading services to raise # awareness of the high cost of lazy loading num_hosts and # num_down_hosts, so if we are going to need this information we should # be certain we really need it and it should loaded when retrieving the # data from the DB the first time we read the OVO. 
if attrname != 'services': raise exception.ObjectActionError( action='obj_load_attr', reason=_('attribute %s not lazy-loadable') % attrname) if not self._context: raise exception.OrphanedObjectError(method='obj_load_attr', objtype=self.obj_name()) self.services = objects.ServiceList.get_all( self._context, {'cluster_name': self.name}) self.obj_reset_changes(fields=('services',)) def create(self): if self.obj_attr_is_set('id'): raise exception.ObjectActionError(action='create', reason=_('already created')) updates = self.cinder_obj_get_changes() if updates: for field in self.OPTIONAL_FIELDS: if field in updates: raise exception.ObjectActionError( action='create', reason=_('%s assigned') % field) db_cluster = db.cluster_create(self._context, updates) self._from_db_object(self._context, self, db_cluster) def save(self): updates = self.cinder_obj_get_changes() if updates: for field in self.OPTIONAL_FIELDS: if field in updates: raise exception.ObjectActionError( action='save', reason=_('%s changed') % field) db.cluster_update(self._context, self.id, updates) self.obj_reset_changes() def destroy(self): with self.obj_as_admin(): updated_values = db.cluster_destroy(self._context, self.id) for field, value in updated_values.items(): setattr(self, field, value) self.obj_reset_changes(updated_values.keys()) @property def is_up(self): return (self.last_heartbeat and self.last_heartbeat >= utils.service_expired_time(True)) def reset_service_replication(self): """Reset service replication flags on promotion. When an admin promotes a cluster, each service member requires an update to maintain database consistency. """ actions = { 'replication_status': 'enabled', 'active_backend_id': None, } expectations = { 'cluster_name': self.name, } db.conditional_update(self._context, objects.Service.model, actions, expectations) @base.CinderObjectRegistry.register class ClusterList(base.ObjectListBase, base.CinderObject): # Version 1.0: Initial version VERSION = '1.0' fields = {'objects': fields.ListOfObjectsField('Cluster')} @classmethod def get_all(cls, context, is_up=None, get_services=False, services_summary=False, read_deleted='no', **filters): """Get all clusters that match the criteria. :param is_up: Boolean value to filter based on the cluster's up status. :param get_services: If we want to load all services from this cluster. :param services_summary: If we want to load num_nodes and num_down_nodes fields. :param read_deleted: Filtering based on delete status. Default value is "no". :param filters: Field based filters in the form of key/value. """ expected_attrs = Cluster._get_expected_attrs( context, get_services=get_services, services_summary=services_summary) clusters = db.cluster_get_all(context, is_up=is_up, get_services=get_services, services_summary=services_summary, read_deleted=read_deleted, **filters) return base.obj_make_list(context, cls(context), Cluster, clusters, expected_attrs=expected_attrs) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/objects/consistencygroup.py0000664000175000017500000002447000000000000021763 0ustar00zuulzuul00000000000000# Copyright 2015 Yahoo Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_versionedobjects import fields from cinder import db from cinder import exception from cinder.i18n import _ from cinder import objects from cinder.objects import base from cinder.objects import fields as c_fields @base.CinderObjectRegistry.register class ConsistencyGroup(base.CinderPersistentObject, base.CinderObject, base.CinderObjectDictCompat, base.ClusteredObject): # Version 1.0: Initial version # Version 1.1: Added cgsnapshots and volumes relationships # Version 1.2: Changed 'status' field to use ConsistencyGroupStatusField # Version 1.3: Added cluster fields # Version 1.4: Added from_group VERSION = '1.4' OPTIONAL_FIELDS = ('cgsnapshots', 'volumes', 'cluster') fields = { 'id': fields.UUIDField(), 'user_id': fields.StringField(), 'project_id': fields.StringField(), 'cluster_name': fields.StringField(nullable=True), 'cluster': fields.ObjectField('Cluster', nullable=True, read_only=True), 'host': fields.StringField(nullable=True), 'availability_zone': fields.StringField(nullable=True), 'name': fields.StringField(nullable=True), 'description': fields.StringField(nullable=True), 'volume_type_id': fields.StringField(nullable=True), 'status': c_fields.ConsistencyGroupStatusField(nullable=True), 'cgsnapshot_id': fields.UUIDField(nullable=True), 'source_cgid': fields.UUIDField(nullable=True), 'cgsnapshots': fields.ObjectField('CGSnapshotList', nullable=True), 'volumes': fields.ObjectField('VolumeList', nullable=True), } @classmethod def _from_db_object(cls, context, consistencygroup, db_consistencygroup, expected_attrs=None): if expected_attrs is None: expected_attrs = [] for name, field in consistencygroup.fields.items(): if name in cls.OPTIONAL_FIELDS: continue value = db_consistencygroup.get(name) setattr(consistencygroup, name, value) if 'cgsnapshots' in expected_attrs: cgsnapshots = base.obj_make_list( context, objects.CGSnapshotList(context), objects.CGSnapshot, db_consistencygroup['cgsnapshots']) consistencygroup.cgsnapshots = cgsnapshots if 'volumes' in expected_attrs: volumes = base.obj_make_list( context, objects.VolumeList(context), objects.Volume, db_consistencygroup['volumes']) consistencygroup.volumes = volumes if 'cluster' in expected_attrs: db_cluster = db_consistencygroup.get('cluster') # If this consistency group doesn't belong to a cluster the cluster # field in the ORM instance will have value of None. if db_cluster: consistencygroup.cluster = objects.Cluster(context) objects.Cluster._from_db_object(context, consistencygroup.cluster, db_cluster) else: consistencygroup.cluster = None consistencygroup._context = context consistencygroup.obj_reset_changes() return consistencygroup def create(self, cg_snap_id=None, cg_id=None): """Create a consistency group. If cg_snap_id or cg_id are specified then volume_type_id, availability_zone, and host will be taken from the source Consistency Group. 
""" if self.obj_attr_is_set('id'): raise exception.ObjectActionError(action='create', reason=_('already_created')) updates = self.cinder_obj_get_changes() if 'cgsnapshots' in updates: raise exception.ObjectActionError(action='create', reason=_('cgsnapshots assigned')) if 'volumes' in updates: raise exception.ObjectActionError(action='create', reason=_('volumes assigned')) if 'cluster' in updates: raise exception.ObjectActionError( action='create', reason=_('cluster assigned')) db_consistencygroups = db.consistencygroup_create(self._context, updates, cg_snap_id, cg_id) self._from_db_object(self._context, self, db_consistencygroups) def from_group(self, group): """Convert a generic volume group object to a cg object.""" self.id = group.id self.user_id = group.user_id self.project_id = group.project_id self.cluster_name = group.cluster_name self.host = group.host self.availability_zone = group.availability_zone self.name = group.name self.description = group.description self.volume_type_id = "" for v_type in group.volume_types: self.volume_type_id += v_type.id + "," self.status = group.status self.cgsnapshot_id = group.group_snapshot_id self.source_cgid = group.source_group_id def obj_load_attr(self, attrname): if attrname not in self.OPTIONAL_FIELDS: raise exception.ObjectActionError( action='obj_load_attr', reason=_('attribute %s not lazy-loadable') % attrname) if not self._context: raise exception.OrphanedObjectError(method='obj_load_attr', objtype=self.obj_name()) if attrname == 'cgsnapshots': self.cgsnapshots = objects.CGSnapshotList.get_all_by_group( self._context, self.id) if attrname == 'volumes': self.volumes = objects.VolumeList.get_all_by_group(self._context, self.id) # If this consistency group doesn't belong to a cluster (cluster_name # is empty), then cluster field will be None. if attrname == 'cluster': if self.cluster_name: self.cluster = objects.Cluster.get_by_id( self._context, name=self.cluster_name) else: self.cluster = None self.obj_reset_changes(fields=[attrname]) def save(self): updates = self.cinder_obj_get_changes() if updates: if 'cgsnapshots' in updates: raise exception.ObjectActionError( action='save', reason=_('cgsnapshots changed')) if 'volumes' in updates: raise exception.ObjectActionError( action='save', reason=_('volumes changed')) if 'cluster' in updates: raise exception.ObjectActionError( action='save', reason=_('cluster changed')) db.consistencygroup_update(self._context, self.id, updates) self.obj_reset_changes() def destroy(self): with self.obj_as_admin(): updated_values = db.consistencygroup_destroy(self._context, self.id) self.update(updated_values) self.obj_reset_changes(updated_values.keys()) @base.CinderObjectRegistry.register class ConsistencyGroupList(base.ObjectListBase, base.CinderObject): # Version 1.0: Initial version # Version 1.1: Add pagination support to consistency group VERSION = '1.1' fields = { 'objects': fields.ListOfObjectsField('ConsistencyGroup') } @staticmethod def include_in_cluster(context, cluster, partial_rename=True, **filters): """Include all consistency groups matching the filters into a cluster. When partial_rename is set we will not set the cluster_name with cluster parameter value directly, we'll replace provided cluster_name or host filter value with cluster instead. This is useful when we want to replace just the cluster name but leave the backend and pool information as it is. If we are using cluster_name to filter, we'll use that same DB field to replace the cluster value and leave the rest as it is. 
Likewise if we use the host to filter. Returns the number of consistency groups that have been changed. """ return db.consistencygroup_include_in_cluster(context, cluster, partial_rename, **filters) @classmethod def get_all(cls, context, filters=None, marker=None, limit=None, offset=None, sort_keys=None, sort_dirs=None): consistencygroups = db.consistencygroup_get_all( context, filters=filters, marker=marker, limit=limit, offset=offset, sort_keys=sort_keys, sort_dirs=sort_dirs) return base.obj_make_list(context, cls(context), objects.ConsistencyGroup, consistencygroups) @classmethod def get_all_by_project(cls, context, project_id, filters=None, marker=None, limit=None, offset=None, sort_keys=None, sort_dirs=None): consistencygroups = db.consistencygroup_get_all_by_project( context, project_id, filters=filters, marker=marker, limit=limit, offset=offset, sort_keys=sort_keys, sort_dirs=sort_dirs) return base.obj_make_list(context, cls(context), objects.ConsistencyGroup, consistencygroups) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/objects/dynamic_log.py0000664000175000017500000000341500000000000020626 0ustar00zuulzuul00000000000000# Copyright (c) 2017 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_versionedobjects import fields from cinder.objects import base @base.CinderObjectRegistry.register class LogLevel(base.CinderObject): """Versioned Object to send log change requests.""" # Version 1.0: Initial version VERSION = '1.0' # NOTE: When adding a field obj_make_compatible needs to be updated fields = { 'prefix': fields.StringField(nullable=True), 'level': fields.StringField(nullable=True), } def __init__(self, context=None, **kwargs): super(LogLevel, self).__init__(**kwargs) # Set non initialized fields with default or None values for field_name in self.fields: if not self.obj_attr_is_set(field_name): field = self.fields[field_name] if field.default != fields.UnspecifiedDefault: setattr(self, field_name, field.default) elif field.nullable: setattr(self, field_name, None) @base.CinderObjectRegistry.register class LogLevelList(base.ObjectListBase, base.CinderObject): VERSION = '1.0' fields = { 'objects': fields.ListOfObjectsField('LogLevel'), } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/objects/fields.py0000664000175000017500000001343200000000000017607 0ustar00zuulzuul00000000000000# Copyright 2015 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. """Custom fields for Cinder objects.""" from oslo_versionedobjects import fields BaseEnumField = fields.BaseEnumField Enum = fields.Enum Field = fields.Field FieldType = fields.FieldType class BaseCinderEnum(Enum): def __init__(self): super(BaseCinderEnum, self).__init__(valid_values=self.__class__.ALL) class BackupStatus(BaseCinderEnum): ERROR = 'error' ERROR_DELETING = 'error_deleting' CREATING = 'creating' AVAILABLE = 'available' DELETING = 'deleting' DELETED = 'deleted' RESTORING = 'restoring' ALL = (ERROR, ERROR_DELETING, CREATING, AVAILABLE, DELETING, DELETED, RESTORING) class BackupStatusField(BaseEnumField): AUTO_TYPE = BackupStatus() class ConsistencyGroupStatus(BaseCinderEnum): ERROR = 'error' AVAILABLE = 'available' CREATING = 'creating' DELETING = 'deleting' DELETED = 'deleted' UPDATING = 'updating' ERROR_DELETING = 'error_deleting' ALL = (ERROR, AVAILABLE, CREATING, DELETING, DELETED, UPDATING, ERROR_DELETING) class ConsistencyGroupStatusField(BaseEnumField): AUTO_TYPE = ConsistencyGroupStatus() class GroupStatus(BaseCinderEnum): ERROR = 'error' AVAILABLE = 'available' CREATING = 'creating' DELETING = 'deleting' DELETED = 'deleted' UPDATING = 'updating' IN_USE = 'in-use' ERROR_DELETING = 'error_deleting' ALL = (ERROR, AVAILABLE, CREATING, DELETING, DELETED, UPDATING, IN_USE, ERROR_DELETING) class GroupStatusField(BaseEnumField): AUTO_TYPE = GroupStatus() class GroupSnapshotStatus(BaseCinderEnum): ERROR = 'error' AVAILABLE = 'available' CREATING = 'creating' DELETING = 'deleting' DELETED = 'deleted' UPDATING = 'updating' ERROR_DELETING = 'error_deleting' ALL = (ERROR, AVAILABLE, CREATING, DELETING, DELETED, UPDATING, ERROR_DELETING) class GroupSnapshotStatusField(BaseEnumField): AUTO_TYPE = GroupSnapshotStatus() class ReplicationStatus(BaseCinderEnum): ERROR = 'error' ENABLED = 'enabled' DISABLED = 'disabled' NOT_CAPABLE = 'not-capable' FAILING_OVER = 'failing-over' FAILOVER_ERROR = 'failover-error' FAILED_OVER = 'failed-over' ENABLING = 'enabling' DISABLING = 'disabling' ALL = (ERROR, ENABLED, DISABLED, NOT_CAPABLE, FAILOVER_ERROR, FAILING_OVER, FAILED_OVER, ENABLING, DISABLING) class ReplicationStatusField(BaseEnumField): AUTO_TYPE = ReplicationStatus() class SnapshotStatus(BaseCinderEnum): ERROR = 'error' AVAILABLE = 'available' CREATING = 'creating' DELETING = 'deleting' DELETED = 'deleted' UPDATING = 'updating' ERROR_DELETING = 'error_deleting' UNMANAGING = 'unmanaging' BACKING_UP = 'backing-up' RESTORING = 'restoring' ALL = (ERROR, AVAILABLE, CREATING, DELETING, DELETED, UPDATING, ERROR_DELETING, UNMANAGING, BACKING_UP, RESTORING) class SnapshotStatusField(BaseEnumField): AUTO_TYPE = SnapshotStatus() class QoSConsumerValues(BaseCinderEnum): BACK_END = 'back-end' FRONT_END = 'front-end' BOTH = 'both' ALL = (BACK_END, FRONT_END, BOTH) class QoSConsumerField(BaseEnumField): AUTO_TYPE = QoSConsumerValues() class VolumeAttachStatus(BaseCinderEnum): ATTACHED = 'attached' ATTACHING = 'attaching' DETACHED = 'detached' RESERVED = 'reserved' ERROR_ATTACHING = 'error_attaching' ERROR_DETACHING = 'error_detaching' DELETED = 'deleted' ALL = (ATTACHED, ATTACHING, DETACHED, ERROR_ATTACHING, ERROR_DETACHING, RESERVED, DELETED) class VolumeAttachStatusField(BaseEnumField): AUTO_TYPE = VolumeAttachStatus() class VolumeStatus(BaseCinderEnum): CREATING = 'creating' AVAILABLE = 'available' DELETING = 'deleting' ERROR = 'error' ERROR_DELETING = 'error_deleting' ERROR_MANAGING = 
'error_managing' MANAGING = 'managing' ATTACHING = 'attaching' IN_USE = 'in-use' DETACHING = 'detaching' MAINTENANCE = 'maintenance' RESTORING_BACKUP = 'restoring-backup' ERROR_RESTORING = 'error_restoring' RESERVED = 'reserved' AWAITING_TRANSFER = 'awaiting-transfer' BACKING_UP = 'backing-up' ERROR_BACKING_UP = 'error_backing-up' ERROR_EXTENDING = 'error_extending' DOWNLOADING = 'downloading' UPLOADING = 'uploading' RETYPING = 'retyping' EXTENDING = 'extending' ALL = (CREATING, AVAILABLE, DELETING, ERROR, ERROR_DELETING, ERROR_MANAGING, MANAGING, ATTACHING, IN_USE, DETACHING, MAINTENANCE, RESTORING_BACKUP, ERROR_RESTORING, RESERVED, AWAITING_TRANSFER, BACKING_UP, ERROR_BACKING_UP, ERROR_EXTENDING, DOWNLOADING, UPLOADING, RETYPING, EXTENDING) class VolumeStatusField(BaseEnumField): AUTO_TYPE = VolumeStatus() class VolumeMigrationStatus(BaseCinderEnum): MIGRATING = 'migrating' ERROR = 'error' SUCCESS = 'success' COMPLETING = 'completing' NONE = 'none' STARTING = 'starting' ALL = (MIGRATING, ERROR, SUCCESS, COMPLETING, NONE, STARTING) class VolumeMigrationStatusField(BaseEnumField): AUTO_TYPE = VolumeMigrationStatus() class DictOfNullableField(fields.AutoTypedField): AUTO_TYPE = fields.Dict(fields.FieldType(), nullable=True) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/objects/group.py0000664000175000017500000002323500000000000017477 0ustar00zuulzuul00000000000000# Copyright 2016 EMC Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
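# Editor's illustrative sketch, not part of the packaged cinder sources: the
# BaseCinderEnum/BaseEnumField pattern defined in cinder/objects/fields.py
# above validates values on assignment, so only members listed in each ALL
# tuple are accepted.  A minimal, hedged example of that behaviour using the
# field's coerce() hook (no database or context required):
from cinder.objects import fields as c_fields

status_field = c_fields.VolumeStatusField()
# A value from the enum is accepted and returned unchanged.
status_field.coerce(None, 'status', c_fields.VolumeStatus.CREATING)
try:
    # Anything outside VolumeStatus.ALL is rejected with ValueError.
    status_field.coerce(None, 'status', 'bogus-status')
except ValueError:
    pass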
from oslo_versionedobjects import fields from cinder import db from cinder import exception from cinder.i18n import _ from cinder import objects from cinder.objects import base from cinder.objects import fields as c_fields from cinder.volume import volume_utils @base.CinderObjectRegistry.register class Group(base.CinderPersistentObject, base.CinderObject, base.CinderObjectDictCompat, base.ClusteredObject): # Version 1.0: Initial version # Version 1.1: Added group_snapshots, group_snapshot_id, and # source_group_id # Version 1.2: Added replication_status VERSION = '1.2' OPTIONAL_FIELDS = ('volumes', 'volume_types', 'group_snapshots') # NOTE: When adding a field obj_make_compatible needs to be updated fields = { 'id': fields.UUIDField(), 'user_id': fields.StringField(), 'project_id': fields.StringField(), 'cluster_name': fields.StringField(nullable=True), 'host': fields.StringField(nullable=True), 'availability_zone': fields.StringField(nullable=True), 'name': fields.StringField(nullable=True), 'description': fields.StringField(nullable=True), 'group_type_id': fields.StringField(), 'volume_type_ids': fields.ListOfStringsField(nullable=True), 'status': c_fields.GroupStatusField(nullable=True), 'group_snapshot_id': fields.UUIDField(nullable=True), 'source_group_id': fields.UUIDField(nullable=True), 'replication_status': c_fields.ReplicationStatusField(nullable=True), 'volumes': fields.ObjectField('VolumeList', nullable=True), 'volume_types': fields.ObjectField('VolumeTypeList', nullable=True), 'group_snapshots': fields.ObjectField('GroupSnapshotList', nullable=True), } @staticmethod def _from_db_object(context, group, db_group, expected_attrs=None): if expected_attrs is None: expected_attrs = [] for name, field in group.fields.items(): if name in Group.OPTIONAL_FIELDS: continue value = db_group.get(name) setattr(group, name, value) if 'volumes' in expected_attrs: volumes = base.obj_make_list( context, objects.VolumeList(context), objects.Volume, db_group['volumes']) group.volumes = volumes if 'volume_types' in expected_attrs: volume_types = base.obj_make_list( context, objects.VolumeTypeList(context), objects.VolumeType, db_group['volume_types']) group.volume_types = volume_types if 'group_snapshots' in expected_attrs: group_snapshots = base.obj_make_list( context, objects.GroupSnapshotList(context), objects.GroupSnapshot, db_group['group_snapshots']) group.group_snapshots = group_snapshots group._context = context group.obj_reset_changes() return group def create(self, group_snapshot_id=None, source_group_id=None): if self.obj_attr_is_set('id'): raise exception.ObjectActionError(action='create', reason=_('already_created')) updates = self.cinder_obj_get_changes() if 'volume_types' in updates: raise exception.ObjectActionError( action='create', reason=_('volume_types assigned')) if 'volumes' in updates: raise exception.ObjectActionError(action='create', reason=_('volumes assigned')) if 'group_snapshots' in updates: raise exception.ObjectActionError( action='create', reason=_('group_snapshots assigned')) db_groups = db.group_create(self._context, updates, group_snapshot_id, source_group_id) self._from_db_object(self._context, self, db_groups) def obj_load_attr(self, attrname): if attrname not in Group.OPTIONAL_FIELDS: raise exception.ObjectActionError( action='obj_load_attr', reason=_('attribute %s not lazy-loadable') % attrname) if not self._context: raise exception.OrphanedObjectError(method='obj_load_attr', objtype=self.obj_name()) if attrname == 'volume_types': self.volume_types = 
objects.VolumeTypeList.get_all_by_group( self._context, self.id) if attrname == 'volumes': self.volumes = objects.VolumeList.get_all_by_generic_group( self._context, self.id) if attrname == 'group_snapshots': self.group_snapshots = objects.GroupSnapshotList.get_all_by_group( self._context, self.id) self.obj_reset_changes(fields=[attrname]) def save(self): updates = self.cinder_obj_get_changes() if updates: if 'volume_types' in updates: msg = _('Cannot save volume_types changes in group object ' 'update.') raise exception.ObjectActionError( action='save', reason=msg) if 'volumes' in updates: msg = _('Cannot save volumes changes in group object update.') raise exception.ObjectActionError( action='save', reason=msg) if 'group_snapshots' in updates: msg = _('Cannot save group_snapshots changes in group object ' 'update.') raise exception.ObjectActionError( action='save', reason=msg) db.group_update(self._context, self.id, updates) self.obj_reset_changes() def destroy(self): with self.obj_as_admin(): db.group_destroy(self._context, self.id) @property def is_replicated(self): if (volume_utils.is_group_a_type(self, "group_replication_enabled") or volume_utils.is_group_a_type( self, "consistent_group_replication_enabled")): return True return False @base.CinderObjectRegistry.register class GroupList(base.ObjectListBase, base.CinderObject): # Version 1.0: Initial version VERSION = '1.0' fields = { 'objects': fields.ListOfObjectsField('Group') } @classmethod def get_all(cls, context, filters=None, marker=None, limit=None, offset=None, sort_keys=None, sort_dirs=None): groups = db.group_get_all( context, filters=filters, marker=marker, limit=limit, offset=offset, sort_keys=sort_keys, sort_dirs=sort_dirs) return base.obj_make_list(context, cls(context), objects.Group, groups) @classmethod def get_all_by_project(cls, context, project_id, filters=None, marker=None, limit=None, offset=None, sort_keys=None, sort_dirs=None): groups = db.group_get_all_by_project( context, project_id, filters=filters, marker=marker, limit=limit, offset=offset, sort_keys=sort_keys, sort_dirs=sort_dirs) return base.obj_make_list(context, cls(context), objects.Group, groups) @classmethod def get_all_replicated(cls, context, filters=None, marker=None, limit=None, offset=None, sort_keys=None, sort_dirs=None): groups = db.group_get_all( context, filters=filters, marker=marker, limit=limit, offset=offset, sort_keys=sort_keys, sort_dirs=sort_dirs) grp_obj_list = base.obj_make_list(context, cls(context), objects.Group, groups) out_groups = [grp for grp in grp_obj_list if grp.is_replicated] return out_groups @staticmethod def include_in_cluster(context, cluster, partial_rename=True, **filters): """Include all generic groups matching the filters into a cluster. When partial_rename is set we will not set the cluster_name with cluster parameter value directly, we'll replace provided cluster_name or host filter value with cluster instead. This is useful when we want to replace just the cluster name but leave the backend and pool information as it is. If we are using cluster_name to filter, we'll use that same DB field to replace the cluster value and leave the rest as it is. Likewise if we use the host to filter. Returns the number of generic groups that have been changed. 
""" return db.group_include_in_cluster(context, cluster, partial_rename, **filters) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/objects/group_snapshot.py0000664000175000017500000001545000000000000021416 0ustar00zuulzuul00000000000000# Copyright 2016 EMC Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_versionedobjects import fields from cinder import db from cinder import exception from cinder.i18n import _ from cinder import objects from cinder.objects import base @base.CinderObjectRegistry.register class GroupSnapshot(base.CinderPersistentObject, base.CinderObject, base.CinderObjectDictCompat, base.ClusteredObject): VERSION = '1.0' OPTIONAL_FIELDS = ('group', 'snapshots') # NOTE: When adding a field obj_make_compatible needs to be updated fields = { 'id': fields.UUIDField(), 'group_id': fields.UUIDField(nullable=False), 'project_id': fields.StringField(nullable=True), 'user_id': fields.StringField(nullable=True), 'name': fields.StringField(nullable=True), 'description': fields.StringField(nullable=True), 'status': fields.StringField(nullable=True), 'group_type_id': fields.UUIDField(nullable=True), 'group': fields.ObjectField('Group', nullable=True), 'snapshots': fields.ObjectField('SnapshotList', nullable=True), } @property def host(self): return self.group.host @property def cluster_name(self): return self.group.cluster_name @classmethod def _from_db_object(cls, context, group_snapshot, db_group_snapshots, expected_attrs=None): expected_attrs = expected_attrs or [] for name, field in group_snapshot.fields.items(): if name in cls.OPTIONAL_FIELDS: continue value = db_group_snapshots.get(name) setattr(group_snapshot, name, value) if 'group' in expected_attrs: group = objects.Group(context) group._from_db_object(context, group, db_group_snapshots['group']) group_snapshot.group = group if 'snapshots' in expected_attrs: snapshots = base.obj_make_list( context, objects.SnapshotsList(context), objects.Snapshots, db_group_snapshots['snapshots']) group_snapshot.snapshots = snapshots group_snapshot._context = context group_snapshot.obj_reset_changes() return group_snapshot def create(self): if self.obj_attr_is_set('id'): raise exception.ObjectActionError(action='create', reason=_('already_created')) updates = self.cinder_obj_get_changes() if 'group' in updates: raise exception.ObjectActionError( action='create', reason=_('group assigned')) db_group_snapshots = db.group_snapshot_create(self._context, updates) self._from_db_object(self._context, self, db_group_snapshots) def obj_load_attr(self, attrname): if attrname not in self.OPTIONAL_FIELDS: raise exception.ObjectActionError( action='obj_load_attr', reason=_('attribute %s not lazy-loadable') % attrname) if not self._context: raise exception.OrphanedObjectError(method='obj_load_attr', objtype=self.obj_name()) if attrname == 'group': self.group = objects.Group.get_by_id( self._context, self.group_id) if attrname == 'snapshots': self.snapshots = 
objects.SnapshotList.get_all_for_group_snapshot( self._context, self.id) self.obj_reset_changes(fields=[attrname]) def save(self): updates = self.cinder_obj_get_changes() if updates: if 'group' in updates: raise exception.ObjectActionError( action='save', reason=_('group changed')) if 'snapshots' in updates: raise exception.ObjectActionError( action='save', reason=_('snapshots changed')) db.group_snapshot_update(self._context, self.id, updates) self.obj_reset_changes() def destroy(self): with self.obj_as_admin(): updated_values = db.group_snapshot_destroy(self._context, self.id) self.update(updated_values) self.obj_reset_changes(updated_values.keys()) @base.CinderObjectRegistry.register class GroupSnapshotList(base.ObjectListBase, base.CinderObject): VERSION = '1.0' fields = { 'objects': fields.ListOfObjectsField('GroupSnapshot') } @classmethod def get_all(cls, context, filters=None, marker=None, limit=None, offset=None, sort_keys=None, sort_dirs=None): group_snapshots = db.group_snapshot_get_all(context, filters=filters, marker=marker, limit=limit, offset=offset, sort_keys=sort_keys, sort_dirs=sort_dirs) return base.obj_make_list(context, cls(context), objects.GroupSnapshot, group_snapshots) @classmethod def get_all_by_project(cls, context, project_id, filters=None, marker=None, limit=None, offset=None, sort_keys=None, sort_dirs=None): group_snapshots = db.group_snapshot_get_all_by_project( context, project_id, filters=filters, marker=marker, limit=limit, offset=offset, sort_keys=sort_keys, sort_dirs=sort_dirs) return base.obj_make_list(context, cls(context), objects.GroupSnapshot, group_snapshots) @classmethod def get_all_by_group(cls, context, group_id, filters=None, marker=None, limit=None, offset=None, sort_keys=None, sort_dirs=None): group_snapshots = db.group_snapshot_get_all_by_group( context, group_id, filters=filters, marker=marker, limit=limit, offset=offset, sort_keys=sort_keys, sort_dirs=sort_dirs) return base.obj_make_list(context, cls(context), objects.GroupSnapshot, group_snapshots) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/objects/group_type.py0000664000175000017500000001100600000000000020531 0ustar00zuulzuul00000000000000# Copyright 2016 EMC Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
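# Editor's illustrative sketch, not part of the packaged cinder sources: the
# GroupSnapshot object above delegates host/cluster_name to its parent group
# and lazy-loads 'group' and 'snapshots' through obj_load_attr().  A minimal,
# DB-free example of the delegation, assuming an environment where cinder is
# importable; the host string is made up for illustration.
from cinder import objects

objects.register_all()  # make sure all versioned objects are registered
parent = objects.Group(host='backend-1@lvm#pool')
group_snap = objects.GroupSnapshot(group=parent)
# host is read through the parent group rather than stored on the snapshot.
assert group_snap.host == 'backend-1@lvm#pool'
# On a real (DB-backed) object, touching group_snap.snapshots would instead
# trigger obj_load_attr('snapshots') and issue one extra query.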
from oslo_versionedobjects import fields from cinder import exception from cinder.i18n import _ from cinder import objects from cinder.objects import base from cinder.volume import group_types @base.CinderObjectRegistry.register class GroupType(base.CinderPersistentObject, base.CinderObject, base.CinderObjectDictCompat, base.CinderComparableObject): # Version 1.0: Initial version VERSION = '1.0' OPTIONAL_FIELDS = ('group_specs', 'projects') # NOTE: When adding a field obj_make_compatible needs to be updated fields = { 'id': fields.UUIDField(), 'name': fields.StringField(nullable=True), 'description': fields.StringField(nullable=True), 'is_public': fields.BooleanField(default=True, nullable=True), 'projects': fields.ListOfStringsField(nullable=True), 'group_specs': fields.DictOfNullableStringsField(nullable=True), } @classmethod def _get_expected_attrs(cls, context): return 'group_specs', 'projects' @classmethod def _from_db_object(cls, context, type, db_type, expected_attrs=None): if expected_attrs is None: expected_attrs = [] for name, field in type.fields.items(): if name in cls.OPTIONAL_FIELDS: continue value = db_type[name] if isinstance(field, fields.IntegerField): value = value or 0 type[name] = value # Get data from db_type object that was queried by joined query # from DB if 'group_specs' in expected_attrs: type.group_specs = {} specs = db_type.get('group_specs') if specs and isinstance(specs, list): type.group_specs = {item['key']: item['value'] for item in specs} elif specs and isinstance(specs, dict): type.group_specs = specs if 'projects' in expected_attrs: type.projects = db_type.get('projects', []) type._context = context type.obj_reset_changes() return type def create(self): if self.obj_attr_is_set('id'): raise exception.ObjectActionError(action='create', reason=_('already created')) db_group_type = group_types.create(self._context, self.name, self.group_specs, self.is_public, self.projects, self.description) self._from_db_object(self._context, self, db_group_type) def save(self): updates = self.cinder_obj_get_changes() if updates: group_types.update(self._context, self.id, self.name, self.description) self.obj_reset_changes() def destroy(self): with self.obj_as_admin(): group_types.destroy(self._context, self.id) @base.CinderObjectRegistry.register class GroupTypeList(base.ObjectListBase, base.CinderObject): # Version 1.0: Initial version VERSION = '1.0' fields = { 'objects': fields.ListOfObjectsField('GroupType'), } @classmethod def get_all(cls, context, inactive=0, filters=None, marker=None, limit=None, sort_keys=None, sort_dirs=None, offset=None): types = group_types.get_all_group_types(context, inactive, filters, marker=marker, limit=limit, sort_keys=sort_keys, sort_dirs=sort_dirs, offset=offset) expected_attrs = GroupType._get_expected_attrs(context) return base.obj_make_list(context, cls(context), objects.GroupType, types.values(), expected_attrs=expected_attrs) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/objects/manageableresources.py0000664000175000017500000000700600000000000022350 0ustar00zuulzuul00000000000000# Copyright 2016 Intel Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_versionedobjects import fields from cinder.objects import base class ManageableObject(object): # NOTE: When adding a field obj_make_compatible needs to be updated fields = { 'reference': fields.DictOfNullableStringsField(nullable=False), 'size': fields.IntegerField(nullable=True), 'safe_to_manage': fields.BooleanField(default=False, nullable=True), 'reason_not_safe': fields.StringField(nullable=True), 'cinder_id': fields.UUIDField(nullable=True), 'extra_info': fields.DictOfNullableStringsField(nullable=True), } @classmethod def from_primitives(cls, context, dict_resource): resource = cls() driverkeys = set(dict_resource.keys()) - set(cls.fields.keys()) for name, field in cls.fields.items(): value = dict_resource.get(name) resource[name] = value for key in driverkeys: if resource['extra_info'] is None: resource['extra_info'] = {key: dict_resource[key]} resource._context = context resource.obj_reset_changes() return resource @base.CinderObjectRegistry.register class ManageableVolume(base.CinderObject, base.CinderObjectDictCompat, base.CinderComparableObject, ManageableObject): # Version 1.0: Initial version VERSION = '1.0' @base.CinderObjectRegistry.register class ManageableSnapshot(base.CinderObject, base.CinderObjectDictCompat, ManageableObject): # Version 1.0: Initial version VERSION = '1.0' # NOTE: When adding a field obj_make_compatible needs to be updated fields = { 'source_reference': fields.DictOfNullableStringsField(), } @base.CinderObjectRegistry.register class ManageableVolumeList(base.ObjectListBase, base.CinderObject): # Version 1.0: Initial version VERSION = '1.0' fields = { 'objects': fields.ListOfObjectsField('ManageableVolume'), } @classmethod def from_primitives(cls, context, data): ManageableVolumeList.objects = [] for item in data: manage_vol_obj = ManageableVolume.from_primitives(context, item) ManageableVolumeList.objects.append(manage_vol_obj) ManageableVolumeList._context = context return ManageableVolumeList.objects @base.CinderObjectRegistry.register class ManageableSnapshotList(base.ObjectListBase, base.CinderObject): # Version 1.0: Initial version VERSION = '1.0' fields = { 'objects': fields.ListOfObjectsField('ManageableSnapshot'), } @classmethod def from_primitives(cls, context, data): ManageableSnapshotList.objects = [] for item in data: manage_snap_obj = ManageableSnapshot.from_primitives(context, item) ManageableSnapshotList.objects.append(manage_snap_obj) ManageableSnapshotList._context = context return ManageableSnapshotList.objects ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/objects/qos_specs.py0000664000175000017500000001752000000000000020342 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_db import exception as db_exc from oslo_log import log as logging from oslo_versionedobjects import fields from cinder import db from cinder import exception from cinder.i18n import _ from cinder import objects from cinder.objects import base from cinder.objects import fields as c_fields LOG = logging.getLogger(__name__) @base.CinderObjectRegistry.register class QualityOfServiceSpecs(base.CinderPersistentObject, base.CinderObject, base.CinderObjectDictCompat, base.CinderComparableObject): # Version # 1.0: Initial version VERSION = "1.0" OPTIONAL_FIELDS = ('volume_types',) # NOTE: When adding a field obj_make_compatible needs to be updated fields = { 'id': fields.UUIDField(), 'name': fields.StringField(), 'consumer': c_fields.QoSConsumerField( default=c_fields.QoSConsumerValues.BACK_END), 'specs': fields.DictOfNullableStringsField(nullable=True), 'volume_types': fields.ObjectField('VolumeTypeList', nullable=True), } def __init__(self, *args, **kwargs): super(QualityOfServiceSpecs, self).__init__(*args, **kwargs) self._init_specs = {} def __setattr__(self, name, value): try: super(QualityOfServiceSpecs, self).__setattr__(name, value) except ValueError: if name == 'consumer': # Give more descriptive error message for invalid 'consumer' msg = (_("Valid consumer of QoS specs are: %s") % c_fields.QoSConsumerField()) raise exception.InvalidQoSSpecs(reason=msg) else: raise def obj_reset_changes(self, fields=None, recursive=False): super(QualityOfServiceSpecs, self).obj_reset_changes(fields, recursive) if fields is None or 'specs' in fields: self._init_specs = self.specs.copy() if self.specs else {} def obj_what_changed(self): changes = super(QualityOfServiceSpecs, self).obj_what_changed() # Do comparison of what's in the dict vs. 
reference to the specs object if self.obj_attr_is_set('id'): if self.specs != self._init_specs: changes.add('specs') else: # If both dicts are equal don't consider anything gets changed if 'specs' in changes: changes.remove('specs') return changes def obj_get_changes(self): changes = super(QualityOfServiceSpecs, self).obj_get_changes() if 'specs' in changes: # For specs, we only want what has changed in the dictionary, # because otherwise we'll individually overwrite the DB value for # every key in 'specs' even if it hasn't changed specs_changes = {} for key, val in self.specs.items(): if val != self._init_specs.get(key): specs_changes[key] = val changes['specs'] = specs_changes specs_keys_removed = (set(self._init_specs.keys()) - set(self.specs.keys())) if specs_keys_removed: # Special key notifying which specs keys have been deleted changes['specs_keys_removed'] = specs_keys_removed return changes def obj_load_attr(self, attrname): if attrname not in self.OPTIONAL_FIELDS: raise exception.ObjectActionError( action='obj_load_attr', reason=_('attribute %s not lazy-loadable') % attrname) if not self._context: raise exception.OrphanedObjectError(method='obj_load_attr', objtype=self.obj_name()) if attrname == 'volume_types': self.volume_types = objects.VolumeTypeList.get_all_types_for_qos( self._context, self.id) @classmethod def _from_db_object(cls, context, qos_spec, db_qos_spec, expected_attrs=None): if expected_attrs is None: expected_attrs = [] for name, field in qos_spec.fields.items(): if name not in cls.OPTIONAL_FIELDS: value = db_qos_spec.get(name) # 'specs' could be null if only a consumer is given, so make # it an empty dict instead of None if not value and isinstance(field, fields.DictOfStringsField): value = {} setattr(qos_spec, name, value) if 'volume_types' in expected_attrs: volume_types = objects.VolumeTypeList.get_all_types_for_qos( context, db_qos_spec['id']) qos_spec.volume_types = volume_types qos_spec._context = context qos_spec.obj_reset_changes() return qos_spec def create(self): if self.obj_attr_is_set('id'): raise exception.ObjectActionError(action='create', reason='already created') updates = self.cinder_obj_get_changes() try: create_ret = db.qos_specs_create(self._context, updates) except db_exc.DBDataError: msg = _('Error writing field to database') LOG.exception(msg) raise exception.Invalid(msg) except db_exc.DBError: LOG.exception('DB error occurred when creating QoS specs.') raise exception.QoSSpecsCreateFailed(name=self.name, qos_specs=self.specs) # Save ID with the object updates['id'] = create_ret['id'] self._from_db_object(self._context, self, updates) def save(self): updates = self.cinder_obj_get_changes() if updates: if 'specs_keys_removed' in updates.keys(): for specs_key_to_remove in updates['specs_keys_removed']: db.qos_specs_item_delete( self._context, self.id, specs_key_to_remove) del updates['specs_keys_removed'] db.qos_specs_update(self._context, self.id, updates) self.obj_reset_changes() def destroy(self, force=False): """Deletes the QoS spec. :param force: when force is True, all volume_type mappings for this QoS are deleted. 
When force is False and volume_type mappings still exist, a QoSSpecsInUse exception is thrown """ if self.volume_types: if not force: raise exception.QoSSpecsInUse(specs_id=self.id) # remove all association db.qos_specs_disassociate_all(self._context, self.id) updated_values = db.qos_specs_delete(self._context, self.id) self.update(updated_values) self.obj_reset_changes(updated_values.keys()) @base.CinderObjectRegistry.register class QualityOfServiceSpecsList(base.ObjectListBase, base.CinderObject): VERSION = '1.0' fields = { 'objects': fields.ListOfObjectsField('QualityOfServiceSpecs'), } @classmethod def get_all(cls, context, *args, **kwargs): specs = db.qos_specs_get_all(context, *args, **kwargs) return base.obj_make_list(context, cls(context), objects.QualityOfServiceSpecs, specs) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/objects/request_spec.py0000664000175000017500000001320100000000000021035 0ustar00zuulzuul00000000000000# Copyright 2016 Intel Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_versionedobjects import fields from cinder import objects from cinder.objects import base @base.CinderObjectRegistry.register class RequestSpec(base.CinderObject, base.CinderObjectDictCompat, base.CinderComparableObject): # Version 1.0: Initial version # Version 1.1: Added group_id and group_backend # Version 1.2 Added ``resource_backend`` # Version 1.3: Added backup_id # Version 1.4: Add 'operation' # Version 1.5: Added 'availability_zones' VERSION = '1.5' # NOTE: When adding a field obj_make_compatible needs to be updated fields = { 'consistencygroup_id': fields.UUIDField(nullable=True), 'group_id': fields.UUIDField(nullable=True), 'cgsnapshot_id': fields.UUIDField(nullable=True), 'image_id': fields.UUIDField(nullable=True), 'snapshot_id': fields.UUIDField(nullable=True), 'source_replicaid': fields.UUIDField(nullable=True), 'source_volid': fields.UUIDField(nullable=True), 'volume_id': fields.UUIDField(nullable=True), 'volume': fields.ObjectField('Volume', nullable=True), 'volume_type': fields.ObjectField('VolumeType', nullable=True), 'volume_properties': fields.ObjectField('VolumeProperties', nullable=True), 'CG_backend': fields.StringField(nullable=True), 'group_backend': fields.StringField(nullable=True), 'resource_backend': fields.StringField(nullable=True), 'backup_id': fields.UUIDField(nullable=True), 'operation': fields.StringField(nullable=True), 'availability_zones': fields.ListOfStringsField(nullable=True), } obj_extra_fields = ['resource_properties'] @property def resource_properties(self): # TODO(dulek): This is to maintain compatibility with filters from # oslo-incubator. As we've moved them into our codebase we should adapt # them to use volume_properties and remove this shim. return self.volume_properties @classmethod def from_primitives(cls, spec): """Returns RequestSpec object creating it from legacy dictionary. 
FIXME(dulek): This should go away in early O as we stop supporting backward compatibility with M. """ spec = spec.copy() spec_obj = cls() vol_props = spec.pop('volume_properties', {}) if vol_props is not None: vol_props = VolumeProperties(**vol_props) spec_obj.volume_properties = vol_props if 'volume' in spec: vol = spec.pop('volume', {}) vol.pop('name', None) if vol is not None: vol = objects.Volume(**vol) spec_obj.volume = vol if 'volume_type' in spec: vol_type = spec.pop('volume_type', {}) if vol_type is not None: vol_type = objects.VolumeType(**vol_type) spec_obj.volume_type = vol_type spec.pop('resource_properties', None) for k, v in spec.items(): setattr(spec_obj, k, v) return spec_obj @base.CinderObjectRegistry.register class VolumeProperties(base.CinderObject, base.CinderObjectDictCompat): # Version 1.0: Initial version # Version 1.1: Added group_id and group_type_id VERSION = '1.1' # TODO(dulek): We should add this to initially move volume_properites to # ovo, but this should be removed as soon as possible. Most of the data # here is already in request_spec and volume there. Outstanding ones would # be reservation, and qos_specs. First one may be moved to request_spec and # second added as relationship in volume_type field and whole # volume_properties (and resource_properties) in request_spec won't be # needed. fields = { 'attach_status': fields.StringField(nullable=True), 'availability_zone': fields.StringField(nullable=True), 'cgsnapshot_id': fields.UUIDField(nullable=True), 'consistencygroup_id': fields.UUIDField(nullable=True), 'group_id': fields.UUIDField(nullable=True), 'display_description': fields.StringField(nullable=True), 'display_name': fields.StringField(nullable=True), 'encryption_key_id': fields.UUIDField(nullable=True), 'metadata': fields.DictOfStringsField(nullable=True), 'multiattach': fields.BooleanField(nullable=True), 'project_id': fields.StringField(nullable=True), 'qos_specs': fields.DictOfStringsField(nullable=True), 'replication_status': fields.StringField(nullable=True), 'reservations': fields.ListOfStringsField(nullable=True), 'size': fields.IntegerField(nullable=True), 'snapshot_id': fields.UUIDField(nullable=True), 'source_replicaid': fields.UUIDField(nullable=True), 'source_volid': fields.UUIDField(nullable=True), 'status': fields.StringField(nullable=True), 'user_id': fields.StringField(nullable=True), 'volume_type_id': fields.UUIDField(nullable=True), 'group_type_id': fields.UUIDField(nullable=True), } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/objects/service.py0000664000175000017500000002311300000000000017776 0ustar00zuulzuul00000000000000# Copyright 2015 Intel Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
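# Editor's illustrative sketch, not part of the packaged cinder sources:
# RequestSpec.from_primitives() above converts a legacy scheduler dict into
# versioned objects without touching the database, and resource_properties
# is kept as a compatibility alias for volume_properties.  The dictionary
# contents below are made up purely for illustration.
from cinder import objects

objects.register_all()
legacy = {
    'volume_properties': {'size': 10, 'status': 'creating'},
    'volume_type': {'name': 'lvm', 'extra_specs': {}},
}
spec = objects.RequestSpec.from_primitives(legacy)
assert spec.volume_properties.size == 10
assert spec.volume_type.name == 'lvm'
# Old filters that still read resource_properties get the same object back.
assert spec.resource_properties is spec.volume_properties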
from oslo_log import log as logging from oslo_utils import uuidutils from oslo_utils import versionutils from oslo_versionedobjects import fields from cinder import db from cinder import exception from cinder.i18n import _ from cinder import objects from cinder.objects import base from cinder.objects import fields as c_fields from cinder import utils LOG = logging.getLogger(__name__) @base.CinderObjectRegistry.register class Service(base.CinderPersistentObject, base.CinderObject, base.CinderObjectDictCompat, base.CinderComparableObject, base.ClusteredObject): # Version 1.0: Initial version # Version 1.1: Add rpc_current_version and object_current_version fields # Version 1.2: Add get_minimum_rpc_version() and get_minimum_obj_version() # Version 1.3: Add replication fields # Version 1.4: Add cluster fields # Version 1.5: Add UUID field # Version 1.6: Modify UUID field to be not nullable VERSION = '1.6' OPTIONAL_FIELDS = ('cluster',) # NOTE: When adding a field obj_make_compatible needs to be updated fields = { 'id': fields.IntegerField(), 'host': fields.StringField(nullable=True), 'binary': fields.StringField(nullable=True), 'cluster_name': fields.StringField(nullable=True), 'cluster': fields.ObjectField('Cluster', nullable=True, read_only=True), 'topic': fields.StringField(nullable=True), 'report_count': fields.IntegerField(default=0), 'disabled': fields.BooleanField(default=False, nullable=True), 'availability_zone': fields.StringField(nullable=True, default='cinder'), 'disabled_reason': fields.StringField(nullable=True), 'modified_at': fields.DateTimeField(nullable=True), 'rpc_current_version': fields.StringField(nullable=True), 'object_current_version': fields.StringField(nullable=True), # Replication properties 'replication_status': c_fields.ReplicationStatusField(nullable=True), 'frozen': fields.BooleanField(default=False), 'active_backend_id': fields.StringField(nullable=True), 'uuid': fields.StringField(), } @staticmethod def _from_db_object(context, service, db_service, expected_attrs=None): expected_attrs = expected_attrs or [] for name, field in service.fields.items(): if ((name == 'uuid' and not db_service.get(name)) or name in service.OPTIONAL_FIELDS): continue value = db_service.get(name) if isinstance(field, fields.IntegerField): value = value or 0 elif isinstance(field, fields.DateTimeField): value = value or None service[name] = value service._context = context if 'cluster' in expected_attrs: db_cluster = db_service.get('cluster') # If this service doesn't belong to a cluster the cluster field in # the ORM instance will have value of None. if db_cluster: service.cluster = objects.Cluster(context) objects.Cluster._from_db_object(context, service.cluster, db_cluster) else: service.cluster = None service.obj_reset_changes() return service def obj_load_attr(self, attrname): if attrname not in self.OPTIONAL_FIELDS: raise exception.ObjectActionError( action='obj_load_attr', reason=_('attribute %s not lazy-loadable') % attrname) if not self._context: raise exception.OrphanedObjectError(method='obj_load_attr', objtype=self.obj_name()) # NOTE(geguileo): We only have 1 optional field, so we don't need to # confirm that we are loading the cluster. # If this service doesn't belong to a cluster (cluster_name is empty), # then cluster field will be None. 
if self.cluster_name: self.cluster = objects.Cluster.get_by_id(self._context, None, name=self.cluster_name) else: self.cluster = None self.obj_reset_changes(fields=(attrname,)) @classmethod def get_by_host_and_topic(cls, context, host, topic, disabled=False): db_service = db.service_get(context, disabled=disabled, host=host, topic=topic) return cls._from_db_object(context, cls(context), db_service) @classmethod def get_by_args(cls, context, host, binary_key): db_service = db.service_get(context, host=host, binary=binary_key) return cls._from_db_object(context, cls(context), db_service) @classmethod def get_by_uuid(cls, context, service_uuid): db_service = db.service_get_by_uuid(context, service_uuid) return cls._from_db_object(context, cls(), db_service) def create(self): if self.obj_attr_is_set('id'): raise exception.ObjectActionError(action='create', reason=_('already created')) updates = self.cinder_obj_get_changes() if 'cluster' in updates: raise exception.ObjectActionError( action='create', reason=_('cluster assigned')) if 'uuid' not in updates: updates['uuid'] = uuidutils.generate_uuid() self.uuid = updates['uuid'] db_service = db.service_create(self._context, updates) self._from_db_object(self._context, self, db_service) def save(self, retry=True): updates = self.cinder_obj_get_changes() if 'cluster' in updates: raise exception.ObjectActionError( action='save', reason=_('cluster changed')) if updates: db.service_update(self._context, self.id, updates, retry) self.obj_reset_changes() def destroy(self): with self.obj_as_admin(): updated_values = db.service_destroy(self._context, self.id) self.update(updated_values) self.obj_reset_changes(updated_values.keys()) @classmethod def _get_minimum_version(cls, attribute, context, binary): services = ServiceList.get_all_by_binary(context, binary) min_ver = None min_ver_str = None for s in services: ver_str = getattr(s, attribute) if ver_str is None: # NOTE(dulek) None in *_current_version means that this # service is in Liberty version, which we now don't provide # backward compatibility to. msg = _('Service %s is in Liberty version. We do not provide ' 'backward compatibility with Liberty now, so you ' 'need to upgrade it, release by release if live ' 'upgrade is required. 
After upgrade you may need to ' 'remove any stale service records via ' '"cinder-manage service remove".') % s.binary raise exception.ServiceTooOld(msg) ver = versionutils.convert_version_to_int(ver_str) if min_ver is None or ver < min_ver: min_ver = ver min_ver_str = ver_str return min_ver_str @classmethod def get_minimum_rpc_version(cls, context, binary): return cls._get_minimum_version('rpc_current_version', context, binary) @classmethod def get_minimum_obj_version(cls, context, binary=None): return cls._get_minimum_version('object_current_version', context, binary) @property def is_up(self): """Check whether a service is up based on last heartbeat.""" return (self.updated_at and self.updated_at >= utils.service_expired_time(True)) @base.CinderObjectRegistry.register class ServiceList(base.ObjectListBase, base.CinderObject): # Version 1.0: Initial version # Version 1.1: Service object 1.2 VERSION = '1.1' fields = { 'objects': fields.ListOfObjectsField('Service'), } @classmethod def get_all(cls, context, filters=None): services = db.service_get_all(context, **(filters or {})) return base.obj_make_list(context, cls(context), objects.Service, services) @classmethod def get_all_by_topic(cls, context, topic, disabled=None): services = db.service_get_all(context, topic=topic, disabled=disabled) return base.obj_make_list(context, cls(context), objects.Service, services) @classmethod def get_all_by_binary(cls, context, binary, disabled=None): services = db.service_get_all(context, binary=binary, disabled=disabled) return base.obj_make_list(context, cls(context), objects.Service, services) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/objects/snapshot.py0000664000175000017500000003511400000000000020201 0ustar00zuulzuul00000000000000# Copyright 2015 SimpliVity Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg from oslo_versionedobjects import fields from cinder import db from cinder import exception from cinder.i18n import _ from cinder import objects from cinder.objects import base from cinder.objects import cleanable from cinder.objects import fields as c_fields from cinder.volume import volume_types CONF = cfg.CONF @base.CinderObjectRegistry.register class Snapshot(cleanable.CinderCleanableObject, base.CinderObject, base.CinderObjectDictCompat, base.CinderComparableObject, base.ClusteredObject): # Version 1.0: Initial version # Version 1.1: Changed 'status' field to use SnapshotStatusField # Version 1.2: This object is now cleanable (adds rows to workers table) # Version 1.3: SnapshotStatusField now includes "unmanaging" # Version 1.4: SnapshotStatusField now includes "backing-up" # Version 1.5: SnapshotStatusField now includes "restoring" # Version 1.6: Added use_quota VERSION = '1.6' # NOTE(thangp): OPTIONAL_FIELDS are fields that would be lazy-loaded. They # are typically the relationship in the sqlalchemy object. 
OPTIONAL_FIELDS = ('volume', 'metadata', 'cgsnapshot', 'group_snapshot') # NOTE: When adding a field obj_make_compatible needs to be updated fields = { 'id': fields.UUIDField(), 'user_id': fields.StringField(nullable=True), 'project_id': fields.StringField(nullable=True), 'use_quota': fields.BooleanField(default=True, nullable=False), 'volume_id': fields.UUIDField(nullable=True), 'cgsnapshot_id': fields.UUIDField(nullable=True), 'group_snapshot_id': fields.UUIDField(nullable=True), 'status': c_fields.SnapshotStatusField(nullable=True), 'progress': fields.StringField(nullable=True), 'volume_size': fields.IntegerField(nullable=True), 'display_name': fields.StringField(nullable=True), 'display_description': fields.StringField(nullable=True), 'encryption_key_id': fields.UUIDField(nullable=True), 'volume_type_id': fields.UUIDField(nullable=True), 'provider_location': fields.StringField(nullable=True), 'provider_id': fields.StringField(nullable=True), 'metadata': fields.DictOfStringsField(), 'provider_auth': fields.StringField(nullable=True), 'volume': fields.ObjectField('Volume', nullable=True), 'cgsnapshot': fields.ObjectField('CGSnapshot', nullable=True), 'group_snapshot': fields.ObjectField('GroupSnapshot', nullable=True), } @property def cluster_name(self): return self.volume.cluster_name @classmethod def _get_expected_attrs(cls, context, *args, **kwargs): return 'metadata', # NOTE(thangp): obj_extra_fields is used to hold properties that are not # usually part of the model obj_extra_fields = ['name', 'volume_name'] @property def name(self): return CONF.snapshot_name_template % self.id @property def volume_name(self): return self.volume.name def __init__(self, *args, **kwargs): super(Snapshot, self).__init__(*args, **kwargs) self.metadata = kwargs.get('metadata', {}) self._reset_metadata_tracking() def obj_reset_changes(self, fields=None): super(Snapshot, self).obj_reset_changes(fields) self._reset_metadata_tracking(fields=fields) def _reset_metadata_tracking(self, fields=None): if fields is None or 'metadata' in fields: self._orig_metadata = (dict(self.metadata) if self.obj_attr_is_set('metadata') else {}) def obj_what_changed(self): changes = super(Snapshot, self).obj_what_changed() if hasattr(self, 'metadata') and self.metadata != self._orig_metadata: changes.add('metadata') return changes @classmethod def _from_db_object(cls, context, snapshot, db_snapshot, expected_attrs=None): if expected_attrs is None: expected_attrs = [] for name, field in snapshot.fields.items(): if name in cls.OPTIONAL_FIELDS: continue value = db_snapshot.get(name) if isinstance(field, fields.IntegerField): value = value if value is not None else 0 setattr(snapshot, name, value) if 'volume' in expected_attrs: volume = objects.Volume(context) volume._from_db_object(context, volume, db_snapshot['volume']) snapshot.volume = volume if snapshot.cgsnapshot_id and 'cgsnapshot' in expected_attrs: cgsnapshot = objects.CGSnapshot(context) cgsnapshot._from_db_object(context, cgsnapshot, db_snapshot['cgsnapshot']) snapshot.cgsnapshot = cgsnapshot if snapshot.group_snapshot_id and 'group_snapshot' in expected_attrs: group_snapshot = objects.GroupSnapshot(context) group_snapshot._from_db_object(context, group_snapshot, db_snapshot['group_snapshot']) snapshot.group_snapshot = group_snapshot if 'metadata' in expected_attrs: metadata = db_snapshot.get('snapshot_metadata') if metadata is None: raise exception.MetadataAbsent() snapshot.metadata = {item['key']: item['value'] for item in metadata} snapshot._context = context 
snapshot.obj_reset_changes() return snapshot def create(self): if self.obj_attr_is_set('id'): raise exception.ObjectActionError(action='create', reason=_('already created')) updates = self.cinder_obj_get_changes() if 'volume' in updates: raise exception.ObjectActionError(action='create', reason=_('volume assigned')) if 'cgsnapshot' in updates: raise exception.ObjectActionError(action='create', reason=_('cgsnapshot assigned')) if 'cluster' in updates: raise exception.ObjectActionError( action='create', reason=_('cluster assigned')) if 'group_snapshot' in updates: raise exception.ObjectActionError( action='create', reason=_('group_snapshot assigned')) if ('volume_type_id' not in updates or updates['volume_type_id'] is None): updates['volume_type_id'] = ( volume_types.get_default_volume_type()['id']) db_snapshot = db.snapshot_create(self._context, updates) self._from_db_object(self._context, self, db_snapshot) def save(self): updates = self.cinder_obj_get_changes() if updates: if 'volume' in updates: raise exception.ObjectActionError(action='save', reason=_('volume changed')) if 'cgsnapshot' in updates: # NOTE(xyang): Allow this to pass if 'cgsnapshot' is # set to None. This is to support backward compatibility. if updates.get('cgsnapshot'): raise exception.ObjectActionError( action='save', reason=_('cgsnapshot changed')) if 'group_snapshot' in updates: raise exception.ObjectActionError( action='save', reason=_('group_snapshot changed')) if 'cluster' in updates: raise exception.ObjectActionError( action='save', reason=_('cluster changed')) if 'metadata' in updates: # Metadata items that are not specified in the # self.metadata will be deleted metadata = updates.pop('metadata', None) self.metadata = db.snapshot_metadata_update(self._context, self.id, metadata, True) db.snapshot_update(self._context, self.id, updates) self.obj_reset_changes() def destroy(self): with self.obj_as_admin(): updated_values = db.snapshot_destroy(self._context, self.id) self.update(updated_values) self.obj_reset_changes(updated_values.keys()) def obj_load_attr(self, attrname): if attrname not in self.OPTIONAL_FIELDS: raise exception.ObjectActionError( action='obj_load_attr', reason=_('attribute %s not lazy-loadable') % attrname) if not self._context: raise exception.OrphanedObjectError(method='obj_load_attr', objtype=self.obj_name()) if attrname == 'volume': self.volume = objects.Volume.get_by_id(self._context, self.volume_id) if attrname == 'cgsnapshot': if self.cgsnapshot_id is None: self.cgsnapshot = None else: self.cgsnapshot = objects.CGSnapshot.get_by_id( self._context, self.cgsnapshot_id) if attrname == 'group_snapshot': if self.group_snapshot_id is None: self.group_snapshot = None else: self.group_snapshot = objects.GroupSnapshot.get_by_id( self._context, self.group_snapshot_id) self.obj_reset_changes(fields=[attrname]) def delete_metadata_key(self, context, key): db.snapshot_metadata_delete(context, self.id, key) md_was_changed = 'metadata' in self.obj_what_changed() del self.metadata[key] self._orig_metadata.pop(key, None) if not md_was_changed: self.obj_reset_changes(['metadata']) @classmethod def snapshot_data_get_for_project(cls, context, project_id, volume_type_id=None, host=None): return db.snapshot_data_get_for_project(context, project_id, volume_type_id, host=host) @staticmethod def _is_cleanable(status, obj_version): # Before 1.2 we didn't have workers table, so cleanup wasn't supported. 
if obj_version and obj_version < 1.2: return False return status == 'creating' @property def host(self): """All cleanable VO must have a host property/attribute.""" return self.volume.host @base.CinderObjectRegistry.register class SnapshotList(base.ObjectListBase, base.CinderObject): VERSION = '1.0' fields = { 'objects': fields.ListOfObjectsField('Snapshot'), } @classmethod def get_all(cls, context, filters, marker=None, limit=None, sort_keys=None, sort_dirs=None, offset=None): """Get all snapshot given some search_opts (filters). Special filters accepted are host and cluster_name, that refer to the volume's fields. """ snapshots = db.snapshot_get_all(context, filters, marker, limit, sort_keys, sort_dirs, offset) expected_attrs = Snapshot._get_expected_attrs(context) return base.obj_make_list(context, cls(context), objects.Snapshot, snapshots, expected_attrs=expected_attrs) @classmethod def get_by_host(cls, context, host, filters=None): snapshots = db.snapshot_get_all_by_host(context, host, filters) expected_attrs = Snapshot._get_expected_attrs(context) return base.obj_make_list(context, cls(context), objects.Snapshot, snapshots, expected_attrs=expected_attrs) @classmethod def get_all_by_project(cls, context, project_id, search_opts, marker=None, limit=None, sort_keys=None, sort_dirs=None, offset=None): snapshots = db.snapshot_get_all_by_project( context, project_id, search_opts, marker, limit, sort_keys, sort_dirs, offset) expected_attrs = Snapshot._get_expected_attrs(context) return base.obj_make_list(context, cls(context), objects.Snapshot, snapshots, expected_attrs=expected_attrs) @classmethod def get_all_for_volume(cls, context, volume_id): snapshots = db.snapshot_get_all_for_volume(context, volume_id) expected_attrs = Snapshot._get_expected_attrs(context) return base.obj_make_list(context, cls(context), objects.Snapshot, snapshots, expected_attrs=expected_attrs) @classmethod def get_all_active_by_window(cls, context, begin, end): snapshots = db.snapshot_get_all_active_by_window(context, begin, end) expected_attrs = Snapshot._get_expected_attrs(context) return base.obj_make_list(context, cls(context), objects.Snapshot, snapshots, expected_attrs=expected_attrs) @classmethod def get_all_for_cgsnapshot(cls, context, cgsnapshot_id): snapshots = db.snapshot_get_all_for_cgsnapshot(context, cgsnapshot_id) expected_attrs = Snapshot._get_expected_attrs(context) return base.obj_make_list(context, cls(context), objects.Snapshot, snapshots, expected_attrs=expected_attrs) @classmethod def get_all_for_group_snapshot(cls, context, group_snapshot_id): snapshots = db.snapshot_get_all_for_group_snapshot( context, group_snapshot_id) expected_attrs = Snapshot._get_expected_attrs(context) return base.obj_make_list(context, cls(context), objects.Snapshot, snapshots, expected_attrs=expected_attrs) @classmethod def get_snapshot_summary(cls, context, project_only, filters=None): summary = db.get_snapshot_summary(context, project_only, filters) return summary ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/objects/volume.py0000664000175000017500000007667600000000000017673 0ustar00zuulzuul00000000000000# Copyright 2015 SimpliVity Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg from oslo_log import log as logging from oslo_versionedobjects import fields from cinder import db from cinder import exception from cinder.i18n import _ from cinder import objects from cinder.objects import base from cinder.objects import cleanable from cinder.objects import fields as c_fields from cinder.volume import volume_types CONF = cfg.CONF LOG = logging.getLogger(__name__) class MetadataObject(dict): # This is a wrapper class that simulates SQLAlchemy (.*)Metadata objects to # maintain compatibility with older representations of Volume that some # drivers rely on. This is helpful in transition period while some driver # methods are invoked with volume versioned object and some SQLAlchemy # object or dict. def __init__(self, key=None, value=None): super(MetadataObject, self).__init__() self.key = key self.value = value def __getattr__(self, name): if name in self: return self[name] else: raise AttributeError("No such attribute: " + name) def __setattr__(self, name, value): self[name] = value @base.CinderObjectRegistry.register class Volume(cleanable.CinderCleanableObject, base.CinderObject, base.CinderObjectDictCompat, base.CinderComparableObject, base.ClusteredObject): # Version 1.0: Initial version # Version 1.1: Added metadata, admin_metadata, volume_attachment, and # volume_type # Version 1.2: Added glance_metadata, consistencygroup and snapshots # Version 1.3: Added finish_volume_migration() # Version 1.4: Added cluster fields # Version 1.5: Added group # Version 1.6: This object is now cleanable (adds rows to workers table) # Version 1.7: Added service_uuid # Version 1.8: Added shared_targets # Version 1.9: Added use_quota VERSION = '1.9' OPTIONAL_FIELDS = ('metadata', 'admin_metadata', 'glance_metadata', 'volume_type', 'volume_attachment', 'consistencygroup', 'snapshots', 'cluster', 'group') # NOTE: When adding a field obj_make_compatible needs to be updated fields = { # id is the user facing UUID that should be passed to API calls 'id': fields.UUIDField(), # _name_id is the real volume's UUID that should be used by the driver # when it is set. This is used when migrating a volume. Property # name_id is provided for convenience. 
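    # It normally stays unset; a generic volume migration fills it in when the
    # new volume could not be renamed back, so drivers keep addressing the
    # right backend UUID (see the name_id property below).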
'_name_id': fields.UUIDField(nullable=True), 'ec2_id': fields.UUIDField(nullable=True), 'user_id': fields.StringField(nullable=True), 'project_id': fields.StringField(nullable=True), 'use_quota': fields.BooleanField(default=True, nullable=False), 'snapshot_id': fields.UUIDField(nullable=True), 'cluster_name': fields.StringField(nullable=True), 'cluster': fields.ObjectField('Cluster', nullable=True, read_only=True), 'host': fields.StringField(nullable=True), 'size': fields.IntegerField(nullable=True), 'availability_zone': fields.StringField(nullable=True), 'status': fields.StringField(nullable=True), 'attach_status': c_fields.VolumeAttachStatusField(nullable=True), 'migration_status': fields.StringField(nullable=True), 'scheduled_at': fields.DateTimeField(nullable=True), 'launched_at': fields.DateTimeField(nullable=True), 'terminated_at': fields.DateTimeField(nullable=True), 'display_name': fields.StringField(nullable=True), 'display_description': fields.StringField(nullable=True), 'provider_id': fields.StringField(nullable=True), 'provider_location': fields.StringField(nullable=True), 'provider_auth': fields.StringField(nullable=True), 'provider_geometry': fields.StringField(nullable=True), 'volume_type_id': fields.UUIDField(nullable=True), 'source_volid': fields.UUIDField(nullable=True), 'encryption_key_id': fields.UUIDField(nullable=True), 'consistencygroup_id': fields.UUIDField(nullable=True), 'group_id': fields.UUIDField(nullable=True), 'deleted': fields.BooleanField(default=False, nullable=True), 'bootable': fields.BooleanField(default=False, nullable=True), 'multiattach': fields.BooleanField(default=False, nullable=True), 'replication_status': fields.StringField(nullable=True), 'replication_extended_status': fields.StringField(nullable=True), 'replication_driver_data': fields.StringField(nullable=True), 'previous_status': fields.StringField(nullable=True), 'metadata': fields.DictOfStringsField(nullable=True), 'admin_metadata': fields.DictOfStringsField(nullable=True), 'glance_metadata': fields.DictOfStringsField(nullable=True), 'volume_type': fields.ObjectField('VolumeType', nullable=True), 'volume_attachment': fields.ObjectField('VolumeAttachmentList', nullable=True), 'consistencygroup': fields.ObjectField('ConsistencyGroup', nullable=True), 'snapshots': fields.ObjectField('SnapshotList', nullable=True), 'group': fields.ObjectField('Group', nullable=True), 'service_uuid': fields.StringField(nullable=True), 'shared_targets': fields.BooleanField(default=True, nullable=True), } # NOTE(thangp): obj_extra_fields is used to hold properties that are not # usually part of the model obj_extra_fields = ['name', 'name_id', 'volume_metadata', 'volume_admin_metadata', 'volume_glance_metadata'] @classmethod def _get_expected_attrs(cls, context, *args, **kwargs): expected_attrs = ['metadata', 'volume_type', 'volume_type.extra_specs', 'volume_attachment'] if context.is_admin: expected_attrs.append('admin_metadata') return expected_attrs @property def name_id(self): """Actual volume's UUID for driver usage. There may be two different UUIDs for the same volume, the user facing one, and the one the driver should be using. When a volume is created these two are the same, but when doing a generic migration (create new volume, then copying data) they will be different if we were unable to rename the new volume in the final migration steps. 
So the volume will have been created using the new volume's UUID and the driver will have to look for it using that UUID, but the user on the other hand will keep referencing the volume with the original UUID. This property facilitates using the right UUID in the driver's code. """ return self.id if not self._name_id else self._name_id @name_id.setter def name_id(self, value): self._name_id = value @property def name(self): return CONF.volume_name_template % self.name_id # TODO(dulek): Three properties below are for compatibility with dict # representation of volume. The format there is different (list of # SQLAlchemy models) so we need a conversion. Anyway - these should be # removed when we stop this class from deriving from DictObjectCompat. @property def volume_metadata(self): md = [MetadataObject(k, v) for k, v in self.metadata.items()] return md @volume_metadata.setter def volume_metadata(self, value): md = {d['key']: d['value'] for d in value} self.metadata = md @property def volume_admin_metadata(self): md = [MetadataObject(k, v) for k, v in self.admin_metadata.items()] return md @volume_admin_metadata.setter def volume_admin_metadata(self, value): md = {d['key']: d['value'] for d in value} self.admin_metadata = md def admin_metadata_update(self, metadata, delete, add=True, update=True): new_metadata = db.volume_admin_metadata_update(self._context, self.id, metadata, delete, add, update) self.admin_metadata = new_metadata self._reset_metadata_tracking(fields=('admin_metadata',)) @property def volume_glance_metadata(self): md = [MetadataObject(k, v) for k, v in self.glance_metadata.items()] return md @volume_glance_metadata.setter def volume_glance_metadata(self, value): md = {d['key']: d['value'] for d in value} self.glance_metadata = md def __init__(self, *args, **kwargs): super(Volume, self).__init__(*args, **kwargs) self._reset_metadata_tracking() def obj_reset_changes(self, fields=None): super(Volume, self).obj_reset_changes(fields) self._reset_metadata_tracking(fields=fields) @classmethod def _obj_from_primitive(cls, context, objver, primitive): obj = super(Volume, Volume)._obj_from_primitive(context, objver, primitive) obj._reset_metadata_tracking() return obj def _reset_metadata_tracking(self, fields=None): if fields is None or 'metadata' in fields: self._orig_metadata = (dict(self.metadata) if 'metadata' in self else {}) if fields is None or 'admin_metadata' in fields: self._orig_admin_metadata = (dict(self.admin_metadata) if 'admin_metadata' in self else {}) if fields is None or 'glance_metadata' in fields: self._orig_glance_metadata = (dict(self.glance_metadata) if 'glance_metadata' in self else {}) def obj_what_changed(self): changes = super(Volume, self).obj_what_changed() if 'metadata' in self and self.metadata != self._orig_metadata: changes.add('metadata') if ('admin_metadata' in self and self.admin_metadata != self._orig_admin_metadata): changes.add('admin_metadata') if ('glance_metadata' in self and self.glance_metadata != self._orig_glance_metadata): changes.add('glance_metadata') return changes @classmethod def _from_db_object(cls, context, volume, db_volume, expected_attrs=None): if expected_attrs is None: expected_attrs = [] for name, field in volume.fields.items(): if name in cls.OPTIONAL_FIELDS: continue value = db_volume.get(name) if isinstance(field, fields.IntegerField): value = value or 0 volume[name] = value # Get data from db_volume object that was queried by joined query # from DB if 'metadata' in expected_attrs: metadata = 
db_volume.get('volume_metadata', []) volume.metadata = {item['key']: item['value'] for item in metadata} if 'admin_metadata' in expected_attrs: metadata = db_volume.get('volume_admin_metadata', []) volume.admin_metadata = {item['key']: item['value'] for item in metadata} # TODO: (A release): Remove code of temporary admin metadata delete if 'temporary' in volume.admin_metadata: volume.admin_metadata.pop('temporary') # Admin metadata deletion requires admin context, but since # read also requires it we know our context is admin. db.volume_admin_metadata_delete(context, volume.id, 'temporary') if 'glance_metadata' in expected_attrs: metadata = db_volume.get('volume_glance_metadata', []) volume.glance_metadata = {item['key']: item['value'] for item in metadata} if 'volume_type' in expected_attrs: db_volume_type = db_volume.get('volume_type') if db_volume_type: vt_expected_attrs = [] if 'volume_type.extra_specs' in expected_attrs: vt_expected_attrs.append('extra_specs') volume.volume_type = objects.VolumeType._from_db_object( context, objects.VolumeType(), db_volume_type, expected_attrs=vt_expected_attrs) if 'volume_attachment' in expected_attrs: attachments = base.obj_make_list( context, objects.VolumeAttachmentList(context), objects.VolumeAttachment, db_volume.get('volume_attachment')) volume.volume_attachment = attachments if volume.consistencygroup_id and 'consistencygroup' in expected_attrs: consistencygroup = objects.ConsistencyGroup(context) consistencygroup._from_db_object(context, consistencygroup, db_volume['consistencygroup']) volume.consistencygroup = consistencygroup if 'snapshots' in expected_attrs: snapshots = base.obj_make_list( context, objects.SnapshotList(context), objects.Snapshot, db_volume['snapshots']) volume.snapshots = snapshots if 'cluster' in expected_attrs: db_cluster = db_volume.get('cluster') # If this volume doesn't belong to a cluster the cluster field in # the ORM instance will have value of None. if db_cluster: volume.cluster = objects.Cluster(context) objects.Cluster._from_db_object(context, volume.cluster, db_cluster) else: volume.cluster = None if volume.group_id and 'group' in expected_attrs: group = objects.Group(context) group._from_db_object(context, group, db_volume['group']) volume.group = group volume._context = context volume.obj_reset_changes() return volume def populate_consistencygroup(self): """Populate CG fields based on group fields. Method assumes that consistencygroup_id and consistencygroup fields have not already been set. This is a hack to support backward compatibility of consistencygroup, where we set the fields but don't want to write them to the DB, so we mark them as not changed, so they won't be stored on the next save(). 
""" self.consistencygroup_id = self.group_id if self.group_id and self.obj_attr_is_set('group'): cg = objects.ConsistencyGroup() cg.from_group(self.group) self.consistencygroup = cg self.obj_reset_changes(['consistencygroup', 'consistencygroup_id']) def create(self): if self.obj_attr_is_set('id'): raise exception.ObjectActionError(action='create', reason=_('already created')) updates = self.cinder_obj_get_changes() if 'consistencygroup' in updates: raise exception.ObjectActionError( action='create', reason=_('consistencygroup assigned')) if 'snapshots' in updates: raise exception.ObjectActionError( action='create', reason=_('snapshots assigned')) if 'cluster' in updates: raise exception.ObjectActionError( action='create', reason=_('cluster assigned')) if 'group' in updates: raise exception.ObjectActionError( action='create', reason=_('group assigned')) if ('volume_type_id' not in updates or updates['volume_type_id'] is None): updates['volume_type_id'] = ( volume_types.get_default_volume_type()['id']) db_volume = db.volume_create(self._context, updates) expected_attrs = self._get_expected_attrs(self._context) self._from_db_object(self._context, self, db_volume, expected_attrs) def save(self): updates = self.cinder_obj_get_changes() if updates: # NOTE(xyang): Allow this to pass if 'consistencygroup' is # set to None. This is to support backward compatibility. # Also remove 'consistencygroup' from updates because # consistencygroup is the name of a relationship in the ORM # Volume model, so SQLA tries to do some kind of update of # the foreign key based on the provided updates if # 'consistencygroup' is in updates. if updates.pop('consistencygroup', None): raise exception.ObjectActionError( action='save', reason=_('consistencygroup changed')) if 'group' in updates: raise exception.ObjectActionError( action='save', reason=_('group changed')) if 'glance_metadata' in updates: raise exception.ObjectActionError( action='save', reason=_('glance_metadata changed')) if 'snapshots' in updates: raise exception.ObjectActionError( action='save', reason=_('snapshots changed')) if 'cluster' in updates: raise exception.ObjectActionError( action='save', reason=_('cluster changed')) if 'metadata' in updates: # Metadata items that are not specified in the # self.metadata will be deleted metadata = updates.pop('metadata', None) self.metadata = db.volume_metadata_update(self._context, self.id, metadata, True) if self._context.is_admin and 'admin_metadata' in updates: metadata = updates.pop('admin_metadata', None) self.admin_metadata = db.volume_admin_metadata_update( self._context, self.id, metadata, True) # When we are creating a volume and we change from 'creating' # status to 'downloading' status we have to change the worker entry # in the DB to reflect this change, otherwise the cleanup will # not be performed as it will be mistaken for a volume that has # been somehow changed (reset status, forced operation...) if updates.get('status') == 'downloading': self.set_worker() # updates are changed after popping out metadata. 
if updates: db.volume_update(self._context, self.id, updates) self.obj_reset_changes() def destroy(self): with self.obj_as_admin(): updated_values = db.volume_destroy(self._context, self.id) self.update(updated_values) self.obj_reset_changes(updated_values.keys()) def obj_load_attr(self, attrname): if attrname not in self.OPTIONAL_FIELDS: raise exception.ObjectActionError( action='obj_load_attr', reason=_('attribute %s not lazy-loadable') % attrname) if not self._context: raise exception.OrphanedObjectError(method='obj_load_attr', objtype=self.obj_name()) if attrname == 'metadata': self.metadata = db.volume_metadata_get(self._context, self.id) elif attrname == 'admin_metadata': self.admin_metadata = {} if self._context.is_admin: self.admin_metadata = db.volume_admin_metadata_get( self._context, self.id) elif attrname == 'glance_metadata': try: # NOTE(dulek): We're using alias here to have conversion from # list to dict done there. self.volume_glance_metadata = db.volume_glance_metadata_get( self._context, self.id) except exception.GlanceMetadataNotFound: # NOTE(dulek): DB API raises when volume has no # glance_metadata. Silencing this because at this level no # metadata is a completely valid result. self.glance_metadata = {} elif attrname == 'volume_type': # If the volume doesn't have volume_type, VolumeType.get_by_id # would trigger a db call which raise VolumeTypeNotFound exception. self.volume_type = (objects.VolumeType.get_by_id( self._context, self.volume_type_id) if self.volume_type_id else None) elif attrname == 'volume_attachment': attachments = objects.VolumeAttachmentList.get_all_by_volume_id( self._context, self.id) self.volume_attachment = attachments elif attrname == 'consistencygroup': if self.consistencygroup_id is None: self.consistencygroup = None else: consistencygroup = objects.ConsistencyGroup.get_by_id( self._context, self.consistencygroup_id) self.consistencygroup = consistencygroup elif attrname == 'snapshots': self.snapshots = objects.SnapshotList.get_all_for_volume( self._context, self.id) elif attrname == 'cluster': # If this volume doesn't belong to a cluster (cluster_name is # empty), then cluster field will be None. if self.cluster_name: self.cluster = objects.Cluster.get_by_id( self._context, name=self.cluster_name) else: self.cluster = None elif attrname == 'group': if self.group_id is None: self.group = None else: group = objects.Group.get_by_id( self._context, self.group_id) self.group = group self.obj_reset_changes(fields=[attrname]) def delete_metadata_key(self, key): db.volume_metadata_delete(self._context, self.id, key) md_was_changed = 'metadata' in self.obj_what_changed() del self.metadata[key] self._orig_metadata.pop(key, None) if not md_was_changed: self.obj_reset_changes(['metadata']) def finish_volume_migration(self, dest_volume): # We swap fields between source (i.e. self) and destination at the # end of migration because we want to keep the original volume id # in the DB but now pointing to the migrated volume. skip = ({'id', 'provider_location', 'glance_metadata', 'use_quota', 'volume_type', 'volume_attachment'} | set(self.obj_extra_fields)) for key in set(dest_volume.fields.keys()) - skip: # Only swap attributes that are already set. We do not want to # unexpectedly trigger a lazy-load. if not dest_volume.obj_attr_is_set(key): continue value = getattr(dest_volume, key) value_to_dst = getattr(self, key) # Destination must have a _name_id since the id no longer matches # the volume. If it doesn't have a _name_id we set one. 
if key == '_name_id': if not dest_volume._name_id: setattr(dest_volume, key, self.id) continue elif key == 'migration_status': value = None value_to_dst = 'deleting' elif key == 'display_description': value_to_dst = 'migration src for ' + self.id elif key == 'status': value_to_dst = 'deleting' # Because dest_volume will be deleted soon, we can # skip to copy volume_type_id and volume_type which # are not keys for volume deletion. elif key == 'volume_type_id': # Initialize volume_type of source volume using # new volume_type_id. self.update({'volume_type_id': value}) continue setattr(self, key, value) setattr(dest_volume, key, value_to_dst) self.save() dest_volume.save() return dest_volume def get_latest_snapshot(self): """Get volume's latest snapshot""" snapshot_db = db.snapshot_get_latest_for_volume(self._context, self.id) snapshot = objects.Snapshot(self._context) return snapshot._from_db_object(self._context, snapshot, snapshot_db) @staticmethod def _is_cleanable(status, obj_version): # Before 1.6 we didn't have workers table, so cleanup wasn't supported. # cleaning. if obj_version and obj_version < 1.6: return False return status in ('creating', 'deleting', 'uploading', 'downloading') def begin_attach(self, attach_mode): attachment = objects.VolumeAttachment( context=self._context, attach_status=c_fields.VolumeAttachStatus.ATTACHING, volume_id=self.id) attachment.create() with self.obj_as_admin(): self.admin_metadata['attached_mode'] = attach_mode self.save() return attachment def finish_detach(self, attachment_id): with self.obj_as_admin(): volume_updates, attachment_updates = ( db.volume_detached(self._context, self.id, attachment_id)) db.volume_admin_metadata_delete(self._context, self.id, 'attached_mode') self.admin_metadata.pop('attached_mode', None) # Remove attachment in volume only when this field is loaded. if attachment_updates and self.obj_attr_is_set('volume_attachment'): for i, attachment in enumerate(self.volume_attachment): if attachment.id == attachment_id: del self.volume_attachment.objects[i] break self.update(volume_updates) self.obj_reset_changes( list(volume_updates.keys()) + ['volume_attachment', 'admin_metadata']) def is_replicated(self): return self.volume_type and self.volume_type.is_replicated() def is_multiattach(self): return self.volume_type and self.volume_type.is_multiattach() # Don't add it as a property to avoid having to add it obj_extra_fields, # to manager's _VOLUME_CLONE_SKIP_PROPERTIES, etc. def is_migration_target(self): return (self.migration_status or '').startswith('target:') @base.CinderObjectRegistry.register class VolumeList(base.ObjectListBase, base.CinderObject): VERSION = '1.1' fields = { 'objects': fields.ListOfObjectsField('Volume'), } @staticmethod def include_in_cluster(context, cluster, partial_rename=True, **filters): """Include all volumes matching the filters into a cluster. When partial_rename is set we will not set the cluster_name with cluster parameter value directly, we'll replace provided cluster_name or host filter value with cluster instead. This is useful when we want to replace just the cluster name but leave the backend and pool information as it is. If we are using cluster_name to filter, we'll use that same DB field to replace the cluster value and leave the rest as it is. Likewise if we use the host to filter. Returns the number of volumes that have been changed. 
""" return db.volume_include_in_cluster(context, cluster, partial_rename, **filters) @classmethod def _get_expected_attrs(cls, context, *args, **kwargs): expected_attrs = ['metadata', 'volume_type', 'volume_attachment'] if context.is_admin: expected_attrs.append('admin_metadata') return expected_attrs @classmethod def get_all(cls, context, marker=None, limit=None, sort_keys=None, sort_dirs=None, filters=None, offset=None): volumes = db.volume_get_all(context, marker, limit, sort_keys=sort_keys, sort_dirs=sort_dirs, filters=filters, offset=offset) expected_attrs = cls._get_expected_attrs(context) return base.obj_make_list(context, cls(context), objects.Volume, volumes, expected_attrs=expected_attrs) @classmethod def get_all_by_host(cls, context, host, filters=None): volumes = db.volume_get_all_by_host(context, host, filters) expected_attrs = cls._get_expected_attrs(context) return base.obj_make_list(context, cls(context), objects.Volume, volumes, expected_attrs=expected_attrs) @classmethod def get_all_by_group(cls, context, group_id, filters=None): # Consistency group volumes = db.volume_get_all_by_group(context, group_id, filters) expected_attrs = cls._get_expected_attrs(context) return base.obj_make_list(context, cls(context), objects.Volume, volumes, expected_attrs=expected_attrs) @classmethod def get_all_by_generic_group(cls, context, group_id, filters=None): # Generic volume group volumes = db.volume_get_all_by_generic_group(context, group_id, filters) expected_attrs = cls._get_expected_attrs(context) return base.obj_make_list(context, cls(context), objects.Volume, volumes, expected_attrs=expected_attrs) @classmethod def get_all_by_project(cls, context, project_id, marker=None, limit=None, sort_keys=None, sort_dirs=None, filters=None, offset=None): volumes = db.volume_get_all_by_project(context, project_id, marker, limit, sort_keys=sort_keys, sort_dirs=sort_dirs, filters=filters, offset=offset) expected_attrs = cls._get_expected_attrs(context) return base.obj_make_list(context, cls(context), objects.Volume, volumes, expected_attrs=expected_attrs) @classmethod def get_volume_summary(cls, context, project_only, filters=None): volumes = db.get_volume_summary(context, project_only, filters) return volumes @classmethod def get_all_active_by_window(cls, context, begin, end): volumes = db.volume_get_all_active_by_window(context, begin, end) expected_attrs = cls._get_expected_attrs(context) return base.obj_make_list(context, cls(context), objects.Volume, volumes, expected_attrs=expected_attrs) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/objects/volume_attachment.py0000664000175000017500000002376500000000000022072 0ustar00zuulzuul00000000000000# Copyright 2015 SimpliVity Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from oslo_serialization import jsonutils from oslo_versionedobjects import fields from cinder import db from cinder import exception from cinder.i18n import _ from cinder import objects from cinder.objects import base from cinder.objects import fields as c_fields @base.CinderObjectRegistry.register class VolumeAttachment(base.CinderPersistentObject, base.CinderObject, base.CinderObjectDictCompat, base.CinderComparableObject): # Version 1.0: Initial version # Version 1.1: Added volume relationship # Version 1.2: Added connection_info attribute # Version 1.3: Added the connector attribute. VERSION = '1.3' OPTIONAL_FIELDS = ('volume',) obj_extra_fields = ['project_id', 'volume_host'] # NOTE: When adding a field obj_make_compatible needs to be updated fields = { 'id': fields.UUIDField(), 'volume_id': fields.UUIDField(), 'instance_uuid': fields.UUIDField(nullable=True), 'attached_host': fields.StringField(nullable=True), 'mountpoint': fields.StringField(nullable=True), 'attach_time': fields.DateTimeField(nullable=True), 'detach_time': fields.DateTimeField(nullable=True), 'attach_status': c_fields.VolumeAttachStatusField(nullable=True), 'attach_mode': fields.StringField(nullable=True), 'volume': fields.ObjectField('Volume', nullable=False), 'connection_info': c_fields.DictOfNullableField(nullable=True), 'connector': c_fields.DictOfNullableField(nullable=True) } @property def project_id(self): return self.volume.project_id @property def volume_host(self): return self.volume.host @classmethod def _get_expected_attrs(cls, context, *args, **kwargs): return ['volume'] @classmethod def _from_db_object(cls, context, attachment, db_attachment, expected_attrs=None): if expected_attrs is None: expected_attrs = cls._get_expected_attrs(context) for name, field in attachment.fields.items(): if name in cls.OPTIONAL_FIELDS: continue value = db_attachment.get(name) if isinstance(field, fields.IntegerField): value = value or 0 if name in ('connection_info', 'connector'): # Both of these fields are nullable serialized json dicts. setattr(attachment, name, jsonutils.loads(value) if value else None) else: attachment[name] = value # NOTE: Check against the ORM instance's dictionary instead of using # hasattr or get to avoid the lazy loading of the Volume on # VolumeList.get_all. # Getting a Volume loads its VolumeAttachmentList, which think they # have the volume loaded, but they don't. More detail on # https://review.opendev.org/632549 # and its related bug report. if 'volume' in expected_attrs and 'volume' in vars(db_attachment): db_volume = db_attachment.volume if db_volume: attachment.volume = objects.Volume._from_db_object( context, objects.Volume(), db_volume) attachment._context = context attachment.obj_reset_changes() # This is an online data migration which we should remove when enough # time has passed and we have a blocker schema migration to check to # make sure that the attachment_specs table is empty. Operators should # run the "cinder-manage db online_data_migrations" CLI to force the # migration on-demand. connector = db.attachment_specs_get(context, attachment.id) if connector: # Update ourselves and delete the attachment_specs. attachment.connector = connector attachment.save() # TODO(mriedem): Really need a delete-all method for this. 
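            # Until then, delete each migrated spec key individually, right
            # after the connector has been copied onto the attachment and
            # saved above.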
for spec_key in connector: db.attachment_specs_delete( context, attachment.id, spec_key) return attachment def obj_load_attr(self, attrname): if attrname not in self.OPTIONAL_FIELDS: raise exception.ObjectActionError( action='obj_load_attr', reason=_('attribute %s not lazy-loadable') % attrname) if not self._context: raise exception.OrphanedObjectError(method='obj_load_attr', objtype=self.obj_name()) if attrname == 'volume': volume = objects.Volume.get_by_id(self._context, self.volume_id) self.volume = volume self.obj_reset_changes(fields=[attrname]) @staticmethod def _convert_connection_info_to_db_format(updates): properties = updates.pop('connection_info', None) if properties is not None: updates['connection_info'] = jsonutils.dumps(properties) @staticmethod def _convert_connector_to_db_format(updates): connector = updates.pop('connector', None) if connector is not None: updates['connector'] = jsonutils.dumps(connector) def save(self): updates = self.cinder_obj_get_changes() if updates: if 'connection_info' in updates: self._convert_connection_info_to_db_format(updates) if 'connector' in updates: self._convert_connector_to_db_format(updates) if 'volume' in updates: raise exception.ObjectActionError(action='save', reason=_('volume changed')) db.volume_attachment_update(self._context, self.id, updates) self.obj_reset_changes() def finish_attach(self, instance_uuid, host_name, mount_point, attach_mode='rw'): with self.obj_as_admin(): db_volume, updated_values = db.volume_attached( self._context, self.id, instance_uuid, host_name, mount_point, attach_mode) self.update(updated_values) self.obj_reset_changes(updated_values.keys()) return objects.Volume._from_db_object(self._context, objects.Volume(), db_volume) def create(self): if self.obj_attr_is_set('id'): raise exception.ObjectActionError(action='create', reason=_('already created')) updates = self.cinder_obj_get_changes() if 'connector' in updates: self._convert_connector_to_db_format(updates) with self.obj_as_admin(): db_attachment = db.volume_attach(self._context, updates) self._from_db_object(self._context, self, db_attachment) def destroy(self): updated_values = db.attachment_destroy(self._context, self.id) self.update(updated_values) self.obj_reset_changes(updated_values.keys()) @base.CinderObjectRegistry.register class VolumeAttachmentList(base.ObjectListBase, base.CinderObject): # Version 1.0: Initial version # Version 1.1: Remove volume_id in get_by_host|instance VERSION = '1.1' fields = { 'objects': fields.ListOfObjectsField('VolumeAttachment'), } @classmethod def get_all_by_volume_id(cls, context, volume_id): attachments = db.volume_attachment_get_all_by_volume_id(context, volume_id) return base.obj_make_list(context, cls(context), objects.VolumeAttachment, attachments) @classmethod def get_all_by_host(cls, context, host, search_opts=None): attachments = db.volume_attachment_get_all_by_host(context, host, search_opts) return base.obj_make_list(context, cls(context), objects.VolumeAttachment, attachments) @classmethod def get_all_by_instance_uuid(cls, context, instance_uuid, search_opts=None): attachments = db.volume_attachment_get_all_by_instance_uuid( context, instance_uuid, search_opts) return base.obj_make_list(context, cls(context), objects.VolumeAttachment, attachments) @classmethod def get_all(cls, context, search_opts=None, marker=None, limit=None, offset=None, sort_keys=None, sort_direction=None): attachments = db.volume_attachment_get_all( context, search_opts, marker, limit, offset, sort_keys, sort_direction) return 
base.obj_make_list(context, cls(context), objects.VolumeAttachment, attachments) @classmethod def get_all_by_project(cls, context, project_id, search_opts=None, marker=None, limit=None, offset=None, sort_keys=None, sort_direction=None): attachments = db.volume_attachment_get_all_by_project( context, project_id, search_opts, marker, limit, offset, sort_keys, sort_direction) return base.obj_make_list(context, cls(context), objects.VolumeAttachment, attachments) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/objects/volume_type.py0000664000175000017500000001766100000000000020721 0ustar00zuulzuul00000000000000# Copyright 2015 SimpliVity Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_versionedobjects import fields from cinder import db from cinder import exception from cinder.i18n import _ from cinder import objects from cinder.objects import base from cinder.volume import volume_types from cinder.volume import volume_utils @base.CinderObjectRegistry.register class VolumeType(base.CinderPersistentObject, base.CinderObject, base.CinderObjectDictCompat, base.CinderComparableObject): # Version 1.0: Initial version # Version 1.1: Changed extra_specs to DictOfNullableStringsField # Version 1.2: Added qos_specs # Version 1.3: Add qos_specs_id VERSION = '1.3' OPTIONAL_FIELDS = ('extra_specs', 'projects', 'qos_specs') # NOTE: When adding a field obj_make_compatible needs to be updated fields = { 'id': fields.UUIDField(), 'name': fields.StringField(nullable=True), 'description': fields.StringField(nullable=True), 'is_public': fields.BooleanField(default=True, nullable=True), 'projects': fields.ListOfStringsField(nullable=True), 'extra_specs': fields.DictOfNullableStringsField(nullable=True), 'qos_specs_id': fields.UUIDField(nullable=True), 'qos_specs': fields.ObjectField('QualityOfServiceSpecs', nullable=True), } @classmethod def _get_expected_attrs(cls, context, *args, **kwargs): return 'extra_specs', 'projects' @classmethod def _from_db_object(cls, context, type, db_type, expected_attrs=None): if expected_attrs is None: expected_attrs = ['extra_specs', 'projects'] for name, field in type.fields.items(): if name in cls.OPTIONAL_FIELDS: continue value = db_type[name] if isinstance(field, fields.IntegerField): value = value or 0 type[name] = value # Get data from db_type object that was queried by joined query # from DB if 'extra_specs' in expected_attrs: type.extra_specs = {} specs = db_type.get('extra_specs') if specs and isinstance(specs, list): type.extra_specs = {item['key']: item['value'] for item in specs} elif specs and isinstance(specs, dict): type.extra_specs = specs if 'projects' in expected_attrs: # NOTE(geguileo): Until projects stops being a polymorphic value we # have to do a conversion here for VolumeTypeProjects ORM instance # lists. 
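            # Rows may come back either as plain project id strings or as
            # VolumeTypeProjects ORM instances; only the latter need
            # converting here.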
projects = db_type.get('projects', []) if projects and not isinstance(projects[0], str): projects = [p.project_id for p in projects] type.projects = projects if 'qos_specs' in expected_attrs: qos_specs = objects.QualityOfServiceSpecs(context) qos_specs._from_db_object(context, qos_specs, db_type['qos_specs']) type.qos_specs = qos_specs type._context = context type.obj_reset_changes() return type def create(self): if self.obj_attr_is_set('id'): raise exception.ObjectActionError(action='create', reason=_('already created')) db_volume_type = volume_types.create(self._context, self.name, self.extra_specs, self.is_public, self.projects, self.description) self._from_db_object(self._context, self, db_volume_type) def save(self): updates = self.cinder_obj_get_changes() if updates: volume_types.update(self._context, self.id, self.name, self.description) self.obj_reset_changes() def destroy(self): with self.obj_as_admin(): updated_values = volume_types.destroy(self._context, self.id) self.update(updated_values) self.obj_reset_changes(updated_values.keys()) def obj_load_attr(self, attrname): if attrname not in self.OPTIONAL_FIELDS: raise exception.ObjectActionError( action='obj_load_attr', reason=_('attribute %s not lazy-loadable') % attrname) if not self._context: raise exception.OrphanedObjectError(method='obj_load_attr', objtype=self.obj_name()) if attrname == 'extra_specs': self.extra_specs = db.volume_type_extra_specs_get(self._context, self.id) elif attrname == 'qos_specs': if self.qos_specs_id: self.qos_specs = objects.QualityOfServiceSpecs.get_by_id( self._context, self.qos_specs_id) else: self.qos_specs = None elif attrname == 'projects': volume_type_projects = db.volume_type_access_get_all(self._context, self.id) self.projects = [x.project_id for x in volume_type_projects] self.obj_reset_changes(fields=[attrname]) @classmethod def get_by_name_or_id(cls, context, identity): orm_obj = volume_types.get_by_name_or_id(context, identity) expected_attrs = cls._get_expected_attrs(context) return cls._from_db_object(context, cls(context), orm_obj, expected_attrs=expected_attrs) def is_replicated(self): return volume_utils.is_replicated_spec(self.extra_specs) def is_multiattach(self): return volume_utils.is_multiattach_spec(self.extra_specs) @base.CinderObjectRegistry.register class VolumeTypeList(base.ObjectListBase, base.CinderObject): # Version 1.0: Initial version # Version 1.1: Add pagination support to volume type VERSION = '1.1' fields = { 'objects': fields.ListOfObjectsField('VolumeType'), } @classmethod def get_all(cls, context, inactive=0, filters=None, marker=None, limit=None, sort_keys=None, sort_dirs=None, offset=None): types = volume_types.get_all_types(context, inactive, filters, marker=marker, limit=limit, sort_keys=sort_keys, sort_dirs=sort_dirs, offset=offset) expected_attrs = VolumeType._get_expected_attrs(context) return base.obj_make_list(context, cls(context), objects.VolumeType, types.values(), expected_attrs=expected_attrs) @classmethod def get_all_types_for_qos(cls, context, qos_id): types = db.qos_specs_associations_get(context, qos_id) return base.obj_make_list(context, cls(context), objects.VolumeType, types) @classmethod def get_all_by_group(cls, context, group_id): # Generic volume group types = db.volume_type_get_all_by_group( context.elevated(), group_id) expected_attrs = VolumeType._get_expected_attrs(context) return base.obj_make_list(context, cls(context), objects.VolumeType, types, expected_attrs=expected_attrs) 
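

# A minimal usage sketch, not part of the module above: it assumes a reachable
# Cinder database and the cinder.conf path shown below (both assumptions), and
# it only calls APIs defined in this file -- VolumeTypeList.get_all(), which
# pre-populates the optional 'extra_specs'/'projects' fields through
# _get_expected_attrs(), plus the is_replicated()/is_multiattach() helpers.
if __name__ == '__main__':
    from oslo_config import cfg

    from cinder import context as cinder_context

    # Hypothetical config location; point this at a real cinder.conf.
    cfg.CONF(['--config-file', '/etc/cinder/cinder.conf'], project='cinder')
    objects.register_all()

    ctxt = cinder_context.get_admin_context()
    for vol_type in VolumeTypeList.get_all(ctxt):
        # extra_specs was already filled in by the joined query, so reading it
        # here does not trigger a lazy obj_load_attr() round trip.
        print(vol_type.name, vol_type.is_replicated(),
              vol_type.is_multiattach(), vol_type.extra_specs)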
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/opts.py0000664000175000017500000006105400000000000015700 0ustar00zuulzuul00000000000000 # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. ################################################################### # WARNING! # # Do not edit this file directly. This file should be generated by # running the command "tox -e genopts" any time a config option # has been added, changed, or removed. ################################################################### import itertools from keystoneauth1 import loading from cinder import objects # noqa objects.register_all() from cinder.api import common as cinder_api_common from cinder.api.middleware import auth as cinder_api_middleware_auth import cinder.api.openstack from cinder.api.views import versions as cinder_api_views_versions from cinder.backup import api as cinder_backup_api from cinder.backup import chunkeddriver as cinder_backup_chunkeddriver from cinder.backup import driver as cinder_backup_driver from cinder.backup.drivers import ceph as cinder_backup_drivers_ceph from cinder.backup.drivers import gcs as cinder_backup_drivers_gcs from cinder.backup.drivers import glusterfs as cinder_backup_drivers_glusterfs from cinder.backup.drivers import nfs as cinder_backup_drivers_nfs from cinder.backup.drivers import posix as cinder_backup_drivers_posix from cinder.backup.drivers import s3 as cinder_backup_drivers_s3 from cinder.backup.drivers import swift as cinder_backup_drivers_swift from cinder.backup import manager as cinder_backup_manager from cinder.cmd import backup as cinder_cmd_backup from cinder.cmd import volume as cinder_cmd_volume from cinder.common import config as cinder_common_config import cinder.compute from cinder.compute import nova as cinder_compute_nova from cinder import context as cinder_context from cinder import coordination as cinder_coordination from cinder.db import api as cinder_db_api from cinder.image import glance as cinder_image_glance from cinder.image import image_utils as cinder_image_imageutils from cinder.keymgr import conf_key_mgr as cinder_keymgr_confkeymgr from cinder.message import api as cinder_message_api from cinder import quota as cinder_quota from cinder.scheduler import driver as cinder_scheduler_driver from cinder.scheduler import host_manager as cinder_scheduler_hostmanager from cinder.scheduler import manager as cinder_scheduler_manager from cinder.scheduler import scheduler_options as \ cinder_scheduler_scheduleroptions from cinder.scheduler.weights import capacity as \ cinder_scheduler_weights_capacity from cinder.scheduler.weights import volume_number as \ cinder_scheduler_weights_volumenumber from cinder import service as cinder_service from cinder import service_auth as cinder_serviceauth from cinder import ssh_utils as cinder_sshutils from cinder.transfer import api as cinder_transfer_api from cinder.volume import api as cinder_volume_api from cinder.volume import driver as 
cinder_volume_driver from cinder.volume.drivers.ceph import rbd_iscsi as \ cinder_volume_drivers_ceph_rbdiscsi from cinder.volume.drivers.datacore import driver as \ cinder_volume_drivers_datacore_driver from cinder.volume.drivers.datacore import fc as \ cinder_volume_drivers_datacore_fc from cinder.volume.drivers.datacore import iscsi as \ cinder_volume_drivers_datacore_iscsi from cinder.volume.drivers.datera import datera_iscsi as \ cinder_volume_drivers_datera_dateraiscsi from cinder.volume.drivers.dell_emc.powerflex import driver as \ cinder_volume_drivers_dell_emc_powerflex_driver from cinder.volume.drivers.dell_emc.powermax import common as \ cinder_volume_drivers_dell_emc_powermax_common from cinder.volume.drivers.dell_emc.powerstore import driver as \ cinder_volume_drivers_dell_emc_powerstore_driver from cinder.volume.drivers.dell_emc.powerstore import nfs as \ cinder_volume_drivers_dell_emc_powerstore_nfs from cinder.volume.drivers.dell_emc.powervault import common as \ cinder_volume_drivers_dell_emc_powervault_common from cinder.volume.drivers.dell_emc.sc import storagecenter_common as \ cinder_volume_drivers_dell_emc_sc_storagecentercommon from cinder.volume.drivers.dell_emc.unity import driver as \ cinder_volume_drivers_dell_emc_unity_driver from cinder.volume.drivers.dell_emc.vnx import common as \ cinder_volume_drivers_dell_emc_vnx_common from cinder.volume.drivers.dell_emc import xtremio as \ cinder_volume_drivers_dell_emc_xtremio from cinder.volume.drivers.fujitsu.eternus_dx import eternus_dx_common as \ cinder_volume_drivers_fujitsu_eternus_dx_eternusdxcommon from cinder.volume.drivers.fungible import driver as \ cinder_volume_drivers_fungible_driver from cinder.volume.drivers.fusionstorage import dsware as \ cinder_volume_drivers_fusionstorage_dsware from cinder.volume.drivers.hitachi import hbsd_common as \ cinder_volume_drivers_hitachi_hbsdcommon from cinder.volume.drivers.hitachi import hbsd_replication as \ cinder_volume_drivers_hitachi_hbsdreplication from cinder.volume.drivers.hitachi import hbsd_rest as \ cinder_volume_drivers_hitachi_hbsdrest from cinder.volume.drivers.hitachi import hbsd_rest_fc as \ cinder_volume_drivers_hitachi_hbsdrestfc from cinder.volume.drivers.hpe import hpe_3par_common as \ cinder_volume_drivers_hpe_hpe3parcommon from cinder.volume.drivers.hpe import nimble as \ cinder_volume_drivers_hpe_nimble from cinder.volume.drivers.hpe.xp import hpe_xp_rest as \ cinder_volume_drivers_hpe_xp_hpexprest from cinder.volume.drivers.huawei import common as \ cinder_volume_drivers_huawei_common from cinder.volume.drivers.ibm import flashsystem_common as \ cinder_volume_drivers_ibm_flashsystemcommon from cinder.volume.drivers.ibm import flashsystem_iscsi as \ cinder_volume_drivers_ibm_flashsystemiscsi from cinder.volume.drivers.ibm import gpfs as cinder_volume_drivers_ibm_gpfs from cinder.volume.drivers.ibm.ibm_storage import ds8k_proxy as \ cinder_volume_drivers_ibm_ibm_storage_ds8kproxy from cinder.volume.drivers.ibm.ibm_storage import ibm_storage as \ cinder_volume_drivers_ibm_ibm_storage_ibmstorage from cinder.volume.drivers.ibm.storwize_svc import storwize_svc_common as \ cinder_volume_drivers_ibm_storwize_svc_storwizesvccommon from cinder.volume.drivers.ibm.storwize_svc import storwize_svc_fc as \ cinder_volume_drivers_ibm_storwize_svc_storwizesvcfc from cinder.volume.drivers.ibm.storwize_svc import storwize_svc_iscsi as \ cinder_volume_drivers_ibm_storwize_svc_storwizesvciscsi from cinder.volume.drivers import infinidat as 
cinder_volume_drivers_infinidat from cinder.volume.drivers.infortrend.raidcmd_cli import common_cli as \ cinder_volume_drivers_infortrend_raidcmd_cli_commoncli from cinder.volume.drivers.inspur.as13000 import as13000_driver as \ cinder_volume_drivers_inspur_as13000_as13000driver from cinder.volume.drivers.inspur.instorage import instorage_common as \ cinder_volume_drivers_inspur_instorage_instoragecommon from cinder.volume.drivers.inspur.instorage import instorage_iscsi as \ cinder_volume_drivers_inspur_instorage_instorageiscsi from cinder.volume.drivers.kaminario import kaminario_common as \ cinder_volume_drivers_kaminario_kaminariocommon from cinder.volume.drivers.kioxia import kumoscale as \ cinder_volume_drivers_kioxia_kumoscale from cinder.volume.drivers.lenovo import lenovo_common as \ cinder_volume_drivers_lenovo_lenovocommon from cinder.volume.drivers import lightos as cinder_volume_drivers_lightos from cinder.volume.drivers import linstordrv as \ cinder_volume_drivers_linstordrv from cinder.volume.drivers import lvm as cinder_volume_drivers_lvm from cinder.volume.drivers.macrosan import driver as \ cinder_volume_drivers_macrosan_driver from cinder.volume.drivers.nec.v import nec_v_rest as \ cinder_volume_drivers_nec_v_necvrest from cinder.volume.drivers.netapp import options as \ cinder_volume_drivers_netapp_options from cinder.volume.drivers.nexenta import options as \ cinder_volume_drivers_nexenta_options from cinder.volume.drivers import nfs as cinder_volume_drivers_nfs from cinder.volume.drivers.open_e import options as \ cinder_volume_drivers_open_e_options from cinder.volume.drivers.prophetstor import options as \ cinder_volume_drivers_prophetstor_options from cinder.volume.drivers import pure as cinder_volume_drivers_pure from cinder.volume.drivers import qnap as cinder_volume_drivers_qnap from cinder.volume.drivers import quobyte as cinder_volume_drivers_quobyte from cinder.volume.drivers import rbd as cinder_volume_drivers_rbd from cinder.volume.drivers import remotefs as cinder_volume_drivers_remotefs from cinder.volume.drivers.san.hp import hpmsa_common as \ cinder_volume_drivers_san_hp_hpmsacommon from cinder.volume.drivers.san import san as cinder_volume_drivers_san_san from cinder.volume.drivers.sandstone import sds_driver as \ cinder_volume_drivers_sandstone_sdsdriver from cinder.volume.drivers import solidfire as cinder_volume_drivers_solidfire from cinder.volume.drivers import storpool as cinder_volume_drivers_storpool from cinder.volume.drivers.stx import common as \ cinder_volume_drivers_stx_common from cinder.volume.drivers.synology import synology_common as \ cinder_volume_drivers_synology_synologycommon from cinder.volume.drivers.toyou.acs5000 import acs5000_common as \ cinder_volume_drivers_toyou_acs5000_acs5000common from cinder.volume.drivers.toyou.tyds import tyds as \ cinder_volume_drivers_toyou_tyds_tyds from cinder.volume.drivers.veritas_access import veritas_iscsi as \ cinder_volume_drivers_veritas_access_veritasiscsi from cinder.volume.drivers.vmware import vmdk as \ cinder_volume_drivers_vmware_vmdk from cinder.volume.drivers import vzstorage as cinder_volume_drivers_vzstorage from cinder.volume.drivers.windows import iscsi as \ cinder_volume_drivers_windows_iscsi from cinder.volume.drivers.windows import smbfs as \ cinder_volume_drivers_windows_smbfs from cinder.volume.drivers.yadro import tatlin_common as \ cinder_volume_drivers_yadro_tatlincommon from cinder.volume.drivers.zadara import zadara as \ cinder_volume_drivers_zadara_zadara from 
cinder.volume import manager as cinder_volume_manager from cinder.volume.targets import spdknvmf as cinder_volume_targets_spdknvmf from cinder.wsgi import eventlet_server as cinder_wsgi_eventletserver from cinder.zonemanager.drivers.brocade import brcd_fabric_opts as \ cinder_zonemanager_drivers_brocade_brcdfabricopts from cinder.zonemanager.drivers.brocade import brcd_fc_zone_driver as \ cinder_zonemanager_drivers_brocade_brcdfczonedriver from cinder.zonemanager.drivers.cisco import cisco_fabric_opts as \ cinder_zonemanager_drivers_cisco_ciscofabricopts from cinder.zonemanager.drivers.cisco import cisco_fc_zone_driver as \ cinder_zonemanager_drivers_cisco_ciscofczonedriver from cinder.zonemanager import fc_zone_manager as \ cinder_zonemanager_fczonemanager def list_opts(): return [ ('backend', itertools.chain( [cinder_cmd_volume.host_opt], )), ('brcd_fabric_example', itertools.chain( cinder_zonemanager_drivers_brocade_brcdfabricopts. brcd_zone_opts, )), ('cisco_fabric_example', itertools.chain( cinder_zonemanager_drivers_cisco_ciscofabricopts. cisco_zone_opts, )), ('coordination', itertools.chain( cinder_coordination.coordination_opts, )), ('DEFAULT', itertools.chain( cinder_api_common.api_common_opts, [cinder_api_middleware_auth.use_forwarded_for_opt], cinder.api.openstack.openstack_api_opts, cinder_api_views_versions.versions_opts, cinder_backup_api.backup_opts, cinder_backup_chunkeddriver.backup_opts, cinder_backup_driver.backup_opts, cinder_backup_drivers_ceph.service_opts, cinder_backup_drivers_gcs.gcsbackup_service_opts, cinder_backup_drivers_glusterfs.glusterfsbackup_service_opts, cinder_backup_drivers_nfs.nfsbackup_service_opts, cinder_backup_drivers_posix.posixbackup_service_opts, cinder_backup_drivers_s3.s3backup_service_opts, cinder_backup_drivers_swift.swiftbackup_service_opts, cinder_backup_manager.backup_manager_opts, cinder_cmd_backup.backup_cmd_opts, [cinder_cmd_volume.cluster_opt], cinder_common_config.api_opts, cinder_common_config.core_opts, cinder_common_config.auth_opts, cinder_common_config.backup_opts, cinder_common_config.image_opts, cinder_common_config.global_opts, cinder_common_config.compression_opts, cinder.compute.compute_opts, cinder_context.context_opts, cinder_db_api.db_opts, cinder_db_api.backup_opts, cinder_image_glance.image_opts, cinder_image_glance.glance_core_properties_opts, cinder_image_imageutils.image_opts, cinder_message_api.messages_opts, cinder_quota.quota_opts, cinder_scheduler_driver.scheduler_driver_opts, cinder_scheduler_hostmanager.host_manager_opts, cinder_scheduler_manager.scheduler_manager_opts, [cinder_scheduler_scheduleroptions. scheduler_json_config_location_opt], cinder_scheduler_weights_capacity.capacity_weight_opts, cinder_scheduler_weights_volumenumber. volume_number_weight_opts, cinder_service.service_opts, cinder_sshutils.ssh_opts, cinder_transfer_api.volume_transfer_opts, [cinder_volume_api.allow_force_upload_opt], [cinder_volume_api.volume_host_opt], [cinder_volume_api.volume_same_az_opt], [cinder_volume_api.az_cache_time_opt], cinder_volume_driver.volume_opts, cinder_volume_driver.iser_opts, cinder_volume_driver.nvmeof_opts, cinder_volume_driver.nvmet_opts, cinder_volume_driver.scst_opts, cinder_volume_driver.backup_opts, cinder_volume_driver.image_opts, cinder_volume_drivers_datera_dateraiscsi.d_opts, cinder_volume_drivers_fungible_driver.fungible_opts, cinder_volume_drivers_fusionstorage_dsware.volume_opts, cinder_volume_drivers_hitachi_hbsdreplication._REP_OPTS, cinder_volume_drivers_hitachi_hbsdreplication. 
COMMON_MIRROR_OPTS, cinder_volume_drivers_hitachi_hbsdreplication. ISCSI_MIRROR_OPTS, cinder_volume_drivers_hitachi_hbsdreplication. REST_MIRROR_OPTS, cinder_volume_drivers_hitachi_hbsdreplication. REST_MIRROR_API_OPTS, cinder_volume_drivers_hitachi_hbsdreplication. REST_MIRROR_SSL_OPTS, cinder_volume_drivers_infortrend_raidcmd_cli_commoncli. infortrend_opts, cinder_volume_drivers_inspur_as13000_as13000driver. inspur_as13000_opts, cinder_volume_drivers_inspur_instorage_instoragecommon. instorage_mcs_opts, cinder_volume_drivers_inspur_instorage_instorageiscsi. instorage_mcs_iscsi_opts, cinder_volume_drivers_kioxia_kumoscale.KUMOSCALE_OPTS, cinder_volume_drivers_open_e_options.jdss_connection_opts, cinder_volume_drivers_open_e_options.jdss_iscsi_opts, cinder_volume_drivers_open_e_options.jdss_volume_opts, cinder_volume_drivers_sandstone_sdsdriver.sds_opts, cinder_volume_drivers_toyou_acs5000_acs5000common. acs5000c_opts, cinder_volume_drivers_veritas_access_veritasiscsi.VA_VOL_OPTS, cinder_volume_manager.volume_manager_opts, cinder_wsgi_eventletserver.socket_opts, )), ('fc-zone-manager', itertools.chain( cinder_zonemanager_drivers_brocade_brcdfczonedriver.brcd_opts, cinder_zonemanager_drivers_cisco_ciscofczonedriver.cisco_opts, cinder_zonemanager_fczonemanager.zone_manager_opts, )), ('key_manager', itertools.chain( cinder_keymgr_confkeymgr.key_mgr_opts, )), ('service_user', itertools.chain( cinder_serviceauth.service_user_opts, loading.get_auth_plugin_conf_options('v3password'), loading.get_session_conf_options(), )), ('backend_defaults', itertools.chain( cinder_volume_driver.volume_opts, cinder_volume_driver.iser_opts, cinder_volume_driver.nvmeof_opts, cinder_volume_driver.nvmet_opts, cinder_volume_driver.scst_opts, cinder_volume_driver.image_opts, cinder_volume_driver.fqdn_opts, cinder_volume_drivers_ceph_rbdiscsi.RBD_ISCSI_OPTS, cinder_volume_drivers_datacore_driver.datacore_opts, cinder_volume_drivers_datacore_fc.datacore_fc_opts, cinder_volume_drivers_datacore_iscsi.datacore_iscsi_opts, cinder_volume_drivers_dell_emc_powerflex_driver. powerflex_opts, cinder_volume_drivers_dell_emc_powermax_common.powermax_opts, cinder_volume_drivers_dell_emc_powerstore_driver. POWERSTORE_OPTS, cinder_volume_drivers_dell_emc_powerstore_nfs.nfs_opts, cinder_volume_drivers_dell_emc_powervault_common.common_opts, cinder_volume_drivers_dell_emc_powervault_common.iscsi_opts, cinder_volume_drivers_dell_emc_sc_storagecentercommon. common_opts, cinder_volume_drivers_dell_emc_unity_driver.UNITY_OPTS, cinder_volume_drivers_dell_emc_vnx_common.VNX_OPTS, cinder_volume_drivers_dell_emc_xtremio.XTREMIO_OPTS, cinder_volume_drivers_fujitsu_eternus_dx_eternusdxcommon. FJ_ETERNUS_DX_OPT_opts, cinder_volume_drivers_hitachi_hbsdcommon.COMMON_VOLUME_OPTS, cinder_volume_drivers_hitachi_hbsdcommon.COMMON_PORT_OPTS, cinder_volume_drivers_hitachi_hbsdcommon.COMMON_PAIR_OPTS, cinder_volume_drivers_hitachi_hbsdcommon.COMMON_NAME_OPTS, cinder_volume_drivers_hitachi_hbsdrest.REST_VOLUME_OPTS, cinder_volume_drivers_hitachi_hbsdrest.REST_PAIR_OPTS, cinder_volume_drivers_hitachi_hbsdrestfc.FC_VOLUME_OPTS, cinder_volume_drivers_hpe_hpe3parcommon.hpe3par_opts, cinder_volume_drivers_hpe_nimble.nimble_opts, cinder_volume_drivers_hpe_xp_hpexprest.COMMON_VOLUME_OPTS, cinder_volume_drivers_hpe_xp_hpexprest.REST_VOLUME_OPTS, cinder_volume_drivers_hpe_xp_hpexprest.FC_VOLUME_OPTS, cinder_volume_drivers_huawei_common.huawei_opts, cinder_volume_drivers_ibm_flashsystemcommon.flashsystem_opts, cinder_volume_drivers_ibm_flashsystemiscsi. 
flashsystem_iscsi_opts, cinder_volume_drivers_ibm_gpfs.gpfs_opts, cinder_volume_drivers_ibm_gpfs.gpfs_remote_ssh_opts, cinder_volume_drivers_ibm_ibm_storage_ds8kproxy.ds8k_opts, cinder_volume_drivers_ibm_ibm_storage_ibmstorage.driver_opts, cinder_volume_drivers_ibm_storwize_svc_storwizesvccommon. storwize_svc_opts, cinder_volume_drivers_ibm_storwize_svc_storwizesvcfc. storwize_svc_fc_opts, cinder_volume_drivers_ibm_storwize_svc_storwizesvciscsi. storwize_svc_iscsi_opts, cinder_volume_drivers_infinidat.infinidat_opts, cinder_volume_drivers_kaminario_kaminariocommon. kaminario_opts, cinder_volume_drivers_lenovo_lenovocommon.common_opts, cinder_volume_drivers_lenovo_lenovocommon.iscsi_opts, cinder_volume_drivers_lightos.lightos_opts, cinder_volume_drivers_linstordrv.linstor_opts, cinder_volume_drivers_lvm.volume_opts, cinder_volume_drivers_macrosan_driver.config.macrosan_opts, cinder_volume_drivers_nec_v_necvrest.COMMON_VOLUME_OPTS, cinder_volume_drivers_nec_v_necvrest.REST_VOLUME_OPTS, cinder_volume_drivers_nec_v_necvrest.FC_VOLUME_OPTS, cinder_volume_drivers_netapp_options.netapp_proxy_opts, cinder_volume_drivers_netapp_options.netapp_connection_opts, cinder_volume_drivers_netapp_options.netapp_transport_opts, cinder_volume_drivers_netapp_options.netapp_basicauth_opts, cinder_volume_drivers_netapp_options. netapp_certificateauth_opts, cinder_volume_drivers_netapp_options.netapp_cluster_opts, cinder_volume_drivers_netapp_options.netapp_provisioning_opts, cinder_volume_drivers_netapp_options.netapp_img_cache_opts, cinder_volume_drivers_netapp_options.netapp_nfs_extra_opts, cinder_volume_drivers_netapp_options.netapp_san_opts, cinder_volume_drivers_netapp_options.netapp_replication_opts, cinder_volume_drivers_netapp_options.netapp_support_opts, cinder_volume_drivers_netapp_options.netapp_migration_opts, cinder_volume_drivers_nexenta_options.NEXENTA_CONNECTION_OPTS, cinder_volume_drivers_nexenta_options.NEXENTA_ISCSI_OPTS, cinder_volume_drivers_nexenta_options.NEXENTA_DATASET_OPTS, cinder_volume_drivers_nexenta_options.NEXENTA_NFS_OPTS, cinder_volume_drivers_nexenta_options.NEXENTA_RRMGR_OPTS, cinder_volume_drivers_nexenta_options.NEXENTA_EDGE_OPTS, cinder_volume_drivers_nfs.nfs_opts, cinder_volume_drivers_prophetstor_options.DPL_OPTS, cinder_volume_drivers_pure.PURE_OPTS, cinder_volume_drivers_qnap.qnap_opts, cinder_volume_drivers_quobyte.volume_opts, cinder_volume_drivers_rbd.RBD_OPTS, cinder_volume_drivers_remotefs.nas_opts, cinder_volume_drivers_remotefs.volume_opts, cinder_volume_drivers_san_hp_hpmsacommon.common_opts, cinder_volume_drivers_san_hp_hpmsacommon.iscsi_opts, cinder_volume_drivers_san_san.san_opts, cinder_volume_drivers_solidfire.sf_opts, cinder_volume_drivers_storpool.storpool_opts, cinder_volume_drivers_stx_common.common_opts, cinder_volume_drivers_stx_common.iscsi_opts, cinder_volume_drivers_synology_synologycommon.cinder_opts, cinder_volume_drivers_toyou_tyds_tyds.tyds_opts, cinder_volume_drivers_vmware_vmdk.vmdk_opts, cinder_volume_drivers_vzstorage.vzstorage_opts, cinder_volume_drivers_windows_iscsi.windows_opts, cinder_volume_drivers_windows_smbfs.volume_opts, cinder_volume_drivers_yadro_tatlincommon.tatlin_opts, cinder_volume_drivers_zadara_zadara.common.zadara_opts, cinder_volume_manager.volume_backend_opts, cinder_volume_targets_spdknvmf.spdk_opts, )), ('glance', itertools.chain( cinder_image_glance.glance_session_opts, cinder_image_glance.glance_auth_opts, )), ('nova', itertools.chain( cinder_compute_nova.nova_opts, cinder_compute_nova.nova_session_opts, 
cinder_compute_nova.nova_auth_opts, )), ] ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315577.0951183 cinder-27.0.0/cinder/policies/0000775000175000017500000000000000000000000016142 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/policies/__init__.py0000664000175000017500000000620100000000000020252 0ustar00zuulzuul00000000000000# Copyright (c) 2017 Huawei Technologies Co., Ltd. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import itertools from cinder.policies import attachments from cinder.policies import backup_actions from cinder.policies import backups from cinder.policies import base from cinder.policies import capabilities from cinder.policies import clusters from cinder.policies import default_types from cinder.policies import group_actions from cinder.policies import group_snapshot_actions from cinder.policies import group_snapshots from cinder.policies import group_types from cinder.policies import groups from cinder.policies import hosts from cinder.policies import limits from cinder.policies import manageable_snapshots from cinder.policies import manageable_volumes from cinder.policies import messages from cinder.policies import qos_specs from cinder.policies import quota_class from cinder.policies import quotas from cinder.policies import scheduler_stats from cinder.policies import services from cinder.policies import snapshot_actions from cinder.policies import snapshot_metadata from cinder.policies import snapshots from cinder.policies import type_extra_specs from cinder.policies import volume_access from cinder.policies import volume_actions from cinder.policies import volume_metadata from cinder.policies import volume_transfer from cinder.policies import volume_type from cinder.policies import volumes from cinder.policies import workers def list_rules(): return itertools.chain( base.list_rules(), attachments.list_rules(), messages.list_rules(), clusters.list_rules(), workers.list_rules(), snapshot_metadata.list_rules(), snapshots.list_rules(), snapshot_actions.list_rules(), manageable_snapshots.list_rules(), backups.list_rules(), backup_actions.list_rules(), groups.list_rules(), group_types.list_rules(), group_snapshots.list_rules(), group_snapshot_actions.list_rules(), group_actions.list_rules(), qos_specs.list_rules(), quota_class.list_rules(), quotas.list_rules(), capabilities.list_rules(), services.list_rules(), scheduler_stats.list_rules(), hosts.list_rules(), limits.list_rules(), manageable_volumes.list_rules(), volume_type.list_rules(), volume_access.list_rules(), volume_actions.list_rules(), volume_transfer.list_rules(), volume_metadata.list_rules(), type_extra_specs.list_rules(), volumes.list_rules(), default_types.list_rules(), ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 
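An illustrative aside, not part of the archive: the two aggregation hooks above (cinder.opts.list_opts() for configuration options and cinder.policies.list_rules() for policy defaults) are plain callables, so tooling can consume them directly as well as through the usual oslo entry points. A minimal sketch of feeding the policy defaults into an oslo.policy Enforcer follows; the 'backup:get_all' name in the trailing comment is simply one of the rules defined later in this package.

from oslo_config import cfg
from oslo_policy import policy as oslo_policy

from cinder import policies  # the package whose __init__.py appears above

CONF = cfg.CONF
enforcer = oslo_policy.Enforcer(CONF)
enforcer.register_defaults(policies.list_rules())
# A later call such as
#     enforcer.authorize('backup:get_all', target, credentials)
# is then checked against the DocumentedRuleDefaults (or their deprecated
# counterparts) collected by list_rules().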
cinder-27.0.0/cinder/policies/attachments.py0000664000175000017500000000674100000000000021037 0ustar00zuulzuul00000000000000# Copyright (c) 2017 Huawei Technologies Co., Ltd. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_policy import policy from cinder.policies import base CREATE_POLICY = 'volume:attachment_create' UPDATE_POLICY = 'volume:attachment_update' DELETE_POLICY = 'volume:attachment_delete' COMPLETE_POLICY = 'volume:attachment_complete' MULTIATTACH_BOOTABLE_VOLUME_POLICY = 'volume:multiattach_bootable_volume' deprecated_create_policy = base.CinderDeprecatedRule( name=CREATE_POLICY, check_str="" ) deprecated_update_policy = base.CinderDeprecatedRule( name=UPDATE_POLICY, check_str=base.RULE_ADMIN_OR_OWNER ) deprecated_delete_policy = base.CinderDeprecatedRule( name=DELETE_POLICY, check_str=base.RULE_ADMIN_OR_OWNER ) deprecated_complete_policy = base.CinderDeprecatedRule( name=COMPLETE_POLICY, check_str=base.RULE_ADMIN_OR_OWNER ) deprecated_multiattach_policy = base.CinderDeprecatedRule( name=MULTIATTACH_BOOTABLE_VOLUME_POLICY, check_str=base.RULE_ADMIN_OR_OWNER ) attachments_policies = [ policy.DocumentedRuleDefault( name=CREATE_POLICY, check_str=base.SYSTEM_ADMIN_OR_PROJECT_MEMBER, description="Create attachment.", operations=[ { 'method': 'POST', 'path': '/attachments' } ], deprecated_rule=deprecated_create_policy, ), policy.DocumentedRuleDefault( name=UPDATE_POLICY, check_str=base.SYSTEM_ADMIN_OR_PROJECT_MEMBER, description="Update attachment.", operations=[ { 'method': 'PUT', 'path': '/attachments/{attachment_id}' } ], deprecated_rule=deprecated_update_policy, ), policy.DocumentedRuleDefault( name=DELETE_POLICY, check_str=base.SYSTEM_ADMIN_OR_PROJECT_MEMBER, description="Delete attachment.", operations=[ { 'method': 'DELETE', 'path': '/attachments/{attachment_id}' } ], deprecated_rule=deprecated_delete_policy, ), policy.DocumentedRuleDefault( name=COMPLETE_POLICY, check_str=base.SYSTEM_ADMIN_OR_PROJECT_MEMBER, description="Mark a volume attachment process as completed (in-use)", operations=[ { 'method': 'POST', 'path': '/attachments/{attachment_id}/action (os-complete)' } ], deprecated_rule=deprecated_complete_policy, ), policy.DocumentedRuleDefault( name=MULTIATTACH_BOOTABLE_VOLUME_POLICY, check_str=base.SYSTEM_ADMIN_OR_PROJECT_MEMBER, description="Allow multiattach of bootable volumes.", operations=[ { 'method': 'POST', 'path': '/attachments' } ], deprecated_rule=deprecated_multiattach_policy, ), ] def list_rules(): return attachments_policies ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/policies/backup_actions.py0000664000175000017500000000300000000000000021472 0ustar00zuulzuul00000000000000# Copyright (c) 2017 Huawei Technologies Co., Ltd. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_policy import policy from cinder.policies import base BASE_POLICY_NAME = 'volume_extension:backup_admin_actions:%s' backup_actions_policies = [ policy.DocumentedRuleDefault( name=BASE_POLICY_NAME % 'reset_status', check_str=base.RULE_ADMIN_API, description="Reset status of a backup.", operations=[ { 'method': 'POST', 'path': '/backups/{backup_id}/action (os-reset_status)' } ]), policy.DocumentedRuleDefault( name=BASE_POLICY_NAME % 'force_delete', check_str=base.RULE_ADMIN_API, description="Force delete a backup.", operations=[ { 'method': 'POST', 'path': '/backups/{backup_id}/action (os-force_delete)' } ]), ] def list_rules(): return backup_actions_policies ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/policies/backups.py0000664000175000017500000001167700000000000020160 0ustar00zuulzuul00000000000000# Copyright (c) 2017 Huawei Technologies Co., Ltd. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
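# --------------------------------------------------------------------------
# Illustrative aside, not part of the upstream tree: how an admin-only rule
# such as 'volume_extension:backup_admin_actions:reset_status' defined above
# is typically checked at the API layer.  The helper name below is
# hypothetical, and it assumes cinder.context.RequestContext.authorize()
# accepting an action name and a target dict, plus a backup object carrying
# project_id.
# --------------------------------------------------------------------------
def _sketch_reset_status_enforcement(context, backup):
    from cinder.policies import backup_actions

    # Raises PolicyNotAuthorized unless the caller satisfies the
    # reset_status rule (RULE_ADMIN_API with the defaults shown above).
    context.authorize(backup_actions.BASE_POLICY_NAME % 'reset_status',
                      target={'project_id': backup.project_id})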
from oslo_policy import policy from cinder.policies import base GET_ALL_POLICY = 'backup:get_all' GET_POLICY = 'backup:get' CREATE_POLICY = 'backup:create' UPDATE_POLICY = 'backup:update' DELETE_POLICY = 'backup:delete' RESTORE_POLICY = 'backup:restore' IMPORT_POLICY = 'backup:backup-import' EXPORT_POLICY = 'backup:export-import' BACKUP_ATTRIBUTES_POLICY = 'backup:backup_project_attribute' deprecated_get_all_policy = base.CinderDeprecatedRule( name=GET_ALL_POLICY, check_str=base.RULE_ADMIN_OR_OWNER, ) deprecated_get_policy = base.CinderDeprecatedRule( name=GET_POLICY, check_str=base.RULE_ADMIN_OR_OWNER, ) deprecated_create_policy = base.CinderDeprecatedRule( name=CREATE_POLICY, check_str="" ) deprecated_update_policy = base.CinderDeprecatedRule( name=UPDATE_POLICY, check_str=base.RULE_ADMIN_OR_OWNER ) deprecated_delete_policy = base.CinderDeprecatedRule( name=DELETE_POLICY, check_str=base.RULE_ADMIN_OR_OWNER ) deprecated_restore_policy = base.CinderDeprecatedRule( name=RESTORE_POLICY, check_str=base.RULE_ADMIN_OR_OWNER ) backups_policies = [ policy.DocumentedRuleDefault( name=GET_ALL_POLICY, check_str=base.SYSTEM_READER_OR_PROJECT_READER, description="List backups.", operations=[ { 'method': 'GET', 'path': '/backups' }, { 'method': 'GET', 'path': '/backups/detail' } ], deprecated_rule=deprecated_get_all_policy, ), policy.DocumentedRuleDefault( name=BACKUP_ATTRIBUTES_POLICY, check_str=base.RULE_ADMIN_API, description="List backups or show backup with project attributes.", operations=[ { 'method': 'GET', 'path': '/backups/{backup_id}' }, { 'method': 'GET', 'path': '/backups/detail' } ], ), policy.DocumentedRuleDefault( name=CREATE_POLICY, check_str=base.SYSTEM_ADMIN_OR_PROJECT_MEMBER, description="Create backup.", operations=[ { 'method': 'POST', 'path': '/backups' } ], deprecated_rule=deprecated_create_policy, ), policy.DocumentedRuleDefault( name=GET_POLICY, check_str=base.SYSTEM_READER_OR_PROJECT_READER, description="Show backup.", operations=[ { 'method': 'GET', 'path': '/backups/{backup_id}' } ], deprecated_rule=deprecated_get_policy ), policy.DocumentedRuleDefault( name=UPDATE_POLICY, check_str=base.SYSTEM_ADMIN_OR_PROJECT_MEMBER, description="Update backup.", operations=[ { 'method': 'PUT', 'path': '/backups/{backup_id}' } ], deprecated_rule=deprecated_update_policy, ), policy.DocumentedRuleDefault( name=DELETE_POLICY, check_str=base.SYSTEM_ADMIN_OR_PROJECT_MEMBER, description="Delete backup.", operations=[ { 'method': 'DELETE', 'path': '/backups/{backup_id}' } ], deprecated_rule=deprecated_delete_policy, ), policy.DocumentedRuleDefault( name=RESTORE_POLICY, check_str=base.SYSTEM_ADMIN_OR_PROJECT_MEMBER, description="Restore backup.", operations=[ { 'method': 'POST', 'path': '/backups/{backup_id}/restore' } ], deprecated_rule=deprecated_restore_policy, ), policy.DocumentedRuleDefault( name=IMPORT_POLICY, check_str=base.RULE_ADMIN_API, description="Import backup.", operations=[ { 'method': 'POST', 'path': '/backups/{backup_id}/import_record' } ], ), policy.DocumentedRuleDefault( name=EXPORT_POLICY, check_str=base.RULE_ADMIN_API, description="Export backup.", operations=[ { 'method': 'POST', 'path': '/backups/{backup_id}/export_record' } ], ), ] def list_rules(): return backups_policies ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/policies/base.py0000664000175000017500000003446100000000000017436 0ustar00zuulzuul00000000000000# Copyright (c) 2017 Huawei Technologies Co., Ltd. # All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from typing import Optional from oslo_log import versionutils from oslo_policy import policy # General observations # -------------------- # - This file uses the three "default roles" provided by Keystone during # the ``keystone-manage bootstrap`` operation. These are 'admin', 'member', # and 'reader'. # # - The default Keystone installation implements an inheritance relation # between the roles: # 'admin' is-a 'member' is-a 'reader' # More importantly, however, Keystone will actually populate the roles # appropriately. Thus, someone with the 'admin' role on project X will also # have the 'member' and 'reader' roles on project X. What this means for # us is that if we have a policy we want satisfied by someone with any of # the 'admin', 'member', or 'reader' roles, we do NOT need to do this: # "get-foo-policy": "role:admin or role:member or role:reader" # Instead we can simply say: # "get-foo-policy": "role:reader" # because we know that anyone who has been assigned the 'admin' role in # Keystone also has the 'member' and 'reader' roles, and anyone assigned # the 'member' role *also* has the 'reader' role. # # - How do I know what string to use? # Cinder maintains a policy matrix correlating REST API calls, policy # names, and what "personas" can perform them. The "personas" are # abstract entities whose powers are supposed to be consistent across # OpenStack services. The "personas" are implemented by each service # using the default Keystone roles and scopes ... but you have to be # careful, because for example, a "system-reader" persona is NOT simply # a read-only administrator (it's actually less). See the policy matrix # for details. # # - This is probably obvious, but I'll say it anyway. There is nothing # magic about the 'reader' role that guarantees that someone with *only* # that role can only do read-only kind of stuff in a service. We (as the # Cinder service team) give it meaning by the way we define our policy # rules. So if as a joke, we were to write rules that allowed someone # with only the 'reader' role to delete volumes in any project, there is # nothing Keystone could do about it. So be careful. # Private policy checkstrings # --------------------------- # "Private" strings should not be used outside of this file. Add a new # public string in the appropriate place if you need one. # Generic policy check string for the persona we are calling 'system-admin'. # Note: we aren't recognizing system scope in Xena, so we aren't actually # using this check string yet. _SYSTEM_ADMIN = 'role:admin and system_scope:all' _LEGACY_SYSTEM_ADMIN = 'role:admin' # Cinder doesn't plan to use this one. It doesn't map to any of our # supported personas. It's only here in case you were wondering ... # _SYSTEM_MEMBER = 'role:member and system_scope:all' # Generic policy check string for the persona we are calling 'system-reader'. 
_SYSTEM_READER = 'role:reader and system_scope:all' # Note: In Xena, there isn't really a system-reader persona so make sure # the system-admin can do this _LEGACY_SYSTEM_READER = _LEGACY_SYSTEM_ADMIN # Generic policy check string for the persona we are calling 'project-admin'. # Note: We are not implementing this persona in Xena. (Compare it to the # _LEGACY_SYSTEM_ADMIN string above and you'll see why.) _PROJECT_ADMIN = 'role:admin and project_id:%(project_id)s' # Generic policy check string for the persona we are calling 'project-member'. # Note: The 'and project_id:%(project_id)s' part makes this a project-scoped # checkstring. _PROJECT_MEMBER = 'role:member and project_id:%(project_id)s' # Generic policy check string for the persona we are calling 'project-reader'. _PROJECT_READER = 'role:reader and project_id:%(project_id)s' # rule names _YOGA_SYSTEM_READER_OR_PROJECT_READER = 'rule:system_reader_or_project_reader' _YOGA_SYSTEM_ADMIN_OR_PROJECT_MEMBER = 'rule:system_admin_or_project_member' _YOGA_SYSTEM_ADMIN_OR_PROJECT_ADMIN = 'rule:system_admin_or_project_admin' _YOGA_SYSTEM_ADMIN_ONLY = 'rule:system_admin_only' # rules yoga_rule_defaults = [ policy.RuleDefault('system_reader_or_project_reader', f'({_SYSTEM_READER}) or ({_PROJECT_READER})', description=("Grants permission for the following " "Cinder personas: system-admin, system-" "reader, project-admin, project-member, " "and project-reader")), policy.RuleDefault('system_admin_or_project_member', f'({_SYSTEM_ADMIN}) or ({_PROJECT_MEMBER})', description=("Grants permission for the following " "Cinder personas: system-admin, project-" "admin, and project-member")), policy.RuleDefault('system_admin_or_project_admin', f'({_SYSTEM_ADMIN}) or ({_PROJECT_ADMIN})', description=("Grants permission for the following " "Cinder personas: system-admin and " "project-admin")), policy.RuleDefault('system_admin_only', f'({_SYSTEM_ADMIN})', description=("Grants permission only to the system-" "admin persona.")), ] # Public policy checkstrings for deprecations # ------------------------------------------- # The XENA_* need to be public because we'll use them in CinderDeprecatedRules # in the individual policy files when these are updated in Yoga. They # should *not* appear in any DocumentedRuleDefaults. # we *call* it system reader for consistency with Yoga, but in Xena # there isn't a system reader persona XENA_SYSTEM_READER_OR_PROJECT_READER = ( "rule:xena_system_admin_or_project_reader") XENA_SYSTEM_ADMIN_OR_PROJECT_MEMBER = ( "rule:xena_system_admin_or_project_member") # This will not be used. Rules appropriate for this checkstring will remain # as RULE_ADMIN_API in Xena and won't be deprecated until Yoga development. 
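# --------------------------------------------------------------------------
# Illustrative aside, not part of the upstream base.py: a minimal sketch of
# how a project-scoped checkstring such as _PROJECT_READER above behaves
# when evaluated by oslo.policy.  The helper below is local to this sketch
# and is never called by cinder.
# --------------------------------------------------------------------------
def _sketch_project_reader_evaluation():
    from oslo_config import cfg
    from oslo_policy import policy as oslo_policy

    enforcer = oslo_policy.Enforcer(cfg.CONF)
    enforcer.register_default(
        oslo_policy.RuleDefault('sketch:get', _PROJECT_READER))
    creds = {'roles': ['reader'], 'project_id': 'p1'}
    # Same-project readers pass; another project does not.  Tokens carrying
    # 'member' or 'admin' also pass, because Keystone layers those roles on
    # top of 'reader' as described in the observations above.
    assert enforcer.authorize('sketch:get', {'project_id': 'p1'}, creds)
    assert not enforcer.authorize('sketch:get', {'project_id': 'p2'}, creds)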
# XENA_SYSTEM_ADMIN_ONLY = "rule:xena_system_admin_only" RULE_ADMIN_API = "rule:admin_api" # TODO: xena rules to be removed in AA xena_rule_defaults = [ # these legacy rules are still used in Xena and will be used as the # checkstrings for CinderDeprecatedRules in Yoga and Z policy.RuleDefault('context_is_admin', 'role:admin', description="Decides what is required for the " "'is_admin:True' check to succeed."), policy.RuleDefault('admin_api', 'is_admin:True or (role:admin and ' 'is_admin_project:True)', # FIXME: In Yoga, point out that is_admin_project # is deprecated and operators should use system # scope instead description="Default rule for most Admin APIs."), # "pure" Xena rules policy.RuleDefault( 'xena_system_admin_or_project_reader', f'({_LEGACY_SYSTEM_ADMIN}) or ({_PROJECT_READER})', description=("NOTE: this purely role-based rule recognizes only " "project scope")), policy.RuleDefault( 'xena_system_admin_or_project_member', f'({_LEGACY_SYSTEM_ADMIN}) or ({_PROJECT_MEMBER})', description=("NOTE: this purely role-based rule recognizes only " "project scope")), ] # Public policy checkstrings expressed as personas # ------------------------------------------------ # TODO: update the following in Yoga SYSTEM_READER_OR_PROJECT_READER = XENA_SYSTEM_READER_OR_PROJECT_READER # SYSTEM_READER_OR_PROJECT_READER = _YOGA_SYSTEM_READER_OR_PROJECT_READER SYSTEM_ADMIN_OR_PROJECT_MEMBER = XENA_SYSTEM_ADMIN_OR_PROJECT_MEMBER # SYSTEM_ADMIN_OR_PROJECT_MEMBER = _YOGA_SYSTEM_ADMIN_OR_PROJECT_MEMBER # We won't be using this one in Xena. System-admin-only rules will NOT be # modified during Xena development. # SYSTEM_ADMIN_ONLY = XENA_SYSTEM_ADMIN_ONLY # SYSTEM_ADMIN_ONLY = _YOGA_SYSTEM_ADMIN_ONLY # Deprecation strategy # -------------------- # We will be using the following strategy to transform Cinder policies # from legacy Wallaby checkstrings to Keystone default-role-and-scope aware # policies over the next few cycles: # # 1. In Xena, the Wallaby checkstrings are moved to CinderDeprecatedRules and # new checkstrings (using the three default roles but project scope only) # are defined in DocumentedRuleDefaults. At this point, only the # three Cinder personas of system-admin, project-member, and project-reader # will be implemented, but to prepare for Yoga, we'll use the variables # defined in the "Public policy checkstrings expressed as personas" above. # # EXCEPTION: any policies that are currently (i.e., during Xena development) # using "rule:admin_api" (which shows up in the policy files as # 'base.RULE_ADMIN_API') will NOT be deprecated in Xena. (They will be # deprecated in Yoga.) # # 2. In Yoga, the Xena checkstrings are moved to the CinderDeprecatedRules. # For example, if a DocumentedRuleDefault with # check_str=SYSTEM_READER_OR_PROJECT_READER # contains a deprecated_rule, find the definition of that # CinderDeprecatedRule in the file and change *its* checkstring to # check_str=XENA_SYSTEM_READER_OR_PROJECT_READER # # The checkstrings in the DocumentedRuleDefaults will be updated # when we change the "Public policy checkstrings expressed as personas" # above to their _YOGA versions in this file--we will not have to manually # update the checkstrings in the individual files. # # EXCEPTION: We'll need to add CinderDeprecatedRules for any policies that # don't currently (i.e., during Yoga development) have them. (These will # be the "Admin API" calls that we didn't modify in Xena.) 
Their current # checkstrings will be moved to the deprecated rules, and their new # checkstrings will be SYSTEM_ADMIN_ONLY. # # OTHER UPDATES: All DocumentedRuleDefaults will need to have the # 'scope_types' field added to them, for example, # scope_types=['system', 'project'], # or # scope_types['system'], # depending on the intended scope of the rule. # # The Yoga checkstrings (using the three default roles + system scope) will # give us the full five Cinder personas. After operators have made # appropriate adjustments to user and group role assignments in Keystone, # they will be able to use the new checkstrings by setting the # 'enforce_new_defaults' and 'enforce_scope' options to appropriate # values in the [oslo_policy] section of their cinder configuration file. # # 3. In Z, we let the Yoga policy configuration bake to allow operators # to time to make the Keystone adjustments mentioned above before they # enable the Yoga rules. # # 4. In AA, we remove the CinderDeprecatedRules and adjust the # DocumentedRuleDefaults accordingly. _XENA_DEPRECATED_REASON = ( 'Default policies now support the three Keystone default roles, namely ' "'admin', 'member', and 'reader' to implement three Cinder " '"personas". See "Policy Personas and Permissions" in the "Cinder ' 'Service Configuration" documentation (Xena release) for details.') _YOGA_DEPRECATED_REASON = ( 'Default policies now support Keystone default roles and system scope to ' 'implement five Cinder "personas". See "Policy Personas and Permissions" ' 'in the "Cinder Service Configuration" documentation (Yoga release) for ' 'details.') # TODO: change these in Yoga DEPRECATED_REASON = _XENA_DEPRECATED_REASON DEPRECATED_SINCE = versionutils.deprecated.XENA class CinderDeprecatedRule(policy.DeprecatedRule): """A DeprecatedRule subclass with pre-defined fields.""" def __init__(self, name: str, check_str: str, *, deprecated_reason: Optional[str] = DEPRECATED_REASON, deprecated_since: Optional[str] = DEPRECATED_SINCE, ): super().__init__( name, check_str, deprecated_reason=deprecated_reason, deprecated_since=deprecated_since ) # This is used by the deprecated rules in the individual policy files # in Xena. # TODO: remove in Yoga RULE_ADMIN_OR_OWNER = 'rule:admin_or_owner' # FIXME: remove these when cinder.policies.default_types is updated SYSTEM_OR_DOMAIN_OR_PROJECT_ADMIN = 'rule:system_or_domain_or_project_admin' SYSTEM_ADMIN = _SYSTEM_ADMIN YOGA_REMOVAL = 'DEPRECATED: This rule will be removed in the Yoga release.' 
PADDING = ' ' * (70 - len(YOGA_REMOVAL)) # legacy rules to be removed in Yoga legacy_rule_defaults = [ policy.RuleDefault('admin_or_owner', 'is_admin:True or (role:admin and ' 'is_admin_project:True) or project_id:%(project_id)s', description=(f'{YOGA_REMOVAL}{PADDING}' 'Default rule for most non-Admin APIs.')), # currently used only by cinder.policies.default_types policy.RuleDefault('system_or_domain_or_project_admin', '(role:admin and system_scope:all) or ' '(role:admin and domain_id:%(domain_id)s) or ' '(role:admin and project_id:%(project_id)s)', description=(f'{YOGA_REMOVAL}{PADDING}' "Default rule for admins of cloud, domain " "or a project.")), ] def list_rules(): # TODO: update in Yoga and AA # xena: legacy_rule_defaults + xena_rule_defaults # yoga: xena_rule_defaults + yoga_rule_defaults # AA: yoga_rule_defaults only return legacy_rule_defaults + xena_rule_defaults ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/policies/capabilities.py0000664000175000017500000000220300000000000021142 0ustar00zuulzuul00000000000000# Copyright (c) 2017 Huawei Technologies Co., Ltd. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_policy import policy from cinder.policies import base CAPABILITIES_POLICY = "volume_extension:capabilities" capabilities_policies = [ policy.DocumentedRuleDefault( name=CAPABILITIES_POLICY, check_str=base.RULE_ADMIN_API, description="Show backend capabilities.", operations=[ { 'method': 'GET', 'path': '/capabilities/{host_name}' } ]) ] def list_rules(): return capabilities_policies ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/policies/clusters.py0000664000175000017500000000344100000000000020362 0ustar00zuulzuul00000000000000# Copyright (c) 2017 Huawei Technologies Co., Ltd. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
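# --------------------------------------------------------------------------
# Illustrative aside, not part of the upstream tree: the deprecation strategy
# described in cinder/policies/base.py above only takes full effect once the
# operator opts in.  Until then, oslo.policy evaluates each
# DocumentedRuleDefault as "new check_str OR deprecated check_str", which is
# why the legacy admin_or_owner checkstrings keep working during the
# transition.  Opting in happens in cinder.conf, using the option names
# mentioned in base.py (the values shown are illustrative):
#
#     [oslo_policy]
#     enforce_new_defaults = True
#     enforce_scope = True
# --------------------------------------------------------------------------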
from oslo_policy import policy from cinder.policies import base GET_POLICY = 'clusters:get' GET_ALL_POLICY = 'clusters:get_all' UPDATE_POLICY = 'clusters:update' clusters_policies = [ policy.DocumentedRuleDefault( name=GET_ALL_POLICY, check_str=base.RULE_ADMIN_API, description="List clusters.", operations=[ { 'method': 'GET', 'path': '/clusters' }, { 'method': 'GET', 'path': '/clusters/detail' } ]), policy.DocumentedRuleDefault( name=GET_POLICY, check_str=base.RULE_ADMIN_API, description="Show cluster.", operations=[ { 'method': 'GET', 'path': '/clusters/{cluster_id}' } ]), policy.DocumentedRuleDefault( name=UPDATE_POLICY, check_str=base.RULE_ADMIN_API, description="Update cluster.", operations=[ { 'method': 'PUT', 'path': '/clusters/{cluster_id}' } ]), ] def list_rules(): return clusters_policies ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/policies/default_types.py0000664000175000017500000000573500000000000021376 0ustar00zuulzuul00000000000000# Copyright 2020 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_policy import policy from cinder.policies import base CREATE_UPDATE_POLICY = "volume_extension:default_set_or_update" GET_POLICY = "volume_extension:default_get" GET_ALL_POLICY = "volume_extension:default_get_all" DELETE_POLICY = "volume_extension:default_unset" deprecated_create_update_policy = base.CinderDeprecatedRule( name=CREATE_UPDATE_POLICY, check_str=base.SYSTEM_OR_DOMAIN_OR_PROJECT_ADMIN ) deprecated_get_policy = base.CinderDeprecatedRule( name=GET_POLICY, check_str=base.SYSTEM_OR_DOMAIN_OR_PROJECT_ADMIN ) deprecated_get_all_policy = base.CinderDeprecatedRule( name=GET_ALL_POLICY, check_str=base.SYSTEM_ADMIN ) deprecated_delete_policy = base.CinderDeprecatedRule( name=DELETE_POLICY, check_str=base.SYSTEM_OR_DOMAIN_OR_PROJECT_ADMIN ) default_type_policies = [ policy.DocumentedRuleDefault( name=CREATE_UPDATE_POLICY, check_str=base.RULE_ADMIN_API, description="Set or update default volume type.", operations=[ { 'method': 'PUT', 'path': '/default-types' } ], deprecated_rule=deprecated_create_update_policy, ), policy.DocumentedRuleDefault( name=GET_POLICY, check_str=base.RULE_ADMIN_API, description="Get default types.", operations=[ { 'method': 'GET', 'path': '/default-types/{project-id}' } ], deprecated_rule=deprecated_get_policy, ), policy.DocumentedRuleDefault( name=GET_ALL_POLICY, check_str=base.RULE_ADMIN_API, description="Get all default types. 
" "WARNING: Changing this might open up too much " "information regarding cloud deployment.", operations=[ { 'method': 'GET', 'path': '/default-types/' } ], deprecated_rule=deprecated_get_all_policy, ), policy.DocumentedRuleDefault( name=DELETE_POLICY, check_str=base.RULE_ADMIN_API, description="Unset default type.", operations=[ { 'method': 'DELETE', 'path': '/default-types/{project-id}' } ], deprecated_rule=deprecated_delete_policy, ), ] def list_rules(): return default_type_policies ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/policies/group_actions.py0000664000175000017500000001001300000000000021363 0ustar00zuulzuul00000000000000# Copyright (c) 2017 Huawei Technologies Co., Ltd. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_policy import policy from cinder.policies import base RESET_STATUS = 'group:reset_status' ENABLE_REP = 'group:enable_replication' DISABLE_REP = 'group:disable_replication' FAILOVER_REP = 'group:failover_replication' LIST_REP = 'group:list_replication_targets' DELETE_POLICY = 'group:delete' deprecated_delete_group = base.CinderDeprecatedRule( name=DELETE_POLICY, check_str=base.RULE_ADMIN_OR_OWNER ) deprecated_enable_replication = base.CinderDeprecatedRule( name=ENABLE_REP, check_str=base.RULE_ADMIN_OR_OWNER ) deprecated_disable_replication = base.CinderDeprecatedRule( name=DISABLE_REP, check_str=base.RULE_ADMIN_OR_OWNER ) deprecated_failover_replication = base.CinderDeprecatedRule( name=FAILOVER_REP, check_str=base.RULE_ADMIN_OR_OWNER ) deprecated_list_replication = base.CinderDeprecatedRule( name=LIST_REP, check_str=base.RULE_ADMIN_OR_OWNER ) # TODO(enriquetaso): update the following in Yoga. # We're not deprecating the reset rule in Xena. 
# deprecated_reset_status = base.CinderDeprecatedRule( # name=RESET_STATUS, # check_str=base.RULE_ADMIN_API # ) group_actions_policies = [ policy.DocumentedRuleDefault( name=DELETE_POLICY, check_str=base.SYSTEM_ADMIN_OR_PROJECT_MEMBER, description="Delete group.", operations=[ { 'method': 'POST', 'path': '/groups/{group_id}/action (delete)' } ], deprecated_rule=deprecated_delete_group, ), policy.DocumentedRuleDefault( name=RESET_STATUS, check_str=base.RULE_ADMIN_API, description="Reset status of group.", operations=[ { 'method': 'POST', 'path': '/groups/{group_id}/action (reset_status)' } ] ), policy.DocumentedRuleDefault( name=ENABLE_REP, check_str=base.SYSTEM_ADMIN_OR_PROJECT_MEMBER, description="Enable replication.", operations=[ { 'method': 'POST', 'path': '/groups/{group_id}/action (enable_replication)' } ], deprecated_rule=deprecated_enable_replication, ), policy.DocumentedRuleDefault( name=DISABLE_REP, check_str=base.SYSTEM_ADMIN_OR_PROJECT_MEMBER, description="Disable replication.", operations=[ { 'method': 'POST', 'path': '/groups/{group_id}/action (disable_replication)' } ], deprecated_rule=deprecated_disable_replication, ), policy.DocumentedRuleDefault( name=FAILOVER_REP, check_str=base.SYSTEM_ADMIN_OR_PROJECT_MEMBER, description="Fail over replication.", operations=[ { 'method': 'POST', 'path': '/groups/{group_id}/action (failover_replication)' } ], deprecated_rule=deprecated_failover_replication, ), policy.DocumentedRuleDefault( name=LIST_REP, check_str=base.SYSTEM_ADMIN_OR_PROJECT_MEMBER, description="List failover replication.", operations=[ { 'method': 'POST', 'path': '/groups/{group_id}/action (list_replication_targets)' } ], deprecated_rule=deprecated_list_replication, ), ] def list_rules(): return group_actions_policies ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/policies/group_snapshot_actions.py0000664000175000017500000000230500000000000023307 0ustar00zuulzuul00000000000000# Copyright (c) 2017 Huawei Technologies Co., Ltd. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_policy import policy from cinder.policies import base RESET_STATUS = 'group:reset_group_snapshot_status' group_snapshot_actions_policies = [ policy.DocumentedRuleDefault( name=RESET_STATUS, check_str=base.RULE_ADMIN_API, description="Reset status of group snapshot.", operations=[ { 'method': 'POST', 'path': '/group_snapshots/{g_snapshot_id}/action (reset_status)' } ]), ] def list_rules(): return group_snapshot_actions_policies ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/policies/group_snapshots.py0000664000175000017500000001017000000000000021751 0ustar00zuulzuul00000000000000# Copyright (c) 2017 Huawei Technologies Co., Ltd. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_policy import policy from cinder.policies import base CREATE_POLICY = 'group:create_group_snapshot' DELETE_POLICY = 'group:delete_group_snapshot' UPDATE_POLICY = 'group:update_group_snapshot' GET_POLICY = 'group:get_group_snapshot' GET_ALL_POLICY = 'group:get_all_group_snapshots' GROUP_SNAPSHOT_ATTRIBUTES_POLICY = 'group:group_snapshot_project_attribute' deprecated_get_all_group_snapshots = base.CinderDeprecatedRule( name=GET_ALL_POLICY, check_str=base.RULE_ADMIN_OR_OWNER ) deprecated_create_group_snapshot = base.CinderDeprecatedRule( name=CREATE_POLICY, check_str="" ) deprecated_get_group_snapshot = base.CinderDeprecatedRule( name=GET_POLICY, check_str=base.RULE_ADMIN_OR_OWNER ) deprecated_delete_group_snapshot = base.CinderDeprecatedRule( name=DELETE_POLICY, check_str=base.RULE_ADMIN_OR_OWNER ) deprecated_update_group_snapshot = base.CinderDeprecatedRule( name=UPDATE_POLICY, check_str=base.RULE_ADMIN_OR_OWNER ) group_snapshots_policies = [ policy.DocumentedRuleDefault( name=GET_ALL_POLICY, check_str=base.SYSTEM_READER_OR_PROJECT_READER, description="List group snapshots.", operations=[ { 'method': 'GET', 'path': '/group_snapshots' }, { 'method': 'GET', 'path': '/group_snapshots/detail' } ], deprecated_rule=deprecated_get_all_group_snapshots, ), policy.DocumentedRuleDefault( name=CREATE_POLICY, check_str=base.SYSTEM_ADMIN_OR_PROJECT_MEMBER, description="Create group snapshot.", operations=[ { 'method': 'POST', 'path': '/group_snapshots' } ], deprecated_rule=deprecated_create_group_snapshot, ), policy.DocumentedRuleDefault( name=GET_POLICY, check_str=base.SYSTEM_READER_OR_PROJECT_READER, description="Show group snapshot.", operations=[ { 'method': 'GET', 'path': '/group_snapshots/{group_snapshot_id}' } ], deprecated_rule=deprecated_get_group_snapshot, ), policy.DocumentedRuleDefault( name=DELETE_POLICY, check_str=base.SYSTEM_ADMIN_OR_PROJECT_MEMBER, description="Delete group snapshot.", operations=[ { 'method': 'DELETE', 'path': '/group_snapshots/{group_snapshot_id}' } ], deprecated_rule=deprecated_delete_group_snapshot, ), policy.DocumentedRuleDefault( name=UPDATE_POLICY, check_str=base.SYSTEM_ADMIN_OR_PROJECT_MEMBER, description="Update group snapshot.", operations=[ { 'method': 'PUT', 'path': '/group_snapshots/{group_snapshot_id}' } ], deprecated_rule=deprecated_update_group_snapshot, ), policy.DocumentedRuleDefault( name=GROUP_SNAPSHOT_ATTRIBUTES_POLICY, check_str=base.RULE_ADMIN_API, description="List group snapshots or show group " "snapshot with project attributes.", operations=[ { 'method': 'GET', 'path': '/group_snapshots/{group_snapshot_id}' }, { 'method': 'GET', 'path': '/group_snapshots/detail' } ]), ] def list_rules(): return group_snapshots_policies ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/policies/group_types.py0000664000175000017500000001222300000000000021074 0ustar00zuulzuul00000000000000# Copyright (c) 2017 Huawei Technologies Co., Ltd. # All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_policy import policy from cinder.policies import base # MANAGE_POLICY is deprecated MANAGE_POLICY = 'group:group_types_manage' CREATE_POLICY = 'group:group_types:create' UPDATE_POLICY = 'group:group_types:update' DELETE_POLICY = 'group:group_types:delete' SHOW_ACCESS_POLICY = 'group:access_group_types_specs' # SPEC_POLICY is deprecated SPEC_POLICY = 'group:group_types_specs' SPEC_GET_POLICY = 'group:group_types_specs:get' SPEC_GET_ALL_POLICY = 'group:group_types_specs:get_all' SPEC_CREATE_POLICY = 'group:group_types_specs:create' SPEC_UPDATE_POLICY = 'group:group_types_specs:update' SPEC_DELETE_POLICY = 'group:group_types_specs:delete' deprecated_manage_policy = base.CinderDeprecatedRule( name=MANAGE_POLICY, check_str=base.RULE_ADMIN_API, deprecated_reason=(f'{MANAGE_POLICY} has been replaced by more granular ' 'policies that separately govern POST, PUT, and DELETE ' 'operations.'), ) deprecated_spec_policy = base.CinderDeprecatedRule( name=SPEC_POLICY, check_str=base.RULE_ADMIN_API, deprecated_reason=(f'{SPEC_POLICY} has been replaced by more granular ' 'policies that separately govern GET, POST, PUT, and ' 'DELETE operations.'), ) group_types_policies = [ policy.DocumentedRuleDefault( name=CREATE_POLICY, check_str=base.RULE_ADMIN_API, description="Create a group type.", operations=[ { 'method': 'POST', 'path': '/group_types/' }, ], deprecated_rule=deprecated_manage_policy, ), policy.DocumentedRuleDefault( name=UPDATE_POLICY, check_str=base.RULE_ADMIN_API, description="Update a group type.", operations=[ { 'method': 'PUT', 'path': '/group_types/{group_type_id}' }, ], deprecated_rule=deprecated_manage_policy, ), policy.DocumentedRuleDefault( name=DELETE_POLICY, check_str=base.RULE_ADMIN_API, description="Delete a group type.", operations=[ { 'method': 'DELETE', 'path': '/group_types/{group_type_id}' }, ], deprecated_rule=deprecated_manage_policy, ), policy.DocumentedRuleDefault( name=SHOW_ACCESS_POLICY, check_str=base.RULE_ADMIN_API, description="Show group type with type specs attributes.", operations=[ { 'method': 'GET', 'path': '/group_types/{group_type_id}' } ] ), policy.DocumentedRuleDefault( name=SPEC_GET_POLICY, check_str=base.RULE_ADMIN_API, description="Show a group type spec.", operations=[ { 'method': 'GET', 'path': '/group_types/{group_type_id}/group_specs/{g_spec_id}' }, ], deprecated_rule=deprecated_spec_policy, ), policy.DocumentedRuleDefault( name=SPEC_GET_ALL_POLICY, check_str=base.RULE_ADMIN_API, description="List group type specs.", operations=[ { 'method': 'GET', 'path': '/group_types/{group_type_id}/group_specs' }, ], deprecated_rule=deprecated_spec_policy, ), policy.DocumentedRuleDefault( name=SPEC_CREATE_POLICY, check_str=base.RULE_ADMIN_API, description="Create a group type spec.", operations=[ { 'method': 'POST', 'path': '/group_types/{group_type_id}/group_specs' }, ], deprecated_rule=deprecated_spec_policy, ), policy.DocumentedRuleDefault( name=SPEC_UPDATE_POLICY, check_str=base.RULE_ADMIN_API, description="Update a group 
type spec.", operations=[ { 'method': 'PUT', 'path': '/group_types/{group_type_id}/group_specs/{g_spec_id}' }, ], deprecated_rule=deprecated_spec_policy, ), policy.DocumentedRuleDefault( name=SPEC_DELETE_POLICY, check_str=base.RULE_ADMIN_API, description="Delete a group type spec.", operations=[ { 'method': 'DELETE', 'path': '/group_types/{group_type_id}/group_specs/{g_spec_id}' }, ], deprecated_rule=deprecated_spec_policy, ), ] def list_rules(): return group_types_policies ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/policies/groups.py0000664000175000017500000000636300000000000020043 0ustar00zuulzuul00000000000000# Copyright (c) 2017 Huawei Technologies Co., Ltd. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_policy import policy from cinder.policies import base CREATE_POLICY = 'group:create' UPDATE_POLICY = 'group:update' GET_POLICY = 'group:get' GET_ALL_POLICY = 'group:get_all' GROUP_ATTRIBUTES_POLICY = 'group:group_project_attribute' deprecated_get_all_groups = base.CinderDeprecatedRule( name=GET_ALL_POLICY, check_str=base.RULE_ADMIN_OR_OWNER ) deprecated_create_group = base.CinderDeprecatedRule( name=CREATE_POLICY, check_str="" ) deprecated_get_group = base.CinderDeprecatedRule( name=GET_POLICY, check_str=base.RULE_ADMIN_OR_OWNER ) deprecated_update_group = base.CinderDeprecatedRule( name=UPDATE_POLICY, check_str=base.RULE_ADMIN_OR_OWNER, ) groups_policies = [ policy.DocumentedRuleDefault( name=GET_ALL_POLICY, check_str=base.SYSTEM_READER_OR_PROJECT_READER, description="List groups.", operations=[ { 'method': 'GET', 'path': '/groups' }, { 'method': 'GET', 'path': '/groups/detail' } ], deprecated_rule=deprecated_get_all_groups, ), policy.DocumentedRuleDefault( name=CREATE_POLICY, check_str=base.SYSTEM_ADMIN_OR_PROJECT_MEMBER, description="Create group.", operations=[ { 'method': 'POST', 'path': '/groups' } ], deprecated_rule=deprecated_create_group, ), policy.DocumentedRuleDefault( name=GET_POLICY, check_str=base.SYSTEM_READER_OR_PROJECT_READER, description="Show group.", operations=[ { 'method': 'GET', 'path': '/groups/{group_id}' } ], deprecated_rule=deprecated_get_group, ), policy.DocumentedRuleDefault( name=UPDATE_POLICY, check_str=base.SYSTEM_ADMIN_OR_PROJECT_MEMBER, description="Update group.", operations=[ { 'method': 'PUT', 'path': '/groups/{group_id}' } ], deprecated_rule=deprecated_update_group, ), policy.DocumentedRuleDefault( name=GROUP_ATTRIBUTES_POLICY, check_str=base.RULE_ADMIN_API, description="List groups or show group with project attributes.", operations=[ { 'method': 'GET', 'path': '/groups/{group_id}' }, { 'method': 'GET', 'path': '/groups/detail' } ] ), ] def list_rules(): return groups_policies ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/policies/hosts.py0000664000175000017500000000247200000000000017661 0ustar00zuulzuul00000000000000# Copyright (c) 2017 Huawei Technologies Co., 
Ltd. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_policy import policy from cinder.policies import base MANAGE_POLICY = "volume_extension:hosts" hosts_policies = [ policy.DocumentedRuleDefault( name=MANAGE_POLICY, check_str=base.RULE_ADMIN_API, description="List, update or show hosts for a project.", operations=[ { 'method': 'GET', 'path': '/os-hosts' }, { 'method': 'PUT', 'path': '/os-hosts/{host_name}' }, { 'method': 'GET', 'path': '/os-hosts/{host_id}' } ]) ] def list_rules(): return hosts_policies ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/policies/limits.py0000664000175000017500000000250700000000000020021 0ustar00zuulzuul00000000000000# Copyright (c) 2017 Huawei Technologies Co., Ltd. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_policy import policy from cinder.policies import base EXTEND_LIMIT_ATTRIBUTE_POLICY = "limits_extension:used_limits" deprecated_limits = base.CinderDeprecatedRule( name=EXTEND_LIMIT_ATTRIBUTE_POLICY, check_str=base.RULE_ADMIN_OR_OWNER ) limits_policies = [ policy.DocumentedRuleDefault( name=EXTEND_LIMIT_ATTRIBUTE_POLICY, check_str=base.SYSTEM_READER_OR_PROJECT_READER, description="Show limits with used limit attributes.", operations=[ { 'method': 'GET', 'path': '/limits' } ], deprecated_rule=deprecated_limits, ) ] def list_rules(): return limits_policies ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/policies/manageable_snapshots.py0000664000175000017500000000401700000000000022674 0ustar00zuulzuul00000000000000# Copyright (c) 2017 Huawei Technologies Co., Ltd. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
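# --------------------------------------------------------------------------
# Illustrative aside, not part of the upstream tree: the per-module
# list_rules() functions in this package feed the generated policy sample
# and documentation.  With cinder installed, the sample is typically
# rendered with oslo.policy's generator, assuming the 'cinder' namespace is
# registered for it (as is conventional for OpenStack projects):
#
#     oslopolicy-sample-generator --namespace cinder \
#         --output-file policy.yaml.sample
# --------------------------------------------------------------------------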
from oslo_policy import policy from cinder.policies import base MANAGE_POLICY = 'snapshot_extension:snapshot_manage' UNMANAGE_POLICY = 'snapshot_extension:snapshot_unmanage' LIST_MANAGEABLE_POLICY = 'snapshot_extension:list_manageable' manageable_snapshots_policies = [ policy.DocumentedRuleDefault( name=LIST_MANAGEABLE_POLICY, check_str=base.RULE_ADMIN_API, description= "List (in detail) of snapshots which are available to manage.", operations=[ { 'method': 'GET', 'path': '/manageable_snapshots' }, { 'method': 'GET', 'path': '/manageable_snapshots/detail' } ]), policy.DocumentedRuleDefault( name=MANAGE_POLICY, check_str=base.RULE_ADMIN_API, description="Manage an existing snapshot.", operations=[ { 'method': 'POST', 'path': '/manageable_snapshots' } ]), policy.DocumentedRuleDefault( name=UNMANAGE_POLICY, check_str=base.RULE_ADMIN_API, description="Stop managing a snapshot.", operations=[ { 'method': 'POST', 'path': '/snapshots/{snapshot_id}/action (os-unmanage)' } ]), ] def list_rules(): return manageable_snapshots_policies ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/policies/manageable_volumes.py0000664000175000017500000000376000000000000022350 0ustar00zuulzuul00000000000000# Copyright (c) 2017 Huawei Technologies Co., Ltd. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_policy import policy from cinder.policies import base MANAGE_POLICY = "volume_extension:volume_manage" UNMANAGE_POLICY = "volume_extension:volume_unmanage" LIST_MANAGEABLE_POLICY = "volume_extension:list_manageable" manageable_volumes_policies = [ policy.DocumentedRuleDefault( name=LIST_MANAGEABLE_POLICY, check_str=base.RULE_ADMIN_API, description= "List (in detail) of volumes which are available to manage.", operations=[ { 'method': 'GET', 'path': '/manageable_volumes' }, { 'method': 'GET', 'path': '/manageable_volumes/detail' } ]), policy.DocumentedRuleDefault( name=MANAGE_POLICY, check_str=base.RULE_ADMIN_API, description="Manage existing volumes.", operations=[ { 'method': 'POST', 'path': '/manageable_volumes' } ]), policy.DocumentedRuleDefault( name=UNMANAGE_POLICY, check_str=base.RULE_ADMIN_API, description="Stop managing a volume.", operations=[ { 'method': 'POST', 'path': '/volumes/{volume_id}/action (os-unmanage)' } ]), ] def list_rules(): return manageable_volumes_policies ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/policies/messages.py0000664000175000017500000000436300000000000020331 0ustar00zuulzuul00000000000000# Copyright (c) 2017 Huawei Technologies Co., Ltd. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_policy import policy from cinder.policies import base DELETE_POLICY = 'message:delete' GET_POLICY = 'message:get' GET_ALL_POLICY = 'message:get_all' deprecated_get_policy = base.CinderDeprecatedRule( name=GET_POLICY, check_str=base.RULE_ADMIN_OR_OWNER ) deprecated_get_all_policy = base.CinderDeprecatedRule( name=GET_ALL_POLICY, check_str=base.RULE_ADMIN_OR_OWNER ) deprecated_delete_policy = base.CinderDeprecatedRule( name=DELETE_POLICY, check_str=base.RULE_ADMIN_OR_OWNER ) messages_policies = [ policy.DocumentedRuleDefault( name=GET_ALL_POLICY, check_str=base.SYSTEM_READER_OR_PROJECT_READER, description="List messages.", operations=[ { 'method': 'GET', 'path': '/messages' } ], deprecated_rule=deprecated_get_all_policy, ), policy.DocumentedRuleDefault( name=GET_POLICY, check_str=base.SYSTEM_READER_OR_PROJECT_READER, description="Show message.", operations=[ { 'method': 'GET', 'path': '/messages/{message_id}' } ], deprecated_rule=deprecated_get_policy, ), policy.DocumentedRuleDefault( name=DELETE_POLICY, check_str=base.SYSTEM_ADMIN_OR_PROJECT_MEMBER, description="Delete message.", operations=[ { 'method': 'DELETE', 'path': '/messages/{message_id}' } ], deprecated_rule=deprecated_delete_policy, ), ] def list_rules(): return messages_policies ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/policies/qos_specs.py0000664000175000017500000000613700000000000020522 0ustar00zuulzuul00000000000000# Copyright (c) 2017 Huawei Technologies Co., Ltd. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from oslo_policy import policy from cinder.policies import base CREATE_POLICY = 'volume_extension:qos_specs_manage:create' GET_POLICY = 'volume_extension:qos_specs_manage:get' GET_ALL_POLICY = 'volume_extension:qos_specs_manage:get_all' UPDATE_POLICY = 'volume_extension:qos_specs_manage:update' DELETE_POLICY = 'volume_extension:qos_specs_manage:delete' qos_specs_policies = [ policy.DocumentedRuleDefault( name=GET_ALL_POLICY, check_str=base.RULE_ADMIN_API, description="List qos specs or list all associations.", operations=[ { 'method': 'GET', 'path': '/qos-specs' }, { 'method': 'GET', 'path': '/qos-specs/{qos_id}/associations' } ]), policy.DocumentedRuleDefault( name=GET_POLICY, check_str=base.RULE_ADMIN_API, description="Show qos specs.", operations=[ { 'method': 'GET', 'path': '/qos-specs/{qos_id}' } ]), policy.DocumentedRuleDefault( name=CREATE_POLICY, check_str=base.RULE_ADMIN_API, description="Create qos specs.", operations=[ { 'method': 'POST', 'path': '/qos-specs' } ]), policy.DocumentedRuleDefault( name=UPDATE_POLICY, check_str=base.RULE_ADMIN_API, description="Update qos specs (including updating association).", operations=[ { 'method': 'PUT', 'path': '/qos-specs/{qos_id}' }, { 'method': 'GET', 'path': '/qos-specs/{qos_id}/disassociate_all' }, { 'method': 'GET', 'path': '/qos-specs/{qos_id}/associate' }, { 'method': 'GET', 'path': '/qos-specs/{qos_id}/disassociate' } ]), policy.DocumentedRuleDefault( name=DELETE_POLICY, check_str=base.RULE_ADMIN_API, description="delete qos specs or unset one specified qos key.", operations=[ { 'method': 'DELETE', 'path': '/qos-specs/{qos_id}' }, { 'method': 'PUT', 'path': '/qos-specs/{qos_id}/delete_keys' } ]) ] def list_rules(): return qos_specs_policies ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/policies/quota_class.py0000664000175000017500000000372400000000000021040 0ustar00zuulzuul00000000000000# Copyright (c) 2017 Huawei Technologies Co., Ltd. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from oslo_policy import policy from cinder.policies import base # MANAGE_POLICY is deprecated MANAGE_POLICY = 'volume_extension:quota_classes' GET_POLICY = 'volume_extension:quota_classes:get' UPDATE_POLICY = 'volume_extension:quota_classes:update' deprecated_manage_policy = base.CinderDeprecatedRule( name=MANAGE_POLICY, check_str=base.RULE_ADMIN_API, deprecated_reason=(f'{MANAGE_POLICY} has been replaced by more granular ' 'policies that separately govern GET and PUT ' 'operations.'), ) quota_class_policies = [ policy.DocumentedRuleDefault( name=GET_POLICY, check_str=base.RULE_ADMIN_API, description="Show project quota class.", operations=[ { 'method': 'GET', 'path': '/os-quota-class-sets/{project_id}' } ], deprecated_rule=deprecated_manage_policy, ), policy.DocumentedRuleDefault( name=UPDATE_POLICY, check_str=base.RULE_ADMIN_API, description="Update project quota class.", operations=[ { 'method': 'PUT', 'path': '/os-quota-class-sets/{project_id}' } ], deprecated_rule=deprecated_manage_policy, ), ] def list_rules(): return quota_class_policies ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/policies/quotas.py0000664000175000017500000000435200000000000020034 0ustar00zuulzuul00000000000000# Copyright (c) 2017 Huawei Technologies Co., Ltd. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_policy import policy from cinder.policies import base SHOW_POLICY = 'volume_extension:quotas:show' UPDATE_POLICY = 'volume_extension:quotas:update' DELETE_POLICY = 'volume_extension:quotas:delete' deprecated_show_policy = policy.DeprecatedRule( name=SHOW_POLICY, check_str=base.RULE_ADMIN_OR_OWNER ) quota_policies = [ policy.DocumentedRuleDefault( name=SHOW_POLICY, check_str=base.SYSTEM_READER_OR_PROJECT_READER, description="Show project quota (including usage and default).", operations=[ { 'method': 'GET', 'path': '/os-quota-sets/{project_id}' }, { 'method': 'GET', 'path': '/os-quota-sets/{project_id}/default' }, { 'method': 'GET', 'path': '/os-quota-sets/{project_id}?usage=True' } ], deprecated_rule=deprecated_show_policy, ), policy.DocumentedRuleDefault( name=UPDATE_POLICY, check_str=base.RULE_ADMIN_API, description="Update project quota.", operations=[ { 'method': 'PUT', 'path': '/os-quota-sets/{project_id}' } ] ), policy.DocumentedRuleDefault( name=DELETE_POLICY, check_str=base.RULE_ADMIN_API, description="Delete project quota.", operations=[ { 'method': 'DELETE', 'path': '/os-quota-sets/{project_id}' } ] ), ] def list_rules(): return quota_policies ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/policies/scheduler_stats.py0000664000175000017500000000217200000000000021712 0ustar00zuulzuul00000000000000# Copyright (c) 2017 Huawei Technologies Co., Ltd. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_policy import policy from cinder.policies import base GET_POOL_POLICY = "scheduler_extension:scheduler_stats:get_pools" pools_policies = [ policy.DocumentedRuleDefault( name=GET_POOL_POLICY, check_str=base.RULE_ADMIN_API, description="List all backend pools.", operations=[ { 'method': 'GET', 'path': '/scheduler-stats/get_pools' } ]) ] def list_rules(): return pools_policies ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/policies/services.py0000664000175000017500000000475000000000000020345 0ustar00zuulzuul00000000000000# Copyright (c) 2017 Huawei Technologies Co., Ltd. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_policy import policy from cinder.policies import base GET_ALL_POLICY = "volume_extension:services:index" UPDATE_POLICY = "volume_extension:services:update" FAILOVER_POLICY = "volume:failover_host" FREEZE_POLICY = "volume:freeze_host" THAW_POLICY = "volume:thaw_host" services_policies = [ policy.DocumentedRuleDefault( name=GET_ALL_POLICY, check_str=base.RULE_ADMIN_API, description="List all services.", operations=[ { 'method': 'GET', 'path': '/os-services' } ]), policy.DocumentedRuleDefault( name=UPDATE_POLICY, check_str=base.RULE_ADMIN_API, description="Update service, including failover_host, thaw, freeze, " "disable, enable, set-log and get-log actions.", operations=[ { 'method': 'PUT', 'path': '/os-services/{action}' } ]), policy.DocumentedRuleDefault( name=FREEZE_POLICY, check_str=base.RULE_ADMIN_API, description="Freeze a backend host.", operations=[ { 'method': 'PUT', 'path': '/os-services/freeze' } ]), policy.DocumentedRuleDefault( name=THAW_POLICY, check_str=base.RULE_ADMIN_API, description="Thaw a backend host.", operations=[ { 'method': 'PUT', 'path': '/os-services/thaw' } ]), policy.DocumentedRuleDefault( name=FAILOVER_POLICY, check_str=base.RULE_ADMIN_API, description="Failover a backend host.", operations=[ { 'method': 'PUT', 'path': '/os-services/failover_host' } ]), ] def list_rules(): return services_policies ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/policies/snapshot_actions.py0000664000175000017500000000433200000000000022075 0ustar00zuulzuul00000000000000# Copyright (c) 2017 Huawei Technologies Co., Ltd. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_policy import policy from cinder.policies import base RESET_STATUS_POLICY = 'volume_extension:snapshot_admin_actions:reset_status' FORCE_DELETE_POLICY = 'volume_extension:snapshot_admin_actions:force_delete' UPDATE_STATUS_POLICY = \ 'snapshot_extension:snapshot_actions:update_snapshot_status' deprecated_update_status = base.CinderDeprecatedRule( name=UPDATE_STATUS_POLICY, check_str="" ) snapshot_actions_policies = [ policy.DocumentedRuleDefault( name=RESET_STATUS_POLICY, check_str=base.RULE_ADMIN_API, description="Reset status of a snapshot.", operations=[ { 'method': 'POST', 'path': '/snapshots/{snapshot_id}/action (os-reset_status)' } ], ), policy.DocumentedRuleDefault( name=UPDATE_STATUS_POLICY, check_str=base.SYSTEM_ADMIN_OR_PROJECT_MEMBER, description="Update database fields of snapshot.", operations=[ { 'method': 'POST', 'path': '/snapshots/{snapshot_id}/action ' '(update_snapshot_status)' } ], deprecated_rule=deprecated_update_status, ), policy.DocumentedRuleDefault( name=FORCE_DELETE_POLICY, check_str=base.RULE_ADMIN_API, description="Force delete a snapshot.", operations=[ { 'method': 'POST', 'path': '/snapshots/{snapshot_id}/action (os-force_delete)' } ], ) ] def list_rules(): return snapshot_actions_policies ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/policies/snapshot_metadata.py0000664000175000017500000000560200000000000022216 0ustar00zuulzuul00000000000000# Copyright (c) 2017 Huawei Technologies Co., Ltd. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
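# ---------------------------------------------------------------------------
# Illustrative sketch (not upstream code): how an API handler consumes a rule
# such as GET_POLICY defined below. The handler name and request plumbing are
# hypothetical; authorize() and its target-dict convention come from
# cinder/policy.py later in this archive.
#
#     from cinder import policy
#     from cinder.policies import snapshot_metadata
#
#     def show_snapshot_metadata(context, snapshot_id):
#         # Raises PolicyNotAuthorized unless the context passes the check.
#         policy.authorize(context, snapshot_metadata.GET_POLICY,
#                          {'project_id': context.project_id})
#         ...
# ---------------------------------------------------------------------------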
from oslo_policy import policy from cinder.policies import base GET_POLICY = 'volume:get_snapshot_metadata' DELETE_POLICY = 'volume:delete_snapshot_metadata' UPDATE_POLICY = 'volume:update_snapshot_metadata' deprecated_get_snapshot_metadata = base.CinderDeprecatedRule( name=GET_POLICY, check_str=base.RULE_ADMIN_OR_OWNER ) deprecated_update_snapshot_metadata = base.CinderDeprecatedRule( name=UPDATE_POLICY, check_str=base.RULE_ADMIN_OR_OWNER ) deprecated_delete_snapshot_metadata = base.CinderDeprecatedRule( name=DELETE_POLICY, check_str=base.RULE_ADMIN_OR_OWNER ) snapshot_metadata_policies = [ policy.DocumentedRuleDefault( name=GET_POLICY, check_str=base.SYSTEM_READER_OR_PROJECT_READER, description="Show snapshot's metadata or one specified metadata " "with a given key.", operations=[ { 'method': 'GET', 'path': '/snapshots/{snapshot_id}/metadata' }, { 'method': 'GET', 'path': '/snapshots/{snapshot_id}/metadata/{key}' } ], deprecated_rule=deprecated_get_snapshot_metadata, ), policy.DocumentedRuleDefault( name=UPDATE_POLICY, check_str=base.SYSTEM_ADMIN_OR_PROJECT_MEMBER, description="Update snapshot's metadata or one specified " "metadata with a given key.", operations=[ { 'method': 'POST', 'path': '/snapshots/{snapshot_id}/metadata' }, { 'method': 'PUT', 'path': '/snapshots/{snapshot_id}/metadata/{key}' } ], deprecated_rule=deprecated_update_snapshot_metadata, ), policy.DocumentedRuleDefault( name=DELETE_POLICY, check_str=base.SYSTEM_ADMIN_OR_PROJECT_MEMBER, description="Delete snapshot's specified metadata " "with a given key.", operations=[ { 'method': 'DELETE', 'path': '/snapshots/{snapshot_id}/metadata/{key}' } ], deprecated_rule=deprecated_delete_snapshot_metadata, ), ] def list_rules(): return snapshot_metadata_policies ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/policies/snapshots.py0000664000175000017500000001020600000000000020535 0ustar00zuulzuul00000000000000# Copyright (c) 2017 Huawei Technologies Co., Ltd. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from oslo_policy import policy from cinder.policies import base BASE_POLICY_NAME = 'volume:snapshots:%s' GET_POLICY = 'volume:get_snapshot' GET_ALL_POLICY = 'volume:get_all_snapshots' CREATE_POLICY = 'volume:create_snapshot' DELETE_POLICY = 'volume:delete_snapshot' UPDATE_POLICY = 'volume:update_snapshot' EXTEND_ATTRIBUTE = 'volume_extension:extended_snapshot_attributes' deprecated_get_all_snapshots = base.CinderDeprecatedRule( name=GET_ALL_POLICY, check_str=base.RULE_ADMIN_OR_OWNER ) deprecated_extend_snapshot_attribute = base.CinderDeprecatedRule( name=EXTEND_ATTRIBUTE, check_str=base.RULE_ADMIN_OR_OWNER ) deprecated_create_snapshot = base.CinderDeprecatedRule( name=CREATE_POLICY, check_str=base.RULE_ADMIN_OR_OWNER ) deprecated_get_snapshot = base.CinderDeprecatedRule( name=GET_POLICY, check_str=base.RULE_ADMIN_OR_OWNER ) deprecated_update_snapshot = base.CinderDeprecatedRule( name=UPDATE_POLICY, check_str=base.RULE_ADMIN_OR_OWNER ) deprecated_delete_snapshot = base.CinderDeprecatedRule( name=DELETE_POLICY, check_str=base.RULE_ADMIN_OR_OWNER ) snapshots_policies = [ policy.DocumentedRuleDefault( name=GET_ALL_POLICY, check_str=base.SYSTEM_READER_OR_PROJECT_READER, description="List snapshots.", operations=[ { 'method': 'GET', 'path': '/snapshots' }, { 'method': 'GET', 'path': '/snapshots/detail' } ], deprecated_rule=deprecated_get_all_snapshots, ), policy.DocumentedRuleDefault( name=EXTEND_ATTRIBUTE, check_str=base.SYSTEM_READER_OR_PROJECT_READER, description="List or show snapshots with extended attributes.", operations=[ { 'method': 'GET', 'path': '/snapshots/{snapshot_id}' }, { 'method': 'GET', 'path': '/snapshots/detail' } ], deprecated_rule=deprecated_extend_snapshot_attribute, ), policy.DocumentedRuleDefault( name=CREATE_POLICY, check_str=base.SYSTEM_ADMIN_OR_PROJECT_MEMBER, description="Create snapshot.", operations=[ { 'method': 'POST', 'path': '/snapshots' } ], deprecated_rule=deprecated_create_snapshot, ), policy.DocumentedRuleDefault( name=GET_POLICY, check_str=base.SYSTEM_READER_OR_PROJECT_READER, description="Show snapshot.", operations=[ { 'method': 'GET', 'path': '/snapshots/{snapshot_id}' } ], deprecated_rule=deprecated_get_snapshot, ), policy.DocumentedRuleDefault( name=UPDATE_POLICY, check_str=base.SYSTEM_ADMIN_OR_PROJECT_MEMBER, description="Update snapshot.", operations=[ { 'method': 'PUT', 'path': '/snapshots/{snapshot_id}' } ], deprecated_rule=deprecated_update_snapshot, ), policy.DocumentedRuleDefault( name=DELETE_POLICY, check_str=base.SYSTEM_ADMIN_OR_PROJECT_MEMBER, description="Delete snapshot.", operations=[ { 'method': 'DELETE', 'path': '/snapshots/{snapshot_id}' } ], deprecated_rule=deprecated_delete_snapshot, ), ] def list_rules(): return snapshots_policies ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/policies/type_extra_specs.py0000664000175000017500000001007300000000000022076 0ustar00zuulzuul00000000000000# Copyright (c) 2017 Huawei Technologies Co., Ltd. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. from oslo_policy import policy from cinder.policies import base USER_VISIBLE_EXTRA_SPECS = ( "RESKEY:availability_zones", "multiattach", "replication_enabled", ) CREATE_POLICY = "volume_extension:types_extra_specs:create" DELETE_POLICY = "volume_extension:types_extra_specs:delete" GET_ALL_POLICY = "volume_extension:types_extra_specs:index" GET_POLICY = "volume_extension:types_extra_specs:show" READ_SENSITIVE_POLICY = "volume_extension:types_extra_specs:read_sensitive" UPDATE_POLICY = "volume_extension:types_extra_specs:update" deprecated_get_all_policy = base.CinderDeprecatedRule( name=GET_ALL_POLICY, check_str="" ) deprecated_get_policy = base.CinderDeprecatedRule( name=GET_POLICY, check_str="" ) type_extra_specs_policies = [ policy.DocumentedRuleDefault( name=GET_ALL_POLICY, check_str=base.SYSTEM_READER_OR_PROJECT_READER, description="List type extra specs.", operations=[ { 'method': 'GET', 'path': '/types/{type_id}/extra_specs' } ], deprecated_rule=deprecated_get_all_policy, ), policy.DocumentedRuleDefault( name=CREATE_POLICY, check_str=base.RULE_ADMIN_API, description="Create type extra specs.", operations=[ { 'method': 'POST', 'path': '/types/{type_id}/extra_specs' } ] ), policy.DocumentedRuleDefault( name=GET_POLICY, check_str=base.SYSTEM_READER_OR_PROJECT_READER, description="Show one specified type extra specs.", operations=[ { 'method': 'GET', 'path': '/types/{type_id}/extra_specs/{extra_spec_key}' } ], deprecated_rule=deprecated_get_policy, ), policy.DocumentedRuleDefault( name=READ_SENSITIVE_POLICY, check_str=base.RULE_ADMIN_API, description=("Include extra_specs fields that may reveal sensitive " "information about the deployment that should not be " "exposed to end users in various volume-type responses " "that show extra_specs. The ability to make these calls " "is governed by other policies."), operations=[ { 'method': 'GET', 'path': '/types' }, { 'method': 'GET', 'path': '/types/{type_id}' }, { 'method': 'GET', 'path': '/types/{type_id}/extra_specs' }, { 'method': 'GET', 'path': '/types/{type_id}/extra_specs/{extra_spec_key}' } ] ), policy.DocumentedRuleDefault( name=UPDATE_POLICY, check_str=base.RULE_ADMIN_API, description="Update type extra specs.", operations=[ { 'method': 'PUT', 'path': '/types/{type_id}/extra_specs/{extra_spec_key}' } ] ), policy.DocumentedRuleDefault( name=DELETE_POLICY, check_str=base.RULE_ADMIN_API, description="Delete type extra specs.", operations=[ { 'method': 'DELETE', 'path': '/types/{type_id}/extra_specs/{extra_spec_key}' } ] ), ] def list_rules(): return type_extra_specs_policies ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/policies/volume_access.py0000664000175000017500000000675100000000000021355 0ustar00zuulzuul00000000000000# Copyright (c) 2017 Huawei Technologies Co., Ltd. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
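# ---------------------------------------------------------------------------
# Illustrative sketch (not upstream code): rules such as TYPE_ACCESS_POLICY
# below do not gate an API call by themselves; they only decide whether an
# extra field is added to a response. A typical pattern is a non-raising
# check, using the do_raise=False behaviour documented in cinder/policy.py.
# The view-builder helper here is hypothetical.
#
#     from cinder import policy
#     from cinder.policies import volume_access
#
#     def _build_type_view(context, vol_type):
#         view = {'name': vol_type.name}  # hypothetical view builder
#         if policy.authorize(context, volume_access.TYPE_ACCESS_POLICY,
#                             {'project_id': context.project_id},
#                             do_raise=False):
#             view['os-volume-type-access:is_public'] = vol_type.is_public
#         return view
# ---------------------------------------------------------------------------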
from oslo_policy import policy from cinder.policies import base ADD_PROJECT_POLICY = "volume_extension:volume_type_access:addProjectAccess" REMOVE_PROJECT_POLICY = \ "volume_extension:volume_type_access:removeProjectAccess" TYPE_ACCESS_POLICY = "volume_extension:volume_type_access" TYPE_ACCESS_WHO_POLICY = "volume_extension:volume_type_access:get_all_for_type" deprecated_volume_type_access = base.CinderDeprecatedRule( name=TYPE_ACCESS_POLICY, check_str=base.RULE_ADMIN_OR_OWNER ) deprecated_type_access_who_policy = base.CinderDeprecatedRule( name=TYPE_ACCESS_WHO_POLICY, # TODO: revise check_str and dep_reason in Yoga check_str=TYPE_ACCESS_POLICY, deprecated_reason=( f"Reason: '{TYPE_ACCESS_WHO_POLICY}' is a new policy that protects " f"an API call formerly governed by '{TYPE_ACCESS_POLICY}', but which " 'has been separated for finer-grained policy control.'), ) volume_access_policies = [ policy.DocumentedRuleDefault( name=TYPE_ACCESS_POLICY, check_str=base.SYSTEM_ADMIN_OR_PROJECT_MEMBER, description=( "Adds the boolean field 'os-volume-type-access:is_public' to " 'the responses for these API calls. The ability to make these ' 'calls is governed by other policies.'), operations=[ { 'method': 'GET', 'path': '/types' }, { 'method': 'GET', 'path': '/types/{type_id}' }, { 'method': 'POST', 'path': '/types' } ], deprecated_rule=deprecated_volume_type_access, ), policy.DocumentedRuleDefault( name=ADD_PROJECT_POLICY, check_str=base.RULE_ADMIN_API, description="Add volume type access for project.", operations=[ { 'method': 'POST', 'path': '/types/{type_id}/action (addProjectAccess)' } ]), policy.DocumentedRuleDefault( name=REMOVE_PROJECT_POLICY, check_str=base.RULE_ADMIN_API, description="Remove volume type access for project.", operations=[ { 'method': 'POST', 'path': '/types/{type_id}/action (removeProjectAccess)' } ]), policy.DocumentedRuleDefault( name=TYPE_ACCESS_WHO_POLICY, check_str=base.RULE_ADMIN_API, description=( 'List private volume type access detail, that is, list the ' 'projects that have access to this volume type.'), operations=[ { 'method': 'GET', 'path': '/types/{type_id}/os-volume-type-access' } ], deprecated_rule=deprecated_type_access_who_policy, ), ] def list_rules(): return volume_access_policies ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/policies/volume_actions.py0000664000175000017500000003027300000000000021550 0ustar00zuulzuul00000000000000# Copyright (c) 2017 Huawei Technologies Co., Ltd. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from oslo_policy import policy from cinder.policies import base EXTEND_POLICY = "volume:extend" EXTEND_ATTACHED_POLICY = "volume:extend_attached_volume" EXTEND_COMPLETE_POLICY = \ "volume_extension:volume_admin_actions:extend_volume_completion" REVERT_POLICY = "volume:revert_to_snapshot" RESET_STATUS = "volume_extension:volume_admin_actions:reset_status" RETYPE_POLICY = "volume:retype" UPDATE_READONLY_POLICY = "volume:update_readonly_flag" FORCE_DELETE_POLICY = "volume_extension:volume_admin_actions:force_delete" FORCE_DETACH_POLICY = "volume_extension:volume_admin_actions:force_detach" UPLOAD_PUBLIC_POLICY = "volume_extension:volume_actions:upload_public" UPLOAD_IMAGE_POLICY = "volume_extension:volume_actions:upload_image" MIGRATE_POLICY = "volume_extension:volume_admin_actions:migrate_volume" MIGRATE_COMPLETE_POLICY = \ "volume_extension:volume_admin_actions:migrate_volume_completion" DETACH_POLICY = "volume_extension:volume_actions:detach" ATTACH_POLICY = "volume_extension:volume_actions:attach" BEGIN_DETACHING_POLICY = "volume_extension:volume_actions:begin_detaching" UNRESERVE_POLICY = "volume_extension:volume_actions:unreserve" RESERVE_POLICY = "volume_extension:volume_actions:reserve" ROLL_DETACHING_POLICY = "volume_extension:volume_actions:roll_detaching" TERMINATE_POLICY = "volume_extension:volume_actions:terminate_connection" INITIALIZE_POLICY = "volume_extension:volume_actions:initialize_connection" REIMAGE_POLICY = "volume:reimage" REIMAGE_RESERVED_POLICY = "volume:reimage_reserved" deprecated_extend_policy = base.CinderDeprecatedRule( name=EXTEND_POLICY, check_str=base.RULE_ADMIN_OR_OWNER ) deprecated_extend_attached_policy = base.CinderDeprecatedRule( name=EXTEND_ATTACHED_POLICY, check_str=base.RULE_ADMIN_OR_OWNER ) deprecated_revert_policy = base.CinderDeprecatedRule( name=REVERT_POLICY, check_str=base.RULE_ADMIN_OR_OWNER ) deprecated_retype_policy = base.CinderDeprecatedRule( name=RETYPE_POLICY, check_str=base.RULE_ADMIN_OR_OWNER ) deprecated_update_only_policy = base.CinderDeprecatedRule( name=UPDATE_READONLY_POLICY, check_str=base.RULE_ADMIN_OR_OWNER ) deprecated_upload_image_policy = base.CinderDeprecatedRule( name=UPLOAD_IMAGE_POLICY, check_str=base.RULE_ADMIN_OR_OWNER ) deprecated_initialize_policy = base.CinderDeprecatedRule( name=INITIALIZE_POLICY, check_str=base.RULE_ADMIN_OR_OWNER ) deprecated_terminate_policy = base.CinderDeprecatedRule( name=TERMINATE_POLICY, check_str=base.RULE_ADMIN_OR_OWNER ) deprecated_roll_detaching_policy = base.CinderDeprecatedRule( name=ROLL_DETACHING_POLICY, check_str=base.RULE_ADMIN_OR_OWNER ) deprecated_reserve_policy = base.CinderDeprecatedRule( name=RESERVE_POLICY, check_str=base.RULE_ADMIN_OR_OWNER ) deprecated_unreserve_policy = base.CinderDeprecatedRule( name=UNRESERVE_POLICY, check_str=base.RULE_ADMIN_OR_OWNER ) deprecated_begin_detaching_policy = base.CinderDeprecatedRule( name=BEGIN_DETACHING_POLICY, check_str=base.RULE_ADMIN_OR_OWNER ) deprecated_attach_policy = base.CinderDeprecatedRule( name=ATTACH_POLICY, check_str=base.RULE_ADMIN_OR_OWNER ) deprecated_detach_policy = base.CinderDeprecatedRule( name=DETACH_POLICY, check_str=base.RULE_ADMIN_OR_OWNER ) volume_action_policies = [ policy.DocumentedRuleDefault( name=EXTEND_POLICY, check_str=base.SYSTEM_ADMIN_OR_PROJECT_MEMBER, description="Extend a volume.", operations=[ { 'method': 'POST', 'path': '/volumes/{volume_id}/action (os-extend)' } ], deprecated_rule=deprecated_extend_policy, ), policy.DocumentedRuleDefault( name=EXTEND_ATTACHED_POLICY, 
check_str=base.SYSTEM_ADMIN_OR_PROJECT_MEMBER, description="Extend a attached volume.", operations=[ { 'method': 'POST', 'path': '/volumes/{volume_id}/action (os-extend)' } ], deprecated_rule=deprecated_extend_attached_policy, ), policy.DocumentedRuleDefault( name=EXTEND_COMPLETE_POLICY, check_str=base.RULE_ADMIN_API, description="Complete a volume extend operation.", operations=[{ 'method': 'POST', 'path': '/volumes/{volume_id}/action (os-extend_volume_completion)'} ], ), policy.DocumentedRuleDefault( name=REVERT_POLICY, check_str=base.SYSTEM_ADMIN_OR_PROJECT_MEMBER, description="Revert a volume to a snapshot.", operations=[ { 'method': 'POST', 'path': '/volumes/{volume_id}/action (revert)' } ], deprecated_rule=deprecated_revert_policy, ), policy.DocumentedRuleDefault( name=RESET_STATUS, check_str=base.RULE_ADMIN_API, description="Reset status of a volume.", operations=[ { 'method': 'POST', 'path': '/volumes/{volume_id}/action (os-reset_status)' } ], ), policy.DocumentedRuleDefault( name=RETYPE_POLICY, check_str=base.SYSTEM_ADMIN_OR_PROJECT_MEMBER, description="Retype a volume.", operations=[ { 'method': 'POST', 'path': '/volumes/{volume_id}/action (os-retype)' } ], deprecated_rule=deprecated_retype_policy, ), policy.DocumentedRuleDefault( name=UPDATE_READONLY_POLICY, check_str=base.SYSTEM_ADMIN_OR_PROJECT_MEMBER, description="Update a volume's readonly flag.", operations=[ { 'method': 'POST', 'path': '/volumes/{volume_id}/action (os-update_readonly_flag)' } ], deprecated_rule=deprecated_update_only_policy, ), policy.DocumentedRuleDefault( name=FORCE_DELETE_POLICY, check_str=base.RULE_ADMIN_API, description="Force delete a volume.", operations=[ { 'method': 'POST', 'path': '/volumes/{volume_id}/action (os-force_delete)' } ], ), policy.DocumentedRuleDefault( name=UPLOAD_PUBLIC_POLICY, check_str=base.RULE_ADMIN_API, description="Upload a volume to image with public visibility.", operations=[ { 'method': 'POST', 'path': '/volumes/{volume_id}/action (os-volume_upload_image)' } ], ), policy.DocumentedRuleDefault( name=UPLOAD_IMAGE_POLICY, check_str=base.SYSTEM_ADMIN_OR_PROJECT_MEMBER, description="Upload a volume to image.", operations=[ { 'method': 'POST', 'path': '/volumes/{volume_id}/action (os-volume_upload_image)' } ], deprecated_rule=deprecated_upload_image_policy, ), policy.DocumentedRuleDefault( name=FORCE_DETACH_POLICY, check_str=base.RULE_ADMIN_API, description="Force detach a volume.", operations=[ { 'method': 'POST', 'path': '/volumes/{volume_id}/action (os-force_detach)' } ], ), policy.DocumentedRuleDefault( name=MIGRATE_POLICY, check_str=base.RULE_ADMIN_API, description="migrate a volume to a specified host.", operations=[ { 'method': 'POST', 'path': '/volumes/{volume_id}/action (os-migrate_volume)' } ], ), policy.DocumentedRuleDefault( name=MIGRATE_COMPLETE_POLICY, check_str=base.RULE_ADMIN_API, description="Complete a volume migration.", operations=[{ 'method': 'POST', 'path': '/volumes/{volume_id}/action (os-migrate_volume_completion)'} ], ), policy.DocumentedRuleDefault( name=INITIALIZE_POLICY, check_str=base.SYSTEM_ADMIN_OR_PROJECT_MEMBER, description="Initialize volume attachment.", operations=[{ 'method': 'POST', 'path': '/volumes/{volume_id}/action (os-initialize_connection)'} ], deprecated_rule=deprecated_initialize_policy, ), policy.DocumentedRuleDefault( name=TERMINATE_POLICY, check_str=base.SYSTEM_ADMIN_OR_PROJECT_MEMBER, description="Terminate volume attachment.", operations=[{ 'method': 'POST', 'path': '/volumes/{volume_id}/action (os-terminate_connection)'} ], 
deprecated_rule=deprecated_terminate_policy, ), policy.DocumentedRuleDefault( name=ROLL_DETACHING_POLICY, check_str=base.SYSTEM_ADMIN_OR_PROJECT_MEMBER, description="Roll back volume status to 'in-use'.", operations=[{ 'method': 'POST', 'path': '/volumes/{volume_id}/action (os-roll_detaching)'} ], deprecated_rule=deprecated_roll_detaching_policy, ), policy.DocumentedRuleDefault( name=RESERVE_POLICY, check_str=base.SYSTEM_ADMIN_OR_PROJECT_MEMBER, description="Mark volume as reserved.", operations=[{ 'method': 'POST', 'path': '/volumes/{volume_id}/action (os-reserve)'} ], deprecated_rule=deprecated_reserve_policy, ), policy.DocumentedRuleDefault( name=UNRESERVE_POLICY, check_str=base.SYSTEM_ADMIN_OR_PROJECT_MEMBER, description="Unmark volume as reserved.", operations=[{ 'method': 'POST', 'path': '/volumes/{volume_id}/action (os-unreserve)'} ], deprecated_rule=deprecated_unreserve_policy, ), policy.DocumentedRuleDefault( name=BEGIN_DETACHING_POLICY, check_str=base.SYSTEM_ADMIN_OR_PROJECT_MEMBER, description="Begin detach volumes.", operations=[{ 'method': 'POST', 'path': '/volumes/{volume_id}/action (os-begin_detaching)'} ], deprecated_rule=deprecated_begin_detaching_policy, ), policy.DocumentedRuleDefault( name=ATTACH_POLICY, check_str=base.SYSTEM_ADMIN_OR_PROJECT_MEMBER, description="Add attachment metadata.", operations=[{ 'method': 'POST', 'path': '/volumes/{volume_id}/action (os-attach)'} ], deprecated_rule=deprecated_attach_policy, ), policy.DocumentedRuleDefault( name=DETACH_POLICY, check_str=base.SYSTEM_ADMIN_OR_PROJECT_MEMBER, description="Clear attachment metadata.", operations=[{ 'method': 'POST', 'path': '/volumes/{volume_id}/action (os-detach)'} ], deprecated_rule=deprecated_detach_policy, ), policy.DocumentedRuleDefault( name=REIMAGE_POLICY, check_str=base.SYSTEM_ADMIN_OR_PROJECT_MEMBER, description="Reimage a volume in 'available' or 'error' status.", operations=[ { 'method': 'POST', 'path': '/volumes/{volume_id}/action (os-reimage)' } ]), policy.DocumentedRuleDefault( name=REIMAGE_RESERVED_POLICY, check_str=base.SYSTEM_ADMIN_OR_PROJECT_MEMBER, description="Reimage a volume in 'reserved' status.", operations=[ { 'method': 'POST', 'path': '/volumes/{volume_id}/action (os-reimage)' } ]), ] def list_rules(): return volume_action_policies ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/policies/volume_metadata.py0000664000175000017500000001503700000000000021671 0ustar00zuulzuul00000000000000# Copyright (c) 2017 Huawei Technologies Co., Ltd. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
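# ---------------------------------------------------------------------------
# Illustrative sketch (not upstream code): the defaults declared in these
# modules can be overridden by operators in the policy file (policy.yaml by
# default, per cinder/policy.py). The specific overrides below are
# hypothetical examples of standard oslo.policy YAML syntax, using rule names
# defined in this module.
#
#     # /etc/cinder/policy.yaml
#     # Only allow administrators to update volume metadata:
#     "volume:update_volume_metadata": "role:admin"
#     # Let project members see image metadata in volume detail responses:
#     "volume_extension:volume_image_metadata:show": "role:member and project_id:%(project_id)s"
# ---------------------------------------------------------------------------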
from oslo_policy import policy from cinder.policies import base GET_POLICY = "volume:get_volume_metadata" CREATE_POLICY = "volume:create_volume_metadata" DELETE_POLICY = "volume:delete_volume_metadata" UPDATE_POLICY = "volume:update_volume_metadata" IMAGE_METADATA_POLICY = "volume_extension:volume_image_metadata" IMAGE_METADATA_SHOW_POLICY = "volume_extension:volume_image_metadata:show" IMAGE_METADATA_SET_POLICY = "volume_extension:volume_image_metadata:set" IMAGE_METADATA_REMOVE_POLICY = "volume_extension:volume_image_metadata:remove" UPDATE_ADMIN_METADATA_POLICY = "volume:update_volume_admin_metadata" BASE_POLICY_NAME = 'volume:volume_metadata:%s' deprecated_get_volume_metadata = base.CinderDeprecatedRule( name=GET_POLICY, check_str=base.RULE_ADMIN_OR_OWNER ) deprecated_create_volume_metadata = base.CinderDeprecatedRule( name=CREATE_POLICY, check_str=base.RULE_ADMIN_OR_OWNER ) deprecated_update_volume_metadata = base.CinderDeprecatedRule( name=UPDATE_POLICY, check_str=base.RULE_ADMIN_OR_OWNER ) deprecated_delete_volume_metadata = base.CinderDeprecatedRule( name=DELETE_POLICY, check_str=base.RULE_ADMIN_OR_OWNER ) # this is being replaced in Xena by 3 more granular policies deprecated_image_metadata = base.CinderDeprecatedRule( name=IMAGE_METADATA_POLICY, check_str=base.RULE_ADMIN_OR_OWNER, deprecated_reason=( f'{IMAGE_METADATA_POLICY} has been replaced by more granular ' 'policies that separately govern show, set, and remove operations.') ) volume_metadata_policies = [ policy.DocumentedRuleDefault( name=GET_POLICY, check_str=base.SYSTEM_READER_OR_PROJECT_READER, description="Show volume's metadata or one specified metadata " "with a given key.", operations=[ { 'method': 'GET', 'path': '/volumes/{volume_id}/metadata' }, { 'method': 'GET', 'path': '/volumes/{volume_id}/metadata/{key}' }, { 'method': 'POST', 'path': '/volumes/{volume_id}/action (os-show_image_metadata)' } ], deprecated_rule=deprecated_get_volume_metadata, ), policy.DocumentedRuleDefault( name=CREATE_POLICY, check_str=base.SYSTEM_ADMIN_OR_PROJECT_MEMBER, description="Create volume metadata.", operations=[ { 'method': 'POST', 'path': '/volumes/{volume_id}/metadata' } ], deprecated_rule=deprecated_create_volume_metadata, ), policy.DocumentedRuleDefault( name=UPDATE_POLICY, check_str=base.SYSTEM_ADMIN_OR_PROJECT_MEMBER, description=( "Replace a volume's metadata dictionary or update a single " "metadatum with a given key."), operations=[ { 'method': 'PUT', 'path': '/volumes/{volume_id}/metadata' }, { 'method': 'PUT', 'path': '/volumes/{volume_id}/metadata/{key}' } ], deprecated_rule=deprecated_update_volume_metadata, ), policy.DocumentedRuleDefault( name=DELETE_POLICY, check_str=base.SYSTEM_ADMIN_OR_PROJECT_MEMBER, description="Delete a volume's metadatum with the given key.", operations=[ { 'method': 'DELETE', 'path': '/volumes/{volume_id}/metadata/{key}' } ], deprecated_rule=deprecated_delete_volume_metadata, ), policy.DocumentedRuleDefault( name=IMAGE_METADATA_SHOW_POLICY, check_str=base.SYSTEM_READER_OR_PROJECT_READER, description=( "Include a volume's image metadata in volume detail responses. 
" "The ability to make these calls is governed by other policies."), operations=[ { 'method': 'GET', 'path': '/volumes/detail' }, { 'method': 'GET', 'path': '/volumes/{volume_id}' } ], # TODO: will need its own deprecated rule in Yoga deprecated_rule=deprecated_image_metadata, ), policy.DocumentedRuleDefault( name=IMAGE_METADATA_SET_POLICY, check_str=base.SYSTEM_ADMIN_OR_PROJECT_MEMBER, description="Set image metadata for a volume", operations=[ { 'method': 'POST', 'path': '/volumes/{volume_id}/action (os-set_image_metadata)' } ], # TODO: will need its own deprecated rule in Yoga deprecated_rule=deprecated_image_metadata, ), policy.DocumentedRuleDefault( name=IMAGE_METADATA_REMOVE_POLICY, check_str=base.SYSTEM_ADMIN_OR_PROJECT_MEMBER, description="Remove specific image metadata from a volume", operations=[ { 'method': 'POST', 'path': '/volumes/{volume_id}/action (os-unset_image_metadata)' } ], # TODO: will need its own deprecated rule in Yoga deprecated_rule=deprecated_image_metadata, ), policy.DocumentedRuleDefault( name=UPDATE_ADMIN_METADATA_POLICY, # TODO: deprecate checkstring in Yoga check_str=base.RULE_ADMIN_API, description=( "Update volume admin metadata. This permission is required " "to complete these API calls, though the ability to make these " "calls is governed by other policies."), operations=[ { 'method': 'POST', 'path': '/volumes/{volume_id}/action (os-update_readonly_flag)' }, { 'method': 'POST', 'path': '/volumes/{volume_id}/action (os-attach)' } ]), ] def list_rules(): return volume_metadata_policies ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/policies/volume_transfer.py0000664000175000017500000001026000000000000021726 0ustar00zuulzuul00000000000000# Copyright (c) 2017 Huawei Technologies Co., Ltd. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from oslo_policy import policy from cinder.policies import base CREATE_POLICY = "volume:create_transfer" ACCEPT_POLICY = "volume:accept_transfer" DELETE_POLICY = "volume:delete_transfer" GET_POLICY = "volume:get_transfer" GET_ALL_POLICY = "volume:get_all_transfers" deprecated_get_all_transfers = base.CinderDeprecatedRule( name=GET_ALL_POLICY, check_str=base.RULE_ADMIN_OR_OWNER ) deprecated_create_transfer = base.CinderDeprecatedRule( name=CREATE_POLICY, check_str=base.RULE_ADMIN_OR_OWNER ) deprecated_get_transfer = base.CinderDeprecatedRule( name=GET_POLICY, check_str=base.RULE_ADMIN_OR_OWNER ) deprecated_accept_transfer = base.CinderDeprecatedRule( name=ACCEPT_POLICY, check_str="" ) deprecated_delete_transfer = base.CinderDeprecatedRule( name=DELETE_POLICY, check_str=base.RULE_ADMIN_OR_OWNER ) volume_transfer_policies = [ policy.DocumentedRuleDefault( name=GET_ALL_POLICY, check_str=base.SYSTEM_READER_OR_PROJECT_READER, description="List volume transfer.", operations=[ { 'method': 'GET', 'path': '/os-volume-transfer' }, { 'method': 'GET', 'path': '/os-volume-transfer/detail' }, { 'method': 'GET', 'path': '/volume_transfers' }, { 'method': 'GET', 'path': '/volume-transfers/detail' } ], deprecated_rule=deprecated_get_all_transfers ), policy.DocumentedRuleDefault( name=CREATE_POLICY, check_str=base.SYSTEM_ADMIN_OR_PROJECT_MEMBER, description="Create a volume transfer.", operations=[ { 'method': 'POST', 'path': '/os-volume-transfer' }, { 'method': 'POST', 'path': '/volume_transfers' } ], deprecated_rule=deprecated_create_transfer ), policy.DocumentedRuleDefault( name=GET_POLICY, check_str=base.SYSTEM_READER_OR_PROJECT_READER, description="Show one specified volume transfer.", operations=[ { 'method': 'GET', 'path': '/os-volume-transfer/{transfer_id}' }, { 'method': 'GET', 'path': '/volume-transfers/{transfer_id}' } ], deprecated_rule=deprecated_get_transfer ), policy.DocumentedRuleDefault( name=ACCEPT_POLICY, check_str=base.SYSTEM_ADMIN_OR_PROJECT_MEMBER, description="Accept a volume transfer.", operations=[ { 'method': 'POST', 'path': '/os-volume-transfer/{transfer_id}/accept' }, { 'method': 'POST', 'path': '/volume-transfers/{transfer_id}/accept' } ], deprecated_rule=deprecated_accept_transfer ), policy.DocumentedRuleDefault( name=DELETE_POLICY, check_str=base.SYSTEM_ADMIN_OR_PROJECT_MEMBER, description="Delete volume transfer.", operations=[ { 'method': 'DELETE', 'path': '/os-volume-transfer/{transfer_id}' }, { 'method': 'DELETE', 'path': '/volume-transfers/{transfer_id}' } ], deprecated_rule=deprecated_delete_transfer ), ] def list_rules(): return volume_transfer_policies ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/policies/volume_type.py0000664000175000017500000002120300000000000021062 0ustar00zuulzuul00000000000000# Copyright (c) 2017 Huawei Technologies Co., Ltd. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from oslo_policy import policy from cinder.policies import base MANAGE_POLICY = "volume_extension:types_manage" CREATE_POLICY = "volume_extension:type_create" UPDATE_POLICY = "volume_extension:type_update" DELETE_POLICY = "volume_extension:type_delete" GET_POLICY = "volume_extension:type_get" GET_ALL_POLICY = "volume_extension:type_get_all" QOS_POLICY = "volume_extension:access_types_qos_specs_id" EXTRA_SPEC_POLICY = "volume_extension:access_types_extra_specs" # TODO: remove the next 2 in Yoga ENCRYPTION_POLICY = "volume_extension:volume_type_encryption" ENCRYPTION_BASE_POLICY_RULE = 'rule:%s' % ENCRYPTION_POLICY CREATE_ENCRYPTION_POLICY = "volume_extension:volume_type_encryption:create" GET_ENCRYPTION_POLICY = "volume_extension:volume_type_encryption:get" UPDATE_ENCRYPTION_POLICY = "volume_extension:volume_type_encryption:update" DELETE_ENCRYPTION_POLICY = "volume_extension:volume_type_encryption:delete" GENERAL_ENCRYPTION_POLICY_REASON = ( f"Reason: '{ENCRYPTION_POLICY}' was a convenience policy that allowed you " 'to set all volume encryption type policies to the same value. We are ' 'deprecating this rule to prepare for a future release in which the ' 'default values for policies that read, create/update, and delete ' 'encryption types will be different from each other.') # TODO: remove in Yoga deprecated_manage_policy = base.CinderDeprecatedRule( name=MANAGE_POLICY, check_str=base.RULE_ADMIN_API, deprecated_reason=(f'{MANAGE_POLICY} has been replaced by more granular ' 'policies that separately govern POST, PUT, and DELETE ' 'operations.'), ) deprecated_extra_spec_policy = base.CinderDeprecatedRule( name=EXTRA_SPEC_POLICY, check_str=base.RULE_ADMIN_API ) deprecated_encryption_create_policy = base.CinderDeprecatedRule( name=CREATE_ENCRYPTION_POLICY, # TODO: change to base.RULE_ADMIN_API in Yoga & remove dep_reason check_str=ENCRYPTION_BASE_POLICY_RULE, deprecated_reason=GENERAL_ENCRYPTION_POLICY_REASON, ) deprecated_encryption_get_policy = base.CinderDeprecatedRule( name=GET_ENCRYPTION_POLICY, # TODO: change to base.RULE_ADMIN_API in Yoga & remove dep_reason check_str=ENCRYPTION_BASE_POLICY_RULE, deprecated_reason=GENERAL_ENCRYPTION_POLICY_REASON, ) deprecated_encryption_update_policy = base.CinderDeprecatedRule( name=UPDATE_ENCRYPTION_POLICY, # TODO: change to base.RULE_ADMIN_API in Yoga & remove dep_reason check_str=ENCRYPTION_BASE_POLICY_RULE, deprecated_reason=GENERAL_ENCRYPTION_POLICY_REASON, ) deprecated_encryption_delete_policy = base.CinderDeprecatedRule( name=DELETE_ENCRYPTION_POLICY, # TODO: change to base.RULE_ADMIN_API in Yoga & remove dep_reason check_str=ENCRYPTION_BASE_POLICY_RULE, deprecated_reason=GENERAL_ENCRYPTION_POLICY_REASON, ) deprecated_get_volume_type = base.CinderDeprecatedRule( name=GET_POLICY, check_str="" ) deprecated_get_all_volume_type = base.CinderDeprecatedRule( name=GET_ALL_POLICY, check_str="" ) volume_type_policies = [ policy.DocumentedRuleDefault( name=CREATE_POLICY, check_str=base.RULE_ADMIN_API, description="Create volume type.", operations=[ { 'method': 'POST', 'path': '/types' }, ], # TODO: will need its own deprecated rule in Yoga deprecated_rule=deprecated_manage_policy, ), policy.DocumentedRuleDefault( name=UPDATE_POLICY, check_str=base.RULE_ADMIN_API, description="Update volume type.", operations=[ { 'method': 'PUT', 'path': '/types' }, ], # TODO: will need its own deprecated rule in Yoga deprecated_rule=deprecated_manage_policy, ), policy.DocumentedRuleDefault( name=DELETE_POLICY, check_str=base.RULE_ADMIN_API, description="Delete volume 
type.", operations=[ { 'method': 'DELETE', 'path': '/types' } ], # TODO: will need its own deprecated rule in Yoga deprecated_rule=deprecated_manage_policy, ), policy.DocumentedRuleDefault( name=GET_POLICY, check_str=base.SYSTEM_READER_OR_PROJECT_READER, description="Get one specific volume type.", operations=[ { 'method': 'GET', 'path': '/types/{type_id}' } ], deprecated_rule=deprecated_get_volume_type, ), policy.DocumentedRuleDefault( name=GET_ALL_POLICY, check_str=base.SYSTEM_READER_OR_PROJECT_READER, description="List volume types.", operations=[ { 'method': 'GET', 'path': '/types/' } ], deprecated_rule=deprecated_get_all_volume_type, ), policy.DocumentedRuleDefault( name=EXTRA_SPEC_POLICY, check_str=base.SYSTEM_READER_OR_PROJECT_READER, description=( "Include the volume type's extra_specs attribute in the volume " "type list or show requests. The ability to make these calls " "is governed by other policies."), operations=[ { 'method': 'GET', 'path': '/types/{type_id}' }, { 'method': 'GET', 'path': '/types' } ], deprecated_rule=deprecated_extra_spec_policy, ), policy.DocumentedRuleDefault( name=QOS_POLICY, check_str=base.RULE_ADMIN_API, description=( "Include the volume type's QoS specifications ID attribute in " "the volume type list or show requests. The ability to make " "these calls is governed by other policies."), operations=[ { 'method': 'GET', 'path': '/types/{type_id}' }, { 'method': 'GET', 'path': '/types' } ]), # TODO: remove in Yoga policy.RuleDefault( name=ENCRYPTION_POLICY, check_str=base.RULE_ADMIN_API, description=('DEPRECATED: This rule will be removed in the Yoga ' 'release.') ), policy.DocumentedRuleDefault( name=CREATE_ENCRYPTION_POLICY, check_str=base.RULE_ADMIN_API, description="Create volume type encryption.", operations=[ { 'method': 'POST', 'path': '/types/{type_id}/encryption' } ], deprecated_rule=deprecated_encryption_create_policy, ), policy.DocumentedRuleDefault( name=GET_ENCRYPTION_POLICY, check_str=base.RULE_ADMIN_API, description="Show a volume type's encryption type, " "show an encryption specs item.", operations=[ { 'method': 'GET', 'path': '/types/{type_id}/encryption' }, { 'method': 'GET', 'path': '/types/{type_id}/encryption/{key}' } ], deprecated_rule=deprecated_encryption_get_policy, ), policy.DocumentedRuleDefault( name=UPDATE_ENCRYPTION_POLICY, check_str=base.RULE_ADMIN_API, description="Update volume type encryption.", operations=[ { 'method': 'PUT', 'path': '/types/{type_id}/encryption/{encryption_id}' } ], deprecated_rule=deprecated_encryption_update_policy, ), policy.DocumentedRuleDefault( name=DELETE_ENCRYPTION_POLICY, check_str=base.RULE_ADMIN_API, description="Delete volume type encryption.", operations=[ { 'method': 'DELETE', 'path': '/types/{type_id}/encryption/{encryption_id}' } ], deprecated_rule=deprecated_encryption_delete_policy, ), ] def list_rules(): return volume_type_policies ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/policies/volumes.py0000664000175000017500000001676000000000000020220 0ustar00zuulzuul00000000000000# Copyright (c) 2017 Huawei Technologies Co., Ltd. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_policy import policy from cinder.policies import base CREATE_POLICY = "volume:create" CREATE_FROM_IMAGE_POLICY = "volume:create_from_image" GET_POLICY = "volume:get" GET_ALL_POLICY = "volume:get_all" UPDATE_POLICY = "volume:update" DELETE_POLICY = "volume:delete" FORCE_DELETE_POLICY = "volume:force_delete" HOST_ATTRIBUTE_POLICY = "volume_extension:volume_host_attribute" TENANT_ATTRIBUTE_POLICY = "volume_extension:volume_tenant_attribute" MIG_ATTRIBUTE_POLICY = "volume_extension:volume_mig_status_attribute" ENCRYPTION_METADATA_POLICY = "volume_extension:volume_encryption_metadata" MULTIATTACH_POLICY = "volume:multiattach" deprecated_create_volume = base.CinderDeprecatedRule( name=CREATE_POLICY, check_str="" ) deprecated_create_volume_from_image = base.CinderDeprecatedRule( name=CREATE_FROM_IMAGE_POLICY, check_str="" ) deprecated_get_volume = base.CinderDeprecatedRule( name=GET_POLICY, check_str=base.RULE_ADMIN_OR_OWNER ) deprecated_get_all_volumes = base.CinderDeprecatedRule( name=GET_ALL_POLICY, check_str=base.RULE_ADMIN_OR_OWNER ) deprecated_update_volume = base.CinderDeprecatedRule( name=UPDATE_POLICY, check_str=base.RULE_ADMIN_OR_OWNER ) deprecated_delete_volume = base.CinderDeprecatedRule( name=DELETE_POLICY, check_str=base.RULE_ADMIN_OR_OWNER ) deprecated_get_tenant_attributes = base.CinderDeprecatedRule( name=TENANT_ATTRIBUTE_POLICY, check_str=base.RULE_ADMIN_OR_OWNER ) deprecated_get_encryption_metadata = base.CinderDeprecatedRule( name=ENCRYPTION_METADATA_POLICY, check_str=base.RULE_ADMIN_OR_OWNER ) deprecated_create_multiattach_volume = base.CinderDeprecatedRule( name=MULTIATTACH_POLICY, check_str=base.RULE_ADMIN_OR_OWNER ) volumes_policies = [ policy.DocumentedRuleDefault( name=CREATE_POLICY, check_str=base.SYSTEM_ADMIN_OR_PROJECT_MEMBER, description="Create volume.", operations=[ { 'method': 'POST', 'path': '/volumes' } ], deprecated_rule=deprecated_create_volume ), policy.DocumentedRuleDefault( name=CREATE_FROM_IMAGE_POLICY, check_str=base.SYSTEM_ADMIN_OR_PROJECT_MEMBER, description="Create volume from image.", operations=[ { 'method': 'POST', 'path': '/volumes' } ], deprecated_rule=deprecated_create_volume_from_image ), policy.DocumentedRuleDefault( name=GET_POLICY, check_str=base.SYSTEM_READER_OR_PROJECT_READER, description="Show volume.", operations=[ { 'method': 'GET', 'path': '/volumes/{volume_id}' } ], deprecated_rule=deprecated_get_volume ), policy.DocumentedRuleDefault( name=GET_ALL_POLICY, check_str=base.SYSTEM_READER_OR_PROJECT_READER, description="List volumes or get summary of volumes.", operations=[ { 'method': 'GET', 'path': '/volumes' }, { 'method': 'GET', 'path': '/volumes/detail' }, { 'method': 'GET', 'path': '/volumes/summary' } ], deprecated_rule=deprecated_get_all_volumes ), policy.DocumentedRuleDefault( name=UPDATE_POLICY, check_str=base.SYSTEM_ADMIN_OR_PROJECT_MEMBER, description="Update volume or update a volume's bootable status.", operations=[ { 'method': 'PUT', 'path': '/volumes' }, # The API below calls the volume update API internally, which in # turn enforces the update policy. 
{ 'method': 'POST', 'path': '/volumes/{volume_id}/action (os-set_bootable)' } ], deprecated_rule=deprecated_update_volume ), policy.DocumentedRuleDefault( name=DELETE_POLICY, check_str=base.SYSTEM_ADMIN_OR_PROJECT_MEMBER, description="Delete volume.", operations=[ { 'method': 'DELETE', 'path': '/volumes/{volume_id}' } ], deprecated_rule=deprecated_delete_volume ), policy.DocumentedRuleDefault( name=FORCE_DELETE_POLICY, check_str=base.RULE_ADMIN_API, description="Force Delete a volume.", operations=[ { 'method': 'DELETE', 'path': '/volumes/{volume_id}' } ], ), policy.DocumentedRuleDefault( name=HOST_ATTRIBUTE_POLICY, check_str=base.RULE_ADMIN_API, description="List or show volume with host attribute.", operations=[ { 'method': 'GET', 'path': '/volumes/{volume_id}' }, { 'method': 'GET', 'path': '/volumes/detail' } ]), policy.DocumentedRuleDefault( name=TENANT_ATTRIBUTE_POLICY, check_str=base.SYSTEM_READER_OR_PROJECT_READER, description="List or show volume with tenant attribute.", operations=[ { 'method': 'GET', 'path': '/volumes/{volume_id}' }, { 'method': 'GET', 'path': '/volumes/detail' } ], deprecated_rule=deprecated_get_tenant_attributes ), policy.DocumentedRuleDefault( name=MIG_ATTRIBUTE_POLICY, check_str=base.RULE_ADMIN_API, description="List or show volume with migration status attribute.", operations=[ { 'method': 'GET', 'path': '/volumes/{volume_id}' }, { 'method': 'GET', 'path': '/volumes/detail' } ]), policy.DocumentedRuleDefault( name=ENCRYPTION_METADATA_POLICY, check_str=base.SYSTEM_READER_OR_PROJECT_READER, description="Show volume's encryption metadata.", operations=[ { 'method': 'GET', 'path': '/volumes/{volume_id}/encryption' }, { 'method': 'GET', 'path': '/volumes/{volume_id}/encryption/{encryption_key}' } ], deprecated_rule=deprecated_get_encryption_metadata ), policy.DocumentedRuleDefault( name=MULTIATTACH_POLICY, check_str=base.SYSTEM_ADMIN_OR_PROJECT_MEMBER, description="Create multiattach capable volume.", operations=[ { 'method': 'POST', 'path': '/volumes' } ], deprecated_rule=deprecated_create_multiattach_volume ), ] def list_rules(): return volumes_policies ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/policies/workers.py0000664000175000017500000000211400000000000020206 0ustar00zuulzuul00000000000000# Copyright (c) 2017 Huawei Technologies Co., Ltd. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
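# ---------------------------------------------------------------------------
# Illustrative sketch (not upstream code): cinder/policy.py (later in this
# archive) exposes get_enforcer() for the oslopolicy CLI tools, which is how
# a sample policy file covering the rules in this package is typically
# generated. The command and flags come from oslo.policy (the 'namespace' and
# 'output-file' options are the ones get_enforcer() strips from sys.argv);
# the output path is a hypothetical choice.
#
#     $ oslopolicy-sample-generator --namespace cinder \
#           --output-file etc/cinder/policy.yaml.sample
# ---------------------------------------------------------------------------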
from oslo_policy import policy from cinder.policies import base CLEAN_POLICY = 'workers:cleanup' workers_policies = [ policy.DocumentedRuleDefault( name=CLEAN_POLICY, check_str=base.RULE_ADMIN_API, description="Clean up workers.", operations=[ { 'method': 'POST', 'path': '/workers/cleanup' } ]) ] def list_rules(): return workers_policies ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/policy.py0000664000175000017500000001632500000000000016213 0ustar00zuulzuul00000000000000# Copyright (c) 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Policy Engine For Cinder""" import sys import typing from typing import Optional, Type from oslo_config import cfg from oslo_log import log as logging from oslo_policy import opts as policy_opts from oslo_policy import policy from oslo_utils import excutils from cinder import context from cinder import exception from cinder import policies CONF = cfg.CONF LOG = logging.getLogger(__name__) # TODO(gmann): Remove overriding the default value of config options: # - 'enforce_scope', and 'enforce_new_defaults' once cinder is ready with the # new RBAC (oslo_policy enable them by default) DEFAULT_POLICY_FILENAME = 'policy.yaml' policy_opts.set_defaults( cfg.CONF, enforce_scope=False, enforce_new_defaults=False) _ENFORCER: Optional[policy.Enforcer] = None def reset() -> None: global _ENFORCER if _ENFORCER: _ENFORCER.clear() _ENFORCER = None def init(use_conf=True, suppress_deprecation_warnings: bool = False) -> None: """Init an Enforcer class. :param use_conf: Whether to load rules from config file. """ global _ENFORCER if not _ENFORCER: _ENFORCER = policy.Enforcer( CONF, use_conf=use_conf, fallback_to_json_file=False) _ENFORCER.suppress_deprecation_warnings = suppress_deprecation_warnings register_rules(_ENFORCER) _ENFORCER.load_rules() _ENFORCER = typing.cast(policy.Enforcer, _ENFORCER) def enforce(context, action: str, target: dict): """Verifies that the action is valid on the target in this context. :param context: cinder context :param action: string representing the action to be checked this should be colon separated for clarity. i.e. ``compute:create_instance``, ``compute:attach_volume``, ``volume:attach_volume`` :param target: dictionary representing the object of the action for object creation this should be a dictionary representing the location of the object e.g. ``{'project_id': context.project_id}`` :raises PolicyNotAuthorized: if verification fails. """ init() assert _ENFORCER is not None try: return _ENFORCER.enforce(action, target, context, do_raise=True, exc=exception.PolicyNotAuthorized, action=action) except policy.InvalidScope: raise exception.PolicyNotAuthorized(action=action) def set_rules(rules: dict, overwrite: bool = True, use_conf: bool = False) -> None: """Set rules based on the provided dict of rules. :param rules: New rules to use. It should be an instance of dict. 
:param overwrite: Whether to overwrite current rules or update them with the new rules. :param use_conf: Whether to reload rules from config file. """ init(use_conf=False) assert _ENFORCER is not None _ENFORCER.set_rules(rules, overwrite, use_conf) def get_rules(): if _ENFORCER: return _ENFORCER.rules def register_rules(enforcer): enforcer.register_defaults(policies.list_rules()) def get_enforcer() -> policy.Enforcer: # This method is for use by oslopolicy CLI scripts. Those scripts need the # 'output-file' and 'namespace' options, but having those in sys.argv means # loading the Cinder config options will fail as those are not expected to # be present. So we pass in an arg list with those stripped out. conf_args = [] # Start at 1 because cfg.CONF expects the equivalent of sys.argv[1:] i = 1 while i < len(sys.argv): if sys.argv[i].strip('-') in ['namespace', 'output-file']: i += 2 continue conf_args.append(sys.argv[i]) i += 1 cfg.CONF(conf_args, project='cinder') init() assert _ENFORCER is not None return _ENFORCER def authorize(context, action: str, target: dict, do_raise: bool = True, exc: Optional[Type[Exception]] = None): """Verifies that the action is valid on the target in this context. :param context: cinder context :param action: string representing the action to be checked this should be colon separated for clarity. i.e. ``compute:create_instance``, ``compute:attach_volume``, ``volume:attach_volume`` :param target: dictionary representing the object of the action for object creation this should be a dictionary representing the location of the object e.g. ``{'project_id': context.project_id}`` :param do_raise: if True (the default), raises PolicyNotAuthorized; if False, returns False :param exc: Class of the exception to raise if the check fails. Any remaining arguments passed to :meth:`authorize` (both positional and keyword arguments) will be passed to the exception class. If not specified, :class:`PolicyNotAuthorized` will be used. :raises cinder.exception.PolicyNotAuthorized: if verification fails and do_raise is True. Or if 'exc' is specified it will raise an exception of that type. :return: returns a non-False value (not necessarily "True") if authorized, and the exact value False if not authorized and do_raise is False. 
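A minimal usage sketch (the action name and target dict here are purely
illustrative, not a prescribed call)::

    from cinder import context as cinder_context
    from cinder import policy

    ctxt = cinder_context.get_admin_context()
    # With do_raise=False this returns False instead of raising when the
    # check fails; otherwise it returns a non-False value.
    policy.authorize(ctxt, 'volume:get',
                     {'project_id': ctxt.project_id},
                     do_raise=False)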
""" init() assert _ENFORCER is not None if not exc: exc = exception.PolicyNotAuthorized try: result = _ENFORCER.authorize(action, target, context, do_raise=do_raise, exc=exc, action=action) except policy.PolicyNotRegistered: with excutils.save_and_reraise_exception(): LOG.exception('Policy not registered') except Exception: with excutils.save_and_reraise_exception(): LOG.debug('Policy check for %(action)s failed with context ' '%(credentials)s', {'action': action, 'credentials': context.to_policy_values()}) return result def check_is_admin(context: 'context.RequestContext'): """Whether or not user is admin according to policy setting.""" init() assert _ENFORCER is not None # the target is user-self credentials = context.to_policy_values() target = credentials return _ENFORCER.authorize('context_is_admin', target, credentials) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315577.0991185 cinder-27.0.0/cinder/privsep/0000775000175000017500000000000000000000000016023 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/privsep/__init__.py0000664000175000017500000000227200000000000020137 0ustar00zuulzuul00000000000000# Copyright 2016 Red Hat, Inc # Copyright 2017 Rackspace Australia # Copyright 2018 Michael Still and Aptira # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Setup privsep decorator.""" from oslo_privsep import capabilities from oslo_privsep import priv_context sys_admin_pctxt = priv_context.PrivContext( 'cinder', cfg_section='cinder_sys_admin', pypath=__name__ + '.sys_admin_pctxt', capabilities=[capabilities.CAP_CHOWN, capabilities.CAP_DAC_OVERRIDE, capabilities.CAP_DAC_READ_SEARCH, capabilities.CAP_FOWNER, capabilities.CAP_NET_ADMIN, capabilities.CAP_SYS_ADMIN], ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/privsep/cgroup.py0000664000175000017500000000342700000000000017702 0ustar00zuulzuul00000000000000# Copyright 2016 Red Hat, Inc # Copyright 2017 Rackspace Australia # Copyright 2018 Michael Still and Aptira # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Helpers for cgroup related routines. 
""" import os.path from oslo_concurrency import processutils import cinder.privsep @cinder.privsep.sys_admin_pctxt.entrypoint def cgroup_create(name): # If this path exists, it means we have support for cgroups v2 if os.path.isfile('/sys/fs/cgroup/cgroup.controllers'): # cgroups v2 doesn't support io, but blkio instead. processutils.execute('cgcreate', '-g', 'io:%s' % name) else: processutils.execute('cgcreate', '-g', 'blkio:%s' % name) @cinder.privsep.sys_admin_pctxt.entrypoint def cgroup_limit(name, rw, dev, bps): if os.path.isfile('/sys/fs/cgroup/cgroup.controllers'): if rw == 'read': cgset_arg = 'rbps' else: cgset_arg = 'wbps' processutils.execute('cgset', '-r', 'io.max=%s %s=%s' % (dev, cgset_arg, bps), name) else: processutils.execute('cgset', '-r', 'blkio.throttle.%s_bps_device=%s %d' % (rw, dev, bps), name) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/privsep/format_inspector.py0000664000175000017500000000255500000000000021762 0ustar00zuulzuul00000000000000# Copyright 2024 Red Hat, Inc # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Helpers for the image format_inspector. """ from cinder.image import format_inspector import cinder.privsep @cinder.privsep.sys_admin_pctxt.entrypoint def get_format_if_safe(path, allow_qcow2_backing_file): """Returns a str format name if the format is safe, otherwise None""" return _get_format_if_safe(path, allow_qcow2_backing_file) def _get_format_if_safe(path, allow_qcow2_backing_file): """Returns a str format name if the format is safe, otherwise None""" inspector = format_inspector.detect_file_format(path) format_name = str(inspector) safe = inspector.safety_check() if not safe and format_name == 'qcow2' and allow_qcow2_backing_file: safe = inspector.safety_check_allow_backing_file() if safe: return format_name ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/privsep/fs.py0000664000175000017500000000166500000000000017015 0ustar00zuulzuul00000000000000# Copyright 2018 Red Hat, Inc # Copyright 2017 Rackspace Australia # Copyright 2018 Michael Still and Aptira # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Helpers for filesystem related routines. 
""" from oslo_concurrency import processutils import cinder.privsep @cinder.privsep.sys_admin_pctxt.entrypoint def umount(mountpoint): processutils.execute('umount', mountpoint, attempts=1, delay_on_retry=True) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/privsep/lvm.py0000664000175000017500000000245400000000000017200 0ustar00zuulzuul00000000000000# Copyright 2018 Red Hat, Inc # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Helpers for lvm related routines """ from oslo_concurrency import processutils import cinder.privsep @cinder.privsep.sys_admin_pctxt.entrypoint def udevadm_settle(): processutils.execute('udevadm', 'settle') @cinder.privsep.sys_admin_pctxt.entrypoint def lvrename(vg_name, lv_name, new_name): processutils.execute( 'lvrename', vg_name, lv_name, new_name) @cinder.privsep.sys_admin_pctxt.entrypoint def create_vg(vg_name, pv_list): cmd = ['vgcreate', vg_name, ','.join(pv_list)] processutils.execute(*cmd) @cinder.privsep.sys_admin_pctxt.entrypoint def lvconvert(vg_name, snapshot_name): processutils.execute( 'lvconvert', '--merge', '%s/%s' % (vg_name, snapshot_name)) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/privsep/path.py0000664000175000017500000000215300000000000017332 0ustar00zuulzuul00000000000000# Copyright 2018 Red Hat, Inc # Copyright 2017 Rackspace Australia # Copyright 2018 Michael Still and Aptira # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Helpers for path related routines. 
""" import os from cinder import exception import cinder.privsep @cinder.privsep.sys_admin_pctxt.entrypoint def touch(path): if os.path.exists(path): os.utime(path, None) else: open(path, 'a').close() @cinder.privsep.sys_admin_pctxt.entrypoint def symlink(src, dest): if not os.path.exists(src): raise exception.FileNotFound(file_path=src) os.symlink(src, dest) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315577.0991185 cinder-27.0.0/cinder/privsep/targets/0000775000175000017500000000000000000000000017474 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/privsep/targets/__init__.py0000664000175000017500000000000000000000000021573 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/privsep/targets/nvmet.py0000664000175000017500000001762500000000000021212 0ustar00zuulzuul00000000000000# Copyright 2022 Red Hat, Inc # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """NVMet Python Interface using privsep This file adds the privsep support to the nvmet package so it can be easily consumed by Cinder nvmet target. It also: - Adds some methods to the Root class to be able to get a specific subsystem or port directly without having to go through all the existing ones. - Presents the CFSNotFound exception as a NotFound exception which is easier to consume. """ import os import nvmet from oslo_log import log as logging from cinder import exception from cinder import privsep LOG = logging.getLogger(__name__) ################### # Helper methods to serialize/deserialize parameters to be sent through privsep # and to do the instance/class calls on the privsep side. def serialize(instance): """Serialize parameters, specially nvmet instances. The idea is to be able to pass an nvmet instance to privsep methods, since they are sometimes required as parameters (ie: port.setup) and also to pass the instance where do_privsep_call has to call a specific method. Instances are passed as a tuple, with the name of the class as the first element, and in the second element the kwargs necessary to instantiate the instance of that class. To differentiate nvmet instances from tuples there is a 'tuple' value that can be passed in the first element of the tuple to differentiate them. All other instances as passed unaltered. 
""" if isinstance(instance, nvmet.Root): return ('Root', {}) if isinstance(instance, (nvmet.Subsystem, nvmet.Host)): return (type(instance).__name__, {'nqn': instance.nqn, 'mode': 'lookup'}) if isinstance(instance, nvmet.Namespace): return ('Namespace', {'nsid': instance.nsid, 'subsystem': serialize(instance.subsystem), 'mode': 'lookup'}) if isinstance(instance, nvmet.Port): return ('Port', {'portid': instance.portid, 'mode': 'lookup'}) if isinstance(instance, nvmet.Referral): return ('Referral', {'name': instance.name, 'port': serialize(instance.port), 'mode': 'lookup'}) if isinstance(instance, nvmet.ANAGroup): return ('ANAGroup', {'grpid': instance.grpid, 'port': serialize(instance.port), 'mode': 'lookup'}) if isinstance(instance, tuple): return ('tuple', instance) return instance def deserialize(data): """Deserialize an instance, specially nvmet instances. Reverse operation of the serialize method. Converts an nvmet instance serialized in a tuple into an actual nvmet instance. """ if not isinstance(data, tuple): return data cls_name, cls_params = data if cls_name == 'tuple': return cls_params # Parameters for the instantiation of the class can be nvmet objects # themselves. params = {name: deserialize(value) for name, value in cls_params.items()} # We don't want the classes from the nvmet method but ours instead instance = getattr(nvmet, cls_name)(**params) return instance def deserialize_params(args, kwargs): """Deserialize function arguments using deserialize method.""" args = [deserialize(arg) for arg in args] kwargs = {key: deserialize(value) for key, value in kwargs.items()} return args, kwargs def _nvmet_setup_failure(message): """Simple error method to use when calling nvmet setup methods.""" LOG.error(message) raise exception.CinderException(message) @privsep.sys_admin_pctxt.entrypoint def do_privsep_call(instance, method_name, *args, **kwargs): """General privsep method for instance calls. Handle privsep method calls by deserializing the instance where we want to call a given method with the deserialized parameters. """ LOG.debug('Calling %s on %s with %s - %s', method_name, instance, args, kwargs) instance = deserialize(instance) method = getattr(instance, method_name) args, kwargs = deserialize_params(args, kwargs) # NOTE: No returning nvmet objects support. If needed add serialization on # the result and deserialization decorator before the entrypoint. return method(*args, **kwargs) @privsep.sys_admin_pctxt.entrypoint def _privsep_setup(cls_name, *args, **kwargs): """Special privsep method for nvmet setup method calls. The setup method is a special case because it's a class method (which privsep cannot handle) and also requires a function for the error handling. This method accepts a class name and reconstructs it, then calls the class' setup method passing our own error function. 
""" LOG.debug('Setup %s with %s - %s', cls_name, args, kwargs) cls = getattr(nvmet, cls_name) args, kwargs = deserialize_params(args, kwargs) kwargs['err_func'] = _nvmet_setup_failure return cls.setup(*args, **kwargs) def privsep_setup(cls_name, *args, **kwargs): """Wrapper for _privsep_setup that accepts err_func argument.""" # err_func parameter hardcoded in _privsep_setup as it cannot be serialized if 'err_func' in kwargs: err_func = kwargs.pop('err_func') else: # positional is always last argument of the args tuple err_func = args[-1] args = args[:-1] try: return _privsep_setup(cls_name, *args, **kwargs) except exception.CinderException as exc: if not err_func: raise err_func(exc.msg) ################### # Classes that don't currently have privsep support Host = nvmet.Host Referral = nvmet.Referral ANAGroup = nvmet.ANAGroup ################### # Custom classes that divert privileges calls to privsep # Support in these classes is limited to what's needed by the nvmet target. # Convenience error class link to nvmet's NotFound = nvmet.nvme.CFSNotFound class Namespace(nvmet.Namespace): def __init__(self, subsystem, nsid=None, mode='lookup'): super().__init__(subsystem=subsystem, nsid=nsid, mode=mode) @classmethod def setup(cls, subsys, n, err_func=None): privsep_setup(cls.__name__, serialize(subsys), n, err_func) def delete(self): do_privsep_call(serialize(self), 'delete') class Subsystem(nvmet.Subsystem): def __init__(self, nqn=None, mode='lookup'): super().__init__(nqn=nqn, mode=mode) @classmethod def setup(cls, t, err_func=None): privsep_setup(cls.__name__, t, err_func) def delete(self): do_privsep_call(serialize(self), 'delete') @property def namespaces(self): for d in os.listdir(self.path + '/namespaces/'): yield Namespace(self, os.path.basename(d)) class Port(nvmet.Port): def __init__(self, portid, mode='lookup'): super().__init__(portid=portid, mode=mode) @classmethod def setup(cls, root, n, err_func=None): privsep_setup(cls.__name__, serialize(root), n, err_func) def add_subsystem(self, nqn): do_privsep_call(serialize(self), 'add_subsystem', nqn) def remove_subsystem(self, nqn): do_privsep_call(serialize(self), 'remove_subsystem', nqn) def delete(self): do_privsep_call(serialize(self), 'delete') class Root(nvmet.Root): @property def ports(self): for d in os.listdir(self.path + '/ports/'): yield Port(os.path.basename(d)) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/privsep/targets/scst.py0000664000175000017500000000162400000000000021025 0ustar00zuulzuul00000000000000# Copyright 2018 Red Hat, Inc # Copyright 2017 Rackspace Australia # Copyright 2018 Michael Still and Aptira # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Helpers for scst related routines. 
""" from oslo_concurrency import processutils import cinder.privsep @cinder.privsep.sys_admin_pctxt.entrypoint def run_scstadmin(*args): return processutils.execute('scstadmin', *args) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/privsep/targets/tgt.py0000664000175000017500000000336700000000000020655 0ustar00zuulzuul00000000000000# Copyright 2018 Red Hat, Inc # Copyright 2017 Rackspace Australia # Copyright 2018 Michael Still and Aptira # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Helpers for iscsi related routines. """ from oslo_concurrency import processutils import cinder.privsep @cinder.privsep.sys_admin_pctxt.entrypoint def tgtadmin_show(): return processutils.execute('tgt-admin', '--show') @cinder.privsep.sys_admin_pctxt.entrypoint def tgtadmin_update(name, force=False): cmd = ['tgt-admin'] cmd.extend(['--update', name]) if force: cmd.extend(['-f']) return processutils.execute(*cmd) @cinder.privsep.sys_admin_pctxt.entrypoint def tgtadmin_delete(iqn, force=False): cmd = ['tgt-admin'] cmd.extend(['--delete', iqn]) if force: cmd.extend(['-f']) processutils.execute(*cmd) @cinder.privsep.sys_admin_pctxt.entrypoint def tgtadm_show(): cmd = ('tgtadm', '--lld', 'iscsi', '--op', 'show', '--mode', 'target') return processutils.execute(*cmd) @cinder.privsep.sys_admin_pctxt.entrypoint def tgtadm_create(tid, path): cmd = ('tgtadm', '--lld', 'iscsi', '--op', 'new', '--mode', 'logicalunit', '--tid', tid, '--lun', '1', '-b', path) return processutils.execute(*cmd) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/quota.py0000664000175000017500000011442200000000000016042 0ustar00zuulzuul00000000000000# Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""Quotas for volumes.""" import datetime from oslo_config import cfg from oslo_log import log as logging from oslo_log import versionutils from oslo_utils import importutils from oslo_utils import timeutils from cinder import context from cinder import db from cinder import exception from cinder.i18n import _ LOG = logging.getLogger(__name__) quota_opts = [ cfg.IntOpt('quota_volumes', default=10, help='Number of volumes allowed per project'), cfg.IntOpt('quota_snapshots', default=10, help='Number of volume snapshots allowed per project'), cfg.IntOpt('quota_groups', default=10, help='Number of groups allowed per project'), cfg.IntOpt('quota_gigabytes', default=1000, help='Total amount of storage, in gigabytes, allowed ' 'for volumes and snapshots per project'), cfg.IntOpt('quota_backups', default=10, help='Number of volume backups allowed per project'), cfg.IntOpt('quota_backup_gigabytes', default=1000, help='Total amount of storage, in gigabytes, allowed ' 'for backups per project'), cfg.IntOpt('reservation_expire', default=86400, help='Number of seconds until a reservation expires'), cfg.IntOpt('reservation_clean_interval', default='$reservation_expire', help='Interval between periodic task runs to clean expired ' 'reservations in seconds.'), cfg.IntOpt('until_refresh', default=0, help='Count of reservations until usage is refreshed'), cfg.IntOpt('max_age', default=0, help='Number of seconds between subsequent usage refreshes'), cfg.StrOpt('quota_driver', default="cinder.quota.DbQuotaDriver", help='Default driver to use for quota checks'), cfg.BoolOpt('use_default_quota_class', default=True, help='Enables or disables use of default quota class ' 'with default quota.'), cfg.IntOpt('per_volume_size_limit', default=-1, help='Max size allowed per volume, in gigabytes'), ] CONF = cfg.CONF CONF.register_opts(quota_opts) class DbQuotaDriver(object): """Driver to perform check to enforcement of quotas. Also allows to obtain quota information. The default driver utilizes the local database. """ def get_by_project(self, context, project_id, resource_name): """Get a specific quota by project.""" return db.quota_get(context, project_id, resource_name) def get_by_class(self, context, quota_class, resource_name): """Get a specific quota by quota class.""" return db.quota_class_get(context, quota_class, resource_name) def get_default(self, context, resource, project_id): """Get a specific default quota for a resource.""" default_quotas = db.quota_class_get_defaults(context) return default_quotas.get(resource.name, resource.default) def get_defaults(self, context, resources, project_id=None): """Given a list of resources, retrieve the default quotas. Use the class quotas named `_DEFAULT_QUOTA_NAME` as default quotas, if it exists. :param context: The request context, for access checks. :param resources: A dictionary of the registered resources. :param project_id: The id of the current project """ quotas = {} default_quotas = {} if CONF.use_default_quota_class: default_quotas = db.quota_class_get_defaults(context) for resource in resources.values(): if default_quotas: if resource.name not in default_quotas: versionutils.report_deprecated_feature(LOG, _( "Default quota for resource: %(res)s is set " "by the default quota flag: quota_%(res)s, " "it is now deprecated. 
Please use the " "default quota class for default " "quota.") % {'res': resource.name}) quotas[resource.name] = default_quotas.get(resource.name, resource.default) return quotas def get_class_quotas(self, context, resources, quota_class, defaults=True): """Given list of resources, retrieve the quotas for given quota class. :param context: The request context, for access checks. :param resources: A dictionary of the registered resources. :param quota_class: The name of the quota class to return quotas for. :param defaults: If True, the default value will be reported if there is no specific value for the resource. """ quotas = {} default_quotas = {} class_quotas = db.quota_class_get_all_by_name(context, quota_class) if defaults: default_quotas = db.quota_class_get_defaults(context) for resource in resources.values(): if resource.name in class_quotas: quotas[resource.name] = class_quotas[resource.name] continue if defaults: quotas[resource.name] = default_quotas.get(resource.name, resource.default) return quotas def get_project_quotas(self, context, resources, project_id, quota_class=None, defaults=True, usages=True): """Retrieve quotas for a project. Given a list of resources, retrieve the quotas for the given project. :param context: The request context, for access checks. :param resources: A dictionary of the registered resources. :param project_id: The ID of the project to return quotas for. :param quota_class: If project_id != context.project_id, the quota class cannot be determined. This parameter allows it to be specified. It will be ignored if project_id == context.project_id. :param defaults: If True, the quota class value (or the default value, if there is no value from the quota class) will be reported if there is no specific value for the resource. :param usages: If True, the current in_use and reserved counts will also be returned. """ quotas = {} project_quotas = db.quota_get_all_by_project(context, project_id) default_quotas = None if usages: project_usages = db.quota_usage_get_all_by_project(context, project_id) # Get the quotas for the appropriate class. If the project ID # matches the one in the context, we use the quota_class from # the context, otherwise, we use the provided quota_class (if # any) if project_id == context.project_id: quota_class = context.quota_class if quota_class: class_quotas = db.quota_class_get_all_by_name(context, quota_class) else: class_quotas = {} for resource in resources.values(): # Omit default/quota class values if not defaults and resource.name not in project_quotas: continue quota_val = project_quotas.get(resource.name) if quota_val is None: quota_val = class_quotas.get(resource.name) if quota_val is None: # Lazy load the default quotas if default_quotas is None: default_quotas = self.get_defaults( context, resources, project_id) quota_val = default_quotas[resource.name] quotas[resource.name] = {'limit': quota_val} # Include usages if desired. This is optional because one # internal consumer of this interface wants to access the # usages directly from inside a transaction. if usages: usage = project_usages.get(resource.name, {}) quotas[resource.name].update( in_use=usage.get('in_use', 0), reserved=usage.get('reserved', 0), ) return quotas def _get_quotas(self, context, resources, keys, has_sync, project_id=None): """A helper method which retrieves the quotas for specific resources. This specific resource is identified by keys, and which apply to the current context. :param context: The request context, for access checks. 
:param resources: A dictionary of the registered resources. :param keys: A list of the desired quotas to retrieve. :param has_sync: If True, indicates that the resource must have a sync attribute; if False, indicates that the resource must NOT have a sync attribute. :param project_id: Specify the project_id if current context is admin and admin wants to impact on common user's tenant. """ # Filter resources def sync_filt(x, has_sync): if has_sync: return hasattr(x, 'sync') else: return not hasattr(x, 'sync') desired = set(keys) sub_resources = {k: v for k, v in resources.items() if k in desired and sync_filt(v, has_sync)} # Make sure we accounted for all of them... if len(keys) != len(sub_resources): unknown = desired - set(sub_resources.keys()) raise exception.QuotaResourceUnknown(unknown=sorted(unknown)) # Grab and return the quotas (without usages) quotas = self.get_project_quotas(context, sub_resources, project_id, context.quota_class, usages=False) return {k: v['limit'] for k, v in quotas.items()} def limit_check(self, context, resources, values, project_id=None): """Check simple quota limits. For limits--those quotas for which there is no usage synchronization function--this method checks that a set of proposed values are permitted by the limit restriction. This method will raise a QuotaResourceUnknown exception if a given resource is unknown or if it is not a simple limit resource. If any of the proposed values is over the defined quota, an OverQuota exception will be raised with the sorted list of the resources which are too high. Otherwise, the method returns nothing. :param context: The request context, for access checks. :param resources: A dictionary of the registered resources. :param values: A dictionary of the values to check against the quota. :param project_id: Specify the project_id if current context is admin and admin wants to impact on common user's tenant. """ # Ensure no value is less than zero unders = [key for key, val in values.items() if val < 0] if unders: raise exception.InvalidQuotaValue(unders=sorted(unders)) # If project_id is None, then we use the project_id in context if project_id is None: project_id = context.project_id # Get the applicable quotas quotas = self._get_quotas(context, resources, values.keys(), has_sync=False, project_id=project_id) # Check the quotas and construct a list of the resources that # would be put over limit by the desired values overs = [key for key, val in values.items() if quotas[key] >= 0 and quotas[key] < val] if overs: raise exception.OverQuota(overs=sorted(overs), quotas=quotas, usages={}) def reserve(self, context, resources, deltas, expire=None, project_id=None): """Check quotas and reserve resources. For counting quotas--those quotas for which there is a usage synchronization function--this method checks quotas against current usage and the desired deltas. This method will raise a QuotaResourceUnknown exception if a given resource is unknown or if it does not have a usage synchronization function. If any of the proposed values is over the defined quota, an OverQuota exception will be raised with the sorted list of the resources which are too high. Otherwise, the method returns a list of reservation UUIDs which were created. :param context: The request context, for access checks. :param resources: A dictionary of the registered resources. :param deltas: A dictionary of the proposed delta changes. :param expire: An optional parameter specifying an expiration time for the reservations. 
If it is a simple number, it is interpreted as a number of seconds and added to the current time; if it is a datetime.timedelta object, it will also be added to the current time. A datetime.datetime object will be interpreted as the absolute expiration time. If None is specified, the default expiration time set by --default-reservation-expire will be used (this value will be treated as a number of seconds). :param project_id: Specify the project_id if current context is admin and admin wants to impact on common user's tenant. """ # Set up the reservation expiration if expire is None: expire = CONF.reservation_expire if isinstance(expire, int): expire = datetime.timedelta(seconds=expire) if isinstance(expire, datetime.timedelta): expire = timeutils.utcnow() + expire if not isinstance(expire, datetime.datetime): raise exception.InvalidReservationExpiration(expire=expire) # If project_id is None, then we use the project_id in context if project_id is None: project_id = context.project_id # Get the applicable quotas. # NOTE(Vek): We're not worried about races at this point. # Yes, the admin may be in the process of reducing # quotas, but that's a pretty rare thing. quotas = self._get_quotas(context, resources, deltas.keys(), has_sync=True, project_id=project_id) return self._reserve(context, resources, quotas, deltas, expire, project_id) def _reserve(self, context, resources, quotas, deltas, expire, project_id): # NOTE(Vek): Most of the work here has to be done in the DB # API, because we have to do it in a transaction, # which means access to the session. Since the # session isn't available outside the DBAPI, we # have to do the work there. return db.quota_reserve(context, resources, quotas, deltas, expire, CONF.until_refresh, CONF.max_age, project_id=project_id) def commit(self, context, reservations, project_id=None): """Commit reservations. :param context: The request context, for access checks. :param reservations: A list of the reservation UUIDs, as returned by the reserve() method. :param project_id: Specify the project_id if current context is admin and admin wants to impact on common user's tenant. """ # If project_id is None, then we use the project_id in context if project_id is None: project_id = context.project_id db.reservation_commit(context, reservations, project_id=project_id) def rollback(self, context, reservations, project_id=None): """Roll back reservations. :param context: The request context, for access checks. :param reservations: A list of the reservation UUIDs, as returned by the reserve() method. :param project_id: Specify the project_id if current context is admin and admin wants to impact on common user's tenant. """ # If project_id is None, then we use the project_id in context if project_id is None: project_id = context.project_id db.reservation_rollback(context, reservations, project_id=project_id) def destroy_by_project(self, context, project_id): """Destroy all limit quotas associated with a project. Leave usage and reservation quotas intact. :param context: The request context, for access checks. :param project_id: The ID of the project being deleted. """ db.quota_destroy_by_project(context, project_id) def expire(self, context): """Expire reservations. Explores all currently existing reservations and rolls back any that have expired. :param context: The request context, for access checks. 
""" db.reservation_expire(context) class BaseResource(object): """Describe a single resource for quota checking.""" def __init__(self, name, flag=None, parent_project_id=None): """Initializes a Resource. :param name: The name of the resource, i.e., "volumes". :param flag: The name of the flag or configuration option which specifies the default value of the quota for this resource. :param parent_project_id: The id of the current project's parent, if any. """ self.name = name self.flag = flag self.parent_project_id = parent_project_id def quota(self, driver, context, **kwargs): """Given a driver and context, obtain the quota for this resource. :param driver: A quota driver. :param context: The request context. :param project_id: The project to obtain the quota value for. If not provided, it is taken from the context. If it is given as None, no project-specific quota will be searched for. :param quota_class: The quota class corresponding to the project, or for which the quota is to be looked up. If not provided, it is taken from the context. If it is given as None, no quota class-specific quota will be searched for. Note that the quota class defaults to the value in the context, which may not correspond to the project if project_id is not the same as the one in the context. """ # Get the project ID project_id = kwargs.get('project_id', context.project_id) # Ditto for the quota class quota_class = kwargs.get('quota_class', context.quota_class) # Look up the quota for the project if project_id: try: return driver.get_by_project(context, project_id, self.name) except exception.ProjectQuotaNotFound: pass # Try for the quota class if quota_class: try: return driver.get_by_class(context, quota_class, self.name) except exception.QuotaClassNotFound: pass # OK, return the default return driver.get_default(context, self, parent_project_id=self.parent_project_id) @property def default(self): """Return the default value of the quota.""" if self.parent_project_id: return 0 return CONF[self.flag] if self.flag else -1 class ReservableResource(BaseResource): """Describe a reservable resource.""" def __init__(self, name, sync, flag=None): """Initializes a ReservableResource. Reservable resources are those resources which directly correspond to objects in the database, i.e., volumes, gigabytes, etc. A ReservableResource must be constructed with a usage synchronization function, which will be called to determine the current counts of one or more resources. The usage synchronization function will be passed three arguments: an admin context, the project ID, and an opaque session object, which should in turn be passed to the underlying database function. Synchronization functions should return a dictionary mapping resource names to the current in_use count for those resources; more than one resource and resource count may be returned. Note that synchronization functions may be associated with more than one ReservableResource. :param name: The name of the resource, i.e., "volumes". :param sync: A dbapi methods name which returns a dictionary to resynchronize the in_use count for one or more resources, as described above. :param flag: The name of the flag or configuration option which specifies the default value of the quota for this resource. """ super(ReservableResource, self).__init__(name, flag=flag) if sync: self.sync = sync class VolumeTypeResource(ReservableResource): """ReservableResource for a specific volume type.""" def __init__(self, part_name, volume_type): """Initializes a VolumeTypeResource. 
:param part_name: The kind of resource, i.e., "volumes". :param volume_type: The volume type for this resource. """ self.volume_type_name = volume_type['name'] self.volume_type_id = volume_type['id'] name = "%s_%s" % (part_name, self.volume_type_name) super(VolumeTypeResource, self).__init__(name, "_sync_%s" % part_name) class QuotaEngine(object): """Represent the set of recognized quotas.""" def __init__(self, quota_driver_class=None): """Initialize a Quota object.""" self._resources = {} self._quota_driver_class = quota_driver_class self._driver_class = None @property def _driver(self): # Lazy load the driver so we give a chance for the config file to # be read before grabbing the config for which QuotaDriver to use if self._driver_class: return self._driver_class if not self._quota_driver_class: # Grab the current driver class from CONF self._quota_driver_class = CONF.quota_driver if isinstance(self._quota_driver_class, str): self._quota_driver_class = importutils.import_object( self._quota_driver_class) self._driver_class = self._quota_driver_class return self._driver_class def __contains__(self, resource): return resource in self.resources def register_resource(self, resource): """Register a resource.""" self._resources[resource.name] = resource def register_resources(self, resources): """Register a list of resources.""" for resource in resources: self.register_resource(resource) def get_by_project(self, context, project_id, resource_name): """Get a specific quota by project.""" return self._driver.get_by_project(context, project_id, resource_name) def get_by_project_or_default(self, context, project_id, resource_name): """Get specific quota by project or default quota if doesn't exists.""" try: val = self.get_by_project( context, project_id, resource_name).hard_limit except exception.ProjectQuotaNotFound: val = self.get_defaults(context, project_id)[resource_name] return val def get_by_class(self, context, quota_class, resource_name): """Get a specific quota by quota class.""" return self._driver.get_by_class(context, quota_class, resource_name) def get_default(self, context, resource, parent_project_id=None): """Get a specific default quota for a resource. :param parent_project_id: The id of the current project's parent, if any. """ return self._driver.get_default(context, resource, parent_project_id=parent_project_id) def get_defaults(self, context, project_id=None): """Retrieve the default quotas. :param context: The request context, for access checks. :param project_id: The id of the current project """ return self._driver.get_defaults(context, self.resources, project_id) def get_class_quotas(self, context, quota_class, defaults=True): """Retrieve the quotas for the given quota class. :param context: The request context, for access checks. :param quota_class: The name of the quota class to return quotas for. :param defaults: If True, the default value will be reported if there is no specific value for the resource. """ return self._driver.get_class_quotas(context, self.resources, quota_class, defaults=defaults) def get_project_quotas(self, context, project_id, quota_class=None, defaults=True, usages=True): """Retrieve the quotas for the given project. :param context: The request context, for access checks. :param project_id: The ID of the project to return quotas for. :param quota_class: If project_id != context.project_id, the quota class cannot be determined. This parameter allows it to be specified. 
:param defaults: If True, the quota class value (or the default value, if there is no value from the quota class) will be reported if there is no specific value for the resource. :param usages: If True, the current in_use and reserved counts will also be returned. """ return self._driver.get_project_quotas(context, self.resources, project_id, quota_class=quota_class, defaults=defaults, usages=usages) def limit_check(self, context, project_id=None, **values): """Check simple quota limits. For limits--those quotas for which there is no usage synchronization function--this method checks that a set of proposed values are permitted by the limit restriction. The values to check are given as keyword arguments, where the key identifies the specific quota limit to check, and the value is the proposed value. This method will raise a QuotaResourceUnknown exception if a given resource is unknown or if it is not a simple limit resource. If any of the proposed values is over the defined quota, an OverQuota exception will be raised with the sorted list of the resources which are too high. Otherwise, the method returns nothing. :param context: The request context, for access checks. :param project_id: Specify the project_id if current context is admin and admin wants to impact on common user's tenant. """ return self._driver.limit_check(context, self.resources, values, project_id=project_id) def reserve(self, context, expire=None, project_id=None, **deltas): """Check quotas and reserve resources. For counting quotas--those quotas for which there is a usage synchronization function--this method checks quotas against current usage and the desired deltas. The deltas are given as keyword arguments, and current usage and other reservations are factored into the quota check. This method will raise a QuotaResourceUnknown exception if a given resource is unknown or if it does not have a usage synchronization function. If any of the proposed values is over the defined quota, an OverQuota exception will be raised with the sorted list of the resources which are too high. Otherwise, the method returns a list of reservation UUIDs which were created. :param context: The request context, for access checks. :param expire: An optional parameter specifying an expiration time for the reservations. If it is a simple number, it is interpreted as a number of seconds and added to the current time; if it is a datetime.timedelta object, it will also be added to the current time. A datetime.datetime object will be interpreted as the absolute expiration time. If None is specified, the default expiration time set by --default-reservation-expire will be used (this value will be treated as a number of seconds). :param project_id: Specify the project_id if current context is admin and admin wants to impact on common user's tenant. """ reservations = self._driver.reserve(context, self.resources, deltas, expire=expire, project_id=project_id) LOG.debug("Created reservations %s", reservations) return reservations def commit(self, context, reservations, project_id=None): """Commit reservations. :param context: The request context, for access checks. :param reservations: A list of the reservation UUIDs, as returned by the reserve() method. :param project_id: Specify the project_id if current context is admin and admin wants to impact on common user's tenant. 
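A typical reserve/commit cycle looks roughly like this (resource names and
sizes are illustrative only)::

    reservations = QUOTAS.reserve(ctxt, volumes=1, gigabytes=10)
    try:
        # ... create the volume ...
        QUOTAS.commit(ctxt, reservations)
    except Exception:
        QUOTAS.rollback(ctxt, reservations)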
""" try: self._driver.commit(context, reservations, project_id=project_id) except Exception: # NOTE(Vek): Ignoring exceptions here is safe, because the # usage resynchronization and the reservation expiration # mechanisms will resolve the issue. The exception is # logged, however, because this is less than optimal. LOG.exception("Failed to commit reservations %s", reservations) def rollback(self, context, reservations, project_id=None): """Roll back reservations. :param context: The request context, for access checks. :param reservations: A list of the reservation UUIDs, as returned by the reserve() method. :param project_id: Specify the project_id if current context is admin and admin wants to impact on common user's tenant. """ try: self._driver.rollback(context, reservations, project_id=project_id) except Exception: # NOTE(Vek): Ignoring exceptions here is safe, because the # usage resynchronization and the reservation expiration # mechanisms will resolve the issue. The exception is # logged, however, because this is less than optimal. LOG.exception("Failed to roll back reservations %s", reservations) def destroy_by_project(self, context, project_id): """Destroy all quota limits associated with a project. :param context: The request context, for access checks. :param project_id: The ID of the project being deleted. """ self._driver.destroy_by_project(context, project_id) def expire(self, context): """Expire reservations. Explores all currently existing reservations and rolls back any that have expired. :param context: The request context, for access checks. """ self._driver.expire(context) def add_volume_type_opts(self, context, opts, volume_type_id): """Add volume type resource options. Adds elements to the opts hash for volume type quotas. If a resource is being reserved ('gigabytes', etc) and the volume type is set up for its own quotas, these reservations are copied into keys for 'gigabytes_', etc. :param context: The request context, for access checks. :param opts: The reservations options hash. :param volume_type_id: The volume type id for this reservation. """ if not volume_type_id: return # NOTE(jdg): set inactive to True in volume_type_get, as we # may be operating on a volume that was created with a type # that has since been deleted. volume_type = db.volume_type_get(context, volume_type_id, True) for quota in ('volumes', 'gigabytes', 'snapshots'): if quota in opts: vtype_quota = "%s_%s" % (quota, volume_type['name']) opts[vtype_quota] = opts[quota] @property def resource_names(self): return sorted(self.resources.keys()) @property def resources(self): return self._resources class VolumeTypeQuotaEngine(QuotaEngine): """Represent the set of all quotas.""" @property def resources(self): """Fetches all possible quota resources.""" result = {} # Global quotas. argses = [('volumes', '_sync_volumes', 'quota_volumes'), ('per_volume_gigabytes', None, 'per_volume_size_limit'), ('snapshots', '_sync_snapshots', 'quota_snapshots'), ('gigabytes', '_sync_gigabytes', 'quota_gigabytes'), ('backups', '_sync_backups', 'quota_backups'), ('backup_gigabytes', '_sync_backup_gigabytes', 'quota_backup_gigabytes')] for args in argses: resource = ReservableResource(*args) result[resource.name] = resource # Volume type quotas. 
volume_types = db.volume_type_get_all(context.get_admin_context(), False) for volume_type in volume_types.values(): for part_name in ('volumes', 'gigabytes', 'snapshots'): resource = VolumeTypeResource(part_name, volume_type) result[resource.name] = resource return result def register_resource(self, resource): raise NotImplementedError(_("Cannot register resource")) def register_resources(self, resources): raise NotImplementedError(_("Cannot register resources")) def update_quota_resource(self, context, old_type_name, new_type_name): """Update resource in quota. This is to update resource in quotas, quota_classes, and quota_usages once the name of a volume type is changed. :param context: The request context, for access checks. :param old_type_name: old name of volume type. :param new_type_name: new name of volume type. """ for quota in ('volumes', 'gigabytes', 'snapshots'): old_res = "%s_%s" % (quota, old_type_name) new_res = "%s_%s" % (quota, new_type_name) db.quota_usage_update_resource(context, old_res, new_res) db.quota_class_update_resource(context, old_res, new_res) db.quota_update_resource(context, old_res, new_res) class GroupQuotaEngine(QuotaEngine): """Represent the group quotas.""" @property def resources(self): """Fetches all possible quota resources.""" result = {} # Global quotas. argses = [('groups', '_sync_groups', 'quota_groups'), ] for args in argses: resource = ReservableResource(*args) result[resource.name] = resource return result def register_resource(self, resource): raise NotImplementedError(_("Cannot register resource")) def register_resources(self, resources): raise NotImplementedError(_("Cannot register resources")) QUOTAS = VolumeTypeQuotaEngine() GROUP_QUOTAS = GroupQuotaEngine() NON_QUOTA_KEYS = ['tenant_id', 'id'] ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/quota_utils.py0000664000175000017500000001354200000000000017263 0ustar00zuulzuul00000000000000# Copyright 2013 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg from oslo_log import log as logging from cinder import exception CONF = cfg.CONF CONF.import_group('keystone_authtoken', 'keystonemiddleware.auth_token.__init__') LOG = logging.getLogger(__name__) def get_volume_type_reservation(ctxt, volume, type_id, reserve_vol_type_only=False, negative=False): from cinder import quota QUOTAS = quota.QUOTAS # Reserve quotas for the given volume type try: reserve_opts = {'volumes': 1, 'gigabytes': volume['size']} # When retyping a volume it may contain snapshots (if we are not # migrating it) and we need to account for its snapshots' size if volume.snapshots: reserve_opts['snapshots'] = len(volume.snapshots) if not CONF.no_snapshot_gb_quota: reserve_opts['gigabytes'] += sum(snap.volume_size for snap in volume.snapshots) QUOTAS.add_volume_type_opts(ctxt, reserve_opts, type_id) # If reserve_vol_type_only is True, just reserve volume_type quota, # not volume quota. 
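# At this point reserve_opts typically looks like the following
# (illustrative values for a 10G volume of a type named 'gold'):
#   {'volumes': 1, 'gigabytes': 10,
#    'volumes_gold': 1, 'gigabytes_gold': 10}
# so popping the untyped keys below keeps only the per-type entries.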
if reserve_vol_type_only: reserve_opts.pop('volumes') reserve_opts.pop('gigabytes') reserve_opts.pop('snapshots', None) if negative: for key, value in reserve_opts.items(): reserve_opts[key] = -value # Note that usually the project_id on the volume will be the same as # the project_id in the context. But, if they are different then the # reservations must be recorded against the project_id that owns the # volume. project_id = volume['project_id'] reservations = QUOTAS.reserve(ctxt, project_id=project_id, **reserve_opts) except exception.OverQuota as e: process_reserve_over_quota(ctxt, e, resource='volumes', size=volume.size) return reservations def _filter_domain_id_from_parents(domain_id, tree): """Removes the domain_id from the tree if present""" new_tree = None if tree: parent, children = next(iter(tree.items())) # Don't add the domain id to the parents hierarchy if parent != domain_id: new_tree = {parent: _filter_domain_id_from_parents(domain_id, children)} return new_tree OVER_QUOTA_RESOURCE_EXCEPTIONS = {'snapshots': exception.SnapshotLimitExceeded, 'backups': exception.BackupLimitExceeded, 'volumes': exception.VolumeLimitExceeded, 'groups': exception.GroupLimitExceeded} def process_reserve_over_quota(context, over_quota_exception, resource, size=None): """Handle OverQuota exception. Analyze OverQuota exception, and raise new exception related to resource type. If there are unexpected items in overs, UnexpectedOverQuota is raised. :param context: security context :param over_quota_exception: OverQuota exception :param resource: can be backups, snapshots, and volumes :param size: requested size in reservation """ def _consumed(name): return (usages[name]['reserved'] + usages[name]['in_use']) overs = over_quota_exception.kwargs['overs'] usages = over_quota_exception.kwargs['usages'] quotas = over_quota_exception.kwargs['quotas'] invalid_overs = [] for over in overs: if 'gigabytes' in over: msg = ("Quota exceeded for %(s_pid)s, tried to create " "%(s_size)dG %(s_resource)s (%(d_consumed)dG of " "%(d_quota)dG already consumed).") LOG.warning(msg, {'s_pid': context.project_id, 's_size': size, 's_resource': resource[:-1], 'd_consumed': _consumed(over), 'd_quota': quotas[over]}) if resource == 'backups': exc = exception.VolumeBackupSizeExceedsAvailableQuota else: exc = exception.VolumeSizeExceedsAvailableQuota raise exc( name=over, requested=size, consumed=_consumed(over), quota=quotas[over]) if (resource in OVER_QUOTA_RESOURCE_EXCEPTIONS.keys() and resource in over): msg = ("Quota exceeded for %(s_pid)s, tried to create " "%(s_resource)s (%(d_consumed)d %(s_resource)ss " "already consumed).") LOG.warning(msg, {'s_pid': context.project_id, 'd_consumed': _consumed(over), 's_resource': resource[:-1]}) raise OVER_QUOTA_RESOURCE_EXCEPTIONS[resource]( allowed=quotas[over], name=over) invalid_overs.append(over) if invalid_overs: raise exception.UnexpectedOverQuota(name=', '.join(invalid_overs)) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/rpc.py0000664000175000017500000002240200000000000015471 0ustar00zuulzuul00000000000000# Copyright 2013 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
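# -----------------------------------------------------------------------------
# Simplified arithmetic behind process_reserve_over_quota() below: a resource
# is over quota when the already reserved plus in-use amount, together with the
# new request, exceeds the hard limit.  The numbers are made up; this is not
# part of the cinder module.
def _consumed_sketch(usages, name):
    return usages[name]['reserved'] + usages[name]['in_use']

usages = {'gigabytes': {'reserved': 5, 'in_use': 90}}
quotas = {'gigabytes': 100}
requested_size = 10
# 95 GiB are already consumed against a 100 GiB quota, so a 10 GiB request is
# rejected and the size-exceeded exception for the resource would be raised.
assert _consumed_sketch(usages, 'gigabytes') + requested_size > quotas['gigabytes']
# -----------------------------------------------------------------------------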
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. __all__ = [ 'init', 'cleanup', 'set_defaults', 'add_extra_exmods', 'clear_extra_exmods', 'get_allowed_exmods', 'RequestContextSerializer', 'get_client', 'get_server', 'get_notifier', ] import functools from typing import Union from oslo_config import cfg from oslo_log import log as logging import oslo_messaging as messaging from oslo_messaging.rpc import dispatcher from oslo_utils import importutils profiler = importutils.try_import('osprofiler.profiler') import cinder.context import cinder.exception from cinder.i18n import _ from cinder import objects from cinder.objects import base from cinder import utils CONF = cfg.CONF LOG = logging.getLogger(__name__) TRANSPORT = None NOTIFICATION_TRANSPORT = None NOTIFIER = None ALLOWED_EXMODS = [ cinder.exception.__name__, ] EXTRA_EXMODS = [] def init(conf) -> None: global TRANSPORT, NOTIFICATION_TRANSPORT, NOTIFIER exmods = get_allowed_exmods() TRANSPORT = messaging.get_rpc_transport(conf, allowed_remote_exmods=exmods) NOTIFICATION_TRANSPORT = messaging.get_notification_transport( conf, allowed_remote_exmods=exmods) # get_notification_transport has loaded oslo_messaging_notifications config # group, so we can now check if notifications are actually enabled. if utils.notifications_enabled(conf): json_serializer = messaging.JsonPayloadSerializer() serializer = RequestContextSerializer(json_serializer) NOTIFIER = messaging.Notifier(NOTIFICATION_TRANSPORT, serializer=serializer) else: NOTIFIER = utils.DO_NOTHING def initialized() -> bool: return None not in [TRANSPORT, NOTIFIER] def cleanup(): global TRANSPORT, NOTIFICATION_TRANSPORT, NOTIFIER if NOTIFIER is None: LOG.exception("RPC cleanup: NOTIFIER is None") TRANSPORT.cleanup() NOTIFICATION_TRANSPORT.cleanup() TRANSPORT = NOTIFICATION_TRANSPORT = NOTIFIER = None def set_defaults(control_exchange): messaging.set_transport_defaults(control_exchange) def add_extra_exmods(*args): EXTRA_EXMODS.extend(args) def clear_extra_exmods(): del EXTRA_EXMODS[:] def get_allowed_exmods(): return ALLOWED_EXMODS + EXTRA_EXMODS class RequestContextSerializer(messaging.Serializer): def __init__(self, base): self._base = base def serialize_entity(self, context, entity): if not self._base: return entity return self._base.serialize_entity(context, entity) def deserialize_entity(self, context, entity): if not self._base: return entity return self._base.deserialize_entity(context, entity) def serialize_context(self, context): _context = context.to_dict() if profiler is not None: prof = profiler.get() if prof: trace_info = { "hmac_key": prof.hmac_key, "base_id": prof.get_base_id(), "parent_id": prof.get_id() } _context.update({"trace_info": trace_info}) return _context def deserialize_context(self, context): trace_info = context.pop("trace_info", None) if trace_info: if profiler is not None: profiler.init(**trace_info) return cinder.context.RequestContext.from_dict(context) def get_client(target, version_cap=None, serializer=None) -> messaging.RPCClient: if TRANSPORT is None: raise AssertionError('RPC transport is not initialized.') serializer = RequestContextSerializer(serializer) return messaging.get_rpc_client( TRANSPORT, 
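# -----------------------------------------------------------------------------
# Plain-dict sketch of the context round trip that RequestContextSerializer in
# this module performs: profiler trace info is attached on the sending side and
# popped off again before the request context is rebuilt on the receiving side.
# Dicts stand in for the real context and osprofiler objects; this is not part
# of the cinder module.
def _serialize_context_sketch(context_dict, prof=None):
    out = dict(context_dict)
    if prof is not None:
        out['trace_info'] = {'hmac_key': prof['hmac_key'],
                             'base_id': prof['base_id'],
                             'parent_id': prof['parent_id']}
    return out

def _deserialize_context_sketch(context_dict):
    ctx = dict(context_dict)
    return ctx, ctx.pop('trace_info', None)

wire = _serialize_context_sketch({'project_id': 'p1'},
                                 prof={'hmac_key': 'k', 'base_id': 'b',
                                       'parent_id': 'c'})
ctx, trace = _deserialize_context_sketch(wire)
assert ctx == {'project_id': 'p1'} and trace['base_id'] == 'b'
# -----------------------------------------------------------------------------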
target, version_cap=version_cap, serializer=serializer) def get_server(target, endpoints, serializer=None) -> messaging.rpc.server.RPCServer: if TRANSPORT is None: raise AssertionError('RPC transport is not initialized.') serializer = RequestContextSerializer(serializer) access_policy = dispatcher.DefaultRPCAccessPolicy return messaging.get_rpc_server(TRANSPORT, target, endpoints, executor='eventlet', serializer=serializer, access_policy=access_policy) @utils.if_notifications_enabled def get_notifier(service: str = None, host: str = None, publisher_id: str = None) -> messaging.Notifier: if NOTIFIER is None: raise AssertionError('RPC Notifier is not initialized.') if not publisher_id: publisher_id = "%s.%s" % (service, host or CONF.host) return NOTIFIER.prepare(publisher_id=publisher_id) def assert_min_rpc_version(min_ver, exc=None): """Decorator to block RPC calls when version cap is lower than min_ver.""" if exc is None: exc = cinder.exception.ServiceTooOld def decorator(f): @functools.wraps(f) def _wrapper(self, *args, **kwargs): if not self.client.can_send_version(min_ver): msg = _('One of %(binary)s services is too old to accept ' '%(method)s request. Required RPC API version is ' '%(version)s. Are you running mixed versions of ' '%(binary)ss?') % {'binary': self.BINARY, 'version': min_ver, 'method': f.__name__} raise exc(msg) return f(self, *args, **kwargs) return _wrapper return decorator LAST_RPC_VERSIONS = {} LAST_OBJ_VERSIONS = {} class RPCAPI(object): """Mixin class aggregating methods related to RPC API compatibility.""" RPC_API_VERSION = '1.0' RPC_DEFAULT_VERSION = '1.0' TOPIC = '' BINARY = '' def __init__(self): target = messaging.Target(topic=self.TOPIC, version=self.RPC_API_VERSION) obj_version_cap = self.determine_obj_version_cap() serializer = base.CinderObjectSerializer(obj_version_cap) rpc_version_cap = self.determine_rpc_version_cap() self.client = get_client(target, version_cap=rpc_version_cap, serializer=serializer) def _compat_ver(self, current, *legacy): versions = (current,) + legacy for version in versions[:-1]: if self.client.can_send_version(version): return version return versions[-1] def _get_cctxt(self, version: Union[str, tuple[str, ...]] = None, **kwargs): """Prepare client context Version parameter accepts single version string or tuple of strings. Compatible version can be obtained later using: cctxt = _get_cctxt(...) version = cctxt.target.version """ if version is None: version = self.RPC_DEFAULT_VERSION if isinstance(version, tuple): version = self._compat_ver(*version) return self.client.prepare(version=version, **kwargs) @classmethod def determine_rpc_version_cap(cls): global LAST_RPC_VERSIONS if cls.BINARY in LAST_RPC_VERSIONS: return LAST_RPC_VERSIONS[cls.BINARY] version_cap = objects.Service.get_minimum_rpc_version( cinder.context.get_admin_context(), cls.BINARY) if not version_cap: # If there is no service we assume they will come up later and will # have the same version as we do. 
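# -----------------------------------------------------------------------------
# Standalone sketch of the version negotiation done by RPCAPI._compat_ver()
# above: walk the preferred versions newest-first and fall back to the oldest
# one when the deployment-wide cap rules the newer ones out.  The version
# numbers and the set-based cap are hypothetical; not part of the cinder
# module.
def _compat_ver_sketch(can_send_version, current, *legacy):
    versions = (current,) + legacy
    for version in versions[:-1]:
        if can_send_version(version):
            return version
    return versions[-1]

allowed = {'3.17'}  # pretend the minimum service version pins us to 3.17
assert _compat_ver_sketch(lambda v: v in allowed, '3.19', '3.18', '3.17') == '3.17'
# -----------------------------------------------------------------------------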
version_cap = cls.RPC_API_VERSION LOG.info('Automatically selected %(binary)s RPC version ' '%(version)s as minimum service version.', {'binary': cls.BINARY, 'version': version_cap}) LAST_RPC_VERSIONS[cls.BINARY] = version_cap return version_cap @classmethod def determine_obj_version_cap(cls): global LAST_OBJ_VERSIONS if cls.BINARY in LAST_OBJ_VERSIONS: return LAST_OBJ_VERSIONS[cls.BINARY] version_cap = objects.Service.get_minimum_obj_version( cinder.context.get_admin_context(), cls.BINARY) # If there is no service we assume they will come up later and will # have the same version as we do. if not version_cap: version_cap = base.OBJ_VERSIONS.get_current() LOG.info('Automatically selected %(binary)s objects version ' '%(version)s as minimum service version.', {'binary': cls.BINARY, 'version': version_cap}) LAST_OBJ_VERSIONS[cls.BINARY] = version_cap return version_cap ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315577.1031184 cinder-27.0.0/cinder/scheduler/0000775000175000017500000000000000000000000016311 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/scheduler/__init__.py0000664000175000017500000000000000000000000020410 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/scheduler/base_filter.py0000664000175000017500000001243600000000000021150 0ustar00zuulzuul00000000000000# Copyright (c) 2011-2012 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Filter support """ from typing import Iterable from oslo_log import log as logging from cinder.scheduler import base_handler LOG = logging.getLogger(__name__) class BaseFilter(object): """Base class for all filter classes.""" def _filter_one(self, obj, filter_properties) -> bool: """Return True if it passes the filter, False otherwise. Override this in a subclass. """ return True def filter_all(self, filter_obj_list, filter_properties): """Yield objects that pass the filter. Can be overridden in a subclass, if you need to base filtering decisions on all objects. Otherwise, one can just override _filter_one() to filter a single object. """ for obj in filter_obj_list: if self._filter_one(obj, filter_properties): yield obj # Set to true in a subclass if a filter only needs to be run once # for each request rather than for each instance run_filter_once_per_request = False def run_filter_for_index(self, index): """Return True if the filter needs to be run for n-th instances. Only need to override this if a filter needs anything other than "first only" or "all" behaviour. """ return not (self.run_filter_once_per_request and index > 0) class BaseFilterHandler(base_handler.BaseHandler): """Base class to handle loading filter classes. This class should be subclassed where one needs to use filters. 
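# -----------------------------------------------------------------------------
# Standalone sketch of the BaseFilter contract above: subclasses normally
# override _filter_one() and inherit filter_all(), which lazily yields only the
# objects that pass.  Integers stand in for backend states; this is not part of
# the cinder module.
class _EvenFilterSketch(object):
    def _filter_one(self, obj, filter_properties):
        return obj % 2 == 0

    def filter_all(self, filter_obj_list, filter_properties):
        for obj in filter_obj_list:
            if self._filter_one(obj, filter_properties):
                yield obj

assert list(_EvenFilterSketch().filter_all(range(6), {})) == [0, 2, 4]
# -----------------------------------------------------------------------------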
""" def _log_filtration(self, full_filter_results, part_filter_results, filter_properties): # Log the filtration history rspec = filter_properties.get("request_spec", {}) msg_dict = {"vol_id": rspec.get("volume_id", ""), "str_results": full_filter_results} LOG.debug("Filtering removed all hosts for the request with " "volume ID '%(vol_id)s'. Filter results: %(str_results)s", msg_dict) msg_dict["str_results"] = ', '.join( "%(cls_name)s: (start: %(start)s, end: %(end)s)" % { "cls_name": value[0], "start": value[1], "end": value[2]} for value in part_filter_results) LOG.info("Filtering removed all hosts for the request with " "volume ID '%(vol_id)s'. Filter results: %(str_results)s", msg_dict) def get_filtered_objects(self, filter_classes, objs: Iterable, filter_properties: dict, index: int = 0) -> list: """Get objects after filter :param filter_classes: filters that will be used to filter the objects :param objs: objects that will be filtered :param filter_properties: client filter properties :param index: This value needs to be increased in the caller function of get_filtered_objects when handling each resource. """ list_objs = list(objs) LOG.debug("Starting with %d host(s)", len(list_objs)) # The 'part_filter_results' list just tracks the number of hosts # before and after the filter, unless the filter returns zero # hosts, in which it records the host/nodename for the last batch # that was removed. Since the full_filter_results can be very large, # it is only recorded if the LOG level is set to debug. part_filter_results = [] full_filter_results = [] for filter_cls in filter_classes: cls_name = filter_cls.__name__ start_count = len(list_objs) filter_class = filter_cls() if filter_class.run_filter_for_index(index): objs = filter_class.filter_all(list_objs, filter_properties) if objs is None: LOG.info("Filter %s returned 0 hosts", cls_name) full_filter_results.append((cls_name, None)) list_objs = None break list_objs = list(objs) end_count = len(list_objs) part_filter_results.append((cls_name, start_count, end_count)) remaining = [getattr(obj, "host", obj) for obj in list_objs] full_filter_results.append((cls_name, remaining)) LOG.debug("Filter %(cls_name)s returned " "%(obj_len)d host(s)", {'cls_name': cls_name, 'obj_len': len(list_objs)}) if not list_objs: self._log_filtration(full_filter_results, part_filter_results, filter_properties) return list_objs ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/scheduler/base_handler.py0000664000175000017500000000336200000000000021276 0ustar00zuulzuul00000000000000# Copyright (c) 2011-2013 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ A common base for handling extension classes. 
Used by BaseFilterHandler and BaseWeightHandler """ import inspect from stevedore import extension class BaseHandler(object): """Base class to handle loading filter and weight classes.""" def __init__(self, modifier_class_type, modifier_namespace): self.namespace = modifier_namespace self.modifier_class_type = modifier_class_type self.extension_manager = extension.ExtensionManager(modifier_namespace) def _is_correct_class(self, cls) -> bool: """Return whether an object is a class of the correct type. (or is not prefixed with an underscore) """ return (inspect.isclass(cls) and not cls.__name__.startswith('_') and issubclass(cls, self.modifier_class_type)) def get_all_classes(self) -> list: # We use a set, as some classes may have an entrypoint of their own, # and also be returned by a function such as 'all_filters' for example return [ext.plugin for ext in self.extension_manager if self._is_correct_class(ext.plugin)] ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/scheduler/base_weight.py0000664000175000017500000001231000000000000021141 0ustar00zuulzuul00000000000000# Copyright (c) 2011-2012 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Pluggable Weighing support """ import abc from typing import Iterable, Optional from oslo_log import log as logging from cinder.scheduler import base_handler LOG = logging.getLogger(__name__) def normalize(weight_list: list[float], minval: Optional[float] = None, maxval: Optional[float] = None) -> Iterable[float]: """Normalize the values in a list between 0 and 1.0. The normalization is made regarding the lower and upper values present in weight_list. If the minval and/or maxval parameters are set, these values will be used instead of the minimum and maximum from the list. If all the values are equal, they are normalized to 0. """ if not weight_list: return () if maxval is None: maxval = max(weight_list) if minval is None: minval = min(weight_list) maxval = float(maxval) minval = float(minval) if minval == maxval: return [0] * len(weight_list) range_ = maxval - minval return ((i - minval) / range_ for i in weight_list) class WeighedObject(object): """Object with weight information.""" def __init__(self, obj, weight: float): self.obj = obj self.weight = weight def __repr__(self) -> str: return "" % (self.obj, self.weight) class BaseWeigher(object, metaclass=abc.ABCMeta): """Base class for pluggable weighers. The attributes maxval and minval can be specified to set up the maximum and minimum values for the weighed objects. These values will then be taken into account in the normalization step, instead of taking the values from the calculated weights. """ minval: Optional[float] = None maxval: Optional[float] = None def weight_multiplier(self) -> float: """How weighted this weigher should be. Override this method in a subclass, so that the returned value is read from a configuration option to permit operators specify a multiplier for the weigher. 
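# -----------------------------------------------------------------------------
# Condensed, standalone restatement of normalize() above: values are rescaled
# into [0, 1] against the observed (or supplied) minimum and maximum, and a
# constant list collapses to zeros.  Not part of the cinder module.
def _normalize_sketch(values, minval=None, maxval=None):
    if not values:
        return ()
    maxval = float(max(values) if maxval is None else maxval)
    minval = float(min(values) if minval is None else minval)
    if minval == maxval:
        return [0] * len(values)
    return [(v - minval) / (maxval - minval) for v in values]

assert _normalize_sketch([200, 600, 1000]) == [0.0, 0.5, 1.0]
assert _normalize_sketch([5, 5, 5]) == [0, 0, 0]
# -----------------------------------------------------------------------------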
""" return 1.0 @abc.abstractmethod def _weigh_object(self, obj, weight_properties: dict) -> float: """Override in a subclass to specify a weight for a specific object.""" def weigh_objects(self, weighed_obj_list: list[WeighedObject], weight_properties: dict) -> list[float]: """Weigh multiple objects. Override in a subclass if you need access to all objects in order to calculate weights. Do not modify the weight of an object here, just return a list of weights. """ # Calculate the weights weights = [] for obj in weighed_obj_list: weight = self._weigh_object(obj.obj, weight_properties) # Record the min and max values if they are None. If they anything # but none we assume that the weigher has set them if self.minval is None: self.minval = weight if self.maxval is None: self.maxval = weight if weight < self.minval: self.minval = weight elif weight > self.maxval: self.maxval = weight weights.append(weight) return weights class BaseWeightHandler(base_handler.BaseHandler): object_class = WeighedObject def get_weighed_objects(self, weigher_classes: list, obj_list: list[WeighedObject], weighing_properties: dict) -> list[WeighedObject]: """Return a sorted (descending), normalized list of WeighedObjects.""" if not obj_list: return [] weighed_objs = [self.object_class(obj, 0.0) for obj in obj_list] for weigher_cls in weigher_classes: weigher = weigher_cls() weights = weigher.weigh_objects(weighed_objs, weighing_properties) # Normalize the weights weights = normalize(weights, minval=weigher.minval, maxval=weigher.maxval) for i, weight in enumerate(weights): obj = weighed_objs[i] obj.weight += weigher.weight_multiplier() * weight LOG.debug("Weigher %(cls_name)s returned, " "weigher value is {max: %(maxval)s, min: %(minval)s}", {'cls_name': weigher_cls.__name__, 'maxval': weigher.maxval, 'minval': weigher.minval}) return sorted(weighed_objs, key=lambda x: x.weight, reverse=True) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/scheduler/driver.py0000664000175000017500000001460100000000000020160 0ustar00zuulzuul00000000000000# Copyright (c) 2010 OpenStack Foundation # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
""" Scheduler base class that all Schedulers should inherit from """ from oslo_config import cfg from oslo_utils import importutils from oslo_utils import timeutils from cinder.i18n import _ from cinder import objects from cinder.volume import rpcapi as volume_rpcapi scheduler_driver_opts = [ cfg.StrOpt('scheduler_host_manager', default='cinder.scheduler.host_manager.HostManager', help='The scheduler host manager class to use'), cfg.IntOpt('scheduler_max_attempts', default=3, help='Maximum number of attempts to schedule a volume'), ] CONF = cfg.CONF CONF.register_opts(scheduler_driver_opts) def volume_update_db(context, volume_id, host, cluster_name, availability_zone=None, volume=None): """Set the host, cluster_name, and set the scheduled_at field of a volume. :returns: A Volume with the updated fields set properly. """ if not volume: volume = objects.Volume.get_by_id(context, volume_id) volume.host = host volume.cluster_name = cluster_name volume.scheduled_at = timeutils.utcnow() volume.availability_zone = availability_zone volume.save() # A volume object is expected to be returned, as it is used by # filter_scheduler. return volume def generic_group_update_db(context, group, host, cluster_name): """Set the host and the scheduled_at field of a group. :returns: A Group with the updated fields set properly. """ group.update({'host': host, 'updated_at': timeutils.utcnow(), 'cluster_name': cluster_name}) group.save() return group class Scheduler(object): """The base class that all Scheduler classes should inherit from.""" def __init__(self): self.host_manager = importutils.import_object( CONF.scheduler_host_manager) self.volume_rpcapi = volume_rpcapi.VolumeAPI() def reset(self): """Reset volume RPC API object to load new version pins.""" self.volume_rpcapi = volume_rpcapi.VolumeAPI() def is_ready(self): """Returns True if Scheduler is ready to accept requests. This is to handle scheduler service startup when it has no volume hosts stats and will fail all the requests. """ return self.host_manager.has_all_capabilities() def is_first_receive(self): """Returns True if Scheduler receives the capabilities at startup. This is to handle the problem of too long sleep time during scheduler service startup process. """ return self.host_manager.first_receive_capabilities() def update_service_capabilities(self, service_name, host, capabilities, cluster_name, timestamp): """Process a capability update from a service node.""" self.host_manager.update_service_capabilities(service_name, host, capabilities, cluster_name, timestamp) def notify_service_capabilities(self, service_name, backend, capabilities, timestamp): """Notify capability update from a service node.""" self.host_manager.notify_service_capabilities(service_name, backend, capabilities, timestamp) def host_passes_filters(self, context, backend, request_spec, filter_properties): """Check if the specified backend passes the filters.""" raise NotImplementedError(_("Must implement backend_passes_filters")) def find_retype_host(self, context, request_spec, filter_properties=None, migration_policy='never'): """Find a backend that can accept the volume with its new type.""" raise NotImplementedError(_("Must implement find_retype_backend")) # NOTE(geguileo): For backward compatibility with out of tree Schedulers # we don't change host_passes_filters or find_retype_host method names but # create an "alias" for them with the right name instead. 
backend_passes_filters = host_passes_filters find_retype_backend = find_retype_host def schedule(self, context, topic, method, *_args, **_kwargs): """Must override schedule method for scheduler to work.""" raise NotImplementedError(_("Must implement a fallback schedule")) def schedule_create_volume(self, context, request_spec, filter_properties): """Must override schedule method for scheduler to work.""" raise NotImplementedError(_("Must implement schedule_create_volume")) def schedule_create_group(self, context, group, group_spec, request_spec_list, group_filter_properties, filter_properties_list): """Must override schedule method for scheduler to work.""" raise NotImplementedError(_( "Must implement schedule_create_group")) def get_pools(self, context, filters): """Must override schedule method for scheduler to work.""" raise NotImplementedError(_( "Must implement schedule_get_pools")) def get_backup_host(self, volume, availability_zone, driver=None): """Must override schedule method for scheduler to work.""" raise NotImplementedError(_( "Must implement get_backup_host")) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315577.1031184 cinder-27.0.0/cinder/scheduler/evaluator/0000775000175000017500000000000000000000000020313 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/scheduler/evaluator/__init__.py0000664000175000017500000000000000000000000022412 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/scheduler/evaluator/evaluator.py0000664000175000017500000002000100000000000022660 0ustar00zuulzuul00000000000000# Copyright (c) 2014 Hewlett-Packard Development Company, L.P. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import operator import re import sys from typing import Callable import pyparsing from cinder import exception from cinder.i18n import _ def _operatorOperands(tokenList): it = iter(tokenList) while True: try: op1 = next(it) op2 = next(it) yield (op1, op2) except StopIteration: break class EvalConstant(object): def __init__(self, toks): self.value = toks[0] def eval(self): result = self.value if (isinstance(result, str) and re.match(r"^[a-zA-Z_]+\.[a-zA-Z_]+$", result)): (which_dict, entry) = result.split('.') try: result = _vars[which_dict][entry] except KeyError: raise exception.EvaluatorParseException( _("KeyError evaluating string")) except TypeError: raise exception.EvaluatorParseException( _("TypeError evaluating string")) try: result = int(result) except ValueError: try: result = float(result) except ValueError: if isinstance(result, str): result = result.replace('"', '').replace('\'', '') return result class EvalSignOp(object): operations = { '+': 1, '-': -1, } def __init__(self, toks): self.sign, self.value = toks[0] def eval(self): return self.operations[self.sign] * self.value.eval() class EvalAddOp(object): def __init__(self, toks): self.value = toks[0] def eval(self): sum = self.value[0].eval() for op, val in _operatorOperands(self.value[1:]): if op == '+': sum += val.eval() elif op == '-': sum -= val.eval() return sum class EvalMultOp(object): def __init__(self, toks): self.value = toks[0] def eval(self): prod = self.value[0].eval() for op, val in _operatorOperands(self.value[1:]): try: if op == '*': prod *= val.eval() elif op == '/': prod /= float(val.eval()) except ZeroDivisionError as e: raise exception.EvaluatorParseException( _("ZeroDivisionError: %s") % e) return prod class EvalPowerOp(object): def __init__(self, toks): self.value = toks[0] def eval(self): prod = self.value[0].eval() for op, val in _operatorOperands(self.value[1:]): prod = pow(prod, val.eval()) return prod class EvalNegateOp(object): def __init__(self, toks): self.negation, self.value = toks[0] def eval(self): return not self.value.eval() class EvalComparisonOp(object): operations = { "<": operator.lt, "<=": operator.le, ">": operator.gt, ">=": operator.ge, "!=": operator.ne, "==": operator.eq, "<>": operator.ne, } def __init__(self, toks): self.value = toks[0] def eval(self): val1 = self.value[0].eval() for op, val in _operatorOperands(self.value[1:]): fn = self.operations[op] val2 = val.eval() if not fn(val1, val2): break val1 = val2 else: return True return False class EvalTernaryOp(object): def __init__(self, toks): self.value = toks[0] def eval(self): condition = self.value[0].eval() if condition: return self.value[2].eval() else: return self.value[4].eval() class EvalFunction(object): functions: dict[str, Callable] = { "abs": abs, "max": max, "min": min, } def __init__(self, toks): self.func, self.value = toks[0] def eval(self): args = self.value.eval() if type(args) is list: return self.functions[self.func](*args) else: return self.functions[self.func](args) class EvalCommaSeperator(object): def __init__(self, toks): self.value = toks[0] def eval(self): val1 = self.value[0].eval() val2 = self.value[2].eval() if type(val2) is list: val_list = [] val_list.append(val1) for val in val2: val_list.append(val) return val_list return [val1, val2] class EvalBoolAndOp(object): def __init__(self, toks): self.value = toks[0] def eval(self): left = self.value[0].eval() right = self.value[2].eval() return left and right class EvalBoolOrOp(object): def __init__(self, toks): self.value = toks[0] def eval(self): 
left = self.value[0].eval() right = self.value[2].eval() return left or right _parser = None _vars = {} def _def_parser(): # Enabling packrat parsing greatly speeds up the parsing. pyparsing.ParserElement.enablePackrat() alphas = pyparsing.alphas Combine = pyparsing.Combine nums = pyparsing.nums quoted_string = pyparsing.quotedString oneOf = pyparsing.oneOf opAssoc = pyparsing.opAssoc infixNotation = pyparsing.infixNotation Word = pyparsing.Word integer = Word(nums) real = Combine(Word(nums) + '.' + Word(nums)) variable = Word(alphas + '_' + '.') number = real | integer fn = Word(alphas + '_' + '.') operand = number | variable | fn | quoted_string signop = oneOf('+ -') addop = oneOf('+ -') multop = oneOf('* /') comparisonop = oneOf(' '.join(EvalComparisonOp.operations.keys())) ternaryop = ('?', ':') boolandop = oneOf('AND and &&') boolorop = oneOf('OR or ||') negateop = oneOf('NOT not !') operand.setParseAction(EvalConstant) expr = infixNotation(operand, [ (fn, 1, opAssoc.RIGHT, EvalFunction), ("^", 2, opAssoc.RIGHT, EvalPowerOp), (signop, 1, opAssoc.RIGHT, EvalSignOp), (multop, 2, opAssoc.LEFT, EvalMultOp), (addop, 2, opAssoc.LEFT, EvalAddOp), (negateop, 1, opAssoc.RIGHT, EvalNegateOp), (comparisonop, 2, opAssoc.LEFT, EvalComparisonOp), (ternaryop, 3, opAssoc.LEFT, EvalTernaryOp), (boolandop, 2, opAssoc.LEFT, EvalBoolAndOp), (boolorop, 2, opAssoc.LEFT, EvalBoolOrOp), (',', 2, opAssoc.RIGHT, EvalCommaSeperator), ]) return expr def evaluate(expression, **kwargs): """Evaluates an expression. Provides the facility to evaluate mathematical expressions, and to substitute variables from dictionaries into those expressions. Supports both integer and floating point values, and automatic promotion where necessary. """ global _parser if _parser is None: _parser = _def_parser() global _vars _vars = kwargs # Some reasonable formulas break with the default recursion limit of # 1000. Raise it here and reset it afterward. orig_recursion_limit = sys.getrecursionlimit() if orig_recursion_limit < 3000: sys.setrecursionlimit(3000) try: result = _parser.parseString(expression, parseAll=True)[0] except pyparsing.ParseException as e: raise exception.EvaluatorParseException( _("ParseException: %s") % e) finally: sys.setrecursionlimit(orig_recursion_limit) return result.eval() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/scheduler/filter_scheduler.py0000664000175000017500000006326500000000000022222 0ustar00zuulzuul00000000000000# Copyright (c) 2011 Intel Corporation # Copyright (c) 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """The FilterScheduler is for creating volumes. You can customize this scheduler by specifying your own volume Filters and Weighing Functions. 
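# -----------------------------------------------------------------------------
# Example of the customization the docstring above refers to: a scheduler
# filter is a BaseBackendFilter subclass that implements backend_passes().
# This sketch only runs where the cinder packages are importable; the class
# name, the 10 GiB threshold and the free_capacity_gb attribute access are
# illustrative assumptions, not an in-tree filter.  A deployment would still
# need to make such a filter importable and enable it in the scheduler
# configuration.
from cinder.scheduler import filters as _filters_sketch


class _MinimumFreeSpaceFilterSketch(_filters_sketch.BaseBackendFilter):
    """Pass only backends reporting at least 10 GiB of free space."""

    def backend_passes(self, backend_state, filter_properties):
        return getattr(backend_state, 'free_capacity_gb', 0) >= 10
# -----------------------------------------------------------------------------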
""" from typing import (Optional, Union) from oslo_config import cfg from oslo_log import log as logging from oslo_serialization import jsonutils from cinder import context from cinder import exception from cinder.i18n import _ from cinder import objects from cinder.scheduler import driver from cinder.scheduler.host_manager import BackendState from cinder.scheduler import scheduler_options from cinder.scheduler.weights import WeighedHost from cinder.volume import volume_utils CONF = cfg.CONF LOG = logging.getLogger(__name__) class FilterScheduler(driver.Scheduler): """Scheduler that can be used for filtering and weighing.""" def __init__(self, *args, **kwargs): super(FilterScheduler, self).__init__(*args, **kwargs) self.options = scheduler_options.SchedulerOptions() self.max_attempts = self._max_attempts() def _get_configuration_options(self) -> dict: """Fetch options dictionary. Broken out for testing.""" return self.options.get_configuration() def populate_filter_properties(self, request_spec: dict, filter_properties: dict) -> None: """Stuff things into filter_properties. Can be overridden in a subclass to add more data. """ vol = request_spec['volume_properties'] filter_properties['size'] = vol['size'] filter_properties['availability_zone'] = vol.get('availability_zone') filter_properties['user_id'] = vol.get('user_id') filter_properties['metadata'] = vol.get('metadata') filter_properties['qos_specs'] = vol.get('qos_specs') def schedule_create_group(self, context: context.RequestContext, group, group_spec, request_spec_list, group_filter_properties, filter_properties_list) -> None: weighed_backend = self._schedule_generic_group( context, group_spec, request_spec_list, group_filter_properties, filter_properties_list) if not weighed_backend: raise exception.NoValidBackend(reason=_("No weighed backends " "available")) backend = weighed_backend.obj updated_group = driver.generic_group_update_db(context, group, backend.host, backend.cluster_name) self.volume_rpcapi.create_group(context, updated_group) def schedule_create_volume(self, context: context.RequestContext, request_spec: dict, filter_properties: dict) -> None: backend = self._schedule(context, request_spec, filter_properties) if not backend: raise exception.NoValidBackend(reason=_("No weighed backends " "available")) backend = backend.obj volume_id = request_spec['volume_id'] updated_volume = driver.volume_update_db( context, volume_id, backend.host, backend.cluster_name, availability_zone=backend.service['availability_zone'], volume=request_spec.get('volume')) self._post_select_populate_filter_properties(filter_properties, backend) # context is not serializable filter_properties.pop('context', None) self.volume_rpcapi.create_volume(context, updated_volume, request_spec, filter_properties, allow_reschedule=True) def backend_passes_filters(self, context: context.RequestContext, backend: str, request_spec: dict, filter_properties: dict): """Check if the specified backend passes the filters.""" weighed_backends = self._get_weighted_candidates(context, request_spec, filter_properties) # If backend has no pool defined we will ignore it in the comparison ignore_pool = not bool(volume_utils.extract_host(backend, 'pool')) for weighed_backend in weighed_backends: backend_id = weighed_backend.obj.backend_id if ignore_pool: backend_id = volume_utils.extract_host(backend_id) if backend_id == backend: return weighed_backend.obj reason_param = {'resource': 'volume', 'id': '??id missing??', 'backend': backend} for resource in ['volume', 'group', 
'snapshot']: resource_id = request_spec.get('%s_id' % resource, None) if resource_id: reason_param.update({'resource': resource, 'id': resource_id}) break raise exception.NoValidBackend(_('Cannot place %(resource)s %(id)s ' 'on %(backend)s.') % reason_param) def find_retype_backend(self, context: context.RequestContext, request_spec: dict, filter_properties: Optional[dict] = None, migration_policy: str = 'never') -> BackendState: """Find a backend that can accept the volume with its new type.""" filter_properties = filter_properties or {} backend = (request_spec['volume_properties'].get('cluster_name') or request_spec['volume_properties']['host']) # The volume already exists on this backend, and so we shouldn't check # if it can accept the volume again in the CapacityFilter. filter_properties['vol_exists_on'] = backend weighed_backends = self._get_weighted_candidates(context, request_spec, filter_properties) if not weighed_backends: raise exception.NoValidBackend( reason=_('No valid backends for volume %(id)s with type ' '%(type)s') % {'id': request_spec['volume_id'], 'type': request_spec['volume_type']}) for weighed_backend in weighed_backends: backend_state = weighed_backend.obj if backend_state.backend_id == backend: return backend_state if volume_utils.extract_host(backend, 'pool') is None: # legacy volumes created before pool is introduced has no pool # info in host. But host_state.host always include pool level # info. In this case if above exact match didn't work out, we # find host_state that are of the same host of volume being # retyped. In other words, for legacy volumes, retyping could # cause migration between pools on same host, which we consider # it is different from migration between hosts thus allow that # to happen even migration policy is 'never'. for weighed_backend in weighed_backends: backend_state = weighed_backend.obj new_backend = volume_utils.extract_host( backend_state.backend_id, 'backend') if new_backend == backend: return backend_state if migration_policy == 'never': raise exception.NoValidBackend( reason=_('Current backend not valid for volume %(id)s with ' 'type %(type)s, migration not allowed') % {'id': request_spec['volume_id'], 'type': request_spec['volume_type']}) top_backend = self._choose_top_backend(weighed_backends, request_spec) return top_backend.obj def get_pools(self, context: context.RequestContext, filters: dict): return self.host_manager.get_pools(context, filters) def _post_select_populate_filter_properties( self, filter_properties: dict, backend_state: BackendState) -> None: """Populate filter properties with additional information. Add additional information to the filter properties after a backend has been selected by the scheduling process. """ # Add a retry entry for the selected volume backend: self._add_retry_backend(filter_properties, backend_state.backend_id) def _add_retry_backend(self, filter_properties: dict, backend) -> None: """Add a retry entry for the selected volume backend. In the event that the request gets re-scheduled, this entry will signal that the given backend has already been tried. 
""" retry = filter_properties.get('retry', None) if not retry: return # TODO(geguileo): In P - change to only use backends for key in ('hosts', 'backends'): backends = retry.get(key) if backends is not None: backends.append(backend) def _max_attempts(self) -> int: max_attempts = CONF.scheduler_max_attempts if max_attempts < 1: raise exception.InvalidParameterValue( err=_("Invalid value for 'scheduler_max_attempts', " "must be >=1")) return max_attempts def _log_volume_error(self, volume_id: str, retry: dict) -> None: """Log requests with exceptions from previous volume operations.""" exc = retry.pop('exc', None) # string-ified exception from volume if not exc: return # no exception info from a previous attempt, skip # TODO(geguileo): In P - change to hosts = retry.get('backends') backends = retry.get('backends', retry.get('hosts')) if not backends: return # no previously attempted hosts, skip last_backend = backends[-1] LOG.error("Error scheduling %(volume_id)s from last vol-service: " "%(last_backend)s : %(exc)s", {'volume_id': volume_id, 'last_backend': last_backend, 'exc': exc}) def _populate_retry(self, filter_properties: dict, request_spec: dict) -> None: """Populate filter properties with history of retries for request. If maximum retries is exceeded, raise NoValidBackend. """ max_attempts = self.max_attempts retry = filter_properties.pop('retry', {}) if max_attempts == 1: # re-scheduling is disabled. return # retry is enabled, update attempt count: if retry: retry['num_attempts'] += 1 else: retry = { 'num_attempts': 1, 'backends': [], # list of volume service backends tried 'hosts': [] # TODO(geguileo): Remove in P and leave backends } filter_properties['retry'] = retry resource_id = str(request_spec.get( 'volume_id')) or str(request_spec.get("group_id")) self._log_volume_error(resource_id, retry) if retry['num_attempts'] > max_attempts: raise exception.NoValidBackend( reason=_("Exceeded max scheduling attempts %(max_attempts)d " "for resource %(resource_id)s") % {'max_attempts': max_attempts, 'resource_id': resource_id}) def _get_weighted_candidates( self, context: context.RequestContext, request_spec: dict, filter_properties: Optional[dict] = None) -> list: """Return a list of backends that meet required specs. Returned list is ordered by their fitness. """ elevated = context.elevated() # Since Cinder is using mixed filters from Oslo and it's own, which # takes 'resource_XX' and 'volume_XX' as input respectively, copying # 'volume_XX' to 'resource_XX' will make both filters happy. volume_type = request_spec.get("volume_type") # When creating snapshots, the value of volume_type is None here # which causes issues in filters (Eg: Bug #1856126). # To prevent that, we set it as an empty dictionary here. 
if volume_type is None: volume_type = {} resource_type = volume_type config_options = self._get_configuration_options() if filter_properties is None: filter_properties = {} self._populate_retry(filter_properties, request_spec) request_spec_dict = jsonutils.to_primitive(request_spec) filter_properties.update({'context': context, 'request_spec': request_spec_dict, 'config_options': config_options, 'volume_type': volume_type, 'resource_type': resource_type}) self.populate_filter_properties(request_spec, filter_properties) # Revert volume consumed capacity if it's a rescheduled request retry = filter_properties.get('retry', {}) if retry.get('backends', []): self.host_manager.revert_volume_consumed_capacity( retry['backends'][-1], request_spec['volume_properties']['size']) # Find our local list of acceptable backends by filtering and # weighing our options. we virtually consume resources on # it so subsequent selections can adjust accordingly. # Note: remember, we are using an iterator here. So only # traverse this list once. backends = self.host_manager.get_all_backend_states(elevated) # Filter local hosts based on requirements ... backends = self.host_manager.get_filtered_backends(backends, filter_properties) if not backends: return [] LOG.debug("Filtered %s", backends) # weighted_backends = WeightedHost() ... the best # backend for the job. weighed_backends = self.host_manager.get_weighed_backends( backends, filter_properties) return weighed_backends def _get_weighted_candidates_generic_group( self, context: context.RequestContext, group_spec: dict, request_spec_list: list[dict], group_filter_properties: Optional[dict] = None, filter_properties_list: Optional[list[dict]] = None) -> list: """Finds backends that supports the group. Returns a list of backends that meet the required specs, ordered by their fitness. """ elevated = context.elevated() backends_by_group_type = self._get_weighted_candidates_by_group_type( context, group_spec, group_filter_properties) weighed_backends = [] backends_by_vol_type = [] index = 0 for request_spec in request_spec_list: volume_properties = request_spec['volume_properties'] # Since Cinder is using mixed filters from Oslo and it's own, which # takes 'resource_XX' and 'volume_XX' as input respectively, # copying 'volume_XX' to 'resource_XX' will make both filters # happy. resource_properties = volume_properties.copy() volume_type = request_spec.get("volume_type", None) resource_type = request_spec.get("volume_type", None) request_spec.update({'resource_properties': resource_properties}) config_options = self._get_configuration_options() filter_properties = {} if filter_properties_list: filter_properties = filter_properties_list[index] if filter_properties is None: filter_properties = {} self._populate_retry(filter_properties, request_spec) # Add group_support in extra_specs if it is not there. # Make sure it is populated in filter_properties # if 'group_support' not in resource_type.get( # 'extra_specs', {}): # resource_type['extra_specs'].update( # group_support=' True') filter_properties.update({'context': context, 'request_spec': request_spec, 'config_options': config_options, 'volume_type': volume_type, 'resource_type': resource_type}) self.populate_filter_properties(request_spec, filter_properties) # Find our local list of acceptable backends by filtering and # weighing our options. we virtually consume resources on # it so subsequent selections can adjust accordingly. # Note: remember, we are using an iterator here. So only # traverse this list once. 
all_backends = self.host_manager.get_all_backend_states(elevated) if not all_backends: return [] # Filter local backends based on requirements ... backends = self.host_manager.get_filtered_backends( all_backends, filter_properties) if not backends: return [] LOG.debug("Filtered %s", backends) # weighted_backend = WeightedHost() ... the best # backend for the job. temp_weighed_backends = self.host_manager.get_weighed_backends( backends, filter_properties) if not temp_weighed_backends: return [] if index == 0: backends_by_vol_type = temp_weighed_backends else: backends_by_vol_type = self._find_valid_backends( backends_by_vol_type, temp_weighed_backends) if not backends_by_vol_type: return [] index += 1 # Find backends selected by both the group type and volume types. weighed_backends = self._find_valid_backends(backends_by_vol_type, backends_by_group_type) return weighed_backends def _find_valid_backends(self, backend_list1: list, backend_list2: list) -> list: new_backends = [] for backend1 in backend_list1: for backend2 in backend_list2: # Should schedule creation of group on backend level, # not pool level. if (volume_utils.extract_host(backend1.obj.backend_id) == volume_utils.extract_host(backend2.obj.backend_id)): new_backends.append(backend1) if not new_backends: return [] return new_backends def _get_weighted_candidates_by_group_type( self, context: context.RequestContext, group_spec: dict, group_filter_properties: Optional[dict] = None) \ -> list[WeighedHost]: """Finds backends that supports the group type. Returns a list of backends that meet the required specs, ordered by their fitness. """ elevated = context.elevated() weighed_backends = [] volume_properties = group_spec['volume_properties'] # Since Cinder is using mixed filters from Oslo and it's own, which # takes 'resource_XX' and 'volume_XX' as input respectively, # copying 'volume_XX' to 'resource_XX' will make both filters # happy. resource_properties = volume_properties.copy() group_type = group_spec.get("group_type", None) resource_type = group_spec.get("group_type", None) group_spec.update({'resource_properties': resource_properties}) config_options = self._get_configuration_options() if group_filter_properties is None: group_filter_properties = {} self._populate_retry(group_filter_properties, resource_properties) group_filter_properties.update({'context': context, 'request_spec': group_spec, 'config_options': config_options, 'group_type': group_type, 'resource_type': resource_type}) self.populate_filter_properties(group_spec, group_filter_properties) # Find our local list of acceptable backends by filtering and # weighing our options. we virtually consume resources on # it so subsequent selections can adjust accordingly. # Note: remember, we are using an iterator here. So only # traverse this list once. all_backends = self.host_manager.get_all_backend_states(elevated) if not all_backends: return [] # Filter local backends based on requirements ... backends = self.host_manager.get_filtered_backends( all_backends, group_filter_properties) if not backends: return [] LOG.debug("Filtered %s", backends) # weighted_backends = WeightedHost() ... the best backend for the job. 
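# -----------------------------------------------------------------------------
# Standalone sketch of _find_valid_backends() above: when scheduling a group,
# the candidates produced per volume type are intersected at the backend level
# (pool suffix ignored), so only backends acceptable to every type survive.
# The '#pool' split is a simplification of volume_utils.extract_host(); host
# names are made up and this is not part of the cinder module.
def _extract_backend_sketch(backend_id):
    return backend_id.split('#')[0]

def _find_valid_backends_sketch(backend_list1, backend_list2):
    return [b1 for b1 in backend_list1
            if any(_extract_backend_sketch(b1) == _extract_backend_sketch(b2)
                   for b2 in backend_list2)]

assert _find_valid_backends_sketch(
    ['node1@lvm#pool_a', 'node2@ceph#rbd'],
    ['node2@ceph#other_pool']) == ['node2@ceph#rbd']
# -----------------------------------------------------------------------------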
weighed_backends = self.host_manager.get_weighed_backends( backends, group_filter_properties) if not weighed_backends: return [] return weighed_backends def _schedule(self, context: context.RequestContext, request_spec: dict, filter_properties: Optional[dict] = None): weighed_backends = self._get_weighted_candidates(context, request_spec, filter_properties) # When we get the weighed_backends, we clear those backends that don't # match the resource's backend (it could be assigned from group, # snapshot or volume). resource_backend = request_spec.get('resource_backend') if weighed_backends and resource_backend: resource_backend_has_pool = bool(volume_utils.extract_host( resource_backend, 'pool')) # Get host name including host@backend#pool info from # weighed_backends. for backend in weighed_backends[::-1]: backend_id = ( backend.obj.backend_id if resource_backend_has_pool else volume_utils.extract_host(backend.obj.backend_id) ) if backend_id != resource_backend: weighed_backends.remove(backend) if not weighed_backends: assert filter_properties is not None LOG.warning('No weighed backend found for volume ' 'with properties: %s', filter_properties['request_spec'].get('volume_type')) return None return self._choose_top_backend(weighed_backends, request_spec) def _schedule_generic_group( self, context: context.RequestContext, group_spec: dict, request_spec_list: list, group_filter_properties: Optional[dict] = None, filter_properties_list: Optional[list] = None) \ -> Optional[WeighedHost]: weighed_backends = self._get_weighted_candidates_generic_group( context, group_spec, request_spec_list, group_filter_properties, filter_properties_list) if not weighed_backends: return None return self._choose_top_backend_generic_group(weighed_backends) def _choose_top_backend(self, weighed_backends: list[WeighedHost], request_spec: dict): top_backend = weighed_backends[0] backend_state = top_backend.obj LOG.debug("Choosing %s", backend_state.backend_id) volume_properties = request_spec['volume_properties'] backend_state.consume_from_volume(volume_properties) return top_backend def _choose_top_backend_generic_group( self, weighed_backends: list[WeighedHost]) -> WeighedHost: top_backend = weighed_backends[0] backend_state = top_backend.obj LOG.debug("Choosing %s", backend_state.backend_id) return top_backend def get_backup_host(self, volume: objects.Volume, availability_zone: Union[str, None], driver=None): return self.host_manager.get_backup_host(volume, availability_zone, driver) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315577.1031184 cinder-27.0.0/cinder/scheduler/filters/0000775000175000017500000000000000000000000017761 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/scheduler/filters/__init__.py0000664000175000017500000000324100000000000022072 0ustar00zuulzuul00000000000000# Copyright (c) 2011 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
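# -----------------------------------------------------------------------------
# Standalone sketch of the resource_backend pruning in _schedule() above: when
# the new volume must land with an existing resource, candidates are compared
# either with or without their pool part, depending on whether the requested
# backend itself names a pool.  The '#pool' split is a simplification of
# volume_utils.extract_host(); names are made up and this is not part of the
# cinder module.
def _restrict_to_resource_backend_sketch(candidate_ids, resource_backend):
    has_pool = '#' in resource_backend
    kept = []
    for backend_id in candidate_ids:
        compared = backend_id if has_pool else backend_id.split('#')[0]
        if compared == resource_backend:
            kept.append(backend_id)
    return kept

candidates = ['node1@lvm#pool_a', 'node1@lvm#pool_b', 'node2@lvm#pool_a']
assert _restrict_to_resource_backend_sketch(candidates, 'node1@lvm') == [
    'node1@lvm#pool_a', 'node1@lvm#pool_b']
# -----------------------------------------------------------------------------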
See the # License for the specific language governing permissions and limitations # under the License. """ Scheduler host filters """ from cinder.scheduler import base_filter class BaseBackendFilter(base_filter.BaseFilter): """Base class for host filters.""" def _filter_one(self, obj, filter_properties): """Return True if the object passes the filter, otherwise False.""" # For backward compatibility with out of tree filters passes_method = getattr(self, 'host_passes', self.backend_passes) return passes_method(obj, filter_properties) def backend_passes(self, host_state, filter_properties): """Return True if the HostState passes the filter, otherwise False. Override this in a subclass. """ raise NotImplementedError() class BackendFilterHandler(base_filter.BaseFilterHandler): def __init__(self, namespace): super(BackendFilterHandler, self).__init__(BaseHostFilter, namespace) # NOTE(geguileo): For backward compatibility with external filters that # inherit from these classes BaseHostFilter = BaseBackendFilter HostFilterHandler = BackendFilterHandler ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/scheduler/filters/affinity_filter.py0000664000175000017500000000772600000000000023525 0ustar00zuulzuul00000000000000# Copyright 2014, eBay Inc. # Copyright 2014, OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from oslo_utils import uuidutils from cinder.scheduler import filters from cinder.volume import api as volume class AffinityFilter(filters.BaseBackendFilter): def __init__(self): self.volume_api = volume.API() def _get_volumes(self, context, affinity_uuids, backend_state): filters = {'id': affinity_uuids, 'deleted': False} if backend_state.cluster_name: filters['cluster_name'] = backend_state.cluster_name else: filters['host'] = backend_state.host return self.volume_api.get_all(context, filters=filters) class DifferentBackendFilter(AffinityFilter): """Schedule volume on a different back-end from a set of volumes.""" def backend_passes(self, backend_state, filter_properties): context = filter_properties['context'] scheduler_hints = filter_properties.get('scheduler_hints') or {} affinity_uuids = scheduler_hints.get('different_host', []) # scheduler hint verification: affinity_uuids can be a list of uuids # or single uuid. The checks here is to make sure every single string # in the list looks like a uuid, otherwise, this filter will fail to # pass. Note that the filter does *NOT* ignore string doesn't look # like a uuid, it is better to fail the request than serving it wrong. if isinstance(affinity_uuids, list): for uuid in affinity_uuids: if uuidutils.is_uuid_like(uuid): continue else: return False elif uuidutils.is_uuid_like(affinity_uuids): affinity_uuids = [affinity_uuids] else: # Not a list, not a string looks like uuid, don't pass it # to DB for query to avoid potential risk. 
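# -----------------------------------------------------------------------------
# Standalone sketch of the scheduler-hint vetting the affinity filters below
# perform: the hint may be a single UUID or a list of UUIDs, and anything that
# does not look like a UUID fails the filter instead of being passed to the
# database.  The stdlib uuid module stands in for oslo_utils.uuidutils; not
# part of the cinder module.
import uuid

def _is_uuid_like_sketch(val):
    try:
        return (str(uuid.UUID(val)).replace('-', '') ==
                str(val).replace('-', '').lower())
    except (TypeError, ValueError, AttributeError):
        return False

hint = ['3f2504e0-4f89-11d3-9a0c-0305e82c3301']
values = hint if isinstance(hint, list) else [hint]
assert all(_is_uuid_like_sketch(v) for v in values)
assert not _is_uuid_like_sketch('not-a-uuid')
# -----------------------------------------------------------------------------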
return False if affinity_uuids: return not self._get_volumes(context, affinity_uuids, backend_state) # With no different_host key return True class SameBackendFilter(AffinityFilter): """Schedule volume on the same back-end as another volume.""" def backend_passes(self, backend_state, filter_properties): context = filter_properties['context'] scheduler_hints = filter_properties.get('scheduler_hints') or {} affinity_uuids = scheduler_hints.get('same_host', []) # scheduler hint verification: affinity_uuids can be a list of uuids # or single uuid. The checks here is to make sure every single string # in the list looks like a uuid, otherwise, this filter will fail to # pass. Note that the filter does *NOT* ignore string doesn't look # like a uuid, it is better to fail the request than serving it wrong. if isinstance(affinity_uuids, list): for uuid in affinity_uuids: if uuidutils.is_uuid_like(uuid): continue else: return False elif uuidutils.is_uuid_like(affinity_uuids): affinity_uuids = [affinity_uuids] else: # Not a list, not a string looks like uuid, don't pass it # to DB for query to avoid potential risk. return False if affinity_uuids: return self._get_volumes(context, affinity_uuids, backend_state) # With no same_host key return True ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/scheduler/filters/availability_zone_filter.py0000664000175000017500000000270300000000000025407 0ustar00zuulzuul00000000000000# Copyright (c) 2011-2012 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from cinder.scheduler import filters class AvailabilityZoneFilter(filters.BaseBackendFilter): """Filters Backends by availability zone.""" # Availability zones do not change within a request run_filter_once_per_request = True def backend_passes(self, backend_state, filter_properties): spec = filter_properties.get('request_spec', {}) availability_zones = spec.get('availability_zones') if availability_zones: return (backend_state.service['availability_zone'] in availability_zones) props = spec.get('resource_properties', {}) availability_zone = props.get('availability_zone') if availability_zone: return (availability_zone == backend_state.service['availability_zone']) return True ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/scheduler/filters/capabilities_filter.py0000664000175000017500000001041600000000000024333 0ustar00zuulzuul00000000000000# Copyright (c) 2011 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_log import log as logging from cinder.objects.fields import VolumeAttachStatus from cinder.scheduler import filters from cinder.scheduler.filters import extra_specs_ops LOG = logging.getLogger(__name__) class CapabilitiesFilter(filters.BaseBackendFilter): """BackendFilter to work with resource (instance & volume) type records.""" def _satisfies_extra_specs(self, capabilities, filter_properties): """Check if capabilities satisfy resource type requirements. Check that the capabilities provided by the services satisfy the extra specs associated with the resource type. """ req_spec = filter_properties.get('request_spec') if req_spec and req_spec.get('operation') == 'extend_volume': # NOTE(erlon): By default, cinder considers that every backend # supports volume online extending. Those backends that don't # support it should report online_extend_support=False. online_extends = capabilities.get('online_extend_support', True) if online_extends is False: vol_prop = req_spec.get('volume_properties') attach_status = vol_prop.get('attach_status') if attach_status != VolumeAttachStatus.DETACHED: LOG.debug("Backend doesn't support attached volume extend") return False resource_type = filter_properties.get('resource_type') if not resource_type: return True extra_specs = resource_type.get('extra_specs', []) if not extra_specs: return True for key, req in extra_specs.items(): # Either not scoped format, or in capabilities scope scope = key.split(':') # Ignore scoped (such as vendor-specific) capabilities if len(scope) > 1 and scope[0] != "capabilities": continue # Strip off prefix if spec started with 'capabilities:' elif scope[0] == "capabilities": del scope[0] cap = capabilities for index in range(len(scope)): try: cap = cap[scope[index]] except (TypeError, KeyError): LOG.debug("Backend doesn't provide capability '%(cap)s' ", {'cap': scope[index]}) return False # Make all capability values a list so we can handle lists cap_list = [cap] if not isinstance(cap, list) else cap # Loop through capability values looking for any match for cap_value in cap_list: if extra_specs_ops.match(cap_value, req): break else: # Nothing matched, so bail out LOG.debug('Volume type extra spec requirement ' '"%(key)s=%(req)s" does not match reported ' 'capability "%(cap)s"', {'key': key, 'req': req, 'cap': cap}) return False return True def backend_passes(self, backend_state, filter_properties): """Return a list of backends that can create resource_type.""" # Note(zhiteng) Currently only Cinder and Nova are using # this filter, so the resource type is either instance or # volume. 
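        # NOTE: illustrative aside, not part of the upstream filter. The
        # scoping rules applied in _satisfies_extra_specs() mean that, for
        # example (hypothetical extra spec keys):
        #
        #   'capabilities:thin_provisioning_support': '<is> True'
        #       -> checked against capabilities['thin_provisioning_support']
        #   'thin_provisioning_support': '<is> True'
        #       -> unscoped, checked against the same capability
        #   'somevendor:persona': '3'
        #       -> vendor-scoped, ignored by this filter entirely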
if not self._satisfies_extra_specs(backend_state.capabilities, filter_properties): LOG.debug("%(backend_state)s fails resource_type extra_specs " "requirements", {'backend_state': backend_state}) return False return True ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/scheduler/filters/capacity_filter.py0000664000175000017500000002324100000000000023477 0ustar00zuulzuul00000000000000# Copyright (c) 2012 Intel # Copyright (c) 2012 OpenStack Foundation # Copyright (c) 2015 EMC Corporation # # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_log import log as logging from cinder.scheduler import filters from cinder import utils LOG = logging.getLogger(__name__) class CapacityFilter(filters.BaseBackendFilter): """Capacity filters based on volume backend's capacity utilization.""" def backend_passes(self, backend_state, filter_properties): """Return True if host has sufficient capacity.""" volid = None # If the volume already exists on this host, don't fail it for # insufficient capacity (e.g., if we are retyping) if backend_state.backend_id == filter_properties.get('vol_exists_on'): return True spec = filter_properties.get('request_spec') if spec: volid = spec.get('volume_id') grouping = 'cluster' if backend_state.cluster_name else 'host' if filter_properties.get('new_size'): # If new_size is passed, we are allocating space to extend a volume requested_size = (int(filter_properties.get('new_size')) - int(filter_properties.get('size'))) LOG.debug('Checking if %(grouping)s %(grouping_name)s can extend ' 'the volume %(id)s in %(size)s GB', {'grouping': grouping, 'grouping_name': backend_state.backend_id, 'id': volid, 'size': requested_size}) else: requested_size = filter_properties.get('size') LOG.debug('Checking if %(grouping)s %(grouping_name)s can create ' 'a %(size)s GB volume (%(id)s)', {'grouping': grouping, 'grouping_name': backend_state.backend_id, 'id': volid, 'size': requested_size}) # requested_size is 0 means that it's a manage request. if requested_size == 0: return True if backend_state.free_capacity_gb is None: # Fail Safe LOG.error("Free capacity not set: " "volume node info collection broken.") return False free_space = backend_state.free_capacity_gb total_space = backend_state.total_capacity_gb reserved = float(backend_state.reserved_percentage) / 100 if free_space in ['infinite', 'unknown']: # NOTE(zhiteng) for those back-ends cannot report actual # available capacity, we assume it is able to serve the # request. Even if it was not, the retry mechanism is # able to handle the failure by rescheduling return True elif total_space in ['infinite', 'unknown']: # If total_space is 'infinite' or 'unknown' and reserved # is 0, we assume the back-ends can serve the request. # If total_space is 'infinite' or 'unknown' and reserved # is not 0, we cannot calculate the reserved space. # float(total_space) will throw an exception. total*reserved # also won't work. 
So the back-ends cannot serve the request. if reserved == 0: return True LOG.debug("Cannot calculate GB of reserved space (%s%%) with " "backend's reported total capacity '%s'", backend_state.reserved_percentage, total_space) return False total = float(total_space) if total <= 0: LOG.warning("Insufficient free space for volume creation. " "Total capacity is %(total).2f on %(grouping)s " "%(grouping_name)s.", {"total": total, "grouping": grouping, "grouping_name": backend_state.backend_id}) return False # NOTE(xyang): If 'provisioning:type' is 'thick' in extra_specs, # we will not use max_over_subscription_ratio and # provisioned_capacity_gb to determine whether a volume can be # provisioned. Instead free capacity will be used to evaluate. thin = True vol_type = filter_properties.get('volume_type', {}) or {} provision_type = vol_type.get('extra_specs', {}).get( 'provisioning:type') if provision_type == 'thick': thin = False thin_support = backend_state.thin_provisioning_support if thin_support: max_over_subscription_ratio = ( backend_state.max_over_subscription_ratio ) else: max_over_subscription_ratio = 1 # NOTE(hemna): this takes into consideration all major factors # including reserved space, free_space (reported by driver), # and over subscription ratio. factors = utils.calculate_capacity_factors( total_space, free_space, backend_state.provisioned_capacity_gb, thin_support, max_over_subscription_ratio, backend_state.reserved_percentage, thin ) virtual_free_space = factors["virtual_free_capacity"] LOG.debug("Storage Capacity factors %s", factors) msg_args = {"grouping_name": backend_state.backend_id, "grouping": grouping, "requested": requested_size, "available": virtual_free_space} # Only evaluate using max_over_subscription_ratio if # thin_provisioning_support is True. Check if the ratio of # provisioned capacity over total capacity has exceeded over # subscription ratio. if (thin and backend_state.thin_provisioning_support and backend_state.max_over_subscription_ratio >= 1): provisioned_ratio = ( (backend_state.provisioned_capacity_gb + requested_size) / ( factors["total_available_capacity"] ) ) LOG.debug("Checking provisioning for request of %s GB. " "Backend: %s", requested_size, backend_state) if provisioned_ratio > backend_state.max_over_subscription_ratio: msg_args = { "provisioned_ratio": provisioned_ratio, "oversub_ratio": backend_state.max_over_subscription_ratio, "grouping": grouping, "grouping_name": backend_state.backend_id, } LOG.warning( "Insufficient free space for thin provisioning. " "The ratio of provisioned capacity over total capacity " "%(provisioned_ratio).2f has exceeded the maximum over " "subscription ratio %(oversub_ratio).2f on %(grouping)s " "%(grouping_name)s.", msg_args) return False else: # Thin provisioning is enabled and projected over-subscription # ratio does not exceed max_over_subscription_ratio. The host # passes if virtual free capacity is enough to # accommodate the volume. Adjusted free virtual capacity is # the currently available free capacity (taking into account # of reserved space) which we can over-subscribe. 
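            # NOTE: illustrative numbers only, not taken from the code above.
            # With provisioned_capacity_gb=1500, requested_size=100 GB,
            # total_available_capacity=1000 and max_over_subscription_ratio=2.0:
            #   provisioned_ratio = (1500 + 100) / 1000 = 1.6 <= 2.0
            # so the backend is not rejected for over-subscription, and the
            # only remaining question below is whether virtual_free_space
            # covers the requested 100 GB.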
msg_args["available"] = virtual_free_space res = virtual_free_space >= requested_size if not res: LOG.warning("Insufficient free virtual space " "(%(available)sGB) to accommodate thin " "provisioned %(requested)sGB volume on " "%(grouping)s %(grouping_name)s.", msg_args) else: LOG.debug("Space information for volume creation " "on %(grouping)s %(grouping_name)s " "(requested / avail): " "%(requested)s/%(available)s", msg_args) return res elif thin and backend_state.thin_provisioning_support: LOG.warning("Filtering out %(grouping)s %(grouping_name)s " "with an invalid maximum over subscription ratio " "of %(oversub_ratio).2f. The ratio should be a " "minimum of 1.0.", {"oversub_ratio": backend_state.max_over_subscription_ratio, "grouping": grouping, "grouping_name": backend_state.backend_id}) return False if virtual_free_space < requested_size: LOG.warning("Insufficient free space for volume creation " "on %(grouping)s %(grouping_name)s (requested / " "avail): %(requested)s/%(available)s", msg_args) return False LOG.debug("Space information for volume creation " "on %(grouping)s %(grouping_name)s (requested / avail): " "%(requested)s/%(available)s", msg_args) return True ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/scheduler/filters/driver_filter.py0000664000175000017500000001510700000000000023177 0ustar00zuulzuul00000000000000# Copyright (c) 2014 Hewlett-Packard Development Company, L.P. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_log import log as logging from cinder.scheduler.evaluator import evaluator from cinder.scheduler import filters LOG = logging.getLogger(__name__) class DriverFilter(filters.BaseBackendFilter): """DriverFilter filters backend based on a 'filter function' and metrics. DriverFilter filters based on volume backend's provided 'filter function' and metrics. """ def backend_passes(self, backend_state, filter_properties): """Determines if a backend has a passing filter_function or not.""" stats = self._generate_stats(backend_state, filter_properties) LOG.debug("Checking backend '%s'", stats[0]['backend_stats']['backend_id']) # Run the filter function for all possible storage_protocol values # (e.g. FC, fibre_channel) and if any of them passes the filter, then # the backend passes. result = any(self._check_filter_function(stat) for stat in stats) LOG.debug("Result: %s", result) LOG.debug("Done checking backend '%s'", stats[0]['backend_stats']['backend_id']) return result def _check_filter_function(self, stats): """Checks if a volume passes a backend's filter function. Returns a tuple in the format (filter_passing, filter_invalid). Both values are booleans. """ if stats['filter_function'] is None: LOG.debug("Filter function not set :: passing backend") return True try: filter_result = self._run_evaluator(stats['filter_function'], stats) except Exception as ex: # Warn the admin for now that there is an error in the # filter function. 
LOG.warning("Error in filtering function " "'%(function)s' : '%(error)s' :: failing backend", {'function': stats['filter_function'], 'error': ex, }) return False return filter_result def _run_evaluator(self, func, stats): """Evaluates a given function using the provided available stats.""" backend_stats = stats['backend_stats'] backend_caps = stats['backend_caps'] extra_specs = stats['extra_specs'] qos_specs = stats['qos_specs'] volume_stats = stats['volume_stats'] LOG.debug('Running evaluator: extra_specs: %(extra)s\n' 'stats: %(stats)s\n' 'capabilities: %(capabilities)s\n' 'volume: %(volume)s\n' 'qos: %(qos)s', {'extra': extra_specs, 'stats': backend_stats, 'capabilities': backend_caps, 'volume': volume_stats, 'qos': qos_specs}) result = evaluator.evaluate( func, extra=extra_specs, stats=backend_stats, capabilities=backend_caps, volume=volume_stats, qos=qos_specs) return result def _generate_stats(self, backend_state, filter_properties): """Generates statistics from backend and volume data. Returns a list where each entry corresponds to a different storage_protocol value for those backends that use a storage protocol that has variants, but only if the function actually uses the protocol. """ backend_stats = { 'host': backend_state.host, 'cluster_name': backend_state.cluster_name, 'backend_id': backend_state.backend_id, 'volume_backend_name': backend_state.volume_backend_name, 'vendor_name': backend_state.vendor_name, 'driver_version': backend_state.driver_version, 'storage_protocol': backend_state.storage_protocol, 'QoS_support': backend_state.QoS_support, 'total_capacity_gb': backend_state.total_capacity_gb, 'allocated_capacity_gb': backend_state.allocated_capacity_gb, 'free_capacity_gb': backend_state.free_capacity_gb, 'reserved_percentage': backend_state.reserved_percentage, 'updated': backend_state.updated, } backend_caps = backend_state.capabilities filter_function = None uses_protocol = False if ('filter_function' in backend_caps and backend_caps['filter_function'] is not None): filter_function = str(backend_caps['filter_function']) uses_protocol = 'storage_protocol' in filter_function qos_specs = filter_properties.get('qos_specs', {}) volume_type = filter_properties.get('volume_type', {}) extra_specs = volume_type.get('extra_specs', {}) request_spec = filter_properties.get('request_spec', {}) volume_stats = request_spec.get('volume_properties', {}) stats = { 'backend_stats': backend_stats, 'backend_caps': backend_caps, 'extra_specs': extra_specs, 'qos_specs': qos_specs, 'volume_stats': volume_stats, 'volume_type': volume_type, 'filter_function': filter_function, } # Only create individual entries for the different protocols variants # if the function uses the protocol and there are variants. if uses_protocol and isinstance(backend_state.storage_protocol, list): result = [] for protocol in backend_state.storage_protocol: new_stats = stats.copy() new_stats['backend_stats'] = dict(new_stats['backend_stats']) new_stats['backend_stats']['storage_protocol'] = protocol new_stats['backend_caps'] = dict(new_stats['backend_caps']) new_stats['backend_caps']['storage_protocol'] = protocol result.append(new_stats) else: result = [stats] return result ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/scheduler/filters/extra_specs_ops.py0000664000175000017500000000460000000000000023534 0ustar00zuulzuul00000000000000# Copyright (c) 2011 OpenStack Foundation. # All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import operator from oslo_utils import strutils # 1. The following operations are supported: # =, s==, s!=, s>=, s>, s<=, s<, <in>, <is>, <or>, ==, !=, >=, <= # 2. Note that <or> is handled in a different way below. # 3. If the first word in the extra_specs is not one of the operators, # it is ignored. _op_methods = {'=': lambda x, y: float(x) >= float(y), '<in>': lambda x, y: y in x, '<is>': lambda x, y: (strutils.bool_from_string(x) is strutils.bool_from_string(y)), '==': lambda x, y: float(x) == float(y), '!=': lambda x, y: float(x) != float(y), '>=': lambda x, y: float(x) >= float(y), '<=': lambda x, y: float(x) <= float(y), 's==': operator.eq, 's!=': operator.ne, 's<': operator.lt, 's<=': operator.le, 's>': operator.gt, 's>=': operator.ge} def match(value, req): if req is None: if value is None: return True else: return False words = req.split() op = method = None if words: op = words.pop(0) method = _op_methods.get(op) if op != '<or>' and not method: return value == req if value is None: return False if op == '<or>': # Ex: <or> v1 <or> v2 <or> v3 while True: if words.pop(0) == value: return True if not words: break op = words.pop(0) # remove a keyword <or> if not words: break return False try: if words and method(value, words[0]): return True except ValueError: pass return False ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/scheduler/filters/ignore_attempted_hosts_filter.py0000664000175000017500000000410700000000000026454 0ustar00zuulzuul00000000000000# Copyright (c) 2011 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_log import log as logging from cinder.scheduler import filters LOG = logging.getLogger(__name__) class IgnoreAttemptedHostsFilter(filters.BaseBackendFilter): """Filter out previously attempted hosts A host passes this filter if it has not already been attempted for scheduling. The scheduler needs to add previously attempted hosts to the 'retry' key of filter_properties in order for this to work correctly.
For example:: { 'retry': { 'backends': ['backend1', 'backend2'], 'num_attempts': 3, } } """ def backend_passes(self, backend_state, filter_properties): """Skip nodes that have already been attempted.""" attempted = filter_properties.get('retry') if not attempted: # Re-scheduling is disabled LOG.debug("Re-scheduling is disabled.") return True # TODO(geguileo): In P - Just use backends backends = attempted.get('backends', attempted.get('hosts', [])) backend = backend_state.backend_id passes = backend not in backends pass_msg = "passes" if passes else "fails" LOG.debug("Backend %(backend)s %(pass_msg)s. Previously tried " "backends: %(backends)s", {'backend': backend, 'pass_msg': pass_msg, 'backends': backends}) return passes ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/scheduler/filters/instance_locality_filter.py0000664000175000017500000001021500000000000025403 0ustar00zuulzuul00000000000000# -*- coding: utf-8 -*- # Copyright 2014, Adrien Vergé # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from oslo_log import log as logging from oslo_utils import uuidutils from cinder.compute import nova from cinder import exception from cinder.i18n import _ from cinder.scheduler import filters from cinder.volume import volume_utils LOG = logging.getLogger(__name__) HINT_KEYWORD = 'local_to_instance' INSTANCE_HOST_PROP = 'OS-EXT-SRV-ATTR:host' REQUESTS_TIMEOUT = 5 class InstanceLocalityFilter(filters.BaseBackendFilter): """Schedule volume on the same host as a given instance. This filter enables selection of a storage back-end located on the host where the instance's hypervisor is running. This provides data locality: the instance and the volume are located on the same physical machine. In order to work: - The Extended Server Attributes extension needs to be active in Nova (this is by default), so that the 'OS-EXT-SRV-ATTR:host' property is returned when requesting instance info. - Either an account with privileged rights for Nova must be configured in Cinder configuration (configure a keystone authentication plugin in the [nova] section), or the user making the call needs to have sufficient rights (see 'extended_server_attributes' in Nova policy). """ def __init__(self): # Cache Nova API answers directly into the Filter object. # Since a BaseBackendFilter instance lives only during the volume's # scheduling, the cache is re-created for every new volume creation. 
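        # NOTE: illustrative aside, not part of the upstream filter
        # (hypothetical UUID). The hint this filter reacts to is supplied at
        # volume creation time, e.g.:
        #
        #   cinder create --hint local_to_instance=0aa1c3a7-6f21-4f6d-8c2b-2b8f5e3d9c10 10
        #
        # backend_passes() below only queries Nova when that hint is present.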
self._cache = {} super(InstanceLocalityFilter, self).__init__() def backend_passes(self, backend_state, filter_properties): context = filter_properties['context'] backend = volume_utils.extract_host(backend_state.backend_id, 'host') scheduler_hints = filter_properties.get('scheduler_hints') or {} instance_uuid = scheduler_hints.get(HINT_KEYWORD, None) # Without 'local_to_instance' hint if not instance_uuid: return True if not uuidutils.is_uuid_like(instance_uuid): raise exception.InvalidUUID(uuid=instance_uuid) # TODO(adrienverge): Currently it is not recommended to allow instance # migrations for hypervisors where this hint will be used. In case of # instance migration, a previously locally-created volume will not be # automatically migrated. Also in case of instance migration during the # volume's scheduling, the result is unpredictable. A future # enhancement would be to subscribe to Nova migration events (e.g. via # Ceilometer). # First, lookup for already-known information in local cache if instance_uuid in self._cache: return self._cache[instance_uuid] == backend server = nova.API().get_server(context, instance_uuid, privileged_user=True, timeout=REQUESTS_TIMEOUT) if not hasattr(server, INSTANCE_HOST_PROP): LOG.warning('Hint "%s" dropped because Nova did not return ' 'enough information. Either Nova policy needs to ' 'be changed or a privileged account for Nova ' 'should be specified in conf.', HINT_KEYWORD) raise exception.CinderException(_('Hint "%s" not supported.') % HINT_KEYWORD) self._cache[instance_uuid] = getattr(server, INSTANCE_HOST_PROP) # Match if given instance is hosted on backend return self._cache[instance_uuid] == backend ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/scheduler/filters/json_filter.py0000664000175000017500000001203600000000000022653 0ustar00zuulzuul00000000000000# Copyright (c) 2011 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import operator from oslo_serialization import jsonutils from cinder.scheduler import filters class JsonFilter(filters.BaseBackendFilter): """Backend filter for simple JSON-based grammar for selecting backends. If you want to choose one of your backend, make a query hint, for example: cinder create --hint query='["=", "$backend_id", "rbd:vol@ceph#cloud"]' """ def _op_compare(self, args, op): """Compare first item of args with the rest using specified operator. Returns True if the specified operator can successfully compare the first item in the args with all the rest. Will return False if only one item is in the list. 
""" if len(args) < 2: return False if op is operator.contains: bad = args[0] not in args[1:] else: bad = [arg for arg in args[1:] if not op(args[0], arg)] return not bool(bad) def _equals(self, args): """First term is == all the other terms.""" return self._op_compare(args, operator.eq) def _less_than(self, args): """First term is < all the other terms.""" return self._op_compare(args, operator.lt) def _greater_than(self, args): """First term is > all the other terms.""" return self._op_compare(args, operator.gt) def _in(self, args): """First term is in set of remaining terms.""" return self._op_compare(args, operator.contains) def _less_than_equal(self, args): """First term is <= all the other terms.""" return self._op_compare(args, operator.le) def _greater_than_equal(self, args): """First term is >= all the other terms.""" return self._op_compare(args, operator.ge) def _not(self, args): """Flip each of the arguments.""" return [not arg for arg in args] def _or(self, args): """True if any arg is True.""" return any(args) def _and(self, args): """True if all args are True.""" return all(args) commands = { '=': _equals, '<': _less_than, '>': _greater_than, 'in': _in, '<=': _less_than_equal, '>=': _greater_than_equal, 'not': _not, 'or': _or, 'and': _and, } def _parse_string(self, string, backend_state): """Parse capability lookup strings. Strings prefixed with $ are capability lookups in the form '$variable' where 'variable' is an attribute in the BackendState class. If $variable is a dictionary, you may use: $variable.dictkey """ if not string: return None if not string.startswith("$"): return string path = string[1:].split(".") obj = getattr(backend_state, path[0], None) if obj is None: return None for item in path[1:]: obj = obj.get(item) if obj is None: return None return obj def _process_filter(self, query, backend_state): """Recursively parse the query structure.""" if not query: return True cmd = query[0] method = self.commands[cmd] cooked_args = [] for arg in query[1:]: if isinstance(arg, list): arg = self._process_filter(arg, backend_state) elif isinstance(arg, str): arg = self._parse_string(arg, backend_state) if arg is not None: cooked_args.append(arg) result = method(self, cooked_args) return result def backend_passes(self, backend_state, filter_properties): """Return a list of backends that can fulfill query requirements.""" # TODO(zhiteng) Add description for filter_properties structure # and scheduler_hints. try: query = filter_properties['scheduler_hints']['query'] except KeyError: query = None if not query: return True # NOTE(comstud): Not checking capabilities or service for # enabled/disabled so that a provided json filter can decide result = self._process_filter(jsonutils.loads(query), backend_state) if isinstance(result, list): # If any succeeded, include the backend result = any(result) if result: # Filter it out. 
return True return False ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315577.1031184 cinder-27.0.0/cinder/scheduler/flows/0000775000175000017500000000000000000000000017443 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/scheduler/flows/__init__.py0000664000175000017500000000000000000000000021542 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/scheduler/flows/create_volume.py0000664000175000017500000002005000000000000022644 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from typing import Any, Optional from oslo_log import log as logging from oslo_utils import excutils import taskflow.engines import taskflow.engines.base from taskflow.patterns import linear_flow from cinder import context from cinder import exception from cinder import flow_utils from cinder.message import api as message_api from cinder.message import message_field from cinder import objects from cinder import rpc from cinder import utils from cinder.volume.flows import common LOG = logging.getLogger(__name__) ACTION = 'volume:create' class ExtractSchedulerSpecTask(flow_utils.CinderTask): """Extracts a spec object from a partial and/or incomplete request spec. Reversion strategy: N/A """ default_provides = set(['request_spec']) def __init__(self, **kwargs): super(ExtractSchedulerSpecTask, self).__init__(addons=[ACTION], **kwargs) def _populate_request_spec(self, volume: objects.Volume, snapshot_id: Optional[str], image_id: Optional[str], backup_id: Optional[str]) -> dict[str, Any]: # Create the full request spec using the volume object. # # NOTE(dulek): At this point, a volume can be deleted before it gets # scheduled. If a delete API call is made, the volume gets instantly # delete and scheduling will fail when it tries to update the DB entry # (with the host) in ScheduleCreateVolumeTask below. volume_type_id = volume.volume_type_id vol_type = volume.volume_type return { 'volume_id': volume.id, 'snapshot_id': snapshot_id, 'image_id': image_id, 'backup_id': backup_id, 'volume_properties': { 'size': utils.as_int(volume.size, quiet=False), 'availability_zone': volume.availability_zone, 'volume_type_id': volume_type_id, }, 'volume_type': list(dict(vol_type).items()), } def execute(self, context: context.RequestContext, request_spec: Optional[dict], volume: objects.Volume, snapshot_id: Optional[str], image_id: Optional[str], backup_id: Optional[str]) -> dict[str, Any]: # For RPC version < 1.2 backward compatibility if request_spec is None: request_spec = self._populate_request_spec(volume, snapshot_id, image_id, backup_id) return { 'request_spec': request_spec, } class ScheduleCreateVolumeTask(flow_utils.CinderTask): """Activates a scheduler driver and handles any subsequent failures. 
Notification strategy: on failure the scheduler rpc notifier will be activated and a notification will be emitted indicating what errored, the reason, and the request (and misc. other data) that caused the error to be triggered. Reversion strategy: N/A """ FAILURE_TOPIC = "scheduler.create_volume" def __init__(self, driver_api, **kwargs): super(ScheduleCreateVolumeTask, self).__init__(addons=[ACTION], **kwargs) self.driver_api = driver_api self.message_api = message_api.API() def _handle_failure(self, context: context.RequestContext, request_spec: dict, cause: Exception) -> None: try: self._notify_failure(context, request_spec, cause) finally: LOG.error("Failed to run task %(name)s: %(cause)s", {'cause': cause, 'name': self.name}) @utils.if_notifications_enabled def _notify_failure(self, context: context.RequestContext, request_spec: dict, cause: Exception) -> None: """When scheduling fails send out an event that it failed.""" payload = { 'request_spec': request_spec, 'volume_properties': request_spec.get('volume_properties', {}), 'volume_id': request_spec['volume_id'], 'state': 'error', 'method': 'create_volume', 'reason': cause, } try: rpc.get_notifier('scheduler').error(context, self.FAILURE_TOPIC, payload) except exception.CinderException: LOG.exception("Failed notifying on %(topic)s " "payload %(payload)s", {'topic': self.FAILURE_TOPIC, 'payload': payload}) def execute(self, context: context.RequestContext, request_spec: dict, filter_properties: dict, volume: objects.Volume) -> None: try: self.driver_api.schedule_create_volume(context, request_spec, filter_properties) except Exception as e: self.message_api.create( context, message_field.Action.SCHEDULE_ALLOCATE_VOLUME, resource_uuid=request_spec['volume_id'], exception=e) # An error happened, notify on the scheduler queue and log that # this happened and set the volume to errored out and reraise the # error *if* exception caught isn't NoValidBackend. Otherwise *do # not* reraise (since what's the point?) with excutils.save_and_reraise_exception( reraise=not isinstance(e, exception.NoValidBackend)): try: self._handle_failure(context, request_spec, e) finally: common.error_out(volume, reason=e) def get_flow(context: context.RequestContext, driver_api, request_spec: Optional[dict] = None, filter_properties: Optional[dict] = None, volume: Optional[objects.Volume] = None, snapshot_id: Optional[str] = None, image_id: Optional[str] = None, backup_id: Optional[str] = None) -> taskflow.engines.base.Engine: """Constructs and returns the scheduler entrypoint flow. This flow will do the following: 1. Inject keys & values for dependent tasks. 2. Extract a scheduler specification from the provided inputs. 3. Use provided scheduler driver to select host and pass volume creation request further. """ create_what = { 'context': context, 'raw_request_spec': request_spec, 'filter_properties': filter_properties, 'volume': volume, 'snapshot_id': snapshot_id, 'image_id': image_id, 'backup_id': backup_id, } flow_name = ACTION.replace(":", "_") + "_scheduler" scheduler_flow = linear_flow.Flow(flow_name) # This will extract and clean the spec from the starting values. scheduler_flow.add(ExtractSchedulerSpecTask( rebind={'request_spec': 'raw_request_spec'})) # This will activate the desired scheduler driver (and handle any # driver related failures appropriately). scheduler_flow.add(ScheduleCreateVolumeTask(driver_api)) # Now load (but do not run) the flow using the provided initial data. 
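    # NOTE: rough usage sketch, not part of this module. The scheduler
    # manager is expected to obtain the engine returned below and run it,
    # along the lines of:
    #
    #   flow_engine = create_volume.get_flow(context, driver_api,
    #                                        request_spec, filter_properties,
    #                                        volume=volume)
    #   flow_engine.run()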
return taskflow.engines.load(scheduler_flow, store=create_what) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/scheduler/host_manager.py0000664000175000017500000012725400000000000021345 0ustar00zuulzuul00000000000000# Copyright (c) 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Manage backends in the current zone.""" from collections import abc import random import typing from typing import (Any, Iterable, Optional, Type, Union) from oslo_config import cfg from oslo_log import log as logging from oslo_utils import importutils from oslo_utils import strutils from oslo_utils import timeutils from cinder.common import constants from cinder import context as cinder_context from cinder import exception from cinder import objects from cinder.scheduler import filters from cinder import utils from cinder.volume import volume_types from cinder.volume import volume_utils # FIXME: This file should be renamed to backend_manager, we should also rename # HostManager class, and scheduler_host_manager option, and also the weight # classes, and add code to maintain backward compatibility. host_manager_opts = [ cfg.ListOpt('scheduler_default_filters', default=[ 'AvailabilityZoneFilter', 'CapacityFilter', 'CapabilitiesFilter' ], help='Which filter class names to use for filtering hosts ' 'when not specified in the request.'), cfg.ListOpt('scheduler_default_weighers', default=[ 'CapacityWeigher' ], help='Which weigher class names to use for weighing hosts.'), cfg.StrOpt('scheduler_weight_handler', default='cinder.scheduler.weights.OrderedHostWeightHandler', help='Which handler to use for selecting the host/pool ' 'after weighing'), ] CONF = cfg.CONF CONF.register_opts(host_manager_opts) CONF.import_opt('scheduler_driver', 'cinder.scheduler.manager') CONF.import_opt('max_over_subscription_ratio', 'cinder.volume.driver') LOG = logging.getLogger(__name__) class ReadOnlyDict(abc.Mapping): """A read-only dict.""" def __init__(self, source: Optional[Union[dict, 'ReadOnlyDict']] = None): self.data: dict if source is not None: self.data = dict(source) else: self.data = {} def __getitem__(self, key): return self.data[key] def __iter__(self): return iter(self.data) def __len__(self) -> int: return len(self.data) def __repr__(self) -> str: return '%s(%r)' % (self.__class__.__name__, self.data) class BackendState(object): """Mutable and immutable information tracked for a volume backend.""" def __init__(self, host: str, cluster_name: Optional[str], capabilities: Union[Optional[ReadOnlyDict], Optional[dict]] = None, service=None): # NOTE(geguileo): We have a circular dependency between BackendState # and PoolState and we resolve it with an instance attribute instead # of a class attribute that we would assign after the PoolState # declaration because this way we avoid splitting the code. 
self.pool_state_cls: Type[PoolState] = PoolState self.capabilities: Optional[ReadOnlyDict] = None self.service: Optional[ReadOnlyDict] = None self.host: str = host self.cluster_name: Optional[str] = cluster_name self.update_capabilities(capabilities, service) self.volume_backend_name = None self.vendor_name = None self.driver_version: Optional[int] = 0 self.storage_protocol = None self.QoS_support = False # Mutable available resources. # These will change as resources are virtually "consumed". self.total_capacity_gb = 0 # capacity has been allocated in cinder POV, which should be # sum(vol['size'] for vol in vols_on_hosts) self.allocated_capacity_gb = 0 self.free_capacity_gb = None self.reserved_percentage = 0 # The apparent allocated space indicating how much capacity # has been provisioned. This could be the sum of sizes of # all volumes on a backend, which could be greater than or # equal to the allocated_capacity_gb. self.provisioned_capacity_gb = 0 self.max_over_subscription_ratio = 1.0 self.thin_provisioning_support = False self.thick_provisioning_support = False # Does this backend support attaching a volume to more than # one host/instance? self.multiattach: bool = False self.filter_function = None self.goodness_function = 0 # PoolState for all pools self.pools: dict = {} self.updated = None @property def backend_id(self) -> str: return self.cluster_name or self.host def update_capabilities( self, capabilities: Optional[Union[dict, ReadOnlyDict]] = None, service: Optional[dict] = None) -> None: # Read-only capability dicts if capabilities is None: capabilities = {} self.capabilities = ReadOnlyDict(capabilities) if service is None: service = {} self.service = ReadOnlyDict(service) def update_from_volume_capability(self, capability: dict[str, Any], service=None) -> None: """Update information about a host from its volume_node info. 'capability' is the status info reported by volume backend, a typical capability looks like this: .. 
code-block:: python { capability = { 'volume_backend_name': 'Local iSCSI', # 'vendor_name': 'OpenStack', # backend level 'driver_version': '1.0', # mandatory/fixed 'storage_protocol': 'iSCSI', # stats&capabilities 'active_volumes': 10, # 'IOPS_provisioned': 30000, # optional custom 'fancy_capability_1': 'eat', # stats & capabilities 'fancy_capability_2': 'drink', # 'pools': [ {'pool_name': '1st pool', # 'total_capacity_gb': 500, # mandatory stats for 'free_capacity_gb': 230, # pools 'allocated_capacity_gb': 270, # 'QoS_support': 'False', # 'reserved_percentage': 0, # 'dying_disks': 100, # 'super_hero_1': 'spider-man', # optional custom 'super_hero_2': 'flash', # stats & capabilities 'super_hero_3': 'neoncat' # }, {'pool_name': '2nd pool', 'total_capacity_gb': 1024, 'free_capacity_gb': 1024, 'allocated_capacity_gb': 0, 'QoS_support': 'False', 'reserved_percentage': 0, 'dying_disks': 200, 'super_hero_1': 'superman', 'super_hero_2': ' ', 'super_hero_2': 'Hulk' } ] } } """ self.update_capabilities(capability, service) if capability: if self.updated and self.updated > capability['timestamp']: return # Update backend level info self.update_backend(capability) # Update pool level info self.update_pools(capability, service) def update_pools(self, capability: Optional[dict], service) -> None: """Update storage pools information from backend reported info.""" if not capability: return pools = capability.get('pools', None) active_pools = set() if pools and isinstance(pools, list): # Update all pools stats according to information from list # of pools in volume capacity for pool_cap in pools: pool_name = pool_cap['pool_name'] self._append_backend_info(pool_cap) cur_pool = self.pools.get(pool_name, None) if not cur_pool: # Add new pool cur_pool = self.pool_state_cls(self.host, self.cluster_name, pool_cap, pool_name) self.pools[pool_name] = cur_pool cur_pool.update_from_volume_capability(pool_cap, service) active_pools.add(pool_name) elif pools is None: # To handle legacy driver that doesn't report pool # information in the capability, we have to prepare # a pool from backend level info, or to update the one # we created in self.pools. pool_name = self.volume_backend_name if pool_name is None: # To get DEFAULT_POOL_NAME pool_name = volume_utils.extract_host(self.host, 'pool', True) pool_name = typing.cast(str, pool_name) if len(self.pools) == 0: # No pool was there single_pool = self.pool_state_cls(self.host, self.cluster_name, capability, pool_name) self._append_backend_info(capability) self.pools[pool_name] = single_pool else: # this is an update from legacy driver try: single_pool = self.pools[pool_name] except KeyError: single_pool = self.pool_state_cls(self.host, self.cluster_name, capability, pool_name) self._append_backend_info(capability) self.pools[pool_name] = single_pool single_pool.update_from_volume_capability(capability, service) active_pools.add(pool_name) # remove non-active pools from self.pools nonactive_pools = set(self.pools.keys()) - active_pools for pool in nonactive_pools: LOG.debug("Removing non-active pool %(pool)s @ %(host)s " "from scheduler cache.", {'pool': pool, 'host': self.host}) del self.pools[pool] def _append_backend_info(self, pool_cap: dict[str, Any]) -> None: # Fill backend level info to pool if needed. 
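        # NOTE: illustrative aside, not part of the upstream code. A pool
        # capability such as {'pool_name': 'pool1', 'total_capacity_gb': 500}
        # picks up volume_backend_name, vendor_name, driver_version,
        # storage_protocol, timestamp and the filter/goodness functions from
        # the backend-level capabilities below, while any of those keys the
        # pool reports itself are left untouched.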
if not pool_cap.get('volume_backend_name', None): pool_cap['volume_backend_name'] = self.volume_backend_name protocol = pool_cap.get('storage_protocol', None) if protocol: # Protocols that have variants are replaced with ALL the variants storage_protocol = self.get_storage_protocol_variants(protocol) else: # Backend protocol has already been transformed with variants storage_protocol = self.storage_protocol pool_cap['storage_protocol'] = storage_protocol if not pool_cap.get('vendor_name', None): pool_cap['vendor_name'] = self.vendor_name if not pool_cap.get('driver_version', None): pool_cap['driver_version'] = self.driver_version if not pool_cap.get('timestamp', None): pool_cap['timestamp'] = self.updated self.capabilities = typing.cast(ReadOnlyDict, self.capabilities) if ('filter_function' not in pool_cap and 'filter_function' in self.capabilities): pool_cap['filter_function'] = self.capabilities['filter_function'] if ('goodness_function' not in pool_cap and 'goodness_function' in self.capabilities): pool_cap['goodness_function'] = ( self.capabilities['goodness_function']) def update_backend(self, capability: dict) -> None: self.volume_backend_name = capability.get('volume_backend_name', None) self.vendor_name = capability.get('vendor_name', None) self.driver_version = capability.get('driver_version', None) # Protocols that have variants are replaced with ALL the variants protocol = capability.get('storage_protocol', None) self.storage_protocol = self.get_storage_protocol_variants(protocol) if 'storage_protocol' in capability: capability['storage_protocol'] = self.storage_protocol self.updated = capability['timestamp'] def consume_from_volume(self, volume: objects.Volume, update_time: bool = True) -> None: """Incrementally update host state from a volume.""" volume_gb = volume['size'] self.allocated_capacity_gb += volume_gb self.provisioned_capacity_gb += volume_gb if self.free_capacity_gb == 'infinite': # There's virtually infinite space on back-end pass elif self.free_capacity_gb == 'unknown': # Unable to determine the actual free space on back-end pass else: self.free_capacity_gb -= volume_gb if update_time: self.updated = timeutils.utcnow() LOG.debug("Consumed %s GB from backend: %s", volume['size'], self) def __repr__(self) -> str: # FIXME(zhiteng) backend level free_capacity_gb isn't as # meaningful as it used to be before pool is introduced, we'd # come up with better representation of HostState. 
grouping = 'cluster' if self.cluster_name else 'host' grouping_name = self.backend_id return ("%(grouping)s '%(grouping_name)s': " "free_capacity_gb: %(free_capacity_gb)s, " "total_capacity_gb: %(total_capacity_gb)s, " "allocated_capacity_gb: %(allocated_capacity_gb)s, " "max_over_subscription_ratio: %(mosr)s, " "reserved_percentage: %(reserved_percentage)s, " "provisioned_capacity_gb: %(provisioned_capacity_gb)s, " "thin_provisioning_support: %(thin_provisioning_support)s, " "thick_provisioning_support: %(thick)s, " "pools: %(pools)s, " "updated at: %(updated)s" % {'grouping': grouping, 'grouping_name': grouping_name, 'free_capacity_gb': self.free_capacity_gb, 'total_capacity_gb': self.total_capacity_gb, 'allocated_capacity_gb': self.allocated_capacity_gb, 'mosr': self.max_over_subscription_ratio, 'reserved_percentage': self.reserved_percentage, 'provisioned_capacity_gb': self.provisioned_capacity_gb, 'thin_provisioning_support': self.thin_provisioning_support, 'thick': self.thick_provisioning_support, 'pools': self.pools, 'updated': self.updated}) @staticmethod def get_storage_protocol_variants(storage_protocol): if storage_protocol in constants.ISCSI_VARIANTS: return constants.ISCSI_VARIANTS if storage_protocol in constants.FC_VARIANTS: return constants.FC_VARIANTS if storage_protocol in constants.NFS_VARIANTS: return constants.NFS_VARIANTS if storage_protocol in constants.NVMEOF_VARIANTS: return constants.NVMEOF_VARIANTS return storage_protocol class PoolState(BackendState): def __init__(self, host: str, cluster_name: Optional[str], capabilities: Union[Optional[ReadOnlyDict], Optional[dict]], pool_name: str): new_host = volume_utils.append_host(host, pool_name) assert new_host is not None new_cluster = volume_utils.append_host(cluster_name, pool_name) super(PoolState, self).__init__(new_host, new_cluster, capabilities) self.pool_name = pool_name # No pools in pool self.pools: dict = {} def update_from_volume_capability(self, capability: dict[str, Any], service=None) -> None: """Update information about a pool from its volume_node info.""" LOG.debug("Updating capabilities for %s: %s", self.host, capability) self.update_capabilities(capability, service) if capability: if self.updated and self.updated > capability['timestamp']: return self.update_backend(capability) self.total_capacity_gb = capability.get('total_capacity_gb', 0) self.free_capacity_gb = capability.get('free_capacity_gb', 0) self.allocated_capacity_gb = capability.get( 'allocated_capacity_gb', 0) self.QoS_support = capability.get('QoS_support', False) self.reserved_percentage = capability.get('reserved_percentage', 0) # provisioned_capacity_gb is the apparent total capacity of # all the volumes created on a backend, which is greater than # or equal to allocated_capacity_gb, which is the apparent # total capacity of all the volumes created on a backend # in Cinder. Using allocated_capacity_gb as the default of # provisioned_capacity_gb if it is not set. 
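            # NOTE: illustrative aside, not part of the upstream code: a pool
            # reporting allocated_capacity_gb=270 and no
            # provisioned_capacity_gb ends up with provisioned_capacity_gb=270
            # through the default applied below.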
self.provisioned_capacity_gb = capability.get( 'provisioned_capacity_gb', self.allocated_capacity_gb) self.thin_provisioning_support = capability.get( 'thin_provisioning_support', False) self.thick_provisioning_support = capability.get( 'thick_provisioning_support', False) self.max_over_subscription_ratio = ( utils.calculate_max_over_subscription_ratio( capability, CONF.max_over_subscription_ratio)) self.multiattach = capability.get('multiattach', False) self.filter_function = capability.get('filter_function', None) self.goodness_function = capability.get('goodness_function', 0) @typing.no_type_check def update_pools(self, capability): # Do nothing, since we don't have pools within pool, yet pass class HostManager(object): """Base HostManager class.""" backend_state_cls = BackendState ALLOWED_SERVICE_NAMES = ('volume', 'backup') REQUIRED_KEYS = frozenset([ 'pool_name', 'total_capacity_gb', 'free_capacity_gb', 'allocated_capacity_gb', 'provisioned_capacity_gb', 'thin_provisioning_support', 'thick_provisioning_support', 'max_over_subscription_ratio', 'reserved_percentage']) def __init__(self): self.service_states = {} # { : {: {cap k : v}}} self.backend_state_map: dict[str, BackendState] = {} self.backup_service_states = {} self.filter_handler = filters.BackendFilterHandler('cinder.scheduler.' 'filters') self.filter_classes = self.filter_handler.get_all_classes() self.enabled_filters = self._choose_backend_filters( CONF.scheduler_default_filters) self.weight_handler = importutils.import_object( CONF.scheduler_weight_handler, 'cinder.scheduler.weights') self.weight_classes = self.weight_handler.get_all_classes() self._no_capabilities_backends = set() # Services without capabilities self._update_backend_state_map(cinder_context.get_admin_context()) self.service_states_last_update = {} def _choose_backend_filters(self, filter_cls_names) -> list: """Return a list of available filter names. This function checks input filter names against a predefined set of acceptable filters (all loaded filters). If input is None, it uses CONF.scheduler_default_filters instead. """ if not isinstance(filter_cls_names, (list, tuple)): filter_cls_names = [filter_cls_names] good_filters = [] bad_filters = [] for filter_name in filter_cls_names: found_class = False for cls in self.filter_classes: if cls.__name__ == filter_name: found_class = True good_filters.append(cls) break if not found_class: bad_filters.append(filter_name) if bad_filters: raise exception.SchedulerHostFilterNotFound( filter_name=", ".join(bad_filters)) return good_filters def _choose_backend_weighers( self, weight_cls_names: Optional[list[str]]) -> list: """Return a list of available weigher names. This function checks input weigher names against a predefined set of acceptable weighers (all loaded weighers). If input is None, it uses CONF.scheduler_default_weighers instead. 
""" if weight_cls_names is None: weight_cls_names = CONF.scheduler_default_weighers if not isinstance(weight_cls_names, (list, tuple)): weight_cls_names = [weight_cls_names] good_weighers = [] bad_weighers = [] for weigher_name in weight_cls_names: found_class = False for cls in self.weight_classes: if cls.__name__ == weigher_name: good_weighers.append(cls) found_class = True break if not found_class: bad_weighers.append(weigher_name) if bad_weighers: raise exception.SchedulerHostWeigherNotFound( weigher_name=", ".join(bad_weighers)) return good_weighers def get_filtered_backends(self, backends, filter_properties, filter_class_names=None): """Filter backends and return only ones passing all filters.""" if filter_class_names is not None: filter_classes = self._choose_backend_filters(filter_class_names) else: filter_classes = self.enabled_filters return self.filter_handler.get_filtered_objects(filter_classes, backends, filter_properties) def get_weighed_backends(self, backends, weight_properties, weigher_class_names=None) -> list: """Weigh the backends.""" weigher_classes = self._choose_backend_weighers(weigher_class_names) weighed_backends = self.weight_handler.get_weighed_objects( weigher_classes, backends, weight_properties) LOG.debug("Weighed %s", weighed_backends) return weighed_backends def update_service_capabilities(self, service_name: str, host: str, capabilities: dict, cluster_name: Optional[str], timestamp) -> None: """Update the per-service capabilities based on this notification.""" if service_name not in HostManager.ALLOWED_SERVICE_NAMES: LOG.debug('Ignoring %(service_name)s service update ' 'from %(host)s', {'service_name': service_name, 'host': host}) return # Determine whether HostManager has just completed initialization, and # has not received the rpc message returned by volume. just_init = self._is_just_initialized() # TODO(geguileo): In P - Remove the next line since we receive the # timestamp timestamp = timestamp or timeutils.utcnow() # Copy the capabilities, so we don't modify the original dict capab_copy = dict(capabilities) capab_copy["timestamp"] = timestamp # Set the default capabilities in case None is set. backend = cluster_name or host if service_name == 'backup': self.backup_service_states[backend] = capabilities LOG.debug("Received %(service_name)s service update from " "%(host)s: %(cap)s", {'service_name': service_name, 'host': host, 'cap': capabilities}) return capab_old = self.service_states.get(backend, {"timestamp": 0}) capab_last_update = self.service_states_last_update.get( backend, {"timestamp": 0}) # Ignore older updates if capab_old['timestamp'] and timestamp < capab_old['timestamp']: LOG.info('Ignoring old capability report from %s.', backend) return # If the capabilities are not changed and the timestamp is older, # record the capabilities. # There are cases: capab_old has the capabilities set, # but the timestamp may be None in it. So does capab_last_update. 
if (not self._get_updated_pools(capab_old, capab_copy)) and ( (not capab_old.get("timestamp")) or (not capab_last_update.get("timestamp")) or (capab_last_update["timestamp"] < capab_old["timestamp"])): self.service_states_last_update[backend] = capab_old self.service_states[backend] = capab_copy cluster_msg = (('Cluster: %s - Host: ' % cluster_name) if cluster_name else '') LOG.debug("Received %(service_name)s service update from %(cluster)s " "%(host)s: %(cap)s%(cluster)s", {'service_name': service_name, 'host': host, 'cap': capabilities, 'cluster': cluster_msg}) self._no_capabilities_backends.discard(backend) if just_init: self._update_backend_state_map(cinder_context.get_admin_context()) def notify_service_capabilities(self, service_name, backend, capabilities, timestamp): """Notify the ceilometer with updated volume stats""" if service_name != 'volume': return updated = [] capa_new = self.service_states.get(backend, {}) timestamp = timestamp or timeutils.utcnow() # Compare the capabilities and timestamps to decide notifying if not capa_new: updated = self._get_updated_pools(capa_new, capabilities) else: if timestamp > self.service_states[backend]["timestamp"]: updated = self._get_updated_pools( self.service_states[backend], capabilities) if not updated: updated = self._get_updated_pools( self.service_states_last_update.get(backend, {}), self.service_states.get(backend, {})) if updated: capab_copy = dict(capabilities) capab_copy["timestamp"] = timestamp # If capabilities changes, notify and record the capabilities. self.service_states_last_update[backend] = capab_copy self.get_usage_and_notify(capabilities, updated, backend, timestamp) def has_all_capabilities(self) -> bool: return len(self._no_capabilities_backends) == 0 def _is_just_initialized(self) -> bool: return not self.service_states_last_update def first_receive_capabilities(self) -> bool: return (not self._is_just_initialized() and len(set(self.backend_state_map)) > 0 and len(self._no_capabilities_backends) == 0) def _update_backend_state_map( self, context: cinder_context.RequestContext) -> None: # Get resource usage across the available volume nodes: topic = constants.VOLUME_TOPIC volume_services = objects.ServiceList.get_all(context, {'topic': topic, 'disabled': False, 'frozen': False}) active_backends = set() active_hosts = set() no_capabilities_backends = set() for service in volume_services.objects: host = service.host if not service.is_up: LOG.warning("volume service is down. (host: %s)", host) continue backend_key = service.service_topic_queue # We only pay attention to the first up service of a cluster since # they all refer to the same capabilities entry in service_states if backend_key in active_backends: active_hosts.add(host) continue # Capabilities may come from the cluster or the host if the service # has just been converted to a cluster service. 
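# ---------------------------------------------------------------------------
# Editor's note: an illustrative sketch (not part of the Cinder source) of
# what the _get_updated_pools() helper used above (and defined further below)
# boils down to -- a pool only counts as "updated" when one of the tracked
# capacity keys actually changed between the previous and the new capability
# report, or when the pool is new. The names here are hypothetical and the
# key list is abbreviated.
_SKETCH_KEYS = ('total_capacity_gb', 'free_capacity_gb',
                'provisioned_capacity_gb', 'allocated_capacity_gb')

def _changed_pools_sketch(old_pools, new_pools):
    """Return the new pool dicts whose tracked keys differ from the old report."""
    old_by_name = {pool['pool_name']: pool for pool in old_pools}
    changed = []
    for pool in new_pools:
        old = old_by_name.get(pool['pool_name'])
        if old is None or any(pool[key] != old[key] for key in _SKETCH_KEYS):
            changed.append(pool)
    return changed

# Example: only the pool whose free capacity changed is reported as updated.
_old_report = [{'pool_name': 'p1', 'total_capacity_gb': 100,
                'free_capacity_gb': 50, 'provisioned_capacity_gb': 40,
                'allocated_capacity_gb': 40}]
_new_report = [dict(_old_report[0], free_capacity_gb=45)]
assert _changed_pools_sketch(_old_report, _new_report) == _new_report
# ---------------------------------------------------------------------------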
capabilities = (self.service_states.get(service.cluster_name, None) or self.service_states.get(service.host, None)) if capabilities is None: no_capabilities_backends.add(backend_key) continue # Since the service could have been added or remove from a cluster backend_state = self.backend_state_map.get(backend_key, None) if not backend_state: backend_state = self.backend_state_cls( host, service.cluster_name, capabilities=capabilities, service=dict(service)) self.backend_state_map[backend_key] = backend_state # update capabilities and attributes in backend_state backend_state.update_from_volume_capability(capabilities, service=dict(service)) active_backends.add(backend_key) self._no_capabilities_backends = no_capabilities_backends # remove non-active keys from backend_state_map inactive_backend_keys = set(self.backend_state_map) - active_backends for backend_key in inactive_backend_keys: # NOTE(geguileo): We don't want to log the removal of a host from # the map when we are removing it because it has been added to a # cluster. if backend_key not in active_hosts: LOG.info("Removing non-active backend: %(backend)s from " "scheduler cache.", {'backend': backend_key}) del self.backend_state_map[backend_key] def revert_volume_consumed_capacity(self, pool_name: str, size: int) -> None: for backend_key, state in self.backend_state_map.items(): for key in state.pools: pool_state = state.pools[key] if pool_name == '#'.join([backend_key, pool_state.pool_name]): pool_state.consume_from_volume({'size': -size}, update_time=False) def get_all_backend_states( self, context: cinder_context.RequestContext) -> Iterable: """Returns a dict of all the backends the HostManager knows about. Each of the consumable resources in BackendState are populated with capabilities scheduler received from RPC. 
For example: {'192.168.1.100': BackendState(), ...} """ self._update_backend_state_map(context) # build a pool_state map and return that map instead of # backend_state_map all_pools = {} for backend_key, state in self.backend_state_map.items(): for key in state.pools: pool = state.pools[key] # use backend_key.pool_name to make sure key is unique pool_key = '.'.join([backend_key, pool.pool_name]) all_pools[pool_key] = pool return all_pools.values() def _filter_pools_by_volume_type( self, context: cinder_context.RequestContext, volume_type: objects.VolumeType, pools: dict) -> dict: """Return the pools filtered by volume type specs""" # wrap filter properties only with volume_type filter_properties = { 'context': context, 'volume_type': volume_type, 'resource_type': volume_type, 'qos_specs': volume_type.get('qos_specs'), } filtered = self.get_filtered_backends(pools.values(), filter_properties) # filter the pools by value return {k: v for k, v in pools.items() if v in filtered} def get_pools(self, context: cinder_context.RequestContext, filters: Optional[dict] = None) -> list[dict]: """Returns a dict of all pools on all hosts HostManager knows about.""" self._update_backend_state_map(context) all_pools = {} name = volume_type = None if filters: name = filters.pop('name', None) volume_type = filters.pop('volume_type', None) for backend_key, state in self.backend_state_map.items(): for key in state.pools: filtered = False pool = state.pools[key] # use backend_key.pool_name to make sure key is unique pool_key = volume_utils.append_host(backend_key, pool.pool_name) new_pool = dict(name=pool_key) new_pool.update(dict(capabilities=pool.capabilities)) if name and new_pool.get('name') != name: continue if filters: # filter all other items in capabilities for (attr, value) in filters.items(): cap = new_pool.get('capabilities').\ get(attr) # type: ignore if not self._equal_after_convert(cap, value): filtered = True break if not filtered: all_pools[pool_key] = pool # filter pools by volume type if volume_type: volume_type = volume_types.get_by_name_or_id( context, volume_type) all_pools = ( self._filter_pools_by_volume_type(context, volume_type, all_pools)) # encapsulate pools in format:{name: XXX, capabilities: XXX} return [dict(name=key, capabilities=value.capabilities) for key, value in all_pools.items()] def get_usage_and_notify(self, capa_new: dict, updated_pools: Iterable[dict], host: str, timestamp) -> None: context = cinder_context.get_admin_context() usage = self._get_usage(capa_new, updated_pools, host, timestamp) self._notify_capacity_usage(context, usage) def _get_usage(self, capa_new: dict, updated_pools: Iterable[dict], host: str, timestamp) -> list[dict]: pools = capa_new.get('pools') usage = [] if pools and isinstance(pools, list): backend_usage = dict(type='backend', name_to_id=host, total=0, free=0, allocated=0, provisioned=0, virtual_free=0, reported_at=timestamp) # Process the usage. for pool in pools: pool_usage = self._get_pool_usage(pool, host, timestamp) if pool_usage: backend_usage["total"] += pool_usage["total"] backend_usage["free"] += pool_usage["free"] backend_usage["allocated"] += pool_usage["allocated"] backend_usage["provisioned"] += pool_usage["provisioned"] backend_usage["virtual_free"] += pool_usage["virtual_free"] # Only the updated pool is reported. 
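# ---------------------------------------------------------------------------
# Editor's note: an illustrative approximation (not the actual
# cinder.utils.calculate_virtual_free_capacity) of the virtual-free figure
# that _get_pool_usage() below reports for each pool: with thin provisioning
# the headroom is total * max_over_subscription_ratio minus what is already
# provisioned, otherwise it is the real free space, with the reserved
# percentage subtracted in both cases. The function name is hypothetical.
import math

def _virtual_free_sketch(total, free, provisioned, ratio,
                         reserved_percentage, thin):
    reserved = math.floor(total * reserved_percentage / 100.0)
    if thin:
        return total * ratio - provisioned - reserved
    return free - reserved

# Example: a 100 GiB thin pool with ratio 2.0, 150 GiB already provisioned
# and 5% reserved still has 45 GiB of virtual headroom.
assert _virtual_free_sketch(100, 20, 150, 2.0, 5, thin=True) == 45
# ---------------------------------------------------------------------------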
if pool in updated_pools: usage.append(pool_usage) usage.append(backend_usage) return usage def _get_pool_usage(self, pool: dict, host: str, timestamp) -> dict[str, Any]: total = pool["total_capacity_gb"] free = pool["free_capacity_gb"] unknowns = ["unknown", "infinite", None] if (total in unknowns) or (free in unknowns): return {} allocated = pool["allocated_capacity_gb"] provisioned = pool["provisioned_capacity_gb"] reserved = pool["reserved_percentage"] ratio = utils.calculate_max_over_subscription_ratio( pool, CONF.max_over_subscription_ratio) support = pool["thin_provisioning_support"] virtual_free = utils.calculate_virtual_free_capacity( total, free, provisioned, support, ratio, reserved, support) pool_usage = dict( type='pool', name_to_id='#'.join([host, pool['pool_name']]), total=float(total), free=float(free), allocated=float(allocated), provisioned=float(provisioned), virtual_free=float(virtual_free), reported_at=timestamp) return pool_usage def _get_updated_pools(self, old_capa: dict, new_capa: dict) -> list: # Judge if the capabilities should be reported. new_pools = new_capa.get('pools', []) if not new_pools: return [] if isinstance(new_pools, list): # If the volume_stats is not well prepared, don't notify. if not all( self.REQUIRED_KEYS.issubset(pool) for pool in new_pools): return [] else: LOG.debug("The reported capabilities are not well structured...") return [] old_pools = old_capa.get('pools', []) if not old_pools: return new_pools updated_pools = [] newpools = {} oldpools = {} for new_pool in new_pools: newpools[new_pool['pool_name']] = new_pool for old_pool in old_pools: oldpools[old_pool['pool_name']] = old_pool for key in newpools: if key in oldpools.keys(): for k in self.REQUIRED_KEYS: if newpools[key][k] != oldpools[key][k]: updated_pools.append(newpools[key]) break else: updated_pools.append(newpools[key]) return updated_pools def _notify_capacity_usage(self, context: cinder_context.RequestContext, usage: list[dict]) -> None: if usage: for u in usage: volume_utils.notify_about_capacity_usage( context, u, u['type'], None, None) LOG.debug("Publish storage capacity: %s.", usage) def _equal_after_convert(self, capability, value) -> bool: if isinstance(value, type(capability)) or capability is None: return value == capability if isinstance(capability, bool): return capability == strutils.bool_from_string(value) # We can not check or convert value parameter's type in # anywhere else. # If the capability and value are not in the same type, # we just convert them into string to compare them. return str(value) == str(capability) def get_az(self, volume: objects.Volume, availability_zone: Union[str, None]) -> Union[str, None]: if availability_zone: az = availability_zone elif volume: az = volume.availability_zone else: az = None return az def get_backup_host(self, volume: objects.Volume, availability_zone: Union[str, None], driver=None) -> str: if volume: volume_host = volume_utils.extract_host(volume.host, 'host') else: volume_host = None az = self.get_az(volume, availability_zone) return self._get_available_backup_service_host(volume_host, az, driver) def _get_any_available_backup_service(self, availability_zone, driver=None): """Get an available backup service host. Get an available backup service host in the specified availability zone. 
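# ---------------------------------------------------------------------------
# Editor's note: an illustrative sketch (not part of the Cinder source) of
# the backup-host selection done here: keep only the known backup services
# that match the requested availability zone and driver, then pick one at
# random so load spreads across equally suitable hosts. Names below are
# hypothetical.
import random

def _pick_backup_host_sketch(services, az=None, driver=None):
    """Return a random service host matching the AZ/driver constraints, or None."""
    matching = [host for host, caps in services.items()
                if (az is None or caps.get('availability_zone') == az)
                and (driver is None or caps.get('driver_name') == driver)]
    return random.choice(matching) if matching else None

# Example: only the host in the requested zone can be chosen.
_backup_services = {'backup1': {'availability_zone': 'az1', 'driver_name': 'ceph'},
                    'backup2': {'availability_zone': 'az2', 'driver_name': 'ceph'}}
assert _pick_backup_host_sketch(_backup_services, az='az1') == 'backup1'
# ---------------------------------------------------------------------------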
""" services = [srv for srv in self._list_backup_services( availability_zone, driver)] random.shuffle(services) return services[0] if services else None def _get_available_backup_service_host(self, host, az, driver=None) -> str: """Return an appropriate backup service host.""" backup_host = None if not host or not CONF.backup_use_same_host: backup_host = self._get_any_available_backup_service(az, driver) elif self._is_backup_service_enabled(az, host): backup_host = host if not backup_host: raise exception.ServiceNotFound(service_id='cinder-backup') return backup_host def _list_backup_services(self, availability_zone, driver=None): """List all enabled backup services. :returns: list -- hosts for services that are enabled for backup. """ services = [] def _is_good_service(cap, driver, az) -> bool: if driver is None and az is None: return True match_driver = cap['driver_name'] == driver if driver else True if match_driver: if not az: return True return cap['availability_zone'] == az return False for backend, capabilities in self.backup_service_states.items(): if capabilities['backend_state']: if _is_good_service(capabilities, driver, availability_zone): services.append(backend) return services def _az_matched(self, service: objects.Service, availability_zone: Optional[str]) -> bool: return ((not availability_zone) or service.availability_zone == availability_zone) def _is_backup_service_enabled(self, availability_zone: str, host: str) -> bool: """Check if there is a backup service available.""" topic = constants.BACKUP_TOPIC ctxt = cinder_context.get_admin_context() services = objects.ServiceList.get_all_by_topic( ctxt, topic, disabled=False) for srv in services: if (self._az_matched(srv, availability_zone) and srv.host == host and srv.is_up): return True return False ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/scheduler/manager.py0000664000175000017500000007006500000000000020305 0ustar00zuulzuul00000000000000# Copyright (c) 2010 OpenStack Foundation # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
""" Scheduler Service """ import collections from datetime import datetime import functools import eventlet from oslo_config import cfg from oslo_log import log as logging import oslo_messaging as messaging from oslo_service import periodic_task from oslo_utils import excutils from oslo_utils import importutils from oslo_utils import timeutils from oslo_utils import versionutils from cinder.backup import rpcapi as backup_rpcapi from cinder import context from cinder import db from cinder import exception from cinder import flow_utils from cinder.i18n import _ from cinder import manager from cinder.message import api as mess_api from cinder.message import message_field from cinder import objects from cinder.objects import fields from cinder import quota from cinder import rpc from cinder.scheduler.flows import create_volume from cinder.scheduler import rpcapi as scheduler_rpcapi from cinder.volume import rpcapi as volume_rpcapi from cinder.volume import volume_utils as vol_utils scheduler_manager_opts = [ cfg.StrOpt('scheduler_driver', default='cinder.scheduler.filter_scheduler.' 'FilterScheduler', help='Default scheduler driver to use'), cfg.IntOpt('scheduler_driver_init_wait_time', default=60, min=1, help='Maximum time in seconds to wait for the driver to ' 'report as ready'), ] CONF = cfg.CONF CONF.register_opts(scheduler_manager_opts) QUOTAS = quota.QUOTAS LOG = logging.getLogger(__name__) def append_operation_type(name=None): def _decorator(schedule_function): @functools.wraps(schedule_function) def inject_operation_decorator(*args, **kwargs): request_spec = kwargs.get('request_spec', None) request_spec_list = kwargs.get('request_spec_list', None) if request_spec: request_spec['operation'] = name or schedule_function.__name__ if request_spec_list: for rs in request_spec_list: rs['operation'] = name or schedule_function.__name__ return schedule_function(*args, **kwargs) return inject_operation_decorator return _decorator class SchedulerManager(manager.CleanableManager, manager.Manager): """Chooses a host to create volumes.""" RPC_API_VERSION = scheduler_rpcapi.SchedulerAPI.RPC_API_VERSION target = messaging.Target(version=RPC_API_VERSION) def __init__(self, scheduler_driver=None, service_name=None, *args, **kwargs): if not scheduler_driver: scheduler_driver = CONF.scheduler_driver self.driver = importutils.import_object(scheduler_driver) super(SchedulerManager, self).__init__(*args, **kwargs) self._startup_delay = True self.backup_api = backup_rpcapi.BackupAPI() self.volume_api = volume_rpcapi.VolumeAPI() self.sch_api = scheduler_rpcapi.SchedulerAPI() self.message_api = mess_api.API() self.rpc_api_version = versionutils.convert_version_to_int( self.RPC_API_VERSION) def init_host_with_rpc(self): ctxt = context.get_admin_context() self.request_service_capabilities(ctxt) for __ in range(CONF.scheduler_driver_init_wait_time): if self.driver.is_first_receive(): break eventlet.sleep(1) self._startup_delay = False def reset(self): super(SchedulerManager, self).reset() self.volume_api = volume_rpcapi.VolumeAPI() self.sch_api = scheduler_rpcapi.SchedulerAPI() self.driver.reset() @periodic_task.periodic_task(spacing=CONF.message_reap_interval, run_immediately=True) def _clean_expired_messages(self, context): self.message_api.cleanup_expired_messages(context) @periodic_task.periodic_task(spacing=CONF.reservation_clean_interval, run_immediately=True) def _clean_expired_reservation(self, context): QUOTAS.expire(context) def update_service_capabilities(self, context, service_name=None, host=None, 
capabilities=None, cluster_name=None, timestamp=None, **kwargs): """Process a capability update from a service node.""" if capabilities is None: capabilities = {} # If we received the timestamp we have to deserialize it elif timestamp: timestamp = datetime.strptime(timestamp, timeutils.PERFECT_TIME_FORMAT) self.driver.update_service_capabilities(service_name, host, capabilities, cluster_name, timestamp) def notify_service_capabilities(self, context, service_name, capabilities, host=None, backend=None, timestamp=None): """Process a capability update from a service node.""" # TODO(geguileo): On v4 remove host field. if capabilities is None: capabilities = {} # If we received the timestamp we have to deserialize it elif timestamp: timestamp = datetime.strptime(timestamp, timeutils.PERFECT_TIME_FORMAT) backend = backend or host self.driver.notify_service_capabilities(service_name, backend, capabilities, timestamp) def _wait_for_scheduler(self): # NOTE(dulek): We're waiting for scheduler to announce that it's ready # or CONF.scheduler_driver_init_wait_time seconds from service startup # has passed. while self._startup_delay and not self.driver.is_ready(): eventlet.sleep(1) @append_operation_type() def create_group(self, context, group, group_spec=None, group_filter_properties=None, request_spec_list=None, filter_properties_list=None): self._wait_for_scheduler() try: self.driver.schedule_create_group( context, group, group_spec, request_spec_list, group_filter_properties, filter_properties_list) except exception.NoValidBackend: LOG.error("Could not find a backend for group " "%(group_id)s.", {'group_id': group.id}) group.status = fields.GroupStatus.ERROR group.save() except Exception: with excutils.save_and_reraise_exception(): LOG.exception("Failed to create generic group " "%(group_id)s.", {'group_id': group.id}) group.status = fields.GroupStatus.ERROR group.save() @objects.Volume.set_workers @append_operation_type() def create_volume(self, context, volume, snapshot_id=None, image_id=None, request_spec=None, filter_properties=None, backup_id=None): self._wait_for_scheduler() try: flow_engine = create_volume.get_flow(context, self.driver, request_spec, filter_properties, volume, snapshot_id, image_id, backup_id) except Exception: msg = _("Failed to create scheduler manager volume flow") LOG.exception(msg) raise exception.CinderException(msg) with flow_utils.DynamicLogListener(flow_engine, logger=LOG): flow_engine.run() @append_operation_type() def create_snapshot(self, ctxt, volume, snapshot, backend, request_spec=None, filter_properties=None): """Create snapshot for a volume. The main purpose of this method is to check if target backend (of volume and snapshot) has sufficient capacity to host to-be-created snapshot. """ self._wait_for_scheduler() try: tgt_backend = self.driver.backend_passes_filters( ctxt, backend, request_spec, filter_properties) tgt_backend.consume_from_volume( {'size': request_spec['volume_properties']['size']}) except exception.NoValidBackend as ex: self._set_snapshot_state_and_notify('create_snapshot', snapshot, fields.SnapshotStatus.ERROR, ctxt, ex, request_spec) else: volume_rpcapi.VolumeAPI().create_snapshot(ctxt, volume, snapshot) def _do_cleanup(self, ctxt: context.RequestContext, vo_resource: 'objects.base.CinderObject'): # We can only receive cleanup requests for volumes, but we check anyway # We need to cleanup the volume status for cases where the scheduler # died while scheduling the volume creation. 
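# ---------------------------------------------------------------------------
# Editor's note: an illustrative, generic sketch (not part of the Cinder
# source) of the startup-delay pattern behind init_host_with_rpc() and
# _wait_for_scheduler() above: scheduling entry points block until the driver
# reports that it has received capability updates, or until a bounded grace
# period has elapsed. The function name and timing here are hypothetical.
import time

def _wait_until_ready_sketch(is_ready, timeout_seconds, poll_interval=1.0):
    """Poll is_ready() until it returns True or timeout_seconds have passed."""
    deadline = time.monotonic() + timeout_seconds
    while not is_ready() and time.monotonic() < deadline:
        time.sleep(poll_interval)

# Example: returns immediately when the readiness check already passes.
_wait_until_ready_sketch(lambda: True, timeout_seconds=5)
# ---------------------------------------------------------------------------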
if (isinstance(vo_resource, objects.Volume) and vo_resource.status == 'creating'): vo_resource.status = 'error' vo_resource.save() def request_service_capabilities(self, context: context.RequestContext) -> None: volume_rpcapi.VolumeAPI().publish_service_capabilities(context) try: self.backup_api.publish_service_capabilities(context) except exception.ServiceTooOld as e: # cinder-backup has publish_service_capabilities starting Stein # release only. msg = ("Failed to notify about cinder-backup service " "capabilities for host %(host)s. This is normal " "during a live upgrade. Error: %(e)s") LOG.warning(msg, {'host': self.host, 'e': e}) @append_operation_type() def migrate_volume(self, context: context.RequestContext, volume: objects.Volume, backend: str, force_copy: bool, request_spec, filter_properties) -> None: """Ensure that the backend exists and can accept the volume.""" self._wait_for_scheduler() def _migrate_volume_set_error(self, context, ex, request_spec): if volume.status == 'maintenance': previous_status = ( volume.previous_status or 'maintenance') volume_state = {'volume_state': {'migration_status': 'error', 'status': previous_status}} else: volume_state = {'volume_state': {'migration_status': 'error'}} self._set_volume_state_and_notify('migrate_volume_to_host', volume_state, context, ex, request_spec) try: tgt_backend = self.driver.backend_passes_filters(context, backend, request_spec, filter_properties) except exception.NoValidBackend as ex: _migrate_volume_set_error(self, context, ex, request_spec) except Exception as ex: with excutils.save_and_reraise_exception(): _migrate_volume_set_error(self, context, ex, request_spec) else: volume_rpcapi.VolumeAPI().migrate_volume(context, volume, tgt_backend, force_copy) # FIXME(geguileo): Remove this in v4.0 of RPC API. def migrate_volume_to_host(self, context, volume, host, force_host_copy, request_spec, filter_properties=None): return self.migrate_volume(context, volume, host, force_host_copy, request_spec, filter_properties) @append_operation_type(name='retype_volume') def retype(self, context, volume, request_spec, filter_properties=None): """Schedule the modification of a volume's type. :param context: the request context :param volume: the volume object to retype :param request_spec: parameters for this retype request :param filter_properties: parameters to filter by """ self._wait_for_scheduler() def _retype_volume_set_error(self, context, ex, request_spec, volume_ref, reservations, msg=None): if reservations: QUOTAS.rollback(context, reservations) previous_status = ( volume_ref.previous_status or volume_ref.status) volume_state = {'volume_state': {'status': previous_status}} self._set_volume_state_and_notify('retype', volume_state, context, ex, request_spec, msg) reservations = request_spec.get('quota_reservations') old_reservations = request_spec.get('old_reservations', None) new_type = request_spec.get('volume_type') if new_type is None: msg = _('New volume type not specified in request_spec.') ex = exception.ParameterNotFound(param='volume_type') _retype_volume_set_error(self, context, ex, request_spec, volume, reservations, msg) # Default migration policy is 'never' migration_policy = request_spec.get('migration_policy') if not migration_policy: migration_policy = 'never' try: tgt_backend = self.driver.find_retype_backend(context, request_spec, filter_properties, migration_policy) except Exception as ex: # Not having a valid host is an expected exception, so we don't # reraise on it. 
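# ---------------------------------------------------------------------------
# Editor's note: an illustrative, stripped-down sketch (not part of the
# Cinder source) of what the append_operation_type decorator defined earlier
# in this module does for the scheduler entry points above and below: stamp
# the operation name into the request_spec keyword argument before the
# wrapped method runs (the real decorator also handles request_spec_list).
# Names here are hypothetical.
import functools

def _tag_operation_sketch(name=None):
    def decorator(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            spec = kwargs.get('request_spec')
            if spec is not None:
                spec['operation'] = name or func.__name__
            return func(*args, **kwargs)
        return wrapper
    return decorator

# Example usage: the wrapped function sees the operation name in its spec.
@_tag_operation_sketch()
def _create_volume_example(request_spec=None):
    return request_spec

assert _create_volume_example(request_spec={})['operation'] == '_create_volume_example'
# ---------------------------------------------------------------------------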
reraise = not isinstance(ex, exception.NoValidBackend) with excutils.save_and_reraise_exception(reraise=reraise): _retype_volume_set_error(self, context, ex, request_spec, volume, reservations) else: volume_rpcapi.VolumeAPI().retype(context, volume, new_type['id'], tgt_backend, migration_policy, reservations, old_reservations) @append_operation_type() def manage_existing(self, context, volume, request_spec, filter_properties=None): """Ensure that the host exists and can accept the volume.""" self._wait_for_scheduler() def _manage_existing_set_error(self, context, ex, request_spec): volume_state = {'volume_state': {'status': 'error_managing'}} self._set_volume_state_and_notify('manage_existing', volume_state, context, ex, request_spec) try: backend = self.driver.backend_passes_filters( context, volume.service_topic_queue, request_spec, filter_properties) # At the API we didn't have the pool info, so the volume DB entry # was created without it, now we add it. volume.host = backend.host volume.cluster_name = backend.cluster_name volume.save() except exception.NoValidBackend as ex: _manage_existing_set_error(self, context, ex, request_spec) except Exception as ex: with excutils.save_and_reraise_exception(): _manage_existing_set_error(self, context, ex, request_spec) else: volume_rpcapi.VolumeAPI().manage_existing(context, volume, request_spec.get('ref')) @append_operation_type() def manage_existing_snapshot(self, context, volume, snapshot, ref, request_spec, filter_properties=None): """Ensure that the host exists and can accept the snapshot.""" self._wait_for_scheduler() try: backend = self.driver.backend_passes_filters( context, volume.service_topic_queue, request_spec, filter_properties) backend.consume_from_volume({'size': volume.size}) except exception.NoValidBackend as ex: self._set_snapshot_state_and_notify('manage_existing_snapshot', snapshot, fields.SnapshotStatus.ERROR, context, ex, request_spec) else: volume_rpcapi.VolumeAPI().manage_existing_snapshot( context, snapshot, ref, volume.service_topic_queue) def get_pools(self, context, filters=None): """Get active pools from scheduler's cache. NOTE(dulek): There's no self._wait_for_scheduler() because get_pools is an RPC call (is blocking for the c-api). Also this is admin-only API extension so it won't hurt the user much to retry the request manually. 
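# ---------------------------------------------------------------------------
# Editor's note: an illustrative sketch (not part of the Cinder source) of
# the error-handling pattern used earlier in retype() above: the failure is
# always recorded, but the exception is only re-raised when it is not the
# expected "no valid backend" case. The names here are hypothetical and a
# plain try/except stands in for oslo.utils' save_and_reraise_exception.
def _schedule_or_record_sketch(schedule, record_error, expected_exc_types):
    try:
        return schedule()
    except Exception as exc:
        record_error(exc)
        if not isinstance(exc, expected_exc_types):
            raise
        return None

# Example: an expected failure is recorded and swallowed, not re-raised.
def _failing_schedule_example():
    raise LookupError('no valid backend')

_recorded_errors = []
assert _schedule_or_record_sketch(_failing_schedule_example,
                                  _recorded_errors.append,
                                  (LookupError,)) is None
assert isinstance(_recorded_errors[0], LookupError)
# ---------------------------------------------------------------------------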
""" return self.driver.get_pools(context, filters) @append_operation_type(name='create_group') def validate_host_capacity(self, context, backend, request_spec, filter_properties): try: backend_state = self.driver.backend_passes_filters( context, backend, request_spec, filter_properties) backend_state.consume_from_volume( {'size': request_spec['volume_properties']['size']}) except exception.NoValidBackend: LOG.error("Desired host %(host)s does not have enough " "capacity.", {'host': backend}) return False return True @append_operation_type() def extend_volume(self, context, volume, new_size, reservations, request_spec=None, filter_properties=None): def _extend_volume_set_error(self, context, ex, request_spec): volume_state = {'volume_state': {'status': volume.previous_status, 'previous_status': None}} self._set_volume_state_and_notify('extend_volume', volume_state, context, ex, request_spec) if not filter_properties: filter_properties = {} filter_properties['new_size'] = new_size try: backend_state = self.driver.backend_passes_filters( context, volume.service_topic_queue, request_spec, filter_properties) backend_state.consume_from_volume( {'size': new_size - volume.size}) volume_rpcapi.VolumeAPI().extend_volume(context, volume, new_size, reservations) except exception.NoValidBackend as ex: QUOTAS.rollback(context, reservations, project_id=volume.project_id) _extend_volume_set_error(self, context, ex, request_spec) self.message_api.create( context, message_field.Action.EXTEND_VOLUME, resource_uuid=volume.id, exception=ex) def _set_volume_state_and_notify(self, method, updates, context, ex, request_spec, msg=None): # TODO(harlowja): move into a task that just does this later. if not msg: msg = ("Failed to schedule_%(method)s: %(ex)s" % {'method': method, 'ex': ex}) LOG.error(msg) volume_state = updates['volume_state'] properties = request_spec.get('volume_properties', {}) volume_id = request_spec.get('volume_id', None) if volume_id: db.volume_update(context, volume_id, volume_state) if volume_state.get('status') == 'error_managing': volume_state['status'] = 'error' payload = dict(request_spec=request_spec, volume_properties=properties, volume_id=volume_id, state=volume_state, method=method, reason=ex) rpc.get_notifier("scheduler").error(context, 'scheduler.' + method, payload) def _set_snapshot_state_and_notify(self, method, snapshot, state, context, ex, request_spec, msg=None): if not msg: msg = ("Failed to schedule_%(method)s: %(ex)s" % {'method': method, 'ex': ex}) LOG.error(msg) model_update = dict(status=state) snapshot.update(model_update) snapshot.save() payload = dict(request_spec=request_spec, snapshot_id=snapshot.id, state=state, method=method, reason=ex) rpc.get_notifier("scheduler").error(context, 'scheduler.' + method, payload) @property def upgrading_cloud(self): min_version_str = self.sch_api.determine_rpc_version_cap() min_version = versionutils.convert_version_to_int(min_version_str) return min_version < self.rpc_api_version def _cleanup_destination(self, clusters, service): """Determines the RPC method, destination service and name. The name is only used for logging, and it is the topic queue. """ # For the scheduler we don't have a specific destination, as any # scheduler will do and we know we are up, since we are running this # code. if service.binary == 'cinder-scheduler': cleanup_rpc = self.sch_api.do_cleanup dest = None dest_name = service.host else: cleanup_rpc = self.volume_api.do_cleanup # For clustered volume services we try to get info from the cache. 
if service.is_clustered: # Get cluster info from cache dest = clusters[service.binary].get(service.cluster_name) # Cache miss forces us to get the cluster from the DB via OVO if not dest: dest = service.cluster clusters[service.binary][service.cluster_name] = dest dest_name = dest.name # Non clustered volume services else: dest = service dest_name = service.host return cleanup_rpc, dest, dest_name def work_cleanup(self, context, cleanup_request): """Process request from API to do cleanup on services. Here we retrieve from the DB which services we want to clean up based on the request from the user. Then send individual cleanup requests to each of the services that are up, and we finally return a tuple with services that we have sent a cleanup request and those that were not up and we couldn't send it. """ if self.upgrading_cloud: raise exception.UnavailableDuringUpgrade(action='workers cleanup') LOG.info('Workers cleanup request started.') filters = dict(service_id=cleanup_request.service_id, cluster_name=cleanup_request.cluster_name, host=cleanup_request.host, binary=cleanup_request.binary, is_up=cleanup_request.is_up, disabled=cleanup_request.disabled) # Get the list of all the services that match the request services = objects.ServiceList.get_all(context, filters) until = cleanup_request.until or timeutils.utcnow() requested = [] not_requested = [] # To reduce DB queries we'll cache the clusters data clusters: collections.defaultdict = collections.defaultdict(dict) for service in services: cleanup_request.cluster_name = service.cluster_name cleanup_request.service_id = service.id cleanup_request.host = service.host cleanup_request.binary = service.binary cleanup_request.until = until cleanup_rpc, dest, dest_name = self._cleanup_destination(clusters, service) # If it's a scheduler or the service is up, send the request. if not dest or dest.is_up: LOG.info('Sending cleanup for %(binary)s %(dest_name)s.', {'binary': service.binary, 'dest_name': dest_name}) cleanup_rpc(context, cleanup_request) requested.append(service) # We don't send cleanup requests when there are no services alive # to do the cleanup. else: LOG.info('No service available to cleanup %(binary)s ' '%(dest_name)s.', {'binary': service.binary, 'dest_name': dest_name}) not_requested.append(service) LOG.info('Cleanup requests completed.') return requested, not_requested def create_backup(self, context, backup): availability_zone = backup.availability_zone volume_id = backup.volume_id volume = self.db.volume_get(context, volume_id) try: # Bug #1952805: an incremental backup will already have a host set, # and we must respect it if not backup.host: host = self.driver.get_backup_host(volume, availability_zone) backup.host = host backup.save() self.backup_api.create_backup(context, backup) except exception.ServiceNotFound: self.db.volume_update(context, volume_id, {'status': volume['previous_status'], 'previous_status': volume['status']}) msg = "Service not found for creating backup." LOG.error(msg) vol_utils.update_backup_error(backup, msg) self.message_api.create( context, action=message_field.Action.BACKUP_CREATE, resource_type=message_field.Resource.VOLUME_BACKUP, resource_uuid=backup.id, detail=message_field.Detail.BACKUP_SCHEDULE_ERROR) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/scheduler/rpcapi.py0000664000175000017500000002576100000000000020154 0ustar00zuulzuul00000000000000# Copyright 2012, Red Hat, Inc. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Client side of the scheduler manager RPC API. """ from oslo_serialization import jsonutils from oslo_utils import timeutils from cinder.common import constants from cinder import rpc class SchedulerAPI(rpc.RPCAPI): """Client side of the scheduler RPC API. API version history: .. code-block:: none 1.0 - Initial version. 1.1 - Add create_volume() method 1.2 - Add request_spec, filter_properties arguments to create_volume() 1.3 - Add migrate_volume_to_host() method 1.4 - Add retype method 1.5 - Add manage_existing method 1.6 - Add create_consistencygroup method 1.7 - Add get_active_pools method 1.8 - Add sending object over RPC in create_consistencygroup method 1.9 - Adds support for sending objects over RPC in create_volume() 1.10 - Adds support for sending objects over RPC in retype() 1.11 - Adds support for sending objects over RPC in migrate_volume_to_host() ... Mitaka supports messaging 1.11. Any changes to existing methods in 1.x after this point should be done so that they can handle version cap set to 1.11. 2.0 - Remove 1.x compatibility 2.1 - Adds support for sending objects over RPC in manage_existing() 2.2 - Sends request_spec as object in create_volume() 2.3 - Add create_group method ... Newton supports messaging 2.3. Any changes to existing methods in 2.x after this point should be done so that they can handle version cap set to 2.3. 3.0 - Remove 2.x compatibility 3.1 - Adds notify_service_capabilities() 3.2 - Adds extend_volume() 3.3 - Add cluster support to migrate_volume, and to update_service_capabilities and send the timestamp from the capabilities. 3.4 - Adds work_cleanup and do_cleanup methods. 3.5 - Make notify_service_capabilities support A/A 3.6 - Removed create_consistencygroup method 3.7 - Adds set_log_levels and get_log_levels 3.8 - Addds ``valid_host_capacity`` method 3.9 - Adds create_snapshot method 3.10 - Adds backup_id to create_volume method. 3.11 - Adds manage_existing_snapshot method. 3.12 - Adds create_backup method. 
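# ---------------------------------------------------------------------------
# Editor's note: an illustrative sketch (not part of the Cinder source) of
# the version-negotiation pattern the client methods below all share: before
# casting, check whether the peer can handle the newer message format and, if
# not, drop or rename the newer arguments and send with an older version
# (create_volume below does exactly this with backup_id and version 3.10).
# The helper name here is hypothetical.
def _negotiate_sketch(can_send_version, msg_args):
    """Return (version, args) that the peer is known to understand."""
    if can_send_version('3.10'):
        return '3.10', msg_args
    older_args = dict(msg_args)
    older_args.pop('backup_id', None)  # pre-3.10 peers do not know backup_id
    return '3.0', older_args

# Example: an old peer gets the same cast without the newer argument.
_version, _args = _negotiate_sketch(lambda v: False,
                                    {'volume': 'vol-1', 'backup_id': 'b-1'})
assert _version == '3.0' and 'backup_id' not in _args
# ---------------------------------------------------------------------------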
""" RPC_API_VERSION = '3.12' RPC_DEFAULT_VERSION = '3.0' TOPIC = constants.SCHEDULER_TOPIC BINARY = 'cinder-scheduler' def create_group(self, ctxt, group, group_spec=None, request_spec_list=None, group_filter_properties=None, filter_properties_list=None): cctxt = self._get_cctxt() request_spec_p_list = [jsonutils.to_primitive(rs) for rs in request_spec_list] group_spec_p = jsonutils.to_primitive(group_spec) msg_args = { 'group': group, 'group_spec': group_spec_p, 'request_spec_list': request_spec_p_list, 'group_filter_properties': group_filter_properties, 'filter_properties_list': filter_properties_list, } cctxt.cast(ctxt, 'create_group', **msg_args) def create_volume(self, ctxt, volume, snapshot_id=None, image_id=None, request_spec=None, filter_properties=None, backup_id=None): volume.create_worker() cctxt = self._get_cctxt() msg_args = {'snapshot_id': snapshot_id, 'image_id': image_id, 'request_spec': request_spec, 'filter_properties': filter_properties, 'volume': volume, 'backup_id': backup_id} if not self.client.can_send_version('3.10'): msg_args.pop('backup_id') cctxt.cast(ctxt, 'create_volume', **msg_args) @rpc.assert_min_rpc_version('3.8') def validate_host_capacity(self, ctxt, backend, request_spec, filter_properties=None): msg_args = {'request_spec': request_spec, 'filter_properties': filter_properties, 'backend': backend} cctxt = self._get_cctxt() return cctxt.call(ctxt, 'validate_host_capacity', **msg_args) @rpc.assert_min_rpc_version('3.9') def create_snapshot(self, ctxt, volume, snapshot, backend, request_spec=None, filter_properties=None): cctxt = self._get_cctxt() msg_args = {'request_spec': request_spec, 'filter_properties': filter_properties, 'volume': volume, 'snapshot': snapshot, 'backend': backend} cctxt.cast(ctxt, 'create_snapshot', **msg_args) def migrate_volume(self, ctxt, volume, backend, force_copy=False, request_spec=None, filter_properties=None): request_spec_p = jsonutils.to_primitive(request_spec) msg_args = {'request_spec': request_spec_p, 'filter_properties': filter_properties, 'volume': volume} version = '3.3' if self.client.can_send_version(version): msg_args['backend'] = backend msg_args['force_copy'] = force_copy method = 'migrate_volume' else: version = '3.0' msg_args['host'] = backend msg_args['force_host_copy'] = force_copy method = 'migrate_volume_to_host' cctxt = self._get_cctxt(version=version) cctxt.cast(ctxt, method, **msg_args) def retype(self, ctxt, volume, request_spec=None, filter_properties=None): cctxt = self._get_cctxt() request_spec_p = jsonutils.to_primitive(request_spec) msg_args = {'request_spec': request_spec_p, 'filter_properties': filter_properties, 'volume': volume} cctxt.cast(ctxt, 'retype', **msg_args) def manage_existing(self, ctxt, volume, request_spec=None, filter_properties=None): cctxt = self._get_cctxt() request_spec_p = jsonutils.to_primitive(request_spec) msg_args = { 'request_spec': request_spec_p, 'filter_properties': filter_properties, 'volume': volume, } cctxt.cast(ctxt, 'manage_existing', **msg_args) @rpc.assert_min_rpc_version('3.11') def manage_existing_snapshot(self, ctxt, volume, snapshot, ref, request_spec=None, filter_properties=None): cctxt = self._get_cctxt() request_spec_p = jsonutils.to_primitive(request_spec) msg_args = { 'request_spec': request_spec_p, 'filter_properties': filter_properties, 'volume': volume, 'snapshot': snapshot, 'ref': ref, } cctxt.cast(ctxt, 'manage_existing_snapshot', **msg_args) @rpc.assert_min_rpc_version('3.2') def extend_volume(self, ctxt, volume, new_size, reservations, 
request_spec, filter_properties=None): cctxt = self._get_cctxt() request_spec_p = jsonutils.to_primitive(request_spec) msg_args = { 'volume': volume, 'new_size': new_size, 'reservations': reservations, 'request_spec': request_spec_p, 'filter_properties': filter_properties, } cctxt.cast(ctxt, 'extend_volume', **msg_args) def get_pools(self, ctxt, filters=None): cctxt = self._get_cctxt() return cctxt.call(ctxt, 'get_pools', filters=filters) @staticmethod def prepare_timestamp(timestamp): timestamp = timestamp or timeutils.utcnow() return jsonutils.to_primitive(timestamp) def update_service_capabilities(self, ctxt, service_name, host, capabilities, cluster_name, timestamp=None): msg_args = dict(service_name=service_name, host=host, capabilities=capabilities) version = '3.3' # If server accepts timestamping the capabilities and the cluster name if self.client.can_send_version(version): # Serialize the timestamp msg_args.update(cluster_name=cluster_name, timestamp=self.prepare_timestamp(timestamp)) else: version = '3.0' cctxt = self._get_cctxt(fanout=True, version=version) cctxt.cast(ctxt, 'update_service_capabilities', **msg_args) @rpc.assert_min_rpc_version('3.1') def notify_service_capabilities(self, ctxt, service_name, backend, capabilities, timestamp=None): parameters = {'service_name': service_name, 'capabilities': capabilities} if self.client.can_send_version('3.5'): version = '3.5' parameters.update(backend=backend, timestamp=self.prepare_timestamp(timestamp)) else: version = '3.1' parameters['host'] = backend cctxt = self._get_cctxt(version=version) cctxt.cast(ctxt, 'notify_service_capabilities', **parameters) @rpc.assert_min_rpc_version('3.4') def work_cleanup(self, ctxt, cleanup_request): """Generate individual service cleanup requests from user request.""" cctxt = self.client.prepare(version='3.4') # Response will have services that are receiving the cleanup request # and services that couldn't receive it since they are down. return cctxt.call(ctxt, 'work_cleanup', cleanup_request=cleanup_request) @rpc.assert_min_rpc_version('3.4') def do_cleanup(self, ctxt, cleanup_request): """Perform this scheduler's resource cleanup as per cleanup_request.""" cctxt = self.client.prepare(version='3.4') cctxt.cast(ctxt, 'do_cleanup', cleanup_request=cleanup_request) @rpc.assert_min_rpc_version('3.7') def set_log_levels(self, context, service, log_request): cctxt = self._get_cctxt(server=service.host, version='3.7') cctxt.cast(context, 'set_log_levels', log_request=log_request) @rpc.assert_min_rpc_version('3.7') def get_log_levels(self, context, service, log_request): cctxt = self._get_cctxt(server=service.host, version='3.7') return cctxt.call(context, 'get_log_levels', log_request=log_request) @rpc.assert_min_rpc_version('3.12') def create_backup(self, ctxt, backup): cctxt = self._get_cctxt() msg_args = {'backup': backup} cctxt.cast(ctxt, 'create_backup', **msg_args) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/scheduler/scheduler_options.py0000664000175000017500000000653500000000000022425 0ustar00zuulzuul00000000000000# Copyright (c) 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
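# ---------------------------------------------------------------------------
# Editor's note: an illustrative sketch (not part of the Cinder source) of
# the reload policy implemented by SchedulerOptions.get_configuration() in
# the module below: re-read the JSON options file only when at least five
# minutes have passed since the last check and the file's mtime is newer
# than the copy already loaded. The class and attribute names here are
# hypothetical.
import json
import os
import time

class _JsonOptionsSketch(object):
    def __init__(self, path, min_interval=300):
        self._path = path
        self._min_interval = min_interval
        self._last_checked = 0.0
        self._last_mtime = 0.0
        self.data = {}

    def get(self):
        """Return cached options, reloading the file only when it changed."""
        now = time.time()
        if now - self._last_checked < self._min_interval:
            return self.data
        self._last_checked = now
        mtime = os.path.getmtime(self._path)
        if mtime > self._last_mtime:
            with open(self._path) as handle:
                self.data = json.load(handle)
            self._last_mtime = mtime
        return self.data
# ---------------------------------------------------------------------------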
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ SchedulerOptions monitors a local .json file for changes and loads it if needed. This file is converted to a data structure and passed into the filtering and weighing functions which can use it for dynamic configuration. """ import datetime import json import os from oslo_config import cfg from oslo_log import log as logging from oslo_utils import timeutils scheduler_json_config_location_opt = cfg.StrOpt( 'scheduler_json_config_location', default='', help='Absolute path to scheduler configuration JSON file.') CONF = cfg.CONF CONF.register_opt(scheduler_json_config_location_opt) LOG = logging.getLogger(__name__) class SchedulerOptions(object): """SchedulerOptions monitors a local .json file for changes. The file is reloaded if needed and converted to a data structure and passed into the filtering and weighing functions which can use it for dynamic configuration. """ def __init__(self): super(SchedulerOptions, self).__init__() self.data = {} self.last_modified = None self.last_checked = None def _get_file_handle(self, filename): """Get file handle. Broken out for testing.""" return open(filename) def _get_file_timestamp(self, filename): """Get the last modified datetime. Broken out for testing.""" try: return os.path.getmtime(filename) except os.error: LOG.exception("Could not stat scheduler options file " "%(filename)s.", {'filename': filename}) raise def _load_file(self, handle): """Decode the JSON file. Broken out for testing.""" try: return json.load(handle) except ValueError: LOG.exception("Could not decode scheduler options.") return {} def _get_time_now(self): """Get current UTC. Broken out for testing.""" return timeutils.utcnow() def get_configuration(self, filename=None) -> dict: """Check the json file for changes and load it if needed.""" if not filename: filename = CONF.scheduler_json_config_location if not filename: return self.data if self.last_checked: now = self._get_time_now() if now - self.last_checked < datetime.timedelta(minutes=5): return self.data last_modified = self._get_file_timestamp(filename) if (not last_modified or not self.last_modified or last_modified > self.last_modified): self.data = self._load_file(self._get_file_handle(filename)) self.last_modified = last_modified if not self.data: self.data = {} return self.data ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315577.1071184 cinder-27.0.0/cinder/scheduler/weights/0000775000175000017500000000000000000000000017763 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/scheduler/weights/__init__.py0000664000175000017500000000254000000000000022075 0ustar00zuulzuul00000000000000# Copyright (c) 2011 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Scheduler host weights """ from cinder.scheduler import base_weight class WeighedHost(base_weight.WeighedObject): def to_dict(self): return { 'weight': self.weight, 'host': self.obj.host, } def __repr__(self): return ("WeighedHost [host: %s, weight: %s]" % (self.obj.host, self.weight)) class BaseHostWeigher(base_weight.BaseWeigher): """Base class for host weights.""" pass class OrderedHostWeightHandler(base_weight.BaseWeightHandler): object_class = WeighedHost def __init__(self, namespace): super(OrderedHostWeightHandler, self).__init__(BaseHostWeigher, namespace) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/scheduler/weights/capacity.py0000664000175000017500000001410100000000000022127 0ustar00zuulzuul00000000000000# Copyright (c) 2013 eBay Inc. # Copyright (c) 2012 OpenStack Foundation # Copyright (c) 2015 EMC Corporation # # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import math from oslo_config import cfg from cinder.scheduler import weights from cinder import utils capacity_weight_opts = [ cfg.FloatOpt('capacity_weight_multiplier', default=1.0, help='Multiplier used for weighing free capacity. ' 'Negative numbers mean to stack vs spread.'), cfg.FloatOpt('allocated_capacity_weight_multiplier', default=-1.0, help='Multiplier used for weighing allocated capacity. ' 'Positive numbers mean to stack vs spread.'), ] CONF = cfg.CONF CONF.register_opts(capacity_weight_opts) OFFSET_MIN = 10000 OFFSET_MULT = 100 class CapacityWeigher(weights.BaseHostWeigher): """Capacity Weigher weighs hosts by their virtual or actual free capacity. For thin provisioning, weigh hosts by their virtual free capacity calculated by the total capacity multiplied by the max over subscription ratio and subtracting the provisioned capacity; Otherwise, weigh hosts by their actual free capacity, taking into account the reserved space. The default is to spread volumes across all hosts evenly. If you prefer stacking, you can set the ``capacity_weight_multiplier`` option to a negative number and the weighing has the opposite effect of the default. """ def weight_multiplier(self) -> float: """Override the weight multiplier.""" return CONF.capacity_weight_multiplier def weigh_objects(self, weighed_obj_list, weight_properties): """Override the weigh objects. This override calls the parent to do the weigh objects and then replaces any infinite weights with a value that is a multiple of the delta between the min and max values. 
NOTE(jecarey): the infinite weight value is only used when the smallest value is being favored (negative multiplier). When the largest weight value is being used a weight of -1 is used instead. See _weigh_object method. """ tmp_weights = super(CapacityWeigher, self).weigh_objects( weighed_obj_list, weight_properties) assert self.maxval is not None if math.isinf(self.maxval): # NOTE(jecarey): if all weights were infinite then parent # method returns 0 for all of the weights. Thus self.minval # cannot be infinite at this point copy_weights = [w for w in tmp_weights if not math.isinf(w)] self.maxval = max(copy_weights) assert self.minval is not None offset = (self.maxval - self.minval) * OFFSET_MULT self.maxval += OFFSET_MIN if offset == 0.0 else offset tmp_weights = [self.maxval if math.isinf(w) else w for w in tmp_weights] return tmp_weights def _weigh_object(self, host_state, weight_properties) -> float: """Higher weights win. We want spreading to be the default.""" free_space = host_state.free_capacity_gb total_space = host_state.total_capacity_gb if (free_space == 'infinite' or free_space == 'unknown' or total_space == 'infinite' or total_space == 'unknown'): # (zhiteng) 'infinite' and 'unknown' are treated the same # here, for sorting purpose. # As a partial fix for bug #1350638, 'infinite' and 'unknown' are # given the lowest weight to discourage driver from report such # capacity anymore. free = -1 if CONF.capacity_weight_multiplier > 0 else float('inf') else: # NOTE(xyang): If 'provisioning:type' is 'thick' in extra_specs, # we will not use max_over_subscription_ratio and # provisioned_capacity_gb to determine whether a volume can be # provisioned. Instead free capacity will be used to evaluate. thin = True vol_type = weight_properties.get('volume_type', {}) or {} provision_type = vol_type.get('extra_specs', {}).get( 'provisioning:type') if provision_type == 'thick': thin = False free = utils.calculate_virtual_free_capacity( total_space, free_space, host_state.provisioned_capacity_gb, host_state.thin_provisioning_support, host_state.max_over_subscription_ratio, host_state.reserved_percentage, thin) return free class AllocatedCapacityWeigher(weights.BaseHostWeigher): """Allocated Capacity Weigher weighs hosts by their allocated capacity. The default behavior is to place new volume to the host allocated the least space. This weigher is intended to simulate the behavior of SimpleScheduler. If you prefer to place volumes to host allocated the most space, you can set the ``allocated_capacity_weight_multiplier`` option to a positive number and the weighing has the opposite effect of the default. """ def weight_multiplier(self) -> float: """Override the weight multiplier.""" return CONF.allocated_capacity_weight_multiplier def _weigh_object(self, host_state, weight_properties): # Higher weights win. We want spreading (choose host with lowest # allocated_capacity first) to be the default. allocated_space = host_state.allocated_capacity_gb return allocated_space ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/scheduler/weights/chance.py0000664000175000017500000000165700000000000021567 0ustar00zuulzuul00000000000000# Copyright (C) 2013 Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import random from cinder.scheduler import weights class ChanceWeigher(weights.BaseHostWeigher): """Chance Weigher assigns random weights to hosts. Used to spread volumes randomly across a list of equally suitable hosts. """ def _weigh_object(self, host_state, weight_properties): return random.random() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/scheduler/weights/goodness.py0000664000175000017500000001475700000000000022174 0ustar00zuulzuul00000000000000# Copyright (C) 2014 Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_log import log as logging from cinder.scheduler.evaluator import evaluator from cinder.scheduler import weights LOG = logging.getLogger(__name__) class GoodnessWeigher(weights.BaseHostWeigher): """Goodness Weigher. Assign weights based on a host's goodness function. Goodness rating is the following: .. code-block:: none 0 -- host is a poor choice . . 50 -- host is a good choice . . 100 -- host is a perfect choice """ def _weigh_object(self, host_state, weight_properties): """Determine host's goodness rating based on a goodness_function.""" stats = self._generate_stats(host_state, weight_properties) LOG.debug("Checking host '%s'", stats[0]['host_stats']['host']) # Run the goodness function for all possible storage_protocol values # (e.g. FC, fibre_channel) and use the maximum value, as the function # may look for an exact match on a protocol and the backend may be # returning a variant. result = max(self._check_goodness_function(stat) for stat in stats) LOG.debug("Goodness weight for %(host)s: %(res)s", {'res': result, 'host': stats[0]['host_stats']['host']}) return result def _check_goodness_function(self, stats): """Gets a host's goodness rating based on its goodness function.""" goodness_rating = 0 if stats['goodness_function'] is None: LOG.warning("Goodness function not set :: defaulting to " "minimal goodness rating of 0") else: try: goodness_result = self._run_evaluator( stats['goodness_function'], stats) except Exception as ex: LOG.warning("Error in goodness_function function " "'%(function)s' : '%(error)s' :: Defaulting " "to a goodness of 0", {'function': stats['goodness_function'], 'error': ex, }) return goodness_rating if type(goodness_result) is bool: if goodness_result: goodness_rating = 100 elif goodness_result < 0 or goodness_result > 100: LOG.warning("Invalid goodness result. Result must be " "between 0 and 100. 
Result generated: '%s' " ":: Defaulting to a goodness of 0", goodness_result) else: goodness_rating = goodness_result return goodness_rating def _run_evaluator(self, func, stats): """Evaluates a given function using the provided available stats.""" host_stats = stats['host_stats'] host_caps = stats['host_caps'] extra_specs = stats['extra_specs'] qos_specs = stats['qos_specs'] volume_stats = stats['volume_stats'] result = evaluator.evaluate( func, extra=extra_specs, stats=host_stats, capabilities=host_caps, volume=volume_stats, qos=qos_specs) return result def _generate_stats(self, host_state, weight_properties): """Generates statistics from host and volume data. Returns a list where each entry corresponds to a different storage_protocol value for those backends that use a storage protocol that has variants, but only if the function actually uses the protocol. """ host_stats = { 'host': host_state.host, 'volume_backend_name': host_state.volume_backend_name, 'vendor_name': host_state.vendor_name, 'driver_version': host_state.driver_version, 'storage_protocol': host_state.storage_protocol, 'QoS_support': host_state.QoS_support, 'total_capacity_gb': host_state.total_capacity_gb, 'allocated_capacity_gb': host_state.allocated_capacity_gb, 'free_capacity_gb': host_state.free_capacity_gb, 'reserved_percentage': host_state.reserved_percentage, 'updated': host_state.updated, } host_caps = host_state.capabilities goodness_function = None uses_protocol = False if ('goodness_function' in host_caps and host_caps['goodness_function'] is not None): goodness_function = str(host_caps['goodness_function']) uses_protocol = 'storage_protocol' in goodness_function qos_specs = weight_properties.get('qos_specs', {}) or {} volume_type = weight_properties.get('volume_type', {}) or {} extra_specs = volume_type.get('extra_specs', {}) request_spec = weight_properties.get('request_spec', {}) or {} volume_stats = request_spec.get('volume_properties', {}) stats = { 'host_stats': host_stats, 'host_caps': host_caps, 'extra_specs': extra_specs, 'qos_specs': qos_specs, 'volume_stats': volume_stats, 'volume_type': volume_type, 'goodness_function': goodness_function, } # Only create individual entries for the different protocols variants # if the function uses the protocol and there are variants. if uses_protocol and isinstance(host_state.storage_protocol, list): result = [] for protocol in host_state.storage_protocol: new_stats = stats.copy() new_stats['host_stats'] = dict(new_stats['host_stats']) new_stats['host_stats']['storage_protocol'] = protocol new_stats['host_caps'] = dict(new_stats['host_caps']) new_stats['host_caps']['storage_protocol'] = protocol result.append(new_stats) else: result = [stats] return result ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/scheduler/weights/stochastic.py0000664000175000017500000000647400000000000022514 0ustar00zuulzuul00000000000000# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
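# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original tree): the GoodnessWeigher
# above hands the backend-reported goodness_function string to the scheduler
# evaluator together with the stats it gathered, mirroring _run_evaluator().
# The function string, the stat values and the expected rating below are
# hypothetical examples of what a backend could report via its
# 'goodness_function' capability; the expression syntax is assumed to follow
# the documented filter/goodness function grammar.
# ---------------------------------------------------------------------------
from cinder.scheduler.evaluator import evaluator as _evaluator_sketch

_host_stats_sketch = {'host': 'host1@lvm', 'free_capacity_gb': 800,
                      'total_capacity_gb': 1000}
_host_caps_sketch = {'storage_protocol': 'iSCSI'}

# Rate the backend 100 while more than half of its space is free, and 25
# otherwise.
_goodness_sketch = "(stats.free_capacity_gb > 500) ? 100 : 25"

_rating_sketch = _evaluator_sketch.evaluate(
    _goodness_sketch, extra={}, stats=_host_stats_sketch,
    capabilities=_host_caps_sketch, volume={}, qos={})
# For the sample numbers above this yields a rating of 100.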
""" Stochastic weight handler This weight handler differs from the default weight handler by giving every pool a chance to be chosen where the probability is proportional to each pools' weight. """ import random from cinder.scheduler import base_weight from cinder.scheduler import weights as wts class StochasticHostWeightHandler(base_weight.BaseWeightHandler): def __init__(self, namespace): super(StochasticHostWeightHandler, self).__init__(wts.BaseHostWeigher, namespace) def get_weighed_objects(self, weigher_classes, obj_list, weighing_properties): # The normalization performed in the superclass is nonlinear, which # messes up the probabilities, so override it. The probabilistic # approach we use here is self-normalizing. # Also, the sorting done by the parent implementation is harmless but # useless for us. # Compute the object weights as the parent would but without sorting # or normalization. weighed_objs = [wts.WeighedHost(obj, 0.0) for obj in obj_list] for weigher_cls in weigher_classes: weigher = weigher_cls() weights = weigher.weigh_objects(weighed_objs, weighing_properties) for i, weight in enumerate(weights): obj = weighed_objs[i] obj.weight += weigher.weight_multiplier() * weight # Avoid processing empty lists if not weighed_objs: return [] # First compute the total weight of all the objects and the upper # bound for each object to "win" the lottery. total_weight = 0.0 table = [] for weighed_obj in weighed_objs: total_weight += weighed_obj.weight max_value = total_weight table.append((max_value, weighed_obj)) # Now draw a random value with the computed range winning_value = random.random() * total_weight # Scan the table to find the first object with a maximum higher than # the random number. Save the index of the winner. winning_index = 0 for (i, (max_value, weighed_obj)) in enumerate(table): if max_value > winning_value: # Return a single element array with the winner. winning_index = i break # It's theoretically possible for the above loop to terminate with no # winner. This happens when winning_value >= total_weight, which # could only occur with very large numbers and floating point # rounding. In those cases the actual winner should have been the # last element, so return it. return weighed_objs[winning_index:] + weighed_objs[0:winning_index] ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/scheduler/weights/volume_number.py0000664000175000017500000000373100000000000023220 0ustar00zuulzuul00000000000000# Copyright (c) 2014 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg from cinder import db from cinder.scheduler import weights volume_number_weight_opts = [ cfg.FloatOpt('volume_number_multiplier', default=-1.0, help='Multiplier used for weighing volume number. 
' 'Negative numbers mean to spread vs stack.'), ] CONF = cfg.CONF CONF.register_opts(volume_number_weight_opts) class VolumeNumberWeigher(weights.BaseHostWeigher): """Weigher that weighs hosts by volume number in backends. The default is to spread volumes across all hosts evenly. If you prefer stacking, you can set the ``volume_number_multiplier`` option to a positive number and the weighing has the opposite effect of the default. """ def weight_multiplier(self) -> float: """Override the weight multiplier.""" return CONF.volume_number_multiplier def _weigh_object(self, host_state, weight_properties): """Less volume number weights win. We want spreading to be the default. """ context = weight_properties['context'] context = context.elevated() volume_number = db.volume_data_get_for_host(context=context, host=host_state.host, count_only=True) return volume_number ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/service.py0000664000175000017500000006754100000000000016362 0ustar00zuulzuul00000000000000# Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # Copyright 2011 Justin Santa Barbara # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Generic Node base class for all workers that run on hosts.""" import inspect import os import random import subprocess import sys import time from typing import Optional from oslo_concurrency import processutils from oslo_config import cfg from oslo_db import exception as db_exc from oslo_log import log as logging import oslo_messaging as messaging from oslo_service import service from oslo_service import wsgi from oslo_utils import importutils osprofiler_initializer = importutils.try_import('osprofiler.initializer') profiler = importutils.try_import('osprofiler.profiler') profiler_opts = importutils.try_import('osprofiler.opts') from cinder.common import constants from cinder import context from cinder import coordination from cinder import db from cinder import exception from cinder.i18n import _ from cinder import objects from cinder.objects import base as objects_base from cinder.objects import fields from cinder import rpc from cinder import version from cinder.volume import volume_utils if os.name == 'nt': from os_win import utilsfactory as os_win_utilsfactory else: os_win_utilsfactory = None LOG = logging.getLogger(__name__) service_opts = [ cfg.IntOpt('report_interval', default=10, help='Interval, in seconds, between nodes reporting state ' 'to datastore'), cfg.IntOpt('periodic_interval', default=60, help='Interval, in seconds, between running periodic tasks'), cfg.IntOpt('periodic_fuzzy_delay', default=60, help='Range, in seconds, to randomly delay when starting the' ' periodic task scheduler to reduce stampeding.' 
' (Disable by setting to 0)'), cfg.StrOpt('osapi_volume_listen', default="0.0.0.0", help='IP address on which OpenStack Volume API listens'), cfg.PortOpt('osapi_volume_listen_port', default=8776, help='Port on which OpenStack Volume API listens'), cfg.IntOpt('osapi_volume_workers', help='Number of workers for OpenStack Volume API service. ' 'The default is equal to the number of CPUs available.'), cfg.BoolOpt('osapi_volume_use_ssl', default=False, help='Wraps the socket in a SSL context if True is set. ' 'A certificate file and key file must be specified.'), ] CONF = cfg.CONF CONF.register_opts(service_opts) if profiler_opts: profiler_opts.set_defaults(CONF) def setup_profiler(binary: str, host: str) -> None: if (osprofiler_initializer is None or profiler is None or profiler_opts is None): LOG.debug('osprofiler is not present') return if CONF.profiler.enabled: osprofiler_initializer.init_from_conf( conf=CONF, context=context.get_admin_context().to_dict(), project="cinder", service=binary, host=host ) LOG.warning( "OSProfiler is enabled.\nIt means that person who knows " "any of hmac_keys that are specified in " "/etc/cinder/cinder.conf can trace his requests. \n" "In real life only operator can read this file so there " "is no security issue. Note that even if person can " "trigger profiler, only admin user can retrieve trace " "information.\n" "To disable OSProfiler set in cinder.conf:\n" "[profiler]\nenabled=false") class Service(service.Service): """Service object for binaries running on hosts. A service takes a manager and enables rpc by listening to queues based on topic. It also periodically runs tasks on the manager and reports it state to the database services table. """ # Make service_id a class attribute so it can be used for clean up service_id = None def __init__(self, host: str, binary: str, topic: str, manager: str, report_interval: Optional[int] = None, periodic_interval: Optional[int] = None, periodic_fuzzy_delay: Optional[int] = None, service_name: Optional[str] = None, coordination: bool = False, cluster: Optional[str] = None, *args, **kwargs): super(Service, self).__init__() if not rpc.initialized(): rpc.init(CONF) self.cluster = cluster self.host = host self.binary = binary self.topic = topic self.manager_class_name = manager self.coordination = coordination manager_class = importutils.import_class(self.manager_class_name) if CONF.profiler.enabled: manager_class = profiler.trace_cls("rpc")(manager_class) self.service = None self.manager = manager_class(host=self.host, cluster=self.cluster, service_name=service_name, *args, **kwargs) self.availability_zone: str = self.manager.availability_zone self.model_disconnected: bool # NOTE(geguileo): We need to create the Service DB entry before we # create the manager, otherwise capped versions for serializer and rpc # client would use existing DB entries not including us, which could # result in us using None (if it's the first time the service is run) # or an old version (if this is a normal upgrade of a single service). ctxt = context.get_admin_context() try: service_ref = objects.Service.get_by_args(ctxt, host, binary) service_ref.rpc_current_version = manager_class.RPC_API_VERSION obj_version = objects_base.OBJ_VERSIONS.get_current() service_ref.object_current_version = obj_version # added_to_cluster attribute marks when we consider that we have # just added a host to a cluster so we can include resources into # that cluster. 
We consider that we have added the host when we # didn't have data in the cluster DB field and our current # configuration has a cluster value. We don't want to do anything # automatic if the cluster is changed, in those cases we'll want # to use cinder manage command and to it manually. self.added_to_cluster = (not service_ref.cluster_name and cluster) if service_ref.cluster_name != cluster: LOG.info('This service has been moved from cluster ' '%(cluster_svc)s to %(cluster_cfg)s. Resources ' 'will %(opt_no)sbe moved to the new cluster', {'cluster_svc': service_ref.cluster_name, 'cluster_cfg': cluster, 'opt_no': '' if self.added_to_cluster else 'NOT '}) if self.added_to_cluster: # We pass copy service's disable status in the cluster if we # have to create it. self._ensure_cluster_exists(ctxt, service_ref) service_ref.cluster_name = cluster service_ref.save() Service.service_id = service_ref.id self.origin_service_id = service_ref.id except exception.NotFound: self._create_service_ref(ctxt, manager_class.RPC_API_VERSION) # Service entry Entry didn't exist because it was manually removed # or it's the first time running, to be on the safe side we say we # were added if we are clustered. self.added_to_cluster = bool(cluster) # type: ignore self.report_interval = report_interval self.periodic_interval = periodic_interval self.periodic_fuzzy_delay = periodic_fuzzy_delay self.basic_config_check() self.saved_args, self.saved_kwargs = args, kwargs setup_profiler(binary, host) self.rpcserver: Optional['messaging.rpc.RPCServer'] = None self.backend_rpcserver: Optional['messaging.rpc.RPCServer'] = None self.cluster_rpcserver: Optional['messaging.rpc.RPCServer'] = None def start(self) -> None: version_string = version.version_string() LOG.info('Starting %(topic)s node (version %(version_string)s)', {'topic': self.topic, 'version_string': version_string}) self.model_disconnected = False if self.coordination: coordination.COORDINATOR.start() # NOTE(yikun): When re-spawning child process, we should set the class # attribute back using the origin service_id, otherwise, # the Service.service_id will be inherited from the parent process, # and will be recorded as the last started service id by mistaken. Service.service_id = self.origin_service_id self.manager.init_host(added_to_cluster=self.added_to_cluster, service_id=Service.service_id) LOG.debug("Creating RPC server for service %s", self.topic) ctxt = context.get_admin_context() endpoints = [self.manager] endpoints.extend(self.manager.additional_endpoints) obj_version_cap = objects.Service.get_minimum_obj_version(ctxt) LOG.debug("Pinning object versions for RPC server serializer to %s", obj_version_cap) serializer = objects_base.CinderObjectSerializer(obj_version_cap) target = messaging.Target(topic=self.topic, server=self.host) self.rpcserver = rpc.get_server(target, endpoints, serializer) self.rpcserver.start() # NOTE(dulek): Kids, don't do that at home. We're relying here on # oslo.messaging implementation details to keep backward compatibility # with pre-Ocata services. This will not matter once we drop # compatibility with them. 
if self.topic == constants.VOLUME_TOPIC: target = messaging.Target( topic='%(topic)s.%(host)s' % {'topic': self.topic, 'host': self.host}, server=volume_utils.extract_host(self.host, 'host')) self.backend_rpcserver = rpc.get_server(target, endpoints, serializer) self.backend_rpcserver.start() if self.cluster: LOG.info('Starting %(topic)s cluster %(cluster)s (version ' '%(version)s)', {'topic': self.topic, 'version': version_string, 'cluster': self.cluster}) target = messaging.Target( topic='%s.%s' % (self.topic, self.cluster), server=volume_utils.extract_host(self.cluster, 'host')) serializer = objects_base.CinderObjectSerializer(obj_version_cap) self.cluster_rpcserver = rpc.get_server(target, endpoints, serializer) self.cluster_rpcserver.start() self.manager.init_host_with_rpc() if self.report_interval: self.tg.add_timer(self.report_interval, self.report_state, initial_delay=self.report_interval) if self.periodic_interval: initial_delay: Optional[int] if self.periodic_fuzzy_delay: initial_delay = random.randint(0, self.periodic_fuzzy_delay) else: initial_delay = None self.tg.add_timer(self.periodic_interval, self.periodic_tasks, initial_delay=initial_delay) def basic_config_check(self) -> None: """Perform basic config checks before starting service.""" # Make sure report interval is less than service down time if self.report_interval: if CONF.service_down_time <= self.report_interval: new_down_time = int(self.report_interval * 2.5) LOG.warning( "Report interval must be less than service down " "time. Current config service_down_time: " "%(service_down_time)s, report_interval for this: " "service is: %(report_interval)s. Setting global " "service_down_time to: %(new_down_time)s", {'service_down_time': CONF.service_down_time, 'report_interval': self.report_interval, 'new_down_time': new_down_time}) CONF.set_override('service_down_time', new_down_time) def _ensure_cluster_exists(self, context: context.RequestContext, service: 'Service') -> None: if self.cluster: try: cluster = objects.Cluster.get_by_id(context, None, name=self.cluster, binary=self.binary) # If the cluster already exists, then the service replication # fields must match those of the cluster unless the service # is in error status. error_states = (fields.ReplicationStatus.ERROR, fields.ReplicationStatus.FAILOVER_ERROR) if service.replication_status not in error_states: for attr in ('replication_status', 'active_backend_id', 'frozen'): if getattr(service, attr) != getattr(cluster, attr): setattr(service, attr, getattr(cluster, attr)) except exception.ClusterNotFound: # Since the cluster didn't exist, we copy replication fields # from the service. cluster = objects.Cluster( context=context, name=self.cluster, binary=self.binary, disabled=service.disabled, replication_status=service.replication_status, active_backend_id=service.active_backend_id, frozen=service.frozen) try: cluster.create() # Race condition occurred and another service created the # cluster, so we can continue as it already exists. 
except exception.ClusterExists: pass def _create_service_ref(self, context: context.RequestContext, rpc_version: Optional[str] = None) -> None: kwargs = { 'host': self.host, 'binary': self.binary, 'topic': self.topic, 'report_count': 0, 'availability_zone': self.availability_zone, 'rpc_current_version': rpc_version or self.manager.RPC_API_VERSION, 'object_current_version': objects_base.OBJ_VERSIONS.get_current(), } kwargs['cluster_name'] = self.cluster service_ref = objects.Service(context=context, **kwargs) service_ref.create() Service.service_id = service_ref.id self.origin_service_id = service_ref.id self._ensure_cluster_exists(context, service_ref) # If we have updated the service_ref with replication data from # the cluster it will be saved. service_ref.save() # Update all volumes that are associated with an old service with # the new service uuid db.volume_update_all_by_service(context) def __getattr__(self, key: str): manager = self.__dict__.get('manager', None) return getattr(manager, key) @classmethod def create(cls, host: Optional[str] = None, binary: Optional[str] = None, topic: Optional[str] = None, manager: Optional[str] = None, report_interval: Optional[int] = None, periodic_interval: Optional[int] = None, periodic_fuzzy_delay: Optional[int] = None, service_name: Optional[str] = None, coordination: bool = False, cluster: Optional[str] = None, **kwargs) -> 'Service': """Instantiates class and passes back application object. :param host: defaults to CONF.host :param binary: defaults to basename of executable :param topic: defaults to bin_name - 'cinder-' part :param manager: defaults to CONF._manager :param report_interval: defaults to CONF.report_interval :param periodic_interval: defaults to CONF.periodic_interval :param periodic_fuzzy_delay: defaults to CONF.periodic_fuzzy_delay :param cluster: Defaults to None, as only some services will have it """ if not host: host = CONF.host if not binary: binary = os.path.basename(inspect.stack()[-1][1]) if not topic: topic = binary if not manager: subtopic = topic.rpartition('cinder-')[2] manager = CONF.get('%s_manager' % subtopic, None) if report_interval is None: report_interval = CONF.report_interval if periodic_interval is None: periodic_interval = CONF.periodic_interval if periodic_fuzzy_delay is None: periodic_fuzzy_delay = CONF.periodic_fuzzy_delay assert host is not None assert manager is not None service_obj = cls(host, binary, topic, manager, report_interval=report_interval, periodic_interval=periodic_interval, periodic_fuzzy_delay=periodic_fuzzy_delay, service_name=service_name, coordination=coordination, cluster=cluster, **kwargs) return service_obj def stop(self) -> None: # Try to shut the connection down, but if we get any sort of # errors, go ahead and ignore them.. 
as we're shutting down anyway try: if self.rpcserver is not None: self.rpcserver.stop() if self.backend_rpcserver: self.backend_rpcserver.stop() if self.cluster_rpcserver: self.cluster_rpcserver.stop() except Exception: pass if self.coordination: try: coordination.COORDINATOR.stop() except Exception: pass super(Service, self).stop(graceful=True) def wait(self) -> None: if self.rpcserver: self.rpcserver.wait() if self.backend_rpcserver: self.backend_rpcserver.wait() if self.cluster_rpcserver: self.cluster_rpcserver.wait() super(Service, self).wait() def periodic_tasks(self, raise_on_error: bool = False) -> None: """Tasks to be run at a periodic interval.""" ctxt = context.get_admin_context() self.manager.run_periodic_tasks(ctxt, raise_on_error=raise_on_error) def report_state(self) -> None: """Update the state of this service in the datastore.""" if not self.manager.is_working(): # NOTE(dulek): If manager reports a problem we're not sending # heartbeats - to indicate that service is actually down. LOG.error('Manager for service %(binary)s %(host)s is ' 'reporting problems, not sending heartbeat. ' 'Service will appear "down".', {'binary': self.binary, 'host': self.host}) return ctxt = context.get_admin_context() try: try: service_ref = objects.Service.get_by_id(ctxt, Service.service_id) except exception.NotFound: LOG.debug('The service database object disappeared, ' 'recreating it.') self._create_service_ref(ctxt) service_ref = objects.Service.get_by_id(ctxt, Service.service_id) service_ref.report_count += 1 if self.availability_zone != service_ref.availability_zone: service_ref.availability_zone = self.availability_zone service_ref.save(retry=False) # TODO(termie): make this pattern be more elegant. if getattr(self, 'model_disconnected', False): self.model_disconnected = False LOG.error('Recovered model server connection!') except db_exc.DBConnectionError: if not getattr(self, 'model_disconnected', False): self.model_disconnected = True LOG.exception('model server went away') # NOTE(jsbryant) Other DB errors can happen in HA configurations. # such errors shouldn't kill this thread, so we handle them here. except db_exc.DBError: if not getattr(self, 'model_disconnected', False): self.model_disconnected = True LOG.exception('DBError encountered: ') except Exception: if not getattr(self, 'model_disconnected', False): self.model_disconnected = True LOG.exception('Exception encountered: ') def reset(self) -> None: self.manager.reset() super(Service, self).reset() class WSGIService(service.ServiceBase): """Provides ability to launch API from a 'paste' configuration.""" def __init__(self, name, loader=None): """Initialize, but do not start the WSGI server. :param name: The name of the WSGI server given to the loader. :param loader: Loads the WSGI application using the given name. 
:returns: None """ self.name = name self.manager = self._get_manager() self.loader = loader or wsgi.Loader(CONF) self.app = self.loader.load_app(name) self.host = getattr(CONF, '%s_listen' % name, "0.0.0.0") self.port = getattr(CONF, '%s_listen_port' % name, 0) self.use_ssl = getattr(CONF, '%s_use_ssl' % name, False) self.workers = (getattr(CONF, '%s_workers' % name, None) or processutils.get_worker_count()) if self.workers and self.workers < 1: worker_name = '%s_workers' % name msg = (_("%(worker_name)s value of %(workers)d is invalid, " "must be greater than 0.") % {'worker_name': worker_name, 'workers': self.workers}) raise exception.InvalidConfigurationValue(msg) setup_profiler(name, self.host) self.server = wsgi.Server(CONF, name, self.app, host=self.host, port=self.port, use_ssl=self.use_ssl) def _get_manager(self): """Initialize a Manager object appropriate for this service. Use the service name to look up a Manager subclass from the configuration and initialize an instance. If no class name is configured, just return None. :returns: a Manager instance, or None. """ fl = '%s_manager' % self.name if fl not in CONF: return None manager_class_name = CONF.get(fl, None) if not manager_class_name: return None manager_class = importutils.import_class(manager_class_name) return manager_class() def start(self) -> None: """Start serving this service using loaded configuration. Also, retrieve updated port number in case '0' was passed in, which indicates a random port should be used. :returns: None """ if self.manager: self.manager.init_host() self.server.start() self.port = self.server.port def stop(self) -> None: """Stop serving this API. :returns: None """ self.server.stop() def wait(self) -> None: """Wait for the service to stop serving this API. :returns: None """ self.server.wait() def reset(self) -> None: """Reset server greenpool size to default. :returns: None """ self.server.reset() def process_launcher() -> service.ProcessLauncher: return service.ProcessLauncher(CONF, restart_method='mutate') # NOTE(vish): the global launcher is to maintain the existing # functionality of calling service.serve + # service.wait _launcher = None def serve(server, workers=None): global _launcher if _launcher: raise RuntimeError(_('serve() can only be called once')) _launcher = service.launch(CONF, server, workers=workers, restart_method='mutate') def wait() -> None: CONF.log_opt_values(LOG, logging.DEBUG) try: _launcher.wait() # type: ignore except KeyboardInterrupt: _launcher.stop() # type: ignore rpc.cleanup() class Launcher(object): def __init__(self): self.launch_service = serve self.wait = wait def get_launcher() -> service.ProcessLauncher: # Note(lpetrut): ProcessLauncher uses green pipes which fail on Windows # due to missing support of non-blocking I/O pipes. For this reason, the # service must be spawned differently on Windows, using the ServiceLauncher # class instead. 
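# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original tree): an outline of how the
# classes and helpers above are typically wired together by the cinder
# command-line entry points.  The exact entry-point modules may differ in
# detail; the functions below are hypothetical and are not called anywhere.
# ---------------------------------------------------------------------------
def _launch_api_sketch():
    """Hypothetical API entry point: paste app served by a process launcher."""
    launcher = process_launcher()
    server = WSGIService('osapi_volume')
    launcher.launch_service(server, workers=server.workers)
    launcher.wait()


def _launch_scheduler_sketch():
    """Hypothetical backend entry point: Service built from config defaults."""
    server = Service.create(binary='cinder-scheduler')
    serve(server)
    wait()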
if os.name == 'nt': return Launcher() else: return process_launcher() class WindowsProcessLauncher(object): def __init__(self): self._processutils = os_win_utilsfactory.get_processutils() self._workers = [] self._worker_job_handles = [] self._signal_handler = service.SignalHandler() self._add_signal_handlers() LOG.warning("Support for Windows operating systems is deprecated.") def add_process(self, cmd): LOG.info("Starting subprocess: %s", cmd) worker = subprocess.Popen(cmd) try: job_handle = self._processutils.kill_process_on_job_close( worker.pid) except Exception: LOG.exception("Could not associate child process " "with a job, killing it.") worker.kill() raise self._worker_job_handles.append(job_handle) self._workers.append(worker) def _add_signal_handlers(self): self._signal_handler.add_handler('SIGINT', self._terminate) self._signal_handler.add_handler('SIGTERM', self._terminate) def _terminate(self, *args): # We've already assigned win32 job objects to child processes, # requesting them to stop once all the job handles are closed. # When this process dies, so will the child processes. LOG.info("Received request to terminate.") sys.exit(1) def wait(self): pids = [worker.pid for worker in self._workers] if pids: self._processutils.wait_for_multiple_processes(pids, wait_all=True) # By sleeping here, we allow signal handlers to be executed. time.sleep(0) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/service_auth.py0000664000175000017500000000556600000000000017402 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from keystoneauth1 import loading as ks_loading from keystoneauth1 import service_token from oslo_config import cfg from cinder import exception CONF = cfg.CONF _SERVICE_AUTH = None _SERVICE_SESSION = None SERVICE_USER_GROUP = 'service_user' service_user = cfg.OptGroup( SERVICE_USER_GROUP, title='Service token authentication type options', help=""" Configuration options for service to service authentication using a service token. These options allow to send a service token along with the user's token when contacting external REST APIs. """ ) service_user_opts = [ cfg.BoolOpt('send_service_user_token', default=False, help=""" When True, if sending a user token to an REST API, also send a service token. 
""") ] CONF.register_group(service_user) CONF.register_opts(service_user_opts, group=service_user) ks_loading.register_session_conf_options(CONF, SERVICE_USER_GROUP) ks_loading.register_auth_conf_options(CONF, SERVICE_USER_GROUP) def reset_globals(): """For async unit test consistency.""" global _SERVICE_AUTH _SERVICE_AUTH = None def get_service_auth_plugin(): if CONF.service_user.send_service_user_token: global _SERVICE_AUTH if not _SERVICE_AUTH: _SERVICE_AUTH = ks_loading.load_auth_from_conf_options( CONF, group=SERVICE_USER_GROUP) if _SERVICE_AUTH is None: # This can happen if no auth_type is specified, which probably # means there's no auth information in the [service_user] group raise exception.ServiceUserTokenNoAuth() return _SERVICE_AUTH return None def get_service_session(): if CONF.service_user.send_service_user_token: global _SERVICE_SESSION if not _SERVICE_SESSION: _SERVICE_SESSION = ks_loading.load_session_from_conf_options( CONF, SERVICE_USER_GROUP, auth=get_service_auth_plugin()) return _SERVICE_SESSION return None def get_auth_plugin(context, auth=None): if auth: user_auth = auth else: user_auth = context.get_auth_plugin() service_auth = get_service_auth_plugin() if service_auth is not None: return service_token.ServiceTokenAuthWrapper( user_auth=user_auth, service_auth=service_auth) return user_auth ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/ssh_utils.py0000664000175000017500000001757300000000000016737 0ustar00zuulzuul00000000000000# Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # Copyright 2011 Justin Santa Barbara # Copyright 2014 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Utilities related to SSH connection management.""" import os from eventlet import pools from oslo_config import cfg from oslo_log import log as logging from oslo_utils import excutils try: import paramiko except ImportError: paramiko = None from cinder import exception from cinder.i18n import _ LOG = logging.getLogger(__name__) ssh_opts = [ cfg.BoolOpt('strict_ssh_host_key_policy', default=False, help='Option to enable strict host key checking. When ' 'set to "True" Cinder will only connect to systems ' 'with a host key present in the configured ' '"ssh_hosts_key_file". When set to "False" the host key ' 'will be saved upon first connection and used for ' 'subsequent connections. Default=False'), cfg.StrOpt('ssh_hosts_key_file', default='$state_path/ssh_known_hosts', help='File containing SSH host keys for the systems with which ' 'Cinder needs to communicate. 
OPTIONAL: ' 'Default=$state_path/ssh_known_hosts'), ] CONF = cfg.CONF CONF.register_opts(ssh_opts) class SSHPool(pools.Pool): """A simple eventlet pool to hold ssh connections.""" def __init__(self, ip, port, conn_timeout, login, password=None, privatekey=None, *args, **kwargs): self.ip = ip self.port = port self.login = login self.password = password self.conn_timeout = conn_timeout if conn_timeout else None self.privatekey = privatekey self.hosts_key_file = None self.current_size = 0 if paramiko is None: raise exception.RequirementMissing(req='paramiko') # Validate good config setting here. # Paramiko handles the case where the file is inaccessible. if not CONF.ssh_hosts_key_file: raise exception.ParameterNotFound(param='ssh_hosts_key_file') elif not os.path.isfile(CONF.ssh_hosts_key_file): # If using the default path, just create the file. if CONF.state_path in CONF.ssh_hosts_key_file: open(CONF.ssh_hosts_key_file, 'a').close() else: msg = (_("Unable to find ssh_hosts_key_file: %s") % CONF.ssh_hosts_key_file) raise exception.InvalidInput(reason=msg) if 'hosts_key_file' in kwargs.keys(): self.hosts_key_file = kwargs.pop('hosts_key_file') LOG.info("Secondary ssh hosts key file %(kwargs)s will be " "loaded along with %(conf)s from /etc/cinder.conf.", {'kwargs': self.hosts_key_file, 'conf': CONF.ssh_hosts_key_file}) LOG.debug("Setting strict_ssh_host_key_policy to '%(policy)s' " "using ssh_hosts_key_file '%(key_file)s'.", {'policy': CONF.strict_ssh_host_key_policy, 'key_file': CONF.ssh_hosts_key_file}) self.strict_ssh_host_key_policy = CONF.strict_ssh_host_key_policy if not self.hosts_key_file: self.hosts_key_file = CONF.ssh_hosts_key_file else: self.hosts_key_file += ',' + CONF.ssh_hosts_key_file super(SSHPool, self).__init__(*args, **kwargs) def __del__(self): # just return if nothing todo if not self.current_size: return # change the size of the pool to reduce the number # of elements on the pool via puts. self.resize(1) # release all but the last connection using # get and put to allow any get waiters to complete. while self.waiting() or self.current_size > 1: conn = self.get() self.put(conn) # Now free everthing that is left while self.free_items: self.free_items.popleft().close() self.current_size -= 1 def create(self): try: ssh = paramiko.SSHClient() if ',' in self.hosts_key_file: files = self.hosts_key_file.split(',') for f in files: ssh.load_host_keys(f) else: ssh.load_host_keys(self.hosts_key_file) # If strict_ssh_host_key_policy is set we want to reject, by # default if there is not entry in the known_hosts file. # Otherwise we use AutoAddPolicy which accepts on the first # Connect but fails if the keys change. load_host_keys can # handle hashed known_host entries. 
if self.strict_ssh_host_key_policy: ssh.set_missing_host_key_policy(paramiko.RejectPolicy()) else: ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy()) if self.password: ssh.connect(self.ip, port=self.port, username=self.login, password=self.password, timeout=self.conn_timeout) elif self.privatekey: pkfile = os.path.expanduser(self.privatekey) privatekey = paramiko.RSAKey.from_private_key_file(pkfile) ssh.connect(self.ip, port=self.port, username=self.login, pkey=privatekey, timeout=self.conn_timeout) else: msg = _("Specify a password or private_key") raise exception.CinderException(msg) if self.conn_timeout: transport = ssh.get_transport() transport.set_keepalive(self.conn_timeout) return ssh except Exception as e: msg = _("Error connecting via ssh: %s") % str(e) LOG.error(msg) raise paramiko.SSHException(msg) def get(self): """Return an item from the pool, when one is available. This may cause the calling greenthread to block. Check if a connection is active before returning it. For dead connections create and return a new connection. """ conn = super(SSHPool, self).get() if conn: if conn.get_transport().is_active(): return conn else: conn.close() try: new_conn = self.create() except Exception: LOG.error("Create new item in SSHPool failed.") with excutils.save_and_reraise_exception(): if conn: self.current_size -= 1 return new_conn def put(self, conn): # If we are have more connections than we should just close it if self.current_size > self.max_size: conn.close() self.current_size -= 1 return super(SSHPool, self).put(conn) def remove(self, ssh): """Close an ssh client and remove it from free_items.""" ssh.close() if ssh in self.free_items: self.free_items.remove(ssh) if self.current_size > 0: self.current_size -= 1 ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315577.1071184 cinder-27.0.0/cinder/tests/0000775000175000017500000000000000000000000015475 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/README.rst0000664000175000017500000000054600000000000017171 0ustar00zuulzuul00000000000000IMPORTANT DEFINITION OF TESTS IN CINDER ======================================= Cinder has a number of different test types, PLEASE be sure to refer to the Cinder Testing Docs to familiarize yourself with the various options before creating any new tests. Please check `Cinder-Testing `_. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/__init__.py0000664000175000017500000000000000000000000017574 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315577.1071184 cinder-27.0.0/cinder/tests/compliance/0000775000175000017500000000000000000000000017607 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/compliance/__init__.py0000664000175000017500000000000000000000000021706 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/compliance/test_backup_drivers.py0000664000175000017500000000313100000000000024221 0ustar00zuulzuul00000000000000# Copyright 2016 Dell Inc. # All Rights Reserved. 
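# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original tree): how a backend driver
# typically consumes the SSHPool class defined above in cinder/ssh_utils.py.
# The address, credentials and command are hypothetical; item() is inherited
# from eventlet's Pool base class.
# ---------------------------------------------------------------------------
def _ssh_pool_usage_sketch():
    """Hypothetical: check an SSH client out of the pool and run a command."""
    from cinder import ssh_utils

    pool = ssh_utils.SSHPool('192.0.2.10', 22, conn_timeout=30,
                             login='admin', password='secret',
                             min_size=0, max_size=2)
    # item() checks a client out of the pool and puts it back when the
    # with-block exits.
    with pool.item() as ssh:
        _stdin, stdout, _stderr = ssh.exec_command('show version')
        return stdout.read()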
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # import ddt from cinder.interface import backup_driver from cinder.interface import util from cinder.tests.unit import test BACKUP_DRIVERS = util.get_backup_drivers() @ddt.ddt class TestBackupDrivers(test.TestCase): def test_backup_driver_decorator(self): """Sanity check on the decorator. The interface code is somewhat implicitly tested. We don't need unit tests for all of that code, but as a minimum we should make sure it returns at least one registered driver, else the compliance test will never even run. """ self.assertGreater(len(BACKUP_DRIVERS), 0) @ddt.data(*BACKUP_DRIVERS) def test_backup_driver_compliance(self, driver): """Makes sure all backup drivers support the minimum requirements.""" self.assertTrue( issubclass(driver.cls, backup_driver.BackupDriver), "Driver {} does not conform to minimum backup driver " "requirements!".format(driver.class_fqn)) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/compliance/test_fczm_drivers.py0000664000175000017500000000314700000000000023722 0ustar00zuulzuul00000000000000# Copyright 2016 Dell Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # import ddt from cinder.interface import fczm_driver from cinder.interface import util from cinder.tests.unit import test FCZM_DRIVERS = util.get_fczm_drivers() @ddt.ddt class TestFibreChannelZoneManagerDrivers(test.TestCase): def test_fczm_driver_decorator(self): """Sanity check on the decorator. The interface code is somewhat implicitly tested. We don't need unit tests for all of that code, but as a minimum we should make sure it returns at least one registered driver, else the compliance test will never even run. """ self.assertGreater(len(FCZM_DRIVERS), 0) @ddt.data(*FCZM_DRIVERS) def test_fczm_driver_compliance(self, driver): """Makes sure all fczm drivers support the minimum requirements.""" self.assertTrue( issubclass(driver.cls, fczm_driver.FibreChannelZoneManagerDriver), "Driver {} does not conform to minimum fczm driver " "requirements!".format(driver.class_fqn)) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/compliance/test_volume_drivers.py0000664000175000017500000000301700000000000024266 0ustar00zuulzuul00000000000000# Copyright 2016 Dell Inc. # All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # import ddt from cinder.interface import util from cinder.interface import volume_driver from cinder.tests.unit import test VOLUME_DRIVERS = util.get_volume_drivers() @ddt.ddt class TestVolumeDrivers(test.TestCase): def test_volume_driver_decorator(self): """Sanity check on the decorator. The interface code is somewhat implicitly tested. We don't need unit tests for all of that code, but as a minimum we should make sure it returns at least one registered driver, else the compliance test will never even run. """ self.assertGreater(len(VOLUME_DRIVERS), 0) @ddt.data(*VOLUME_DRIVERS) def test_volume_driver_compliance(self, driver): self.assertTrue( issubclass(driver.cls, volume_driver.VolumeDriverCore), "Driver {} does not conform to minimum volume driver " "requirements!".format(driver.class_fqn)) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/fake_driver.py0000664000175000017500000003733300000000000020341 0ustar00zuulzuul00000000000000# Copyright 2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
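# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original tree): the compliance tests
# above boil down to an issubclass() check against the core interface for
# every registered driver.  The helper below runs the same check by hand;
# the report format is an assumption, the names come from the tests above.
# ---------------------------------------------------------------------------
def _volume_driver_compliance_sketch():
    """Hypothetical helper: run the check the volume driver test automates."""
    from cinder.interface import util
    from cinder.interface import volume_driver

    for drv in util.get_volume_drivers():
        ok = issubclass(drv.cls, volume_driver.VolumeDriverCore)
        print('%s: %s' % (drv.class_fqn, 'ok' if ok else 'non-compliant'))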
from oslo_utils import timeutils from cinder import exception from cinder import objects from cinder.objects import fields from cinder.tests.unit.brick import fake_lvm from cinder.volume import driver from cinder.volume.drivers import lvm from cinder.volume import volume_utils from cinder.zonemanager import utils as fczm_utils # TODO(e0ne): inherit from driver.VolumeDriver and fix unit-tests class FakeLoggingVolumeDriver(lvm.LVMVolumeDriver): """Logs calls instead of executing.""" def __init__(self, *args, **kwargs): super(FakeLoggingVolumeDriver, self).__init__( execute=self.fake_execute, *args, **kwargs) self.backend_name = 'fake' self.protocol = 'fake' self.vg = fake_lvm.FakeBrickLVM('cinder-volumes', False, None, 'default', self.fake_execute) @volume_utils.trace_method def check_for_setup_error(self): """No setup necessary in fake mode.""" pass @volume_utils.trace_method def create_volume(self, volume): """Creates a volume.""" super(FakeLoggingVolumeDriver, self).create_volume(volume) model_update = {} try: if (volume.volume_type and volume.volume_type.extra_specs and volume_utils.is_replicated_spec( volume.volume_type.extra_specs)): # Sets the new volume's replication_status to disabled model_update['replication_status'] = ( fields.ReplicationStatus.DISABLED) except exception.VolumeTypeNotFound: pass if model_update: return model_update @volume_utils.trace_method def delete_volume(self, volume): pass @volume_utils.trace_method def create_snapshot(self, snapshot): pass @volume_utils.trace_method def delete_snapshot(self, snapshot): pass @volume_utils.trace_method def ensure_export(self, context, volume): pass @volume_utils.trace_method def create_export(self, context, volume, connector): pass @volume_utils.trace_method def remove_export(self, context, volume): pass @volume_utils.trace_method def create_export_snapshot(self, context, snapshot): pass @volume_utils.trace_method def remove_export_snapshot(self, context, snapshot): pass @volume_utils.trace_method def terminate_connection_snapshot(self, snapshot, connector): pass @volume_utils.trace_method def create_cloned_volume(self, volume, src_vol): pass @volume_utils.trace_method def create_volume_from_snapshot(self, volume, snapshot): pass @volume_utils.trace_method def initialize_connection(self, volume, connector): # NOTE(thangp): There are several places in the core cinder code where # the volume passed through is a dict and not an oslo_versionedobject. # We need to react appropriately to what type of volume is passed in, # until the switch over to oslo_versionedobjects is complete. 
if isinstance(volume, objects.Volume): volume_metadata = volume.admin_metadata else: volume_metadata = {} for metadata in volume['volume_admin_metadata']: volume_metadata[metadata['key']] = metadata['value'] access_mode = volume_metadata.get('attached_mode') if access_mode is None: access_mode = ('ro' if volume_metadata.get('readonly') == 'True' else 'rw') return {'driver_volume_type': 'iscsi', 'data': {'access_mode': access_mode}} @volume_utils.trace_method def initialize_connection_snapshot(self, snapshot, connector): return { 'driver_volume_type': 'iscsi', } @volume_utils.trace_method def terminate_connection(self, volume, connector, **kwargs): pass # Replication Group (Tiramisu) @volume_utils.trace_method def enable_replication(self, context, group, volumes): """Enables replication for a group and volumes in the group.""" model_update = { 'replication_status': fields.ReplicationStatus.ENABLED} volume_model_updates = [] for volume_ref in volumes: volume_model_update = {'id': volume_ref.id} volume_model_update['replication_status'] = ( fields.ReplicationStatus.ENABLED) volume_model_updates.append(volume_model_update) return model_update, volume_model_updates # Replication Group (Tiramisu) @volume_utils.trace_method def disable_replication(self, context, group, volumes): """Disables replication for a group and volumes in the group.""" model_update = { 'replication_status': fields.ReplicationStatus.DISABLED} volume_model_updates = [] for volume_ref in volumes: volume_model_update = {'id': volume_ref.id} volume_model_update['replication_status'] = ( fields.ReplicationStatus.DISABLED) volume_model_updates.append(volume_model_update) return model_update, volume_model_updates # Replication Group (Tiramisu) @volume_utils.trace_method def failover_replication(self, context, group, volumes, secondary_backend_id=None): """Fails over replication for a group and volumes in the group.""" model_update = { 'replication_status': fields.ReplicationStatus.FAILED_OVER} volume_model_updates = [] for volume_ref in volumes: volume_model_update = {'id': volume_ref.id} volume_model_update['replication_status'] = ( fields.ReplicationStatus.FAILED_OVER) volume_model_updates.append(volume_model_update) return model_update, volume_model_updates # Replication Group (Tiramisu) @volume_utils.trace_method def create_group(self, context, group): """Creates a group.""" model_update = super(FakeLoggingVolumeDriver, self).create_group( context, group) try: if group.is_replicated: # Sets the new group's replication_status to disabled model_update['replication_status'] = ( fields.ReplicationStatus.DISABLED) except exception.GroupTypeNotFound: pass return model_update def _update_volume_stats(self): data = {'volume_backend_name': self.backend_name, 'vendor_name': 'Open Source', 'driver_version': self.VERSION, 'storage_protocol': self.protocol, 'pools': []} fake_pool = {'pool_name': data['volume_backend_name'], 'total_capacity_gb': 'infinite', 'free_capacity_gb': 'infinite', 'provisioned_capacity_gb': 0, 'reserved_percentage': 100, 'QoS_support': False, 'filter_function': self.get_filter_function(), 'goodness_function': self.get_goodness_function(), 'consistencygroup_support': False, 'replication_enabled': True, 'group_replication_enabled': True, } data['pools'].append(fake_pool) self._stats = data @staticmethod def fake_execute(cmd, *_args, **_kwargs): """Execute that simply logs the command.""" return (None, None) class FakeISERDriver(FakeLoggingVolumeDriver): def __init__(self, *args, **kwargs): super(FakeISERDriver, 
self).__init__(execute=self.fake_execute, *args, **kwargs) def initialize_connection(self, volume, connector): return { 'driver_volume_type': 'iser', 'data': {} } class FakeFibreChannelDriver(driver.FibreChannelDriver): def initialize_connection(self, volume, connector): conn_info = { 'driver_volume_type': 'fibre_channel', 'data': { 'initiator_target_map': {'fake_wwn': ['fake_wwn2']}, }} fczm_utils.add_fc_zone(conn_info) return conn_info def initialize_connection_with_empty_map(self, volume, connector): conn_info = { 'driver_volume_type': 'fibre_channel', 'data': { 'initiator_target_map': {}, }} fczm_utils.add_fc_zone(conn_info) return conn_info def no_zone_initialize_connection(self, volume, connector): """This shouldn't call the ZM.""" conn_info = { 'driver_volume_type': 'bogus', 'data': { 'initiator_target_map': {'fake_wwn': ['fake_wwn2']}, }} fczm_utils.add_fc_zone(conn_info) return conn_info def terminate_connection(self, volume, connector, **kwargs): conn_info = { 'driver_volume_type': 'fibre_channel', 'data': { 'initiator_target_map': {'fake_wwn': ['fake_wwn2']}, }} fczm_utils.remove_fc_zone(conn_info) return conn_info def terminate_connection_with_empty_map(self, volume, connector, **kwargs): conn_info = { 'driver_volume_type': 'fibre_channel', 'data': { 'initiator_target_map': {}, }} fczm_utils.remove_fc_zone(conn_info) return conn_info def no_zone_terminate_connection(self, volume, connector, **kwargs): conn_info = { 'driver_volume_type': 'bogus', 'data': { 'initiator_target_map': {'fake_wwn': ['fake_wwn2']}, }} fczm_utils.remove_fc_zone(conn_info) return conn_info class FakeGateDriver(lvm.LVMVolumeDriver): """Class designation for FakeGateDriver. FakeGateDriver is for TESTING ONLY. There are a few driver features such as CG and replication that are not supported by the reference driver LVM currently. Adding those functions in this fake driver will help detect problems when changes are introduced in those functions. Implementation of this driver is NOT meant for production. They are implemented simply to make sure calls to the driver functions are passing in the correct parameters, and the results returned by the driver are handled properly by the manager. """ def __init__(self, *args, **kwargs): super(FakeGateDriver, self).__init__(*args, **kwargs) def _update_volume_stats(self): super(FakeGateDriver, self)._update_volume_stats() self._stats["pools"][0]["consistencygroup_support"] = True # NOTE(xyang): Consistency Group functions implemented below # are for testing purpose only. Data consistency cannot be # achieved by running these functions. 
def create_consistencygroup(self, context, group): """Creates a consistencygroup.""" # A consistencygroup entry is already created in db # This driver just returns a status now = timeutils.utcnow() model_update = {'status': fields.ConsistencyGroupStatus.AVAILABLE, 'updated_at': now} return model_update def create_consistencygroup_from_src(self, context, group, volumes, cgsnapshot=None, snapshots=None, soure_cg=None, source_vols=None): """Creates a consistencygroup from cgsnapshot or source cg.""" for vol in volumes: try: if snapshots: for snapshot in snapshots: if vol['snapshot_id'] == snapshot['id']: self.create_volume_from_snapshot(vol, snapshot) break except Exception: raise try: if source_vols: for source_vol in source_vols: if vol['source_volid'] == source_vol['id']: self.create_cloned_volume(vol, source_vol) break except Exception: raise return None, None def delete_consistencygroup(self, context, group, volumes): """Deletes a consistencygroup and volumes in the group.""" model_update = {'status': group.status} volume_model_updates = [] for volume_ref in volumes: volume_model_update = {'id': volume_ref.id} try: self.remove_export(context, volume_ref) self.delete_volume(volume_ref) volume_model_update['status'] = 'deleted' except exception.VolumeIsBusy: volume_model_update['status'] = 'available' except Exception: volume_model_update['status'] = 'error' model_update['status'] = fields.ConsistencyGroupStatus.ERROR volume_model_updates.append(volume_model_update) return model_update, volume_model_updates def update_consistencygroup(self, context, group, add_volumes=None, remove_volumes=None): """Updates a consistency group.""" return None, None, None def create_cgsnapshot(self, context, cgsnapshot, snapshots): """Creates a cgsnapshot. Snapshots created here are NOT consistent. This is for testing purpose only. """ model_update = {'status': 'available'} snapshot_model_updates = [] for snapshot in snapshots: snapshot_model_update = {'id': snapshot.id} try: self.create_snapshot(snapshot) snapshot_model_update['status'] = ( fields.SnapshotStatus.AVAILABLE) except Exception: snapshot_model_update['status'] = fields.SnapshotStatus.ERROR model_update['status'] = 'error' snapshot_model_updates.append(snapshot_model_update) return model_update, snapshot_model_updates def delete_cgsnapshot(self, context, cgsnapshot, snapshots): """Deletes a cgsnapshot.""" model_update = {'status': cgsnapshot.status} snapshot_model_updates = [] for snapshot in snapshots: snapshot_model_update = {'id': snapshot.id} try: self.delete_snapshot(snapshot) snapshot_model_update['status'] = ( fields.SnapshotStatus.DELETED) except exception.SnapshotIsBusy: snapshot_model_update['status'] = ( fields.SnapshotStatus.AVAILABLE) except Exception: snapshot_model_update['status'] = ( fields.SnapshotStatus.ERROR) model_update['status'] = 'error' snapshot_model_updates.append(snapshot_model_update) return model_update, snapshot_model_updates class FakeHAReplicatedLoggingVolumeDriver(FakeLoggingVolumeDriver): SUPPORTS_ACTIVE_ACTIVE = True @volume_utils.trace_method def failover_completed(self, context, active_backend_id=None): pass ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/fixtures.py0000664000175000017500000002141200000000000017720 0ustar00zuulzuul00000000000000# Copyright 2016 IBM Corp. 
# Copyright 2017 Rackspace Australia # Copyright 2018 Michael Still and Aptira # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Fixtures for Cinder tests.""" import logging as std_logging import os import warnings import fixtures from oslo_config import cfg from oslo_policy import policy as oslo_policy from oslo_privsep import daemon as privsep_daemon from sqlalchemy import exc as sqla_exc import cinder.policy CONF = cfg.CONF _TRUE_VALUES = ('True', 'true', '1', 'yes') class NullHandler(std_logging.Handler): """custom default NullHandler to attempt to format the record. Used in conjunction with log_fixture.get_logging_handle_error_fixture to detect formatting errors in debug level logs without saving the logs. """ def handle(self, record): self.format(record) def emit(self, record): pass def createLock(self): self.lock = None class StandardLogging(fixtures.Fixture): """Setup Logging redirection for tests. There are a number of things we want to handle with logging in tests: * Redirect the logging to somewhere that we can test or dump it later. * Ensure that as many DEBUG messages as possible are actually executed, to ensure they are actually syntactically valid (they often have not been). * Ensure that we create useful output for tests that doesn't overwhelm the testing system (which means we can't capture the 100 MB of debug logging on every run). To do this we create a logger fixture at the root level, which defaults to INFO and create a Null Logger at DEBUG which lets us execute log messages at DEBUG but not keep the output. To support local debugging OS_DEBUG=True can be set in the environment, which will print out the full debug logging. There are also a set of overrides for particularly verbose modules to be even less than INFO. """ def setUp(self): super(StandardLogging, self).setUp() # set root logger to debug root = std_logging.getLogger() root.setLevel(std_logging.INFO) # supports collecting debug level for local runs if os.environ.get('OS_DEBUG') in _TRUE_VALUES: level = std_logging.DEBUG else: level = std_logging.INFO # Collect logs fs = '%(asctime)s %(levelname)s [%(name)s] %(message)s' self.logger = self.useFixture( fixtures.FakeLogger(format=fs, level=None)) # TODO(sdague): why can't we send level through the fake # logger? Tests prove that it breaks, but it's worth getting # to the bottom of. root.handlers[0].setLevel(level) if level > std_logging.DEBUG: # Just attempt to format debug level logs, but don't save them handler = NullHandler() self.useFixture(fixtures.LogHandler(handler, nuke_handlers=False)) handler.setLevel(std_logging.DEBUG) # Don't log every single DB migration step std_logging.getLogger( 'migrate.versioning.api').setLevel(std_logging.WARNING) # At times we end up calling back into main() functions in # testing. This has the possibility of calling logging.setup # again, which completely unwinds the logging capture we've # created here. Once we've setup the logging in the way we want, # disable the ability for the test to change this. 
def fake_logging_setup(*args): pass self.useFixture( fixtures.MonkeyPatch('oslo_log.log.setup', fake_logging_setup)) class WarningsFixture(fixtures.Fixture): """Filters out warnings during test runs.""" def setUp(self): super().setUp() self._original_warning_filters = warnings.filters[:] # NOTE(sdague): Make deprecation warnings only happen once. Otherwise # this gets kind of crazy given the way that upstream python libs use # this. warnings.simplefilter('once', DeprecationWarning) # The UUIDFields emits a warning if the value is not a valid UUID. # Let's escalate that to an exception in the test to prevent adding # violations. warnings.filterwarnings('error', message='.*invalid UUID.*') # NOTE(sdague): this remains an unresolved item around the way # forward on is_admin, the deprecation is definitely really premature. warnings.filterwarnings( 'ignore', message='Policy enforcement is depending on the value of is_admin.' ' This key is deprecated. Please update your policy ' 'file to use the standard policy values.') # We can't do anything about this outside of cinder warnings.filterwarnings( 'ignore', message='distutils Version classes are deprecated. .*', category=DeprecationWarning, ) warnings.filterwarnings( 'ignore', message='the imp module is deprecated in favour of importlib', category=DeprecationWarning, ) warnings.filterwarnings( 'ignore', message='invalid escape sequence', category=DeprecationWarning, ) warnings.filterwarnings( 'error', message='invalid escape sequence', category=DeprecationWarning, module='cinder', ) warnings.filterwarnings( 'ignore', message='Policy ".*":".*" was deprecated in ', module='oslo_policy', category=UserWarning, ) # Enable deprecation warnings for cinder itself to capture upcoming # SQLAlchemy changes warnings.filterwarnings( 'ignore', category=sqla_exc.SADeprecationWarning, ) warnings.filterwarnings( 'error', module='cinder', category=sqla_exc.SADeprecationWarning, ) # ...but filter everything out until we get around to fixing them # TODO: Fix all of these warnings.filterwarnings( 'ignore', module='cinder', message='The current statement is being autocommitted using ', category=sqla_exc.SADeprecationWarning, ) warnings.filterwarnings( 'ignore', module='cinder', message='The Session.begin.subtransactions flag is deprecated ', category=sqla_exc.SADeprecationWarning, ) self.addCleanup(self._reset_warning_filters) def _reset_warning_filters(self): warnings.filters[:] = self._original_warning_filters class UnHelperfulClientChannel(privsep_daemon._ClientChannel): def __init__(self, context): raise Exception('You have attempted to start a privsep helper. ' 'This is not allowed in the gate, and ' 'indicates a failure to have mocked your tests.') class PrivsepNoHelperFixture(fixtures.Fixture): """A fixture to catch failures to mock privsep's rootwrap helper. If you fail to mock away a privsep'd method in a unit test, then you may well end up accidentally running the privsep rootwrap helper. This will fail in the gate, but it fails in a way which doesn't identify which test is missing a mock. Instead, we raise an exception so that you at least know where you've missed something. """ def setUp(self): super(PrivsepNoHelperFixture, self).setUp() self.useFixture(fixtures.MonkeyPatch( 'oslo_privsep.daemon.RootwrapClientChannel', UnHelperfulClientChannel)) class PolicyFixture(fixtures.Fixture): """Load the live policy for tests. A base policy fixture that starts with the assumption that you'd like to load and enforce the shipped default policy in tests. 
""" def setUp(self): super().setUp() cinder.policy.reset() # Suppress deprecation warnings for unit tests. cinder.policy.init(suppress_deprecation_warnings=True) self.addCleanup(cinder.policy.reset) def set_rules(self, rules, overwrite=True): policy = cinder.policy._ENFORCER policy.set_rules(oslo_policy.Rules.from_dict(rules), overwrite=overwrite) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315577.1111186 cinder-27.0.0/cinder/tests/functional/0000775000175000017500000000000000000000000017637 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/functional/__init__.py0000664000175000017500000000143700000000000021755 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from cinder import objects # NOTE(e0ne): Make sure we have all of the objects loaded. We do this # at module import time, because we may be using mock decorators in our # tests that run at import time. objects.register_all() ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315577.1111186 cinder-27.0.0/cinder/tests/functional/api/0000775000175000017500000000000000000000000020410 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/functional/api/__init__.py0000664000175000017500000000000000000000000022507 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/functional/api/client.py0000664000175000017500000003212000000000000022236 0ustar00zuulzuul00000000000000# Copyright (c) 2011 Justin Santa Barbara # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from http import client as http_client import urllib from oslo_serialization import jsonutils from oslo_utils import netutils import requests from cinder.i18n import _ from cinder.tests.unit import fake_constants as fake class OpenStackApiException(Exception): message = 'Unspecified error' def __init__(self, response=None, msg=None): self.response = response # Give chance to override default message if msg: self.message = msg if response: self.message = _( '%(message)s\nStatus Code: %(_status)s\nBody: %(_body)s') % { '_status': response.status_code, '_body': response.text, 'message': self.message} super(OpenStackApiException, self).__init__(self.message) class OpenStackApiException401(OpenStackApiException): message = _("401 Unauthorized Error") class OpenStackApiException404(OpenStackApiException): message = _("404 Not Found Error") class OpenStackApiException413(OpenStackApiException): message = _("413 Request entity too large") class OpenStackApiException400(OpenStackApiException): message = _("400 Bad Request") class OpenStackApiException403(OpenStackApiException): message = _("403 Forbidden") class OpenStackApiException500(OpenStackApiException): message = _("500 Internal Server Error") class TestOpenStackClient(object): """Simple OpenStack API Client. This is a really basic OpenStack API client that is under our control, so we can make changes / insert hooks for testing """ def __init__(self, auth_user, auth_key, auth_uri, api_version=None): super(TestOpenStackClient, self).__init__() self.auth_result = None self.auth_user = auth_user self.auth_key = auth_key self.auth_uri = auth_uri # default project_id self.project_id = fake.PROJECT_ID self.api_version = api_version def request(self, url, method='GET', body=None, headers=None, ssl_verify=True, stream=False): _headers = {'Content-Type': 'application/json'} _headers.update(headers or {}) parsed_url = urllib.parse.urlparse(url) port = parsed_url.port hostname = parsed_url.hostname scheme = parsed_url.scheme if netutils.is_valid_ipv6(hostname): hostname = "[%s]" % hostname relative_url = parsed_url.path if parsed_url.query: relative_url = relative_url + "?" 
+ parsed_url.query if port: _url = "%s://%s:%d%s" % (scheme, hostname, int(port), relative_url) else: _url = "%s://%s%s" % (scheme, hostname, relative_url) response = requests.request(method, _url, data=body, headers=_headers, verify=ssl_verify, stream=stream) return response def _authenticate(self, reauthenticate=False): if self.auth_result and not reauthenticate: return self.auth_result auth_uri = self.auth_uri headers = {'X-Auth-User': self.auth_user, 'X-Auth-Key': self.auth_key, 'X-Auth-Project-Id': self.project_id} response = self.request(auth_uri, headers=headers) http_status = response.status_code if http_status == http_client.UNAUTHORIZED: raise OpenStackApiException401(response=response) self.auth_result = response.headers return self.auth_result def update_project(self, new_project_id): self.project_id = new_project_id self._authenticate(True) def api_request(self, relative_uri, check_response_status=None, strip_version=False, base_url=True, **kwargs): auth_result = self._authenticate() if base_url: # NOTE(justinsb): httplib 'helpfully' converts headers to lower # case base_uri = auth_result['x-server-management-url'] else: base_uri = self.auth_uri if strip_version: # cut out version number and tenant_id base_uri = '/'.join(base_uri.split('/', 3)[:-1]) full_uri = '%s/%s' % (base_uri, relative_uri) headers = kwargs.setdefault('headers', {}) headers['X-Auth-Token'] = auth_result['x-auth-token'] if self.api_version: headers['OpenStack-API-Version'] = 'volume ' + self.api_version response = self.request(full_uri, **kwargs) http_status = response.status_code if check_response_status: if http_status not in check_response_status: message = None try: exc = globals()["OpenStackApiException%s" % http_status] except KeyError: exc = OpenStackApiException message = _("Unexpected status code") raise exc(response, message) return response def _decode_json(self, response): body = response.text if body: return jsonutils.loads(body) else: return "" def api_get(self, relative_uri, base_url=True, **kwargs): kwargs.setdefault('check_response_status', [http_client.OK]) response = self.api_request(relative_uri, base_url=base_url, **kwargs) return self._decode_json(response) def api_post(self, relative_uri, body, base_url=True, **kwargs): kwargs['method'] = 'POST' if body: headers = kwargs.setdefault('headers', {}) headers['Content-Type'] = 'application/json' kwargs['body'] = jsonutils.dumps(body) kwargs.setdefault('check_response_status', [http_client.OK, http_client.ACCEPTED]) response = self.api_request(relative_uri, base_url=base_url, **kwargs) return self._decode_json(response) def api_put(self, relative_uri, body, base_url=True, **kwargs): kwargs['method'] = 'PUT' if body: headers = kwargs.setdefault('headers', {}) headers['Content-Type'] = 'application/json' kwargs['body'] = jsonutils.dumps(body) kwargs.setdefault('check_response_status', [http_client.OK, http_client.ACCEPTED, http_client.NO_CONTENT]) response = self.api_request(relative_uri, base_url=base_url, **kwargs) return self._decode_json(response) def api_delete(self, relative_uri, base_url=True, **kwargs): kwargs['method'] = 'DELETE' kwargs.setdefault('check_response_status', [http_client.OK, http_client.ACCEPTED, http_client.NO_CONTENT]) return self.api_request(relative_uri, base_url=base_url, **kwargs) def get_volume(self, volume_id): return self.api_get('/volumes/%s' % volume_id)['volume'] def get_volumes(self, detail=True): rel_url = '/volumes/detail' if detail else '/volumes' return self.api_get(rel_url)['volumes'] def 
post_volume(self, volume): return self.api_post('/volumes', volume)['volume'] def delete_volume(self, volume_id): return self.api_delete('/volumes/%s' % volume_id) def put_volume(self, volume_id, volume): return self.api_put('/volumes/%s' % volume_id, volume)['volume'] def post_manage_volume(self, host=None, ref=None): if not host: host = "fake-host" if not ref: ref = {"one": "A", "two": "B"} req_body = {"volume": {}} req_body['volume']['host'] = host req_body['volume']['ref'] = ref return self.api_post('/os-volume-manage', req_body) def get_snapshot(self, snapshot_id): return self.api_get('/snapshots/%s' % snapshot_id)['snapshot'] def post_snapshot(self, snapshot): return self.api_post('/snapshots', snapshot)['snapshot'] def delete_snapshot(self, snapshot_id): return self.api_delete('/snapshots/%s' % snapshot_id) def quota_set(self, project_id, quota_update): return self.api_put( 'os-quota-sets/%s' % project_id, {'quota_set': quota_update})['quota_set'] def quota_get(self, project_id, usage=True): return self.api_get('os-quota-sets/%s?usage=%s' % (project_id, usage))['quota_set'] def create_type(self, type_name, extra_specs=None): type = {"volume_type": {"name": type_name}} if extra_specs: type['extra_specs'] = extra_specs return self.api_post('/types', type)['volume_type'] def delete_type(self, type_id): return self.api_delete('/types/%s' % type_id) def get_type(self, type_id): return self.api_get('/types/%s' % type_id)['volume_type'] def create_volume_type_extra_specs(self, volume_type_id, extra_specs): extra_specs = {"extra_specs": extra_specs} url = "/types/%s/extra_specs" % volume_type_id return self.api_post(url, extra_specs)['extra_specs'] def create_group_type_specs(self, grp_type_id, group_specs): group_specs = {"group_specs": group_specs} url = "/group_types/%s/group_specs" % grp_type_id return self.api_post(url, group_specs)['group_specs'] def create_group_type(self, type_name, grp_specs=None): grp_type = {"group_type": {"name": type_name}} if grp_specs: grp_type['group_specs'] = grp_specs return self.api_post('/group_types', grp_type)['group_type'] def delete_group_type(self, group_type_id): return self.api_delete('/group_types/%s' % group_type_id) def get_group_type(self, grp_type_id): return self.api_get('/group_types/%s' % grp_type_id)['group_type'] def get_group(self, group_id): return self.api_get('/groups/%s' % group_id)['group'] def get_groups(self, detail=True): rel_url = '/groups/detail' if detail else '/groups' return self.api_get(rel_url)['groups'] def post_group(self, group): return self.api_post('/groups', group)['group'] def post_group_from_src(self, group): return self.api_post('/groups/action', group)['group'] def delete_group(self, group_id, params): return self.api_post('/groups/%s/action' % group_id, params) def reset_group(self, group_id, params): return self.api_post('/groups/%s/action' % group_id, params) def put_group(self, group_id, group): return self.api_put('/groups/%s' % group_id, group)['group'] def get_group_snapshot(self, group_snapshot_id): return self.api_get('/group_snapshots/%s' % group_snapshot_id)[ 'group_snapshot'] def get_group_snapshots(self, detail=True): rel_url = '/group_snapshots/detail' if detail else '/group_snapshots' return self.api_get(rel_url)['group_snapshots'] def post_group_snapshot(self, group_snapshot): return self.api_post('/group_snapshots', group_snapshot)[ 'group_snapshot'] def delete_group_snapshot(self, group_snapshot_id): return self.api_delete('/group_snapshots/%s' % group_snapshot_id) def 
reset_group_snapshot(self, group_snapshot_id, params): return self.api_post('/group_snapshots/%s/action' % group_snapshot_id, params) def enable_group_replication(self, group_id, params): return self.api_post('/groups/%s/action' % group_id, params) def disable_group_replication(self, group_id, params): return self.api_post('/groups/%s/action' % group_id, params) def failover_group_replication(self, group_id, params): return self.api_post('/groups/%s/action' % group_id, params) def list_group_replication_targets(self, group_id, params): return self.api_post('/groups/%s/action' % group_id, params) def set_default_type(self, project_id, params): body = {"default_type": params} return self.api_put('default-types/%s' % project_id, body, base_url=False)['default_type'] def get_default_type(self, project_id=None): if project_id: return self.api_get('default-types/%s' % project_id, base_url=False)['default_type'] return self.api_get('default-types', base_url=False)['default_types'] def unset_default_type(self, project_id): self.api_delete('default-types/%s' % project_id, base_url=False) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/functional/api/foxinsocks.py0000664000175000017500000000537500000000000023162 0ustar00zuulzuul00000000000000# Copyright 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import webob.exc from cinder.api import extensions from cinder.api.openstack import wsgi class FoxInSocksController(object): def index(self, req): return "Try to say this Mr. Knox, sir..." class FoxInSocksServerControllerExtension(wsgi.Controller): @wsgi.action('add_tweedle') def _add_tweedle(self, req, id, body): return "Tweedle Beetle Added." @wsgi.action('delete_tweedle') def _delete_tweedle(self, req, id, body): return "Tweedle Beetle Deleted." @wsgi.action('fail') def _fail(self, req, id, body): raise webob.exc.HTTPBadRequest(explanation='Tweedle fail') class FoxInSocksFlavorGooseControllerExtension(wsgi.Controller): @wsgi.extends def show(self, req, resp_obj, id): # NOTE: This only handles JSON responses. resp_obj.obj['flavor']['googoose'] = req.GET.get('chewing') class FoxInSocksFlavorBandsControllerExtension(wsgi.Controller): @wsgi.extends def show(self, req, resp_obj, id): # NOTE: This only handles JSON responses. resp_obj.obj['big_bands'] = 'Pig Bands!' 
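# NOTE: Illustrative sketch only (the flavor id and query value below are
# made up): once the two flavor controller extensions above are loaded,
# each @wsgi.extends hook runs after the core controller has built its
# response and mutates resp_obj.obj in place, so a
# GET /flavors/1?chewing=bluebird response would end up roughly as:
#
#     {"flavor": {"id": "1", "googoose": "bluebird"},
#      "big_bands": "Pig Bands!"}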
class Foxinsocks(extensions.ExtensionDescriptor): """The Fox In Socks Extension.""" name = "Fox In Socks" alias = "FOXNSOX" namespace = "http://www.fox.in.socks/api/ext/pie/v1.0" updated = "2011-01-22T13:25:27-06:00" def __init__(self, ext_mgr): ext_mgr.register(self) def get_resources(self): resources = [] resource = extensions.ResourceExtension('foxnsocks', FoxInSocksController()) resources.append(resource) return resources def get_controller_extensions(self): extension_list = [] extension_set = [ (FoxInSocksServerControllerExtension, 'servers'), (FoxInSocksFlavorGooseControllerExtension, 'flavors'), (FoxInSocksFlavorBandsControllerExtension, 'flavors'), ] for klass, collection in extension_set: controller = klass() ext = extensions.ControllerExtension(self, collection, controller) extension_list.append(ext) return extension_list ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315577.1151185 cinder-27.0.0/cinder/tests/functional/api_sample_tests/0000775000175000017500000000000000000000000023173 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/functional/api_sample_tests/__init__.py0000664000175000017500000000000000000000000025272 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/functional/api_sample_tests/fakes.py0000664000175000017500000000625000000000000024641 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
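# NOTE: Illustrative sketch only -- the patch target below is an assumption
# about how these module-level stubs get wired in, not something this module
# does itself. The api_sample_tests are expected to patch the real volume API
# with these deterministic stubs so the generated samples stay stable, e.g.:
#
#     from unittest import mock
#
#     with mock.patch('cinder.volume.api.API.copy_volume_to_image',
#                     stub_copy_volume_to_image):
#         ...  # run the sample test that uploads a volume to an image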
def stub_copy_volume_to_image(self, context, volume, metadata, force): image_metadata = { "status": "uploading", "container_format": "bare", "image_name": "test", "visibility": "private", "updated_at": "2017-06-05T08:44:28.000000", "image_id": "de75b74e-7f0d-4b59-a263-bd87bfc313bd", "display_description": None, "id": "3a81fdac-e8ae-4e61-b6a2-2e14ff316f19", "size": 1, "disk_format": "raw", "volume_type": None, "protected": False } return image_metadata def stub_manage_existing(self, req, body): volume = { "volume": { "status": "creating", "user_id": "eae1472b5fc5496998a3d06550929e7e", "attachments": [], "links": [ { "href": "http://10.0.2.15:8776/v3/87c8522052ca4eed98bc672b4c1a" "3ddb/volumes/23cf872b-c781-4cd4-847d-5f2ec8cbd91c", "rel": "self" }, { "href": "http://10.0.2.15:8776/87c8522052ca4eed98bc672b4c1" "a3ddb/volumes/23cf872b-c781-4cd4-847d-5f2ec8cbd91" "c", "rel": "bookmark" } ], "availability_zone": "az2", "bootable": "false", "encrypted": "false", "created_at": "2014-07-18T00:12:54.000000", "description": "Volume imported from existingLV", "os-vol-tenant-attr:tenant_id": "87c8522052ca4eed98bc672b4c1a3ddb", "volume_type": "null", "name": "New Volume", "source_volid": "null", "snapshot_id": "null", "metadata": { "key2": "value2", "key1": "value1" }, "id": "23cf872b-c781-4cd4-847d-5f2ec8cbd91c", "size": 0 } } return volume def stub_manage_existing_snapshot(self, req, body): snapshot = { "snapshot": { "status": "creating", "size": 1, "metadata": { "manage-snap-meta1": "value1", "manage-snap-meta3": "value3", "manage-snap-meta2": "value2" }, "name": "new_snapshot", "volume_id": "1df34919-aba7-4a1b-a614-3b409d71ac03", "created_at": "2018-09-26T03:45:03.893592", "description": "this is a new snapshot", "id": "b6314a71-9d3d-439a-861d-b790def0d693", "updated_at": "null" } } return snapshot ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315576.8391159 cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/0000775000175000017500000000000000000000000024637 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315577.1151185 cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/backups/0000775000175000017500000000000000000000000026267 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000020500000000000011452 xustar0000000000000000111 path=cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/backups/backup-create-request.json.tpl 22 mtime=1759315527.0 cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/backups/backup-create-request.json.tp0000664000175000017500000000036000000000000033777 0ustar00zuulzuul00000000000000{ "backup": { "container": null, "description": "Test backup", "name": "backup001", "volume_id": "%(volume_id)s", "incremental": false, "snapshot_id": null, "force": false } } ././@PaxHeader0000000000000000000000000000020600000000000011453 xustar0000000000000000112 path=cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/backups/backup-create-response.json.tpl 22 mtime=1759315527.0 cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/backups/backup-create-response.json.t0000664000175000017500000000054200000000000033767 0ustar00zuulzuul00000000000000{ "backup": { "id": "%(uuid)s", "links": [ { "href": "%(host)s/v3/%(id)s/backups/%(uuid)s", "rel": "self" }, { "href": "%(host)s/%(id)s/backups/%(uuid)s", "rel": "bookmark" } ], "name": "backup001" } } 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/backups/backup-show-response.json.tpl0000664000175000017500000000163500000000000034044 0ustar00zuulzuul00000000000000{ "backup": { "availability_zone": null, "container": null, "created_at": "%(strtime)s", "data_timestamp": "%(strtime)s", "description": "Test backup", "fail_reason": null, "snapshot_id": null, "id": "%(uuid)s", "links": [ { "href": "%(host)s/v3/%(id)s/backups/%(uuid)s", "rel": "self" }, { "href": "%(host)s/%(id)s/backups/%(uuid)s", "rel": "bookmark" } ], "name": "backup001", "object_count": %(int)s, "size": %(int)s, "status": "creating", "updated_at": "%(strtime)s", "volume_id": "%(uuid)s", "is_incremental": false, "has_dependent_backups": false } } ././@PaxHeader0000000000000000000000000000020500000000000011452 xustar0000000000000000111 path=cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/backups/backup-update-request.json.tpl 22 mtime=1759315527.0 cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/backups/backup-update-request.json.tp0000664000175000017500000000013600000000000034017 0ustar00zuulzuul00000000000000{ "backup":{ "name":"backup001", "description": "this is a backup" } }././@PaxHeader0000000000000000000000000000021600000000000011454 xustar0000000000000000120 path=cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/backups/backups-list-detailed-response.json.tpl 22 mtime=1759315527.0 cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/backups/backups-list-detailed-respons0000664000175000017500000000166200000000000034060 0ustar00zuulzuul00000000000000{ "backups": [ { "availability_zone": null, "container": null, "created_at": "%(strtime)s", "data_timestamp": "%(strtime)s", "description": "Test backup", "fail_reason": null, "snapshot_id": null, "id": "%(uuid)s", "links": [ { "href": "%(host)s/v3/%(id)s/backups/%(uuid)s", "rel": "self" }, { "href": "%(host)s/%(id)s/backups/%(uuid)s", "rel": "bookmark" } ], "name": "backup001", "object_count": %(int)s, "size": %(int)s, "status": "creating", "updated_at": "%(strtime)s", "volume_id": "%(uuid)s", "is_incremental": false, "has_dependent_backups": false } ] } ././@PaxHeader0000000000000000000000000000020500000000000011452 xustar0000000000000000111 path=cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/backups/backups-list-response.json.tpl 22 mtime=1759315527.0 cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/backups/backups-list-response.json.tp0000664000175000017500000000054100000000000034041 0ustar00zuulzuul00000000000000{ "backups": [ { "id": "%(uuid)s", "links": [{ "href": "%(host)s/v3/%(id)s/backups/%(uuid)s", "rel": "self" }, { "href": "%(host)s/%(id)s/backups/%(uuid)s", "rel": "bookmark" }], "name": "backup001" } ] }././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315577.1191187 cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/backups/v3.18/0000775000175000017500000000000000000000000027046 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000021400000000000011452 xustar0000000000000000118 path=cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/backups/v3.18/backup-create-response.json.tpl 22 mtime=1759315527.0 cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/backups/v3.18/backup-create-response.0000664000175000017500000000055100000000000033412 
0ustar00zuulzuul00000000000000{ "backup": { "id": "%(uuid)s", "links": [ { "href": "%(host)s/v3/%(id)s/backups/%(uuid)s", "rel": "self" }, { "href": "%(host)s/%(id)s/backups/%(uuid)s", "rel": "bookmark" } ], "name": "backup001" } }././@PaxHeader0000000000000000000000000000021200000000000011450 xustar0000000000000000116 path=cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/backups/v3.18/backup-show-response.json.tpl 22 mtime=1759315527.0 cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/backups/v3.18/backup-show-response.js0000664000175000017500000000173200000000000033466 0ustar00zuulzuul00000000000000{ "backup": { "availability_zone": null, "container": null, "created_at": "%(strtime)s", "data_timestamp": "%(strtime)s", "description": "Test backup", "fail_reason": null, "snapshot_id": null, "id": "%(uuid)s", "links": [ { "href": "%(host)s/v3/%(id)s/backups/%(uuid)s", "rel": "self" }, { "href": "%(host)s/%(id)s/backups/%(uuid)s", "rel": "bookmark" } ], "name": "backup001", "object_count": %(int)s, "os-backup-project-attr:project_id": "%(uuid)s", "size": %(int)s, "status": "creating", "updated_at": "%(strtime)s", "volume_id": "%(uuid)s", "is_incremental": false, "has_dependent_backups": false } } ././@PaxHeader0000000000000000000000000000022400000000000011453 xustar0000000000000000126 path=cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/backups/v3.18/backups-list-detailed-response.json.tpl 22 mtime=1759315527.0 cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/backups/v3.18/backups-list-detailed-r0000664000175000017500000000175200000000000033407 0ustar00zuulzuul00000000000000{ "backups": [ { "availability_zone": null, "container": null, "created_at": "%(strtime)s", "data_timestamp": "%(strtime)s", "description": "Test backup", "fail_reason": null, "snapshot_id": null, "id": "%(uuid)s", "links": [ { "href": "%(host)s/v3/%(id)s/backups/%(uuid)s", "rel": "self" }, { "href": "%(host)s/%(id)s/backups/%(uuid)s", "rel": "bookmark" } ], "name": "backup001", "object_count": %(int)s, "os-backup-project-attr:project_id": "%(uuid)s", "size": 10, "status": "creating", "updated_at": "%(strtime)s", "volume_id": "%(uuid)s", "is_incremental": false, "has_dependent_backups": false } ] } ././@PaxHeader0000000000000000000000000000021300000000000011451 xustar0000000000000000117 path=cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/backups/v3.18/backups-list-response.json.tpl 22 mtime=1759315527.0 cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/backups/v3.18/backups-list-response.j0000664000175000017500000000054100000000000033456 0ustar00zuulzuul00000000000000{ "backups": [ { "id": "%(uuid)s", "links": [{ "href": "%(host)s/v3/%(id)s/backups/%(uuid)s", "rel": "self" }, { "href": "%(host)s/%(id)s/backups/%(uuid)s", "rel": "bookmark" }], "name": "backup001" } ] }././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315577.1191187 cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/backups/v3.43/0000775000175000017500000000000000000000000027044 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000021400000000000011452 xustar0000000000000000118 path=cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/backups/v3.43/backup-create-response.json.tpl 22 mtime=1759315527.0 cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/backups/v3.43/backup-create-response.0000664000175000017500000000055100000000000033410 0ustar00zuulzuul00000000000000{ 
"backup": { "id": "%(uuid)s", "links": [ { "href": "%(host)s/v3/%(id)s/backups/%(uuid)s", "rel": "self" }, { "href": "%(host)s/%(id)s/backups/%(uuid)s", "rel": "bookmark" } ], "name": "backup001" } }././@PaxHeader0000000000000000000000000000021200000000000011450 xustar0000000000000000116 path=cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/backups/v3.43/backup-show-response.json.tpl 22 mtime=1759315527.0 cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/backups/v3.43/backup-show-response.js0000664000175000017500000000176600000000000033473 0ustar00zuulzuul00000000000000{ "backup": { "availability_zone": null, "container": null, "created_at": "%(strtime)s", "data_timestamp": "%(strtime)s", "description": "Test backup", "fail_reason": null, "snapshot_id": null, "id": "%(uuid)s", "links": [ { "href": "%(host)s/v3/%(id)s/backups/%(uuid)s", "rel": "self" }, { "href": "%(host)s/%(id)s/backups/%(uuid)s", "rel": "bookmark" } ], "metadata": {}, "name": "backup001", "object_count": %(int)s, "os-backup-project-attr:project_id": "%(uuid)s", "size": %(int)s, "status": "creating", "updated_at": "%(strtime)s", "volume_id": "%(uuid)s", "is_incremental": false, "has_dependent_backups": false } } ././@PaxHeader0000000000000000000000000000022400000000000011453 xustar0000000000000000126 path=cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/backups/v3.43/backups-list-detailed-response.json.tpl 22 mtime=1759315527.0 cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/backups/v3.43/backups-list-detailed-r0000664000175000017500000000200600000000000033376 0ustar00zuulzuul00000000000000{ "backups": [ { "availability_zone": null, "container": null, "created_at": "%(strtime)s", "data_timestamp": "%(strtime)s", "description": "Test backup", "fail_reason": null, "snapshot_id": null, "id": "%(uuid)s", "links": [ { "href": "%(host)s/v3/%(id)s/backups/%(uuid)s", "rel": "self" }, { "href": "%(host)s/%(id)s/backups/%(uuid)s", "rel": "bookmark" } ], "metadata": {}, "name": "backup001", "object_count": %(int)s, "os-backup-project-attr:project_id": "%(uuid)s", "size": 10, "status": "creating", "updated_at": "%(strtime)s", "volume_id": "%(uuid)s", "is_incremental": false, "has_dependent_backups": false } ] } ././@PaxHeader0000000000000000000000000000021300000000000011451 xustar0000000000000000117 path=cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/backups/v3.43/backups-list-response.json.tpl 22 mtime=1759315527.0 cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/backups/v3.43/backups-list-response.j0000664000175000017500000000054100000000000033454 0ustar00zuulzuul00000000000000{ "backups": [ { "id": "%(uuid)s", "links": [{ "href": "%(host)s/v3/%(id)s/backups/%(uuid)s", "rel": "self" }, { "href": "%(host)s/%(id)s/backups/%(uuid)s", "rel": "bookmark" }], "name": "backup001" } ] }././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315577.1191187 cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/backups/v3.45/0000775000175000017500000000000000000000000027046 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000021400000000000011452 xustar0000000000000000118 path=cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/backups/v3.45/backup-create-response.json.tpl 22 mtime=1759315527.0 cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/backups/v3.45/backup-create-response.0000664000175000017500000000054100000000000033411 0ustar00zuulzuul00000000000000{ 
"backup": { "id": "%(uuid)s", "links": [ { "href": "%(host)s/v3/%(id)s/backups/%(uuid)s", "rel": "self" }, { "href": "%(host)s/%(id)s/backups/%(uuid)s", "rel": "bookmark" } ], "name": "backup001" } }././@PaxHeader0000000000000000000000000000021200000000000011450 xustar0000000000000000116 path=cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/backups/v3.45/backup-show-response.json.tpl 22 mtime=1759315527.0 cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/backups/v3.45/backup-show-response.js0000664000175000017500000000176600000000000033475 0ustar00zuulzuul00000000000000{ "backup": { "availability_zone": null, "container": null, "created_at": "%(strtime)s", "data_timestamp": "%(strtime)s", "description": "Test backup", "fail_reason": null, "snapshot_id": null, "id": "%(uuid)s", "links": [ { "href": "%(host)s/v3/%(id)s/backups/%(uuid)s", "rel": "self" }, { "href": "%(host)s/%(id)s/backups/%(uuid)s", "rel": "bookmark" } ], "metadata": {}, "name": "backup001", "object_count": %(int)s, "os-backup-project-attr:project_id": "%(uuid)s", "size": %(int)s, "status": "creating", "updated_at": "%(strtime)s", "volume_id": "%(uuid)s", "is_incremental": false, "has_dependent_backups": false } } ././@PaxHeader0000000000000000000000000000022400000000000011453 xustar0000000000000000126 path=cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/backups/v3.45/backups-list-detailed-response.json.tpl 22 mtime=1759315527.0 cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/backups/v3.45/backups-list-detailed-r0000664000175000017500000000204100000000000033377 0ustar00zuulzuul00000000000000{ "backups": [ { "availability_zone": null, "container": null, "created_at": "%(strtime)s", "data_timestamp": "%(strtime)s", "description": "Test backup", "fail_reason": null, "snapshot_id": null, "id": "%(uuid)s", "links": [ { "href": "%(host)s/v3/%(id)s/backups/%(uuid)s", "rel": "self" }, { "href": "%(host)s/%(id)s/backups/%(uuid)s", "rel": "bookmark" } ], "metadata": {}, "name": "backup001", "object_count": %(int)s, "os-backup-project-attr:project_id": "%(uuid)s", "size": %(int)s, "status": "creating", "updated_at": "%(strtime)s", "volume_id": "%(uuid)s", "is_incremental": false, "has_dependent_backups": false } ], "count": %(int)s } ././@PaxHeader0000000000000000000000000000021300000000000011451 xustar0000000000000000117 path=cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/backups/v3.45/backups-list-response.json.tpl 22 mtime=1759315527.0 cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/backups/v3.45/backups-list-response.j0000664000175000017500000000056700000000000033466 0ustar00zuulzuul00000000000000{ "backups": [ { "id": "%(uuid)s", "links": [{ "href": "%(host)s/v3/%(id)s/backups/%(uuid)s", "rel": "self" }, { "href": "%(host)s/%(id)s/backups/%(uuid)s", "rel": "bookmark" }], "name": "backup001" } ], "count": %(int)s }././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315577.1191187 cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/backups/v3.47/0000775000175000017500000000000000000000000027050 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000021400000000000011452 xustar0000000000000000118 path=cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/backups/v3.47/backup-restore-request.json.tpl 22 mtime=1759315527.0 
cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/backups/v3.47/backup-restore-request.0000664000175000017500000000016700000000000033471 0ustar00zuulzuul00000000000000{ "restore": { "name": "volume01", "volume_id": "%(uuid)s", "backup_id": "%(uuid)s" } }././@PaxHeader0000000000000000000000000000021500000000000011453 xustar0000000000000000119 path=cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/backups/v3.47/backup-restore-response.json.tpl 22 mtime=1759315527.0 cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/backups/v3.47/backup-restore-response0000664000175000017500000000017600000000000033561 0ustar00zuulzuul00000000000000{ "restore": { "backup_id": "%(uuid)s", "volume_id": "%(uuid)s", "volume_name": "volume01" } }././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315577.1191187 cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/backups/v3.56/0000775000175000017500000000000000000000000027050 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000021400000000000011452 xustar0000000000000000118 path=cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/backups/v3.56/backup-create-response.json.tpl 22 mtime=1759315527.0 cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/backups/v3.56/backup-create-response.0000664000175000017500000000054100000000000033413 0ustar00zuulzuul00000000000000{ "backup": { "id": "%(uuid)s", "links": [ { "href": "%(host)s/v3/%(id)s/backups/%(uuid)s", "rel": "self" }, { "href": "%(host)s/%(id)s/backups/%(uuid)s", "rel": "bookmark" } ], "name": "backup001" } }././@PaxHeader0000000000000000000000000000021200000000000011450 xustar0000000000000000116 path=cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/backups/v3.56/backup-show-response.json.tpl 22 mtime=1759315527.0
"os-backup-project-attr:project_id": "%(uuid)s", "size": %(int)s, "status": "creating", "updated_at": "%(strtime)s", "volume_id": "%(uuid)s", "is_incremental": false, "has_dependent_backups": false, "user_id": "%(uuid)s" } ], "count": %(int)s } ././@PaxHeader0000000000000000000000000000021300000000000011451 xustar0000000000000000117 path=cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/backups/v3.56/backups-list-response.json.tpl 22 mtime=1759315527.0 cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/backups/v3.56/backups-list-response.j0000664000175000017500000000056700000000000033470 0ustar00zuulzuul00000000000000{ "backups": [ { "id": "%(uuid)s", "links": [{ "href": "%(host)s/v3/%(id)s/backups/%(uuid)s", "rel": "self" }, { "href": "%(host)s/%(id)s/backups/%(uuid)s", "rel": "bookmark" }], "name": "backup001" } ], "count": %(int)s }././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315577.1231186 cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/backups/v3.9/0000775000175000017500000000000000000000000026766 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000021300000000000011451 xustar0000000000000000117 path=cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/backups/v3.9/backup-create-response.json.tpl 22 mtime=1759315527.0 cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/backups/v3.9/backup-create-response.j0000664000175000017500000000054200000000000033504 0ustar00zuulzuul00000000000000{ "backup": { "id": "%(uuid)s", "links": [ { "href": "%(host)s/v3/%(id)s/backups/%(uuid)s", "rel": "self" }, { "href": "%(host)s/%(id)s/backups/%(uuid)s", "rel": "bookmark" } ], "name": "backup001" } } ././@PaxHeader0000000000000000000000000000021100000000000011447 xustar0000000000000000115 path=cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/backups/v3.9/backup-show-response.json.tpl 22 mtime=1759315527.0 cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/backups/v3.9/backup-show-response.jso0000664000175000017500000000163500000000000033567 0ustar00zuulzuul00000000000000{ "backup": { "availability_zone": null, "container": null, "created_at": "%(strtime)s", "data_timestamp": "%(strtime)s", "description": "Test backup", "fail_reason": null, "snapshot_id": null, "id": "%(uuid)s", "links": [ { "href": "%(host)s/v3/%(id)s/backups/%(uuid)s", "rel": "self" }, { "href": "%(host)s/%(id)s/backups/%(uuid)s", "rel": "bookmark" } ], "name": "backup001", "object_count": %(int)s, "size": %(int)s, "status": "creating", "updated_at": "%(strtime)s", "volume_id": "%(uuid)s", "is_incremental": false, "has_dependent_backups": false } } ././@PaxHeader0000000000000000000000000000021200000000000011450 xustar0000000000000000116 path=cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/backups/v3.9/backup-update-request.json.tpl 22 mtime=1759315527.0 cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/backups/v3.9/backup-update-request.js0000664000175000017500000000013600000000000033537 0ustar00zuulzuul00000000000000{ "backup":{ "name":"backup001", "description": "this is a backup" } }././@PaxHeader0000000000000000000000000000021300000000000011451 xustar0000000000000000117 path=cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/backups/v3.9/backup-update-response.json.tpl 22 mtime=1759315527.0 
cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/backups/v3.9/backup-update-response.j0000664000175000017500000000053100000000000033521 0ustar00zuulzuul00000000000000{ "backup": { "id": "%(uuid)s", "links": [ { "href": "%(host)s/v3/%(uuid)s/backups/%(uuid)s", "rel": "self" }, { "href": "%(host)s/%(uuid)s/backups/%(uuid)s", "rel": "bookmark" } ], "name": "%(name)s" } } ././@PaxHeader0000000000000000000000000000022300000000000011452 xustar0000000000000000125 path=cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/backups/v3.9/backups-list-detailed-response.json.tpl 22 mtime=1759315527.0 cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/backups/v3.9/backups-list-detailed-re0000664000175000017500000000165500000000000033476 0ustar00zuulzuul00000000000000{ "backups": [ { "availability_zone": null, "container": null, "created_at": "%(strtime)s", "data_timestamp": "%(strtime)s", "description": "Test backup", "fail_reason": null, "snapshot_id": null, "id": "%(uuid)s", "links": [ { "href": "%(host)s/v3/%(id)s/backups/%(uuid)s", "rel": "self" }, { "href": "%(host)s/%(id)s/backups/%(uuid)s", "rel": "bookmark" } ], "name": "backup001", "object_count": %(int)s, "size": 10, "status": "creating", "updated_at": "%(strtime)s", "volume_id": "%(uuid)s", "is_incremental": false, "has_dependent_backups": false } ] } ././@PaxHeader0000000000000000000000000000021200000000000011450 xustar0000000000000000116 path=cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/backups/v3.9/backups-list-response.json.tpl 22 mtime=1759315527.0 cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/backups/v3.9/backups-list-response.js0000664000175000017500000000054100000000000033561 0ustar00zuulzuul00000000000000{ "backups": [ { "id": "%(uuid)s", "links": [{ "href": "%(host)s/v3/%(id)s/backups/%(uuid)s", "rel": "self" }, { "href": "%(host)s/%(id)s/backups/%(uuid)s", "rel": "bookmark" }], "name": "backup001" } ] }././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315577.1231186 cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/extensions/0000775000175000017500000000000000000000000027036 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000021300000000000011451 xustar0000000000000000117 path=cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/extensions/extensions-list-response.json.tpl 22 mtime=1759315527.0 cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/extensions/extensions-list-response.j0000664000175000017500000001664500000000000034231 0ustar00zuulzuul00000000000000{ "extensions": [ { "alias": "os-hosts", "description": "Admin-only host administration.", "links": [], "name": "Hosts", "updated": "%(extension_update)s" }, { "alias": "os-vol-tenant-attr", "description": "Expose the internal project_id as an attribute of a volume.", "links": [], "name": "VolumeTenantAttribute", "updated": "%(extension_update)s" }, { "alias": "os-quota-sets", "description": "Quota management support.", "links": [], "name": "Quotas", "updated": "%(extension_update)s" }, { "alias": "os-availability-zone", "description": "Describe Availability Zones.", "links": [], "name": "AvailabilityZones", "updated": "%(extension_update)s" }, { "alias": "os-volume-encryption-metadata", "description": "Volume encryption metadata retrieval support.", "links": [], "name": "VolumeEncryptionMetadata", "updated": "%(extension_update)s" }, { "alias": "backups", "description": "Backups support.", "links": [], 
"name": "Backups", "updated": "%(extension_update)s" }, { "alias": "os-snapshot-actions", "description": "Enable snapshot manager actions.", "links": [], "name": "SnapshotActions", "updated": "%(extension_update)s" }, { "alias": "os-volume-actions", "description": "Enable volume actions.", "links": [], "name": "VolumeActions", "updated": "%(extension_update)s" }, { "alias": "os-snapshot-manage", "description": "Allows existing backend storage to be 'managed' by Cinder.", "links": [], "name": "SnapshotManage", "updated": "%(extension_update)s" }, { "alias": "os-volume-unmanage", "description": "Enable volume unmanage operation.", "links": [], "name": "VolumeUnmanage", "updated": "%(extension_update)s" }, { "alias": "consistencygroups", "description": "consistency groups support.", "links": [], "name": "Consistencygroups", "updated": "%(extension_update)s" }, { "alias": "os-vol-host-attr", "description": "Expose host as an attribute of a volume.", "links": [], "name": "VolumeHostAttribute", "updated": "%(extension_update)s" }, { "alias": "encryption", "description": "Encryption support for volume types.", "links": [], "name": "VolumeTypeEncryption", "updated": "%(extension_update)s" }, { "alias": "os-vol-image-meta", "description": "Show image metadata associated with the volume.", "links": [], "name": "VolumeImageMetadata", "updated": "%(extension_update)s" }, { "alias": "os-types-manage", "description": "Types manage support.", "links": [], "name": "TypesManage", "updated": "%(extension_update)s" }, { "alias": "capabilities", "description": "Capabilities support.", "links": [], "name": "Capabilities", "updated": "%(extension_update)s" }, { "alias": "cgsnapshots", "description": "cgsnapshots support.", "links": [], "name": "Cgsnapshots", "updated": "%(extension_update)s" }, { "alias": "os-types-extra-specs", "description": "Type extra specs support.", "links": [], "name": "TypesExtraSpecs", "updated": "%(extension_update)s" }, { "alias": "os-used-limits", "description": "Provide data on limited resources that are being used.", "links": [], "name": "UsedLimits", "updated": "%(extension_update)s" }, { "alias": "os-vol-mig-status-attr", "description": "Expose migration_status as an attribute of a volume.", "links": [], "name": "VolumeMigStatusAttribute", "updated": "%(extension_update)s" }, { "alias": "os-volume-type-access", "description": "Volume type access support.", "links": [], "name": "VolumeTypeAccess", "updated": "%(isotime)s" }, { "alias": "os-extended-services", "description": "Extended services support.", "links": [], "name": "ExtendedServices", "updated": "%(extension_update)s" }, { "alias": "os-extended-snapshot-attributes", "description": "Extended SnapshotAttributes support.", "links": [], "name": "ExtendedSnapshotAttributes", "updated": "%(extension_update)s" }, { "alias": "os-snapshot-unmanage", "description": "Enable volume unmanage operation.", "links": [], "name": "SnapshotUnmanage", "updated": "%(extension_update)s" }, { "alias": "qos-specs", "description": "QoS specs support.", "links": [], "name": "Qos_specs_manage", "updated": "%(extension_update)s" }, { "alias": "os-quota-class-sets", "description": "Quota classes management support.", "links": [], "name": "QuotaClasses", "updated": "%(extension_update)s" }, { "alias": "os-volume-transfer", "description": "Volume transfer management support.", "links": [], "name": "VolumeTransfer", "updated": "%(extension_update)s" }, { "alias": "os-volume-manage", "description": "Allows existing backend storage to be 'managed' by 
Cinder.", "links": [], "name": "VolumeManage", "updated": "%(extension_update)s" }, { "alias": "os-admin-actions", "description": "Enable admin actions.", "links": [], "name": "AdminActions", "updated": "%(extension_update)s" }, { "alias": "os-services", "description": "Services support.", "links": [], "name": "Services", "updated": "%(extension_update)s" }, { "alias": "scheduler-stats", "description": "Scheduler stats support.", "links": [], "name": "Scheduler_stats", "updated": "%(extension_update)s" }, { "alias": "OS-SCH-HNT", "description": "Pass arbitrary key/value pairs to the scheduler.", "links": [], "name": "SchedulerHints", "updated": "%(extension_update)s" } ] }././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315577.1231186 cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/limits/0000775000175000017500000000000000000000000026140 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/limits/limits-show-response.json.tpl0000664000175000017500000000071500000000000033747 0ustar00zuulzuul00000000000000{ "limits": { "rate": [], "absolute": { "totalSnapshotsUsed": 0, "maxTotalBackups": 10, "maxTotalVolumeGigabytes": 1000, "maxTotalSnapshots": 10, "maxTotalBackupGigabytes": 1000, "totalBackupGigabytesUsed": 0, "maxTotalVolumes": 10, "totalVolumesUsed": 0, "totalBackupsUsed": 0, "totalGigabytesUsed": 0 } } } ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315577.1231186 cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/qos/0000775000175000017500000000000000000000000025441 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/qos/qos-create-request.json.tpl0000664000175000017500000000010000000000000032652 0ustar00zuulzuul00000000000000{ "qos_specs": { "name": "reliability-spec" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/qos/qos-create-response.json.tpl0000664000175000017500000000067000000000000033034 0ustar00zuulzuul00000000000000{ "links": [ { "href": "%(host)s/v3/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/qos-specs/%(uuid)s", "rel": "self" }, { "href": "%(host)s/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/qos-specs/%(uuid)s", "rel": "bookmark" } ], "qos_specs": { "consumer": "back-end", "id": "%(uuid)s", "name": "reliability-spec", "specs": {} } }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/qos/qos-list-response.json.tpl0000664000175000017500000000026200000000000032541 0ustar00zuulzuul00000000000000{ "qos_specs": [ { "consumer": "back-end", "id": "%(uuid)s", "name": "reliability-spec", "specs": {} } ] }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/qos/qos-show-response.json.tpl0000664000175000017500000000067000000000000032551 0ustar00zuulzuul00000000000000{ "links": [ { "href": "%(host)s/v3/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/qos-specs/%(uuid)s", "rel": "self" }, { "href": 
"%(host)s/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/qos-specs/%(uuid)s", "rel": "bookmark" } ], "qos_specs": { "consumer": "back-end", "id": "%(uuid)s", "name": "reliability-spec", "specs": {} } }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/qos/qos-update-request.json.tpl0000664000175000017500000000006200000000000032700 0ustar00zuulzuul00000000000000{ "qos_specs": { "delay": "1" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/qos/qos-update-response.json.tpl0000664000175000017500000000006100000000000033045 0ustar00zuulzuul00000000000000{ "qos_specs": { "delay": "1" } }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/qos/qos_show_response.json.tpl0000664000175000017500000000003600000000000032711 0ustar00zuulzuul00000000000000{ "qos_associations": [] }././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315577.1231186 cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/quota_classes/0000775000175000017500000000000000000000000027505 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000022100000000000011450 xustar0000000000000000123 path=cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/quota_classes/quota-classes-show-response.json.tpl 22 mtime=1759315527.0 cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/quota_classes/quota-classes-show-resp0000664000175000017500000000055000000000000034141 0ustar00zuulzuul00000000000000{ "quota_class_set": { "backup_gigabytes": 1000, "backups": 10, "gigabytes": 1000, "gigabytes___DEFAULT__": -1, "groups": 10, "id": "test_class", "per_volume_gigabytes": -1, "snapshots": 10, "snapshots___DEFAULT__": -1, "volumes": 10, "volumes___DEFAULT__": -1 } }././@PaxHeader0000000000000000000000000000022200000000000011451 xustar0000000000000000124 path=cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/quota_classes/quota-classes-update-request.json.tpl 22 mtime=1759315527.0 cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/quota_classes/quota-classes-update-re0000664000175000017500000000015400000000000034100 0ustar00zuulzuul00000000000000{ "quota_class_set": { "volumes": 10, "gigabytes": 1000, "snapshots": 10 } }././@PaxHeader0000000000000000000000000000022300000000000011452 xustar0000000000000000125 path=cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/quota_classes/quota-classes-update-response.json.tpl 22 mtime=1759315527.0 cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/quota_classes/quota-classes-update-re0000664000175000017500000000051400000000000034100 0ustar00zuulzuul00000000000000{ "quota_class_set": { "backup_gigabytes": 1000, "backups": 10, "gigabytes": 1000, "gigabytes___DEFAULT__": -1, "groups": 10, "per_volume_gigabytes": -1, "snapshots": 10, "snapshots___DEFAULT__": -1, "volumes": 10, "volumes___DEFAULT__": -1 } }././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315577.1271186 cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/quota_sets/0000775000175000017500000000000000000000000027026 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000022000000000000011447 
xustar0000000000000000122 path=cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/quota_sets/quotas-show-defaults-response.json.tpl 22 mtime=1759315527.0 cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/quota_sets/quotas-show-defaults-respo0000664000175000017500000000054300000000000034200 0ustar00zuulzuul00000000000000{ "quota_set": { "backup_gigabytes": 1000, "backups": 10, "gigabytes": 1000, "gigabytes___DEFAULT__": -1, "groups": 10, "id": "fake_tenant", "per_volume_gigabytes": -1, "snapshots": 10, "snapshots___DEFAULT__": -1, "volumes": 10, "volumes___DEFAULT__": -1 } }././@PaxHeader0000000000000000000000000000020700000000000011454 xustar0000000000000000113 path=cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/quota_sets/quotas-show-response.json.tpl 22 mtime=1759315527.0 cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/quota_sets/quotas-show-response.json.0000664000175000017500000000054300000000000034127 0ustar00zuulzuul00000000000000{ "quota_set": { "backup_gigabytes": 1000, "backups": 10, "gigabytes": 1000, "gigabytes___DEFAULT__": -1, "groups": 10, "id": "fake_tenant", "per_volume_gigabytes": -1, "snapshots": 10, "snapshots___DEFAULT__": -1, "volumes": 10, "volumes___DEFAULT__": -1 } }././@PaxHeader0000000000000000000000000000021500000000000011453 xustar0000000000000000119 path=cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/quota_sets/quotas-show-usage-response.json.tpl 22 mtime=1759315527.0 cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/quota_sets/quotas-show-usage-response0000664000175000017500000000226500000000000034206 0ustar00zuulzuul00000000000000{ "quota_set": { "backup_gigabytes": { "in_use": 0, "limit": 1000, "reserved": 0 }, "backups": { "in_use": 0, "limit": 10, "reserved": 0 }, "gigabytes": { "in_use": 0, "limit": 1000, "reserved": 0 }, "gigabytes___DEFAULT__": { "in_use": 0, "limit": -1, "reserved": 0 }, "groups": { "in_use": 0, "limit": 10, "reserved": 0 }, "id": "fake_tenant", "per_volume_gigabytes": { "in_use": 0, "limit": -1, "reserved": 0 }, "snapshots": { "in_use": 0, "limit": 10, "reserved": 0 }, "snapshots___DEFAULT__": { "in_use": 0, "limit": -1, "reserved": 0 }, "volumes": { "in_use": 0, "limit": 10, "reserved": 0 }, "volumes___DEFAULT__": { "in_use": 0, "limit": -1, "reserved": 0 } } }././@PaxHeader0000000000000000000000000000021000000000000011446 xustar0000000000000000114 path=cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/quota_sets/quotas-update-request.json.tpl 22 mtime=1759315527.0 cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/quota_sets/quotas-update-request.json0000664000175000017500000000013500000000000034202 0ustar00zuulzuul00000000000000{ "quota_set":{ "groups": 11, "volumes": 5, "backups": 4 } } ././@PaxHeader0000000000000000000000000000021100000000000011447 xustar0000000000000000115 path=cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/quota_sets/quotas-update-response.json.tpl 22 mtime=1759315527.0 cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/quota_sets/quotas-update-response.jso0000664000175000017500000000050400000000000034172 0ustar00zuulzuul00000000000000{ "quota_set": { "backup_gigabytes": 1000, "backups": 4, "gigabytes": 1000, "gigabytes___DEFAULT__": -1, "groups": 11, "per_volume_gigabytes": -1, "snapshots": 10, "snapshots___DEFAULT__": -1, "volumes": 5, "volumes___DEFAULT__": -1 } }././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 
mtime=1759315577.1271186 cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/snapshot_manage_extensions/0000775000175000017500000000000000000000000032265 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000023200000000000011452 xustar0000000000000000132 path=cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/snapshot_manage_extensions/snapshot-manage-request.json.tpl 22 mtime=1759315527.0 cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/snapshot_manage_extensions/snapshot-m0000664000175000017500000000034300000000000034301 0ustar00zuulzuul00000000000000{ "snapshot": { "description": null, "metadata": null, "ref": { "source-name": "lvol0" }, "name": null, "volume_id": "1df34919-aba7-4a1b-a614-3b409d71ac03" } }././@PaxHeader0000000000000000000000000000023300000000000011453 xustar0000000000000000133 path=cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/snapshot_manage_extensions/snapshot-manage-response.json.tpl 22 mtime=1759315527.0 cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/snapshot_manage_extensions/snapshot-m0000664000175000017500000000100200000000000034272 0ustar00zuulzuul00000000000000{ "snapshot": { "created_at": "2018-09-26T03:45:03.893592", "description": "this is a new snapshot", "id": "b6314a71-9d3d-439a-861d-b790def0d693", "metadata": { "manage-snap-meta1": "value1", "manage-snap-meta2": "value2", "manage-snap-meta3": "value3" }, "name": "new_snapshot", "size": 1, "status": "creating", "updated_at": "null", "volume_id": "1df34919-aba7-4a1b-a614-3b409d71ac03" } }././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315577.1311188 cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/snapshots/0000775000175000017500000000000000000000000026661 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000021100000000000011447 xustar0000000000000000115 path=cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/snapshots/snapshot-create-request.json.tpl 22 mtime=1759315527.0 cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/snapshots/snapshot-create-request.jso0000664000175000017500000000032400000000000034163 0ustar00zuulzuul00000000000000{ "snapshot": { "name": "snap-001", "description": "Daily backup", "volume_id": "%(volume_id)s", "force": true, "metadata": { "key": "v3" } } } ././@PaxHeader0000000000000000000000000000021200000000000011450 xustar0000000000000000116 path=cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/snapshots/snapshot-create-response.json.tpl 22 mtime=1759315527.0 cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/snapshots/snapshot-create-response.js0000664000175000017500000000050400000000000034152 0ustar00zuulzuul00000000000000{ "snapshot": { "created_at": "%(strtime)s", "description": "Daily backup", "id": "%(uuid)s", "metadata": { "key": "v3" }, "name": "snap-001", "size": 10, "status": "creating", "updated_at": null, "volume_id": "%(uuid)s" } }././@PaxHeader0000000000000000000000000000022200000000000011451 xustar0000000000000000124 path=cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/snapshots/snapshot-metadata-create-request.json.tpl 22 mtime=1759315527.0 cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/snapshots/snapshot-metadata-create-re0000664000175000017500000000006300000000000034065 0ustar00zuulzuul00000000000000{ "metadata": { "key": "value" } } ././@PaxHeader0000000000000000000000000000022300000000000011452 
xustar0000000000000000125 path=cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/snapshots/snapshot-metadata-create-response.json.tpl 22 mtime=1759315527.0 cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/snapshots/snapshot-metadata-create-re0000664000175000017500000000006200000000000034064 0ustar00zuulzuul00000000000000{ "metadata": { "key": "value" } }././@PaxHeader0000000000000000000000000000022500000000000011454 xustar0000000000000000127 path=cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/snapshots/snapshot-metadata-show-key-response.json.tpl 22 mtime=1759315527.0 cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/snapshots/snapshot-metadata-show-key-0000664000175000017500000000005300000000000034040 0ustar00zuulzuul00000000000000{ "meta": { "key": "v3" } }././@PaxHeader0000000000000000000000000000022100000000000011450 xustar0000000000000000123 path=cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/snapshots/snapshot-metadata-show-response.json.tpl 22 mtime=1759315527.0 cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/snapshots/snapshot-metadata-show-resp0000664000175000017500000000005700000000000034150 0ustar00zuulzuul00000000000000{ "metadata": { "key": "v3" } }././@PaxHeader0000000000000000000000000000022600000000000011455 xustar0000000000000000128 path=cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/snapshots/snapshot-metadata-update-key-request.json.tpl 22 mtime=1759315527.0 cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/snapshots/snapshot-metadata-update-ke0000664000175000017500000000005300000000000034074 0ustar00zuulzuul00000000000000{ "meta": { "key": "new_value" } } ././@PaxHeader0000000000000000000000000000022700000000000011456 xustar0000000000000000129 path=cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/snapshots/snapshot-metadata-update-key-response.json.tpl 22 mtime=1759315527.0 cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/snapshots/snapshot-metadata-update-ke0000664000175000017500000000006200000000000034074 0ustar00zuulzuul00000000000000{ "meta": { "key": "new_value" } }././@PaxHeader0000000000000000000000000000022200000000000011451 xustar0000000000000000124 path=cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/snapshots/snapshot-metadata-update-request.json.tpl 22 mtime=1759315527.0 cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/snapshots/snapshot-metadata-update-re0000664000175000017500000000007300000000000034105 0ustar00zuulzuul00000000000000{ "metadata": { "new_key": "new_value" } } ././@PaxHeader0000000000000000000000000000022300000000000011452 xustar0000000000000000125 path=cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/snapshots/snapshot-metadata-update-response.json.tpl 22 mtime=1759315527.0 cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/snapshots/snapshot-metadata-update-re0000664000175000017500000000007200000000000034104 0ustar00zuulzuul00000000000000{ "metadata": { "new_key": "new_value" } }././@PaxHeader0000000000000000000000000000021000000000000011446 xustar0000000000000000114 path=cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/snapshots/snapshot-show-response.json.tpl 22 mtime=1759315527.0 cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/snapshots/snapshot-show-response.json0000664000175000017500000000070000000000000034222 0ustar00zuulzuul00000000000000{ "snapshot": { "created_at": "%(strtime)s", "description": "Daily 
backup", "id": "%(uuid)s", "metadata": { "key": "v3" }, "name": "snap-001", "os-extended-snapshot-attributes:progress": "0%", "os-extended-snapshot-attributes:project_id": "%(uuid)s", "size": 10, "status": "creating", "updated_at": null, "volume_id": "%(uuid)s" } }././@PaxHeader0000000000000000000000000000021100000000000011447 xustar0000000000000000115 path=cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/snapshots/snapshot-update-request.json.tpl 22 mtime=1759315527.0 cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/snapshots/snapshot-update-request.jso0000664000175000017500000000016000000000000034200 0ustar00zuulzuul00000000000000{ "snapshot": { "name": "snap-002", "description": "This is yet, another snapshot." } } ././@PaxHeader0000000000000000000000000000021200000000000011450 xustar0000000000000000116 path=cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/snapshots/snapshot-update-response.json.tpl 22 mtime=1759315527.0 cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/snapshots/snapshot-update-response.js0000664000175000017500000000052600000000000034175 0ustar00zuulzuul00000000000000{ "snapshot": { "created_at": "%(strtime)s", "description": "This is yet, another snapshot.", "id": "%(uuid)s", "metadata": { "key": "v3" }, "name": "snap-002", "size": 10, "status": "creating", "updated_at": null, "volume_id": "%(uuid)s" } }././@PaxHeader0000000000000000000000000000022200000000000011451 xustar0000000000000000124 path=cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/snapshots/snapshots-list-detailed-response.json.tpl 22 mtime=1759315527.0 cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/snapshots/snapshots-list-detailed-res0000664000175000017500000000101100000000000034130 0ustar00zuulzuul00000000000000{ "snapshots": [ { "created_at": "%(strtime)s", "description": "Daily backup", "id": "%(uuid)s", "metadata": { "key": "v3" }, "name": "snap-001", "os-extended-snapshot-attributes:progress": "0%", "os-extended-snapshot-attributes:project_id": "%(uuid)s", "size": 10, "status": "creating", "updated_at": null, "volume_id": "%(uuid)s" } ] }././@PaxHeader0000000000000000000000000000021100000000000011447 xustar0000000000000000115 path=cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/snapshots/snapshots-list-response.json.tpl 22 mtime=1759315527.0 cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/snapshots/snapshots-list-response.jso0000664000175000017500000000060500000000000034226 0ustar00zuulzuul00000000000000{ "snapshots": [ { "created_at": "%(strtime)s", "description": "Daily backup", "id": "%(uuid)s", "metadata": { "key": "v3" }, "name": "snap-001", "size": 10, "status": "creating", "updated_at": null, "volume_id": "%(uuid)s" } ] }././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315577.1311188 cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/snapshots/v3.14/0000775000175000017500000000000000000000000027434 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000022000000000000011447 xustar0000000000000000122 path=cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/snapshots/v3.14/snapshot-create-response.json.tpl 22 mtime=1759315527.0 cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/snapshots/v3.14/snapshot-create-respo0000664000175000017500000000055000000000000033605 0ustar00zuulzuul00000000000000{ "snapshot": { "created_at": "%(strtime)s", "description": "Daily backup", "id": 
"%(uuid)s", "metadata": { "key": "v3" }, "name": "snap-001", "size": 10, "status": "creating", "updated_at": null, "volume_id": "%(uuid)s", "group_snapshot_id": null } } ././@PaxHeader0000000000000000000000000000021600000000000011454 xustar0000000000000000120 path=cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/snapshots/v3.14/snapshot-show-response.json.tpl 22 mtime=1759315527.0 cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/snapshots/v3.14/snapshot-show-respons0000664000175000017500000000074400000000000033670 0ustar00zuulzuul00000000000000{ "snapshot": { "created_at": "%(strtime)s", "description": "Daily backup", "id": "%(uuid)s", "metadata": { "key": "v3" }, "name": "snap-001", "os-extended-snapshot-attributes:progress": "0%", "os-extended-snapshot-attributes:project_id": "%(uuid)s", "size": 10, "status": "creating", "updated_at": null, "volume_id": "%(uuid)s", "group_snapshot_id": null } } ././@PaxHeader0000000000000000000000000000022000000000000011447 xustar0000000000000000122 path=cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/snapshots/v3.14/snapshot-update-response.json.tpl 22 mtime=1759315527.0 cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/snapshots/v3.14/snapshot-update-respo0000664000175000017500000000057200000000000033630 0ustar00zuulzuul00000000000000{ "snapshot": { "created_at": "%(strtime)s", "description": "This is yet, another snapshot.", "id": "%(uuid)s", "metadata": { "key": "v3" }, "name": "snap-002", "size": 10, "status": "creating", "updated_at": null, "volume_id": "%(uuid)s", "group_snapshot_id": null } } ././@PaxHeader0000000000000000000000000000023000000000000011450 xustar0000000000000000130 path=cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/snapshots/v3.14/snapshots-list-detailed-response.json.tpl 22 mtime=1759315527.0 cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/snapshots/v3.14/snapshots-list-detail0000664000175000017500000000106100000000000033610 0ustar00zuulzuul00000000000000{ "snapshots": [ { "created_at": "%(strtime)s", "description": "Daily backup", "id": "%(uuid)s", "metadata": { "key": "v3" }, "name": "snap-001", "os-extended-snapshot-attributes:progress": "0%", "os-extended-snapshot-attributes:project_id": "%(uuid)s", "size": 10, "status": "creating", "updated_at": null, "volume_id": "%(uuid)s", "group_snapshot_id": null } ] } ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315577.1311188 cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/snapshots/v3.41/0000775000175000017500000000000000000000000027434 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000022000000000000011447 xustar0000000000000000122 path=cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/snapshots/v3.41/snapshot-create-response.json.tpl 22 mtime=1759315527.0 cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/snapshots/v3.41/snapshot-create-respo0000664000175000017500000000060700000000000033610 0ustar00zuulzuul00000000000000{ "snapshot": { "created_at": "%(strtime)s", "description": "Daily backup", "id": "%(uuid)s", "metadata": { "key": "v3" }, "name": "snap-001", "size": 10, "status": "creating", "updated_at": null, "volume_id": "%(uuid)s", "group_snapshot_id": null, "user_id": "%(uuid)s" } } ././@PaxHeader0000000000000000000000000000021600000000000011454 xustar0000000000000000120 
path=cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/snapshots/v3.41/snapshot-show-response.json.tpl 22 mtime=1759315527.0 cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/snapshots/v3.41/snapshot-show-respons0000664000175000017500000000100300000000000033655 0ustar00zuulzuul00000000000000{ "snapshot": { "created_at": "%(strtime)s", "description": "Daily backup", "id": "%(uuid)s", "metadata": { "key": "v3" }, "name": "snap-001", "os-extended-snapshot-attributes:progress": "0%", "os-extended-snapshot-attributes:project_id": "%(uuid)s", "size": 10, "status": "creating", "updated_at": null, "volume_id": "%(uuid)s", "group_snapshot_id": null, "user_id": "%(uuid)s" } } ././@PaxHeader0000000000000000000000000000022000000000000011447 xustar0000000000000000122 path=cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/snapshots/v3.41/snapshot-update-response.json.tpl 22 mtime=1759315527.0 cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/snapshots/v3.41/snapshot-update-respo0000664000175000017500000000063100000000000033624 0ustar00zuulzuul00000000000000{ "snapshot": { "created_at": "%(strtime)s", "description": "This is yet, another snapshot.", "id": "%(uuid)s", "metadata": { "key": "v3" }, "name": "snap-002", "size": 10, "status": "creating", "updated_at": null, "volume_id": "%(uuid)s", "group_snapshot_id": null, "user_id": "%(uuid)s" } } ././@PaxHeader0000000000000000000000000000023000000000000011450 xustar0000000000000000130 path=cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/snapshots/v3.41/snapshots-list-detailed-response.json.tpl 22 mtime=1759315527.0 cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/snapshots/v3.41/snapshots-list-detail0000664000175000017500000000112400000000000033610 0ustar00zuulzuul00000000000000{ "snapshots": [ { "created_at": "%(strtime)s", "description": "Daily backup", "id": "%(uuid)s", "metadata": { "key": "v3" }, "name": "snap-001", "os-extended-snapshot-attributes:progress": "0%", "os-extended-snapshot-attributes:project_id": "%(uuid)s", "size": 10, "status": "creating", "updated_at": null, "volume_id": "%(uuid)s", "group_snapshot_id": null, "user_id": "%(uuid)s" } ] } ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315577.1351187 cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/snapshots/v3.65/0000775000175000017500000000000000000000000027442 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000022000000000000011447 xustar0000000000000000122 path=cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/snapshots/v3.65/snapshot-create-response.json.tpl 22 mtime=1759315527.0 cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/snapshots/v3.65/snapshot-create-respo0000664000175000017500000000064700000000000033622 0ustar00zuulzuul00000000000000{ "snapshot": { "created_at": "%(strtime)s", "description": "Daily backup", "id": "%(uuid)s", "metadata": { "key": "v3" }, "name": "snap-001", "size": 10, "status": "creating", "updated_at": null, "volume_id": "%(uuid)s", "group_snapshot_id": null, "user_id": "%(uuid)s", "consumes_quota": true } } ././@PaxHeader0000000000000000000000000000021600000000000011454 xustar0000000000000000120 path=cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/snapshots/v3.65/snapshot-show-response.json.tpl 22 mtime=1759315527.0 
cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/snapshots/v3.65/snapshot-show-respons0000664000175000017500000000104300000000000033667 0ustar00zuulzuul00000000000000{ "snapshot": { "created_at": "%(strtime)s", "description": "Daily backup", "id": "%(uuid)s", "metadata": { "key": "v3" }, "name": "snap-001", "os-extended-snapshot-attributes:progress": "0%", "os-extended-snapshot-attributes:project_id": "%(uuid)s", "size": 10, "status": "creating", "updated_at": null, "volume_id": "%(uuid)s", "group_snapshot_id": null, "user_id": "%(uuid)s", "consumes_quota": true } } ././@PaxHeader0000000000000000000000000000022000000000000011447 xustar0000000000000000122 path=cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/snapshots/v3.65/snapshot-update-response.json.tpl 22 mtime=1759315527.0 cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/snapshots/v3.65/snapshot-update-respo0000664000175000017500000000067100000000000033636 0ustar00zuulzuul00000000000000{ "snapshot": { "created_at": "%(strtime)s", "description": "This is yet, another snapshot.", "id": "%(uuid)s", "metadata": { "key": "v3" }, "name": "snap-002", "size": 10, "status": "creating", "updated_at": null, "volume_id": "%(uuid)s", "group_snapshot_id": null, "user_id": "%(uuid)s", "consumes_quota": true } } ././@PaxHeader0000000000000000000000000000023000000000000011450 xustar0000000000000000130 path=cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/snapshots/v3.65/snapshots-list-detailed-response.json.tpl 22 mtime=1759315527.0 cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/snapshots/v3.65/snapshots-list-detail0000664000175000017500000000117000000000000033617 0ustar00zuulzuul00000000000000{ "snapshots": [ { "created_at": "%(strtime)s", "description": "Daily backup", "id": "%(uuid)s", "metadata": { "key": "v3" }, "name": "snap-001", "os-extended-snapshot-attributes:progress": "0%", "os-extended-snapshot-attributes:project_id": "%(uuid)s", "size": 10, "status": "creating", "updated_at": null, "volume_id": "%(uuid)s", "group_snapshot_id": null, "user_id": "%(uuid)s", "consumes_quota": true } ] } ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315577.1351187 cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/versions/0000775000175000017500000000000000000000000026507 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000020600000000000011453 xustar0000000000000000112 path=cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/versions/version-show-response.json.tpl 22 mtime=1759315527.0 cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/versions/version-show-response.json.t0000664000175000017500000000137000000000000034144 0ustar00zuulzuul00000000000000{ "versions": [ { "id": "v3.0", "links": [ { "href": "https://docs.openstack.org/", "rel": "describedby", "type": "text/html" }, { "href": "%(host)s/v3/", "rel": "self" } ], "media-types": [ { "base": "application/json", "type": "application/vnd.openstack.volume+json;version=3" } ], "min_version": "3.0", "status": "CURRENT", "updated": "%(isotime)s", "version": "%(max_api_version)s" } ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/versions/versions-response.json.tpl0000664000175000017500000000137000000000000033705 0ustar00zuulzuul00000000000000{ "versions": [ { "status": "CURRENT", "updated": "%(isotime)s", 
"links": [ { "href": "https://docs.openstack.org/", "type": "text/html", "rel": "describedby" }, { "href": "%(host)s/v3/", "rel": "self" } ], "min_version": "3.0", "version": "%(max_api_version)s", "media-types": [ { "base": "application/json", "type": "application/vnd.openstack.volume+json;version=3" } ], "id": "v3.0" } ] } ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315577.1351187 cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/volume_actions/0000775000175000017500000000000000000000000027666 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000022500000000000011454 xustar0000000000000000127 path=cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/volume_actions/volume-upload-to-image-request.json.tpl 22 mtime=1759315527.0 cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/volume_actions/volume-upload-to-image0000664000175000017500000000023700000000000034104 0ustar00zuulzuul00000000000000{ "os-volume_upload_image":{ "image_name": "test", "force": false, "disk_format": "raw", "container_format": "bare" } }././@PaxHeader0000000000000000000000000000022600000000000011455 xustar0000000000000000128 path=cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/volume_actions/volume-upload-to-image-response.json.tpl 22 mtime=1759315527.0 cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/volume_actions/volume-upload-to-image0000664000175000017500000000073700000000000034111 0ustar00zuulzuul00000000000000{ "os-volume_upload_image": { "container_format": "bare", "disk_format": "raw", "display_description": null, "id": "3a81fdac-e8ae-4e61-b6a2-2e14ff316f19", "image_id": "de75b74e-7f0d-4b59-a263-bd87bfc313bd", "image_name": "test", "protected": false, "size": 1, "status": "uploading", "updated_at": "2017-06-05T08:44:28.000000", "visibility": "private", "volume_type": null } }././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315577.1351187 cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/volume_manage_extensions/0000775000175000017500000000000000000000000031735 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000022600000000000011455 xustar0000000000000000128 path=cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/volume_manage_extensions/volume-manage-request.json.tpl 22 mtime=1759315527.0 cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/volume_manage_extensions/volume-manag0000664000175000017500000000065100000000000034252 0ustar00zuulzuul00000000000000{ "volume": { "host": "%(host)s", "ref": { "source-name": "existingLV", "source-id": "1234" }, "name": "New Volume", "availability_zone": "az2", "description": "Volume imported from existingLV", "volume_type": null, "bootable": true, "metadata": { "key1": "value1", "key2": "value2" } } } ././@PaxHeader0000000000000000000000000000022700000000000011456 xustar0000000000000000129 path=cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/volume_manage_extensions/volume-manage-response.json.tpl 22 mtime=1759315527.0 cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/volume_manage_extensions/volume-manag0000664000175000017500000000213200000000000034246 0ustar00zuulzuul00000000000000{ "volume": { "attachments": [], "availability_zone": "az2", "bootable": "false", "created_at": "2014-07-18T00:12:54.000000", "description": "Volume imported from existingLV", "encrypted": "false", "id": 
"23cf872b-c781-4cd4-847d-5f2ec8cbd91c", "links": [ { "href": "http://10.0.2.15:8776/v3/87c8522052ca4eed98bc672b4c1a3ddb/volumes/23cf872b-c781-4cd4-847d-5f2ec8cbd91c", "rel": "self" }, { "href": "http://10.0.2.15:8776/87c8522052ca4eed98bc672b4c1a3ddb/volumes/23cf872b-c781-4cd4-847d-5f2ec8cbd91c", "rel": "bookmark" } ], "metadata": { "key1": "value1", "key2": "value2" }, "name": "New Volume", "os-vol-tenant-attr:tenant_id": "87c8522052ca4eed98bc672b4c1a3ddb", "size": 0, "snapshot_id": "null", "source_volid": "null", "status": "creating", "user_id": "eae1472b5fc5496998a3d06550929e7e", "volume_type": "null" } }././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315577.1351187 cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/volume_transfer/0000775000175000017500000000000000000000000030052 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000022600000000000011455 xustar0000000000000000128 path=cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/volume_transfer/volume-transfer-accept-request.json.tpl 22 mtime=1759315527.0 cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/volume_transfer/volume-transfer-accep0000664000175000017500000000007500000000000034201 0ustar00zuulzuul00000000000000{ "accept": { "auth_key": "%(auth_key)s" } } ././@PaxHeader0000000000000000000000000000022700000000000011456 xustar0000000000000000129 path=cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/volume_transfer/volume-transfer-accept-response.json.tpl 22 mtime=1759315527.0 cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/volume_transfer/volume-transfer-accep0000664000175000017500000000064100000000000034200 0ustar00zuulzuul00000000000000{ "transfer": { "id": "%(uuid)s", "links": [ { "href": "%(host)s/v3/%(uuid)s/os-volume-transfer/%(uuid)s", "rel": "self" }, { "href": "%(host)s/%(uuid)s/os-volume-transfer/%(uuid)s", "rel": "bookmark" } ], "name": "first volume", "volume_id": "%(uuid)s" } }././@PaxHeader0000000000000000000000000000022600000000000011455 xustar0000000000000000128 path=cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/volume_transfer/volume-transfer-create-request.json.tpl 22 mtime=1759315527.0 cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/volume_transfer/volume-transfer-creat0000664000175000017500000000014100000000000034216 0ustar00zuulzuul00000000000000{ "transfer": { "volume_id": "%(volume_id)s", "name": "first volume" } } ././@PaxHeader0000000000000000000000000000022700000000000011456 xustar0000000000000000129 path=cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/volume_transfer/volume-transfer-create-response.json.tpl 22 mtime=1759315527.0 cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/volume_transfer/volume-transfer-creat0000664000175000017500000000075200000000000034226 0ustar00zuulzuul00000000000000{ "transfer": { "auth_key": "%(auth_key)s", "created_at": "%(strtime)s", "id": "%(uuid)s", "links": [ { "href": "%(host)s/v3/%(uuid)s/os-volume-transfer/%(uuid)s", "rel": "self" }, { "href": "%(host)s/%(uuid)s/os-volume-transfer/%(uuid)s", "rel": "bookmark" } ], "name": "first volume", "volume_id": "%(uuid)s" } }././@PaxHeader0000000000000000000000000000022500000000000011454 xustar0000000000000000127 path=cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/volume_transfer/volume-transfer-show-response.json.tpl 22 mtime=1759315527.0 
cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/volume_transfer/volume-transfer-show-0000664000175000017500000000070600000000000034164 0ustar00zuulzuul00000000000000{ "transfer": { "created_at": "%(strtime)s", "id": "%(uuid)s", "links": [ { "href": "%(host)s/v3/%(uuid)s/os-volume-transfer/%(uuid)s", "rel": "self" }, { "href": "%(host)s/%(uuid)s/os-volume-transfer/%(uuid)s", "rel": "bookmark" } ], "name": "first volume", "volume_id": "%(uuid)s" } }././@PaxHeader0000000000000000000000000000023700000000000011457 xustar0000000000000000137 path=cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/volume_transfer/volume-transfers-list-detailed-response.json.tpl 22 mtime=1759315527.0 cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/volume_transfer/volume-transfers-list0000664000175000017500000000102300000000000034256 0ustar00zuulzuul00000000000000{ "transfers": [ { "created_at": "%(strtime)s", "id": "%(uuid)s", "links": [ { "href": "%(host)s/v3/%(uuid)s/os-volume-transfer/%(uuid)s", "rel": "self" }, { "href": "%(host)s/%(uuid)s/os-volume-transfer/%(uuid)s", "rel": "bookmark" } ], "name": "first volume", "volume_id": "%(uuid)s" } ] }././@PaxHeader0000000000000000000000000000022600000000000011455 xustar0000000000000000128 path=cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/volume_transfer/volume-transfers-list-response.json.tpl 22 mtime=1759315527.0 cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/volume_transfer/volume-transfers-list0000664000175000017500000000075200000000000034266 0ustar00zuulzuul00000000000000{ "transfers": [ { "id": "%(uuid)s", "links": [ { "href": "%(host)s/v3/%(uuid)s/os-volume-transfer/%(uuid)s", "rel": "self" }, { "href": "%(host)s/%(uuid)s/os-volume-transfer/%(uuid)s", "rel": "bookmark" } ], "name": "first volume", "volume_id": "%(uuid)s" } ] }././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315577.1351187 cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/volume_transfers/0000775000175000017500000000000000000000000030235 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315577.1391187 cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/volume_transfers/v3.55/0000775000175000017500000000000000000000000031015 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000023600000000000011456 xustar0000000000000000136 path=cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/volume_transfers/v3.55/volume-transfers-create-request.json.tpl 22 mtime=1759315527.0 cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/volume_transfers/v3.55/volume-transfe0000664000175000017500000000015100000000000033704 0ustar00zuulzuul00000000000000{ "transfer": { "volume_id": "%(volume_id)s", "name": "first volume" } } ././@PaxHeader0000000000000000000000000000023700000000000011457 xustar0000000000000000137 path=cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/volume_transfers/v3.55/volume-transfers-create-response.json.tpl 22 mtime=1759315527.0 cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/volume_transfers/v3.55/volume-transfe0000664000175000017500000000101200000000000033701 0ustar00zuulzuul00000000000000{ "transfer": { "auth_key": "%(auth_key)s", "created_at": "%(strtime)s", "id": "%(uuid)s", "links": [ { "href": "%(host)s/v3/%(uuid)s/os-volume-transfer/%(uuid)s", "rel": "self" }, { "href": 
"%(host)s/%(uuid)s/os-volume-transfer/%(uuid)s", "rel": "bookmark" } ], "name": "first volume", "volume_id": "%(uuid)s", "no_snapshots": false } } ././@PaxHeader0000000000000000000000000000023500000000000011455 xustar0000000000000000135 path=cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/volume_transfers/v3.55/volume-transfers-show-response.json.tpl 22 mtime=1759315527.0 cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/volume_transfers/v3.55/volume-transfe0000664000175000017500000000074600000000000033716 0ustar00zuulzuul00000000000000{ "transfer": { "created_at": "%(strtime)s", "id": "%(uuid)s", "links": [ { "href": "%(host)s/v3/%(uuid)s/os-volume-transfer/%(uuid)s", "rel": "self" }, { "href": "%(host)s/%(uuid)s/os-volume-transfer/%(uuid)s", "rel": "bookmark" } ], "name": "first volume", "volume_id": "%(uuid)s", "no_snapshots": false } } ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315577.1391187 cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/volume_transfers/v3.57/0000775000175000017500000000000000000000000031017 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000023600000000000011456 xustar0000000000000000136 path=cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/volume_transfers/v3.57/volume-transfers-create-request.json.tpl 22 mtime=1759315527.0 cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/volume_transfers/v3.57/volume-transfe0000664000175000017500000000020000000000000033701 0ustar00zuulzuul00000000000000{ "transfer": { "volume_id": "%(volume_id)s", "name": "first volume", "no_snapshots": false } } ././@PaxHeader0000000000000000000000000000023700000000000011457 xustar0000000000000000137 path=cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/volume_transfers/v3.57/volume-transfers-create-response.json.tpl 22 mtime=1759315527.0 cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/volume_transfers/v3.57/volume-transfe0000664000175000017500000000116600000000000033715 0ustar00zuulzuul00000000000000{ "transfer": { "auth_key": "%(auth_key)s", "created_at": "%(strtime)s", "id": "%(uuid)s", "links": [ { "href": "%(host)s/v3/%(uuid)s/os-volume-transfer/%(uuid)s", "rel": "self" }, { "href": "%(host)s/%(uuid)s/os-volume-transfer/%(uuid)s", "rel": "bookmark" } ], "name": "first volume", "volume_id": "%(uuid)s", "no_snapshots": false, "destination_project_id": null, "source_project_id": "%(uuid)s", "accepted": false } } ././@PaxHeader0000000000000000000000000000023500000000000011455 xustar0000000000000000135 path=cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/volume_transfers/v3.57/volume-transfers-show-response.json.tpl 22 mtime=1759315527.0 cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/volume_transfers/v3.57/volume-transfe0000664000175000017500000000112200000000000033705 0ustar00zuulzuul00000000000000{ "transfer": { "created_at": "%(strtime)s", "id": "%(uuid)s", "links": [ { "href": "%(host)s/v3/%(uuid)s/os-volume-transfer/%(uuid)s", "rel": "self" }, { "href": "%(host)s/%(uuid)s/os-volume-transfer/%(uuid)s", "rel": "bookmark" } ], "name": "first volume", "volume_id": "%(uuid)s", "destination_project_id": null, "source_project_id": "%(uuid)s", "accepted": false, "no_snapshots": false } } ././@PaxHeader0000000000000000000000000000023000000000000011450 xustar0000000000000000130 
path=cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/volume_transfers/volume-transfers-accept-request.json.tpl 22 mtime=1759315527.0 cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/volume_transfers/volume-transfers-acc0000664000175000017500000000007500000000000034222 0ustar00zuulzuul00000000000000{ "accept": { "auth_key": "%(auth_key)s" } } ././@PaxHeader0000000000000000000000000000023100000000000011451 xustar0000000000000000131 path=cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/volume_transfers/volume-transfers-accept-response.json.tpl 22 mtime=1759315527.0 cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/volume_transfers/volume-transfers-acc0000664000175000017500000000064200000000000034222 0ustar00zuulzuul00000000000000{ "transfer": { "id": "%(uuid)s", "links": [ { "href": "%(host)s/v3/%(uuid)s/os-volume-transfer/%(uuid)s", "rel": "self" }, { "href": "%(host)s/%(uuid)s/os-volume-transfer/%(uuid)s", "rel": "bookmark" } ], "name": "first volume", "volume_id": "%(uuid)s" } } ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315577.1431189 cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/volume_type/0000775000175000017500000000000000000000000027207 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000022200000000000011451 xustar0000000000000000124 path=cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/volume_type/encryption-type-create-request.json.tpl 22 mtime=1759315527.0 cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/volume_type/encryption-type-create-re0000664000175000017500000000023600000000000034151 0ustar00zuulzuul00000000000000{ "encryption":{ "key_size": 256, "provider": "luks", "control_location":"front-end", "cipher": "aes-xts-plain64" } } ././@PaxHeader0000000000000000000000000000022300000000000011452 xustar0000000000000000125 path=cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/volume_type/encryption-type-create-response.json.tpl 22 mtime=1759315527.0 cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/volume_type/encryption-type-create-re0000664000175000017500000000035300000000000034151 0ustar00zuulzuul00000000000000{ "encryption": { "volume_type_id": "%(uuid)s", "control_location": "front-end", "encryption_id": "%(uuid)s", "key_size": 256, "provider": "luks", "cipher": "aes-xts-plain64" } } ././@PaxHeader0000000000000000000000000000022100000000000011450 xustar0000000000000000123 path=cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/volume_type/encryption-type-show-response.json.tpl 22 mtime=1759315527.0 cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/volume_type/encryption-type-show-resp0000664000175000017500000000044000000000000034226 0ustar00zuulzuul00000000000000{ "volume_type_id": "%(uuid)s", "control_location": "front-end", "deleted": false, "created_at": "%(strtime)s", "updated_at": null, "encryption_id": "%(uuid)s", "key_size": 256, "provider": "luks", "deleted_at": null, "cipher": "aes-xts-plain64" } ././@PaxHeader0000000000000000000000000000024000000000000011451 xustar0000000000000000138 path=cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/volume_type/encryption-type-specific-specs-show-response.json.tpl 22 mtime=1759315527.0 cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/volume_type/encryption-type-specific-0000664000175000017500000000004400000000000034141 0ustar00zuulzuul00000000000000{ "cipher": 
"aes-xts-plain64" } ././@PaxHeader0000000000000000000000000000022200000000000011451 xustar0000000000000000124 path=cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/volume_type/encryption-type-update-request.json.tpl 22 mtime=1759315527.0 cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/volume_type/encryption-type-update-re0000664000175000017500000000023400000000000034166 0ustar00zuulzuul00000000000000{ "encryption":{ "key_size": 64, "provider": "luks", "control_location":"back-end", "cipher": "aes-xts-plain64" } } ././@PaxHeader0000000000000000000000000000022300000000000011452 xustar0000000000000000125 path=cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/volume_type/encryption-type-update-response.json.tpl 22 mtime=1759315527.0 cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/volume_type/encryption-type-update-re0000664000175000017500000000023400000000000034166 0ustar00zuulzuul00000000000000{ "encryption":{ "key_size": 64, "provider": "luks", "control_location":"back-end", "cipher": "aes-xts-plain64" } } ././@PaxHeader0000000000000000000000000000022200000000000011451 xustar0000000000000000124 path=cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/volume_type/volume-type-access-add-request.json.tpl 22 mtime=1759315527.0 cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/volume_type/volume-type-access-add-re0000664000175000017500000000013200000000000034005 0ustar00zuulzuul00000000000000{ "addProjectAccess": { "project": "6f70656e737461636b20342065766572" } } ././@PaxHeader0000000000000000000000000000022400000000000011453 xustar0000000000000000126 path=cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/volume_type/volume-type-access-list-response.json.tpl 22 mtime=1759315527.0 cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/volume_type/volume-type-access-list-r0000664000175000017500000000021300000000000034063 0ustar00zuulzuul00000000000000{ "volume_type_access": [ { "project_id": "%(user_id)s", "volume_type_id": "%(uuid)s" } ] }././@PaxHeader0000000000000000000000000000023500000000000011455 xustar0000000000000000135 path=cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/volume_type/volume-type-all-extra-specs-show-response.json.tpl 22 mtime=1759315527.0 cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/volume_type/volume-type-all-extra-spe0000664000175000017500000000007500000000000034076 0ustar00zuulzuul00000000000000{ "extra_specs": { "capabilities": "gpu" } } ././@PaxHeader0000000000000000000000000000021600000000000011454 xustar0000000000000000120 path=cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/volume_type/volume-type-create-request.json.tpl 22 mtime=1759315527.0 cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/volume_type/volume-type-create-reques0000664000175000017500000000034100000000000034161 0ustar00zuulzuul00000000000000{ "volume_type": { "name": "%(name)s", "description": "%(description)s", "os-volume-type-access:is_public": "%(bool)s", "extra_specs": { "capabilities": "gpu" } } } ././@PaxHeader0000000000000000000000000000021700000000000011455 xustar0000000000000000121 path=cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/volume_type/volume-type-create-response.json.tpl 22 mtime=1759315527.0 cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/volume_type/volume-type-create-respon0000664000175000017500000000042500000000000034166 0ustar00zuulzuul00000000000000{ "volume_type": { "name": 
"vol-type-001", "extra_specs": { "capabilities": "gpu" }, "os-volume-type-access:is_public": true, "is_public": true, "id": "%(uuid)s", "description": "volume type 0001" } } ././@PaxHeader0000000000000000000000000000022000000000000011447 xustar0000000000000000122 path=cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/volume_type/volume-type-default-response.json.tpl 22 mtime=1759315527.0 cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/volume_type/volume-type-default-respo0000664000175000017500000000040100000000000034163 0ustar00zuulzuul00000000000000{ "volume_type": { "description": "volume type 0001", "extra_specs": { "capabilities": "gpu" }, "id": "%(uuid)s", "is_public": true, "name": "vol-type-001", "qos_specs_id": null } }././@PaxHeader0000000000000000000000000000024100000000000011452 xustar0000000000000000139 path=cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/volume_type/volume-type-extra-specs-create-update-request.json.tpl 22 mtime=1759315527.0 cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/volume_type/volume-type-extra-specs-c0000664000175000017500000000012200000000000034067 0ustar00zuulzuul00000000000000{ "extra_specs": { "key1": "value1", "key2": "value2" } } ././@PaxHeader0000000000000000000000000000024200000000000011453 xustar0000000000000000140 path=cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/volume_type/volume-type-extra-specs-create-update-response.json.tpl 22 mtime=1759315527.0 cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/volume_type/volume-type-extra-specs-c0000664000175000017500000000012200000000000034067 0ustar00zuulzuul00000000000000{ "extra_specs": { "key1": "value1", "key2": "value2" } } ././@PaxHeader0000000000000000000000000000021500000000000011453 xustar0000000000000000119 path=cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/volume_type/volume-type-show-response.json.tpl 22 mtime=1759315527.0 cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/volume_type/volume-type-show-response0000664000175000017500000000046200000000000034234 0ustar00zuulzuul00000000000000{ "volume_type": { "description": "volume type 0001", "extra_specs": { "capabilities": "gpu" }, "id": "%(uuid)s", "is_public": true, "name": "vol-type-001", "os-volume-type-access:is_public": true, "qos_specs_id": null } }././@PaxHeader0000000000000000000000000000024200000000000011453 xustar0000000000000000140 path=cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/volume_type/volume-type-specific-extra-specs-show-response.json.tpl 22 mtime=1759315527.0 cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/volume_type/volume-type-specific-extr0000664000175000017500000000003600000000000034162 0ustar00zuulzuul00000000000000{ "capabilities": "gpu" } ././@PaxHeader0000000000000000000000000000021600000000000011454 xustar0000000000000000120 path=cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/volume_type/volume-type-update-request.json.tpl 22 mtime=1759315527.0 cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/volume_type/volume-type-update-reques0000664000175000017500000000020500000000000034177 0ustar00zuulzuul00000000000000{ "volume_type": { "name": "%(name)s", "description": "%(description)s", "is_public": "%(bool)s" } } ././@PaxHeader0000000000000000000000000000021700000000000011455 xustar0000000000000000121 path=cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/volume_type/volume-type-update-response.json.tpl 22 
mtime=1759315527.0 cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/volume_type/volume-type-update-respon0000664000175000017500000000034500000000000034206 0ustar00zuulzuul00000000000000{ "volume_type": { "id": "%(uuid)s", "name": "vol-type-001", "description": "volume type 0001", "is_public": true, "extra_specs": { "capabilities": "gpu" } } } ././@PaxHeader0000000000000000000000000000021600000000000011454 xustar0000000000000000120 path=cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/volume_type/volume-types-list-response.json.tpl 22 mtime=1759315527.0 cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/volume_type/volume-types-list-respons0000664000175000017500000000172400000000000034247 0ustar00zuulzuul00000000000000{ "volume_types": [ { "description": "volume type 0002", "extra_specs": { "capabilities": "gpu" }, "id": "%(uuid)s", "is_public": true, "name": "vol-type-002", "os-volume-type-access:is_public": true, "qos_specs_id": null }, { "description": "volume type 0001", "extra_specs": { "capabilities": "gpu" }, "id": "%(uuid)s", "is_public": true, "name": "vol-type-001", "os-volume-type-access:is_public": true, "qos_specs_id": null }, { "description": "Default Volume Type", "extra_specs": {}, "id": "%(uuid)s", "is_public": true, "name": "__DEFAULT__", "os-volume-type-access:is_public": true, "qos_specs_id": null } ] }././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315577.1471188 cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/volumes/0000775000175000017500000000000000000000000026331 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315577.1471188 cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/volumes/v3.13/0000775000175000017500000000000000000000000027103 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000021400000000000011452 xustar0000000000000000118 path=cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/volumes/v3.13/volume-create-response.json.tpl 22 mtime=1759315527.0 cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/volumes/v3.13/volume-create-response.0000664000175000017500000000173300000000000033514 0ustar00zuulzuul00000000000000{ "volume": { "attachments": [], "availability_zone": "nova", "bootable": "false", "consistencygroup_id": null, "created_at": "%(strtime)s", "description": null, "encrypted": false, "id": "%(uuid)s", "links": [ { "href": "%(host)s/v3/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/volumes/%(uuid)s", "rel": "self" }, { "href": "%(host)s/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/volumes/%(uuid)s", "rel": "bookmark" } ], "metadata": {}, "migration_status": null, "multiattach": false, "name": null, "replication_status": null, "size": 10, "snapshot_id": null, "source_volid": null, "status": "creating", "updated_at": null, "user_id": "%(uuid)s", "volume_type": "__DEFAULT__", "group_id": null } } ././@PaxHeader0000000000000000000000000000021200000000000011450 xustar0000000000000000116 path=cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/volumes/v3.13/volume-show-response.json.tpl 22 mtime=1759315527.0 cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/volumes/v3.13/volume-show-response.js0000664000175000017500000000222600000000000033564 0ustar00zuulzuul00000000000000{ "volume": { "attachments": [], "availability_zone": "nova", "bootable": "false", "consistencygroup_id": null, "created_at": "%(strtime)s", 
"description": null, "encrypted": false, "id": "%(uuid)s", "links": [ { "href": "%(host)s/v3/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/volumes/%(uuid)s", "rel": "self" }, { "href": "%(host)s/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/volumes/%(uuid)s", "rel": "bookmark" } ], "metadata": {}, "migration_status": null, "multiattach": false, "name": null, "os-vol-host-attr:host": null, "os-vol-mig-status-attr:migstat": null, "os-vol-mig-status-attr:name_id": null, "os-vol-tenant-attr:tenant_id": "%(uuid)s", "replication_status": null, "size": 10, "snapshot_id": null, "source_volid": null, "status": "creating", "updated_at": null, "user_id": "%(uuid)s", "volume_type": "__DEFAULT__", "group_id": null } } ././@PaxHeader0000000000000000000000000000021400000000000011452 xustar0000000000000000118 path=cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/volumes/v3.13/volume-update-response.json.tpl 22 mtime=1759315527.0 cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/volumes/v3.13/volume-update-response.0000664000175000017500000000204300000000000033526 0ustar00zuulzuul00000000000000{ "volume": { "attachments": [], "availability_zone": "nova", "bootable": "false", "consistencygroup_id": null, "created_at": "%(strtime)s", "description": "This is yet, another volume.", "encrypted": false, "id": "%(uuid)s", "links": [ { "href": "%(host)s/v3/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/volumes/%(uuid)s", "rel": "self" }, { "href": "%(host)s/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/volumes/%(uuid)s", "rel": "bookmark" } ], "metadata": { "name": "metadata0" }, "migration_status": null, "multiattach": false, "name": "vol-003", "replication_status": null, "size": 10, "snapshot_id": null, "source_volid": null, "status": "creating", "updated_at": null, "user_id": "%(uuid)s", "volume_type": "__DEFAULT__", "group_id": null } } ././@PaxHeader0000000000000000000000000000022400000000000011453 xustar0000000000000000126 path=cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/volumes/v3.13/volumes-list-detailed-response.json.tpl 22 mtime=1759315527.0 cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/volumes/v3.13/volumes-list-detailed-r0000664000175000017500000000246400000000000033507 0ustar00zuulzuul00000000000000{ "volumes": [ { "attachments": [], "availability_zone": "nova", "bootable": "false", "consistencygroup_id": null, "created_at": "%(strtime)s", "description": null, "encrypted": false, "id": "%(uuid)s", "links": [ { "href": "%(host)s/v3/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/volumes/%(uuid)s", "rel": "self" }, { "href": "%(host)s/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/volumes/%(uuid)s", "rel": "bookmark" } ], "metadata": {}, "migration_status": null, "multiattach": false, "name": null, "os-vol-host-attr:host": null, "os-vol-mig-status-attr:migstat": null, "os-vol-mig-status-attr:name_id": null, "os-vol-tenant-attr:tenant_id": "%(uuid)s", "replication_status": null, "size": 10, "snapshot_id": null, "source_volid": null, "status": "creating", "updated_at": null, "user_id": "%(uuid)s", "volume_type": "%(name)s", "group_id": null } ] } ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315577.1471188 cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/volumes/v3.21/0000775000175000017500000000000000000000000027102 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000021400000000000011452 xustar0000000000000000118 
path=cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/volumes/v3.21/volume-create-response.json.tpl 22 mtime=1759315527.0 cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/volumes/v3.21/volume-create-response.0000664000175000017500000000177100000000000033515 0ustar00zuulzuul00000000000000{ "volume": { "attachments": [], "availability_zone": "nova", "bootable": "false", "consistencygroup_id": null, "created_at": "%(strtime)s", "description": null, "encrypted": false, "id": "%(uuid)s", "links": [ { "href": "%(host)s/v3/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/volumes/%(uuid)s", "rel": "self" }, { "href": "%(host)s/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/volumes/%(uuid)s", "rel": "bookmark" } ], "metadata": {}, "migration_status": null, "multiattach": false, "name": null, "replication_status": null, "size": 10, "snapshot_id": null, "source_volid": null, "status": "creating", "updated_at": null, "user_id": "%(uuid)s", "volume_type": "__DEFAULT__", "group_id": null, "provider_id": null } } ././@PaxHeader0000000000000000000000000000021200000000000011450 xustar0000000000000000116 path=cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/volumes/v3.21/volume-show-response.json.tpl 22 mtime=1759315527.0 cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/volumes/v3.21/volume-show-response.js0000664000175000017500000000226300000000000033564 0ustar00zuulzuul00000000000000{ "volume": { "attachments": [], "availability_zone": "nova", "bootable": "false", "consistencygroup_id": null, "created_at": "%(strtime)s", "description": null, "encrypted": false, "id": "%(uuid)s", "links": [ { "href": "%(host)s/v3/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/volumes/%(uuid)s", "rel": "self" }, { "href": "%(host)s/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/volumes/%(uuid)s", "rel": "bookmark" } ], "metadata": {}, "migration_status": null, "multiattach": false, "name": null, "os-vol-host-attr:host": null, "os-vol-mig-status-attr:migstat": null, "os-vol-mig-status-attr:name_id": null, "os-vol-tenant-attr:tenant_id": "%(uuid)s", "replication_status": null, "size": 10, "snapshot_id": null, "source_volid": null, "status": "creating", "updated_at": null, "user_id": "%(uuid)s", "volume_type": "__DEFAULT__", "group_id": null, "provider_id": null } } ././@PaxHeader0000000000000000000000000000021400000000000011452 xustar0000000000000000118 path=cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/volumes/v3.21/volume-update-response.json.tpl 22 mtime=1759315527.0 cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/volumes/v3.21/volume-update-response.0000664000175000017500000000210000000000000033517 0ustar00zuulzuul00000000000000{ "volume": { "attachments": [], "availability_zone": "nova", "bootable": "false", "consistencygroup_id": null, "created_at": "%(strtime)s", "description": "This is yet, another volume.", "encrypted": false, "id": "%(uuid)s", "links": [ { "href": "%(host)s/v3/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/volumes/%(uuid)s", "rel": "self" }, { "href": "%(host)s/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/volumes/%(uuid)s", "rel": "bookmark" } ], "metadata": { "name": "metadata0" }, "migration_status": null, "multiattach": false, "name": "vol-003", "replication_status": null, "size": 10, "snapshot_id": null, "source_volid": null, "status": "creating", "updated_at": null, "user_id": "%(uuid)s", "volume_type": "__DEFAULT__", "group_id": null, "provider_id": null } } ././@PaxHeader0000000000000000000000000000022400000000000011453 xustar0000000000000000126 
path=cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/volumes/v3.21/volumes-list-detailed-response.json.tpl 22 mtime=1759315527.0 cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/volumes/v3.21/volumes-list-detailed-r0000664000175000017500000000252500000000000033504 0ustar00zuulzuul00000000000000{ "volumes": [ { "attachments": [], "availability_zone": "nova", "bootable": "false", "consistencygroup_id": null, "created_at": "%(strtime)s", "description": null, "encrypted": false, "id": "%(uuid)s", "links": [ { "href": "%(host)s/v3/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/volumes/%(uuid)s", "rel": "self" }, { "href": "%(host)s/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/volumes/%(uuid)s", "rel": "bookmark" } ], "metadata": {}, "migration_status": null, "multiattach": false, "name": null, "os-vol-host-attr:host": null, "os-vol-mig-status-attr:migstat": null, "os-vol-mig-status-attr:name_id": null, "os-vol-tenant-attr:tenant_id": "%(uuid)s", "replication_status": null, "size": 10, "snapshot_id": null, "source_volid": null, "status": "creating", "updated_at": null, "user_id": "%(uuid)s", "volume_type": "%(name)s", "group_id": null, "provider_id": null } ] } ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1759315577.151119 cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/volumes/v3.48/0000775000175000017500000000000000000000000027113 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000021400000000000011452 xustar0000000000000000118 path=cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/volumes/v3.48/volume-create-response.json.tpl 22 mtime=1759315527.0 cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/volumes/v3.48/volume-create-response.0000664000175000017500000000206600000000000033524 0ustar00zuulzuul00000000000000{ "volume": { "attachments": [], "availability_zone": "nova", "bootable": "false", "consistencygroup_id": null, "created_at": "%(strtime)s", "description": null, "encrypted": false, "id": "%(uuid)s", "links": [ { "href": "%(host)s/v3/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/volumes/%(uuid)s", "rel": "self" }, { "href": "%(host)s/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/volumes/%(uuid)s", "rel": "bookmark" } ], "metadata": {}, "migration_status": null, "multiattach": false, "name": null, "replication_status": null, "size": 10, "snapshot_id": null, "source_volid": null, "status": "creating", "updated_at": null, "user_id": "%(uuid)s", "volume_type": "__DEFAULT__", "group_id": null, "provider_id": null, "service_uuid": null, "shared_targets": true } } ././@PaxHeader0000000000000000000000000000021200000000000011450 xustar0000000000000000116 path=cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/volumes/v3.48/volume-show-response.json.tpl 22 mtime=1759315527.0 cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/volumes/v3.48/volume-show-response.js0000664000175000017500000000236100000000000033574 0ustar00zuulzuul00000000000000{ "volume": { "attachments": [], "availability_zone": "nova", "bootable": "false", "consistencygroup_id": null, "created_at": "%(strtime)s", "description": null, "encrypted": false, "id": "%(uuid)s", "links": [ { "href": "%(host)s/v3/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/volumes/%(uuid)s", "rel": "self" }, { "href": "%(host)s/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/volumes/%(uuid)s", "rel": "bookmark" } ], "metadata": {}, "migration_status": null, "multiattach": false, "name": null, "os-vol-host-attr:host": null, 
"os-vol-mig-status-attr:migstat": null, "os-vol-mig-status-attr:name_id": null, "os-vol-tenant-attr:tenant_id": "%(uuid)s", "replication_status": null, "size": 10, "snapshot_id": null, "source_volid": null, "status": "creating", "updated_at": null, "user_id": "%(uuid)s", "volume_type": "__DEFAULT__", "provider_id": null, "group_id": null, "service_uuid": null, "shared_targets": true } } ././@PaxHeader0000000000000000000000000000021400000000000011452 xustar0000000000000000118 path=cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/volumes/v3.48/volume-update-response.json.tpl 22 mtime=1759315527.0 cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/volumes/v3.48/volume-update-response.0000664000175000017500000000217600000000000033545 0ustar00zuulzuul00000000000000{ "volume": { "attachments": [], "availability_zone": "nova", "bootable": "false", "consistencygroup_id": null, "created_at": "%(strtime)s", "description": "This is yet, another volume.", "encrypted": false, "id": "%(uuid)s", "links": [ { "href": "%(host)s/v3/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/volumes/%(uuid)s", "rel": "self" }, { "href": "%(host)s/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/volumes/%(uuid)s", "rel": "bookmark" } ], "metadata": { "name": "metadata0" }, "migration_status": null, "multiattach": false, "name": "vol-003", "replication_status": null, "size": 10, "snapshot_id": null, "source_volid": null, "status": "creating", "updated_at": null, "user_id": "%(uuid)s", "volume_type": "__DEFAULT__", "group_id": null, "provider_id": null, "service_uuid": null, "shared_targets": true } } ././@PaxHeader0000000000000000000000000000022400000000000011453 xustar0000000000000000126 path=cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/volumes/v3.48/volumes-list-detailed-response.json.tpl 22 mtime=1759315527.0 cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/volumes/v3.48/volumes-list-detailed-r0000664000175000017500000000263300000000000033515 0ustar00zuulzuul00000000000000{ "volumes": [ { "attachments": [], "availability_zone": "nova", "bootable": "false", "consistencygroup_id": null, "created_at": "%(strtime)s", "description": null, "encrypted": false, "id": "%(uuid)s", "links": [ { "href": "%(host)s/v3/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/volumes/%(uuid)s", "rel": "self" }, { "href": "%(host)s/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/volumes/%(uuid)s", "rel": "bookmark" } ], "metadata": {}, "migration_status": null, "multiattach": false, "name": null, "os-vol-host-attr:host": null, "os-vol-mig-status-attr:migstat": null, "os-vol-mig-status-attr:name_id": null, "os-vol-tenant-attr:tenant_id": "%(uuid)s", "replication_status": null, "size": 10, "snapshot_id": null, "source_volid": null, "status": "creating", "updated_at": null, "user_id": "%(uuid)s", "volume_type": "%(name)s", "service_uuid": null, "provider_id": null, "group_id": null, "shared_targets": true } ] } ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1759315577.151119 cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/volumes/v3.61/0000775000175000017500000000000000000000000027106 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000021400000000000011452 xustar0000000000000000118 path=cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/volumes/v3.61/volume-create-response.json.tpl 22 mtime=1759315527.0 
cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/volumes/v3.61/volume-create-response.0000664000175000017500000000212400000000000033512 0ustar00zuulzuul00000000000000{ "volume": { "attachments": [], "availability_zone": "nova", "bootable": "false", "consistencygroup_id": null, "created_at": "%(strtime)s", "description": null, "encrypted": false, "id": "%(uuid)s", "links": [ { "href": "%(host)s/v3/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/volumes/%(uuid)s", "rel": "self" }, { "href": "%(host)s/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/volumes/%(uuid)s", "rel": "bookmark" } ], "metadata": {}, "migration_status": null, "multiattach": false, "name": null, "replication_status": null, "size": 10, "snapshot_id": null, "source_volid": null, "status": "creating", "updated_at": null, "user_id": "%(uuid)s", "volume_type": "__DEFAULT__", "group_id": null, "provider_id": null, "service_uuid": null, "shared_targets": true, "cluster_name": null } } ././@PaxHeader0000000000000000000000000000021200000000000011450 xustar0000000000000000116 path=cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/volumes/v3.61/volume-show-response.json.tpl 22 mtime=1759315527.0 cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/volumes/v3.61/volume-show-response.js0000664000175000017500000000241700000000000033571 0ustar00zuulzuul00000000000000{ "volume": { "attachments": [], "availability_zone": "nova", "bootable": "false", "consistencygroup_id": null, "created_at": "%(strtime)s", "description": null, "encrypted": false, "id": "%(uuid)s", "links": [ { "href": "%(host)s/v3/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/volumes/%(uuid)s", "rel": "self" }, { "href": "%(host)s/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/volumes/%(uuid)s", "rel": "bookmark" } ], "metadata": {}, "migration_status": null, "multiattach": false, "name": null, "os-vol-host-attr:host": null, "os-vol-mig-status-attr:migstat": null, "os-vol-mig-status-attr:name_id": null, "os-vol-tenant-attr:tenant_id": "%(uuid)s", "replication_status": null, "size": 10, "snapshot_id": null, "source_volid": null, "status": "creating", "updated_at": null, "user_id": "%(uuid)s", "volume_type": "__DEFAULT__", "provider_id": null, "group_id": null, "service_uuid": null, "shared_targets": true, "cluster_name": null } } ././@PaxHeader0000000000000000000000000000021400000000000011452 xustar0000000000000000118 path=cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/volumes/v3.61/volume-update-response.json.tpl 22 mtime=1759315527.0 cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/volumes/v3.61/volume-update-response.0000664000175000017500000000223400000000000033533 0ustar00zuulzuul00000000000000{ "volume": { "attachments": [], "availability_zone": "nova", "bootable": "false", "consistencygroup_id": null, "created_at": "%(strtime)s", "description": "This is yet, another volume.", "encrypted": false, "id": "%(uuid)s", "links": [ { "href": "%(host)s/v3/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/volumes/%(uuid)s", "rel": "self" }, { "href": "%(host)s/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/volumes/%(uuid)s", "rel": "bookmark" } ], "metadata": { "name": "metadata0" }, "migration_status": null, "multiattach": false, "name": "vol-003", "replication_status": null, "size": 10, "snapshot_id": null, "source_volid": null, "status": "creating", "updated_at": null, "user_id": "%(uuid)s", "volume_type": "__DEFAULT__", "group_id": null, "provider_id": null, "service_uuid": null, "shared_targets": true, "cluster_name": null } } 
././@PaxHeader0000000000000000000000000000022400000000000011453 xustar0000000000000000126 path=cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/volumes/v3.61/volumes-list-detailed-response.json.tpl 22 mtime=1759315527.0 cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/volumes/v3.61/volumes-list-detailed-r0000664000175000017500000000267500000000000033516 0ustar00zuulzuul00000000000000{ "volumes": [ { "attachments": [], "availability_zone": "nova", "bootable": "false", "consistencygroup_id": null, "created_at": "%(strtime)s", "description": null, "encrypted": false, "id": "%(uuid)s", "links": [ { "href": "%(host)s/v3/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/volumes/%(uuid)s", "rel": "self" }, { "href": "%(host)s/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/volumes/%(uuid)s", "rel": "bookmark" } ], "metadata": {}, "migration_status": null, "multiattach": false, "name": null, "os-vol-host-attr:host": null, "os-vol-mig-status-attr:migstat": null, "os-vol-mig-status-attr:name_id": null, "os-vol-tenant-attr:tenant_id": "%(uuid)s", "replication_status": null, "size": 10, "snapshot_id": null, "source_volid": null, "status": "creating", "updated_at": null, "user_id": "%(uuid)s", "volume_type": "%(name)s", "service_uuid": null, "provider_id": null, "group_id": null, "shared_targets": true, "cluster_name": null } ] } ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1759315577.151119 cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/volumes/v3.63/0000775000175000017500000000000000000000000027110 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000021400000000000011452 xustar0000000000000000118 path=cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/volumes/v3.63/volume-create-response.json.tpl 22 mtime=1759315527.0 cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/volumes/v3.63/volume-create-response.0000664000175000017500000000217200000000000033517 0ustar00zuulzuul00000000000000{ "volume": { "attachments": [], "availability_zone": "nova", "bootable": "false", "consistencygroup_id": null, "created_at": "%(strtime)s", "description": null, "encrypted": false, "id": "%(uuid)s", "links": [ { "href": "%(host)s/v3/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/volumes/%(uuid)s", "rel": "self" }, { "href": "%(host)s/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/volumes/%(uuid)s", "rel": "bookmark" } ], "metadata": {}, "migration_status": null, "multiattach": false, "name": null, "replication_status": null, "size": 10, "snapshot_id": null, "source_volid": null, "status": "creating", "updated_at": null, "user_id": "%(uuid)s", "volume_type": "__DEFAULT__", "group_id": null, "provider_id": null, "service_uuid": null, "shared_targets": true, "cluster_name": null, "volume_type_id": "%(uuid)s" } } ././@PaxHeader0000000000000000000000000000021200000000000011450 xustar0000000000000000116 path=cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/volumes/v3.63/volume-show-response.json.tpl 22 mtime=1759315527.0 cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/volumes/v3.63/volume-show-response.js0000664000175000017500000000246500000000000033576 0ustar00zuulzuul00000000000000{ "volume": { "attachments": [], "availability_zone": "nova", "bootable": "false", "consistencygroup_id": null, "created_at": "%(strtime)s", "description": null, "encrypted": false, "id": "%(uuid)s", "links": [ { "href": "%(host)s/v3/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/volumes/%(uuid)s", "rel": "self" }, { "href": 
"%(host)s/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/volumes/%(uuid)s", "rel": "bookmark" } ], "metadata": {}, "migration_status": null, "multiattach": false, "name": null, "os-vol-host-attr:host": null, "os-vol-mig-status-attr:migstat": null, "os-vol-mig-status-attr:name_id": null, "os-vol-tenant-attr:tenant_id": "%(uuid)s", "replication_status": null, "size": 10, "snapshot_id": null, "source_volid": null, "status": "creating", "updated_at": null, "user_id": "%(uuid)s", "volume_type": "__DEFAULT__", "provider_id": null, "group_id": null, "service_uuid": null, "shared_targets": true, "cluster_name": null, "volume_type_id": "%(uuid)s" } } ././@PaxHeader0000000000000000000000000000021400000000000011452 xustar0000000000000000118 path=cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/volumes/v3.63/volume-update-response.json.tpl 22 mtime=1759315527.0 cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/volumes/v3.63/volume-update-response.0000664000175000017500000000230200000000000033531 0ustar00zuulzuul00000000000000{ "volume": { "attachments": [], "availability_zone": "nova", "bootable": "false", "consistencygroup_id": null, "created_at": "%(strtime)s", "description": "This is yet, another volume.", "encrypted": false, "id": "%(uuid)s", "links": [ { "href": "%(host)s/v3/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/volumes/%(uuid)s", "rel": "self" }, { "href": "%(host)s/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/volumes/%(uuid)s", "rel": "bookmark" } ], "metadata": { "name": "metadata0" }, "migration_status": null, "multiattach": false, "name": "vol-003", "replication_status": null, "size": 10, "snapshot_id": null, "source_volid": null, "status": "creating", "updated_at": null, "user_id": "%(uuid)s", "volume_type": "__DEFAULT__", "group_id": null, "provider_id": null, "service_uuid": null, "shared_targets": true, "cluster_name": null, "volume_type_id": "%(uuid)s" } } ././@PaxHeader0000000000000000000000000000022400000000000011453 xustar0000000000000000126 path=cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/volumes/v3.63/volumes-list-detailed-response.json.tpl 22 mtime=1759315527.0 cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/volumes/v3.63/volumes-list-detailed-r0000664000175000017500000000274600000000000033517 0ustar00zuulzuul00000000000000{ "volumes": [ { "attachments": [], "availability_zone": "nova", "bootable": "false", "consistencygroup_id": null, "created_at": "%(strtime)s", "description": null, "encrypted": false, "id": "%(uuid)s", "links": [ { "href": "%(host)s/v3/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/volumes/%(uuid)s", "rel": "self" }, { "href": "%(host)s/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/volumes/%(uuid)s", "rel": "bookmark" } ], "metadata": {}, "migration_status": null, "multiattach": false, "name": null, "os-vol-host-attr:host": null, "os-vol-mig-status-attr:migstat": null, "os-vol-mig-status-attr:name_id": null, "os-vol-tenant-attr:tenant_id": "%(uuid)s", "replication_status": null, "size": 10, "snapshot_id": null, "source_volid": null, "status": "creating", "updated_at": null, "user_id": "%(uuid)s", "volume_type": "%(name)s", "volume_type_id": "%(uuid)s", "service_uuid": null, "provider_id": null, "group_id": null, "shared_targets": true, "cluster_name": null } ] }././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1759315577.151119 cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/volumes/v3.65/0000775000175000017500000000000000000000000027112 
5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000021400000000000011452 xustar0000000000000000118 path=cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/volumes/v3.65/volume-create-response.json.tpl 22 mtime=1759315527.0 cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/volumes/v3.65/volume-create-response.0000664000175000017500000000223200000000000033516 0ustar00zuulzuul00000000000000{ "volume": { "attachments": [], "availability_zone": "nova", "bootable": "false", "consistencygroup_id": null, "created_at": "%(strtime)s", "description": null, "encrypted": false, "id": "%(uuid)s", "links": [ { "href": "%(host)s/v3/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/volumes/%(uuid)s", "rel": "self" }, { "href": "%(host)s/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/volumes/%(uuid)s", "rel": "bookmark" } ], "metadata": {}, "migration_status": null, "multiattach": false, "name": null, "replication_status": null, "size": 10, "snapshot_id": null, "source_volid": null, "status": "creating", "updated_at": null, "user_id": "%(uuid)s", "volume_type": "__DEFAULT__", "group_id": null, "provider_id": null, "service_uuid": null, "shared_targets": true, "cluster_name": null, "volume_type_id": "%(uuid)s", "consumes_quota": true } } ././@PaxHeader0000000000000000000000000000021200000000000011450 xustar0000000000000000116 path=cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/volumes/v3.65/volume-show-response.json.tpl 22 mtime=1759315527.0 cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/volumes/v3.65/volume-show-response.js0000664000175000017500000000252500000000000033575 0ustar00zuulzuul00000000000000{ "volume": { "attachments": [], "availability_zone": "nova", "bootable": "false", "consistencygroup_id": null, "created_at": "%(strtime)s", "description": null, "encrypted": false, "id": "%(uuid)s", "links": [ { "href": "%(host)s/v3/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/volumes/%(uuid)s", "rel": "self" }, { "href": "%(host)s/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/volumes/%(uuid)s", "rel": "bookmark" } ], "metadata": {}, "migration_status": null, "multiattach": false, "name": null, "os-vol-host-attr:host": null, "os-vol-mig-status-attr:migstat": null, "os-vol-mig-status-attr:name_id": null, "os-vol-tenant-attr:tenant_id": "%(uuid)s", "replication_status": null, "size": 10, "snapshot_id": null, "source_volid": null, "status": "creating", "updated_at": null, "user_id": "%(uuid)s", "volume_type": "__DEFAULT__", "provider_id": null, "group_id": null, "service_uuid": null, "shared_targets": true, "cluster_name": null, "volume_type_id": "%(uuid)s", "consumes_quota": true } } ././@PaxHeader0000000000000000000000000000021400000000000011452 xustar0000000000000000118 path=cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/volumes/v3.65/volume-update-response.json.tpl 22 mtime=1759315527.0 cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/volumes/v3.65/volume-update-response.0000664000175000017500000000234200000000000033537 0ustar00zuulzuul00000000000000{ "volume": { "attachments": [], "availability_zone": "nova", "bootable": "false", "consistencygroup_id": null, "created_at": "%(strtime)s", "description": "This is yet, another volume.", "encrypted": false, "id": "%(uuid)s", "links": [ { "href": "%(host)s/v3/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/volumes/%(uuid)s", "rel": "self" }, { "href": "%(host)s/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/volumes/%(uuid)s", "rel": "bookmark" } ], "metadata": { "name": "metadata0" }, "migration_status": null, 
"multiattach": false, "name": "vol-003", "replication_status": null, "size": 10, "snapshot_id": null, "source_volid": null, "status": "creating", "updated_at": null, "user_id": "%(uuid)s", "volume_type": "__DEFAULT__", "group_id": null, "provider_id": null, "service_uuid": null, "shared_targets": true, "cluster_name": null, "volume_type_id": "%(uuid)s", "consumes_quota": true } } ././@PaxHeader0000000000000000000000000000022400000000000011453 xustar0000000000000000126 path=cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/volumes/v3.65/volumes-list-detailed-response.json.tpl 22 mtime=1759315527.0 cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/volumes/v3.65/volumes-list-detailed-r0000664000175000017500000000301300000000000033505 0ustar00zuulzuul00000000000000{ "volumes": [ { "attachments": [], "availability_zone": "nova", "bootable": "false", "consistencygroup_id": null, "created_at": "%(strtime)s", "description": null, "encrypted": false, "id": "%(uuid)s", "links": [ { "href": "%(host)s/v3/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/volumes/%(uuid)s", "rel": "self" }, { "href": "%(host)s/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/volumes/%(uuid)s", "rel": "bookmark" } ], "metadata": {}, "migration_status": null, "multiattach": false, "name": null, "os-vol-host-attr:host": null, "os-vol-mig-status-attr:migstat": null, "os-vol-mig-status-attr:name_id": null, "os-vol-tenant-attr:tenant_id": "%(uuid)s", "replication_status": null, "size": 10, "snapshot_id": null, "source_volid": null, "status": "creating", "updated_at": null, "user_id": "%(uuid)s", "volume_type": "%(name)s", "volume_type_id": "%(uuid)s", "service_uuid": null, "provider_id": null, "group_id": null, "shared_targets": true, "cluster_name": null, "consumes_quota": true } ] } ././@PaxHeader0000000000000000000000000000020500000000000011452 xustar0000000000000000111 path=cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/volumes/volume-create-request.json.tpl 22 mtime=1759315527.0 cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/volumes/volume-create-request.json.tp0000664000175000017500000000103600000000000034104 0ustar00zuulzuul00000000000000{ "volume": { "size": 10, "availability_zone": null, "source_volid": null, "description": null, "multiattach": false, "snapshot_id": null, "backup_id": null, "name": null, "imageRef": null, "volume_type": null, "metadata": {}, "consistencygroup_id": null }, "OS-SCH-HNT:scheduler_hints": { "same_host": [ "a0cf03a5-d921-4877-bb5c-86d26cf818e1", "8c19174f-4220-44f0-824a-cd1eeef10287" ] } } ././@PaxHeader0000000000000000000000000000020600000000000011453 xustar0000000000000000112 path=cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/volumes/volume-create-response.json.tpl 22 mtime=1759315527.0 cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/volumes/volume-create-response.json.t0000664000175000017500000000170000000000000034070 0ustar00zuulzuul00000000000000{ "volume": { "attachments": [], "availability_zone": "nova", "bootable": "false", "consistencygroup_id": null, "created_at": "%(strtime)s", "description": null, "encrypted": false, "id": "%(uuid)s", "links": [ { "href": "%(host)s/v3/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/volumes/%(uuid)s", "rel": "self" }, { "href": "%(host)s/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/volumes/%(uuid)s", "rel": "bookmark" } ], "metadata": {}, "migration_status": null, "multiattach": false, "name": null, "replication_status": null, "size": 10, "snapshot_id": null, "source_volid": null, "status": 
"creating", "updated_at": null, "user_id": "%(uuid)s", "volume_type": "__DEFAULT__" } }././@PaxHeader0000000000000000000000000000021600000000000011454 xustar0000000000000000120 path=cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/volumes/volume-metadata-create-request.json.tpl 22 mtime=1759315527.0 cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/volumes/volume-metadata-create-reques0000664000175000017500000000007000000000000034101 0ustar00zuulzuul00000000000000{ "metadata": { "name": "metadata0" } } ././@PaxHeader0000000000000000000000000000021700000000000011455 xustar0000000000000000121 path=cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/volumes/volume-metadata-create-response.json.tpl 22 mtime=1759315527.0 cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/volumes/volume-metadata-create-respon0000664000175000017500000000006700000000000034111 0ustar00zuulzuul00000000000000{ "metadata": { "name": "metadata0" } }././@PaxHeader0000000000000000000000000000022100000000000011450 xustar0000000000000000123 path=cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/volumes/volume-metadata-show-key-response.json.tpl 22 mtime=1759315527.0 cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/volumes/volume-metadata-show-key-resp0000664000175000017500000000006300000000000034053 0ustar00zuulzuul00000000000000{ "meta": { "name": "metadata1" } }././@PaxHeader0000000000000000000000000000021500000000000011453 xustar0000000000000000119 path=cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/volumes/volume-metadata-show-response.json.tpl 22 mtime=1759315527.0 cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/volumes/volume-metadata-show-response0000664000175000017500000000002600000000000034151 0ustar00zuulzuul00000000000000{ "metadata": {} }././@PaxHeader0000000000000000000000000000022200000000000011451 xustar0000000000000000124 path=cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/volumes/volume-metadata-update-key-request.json.tpl 22 mtime=1759315527.0 cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/volumes/volume-metadata-update-key-re0000664000175000017500000000006300000000000034012 0ustar00zuulzuul00000000000000{ "meta": { "name": "new_name" } } ././@PaxHeader0000000000000000000000000000022300000000000011452 xustar0000000000000000125 path=cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/volumes/volume-metadata-update-key-response.json.tpl 22 mtime=1759315527.0 cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/volumes/volume-metadata-update-key-re0000664000175000017500000000006200000000000034011 0ustar00zuulzuul00000000000000{ "meta": { "name": "new_name" } }././@PaxHeader0000000000000000000000000000021600000000000011454 xustar0000000000000000120 path=cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/volumes/volume-metadata-update-request.json.tpl 22 mtime=1759315527.0 cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/volumes/volume-metadata-update-reques0000664000175000017500000000007000000000000034120 0ustar00zuulzuul00000000000000{ "metadata": { "name": "metadata1" } } ././@PaxHeader0000000000000000000000000000021700000000000011455 xustar0000000000000000121 path=cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/volumes/volume-metadata-update-response.json.tpl 22 mtime=1759315527.0 
cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/volumes/volume-metadata-update-respon0000664000175000017500000000006700000000000034130 0ustar00zuulzuul00000000000000{ "metadata": { "name": "metadata1" } }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/volumes/volume-show-response.json.tpl0000664000175000017500000000217300000000000034146 0ustar00zuulzuul00000000000000{ "volume": { "attachments": [], "availability_zone": "nova", "bootable": "false", "consistencygroup_id": null, "created_at": "%(strtime)s", "description": null, "encrypted": false, "id": "%(uuid)s", "links": [ { "href": "%(host)s/v3/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/volumes/%(uuid)s", "rel": "self" }, { "href": "%(host)s/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/volumes/%(uuid)s", "rel": "bookmark" } ], "metadata": {}, "migration_status": null, "multiattach": false, "name": null, "os-vol-host-attr:host": null, "os-vol-mig-status-attr:migstat": null, "os-vol-mig-status-attr:name_id": null, "os-vol-tenant-attr:tenant_id": "%(uuid)s", "replication_status": null, "size": 10, "snapshot_id": null, "source_volid": null, "status": "creating", "updated_at": null, "user_id": "%(uuid)s", "volume_type": "__DEFAULT__" } }././@PaxHeader0000000000000000000000000000020500000000000011452 xustar0000000000000000111 path=cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/volumes/volume-update-request.json.tpl 22 mtime=1759315527.0 cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/volumes/volume-update-request.json.tp0000664000175000017500000000025400000000000034124 0ustar00zuulzuul00000000000000{ "volume": { "name": "vol-003", "description": "This is yet, another volume.", "metadata": { "name": "metadata0" } } } ././@PaxHeader0000000000000000000000000000020600000000000011453 xustar0000000000000000112 path=cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/volumes/volume-update-response.json.tpl 22 mtime=1759315527.0 cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/volumes/volume-update-response.json.t0000664000175000017500000000201000000000000034102 0ustar00zuulzuul00000000000000{ "volume": { "attachments": [], "availability_zone": "nova", "bootable": "false", "consistencygroup_id": null, "created_at": "%(strtime)s", "description": "This is yet, another volume.", "encrypted": false, "id": "%(uuid)s", "links": [ { "href": "%(host)s/v3/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/volumes/%(uuid)s", "rel": "self" }, { "href": "%(host)s/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/volumes/%(uuid)s", "rel": "bookmark" } ], "metadata": { "name": "metadata0" }, "migration_status": null, "multiattach": false, "name": "vol-003", "replication_status": null, "size": 10, "snapshot_id": null, "source_volid": null, "status": "creating", "updated_at": null, "user_id": "%(uuid)s", "volume_type": "__DEFAULT__" } }././@PaxHeader0000000000000000000000000000021600000000000011454 xustar0000000000000000120 path=cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/volumes/volumes-list-detailed-response.json.tpl 22 mtime=1759315527.0 cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/volumes/volumes-list-detailed-respons0000664000175000017500000000242600000000000034163 0ustar00zuulzuul00000000000000{ "volumes": [ { "attachments": [], "availability_zone": "nova", "bootable": "false", "consistencygroup_id": null, "created_at": "%(strtime)s", "description": null, "encrypted": false, 
"id": "%(uuid)s", "links": [ { "href": "%(host)s/v3/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/volumes/%(uuid)s", "rel": "self" }, { "href": "%(host)s/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/volumes/%(uuid)s", "rel": "bookmark" } ], "metadata": {}, "migration_status": null, "multiattach": false, "name": null, "os-vol-host-attr:host": null, "os-vol-mig-status-attr:migstat": null, "os-vol-mig-status-attr:name_id": null, "os-vol-tenant-attr:tenant_id": "%(uuid)s", "replication_status": null, "size": 10, "snapshot_id": null, "source_volid": null, "status": "creating", "updated_at": null, "user_id": "%(uuid)s", "volume_type": "%(name)s" } ] } ././@PaxHeader0000000000000000000000000000020500000000000011452 xustar0000000000000000111 path=cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/volumes/volumes-list-response.json.tpl 22 mtime=1759315527.0 cinder-27.0.0/cinder/tests/functional/api_sample_tests/samples/volumes/volumes-list-response.json.tp0000664000175000017500000000073300000000000034150 0ustar00zuulzuul00000000000000{ "volumes": [ { "id": "%(uuid)s", "links": [ { "href": "%(host)s/v3/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/volumes/%(uuid)s", "rel": "self" }, { "href": "%(host)s/89afd400-b646-4bbc-b12b-c0a4d63e5bd3/volumes/%(uuid)s", "rel": "bookmark" } ], "name": null } ] }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/functional/api_sample_tests/test_backups.py0000664000175000017500000000660700000000000026245 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from oslo_serialization import jsonutils from cinder.api import microversions as mv from cinder.tests.functional import api_samples_test_base as test_base @test_base.VolumesSampleBase.use_versions( mv.BASE_VERSION, # 3.0 mv.BACKUP_UPDATE, # 3.9 mv.BACKUP_PROJECT, # 3.18 mv.BACKUP_METADATA, # 3.43 mv.SUPPORT_COUNT_INFO, # 3.45 mv.BACKUP_PROJECT_USER_ID) # 3.56 class BackupClassesSampleJsonTest(test_base.VolumesSampleBase): sample_dir = "backups" def setUp(self): super(BackupClassesSampleJsonTest, self).setUp() res = self._create_volume() res = jsonutils.loads(res.content)['volume'] self._poll_volume_while(res['id'], ['creating']) self.subs = { "volume_id": res['id'] } with self.common_api_sample(): self.response = self._do_post('backups', 'backup-create-request', self.subs) def test_backup_create(self): self._verify_response('backup-create-response', {}, self.response, 202) @test_base.VolumesSampleBase.override_mv(mv.BASE_VERSION) # 3.0 def test_backup_list(self): response = self._do_get('backups') self._verify_response('backups-list-response', {}, response, 200) @test_base.VolumesSampleBase.override_mv(mv.SUPPORT_COUNT_INFO) # 3.45 def test_backup_list_with_count(self): response = self._do_get('backups?with_count=True') self._verify_response('backups-list-response', {}, response, 200) @test_base.VolumesSampleBase.override_mv(mv.BACKUP_UPDATE) # 3.9 def test_backup_update(self): res = jsonutils.loads(self.response.content)['backup'] response = self._do_put('backups/%s' % res['id'], 'backup-update-request') self._verify_response('backup-update-response', {}, response, 200) def test_backup_show(self): res = jsonutils.loads(self.response.content)['backup'] response = self._do_get('backups/%s' % res['id']) self._verify_response('backup-show-response', {}, response, 200) @test_base.VolumesSampleBase.override_mv(mv.BASE_VERSION) # 3.0 def test_backup_list_detail(self): response = self._do_get('backups/detail') self._verify_response('backups-list-detailed-response', {}, response, 200) @test_base.VolumesSampleBase.override_mv(mv.SUPPORT_COUNT_INFO) # 3.45 def test_backup_list_detail_with_count(self): response = self._do_get('backups/detail?with_count=True') self._verify_response('backups-list-detailed-response', {}, response, 200) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/functional/api_sample_tests/test_extensions.py0000664000175000017500000000225600000000000027010 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from cinder.api.openstack import api_version_request from cinder.tests.functional import api_samples_test_base class ExtensionsSampleJsonTest(api_samples_test_base.ApiSampleTestBase): sample_dir = "extensions" def setUp(self): super(ExtensionsSampleJsonTest, self).setUp() self.subs = { 'max_api_version': api_version_request._MAX_API_VERSION} def test_extensions(self): response = self._do_get('extensions') self._verify_response('extensions-list-response', self.subs, response, 200, update_links=False) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/functional/api_sample_tests/test_limits.py0000664000175000017500000000166400000000000026114 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from cinder.tests.functional import api_samples_test_base as test_base class LimitsSampleJsonTest(test_base.VolumesSampleBase): sample_dir = "limits" def setUp(self): super(LimitsSampleJsonTest, self).setUp() def test_limits_get(self): response = self._do_get('limits') self._verify_response('limits-show-response', {}, response, 200) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/functional/api_sample_tests/test_qos.py0000664000175000017500000000416000000000000025407 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from oslo_serialization import jsonutils from cinder.tests.functional import api_samples_test_base as test_base class QOSSampleJsonTest(test_base.VolumesSampleBase): sample_dir = "qos" def setUp(self): super(QOSSampleJsonTest, self).setUp() self.response = self._do_post('qos-specs', 'qos-create-request') def test_qos_create(self): self._verify_response('qos-create-response', {}, self.response, 200) def test_qos_list(self): response = self._do_get('qos-specs') self._verify_response('qos-list-response', {}, response, 200) def test_qos_show(self): res = jsonutils.loads(self.response.content)['qos_specs'] response = self._do_get('qos-specs/%s' % res['id']) self._verify_response('qos-show-response', {}, response, 200) def test_qos_update(self): res = jsonutils.loads(self.response.content)['qos_specs'] response = self._do_put('qos-specs/%s' % res['id'], 'qos-update-request') self._verify_response('qos-update-response', {}, response, 200) def test_qos_show_associations(self): res = jsonutils.loads(self.response.content)['qos_specs'] response = self._do_get('qos-specs/%s/associations' % res['id']) self._verify_response('qos_show_response', {}, response, 200) def test_qos_disassociate_all(self): res = jsonutils.loads(self.response.content)['qos_specs'] response = self._do_get('qos-specs/%s/disassociate_all' % res['id']) self.assertEqual(202, response.status_code) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/functional/api_sample_tests/test_quota_classes.py0000664000175000017500000000244100000000000027453 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from cinder.tests.functional import api_samples_test_base as test_base class QuotaClassesSampleJsonTest(test_base.VolumesSampleBase): sample_dir = "quota_classes" def setUp(self): super(QuotaClassesSampleJsonTest, self).setUp() def test_quota_classes_show(self): response = self._do_get('os-quota-class-sets/test_class') self._verify_response('quota-classes-show-response', {}, response, 200) def test_quotas_update(self): response = self._do_put('os-quota-class-sets/test_class', 'quota-classes-update-request') self._verify_response('quota-classes-update-response', {}, response, 200) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/functional/api_sample_tests/test_quota_sets.py0000664000175000017500000000311700000000000026775 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. from cinder.tests.functional import api_samples_test_base as test_base class QuotaSetsSampleJsonTest(test_base.VolumesSampleBase): sample_dir = "quota_sets" def setUp(self): super(QuotaSetsSampleJsonTest, self).setUp() def test_quotas_show(self): response = self._do_get('os-quota-sets/fake_tenant') self._verify_response('quotas-show-response', {}, response, 200) def test_quotas_show_usage(self): response = self._do_get('os-quota-sets/fake_tenant?usage=True') self._verify_response('quotas-show-usage-response', {}, response, 200) def test_quotas_update(self): response = self._do_put('os-quota-sets/fake_tenant', 'quotas-update-request') self._verify_response('quotas-update-response', {}, response, 200) def test_quotas_defaults(self): response = self._do_get('os-quota-sets/fake_tenant/defaults') self._verify_response('quotas-show-defaults-response', {}, response, 200) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/functional/api_sample_tests/test_snapshot_manage_extensions.py0000664000175000017500000000256600000000000032243 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from cinder.tests.functional.api_sample_tests import fakes from cinder.tests.functional import api_samples_test_base as base class SnapshotManageExtensionsSampleJsonTest(base.ApiSampleTestBase): sample_dir = "snapshot_manage_extensions" def setUp(self): super(SnapshotManageExtensionsSampleJsonTest, self).setUp() self.stub_out("cinder.api.contrib.snapshot_manage." "SnapshotManageController.create", fakes.stub_manage_existing_snapshot) def test_snapshot_manage_create(self): response = self._do_post('os-snapshot-manage', 'snapshot-manage-request', subs=self.subs) self._verify_response('snapshot-manage-response', {}, response, 202) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/functional/api_sample_tests/test_snapshots.py0000664000175000017500000001177700000000000026643 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from oslo_serialization import jsonutils from cinder.api import microversions as mv from cinder.tests.functional import api_samples_test_base as test_base class SnapshotBaseTest(test_base.VolumesSampleBase): sample_dir = "snapshots" def setup(self): res = self._create_volume() res = jsonutils.loads(res.content)['volume'] self._poll_volume_while(res['id'], ['creating']) self.subs = { "volume_id": res['id'] } with self.common_api_sample(): self.response = self._create_snapshot(self.subs) def _create_snapshot(self, subs=None): response = self._do_post('snapshots', 'snapshot-create-request', subs) return response @test_base.VolumesSampleBase.use_versions( mv.BASE_VERSION, # 3.0 mv.GROUP_SNAPSHOTS, # 3.14 mv.SNAPSHOT_LIST_USER_ID, # 3.41 mv.USE_QUOTA) # 3.65 class SnapshotDetailTests(SnapshotBaseTest): """Test snapshot details returned for operations with different MVs. The details of a snapshot have changed in the different microversions, and we have multiple operations that return them, so we should confirm that each microversion returns the right values for all these different operations. """ def test_snapshot_list_detail(self): response = self._do_get('snapshots/detail') self._verify_response('snapshots-list-detailed-response', {}, response, 200) def test_snapshot_create(self): self._verify_response('snapshot-create-response', {}, self.response, 202) def test_snapshot_show(self): res = jsonutils.loads(self.response.content)['snapshot'] response = self._do_get('snapshots/%s' % res['id']) self._verify_response('snapshot-show-response', {}, response, 200) def test_snapshot_update(self): res = jsonutils.loads(self.response.content)['snapshot'] # Use the request sample from the common API, since the request didn't # change with the microversion, what changes is the response. 
with self.common_api_sample(): response = self._do_put('snapshots/%s' % res['id'], 'snapshot-update-request') self._verify_response('snapshot-update-response', {}, response, 200) class VolumeSnapshotsSampleJsonTest(SnapshotBaseTest): def setUp(self): super(VolumeSnapshotsSampleJsonTest, self).setUp() self.setup() def test_snapshot_list(self): response = self._do_get('snapshots') self._verify_response('snapshots-list-response', {}, response, 200) def test_snapshot_metadata_show(self): res = jsonutils.loads(self.response.content)['snapshot'] response = self._do_get('snapshots/%s/metadata' % res['id']) self._verify_response('snapshot-metadata-show-response', {}, response, 200) def test_snapshot_metadata_create(self): res = jsonutils.loads(self.response.content)['snapshot'] response = self._do_post('snapshots/%s/metadata' % res['id'], 'snapshot-metadata-create-request') self._verify_response('snapshot-metadata-create-response', {}, response, 200) def test_snapshot_metadata_update(self): res = jsonutils.loads(self.response.content)['snapshot'] response = self._do_put('snapshots/%s/metadata' % res['id'], 'snapshot-metadata-update-request') self._verify_response('snapshot-metadata-update-response', {}, response, 200) def test_snapshot_metadata_show_specific_key(self): res = jsonutils.loads(self.response.content)['snapshot'] response = self._do_get('snapshots/%s/metadata/key' % res['id']) self._verify_response('snapshot-metadata-show-key-response', {}, response, 200) def test_snapshot_metadata_update_specific_key(self): res = jsonutils.loads(self.response.content)['snapshot'] response = self._do_put('snapshots/%s/metadata/key' % res['id'], 'snapshot-metadata-update-key-request') self._verify_response('snapshot-metadata-update-key-response', {}, response, 200) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/functional/api_sample_tests/test_versions.py0000664000175000017500000000270200000000000026455 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from cinder.api.openstack import api_version_request from cinder.tests.functional import api_samples_test_base class VersionsSampleJsonTest(api_samples_test_base.ApiSampleTestBase): sample_dir = "versions" def setUp(self): super(VersionsSampleJsonTest, self).setUp() self.subs = { 'max_api_version': api_version_request._MAX_API_VERSION} def test_versions_get_all(self): response = self.api.api_request('', strip_version=True) self._verify_response('versions-response', self.subs, response, 300, update_links=False) def test_versions_get_v3(self): response = self.api.api_request('v3/', strip_version=True) self._verify_response('version-show-response', self.subs, response, 200, update_links=False) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/functional/api_sample_tests/test_volume_actions.py0000664000175000017500000000273500000000000027642 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_serialization import jsonutils from cinder.tests.functional.api_sample_tests import fakes from cinder.tests.functional import api_samples_test_base as test_base class VolumeActionsSampleJsonTest(test_base.VolumesSampleBase): sample_dir = "volume_actions" def setUp(self): super(VolumeActionsSampleJsonTest, self).setUp() self.response = self._create_volume() self.stub_out("cinder.volume.api.API.copy_volume_to_image", fakes.stub_copy_volume_to_image) def test_volume_upload_image(self): res = jsonutils.loads(self.response.content)['volume'] self._poll_volume_while(res['id'], ['creating']) response = self._do_post('volumes/%s/action' % res['id'], 'volume-upload-to-image-request') self._verify_response('volume-upload-to-image-response', {}, response, 202) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/functional/api_sample_tests/test_volume_manage_extensions.py0000664000175000017500000000265200000000000031707 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from cinder.tests.functional.api_sample_tests import fakes from cinder.tests.functional import api_samples_test_base FAKE_HOST = 'hostname@backend' class VolumeActionsSampleJsonTest(api_samples_test_base.ApiSampleTestBase): sample_dir = "volume_manage_extensions" def setUp(self): super(VolumeActionsSampleJsonTest, self).setUp() self.subs = { 'host': FAKE_HOST } self.stub_out("cinder.api.contrib.volume_manage." 
"VolumeManageController.create", fakes.stub_manage_existing) def test_manage_existing(self): response = self._do_post('os-volume-manage', 'volume-manage-request', subs=self.subs) self._verify_response('volume-manage-response', {}, response, 202) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/functional/api_sample_tests/test_volume_transfer.py0000664000175000017500000000553500000000000030027 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_serialization import jsonutils from cinder.tests.functional import api_samples_test_base as test_base class VolumeTransferSampleJsonTest(test_base.VolumesSampleBase): sample_dir = "volume_transfer" def setUp(self): super(VolumeTransferSampleJsonTest, self).setUp() res = self._create_volume() res = jsonutils.loads(res.content)['volume'] self._poll_volume_while(res['id'], ['creating']) self.subs = { "volume_id": res['id'] } self.response = self._create_transfer(self.subs) def _create_transfer(self, subs=None): response = self._do_post('os-volume-transfer', 'volume-transfer-create-request', subs) return response def test_transfer_create(self): self._verify_response('volume-transfer-create-response', {}, self.response, 202) def test_transfer_accept(self): res = jsonutils.loads(self.response.content)['transfer'] subs = { "auth_key": res['auth_key'] } response = self._do_post( 'os-volume-transfer/%s/accept' % res['id'], 'volume-transfer-accept-request', subs) self._verify_response('volume-transfer-accept-response', {}, response, 202) def test_transfers_list(self): response = self._do_get('os-volume-transfer') self._verify_response('volume-transfers-list-response', {}, response, 200) def test_transfer_list_detail(self): res = jsonutils.loads(self.response.content)['transfer'] response = self._do_get('os-volume-transfer/%s' % res['id']) self._verify_response('volume-transfer-show-response', {}, response, 200) def test_transfers_list_detail(self): response = self._do_get('os-volume-transfer/detail') self._verify_response('volume-transfers-list-detailed-response', {}, response, 200) def test_transfer_delete(self): res = jsonutils.loads(self.response.content)['transfer'] response = self._do_delete('os-volume-transfer/%s' % res['id']) self.assertEqual(response.status_code, 202) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/functional/api_sample_tests/test_volume_transfers.py0000664000175000017500000000557200000000000030213 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_serialization import jsonutils from cinder.api import microversions as mv from cinder.tests.functional.api_sample_tests import test_volumes from cinder.tests.functional import api_samples_test_base as test_base @test_base.VolumesSampleBase.use_versions( mv.TRANSFER_WITH_SNAPSHOTS, mv.TRANSFER_WITH_HISTORY) class VolumeTransfersSampleJsonTest(test_volumes.test_base.VolumesSampleBase): sample_dir = "volume_transfers" def setUp(self): super(VolumeTransfersSampleJsonTest, self).setUp() res = self._create_volume() res = jsonutils.loads(res.content)['volume'] self._poll_volume_while(res['id'], ['creating']) self.subs = { "volume_id": res['id'] } def _create_transfers(self, subs=None): response = self._do_post('volume-transfers', 'volume-transfers-create-request', self.subs) return response def test_create_transfers(self): response = self._create_transfers(self.subs) self._verify_response('volume-transfers-create-response', {}, response, 202) def test_accept_transfer(self): response = self._create_transfers(self.subs) res = jsonutils.loads(response.content)['transfer'] subs = { 'auth_key': res['auth_key'] } with self.common_api_sample(): response = self._do_post('volume-transfers/%s/accept' % res['id'], 'volume-transfers-accept-request', subs) self._verify_response('volume-transfers-accept-response', {}, response, 202) def test_show_transfer(self): response = self._create_transfers(self.subs) res = jsonutils.loads(response.content)['transfer'] show_response = self._do_get('volume-transfers/%s' % res['id']) self._verify_response('volume-transfers-show-response', {}, show_response, 200) def test_delete_transfer(self): response = self._create_transfers(self.subs) res = jsonutils.loads(response.content)['transfer'] delete_res = self._do_delete('volume-transfers/%s' % res['id']) self.assertEqual(202, delete_res.status_code) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/functional/api_sample_tests/test_volume_types.py0000664000175000017500000001500600000000000027341 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
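# NOTE: Several of these sample tests (test_volumes, test_volume_transfers)
# are wrapped with the use_versions() class decorator from
# functional_helpers._FunctionalTestBase. It clones every test_* method once
# per listed microversion, so one test body is exercised against each API
# version. As an illustrative sketch (the version strings here are arbitrary
# examples, not the real microversion constants):
#
#     @test_base.VolumesSampleBase.use_versions('3.55', '3.57')
#     class ExampleTest(test_base.VolumesSampleBase):
#         def test_something(self):
#             ...
#
# behaves roughly as if test_something_3_55 and test_something_3_57 had been
# written by hand, each wrapped by override_mv() so that self.osapi_version is
# switched (and the optional setup() hook run) before the shared body executes.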
from oslo_config import cfg from oslo_serialization import jsonutils from cinder.tests.functional import api_samples_test_base CONF = cfg.CONF class VolumeTypesSampleJsonTest(api_samples_test_base.ApiSampleTestBase): sample_dir = "volume_type" def setUp(self): super(VolumeTypesSampleJsonTest, self).setUp() self.volume_type_name = "vol-type-001" self.subs = { "name": self.volume_type_name, "description": "volume type 0001", "bool": "True" } CONF.set_override("default_volume_type", "vol-type-001") def _volume_type_create(self, subs=None): subs = subs if subs is not None else self.subs response = self._do_post('types', 'volume-type-create-request', subs) return response def _encryption_type_create(self, volume_type_id): response = self._do_post(('types/%s/encryption') % volume_type_id, 'encryption-type-create-request') return response def test_volume_type_create(self): response = self._volume_type_create() self._verify_response('volume-type-create-response', self.subs, response, 200) def test_volume_type_show(self): res = self._volume_type_create() res = jsonutils.loads(res.content)['volume_type'] response = self._do_get('types/%s' % res['id']) self._verify_response('volume-type-show-response', self.subs, response, 200) def test_volume_type_update(self): res = self._volume_type_create() res = jsonutils.loads(res.content)['volume_type'] response = self._do_put( 'types/%s' % res['id'], 'volume-type-update-request', self.subs) self._verify_response('volume-type-update-response', self.subs, response, 200) def test_volume_type_extra_spec_create_update(self): res = self._volume_type_create() res = jsonutils.loads(res.content)['volume_type'] url = ("types/%s/extra_specs" % res['id']) response = self._do_post( url, 'volume-type-extra-specs-create-update-request', {}) self._verify_response( 'volume-type-extra-specs-create-update-response', {}, response, 200) def test_volume_type_all_extra_spec_show(self): res = self._volume_type_create() res = jsonutils.loads(res.content)['volume_type'] url = ("types/%s/extra_specs" % res['id']) response = self._do_get(url) self._verify_response( 'volume-type-all-extra-specs-show-response', {}, response, 200) def test_volume_type_specific_extra_spec_show(self): res = self._volume_type_create() res = jsonutils.loads(res.content)['volume_type'] url = ("types/%s/extra_specs/capabilities" % res['id']) response = self._do_get(url) self._verify_response( 'volume-type-specific-extra-specs-show-response', {}, response, 200) def test_volume_type_show_default(self): self._volume_type_create() response = self._do_get('types/default') self._verify_response('volume-type-default-response', self.subs, response, 200) def test_volume_type_list(self): subs = { "name": "vol-type-002", "description": "volume type 0002", "bool": "True" } self._volume_type_create() self._volume_type_create(subs) response = self._do_get('types') self._verify_response('volume-types-list-response', self.subs, response, 200) def test_encryption_type_show(self): res = self._volume_type_create() res = jsonutils.loads(res.content)['volume_type'] self._encryption_type_create(res['id']) response = self._do_get('types/%s/encryption' % res['id']) self._verify_response('encryption-type-show-response', self.subs, response, 200) def test_encryption_type_show_specific_spec(self): res = self._volume_type_create() res = jsonutils.loads(res.content)['volume_type'] self._encryption_type_create(res['id']) response = self._do_get('types/%s/encryption/cipher' % res['id']) 
self._verify_response('encryption-type-specific-specs-show-response', self.subs, response, 200) def test_encryption_type_create(self): res = self._volume_type_create() res = jsonutils.loads(res.content)['volume_type'] response = self._encryption_type_create(res['id']) self._verify_response('encryption-type-create-response', self.subs, response, 200) def test_encryption_type_update(self): res = self._volume_type_create() res = jsonutils.loads(res.content)['volume_type'] res_encrypt = self._encryption_type_create(res['id']) res_encrypt = jsonutils.loads(res_encrypt.content)['encryption'] response = self._do_put( 'types/%s/encryption/%s' % (res['id'], res_encrypt['encryption_id']), 'encryption-type-update-request') self._verify_response('encryption-type-update-response', self.subs, response, 200) def test_private_volume_type_access_add_list(self): subs = self.subs subs['bool'] = "False" res = self._volume_type_create(subs) res = jsonutils.loads(res.content)['volume_type'] self._do_post('types/%s/action' % res['id'], 'volume-type-access-add-request') response = self._do_get( 'types/%s/os-volume-type-access' % res['id']) self._verify_response('volume-type-access-list-response', {}, response, 200) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/functional/api_sample_tests/test_volumes.py0000664000175000017500000001143400000000000026301 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_serialization import jsonutils from cinder.api import microversions as mv from cinder.tests.functional import api_samples_test_base as test_base @test_base.VolumesSampleBase.use_versions( mv.BASE_VERSION, # 3.0 mv.GROUP_VOLUME, # 3.13 mv.VOLUME_DETAIL_PROVIDER_ID, # 3.21 mv.VOLUME_SHARED_TARGETS_AND_SERVICE_FIELDS, # 3.48 mv.VOLUME_CLUSTER_NAME, # 3.61 mv.VOLUME_TYPE_ID_IN_VOLUME_DETAIL, # 3.63 mv.USE_QUOTA) # 3.65 class VolumeDetailTests(test_base.VolumesSampleBase): """Test volume details returned for operations with different MVs. The details of a volume have changed in the different microversions, and we have multiple operations that return them, so we should confirm that each microversion returns the right values for all these different operations. """ def setup(self): """Create a volume before we run each test. This method is called by _FunctionalTestBase right before each test is called. We cannot create the volume on the setUp method because at that time the API version is still 3.0, so we need it to be created right after the microversion under test has been set. This way the create method is called using the right microversion, which is required for some tests, like test_volume_create. 
""" self.response = self._create_volume() def test_volume_list_detail(self): response = self._do_get('volumes/detail') self._verify_response('volumes-list-detailed-response', {}, response, 200) def test_volume_show_detail(self): res = jsonutils.loads(self.response.content)['volume'] response = self._do_get('volumes/%s' % res['id']) self._verify_response('volume-show-response', {}, response, 200) def test_volume_create(self): self._verify_response('volume-create-response', {}, self.response, 202) def test_volume_update(self): res = jsonutils.loads(self.response.content)['volume'] # Use the request sample from the common API, since the request didn't # change with the microversion, what changes is the response. with self.common_api_sample(): response = self._do_put('volumes/%s' % res['id'], 'volume-update-request') self._verify_response('volume-update-response', {}, response, 200) class VolumesSampleJsonTest(test_base.VolumesSampleBase): def setUp(self): super(test_base.VolumesSampleBase, self).setUp() self.response = self._create_volume() def test_volume_list(self): response = self._do_get('volumes') self._verify_response('volumes-list-response', {}, response, 200) def test_volume_metadata_create(self): res = jsonutils.loads(self.response.content)['volume'] response = self._do_post('volumes/%s/metadata' % res['id'], 'volume-metadata-create-request') self._verify_response('volume-metadata-create-response', {}, response, 200) def test_volume_metadata_show(self): res = jsonutils.loads(self.response.content)['volume'] response = self._do_get('volumes/%s/metadata' % res['id']) self._verify_response('volume-metadata-show-response', {}, response, 200) def test_volume_metadata_update(self): res = jsonutils.loads(self.response.content)['volume'] response = self._do_put('volumes/%s/metadata' % res['id'], 'volume-metadata-update-request') self._verify_response('volume-metadata-update-response', {}, response, 200) def test_volume_metadata_show_specific_key(self): res = jsonutils.loads(self.response.content)['volume'] self._do_put('volumes/%s/metadata' % res['id'], 'volume-metadata-update-request') response = self._do_get('volumes/%s/metadata/name' % res['id']) self._verify_response('volume-metadata-show-key-response', {}, response, 200) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/functional/api_samples_test_base.py0000664000175000017500000005232700000000000024550 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import contextlib import os import pprint import re from oslo_serialization import jsonutils from cinder.tests.functional import functional_helpers from cinder.tests.unit import test PROJECT_ID = "6f70656e737461636b20342065766572" # for pretty printing errors pp = pprint.PrettyPrinter(indent=4) class NoMatch(test.TestingException): pass def pretty_data(data): data = jsonutils.dumps(jsonutils.loads(data), sort_keys=True, indent=4) return '\n'.join(line.rstrip() for line in data.split('\n')).strip() def objectify(data): if not data: return {} # templates will contain values like %(foo)s # throughout them. If these are inside of double quoted # strings, life is good, and we can treat it just like valid # json to load it to python. # # However we've got some fields which are ints, like # aggregate_id. This means we've got a snippet in the sample # that looks like: # # "id": %(aggregate_id)s, # # which is not valid json, and will explode. We do a quick and # dirty transform of this to: # # "id": "%(int:aggregate_id)s", # # That makes it valid data to convert to json, but keeps # around the information that we need to drop those strings # later. The regex anchors from the ': ', as all of these will # be top rooted keys. data = re.sub(r'(\: )%\((.+)\)s([^"])', r'\1"%(int:\2)s"\3', data) return jsonutils.loads(data) class ApiSampleTestBase(functional_helpers._FunctionalTestBase): all_extensions = True sample_dir = None _project_id = True _use_common_sample = None def __init__(self, *args, **kwargs): super(ApiSampleTestBase, self).__init__(*args, **kwargs) self.subs = {} def setUp(self): super(ApiSampleTestBase, self).setUp() # this is used to generate sample docs self.generate_samples = os.getenv('GENERATE_SAMPLES') is not None def _get_flags(self): f = super()._get_flags() # Use noauth_include_project_id so the API samples tests include a # project_id in the API URLs. This is done for two reasons: # # 1. The API samples generated by the tests need to include a # project_id because the API documentation includes the project_id. # # 2. It ensures there are no regressions, and the API functions # correctly when a project_id is in the URL. The other functional # tests do not include the project_id, so we cover both variants. f['auth_strategy'] = {'v': 'noauth_include_project_id'} return f @property def subs(self): return self._subs @subs.setter def subs(self, value): non_strings = \ {k: v for k, v in value.items() if (not k == 'volume_host') and (not isinstance(v, str))} if len(non_strings) > 0: raise TypeError("subs can't contain non-string values:" "\n%(non_strings)s" % {'non_strings': non_strings}) else: self._subs = value @classmethod def _get_sample_path(cls, name, dirname, suffix='', api_version=None): # Note: if _use_common_sample is set then common sample files from # that location will be used instead of using the location from the # sample_dir attribute. As of now it is being used for volume POST # request to avoid duplicate copy of volume req and resp sample files. # The best approach is using the context manager provided by the # common_api_sample method as used in example # VolumesSampleBase's _create_volume method. 
parts = [dirname, 'samples', cls._use_common_sample or cls.sample_dir] # Base version doesn't live in a specific vX.Y directory if (not cls._use_common_sample and api_version and api_version != cls._osapi_version): parts.append('v' + api_version) parts.append(name + ".json" + suffix) return os.path.join(*parts) @classmethod def _get_sample(cls, name, api_version=None): dirname = os.path.dirname(os.path.abspath(__file__)) dirname = os.path.normpath(os.path.join(dirname, "../../../api-ref/source/v3")) return cls._get_sample_path(name, dirname, api_version=api_version) @classmethod def _get_template(cls, name, api_version=None): dirname = os.path.dirname(os.path.abspath(__file__)) dirname = os.path.normpath(os.path.join(dirname, "./api_sample_tests")) return cls._get_sample_path(name, dirname, suffix='.tpl', api_version=api_version) def _read_template(self, name): template = self._get_template(name, self.osapi_version) with open(template) as inf: return inf.read().strip() def _write_template(self, name, data): with open(self._get_template(name, self.osapi_version), 'w') as outf: outf.write(data) def _write_sample(self, name, data): with open(self._get_sample(name, self.osapi_version), 'w') as outf: outf.write(data) def _compare_dict(self, expected, result, result_str, matched_value): if not isinstance(result, dict): raise NoMatch('%(result_str)s: %(result)s is not a dict.' % {'result_str': result_str, 'result': result}) ex_keys = sorted(expected.keys()) res_keys = sorted(result.keys()) if ex_keys != res_keys: ex_delta = [] res_delta = [] for key in ex_keys: if key not in res_keys: ex_delta.append(key) for key in res_keys: if key not in ex_keys: res_delta.append(key) raise NoMatch( 'Dictionary key mismatch:\n' 'Extra key(s) in template:\n%(ex_delta)s\n' 'Extra key(s) in %(result_str)s:\n%(res_delta)s\n' % {'ex_delta': ex_delta, 'result_str': result_str, 'res_delta': res_delta}) for key in ex_keys: res = self._compare_result(expected[key], result[key], result_str) matched_value = res or matched_value return matched_value def _compare_list(self, expected, result, result_str, matched_value): if not isinstance(result, list): raise NoMatch( '%(result_str)s: %(result)s is not a list.' % {'result_str': result_str, 'result': result}) expected = expected[:] extra = [] # if it's a list of 1, do the simple compare which gives a # better error message. if len(result) == len(expected) == 1: return self._compare_result(expected[0], result[0], result_str) # This is clever enough to need some explanation. What we # are doing here is looping the result list, and trying to # compare it to every item in the expected list. If there # is more than one, we're going to get fails. We ignore # those. But every time we match an expected we drop it, # and break to the next iteration. Every time we hit the # end of the iteration, we add our results into a bucket # of non matched. # # This results in poor error messages because we don't # really know why the elements failed to match each # other. A more complicated diff might be nice. 
for res_obj in result: for i, ex_obj in enumerate(expected): try: matched_value = self._compare_result(ex_obj, res_obj, result_str) del expected[i] break except NoMatch: pass else: extra.append(res_obj) error = [] if expected: error.append('Extra list items in template:') error.extend([repr(o) for o in expected]) if extra: error.append('Extra list items in %(result_str)s:' % {'result_str': result_str}) error.extend([repr(o) for o in extra]) if error: raise NoMatch('\n'.join(error)) return matched_value def _compare_template(self, expected, result, result_str, matched_value): # escape stuff for regex for char in '[]<>?': expected = expected.replace(char, '\\%s' % char) # special handling of subs that are not quoted. We are # expecting an int but we had to pass in a string # so the json would parse properly. if expected.startswith("%(int:"): result = str(result) expected = expected.replace('int:', '') expected = expected % self.subs expected = '^%s$' % expected match = re.match(expected, result) if not match: raise NoMatch( 'Values do not match:\n' 'Template: %(expected)s\n%(result_str)s: %(result)s' % {'expected': expected, 'result_str': result_str, 'result': result}) try: matched_value = match.group() except IndexError: if match.groups(): matched_value = match.groups()[0] return matched_value def _compare_result(self, expected, result, result_str): matched_value = None # None if expected is None: if result is None: pass else: raise NoMatch('%(result_str)s: Expected None, got %(result)s.' % {'result_str': result_str, 'result': result}) # dictionary elif isinstance(expected, dict): matched_value = self._compare_dict( expected, result, result_str, matched_value) # list elif isinstance(expected, list): matched_value = self._compare_list( expected, result, result_str, matched_value) # template string elif isinstance(expected, str) and '%' in expected: if expected[-1] == '%': if result != expected: raise NoMatch( 'Values do not match:\n' 'Template: %(expected)s\n%(result_str)s: ' '%(result)s' % {'expected': expected, 'result_str': result_str, 'result': result}) else: matched_value = self._compare_template( expected, result, result_str, matched_value) # string elif isinstance(expected, str): # Ignore whitespace in this comparison expected = expected.strip() if isinstance(result, str): result = result.strip() if expected != result: raise NoMatch( 'Values do not match:\n' 'Template: %(expected)s\n%(result_str)s: ' '%(result)s' % {'expected': expected, 'result_str': result_str, 'result': result}) # int elif isinstance(expected, (int, float)): if expected != result: raise NoMatch( 'Values do not match:\n' 'Template: %(expected)s\n%(result_str)s: ' '%(result)s' % {'expected': expected, 'result_str': result_str, 'result': result}) else: raise ValueError( 'Unexpected type %(expected_type)s' % {'expected_type': type(expected)}) return matched_value def generalize_subs(self, subs, vanilla_regexes): """Give the test a chance to modify subs after the server response was verified, and before the on-disk doc/samples file is checked. This may be needed by some tests to convert exact matches expected from the server into pattern matches to verify what is in the sample file. If there are no changes to be made, subs is returned unharmed. 
""" return subs def _update_links(self, sample_data): """Process sample data and update version specific links.""" # replace version urls url_re = self._get_host() + "/v3/" + PROJECT_ID new_url = self._get_host() + "/v" + self.osapi_version_major if self._project_id: new_url += "/" + PROJECT_ID updated_data = re.sub(url_re, new_url, sample_data) # replace unversioned urls url_re = self._get_host() + "/" + PROJECT_ID new_url = self._get_host() if self._project_id: new_url += "/" + PROJECT_ID updated_data = re.sub(url_re, new_url, updated_data) return updated_data def _verify_response(self, name, subs, response, exp_code, update_links=True): # Always also include the laundry list of base regular # expressions for possible key values in our templates. Test # specific patterns (the value of ``subs``) can override # these. regexes = self._get_regexes() regexes.update(subs) subs = regexes self.subs = subs message = response.text if response.status_code >= 400 else None self.assertEqual(exp_code, response.status_code, message) response_data = response.content response_data = pretty_data(response_data) if not os.path.exists(self._get_template(name, self.osapi_version)): self._write_template(name, response_data) template_data = response_data else: template_data = self._read_template(name) if (self.generate_samples and not os.path.exists(self._get_sample(name, self.osapi_version))): self._write_sample(name, response_data) sample_data = response_data else: with open(self._get_sample(name, self.osapi_version)) as sample: sample_data = sample.read() if update_links: sample_data = self._update_links(sample_data) try: template_data = objectify(template_data) response_data = objectify(response_data) response_result = self._compare_result(template_data, response_data, "Response") except NoMatch as e: raise NoMatch("\nFailed to match Template to Response: \n%s\n" "Template: %s\n\n" "Response: %s\n\n" % (e, pp.pformat(template_data), pp.pformat(response_data))) try: # replace some of the subs with patterns for the # doc/samples check. Also let the test do some of its # own generalization, if necessary vanilla_regexes = self._get_regexes() subs['os-vol-host-attr:host'] = vanilla_regexes['host_name'] subs['id'] = vanilla_regexes['id'] subs['uuid'] = vanilla_regexes['uuid'] subs['image_id'] = vanilla_regexes['uuid'] subs = self.generalize_subs(subs, vanilla_regexes) self.subs = subs sample_data = objectify(sample_data) self._compare_result(template_data, sample_data, "Sample") return response_result except NoMatch as e: raise NoMatch("\nFailed to match Template to Sample: \n%s\n" "Template: %s\n\n" "Sample: %s\n\n" "Hint: does your test need to override " "ApiSampleTestBase.generalize_subs()?" 
% (e, pp.pformat(template_data), pp.pformat(sample_data))) def _get_host(self): return 'http://localhost:8776' def _get_glance_host(self): return 'http://glance.openstack.example.com' def _get_regexes(self): text = r'(\\"|[^"])*' isotime_re = r'\d{4}-[0,1]\d-[0-3]\dT\d{2}:\d{2}:\d{2}Z' strtime_re = r'\d{4}-[0,1]\d-[0-3]\dT\d{2}:\d{2}:\d{2}\.\d{6}' extension_update = ( r'\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}[+-]\d{2}:\d{2}') strtime_url_re = (r'\d{4}-[0,1]\d-[0-3]\d' r'\+\d{2}\%3A\d{2}\%3A\d{2}\.\d{6}') return { 'isotime': isotime_re, 'strtime': strtime_re, 'strtime_url': strtime_url_re, 'strtime_or_none': r'None|%s' % strtime_re, 'password': '[0-9a-zA-Z]{1,12}', 'id': '(?P[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}' '-[0-9a-f]{4}-[0-9a-f]{12})', 'uuid': '[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}' '-[0-9a-f]{4}-[0-9a-f]{12}', 'request_id': 'req-[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}' '-[0-9a-f]{4}-[0-9a-f]{12}', 'host': r'https?://[0-9]+(?:\.[0-9]+){3}:[0-9]+', 'host_name': r'\w+', 'glance_host': self._get_glance_host(), 'os-vol-host-attr:host': self.volume.host, 'text': text, 'int': '[0-9]+', 'user_id': text, 'api_vers': 'v' + self.osapi_version_major, 'volume_endpoint': self._get_volume_endpoint(), 'versioned_volume_endpoint': self._get_versioned_volume_endpoint(), 'name': text, 'description': text, 'extension_update': extension_update, 'auth_key': '[a-z0-9]{16}' } def _get_volume_endpoint(self): if self._project_id: return '%s/%s' % (self._get_host(), PROJECT_ID) else: return self._get_host() def _get_versioned_volume_endpoint(self): if self._project_id: return '%s/v3/%s' % (self._get_host(), PROJECT_ID) else: return self._get_host() + '/v3/' def _get_response(self, url, method, body=None, headers=None): headers = headers or {} headers['Content-Type'] = 'application/json' headers['Accept'] = 'application/json' return self.api.api_request(url, body=body, method=method, headers=headers) def _do_options(self, url, headers=None): return self._get_response(url, 'OPTIONS', headers=headers) def _do_get(self, url, headers=None, return_json_body=False): response = self._get_response(url, 'GET', headers=headers) if return_json_body and hasattr(response, 'content'): return jsonutils.loads(response.content) return response def _do_post(self, url, name=None, subs=None, method='POST', headers=None): self.subs = {} if subs is None else subs body = None if name: body = self._read_template(name) % self.subs sample = self._get_sample(name, self.osapi_version) if self.generate_samples and not os.path.exists(sample): self._write_sample(name, body) return self._get_response(url, method, body, headers=headers) def _do_put(self, url, name=None, subs=None, headers=None): # name indicates that we have a body document. While the HTTP # spec implies that PUT is supposed to have one, we have some # APIs which don't. 
if name: return self._do_post( url, name, subs, method='PUT', headers=headers) else: return self._get_response(url, 'PUT', headers=headers) def _do_delete(self, url, headers=None): return self._get_response(url, 'DELETE', headers=headers) @contextlib.contextmanager def common_api_sample(self, api=None): orig_value = self.__class__._use_common_sample try: self.__class__._use_common_sample = api or self.sample_dir yield finally: self.__class__._use_common_sample = orig_value class VolumesSampleBase(ApiSampleTestBase): sample_dir = "volumes" def _create_volume(self, subs=None): # Use the samples from the common API for the request with self.common_api_sample('volumes'): response = self._do_post('volumes', 'volume-create-request', subs) return response ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/functional/functional_helpers.py0000664000175000017500000002414500000000000024103 0ustar00zuulzuul00000000000000# Copyright 2011 Justin Santa Barbara # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Provides common functionality for functional tests.""" import functools import os.path import random import string import time from unittest import mock import uuid import fixtures from oslo_config import cfg from cinder import service from cinder.tests.functional.api import client from cinder.tests.unit import fake_constants as fake from cinder.tests.unit import test # For the flags CONF = cfg.CONF VOLUME = 'VOLUME' SNAPSHOT = 'SNAPSHOT' GROUP = 'GROUP' GROUP_SNAPSHOT = 'GROUP_SNAPSHOT' def generate_random_alphanumeric(length): """Creates a random alphanumeric string of specified length.""" return ''.join(random.choice(string.ascii_uppercase + string.digits) for _x in range(length)) def generate_random_numeric(length): """Creates a random numeric string of specified length.""" return ''.join(random.choice(string.digits) for _x in range(length)) def generate_new_element(items, prefix, numeric=False): """Creates a random string with prefix, that is not in 'items' list.""" while True: if numeric: candidate = prefix + generate_random_numeric(8) else: candidate = prefix + generate_random_alphanumeric(8) if candidate not in items: return candidate class _FunctionalTestBase(test.TestCase): # Inheritors can change this attribute to change default tests microversion _osapi_version = '3.0' # These attributes are automatically set based on _osapi_version and when # setting osapi_version property. 
osapi_version_major = '3' osapi_version_minor = '0' @property def osapi_version(self): return self._osapi_version @osapi_version.setter def osapi_version(self, value): self._osapi_version = value self.osapi_version_major, self.osapi_version_minor = value.split('.') self.api.api_version = value @staticmethod def override_mv(version, pre_call=None): """Decorator that overrides the microversion for 1 test.""" def decorator(func): @functools.wraps(func) def wrapper(self, *args, **kwargs): original_api_version = self.osapi_version self.osapi_version = version try: if pre_call: pre_call(self) return func(self, *args, **kwargs) finally: self.osapi_version = original_api_version return wrapper return decorator @staticmethod def use_versions(*versions): """Class decorator to repeat tests for each provided version.""" def generate_methods(cls): setup = getattr(cls, 'setup', None) for name, func in list(cls.__dict__.items()): if name.startswith('test_') and callable(func): for version in versions: setattr(cls, f'{name}_{version.replace(".", "_")}', _FunctionalTestBase.override_mv(version, setup)(func)) delattr(cls, name) return cls return generate_methods def setUp(self): super(_FunctionalTestBase, self).setUp() f = self._get_flags() for k, value_dict in f.items(): self.override_config(k, value_dict['v'], value_dict.get('g')) for var in ('http_proxy', 'HTTP_PROXY'): self.useFixture(fixtures.EnvironmentVariable(var)) # set up services self.volume = self.start_service('volume') # NOTE(dulek): Mocking eventlet.sleep so test won't time out on # scheduler service start. with mock.patch('eventlet.sleep'): self.scheduler = self.start_service('scheduler') self._start_api_service() self.addCleanup(self.osapi.stop) self.osapi_version_major, self.osapi_version_minor = \ self._osapi_version.split('.') self.api = client.TestOpenStackClient(fake.USER_ID, fake.PROJECT_ID, self.auth_url, self.osapi_version) def _update_project(self, new_project_id): self.api.update_project(new_project_id) def _start_api_service(self): default_conf = os.path.abspath(os.path.join( os.path.dirname(__file__), '..', '..', '..', 'etc/cinder/api-paste.ini')) CONF.api_paste_config = default_conf self.osapi = service.WSGIService("osapi_volume") self.osapi.start() # FIXME(ja): this is not the auth url - this is the service url # FIXME(ja): this needs fixed in nova as well self.auth_url = 'http://%s:%s/v' % (self.osapi.host, self.osapi.port) self.auth_url += self.osapi_version_major def _get_flags(self): """An opportunity to setup flags, before the services are started.""" f = {} # Ensure tests only listen on localhost f['osapi_volume_listen'] = {'v': '127.0.0.1'} # Auto-assign ports to allow concurrent tests f['osapi_volume_listen_port'] = {'v': 0} # Use simple scheduler to avoid complications - we test schedulers # separately f['scheduler_driver'] = {'v': ('cinder.scheduler.filter_scheduler.' 
'FilterScheduler')} return f def get_unused_server_name(self): servers = self.api.get_servers() server_names = [server['name'] for server in servers] return generate_new_element(server_names, 'server') def get_invalid_image(self): return str(uuid.uuid4()) def _build_minimal_create_server_request(self): server = {} image = self.api.get_images()[0] if 'imageRef' in image: image_href = image['imageRef'] else: image_href = image['id'] image_href = 'http://fake.server/%s' % image_href # We now have a valid imageId server['imageRef'] = image_href # Set a valid flavorId flavor = self.api.get_flavors()[0] server['flavorRef'] = 'http://fake.server/%s' % flavor['id'] # Set a valid server name server_name = self.get_unused_server_name() server['name'] = server_name return server def _poll_resource_while(self, res_id, continue_states, res_type=VOLUME, expected_end_status=None, max_retries=5, status_field='status'): """Poll (briefly) while the state is in continue_states. Continues until the state changes from continue_states or max_retries are hit. If expected_end_status is specified, we assert that the end status of the resource is expected_end_status. """ retries = 0 while retries <= max_retries: try: if res_type == VOLUME: found_res = self.api.get_volume(res_id) elif res_type == SNAPSHOT: found_res = self.api.get_snapshot(res_id) elif res_type == GROUP: found_res = self.api.get_group(res_id) elif res_type == GROUP_SNAPSHOT: found_res = self.api.get_group_snapshot(res_id) else: return None except client.OpenStackApiException404: return None except client.OpenStackApiException: # NOTE(xyang): Got OpenStackApiException( # u'Unexpected status code',) sometimes, but # it works if continue. continue self.assertEqual(res_id, found_res['id']) res_status = found_res[status_field] if res_status not in continue_states: if expected_end_status: self.assertEqual(expected_end_status, res_status) return found_res time.sleep(1) retries += 1 def _poll_volume_while(self, volume_id, continue_states, expected_end_status=None, max_retries=5, status_field='status'): return self._poll_resource_while(volume_id, continue_states, VOLUME, expected_end_status, max_retries, status_field) def _poll_snapshot_while(self, snapshot_id, continue_states, expected_end_status=None, max_retries=5, status_field='status'): return self._poll_resource_while(snapshot_id, continue_states, SNAPSHOT, expected_end_status, max_retries, status_field) def _poll_group_while(self, group_id, continue_states, expected_end_status=None, max_retries=30, status_field='status'): return self._poll_resource_while(group_id, continue_states, GROUP, expected_end_status, max_retries, status_field) def _poll_group_snapshot_while(self, group_snapshot_id, continue_states, expected_end_status=None, max_retries=30): return self._poll_resource_while(group_snapshot_id, continue_states, GROUP_SNAPSHOT, expected_end_status, max_retries) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/functional/test_default_types.py0000664000175000017500000001101100000000000024112 0ustar00zuulzuul00000000000000# Copyright 2020 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from unittest import mock import uuid from cinder import context from cinder.tests.functional.api import client from cinder.tests.functional import functional_helpers class DefaultVolumeTypesTest(functional_helpers._FunctionalTestBase): _vol_type_name = 'functional_test_type' _osapi_version = '3.62' def setUp(self): super(DefaultVolumeTypesTest, self).setUp() self.volume_type = self.api.create_type(self._vol_type_name) self.project = self.FakeProject() # Need to mock out Keystone so the functional tests don't require other # services _keystone_client = mock.MagicMock() _keystone_client.version = 'v3' _keystone_client.projects.get.side_effect = self._get_project _keystone_client_get = mock.patch( 'cinder.api.api_utils._keystone_client', lambda *args, **kwargs: _keystone_client) _keystone_client_get.start() self.addCleanup(_keystone_client_get.stop) def _get_project(self, project_id, *args, **kwargs): return self.project class FakeProject(object): def __init__(self, name=None): self.id = uuid.uuid4().hex self.name = name self.description = 'fake project description' self.domain_id = 'default' @mock.patch.object(context.RequestContext, 'authorize') def test_default_type_set(self, mock_authorize): default_type = self.api.set_default_type( self.project.id, {'volume_type': self._vol_type_name}) self.assertEqual(self.project.id, default_type['project_id']) self.assertEqual(self.volume_type['id'], default_type['volume_type_id']) @mock.patch.object(context.RequestContext, 'authorize') def test_default_type_get(self, mock_authorize): self.api.set_default_type(self.project.id, {'volume_type': self._vol_type_name}) default_type = self.api.get_default_type(project_id=self.project.id) self.assertEqual(self.project.id, default_type['project_id']) self.assertEqual(self.volume_type['id'], default_type['volume_type_id']) @mock.patch.object(context.RequestContext, 'authorize') def test_default_type_get_all(self, mock_authorize): self.api.set_default_type(self.project.id, {'volume_type': self._vol_type_name}) default_types = self.api.get_default_type() self.assertEqual(1, len(default_types)) self.assertEqual(self.project.id, default_types[0]['project_id']) self.assertEqual(self.volume_type['id'], default_types[0]['volume_type_id']) @mock.patch.object(context.RequestContext, 'authorize') def test_default_type_unset(self, mock_authorize): self.api.set_default_type(self.project.id, {'volume_type': self._vol_type_name}) default_types = self.api.get_default_type() self.assertEqual(1, len(default_types)) self.api.unset_default_type(self.project.id) default_types = self.api.get_default_type() self.assertEqual(0, len(default_types)) @mock.patch.object(context.RequestContext, 'authorize') def test_default_type_set_volume_type_not_found(self, mock_authorize): self.assertRaises(client.OpenStackApiException400, self.api.set_default_type, self.project.id, {'volume_type': 'fake_type'}) @mock.patch.object(context.RequestContext, 'authorize') def test_cannot_delete_project_default_type(self, mock_authorize): default_type = self.api.set_default_type( self.project.id, {'volume_type': self._vol_type_name}) 
self.assertRaises(client.OpenStackApiException400, self.api.delete_type, default_type['volume_type_id']) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/functional/test_extensions.py0000664000175000017500000001313100000000000023446 0ustar00zuulzuul00000000000000# Copyright 2011 Justin Santa Barbara # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from http import client as http_client import iso8601 from oslo_config import cfg from oslo_serialization import jsonutils import webob from cinder.api import extensions from cinder.api.v3 import router from cinder.tests.functional import functional_helpers NS = "{http://docs.openstack.org/common/api/v1.0}" CONF = cfg.CONF class ExtensionTestCase(functional_helpers._FunctionalTestBase): def _get_flags(self): f = super(ExtensionTestCase, self)._get_flags() f['osapi_volume_extension'] = {'v': CONF.osapi_volume_extension[:]} f['osapi_volume_extension']['v'].append( 'cinder.tests.functional.api.foxinsocks.Foxinsocks') return f class ExtensionsTest(ExtensionTestCase): def test_get_foxnsocks(self): """Simple check that fox-n-socks works.""" response = self.api.api_request('/foxnsocks') foxnsocks = response.text self.assertEqual('Try to say this Mr. Knox, sir...', foxnsocks) class ExtensionControllerTest(ExtensionTestCase): def setUp(self): super(ExtensionControllerTest, self).setUp() self.ext_list = ["TypesManage", "TypesExtraSpecs", ] self.ext_list.sort() def test_list_extensions_json(self): app = router.APIRouter() request = webob.Request.blank("/fake/extensions") response = request.get_response(app) self.assertEqual(http_client.OK, response.status_int) # Make sure we have all the extensions, extra extensions being OK. data = jsonutils.loads(response.body) names = [str(x['name']) for x in data['extensions'] if str(x['name']) in self.ext_list] names.sort() self.assertEqual(self.ext_list, names) # Ensure all the timestamps are valid according to iso8601 for ext in data['extensions']: iso8601.parse_date(ext['updated']) # Make sure that at least Fox in Sox is correct. 
(fox_ext, ) = [ x for x in data['extensions'] if x['alias'] == 'FOXNSOX'] self.assertEqual( {'name': 'Fox In Socks', 'updated': '2011-01-22T13:25:27-06:00', 'description': 'The Fox In Socks Extension.', 'alias': 'FOXNSOX', 'links': []}, fox_ext) for ext in data['extensions']: url = '/fake/extensions/%s' % ext['alias'] request = webob.Request.blank(url) response = request.get_response(app) output = jsonutils.loads(response.body) self.assertEqual(ext['alias'], output['extension']['alias']) def test_get_extension_json(self): app = router.APIRouter() request = webob.Request.blank("/fake/extensions/FOXNSOX") response = request.get_response(app) self.assertEqual(http_client.OK, response.status_int) data = jsonutils.loads(response.body) self.assertEqual( {"name": "Fox In Socks", "updated": "2011-01-22T13:25:27-06:00", "description": "The Fox In Socks Extension.", "alias": "FOXNSOX", "links": []}, data['extension']) def test_get_non_existing_extension_json(self): app = router.APIRouter() request = webob.Request.blank("/fake/extensions/4") response = request.get_response(app) self.assertEqual(http_client.NOT_FOUND, response.status_int) class StubExtensionManager(object): """Provides access to Tweedle Beetles.""" name = "Tweedle Beetle Extension" alias = "TWDLBETL" def __init__(self, resource_ext=None, action_ext=None, request_ext=None, controller_ext=None): self.resource_ext = resource_ext self.controller_ext = controller_ext self.extra_resource_ext = None def get_resources(self): resource_exts = [] if self.resource_ext: resource_exts.append(self.resource_ext) if self.extra_resource_ext: resource_exts.append(self.extra_resource_ext) return resource_exts def get_controller_extensions(self): controller_extensions = [] if self.controller_ext: controller_extensions.append(self.controller_ext) return controller_extensions class ExtensionControllerIdFormatTest(ExtensionTestCase): def _bounce_id(self, test_id): class BounceController(object): def show(self, req, id): return id res_ext = extensions.ResourceExtension('bounce', BounceController()) manager = StubExtensionManager(res_ext) app = router.APIRouter(manager) request = webob.Request.blank("/fake/bounce/%s" % test_id) response = request.get_response(app) return response.body def test_id_with_json_format(self): result = self._bounce_id('foo.json') self.assertEqual(b'foo', result) def test_id_with_bad_format(self): result = self._bounce_id('foo.bad') self.assertEqual(b'foo.bad', result) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/functional/test_group_replication.py0000664000175000017500000001675000000000000025006 0ustar00zuulzuul00000000000000# Copyright (C) 2017 Dell Inc. or its subsidiaries. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
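# NOTE: The group replication and group snapshot tests below depend on the
# _poll_*_while() helpers from functional_helpers to wait for asynchronous
# state transitions driven by the fake volume driver. A minimal sketch of the
# pattern (the size and statuses are illustrative):
#
#     created = self.api.post_volume({'volume': {'size': 1}})
#     # Keep polling while the volume reports 'creating'; the helper returns
#     # the resource dict once the status leaves 'creating', or None if the
#     # resource is gone or the retries run out.
#     found = self._poll_volume_while(created['id'], ['creating'])
#     self.assertEqual('available', found['status'])
#
# The same helpers accept a status_field argument (e.g.
# status_field='replication_status'), which is how the tests below wait out
# the ENABLING / FAILING_OVER / DISABLING transitions.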
from oslo_utils import uuidutils from cinder.objects import fields from cinder.tests.functional import functional_helpers from cinder.volume import configuration class GroupReplicationTest(functional_helpers._FunctionalTestBase): _vol_type_name = 'functional_test_type' _grp_type_name = 'functional_grp_test_type' _osapi_version = '3.38' def setUp(self): super(GroupReplicationTest, self).setUp() self.volume_type = self.api.create_type(self._vol_type_name) extra_specs = {"replication_enabled": " True"} self.api.create_volume_type_extra_specs(self.volume_type['id'], extra_specs=extra_specs) self.volume_type = self.api.get_type(self.volume_type['id']) self.group_type = self.api.create_group_type(self._grp_type_name) grp_specs = {"group_replication_enabled": " True"} self.api.create_group_type_specs(self.group_type['id'], group_specs=grp_specs) self.group_type = self.api.get_group_type(self.group_type['id']) def _get_flags(self): f = super(GroupReplicationTest, self)._get_flags() f['volume_driver'] = ( {'v': 'cinder.tests.fake_driver.FakeLoggingVolumeDriver', 'g': configuration.SHARED_CONF_GROUP}) f['default_volume_type'] = {'v': self._vol_type_name} f['default_group_type'] = {'v': self._grp_type_name} return f def test_group_replication(self): """Tests group replication APIs.""" # Create group created_group = self.api.post_group( {'group': {'group_type': self.group_type['id'], 'volume_types': [self.volume_type['id']]}}) self.assertTrue(uuidutils.is_uuid_like(created_group['id'])) created_group_id = created_group['id'] # Check it's there found_group = self._poll_group_while(created_group_id, ['creating']) self.assertEqual(created_group_id, found_group['id']) self.assertEqual(self.group_type['id'], found_group['group_type']) self.assertEqual('available', found_group['status']) # Create volume created_volume = self.api.post_volume( {'volume': {'size': 1, 'group_id': created_group_id, 'volume_type': self.volume_type['id']}}) self.assertTrue(uuidutils.is_uuid_like(created_volume['id'])) created_volume_id = created_volume['id'] # Check it's there found_volume = self.api.get_volume(created_volume_id) self.assertEqual(created_volume_id, found_volume['id']) self.assertEqual(self._vol_type_name, found_volume['volume_type']) self.assertEqual(created_group_id, found_volume['group_id']) # Wait (briefly) for creation. Delay is due to the 'message queue' found_volume = self._poll_volume_while(created_volume_id, ['creating']) # It should be available... 
self.assertEqual('available', found_volume['status']) # Test enable replication group self.api.enable_group_replication(created_group_id, {'enable_replication': {}}) found_volume = self._poll_volume_while( created_volume_id, [fields.ReplicationStatus.ENABLING], status_field='replication_status') found_group = self._poll_group_while( created_group_id, [fields.ReplicationStatus.ENABLING], status_field='replication_status') self.assertEqual(fields.ReplicationStatus.ENABLED, found_group['replication_status']) self.assertEqual(fields.ReplicationStatus.ENABLED, found_volume['replication_status']) # Test list replication group targets targets = self.api.list_group_replication_targets( created_group_id, {'list_replication_targets': {}}) self.assertEqual({'replication_targets': []}, targets) # Test failover replication group self.api.failover_group_replication( created_group_id, {'failover_replication': {'secondary_backend_id': 'backend1', 'allow_attached_volume': False}}) found_volume = self._poll_volume_while( created_volume_id, [fields.ReplicationStatus.FAILING_OVER], status_field='replication_status') found_group = self._poll_group_while( created_group_id, [fields.ReplicationStatus.FAILING_OVER], status_field='replication_status') self.assertEqual(fields.ReplicationStatus.FAILED_OVER, found_group['replication_status']) self.assertEqual(fields.ReplicationStatus.FAILED_OVER, found_volume['replication_status']) # Test failback replication group self.api.failover_group_replication( created_group_id, {'failover_replication': {'secondary_backend_id': 'default', 'allow_attached_volume': False}}) found_volume = self._poll_volume_while( created_volume_id, [fields.ReplicationStatus.FAILING_OVER], status_field='replication_status') found_group = self._poll_group_while( created_group_id, [fields.ReplicationStatus.FAILING_OVER], status_field='replication_status') self.assertEqual(fields.ReplicationStatus.ENABLED, found_group['replication_status']) self.assertEqual(fields.ReplicationStatus.ENABLED, found_volume['replication_status']) # Test disable replication group self.api.disable_group_replication(created_group_id, {'disable_replication': {}}) found_volume = self._poll_volume_while( created_volume_id, [fields.ReplicationStatus.DISABLING], status_field='replication_status') found_group = self._poll_group_while( created_group_id, [fields.ReplicationStatus.DISABLING], status_field='replication_status') self.assertEqual(fields.ReplicationStatus.DISABLED, found_group['replication_status']) self.assertEqual(fields.ReplicationStatus.DISABLED, found_volume['replication_status']) # Delete the original group self.api.delete_group(created_group_id, {'delete': {'delete-volumes': True}}) # Wait (briefly) for deletion. Delay is due to the 'message queue' found_volume = self._poll_volume_while(created_volume_id, ['deleting']) found_group = self._poll_group_while(created_group_id, ['deleting']) # Should be gone self.assertIsNone(found_volume) self.assertIsNone(found_group) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/functional/test_group_snapshots.py0000664000175000017500000003720100000000000024511 0ustar00zuulzuul00000000000000# Copyright 2016 EMC Corporation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_utils import uuidutils from cinder.objects import fields from cinder.tests.functional import functional_helpers from cinder.volume import configuration class GroupSnapshotsTest(functional_helpers._FunctionalTestBase): _vol_type_name = 'functional_test_type' _grp_type_name = 'functional_grp_test_type' _osapi_version = '3.19' def setUp(self): super(GroupSnapshotsTest, self).setUp() self.volume_type = self.api.create_type(self._vol_type_name) self.group_type = self.api.create_group_type(self._grp_type_name) def _get_flags(self): f = super(GroupSnapshotsTest, self)._get_flags() f['volume_driver'] = ( {'v': 'cinder.tests.fake_driver.FakeLoggingVolumeDriver', 'g': configuration.SHARED_CONF_GROUP}) f['default_volume_type'] = {'v': self._vol_type_name} f['default_group_type'] = {'v': self._grp_type_name} return f def test_get_group_snapshots_summary(self): """Simple check that listing group snapshots works.""" grp_snaps = self.api.get_group_snapshots(False) self.assertIsNotNone(grp_snaps) def test_get_group_snapshots(self): """Simple check that listing group snapshots works.""" grp_snaps = self.api.get_group_snapshots() self.assertIsNotNone(grp_snaps) def test_create_and_delete_group_snapshot(self): """Creates and deletes a group snapshot.""" # Create group created_group = self.api.post_group( {'group': {'group_type': self.group_type['id'], 'volume_types': [self.volume_type['id']]}}) self.assertTrue(uuidutils.is_uuid_like(created_group['id'])) created_group_id = created_group['id'] # Check it's there found_group = self._poll_group_while(created_group_id, ['creating']) self.assertEqual(created_group_id, found_group['id']) self.assertEqual(self.group_type['id'], found_group['group_type']) self.assertEqual('available', found_group['status']) # Create volume created_volume = self.api.post_volume( {'volume': {'size': 1, 'group_id': created_group_id, 'volume_type': self.volume_type['id']}}) self.assertTrue(uuidutils.is_uuid_like(created_volume['id'])) created_volume_id = created_volume['id'] # Check it's there found_volume = self.api.get_volume(created_volume_id) self.assertEqual(created_volume_id, found_volume['id']) self.assertEqual(self._vol_type_name, found_volume['volume_type']) self.assertEqual(created_group_id, found_volume['group_id']) # Wait (briefly) for creation. Delay is due to the 'message queue' found_volume = self._poll_volume_while(created_volume_id, ['creating']) # It should be available... 
self.assertEqual('available', found_volume['status']) # Create group snapshot created_group_snapshot = self.api.post_group_snapshot( {'group_snapshot': {'group_id': created_group_id}}) self.assertTrue(uuidutils.is_uuid_like(created_group_snapshot['id'])) created_group_snapshot_id = created_group_snapshot['id'] # Check it's there found_group_snapshot = self._poll_group_snapshot_while( created_group_snapshot_id, [fields.GroupSnapshotStatus.CREATING]) self.assertEqual(created_group_snapshot_id, found_group_snapshot['id']) self.assertEqual(created_group_id, found_group_snapshot['group_id']) self.assertEqual(fields.GroupSnapshotStatus.AVAILABLE, found_group_snapshot['status']) # Delete the group snapshot self.api.delete_group_snapshot(created_group_snapshot_id) # Wait (briefly) for deletion. Delay is due to the 'message queue' found_group_snapshot = self._poll_group_snapshot_while( created_group_snapshot_id, [fields.GroupSnapshotStatus.DELETING]) # Delete the original group self.api.delete_group(created_group_id, {'delete': {'delete-volumes': True}}) # Wait (briefly) for deletion. Delay is due to the 'message queue' found_volume = self._poll_volume_while(created_volume_id, ['deleting']) found_group = self._poll_group_while(created_group_id, ['deleting']) # Should be gone self.assertIsNone(found_group_snapshot) self.assertIsNone(found_volume) self.assertIsNone(found_group) def test_create_group_from_group_snapshot(self): """Creates a group from a group snapshot.""" # Create group created_group = self.api.post_group( {'group': {'group_type': self.group_type['id'], 'volume_types': [self.volume_type['id']]}}) self.assertTrue(uuidutils.is_uuid_like(created_group['id'])) created_group_id = created_group['id'] # Check it's there found_group = self._poll_group_while(created_group_id, ['creating']) self.assertEqual(created_group_id, found_group['id']) self.assertEqual(self.group_type['id'], found_group['group_type']) self.assertEqual('available', found_group['status']) # Create volume created_volume = self.api.post_volume( {'volume': {'size': 1, 'group_id': created_group_id, 'volume_type': self.volume_type['id']}}) self.assertTrue(uuidutils.is_uuid_like(created_volume['id'])) created_volume_id = created_volume['id'] # Check it's there found_volume = self.api.get_volume(created_volume_id) self.assertEqual(created_volume_id, found_volume['id']) self.assertEqual(self._vol_type_name, found_volume['volume_type']) self.assertEqual(created_group_id, found_volume['group_id']) # Wait (briefly) for creation. Delay is due to the 'message queue' found_volume = self._poll_volume_while(created_volume_id, ['creating']) # It should be available... 
self.assertEqual('available', found_volume['status']) # Create group snapshot created_group_snapshot = self.api.post_group_snapshot( {'group_snapshot': {'group_id': created_group_id}}) self.assertTrue(uuidutils.is_uuid_like(created_group_snapshot['id'])) created_group_snapshot_id = created_group_snapshot['id'] # Check it's there found_group_snapshot = self._poll_group_snapshot_while( created_group_snapshot_id, ['creating']) self.assertEqual(created_group_snapshot_id, found_group_snapshot['id']) self.assertEqual(created_group_id, found_group_snapshot['group_id']) self.assertEqual('available', found_group_snapshot['status']) # Create group from group snapshot created_group_from_snap = self.api.post_group_from_src( {'create-from-src': { 'group_snapshot_id': created_group_snapshot_id}}) self.assertTrue(uuidutils.is_uuid_like(created_group_from_snap['id'])) created_group_from_snap_id = created_group_from_snap['id'] # Check it's there found_volumes = self.api.get_volumes() self._poll_volume_while(found_volumes[0], ['creating']) self._poll_volume_while(found_volumes[1], ['creating']) found_group_from_snap = self._poll_group_while( created_group_from_snap_id, ['creating']) self.assertEqual(created_group_from_snap_id, found_group_from_snap['id']) self.assertEqual(created_group_snapshot_id, found_group_from_snap['group_snapshot_id']) self.assertEqual(self.group_type['id'], found_group_from_snap['group_type']) self.assertEqual('available', found_group_from_snap['status']) # Delete the group from snap self.api.delete_group(created_group_from_snap_id, {'delete': {'delete-volumes': True}}) # Wait (briefly) for deletion. Delay is due to the 'message queue' found_group_from_snap = self._poll_group_while( created_group_from_snap_id, ['deleting']) # Delete the group snapshot self.api.delete_group_snapshot(created_group_snapshot_id) # Wait (briefly) for deletion. Delay is due to the 'message queue' found_group_snapshot = self._poll_group_snapshot_while( created_group_snapshot_id, [fields.GroupSnapshotStatus.DELETING]) # Delete the original group self.api.delete_group(created_group_id, {'delete': {'delete-volumes': True}}) # Wait (briefly) for deletion. 
Delay is due to the 'message queue' found_volume = self._poll_volume_while(created_volume_id, ['deleting']) found_group = self._poll_group_while(created_group_id, ['deleting']) # Should be gone self.assertIsNone(found_group_from_snap) self.assertIsNone(found_group_snapshot) self.assertIsNone(found_volume) self.assertIsNone(found_group) def test_create_group_from_source_group(self): """Creates a group from a source group.""" # Create group created_group = self.api.post_group( {'group': {'group_type': self.group_type['id'], 'volume_types': [self.volume_type['id']]}}) self.assertTrue(uuidutils.is_uuid_like(created_group['id'])) created_group_id = created_group['id'] # Check it's there found_group = self._poll_group_while(created_group_id, ['creating']) self.assertEqual(created_group_id, found_group['id']) self.assertEqual(self.group_type['id'], found_group['group_type']) self.assertEqual('available', found_group['status']) # Create volume created_volume = self.api.post_volume( {'volume': {'size': 1, 'group_id': created_group_id, 'volume_type': self.volume_type['id']}}) self.assertTrue(uuidutils.is_uuid_like(created_volume['id'])) created_volume_id = created_volume['id'] # Check it's there found_volume = self.api.get_volume(created_volume_id) self.assertEqual(created_volume_id, found_volume['id']) self.assertEqual(self._vol_type_name, found_volume['volume_type']) self.assertEqual(created_group_id, found_volume['group_id']) # Wait (briefly) for creation. Delay is due to the 'message queue' found_volume = self._poll_volume_while(created_volume_id, ['creating']) # It should be available... self.assertEqual('available', found_volume['status']) # Test create group from source group created_group_from_group = self.api.post_group_from_src( {'create-from-src': { 'source_group_id': created_group_id}}) self.assertTrue(uuidutils.is_uuid_like(created_group_from_group['id'])) created_group_from_group_id = created_group_from_group['id'] # Check it's there found_volumes = self.api.get_volumes() self._poll_volume_while(found_volumes[0], ['creating']) self._poll_volume_while(found_volumes[1], ['creating']) found_group_from_group = self._poll_group_while( created_group_from_group_id, ['creating']) self.assertEqual(created_group_from_group_id, found_group_from_group['id']) self.assertEqual(created_group_id, found_group_from_group['source_group_id']) self.assertEqual(self.group_type['id'], found_group_from_group['group_type']) self.assertEqual('available', found_group_from_group['status']) # Delete the group from group self.api.delete_group(created_group_from_group_id, {'delete': {'delete-volumes': True}}) # Wait (briefly) for deletion. Delay is due to the 'message queue' found_group_from_group = self._poll_group_while( created_group_from_group_id, ['deleting']) # Delete the original group self.api.delete_group(created_group_id, {'delete': {'delete-volumes': True}}) # Wait (briefly) for deletion. 
Delay is due to the 'message queue' found_volume = self._poll_volume_while(created_volume_id, ['deleting']) found_group = self._poll_group_while(created_group_id, ['deleting']) # Should be gone self.assertIsNone(found_group_from_group) self.assertIsNone(found_volume) self.assertIsNone(found_group) def test_reset_group_snapshot(self): # Create group group1 = self.api.post_group( {'group': {'group_type': self.group_type['id'], 'volume_types': [self.volume_type['id']]}}) self.assertTrue(uuidutils.is_uuid_like(group1['id'])) group_id = group1['id'] self._poll_group_while(group_id, ['creating']) # Create volume created_volume = self.api.post_volume( {'volume': {'size': 1, 'group_id': group_id, 'volume_type': self.volume_type['id']}}) self.assertTrue(uuidutils.is_uuid_like(created_volume['id'])) created_volume_id = created_volume['id'] self._poll_volume_while(created_volume_id, ['creating']) # Create group snapshot group_snapshot1 = self.api.post_group_snapshot( {'group_snapshot': {'group_id': group_id}}) self.assertTrue(uuidutils.is_uuid_like(group_snapshot1['id'])) group_snapshot_id = group_snapshot1['id'] self._poll_group_snapshot_while(group_snapshot_id, fields.GroupSnapshotStatus.CREATING) group_snapshot1 = self.api.get_group_snapshot(group_snapshot_id) self.assertEqual(fields.GroupSnapshotStatus.AVAILABLE, group_snapshot1['status']) # reset group snapshot status self.api.reset_group_snapshot(group_snapshot_id, {"reset_status": { "status": fields.GroupSnapshotStatus.ERROR}}) group_snapshot1 = self.api.get_group_snapshot(group_snapshot_id) self.assertEqual(fields.GroupSnapshotStatus.ERROR, group_snapshot1['status']) # Delete group, volume and group snapshot self.api.delete_group_snapshot(group_snapshot_id) found_group_snapshot = self._poll_group_snapshot_while( group_snapshot_id, [fields.GroupSnapshotStatus.DELETING]) self.api.delete_group(group_id, {'delete': {'delete-volumes': True}}) found_volume = self._poll_volume_while(created_volume_id, ['deleting']) found_group = self._poll_group_while(group_id, ['deleting']) # Created resources should be gone self.assertIsNone(found_group_snapshot) self.assertIsNone(found_volume) self.assertIsNone(found_group) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/functional/test_groups.py0000664000175000017500000001101300000000000022563 0ustar00zuulzuul00000000000000# Copyright 2016 EMC Corporation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
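# NOTE(editor): the functional tests in this directory repeatedly "wait
# (briefly)" for asynchronous status transitions by calling helpers such as
# _poll_volume_while, _poll_group_while and _poll_group_snapshot_while from
# cinder/tests/functional/functional_helpers.py. The sketch below only
# illustrates that polling pattern, under the assumption that the real
# helpers re-fetch the resource until it leaves the given transient states;
# it is not the actual helper implementation.
#
#     import time
#
#     def poll_while(get_resource, resource_id, statuses,
#                    status_field='status', max_retries=60):
#         """Return the resource once it leaves `statuses`, or None if gone."""
#         resource = None
#         for _ in range(max_retries):
#             try:
#                 resource = get_resource(resource_id)
#             except Exception:
#                 # The API client raises once the resource has been deleted.
#                 return None
#             if resource[status_field] not in statuses:
#                 break
#             time.sleep(1)
#         return resource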
from oslo_utils import uuidutils from cinder.tests.functional import functional_helpers from cinder.volume import configuration class GroupsTest(functional_helpers._FunctionalTestBase): _vol_type_name = 'functional_test_type' _grp_type_name = 'functional_grp_test_type' _osapi_version = '3.20' def setUp(self): super(GroupsTest, self).setUp() self.volume_type = self.api.create_type(self._vol_type_name) self.group_type = self.api.create_group_type(self._grp_type_name) self.group1 = self.api.post_group( {'group': {'group_type': self.group_type['id'], 'volume_types': [self.volume_type['id']]}}) def _get_flags(self): f = super(GroupsTest, self)._get_flags() f['volume_driver'] = ( {'v': 'cinder.tests.fake_driver.FakeLoggingVolumeDriver', 'g': configuration.SHARED_CONF_GROUP}) f['default_volume_type'] = {'v': self._vol_type_name} f['default_group_type'] = {'v': self._grp_type_name} return f def test_get_groups_summary(self): """Simple check that listing groups works.""" grps = self.api.get_groups(False) self.assertIsNotNone(grps) def test_get_groups(self): """Simple check that listing groups works.""" grps = self.api.get_groups() self.assertIsNotNone(grps) def test_reset_group_status(self): """Reset group status""" found_group = self._poll_group_while(self.group1['id'], ['creating']) self.assertEqual('available', found_group['status']) self.api.reset_group(self.group1['id'], {"reset_status": {"status": "error"}}) group = self.api.get_group(self.group1['id']) self.assertEqual("error", group['status']) def test_create_and_delete_group(self): """Creates and deletes a group.""" # Create group created_group = self.api.post_group( {'group': {'group_type': self.group_type['id'], 'volume_types': [self.volume_type['id']]}}) self.assertTrue(uuidutils.is_uuid_like(created_group['id'])) created_group_id = created_group['id'] # Check it's there found_group = self._poll_group_while(created_group_id, ['creating']) self.assertEqual(created_group_id, found_group['id']) self.assertEqual(self.group_type['id'], found_group['group_type']) self.assertEqual('available', found_group['status']) # Create volume created_volume = self.api.post_volume( {'volume': {'size': 1, 'group_id': created_group_id, 'volume_type': self.volume_type['id']}}) self.assertTrue(uuidutils.is_uuid_like(created_volume['id'])) created_volume_id = created_volume['id'] # Check it's there found_volume = self.api.get_volume(created_volume_id) self.assertEqual(created_volume_id, found_volume['id']) self.assertEqual(self._vol_type_name, found_volume['volume_type']) self.assertEqual(created_group_id, found_volume['group_id']) # Wait (briefly) for creation. Delay is due to the 'message queue' found_volume = self._poll_volume_while(created_volume_id, ['creating']) # It should be available... self.assertEqual('available', found_volume['status']) # Delete the original group self.api.delete_group(created_group_id, {'delete': {'delete-volumes': True}}) # Wait (briefly) for deletion. Delay is due to the 'message queue' found_volume = self._poll_volume_while(created_volume_id, ['deleting']) found_group = self._poll_group_while(created_group_id, ['deleting']) # Should be gone self.assertIsNone(found_volume) self.assertIsNone(found_group) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/functional/test_login.py0000664000175000017500000000164200000000000022363 0ustar00zuulzuul00000000000000# Copyright 2011 Justin Santa Barbara # All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from cinder.tests.functional import functional_helpers class LoginTest(functional_helpers._FunctionalTestBase): def test_login(self): """Simple check - we list volumes - so we know we're logged in.""" volumes = self.api.get_volumes() self.assertIsNotNone(volumes) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/functional/test_middleware.py0000664000175000017500000000237700000000000023376 0ustar00zuulzuul00000000000000# Copyright 2020 Thomas Goirand # Copyright 2020 Infomaniak Networks. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_serialization import jsonutils import requests from cinder.tests.functional import functional_helpers class TestHealthCheckMiddleware(functional_helpers._FunctionalTestBase): def test_healthcheck(self): # We verify that we return a HTTP200 when calling api_get url = 'http://%s:%s/healthcheck' % (self.osapi.host, self.osapi.port) response = requests.request( 'GET', url, headers={'Accept': 'application/json'}) output = jsonutils.loads(response.content) self.assertEqual(['OK'], output['reasons']) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/functional/test_volumes.py0000664000175000017500000003213000000000000022741 0ustar00zuulzuul00000000000000# Copyright 2011 Justin Santa Barbara # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
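# NOTE(editor): in these functional test classes, _get_flags() returns
# configuration overrides in the form
# {'option_name': {'v': <value>, 'g': <config group>}}, which the shared
# functional test base feeds into oslo.config overrides; an option with no
# 'g' key lands in the default group. Roughly, and only as an illustration
# of the convention rather than the actual base-class code:
#
#     for name, opt in self._get_flags().items():
#         self.override_config(name, opt['v'], group=opt.get('g'))
#
# so 'volume_driver' is overridden in the shared backend configuration group
# (configuration.SHARED_CONF_GROUP) while 'default_volume_type' and
# 'default_group_type' use the default group.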
from unittest import mock from oslo_utils import uuidutils from cinder.tests.functional.api import client from cinder.tests.functional import functional_helpers from cinder.volume import configuration class VolumesTest(functional_helpers._FunctionalTestBase): _vol_type_name = 'functional_test_type' def setUp(self): super(VolumesTest, self).setUp() self.api.create_type(self._vol_type_name) def _get_flags(self): f = super(VolumesTest, self)._get_flags() f['volume_driver'] = ( {'v': 'cinder.tests.fake_driver.FakeLoggingVolumeDriver', 'g': configuration.SHARED_CONF_GROUP}) f['default_volume_type'] = {'v': self._vol_type_name} return f def test_get_volumes_summary(self): """Simple check that listing volumes works.""" volumes = self.api.get_volumes(False) self.assertIsNotNone(volumes) def test_get_volumes(self): """Simple check that listing volumes works.""" volumes = self.api.get_volumes() self.assertIsNotNone(volumes) def test_create_and_delete_volume(self): """Creates and deletes a volume.""" # Create volume created_volume = self.api.post_volume({'volume': {'size': 1}}) self.assertTrue(uuidutils.is_uuid_like(created_volume['id'])) created_volume_id = created_volume['id'] # Check it's there found_volume = self.api.get_volume(created_volume_id) self.assertEqual(created_volume_id, found_volume['id']) self.assertEqual(self._vol_type_name, found_volume['volume_type']) # It should also be in the all-volume list volumes = self.api.get_volumes() volume_names = [volume['id'] for volume in volumes] self.assertIn(created_volume_id, volume_names) # Wait (briefly) for creation. Delay is due to the 'message queue' found_volume = self._poll_volume_while(created_volume_id, ['creating']) # It should be available... self.assertEqual('available', found_volume['status']) # Delete the volume self.api.delete_volume(created_volume_id) # Wait (briefly) for deletion. Delay is due to the 'message queue' found_volume = self._poll_volume_while(created_volume_id, ['deleting']) # Should be gone self.assertIsNone(found_volume) def test_create_no_volume_type(self): """Verify volume_type is not None""" # Create volume created_volume = self.api.post_volume({'volume': {'size': 1}}) self.assertTrue(uuidutils.is_uuid_like(created_volume['id'])) created_volume_id = created_volume['id'] # Wait (briefly) for creation. Delay is due to the 'message queue' found_volume = self._poll_volume_while(created_volume_id, ['creating']) self.assertEqual('available', found_volume['status']) # It should have a volume_type self.assertIsNotNone(found_volume['volume_type']) # Delete the volume self.api.delete_volume(created_volume_id) found_volume = self._poll_volume_while(created_volume_id, ['deleting']) self.assertIsNone(found_volume) def test_create_volume_default_type(self): """Verify that the configured default_volume_type is used""" my_vol_type_name = 'default_type' self.api.create_type(my_vol_type_name) self.flags(default_volume_type=my_vol_type_name) # Create volume created_volume = self.api.post_volume({'volume': {'size': 1}}) self.assertTrue(uuidutils.is_uuid_like(created_volume['id'])) created_volume_id = created_volume['id'] # Wait (briefly) for creation. 
Delay is due to the 'message queue' found_volume = self._poll_volume_while(created_volume_id, ['creating']) self.assertEqual('available', found_volume['status']) # It should have the default volume_type self.assertEqual(my_vol_type_name, found_volume['volume_type']) # Delete the volume self.api.delete_volume(created_volume_id) found_volume = self._poll_volume_while(created_volume_id, ['deleting']) self.assertIsNone(found_volume) def test_create_volume_bad_default_type(self): """Verify non-existent default volume type errors out.""" # configure a non-existent default type self.flags(default_volume_type='non-existent-type') # Create volume and verify it errors out with 500 status self.assertRaises(client.OpenStackApiException500, self.api.post_volume, {'volume': {'size': 1}}) def test_create_volume_default_type_set_none(self): """Verify None default volume type errors out.""" # configure None default type self.flags(default_volume_type=None) # Create volume and verify it errors out with 500 status self.assertRaises(client.OpenStackApiException500, self.api.post_volume, {'volume': {'size': 1}}) @mock.patch('cinder.api.common.get_cluster_host', return_value=(None, None)) def test_manage_volume_default_type_set_none(self, fake_get_host): """Verify missing default volume type errors out when managing.""" # configure None default type self.flags(default_volume_type=None) # manage something in the backend and verify you get a 500 self.assertRaises(client.OpenStackApiException500, self.api.post_manage_volume) # make sure that we actually made it into the method we # want to test and the 500 wasn't from something else fake_get_host.assert_called_once() def test_create_volume_specified_type(self): """Verify volume_type is not default.""" my_vol_type_name = 'my_specified_type' my_vol_type_id = self.api.create_type(my_vol_type_name)['id'] # Create volume created_volume = self.api.post_volume( {'volume': {'size': 1, 'volume_type': my_vol_type_id}}) self.assertTrue(uuidutils.is_uuid_like(created_volume['id'])) created_volume_id = created_volume['id'] # Wait (briefly) for creation. Delay is due to the 'message queue' found_volume = self._poll_volume_while(created_volume_id, ['creating']) self.assertEqual('available', found_volume['status']) # It should have the specified volume_type self.assertEqual(my_vol_type_name, found_volume['volume_type']) # Delete the volume and test type self.api.delete_volume(created_volume_id) found_volume = self._poll_volume_while(created_volume_id, ['deleting']) self.assertIsNone(found_volume) self.api.delete_type(my_vol_type_id) def test_create_volume_from_source_vol_inherits_voltype(self): src_vol_type_name = 'source_vol_type' src_vol_type_id = self.api.create_type(src_vol_type_name)['id'] # Create source volume src_volume = self.api.post_volume( {'volume': {'size': 1, 'volume_type': src_vol_type_id}}) self.assertTrue(uuidutils.is_uuid_like(src_volume['id'])) src_volume_id = src_volume['id'] # Wait (briefly) for creation. Delay is due to the 'message queue' src_volume = self._poll_volume_while(src_volume_id, ['creating']) self.assertEqual('available', src_volume['status']) # Create a new volume using src_volume, do not specify a volume_type new_volume = self.api.post_volume( {'volume': {'size': 1, 'source_volid': src_volume_id}}) new_volume_id = new_volume['id'] # Wait for creation ... 
new_volume = self._poll_volume_while(new_volume_id, ['creating']) self.assertEqual('available', new_volume['status']) # It should have the same type as the source volume self.assertEqual(src_vol_type_name, new_volume['volume_type']) # Delete the volumes and test type self.api.delete_volume(src_volume_id) found_volume = self._poll_volume_while(src_volume_id, ['deleting']) self.assertIsNone(found_volume) self.api.delete_volume(new_volume_id) found_volume = self._poll_volume_while(new_volume_id, ['deleting']) self.assertIsNone(found_volume) self.api.delete_type(src_vol_type_id) def test_create_volume_from_snapshot_inherits_voltype(self): src_vol_type_name = 'a_very_new_vol_type' src_vol_type_id = self.api.create_type(src_vol_type_name)['id'] # Create source volume src_volume = self.api.post_volume( {'volume': {'size': 1, 'volume_type': src_vol_type_id}}) src_volume_id = src_volume['id'] # Wait (briefly) for creation. Delay is due to the 'message queue' src_volume = self._poll_volume_while(src_volume_id, ['creating']) self.assertEqual('available', src_volume['status']) # Create a snapshot of src_volume snapshot = self.api.post_snapshot( {'snapshot': {'volume_id': src_volume_id, 'name': 'test_snapshot'}}) self.assertEqual(src_volume_id, snapshot['volume_id']) snapshot_id = snapshot['id'] # make sure the snapshot is ready snapshot = self._poll_snapshot_while(snapshot_id, ['creating']) self.assertEqual('available', snapshot['status']) # create a new volume from the snapshot, do not specify a volume_type new_volume = self.api.post_volume( {'volume': {'size': 1, 'snapshot_id': snapshot_id}}) new_volume_id = new_volume['id'] # Wait for creation ... new_volume = self._poll_volume_while(new_volume_id, ['creating']) self.assertEqual('available', new_volume['status']) # Finally, here's the whole point of this test: self.assertEqual(src_vol_type_name, new_volume['volume_type']) # Delete the snapshot, volumes, and test type self.api.delete_snapshot(snapshot_id) snapshot = self._poll_snapshot_while(snapshot_id, ['deleting']) self.assertIsNone(snapshot) self.api.delete_volume(src_volume_id) src_volume = self._poll_volume_while(src_volume_id, ['deleting']) self.assertIsNone(src_volume) self.api.delete_volume(new_volume_id) new_volume = self._poll_volume_while(new_volume_id, ['deleting']) self.assertIsNone(new_volume) self.api.delete_type(src_vol_type_id) def test_create_volume_with_metadata(self): """Creates a volume with metadata.""" # Create volume metadata = {'key1': 'value1', 'key2': 'value2', 'volume/created/by': 'cinder'} created_volume = self.api.post_volume( {'volume': {'size': 1, 'metadata': metadata}}) self.assertTrue(uuidutils.is_uuid_like(created_volume['id'])) created_volume_id = created_volume['id'] # Check it's there and metadata present found_volume = self.api.get_volume(created_volume_id) self.assertEqual(created_volume_id, found_volume['id']) self.assertEqual(metadata, found_volume['metadata']) def test_create_volume_in_availability_zone(self): """Creates a volume in availability_zone.""" # Create volume availability_zone = 'nova' created_volume = self.api.post_volume( {'volume': {'size': 1, 'availability_zone': availability_zone}}) self.assertTrue(uuidutils.is_uuid_like(created_volume['id'])) created_volume_id = created_volume['id'] # Check it's there and availability zone present found_volume = self.api.get_volume(created_volume_id) self.assertEqual(created_volume_id, found_volume['id']) self.assertEqual(availability_zone, found_volume['availability_zone']) def 
test_create_and_update_volume(self): # Create vol1 created_volume = self.api.post_volume({'volume': { 'size': 1, 'name': 'vol1'}}) self.assertEqual('vol1', created_volume['name']) created_volume_id = created_volume['id'] # update volume body = {'volume': {'name': 'vol-one'}} updated_volume = self.api.put_volume(created_volume_id, body) self.assertEqual('vol-one', updated_volume['name']) # check for update found_volume = self.api.get_volume(created_volume_id) self.assertEqual(created_volume_id, found_volume['id']) self.assertEqual('vol-one', found_volume['name']) ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1759315577.151119 cinder-27.0.0/cinder/tests/hacking/0000775000175000017500000000000000000000000017101 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/hacking/__init__.py0000664000175000017500000000000000000000000021200 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/hacking/checks.py0000664000175000017500000003322500000000000020720 0ustar00zuulzuul00000000000000# Copyright (c) 2014 OpenStack Foundation. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import ast import re from hacking import core """ Guidelines for writing new hacking checks - Use only for Cinder specific tests. OpenStack general tests should be submitted to the common 'hacking' module. - Pick numbers in the range N3xx. Find the current test with the highest allocated number and then pick the next value. - Keep the test method code in the source file ordered based on the N3xx value. - List the new rule in the top level HACKING.rst file - Add test cases for each new rule to cinder/tests/unit/test_hacking.py """ # NOTE(thangp): Ignore N323 pep8 error caused by importing cinder objects UNDERSCORE_IMPORT_FILES = ['cinder/objects/__init__.py', 'cinder/objects/manageableresources.py'] mutable_default_args = re.compile(r"^\s*def .+\((.+=\{\}|.+=\[\])") translated_log = re.compile( r"(.)*LOG\.(audit|debug|error|info|warn|warning|critical|exception)" r"\(\s*_\(\s*('|\")") string_translation = re.compile(r"(.)*_\(\s*('|\")") underscore_import_check = re.compile(r"(.)*i18n\s+import(.)* _$") underscore_import_check_multi = re.compile(r"(.)*i18n\s+import(.)* _, (.)*") # We need this for cases where they have created their own _ function. custom_underscore_check = re.compile(r"(.)*_\s*=\s*(.)*") no_print_statements = re.compile(r"\s*print\s*\(.+\).*") dict_constructor_with_list_copy_re = re.compile(r".*\bdict\((\[)?(\(|\[)") logging_instance = re.compile( r"(.)*LOG\.(warning|info|debug|error|exception)\(") assert_True = re.compile( r".*assertEqual\(True, .*\)") class BaseASTChecker(ast.NodeVisitor): """Provides a simple framework for writing AST-based checks. Subclasses should implement visit_* methods like any other AST visitor implementation. 
When they detect an error for a particular node the method should call ``self.add_error(offending_node)``. Details about where in the code the error occurred will be pulled from the node object. Subclasses should also provide a class variable named CHECK_DESC to be used for the human readable error message. """ def __init__(self, tree, filename): """This object is created automatically by pep8. :param tree: an AST tree :param filename: name of the file being analyzed (ignored by our checks) """ self._tree = tree self._errors = [] def run(self): """Called automatically by pep8.""" self.visit(self._tree) return self._errors def add_error(self, node, message=None): """Add an error caused by a node to the list of errors for pep8.""" # Need to disable pylint check here as it doesn't catch CHECK_DESC # being defined in the subclasses. message = message or self.CHECK_DESC # pylint: disable=E1101 error = (node.lineno, node.col_offset, message, self.__class__) self._errors.append(error) def _check_call_names(self, call_node, names): if isinstance(call_node, ast.Call): if isinstance(call_node.func, ast.Name): if call_node.func.id in names: return True return False @core.flake8ext def no_translate_logs(logical_line, filename): """Check for 'LOG.*(_(' Starting with the Pike series, OpenStack no longer supports log translation. We shouldn't translate logs. - This check assumes that 'LOG' is a logger. - Use filename so we can start enforcing this in specific folders instead of needing to do so all at once. C312 """ if translated_log.match(logical_line): yield (0, "C312: Log messages should not be translated!") @core.flake8ext def no_mutable_default_args(logical_line): msg = "N322: Method's default argument shouldn't be mutable!" if mutable_default_args.match(logical_line): yield (0, msg) @core.flake8ext def check_explicit_underscore_import(logical_line, filename): """Check for explicit import of the _ function We need to ensure that any files that are using the _() function to translate messages are explicitly importing the _ function. We can't trust unit test to catch whether the import has been added so we need to check for it here. """ # Build a list of the files that have _ imported. No further # checking needed once it is found. for file in UNDERSCORE_IMPORT_FILES: if file in filename: return if (underscore_import_check.match(logical_line) or underscore_import_check_multi.match(logical_line) or custom_underscore_check.match(logical_line)): UNDERSCORE_IMPORT_FILES.append(filename) elif string_translation.match(logical_line): yield (0, "N323: Found use of _() without explicit import of _ !") class CheckLoggingFormatArgs(BaseASTChecker): """Check for improper use of logging format arguments. LOG.debug("Volume %s caught fire and is at %d degrees C and climbing.", ('volume1', 500)) The format arguments should not be a tuple as it is easy to miss. """ name = 'check_logging_format_args' version = '1.0' CHECK_DESC = 'C310 Log method arguments should not be a tuple.' LOG_METHODS = [ 'debug', 'info', 'warn', 'warning', 'error', 'exception', 'critical', 'fatal', 'trace', 'log' ] def _find_name(self, node): """Return the fully qualified name or a Name or Attribute.""" if isinstance(node, ast.Name): return node.id elif (isinstance(node, ast.Attribute) and isinstance(node.value, (ast.Name, ast.Attribute))): method_name = node.attr obj_name = self._find_name(node.value) if obj_name is None: return None return obj_name + '.' 
+ method_name elif isinstance(node, str): return node else: # could be Subscript, Call or many more return None def visit_Call(self, node): """Look for the 'LOG.*' calls.""" # extract the obj_name and method_name if isinstance(node.func, ast.Attribute): obj_name = self._find_name(node.func.value) if isinstance(node.func.value, ast.Name): method_name = node.func.attr elif isinstance(node.func.value, ast.Attribute): obj_name = self._find_name(node.func.value) method_name = node.func.attr else: # could be Subscript, Call or many more return super(CheckLoggingFormatArgs, self).generic_visit(node) # obj must be a logger instance and method must be a log helper if (obj_name != 'LOG' or method_name not in self.LOG_METHODS): return super(CheckLoggingFormatArgs, self).generic_visit(node) # the call must have arguments if not len(node.args): return super(CheckLoggingFormatArgs, self).generic_visit(node) # any argument should not be a tuple for arg in node.args: if isinstance(arg, ast.Tuple): self.add_error(arg) return super(CheckLoggingFormatArgs, self).generic_visit(node) class CheckOptRegistrationArgs(BaseASTChecker): """Verifying the registration of options are well formed This class creates a check for single opt or list/tuple of opts when register_opt() or register_opts() are being called. """ name = 'check_opt_registrationg_args' version = '1.0' CHECK_DESC = ('C311: Arguments being passed to register_opt/register_opts ' 'must be a single option or list/tuple of options ' 'respectively. Options must also end with _opt or _opts ' 'respectively.') singular_method = 'register_opt' plural_method = 'register_opts' register_methods = [ singular_method, plural_method, ] def _find_name(self, node): """Return the fully qualified name or a Name or Attribute.""" if isinstance(node, ast.Name): return node.id elif (isinstance(node, ast.Attribute) and isinstance(node.value, (ast.Name, ast.Attribute))): method_name = node.attr obj_name = self._find_name(node.value) if obj_name is None: return None return obj_name + '.' + method_name elif isinstance(node, str): return node else: # could be Subscript, Call or many more return None def _is_list_or_tuple(self, obj): return isinstance(obj, (ast.List, ast.Tuple)) def visit_Call(self, node): """Look for the register_opt/register_opts calls.""" # extract the obj_name and method_name if isinstance(node.func, ast.Attribute): if not isinstance(node.func.value, ast.Name): return (super(CheckOptRegistrationArgs, self).generic_visit(node)) method_name = node.func.attr # obj must be instance of register_opt() or register_opts() if method_name not in self.register_methods: return (super(CheckOptRegistrationArgs, self).generic_visit(node)) if len(node.args) > 0: argument_name = self._find_name(node.args[0]) if argument_name: if (method_name == self.singular_method and not argument_name.lower().endswith('opt')): self.add_error(node.args[0]) elif (method_name == self.plural_method and not argument_name.lower().endswith('opts')): self.add_error(node.args[0]) else: # This covers instances of register_opt()/register_opts() # that are registering the objects directly and not # passing in a variable referencing the options being # registered. 
if (method_name == self.singular_method and self._is_list_or_tuple(node.args[0])): self.add_error(node.args[0]) elif (method_name == self.plural_method and not self._is_list_or_tuple(node.args[0])): self.add_error(node.args[0]) return super(CheckOptRegistrationArgs, self).generic_visit(node) @core.flake8ext def check_datetime_now(logical_line, noqa): if noqa: return msg = ("C301: Found datetime.now(). " "Please use timeutils.utcnow() from oslo_utils.") if 'datetime.now' in logical_line: yield (0, msg) @core.flake8ext def check_no_print_statements(logical_line, filename, noqa): # CLI and utils programs do need to use 'print()' so # we shouldn't check those files. if noqa: return if "cinder/cmd" in filename or "tools/" in filename: return if re.match(no_print_statements, logical_line): msg = ("C303: print() should not be used. " "Please use LOG.[info|error|warning|exception|debug]. " "If print() must be used, use '# noqa' to skip this check.") yield (0, msg) @core.flake8ext def dict_constructor_with_list_copy(logical_line): msg = ("N336: Must use a dict comprehension instead of a dict constructor " "with a sequence of key-value pairs.") if dict_constructor_with_list_copy_re.match(logical_line): yield (0, msg) @core.flake8ext def no_test_log(logical_line, filename, noqa): if ('cinder/tests' not in filename or noqa): return msg = "C309: Unit tests should not perform logging." if logging_instance.match(logical_line): yield (0, msg) @core.flake8ext def validate_assertTrue(logical_line, filename): # Note: a comparable check cannot be implemented for # assertFalse(), because assertFalse(None) passes. # Therefore, assertEqual(False, value) is required to # have the strongest test. if 'cinder/tests/unit' not in filename: return if re.match(assert_True, logical_line): msg = ("C313: Unit tests should use assertTrue(value) instead" " of using assertEqual(True, value).") yield (0, msg) third_party_mock = re.compile("^import.mock") from_third_party_mock = re.compile("^from.mock.import") @core.flake8ext def no_third_party_mock(logical_line): # We should only use unittest.mock, not the third party mock library that # was needed for py2 support. if (re.match(third_party_mock, logical_line) or re.match(from_third_party_mock, logical_line)): msg = ('C337: Unit tests should use the standard library "mock" ' 'module, not the third party mock lib.') yield (0, msg) @core.flake8ext def no_log_warn(logical_line): """Disallow 'LOG.warn(' Use LOG.warning() instead of Deprecated LOG.warn(). https://docs.python.org/3/library/logging.html#logging.warning """ msg = ("C338: LOG.warn is deprecated, please use LOG.warning!") if "LOG.warn(" in logical_line: yield (0, msg) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315576.8391159 cinder-27.0.0/cinder/tests/stubs/0000775000175000017500000000000000000000000016635 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1759315577.155119 cinder-27.0.0/cinder/tests/stubs/oslo_i18n/0000775000175000017500000000000000000000000020450 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/stubs/oslo_i18n/__init__.pyi0000664000175000017500000000032400000000000022731 0ustar00zuulzuul00000000000000# Stubs for oslo_i18n (Python 3) # # NOTE: This dynamically typed stub was automatically generated by stubgen. 
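# NOTE(editor): the "Guidelines for writing new hacking checks" module
# docstring in cinder/tests/hacking/checks.py above describes how new
# Cinder-specific flake8 rules are added (pick an unused rule number, keep
# the checks ordered, list the rule in HACKING.rst, and add unit tests in
# cinder/tests/unit/test_hacking.py). A minimal hypothetical check in the
# same style as the existing ones would look roughly like this -- the rule
# code C3xx and the rule itself are invented for illustration only:
#
#     @core.flake8ext
#     def check_no_time_sleep_in_tests(logical_line, filename):
#         if 'cinder/tests' not in filename:
#             return
#         if 'time.sleep(' in logical_line:
#             yield (0, "C3xx: avoid real time.sleep() in tests.")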
from ._factory import * from ._gettextutils import * from ._lazy import * from ._translate import * ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/stubs/oslo_i18n/_factory.pyi0000664000175000017500000000117700000000000023007 0ustar00zuulzuul00000000000000# Stubs for oslo_i18n._factory (Python 3) # # NOTE: This dynamically typed stub was automatically generated by stubgen. from typing import Any, Optional class TranslatorFactory: domain: Any = ... localedir: Any = ... def __init__(self, domain: Any, localedir: Optional[Any] = ...) -> None: ... @property def primary(self) -> ty.Callable[str]: ... @property def contextual_form(self): ... @property def plural_form(self): ... @property def log_info(self): ... @property def log_warning(self): ... @property def log_error(self): ... @property def log_critical(self): ... ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/stubs/oslo_i18n/_gettextutils.pyi0000664000175000017500000000035200000000000024077 0ustar00zuulzuul00000000000000# Stubs for oslo_i18n._gettextutils (Python 3) # # NOTE: This dynamically typed stub was automatically generated by stubgen. from typing import Any def install(domain: Any) -> None: ... def get_available_languages(domain: Any): ... ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/stubs/oslo_i18n/_i18n.pyi0000664000175000017500000000016600000000000022114 0ustar00zuulzuul00000000000000# Stubs for oslo_i18n._i18n (Python 3) # # NOTE: This dynamically typed stub was automatically generated by stubgen. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/stubs/oslo_i18n/_lazy.pyi0000664000175000017500000000024700000000000022314 0ustar00zuulzuul00000000000000# Stubs for oslo_i18n._lazy (Python 3) # # NOTE: This dynamically typed stub was automatically generated by stubgen. def enable_lazy(enable: bool = ...) -> None: ... ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/stubs/oslo_i18n/_locale.pyi0000664000175000017500000000030300000000000022565 0ustar00zuulzuul00000000000000# Stubs for oslo_i18n._locale (Python 3) # # NOTE: This dynamically typed stub was automatically generated by stubgen. from typing import Any def get_locale_dir_variable_name(domain: Any): ... ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/stubs/oslo_i18n/_message.pyi0000664000175000017500000000112000000000000022750 0ustar00zuulzuul00000000000000# Stubs for oslo_i18n._message (Python 3) # # NOTE: This dynamically typed stub was automatically generated by stubgen. from typing import Any, Optional CONTEXT_SEPARATOR: str LOG: Any class Message(str): def __new__(cls, msgid: Any, msgtext: Optional[Any] = ..., params: Optional[Any] = ..., domain: str = ..., has_contextual_form: bool = ..., has_plural_form: bool = ..., *args: Any): ... def translation(self, desired_locale: Optional[Any] = ...): ... def __mod__(self, other: Any): ... def __add__(self, other: Any) -> None: ... def __radd__(self, other: Any): ... 
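# NOTE(editor): the Message stub above mirrors oslo_i18n's translated-string
# behaviour: Message is a str subclass that supports '%' interpolation (which
# returns another Message) but does not support '+' concatenation (the real
# class raises on addition, hence the None-returning __add__ stub).
# Illustrative usage, with a literal value standing in for real data:
#
#     from cinder.i18n import _
#
#     msg = _("Volume %(id)s could not be found.") % {'id': 'vol-1234'}
#     # msg = _("Volume ") + 'vol-1234'   # not supported by Message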
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/stubs/oslo_i18n/_translate.pyi0000664000175000017500000000033700000000000023332 0ustar00zuulzuul00000000000000# Stubs for oslo_i18n._translate (Python 3) # # NOTE: This dynamically typed stub was automatically generated by stubgen. from typing import Any, Optional def translate(obj: Any, desired_locale: Optional[Any] = ...): ... ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/stubs/oslo_i18n/fixture.pyi0000664000175000017500000000166200000000000022666 0ustar00zuulzuul00000000000000# Stubs for oslo_i18n.fixture (Python 3) # # NOTE: This dynamically typed stub was automatically generated by stubgen. import fixtures import gettext from typing import Any, Optional class Translation(fixtures.Fixture): domain: Any = ... def __init__(self, domain: str = ...) -> None: ... def lazy(self, msg: Any): ... def immediate(self, msg: Any): ... class ToggleLazy(fixtures.Fixture): def __init__(self, enabled: Any) -> None: ... def setUp(self) -> None: ... class _PrefixTranslator(gettext.NullTranslations): prefix: Any = ... def __init__(self, fp: Optional[Any] = ..., prefix: str = ...) -> None: ... def gettext(self, message: Any): ... def ugettext(self, message: Any): ... class PrefixLazyTranslation(fixtures.Fixture): languages: Any = ... locale: Any = ... def __init__(self, languages: Optional[Any] = ..., locale: Optional[Any] = ...) -> None: ... def setUp(self): ... ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/stubs/oslo_i18n/log.pyi0000664000175000017500000000067000000000000021757 0ustar00zuulzuul00000000000000# Stubs for oslo_i18n.log (Python 3) # # NOTE: This dynamically typed stub was automatically generated by stubgen. from logging import handlers from typing import Any, Optional class TranslationHandler(handlers.MemoryHandler): locale: Any = ... def __init__(self, locale: Optional[Any] = ..., target: Optional[Any] = ...) -> None: ... def setFormatter(self, fmt: Any) -> None: ... def emit(self, record: Any) -> None: ... ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1759315577.167119 cinder-27.0.0/cinder/tests/unit/0000775000175000017500000000000000000000000016454 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/__init__.py0000664000175000017500000000555100000000000020573 0ustar00zuulzuul00000000000000# Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ :mod:`cinder.tests.unit` -- Cinder Unittests ===================================================== .. 
automodule:: cinder.tests.unit :platform: Unix """ import os import sys import eventlet # Monkey patching must go before the oslo.log import, otherwise # oslo.context will not use greenthread thread local and all greenthreads # will share the same context. if os.name == 'nt': # eventlet monkey patching the os module causes subprocess.Popen to fail # on Windows when using pipes due to missing non-blocking IO support. eventlet.monkey_patch(os=False) else: eventlet.monkey_patch() # Monkey patch the original current_thread to use the up-to-date _active # global variable. See https://bugs.launchpad.net/bugs/1863021 and # https://github.com/eventlet/eventlet/issues/592 import __original_module_threading as orig_threading # pylint: disable=E0401 import threading # noqa orig_threading.current_thread.__globals__['_active'] = threading._active from oslo_config import cfg from oslo_reports import guru_meditation_report as gmr from oslo_reports import opts as gmr_opts from oslo_service import loopingcall from cinder import objects from cinder.tests.unit import utils as test_utils from cinder import version CONF = cfg.CONF # NOTE(alaski): Make sure this is done after eventlet monkey patching otherwise # the threading.local() store used in oslo_messaging will be initialized to # threadlocal storage rather than greenthread local. This will cause context # sets and deletes in that storage to clobber each other. # NOTE(comstud): Make sure we have all of the objects loaded. We do this # at module import time, because we may be using mock decorators in our # tests that run at import time. objects.register_all() gmr_opts.set_defaults(CONF) gmr.TextGuruMeditation.setup_autorun(version, conf=CONF) # Keep track of looping calls looping_call_tracker = test_utils.InstanceTracker(loopingcall.LoopingCallBase) def stop_looping_calls(): for loop in looping_call_tracker.instances: try: loop.stop() except Exception: sys.stderr.write(f'Error stopping loop call {loop}\n') looping_call_tracker.clear() ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1759315577.171119 cinder-27.0.0/cinder/tests/unit/api/0000775000175000017500000000000000000000000017225 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/api/__init__.py0000664000175000017500000000000000000000000021324 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1759315577.179119 cinder-27.0.0/cinder/tests/unit/api/contrib/0000775000175000017500000000000000000000000020665 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/api/contrib/__init__.py0000664000175000017500000000000000000000000022764 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/api/contrib/test_admin_actions.py0000664000175000017500000021373400000000000025120 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from http import HTTPStatus from unittest import mock import ddt import fixtures from oslo_concurrency import lockutils from oslo_config import fixture as config_fixture import oslo_messaging as messaging from oslo_serialization import jsonutils from oslo_utils import timeutils import webob from cinder.api.contrib import admin_actions from cinder.api import microversions as mv from cinder.backup import api as backup_api from cinder.backup import rpcapi as backup_rpcapi from cinder.common import constants from cinder import context from cinder import db from cinder import exception from cinder import objects from cinder.objects import base as obj_base from cinder.objects import fields from cinder.scheduler import rpcapi as scheduler_rpcapi from cinder.tests.unit.api import fakes from cinder.tests.unit.api.v3 import fakes as v3_fakes from cinder.tests.unit import cast_as_call from cinder.tests.unit import fake_constants as fake from cinder.tests.unit import fake_snapshot from cinder.tests.unit import fake_volume from cinder.tests.unit import test from cinder.tests.unit import utils as test_utils from cinder.volume import api as volume_api from cinder.volume import rpcapi from cinder.volume import volume_types def app(): # no auth, just let environ['cinder.context'] pass through api = fakes.router_v3.APIRouter() mapper = fakes.urlmap.URLMap() mapper['/v3'] = api return mapper class BaseAdminTest(test.TestCase): def setUp(self): super(BaseAdminTest, self).setUp() self.volume_api = volume_api.API() # admin context self.ctx = context.RequestContext(fake.USER_ID, fake.PROJECT_ID, True) def _create_volume(self, context, updates=None): db_volume = {'status': 'available', 'host': 'test', 'binary': constants.VOLUME_BINARY, 'availability_zone': 'fake_zone', 'attach_status': fields.VolumeAttachStatus.DETACHED, 'volume_type_id': volume_types.get_default_volume_type()['id']} if updates: db_volume.update(updates) volume = objects.Volume(context=context, **db_volume) volume.create() return volume @ddt.ddt class AdminActionsTest(BaseAdminTest): def setUp(self): super(AdminActionsTest, self).setUp() self.tempdir = self.useFixture(fixtures.TempDir()).path self.fixture = self.useFixture(config_fixture.Config(lockutils.CONF)) self.fixture.config(lock_path=self.tempdir, group='oslo_concurrency') self.fixture.config(disable_process_locking=True, group='oslo_concurrency') self.flags(transport_url='fake:/') cast_as_call.mock_cast_as_call(self.volume_api.volume_rpcapi.client) cast_as_call.mock_cast_as_call(self.volume_api.scheduler_rpcapi.client) # start service to handle rpc messages for attach requests self.svc = self.start_service('volume', host='test') self.patch( 'cinder.objects.Service.get_minimum_obj_version', return_value=obj_base.OBJ_VERSIONS.get_current()) def _get_minimum_rpc_version_mock(ctxt, binary): binary_map = { constants.VOLUME_BINARY: rpcapi.VolumeAPI, constants.BACKUP_BINARY: backup_rpcapi.BackupAPI, constants.SCHEDULER_BINARY: scheduler_rpcapi.SchedulerAPI, } return binary_map[binary].RPC_API_VERSION self.patch('cinder.objects.Service.get_minimum_rpc_version', 
side_effect=_get_minimum_rpc_version_mock) self.controller = admin_actions.VolumeAdminController() def tearDown(self): self.svc.stop() super(AdminActionsTest, self).tearDown() def _issue_resource_reset(self, ctx, name, id, status): req = webob.Request.blank('/v3/%s/%s/%s/action' % ( fake.PROJECT_ID, name, id)) req.method = 'POST' req.headers['content-type'] = 'application/json' req.body = jsonutils.dump_as_bytes({'os-reset_status': status}) req.environ['cinder.context'] = ctx resp = req.get_response(app()) return resp def _issue_volume_reset(self, ctx, volume, updated_status): return self._issue_resource_reset(ctx, 'volumes', volume['id'], updated_status) def _issue_snapshot_reset(self, ctx, snapshot, updated_status): return self._issue_resource_reset(ctx, 'snapshots', snapshot.id, updated_status) def _issue_backup_reset(self, ctx, backup, updated_status): self.mock_object(backup_api.API, '_get_available_backup_service_host', return_value='testhost') return self._issue_resource_reset(ctx, 'backups', backup['id'], updated_status) @ddt.data({'os-reset_status': {'status': 'creating'}}, {'os-reset_status': {'status': 'available'}}, {'os-reset_status': {'status': 'deleting'}}, {'os-reset_status': {'status': 'error'}}, {'os-reset_status': {'status': 'error_deleting'}}, {'os-reset_status': {'attach_status': fields.VolumeAttachStatus.DETACHED}}, {'os-reset_status': {'attach_status': fields.VolumeAttachStatus.ATTACHED}}, {'os-reset_status': {'migration_status': 'migrating'}}, {'os-reset_status': {'migration_status': 'completing'}}, {'os-reset_status': {'migration_status': 'error'}}, {'os-reset_status': {'migration_status': 'none'}}, {'os-reset_status': {'migration_status': 'starting'}}) def test_valid_updates(self, body): req = webob.Request.blank('/v3/%s/volumes/%s/action' % ( fake.PROJECT_ID, id)) req.method = 'POST' req.headers['content-type'] = 'application/json' req.environ['cinder.context'] = self.ctx req.api_version_request = mv.get_api_version(mv.BASE_VERSION) vac = self.controller vac.validate_update(req, body=body) @ddt.data({'os-reset_status': {'status': None}}, {'os-reset_status': {'attach_status': None}}, {'os-reset_status': {'migration_status': None}}, {'os-reset_status': {'status': "", 'attach_status': "", "migration_status": ""}}) def test_invalid_updates(self, body): req = webob.Request.blank('/v3/%s/volumes/%s/action' % ( fake.PROJECT_ID, id)) req.method = 'POST' req.headers['content-type'] = 'application/json' req.environ['cinder.context'] = self.ctx req.api_version_request = mv.get_api_version(mv.BASE_VERSION) vac = self.controller self.assertRaises(exception.InvalidParameterValue, vac.validate_update, req, body=body) def test_reset_attach_status(self): volume = db.volume_create(self.ctx, {'attach_status': fields.VolumeAttachStatus.DETACHED, 'volume_type_id': fake.VOLUME_TYPE_ID}) resp = self._issue_volume_reset(self.ctx, volume, {'attach_status': fields.VolumeAttachStatus.ATTACHED}) self.assertEqual(HTTPStatus.ACCEPTED, resp.status_int) volume = db.volume_get(self.ctx, volume['id']) self.assertEqual(fields.VolumeAttachStatus.ATTACHED, volume['attach_status']) def test_reset_attach_invalid_status(self): volume = db.volume_create(self.ctx, {'attach_status': fields.VolumeAttachStatus.DETACHED, 'volume_type_id': fake.VOLUME_TYPE_ID}) resp = self._issue_volume_reset(self.ctx, volume, {'attach_status': 'bogus-status'}) self.assertEqual(HTTPStatus.BAD_REQUEST, resp.status_int) volume = db.volume_get(self.ctx, volume['id']) self.assertEqual(fields.VolumeAttachStatus.DETACHED, 
volume['attach_status']) def test_reset_detached_status_to_attached(self): volume = db.volume_create(self.ctx, {'status': 'available', 'attach_status': fields.VolumeAttachStatus.DETACHED, 'volume_type_id': fake.VOLUME_TYPE_ID}) resp = self._issue_volume_reset(self.ctx, volume, {'attach_status': fields.VolumeAttachStatus.ATTACHED}) self.assertEqual(HTTPStatus.ACCEPTED, resp.status_int) volume = db.volume_get(self.ctx, volume['id']) self.assertEqual(fields.VolumeAttachStatus.ATTACHED, volume['attach_status']) def test_reset_attached_status_to_attached(self): volume = db.volume_create(self.ctx, {'status': 'available', 'attach_status': fields.VolumeAttachStatus.ATTACHED, 'volume_type_id': fake.VOLUME_TYPE_ID}) resp = self._issue_volume_reset(self.ctx, volume, {'attach_status': fields.VolumeAttachStatus.ATTACHED}) self.assertEqual(HTTPStatus.ACCEPTED, resp.status_int) volume = db.volume_get(self.ctx, volume['id']) self.assertEqual(fields.VolumeAttachStatus.ATTACHED, volume['attach_status']) def test_reset_in_use_to_in_use_fail(self): volume = db.volume_create(self.ctx, {'status': 'in-use', 'attach_status': fields.VolumeAttachStatus.ATTACHED, 'volume_type_id': fake.VOLUME_TYPE_ID}) resp = self._issue_volume_reset(self.ctx, volume, {'status': 'in-use'}) self.assertEqual(HTTPStatus.BAD_REQUEST, resp.status_int) def test_reset_available_to_in_use_on_nonattached_volume_fail(self): volume = db.volume_create(self.ctx, {'status': 'available', 'attach_status': fields.VolumeAttachStatus.DETACHED, 'volume_type_id': fake.VOLUME_TYPE_ID}) resp = self._issue_volume_reset(self.ctx, volume, {'status': 'in-use'}) self.assertEqual(HTTPStatus.BAD_REQUEST, resp.status_int) @mock.patch('cinder.db.volume_attachment_get_all_by_volume_id') def test_reset_available_to_in_use_on_attached_volume( self, get_attachment): volume = db.volume_create(self.ctx, {'status': 'available', 'attach_status': fields.VolumeAttachStatus.ATTACHED, 'volume_type_id': fake.VOLUME_TYPE_ID}) resp = self._issue_volume_reset(self.ctx, volume, {'status': 'in-use'}) db_attachment = fake_volume.volume_attachment_db_obj() get_attachment.return_value = [db_attachment] self.assertEqual(HTTPStatus.ACCEPTED, resp.status_int) volume = db.volume_get(self.ctx, volume['id']) self.assertEqual(fields.VolumeAttachStatus.ATTACHED, volume['attach_status']) self.assertEqual('in-use', volume['status']) def test_reset_migration_invalid_status(self): volume = db.volume_create(self.ctx, {'migration_status': None, 'volume_type_id': fake.VOLUME_TYPE_ID}) resp = self._issue_volume_reset(self.ctx, volume, {'migration_status': 'bogus-status'}) self.assertEqual(HTTPStatus.BAD_REQUEST, resp.status_int) volume = db.volume_get(self.ctx, volume['id']) self.assertIsNone(volume['migration_status']) def test_reset_migration_status(self): volume = db.volume_create(self.ctx, {'migration_status': None, 'volume_type_id': fake.VOLUME_TYPE_ID}) resp = self._issue_volume_reset(self.ctx, volume, {'migration_status': 'migrating'}) self.assertEqual(HTTPStatus.ACCEPTED, resp.status_int) volume = db.volume_get(self.ctx, volume['id']) self.assertEqual('migrating', volume['migration_status']) def test_reset_status_as_admin(self): volume = db.volume_create(self.ctx, {'status': 'available', 'volume_type_id': fake.VOLUME_TYPE_ID}) resp = self._issue_volume_reset(self.ctx, volume, {'status': 'error'}) self.assertEqual(HTTPStatus.ACCEPTED, resp.status_int) volume = db.volume_get(self.ctx, volume['id']) self.assertEqual('error', volume['status']) @mock.patch('cinder.objects.volume.Volume.get_by_id') 
def test_reset_status_as_non_admin(self, fake_get): ctx = context.RequestContext(fake.USER_ID, fake.PROJECT_ID) volume = db.volume_create(self.ctx, {'status': 'error', 'size': 1, 'volume_type_id': fake.VOLUME_TYPE_ID}) fake_get.return_value = volume resp = self._issue_volume_reset(ctx, volume, {'status': 'error'}) # request is not authorized self.assertEqual(HTTPStatus.FORBIDDEN, resp.status_int) volume = db.volume_get(self.ctx, volume['id']) # status is still 'error' self.assertEqual('error', volume['status']) def test_backup_reset_status_as_admin(self): volume = db.volume_create(self.ctx, {'status': 'available', 'user_id': fake.USER_ID, 'project_id': fake.PROJECT_ID, 'volume_type_id': fake.VOLUME_TYPE_ID}) backup = db.backup_create(self.ctx, {'status': fields.BackupStatus.AVAILABLE, 'size': 1, 'volume_id': volume['id'], 'user_id': fake.USER_ID, 'project_id': fake.PROJECT_ID, 'host': 'test'}) resp = self._issue_backup_reset(self.ctx, backup, {'status': fields.BackupStatus.ERROR}) self.assertEqual(HTTPStatus.ACCEPTED, resp.status_int) def test_backup_reset_status_as_non_admin(self): ctx = context.RequestContext(fake.USER_ID, fake.PROJECT_ID) backup = test_utils.create_backup(ctx, status='available') resp = self._issue_backup_reset(ctx, backup, {'status': fields.BackupStatus.ERROR}) # request is not authorized self.assertEqual(HTTPStatus.FORBIDDEN, resp.status_int) def test_backup_reset_status(self): volume = db.volume_create(self.ctx, {'status': 'available', 'host': 'test', 'provider_location': '', 'size': 1, 'volume_type_id': fake.VOLUME_TYPE_ID}) backup = db.backup_create(self.ctx, {'status': fields.BackupStatus.AVAILABLE, 'volume_id': volume['id'], 'user_id': fake.USER_ID, 'project_id': fake.PROJECT_ID, 'host': 'test'}) resp = self._issue_backup_reset(self.ctx, backup, {'status': fields.BackupStatus.ERROR}) self.assertEqual(HTTPStatus.ACCEPTED, resp.status_int) @ddt.data({'status': None}, {'status': 'restoring'}) def test_invalid_status_for_backup(self, status): volume = db.volume_create(self.ctx, {'status': 'available', 'host': 'test', 'provider_location': '', 'size': 1, 'volume_type_id': fake.VOLUME_TYPE_ID}) backup = db.backup_create(self.ctx, {'status': 'available', 'volume_id': volume['id']}) resp = self._issue_backup_reset(self.ctx, backup, status) self.assertEqual(HTTPStatus.BAD_REQUEST, resp.status_int) def test_backup_reset_status_with_invalid_backup(self): volume = db.volume_create(self.ctx, {'status': 'available', 'host': 'test', 'provider_location': '', 'size': 1, 'volume_type_id': fake.VOLUME_TYPE_ID}) backup = db.backup_create(self.ctx, {'status': fields.BackupStatus.AVAILABLE, 'volume_id': volume['id'], 'user_id': fake.USER_ID, 'project_id': fake.PROJECT_ID}) backup['id'] = fake.BACKUP_ID resp = self._issue_backup_reset(self.ctx, backup, {'status': fields.BackupStatus.ERROR}) # Should raise 404 if backup doesn't exist. 
self.assertEqual(HTTPStatus.NOT_FOUND, resp.status_int) @ddt.data({'os-reset_status': {}}) def test_backup_reset_status_with_invalid_body(self, body): volume = db.volume_create(self.ctx, {'status': 'available', 'host': 'test', 'provider_location': '', 'size': 1, 'volume_type_id': fake.VOLUME_TYPE_ID}) backup = db.backup_create(self.ctx, {'status': fields.BackupStatus.AVAILABLE, 'volume_id': volume['id'], 'user_id': fake.USER_ID, 'project_id': fake.PROJECT_ID}) req = webob.Request.blank('/v3/%s/%s/%s/action' % ( fake.PROJECT_ID, 'backups', backup['id'])) req.method = 'POST' req.headers['content-type'] = 'application/json' req.body = jsonutils.dump_as_bytes(body) req.environ['cinder.context'] = self.ctx resp = req.get_response(app()) self.assertEqual(HTTPStatus.BAD_REQUEST, resp.status_int) def test_malformed_reset_status_body(self): volume = db.volume_create(self.ctx, {'status': 'available', 'size': 1, 'volume_type_id': fake.VOLUME_TYPE_ID}) resp = self._issue_volume_reset(self.ctx, volume, {'x-status': 'bad'}) self.assertEqual(HTTPStatus.BAD_REQUEST, resp.status_int) volume = db.volume_get(self.ctx, volume['id']) self.assertEqual('available', volume['status']) def test_invalid_status_for_volume(self): volume = db.volume_create(self.ctx, {'status': 'available', 'size': 1, 'volume_type_id': fake.VOLUME_TYPE_ID}) resp = self._issue_volume_reset(self.ctx, volume, {'status': 'invalid'}) self.assertEqual(HTTPStatus.BAD_REQUEST, resp.status_int) volume = db.volume_get(self.ctx, volume['id']) self.assertEqual('available', volume['status']) def test_reset_status_for_missing_volume(self): req = webob.Request.blank('/v3/%s/volumes/%s/action' % ( fake.PROJECT_ID, fake.WILL_NOT_BE_FOUND_ID)) req.method = 'POST' req.headers['content-type'] = 'application/json' body = {'os-reset_status': {'status': 'available'}} req.body = jsonutils.dump_as_bytes(body) req.environ['cinder.context'] = self.ctx resp = req.get_response(app()) self.assertEqual(HTTPStatus.NOT_FOUND, resp.status_int) self.assertRaises(exception.NotFound, db.volume_get, self.ctx, fake.WILL_NOT_BE_FOUND_ID) def test_reset_attached_status(self): # current status is available volume = self._create_volume(self.ctx, {'provider_location': '', 'size': 1}) self.volume_api.reserve_volume(self.ctx, volume) mountpoint = '/dev/vdb' attachment = self.volume_api.attach(self.ctx, volume, fake.INSTANCE_ID, None, mountpoint, 'rw') # volume is attached volume = db.volume_get(self.ctx.elevated(), volume['id']) attachment = db.volume_attachment_get(self.ctx, attachment['id']) self.assertEqual('in-use', volume['status']) self.assertEqual(fields.VolumeAttachStatus.ATTACHED, volume['attach_status']) self.assertEqual(fake.INSTANCE_ID, attachment['instance_uuid']) self.assertEqual(mountpoint, attachment['mountpoint']) self.assertEqual('attached', attachment['attach_status']) admin_metadata = volume['volume_admin_metadata'] self.assertEqual(2, len(admin_metadata)) self.assertEqual('readonly', admin_metadata[0]['key']) self.assertEqual('False', admin_metadata[0]['value']) self.assertEqual('attached_mode', admin_metadata[1]['key']) self.assertEqual('rw', admin_metadata[1]['value']) # Reset attach_status resp = self._issue_volume_reset( self.ctx, volume, {'status': 'available', 'attach_status': fields.VolumeAttachStatus.DETACHED}) # request is accepted self.assertEqual(HTTPStatus.ACCEPTED, resp.status_int) # volume is detached volume = db.volume_get(self.ctx, volume['id']) self.assertEqual('detached', volume['attach_status']) self.assertEqual('available', volume['status']) 
        admin_metadata = volume['volume_admin_metadata']
        self.assertEqual(1, len(admin_metadata))
        self.assertEqual('readonly', admin_metadata[0]['key'])
        self.assertEqual('False', admin_metadata[0]['value'])
        self.assertRaises(exception.VolumeAttachmentNotFound,
                          db.volume_attachment_get,
                          self.ctx, attachment['id'])

    def test_invalid_reset_attached_status(self):
        volume = db.volume_create(
            self.ctx,
            {'status': 'available', 'host': 'test',
             'provider_location': '', 'size': 1,
             'attach_status': fields.VolumeAttachStatus.DETACHED,
             'volume_type_id': fake.VOLUME_TYPE_ID})
        resp = self._issue_volume_reset(
            self.ctx, volume,
            {'status': 'available',
             'attach_status': fields.VolumeAttachStatus.ERROR_DETACHING})
        self.assertEqual(HTTPStatus.BAD_REQUEST, resp.status_int)
        volume = db.volume_get(self.ctx, volume['id'])
        self.assertEqual('available', volume['status'])
        self.assertEqual(fields.VolumeAttachStatus.DETACHED,
                         volume['attach_status'])

    def test_snapshot_reset_status(self):
        volume = db.volume_create(
            self.ctx,
            {'status': 'available', 'host': 'test',
             'provider_location': '', 'size': 1,
             'availability_zone': 'test',
             'attach_status': fields.VolumeAttachStatus.DETACHED,
             'volume_type_id': fake.VOLUME_TYPE_ID})
        kwargs = {
            'volume_id': volume['id'],
            'cgsnapshot_id': None,
            'user_id': self.ctx.user_id,
            'project_id': self.ctx.project_id,
            'status': fields.SnapshotStatus.ERROR_DELETING,
            'progress': '0%',
            'volume_size': volume['size'],
            'metadata': {}
        }
        snapshot = objects.Snapshot(context=self.ctx, **kwargs)
        snapshot.create()
        self.addCleanup(snapshot.destroy)

        resp = self._issue_snapshot_reset(
            self.ctx, snapshot, {'status': fields.SnapshotStatus.ERROR})

        self.assertEqual(HTTPStatus.ACCEPTED, resp.status_int)
        snapshot = objects.Snapshot.get_by_id(self.ctx, snapshot['id'])
        self.assertEqual(fields.SnapshotStatus.ERROR, snapshot.status)

    @ddt.data({'status': None}, {'status': 'attaching'})
    def test_invalid_status_for_snapshot(self, updated_status):
        volume = db.volume_create(
            self.ctx,
            {'status': 'available', 'host': 'test',
             'provider_location': '', 'size': 1,
             'volume_type_id': fake.VOLUME_TYPE_ID})
        snapshot = objects.Snapshot(self.ctx,
                                    status=fields.SnapshotStatus.AVAILABLE,
                                    volume_id=volume['id'])
        snapshot.create()
        self.addCleanup(snapshot.destroy)

        resp = self._issue_snapshot_reset(self.ctx, snapshot, updated_status)

        self.assertEqual(HTTPStatus.BAD_REQUEST, resp.status_int)
        self.assertEqual(fields.SnapshotStatus.AVAILABLE, snapshot.status)

    @ddt.data({'os-reset_status': {}})
    def test_snapshot_reset_status_with_invalid_body(self, body):
        volume = db.volume_create(
            self.ctx,
            {'status': 'available', 'host': 'test',
             'provider_location': '', 'size': 1,
             'volume_type_id': fake.VOLUME_TYPE_ID})
        snapshot = objects.Snapshot(self.ctx,
                                    status=fields.SnapshotStatus.AVAILABLE,
                                    volume_id=volume['id'],
                                    volume_type_id=volume['volume_type_id'])
        snapshot.create()
        self.addCleanup(snapshot.destroy)

        req = webob.Request.blank('/v3/%s/%s/%s/action' % (
            fake.PROJECT_ID, 'snapshots', snapshot['id']))
        req.method = 'POST'
        req.headers['content-type'] = 'application/json'
        req.body = jsonutils.dump_as_bytes(body)
        req.environ['cinder.context'] = self.ctx
        resp = req.get_response(app())
        self.assertEqual(HTTPStatus.BAD_REQUEST, resp.status_int)

    def test_force_delete(self):
        # current status is creating
        volume = self._create_volume(self.ctx, {'size': 1, 'host': None})
        req = webob.Request.blank('/v3/%s/volumes/%s/action' % (
            fake.PROJECT_ID, volume['id']))
        req.method = 'POST'
        req.headers['content-type'] = 'application/json'
        req.body = jsonutils.dump_as_bytes({'os-force_delete':
{}}) # attach admin context to request req.environ['cinder.context'] = self.ctx resp = req.get_response(app()) # request is accepted self.assertEqual(HTTPStatus.ACCEPTED, resp.status_int) # volume is deleted self.assertRaises(exception.NotFound, objects.Volume.get_by_id, self.ctx, volume.id) @mock.patch.object(volume_api.API, 'delete_snapshot', return_value=True) @mock.patch('cinder.objects.Snapshot.get_by_id') @mock.patch.object(db, 'snapshot_get') @mock.patch.object(db, 'volume_get') def test_force_delete_snapshot(self, volume_get, snapshot_get, get_by_id, delete_snapshot): volume = v3_fakes.create_volume(fake.VOLUME_ID) snapshot = v3_fakes.fake_snapshot(fake.SNAPSHOT_ID) snapshot_obj = fake_snapshot.fake_snapshot_obj(self.ctx, **snapshot) volume_get.return_value = volume snapshot_get.return_value = snapshot get_by_id.return_value = snapshot_obj path = '/v3/%s/snapshots/%s/action' % ( fake.PROJECT_ID, snapshot['id']) req = webob.Request.blank(path) req.method = 'POST' req.headers['content-type'] = 'application/json' req.body = jsonutils.dump_as_bytes({'os-force_delete': {}}) # attach admin context to request req.environ['cinder.context'] = self.ctx resp = req.get_response(app()) self.assertEqual(HTTPStatus.ACCEPTED, resp.status_int) def _migrate_volume_prep(self): # create volume's current host and the destination host db.service_create(self.ctx, {'host': 'test', 'topic': constants.VOLUME_TOPIC, 'binary': constants.VOLUME_BINARY, 'created_at': timeutils.utcnow()}) db.service_create(self.ctx, {'host': 'test2', 'topic': constants.VOLUME_TOPIC, 'binary': constants.VOLUME_BINARY, 'created_at': timeutils.utcnow()}) db.service_create(self.ctx, {'host': 'clustered_host', 'topic': constants.VOLUME_TOPIC, 'binary': constants.VOLUME_BINARY, 'cluster_name': 'cluster', 'created_at': timeutils.utcnow()}) db.cluster_create(self.ctx, {'name': 'cluster', 'binary': constants.VOLUME_BINARY}) # current status is available volume = self._create_volume(self.ctx) return volume def _migrate_volume_3_exec(self, ctx, volume, host, expected_status, force_host_copy=False, version=None, cluster=None): # build request to migrate to host req = webob.Request.blank('/v3/%s/volumes/%s/action' % ( fake.PROJECT_ID, volume['id'])) req.method = 'POST' body = {'os-migrate_volume': {'host': host, 'force_host_copy': force_host_copy}} version = version or mv.BASE_VERSION req.headers = mv.get_mv_header(version) req.headers['Content-Type'] = 'application/json' req.api_version_request = mv.get_api_version(version) if version == mv.VOLUME_MIGRATE_CLUSTER: body['os-migrate_volume']['cluster'] = cluster req.body = jsonutils.dump_as_bytes(body) req.environ['cinder.context'] = ctx resp = req.get_response(app()) # verify status self.assertEqual(expected_status, resp.status_int) volume = db.volume_get(self.ctx, volume['id']) return volume @ddt.data(mv.BASE_VERSION, mv.get_prior_version(mv.VOLUME_MIGRATE_CLUSTER), mv.VOLUME_MIGRATE_CLUSTER) def test_migrate_volume_success_3(self, version): expected_status = HTTPStatus.ACCEPTED host = 'test2' volume = self._migrate_volume_prep() volume = self._migrate_volume_3_exec(self.ctx, volume, host, expected_status, version=version) self.assertEqual('starting', volume['migration_status']) def test_migrate_volume_success_cluster(self): expected_status = HTTPStatus.ACCEPTED # We cannot provide host and cluster, so send host to None host = None cluster = 'cluster' volume = self._migrate_volume_prep() volume = self._migrate_volume_3_exec(self.ctx, volume, host, expected_status, 
version=mv.VOLUME_MIGRATE_CLUSTER, cluster=cluster) self.assertEqual('starting', volume['migration_status']) def test_migrate_volume_fail_host_and_cluster(self): # We cannot send host and cluster in the request host = 'test2' cluster = 'cluster' volume = self._migrate_volume_prep() expected_status = HTTPStatus.BAD_REQUEST self._migrate_volume_3_exec(self.ctx, volume, host, expected_status, version=mv.VOLUME_MIGRATE_CLUSTER, cluster=cluster) def _migrate_volume_exec(self, ctx, volume, host, expected_status, force_host_copy=False, lock_volume=False): # build request to migrate to host req = webob.Request.blank('/v3/%s/volumes/%s/action' % ( fake.PROJECT_ID, volume['id'])) req.method = 'POST' req.headers['content-type'] = 'application/json' body = {'os-migrate_volume': {'host': host, 'force_host_copy': force_host_copy, 'lock_volume': lock_volume}} req.body = jsonutils.dump_as_bytes(body) req.environ['cinder.context'] = ctx resp = req.get_response(app()) # verify status self.assertEqual(expected_status, resp.status_int) volume = db.volume_get(self.ctx, volume['id']) return volume def test_migrate_volume_success(self): expected_status = HTTPStatus.ACCEPTED host = 'test2' volume = self._migrate_volume_prep() volume = self._migrate_volume_exec(self.ctx, volume, host, expected_status) self.assertEqual('starting', volume['migration_status']) def test_migrate_volume_fail_replication(self): expected_status = HTTPStatus.BAD_REQUEST host = 'test2' volume = self._migrate_volume_prep() # current status is available volume = self._create_volume(self.ctx, {'provider_location': '', 'attach_status': None, 'replication_status': 'active'}) volume = self._migrate_volume_exec(self.ctx, volume, host, expected_status) def test_migrate_volume_replication_not_caple_success(self): expected_status = HTTPStatus.ACCEPTED host = 'test2' volume = self._migrate_volume_prep() # current status is available volume = self._create_volume(self.ctx, {'provider_location': '', 'attach_status': None, 'replication_status': 'not-capable'}) volume = self._migrate_volume_exec(self.ctx, volume, host, expected_status) @mock.patch("cinder.volume.api.API.get") def test_migrate_volume_as_non_admin(self, fake_get): expected_status = HTTPStatus.FORBIDDEN host = 'test2' ctx = context.RequestContext(fake.USER_ID, fake.PROJECT_ID) volume = self._migrate_volume_prep() fake_get.return_value = volume self._migrate_volume_exec(ctx, volume, host, expected_status) def test_migrate_volume_without_host_parameter(self): expected_status = HTTPStatus.BAD_REQUEST host = 'test3' volume = self._migrate_volume_prep() # build request to migrate without host req = webob.Request.blank('/v3/%s/volumes/%s/action' % ( fake.PROJECT_ID, volume['id'])) req.method = 'POST' req.headers['content-type'] = 'application/json' body = {'os-migrate_volume': {'host': host, 'force_host_copy': False}} req.body = jsonutils.dump_as_bytes(body) req.environ['cinder.context'] = self.ctx resp = req.get_response(app()) # verify status self.assertEqual(expected_status, resp.status_int) def test_migrate_volume_host_no_exist(self): expected_status = HTTPStatus.BAD_REQUEST host = 'test3' volume = self._migrate_volume_prep() self._migrate_volume_exec(self.ctx, volume, host, expected_status) def test_migrate_volume_same_host(self): expected_status = HTTPStatus.BAD_REQUEST host = 'test' volume = self._migrate_volume_prep() self._migrate_volume_exec(self.ctx, volume, host, expected_status) def test_migrate_volume_migrating(self): expected_status = HTTPStatus.BAD_REQUEST host = 'test2' volume = 
self._migrate_volume_prep() volume.migration_status = 'migrating' volume.save() self._migrate_volume_exec(self.ctx, volume, host, expected_status) def test_migrate_volume_with_snap(self): expected_status = HTTPStatus.BAD_REQUEST host = 'test2' volume = self._migrate_volume_prep() snap = objects.Snapshot(self.ctx, volume_id=volume['id']) snap.create() self.addCleanup(snap.destroy) self._migrate_volume_exec(self.ctx, volume, host, expected_status) @ddt.data('force_host_copy', None, ' true ', 0) def test_migrate_volume_bad_force_host_copy(self, force_host_copy): expected_status = HTTPStatus.BAD_REQUEST host = 'test2' volume = self._migrate_volume_prep() self._migrate_volume_exec(self.ctx, volume, host, expected_status, force_host_copy=force_host_copy) @ddt.data('lock_volume', None, ' true ', 0) def test_migrate_volume_bad_lock_volume(self, lock_volume): expected_status = HTTPStatus.BAD_REQUEST host = 'test2' volume = self._migrate_volume_prep() self._migrate_volume_exec(self.ctx, volume, host, expected_status, lock_volume=lock_volume) @ddt.data('true', False, '1', '0') def test_migrate_volume_valid_lock_volume(self, lock_volume): expected_status = HTTPStatus.ACCEPTED host = 'test2' volume = self._migrate_volume_prep() self._migrate_volume_exec(self.ctx, volume, host, expected_status, lock_volume=lock_volume) def _migrate_volume_comp_exec(self, ctx, volume, new_volume, error, expected_status, expected_id, no_body=False): req = webob.Request.blank('/v3/%s/volumes/%s/action' % ( fake.PROJECT_ID, volume['id'])) req.method = 'POST' req.headers['content-type'] = 'application/json' body = {'new_volume': new_volume['id'], 'error': error} if no_body: body = {'': body} else: body = {'os-migrate_volume_completion': body} req.body = jsonutils.dump_as_bytes(body) req.environ['cinder.context'] = ctx resp = req.get_response(app()) resp_dict = resp.json # verify status self.assertEqual(expected_status, resp.status_int) if expected_id: self.assertEqual(expected_id, resp_dict['save_volume_id']) else: self.assertNotIn('save_volume_id', resp_dict) @mock.patch("cinder.volume.api.API.get") def test_migrate_volume_comp_as_non_admin(self, fake_get): volume = db.volume_create(self.ctx, {'id': fake.VOLUME_ID, 'volume_type_id': fake.VOLUME_TYPE_ID}) new_volume = db.volume_create(self.ctx, {'id': fake.VOLUME2_ID, 'volume_type_id': fake.VOLUME_TYPE_ID}) expected_status = HTTPStatus.FORBIDDEN expected_id = None fake_get.return_value = volume ctx = context.RequestContext(fake.USER_ID, fake.PROJECT_ID) self._migrate_volume_comp_exec(ctx, volume, new_volume, False, expected_status, expected_id) def test_migrate_volume_comp_no_mig_status(self): volume1 = self._create_volume(self.ctx, {'migration_status': 'foo'}) volume2 = self._create_volume(self.ctx, {'migration_status': None}) expected_status = HTTPStatus.BAD_REQUEST expected_id = None self._migrate_volume_comp_exec(self.ctx, volume1, volume2, False, expected_status, expected_id) self._migrate_volume_comp_exec(self.ctx, volume2, volume1, False, expected_status, expected_id) def test_migrate_volume_comp_bad_mig_status(self): volume1 = self._create_volume(self.ctx, {'migration_status': 'migrating'}) volume2 = self._create_volume(self.ctx, {'migration_status': 'target:foo'}) expected_status = HTTPStatus.BAD_REQUEST expected_id = None self._migrate_volume_comp_exec(self.ctx, volume1, volume2, False, expected_status, expected_id) def test_migrate_volume_comp_no_action(self): volume = db.volume_create(self.ctx, {'id': fake.VOLUME_ID, 'volume_type_id': fake.VOLUME_TYPE_ID}) 
new_volume = db.volume_create(self.ctx, {'id': fake.VOLUME2_ID, 'volume_type_id': fake.VOLUME_TYPE_ID}) expected_status = HTTPStatus.BAD_REQUEST expected_id = None ctx = context.RequestContext(fake.USER_ID, fake.PROJECT_ID) self._migrate_volume_comp_exec(ctx, volume, new_volume, False, expected_status, expected_id, True) def test_migrate_volume_comp_from_nova(self): volume = self._create_volume(self.ctx, {'status': 'in-use', 'migration_status': None, 'attach_status': fields.VolumeAttachStatus. ATTACHED}) new_volume = self._create_volume(self.ctx, {'migration_status': None, 'attach_status': fields.VolumeAttachStatus. DETACHED}) expected_status = HTTPStatus.OK expected_id = new_volume.id self._migrate_volume_comp_exec(self.ctx, volume, new_volume, False, expected_status, expected_id) def test_migrate_volume_comp_no_new_volume(self): volume = db.volume_create(self.ctx, {'id': fake.VOLUME_ID, 'volume_type_id': fake.VOLUME_TYPE_ID}) req = webob.Request.blank('/v3/%s/volumes/%s/action' % ( fake.PROJECT_ID, volume['id'])) req.method = 'POST' req.headers['content-type'] = 'application/json' body = {'os-migrate_volume_completion': {'error': False}} req.body = jsonutils.dump_as_bytes(body) req.environ['cinder.context'] = self.ctx resp = req.get_response(app()) res_dict = jsonutils.loads(resp.body) self.assertEqual(HTTPStatus.BAD_REQUEST, res_dict['badRequest']['code']) @mock.patch('cinder.backup.rpcapi.BackupAPI.delete_backup', mock.Mock()) @mock.patch('cinder.db.service_get_all') @mock.patch('cinder.backup.api.API._check_support_to_force_delete') def _force_delete_backup_util(self, test_status, mock_check_support, mock_service_get_all): mock_service_get_all.return_value = [ {'availability_zone': "az1", 'host': 'testhost', 'disabled': 0, 'updated_at': timeutils.utcnow(), 'uuid': 'a3a593da-7f8d-4bb7-8b4c-f2bc1e0b4824'}] # admin context mock_check_support.return_value = True # current status is dependent on argument: test_status. 
backup = test_utils.create_backup(self.ctx, status=test_status, size=1, availability_zone='az1', host='testhost') req = webob.Request.blank('/v3/%s/backups/%s/action' % ( fake.PROJECT_ID, backup.id)) req.method = 'POST' req.headers['Content-Type'] = 'application/json' req.body = jsonutils.dump_as_bytes({'os-force_delete': {}}) req.environ['cinder.context'] = self.ctx res = req.get_response(app()) backup.refresh() self.assertEqual(HTTPStatus.ACCEPTED, res.status_int) self.assertEqual('deleting', backup.status) backup.destroy() def test_delete_backup_force_when_creating(self): self._force_delete_backup_util('creating') def test_delete_backup_force_when_deleting(self): self._force_delete_backup_util('deleting') def test_delete_backup_force_when_restoring(self): self._force_delete_backup_util('restoring') def test_delete_backup_force_when_available(self): self._force_delete_backup_util('available') def test_delete_backup_force_when_error(self): self._force_delete_backup_util('error') def test_delete_backup_force_when_error_deleting(self): self._force_delete_backup_util('error_deleting') @mock.patch('cinder.backup.rpcapi.BackupAPI.check_support_to_force_delete', return_value=False) def test_delete_backup_force_when_not_supported(self, mock_check_support): # admin context self.override_config('backup_driver', 'cinder.backup.drivers.ceph') backup = test_utils.create_backup(self.ctx, size=1) req = webob.Request.blank('/v3/%s/backups/%s/action' % ( fake.PROJECT_ID, backup.id)) req.method = 'POST' req.headers['Content-Type'] = 'application/json' req.body = jsonutils.dump_as_bytes({'os-force_delete': {}}) req.environ['cinder.context'] = self.ctx res = req.get_response(app()) self.assertEqual(HTTPStatus.METHOD_NOT_ALLOWED, res.status_int) def _extend_volume_comp_exec(self, ctx, volume, error, expected_status): req = webob.Request.blank('/v3/%s/volumes/%s/action' % ( fake.PROJECT_ID, volume['id'])) req.method = 'POST' req.headers['content-type'] = 'application/json' body = {'os-extend_volume_completion': {'error': error}} req.body = jsonutils.dump_as_bytes(body) req.environ['cinder.context'] = ctx resp = req.get_response(app()) # verify status self.assertEqual(expected_status, resp.status_int) def test_extend_volume_comp_accepted_success(self): volume = self._create_volume( self.ctx, {'size': 1, 'status': 'extending', 'admin_metadata': { 'extend_new_size': '2', 'extend_reservations': '["563e9e70-5f46-4265-9a92-f7bca28d896c"]' }}) expected_status = HTTPStatus.ACCEPTED self._extend_volume_comp_exec(self.ctx, volume, False, expected_status) def test_extend_volume_comp_accepted_failure(self): volume = self._create_volume( self.ctx, {'size': 1, 'status': 'extending', 'admin_metadata': { 'extend_new_size': '2', 'extend_reservations': '["563e9e70-5f46-4265-9a92-f7bca28d896c"]' }}) expected_status = HTTPStatus.ACCEPTED self._extend_volume_comp_exec(self.ctx, volume, True, expected_status) def test_extend_volume_comp_wrong_status(self): volume = self._create_volume( self.ctx, {'size': 1, 'status': 'in-use', 'admin_metadata': { 'extend_new_size': '2', 'extend_reservations': '["563e9e70-5f46-4265-9a92-f7bca28d896c"]' }}) expected_status = HTTPStatus.BAD_REQUEST self._extend_volume_comp_exec(self.ctx, volume, False, expected_status) def test_extend_volume_comp_missing_metadata(self): volume = self._create_volume( self.ctx, {'size': 1, 'status': 'extending'}) expected_status = HTTPStatus.BAD_REQUEST self._extend_volume_comp_exec(self.ctx, volume, False, expected_status) def test_extend_volume_comp_wrong_size(self): 
volume = self._create_volume( self.ctx, {'size': 2, 'status': 'extending', 'admin_metadata': { 'extend_new_size': '1', 'extend_reservations': '["563e9e70-5f46-4265-9a92-f7bca28d896c"]' }}) expected_status = HTTPStatus.BAD_REQUEST self._extend_volume_comp_exec(self.ctx, volume, False, expected_status) class AdminActionsAttachDetachTest(BaseAdminTest): def setUp(self): super(AdminActionsAttachDetachTest, self).setUp() # start service to handle rpc messages for attach requests self.svc = self.start_service('volume', host='test') self.mock_deletion_allowed = self.mock_object( volume_api.API, 'attachment_deletion_allowed', return_value=None) def tearDown(self): self.svc.stop() super(AdminActionsAttachDetachTest, self).tearDown() def test_force_detach_instance_attached_volume(self): # current status is available volume = self._create_volume(self.ctx, {'provider_location': '', 'size': 1}) connector = {'initiator': 'iqn.2012-07.org.fake:01'} self.volume_api.reserve_volume(self.ctx, volume) mountpoint = '/dev/vbd' attachment = self.volume_api.attach(self.ctx, volume, fake.INSTANCE_ID, None, mountpoint, 'rw') # volume is attached volume.refresh() self.assertEqual('in-use', volume.status) self.assertEqual(fake.INSTANCE_ID, attachment['instance_uuid']) self.assertEqual(mountpoint, attachment['mountpoint']) self.assertEqual(fields.VolumeAttachStatus.ATTACHED, attachment['attach_status']) admin_metadata = volume.admin_metadata self.assertEqual(2, len(admin_metadata)) self.assertEqual('False', admin_metadata['readonly']) self.assertEqual('rw', admin_metadata['attached_mode']) conn_info = self.volume_api.initialize_connection(self.ctx, volume, connector) self.assertEqual('rw', conn_info['data']['access_mode']) # build request to force detach req = webob.Request.blank('/v3/%s/volumes/%s/action' % ( fake.PROJECT_ID, volume.id)) req.method = 'POST' req.headers['content-type'] = 'application/json' # request status of 'error' body = {'os-force_detach': {'attachment_id': attachment['id'], 'connector': connector}} req.body = jsonutils.dump_as_bytes(body) # attach admin context to request req.environ['cinder.context'] = self.ctx # make request resp = req.get_response(app()) # request is accepted self.assertEqual(HTTPStatus.ACCEPTED, resp.status_int) volume.refresh() self.assertRaises(exception.VolumeAttachmentNotFound, db.volume_attachment_get, self.ctx, attachment['id']) # status changed to 'available' self.assertEqual('available', volume.status) admin_metadata = volume.admin_metadata self.assertEqual(1, len(admin_metadata)) self.assertEqual('False', admin_metadata['readonly']) # One call is for the terminate_connection and the other is for the # detach self.assertEqual(2, self.mock_deletion_allowed.call_count) self.mock_deletion_allowed.assert_has_calls( [mock.call(self.ctx, None, mock.ANY), mock.call(self.ctx, attachment['id'], mock.ANY)]) for i in (0, 1): self.assertIsInstance( self.mock_deletion_allowed.call_args_list[i][0][2], objects.Volume) def test_force_detach_host_attached_volume(self): # current status is available volume = self._create_volume(self.ctx, {'provider_location': '', 'size': 1}) connector = {'initiator': 'iqn.2012-07.org.fake:01'} self.volume_api.initialize_connection(self.ctx, volume, connector) mountpoint = '/dev/vbd' host_name = 'fake-host' attachment = self.volume_api.attach(self.ctx, volume, None, host_name, mountpoint, 'ro') # volume is attached volume.refresh() self.assertEqual('in-use', volume.status) self.assertIsNone(attachment['instance_uuid']) self.assertEqual(host_name, 
attachment['attached_host']) self.assertEqual(mountpoint, attachment['mountpoint']) self.assertEqual(fields.VolumeAttachStatus.ATTACHED, attachment['attach_status']) admin_metadata = volume.admin_metadata self.assertEqual(2, len(admin_metadata)) self.assertEqual('False', admin_metadata['readonly']) self.assertEqual('ro', admin_metadata['attached_mode']) conn_info = self.volume_api.initialize_connection(self.ctx, volume, connector) self.assertEqual('ro', conn_info['data']['access_mode']) # build request to force detach req = webob.Request.blank('/v3/%s/volumes/%s/action' % ( fake.PROJECT_ID, volume.id)) req.method = 'POST' req.headers['content-type'] = 'application/json' # request status of 'error' body = {'os-force_detach': {'attachment_id': attachment['id'], 'connector': connector}} req.body = jsonutils.dump_as_bytes(body) # attach admin context to request req.environ['cinder.context'] = self.ctx # make request resp = req.get_response(app()) # request is accepted self.assertEqual(HTTPStatus.ACCEPTED, resp.status_int) volume.refresh() self.assertRaises(exception.VolumeAttachmentNotFound, db.volume_attachment_get, self.ctx, attachment['id']) # status changed to 'available' self.assertEqual('available', volume['status']) admin_metadata = volume['admin_metadata'] self.assertEqual(1, len(admin_metadata)) self.assertEqual('False', admin_metadata['readonly']) # One call is for the terminate_connection and the other is for the # detach self.assertEqual(2, self.mock_deletion_allowed.call_count) self.mock_deletion_allowed.assert_has_calls( [mock.call(self.ctx, None, mock.ANY), mock.call(self.ctx, attachment['id'], mock.ANY)]) for i in (0, 1): self.assertIsInstance( self.mock_deletion_allowed.call_args_list[i][0][2], objects.Volume) def test_volume_force_detach_raises_remote_error(self): # current status is available volume = self._create_volume(self.ctx, {'provider_location': '', 'size': 1}) connector = {'initiator': 'iqn.2012-07.org.fake:01'} self.volume_api.reserve_volume(self.ctx, volume) mountpoint = '/dev/vbd' attachment = self.volume_api.attach(self.ctx, volume, fake.INSTANCE_ID, None, mountpoint, 'rw') # volume is attached volume.refresh() self.assertEqual('in-use', volume.status) self.assertEqual(fake.INSTANCE_ID, attachment['instance_uuid']) self.assertEqual(mountpoint, attachment['mountpoint']) self.assertEqual(fields.VolumeAttachStatus.ATTACHED, attachment['attach_status']) admin_metadata = volume.admin_metadata self.assertEqual(2, len(admin_metadata)) self.assertEqual('False', admin_metadata['readonly']) self.assertEqual('rw', admin_metadata['attached_mode']) conn_info = self.volume_api.initialize_connection(self.ctx, volume, connector) self.assertEqual('rw', conn_info['data']['access_mode']) # build request to force detach volume_remote_error = \ messaging.RemoteError(exc_type='VolumeAttachmentNotFound') with mock.patch.object(volume_api.API, 'detach', side_effect=volume_remote_error): req = webob.Request.blank('/v3/%s/volumes/%s/action' % ( fake.PROJECT_ID, volume.id)) req.method = 'POST' req.headers['content-type'] = 'application/json' body = {'os-force_detach': {'attachment_id': fake.ATTACHMENT_ID}} req.body = jsonutils.dump_as_bytes(body) # attach admin context to request req.environ['cinder.context'] = self.ctx # make request resp = req.get_response(app()) self.assertEqual(HTTPStatus.BAD_REQUEST, resp.status_int) self.mock_deletion_allowed.assert_called_once_with( self.ctx, None, volume) self.mock_deletion_allowed.reset_mock() # test for VolumeBackendAPIException 
volume_remote_error = ( messaging.RemoteError(exc_type='VolumeBackendAPIException')) with mock.patch.object(volume_api.API, 'detach', side_effect=volume_remote_error): req = webob.Request.blank('/v3/%s/volumes/%s/action' % ( fake.PROJECT_ID, volume.id)) req.method = 'POST' req.headers['content-type'] = 'application/json' body = {'os-force_detach': {'attachment_id': fake.ATTACHMENT_ID, 'connector': connector}} req.body = jsonutils.dump_as_bytes(body) # attach admin context to request req.environ['cinder.context'] = self.ctx # make request self.assertRaises(messaging.RemoteError, req.get_response, app()) self.mock_deletion_allowed.assert_called_once_with( self.ctx, None, volume) def test_volume_force_detach_raises_db_error(self): # In case of DB error 500 error code is returned to user # current status is available volume = self._create_volume(self.ctx, {'provider_location': '', 'size': 1}) connector = {'initiator': 'iqn.2012-07.org.fake:01'} self.volume_api.reserve_volume(self.ctx, volume) mountpoint = '/dev/vbd' attachment = self.volume_api.attach(self.ctx, volume, fake.INSTANCE_ID, None, mountpoint, 'rw') # volume is attached volume.refresh() self.assertEqual('in-use', volume.status) self.assertEqual(fake.INSTANCE_ID, attachment['instance_uuid']) self.assertEqual(mountpoint, attachment['mountpoint']) self.assertEqual(fields.VolumeAttachStatus.ATTACHED, attachment['attach_status']) admin_metadata = volume.admin_metadata self.assertEqual(2, len(admin_metadata)) self.assertEqual('False', admin_metadata['readonly']) self.assertEqual('rw', admin_metadata['attached_mode']) conn_info = self.volume_api.initialize_connection(self.ctx, volume, connector) self.assertEqual('rw', conn_info['data']['access_mode']) # build request to force detach volume_remote_error = messaging.RemoteError(exc_type='DBError') with mock.patch.object(volume_api.API, 'detach', side_effect=volume_remote_error): req = webob.Request.blank('/v3/%s/volumes/%s/action' % (fake.PROJECT_ID, volume.id)) req.method = 'POST' req.headers['content-type'] = 'application/json' body = {'os-force_detach': {'attachment_id': fake.ATTACHMENT_ID, 'connector': connector}} req.body = jsonutils.dump_as_bytes(body) # attach admin context to request req.environ['cinder.context'] = self.ctx # make request self.assertRaises(messaging.RemoteError, req.get_response, app()) self.mock_deletion_allowed.assert_called_once_with( self.ctx, None, volume) def test_volume_force_detach_missing_connector(self): # current status is available volume = self._create_volume(self.ctx, {'provider_location': '', 'size': 1}) connector = {'initiator': 'iqn.2012-07.org.fake:01'} self.volume_api.reserve_volume(self.ctx, volume) mountpoint = '/dev/vbd' attachment = self.volume_api.attach(self.ctx, volume, fake.INSTANCE_ID, None, mountpoint, 'rw') # volume is attached volume.refresh() self.assertEqual('in-use', volume.status) self.assertEqual(fake.INSTANCE_ID, attachment['instance_uuid']) self.assertEqual(mountpoint, attachment['mountpoint']) self.assertEqual(fields.VolumeAttachStatus.ATTACHED, attachment['attach_status']) admin_metadata = volume.admin_metadata self.assertEqual(2, len(admin_metadata)) self.assertEqual('False', admin_metadata['readonly']) self.assertEqual('rw', admin_metadata['attached_mode']) conn_info = self.volume_api.initialize_connection(self.ctx, volume, connector) self.assertEqual('rw', conn_info['data']['access_mode']) # test when missing connector with mock.patch.object(volume_api.API, 'detach'): req = webob.Request.blank('/v3/%s/volumes/%s/action' % ( 
fake.PROJECT_ID, volume.id)) req.method = 'POST' req.headers['content-type'] = 'application/json' body = {'os-force_detach': {'attachment_id': fake.ATTACHMENT_ID}} req.body = jsonutils.dump_as_bytes(body) # attach admin context to request req.environ['cinder.context'] = self.ctx # make request resp = req.get_response(app()) self.assertEqual(HTTPStatus.ACCEPTED, resp.status_int) self.mock_deletion_allowed.assert_called_once_with( self.ctx, None, volume) def test_attach_in_used_volume_by_instance(self): """Test that attaching to an in-use volume fails.""" # current status is available volume = self._create_volume(self.ctx, {'provider_location': '', 'size': 1}) connector = {'initiator': 'iqn.2012-07.org.fake:01'} self.volume_api.reserve_volume(self.ctx, volume) conn_info = self.volume_api.initialize_connection(self.ctx, volume, connector) self.volume_api.attach(self.ctx, volume, fake.INSTANCE_ID, None, '/dev/vbd0', 'rw') self.assertEqual('rw', conn_info['data']['access_mode']) self.assertRaises(exception.InvalidVolume, self.volume_api.attach, self.ctx, volume, fake.INSTANCE_ID, None, '/dev/vdb1', 'ro') def test_attach_in_used_volume_by_host(self): """Test that attaching to an in-use volume fails.""" # current status is available volume = self._create_volume(self.ctx, {'provider_location': '', 'size': 1}) connector = {'initiator': 'iqn.2012-07.org.fake:01'} self.volume_api.reserve_volume(self.ctx, volume) self.volume_api.initialize_connection(self.ctx, volume, connector) self.volume_api.attach(self.ctx, volume, None, 'fake_host1', '/dev/vbd0', 'rw') conn_info = self.volume_api.initialize_connection(self.ctx, volume, connector) conn_info['data']['access_mode'] = 'rw' self.assertRaises(exception.InvalidVolume, self.volume_api.attach, self.ctx, volume, None, 'fake_host2', '/dev/vbd1', 'ro') def test_invalid_iscsi_connector(self): """Test connector without the initiator (required by iscsi driver).""" # current status is available volume = self._create_volume(self.ctx, {'provider_location': '', 'size': 1}) connector = {} self.assertRaises(exception.InvalidInput, self.volume_api.initialize_connection, self.ctx, volume, connector) def test_attach_attaching_volume_with_different_instance(self): """Test that attaching volume reserved for another instance fails.""" # current status is available volume = self._create_volume(self.ctx, {'provider_location': '', 'size': 1}) self.volume_api.reserve_volume(self.ctx, volume) values = {'volume_id': volume['id'], 'attach_status': fields.VolumeAttachStatus.ATTACHING, 'attach_time': timeutils.utcnow(), 'instance_uuid': 'abc123', } db.volume_attach(self.ctx, values) db.volume_admin_metadata_update(self.ctx, volume['id'], {"attached_mode": 'rw'}, False) mountpoint = '/dev/vbd' attachment = self.volume_api.attach(self.ctx, volume, fake.INSTANCE_ID, None, mountpoint, 'rw') self.assertEqual(fake.INSTANCE_ID, attachment['instance_uuid']) self.assertEqual(volume['id'], attachment['volume_id'], volume['id']) self.assertEqual(fields.VolumeAttachStatus.ATTACHED, attachment['attach_status']) def test_attach_attaching_volume_with_different_mode(self): """Test that attaching volume reserved for another mode fails.""" # current status is available volume = self._create_volume( self.ctx, {'provider_location': '', 'size': 1, 'status': 'attaching', 'instance_uuid': fake.INSTANCE_ID, 'admin_metadata': {"attached_mode": 'rw'}}) values = {'status': 'attaching'} db.volume_update(self.ctx, volume['id'], values) db.volume_admin_metadata_update(self.ctx, volume['id'], {"attached_mode": 
'rw'}, False) mountpoint = '/dev/vbd' self.assertRaises(exception.InvalidVolume, self.volume_api.attach, self.ctx, volume, fake.INSTANCE_ID, None, mountpoint, 'ro') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/api/contrib/test_availability_zones.py0000664000175000017500000000365000000000000026172 0ustar00zuulzuul00000000000000# Copyright (c) 2013 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import datetime from oslo_utils import timeutils import cinder.api.contrib.availability_zones import cinder.context from cinder.tests.unit import test import cinder.volume.api created_time = datetime.datetime(2012, 11, 14, 1, 20, 41, 95099) current_time = timeutils.utcnow() def list_availability_zones(self): return ( {'name': 'ping', 'available': True}, {'name': 'pong', 'available': False}, ) class FakeRequest(object): environ = {'cinder.context': cinder.context.get_admin_context()} GET = {} class ControllerTestCase(test.TestCase): def setUp(self): super(ControllerTestCase, self).setUp() self.controller = cinder.api.contrib.availability_zones.Controller() self.req = FakeRequest() self.mock_object(cinder.volume.api.API, 'list_availability_zones', list_availability_zones) def test_list_hosts(self): """Verify that the volume hosts are returned.""" actual = self.controller.index(self.req) expected = { 'availabilityZoneInfo': [ {'zoneName': 'ping', 'zoneState': {'available': True}}, {'zoneName': 'pong', 'zoneState': {'available': False}}, ], } self.assertEqual(expected, actual) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/api/contrib/test_backup_project_attribute.py0000664000175000017500000001171600000000000027362 0ustar00zuulzuul00000000000000# Copyright (c) 2016 Huawei Technologies Co., Ltd. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
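"""Tests for the os-backup-project-attr backup API extension."""
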
import ddt from oslo_serialization import jsonutils import webob from cinder.api import microversions as mv from cinder.api.v3 import router as router_v3 from cinder.backup import api as backup_api from cinder import context from cinder import objects from cinder.tests.unit.api import fakes from cinder.tests.unit.backup import fake_backup from cinder.tests.unit import fake_constants as fake from cinder.tests.unit import test def fake_backup_get(*args, **kwargs): ctx = context.RequestContext(fake.USER_ID, fake.PROJECT_ID, False) bak = { 'id': fake.BACKUP_ID, 'project_id': fake.PROJECT_ID, 'user_id': fake.USER_ID, } return fake_backup.fake_backup_obj(ctx, **bak) def fake_backup_get_all(*args, **kwargs): return objects.BackupList(objects=[fake_backup_get()]) def app(): # no auth, just let environ['cinder.context'] pass through api = router_v3.APIRouter() mapper = fakes.urlmap.URLMap() mapper['/v3'] = api return mapper @ddt.ddt class BackupProjectAttributeTest(test.TestCase): def setUp(self): super(BackupProjectAttributeTest, self).setUp() self.mock_object(backup_api.API, 'get', fake_backup_get) self.mock_object(backup_api.API, 'get_all', fake_backup_get_all) def _send_backup_request(self, ctx, detail=False, version=mv.BACKUP_PROJECT): req = None if detail: req = webob.Request.blank(('/v3/%s/backups/detail' % fake.PROJECT_ID)) else: req = webob.Request.blank('/v3/%s/backups/%s' % (fake.PROJECT_ID, fake.BACKUP_ID)) req.method = 'GET' req.environ['cinder.context'] = ctx req.headers = mv.get_mv_header(version) req.api_version_request = mv.get_api_version(version) res = req.get_response(app()) if detail: return jsonutils.loads(res.body)['backups'] return jsonutils.loads(res.body)['backup'] @ddt.data(True, False) def test_get_backup_with_project(self, is_admin): ctx = context.RequestContext(fake.USER2_ID, fake.PROJECT_ID, is_admin) bak = self._send_backup_request(ctx) if is_admin: self.assertEqual(fake.PROJECT_ID, bak['os-backup-project-attr:project_id']) else: self.assertNotIn('os-backup-project-attr:project_id', bak) @ddt.data(True, False) def test_list_detail_backups_with_project(self, is_admin): ctx = context.RequestContext(fake.USER2_ID, fake.PROJECT_ID, is_admin) baks = self._send_backup_request(ctx, detail=True) if is_admin: self.assertEqual(fake.PROJECT_ID, baks[0]['os-backup-project-attr:project_id']) else: self.assertNotIn('os-backup-project-attr:project_id', baks[0]) def test_get_backup_under_allowed_api_version(self): ctx = context.RequestContext(fake.USER2_ID, fake.PROJECT_ID, True) bak = self._send_backup_request( ctx, version=mv.get_prior_version(mv.BACKUP_PROJECT)) self.assertNotIn('os-backup-project-attr:project_id', bak) @ddt.data(True, False) def test_get_backup_with_user_id(self, is_admin): ctx = context.RequestContext(fake.USER_ID, fake.PROJECT_ID, is_admin) bak = self._send_backup_request(ctx, version=mv.BACKUP_PROJECT_USER_ID) if is_admin: self.assertEqual(fake.USER_ID, bak['user_id']) else: self.assertNotIn('user_id', bak) @ddt.data(True, False) def test_list_detail_backups_with_user_id(self, is_admin): ctx = context.RequestContext(fake.USER_ID, fake.PROJECT_ID, is_admin) baks = self._send_backup_request(ctx, detail=True, version=mv.BACKUP_PROJECT_USER_ID) if is_admin: self.assertEqual(fake.USER_ID, baks[0]['user_id']) else: self.assertNotIn('user_id', baks[0]) def test_get_backup_user_id_before_microversion_v356(self): ctx = context.RequestContext(fake.USER_ID, fake.PROJECT_ID, True) bak = self._send_backup_request( ctx, 
version=mv.get_prior_version(mv.BACKUP_PROJECT_USER_ID)) self.assertNotIn('user_id', bak) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/api/contrib/test_backups.py0000664000175000017500000034441500000000000023741 0ustar00zuulzuul00000000000000# Copyright (C) 2012 Hewlett-Packard Development Company, L.P. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Tests for Backup code.""" from http import HTTPStatus from unittest import mock import ddt from oslo_serialization import jsonutils from oslo_utils import timeutils import webob from cinder.api.contrib import backups from cinder.api import microversions as mv from cinder.api.openstack import api_version_request as api_version # needed for stubs to work import cinder.backup from cinder.backup import api as backup_api from cinder import context from cinder import db from cinder import exception from cinder.i18n import _ from cinder import objects from cinder.objects import fields from cinder import quota from cinder.tests.unit.api import fakes from cinder.tests.unit import fake_constants as fake from cinder.tests.unit import test from cinder.tests.unit import utils # needed for stubs to work import cinder.volume NUM_ELEMENTS_IN_BACKUP = 17 @ddt.ddt class BackupsAPITestCase(test.TestCase): """Test Case for backups API.""" def setUp(self): super(BackupsAPITestCase, self).setUp() self.volume_api = cinder.volume.API() self.backup_api = cinder.backup.API() self.context = context.get_admin_context() self.context.project_id = fake.PROJECT_ID self.context.user_id = fake.USER_ID self.user_context = context.RequestContext( fake.USER_ID, fake.PROJECT_ID, auth_token=True) self.controller = backups.BackupsController() self.patch('cinder.objects.service.Service._get_minimum_version', return_value=None) @ddt.data(False, True) def test_show_backup(self, backup_from_snapshot): volume = utils.create_volume(self.context, size=5, status='creating') snapshot = None snapshot_id = None if backup_from_snapshot: snapshot = utils.create_snapshot(self.context, volume.id) snapshot_id = snapshot.id backup = utils.create_backup(self.context, volume.id, snapshot_id=snapshot_id, container='volumebackups', size=1, availability_zone='az1') req = webob.Request.blank('/v3/%s/backups/%s' % ( fake.PROJECT_ID, backup.id)) req.method = 'GET' req.headers['Content-Type'] = 'application/json' res = req.get_response(fakes.wsgi_app( fake_auth_context=self.user_context)) res_dict = jsonutils.loads(res.body) self.assertEqual(HTTPStatus.OK, res.status_int) self.assertEqual('az1', res_dict['backup']['availability_zone']) self.assertEqual('volumebackups', res_dict['backup']['container']) self.assertEqual('This is a test backup', res_dict['backup']['description']) self.assertEqual('test_backup', res_dict['backup']['name']) self.assertEqual(backup.id, res_dict['backup']['id']) self.assertEqual(22, res_dict['backup']['object_count']) self.assertEqual(1, res_dict['backup']['size']) 
self.assertEqual(fields.BackupStatus.CREATING, res_dict['backup']['status']) self.assertEqual(volume.id, res_dict['backup']['volume_id']) self.assertFalse(res_dict['backup']['is_incremental']) self.assertFalse(res_dict['backup']['has_dependent_backups']) self.assertEqual(snapshot_id, res_dict['backup']['snapshot_id']) self.assertIn('updated_at', res_dict['backup']) if snapshot: snapshot.destroy() backup.destroy() volume.destroy() def test_show_backup_return_metadata(self): volume = utils.create_volume(self.context, size=5, status='creating') backup = utils.create_backup(self.context, volume.id, metadata={"test_key": "test_value"}) req = webob.Request.blank('/v3/%s/backups/%s' % ( fake.PROJECT_ID, backup.id)) req.method = 'GET' req.headers = mv.get_mv_header(mv.BACKUP_METADATA) req.headers['Content-Type'] = 'application/json' res = req.get_response(fakes.wsgi_app( fake_auth_context=self.user_context)) res_dict = jsonutils.loads(res.body) self.assertEqual({"test_key": "test_value"}, res_dict['backup']['metadata']) volume.destroy() backup.destroy() def test_show_backup_with_backup_NotFound(self): req = webob.Request.blank('/v3/%s/backups/%s' % ( fake.PROJECT_ID, fake.WILL_NOT_BE_FOUND_ID)) req.method = 'GET' req.headers = mv.get_mv_header(mv.BACKUP_METADATA) req.headers['Content-Type'] = 'application/json' res = req.get_response(fakes.wsgi_app( fake_auth_context=self.user_context)) res_dict = jsonutils.loads(res.body) self.assertEqual(HTTPStatus.NOT_FOUND, res.status_int) self.assertEqual(HTTPStatus.NOT_FOUND, res_dict['itemNotFound']['code']) self.assertEqual('Backup %s could not be found.' % fake.WILL_NOT_BE_FOUND_ID, res_dict['itemNotFound']['message']) def test_list_backups_json(self): backup1 = utils.create_backup(self.context) backup2 = utils.create_backup(self.context) backup3 = utils.create_backup(self.context) req = webob.Request.blank('/v3/%s/backups' % fake.PROJECT_ID) req.method = 'GET' req.headers['Content-Type'] = 'application/json' res = req.get_response(fakes.wsgi_app( fake_auth_context=self.user_context)) res_dict = jsonutils.loads(res.body) self.assertEqual(HTTPStatus.OK, res.status_int) self.assertEqual(3, len(res_dict['backups'][0])) self.assertEqual(backup3.id, res_dict['backups'][0]['id']) self.assertEqual('test_backup', res_dict['backups'][0]['name']) self.assertEqual(3, len(res_dict['backups'][1])) self.assertEqual(backup2.id, res_dict['backups'][1]['id']) self.assertEqual('test_backup', res_dict['backups'][1]['name']) self.assertEqual(3, len(res_dict['backups'][2])) self.assertEqual(backup1.id, res_dict['backups'][2]['id']) self.assertEqual('test_backup', res_dict['backups'][2]['name']) backup3.destroy() backup2.destroy() backup1.destroy() def test_list_backups_with_limit(self): backup1 = utils.create_backup(self.context) backup2 = utils.create_backup(self.context) backup3 = utils.create_backup(self.context) req = webob.Request.blank('/v3/%s/backups?limit=2' % fake.PROJECT_ID) req.method = 'GET' req.headers['Content-Type'] = 'application/json' res = req.get_response(fakes.wsgi_app( fake_auth_context=self.user_context)) res_dict = jsonutils.loads(res.body) self.assertEqual(HTTPStatus.OK, res.status_int) self.assertEqual(2, len(res_dict['backups'])) self.assertEqual(3, len(res_dict['backups'][0])) self.assertEqual(backup3.id, res_dict['backups'][0]['id']) self.assertEqual('test_backup', res_dict['backups'][0]['name']) self.assertEqual(3, len(res_dict['backups'][1])) self.assertEqual(backup2.id, res_dict['backups'][1]['id']) self.assertEqual('test_backup', 
res_dict['backups'][1]['name']) backup3.destroy() backup2.destroy() backup1.destroy() def test_list_backups_with_offset_out_of_range(self): url = '/v3/%s/backups?offset=252452434242342434' % fake.PROJECT_ID req = webob.Request.blank(url) req.method = 'GET' req.headers['Content-Type'] = 'application/json' res = req.get_response(fakes.wsgi_app( fake_auth_context=self.user_context)) self.assertEqual(HTTPStatus.BAD_REQUEST, res.status_int) def test_list_backups_with_marker(self): backup1 = utils.create_backup(self.context) backup2 = utils.create_backup(self.context) backup3 = utils.create_backup(self.context) url = '/v3/%s/backups?marker=%s' % (fake.PROJECT_ID, backup3.id) req = webob.Request.blank(url) req.method = 'GET' req.headers['Content-Type'] = 'application/json' res = req.get_response(fakes.wsgi_app( fake_auth_context=self.user_context)) res_dict = jsonutils.loads(res.body) self.assertEqual(HTTPStatus.OK, res.status_int) self.assertEqual(2, len(res_dict['backups'])) self.assertEqual(3, len(res_dict['backups'][0])) self.assertEqual(backup2.id, res_dict['backups'][0]['id']) self.assertEqual('test_backup', res_dict['backups'][0]['name']) self.assertEqual(3, len(res_dict['backups'][1])) self.assertEqual(backup1.id, res_dict['backups'][1]['id']) self.assertEqual('test_backup', res_dict['backups'][1]['name']) backup3.destroy() backup2.destroy() backup1.destroy() def test_list_backups_with_limit_and_marker(self): backup1 = utils.create_backup(self.context) backup2 = utils.create_backup(self.context) backup3 = utils.create_backup(self.context) url = ('/v3/%s/backups?limit=1&marker=%s' % (fake.PROJECT_ID, backup3.id)) req = webob.Request.blank(url) req.method = 'GET' req.headers['Content-Type'] = 'application/json' res = req.get_response(fakes.wsgi_app( fake_auth_context=self.user_context)) res_dict = jsonutils.loads(res.body) self.assertEqual(HTTPStatus.OK, res.status_int) self.assertEqual(1, len(res_dict['backups'])) self.assertEqual(3, len(res_dict['backups'][0])) self.assertEqual(backup2.id, res_dict['backups'][0]['id']) self.assertEqual('test_backup', res_dict['backups'][0]['name']) backup3.destroy() backup2.destroy() backup1.destroy() def test_list_all_backups_detail_json_over_limit(self): self.override_config('osapi_max_limit', 2) backup1 = utils.create_backup(self.context, availability_zone='az1', container='volumebackups', size=1) backup2 = utils.create_backup(self.context, availability_zone='az1', container='volumebackups', size=1) backup3 = utils.create_backup(self.context, availability_zone='az1', container='volumebackups', size=1) req = webob.Request.blank('/v3/%s/backups/detail' % fake.PROJECT_ID) req.method = 'GET' req.headers['Content-Type'] = 'application/json' req.headers['Accept'] = 'application/json' res = req.get_response(fakes.wsgi_app( fake_auth_context=self.user_context)) res_dict = jsonutils.loads(res.body) self.assertEqual(HTTPStatus.OK, res.status_int) self.assertEqual(NUM_ELEMENTS_IN_BACKUP, len(res_dict['backups'][0])) self.assertEqual(NUM_ELEMENTS_IN_BACKUP, len(res_dict['backups'][1])) self.assertEqual(2, len(res_dict['backups'])) self.assertIn('backups_links', res_dict) links = res_dict['backups_links'] next_url = links[0]['href'] req = webob.Request.blank(next_url) req.method = 'GET' req.headers['Content-Type'] = 'application/json' req.headers['Accept'] = 'application/json' res = req.get_response(fakes.wsgi_app( fake_auth_context=self.user_context)) res_dict = jsonutils.loads(res.body) self.assertEqual(NUM_ELEMENTS_IN_BACKUP, len(res_dict['backups'][0])) 
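# Following the backups_links pagination href returns the backup that fell
# outside the first page (osapi_max_limit is overridden to 2 above).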
backup3.destroy() backup2.destroy() backup1.destroy() def test_list_backups_detail_json(self): backup1 = utils.create_backup(self.context, availability_zone='az1', container='volumebackups', size=1) backup2 = utils.create_backup(self.context, availability_zone='az1', container='volumebackups', size=1) backup3 = utils.create_backup(self.context, availability_zone='az1', container='volumebackups', size=1) req = webob.Request.blank('/v3/%s/backups/detail' % fake.PROJECT_ID) req.method = 'GET' req.headers['Content-Type'] = 'application/json' req.headers['Accept'] = 'application/json' res = req.get_response(fakes.wsgi_app( fake_auth_context=self.user_context)) res_dict = jsonutils.loads(res.body) self.assertEqual(HTTPStatus.OK, res.status_int) self.assertEqual(NUM_ELEMENTS_IN_BACKUP, len(res_dict['backups'][0])) self.assertEqual('az1', res_dict['backups'][0]['availability_zone']) self.assertEqual('volumebackups', res_dict['backups'][0]['container']) self.assertEqual('This is a test backup', res_dict['backups'][0]['description']) self.assertEqual('test_backup', res_dict['backups'][0]['name']) self.assertEqual(backup3.id, res_dict['backups'][0]['id']) self.assertEqual(22, res_dict['backups'][0]['object_count']) self.assertEqual(1, res_dict['backups'][0]['size']) self.assertEqual(fields.BackupStatus.CREATING, res_dict['backups'][0]['status']) self.assertEqual(fake.VOLUME_ID, res_dict['backups'][0]['volume_id']) self.assertIn('updated_at', res_dict['backups'][0]) self.assertEqual(NUM_ELEMENTS_IN_BACKUP, len(res_dict['backups'][1])) self.assertEqual('az1', res_dict['backups'][1]['availability_zone']) self.assertEqual('volumebackups', res_dict['backups'][1]['container']) self.assertEqual('This is a test backup', res_dict['backups'][1]['description']) self.assertEqual('test_backup', res_dict['backups'][1]['name']) self.assertEqual(backup2.id, res_dict['backups'][1]['id']) self.assertEqual(22, res_dict['backups'][1]['object_count']) self.assertEqual(1, res_dict['backups'][1]['size']) self.assertEqual(fields.BackupStatus.CREATING, res_dict['backups'][1]['status']) self.assertEqual(fake.VOLUME_ID, res_dict['backups'][1]['volume_id']) self.assertIn('updated_at', res_dict['backups'][1]) self.assertEqual(NUM_ELEMENTS_IN_BACKUP, len(res_dict['backups'][2])) self.assertEqual('az1', res_dict['backups'][2]['availability_zone']) self.assertEqual('volumebackups', res_dict['backups'][2]['container']) self.assertEqual('This is a test backup', res_dict['backups'][2]['description']) self.assertEqual('test_backup', res_dict['backups'][2]['name']) self.assertEqual(backup1.id, res_dict['backups'][2]['id']) self.assertEqual(22, res_dict['backups'][2]['object_count']) self.assertEqual(1, res_dict['backups'][2]['size']) self.assertEqual(fields.BackupStatus.CREATING, res_dict['backups'][2]['status']) self.assertEqual(fake.VOLUME_ID, res_dict['backups'][2]['volume_id']) self.assertIn('updated_at', res_dict['backups'][2]) backup3.destroy() backup2.destroy() backup1.destroy() def test_list_backups_detail_return_metadata(self): backup1 = utils.create_backup(self.context, size=1, metadata={'key1': 'value1'}) backup2 = utils.create_backup(self.context, size=1, metadata={'key2': 'value2'}) backup3 = utils.create_backup(self.context, size=1) req = webob.Request.blank('/v3/%s/backups/detail' % fake.PROJECT_ID) req.method = 'GET' req.headers = mv.get_mv_header(mv.BACKUP_METADATA) req.headers['Content-Type'] = 'application/json' req.headers['Accept'] = 'application/json' res = req.get_response(fakes.wsgi_app( 
fake_auth_context=self.user_context)) res_dict = jsonutils.loads(res.body) self.assertEqual({'key1': 'value1'}, res_dict['backups'][2]['metadata']) self.assertEqual({'key2': 'value2'}, res_dict['backups'][1]['metadata']) self.assertEqual({}, res_dict['backups'][0]['metadata']) backup3.destroy() backup2.destroy() backup1.destroy() def test_list_backups_detail_using_filters(self): backup1 = utils.create_backup(self.context, display_name='test2') backup2 = utils.create_backup(self.context, status=fields.BackupStatus.AVAILABLE) backup3 = utils.create_backup(self.context, volume_id=fake.VOLUME3_ID) req = webob.Request.blank('/v3/%s/backups/detail?name=test2' % fake.PROJECT_ID) req.method = 'GET' req.headers['Content-Type'] = 'application/json' req.headers['Accept'] = 'application/json' res = req.get_response(fakes.wsgi_app( fake_auth_context=self.user_context)) res_dict = jsonutils.loads(res.body) self.assertEqual(1, len(res_dict['backups'])) self.assertEqual(HTTPStatus.OK, res.status_int) self.assertEqual(backup1.id, res_dict['backups'][0]['id']) req = webob.Request.blank('/v3/%s/backups/detail?status=available' % fake.PROJECT_ID) req.method = 'GET' req.headers['Content-Type'] = 'application/json' req.headers['Accept'] = 'application/json' res = req.get_response(fakes.wsgi_app( fake_auth_context=self.user_context)) res_dict = jsonutils.loads(res.body) self.assertEqual(1, len(res_dict['backups'])) self.assertEqual(HTTPStatus.OK, res.status_int) self.assertEqual(backup2.id, res_dict['backups'][0]['id']) req = webob.Request.blank('/v3/%s/backups/detail?volume_id=%s' % ( fake.PROJECT_ID, fake.VOLUME3_ID)) req.method = 'GET' req.headers['Content-Type'] = 'application/json' req.headers['Accept'] = 'application/json' res = req.get_response(fakes.wsgi_app( fake_auth_context=self.user_context)) res_dict = jsonutils.loads(res.body) self.assertEqual(1, len(res_dict['backups'])) self.assertEqual(HTTPStatus.OK, res.status_int) self.assertEqual(backup3.id, res_dict['backups'][0]['id']) backup3.destroy() backup2.destroy() backup1.destroy() def test_list_backups_detail_with_limit_and_sort_args(self): backup1 = utils.create_backup(self.context) backup2 = utils.create_backup(self.context) backup3 = utils.create_backup(self.context) url = ('/v3/%s/backups/detail?limit=2&sort_key=created_at' '&sort_dir=desc' % fake.PROJECT_ID) req = webob.Request.blank(url) req.method = 'GET' req.headers['Content-Type'] = 'application/json' res = req.get_response(fakes.wsgi_app( fake_auth_context=self.user_context)) res_dict = jsonutils.loads(res.body) self.assertEqual(HTTPStatus.OK, res.status_int) self.assertEqual(2, len(res_dict['backups'])) self.assertEqual(NUM_ELEMENTS_IN_BACKUP, len(res_dict['backups'][0])) self.assertEqual(backup3.id, res_dict['backups'][0]['id']) self.assertEqual(NUM_ELEMENTS_IN_BACKUP, len(res_dict['backups'][1])) self.assertEqual(backup2.id, res_dict['backups'][1]['id']) backup3.destroy() backup2.destroy() backup1.destroy() def test_list_backups_detail_with_marker(self): backup1 = utils.create_backup(self.context) backup2 = utils.create_backup(self.context) backup3 = utils.create_backup(self.context) url = ('/v3/%s/backups/detail?marker=%s' % ( fake.PROJECT_ID, backup3.id)) req = webob.Request.blank(url) req.method = 'GET' req.headers['Content-Type'] = 'application/json' res = req.get_response(fakes.wsgi_app( fake_auth_context=self.user_context)) res_dict = jsonutils.loads(res.body) self.assertEqual(HTTPStatus.OK, res.status_int) self.assertEqual(2, len(res_dict['backups'])) 
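# With the marker set to the newest backup, only the two older backups are
# returned, newest first (default created_at descending order).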
self.assertEqual(NUM_ELEMENTS_IN_BACKUP, len(res_dict['backups'][0])) self.assertEqual(backup2.id, res_dict['backups'][0]['id']) self.assertEqual(NUM_ELEMENTS_IN_BACKUP, len(res_dict['backups'][1])) self.assertEqual(backup1.id, res_dict['backups'][1]['id']) backup3.destroy() backup2.destroy() backup1.destroy() def test_list_backups_detail_with_limit_and_marker(self): backup1 = utils.create_backup(self.context) backup2 = utils.create_backup(self.context) backup3 = utils.create_backup(self.context) url = ('/v3/%s/backups/detail?limit=1&marker=%s' % ( fake.PROJECT_ID, backup3.id)) req = webob.Request.blank(url) req.method = 'GET' req.headers['Content-Type'] = 'application/json' res = req.get_response(fakes.wsgi_app( fake_auth_context=self.user_context)) res_dict = jsonutils.loads(res.body) self.assertEqual(HTTPStatus.OK, res.status_int) self.assertEqual(1, len(res_dict['backups'])) self.assertEqual(NUM_ELEMENTS_IN_BACKUP, len(res_dict['backups'][0])) self.assertEqual(backup2.id, res_dict['backups'][0]['id']) backup3.destroy() backup2.destroy() backup1.destroy() def test_list_backups_detail_with_offset_out_of_range(self): url = ('/v3/%s/backups/detail?offset=234534543657634523' % fake.PROJECT_ID) req = webob.Request.blank(url) req.method = 'GET' req.headers['Content-Type'] = 'application/json' res = req.get_response(fakes.wsgi_app( fake_auth_context=self.user_context)) self.assertEqual(HTTPStatus.BAD_REQUEST, res.status_int) def test_create_backup_json(self): volume = utils.create_volume(self.context, size=5) body = {"backup": {"name": "nightly001", "description": "Nightly Backup 03-Sep-2012", "volume_id": volume.id, "container": "nightlybackups", } } req = webob.Request.blank('/v3/%s/backups' % fake.PROJECT_ID) req.method = 'POST' req.headers['Content-Type'] = 'application/json' req.body = jsonutils.dump_as_bytes(body) res = req.get_response(fakes.wsgi_app( fake_auth_context=self.user_context)) res_dict = jsonutils.loads(res.body) self.assertEqual(HTTPStatus.ACCEPTED, res.status_int) self.assertIn('id', res_dict['backup']) volume.destroy() @ddt.data({"backup": {"description": " sample description", "name": " test name"}}, {"backup": {"description": "sample description ", "name": "test "}}, {"backup": {"description": " sample description ", "name": " test "}}) @mock.patch('cinder.db.service_get_all') def test_create_backup_name_description_with_leading_trailing_spaces( self, body, _mock_service_get_all): _mock_service_get_all.return_value = [ {'availability_zone': 'fake_az', 'host': 'testhost', 'disabled': 0, 'updated_at': timeutils.utcnow(), 'uuid': fake.BACKUP_ID}] volume = utils.create_volume(self.context, size=5) body['backup']['volume_id'] = volume.id req = webob.Request.blank('/v3/%s/backups' % fake.PROJECT_ID) req.method = 'POST' req.headers['Content-Type'] = 'application/json' req.body = jsonutils.dump_as_bytes(body) res = req.get_response(fakes.wsgi_app( fake_auth_context=self.user_context)) res_dict = jsonutils.loads(res.body) # create backup call doesn't return 'description' in response so get # the created backup to assert name and description req = webob.Request.blank('/v3/%s/backups/%s' % ( fake.PROJECT_ID, res_dict['backup']['id'])) req.method = 'GET' req.headers['Content-Type'] = 'application/json' res = req.get_response(fakes.wsgi_app( fake_auth_context=self.user_context)) res_dict = jsonutils.loads(res.body) self.assertEqual(body['backup']['name'].strip(), res_dict['backup']['name']) self.assertEqual(body['backup']['description'].strip(), res_dict['backup']['description']) 
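# Name and description are stored with surrounding whitespace stripped,
# regardless of how the client supplied them.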
volume.destroy() @mock.patch('cinder.db.service_get_all') def test_create_backup_with_metadata(self, _mock_service_get_all): _mock_service_get_all.return_value = [ {'availability_zone': 'fake_az', 'host': 'testhost', 'disabled': 0, 'updated_at': timeutils.utcnow(), 'uuid': 'a3a593da-7f8d-4bb7-8b4c-f2bc1e0b4824'}] volume = utils.create_volume(self.context, size=1) # Create a backup with metadata body = {"backup": {"name": "nightly001", "description": "Nightly Backup 03-Sep-2012", "volume_id": volume.id, "container": "nightlybackups", 'metadata': {'test_key': 'test_value'} } } req = webob.Request.blank('/v3/%s/backups' % fake.PROJECT_ID) req.method = 'POST' req.headers = mv.get_mv_header(mv.BACKUP_METADATA) req.headers['Content-Type'] = 'application/json' req.body = jsonutils.dump_as_bytes(body) res = req.get_response(fakes.wsgi_app( fake_auth_context=self.user_context)) res_dict = jsonutils.loads(res.body) # Get the new backup req = webob.Request.blank('/v3/%s/backups/%s' % ( fake.PROJECT_ID, res_dict['backup']['id'])) req.method = 'GET' req.headers = mv.get_mv_header(mv.BACKUP_METADATA) req.headers['Content-Type'] = 'application/json' res = req.get_response(fakes.wsgi_app( fake_auth_context=self.user_context)) res_dict = jsonutils.loads(res.body) self.assertEqual({'test_key': 'test_value'}, res_dict['backup']['metadata']) volume.destroy() @mock.patch('cinder.objects.Service.is_up', mock.Mock(return_value=True)) @mock.patch('cinder.db.service_get_all') def test_create_backup_with_availability_zone(self, _mock_service_get_all): vol_az = 'az1' backup_svc_az = 'az2' _mock_service_get_all.return_value = [ {'availability_zone': backup_svc_az, 'host': 'testhost', 'disabled': 0, 'updated_at': timeutils.utcnow(), 'uuid': 'a3a593da-7f8d-4bb7-8b4c-f2bc1e0b4824'}] volume = utils.create_volume(self.context, availability_zone=vol_az, size=1) # Create a backup with metadata body = {'backup': {'name': 'nightly001', 'volume_id': volume.id, 'container': 'nightlybackups', 'availability_zone': backup_svc_az}} req = webob.Request.blank('/v3/%s/backups' % fake.PROJECT_ID) req.method = 'POST' req.headers = mv.get_mv_header(mv.BACKUP_AZ) req.headers['Content-Type'] = 'application/json' req.body = jsonutils.dump_as_bytes(body) res = req.get_response(fakes.wsgi_app( fake_auth_context=self.user_context)) self.assertEqual(202, res.status_code) @mock.patch('cinder.db.service_get_all') def test_create_backup_inuse_no_force(self, _mock_service_get_all): _mock_service_get_all.return_value = [ {'availability_zone': 'fake_az', 'host': 'testhost', 'disabled': 0, 'updated_at': timeutils.utcnow(), 'uuid': 'a3a593da-7f8d-4bb7-8b4c-f2bc1e0b4824'}] volume = utils.create_volume(self.context, size=5, status='in-use') body = {"backup": {"display_name": "nightly001", "display_description": "Nightly Backup 03-Sep-2012", "volume_id": volume.id, "container": "nightlybackups", } } req = webob.Request.blank('/v3/%s/backups' % fake.PROJECT_ID) req.method = 'POST' req.headers['Content-Type'] = 'application/json' req.body = jsonutils.dump_as_bytes(body) res = req.get_response(fakes.wsgi_app( fake_auth_context=self.user_context)) res_dict = jsonutils.loads(res.body) self.assertEqual(HTTPStatus.BAD_REQUEST, res.status_int) self.assertEqual(HTTPStatus.BAD_REQUEST, res_dict['badRequest']['code']) self.assertIsNotNone(res_dict['badRequest']['message']) volume.destroy() def test_create_backup_inuse_force(self): volume = utils.create_volume(self.context, size=5, status='in-use') backup = utils.create_backup(self.context, volume.id, 
status=fields.BackupStatus.AVAILABLE, size=1, availability_zone='az1', host='testhost') body = {"backup": {"name": "nightly001", "description": "Nightly Backup 03-Sep-2012", "volume_id": volume.id, "container": "nightlybackups", "force": True, } } req = webob.Request.blank('/v3/%s/backups' % fake.PROJECT_ID) req.method = 'POST' req.headers['Content-Type'] = 'application/json' req.body = jsonutils.dump_as_bytes(body) res = req.get_response(fakes.wsgi_app( fake_auth_context=self.user_context)) res_dict = jsonutils.loads(res.body) self.assertEqual(HTTPStatus.ACCEPTED, res.status_int) self.assertIn('id', res_dict['backup']) backup.destroy() volume.destroy() # Test default behavior for backup host def test_create_incremental_backup(self): volume = utils.create_volume(self.context, size=5, status='in-use') parent_backup = utils.create_backup( self.context, volume.id, status=fields.BackupStatus.AVAILABLE, size=1, availability_zone='az1', host='parenthost', parent=None) backup = utils.create_backup(self.context, volume.id, status=fields.BackupStatus.AVAILABLE, size=1, availability_zone='az1', host='testhost') body = {"backup": {"name": "nightly001", "description": "Nightly Backup 03-Sep-2012", "volume_id": volume.id, "container": "nightlybackups", "force": True, "incremental": True, } } req = webob.Request.blank('/v3/%s/backups' % fake.PROJECT_ID) req.method = 'POST' req.headers['Content-Type'] = 'application/json' req.body = jsonutils.dump_as_bytes(body) with mock.patch.object( cinder.objects.backup.Backup, '_from_db_object', wraps=cinder.objects.backup.Backup._from_db_object ) as mocked_from_db_object: res = req.get_response(fakes.wsgi_app( fake_auth_context=self.user_context)) res_dict = jsonutils.loads(res.body) self.assertEqual(HTTPStatus.ACCEPTED, res.status_int) self.assertIn('id', res_dict['backup']) args = mocked_from_db_object.call_args.args # Host should not be set yet self.assertIsNone(args[1]['host']) parent_backup.destroy() backup.destroy() volume.destroy() # Test behavior for backup host w/posix backend # (see https://bugs.launchpad.net/cinder/+bug/1952805) def test_create_incremental_backup_posix(self): volume = utils.create_volume(self.context, size=5, status='in-use') parent_backup = utils.create_backup( self.context, volume.id, status=fields.BackupStatus.AVAILABLE, size=1, availability_zone='az1', host='parenthost', service='cinder.backup.drivers.posix.PosixBackupDriver' ) body = {"backup": {"name": "nightly001", "description": "Nightly Backup 03-Sep-2012", "volume_id": volume.id, "container": "nightlybackups", "force": True, "incremental": True, } } req = webob.Request.blank('/v3/%s/backups' % fake.PROJECT_ID) req.method = 'POST' req.headers['Content-Type'] = 'application/json' req.body = jsonutils.dump_as_bytes(body) with mock.patch.object( cinder.objects.backup.Backup, '_from_db_object', wraps=cinder.objects.backup.Backup._from_db_object ) as mocked_from_db_object: res = req.get_response(fakes.wsgi_app( fake_auth_context=self.user_context)) res_dict = jsonutils.loads(res.body) self.assertEqual(HTTPStatus.ACCEPTED, res.status_int) self.assertIn('id', res_dict['backup']) args = mocked_from_db_object.call_args.args self.assertEqual('parenthost', args[1]['host']) parent_backup.destroy() volume.destroy() def test_create_backup_snapshot_json(self): volume = utils.create_volume(self.context, size=5, status='available') body = {"backup": {"name": "nightly001", "description": "Nightly Backup 03-Sep-2012", "volume_id": volume.id, "container": "nightlybackups", } } req = 
webob.Request.blank('/v3/%s/backups' % fake.PROJECT_ID) req.method = 'POST' req.headers['Content-Type'] = 'application/json' req.body = jsonutils.dump_as_bytes(body) res = req.get_response(fakes.wsgi_app( fake_auth_context=self.user_context)) res_dict = jsonutils.loads(res.body) self.assertEqual(HTTPStatus.ACCEPTED, res.status_int) self.assertIn('id', res_dict['backup']) volume.destroy() def test_create_backup_snapshot_with_inconsistent_volume(self): volume = utils.create_volume(self.context, size=5, status='available') volume2 = utils.create_volume(self.context, size=5, status='available') snapshot = utils.create_snapshot(self.context, volume.id, status='available') body = {"backup": {"display_name": "nightly001", "display_description": "Nightly Backup 03-Sep-2012", "volume_id": volume2.id, "snapshot_id": snapshot.id, "container": "nightlybackups", } } req = webob.Request.blank('/v3/%s/backups' % fake.PROJECT_ID) req.method = 'POST' req.headers['Content-Type'] = 'application/json' req.body = jsonutils.dump_as_bytes(body) res = req.get_response(fakes.wsgi_app( fake_auth_context=self.user_context)) res_dict = jsonutils.loads(res.body) self.assertEqual(HTTPStatus.BAD_REQUEST, res.status_int) self.assertIsNotNone(res_dict['badRequest']['message']) snapshot.destroy() volume2.destroy() volume.destroy() def test_create_backup_with_invalid_snapshot(self): volume = utils.create_volume(self.context, size=5, status='available') snapshot = utils.create_snapshot(self.context, volume.id, status='error') body = {"backup": {"display_name": "nightly001", "display_description": "Nightly Backup 03-Sep-2012", "snapshot_id": snapshot.id, "volume_id": volume.id, } } req = webob.Request.blank('/v3/%s/backups' % fake.PROJECT_ID) req.method = 'POST' req.headers['Content-Type'] = 'application/json' req.body = jsonutils.dump_as_bytes(body) res = req.get_response(fakes.wsgi_app( fake_auth_context=self.user_context)) res_dict = jsonutils.loads(res.body) self.assertEqual(HTTPStatus.BAD_REQUEST, res.status_int) self.assertEqual(HTTPStatus.BAD_REQUEST, res_dict['badRequest']['code']) self.assertIsNotNone(res_dict['badRequest']['message']) volume.destroy() snapshot.destroy() def test_create_backup_with_non_existent_snapshot(self): volume = utils.create_volume(self.context, size=5, status='restoring') body = {"backup": {"name": "nightly001", "description": "Nightly Backup 03-Sep-2012", "snapshot_id": fake.SNAPSHOT_ID, "volume_id": volume.id, } } req = webob.Request.blank('/v3/%s/backups' % fake.PROJECT_ID) req.method = 'POST' req.headers['Content-Type'] = 'application/json' req.body = jsonutils.dump_as_bytes(body) res = req.get_response(fakes.wsgi_app( fake_auth_context=self.user_context)) res_dict = jsonutils.loads(res.body) self.assertEqual(HTTPStatus.NOT_FOUND, res.status_int) self.assertEqual(HTTPStatus.NOT_FOUND, res_dict['itemNotFound']['code']) self.assertIsNotNone(res_dict['itemNotFound']['message']) volume.destroy() def test_create_backup_with_invalid_container(self): volume = utils.create_volume(self.context, size=5, status='available') body = {"backup": {"display_name": "nightly001", "display_description": "Nightly Backup 03-Sep-2012", "volume_id": volume.id, "container": "a" * 256 } } req = webob.Request.blank('/v3/%s/backups' % fake.PROJECT_ID) req.method = 'POST' req.environ['cinder.context'] = self.context req.api_version_request = api_version.APIVersionRequest("3.0") self.assertRaises(exception.ValidationError, self.controller.create, req, body=body) @ddt.data(False, True) def 
test_create_backup_delta(self, backup_from_snapshot): volume = utils.create_volume(self.context, size=5) snapshot = None if backup_from_snapshot: snapshot = utils.create_snapshot(self.context, volume.id, status= fields.SnapshotStatus.AVAILABLE) snapshot_id = snapshot.id body = {"backup": {"name": "nightly001", "description": "Nightly Backup 03-Sep-2012", "volume_id": volume.id, "container": "nightlybackups", "incremental": True, "snapshot_id": snapshot_id, } } else: body = {"backup": {"name": "nightly001", "description": "Nightly Backup 03-Sep-2012", "volume_id": volume.id, "container": "nightlybackups", "incremental": True, } } backup = utils.create_backup(self.context, volume.id, status=fields.BackupStatus.AVAILABLE, size=1, availability_zone='az1', host='testhost') req = webob.Request.blank('/v3/%s/backups' % fake.PROJECT_ID) req.method = 'POST' req.headers['Content-Type'] = 'application/json' req.body = jsonutils.dump_as_bytes(body) res = req.get_response(fakes.wsgi_app( fake_auth_context=self.user_context)) res_dict = jsonutils.loads(res.body) self.assertEqual(HTTPStatus.ACCEPTED, res.status_int) self.assertIn('id', res_dict['backup']) backup.destroy() if snapshot: snapshot.destroy() volume.destroy() @mock.patch('cinder.db.service_get_all') def test_create_incremental_backup_invalid_status( self, _mock_service_get_all): _mock_service_get_all.return_value = [ {'availability_zone': 'fake_az', 'host': 'testhost', 'disabled': 0, 'updated_at': timeutils.utcnow(), 'uuid': 'a3a593da-7f8d-4bb7-8b4c-f2bc1e0b4824'}] volume = utils.create_volume(self.context, size=5) backup = utils.create_backup(self.context, volume.id, availability_zone='az1', size=1, host='testhost') body = {"backup": {"name": "nightly001", "description": "Nightly Backup 03-Sep-2012", "volume_id": volume.id, "container": "nightlybackups", "incremental": True, } } req = webob.Request.blank('/v3/%s/backups' % fake.PROJECT_ID) req.method = 'POST' req.headers['Content-Type'] = 'application/json' req.body = jsonutils.dump_as_bytes(body) res = req.get_response(fakes.wsgi_app( fake_auth_context=self.user_context)) res_dict = jsonutils.loads(res.body) self.assertEqual(HTTPStatus.BAD_REQUEST, res_dict['badRequest']['code']) self.assertEqual('Invalid backup: No backups available to ' 'do an incremental backup.', res_dict['badRequest']['message']) backup.destroy() volume.destroy() def test_create_backup_with_no_body(self): # omit body from the request req = webob.Request.blank('/v3/%s/backups' % fake.PROJECT_ID) req.body = jsonutils.dump_as_bytes(None) req.method = 'POST' req.headers['Content-Type'] = 'application/json' req.headers['Accept'] = 'application/json' res = req.get_response(fakes.wsgi_app( fake_auth_context=self.user_context)) res_dict = jsonutils.loads(res.body) self.assertEqual(HTTPStatus.BAD_REQUEST, res.status_int) self.assertEqual(HTTPStatus.BAD_REQUEST, res_dict['badRequest']['code']) self.assertEqual("None is not of type 'object'", res_dict['badRequest']['message']) def test_create_backup_with_body_KeyError(self): # omit volume_id from body body = {"backup": {"name": "nightly001", "description": "Nightly Backup 03-Sep-2012", "container": "nightlybackups", } } req = webob.Request.blank('/v3/%s/backups' % fake.PROJECT_ID) req.method = 'POST' req.headers['Content-Type'] = 'application/json' req.body = jsonutils.dump_as_bytes(body) res = req.get_response(fakes.wsgi_app( fake_auth_context=self.user_context)) res_dict = jsonutils.loads(res.body) self.assertEqual(HTTPStatus.BAD_REQUEST, res.status_int) 
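# volume_id is a required property in the request schema, so validation
# rejects the request with 400 before any backup is created.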
self.assertEqual(HTTPStatus.BAD_REQUEST, res_dict['badRequest']['code']) self.assertIn("'volume_id' is a required property", res_dict['badRequest']['message']) def test_create_backup_with_VolumeNotFound(self): body = {"backup": {"name": "nightly001", "description": "Nightly Backup 03-Sep-2012", "volume_id": fake.WILL_NOT_BE_FOUND_ID, "container": "nightlybackups", } } req = webob.Request.blank('/v3/%s/backups' % fake.PROJECT_ID) req.method = 'POST' req.headers['Content-Type'] = 'application/json' req.body = jsonutils.dump_as_bytes(body) res = req.get_response(fakes.wsgi_app( fake_auth_context=self.user_context)) res_dict = jsonutils.loads(res.body) self.assertEqual(HTTPStatus.NOT_FOUND, res.status_int) self.assertEqual(HTTPStatus.NOT_FOUND, res_dict['itemNotFound']['code']) self.assertEqual('Volume %s could not be found.' % fake.WILL_NOT_BE_FOUND_ID, res_dict['itemNotFound']['message']) def test_create_backup_with_invalid_volume_id_format(self): body = {"backup": {"name": "nightly001", "description": "Nightly Backup 03-Sep-2012", "volume_id": 'not a uuid', "container": "nightlybackups", } } req = webob.Request.blank('/v3/%s/backups' % fake.PROJECT_ID) req.method = 'POST' req.headers['Content-Type'] = 'application/json' req.body = jsonutils.dump_as_bytes(body) res = req.get_response(fakes.wsgi_app( fake_auth_context=self.user_context)) res_dict = jsonutils.loads(res.body) self.assertEqual(HTTPStatus.BAD_REQUEST, res.status_int) self.assertIn("'not a uuid' is not a 'uuid'", res_dict['badRequest']['message']) def test_create_backup_with_InvalidVolume(self): # need to create the volume referenced below first volume = utils.create_volume(self.context, size=5, status='restoring') body = {"backup": {"display_name": "nightly001", "display_description": "Nightly Backup 03-Sep-2012", "volume_id": volume.id, "container": "nightlybackups", } } req = webob.Request.blank('/v3/%s/backups' % fake.PROJECT_ID) req.method = 'POST' req.headers['Content-Type'] = 'application/json' req.body = jsonutils.dump_as_bytes(body) res = req.get_response(fakes.wsgi_app( fake_auth_context=self.user_context)) res_dict = jsonutils.loads(res.body) self.assertEqual(HTTPStatus.BAD_REQUEST, res.status_int) self.assertEqual(HTTPStatus.BAD_REQUEST, res_dict['badRequest']['code']) @mock.patch('cinder.db.service_get_all') def test_create_backup_WithOUT_enabled_backup_service( self, _mock_service_get_all): # need an enabled backup service available _mock_service_get_all.return_value = [] volume = utils.create_volume(self.context, size=2) req = webob.Request.blank('/v3/%s/backups' % fake.PROJECT_ID) body = {"backup": {"name": "nightly001", "description": "Nightly Backup 03-Sep-2012", "volume_id": volume.id, "container": "nightlybackups", } } req.method = 'POST' req.headers['Content-Type'] = 'application/json' req.headers['Accept'] = 'application/json' req.body = jsonutils.dump_as_bytes(body) res = req.get_response(fakes.wsgi_app( fake_auth_context=self.user_context)) self.assertEqual(HTTPStatus.ACCEPTED, res.status_int) self.assertEqual('available', volume.status) @mock.patch('cinder.db.service_get_all') def test_create_incremental_backup_invalid_no_full( self, _mock_service_get_all): _mock_service_get_all.return_value = [ {'availability_zone': 'fake_az', 'host': 'testhost', 'disabled': 0, 'updated_at': timeutils.utcnow(), 'uuid': 'a3a593da-7f8d-4bb7-8b4c-f2bc1e0b4824'}] volume = utils.create_volume(self.context, size=5, status='available') body = {"backup": {"name": "nightly001", "description": "Nightly Backup 03-Sep-2012", 
"volume_id": volume.id, "container": "nightlybackups", "incremental": True, } } req = webob.Request.blank('/v3/%s/backups' % fake.PROJECT_ID) req.method = 'POST' req.headers['Content-Type'] = 'application/json' req.body = jsonutils.dump_as_bytes(body) res = req.get_response(fakes.wsgi_app( fake_auth_context=self.user_context)) res_dict = jsonutils.loads(res.body) self.assertEqual(HTTPStatus.BAD_REQUEST, res_dict['badRequest']['code']) self.assertEqual('Invalid backup: No backups available to do ' 'an incremental backup.', res_dict['badRequest']['message']) volume.destroy() def test_create_backup_with_null_validate(self): volume = utils.create_volume(self.context, size=5) body = {"backup": {"name": None, "description": None, "volume_id": volume.id, "container": "Nonebackups", "snapshot_id": None, } } req = webob.Request.blank('/v3/%s/backups' % fake.PROJECT_ID) req.method = 'POST' req.headers['Content-Type'] = 'application/json' req.body = jsonutils.dump_as_bytes(body) res = req.get_response(fakes.wsgi_app( fake_auth_context=self.user_context)) res_dict = jsonutils.loads(res.body) self.assertEqual(HTTPStatus.ACCEPTED, res.status_int) self.assertIn('id', res_dict['backup']) volume.destroy() @mock.patch('cinder.db.service_get_all') def test_create_backup_with_metadata_null_validate( self, _mock_service_get_all): _mock_service_get_all.return_value = [ {'availability_zone': 'fake_az', 'host': 'testhost', 'disabled': 0, 'updated_at': timeutils.utcnow(), 'uuid': 'a3a593da-7f8d-4bb7-8b4c-f2bc1e0b4824'}] volume = utils.create_volume(self.context, size=1) body = {"backup": {"volume_id": volume.id, "container": "Nonebackups", "metadata": None, } } req = webob.Request.blank('/v3/%s/backups' % fake.PROJECT_ID) req.method = 'POST' req.headers = mv.get_mv_header(mv.BACKUP_METADATA) req.headers['Content-Type'] = 'application/json' req.body = jsonutils.dump_as_bytes(body) res = req.get_response(fakes.wsgi_app( fake_auth_context=self.user_context)) res_dict = jsonutils.loads(res.body) self.assertEqual(HTTPStatus.ACCEPTED, res.status_int) self.assertIn('id', res_dict['backup']) volume.destroy() @mock.patch('cinder.db.service_get_all') def test_is_backup_service_enabled(self, _mock_service_get_all): testhost = 'test_host' alt_host = 'strange_host' empty_service = [] # service host not match with volume's host host_not_match = [{'availability_zone': 'fake_az', 'host': alt_host, 'disabled': 0, 'updated_at': timeutils.utcnow(), 'uuid': 'a3a593da-7f8d-4bb7-8b4c-f2bc1e0b4824'}] # service az not match with volume's az az_not_match = [{'availability_zone': 'strange_az', 'host': testhost, 'disabled': 0, 'updated_at': timeutils.utcnow(), 'uuid': '4200b32b-0bf9-436c-86b2-0675f6ac218e'}] # service disabled disabled_service = [] # dead service that last reported at 20th century dead_service = [{'availability_zone': 'fake_az', 'host': alt_host, 'disabled': 0, 'updated_at': '1989-04-16 02:55:44', 'uuid': '6d91e7f5-ca17-4e3b-bf4f-19ca77166dd7'}] # first service's host not match but second one works. 
multi_services = [{'availability_zone': 'fake_az', 'host': alt_host, 'disabled': 0, 'updated_at': timeutils.utcnow(), 'uuid': '18417850-2ca9-43d1-9619-ae16bfb0f655'}, {'availability_zone': 'fake_az', 'host': testhost, 'disabled': 0, 'updated_at': timeutils.utcnow(), 'uuid': 'f838f35c-4035-464f-9792-ce60e390c13d'}] # Setup mock to run through the following service cases _mock_service_get_all.side_effect = [empty_service, host_not_match, az_not_match, disabled_service, dead_service, multi_services] volume = utils.create_volume(self.context, size=2, host=testhost) # test empty service self.assertEqual(False, self.backup_api._is_backup_service_enabled( volume.availability_zone, testhost)) # test host not match service self.assertEqual(False, self.backup_api._is_backup_service_enabled( volume.availability_zone, testhost)) # test az not match service self.assertEqual(False, self.backup_api._is_backup_service_enabled( volume.availability_zone, testhost)) # test disabled service self.assertEqual(False, self.backup_api._is_backup_service_enabled( volume.availability_zone, testhost)) # test dead service self.assertEqual(False, self.backup_api._is_backup_service_enabled( volume.availability_zone, testhost)) # test multi services and the last service matches self.assertTrue(self.backup_api._is_backup_service_enabled( volume.availability_zone, testhost)) @mock.patch('cinder.db.service_get_all') def test_get_available_backup_service(self, _mock_service_get_all): _mock_service_get_all.return_value = [ {'availability_zone': 'az1', 'host': 'testhost1', 'disabled': 0, 'updated_at': timeutils.utcnow(), 'uuid': 'a3a593da-7f8d-4bb7-8b4c-f2bc1e0b4824'}, {'availability_zone': 'az2', 'host': 'testhost2', 'disabled': 0, 'updated_at': timeutils.utcnow(), 'uuid': '4200b32b-0bf9-436c-86b2-0675f6ac218e'}, {'availability_zone': 'az2', 'host': 'testhost3', 'disabled': 0, 'updated_at': timeutils.utcnow(), 'uuid': '6d91e7f5-ca17-4e3b-bf4f-19ca77166dd7'}, ] actual_host = self.backup_api._get_available_backup_service_host( None, 'az1') self.assertEqual('testhost1', actual_host) actual_host = self.backup_api._get_available_backup_service_host( 'testhost2', 'az2') self.assertIn(actual_host, ['testhost2', 'testhost3']) actual_host = self.backup_api._get_available_backup_service_host( 'testhost4', 'az1') self.assertEqual('testhost1', actual_host) @mock.patch('cinder.db.service_get_all') def test_get_available_backup_service_with_same_host( self, _mock_service_get_all): _mock_service_get_all.return_value = [ {'availability_zone': 'az1', 'host': 'testhost1', 'disabled': 0, 'updated_at': timeutils.utcnow(), 'uuid': 'a3a593da-7f8d-4bb7-8b4c-f2bc1e0b4824'}, {'availability_zone': 'az2', 'host': 'testhost2', 'disabled': 0, 'updated_at': timeutils.utcnow(), 'uuid': '4200b32b-0bf9-436c-86b2-0675f6ac218e'}, ] self.override_config('backup_use_same_host', True) actual_host = self.backup_api._get_available_backup_service_host( None, 'az1') self.assertEqual('testhost1', actual_host) actual_host = self.backup_api._get_available_backup_service_host( 'testhost2', 'az2') self.assertEqual('testhost2', actual_host) self.assertRaises(exception.ServiceNotFound, self.backup_api._get_available_backup_service_host, 'testhost4', 'az1') @mock.patch('cinder.db.service_get_all') def test_delete_backup_available(self, _mock_service_get_all): _mock_service_get_all.return_value = [ {'availability_zone': 'az1', 'host': 'testhost', 'disabled': 0, 'updated_at': timeutils.utcnow(), 'uuid': 'a3a593da-7f8d-4bb7-8b4c-f2bc1e0b4824'}] backup = 
utils.create_backup(self.context, status=fields.BackupStatus.AVAILABLE, availability_zone='az1', host='testhost') req = webob.Request.blank('/v3/%s/backups/%s' % ( fake.PROJECT_ID, backup.id)) req.method = 'DELETE' req.headers['Content-Type'] = 'application/json' res = req.get_response(fakes.wsgi_app( fake_auth_context=self.user_context)) backup.refresh() self.assertEqual(HTTPStatus.ACCEPTED, res.status_int) self.assertEqual(fields.BackupStatus.DELETING, backup.status) backup.destroy() @mock.patch('cinder.db.service_get_all') def test_delete_delta_backup(self, _mock_service_get_all): _mock_service_get_all.return_value = [ {'availability_zone': 'az1', 'host': 'testhost', 'disabled': 0, 'updated_at': timeutils.utcnow(), 'uuid': 'a3a593da-7f8d-4bb7-8b4c-f2bc1e0b4824'}] backup = utils.create_backup(self.context, status=fields.BackupStatus.AVAILABLE, availability_zone='az1', host='testhost') delta = utils.create_backup(self.context, status=fields.BackupStatus.AVAILABLE, incremental=True, availability_zone='az1', host='testhost') req = webob.Request.blank('/v3/%s/backups/%s' % ( fake.PROJECT_ID, delta.id)) req.method = 'DELETE' req.headers['Content-Type'] = 'application/json' res = req.get_response(fakes.wsgi_app( fake_auth_context=self.user_context)) delta.refresh() self.assertEqual(HTTPStatus.ACCEPTED, res.status_int) self.assertEqual(fields.BackupStatus.DELETING, delta.status) delta.destroy() backup.destroy() @mock.patch('cinder.db.service_get_all') def test_delete_backup_error(self, _mock_service_get_all): _mock_service_get_all.return_value = [ {'availability_zone': 'az1', 'host': 'testhost', 'disabled': 0, 'updated_at': timeutils.utcnow(), 'uuid': 'a3a593da-7f8d-4bb7-8b4c-f2bc1e0b4824'}] backup = utils.create_backup(self.context, status=fields.BackupStatus.ERROR, availability_zone='az1', host='testhost') req = webob.Request.blank('/v3/%s/backups/%s' % ( fake.PROJECT_ID, backup.id)) req.method = 'DELETE' req.headers['Content-Type'] = 'application/json' res = req.get_response(fakes.wsgi_app( fake_auth_context=self.user_context)) backup.refresh() self.assertEqual(HTTPStatus.ACCEPTED, res.status_int) self.assertEqual(fields.BackupStatus.DELETING, backup.status) backup.destroy() def test_delete_backup_with_backup_NotFound(self): req = webob.Request.blank('/v3/%s/backups/%s' % ( fake.PROJECT_ID, fake.WILL_NOT_BE_FOUND_ID)) req.method = 'DELETE' req.headers['Content-Type'] = 'application/json' res = req.get_response(fakes.wsgi_app( fake_auth_context=self.user_context)) res_dict = jsonutils.loads(res.body) self.assertEqual(HTTPStatus.NOT_FOUND, res.status_int) self.assertEqual(HTTPStatus.NOT_FOUND, res_dict['itemNotFound']['code']) self.assertEqual('Backup %s could not be found.' 
% fake.WILL_NOT_BE_FOUND_ID, res_dict['itemNotFound']['message']) def test_delete_backup_with_InvalidBackup(self): backup = utils.create_backup(self.context) req = webob.Request.blank('/v3/%s/backups/%s' % ( fake.PROJECT_ID, backup.id)) req.method = 'DELETE' req.headers['Content-Type'] = 'application/json' res = req.get_response(fakes.wsgi_app( fake_auth_context=self.user_context)) res_dict = jsonutils.loads(res.body) self.assertEqual(HTTPStatus.BAD_REQUEST, res.status_int) self.assertEqual(HTTPStatus.BAD_REQUEST, res_dict['badRequest']['code']) self.assertEqual('Invalid backup: Backup status must be ' 'available or error', res_dict['badRequest']['message']) backup.destroy() @mock.patch('cinder.db.service_get_all') def test_delete_backup_with_InvalidBackup2(self, _mock_service_get_all): _mock_service_get_all.return_value = [ {'availability_zone': 'az1', 'host': 'testhost', 'disabled': 0, 'updated_at': timeutils.utcnow(), 'uuid': 'a3a593da-7f8d-4bb7-8b4c-f2bc1e0b4824'}] volume = utils.create_volume(self.context, size=5) backup = utils.create_backup(self.context, volume.id, status=fields.BackupStatus.AVAILABLE) delta_backup = utils.create_backup( self.context, status=fields.BackupStatus.AVAILABLE, incremental=True, parent_id=backup.id) req = webob.Request.blank('/v3/%s/backups/%s' % ( fake.PROJECT_ID, backup.id)) req.method = 'DELETE' req.headers['Content-Type'] = 'application/json' res = req.get_response(fakes.wsgi_app( fake_auth_context=self.user_context)) res_dict = jsonutils.loads(res.body) self.assertEqual(HTTPStatus.BAD_REQUEST, res.status_int) self.assertEqual(HTTPStatus.BAD_REQUEST, res_dict['badRequest']['code']) self.assertEqual('Invalid backup: Incremental backups ' 'exist for this backup.', res_dict['badRequest']['message']) delta_backup.destroy() backup.destroy() @mock.patch('cinder.db.service_get_all') def test_delete_backup_service_down(self, _mock_service_get_all): _mock_service_get_all.return_value = [ {'availability_zone': 'az1', 'host': 'testhost', 'disabled': 0, 'updated_at': '1775-04-19 05:00:00', 'uuid': 'a3a593da-7f8d-4bb7-8b4c-f2bc1e0b4824'}] backup = utils.create_backup(self.context, status='available') req = webob.Request.blank('/v3/%s/backups/%s' % ( fake.PROJECT_ID, backup.id)) req.method = 'DELETE' req.headers['Content-Type'] = 'application/json' res = req.get_response(fakes.wsgi_app( fake_auth_context=self.user_context)) self.assertEqual(HTTPStatus.NOT_FOUND, res.status_int) backup.destroy() @mock.patch('cinder.backup.manager.BackupManager.is_working') @mock.patch('cinder.db.service_get_all') def test_delete_backup_service_is_none_and_is_not_working( self, _mock_service_get_all, _mock_backup_is_working): _mock_service_get_all.return_value = [ {'availability_zone': 'az1', 'host': 'testhost', 'disabled': 0, 'updated_at': timeutils.utcnow(), 'uuid': 'a3a593da-7f8d-4bb7-8b4c-f2bc1e0b4824'}] _mock_backup_is_working.return_value = False backup = utils.create_backup(self.context, status=fields.BackupStatus.AVAILABLE, availability_zone='az1', host='testhost', service=None) req = webob.Request.blank('/v3/%s/backups/%s' % ( fake.PROJECT_ID, backup.id)) req.method = 'DELETE' req.headers['Content-Type'] = 'application/json' res = req.get_response(fakes.wsgi_app( fake_auth_context=self.user_context)) self.assertEqual(HTTPStatus.ACCEPTED, res.status_int) @mock.patch('cinder.backup.api.API._get_available_backup_service_host') def test_restore_backup_volume_id_specified_json( self, _mock_get_backup_host): _mock_get_backup_host.return_value = 'testhost' backup = 
utils.create_backup(self.context, status=fields.BackupStatus.AVAILABLE, size=1, host='testhost') # need to create the volume referenced below first volume_name = 'test1' volume = utils.create_volume(self.context, size=5, display_name=volume_name) body = {"restore": {"volume_id": volume.id, }} req = webob.Request.blank('/v3/%s/backups/%s/restore' % ( fake.PROJECT_ID, backup.id)) req.method = 'POST' req.headers['Content-Type'] = 'application/json' req.body = jsonutils.dump_as_bytes(body) res = req.get_response(fakes.wsgi_app( fake_auth_context=self.user_context)) res_dict = jsonutils.loads(res.body) self.assertEqual(HTTPStatus.ACCEPTED, res.status_int) self.assertEqual(backup.id, res_dict['restore']['backup_id']) self.assertEqual(volume.id, res_dict['restore']['volume_id']) self.assertEqual(volume_name, res_dict['restore']['volume_name']) def test_restore_backup_with_no_body(self): # omit body from the request backup = utils.create_backup(self.context, status=fields.BackupStatus.AVAILABLE) req = webob.Request.blank('/v3/%s/backups/%s/restore' % ( fake.PROJECT_ID, backup.id)) req.body = jsonutils.dump_as_bytes(None) req.method = 'POST' req.headers['Content-Type'] = 'application/json' req.headers['Accept'] = 'application/json' res = req.get_response(fakes.wsgi_app( fake_auth_context=self.user_context)) res_dict = jsonutils.loads(res.body) self.assertEqual(HTTPStatus.BAD_REQUEST, res.status_int) self.assertEqual(HTTPStatus.BAD_REQUEST, res_dict['badRequest']['code']) self.assertEqual("None is not of type 'object'", res_dict['badRequest']['message']) backup.destroy() def test_restore_backup_with_body_KeyError(self): # omit restore from body backup = utils.create_backup(self.context, status=fields.BackupStatus.AVAILABLE) req = webob.Request.blank('/v3/%s/backups/%s/restore' % ( fake.PROJECT_ID, backup.id)) body = {"restore": {'': ''}} req.method = 'POST' req.headers['Content-Type'] = 'application/json' req.headers['Accept'] = 'application/json' req.body = jsonutils.dump_as_bytes(body) res = req.get_response(fakes.wsgi_app( fake_auth_context=self.user_context)) res_dict = jsonutils.loads(res.body) self.assertEqual(HTTPStatus.BAD_REQUEST, res.status_int) self.assertEqual(HTTPStatus.BAD_REQUEST, res_dict['badRequest']['code']) self.assertIn("Additional properties are not allowed ", res_dict['badRequest']['message']) self.assertIn("'' was unexpected)", res_dict['badRequest']['message']) @mock.patch('cinder.db.service_get_all') @mock.patch('cinder.volume.api.API.create') def test_restore_backup_volume_id_unspecified( self, _mock_volume_api_create, _mock_service_get_all): # intercept volume creation to ensure created volume # has status of available def fake_volume_api_create(context, size, name, description): volume_id = utils.create_volume(self.context, size=size).id return db.volume_get(context, volume_id) _mock_service_get_all.return_value = [ {'availability_zone': 'az1', 'host': 'testhost', 'disabled': 0, 'updated_at': timeutils.utcnow(), 'uuid': 'a3a593da-7f8d-4bb7-8b4c-f2bc1e0b4824'}] _mock_volume_api_create.side_effect = fake_volume_api_create backup = utils.create_backup(self.context, size=5, status=fields.BackupStatus.AVAILABLE, availability_zone='az1', host='testhost') body = {"restore": {}} req = webob.Request.blank('/v3/%s/backups/%s/restore' % ( fake.PROJECT_ID, backup.id)) req.method = 'POST' req.headers['Content-Type'] = 'application/json' req.body = jsonutils.dump_as_bytes(body) res = req.get_response(fakes.wsgi_app( fake_auth_context=self.user_context)) res_dict = 
jsonutils.loads(res.body) self.assertEqual(HTTPStatus.ACCEPTED, res.status_int) self.assertEqual(backup.id, res_dict['restore']['backup_id']) @mock.patch('cinder.db.service_get_all') @mock.patch('cinder.volume.api.API.create') def test_restore_backup_name_specified(self, _mock_volume_api_create, _mock_service_get_all): # Intercept volume creation to ensure created volume # has status of available def fake_volume_api_create(context, size, name, description): volume_id = utils.create_volume(self.context, size=size, display_name=name).id return db.volume_get(context, volume_id) _mock_volume_api_create.side_effect = fake_volume_api_create _mock_service_get_all.return_value = [ {'availability_zone': 'az1', 'host': 'testhost', 'disabled': 0, 'updated_at': timeutils.utcnow(), 'uuid': 'a3a593da-7f8d-4bb7-8b4c-f2bc1e0b4824'}] backup = utils.create_backup(self.context, size=5, status=fields.BackupStatus.AVAILABLE, availability_zone='az1', host='testhost') body = {"restore": {'name': 'vol-01'}} req = webob.Request.blank('/v3/%s/backups/%s/restore' % (fake.PROJECT_ID, backup.id)) req.method = 'POST' req.headers['Content-Type'] = 'application/json' req.body = jsonutils.dump_as_bytes(body) res = req.get_response(fakes.wsgi_app( fake_auth_context=self.user_context)) res_dict = jsonutils.loads(res.body) description = 'auto-created_from_restore_from_backup' # Assert that we have indeed passed on the name parameter _mock_volume_api_create.assert_called_once_with( mock.ANY, 5, body['restore']['name'], description) self.assertEqual(HTTPStatus.ACCEPTED, res.status_int) self.assertEqual(backup.id, res_dict['restore']['backup_id']) @mock.patch('cinder.backup.api.API._get_available_backup_service_host') def test_restore_backup_name_volume_id_specified( self, _mock_get_backup_host): _mock_get_backup_host.return_value = 'testhost' backup = utils.create_backup(self.context, size=5, status=fields.BackupStatus.AVAILABLE) orig_vol_name = "vol-00" volume = utils.create_volume(self.context, size=5, display_name=orig_vol_name) body = {"restore": {'name': 'vol-01', 'volume_id': volume.id}} req = webob.Request.blank('/v3/%s/backups/%s/restore' % ( fake.PROJECT_ID, backup.id)) req.method = 'POST' req.headers['Content-Type'] = 'application/json' req.body = jsonutils.dump_as_bytes(body) res = req.get_response(fakes.wsgi_app( fake_auth_context=self.user_context)) res_dict = jsonutils.loads(res.body) self.assertEqual(HTTPStatus.ACCEPTED, res.status_int) self.assertEqual(backup.id, res_dict['restore']['backup_id']) self.assertEqual(volume.id, res_dict['restore']['volume_id']) restored_vol = db.volume_get(self.context, res_dict['restore']['volume_id']) # Ensure that the original volume name wasn't overridden self.assertEqual(orig_vol_name, restored_vol['display_name']) @mock.patch('cinder.backup.api.API._get_available_backup_service_host') def test_restore_backup_with_null_validate(self, _mock_get_backup_host): _mock_get_backup_host.return_value = 'testhost' backup = utils.create_backup(self.context, status=fields.BackupStatus.AVAILABLE, size=1, host='testhost') # need to create the volume referenced below first volume = utils.create_volume(self.context, size=1) body = {"restore": {"name": None, "volume_id": volume.id}} req = webob.Request.blank('/v3/%s/backups/%s/restore' % ( fake.PROJECT_ID, backup.id)) req.method = 'POST' req.headers['Content-Type'] = 'application/json' req.body = jsonutils.dump_as_bytes(body) res = req.get_response(fakes.wsgi_app( fake_auth_context=self.user_context)) res_dict = jsonutils.loads(res.body) 
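# A null restore name passes schema validation; the restore request is
# accepted against the specified target volume.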
self.assertEqual(HTTPStatus.ACCEPTED, res.status_int) self.assertEqual(backup.id, res_dict['restore']['backup_id']) @mock.patch('cinder.backup.api.API.restore') def test_restore_backup_with_InvalidInput(self, _mock_volume_api_restore): msg = _("Invalid input") _mock_volume_api_restore.side_effect = \ exception.InvalidInput(reason=msg) backup = utils.create_backup(self.context, status=fields.BackupStatus.AVAILABLE) # need to create the volume referenced below first volume = utils.create_volume(self.context, size=0) body = {"restore": {"volume_id": volume.id, }} req = webob.Request.blank('/v3/%s/backups/%s/restore' % ( fake.PROJECT_ID, backup.id)) req.method = 'POST' req.headers['Content-Type'] = 'application/json' req.body = jsonutils.dump_as_bytes(body) res = req.get_response(fakes.wsgi_app( fake_auth_context=self.user_context)) res_dict = jsonutils.loads(res.body) self.assertEqual(HTTPStatus.BAD_REQUEST, res.status_int) self.assertEqual(HTTPStatus.BAD_REQUEST, res_dict['badRequest']['code']) self.assertEqual('Invalid input received: Invalid input', res_dict['badRequest']['message']) def test_restore_backup_with_InvalidVolume(self): backup = utils.create_backup(self.context, status=fields.BackupStatus.AVAILABLE) # need to create the volume referenced below first volume = utils.create_volume(self.context, size=5, status='attaching') body = {"restore": {"volume_id": volume.id, }} req = webob.Request.blank('/v3/%s/backups/%s/restore' % ( fake.PROJECT_ID, backup.id)) req.method = 'POST' req.headers['Content-Type'] = 'application/json' req.body = jsonutils.dump_as_bytes(body) res = req.get_response(fakes.wsgi_app( fake_auth_context=self.user_context)) res_dict = jsonutils.loads(res.body) self.assertEqual(HTTPStatus.BAD_REQUEST, res.status_int) self.assertEqual(HTTPStatus.BAD_REQUEST, res_dict['badRequest']['code']) self.assertEqual('Invalid volume: Volume to be restored to must ' 'be available', res_dict['badRequest']['message']) volume.destroy() backup.destroy() def test_restore_backup_with_InvalidBackup(self): backup = utils.create_backup(self.context, status=fields.BackupStatus.RESTORING) # need to create the volume referenced below first volume = utils.create_volume(self.context, size=5) body = {"restore": {"volume_id": volume.id, }} req = webob.Request.blank('/v3/%s/backups/%s/restore' % ( fake.PROJECT_ID, backup.id)) req.method = 'POST' req.headers['Content-Type'] = 'application/json' req.body = jsonutils.dump_as_bytes(body) res = req.get_response(fakes.wsgi_app( fake_auth_context=self.user_context)) res_dict = jsonutils.loads(res.body) self.assertEqual(HTTPStatus.BAD_REQUEST, res.status_int) self.assertEqual(HTTPStatus.BAD_REQUEST, res_dict['badRequest']['code']) self.assertEqual('Invalid backup: Backup status must be available', res_dict['badRequest']['message']) volume.destroy() backup.destroy() def test_restore_backup_with_BackupNotFound(self): # need to create the volume referenced below first volume = utils.create_volume(self.context, size=5) body = {"restore": {"volume_id": volume.id, }} req = webob.Request.blank('/v3/%s/backups/%s/restore' % (fake.PROJECT_ID, fake.WILL_NOT_BE_FOUND_ID)) req.method = 'POST' req.headers['Content-Type'] = 'application/json' req.body = jsonutils.dump_as_bytes(body) res = req.get_response(fakes.wsgi_app( fake_auth_context=self.user_context)) res_dict = jsonutils.loads(res.body) self.assertEqual(HTTPStatus.NOT_FOUND, res.status_int) self.assertEqual(HTTPStatus.NOT_FOUND, res_dict['itemNotFound']['code']) self.assertEqual('Backup %s could not be found.' 
% fake.WILL_NOT_BE_FOUND_ID, res_dict['itemNotFound']['message']) volume.destroy() def test_restore_backup_with_VolumeNotFound(self): backup = utils.create_backup(self.context, status=fields.BackupStatus.AVAILABLE) body = {"restore": {"volume_id": fake.WILL_NOT_BE_FOUND_ID, }} req = webob.Request.blank('/v3/%s/backups/%s/restore' % ( fake.PROJECT_ID, backup.id)) req.method = 'POST' req.headers['Content-Type'] = 'application/json' req.body = jsonutils.dump_as_bytes(body) res = req.get_response(fakes.wsgi_app( fake_auth_context=self.user_context)) res_dict = jsonutils.loads(res.body) self.assertEqual(HTTPStatus.NOT_FOUND, res.status_int) self.assertEqual(HTTPStatus.NOT_FOUND, res_dict['itemNotFound']['code']) self.assertEqual('Volume %s could not be found.' % fake.WILL_NOT_BE_FOUND_ID, res_dict['itemNotFound']['message']) backup.destroy() @mock.patch('cinder.backup.api.API.restore') def test_restore_backup_with_VolumeSizeExceedsAvailableQuota( self, _mock_backup_restore): _mock_backup_restore.side_effect = \ exception.VolumeSizeExceedsAvailableQuota(requested='2', consumed='2', quota='3') backup = utils.create_backup(self.context, status=fields.BackupStatus.AVAILABLE) # need to create the volume referenced below first volume = utils.create_volume(self.context, size=5) body = {"restore": {"volume_id": volume.id, }} req = webob.Request.blank('/v3/%s/backups/%s/restore' % ( fake.PROJECT_ID, backup.id)) req.method = 'POST' req.headers['Content-Type'] = 'application/json' req.body = jsonutils.dump_as_bytes(body) res = req.get_response(fakes.wsgi_app( fake_auth_context=self.user_context)) res_dict = jsonutils.loads(res.body) self.assertEqual(HTTPStatus.REQUEST_ENTITY_TOO_LARGE, res.status_int) self.assertEqual(HTTPStatus.REQUEST_ENTITY_TOO_LARGE, res_dict['overLimit']['code']) self.assertEqual('Requested volume or snapshot exceeds allowed ' 'gigabytes quota. 
Requested 2G, quota is 3G and ' '2G has been consumed.', res_dict['overLimit']['message']) @mock.patch('cinder.backup.api.API.restore') def test_restore_backup_with_VolumeLimitExceeded(self, _mock_backup_restore): _mock_backup_restore.side_effect = \ exception.VolumeLimitExceeded(allowed=1) backup = utils.create_backup(self.context, status=fields.BackupStatus.AVAILABLE) # need to create the volume referenced below first volume = utils.create_volume(self.context, size=5) body = {"restore": {"volume_id": volume.id, }} req = webob.Request.blank('/v3/%s/backups/%s/restore' % ( fake.PROJECT_ID, backup.id)) req.method = 'POST' req.headers['Content-Type'] = 'application/json' req.body = jsonutils.dump_as_bytes(body) res = req.get_response(fakes.wsgi_app( fake_auth_context=self.user_context)) res_dict = jsonutils.loads(res.body) self.assertEqual(HTTPStatus.REQUEST_ENTITY_TOO_LARGE, res.status_int) self.assertEqual(HTTPStatus.REQUEST_ENTITY_TOO_LARGE, res_dict['overLimit']['code']) self.assertEqual("Maximum number of volumes allowed (1) exceeded for" " quota 'volumes'.", res_dict['overLimit']['message']) def test_restore_backup_to_undersized_volume(self): backup_size = 10 backup = utils.create_backup(self.context, status=fields.BackupStatus.AVAILABLE, size=backup_size) # need to create the volume referenced below first volume_size = 5 volume = utils.create_volume(self.context, size=volume_size) body = {"restore": {"volume_id": volume.id, }} req = webob.Request.blank('/v3/%s/backups/%s/restore' % ( fake.PROJECT_ID, backup.id)) req.method = 'POST' req.headers['Content-Type'] = 'application/json' req.body = jsonutils.dump_as_bytes(body) res = req.get_response(fakes.wsgi_app( fake_auth_context=self.user_context)) res_dict = jsonutils.loads(res.body) self.assertEqual(HTTPStatus.BAD_REQUEST, res.status_int) self.assertEqual(HTTPStatus.BAD_REQUEST, res_dict['badRequest']['code']) self.assertEqual('Invalid volume: volume size %d is too ' 'small to restore backup of size %d.' 
% (volume_size, backup_size), res_dict['badRequest']['message']) volume.destroy() backup.destroy() @mock.patch('cinder.backup.api.API._get_available_backup_service_host') def test_restore_backup_to_oversized_volume(self, _mock_get_backup_host): backup = utils.create_backup(self.context, status=fields.BackupStatus.AVAILABLE, size=10) _mock_get_backup_host.return_value = 'testhost' # need to create the volume referenced below first volume_name = 'test1' volume = utils.create_volume(self.context, size=15, display_name=volume_name) body = {"restore": {"volume_id": volume.id, }} req = webob.Request.blank('/v3/%s/backups/%s/restore' % ( fake.PROJECT_ID, backup.id)) req.method = 'POST' req.headers['Content-Type'] = 'application/json' req.body = jsonutils.dump_as_bytes(body) res = req.get_response(fakes.wsgi_app( fake_auth_context=self.user_context)) res_dict = jsonutils.loads(res.body) self.assertEqual(HTTPStatus.ACCEPTED, res.status_int) self.assertEqual(backup.id, res_dict['restore']['backup_id']) self.assertEqual(volume.id, res_dict['restore']['volume_id']) self.assertEqual(volume_name, res_dict['restore']['volume_name']) volume.destroy() backup.destroy() @mock.patch('cinder.backup.rpcapi.BackupAPI.restore_backup') @mock.patch('cinder.backup.api.API._get_available_backup_service_host') def test_restore_backup_with_different_host(self, _mock_get_backup_host, mock_restore_backup): volume_name = 'test1' backup = utils.create_backup(self.context, status=fields.BackupStatus.AVAILABLE, size=10, host='HostA') volume = utils.create_volume(self.context, size=10, host='HostB@BackendB#PoolB', display_name=volume_name) _mock_get_backup_host.return_value = 'testhost' body = {"restore": {"volume_id": volume.id, }} req = webob.Request.blank('/v3/%s/backups/%s/restore' % ( fake.PROJECT_ID, backup.id)) req.method = 'POST' req.headers['Content-Type'] = 'application/json' req.body = jsonutils.dump_as_bytes(body) res = req.get_response(fakes.wsgi_app( fake_auth_context=self.user_context)) res_dict = jsonutils.loads(res.body) self.assertEqual(HTTPStatus.ACCEPTED, res.status_int) self.assertEqual(backup.id, res_dict['restore']['backup_id']) self.assertEqual(volume.id, res_dict['restore']['volume_id']) self.assertEqual(volume_name, res_dict['restore']['volume_name']) mock_restore_backup.assert_called_once_with(mock.ANY, 'testhost', mock.ANY, volume.id, False) # Manually check if restore_backup was called with appropriate backup. 
self.assertEqual(backup.id, mock_restore_backup.call_args[0][2].id) volume.destroy() backup.destroy() def test_export_record_as_non_admin(self): backup = utils.create_backup(self.context, status=fields.BackupStatus.AVAILABLE, size=10) req = webob.Request.blank('/v3/%s/backups/%s/export_record' % ( fake.PROJECT_ID, backup.id)) req.method = 'GET' req.headers['content-type'] = 'application/json' res = req.get_response(fakes.wsgi_app( fake_auth_context=self.user_context)) # request is not authorized self.assertEqual(HTTPStatus.FORBIDDEN, res.status_int) @mock.patch('cinder.backup.api.API._get_available_backup_service_host') @mock.patch('cinder.backup.rpcapi.BackupAPI.export_record') def test_export_backup_record_id_specified_json(self, _mock_export_record_rpc, _mock_get_backup_host): backup = utils.create_backup(self.context, status=fields.BackupStatus.AVAILABLE, size=10) ctx = context.RequestContext(fake.USER_ID, fake.PROJECT_ID, is_admin=True) backup_service = 'fake' backup_url = 'fake' _mock_export_record_rpc.return_value = \ {'backup_service': backup_service, 'backup_url': backup_url} _mock_get_backup_host.return_value = 'testhost' req = webob.Request.blank('/v3/%s/backups/%s/export_record' % ( fake.PROJECT_ID, backup.id)) req.method = 'GET' req.headers['content-type'] = 'application/json' res = req.get_response(fakes.wsgi_app(fake_auth_context=ctx)) res_dict = jsonutils.loads(res.body) # verify that request is successful self.assertEqual(HTTPStatus.OK, res.status_int) self.assertEqual(backup_service, res_dict['backup-record']['backup_service']) self.assertEqual(backup_url, res_dict['backup-record']['backup_url']) backup.destroy() def test_export_record_with_bad_backup_id(self): ctx = context.RequestContext(fake.USER_ID, fake.PROJECT_ID, is_admin=True) backup_id = fake.WILL_NOT_BE_FOUND_ID req = webob.Request.blank('/v3/%s/backups/%s/export_record' % (fake.PROJECT_ID, backup_id)) req.method = 'GET' req.headers['content-type'] = 'application/json' res = req.get_response(fakes.wsgi_app(fake_auth_context=ctx)) res_dict = jsonutils.loads(res.body) self.assertEqual(HTTPStatus.NOT_FOUND, res.status_int) self.assertEqual(HTTPStatus.NOT_FOUND, res_dict['itemNotFound']['code']) self.assertEqual('Backup %s could not be found.' 
% backup_id, res_dict['itemNotFound']['message']) def test_export_record_for_unavailable_backup(self): backup = utils.create_backup(self.context, status=fields.BackupStatus.RESTORING) ctx = context.RequestContext(fake.USER_ID, fake.PROJECT_ID, is_admin=True) req = webob.Request.blank('/v3/%s/backups/%s/export_record' % (fake.PROJECT_ID, backup.id)) req.method = 'GET' req.headers['content-type'] = 'application/json' res = req.get_response(fakes.wsgi_app(fake_auth_context=ctx)) res_dict = jsonutils.loads(res.body) self.assertEqual(HTTPStatus.BAD_REQUEST, res.status_int) self.assertEqual(HTTPStatus.BAD_REQUEST, res_dict['badRequest']['code']) self.assertEqual('Invalid backup: Backup status must be available ' 'and not restoring.', res_dict['badRequest']['message']) backup.destroy() @mock.patch('cinder.backup.api.API._get_available_backup_service_host') @mock.patch('cinder.backup.rpcapi.BackupAPI.export_record') def test_export_record_with_unavailable_service(self, _mock_export_record_rpc, _mock_get_backup_host): msg = 'fake unavailable service' _mock_export_record_rpc.side_effect = \ exception.InvalidBackup(reason=msg) _mock_get_backup_host.return_value = 'testhost' backup = utils.create_backup(self.context, status=fields.BackupStatus.AVAILABLE) ctx = context.RequestContext(fake.USER_ID, fake.PROJECT_ID, is_admin=True) req = webob.Request.blank('/v3/%s/backups/%s/export_record' % (fake.PROJECT_ID, backup.id)) req.method = 'GET' req.headers['content-type'] = 'application/json' res = req.get_response(fakes.wsgi_app(fake_auth_context=ctx)) res_dict = jsonutils.loads(res.body) self.assertEqual(HTTPStatus.BAD_REQUEST, res.status_int) self.assertEqual(HTTPStatus.BAD_REQUEST, res_dict['badRequest']['code']) self.assertEqual('Invalid backup: %s' % msg, res_dict['badRequest']['message']) backup.destroy() def test_import_record_as_non_admin(self): backup_service = 'fake' backup_url = 'fake' req = webob.Request.blank('/v3/%s/backups/import_record' % fake.PROJECT_ID) body = {'backup-record': {'backup_service': backup_service, 'backup_url': backup_url}} req.body = jsonutils.dump_as_bytes(body) req.method = 'POST' req.headers['content-type'] = 'application/json' res = req.get_response(fakes.wsgi_app( fake_auth_context=self.user_context)) # request is not authorized self.assertEqual(HTTPStatus.FORBIDDEN, res.status_int) @mock.patch.object(quota.QUOTAS, 'commit') @mock.patch.object(quota.QUOTAS, 'rollback') @mock.patch.object(quota.QUOTAS, 'reserve') @mock.patch('cinder.backup.api.API._list_backup_hosts') @mock.patch('cinder.backup.rpcapi.BackupAPI.import_record') def test_import_record_volume_id_specified_json(self, _mock_import_record_rpc, _mock_list_services, mock_reserve, mock_rollback, mock_commit): utils.replace_obj_loader(self, objects.Backup) mock_reserve.return_value = "fake_reservation" project_id = fake.PROJECT_ID backup_service = 'fake' ctx = context.RequestContext(fake.USER_ID, project_id, is_admin=True) backup = objects.Backup(ctx, id=fake.BACKUP_ID, user_id=fake.USER_ID, project_id=project_id, size=1, status=fields.BackupStatus.AVAILABLE) backup_url = backup.encode_record() _mock_import_record_rpc.return_value = None _mock_list_services.return_value = [backup_service] req = webob.Request.blank('/v3/%s/backups/import_record' % fake.PROJECT_ID) body = {'backup-record': {'backup_service': backup_service, 'backup_url': backup_url}} req.body = jsonutils.dump_as_bytes(body) req.method = 'POST' req.headers['content-type'] = 'application/json' res = 
req.get_response(fakes.wsgi_app(fake_auth_context=ctx)) res_dict = jsonutils.loads(res.body) # verify that request is successful self.assertEqual(HTTPStatus.CREATED, res.status_int) self.assertIn('id', res_dict['backup']) self.assertEqual(fake.BACKUP_ID, res_dict['backup']['id']) # Verify that entry in DB is as expected db_backup = objects.Backup.get_by_id(ctx, fake.BACKUP_ID) self.assertEqual(ctx.project_id, db_backup.project_id) self.assertEqual(ctx.user_id, db_backup.user_id) self.assertEqual(backup_api.IMPORT_VOLUME_ID, db_backup.volume_id) self.assertEqual(fields.BackupStatus.CREATING, db_backup.status) mock_reserve.assert_called_with( ctx, backups=1, backup_gigabytes=1) mock_commit.assert_called_with(ctx, "fake_reservation") @mock.patch.object(quota.QUOTAS, 'commit') @mock.patch.object(quota.QUOTAS, 'rollback') @mock.patch.object(quota.QUOTAS, 'reserve') @mock.patch('cinder.backup.api.API._list_backup_hosts') @mock.patch('cinder.backup.rpcapi.BackupAPI.import_record') def test_import_record_volume_id_exists_deleted(self, _mock_import_record_rpc, _mock_list_services, mock_reserve, mock_rollback, mock_commit, ): ctx = context.RequestContext(fake.USER_ID, fake.PROJECT_ID, is_admin=True) mock_reserve.return_value = 'fake_reservation' utils.replace_obj_loader(self, objects.Backup) # Original backup belonged to a different user_id and project_id backup = objects.Backup(ctx, id=fake.BACKUP_ID, user_id=fake.USER2_ID, project_id=fake.PROJECT2_ID, size=1, status=fields.BackupStatus.AVAILABLE) backup_url = backup.encode_record() # Deleted DB entry has project_id and user_id set to fake backup_del = utils.create_backup(self.context, fake.VOLUME_ID, status=fields.BackupStatus.DELETED) backup_service = 'fake' _mock_import_record_rpc.return_value = None _mock_list_services.return_value = [backup_service] req = webob.Request.blank('/v3/%s/backups/import_record' % fake.PROJECT_ID) body = {'backup-record': {'backup_service': backup_service, 'backup_url': backup_url}} req.body = jsonutils.dump_as_bytes(body) req.method = 'POST' req.headers['content-type'] = 'application/json' res = req.get_response(fakes.wsgi_app(fake_auth_context=ctx)) res_dict = jsonutils.loads(res.body) # verify that request is successful self.assertEqual(HTTPStatus.CREATED, res.status_int) self.assertIn('id', res_dict['backup']) self.assertEqual(fake.BACKUP_ID, res_dict['backup']['id']) # Verify that entry in DB is as expected, with new project and user_id db_backup = objects.Backup.get_by_id(ctx, fake.BACKUP_ID) self.assertEqual(ctx.project_id, db_backup.project_id) self.assertEqual(ctx.user_id, db_backup.user_id) self.assertEqual(backup_api.IMPORT_VOLUME_ID, db_backup.volume_id) self.assertEqual(fields.BackupStatus.CREATING, db_backup.status) mock_reserve.assert_called_with(ctx, backups=1, backup_gigabytes=1) mock_commit.assert_called_with(ctx, "fake_reservation") backup_del.destroy() @mock.patch('cinder.backup.api.API._list_backup_hosts') def test_import_record_with_no_backup_services(self, _mock_list_services): ctx = context.RequestContext(fake.USER_ID, fake.PROJECT_ID, is_admin=True) backup_service = 'fake' backup_url = 'fake' _mock_list_services.return_value = [] req = webob.Request.blank('/v3/%s/backups/import_record' % fake.PROJECT_ID) body = {'backup-record': {'backup_service': backup_service, 'backup_url': backup_url}} req.body = jsonutils.dump_as_bytes(body) req.method = 'POST' req.headers['content-type'] = 'application/json' res = req.get_response(fakes.wsgi_app(fake_auth_context=ctx)) res_dict = 
jsonutils.loads(res.body) self.assertEqual(HTTPStatus.SERVICE_UNAVAILABLE, res.status_int) self.assertEqual(HTTPStatus.SERVICE_UNAVAILABLE, res_dict['serviceUnavailable']['code']) self.assertEqual('Service %s could not be found.' % backup_service, res_dict['serviceUnavailable']['message']) @mock.patch('cinder.backup.api.API._list_backup_hosts') def test_import_backup_with_wrong_backup_url(self, _mock_list_services): ctx = context.RequestContext(fake.USER_ID, fake.PROJECT_ID, is_admin=True) backup_service = 'fake' backup_url = 'fake' _mock_list_services.return_value = ['no-match1', 'no-match2'] req = webob.Request.blank('/v3/%s/backups/import_record' % fake.PROJECT_ID) body = {'backup-record': {'backup_service': backup_service, 'backup_url': backup_url}} req.body = jsonutils.dump_as_bytes(body) req.method = 'POST' req.headers['content-type'] = 'application/json' res = req.get_response(fakes.wsgi_app(fake_auth_context=ctx)) res_dict = jsonutils.loads(res.body) self.assertEqual(HTTPStatus.BAD_REQUEST, res.status_int) self.assertEqual(HTTPStatus.BAD_REQUEST, res_dict['badRequest']['code']) self.assertEqual("Invalid input received: Can't parse backup record.", res_dict['badRequest']['message']) @mock.patch.object(quota.QUOTAS, 'commit') @mock.patch.object(quota.QUOTAS, 'rollback') @mock.patch.object(quota.QUOTAS, 'reserve') @mock.patch('cinder.backup.api.API._list_backup_hosts') def test_import_backup_with_existing_backup_record(self, _mock_list_services, mock_reserve, mock_rollback, mock_commit): ctx = context.RequestContext(fake.USER_ID, fake.PROJECT_ID, is_admin=True) mock_reserve.return_value = "fake_reservation" backup = utils.create_backup(self.context, fake.VOLUME_ID, size=1) backup_service = 'fake' backup_url = backup.encode_record() _mock_list_services.return_value = ['no-match1', 'no-match2'] req = webob.Request.blank('/v3/%s/backups/import_record' % fake.PROJECT_ID) body = {'backup-record': {'backup_service': backup_service, 'backup_url': backup_url}} req.body = jsonutils.dump_as_bytes(body) req.method = 'POST' req.headers['content-type'] = 'application/json' res = req.get_response(fakes.wsgi_app(fake_auth_context=ctx)) res_dict = jsonutils.loads(res.body) self.assertEqual(HTTPStatus.BAD_REQUEST, res.status_int) self.assertEqual(HTTPStatus.BAD_REQUEST, res_dict['badRequest']['code']) self.assertEqual('Invalid backup: Backup already exists in database.', res_dict['badRequest']['message']) # Bug #1965847: already existing backup should not be deleted backup_status = db.backup_get(context.get_admin_context(), backup.id)['status'] self.assertNotEqual(fields.BackupStatus.DELETED, backup_status) # ... 
and quotas should not be touched mock_reserve.assert_not_called() mock_rollback.assert_not_called() mock_commit.assert_not_called() backup.destroy() @mock.patch.object(quota.QUOTAS, 'commit') @mock.patch.object(quota.QUOTAS, 'rollback') @mock.patch.object(quota.QUOTAS, 'reserve') @mock.patch('cinder.backup.api.API._list_backup_hosts') @mock.patch('cinder.backup.rpcapi.BackupAPI.import_record') def test_import_backup_with_missing_backup_services(self, mock_reserve, mock_rollback, mock_commit, _mock_import_record, _mock_list_services): ctx = context.RequestContext(fake.USER_ID, fake.PROJECT_ID, is_admin=True) backup = utils.create_backup(self.context, fake.VOLUME_ID, status=fields.BackupStatus.DELETED) backup_service = 'fake' backup_url = backup.encode_record() _mock_list_services.return_value = ['no-match1', 'no-match2'] _mock_import_record.side_effect = \ exception.ServiceNotFound(service_id='fake') req = webob.Request.blank('/v3/%s/backups/import_record' % fake.PROJECT_ID) body = {'backup-record': {'backup_service': backup_service, 'backup_url': backup_url}} req.body = jsonutils.dump_as_bytes(body) req.method = 'POST' req.headers['content-type'] = 'application/json' res = req.get_response(fakes.wsgi_app(fake_auth_context=ctx)) res_dict = jsonutils.loads(res.body) self.assertEqual(HTTPStatus.SERVICE_UNAVAILABLE, res.status_int) self.assertEqual(HTTPStatus.SERVICE_UNAVAILABLE, res_dict['serviceUnavailable']['code']) self.assertEqual('Service %s could not be found.' % backup_service, res_dict['serviceUnavailable']['message']) backup.destroy() def test_import_record_with_missing_body_elements(self): ctx = context.RequestContext(fake.USER_ID, fake.PROJECT_ID, is_admin=True) backup_service = 'fake' backup_url = 'fake' # test with no backup_service req = webob.Request.blank('/v3/%s/backups/import_record' % fake.PROJECT_ID) body = {'backup-record': {'backup_url': backup_url}} req.body = jsonutils.dump_as_bytes(body) req.method = 'POST' req.headers['content-type'] = 'application/json' res = req.get_response(fakes.wsgi_app(fake_auth_context=ctx)) res_dict = jsonutils.loads(res.body) self.assertEqual(HTTPStatus.BAD_REQUEST, res.status_int) self.assertEqual(HTTPStatus.BAD_REQUEST, res_dict['badRequest']['code']) self.assertEqual( "Invalid input for field/attribute backup-record. " "Value: {'backup_url': 'fake'}. 'backup_service' " "is a required property", res_dict['badRequest']['message']) # test with no backup_url req = webob.Request.blank('/v3/%s/backups/import_record' % fake.PROJECT_ID) body = {'backup-record': {'backup_service': backup_service}} req.body = jsonutils.dump_as_bytes(body) req.method = 'POST' req.headers['content-type'] = 'application/json' res = req.get_response(fakes.wsgi_app(fake_auth_context=ctx)) res_dict = jsonutils.loads(res.body) self.assertEqual(HTTPStatus.BAD_REQUEST, res.status_int) self.assertEqual(HTTPStatus.BAD_REQUEST, res_dict['badRequest']['code']) self.assertEqual( "Invalid input for field/attribute backup-record. " "Value: {'backup_service': 'fake'}. 
'backup_url' " "is a required property", res_dict['badRequest']['message']) # test with no backup_url and backup_url req = webob.Request.blank('/v3/%s/backups/import_record' % fake.PROJECT_ID) body = {'backup-record': {}} req.body = jsonutils.dump_as_bytes(body) req.method = 'POST' req.headers['content-type'] = 'application/json' res = req.get_response(fakes.wsgi_app(fake_auth_context=ctx)) res_dict = jsonutils.loads(res.body) self.assertEqual(HTTPStatus.BAD_REQUEST, res.status_int) self.assertEqual(HTTPStatus.BAD_REQUEST, res_dict['badRequest']['code']) self.assertEqual( "Invalid input for field/attribute backup-record. " "Value: {}. 'backup_service' is a required property", res_dict['badRequest']['message']) def test_import_record_with_no_body(self): ctx = context.RequestContext(fake.USER_ID, fake.PROJECT_ID, is_admin=True) req = webob.Request.blank('/v3/%s/backups/import_record' % fake.PROJECT_ID) req.body = jsonutils.dump_as_bytes(None) req.method = 'POST' req.headers['content-type'] = 'application/json' res = req.get_response(fakes.wsgi_app(fake_auth_context=ctx)) res_dict = jsonutils.loads(res.body) # verify that request is successful self.assertEqual(HTTPStatus.BAD_REQUEST, res.status_int) self.assertEqual(HTTPStatus.BAD_REQUEST, res_dict['badRequest']['code']) self.assertEqual("None is not of type 'object'", res_dict['badRequest']['message']) @mock.patch('cinder.backup.rpcapi.BackupAPI.check_support_to_force_delete', return_value=False) def test_force_delete_with_not_supported_operation(self, mock_check_support): backup = utils.create_backup(self.context, status=fields.BackupStatus.AVAILABLE) self.assertRaises(exception.NotSupportedOperation, self.backup_api.delete, self.context, backup, True) @ddt.data(False, True) def test_show_incremental_backup(self, backup_from_snapshot): volume = utils.create_volume(self.context, size=5) parent_backup = utils.create_backup( self.context, volume.id, status=fields.BackupStatus.AVAILABLE, num_dependent_backups=1) backup = utils.create_backup(self.context, volume.id, status=fields.BackupStatus.AVAILABLE, incremental=True, parent_id=parent_backup.id, num_dependent_backups=1) snapshot = None snapshot_id = None if backup_from_snapshot: snapshot = utils.create_snapshot(self.context, volume.id) snapshot_id = snapshot.id child_backup = utils.create_backup( self.context, volume.id, status=fields.BackupStatus.AVAILABLE, incremental=True, parent_id=backup.id, snapshot_id=snapshot_id) req = webob.Request.blank('/v3/%s/backups/%s' % ( fake.PROJECT_ID, backup.id)) req.method = 'GET' req.headers['Content-Type'] = 'application/json' res = req.get_response(fakes.wsgi_app( fake_auth_context=self.user_context)) res_dict = jsonutils.loads(res.body) self.assertEqual(HTTPStatus.OK, res.status_int) self.assertTrue(res_dict['backup']['is_incremental']) self.assertTrue(res_dict['backup']['has_dependent_backups']) self.assertIsNone(res_dict['backup']['snapshot_id']) req = webob.Request.blank('/v3/%s/backups/%s' % ( fake.PROJECT_ID, parent_backup.id)) req.method = 'GET' req.headers['Content-Type'] = 'application/json' res = req.get_response(fakes.wsgi_app( fake_auth_context=self.user_context)) res_dict = jsonutils.loads(res.body) self.assertEqual(HTTPStatus.OK, res.status_int) self.assertFalse(res_dict['backup']['is_incremental']) self.assertTrue(res_dict['backup']['has_dependent_backups']) self.assertIsNone(res_dict['backup']['snapshot_id']) req = webob.Request.blank('/v3/%s/backups/%s' % ( fake.PROJECT_ID, child_backup.id)) req.method = 'GET' 
req.headers['Content-Type'] = 'application/json' res = req.get_response(fakes.wsgi_app( fake_auth_context=self.user_context)) res_dict = jsonutils.loads(res.body) self.assertEqual(HTTPStatus.OK, res.status_int) self.assertTrue(res_dict['backup']['is_incremental']) self.assertFalse(res_dict['backup']['has_dependent_backups']) self.assertEqual(snapshot_id, res_dict['backup']['snapshot_id']) child_backup.destroy() backup.destroy() parent_backup.destroy() if snapshot: snapshot.destroy() volume.destroy() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/api/contrib/test_capabilities.py0000664000175000017500000001214200000000000024727 0ustar00zuulzuul00000000000000# Copyright (c) 2015 Hitachi Data Systems, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from unittest import mock import oslo_messaging from cinder.api.contrib import capabilities from cinder import context from cinder import exception from cinder.tests.unit.api import fakes from cinder.tests.unit import fake_constants as fake from cinder.tests.unit import test def rpcapi_get_capabilities(self, context, host, discover): capabilities = dict( vendor_name='OpenStack', volume_backend_name='lvm', pool_name='pool', driver_version='2.0.0', storage_protocol='iSCSI', display_name='Capabilities of Cinder LVM driver', description='These are volume type options provided by ' 'Cinder LVM driver, blah, blah.', replication_targets=[], visibility='public', properties=dict( compression=dict( title='Compression', description='Enables compression.', type='boolean'), qos=dict( title='QoS', description='Enables QoS.', type='boolean'), replication=dict( title='Replication', description='Enables replication.', type='boolean'), thin_provisioning=dict( title='Thin Provisioning', description='Sets thin provisioning.', type='boolean'), ) ) return capabilities class CapabilitiesAPITest(test.TestCase): def setUp(self): super(CapabilitiesAPITest, self).setUp() self.flags(host='fake') self.controller = capabilities.CapabilitiesController() self.ctxt = context.RequestContext(fake.USER_ID, fake.PROJECT_ID, True) @mock.patch('cinder.db.service_get_all') @mock.patch('cinder.volume.rpcapi.VolumeAPI.get_capabilities', rpcapi_get_capabilities) def test_capabilities_summary(self, mock_services): mock_services.return_value = [ {'name': 'fake', 'host': 'fake_host', 'uuid': 'a3a593da-7f8d-4bb7-8b4c-f2bc1e0b4824'}] req = fakes.HTTPRequest.blank('/fake/capabilities/fake') req.environ['cinder.context'] = self.ctxt res = self.controller.show(req, 'fake') expected = { 'namespace': 'OS::Storage::Capabilities::fake_host', 'vendor_name': 'OpenStack', 'volume_backend_name': 'lvm', 'pool_name': 'pool', 'driver_version': '2.0.0', 'storage_protocol': 'iSCSI', 'display_name': 'Capabilities of Cinder LVM driver', 'description': 'These are volume type options provided by ' 'Cinder LVM driver, blah, blah.', 'visibility': 'public', 'replication_targets': [], 'properties': { 'compression': { 
'title': 'Compression', 'description': 'Enables compression.', 'type': 'boolean'}, 'qos': { 'title': 'QoS', 'description': 'Enables QoS.', 'type': 'boolean'}, 'replication': { 'title': 'Replication', 'description': 'Enables replication.', 'type': 'boolean'}, 'thin_provisioning': { 'title': 'Thin Provisioning', 'description': 'Sets thin provisioning.', 'type': 'boolean'}, } } self.assertDictEqual(expected, res) @mock.patch('cinder.db.service_get_all') @mock.patch('cinder.volume.rpcapi.VolumeAPI.get_capabilities') def test_get_capabilities_rpc_timeout(self, mock_rpc, mock_services): mock_rpc.side_effect = oslo_messaging.MessagingTimeout mock_services.return_value = [ {'name': 'fake', 'uuid': 'a3a593da-7f8d-4bb7-8b4c-f2bc1e0b4824'}] req = fakes.HTTPRequest.blank('/fake/capabilities/fake') req.environ['cinder.context'] = self.ctxt self.assertRaises(exception.RPCTimeout, self.controller.show, req, 'fake') @mock.patch('cinder.db.service_get_all') def test_get_capabilities_service_not_found(self, mock_services): mock_services.return_value = [] req = fakes.HTTPRequest.blank('/fake/capabilities/fake') req.environ['cinder.context'] = self.ctxt self.assertRaises(exception.NotFound, self.controller.show, req, 'fake') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/api/contrib/test_cgsnapshots.py0000664000175000017500000006206100000000000024637 0ustar00zuulzuul00000000000000# Copyright (C) 2012 - 2014 EMC Corporation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
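# A minimal, standalone sketch of the request/response pattern repeated
# throughout these API tests: build a bare request with webob.Request.blank,
# run it against the test-only WSGI app from cinder.tests.unit.api.fakes,
# then decode the JSON body.  The helper name _example_show_backup and the
# use of a backup URL are illustrative only; project and backup ids come
# from the fake_constants module used by the surrounding tests.
from http import HTTPStatus

from oslo_serialization import jsonutils
import webob

from cinder.tests.unit.api import fakes as _fakes
from cinder.tests.unit import fake_constants as _fake


def _example_show_backup(user_context, backup_id):
    """GET /v3/<project>/backups/<id> against the fake WSGI app."""
    req = webob.Request.blank('/v3/%s/backups/%s' % (_fake.PROJECT_ID,
                                                     backup_id))
    req.method = 'GET'
    req.headers['Content-Type'] = 'application/json'
    res = req.get_response(_fakes.wsgi_app(fake_auth_context=user_context))
    # Tests assert on the status code first, then on the decoded JSON body.
    assert res.status_int in (HTTPStatus.OK, HTTPStatus.NOT_FOUND)
    return jsonutils.loads(res.body)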
"""Tests for cgsnapshot code.""" from http import HTTPStatus from unittest import mock from oslo_serialization import jsonutils import webob from cinder import context from cinder import db from cinder import exception from cinder.group import api as groupAPI from cinder import objects from cinder.tests.unit.api import fakes from cinder.tests.unit import fake_constants as fake from cinder.tests.unit import test from cinder.tests.unit import utils import cinder.volume class CgsnapshotsAPITestCase(test.TestCase): """Test Case for cgsnapshots API.""" def setUp(self): super(CgsnapshotsAPITestCase, self).setUp() self.volume_api = cinder.volume.API() self.context = context.get_admin_context() self.context.project_id = fake.PROJECT_ID self.context.user_id = fake.USER_ID self.user_ctxt = context.RequestContext( fake.USER_ID, fake.PROJECT_ID, auth_token=True) def test_show_cgsnapshot(self): vol_type = utils.create_volume_type(context.get_admin_context(), self, name='my_vol_type') consistencygroup = utils.create_group( self.context, group_type_id=fake.GROUP_TYPE_ID, volume_type_ids=[vol_type['id']]) volume_id = utils.create_volume(self.context, volume_type_id=vol_type['id'], group_id= consistencygroup.id)['id'] cgsnapshot = utils.create_group_snapshot( self.context, group_id=consistencygroup.id, group_type_id=fake.GROUP_TYPE_ID,) snapshot_id = utils.create_snapshot( self.context, volume_type_id=vol_type['id'], volume_id=volume_id, group_snapshot_id=cgsnapshot.id)['id'] req = webob.Request.blank('/v3/%s/cgsnapshots/%s' % ( fake.PROJECT_ID, cgsnapshot.id)) req.method = 'GET' req.headers['Content-Type'] = 'application/json' res = req.get_response(fakes.wsgi_app( fake_auth_context=self.user_ctxt)) res_dict = jsonutils.loads(res.body) self.assertEqual(HTTPStatus.OK, res.status_int) self.assertEqual('this is a test group snapshot', res_dict['cgsnapshot']['description']) self.assertEqual('test_group_snapshot', res_dict['cgsnapshot']['name']) self.assertEqual('creating', res_dict['cgsnapshot']['status']) db.snapshot_destroy(context.get_admin_context(), snapshot_id) cgsnapshot.destroy() db.volume_destroy(context.get_admin_context(), volume_id) consistencygroup.destroy() def test_show_cgsnapshot_with_cgsnapshot_NotFound(self): req = webob.Request.blank('/v3/%s/cgsnapshots/%s' % ( fake.PROJECT_ID, fake.WILL_NOT_BE_FOUND_ID)) req.method = 'GET' req.headers['Content-Type'] = 'application/json' res = req.get_response(fakes.wsgi_app( fake_auth_context=self.user_ctxt)) res_dict = jsonutils.loads(res.body) self.assertEqual(HTTPStatus.NOT_FOUND, res.status_int) self.assertEqual(HTTPStatus.NOT_FOUND, res_dict['itemNotFound']['code']) self.assertEqual('GroupSnapshot %s could not be found.' 
% fake.WILL_NOT_BE_FOUND_ID, res_dict['itemNotFound']['message']) def test_list_cgsnapshots_json(self): vol_type = utils.create_volume_type(context.get_admin_context(), self, name='my_vol_type') consistencygroup = utils.create_group( self.context, group_type_id=fake.GROUP_TYPE_ID, volume_type_ids=[vol_type['id']]) volume_id = utils.create_volume(self.context, volume_type_id=vol_type['id'], group_id= consistencygroup.id)['id'] cgsnapshot1 = utils.create_group_snapshot( self.context, group_id=consistencygroup.id, group_type_id=fake.GROUP_TYPE_ID,) cgsnapshot2 = utils.create_group_snapshot( self.context, group_id=consistencygroup.id, group_type_id=fake.GROUP_TYPE_ID,) cgsnapshot3 = utils.create_group_snapshot( self.context, group_id=consistencygroup.id, group_type_id=fake.GROUP_TYPE_ID,) req = webob.Request.blank('/v3/%s/cgsnapshots' % fake.PROJECT_ID) req.method = 'GET' req.headers['Content-Type'] = 'application/json' res = req.get_response(fakes.wsgi_app( fake_auth_context=self.user_ctxt)) res_dict = jsonutils.loads(res.body) self.assertEqual(HTTPStatus.OK, res.status_int) self.assertEqual(cgsnapshot3.id, res_dict['cgsnapshots'][0]['id']) self.assertEqual('test_group_snapshot', res_dict['cgsnapshots'][0]['name']) self.assertEqual(cgsnapshot2.id, res_dict['cgsnapshots'][1]['id']) self.assertEqual('test_group_snapshot', res_dict['cgsnapshots'][1]['name']) self.assertEqual(cgsnapshot1.id, res_dict['cgsnapshots'][2]['id']) self.assertEqual('test_group_snapshot', res_dict['cgsnapshots'][2]['name']) cgsnapshot3.destroy() cgsnapshot2.destroy() cgsnapshot1.destroy() db.volume_destroy(context.get_admin_context(), volume_id) consistencygroup.destroy() def test_list_cgsnapshots_detail_json(self): vol_type = utils.create_volume_type(context.get_admin_context(), self, name='my_vol_type') consistencygroup = utils.create_group( self.context, group_type_id=fake.GROUP_TYPE_ID, volume_type_ids=[vol_type['id']]) volume_id = utils.create_volume(self.context, volume_type_id=vol_type['id'], group_id= consistencygroup.id)['id'] cgsnapshot1 = utils.create_group_snapshot( self.context, group_id=consistencygroup.id, group_type_id=fake.GROUP_TYPE_ID,) cgsnapshot2 = utils.create_group_snapshot( self.context, group_id=consistencygroup.id, group_type_id=fake.GROUP_TYPE_ID,) cgsnapshot3 = utils.create_group_snapshot( self.context, group_id=consistencygroup.id, group_type_id=fake.GROUP_TYPE_ID,) req = webob.Request.blank('/v3/%s/cgsnapshots/detail' % fake.PROJECT_ID) req.method = 'GET' req.headers['Content-Type'] = 'application/json' req.headers['Accept'] = 'application/json' res = req.get_response(fakes.wsgi_app( fake_auth_context=self.user_ctxt)) res_dict = jsonutils.loads(res.body) self.assertEqual(HTTPStatus.OK, res.status_int) self.assertEqual('this is a test group snapshot', res_dict['cgsnapshots'][0]['description']) self.assertEqual('test_group_snapshot', res_dict['cgsnapshots'][0]['name']) self.assertEqual(cgsnapshot3.id, res_dict['cgsnapshots'][0]['id']) self.assertEqual('creating', res_dict['cgsnapshots'][0]['status']) self.assertEqual('this is a test group snapshot', res_dict['cgsnapshots'][1]['description']) self.assertEqual('test_group_snapshot', res_dict['cgsnapshots'][1]['name']) self.assertEqual(cgsnapshot2.id, res_dict['cgsnapshots'][1]['id']) self.assertEqual('creating', res_dict['cgsnapshots'][1]['status']) self.assertEqual('this is a test group snapshot', res_dict['cgsnapshots'][2]['description']) self.assertEqual('test_group_snapshot', res_dict['cgsnapshots'][2]['name']) 
self.assertEqual(cgsnapshot1.id, res_dict['cgsnapshots'][2]['id']) self.assertEqual('creating', res_dict['cgsnapshots'][2]['status']) cgsnapshot3.destroy() cgsnapshot2.destroy() cgsnapshot1.destroy() db.volume_destroy(context.get_admin_context(), volume_id) consistencygroup.destroy() @mock.patch( 'cinder.api.openstack.wsgi.Controller.validate_name_and_description') def test_create_cgsnapshot_json(self, mock_validate): vol_type = utils.create_volume_type(context.get_admin_context(), self, name='my_vol_type') consistencygroup = utils.create_group( self.context, group_type_id=fake.GROUP_TYPE_ID, volume_type_ids=[vol_type['id']]) volume_id = utils.create_volume(self.context, volume_type_id=vol_type['id'], group_id= consistencygroup.id)['id'] body = {"cgsnapshot": {"name": "cg1", "description": "CG Snapshot 1", "consistencygroup_id": consistencygroup.id}} req = webob.Request.blank('/v3/%s/cgsnapshots' % fake.PROJECT_ID) req.method = 'POST' req.headers['Content-Type'] = 'application/json' req.body = jsonutils.dump_as_bytes(body) res = req.get_response(fakes.wsgi_app( fake_auth_context=self.user_ctxt)) res_dict = jsonutils.loads(res.body) self.assertEqual(HTTPStatus.ACCEPTED, res.status_int) self.assertIn('id', res_dict['cgsnapshot']) self.assertTrue(mock_validate.called) cgsnapshot = objects.GroupSnapshot.get_by_id( context.get_admin_context(), res_dict['cgsnapshot']['id']) cgsnapshot.destroy() db.volume_destroy(context.get_admin_context(), volume_id) consistencygroup.destroy() @mock.patch( 'cinder.api.openstack.wsgi.Controller.validate_name_and_description') def test_create_cgsnapshot_when_volume_in_error_status(self, mock_validate): vol_type = utils.create_volume_type(context.get_admin_context(), self, name='my_vol_type') consistencygroup = utils.create_group( self.context, group_type_id=fake.GROUP_TYPE_ID, volume_type_ids=[vol_type['id']]) volume_id = utils.create_volume(self.context, volume_type_id=vol_type['id'], group_id=consistencygroup.id, status='error')['id'] body = {"cgsnapshot": {"name": "cg1", "description": "CG Snapshot 1", "consistencygroup_id": consistencygroup.id}} req = webob.Request.blank('/v3/%s/cgsnapshots' % fake.PROJECT_ID) req.method = 'POST' req.headers['Content-Type'] = 'application/json' req.body = jsonutils.dump_as_bytes(body) res = req.get_response(fakes.wsgi_app( fake_auth_context=self.user_ctxt)) res_dict = jsonutils.loads(res.body) self.assertEqual(HTTPStatus.BAD_REQUEST, res.status_int) self.assertEqual(HTTPStatus.BAD_REQUEST, res_dict['badRequest']['code']) self.assertEqual( "Invalid volume: The snapshot cannot be created when the volume " "is in error status.", res_dict['badRequest']['message'] ) self.assertTrue(mock_validate.called) db.volume_destroy(context.get_admin_context(), volume_id) consistencygroup.destroy() def test_create_cgsnapshot_with_no_body(self): # omit body from the request req = webob.Request.blank('/v3/%s/cgsnapshots' % fake.PROJECT_ID) req.body = jsonutils.dump_as_bytes(None) req.method = 'POST' req.headers['Content-Type'] = 'application/json' req.headers['Accept'] = 'application/json' res = req.get_response(fakes.wsgi_app( fake_auth_context=self.user_ctxt)) res_dict = jsonutils.loads(res.body) self.assertEqual(HTTPStatus.BAD_REQUEST, res.status_int) self.assertEqual(HTTPStatus.BAD_REQUEST, res_dict['badRequest']['code']) self.assertEqual("Missing required element 'cgsnapshot' in " "request body.", res_dict['badRequest']['message']) @mock.patch.object(groupAPI.API, 'create_group_snapshot', side_effect=exception.InvalidGroupSnapshot( 
reason='invalid group_snapshot')) def test_create_with_invalid_cgsnapshot(self, mock_create_cgsnapshot): vol_type = utils.create_volume_type(context.get_admin_context(), self, name='my_vol_type') consistencygroup = utils.create_group( self.context, group_type_id=fake.GROUP_TYPE_ID, volume_type_ids=[vol_type['id']]) volume_id = utils.create_volume(self.context, volume_type_id=vol_type['id'], group_id=consistencygroup.id)['id'] body = {"cgsnapshot": {"name": "cg1", "description": "CG Snapshot 1", "consistencygroup_id": consistencygroup.id}} req = webob.Request.blank('/v3/%s/cgsnapshots' % fake.PROJECT_ID) req.body = jsonutils.dump_as_bytes(body) req.method = 'POST' req.headers['Content-Type'] = 'application/json' res = req.get_response(fakes.wsgi_app( fake_auth_context=self.user_ctxt)) res_dict = jsonutils.loads(res.body) self.assertEqual(HTTPStatus.BAD_REQUEST, res.status_int) self.assertEqual(HTTPStatus.BAD_REQUEST, res_dict['badRequest']['code']) self.assertEqual('Invalid GroupSnapshot: invalid group_snapshot', res_dict['badRequest']['message']) db.volume_destroy(context.get_admin_context(), volume_id) consistencygroup.destroy() @mock.patch.object(groupAPI.API, 'create_group_snapshot', side_effect=exception.GroupSnapshotNotFound( group_snapshot_id='invalid_id')) def test_create_with_cgsnapshot_not_found(self, mock_create_cgsnapshot): vol_type = utils.create_volume_type(context.get_admin_context(), self, name='my_vol_type') consistencygroup = utils.create_group( self.context, group_type_id=fake.GROUP_TYPE_ID, volume_type_ids=[vol_type['id']]) volume_id = utils.create_volume(self.context, volume_type_id=vol_type['id'], group_id=consistencygroup.id)['id'] body = {"cgsnapshot": {"name": "cg1", "description": "CG Snapshot 1", "consistencygroup_id": consistencygroup.id}} req = webob.Request.blank('/v3/%s/cgsnapshots' % fake.PROJECT_ID) req.method = 'POST' req.headers['Content-Type'] = 'application/json' req.body = jsonutils.dump_as_bytes(body) res = req.get_response(fakes.wsgi_app( fake_auth_context=self.user_ctxt)) res_dict = jsonutils.loads(res.body) self.assertEqual(HTTPStatus.NOT_FOUND, res.status_int) self.assertEqual(HTTPStatus.NOT_FOUND, res_dict['itemNotFound']['code']) self.assertEqual('GroupSnapshot invalid_id could not be found.', res_dict['itemNotFound']['message']) db.volume_destroy(context.get_admin_context(), volume_id) consistencygroup.destroy() def test_create_cgsnapshot_from_empty_consistencygroup(self): vol_type = utils.create_volume_type(context.get_admin_context(), self, name='my_vol_type') consistencygroup = utils.create_group( self.context, group_type_id=fake.GROUP_TYPE_ID, volume_type_ids=[vol_type['id']]) body = {"cgsnapshot": {"name": "cg1", "description": "CG Snapshot 1", "consistencygroup_id": consistencygroup.id}} req = webob.Request.blank('/v3/%s/cgsnapshots' % fake.PROJECT_ID) req.method = 'POST' req.headers['Content-Type'] = 'application/json' req.body = jsonutils.dump_as_bytes(body) res = req.get_response(fakes.wsgi_app( fake_auth_context=self.user_ctxt)) res_dict = jsonutils.loads(res.body) self.assertEqual(HTTPStatus.BAD_REQUEST, res.status_int) self.assertEqual(HTTPStatus.BAD_REQUEST, res_dict['badRequest']['code']) self.assertIsNotNone(res_dict['badRequest']['message']) # If failed to create cgsnapshot, its DB object should not be created self.assertListEqual( [], list(objects.GroupSnapshotList.get_all(self.context))) consistencygroup.destroy() def test_delete_cgsnapshot_available(self): vol_type = utils.create_volume_type(context.get_admin_context(), self, 
name='my_vol_type') consistencygroup = utils.create_group( self.context, group_type_id=fake.GROUP_TYPE_ID, volume_type_ids=[vol_type['id']]) volume_id = utils.create_volume(self.context, volume_type_id=vol_type['id'], group_id= consistencygroup.id)['id'] cgsnapshot = utils.create_group_snapshot( self.context, group_id=consistencygroup.id, group_type_id=fake.GROUP_TYPE_ID, status='available') req = webob.Request.blank('/v3/%s/cgsnapshots/%s' % (fake.PROJECT_ID, cgsnapshot.id)) req.method = 'DELETE' req.headers['Content-Type'] = 'application/json' res = req.get_response(fakes.wsgi_app( fake_auth_context=self.user_ctxt)) cgsnapshot = objects.GroupSnapshot.get_by_id(self.context, cgsnapshot.id) self.assertEqual(HTTPStatus.ACCEPTED, res.status_int) self.assertEqual('deleting', cgsnapshot.status) cgsnapshot.destroy() db.volume_destroy(context.get_admin_context(), volume_id) consistencygroup.destroy() def test_delete_cgsnapshot_available_used_as_source(self): vol_type = utils.create_volume_type(context.get_admin_context(), self, name='my_vol_type') consistencygroup = utils.create_group( self.context, group_type_id=fake.GROUP_TYPE_ID, volume_type_ids=[vol_type['id']]) volume_id = utils.create_volume(self.context, volume_type_id=vol_type['id'], group_id= consistencygroup.id)['id'] cgsnapshot = utils.create_group_snapshot( self.context, group_id=consistencygroup.id, group_type_id=fake.GROUP_TYPE_ID, status='available') cg2 = utils.create_consistencygroup( self.context, status='creating', group_snapshot_id=cgsnapshot.id, group_type_id=fake.GROUP_TYPE_ID) req = webob.Request.blank('/v3/fake/cgsnapshots/%s' % cgsnapshot.id) req.method = 'DELETE' req.headers['Content-Type'] = 'application/json' res = req.get_response(fakes.wsgi_app()) cgsnapshot = objects.GroupSnapshot.get_by_id(self.context, cgsnapshot.id) self.assertEqual(HTTPStatus.BAD_REQUEST, res.status_int) self.assertEqual('available', cgsnapshot.status) cgsnapshot.destroy() db.volume_destroy(context.get_admin_context(), volume_id) consistencygroup.destroy() cg2.destroy() def test_delete_cgsnapshot_with_cgsnapshot_NotFound(self): req = webob.Request.blank('/v3/%s/cgsnapshots/%s' % (fake.PROJECT_ID, fake.WILL_NOT_BE_FOUND_ID)) req.method = 'DELETE' req.headers['Content-Type'] = 'application/json' res = req.get_response(fakes.wsgi_app( fake_auth_context=self.user_ctxt)) res_dict = jsonutils.loads(res.body) self.assertEqual(HTTPStatus.NOT_FOUND, res.status_int) self.assertEqual(HTTPStatus.NOT_FOUND, res_dict['itemNotFound']['code']) self.assertEqual('GroupSnapshot %s could not be found.' 
% fake.WILL_NOT_BE_FOUND_ID, res_dict['itemNotFound']['message']) def test_delete_cgsnapshot_with_invalid_cgsnapshot(self): vol_type = utils.create_volume_type(context.get_admin_context(), self, name='my_vol_type') consistencygroup = utils.create_group( self.context, group_type_id=fake.GROUP_TYPE_ID, volume_type_ids=[vol_type['id']]) volume_id = utils.create_volume(self.context, volume_type_id=vol_type['id'], group_id= consistencygroup.id)['id'] cgsnapshot = utils.create_group_snapshot( self.context, group_id=consistencygroup.id, group_type_id=fake.GROUP_TYPE_ID, status='invalid') req = webob.Request.blank('/v3/%s/cgsnapshots/%s' % ( fake.PROJECT_ID, cgsnapshot.id)) req.method = 'DELETE' req.headers['Content-Type'] = 'application/json' res = req.get_response(fakes.wsgi_app( fake_auth_context=self.user_ctxt)) res_dict = jsonutils.loads(res.body) self.assertEqual(HTTPStatus.BAD_REQUEST, res.status_int) self.assertEqual(HTTPStatus.BAD_REQUEST, res_dict['badRequest']['code']) self.assertIsNotNone(res_dict['badRequest']['message']) cgsnapshot.destroy() db.volume_destroy(context.get_admin_context(), volume_id) consistencygroup.destroy() @mock.patch('cinder.group.API.delete_group_snapshot') def test_delete_cgsnapshot_delete_policy_not_auth(self, mock_delete): vol_type = utils.create_volume_type(context.get_admin_context(), self, name='my_vol_type') consistencygroup = utils.create_group( self.context, group_type_id=fake.GROUP_TYPE_ID, volume_type_ids=[vol_type['id']]) volume_id = utils.create_volume(self.context, volume_type_id=vol_type['id'], group_id= consistencygroup.id)['id'] cgsnapshot = utils.create_group_snapshot( self.context, group_id=consistencygroup.id, group_type_id=fake.GROUP_TYPE_ID, status='available') mock_delete.side_effect = exception.PolicyNotAuthorized( message='PolicyNotAuthorized') req = webob.Request.blank('/v3/%s/cgsnapshots/%s' % (fake.PROJECT_ID, cgsnapshot.id)) req.method = 'DELETE' req.headers['Content-Type'] = 'application/json' res = req.get_response(fakes.wsgi_app( fake_auth_context=self.user_ctxt)) res_dict = jsonutils.loads(res.body) self.assertEqual('PolicyNotAuthorized', res_dict['forbidden']['message']) cgsnapshot.destroy() db.volume_destroy(context.get_admin_context(), volume_id) consistencygroup.destroy() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/api/contrib/test_consistencygroups.py0000664000175000017500000021574100000000000026111 0ustar00zuulzuul00000000000000# Copyright (C) 2012 - 2014 EMC Corporation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
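# A short standalone sketch of how stacked @mock.patch decorators map onto
# test-method arguments, since several tests in this tree stack four or five
# patches (quota commit/rollback/reserve plus RPC calls): decorators apply
# bottom-up, so the patch written closest to the function supplies the first
# mock argument after ``self``.  The _Example class below is hypothetical and
# exists only to illustrate the ordering rule.
from unittest import mock


class _Example(object):
    first = 'first'
    second = 'second'


@mock.patch.object(_Example, 'second')
@mock.patch.object(_Example, 'first')
def _example_patch_order(mock_first, mock_second):
    # mock_first is injected by the bottom decorator (patching 'first'),
    # mock_second by the top decorator (patching 'second').
    assert _Example.first is mock_first
    assert _Example.second is mock_second
# Calling _example_patch_order() with no arguments passes both asserts,
# because mock.patch supplies the two mock parameters itself.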
"""Tests for consistency group code.""" from http import HTTPStatus from unittest import mock import ddt from oslo_serialization import jsonutils import webob from cinder import context from cinder import db from cinder import exception import cinder.group from cinder.i18n import _ from cinder import objects from cinder.objects import fields from cinder.tests.unit.api import fakes from cinder.tests.unit.api.v3 import fakes as v3_fakes from cinder.tests.unit import fake_constants as fake from cinder.tests.unit import test from cinder.tests.unit import utils from cinder.volume import api as volume_api @ddt.ddt class ConsistencyGroupsAPITestCase(test.TestCase): """Test Case for consistency groups API.""" def setUp(self): super(ConsistencyGroupsAPITestCase, self).setUp() self.cg_api = cinder.group.API() self.ctxt = context.RequestContext(fake.USER_ID, fake.PROJECT_ID, auth_token=True, is_admin=True) self.user_ctxt = context.RequestContext( fake.USER_ID, fake.PROJECT_ID, auth_token=True) self.admin_ctxt = context.get_admin_context() db.volume_type_create(self.admin_ctxt, v3_fakes.fake_default_type_get( fake.VOLUME_TYPE2_ID)) self.vol_type = db.volume_type_get_by_name(self.admin_ctxt, 'vol_type_name') def _create_consistencygroup( self, ctxt=None, name='test_consistencygroup', user_id=fake.USER_ID, project_id=fake.PROJECT_ID, description='this is a test consistency group', group_type_id=fake.GROUP_TYPE_ID, volume_type_ids=[fake.VOLUME_TYPE_ID], availability_zone='az1', host='fakehost', status=fields.ConsistencyGroupStatus.CREATING, **kwargs): """Create a consistency group object.""" ctxt = ctxt or self.ctxt consistencygroup = objects.Group(ctxt) consistencygroup.user_id = user_id consistencygroup.project_id = project_id consistencygroup.availability_zone = availability_zone consistencygroup.name = name consistencygroup.description = description consistencygroup.group_type_id = group_type_id consistencygroup.volume_type_ids = volume_type_ids consistencygroup.host = host consistencygroup.status = status consistencygroup.update(kwargs) consistencygroup.create() return consistencygroup def test_show_consistencygroup(self): vol_type = utils.create_volume_type(context.get_admin_context(), self, name='my_vol_type') consistencygroup = self._create_consistencygroup( volume_type_ids=[vol_type['id']]) req = webob.Request.blank('/v3/%s/consistencygroups/%s' % (fake.PROJECT_ID, consistencygroup.id)) req.method = 'GET' req.headers['Content-Type'] = 'application/json' res = req.get_response(fakes.wsgi_app( fake_auth_context=self.user_ctxt)) res_dict = jsonutils.loads(res.body) consistencygroup.destroy() self.assertEqual(HTTPStatus.OK, res.status_int) self.assertEqual('az1', res_dict['consistencygroup']['availability_zone']) self.assertEqual('this is a test consistency group', res_dict['consistencygroup']['description']) self.assertEqual('test_consistencygroup', res_dict['consistencygroup']['name']) self.assertEqual('creating', res_dict['consistencygroup']['status']) self.assertEqual([vol_type['id']], res_dict['consistencygroup']['volume_types']) def test_show_consistencygroup_with_consistencygroup_NotFound(self): req = webob.Request.blank('/v3/%s/consistencygroups/%s' % (fake.PROJECT_ID, fake.WILL_NOT_BE_FOUND_ID)) req.method = 'GET' req.headers['Content-Type'] = 'application/json' res = req.get_response(fakes.wsgi_app( fake_auth_context=self.user_ctxt)) res_dict = jsonutils.loads(res.body) self.assertEqual(HTTPStatus.NOT_FOUND, res.status_int) self.assertEqual(HTTPStatus.NOT_FOUND, 
res_dict['itemNotFound']['code']) self.assertEqual('Group %s could not be found.' % fake.WILL_NOT_BE_FOUND_ID, res_dict['itemNotFound']['message']) def test_show_consistencygroup_with_null_volume_type(self): consistencygroup = self._create_consistencygroup(volume_type_id=None) req = webob.Request.blank('/v3/%s/consistencygroups/%s' % (fake.PROJECT_ID, consistencygroup.id)) req.method = 'GET' req.headers['Content-Type'] = 'application/json' res = req.get_response(fakes.wsgi_app( fake_auth_context=self.user_ctxt)) res_dict = jsonutils.loads(res.body) self.assertEqual(HTTPStatus.OK, res.status_int) self.assertEqual('az1', res_dict['consistencygroup']['availability_zone']) self.assertEqual('this is a test consistency group', res_dict['consistencygroup']['description']) self.assertEqual('test_consistencygroup', res_dict['consistencygroup']['name']) self.assertEqual('creating', res_dict['consistencygroup']['status']) self.assertEqual([], res_dict['consistencygroup']['volume_types']) consistencygroup.destroy() def test_list_consistencygroups_json(self): consistencygroup1 = self._create_consistencygroup() consistencygroup2 = self._create_consistencygroup() consistencygroup3 = self._create_consistencygroup() req = webob.Request.blank('/v3/%(project_id)s/' 'consistencygroups' % {'project_id': fake.PROJECT_ID}) req.method = 'GET' req.headers['Content-Type'] = 'application/json' res = req.get_response(fakes.wsgi_app( fake_auth_context=self.user_ctxt)) res_dict = jsonutils.loads(res.body) self.assertEqual(HTTPStatus.OK, res.status_int) self.assertEqual(consistencygroup3.id, res_dict['consistencygroups'][0]['id']) self.assertEqual('test_consistencygroup', res_dict['consistencygroups'][0]['name']) self.assertEqual(consistencygroup2.id, res_dict['consistencygroups'][1]['id']) self.assertEqual('test_consistencygroup', res_dict['consistencygroups'][1]['name']) self.assertEqual(consistencygroup1.id, res_dict['consistencygroups'][2]['id']) self.assertEqual('test_consistencygroup', res_dict['consistencygroups'][2]['name']) consistencygroup1.destroy() consistencygroup2.destroy() consistencygroup3.destroy() @ddt.data(False, True) def test_list_consistencygroups_with_limit(self, is_detail): consistencygroup1 = self._create_consistencygroup() consistencygroup2 = self._create_consistencygroup() consistencygroup3 = self._create_consistencygroup() url = '/v3/%s/consistencygroups?limit=1' % fake.PROJECT_ID if is_detail: url = '/v3/%s/consistencygroups/detail?limit=1' % fake.PROJECT_ID req = webob.Request.blank(url) req.method = 'GET' req.headers['Content-Type'] = 'application/json' res = req.get_response(fakes.wsgi_app( fake_auth_context=self.user_ctxt)) res_dict = jsonutils.loads(res.body) self.assertEqual(HTTPStatus.OK, res.status_int) self.assertEqual(1, len(res_dict['consistencygroups'])) self.assertEqual(consistencygroup3.id, res_dict['consistencygroups'][0]['id']) next_link = ( 'http://localhost/v3/%s/consistencygroups?limit=' '1&marker=%s' % (fake.PROJECT_ID, res_dict['consistencygroups'][0]['id'])) self.assertEqual(next_link, res_dict['consistencygroup_links'][0]['href']) consistencygroup1.destroy() consistencygroup2.destroy() consistencygroup3.destroy() @ddt.data(False, True) def test_list_consistencygroups_with_offset(self, is_detail): consistencygroup1 = self._create_consistencygroup() consistencygroup2 = self._create_consistencygroup() consistencygroup3 = self._create_consistencygroup() url = '/v3/%s/consistencygroups?offset=1' % fake.PROJECT_ID if is_detail: url = 
'/v3/%s/consistencygroups/detail?offset=1' % fake.PROJECT_ID req = webob.Request.blank(url) req.method = 'GET' req.headers['Content-Type'] = 'application/json' res = req.get_response(fakes.wsgi_app( fake_auth_context=self.user_ctxt)) res_dict = jsonutils.loads(res.body) self.assertEqual(HTTPStatus.OK, res.status_int) self.assertEqual(2, len(res_dict['consistencygroups'])) self.assertEqual(consistencygroup2.id, res_dict['consistencygroups'][0]['id']) self.assertEqual(consistencygroup1.id, res_dict['consistencygroups'][1]['id']) consistencygroup1.destroy() consistencygroup2.destroy() consistencygroup3.destroy() @ddt.data(False, True) def test_list_consistencygroups_with_offset_out_of_range(self, is_detail): url = ('/v3/%s/consistencygroups?offset=234523423455454' % fake.PROJECT_ID) if is_detail: url = ('/v3/%s/consistencygroups/detail?offset=234523423455454' % fake.PROJECT_ID) req = webob.Request.blank(url) req.method = 'GET' req.headers['Content-Type'] = 'application/json' res = req.get_response(fakes.wsgi_app( fake_auth_context=self.user_ctxt)) self.assertEqual(HTTPStatus.BAD_REQUEST, res.status_int) @ddt.data(False, True) def test_list_consistencygroups_with_limit_and_offset(self, is_detail): consistencygroup1 = self._create_consistencygroup() consistencygroup2 = self._create_consistencygroup() consistencygroup3 = self._create_consistencygroup() url = '/v3/%s/consistencygroups?limit=2&offset=1' % fake.PROJECT_ID if is_detail: url = ('/v3/%s/consistencygroups/detail?limit=2&offset=1' % fake.PROJECT_ID) req = webob.Request.blank(url) req.method = 'GET' req.headers['Content-Type'] = 'application/json' res = req.get_response(fakes.wsgi_app( fake_auth_context=self.user_ctxt)) res_dict = jsonutils.loads(res.body) self.assertEqual(HTTPStatus.OK, res.status_int) self.assertEqual(2, len(res_dict['consistencygroups'])) self.assertEqual(consistencygroup2.id, res_dict['consistencygroups'][0]['id']) self.assertEqual(consistencygroup1.id, res_dict['consistencygroups'][1]['id']) consistencygroup1.destroy() consistencygroup2.destroy() consistencygroup3.destroy() @ddt.data(False, True) def test_list_consistencygroups_with_filter(self, is_detail): consistencygroup1 = self._create_consistencygroup() consistencygroup2 = self._create_consistencygroup() common_ctxt = context.RequestContext(fake.USER_ID, fake.PROJECT_ID, auth_token=True, is_admin=False) consistencygroup3 = self._create_consistencygroup(ctxt=common_ctxt) url = ('/v3/%s/consistencygroups?' 'all_tenants=True&id=%s') % (fake.PROJECT_ID, consistencygroup3.id) if is_detail: url = ('/v3/%s/consistencygroups/detail?' 'all_tenants=True&id=%s') % (fake.PROJECT_ID, consistencygroup3.id) req = webob.Request.blank(url) req.method = 'GET' req.headers['Content-Type'] = 'application/json' res = req.get_response(fakes.wsgi_app(fake_auth_context=self.ctxt)) res_dict = jsonutils.loads(res.body) self.assertEqual(HTTPStatus.OK, res.status_int) self.assertEqual(1, len(res_dict['consistencygroups'])) self.assertEqual(consistencygroup3.id, res_dict['consistencygroups'][0]['id']) consistencygroup1.destroy() consistencygroup2.destroy() consistencygroup3.destroy() @ddt.data(False, True) def test_list_consistencygroups_with_project_id(self, is_detail): consistencygroup1 = self._create_consistencygroup() consistencygroup2 = self._create_consistencygroup( name="group", project_id=fake.PROJECT2_ID) url = ('/v3/%s/consistencygroups?' 'all_tenants=True&project_id=%s') % (fake.PROJECT_ID, fake.PROJECT2_ID) if is_detail: url = ('/v3/%s/consistencygroups/detail?' 
'all_tenants=True&project_id=%s') % (fake.PROJECT_ID, fake.PROJECT2_ID) req = webob.Request.blank(url) req.method = 'GET' req.headers['Content-Type'] = 'application/json' res = req.get_response(fakes.wsgi_app(fake_auth_context=self.ctxt)) res_dict = jsonutils.loads(res.body) self.assertEqual(200, res.status_int) self.assertEqual(1, len(res_dict['consistencygroups'])) self.assertEqual("group", res_dict['consistencygroups'][0]['name']) consistencygroup1.destroy() consistencygroup2.destroy() @ddt.data(False, True) def test_list_consistencygroups_with_sort(self, is_detail): consistencygroup1 = self._create_consistencygroup() consistencygroup2 = self._create_consistencygroup() consistencygroup3 = self._create_consistencygroup() url = '/v3/%s/consistencygroups?sort=id:asc' % fake.PROJECT_ID if is_detail: url = ('/v3/%s/consistencygroups/detail?sort=id:asc' % fake.PROJECT_ID) req = webob.Request.blank(url) req.method = 'GET' req.headers['Content-Type'] = 'application/json' res = req.get_response(fakes.wsgi_app( fake_auth_context=self.user_ctxt)) res_dict = jsonutils.loads(res.body) expect_result = [consistencygroup1.id, consistencygroup2.id, consistencygroup3.id] expect_result.sort() self.assertEqual(HTTPStatus.OK, res.status_int) self.assertEqual(3, len(res_dict['consistencygroups'])) self.assertEqual(expect_result[0], res_dict['consistencygroups'][0]['id']) self.assertEqual(expect_result[1], res_dict['consistencygroups'][1]['id']) self.assertEqual(expect_result[2], res_dict['consistencygroups'][2]['id']) consistencygroup1.destroy() consistencygroup2.destroy() consistencygroup3.destroy() def test_list_consistencygroups_detail_json(self): vol_type1 = utils.create_volume_type(context.get_admin_context(), self, name='my_vol_type1') vol_type2 = utils.create_volume_type(context.get_admin_context(), self, name='my_vol_type2') consistencygroup1 = self._create_consistencygroup( volume_type_ids=[vol_type1['id']]) consistencygroup2 = self._create_consistencygroup( volume_type_ids=[vol_type1['id']]) consistencygroup3 = self._create_consistencygroup( volume_type_ids=[vol_type1['id'], vol_type2['id']]) req = webob.Request.blank('/v3/%s/consistencygroups/detail' % fake.PROJECT_ID) req.method = 'GET' req.headers['Content-Type'] = 'application/json' req.headers['Accept'] = 'application/json' res = req.get_response(fakes.wsgi_app( fake_auth_context=self.user_ctxt)) res_dict = jsonutils.loads(res.body) cg_ids = [consistencygroup1.id, consistencygroup2.id, consistencygroup3.id] vol_type_ids = [vol_type1['id'], vol_type2['id']] consistencygroup1.destroy() consistencygroup2.destroy() consistencygroup3.destroy() self.assertEqual(HTTPStatus.OK, res.status_int) self.assertEqual('az1', res_dict['consistencygroups'][0]['availability_zone']) self.assertEqual('this is a test consistency group', res_dict['consistencygroups'][0]['description']) self.assertEqual('test_consistencygroup', res_dict['consistencygroups'][0]['name']) self.assertIn(res_dict['consistencygroups'][0]['id'], cg_ids) self.assertEqual('creating', res_dict['consistencygroups'][0]['status']) for vol_type_id in res_dict['consistencygroups'][0]['volume_types']: self.assertIn(vol_type_id, vol_type_ids) self.assertEqual('az1', res_dict['consistencygroups'][1]['availability_zone']) self.assertEqual('this is a test consistency group', res_dict['consistencygroups'][1]['description']) self.assertEqual('test_consistencygroup', res_dict['consistencygroups'][1]['name']) self.assertIn(res_dict['consistencygroups'][0]['id'], cg_ids) self.assertEqual('creating', 
res_dict['consistencygroups'][1]['status']) for vol_type_id in res_dict['consistencygroups'][1]['volume_types']: self.assertIn(vol_type_id, vol_type_ids) self.assertEqual('az1', res_dict['consistencygroups'][2]['availability_zone']) self.assertEqual('this is a test consistency group', res_dict['consistencygroups'][2]['description']) self.assertEqual('test_consistencygroup', res_dict['consistencygroups'][2]['name']) self.assertIn(res_dict['consistencygroups'][0]['id'], cg_ids) self.assertEqual('creating', res_dict['consistencygroups'][2]['status']) for vol_type_id in res_dict['consistencygroups'][2]['volume_types']: self.assertIn(vol_type_id, vol_type_ids) @mock.patch( 'cinder.api.openstack.wsgi.Controller.validate_name_and_description') def test_create_consistencygroup_json(self, mock_validate): group_id = fake.CONSISTENCY_GROUP_ID # Create volume type vol_type = 'test' vol_type_id = db.volume_type_create( self.ctxt, {'name': vol_type, 'extra_specs': {}})['id'] body = {"consistencygroup": {"name": "cg1", "volume_types": vol_type_id, "description": "Consistency Group 1", }} req = webob.Request.blank('/v3/%s/consistencygroups' % fake.PROJECT_ID) req.method = 'POST' req.headers['Content-Type'] = 'application/json' req.body = jsonutils.dump_as_bytes(body) res = req.get_response(fakes.wsgi_app( fake_auth_context=self.user_ctxt)) res_dict = jsonutils.loads(res.body) self.assertEqual(HTTPStatus.ACCEPTED, res.status_int) self.assertIn('id', res_dict['consistencygroup']) self.assertTrue(mock_validate.called) group_id = res_dict['consistencygroup']['id'] cg = objects.Group.get_by_id(self.ctxt, group_id) cg.destroy() def test_create_consistencygroup_with_no_body(self): # omit body from the request req = webob.Request.blank('/v3/%s/consistencygroups' % fake.PROJECT_ID) req.body = jsonutils.dump_as_bytes(None) req.method = 'POST' req.headers['Content-Type'] = 'application/json' req.headers['Accept'] = 'application/json' res = req.get_response(fakes.wsgi_app( fake_auth_context=self.user_ctxt)) res_dict = jsonutils.loads(res.body) self.assertEqual(HTTPStatus.BAD_REQUEST, res.status_int) self.assertEqual(HTTPStatus.BAD_REQUEST, res_dict['badRequest']['code']) self.assertEqual("Missing required element 'consistencygroup' in " "request body.", res_dict['badRequest']['message']) def test_delete_consistencygroup_available(self): consistencygroup = self._create_consistencygroup( status=fields.ConsistencyGroupStatus.AVAILABLE) req = webob.Request.blank('/v3/%s/consistencygroups/%s/delete' % (fake.PROJECT_ID, consistencygroup.id)) req.method = 'POST' req.headers['Content-Type'] = 'application/json' req.body = jsonutils.dump_as_bytes({}) res = req.get_response(fakes.wsgi_app()) consistencygroup = objects.Group.get_by_id( self.ctxt, consistencygroup.id) self.assertEqual(HTTPStatus.ACCEPTED, res.status_int) self.assertEqual('deleting', consistencygroup.status) consistencygroup.destroy() def test_delete_consistencygroup_available_used_as_source_success(self): consistencygroup = self._create_consistencygroup( status=fields.ConsistencyGroupStatus.AVAILABLE) req = webob.Request.blank('/v3/%s/consistencygroups/%s/delete' % (fake.PROJECT_ID, consistencygroup.id)) # The other CG used the first CG as source, but it's no longer in # creating status, so we should be able to delete it. 
cg2 = self._create_consistencygroup( status=fields.ConsistencyGroupStatus.AVAILABLE, source_cgid=consistencygroup.id) req.method = 'POST' req.headers['Content-Type'] = 'application/json' req.body = jsonutils.dump_as_bytes({}) res = req.get_response(fakes.wsgi_app()) consistencygroup = objects.Group.get_by_id( self.ctxt, consistencygroup.id) self.assertEqual(HTTPStatus.ACCEPTED, res.status_int) self.assertEqual('deleting', consistencygroup.status) consistencygroup.destroy() cg2.destroy() def test_delete_consistencygroup_available_no_force(self): consistencygroup = self._create_consistencygroup(status='available') req = webob.Request.blank('/v3/%s/consistencygroups/%s/delete' % (fake.PROJECT_ID, consistencygroup.id)) req.method = 'POST' req.headers['Content-Type'] = 'application/json' body = {"consistencygroup": {"force": False}} req.body = jsonutils.dump_as_bytes(body) res = req.get_response(fakes.wsgi_app( fake_auth_context=self.user_ctxt)) consistencygroup = objects.Group.get_by_id( self.ctxt, consistencygroup.id) self.assertEqual(HTTPStatus.ACCEPTED, res.status_int) self.assertEqual(fields.ConsistencyGroupStatus.DELETING, consistencygroup.status) consistencygroup.destroy() def test_delete_consistencygroup_with_consistencygroup_NotFound(self): req = webob.Request.blank('/v3/%s/consistencygroups/%s/delete' % (fake.PROJECT_ID, fake.WILL_NOT_BE_FOUND_ID)) req.method = 'POST' req.headers['Content-Type'] = 'application/json' req.body = jsonutils.dump_as_bytes(None) res = req.get_response(fakes.wsgi_app( fake_auth_context=self.user_ctxt)) res_dict = jsonutils.loads(res.body) self.assertEqual(HTTPStatus.NOT_FOUND, res.status_int) self.assertEqual(HTTPStatus.NOT_FOUND, res_dict['itemNotFound']['code']) self.assertEqual('Group %s could not be found.' % fake.WILL_NOT_BE_FOUND_ID, res_dict['itemNotFound']['message']) def test_delete_consistencygroup_with_invalid_consistencygroup(self): consistencygroup = self._create_consistencygroup( status=fields.ConsistencyGroupStatus.CREATING) self._assert_deleting_result_400(consistencygroup.id) consistencygroup.destroy() def test_delete_consistencygroup_invalid_force(self): consistencygroup = self._create_consistencygroup( status=fields.ConsistencyGroupStatus.CREATING) req = webob.Request.blank('/v3/%s/consistencygroups/%s/delete' % (fake.PROJECT_ID, consistencygroup.id)) req.method = 'POST' req.headers['Content-Type'] = 'application/json' body = {"consistencygroup": {"force": True}} req.body = jsonutils.dump_as_bytes(body) res = req.get_response(fakes.wsgi_app()) consistencygroup = objects.Group.get_by_id( self.ctxt, consistencygroup.id) self.assertEqual(HTTPStatus.ACCEPTED, res.status_int) self.assertEqual('deleting', consistencygroup.status) def test_delete_consistencygroup_no_host(self): consistencygroup = self._create_consistencygroup( host=None, status=fields.ConsistencyGroupStatus.ERROR) req = webob.Request.blank('/v3/%s/consistencygroups/%s/delete' % (fake.PROJECT_ID, consistencygroup.id)) req.method = 'POST' req.headers['Content-Type'] = 'application/json' body = {"consistencygroup": {"force": True}} req.body = jsonutils.dump_as_bytes(body) res = req.get_response(fakes.wsgi_app( fake_auth_context=self.user_ctxt)) self.assertEqual(HTTPStatus.ACCEPTED, res.status_int) cg = objects.Group.get_by_id( context.get_admin_context(read_deleted='yes'), consistencygroup.id) self.assertEqual(fields.ConsistencyGroupStatus.DELETED, cg.status) self.assertIsNone(cg.host) @mock.patch('cinder.quota.GROUP_QUOTAS.reserve', return_value='reservations') 
@mock.patch('cinder.quota.GROUP_QUOTAS.commit') def test_create_delete_consistencygroup_update_quota(self, mock_commit, mock_reserve): name = 'mycg' description = 'consistency group 1' fake_grp_type = {'id': fake.GROUP_TYPE_ID, 'name': 'fake_grp_type'} fake_vol_type = {'id': fake.VOLUME_TYPE_ID, 'name': 'fake_vol_type'} self.mock_object(db, 'group_type_get', return_value=fake_grp_type) self.mock_object(db, 'volume_types_get_by_name_or_id', return_value=[fake_vol_type]) self.mock_object(self.cg_api, '_cast_create_group') self.mock_object(self.cg_api, 'update_quota') cg = self.cg_api.create(self.ctxt, name, description, fake.GROUP_TYPE_ID, fake_vol_type['name']) # Verify the quota reservation and commit was called mock_reserve.assert_called_once_with(self.ctxt, project_id=self.ctxt.project_id, groups=1) mock_commit.assert_called_once_with(self.ctxt, 'reservations') self.assertEqual(fields.ConsistencyGroupStatus.CREATING, cg.status) self.assertIsNone(cg.host) cg.status = fields.ConsistencyGroupStatus.ERROR self.cg_api.delete(self.ctxt, cg) self.cg_api.update_quota.assert_called_once_with( self.ctxt, cg, -1, self.ctxt.project_id) cg = objects.Group.get_by_id( context.get_admin_context(read_deleted='yes'), cg.id) self.assertEqual(fields.ConsistencyGroupStatus.DELETED, cg.status) def test_delete_consistencygroup_with_invalid_body(self): consistencygroup = self._create_consistencygroup( status=fields.ConsistencyGroupStatus.AVAILABLE) req = webob.Request.blank('/v3/%s/consistencygroups/%s/delete' % (fake.PROJECT_ID, consistencygroup.id)) req.method = 'POST' req.headers['Content-Type'] = 'application/json' body = {"invalid_request_element": {"force": False}} req.body = jsonutils.dump_as_bytes(body) res = req.get_response(fakes.wsgi_app( fake_auth_context=self.user_ctxt)) self.assertEqual(HTTPStatus.BAD_REQUEST, res.status_int) def test_delete_consistencygroup_with_invalid_force_value_in_body(self): consistencygroup = self._create_consistencygroup( status=fields.ConsistencyGroupStatus.AVAILABLE) req = webob.Request.blank('/v3/%s/consistencygroups/%s/delete' % (fake.PROJECT_ID, consistencygroup.id)) req.method = 'POST' req.headers['Content-Type'] = 'application/json' body = {"consistencygroup": {"force": "abcd"}} req.body = jsonutils.dump_as_bytes(body) res = req.get_response(fakes.wsgi_app( fake_auth_context=self.user_ctxt)) self.assertEqual(HTTPStatus.BAD_REQUEST, res.status_int) def test_delete_consistencygroup_with_empty_force_value_in_body(self): consistencygroup = self._create_consistencygroup( status=fields.ConsistencyGroupStatus.AVAILABLE) req = webob.Request.blank('/v3/%s/consistencygroups/%s/delete' % (fake.PROJECT_ID, consistencygroup.id)) req.method = 'POST' req.headers['Content-Type'] = 'application/json' body = {"consistencygroup": {"force": ""}} req.body = jsonutils.dump_as_bytes(body) res = req.get_response(fakes.wsgi_app( fake_auth_context=self.user_ctxt)) self.assertEqual(HTTPStatus.BAD_REQUEST, res.status_int) def _assert_deleting_result_400(self, cg_id, force=False): req = webob.Request.blank('/v3/%s/consistencygroups/%s/delete' % (fake.PROJECT_ID, cg_id)) req.method = 'POST' req.headers['Content-Type'] = 'application/json' body = {"consistencygroup": {"force": force}} req.body = jsonutils.dump_as_bytes(body) res = req.get_response(fakes.wsgi_app()) self.assertEqual(HTTPStatus.BAD_REQUEST, res.status_int) res_dict = jsonutils.loads(res.body) self.assertEqual(HTTPStatus.BAD_REQUEST, res_dict['badRequest']['code']) self.assertIsNotNone(res_dict['badRequest']['message']) def 
test_delete_consistencygroup_with_volumes(self): consistencygroup = self._create_consistencygroup(status='available') utils.create_volume(self.ctxt, group_id=consistencygroup.id, testcase_instance=self) self._assert_deleting_result_400(consistencygroup.id) consistencygroup.destroy() def test_delete_consistencygroup_with_cgsnapshot(self): consistencygroup = self._create_consistencygroup(status='available') # If we don't add a volume to the CG the cgsnapshot creation will fail vol = utils.create_volume(self.ctxt, group_id=consistencygroup.id, testcase_instance=self) cg_snap = utils.create_group_snapshot(self.ctxt, consistencygroup.id, group_type_id=fake.GROUP_TYPE_ID) utils.create_snapshot(self.ctxt, volume_id=vol.id, group_snapshot_id=cg_snap.id, testcase_instance=self) self._assert_deleting_result_400(consistencygroup.id) cg_snap.destroy() consistencygroup.destroy() def test_delete_consistencygroup_with_cgsnapshot_force(self): consistencygroup = self._create_consistencygroup(status='available') # If we don't add a volume to the CG the cgsnapshot creation will fail vol = utils.create_volume(self.ctxt, group_id=consistencygroup.id, testcase_instance=self) cg_snap = utils.create_group_snapshot(self.ctxt, consistencygroup.id, group_type_id=fake.GROUP_TYPE_ID) utils.create_snapshot(self.ctxt, volume_id=vol.id, group_snapshot_id=cg_snap.id, testcase_instance=self) self._assert_deleting_result_400(consistencygroup.id, force=True) cg_snap.destroy() consistencygroup.destroy() def test_delete_consistencygroup_force_with_volumes(self): consistencygroup = self._create_consistencygroup(status='available') utils.create_volume(self.ctxt, consistencygroup_id=consistencygroup.id, testcase_instance=self) req = webob.Request.blank('/v3/%s/consistencygroups/%s/delete' % (fake.PROJECT_ID, consistencygroup.id)) req.method = 'POST' req.headers['Content-Type'] = 'application/json' body = {"consistencygroup": {"force": True}} req.body = jsonutils.dump_as_bytes(body) res = req.get_response(fakes.wsgi_app()) consistencygroup = objects.Group.get_by_id( self.ctxt, consistencygroup.id) self.assertEqual(HTTPStatus.ACCEPTED, res.status_int) self.assertEqual('deleting', consistencygroup.status) consistencygroup.destroy() def test_delete_cg_force_with_volumes_with_deleted_snapshots(self): consistencygroup = self._create_consistencygroup(status='available') vol = utils.create_volume(self.ctxt, testcase_instance=self, consistencygroup_id=consistencygroup.id) utils.create_snapshot(self.ctxt, vol.id, status='deleted', deleted=True, testcase_instance=self) req = webob.Request.blank('/v3/%s/consistencygroups/%s/delete' % (fake.PROJECT_ID, consistencygroup.id)) req.method = 'POST' req.headers['Content-Type'] = 'application/json' body = {"consistencygroup": {"force": True}} req.body = jsonutils.dump_as_bytes(body) res = req.get_response(fakes.wsgi_app()) consistencygroup = objects.Group.get_by_id( self.ctxt, consistencygroup.id) self.assertEqual(HTTPStatus.ACCEPTED, res.status_int) self.assertEqual('deleting', consistencygroup.status) consistencygroup.destroy() def test_create_consistencygroup_failed_no_volume_type(self): name = 'cg1' body = {"consistencygroup": {"name": name, "description": "Consistency Group 1", }} req = webob.Request.blank('/v3/%s/consistencygroups' % fake.PROJECT_ID) req.method = 'POST' req.headers['Content-Type'] = 'application/json' req.body = jsonutils.dump_as_bytes(body) res = req.get_response(fakes.wsgi_app( fake_auth_context=self.user_ctxt)) res_dict = jsonutils.loads(res.body) 
self.assertEqual(HTTPStatus.BAD_REQUEST, res.status_int) self.assertEqual(HTTPStatus.BAD_REQUEST, res_dict['badRequest']['code']) self.assertIsNotNone(res_dict['badRequest']['message']) @mock.patch( 'cinder.api.openstack.wsgi.Controller.validate_name_and_description') def test_update_consistencygroup_success(self, mock_validate): volume_type_id = utils.create_volume_type( context.get_admin_context(), self, name='my_vol_type')['id'] fake_grp_type = {'id': fake.GROUP_TYPE_ID, 'name': 'fake_grp_type'} self.mock_object(db, 'group_type_get', return_value=fake_grp_type) consistencygroup = self._create_consistencygroup( status=fields.ConsistencyGroupStatus.AVAILABLE, volume_type_ids=[volume_type_id], group_type_id=fake.GROUP_TYPE_ID, host='test_host') # We create another CG from the one we are updating to confirm that # it will not affect the update if it is not CREATING cg2 = self._create_consistencygroup( status=fields.ConsistencyGroupStatus.AVAILABLE, host='test_host', volume_type_ids=[volume_type_id], source_group_id=consistencygroup.id,) remove_volume_id = utils.create_volume( self.ctxt, testcase_instance=self, volume_type_id=volume_type_id, group_id=consistencygroup.id)['id'] remove_volume_id2 = utils.create_volume( self.ctxt, testcase_instance=self, volume_type_id=volume_type_id, group_id=consistencygroup.id, status='error')['id'] remove_volume_id3 = utils.create_volume( self.ctxt, testcase_instance=self, volume_type_id=volume_type_id, group_id=consistencygroup.id, status='error_deleting')['id'] self.assertEqual(fields.ConsistencyGroupStatus.AVAILABLE, consistencygroup.status) cg_volumes = db.volume_get_all_by_generic_group(self.ctxt.elevated(), consistencygroup.id) cg_vol_ids = [cg_vol['id'] for cg_vol in cg_volumes] self.assertIn(remove_volume_id, cg_vol_ids) self.assertIn(remove_volume_id2, cg_vol_ids) self.assertIn(remove_volume_id3, cg_vol_ids) add_volume_id = utils.create_volume( self.ctxt, testcase_instance=self, volume_type_id=volume_type_id)['id'] add_volume_id2 = utils.create_volume( self.ctxt, testcase_instance=self, volume_type_id=volume_type_id)['id'] req = webob.Request.blank('/v3/%s/consistencygroups/%s/update' % (fake.PROJECT_ID, consistencygroup.id)) req.method = 'PUT' req.headers['Content-Type'] = 'application/json' name = 'newcg' description = 'New Consistency Group Description' add_volumes = add_volume_id + "," + add_volume_id2 remove_volumes = ','.join( [remove_volume_id, remove_volume_id2, remove_volume_id3]) body = {"consistencygroup": {"name": name, "description": description, "add_volumes": add_volumes, "remove_volumes": remove_volumes, }} req.body = jsonutils.dump_as_bytes(body) res = req.get_response(fakes.wsgi_app( fake_auth_context=self.user_ctxt)) consistencygroup = objects.Group.get_by_id( self.ctxt, consistencygroup.id) self.assertEqual(HTTPStatus.ACCEPTED, res.status_int) self.assertTrue(mock_validate.called) self.assertEqual(fields.ConsistencyGroupStatus.UPDATING, consistencygroup.status) consistencygroup.destroy() cg2.destroy() @mock.patch( 'cinder.api.openstack.wsgi.Controller.validate_name_and_description') def test_update_consistencygroup_sourcing_cg(self, mock_validate): volume_type_id = fake.VOLUME_TYPE_ID consistencygroup = self._create_consistencygroup( status=fields.ConsistencyGroupStatus.AVAILABLE, host='test_host') cg2 = self._create_consistencygroup( status=fields.ConsistencyGroupStatus.CREATING, host='test_host', source_cgid=consistencygroup.id) remove_volume_id = utils.create_volume( self.ctxt, volume_type_id=volume_type_id, 
consistencygroup_id=consistencygroup.id)['id'] remove_volume_id2 = utils.create_volume( self.ctxt, volume_type_id=volume_type_id, consistencygroup_id=consistencygroup.id)['id'] req = webob.Request.blank('/v3/%s/consistencygroups/%s/update' % (fake.PROJECT_ID, consistencygroup.id)) req.method = 'PUT' req.headers['Content-Type'] = 'application/json' name = 'newcg' description = 'New Consistency Group Description' remove_volumes = remove_volume_id + "," + remove_volume_id2 body = {"consistencygroup": {"name": name, "description": description, "remove_volumes": remove_volumes, }} req.body = jsonutils.dump_as_bytes(body) res = req.get_response(fakes.wsgi_app()) consistencygroup = objects.Group.get_by_id( self.ctxt, consistencygroup.id) self.assertEqual(HTTPStatus.BAD_REQUEST, res.status_int) self.assertEqual(fields.ConsistencyGroupStatus.AVAILABLE, consistencygroup.status) consistencygroup.destroy() cg2.destroy() @mock.patch( 'cinder.api.openstack.wsgi.Controller.validate_name_and_description') def test_update_consistencygroup_creating_cgsnapshot(self, mock_validate): volume_type_id = fake.VOLUME_TYPE_ID consistencygroup = self._create_consistencygroup( status=fields.ConsistencyGroupStatus.AVAILABLE, host='test_host') # If we don't add a volume to the CG the cgsnapshot creation will fail utils.create_volume(self.ctxt, consistencygroup_id=consistencygroup.id, testcase_instance=self) cgsnapshot = utils.create_cgsnapshot( self.ctxt, consistencygroup_id=consistencygroup.id) add_volume_id = utils.create_volume( self.ctxt, volume_type_id=volume_type_id)['id'] add_volume_id2 = utils.create_volume( self.ctxt, volume_type_id=volume_type_id)['id'] req = webob.Request.blank('/v3/%s/consistencygroups/%s/update' % (fake.PROJECT_ID, consistencygroup.id)) req.method = 'PUT' req.headers['Content-Type'] = 'application/json' name = 'newcg' description = 'New Consistency Group Description' add_volumes = add_volume_id + "," + add_volume_id2 body = {"consistencygroup": {"name": name, "description": description, "add_volumes": add_volumes}} req.body = jsonutils.dump_as_bytes(body) res = req.get_response(fakes.wsgi_app()) consistencygroup = objects.Group.get_by_id( self.ctxt, consistencygroup.id) self.assertEqual(HTTPStatus.BAD_REQUEST, res.status_int) self.assertEqual(fields.ConsistencyGroupStatus.AVAILABLE, consistencygroup.status) consistencygroup.destroy() cgsnapshot.destroy() def test_update_consistencygroup_add_volume_not_found(self): consistencygroup = self._create_consistencygroup( ctxt=self.ctxt, status=fields.ConsistencyGroupStatus.AVAILABLE) req = webob.Request.blank('/v3/%s/consistencygroups/%s/update' % (fake.PROJECT_ID, consistencygroup.id)) req.method = 'PUT' req.headers['Content-Type'] = 'application/json' body = {"consistencygroup": {"name": None, "description": None, "add_volumes": "fake-volume-uuid", "remove_volumes": None, }} req.body = jsonutils.dump_as_bytes(body) res = req.get_response(fakes.wsgi_app( fake_auth_context=self.user_ctxt)) res_dict = jsonutils.loads(res.body) self.assertEqual(HTTPStatus.BAD_REQUEST, res.status_int) self.assertEqual(HTTPStatus.BAD_REQUEST, res_dict['badRequest']['code']) self.assertIsNotNone(res_dict['badRequest']['message']) consistencygroup.destroy() def test_update_consistencygroup_remove_volume_not_found(self): consistencygroup = self._create_consistencygroup( ctxt=self.ctxt, status=fields.ConsistencyGroupStatus.AVAILABLE) req = webob.Request.blank('/v3/%s/consistencygroups/%s/update' % (fake.PROJECT_ID, consistencygroup.id)) req.method = 'PUT' 
req.headers['Content-Type'] = 'application/json' body = {"consistencygroup": {"name": None, "description": "new description", "add_volumes": None, "remove_volumes": "fake-volume-uuid", }} req.body = jsonutils.dump_as_bytes(body) res = req.get_response(fakes.wsgi_app( fake_auth_context=self.user_ctxt)) res_dict = jsonutils.loads(res.body) self.assertEqual(HTTPStatus.BAD_REQUEST, res.status_int) self.assertEqual(HTTPStatus.BAD_REQUEST, res_dict['badRequest']['code']) self.assertIsNotNone(res_dict['badRequest']['message']) consistencygroup.destroy() def test_update_consistencygroup_empty_parameters(self): consistencygroup = self._create_consistencygroup( ctxt=self.ctxt, status=fields.ConsistencyGroupStatus.AVAILABLE) req = webob.Request.blank('/v3/%s/consistencygroups/%s/update' % (fake.PROJECT_ID, consistencygroup.id)) req.method = 'PUT' req.headers['Content-Type'] = 'application/json' body = {"consistencygroup": {"name": "", "description": "", "add_volumes": None, "remove_volumes": None, }} req.body = jsonutils.dump_as_bytes(body) res = req.get_response(fakes.wsgi_app( fake_auth_context=self.user_ctxt)) res_dict = jsonutils.loads(res.body) self.assertEqual(HTTPStatus.BAD_REQUEST, res.status_int) self.assertEqual(HTTPStatus.BAD_REQUEST, res_dict['badRequest']['code']) consistencygroup.destroy() def test_update_consistencygroup_add_volume_invalid_state(self): volume_type_id = fake.VOLUME_TYPE_ID consistencygroup = self._create_consistencygroup( ctxt=self.ctxt, status=fields.ConsistencyGroupStatus.AVAILABLE) add_volume_id = utils.create_volume( self.ctxt, volume_type_id=volume_type_id, status='wrong_status')['id'] req = webob.Request.blank('/v3/%s/consistencygroups/%s/update' % (fake.PROJECT_ID, consistencygroup.id)) req.method = 'PUT' req.headers['Content-Type'] = 'application/json' add_volumes = add_volume_id body = {"consistencygroup": {"name": "cg1", "description": "", "add_volumes": add_volumes, "remove_volumes": None, }} req.body = jsonutils.dump_as_bytes(body) res = req.get_response(fakes.wsgi_app( fake_auth_context=self.user_ctxt)) res_dict = jsonutils.loads(res.body) self.assertEqual(HTTPStatus.BAD_REQUEST, res.status_int) self.assertEqual(HTTPStatus.BAD_REQUEST, res_dict['badRequest']['code']) self.assertIsNotNone(res_dict['badRequest']['message']) consistencygroup.destroy() def test_update_consistencygroup_add_volume_invalid_volume_type(self): consistencygroup = self._create_consistencygroup( ctxt=self.ctxt, status=fields.ConsistencyGroupStatus.AVAILABLE) wrong_type = fake.VOLUME_TYPE2_ID add_volume_id = utils.create_volume( self.ctxt, volume_type_id=wrong_type)['id'] req = webob.Request.blank('/v3/%s/consistencygroups/%s/update' % (fake.PROJECT_ID, consistencygroup.id)) req.method = 'PUT' req.headers['Content-Type'] = 'application/json' add_volumes = add_volume_id body = {"consistencygroup": {"name": "cg1", "description": "", "add_volumes": add_volumes, "remove_volumes": None, }} req.body = jsonutils.dump_as_bytes(body) res = req.get_response(fakes.wsgi_app( fake_auth_context=self.user_ctxt)) res_dict = jsonutils.loads(res.body) self.assertEqual(HTTPStatus.BAD_REQUEST, res.status_int) self.assertEqual(HTTPStatus.BAD_REQUEST, res_dict['badRequest']['code']) self.assertIsNotNone(res_dict['badRequest']['message']) consistencygroup.destroy() def test_update_consistencygroup_add_volume_already_in_cg(self): consistencygroup = self._create_consistencygroup( ctxt=self.ctxt, status=fields.ConsistencyGroupStatus.AVAILABLE) add_volume_id = utils.create_volume( self.ctxt, 
consistencygroup_id=fake.CONSISTENCY_GROUP2_ID)['id'] req = webob.Request.blank('/v3/%s/consistencygroups/%s/update' % (fake.PROJECT_ID, consistencygroup.id)) req.method = 'PUT' req.headers['Content-Type'] = 'application/json' add_volumes = add_volume_id body = {"consistencygroup": {"name": "cg1", "description": "", "add_volumes": add_volumes, "remove_volumes": None, }} req.body = jsonutils.dump_as_bytes(body) res = req.get_response(fakes.wsgi_app( fake_auth_context=self.user_ctxt)) res_dict = jsonutils.loads(res.body) self.assertEqual(HTTPStatus.BAD_REQUEST, res.status_int) self.assertEqual(HTTPStatus.BAD_REQUEST, res_dict['badRequest']['code']) self.assertIsNotNone(res_dict['badRequest']['message']) consistencygroup.destroy() def test_update_consistencygroup_invalid_state(self): volume_type_id = utils.create_volume_type( context.get_admin_context(), self, name='my_vol_type')['id'] consistencygroup = self._create_consistencygroup( status=fields.ConsistencyGroupStatus.CREATING, volume_type_ids=[volume_type_id], ctxt=self.ctxt) add_volume_id = utils.create_volume( self.ctxt, testcase_instance=self, volume_type_id=volume_type_id)['id'] req = webob.Request.blank('/v3/%s/consistencygroups/%s/update' % (fake.PROJECT_ID, consistencygroup.id)) req.method = 'PUT' req.headers['Content-Type'] = 'application/json' body = {"consistencygroup": {"name": "new name", "description": None, "add_volumes": add_volume_id, "remove_volumes": None, }} req.body = jsonutils.dump_as_bytes(body) res = req.get_response(fakes.wsgi_app( fake_auth_context=self.user_ctxt)) res_dict = jsonutils.loads(res.body) self.assertEqual(HTTPStatus.BAD_REQUEST, res.status_int) self.assertEqual(HTTPStatus.BAD_REQUEST, res_dict['badRequest']['code']) self.assertIsNotNone(res_dict['badRequest']['message']) consistencygroup.destroy() @mock.patch( 'cinder.api.openstack.wsgi.Controller.validate_name_and_description') @mock.patch('cinder.scheduler.rpcapi.SchedulerAPI.validate_host_capacity') def test_create_consistencygroup_from_src_snap(self, mock_validate_host, mock_validate): self.mock_object(volume_api.API, "create", v3_fakes.fake_volume_create) consistencygroup = utils.create_group( self.ctxt, group_type_id=fake.GROUP_TYPE_ID, volume_type_ids=[fake.VOLUME_TYPE_ID],) volume_id = utils.create_volume( self.ctxt, volume_type_id=fake.VOLUME_TYPE_ID, group_id=consistencygroup.id)['id'] cgsnapshot = utils.create_group_snapshot( self.ctxt, group_id=consistencygroup.id, group_type_id=fake.GROUP_TYPE_ID) snapshot = utils.create_snapshot( self.ctxt, volume_id, group_snapshot_id=cgsnapshot.id, status=fields.SnapshotStatus.AVAILABLE) mock_validate_host.return_value = True test_cg_name = 'test cg' body = {"consistencygroup-from-src": {"name": test_cg_name, "description": "Consistency Group 1", "cgsnapshot_id": cgsnapshot.id}} req = webob.Request.blank('/v3/%s/consistencygroups/create_from_src' % fake.PROJECT_ID) req.method = 'POST' req.headers['Content-Type'] = 'application/json' req.body = jsonutils.dump_as_bytes(body) res = req.get_response(fakes.wsgi_app( fake_auth_context=self.user_ctxt)) res_dict = jsonutils.loads(res.body) self.assertEqual(HTTPStatus.ACCEPTED, res.status_int) self.assertIn('id', res_dict['consistencygroup']) self.assertEqual(test_cg_name, res_dict['consistencygroup']['name']) self.assertTrue(mock_validate.called) cg_ref = objects.Group.get_by_id( self.ctxt.elevated(), res_dict['consistencygroup']['id']) cg_ref.destroy() snapshot.destroy() db.volume_destroy(self.ctxt.elevated(), volume_id) consistencygroup.destroy() 
cgsnapshot.destroy() @mock.patch('cinder.scheduler.rpcapi.SchedulerAPI.validate_host_capacity') def test_create_consistencygroup_from_src_cg(self, mock_validate): self.mock_object(volume_api.API, "create", v3_fakes.fake_volume_create) source_cg = utils.create_group( self.ctxt, group_type_id=fake.GROUP_TYPE_ID, volume_type_ids=[self.vol_type['id']],) volume_id = utils.create_volume( self.ctxt, group_id=source_cg.id)['id'] mock_validate.return_value = True test_cg_name = 'test cg' body = {"consistencygroup-from-src": {"name": test_cg_name, "description": "Consistency Group 1", "source_cgid": source_cg.id}} req = webob.Request.blank('/v3/%s/consistencygroups/create_from_src' % fake.PROJECT_ID) req.method = 'POST' req.headers['Content-Type'] = 'application/json' req.body = jsonutils.dump_as_bytes(body) res = req.get_response(fakes.wsgi_app( fake_auth_context=self.user_ctxt)) res_dict = jsonutils.loads(res.body) self.assertEqual(HTTPStatus.ACCEPTED, res.status_int) self.assertIn('id', res_dict['consistencygroup']) self.assertEqual(test_cg_name, res_dict['consistencygroup']['name']) cg = objects.Group.get_by_id( self.ctxt, res_dict['consistencygroup']['id']) cg.destroy() db.volume_destroy(self.ctxt.elevated(), volume_id) source_cg.destroy() def test_create_consistencygroup_from_src_both_snap_cg(self): self.mock_object(volume_api.API, "create", v3_fakes.fake_volume_create) consistencygroup = utils.create_group( self.ctxt, group_type_id=fake.GROUP_TYPE_ID, volume_type_ids=[fake.VOLUME_TYPE_ID],) volume_id = utils.create_volume( self.ctxt, group_id=consistencygroup.id)['id'] cgsnapshot_id = utils.create_group_snapshot( self.ctxt, group_type_id=fake.GROUP_TYPE_ID, group_id=consistencygroup.id)['id'] snapshot = utils.create_snapshot( self.ctxt, volume_id, group_snapshot_id=cgsnapshot_id, status=fields.SnapshotStatus.AVAILABLE) test_cg_name = 'test cg' body = {"consistencygroup-from-src": {"name": test_cg_name, "description": "Consistency Group 1", "cgsnapshot_id": cgsnapshot_id, "source_cgid": consistencygroup.id}} req = webob.Request.blank('/v3/%s/consistencygroups/create_from_src' % fake.PROJECT_ID) req.method = 'POST' req.headers['Content-Type'] = 'application/json' req.body = jsonutils.dump_as_bytes(body) res = req.get_response(fakes.wsgi_app( fake_auth_context=self.user_ctxt)) res_dict = jsonutils.loads(res.body) self.assertEqual(HTTPStatus.BAD_REQUEST, res.status_int) self.assertEqual(HTTPStatus.BAD_REQUEST, res_dict['badRequest']['code']) self.assertIsNotNone(res_dict['badRequest']['message']) snapshot.destroy() db.cgsnapshot_destroy(self.ctxt.elevated(), cgsnapshot_id) db.volume_destroy(self.ctxt.elevated(), volume_id) consistencygroup.destroy() def test_create_consistencygroup_from_src_invalid_body(self): name = 'cg1' body = {"invalid": {"name": name, "description": "Consistency Group 1", }} req = webob.Request.blank('/v3/%s/consistencygroups/create_from_src' % fake.PROJECT_ID) req.method = 'POST' req.headers['Content-Type'] = 'application/json' req.body = jsonutils.dump_as_bytes(body) res = req.get_response(fakes.wsgi_app( fake_auth_context=self.user_ctxt)) res_dict = jsonutils.loads(res.body) self.assertEqual(HTTPStatus.BAD_REQUEST, res.status_int) self.assertEqual(HTTPStatus.BAD_REQUEST, res_dict['badRequest']['code']) # Missing 'consistencygroup-from-src' in the body. 
self.assertIsNotNone(res_dict['badRequest']['message']) def test_create_consistencygroup_from_src_no_source_id(self): name = 'cg1' body = {"consistencygroup-from-src": {"name": name, "description": "Consistency Group 1", }} req = webob.Request.blank('/v3/%s/consistencygroups/create_from_src' % fake.PROJECT_ID) req.method = 'POST' req.headers['Content-Type'] = 'application/json' req.body = jsonutils.dump_as_bytes(body) res = req.get_response(fakes.wsgi_app( fake_auth_context=self.user_ctxt)) res_dict = jsonutils.loads(res.body) self.assertEqual(HTTPStatus.BAD_REQUEST, res.status_int) self.assertEqual(HTTPStatus.BAD_REQUEST, res_dict['badRequest']['code']) self.assertIsNotNone(res_dict['badRequest']['message']) def test_create_consistencygroup_from_src_no_host(self): consistencygroup = utils.create_group( self.ctxt, group_type_id=fake.GROUP_TYPE_ID, volume_type_ids=[fake.VOLUME_TYPE_ID], host=None) volume_id = utils.create_volume( self.ctxt, group_id=consistencygroup.id)['id'] cgsnapshot = utils.create_group_snapshot( self.ctxt, group_id=consistencygroup.id, group_type_id=fake.GROUP_TYPE_ID,) snapshot = utils.create_snapshot( self.ctxt, volume_id, group_snapshot_id=cgsnapshot.id, status=fields.SnapshotStatus.AVAILABLE) test_cg_name = 'test cg' body = {"consistencygroup-from-src": {"name": test_cg_name, "description": "Consistency Group 1", "cgsnapshot_id": cgsnapshot.id}} req = webob.Request.blank('/v3/%s/consistencygroups/create_from_src' % fake.PROJECT_ID) req.method = 'POST' req.headers['Content-Type'] = 'application/json' req.body = jsonutils.dump_as_bytes(body) res = req.get_response(fakes.wsgi_app( fake_auth_context=self.user_ctxt)) res_dict = jsonutils.loads(res.body) self.assertEqual(HTTPStatus.BAD_REQUEST, res.status_int) self.assertEqual(HTTPStatus.BAD_REQUEST, res_dict['badRequest']['code']) msg = _('Invalid Group: No valid host to create group') self.assertIn(msg, res_dict['badRequest']['message']) snapshot.destroy() db.volume_destroy(self.ctxt.elevated(), volume_id) consistencygroup.destroy() cgsnapshot.destroy() @mock.patch('cinder.scheduler.rpcapi.SchedulerAPI.validate_host_capacity') def test_create_consistencygroup_from_src_cgsnapshot_empty(self, mock_validate): consistencygroup = utils.create_group( self.ctxt, group_type_id=fake.GROUP_TYPE_ID, volume_type_ids=[fake.VOLUME_TYPE_ID],) volume_id = utils.create_volume( self.ctxt, group_id=consistencygroup.id)['id'] cgsnapshot = utils.create_group_snapshot( self.ctxt, group_id=consistencygroup.id, group_type_id=fake.GROUP_TYPE_ID,) mock_validate.return_value = True test_cg_name = 'test cg' body = {"consistencygroup-from-src": {"name": test_cg_name, "description": "Consistency Group 1", "cgsnapshot_id": cgsnapshot.id}} req = webob.Request.blank('/v3/%s/consistencygroups/create_from_src' % fake.PROJECT_ID) req.method = 'POST' req.headers['Content-Type'] = 'application/json' req.body = jsonutils.dump_as_bytes(body) res = req.get_response(fakes.wsgi_app( fake_auth_context=self.user_ctxt)) res_dict = jsonutils.loads(res.body) self.assertEqual(HTTPStatus.BAD_REQUEST, res.status_int) self.assertEqual(HTTPStatus.BAD_REQUEST, res_dict['badRequest']['code']) self.assertIsNotNone(res_dict['badRequest']['message']) db.volume_destroy(self.ctxt.elevated(), volume_id) consistencygroup.destroy() cgsnapshot.destroy() @mock.patch('cinder.scheduler.rpcapi.SchedulerAPI.validate_host_capacity') def test_create_consistencygroup_from_src_source_cg_empty(self, mock_validate): source_cg = utils.create_group( self.ctxt, group_type_id=fake.GROUP_TYPE_ID, 
volume_type_ids=[fake.VOLUME_TYPE_ID],) mock_validate.return_value = True test_cg_name = 'test cg' body = {"consistencygroup-from-src": {"name": test_cg_name, "description": "Consistency Group 1", "source_cgid": source_cg.id}} req = webob.Request.blank('/v3/%s/consistencygroups/create_from_src' % fake.PROJECT_ID) req.method = 'POST' req.headers['Content-Type'] = 'application/json' req.body = jsonutils.dump_as_bytes(body) res = req.get_response(fakes.wsgi_app( fake_auth_context=self.user_ctxt)) res_dict = jsonutils.loads(res.body) self.assertEqual(HTTPStatus.BAD_REQUEST, res.status_int) self.assertEqual(HTTPStatus.BAD_REQUEST, res_dict['badRequest']['code']) self.assertIsNotNone(res_dict['badRequest']['message']) source_cg.destroy() def test_create_consistencygroup_from_src_cgsnapshot_notfound(self): consistencygroup = utils.create_group( self.ctxt, group_type_id=fake.GROUP_TYPE_ID, volume_type_ids=[fake.VOLUME_TYPE_ID],) volume_id = utils.create_volume( self.ctxt, group_id=consistencygroup.id)['id'] test_cg_name = 'test cg' body = { "consistencygroup-from-src": { "name": test_cg_name, "description": "Consistency Group 1", "source_cgid": fake.CGSNAPSHOT_ID } } req = webob.Request.blank('/v3/%s/consistencygroups/create_from_src' % fake.PROJECT_ID) req.method = 'POST' req.headers['Content-Type'] = 'application/json' req.body = jsonutils.dump_as_bytes(body) res = req.get_response(fakes.wsgi_app( fake_auth_context=self.user_ctxt)) res_dict = jsonutils.loads(res.body) self.assertEqual(HTTPStatus.NOT_FOUND, res.status_int) self.assertEqual(HTTPStatus.NOT_FOUND, res_dict['itemNotFound']['code']) self.assertIsNotNone(res_dict['itemNotFound']['message']) db.volume_destroy(self.ctxt.elevated(), volume_id) consistencygroup.destroy() def test_create_consistencygroup_from_src_source_cg_notfound(self): test_cg_name = 'test cg' body = { "consistencygroup-from-src": { "name": test_cg_name, "description": "Consistency Group 1", "source_cgid": fake.CONSISTENCY_GROUP_ID } } req = webob.Request.blank('/v3/%s/consistencygroups/create_from_src' % fake.PROJECT_ID) req.method = 'POST' req.headers['Content-Type'] = 'application/json' req.body = jsonutils.dump_as_bytes(body) res = req.get_response(fakes.wsgi_app( fake_auth_context=self.user_ctxt)) res_dict = jsonutils.loads(res.body) self.assertEqual(HTTPStatus.NOT_FOUND, res.status_int) self.assertEqual(HTTPStatus.NOT_FOUND, res_dict['itemNotFound']['code']) self.assertIsNotNone(res_dict['itemNotFound']['message']) @mock.patch.object(volume_api.API, 'create', side_effect=exception.CinderException( 'Create volume failed.')) @mock.patch('cinder.scheduler.rpcapi.SchedulerAPI.validate_host_capacity') def test_create_consistencygroup_from_src_cgsnapshot_create_volume_failed( self, mock_validate, mock_create): consistencygroup = utils.create_group( self.ctxt, group_type_id=fake.GROUP_TYPE_ID, volume_type_ids=[fake.VOLUME_TYPE_ID],) volume_id = utils.create_volume( self.ctxt, group_id=consistencygroup.id)['id'] cgsnapshot = utils.create_group_snapshot( self.ctxt, group_id=consistencygroup.id, group_type_id=fake.GROUP_TYPE_ID,) snapshot = utils.create_snapshot( self.ctxt, volume_id, group_snapshot_id=cgsnapshot.id, status=fields.SnapshotStatus.AVAILABLE) mock_validate.return_value = True test_cg_name = 'test cg' body = {"consistencygroup-from-src": {"name": test_cg_name, "description": "Consistency Group 1", "cgsnapshot_id": cgsnapshot.id}} req = webob.Request.blank('/v3/%s/consistencygroups/create_from_src' % fake.PROJECT_ID) req.method = 'POST' 
req.headers['Content-Type'] = 'application/json' req.body = jsonutils.dump_as_bytes(body) res = req.get_response(fakes.wsgi_app( fake_auth_context=self.user_ctxt)) res_dict = jsonutils.loads(res.body) self.assertEqual(HTTPStatus.BAD_REQUEST, res.status_int) self.assertEqual(HTTPStatus.BAD_REQUEST, res_dict['badRequest']['code']) msg = _("Create volume failed.") self.assertEqual(msg, res_dict['badRequest']['message']) snapshot.destroy() db.volume_destroy(self.ctxt.elevated(), volume_id) consistencygroup.destroy() cgsnapshot.destroy() @mock.patch.object(volume_api.API, 'create', side_effect=exception.CinderException( 'Create volume failed.')) @mock.patch('cinder.scheduler.rpcapi.SchedulerAPI.validate_host_capacity') @mock.patch('cinder.db.sqlalchemy.api.volume_type_get') def test_create_consistencygroup_from_src_cg_create_volume_failed( self, mock_validate, mock_create, mock_vol_type_get): source_cg = utils.create_group( self.ctxt, group_type_id=fake.GROUP_TYPE_ID, volume_type_ids=[fake.VOLUME_TYPE_ID],) volume_id = utils.create_volume( self.ctxt, group_id=source_cg.id, volume_type_id=fake.VOLUME_TYPE_ID)['id'] mock_validate.return_value = True test_cg_name = 'test cg' body = {"consistencygroup-from-src": {"name": test_cg_name, "description": "Consistency Group 1", "source_cgid": source_cg.id}} req = webob.Request.blank('/v3/%s/consistencygroups/create_from_src' % fake.PROJECT_ID) req.method = 'POST' req.headers['Content-Type'] = 'application/json' req.body = jsonutils.dump_as_bytes(body) res = req.get_response(fakes.wsgi_app( fake_auth_context=self.user_ctxt)) res_dict = jsonutils.loads(res.body) self.assertEqual(HTTPStatus.BAD_REQUEST, res.status_int) self.assertEqual(HTTPStatus.BAD_REQUEST, res_dict['badRequest']['code']) self.assertIsNotNone(res_dict['badRequest']['message']) db.volume_destroy(self.ctxt.elevated(), volume_id) source_cg.destroy() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/api/contrib/test_extended_snapshot_attributes.py0000664000175000017500000001140100000000000030260 0ustar00zuulzuul00000000000000# Copyright 2012 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from http import HTTPStatus from unittest import mock from oslo_serialization import jsonutils import webob from cinder import context from cinder.objects import fields from cinder.policies import snapshots as snap_policy from cinder.tests.unit.api import fakes from cinder.tests.unit import fake_constants as fake from cinder.tests.unit import fake_snapshot from cinder.tests.unit import fake_volume from cinder.tests.unit import test UUID1 = fake.SNAPSHOT_ID UUID2 = fake.SNAPSHOT2_ID def _get_default_snapshot_param(): return {'id': UUID1, 'volume_id': fake.VOLUME_ID, 'status': fields.SnapshotStatus.AVAILABLE, 'volume_size': 100, 'created_at': None, 'display_name': 'Default name', 'display_description': 'Default description', 'project_id': fake.PROJECT_ID, 'progress': '0%', 'expected_attrs': ['metadata']} def fake_snapshot_get(self, context, snapshot_id): param = _get_default_snapshot_param() return param def fake_snapshot_get_all(self, context, search_opts=None): param = _get_default_snapshot_param() return [param] class ExtendedSnapshotAttributesTest(test.TestCase): content_type = 'application/json' prefix = 'os-extended-snapshot-attributes:' def setUp(self): super(ExtendedSnapshotAttributesTest, self).setUp() self.user_ctxt = context.RequestContext( fake.USER_ID, fake.PROJECT_ID, auth_token=True) def _make_request(self, url): req = webob.Request.blank(url) req.headers['Accept'] = self.content_type res = req.get_response(fakes.wsgi_app( fake_auth_context=self.user_ctxt)) return res def _get_snapshot(self, body): return jsonutils.loads(body).get('snapshot') def _get_snapshots(self, body): return jsonutils.loads(body).get('snapshots') def assertSnapshotAttributes(self, snapshot, project_id, progress): self.assertEqual(project_id, snapshot.get('%sproject_id' % self.prefix)) self.assertEqual(progress, snapshot.get('%sprogress' % self.prefix)) @mock.patch('cinder.db.snapshot_metadata_get', return_value=dict()) @mock.patch('cinder.objects.Volume.get_by_id') @mock.patch('cinder.objects.Snapshot.get_by_id') @mock.patch('cinder.context.RequestContext.authorize') def test_show(self, mock_authorize, snapshot_get_by_id, volume_get_by_id, snapshot_metadata_get): ctx = context.RequestContext(fake.USER_ID, fake.PROJECT_ID, auth_token=True) snapshot = _get_default_snapshot_param() snapshot_obj = fake_snapshot.fake_snapshot_obj(ctx, **snapshot) fake_volume_obj = fake_volume.fake_volume_obj(ctx) mock_authorize.return_value = True snapshot_get_by_id.return_value = snapshot_obj volume_get_by_id.return_value = fake_volume_obj url = '/v3/%s/snapshots/%s' % (fake.PROJECT_ID, UUID1) res = self._make_request(url) self.assertEqual(HTTPStatus.OK, res.status_int) self.assertSnapshotAttributes(self._get_snapshot(res.body), project_id=fake.PROJECT_ID, progress='0%') calls = [mock.call(snap_policy.GET_POLICY, target_obj=snapshot_obj), mock.call(snap_policy.EXTEND_ATTRIBUTE, fatal=False)] mock_authorize.assert_has_calls(calls) @mock.patch('cinder.context.RequestContext.authorize') def test_detail(self, mock_authorize): url = '/v3/%s/snapshots/detail' % fake.PROJECT_ID res = self._make_request(url) mock_authorize.return_value = False self.assertEqual(HTTPStatus.OK, res.status_int) for snapshot in self._get_snapshots(res.body): self.assertSnapshotAttributes(snapshot, project_id=fake.PROJECT_ID, progress='0%') calls = [mock.call(snap_policy.GET_ALL_POLICY), mock.call( snap_policy.EXTEND_ATTRIBUTE, fatal=False)] mock_authorize.assert_has_calls(calls) ././@PaxHeader0000000000000000000000000000002600000000000011453 
xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/api/contrib/test_hosts.py0000664000175000017500000002235300000000000023443 0ustar00zuulzuul00000000000000# Copyright (c) 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import datetime from unittest import mock import iso8601 from oslo_utils import timeutils import webob.exc from cinder.api.contrib import hosts as os_hosts from cinder.common import constants from cinder import context from cinder import exception from cinder.objects import service from cinder.tests.unit import fake_constants from cinder.tests.unit import test from cinder.tests.unit import utils as test_utils created_time = datetime.datetime(2012, 11, 14, 1, 20, 41, 95099) curr_time = datetime.datetime(2013, 7, 3, 0, 0, 1) SERVICE_LIST = [ {'created_at': created_time, 'updated_at': curr_time, 'host': 'test.host.1', 'topic': 'cinder-volume', 'disabled': 0, 'availability_zone': 'cinder', 'uuid': 'a3a593da-7f8d-4bb7-8b4c-f2bc1e0b4824'}, {'created_at': created_time, 'updated_at': curr_time, 'host': 'test.host.1', 'topic': 'cinder-volume', 'disabled': 0, 'availability_zone': 'cinder', 'uuid': '4200b32b-0bf9-436c-86b2-0675f6ac218e'}, {'created_at': created_time, 'updated_at': curr_time, 'host': 'test.host.1', 'topic': 'cinder-volume', 'disabled': 0, 'availability_zone': 'cinder', 'uuid': '6d91e7f5-ca17-4e3b-bf4f-19ca77166dd7'}, {'created_at': created_time, 'updated_at': curr_time, 'host': 'test.host.1', 'topic': 'cinder-volume', 'disabled': 0, 'availability_zone': 'cinder', 'uuid': '18417850-2ca9-43d1-9619-ae16bfb0f655'}, {'created_at': created_time, 'updated_at': None, 'host': 'test.host.1', 'topic': 'cinder-volume', 'disabled': 0, 'availability_zone': 'cinder', 'uuid': 'f838f35c-4035-464f-9792-ce60e390c13d'}, ] LIST_RESPONSE = [{'service-status': 'available', 'service': 'cinder-volume', 'zone': 'cinder', 'service-state': 'enabled', 'host_name': 'test.host.1', 'last-update': curr_time, }, {'service-status': 'available', 'service': 'cinder-volume', 'zone': 'cinder', 'service-state': 'enabled', 'host_name': 'test.host.1', 'last-update': curr_time, }, {'service-status': 'available', 'service': 'cinder-volume', 'zone': 'cinder', 'service-state': 'enabled', 'host_name': 'test.host.1', 'last-update': curr_time, }, {'service-status': 'available', 'service': 'cinder-volume', 'zone': 'cinder', 'service-state': 'enabled', 'host_name': 'test.host.1', 'last-update': curr_time, }, {'service-status': 'unavailable', 'service': 'cinder-volume', 'zone': 'cinder', 'service-state': 'enabled', 'host_name': 'test.host.1', 'last-update': None, }, ] def stub_utcnow(with_timezone=False): tzinfo = iso8601.UTC if with_timezone else None return datetime.datetime(2013, 7, 3, 0, 0, 2, tzinfo=tzinfo) class FakeRequest(object): environ = {'cinder.context': context.get_admin_context()} GET = {} class FakeRequestWithcinderZone(object): environ = {'cinder.context': context.get_admin_context()} GET = {'zone': 'cinder'} class HostTestCase(test.TestCase): 
"""Test Case for hosts.""" def setUp(self): super(HostTestCase, self).setUp() self.controller = os_hosts.HostController() self.req = FakeRequest() self.patch('cinder.db.service_get_all', autospec=True, return_value=SERVICE_LIST) self.mock_object(timeutils, 'utcnow', stub_utcnow) def _test_host_update(self, host, key, val, expected_value): body = {key: val} result = self.controller.update(self.req, host, body=body) self.assertEqual(expected_value, result[key]) def test_list_hosts(self): """Verify that the volume hosts are returned.""" hosts = os_hosts._list_hosts(self.req) self.assertEqual(LIST_RESPONSE, hosts) cinder_hosts = os_hosts._list_hosts(self.req, constants.VOLUME_BINARY) expected = [host for host in LIST_RESPONSE if host['service'] == constants.VOLUME_BINARY] self.assertEqual(expected, cinder_hosts) def test_list_hosts_with_zone(self): req = FakeRequestWithcinderZone() hosts = os_hosts._list_hosts(req) self.assertEqual(LIST_RESPONSE, hosts) def test_bad_status_value(self): self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update, self.req, 'test.host.1', body={'status': 'bad'}) self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update, self.req, 'test.host.1', body={'status': 'disablabc'}) def test_bad_update_key(self): bad_body = {'crazy': 'bad'} self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update, self.req, 'test.host.1', body=bad_body) def test_bad_update_key_and_correct_udpate_key(self): bad_body = {'status': 'disable', 'crazy': 'bad'} self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update, self.req, 'test.host.1', body=bad_body) def test_good_udpate_keys(self): body = {'status': 'disable'} self.assertRaises(NotImplementedError, self.controller.update, self.req, 'test.host.1', body=body) def test_bad_host(self): self.assertRaises(exception.HostNotFound, self.controller.update, self.req, 'bogus_host_name', body={'disabled': 0}) @mock.patch.object(service.Service, 'get_by_host_and_topic') def test_show_host(self, mock_get_host): host = 'test_host' test_service = service.Service(id=1, host=host, binary=constants.VOLUME_BINARY, topic=constants.VOLUME_TOPIC) mock_get_host.return_value = test_service ctxt1 = context.RequestContext(project_id=fake_constants.PROJECT_ID, is_admin=True) ctxt2 = context.RequestContext(project_id=fake_constants.PROJECT2_ID, is_admin=True) # Create two volumes with different project. volume1 = test_utils.create_volume(ctxt1, host=host, size=1) test_utils.create_volume(ctxt2, host=host, size=1) # This volume is not on the same host. It should not be counted. 
test_utils.create_volume(ctxt2, host='fake_host', size=1) test_utils.create_snapshot(ctxt1, volume_id=volume1.id) resp = self.controller.show(self.req, host) host_resp = resp['host'] # There are 3 resource list: total, project1, project2 self.assertEqual(3, len(host_resp)) expected = [ { "resource": { "volume_count": "2", "total_volume_gb": "2", "host": "test_host", "total_snapshot_gb": "1", "project": "(total)", "snapshot_count": "1"} }, { "resource": { "volume_count": "1", "total_volume_gb": "1", "host": "test_host", "project": fake_constants.PROJECT2_ID, "total_snapshot_gb": "0", "snapshot_count": "0"} }, { "resource": { "volume_count": "1", "total_volume_gb": "1", "host": "test_host", "total_snapshot_gb": "1", "project": fake_constants.PROJECT_ID, "snapshot_count": "1"} } ] self.assertListEqual(expected, sorted( host_resp, key=lambda h: h['resource']['project'])) def test_show_forbidden(self): self.req.environ['cinder.context'].is_admin = False dest = 'dummydest' self.assertRaises(exception.PolicyNotAuthorized, self.controller.show, self.req, dest) self.req.environ['cinder.context'].is_admin = True def test_show_host_not_exist(self): """A host given as an argument does not exists.""" self.req.environ['cinder.context'].is_admin = True dest = 'dummydest' self.assertRaises(exception.ServiceNotFound, self.controller.show, self.req, dest) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/api/contrib/test_qos_specs_manage.py0000664000175000017500000011175000000000000025612 0ustar00zuulzuul00000000000000# Copyright 2013 eBay Inc. # Copyright 2013 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from http import HTTPStatus from unittest import mock import ddt import webob from cinder.api.contrib import qos_specs_manage from cinder import context from cinder import db from cinder import exception from cinder import objects from cinder.tests.unit.api import fakes from cinder.tests.unit import fake_constants as fake from cinder.tests.unit import test def stub_qos_specs(id): res = dict(name='qos_specs_' + str(id)) res.update(dict(consumer='back-end')) res.update(dict(id=str(id))) specs = {"key1": "value1", "key2": "value2", "key3": "value3", "key4": "value4", "key5": "value5"} res.update(dict(specs=specs)) res.update(dict(created_at='2017-12-13T02:37:54Z')) res.update(dict(updated_at='2017-12-13T02:38:58Z')) return objects.QualityOfServiceSpecs(**res) def stub_qos_associates(id): return [{ 'association_type': 'volume_type', 'name': 'FakeVolTypeName', 'id': fake.VOLUME_TYPE_ID}] def return_qos_specs_get_all(context, filters=None, marker=None, limit=None, offset=None, sort_keys=None, sort_dirs=None): return [ stub_qos_specs(fake.QOS_SPEC_ID), stub_qos_specs(fake.QOS_SPEC2_ID), stub_qos_specs(fake.QOS_SPEC3_ID), ] def return_qos_specs_get_qos_specs(context, id): if id == fake.WILL_NOT_BE_FOUND_ID: raise exception.QoSSpecsNotFound(specs_id=id) return stub_qos_specs(id) def return_qos_specs_delete(context, id, force): if id == fake.WILL_NOT_BE_FOUND_ID: raise exception.QoSSpecsNotFound(specs_id=id) elif id == fake.IN_USE_ID: raise exception.QoSSpecsInUse(specs_id=id) pass def return_qos_specs_delete_keys(context, id, keys): if id == fake.WILL_NOT_BE_FOUND_ID: raise exception.QoSSpecsNotFound(specs_id=id) if 'foo' in keys: raise exception.QoSSpecsKeyNotFound(specs_id=id, specs_key='foo') def return_qos_specs_update(context, id, specs): if id == fake.WILL_NOT_BE_FOUND_ID: raise exception.QoSSpecsNotFound(specs_id=id) elif id == fake.INVALID_ID: raise exception.InvalidQoSSpecs(reason=id) elif id == fake.UPDATE_FAILED_ID: raise exception.QoSSpecsUpdateFailed(specs_id=id, qos_specs=specs) pass def return_qos_specs_create(context, name, specs): if name == 'qos_spec_%s' % fake.ALREADY_EXISTS_ID: raise exception.QoSSpecsExists(specs_id=name) elif name == 'qos_spec_%s' % fake.ACTION_FAILED_ID: raise exception.QoSSpecsCreateFailed(name=id, qos_specs=specs) elif name == 'qos_spec_%s' % fake.INVALID_ID: raise exception.InvalidQoSSpecs(reason=name) return objects.QualityOfServiceSpecs(name=name, specs=specs, created_at='2017-12-13T02:37:54Z', updated_at='2017-12-13T02:38:58Z', consumer='back-end', id=fake.QOS_SPEC_ID) def return_get_qos_associations(context, id): if id == fake.WILL_NOT_BE_FOUND_ID: raise exception.QoSSpecsNotFound(specs_id=id) elif id == fake.RAISE_ID: raise exception.CinderException() return stub_qos_associates(id) def return_associate_qos_specs(context, id, type_id): if id == fake.WILL_NOT_BE_FOUND_ID: raise exception.QoSSpecsNotFound(specs_id=id) elif id == fake.ACTION_FAILED_ID: raise exception.QoSSpecsAssociateFailed(specs_id=id, type_id=type_id) elif id == fake.ACTION2_FAILED_ID: raise exception.QoSSpecsDisassociateFailed(specs_id=id, type_id=type_id) if type_id == fake.WILL_NOT_BE_FOUND_ID: raise exception.VolumeTypeNotFound( volume_type_id=type_id) pass def return_disassociate_all(context, id): if id == fake.WILL_NOT_BE_FOUND_ID: raise exception.QoSSpecsNotFound(specs_id=id) elif id == fake.ACTION2_FAILED_ID: raise exception.QoSSpecsDisassociateFailed(specs_id=id, type_id=None) @ddt.ddt class QoSSpecManageApiTest(test.TestCase): def _create_qos_specs(self, name, values=None): 
"""Create a transfer object.""" if values: specs = dict(name=name, qos_specs=values) else: specs = {'name': name, 'consumer': 'back-end', 'specs': { 'key1': 'value1', 'key2': 'value2'}} return db.qos_specs_create(self.ctxt, specs)['id'] def setUp(self): super(QoSSpecManageApiTest, self).setUp() self.flags(host='fake') self.controller = qos_specs_manage.QoSSpecsController() self.ctxt = context.RequestContext(user_id=fake.USER_ID, project_id=fake.PROJECT_ID, is_admin=True) self.user_ctxt = context.RequestContext( fake.USER_ID, fake.PROJECT_ID, auth_token=True) self.qos_id1 = self._create_qos_specs("Qos_test_1") self.qos_id2 = self._create_qos_specs("Qos_test_2") self.qos_id3 = self._create_qos_specs("Qos_test_3") self.qos_id4 = self._create_qos_specs("Qos_test_4") @mock.patch('cinder.volume.qos_specs.get_all_specs', side_effect=return_qos_specs_get_all) def test_index(self, mock_get_all_specs): req = fakes.HTTPRequest.blank('/v3/%s/qos-specs' % fake.PROJECT_ID, use_admin_context=True) res = self.controller.index(req) self.assertEqual(3, len(res['qos_specs'])) names = set() for item in res['qos_specs']: self.assertEqual('value1', item['specs']['key1']) names.add(item['name']) expected_names = ['qos_specs_%s' % fake.QOS_SPEC_ID, 'qos_specs_%s' % fake.QOS_SPEC2_ID, 'qos_specs_%s' % fake.QOS_SPEC3_ID] self.assertEqual(set(expected_names), names) def test_index_with_limit(self): url = '/v3/%s/qos-specs?limit=2' % fake.PROJECT_ID req = fakes.HTTPRequest.blank(url, use_admin_context=True) res = self.controller.index(req) self.assertEqual(2, len(res['qos_specs'])) self.assertEqual(self.qos_id4, res['qos_specs'][0]['id']) self.assertEqual(self.qos_id3, res['qos_specs'][1]['id']) expect_next_link = ('http://localhost/v3/%s/qos-specs?limit' '=2&marker=%s') % ( fake.PROJECT_ID, res['qos_specs'][1]['id']) self.assertEqual(expect_next_link, res['qos_specs_links'][0]['href']) def test_index_with_offset(self): url = '/v3/%s/qos-specs?offset=1' % fake.PROJECT_ID req = fakes.HTTPRequest.blank(url, use_admin_context=True) res = self.controller.index(req) self.assertEqual(3, len(res['qos_specs'])) def test_index_with_offset_out_of_range(self): url = '/v3/%s/qos-specs?offset=356576877698707' % fake.PROJECT_ID req = fakes.HTTPRequest.blank(url, use_admin_context=True) self.assertRaises(webob.exc.HTTPBadRequest, self.controller.index, req) def test_index_with_limit_and_offset(self): url = '/v3/%s/qos-specs?limit=2&offset=1' % fake.PROJECT_ID req = fakes.HTTPRequest.blank(url, use_admin_context=True) res = self.controller.index(req) self.assertEqual(2, len(res['qos_specs'])) self.assertEqual(self.qos_id3, res['qos_specs'][0]['id']) self.assertEqual(self.qos_id2, res['qos_specs'][1]['id']) def test_index_with_marker(self): url = '/v3/%s/qos-specs?marker=%s' % (fake.PROJECT_ID, self.qos_id4) req = fakes.HTTPRequest.blank(url, use_admin_context=True) res = self.controller.index(req) self.assertEqual(3, len(res['qos_specs'])) def test_index_with_filter(self): url = '/v3/%s/qos-specs?id=%s' % (fake.PROJECT_ID, self.qos_id4) req = fakes.HTTPRequest.blank(url, use_admin_context=True) res = self.controller.index(req) self.assertEqual(1, len(res['qos_specs'])) self.assertEqual(self.qos_id4, res['qos_specs'][0]['id']) def test_index_with_sort_keys(self): url = '/v3/%s/qos-specs?sort=id' % fake.PROJECT_ID req = fakes.HTTPRequest.blank(url, use_admin_context=True) res = self.controller.index(req) self.assertEqual(4, len(res['qos_specs'])) expect_result = [self.qos_id1, self.qos_id2, self.qos_id3, self.qos_id4] 
expect_result.sort(reverse=True) self.assertEqual(expect_result[0], res['qos_specs'][0]['id']) self.assertEqual(expect_result[1], res['qos_specs'][1]['id']) self.assertEqual(expect_result[2], res['qos_specs'][2]['id']) self.assertEqual(expect_result[3], res['qos_specs'][3]['id']) def test_index_with_sort_keys_and_sort_dirs(self): url = '/v3/%s/qos-specs?sort=id:asc' % fake.PROJECT_ID req = fakes.HTTPRequest.blank(url, use_admin_context=True) res = self.controller.index(req) self.assertEqual(4, len(res['qos_specs'])) expect_result = [self.qos_id1, self.qos_id2, self.qos_id3, self.qos_id4] expect_result.sort() self.assertEqual(expect_result[0], res['qos_specs'][0]['id']) self.assertEqual(expect_result[1], res['qos_specs'][1]['id']) self.assertEqual(expect_result[2], res['qos_specs'][2]['id']) self.assertEqual(expect_result[3], res['qos_specs'][3]['id']) @mock.patch('cinder.volume.qos_specs.get_qos_specs', side_effect=return_qos_specs_get_qos_specs) @mock.patch('cinder.volume.qos_specs.delete', side_effect=return_qos_specs_delete) def test_qos_specs_delete(self, mock_qos_delete, mock_qos_get_specs): req = fakes.HTTPRequest.blank('/v3/%s/qos-specs/%s' % ( fake.PROJECT_ID, fake.QOS_SPEC_ID), use_admin_context=True) self.controller.delete(req, fake.QOS_SPEC_ID) self.assertEqual(1, self.notifier.get_notification_count()) @mock.patch('cinder.volume.qos_specs.get_qos_specs', side_effect=return_qos_specs_get_qos_specs) @mock.patch('cinder.volume.qos_specs.delete', side_effect=return_qos_specs_delete) def test_qos_specs_delete_not_found(self, mock_qos_delete, mock_qos_get_specs): req = fakes.HTTPRequest.blank('/v3/%s/qos-specs/%s' % (fake.PROJECT_ID, fake.WILL_NOT_BE_FOUND_ID), use_admin_context=True) self.assertRaises(exception.QoSSpecsNotFound, self.controller.delete, req, fake.WILL_NOT_BE_FOUND_ID) self.assertEqual(1, self.notifier.get_notification_count()) @mock.patch('cinder.volume.qos_specs.get_qos_specs', side_effect=return_qos_specs_get_qos_specs) @mock.patch('cinder.volume.qos_specs.delete', side_effect=return_qos_specs_delete) def test_qos_specs_delete_inuse(self, mock_qos_delete, mock_qos_get_specs): req = fakes.HTTPRequest.blank('/v3/%s/qos-specs/%s' % ( fake.PROJECT_ID, fake.IN_USE_ID), use_admin_context=True) self.assertRaises(webob.exc.HTTPBadRequest, self.controller.delete, req, fake.IN_USE_ID) self.assertEqual(1, self.notifier.get_notification_count()) @mock.patch('cinder.volume.qos_specs.get_qos_specs', side_effect=return_qos_specs_get_qos_specs) @mock.patch('cinder.volume.qos_specs.delete', side_effect=return_qos_specs_delete) def test_qos_specs_delete_inuse_force(self, mock_qos_delete, mock_qos_get_specs): req = fakes.HTTPRequest.blank('/v3/%s/qos-specs/%s?force=True' % (fake.PROJECT_ID, fake.IN_USE_ID), use_admin_context=True) self.assertRaises(webob.exc.HTTPInternalServerError, self.controller.delete, req, fake.IN_USE_ID) self.assertEqual(1, self.notifier.get_notification_count()) def test_qos_specs_delete_with_invalid_force(self): invalid_force = "invalid_bool" req = fakes.HTTPRequest.blank( '/v3/%s/qos-specs/%s/delete_keys?force=%s' % (fake.PROJECT_ID, fake.QOS_SPEC_ID, invalid_force), use_admin_context=True) self.assertRaises(exception.InvalidParameterValue, self.controller.delete, req, fake.QOS_SPEC_ID) @mock.patch('cinder.volume.qos_specs.get_qos_specs', side_effect=return_qos_specs_get_qos_specs) @mock.patch('cinder.volume.qos_specs.delete_keys', side_effect=return_qos_specs_delete_keys) def test_qos_specs_delete_keys(self, mock_qos_delete_keys, mock_get_qos): body = 
{"keys": ['bar', 'zoo']} req = fakes.HTTPRequest.blank('/v3/%s/qos-specs/%s/delete_keys' % (fake.PROJECT_ID, fake.IN_USE_ID), use_admin_context=True) self.controller.delete_keys(req, fake.IN_USE_ID, body=body) self.assertEqual(1, self.notifier.get_notification_count()) @mock.patch('cinder.volume.qos_specs.delete_keys', side_effect=return_qos_specs_delete_keys) def test_qos_specs_delete_keys_qos_notfound(self, mock_qos_specs_delete): body = {"keys": ['bar', 'zoo']} req = fakes.HTTPRequest.blank('/v3/%s/qos-specs/%s/delete_keys' % (fake.PROJECT_ID, fake.WILL_NOT_BE_FOUND_ID), use_admin_context=True) self.assertRaises(exception.QoSSpecsNotFound, self.controller.delete_keys, req, fake.WILL_NOT_BE_FOUND_ID, body=body) self.assertEqual(1, self.notifier.get_notification_count()) def test_qos_specs_delete_keys_invalid_key(self): body = {"keys": ['', None]} req = fakes.HTTPRequest.blank('/v3/%s/qos-specs/%s/delete_keys' % (fake.PROJECT_ID, fake.IN_USE_ID), use_admin_context=True) self.assertRaises(exception.ValidationError, self.controller.delete_keys, req, fake.IN_USE_ID, body=body) @mock.patch('cinder.volume.qos_specs.get_qos_specs', side_effect=return_qos_specs_get_qos_specs) @mock.patch('cinder.volume.qos_specs.delete_keys', side_effect=return_qos_specs_delete_keys) def test_qos_specs_delete_keys_badkey(self, mock_qos_specs_delete, mock_get_qos): req = fakes.HTTPRequest.blank('/v3/%s/qos-specs/%s/delete_keys' % (fake.PROJECT_ID, fake.IN_USE_ID), use_admin_context=True) body = {"keys": ['foo', 'zoo']} self.assertRaises(exception.QoSSpecsKeyNotFound, self.controller.delete_keys, req, fake.IN_USE_ID, body=body) self.assertEqual(1, self.notifier.get_notification_count()) @mock.patch('cinder.volume.qos_specs.delete_keys', side_effect=return_qos_specs_delete_keys) @mock.patch('cinder.volume.qos_specs.get_qos_specs', side_effect=return_qos_specs_get_qos_specs) def test_qos_specs_delete_keys_get_notifier(self, mock_get_qos_specs, mock_qos_delete_keys): body = {"keys": ['bar', 'zoo']} req = fakes.HTTPRequest.blank('/v3/%s/qos-specs/%s/delete_keys' % (fake.PROJECT_ID, fake.IN_USE_ID), use_admin_context=True) self.controller.delete_keys(req, fake.IN_USE_ID, body=body) self.assertEqual(1, self.notifier.get_notification_count()) @mock.patch('cinder.volume.qos_specs.create', side_effect=return_qos_specs_create) def test_create(self, mock_qos_spec_create): body = {"qos_specs": {"name": "qos_specs_%s" % fake.QOS_SPEC_ID, "key1": "value1"}} req = fakes.HTTPRequest.blank('/v3/%s/qos-specs' % fake.PROJECT_ID, use_admin_context=True) res_dict = self.controller.create(req, body=body) self.assertEqual(1, self.notifier.get_notification_count()) self.assertEqual('qos_specs_%s' % fake.QOS_SPEC_ID, res_dict['qos_specs']['name']) @mock.patch('cinder.volume.qos_specs.create', side_effect=return_qos_specs_create) def test_create_invalid_input(self, mock_qos_get_specs): body = {"qos_specs": {"name": 'qos_spec_%s' % fake.INVALID_ID, "consumer": "invalid_consumer"}} req = fakes.HTTPRequest.blank('/v3/%s/qos-specs' % fake.PROJECT_ID, use_admin_context=True) self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create, req, body=body) self.assertEqual(1, self.notifier.get_notification_count()) @mock.patch('cinder.volume.qos_specs.create', side_effect=return_qos_specs_create) def test_create_conflict(self, mock_qos_spec_create): body = {"qos_specs": {"name": 'qos_spec_%s' % fake.ALREADY_EXISTS_ID, "key1": "value1"}} req = fakes.HTTPRequest.blank('/v3/%s/qos-specs' % fake.PROJECT_ID, use_admin_context=True) 
self.assertRaises(webob.exc.HTTPConflict, self.controller.create, req, body=body) self.assertEqual(1, self.notifier.get_notification_count()) @mock.patch('cinder.volume.qos_specs.create', side_effect=return_qos_specs_create) def test_create_failed(self, mock_qos_spec_create): body = {"qos_specs": {"name": 'qos_spec_%s' % fake.ACTION_FAILED_ID, "key1": "value1"}} req = fakes.HTTPRequest.blank('/v3/%s/qos-specs' % fake.PROJECT_ID, use_admin_context=True) self.assertRaises(webob.exc.HTTPInternalServerError, self.controller.create, req, body=body) self.assertEqual(1, self.notifier.get_notification_count()) @ddt.data({'foo': {'a': 'b'}}, {'qos_specs': {'a': 'b'}}, {'qos_specs': 'string'}, None) def test_create_invalid_body_bad_request(self, body): req = fakes.HTTPRequest.blank('/v3/%s/qos-specs' % fake.PROJECT_ID, use_admin_context=True) req.method = 'POST' self.assertRaises(exception.ValidationError, self.controller.create, req, body=body) @ddt.data({'name': 'fake_name', 'a' * 256: 'a'}, {'name': 'fake_name', 'a': 'a' * 256}, {'name': 'fake_name', '': 'a'}) def test_create_qos_with_invalid_specs(self, value): body = {'qos_specs': value} req = fakes.HTTPRequest.blank('/v3/%s/qos-specs' % fake.PROJECT_ID, use_admin_context=True) req.method = 'POST' self.assertRaises(exception.InvalidInput, self.controller.create, req, body=body) @ddt.data(({'name': None}, exception.ValidationError), ({'name': ''}, exception.ValidationError), ({'name': ' '}, exception.ValidationError), ({'name': 'n' * 256}, exception.ValidationError)) @ddt.unpack def test_create_qos_with_invalid_spec_name(self, value, exception_class): body = {'qos_specs': value} req = fakes.HTTPRequest.blank('/v3/%s/qos-specs' % fake.PROJECT_ID, use_admin_context=True) req.method = 'POST' self.assertRaises(exception_class, self.controller.create, req, body=body) @mock.patch('cinder.volume.qos_specs.update', side_effect=return_qos_specs_update) @mock.patch('cinder.volume.qos_specs.get_qos_specs', side_effect=return_qos_specs_get_qos_specs) def test_update(self, mock_get_qos, mock_qos_update): req = fakes.HTTPRequest.blank('/v3/%s/qos-specs/%s' % (fake.PROJECT_ID, fake.QOS_SPEC_ID), use_admin_context=True) body = {'qos_specs': {'key1': 'value1', 'key2': 'value2'}} res = self.controller.update(req, fake.QOS_SPEC_ID, body=body) self.assertDictEqual(body, res) self.assertEqual(1, self.notifier.get_notification_count()) @mock.patch('cinder.volume.qos_specs.update', side_effect=return_qos_specs_update) @mock.patch('cinder.volume.qos_specs.get_qos_specs', side_effect=return_qos_specs_get_qos_specs) def test_update_not_found(self, mock_get_qos_specs, mock_qos_update): req = fakes.HTTPRequest.blank('/v3/%s/qos-specs/%s' % (fake.PROJECT_ID, fake.WILL_NOT_BE_FOUND_ID), use_admin_context=True) body = {'qos_specs': {'key1': 'value1', 'key2': 'value2'}} self.assertRaises(exception.QoSSpecsNotFound, self.controller.update, req, fake.WILL_NOT_BE_FOUND_ID, body=body) self.assertEqual(1, self.notifier.get_notification_count()) @mock.patch('cinder.volume.qos_specs.get_qos_specs', side_effect=return_qos_specs_get_qos_specs) @mock.patch('cinder.volume.qos_specs.update', side_effect=return_qos_specs_update) def test_update_invalid_input(self, mock_qos_update, mock_get_qos): req = fakes.HTTPRequest.blank('/v3/%s/qos-specs/%s' % (fake.PROJECT_ID, fake.INVALID_ID), use_admin_context=True) body = {'qos_specs': {'key1': 'value1', 'key2': 'value2'}} self.assertRaises(exception.InvalidQoSSpecs, self.controller.update, req, fake.INVALID_ID, body=body) self.assertEqual(1, 
self.notifier.get_notification_count()) @mock.patch('cinder.volume.qos_specs.get_qos_specs', side_effect=return_qos_specs_get_qos_specs) @ddt.data({'qos_specs': {'key1': ['value1']}}, {'qos_specs': {1: 'value1'}} ) def test_update_non_string_key_or_value(self, body, mock_get_qos): req = fakes.HTTPRequest.blank('/v3/%s/qos-specs/%s' % (fake.PROJECT_ID, fake.UUID1), use_admin_context=True) self.assertRaises(exception.ValidationError, self.controller.update, req, fake.UUID1, body=body) self.assertEqual(0, self.notifier.get_notification_count()) @mock.patch('cinder.volume.qos_specs.get_qos_specs', side_effect=return_qos_specs_get_qos_specs) @mock.patch('cinder.volume.qos_specs.update', side_effect=return_qos_specs_update) def test_update_failed(self, mock_qos_update, mock_get_qos): req = fakes.HTTPRequest.blank('/v3/%s/qos-specs/%s' % (fake.PROJECT_ID, fake.UPDATE_FAILED_ID), use_admin_context=True) body = {'qos_specs': {'key1': 'value1', 'key2': 'value2'}} self.assertRaises(webob.exc.HTTPInternalServerError, self.controller.update, req, fake.UPDATE_FAILED_ID, body=body) self.assertEqual(1, self.notifier.get_notification_count()) @mock.patch('cinder.volume.qos_specs.get_qos_specs', side_effect=return_qos_specs_get_qos_specs) def test_show(self, mock_get_qos_specs): req = fakes.HTTPRequest.blank('/v3/%s/qos-specs/%s' % ( fake.PROJECT_ID, fake.QOS_SPEC_ID), use_admin_context=True) res_dict = self.controller.show(req, fake.QOS_SPEC_ID) self.assertEqual(fake.QOS_SPEC_ID, res_dict['qos_specs']['id']) self.assertEqual('qos_specs_%s' % fake.QOS_SPEC_ID, res_dict['qos_specs']['name']) @mock.patch('cinder.volume.qos_specs.get_associations', side_effect=return_get_qos_associations) @mock.patch('cinder.volume.qos_specs.get_qos_specs', side_effect=return_qos_specs_get_qos_specs) def test_get_associations(self, mock_get_qos, mock_get_assciations): req = fakes.HTTPRequest.blank( '/v3/%s/qos-specs/%s/associations' % ( fake.PROJECT_ID, fake.QOS_SPEC_ID), use_admin_context=True) res = self.controller.associations(req, fake.QOS_SPEC_ID) self.assertEqual('FakeVolTypeName', res['qos_associations'][0]['name']) self.assertEqual(fake.VOLUME_TYPE_ID, res['qos_associations'][0]['id']) @mock.patch('cinder.volume.qos_specs.get_associations', side_effect=return_get_qos_associations) def test_get_associations_not_found(self, mock_get_assciations): req = fakes.HTTPRequest.blank( '/v3/%s/qos-specs/%s/associations' % (fake.PROJECT_ID, fake.WILL_NOT_BE_FOUND_ID), use_admin_context=True) self.assertRaises(exception.QoSSpecsNotFound, self.controller.associations, req, fake.WILL_NOT_BE_FOUND_ID) @mock.patch('cinder.volume.qos_specs.get_associations', side_effect=return_get_qos_associations) @mock.patch('cinder.volume.qos_specs.get_qos_specs', side_effect=return_qos_specs_get_qos_specs) def test_get_associations_failed(self, mock_get_qos, mock_get_associations): req = fakes.HTTPRequest.blank( '/v3/%s/qos-specs/%s/associations' % ( fake.PROJECT_ID, fake.RAISE_ID), use_admin_context=True) self.assertRaises(webob.exc.HTTPInternalServerError, self.controller.associations, req, fake.RAISE_ID) @mock.patch('cinder.volume.qos_specs.get_qos_specs', side_effect=return_qos_specs_get_qos_specs) @mock.patch('cinder.volume.qos_specs.associate_qos_with_type', side_effect=return_associate_qos_specs) def test_associate(self, mock_associate, mock_get_qos): req = fakes.HTTPRequest.blank( '/v3/%s/qos-specs/%s/associate?vol_type_id=%s' % (fake.PROJECT_ID, fake.QOS_SPEC_ID, fake.VOLUME_TYPE_ID), use_admin_context=True) res = 
self.controller.associate(req, fake.QOS_SPEC_ID) self.assertEqual(HTTPStatus.ACCEPTED, res.status_int) @mock.patch('cinder.volume.qos_specs.get_qos_specs', side_effect=return_qos_specs_get_qos_specs) @mock.patch('cinder.volume.qos_specs.associate_qos_with_type', side_effect=return_associate_qos_specs) def test_associate_no_type(self, mock_associate, mock_get_qos): req = fakes.HTTPRequest.blank('/v3/%s/qos-specs/%s/associate' % (fake.PROJECT_ID, fake.QOS_SPEC_ID), use_admin_context=True) self.assertRaises(webob.exc.HTTPBadRequest, self.controller.associate, req, fake.QOS_SPEC_ID) @mock.patch('cinder.volume.qos_specs.get_qos_specs', side_effect=return_qos_specs_get_qos_specs) @mock.patch('cinder.volume.qos_specs.associate_qos_with_type', side_effect=return_associate_qos_specs) def test_associate_not_found(self, mock_associate, mock_get_qos): req = fakes.HTTPRequest.blank( '/v3/%s/qos-specs/%s/associate?vol_type_id=%s' % ( fake.PROJECT_ID, fake.WILL_NOT_BE_FOUND_ID, fake.VOLUME_TYPE_ID), use_admin_context=True) self.assertRaises(exception.QoSSpecsNotFound, self.controller.associate, req, fake.WILL_NOT_BE_FOUND_ID) req = fakes.HTTPRequest.blank( '/v3/%s/qos-specs/%s/associate?vol_type_id=%s' % (fake.PROJECT_ID, fake.QOS_SPEC_ID, fake.WILL_NOT_BE_FOUND_ID), use_admin_context=True) self.assertRaises(exception.VolumeTypeNotFound, self.controller.associate, req, fake.QOS_SPEC_ID) @mock.patch('cinder.volume.qos_specs.get_qos_specs', side_effect=return_qos_specs_get_qos_specs) @mock.patch('cinder.volume.qos_specs.associate_qos_with_type', side_effect=return_associate_qos_specs) def test_associate_fail(self, mock_associate, mock_get_qos): req = fakes.HTTPRequest.blank( '/v3/%s/qos-specs/%s/associate?vol_type_id=%s' % (fake.PROJECT_ID, fake.ACTION_FAILED_ID, fake.VOLUME_TYPE_ID), use_admin_context=True) self.assertRaises(webob.exc.HTTPInternalServerError, self.controller.associate, req, fake.ACTION_FAILED_ID) @mock.patch('cinder.volume.qos_specs.get_qos_specs', side_effect=return_qos_specs_get_qos_specs) @mock.patch('cinder.volume.qos_specs.disassociate_qos_specs', side_effect=return_associate_qos_specs) def test_disassociate(self, mock_disassociate, mock_get_qos): req = fakes.HTTPRequest.blank( '/v3/%s/qos-specs/%s/disassociate?vol_type_id=%s' % ( fake.PROJECT_ID, fake.QOS_SPEC_ID, fake.VOLUME_TYPE_ID), use_admin_context=True) res = self.controller.disassociate(req, fake.QOS_SPEC_ID) self.assertEqual(HTTPStatus.ACCEPTED, res.status_int) @mock.patch('cinder.volume.qos_specs.get_qos_specs', side_effect=return_qos_specs_get_qos_specs) @mock.patch('cinder.volume.qos_specs.disassociate_qos_specs', side_effect=return_associate_qos_specs) def test_disassociate_no_type(self, mock_disassociate, mock_get_qos): req = fakes.HTTPRequest.blank( '/v3/%s/qos-specs/%s/disassociate' % ( fake.PROJECT_ID, fake.QOS_SPEC_ID), use_admin_context=True) self.assertRaises(webob.exc.HTTPBadRequest, self.controller.disassociate, req, fake.QOS_SPEC_ID) @mock.patch('cinder.volume.qos_specs.get_qos_specs', side_effect=return_qos_specs_get_qos_specs) @mock.patch('cinder.volume.qos_specs.disassociate_qos_specs', side_effect=return_associate_qos_specs) def test_disassociate_not_found(self, mock_disassociate, mock_get_qos): req = fakes.HTTPRequest.blank( '/v3/%s/qos-specs/%s/disassociate?vol_type_id=%s' % ( fake.PROJECT_ID, fake.WILL_NOT_BE_FOUND_ID, fake.VOLUME_TYPE_ID), use_admin_context=True) self.assertRaises(exception.QoSSpecsNotFound, self.controller.disassociate, req, fake.WILL_NOT_BE_FOUND_ID) req = fakes.HTTPRequest.blank( 
'/v3/%s/qos-specs/%s/disassociate?vol_type_id=%s' % (fake.PROJECT_ID, fake.VOLUME_TYPE_ID, fake.WILL_NOT_BE_FOUND_ID), use_admin_context=True) self.assertRaises(exception.VolumeTypeNotFound, self.controller.disassociate, req, fake.VOLUME_TYPE_ID) @mock.patch('cinder.volume.qos_specs.get_qos_specs', side_effect=return_qos_specs_get_qos_specs) @mock.patch('cinder.volume.qos_specs.disassociate_qos_specs', side_effect=return_associate_qos_specs) def test_disassociate_failed(self, mock_disassociate, mock_get_qos): req = fakes.HTTPRequest.blank( '/v3/%s/qos-specs/%s/disassociate?vol_type_id=%s' % ( fake.PROJECT_ID, fake.ACTION2_FAILED_ID, fake.VOLUME_TYPE_ID), use_admin_context=True) self.assertRaises(webob.exc.HTTPInternalServerError, self.controller.disassociate, req, fake.ACTION2_FAILED_ID) @mock.patch('cinder.volume.qos_specs.get_qos_specs', side_effect=return_qos_specs_get_qos_specs) @mock.patch('cinder.volume.qos_specs.disassociate_all', side_effect=return_disassociate_all) def test_disassociate_all(self, mock_disassociate, mock_get_qos): req = fakes.HTTPRequest.blank( '/v3/%s/qos-specs/%s/disassociate_all' % ( fake.PROJECT_ID, fake.QOS_SPEC_ID), use_admin_context=True) res = self.controller.disassociate_all(req, fake.QOS_SPEC_ID) self.assertEqual(HTTPStatus.ACCEPTED, res.status_int) @mock.patch('cinder.volume.qos_specs.get_qos_specs', side_effect=return_qos_specs_get_qos_specs) @mock.patch('cinder.volume.qos_specs.disassociate_all', side_effect=return_disassociate_all) def test_disassociate_all_not_found(self, mock_disassociate, mock_get_qos): req = fakes.HTTPRequest.blank( '/v3/%s/qos-specs/%s/disassociate_all' % ( fake.PROJECT_ID, fake.WILL_NOT_BE_FOUND_ID), use_admin_context=True) self.assertRaises(exception.QoSSpecsNotFound, self.controller.disassociate_all, req, fake.WILL_NOT_BE_FOUND_ID) @mock.patch('cinder.volume.qos_specs.get_qos_specs', side_effect=return_qos_specs_get_qos_specs) @mock.patch('cinder.volume.qos_specs.disassociate_all', side_effect=return_disassociate_all) def test_disassociate_all_failed(self, mock_disassociate, mock_get): req = fakes.HTTPRequest.blank( '/v3/%s/qos-specs/%s/disassociate_all' % ( fake.PROJECT_ID, fake.ACTION2_FAILED_ID), use_admin_context=True) self.assertRaises(webob.exc.HTTPInternalServerError, self.controller.disassociate_all, req, fake.ACTION2_FAILED_ID) def test_index_no_admin_user(self): req = fakes.HTTPRequest.blank('/v3/%s/qos-specs' % fake.PROJECT_ID, use_admin_context=False) self.assertRaises(exception.PolicyNotAuthorized, self.controller.index, req) def test_create_no_admin_user(self): body = {"qos_specs": {"name": "qos_specs_%s" % fake.QOS_SPEC_ID, "key1": "value1"}} req = fakes.HTTPRequest.blank('/v3/%s/qos-specs' % fake.PROJECT_ID, use_admin_context=False) self.assertRaises(exception.PolicyNotAuthorized, self.controller.create, req, body=body) def test_update_no_admin_user(self): req = fakes.HTTPRequest.blank('/v3/%s/qos-specs/%s' % (fake.PROJECT_ID, fake.QOS_SPEC_ID), use_admin_context=False) body = {'qos_specs': {'key1': 'value1', 'key2': 'value2'}} self.assertRaises(exception.PolicyNotAuthorized, self.controller.update, req, fake.QOS_SPEC_ID, body=body) def test_qos_specs_delete_no_admin_user(self): req = fakes.HTTPRequest.blank('/v3/%s/qos-specs/%s' % ( fake.PROJECT_ID, fake.QOS_SPEC_ID), use_admin_context=False) self.assertRaises(exception.PolicyNotAuthorized, self.controller.delete, req, fake.QOS_SPEC_ID) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 
cinder-27.0.0/cinder/tests/unit/api/contrib/test_quotas.py0000664000175000017500000003154100000000000023616 0ustar00zuulzuul00000000000000# # Copyright 2013 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Tests for cinder.api.contrib.quotas.py""" from unittest import mock import uuid from oslo_config import cfg from oslo_config import fixture as config_fixture import webob.exc from cinder.api.contrib import quotas from cinder import context from cinder import db from cinder import exception from cinder.tests.unit import fake_constants as fake from cinder.tests.unit import test from cinder.tests.unit import test_db_api CONF = cfg.CONF def make_body(root=True, gigabytes=1000, snapshots=10, volumes=10, backups=10, backup_gigabytes=1000, tenant_id=fake.PROJECT_ID, per_volume_gigabytes=-1, groups=10, subproject=False): resources = {'gigabytes': gigabytes, 'snapshots': snapshots, 'volumes': volumes, 'backups': backups, 'backup_gigabytes': backup_gigabytes, 'per_volume_gigabytes': per_volume_gigabytes, 'groups': groups} # need to consider preexisting volume types as well volume_types = db.volume_type_get_all(context.get_admin_context()) for volume_type in volume_types: # default values for subproject are 0 quota = 0 if subproject else -1 resources['gigabytes_' + volume_type] = quota resources['snapshots_' + volume_type] = quota resources['volumes_' + volume_type] = quota if tenant_id: resources['id'] = tenant_id if root: result = {'quota_set': resources} else: result = resources return result def make_subproject_body(root=True, gigabytes=0, snapshots=0, volumes=0, backups=0, backup_gigabytes=0, tenant_id=fake.PROJECT_ID, per_volume_gigabytes=0): return make_body(root=root, gigabytes=gigabytes, snapshots=snapshots, volumes=volumes, backups=backups, backup_gigabytes=backup_gigabytes, tenant_id=tenant_id, per_volume_gigabytes=per_volume_gigabytes, subproject=True) class QuotaSetsControllerTestBase(test.TestCase): class FakeProject(object): def __init__(self, id=fake.PROJECT_ID, parent_id=None, is_admin_project=False): self.id = id self.parent_id = parent_id self.subtree = None self.parents = None self.is_admin_project = is_admin_project def setUp(self): super(QuotaSetsControllerTestBase, self).setUp() self.controller = quotas.QuotaSetsController() self.req = mock.Mock() self.req.environ = {'cinder.context': context.get_admin_context()} self.req.environ['cinder.context'].is_admin = True self.req.params = {} self.req.environ['cinder.context'].project_id = uuid.uuid4().hex get_patcher = mock.patch('cinder.api.api_utils.get_project', self._get_project) get_patcher.start() self.addCleanup(get_patcher.stop) self.auth_url = 'http://localhost:5000' self.fixture = self.useFixture(config_fixture.Config(CONF)) self.fixture.config(auth_url=self.auth_url, group='keystone_authtoken') def _get_project(self, context, id, subtree_as_ids=False, parents_as_ids=False, is_admin_project=False): return self.project_by_id.get(id, self.FakeProject()) def _create_fake_quota_usages(self, 
usage_map): self._fake_quota_usages = {} for key, val in usage_map.items(): self._fake_quota_usages[key] = {'in_use': val} def _fake_quota_usage_get_all_by_project(self, context, project_id): return {'volumes': self._fake_quota_usages[project_id]} class QuotaSetsControllerTest(QuotaSetsControllerTestBase): def test_defaults(self): result = self.controller.defaults(self.req, fake.PROJECT_ID) self.assertDictEqual(make_body(), result) def test_show(self): result = self.controller.show(self.req, fake.PROJECT_ID) self.assertDictEqual(make_body(), result) def test_show_not_authorized(self): self.req.environ['cinder.context'].is_admin = False self.req.environ['cinder.context'].user_id = fake.USER_ID self.req.environ['cinder.context'].project_id = fake.PROJECT_ID self.req.environ['cinder.context'].roles = ['member', 'reader'] self.assertRaises(exception.PolicyNotAuthorized, self.controller.show, self.req, fake.PROJECT2_ID) def test_show_non_admin_user(self): self.controller._get_quotas = mock.Mock(side_effect= self.controller._get_quotas) result = self.controller.show(self.req, fake.PROJECT_ID) self.assertDictEqual(make_body(), result) self.controller._get_quotas.assert_called_with( self.req.environ['cinder.context'], fake.PROJECT_ID, False) def test_show_with_invalid_usage_param(self): self.req.params = {'usage': 'InvalidBool'} self.assertRaises(exception.InvalidParameterValue, self.controller.show, self.req, fake.PROJECT2_ID) def test_show_with_valid_usage_param(self): self.req.params = {'usage': 'false'} result = self.controller.show(self.req, fake.PROJECT_ID) self.assertDictEqual(make_body(), result) def test_update(self): body = make_body(gigabytes=2000, snapshots=15, volumes=5, backups=5, tenant_id=None) result = self.controller.update(self.req, fake.PROJECT_ID, body=body) self.assertDictEqual(body, result) body = make_body(gigabytes=db.MAX_INT, tenant_id=None) result = self.controller.update(self.req, fake.PROJECT_ID, body=body) self.assertDictEqual(body, result) @mock.patch( 'cinder.api.openstack.wsgi.Controller.validate_string_length') def test_update_limit(self, mock_validate): body = {'quota_set': {'volumes': 10}} result = self.controller.update(self.req, fake.PROJECT_ID, body=body) self.assertEqual(10, result['quota_set']['volumes']) self.assertTrue(mock_validate.called) def test_update_wrong_key(self): body = {'quota_set': {'bad': 'bad'}} self.assertRaises(exception.InvalidInput, self.controller.update, self.req, fake.PROJECT_ID, body=body) def test_update_invalid_value_key_value(self): body = {'quota_set': {'gigabytes': "should_be_int"}} self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update, self.req, fake.PROJECT_ID, body=body) def test_update_invalid_type_key_value(self): body = {'quota_set': {'gigabytes': None}} self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update, self.req, fake.PROJECT_ID, body=body) def test_update_with_no_body(self): body = {} self.assertRaises(exception.ValidationError, self.controller.update, self.req, fake.PROJECT_ID, body=body) def test_update_with_wrong_body(self): body = {'test': {}} self.assertRaises(exception.ValidationError, self.controller.update, self.req, fake.PROJECT_ID, body=body) def test_update_multi_value_with_bad_data(self): orig_quota = self.controller.show(self.req, fake.PROJECT_ID) body = make_body(gigabytes=2000, snapshots=15, volumes="should_be_int", backups=5, tenant_id=None) self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update, self.req, fake.PROJECT_ID, body=body) # Verify that quota values are 
not updated in db new_quota = self.controller.show(self.req, fake.PROJECT_ID) self.assertDictEqual(orig_quota, new_quota) def test_update_bad_quota_limit(self): body = {'quota_set': {'gigabytes': -1000}} self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update, self.req, fake.PROJECT_ID, body=body) body = {'quota_set': {'gigabytes': db.MAX_INT + 1}} self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update, self.req, fake.PROJECT_ID, body=body) def test_update_no_admin(self): self.req.environ['cinder.context'].is_admin = False self.req.environ['cinder.context'].project_id = fake.PROJECT_ID self.req.environ['cinder.context'].user_id = 'foo_user' self.assertRaises(exception.PolicyNotAuthorized, self.controller.update, self.req, fake.PROJECT_ID, body=make_body(tenant_id=None)) def test_update_without_quota_set_field(self): body = {'fake_quota_set': {'gigabytes': 100}} self.assertRaises(exception.ValidationError, self.controller.update, self.req, fake.PROJECT_ID, body=body) def test_update_empty_body(self): body = {} self.assertRaises(exception.ValidationError, self.controller.update, self.req, fake.PROJECT_ID, body=body) def _commit_quota_reservation(self): # Create simple quota and quota usage. ctxt = context.get_admin_context() res = test_db_api._quota_reserve(ctxt, fake.PROJECT_ID) db.reservation_commit(ctxt, res, fake.PROJECT_ID) expected = {'project_id': fake.PROJECT_ID, 'volumes': {'reserved': 0, 'in_use': 1}, 'gigabytes': {'reserved': 0, 'in_use': 2}, } self.assertEqual(expected, db.quota_usage_get_all_by_project(ctxt, fake.PROJECT_ID)) def test_update_lower_than_existing_resources(self): self._commit_quota_reservation() body = {'quota_set': {'volumes': 0}} self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update, self.req, fake.PROJECT_ID, body=body) # Ensure that validation works even if some resources are valid body = {'quota_set': {'gigabytes': 1, 'volumes': 10}} self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update, self.req, fake.PROJECT_ID, body=body) def test_delete(self): result_show = self.controller.show(self.req, fake.PROJECT_ID) self.assertDictEqual(make_body(), result_show) body = make_body(gigabytes=2000, snapshots=15, volumes=5, backups=5, backup_gigabytes=1000, tenant_id=None) result_update = self.controller.update(self.req, fake.PROJECT_ID, body=body) self.assertDictEqual(body, result_update) self.controller.delete(self.req, fake.PROJECT_ID) result_show_after = self.controller.show(self.req, fake.PROJECT_ID) self.assertDictEqual(result_show, result_show_after) def test_delete_with_allocated_quota_different_from_zero(self): project_id_1 = uuid.uuid4().hex project_id_2 = uuid.uuid4().hex self.req.environ['cinder.context'].project_id = project_id_1 body = make_body(gigabytes=2000, snapshots=15, volumes=5, backups=5, backup_gigabytes=1000, tenant_id=None) result_update = self.controller.update(self.req, project_id_1, body=body) self.assertDictEqual(body, result_update) # Set usage param to True in order to see get allocated values. 
self.req.params = {'usage': 'True'} result_show = self.controller.show(self.req, project_id_1) result_update = self.controller.update(self.req, project_id_2, body=body) self.assertDictEqual(body, result_update) self.controller.delete(self.req, project_id_2) result_show_after = self.controller.show(self.req, project_id_1) self.assertDictEqual(result_show, result_show_after) def test_delete_no_admin(self): self.req.environ['cinder.context'].is_admin = False self.assertRaises(exception.PolicyNotAuthorized, self.controller.delete, self.req, fake.PROJECT_ID) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/api/contrib/test_quotas_classes.py0000664000175000017500000001455500000000000025341 0ustar00zuulzuul00000000000000# Copyright 2013 Huawei Technologies Co., Ltd # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Tests for cinder.api.contrib.quota_classes.py """ from unittest import mock from cinder.api.contrib import quota_classes from cinder import context from cinder import db from cinder import exception from cinder import quota from cinder.tests.unit import fake_constants as fake from cinder.tests.unit import test from cinder.volume import volume_types QUOTAS = quota.QUOTAS GROUP_QUOTAS = quota.GROUP_QUOTAS def make_body(root=True, gigabytes=1000, snapshots=10, volumes=10, backups=10, backup_gigabytes=1000, per_volume_gigabytes=-1, volume_types_faked=None, tenant_id=fake.PROJECT_ID, groups=10): resources = {'gigabytes': gigabytes, 'snapshots': snapshots, 'volumes': volumes, 'backups': backups, 'per_volume_gigabytes': per_volume_gigabytes, 'backup_gigabytes': backup_gigabytes, 'groups': groups} if not volume_types_faked: volume_types_faked = {'fake_type': None} for volume_type in volume_types_faked: resources['gigabytes_' + volume_type] = -1 resources['snapshots_' + volume_type] = -1 resources['volumes_' + volume_type] = -1 # need to consider preexisting volume types as well volume_types = db.volume_type_get_all(context.get_admin_context()) for volume_type in volume_types: resources['gigabytes_' + volume_type] = -1 resources['snapshots_' + volume_type] = -1 resources['volumes_' + volume_type] = -1 if tenant_id: resources['id'] = tenant_id if root: result = {'quota_class_set': resources} else: result = resources return result def make_response_body(root=True, ctxt=None, quota_class='foo', request_body=None, tenant_id=fake.PROJECT_ID): resources = {} if not ctxt: ctxt = context.get_admin_context() resources.update(QUOTAS.get_class_quotas(ctxt, quota_class)) resources.update(GROUP_QUOTAS.get_class_quotas(ctxt, quota_class)) if not request_body and not request_body['quota_class_set']: resources.update(request_body['quota_class_set']) if tenant_id: resources['id'] = tenant_id if root: result = {'quota_class_set': resources} else: result = resources return result class QuotaClassSetsControllerTest(test.TestCase): def setUp(self): super(QuotaClassSetsControllerTest, self).setUp() self.controller = 
quota_classes.QuotaClassSetsController() self.ctxt = context.get_admin_context() self.req = mock.Mock() self.req.environ = {'cinder.context': self.ctxt} self.req.environ['cinder.context'].is_admin = True def test_show(self): volume_types.create(self.ctxt, 'fake_type') result = self.controller.show(self.req, fake.PROJECT_ID) self.assertDictEqual(make_body(), result) def test_show_not_authorized(self): self.req.environ['cinder.context'].is_admin = False self.req.environ['cinder.context'].user_id = fake.USER_ID self.req.environ['cinder.context'].project_id = fake.PROJECT_ID self.assertRaises(exception.PolicyNotAuthorized, self.controller.show, self.req, fake.PROJECT_ID) def test_update(self): volume_types.create(self.ctxt, 'fake_type') body = make_body(gigabytes=2000, snapshots=15, volumes=5, tenant_id=None) result = self.controller.update(self.req, fake.PROJECT_ID, body=body) self.assertDictEqual(body, result) @mock.patch('cinder.api.openstack.wsgi.Controller.validate_string_length') def test_update_limit(self, mock_validate): volume_types.create(self.ctxt, 'fake_type') body = make_body(volumes=5, tenant_id=None) result = self.controller.update(self.req, fake.PROJECT_ID, body=body) self.assertEqual(5, result['quota_class_set']['volumes']) self.assertTrue(mock_validate.called) def test_update_wrong_key(self): volume_types.create(self.ctxt, 'fake_type') body = {'quota_class_set': {'bad': 100}} self.assertRaises(exception.InvalidInput, self.controller.update, self.req, fake.PROJECT_ID, body=body) def test_update_invalid_key_value(self): body = {'quota_class_set': {'gigabytes': "should_be_int"}} self.assertRaises(exception.ValidationError, self.controller.update, self.req, fake.PROJECT_ID, body=body) def test_update_bad_quota_limit(self): body = {'quota_class_set': {'gigabytes': -1000}} self.assertRaises(exception.ValidationError, self.controller.update, self.req, fake.PROJECT_ID, body=body) def test_update_no_admin(self): self.req.environ['cinder.context'].is_admin = False volume_types.create(self.ctxt, 'fake_type') self.assertRaises(exception.PolicyNotAuthorized, self.controller.update, self.req, fake.PROJECT_ID, body=make_body(tenant_id=None)) def test_update_with_more_volume_types(self): volume_types.create(self.ctxt, 'fake_type_1') volume_types.create(self.ctxt, 'fake_type_2') body = {'quota_class_set': {'gigabytes_fake_type_1': 1111, 'volumes_fake_type_2': 2222}} result = self.controller.update(self.req, fake.PROJECT_ID, body=body) self.assertDictEqual(make_response_body(ctxt=self.ctxt, quota_class=fake.PROJECT_ID, request_body=body, tenant_id=None), result) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/api/contrib/test_scheduler_hints.py0000664000175000017500000001400600000000000025462 0ustar00zuulzuul00000000000000# Copyright 2013 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
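"""Tests for cinder.api.contrib.scheduler_hints.py"""
# These cases POST volume-create bodies through the full WSGI app, so they
# check both that a well-formed 'OS-SCH-HNT:scheduler_hints' payload reaches
# the volume controller and that malformed hints are rejected with 400.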
import datetime from http import HTTPStatus import ddt from oslo_serialization import jsonutils import cinder from cinder.api.openstack import wsgi from cinder import context from cinder.tests.unit.api import fakes from cinder.tests.unit.api.v3 import fakes as v3_fakes from cinder.tests.unit import fake_constants as fake from cinder.tests.unit import test UUID = fakes.FAKE_UUID @ddt.ddt class SchedulerHintsTestCase(test.TestCase): def setUp(self): super(SchedulerHintsTestCase, self).setUp() self.fake_instance = v3_fakes.create_volume(fake.VOLUME_ID, uuid=UUID) self.fake_instance['created_at'] =\ datetime.datetime(2013, 1, 1, 1, 1, 1) self.fake_instance['launched_at'] =\ datetime.datetime(2013, 1, 1, 1, 1, 1) self.flags( osapi_volume_extension=[ 'cinder.api.contrib.select_extensions'], osapi_volume_ext_list=['Scheduler_hints']) self.user_ctxt = context.RequestContext( fake.USER_ID, fake.PROJECT_ID, auth_token=True) self.app = fakes.wsgi_app(fake_auth_context=self.user_ctxt) self.admin_ctxt = context.get_admin_context() cinder.db.volume_type_create(self.admin_ctxt, v3_fakes.fake_default_type_get( fake.VOLUME_TYPE2_ID)) self.vol_type = cinder.db.volume_type_get_by_name(self.admin_ctxt, 'vol_type_name') def test_create_server_without_hints(self): @wsgi.response(HTTPStatus.ACCEPTED) def fake_create(*args, **kwargs): self.assertNotIn('scheduler_hints', kwargs['body']) return self.fake_instance self.mock_object(cinder.api.v3.volumes.VolumeController, 'create', fake_create) req = fakes.HTTPRequest.blank('/v3/%s/volumes' % fake.PROJECT_ID) req.method = 'POST' req.content_type = 'application/json' body = {'id': UUID, 'volume_type_id': fake.VOLUME_TYPE_ID, 'volume_id': fake.VOLUME_ID, } req.body = jsonutils.dump_as_bytes(body) res = req.get_response(self.app) self.assertEqual(HTTPStatus.ACCEPTED, res.status_int) def test_create_server_with_hints(self): @wsgi.response(HTTPStatus.ACCEPTED) def fake_create(*args, **kwargs): self.assertIn('scheduler_hints', kwargs['body']) self.assertEqual({"a": "b"}, kwargs['body']['scheduler_hints']) return self.fake_instance self.mock_object(cinder.api.v3.volumes.VolumeController, 'create', fake_create) req = fakes.HTTPRequest.blank('/v3/%s/volumes' % fake.PROJECT_ID) req.method = 'POST' req.content_type = 'application/json' body = {'id': UUID, 'volume_type_id': fake.VOLUME_TYPE_ID, 'volume_id': fake.VOLUME_ID, 'scheduler_hints': {'a': 'b'}, } req.body = jsonutils.dump_as_bytes(body) res = req.get_response(self.app) self.assertEqual(HTTPStatus.ACCEPTED, res.status_int) def test_create_server_bad_hints(self): req = fakes.HTTPRequest.blank('/v3/%s/volumes' % fake.PROJECT_ID) req.method = 'POST' req.content_type = 'application/json' body = {'volume': { 'id': UUID, 'volume_type_id': fake.VOLUME_TYPE_ID, 'volume_id': fake.VOLUME_ID, 'scheduler_hints': 'a', }} req.body = jsonutils.dump_as_bytes(body) res = req.get_response(self.app) self.assertEqual(HTTPStatus.BAD_REQUEST, res.status_int) @ddt.data({'local_to_instance': UUID}, {'local_to_instance': None}, {'different_host': [fake.UUID1, fake.UUID2]}, {'same_host': UUID}, {'same_host': [fake.UUID1, fake.UUID2]}, {'fake_key': 'fake_value'}, {'query': 'query_testing'}, {'query': {}}, None) def test_scheduler_hints_with_valid_body(self, value): req = fakes.HTTPRequest.blank('/v3/%s/volumes' % fake.PROJECT_ID) req.method = 'POST' req.content_type = 'application/json' body = {'volume': {'size': 1, 'volume_type': self.vol_type['id']}, 'OS-SCH-HNT:scheduler_hints': value} req.body = jsonutils.dump_as_bytes(body) res = 
req.get_response(self.app) self.assertEqual(HTTPStatus.ACCEPTED, res.status_int) @ddt.data({'local_to_instance': 'local_to_instance'}, {'different_host': 'different_host'}, {'different_host': ['different_host']}, {'different_host': [UUID, UUID]}, {'same_host': 'same_host'}, {'same_host': ['same_host']}, {'same_host': [UUID, UUID]}, {'query': None}, {'scheduler_hints'}) def test_scheduler_hints_with_invalid_body(self, value): req = fakes.HTTPRequest.blank('/v3/%s/volumes' % fake.PROJECT_ID) req.method = 'POST' req.content_type = 'application/json' body = {'volume': {'size': 1}, 'OS-SCH-HNT:scheduler_hints': value} req.body = jsonutils.dump_as_bytes(body) res = req.get_response(self.app) self.assertEqual(HTTPStatus.BAD_REQUEST, res.status_int) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/api/contrib/test_scheduler_stats.py0000664000175000017500000001760500000000000025503 0ustar00zuulzuul00000000000000# Copyright 2013 eBay Inc. # Copyright 2013 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from unittest import mock import ddt import webob from cinder.api.contrib import scheduler_stats from cinder.api import microversions as mv from cinder.api.openstack import api_version_request as api_version from cinder import context from cinder import exception from cinder.tests.unit.api import fakes from cinder.tests.unit import fake_constants as fake from cinder.tests.unit import test def schedule_rpcapi_get_pools(self, context, filters=None): all_pools = [] pool1 = dict(name='pool1', capabilities=dict( total_capacity=1024, free_capacity=100, volume_backend_name='pool1', reserved_percentage=0, driver_version='1.0.0', storage_protocol='iSCSI', QoS_support='False', updated=None)) all_pools.append(pool1) pool2 = dict(name='pool2', capabilities=dict( total_capacity=512, free_capacity=200, volume_backend_name='pool2', reserved_percentage=0, driver_version='1.0.1', storage_protocol='iSER', QoS_support='True', updated=None)) all_pools.append(pool2) return all_pools @ddt.ddt class SchedulerStatsAPITest(test.TestCase): def setUp(self): super(SchedulerStatsAPITest, self).setUp() self.flags(host='fake') self.controller = scheduler_stats.SchedulerStatsController() self.ctxt = context.RequestContext(fake.USER_ID, fake.PROJECT_ID, True) @mock.patch('cinder.scheduler.rpcapi.SchedulerAPI.get_pools', schedule_rpcapi_get_pools) def test_get_pools_summary(self): req = fakes.HTTPRequest.blank('/v3/%s/scheduler_stats' % fake.PROJECT_ID) req.environ['cinder.context'] = self.ctxt res = self.controller.get_pools(req) self.assertEqual(2, len(res['pools'])) expected = { 'pools': [ { 'name': 'pool1', }, { 'name': 'pool2', } ] } self.assertDictEqual(expected, res) @mock.patch('cinder.scheduler.rpcapi.SchedulerAPI.get_pools') def test_get_pools_summary_filter_name(self, mock_rpcapi): req = fakes.HTTPRequest.blank('/v3/%s/scheduler_stats?name=pool1' % fake.PROJECT_ID) mock_rpcapi.return_value = 
[dict(name='pool1', capabilities=dict(foo='bar'))] req.api_version_request = mv.get_api_version(mv.POOL_FILTER) req.environ['cinder.context'] = self.ctxt res = self.controller.get_pools(req) expected = { 'pools': [ { 'name': 'pool1', } ] } self.assertDictEqual(expected, res) filters = {'name': 'pool1'} mock_rpcapi.assert_called_with(mock.ANY, filters=filters) @mock.patch('cinder.scheduler.rpcapi.SchedulerAPI.get_pools') def test_get_pools_summary_filter_capabilities(self, mock_rpcapi): req = fakes.HTTPRequest.blank('/v3/%s/scheduler_stats?detail=True' '&foo=bar' % fake.PROJECT_ID) mock_rpcapi.return_value = [dict(name='pool1', capabilities=dict(foo='bar'))] req.api_version_request = mv.get_api_version(mv.POOL_FILTER) req.environ['cinder.context'] = self.ctxt res = self.controller.get_pools(req) expected = { 'pools': [ { 'name': 'pool1', 'capabilities': { 'foo': 'bar' } } ] } self.assertDictEqual(expected, res) filters = {'foo': 'bar'} mock_rpcapi.assert_called_with(mock.ANY, filters=filters) @mock.patch('cinder.scheduler.rpcapi.SchedulerAPI.get_pools', schedule_rpcapi_get_pools) def test_get_pools_detail(self): req = fakes.HTTPRequest.blank('/v3/%s/scheduler_stats?detail=True' % fake.PROJECT_ID) req.environ['cinder.context'] = self.ctxt res = self.controller.get_pools(req) self.assertEqual(2, len(res['pools'])) expected = { 'pools': [ { 'name': 'pool1', 'capabilities': { 'updated': None, 'total_capacity': 1024, 'free_capacity': 100, 'volume_backend_name': 'pool1', 'reserved_percentage': 0, 'driver_version': '1.0.0', 'storage_protocol': 'iSCSI', 'QoS_support': 'False', } }, { 'name': 'pool2', 'capabilities': { 'updated': None, 'total_capacity': 512, 'free_capacity': 200, 'volume_backend_name': 'pool2', 'reserved_percentage': 0, 'driver_version': '1.0.1', 'storage_protocol': 'iSER', 'QoS_support': 'True', } } ] } self.assertDictEqual(expected, res) def test_get_pools_detail_invalid_bool(self): req = fakes.HTTPRequest.blank( '/v3/%s/scheduler_stats?detail=InvalidBool' % fake.PROJECT_ID) req.environ['cinder.context'] = self.ctxt self.assertRaises(exception.InvalidParameterValue, self.controller.get_pools, req) @ddt.data((mv.get_prior_version(mv.POOL_TYPE_FILTER), False), (mv.POOL_TYPE_FILTER, True)) @ddt.unpack @mock.patch('cinder.scheduler.rpcapi.SchedulerAPI.get_pools') @mock.patch('cinder.api.common.reject_invalid_filters') def test_get_pools_by_volume_type(self, version, support_volume_type, mock_reject_invalid_filters, mock_get_pools ): req = fakes.HTTPRequest.blank('/v3/%s/scheduler-stats/get_pools?' 'volume_type=lvm' % fake.PROJECT_ID) mock_get_pools.return_value = [{'name': 'pool1', 'capabilities': {'foo': 'bar'}}] req.api_version_request = api_version.APIVersionRequest(version) req.environ['cinder.context'] = self.ctxt res = self.controller.get_pools(req) expected = { 'pools': [{'name': 'pool1'}] } filters = dict() if support_volume_type: filters = {'volume_type': 'lvm'} filters = webob.multidict.MultiDict(filters) mock_reject_invalid_filters.assert_called_once_with(self.ctxt, filters, 'pool', True) self.assertDictEqual(expected, res) mock_get_pools.assert_called_with(mock.ANY, filters=filters) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/api/contrib/test_services.py0000664000175000017500000013074300000000000024131 0ustar00zuulzuul00000000000000# Copyright 2012 IBM Corp. # All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import datetime from http import HTTPStatus from unittest import mock import ddt import iso8601 from oslo_config import cfg from cinder.api.contrib import services from cinder.api import extensions from cinder.api import microversions as mv from cinder.api.openstack import api_version_request as api_version from cinder.common import constants from cinder import context from cinder import exception from cinder import objects from cinder.tests.unit.api import fakes from cinder.tests.unit import fake_constants as fake from cinder.tests.unit import test CONF = cfg.CONF fake_services_list = [ {'binary': 'cinder-scheduler', 'host': 'host1', 'cluster_name': None, 'availability_zone': 'cinder', 'id': 1, 'disabled': True, 'updated_at': datetime.datetime(2012, 10, 29, 13, 42, 2), 'created_at': datetime.datetime(2012, 9, 18, 2, 46, 27), 'disabled_reason': 'test1', 'modified_at': '', 'uuid': 'a3a593da-7f8d-4bb7-8b4c-f2bc1e0b4824'}, {'binary': 'cinder-volume', 'host': 'host1', 'cluster_name': None, 'availability_zone': 'cinder', 'id': 2, 'disabled': True, 'updated_at': datetime.datetime(2012, 10, 29, 13, 42, 5), 'created_at': datetime.datetime(2012, 9, 18, 2, 46, 27), 'disabled_reason': 'test2', 'modified_at': '', 'uuid': '4200b32b-0bf9-436c-86b2-0675f6ac218e'}, {'binary': 'cinder-scheduler', 'host': 'host2', 'cluster_name': 'cluster1', 'availability_zone': 'cinder', 'id': 3, 'disabled': False, 'updated_at': datetime.datetime(2012, 9, 19, 6, 55, 34), 'created_at': datetime.datetime(2012, 9, 18, 2, 46, 28), 'disabled_reason': '', 'modified_at': '', 'uuid': '6d91e7f5-ca17-4e3b-bf4f-19ca77166dd7'}, {'binary': 'cinder-volume', 'host': 'host2', 'cluster_name': 'cluster1', 'availability_zone': 'cinder', 'id': 4, 'disabled': True, 'updated_at': datetime.datetime(2012, 9, 18, 8, 3, 38), 'created_at': datetime.datetime(2012, 9, 18, 2, 46, 28), 'disabled_reason': 'test4', 'modified_at': '', 'uuid': '18417850-2ca9-43d1-9619-ae16bfb0f655'}, {'binary': 'cinder-volume', 'host': 'host2', 'cluster_name': 'cluster2', 'availability_zone': 'cinder', 'id': 5, 'disabled': True, 'updated_at': datetime.datetime(2012, 9, 18, 8, 3, 38), 'created_at': datetime.datetime(2012, 9, 18, 2, 46, 28), 'disabled_reason': 'test5', 'modified_at': datetime.datetime(2012, 10, 29, 13, 42, 5), 'uuid': 'f838f35c-4035-464f-9792-ce60e390c13d'}, {'binary': 'cinder-volume', 'host': 'host2', 'cluster_name': 'cluster2', 'availability_zone': 'cinder', 'id': 6, 'disabled': False, 'updated_at': datetime.datetime(2012, 9, 18, 8, 3, 38), 'created_at': datetime.datetime(2012, 9, 18, 2, 46, 28), 'disabled_reason': '', 'modified_at': datetime.datetime(2012, 9, 18, 8, 1, 38), 'uuid': 'f2825a00-cc2f-493d-9635-003e01db8b3d'}, {'binary': 'cinder-scheduler', 'host': 'host2', 'cluster_name': None, 'availability_zone': 'cinder', 'id': 7, 'disabled': False, 'updated_at': None, 'created_at': datetime.datetime(2012, 9, 18, 2, 46, 28), 'disabled_reason': '', 'modified_at': None, 'uuid': '35fcf841-1974-4944-a798-1fb6d0a44972'}, ] class 
FakeRequest(object): environ = {"cinder.context": context.get_admin_context()} def __init__(self, version=mv.BASE_VERSION, **kwargs): self.GET = kwargs self.headers = mv.get_mv_header(version) self.api_version_request = mv.get_api_version(version) class FakeRequestWithBinary(FakeRequest): def __init__(self, **kwargs): kwargs.setdefault('binary', constants.VOLUME_BINARY) super(FakeRequestWithBinary, self).__init__(**kwargs) class FakeRequestWithHost(FakeRequest): def __init__(self, **kwargs): kwargs.setdefault('host', 'host1') super(FakeRequestWithHost, self).__init__(**kwargs) class FakeRequestWithHostBinary(FakeRequestWithBinary): def __init__(self, **kwargs): kwargs.setdefault('host', 'host1') super(FakeRequestWithHostBinary, self).__init__(**kwargs) def fake_service_get_all(context, **filters): result = [] host = filters.pop('host', None) for service in fake_services_list: if (host and service['host'] != host and not service['host'].startswith(host + '@')): continue if all(v is None or service.get(k) == v for k, v in filters.items()): result.append(service) return result def fake_service_get(context, service_id=None, **filters): result = fake_service_get_all(context, id=service_id, **filters) if not result: raise exception.ServiceNotFound(service_id=service_id) return result[0] def fake_service_get_by_id(value): for service in fake_services_list: if service['id'] == value: return service return None def fake_service_update(context, service_id, values, retry=True): service = fake_service_get_by_id(service_id) if service is None: raise exception.ServiceNotFound(service_id=service_id) else: {'host': 'host1', 'service': constants.VOLUME_BINARY, 'disabled': values['disabled']} def fake_policy_authorize(context, action, target, do_raise=True, exc=exception.PolicyNotAuthorized): pass def fake_utcnow(with_timezone=False): tzinfo = iso8601.UTC if with_timezone else None return datetime.datetime(2012, 10, 29, 13, 42, 11, tzinfo=tzinfo) def fake_get_pools(ctxt, filters=None): return [{"name": "host1", "capabilities": {"backend_state": "up"}}, {"name": "host2", "capabilities": {"backend_state": "down"}}] @ddt.ddt @mock.patch('cinder.scheduler.rpcapi.SchedulerAPI.get_pools', fake_get_pools) @mock.patch('cinder.db.service_get_all', fake_service_get_all) @mock.patch('cinder.db.service_get', fake_service_get) @mock.patch('oslo_utils.timeutils.utcnow', fake_utcnow) @mock.patch('cinder.db.sqlalchemy.api.service_update', fake_service_update) @mock.patch('cinder.policy.authorize', fake_policy_authorize) class ServicesTest(test.TestCase): def setUp(self): super(ServicesTest, self).setUp() self.context = context.get_admin_context() self.ext_mgr = extensions.ExtensionManager() self.ext_mgr.extensions = {} self.controller = services.ServiceController(self.ext_mgr) def test_services_list(self): req = FakeRequest() res_dict = self.controller.index(req) response = {'services': [{'binary': 'cinder-scheduler', 'host': 'host1', 'zone': 'cinder', 'status': 'disabled', 'state': 'up', 'updated_at': datetime.datetime( 2012, 10, 29, 13, 42, 2)}, {'binary': 'cinder-volume', 'host': 'host1', 'zone': 'cinder', 'status': 'disabled', 'state': 'up', 'updated_at': datetime.datetime( 2012, 10, 29, 13, 42, 5)}, {'binary': 'cinder-scheduler', 'host': 'host2', 'zone': 'cinder', 'status': 'enabled', 'state': 'down', 'updated_at': datetime.datetime( 2012, 9, 19, 6, 55, 34)}, {'binary': 'cinder-volume', 'host': 'host2', 'zone': 'cinder', 'status': 'disabled', 'state': 'down', 'updated_at': datetime.datetime( 2012, 9, 18, 8, 3, 38)}, 
{'binary': 'cinder-volume', 'host': 'host2', 'zone': 'cinder', 'status': 'disabled', 'state': 'down', 'updated_at': datetime.datetime( 2012, 10, 29, 13, 42, 5)}, {'binary': 'cinder-volume', 'host': 'host2', 'zone': 'cinder', 'status': 'enabled', 'state': 'down', 'updated_at': datetime.datetime( 2012, 9, 18, 8, 3, 38)}, {'binary': 'cinder-scheduler', 'host': 'host2', 'zone': 'cinder', 'status': 'enabled', 'state': 'down', 'updated_at': None}, ]} self.assertEqual(response, res_dict) def test_failover_old_version(self): req = FakeRequest(version=mv.BACKUP_PROJECT) self.assertRaises(exception.InvalidInput, self.controller.update, req, 'failover', {'cluster': 'cluster1'}) def test_failover_no_values(self): req = FakeRequest(version=mv.REPLICATION_CLUSTER) self.assertRaises(exception.InvalidInput, self.controller.update, req, 'failover', {'backend_id': 'replica1'}) @ddt.data({'host': 'hostname'}, {'cluster': 'mycluster'}) @mock.patch('cinder.volume.api.API.failover') def test_failover(self, body, failover_mock): req = FakeRequest(version=mv.REPLICATION_CLUSTER) body['backend_id'] = 'replica1' res = self.controller.update(req, 'failover', body) self.assertEqual(202, res.status_code) failover_mock.assert_called_once_with(req.environ['cinder.context'], body.get('host'), body.get('cluster'), 'replica1') @ddt.data({}, {'host': 'hostname', 'cluster': 'mycluster'}) @mock.patch('cinder.volume.api.API.failover') def test_failover_invalid_input(self, body, failover_mock): req = FakeRequest(version=mv.REPLICATION_CLUSTER) body['backend_id'] = 'replica1' self.assertRaises(exception.InvalidInput, self.controller.update, req, 'failover', body) failover_mock.assert_not_called() def test_services_list_with_cluster_name(self): req = FakeRequest(version=mv.CLUSTER_SUPPORT) res_dict = self.controller.index(req) response = {'services': [{'binary': 'cinder-scheduler', 'cluster': None, 'host': 'host1', 'zone': 'cinder', 'status': 'disabled', 'state': 'up', 'updated_at': datetime.datetime( 2012, 10, 29, 13, 42, 2)}, {'binary': 'cinder-volume', 'cluster': None, 'host': 'host1', 'zone': 'cinder', 'status': 'disabled', 'state': 'up', 'updated_at': datetime.datetime( 2012, 10, 29, 13, 42, 5)}, {'binary': 'cinder-scheduler', 'cluster': 'cluster1', 'host': 'host2', 'zone': 'cinder', 'status': 'enabled', 'state': 'down', 'updated_at': datetime.datetime( 2012, 9, 19, 6, 55, 34)}, {'binary': 'cinder-volume', 'cluster': 'cluster1', 'host': 'host2', 'zone': 'cinder', 'status': 'disabled', 'state': 'down', 'updated_at': datetime.datetime( 2012, 9, 18, 8, 3, 38)}, {'binary': 'cinder-volume', 'cluster': 'cluster2', 'host': 'host2', 'zone': 'cinder', 'status': 'disabled', 'state': 'down', 'updated_at': datetime.datetime( 2012, 10, 29, 13, 42, 5)}, {'binary': 'cinder-volume', 'cluster': 'cluster2', 'host': 'host2', 'zone': 'cinder', 'status': 'enabled', 'state': 'down', 'updated_at': datetime.datetime( 2012, 9, 18, 8, 3, 38)}, {'binary': 'cinder-scheduler', 'cluster': None, 'host': 'host2', 'zone': 'cinder', 'status': 'enabled', 'state': 'down', 'updated_at': None}, ]} self.assertEqual(response, res_dict) def test_services_list_with_backend_state(self): req = FakeRequest(version=mv.BACKEND_STATE_REPORT) res_dict = self.controller.index(req) response = {'services': [{'binary': 'cinder-scheduler', 'cluster': None, 'host': 'host1', 'zone': 'cinder', 'status': 'disabled', 'state': 'up', 'updated_at': datetime.datetime( 2012, 10, 29, 13, 42, 2)}, {'binary': 'cinder-volume', 'cluster': None, 'host': 'host1', 'zone': 'cinder', 'status': 
'disabled', 'state': 'up', 'updated_at': datetime.datetime( 2012, 10, 29, 13, 42, 5), 'backend_state': 'up'}, {'binary': 'cinder-scheduler', 'cluster': 'cluster1', 'host': 'host2', 'zone': 'cinder', 'status': 'enabled', 'state': 'down', 'updated_at': datetime.datetime( 2012, 9, 19, 6, 55, 34)}, {'binary': 'cinder-volume', 'cluster': 'cluster1', 'host': 'host2', 'zone': 'cinder', 'status': 'disabled', 'state': 'down', 'updated_at': datetime.datetime( 2012, 9, 18, 8, 3, 38), 'backend_state': 'down'}, {'binary': 'cinder-volume', 'cluster': 'cluster2', 'host': 'host2', 'zone': 'cinder', 'status': 'disabled', 'state': 'down', 'updated_at': datetime.datetime( 2012, 10, 29, 13, 42, 5), 'backend_state': 'down'}, {'binary': 'cinder-volume', 'cluster': 'cluster2', 'host': 'host2', 'zone': 'cinder', 'status': 'enabled', 'state': 'down', 'updated_at': datetime.datetime( 2012, 9, 18, 8, 3, 38), 'backend_state': 'down'}, {'binary': 'cinder-scheduler', 'cluster': None, 'host': 'host2', 'zone': 'cinder', 'status': 'enabled', 'state': 'down', 'updated_at': None}, ]} self.assertEqual(response, res_dict) def test_services_detail(self): self.ext_mgr.extensions['os-extended-services'] = True self.controller = services.ServiceController(self.ext_mgr) req = FakeRequest() res_dict = self.controller.index(req) response = {'services': [{'binary': 'cinder-scheduler', 'host': 'host1', 'zone': 'cinder', 'status': 'disabled', 'state': 'up', 'updated_at': datetime.datetime( 2012, 10, 29, 13, 42, 2), 'disabled_reason': 'test1'}, {'binary': 'cinder-volume', 'replication_status': None, 'active_backend_id': None, 'frozen': False, 'host': 'host1', 'zone': 'cinder', 'status': 'disabled', 'state': 'up', 'updated_at': datetime.datetime( 2012, 10, 29, 13, 42, 5), 'disabled_reason': 'test2'}, {'binary': 'cinder-scheduler', 'host': 'host2', 'zone': 'cinder', 'status': 'enabled', 'state': 'down', 'updated_at': datetime.datetime( 2012, 9, 19, 6, 55, 34), 'disabled_reason': ''}, {'binary': 'cinder-volume', 'replication_status': None, 'active_backend_id': None, 'frozen': False, 'host': 'host2', 'zone': 'cinder', 'status': 'disabled', 'state': 'down', 'updated_at': datetime.datetime( 2012, 9, 18, 8, 3, 38), 'disabled_reason': 'test4'}, {'binary': 'cinder-volume', 'replication_status': None, 'active_backend_id': None, 'frozen': False, 'host': 'host2', 'zone': 'cinder', 'status': 'disabled', 'state': 'down', 'updated_at': datetime.datetime( 2012, 10, 29, 13, 42, 5), 'disabled_reason': 'test5'}, {'binary': 'cinder-volume', 'replication_status': None, 'active_backend_id': None, 'frozen': False, 'host': 'host2', 'zone': 'cinder', 'status': 'enabled', 'state': 'down', 'updated_at': datetime.datetime( 2012, 9, 18, 8, 3, 38), 'disabled_reason': ''}, {'binary': 'cinder-scheduler', 'host': 'host2', 'zone': 'cinder', 'status': 'enabled', 'state': 'down', 'updated_at': None, 'disabled_reason': ''}, ]} self.assertEqual(response, res_dict) def test_services_list_with_host(self): req = FakeRequestWithHost() res_dict = self.controller.index(req) response = {'services': [ {'binary': 'cinder-scheduler', 'host': 'host1', 'zone': 'cinder', 'status': 'disabled', 'state': 'up', 'updated_at': datetime.datetime(2012, 10, 29, 13, 42, 2)}, {'binary': 'cinder-volume', 'host': 'host1', 'zone': 'cinder', 'status': 'disabled', 'state': 'up', 'updated_at': datetime.datetime(2012, 10, 29, 13, 42, 5)}]} self.assertEqual(response, res_dict) def test_services_detail_with_host(self): self.ext_mgr.extensions['os-extended-services'] = True self.controller = 
services.ServiceController(self.ext_mgr) req = FakeRequestWithHost() res_dict = self.controller.index(req) response = {'services': [ {'binary': 'cinder-scheduler', 'host': 'host1', 'zone': 'cinder', 'status': 'disabled', 'state': 'up', 'updated_at': datetime.datetime(2012, 10, 29, 13, 42, 2), 'disabled_reason': 'test1'}, {'binary': 'cinder-volume', 'frozen': False, 'replication_status': None, 'active_backend_id': None, 'host': 'host1', 'zone': 'cinder', 'status': 'disabled', 'state': 'up', 'updated_at': datetime.datetime(2012, 10, 29, 13, 42, 5), 'disabled_reason': 'test2'}]} self.assertEqual(response, res_dict) def test_services_list_with_binary(self): req = FakeRequestWithBinary() res_dict = self.controller.index(req) response = {'services': [ {'binary': 'cinder-volume', 'host': 'host1', 'zone': 'cinder', 'status': 'disabled', 'state': 'up', 'updated_at': datetime.datetime(2012, 10, 29, 13, 42, 5)}, {'binary': 'cinder-volume', 'host': 'host2', 'zone': 'cinder', 'status': 'disabled', 'state': 'down', 'updated_at': datetime.datetime(2012, 9, 18, 8, 3, 38)}, {'binary': 'cinder-volume', 'host': 'host2', 'zone': 'cinder', 'status': 'disabled', 'state': 'down', 'updated_at': datetime.datetime(2012, 10, 29, 13, 42, 5)}, {'binary': 'cinder-volume', 'host': 'host2', 'zone': 'cinder', 'status': 'enabled', 'state': 'down', 'updated_at': datetime.datetime(2012, 9, 18, 8, 3, 38)}]} self.assertEqual(response, res_dict) def test_services_detail_with_binary(self): self.ext_mgr.extensions['os-extended-services'] = True self.controller = services.ServiceController(self.ext_mgr) req = FakeRequestWithBinary() res_dict = self.controller.index(req) response = {'services': [ {'binary': 'cinder-volume', 'replication_status': None, 'active_backend_id': None, 'host': 'host1', 'zone': 'cinder', 'status': 'disabled', 'state': 'up', 'frozen': False, 'updated_at': datetime.datetime(2012, 10, 29, 13, 42, 5), 'disabled_reason': 'test2'}, {'binary': 'cinder-volume', 'replication_status': None, 'active_backend_id': None, 'host': 'host2', 'zone': 'cinder', 'status': 'disabled', 'state': 'down', 'frozen': False, 'updated_at': datetime.datetime(2012, 9, 18, 8, 3, 38), 'disabled_reason': 'test4'}, {'binary': 'cinder-volume', 'replication_status': None, 'active_backend_id': None, 'host': 'host2', 'zone': 'cinder', 'status': 'disabled', 'state': 'down', 'frozen': False, 'updated_at': datetime.datetime(2012, 10, 29, 13, 42, 5), 'disabled_reason': 'test5'}, {'binary': 'cinder-volume', 'replication_status': None, 'active_backend_id': None, 'host': 'host2', 'zone': 'cinder', 'status': 'enabled', 'state': 'down', 'frozen': False, 'updated_at': datetime.datetime(2012, 9, 18, 8, 3, 38), 'disabled_reason': ''}]} self.assertEqual(response, res_dict) def test_services_list_with_host_binary(self): req = FakeRequestWithHostBinary() res_dict = self.controller.index(req) response = {'services': [ {'binary': 'cinder-volume', 'host': 'host1', 'zone': 'cinder', 'status': 'disabled', 'state': 'up', 'updated_at': datetime.datetime(2012, 10, 29, 13, 42, 5)}]} self.assertEqual(response, res_dict) def test_services_detail_with_host_binary(self): self.ext_mgr.extensions['os-extended-services'] = True self.controller = services.ServiceController(self.ext_mgr) req = FakeRequestWithHostBinary() res_dict = self.controller.index(req) response = {'services': [ {'binary': 'cinder-volume', 'replication_status': None, 'active_backend_id': None, 'frozen': False, 'host': 'host1', 'zone': 'cinder', 'status': 'disabled', 'state': 'up', 'updated_at': 
datetime.datetime(2012, 10, 29, 13, 42, 5), 'disabled_reason': 'test2'}]} self.assertEqual(response, res_dict) def test_services_enable_with_service_key(self): body = {'host': 'host1', 'service': constants.VOLUME_BINARY} req = fakes.HTTPRequest.blank( '/v3/%s/os-services/enable' % fake.PROJECT_ID) res_dict = self.controller.update(req, "enable", body) self.assertEqual('enabled', res_dict['status']) def test_services_enable_with_binary_key(self): body = {'host': 'host1', 'binary': constants.VOLUME_BINARY} req = fakes.HTTPRequest.blank( '/v3/%s/os-services/enable' % fake.PROJECT_ID) res_dict = self.controller.update(req, "enable", body) self.assertEqual('enabled', res_dict['status']) def test_services_disable_with_service_key(self): req = fakes.HTTPRequest.blank( '/v3/%s/os-services/disable' % fake.PROJECT_ID) body = {'host': 'host1', 'service': constants.VOLUME_BINARY} res_dict = self.controller.update(req, "disable", body) self.assertEqual('disabled', res_dict['status']) def test_services_disable_with_binary_key(self): req = fakes.HTTPRequest.blank( '/v3/%s/os-services/disable' % fake.PROJECT_ID) body = {'host': 'host1', 'binary': constants.VOLUME_BINARY} res_dict = self.controller.update(req, "disable", body) self.assertEqual('disabled', res_dict['status']) def test_services_disable_log_reason(self): self.ext_mgr.extensions['os-extended-services'] = True self.controller = services.ServiceController(self.ext_mgr) req = ( fakes.HTTPRequest.blank('v1/fake/os-services/disable-log-reason')) body = {'host': 'host1', 'binary': 'cinder-scheduler', 'disabled_reason': 'test-reason', } res_dict = self.controller.update(req, "disable-log-reason", body) self.assertEqual('disabled', res_dict['status']) self.assertEqual('test-reason', res_dict['disabled_reason']) def test_services_disable_log_reason_unicode(self): self.ext_mgr.extensions['os-extended-services'] = True self.controller = services.ServiceController(self.ext_mgr) req = ( fakes.HTTPRequest.blank('v1/fake/os-services/disable-log-reason')) body = {'host': 'host1', 'binary': 'cinder-scheduler', 'disabled_reason': 'test-reason', } res_dict = self.controller.update(req, "disable-log-reason", body) self.assertEqual('disabled', res_dict['status']) self.assertEqual('test-reason', res_dict['disabled_reason']) def test_services_disable_log_reason_none(self): self.ext_mgr.extensions['os-extended-services'] = True self.controller = services.ServiceController(self.ext_mgr) req = ( fakes.HTTPRequest.blank('v1/fake/os-services/disable-log-reason')) body = {'host': 'host1', 'binary': 'cinder-scheduler', 'disabled_reason': None, } self.assertRaises(exception.ValidationError, self.controller.update, req, "disable-log-reason", body) @ddt.data(' ' * 10, 'a' * 256, None) def test_invalid_reason_field(self, reason): # # Check that empty strings are not allowed self.ext_mgr.extensions['os-extended-services'] = True self.controller = services.ServiceController(self.ext_mgr) req = ( fakes.HTTPRequest.blank('v3/fake/os-services/disable-log-reason')) body = {'host': 'host1', 'binary': 'cinder-volume', 'disabled_reason': reason, } self.assertRaises(exception.ValidationError, self.controller.update, req, "disable-log-reason", body) def test_services_failover_host(self): url = '/v3/%s/os-services/failover_host' % fake.PROJECT_ID req = fakes.HTTPRequest.blank(url) body = {'host': 'fake_host', 'backend_id': 'fake_backend'} with mock.patch.object(self.controller.volume_api, 'failover') \ as failover_mock: res = self.controller.update(req, 'failover_host', body) 
failover_mock.assert_called_once_with(req.environ['cinder.context'], 'fake_host', None, 'fake_backend') self.assertEqual(HTTPStatus.ACCEPTED, res.status_code) @ddt.data(('failover_host', {'host': 'fake_host', 'backend_id': 'fake_backend'}), ('freeze', {'host': 'fake_host'}), ('thaw', {'host': 'fake_host'})) @ddt.unpack @mock.patch('cinder.objects.ServiceList.get_all') def test_services_action_host_not_found(self, method, body, mock_get_all_services): url = '/v3/%s/os-services/%s' % (fake.PROJECT_ID, method) req = fakes.HTTPRequest.blank(url) mock_get_all_services.return_value = [] msg = 'No service found with host=%s' % 'fake_host' result = self.assertRaises(exception.InvalidInput, self.controller.update, req, method, body) self.assertEqual(msg, result.msg) @ddt.data(('failover', {'cluster': 'fake_cluster', 'backend_id': 'fake_backend'}), ('freeze', {'cluster': 'fake_cluster'}), ('thaw', {'cluster': 'fake_cluster'})) @ddt.unpack @mock.patch('cinder.objects.ServiceList.get_all') def test_services_action_cluster_not_found(self, method, body, mock_get_all_services): url = '/v3/%s/os-services/%s' % (fake.PROJECT_ID, method) req = fakes.HTTPRequest.blank(url, version=mv.REPLICATION_CLUSTER) mock_get_all_services.return_value = [] msg = "No service found with cluster=fake_cluster" result = self.assertRaises(exception.InvalidInput, self.controller.update, req, method, body) self.assertEqual(msg, result.msg) def test_services_freeze(self): url = '/v3/%s/os-services/freeze' % fake.PROJECT_ID req = fakes.HTTPRequest.blank(url) body = {'host': 'fake_host'} with mock.patch.object(self.controller.volume_api, 'freeze_host') \ as freeze_mock: res = self.controller.update(req, 'freeze', body) freeze_mock.assert_called_once_with(req.environ['cinder.context'], 'fake_host', None) self.assertEqual(freeze_mock.return_value, res) def test_services_thaw(self): url = '/v3/%s/os-services/thaw' % fake.PROJECT_ID req = fakes.HTTPRequest.blank(url) body = {'host': 'fake_host'} with mock.patch.object(self.controller.volume_api, 'thaw_host') \ as thaw_mock: res = self.controller.update(req, 'thaw', body) thaw_mock.assert_called_once_with(req.environ['cinder.context'], 'fake_host', None) self.assertEqual(thaw_mock.return_value, res) @ddt.data('freeze', 'thaw', 'failover_host') def test_services_replication_calls_no_host(self, method): url = '/v3/%s/os-services/%s' % (fake.PROJECT_ID, method) req = fakes.HTTPRequest.blank(url) self.assertRaises(exception.InvalidInput, self.controller.update, req, method, {}) @mock.patch('cinder.api.contrib.services.ServiceController._set_log') def test_set_log(self, set_log_mock): set_log_mock.return_value = None req = FakeRequest(version=mv.LOG_LEVEL) body = mock.sentinel.body res = self.controller.update(req, 'set-log', body) self.assertEqual(set_log_mock.return_value, res) set_log_mock.assert_called_once_with(req, mock.ANY, body=body) @mock.patch('cinder.api.contrib.services.ServiceController._get_log') def test_get_log(self, get_log_mock): get_log_mock.return_value = None req = FakeRequest(version=mv.LOG_LEVEL) body = mock.sentinel.body res = self.controller.update(req, 'get-log', body) self.assertEqual(get_log_mock.return_value, res) get_log_mock.assert_called_once_with(req, mock.ANY, body=body) def test_get_log_wrong_binary(self): req = FakeRequest(version=mv.LOG_LEVEL) body = {'binary': 'wrong-binary'} self.assertRaises(exception.ValidationError, self.controller._get_log, req, self.context, body=body) def test_get_log_w_server_filter_same_host(self): server_filter = 'controller-0' 
CONF.set_override('host', server_filter) body = {'binary': constants.API_BINARY, 'server': server_filter} req = FakeRequest(version=mv.LOG_LEVEL) log_levels = self.controller._get_log( req=req, context=mock.sentinel.context, body=body) log_levels = log_levels['log_levels'] self.assertEqual(1, len(log_levels)) self.assertEqual('controller-0', log_levels[0]['host']) self.assertEqual('cinder-api', log_levels[0]['binary']) # since there are a lot of log levels, we just check if the key-value # exists for levels self.assertIsNotNone(log_levels[0]['levels']) def test_get_log_w_server_filter_different_host(self): server_filter = 'controller-0' CONF.set_override('host', 'controller-different-host') body = {'binary': constants.API_BINARY, 'server': server_filter} req = FakeRequest(version=mv.LOG_LEVEL) log_levels = self.controller._get_log( req=req, context=mock.sentinel.context, body=body) log_levels = log_levels['log_levels'] self.assertEqual(0, len(log_levels)) @ddt.data(None, '', '*') @mock.patch('cinder.objects.ServiceList.get_all') def test__log_params_binaries_service_all(self, binary, service_list_mock): body = {'binary': binary, 'server': 'host1'} binaries, services = self.controller._log_params_binaries_services( mock.sentinel.context, body) self.assertEqual(constants.LOG_BINARIES, binaries) self.assertEqual(service_list_mock.return_value, services) service_list_mock.assert_called_once_with( mock.sentinel.context, filters={'host_or_cluster': body['server'], 'is_up': True}) @ddt.data('cinder-api', 'cinder-volume', 'cinder-scheduler', 'cinder-backup') @mock.patch('cinder.objects.ServiceList.get_all') def test__log_params_binaries_service_one(self, binary, service_list_mock): body = {'binary': binary, 'server': 'host1'} binaries, services = self.controller._log_params_binaries_services( mock.sentinel.context, body) self.assertEqual([binary], binaries) if binary == constants.API_BINARY: self.assertEqual([], services) service_list_mock.assert_not_called() else: self.assertEqual(service_list_mock.return_value, services) service_list_mock.assert_called_once_with( mock.sentinel.context, filters={'host_or_cluster': body['server'], 'binary': binary, 'is_up': True}) @ddt.data((None, exception.InvalidInput), ('', exception.InvalidInput), ('wronglevel', exception.InvalidInput)) @ddt.unpack def test__set_log_invalid_level(self, level, exceptions): body = {'level': level} url = '/v3/%s/os-services/set-log' % fake.PROJECT_ID req = fakes.HTTPRequest.blank(url) req.api_version_request = api_version.APIVersionRequest("3.32") self.assertRaises(exceptions, self.controller._set_log, req, self.context, body=body) @mock.patch('cinder.utils.get_log_method') @mock.patch('cinder.objects.ServiceList.get_all') @mock.patch('cinder.utils.set_log_levels') @mock.patch('cinder.scheduler.rpcapi.SchedulerAPI.set_log_levels') @mock.patch('cinder.volume.rpcapi.VolumeAPI.set_log_levels') @mock.patch('cinder.backup.rpcapi.BackupAPI.set_log_levels') def test__set_log(self, backup_rpc_mock, vol_rpc_mock, sch_rpc_mock, set_log_mock, get_all_mock, get_log_mock): services = [ objects.Service(self.context, binary=constants.SCHEDULER_BINARY), objects.Service(self.context, binary=constants.VOLUME_BINARY), objects.Service(self.context, binary=constants.BACKUP_BINARY), ] get_all_mock.return_value = services url = '/v3/%s/os-services/set-log' % fake.PROJECT_ID req = fakes.HTTPRequest.blank(url) body = {'binary': '*', 'prefix': 'eventlet.', 'level': 'debug'} log_level = objects.LogLevel(prefix=body['prefix'], level=body['level']) with 
mock.patch('cinder.objects.LogLevel') as log_level_mock: log_level_mock.return_value = log_level res = self.controller._set_log(req, mock.sentinel.context, body=body) log_level_mock.assert_called_once_with(mock.sentinel.context, prefix=body['prefix'], level=body['level']) self.assertEqual(202, res.status_code) set_log_mock.assert_called_once_with(body['prefix'], body['level']) sch_rpc_mock.assert_called_once_with(mock.sentinel.context, services[0], log_level) vol_rpc_mock.assert_called_once_with(mock.sentinel.context, services[1], log_level) backup_rpc_mock.assert_called_once_with(mock.sentinel.context, services[2], log_level) get_log_mock.assert_called_once_with(body['level']) @mock.patch('cinder.objects.ServiceList.get_all') @mock.patch('cinder.utils.get_log_levels') @mock.patch('cinder.scheduler.rpcapi.SchedulerAPI.get_log_levels') @mock.patch('cinder.volume.rpcapi.VolumeAPI.get_log_levels') @mock.patch('cinder.backup.rpcapi.BackupAPI.get_log_levels') def test__get_log(self, backup_rpc_mock, vol_rpc_mock, sch_rpc_mock, get_log_mock, get_all_mock): get_log_mock.return_value = mock.sentinel.api_levels backup_rpc_mock.return_value = [ objects.LogLevel(prefix='p1', level='l1'), objects.LogLevel(prefix='p2', level='l2') ] vol_rpc_mock.return_value = [ objects.LogLevel(prefix='p3', level='l3'), objects.LogLevel(prefix='p4', level='l4') ] sch_rpc_mock.return_value = [ objects.LogLevel(prefix='p5', level='l5'), objects.LogLevel(prefix='p6', level='l6') ] services = [ objects.Service(self.context, binary=constants.SCHEDULER_BINARY, host='host'), objects.Service(self.context, binary=constants.VOLUME_BINARY, host='host@backend#pool'), objects.Service(self.context, binary=constants.BACKUP_BINARY, host='host'), ] get_all_mock.return_value = services url = '/v3/%s/os-services/get-log' % fake.PROJECT_ID req = fakes.HTTPRequest.blank(url) body = {'binary': '*', 'prefix': 'eventlet.'} log_level = objects.LogLevel(prefix=body['prefix']) with mock.patch('cinder.objects.LogLevel') as log_level_mock: log_level_mock.return_value = log_level res = self.controller._get_log(req, mock.sentinel.context, body=body) log_level_mock.assert_called_once_with(mock.sentinel.context, prefix=body['prefix']) expected = {'log_levels': [ {'binary': 'cinder-api', 'host': CONF.host, 'levels': mock.sentinel.api_levels}, {'binary': 'cinder-scheduler', 'host': 'host', 'levels': {'p5': 'l5', 'p6': 'l6'}}, {'binary': constants.VOLUME_BINARY, 'host': 'host@backend#pool', 'levels': {'p3': 'l3', 'p4': 'l4'}}, {'binary': 'cinder-backup', 'host': 'host', 'levels': {'p1': 'l1', 'p2': 'l2'}}, ]} self.assertDictEqual(expected, res) get_log_mock.assert_called_once_with(body['prefix']) sch_rpc_mock.assert_called_once_with(mock.sentinel.context, services[0], log_level) vol_rpc_mock.assert_called_once_with(mock.sentinel.context, services[1], log_level) backup_rpc_mock.assert_called_once_with(mock.sentinel.context, services[2], log_level) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/api/contrib/test_snapshot_actions.py0000664000175000017500000001322100000000000025654 0ustar00zuulzuul00000000000000# Copyright 2013, Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from http import HTTPStatus from unittest import mock import ddt from oslo_serialization import jsonutils import webob from cinder.api.contrib import snapshot_actions from cinder.api import microversions as mv from cinder import context from cinder import db from cinder import exception from cinder.objects import fields from cinder.tests.unit.api import fakes from cinder.tests.unit.api.v3 import fakes as v3_fakes from cinder.tests.unit import fake_constants as fake from cinder.tests.unit import test def fake_snapshot_get(context, snapshot_id): snapshot = v3_fakes.fake_snapshot(snapshot_id) if snapshot_id == fake.SNAPSHOT_ID: snapshot['status'] = fields.SnapshotStatus.CREATING else: snapshot['status'] = fields.SnapshotStatus.ERROR return snapshot @ddt.ddt class SnapshotActionsTest(test.TestCase): def setUp(self): super(SnapshotActionsTest, self).setUp() self.user_ctxt = context.RequestContext( fake.USER_ID, fake.PROJECT_ID, auth_token=True) self.controller = snapshot_actions.SnapshotActionsController() @mock.patch('cinder.db.snapshot_update', autospec=True) @mock.patch('cinder.db.sqlalchemy.api._snapshot_get', side_effect=fake_snapshot_get) @mock.patch('cinder.db.snapshot_metadata_get', return_value=dict()) def test_update_snapshot_status(self, metadata_get, *args): body = {'os-update_snapshot_status': {'status': fields.SnapshotStatus.AVAILABLE}} req = webob.Request.blank('/v3/%s/snapshots/%s/action' % ( fake.PROJECT_ID, fake.SNAPSHOT_ID)) req.method = "POST" req.body = jsonutils.dump_as_bytes(body) req.headers["content-type"] = "application/json" res = req.get_response(fakes.wsgi_app( fake_auth_context=self.user_ctxt)) self.assertEqual(HTTPStatus.ACCEPTED, res.status_int) @mock.patch('cinder.db.sqlalchemy.api._snapshot_get', side_effect=fake_snapshot_get) @mock.patch('cinder.db.snapshot_metadata_get', return_value=dict()) def test_update_snapshot_status_invalid_status(self, metadata_get, *args): body = {'os-update_snapshot_status': {'status': 'in-use'}} req = webob.Request.blank('/v3/%s/snapshots/%s/action' % ( fake.PROJECT_ID, fake.SNAPSHOT_ID)) req.method = "POST" req.body = jsonutils.dump_as_bytes(body) req.headers["content-type"] = "application/json" res = req.get_response(fakes.wsgi_app( fake_auth_context=self.user_ctxt)) self.assertEqual(HTTPStatus.BAD_REQUEST, res.status_int) def test_update_snapshot_status_without_status(self): self.mock_object(db, 'snapshot_get', fake_snapshot_get) body = {'os-update_snapshot_status': {}} req = webob.Request.blank('/v3/%s/snapshots/%s/action' % ( fake.PROJECT_ID, fake.SNAPSHOT_ID)) req.method = "POST" req.body = jsonutils.dump_as_bytes(body) req.headers["content-type"] = "application/json" res = req.get_response(fakes.wsgi_app( fake_auth_context=self.user_ctxt)) self.assertEqual(HTTPStatus.BAD_REQUEST, res.status_int) @mock.patch('cinder.db.snapshot_update', autospec=True) @mock.patch('cinder.db.sqlalchemy.api._snapshot_get', side_effect=fake_snapshot_get) @mock.patch('cinder.db.snapshot_metadata_get', return_value=dict()) def test_update_snapshot_valid_progress(self, metadata_get, *args): body = {'os-update_snapshot_status': {'status': 
fields.SnapshotStatus.AVAILABLE, 'progress': '50%'}} req = webob.Request.blank('/v3/%s/snapshots/%s/action' % ( fake.PROJECT_ID, fake.SNAPSHOT_ID)) req.method = "POST" req.body = jsonutils.dump_as_bytes(body) req.headers["content-type"] = "application/json" res = req.get_response(fakes.wsgi_app( fake_auth_context=self.user_ctxt)) self.assertEqual(HTTPStatus.ACCEPTED, res.status_int) @ddt.data(({'os-update_snapshot_status': {'status': fields.SnapshotStatus.AVAILABLE, 'progress': '50'}}, exception.InvalidInput), ({'os-update_snapshot_status': {'status': fields.SnapshotStatus.AVAILABLE, 'progress': '103%'}}, exception.InvalidInput), ({'os-update_snapshot_status': {'status': fields.SnapshotStatus.AVAILABLE, 'progress': " "}}, exception.InvalidInput), ({'os-update_snapshot_status': {'status': fields.SnapshotStatus.AVAILABLE, 'progress': 50}}, exception.ValidationError)) @ddt.unpack def test_update_snapshot_invalid_progress(self, body, exception_class): req = webob.Request.blank('/v3/%s/snapshots/%s/action' % ( fake.PROJECT_ID, fake.SNAPSHOT_ID)) req.api_version_request = mv.get_api_version(mv.BASE_VERSION) self.assertRaises(exception_class, self.controller._update_snapshot_status, req, body=body) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/api/contrib/test_snapshot_manage.py0000664000175000017500000004476300000000000025463 0ustar00zuulzuul00000000000000# Copyright (c) 2015 Huawei Technologies Co., Ltd. # Copyright (c) 2016 Stratoscale, Ltd. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
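# NOTE: illustrative sketch, not part of the original module. The tests below
# exercise the os-snapshot-manage extension, which (per the class docstring
# further down) takes a POST with an existing cinder volume id plus a
# driver-specific reference. A request body in these tests is shaped like:
#
#     {"snapshot": {"volume_id": "<volume uuid>",
#                   "ref": {"fake_key": "fake_ref"}}}
#
# The "fake_key"/"fake_ref" values are the placeholders the tests themselves
# use; a real driver expects its own reference format (for example a
# "source-name" key, as in api_get_manageable_snapshots below).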
from http import HTTPStatus from unittest import mock from urllib.parse import urlencode from oslo_config import cfg import oslo_messaging as messaging from oslo_serialization import jsonutils import webob from cinder.common import constants from cinder import context from cinder import exception from cinder import objects from cinder.tests.unit.api import fakes from cinder.tests.unit import fake_constants as fake from cinder.tests.unit import fake_service from cinder.tests.unit import test CONF = cfg.CONF def app(): # no auth, just let environ['cinder.context'] pass through api = fakes.router_v3.APIRouter() mapper = fakes.urlmap.URLMap() mapper['/v3'] = api return mapper def volume_get(self, context, volume_id, viewable_admin_meta=False): if volume_id == fake.VOLUME_ID: return objects.Volume(context, id=fake.VOLUME_ID, _name_id=fake.VOLUME2_ID, host='fake_host', cluster_name=None, size=1) raise exception.VolumeNotFound(volume_id=volume_id) def api_get_manageable_snapshots(*args, **kwargs): """Replacement for cinder.volume.api.API.get_manageable_snapshots.""" snap_id = 'ffffffff-0000-ffff-0000-ffffffffffff' snaps = [ {'reference': {'source-name': 'snapshot-%s' % snap_id}, 'size': 4, 'extra_info': 'qos_setting:high', 'safe_to_manage': False, 'reason_not_safe': 'snapshot in use', 'cinder_id': snap_id, 'source_reference': {'source-name': 'volume-00000000-ffff-0000-ffff-000000'}}, {'reference': {'source-name': 'mysnap'}, 'size': 5, 'extra_info': 'qos_setting:low', 'safe_to_manage': True, 'reason_not_safe': None, 'cinder_id': None, 'source_reference': {'source-name': 'myvol'}}] return snaps @mock.patch('cinder.volume.api.API.get', volume_get) class SnapshotManageTest(test.TestCase): """Test cases for cinder/api/contrib/snapshot_manage.py The API extension adds a POST /os-snapshot-manage API that is passed a cinder volume id, and a driver-specific reference parameter. If everything is passed correctly, then the cinder.volume.api.API.manage_existing_snapshot method is invoked to manage an existing storage object on the host. In this set of test cases, we are ensuring that the code correctly parses the request structure and raises the correct exceptions when things are not right, and calls down into cinder.volume.api.API.manage_existing_snapshot with the correct arguments. """ def setUp(self): super(SnapshotManageTest, self).setUp() self._admin_ctxt = context.RequestContext(fake.USER_ID, fake.PROJECT_ID, is_admin=True) self._non_admin_ctxt = context.RequestContext(fake.USER_ID, fake.PROJECT_ID, is_admin=False) def _get_resp_post(self, body): """Helper to execute an os-snapshot-manage API call.""" req = webob.Request.blank('/v3/%s/os-snapshot-manage' % fake.PROJECT_ID) req.method = 'POST' req.headers['Content-Type'] = 'application/json' req.environ['cinder.context'] = self._admin_ctxt req.body = jsonutils.dump_as_bytes(body) res = req.get_response(app()) return res @mock.patch( 'cinder.scheduler.rpcapi.SchedulerAPI.manage_existing_snapshot') @mock.patch('cinder.volume.api.API.create_snapshot_in_db', return_value=mock.MagicMock(id=fake.SNAPSHOT_ID)) @mock.patch('cinder.db.sqlalchemy.api.service_get') def test_manage_snapshot_ok(self, mock_db, mock_create_snapshot, mock_rpcapi): """Test successful manage snapshot execution. Tests for correct operation when valid arguments are passed in the request body. We ensure that cinder.volume.api.API.manage_existing got called with the correct arguments, and that we return the correct HTTP code to the caller. 
""" mock_db.return_value = fake_service.fake_service_obj( self._admin_ctxt, binary=constants.VOLUME_BINARY) body = {'snapshot': {'volume_id': fake.VOLUME_ID, 'ref': {'fake_key': 'fake_ref'}}} res = self._get_resp_post(body) self.assertEqual(HTTPStatus.ACCEPTED, res.status_int, res) # Check the db.service_get was called with correct arguments. mock_db.assert_called_once_with( mock.ANY, None, host='fake_host', binary=constants.VOLUME_BINARY, cluster_name=None) # Check the create_snapshot_in_db was called with correct arguments. self.assertEqual(1, mock_create_snapshot.call_count) args = mock_create_snapshot.call_args[0] named_args = mock_create_snapshot.call_args[1] self.assertEqual(fake.VOLUME_ID, args[1].get('id')) self.assertTrue(named_args['commit_quota']) # Check the volume_rpcapi.manage_existing_snapshot was called with # correct arguments. self.assertEqual(1, mock_rpcapi.call_count) args = mock_rpcapi.call_args[0] self.assertEqual({'fake_key': 'fake_ref'}, args[3]) @mock.patch( 'cinder.scheduler.rpcapi.SchedulerAPI.manage_existing_snapshot') @mock.patch('cinder.volume.api.API.create_snapshot_in_db', return_value=mock.MagicMock(id=fake.SNAPSHOT_ID)) @mock.patch('cinder.objects.service.Service.get_by_id') def test_manage_snapshot_ok_with_metadata_null( self, mock_db, mock_create_snapshot, mock_rpcapi): mock_db.return_value = fake_service.fake_service_obj( self._admin_ctxt, binary=constants.VOLUME_BINARY) body = {'snapshot': {'volume_id': fake.VOLUME_ID, 'ref': {'fake_key': 'fake_ref'}, 'name': 'test', 'description': 'test', 'metadata': None}} res = self._get_resp_post(body) self.assertEqual(HTTPStatus.ACCEPTED, res.status_int) args = mock_create_snapshot.call_args[0] # 5th argument of args is metadata. self.assertIsNone(args[5]) @mock.patch( 'cinder.scheduler.rpcapi.SchedulerAPI.manage_existing_snapshot') @mock.patch('cinder.volume.api.API.create_snapshot_in_db', return_value=mock.MagicMock(id=fake.SNAPSHOT_ID)) @mock.patch('cinder.db.sqlalchemy.api.service_get') def test_manage_snapshot_ok_ref_as_string(self, mock_db, mock_create_snapshot, mock_rpcapi): mock_db.return_value = fake_service.fake_service_obj( self._admin_ctxt, binary=constants.VOLUME_BINARY) body = {'snapshot': {'volume_id': fake.VOLUME_ID, 'ref': "string"}} res = self._get_resp_post(body) self.assertEqual(HTTPStatus.ACCEPTED, res.status_int, res) # Check the volume_rpcapi.manage_existing_snapshot was called with # correct arguments. 
self.assertEqual(1, mock_rpcapi.call_count) args = mock_rpcapi.call_args[0] self.assertEqual(body['snapshot']['ref'], args[3]) @mock.patch('cinder.objects.service.Service.is_up', return_value=True, new_callable=mock.PropertyMock) @mock.patch('cinder.volume.rpcapi.VolumeAPI.manage_existing_snapshot') @mock.patch('cinder.volume.api.API.create_snapshot_in_db', return_value=mock.MagicMock(id=fake.SNAPSHOT_ID)) @mock.patch('cinder.db.sqlalchemy.api.service_get') def test_manage_snapshot_disabled(self, mock_db, mock_create_snapshot, mock_rpcapi, mock_is_up): """Test manage snapshot failure due to disabled service.""" mock_db.return_value = fake_service.fake_service_obj(self._admin_ctxt, disabled=True) body = {'snapshot': {'volume_id': fake.VOLUME_ID, 'ref': { 'fake_key': 'fake_ref'}}} res = self._get_resp_post(body) self.assertEqual(HTTPStatus.BAD_REQUEST, res.status_int, res) self.assertEqual(exception.ServiceUnavailable.message, res.json['badRequest']['message']) mock_create_snapshot.assert_not_called() mock_rpcapi.assert_not_called() mock_is_up.assert_not_called() @mock.patch('cinder.objects.service.Service.is_up', return_value=False, new_callable=mock.PropertyMock) @mock.patch('cinder.volume.rpcapi.VolumeAPI.manage_existing_snapshot') @mock.patch('cinder.volume.api.API.create_snapshot_in_db', return_value=mock.MagicMock(id=fake.SNAPSHOT_ID)) @mock.patch('cinder.db.sqlalchemy.api.service_get') def test_manage_snapshot_is_down(self, mock_db, mock_create_snapshot, mock_rpcapi, mock_is_up): """Test manage snapshot failure due to down service.""" mock_db.return_value = fake_service.fake_service_obj(self._admin_ctxt) body = {'snapshot': {'volume_id': fake.VOLUME_ID, 'ref': {'fake_key': 'fake_ref'}}} res = self._get_resp_post(body) self.assertEqual(HTTPStatus.BAD_REQUEST, res.status_int, res) self.assertEqual(exception.ServiceUnavailable.message, res.json['badRequest']['message']) mock_create_snapshot.assert_not_called() mock_rpcapi.assert_not_called() self.assertTrue(mock_is_up.called) def test_manage_snapshot_missing_volume_id(self): """Test correct failure when volume_id is not specified.""" body = {'snapshot': {'ref': 'fake_ref'}} res = self._get_resp_post(body) self.assertEqual(HTTPStatus.BAD_REQUEST, res.status_int) def test_manage_snapshot_missing_ref(self): """Test correct failure when the ref is not specified.""" body = {'snapshot': {'volume_id': fake.VOLUME_ID}} res = self._get_resp_post(body) self.assertEqual(HTTPStatus.BAD_REQUEST, res.status_int) def test_manage_snapshot_error_body(self): """Test correct failure when body is invaild.""" body = {'error_snapshot': {'volume_id': fake.VOLUME_ID}} res = self._get_resp_post(body) self.assertEqual(HTTPStatus.BAD_REQUEST, res.status_int) def test_manage_snapshot_error_volume_id(self): """Test correct failure when volume id is invalid format.""" body = {'snapshot': {'volume_id': 'error_volume_id', 'ref': {}}} res = self._get_resp_post(body) self.assertEqual(HTTPStatus.BAD_REQUEST, res.status_int) self.assertIn("'error_volume_id' is not a 'uuid'", jsonutils.loads(res.body)['badRequest']['message']) def _get_resp_get(self, host, detailed, paging, admin=True): """Helper to execute a GET os-snapshot-manage API call.""" params = {'host': host} if paging: params.update({'marker': '1234', 'limit': 10, 'offset': 4, 'sort': 'reference:asc'}) query_string = "?%s" % urlencode(params) detail = "" if detailed: detail = "/detail" url = "/v3/%s/os-snapshot-manage%s%s" % (fake.PROJECT_ID, detail, query_string) req = webob.Request.blank(url) req.method = 'GET' 
req.headers['Content-Type'] = 'application/json' req.environ['cinder.context'] = (self._admin_ctxt if admin else self._non_admin_ctxt) res = req.get_response(app()) return res @mock.patch('cinder.volume.api.API.get_manageable_snapshots', wraps=api_get_manageable_snapshots) def test_get_manageable_snapshots_non_admin(self, mock_api_manageable): res = self._get_resp_get('fakehost', False, False, admin=False) self.assertEqual(HTTPStatus.FORBIDDEN, res.status_int) self.assertEqual(False, mock_api_manageable.called) res = self._get_resp_get('fakehost', True, False, admin=False) self.assertEqual(HTTPStatus.FORBIDDEN, res.status_int) self.assertEqual(False, mock_api_manageable.called) @mock.patch('cinder.volume.api.API.get_manageable_snapshots', wraps=api_get_manageable_snapshots) def test_get_manageable_snapshots_ok(self, mock_api_manageable): res = self._get_resp_get('fakehost', False, False) snap_name = 'snapshot-ffffffff-0000-ffff-0000-ffffffffffff' exp = {'manageable-snapshots': [{'reference': {'source-name': snap_name}, 'size': 4, 'safe_to_manage': False, 'source_reference': {'source-name': 'volume-00000000-ffff-0000-ffff-000000'}}, {'reference': {'source-name': 'mysnap'}, 'size': 5, 'safe_to_manage': True, 'source_reference': {'source-name': 'myvol'}}]} self.assertEqual(HTTPStatus.OK, res.status_int) self.assertEqual(jsonutils.loads(res.body), exp) mock_api_manageable.assert_called_once_with( self._admin_ctxt, 'fakehost', None, limit=CONF.osapi_max_limit, marker=None, offset=0, sort_dirs=['desc'], sort_keys=['reference']) @mock.patch('cinder.volume.api.API.get_manageable_snapshots', side_effect=messaging.RemoteError( exc_type='InvalidInput', value='marker not found: 1234')) def test_get_manageable_snapshots_non_existent_marker( self, mock_api_manageable): res = self._get_resp_get('fakehost', detailed=False, paging=True) self.assertEqual(HTTPStatus.BAD_REQUEST, res.status_int) self.assertTrue(mock_api_manageable.called) @mock.patch('cinder.volume.api.API.get_manageable_snapshots', wraps=api_get_manageable_snapshots) def test_get_manageable_snapshots_detailed_ok(self, mock_api_manageable): res = self._get_resp_get('fakehost', True, True) snap_id = 'ffffffff-0000-ffff-0000-ffffffffffff' exp = {'manageable-snapshots': [{'reference': {'source-name': 'snapshot-%s' % snap_id}, 'size': 4, 'safe_to_manage': False, 'cinder_id': snap_id, 'reason_not_safe': 'snapshot in use', 'extra_info': 'qos_setting:high', 'source_reference': {'source-name': 'volume-00000000-ffff-0000-ffff-000000'}}, {'reference': {'source-name': 'mysnap'}, 'size': 5, 'cinder_id': None, 'safe_to_manage': True, 'reason_not_safe': None, 'extra_info': 'qos_setting:low', 'source_reference': {'source-name': 'myvol'}}]} self.assertEqual(HTTPStatus.OK, res.status_int) self.assertEqual(jsonutils.loads(res.body), exp) mock_api_manageable.assert_called_once_with( self._admin_ctxt, 'fakehost', None, limit=10, marker='1234', offset=4, sort_dirs=['asc'], sort_keys=['reference']) @mock.patch('cinder.volume.api.API.get_manageable_snapshots', side_effect=messaging.RemoteError( exc_type='InvalidInput', value='marker not found: 1234')) def test_get_manageable_snapshots_non_existent_marker_detailed( self, mock_api_manageable): res = self._get_resp_get('fakehost', detailed=True, paging=True) self.assertEqual(HTTPStatus.BAD_REQUEST, res.status_int) self.assertTrue(mock_api_manageable.called) @mock.patch('cinder.objects.service.Service.is_up', return_value=True) @mock.patch('cinder.db.sqlalchemy.api.service_get') def 
test_get_manageable_snapshots_disabled(self, mock_db, mock_is_up): mock_db.return_value = fake_service.fake_service_obj(self._admin_ctxt, disabled=True) res = self._get_resp_get('host_ok', False, True) self.assertEqual(HTTPStatus.BAD_REQUEST, res.status_int, res) self.assertEqual(exception.ServiceUnavailable.message, res.json['badRequest']['message']) mock_is_up.assert_not_called() @mock.patch('cinder.objects.service.Service.is_up', return_value=False, new_callable=mock.PropertyMock) @mock.patch('cinder.db.sqlalchemy.api.service_get') def test_get_manageable_snapshots_is_down(self, mock_db, mock_is_up): mock_db.return_value = fake_service.fake_service_obj(self._admin_ctxt) res = self._get_resp_get('host_ok', False, True) self.assertEqual(HTTPStatus.BAD_REQUEST, res.status_int, res) self.assertEqual(exception.ServiceUnavailable.message, res.json['badRequest']['message']) self.assertTrue(mock_is_up.called) @mock.patch( 'cinder.scheduler.rpcapi.SchedulerAPI.manage_existing_snapshot') @mock.patch('cinder.volume.api.API.create_snapshot_in_db', return_value=mock.MagicMock(id=fake.SNAPSHOT_ID)) @mock.patch('cinder.objects.service.Service.get_by_id') def test_manage_snapshot_with_null_validate( self, mock_db, mock_create_snapshot, mock_rpcapi): mock_db.return_value = fake_service.fake_service_obj( self._admin_ctxt, binary=constants.VOLUME_BINARY) body = {'snapshot': {'volume_id': fake.VOLUME_ID, 'ref': {'fake_key': 'fake_ref'}, 'name': None, 'description': None}} res = self._get_resp_post(body) self.assertEqual(HTTPStatus.ACCEPTED, res.status_int, res) self.assertIn('snapshot', jsonutils.loads(res.body)) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/api/contrib/test_snapshot_unmanage.py0000664000175000017500000001040300000000000026006 0ustar00zuulzuul00000000000000# Copyright (c) 2015 Huawei Technologies Co., Ltd. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from http import HTTPStatus from unittest import mock from oslo_serialization import jsonutils import webob from cinder import context from cinder import exception from cinder.objects import fields from cinder.tests.unit.api import fakes from cinder.tests.unit import fake_constants as fake from cinder.tests.unit import fake_snapshot from cinder.tests.unit import test # This list of fake snapshot is used by our tests. snapshot_id = fake.SNAPSHOT_ID bad_snp_id = fake.WILL_NOT_BE_FOUND_ID def app(): # no auth, just let environ['cinder.context'] pass through api = fakes.router_v3.APIRouter() mapper = fakes.urlmap.URLMap() mapper['/v3'] = api return mapper def api_snapshot_get(self, context, snp_id): """Replacement for cinder.volume.api.API.get_snapshot. We stub the cinder.volume.api.API.get_snapshot method to check for the existence of snapshot_id in our list of fake snapshots and raise an exception if the specified snapshot ID is not in our list. 
""" snapshot = {'id': fake.SNAPSHOT_ID, 'progress': '100%', 'volume_id': fake.VOLUME_ID, 'project_id': fake.PROJECT_ID, 'status': fields.SnapshotStatus.AVAILABLE} if snp_id == snapshot_id: snapshot_objct = fake_snapshot.fake_snapshot_obj(context, **snapshot) return snapshot_objct else: raise exception.SnapshotNotFound(snapshot_id=snp_id) @mock.patch('cinder.volume.api.API.get_snapshot', api_snapshot_get) class SnapshotUnmanageTest(test.TestCase): """Test cases for cinder/api/contrib/snapshot_unmanage.py The API extension adds an action to snapshots, "os-unmanage", which will effectively issue a delete operation on the snapshot, but with a flag set that means that a different method will be invoked on the driver, so that the snapshot is not actually deleted in the storage backend. In this set of test cases, we are ensuring that the code correctly parses the request structure and raises the correct exceptions when things are not right, and calls down into cinder.volume.api.API.delete_snapshot with the correct arguments. """ def _get_resp(self, snapshot_id): """Helper to build an os-unmanage req for the specified snapshot_id.""" req = webob.Request.blank('/v3/%s/snapshots/%s/action' % ( fake.PROJECT_ID, snapshot_id)) req.method = 'POST' req.headers['Content-Type'] = 'application/json' req.environ['cinder.context'] = context.RequestContext(fake.USER_ID, fake.PROJECT_ID, True) body = {'os-unmanage': ''} req.body = jsonutils.dump_as_bytes(body) res = req.get_response(app()) return res @mock.patch('cinder.db.conditional_update', return_value=1) @mock.patch('cinder.db.snapshot_update') @mock.patch('cinder.volume.rpcapi.VolumeAPI.delete_snapshot') def test_unmanage_snapshot_ok(self, mock_rpcapi, mock_db_update, mock_conditional_update): """Return success for valid and unattached volume.""" res = self._get_resp(snapshot_id) self.assertEqual(1, mock_rpcapi.call_count) self.assertEqual(3, len(mock_rpcapi.call_args[0])) self.assertEqual(0, len(mock_rpcapi.call_args[1])) self.assertEqual(HTTPStatus.ACCEPTED, res.status_int, res) def test_unmanage_snapshot_bad_snapshot_id(self): """Return 404 if the volume does not exist.""" res = self._get_resp(bad_snp_id) self.assertEqual(HTTPStatus.NOT_FOUND, res.status_int, res) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/api/contrib/test_types_extra_specs.py0000664000175000017500000005366300000000000026057 0ustar00zuulzuul00000000000000# Copyright (c) 2011 Zadara Storage Inc. # Copyright (c) 2011 OpenStack Foundation # Copyright 2011 University of Southern California # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from unittest import mock import ddt from oslo_utils import timeutils import webob from cinder.api.contrib import types_extra_specs from cinder import exception from cinder.image import glance as image_store from cinder.policies import type_extra_specs as extra_specs_policy from cinder.tests.unit.api import fakes from cinder.tests.unit import fake_constants as fake from cinder.tests.unit import test import cinder.wsgi user_visible_extra_specs = { k: '%s_value' % k for k in extra_specs_policy.USER_VISIBLE_EXTRA_SPECS } volume_type_extra_specs = { **user_visible_extra_specs, "key1": "value1", "key2": "value2", "key3": "value3", "key4": "value4", "key5": "value5", } def return_create_volume_type_extra_specs(context, volume_type_id, extra_specs): return volume_type_extra_specs def return_volume_type_extra_specs(context, volume_type_id): return volume_type_extra_specs def return_volume_type(context, volume_type_id, expected_fields=None): return dict(id=id, name='vol_type_%s' % id, description='vol_type_desc_%s' % id, extra_specs=volume_type_extra_specs, created_at=timeutils.utcnow(), updated_at=timeutils.utcnow(), deleted_at=timeutils.utcnow()) @ddt.ddt class VolumeTypesExtraSpecsTest(test.TestCase): def setUp(self): super(VolumeTypesExtraSpecsTest, self).setUp() self.flags(host='fake') self.mock_object(cinder.db, 'volume_type_get', return_volume_type) self.api_path = '/v3/%s/types/%s/extra_specs' % ( fake.PROJECT_ID, fake.VOLUME_TYPE_ID) self.controller = types_extra_specs.VolumeTypeExtraSpecsController() @ddt.data( {'is_admin': True, 'visible_specs': volume_type_extra_specs}, {'is_admin': False, 'visible_specs': user_visible_extra_specs}, ) @ddt.unpack def test_index(self, is_admin, visible_specs): self.mock_object(cinder.db, 'volume_type_extra_specs_get', return_volume_type_extra_specs) req = fakes.HTTPRequest.blank(self.api_path, use_admin_context=is_admin) res_dict = self.controller.index(req, fake.VOLUME_TYPE_ID) self.assertEqual(visible_specs, res_dict['extra_specs']) def test_index_no_data(self): self.mock_object(cinder.db, 'volume_type_extra_specs_get', return_value={}) req = fakes.HTTPRequest.blank(self.api_path, use_admin_context=True) res_dict = self.controller.index(req, fake.VOLUME_TYPE_ID) self.assertEqual(0, len(res_dict['extra_specs'])) @ddt.data( {'is_admin': True, 'spec': 'key5', 'is_sensitive': True}, {'is_admin': False, 'spec': 'key5', 'is_sensitive': True}, # multiattach is a user visible extra spec (not sensitve) {'is_admin': True, 'spec': 'multiattach', 'is_sensitive': False}, {'is_admin': False, 'spec': 'multiattach', 'is_sensitive': False}, ) @ddt.unpack def test_show(self, is_admin, spec, is_sensitive): self.mock_object(cinder.db, 'volume_type_extra_specs_get', return_volume_type_extra_specs) req = fakes.HTTPRequest.blank(self.api_path + '/' + spec, use_admin_context=is_admin) if is_sensitive and not is_admin: self.assertRaises(exception.VolumeTypeExtraSpecsNotFound, self.controller.show, req, fake.VOLUME_ID, spec) else: res_dict = self.controller.show(req, fake.VOLUME_TYPE_ID, spec) self.assertEqual(volume_type_extra_specs[spec], res_dict[spec]) def test_show_spec_not_found(self): self.mock_object(cinder.db, 'volume_type_extra_specs_get', return_value={}) req = fakes.HTTPRequest.blank(self.api_path + '/key6') self.assertRaises(exception.VolumeTypeExtraSpecsNotFound, self.controller.show, req, fake.VOLUME_ID, 'key6') def test_delete(self): self.mock_object(cinder.db, 'volume_type_extra_specs_delete') self.assertEqual(0, len(self.notifier.notifications)) req = 
fakes.HTTPRequest.blank(self.api_path + '/key5', use_admin_context=True) self.controller.delete(req, fake.VOLUME_ID, 'key5') self.assertEqual(1, len(self.notifier.notifications)) self.assertIn('created_at', self.notifier.notifications[0]['payload']) self.assertIn('updated_at', self.notifier.notifications[0]['payload']) self.assertIn('deleted_at', self.notifier.notifications[0]['payload']) def test_delete_not_found(self): self.mock_object(cinder.db, 'volume_type_extra_specs_delete', side_effect=exception.VolumeTypeExtraSpecsNotFound( "Not Found")) req = fakes.HTTPRequest.blank(self.api_path + '/key6', use_admin_context=True) self.assertRaises(exception.VolumeTypeExtraSpecsNotFound, self.controller.delete, req, fake.VOLUME_ID, 'key6') def test_create(self): self.mock_object(cinder.db, 'volume_type_extra_specs_update_or_create', return_create_volume_type_extra_specs) body = {"extra_specs": {"key1": "value1"}} self.assertEqual(0, len(self.notifier.notifications)) req = fakes.HTTPRequest.blank(self.api_path, use_admin_context=True) res_dict = self.controller.create(req, fake.VOLUME_ID, body=body) self.assertEqual(1, len(self.notifier.notifications)) self.assertIn('created_at', self.notifier.notifications[0]['payload']) self.assertIn('updated_at', self.notifier.notifications[0]['payload']) self.assertEqual('value1', res_dict['extra_specs']['key1']) @mock.patch.object(image_store.GlanceImageService, 'get_stores') def test_create_valid_image_store(self, mock_get_stores): mock_get_stores.return_value = { 'stores': [{ 'default': 'true', 'id': 'cheap' }, { 'id': 'read_only_store', 'read-only': 'true' }] } self.mock_object(cinder.db, 'volume_type_extra_specs_update_or_create', return_create_volume_type_extra_specs) body = {"extra_specs": {"image_service:store_id": "cheap"}} self.assertEqual(0, len(self.notifier.notifications)) req = fakes.HTTPRequest.blank(self.api_path, use_admin_context=True) res_dict = self.controller.create(req, fake.VOLUME_ID, body=body) self.assertEqual(1, len(self.notifier.notifications)) self.assertIn('created_at', self.notifier.notifications[0]['payload']) self.assertIn('updated_at', self.notifier.notifications[0]['payload']) self.assertEqual( 'cheap', res_dict['extra_specs']['image_service:store_id']) @mock.patch.object(image_store.GlanceImageService, 'get_stores') def test_create_invalid_image_store(self, mock_get_stores): mock_get_stores.return_value = { 'stores': [{ 'default': 'true', 'id': 'cheap' }, { 'id': 'read_only_store', 'read-only': 'true' }] } body = {"extra_specs": {"image_service:store_id": "fast"}} req = fakes.HTTPRequest.blank(self.api_path, use_admin_context=True) self.assertRaises(cinder.exception.GlanceStoreNotFound, self.controller.create, req, fake.VOLUME_ID, body=body) @mock.patch.object(image_store.GlanceImageService, 'get_stores') def test_create_read_only_image_store(self, mock_get_stores): mock_get_stores.return_value = { 'stores': [{ 'default': 'true', 'id': 'cheap' }, { 'id': 'read_only_store', 'read-only': 'true' }] } body = {"extra_specs": {"image_service:store_id": "read_only_store"}} req = fakes.HTTPRequest.blank(self.api_path, use_admin_context=True) self.assertRaises(cinder.exception.GlanceStoreReadOnly, self.controller.create, req, fake.VOLUME_ID, body=body) @mock.patch.object(cinder.db, 'volume_type_extra_specs_update_or_create') def test_create_key_allowed_chars( self, volume_type_extra_specs_update_or_create): mock_return_value = {"key1": "value1", "key2": "value2", "key3": "value3", "key4": "value4", "key5": "value5"} 
volume_type_extra_specs_update_or_create.\ return_value = mock_return_value body = {"extra_specs": {"other_alphanum.-_:": "value1"}} self.assertEqual(0, len(self.notifier.notifications)) req = fakes.HTTPRequest.blank(self.api_path, use_admin_context=True) res_dict = self.controller.create(req, fake.VOLUME_ID, body=body) self.assertEqual(1, len(self.notifier.notifications)) self.assertEqual('value1', res_dict['extra_specs']['other_alphanum.-_:']) @mock.patch.object(cinder.db, 'volume_type_extra_specs_update_or_create') def test_create_too_many_keys_allowed_chars( self, volume_type_extra_specs_update_or_create): mock_return_value = {"key1": "value1", "key2": "value2", "key3": "value3", "key4": "value4", "key5": "value5"} volume_type_extra_specs_update_or_create.\ return_value = mock_return_value body = {"extra_specs": {"other_alphanum.-_:": "value1", "other2_alphanum.-_:": "value2", "other3_alphanum.-_:": "value3"}} self.assertEqual(0, len(self.notifier.notifications)) req = fakes.HTTPRequest.blank(self.api_path, use_admin_context=True) res_dict = self.controller.create(req, fake.VOLUME_ID, body=body) self.assertEqual(1, len(self.notifier.notifications)) self.assertEqual('value1', res_dict['extra_specs']['other_alphanum.-_:']) self.assertEqual('value2', res_dict['extra_specs']['other2_alphanum.-_:']) self.assertEqual('value3', res_dict['extra_specs']['other3_alphanum.-_:']) @mock.patch.object(image_store.GlanceImageService, 'get_stores') def test_update_valid_image_store(self, mock_get_stores): mock_get_stores.return_value = { 'stores': [{ 'default': 'true', 'id': 'cheap' }, { 'id': 'fast', }, { 'id': 'read_only_store', 'read-only': 'true' }] } self.mock_object(cinder.db, 'volume_type_extra_specs_update_or_create', return_create_volume_type_extra_specs) body = {"image_service:store_id": "fast"} self.assertEqual(0, len(self.notifier.notifications)) req = fakes.HTTPRequest.blank( self.api_path + "/image_service:store_id", use_admin_context=True) res_dict = self.controller.update(req, fake.VOLUME_ID, "image_service:store_id", body=body) self.assertEqual(1, len(self.notifier.notifications)) self.assertIn('created_at', self.notifier.notifications[0]['payload']) self.assertIn('updated_at', self.notifier.notifications[0]['payload']) self.assertEqual( 'fast', res_dict['image_service:store_id']) @mock.patch.object(image_store.GlanceImageService, 'get_stores') def test_update_invalid_image_store(self, mock_get_stores): mock_get_stores.return_value = { 'stores': [{ 'default': 'true', 'id': 'cheap' }, { 'id': 'fast', }, { 'id': 'read_only_store', 'read-only': 'true' }] } self.mock_object(cinder.db, 'volume_type_extra_specs_update_or_create', return_create_volume_type_extra_specs) body = {"image_service:store_id": "very_fast"} self.assertEqual(0, len(self.notifier.notifications)) req = fakes.HTTPRequest.blank( self.api_path + "/image_service:store_id", use_admin_context=True) self.assertRaises(cinder.exception.GlanceStoreNotFound, self.controller.update, req, fake.VOLUME_ID, "image_service:store_id", body=body) @mock.patch.object(image_store.GlanceImageService, 'get_stores') def test_update_read_only_image_store(self, mock_get_stores): mock_get_stores.return_value = { 'stores': [{ 'default': 'true', 'id': 'cheap' }, { 'id': 'fast', }, { 'id': 'read_only_store', 'read-only': 'true' }] } self.mock_object(cinder.db, 'volume_type_extra_specs_update_or_create', return_create_volume_type_extra_specs) body = {"image_service:store_id": "read_only_store"} self.assertEqual(0, len(self.notifier.notifications)) req 
= fakes.HTTPRequest.blank( self.api_path + "/image_service:store_id", use_admin_context=True) self.assertRaises(cinder.exception.GlanceStoreReadOnly, self.controller.update, req, fake.VOLUME_ID, "image_service:store_id", body=body) def test_update_item(self): self.mock_object(cinder.db, 'volume_type_extra_specs_update_or_create', return_create_volume_type_extra_specs) body = {"key1": "value1"} self.assertEqual(0, len(self.notifier.notifications)) req = fakes.HTTPRequest.blank(self.api_path + '/key1', use_admin_context=True) res_dict = self.controller.update(req, fake.VOLUME_ID, 'key1', body=body) self.assertEqual(1, len(self.notifier.notifications)) self.assertIn('created_at', self.notifier.notifications[0]['payload']) self.assertIn('updated_at', self.notifier.notifications[0]['payload']) self.assertEqual('value1', res_dict['key1']) def test_update_item_too_many_keys(self): self.mock_object(cinder.db, 'volume_type_extra_specs_update_or_create', return_create_volume_type_extra_specs) body = {"key1": "value1", "key2": "value2"} req = fakes.HTTPRequest.blank(self.api_path + '/key1', use_admin_context=True) self.assertRaises(exception.ValidationError, self.controller.update, req, fake.VOLUME_ID, 'key1', body=body) def test_update_item_body_uri_mismatch(self): self.mock_object(cinder.db, 'volume_type_extra_specs_update_or_create', return_create_volume_type_extra_specs) body = {"key1": "value1"} req = fakes.HTTPRequest.blank(self.api_path + '/bad', use_admin_context=True) self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update, req, fake.VOLUME_ID, 'bad', body=body) def _extra_specs_empty_update(self, body): req = fakes.HTTPRequest.blank('/v3/%s/types/%s/extra_specs' % ( fake.PROJECT_ID, fake.VOLUME_TYPE_ID), use_admin_context=True) req.method = 'POST' self.assertRaises(exception.ValidationError, self.controller.update, req, fake.VOLUME_ID, body=body) def test_update_no_body(self): self._extra_specs_empty_update(body=None) def test_update_empty_body(self): self._extra_specs_empty_update(body={}) def _extra_specs_create_bad_body(self, body): req = fakes.HTTPRequest.blank('/v3/%s/types/%s/extra_specs' % ( fake.PROJECT_ID, fake.VOLUME_TYPE_ID), use_admin_context=True) req.method = 'POST' self.assertRaises(exception.ValidationError, self.controller.create, req, fake.VOLUME_ID, body=body) def test_create_no_body(self): self._extra_specs_create_bad_body(body=None) def test_create_missing_volume(self): body = {'foo': {'a': 'b'}} self._extra_specs_create_bad_body(body=body) def test_create_malformed_entity(self): body = {'extra_specs': 'string'} self._extra_specs_create_bad_body(body=body) def test_create_invalid_key(self): body = {"extra_specs": {"ke/y1": "value1"}} self._extra_specs_create_bad_body(body=body) def test_create_invalid_too_many_key(self): body = {"key1": "value1", "ke/y2": "value2", "key3": "value3"} self._extra_specs_create_bad_body(body=body) def test_create_volumes_exist(self): self.mock_object(cinder.db, 'volume_type_extra_specs_update_or_create', return_create_volume_type_extra_specs) body = {"extra_specs": {"key1": "value1"}} req = fakes.HTTPRequest.blank(self.api_path, use_admin_context=True) with mock.patch.object( cinder.db, 'volume_get_all', return_value=['a']): req = fakes.HTTPRequest.blank('/v3/%s/types/%s/extra_specs' % ( fake.PROJECT_ID, fake.VOLUME_TYPE_ID), use_admin_context=True) req.method = 'POST' body = {"extra_specs": {"key1": "value1"}} req = fakes.HTTPRequest.blank(self.api_path, use_admin_context=True) self.assertRaises(webob.exc.HTTPBadRequest, 
self.controller.create, req, fake.VOLUME_ID, body=body) @ddt.data({'extra_specs': {'a' * 256: 'a'}}, {'extra_specs': {'a': 'a' * 256}}, {'extra_specs': {'': 'a'}}, {'extra_specs': {' ': 'a'}}) def test_create_with_invalid_extra_specs(self, body): req = fakes.HTTPRequest.blank('/v3/%s/types/%s/extra_specs' % ( fake.PROJECT_ID, fake.VOLUME_TYPE_ID), use_admin_context=True) req.method = 'POST' self.assertRaises(exception.ValidationError, self.controller.create, req, fake.VOLUME_ID, body=body) @mock.patch('cinder.volume.volume_types.get_volume_type_extra_specs') def test_check_cacheable(self, get_extra_specs): get_extra_specs.return_value = {} specs = {'multiattach': ' True', 'cacheable': ' True'} self.assertRaises(webob.exc.HTTPBadRequest, self.controller._check_cacheable, specs, 'typeid') get_extra_specs.return_value = {'multiattach': ' True'} specs = {'cacheable': ' True'} self.assertRaises(webob.exc.HTTPBadRequest, self.controller._check_cacheable, specs, 'typeid') get_extra_specs.return_value = {'cacheable': ' True'} specs = {'multiattach': ' True'} self.assertRaises(webob.exc.HTTPBadRequest, self.controller._check_cacheable, specs, 'typeid') get_extra_specs.return_value = {'multiattach': ' False'} specs = {'multiattach': ' True'} # Should NOT has exception when calling below line self.controller._check_cacheable(specs, 'typeid') get_extra_specs.return_value = {'multiattach': ' True'} specs = {'multiattach': ' False', 'cacheable': ' True'} # Should NOT setting both at the same time self.assertRaises(webob.exc.HTTPBadRequest, self.controller._check_cacheable, specs, 'typeid') get_extra_specs.return_value = {'multiattach': ' False'} specs = {'multiattach': ' False', 'cacheable': ' True'} # Should NOT has exception when calling below line self.controller._check_cacheable(specs, 'typeid') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/api/contrib/test_types_manage.py0000664000175000017500000010154400000000000024757 0ustar00zuulzuul00000000000000# Copyright 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from unittest import mock import ddt from oslo_utils import strutils import webob from cinder.api.contrib import types_manage from cinder import context from cinder import exception from cinder.tests.unit.api import fakes from cinder.tests.unit import fake_constants as fake from cinder.tests.unit import test from cinder.volume import volume_types DEFAULT_VOLUME_TYPE = fake.VOLUME_TYPE_ID IN_USE_VOLUME_TYPE = fake.VOLUME_TYPE2_ID UPDATE_DESC_ONLY_TYPE = fake.VOLUME_TYPE3_ID UPDATE_NAME_ONLY_TYPE = fake.VOLUME_TYPE4_ID UPDATE_NAME_AFTER_DELETE_TYPE = fake.VOLUME_TYPE5_ID NOT_FOUND_VOLUME_TYPE = fake.WILL_NOT_BE_FOUND_ID def fake_volume_type(id): specs = {"key1": "value1", "key2": "value2", "key3": "value3", "key4": "value4", "key5": "value5"} return dict(id=id, name='vol_type_%s' % id, description='vol_type_desc_%s' % id, extra_specs=specs) def fake_volume_type_updated(id, is_public=True): return dict(id=id, name='vol_type_%s_%s' % (id, id), is_public=is_public, description='vol_type_desc_%s_%s' % ( id, id)) def fake_volume_type_updated_desc_only(id): return dict(id=id, name='vol_type_%s' % id, description='vol_type_desc_%s_%s' % ( id, id)) def return_volume_types_get_volume_type(context, id): if id == fake.WILL_NOT_BE_FOUND_ID: raise exception.VolumeTypeNotFound(volume_type_id=id) return fake_volume_type(id) def return_volume_types_destroy(context, name): if name == fake.WILL_NOT_BE_FOUND_ID: raise exception.VolumeTypeNotFoundByName(volume_type_name=name) pass def return_volume_types_with_volumes_destroy(context, id): if id == IN_USE_VOLUME_TYPE: raise exception.VolumeTypeInUse(volume_type_id=id) pass def return_volume_types_create(context, name, specs, is_public, description): pass def return_volume_types_create_duplicate_type(context, name, specs, is_public, description): raise exception.VolumeTypeExists(id=name) def fake_volume_type_updated_name_only(id): return dict(id=id, name='vol_type_%s_%s' % (id, id), description='vol_type_desc_%s' % id) def fake_volume_type_updated_name_after_delete(id): return dict(id=id, name='vol_type_%s' % id, description='vol_type_desc_%s' % id) def return_volume_types_get_volume_type_updated(id, is_public=True): if id == NOT_FOUND_VOLUME_TYPE: raise exception.VolumeTypeNotFound(volume_type_id=id) if id == UPDATE_DESC_ONLY_TYPE: return fake_volume_type_updated_desc_only(id) if id == UPDATE_NAME_ONLY_TYPE: return fake_volume_type_updated_name_only(id) if id == UPDATE_NAME_AFTER_DELETE_TYPE: return fake_volume_type_updated_name_after_delete(id) # anything else return fake_volume_type_updated(id, is_public=is_public) def return_volume_types_get_by_name(context, name): if name == NOT_FOUND_VOLUME_TYPE: raise exception.VolumeTypeNotFoundByName(volume_type_name=name) return fake_volume_type(name.split("_")[2]) def return_volume_types_get_default(): return fake_volume_type(DEFAULT_VOLUME_TYPE) def return_volume_types_get_default_not_found(): return {} @ddt.ddt class VolumeTypesManageApiTest(test.TestCase): def setUp(self): super(VolumeTypesManageApiTest, self).setUp() self.flags(host='fake') self.controller = types_manage.VolumeTypesManageController() """to reset notifier drivers left over from other api/contrib tests""" def test_volume_types_delete(self): self.mock_object(volume_types, 'get_volume_type', return_volume_types_get_volume_type) self.mock_object(volume_types, 'destroy', return_volume_types_destroy) req = fakes.HTTPRequest.blank('/v3/%s/types/%s' % ( fake.PROJECT_ID, DEFAULT_VOLUME_TYPE), use_admin_context=True) self.assertEqual(0, 
len(self.notifier.notifications)) self.controller._delete(req, DEFAULT_VOLUME_TYPE) self.assertEqual(1, len(self.notifier.notifications)) def test_volume_types_delete_not_found(self): self.mock_object(volume_types, 'get_volume_type', return_volume_types_get_volume_type) self.mock_object(volume_types, 'destroy', return_volume_types_destroy) self.assertEqual(0, len(self.notifier.notifications)) req = fakes.HTTPRequest.blank('/v3/%s/types/%s' % ( fake.PROJECT_ID, NOT_FOUND_VOLUME_TYPE), use_admin_context=True) self.assertRaises(exception.VolumeTypeNotFound, self.controller._delete, req, NOT_FOUND_VOLUME_TYPE) self.assertEqual(1, len(self.notifier.notifications)) def test_volume_types_with_volumes_destroy(self): self.mock_object(volume_types, 'get_volume_type', return_volume_types_get_volume_type) self.mock_object(volume_types, 'destroy', return_volume_types_with_volumes_destroy) req = fakes.HTTPRequest.blank('/v3/%s/types/%s' % ( fake.PROJECT_ID, DEFAULT_VOLUME_TYPE), use_admin_context=True) self.assertEqual(0, len(self.notifier.notifications)) self.controller._delete(req, DEFAULT_VOLUME_TYPE) self.assertEqual(1, len(self.notifier.notifications)) @mock.patch('cinder.volume.volume_types.destroy') @mock.patch('cinder.volume.volume_types.get_volume_type') @mock.patch('cinder.policy.authorize') def test_volume_types_delete_with_non_admin(self, mock_policy_authorize, mock_get, mock_destroy): # allow policy authorized user to delete type mock_policy_authorize.return_value = None mock_get.return_value = \ {'extra_specs': {"key1": "value1"}, 'id': DEFAULT_VOLUME_TYPE, 'name': 'vol_type_1', 'description': 'vol_type_desc_%s' % DEFAULT_VOLUME_TYPE} mock_destroy.side_effect = return_volume_types_destroy req = fakes.HTTPRequest.blank('/v3/%s/types/%s' % (fake.PROJECT_ID, DEFAULT_VOLUME_TYPE), use_admin_context=False) self.assertEqual(0, len(self.notifier.notifications)) self.controller._delete(req, DEFAULT_VOLUME_TYPE) self.assertEqual(1, len(self.notifier.notifications)) # non policy authorized user fails to delete type mock_policy_authorize.side_effect = ( exception.PolicyNotAuthorized(action='type_delete')) self.assertRaises(exception.PolicyNotAuthorized, self.controller._delete, req, DEFAULT_VOLUME_TYPE) def test_create(self): self.mock_object(volume_types, 'create', return_volume_types_create) self.mock_object(volume_types, 'get_volume_type_by_name', return_volume_types_get_by_name) body = {"volume_type": {"name": "vol_type_1", "os-volume-type-access:is_public": True, "extra_specs": {"key1": "value1"}}} req = fakes.HTTPRequest.blank('/v3/%s/types' % fake.PROJECT_ID, use_admin_context=True) self.assertEqual(0, len(self.notifier.notifications)) res_dict = self.controller._create(req, body=body) self.assertEqual(1, len(self.notifier.notifications)) id = res_dict['volume_type']['id'] self._check_test_results(res_dict, { 'expected_name': 'vol_type_1', 'expected_desc': 'vol_type_desc_%s' % id}) @mock.patch('cinder.volume.volume_types.create') @mock.patch('cinder.volume.volume_types.get_volume_type_by_name') def test_create_with_description_of_zero_length( self, mock_get_volume_type_by_name, mock_create_type): mock_get_volume_type_by_name.return_value = \ {'extra_specs': {"key1": "value1"}, 'id': DEFAULT_VOLUME_TYPE, 'name': 'vol_type_1', 'description': ''} type_description = "" body = {"volume_type": {"name": "vol_type_1", "description": type_description, "extra_specs": {"key1": "value1"}}} req = fakes.HTTPRequest.blank('/v3/%s/types' % fake.PROJECT_ID, use_admin_context=True) res_dict = 
self.controller._create(req, body=body) self._check_test_results(res_dict, { 'expected_name': 'vol_type_1', 'expected_desc': ''}) def test_create_type_with_name_too_long(self): type_name = 'a' * 256 body = {"volume_type": {"name": type_name, "extra_specs": {"key1": "value1"}}} req = fakes.HTTPRequest.blank('/v3/%s/types' % fake.PROJECT_ID) self.assertRaises(exception.ValidationError, self.controller._create, req, body=body) def test_create_type_with_description_too_long(self): type_description = 'a' * 256 body = {"volume_type": {"name": "vol_type_1", "description": type_description, "extra_specs": {"key1": "value1"}}} req = fakes.HTTPRequest.blank('/v3/%s/types' % fake.PROJECT_ID) self.assertRaises(exception.ValidationError, self.controller._create, req, body=body) def test_create_duplicate_type_fail(self): self.mock_object(volume_types, 'create', return_volume_types_create_duplicate_type) self.mock_object(volume_types, 'get_volume_type_by_name', return_volume_types_get_by_name) body = {"volume_type": {"name": "vol_type_1", "extra_specs": {"key1": "value1"}}} req = fakes.HTTPRequest.blank('/v3/%s/types' % fake.PROJECT_ID, use_admin_context=True) self.assertRaises(webob.exc.HTTPConflict, self.controller._create, req, body=body) def test_create_type_with_invalid_is_public(self): body = {"volume_type": {"name": "vol_type_1", "os-volume-type-access:is_public": "fake", "description": "test description", "extra_specs": {"key1": "value1"}}} req = fakes.HTTPRequest.blank('/v3/%s/types' % fake.PROJECT_ID) self.assertRaises(exception.ValidationError, self.controller._create, req, body=body) @ddt.data('0', 'f', 'false', 'off', 'n', 'no', '1', 't', 'true', 'on', 'y', 'yes') @mock.patch.object(volume_types, "get_volume_type_by_name") @mock.patch.object(volume_types, "create") @mock.patch("cinder.api.openstack.wsgi.Request.cache_resource") @mock.patch("cinder.api.views.types.ViewBuilder.show") def test_create_type_with_valid_is_public_in_string( self, is_public, mock_show, mock_cache_resource, mock_create, mock_get): boolean_is_public = strutils.bool_from_string(is_public) ctxt = context.RequestContext(fake.USER_ID, fake.PROJECT_ID, True) body = {"volume_type": {"name": "vol_type_1", "os-volume-type-access:is_public": is_public, "extra_specs": {"key1": "value1"}}} req = fakes.HTTPRequest.blank('/v3/%s/types' % fake.PROJECT_ID) req.environ['cinder.context'] = ctxt self.controller._create(req, body=body) mock_create.assert_called_once_with( ctxt, 'vol_type_1', {'key1': 'value1'}, boolean_is_public, description=None) def _create_volume_type_bad_body(self, body): req = fakes.HTTPRequest.blank('/v3/%s/types' % fake.PROJECT_ID) req.method = 'POST' self.assertRaises(exception.ValidationError, self.controller._create, req, body=body) def test_create_no_body(self): self._create_volume_type_bad_body(body=None) def test_create_missing_volume(self): body = {'foo': {'a': 'b'}} self._create_volume_type_bad_body(body=body) def test_create_malformed_entity(self): body = {'volume_type': 'string'} self._create_volume_type_bad_body(body=body) @mock.patch('cinder.volume.volume_types.create') @mock.patch('cinder.volume.volume_types.get_volume_type_by_name') @mock.patch('cinder.policy.authorize') def test_create_with_none_admin(self, mock_policy_authorize, mock_get_volume_type_by_name, mock_create_type): # allow policy authorized user to create type mock_policy_authorize.return_value = None mock_get_volume_type_by_name.return_value = \ {'extra_specs': {"key1": "value1"}, 'id': DEFAULT_VOLUME_TYPE, 'name': 'vol_type_1', 
'description': 'vol_type_desc_1'} body = {"volume_type": {"name": "vol_type_1", "os-volume-type-access:is_public": True, "extra_specs": {"key1": "value1"}}} req = fakes.HTTPRequest.blank('/v3/%s/types' % fake.PROJECT_ID, use_admin_context=False) self.assertEqual(0, len(self.notifier.notifications)) res_dict = self.controller._create(req, body=body) self.assertEqual(1, len(self.notifier.notifications)) self._check_test_results(res_dict, { 'expected_name': 'vol_type_1', 'expected_desc': 'vol_type_desc_1'}) # non policy authorized user fails to create type mock_policy_authorize.side_effect = ( exception.PolicyNotAuthorized(action='type_create')) self.assertRaises(exception.PolicyNotAuthorized, self.controller._create, req, body=body) @ddt.data({'a' * 256: 'a'}, {'a': 'a' * 256}, {'': 'a'}, 'foo', None) def test_create_type_with_invalid_extra_specs(self, value): body = {"volume_type": {"name": "vol_type_1", "os-volume-type-access:is_public": False, "description": "test description"}} body['volume_type']['extra_specs'] = value req = fakes.HTTPRequest.blank('/v3/%s/types' % fake.PROJECT_ID) self.assertRaises(exception.ValidationError, self.controller._create, req, body=body) @mock.patch('cinder.volume.volume_types.update') @mock.patch('cinder.volume.volume_types.get_volume_type') def test_update(self, mock_get, mock_update): mock_get.return_value = return_volume_types_get_volume_type_updated( DEFAULT_VOLUME_TYPE, is_public=False) body = {"volume_type": {"is_public": False}} req = fakes.HTTPRequest.blank('/v3/%s/types/%s' % ( fake.PROJECT_ID, DEFAULT_VOLUME_TYPE), use_admin_context=True) req.method = 'PUT' self.assertEqual(0, len(self.notifier.notifications)) res_dict = self.controller._update(req, DEFAULT_VOLUME_TYPE, body=body) self.assertEqual(1, len(self.notifier.notifications)) self._check_test_results( res_dict, {'expected_desc': 'vol_type_desc_%s_%s' % (DEFAULT_VOLUME_TYPE, DEFAULT_VOLUME_TYPE), 'expected_name': 'vol_type_%s_%s' % (DEFAULT_VOLUME_TYPE, DEFAULT_VOLUME_TYPE), 'is_public': False}) @ddt.data('0', 'f', 'false', 'off', 'n', 'no', '1', 't', 'true', 'on', 'y', 'yes') @mock.patch('cinder.volume.volume_types.update') @mock.patch('cinder.volume.volume_types.get_volume_type') @mock.patch("cinder.api.openstack.wsgi.Request.cache_resource") @mock.patch("cinder.api.views.types.ViewBuilder.show") def test_update_with_valid_is_public_in_string( self, is_public, mock_show, mock_cache_resource, mock_get, mock_update): body = {"volume_type": {"is_public": is_public}} req = fakes.HTTPRequest.blank('/v3/%s/types/%s' % ( fake.PROJECT_ID, DEFAULT_VOLUME_TYPE), use_admin_context=True) req.method = 'PUT' ctxt = context.RequestContext(fake.USER_ID, fake.PROJECT_ID, True) req.environ['cinder.context'] = ctxt boolean_is_public = strutils.bool_from_string(is_public) self.controller._update(req, DEFAULT_VOLUME_TYPE, body=body) mock_update.assert_called_once_with( ctxt, DEFAULT_VOLUME_TYPE, None, None, is_public=boolean_is_public) @mock.patch('cinder.volume.volume_types.update') @mock.patch('cinder.volume.volume_types.get_volume_type') def test_update_type_with_description_having_length_zero( self, mock_get_volume_type, mock_type_update): mock_get_volume_type.return_value = \ {'id': DEFAULT_VOLUME_TYPE, 'name': 'vol_type_1', 'description': ''} type_description = "" body = {"volume_type": {"description": type_description}} req = fakes.HTTPRequest.blank('/v3/%s/types/%s' % ( fake.PROJECT_ID, DEFAULT_VOLUME_TYPE), use_admin_context=True) req.method = 'PUT' resp = self.controller._update(req, 
DEFAULT_VOLUME_TYPE, body=body) self._check_test_results(resp, {'expected_desc': '', 'expected_name': 'vol_type_1'}) def test_update_type_with_name_too_long(self): type_name = 'a' * 256 body = {"volume_type": {"name": type_name, "description": ""}} req = fakes.HTTPRequest.blank('/v3/%s/types/%s' % ( fake.PROJECT_ID, DEFAULT_VOLUME_TYPE)) req.method = 'PUT' self.assertRaises(exception.ValidationError, self.controller._update, req, DEFAULT_VOLUME_TYPE, body=body) def test_update_type_with_description_too_long(self): type_description = 'a' * 256 body = {"volume_type": {"description": type_description}} req = fakes.HTTPRequest.blank('/v3/%s/types/%s' % ( fake.PROJECT_ID, DEFAULT_VOLUME_TYPE)) req.method = 'PUT' self.assertRaises(exception.ValidationError, self.controller._update, req, DEFAULT_VOLUME_TYPE, body=body) @mock.patch('cinder.volume.volume_types.get_volume_type') @mock.patch('cinder.volume.volume_types.update') def test_update_non_exist(self, mock_update, mock_get_volume_type): mock_get_volume_type.side_effect = exception.VolumeTypeNotFound( volume_type_id=NOT_FOUND_VOLUME_TYPE) body = {"volume_type": {"name": "vol_type_1_1", "description": "vol_type_desc_1_1"}} req = fakes.HTTPRequest.blank('/v3/%s/types/%s' % ( fake.PROJECT_ID, NOT_FOUND_VOLUME_TYPE), use_admin_context=True) req.method = 'PUT' self.assertEqual(0, len(self.notifier.notifications)) self.assertRaises(exception.VolumeTypeNotFound, self.controller._update, req, NOT_FOUND_VOLUME_TYPE, body=body) self.assertEqual(1, len(self.notifier.notifications)) @mock.patch('cinder.volume.volume_types.get_volume_type') @mock.patch('cinder.volume.volume_types.update') def test_update_db_fail(self, mock_update, mock_get_volume_type): mock_update.side_effect = exception.VolumeTypeUpdateFailed( id=DEFAULT_VOLUME_TYPE) mock_get_volume_type.return_value = fake_volume_type( DEFAULT_VOLUME_TYPE) body = {"volume_type": {"name": "vol_type_1_1", "description": "vol_type_desc_1_1"}} req = fakes.HTTPRequest.blank('/v3/%s/types/%s' % ( fake.PROJECT_ID, DEFAULT_VOLUME_TYPE), use_admin_context=True) req.method = 'PUT' self.assertEqual(0, len(self.notifier.notifications)) self.assertRaises(webob.exc.HTTPInternalServerError, self.controller._update, req, DEFAULT_VOLUME_TYPE, body=body) self.assertEqual(1, len(self.notifier.notifications)) def test_update_no_name_no_description(self): body = {"volume_type": {}} req = fakes.HTTPRequest.blank('/v3/%s/types/%s' % ( fake.PROJECT_ID, DEFAULT_VOLUME_TYPE), use_admin_context=True) req.method = 'PUT' self.assertRaises(webob.exc.HTTPBadRequest, self.controller._update, req, DEFAULT_VOLUME_TYPE, body=body) def test_update_empty_name(self): body = {"volume_type": {"name": " ", "description": "something"}} req = fakes.HTTPRequest.blank('/v3/%s/types/%s' % ( fake.PROJECT_ID, DEFAULT_VOLUME_TYPE), use_admin_context=True) req.method = 'PUT' self.assertRaises(webob.exc.HTTPBadRequest, self.controller._update, req, DEFAULT_VOLUME_TYPE, body=body) @mock.patch('cinder.volume.volume_types.get_volume_type') @mock.patch('cinder.db.volume_type_update') @mock.patch('cinder.quota.VolumeTypeQuotaEngine.' 
'update_quota_resource') def test_update_only_name(self, mock_update_quota, mock_update, mock_get): mock_get.return_value = return_volume_types_get_volume_type_updated( UPDATE_NAME_ONLY_TYPE) ctxt = context.RequestContext(fake.USER_ID, fake.PROJECT_ID, True) name = "vol_type_%s" % UPDATE_NAME_ONLY_TYPE updated_name = "%s_%s" % (name, UPDATE_NAME_ONLY_TYPE) desc = "vol_type_desc_%s" % UPDATE_NAME_ONLY_TYPE body = {"volume_type": {"name": name}} req = fakes.HTTPRequest.blank('/v3/%s/types/%s' % (fake.PROJECT_ID, UPDATE_NAME_ONLY_TYPE)) req.method = 'PUT' req.environ['cinder.context'] = ctxt self.assertEqual(0, len(self.notifier.notifications)) res_dict = self.controller._update(req, UPDATE_NAME_ONLY_TYPE, body=body) self.assertEqual(1, len(self.notifier.notifications)) mock_update_quota.assert_called_once_with(ctxt, updated_name, name) self._check_test_results(res_dict, {'expected_name': updated_name, 'expected_desc': desc}) @mock.patch('cinder.volume.volume_types.update') @mock.patch('cinder.volume.volume_types.get_volume_type') def test_update_only_description(self, mock_get, mock_update): mock_get.return_value = return_volume_types_get_volume_type_updated( UPDATE_DESC_ONLY_TYPE) name = "vol_type_%s" % UPDATE_DESC_ONLY_TYPE desc = "vol_type_desc_%s" % UPDATE_DESC_ONLY_TYPE updated_desc = "%s_%s" % (desc, UPDATE_DESC_ONLY_TYPE) body = {"volume_type": {"description": updated_desc}} req = fakes.HTTPRequest.blank('/v3/%s/types/%s' % ( fake.PROJECT_ID, UPDATE_DESC_ONLY_TYPE), use_admin_context=True) req.method = 'PUT' self.assertEqual(0, len(self.notifier.notifications)) res_dict = self.controller._update(req, UPDATE_DESC_ONLY_TYPE, body=body) self.assertEqual(1, len(self.notifier.notifications)) self._check_test_results(res_dict, {'expected_name': name, 'expected_desc': updated_desc}) @mock.patch('cinder.volume.volume_types.update') @mock.patch('cinder.volume.volume_types.get_volume_type') def test_update_only_is_public(self, mock_get, mock_update): is_public = False mock_get.return_value = return_volume_types_get_volume_type_updated( DEFAULT_VOLUME_TYPE, is_public=is_public) name = "vol_type_%s" % DEFAULT_VOLUME_TYPE updated_name = '%s_%s' % (name, DEFAULT_VOLUME_TYPE) desc = "vol_type_desc_%s" % DEFAULT_VOLUME_TYPE updated_desc = "%s_%s" % (desc, DEFAULT_VOLUME_TYPE) body = {"volume_type": {"is_public": is_public}} req = fakes.HTTPRequest.blank('/v3/%s/types/%s' % ( fake.PROJECT_ID, DEFAULT_VOLUME_TYPE), use_admin_context=True) req.method = 'PUT' self.assertEqual(0, len(self.notifier.notifications)) res_dict = self.controller._update(req, DEFAULT_VOLUME_TYPE, body=body) self.assertEqual(1, len(self.notifier.notifications)) self._check_test_results(res_dict, {'expected_name': updated_name, 'expected_desc': updated_desc, 'is_public': False}) def test_update_invalid_is_public(self): body = {"volume_type": {"name": "test", "description": "something", "is_public": "fake"}} req = fakes.HTTPRequest.blank('/v3/%s/types/%s' % ( fake.PROJECT_ID, DEFAULT_VOLUME_TYPE)) req.method = 'PUT' self.assertRaises(exception.ValidationError, self.controller._update, req, DEFAULT_VOLUME_TYPE, body=body) @mock.patch('cinder.volume.volume_types.update') @mock.patch('cinder.volume.volume_types.get_volume_type') def test_rename_existing_name(self, mock_get, mock_update): id = UPDATE_NAME_AFTER_DELETE_TYPE name = "vol_type_%s" % id updated_name = "%s_%s" % (name, id) desc = "vol_type_desc_%s" % id mock_update.side_effect = exception.VolumeTypeExists( id=id, name=name) mock_get.return_value = 
return_volume_types_get_volume_type_updated( UPDATE_NAME_AFTER_DELETE_TYPE) # first attempt fail body = {"volume_type": {"name": name}} req = fakes.HTTPRequest.blank('/v3/%s/types/%s' % ( fake.PROJECT_ID, UPDATE_NAME_AFTER_DELETE_TYPE), use_admin_context=True) req.method = 'PUT' self.assertEqual(0, len(self.notifier.notifications)) self.assertRaises(webob.exc.HTTPConflict, self.controller._update, req, UPDATE_NAME_AFTER_DELETE_TYPE, body=body) self.assertEqual(1, len(self.notifier.notifications)) # delete self.notifier.reset() self.mock_object(volume_types, 'destroy', return_volume_types_destroy) req = fakes.HTTPRequest.blank('/v3/%s/types/%s' % ( fake.PROJECT_ID, UPDATE_NAME_AFTER_DELETE_TYPE), use_admin_context=True) self.assertEqual(0, len(self.notifier.notifications)) self.controller._delete(req, UPDATE_NAME_AFTER_DELETE_TYPE) self.assertEqual(1, len(self.notifier.notifications)) # update again mock_update.side_effect = mock.MagicMock() body = {"volume_type": {"name": updated_name}} req = fakes.HTTPRequest.blank('/v3/%s/types/%s' % ( fake.PROJECT_ID, UPDATE_NAME_AFTER_DELETE_TYPE), use_admin_context=True) req.method = 'PUT' self.notifier.reset() self.assertEqual(0, len(self.notifier.notifications)) res_dict = self.controller._update(req, UPDATE_NAME_AFTER_DELETE_TYPE, body=body) self._check_test_results(res_dict, {'expected_name': name, 'expected_desc': desc}) self.assertEqual(1, len(self.notifier.notifications)) @mock.patch('cinder.volume.volume_types.update') @mock.patch('cinder.volume.volume_types.get_volume_type') @mock.patch('cinder.policy.authorize') def test_update_with_non_admin(self, mock_policy_authorize, mock_get, mock_update): # allow policy authorized user to update type mock_policy_authorize.return_value = None mock_get.return_value = return_volume_types_get_volume_type_updated( DEFAULT_VOLUME_TYPE, is_public=False) name = "vol_type_%s" % DEFAULT_VOLUME_TYPE updated_name = "%s_%s" % (name, DEFAULT_VOLUME_TYPE) desc = "vol_type_desc_%s" % DEFAULT_VOLUME_TYPE updated_desc = "%s_%s" % (desc, DEFAULT_VOLUME_TYPE) body = {"volume_type": {"name": updated_name, "description": updated_desc, "is_public": False}} req = fakes.HTTPRequest.blank('/v3/%s/types/%s' % ( fake.PROJECT_ID, DEFAULT_VOLUME_TYPE), use_admin_context=False) req.method = 'PUT' self.assertEqual(0, len(self.notifier.notifications)) res_dict = self.controller._update(req, DEFAULT_VOLUME_TYPE, body=body) self.assertEqual(1, len(self.notifier.notifications)) self._check_test_results(res_dict, {'expected_desc': updated_desc, 'expected_name': updated_name, 'is_public': False}) # non policy authorized user fails to update type mock_policy_authorize.side_effect = ( exception.PolicyNotAuthorized(action='type_update')) self.assertRaises(exception.PolicyNotAuthorized, self.controller._update, req, DEFAULT_VOLUME_TYPE, body=body) def _check_test_results(self, results, expected_results): self.assertEqual(1, len(results)) self.assertEqual(expected_results['expected_desc'], results['volume_type']['description']) if expected_results.get('expected_name'): self.assertEqual(expected_results['expected_name'], results['volume_type']['name']) if expected_results.get('is_public') is not None: self.assertEqual(expected_results['is_public'], results['volume_type']['is_public']) def test_update_with_name_null(self): body = {"volume_type": {"name": None}} req = fakes.HTTPRequest.blank('/v3/%s/types/%s' % ( fake.PROJECT_ID, DEFAULT_VOLUME_TYPE), use_admin_context=True) req.method = 'PUT' self.assertRaises(webob.exc.HTTPBadRequest, 
self.controller._update, req, DEFAULT_VOLUME_TYPE, body=body) @ddt.data({"volume_type": {"name": None, "description": "description"}}, {"volume_type": {"name": None, "is_public": True}}, {"volume_type": {"description": "description", "is_public": True}}) def test_update_volume_type(self, body): req = fakes.HTTPRequest.blank('/v3/%s/types/%s' % ( fake.PROJECT_ID, DEFAULT_VOLUME_TYPE)) req.method = 'PUT' ctxt = context.RequestContext(fake.USER_ID, fake.PROJECT_ID, True) req.environ['cinder.context'] = ctxt volume_type_1 = volume_types.create(ctxt, 'volume_type') res = self.controller._update(req, volume_type_1.get('id'), body=body) expected_name = body['volume_type'].get('name') if expected_name is not None: self.assertEqual(expected_name, res['volume_type']['name']) expected_is_public = body['volume_type'].get('is_public') if expected_is_public is not None: self.assertEqual(expected_is_public, res['volume_type']['is_public']) self.assertEqual(body['volume_type'].get('description'), res['volume_type']['description']) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/api/contrib/test_used_limits.py0000664000175000017500000001154100000000000024621 0ustar00zuulzuul00000000000000# Copyright 2012 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from unittest import mock import ddt from cinder.api.contrib import used_limits from cinder.api import microversions as mv from cinder.api.openstack import api_version_request from cinder.api.openstack import wsgi from cinder.tests.unit.api import fakes from cinder.tests.unit import fake_constants as fake from cinder.tests.unit import test class FakeRequest(object): def __init__(self, context, filter=None, api_version='2.0'): self.environ = {'cinder.context': context} self.params = filter or {} self.api_version_request = api_version_request.APIVersionRequest( api_version) @ddt.ddt class UsedLimitsTestCase(test.TestCase): def setUp(self): """Run before each test.""" super(UsedLimitsTestCase, self).setUp() self.controller = used_limits.UsedLimitsController() @ddt.data(('2.0', False), (mv.get_prior_version(mv.LIMITS_ADMIN_FILTER), True), (mv.get_prior_version(mv.LIMITS_ADMIN_FILTER), False), (mv.LIMITS_ADMIN_FILTER, True), (mv.LIMITS_ADMIN_FILTER, False)) @mock.patch('cinder.quota.QUOTAS.get_project_quotas') @mock.patch('cinder.policy.authorize') def test_used_limits(self, ver_project, _mock_policy_authorize, _mock_get_project_quotas): version, has_project = ver_project fake_req = FakeRequest(fakes.FakeRequestContext(fake.USER_ID, fake.PROJECT_ID, is_admin=True), api_version=version) if has_project: fake_req = FakeRequest(fakes.FakeRequestContext(fake.USER_ID, fake.PROJECT_ID, is_admin=True), filter={'project_id': fake.UUID1}, api_version=version) obj = { "limits": { "rate": [], "absolute": {}, }, } res = wsgi.ResponseObject(obj) def get_project_quotas(context, project_id, quota_class=None, defaults=True, usages=True): if project_id == fake.UUID1: return {"gigabytes": {'limit': 5, 'in_use': 1}} return {"gigabytes": {'limit': 10, 'in_use': 2}} _mock_get_project_quotas.side_effect = get_project_quotas # allow user to access used limits _mock_policy_authorize.return_value = True self.controller.index(fake_req, res) abs_limits = res.obj['limits']['absolute'] # if admin, only LIMITS_ADMIN_FILTER and req contains project_id # filter, cinder returns the specified project's quota. if version == mv.LIMITS_ADMIN_FILTER and has_project: self.assertEqual(1, abs_limits['totalGigabytesUsed']) else: self.assertEqual(2, abs_limits['totalGigabytesUsed']) fake_req = FakeRequest(fakes.FakeRequestContext(fake.USER_ID, fake.PROJECT_ID), api_version=version) if has_project: fake_req = FakeRequest(fakes.FakeRequestContext(fake.USER_ID, fake.PROJECT_ID), filter={'project_id': fake.UUID1}, api_version=version) # if non-admin, cinder always returns self quota. self.controller.index(fake_req, res) abs_limits = res.obj['limits']['absolute'] self.assertEqual(2, abs_limits['totalGigabytesUsed']) obj = { "limits": { "rate": [], "absolute": {}, }, } res = wsgi.ResponseObject(obj) # unallow user to access used limits _mock_policy_authorize.return_value = False self.controller.index(fake_req, res) abs_limits = res.obj['limits']['absolute'] self.assertNotIn('totalVolumesUsed', abs_limits) self.assertNotIn('totalGigabytesUsed', abs_limits) self.assertNotIn('totalSnapshotsUsed', abs_limits) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/api/contrib/test_volume_actions.py0000664000175000017500000023435300000000000025337 0ustar00zuulzuul00000000000000# Copyright 2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import datetime from http import HTTPStatus from unittest import mock import ddt from oslo_config import cfg import oslo_messaging as messaging from oslo_serialization import jsonutils import webob from cinder.api.contrib import volume_actions from cinder.api import microversions as mv from cinder.api.openstack import api_version_request as api_version from cinder import context from cinder import db from cinder import exception from cinder.image import glance from cinder import objects from cinder.tests.unit.api import fakes from cinder.tests.unit.api.v3 import fakes as v3_fakes from cinder.tests.unit import fake_constants as fake from cinder.tests.unit import fake_volume from cinder.tests.unit import test from cinder.tests.unit import utils from cinder import volume from cinder.volume import api as volume_api from cinder.volume import rpcapi as volume_rpcapi CONF = cfg.CONF ENCRYPTED_VOLUME_ID = 'f78e8977-6164-4114-a593-358fa6646eff' @ddt.ddt class VolumeActionsTest(test.TestCase): _actions = ('os-reserve', 'os-unreserve') _methods = ('attach', 'detach', 'reserve_volume', 'unreserve_volume') def setUp(self): super(VolumeActionsTest, self).setUp() self.context = context.RequestContext(fake.USER_ID, fake.PROJECT_ID, is_admin=False) self.controller = volume_actions.VolumeActionsController() self.api_patchers = {} for _meth in self._methods: self.api_patchers[_meth] = mock.patch('cinder.volume.api.API.' 
+ _meth) self.api_patchers[_meth].start() self.addCleanup(self.api_patchers[_meth].stop) self.api_patchers[_meth].return_value = True db_vol = {'id': fake.VOLUME_ID, 'host': 'fake', 'status': 'available', 'size': 1, 'migration_status': None, 'volume_type_id': fake.VOLUME_TYPE_ID, 'project_id': fake.PROJECT_ID} vol = fake_volume.fake_volume_obj(self.context, **db_vol) self.get_patcher = mock.patch('cinder.volume.api.API.get') self.mock_volume_get = self.get_patcher.start() self.addCleanup(self.get_patcher.stop) self.mock_volume_get.return_value = vol self.update_patcher = mock.patch('cinder.volume.api.API.update') self.mock_volume_update = self.update_patcher.start() self.addCleanup(self.update_patcher.stop) self.mock_volume_update.return_value = vol self.db_get_patcher = mock.patch( 'cinder.db.sqlalchemy.api._volume_get') self.mock_volume_db_get = self.db_get_patcher.start() self.addCleanup(self.db_get_patcher.stop) self.mock_volume_db_get.return_value = vol self.flags(transport_url='fake:/') def test_simple_api_actions(self): app = fakes.wsgi_app(fake_auth_context=self.context) for _action in self._actions: req = webob.Request.blank('/v3/%s/volumes/%s/action' % (fake.PROJECT_ID, fake.VOLUME_ID)) req.method = 'POST' req.body = jsonutils.dump_as_bytes({_action: None}) req.content_type = 'application/json' res = req.get_response(app) self.assertEqual(HTTPStatus.ACCEPTED, res.status_int) @ddt.data(False, True) def test_initialize_connection(self, enforce_mpath): with mock.patch.object(volume_api.API, 'initialize_connection') as init_conn: init_conn.return_value = {} body = {'os-initialize_connection': {'connector': { 'fake': 'fake', 'enforce_multipath': enforce_mpath}}} expected_conn_info = {'enforce_multipath': enforce_mpath} req = webob.Request.blank('/v3/%s/volumes/%s/action' % (fake.PROJECT_ID, fake.VOLUME_ID)) req.method = "POST" req.body = jsonutils.dump_as_bytes(body) req.headers["content-type"] = "application/json" res = req.get_response(fakes.wsgi_app( fake_auth_context=self.context)) actual_conn_info = jsonutils.loads(res.body).get('connection_info') self.assertEqual(HTTPStatus.OK, res.status_int) self.assertEqual(expected_conn_info, actual_conn_info) def test_initialize_connection_without_connector(self): with mock.patch.object(volume_api.API, 'initialize_connection') as init_conn: init_conn.return_value = {} body = {'os-initialize_connection': {}} req = webob.Request.blank('/v3/%s/volumes/%s/action' % (fake.PROJECT_ID, fake.VOLUME_ID)) req.method = "POST" req.body = jsonutils.dump_as_bytes(body) req.headers["content-type"] = "application/json" res = req.get_response(fakes.wsgi_app( fake_auth_context=self.context)) self.assertEqual(HTTPStatus.BAD_REQUEST, res.status_int) @mock.patch('cinder.volume.rpcapi.VolumeAPI.initialize_connection') def test_initialize_connection_without_initiator(self, _init_connection): _init_connection.side_effect = messaging.RemoteError('InvalidInput') body = {'os-initialize_connection': {'connector': 'w/o_initiator'}} req = webob.Request.blank('/v3/%s/volumes/%s/action' % (fake.PROJECT_ID, fake.VOLUME_ID)) req.method = "POST" req.body = jsonutils.dump_as_bytes(body) req.headers["content-type"] = "application/json" res = req.get_response(fakes.wsgi_app( fake_auth_context=self.context)) self.assertEqual(HTTPStatus.BAD_REQUEST, res.status_int) def test_initialize_connection_exception(self): with mock.patch.object(volume_api.API, 'initialize_connection') as init_conn: init_conn.side_effect = \ exception.VolumeBackendAPIException(data=None) body = 
{'os-initialize_connection': {'connector': { 'fake': 'fake'}}} req = webob.Request.blank('/v3/%s/volumes/%s/action' % (fake.PROJECT_ID, fake.VOLUME_ID)) req.method = "POST" req.body = jsonutils.dump_as_bytes(body) req.headers["content-type"] = "application/json" res = req.get_response(fakes.wsgi_app( fake_auth_context=self.context)) self.assertEqual(HTTPStatus.INTERNAL_SERVER_ERROR, res.status_int) def test_terminate_connection(self): with mock.patch.object(volume_api.API, 'terminate_connection') as terminate_conn: terminate_conn.return_value = {} body = {'os-terminate_connection': {'connector': 'fake'}} req = webob.Request.blank('/v3/%s/volumes/%s/action' % (fake.PROJECT_ID, fake.VOLUME_ID)) req.method = "POST" req.body = jsonutils.dump_as_bytes(body) req.headers["content-type"] = "application/json" res = req.get_response(fakes.wsgi_app( fake_auth_context=self.context)) self.assertEqual(HTTPStatus.ACCEPTED, res.status_int) def test_terminate_connection_without_connector(self): with mock.patch.object(volume_api.API, 'terminate_connection') as terminate_conn: terminate_conn.return_value = {} body = {'os-terminate_connection': {}} req = webob.Request.blank('/v3/%s/volumes/%s/action' % (fake.PROJECT_ID, fake.VOLUME_ID)) req.method = "POST" req.body = jsonutils.dump_as_bytes(body) req.headers["content-type"] = "application/json" res = req.get_response(fakes.wsgi_app( fake_auth_context=self.context)) self.assertEqual(HTTPStatus.BAD_REQUEST, res.status_int) def test_terminate_connection_with_exception(self): with mock.patch.object(volume_api.API, 'terminate_connection') as terminate_conn: terminate_conn.side_effect = \ exception.VolumeBackendAPIException(data=None) body = {'os-terminate_connection': {'connector': 'fake'}} req = webob.Request.blank('/v3/%s/volumes/%s/action' % (fake.PROJECT_ID, fake.VOLUME_ID)) req.method = "POST" req.body = jsonutils.dump_as_bytes(body) req.headers["content-type"] = "application/json" res = req.get_response(fakes.wsgi_app( fake_auth_context=self.context)) self.assertEqual(HTTPStatus.INTERNAL_SERVER_ERROR, res.status_int) def test_attach_to_instance(self): body = {'os-attach': {'instance_uuid': fake.INSTANCE_ID, 'mountpoint': '/dev/vdc', 'mode': 'rw'}} req = webob.Request.blank('/v3/%s/volumes/%s/action' % (fake.PROJECT_ID, fake.VOLUME_ID)) req.method = "POST" req.body = jsonutils.dump_as_bytes(body) req.headers["content-type"] = "application/json" res = req.get_response(fakes.wsgi_app( fake_auth_context=self.context)) self.assertEqual(HTTPStatus.ACCEPTED, res.status_int) body = {'os-attach': {'instance_uuid': fake.INSTANCE_ID, 'host_name': 'fake_host', 'mountpoint': '/dev/vdc'}} req = webob.Request.blank('/v3/%s/volumes/%s/action' % (fake.PROJECT_ID, fake.VOLUME_ID)) req.method = "POST" req.headers["content-type"] = "application/json" req.body = jsonutils.dump_as_bytes(body) res = req.get_response(fakes.wsgi_app( fake_auth_context=self.context)) self.assertEqual(HTTPStatus.ACCEPTED, res.status_int) def test_attach_to_host(self): # using 'read-write' mode attach volume by default body = {'os-attach': {'host_name': 'fake_host', 'mountpoint': '/dev/vdc'}} req = webob.Request.blank('/v3/%s/volumes/%s/action' % (fake.PROJECT_ID, fake.VOLUME_ID)) req.method = "POST" req.body = jsonutils.dump_as_bytes(body) req.headers["content-type"] = "application/json" res = req.get_response(fakes.wsgi_app( fake_auth_context=self.context)) self.assertEqual(HTTPStatus.ACCEPTED, res.status_int) def test_volume_attach_to_instance_raises_remote_error(self): volume_remote_error = \ 
messaging.RemoteError(exc_type='InvalidUUID') with mock.patch.object(volume_api.API, 'attach', side_effect=volume_remote_error): id = fake.VOLUME_ID vol = {"instance_uuid": fake.INSTANCE_ID, "mountpoint": "/dev/vdc", "mode": "rw"} body = {"os-attach": vol} req = fakes.HTTPRequest.blank('/v3/%s/volumes/%s/action' % (fake.PROJECT_ID, id)) self.assertRaises(webob.exc.HTTPBadRequest, self.controller._attach, req, id, body=body) def test_volume_attach_to_instance_raises_db_error(self): # In case of DB error 500 error code is returned to user volume_remote_error = \ messaging.RemoteError(exc_type='DBError') with mock.patch.object(volume_api.API, 'attach', side_effect=volume_remote_error): id = fake.VOLUME_ID vol = {"instance_uuid": fake.INSTANCE_ID, "mountpoint": "/dev/vdc", "mode": "rw"} body = {"os-attach": vol} req = fakes.HTTPRequest.blank('/v3/%s/volumes/%s/action' % (fake.PROJECT_ID, id)) self.assertRaises(messaging.RemoteError, self.controller._attach, req, id, body=body) def test_detach(self): body = {'os-detach': {'attachment_id': fake.ATTACHMENT_ID}} req = webob.Request.blank('/v3/%s/volumes/%s/action' % (fake.PROJECT_ID, fake.VOLUME_ID)) req.method = "POST" req.body = jsonutils.dump_as_bytes(body) req.headers["content-type"] = "application/json" res = req.get_response(fakes.wsgi_app( fake_auth_context=self.context)) self.assertEqual(HTTPStatus.ACCEPTED, res.status_int) def test_detach_null_attachment_id(self): body = {'os-detach': {'attachment_id': None}} req = webob.Request.blank('/v3/%s/volumes/%s/action' % (fake.PROJECT_ID, fake.VOLUME_ID)) req.method = "POST" req.body = jsonutils.dump_as_bytes(body) req.headers["content-type"] = "application/json" res = req.get_response(fakes.wsgi_app( fake_auth_context=self.context)) self.assertEqual(HTTPStatus.ACCEPTED, res.status_int) def test_volume_detach_raises_remote_error(self): volume_remote_error = \ messaging.RemoteError(exc_type='VolumeAttachmentNotFound') with mock.patch.object(volume_api.API, 'detach', side_effect=volume_remote_error): id = fake.VOLUME_ID vol = {"attachment_id": fake.ATTACHMENT_ID} body = {"os-detach": vol} req = fakes.HTTPRequest.blank('/v3/%s/volumes/%s/action' % (fake.PROJECT_ID, id)) self.assertRaises(webob.exc.HTTPBadRequest, self.controller._detach, req, id, body=body) def test_volume_detach_raises_db_error(self): # In case of DB error 500 error code is returned to user volume_remote_error = \ messaging.RemoteError(exc_type='DBError') with mock.patch.object(volume_api.API, 'detach', side_effect=volume_remote_error): id = fake.VOLUME_ID vol = {"attachment_id": fake.ATTACHMENT_ID} body = {"os-detach": vol} req = fakes.HTTPRequest.blank('/v3/%s/volumes/%s/action' % (fake.PROJECT_ID, id)) self.assertRaises(messaging.RemoteError, self.controller._detach, req, id, body=body) def test_attach_with_invalid_arguments(self): # Invalid request to attach volume an invalid target body = {'os-attach': {'mountpoint': '/dev/vdc'}} req = webob.Request.blank('/v3/%s/volumes/%s/action' % (fake.PROJECT_ID, fake.VOLUME_ID)) req.method = "POST" req.headers["content-type"] = "application/json" req.body = jsonutils.dump_as_bytes(body) res = req.get_response(fakes.wsgi_app( fake_auth_context=self.context)) self.assertEqual(HTTPStatus.BAD_REQUEST, res.status_int) # Invalid request to attach volume with an invalid mode body = {'os-attach': {'instance_uuid': 'fake', 'mountpoint': '/dev/vdc', 'mode': 'rr'}} req = webob.Request.blank('/v3/%s/volumes/%s/action' % (fake.PROJECT_ID, fake.VOLUME_ID)) req.method = "POST" 
req.headers["content-type"] = "application/json" req.body = jsonutils.dump_as_bytes(body) res = req.get_response(fakes.wsgi_app( fake_auth_context=self.context)) self.assertEqual(HTTPStatus.BAD_REQUEST, res.status_int) body = {'os-attach': {'host_name': 'fake_host', 'mountpoint': '/dev/vdc', 'mode': 'ww'}} req = webob.Request.blank('/v3/%s/volumes/%s/action' % (fake.PROJECT_ID, fake.VOLUME_ID)) req.method = "POST" req.headers["content-type"] = "application/json" req.body = jsonutils.dump_as_bytes(body) res = req.get_response(fakes.wsgi_app( fake_auth_context=self.context)) self.assertEqual(HTTPStatus.BAD_REQUEST, res.status_int) def test_attach_to_instance_no_mountpoint(self): # The mountpoint parameter is required. If not provided the # API should fail with a 400 error. body = {'os-attach': {'instance_uuid': fake.INSTANCE_ID, 'mode': 'rw'}} req = webob.Request.blank('/v3/%s/volumes/%s/action' % (fake.PROJECT_ID, fake.VOLUME_ID)) req.method = "POST" req.body = jsonutils.dump_as_bytes(body) req.headers["content-type"] = "application/json" res = req.get_response(fakes.wsgi_app( fake_auth_context=self.context)) self.assertEqual(400, res.status_int) def test_begin_detaching(self): def fake_begin_detaching(*args, **kwargs): return {} self.mock_object(volume.api.API, 'begin_detaching', fake_begin_detaching) body = {'os-begin_detaching': {'fake': 'fake'}} req = webob.Request.blank('/v3/%s/volumes/%s/action' % (fake.PROJECT_ID, fake.VOLUME_ID)) req.method = "POST" req.body = jsonutils.dump_as_bytes(body) req.headers["content-type"] = "application/json" res = req.get_response(fakes.wsgi_app( fake_auth_context=self.context)) self.assertEqual(HTTPStatus.ACCEPTED, res.status_int) def test_roll_detaching(self): def fake_roll_detaching(*args, **kwargs): return {} self.mock_object(volume.api.API, 'roll_detaching', fake_roll_detaching) body = {'os-roll_detaching': {'fake': 'fake'}} req = webob.Request.blank('/v3/%s/volumes/%s/action' % (fake.PROJECT_ID, fake.VOLUME_ID)) req.method = "POST" req.body = jsonutils.dump_as_bytes(body) req.headers["content-type"] = "application/json" res = req.get_response(fakes.wsgi_app( fake_auth_context=self.context)) self.assertEqual(HTTPStatus.ACCEPTED, res.status_int) def test_extend_volume(self): def fake_extend_volume(*args, **kwargs): return {} self.mock_object(volume.api.API, 'extend', fake_extend_volume) body = {'os-extend': {'new_size': 5}} req = webob.Request.blank('/v3/%s/volumes/%s/action' % (fake.PROJECT_ID, fake.VOLUME_ID)) req.method = "POST" req.body = jsonutils.dump_as_bytes(body) req.headers["content-type"] = "application/json" res = req.get_response(fakes.wsgi_app( fake_auth_context=self.context)) self.assertEqual(HTTPStatus.ACCEPTED, res.status_int) def test_extend_volume_invalid_status(self): def fake_extend_volume(*args, **kwargs): msg = "Volume status must be available" raise exception.InvalidVolume(reason=msg) self.mock_object(volume.api.API, 'extend', fake_extend_volume) body = {'os-extend': {'new_size': 5}} req = webob.Request.blank('/v3/%s/volumes/%s/action' % (fake.PROJECT_ID, fake.VOLUME_ID)) req.method = "POST" req.body = jsonutils.dump_as_bytes(body) req.headers["content-type"] = "application/json" res = req.get_response(fakes.wsgi_app(fake_auth_context=self.context)) self.assertEqual(HTTPStatus.BAD_REQUEST, res.status_int) @ddt.data((True, HTTPStatus.ACCEPTED), (False, HTTPStatus.ACCEPTED), ('1', HTTPStatus.ACCEPTED), ('0', HTTPStatus.ACCEPTED), ('true', HTTPStatus.ACCEPTED), ('false', HTTPStatus.ACCEPTED), ('tt', HTTPStatus.BAD_REQUEST), (11, 
HTTPStatus.BAD_REQUEST), (None, HTTPStatus.BAD_REQUEST)) @ddt.unpack def test_update_readonly_flag(self, readonly, return_code): def fake_update_readonly_flag(*args, **kwargs): return {} self.mock_object(volume.api.API, 'update_readonly_flag', fake_update_readonly_flag) body = {"os-update_readonly_flag": {"readonly": readonly}} if readonly is None: body = {"os-update_readonly_flag": {}} req = webob.Request.blank('/v3/%s/volumes/%s/action' % (fake.PROJECT_ID, fake.VOLUME_ID)) req.method = "POST" req.body = jsonutils.dump_as_bytes(body) req.headers["content-type"] = "application/json" res = req.get_response(fakes.wsgi_app( fake_auth_context=self.context)) self.assertEqual(return_code, res.status_int) @ddt.data((True, HTTPStatus.OK), (False, HTTPStatus.OK), ('1', HTTPStatus.OK), ('0', HTTPStatus.OK), ('true', HTTPStatus.OK), ('false', HTTPStatus.OK), ('tt', HTTPStatus.BAD_REQUEST), (11, HTTPStatus.BAD_REQUEST), (None, HTTPStatus.BAD_REQUEST)) @ddt.unpack def test_set_bootable(self, bootable, return_code): body = {"os-set_bootable": {"bootable": bootable}} if bootable is None: body = {"os-set_bootable": {}} req = webob.Request.blank('/v3/%s/volumes/%s/action' % (fake.PROJECT_ID, fake.VOLUME_ID)) req.method = "POST" req.body = jsonutils.dump_as_bytes(body) req.headers["content-type"] = "application/json" res = req.get_response(fakes.wsgi_app( fake_auth_context=self.context)) self.assertEqual(return_code, res.status_int) @ddt.ddt class VolumeRetypeActionsTest(test.TestCase): def setUp(self): super(VolumeRetypeActionsTest, self).setUp() self.context = context.RequestContext(fake.USER_ID, fake.PROJECT_ID, is_admin=False) self.flags(transport_url='fake:/') self.retype_mocks = {} paths = ('cinder.quota.QUOTAS.add_volume_type_opts', 'cinder.quota.QUOTAS.reserve') for path in paths: name = path.split('.')[-1] patcher = mock.patch(path, return_value=None) self.retype_mocks[name] = patcher.start() self.addCleanup(patcher.stop) @mock.patch('cinder.db.sqlalchemy.api.resource_exists', return_value=True) def _retype_volume_exec(self, expected_status, new_type=fake.VOLUME_TYPE2_ID, vol_id=None, exists_mock=None): vol_id = vol_id or fake.VOLUME_ID req = webob.Request.blank('/v3/%s/volumes/%s/action' % (fake.PROJECT_ID, vol_id)) req.method = 'POST' req.headers['content-type'] = 'application/json' retype_body = {'new_type': new_type, 'migration_policy': 'never'} req.body = jsonutils.dump_as_bytes({'os-retype': retype_body}) res = req.get_response(fakes.wsgi_app(fake_auth_context=self.context)) self.assertEqual(expected_status, res.status_int) def test_retype_volume_no_body(self): # Request with no body should fail vol = utils.create_volume(self.context, status='available', testcase_instance=self) req = webob.Request.blank('/v3/%s/volumes/%s/action' % (fake.PROJECT_ID, vol.id)) req.method = 'POST' req.headers['content-type'] = 'application/json' req.body = jsonutils.dump_as_bytes({'os-retype': None}) res = req.get_response(fakes.wsgi_app(fake_auth_context=self.context)) self.assertEqual(HTTPStatus.BAD_REQUEST, res.status_int) def test_retype_volume_bad_policy(self): # Request with invalid migration policy should fail vol = utils.create_volume(self.context, status='available', testcase_instance=self) req = webob.Request.blank('/v3/%s/volumes/%s/action' % (fake.PROJECT_ID, vol.id)) req.method = 'POST' req.headers['content-type'] = 'application/json' retype_body = {'new_type': 'foo', 'migration_policy': 'invalid'} req.body = jsonutils.dump_as_bytes({'os-retype': retype_body}) res = 
req.get_response(fakes.wsgi_app(fake_auth_context=self.context)) self.assertEqual(HTTPStatus.BAD_REQUEST, res.status_int) def test_retype_volume_bad_status(self): # Should fail if volume does not have proper status vol_type_old = utils.create_volume_type(context.get_admin_context(), self, name='old') vol_type_new = utils.create_volume_type(context.get_admin_context(), self, name='new') vol = utils.create_volume(self.context, status='error', volume_type_id=vol_type_old.id, testcase_instance=self) self._retype_volume_exec(HTTPStatus.BAD_REQUEST, vol_type_new.id, vol.id) def test_retype_type_no_exist(self): # Should fail if new type does not exist vol_type_old = utils.create_volume_type(context.get_admin_context(), self, name='old') vol = utils.create_volume(self.context, status='available', volume_type_id=vol_type_old.id, testcase_instance=self) self._retype_volume_exec(HTTPStatus.NOT_FOUND, 'fake_vol_type', vol.id) def test_retype_same_type(self): # Should fail if new type and old type are the same vol_type_old = utils.create_volume_type(context.get_admin_context(), self, name='old') vol = utils.create_volume(self.context, status='available', volume_type_id=vol_type_old.id, testcase_instance=self) self._retype_volume_exec(HTTPStatus.BAD_REQUEST, vol_type_old.id, vol.id) def test_retype_over_quota(self): # Should fail if going over quota for new type vol_type_new = utils.create_volume_type(context.get_admin_context(), self, name='old') vol = utils.create_volume(self.context, status='available', testcase_instance=self) exc = exception.OverQuota(overs=['gigabytes'], quotas={'gigabytes': 20}, usages={'gigabytes': {'reserved': 5, 'in_use': 15}}) self.retype_mocks['reserve'].side_effect = exc self._retype_volume_exec(HTTPStatus.REQUEST_ENTITY_TOO_LARGE, vol_type_new.id, vol.id) @ddt.data(('in-use', 'front-end', HTTPStatus.BAD_REQUEST), ('in-use', 'back-end', HTTPStatus.ACCEPTED), ('available', 'front-end', HTTPStatus.ACCEPTED), ('available', 'back-end', HTTPStatus.ACCEPTED), ('in-use', 'front-end', HTTPStatus.ACCEPTED, True), ('in-use', 'back-end', HTTPStatus.ACCEPTED, True), ('available', 'front-end', HTTPStatus.ACCEPTED, True), ('available', 'back-end', HTTPStatus.ACCEPTED, True), ('in-use', 'front-end', HTTPStatus.BAD_REQUEST, False, False), ('in-use', 'back-end', HTTPStatus.ACCEPTED, False, False), ('in-use', '', HTTPStatus.ACCEPTED, True, False), ('available', 'front-end', HTTPStatus.ACCEPTED, False, False), ('available', 'back-end', HTTPStatus.ACCEPTED, False, False), ('available', '', HTTPStatus.ACCEPTED, True, False), ('in-use', 'front-end', HTTPStatus.BAD_REQUEST, False, False, False), ('in-use', '', HTTPStatus.ACCEPTED, True, False, False), ('in-use', 'back-end', HTTPStatus.ACCEPTED, False, False, False), ('available', 'front-end', HTTPStatus.ACCEPTED, False, False, False), ('in-use', '', HTTPStatus.ACCEPTED, True, False, False), ('in-use', 'back-end', HTTPStatus.ACCEPTED, False, False, False)) @ddt.unpack def test_retype_volume_qos(self, vol_status, consumer_pass, expected_status, same_qos=False, has_qos=True, has_type=True): """Test volume retype with QoS This test conatins following test-cases: 1) should fail if changing qos enforced by front-end for in-use volume 2) should NOT fail for in-use if changing qos enforced by back-end 3) should NOT fail if changing qos enforced by FE for available volumes 4) should NOT fail if changing qos enforced by back-end for available volumes 5) should NOT fail if changing qos enforced by front-end for in-use volumes if the qos is the same 6) should 
NOT fail if changing qos enforced by back-end for in-use volumes if the qos is the same 7) should NOT fail if changing qos enforced by front-end for available volumes if the qos is the same 8) should NOT fail if changing qos enforced by back-end for available volumes if the qos is the same 9) should fail if changing qos enforced by front-end on the new type and volume originally had no qos and was in-use 10) should NOT fail if changing qos enforced by back-end on the new type and volume originally had no qos and was in-use 11) should NOT fail if original and destinal types had no qos for in-use volumes 12) should NOT fail if changing qos enforced by front-end on the new type and volume originally had no qos and was available 13) should NOT fail if changing qos enforced by back-end on the new type and volume originally had no qos and was available 14) should NOT fail if original and destinal types had no qos for available volumes 15) should fail if changing volume had no type, was in-use and destination type qos was enforced by front-end 16) should NOT fail if changing volume had no type, was in-use and destination type had no qos and volume originally had no type and was in-use 17) should NOT fail if changing volume had no type, was in-use and destination type qos was enforced by back-end 18) should NOT fail if changing volume had no type, was in-use and destination type qos was enforced by front-end 19) should NOT fail if changing volume had no type, was available and destination type had no qos and volume originally had no type and was in-use 20) should NOT fail if changing volume had no type, was available and destination type qos was enforced by back-end """ admin_ctxt = context.get_admin_context() if has_qos: qos_old = utils.create_qos(admin_ctxt, self, name='old', consumer=consumer_pass)['id'] else: qos_old = None if same_qos: qos_new = qos_old else: qos_new = utils.create_qos(admin_ctxt, self, name='new', consumer=consumer_pass)['id'] if has_type: vol_type_old = utils.create_volume_type(admin_ctxt, self, name='old', qos_specs_id=qos_old).id else: vol_type_old = v3_fakes.fake_default_type_get()['id'] vol_type_new = utils.create_volume_type(admin_ctxt, self, name='new', qos_specs_id=qos_new).id vol = utils.create_volume(self.context, status=vol_status, volume_type_id=vol_type_old, testcase_instance=self) self._retype_volume_exec(expected_status, vol_type_new, vol.id) @ddt.data(('available', HTTPStatus.ACCEPTED, False, False, False), ('available', HTTPStatus.ACCEPTED, False, False), ('available', HTTPStatus.ACCEPTED, True, False, False), ('available', HTTPStatus.ACCEPTED, True, False), ('available', HTTPStatus.ACCEPTED)) @ddt.unpack def test_retype_volume_encryption(self, vol_status, expected_status, has_type=True, enc_orig=True, enc_dest=True): enc_orig = None admin_ctxt = context.get_admin_context() if has_type: vol_type_old = utils.create_volume_type(admin_ctxt, self, name='old').id if enc_orig: utils.create_encryption(admin_ctxt, vol_type_old, self) else: vol_type_old = v3_fakes.fake_default_type_get()['id'] vol_type_new = utils.create_volume_type(admin_ctxt, self, name='new').id if enc_dest: utils.create_encryption(admin_ctxt, vol_type_new, self) vol = utils.create_volume(self.context, status=vol_status, volume_type_id=vol_type_old, testcase_instance=self) self._retype_volume_exec(expected_status, vol_type_new, vol.id) def fake_volume_get(self, context, volume_id): volume = v3_fakes.create_volume(volume_id) if volume_id == fake.VOLUME3_ID: volume['status'] = 'in-use' else: 
volume['status'] = 'available' return volume def fake_volume_get_obj(self, context, volume_id, **kwargs): volume = fake_volume.fake_volume_obj(context, id=volume_id, display_description='displaydesc', **kwargs) if volume_id == fake.VOLUME3_ID: volume.status = 'in-use' else: volume.status = 'available' if volume_id == ENCRYPTED_VOLUME_ID: volume['encryption_key_id'] = fake.ENCRYPTION_KEY_ID volume.volume_type = fake_volume.fake_volume_type_obj( context, name=v3_fakes.DEFAULT_VOL_TYPE) return volume def fake_upload_volume_to_image_service(self, context, volume, metadata, force): ret = {"id": volume['id'], "updated_at": datetime.datetime(1, 1, 1, 1, 1, 1), "status": 'uploading', "display_description": volume['display_description'], "size": volume['size'], "volume_type": volume['volume_type'], "image_id": fake.IMAGE_ID, "container_format": 'bare', "disk_format": 'raw', "image_name": 'image_name'} return ret @ddt.ddt class VolumeImageActionsTest(test.TestCase): def setUp(self): super(VolumeImageActionsTest, self).setUp() self.controller = volume_actions.VolumeActionsController() self.context = context.RequestContext(fake.USER_ID, fake.PROJECT_ID, is_admin=False) self.maxDiff = 2000 def _get_os_volume_upload_image(self): vol = { "container_format": 'bare', "disk_format": 'raw', "image_name": 'image_name', "force": True} body = {"os-volume_upload_image": vol} return body def fake_image_service_create(self, *args): ret = { 'status': 'queued', 'name': 'image_name', 'deleted': False, 'container_format': 'bare', 'created_at': datetime.datetime(1, 1, 1, 1, 1, 1), 'disk_format': 'raw', 'updated_at': datetime.datetime(1, 1, 1, 1, 1, 1), 'id': fake.IMAGE_ID, 'min_ram': 0, 'checksum': None, 'min_disk': 0, 'deleted_at': None, 'properties': {'x_billing_code_license': '246254365'}, 'size': 0} return ret def fake_image_service_create_with_params(self, *args): ret = { 'status': 'queued', 'name': 'image_name', 'deleted': False, 'container_format': 'bare', 'created_at': datetime.datetime(1, 1, 1, 1, 1, 1), 'disk_format': 'raw', 'updated_at': datetime.datetime(1, 1, 1, 1, 1, 1), 'id': fake.IMAGE_ID, 'min_ram': 0, 'checksum': None, 'min_disk': 0, 'visibility': 'public', 'protected': True, 'deleted_at': None, 'properties': {'x_billing_code_license': '246254365'}, 'size': 0} return ret def fake_rpc_copy_volume_to_image(self, *args): pass @mock.patch.object(volume_api.API, 'get', fake_volume_get_obj) @mock.patch.object(volume_api.API, "copy_volume_to_image", fake_upload_volume_to_image_service) def test_copy_volume_to_image(self): id = fake.VOLUME_ID img = {"container_format": 'bare', "disk_format": 'raw', "image_name": 'image_name', "force": True} body = {"os-volume_upload_image": img} req = fakes.HTTPRequest.blank('/v3/%s/volumes/%s/action' % (fake.PROJECT_ID, id)) res_dict = self.controller._volume_upload_image(req, id, body=body) expected = {'os-volume_upload_image': {'id': id, 'updated_at': datetime.datetime(1, 1, 1, 1, 1, 1), 'status': 'uploading', 'display_description': 'displaydesc', 'size': 1, 'volume_type': fake_volume.fake_volume_type_obj( context, name='vol_type_name'), 'image_id': fake.IMAGE_ID, 'container_format': 'bare', 'disk_format': 'raw', 'image_name': 'image_name'}} self.assertDictEqual(expected, res_dict) @mock.patch.object(volume_api.API, 'get', fake_volume_get_obj) @mock.patch.object(volume_api.API, "copy_volume_to_image") def test_copy_volume_to_image_when_image_conversion_not_allowed( self, mock_copy_vol_to_img): """Make sure exception is converted properly.""" 
mock_copy_vol_to_img.side_effect = exception.ImageConversionNotAllowed id = fake.VOLUME_ID img = {"container_format": 'ova', "disk_format": 'vhdx', "image_name": 'image_name', "force": True} body = {"os-volume_upload_image": img} req = fakes.HTTPRequest.blank('/v3/%s/volumes/%s/action' % (fake.PROJECT_ID, id)) self.assertRaises(webob.exc.HTTPBadRequest, self.controller._volume_upload_image, req, id, body=body) @mock.patch.object(volume_api.API, 'get', fake_volume_get_obj) @mock.patch.object(volume_api.API, "copy_volume_to_image") def test_check_image_metadata_copy_encrypted_volume_to_image( self, mock_copy_vol): """Make sure the encryption image properties exit the controller.""" # all we're interested in is that the 'metadata' dict contains the # correct data, so we do this bad hack to smuggle it out in the # controller's response to make it easy to access def really_fake_upload_volume(context, volume, metadata, force): return metadata mock_copy_vol.side_effect = really_fake_upload_volume FAKE_ID = 'fake-encryption-key-id' # the controller does a lazy init of the key manager, so we # need a 2-level mock here self.mock_object(self.controller, '_key_mgr') self.controller._key_mgr.return_value = not None self.mock_object(self.controller._key_manager, 'store') self.controller._key_manager.store.return_value = FAKE_ID vol_id = ENCRYPTED_VOLUME_ID img = {"container_format": 'bare', "disk_format": 'raw', "image_name": 'image_name', "force": True} body = {"os-volume_upload_image": img} req = fakes.HTTPRequest.blank('/v3/%s/volumes/%s/action' % (fake.PROJECT_ID, vol_id)) res_dict = self.controller._volume_upload_image(req, vol_id, body=body) sent_meta = res_dict['os-volume_upload_image'] self.assertIn('cinder_encryption_key_id', sent_meta) self.assertEqual(FAKE_ID, sent_meta['cinder_encryption_key_id']) self.assertIn('cinder_encryption_key_deletion_policy', sent_meta) self.assertEqual('on_image_deletion', sent_meta['cinder_encryption_key_deletion_policy']) @mock.patch.object(volume_api.API, 'get', fake_volume_get_obj) @mock.patch.object(volume_api.API, "copy_volume_to_image") def test_check_image_metadata_copy_nonencrypted_volume_to_image( self, mock_copy_vol): """Make sure no encryption image properties are sent.""" def really_fake_upload_volume(context, volume, metadata, force): return metadata mock_copy_vol.side_effect = really_fake_upload_volume id = fake.VOLUME_ID img = {"container_format": 'bare', "disk_format": 'raw', "image_name": 'image_name', "force": True} body = {"os-volume_upload_image": img} req = fakes.HTTPRequest.blank('/v3/%s/volumes/%s/action' % (fake.PROJECT_ID, id)) res_dict = self.controller._volume_upload_image(req, id, body=body) sent_meta = res_dict['os-volume_upload_image'] self.assertNotIn('cinder_encryption_key_id', sent_meta) self.assertNotIn('cinder_encryption_key_deletion_policy', sent_meta) def test_copy_volume_to_image_volumenotfound(self): def fake_volume_get_raise_exc(self, context, volume_id): raise exception.VolumeNotFound(volume_id=volume_id) self.mock_object(volume_api.API, 'get', fake_volume_get_raise_exc) id = fake.WILL_NOT_BE_FOUND_ID vol = {"container_format": 'bare', "disk_format": 'raw', "image_name": 'image_name', "force": True} body = {"os-volume_upload_image": vol} req = fakes.HTTPRequest.blank('/v3/%s/volumes/%s/action' % (fake.PROJECT_ID, id)) self.assertRaises(exception.VolumeNotFound, self.controller._volume_upload_image, req, id, body=body) @mock.patch.object(volume_api.API, 'get', fake_volume_get_obj) @mock.patch.object(volume_api.API, 
'copy_volume_to_image', side_effect=exception.InvalidVolume(reason='blah')) def test_copy_volume_to_image_invalidvolume(self, mock_copy): id = fake.VOLUME2_ID vol = {"container_format": 'bare', "disk_format": 'raw', "image_name": 'image_name', "force": True} body = {"os-volume_upload_image": vol} req = fakes.HTTPRequest.blank('/v3/%s/volumes/%s/action' % (fake.PROJECT_ID, fake.VOLUME_ID)) self.assertRaises(webob.exc.HTTPBadRequest, self.controller._volume_upload_image, req, id, body=body) @mock.patch.object(volume_api.API, 'get', fake_volume_get) def test_copy_volume_to_image_invalid_disk_format(self): id = fake.IMAGE_ID vol = {"container_format": 'bare', "disk_format": 'iso', "image_name": 'image_name', "force": True} body = {"os-volume_upload_image": vol} req = fakes.HTTPRequest.blank('/v3/%s/volumes/%s/action' % (fake.PROJECT_ID, id)) self.assertRaises(exception.ValidationError, self.controller._volume_upload_image, req, id, body=body) @mock.patch.object(volume_api.API, 'get', fake_volume_get_obj) def test_copy_volume_to_image_bad_disk_format_for_encrypted_vol(self): id = ENCRYPTED_VOLUME_ID vol = {"container_format": 'bare', "disk_format": 'qcow2', "image_name": 'image_name', "force": True} body = {"os-volume_upload_image": vol} req = fakes.HTTPRequest.blank('/v3/%s/volumes/%s/action' % (fake.PROJECT_ID, id)) self.assertRaises(webob.exc.HTTPBadRequest, self.controller._volume_upload_image, req, id, body=body) @mock.patch.object(volume_api.API, 'get', fake_volume_get_obj) def test_copy_volume_to_image_bad_container_format_for_encrypted_vol(self): id = ENCRYPTED_VOLUME_ID vol = {"container_format": 'ovf', "disk_format": 'raw', "image_name": 'image_name', "force": True} body = {"os-volume_upload_image": vol} req = fakes.HTTPRequest.blank('/v3/%s/volumes/%s/action' % (fake.PROJECT_ID, id)) self.assertRaises(webob.exc.HTTPBadRequest, self.controller._volume_upload_image, req, id, body=body) @mock.patch.object(volume_api.API, "copy_volume_to_image") def test_copy_volume_to_image_disk_format_ploop(self, mock_copy_to_image): volume = utils.create_volume(self.context, metadata={'test': 'test'}) img = {"container_format": 'bare', "disk_format": 'ploop', "image_name": 'image_name'} body = {"os-volume_upload_image": img} req = fakes.HTTPRequest.blank('/v3/%s/volumes/%s/action' % (fake.PROJECT_ID, volume.id)) image_metadata = {'container_format': 'bare', 'disk_format': 'ploop', 'name': 'image_name'} self.controller._volume_upload_image(req, volume.id, body=body) mock_copy_to_image.assert_called_once_with( req.environ['cinder.context'], volume, image_metadata, False) @mock.patch.object(volume_api.API, 'get', fake_volume_get_obj) @mock.patch.object(volume_api.API, 'copy_volume_to_image', side_effect=ValueError) def test_copy_volume_to_image_valueerror(self, mock_copy): id = fake.VOLUME2_ID vol = {"container_format": 'bare', "disk_format": 'raw', "image_name": 'image_name', "force": True} body = {"os-volume_upload_image": vol} req = fakes.HTTPRequest.blank('/v3/%s/volumes/%s/action' % (fake.PROJECT_ID, fake.VOLUME_ID)) self.assertRaises(webob.exc.HTTPBadRequest, self.controller._volume_upload_image, req, id, body=body) @mock.patch.object(volume_api.API, 'get', fake_volume_get_obj) @mock.patch.object(volume_api.API, 'copy_volume_to_image', side_effect=messaging.RemoteError) def test_copy_volume_to_image_remoteerror(self, mock_copy): id = fake.VOLUME2_ID vol = {"container_format": 'bare', "disk_format": 'raw', "image_name": 'image_name', "force": True} body = {"os-volume_upload_image": vol} req = 
fakes.HTTPRequest.blank('/v3/%s/volumes/%s/action' % (fake.PROJECT_ID, id)) self.assertRaises(webob.exc.HTTPBadRequest, self.controller._volume_upload_image, req, id, body=body) @mock.patch.object(volume_api.API, 'get', fake_volume_get_obj) @mock.patch.object(volume_api.API, 'copy_volume_to_image', side_effect=messaging.RemoteError) @ddt.data( ({"image_name": 'image_name', "protected": None}, exception.ValidationError), ({"image_name": 'image_name', "protected": ' '}, exception.ValidationError), ({"image_name": 'image_name', "protected": 'test'}, exception.ValidationError), ({"image_name": 'image_name', "visibility": 'test'}, exception.ValidationError), ({"image_name": 'image_name', "visibility": ' '}, exception.ValidationError), ({"image_name": 'image_name', "visibility": None}, exception.ValidationError)) @ddt.unpack def test_copy_volume_to_image_invalid_request_body( self, vol, exception, mock_copy): id = fake.VOLUME2_ID body = {"os-volume_upload_image": vol} req = fakes.HTTPRequest.blank('/v3/%s/volumes/%s/action' % (fake.PROJECT_ID, id)) req.api_version_request = api_version.APIVersionRequest("3.1") self.assertRaises(exception, self.controller._volume_upload_image, req, id, body=body) def test_volume_upload_image_typeerror(self): id = fake.VOLUME2_ID body = {"os-volume_upload_image_fake": "fake"} req = webob.Request.blank('/v3/%s/volumes/%s/action' % (fake.PROJECT_ID, id)) req.method = 'POST' req.headers['Content-Type'] = 'application/json' req.body = jsonutils.dump_as_bytes(body) res = req.get_response(fakes.wsgi_app(fake_auth_context=self.context)) self.assertEqual(HTTPStatus.BAD_REQUEST, res.status_int) def test_volume_upload_image_without_type(self): id = fake.VOLUME2_ID vol = {"container_format": 'bare', "disk_format": 'raw', "image_name": None, "force": True} body = {"": vol} req = webob.Request.blank('/v3/%s/volumes/%s/action' % (fake.PROJECT_ID, id)) req.method = 'POST' req.headers['Content-Type'] = 'application/json' req.body = jsonutils.dump_as_bytes(body) res = req.get_response(fakes.wsgi_app(fake_auth_context=self.context)) self.assertEqual(HTTPStatus.BAD_REQUEST, res.status_int) @mock.patch.object(volume_api.API, 'get', fake_volume_get) def test_extend_volume_valueerror(self): id = fake.VOLUME2_ID body = {'os-extend': {'new_size': 'fake'}} req = fakes.HTTPRequest.blank('/v3/%s/volumes/%s/action' % (fake.PROJECT_ID, id)) self.assertRaises(exception.ValidationError, self.controller._extend, req, id, body=body) @ddt.data({'version': mv.get_prior_version(mv.VOLUME_EXTEND_INUSE), 'status': 'available'}, {'version': mv.get_prior_version(mv.VOLUME_EXTEND_INUSE), 'status': 'in-use'}, {'version': mv.VOLUME_EXTEND_INUSE, 'status': 'available'}, {'version': mv.VOLUME_EXTEND_INUSE, 'status': 'in-use'}) @ddt.unpack def test_extend_attached_volume(self, version, status): vol = db.volume_create(self.context, {'size': 1, 'project_id': fake.PROJECT_ID, 'status': status, 'volume_type_id': fake.VOLUME_TYPE_ID}) self.mock_object(volume_api.API, 'get', return_value=vol) mock_extend = self.mock_object(volume_api.API, '_extend') body = {"os-extend": {"new_size": 2}} req = fakes.HTTPRequest.blank('/v3/%s/volumes/%s/action' % (fake.PROJECT_ID, vol['id'])) req.api_version_request = mv.get_api_version(version) self.controller._extend(req, vol['id'], body=body) if version == mv.VOLUME_EXTEND_INUSE and status == 'in-use': mock_extend.assert_called_with(req.environ['cinder.context'], vol, 2, attached=True) else: mock_extend.assert_called_with(req.environ['cinder.context'], vol, 2, attached=False) def 
test_extend_volume_no_exist(self): vol_id = fake.WILL_NOT_BE_FOUND_ID body = {'os-extend': {'new_size': 5}} req = fakes.HTTPRequest.blank('/v3/%s/volumes/%s/action' % (fake.PROJECT_ID, vol_id)) self.assertRaises(exception.VolumeNotFound, self.controller._extend, req, vol_id, body=body) def test_copy_volume_to_image_notimagename(self): id = fake.VOLUME2_ID vol = {"container_format": 'bare', "disk_format": 'raw', "image_name": None, "force": True} body = {"os-volume_upload_image": vol} req = fakes.HTTPRequest.blank('/v3/%s/volumes/%s/action' % (fake.PROJECT_ID, id)) self.assertRaises(exception.ValidationError, self.controller._volume_upload_image, req, id, body=body) def _create_volume_with_type(self, status='available', display_description='displaydesc', **kwargs): admin_ctxt = context.get_admin_context() vol_type = db.volume_type_create(admin_ctxt, {'name': 'vol_name'}) self.addCleanup(db.volume_type_destroy, admin_ctxt, vol_type.id) volume = utils.create_volume(self.context, volume_type_id=vol_type.id, status=status, display_description=display_description, **kwargs) self.addCleanup(db.volume_destroy, admin_ctxt, volume.id) expected = { 'os-volume_upload_image': { 'id': volume.id, 'updated_at': mock.ANY, 'status': 'uploading', 'display_description': 'displaydesc', 'size': 1, 'volume_type': mock.ANY, 'image_id': fake.IMAGE_ID, 'container_format': 'bare', 'disk_format': 'raw', 'image_name': 'image_name' } } return volume, expected @mock.patch.object(volume_api.API, "get_volume_image_metadata") @mock.patch.object(glance.GlanceImageService, "create") @mock.patch.object(volume_rpcapi.VolumeAPI, "copy_volume_to_image") def test_copy_volume_to_image_with_protected_prop( self, mock_copy_to_image, mock_create, mock_get_image_metadata): """Test create image from volume with protected properties.""" volume, expected = self._create_volume_with_type() mock_get_image_metadata.return_value = {"volume_id": volume.id, "key": "x_billing_license", "value": "246254365"} mock_create.side_effect = self.fake_image_service_create req = fakes.HTTPRequest.blank( '/v3/%s/volumes/%s/action' % (fake.PROJECT_ID, volume.id), use_admin_context=self.context.is_admin) body = self._get_os_volume_upload_image() res_dict = self.controller._volume_upload_image(req, volume.id, body=body) self.assertDictEqual(expected, res_dict) vol_db = objects.Volume.get_by_id(self.context, volume.id) self.assertEqual('uploading', vol_db.status) self.assertEqual('available', vol_db.previous_status) @mock.patch.object(volume_api.API, 'get', fake_volume_get_obj) def test_copy_volume_to_image_public_not_authorized(self): """Test unauthorized create public image from volume.""" id = 'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee' req = fakes.HTTPRequest.blank('/v3/tenant1/volumes/%s/action' % id) req.environ['cinder.context'].is_admin = False req.headers = mv.get_mv_header(mv.UPLOAD_IMAGE_PARAMS) req.api_version_request = mv.get_api_version(mv.UPLOAD_IMAGE_PARAMS) body = self._get_os_volume_upload_image() body['os-volume_upload_image']['visibility'] = 'public' self.assertRaises(exception.PolicyNotAuthorized, self.controller._volume_upload_image, req, id, body=body) @mock.patch.object(volume_api.API, "get_volume_image_metadata") @mock.patch.object(glance.GlanceImageService, "create") @mock.patch.object(volume_rpcapi.VolumeAPI, "copy_volume_to_image") def test_copy_volume_to_image_without_glance_metadata( self, mock_copy_to_image, mock_create, mock_get_image_metadata): """Test create image from volume if volume is created without image. 
In this case volume glance metadata will not be available for this volume. """ volume, expected = self._create_volume_with_type() mock_get_image_metadata.side_effect = \ exception.GlanceMetadataNotFound(id=volume.id) mock_create.side_effect = self.fake_image_service_create req = fakes.HTTPRequest.blank( '/v3/%s/volumes/%s/action' % (fake.PROJECT_ID, volume.id), use_admin_context=self.context.is_admin) body = self._get_os_volume_upload_image() res_dict = self.controller._volume_upload_image(req, volume.id, body=body) self.assertDictEqual(expected, res_dict) vol_db = objects.Volume.get_by_id(self.context, volume.id) self.assertEqual('uploading', vol_db.status) self.assertEqual('available', vol_db.previous_status) @mock.patch.object(volume_api.API, "get_volume_image_metadata") @mock.patch.object(glance.GlanceImageService, "create") @mock.patch.object(volume_rpcapi.VolumeAPI, "copy_volume_to_image") def test_copy_volume_to_image_fail_image_create( self, mock_copy_to_image, mock_create, mock_get_image_metadata): """Test create image from volume if create image fails. In this case API will rollback to previous status. """ volume = utils.create_volume(self.context) mock_get_image_metadata.return_value = {} mock_create.side_effect = Exception() req = fakes.HTTPRequest.blank( '/v3/fakeproject/volumes/%s/action' % volume.id) body = self._get_os_volume_upload_image() self.assertRaises(webob.exc.HTTPBadRequest, self.controller._volume_upload_image, req, volume.id, body=body) self.assertFalse(mock_copy_to_image.called) vol_db = objects.Volume.get_by_id(self.context, volume.id) self.assertEqual('available', vol_db.status) self.assertIsNone(vol_db.previous_status) db.volume_destroy(context.get_admin_context(), volume.id) @mock.patch.object(volume_api.API, "get_volume_image_metadata") @mock.patch.object(glance.GlanceImageService, "create") @mock.patch.object(volume_rpcapi.VolumeAPI, "copy_volume_to_image") def test_copy_volume_to_image_in_use_no_force( self, mock_copy_to_image, mock_create, mock_get_image_metadata): """Test create image from in-use volume. In this case API will fail because we are not passing force. """ volume = utils.create_volume(self.context, status='in-use') mock_get_image_metadata.return_value = {} mock_create.side_effect = self.fake_image_service_create req = fakes.HTTPRequest.blank( '/v3/fakeproject/volumes/%s/action' % volume.id) body = self._get_os_volume_upload_image() body['os-volume_upload_image']['force'] = False self.assertRaises(webob.exc.HTTPBadRequest, self.controller._volume_upload_image, req, volume.id, body=body) self.assertFalse(mock_copy_to_image.called) vol_db = objects.Volume.get_by_id(self.context, volume.id) self.assertEqual('in-use', vol_db.status) self.assertIsNone(vol_db.previous_status) db.volume_destroy(context.get_admin_context(), volume.id) @mock.patch.object(volume_api.API, "get_volume_image_metadata") @mock.patch.object(glance.GlanceImageService, "create") @mock.patch.object(volume_rpcapi.VolumeAPI, "copy_volume_to_image") def test_copy_volume_to_image_in_use_with_force( self, mock_copy_to_image, mock_create, mock_get_image_metadata): """Test create image from in-use volume. In this case API will succeed only when CON.enable_force_upload is enabled. 
""" volume, expected = self._create_volume_with_type(status='in-use') mock_get_image_metadata.return_value = {} mock_create.side_effect = self.fake_image_service_create req = fakes.HTTPRequest.blank( '/v3/fakeproject/volumes/%s/action' % volume.id, use_admin_context=self.context.is_admin) body = self._get_os_volume_upload_image() self.assertRaises(webob.exc.HTTPBadRequest, self.controller._volume_upload_image, req, volume.id, body=body) self.assertFalse(mock_copy_to_image.called) vol_db = objects.Volume.get_by_id(self.context, volume.id) self.assertEqual('in-use', vol_db.status) self.assertIsNone(vol_db.previous_status) CONF.set_default('enable_force_upload', True) res_dict = self.controller._volume_upload_image(req, volume.id, body=body) self.assertDictEqual(expected, res_dict) vol_db = objects.Volume.get_by_id(self.context, volume.id) self.assertEqual('uploading', vol_db.status) self.assertEqual('in-use', vol_db.previous_status) @mock.patch.object(volume_api.API, "get_volume_image_metadata") @mock.patch.object(glance.GlanceImageService, "create") @mock.patch.object(volume_rpcapi.VolumeAPI, "copy_volume_to_image") def test_copy_volume_to_image_without_protected_prop( self, mock_volume_to_image, mock_create, mock_get_image_metadata): """Test protected property is not defined with the root image.""" volume, expected = self._create_volume_with_type() mock_get_image_metadata.return_value = {} mock_create.side_effect = self.fake_image_service_create req = fakes.HTTPRequest.blank( '/v3/fakeproject/volumes/%s/action' % volume.id, use_admin_context=self.context.is_admin) body = self._get_os_volume_upload_image() res_dict = self.controller._volume_upload_image(req, volume.id, body=body) self.assertDictEqual(expected, res_dict) vol_db = objects.Volume.get_by_id(self.context, volume.id) self.assertEqual('uploading', vol_db.status) self.assertEqual('available', vol_db.previous_status) @mock.patch.object(glance.GlanceImageService, "create") @mock.patch.object(volume_rpcapi.VolumeAPI, "copy_volume_to_image") def test_copy_volume_to_image_without_core_prop( self, mock_copy_to_image, mock_create): """Test glance_core_properties defined in cinder.conf is empty.""" volume, expected = self._create_volume_with_type() mock_create.side_effect = self.fake_image_service_create self.override_config('glance_core_properties', []) req = fakes.HTTPRequest.blank( '/v3/fakeproject/volumes/%s/action' % volume.id, use_admin_context=self.context.is_admin) body = self._get_os_volume_upload_image() res_dict = self.controller._volume_upload_image(req, volume.id, body=body) self.assertDictEqual(expected, res_dict) vol_db = objects.Volume.get_by_id(self.context, volume.id) self.assertEqual('uploading', vol_db.status) self.assertEqual('available', vol_db.previous_status) @mock.patch.object(volume_api.API, "get_volume_image_metadata") @mock.patch.object(glance.GlanceImageService, "create") @mock.patch.object(volume_rpcapi.VolumeAPI, "copy_volume_to_image") def test_copy_volume_to_image_volume_type_none( self, mock_copy_volume_to_image, mock_create, mock_get_volume_image_metadata): """Test create image from volume with none type volume.""" volume, expected = self._create_volume_with_type() mock_create.side_effect = self.fake_image_service_create req = fakes.HTTPRequest.blank( '/v3/%s/volumes/%s/action' % (fake.PROJECT_ID, volume.id), use_admin_context=self.context.is_admin) body = self._get_os_volume_upload_image() res_dict = self.controller._volume_upload_image(req, volume.id, body=body) self.assertDictEqual(expected, res_dict) 
@mock.patch.object(volume_api.API, "get_volume_image_metadata") @mock.patch.object(glance.GlanceImageService, "create") @mock.patch.object(volume_rpcapi.VolumeAPI, "copy_volume_to_image") def test_copy_volume_to_image_version_with_params( self, mock_copy_volume_to_image, mock_create, mock_get_volume_image_metadata): """Test create image from volume with protected properties.""" volume, expected = self._create_volume_with_type() mock_get_volume_image_metadata.return_value = { "volume_id": volume.id, "key": "x_billing_code_license", "value": "246254365"} mock_create.side_effect = self.fake_image_service_create_with_params mock_copy_volume_to_image.side_effect = \ self.fake_rpc_copy_volume_to_image req = fakes.HTTPRequest.blank( '/v3/%s/volumes/%s/action' % (fake.PROJECT_ID, volume.id), use_admin_context=self.context.is_admin) req.environ['cinder.context'].is_admin = True req.headers = mv.get_mv_header(mv.UPLOAD_IMAGE_PARAMS) req.api_version_request = mv.get_api_version(mv.UPLOAD_IMAGE_PARAMS) body = self._get_os_volume_upload_image() body = self._get_os_volume_upload_image() body['os-volume_upload_image']['visibility'] = 'public' body['os-volume_upload_image']['protected'] = True res_dict = self.controller._volume_upload_image(req, volume.id, body=body) expected['os-volume_upload_image'].update(visibility='public', protected=True) self.assertDictEqual(expected, res_dict) @mock.patch.object(volume_api.API, "get_volume_image_metadata") @mock.patch.object(glance.GlanceImageService, "create") @mock.patch.object(volume_rpcapi.VolumeAPI, "copy_volume_to_image") def test_copy_volume_to_image_vhd( self, mock_copy_to_image, mock_create, mock_get_image_metadata): """Test create image from volume with vhd disk format""" volume, expected = self._create_volume_with_type() mock_get_image_metadata.return_value = {} mock_create.side_effect = self.fake_image_service_create req = fakes.HTTPRequest.blank( '/v3/fakeproject/volumes/%s/action' % volume.id) body = self._get_os_volume_upload_image() body['os-volume_upload_image']['force'] = True body['os-volume_upload_image']['container_format'] = 'bare' body['os-volume_upload_image']['disk_format'] = 'vhd' res_dict = self.controller._volume_upload_image(req, volume.id, body=body) self.assertDictEqual(expected, res_dict) vol_db = objects.Volume.get_by_id(self.context, volume.id) self.assertEqual('uploading', vol_db.status) self.assertEqual('available', vol_db.previous_status) @mock.patch.object(volume_api.API, "get_volume_image_metadata") @mock.patch.object(glance.GlanceImageService, "create") @mock.patch.object(volume_rpcapi.VolumeAPI, "copy_volume_to_image") def test_copy_volume_to_image_vhdx( self, mock_copy_to_image, mock_create, mock_get_image_metadata): """Test create image from volume with vhdx disk format""" volume, expected = self._create_volume_with_type() mock_get_image_metadata.return_value = {} mock_create.side_effect = self.fake_image_service_create req = fakes.HTTPRequest.blank( '/v3/fakeproject/volumes/%s/action' % volume.id) body = self._get_os_volume_upload_image() body['os-volume_upload_image']['force'] = True body['os-volume_upload_image']['container_format'] = 'bare' body['os-volume_upload_image']['disk_format'] = 'vhdx' res_dict = self.controller._volume_upload_image(req, volume.id, body=body) self.assertDictEqual(expected, res_dict) vol_db = objects.Volume.get_by_id(self.context, volume.id) self.assertEqual('uploading', vol_db.status) self.assertEqual('available', vol_db.previous_status) def _build_reimage_req(self, body, vol_id, 
version=mv.SUPPORT_REIMAGE_VOLUME): req = fakes.HTTPRequest.blank( '/v3/%s/volumes/%s/action' % (fake.PROJECT_ID, id)) req.method = "POST" req.body = jsonutils.dump_as_bytes(body) req.environ['cinder.context'] = self.context req.api_version_request = mv.get_api_version(version) req.headers["content-type"] = "application/json" return req @ddt.data(None, False, True) @mock.patch.object(volume_actions.VolumeActionsController, '_get_image_snapshot_and_check_size') @mock.patch.object(volume_api.API, "reimage") def test_volume_reimage( self, reimage_reserved, mock_image, mock_get_img_snap): vol = utils.create_volume(self.context) body = {"os-reimage": {"image_id": fake.IMAGE_ID}} if reimage_reserved is not None: body["os-reimage"]["reimage_reserved"] = reimage_reserved req = self._build_reimage_req(body, vol.id) self.controller._reimage(req, vol.id, body=body) @mock.patch.object(volume_api.API, "reimage") def test_volume_reimage_invaild_params(self, mock_image): vol = utils.create_volume(self.context) body = {"os-reimage": {"image_id": fake.IMAGE_ID, "reimage_reserved": 'wrong'}} req = self._build_reimage_req(body, vol) self.assertRaises(exception.ValidationError, self.controller._reimage, req, vol.id, body=body) def test_volume_reimage_before_3_68(self): vol = utils.create_volume(self.context) body = {"os-reimage": {"image_id": fake.IMAGE_ID}} req = self._build_reimage_req(body, vol.id, version="3.67") self.assertRaises(exception.VersionNotFoundForAPIMethod, self.controller._reimage, req, vol.id, body=body) @mock.patch.object(volume_actions.VolumeActionsController, '_get_image_snapshot_and_check_size') def test_reimage_volume_invalid_status(self, mock_get_img_snap): def fake_reimage_volume(*args, **kwargs): msg = "Volume status must be available." raise exception.InvalidVolume(reason=msg) self.mock_object(volume.api.API, 'reimage', fake_reimage_volume) vol = utils.create_volume(self.context) body = {"os-reimage": {"image_id": fake.IMAGE_ID}} req = self._build_reimage_req(body, vol) self.assertRaises(webob.exc.HTTPBadRequest, self.controller._reimage, req, vol.id, body=body) @mock.patch.object(volume_actions.VolumeActionsController, '_get_image_snapshot_and_check_size') @mock.patch('cinder.context.RequestContext.authorize') def test_reimage_volume_attach_more_than_one_server(self, mock_authorize, mock_get_img_snap): vol = utils.create_volume(self.context) va_objs = [objects.VolumeAttachment(context=self.context, id=i) for i in [fake.OBJECT_ID, fake.OBJECT2_ID, fake.OBJECT3_ID]] va_list = objects.VolumeAttachmentList(context=self.context, objects=va_objs) vol.volume_attachment = va_list self.mock_object(volume_api.API, 'get', return_value=vol) body = {"os-reimage": {"image_id": fake.IMAGE_ID}} req = self._build_reimage_req(body, vol) self.assertRaises(webob.exc.HTTPConflict, self.controller._reimage, req, vol.id, body=body) @mock.patch.object(volume_api.API, 'get_snapshot') @mock.patch.object(volume_api.API, 'get', fake_volume_get_obj) @mock.patch.object(glance, 'get_default_image_service') @mock.patch.object(volume_api.API, "reimage") def test_volume_reimage_image_snapshot( self, mock_image, mock_image_service, mock_get_snap): vol = utils.create_volume(self.context) image_meta = { 'properties': { 'block_device_mapping': [ { 'source_type': 'snapshot', 'boot_index': 0, 'volume_size': 1, } ] } } mock_image_service.return_value = mock.MagicMock() mock_image_service.return_value.show.return_value = image_meta body = {"os-reimage": {"image_id": fake.IMAGE_ID}} req = self._build_reimage_req(body, 
vol.id) self.controller._reimage(req, vol.id, body=body) @mock.patch.object(volume_api.API, 'get', fake_volume_get_obj) @mock.patch.object(glance, 'get_default_image_service') @mock.patch.object(volume_api.API, "reimage") def test_volume_reimage_image_snapshot_size_mismatch( self, mock_image, mock_image_service): vol = utils.create_volume(self.context) image_meta = { 'properties': { 'block_device_mapping': [ { 'source_type': 'snapshot', 'boot_index': 0, 'volume_size': 2, } ] } } mock_image_service.return_value = mock.MagicMock() mock_image_service.return_value.show.return_value = image_meta body = {"os-reimage": {"image_id": fake.IMAGE_ID}} req = self._build_reimage_req(body, vol.id) self.assertRaises(webob.exc.HTTPBadRequest, self.controller._reimage, req, vol.id, body=body) @mock.patch.object(volume_api.API, 'get_snapshot') @mock.patch.object(volume_api.API, 'get', fake_volume_get_obj) @mock.patch.object(glance, 'get_default_image_service') @mock.patch.object(volume_api.API, "reimage") def test_volume_reimage_image_snapshot_snap_not_found( self, mock_image, mock_image_service, mock_get_snap): vol = utils.create_volume(self.context) image_meta = { 'properties': { 'block_device_mapping': [ { 'source_type': 'snapshot', 'boot_index': 0, 'volume_size': 1, } ] } } mock_image_service.return_value = mock.MagicMock() mock_image_service.return_value.show.return_value = image_meta mock_get_snap.side_effect = exception.NotFound body = {"os-reimage": {"image_id": fake.IMAGE_ID}} req = self._build_reimage_req(body, vol.id) self.assertRaises(webob.exc.HTTPNotFound, self.controller._reimage, req, vol.id, body=body) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/api/contrib/test_volume_encryption_metadata.py0000664000175000017500000002257400000000000027731 0ustar00zuulzuul00000000000000# Copyright (c) 2013 The Johns Hopkins University/Applied Physics Laboratory # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
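# Illustrative sketch only (hypothetical constants, not used by the tests
# below): this module exercises the volume encryption metadata API, i.e.
# GET /v3/{project_id}/volumes/{volume_id}/encryption and the per-field
# variant .../encryption/{key}. Based on the expectations asserted in
# test_index() and test_index_volume_not_encrypted_type(), the index call
# returns roughly the following shapes.

# For a volume of an encrypted type (values mirror
# fake_volume_type_encryption() below; the key id is the volume's own
# encryption_key_id):
_EXAMPLE_ENCRYPTED_INDEX_BODY = {
    "encryption_key_id": "<encryption key id>",
    "control_location": "front-end",
    "cipher": "cipher",
    "provider": "nova.volume.encryptors.base.VolumeEncryptor",
    "key_size": 256,
}

# For a volume whose type has no encryption spec the call still succeeds,
# but only a null key id is reported:
_EXAMPLE_UNENCRYPTED_INDEX_BODY = {"encryption_key_id": None}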
from http import HTTPStatus from oslo_serialization import jsonutils import webob from cinder.api.contrib import volume_encryption_metadata from cinder import context from cinder import db from cinder.objects import fields from cinder.tests.unit.api import fakes from cinder.tests.unit import fake_constants as fake from cinder.tests.unit import test def return_volume_type_encryption_metadata(context, volume_type_id): return fake_volume_type_encryption() def fake_volume_type_encryption(): values = { 'cipher': 'cipher', 'key_size': 256, 'provider': 'nova.volume.encryptors.base.VolumeEncryptor', 'volume_type_id': fake.VOLUME_TYPE_ID, 'control_location': 'front-end', } return values class VolumeEncryptionMetadataTest(test.TestCase): @staticmethod def _create_volume(context, display_name='test_volume', display_description='this is a test volume', status='creating', availability_zone='fake_az', host='fake_host', size=1, encryption_key_id=fake.ENCRYPTION_KEY_ID): """Create a volume object.""" volume = { 'size': size, 'user_id': fake.USER_ID, 'project_id': fake.PROJECT_ID, 'status': status, 'display_name': display_name, 'display_description': display_description, 'attach_status': fields.VolumeAttachStatus.DETACHED, 'availability_zone': availability_zone, 'host': host, 'encryption_key_id': encryption_key_id, 'volume_type_id': fake.VOLUME_TYPE_ID } return db.volume_create(context, volume)['id'] def setUp(self): super(VolumeEncryptionMetadataTest, self).setUp() self.controller = (volume_encryption_metadata. VolumeEncryptionMetadataController()) self.mock_object(db.sqlalchemy.api, '_volume_type_encryption_get', return_volume_type_encryption_metadata) self.ctxt = context.RequestContext(fake.USER_ID, fake.PROJECT_ID) self.volume_id = self._create_volume(self.ctxt) self.addCleanup(db.volume_destroy, self.ctxt.elevated(), self.volume_id) def test_index(self): req = webob.Request.blank('/v3/%s/volumes/%s/encryption' % ( fake.PROJECT_ID, self.volume_id)) res = req.get_response(fakes.wsgi_app(fake_auth_context=self.ctxt)) self.assertEqual(HTTPStatus.OK, res.status_code) res_dict = jsonutils.loads(res.body) expected = { "encryption_key_id": fake.ENCRYPTION_KEY_ID, "control_location": "front-end", "cipher": "cipher", "provider": "nova.volume.encryptors.base.VolumeEncryptor", "key_size": 256, } self.assertEqual(expected, res_dict) def test_index_bad_tenant_id(self): req = webob.Request.blank('/v3/%s/volumes/%s/encryption' % ( fake.WILL_NOT_BE_FOUND_ID, self.volume_id)) res = req.get_response(fakes.wsgi_app(fake_auth_context=self.ctxt)) self.assertEqual(HTTPStatus.BAD_REQUEST, res.status_code) res_dict = jsonutils.loads(res.body) expected = {'badRequest': {'code': HTTPStatus.BAD_REQUEST, 'message': 'Malformed request url'}} self.assertEqual(expected, res_dict) def test_index_bad_volume_id(self): bad_volume_id = fake.WILL_NOT_BE_FOUND_ID req = webob.Request.blank('/v3/%s/volumes/%s/encryption' % ( fake.PROJECT_ID, bad_volume_id)) res = req.get_response(fakes.wsgi_app(fake_auth_context=self.ctxt)) self.assertEqual(HTTPStatus.NOT_FOUND, res.status_code) res_dict = jsonutils.loads(res.body) expected = {'itemNotFound': {'code': HTTPStatus.NOT_FOUND, 'message': 'Volume %s could not be found.' 
% bad_volume_id}} self.assertEqual(expected, res_dict) def test_show_key(self): req = webob.Request.blank('/v3/%s/volumes/%s/encryption/' 'encryption_key_id' % ( fake.PROJECT_ID, self.volume_id)) res = req.get_response(fakes.wsgi_app(fake_auth_context=self.ctxt)) self.assertEqual(HTTPStatus.OK, res.status_code) self.assertEqual(fake.ENCRYPTION_KEY_ID, res.body.decode()) def test_show_control(self): req = webob.Request.blank('/v3/%s/volumes/%s/encryption/' 'control_location' % ( fake.PROJECT_ID, self.volume_id)) res = req.get_response(fakes.wsgi_app(fake_auth_context=self.ctxt)) self.assertEqual(HTTPStatus.OK, res.status_code) self.assertEqual(b'front-end', res.body) def test_show_provider(self): req = webob.Request.blank('/v3/%s/volumes/%s/encryption/' 'provider' % ( fake.PROJECT_ID, self.volume_id)) res = req.get_response(fakes.wsgi_app(fake_auth_context=self.ctxt)) self.assertEqual(HTTPStatus.OK, res.status_code) self.assertEqual(b'nova.volume.encryptors.base.VolumeEncryptor', res.body) def test_show_bad_tenant_id(self): req = webob.Request.blank('/v3/%s/volumes/%s/encryption/' 'encryption_key_id' % (fake.WILL_NOT_BE_FOUND_ID, self.volume_id)) res = req.get_response(fakes.wsgi_app(fake_auth_context=self.ctxt)) self.assertEqual(HTTPStatus.BAD_REQUEST, res.status_code) res_dict = jsonutils.loads(res.body) expected = {'badRequest': {'code': HTTPStatus.BAD_REQUEST, 'message': 'Malformed request url'}} self.assertEqual(expected, res_dict) def test_show_bad_volume_id(self): bad_volume_id = fake.WILL_NOT_BE_FOUND_ID req = webob.Request.blank('/v3/%s/volumes/%s/encryption/' 'encryption_key_id' % ( fake.PROJECT_ID, bad_volume_id)) res = req.get_response(fakes.wsgi_app(fake_auth_context=self.ctxt)) self.assertEqual(HTTPStatus.NOT_FOUND, res.status_code) res_dict = jsonutils.loads(res.body) expected = {'itemNotFound': {'code': HTTPStatus.NOT_FOUND, 'message': 'Volume %s could not be found.' 
% bad_volume_id}} self.assertEqual(expected, res_dict) def test_retrieve_key_admin(self): ctxt = context.RequestContext(fake.USER_ID, fake.PROJECT_ID, is_admin=True) req = webob.Request.blank('/v3/%s/volumes/%s/encryption/' 'encryption_key_id' % ( fake.PROJECT_ID, self.volume_id)) res = req.get_response(fakes.wsgi_app(fake_auth_context=ctxt)) self.assertEqual(HTTPStatus.OK, res.status_code) self.assertEqual(fake.ENCRYPTION_KEY_ID, res.body.decode()) def test_show_volume_not_encrypted_type(self): self.mock_object(db.sqlalchemy.api, '_volume_type_encryption_get', return_value=None) volume_id = self._create_volume(self.ctxt, encryption_key_id=None) self.addCleanup(db.volume_destroy, self.ctxt.elevated(), volume_id) req = webob.Request.blank('/v3/%s/volumes/%s/encryption/' 'encryption_key_id' % ( fake.PROJECT_ID, volume_id)) res = req.get_response(fakes.wsgi_app(fake_auth_context=self.ctxt)) self.assertEqual(HTTPStatus.OK, res.status_code) self.assertEqual(0, len(res.body)) def test_index_volume_not_encrypted_type(self): self.mock_object(db.sqlalchemy.api, '_volume_type_encryption_get', return_value=None) volume_id = self._create_volume(self.ctxt, encryption_key_id=None) self.addCleanup(db.volume_destroy, self.ctxt.elevated(), volume_id) req = webob.Request.blank('/v3/%s/volumes/%s/encryption' % ( fake.PROJECT_ID, volume_id)) res = req.get_response(fakes.wsgi_app(fake_auth_context=self.ctxt)) self.assertEqual(HTTPStatus.OK, res.status_code) res_dict = jsonutils.loads(res.body) expected = { 'encryption_key_id': None } self.assertEqual(expected, res_dict) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/api/contrib/test_volume_host_attribute.py0000664000175000017500000001075600000000000026736 0ustar00zuulzuul00000000000000# Copyright 2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
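# Illustrative sketch only (hypothetical constants, not used by the tests
# below): this module covers the volume_host_attribute extension, which adds
# the backend host to volume show/detail responses for admin callers and
# omits it for everyone else. Based on test_get_volume_allowed() and
# test_get_volume_unallowed(), the same volume serializes roughly as:
_EXAMPLE_ADMIN_VIEW = {"volume": {"id": "<volume id>",
                                  "os-vol-host-attr:host": "host001"}}
_EXAMPLE_NON_ADMIN_VIEW = {"volume": {"id": "<volume id>"}}  # attr omitted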
import uuid from oslo_serialization import jsonutils from oslo_utils import timeutils import webob from cinder import context from cinder import db from cinder import objects from cinder.objects import fields from cinder.tests.unit.api import fakes from cinder.tests.unit import fake_constants as fake from cinder.tests.unit import fake_volume from cinder.tests.unit import test from cinder import volume def fake_db_volume_get(*args, **kwargs): return { 'id': fake.VOLUME_ID, 'host': 'host001', 'status': 'available', 'size': 5, 'availability_zone': 'somewhere', 'created_at': timeutils.utcnow(), 'display_name': 'anothervolume', 'display_description': 'Just another volume!', 'volume_type_id': None, 'snapshot_id': None, 'project_id': fake.PROJECT_ID, 'migration_status': None, '_name_id': fake.VOLUME2_ID, 'attach_status': fields.VolumeAttachStatus.DETACHED, } def fake_volume_api_get(*args, **kwargs): ctx = context.RequestContext(fake.USER_ID, fake.PROJECT_ID, True) db_volume = fake_db_volume_get() return fake_volume.fake_volume_obj(ctx, **db_volume) def fake_volume_get_all(*args, **kwargs): return objects.VolumeList(objects=[fake_volume_api_get()]) def app(): # no auth, just let environ['cinder.context'] pass through api = fakes.router_v3.APIRouter() mapper = fakes.urlmap.URLMap() mapper['/v3'] = api return mapper class VolumeHostAttributeTest(test.TestCase): def setUp(self): super(VolumeHostAttributeTest, self).setUp() self.mock_object(volume.api.API, 'get', fake_volume_api_get) self.mock_object(volume.api.API, 'get_all', fake_volume_get_all) self.mock_object(db, 'volume_get', fake_db_volume_get) self.UUID = uuid.uuid4() def test_get_volume_allowed(self): ctx = context.RequestContext(fake.USER_ID, fake.PROJECT_ID, True) req = webob.Request.blank('/v3/%s/volumes/%s' % ( fake.PROJECT_ID, self.UUID)) req.method = 'GET' req.environ['cinder.context'] = ctx res = req.get_response(app()) vol = jsonutils.loads(res.body)['volume'] self.assertEqual('host001', vol['os-vol-host-attr:host']) def test_get_volume_unallowed(self): ctx = context.RequestContext(fake.USER_ID, fake.PROJECT_ID, False) req = webob.Request.blank('/v3/%s/volumes/%s' % ( fake.PROJECT_ID, self.UUID)) req.method = 'GET' req.environ['cinder.context'] = ctx res = req.get_response(app()) vol = jsonutils.loads(res.body)['volume'] self.assertNotIn('os-vol-host-attr:host', vol) def test_list_detail_volumes_allowed(self): ctx = context.RequestContext(fake.USER_ID, fake.PROJECT_ID, True) req = webob.Request.blank('/v3/%s/volumes/detail' % fake.PROJECT_ID) req.method = 'GET' req.environ['cinder.context'] = ctx res = req.get_response(app()) vol = jsonutils.loads(res.body)['volumes'] self.assertEqual('host001', vol[0]['os-vol-host-attr:host']) def test_list_detail_volumes_unallowed(self): ctx = context.RequestContext(fake.USER_ID, fake.PROJECT_ID, False) req = webob.Request.blank('/v3/%s/volumes/detail' % fake.PROJECT_ID) req.method = 'GET' req.environ['cinder.context'] = ctx res = req.get_response(app()) vol = jsonutils.loads(res.body)['volumes'] self.assertNotIn('os-vol-host-attr:host', vol[0]) def test_list_simple_volumes_no_host(self): ctx = context.RequestContext(fake.USER_ID, fake.PROJECT_ID, True) req = webob.Request.blank('/v3/%s/volumes' % fake.PROJECT_ID) req.method = 'GET' req.environ['cinder.context'] = ctx res = req.get_response(app()) vol = jsonutils.loads(res.body)['volumes'] self.assertNotIn('os-vol-host-attr:host', vol[0]) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 
cinder-27.0.0/cinder/tests/unit/api/contrib/test_volume_image_metadata.py0000664000175000017500000004745100000000000026622 0ustar00zuulzuul00000000000000# Copyright 2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from http import HTTPStatus from unittest import mock import uuid from oslo_policy import policy as oslo_policy from oslo_serialization import jsonutils from oslo_utils import timeutils import webob from cinder.api.contrib import volume_image_metadata from cinder import context from cinder import db from cinder import exception from cinder import objects from cinder.objects import fields from cinder.policies import base as base_policy from cinder.policies import volume_metadata as metadata_policy from cinder import policy from cinder.tests.unit.api import fakes from cinder.tests.unit import fake_constants as fake from cinder.tests.unit import fake_volume from cinder.tests.unit import test from cinder import volume def fake_db_volume_get(*args, **kwargs): return { 'id': kwargs.get('volume_id') or fake.VOLUME_ID, 'host': 'host001', 'status': 'available', 'size': 5, 'availability_zone': 'somewhere', 'created_at': timeutils.utcnow(), 'display_name': 'anothervolume', 'display_description': 'Just another volume!', 'volume_type_id': None, 'snapshot_id': None, 'project_id': fake.PROJECT_ID, 'migration_status': None, '_name_id': fake.VOLUME2_ID, 'attach_status': fields.VolumeAttachStatus.DETACHED, } def fake_volume_api_get(*args, **kwargs): ctx = context.RequestContext(fake.USER_ID, fake.PROJECT_ID, True) db_volume = fake_db_volume_get(volume_id=kwargs.get('volume_id')) return fake_volume.fake_volume_obj(ctx, **db_volume) def fake_volume_get_all(*args, **kwargs): return objects.VolumeList(objects=[fake_volume_api_get(), fake_volume_api_get( volume_id=fake.VOLUME2_ID)]) def fake_volume_get_all_empty(*args, **kwargs): return objects.VolumeList(objects=[]) fake_image_metadata = { 'image_id': fake.IMAGE_ID, 'image_name': 'fake', 'kernel_id': 'somekernel', 'ramdisk_id': 'someramdisk', } def fake_get_volume_image_metadata(*args, **kwargs): return fake_image_metadata def return_empty_image_metadata(*args, **kwargs): return {} def volume_metadata_delete(context, volume_id, key, meta_type): pass def fake_create_volume_metadata(context, volume_id, metadata, delete, meta_type): return fake_get_volume_image_metadata() def return_volume_nonexistent(*args, **kwargs): raise exception.VolumeNotFound('bogus test message') class VolumeImageMetadataTest(test.TestCase): content_type = 'application/json' def setUp(self): super(VolumeImageMetadataTest, self).setUp() self.mock_object(volume.api.API, 'get', fake_volume_api_get) self.mock_object(volume.api.API, 'get_all', fake_volume_get_all) self.mock_object(volume.api.API, 'get_volume_image_metadata', fake_get_volume_image_metadata) self.UUID = uuid.uuid4() self.controller = (volume_image_metadata. 
VolumeImageMetadataController()) self.user_ctxt = context.RequestContext( fake.USER_ID, fake.PROJECT_ID, auth_token=True) def _make_request(self, url): req = webob.Request.blank(url) req.accept = self.content_type res = req.get_response(fakes.wsgi_app( fake_auth_context=self.user_ctxt)) return res def _get_image_metadata(self, body): return jsonutils.loads(body)['volume']['volume_image_metadata'] def _get_image_metadata_list(self, body): return [ volume['volume_image_metadata'] for volume in jsonutils.loads(body)['volumes'] if volume.get('volume_image_metadata') ] def _create_volume_and_glance_metadata(self): ctxt = context.get_admin_context() # create a bootable volume db.volume_create(ctxt, {'id': fake.VOLUME_ID, 'status': 'available', 'host': 'test', 'provider_location': '', 'size': 1, 'volume_type_id': fake.VOLUME_TYPE_ID}) db.volume_glance_metadata_create(ctxt, fake.VOLUME_ID, 'image_id', fake.IMAGE_ID) db.volume_glance_metadata_create(ctxt, fake.VOLUME_ID, 'image_name', 'fake') db.volume_glance_metadata_create(ctxt, fake.VOLUME_ID, 'kernel_id', 'somekernel') db.volume_glance_metadata_create(ctxt, fake.VOLUME_ID, 'ramdisk_id', 'someramdisk') # create an unbootable volume db.volume_create(ctxt, {'id': fake.VOLUME2_ID, 'status': 'available', 'host': 'test', 'provider_location': '', 'size': 1, 'volume_type_id': fake.VOLUME_TYPE_ID}) def test_get_volume(self): self._create_volume_and_glance_metadata() res = self._make_request('/v3/%s/volumes/%s' % ( fake.PROJECT_ID, self.UUID)) self.assertEqual(HTTPStatus.OK, res.status_int) self.assertEqual(fake_image_metadata, self._get_image_metadata(res.body)) def test_list_detail_volumes(self): self._create_volume_and_glance_metadata() res = self._make_request('/v3/%s/volumes/detail' % fake.PROJECT_ID) self.assertEqual(HTTPStatus.OK, res.status_int) self.assertEqual(fake_image_metadata, self._get_image_metadata_list(res.body)[0]) def test_list_detail_empty_volumes(self): def fake_dont_call_this(*args, **kwargs): fake_dont_call_this.called = True fake_dont_call_this.called = False self.mock_object(volume.api.API, 'get_list_volumes_image_metadata', fake_dont_call_this) self.mock_object(volume.api.API, 'get_all', fake_volume_get_all_empty) res = self._make_request('/v3/%s/volumes/detail' % fake.PROJECT_ID) self.assertEqual(HTTPStatus.OK, res.status_int) self.assertFalse(fake_dont_call_this.called) def test_list_detail_volumes_with_limit(self): ctxt = context.get_admin_context() db.volume_create(ctxt, {'id': fake.VOLUME_ID, 'status': 'available', 'host': 'test', 'provider_location': '', 'size': 1, 'volume_type_id': fake.VOLUME_TYPE_ID}) db.volume_glance_metadata_create(ctxt, fake.VOLUME_ID, 'key1', 'value1') db.volume_glance_metadata_create(ctxt, fake.VOLUME_ID, 'key2', 'value2') res = self._make_request('/v3/%s/volumes/detail?limit=1' % fake.PROJECT_ID) self.assertEqual(HTTPStatus.OK, res.status_int) self.assertEqual({'key1': 'value1', 'key2': 'value2'}, self._get_image_metadata_list(res.body)[0]) @mock.patch('cinder.objects.Volume.get_by_id') def test_create_image_metadata(self, fake_get): self.mock_object(volume.api.API, 'get_volume_image_metadata', return_empty_image_metadata) self.mock_object(db, 'volume_metadata_update', fake_create_volume_metadata) body = {"os-set_image_metadata": {"metadata": fake_image_metadata}} req = webob.Request.blank('/v3/%s/volumes/%s/action' % ( fake.PROJECT_ID, fake.VOLUME_ID)) req.method = "POST" req.body = jsonutils.dump_as_bytes(body) req.headers["content-type"] = "application/json" fake_get.return_value = {} res = 
req.get_response(fakes.wsgi_app( fake_auth_context=self.user_ctxt)) self.assertEqual(HTTPStatus.OK, res.status_int) self.assertEqual(fake_image_metadata, jsonutils.loads(res.body)["metadata"]) # Test for value > 255 body = {"os-set_image_metadata": { "metadata": {"key": "v" * 260}} } req = webob.Request.blank('/v3/%s/volumes/%s/action' % ( fake.PROJECT_ID, fake.VOLUME_ID)) req.method = "POST" req.body = jsonutils.dump_as_bytes(body) req.headers["content-type"] = "application/json" fake_get.return_value = {} res = req.get_response(fakes.wsgi_app( fake_auth_context=self.user_ctxt)) self.assertEqual(HTTPStatus.OK, res.status_int) # This is a weird one ... take a supplementary unicode # char that requires 4 bytes, which will give us a short # string in terms of character count, but a long string # in terms of bytes, and make this string be exactly # 65535 bytes in length. This should be OK. char4bytes = "\N{CJK UNIFIED IDEOGRAPH-29D98}" self.assertEqual(1, len(char4bytes)) self.assertEqual(4, len(char4bytes.encode('utf-8'))) str65535bytes = char4bytes * 16383 + '123' self.assertLess(len(str65535bytes), 65535) self.assertEqual(65535, len(str65535bytes.encode('utf-8'))) body = {"os-set_image_metadata": { "metadata": {"key": str65535bytes}} } req = webob.Request.blank('/v3/%s/volumes/%s/action' % ( fake.PROJECT_ID, fake.VOLUME_ID)) req.method = "POST" req.body = jsonutils.dump_as_bytes(body) req.headers["content-type"] = "application/json" fake_get.return_value = {} res = req.get_response(fakes.wsgi_app( fake_auth_context=self.user_ctxt)) self.assertEqual(HTTPStatus.OK, res.status_int) @mock.patch('cinder.objects.Volume.get_by_id') def test_create_image_metadata_policy_not_authorized(self, fake_get): rules = { metadata_policy.IMAGE_METADATA_POLICY: base_policy.RULE_ADMIN_API } policy.set_rules(oslo_policy.Rules.from_dict(rules)) self.addCleanup(policy.reset) fake_get.return_value = {} req = fakes.HTTPRequest.blank('/v3/%s/volumes/%s/action' % ( fake.PROJECT_ID, fake.VOLUME_ID), use_admin_context=False) req.method = 'POST' req.content_type = "application/json" body = {"os-set_image_metadata": { "metadata": {"image_name": "fake"}} } req.body = jsonutils.dump_as_bytes(body) self.assertRaises(exception.PolicyNotAuthorized, self.controller.create, req, fake.VOLUME_ID, body=body) @mock.patch('cinder.objects.Volume.get_by_id') def test_create_with_keys_case_insensitive(self, fake_get): # If the keys in uppercase_and_lowercase, should return the one # which server added self.mock_object(volume.api.API, 'get_volume_image_metadata', return_empty_image_metadata) self.mock_object(db, 'volume_metadata_update', fake_create_volume_metadata) fake_get.return_value = {} body = { "os-set_image_metadata": { "metadata": { "Image_Id": "someid", "image_name": "fake", "Kernel_id": "somekernel", "ramdisk_id": "someramdisk" }, }, } req = webob.Request.blank('/v3/%s/volumes/%s/action' % ( fake.PROJECT_ID, fake.VOLUME_ID)) req.method = 'POST' req.body = jsonutils.dump_as_bytes(body) req.headers["content-type"] = "application/json" res = req.get_response(fakes.wsgi_app( fake_auth_context=self.user_ctxt)) self.assertEqual(HTTPStatus.OK, res.status_int) self.assertEqual(fake_image_metadata, jsonutils.loads(res.body)["metadata"]) @mock.patch('cinder.objects.Volume.get_by_id') def test_create_empty_body(self, fake_get): req = fakes.HTTPRequest.blank('/v3/%s/volumes/%s/action' % ( fake.PROJECT_ID, fake.VOLUME_ID)) req.method = 'POST' req.headers["content-type"] = "application/json" fake_get.return_value = {} 
self.assertRaises(exception.ValidationError, self.controller.create, req, fake.VOLUME_ID, body=None) def test_create_nonexistent_volume(self): self.mock_object(volume.api.API, 'get', return_volume_nonexistent) req = fakes.HTTPRequest.blank('/v3/%s/volumes/%s/action' % ( fake.PROJECT_ID, fake.VOLUME_ID)) req.method = 'POST' req.content_type = "application/json" body = {"os-set_image_metadata": { "metadata": {"image_name": "fake"}} } req.body = jsonutils.dump_as_bytes(body) self.assertRaises(exception.VolumeNotFound, self.controller.create, req, fake.VOLUME_ID, body=body) @mock.patch('cinder.objects.Volume.get_by_id') def test_invalid_metadata_items_on_create(self, fake_get): self.mock_object(db, 'volume_metadata_update', fake_create_volume_metadata) req = fakes.HTTPRequest.blank('/v3/%s/volumes/%s/action' % ( fake.PROJECT_ID, fake.VOLUME_ID)) req.method = 'POST' req.headers["content-type"] = "application/json" data = {"os-set_image_metadata": { "metadata": {"a" * 260: "value1"}} } fake_get.return_value = {} # Test for long key req.body = jsonutils.dump_as_bytes(data) self.assertRaises(exception.ValidationError, self.controller.create, req, fake.VOLUME_ID, body=data) # Test for very long value data = {"os-set_image_metadata": { "metadata": {"key": "v" * 65550}} } req.body = jsonutils.dump_as_bytes(data) self.assertRaises(exception.ValidationError, self.controller.create, req, fake.VOLUME_ID, body=data) # Test for very long utf8 value data = {"os-set_image_metadata": { "metadata": {"key": "á" * 32775}} } req.body = jsonutils.dump_as_bytes(data) self.assertRaises(exception.ValidationError, self.controller.create, req, fake.VOLUME_ID, body=data) # Test a short unicode string that actually exceeds # the allowed byte count char4bytes = "\N{CJK UNIFIED IDEOGRAPH-29D98}" str65536bytes = char4bytes * 16384 self.assertEqual(65536, len(str65536bytes.encode('utf-8'))) self.assertLess(len(str65536bytes), 65535) body = {"os-set_image_metadata": { "metadata": {"key": str65536bytes}} } req.body = jsonutils.dump_as_bytes(body) self.assertRaises(exception.ValidationError, self.controller.create, req, fake.VOLUME_ID, body=data) # Test for empty key. 
data = {"os-set_image_metadata": { "metadata": {"": "value1"}} } req.body = jsonutils.dump_as_bytes(data) self.assertRaises(exception.ValidationError, self.controller.create, req, fake.VOLUME_ID, body=data) @mock.patch('cinder.objects.Volume.get_by_id') def test_delete(self, fake_get): self.mock_object(db, 'volume_metadata_delete', volume_metadata_delete) body = {"os-unset_image_metadata": { "key": "ramdisk_id"} } req = webob.Request.blank('/v3/%s/volumes/%s/action' % ( fake.PROJECT_ID, fake.VOLUME_ID)) req.method = 'POST' req.body = jsonutils.dump_as_bytes(body) req.headers["content-type"] = "application/json" fake_get.return_value = {} res = req.get_response(fakes.wsgi_app( fake_auth_context=self.user_ctxt)) self.assertEqual(HTTPStatus.OK, res.status_int) @mock.patch('cinder.objects.Volume.get_by_id') def test_delete_image_metadata_policy_not_authorized(self, fake_get): rules = { metadata_policy.IMAGE_METADATA_POLICY: base_policy.RULE_ADMIN_API } policy.set_rules(oslo_policy.Rules.from_dict(rules)) self.addCleanup(policy.reset) fake_get.return_value = {} req = fakes.HTTPRequest.blank('/v3/%s/volumes/%s/action' % ( fake.PROJECT_ID, fake.VOLUME_ID), use_admin_context=False) req.method = 'POST' req.content_type = "application/json" body = {"os-unset_image_metadata": { "metadata": {"image_name": "fake"}} } req.body = jsonutils.dump_as_bytes(body) self.assertRaises(exception.ValidationError, self.controller.delete, req, fake.VOLUME_ID, body=None) @mock.patch('cinder.objects.Volume.get_by_id') def test_delete_meta_not_found(self, fake_get): data = {"os-unset_image_metadata": { "key": "invalid_id"} } req = fakes.HTTPRequest.blank('/v3/%s/volumes/%s/action' % ( fake.PROJECT_ID, fake.VOLUME_ID)) req.method = 'POST' req.body = jsonutils.dump_as_bytes(data) req.headers["content-type"] = "application/json" fake_get.return_value = {} self.assertRaises(exception.GlanceMetadataNotFound, self.controller.delete, req, fake.VOLUME_ID, body=data) @mock.patch('cinder.objects.Volume.get_by_id') def test_delete_nonexistent_volume(self, fake_get): self.mock_object(db, 'volume_metadata_delete', return_volume_nonexistent) body = {"os-unset_image_metadata": { "key": "fake"} } req = fakes.HTTPRequest.blank('/v3/%s/volumes/%s/action' % ( fake.PROJECT_ID, fake.VOLUME_ID)) req.method = 'POST' req.body = jsonutils.dump_as_bytes(body) req.headers["content-type"] = "application/json" fake_get.return_value = {} self.assertRaises(exception.GlanceMetadataNotFound, self.controller.delete, req, fake.VOLUME_ID, body=body) def test_delete_empty_body(self): req = fakes.HTTPRequest.blank('/v3/%s/volumes/%s/action' % ( fake.PROJECT_ID, fake.VOLUME_ID)) req.method = 'POST' req.headers["content-type"] = "application/json" self.assertRaises(exception.ValidationError, self.controller.delete, req, fake.VOLUME_ID, body=None) def test_show_image_metadata(self): body = {"os-show_image_metadata": None} req = webob.Request.blank('/v3/%s/volumes/%s/action' % ( fake.PROJECT_ID, fake.VOLUME_ID)) req.method = 'POST' req.body = jsonutils.dump_as_bytes(body) req.headers["content-type"] = "application/json" res = req.get_response(fakes.wsgi_app( fake_auth_context=self.user_ctxt)) self.assertEqual(HTTPStatus.OK, res.status_int) self.assertEqual(fake_image_metadata, jsonutils.loads(res.body)["metadata"]) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/api/contrib/test_volume_manage.py0000664000175000017500000006761700000000000025136 
0ustar00zuulzuul00000000000000# Copyright 2014 IBM Corp. # Copyright (c) 2016 Stratoscale, Ltd. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from http import HTTPStatus from unittest import mock from urllib.parse import urlencode import ddt from oslo_config import cfg import oslo_messaging as messaging from oslo_serialization import jsonutils import webob from cinder.api.contrib import volume_manage from cinder.api import microversions as mv from cinder.api.openstack import api_version_request as api_version from cinder import context from cinder import exception from cinder.objects import fields from cinder.tests.unit.api import fakes from cinder.tests.unit import fake_constants as fake from cinder.tests.unit import fake_volume from cinder.tests.unit import test CONF = cfg.CONF def app(): # no auth, just let environ['cinder.context'] pass through api = fakes.router_v3.APIRouter() mapper = fakes.urlmap.URLMap() mapper['/v3'] = api return mapper def service_get(context, service_id, backend_match_level=None, host=None, **filters): """Replacement for db.sqlalchemy.api.service_get. We mock the db.sqlalchemy.api.service_get method to return something for a specific host, and raise an exception for anything else. We don't use the returned data (the code under test just use the call to check for existence of a host, so the content returned doesn't matter. """ if host == 'host_ok': return {'disabled': False, 'uuid': 'a3a593da-7f8d-4bb7-8b4c-f2bc1e0b4824'} if host == 'host_disabled': return {'disabled': True, 'uuid': '4200b32b-0bf9-436c-86b2-0675f6ac218e'} raise exception.ServiceNotFound(service_id=host) # Some of the tests check that volume types are correctly validated during a # volume manage operation. This data structure represents an existing volume # type. NOTE: cinder.db.sqlalchemy.volume_type_get() returns a dict describing # a specific volume type; this dict always contains an 'extra_specs' key. fake_vt = { 'id': fake.VOLUME_TYPE_ID, 'name': 'good_fakevt', 'extra_specs': {}, } fake_encrypted_vt = { 'id': fake.VOLUME_TYPE2_ID, 'name': 'fake_encrypted_vt', 'extra_specs': {}, 'encryption': { 'cipher': 'fake_cipher', 'control_location': 'front-end', 'key_size': 256, 'provider': 'fake_provider'}, } def vt_get_volume_type_by_name(context, name): """Replacement for cinder.volume.volume_types.get_volume_type_by_name. Overrides cinder.volume.volume_types.get_volume_type_by_name to return the volume type based on inspection of our fake structure, rather than going to the Cinder DB. """ if name == fake_vt['name']: return fake_vt if name == fake_encrypted_vt['name']: return fake_encrypted_vt raise exception.VolumeTypeNotFoundByName(volume_type_name=name) def vt_get_volume_type(context, vt_id): """Replacement for cinder.volume.volume_types.get_volume_type. Overrides cinder.volume.volume_types.get_volume_type to return the volume type based on inspection of our fake structure, rather than going to the Cinder DB. 
""" if vt_id == fake_vt['id']: return fake_vt if vt_id == fake_encrypted_vt['id']: return fake_encrypted_vt raise exception.VolumeTypeNotFound(volume_type_id=vt_id) def vt_get_default_volume_type(context): """Replacement for cinder.volume.volume_types.get_default_volume_type. If you want to use a specific fake volume type defined above, set the flag for default_volume_type to the name of that fake type. If you want to raise VolumeTypeDefaultMisconfiguredError, then set the flag for default_volume_type to None. Otherwise, for *any* non-None value of default_volume_type, this will return our generic fake volume type. (NOTE: by default, CONF.default_volume_type is '__DEFAULT__'.) """ default_vt_name = CONF.default_volume_type if not default_vt_name: raise exception.VolumeTypeDefaultMisconfiguredError( volume_type_name='from vt_get_default_volume_type') try: default_vt = vt_get_volume_type_by_name(context, default_vt_name) except exception.VolumeTypeNotFoundByName: default_vt = fake_vt return default_vt def api_manage(*args, **kwargs): """Replacement for cinder.volume.api.API.manage_existing. Overrides cinder.volume.api.API.manage_existing to return some fake volume data structure, rather than initiating a real volume managing. Note that we don't try to replicate any passed-in information (e.g. name, volume type) in the returned structure. """ ctx = context.RequestContext(fake.USER_ID, fake.PROJECT_ID, True) vol = { 'status': 'creating', 'display_name': 'fake_name', 'availability_zone': 'nova', 'tenant_id': fake.PROJECT_ID, 'id': fake.VOLUME_ID, 'volume_type': None, 'snapshot_id': None, 'user_id': fake.USER_ID, 'size': 0, 'attach_status': fields.VolumeAttachStatus.DETACHED, 'volume_type_id': None} return fake_volume.fake_volume_obj(ctx, **vol) def api_manage_new(*args, **kwargs): volume = api_manage() volume.status = 'managing' return volume def api_get_manageable_volumes(*args, **kwargs): """Replacement for cinder.volume.api.API.get_manageable_volumes.""" vols = [ {'reference': {'source-name': 'volume-%s' % fake.VOLUME_ID}, 'size': 4, 'extra_info': 'qos_setting:high', 'safe_to_manage': False, 'cinder_id': fake.VOLUME_ID, 'reason_not_safe': 'volume in use'}, {'reference': {'source-name': 'myvol'}, 'size': 5, 'extra_info': 'qos_setting:low', 'safe_to_manage': True, 'cinder_id': None, 'reason_not_safe': None}] return vols @ddt.ddt @mock.patch('cinder.db.sqlalchemy.api.service_get', service_get) @mock.patch('cinder.volume.volume_types.get_default_volume_type', vt_get_default_volume_type) @mock.patch('cinder.volume.volume_types.get_volume_type_by_name', vt_get_volume_type_by_name) @mock.patch('cinder.volume.volume_types.get_volume_type', vt_get_volume_type) class VolumeManageTest(test.TestCase): """Test cases for cinder/api/contrib/volume_manage.py The API extension adds a POST /os-volume-manage API that is passed a cinder host name, and a driver-specific reference parameter. If everything is passed correctly, then the cinder.volume.api.API.manage_existing method is invoked to manage an existing storage object on the host. In this set of test cases, we are ensuring that the code correctly parses the request structure and raises the correct exceptions when things are not right, and calls down into cinder.volume.api.API.manage_existing with the correct arguments. 
""" def setUp(self): super(VolumeManageTest, self).setUp() self._admin_ctxt = context.RequestContext(fake.USER_ID, fake.PROJECT_ID, is_admin=True) self._non_admin_ctxt = context.RequestContext(fake.USER_ID, fake.PROJECT_ID, is_admin=False) self.controller = volume_manage.VolumeManageController() def _get_resp_post(self, body, version='3.11'): """Helper to execute a POST os-volume-manage API call.""" req = webob.Request.blank('/v3/%s/os-volume-manage' % fake.PROJECT_ID) req.method = 'POST' req.headers['Content-Type'] = 'application/json' req.environ['cinder.context'] = self._admin_ctxt req.headers["OpenStack-API-Version"] = "volume " + version req.api_version_request = api_version.APIVersionRequest(version) req.body = jsonutils.dump_as_bytes(body) res = req.get_response(app()) return res @ddt.data({'host': 'host_ok'}, {'host': 'user@host#backend:/vol_path'}, {'host': 'host@backend#parts+of+pool'}, {'host': 'host@backend#[dead:beef::cafe]:/vol01'}) @ddt.unpack @mock.patch('cinder.volume.api.API.manage_existing', wraps=api_manage) def test_manage_volume_ok(self, mock_api_manage, host): """Test successful manage volume execution. Tests for correct operation when valid arguments are passed in the request body. We ensure that cinder.volume.api.API.manage_existing got called with the correct arguments, and that we return the correct HTTP code to the caller. """ body = {'volume': {'host': host, 'ref': 'fake_ref'}} res = self._get_resp_post(body) self.assertEqual(HTTPStatus.ACCEPTED, res.status_int) # Check that the manage API was called with the correct arguments. self.assertEqual(1, mock_api_manage.call_count) args = mock_api_manage.call_args[0] self.assertEqual(body['volume']['host'], args[1]) self.assertEqual(body['volume']['ref'], args[3]) def test_manage_volume_not_ok(self): """Test not successful manage volume execution. Tests for error raised when invalid arguments are passed in the request body. """ body = {'volume': {'host': 'host not ok', 'ref': 'fake_ref'}} res = self._get_resp_post(body) self.assertEqual(HTTPStatus.BAD_REQUEST, res.status_int) def _get_resp_create(self, body, version=mv.BASE_VERSION): url = '/v3/%s/os-volume-manage' % fake.PROJECT_ID req = webob.Request.blank(url, base_url='http://localhost.com' + url) req.method = 'POST' req.headers = mv.get_mv_header(version) req.headers['Content-Type'] = 'application/json' req.environ['cinder.context'] = self._admin_ctxt req.body = jsonutils.dump_as_bytes(body) req.api_version_request = mv.get_api_version(version) res = self.controller.create(req, body=body) return res @mock.patch('cinder.volume.api.API.manage_existing', wraps=api_manage) def test_manage_volume_ok_cluster(self, mock_api_manage): body = {'volume': {'cluster': 'cluster', 'ref': 'fake_ref'}} res = self._get_resp_create(body, mv.VOLUME_MIGRATE_CLUSTER) self.assertEqual(['volume'], list(res.keys())) # Check that the manage API was called with the correct arguments. 
self.assertEqual(1, mock_api_manage.call_count) args = mock_api_manage.call_args[0] self.assertIsNone(args[1]) self.assertEqual(body['volume']['cluster'], args[2]) self.assertEqual(body['volume']['ref'], args[3]) def test_manage_volume_fail_host_cluster(self): body = {'volume': {'host': 'host_ok', 'cluster': 'cluster', 'ref': 'fake_ref'}} self.assertRaises(exception.InvalidInput, self._get_resp_create, body, mv.VOLUME_MIGRATE_CLUSTER) def test_manage_volume_missing_host(self): """Test correct failure when host is not specified.""" body = {'volume': {'ref': 'fake_ref'}} res = self._get_resp_post(body) self.assertEqual(HTTPStatus.BAD_REQUEST, res.status_int) @mock.patch('cinder.objects.Service.get_by_args') def test_manage_volume_service_not_found_on_host(self, mock_service): """Test correct failure when host having no volume service on it.""" body = {'volume': {'host': 'host_ok', 'ref': 'fake_ref'}} mock_service.side_effect = exception.ServiceNotFound( service_id='cinder-volume', host='host_ok') res = self._get_resp_post(body) self.assertEqual(HTTPStatus.BAD_REQUEST, res.status_int) def test_manage_volume_missing_ref(self): """Test correct failure when the ref is not specified.""" body = {'volume': {'host': 'host_ok'}} res = self._get_resp_post(body) self.assertEqual(HTTPStatus.BAD_REQUEST, res.status_int) def test_manage_volume_with_invalid_bootable(self): """Test correct failure when invalid bool value is specified.""" body = {'volume': {'host': 'host_ok', 'ref': 'fake_ref', 'bootable': 'InvalidBool'}} res = self._get_resp_post(body) self.assertEqual(HTTPStatus.BAD_REQUEST, res.status_int) @mock.patch('cinder.objects.service.Service.is_up', return_value=True, new_callable=mock.PropertyMock) def test_manage_volume_disabled(self, mock_is_up): """Test manage volume failure due to disabled service.""" body = {'volume': {'host': 'host_disabled', 'ref': 'fake_ref'}} res = self._get_resp_post(body) self.assertEqual(HTTPStatus.BAD_REQUEST, res.status_int, res) self.assertEqual(exception.ServiceUnavailable.message, res.json['badRequest']['message']) mock_is_up.assert_not_called() @mock.patch('cinder.objects.service.Service.is_up', return_value=False, new_callable=mock.PropertyMock) def test_manage_volume_is_down(self, mock_is_up): """Test manage volume failure due to down service.""" body = {'volume': {'host': 'host_ok', 'ref': 'fake_ref'}} res = self._get_resp_post(body) self.assertEqual(HTTPStatus.BAD_REQUEST, res.status_int, res) self.assertEqual(exception.ServiceUnavailable.message, res.json['badRequest']['message']) self.assertTrue(mock_is_up.called) @mock.patch('cinder.volume.api.API.manage_existing', api_manage) def test_manage_volume_volume_type_by_uuid(self): """Tests for correct operation when a volume type is specified by ID. We wrap cinder.volume.api.API.manage_existing so that managing is not actually attempted. """ body = {'volume': {'host': 'host_ok', 'ref': 'fake_ref', 'volume_type': fake.VOLUME_TYPE_ID, 'bootable': True}} res = self._get_resp_post(body) self.assertEqual(HTTPStatus.ACCEPTED, res.status_int) @mock.patch('cinder.volume.api.API.manage_existing', api_manage) def test_manage_volume_volume_type_by_name(self): """Tests for correct operation when a volume type is specified by name. We wrap cinder.volume.api.API.manage_existing so that managing is not actually attempted. 
""" body = {'volume': {'host': 'host_ok', 'ref': 'fake_ref', 'volume_type': 'good_fakevt'}} res = self._get_resp_post(body) self.assertEqual(HTTPStatus.ACCEPTED, res.status_int) def test_manage_volume_bad_volume_type_by_uuid(self): """Test failure on nonexistent volume type specified by ID.""" body = {'volume': {'host': 'host_ok', 'ref': 'fake_ref', 'volume_type': fake.WILL_NOT_BE_FOUND_ID}} res = self._get_resp_post(body) self.assertEqual(HTTPStatus.BAD_REQUEST, res.status_int) def test_manage_volume_bad_volume_type_by_name(self): """Test failure on nonexistent volume type specified by name.""" body = {'volume': {'host': 'host_ok', 'ref': 'fake_ref', 'volume_type': 'bad_fakevt'}} res = self._get_resp_post(body) self.assertEqual(HTTPStatus.BAD_REQUEST, res.status_int) def _get_resp_get(self, host, detailed, paging, admin=True): """Helper to execute a GET os-volume-manage API call.""" params = {'host': host} if paging: params.update({'marker': '1234', 'limit': 10, 'offset': 4, 'sort': 'reference:asc'}) query_string = "?%s" % urlencode(params) detail = "" if detailed: detail = "/detail" url = "/v3/%s/os-volume-manage%s%s" % (fake.PROJECT_ID, detail, query_string) req = webob.Request.blank(url) req.method = 'GET' req.headers['Content-Type'] = 'application/json' req.environ['cinder.context'] = (self._admin_ctxt if admin else self._non_admin_ctxt) res = req.get_response(app()) return res @mock.patch('cinder.volume.api.API.get_manageable_volumes', wraps=api_get_manageable_volumes) def test_get_manageable_volumes_non_admin(self, mock_api_manageable): res = self._get_resp_get('fakehost', False, False, admin=False) self.assertEqual(HTTPStatus.FORBIDDEN, res.status_int) mock_api_manageable.assert_not_called() res = self._get_resp_get('fakehost', True, False, admin=False) self.assertEqual(HTTPStatus.FORBIDDEN, res.status_int) mock_api_manageable.assert_not_called() @mock.patch('cinder.volume.api.API.get_manageable_volumes', wraps=api_get_manageable_volumes) def test_get_manageable_volumes_ok(self, mock_api_manageable): res = self._get_resp_get('fakehost', False, True) exp = {'manageable-volumes': [{'reference': {'source-name': 'volume-%s' % fake.VOLUME_ID}, 'size': 4, 'safe_to_manage': False}, {'reference': {'source-name': 'myvol'}, 'size': 5, 'safe_to_manage': True}]} self.assertEqual(HTTPStatus.OK, res.status_int) self.assertEqual(exp, jsonutils.loads(res.body)) mock_api_manageable.assert_called_once_with( self._admin_ctxt, 'fakehost', None, limit=10, marker='1234', offset=4, sort_dirs=['asc'], sort_keys=['reference']) @mock.patch('cinder.volume.api.API.get_manageable_volumes', side_effect=messaging.RemoteError( exc_type='InvalidInput', value='marker not found: 1234')) def test_get_manageable_volumes_non_existent_marker(self, mock_api_manageable): res = self._get_resp_get('fakehost', detailed=False, paging=True) self.assertEqual(400, res.status_int) self.assertTrue(mock_api_manageable.called) @mock.patch('cinder.volume.api.API.get_manageable_volumes', wraps=api_get_manageable_volumes) def test_get_manageable_volumes_detailed_ok(self, mock_api_manageable): res = self._get_resp_get('fakehost', True, False) exp = {'manageable-volumes': [{'reference': {'source-name': 'volume-%s' % fake.VOLUME_ID}, 'size': 4, 'reason_not_safe': 'volume in use', 'cinder_id': fake.VOLUME_ID, 'safe_to_manage': False, 'extra_info': 'qos_setting:high'}, {'reference': {'source-name': 'myvol'}, 'cinder_id': None, 'size': 5, 'reason_not_safe': None, 'safe_to_manage': True, 'extra_info': 'qos_setting:low'}]} 
self.assertEqual(HTTPStatus.OK, res.status_int) self.assertEqual(exp, jsonutils.loads(res.body)) mock_api_manageable.assert_called_once_with( self._admin_ctxt, 'fakehost', None, limit=CONF.osapi_max_limit, marker=None, offset=0, sort_dirs=['desc'], sort_keys=['reference']) @mock.patch('cinder.volume.api.API.get_manageable_volumes', side_effect=messaging.RemoteError( exc_type='InvalidInput', value='marker not found: 1234')) def test_get_manageable_volumes_non_existent_marker_detailed( self, mock_api_manageable): res = self._get_resp_get('fakehost', detailed=True, paging=True) self.assertEqual(400, res.status_int) self.assertTrue(mock_api_manageable.called) @ddt.data({'a' * 256: 'a'}, {'a': 'a' * 256}, {'': 'a'}, {'a': None}, ) def test_manage_volume_with_invalid_metadata(self, value): body = {'volume': {'host': 'host_ok', 'ref': 'fake_ref', "metadata": value}} res = self._get_resp_post(body) self.assertEqual(HTTPStatus.BAD_REQUEST, res.status_int) @mock.patch('cinder.objects.service.Service.is_up', return_value=True, new_callable=mock.PropertyMock) def test_get_manageable_volumes_disabled(self, mock_is_up): res = self._get_resp_get('host_disabled', False, True) self.assertEqual(HTTPStatus.BAD_REQUEST, res.status_int, res) self.assertEqual(exception.ServiceUnavailable.message, res.json['badRequest']['message']) mock_is_up.assert_not_called() @mock.patch('cinder.objects.service.Service.is_up', return_value=False, new_callable=mock.PropertyMock) def test_get_manageable_volumes_is_down(self, mock_is_up): res = self._get_resp_get('host_ok', False, True) self.assertEqual(HTTPStatus.BAD_REQUEST, res.status_int, res) self.assertEqual(exception.ServiceUnavailable.message, res.json['badRequest']['message']) self.assertTrue(mock_is_up.called) @mock.patch('cinder.volume.api.API.manage_existing', wraps=api_manage_new) def test_manage_volume_with_creating_status(self, mock_api_manage): """Test managing volume to return 'creating' status in V3 API.""" body = {'volume': {'host': 'host_ok', 'ref': 'fake_ref'}} res = self._get_resp_post(body, mv.ETAGS) self.assertEqual(HTTPStatus.ACCEPTED, res.status_int) self.assertEqual(1, mock_api_manage.call_count) self.assertEqual('creating', jsonutils.loads(res.body)['volume']['status']) def test_negative_manage_to_encrypted_type(self): """Not allowed to manage a volume to an encrypted volume type.""" ctxt = context.RequestContext(fake.USER_ID, fake.PROJECT_ID, is_admin=True) body = {'volume': {'host': 'host_ok', 'ref': 'fake_ref', 'volume_type': fake_encrypted_vt['name']}} req = webob.Request.blank('/v3/%s/os-volume-manage' % fake.PROJECT_ID) req.method = 'POST' req.headers['Content-Type'] = 'application/json' req.body = jsonutils.dump_as_bytes(body) res = req.get_response(fakes.wsgi_app(fake_auth_context=ctxt)) self.assertEqual(HTTPStatus.BAD_REQUEST, res.status_int) def test_negative_manage_to_encrypted_default_type(self): """Fail if no vol type in request and default vol type is encrypted.""" self.flags(default_volume_type=fake_encrypted_vt['name']) ctxt = context.RequestContext(fake.USER_ID, fake.PROJECT_ID, is_admin=True) body = {'volume': {'host': 'host_ok', 'ref': 'fake_ref'}} req = webob.Request.blank('/v3/%s/os-volume-manage' % fake.PROJECT_ID) req.method = 'POST' req.headers['Content-Type'] = 'application/json' req.body = jsonutils.dump_as_bytes(body) res = req.get_response(fakes.wsgi_app(fake_auth_context=ctxt)) self.assertEqual(HTTPStatus.BAD_REQUEST, res.status_int) def test_negative_no_volume_type(self): """Fail when no volume type is available for the 
managed volume.""" self.flags(default_volume_type=None) ctxt = context.RequestContext(fake.USER_ID, fake.PROJECT_ID, is_admin=True) body = {'volume': {'host': 'host_ok', 'ref': 'fake_ref'}} req = webob.Request.blank('/v3/%s/os-volume-manage' % fake.PROJECT_ID) req.method = 'POST' req.headers['Content-Type'] = 'application/json' req.body = jsonutils.dump_as_bytes(body) res = req.get_response(fakes.wsgi_app(fake_auth_context=ctxt)) self.assertEqual(HTTPStatus.INTERNAL_SERVER_ERROR, res.status_int) @mock.patch('cinder.group.API') @mock.patch('cinder.flow_utils') @mock.patch('cinder.volume.flows.api.manage_existing.get_flow') @mock.patch('cinder.volume.api.API._get_service_by_host_cluster') def test_manage_when_default_type_is_encrypted(self, mock_get_cluster, mock_get_flow, mock_flow_utils, mock_group_api): """Default type doesn't matter if non-encrypted type is in request.""" # make an encrypted type the default volume type self.flags(default_volume_type=fake_encrypted_vt['name']) ctxt = context.RequestContext(fake.USER_ID, fake.PROJECT_ID, is_admin=True) # pass a non-encrypted volume type in the request requested_vt = fake_vt body = {'volume': {'host': 'host_ok', 'ref': 'fake_ref', 'volume_type': requested_vt['name']}} req = webob.Request.blank('/v3/%s/os-volume-manage' % fake.PROJECT_ID) req.method = 'POST' req.headers['Content-Type'] = 'application/json' req.body = jsonutils.dump_as_bytes(body) res = req.get_response(fakes.wsgi_app(fake_auth_context=ctxt)) # request should be accepted self.assertEqual(HTTPStatus.ACCEPTED, res.status_int) # make sure the volume type passed through is the specified one called_with = mock_get_flow.call_args.args[2] self.assertEqual(requested_vt['name'], called_with['volume_type']['name']) self.assertEqual(requested_vt['id'], called_with['volume_type']['id']) @mock.patch('cinder.group.API') @mock.patch('cinder.flow_utils') @mock.patch('cinder.volume.flows.api.manage_existing.get_flow') @mock.patch('cinder.volume.api.API._get_service_by_host_cluster') def test_manage_with_default_type(self, mock_get_cluster, mock_get_flow, mock_flow_utils, mock_group_api): """A non-encrypted default volume type should cause no problems.""" # make an non-encrypted type the default volume type default_vt = fake_vt self.flags(default_volume_type=default_vt['name']) ctxt = context.RequestContext(fake.USER_ID, fake.PROJECT_ID, is_admin=True) # don't pass a volume type in the request body = {'volume': {'host': 'host_ok', 'ref': 'fake_ref'}} req = webob.Request.blank('/v3/%s/os-volume-manage' % fake.PROJECT_ID) req.method = 'POST' req.headers['Content-Type'] = 'application/json' req.body = jsonutils.dump_as_bytes(body) res = req.get_response(fakes.wsgi_app(fake_auth_context=ctxt)) # request should be accepted self.assertEqual(HTTPStatus.ACCEPTED, res.status_int) # make sure the volume type passed through is the default called_with = mock_get_flow.call_args.args[2] self.assertEqual(default_vt['name'], called_with['volume_type']['name']) self.assertEqual(default_vt['id'], called_with['volume_type']['id']) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/api/contrib/test_volume_migration_status_attribute.py0000664000175000017500000001160700000000000031351 0ustar00zuulzuul00000000000000# Copyright 2013 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_serialization import jsonutils from oslo_utils import timeutils import webob from cinder import context from cinder import objects from cinder.objects import fields from cinder.tests.unit.api import fakes from cinder.tests.unit import fake_constants as fake from cinder.tests.unit import fake_volume from cinder.tests.unit import test from cinder import volume def fake_db_volume_get(*args, **kwargs): return { 'id': fake.VOLUME_ID, 'host': 'host001', 'status': 'available', 'size': 5, 'availability_zone': 'somewhere', 'created_at': timeutils.utcnow(), 'attach_status': fields.VolumeAttachStatus.DETACHED, 'display_name': 'anothervolume', 'display_description': 'Just another volume!', 'volume_type_id': None, 'snapshot_id': None, 'project_id': fake.PROJECT_ID, 'migration_status': 'migrating', '_name_id': fake.VOLUME2_ID, } def fake_volume_api_get(*args, **kwargs): ctx = context.RequestContext(fake.USER_ID, fake.PROJECT_ID, True) db_volume = fake_db_volume_get() return fake_volume.fake_volume_obj(ctx, **db_volume) def fake_volume_get_all(*args, **kwargs): return objects.VolumeList(objects=[fake_volume_api_get()]) def app(): # no auth, just let environ['cinder.context'] pass through api = fakes.router_v3.APIRouter() mapper = fakes.urlmap.URLMap() mapper['/v3'] = api return mapper class VolumeMigStatusAttributeTest(test.TestCase): def setUp(self): super(VolumeMigStatusAttributeTest, self).setUp() self.mock_object(volume.api.API, 'get', fake_volume_api_get) self.mock_object(volume.api.API, 'get_all', fake_volume_get_all) self.UUID = fake.UUID1 def test_get_volume_allowed(self): ctx = context.RequestContext(fake.USER_ID, fake.PROJECT_ID, True) req = webob.Request.blank('/v3/%s/volumes/%s' % ( fake.PROJECT_ID, self.UUID)) req.method = 'GET' req.environ['cinder.context'] = ctx res = req.get_response(app()) vol = jsonutils.loads(res.body)['volume'] self.assertEqual('migrating', vol['os-vol-mig-status-attr:migstat']) self.assertEqual(fake.VOLUME2_ID, vol['os-vol-mig-status-attr:name_id']) def test_get_volume_unallowed(self): ctx = context.RequestContext(fake.USER_ID, fake.PROJECT_ID, False) req = webob.Request.blank('/v3/%s/volumes/%s' % ( fake.PROJECT_ID, self.UUID)) req.method = 'GET' req.environ['cinder.context'] = ctx res = req.get_response(app()) vol = jsonutils.loads(res.body)['volume'] self.assertNotIn('os-vol-mig-status-attr:migstat', vol) self.assertNotIn('os-vol-mig-status-attr:name_id', vol) def test_list_detail_volumes_allowed(self): ctx = context.RequestContext(fake.USER_ID, fake.PROJECT_ID, True) req = webob.Request.blank('/v3/%s/volumes/detail' % fake.PROJECT_ID) req.method = 'GET' req.environ['cinder.context'] = ctx res = req.get_response(app()) vol = jsonutils.loads(res.body)['volumes'] self.assertEqual('migrating', vol[0]['os-vol-mig-status-attr:migstat']) self.assertEqual(fake.VOLUME2_ID, vol[0]['os-vol-mig-status-attr:name_id']) def test_list_detail_volumes_unallowed(self): ctx = context.RequestContext(fake.USER_ID, fake.PROJECT_ID, False) req = webob.Request.blank('/v3/%s/volumes/detail' % fake.PROJECT_ID) req.method = 'GET' req.environ['cinder.context'] = ctx res = 
req.get_response(app()) vol = jsonutils.loads(res.body)['volumes'] self.assertNotIn('os-vol-mig-status-attr:migstat', vol[0]) self.assertNotIn('os-vol-mig-status-attr:name_id', vol[0]) def test_list_simple_volumes_no_migration_status(self): ctx = context.RequestContext(fake.USER_ID, fake.PROJECT_ID, True) req = webob.Request.blank('/v3/%s/volumes' % fake.PROJECT_ID) req.method = 'GET' req.environ['cinder.context'] = ctx res = req.get_response(app()) vol = jsonutils.loads(res.body)['volumes'] self.assertNotIn('os-vol-mig-status-attr:migstat', vol[0]) self.assertNotIn('os-vol-mig-status-attr:name_id', vol[0]) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/api/contrib/test_volume_tenant_attribute.py0000664000175000017500000001327300000000000027247 0ustar00zuulzuul00000000000000# Copyright 2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import uuid from oslo_policy import policy as oslo_policy from oslo_serialization import jsonutils import webob from cinder import context from cinder import objects from cinder.policies.volumes import TENANT_ATTRIBUTE_POLICY from cinder import policy from cinder.tests.unit.api import fakes from cinder.tests.unit import fake_constants as fake from cinder.tests.unit import fake_volume from cinder.tests.unit import test from cinder import volume PROJECT_ID = '88fd1da4-f464-4a87-9ce5-26f2f40743b9' def fake_volume_get(*args, **kwargs): ctx = context.RequestContext(fake.USER_ID, fake.PROJECT_ID, False) vol = { 'id': fake.VOLUME_ID, 'project_id': PROJECT_ID, } return fake_volume.fake_volume_obj(ctx, **vol) def fake_volume_get_all(*args, **kwargs): return objects.VolumeList(objects=[fake_volume_get()]) def app(): # no auth, just let environ['cinder.context'] pass through api = fakes.router_v3.APIRouter() mapper = fakes.urlmap.URLMap() mapper['/v3'] = api return mapper class VolumeTenantAttributeTest(test.TestCase): def setUp(self): super(VolumeTenantAttributeTest, self).setUp() self.mock_object(volume.api.API, 'get', fake_volume_get) self.mock_object(volume.api.API, 'get_all', fake_volume_get_all) self.UUID = uuid.uuid4() policy.reset() policy.init() self.addCleanup(policy.reset) def test_get_volume_includes_tenant_id(self): allow_all = {TENANT_ATTRIBUTE_POLICY: oslo_policy._checks.TrueCheck()} policy._ENFORCER.set_rules(allow_all, overwrite=False) ctx = context.RequestContext(fake.USER_ID, fake.PROJECT_ID, True) req = webob.Request.blank('/v3/%s/volumes/%s' % ( fake.PROJECT_ID, self.UUID)) req.method = 'GET' req.environ['cinder.context'] = ctx res = req.get_response(app()) vol = jsonutils.loads(res.body)['volume'] self.assertEqual(PROJECT_ID, vol['os-vol-tenant-attr:tenant_id']) self.assertIn('os-vol-tenant-attr:tenant_id', vol) def test_get_volume_excludes_tenant_id(self): allow_none = {TENANT_ATTRIBUTE_POLICY: oslo_policy._checks.FalseCheck()} policy._ENFORCER.set_rules(allow_none, overwrite=False) ctx = context.RequestContext(fake.USER_ID, fake.PROJECT_ID, True) 
req = webob.Request.blank('/v3/%s/volumes/%s' % ( fake.PROJECT_ID, self.UUID)) req.method = 'GET' req.environ['cinder.context'] = ctx res = req.get_response(app()) vol = jsonutils.loads(res.body)['volume'] self.assertEqual(fake.VOLUME_ID, vol['id']) self.assertNotIn('os-vol-tenant-attr:tenant_id', vol) def test_list_detail_volumes_includes_tenant_id(self): allow_all = {TENANT_ATTRIBUTE_POLICY: oslo_policy._checks.TrueCheck()} policy._ENFORCER.set_rules(allow_all, overwrite=False) ctx = context.RequestContext(fake.USER_ID, fake.PROJECT_ID, False) req = webob.Request.blank('/v3/%s/volumes/detail' % fake.PROJECT_ID) req.method = 'GET' req.environ['cinder.context'] = ctx res = req.get_response(app()) vol = jsonutils.loads(res.body)['volumes'] self.assertEqual(PROJECT_ID, vol[0]['os-vol-tenant-attr:tenant_id']) def test_list_detail_volumes_excludes_tenant_id(self): allow_none = {TENANT_ATTRIBUTE_POLICY: oslo_policy._checks.FalseCheck()} policy._ENFORCER.set_rules(allow_none, overwrite=False) ctx = context.RequestContext(fake.USER_ID, fake.PROJECT_ID, False) req = webob.Request.blank('/v3/%s/volumes/detail' % fake.PROJECT_ID) req.method = 'GET' req.environ['cinder.context'] = ctx res = req.get_response(app()) vol = jsonutils.loads(res.body)['volumes'] self.assertEqual(fake.VOLUME_ID, vol[0]['id']) self.assertNotIn('os-vol-tenant-attr:tenant_id', vol[0]) def test_list_simple_volumes_never_has_tenant_id(self): allow_all = {TENANT_ATTRIBUTE_POLICY: oslo_policy._checks.TrueCheck()} policy._ENFORCER.set_rules(allow_all, overwrite=False) ctx = context.RequestContext(fake.USER_ID, fake.PROJECT_ID, True) req = webob.Request.blank('/v3/%s/volumes' % fake.PROJECT_ID) req.method = 'GET' req.environ['cinder.context'] = ctx res = req.get_response(app()) vol = jsonutils.loads(res.body)['volumes'] self.assertEqual(fake.VOLUME_ID, vol[0]['id']) self.assertNotIn('os-vol-tenant-attr:tenant_id', vol[0]) allow_none = {TENANT_ATTRIBUTE_POLICY: oslo_policy._checks.FalseCheck()} policy._ENFORCER.set_rules(allow_none, overwrite=False) ctx = context.RequestContext(fake.USER_ID, fake.PROJECT_ID, True) req = webob.Request.blank('/v3/%s/volumes' % fake.PROJECT_ID) req.method = 'GET' req.environ['cinder.context'] = ctx res = req.get_response(app()) vol = jsonutils.loads(res.body)['volumes'] self.assertEqual(fake.VOLUME_ID, vol[0]['id']) self.assertNotIn('os-vol-tenant-attr:tenant_id', vol[0]) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/api/contrib/test_volume_transfer.py0000664000175000017500000006055400000000000025523 0ustar00zuulzuul00000000000000# Copyright (C) 2012 Hewlett-Packard Development Company, L.P. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
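# --- Editor's note: illustrative sketch, not part of the original test module ---
# The test cases below drive the "old" volume transfer API (still valid in 3.0):
# a transfer is created with
#   POST /v3/{project_id}/os-volume-transfer
#        {"transfer": {"name": ..., "volume_id": ...}}
# which returns an id and an auth_key, and the receiving side accepts it with
#   POST /v3/{project_id}/os-volume-transfer/{transfer_id}/accept
#        {"accept": {"auth_key": ...}}
# The helper below only builds those request bodies the way the tests do; the
# volume id and auth key in the example are placeholders, not real objects.
from oslo_serialization import jsonutils


def build_transfer_bodies(volume_id, name, auth_key):
    """Return (create_body, accept_body) serialized as the tests send them."""
    create_body = {"transfer": {"name": name, "volume_id": volume_id}}
    accept_body = {"accept": {"auth_key": auth_key}}
    return (jsonutils.dump_as_bytes(create_body),
            jsonutils.dump_as_bytes(accept_body))


# Example usage with placeholder values:
create_payload, accept_payload = build_transfer_bodies(
    "11111111-2222-3333-4444-555555555555", "transfer1", "fake-auth-key")
# --- end editor's note ---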
"""Tests for volume transfer code.""" from http import HTTPStatus from unittest import mock from oslo_serialization import jsonutils import webob from cinder.api.contrib import volume_transfer from cinder import context from cinder import db from cinder import exception from cinder.objects import fields from cinder import quota from cinder.tests.unit.api import fakes from cinder.tests.unit.api.v3 import fakes as v3_fakes from cinder.tests.unit import fake_constants as fake from cinder.tests.unit import test import cinder.transfer class VolumeTransferAPITestCase(test.TestCase): """Test Case for the "old" transfers API, still valid in 3.0.""" def setUp(self): super(VolumeTransferAPITestCase, self).setUp() self.volume_transfer_api = cinder.transfer.API() self.controller = volume_transfer.VolumeTransferController() self.user_ctxt = context.RequestContext( fake.USER_ID, fake.PROJECT_ID, auth_token=True, is_admin=True) def _create_transfer(self, volume_id=fake.VOLUME_ID, display_name='test_transfer'): """Create a transfer object.""" return self.volume_transfer_api.create(context.get_admin_context(), volume_id, display_name) @staticmethod def _create_volume(display_name='test_volume', display_description='this is a test volume', status='available', size=1, project_id=fake.PROJECT_ID, attach_status=fields.VolumeAttachStatus.DETACHED): """Create a volume object.""" vol = {} vol['host'] = 'fake_host' vol['size'] = size vol['user_id'] = fake.USER_ID vol['project_id'] = project_id vol['status'] = status vol['display_name'] = display_name vol['display_description'] = display_description vol['attach_status'] = attach_status vol['availability_zone'] = 'fake_zone' vol['volume_type_id'] = fake.VOLUME_TYPE_ID return db.volume_create(context.get_admin_context(), vol)['id'] def test_show_transfer(self): volume_id = self._create_volume(size=5) transfer = self._create_transfer(volume_id) req = webob.Request.blank('/v3/%s/os-volume-transfer/%s' % ( fake.PROJECT_ID, transfer['id'])) req.method = 'GET' req.headers['Content-Type'] = 'application/json' res = req.get_response(fakes.wsgi_app( fake_auth_context=self.user_ctxt)) res_dict = jsonutils.loads(res.body) self.assertEqual(HTTPStatus.OK, res.status_int) self.assertEqual('test_transfer', res_dict['transfer']['name']) self.assertEqual(transfer['id'], res_dict['transfer']['id']) self.assertEqual(volume_id, res_dict['transfer']['volume_id']) db.transfer_destroy(context.get_admin_context(), transfer['id']) db.volume_destroy(context.get_admin_context(), volume_id) def test_show_transfer_with_transfer_NotFound(self): req = webob.Request.blank('/v3/%s/os-volume-transfer/%s' % ( fake.PROJECT_ID, fake.WILL_NOT_BE_FOUND_ID)) req.method = 'GET' req.headers['Content-Type'] = 'application/json' res = req.get_response(fakes.wsgi_app( fake_auth_context=self.user_ctxt)) res_dict = jsonutils.loads(res.body) self.assertEqual(HTTPStatus.NOT_FOUND, res.status_int) self.assertEqual(HTTPStatus.NOT_FOUND, res_dict['itemNotFound']['code']) self.assertEqual('Transfer %s could not be found.' 
% fake.WILL_NOT_BE_FOUND_ID, res_dict['itemNotFound']['message']) def test_list_transfers_json(self): volume_id_1 = self._create_volume(size=5) volume_id_2 = self._create_volume(size=5) transfer1 = self._create_transfer(volume_id_1) transfer2 = self._create_transfer(volume_id_2) req = webob.Request.blank('/v3/%s/os-volume-transfer' % fake.PROJECT_ID) req.method = 'GET' req.headers['Content-Type'] = 'application/json' res = req.get_response(fakes.wsgi_app( fake_auth_context=self.user_ctxt)) res_dict = jsonutils.loads(res.body) self.assertEqual(HTTPStatus.OK, res.status_int) self.assertEqual(4, len(res_dict['transfers'][0])) self.assertEqual(transfer1['id'], res_dict['transfers'][0]['id']) self.assertEqual('test_transfer', res_dict['transfers'][0]['name']) self.assertEqual(4, len(res_dict['transfers'][1])) self.assertEqual('test_transfer', res_dict['transfers'][1]['name']) db.transfer_destroy(context.get_admin_context(), transfer2['id']) db.transfer_destroy(context.get_admin_context(), transfer1['id']) db.volume_destroy(context.get_admin_context(), volume_id_1) db.volume_destroy(context.get_admin_context(), volume_id_2) def test_list_transfers_detail_json(self): volume_id_1 = self._create_volume(size=5) volume_id_2 = self._create_volume(size=5) transfer1 = self._create_transfer(volume_id_1) transfer2 = self._create_transfer(volume_id_2) req = webob.Request.blank('/v3/%s/os-volume-transfer/detail' % fake.PROJECT_ID) req.method = 'GET' req.headers['Content-Type'] = 'application/json' req.headers['Accept'] = 'application/json' res = req.get_response(fakes.wsgi_app( fake_auth_context=self.user_ctxt)) res_dict = jsonutils.loads(res.body) self.assertEqual(HTTPStatus.OK, res.status_int) self.assertEqual(5, len(res_dict['transfers'][0])) self.assertEqual('test_transfer', res_dict['transfers'][0]['name']) self.assertEqual(transfer1['id'], res_dict['transfers'][0]['id']) self.assertEqual(volume_id_1, res_dict['transfers'][0]['volume_id']) self.assertEqual(5, len(res_dict['transfers'][1])) self.assertEqual('test_transfer', res_dict['transfers'][1]['name']) self.assertEqual(transfer2['id'], res_dict['transfers'][1]['id']) self.assertEqual(volume_id_2, res_dict['transfers'][1]['volume_id']) db.transfer_destroy(context.get_admin_context(), transfer2['id']) db.transfer_destroy(context.get_admin_context(), transfer1['id']) db.volume_destroy(context.get_admin_context(), volume_id_2) db.volume_destroy(context.get_admin_context(), volume_id_1) def test_list_transfers_with_all_tenants(self): volume_id_1 = self._create_volume(size=5) volume_id_2 = self._create_volume(size=5, project_id=fake.PROJECT_ID) transfer1 = self._create_transfer(volume_id_1) transfer2 = self._create_transfer(volume_id_2) req = fakes.HTTPRequest.blank('/v3/%s/os-volume-transfer?' 
'all_tenants=1' % fake.PROJECT_ID, use_admin_context=True) res_dict = self.controller.index(req) expected = [(transfer1['id'], 'test_transfer'), (transfer2['id'], 'test_transfer')] ret = [] for item in res_dict['transfers']: ret.append((item['id'], item['name'])) self.assertEqual(set(expected), set(ret)) db.transfer_destroy(context.get_admin_context(), transfer2['id']) db.transfer_destroy(context.get_admin_context(), transfer1['id']) db.volume_destroy(context.get_admin_context(), volume_id_1) def test_create_transfer_json(self): volume_id = self._create_volume(status='available', size=5) body = {"transfer": {"name": "transfer1", "volume_id": volume_id}} req = webob.Request.blank('/v3/%s/os-volume-transfer' % fake.PROJECT_ID) req.method = 'POST' req.headers['Content-Type'] = 'application/json' req.body = jsonutils.dump_as_bytes(body) res = req.get_response(fakes.wsgi_app( fake_auth_context=self.user_ctxt)) res_dict = jsonutils.loads(res.body) self.assertEqual(HTTPStatus.ACCEPTED, res.status_int) self.assertIn('id', res_dict['transfer']) self.assertIn('auth_key', res_dict['transfer']) self.assertIn('created_at', res_dict['transfer']) self.assertIn('name', res_dict['transfer']) self.assertIn('volume_id', res_dict['transfer']) db.volume_destroy(context.get_admin_context(), volume_id) def test_create_transfer_with_no_body(self): req = webob.Request.blank('/v3/%s/os-volume-transfer' % fake.PROJECT_ID) req.body = jsonutils.dump_as_bytes(None) req.method = 'POST' req.headers['Content-Type'] = 'application/json' req.headers['Accept'] = 'application/json' res = req.get_response(fakes.wsgi_app( fake_auth_context=self.user_ctxt)) res_dict = jsonutils.loads(res.body) self.assertEqual(HTTPStatus.BAD_REQUEST, res.status_int) self.assertEqual(HTTPStatus.BAD_REQUEST, res_dict['badRequest']['code']) def test_create_transfer_with_body_KeyError(self): body = {"transfer": {"name": "transfer1"}} req = webob.Request.blank('/v3/%s/os-volume-transfer' % fake.PROJECT_ID) req.method = 'POST' req.headers['Content-Type'] = 'application/json' req.body = jsonutils.dump_as_bytes(body) res = req.get_response(fakes.wsgi_app( fake_auth_context=self.user_ctxt)) res_dict = jsonutils.loads(res.body) self.assertEqual(HTTPStatus.BAD_REQUEST, res.status_int) self.assertEqual(HTTPStatus.BAD_REQUEST, res_dict['badRequest']['code']) def test_create_transfer_with_invalid_volume_id_value(self): body = {"transfer": {"name": "transfer1", "volume_id": 1234}} req = webob.Request.blank('/v3/%s/os-volume-transfer' % fake.PROJECT_ID) req.method = 'POST' req.headers['Content-Type'] = 'application/json' req.body = jsonutils.dump_as_bytes(body) res = req.get_response(fakes.wsgi_app( fake_auth_context=self.user_ctxt)) res_dict = jsonutils.loads(res.body) self.assertEqual(HTTPStatus.BAD_REQUEST, res.status_int) self.assertEqual(HTTPStatus.BAD_REQUEST, res_dict['badRequest']['code']) def test_create_transfer_with_InvalidVolume(self): volume_id = self._create_volume(status='attached') body = {"transfer": {"name": "transfer1", "volume_id": volume_id}} req = webob.Request.blank('/v3/%s/os-volume-transfer' % fake.PROJECT_ID) req.method = 'POST' req.headers['Content-Type'] = 'application/json' req.body = jsonutils.dump_as_bytes(body) res = req.get_response(fakes.wsgi_app( fake_auth_context=self.user_ctxt)) res_dict = jsonutils.loads(res.body) self.assertEqual(HTTPStatus.BAD_REQUEST, res.status_int) self.assertEqual(HTTPStatus.BAD_REQUEST, res_dict['badRequest']['code']) self.assertEqual('Invalid volume: status must be available', 
res_dict['badRequest']['message']) db.volume_destroy(context.get_admin_context(), volume_id) def test_create_transfer_with_leading_trailing_spaces_for_name(self): volume_id = self._create_volume(status='available', size=5) body = {"transfer": {"name": " transfer1 ", "volume_id": volume_id}} req = webob.Request.blank('/v3/%s/os-volume-transfer' % fake.PROJECT_ID) req.method = 'POST' req.headers['Content-Type'] = 'application/json' req.body = jsonutils.dump_as_bytes(body) res = req.get_response(fakes.wsgi_app( fake_auth_context=self.user_ctxt)) res_dict = jsonutils.loads(res.body) self.assertEqual(HTTPStatus.ACCEPTED, res.status_int) self.assertEqual(body['transfer']['name'].strip(), res_dict['transfer']['name']) db.volume_destroy(context.get_admin_context(), volume_id) def test_delete_transfer_awaiting_transfer(self): volume_id = self._create_volume() transfer = self._create_transfer(volume_id) req = webob.Request.blank('/v3/%s/os-volume-transfer/%s' % ( fake.PROJECT_ID, transfer['id'])) req.method = 'DELETE' req.headers['Content-Type'] = 'application/json' res = req.get_response(fakes.wsgi_app( fake_auth_context=self.user_ctxt)) self.assertEqual(HTTPStatus.ACCEPTED, res.status_int) # verify transfer has been deleted req = webob.Request.blank('/v3/%s/os-volume-transfer/%s' % ( fake.PROJECT_ID, transfer['id'])) req.method = 'GET' req.headers['Content-Type'] = 'application/json' res = req.get_response(fakes.wsgi_app( fake_auth_context=self.user_ctxt)) res_dict = jsonutils.loads(res.body) self.assertEqual(HTTPStatus.NOT_FOUND, res.status_int) self.assertEqual(HTTPStatus.NOT_FOUND, res_dict['itemNotFound']['code']) self.assertEqual('Transfer %s could not be found.' % transfer['id'], res_dict['itemNotFound']['message']) self.assertEqual(db.volume_get(context.get_admin_context(), volume_id)['status'], 'available') db.volume_destroy(context.get_admin_context(), volume_id) def test_delete_transfer_with_transfer_NotFound(self): req = webob.Request.blank('/v3/%s/os-volume-transfer/%s' % ( fake.PROJECT_ID, fake.WILL_NOT_BE_FOUND_ID)) req.method = 'DELETE' req.headers['Content-Type'] = 'application/json' res = req.get_response(fakes.wsgi_app( fake_auth_context=self.user_ctxt)) res_dict = jsonutils.loads(res.body) self.assertEqual(HTTPStatus.NOT_FOUND, res.status_int) self.assertEqual(HTTPStatus.NOT_FOUND, res_dict['itemNotFound']['code']) self.assertEqual('Transfer %s could not be found.' 
% fake.WILL_NOT_BE_FOUND_ID, res_dict['itemNotFound']['message']) @mock.patch.object(quota.QUOTAS, 'reserve') @mock.patch.object(db, 'volume_type_get', v3_fakes.fake_volume_type_get) def test_accept_transfer_volume_id_specified_json(self, type_get): volume_id = self._create_volume() transfer = self._create_transfer(volume_id) svc = self.start_service('volume', host='fake_host') body = {"accept": {"auth_key": transfer['auth_key']}} req = webob.Request.blank('/v3/%s/os-volume-transfer/%s/accept' % ( fake.PROJECT_ID, transfer['id'])) req.method = 'POST' req.headers['Content-Type'] = 'application/json' req.body = jsonutils.dump_as_bytes(body) res = req.get_response(fakes.wsgi_app( fake_auth_context=self.user_ctxt)) res_dict = jsonutils.loads(res.body) self.assertEqual(HTTPStatus.ACCEPTED, res.status_int) self.assertEqual(transfer['id'], res_dict['transfer']['id']) self.assertEqual(volume_id, res_dict['transfer']['volume_id']) # cleanup svc.stop() def test_accept_transfer_with_no_body(self): volume_id = self._create_volume(size=5) transfer = self._create_transfer(volume_id) req = webob.Request.blank('/v3/%s/os-volume-transfer/%s/accept' % ( fake.PROJECT_ID, transfer['id'])) req.body = jsonutils.dump_as_bytes(None) req.method = 'POST' req.headers['Content-Type'] = 'application/json' req.headers['Accept'] = 'application/json' res = req.get_response(fakes.wsgi_app( fake_auth_context=self.user_ctxt)) res_dict = jsonutils.loads(res.body) self.assertEqual(HTTPStatus.BAD_REQUEST, res.status_int) self.assertEqual(HTTPStatus.BAD_REQUEST, res_dict['badRequest']['code']) db.transfer_destroy(context.get_admin_context(), transfer['id']) db.volume_destroy(context.get_admin_context(), volume_id) def test_accept_transfer_with_body_KeyError(self): volume_id = self._create_volume(size=5) transfer = self._create_transfer(volume_id) req = webob.Request.blank('/v3/%s/os-volume-transfer/%s/accept' % ( fake.PROJECT_ID, transfer['id'])) body = {"": {}} req.method = 'POST' req.headers['Content-Type'] = 'application/json' req.headers['Accept'] = 'application/json' req.body = jsonutils.dump_as_bytes(body) res = req.get_response(fakes.wsgi_app( fake_auth_context=self.user_ctxt)) res_dict = jsonutils.loads(res.body) self.assertEqual(HTTPStatus.BAD_REQUEST, res.status_int) self.assertEqual(HTTPStatus.BAD_REQUEST, res_dict['badRequest']['code']) db.transfer_destroy(context.get_admin_context(), transfer['id']) db.volume_destroy(context.get_admin_context(), volume_id) def test_accept_transfer_invalid_id_auth_key(self): volume_id = self._create_volume() transfer = self._create_transfer(volume_id) body = {"accept": {"auth_key": 1}} req = webob.Request.blank('/v3/%s/os-volume-transfer/%s/accept' % ( fake.PROJECT_ID, transfer['id'])) req.method = 'POST' req.headers['Content-Type'] = 'application/json' req.body = jsonutils.dump_as_bytes(body) res = req.get_response(fakes.wsgi_app( fake_auth_context=self.user_ctxt)) res_dict = jsonutils.loads(res.body) self.assertEqual(HTTPStatus.BAD_REQUEST, res.status_int) self.assertEqual(HTTPStatus.BAD_REQUEST, res_dict['badRequest']['code']) self.assertEqual(res_dict['badRequest']['message'], 'Invalid auth key: Attempt to transfer %s with ' 'invalid auth key.' 
% transfer['id']) db.transfer_destroy(context.get_admin_context(), transfer['id']) db.volume_destroy(context.get_admin_context(), volume_id) def test_accept_transfer_with_invalid_transfer(self): volume_id = self._create_volume() transfer = self._create_transfer(volume_id) body = {"accept": {"auth_key": 1}} req = webob.Request.blank('/v3/%s/os-volume-transfer/%s/accept' % ( fake.PROJECT_ID, fake.WILL_NOT_BE_FOUND_ID)) req.method = 'POST' req.headers['Content-Type'] = 'application/json' req.body = jsonutils.dump_as_bytes(body) res = req.get_response(fakes.wsgi_app( fake_auth_context=self.user_ctxt)) res_dict = jsonutils.loads(res.body) self.assertEqual(HTTPStatus.NOT_FOUND, res.status_int) self.assertEqual(HTTPStatus.NOT_FOUND, res_dict['itemNotFound']['code']) self.assertEqual('Transfer %s could not be found.' % fake.WILL_NOT_BE_FOUND_ID, res_dict['itemNotFound']['message']) db.transfer_destroy(context.get_admin_context(), transfer['id']) db.volume_destroy(context.get_admin_context(), volume_id) def test_accept_transfer_with_VolumeSizeExceedsAvailableQuota(self): def fake_transfer_api_accept_throwing_VolumeSizeExceedsAvailableQuota( cls, context, transfer, volume_id): raise exception.VolumeSizeExceedsAvailableQuota(requested='2', consumed='2', quota='3') self.mock_object( cinder.transfer.API, 'accept', fake_transfer_api_accept_throwing_VolumeSizeExceedsAvailableQuota) volume_id = self._create_volume() transfer = self._create_transfer(volume_id) body = {"accept": {"auth_key": transfer['auth_key']}} req = webob.Request.blank('/v3/%s/os-volume-transfer/%s/accept' % ( fake.PROJECT_ID, transfer['id'])) req.method = 'POST' req.headers['Content-Type'] = 'application/json' req.body = jsonutils.dump_as_bytes(body) res = req.get_response(fakes.wsgi_app( fake_auth_context=self.user_ctxt)) res_dict = jsonutils.loads(res.body) self.assertEqual(413, res.status_int) self.assertEqual(413, res_dict['overLimit']['code']) self.assertEqual('Requested volume or snapshot exceeds allowed ' 'gigabytes quota. 
Requested 2G, quota is 3G and '
                         '2G has been consumed.',
                         res_dict['overLimit']['message'])
        db.transfer_destroy(context.get_admin_context(), transfer['id'])
        db.volume_destroy(context.get_admin_context(), volume_id)

    def test_accept_transfer_with_VolumeLimitExceeded(self):

        def fake_transfer_api_accept_throwing_VolumeLimitExceeded(cls,
                                                                  context,
                                                                  transfer,
                                                                  volume_id):
            raise exception.VolumeLimitExceeded(allowed=1)

        self.mock_object(cinder.transfer.API, 'accept',
                         fake_transfer_api_accept_throwing_VolumeLimitExceeded)

        volume_id = self._create_volume()
        transfer = self._create_transfer(volume_id)
        body = {"accept": {"auth_key": transfer['auth_key']}}
        req = webob.Request.blank('/v3/%s/os-volume-transfer/%s/accept' % (
            fake.PROJECT_ID, transfer['id']))
        req.method = 'POST'
        req.headers['Content-Type'] = 'application/json'
        req.body = jsonutils.dump_as_bytes(body)
        res = req.get_response(fakes.wsgi_app(
            fake_auth_context=self.user_ctxt))
        res_dict = jsonutils.loads(res.body)

        self.assertEqual(413, res.status_int)
        self.assertEqual(413, res_dict['overLimit']['code'])
        self.assertEqual("VolumeLimitExceeded: Maximum number of volumes "
                         "allowed (1) exceeded for quota 'volumes'.",
                         res_dict['overLimit']['message'])

        db.transfer_destroy(context.get_admin_context(), transfer['id'])
        db.volume_destroy(context.get_admin_context(), volume_id)

    def test_accept_transfer_with_auth_key_null(self):
        volume_id = self._create_volume(size=5)
        transfer = self._create_transfer(volume_id)
        body = {"accept": {"auth_key": None}}
        req = webob.Request.blank('/v3/%s/os-volume-transfer/%s/accept' % (
            fake.PROJECT_ID, transfer['id']))
        req.body = jsonutils.dump_as_bytes(body)
        req.method = 'POST'
        req.headers['Content-Type'] = 'application/json'
        req.headers['Accept'] = 'application/json'
        res = req.get_response(fakes.wsgi_app(
            fake_auth_context=self.user_ctxt))
        res_dict = jsonutils.loads(res.body)

        self.assertEqual(HTTPStatus.BAD_REQUEST,
                         res_dict['badRequest']['code'])
        self.assertEqual(HTTPStatus.BAD_REQUEST, res.status_int)

        db.transfer_destroy(context.get_admin_context(), transfer['id'])
        db.volume_destroy(context.get_admin_context(), volume_id)


cinder-27.0.0/cinder/tests/unit/api/contrib/test_volume_type_access.py

#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
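# ---------------------------------------------------------------------------
# Illustrative sketch (an addition for clarity, not part of the original
# module): the fixtures below model access to non-public volume types as a
# flat list of {'volume_type_id': ..., 'project_id': ...} grants.  This
# standalone helper mirrors the membership check those fixtures perform; the
# helper name and example IDs are assumptions used only for illustration.
def _example_has_type_access(access_list, type_id, project_id):
    """Return True if project_id has been granted access to type_id."""
    return any(grant['volume_type_id'] == type_id and
               grant['project_id'] == project_id
               for grant in access_list)


# Usage: a private type granted to a single project.
_EXAMPLE_GRANTS = [{'volume_type_id': 'private-type', 'project_id': 'proj-2'}]
assert _example_has_type_access(_EXAMPLE_GRANTS, 'private-type', 'proj-2')
assert not _example_has_type_access(_EXAMPLE_GRANTS, 'private-type', 'proj-1')
# ---------------------------------------------------------------------------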
import datetime from http import HTTPStatus from unittest import mock import webob from cinder.api.contrib import volume_type_access as type_access from cinder.api.v3 import types from cinder import context from cinder import db from cinder import exception from cinder.tests.unit.api import fakes from cinder.tests.unit import fake_constants as fake from cinder.tests.unit import test def generate_type(type_id, is_public): return { 'id': type_id, 'name': 'test', 'deleted': False, 'created_at': datetime.datetime(2012, 1, 1, 1, 1, 1, 1), 'updated_at': None, 'deleted_at': None, 'is_public': bool(is_public) } VOLUME_TYPES = { fake.VOLUME_TYPE_ID: generate_type(fake.VOLUME_TYPE_ID, True), fake.VOLUME_TYPE2_ID: generate_type(fake.VOLUME_TYPE2_ID, True), fake.VOLUME_TYPE3_ID: generate_type(fake.VOLUME_TYPE3_ID, False), fake.VOLUME_TYPE4_ID: generate_type(fake.VOLUME_TYPE4_ID, False)} PROJ1_UUID = fake.PROJECT_ID PROJ2_UUID = fake.PROJECT2_ID PROJ3_UUID = fake.PROJECT3_ID ACCESS_LIST = [{'volume_type_id': fake.VOLUME_TYPE3_ID, 'project_id': PROJ2_UUID}, {'volume_type_id': fake.VOLUME_TYPE3_ID, 'project_id': PROJ3_UUID}, {'volume_type_id': fake.VOLUME_TYPE4_ID, 'project_id': PROJ3_UUID}] def fake_volume_type_get(context, id, inactive=False, expected_fields=None): vol = VOLUME_TYPES[id] if expected_fields and 'projects' in expected_fields: vol['projects'] = [a['project_id'] for a in ACCESS_LIST if a['volume_type_id'] == id] return vol def _has_type_access(type_id, project_id): for access in ACCESS_LIST: if access['volume_type_id'] == type_id and \ access['project_id'] == project_id: return True return False def fake_volume_type_get_all(context, inactive=False, filters=None, marker=None, limit=None, sort_keys=None, sort_dirs=None, offset=None, list_result=False): if filters is None or filters['is_public'] is None: if list_result: return list(VOLUME_TYPES.values()) return VOLUME_TYPES res = {} for k, v in VOLUME_TYPES.items(): if filters['is_public'] and _has_type_access(k, context.project_id): res.update({k: v}) continue if v['is_public'] == filters['is_public']: res.update({k: v}) if list_result: return list(res.values()) return res class FakeResponse(object): obj = {'volume_type': {'id': fake.VOLUME_TYPE_ID}, 'volume_types': [ {'id': fake.VOLUME_TYPE_ID}, {'id': fake.VOLUME_TYPE3_ID}]} def attach(self, **kwargs): pass class FakeRequest(object): environ = {"cinder.context": context.get_admin_context()} def cached_resource_by_id(self, resource_id, name=None): return VOLUME_TYPES[resource_id] class VolumeTypeAccessTest(test.TestCase): def setUp(self): super(VolumeTypeAccessTest, self).setUp() self.type_controller = types.VolumeTypesController() self.type_access_controller = type_access.VolumeTypeAccessController() self.type_action_controller = type_access.VolumeTypeActionController() self.req = FakeRequest() self.context = self.req.environ['cinder.context'] self.mock_object(db, 'volume_type_get', fake_volume_type_get) self.mock_object(db, 'volume_type_get_all', fake_volume_type_get_all) def assertVolumeTypeListEqual(self, expected, observed): self.assertEqual(len(expected), len(observed)) expected = sorted(expected, key=lambda item: item['id']) observed = sorted(observed, key=lambda item: item['id']) for d1, d2 in zip(expected, observed): self.assertEqual(d1['id'], d2['id']) def test_list_type_access_public(self): """Querying os-volume-type-access on public type should return 404.""" req = fakes.HTTPRequest.blank('/v3/%s/types/os-volume-type-access' % fake.PROJECT_ID, use_admin_context=True) 
self.assertRaises(exception.VolumeTypeAccessNotFound, self.type_access_controller.index, req, fake.VOLUME_TYPE2_ID) def test_list_type_access_private(self): expected = {'volume_type_access': [ {'volume_type_id': fake.VOLUME_TYPE3_ID, 'project_id': PROJ2_UUID}, {'volume_type_id': fake.VOLUME_TYPE3_ID, 'project_id': PROJ3_UUID}]} result = self.type_access_controller.index(self.req, fake.VOLUME_TYPE3_ID) self.assertEqual(expected, result) def test_list_with_no_context(self): req = fakes.HTTPRequest.blank('/v3/flavors/%s/flavors' % fake.PROJECT_ID) def fake_authorize(context, target=None, action=None): raise exception.PolicyNotAuthorized(action='index') with mock.patch('cinder.context.RequestContext.authorize', fake_authorize): self.assertRaises(exception.PolicyNotAuthorized, self.type_access_controller.index, req, fake.PROJECT_ID) def test_list_type_with_admin_default_proj1(self): expected = {'volume_types': [{'id': fake.VOLUME_TYPE_ID}, {'id': fake.VOLUME_TYPE2_ID}]} req = fakes.HTTPRequest.blank('/v3/%s/types' % fake.PROJECT_ID, use_admin_context=True) req.environ['cinder.context'].project_id = PROJ1_UUID result = self.type_controller.index(req) self.assertVolumeTypeListEqual(expected['volume_types'], result['volume_types']) def test_list_type_with_admin_default_proj2(self): expected = {'volume_types': [{'id': fake.VOLUME_TYPE_ID}, {'id': fake.VOLUME_TYPE2_ID}, {'id': fake.VOLUME_TYPE3_ID}]} req = fakes.HTTPRequest.blank('/v3/%s/types' % PROJ2_UUID, use_admin_context=True) req.environ['cinder.context'].project_id = PROJ2_UUID result = self.type_controller.index(req) self.assertVolumeTypeListEqual(expected['volume_types'], result['volume_types']) def test_list_type_with_admin_ispublic_true(self): expected = {'volume_types': [{'id': fake.VOLUME_TYPE_ID}, {'id': fake.VOLUME_TYPE2_ID}]} req = fakes.HTTPRequest.blank('/v3/%s/types?is_public=true' % fake.PROJECT_ID, use_admin_context=True) result = self.type_controller.index(req) self.assertVolumeTypeListEqual(expected['volume_types'], result['volume_types']) def test_list_type_with_admin_ispublic_false(self): expected = {'volume_types': [{'id': fake.VOLUME_TYPE3_ID}, {'id': fake.VOLUME_TYPE4_ID}]} req = fakes.HTTPRequest.blank('/v3/%s/types?is_public=false' % fake.PROJECT_ID, use_admin_context=True) result = self.type_controller.index(req) self.assertVolumeTypeListEqual(expected['volume_types'], result['volume_types']) def test_list_type_with_admin_ispublic_false_proj2(self): expected = {'volume_types': [{'id': fake.VOLUME_TYPE3_ID}, {'id': fake.VOLUME_TYPE4_ID}]} req = fakes.HTTPRequest.blank('/v3/%s/types?is_public=false' % fake.PROJECT_ID, use_admin_context=True) req.environ['cinder.context'].project_id = PROJ2_UUID result = self.type_controller.index(req) self.assertVolumeTypeListEqual(expected['volume_types'], result['volume_types']) def test_list_type_with_admin_ispublic_none(self): expected = {'volume_types': [{'id': fake.VOLUME_TYPE_ID}, {'id': fake.VOLUME_TYPE2_ID}, {'id': fake.VOLUME_TYPE3_ID}, {'id': fake.VOLUME_TYPE4_ID}]} req = fakes.HTTPRequest.blank('/v3/%s/types?is_public=none' % fake.PROJECT_ID, use_admin_context=True) result = self.type_controller.index(req) self.assertVolumeTypeListEqual(expected['volume_types'], result['volume_types']) def test_list_type_with_no_admin_default(self): expected = {'volume_types': [{'id': fake.VOLUME_TYPE_ID}, {'id': fake.VOLUME_TYPE2_ID}]} req = fakes.HTTPRequest.blank('/v3/%s/types' % fake.PROJECT_ID, use_admin_context=False) result = self.type_controller.index(req) 
self.assertVolumeTypeListEqual(expected['volume_types'], result['volume_types']) def test_list_type_with_no_admin_ispublic_true(self): expected = {'volume_types': [{'id': fake.VOLUME_TYPE_ID}, {'id': fake.VOLUME_TYPE2_ID}]} req = fakes.HTTPRequest.blank('/v3/%s/types?is_public=true' % fake.PROJECT_ID, use_admin_context=False) result = self.type_controller.index(req) self.assertVolumeTypeListEqual(expected['volume_types'], result['volume_types']) def test_list_type_with_no_admin_ispublic_false(self): expected = {'volume_types': [{'id': fake.VOLUME_TYPE_ID}, {'id': fake.VOLUME_TYPE2_ID}]} req = fakes.HTTPRequest.blank('/v3/%s/types?is_public=false' % fake.PROJECT_ID, use_admin_context=False) result = self.type_controller.index(req) self.assertVolumeTypeListEqual(expected['volume_types'], result['volume_types']) def test_list_type_with_no_admin_ispublic_none(self): expected = {'volume_types': [{'id': fake.VOLUME_TYPE_ID}, {'id': fake.VOLUME_TYPE2_ID}]} req = fakes.HTTPRequest.blank('/v3/%s/types?is_public=none' % fake.PROJECT_ID, use_admin_context=False) result = self.type_controller.index(req) self.assertVolumeTypeListEqual(expected['volume_types'], result['volume_types']) def test_show(self): resp = FakeResponse() self.type_action_controller.show(self.req, resp, fake.VOLUME_TYPE_ID) self.assertEqual({'id': fake.VOLUME_TYPE_ID, 'os-volume-type-access:is_public': True}, resp.obj['volume_type']) def test_detail(self): resp = FakeResponse() self.type_action_controller.detail(self.req, resp) self.assertEqual( [{'id': fake.VOLUME_TYPE_ID, 'os-volume-type-access:is_public': True}, {'id': fake.VOLUME_TYPE3_ID, 'os-volume-type-access:is_public': False}], resp.obj['volume_types']) def test_create(self): resp = FakeResponse() self.type_action_controller.create(self.req, {}, resp) self.assertEqual({'id': fake.VOLUME_TYPE_ID, 'os-volume-type-access:is_public': True}, resp.obj['volume_type']) def test_add_project_access(self): def fake_add_volume_type_access(context, type_id, project_id): self.assertEqual(fake.VOLUME_TYPE4_ID, type_id, "type_id") self.assertEqual(PROJ2_UUID, project_id, "project_id") self.mock_object(db, 'volume_type_access_add', fake_add_volume_type_access) body = {'addProjectAccess': {'project': PROJ2_UUID}} req = fakes.HTTPRequest.blank('/v3/%s/types/%s/action' % ( fake.PROJECT_ID, fake.VOLUME_TYPE3_ID), use_admin_context=True) result = self.type_action_controller._addProjectAccess( req, fake.VOLUME_TYPE4_ID, body=body) self.assertEqual(HTTPStatus.ACCEPTED, result.status_code) def test_add_project_access_with_no_admin_user(self): req = fakes.HTTPRequest.blank('/v3/%s/types/%s/action' % ( fake.PROJECT_ID, fake.VOLUME_TYPE3_ID), use_admin_context=False) body = {'addProjectAccess': {'project': PROJ2_UUID}} self.assertRaises(exception.PolicyNotAuthorized, self.type_action_controller._addProjectAccess, req, fake.VOLUME_TYPE3_ID, body=body) def test_add_project_access_with_already_added_access(self): def fake_add_volume_type_access(context, type_id, project_id): raise exception.VolumeTypeAccessExists(volume_type_id=type_id, project_id=project_id) self.mock_object(db, 'volume_type_access_add', fake_add_volume_type_access) body = {'addProjectAccess': {'project': PROJ2_UUID}} req = fakes.HTTPRequest.blank('/v3/%s/types/%s/action' % ( fake.PROJECT_ID, fake.VOLUME_TYPE3_ID), use_admin_context=True) self.assertRaises(webob.exc.HTTPConflict, self.type_action_controller._addProjectAccess, req, fake.VOLUME_TYPE3_ID, body=body) def test_remove_project_access_with_bad_access(self): def 
fake_remove_volume_type_access(context, type_id, project_id):
            raise exception.VolumeTypeAccessNotFound(volume_type_id=type_id,
                                                     project_id=project_id)
        self.mock_object(db, 'volume_type_access_remove',
                         fake_remove_volume_type_access)
        body = {'removeProjectAccess': {'project': PROJ2_UUID}}
        req = fakes.HTTPRequest.blank('/v3/%s/types/%s/action' % (
            fake.PROJECT_ID, fake.VOLUME_TYPE3_ID), use_admin_context=True)

        self.assertRaises(exception.VolumeTypeAccessNotFound,
                          self.type_action_controller._removeProjectAccess,
                          req, fake.VOLUME_TYPE4_ID, body=body)

    def test_remove_project_access_with_no_admin_user(self):
        req = fakes.HTTPRequest.blank('/v3/%s/types/%s/action' % (
            fake.PROJECT_ID, fake.VOLUME_TYPE3_ID), use_admin_context=False)
        body = {'removeProjectAccess': {'project': PROJ2_UUID}}
        self.assertRaises(exception.PolicyNotAuthorized,
                          self.type_action_controller._removeProjectAccess,
                          req, fake.VOLUME_TYPE3_ID, body=body)


cinder-27.0.0/cinder/tests/unit/api/contrib/test_volume_type_encryption.py

# Copyright (c) 2013 The Johns Hopkins University/Applied Physics Laboratory
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
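# ---------------------------------------------------------------------------
# Illustrative sketch (an addition for clarity, not part of the original
# module): the tests below drive the volume-type encryption API with raw WSGI
# requests.  This unused helper shows how such a request is typically
# assembled, reusing the same fake provider/cipher values as the fixtures in
# this file; the helper name and example arguments are assumptions for
# illustration only.
import webob
from oslo_serialization import jsonutils


def _example_encryption_create_request(project_id, type_id):
    """Build a POST /v3/{project}/types/{type}/encryption request."""
    body = {"encryption": {"provider": "fake_provider",
                           "cipher": "fake_cipher",
                           "key_size": 256,
                           "control_location": "front-end"}}
    req = webob.Request.blank(
        '/v3/%s/types/%s/encryption' % (project_id, type_id))
    req.method = 'POST'
    req.headers['Content-Type'] = 'application/json'
    req.body = jsonutils.dump_as_bytes(body)
    return req
# ---------------------------------------------------------------------------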
from http import HTTPStatus from oslo_serialization import jsonutils import webob from cinder import context from cinder import db from cinder.tests.unit.api import fakes from cinder.tests.unit import fake_constants as fake from cinder.tests.unit import test def return_volume_type_encryption(context, volume_type_id): return fake_volume_type_encryption() def fake_volume_type_encryption(): values = { 'cipher': 'fake_cipher', 'control_location': 'front-end', 'key_size': 256, 'provider': 'fake_provider' } return values class VolumeTypeEncryptionTest(test.TestCase): _default_volume_type = { 'id': fake.VOLUME_TYPE_ID, 'name': 'fake_type', } def setUp(self): super(VolumeTypeEncryptionTest, self).setUp() self.flags(host='fake') self.api_path = '/v3/%s/types/%s/encryption' % ( fake.PROJECT_ID, fake.VOLUME_TYPE_ID) """to reset notifier drivers left over from other api/contrib tests""" def _get_response(self, volume_type, admin=True, url='/v3/%s/types/%s/encryption', req_method='GET', req_body=None, req_headers=None): ctxt = context.RequestContext(fake.USER_ID, fake.PROJECT_ID, is_admin=admin) req = webob.Request.blank(url % (fake.PROJECT_ID, volume_type['id'])) req.method = req_method req.body = req_body if req_headers: req.headers['Content-Type'] = req_headers return req.get_response(fakes.wsgi_app(fake_auth_context=ctxt)) def _create_type_and_encryption(self, volume_type, body=None): if body is None: body = {"encryption": fake_volume_type_encryption()} db.volume_type_create(context.get_admin_context(), volume_type) return self._get_response(volume_type, req_method='POST', req_body=jsonutils.dump_as_bytes(body), req_headers='application/json') def test_index(self): self.mock_object(db, 'volume_type_encryption_get', return_volume_type_encryption) volume_type = self._default_volume_type self._create_type_and_encryption(volume_type) res = self._get_response(volume_type) self.assertEqual(HTTPStatus.OK, res.status_code) res_dict = jsonutils.loads(res.body) expected = fake_volume_type_encryption() self.assertEqual(expected, res_dict) db.volume_type_destroy(context.get_admin_context(), volume_type['id']) def test_index_invalid_type(self): volume_type = self._default_volume_type res = self._get_response(volume_type) self.assertEqual(HTTPStatus.NOT_FOUND, res.status_code) res_dict = jsonutils.loads(res.body) expected = { 'itemNotFound': { 'code': HTTPStatus.NOT_FOUND, 'message': ('Volume type %s could not be found.' 
% volume_type['id']) } } self.assertEqual(expected, res_dict) def test_show_key_size(self): volume_type = self._default_volume_type self._create_type_and_encryption(volume_type) res = self._get_response(volume_type, url='/v3/%s/types/%s/encryption/key_size') res_dict = jsonutils.loads(res.body) self.assertEqual(HTTPStatus.OK, res.status_code) self.assertEqual(256, res_dict['key_size']) db.volume_type_destroy(context.get_admin_context(), volume_type['id']) def test_show_provider(self): volume_type = self._default_volume_type self._create_type_and_encryption(volume_type) res = self._get_response(volume_type, url='/v3/%s/types/%s/encryption/provider') res_dict = jsonutils.loads(res.body) self.assertEqual(HTTPStatus.OK, res.status_code) self.assertEqual('fake_provider', res_dict['provider']) db.volume_type_destroy(context.get_admin_context(), volume_type['id']) def test_show_item_not_found(self): volume_type = self._default_volume_type self._create_type_and_encryption(volume_type) res = self._get_response(volume_type, url='/v3/%s/types/%s/encryption/fake') res_dict = jsonutils.loads(res.body) self.assertEqual(HTTPStatus.NOT_FOUND, res.status_code) expected = { 'itemNotFound': { 'code': HTTPStatus.NOT_FOUND, 'message': ('Volume type encryption for type %s does not ' 'exist.' % volume_type['id']) } } self.assertEqual(expected, res_dict) db.volume_type_destroy(context.get_admin_context(), volume_type['id']) def _create(self, cipher, control_location, key_size, provider): volume_type = self._default_volume_type db.volume_type_create(context.get_admin_context(), volume_type) body = {"encryption": {'cipher': cipher, 'control_location': control_location, 'key_size': key_size, 'provider': provider }} self.assertEqual(0, len(self.notifier.notifications)) res = self._get_response(volume_type) res_dict = jsonutils.loads(res.body) self.assertEqual(HTTPStatus.OK, res.status_code) # Confirm that volume type has no encryption information # before create. self.assertEqual(b'{}', res.body) # Create encryption specs for the volume type # with the defined body. 
res = self._get_response(volume_type, req_method='POST', req_body=jsonutils.dump_as_bytes(body), req_headers='application/json') res_dict = jsonutils.loads(res.body) self.assertEqual(1, len(self.notifier.notifications)) # check response self.assertIn('encryption', res_dict) self.assertEqual(cipher, res_dict['encryption']['cipher']) self.assertEqual(control_location, res_dict['encryption']['control_location']) self.assertEqual(key_size, res_dict['encryption']['key_size']) self.assertEqual(provider, res_dict['encryption']['provider']) # check database encryption = db.volume_type_encryption_get(context.get_admin_context(), volume_type['id']) self.assertIsNotNone(encryption) self.assertEqual(cipher, encryption['cipher']) self.assertEqual(key_size, encryption['key_size']) self.assertEqual(provider, encryption['provider']) db.volume_type_destroy(context.get_admin_context(), volume_type['id']) def test_create_invalid_volume_type(self): volume_type = self._default_volume_type body = {"encryption": fake_volume_type_encryption()} # Attempt to create encryption without first creating type res = self._get_response(volume_type, req_method='POST', req_body=jsonutils.dump_as_bytes(body), req_headers='application/json') res_dict = jsonutils.loads(res.body) self.assertEqual(0, len(self.notifier.notifications)) self.assertEqual(HTTPStatus.NOT_FOUND, res.status_code) expected = { 'itemNotFound': { 'code': HTTPStatus.NOT_FOUND, 'message': ('Volume type %s could not be found.' % volume_type['id']) } } self.assertEqual(expected, res_dict) def test_create_encryption_type_exists(self): volume_type = self._default_volume_type body = {"encryption": fake_volume_type_encryption()} self._create_type_and_encryption(volume_type, body) # Try to create encryption specs for a volume type # that already has them. res = self._get_response(volume_type, req_method='POST', req_body=jsonutils.dump_as_bytes(body), req_headers='application/json') res_dict = jsonutils.loads(res.body) expected = { 'badRequest': { 'code': HTTPStatus.BAD_REQUEST, 'message': ('Volume type encryption for type ' '%s already exists.' % fake.VOLUME_TYPE_ID) } } self.assertEqual(expected, res_dict) db.volume_type_destroy(context.get_admin_context(), volume_type['id']) def test_create_volume_exists(self): # Create the volume type and a volume with the volume type. volume_type = self._default_volume_type db.volume_type_create(context.get_admin_context(), volume_type) db.volume_create(context.get_admin_context(), {'id': fake.VOLUME_ID, 'display_description': 'Test Desc', 'size': 20, 'status': 'creating', 'instance_uuid': None, 'host': 'dummy', 'volume_type_id': volume_type['id']}) body = {"encryption": {'cipher': 'cipher', 'key_size': 128, 'control_location': 'front-end', 'provider': 'fake_provider' }} # Try to create encryption specs for a volume type # with a volume. res = self._get_response(volume_type, req_method='POST', req_body=jsonutils.dump_as_bytes(body), req_headers='application/json') res_dict = jsonutils.loads(res.body) expected = { 'badRequest': { 'code': HTTPStatus.BAD_REQUEST, 'message': ('Cannot create encryption specs. 
' 'Volume type in use.') } } self.assertEqual(expected, res_dict) db.volume_destroy(context.get_admin_context(), fake.VOLUME_ID) db.volume_type_destroy(context.get_admin_context(), volume_type['id']) def _encryption_create_bad_body(self, body): volume_type = self._default_volume_type db.volume_type_create(context.get_admin_context(), volume_type) res = self._get_response(volume_type, req_method='POST', req_body=jsonutils.dump_as_bytes(body), req_headers='application/json') res_dict = jsonutils.loads(res.body) self.assertEqual(HTTPStatus.BAD_REQUEST, res_dict['badRequest']['code']) db.volume_type_destroy(context.get_admin_context(), volume_type['id']) def test_create_no_body(self): self._encryption_create_bad_body(body=None) def test_create_malformed_entity(self): body = {'encryption': 'string'} self._encryption_create_bad_body(body=body) def test_create_negative_key_size(self): body = {"encryption": {'cipher': 'cipher', 'key_size': -128, 'provider': 'fake_provider', 'control_location': 'front-end' }} self._encryption_create_bad_body(body=body) def test_create_with_minimum_key_size(self): body = {"encryption": {'cipher': 'cipher', 'key_size': '-1', 'provider': 'fake_provider', 'control_location': 'front-end' }} self._encryption_create_bad_body(body=body) def test_create_with_maximum_key_size(self): body = {"encryption": {'cipher': 'cipher', 'key_size': '12345678788', 'provider': 'fake_provider', 'control_location': 'front-end' }} self._encryption_create_bad_body(body=body) def test_create_none_key_size(self): self._create('fake_cipher', 'front-end', None, 'fake_encryptor') def test_create_invalid_control_location(self): body = {"encryption": {'cipher': 'cipher', 'control_location': 'fake_control', 'provider': 'fake_provider' }} self._encryption_create_bad_body(body=body) def test_create_no_provider(self): body = {"encryption": {'cipher': 'cipher'}} self._encryption_create_bad_body(body=body) def test_create_no_control_location(self): body = {"encryption": {'provider': 'fake_provider'}} self._encryption_create_bad_body(body=body) def test_delete(self): volume_type = self._default_volume_type db.volume_type_create(context.get_admin_context(), volume_type) # Test that before create, there's nothing with a get res = self._get_response(volume_type) self.assertEqual(HTTPStatus.OK, res.status_code) res_dict = jsonutils.loads(res.body) self.assertEqual({}, res_dict) body = {"encryption": {'cipher': 'cipher', 'key_size': 128, 'control_location': 'front-end', 'provider': 'fake_provider'}} # Create, and test that get returns something res = self._get_response(volume_type, req_method='POST', req_body=jsonutils.dump_as_bytes(body), req_headers='application/json') res_dict = jsonutils.loads(res.body) res = self._get_response(volume_type, req_method='GET', req_headers='application/json', url='/v3/%s/types/%s/encryption') self.assertEqual(HTTPStatus.OK, res.status_code) res_dict = jsonutils.loads(res.body) self.assertEqual(volume_type['id'], res_dict['volume_type_id']) # Delete, and test that get returns nothing res = self._get_response(volume_type, req_method='DELETE', req_headers='application/json', url='/v3/%s/types/%s/encryption/provider') self.assertEqual(HTTPStatus.ACCEPTED, res.status_code) self.assertEqual(0, len(res.body)) res = self._get_response(volume_type, req_method='GET', req_headers='application/json', url='/v3/%s/types/%s/encryption') self.assertEqual(HTTPStatus.OK, res.status_code) res_dict = jsonutils.loads(res.body) self.assertEqual({}, res_dict) 
db.volume_type_destroy(context.get_admin_context(), volume_type['id']) def test_delete_with_volume_in_use(self): # Create the volume type volume_type = self._default_volume_type db.volume_type_create(context.get_admin_context(), volume_type) body = {"encryption": {'cipher': 'cipher', 'key_size': 128, 'control_location': 'front-end', 'provider': 'fake_provider' }} # Create encryption with volume type, and test with GET res = self._get_response(volume_type, req_method='POST', req_body=jsonutils.dump_as_bytes(body), req_headers='application/json') res = self._get_response(volume_type, req_method='GET', req_headers='application/json', url='/v3/%s/types/%s/encryption') self.assertEqual(HTTPStatus.OK, res.status_code) res_dict = jsonutils.loads(res.body) self.assertEqual(volume_type['id'], res_dict['volume_type_id']) # Create volumes with the volume type db.volume_create(context.get_admin_context(), {'id': fake.VOLUME_ID, 'display_description': 'Test Desc', 'size': 20, 'status': 'creating', 'instance_uuid': None, 'host': 'dummy', 'volume_type_id': volume_type['id']}) db.volume_create(context.get_admin_context(), {'id': fake.VOLUME2_ID, 'display_description': 'Test Desc2', 'size': 2, 'status': 'creating', 'instance_uuid': None, 'host': 'dummy', 'volume_type_id': volume_type['id']}) # Delete, and test that there is an error since volumes exist res = self._get_response(volume_type, req_method='DELETE', req_headers='application/json', url='/v3/%s/types/%s/encryption/provider') self.assertEqual(HTTPStatus.BAD_REQUEST, res.status_code) res_dict = jsonutils.loads(res.body) expected = { 'badRequest': { 'code': HTTPStatus.BAD_REQUEST, 'message': 'Cannot delete encryption specs. ' 'Volume type in use.' } } self.assertEqual(expected, res_dict) # Delete the volumes db.volume_destroy(context.get_admin_context(), fake.VOLUME_ID) db.volume_destroy(context.get_admin_context(), fake.VOLUME2_ID) # Delete, and test that get returns nothing res = self._get_response(volume_type, req_method='DELETE', req_headers='application/json', url='/v3/%s/types/%s/encryption/provider') self.assertEqual(HTTPStatus.ACCEPTED, res.status_code) self.assertEqual(0, len(res.body)) res = self._get_response(volume_type, req_method='GET', req_headers='application/json', url='/v3/%s/types/%s/encryption') self.assertEqual(HTTPStatus.OK, res.status_code) res_dict = jsonutils.loads(res.body) self.assertEqual({}, res_dict) db.volume_type_destroy(context.get_admin_context(), volume_type['id']) def test_delete_with_no_encryption(self): volume_type = self._default_volume_type # create a volume type db.volume_type_create(context.get_admin_context(), volume_type) # without creating encryption type, try to delete # and check if 404 is raised. res = self._get_response(volume_type, req_method='DELETE', req_headers='application/json', url='/v3/%s/types/%s/encryption/provider') self.assertEqual(HTTPStatus.NOT_FOUND, res.status_code) expected = { "itemNotFound": { "message": "Volume type encryption for type " "%s does not exist." 
% fake.VOLUME_TYPE_ID, "code": HTTPStatus.NOT_FOUND } } self.assertEqual(expected, jsonutils.loads(res.body)) db.volume_type_destroy(context.get_admin_context(), volume_type['id']) def test_update_item(self): volume_type = self._default_volume_type # Create Encryption Specs create_body = {"encryption": {'cipher': 'cipher', 'control_location': 'front-end', 'key_size': 128, 'provider': 'fake_provider'}} self._create_type_and_encryption(volume_type, create_body) # Update Encryption Specs update_body = {"encryption": {'key_size': 512, 'provider': 'fake_provider2'}} res = self.\ _get_response(volume_type, req_method='PUT', req_body=jsonutils.dump_as_bytes(update_body), req_headers='application/json', url='/v3/%s/types/%s/encryption/' + fake.ENCRYPTION_KEY_ID) res_dict = jsonutils.loads(res.body) self.assertEqual(512, res_dict['encryption']['key_size']) self.assertEqual('fake_provider2', res_dict['encryption']['provider']) # Get Encryption Specs res = self._get_response(volume_type) res_dict = jsonutils.loads(res.body) # Confirm Encryption Specs self.assertEqual(512, res_dict['key_size']) self.assertEqual('fake_provider2', res_dict['provider']) db.volume_type_destroy(context.get_admin_context(), volume_type['id']) def _encryption_update_bad_body(self, update_body): # Create Volume Type and Encryption volume_type = self._default_volume_type res = self._create_type_and_encryption(volume_type) # Update Encryption res = self.\ _get_response(volume_type, req_method='PUT', req_body=jsonutils.dump_as_bytes(update_body), req_headers='application/json', url='/v3/%s/types/%s/encryption/' + fake.ENCRYPTION_KEY_ID) res_dict = jsonutils.loads(res.body) # Confirm Failure self.assertEqual(HTTPStatus.BAD_REQUEST, res_dict['badRequest']['code']) db.volume_type_destroy(context.get_admin_context(), volume_type['id']) def test_update_too_many_items(self): update_body = {"encryption": {'key_size': 512}, "encryption2": {'key_size': 256}} self._encryption_update_bad_body(update_body) def test_update_key_size_non_integer(self): update_body = {"encryption": {'key_size': 'abc'}} self._encryption_update_bad_body(update_body) def test_update_item_invalid_body(self): update_body = {"key_size": "value1"} self._encryption_update_bad_body(update_body) def _encryption_empty_update(self, update_body): self._encryption_update_bad_body(update_body) def test_update_no_body(self): self._encryption_empty_update(update_body=None) def test_update_empty_body(self): self._encryption_empty_update(update_body={}) def test_update_with_volume_in_use(self): # Create the volume type and encryption volume_type = self._default_volume_type self._create_type_and_encryption(volume_type) # Create a volume with the volume type db.volume_create(context.get_admin_context(), {'id': fake.VOLUME_ID, 'display_description': 'Test Desc', 'size': 20, 'status': 'creating', 'instance_uuid': None, 'host': 'dummy', 'volume_type_id': volume_type['id']}) # Get the Encryption res = self._get_response(volume_type) self.assertEqual(HTTPStatus.OK, res.status_code) res_dict = jsonutils.loads(res.body) self.assertEqual(volume_type['id'], res_dict['volume_type_id']) # Update, and test that there is an error since volumes exist update_body = {"encryption": {'key_size': 512}} res = self.\ _get_response(volume_type, req_method='PUT', req_body=jsonutils.dump_as_bytes(update_body), req_headers='application/json', url='/v3/%s/types/%s/encryption/' + fake.ENCRYPTION_KEY_ID) self.assertEqual(HTTPStatus.BAD_REQUEST, res.status_code) res_dict = jsonutils.loads(res.body) expected = 
{
            'badRequest': {
                'code': HTTPStatus.BAD_REQUEST,
                'message': 'Cannot update encryption specs. '
                           'Volume type in use.'
            }
        }
        self.assertEqual(expected, res_dict)


cinder-27.0.0/cinder/tests/unit/api/contrib/test_volume_unmanage.py

# Copyright 2014 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from http import HTTPStatus
from unittest import mock

from oslo_serialization import jsonutils
import webob

from cinder import context
from cinder import db
from cinder import objects
from cinder.objects import fields
from cinder.tests.unit.api import fakes
from cinder.tests.unit import fake_constants as fake
from cinder.tests.unit import test
from cinder.tests.unit import utils


class VolumeUnmanageTest(test.TestCase):
    """Test cases for cinder/api/contrib/volume_unmanage.py

    The API extension adds an action to volumes, "os-unmanage", which will
    effectively issue a delete operation on the volume, but with a flag set
    that means that a different method will be invoked on the driver, so that
    the volume is not actually deleted in the storage backend.

    In this set of test cases, we are ensuring that the code correctly parses
    the request structure and raises the correct exceptions when things are
    not right, and calls down into cinder.volume.api.API.delete with the
    correct arguments.
""" def setUp(self): super(VolumeUnmanageTest, self).setUp() self.ctxt = context.RequestContext(fake.USER_ID, fake.PROJECT_ID, True) api = fakes.router_v3.APIRouter() self.app = fakes.urlmap.URLMap() self.app['/v3'] = api def _get_resp(self, volume_id): """Helper to build an os-unmanage req for the specified volume_id.""" req = webob.Request.blank('/v3/%s/volumes/%s/action' % (self.ctxt.project_id, volume_id)) req.method = 'POST' req.headers['Content-Type'] = 'application/json' req.environ['cinder.context'] = self.ctxt body = {'os-unmanage': ''} req.body = jsonutils.dump_as_bytes(body) res = req.get_response(self.app) return res @mock.patch('cinder.volume.rpcapi.VolumeAPI.delete_volume') def test_unmanage_volume_ok(self, mock_rpcapi): """Return success for valid and unattached volume.""" vol = utils.create_volume(self.ctxt) res = self._get_resp(vol.id) self.assertEqual(HTTPStatus.ACCEPTED, res.status_int, res) mock_rpcapi.assert_called_once_with(self.ctxt, mock.ANY, True, False) vol = objects.volume.Volume.get_by_id(self.ctxt, vol.id) self.assertEqual('unmanaging', vol.status) db.volume_destroy(self.ctxt, vol.id) def test_unmanage_volume_bad_volume_id(self): """Return 404 if the volume does not exist.""" res = self._get_resp(fake.WILL_NOT_BE_FOUND_ID) self.assertEqual(HTTPStatus.NOT_FOUND, res.status_int, res) def test_unmanage_volume_attached(self): """Return 400 if the volume exists but is attached.""" vol = utils.create_volume( self.ctxt, status='in-use', attach_status=fields.VolumeAttachStatus.ATTACHED) res = self._get_resp(vol.id) self.assertEqual(HTTPStatus.BAD_REQUEST, res.status_int, res) db.volume_destroy(self.ctxt, vol.id) def test_unmanage_volume_with_snapshots(self): """Return 400 if the volume exists but has snapshots.""" vol = utils.create_volume(self.ctxt) snap = utils.create_snapshot(self.ctxt, vol.id) res = self._get_resp(vol.id) self.assertEqual(HTTPStatus.BAD_REQUEST, res.status_int, res) db.volume_destroy(self.ctxt, vol.id) db.snapshot_destroy(self.ctxt, snap.id) def test_unmanage_encrypted_volume_denied(self): vol = utils.create_volume( self.ctxt, encryption_key_id='7a98391f-6619-46af-bd00-5862a3f7f1bd') res = self._get_resp(vol.id) self.assertEqual(HTTPStatus.BAD_REQUEST, res.status_int, res) db.volume_destroy(self.ctxt, vol.id) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/api/fakes.py0000664000175000017500000001250200000000000020670 0ustar00zuulzuul00000000000000# Copyright 2010 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import uuid from oslo_service import wsgi from oslo_utils import timeutils import routes import webob import webob.dec import webob.request from cinder.api.middleware import auth from cinder.api.middleware import fault from cinder.api.openstack import api_version_request as api_version from cinder.api.openstack import wsgi as os_wsgi from cinder.api import urlmap from cinder.api.v3 import limits from cinder.api.v3 import router as router_v3 from cinder.api import versions from cinder import context from cinder.tests.unit import fake_constants as fake FAKE_UUID = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa' FAKE_UUIDS = {} class Context(object): pass class FakeRouter(wsgi.Router): def __init__(self, ext_mgr=None): pass @webob.dec.wsgify def __call__(self, req): res = webob.Response() res.status = '200' res.headers['X-Test-Success'] = 'True' return res @webob.dec.wsgify def fake_wsgi(self, req): return self.application def wsgi_app(inner_app_v2=None, fake_auth=True, fake_auth_context=None, use_no_auth=False, ext_mgr=None, inner_app_v3=None): if not inner_app_v3: inner_app_v3 = router_v3.APIRouter(ext_mgr) if fake_auth: if fake_auth_context is not None: ctxt = fake_auth_context else: ctxt = context.RequestContext(fake.USER_ID, fake.PROJECT_ID, auth_token=True) api_v3 = fault.FaultWrapper(auth.InjectContext(ctxt, inner_app_v3)) elif use_no_auth: api_v3 = fault.FaultWrapper(auth.NoAuthMiddleware( limits.RateLimitingMiddleware(inner_app_v3))) else: api_v3 = fault.FaultWrapper(auth.AuthMiddleware( limits.RateLimitingMiddleware(inner_app_v3))) mapper = urlmap.URLMap() mapper['/v3'] = api_v3 mapper['/'] = fault.FaultWrapper(versions.VersionsController()) return mapper class FakeToken(object): id_count = 0 def __getitem__(self, key): return getattr(self, key) def __init__(self, **kwargs): FakeToken.id_count += 1 self.id = FakeToken.id_count for k, v in kwargs.items(): setattr(self, k, v) class FakeRequestContext(context.RequestContext): def __init__(self, *args, **kwargs): kwargs['auth_token'] = kwargs.get(fake.USER_ID, fake.PROJECT_ID) super(FakeRequestContext, self).__init__(*args, **kwargs) class HTTPRequest(webob.Request): @classmethod def blank(cls, *args, **kwargs): if args is not None: if 'v1' in args[0]: kwargs['base_url'] = 'http://localhost/v1' if 'v2' in args[0]: kwargs['base_url'] = 'http://localhost/v2' if 'v3' in args[0]: kwargs['base_url'] = 'http://localhost/v3' use_admin_context = kwargs.pop('use_admin_context', False) version = kwargs.pop('version', api_version._MIN_API_VERSION) system_scope = kwargs.pop('system_scope', None) out = os_wsgi.Request.blank(*args, **kwargs) out.environ['cinder.context'] = FakeRequestContext( fake.USER_ID, fake.PROJECT_ID, is_admin=use_admin_context, system_scope=system_scope) out.api_version_request = api_version.APIVersionRequest(version) return out class TestRouter(wsgi.Router): def __init__(self, controller): mapper = routes.Mapper() mapper.resource("test", "tests", controller=os_wsgi.Resource(controller)) super(TestRouter, self).__init__(mapper) class FakeAuthDatabase(object): data = {} @staticmethod def auth_token_get(context, token_hash): return FakeAuthDatabase.data.get(token_hash, None) @staticmethod def auth_token_create(context, token): fake_token = FakeToken(created_at=timeutils.utcnow(), **token) FakeAuthDatabase.data[fake_token.token_hash] = fake_token FakeAuthDatabase.data['id_%i' % fake_token.id] = fake_token return fake_token @staticmethod def auth_token_destroy(context, token_id): token = FakeAuthDatabase.data.get('id_%i' % token_id) if 
token and token.token_hash in FakeAuthDatabase.data:
            del FakeAuthDatabase.data[token.token_hash]
            del FakeAuthDatabase.data['id_%i' % token_id]


class FakeRateLimiter(object):
    def __init__(self, application):
        self.application = application

    @webob.dec.wsgify
    def __call__(self, req):
        return self.application


def get_fake_uuid(token=0):
    if token not in FAKE_UUIDS:
        FAKE_UUIDS[token] = str(uuid.uuid4())
    return FAKE_UUIDS[token]


cinder-27.0.0/cinder/tests/unit/api/middleware/
cinder-27.0.0/cinder/tests/unit/api/middleware/__init__.py
cinder-27.0.0/cinder/tests/unit/api/middleware/test_auth.py

# Copyright (c) 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from http import HTTPStatus

from oslo_middleware import request_id
import webob

import cinder.api.middleware.auth
from cinder.tests.unit import test


class TestCinderKeystoneContextMiddleware(test.TestCase):

    def setUp(self):
        super(TestCinderKeystoneContextMiddleware, self).setUp()

        @webob.dec.wsgify()
        def fake_app(req):
            self.context = req.environ['cinder.context']
            return webob.Response()

        self.context = None
        self.middleware = (cinder.api.middleware.auth
                           .CinderKeystoneContext(fake_app))
        self.request = webob.Request.blank('/')
        self.request.headers['X_TENANT_ID'] = 'testtenantid'
        self.request.headers['X_AUTH_TOKEN'] = 'testauthtoken'

    def test_no_user_or_user_id(self):
        response = self.request.get_response(self.middleware)
        self.assertEqual(HTTPStatus.UNAUTHORIZED, response.status_int)

    def test_user_only(self):
        self.request.headers['X_USER'] = 'testuser'
        response = self.request.get_response(self.middleware)
        self.assertEqual(HTTPStatus.OK, response.status_int)
        self.assertEqual('testuser', self.context.user_id)

    def test_user_id_only(self):
        self.request.headers['X_USER_ID'] = 'testuserid'
        response = self.request.get_response(self.middleware)
        self.assertEqual(HTTPStatus.OK, response.status_int)
        self.assertEqual('testuserid', self.context.user_id)

    def test_user_id_trumps_user(self):
        self.request.headers['X_USER_ID'] = 'testuserid'
        self.request.headers['X_USER'] = 'testuser'
        response = self.request.get_response(self.middleware)
        self.assertEqual(HTTPStatus.OK, response.status_int)
        self.assertEqual('testuserid', self.context.user_id)

    def test_tenant_id_name(self):
        self.request.headers['X_USER_ID'] = 'testuserid'
        self.request.headers['X_TENANT_NAME'] = 'testtenantname'
        response = self.request.get_response(self.middleware)
        self.assertEqual(HTTPStatus.OK, response.status_int)
        self.assertEqual('testtenantid', self.context.project_id)
        self.assertEqual('testtenantname', self.context.project_name)

    def test_request_id_extracted_from_env(self):
        req_id = 'dummy-request-id'
        self.request.headers['X_PROJECT_ID'] = 'testtenantid'
        self.request.headers['X_USER_ID'] = 'testuserid'
        self.request.environ[request_id.ENV_REQUEST_ID] = req_id
        self.request.get_response(self.middleware)
        self.assertEqual(req_id, self.context.request_id)

    def test_request_project_domain_id(self):
        self.request.headers['X_USER_ID'] = 'testuserid'
        self.request.headers['X_PROJECT_DOMAIN_ID'] = 'domain1'
        self.request.get_response(self.middleware)
        self.assertEqual('domain1', self.context.project_domain_id)

    def test_request_project_domain_name(self):
        self.request.headers['X_USER_ID'] = 'testuserid'
        self.request.headers['X_PROJECT_DOMAIN_NAME'] = 'mydomain'
        self.request.get_response(self.middleware)
        self.assertEqual('mydomain', self.context.project_domain_name)

    def test_request_user_domain_id(self):
        self.request.headers['X_USER_ID'] = 'testuserid'
        self.request.headers['X_USER_DOMAIN_ID'] = 'domain2'
        self.request.get_response(self.middleware)
        self.assertEqual('domain2', self.context.user_domain_id)

    def test_request_user_domain_name(self):
        self.request.headers['X_USER_ID'] = 'testuserid'
        self.request.headers['X_USER_DOMAIN_NAME'] = 'mydomain2'
        self.request.get_response(self.middleware)
        self.assertEqual('mydomain2', self.context.user_domain_name)


cinder-27.0.0/cinder/tests/unit/api/middleware/test_faults.py

# Copyright 2010 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
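# ---------------------------------------------------------------------------
# Illustrative sketch (an addition for clarity, not part of the original
# module): the tests below assert that wsgi.Fault serializes a webob HTTP
# error into a JSON document keyed by the fault name.  This unused helper
# spells out the 400 ("badRequest") shape those tests expect; the helper name
# is an assumption for illustration only.
from http import HTTPStatus


def _example_bad_request_fault_body(message):
    """Shape of the JSON body expected from a 400 Fault."""
    return {"badRequest": {"message": message,
                           "code": HTTPStatus.BAD_REQUEST}}
# ---------------------------------------------------------------------------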
from http import HTTPStatus from oslo_i18n import fixture as i18n_fixture from oslo_serialization import jsonutils import webob.dec from cinder.api.middleware import fault from cinder.api.openstack import wsgi from cinder.tests.unit import test class TestFaults(test.TestCase): """Tests covering `cinder.api.openstack.faults:Fault` class.""" def setUp(self): super(TestFaults, self).setUp() self.useFixture(i18n_fixture.ToggleLazy(True)) def test_400_fault_json(self): """Test fault serialized to JSON via file-extension and/or header.""" requests = [ webob.Request.blank('/.json'), webob.Request.blank('/', headers={"Accept": "application/json"}), ] for request in requests: fault = wsgi.Fault(webob.exc.HTTPBadRequest(explanation='scram')) response = request.get_response(fault) expected = { "badRequest": { "message": "scram", "code": HTTPStatus.BAD_REQUEST, }, } actual = jsonutils.loads(response.body) self.assertEqual("application/json", response.content_type) self.assertEqual(expected, actual) def test_413_fault_json(self): """Test fault serialized to JSON via file-extension and/or header.""" requests = [ webob.Request.blank('/.json'), webob.Request.blank('/', headers={"Accept": "application/json"}), ] for request in requests: exc = webob.exc.HTTPRequestEntityTooLarge fault = wsgi.Fault(exc(explanation='sorry', headers={'Retry-After': '4'})) response = request.get_response(fault) expected = { "overLimit": { "message": "sorry", "code": HTTPStatus.REQUEST_ENTITY_TOO_LARGE, "retryAfter": "4", }, } actual = jsonutils.loads(response.body) self.assertEqual("application/json", response.content_type) self.assertEqual(expected, actual) def test_fault_has_status_int(self): """Ensure the status_int is set correctly on faults.""" fault = wsgi.Fault(webob.exc.HTTPBadRequest(explanation='what?')) self.assertEqual(HTTPStatus.BAD_REQUEST, fault.status_int) class ExceptionTest(test.TestCase): def _wsgi_app(self, inner_app): return fault.FaultWrapper(inner_app) def test_unicode_decode_error(self): @webob.dec.wsgify def unicode_error(req): raise UnicodeDecodeError("ascii", b"", 0, 1, "bad") api = self._wsgi_app(unicode_error) resp = webob.Request.blank('/').get_response(api) self.assertEqual(400, resp.status_int) ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1759315577.179119 cinder-27.0.0/cinder/tests/unit/api/openstack/0000775000175000017500000000000000000000000021214 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/api/openstack/__init__.py0000664000175000017500000000000000000000000023313 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/api/openstack/test_api_version_request.py0000664000175000017500000001235700000000000026723 0ustar00zuulzuul00000000000000# Copyright 2014 IBM Corp. # Copyright 2015 Clinton Knight # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. import ddt from cinder.api.openstack import api_version_request from cinder import exception from cinder.tests.unit import test @ddt.ddt class APIVersionRequestTests(test.TestCase): def test_init(self): result = api_version_request.APIVersionRequest() self.assertIsNone(result._ver_major) self.assertIsNone(result._ver_minor) def test_min_version(self): self.assertEqual( api_version_request.APIVersionRequest( api_version_request._MIN_API_VERSION), api_version_request.min_api_version()) def test_max_api_version(self): self.assertEqual( api_version_request.APIVersionRequest( api_version_request._MAX_API_VERSION), api_version_request.max_api_version()) @ddt.data( ('1.1', 1, 1), ('2.10', 2, 10), ('5.234', 5, 234), ('12.5', 12, 5), ('2.0', 2, 0), ('2.200', 2, 200) ) @ddt.unpack def test_valid_version_strings(self, version_string, major, minor): request = api_version_request.APIVersionRequest(version_string) self.assertEqual(major, request._ver_major) self.assertEqual(minor, request._ver_minor) def test_null_version(self): v = api_version_request.APIVersionRequest() self.assertFalse(bool(v)) def test_not_null_version(self): v = api_version_request.APIVersionRequest('1.1') self.assertTrue(bool(v)) @ddt.data('2', '200', '2.1.4', '200.23.66.3', '5 .3', '5. 3', '5.03', '02.1', '2.001', '', ' 2.1', '2.1 ') def test_invalid_version_strings(self, version_string): self.assertRaises(exception.InvalidAPIVersionString, api_version_request.APIVersionRequest, version_string) def test_cmpkey(self): request = api_version_request.APIVersionRequest('1.2') self.assertEqual((1, 2), request._cmpkey()) def test_version_comparisons(self): v1 = api_version_request.APIVersionRequest('2.0') v2 = api_version_request.APIVersionRequest('2.5') v3 = api_version_request.APIVersionRequest('5.23') v4 = api_version_request.APIVersionRequest('2.0') v_null = api_version_request.APIVersionRequest() self.assertLess(v1, v2) self.assertLessEqual(v1, v2) self.assertGreater(v3, v2) self.assertGreaterEqual(v3, v2) self.assertNotEqual(v1, v2) self.assertEqual(v1, v4) self.assertNotEqual(v1, v_null) self.assertEqual(v_null, v_null) self.assertNotEqual('2.0', v1) def test_version_matches(self): v1 = api_version_request.APIVersionRequest('2.0') v2 = api_version_request.APIVersionRequest('2.5') v3 = api_version_request.APIVersionRequest('2.45') v4 = api_version_request.APIVersionRequest('3.3') v5 = api_version_request.APIVersionRequest('3.23') v6 = api_version_request.APIVersionRequest('2.0') v7 = api_version_request.APIVersionRequest('3.3') v8 = api_version_request.APIVersionRequest('4.0') v_null = api_version_request.APIVersionRequest() self.assertTrue(v2.matches(v1, v3)) self.assertTrue(v2.matches(v1, v_null)) self.assertTrue(v1.matches(v6, v2)) self.assertTrue(v4.matches(v2, v7)) self.assertTrue(v4.matches(v_null, v7)) self.assertTrue(v4.matches(v_null, v8)) self.assertFalse(v1.matches(v2, v3)) self.assertFalse(v5.matches(v2, v4)) self.assertFalse(v2.matches(v3, v1)) self.assertTrue(v1.matches(v_null, v_null)) self.assertRaises(ValueError, v_null.matches, v1, v3) def test_matches_versioned_method(self): request = api_version_request.APIVersionRequest('2.0') self.assertRaises(exception.InvalidParameterValue, request.matches_versioned_method, 'fake_method') def test_get_string(self): v1_string = '3.23' v1 = api_version_request.APIVersionRequest(v1_string) self.assertEqual(v1_string, v1.get_string()) self.assertRaises(ValueError, 
api_version_request.APIVersionRequest().get_string) @ddt.data(('1', '0'), ('1', '1')) @ddt.unpack def test_str(self, major, minor): request_input = '%s.%s' % (major, minor) request = api_version_request.APIVersionRequest(request_input) request_string = str(request) self.assertEqual('API Version Request ' 'Major: %s, Minor: %s' % (major, minor), request_string) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/api/openstack/test_versioned_method.py0000664000175000017500000000243300000000000026165 0ustar00zuulzuul00000000000000# Copyright 2015 Clinton Knight # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from cinder.api.openstack import versioned_method from cinder.tests.unit import test class VersionedMethodTestCase(test.TestCase): def test_str(self): args = ('fake_name', 'fake_min', 'fake_max') method = versioned_method.VersionedMethod(*(args + (False, None))) method_string = str(method) self.assertEqual('Version Method %s: min: %s, max: %s' % args, method_string) def test_cmpkey(self): method = versioned_method.VersionedMethod( 'fake_name', 'fake_start_version', 'fake_end_version', False, 'fake_func') self.assertEqual('fake_start_version', method._cmpkey()) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/api/openstack/test_wsgi.py0000664000175000017500000010275200000000000023605 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from http import HTTPStatus import inspect from unittest import mock import ddt from oslo_utils import encodeutils import webob from cinder.api.openstack import wsgi from cinder import exception from cinder.tests.unit.api import fakes from cinder.tests.unit import test class RequestTest(test.TestCase): def setUp(self): super(RequestTest, self).setUp() self.patch('cinder.i18n.get_available_languages', return_value=['en-GB', 'en-AU', 'de', 'zh-CN', 'en-US', 'ja-JP']) def test_content_type_missing(self): request = wsgi.Request.blank('/tests/123', method='POST') request.body = b"" self.assertIsNone(request.get_content_type()) def test_content_type_unsupported(self): request = wsgi.Request.blank('/tests/123', method='POST') request.headers["Content-Type"] = "text/html" request.body = b"asdf
" self.assertRaises(exception.InvalidContentType, request.get_content_type) def test_content_type_with_charset(self): request = wsgi.Request.blank('/tests/123') request.headers["Content-Type"] = "application/json; charset=UTF-8" result = request.get_content_type() self.assertEqual("application/json", result) def test_content_type_from_accept(self): for content_type in ('application/json', 'application/vnd.openstack.volume+json'): request = wsgi.Request.blank('/tests/123') request.headers["Accept"] = content_type result = request.best_match_content_type() self.assertEqual(content_type, result) def test_content_type_from_accept_best(self): request = wsgi.Request.blank('/tests/123') request.headers["Accept"] = "application/json" result = request.best_match_content_type() self.assertEqual("application/json", result) def test_content_type_from_query_extension(self): request = wsgi.Request.blank('/tests/123.json') result = request.best_match_content_type() self.assertEqual("application/json", result) request = wsgi.Request.blank('/tests/123.invalid') result = request.best_match_content_type() self.assertEqual("application/json", result) def test_content_type_accept_default(self): request = wsgi.Request.blank('/tests/123.unsupported') request.headers["Accept"] = "application/unsupported1" result = request.best_match_content_type() self.assertEqual("application/json", result) def test_content_type_accept_with_quality_values(self): request = wsgi.Request.blank('/tests/123') request.headers["Accept"] = ( "application/json;q=0.4," "application/vnd.openstack.volume+json;q=0.6") result = request.best_match_content_type() self.assertEqual("application/vnd.openstack.volume+json", result) def test_from_request(self): request = wsgi.Request.blank('/') accepted = 'bogus;q=1, en-gb;q=0.7,en-us,en;q=0.5,*;q=0.7' request.headers = {'Accept-Language': accepted} self.assertEqual(request.best_match_language(), 'en-US') def test_asterisk(self): # In the section 3.4 of RFC4647, it says as follows: # If the language range "*"(asterisk) is the only one # in the language priority list or if no other language range # follows, the default value is computed and returned. # # In this case, the default value 'None' is returned. 
request = wsgi.Request.blank('/') accepted = '*;q=0.5' request.headers = {'Accept-Language': accepted} self.assertIsNone(request.best_match_language()) def test_asterisk_followed_by_other_language(self): request = wsgi.Request.blank('/') accepted = '*,ja-jp;q=0.5' request.headers = {'Accept-Language': accepted} self.assertEqual('ja-JP', request.best_match_language()) def test_truncate(self): request = wsgi.Request.blank('/') accepted = 'de-CH' request.headers = {'Accept-Language': accepted} self.assertEqual('de', request.best_match_language()) def test_secondary(self): request = wsgi.Request.blank('/') accepted = 'nn,en-gb;q=0.5' request.headers = {'Accept-Language': accepted} self.assertEqual('en-GB', request.best_match_language()) def test_none_found(self): request = wsgi.Request.blank('/') accepted = 'nb-no' request.headers = {'Accept-Language': accepted} self.assertIsNone(request.best_match_language()) def test_no_lang_header(self): request = wsgi.Request.blank('/') accepted = '' request.headers = {'Accept-Language': accepted} self.assertIsNone(request.best_match_language()) def test_best_match_language(self): # Test that we are actually invoking language negotiation by webob request = wsgi.Request.blank('/') accepted = 'unknown-lang' request.headers = {'Accept-Language': accepted} self.mock_object(request.accept_language, 'best_match', return_value=None) self.assertIsNone(request.best_match_language()) # If accept-language is not included or empty, match should be None request.headers = {'Accept-Language': ''} self.assertIsNone(request.best_match_language()) request.headers.pop('Accept-Language') self.assertIsNone(request.best_match_language()) def test_cache_and_retrieve_resources(self): request = wsgi.Request.blank('/foo') # Test that trying to retrieve a cached object on # an empty cache fails gracefully self.assertIsNone(request.cached_resource()) self.assertIsNone(request.cached_resource_by_id('r-0')) resources = [] for x in range(3): resources.append({'id': 'r-%s' % x}) # Cache an empty list of resources using the default name request.cache_resource([]) self.assertEqual({}, request.cached_resource()) self.assertIsNone(request.cached_resource('r-0')) # Cache some resources request.cache_resource(resources[:2]) # Cache one resource request.cache_resource(resources[2]) # Cache a different resource name other_resource = {'id': 'o-0'} request.cache_resource(other_resource, name='other-resource') self.assertEqual(resources[0], request.cached_resource_by_id('r-0')) self.assertEqual(resources[1], request.cached_resource_by_id('r-1')) self.assertEqual(resources[2], request.cached_resource_by_id('r-2')) self.assertIsNone(request.cached_resource_by_id('r-3')) self.assertEqual({'r-0': resources[0], 'r-1': resources[1], 'r-2': resources[2]}, request.cached_resource()) self.assertEqual(other_resource, request.cached_resource_by_id('o-0', name='other-resource')) def test_cache_and_retrieve_volumes(self): self._test_cache_and_retrieve_resources('volume') def test_cache_and_retrieve_volume_types(self): self._test_cache_and_retrieve_resources('volume_type') def test_cache_and_retrieve_snapshots(self): self._test_cache_and_retrieve_resources('snapshot') def test_cache_and_retrieve_backups(self): self._test_cache_and_retrieve_resources('backup') def _test_cache_and_retrieve_resources(self, resource_name): """Generic helper for cache tests.""" cache_all_func = 'cache_db_%ss' % resource_name cache_one_func = 'cache_db_%s' % resource_name get_db_all_func = 'get_db_%ss' % resource_name get_db_one_func 
= 'get_db_%s' % resource_name r = wsgi.Request.blank('/foo') resources = [] for x in range(3): resources.append({'id': 'id%s' % x}) # Store 2 getattr(r, cache_all_func)(resources[:2]) # Store 1 getattr(r, cache_one_func)(resources[2]) self.assertEqual(resources[0], getattr(r, get_db_one_func)('id0')) self.assertEqual(resources[1], getattr(r, get_db_one_func)('id1')) self.assertEqual(resources[2], getattr(r, get_db_one_func)('id2')) self.assertIsNone(getattr(r, get_db_one_func)('id3')) self.assertEqual({'id0': resources[0], 'id1': resources[1], 'id2': resources[2]}, getattr(r, get_db_all_func)()) class ActionDispatcherTest(test.TestCase): def test_dispatch(self): serializer = wsgi.ActionDispatcher() serializer.create = lambda x: 'pants' self.assertEqual('pants', serializer.dispatch({}, action='create')) def test_dispatch_action_None(self): serializer = wsgi.ActionDispatcher() serializer.create = lambda x: 'pants' serializer.default = lambda x: 'trousers' self.assertEqual('trousers', serializer.dispatch({}, action=None)) def test_dispatch_default(self): serializer = wsgi.ActionDispatcher() serializer.create = lambda x: 'pants' serializer.default = lambda x: 'trousers' self.assertEqual('trousers', serializer.dispatch({}, action='update')) class DictSerializerTest(test.TestCase): def test_dispatch_default(self): serializer = wsgi.DictSerializer() self.assertEqual('', serializer.serialize({}, 'update')) class JSONDictSerializerTest(test.TestCase): def test_json(self): input_dict = dict(servers=dict(a=(2, 3))) expected_json = b'{"servers":{"a":[2,3]}}' serializer = wsgi.JSONDictSerializer() result = serializer.serialize(input_dict) result = result.replace(b'\n', b'').replace(b' ', b'') self.assertEqual(expected_json, result) class TextDeserializerTest(test.TestCase): def test_dispatch_default(self): deserializer = wsgi.TextDeserializer() self.assertEqual({}, deserializer.deserialize({}, 'update')) class JSONDeserializerTest(test.TestCase): def test_json(self): data = """{"a": { "a1": "1", "a2": "2", "bs": ["1", "2", "3", {"c": {"c1": "1"}}], "d": {"e": "1"}, "f": "1"}}""" as_dict = { 'body': { 'a': { 'a1': '1', 'a2': '2', 'bs': ['1', '2', '3', {'c': {'c1': '1'}}], 'd': {'e': '1'}, 'f': '1', }, }, } deserializer = wsgi.JSONDeserializer() self.assertEqual(as_dict, deserializer.deserialize(data)) class ResourceTest(test.TestCase): def test_resource_call(self): class Controller(object): def index(self, req): return 'off' req = webob.Request.blank('/tests') app = fakes.TestRouter(Controller()) response = req.get_response(app) self.assertEqual(b'off', response.body) self.assertEqual(HTTPStatus.OK, response.status_int) def test_resource_not_authorized(self): class Controller(object): def index(self, req): raise exception.NotAuthorized() req = webob.Request.blank('/tests') app = fakes.TestRouter(Controller()) response = req.get_response(app) self.assertEqual(HTTPStatus.FORBIDDEN, response.status_int) def test_dispatch(self): class Controller(object): def index(self, req, pants=None): return pants controller = Controller() resource = wsgi.Resource(controller) method, _extensions = resource.get_method(None, 'index', None, '') actual = resource.dispatch(method, None, {'pants': 'off'}) expected = 'off' self.assertEqual(expected, actual) @mock.patch('oslo_utils.strutils.mask_password') def test_process_stack_non_ascii(self, masker): class Controller(wsgi.Controller): @wsgi.action('fooAction') def fooAction(self, req, id, body): return 'done' controller = Controller() resource = wsgi.Resource(controller) # 
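# Illustrative sketch (not the cinder class itself): the dispatch-by-action
# pattern that ActionDispatcherTest exercises - look up a method named after
# the action and fall back to default() when it is missing or action is None.
class DispatcherSketch(object):
    def dispatch(self, *args, **kwargs):
        action = kwargs.pop('action', 'default')
        handler = getattr(self, str(action), None) or self.default
        return handler(*args, **kwargs)

    def default(self, data):
        return 'trousers'

    def create(self, data):
        return 'pants'


assert DispatcherSketch().dispatch({}, action='create') == 'pants'
assert DispatcherSketch().dispatch({}, action=None) == 'trousers'
assert DispatcherSketch().dispatch({}, action='update') == 'trousers'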
The following body has a non-ascii chars serialized_body = '{"foo": {"nonascii": "\xe2\x80\x96\xe2\x88\xa5"}}' request = webob.Request.blank('/tests/fooAction') action_args = {'id': 12} # Now test _process_stack() mainline flow. # Without the fix to safe_decode the body in _process_stack(), # this test fails with: # UnicodeDecodeError: 'ascii' codec can't decode byte 0xe2 in # position 22: ordinal not in range(128) response = resource._process_stack(request, 'fooAction', action_args, 'application/json', serialized_body, 'application/json') self.assertEqual('done', response) # The following check verifies that mask_password was called with # the decoded body. self.assertEqual(1, masker.call_count) decoded_body = encodeutils.safe_decode( serialized_body, errors='ignore') self.assertIn(decoded_body, masker.call_args[0][0]) def test_get_method_undefined_controller_action(self): class Controller(object): def index(self, req, pants=None): return pants controller = Controller() resource = wsgi.Resource(controller) self.assertRaises(AttributeError, resource.get_method, None, 'create', None, '') def test_get_method_action_json(self): class Controller(wsgi.Controller): @wsgi.action('fooAction') def _action_foo(self, req, id, body): return body controller = Controller() resource = wsgi.Resource(controller) method, _extensions = resource.get_method(None, 'action', 'application/json', '{"fooAction": true}') self.assertEqual(controller._action_foo, method) def test_get_method_action_bad_body(self): class Controller(wsgi.Controller): @wsgi.action('fooAction') def _action_foo(self, req, id, body): return body controller = Controller() resource = wsgi.Resource(controller) self.assertRaises(exception.MalformedRequestBody, resource.get_method, None, 'action', 'application/json', '{}') def test_get_method_unknown_controller_action(self): class Controller(wsgi.Controller): @wsgi.action('fooAction') def _action_foo(self, req, id, body): return body controller = Controller() resource = wsgi.Resource(controller) self.assertRaises(KeyError, resource.get_method, None, 'action', 'application/json', '{"barAction": true}') def test_get_action_args(self): class Controller(object): def index(self, req, pants=None): return pants controller = Controller() resource = wsgi.Resource(controller) env = { 'wsgiorg.routing_args': [None, { 'controller': None, 'format': None, 'action': 'update', 'id': 12, }], } expected = {'action': 'update', 'id': 12} self.assertEqual(expected, resource.get_action_args(env)) def test_get_body_bad_content(self): class Controller(object): def index(self, req, pants=None): return pants controller = Controller() resource = wsgi.Resource(controller) request = wsgi.Request.blank('/', method='POST') request.headers['Content-Type'] = 'application/none' request.body = b'foo' self.assertRaises(exception.InvalidContentType, resource.get_body, request) def test_get_body_no_content_type(self): class Controller(object): def index(self, req, pants=None): return pants controller = Controller() resource = wsgi.Resource(controller) request = wsgi.Request.blank('/', method='POST') request.body = b'foo' content_type, body = resource.get_body(request) self.assertIsNone(content_type) self.assertEqual('', body) def test_get_body_no_content_body(self): class Controller(object): def index(self, req, pants=None): return pants controller = Controller() resource = wsgi.Resource(controller) request = wsgi.Request.blank('/', method='POST') request.headers['Content-Type'] = 'application/json' request.body = b'' content_type, 
body = resource.get_body(request) self.assertIsNone(content_type) self.assertEqual('', body) def test_get_body(self): class Controller(object): def index(self, req, pants=None): return pants controller = Controller() resource = wsgi.Resource(controller) request = wsgi.Request.blank('/', method='POST') request.headers['Content-Type'] = 'application/json' request.body = b'foo' content_type, body = resource.get_body(request) self.assertEqual('application/json', content_type) self.assertEqual(b'foo', body) def test_deserialize_badtype(self): class Controller(object): def index(self, req, pants=None): return pants controller = Controller() resource = wsgi.Resource(controller) self.assertRaises(exception.InvalidContentType, resource.deserialize, controller.index, 'application/none', 'foo') def test_deserialize_default(self): class JSONDeserializer(object): def deserialize(self, body): return 'json' class Controller(object): def index(self, req, pants=None): return pants controller = Controller() resource = wsgi.Resource(controller, json=JSONDeserializer) obj = resource.deserialize(controller.index, 'application/json', 'foo') self.assertEqual('json', obj) def test_register_actions(self): class Controller(object): def index(self, req, pants=None): return pants class ControllerExtended(wsgi.Controller): @wsgi.action('fooAction') def _action_foo(self, req, id, body): return body @wsgi.action('barAction') def _action_bar(self, req, id, body): return body controller = Controller() resource = wsgi.Resource(controller) self.assertEqual({}, resource.wsgi_actions) extended = ControllerExtended() resource.register_actions(extended) self.assertEqual({'fooAction': extended._action_foo, 'barAction': extended._action_bar, }, resource.wsgi_actions) def test_register_extensions(self): class Controller(object): def index(self, req, pants=None): return pants class ControllerExtended(wsgi.Controller): @wsgi.extends def index(self, req, resp_obj, pants=None): return None @wsgi.extends(action='fooAction') def _action_foo(self, req, resp, id, body): return None controller = Controller() resource = wsgi.Resource(controller) self.assertEqual({}, resource.wsgi_extensions) self.assertEqual({}, resource.wsgi_action_extensions) extended = ControllerExtended() resource.register_extensions(extended) self.assertEqual({'index': [extended.index]}, resource.wsgi_extensions) self.assertEqual({'fooAction': [extended._action_foo]}, resource.wsgi_action_extensions) def test_get_method_extensions(self): class Controller(object): def index(self, req, pants=None): return pants class ControllerExtended(wsgi.Controller): @wsgi.extends def index(self, req, resp_obj, pants=None): return None controller = Controller() extended = ControllerExtended() resource = wsgi.Resource(controller) resource.register_extensions(extended) method, extensions = resource.get_method(None, 'index', None, '') self.assertEqual(controller.index, method) self.assertEqual([extended.index], extensions) def test_get_method_action_extensions(self): class Controller(wsgi.Controller): def index(self, req, pants=None): return pants @wsgi.action('fooAction') def _action_foo(self, req, id, body): return body class ControllerExtended(wsgi.Controller): @wsgi.extends(action='fooAction') def _action_foo(self, req, resp_obj, id, body): return None controller = Controller() extended = ControllerExtended() resource = wsgi.Resource(controller) resource.register_extensions(extended) method, extensions = resource.get_method(None, 'action', 'application/json', '{"fooAction": true}') 
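# Illustrative sketch (hypothetical names, not the cinder decorators): how an
# @action('name') marker plus a registration scan can build the
# action-name -> bound-method mapping that register_actions() is tested for.
def action_sketch(name):
    def decorator(func):
        func.wsgi_action = name     # tag the function with its action name
        return func
    return decorator


def collect_actions(controller):
    actions = {}
    for attr in dir(controller):
        method = getattr(controller, attr)
        if callable(method) and hasattr(method, 'wsgi_action'):
            actions[method.wsgi_action] = method
    return actions


class ExtendedSketch(object):
    @action_sketch('fooAction')
    def _action_foo(self, req, id, body):
        return body


assert 'fooAction' in collect_actions(ExtendedSketch())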
self.assertEqual(controller._action_foo, method) self.assertEqual([extended._action_foo], extensions) def test_get_method_action_whitelist_extensions(self): class Controller(wsgi.Controller): def index(self, req, pants=None): return pants class ControllerExtended(wsgi.Controller): @wsgi.action('create') def _create(self, req, body): pass @wsgi.action('delete') def _delete(self, req, id): pass controller = Controller() extended = ControllerExtended() resource = wsgi.Resource(controller) resource.register_actions(extended) method, extensions = resource.get_method(None, 'create', 'application/json', '{"create": true}') self.assertEqual(extended._create, method) self.assertEqual([], extensions) method, extensions = resource.get_method(None, 'delete', None, None) self.assertEqual(extended._delete, method) self.assertEqual([], extensions) def test_pre_process_extensions_regular(self): class Controller(object): def index(self, req, pants=None): return pants controller = Controller() resource = wsgi.Resource(controller) called = [] def extension1(req, resp_obj): called.append(1) return None def extension2(req, resp_obj): called.append(2) return None extensions = [extension1, extension2] response, post = resource.pre_process_extensions(extensions, None, {}) self.assertEqual([], called) self.assertIsNone(response) self.assertEqual([extension2, extension1], list(post)) def test_pre_process_extensions_generator(self): class Controller(object): def index(self, req, pants=None): return pants controller = Controller() resource = wsgi.Resource(controller) called = [] def extension1(req): called.append('pre1') yield called.append('post1') def extension2(req): called.append('pre2') yield called.append('post2') extensions = [extension1, extension2] response, post = resource.pre_process_extensions(extensions, None, {}) post = list(post) self.assertEqual(['pre1', 'pre2'], called) self.assertIsNone(response) self.assertEqual(2, len(post)) self.assertTrue(inspect.isgenerator(post[0])) self.assertTrue(inspect.isgenerator(post[1])) for gen in post: try: gen.send(None) except StopIteration: continue self.assertEqual(['pre1', 'pre2', 'post2', 'post1'], called) def test_pre_process_extensions_generator_response(self): class Controller(object): def index(self, req, pants=None): return pants controller = Controller() resource = wsgi.Resource(controller) called = [] def extension1(req): called.append('pre1') yield 'foo' def extension2(req): called.append('pre2') extensions = [extension1, extension2] response, post = resource.pre_process_extensions(extensions, None, {}) self.assertEqual(['pre1'], called) self.assertEqual('foo', response) self.assertEqual([], post) def test_post_process_extensions_regular(self): class Controller(object): def index(self, req, pants=None): return pants controller = Controller() resource = wsgi.Resource(controller) called = [] def extension1(req, resp_obj): called.append(1) return None def extension2(req, resp_obj): called.append(2) return None response = resource.post_process_extensions([extension2, extension1], None, None, {}) self.assertEqual([2, 1], called) self.assertIsNone(response) def test_post_process_extensions_regular_response(self): class Controller(object): def index(self, req, pants=None): return pants controller = Controller() resource = wsgi.Resource(controller) called = [] def extension1(req, resp_obj): called.append(1) return None def extension2(req, resp_obj): called.append(2) return 'foo' response = resource.post_process_extensions([extension2, extension1], None, None, 
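# Illustrative sketch (simplified, not the Resource implementation): the
# generator protocol the pre/post extension tests rely on - code before the
# first yield runs in the "pre" phase, code after it runs in the "post"
# phase, and post-processing walks the extensions in reverse order.
def run_extensions_sketch(extensions, req):
    post = []
    for ext in extensions:
        gen = ext(req)
        next(gen)               # pre phase: run up to the first yield
        post.append(gen)
    for gen in reversed(post):  # post phase: newest extension first
        try:
            gen.send(None)
        except StopIteration:
            pass


calls = []


def ext1_sketch(req):
    calls.append('pre1')
    yield
    calls.append('post1')


def ext2_sketch(req):
    calls.append('pre2')
    yield
    calls.append('post2')


run_extensions_sketch([ext1_sketch, ext2_sketch], None)
# Same ordering that test_pre_process_extensions_generator asserts.
assert calls == ['pre1', 'pre2', 'post2', 'post1']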
{}) self.assertEqual([2], called) self.assertEqual('foo', response) def test_post_process_extensions_version_not_found(self): class Controller(object): def index(self, req, pants=None): return pants controller = Controller() resource = wsgi.Resource(controller) called = [] def extension1(req, resp_obj): called.append(1) return 'bar' def extension2(req, resp_obj): raise exception.VersionNotFoundForAPIMethod(version='fake_version') response = resource.post_process_extensions([extension2, extension1], None, None, {}) self.assertEqual([1], called) self.assertEqual('bar', response) def test_post_process_extensions_generator(self): class Controller(object): def index(self, req, pants=None): return pants controller = Controller() resource = wsgi.Resource(controller) called = [] def extension1(req): yield called.append(1) def extension2(req): yield called.append(2) ext1 = extension1(None) next(ext1) ext2 = extension2(None) next(ext2) response = resource.post_process_extensions([ext2, ext1], None, None, {}) self.assertEqual([2, 1], called) self.assertIsNone(response) def test_post_process_extensions_generator_response(self): class Controller(object): def index(self, req, pants=None): return pants controller = Controller() resource = wsgi.Resource(controller) called = [] def extension1(req): yield called.append(1) def extension2(req): yield called.append(2) yield 'foo' ext1 = extension1(None) next(ext1) ext2 = extension2(None) next(ext2) response = resource.post_process_extensions([ext2, ext1], None, None, {}) self.assertEqual([2], called) self.assertEqual('foo', response) class ResponseObjectTest(test.TestCase): def test_default_code(self): robj = wsgi.ResponseObject({}) self.assertEqual(HTTPStatus.OK, robj.code) def test_modified_code(self): robj = wsgi.ResponseObject({}) robj._default_code = HTTPStatus.ACCEPTED self.assertEqual(HTTPStatus.ACCEPTED, robj.code) def test_override_default_code(self): robj = wsgi.ResponseObject({}, code=HTTPStatus.NOT_FOUND) self.assertEqual(HTTPStatus.NOT_FOUND, robj.code) def test_override_modified_code(self): robj = wsgi.ResponseObject({}, code=HTTPStatus.NOT_FOUND) robj._default_code = HTTPStatus.ACCEPTED self.assertEqual(HTTPStatus.NOT_FOUND, robj.code) def test_set_header(self): robj = wsgi.ResponseObject({}) robj['Header'] = 'foo' self.assertEqual({'header': 'foo'}, robj.headers) def test_get_header(self): robj = wsgi.ResponseObject({}) robj['Header'] = 'foo' self.assertEqual('foo', robj['hEADER']) def test_del_header(self): robj = wsgi.ResponseObject({}) robj['Header'] = 'foo' del robj['hEADER'] self.assertNotIn('header', robj.headers) def test_header_isolation(self): robj = wsgi.ResponseObject({}) robj['Header'] = 'foo' hdrs = robj.headers hdrs['hEADER'] = 'bar' self.assertEqual('foo', robj['hEADER']) def test_default_serializers(self): robj = wsgi.ResponseObject({}) self.assertEqual({}, robj.serializers) @ddt.data class ValidBodyTest(test.TestCase): def setUp(self): super(ValidBodyTest, self).setUp() self.controller = wsgi.Controller() def test_assert_valid_body(self): body = {'foo': {}} self.controller.assert_valid_body(body, 'foo') def test_assert_valid_body_none(self): self.assertRaises(webob.exc.HTTPBadRequest, self.controller.assert_valid_body(None, 'foo')) def test_assert_valid_body_empty(self): self.assertRaises(webob.exc.HTTPBadRequest, self.controller.assert_valid_body({}, 'foo')) def test_assert_valid_body_no_entity(self): body = {'bar': {}} self.assertRaises(webob.exc.HTTPBadRequest, self.controller.assert_valid_body(body, 'foo')) def 
test_assert_valid_body_malformed_entity(self): body = {'foo': 'bar'} self.assertRaises(webob.exc.HTTPBadRequest, self.controller.assert_valid_body(body, 'foo')) def test_validate_string_length_with_name_too_long(self): name = 'a' * 256 self.assertRaises(webob.exc.HTTPBadRequest, self.controller.validate_string_length, name, 'Name', min_length=1, max_length=255, remove_whitespaces=False) @ddt.data('name', 'display_name', 'description', 'display_description') def test_validate_name_and_description_with_name_too_long(self, attribute): body = {attribute: 'a' * 256} self.assertRaises(webob.exc.HTTPBadRequest, self.controller.validate_name_and_description, body) @ddt.data('name', 'display_name', 'description', 'display_description') def test_validate_name_and_description_with_name_as_int(self, attribute): body = {attribute: 1234} self.assertRaises(webob.exc.HTTPBadRequest, self.controller.validate_name_and_description, body) @ddt.data('name', 'display_name', 'description', 'display_description') def test_validate_name_and_description_with_name_zero_length(self, attribute): # NOTE(jdg): We allow zero length names currently, particularly # from Nova, changes to this require an API version bump body = {attribute: ""} self.controller.validate_name_and_description(body) self.assertEqual('', body[attribute]) @ddt.data('name', 'display_name', 'description', 'display_description') def test_validate_name_and_description_with_name_contains_white_spaces( self, attribute): body = {attribute: 'a' * 255 + " "} self.controller.validate_name_and_description(body) self.assertEqual('a' * 255, body[attribute]) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/api/test_api_validation.py0000664000175000017500000004565500000000000023640 0ustar00zuulzuul00000000000000# Copyright (C) 2017 NTT DATA # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
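# Illustrative sketch (hypothetical helper, not wsgi.Controller): the
# behaviour the name/description cases above check - values must be strings,
# surrounding whitespace is stripped, empty values are allowed, and the
# result is capped at 255 characters after stripping.
def validate_name_sketch(value, max_length=255):
    if not isinstance(value, str):
        raise ValueError('name/description must be a string')
    value = value.strip()
    if len(value) > max_length:
        raise ValueError('value is longer than %d characters' % max_length)
    return value


assert validate_name_sketch('') == ''
assert validate_name_sketch('a' * 255 + ' ') == 'a' * 255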
from http import client as http import re import fixtures from cinder.api.openstack import api_version_request as api_version from cinder.api import validation from cinder.api.validation import parameter_types from cinder import exception from cinder.tests.unit import test class FakeRequest(object): api_version_request = api_version.APIVersionRequest("3.0") environ = {} class ValidationRegex(test.TestCase): def test_build_regex_range(self): def _get_all_chars(): for i in range(0x7F): yield chr(i) self.useFixture(fixtures.MonkeyPatch( 'cinder.api.validation.parameter_types._get_all_chars', _get_all_chars)) r = parameter_types._build_regex_range(ws=False) self.assertEqual(re.escape('!') + '-' + re.escape('~'), r) # if we allow whitespace the range starts earlier r = parameter_types._build_regex_range(ws=True) self.assertEqual(re.escape(' ') + '-' + re.escape('~'), r) # excluding a character will give us 2 ranges r = parameter_types._build_regex_range(ws=True, exclude=['A']) self.assertEqual(re.escape(' ') + '-' + re.escape('@') + 'B' + '-' + re.escape('~'), r) # inverting which gives us all the initial unprintable characters. r = parameter_types._build_regex_range(ws=False, invert=True) self.assertEqual(re.escape('\x00') + '-' + re.escape(' '), r) # excluding characters that create a singleton. Naively this would be: # ' -@B-BD-~' which seems to work, but ' -@BD-~' is more natural. r = parameter_types._build_regex_range(ws=True, exclude=['A', 'C']) self.assertEqual(re.escape(' ') + '-' + re.escape('@') + 'B' + 'D' + '-' + re.escape('~'), r) # ws=True means the positive regex has printable whitespaces, # so the inverse will not. The inverse will include things we # exclude. r = parameter_types._build_regex_range( ws=True, exclude=['A', 'B', 'C', 'Z'], invert=True) self.assertEqual(re.escape('\x00') + '-' + re.escape('\x1f') + 'A-CZ', r) class APIValidationTestCase(test.TestCase): def setUp(self, schema=None): super(APIValidationTestCase, self).setUp() self.post = None if schema is not None: @validation.schema(request_body_schema=schema) def post(req, body): return 'Validation succeeded.' 
self.post = post def check_validation_error(self, method, body, expected_detail, req=None): if not req: req = FakeRequest() try: method(body=body, req=req,) except exception.ValidationError as ex: self.assertEqual(http.BAD_REQUEST, ex.kwargs['code']) if isinstance(expected_detail, list): self.assertIn(ex.kwargs['detail'], expected_detail, 'Exception details did not match expected') elif not re.match(expected_detail, ex.kwargs['detail']): self.assertEqual(expected_detail, ex.kwargs['detail'], 'Exception details did not match expected') except Exception as ex: self.fail('An unexpected exception happens: %s' % ex) else: self.fail('Any exception did not happen.') class RequiredDisableTestCase(APIValidationTestCase): def setUp(self): schema = { 'type': 'object', 'properties': { 'foo': { 'type': 'integer', }, }, } super(RequiredDisableTestCase, self).setUp(schema=schema) def test_validate_required_disable(self): self.assertEqual('Validation succeeded.', self.post(body={'foo': 1}, req=FakeRequest())) self.assertEqual('Validation succeeded.', self.post(body={'abc': 1}, req=FakeRequest())) class RequiredEnableTestCase(APIValidationTestCase): def setUp(self): schema = { 'type': 'object', 'properties': { 'foo': { 'type': 'integer', }, }, 'required': ['foo'] } super(RequiredEnableTestCase, self).setUp(schema=schema) def test_validate_required_enable(self): self.assertEqual('Validation succeeded.', self.post(body={'foo': 1}, req=FakeRequest())) def test_validate_required_enable_fails(self): detail = "'foo' is a required property" self.check_validation_error(self.post, body={'abc': 1}, expected_detail=detail) class AdditionalPropertiesEnableTestCase(APIValidationTestCase): def setUp(self): schema = { 'type': 'object', 'properties': { 'foo': { 'type': 'integer', }, }, 'required': ['foo'], } super(AdditionalPropertiesEnableTestCase, self).setUp(schema=schema) def test_validate_additionalProperties_enable(self): self.assertEqual('Validation succeeded.', self.post(body={'foo': 1}, req=FakeRequest())) self.assertEqual('Validation succeeded.', self.post(body={'foo': 1, 'ext': 1}, req=FakeRequest())) class AdditionalPropertiesDisableTestCase(APIValidationTestCase): def setUp(self): schema = { 'type': 'object', 'properties': { 'foo': { 'type': 'integer', }, }, 'required': ['foo'], 'additionalProperties': False, } super(AdditionalPropertiesDisableTestCase, self).setUp(schema=schema) def test_validate_additionalProperties_disable(self): self.assertEqual('Validation succeeded.', self.post(body={'foo': 1}, req=FakeRequest())) def test_validate_additionalProperties_disable_fails(self): detail = "Additional properties are not allowed ('ext' was unexpected)" self.check_validation_error(self.post, body={'foo': 1, 'ext': 1}, expected_detail=detail) class PatternPropertiesTestCase(APIValidationTestCase): def setUp(self): schema = { 'patternProperties': { '^[a-zA-Z0-9]{1,10}$': { 'type': 'string' }, }, 'additionalProperties': False, } super(PatternPropertiesTestCase, self).setUp(schema=schema) def test_validate_patternProperties(self): self.assertEqual('Validation succeeded.', self.post(body={'foo': 'bar'}, req=FakeRequest())) def test_validate_patternProperties_fails(self): details = [ "Additional properties are not allowed ('__' was unexpected)", "'__' does not match any of the regexes: '^[a-zA-Z0-9]{1,10}$'" ] self.check_validation_error(self.post, body={'__': 'bar'}, expected_detail=details) details = [ "'' does not match any of the regexes: '^[a-zA-Z0-9]{1,10}$'", "Additional properties are not allowed ('' was 
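# Illustrative sketch using the jsonschema library directly (cinder's
# validation.schema decorator wraps similar machinery): a required property
# plus additionalProperties=False produce the kinds of failures asserted in
# the test cases above. The schema literal is an assumption for illustration.
import jsonschema

schema_sketch = {
    'type': 'object',
    'properties': {'foo': {'type': 'integer'}},
    'required': ['foo'],
    'additionalProperties': False,
}

jsonschema.validate({'foo': 1}, schema_sketch)          # passes
try:
    jsonschema.validate({'foo': 1, 'ext': 1}, schema_sketch)
except jsonschema.ValidationError as err:
    assert 'Additional properties are not allowed' in err.message
else:
    raise AssertionError('extra property should have been rejected')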
unexpected)" ] self.check_validation_error(self.post, body={'': 'bar'}, expected_detail=details) details = [ ("'0123456789a' does not match any of the regexes: " "'^[a-zA-Z0-9]{1,10}$'"), ("Additional properties are not allowed ('0123456789a' was" " unexpected)") ] self.check_validation_error(self.post, body={'0123456789a': 'bar'}, expected_detail=details) detail = "expected string or bytes-like object" self.check_validation_error(self.post, body={None: 'bar'}, expected_detail=detail) class StringTestCase(APIValidationTestCase): def setUp(self): schema = { 'type': 'object', 'properties': { 'foo': { 'type': 'string', }, }, } super(StringTestCase, self).setUp(schema=schema) def test_validate_string(self): self.assertEqual('Validation succeeded.', self.post(body={'foo': 'abc'}, req=FakeRequest())) self.assertEqual('Validation succeeded.', self.post(body={'foo': '0'}, req=FakeRequest())) self.assertEqual('Validation succeeded.', self.post(body={'foo': ''}, req=FakeRequest())) def test_validate_string_fails(self): detail = ("Invalid input for field/attribute foo. Value: 1." " 1 is not of type 'string'") self.check_validation_error(self.post, body={'foo': 1}, expected_detail=detail) detail = ("Invalid input for field/attribute foo. Value: 1.5." " 1.5 is not of type 'string'") self.check_validation_error(self.post, body={'foo': 1.5}, expected_detail=detail) detail = ("Invalid input for field/attribute foo. Value: True." " True is not of type 'string'") self.check_validation_error(self.post, body={'foo': True}, expected_detail=detail) class StringLengthTestCase(APIValidationTestCase): def setUp(self): schema = { 'type': 'object', 'properties': { 'foo': { 'type': 'string', 'minLength': 1, 'maxLength': 10, }, }, } super(StringLengthTestCase, self).setUp(schema=schema) def test_validate_string_length(self): self.assertEqual('Validation succeeded.', self.post(body={'foo': '0'}, req=FakeRequest())) self.assertEqual('Validation succeeded.', self.post(body={'foo': '0123456789'}, req=FakeRequest())) def test_validate_string_length_fails(self): # checks for jsonschema output from 3.2.x and 4.21.x detail = ("Invalid input for field/attribute foo. Value: ." " '' " "(is too short|should be non-empty)") self.check_validation_error(self.post, body={'foo': ''}, expected_detail=detail) detail = ("Invalid input for field/attribute foo. Value: 0123456789a." " '0123456789a' is too long") self.check_validation_error(self.post, body={'foo': '0123456789a'}, expected_detail=detail) class IntegerTestCase(APIValidationTestCase): def setUp(self): schema = { 'type': 'object', 'properties': { 'foo': { 'type': ['integer', 'string'], 'pattern': '^[0-9]+$', }, }, } super(IntegerTestCase, self).setUp(schema=schema) def test_validate_integer(self): self.assertEqual('Validation succeeded.', self.post(body={'foo': 1}, req=FakeRequest())) self.assertEqual('Validation succeeded.', self.post(body={'foo': '1'}, req=FakeRequest())) self.assertEqual('Validation succeeded.', self.post(body={'foo': '0123456789'}, req=FakeRequest())) def test_validate_integer_fails(self): detail = ("Invalid input for field/attribute foo. Value: abc." " 'abc' does not match '^[0-9]+$'") self.check_validation_error(self.post, body={'foo': 'abc'}, expected_detail=detail) detail = ("Invalid input for field/attribute foo. Value: True." " True is not of type 'integer', 'string'") self.check_validation_error(self.post, body={'foo': True}, expected_detail=detail) detail = ("Invalid input for field/attribute foo. Value: 0xffff." 
" '0xffff' does not match '^[0-9]+$'") self.check_validation_error(self.post, body={'foo': '0xffff'}, expected_detail=detail) detail = ("Invalid input for field/attribute foo. Value: 1.0." " 1.0 is not of type 'integer', 'string'") self.check_validation_error(self.post, body={'foo': 1.0}, expected_detail=detail) detail = ("Invalid input for field/attribute foo. Value: 1.0." " '1.0' does not match '^[0-9]+$'") self.check_validation_error(self.post, body={'foo': '1.0'}, expected_detail=detail) class IntegerRangeTestCase(APIValidationTestCase): def setUp(self): schema = { 'type': 'object', 'properties': { 'foo': { 'type': ['integer', 'string'], 'pattern': '^[0-9]+$', 'minimum': 1, 'maximum': 10, }, }, } super(IntegerRangeTestCase, self).setUp(schema=schema) def test_validate_integer_range(self): self.assertEqual('Validation succeeded.', self.post(body={'foo': 1}, req=FakeRequest())) self.assertEqual('Validation succeeded.', self.post(body={'foo': 10}, req=FakeRequest())) self.assertEqual('Validation succeeded.', self.post(body={'foo': '1'}, req=FakeRequest())) def test_validate_integer_range_fails(self): detail = ("Invalid input for field/attribute foo. Value: 0." " 0(.0)? is less than the minimum of 1") self.check_validation_error(self.post, body={'foo': 0}, expected_detail=detail) detail = ("Invalid input for field/attribute foo. Value: 11." " 11(.0)? is greater than the maximum of 10") self.check_validation_error(self.post, body={'foo': 11}, expected_detail=detail) detail = ("Invalid input for field/attribute foo. Value: 0." " 0(.0)? is less than the minimum of 1") self.check_validation_error(self.post, body={'foo': '0'}, expected_detail=detail) detail = ("Invalid input for field/attribute foo. Value: 11." " 11(.0)? is greater than the maximum of 10") self.check_validation_error(self.post, body={'foo': '11'}, expected_detail=detail) class BooleanTestCase(APIValidationTestCase): def setUp(self): schema = { 'type': 'object', 'properties': { 'foo': parameter_types.boolean, }, } super(BooleanTestCase, self).setUp(schema=schema) def test_validate_boolean(self): self.assertEqual('Validation succeeded.', self.post(body={'foo': True}, req=FakeRequest())) self.assertEqual('Validation succeeded.', self.post(body={'foo': False}, req=FakeRequest())) self.assertEqual('Validation succeeded.', self.post(body={'foo': 'True'}, req=FakeRequest())) self.assertEqual('Validation succeeded.', self.post(body={'foo': 'False'}, req=FakeRequest())) self.assertEqual('Validation succeeded.', self.post(body={'foo': '1'}, req=FakeRequest())) self.assertEqual('Validation succeeded.', self.post(body={'foo': '0'}, req=FakeRequest())) def test_validate_boolean_fails(self): enum_boolean = ("[True, 'True', 'TRUE', 'true', '1', 'ON', 'On'," " 'on', 'YES', 'Yes', 'yes', 'y', 't'," " False, 'False', 'FALSE', 'false', '0', 'OFF', 'Off'," " 'off', 'NO', 'No', 'no', 'n', 'f']") detail = ("Invalid input for field/attribute foo. Value: bar." " 'bar' is not one of %s") % enum_boolean self.check_validation_error(self.post, body={'foo': 'bar'}, expected_detail=detail) detail = ("Invalid input for field/attribute foo. Value: 2." 
" '2' is not one of %s") % enum_boolean self.check_validation_error(self.post, body={'foo': '2'}, expected_detail=detail) class NameTestCase(APIValidationTestCase): def setUp(self): schema = { 'type': 'object', 'properties': { 'foo': parameter_types.name, }, } super(NameTestCase, self).setUp(schema=schema) def test_validate_name(self): self.assertEqual('Validation succeeded.', self.post(body={'foo': 'volume.1'}, req=FakeRequest())) self.assertEqual('Validation succeeded.', self.post(body={'foo': 'volume 1'}, req=FakeRequest())) self.assertEqual('Validation succeeded.', self.post(body={'foo': 'a'}, req=FakeRequest())) self.assertEqual('Validation succeeded.', self.post(body={'foo': u'\u0434'}, req=FakeRequest())) self.assertEqual('Validation succeeded.', self.post(body={'foo': u'\u0434\u2006\ufffd'}, req=FakeRequest())) class DatetimeTestCase(APIValidationTestCase): def setUp(self): schema = { 'type': 'object', 'properties': { 'foo': { 'type': 'string', 'format': 'date-time', }, }, } super(DatetimeTestCase, self).setUp(schema=schema) def test_validate_datetime(self): self.assertEqual('Validation succeeded.', self.post(body={ 'foo': '2017-01-14T01:00:00Z'}, req=FakeRequest() )) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/api/test_common.py0000664000175000017500000006213400000000000022134 0ustar00zuulzuul00000000000000# encoding:utf-8 # Copyright 2010 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Test suites for 'common' code used throughout the OpenStack HTTP API.""" from unittest import mock import ddt from oslo_config import cfg from testtools import matchers import webob import webob.exc from cinder.api import common from cinder.tests.unit.api import fakes from cinder.tests.unit import fake_constants from cinder.tests.unit import test NS = "{http://docs.openstack.org/compute/api/v1.1}" ATOMNS = "{http://www.w3.org/2005/Atom}" CONF = cfg.CONF TINY = list(range(1)) SMALL = list(range(10)) MEDIUM = list(range(1000)) LARGE = list(range(10000)) ITEMS = list(range(2000)) @ddt.ddt class LimiterTest(test.TestCase): """Unit tests for the `cinder.api.common.limited` method. This method takes in a list of items and, depending on the 'offset' and 'limit' GET params, returns a subset or complete set of the given items. """ @ddt.data('/?offset=', '/?offset=123456789012346456', u'/?offset=\u0020aa', '/?offset=-30', u'/?limit=hello', '/?limit=-3000', '/?offset=30034522235674530&limit=10') def test_limiter_bad_offset_or_limit_values(self, value): """Test limiter with bad offset or limit values This test includes next test cases: 1) Offset key works with a blank offset; 2) Offset key works with an offset out of range; 3) Offset key works with a BAD offset; 4) Offset value is negative; 5) Limit value is bad; 6) Limit value is negative value. 
7) With both offset and limit; """ req = webob.Request.blank(value) self.assertRaises( webob.exc.HTTPBadRequest, common.limited, SMALL, req) @ddt.data( ({'req': '/?offset=0', 'values': ((TINY, TINY), (SMALL, SMALL), (MEDIUM, MEDIUM), (LARGE[:1000], LARGE))}), ({'req': '/?offset=10', 'values': (([], TINY), (SMALL[10:], SMALL), (MEDIUM[10:], MEDIUM), (LARGE[10:1010], LARGE))}), ({'req': '/?offset=1001', 'values': (([], TINY), ([], SMALL), ([], MEDIUM), (LARGE[1001:2001], LARGE))}), ({'req': '/', 'values': ((TINY, TINY), (SMALL, SMALL), (MEDIUM, MEDIUM), (LARGE[:1000], LARGE))}), ({'req': '/?limit=0', 'values': ((TINY, TINY), (SMALL, SMALL), (MEDIUM, MEDIUM), (LARGE[:1000], LARGE))}), ({'req': '/?limit=10', 'values': ((TINY, TINY), (SMALL, SMALL), (MEDIUM[:10], MEDIUM), (LARGE[:10], LARGE))}), ({'req': '/?limit=3000', 'values': ((TINY, TINY), (SMALL, SMALL), (MEDIUM, MEDIUM), (LARGE[:1000], LARGE))})) @ddt.unpack def test_limiter(self, req, values): """Test limited method with different input parameters. This test includes next test cases: 1) Test offset key works with 0; 2) Test offset key works with a medium sized number; 3) Test offset key works with a number over 1000 (max_limit); 4) Test request with no offset or limit; 5) Test limit of zero; 6) Test limit of 10; 7) Test limit of 3000; """ req = webob.Request.blank(req) for expected, value, in values: self.assertEqual(expected, common.limited(value, req)) @ddt.data(('/?offset=1&limit=3', 1, 4), ('/?offset=3&limit=0', 3, 1003), ('/?offset=3&limit=1500', 3, 1003), ('/?offset=3000&limit=10', 0, 0), ('/?offset=1&limit=3', 1, 4, 2000), ('/?offset=3&limit=0', 3, None, 2000), ('/?offset=3&limit=2500', 3, None, 2000), ('/?offset=3000&limit=10', 0, 0, 2000)) @ddt.unpack def test_limiter_with_offset_limit_max_limit(self, req, slice_start, slice_end, max_limit=None): """Test with both parameters offset and limit and custom max_limit.""" # NOTE(mdovgal): using 0 as slice_start and slice_end we will # get empty list as a result # [3:None] equal to [3:] req = webob.Request.blank(req) self.assertEqual(ITEMS[slice_start:slice_end], common.limited(ITEMS, req, max_limit=max_limit)) class PaginationParamsTest(test.TestCase): """Unit tests for `cinder.api.common.get_pagination_params` method. This method takes in a request object and returns 'marker' and 'limit' GET params. 
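# Illustrative sketch (hypothetical helper showing the slicing rules the
# LimiterTest cases assert, without the request parsing): limit=0 means
# "use the maximum", the limit is capped at max_limit, and the window
# starts at offset.
def limited_sketch(items, offset=0, limit=0, max_limit=1000):
    if offset < 0 or limit < 0:
        raise ValueError('offset and limit must be non-negative')
    limit = min(limit or max_limit, max_limit)
    return items[offset:offset + limit]


ITEMS_SKETCH = list(range(2000))
assert limited_sketch(ITEMS_SKETCH, offset=3, limit=0) == ITEMS_SKETCH[3:1003]
assert limited_sketch(ITEMS_SKETCH, offset=3, limit=1500) == ITEMS_SKETCH[3:1003]
assert limited_sketch(ITEMS_SKETCH, offset=3000, limit=10) == []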
""" def test_nonnumerical_limit(self): """Test nonnumerical limit param.""" req = webob.Request.blank('/?limit=hello') self.assertRaises( webob.exc.HTTPBadRequest, common.get_pagination_params, req.GET.copy()) @mock.patch.object(common, 'CONF') def test_no_params(self, mock_cfg): """Test no params.""" mock_cfg.osapi_max_limit = 100 req = webob.Request.blank('/') expected = (None, 100, 0) self.assertEqual(expected, common.get_pagination_params(req.GET.copy())) def test_valid_marker(self): """Test valid marker param.""" marker = '263abb28-1de6-412f-b00b-f0ee0c4333c2' req = webob.Request.blank('/?marker=' + marker) expected = (marker, CONF.osapi_max_limit, 0) self.assertEqual(expected, common.get_pagination_params(req.GET.copy())) def test_valid_limit(self): """Test valid limit param.""" req = webob.Request.blank('/?limit=10') expected = (None, 10, 0) self.assertEqual(expected, common.get_pagination_params(req.GET.copy())) def test_invalid_limit(self): """Test invalid limit param.""" req = webob.Request.blank('/?limit=-2') self.assertRaises( webob.exc.HTTPBadRequest, common.get_pagination_params, req.GET.copy()) def test_valid_limit_and_marker(self): """Test valid limit and marker parameters.""" marker = '263abb28-1de6-412f-b00b-f0ee0c4333c2' req = webob.Request.blank('/?limit=20&marker=%s' % marker) expected = (marker, 20, 0) self.assertEqual(expected, common.get_pagination_params(req.GET.copy())) @ddt.ddt class SortParamUtilsTest(test.TestCase): @ddt.data(({'params': {}}, ['created_at'], ['desc']), ({'params': {}, 'default_key': 'key1', 'default_dir': 'dir1'}, ['key1'], ['dir1']), ({'params': {'sort': 'key1:dir1'}}, ['key1'], ['dir1']), ({'params': {'sort_key': 'key1', 'sort_dir': 'dir1'}}, ['key1'], ['dir1']), ({'params': {'sort': 'key1'}}, ['key1'], ['desc']), ({'params': {'sort': 'key1:dir1,key2:dir2,key3:dir3'}}, ['key1', 'key2', 'key3'], ['dir1', 'dir2', 'dir3']), ({'params': {'sort': 'key1:dir1,key2,key3:dir3'}}, ['key1', 'key2', 'key3'], ['dir1', 'desc', 'dir3']), ({'params': {'sort': 'key1:dir1,key2,key3'}, 'default_dir': 'foo'}, ['key1', 'key2', 'key3'], ['dir1', 'foo', 'foo']), ({'params': {'sort': ' key1 : dir1,key2: dir2 , key3 '}}, ['key1', 'key2', 'key3'], ['dir1', 'dir2', 'desc'])) @ddt.unpack def test_get_sort_params(self, parameters, expected_keys, expected_dirs): """Test for get sort parameters method This test includes next test cases: 1) Verifies the default sort key and direction. 2) Verifies that the defaults can be overridden. 3) Verifies a single sort key and direction. 4) Verifies a single sort key and direction. 5) Verifies a single sort value with a default direction. 6) Verifies multiple sort parameter values. 7) Verifies multiple sort keys without all directions. 8) Verifies multiple sort keys and overriding default direction. 9) Verifies that leading and trailing spaces are removed. 
""" sort_keys, sort_dirs = common.get_sort_params(**parameters) self.assertEqual(expected_keys, sort_keys) self.assertEqual(expected_dirs, sort_dirs) def test_get_sort_params_params_modified(self): """Verifies that the input sort parameter are modified.""" params = {'sort': 'key1:dir1,key2:dir2,key3:dir3'} common.get_sort_params(params) self.assertEqual({}, params) params = {'sort_key': 'key1', 'sort_dir': 'dir1'} common.get_sort_params(params) self.assertEqual({}, params) def test_get_params_mix_sort_and_old_params(self): """An exception is raised if both types of sorting params are given.""" for params in ({'sort': 'k1', 'sort_key': 'k1'}, {'sort': 'k1', 'sort_dir': 'd1'}, {'sort': 'k1', 'sort_key': 'k1', 'sort_dir': 'd2'}): self.assertRaises(webob.exc.HTTPBadRequest, common.get_sort_params, params) @ddt.ddt class MiscFunctionsTest(test.TestCase): @ddt.data(('http://cinder.example.com/v1/images', 'http://cinder.example.com/images'), ('http://cinder.example.com/v1.1/images', 'http://cinder.example.com/images'), ('http://cinder.example.com/v1.1/', 'http://cinder.example.com/'), ('http://cinder.example.com/v10.10', 'http://cinder.example.com'), ('http://cinder.example.com/v1.1/images/v10.5', 'http://cinder.example.com/images/v10.5'), ('http://cinder.example.com/cinder/v2', 'http://cinder.example.com/cinder'), ('http://cinder.example.com/volume/v2/123', 'http://cinder.example.com/volume/123')) @ddt.unpack def test_remove_version_from_href(self, fixture, expected): """Test for removing version from href This test conatins following test-cases: 1) remove major version from href 2-5) remove version from href 6) remove version from href version not trailing domain """ actual = common.remove_version_from_href(fixture) self.assertEqual(expected, actual) @ddt.data('http://cinder.example.com/1.1/images', 'http://cinder.example.com/v/images', 'http://cinder.example.com/v1.1images') def test_remove_version_from_href_bad_request(self, fixture): self.assertRaises(ValueError, common.remove_version_from_href, fixture) @ddt.ddt class TestCollectionLinks(test.TestCase): """Tests the _get_collection_links method.""" def _validate_next_link(self, item_count, osapi_max_limit, limit, should_link_exist): req = webob.Request.blank('/?limit=%s' % limit if limit else '/') link_return = [{"rel": "next", "href": "fake_link"}] self.flags(osapi_max_limit=osapi_max_limit) if limit is None: limited_list_size = min(item_count, osapi_max_limit) else: limited_list_size = min(item_count, osapi_max_limit, limit) limited_list = [{"uuid": str(i)} for i in range(limited_list_size)] builder = common.ViewBuilder() def get_pagination_params(params, max_limit=CONF.osapi_max_limit, original_call=common.get_pagination_params): return original_call(params, max_limit) def _get_limit_param(params, max_limit=CONF.osapi_max_limit, original_call=common._get_limit_param): return original_call(params, max_limit) with mock.patch.object(common, 'get_pagination_params', get_pagination_params), \ mock.patch.object(common, '_get_limit_param', _get_limit_param), \ mock.patch.object(common.ViewBuilder, '_generate_next_link', return_value=link_return) as href_link_mock: results = builder._get_collection_links(req, limited_list, mock.sentinel.coll_key, item_count, "uuid") if should_link_exist: href_link_mock.assert_called_once_with(limited_list, "uuid", req, mock.sentinel.coll_key) self.assertThat(results, matchers.HasLength(1)) else: self.assertFalse(href_link_mock.called) self.assertThat(results, matchers.HasLength(0)) @ddt.data((5, 5, True), (5, 5, 
True, 4), (5, 5, True, 5), (5, 5, True, 6), (5, 7, False), (5, 7, True, 4), (5, 7, True, 5), (5, 7, False, 6), (5, 7, False, 7), (5, 7, False, 8), (5, 3, True), (5, 3, True, 2), (5, 3, True, 3), (5, 3, True, 4), (5, 3, True, 5), (5, 3, True, 6)) @ddt.unpack def test_items(self, item_count, osapi_max_limit, should_link_exist, limit=None): """Test 1) Items count equals osapi_max_limit without limit; 2) Items count equals osapi_max_limit and greater than limit; 3) Items count equals osapi_max_limit and equals limit; 4) Items count equals osapi_max_limit and less than limit; 5) Items count less than osapi_max_limit without limit; 6) Limit less than items count and less than osapi_max_limit; 7) Limit equals items count and less than osapi_max_limit; 8) Items count less than limit and less than osapi_max_limit; 9) Items count less than osapi_max_limit and equals limit; 10) Items count less than osapi_max_limit and less than limit; 11) Items count greater than osapi_max_limit without limit; 12) Limit less than items count and greater than osapi_max_limit; 13) Items count greater than osapi_max_limit and equals limit; 14) Items count greater than limit and greater than osapi_max_limit; 15) Items count equals limit and greater than osapi_max_limit; 16) Limit greater than items count and greater than osapi_max_limit; """ self._validate_next_link(item_count, osapi_max_limit, limit, should_link_exist) @ddt.data( { # The project_id in the context matches the one in the v3 URL. 'project_id': fake_constants.PROJECT_ID, 'url': '/v3/%s/something' % fake_constants.PROJECT_ID, 'expected': fake_constants.PROJECT_ID, }, { # The project_id in the context does NOT match the one in the URL. 'project_id': fake_constants.PROJECT2_ID, 'url': '/v3/%s/something' % fake_constants.PROJECT_ID, 'expected': '', }, { # The context does not include a project_id (it's system scoped). 'project_id': None, 'url': '/v3/%s/something' % fake_constants.PROJECT_ID, 'expected': '', }, { # The v3 URL does not contain a project ID. 'project_id': fake_constants.PROJECT_ID, 'url': '/v3/something', 'expected': '', }, { # The URL doesn't specify v3. 
'project_id': fake_constants.PROJECT_ID, 'url': '/vX/%s/something' % fake_constants.PROJECT_ID, 'expected': '', }, ) @ddt.unpack def test_project_id_in_url(self, project_id, url, expected): req = fakes.HTTPRequest.blank(url) req.environ['cinder.context'].project_id = project_id actual = common.ViewBuilder()._get_project_id_in_url(req) self.assertEqual(expected, actual) @ddt.ddt class GeneralFiltersTest(test.TestCase): @ddt.data({'filters': {'volume': ['key1', 'key2']}, 'resource': 'volume', 'expected': {'volume': ['key1', 'key2']}}, {'filters': {'volume': ['key1', 'key2']}, 'resource': 'snapshot', 'expected': {}}, {'filters': {'volume': ['key1', 'key2']}, 'resource': None, 'expected': {'volume': ['key1', 'key2']}}) @ddt.unpack def test_get_enabled_resource_filters(self, filters, resource, expected): with mock.patch('cinder.api.common._FILTERS_COLLECTION', filters): result = common.get_enabled_resource_filters(resource) self.assertEqual(expected, result) @ddt.data({'filters': {'key1': 'value1'}, 'is_admin': False, 'result': {'fake_resource': ['key1']}, 'expected': {'key1': 'value1'}, 'resource': 'fake_resource'}, {'filters': {'key1': 'value1', 'key2': 'value2'}, 'is_admin': False, 'result': {'fake_resource': ['key1']}, 'expected': None, 'resource': 'fake_resource'}, {'filters': {'key1': 'value1', 'all_tenants': 'value2', 'key3': 'value3'}, 'is_admin': True, 'result': {'fake_resource': []}, 'expected': {'key1': 'value1', 'all_tenants': 'value2', 'key3': 'value3'}, 'resource': 'fake_resource'}, {'filters': {'key1': 'value1', 'all_tenants': 'value2', 'key3': 'value3'}, 'is_admin': True, 'result': {'pool': []}, 'expected': None, 'resource': 'pool'}, {'filters': {'中文': 'value1'}, 'is_admin': True, 'result': None, 'expected': None, 'resource': None}, {'filters': {'中文': 'value1'}, 'is_admin': False, 'result': {'fake_resource': []}, 'expected': None, 'resource': 'fake_resource'}, {'filters': {'is_public': True}, 'is_admin': False, 'result': {'volume_type': ["is_public"]}, 'expected': {'is_public': True}, 'resource': 'volume_type'}, {'filters': {'key1': 'value1', 'all_tenants': 'value2', 'key3': 'value3'}, 'is_admin': False, 'result': {'fake_resource': ['key1', 'key3']}, 'expected': {'key1': 'value1', 'key3': 'value3'}, 'resource': 'fake_resource'}) @ddt.unpack @mock.patch('cinder.api.common.get_enabled_resource_filters') def test_reject_invalid_filters(self, mock_get, filters, is_admin, result, expected, resource): class FakeContext(object): def __init__(self, admin): self.is_admin = admin fake_context = FakeContext(is_admin) mock_get.return_value = result if expected: common.reject_invalid_filters(fake_context, filters, resource) self.assertEqual(expected, filters) else: self.assertRaises( webob.exc.HTTPBadRequest, common.reject_invalid_filters, fake_context, filters, resource) @ddt.data({'filters': {'name': 'value1'}, 'is_admin': False, 'result': {'fake_resource': ['name']}, 'expected': {'name': 'value1'}}, {'filters': {'name~': 'value1'}, 'is_admin': False, 'result': {'fake_resource': ['name']}, 'expected': None}, {'filters': {'name': 'value1'}, 'is_admin': False, 'result': {'fake_resource': ['name~']}, 'expected': {'name': 'value1'}}, {'filters': {'name~': 'value1'}, 'is_admin': False, 'result': {'fake_resource': ['name~']}, 'expected': {'name~': 'value1'}} ) @ddt.unpack @mock.patch('cinder.api.common.get_enabled_resource_filters') def test_reject_invalid_filters_like_operator_enabled( self, mock_get, filters, is_admin, result, expected): class FakeContext(object): def __init__(self, admin): 
self.is_admin = admin fake_context = FakeContext(is_admin) mock_get.return_value = result if expected: common.reject_invalid_filters(fake_context, filters, 'fake_resource', True) self.assertEqual(expected, filters) else: self.assertRaises( webob.exc.HTTPBadRequest, common.reject_invalid_filters, fake_context, filters, 'fake_resource') @ddt.data({'resource': 'volume', 'expected': ["name", "status", "metadata", "bootable", "migration_status", "availability_zone", "group_id", "size", "created_at", "updated_at", "consumes_quota"]}, {'resource': 'backup', 'expected': ["name", "status", "volume_id"]}, {'resource': 'snapshot', 'expected': ["name", "status", "volume_id", "metadata", "availability_zone", "consumes_quota"]}, {'resource': 'group_snapshot', 'expected': ["name", "status", "group_id"]}, {'resource': 'attachment', 'expected': ["volume_id", "status", "instance_id", "attach_status"]}, {'resource': 'message', 'expected': ["resource_uuid", "resource_type", "event_id", "request_id", "message_level"]}, {'resource': 'pool', 'expected': ["name", "volume_type"]}) @ddt.unpack def test_filter_keys_exists(self, resource, expected): result = common.get_enabled_resource_filters(resource) self.assertEqual(expected, result[resource]) @ddt.data({'resource': 'group', 'filters': {'name~': 'value'}, 'expected': {'name~': 'value'}}, {'resource': 'snapshot', 'filters': {'status~': 'value'}, 'expected': {'status~': 'value'}}, {'resource': 'volume', 'filters': {'name~': 'value', 'description~': 'value'}, 'expected': {'display_name~': 'value', 'display_description~': 'value'}}, {'resource': 'backup', 'filters': {'name~': 'value', 'description~': 'value'}, 'expected': {'display_name~': 'value', 'display_description~': 'value'}}, ) @ddt.unpack def test_convert_filter_attributes(self, resource, filters, expected): common.convert_filter_attributes(filters, resource) self.assertEqual(expected, filters) @ddt.ddt class LinkPrefixTest(test.TestCase): @ddt.data((["http://192.168.0.243:24/", "http://127.0.0.1/volume"], "http://127.0.0.1/volume"), (["http://foo.x.com/v1", "http://new.prefix.com"], "http://new.prefix.com/v1"), (["http://foo.x.com/v1", "http://new.prefix.com:20455/new_extra_prefix"], "http://new.prefix.com:20455/new_extra_prefix/v1")) @ddt.unpack def test_update_link_prefix(self, update_args, expected): vb = common.ViewBuilder() result = vb._update_link_prefix(*update_args) self.assertEqual(expected, result) class RequestUrlTest(test.TestCase): def test_get_request_url_no_forward(self): app_url = 'http://127.0.0.1/v2;param?key=value#frag' request = type('', (), { 'application_url': app_url, 'headers': {} }) result = common.get_request_url(request) self.assertEqual(app_url, result) def test_get_request_url_forward(self): request = type('', (), { 'application_url': 'http://127.0.0.1/v2;param?key=value#frag', 'headers': {'X-Forwarded-Host': '192.168.0.243:24'} }) result = common.get_request_url(request) self.assertEqual('http://192.168.0.243:24/v2;param?key=value#frag', result) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/api/test_versions.py0000664000175000017500000004712100000000000022513 0ustar00zuulzuul00000000000000# Copyright 2015 Clinton Knight # Copyright 2022 Red Hat Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
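# Illustrative sketch (hypothetical helper, not the cinder one): rebuilding
# the request URL with the X-Forwarded-Host header when a proxy sits in
# front of the API, as RequestUrlTest expects.
from urllib import parse


def request_url_sketch(application_url, headers):
    forwarded = headers.get('X-Forwarded-Host')
    if not forwarded:
        return application_url
    split = parse.urlsplit(application_url)
    return parse.urlunsplit(split._replace(netloc=forwarded))


assert request_url_sketch(
    'http://127.0.0.1/v2;param?key=value#frag',
    {'X-Forwarded-Host': '192.168.0.243:24'},
) == 'http://192.168.0.243:24/v2;param?key=value#frag'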
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from http import HTTPStatus import inspect import re import ddt from oslo_config import cfg from oslo_serialization import jsonutils from oslo_utils import encodeutils import webob from cinder.api import microversions from cinder.api.openstack import api_version_request from cinder.api.openstack import wsgi from cinder.api.v3 import router from cinder.api import versions from cinder import exception from cinder.tests.unit.api import fakes from cinder.tests.unit import test CONF = cfg.CONF VERSION_HEADER_NAME = 'OpenStack-API-Version' VOLUME_SERVICE = 'volume ' @ddt.ddt class VersionsControllerTestCase(test.TestCase): def setUp(self): super(VersionsControllerTestCase, self).setUp() self.wsgi_apps = (versions.Versions(), router.APIRouter()) def build_request(self, base_dir=None, base_url='http://localhost/v3', header_version=None): if base_dir: req = fakes.HTTPRequest.blank(base_dir, base_url=base_url) else: req = fakes.HTTPRequest.blank('/', base_url=base_url) req.method = 'GET' req.content_type = 'application/json' if header_version: req.headers = {VERSION_HEADER_NAME: VOLUME_SERVICE + header_version} return req def check_response(self, response, version): self.assertEqual(VOLUME_SERVICE + version, response.headers[VERSION_HEADER_NAME]) self.assertEqual(VERSION_HEADER_NAME, response.headers['Vary']) def test_versions_root(self): req = self.build_request(base_url='http://localhost') response = req.get_response(versions.Versions()) self.assertEqual(HTTPStatus.MULTIPLE_CHOICES, response.status_int) body = jsonutils.loads(response.body) version_list = body['versions'] ids = [v['id'] for v in version_list] self.assertEqual(1, len(ids)) self.assertIn('v3.0', ids) v3 = [v for v in version_list if v['id'] == 'v3.0'][0] self.assertEqual(api_version_request._MAX_API_VERSION, v3.get('version')) self.assertEqual(api_version_request._MIN_API_VERSION, v3.get('min_version')) def test_versions(self): version = '3.0' req = self.build_request( base_url='http://localhost/v{}'.format(version[0]), header_version=version) response = req.get_response(router.APIRouter()) self.assertEqual(HTTPStatus.OK, response.status_int) body = jsonutils.loads(response.body) version_list = body['versions'] ids = [v['id'] for v in version_list] self.assertEqual(1, len(ids)) self.assertIn('v{}'.format(version), ids) self.check_response(response, version) self.assertEqual(api_version_request._MAX_API_VERSION, version_list[0].get('version')) self.assertEqual(api_version_request._MIN_API_VERSION, version_list[0].get('min_version')) def test_versions_version_latest(self): req = self.build_request(header_version='latest') response = req.get_response(router.APIRouter()) self.assertEqual(HTTPStatus.OK, response.status_int) self.check_response(response, api_version_request._MAX_API_VERSION) def test_versions_version_invalid(self): req = self.build_request(header_version='2.0.1') for app in self.wsgi_apps: response = req.get_response(app) self.assertEqual(HTTPStatus.BAD_REQUEST, response.status_int) def test_versions_response_fault(self): version = '3.0' req = self.build_request(header_version=version) req.api_version_request = ( 
api_version_request.APIVersionRequest(version)) app = wsgi.Fault(webob.exc.HTTPBadRequest(explanation='what?')) response = req.get_response(app) self.assertEqual(HTTPStatus.BAD_REQUEST, response.status_int) self.check_response(response, '3.0') def test_versions_inheritance_internals_of_non_base_controller(self): """Test ControllerMetaclass works inheriting from non base class.""" def _get_str_version(version): return "%s.%s" % (version._ver_major, version._ver_minor) def assert_method_equal(expected, observed): self.assertEqual(expected, observed) class ControllerParent(wsgi.Controller): @wsgi.Controller.api_version('3.0') def index(self, req): pass # We create this class in between to confirm that we don't leave # undesired versioned methods in the wsgi.Controller class. class Controller(wsgi.Controller): @wsgi.Controller.api_version('2.0') def index(self, req): pass class ControllerChild(ControllerParent): @wsgi.Controller.api_version('3.1') def index(self, req): pass @wsgi.Controller.api_version('3.2') def new_method(self, req): pass # ControllerParent will only have its own index method self.assertSetEqual({'index'}, set(ControllerParent.versioned_methods)) self.assertEqual(1, len(ControllerParent.versioned_methods['index'])) index = ControllerParent.versioned_methods['index'][0] assert_method_equal(ControllerParent.index, index.func) self.assertEqual('index', index.name) self.assertEqual('3.0', _get_str_version(index.start_version)) self.assertEqual('None.None', _get_str_version(index.end_version)) # Same thing will happen with the Controller class, thus confirming # that we don't cross pollinate our classes with undesired methods. self.assertSetEqual({'index'}, set(Controller.versioned_methods)) self.assertEqual(1, len(Controller.versioned_methods['index'])) index = Controller.versioned_methods['index'][0] assert_method_equal(Controller.index, index.func) self.assertEqual('index', index.name) self.assertEqual('2.0', _get_str_version(index.start_version)) self.assertEqual('None.None', _get_str_version(index.end_version)) # ControllerChild will inherit index method from ControllerParent and # add its own version as well as add a new method self.assertSetEqual({'index', 'new_method'}, set(ControllerChild.versioned_methods)) self.assertEqual(2, len(ControllerChild.versioned_methods['index'])) # The methods are ordered from newest version to oldest version index = ControllerChild.versioned_methods['index'][0] assert_method_equal(ControllerChild.index, index.func) self.assertEqual('index', index.name) self.assertEqual('3.1', _get_str_version(index.start_version)) self.assertEqual('None.None', _get_str_version(index.end_version)) index = ControllerChild.versioned_methods['index'][1] assert_method_equal(ControllerParent.index, index.func) self.assertEqual('index', index.name) self.assertEqual('3.0', _get_str_version(index.start_version)) self.assertEqual('None.None', _get_str_version(index.end_version)) # New method also gets added even if it didn't exist in any of the base # classes. 
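# Sketch (illustration only -- this is not cinder's wsgi implementation):
# the versioned_methods registry inspected above keeps, per method name,
# a list of entries ordered from newest start_version to oldest, so
# resolving a request reduces to returning the first entry whose
# start_version does not exceed the requested microversion.  The helper
# name below is hypothetical and exists purely for illustration.
def _pick_versioned_impl(entries, requested):
    # entries: [(start_version, func), ...] sorted newest-to-oldest;
    # versions are (major, minor) tuples.
    for start_version, func in entries:
        if start_version <= requested:
            return func
    return None

# e.g. with index registered at 3.1 (parent) and 3.2 (child), a 3.3
# request resolves to the 3.2 implementation and a 3.0 request matches
# nothing, mirroring the NOT_FOUND cases in the ddt data further down.
assert _pick_versioned_impl([((3, 2), 'child'), ((3, 1), 'parent')],
                            (3, 3)) == 'child'
assert _pick_versioned_impl([((3, 2), 'child'), ((3, 1), 'parent')],
                            (3, 0)) is None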
self.assertEqual(1, len(ControllerChild.versioned_methods['new_method'])) new_method = ControllerChild.versioned_methods['new_method'][0] assert_method_equal(ControllerChild.new_method, new_method.func) self.assertEqual('new_method', new_method.name) self.assertEqual('3.2', _get_str_version(new_method.start_version)) self.assertEqual('None.None', _get_str_version(new_method.end_version)) @ddt.data( ('2.0', 'index', HTTPStatus.NOT_ACCEPTABLE, 'ControllerParent'), ('2.0', 'show', HTTPStatus.NOT_ACCEPTABLE, 'ControllerParent'), ('3.0', 'index', HTTPStatus.NOT_FOUND, 'ControllerParent'), ('3.0', 'show', HTTPStatus.NOT_FOUND, 'ControllerParent'), ('3.1', 'index', 'parent', 'ControllerParent'), ('3.1', 'show', HTTPStatus.NOT_FOUND, 'ControllerParent'), ('3.2', 'index', 'parent', 'ControllerParent'), ('3.2', 'show', HTTPStatus.NOT_FOUND, 'ControllerParent'), ('2.0', 'index', HTTPStatus.NOT_ACCEPTABLE, 'Controller'), ('2.0', 'show', HTTPStatus.NOT_ACCEPTABLE, 'Controller'), ('3.0', 'index', HTTPStatus.NOT_FOUND, 'Controller'), ('3.0', 'show', HTTPStatus.NOT_FOUND, 'Controller'), ('3.1', 'index', 'single', 'Controller'), ('3.1', 'show', HTTPStatus.NOT_FOUND, 'Controller'), ('3.2', 'index', 'single', 'Controller'), ('3.2', 'show', HTTPStatus.NOT_FOUND, 'Controller'), ('2.0', 'index', HTTPStatus.NOT_ACCEPTABLE, 'ControllerChild'), ('2.0', 'show', HTTPStatus.NOT_ACCEPTABLE, 'ControllerChild'), ('3.0', 'index', HTTPStatus.NOT_FOUND, 'ControllerChild'), ('3.0', 'show', HTTPStatus.NOT_FOUND, 'ControllerChild'), ('3.1', 'index', 'parent', 'ControllerChild'), ('3.1', 'show', HTTPStatus.NOT_FOUND, 'ControllerChild'), ('3.2', 'index', 'child 3.2', 'ControllerChild'), ('3.2', 'show', HTTPStatus.NOT_FOUND, 'ControllerChild'), ('3.3', 'index', 'child 3.3', 'ControllerChild'), ('3.3', 'show', 'show', 'ControllerChild'), ('3.4', 'index', 'child 3.4', 'ControllerChild')) @ddt.unpack def test_versions_inheritance_of_non_base_controller(self, version, call, expected, controller): """Test ControllerMetaclass works inheriting from non base class.""" class ControllerParent(wsgi.Controller): @wsgi.Controller.api_version('3.1') def index(self, req): return 'parent' # We create this class in between to confirm that we don't leave # undesired versioned methods in the wsgi.Controller class. class Controller(wsgi.Controller): @wsgi.Controller.api_version('3.1') def index(self, req): return 'single' class ControllerChild(ControllerParent): # We don't add max version to confirm that once we set a newer # version it doesn't really matter because the newest one will be # called. 
@wsgi.Controller.api_version('3.2') def index(self, req): return 'child 3.2' @index.api_version('3.3') def index(self, req): return 'child 3.3' @index.api_version('3.4') def index(self, req): return 'child 3.4' @wsgi.Controller.api_version('3.3') def show(self, req, *args, **kwargs): return 'show' base_dir = '/tests' if call == 'index' else '/tests/123' req = self.build_request(base_dir=base_dir, header_version=version) app = fakes.TestRouter(locals()[controller]()) response = req.get_response(app) resp = encodeutils.safe_decode(response.body, incoming='utf-8') if isinstance(expected, str): self.assertEqual(HTTPStatus.OK, response.status_int) self.assertEqual(expected, resp) else: self.assertEqual(expected, response.status_int) def test_versions_version_not_found(self): api_version_request_4_0 = api_version_request.APIVersionRequest('4.0') self.mock_object(api_version_request, 'max_api_version', return_value=api_version_request_4_0) class Controller(wsgi.Controller): @wsgi.Controller.api_version('3.0', '3.0') def index(self, req): return 'off' req = self.build_request(header_version='3.5') app = fakes.TestRouter(Controller()) response = req.get_response(app) self.assertEqual(HTTPStatus.NOT_FOUND, response.status_int) def test_versions_version_not_acceptable(self): req = self.build_request(header_version='4.0') response = req.get_response(router.APIRouter()) self.assertEqual(HTTPStatus.NOT_ACCEPTABLE, response.status_int) @ddt.data(['volume 3.0, compute 2.22', True], ['volume 3.0, compute 2.22, identity 2.3', True], ['compute 2.22, identity 2.3', False]) @ddt.unpack def test_versions_multiple_services_header( self, service_list, should_pass): req = self.build_request() req.headers = {VERSION_HEADER_NAME: service_list} try: response = req.get_response(router.APIRouter()) except exception.VersionNotFoundForAPIMethod: if should_pass: raise elif not should_pass: return self.assertEqual(HTTPStatus.OK, response.status_int) body = jsonutils.loads(response.body) version_list = body['versions'] ids = [v['id'] for v in version_list] self.assertEqual({'v3.0'}, set(ids)) self.check_response(response, '3.0') self.assertEqual(api_version_request._MAX_API_VERSION, version_list[0].get('version')) self.assertEqual(api_version_request._MIN_API_VERSION, version_list[0].get('min_version')) @ddt.data(['3.5', HTTPStatus.OK], ['3.55', HTTPStatus.NOT_FOUND]) @ddt.unpack def test_req_version_matches(self, version, HTTP_ret): version_request = api_version_request.APIVersionRequest(version) self.mock_object(api_version_request, 'max_api_version', return_value=version_request) class Controller(wsgi.Controller): @wsgi.Controller.api_version('3.0', '3.6') def index(self, req): return 'off' req = self.build_request(base_dir='/tests', header_version=version) app = fakes.TestRouter(Controller()) response = req.get_response(app) resp = encodeutils.safe_decode(response.body, incoming='utf-8') if HTTP_ret == HTTPStatus.OK: self.assertEqual('off', resp) elif HTTP_ret == HTTPStatus.NOT_FOUND: self.assertNotEqual('off', resp) self.assertEqual(HTTP_ret, response.status_int) @ddt.data(['3.5', 'older'], ['3.37', 'newer']) @ddt.unpack def test_req_version_matches_with_if(self, version, ret_val): version_request = api_version_request.APIVersionRequest(version) self.mock_object(api_version_request, 'max_api_version', return_value=version_request) class Controller(wsgi.Controller): def index(self, req): req_version = req.api_version_request if req_version.matches('3.1', '3.8'): return 'older' if req_version.matches('3.9', '8.8'): 
return 'newer' req = self.build_request(base_dir='/tests', header_version=version) app = fakes.TestRouter(Controller()) response = req.get_response(app) resp = encodeutils.safe_decode(response.body, incoming='utf-8') self.assertEqual(ret_val, resp) self.assertEqual(HTTPStatus.OK, response.status_int) @ddt.data(['3.5', 'older'], ['3.37', 'newer']) @ddt.unpack def test_req_version_matches_with_None(self, version, ret_val): version_request = api_version_request.APIVersionRequest(version) self.mock_object(api_version_request, 'max_api_version', return_value=version_request) class Controller(wsgi.Controller): def index(self, req): req_version = req.api_version_request if req_version.matches(None, '3.8'): return 'older' if req_version.matches('3.9', None): return 'newer' req = self.build_request(base_dir='/tests', header_version=version) app = fakes.TestRouter(Controller()) response = req.get_response(app) resp = encodeutils.safe_decode(response.body, incoming='utf-8') self.assertEqual(ret_val, resp) self.assertEqual(HTTPStatus.OK, response.status_int) def test_req_version_matches_with_None_None(self): version_request = api_version_request.APIVersionRequest('3.39') self.mock_object(api_version_request, 'max_api_version', return_value=version_request) class Controller(wsgi.Controller): def index(self, req): req_version = req.api_version_request # This case is artificial, and will return True if req_version.matches(None, None): return "Pass" req = self.build_request(base_dir='/tests', header_version='3.39') app = fakes.TestRouter(Controller()) response = req.get_response(app) resp = encodeutils.safe_decode(response.body, incoming='utf-8') self.assertEqual("Pass", resp) self.assertEqual(HTTPStatus.OK, response.status_int) class MicroversionsTest(test.TestCase): VERSION_NAME_RE = r'^[A-Z][A-Z0-9_]+$' def test_max_microversion_present(self): # This test ensures that a developer adds a microversions.py # entry for a new api_version_request._MAX_API_VERSION microversion. a = vars(microversions) versions_present = list(v for v in a if re.match(self.VERSION_NAME_RE, v)) max_mv = api_version_request._MAX_API_VERSION count = len(list(v for v in versions_present if getattr(microversions, v) == max_mv)) self.assertEqual( 1, count, max_mv + ' should be present once in microversions.py') def test_microversion_consistency(self): # This test ensures that the microversions defined are # sequential and not duplicated. MAJOR_VERSION = '3' self.assertEqual(microversions.BASE_VERSION, MAJOR_VERSION + '.0') VERSION_VALUE_RE = r'^[' + MAJOR_VERSION + r']\.[0-9]+$' members = inspect.getmembers(microversions) members = [m for m in members if re.match(self.VERSION_NAME_RE, m[0]) and re.match(VERSION_VALUE_RE, m[1])] max_microversion = max(members, key=lambda item: int(item[1].split('.')[1])) # Ensure that the list of versions is the expected length based on # the highest microversion present. (max + 1) max_revision = int(max_microversion[1].split('.')[1]) num_versions = max_revision + 1 self.assertEqual(len(members), num_versions) ver_dict = {} for m in members: ver_dict[m[1]] = m[0] self.assertEqual(len(ver_dict), num_versions) for v in range(0, max_revision): # Ensure no microversions are skipped self.assertIn(MAJOR_VERSION + '.' 
+ str(v), ver_dict) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315577.1831193 cinder-27.0.0/cinder/tests/unit/api/v2/0000775000175000017500000000000000000000000017554 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/api/v2/__init__.py0000664000175000017500000000000000000000000021653 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/api/v2/fakes.py0000664000175000017500000002550400000000000021225 0ustar00zuulzuul00000000000000# Copyright 2010 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import datetime import iso8601 from cinder import exception as exc from cinder import objects from cinder.objects import fields from cinder.tests.unit import fake_constants as fake from cinder.tests.unit import fake_volume from cinder import utils DEFAULT_VOL_NAME = "displayname" DEFAULT_VOL_DESCRIPTION = "displaydesc" DEFAULT_VOL_SIZE = 1 DEFAULT_VOL_TYPE = "vol_type_name" DEFAULT_VOL_STATUS = "fakestatus" DEFAULT_VOL_ID = fake.VOLUME_ID # TODO(vbala): api.v1 tests use hard-coded "fakeaz" for verifying # post-conditions. Update value to "zone1:host1" once we remove # api.v1 tests and use it in api.v2 tests. 
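# Usage note (a sketch of the intended pattern, not code in this module):
# the fake_* helpers below are meant to be patched over the real volume
# API in the unit tests, for example roughly:
#
#     @mock.patch.object(volume.api.API, 'update_snapshot',
#                        side_effect=v2_fakes.fake_snapshot_update)
#
# as the snapshot tests elsewhere in this tree do; the exact attribute
# being replaced varies per test and is an assumption here, not a
# requirement of this module.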
DEFAULT_AZ = "fakeaz" def create_fake_volume(id, **kwargs): volume = { 'id': id, 'user_id': fake.USER_ID, 'project_id': fake.PROJECT_ID, 'host': 'fakehost', 'size': DEFAULT_VOL_SIZE, 'availability_zone': DEFAULT_AZ, 'status': DEFAULT_VOL_STATUS, 'migration_status': None, 'attach_status': fields.VolumeAttachStatus.ATTACHED, 'name': 'vol name', 'display_name': DEFAULT_VOL_NAME, 'display_description': DEFAULT_VOL_DESCRIPTION, 'updated_at': datetime.datetime(1900, 1, 1, 1, 1, 1, tzinfo=iso8601.UTC), 'created_at': datetime.datetime(1900, 1, 1, 1, 1, 1, tzinfo=iso8601.UTC), 'snapshot_id': None, 'source_volid': None, 'volume_type_id': '3e196c20-3c06-11e2-81c1-0800200c9a66', 'encryption_key_id': None, 'volume_admin_metadata': [{'key': 'attached_mode', 'value': 'rw'}, {'key': 'readonly', 'value': 'False'}], 'bootable': False, 'launched_at': datetime.datetime(1900, 1, 1, 1, 1, 1, tzinfo=iso8601.UTC), 'volume_type': fake_volume.fake_db_volume_type(name=DEFAULT_VOL_TYPE), 'replication_status': 'disabled', 'replication_extended_status': None, 'replication_driver_data': None, 'volume_attachment': [], 'multiattach': False, } volume.update(kwargs) if kwargs.get('volume_glance_metadata', None): volume['bootable'] = True if kwargs.get('attach_status') == fields.VolumeAttachStatus.DETACHED: del volume['volume_admin_metadata'][0] return volume def fake_volume_create(self, context, size, name, description, snapshot=None, **param): vol = create_fake_volume(DEFAULT_VOL_ID) vol['size'] = size vol['display_name'] = name vol['display_description'] = description source_volume = param.get('source_volume') or {} vol['source_volid'] = source_volume.get('id') vol['bootable'] = False vol['volume_attachment'] = [] vol['multiattach'] = utils.get_bool_param('multiattach', param) try: vol['snapshot_id'] = snapshot['id'] except (KeyError, TypeError): vol['snapshot_id'] = None vol['availability_zone'] = param.get('availability_zone', 'fakeaz') return vol def fake_volume_api_create(self, context, *args, **kwargs): vol = fake_volume_create(self, context, *args, **kwargs) return fake_volume.fake_volume_obj(context, **vol) def fake_image_service_detail(self, context, **kwargs): filters = kwargs.get('filters', {'name': ''}) if filters['name'] == "Fedora-x86_64-20-20140618-sda": return [{'id': "c905cedb-7281-47e4-8a62-f26bc5fc4c77"}] elif filters['name'] == "multi": return [{'id': "c905cedb-7281-47e4-8a62-f26bc5fc4c77"}, {'id': "c905cedb-abcd-47e4-8a62-f26bc5fc4c77"}] return [] def fake_volume_create_from_image(self, context, size, name, description, snapshot, volume_type, metadata, availability_zone): vol = create_fake_volume(fake.VOLUME_ID) vol['status'] = 'creating' vol['size'] = size vol['display_name'] = name vol['display_description'] = description vol['availability_zone'] = 'cinder' vol['bootable'] = False return vol def fake_volume_update(self, context, *args, **param): pass def fake_volume_delete(self, context, *args, **param): pass def fake_volume_get(self, context, volume_id, viewable_admin_meta=False): if viewable_admin_meta: return create_fake_volume(volume_id) else: volume = create_fake_volume(volume_id) del volume['volume_admin_metadata'] return volume def fake_volume_get_notfound(self, context, volume_id, viewable_admin_meta=False): raise exc.VolumeNotFound(volume_id) def fake_volume_get_db(context, volume_id): if context.is_admin: return create_fake_volume(volume_id) else: volume = create_fake_volume(volume_id) del volume['volume_admin_metadata'] return volume def fake_volume_api_get(self, context, volume_id, 
viewable_admin_meta=False): vol = create_fake_volume(volume_id) return fake_volume.fake_volume_obj(context, **vol) def fake_volume_get_all(context, search_opts=None, marker=None, limit=None, sort_keys=None, sort_dirs=None, filters=None, viewable_admin_meta=False, offset=None): return [create_fake_volume(fake.VOLUME_ID, project_id=fake.PROJECT_ID), create_fake_volume(fake.VOLUME2_ID, project_id=fake.PROJECT2_ID), create_fake_volume(fake.VOLUME3_ID, project_id=fake.PROJECT3_ID)] def fake_volume_get_all_by_project(self, context, marker, limit, sort_keys=None, sort_dirs=None, filters=None, viewable_admin_meta=False, offset=None): return [fake_volume_get(self, context, fake.VOLUME_ID, viewable_admin_meta=True)] def fake_volume_api_get_all_by_project(self, context, marker, limit, sort_keys=None, sort_dirs=None, filters=None, viewable_admin_meta=False, offset=None): vol = fake_volume_get(self, context, fake.VOLUME_ID, viewable_admin_meta=viewable_admin_meta) vol_obj = fake_volume.fake_volume_obj(context, **vol) return objects.VolumeList(objects=[vol_obj]) def fake_snapshot(id, **kwargs): snapshot = {'id': id, 'volume_id': fake.VOLUME_ID, 'status': fields.SnapshotStatus.AVAILABLE, 'volume_size': 100, 'created_at': None, 'display_name': 'Default name', 'display_description': 'Default description', 'project_id': fake.PROJECT_ID, 'snapshot_metadata': []} snapshot.update(kwargs) return snapshot def fake_backup(id, **kwargs): backup = {'id': fake.BACKUP_ID, 'volume_id': fake.VOLUME_ID, 'status': fields.BackupStatus.CREATING, 'size': 1, 'display_name': 'fake_name', 'display_description': 'fake_description', 'user_id': fake.USER_ID, 'project_id': fake.PROJECT_ID, 'temp_volume_id': None, 'temp_snapshot_id': None, 'snapshot_id': None, 'service': 'cinder.fake.backup.service', 'data_timestamp': None, 'restore_volume_id': None, 'backup_metadata': {}} backup.update(kwargs) return backup def fake_snapshot_get_all(context, filters=None, marker=None, limit=None, sort_keys=None, sort_dirs=None, offset=None): return [fake_snapshot(fake.VOLUME_ID, project_id=fake.PROJECT_ID), fake_snapshot(fake.VOLUME2_ID, project_id=fake.PROJECT2_ID), fake_snapshot(fake.VOLUME3_ID, project_id=fake.PROJECT3_ID)] def fake_snapshot_get_all_by_project(context, project_id, filters=None, marker=None, limit=None, sort_keys=None, sort_dirs=None, offset=None): return [fake_snapshot(fake.SNAPSHOT_ID)] def fake_snapshot_update(self, context, *args, **param): pass def fake_service_get_all(*args, **kwargs): return [{'availability_zone': "zone1:host1", "disabled": 0, 'uuid': 'a3a593da-7f8d-4bb7-8b4c-f2bc1e0b4824'}] def fake_service_get_all_by_topic(context, topic, disabled=None): return [{'availability_zone': "zone1:host1", "disabled": 0, 'uuid': '4200b32b-0bf9-436c-86b2-0675f6ac218e'}] def fake_snapshot_get(self, context, snapshot_id): if snapshot_id == fake.WILL_NOT_BE_FOUND_ID: raise exc.SnapshotNotFound(snapshot_id=snapshot_id) return fake_snapshot(snapshot_id) def fake_backup_get(self, context, backup_id): if backup_id == fake.WILL_NOT_BE_FOUND_ID: raise exc.BackupNotFound(backup_id=backup_id) return fake_backup(backup_id) def fake_consistencygroup_get_notfound(self, context, cg_id): raise exc.GroupNotFound(group_id=cg_id) def fake_volume_type_get(context, id, *args, **kwargs): return {'id': id, 'name': 'vol_type_name', 'description': 'A fake volume type', 'is_public': True, 'projects': [], 'extra_specs': {}, 'created_at': None, 'deleted_at': None, 'updated_at': None, 'qos_specs_id': fake.QOS_SPEC_ID, 'deleted': False} def 
fake_default_type_get(id=fake.VOLUME_TYPE_ID): return {'id': id, 'name': 'vol_type_name', 'description': 'A fake volume type', 'is_public': True, 'projects': [], 'extra_specs': {}, 'created_at': None, 'deleted_at': None, 'updated_at': None, 'qos_specs_id': fake.QOS_SPEC_ID, 'deleted': False} def fake_volume_type_name_get(context, id, *args, **kwargs): return fake_volume_type_get(context, id)['name'] or id def fake_volume_admin_metadata_get(context, volume_id, **kwargs): admin_meta = {'attached_mode': 'rw', 'readonly': 'False'} if kwargs.get('attach_status') == fields.VolumeAttachStatus.DETACHED: del admin_meta['attached_mode'] return admin_meta ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/api/v2/test_limits.py0000664000175000017500000007114200000000000022473 0ustar00zuulzuul00000000000000# Copyright 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Tests dealing with HTTP rate-limiting. """ from http import client as http_client import io from oslo_serialization import jsonutils import webob from cinder.api.v2 import limits from cinder.api import views import cinder.context from cinder.tests.unit import test TEST_LIMITS = [ limits.Limit("GET", "/delayed", "^/delayed", 1, limits.PER_MINUTE), limits.Limit("POST", "*", ".*", 7, limits.PER_MINUTE), limits.Limit("POST", "/volumes", "^/volumes", 3, limits.PER_MINUTE), limits.Limit("PUT", "*", "", 10, limits.PER_MINUTE), limits.Limit("PUT", "/volumes", "^/volumes", 5, limits.PER_MINUTE), ] NS = { 'atom': 'http://www.w3.org/2005/Atom', 'ns': 'http://docs.openstack.org/common/api/v1.0', } class BaseLimitTestSuite(test.TestCase): """Base test suite which provides relevant stubs and time abstraction.""" def setUp(self): super(BaseLimitTestSuite, self).setUp() self.time = 0.0 self.mock_object(limits.Limit, "_get_time", self._get_time) self.absolute_limits = {} def fake_get_project_quotas(context, project_id, usages=True): return {k: dict(limit=v) for k, v in self.absolute_limits.items()} self.mock_object(cinder.quota.QUOTAS, "get_project_quotas", fake_get_project_quotas) def _get_time(self): """Return the "time" according to this test suite.""" return self.time class LimitsControllerTest(BaseLimitTestSuite): """Tests for `limits.LimitsController` class.""" def setUp(self): """Run before each test.""" super(LimitsControllerTest, self).setUp() self.controller = limits.create_resource() def _get_index_request(self, accept_header="application/json"): """Helper to set routing arguments.""" request = webob.Request.blank("/") request.accept = accept_header request.environ["wsgiorg.routing_args"] = (None, { "action": "index", "controller": "", }) context = cinder.context.RequestContext('testuser', 'testproject') request.environ["cinder.context"] = context return request def _populate_limits(self, request): """Put limit info into a request.""" _limits = [ limits.Limit("GET", "*", ".*", 10, 60).display(), limits.Limit("POST", "*", 
".*", 5, 60 * 60).display(), limits.Limit("GET", "changes-since*", "changes-since", 5, 60).display(), ] request.environ["cinder.limits"] = _limits return request def test_empty_index_json(self): """Test getting empty limit details in JSON.""" request = self._get_index_request() response = request.get_response(self.controller) expected = { "limits": { "rate": [], "absolute": {}, }, } body = jsonutils.loads(response.body) self.assertEqual(expected, body) def test_index_json(self): """Test getting limit details in JSON.""" request = self._get_index_request() request = self._populate_limits(request) self.absolute_limits = { 'gigabytes': 512, 'volumes': 5, } response = request.get_response(self.controller) expected = { "limits": { "rate": [ { "regex": ".*", "uri": "*", "limit": [ { "verb": "GET", "next-available": "1970-01-01T00:00:00", "unit": "MINUTE", "value": 10, "remaining": 10, }, { "verb": "POST", "next-available": "1970-01-01T00:00:00", "unit": "HOUR", "value": 5, "remaining": 5, }, ], }, { "regex": "changes-since", "uri": "changes-since*", "limit": [ { "verb": "GET", "next-available": "1970-01-01T00:00:00", "unit": "MINUTE", "value": 5, "remaining": 5, }, ], }, ], "absolute": {"maxTotalVolumeGigabytes": 512, "maxTotalVolumes": 5, }, }, } body = jsonutils.loads(response.body) self.assertEqual(expected, body) def _populate_limits_diff_regex(self, request): """Put limit info into a request.""" _limits = [ limits.Limit("GET", "*", ".*", 10, 60).display(), limits.Limit("GET", "*", "*.*", 10, 60).display(), ] request.environ["cinder.limits"] = _limits return request def test_index_diff_regex(self): """Test getting limit details in JSON.""" request = self._get_index_request() request = self._populate_limits_diff_regex(request) response = request.get_response(self.controller) expected = { "limits": { "rate": [ { "regex": ".*", "uri": "*", "limit": [ { "verb": "GET", "next-available": "1970-01-01T00:00:00", "unit": "MINUTE", "value": 10, "remaining": 10, }, ], }, { "regex": "*.*", "uri": "*", "limit": [ { "verb": "GET", "next-available": "1970-01-01T00:00:00", "unit": "MINUTE", "value": 10, "remaining": 10, }, ], }, ], "absolute": {}, }, } body = jsonutils.loads(response.body) self.assertEqual(expected, body) def _test_index_absolute_limits_json(self, expected): request = self._get_index_request() response = request.get_response(self.controller) body = jsonutils.loads(response.body) self.assertEqual(expected, body['limits']['absolute']) def test_index_ignores_extra_absolute_limits_json(self): self.absolute_limits = {'unknown_limit': 9001} self._test_index_absolute_limits_json({}) class TestLimiter(limits.Limiter): pass class LimitMiddlewareTest(BaseLimitTestSuite): """Tests for the `limits.RateLimitingMiddleware` class.""" @webob.dec.wsgify def _empty_app(self, request): """Do-nothing WSGI app.""" pass def setUp(self): """Prepare middleware for use through fake WSGI app.""" super(LimitMiddlewareTest, self).setUp() _limits = '(GET, *, .*, 1, MINUTE)' self.app = limits.RateLimitingMiddleware(self._empty_app, _limits, "%s.TestLimiter" % self.__class__.__module__) def test_limit_class(self): """Test that middleware selected correct limiter class.""" self.assertIsInstance(self.app._limiter, TestLimiter) def test_good_request(self): """Test successful GET request through middleware.""" request = webob.Request.blank("/") response = request.get_response(self.app) self.assertEqual(http_client.OK, response.status_int) def test_limited_request_json(self): """Test a rate-limited (413) GET request through 
middleware.""" request = webob.Request.blank("/") response = request.get_response(self.app) self.assertEqual(http_client.OK, response.status_int) request = webob.Request.blank("/") response = request.get_response(self.app) self.assertEqual(http_client.REQUEST_ENTITY_TOO_LARGE, response.status_int) self.assertIn('Retry-After', response.headers) retry_after = int(response.headers['Retry-After']) self.assertAlmostEqual(retry_after, 60, 1) body = jsonutils.loads(response.body) expected = "Only 1 GET request(s) can be made to * every minute." value = body["overLimitFault"]["details"].strip() self.assertEqual(expected, value) class LimitTest(BaseLimitTestSuite): """Tests for the `limits.Limit` class.""" def test_GET_no_delay(self): """Test a limit handles 1 GET per second.""" limit = limits.Limit("GET", "*", ".*", 1, 1) delay = limit("GET", "/anything") self.assertIsNone(delay) self.assertEqual(0, limit.next_request) self.assertEqual(0, limit.last_request) def test_GET_delay(self): """Test two calls to 1 GET per second limit.""" limit = limits.Limit("GET", "*", ".*", 1, 1) delay = limit("GET", "/anything") self.assertIsNone(delay) delay = limit("GET", "/anything") self.assertEqual(1, delay) self.assertEqual(1, limit.next_request) self.assertEqual(0, limit.last_request) self.time += 4 delay = limit("GET", "/anything") self.assertIsNone(delay) self.assertEqual(4, limit.next_request) self.assertEqual(4, limit.last_request) def test_invalid_limit(self): """Test that invalid limits are properly checked on construction.""" self.assertRaises(ValueError, limits.Limit, "GET", "*", ".*", 0, 1) class ParseLimitsTest(BaseLimitTestSuite): """Tests for the default limits parser in the `limits.Limiter` class.""" def test_invalid(self): """Test that parse_limits() handles invalid input correctly.""" self.assertRaises(ValueError, limits.Limiter.parse_limits, ';;;;;') def test_bad_rule(self): """Test that parse_limits() handles bad rules correctly.""" self.assertRaises(ValueError, limits.Limiter.parse_limits, 'GET, *, .*, 20, minute') def test_missing_arg(self): """Test that parse_limits() handles missing args correctly.""" self.assertRaises(ValueError, limits.Limiter.parse_limits, '(GET, *, .*, 20)') def test_bad_value(self): """Test that parse_limits() handles bad values correctly.""" self.assertRaises(ValueError, limits.Limiter.parse_limits, '(GET, *, .*, foo, minute)') def test_bad_unit(self): """Test that parse_limits() handles bad units correctly.""" self.assertRaises(ValueError, limits.Limiter.parse_limits, '(GET, *, .*, 20, lightyears)') def test_multiple_rules(self): """Test that parse_limits() handles multiple rules correctly.""" try: test_limits = limits.Limiter.parse_limits( '(get, *, .*, 20, minute);' '(PUT, /foo*, /foo.*, 10, hour);' '(POST, /bar*, /bar.*, 5, second);' '(Say, /derp*, /derp.*, 1, day)') except ValueError as e: self.assertFalse(str(e)) # Make sure the number of returned limits are correct self.assertEqual(4, len(test_limits)) # Check all the verbs... expected = ['GET', 'PUT', 'POST', 'SAY'] self.assertEqual(expected, [t.verb for t in test_limits]) # ...the URIs... expected = ['*', '/foo*', '/bar*', '/derp*'] self.assertEqual(expected, [t.uri for t in test_limits]) # ...the regexes... expected = ['.*', '/foo.*', '/bar.*', '/derp.*'] self.assertEqual(expected, [t.regex for t in test_limits]) # ...the values... expected = [20, 10, 5, 1] self.assertEqual(expected, [t.value for t in test_limits]) # ...and the units... 
expected = [limits.PER_MINUTE, limits.PER_HOUR, limits.PER_SECOND, limits.PER_DAY] self.assertEqual(expected, [t.unit for t in test_limits]) class LimiterTest(BaseLimitTestSuite): """Tests for the in-memory `limits.Limiter` class.""" def setUp(self): """Run before each test.""" super(LimiterTest, self).setUp() userlimits = {'limits.user3': '', 'limits.user0': '(get, *, .*, 4, minute);' '(put, *, .*, 2, minute)'} self.limiter = limits.Limiter(TEST_LIMITS, **userlimits) def _check(self, num, verb, url, username=None): """Check and yield results from checks.""" for x in range(num): yield self.limiter.check_for_delay(verb, url, username)[0] def _check_sum(self, num, verb, url, username=None): """Check and sum results from checks.""" results = self._check(num, verb, url, username) return sum(item for item in results if item) def test_no_delay_GET(self): """Ensure no delay on a single call for a limit verb we didn't set.""" delay = self.limiter.check_for_delay("GET", "/anything") self.assertEqual((None, None), delay) def test_no_delay_PUT(self): """Ensure no delay on a single call for a known limit.""" delay = self.limiter.check_for_delay("PUT", "/anything") self.assertEqual((None, None), delay) def test_delay_PUT(self): """Test delay on 11th PUT request. Ensure the 11th PUT will result in a delay of 6.0 seconds until the next request will be granced. """ expected = [None] * 10 + [6.0] results = list(self._check(11, "PUT", "/anything")) self.assertEqual(expected, results) def test_delay_POST(self): """Test delay on 8th POST request. Ensure the 8th POST will result in a delay of 6.0 seconds until the next request will be granced. """ expected = [None] * 7 results = list(self._check(7, "POST", "/anything")) self.assertEqual(expected, results) expected = 60.0 / 7.0 results = self._check_sum(1, "POST", "/anything") self.assertAlmostEqual(expected, results, 8) def test_delay_GET(self): """Ensure the 11th GET will result in NO delay.""" expected = [None] * 11 results = list(self._check(11, "GET", "/anything")) self.assertEqual(expected, results) expected = [None] * 4 + [15.0] results = list(self._check(5, "GET", "/foo", "user0")) self.assertEqual(expected, results) def test_delay_PUT_volumes(self): """Test delay on /volumes. Ensure PUT on /volumes limits at 5 requests, and PUT elsewhere is still OK after 5 requests...but then after 11 total requests, PUT limiting kicks in. """ # First 6 requests on PUT /volumes expected = [None] * 5 + [12.0] results = list(self._check(6, "PUT", "/volumes")) self.assertEqual(expected, results) # Next 5 request on PUT /anything expected = [None] * 4 + [6.0] results = list(self._check(5, "PUT", "/anything")) self.assertEqual(expected, results) def test_delay_PUT_wait(self): """Test limit is lifted again. Ensure after hitting the limit and then waiting for the correct amount of time, the limit will be lifted. 
""" expected = [None] * 10 + [6.0] results = list(self._check(11, "PUT", "/anything")) self.assertEqual(expected, results) # Advance time self.time += 6.0 expected = [None, 6.0] results = list(self._check(2, "PUT", "/anything")) self.assertEqual(expected, results) def test_multiple_delays(self): """Ensure multiple requests still get a delay.""" expected = [None] * 10 + [6.0] * 10 results = list(self._check(20, "PUT", "/anything")) self.assertEqual(expected, results) self.time += 1.0 expected = [5.0] * 10 results = list(self._check(10, "PUT", "/anything")) self.assertEqual(expected, results) expected = [None] * 2 + [30.0] * 8 results = list(self._check(10, "PUT", "/anything", "user0")) self.assertEqual(expected, results) def test_user_limit(self): """Test user-specific limits.""" self.assertEqual([], self.limiter.levels['user3']) self.assertEqual(2, len(self.limiter.levels['user0'])) def test_multiple_users(self): """Tests involving multiple users.""" # User0 expected = [None] * 2 + [30.0] * 8 results = list(self._check(10, "PUT", "/anything", "user0")) self.assertEqual(expected, results) # User1 expected = [None] * 10 + [6.0] * 10 results = list(self._check(20, "PUT", "/anything", "user1")) self.assertEqual(expected, results) # User2 expected = [None] * 10 + [6.0] * 5 results = list(self._check(15, "PUT", "/anything", "user2")) self.assertEqual(expected, results) # User3 expected = [None] * 20 results = list(self._check(20, "PUT", "/anything", "user3")) self.assertEqual(expected, results) self.time += 1.0 # User1 again expected = [5.0] * 10 results = list(self._check(10, "PUT", "/anything", "user1")) self.assertEqual(expected, results) self.time += 1.0 # User1 again expected = [4.0] * 5 results = list(self._check(5, "PUT", "/anything", "user2")) self.assertEqual(expected, results) # User0 again expected = [28.0] results = list(self._check(1, "PUT", "/anything", "user0")) self.assertEqual(expected, results) self.time += 28.0 expected = [None, 30.0] results = list(self._check(2, "PUT", "/anything", "user0")) self.assertEqual(expected, results) class WsgiLimiterTest(BaseLimitTestSuite): """Tests for `limits.WsgiLimiter` class.""" def setUp(self): """Run before each test.""" super(WsgiLimiterTest, self).setUp() self.app = limits.WsgiLimiter(TEST_LIMITS) def _request_data(self, verb, path): """Get data describing a limit request verb/path.""" return jsonutils.dump_as_bytes({"verb": verb, "path": path}) def _request(self, verb, url, username=None): """POST request to given url by given username. Make sure that POSTing to the given url causes the given username to perform the given action. Make the internal rate limiter return delay and make sure that the WSGI app returns the correct response. 
""" if username: request = webob.Request.blank("/%s" % username) else: request = webob.Request.blank("/") request.method = "POST" request.body = self._request_data(verb, url) response = request.get_response(self.app) if "X-Wait-Seconds" in response.headers: self.assertEqual(http_client.FORBIDDEN, response.status_int) return response.headers["X-Wait-Seconds"] self.assertEqual(http_client.NO_CONTENT, response.status_int) def test_invalid_methods(self): """Only POSTs should work.""" for method in ["GET", "PUT", "DELETE", "HEAD", "OPTIONS"]: request = webob.Request.blank("/", method=method) response = request.get_response(self.app) self.assertEqual(http_client.METHOD_NOT_ALLOWED, response.status_int) def test_good_url(self): delay = self._request("GET", "/something") self.assertIsNone(delay) def test_escaping(self): delay = self._request("GET", "/something/jump%20up") self.assertIsNone(delay) def test_response_to_delays(self): delay = self._request("GET", "/delayed") self.assertIsNone(delay) delay = self._request("GET", "/delayed") self.assertEqual('60.00', delay) def test_response_to_delays_usernames(self): delay = self._request("GET", "/delayed", "user1") self.assertIsNone(delay) delay = self._request("GET", "/delayed", "user2") self.assertIsNone(delay) delay = self._request("GET", "/delayed", "user1") self.assertEqual('60.00', delay) delay = self._request("GET", "/delayed", "user2") self.assertEqual('60.00', delay) class FakeHttplibSocket(object): """Fake `http_client.HTTPResponse` replacement.""" def __init__(self, response_string): """Initialize new `FakeHttplibSocket`.""" if isinstance(response_string, str): response_string = response_string.encode('utf-8') self._buffer = io.BytesIO(response_string) def makefile(self, mode, *args): """Returns the socket's internal buffer.""" return self._buffer class FakeHttplibConnection(object): """Fake `http_client.HTTPConnection`.""" def __init__(self, app, host): """Initialize `FakeHttplibConnection`.""" self.app = app self.host = host def request(self, method, path, body="", headers=None): """Fake request handler. Requests made via this connection actually get translated and routed into our WSGI app, we then wait for the response and turn it back into an `http_client.HTTPResponse`. """ if not headers: headers = {} req = webob.Request.blank(path) req.method = method req.headers = headers req.host = self.host req.body = body resp = str(req.get_response(self.app)) resp = "HTTP/1.0 %s" % resp sock = FakeHttplibSocket(resp) self.http_response = http_client.HTTPResponse(sock) self.http_response.begin() def getresponse(self): """Return our generated response from the request.""" return self.http_response def wire_HTTPConnection_to_WSGI(host, app): """Monkeypatches HTTPConnection. Monkeypatches HTTPConnection so that if you try to connect to host, you are instead routed straight to the given WSGI app. After calling this method, when any code calls http_client.HTTPConnection(host) the connection object will be a fake. Its requests will be sent directly to the given WSGI app rather than through a socket. Code connecting to hosts other than host will not be affected. This method may be called multiple times to map different hosts to different apps. This method returns the original HTTPConnection object, so that the caller can restore the default HTTPConnection interface (for all hosts). """ class HTTPConnectionDecorator(object): """Decorator to mock the HTTPConecction class. 
Wraps the real HTTPConnection class so that when you instantiate the class you might instead get a fake instance. """ def __init__(self, wrapped): self.wrapped = wrapped def __call__(self, connection_host, *args, **kwargs): if connection_host == host: return FakeHttplibConnection(app, host) else: return self.wrapped(connection_host, *args, **kwargs) oldHTTPConnection = http_client.HTTPConnection new_http_connection = HTTPConnectionDecorator(http_client.HTTPConnection) http_client.HTTPConnection = new_http_connection return oldHTTPConnection class WsgiLimiterProxyTest(BaseLimitTestSuite): """Tests for the `limits.WsgiLimiterProxy` class.""" def setUp(self): """setUp() for WsgiLimiterProxyTest. Do some nifty HTTP/WSGI magic which allows for WSGI to be called directly by something like the `http_client` library. """ super(WsgiLimiterProxyTest, self).setUp() self.app = limits.WsgiLimiter(TEST_LIMITS) oldHTTPConnection = ( wire_HTTPConnection_to_WSGI("169.254.0.1:80", self.app)) self.proxy = limits.WsgiLimiterProxy("169.254.0.1:80") self.addCleanup(self._restore, oldHTTPConnection) def _restore(self, oldHTTPConnection): # restore original HTTPConnection object http_client.HTTPConnection = oldHTTPConnection def test_200(self): """Successful request test.""" delay = self.proxy.check_for_delay("GET", "/anything") self.assertEqual((None, None), delay) def test_403(self): """Forbidden request test.""" delay = self.proxy.check_for_delay("GET", "/delayed") self.assertEqual((None, None), delay) delay, error = self.proxy.check_for_delay("GET", "/delayed") error = error.strip() expected = ("60.00", b"403 Forbidden\n\nOnly 1 GET request(s) can be " b"made to /delayed every minute.") self.assertEqual(expected, (delay, error)) class LimitsViewBuilderTest(test.TestCase): def setUp(self): super(LimitsViewBuilderTest, self).setUp() self.view_builder = views.limits.ViewBuilder() self.rate_limits = [{"URI": "*", "regex": ".*", "value": 10, "verb": "POST", "remaining": 2, "unit": "MINUTE", "resetTime": 1311272226}, {"URI": "*/volumes", "regex": "^/volumes", "value": 50, "verb": "POST", "remaining": 10, "unit": "DAY", "resetTime": 1311272226}] self.absolute_limits = {"gigabytes": 1, "backup_gigabytes": 2, "volumes": 3, "snapshots": 4, "backups": 5} def test_build_limits(self): tdate = "2011-07-21T18:17:06" expected_limits = { "limits": {"rate": [{"uri": "*", "regex": ".*", "limit": [{"value": 10, "verb": "POST", "remaining": 2, "unit": "MINUTE", "next-available": tdate}]}, {"uri": "*/volumes", "regex": "^/volumes", "limit": [{"value": 50, "verb": "POST", "remaining": 10, "unit": "DAY", "next-available": tdate}]}], "absolute": {"maxTotalVolumeGigabytes": 1, "maxTotalBackupGigabytes": 2, "maxTotalVolumes": 3, "maxTotalSnapshots": 4, "maxTotalBackups": 5}}} output = self.view_builder.build(self.rate_limits, self.absolute_limits) self.assertDictEqual(expected_limits, output) def test_build_limits_empty_limits(self): expected_limits = {"limits": {"rate": [], "absolute": {}}} abs_limits = {} rate_limits = [] output = self.view_builder.build(rate_limits, abs_limits) self.assertDictEqual(expected_limits, output) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/api/v2/test_snapshots.py0000664000175000017500000007576700000000000023235 0ustar00zuulzuul00000000000000# Copyright 2011 Denali Systems, Inc. # All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import datetime from http import HTTPStatus from unittest import mock from urllib import parse as urllib from zoneinfo import ZoneInfo import ddt from oslo_config import cfg import webob from cinder.api import common from cinder.api.v2 import snapshots from cinder import context from cinder import db from cinder import exception from cinder import objects from cinder.objects import fields from cinder.scheduler import rpcapi as scheduler_rpcapi from cinder.tests.unit.api import fakes from cinder.tests.unit.api.v2 import fakes as v2_fakes from cinder.tests.unit import fake_constants as fake from cinder.tests.unit import fake_snapshot from cinder.tests.unit import fake_volume from cinder.tests.unit import test from cinder.tests.unit import utils from cinder import volume CONF = cfg.CONF UUID = '00000000-0000-0000-0000-000000000001' INVALID_UUID = '00000000-0000-0000-0000-000000000002' def _get_default_snapshot_param(): return { 'id': UUID, 'volume_id': fake.VOLUME_ID, 'status': fields.SnapshotStatus.AVAILABLE, 'volume_size': 100, 'created_at': None, 'updated_at': None, 'user_id': 'bcb7746c7a41472d88a1ffac89ba6a9b', 'project_id': '7ffe17a15c724e2aa79fc839540aec15', 'display_name': 'Default name', 'display_description': 'Default description', 'deleted': None, 'volume': {'availability_zone': 'test_zone'} } def fake_snapshot_delete(self, context, snapshot): if snapshot['id'] != UUID: raise exception.SnapshotNotFound(snapshot['id']) def fake_snapshot_get(self, context, snapshot_id): if snapshot_id != UUID: raise exception.SnapshotNotFound(snapshot_id) param = _get_default_snapshot_param() return param def fake_snapshot_get_all(self, context, search_opts=None): param = _get_default_snapshot_param() return [param] @ddt.ddt class SnapshotApiTest(test.TestCase): def setUp(self): super(SnapshotApiTest, self).setUp() self.mock_object(scheduler_rpcapi.SchedulerAPI, 'create_snapshot') self.controller = snapshots.SnapshotsController() self.ctx = context.RequestContext(fake.USER_ID, fake.PROJECT_ID, True) def test_snapshot_create(self): volume = utils.create_volume(self.ctx, volume_type_id=None) snapshot_name = 'Snapshot Test Name' snapshot_description = 'Snapshot Test Desc' snapshot = { "volume_id": volume.id, "force": False, "name": snapshot_name, "description": snapshot_description } body = dict(snapshot=snapshot) req = fakes.HTTPRequest.blank('/v3/snapshots') resp_dict = self.controller.create(req, body=body) self.assertIn('snapshot', resp_dict) self.assertEqual(snapshot_name, resp_dict['snapshot']['name']) self.assertEqual(snapshot_description, resp_dict['snapshot']['description']) self.assertIn('updated_at', resp_dict['snapshot']) db.volume_destroy(self.ctx, volume.id) def test_snapshot_create_with_null_validate(self): volume = utils.create_volume(self.ctx, volume_type_id=None) snapshot = { "volume_id": volume.id, "force": False, "name": None, "description": None } body = dict(snapshot=snapshot) req = fakes.HTTPRequest.blank('/v3/snapshots') resp_dict = 
self.controller.create(req, body=body) self.assertIn('snapshot', resp_dict) self.assertIsNone(resp_dict['snapshot']['name']) self.assertIsNone(resp_dict['snapshot']['description']) db.volume_destroy(self.ctx, volume.id) @ddt.data(True, 'y', 'true', 'yes', '1', 'on') def test_snapshot_create_force(self, force_param): volume = utils.create_volume(self.ctx, status='in-use', volume_type_id=None) snapshot_name = 'Snapshot Test Name' snapshot_description = 'Snapshot Test Desc' snapshot = { "volume_id": volume.id, "force": force_param, "name": snapshot_name, "description": snapshot_description } body = dict(snapshot=snapshot) req = fakes.HTTPRequest.blank('/v3/snapshots') resp_dict = self.controller.create(req, body=body) self.assertIn('snapshot', resp_dict) self.assertEqual(snapshot_name, resp_dict['snapshot']['name']) self.assertEqual(snapshot_description, resp_dict['snapshot']['description']) self.assertIn('updated_at', resp_dict['snapshot']) db.volume_destroy(self.ctx, volume.id) @ddt.data(False, 'n', 'false', 'No', '0', 'off') def test_snapshot_create_force_failure(self, force_param): volume = utils.create_volume(self.ctx, status='in-use', volume_type_id=None) snapshot_name = 'Snapshot Test Name' snapshot_description = 'Snapshot Test Desc' snapshot = { "volume_id": volume.id, "force": force_param, "name": snapshot_name, "description": snapshot_description } body = dict(snapshot=snapshot) req = fakes.HTTPRequest.blank('/v3/snapshots') self.assertRaises(exception.InvalidVolume, self.controller.create, req, body=body) db.volume_destroy(self.ctx, volume.id) @ddt.data("**&&^^%%$$##@@", '-1', 2, '01', 'falSE', 0, 'trUE', 1, "1 ") def test_snapshot_create_invalid_force_param(self, force_param): volume = utils.create_volume(self.ctx, status='available', volume_type_id=None) snapshot_name = 'Snapshot Test Name' snapshot_description = 'Snapshot Test Desc' snapshot = { "volume_id": volume.id, "force": force_param, "name": snapshot_name, "description": snapshot_description } body = dict(snapshot=snapshot) req = fakes.HTTPRequest.blank('/v3/snapshots') self.assertRaises(exception.ValidationError, self.controller.create, req, body=body) db.volume_destroy(self.ctx, volume.id) def test_snapshot_create_without_volume_id(self): snapshot_name = 'Snapshot Test Name' snapshot_description = 'Snapshot Test Desc' body = { "snapshot": { "force": True, "name": snapshot_name, "description": snapshot_description } } req = fakes.HTTPRequest.blank('/v3/snapshots') self.assertRaises(exception.ValidationError, self.controller.create, req, body=body) @ddt.data({"snapshot": {"description": " sample description", "name": " test"}}, {"snapshot": {"description": "sample description ", "name": "test "}}, {"snapshot": {"description": " sample description ", "name": " test name "}}) def test_snapshot_create_with_leading_trailing_spaces(self, body): volume = utils.create_volume(self.ctx, volume_type_id=None) body['snapshot']['volume_id'] = volume.id req = fakes.HTTPRequest.blank('/v3/snapshots') resp_dict = self.controller.create(req, body=body) self.assertEqual(body['snapshot']['display_name'].strip(), resp_dict['snapshot']['name']) self.assertEqual(body['snapshot']['description'].strip(), resp_dict['snapshot']['description']) db.volume_destroy(self.ctx, volume.id) @mock.patch.object(volume.api.API, "update_snapshot", side_effect=v2_fakes.fake_snapshot_update) @mock.patch('cinder.db.snapshot_metadata_get', return_value=dict()) @mock.patch('cinder.db.volume_get') @mock.patch('cinder.objects.Snapshot.get_by_id') def 
test_snapshot_update( self, snapshot_get_by_id, volume_get, snapshot_metadata_get, update_snapshot): snapshot = { 'id': UUID, 'volume_id': fake.VOLUME_ID, 'status': fields.SnapshotStatus.AVAILABLE, 'created_at': "2014-01-01 00:00:00", 'volume_size': 100, 'display_name': 'Default name', 'display_description': 'Default description', 'expected_attrs': ['metadata'], } snapshot_obj = fake_snapshot.fake_snapshot_obj(self.ctx, **snapshot) fake_volume_obj = fake_volume.fake_volume_obj(self.ctx) snapshot_get_by_id.return_value = snapshot_obj volume_get.return_value = fake_volume_obj updates = { "name": "Updated Test Name", } body = {"snapshot": updates} req = fakes.HTTPRequest.blank('/v3/snapshots/%s' % UUID) req.environ['cinder.context'] = self.ctx res_dict = self.controller.update(req, UUID, body=body) expected = { 'snapshot': { 'id': UUID, 'volume_id': fake.VOLUME_ID, 'status': fields.SnapshotStatus.AVAILABLE, 'size': 100, 'created_at': datetime.datetime(2014, 1, 1, 0, 0, 0, tzinfo=ZoneInfo('UTC')), 'updated_at': None, 'name': u'Updated Test Name', 'description': u'Default description', 'metadata': {}, } } self.assertEqual(expected, res_dict) self.assertEqual(2, len(self.notifier.notifications)) @mock.patch.object(volume.api.API, "update_snapshot", side_effect=v2_fakes.fake_snapshot_update) @mock.patch('cinder.db.snapshot_metadata_get', return_value=dict()) @mock.patch('cinder.db.volume_get') @mock.patch('cinder.objects.Snapshot.get_by_id') def test_snapshot_update_with_null_validate( self, snapshot_get_by_id, volume_get, snapshot_metadata_get, update_snapshot): snapshot = { 'id': UUID, 'volume_id': fake.VOLUME_ID, 'status': fields.SnapshotStatus.AVAILABLE, 'created_at': "2014-01-01 00:00:00", 'volume_size': 100, 'name': 'Default name', 'description': 'Default description', 'expected_attrs': ['metadata'], } snapshot_obj = fake_snapshot.fake_snapshot_obj(self.ctx, **snapshot) fake_volume_obj = fake_volume.fake_volume_obj(self.ctx) snapshot_get_by_id.return_value = snapshot_obj volume_get.return_value = fake_volume_obj updates = { "name": None, "description": None, } body = {"snapshot": updates} req = fakes.HTTPRequest.blank('/v3/snapshots/%s' % UUID) req.environ['cinder.context'] = self.ctx res_dict = self.controller.update(req, UUID, body=body) self.assertEqual(fields.SnapshotStatus.AVAILABLE, res_dict['snapshot']['status']) self.assertIsNone(res_dict['snapshot']['name']) self.assertIsNone(res_dict['snapshot']['description']) def test_snapshot_update_missing_body(self): body = {} req = fakes.HTTPRequest.blank('/v3/snapshots/%s' % UUID) self.assertRaises(exception.ValidationError, self.controller.update, req, UUID, body=body) def test_snapshot_update_invalid_body(self): body = {'name': 'missing top level snapshot key'} req = fakes.HTTPRequest.blank('/v3/snapshots/%s' % UUID) self.assertRaises(exception.ValidationError, self.controller.update, req, UUID, body=body) def test_snapshot_update_not_found(self): self.mock_object(volume.api.API, "get_snapshot", fake_snapshot_get) updates = { "name": "Updated Test Name", } body = {"snapshot": updates} req = fakes.HTTPRequest.blank('/v3/snapshots/not-the-uuid') self.assertRaises(exception.SnapshotNotFound, self.controller.update, req, 'not-the-uuid', body=body) @mock.patch.object(volume.api.API, "update_snapshot", side_effect=v2_fakes.fake_snapshot_update) @mock.patch('cinder.db.snapshot_metadata_get', return_value=dict()) @mock.patch('cinder.db.volume_get') @mock.patch('cinder.objects.Snapshot.get_by_id') def 
test_snapshot_update_with_leading_trailing_spaces( self, snapshot_get_by_id, volume_get, snapshot_metadata_get, update_snapshot): snapshot = { 'id': UUID, 'volume_id': fake.VOLUME_ID, 'status': fields.SnapshotStatus.AVAILABLE, 'created_at': "2018-01-14 00:00:00", 'volume_size': 100, 'display_name': 'Default name', 'display_description': 'Default description', 'expected_attrs': ['metadata'], } snapshot_obj = fake_snapshot.fake_snapshot_obj(self.ctx, **snapshot) fake_volume_obj = fake_volume.fake_volume_obj(self.ctx) snapshot_get_by_id.return_value = snapshot_obj volume_get.return_value = fake_volume_obj updates = { "name": " test ", "description": " test " } body = {"snapshot": updates} req = fakes.HTTPRequest.blank('/v3/snapshots/%s' % UUID) req.environ['cinder.context'] = self.ctx res_dict = self.controller.update(req, UUID, body=body) expected = { 'snapshot': { 'id': UUID, 'volume_id': fake.VOLUME_ID, 'status': fields.SnapshotStatus.AVAILABLE, 'size': 100, 'created_at': datetime.datetime(2018, 1, 14, 0, 0, 0, tzinfo=ZoneInfo('UTC')), 'updated_at': None, 'name': u'test', 'description': u'test', 'metadata': {}, } } self.assertEqual(expected, res_dict) self.assertEqual(2, len(self.notifier.notifications)) @mock.patch.object(volume.api.API, "delete_snapshot", side_effect=v2_fakes.fake_snapshot_update) @mock.patch('cinder.db.snapshot_metadata_get', return_value=dict()) @mock.patch('cinder.objects.Volume.get_by_id') @mock.patch('cinder.objects.Snapshot.get_by_id') def test_snapshot_delete(self, snapshot_get_by_id, volume_get_by_id, snapshot_metadata_get, delete_snapshot): snapshot = { 'id': UUID, 'volume_id': fake.VOLUME_ID, 'status': fields.SnapshotStatus.AVAILABLE, 'volume_size': 100, 'display_name': 'Default name', 'display_description': 'Default description', 'expected_attrs': ['metadata'], } snapshot_obj = fake_snapshot.fake_snapshot_obj(self.ctx, **snapshot) fake_volume_obj = fake_volume.fake_volume_obj(self.ctx) snapshot_get_by_id.return_value = snapshot_obj volume_get_by_id.return_value = fake_volume_obj snapshot_id = UUID req = fakes.HTTPRequest.blank('/v3/snapshots/%s' % snapshot_id) req.environ['cinder.context'] = self.ctx resp = self.controller.delete(req, snapshot_id) self.assertEqual(HTTPStatus.ACCEPTED, resp.status_int) def test_snapshot_delete_invalid_id(self): self.mock_object(volume.api.API, "delete_snapshot", fake_snapshot_delete) snapshot_id = INVALID_UUID req = fakes.HTTPRequest.blank('/v3/snapshots/%s' % snapshot_id) self.assertRaises(exception.SnapshotNotFound, self.controller.delete, req, snapshot_id) @mock.patch('cinder.db.snapshot_metadata_get', return_value=dict()) @mock.patch('cinder.objects.Volume.get_by_id') @mock.patch('cinder.objects.Snapshot.get_by_id') def test_snapshot_show(self, snapshot_get_by_id, volume_get_by_id, snapshot_metadata_get): snapshot = { 'id': UUID, 'volume_id': fake.VOLUME_ID, 'status': fields.SnapshotStatus.AVAILABLE, 'volume_size': 100, 'display_name': 'Default name', 'display_description': 'Default description', 'expected_attrs': ['metadata'], } snapshot_obj = fake_snapshot.fake_snapshot_obj(self.ctx, **snapshot) fake_volume_obj = fake_volume.fake_volume_obj(self.ctx) snapshot_get_by_id.return_value = snapshot_obj volume_get_by_id.return_value = fake_volume_obj req = fakes.HTTPRequest.blank('/v3/snapshots/%s' % UUID) req.environ['cinder.context'] = self.ctx resp_dict = self.controller.show(req, UUID) self.assertIn('snapshot', resp_dict) self.assertEqual(UUID, resp_dict['snapshot']['id']) self.assertIn('updated_at', resp_dict['snapshot']) 
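
    # NOTE: a minimal illustrative sketch (not asserted anywhere in this
    # file) of the snapshot view shape that test_snapshot_show above and
    # test_snapshot_detail below only spot-check; field names and values
    # follow the expected dict used in test_snapshot_update:
    #
    #     {'snapshot': {'id': UUID,
    #                   'volume_id': fake.VOLUME_ID,
    #                   'status': fields.SnapshotStatus.AVAILABLE,
    #                   'size': 100,
    #                   'name': 'Default name',
    #                   'description': 'Default description',
    #                   'metadata': {},
    #                   'created_at': <datetime>,
    #                   'updated_at': None}}
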
def test_snapshot_show_invalid_id(self): snapshot_id = INVALID_UUID req = fakes.HTTPRequest.blank('/v3/snapshots/%s' % snapshot_id) self.assertRaises(exception.SnapshotNotFound, self.controller.show, req, snapshot_id) @mock.patch('cinder.db.snapshot_metadata_get', return_value=dict()) @mock.patch('cinder.objects.Volume.get_by_id') @mock.patch('cinder.objects.Snapshot.get_by_id') @mock.patch('cinder.volume.api.API.get_all_snapshots') def test_snapshot_detail(self, get_all_snapshots, snapshot_get_by_id, volume_get_by_id, snapshot_metadata_get): snapshot = { 'id': UUID, 'volume_id': fake.VOLUME_ID, 'status': fields.SnapshotStatus.AVAILABLE, 'volume_size': 100, 'display_name': 'Default name', 'display_description': 'Default description', 'expected_attrs': ['metadata'] } ctx = context.RequestContext(fake.PROJECT_ID, fake.USER_ID, True) snapshot_obj = fake_snapshot.fake_snapshot_obj(ctx, **snapshot) fake_volume_obj = fake_volume.fake_volume_obj(ctx) snapshot_get_by_id.return_value = snapshot_obj volume_get_by_id.return_value = fake_volume_obj snapshots = objects.SnapshotList(objects=[snapshot_obj]) get_all_snapshots.return_value = snapshots req = fakes.HTTPRequest.blank('/v3/snapshots/detail') resp_dict = self.controller.detail(req) self.assertIn('snapshots', resp_dict) resp_snapshots = resp_dict['snapshots'] self.assertEqual(1, len(resp_snapshots)) self.assertIn('updated_at', resp_snapshots[0]) resp_snapshot = resp_snapshots.pop() self.assertEqual(UUID, resp_snapshot['id']) @mock.patch.object(db, 'snapshot_get_all_by_project', v2_fakes.fake_snapshot_get_all_by_project) @mock.patch.object(db, 'snapshot_get_all', v2_fakes.fake_snapshot_get_all) @mock.patch('cinder.db.snapshot_metadata_get', return_value=dict()) def test_admin_list_snapshots_limited_to_project(self, snapshot_metadata_get): req = fakes.HTTPRequest.blank('/v3/%s/snapshots' % fake.PROJECT_ID, use_admin_context=True) res = self.controller.index(req) self.assertIn('snapshots', res) self.assertEqual(1, len(res['snapshots'])) @mock.patch('cinder.db.snapshot_metadata_get', return_value=dict()) def test_list_snapshots_with_limit_and_offset(self, snapshot_metadata_get): def list_snapshots_with_limit_and_offset(snaps, is_admin): req = fakes.HTTPRequest.blank('/v3/%s/snapshots?limit=1' '&offset=1' % fake.PROJECT_ID, use_admin_context=is_admin) res = self.controller.index(req) self.assertIn('snapshots', res) self.assertEqual(1, len(res['snapshots'])) self.assertEqual(snaps[1].id, res['snapshots'][0]['id']) self.assertIn('updated_at', res['snapshots'][0]) # Test that we get an empty list with an offset greater than the # number of items req = fakes.HTTPRequest.blank('/v3/snapshots?limit=1&offset=3') self.assertEqual({'snapshots': []}, self.controller.index(req)) volume, snaps = self._create_db_snapshots(3) # admin case list_snapshots_with_limit_and_offset(snaps, is_admin=True) # non-admin case list_snapshots_with_limit_and_offset(snaps, is_admin=False) @mock.patch.object(db, 'snapshot_get_all_by_project') @mock.patch('cinder.db.snapshot_metadata_get', return_value=dict()) def test_list_snapshots_with_wrong_limit_and_offset(self, mock_metadata_get, mock_snapshot_get_all): """Test list with negative and non numeric limit and offset.""" mock_snapshot_get_all.return_value = [] # Negative limit req = fakes.HTTPRequest.blank('/v3/snapshots?limit=-1&offset=1') self.assertRaises(webob.exc.HTTPBadRequest, self.controller.index, req) # Non numeric limit req = fakes.HTTPRequest.blank('/v3/snapshots?limit=a&offset=1') 
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.index, req) # Negative offset req = fakes.HTTPRequest.blank('/v3/snapshots?limit=1&offset=-1') self.assertRaises(webob.exc.HTTPBadRequest, self.controller.index, req) # Non numeric offset req = fakes.HTTPRequest.blank('/v3/snapshots?limit=1&offset=a') self.assertRaises(webob.exc.HTTPBadRequest, self.controller.index, req) # Test that we get an exception HTTPBadRequest(400) with an offset # greater than the maximum offset value. url = '/v3/snapshots?limit=1&offset=323245324356534235' req = fakes.HTTPRequest.blank(url) self.assertRaises(webob.exc.HTTPBadRequest, self.controller.index, req) def _assert_list_next(self, expected_query=None, project=fake.PROJECT_ID, **kwargs): """Check a page of snapshots list.""" # Since we are accessing v2 api directly we don't need to specify # v2 in the request path, if we did, we'd get /v3/v2 links back request_path = '/v3/%s/snapshots' % project expected_path = request_path # Construct the query if there are kwargs if kwargs: request_str = request_path + '?' + urllib.urlencode(kwargs) else: request_str = request_path # Make the request req = fakes.HTTPRequest.blank(request_str) res = self.controller.index(req) # We only expect to have a next link if there is an actual expected # query. if expected_query: # We must have the links self.assertIn('snapshots_links', res) links = res['snapshots_links'] # Must be a list of links, even if we only get 1 back self.assertIsInstance(links, list) next_link = links[0] # rel entry must be next self.assertIn('rel', next_link) self.assertIn('next', next_link['rel']) # href entry must have the right path self.assertIn('href', next_link) href_parts = urllib.urlparse(next_link['href']) self.assertEqual(expected_path, href_parts.path) # And the query from the next link must match what we were # expecting params = urllib.parse_qs(href_parts.query) self.assertDictEqual(expected_query, params) # Make sure we don't have links if we were not expecting them else: self.assertNotIn('snapshots_links', res) def _create_db_snapshots(self, num_snaps): volume = utils.create_volume(self.ctx, volume_type_id=None) snaps = [utils.create_snapshot(self.ctx, volume.id, display_name='snap' + str(i)) for i in range(num_snaps)] self.addCleanup(db.volume_destroy, self.ctx, volume.id) for snap in snaps: self.addCleanup(db.snapshot_destroy, self.ctx, snap.id) snaps.reverse() return volume, snaps def test_list_snapshots_next_link_default_limit(self): """Test that snapshot list pagination is limited by osapi_max_limit.""" volume, snaps = self._create_db_snapshots(3) # NOTE(geguileo): Since cinder.api.common.limited has already been # imported his argument max_limit already has a default value of 1000 # so it doesn't matter that we change it to 2. That's why we need to # mock it and send it current value. 
We still need to set the default # value because other sections of the code use it, for example # _get_collection_links CONF.set_default('osapi_max_limit', 2) def get_pagination_params(params, max_limit=CONF.osapi_max_limit, original_call=common.get_pagination_params): return original_call(params, max_limit) def _get_limit_param(params, max_limit=CONF.osapi_max_limit, original_call=common._get_limit_param): return original_call(params, max_limit) with mock.patch.object(common, 'get_pagination_params', get_pagination_params), \ mock.patch.object(common, '_get_limit_param', _get_limit_param): # The link from the first page should link to the second self._assert_list_next({'marker': [snaps[1].id]}) # Second page should have no next link self._assert_list_next(marker=snaps[1].id) def test_list_snapshots_next_link_with_limit(self): """Test snapshot list pagination with specific limit.""" volume, snaps = self._create_db_snapshots(2) # The link from the first page should link to the second self._assert_list_next({'limit': ['1'], 'marker': [snaps[0].id]}, limit=1) # Even though there are no more elements, we should get a next element # per specification. expected = {'limit': ['1'], 'marker': [snaps[1].id]} self._assert_list_next(expected, limit=1, marker=snaps[0].id) # When we go beyond the number of elements there should be no more # next links self._assert_list_next(limit=1, marker=snaps[1].id) @mock.patch.object(db, 'snapshot_get_all_by_project', v2_fakes.fake_snapshot_get_all_by_project) @mock.patch.object(db, 'snapshot_get_all', v2_fakes.fake_snapshot_get_all) @mock.patch('cinder.db.snapshot_metadata_get', return_value=dict()) def test_admin_list_snapshots_all_tenants(self, snapshot_metadata_get): req = fakes.HTTPRequest.blank('/v3/%s/snapshots?all_tenants=1' % fake.PROJECT_ID, use_admin_context=True) res = self.controller.index(req) self.assertIn('snapshots', res) self.assertEqual(3, len(res['snapshots'])) @mock.patch.object(db, 'snapshot_get_all') @mock.patch('cinder.db.snapshot_metadata_get', return_value=dict()) def test_admin_list_snapshots_by_tenant_id(self, snapshot_metadata_get, snapshot_get_all): def get_all(context, filters=None, marker=None, limit=None, sort_keys=None, sort_dirs=None, offset=None): if 'project_id' in filters and 'tenant1' in filters['project_id']: return [v2_fakes.fake_snapshot(fake.VOLUME_ID, tenant_id='tenant1')] else: return [] snapshot_get_all.side_effect = get_all req = fakes.HTTPRequest.blank('/v3/%s/snapshots?all_tenants=1' '&project_id=tenant1' % fake.PROJECT_ID, use_admin_context=True) res = self.controller.index(req) self.assertIn('snapshots', res) self.assertEqual(1, len(res['snapshots'])) @mock.patch.object(db, 'snapshot_get_all_by_project', v2_fakes.fake_snapshot_get_all_by_project) @mock.patch('cinder.db.snapshot_metadata_get', return_value=dict()) def test_all_tenants_non_admin_gets_all_tenants(self, snapshot_metadata_get): req = fakes.HTTPRequest.blank('/v3/%s/snapshots?all_tenants=1' % fake.PROJECT_ID) res = self.controller.index(req) self.assertIn('snapshots', res) self.assertEqual(1, len(res['snapshots'])) @mock.patch.object(db, 'snapshot_get_all_by_project', v2_fakes.fake_snapshot_get_all_by_project) @mock.patch.object(db, 'snapshot_get_all', v2_fakes.fake_snapshot_get_all) @mock.patch('cinder.db.snapshot_metadata_get', return_value=dict()) def test_non_admin_get_by_project(self, snapshot_metadata_get): req = fakes.HTTPRequest.blank('/v3/%s/snapshots' % fake.PROJECT_ID) res = self.controller.index(req) self.assertIn('snapshots', res) 
        self.assertEqual(1, len(res['snapshots']))

    def _create_snapshot_bad_body(self, body):
        req = fakes.HTTPRequest.blank('/v3/%s/snapshots' % fake.PROJECT_ID)
        req.method = 'POST'

        self.assertRaises(exception.ValidationError,
                          self.controller.create, req, body=body)

    def test_create_no_body(self):
        self._create_snapshot_bad_body(body=None)

    def test_create_missing_snapshot(self):
        body = {'foo': {'a': 'b'}}
        self._create_snapshot_bad_body(body=body)

    def test_create_malformed_entity(self):
        body = {'snapshot': 'string'}
        self._create_snapshot_bad_body(body=body)


cinder-27.0.0/cinder/tests/unit/api/v2/test_volume_metadata.py

# Copyright 2013 OpenStack Foundation.
# All Rights Reserved.
#
#    Licensed under the Apache License, Version 2.0 (the "License"); you may
#    not use this file except in compliance with the License. You may obtain
#    a copy of the License at
#
#         http://www.apache.org/licenses/LICENSE-2.0
#
#    Unless required by applicable law or agreed to in writing, software
#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
#    License for the specific language governing permissions and limitations
#    under the License.

from http import HTTPStatus
from unittest import mock

from oslo_config import cfg
from oslo_serialization import jsonutils
import webob

from cinder.api import extensions
from cinder.api.v2 import volume_metadata
from cinder.api.v2 import volumes
from cinder import db
from cinder import exception
from cinder import objects
from cinder.tests.unit.api import fakes
from cinder.tests.unit.api.v2 import fakes as v2_fakes
from cinder.tests.unit import fake_constants as fake
from cinder.tests.unit import fake_volume
from cinder.tests.unit import test
from cinder import volume
from cinder.volume import api as volume_api

CONF = cfg.CONF


def return_create_volume_metadata(context, volume_id, metadata, delete,
                                  meta_type):
    return fake_volume_metadata()


def return_new_volume_metadata(context, volume_id, metadata, delete,
                               meta_type):
    return fake_new_volume_metadata()


def return_create_volume_metadata_insensitive(context, snapshot_id,
                                              metadata, delete, meta_type):
    return fake_volume_metadata_insensitive()


def return_volume_metadata(context, volume_id):
    return fake_volume_metadata()


def fake_volume_metadata():
    metadata = {
        "key1": "value1",
        "key2": "value2",
        "key3": "value3",
    }
    return metadata


def fake_new_volume_metadata():
    metadata = {
        'key10': 'value10',
        'key99': 'value99',
        'KEY20': 'value20',
    }
    return metadata


def fake_volume_metadata_insensitive():
    metadata = {
        "key1": "value1",
        "key2": "value2",
        "key3": "value3",
        "KEY4": "value4",
    }
    return metadata


def get_volume(*args, **kwargs):
    vol = {'name': 'fake',
           'metadata': {},
           'project_id': fake.PROJECT_ID}
    return fake_volume.fake_volume_obj(args[0], **vol)


def return_volume_nonexistent(*args, **kwargs):
    raise exception.VolumeNotFound('bogus test message')


class VolumeMetaDataTest(test.TestCase):

    def setUp(self):
        super(VolumeMetaDataTest, self).setUp()
        self.volume_api = volume_api.API()
        self.mock_object(volume.api.API, 'get', get_volume)
        self.mock_object(db, 'volume_metadata_get',
                         return_volume_metadata)
        self.mock_object(db, 'service_get_all',
                         return_value=v2_fakes.fake_service_get_all_by_topic(
                             None, None),
                         autospec=True)
        self.mock_object(self.volume_api,
'update_volume_metadata') self.ext_mgr = extensions.ExtensionManager() self.ext_mgr.extensions = {} self.volume_controller = volumes.VolumeController(self.ext_mgr) self.controller = volume_metadata.Controller() self.req_id = fake.REQUEST_ID self.url = '/v2/%s/volumes/%s/metadata' % ( fake.PROJECT_ID, self.req_id) vol = {"size": 100, "display_name": "Volume Test Name", "display_description": "Volume Test Desc", "availability_zone": "zone1:host1", "metadata": {}, "volume_type": self.vt['id']} body = {"volume": vol} req = fakes.HTTPRequest.blank('/v2/%s/volumes' % fake.PROJECT_ID) self.volume_controller.create(req, body=body) def test_index(self): req = fakes.HTTPRequest.blank(self.url) res_dict = self.controller.index(req, self.req_id) expected = { 'metadata': { 'key1': 'value1', 'key2': 'value2', 'key3': 'value3', }, } self.assertEqual(expected, res_dict) def test_index_nonexistent_volume(self): self.mock_object(db, 'volume_metadata_get', return_volume_nonexistent) req = fakes.HTTPRequest.blank(self.url) self.assertRaises(exception.VolumeNotFound, self.controller.index, req, self.url) def test_index_no_data(self): self.mock_object(db, 'volume_metadata_get', return_value={}) req = fakes.HTTPRequest.blank(self.url) res_dict = self.controller.index(req, self.req_id) expected = {'metadata': {}} self.assertEqual(expected, res_dict) def test_show(self): req = fakes.HTTPRequest.blank(self.url + '/key2') res_dict = self.controller.show(req, self.req_id, 'key2') expected = {'meta': {'key2': 'value2'}} self.assertEqual(expected, res_dict) def test_show_nonexistent_volume(self): self.mock_object(db, 'volume_metadata_get', return_volume_nonexistent) req = fakes.HTTPRequest.blank(self.url + '/key2') self.assertRaises(exception.VolumeNotFound, self.controller.show, req, self.req_id, 'key2') def test_show_meta_not_found(self): self.mock_object(db, 'volume_metadata_get', return_value={}) req = fakes.HTTPRequest.blank(self.url + '/key6') self.assertRaises(exception.VolumeMetadataNotFound, self.controller.show, req, self.req_id, 'key6') @mock.patch.object(db, 'volume_metadata_delete') @mock.patch.object(db, 'volume_metadata_get') def test_delete(self, metadata_get, metadata_delete): fake_volume = objects.Volume(id=self.req_id, status='available') fake_context = mock.Mock() metadata_get.side_effect = return_volume_metadata req = fakes.HTTPRequest.blank(self.url + '/key2') req.method = 'DELETE' req.environ['cinder.context'] = fake_context with mock.patch.object(self.controller.volume_api, 'get') as get_volume: get_volume.return_value = fake_volume res = self.controller.delete(req, self.req_id, 'key2') self.assertEqual(HTTPStatus.OK, res.status_int) get_volume.assert_called_once_with(fake_context, self.req_id) @mock.patch.object(db, 'volume_metadata_delete') @mock.patch.object(db, 'volume_metadata_get') def test_delete_volume_maintenance(self, metadata_get, metadata_delete): fake_volume = objects.Volume(id=self.req_id, status='maintenance') fake_context = mock.Mock() metadata_get.side_effect = return_volume_metadata req = fakes.HTTPRequest.blank(self.url + '/key2') req.method = 'DELETE' req.environ['cinder.context'] = fake_context with mock.patch.object(self.controller.volume_api, 'get') as get_volume: get_volume.return_value = fake_volume self.assertRaises(exception.InvalidVolume, self.controller.delete, req, self.req_id, 'key2') get_volume.assert_called_once_with(fake_context, self.req_id) @mock.patch.object(db, 'volume_metadata_delete') @mock.patch.object(db, 'volume_metadata_get') def 
test_delete_nonexistent_volume(self, metadata_get, metadata_delete): fake_volume = objects.Volume(id=self.req_id, status='available') fake_context = mock.Mock() metadata_get.side_effect = return_volume_metadata metadata_delete.side_effect = return_volume_nonexistent req = fakes.HTTPRequest.blank(self.url + '/key1') req.method = 'DELETE' req.environ['cinder.context'] = fake_context with mock.patch.object(self.controller.volume_api, 'get') as get_volume: get_volume.return_value = fake_volume self.assertRaises(exception.VolumeNotFound, self.controller.delete, req, self.req_id, 'key1') get_volume.assert_called_once_with(fake_context, self.req_id) def test_delete_meta_not_found(self): self.mock_object(db, 'volume_metadata_get', return_value={}) req = fakes.HTTPRequest.blank(self.url + '/key6') req.method = 'DELETE' self.assertRaises(exception.VolumeMetadataNotFound, self.controller.delete, req, self.req_id, 'key6') @mock.patch.object(db, 'volume_metadata_update') @mock.patch.object(db, 'volume_metadata_get') def test_create(self, metadata_get, metadata_update): fake_volume = {'id': self.req_id, 'status': 'available'} fake_context = mock.Mock() metadata_get.return_value = {} metadata_update.side_effect = return_create_volume_metadata req = fakes.HTTPRequest.blank('/v2/volume_metadata') req.method = 'POST' req.content_type = "application/json" body = {"metadata": {"key1": "value1", "key2": "value2", "key3": "value3", }} req.body = jsonutils.dump_as_bytes(body) req.environ['cinder.context'] = fake_context with mock.patch.object(self.controller.volume_api, 'get') as get_volume: get_volume.return_value = fake_volume res_dict = self.controller.create(req, self.req_id, body=body) self.assertEqual(body, res_dict) @mock.patch.object(db, 'volume_metadata_update') @mock.patch.object(db, 'volume_metadata_get') def test_create_volume_maintenance(self, metadata_get, metadata_update): fake_volume = {'id': self.req_id, 'status': 'maintenance'} fake_context = mock.Mock() metadata_get.return_value = {} metadata_update.side_effect = return_create_volume_metadata req = fakes.HTTPRequest.blank('/v2/volume_metadata') req.method = 'POST' req.content_type = "application/json" body = {"metadata": {"key1": "value1", "key2": "value2", "key3": "value3", }} req.body = jsonutils.dump_as_bytes(body) req.environ['cinder.context'] = fake_context with mock.patch.object(self.controller.volume_api, 'get') as get_volume: get_volume.return_value = fake_volume self.assertRaises(exception.InvalidVolume, self.controller.create, req, self.req_id, body=body) @mock.patch.object(db, 'volume_metadata_update') @mock.patch.object(db, 'volume_metadata_get') def test_create_with_keys_in_uppercase_and_lowercase(self, metadata_get, metadata_update): # if the keys in uppercase_and_lowercase, should return the one # which server added fake_volume = {'id': self.req_id, 'status': 'available'} fake_context = mock.Mock() metadata_get.return_value = {} metadata_update.side_effect = return_create_volume_metadata_insensitive req = fakes.HTTPRequest.blank('/v2/volume_metadata') req.method = 'POST' req.content_type = "application/json" body = {"metadata": {"key1": "value1", "KEY1": "value1", "key2": "value2", "KEY2": "value2", "key3": "value3", "KEY4": "value4"}} expected = {"metadata": {"key1": "value1", "key2": "value2", "key3": "value3", "KEY4": "value4"}} req.body = jsonutils.dump_as_bytes(body) req.environ['cinder.context'] = fake_context with mock.patch.object(self.controller.volume_api, 'get') as get_volume: get_volume.return_value = fake_volume 
res_dict = self.controller.create(req, self.req_id, body=body) self.assertEqual(expected, res_dict) def test_create_empty_body(self): self.mock_object(db, 'volume_metadata_update', return_create_volume_metadata) req = fakes.HTTPRequest.blank(self.url) req.method = 'POST' req.headers["content-type"] = "application/json" self.assertRaises(exception.ValidationError, self.controller.create, req, self.req_id, body=None) def test_create_metadata_keys_value_none(self): self.mock_object(db, 'volume_metadata_update', return_create_volume_metadata) req = fakes.HTTPRequest.blank(self.url) req.method = 'POST' req.headers["content-type"] = "application/json" body = {"meta": {"key": None}} self.assertRaises(exception.ValidationError, self.controller.create, req, self.req_id, body=body) def test_create_item_empty_key(self): self.mock_object(db, 'volume_metadata_update', return_create_volume_metadata) req = fakes.HTTPRequest.blank(self.url + '/key1') req.method = 'PUT' body = {"meta": {"": "value1"}} req.body = jsonutils.dump_as_bytes(body) req.headers["content-type"] = "application/json" self.assertRaises(exception.ValidationError, self.controller.create, req, self.req_id, body=body) def test_create_item_key_too_long(self): self.mock_object(db, 'volume_metadata_update', return_create_volume_metadata) req = fakes.HTTPRequest.blank(self.url + '/key1') req.method = 'PUT' body = {"meta": {("a" * 260): "value1"}} req.body = jsonutils.dump_as_bytes(body) req.headers["content-type"] = "application/json" self.assertRaises(exception.ValidationError, self.controller.create, req, self.req_id, body=body) def test_create_nonexistent_volume(self): self.mock_object(volume.api.API, 'get', return_volume_nonexistent) self.mock_object(db, 'volume_metadata_get', return_volume_metadata) self.mock_object(db, 'volume_metadata_update', return_create_volume_metadata) req = fakes.HTTPRequest.blank('/v2/volume_metadata') req.method = 'POST' req.content_type = "application/json" body = {"metadata": {"key9": "value9"}} req.body = jsonutils.dump_as_bytes(body) self.assertRaises(exception.VolumeNotFound, self.controller.create, req, self.req_id, body=body) @mock.patch.object(db, 'volume_metadata_update') def test_update_all(self, metadata_update): fake_volume = {'id': self.req_id, 'status': 'available'} fake_context = mock.Mock() metadata_update.side_effect = return_new_volume_metadata req = fakes.HTTPRequest.blank(self.url) req.method = 'PUT' req.content_type = "application/json" expected = { 'metadata': { 'key10': 'value10', 'key99': 'value99', 'KEY20': 'value20', }, } req.body = jsonutils.dump_as_bytes(expected) req.environ['cinder.context'] = fake_context with mock.patch.object(self.controller.volume_api, 'get') as get_volume: get_volume.return_value = fake_volume res_dict = self.controller.update_all(req, self.req_id, body=expected) self.assertEqual(expected, res_dict) get_volume.assert_called_once_with(fake_context, self.req_id) @mock.patch.object(db, 'volume_metadata_update') def test_update_all_volume_maintenance(self, metadata_update): fake_volume = {'id': self.req_id, 'status': 'maintenance'} fake_context = mock.Mock() metadata_update.side_effect = return_new_volume_metadata req = fakes.HTTPRequest.blank(self.url) req.method = 'PUT' req.content_type = "application/json" expected = { 'metadata': { 'key10': 'value10', 'key99': 'value99', 'KEY20': 'value20', }, } req.body = jsonutils.dump_as_bytes(expected) req.environ['cinder.context'] = fake_context with mock.patch.object(self.controller.volume_api, 'get') as get_volume: 
get_volume.return_value = fake_volume self.assertRaises(exception.InvalidVolume, self.controller.update_all, req, self.req_id, body=expected) self.assertFalse(metadata_update.called) get_volume.assert_called_once_with(fake_context, self.req_id) @mock.patch.object(db, 'volume_metadata_update') @mock.patch.object(db, 'volume_metadata_get') def test_update_all_with_keys_in_uppercase_and_lowercase(self, metadata_get, metadata_update): fake_volume = {'id': self.req_id, 'status': 'available'} fake_context = mock.Mock() metadata_get.side_effect = return_create_volume_metadata metadata_update.side_effect = return_new_volume_metadata req = fakes.HTTPRequest.blank(self.url) req.method = 'PUT' req.content_type = "application/json" body = { 'metadata': { 'key10': 'value10', 'KEY10': 'value10', 'key99': 'value99', 'KEY20': 'value20', }, } expected = { 'metadata': { 'key10': 'value10', 'key99': 'value99', 'KEY20': 'value20', }, } req.body = jsonutils.dump_as_bytes(expected) req.environ['cinder.context'] = fake_context with mock.patch.object(self.controller.volume_api, 'get') as get_volume: get_volume.return_value = fake_volume res_dict = self.controller.update_all(req, self.req_id, body=body) self.assertEqual(expected, res_dict) get_volume.assert_called_once_with(fake_context, self.req_id) @mock.patch.object(db, 'volume_metadata_update') def test_update_all_empty_container(self, metadata_update): fake_volume = {'id': self.req_id, 'status': 'available'} fake_context = mock.Mock() metadata_update.return_value = {} req = fakes.HTTPRequest.blank(self.url) req.method = 'PUT' req.content_type = "application/json" expected = {'metadata': {}} req.body = jsonutils.dump_as_bytes(expected) req.environ['cinder.context'] = fake_context with mock.patch.object(self.controller.volume_api, 'get') as get_volume: get_volume.return_value = fake_volume res_dict = self.controller.update_all(req, self.req_id, body=expected) self.assertEqual(expected, res_dict) get_volume.assert_called_once_with(fake_context, self.req_id) def test_update_all_malformed_container(self): self.mock_object(db, 'volume_metadata_update', return_create_volume_metadata) req = fakes.HTTPRequest.blank(self.url) req.method = 'PUT' req.content_type = "application/json" expected = {'meta': {}} req.body = jsonutils.dump_as_bytes(expected) self.assertRaises(exception.ValidationError, self.controller.update_all, req, self.req_id, body=expected) def test_update_all_malformed_data(self): self.mock_object(db, 'volume_metadata_update', return_create_volume_metadata) req = fakes.HTTPRequest.blank(self.url) req.method = 'PUT' req.content_type = "application/json" expected = {'metadata': ['asdf']} req.body = jsonutils.dump_as_bytes(expected) self.assertRaises(exception.ValidationError, self.controller.update_all, req, self.req_id, body=expected) def test_update_all_nonexistent_volume(self): self.mock_object(db, 'volume_get', return_volume_nonexistent) req = fakes.HTTPRequest.blank(self.url) req.method = 'PUT' req.content_type = "application/json" body = {'metadata': {'key10': 'value10'}} req.body = jsonutils.dump_as_bytes(body) self.assertRaises(exception.VolumeNotFound, self.controller.update_all, req, '100', body=body) @mock.patch.object(db, 'volume_metadata_update') def test_update_item(self, metadata_update): fake_volume = {'id': self.req_id, 'status': 'available'} fake_context = mock.Mock() metadata_update.side_effect = return_create_volume_metadata req = fakes.HTTPRequest.blank(self.url + '/key1') req.method = 'PUT' body = {"meta": {"key1": "value1"}} req.body 
= jsonutils.dump_as_bytes(body) req.headers["content-type"] = "application/json" req.environ['cinder.context'] = fake_context with mock.patch.object(self.controller.volume_api, 'get') as get_volume: get_volume.return_value = fake_volume res_dict = self.controller.update(req, self.req_id, 'key1', body=body) expected = {'meta': {'key1': 'value1'}} self.assertEqual(expected, res_dict) get_volume.assert_called_once_with(fake_context, self.req_id) def test_update_metadata_item_keys_value_none(self): self.mock_object(db, 'volume_metadata_update', return_create_volume_metadata) req = fakes.HTTPRequest.blank(self.url + '/key1') req.method = 'PUT' body = {"meta": {"a": None}} req.body = jsonutils.dump_as_bytes(body) req.headers["content-type"] = "application/json" self.assertRaises(exception.ValidationError, self.controller.update, req, self.req_id, 'key1', body=body) @mock.patch.object(db, 'volume_metadata_update') def test_update_item_volume_maintenance(self, metadata_update): fake_volume = {'id': self.req_id, 'status': 'maintenance'} fake_context = mock.Mock() metadata_update.side_effect = return_create_volume_metadata req = fakes.HTTPRequest.blank(self.url + '/key1') req.method = 'PUT' body = {"meta": {"key1": "value1"}} req.body = jsonutils.dump_as_bytes(body) req.headers["content-type"] = "application/json" req.environ['cinder.context'] = fake_context with mock.patch.object(self.controller.volume_api, 'get') as get_volume: get_volume.return_value = fake_volume self.assertRaises(exception.InvalidVolume, self.controller.update, req, self.req_id, 'key1', body=body) self.assertFalse(metadata_update.called) get_volume.assert_called_once_with(fake_context, self.req_id) def test_update_item_nonexistent_volume(self): self.mock_object(db, 'volume_get', return_volume_nonexistent) req = fakes.HTTPRequest.blank( '/v2/%s/volumes/asdf/metadata/key1' % fake.PROJECT_ID) req.method = 'PUT' body = {"meta": {"key1": "value1"}} req.body = jsonutils.dump_as_bytes(body) req.headers["content-type"] = "application/json" self.assertRaises(exception.VolumeNotFound, self.controller.update, req, self.req_id, 'key1', body=body) def test_update_item_empty_body(self): self.mock_object(db, 'volume_metadata_update', return_create_volume_metadata) req = fakes.HTTPRequest.blank(self.url + '/key1') req.method = 'PUT' req.headers["content-type"] = "application/json" self.assertRaises(exception.ValidationError, self.controller.update, req, self.req_id, 'key1', body=None) @mock.patch.object(db, 'volume_metadata_update') def test_update_item_empty_key(self, metadata_update): fake_volume = {'id': self.req_id, 'status': 'available'} fake_context = mock.Mock() metadata_update.side_effect = return_create_volume_metadata req = fakes.HTTPRequest.blank(self.url + '/key1') req.method = 'PUT' body = {"meta": {"": "value1"}} req.body = jsonutils.dump_as_bytes(body) req.headers["content-type"] = "application/json" req.environ['cinder.context'] = fake_context with mock.patch.object(self.controller.volume_api, 'get') as get_volume: get_volume.return_value = fake_volume self.assertRaises(exception.ValidationError, self.controller.update, req, self.req_id, '', body=body) self.assertFalse(metadata_update.called) @mock.patch.object(db, 'volume_metadata_update') def test_update_item_key_too_long(self, metadata_update): fake_volume = {'id': self.req_id, 'status': 'available'} fake_context = mock.Mock() metadata_update.side_effect = return_create_volume_metadata req = fakes.HTTPRequest.blank(self.url + '/key1') req.method = 'PUT' body = {"meta": {("a" 
* 260): "value1"}} req.body = jsonutils.dump_as_bytes(body) req.headers["content-type"] = "application/json" req.environ['cinder.context'] = fake_context with mock.patch.object(self.controller.volume_api, 'get') as get_volume: get_volume.return_value = fake_volume self.assertRaises(exception.ValidationError, self.controller.update, req, self.req_id, ("a" * 260), body=body) self.assertFalse(metadata_update.called) @mock.patch.object(db, 'volume_metadata_update') def test_update_item_value_too_long(self, metadata_update): fake_volume = {'id': self.req_id, 'status': 'available'} fake_context = mock.Mock() metadata_update.side_effect = return_create_volume_metadata req = fakes.HTTPRequest.blank(self.url + '/key1') req.method = 'PUT' body = {"meta": {"key1": ("a" * 260)}} req.body = jsonutils.dump_as_bytes(body) req.headers["content-type"] = "application/json" req.environ['cinder.context'] = fake_context with mock.patch.object(self.controller.volume_api, 'get') as get_volume: get_volume.return_value = fake_volume self.assertRaises(exception.ValidationError, self.controller.update, req, self.req_id, "key1", body=body) self.assertFalse(metadata_update.called) def test_update_item_too_many_keys(self): self.mock_object(db, 'volume_metadata_update', return_create_volume_metadata) req = fakes.HTTPRequest.blank(self.url + '/key1') req.method = 'PUT' body = {"meta": {"key1": "value1", "key2": "value2"}} req.body = jsonutils.dump_as_bytes(body) req.headers["content-type"] = "application/json" self.assertRaises(exception.ValidationError, self.controller.update, req, self.req_id, 'key1', body=body) def test_update_item_body_uri_mismatch(self): self.mock_object(db, 'volume_metadata_update', return_create_volume_metadata) req = fakes.HTTPRequest.blank(self.url + '/bad') req.method = 'PUT' body = {"meta": {"key1": "value1"}} req.body = jsonutils.dump_as_bytes(body) req.headers["content-type"] = "application/json" self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update, req, self.req_id, 'bad', body=body) @mock.patch.object(db, 'volume_metadata_update') def test_invalid_metadata_items_on_create(self, metadata_update): fake_volume = {'id': self.req_id, 'status': 'available'} fake_context = mock.Mock() metadata_update.side_effect = return_create_volume_metadata req = fakes.HTTPRequest.blank(self.url) req.method = 'POST' req.headers["content-type"] = "application/json" # test for long key data = {"metadata": {"a" * 260: "value1"}} req.body = jsonutils.dump_as_bytes(data) req.environ['cinder.context'] = fake_context with mock.patch.object(self.controller.volume_api, 'get') as get_volume: get_volume.return_value = fake_volume self.assertRaises(exception.ValidationError, self.controller.create, req, self.req_id, body=data) # test for long value data = {"metadata": {"key": "v" * 260}} req.body = jsonutils.dump_as_bytes(data) req.environ['cinder.context'] = fake_context with mock.patch.object(self.controller.volume_api, 'get') as get_volume: get_volume.return_value = fake_volume self.assertRaises(exception.ValidationError, self.controller.create, req, self.req_id, body=data) # test for empty key. 
data = {"metadata": {"": "value1"}} req.body = jsonutils.dump_as_bytes(data) req.environ['cinder.context'] = fake_context with mock.patch.object(self.controller.volume_api, 'get') as get_volume: get_volume.return_value = fake_volume self.assertRaises(exception.ValidationError, self.controller.create, req, self.req_id, body=data) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/api/v2/test_volumes.py0000664000175000017500000023354300000000000022671 0ustar00zuulzuul00000000000000# Copyright 2013 Josh Durgin # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import datetime from http import HTTPStatus import json from unittest import mock import urllib import ddt import fixtures import iso8601 from oslo_config import cfg import webob from cinder.api import common from cinder.api import extensions from cinder.api.v2.views import volumes as v_vol from cinder.api.v2 import volumes from cinder import context from cinder import db from cinder import exception from cinder import group as groupAPI from cinder import objects from cinder.objects import fields from cinder.tests.unit.api import fakes from cinder.tests.unit.api.v2 import fakes as v2_fakes from cinder.tests.unit import fake_constants as fake from cinder.tests.unit import fake_volume from cinder.tests.unit.image import fake as fake_image from cinder.tests.unit import test from cinder.tests.unit import utils from cinder.volume import api as volume_api CONF = cfg.CONF NS = '{http://docs.openstack.org/api/openstack-block-storage/2.0/content}' DEFAULT_AZ = "zone1:host1" @ddt.ddt class VolumeApiTest(test.TestCase): def setUp(self): super(VolumeApiTest, self).setUp() self.ext_mgr = extensions.ExtensionManager() self.ext_mgr.extensions = {} fake_image.mock_image_service(self) self.controller = volumes.VolumeController(self.ext_mgr) self.maxDiff = None self.ctxt = context.RequestContext(fake.USER_ID, fake.PROJECT_ID, True) # This will be cleaned up by the NestedTempfile fixture in base class self.tmp_path = self.useFixture(fixtures.TempDir()).path self.mock_object(objects.VolumeType, 'get_by_id', self.fake_volume_type_get) self.mock_object(v_vol.ViewBuilder, '_get_volume_type', v2_fakes.fake_volume_type_name_get) def fake_volume_type_get(self, context, id, *args, **kwargs): return {'id': id, 'name': 'vol_type_name', 'description': 'A fake volume type', 'is_public': True, 'projects': [], 'extra_specs': {}, 'created_at': None, 'deleted_at': None, 'updated_at': None, 'qos_specs_id': fake.QOS_SPEC_ID, 'deleted': False} @mock.patch( 'cinder.api.openstack.wsgi.Controller.validate_name_and_description') def test_volume_create(self, mock_validate): self.mock_object(volume_api.API, 'get', v2_fakes.fake_volume_get) self.mock_object(volume_api.API, "create", v2_fakes.fake_volume_api_create) self.mock_object(db.sqlalchemy.api, '_volume_type_get_full', v2_fakes.fake_volume_type_get) vol = self._vol_in_request_body() body = {"volume": vol} req = 
fakes.HTTPRequest.blank('/v3/volumes') res_dict = self.controller.create(req, body=body) ex = self._expected_vol_from_controller() self.assertEqual(ex, res_dict) self.assertTrue(mock_validate.called) @mock.patch.object(db, 'volume_get_all', v2_fakes.fake_volume_get_all) @mock.patch.object(db, 'service_get_all', return_value=v2_fakes.fake_service_get_all_by_topic( None, None), autospec=True) @mock.patch( 'cinder.api.openstack.wsgi.Controller.validate_name_and_description') def test_volume_create_with_type(self, mock_validate, mock_service_get): db_vol_type = db.volume_type_get_by_name(context.get_admin_context(), '__DEFAULT__') vol = self._vol_in_request_body(volume_type="FakeTypeName") body = {"volume": vol} req = fakes.HTTPRequest.blank('/v3/volumes') # Raise 404 when type name isn't valid self.assertRaises(exception.VolumeTypeNotFoundByName, self.controller.create, req, body=body) # Use correct volume type name vol = self._vol_in_request_body(volume_type=CONF.default_volume_type) body.update(dict(volume=vol)) res_dict = self.controller.create(req, body=body) volume_id = res_dict['volume']['id'] self.assertEqual(1, len(res_dict)) # Use correct volume type id vol = self._vol_in_request_body(volume_type=db_vol_type['id']) body.update(dict(volume=vol)) res_dict = self.controller.create(req, body=body) volume_id = res_dict['volume']['id'] self.assertEqual(1, len(res_dict)) vol_db = v2_fakes.create_fake_volume( volume_id, volume_type={'name': db_vol_type['name']}) vol_obj = fake_volume.fake_volume_obj(context.get_admin_context(), **vol_db) self.mock_object(volume_api.API, 'get_all', return_value=objects.VolumeList(objects=[vol_obj])) # NOTE(geguileo): This is required because common get_by_id method in # cinder.db.sqlalchemy.api caches the real get method. 
db.sqlalchemy.api._GET_METHODS = {} self.mock_object(db.sqlalchemy.api, '_volume_type_get_full', v2_fakes.fake_volume_type_get) req = fakes.HTTPRequest.blank('/v3/volumes/detail') res_dict = self.controller.detail(req) self.assertTrue(mock_validate.called) @classmethod def _vol_in_request_body(cls, size=v2_fakes.DEFAULT_VOL_SIZE, name=v2_fakes.DEFAULT_VOL_NAME, description=v2_fakes.DEFAULT_VOL_DESCRIPTION, availability_zone=DEFAULT_AZ, snapshot_id=None, source_volid=None, consistencygroup_id=None, volume_type=None, image_ref=None, image_id=None): vol = {"size": size, "name": name, "description": description, "availability_zone": availability_zone, "snapshot_id": snapshot_id, "source_volid": source_volid, "consistencygroup_id": consistencygroup_id, "volume_type": volume_type, } if image_id is not None: vol['image_id'] = image_id elif image_ref is not None: vol['imageRef'] = image_ref return vol def _expected_vol_from_controller( self, size=v2_fakes.DEFAULT_VOL_SIZE, availability_zone=DEFAULT_AZ, description=v2_fakes.DEFAULT_VOL_DESCRIPTION, name=v2_fakes.DEFAULT_VOL_NAME, consistencygroup_id=None, source_volid=None, snapshot_id=None, metadata=None, attachments=None, volume_type=v2_fakes.DEFAULT_VOL_TYPE, status=v2_fakes.DEFAULT_VOL_STATUS, with_migration_status=False, multiattach=False): metadata = metadata or {} attachments = attachments or [] volume = {'volume': {'attachments': attachments, 'availability_zone': availability_zone, 'bootable': 'false', 'consistencygroup_id': consistencygroup_id, 'created_at': datetime.datetime( 1900, 1, 1, 1, 1, 1, tzinfo=iso8601.UTC), 'updated_at': datetime.datetime( 1900, 1, 1, 1, 1, 1, tzinfo=iso8601.UTC), 'description': description, 'id': v2_fakes.DEFAULT_VOL_ID, 'links': [{'href': 'http://localhost/v3/volumes/%s' % ( fake.VOLUME_ID), 'rel': 'self'}, {'href': 'http://localhost/volumes/%s' % ( fake.VOLUME_ID), 'rel': 'bookmark'}], 'metadata': metadata, 'name': name, 'replication_status': 'disabled', 'multiattach': multiattach, 'size': size, 'snapshot_id': snapshot_id, 'source_volid': source_volid, 'status': status, 'user_id': fake.USER_ID, 'volume_type': volume_type, 'encrypted': False}} if with_migration_status: volume['volume']['migration_status'] = None return volume def _expected_volume_api_create_kwargs(self, snapshot=None, availability_zone=DEFAULT_AZ, source_volume=None): return {'metadata': None, 'snapshot': snapshot, 'source_volume': source_volume, 'group': None, 'consistencygroup': None, 'availability_zone': availability_zone, 'scheduler_hints': None, } @mock.patch.object(db.sqlalchemy.api, '_volume_type_get_full', autospec=True) @mock.patch.object(volume_api.API, 'get_snapshot', autospec=True) @mock.patch.object(volume_api.API, 'create', autospec=True) def test_volume_creation_from_snapshot(self, create, get_snapshot, volume_type_get): create.side_effect = v2_fakes.fake_volume_api_create get_snapshot.side_effect = v2_fakes.fake_snapshot_get volume_type_get.side_effect = v2_fakes.fake_volume_type_get snapshot_id = fake.SNAPSHOT_ID vol = self._vol_in_request_body(snapshot_id=snapshot_id) body = {"volume": vol} req = fakes.HTTPRequest.blank('/v3/volumes') res_dict = self.controller.create(req, body=body) ex = self._expected_vol_from_controller(snapshot_id=snapshot_id) self.assertEqual(ex, res_dict) context = req.environ['cinder.context'] get_snapshot.assert_called_once_with(self.controller.volume_api, context, snapshot_id) kwargs = self._expected_volume_api_create_kwargs( v2_fakes.fake_snapshot(snapshot_id)) create.assert_called_once_with( 
self.controller.volume_api, context, vol['size'], v2_fakes.DEFAULT_VOL_NAME, v2_fakes.DEFAULT_VOL_DESCRIPTION, **kwargs) @mock.patch.object(volume_api.API, 'get_snapshot', autospec=True) def test_volume_creation_fails_with_invalid_snapshot(self, get_snapshot): get_snapshot.side_effect = v2_fakes.fake_snapshot_get snapshot_id = fake.WILL_NOT_BE_FOUND_ID vol = self._vol_in_request_body(snapshot_id=snapshot_id) body = {"volume": vol} req = fakes.HTTPRequest.blank('/v3/volumes') # Raise 404 when snapshot cannot be found. self.assertRaises(exception.SnapshotNotFound, self.controller.create, req, body=body) context = req.environ['cinder.context'] get_snapshot.assert_called_once_with(self.controller.volume_api, context, snapshot_id) @ddt.data({'s': 'ea895e29-8485-4930-bbb8-c5616a309c0e'}, ['ea895e29-8485-4930-bbb8-c5616a309c0e'], 42) def test_volume_creation_fails_with_invalid_snapshot_type(self, value): snapshot_id = value vol = self._vol_in_request_body(snapshot_id=snapshot_id) body = {"volume": vol} req = fakes.HTTPRequest.blank('/v3/volumes') # Raise 400 when snapshot has not uuid type. self.assertRaises(exception.ValidationError, self.controller.create, req, body=body) @mock.patch.object(db.sqlalchemy.api, '_volume_type_get_full', autospec=True) @mock.patch.object(volume_api.API, 'get_volume', autospec=True) @mock.patch.object(volume_api.API, 'create', autospec=True) def test_volume_creation_from_source_volume(self, create, get_volume, volume_type_get): get_volume.side_effect = v2_fakes.fake_volume_api_get create.side_effect = v2_fakes.fake_volume_api_create volume_type_get.side_effect = v2_fakes.fake_volume_type_get source_volid = '2f49aa3a-6aae-488d-8b99-a43271605af6' vol = self._vol_in_request_body(source_volid=source_volid) body = {"volume": vol} req = fakes.HTTPRequest.blank('/v3/volumes') res_dict = self.controller.create(req, body=body) ex = self._expected_vol_from_controller(source_volid=source_volid) self.assertEqual(ex, res_dict) context = req.environ['cinder.context'] get_volume.assert_called_once_with(self.controller.volume_api, context, source_volid) db_vol = v2_fakes.create_fake_volume(source_volid) vol_obj = fake_volume.fake_volume_obj(context, **db_vol) kwargs = self._expected_volume_api_create_kwargs( source_volume=vol_obj) create.assert_called_once_with( self.controller.volume_api, context, vol['size'], v2_fakes.DEFAULT_VOL_NAME, v2_fakes.DEFAULT_VOL_DESCRIPTION, **kwargs) @mock.patch.object(volume_api.API, 'get_volume', autospec=True) def test_volume_creation_fails_with_invalid_source_volume(self, get_volume): get_volume.side_effect = v2_fakes.fake_volume_get_notfound source_volid = fake.VOLUME_ID vol = self._vol_in_request_body(source_volid=source_volid) body = {"volume": vol} req = fakes.HTTPRequest.blank('/v3/volumes') # Raise 404 when source volume cannot be found. self.assertRaises(exception.VolumeNotFound, self.controller.create, req, body=body) context = req.environ['cinder.context'] get_volume.assert_called_once_with(self.controller.volume_api, context, source_volid) @ddt.data({'source_volid': 1}, {'source_volid': []}, {'consistencygroup_id': 1}, {'consistencygroup_id': []}) def test_volume_creation_fails_with_invalid_uuids(self, updated_uuids): vol = self._vol_in_request_body() vol.update(updated_uuids) body = {"volume": vol} req = fakes.HTTPRequest.blank('/v3/volumes') # Raise 400 for resource requested with invalid uuids. 
self.assertRaises(exception.ValidationError, self.controller.create, req, body=body) @mock.patch.object(groupAPI.API, 'get', autospec=True) def test_volume_creation_fails_with_invalid_consistency_group(self, get_cg): get_cg.side_effect = v2_fakes.fake_consistencygroup_get_notfound consistencygroup_id = '4f49aa3a-6aae-488d-8b99-a43271605af6' vol = self._vol_in_request_body( consistencygroup_id=consistencygroup_id) body = {"volume": vol} req = fakes.HTTPRequest.blank('/v3/volumes') # Raise 404 when consistency group is not found. self.assertRaises(exception.GroupNotFound, self.controller.create, req, body=body) context = req.environ['cinder.context'] get_cg.assert_called_once_with(self.controller.group_api, context, consistencygroup_id) def test_volume_creation_fails_with_bad_size(self): vol = self._vol_in_request_body(size="") body = {"volume": vol} req = fakes.HTTPRequest.blank('/v3/volumes') self.assertRaises(exception.ValidationError, self.controller.create, req, body=body) def test_volume_creation_fails_with_bad_availability_zone(self): vol = self._vol_in_request_body(availability_zone="zonen:hostn") body = {"volume": vol} req = fakes.HTTPRequest.blank('/v3/volumes') self.assertRaises(exception.InvalidAvailabilityZone, self.controller.create, req, body=body) @mock.patch( 'cinder.api.openstack.wsgi.Controller.validate_name_and_description') def test_volume_create_with_image_ref(self, mock_validate): self.mock_object(volume_api.API, "create", v2_fakes.fake_volume_api_create) self.mock_object(db.sqlalchemy.api, '_volume_type_get_full', v2_fakes.fake_volume_type_get) vol = self._vol_in_request_body( availability_zone="nova", image_ref="c905cedb-7281-47e4-8a62-f26bc5fc4c77") ex = self._expected_vol_from_controller(availability_zone="nova") body = {"volume": vol} req = fakes.HTTPRequest.blank('/v3/volumes') res_dict = self.controller.create(req, body=body) self.assertEqual(ex, res_dict) self.assertTrue(mock_validate.called) def test_volume_create_with_image_ref_is_integer(self): self.mock_object(volume_api.API, "create", v2_fakes.fake_volume_create) vol = self._vol_in_request_body(availability_zone="cinder", image_ref=1234) body = {"volume": vol} req = fakes.HTTPRequest.blank('/v3/volumes') self.assertRaises(exception.ValidationError, self.controller.create, req, body=body) def test_volume_create_with_image_ref_not_uuid_format(self): self.mock_object(volume_api.API, "create", v2_fakes.fake_volume_create) self.mock_object(fake_image._FakeImageService, "detail", v2_fakes.fake_image_service_detail) vol = self._vol_in_request_body(availability_zone="cinder", image_ref="12345") body = {"volume": vol} req = fakes.HTTPRequest.blank('/v3/volumes') self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create, req, body=body) def test_volume_create_with_image_ref_with_empty_string(self): self.mock_object(volume_api.API, "create", v2_fakes.fake_volume_create) self.mock_object(fake_image._FakeImageService, "detail", v2_fakes.fake_image_service_detail) vol = self._vol_in_request_body(availability_zone="cinder", image_ref="") body = {"volume": vol} req = fakes.HTTPRequest.blank('/v3/volumes') self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create, req, body=body) @mock.patch( 'cinder.api.openstack.wsgi.Controller.validate_name_and_description') def test_volume_create_with_image_id(self, mock_validate): self.mock_object(volume_api.API, "create", v2_fakes.fake_volume_api_create) self.mock_object(db.sqlalchemy.api, '_volume_type_get_full', v2_fakes.fake_volume_type_get) vol = 
self._vol_in_request_body( availability_zone="nova", image_id="c905cedb-7281-47e4-8a62-f26bc5fc4c77") ex = self._expected_vol_from_controller(availability_zone="nova") body = {"volume": vol} req = fakes.HTTPRequest.blank('/v3/volumes') res_dict = self.controller.create(req, body=body) self.assertEqual(ex, res_dict) self.assertTrue(mock_validate.called) def test_volume_create_with_image_id_is_integer(self): self.mock_object(volume_api.API, "create", v2_fakes.fake_volume_create) vol = self._vol_in_request_body(availability_zone="cinder", image_id=1234) body = {"volume": vol} req = fakes.HTTPRequest.blank('/v3/volumes') self.assertRaises(exception.ValidationError, self.controller.create, req, body=body) def test_volume_create_with_image_id_not_uuid_format(self): self.mock_object(volume_api.API, "create", v2_fakes.fake_volume_create) self.mock_object(fake_image._FakeImageService, "detail", v2_fakes.fake_image_service_detail) vol = self._vol_in_request_body(availability_zone="cinder", image_id="12345") body = {"volume": vol} req = fakes.HTTPRequest.blank('/v3/volumes') self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create, req, body=body) def test_volume_create_with_image_id_with_empty_string(self): self.mock_object(volume_api.API, "create", v2_fakes.fake_volume_create) self.mock_object(fake_image._FakeImageService, "detail", v2_fakes.fake_image_service_detail) vol = self._vol_in_request_body(availability_zone="cinder", image_id="") body = {"volume": vol} req = fakes.HTTPRequest.blank('/v3/volumes') self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create, req, body=body) @mock.patch( 'cinder.api.openstack.wsgi.Controller.validate_name_and_description') def test_volume_create_with_image_name(self, mock_validate): self.mock_object(volume_api.API, "create", v2_fakes.fake_volume_api_create) self.mock_object(db.sqlalchemy.api, '_volume_type_get_full', v2_fakes.fake_volume_type_get) self.mock_object(fake_image._FakeImageService, "detail", v2_fakes.fake_image_service_detail) test_id = "Fedora-x86_64-20-20140618-sda" vol = self._vol_in_request_body(availability_zone="nova", image_ref=test_id) ex = self._expected_vol_from_controller(availability_zone="nova") body = {"volume": vol} req = fakes.HTTPRequest.blank('/v3/volumes') res_dict = self.controller.create(req, body=body) self.assertEqual(ex, res_dict) def test_volume_create_with_image_name_has_multiple(self): self.mock_object(db, 'volume_get', v2_fakes.fake_volume_get_db) self.mock_object(volume_api.API, "create", v2_fakes.fake_volume_create) self.mock_object(fake_image._FakeImageService, "detail", v2_fakes.fake_image_service_detail) test_id = "multi" vol = self._vol_in_request_body(availability_zone="nova", image_ref=test_id) body = {"volume": vol} req = fakes.HTTPRequest.blank('/v3/volumes') self.assertRaises(webob.exc.HTTPConflict, self.controller.create, req, body=body) def test_volume_create_with_image_name_no_match(self): self.mock_object(db, 'volume_get', v2_fakes.fake_volume_get_db) self.mock_object(volume_api.API, "create", v2_fakes.fake_volume_create) self.mock_object(fake_image._FakeImageService, "detail", v2_fakes.fake_image_service_detail) test_id = "MissingName" vol = self._vol_in_request_body(availability_zone="nova", image_ref=test_id) body = {"volume": vol} req = fakes.HTTPRequest.blank('/v3/volumes') self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create, req, body=body) @ddt.data({'a' * 256: 'a'}, {'a': 'a' * 256}, {'': 'a'}, {'a': None}) def test_volume_create_with_invalid_metadata(self, value): 
vol = self._vol_in_request_body() vol['metadata'] = value body = {"volume": vol} req = fakes.HTTPRequest.blank('/v3/volumes') self.assertRaises(exception.ValidationError, self.controller.create, req, body=body) @ddt.data({"name": "Updated Test Name", "description": "Updated Test Description"}, {"name": " test name ", "description": " test description "}) def test_volume_update(self, body): self.mock_object(volume_api.API, 'get', v2_fakes.fake_volume_api_get) self.mock_object(volume_api.API, "update", v2_fakes.fake_volume_update) self.mock_object(db.sqlalchemy.api, '_volume_type_get_full', v2_fakes.fake_volume_type_get) updates = { "name": body['name'], "description": body['description'] } body = {"volume": updates} req = fakes.HTTPRequest.blank('/v3/volumes/%s' % fake.VOLUME_ID) self.assertEqual(0, len(self.notifier.notifications)) name = updates["name"].strip() description = updates["description"].strip() expected = self._expected_vol_from_controller( availability_zone=v2_fakes.DEFAULT_AZ, name=name, description=description, metadata={'attached_mode': 'rw', 'readonly': 'False'}) res_dict = self.controller.update(req, fake.VOLUME_ID, body=body) self.assertEqual(expected, res_dict) self.assertEqual(2, len(self.notifier.notifications)) @mock.patch( 'cinder.api.openstack.wsgi.Controller.validate_name_and_description') def test_volume_update_deprecation(self, mock_validate): self.mock_object(volume_api.API, 'get', v2_fakes.fake_volume_api_get) self.mock_object(volume_api.API, "update", v2_fakes.fake_volume_update) self.mock_object(db.sqlalchemy.api, '_volume_type_get_full', v2_fakes.fake_volume_type_get) updates = { "display_name": "Updated Test Name", "display_description": "Updated Test Description", } body = {"volume": updates} req = fakes.HTTPRequest.blank('/v3/volumes/%s' % fake.VOLUME_ID) self.assertEqual(0, len(self.notifier.notifications)) res_dict = self.controller.update(req, fake.VOLUME_ID, body=body) expected = self._expected_vol_from_controller( availability_zone=v2_fakes.DEFAULT_AZ, name="Updated Test Name", description="Updated Test Description", metadata={'attached_mode': 'rw', 'readonly': 'False'}) self.assertEqual(expected, res_dict) self.assertEqual(2, len(self.notifier.notifications)) self.assertTrue(mock_validate.called) @mock.patch( 'cinder.api.openstack.wsgi.Controller.validate_name_and_description') def test_volume_update_deprecation_key_priority(self, mock_validate): """Test current update keys have priority over deprecated keys.""" self.mock_object(volume_api.API, 'get', v2_fakes.fake_volume_api_get) self.mock_object(volume_api.API, "update", v2_fakes.fake_volume_update) self.mock_object(db.sqlalchemy.api, '_volume_type_get_full', v2_fakes.fake_volume_type_get) updates = { "name": "New Name", "description": "New Description", "display_name": "Not Shown Name", "display_description": "Not Shown Description", } body = {"volume": updates} req = fakes.HTTPRequest.blank('/v3/volumes/%s' % fake.VOLUME_ID) self.assertEqual(0, len(self.notifier.notifications)) res_dict = self.controller.update(req, fake.VOLUME_ID, body=body) expected = self._expected_vol_from_controller( availability_zone=v2_fakes.DEFAULT_AZ, name="New Name", description="New Description", metadata={'attached_mode': 'rw', 'readonly': 'False'}) self.assertEqual(expected, res_dict) self.assertEqual(2, len(self.notifier.notifications)) self.assertTrue(mock_validate.called) @mock.patch( 'cinder.api.openstack.wsgi.Controller.validate_name_and_description') def test_volume_update_metadata(self, mock_validate): 
self.mock_object(volume_api.API, 'get', v2_fakes.fake_volume_api_get) self.mock_object(volume_api.API, "update", v2_fakes.fake_volume_update) self.mock_object(db.sqlalchemy.api, '_volume_type_get_full', v2_fakes.fake_volume_type_get) updates = { "metadata": {"qos_max_iops": '2000'} } body = {"volume": updates} req = fakes.HTTPRequest.blank('/v3/volumes/%s' % fake.VOLUME_ID) self.assertEqual(0, len(self.notifier.notifications)) res_dict = self.controller.update(req, fake.VOLUME_ID, body=body) expected = self._expected_vol_from_controller( availability_zone=v2_fakes.DEFAULT_AZ, metadata={'attached_mode': 'rw', 'readonly': 'False', 'qos_max_iops': '2000'}) self.assertEqual(expected, res_dict) self.assertEqual(2, len(self.notifier.notifications)) self.assertTrue(mock_validate.called) @mock.patch( 'cinder.api.openstack.wsgi.Controller.validate_name_and_description') def test_volume_update_with_admin_metadata(self, mock_validate): self.mock_object(volume_api.API, "update", v2_fakes.fake_volume_update) volume = v2_fakes.create_fake_volume(fake.VOLUME_ID) del volume['name'] del volume['volume_type'] volume['metadata'] = {'key': 'value'} db.volume_create(context.get_admin_context(), volume) db.volume_admin_metadata_update(context.get_admin_context(), fake.VOLUME_ID, {"readonly": "True", "invisible_key": "invisible_value"}, False) values = {'volume_id': fake.VOLUME_ID, } attachment = db.volume_attach(context.get_admin_context(), values) db.volume_attached(context.get_admin_context(), attachment['id'], fake.INSTANCE_ID, None, '/') attach_tmp = db.volume_attachment_get(context.get_admin_context(), attachment['id']) volume_tmp = db.volume_get(context.get_admin_context(), fake.VOLUME_ID) updates = { "name": "Updated Test Name", } body = {"volume": updates} req = fakes.HTTPRequest.blank('/v3/volumes/%s' % fake.VOLUME_ID) self.assertEqual(0, len(self.notifier.notifications)) admin_ctx = context.RequestContext(fake.USER_ID, fake.PROJECT_ID, True) req.environ['cinder.context'] = admin_ctx res_dict = self.controller.update(req, fake.VOLUME_ID, body=body) expected = self._expected_vol_from_controller( availability_zone=v2_fakes.DEFAULT_AZ, status='in-use', name='Updated Test Name', attachments=[{'id': fake.VOLUME_ID, 'attachment_id': attachment['id'], 'volume_id': v2_fakes.DEFAULT_VOL_ID, 'server_id': fake.INSTANCE_ID, 'host_name': None, 'device': '/', 'attached_at': attach_tmp['attach_time'].replace( tzinfo=iso8601.UTC), }], volume_type=fake.VOLUME_TYPE_NAME, metadata={'key': 'value', 'readonly': 'True'}, with_migration_status=True) expected['volume']['updated_at'] = volume_tmp['updated_at'].replace( tzinfo=iso8601.UTC) self.assertEqual(expected, res_dict) self.assertEqual(2, len(self.notifier.notifications)) self.assertTrue(mock_validate.called) @ddt.data({'a' * 256: 'a'}, {'a': 'a' * 256}, {'': 'a'}, {'a': None}) @mock.patch.object(volume_api.API, 'get', side_effect=v2_fakes.fake_volume_api_get, autospec=True) def test_volume_update_with_invalid_metadata(self, value, get): updates = { "metadata": value } body = {"volume": updates} req = fakes.HTTPRequest.blank('/v3/volumes/%s' % fake.VOLUME_ID) self.assertRaises(exception.ValidationError, self.controller.update, req, fake.VOLUME_ID, body=body) def test_update_empty_body(self): body = {} req = fakes.HTTPRequest.blank('/v3/volumes/%s' % fake.VOLUME_ID) self.assertRaises(exception.ValidationError, self.controller.update, req, fake.VOLUME_ID, body=body) def test_update_invalid_body(self): body = { 'name': 'missing top level volume key' } req = 
fakes.HTTPRequest.blank('/v3/volumes/%s' % fake.VOLUME_ID) self.assertRaises(exception.ValidationError, self.controller.update, req, fake.VOLUME_ID, body=body) @ddt.data({'name': 'a' * 256}, {'description': 'a' * 256}, {'display_name': 'a' * 256}, {'display_description': 'a' * 256}) def test_update_exceeds_length_name_description(self, vol): req = fakes.HTTPRequest.blank('/v3/volumes/%s' % fake.VOLUME_ID) body = {'volume': vol} self.assertRaises(exception.InvalidInput, self.controller.update, req, fake.VOLUME_ID, body=body) def test_update_not_found(self): self.mock_object(volume_api.API, "get", v2_fakes.fake_volume_get_notfound) updates = { "name": "Updated Test Name", } body = {"volume": updates} req = fakes.HTTPRequest.blank('/v3/volumes/%s' % fake.VOLUME_ID) self.assertRaises(exception.VolumeNotFound, self.controller.update, req, fake.VOLUME_ID, body=body) def test_volume_list_summary(self): self.mock_object(volume_api.API, 'get_all', v2_fakes.fake_volume_api_get_all_by_project) self.mock_object(db.sqlalchemy.api, '_volume_type_get_full', v2_fakes.fake_volume_type_get) req = fakes.HTTPRequest.blank('/v3/%s/volumes' % fake.PROJECT_ID) res_dict = self.controller.index(req) expected = { 'volumes': [ { 'name': v2_fakes.DEFAULT_VOL_NAME, 'id': fake.VOLUME_ID, 'links': [ { 'href': 'http://localhost/v3/%s/volumes/%s' % ( fake.PROJECT_ID, fake.VOLUME_ID), 'rel': 'self' }, { 'href': 'http://localhost/%s/volumes/%s' % ( fake.PROJECT_ID, fake.VOLUME_ID), 'rel': 'bookmark' } ], } ] } self.assertEqual(expected, res_dict) # Finally test that we cached the returned volumes self.assertEqual(1, len(req.cached_resource())) def test_volume_list_detail(self): self.mock_object(volume_api.API, 'get_all', v2_fakes.fake_volume_api_get_all_by_project) self.mock_object(db.sqlalchemy.api, '_volume_type_get_full', v2_fakes.fake_volume_type_get) req = fakes.HTTPRequest.blank('/v3/volumes/detail') res_dict = self.controller.detail(req) exp_vol = self._expected_vol_from_controller( availability_zone=v2_fakes.DEFAULT_AZ, metadata={'attached_mode': 'rw', 'readonly': 'False'}) expected = {'volumes': [exp_vol['volume']]} self.assertEqual(expected, res_dict) # Finally test that we cached the returned volumes self.assertEqual(1, len(req.cached_resource())) def test_volume_list_detail_with_admin_metadata(self): volume = v2_fakes.create_fake_volume(fake.VOLUME_ID) del volume['name'] del volume['volume_type'] volume['metadata'] = {'key': 'value'} db.volume_create(context.get_admin_context(), volume) db.volume_admin_metadata_update(context.get_admin_context(), fake.VOLUME_ID, {"readonly": "True", "invisible_key": "invisible_value"}, False) values = {'volume_id': fake.VOLUME_ID, } attachment = db.volume_attach(context.get_admin_context(), values) db.volume_attached(context.get_admin_context(), attachment['id'], fake.INSTANCE_ID, None, '/') attach_tmp = db.volume_attachment_get(context.get_admin_context(), attachment['id']) volume_tmp = db.volume_get(context.get_admin_context(), fake.VOLUME_ID) req = fakes.HTTPRequest.blank('/v3/volumes/detail') admin_ctx = context.RequestContext(fake.USER_ID, fake.PROJECT_ID, True) req.environ['cinder.context'] = admin_ctx res_dict = self.controller.detail(req) exp_vol = self._expected_vol_from_controller( availability_zone=v2_fakes.DEFAULT_AZ, status="in-use", volume_type=fake.VOLUME_TYPE_NAME, attachments=[{'attachment_id': attachment['id'], 'device': '/', 'server_id': fake.INSTANCE_ID, 'host_name': None, 'id': fake.VOLUME_ID, 'volume_id': v2_fakes.DEFAULT_VOL_ID, 'attached_at': 
attach_tmp['attach_time'].replace( tzinfo=iso8601.UTC), }], metadata={'key': 'value', 'readonly': 'True'}, with_migration_status=True) exp_vol['volume']['updated_at'] = volume_tmp['updated_at'].replace( tzinfo=iso8601.UTC) expected = {'volumes': [exp_vol['volume']]} self.assertEqual(expected, res_dict) def test_volume_list_detail_host_name_admin_non_admin(self): fake_host = 'fake_host' volume = v2_fakes.create_fake_volume(fake.VOLUME_ID) del volume['name'] del volume['volume_type'] db.volume_create(context.get_admin_context(), volume) values = {'volume_id': fake.VOLUME_ID, } attachment = db.volume_attach(context.get_admin_context(), values) db.volume_attached(context.get_admin_context(), attachment['id'], fake.INSTANCE_ID, fake_host, '/') db.volume_attachment_get(context.get_admin_context(), attachment['id']) req = fakes.HTTPRequest.blank('/v3/volumes/detail') res_dict = self.controller.detail(req) # host_name will always be None for non-admins self.assertIsNone( res_dict['volumes'][0]['attachments'][0]['host_name']) admin_ctx = context.RequestContext(fake.USER_ID, fake.PROJECT_ID, True) req.environ['cinder.context'] = admin_ctx res_dict = self.controller.detail(req) # correct host_name is returned for admins self.assertEqual(fake_host, res_dict['volumes'][0]['attachments'][0]['host_name'] ) def test_volume_index_with_marker(self): def fake_volume_get_all_by_project(context, project_id, marker, limit, sort_keys=None, sort_dirs=None, filters=None, viewable_admin_meta=False, offset=0): return [ v2_fakes.create_fake_volume(fake.VOLUME_ID, display_name='vol1'), v2_fakes.create_fake_volume(fake.VOLUME2_ID, display_name='vol2'), ] self.mock_object(db, 'volume_get_all_by_project', fake_volume_get_all_by_project) self.mock_object(volume_api.API, 'get', v2_fakes.fake_volume_get) req = fakes.HTTPRequest.blank('/v3/volumes?marker=1') res_dict = self.controller.index(req) volumes = res_dict['volumes'] self.assertEqual(2, len(volumes)) self.assertEqual(fake.VOLUME_ID, volumes[0]['id']) self.assertEqual(fake.VOLUME2_ID, volumes[1]['id']) def test_volume_index_limit(self): self.mock_object(db, 'volume_get_all_by_project', v2_fakes.fake_volume_get_all_by_project) self.mock_object(volume_api.API, 'get', v2_fakes.fake_volume_get) req = fakes.HTTPRequest.blank('/v3/%s/volumes' '?limit=1&name=foo' '&sort=id1:asc' % fake.PROJECT_ID) res_dict = self.controller.index(req) volumes = res_dict['volumes'] self.assertEqual(1, len(volumes)) # Ensure that the next link is correctly formatted, it should # contain the same limit, filter, and sort information as the # original request as well as a marker; this ensures that the # caller can simply use the "next" link and that they do not # need to manually insert the limit and sort information. 
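# As an illustration only (values assumed from this particular request), the
# generated next link would carry the same limit, filter and sort plus a marker,
# e.g.:
#   http://localhost/v3/<project_id>/volumes?limit=1&name=foo&sort=id1:asc&marker=<id-of-last-returned-volume>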
links = res_dict['volumes_links'] self.assertEqual('next', links[0]['rel']) href_parts = urllib.parse.urlparse(links[0]['href']) self.assertEqual('/v3/%s/volumes' % fake.PROJECT_ID, href_parts.path) params = urllib.parse.parse_qs(href_parts.query) self.assertEqual(str(volumes[0]['id']), params['marker'][0]) self.assertEqual('1', params['limit'][0]) self.assertEqual('foo', params['name'][0]) self.assertEqual('id1:asc', params['sort'][0]) def test_volume_index_limit_negative(self): req = fakes.HTTPRequest.blank('/v3/volumes?limit=-1') self.assertRaises(webob.exc.HTTPBadRequest, self.controller.index, req) def test_volume_index_limit_non_int(self): req = fakes.HTTPRequest.blank('/v3/volumes?limit=a') self.assertRaises(webob.exc.HTTPBadRequest, self.controller.index, req) def test_volume_index_limit_marker(self): self.mock_object(db, 'volume_get_all_by_project', v2_fakes.fake_volume_get_all_by_project) self.mock_object(volume_api.API, 'get', v2_fakes.fake_volume_get) req = fakes.HTTPRequest.blank('/v3/volumes?marker=1&limit=1') res_dict = self.controller.index(req) volumes = res_dict['volumes'] self.assertEqual(1, len(volumes)) self.assertEqual(fake.VOLUME_ID, volumes[0]['id']) def _create_db_volumes(self, num_volumes): volumes = [utils.create_volume(self.ctxt, display_name='vol%s' % i) for i in range(num_volumes)] for vol in volumes: self.addCleanup(db.volume_destroy, self.ctxt, vol.id) volumes.reverse() return volumes def test_volume_index_limit_offset(self): created_volumes = self._create_db_volumes(2) req = fakes.HTTPRequest.blank('/v3/volumes?limit=2&offset=1') res_dict = self.controller.index(req) volumes = res_dict['volumes'] self.assertEqual(1, len(volumes)) self.assertEqual(created_volumes[1].id, volumes[0]['id']) req = fakes.HTTPRequest.blank('/v3/volumes?limit=-1&offset=1') self.assertRaises(webob.exc.HTTPBadRequest, self.controller.index, req) req = fakes.HTTPRequest.blank('/v3/volumes?limit=a&offset=1') self.assertRaises(webob.exc.HTTPBadRequest, self.controller.index, req) # Test that we get an exception HTTPBadRequest(400) with an offset # greater than the maximum offset value. 
url = '/v3/volumes?limit=2&offset=43543564546567575' req = fakes.HTTPRequest.blank(url) self.assertRaises(webob.exc.HTTPBadRequest, self.controller.index, req) def test_volume_detail_with_marker(self): def fake_volume_get_all_by_project(context, project_id, marker, limit, sort_keys=None, sort_dirs=None, filters=None, viewable_admin_meta=False, offset=0): return [ v2_fakes.create_fake_volume(fake.VOLUME_ID, display_name='vol1'), v2_fakes.create_fake_volume(fake.VOLUME2_ID, display_name='vol2'), ] self.mock_object(db, 'volume_get_all_by_project', fake_volume_get_all_by_project) self.mock_object(db.sqlalchemy.api, '_volume_type_get_full', v2_fakes.fake_volume_type_get) req = fakes.HTTPRequest.blank('/v3/volumes/detail?marker=1') res_dict = self.controller.detail(req) volumes = res_dict['volumes'] self.assertEqual(2, len(volumes)) self.assertEqual(fake.VOLUME_ID, volumes[0]['id']) self.assertEqual(fake.VOLUME2_ID, volumes[1]['id']) def test_volume_detail_limit(self): self.mock_object(db, 'volume_get_all_by_project', v2_fakes.fake_volume_get_all_by_project) self.mock_object(db.sqlalchemy.api, '_volume_type_get_full', v2_fakes.fake_volume_type_get) req = fakes.HTTPRequest.blank('/v3/%s/volumes/detail?limit=1' % fake.PROJECT_ID) res_dict = self.controller.detail(req) volumes = res_dict['volumes'] self.assertEqual(1, len(volumes)) # Ensure that the next link is correctly formatted links = res_dict['volumes_links'] self.assertEqual('next', links[0]['rel']) href_parts = urllib.parse.urlparse(links[0]['href']) self.assertEqual('/v3/%s/volumes/detail' % fake.PROJECT_ID, href_parts.path) params = urllib.parse.parse_qs(href_parts.query) self.assertIn('marker', params) self.assertEqual('1', params['limit'][0]) def test_volume_detail_limit_negative(self): req = fakes.HTTPRequest.blank('/v3/volumes/detail?limit=-1') self.assertRaises(webob.exc.HTTPBadRequest, self.controller.detail, req) def test_volume_detail_limit_non_int(self): req = fakes.HTTPRequest.blank('/v3/volumes/detail?limit=a') self.assertRaises(webob.exc.HTTPBadRequest, self.controller.detail, req) def test_volume_detail_limit_marker(self): self.mock_object(db, 'volume_get_all_by_project', v2_fakes.fake_volume_get_all_by_project) self.mock_object(db.sqlalchemy.api, '_volume_type_get_full', v2_fakes.fake_volume_type_get) req = fakes.HTTPRequest.blank('/v3/volumes/detail?marker=1&limit=1') res_dict = self.controller.detail(req) volumes = res_dict['volumes'] self.assertEqual(1, len(volumes)) self.assertEqual(fake.VOLUME_ID, volumes[0]['id']) def test_volume_detail_limit_offset(self): created_volumes = self._create_db_volumes(2) req = fakes.HTTPRequest.blank('/v3/volumes/detail?limit=2&offset=1') res_dict = self.controller.detail(req) volumes = res_dict['volumes'] self.assertEqual(1, len(volumes)) self.assertEqual(created_volumes[1].id, volumes[0]['id']) req = fakes.HTTPRequest.blank('/v3/volumes/detail?limit=2&offset=1', use_admin_context=True) res_dict = self.controller.detail(req) volumes = res_dict['volumes'] self.assertEqual(1, len(volumes)) self.assertEqual(created_volumes[1].id, volumes[0]['id']) req = fakes.HTTPRequest.blank('/v3/volumes/detail?limit=-1&offset=1') self.assertRaises(webob.exc.HTTPBadRequest, self.controller.detail, req) req = fakes.HTTPRequest.blank('/v3/volumes/detail?limit=a&offset=1') self.assertRaises(webob.exc.HTTPBadRequest, self.controller.detail, req) url = '/v3/volumes/detail?limit=2&offset=4536546546546467' req = fakes.HTTPRequest.blank(url) self.assertRaises(webob.exc.HTTPBadRequest, self.controller.detail, req) 
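    # The limit/offset/marker tests above exercise the paginated listing
    # behaviour.  The snippet below is a minimal client-side sketch (not part
    # of this test suite and not cinder code) of how a caller could follow the
    # "volumes_links" next link those tests assert on; the helper name and its
    # use are assumptions for illustration only.
    #
    #     import urllib.parse
    #
    #     def next_page_query(list_response):
    #         """Return the query parameters carried by the 'next' link, if any."""
    #         for link in list_response.get('volumes_links', []):
    #             if link.get('rel') == 'next':
    #                 parsed = urllib.parse.urlparse(link['href'])
    #                 # parse_qs returns lists, e.g. {'marker': ['<last-id>'], 'limit': ['1']}
    #                 return urllib.parse.parse_qs(parsed.query)
    #         return None  # no next link: the results fit within the requested limit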
def test_volume_with_limit_zero(self): def fake_volume_get_all(context, marker, limit, **kwargs): return [] self.mock_object(db, 'volume_get_all', fake_volume_get_all) req = fakes.HTTPRequest.blank('/v3/volumes?limit=0') res_dict = self.controller.index(req) expected = {'volumes': []} self.assertEqual(expected, res_dict) def _validate_next_link(self, detailed, item_count, osapi_max_limit, limit, should_link_exist): keys_fns = (('volumes', self.controller.index), ('volumes/detail', self.controller.detail)) key, fn = keys_fns[detailed] req_string = '/v3/%s?all_tenants=1' % key if limit: req_string += '&limit=%s' % limit req = fakes.HTTPRequest.blank(req_string, use_admin_context=True) link_return = [{"rel": "next", "href": "fake_link"}] self.flags(osapi_max_limit=osapi_max_limit) def get_pagination_params(params, max_limit=CONF.osapi_max_limit, original_call=common.get_pagination_params): return original_call(params, max_limit) def _get_limit_param(params, max_limit=CONF.osapi_max_limit, original_call=common._get_limit_param): return original_call(params, max_limit) with mock.patch.object(common, 'get_pagination_params', get_pagination_params), \ mock.patch.object(common, '_get_limit_param', _get_limit_param), \ mock.patch.object(common.ViewBuilder, '_generate_next_link', return_value=link_return): res_dict = fn(req) self.assertEqual(item_count, len(res_dict['volumes'])) self.assertEqual(should_link_exist, 'volumes_links' in res_dict) def test_volume_default_limit(self): self._create_db_volumes(3) # Verify both the index and detail queries for detailed in (True, False): # Number of volumes less than max, do not include self._validate_next_link(detailed, item_count=3, osapi_max_limit=4, limit=None, should_link_exist=False) # Number of volumes equals the max, next link will be included self._validate_next_link(detailed, item_count=3, osapi_max_limit=3, limit=None, should_link_exist=True) # Number of volumes more than the max, include next link self._validate_next_link(detailed, item_count=2, osapi_max_limit=2, limit=None, should_link_exist=True) # Limit lower than max but doesn't limit, no next link self._validate_next_link(detailed, item_count=3, osapi_max_limit=5, limit=4, should_link_exist=False) # Limit lower than max and limits, we have next link self._validate_next_link(detailed, item_count=2, osapi_max_limit=4, limit=2, should_link_exist=True) # Limit higher than max and max limits, we have next link self._validate_next_link(detailed, item_count=2, osapi_max_limit=2, limit=4, should_link_exist=True) # Limit higher than max but none of them limiting, no next link self._validate_next_link(detailed, item_count=3, osapi_max_limit=4, limit=5, should_link_exist=False) def test_volume_list_default_filters(self): """Tests that the default filters from volume.api.API.get_all are set. 1. 'no_migration_status'=True for non-admins and get_all_by_project is invoked. 2. 'no_migration_status' is not included for admins. 3. When 'all_tenants' is not specified, then it is removed and get_all_by_project is invoked for admins. 4. When 'all_tenants' is specified, then it is removed and get_all is invoked for admins. 
""" # Non-admin, project function should be called with no_migration_status def fake_volume_get_all_by_project(context, project_id, marker, limit, sort_keys=None, sort_dirs=None, filters=None, viewable_admin_meta=False, offset=0): self.assertTrue(filters['no_migration_targets']) self.assertNotIn('all_tenants', filters) return [v2_fakes.create_fake_volume(fake.VOLUME_ID, display_name='vol1')] def fake_volume_get_all(context, marker, limit, sort_keys=None, sort_dirs=None, filters=None, viewable_admin_meta=False, offset=0): return [] self.mock_object(db, 'volume_get_all_by_project', fake_volume_get_all_by_project) self.mock_object(db, 'volume_get_all', fake_volume_get_all) # all_tenants does not matter for non-admin for params in ['', '?all_tenants=1']: req = fakes.HTTPRequest.blank('/v3/volumes%s' % params) resp = self.controller.index(req) self.assertEqual(1, len(resp['volumes'])) self.assertEqual('vol1', resp['volumes'][0]['name']) # Admin, all_tenants is not set, project function should be called # without no_migration_status def fake_volume_get_all_by_project2(context, project_id, marker, limit, sort_keys=None, sort_dirs=None, filters=None, viewable_admin_meta=False, offset=0): self.assertNotIn('no_migration_targets', filters) return [v2_fakes.create_fake_volume(fake.VOLUME_ID, display_name='vol2')] def fake_volume_get_all2(context, marker, limit, sort_keys=None, sort_dirs=None, filters=None, viewable_admin_meta=False, offset=0): return [] self.mock_object(db, 'volume_get_all_by_project', fake_volume_get_all_by_project2) self.mock_object(db, 'volume_get_all', fake_volume_get_all2) req = fakes.HTTPRequest.blank('/v3/volumes', use_admin_context=True) resp = self.controller.index(req) self.assertEqual(1, len(resp['volumes'])) self.assertEqual('vol2', resp['volumes'][0]['name']) # Admin, all_tenants is set, get_all function should be called # without no_migration_status def fake_volume_get_all_by_project3(context, project_id, marker, limit, sort_keys=None, sort_dirs=None, filters=None, viewable_admin_meta=False, offset=0): return [] def fake_volume_get_all3(context, marker, limit, sort_keys=None, sort_dirs=None, filters=None, viewable_admin_meta=False, offset=0): self.assertNotIn('no_migration_targets', filters) self.assertNotIn('all_tenants', filters) return [v2_fakes.create_fake_volume(fake.VOLUME3_ID, display_name='vol3')] self.mock_object(db, 'volume_get_all_by_project', fake_volume_get_all_by_project3) self.mock_object(db, 'volume_get_all', fake_volume_get_all3) req = fakes.HTTPRequest.blank('/v3/volumes?all_tenants=1', use_admin_context=True) resp = self.controller.index(req) self.assertEqual(1, len(resp['volumes'])) self.assertEqual('vol3', resp['volumes'][0]['name']) def test_volume_show(self): self.mock_object(volume_api.API, 'get', v2_fakes.fake_volume_api_get) self.mock_object(db.sqlalchemy.api, '_volume_type_get_full', v2_fakes.fake_volume_type_get) req = fakes.HTTPRequest.blank('/v3/volumes/%s' % fake.VOLUME_ID) res_dict = self.controller.show(req, fake.VOLUME_ID) expected = self._expected_vol_from_controller( availability_zone=v2_fakes.DEFAULT_AZ, metadata={'attached_mode': 'rw', 'readonly': 'False'}) self.assertEqual(expected, res_dict) # Finally test that we cached the returned volume self.assertIsNotNone(req.cached_resource_by_id(fake.VOLUME_ID)) def test_volume_show_no_attachments(self): def fake_volume_get(self, context, volume_id, **kwargs): vol = v2_fakes.create_fake_volume( volume_id, attach_status= fields.VolumeAttachStatus.DETACHED) return 
fake_volume.fake_volume_obj(context, **vol) def fake_volume_admin_metadata_get(context, volume_id, **kwargs): return v2_fakes.fake_volume_admin_metadata_get( context, volume_id, attach_status= fields.VolumeAttachStatus.DETACHED) self.mock_object(volume_api.API, 'get', fake_volume_get) self.mock_object(db, 'volume_admin_metadata_get', fake_volume_admin_metadata_get) self.mock_object(db.sqlalchemy.api, '_volume_type_get_full', v2_fakes.fake_volume_type_get) req = fakes.HTTPRequest.blank('/v3/volumes/%s' % fake.VOLUME_ID) res_dict = self.controller.show(req, fake.VOLUME_ID) expected = self._expected_vol_from_controller( availability_zone=v2_fakes.DEFAULT_AZ, metadata={'readonly': 'False'}) self.assertEqual(expected, res_dict) def test_volume_show_no_volume(self): self.mock_object(volume_api.API, "get", v2_fakes.fake_volume_get_notfound) req = fakes.HTTPRequest.blank('/v3/volumes/%s' % fake.VOLUME_ID) self.assertRaises(exception.VolumeNotFound, self.controller.show, req, 1) # Finally test that nothing was cached self.assertIsNone(req.cached_resource_by_id(fake.VOLUME_ID)) def test_volume_show_with_admin_metadata(self): volume = v2_fakes.create_fake_volume(fake.VOLUME_ID) del volume['name'] del volume['volume_type'] volume['metadata'] = {'key': 'value'} db.volume_create(context.get_admin_context(), volume) db.volume_admin_metadata_update(context.get_admin_context(), fake.VOLUME_ID, {"readonly": "True", "invisible_key": "invisible_value"}, False) values = {'volume_id': fake.VOLUME_ID, } attachment = db.volume_attach(context.get_admin_context(), values) db.volume_attached(context.get_admin_context(), attachment['id'], fake.INSTANCE_ID, None, '/') attach_tmp = db.volume_attachment_get(context.get_admin_context(), attachment['id']) volume_tmp = db.volume_get(context.get_admin_context(), fake.VOLUME_ID) req = fakes.HTTPRequest.blank('/v3/volumes/%s' % fake.VOLUME_ID) admin_ctx = context.RequestContext(fake.USER_ID, fake.PROJECT_ID, True) req.environ['cinder.context'] = admin_ctx res_dict = self.controller.show(req, fake.VOLUME_ID) expected = self._expected_vol_from_controller( availability_zone=v2_fakes.DEFAULT_AZ, volume_type=fake.VOLUME_TYPE_NAME, status='in-use', attachments=[{'id': fake.VOLUME_ID, 'attachment_id': attachment['id'], 'volume_id': v2_fakes.DEFAULT_VOL_ID, 'server_id': fake.INSTANCE_ID, 'host_name': None, 'device': '/', 'attached_at': attach_tmp['attach_time'].replace( tzinfo=iso8601.UTC), }], metadata={'key': 'value', 'readonly': 'True'}, with_migration_status=True) expected['volume']['updated_at'] = volume_tmp['updated_at'].replace( tzinfo=iso8601.UTC) self.assertEqual(expected, res_dict) def test_volume_show_with_encrypted_volume(self): def fake_volume_get(self, context, volume_id, **kwargs): vol = v2_fakes.create_fake_volume(volume_id, encryption_key_id=fake.KEY_ID) return fake_volume.fake_volume_obj(context, **vol) self.mock_object(volume_api.API, 'get', fake_volume_get) self.mock_object(db.sqlalchemy.api, '_volume_type_get_full', v2_fakes.fake_volume_type_get) req = fakes.HTTPRequest.blank('/v3/volumes/%s' % fake.VOLUME_ID) res_dict = self.controller.show(req, fake.VOLUME_ID) self.assertTrue(res_dict['volume']['encrypted']) def test_volume_show_with_unencrypted_volume(self): self.mock_object(volume_api.API, 'get', v2_fakes.fake_volume_api_get) self.mock_object(db.sqlalchemy.api, '_volume_type_get_full', v2_fakes.fake_volume_type_get) req = fakes.HTTPRequest.blank('/v3/volumes/%s' % fake.VOLUME_ID) res_dict = self.controller.show(req, fake.VOLUME_ID) self.assertEqual(False, 
res_dict['volume']['encrypted']) def test_volume_show_with_error_managing_deleting(self): def fake_volume_get(self, context, volume_id, **kwargs): vol = v2_fakes.create_fake_volume(volume_id, status='error_managing_deleting') return fake_volume.fake_volume_obj(context, **vol) self.mock_object(volume_api.API, 'get', fake_volume_get) self.mock_object(db.sqlalchemy.api, '_volume_type_get_full', v2_fakes.fake_volume_type_get) req = fakes.HTTPRequest.blank('/v3/volumes/%s' % fake.VOLUME_ID) res_dict = self.controller.show(req, fake.VOLUME_ID) self.assertEqual('deleting', res_dict['volume']['status']) @mock.patch.object(volume_api.API, 'delete', v2_fakes.fake_volume_delete) @mock.patch.object(volume_api.API, 'get', v2_fakes.fake_volume_get) def test_volume_delete(self): req = fakes.HTTPRequest.blank('/v3/volumes/%s' % fake.VOLUME_ID) resp = self.controller.delete(req, fake.VOLUME_ID) self.assertEqual(HTTPStatus.ACCEPTED, resp.status_int) def test_volume_delete_attached(self): def fake_volume_attached(self, context, volume, force=False, cascade=False): raise exception.VolumeAttached(volume_id=volume['id']) self.mock_object(volume_api.API, "delete", fake_volume_attached) self.mock_object(volume_api.API, 'get', v2_fakes.fake_volume_get) req = fakes.HTTPRequest.blank('/v3/volumes/%s' % fake.VOLUME_ID) exp = self.assertRaises(exception.VolumeAttached, self.controller.delete, req, 1) expect_msg = "Volume 1 is still attached, detach volume first." self.assertEqual(expect_msg, str(exp)) def test_volume_delete_no_volume(self): self.mock_object(volume_api.API, "get", v2_fakes.fake_volume_get_notfound) req = fakes.HTTPRequest.blank('/v3/volumes/%s' % fake.VOLUME_ID) self.assertRaises(exception.VolumeNotFound, self.controller.delete, req, 1) def test_admin_list_volumes_limited_to_project(self): self.mock_object(db, 'volume_get_all_by_project', v2_fakes.fake_volume_get_all_by_project) req = fakes.HTTPRequest.blank('/v3/%s/volumes' % fake.PROJECT_ID, use_admin_context=True) res = self.controller.index(req) self.assertIn('volumes', res) self.assertEqual(1, len(res['volumes'])) @mock.patch.object(db, 'volume_get_all', v2_fakes.fake_volume_get_all) @mock.patch.object(db, 'volume_get_all_by_project', v2_fakes.fake_volume_get_all_by_project) def test_admin_list_volumes_all_tenants(self): req = fakes.HTTPRequest.blank( '/v3/%s/volumes?all_tenants=1' % fake.PROJECT_ID, use_admin_context=True) res = self.controller.index(req) self.assertIn('volumes', res) self.assertEqual(3, len(res['volumes'])) @mock.patch.object(db, 'volume_get_all', v2_fakes.fake_volume_get_all) @mock.patch.object(db, 'volume_get_all_by_project', v2_fakes.fake_volume_get_all_by_project) @mock.patch.object(volume_api.API, 'get', v2_fakes.fake_volume_get) def test_all_tenants_non_admin_gets_all_tenants(self): req = fakes.HTTPRequest.blank( '/v3/%s/volumes?all_tenants=1' % fake.PROJECT_ID) res = self.controller.index(req) self.assertIn('volumes', res) self.assertEqual(1, len(res['volumes'])) @mock.patch.object(db, 'volume_get_all_by_project', v2_fakes.fake_volume_get_all_by_project) @mock.patch.object(volume_api.API, 'get', v2_fakes.fake_volume_get) def test_non_admin_get_by_project(self): req = fakes.HTTPRequest.blank('/v3/%s/volumes' % fake.PROJECT_ID) res = self.controller.index(req) self.assertIn('volumes', res) self.assertEqual(1, len(res['volumes'])) def _create_volume_bad_request(self, body): req = fakes.HTTPRequest.blank('/v3/%s/volumes' % fake.PROJECT_ID) req.method = 'POST' self.assertRaises(exception.ValidationError, 
self.controller.create, req, body=body) def test_create_no_body(self): self._create_volume_bad_request(body=None) def test_create_missing_volume(self): body = {'foo': {'a': 'b'}} self._create_volume_bad_request(body=body) def test_create_malformed_entity(self): body = {'volume': 'string'} self._create_volume_bad_request(body=body) def _test_get_volumes_by_name(self, get_all, display_name): req = mock.MagicMock() context = mock.Mock() req.environ = {'cinder.context': context} req.params = {'display_name': display_name} self.controller._view_builder.detail_list = mock.Mock() self.controller._get_volumes(req, True) get_all.assert_called_once_with( context, None, CONF.osapi_max_limit, sort_keys=['created_at'], sort_dirs=['desc'], filters={'display_name': display_name}, viewable_admin_meta=True, offset=0) @mock.patch('cinder.volume.api.API.get_all') def test_get_volumes_filter_with_string(self, get_all): """Test to get a volume with an alpha-numeric display name.""" self._test_get_volumes_by_name(get_all, 'Volume-573108026') @mock.patch('cinder.volume.api.API.get_all') def test_get_volumes_filter_with_double_quoted_string(self, get_all): """Test to get a volume with a double-quoted display name.""" self._test_get_volumes_by_name(get_all, '"Volume-573108026"') @mock.patch('cinder.volume.api.API.get_all') def test_get_volumes_filter_with_single_quoted_string(self, get_all): """Test to get a volume with a single-quoted display name.""" self._test_get_volumes_by_name(get_all, "'Volume-573108026'") @mock.patch('cinder.volume.api.API.get_all') def test_get_volumes_filter_with_quote_in_between_string(self, get_all): """Test to get a volume with a quote in between the display name.""" self._test_get_volumes_by_name(get_all, 'Volu"me-573108026') @mock.patch('cinder.volume.api.API.get_all') def test_get_volumes_filter_with_mixed_quoted_string(self, get_all): """Test to get a volume with a mix of single and double quotes. 
""" # The display name starts with a single quote and ends with a # double quote self._test_get_volumes_by_name(get_all, '\'Volume-573108026"') @mock.patch('cinder.volume.api.API.get_all') def test_get_volumes_filter_with_true(self, get_all): req = mock.MagicMock() context = mock.Mock() req.environ = {'cinder.context': context} req.params = {'display_name': 'Volume-573108026', 'bootable': 1} self.controller._view_builder.detail_list = mock.Mock() self.controller._get_volumes(req, True) get_all.assert_called_once_with( context, None, CONF.osapi_max_limit, sort_keys=['created_at'], sort_dirs=['desc'], filters={'display_name': 'Volume-573108026', 'bootable': True}, viewable_admin_meta=True, offset=0) @mock.patch('cinder.volume.api.API.get_all') def test_get_volumes_filter_with_false(self, get_all): req = mock.MagicMock() context = mock.Mock() req.environ = {'cinder.context': context} req.params = {'display_name': 'Volume-573108026', 'bootable': 0} self.controller._view_builder.detail_list = mock.Mock() self.controller._get_volumes(req, True) get_all.assert_called_once_with( context, None, CONF.osapi_max_limit, sort_keys=['created_at'], sort_dirs=['desc'], filters={'display_name': 'Volume-573108026', 'bootable': False}, viewable_admin_meta=True, offset=0) @mock.patch('cinder.volume.api.API.get_all') def test_get_volumes_filter_with_list(self, get_all): req = mock.MagicMock() context = mock.Mock() req.environ = {'cinder.context': context} req.params = {'id': "['%s', '%s', '%s']" % ( fake.VOLUME_ID, fake.VOLUME2_ID, fake.VOLUME3_ID)} self.controller._view_builder.detail_list = mock.Mock() self.controller._get_volumes(req, True) get_all.assert_called_once_with( context, None, CONF.osapi_max_limit, sort_keys=['created_at'], sort_dirs=['desc'], filters={'id': [fake.VOLUME_ID, fake.VOLUME2_ID, fake.VOLUME3_ID]}, viewable_admin_meta=True, offset=0) @mock.patch('cinder.volume.api.API.get_all') def test_get_volumes_filter_with_expression(self, get_all): req = mock.MagicMock() context = mock.Mock() req.environ = {'cinder.context': context} req.params = {'name': "d-"} self.controller._view_builder.detail_list = mock.Mock() self.controller._get_volumes(req, True) get_all.assert_called_once_with( context, None, CONF.osapi_max_limit, sort_keys=['created_at'], sort_dirs=['desc'], filters={'display_name': 'd-'}, viewable_admin_meta=True, offset=0) @mock.patch('cinder.volume.api.API.get_all') def test_get_volumes_filter_with_status(self, get_all): req = mock.MagicMock() ctxt = context.RequestContext( fake.USER_ID, fake.PROJECT_ID, auth_token=True) req.environ = {'cinder.context': ctxt} req.params = {'status': 'available'} self.controller._view_builder.detail_list = mock.Mock() self.controller._get_volumes(req, True) get_all.assert_called_once_with( ctxt, None, CONF.osapi_max_limit, sort_keys=['created_at'], sort_dirs=['desc'], filters={'status': 'available'}, viewable_admin_meta=True, offset=0) @mock.patch('cinder.volume.api.API.get_all') def test_get_volumes_filter_with_metadata(self, get_all): req = mock.MagicMock() ctxt = context.RequestContext( fake.USER_ID, fake.PROJECT_ID, auth_token=True) req.environ = {'cinder.context': ctxt} req.params = {'metadata': "{'fake_key': 'fake_value'}"} self.controller._view_builder.detail_list = mock.Mock() self.controller._get_volumes(req, True) get_all.assert_called_once_with( ctxt, None, CONF.osapi_max_limit, sort_keys=['created_at'], sort_dirs=['desc'], filters={'metadata': {'fake_key': 'fake_value'}}, viewable_admin_meta=True, offset=0) 
@mock.patch('cinder.volume.api.API.get_all') def test_get_volumes_filter_with_availability_zone(self, get_all): req = mock.MagicMock() ctxt = context.RequestContext( fake.USER_ID, fake.PROJECT_ID, auth_token=True) req.environ = {'cinder.context': ctxt} req.params = {'availability_zone': 'nova'} self.controller._view_builder.detail_list = mock.Mock() self.controller._get_volumes(req, True) get_all.assert_called_once_with( ctxt, None, CONF.osapi_max_limit, sort_keys=['created_at'], sort_dirs=['desc'], filters={'availability_zone': 'nova'}, viewable_admin_meta=True, offset=0) @mock.patch('cinder.volume.api.API.get_all') def test_get_volumes_filter_with_bootable(self, get_all): req = mock.MagicMock() ctxt = context.RequestContext( fake.USER_ID, fake.PROJECT_ID, auth_token=True) req.environ = {'cinder.context': ctxt} req.params = {'bootable': 1} self.controller._view_builder.detail_list = mock.Mock() self.controller._get_volumes(req, True) get_all.assert_called_once_with( ctxt, None, CONF.osapi_max_limit, sort_keys=['created_at'], sort_dirs=['desc'], filters={'bootable': True}, viewable_admin_meta=True, offset=0) @mock.patch('cinder.volume.api.API.get_all') def test_get_volumes_filter_with_invalid_filter(self, get_all): req = mock.MagicMock() ctxt = context.RequestContext( fake.USER_ID, fake.PROJECT_ID, auth_token=True) req.environ = {'cinder.context': ctxt} req.params = {'invalid_filter': 'invalid', 'availability_zone': 'nova'} self.controller._view_builder.detail_list = mock.Mock() self.controller._get_volumes(req, True) get_all.assert_called_once_with( ctxt, None, CONF.osapi_max_limit, sort_keys=['created_at'], sort_dirs=['desc'], filters={'availability_zone': 'nova'}, viewable_admin_meta=True, offset=0) @mock.patch('cinder.volume.api.API.get_all') def test_get_volumes_sort_by_name(self, get_all): """Name in client means display_name in database.""" req = mock.MagicMock() ctxt = context.RequestContext( fake.USER_ID, fake.PROJECT_ID, auth_token=True) req.environ = {'cinder.context': ctxt} req.params = {'sort': 'name'} self.controller._view_builder.detail_list = mock.Mock() self.controller._get_volumes(req, True) get_all.assert_called_once_with( ctxt, None, CONF.osapi_max_limit, sort_dirs=['desc'], viewable_admin_meta=True, sort_keys=['display_name'], filters={}, offset=0) def test_get_volume_filter_options_using_config(self): filter_list = ["name", "status", "metadata", "bootable", "migration_status", "availability_zone", "group_id"] # Clear the filters collection to make sure the filters collection # cache can be reloaded using tmp filter file. common._FILTERS_COLLECTION = None tmp_filter_file = self.tmp_path + '/resource_filters_tests.json' self.override_config('resource_query_filters_file', tmp_filter_file) with open(tmp_filter_file, 'w') as f: f.write(json.dumps({"volume": filter_list})) self.assertEqual(filter_list, self.controller._get_volume_filter_options()) # Reset the CONF.resource_query_filters_file and clear the filters # collection to avoid leaking other cases, and it will be re-loaded # from CONF.resource_query_filters_file in next call. 
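        # For illustration only: with the filter_list used above, the temporary
        # resource_filters_tests.json written by this test contains JSON of the
        # form
        #   {"volume": ["name", "status", "metadata", "bootable",
        #               "migration_status", "availability_zone", "group_id"]}
        # i.e. a mapping from a resource name to the query filters permitted
        # for it.  The file name and contents here are the test's own fixture
        # values, not a recommendation for production configuration.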
self._reset_filter_file() ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315577.1871192 cinder-27.0.0/cinder/tests/unit/api/v3/0000775000175000017500000000000000000000000017555 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/api/v3/__init__.py0000664000175000017500000000000000000000000021654 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/api/v3/fakes.py0000664000175000017500000001267200000000000021230 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import datetime import iso8601 from cinder.objects import fields from cinder.tests.unit import fake_constants as fake from cinder.tests.unit import fake_volume from cinder import utils FAKE_UUID = fake.OBJECT_ID DEFAULT_VOL_NAME = "displayname" DEFAULT_VOL_DESCRIPTION = "displaydesc" DEFAULT_VOL_SIZE = 1 DEFAULT_VOL_TYPE = "vol_type_name" DEFAULT_VOL_STATUS = "fakestatus" DEFAULT_VOL_ID = fake.VOLUME_ID DEFAULT_AZ = "fakeaz" def fake_message(id, **kwargs): message = { 'id': id, 'action_id': "002", 'detail_id': "001", 'event_id': "VOLUME_VOLUME_002_001", 'message_level': "ERROR", 'request_id': FAKE_UUID, 'updated_at': datetime.datetime(1900, 1, 1, 1, 1, 1, tzinfo=iso8601.UTC), 'created_at': datetime.datetime(1900, 1, 1, 1, 1, 1, tzinfo=iso8601.UTC), 'expires_at': datetime.datetime(1900, 1, 1, 1, 1, 1, tzinfo=iso8601.UTC), } message.update(kwargs) return message def fake_message_get(self, context, message_id): return fake_message(message_id) def create_volume(id, **kwargs): volume = { 'id': id, 'user_id': fake.USER_ID, 'project_id': fake.PROJECT_ID, 'host': 'fakehost', 'size': DEFAULT_VOL_SIZE, 'availability_zone': DEFAULT_AZ, 'status': DEFAULT_VOL_STATUS, 'migration_status': None, 'attach_status': 'attached', 'name': 'vol name', 'display_name': DEFAULT_VOL_NAME, 'display_description': DEFAULT_VOL_DESCRIPTION, 'updated_at': datetime.datetime(1900, 1, 1, 1, 1, 1, tzinfo=iso8601.UTC), 'created_at': datetime.datetime(1900, 1, 1, 1, 1, 1, tzinfo=iso8601.UTC), 'snapshot_id': None, 'source_volid': None, 'volume_type_id': '3e196c20-3c06-11e2-81c1-0800200c9a66', 'encryption_key_id': None, 'volume_admin_metadata': [{'key': 'attached_mode', 'value': 'rw'}, {'key': 'readonly', 'value': 'False'}], 'bootable': False, 'launched_at': datetime.datetime(1900, 1, 1, 1, 1, 1, tzinfo=iso8601.UTC), 'volume_type': fake_volume.fake_db_volume_type(name=DEFAULT_VOL_TYPE), 'replication_status': 'disabled', 'replication_extended_status': None, 'replication_driver_data': None, 'volume_attachment': [], 'multiattach': False, 'group_id': fake.GROUP_ID, } volume.update(kwargs) if kwargs.get('volume_glance_metadata', None): volume['bootable'] = True if kwargs.get('attach_status') == 'detached': del volume['volume_admin_metadata'][0] return volume def fake_volume_create(self, 
context, size, name, description, snapshot=None, group_id=None, **param): vol = create_volume(DEFAULT_VOL_ID) vol['size'] = size vol['display_name'] = name vol['display_description'] = description source_volume = param.get('source_volume') or {} vol['source_volid'] = source_volume.get('id') vol['bootable'] = False vol['volume_attachment'] = [] vol['multiattach'] = utils.get_bool_param('multiattach', param) try: vol['snapshot_id'] = snapshot['id'] except (KeyError, TypeError): vol['snapshot_id'] = None vol['availability_zone'] = param.get('availability_zone', 'fakeaz') if group_id: vol['group_id'] = group_id return vol def fake_volume_type_get(context, id, *args, **kwargs): return {'id': id, 'name': 'vol_type_name', 'description': 'A fake volume type', 'is_public': True, 'projects': [], 'extra_specs': {}, 'created_at': None, 'deleted_at': None, 'updated_at': None, 'qos_specs_id': fake.QOS_SPEC_ID, 'deleted': False} def fake_default_type_get(id=fake.VOLUME_TYPE_ID): return {'id': id, 'name': 'vol_type_name', 'description': 'A fake volume type', 'is_public': True, 'projects': [], 'extra_specs': {}, 'created_at': None, 'deleted_at': None, 'updated_at': None, 'qos_specs_id': fake.QOS_SPEC_ID, 'deleted': False} def fake_snapshot(id, **kwargs): snapshot = {'id': id, 'volume_id': fake.VOLUME_ID, 'status': fields.SnapshotStatus.AVAILABLE, 'volume_size': 100, 'created_at': None, 'display_name': 'Default name', 'display_description': 'Default description', 'project_id': fake.PROJECT_ID, 'snapshot_metadata': []} snapshot.update(kwargs) return snapshot ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/api/v3/stubs.py0000664000175000017500000000262600000000000021275 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import datetime import iso8601 from cinder.tests.unit import fake_constants as fake FAKE_UUID = fake.OBJECT_ID def stub_message(id, **kwargs): message = { 'id': id, 'action_id': "002", 'detail_id': "001", 'event_id': "VOLUME_VOLUME_002_001", 'message_level': "ERROR", 'request_id': FAKE_UUID, 'updated_at': datetime.datetime(1900, 1, 1, 1, 1, 1, tzinfo=iso8601.UTC), 'created_at': datetime.datetime(1900, 1, 1, 1, 1, 1, tzinfo=iso8601.UTC), 'expires_at': datetime.datetime(1900, 1, 1, 1, 1, 1, tzinfo=iso8601.UTC), } message.update(kwargs) return message def stub_message_get(self, context, message_id): return stub_message(message_id) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/api/v3/test_attachments.py0000664000175000017500000004760100000000000023511 0ustar00zuulzuul00000000000000# Copyright (C) 2017 HuaWei Corporation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Tests for attachments Api.""" from unittest import mock import ddt import webob from cinder.api import microversions as mv from cinder.api.v3 import attachments as v3_attachments from cinder import context from cinder import exception from cinder import objects from cinder.tests.unit.api import fakes from cinder.tests.unit import fake_constants as fake from cinder.tests.unit import test from cinder.volume import api as volume_api from cinder.volume import rpcapi as volume_rpcapi from cinder.volume import volume_utils @ddt.ddt class AttachmentsAPITestCase(test.TestCase): """Test Case for attachment API.""" def setUp(self): super(AttachmentsAPITestCase, self).setUp() self.controller = v3_attachments.AttachmentsController() self.volume_api = volume_api.API() self.ctxt = context.RequestContext(fake.USER_ID, fake.PROJECT_ID, auth_token=True, is_admin=True) self.volume1 = self._create_volume(display_name='fake_volume_1', project_id=fake.PROJECT_ID) self.volume2 = self._create_volume(display_name='fake_volume_2', project_id=fake.PROJECT2_ID) self.attachment1 = self._create_attachment( volume_uuid=self.volume1.id, instance_uuid=fake.UUID1, host='host-a') self.attachment2 = self._create_attachment( volume_uuid=self.volume1.id, instance_uuid=fake.UUID1, host='host-b') self.attachment3 = self._create_attachment( volume_uuid=self.volume1.id, instance_uuid=fake.UUID2, host='host-c') self.attachment4 = self._create_attachment( volume_uuid=self.volume2.id, instance_uuid=fake.UUID2, host='host-d') self.addCleanup(self._cleanup) def _cleanup(self): self.attachment1.destroy() self.attachment2.destroy() self.attachment3.destroy() self.attachment4.destroy() self.volume1.destroy() self.volume2.destroy() def _create_volume(self, ctxt=None, display_name=None, project_id=None): """Create a volume object.""" ctxt = ctxt or self.ctxt volume = objects.Volume(ctxt) volume.display_name = display_name volume.project_id = project_id volume.status = 'available' volume.attach_status = 'attached' volume.create() return volume def test_create_attachment(self): req = fakes.HTTPRequest.blank('/v3/%s/attachments' % fake.PROJECT_ID, version=mv.NEW_ATTACH) body = { "attachment": { "connector": None, "instance_uuid": fake.UUID1, "volume_uuid": self.volume1.id }, } attachment = self.controller.create(req, body=body) self.assertEqual(self.volume1.id, attachment['attachment']['volume_id']) self.assertEqual(fake.UUID1, attachment['attachment']['instance']) @mock.patch.object(volume_rpcapi.VolumeAPI, 'attachment_update') def test_update_attachment(self, mock_update): fake_connector = {'fake_key': 'fake_value', 'host': 'somehost'} mock_update.return_value = fake_connector req = fakes.HTTPRequest.blank('/v3/%s/attachments/%s' % (fake.PROJECT_ID, self.attachment1.id), version=mv.NEW_ATTACH, use_admin_context=True) body = { "attachment": { "connector": {'fake_key': 'fake_value', 'host': 'somehost', 'connection_info': 'a'}, }, } attachment = self.controller.update(req, self.attachment1.id, body=body) self.assertEqual(fake_connector, attachment['attachment']['connection_info']) self.assertEqual(fake.UUID1, 
attachment['attachment']['instance']) def test_update_attachment_with_empty_connector_object(self): req = fakes.HTTPRequest.blank('/v3/%s/attachments/%s' % (fake.PROJECT_ID, self.attachment1.id), version=mv.NEW_ATTACH, use_admin_context=True) body = { "attachment": { "connector": {}, }, } self.assertRaises(exception.ValidationError, self.controller.update, req, self.attachment1.id, body=body) @mock.patch.object(volume_api.API, 'attachment_update') def test_update_attachment_not_authorized(self, mock_update): exc = exception.NotAuthorized(reason='Operation is not authorized.') mock_update.side_effect = exc req = fakes.HTTPRequest.blank('/v3/%s/attachments/%s' % (fake.PROJECT_ID, self.attachment1.id), version=mv.NEW_ATTACH, use_admin_context=True) body = { "attachment": { "connector": {'fake_key': 'fake_value', 'host': 'somehost', 'connection_info': 'a'}, }, } self.assertRaises(exception.NotAuthorized, self.controller.update, req, self.attachment1.id, body=body) @mock.patch('cinder.volume.api.API.attachment_update') def test_update_attachment_invalid_volume_conflict(self, mock_update): exc = exception.ResourceConflict( reason='Duplicate connectors or improper volume status') mock_update.side_effect = exc req = fakes.HTTPRequest.blank('/v3/%s/attachments/%s' % (fake.PROJECT_ID, self.attachment1.id), version=mv.NEW_ATTACH, use_admin_context=True) body = { "attachment": { "connector": {'fake_key': 'fake_value', 'host': 'somehost', 'connection_info': 'a'}, }, } self.assertRaises(exception.ResourceConflict, self.controller.update, req, self.attachment1.id, body=body) @mock.patch.object(volume_api.API, 'attachment_update') def test_update_attachment_generic_exception_invalid(self, mock_update): exc = exception.Invalid(reason='Invalid class generic Exception') mock_update.side_effect = exc req = fakes.HTTPRequest.blank('/v3/%s/attachments/%s' % (fake.PROJECT_ID, self.attachment1.id), version=mv.NEW_ATTACH, use_admin_context=True) body = { "attachment": { "connector": {'fake_key': 'fake_value', 'host': 'somehost', 'connection_info': 'a'}, }, } self.assertRaises(exception.Invalid, self.controller.update, req, self.attachment1.id, body=body) @mock.patch.object(volume_api.API, 'attachment_update') def test_update_attachment_cinder_exception(self, mock_update): exc = exception.CinderException(reason='Generic Cinder Exception') mock_update.side_effect = exc req = fakes.HTTPRequest.blank('/v3/%s/attachments/%s' % (fake.PROJECT_ID, self.attachment1.id), version=mv.NEW_ATTACH, use_admin_context=True) body = { "attachment": { "connector": {'fake_key': 'fake_value', 'host': 'somehost', 'connection_info': 'a'}, }, } self.assertRaises(webob.exc.HTTPInternalServerError, self.controller.update, req, self.attachment1.id, body=body) @mock.patch.object(volume_api.API, 'attachment_update') def test_update_attachment_all_other_exceptions(self, mock_update): exc = Exception('The most generic Exception') mock_update.side_effect = exc req = fakes.HTTPRequest.blank('/v3/%s/attachments/%s' % (fake.PROJECT_ID, self.attachment1.id), version=mv.NEW_ATTACH, use_admin_context=True) body = { "attachment": { "connector": {'fake_key': 'fake_value', 'host': 'somehost', 'connection_info': 'a'}, }, } self.assertRaises(webob.exc.HTTPInternalServerError, self.controller.update, req, self.attachment1.id, body=body) @ddt.data(mv.get_prior_version(mv.RESOURCE_FILTER), mv.RESOURCE_FILTER, mv.LIKE_FILTER) @mock.patch('cinder.api.common.reject_invalid_filters') def test_attachment_list_with_general_filter(self, version, mock_update): url = 
'/v3/%s/attachments' % fake.PROJECT_ID req = fakes.HTTPRequest.blank(url, version=version, use_admin_context=False) self.controller.index(req) if version != mv.get_prior_version(mv.RESOURCE_FILTER): support_like = True if version == mv.LIKE_FILTER else False mock_update.assert_called_once_with(req.environ['cinder.context'], mock.ANY, 'attachment', support_like) @ddt.data('reserved', 'attached') @mock.patch.object(volume_rpcapi.VolumeAPI, 'attachment_delete') def test_delete_attachment(self, status, mock_delete): self.patch('cinder.volume.api.API.attachment_deletion_allowed', return_value=None) volume1 = self._create_volume(display_name='fake_volume_1', project_id=fake.PROJECT_ID) attachment = self._create_attachment( volume_uuid=volume1.id, instance_uuid=fake.UUID1, attach_status=status) req = fakes.HTTPRequest.blank('/v3/%s/attachments/%s' % (fake.PROJECT_ID, attachment.id), version=mv.NEW_ATTACH, use_admin_context=True) self.controller.delete(req, attachment.id) volume2 = objects.Volume.get_by_id(self.ctxt, volume1.id) # Volume and attachment status is changed on the API service self.assertEqual('detached', volume2.attach_status) self.assertEqual('available', volume2.status) self.assertRaises( exception.VolumeAttachmentNotFound, objects.VolumeAttachment.get_by_id, self.ctxt, attachment.id) if status != 'reserved': mock_delete.assert_called_once_with(req.environ['cinder.context'], attachment.id, mock.ANY) def _create_attachment(self, ctxt=None, volume_uuid=None, instance_uuid=None, mountpoint=None, attach_time=None, detach_time=None, attach_status=None, attach_mode=None, host=''): """Create an attachment object.""" ctxt = ctxt or self.ctxt attachment = objects.VolumeAttachment(ctxt) attachment.volume_id = volume_uuid attachment.instance_uuid = instance_uuid attachment.mountpoint = mountpoint attachment.attach_time = attach_time attachment.detach_time = detach_time attachment.attach_status = attach_status or 'reserved' attachment.attach_mode = attach_mode attachment.connector = {'host': host} attachment.create() return attachment @ddt.data("instance_uuid", "volume_uuid") def test_create_attachment_without_resource_uuid(self, resource_uuid): req = fakes.HTTPRequest.blank('/v3/%s/attachments' % fake.PROJECT_ID, version=mv.NEW_ATTACH) body = { "attachment": { "connector": None } } body["attachment"][resource_uuid] = "test_id" self.assertRaises(exception.ValidationError, self.controller.create, req, body=body) @ddt.data( {"attachment": { "connector": None, "instance_uuid": "aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa", "volume_uuid": "invalid-uuid"}}, {"attachment": { "instance_uuid": "invalid-uuid", "volume_uuid": "aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa"}}) def test_create_attachment_with_invalid_resource_uuid(self, fake_body): req = fakes.HTTPRequest.blank('/v3/%s/attachments' % fake.PROJECT_ID, version=mv.NEW_ATTACH) self.assertRaises(exception.ValidationError, self.controller.create, req, body=fake_body) @mock.patch('cinder.volume.api.API._attachment_reserve') def test_create_attachment_in_use_volume_multiattach_false(self, mock_reserve): """Negative test for creating an attachment on an in-use volume.""" req = fakes.HTTPRequest.blank('/v3/%s/attachments' % fake.PROJECT_ID, version=mv.NEW_ATTACH) body = { "attachment": { "connector": None, "instance_uuid": fake.UUID1, "volume_uuid": self.volume1.id }, } mock_reserve.side_effect = ( exception.InvalidVolume( reason="Volume %s status must be available or " "downloading" % self.volume1.id)) # Note that if we were using the full WSGi stack, the # 
ResourceExceptionHandler would convert this to an HTTPBadRequest. self.assertRaises(exception.InvalidVolume, self.controller.create, req, body=body) @ddt.data(False, True) def test_list_attachments(self, is_detail): url = '/v3/%s/attachments' % fake.PROJECT_ID list_func = self.controller.index if is_detail: url = '/v3/%s/groups/detail' % fake.PROJECT_ID list_func = self.controller.detail req = fakes.HTTPRequest.blank(url, version=mv.NEW_ATTACH, use_admin_context=True) res_dict = list_func(req) self.assertEqual(1, len(res_dict)) self.assertEqual(3, len(res_dict['attachments'])) self.assertEqual(self.attachment3.id, res_dict['attachments'][0]['id']) def test_list_attachments_with_limit(self): url = '/v3/%s/attachments?limit=1' % fake.PROJECT_ID req = fakes.HTTPRequest.blank(url, version=mv.NEW_ATTACH, use_admin_context=True) res_dict = self.controller.index(req) self.assertEqual(1, len(res_dict)) self.assertEqual(1, len(res_dict['attachments'])) def test_list_attachments_with_marker(self): url = '/v3/%s/attachments?marker=%s' % (fake.PROJECT_ID, self.attachment3.id) req = fakes.HTTPRequest.blank(url, version=mv.NEW_ATTACH, use_admin_context=True) res_dict = self.controller.index(req) self.assertEqual(1, len(res_dict)) self.assertEqual(2, len(res_dict['attachments'])) self.assertEqual(self.attachment2.id, res_dict['attachments'][0]['id']) @ddt.data("desc", "asc") def test_list_attachments_with_sort(self, sort_dir): url = '/v3/%s/attachments?sort_key=id&sort_dir=%s' % (fake.PROJECT_ID, sort_dir) req = fakes.HTTPRequest.blank(url, version=mv.NEW_ATTACH, use_admin_context=True) res_dict = self.controller.index(req) self.assertEqual(1, len(res_dict)) self.assertEqual(3, len(res_dict['attachments'])) order_ids = sorted([self.attachment1.id, self.attachment2.id, self.attachment3.id]) expect_result = order_ids[2] if sort_dir == "desc" else order_ids[0] self.assertEqual(expect_result, res_dict['attachments'][0]['id']) @ddt.data({'admin': True, 'request_url': '?all_tenants=1', 'count': 4}, {'admin': False, 'request_url': '?all_tenants=1', 'count': 3}, {'admin': True, 'request_url': '?all_tenants=1&project_id=%s' % fake.PROJECT2_ID, 'count': 1}, {'admin': False, 'request_url': '', 'count': 3}, {'admin': False, 'request_url': '?instance_id=%s' % fake.UUID1, 'count': 2}, {'admin': False, 'request_url': '?instance_id=%s' % fake.UUID2, 'count': 1}) @ddt.unpack def test_list_attachment_with_tenants(self, admin, request_url, count): url = '/v3/%s/attachments%s' % (fake.PROJECT_ID, request_url) req = fakes.HTTPRequest.blank(url, version=mv.NEW_ATTACH, use_admin_context=admin) res_dict = self.controller.index(req) self.assertEqual(1, len(res_dict)) self.assertEqual(count, len(res_dict['attachments'])) @mock.patch.object(volume_utils, 'notify_about_volume_usage') def test_complete_attachment(self, mock_notify): def fake_notify(context, volume, event_suffix, extra_usage_info=None, host=None): # Check the notify content is in-use volume and 'attach.end' self.assertEqual('in-use', volume['status']) self.assertEqual('attach.end', event_suffix) mock_notify.side_effect = fake_notify req = fakes.HTTPRequest.blank('/v3/%s/attachments/%s/action' % (fake.PROJECT_ID, self.attachment1.id), version=mv.NEW_ATTACH_COMPLETION, use_admin_context=True) body = {"os-complete": {}} self.controller.complete(req, self.attachment1.id, body=body) # Check notify has been called once mock_notify.assert_called_once() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 
cinder-27.0.0/cinder/tests/unit/api/v3/test_backups.py0000664000175000017500000003344100000000000022623 0ustar00zuulzuul00000000000000# Copyright (c) 2016 Intel, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """The backups V3 api.""" import copy from unittest import mock import ddt from oslo_serialization import jsonutils from oslo_utils import strutils import webob from cinder.api import microversions as mv from cinder.api.openstack import api_version_request as api_version from cinder.api.v3 import backups from cinder.api.views import backups as backup_view import cinder.backup from cinder import context from cinder import exception from cinder.objects import fields from cinder.tests.unit.api import fakes from cinder.tests.unit.api.v3.test_volumes import ENCRYPTION_KEY_ID_IN_DETAILS from cinder.tests.unit import fake_constants as fake from cinder.tests.unit import test from cinder.tests.unit import utils as test_utils @ddt.ddt class BackupsControllerAPITestCase(test.TestCase): """Test cases for backups API.""" def setUp(self): super(BackupsControllerAPITestCase, self).setUp() self.backup_api = cinder.backup.API() self.ctxt = context.RequestContext(fake.USER_ID, fake.PROJECT_ID, auth_token=True, is_admin=True) self.controller = backups.BackupsController() self.user_context = context.RequestContext( fake.USER_ID, fake.PROJECT_ID, auth_token=True) def _fake_update_request(self, backup_id, version=mv.BACKUP_UPDATE): req = fakes.HTTPRequest.blank('/v3/%s/backups/%s/update' % (fake.PROJECT_ID, backup_id)) req.environ['cinder.context'].is_admin = True req.headers['Content-Type'] = 'application/json' req.headers['OpenStack-API-Version'] = 'volume ' + version req.api_version_request = api_version.APIVersionRequest(version) return req def test_update_wrong_version(self): req = self._fake_update_request( fake.BACKUP_ID, version=mv.get_prior_version(mv.BACKUP_UPDATE)) body = {"backup": {"name": "Updated Test Name", }} self.assertRaises(exception.VersionNotFoundForAPIMethod, self.controller.update, req, fake.BACKUP_ID, body) def test_backup_update_with_no_body(self): # omit body from the request req = self._fake_update_request(fake.BACKUP_ID) self.assertRaises(exception.ValidationError, self.controller.update, req, fake.BACKUP_ID, body=None) def test_backup_update_with_unsupported_field(self): req = self._fake_update_request(fake.BACKUP_ID) body = {"backup": {"id": fake.BACKUP2_ID, "description": "", }} self.assertRaises(exception.ValidationError, self.controller.update, req, fake.BACKUP_ID, body=body) def test_backup_update_with_backup_not_found(self): req = self._fake_update_request(fake.BACKUP_ID) updates = { "name": "Updated Test Name", "description": "Updated Test description.", } body = {"backup": updates} self.assertRaises(exception.NotFound, self.controller.update, req, fake.BACKUP_ID, body=body) def _create_multiple_backups_with_different_project(self): test_utils.create_backup( context.RequestContext(fake.USER_ID, fake.PROJECT_ID, True)) test_utils.create_backup( 
context.RequestContext(fake.USER_ID, fake.PROJECT_ID, True)) test_utils.create_backup( context.RequestContext(fake.USER_ID, fake.PROJECT2_ID, True)) @ddt.data('backups', 'backups/detail') def test_list_backup_with_count_param_version_not_matched(self, action): self._create_multiple_backups_with_different_project() is_detail = True if 'detail' in action else False req = fakes.HTTPRequest.blank("/v3/%s?with_count=True" % action) req.headers = mv.get_mv_header( mv.get_prior_version(mv.SUPPORT_COUNT_INFO)) req.api_version_request = mv.get_api_version( mv.get_prior_version(mv.SUPPORT_COUNT_INFO)) ctxt = context.RequestContext(fake.USER_ID, fake.PROJECT_ID, True) req.environ['cinder.context'] = ctxt res_dict = self.controller._get_backups(req, is_detail=is_detail) self.assertNotIn('count', res_dict) @ddt.data({'method': 'backups', 'display_param': 'True'}, {'method': 'backups', 'display_param': 'False'}, {'method': 'backups', 'display_param': '1'}, {'method': 'backups/detail', 'display_param': 'True'}, {'method': 'backups/detail', 'display_param': 'False'}, {'method': 'backups/detail', 'display_param': '1'} ) @ddt.unpack def test_list_backups_with_count_param(self, method, display_param): self._create_multiple_backups_with_different_project() is_detail = True if 'detail' in method else False show_count = strutils.bool_from_string(display_param, strict=True) # Request with 'with_count' and 'limit' req = fakes.HTTPRequest.blank( "/v3/%s?with_count=%s&limit=1" % (method, display_param)) req.headers = mv.get_mv_header(mv.SUPPORT_COUNT_INFO) req.api_version_request = mv.get_api_version(mv.SUPPORT_COUNT_INFO) ctxt = context.RequestContext(fake.USER_ID, fake.PROJECT_ID, False) req.environ['cinder.context'] = ctxt res_dict = self.controller._get_backups(req, is_detail=is_detail) self.assertEqual(1, len(res_dict['backups'])) if show_count: self.assertEqual(2, res_dict['count']) else: self.assertNotIn('count', res_dict) # Request with 'with_count' req = fakes.HTTPRequest.blank( "/v3/%s?with_count=%s" % (method, display_param)) req.headers = mv.get_mv_header(mv.SUPPORT_COUNT_INFO) req.api_version_request = mv.get_api_version(mv.SUPPORT_COUNT_INFO) ctxt = context.RequestContext(fake.USER_ID, fake.PROJECT_ID, False) req.environ['cinder.context'] = ctxt res_dict = self.controller._get_backups(req, is_detail=is_detail) self.assertEqual(2, len(res_dict['backups'])) if show_count: self.assertEqual(2, res_dict['count']) else: self.assertNotIn('count', res_dict) # Request with admin context and 'all_tenants' req = fakes.HTTPRequest.blank( "/v3/%s?with_count=%s&all_tenants=1" % (method, display_param)) req.headers = mv.get_mv_header(mv.SUPPORT_COUNT_INFO) req.api_version_request = mv.get_api_version(mv.SUPPORT_COUNT_INFO) ctxt = context.RequestContext(fake.USER_ID, fake.PROJECT_ID, True) req.environ['cinder.context'] = ctxt res_dict = self.controller._get_backups(req, is_detail=is_detail) self.assertEqual(3, len(res_dict['backups'])) if show_count: self.assertEqual(3, res_dict['count']) else: self.assertNotIn('count', res_dict) @ddt.data(mv.get_prior_version(mv.RESOURCE_FILTER), mv.RESOURCE_FILTER, mv.LIKE_FILTER) @mock.patch('cinder.api.common.reject_invalid_filters') def test_backup_list_with_general_filter(self, version, mock_update): url = '/v3/%s/backups' % fake.PROJECT_ID req = fakes.HTTPRequest.blank(url, version=version, use_admin_context=False) self.controller.index(req) if version != mv.get_prior_version(mv.RESOURCE_FILTER): support_like = True if version == mv.LIKE_FILTER else False 
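# reject_invalid_filters (patched above as mock_update) is expected to be called exactly once
# for the 'backup' resource, with LIKE-style filtering advertised only from mv.LIKE_FILTER onward.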
mock_update.assert_called_once_with(req.environ['cinder.context'], mock.ANY, 'backup', support_like) @ddt.data(mv.get_prior_version(mv.BACKUP_SORT_NAME), mv.BACKUP_SORT_NAME) def test_backup_list_with_name(self, version): backup1 = test_utils.create_backup( self.ctxt, display_name='b_test_name', status=fields.BackupStatus.AVAILABLE) backup2 = test_utils.create_backup( self.ctxt, display_name='a_test_name', status=fields.BackupStatus.AVAILABLE) url = '/v3/%s/backups?sort_key=name' % fake.PROJECT_ID req = fakes.HTTPRequest.blank(url, version=version) if version == mv.get_prior_version(mv.BACKUP_SORT_NAME): self.assertRaises(exception.InvalidInput, self.controller.index, req) else: expect = backup_view.ViewBuilder().summary_list(req, [backup1, backup2]) result = self.controller.index(req) self.assertEqual(expect, result) def test_backup_update(self): backup = test_utils.create_backup( self.ctxt, status=fields.BackupStatus.AVAILABLE) req = self._fake_update_request(fake.BACKUP_ID) new_name = "updated_test_name" new_description = "Updated Test description." updates = { "name": new_name, "description": new_description, } body = {"backup": updates} self.controller.update(req, backup.id, body=body) backup.refresh() self.assertEqual(new_name, backup.display_name) self.assertEqual(new_description, backup.display_description) @ddt.data({"backup": {"description": " sample description", "name": " test name"}}, {"backup": {"description": "sample description ", "name": "test "}}, {"backup": {"description": " sample description ", "name": " test "}}) def test_backup_update_name_description_with_leading_trailing_spaces( self, body): backup = test_utils.create_backup( self.ctxt, status=fields.BackupStatus.AVAILABLE) req = self._fake_update_request(fake.BACKUP_ID) expected_body = copy.deepcopy(body) self.controller.update(req, backup.id, body=body) backup.refresh() # backup update call doesn't return 'description' in response so get # the updated backup to assert name and description req = webob.Request.blank('/v3/%s/backups/%s' % ( fake.PROJECT_ID, backup.id)) req.method = 'GET' req.headers['Content-Type'] = 'application/json' res = req.get_response(fakes.wsgi_app( fake_auth_context=self.user_context)) res_dict = jsonutils.loads(res.body) self.assertEqual(expected_body['backup']['name'].strip(), res_dict['backup']['name']) self.assertEqual(expected_body['backup']['description'].strip(), res_dict['backup']['description']) @ddt.data(mv.get_prior_version(mv.BACKUP_METADATA), mv.BACKUP_METADATA) def test_backup_show_with_metadata(self, version): backup = test_utils.create_backup( self.ctxt, display_name='test_backup_metadata', status=fields.BackupStatus.AVAILABLE) # show backup metadata url = '/v3/%s/backups/%s' % (fake.PROJECT_ID, backup.id) req = fakes.HTTPRequest.blank(url, version=version) backup_get = self.controller.show(req, backup.id)['backup'] if version == mv.get_prior_version(mv.BACKUP_METADATA): self.assertNotIn('metadata', backup_get) else: self.assertIn('metadata', backup_get) @ddt.data(*ENCRYPTION_KEY_ID_IN_DETAILS) @ddt.unpack def test_backup_show_with_encryption_key_id(self, expected_in_details, encryption_key_id, version): backup = test_utils.create_backup(self.ctxt, encryption_key_id=encryption_key_id) self.addCleanup(backup.destroy) url = '/v3/%s/backups/%s' % (fake.PROJECT_ID, backup.id) req = fakes.HTTPRequest.blank(url, version=version) backup_details = self.controller.show(req, backup.id)['backup'] if expected_in_details: self.assertIn('encryption_key_id', backup_details) else: 
self.assertNotIn('encryption_key_id', backup_details) def test_backup_update_with_null_validate(self): backup = test_utils.create_backup( self.ctxt, status=fields.BackupStatus.AVAILABLE) req = self._fake_update_request(fake.BACKUP_ID) updates = { "name": None, } body = {"backup": updates} self.controller.update(req, backup.id, body=body) backup.refresh() self.assertEqual(fields.BackupStatus.AVAILABLE, backup.status) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/api/v3/test_cluster.py0000664000175000017500000003440600000000000022656 0ustar00zuulzuul00000000000000# Copyright (c) 2016 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import datetime from unittest import mock import ddt import iso8601 from oslo_utils import versionutils from cinder.api import extensions from cinder.api import microversions as mv from cinder.api.openstack import api_version_request as api_version from cinder.api.v3 import clusters from cinder import context from cinder import exception from cinder.tests.unit import fake_cluster from cinder.tests.unit import test CLUSTERS = [ fake_cluster.fake_db_cluster( id=1, replication_status='error', frozen=False, active_backend_id='replication1', last_heartbeat=datetime.datetime(2016, 6, 1, 2, 46, 28), updated_at=datetime.datetime(2016, 6, 1, 2, 46, 28), created_at=datetime.datetime(2016, 6, 1, 2, 46, 28)), fake_cluster.fake_db_cluster( id=2, name='cluster2', num_hosts=2, num_down_hosts=1, disabled=True, replication_status='error', frozen=True, active_backend_id='replication2', updated_at=datetime.datetime(2016, 6, 1, 1, 46, 28), created_at=datetime.datetime(2016, 6, 1, 1, 46, 28)) ] CLUSTERS_ORM = [fake_cluster.fake_cluster_orm(**kwargs) for kwargs in CLUSTERS] EXPECTED = [{'created_at': datetime.datetime(2016, 6, 1, 2, 46, 28), 'disabled_reason': None, 'last_heartbeat': datetime.datetime(2016, 6, 1, 2, 46, 28), 'name': 'cluster_name', 'binary': 'cinder-volume', 'num_down_hosts': 0, 'num_hosts': 0, 'state': 'up', 'status': 'enabled', 'replication_status': 'error', 'frozen': False, 'active_backend_id': 'replication1', 'updated_at': datetime.datetime(2016, 6, 1, 2, 46, 28)}, {'created_at': datetime.datetime(2016, 6, 1, 1, 46, 28), 'disabled_reason': None, 'last_heartbeat': '', 'name': 'cluster2', 'binary': 'cinder-volume', 'num_down_hosts': 1, 'num_hosts': 2, 'state': 'down', 'status': 'disabled', 'replication_status': 'error', 'frozen': True, 'active_backend_id': 'replication2', 'updated_at': datetime.datetime(2016, 6, 1, 1, 46, 28)}] class FakeRequest(object): def __init__(self, is_admin=True, version=mv.CLUSTER_SUPPORT, **kwargs): self.GET = kwargs self.headers = {'OpenStack-API-Version': 'volume ' + version} self.api_version_request = api_version.APIVersionRequest(version) self.environ = { 'cinder.context': context.RequestContext(user_id=None, project_id=None, is_admin=is_admin, read_deleted='no', overwrite=False) } def fake_utcnow(with_timezone=False): 
tzinfo = iso8601.UTC if with_timezone else None return datetime.datetime(2016, 6, 1, 2, 46, 30, tzinfo=tzinfo) @ddt.ddt @mock.patch('oslo_utils.timeutils.utcnow', fake_utcnow) class ClustersTestCase(test.TestCase): """Test Case for Clusters.""" LIST_FILTERS = ({}, {'is_up': True}, {'disabled': False}, {'num_hosts': 2}, {'num_down_hosts': 1}, {'binary': 'cinder-volume'}, {'is_up': True, 'disabled': False, 'num_hosts': 2, 'num_down_hosts': 1, 'binary': 'cinder-volume'}) REPLICATION_FILTERS = ({'replication_status': 'error'}, {'frozen': True}, {'active_backend_id': 'replication'}) def _get_expected(self, version=mv.get_prior_version(mv.REPLICATION_CLUSTER)): if (versionutils.convert_version_to_tuple(version) >= versionutils.convert_version_to_tuple(mv.REPLICATION_CLUSTER)): return EXPECTED expect = [] for cluster in EXPECTED: cluster = cluster.copy() for key in ('replication_status', 'frozen', 'active_backend_id'): cluster.pop(key) expect.append(cluster) return expect def setUp(self): super(ClustersTestCase, self).setUp() self.context = context.get_admin_context() self.ext_mgr = extensions.ExtensionManager() self.ext_mgr.extensions = {} self.controller = clusters.ClusterController(self.ext_mgr) @mock.patch('cinder.db.cluster_get_all', return_value=CLUSTERS_ORM) def _test_list(self, get_all_mock, detailed, filters=None, expected=None, version=mv.get_prior_version(mv.REPLICATION_CLUSTER)): filters = filters or {} req = FakeRequest(version=version, **filters) method = getattr(self.controller, 'detail' if detailed else 'index') clusters = method(req) filters = filters.copy() filters.setdefault('is_up', None) filters.setdefault('read_deleted', 'no') self.assertEqual(expected, clusters) get_all_mock.assert_called_once_with( req.environ['cinder.context'], get_services=False, services_summary=detailed, **filters) @ddt.data(*LIST_FILTERS) def test_index_detail(self, filters): """Verify that we get all clusters with detailed data.""" expected = {'clusters': self._get_expected()} self._test_list(detailed=True, filters=filters, expected=expected) @ddt.data(*LIST_FILTERS) def test_index_summary(self, filters): """Verify that we get all clusters with summary data.""" expected = {'clusters': [{'name': 'cluster_name', 'binary': 'cinder-volume', 'state': 'up', 'status': 'enabled'}, {'name': 'cluster2', 'binary': 'cinder-volume', 'state': 'down', 'status': 'disabled'}]} self._test_list(detailed=False, filters=filters, expected=expected) @ddt.data(*REPLICATION_FILTERS) def test_index_detail_fail_old(self, filters): self.assertRaises(exception.InvalidInput, self._test_list, detailed=True, filters=filters) @ddt.data(*REPLICATION_FILTERS) def test_index_summary_fail_old(self, filters): self.assertRaises(exception.InvalidInput, self._test_list, detailed=False, filters=filters) @ddt.data(True, False) def test_index_unauthorized(self, detailed): """Verify that unauthorized user can't list clusters.""" self.assertRaises(exception.PolicyNotAuthorized, self._test_list, detailed=detailed, filters={'is_admin': False}) @ddt.data(True, False) def test_index_wrong_version(self, detailed): """Verify the wrong version so that user can't list clusters.""" self.assertRaises(exception.VersionNotFoundForAPIMethod, self._test_list, detailed=detailed, version=mv.get_prior_version(mv.CLUSTER_SUPPORT)) @ddt.data(*REPLICATION_FILTERS) def test_index_detail_replication_new_fields(self, filters): expected = {'clusters': self._get_expected(mv.REPLICATION_CLUSTER)} self._test_list(detailed=True, filters=filters, expected=expected, 
version=mv.REPLICATION_CLUSTER) @ddt.data(*REPLICATION_FILTERS) def test_index_summary_replication_new_fields(self, filters): expected = {'clusters': [{'name': 'cluster_name', 'binary': 'cinder-volume', 'state': 'up', 'replication_status': 'error', 'status': 'enabled'}, {'name': 'cluster2', 'binary': 'cinder-volume', 'state': 'down', 'replication_status': 'error', 'status': 'disabled'}]} self._test_list(detailed=False, filters=filters, expected=expected, version=mv.REPLICATION_CLUSTER) @mock.patch('cinder.db.sqlalchemy.api.cluster_get', return_value=CLUSTERS_ORM[0]) def test_show(self, get_mock): req = FakeRequest() expected = {'cluster': self._get_expected()[0]} cluster = self.controller.show(req, 'cluster_name', 'cinder-volume') self.assertEqual(expected, cluster) get_mock.assert_called_once_with( req.environ['cinder.context'], None, services_summary=True, name='cluster_name', binary='cinder-volume') def test_show_unauthorized(self): req = FakeRequest(is_admin=False) self.assertRaises(exception.PolicyNotAuthorized, self.controller.show, req, 'name') def test_show_wrong_version(self): req = FakeRequest(version=mv.get_prior_version(mv.CLUSTER_SUPPORT)) self.assertRaises(exception.VersionNotFoundForAPIMethod, self.controller.show, req, 'name') @mock.patch('cinder.db.sqlalchemy.api.cluster_update') @mock.patch('cinder.db.sqlalchemy.api.cluster_get', return_value=CLUSTERS_ORM[1]) def test_update_enable(self, get_mock, update_mock): req = FakeRequest() expected = {'cluster': {'name': 'cluster2', 'binary': 'cinder-volume', 'state': 'down', 'status': 'enabled', 'disabled_reason': None}} res = self.controller.update(req, 'enable', body={'name': 'cluster_name', 'binary': 'cinder-volume'}) self.assertEqual(expected, res) ctxt = req.environ['cinder.context'] get_mock.assert_called_once_with(ctxt, None, binary='cinder-volume', name='cluster_name') update_mock.assert_called_once_with(ctxt, get_mock.return_value.id, {'disabled': False, 'disabled_reason': None}) @mock.patch('cinder.db.sqlalchemy.api.cluster_update') @mock.patch('cinder.db.sqlalchemy.api.cluster_get', return_value=CLUSTERS_ORM[0]) def test_update_disable(self, get_mock, update_mock): req = FakeRequest() disabled_reason = 'For testing' expected = {'cluster': {'name': 'cluster_name', 'state': 'up', 'binary': 'cinder-volume', 'status': 'disabled', 'disabled_reason': disabled_reason}} res = self.controller.update(req, 'disable', body={'name': 'cluster_name', 'binary': 'cinder-volume', 'disabled_reason': disabled_reason}) self.assertEqual(expected, res) ctxt = req.environ['cinder.context'] get_mock.assert_called_once_with(ctxt, None, binary='cinder-volume', name='cluster_name') update_mock.assert_called_once_with( ctxt, get_mock.return_value.id, {'disabled': True, 'disabled_reason': disabled_reason}) def test_update_wrong_action(self): req = FakeRequest() self.assertRaises(exception.NotFound, self.controller.update, req, 'action', body={'name': 'cluster_name'}) @ddt.data('enable', 'disable') def test_update_missing_name(self, action): req = FakeRequest() self.assertRaises(exception.ValidationError, self.controller.update, req, action, body={'binary': 'cinder-volume'}) def test_update_with_binary_more_than_255_characters(self): req = FakeRequest() self.assertRaises(exception.ValidationError, self.controller.update, req, 'enable', body={'name': 'cluster_name', 'binary': 'a' * 256}) def test_update_with_name_more_than_255_characters(self): req = FakeRequest() self.assertRaises(exception.ValidationError, self.controller.update, req, 'enable', 
body={'name': 'a' * 256, 'binary': 'cinder-volume'}) @ddt.data('a' * 256, ' ') def test_update_wrong_disabled_reason(self, disabled_reason): req = FakeRequest() self.assertRaises(exception.ValidationError, self.controller.update, req, 'disable', body={'name': 'cluster_name', 'disabled_reason': disabled_reason}) @ddt.data('enable', 'disable') def test_update_unauthorized(self, action): req = FakeRequest(is_admin=False) self.assertRaises(exception.PolicyNotAuthorized, self.controller.update, req, action, body={'name': 'fake_name'}) @ddt.data('enable', 'disable') def test_update_wrong_version(self, action): req = FakeRequest(version=mv.get_prior_version(mv.CLUSTER_SUPPORT)) self.assertRaises(exception.VersionNotFoundForAPIMethod, self.controller.update, req, action, {}) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/api/v3/test_consistencygroups.py0000664000175000017500000002403100000000000024767 0ustar00zuulzuul00000000000000# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from http import HTTPStatus import ddt import webob from cinder.api import microversions as mv from cinder.api.openstack import api_version_request as api_version from cinder.api.v3 import consistencygroups from cinder import context from cinder import objects from cinder.objects import fields from cinder.tests.unit.api import fakes from cinder.tests.unit import fake_constants as fake from cinder.tests.unit import test @ddt.ddt class ConsistencyGroupsAPITestCase(test.TestCase): """Test Case for consistency groups API.""" def setUp(self): super(ConsistencyGroupsAPITestCase, self).setUp() self.ctxt = context.RequestContext(fake.USER_ID, fake.PROJECT_ID, auth_token=True, is_admin=True) self.user_ctxt = context.RequestContext( fake.USER_ID, fake.PROJECT_ID, auth_token=True) self.controller = consistencygroups.ConsistencyGroupsController() def _create_consistencygroup( self, ctxt=None, name='test_consistencygroup', description='this is a test consistency group', group_type_id=fake.GROUP_TYPE_ID, volume_type_ids=[fake.VOLUME_TYPE_ID], availability_zone='az1', host='fakehost', status=fields.ConsistencyGroupStatus.CREATING, **kwargs): """Create a consistency group object.""" ctxt = ctxt or self.ctxt consistencygroup = objects.Group(ctxt) consistencygroup.user_id = fake.USER_ID consistencygroup.project_id = fake.PROJECT_ID consistencygroup.availability_zone = availability_zone consistencygroup.name = name consistencygroup.description = description consistencygroup.group_type_id = group_type_id consistencygroup.volume_type_ids = volume_type_ids consistencygroup.host = host consistencygroup.status = status consistencygroup.update(kwargs) consistencygroup.create() return consistencygroup def test_update_consistencygroup_empty_parameters(self): consistencygroup = self._create_consistencygroup( ctxt=self.ctxt, status=fields.ConsistencyGroupStatus.AVAILABLE) req = fakes.HTTPRequest.blank('/v3/%s/consistencygroups/%s/update' % (fake.PROJECT_ID, 
consistencygroup.id)) req.environ['cinder.context'].is_admin = True req.headers = mv.get_mv_header(mv.CG_UPDATE_BLANK_PROPERTIES) req.headers['Content-Type'] = 'application/json' req.api_version_request = mv.get_api_version( mv.CG_UPDATE_BLANK_PROPERTIES) body = {"consistencygroup": {"name": "", "description": "", "add_volumes": None, "remove_volumes": None, }} res_dict = self.controller.update(req, consistencygroup.id, body) consistencygroup = objects.Group.get_by_id( self.ctxt, consistencygroup.id) self.assertEqual(HTTPStatus.ACCEPTED, res_dict.status_int) self.assertEqual("", consistencygroup.name) self.assertEqual("", consistencygroup.description) consistencygroup.destroy() def test_update_consistencygroup_empty_parameters_unsupport_version(self): consistencygroup = self._create_consistencygroup( ctxt=self.ctxt, status=fields.ConsistencyGroupStatus.AVAILABLE) req = fakes.HTTPRequest.blank('/v3/%s/consistencygroups/%s/update' % (fake.PROJECT_ID, consistencygroup.id)) req.environ['cinder.context'].is_admin = True req.headers['Content-Type'] = 'application/json' req.headers['OpenStack-API-Version'] = 'volume 3.5' req.api_version_request = api_version.APIVersionRequest('3.5') body = {"consistencygroup": {"name": "", "description": "", "add_volumes": None, "remove_volumes": None, }} self.assertRaisesRegex(webob.exc.HTTPBadRequest, "Name, description, add_volumes, " "and remove_volumes can not be all " "empty in the request body.", self.controller.update, req, consistencygroup.id, body) consistencygroup.destroy() def test_update_consistencygroup_all_empty_parameters_version_36(self): consistencygroup = self._create_consistencygroup( ctxt=self.ctxt, status=fields.ConsistencyGroupStatus.AVAILABLE) req = fakes.HTTPRequest.blank('/v3/%s/consistencygroups/%s/update' % (fake.PROJECT_ID, consistencygroup.id)) req.environ['cinder.context'].is_admin = True req.headers = mv.get_mv_header(mv.CG_UPDATE_BLANK_PROPERTIES) req.headers['Content-Type'] = 'application/json' req.api_version_request = mv.get_api_version( mv.CG_UPDATE_BLANK_PROPERTIES) body = {"consistencygroup": {"name": None, "description": None, "add_volumes": None, "remove_volumes": None, }} self.assertRaisesRegex(webob.exc.HTTPBadRequest, "Must specify " "one or more of the following keys to " "update: name, description, add_volumes, " "remove_volumes.", self.controller.update, req, consistencygroup.id, body) consistencygroup.destroy() def test_update_consistencygroup_all_empty_parameters_not_version_ok(self): consistencygroup = self._create_consistencygroup( ctxt=self.ctxt, status=fields.ConsistencyGroupStatus.AVAILABLE) req = fakes.HTTPRequest.blank('/v3/%s/consistencygroups/%s/update' % (fake.PROJECT_ID, consistencygroup.id)) req.environ['cinder.context'].is_admin = True non_supported_version = mv.get_prior_version( mv.CG_UPDATE_BLANK_PROPERTIES) req.headers = mv.get_mv_header(non_supported_version) req.api_version_request = mv.get_api_version(non_supported_version) req.headers['Content-Type'] = 'application/json' body = {"consistencygroup": {"name": None, "description": None, "add_volumes": None, "remove_volumes": None, }} self.assertRaisesRegex(webob.exc.HTTPBadRequest, "Name, description, " "add_volumes, and remove_volumes can not be " "all empty in the request body.", self.controller.update, req, consistencygroup.id, body) consistencygroup.destroy() def test_update_consistencygroup_no_body(self): consistencygroup = self._create_consistencygroup( ctxt=self.ctxt, status=fields.ConsistencyGroupStatus.AVAILABLE) req = 
fakes.HTTPRequest.blank('/v3/%s/consistencygroups/%s/update' % (fake.PROJECT_ID, consistencygroup.id)) req.environ['cinder.context'].is_admin = True non_supported_version = mv.get_prior_version( mv.CG_UPDATE_BLANK_PROPERTIES) req.headers = mv.get_mv_header(non_supported_version) req.api_version_request = mv.get_api_version(non_supported_version) req.headers['Content-Type'] = 'application/json' body = None self.assertRaisesRegex(webob.exc.HTTPBadRequest, "Missing request body", self.controller.update, req, consistencygroup.id, body) consistencygroup.destroy() def test_update_consistencygroups_no_empty_parameters(self): consistencygroup = self._create_consistencygroup( ctxt=self.ctxt, status=fields.ConsistencyGroupStatus.AVAILABLE) req = fakes.HTTPRequest.blank('/v3/%s/consistencygroups/%s/update' % (fake.PROJECT_ID, consistencygroup.id)) req.environ['cinder.context'].is_admin = True non_supported_version = mv.get_prior_version( mv.CG_UPDATE_BLANK_PROPERTIES) req.headers = mv.get_mv_header(non_supported_version) req.headers['Content-Type'] = 'application/json' req.api_version_request = mv.get_api_version(non_supported_version) body = {"consistencygroup": {"name": "my_fake_cg", "description": "fake consistency group", "add_volumes": "volume-uuid-1", "remove_volumes": "volume-uuid-2, volume uuid-3", }} allow_empty = self.controller._check_update_parameters_v3( req, body['consistencygroup']['name'], body['consistencygroup']['description'], body['consistencygroup']['add_volumes'], body['consistencygroup']['remove_volumes']) self.assertEqual(False, allow_empty) consistencygroup.destroy() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/api/v3/test_default_types.py0000664000175000017500000002130200000000000024034 0ustar00zuulzuul00000000000000# Copyright 2020 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from unittest import mock from cinder.api import microversions as mv from cinder.api.v3 import default_types from cinder import context from cinder import exception from cinder import objects from cinder.tests.unit.api import fakes from cinder.tests.unit import fake_constants as fake from cinder.tests.unit import test class DefaultVolumeTypesApiTest(test.TestCase): def _create_volume_type(self, ctxt, volume_type_name, extra_specs=None, is_public=True, projects=None): vol_type = objects.VolumeType(ctxt, name=volume_type_name, is_public=is_public, description='', extra_specs=extra_specs, projects=projects) vol_type.create() return vol_type def _set_default_type_system_scope(self, project_id=fake.PROJECT_ID, volume_type='volume_type1'): body = { 'default_type': {'volume_type': volume_type} } req = fakes.HTTPRequest.blank('/v3/default-types/%s' % project_id, use_admin_context=True, version=mv.DEFAULT_TYPE_OVERRIDES, system_scope='all') res_dict = self.controller.create_update(req, id=project_id, body=body) return res_dict def _set_default_type_project_scope(self, project_id=fake.PROJECT_ID, volume_type='volume_type1'): body = { 'default_type': {'volume_type': volume_type} } req = fakes.HTTPRequest.blank('/v3/default-types/%s' % project_id, use_admin_context=True, version=mv.DEFAULT_TYPE_OVERRIDES) res_dict = self.controller.create_update(req, id=project_id, body=body) return res_dict def setUp(self): super(DefaultVolumeTypesApiTest, self).setUp() self.controller = default_types.DefaultTypesController() self.ctxt = context.RequestContext(user_id=fake.USER_ID, project_id=fake.PROJECT_ID, is_admin=True, system_scope='all') self.type1 = self._create_volume_type( self.ctxt, 'volume_type1') self.type2 = self._create_volume_type( self.ctxt, 'volume_type2') get_patcher = mock.patch('cinder.api.api_utils.get_project', self._get_project) get_patcher.start() self.addCleanup(get_patcher.stop) class FakeProject(object): def __init__(self, id=fake.PROJECT_ID, domain_id=fake.DOMAIN_ID, parent_id=None, is_admin_project=False): self.id = id self.domain_id = domain_id def _get_project(self, context, id, subtree_as_ids=False, parents_as_ids=False, is_admin_project=False): return self.FakeProject(id) def test_default_volume_types_create_update_system_admin(self): res_dict = self._set_default_type_system_scope() self.assertEqual(fake.PROJECT_ID, res_dict['default_type']['project_id']) self.assertEqual(self.type1.id, res_dict['default_type']['volume_type_id']) def test_default_volume_types_create_update_project_admin(self): res_dict = self._set_default_type_project_scope() self.assertEqual(fake.PROJECT_ID, res_dict['default_type']['project_id']) self.assertEqual(self.type1.id, res_dict['default_type']['volume_type_id']) def test_default_volume_types_detail_system_admin(self): self._set_default_type_system_scope() req = fakes.HTTPRequest.blank('/v3/default-types/%s' % fake.PROJECT_ID, use_admin_context=True, version=mv.DEFAULT_TYPE_OVERRIDES, system_scope='all') res_dict = self.controller.detail(req, fake.PROJECT_ID) self.assertEqual(fake.PROJECT_ID, res_dict['default_type']['project_id']) self.assertEqual(self.type1.id, res_dict['default_type']['volume_type_id']) def test_default_volume_types_detail_project_admin(self): self._set_default_type_project_scope() req = fakes.HTTPRequest.blank('/v3/default-types/%s' % fake.PROJECT_ID, use_admin_context=True, version=mv.DEFAULT_TYPE_OVERRIDES) res_dict = self.controller.detail(req, fake.PROJECT_ID) self.assertEqual(fake.PROJECT_ID, res_dict['default_type']['project_id']) 
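# The project-scoped detail call should report volume_type1 (set via
# _set_default_type_project_scope above) as this project's default type.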
self.assertEqual(self.type1.id, res_dict['default_type']['volume_type_id']) def test_default_volume_types_detail_no_default_found(self): req = fakes.HTTPRequest.blank('/v3/default-types/%s' % fake.PROJECT_ID, use_admin_context=True, version=mv.DEFAULT_TYPE_OVERRIDES, system_scope='all') self.assertRaises(exception.VolumeTypeProjectDefaultNotFound, self.controller.detail, req, fake.PROJECT_ID) def test_default_volume_types_list(self): req = fakes.HTTPRequest.blank('/v3/default-types/', use_admin_context=True, version=mv.DEFAULT_TYPE_OVERRIDES, system_scope='all') # Confirm this returns an empty list when no default types are set res_dict = self.controller.index(req) self.assertEqual(0, len(res_dict['default_types'])) self._set_default_type_system_scope() self._set_default_type_system_scope(project_id=fake.PROJECT2_ID, volume_type='volume_type2') res_dict = self.controller.index(req) self.assertEqual(2, len(res_dict['default_types'])) self.assertEqual(fake.PROJECT_ID, res_dict['default_types'][0]['project_id']) self.assertEqual(fake.PROJECT2_ID, res_dict['default_types'][1]['project_id']) def test_default_volume_types_delete_system_admin(self): self._set_default_type_system_scope() req = fakes.HTTPRequest.blank('/v3/default-types/', use_admin_context=True, version=mv.DEFAULT_TYPE_OVERRIDES, system_scope='all') res_dict = self.controller.index(req) self.assertEqual(1, len(res_dict['default_types'])) self.controller.delete(req, fake.PROJECT_ID) res_dict_new = self.controller.index(req) self.assertEqual(0, len(res_dict_new['default_types'])) def test_default_volume_types_delete_project_admin(self): self._set_default_type_project_scope() req = fakes.HTTPRequest.blank('/v3/default-types/', use_admin_context=True, version=mv.DEFAULT_TYPE_OVERRIDES) req_admin = fakes.HTTPRequest.blank('/v3/default-types/', use_admin_context=True, version=mv.DEFAULT_TYPE_OVERRIDES, system_scope='all') res_dict = self.controller.index(req_admin) self.assertEqual(1, len(res_dict['default_types'])) self.controller.delete(req, fake.PROJECT_ID) res_dict_new = self.controller.index(req_admin) self.assertEqual(0, len(res_dict_new['default_types'])) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/api/v3/test_group_snapshots.py0000664000175000017500000007221600000000000024434 0ustar00zuulzuul00000000000000# Copyright (C) 2016 EMC Corporation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""Tests for group_snapshot code.""" from http import HTTPStatus from unittest import mock import ddt from oslo_policy import policy as oslo_policy import webob from cinder.api import microversions as mv from cinder.api.v3 import group_snapshots as v3_group_snapshots from cinder import context from cinder import db from cinder import exception from cinder.group import api as group_api from cinder import objects from cinder.objects import fields from cinder.policies import base as base_policy from cinder.policies import group_snapshots as group_snapshots_policy from cinder import policy from cinder.tests.unit.api import fakes from cinder.tests.unit import fake_constants as fake from cinder.tests.unit import test from cinder.tests.unit import utils import cinder.volume @ddt.ddt class GroupSnapshotsAPITestCase(test.TestCase): """Test Case for group_snapshots API.""" def setUp(self): super(GroupSnapshotsAPITestCase, self).setUp() self.controller = v3_group_snapshots.GroupSnapshotsController() self.volume_api = cinder.volume.API() self.context = context.get_admin_context() self.context.project_id = fake.PROJECT_ID self.context.user_id = fake.USER_ID self.user_ctxt = context.RequestContext( fake.USER_ID, fake.PROJECT_ID, auth_token=True) self.group = utils.create_group(self.context, group_type_id=fake.GROUP_TYPE_ID, volume_type_ids=[fake.VOLUME_TYPE_ID]) self.volume = utils.create_volume(self.context, group_id=self.group.id, volume_type_id=fake.VOLUME_TYPE_ID) self.g_snapshots_array = [ utils.create_group_snapshot( self.context, group_id=self.group.id, group_type_id=self.group.group_type_id) for _ in range(3)] self.addCleanup(self._cleanup) def _cleanup(self): for snapshot in self.g_snapshots_array: snapshot.destroy() self.volume.destroy() self.group.destroy() def test_show_group_snapshot(self): group_snapshot = utils.create_group_snapshot( self.context, group_id=self.group.id) req = fakes.HTTPRequest.blank('/v3/%s/group_snapshots/%s' % (fake.PROJECT_ID, group_snapshot.id), version=mv.GROUP_SNAPSHOTS) res_dict = self.controller.show(req, group_snapshot.id) self.assertEqual(1, len(res_dict)) self.assertEqual('this is a test group snapshot', res_dict['group_snapshot']['description']) self.assertEqual('test_group_snapshot', res_dict['group_snapshot']['name']) self.assertEqual(fields.GroupSnapshotStatus.CREATING, res_dict['group_snapshot']['status']) group_snapshot.destroy() @ddt.data(True, False) def test_list_group_snapshots_with_limit(self, is_detail): url = '/v3/%s/group_snapshots?limit=1' % fake.PROJECT_ID if is_detail: url = '/v3/%s/group_snapshots/detail?limit=1' % fake.PROJECT_ID req = fakes.HTTPRequest.blank(url, version=mv.GROUP_SNAPSHOT_PAGINATION) if is_detail: res_dict = self.controller.detail(req) else: res_dict = self.controller.index(req) self.assertEqual(2, len(res_dict)) self.assertEqual(1, len(res_dict['group_snapshots'])) self.assertEqual(self.g_snapshots_array[2].id, res_dict['group_snapshots'][0]['id']) next_link = ( 'http://localhost/v3/%s/group_snapshots?limit=' '1&marker=%s' % (fake.PROJECT_ID, res_dict['group_snapshots'][0]['id'])) self.assertEqual(next_link, res_dict['group_snapshot_links'][0]['href']) if is_detail: self.assertIn('description', res_dict['group_snapshots'][0].keys()) else: self.assertNotIn('description', res_dict['group_snapshots'][0].keys()) @ddt.data(True, False) def test_list_group_snapshot_with_offset(self, is_detail): url = '/v3/%s/group_snapshots?offset=1' % fake.PROJECT_ID if is_detail: url = '/v3/%s/group_snapshots/detail?offset=1' % fake.PROJECT_ID 
req = fakes.HTTPRequest.blank(url, version=mv.GROUP_SNAPSHOT_PAGINATION) if is_detail: res_dict = self.controller.detail(req) else: res_dict = self.controller.index(req) self.assertEqual(1, len(res_dict)) self.assertEqual(2, len(res_dict['group_snapshots'])) self.assertEqual(self.g_snapshots_array[1].id, res_dict['group_snapshots'][0]['id']) self.assertEqual(self.g_snapshots_array[0].id, res_dict['group_snapshots'][1]['id']) if is_detail: self.assertIn('description', res_dict['group_snapshots'][0].keys()) else: self.assertNotIn('description', res_dict['group_snapshots'][0].keys()) @ddt.data(True, False) def test_list_group_snapshot_with_offset_out_of_range(self, is_detail): url = ('/v3/%s/group_snapshots?offset=234523423455454' % fake.PROJECT_ID) if is_detail: url = ('/v3/%s/group_snapshots/detail?offset=234523423455454' % fake.PROJECT_ID) req = fakes.HTTPRequest.blank(url, version=mv.GROUP_SNAPSHOT_PAGINATION) if is_detail: self.assertRaises(webob.exc.HTTPBadRequest, self.controller.detail, req) else: self.assertRaises(webob.exc.HTTPBadRequest, self.controller.index, req) @ddt.data(False, True) def test_list_group_snapshot_with_limit_and_offset(self, is_detail): group_snapshot = utils.create_group_snapshot( self.context, group_id=self.group.id, group_type_id=self.group.group_type_id) url = '/v3/%s/group_snapshots?limit=2&offset=1' % fake.PROJECT_ID if is_detail: url = ('/v3/%s/group_snapshots/detail?limit=2&offset=1' % fake.PROJECT_ID) req = fakes.HTTPRequest.blank(url, version=mv.GROUP_SNAPSHOT_PAGINATION) if is_detail: res_dict = self.controller.detail(req) else: res_dict = self.controller.index(req) self.assertEqual(2, len(res_dict)) self.assertEqual(2, len(res_dict['group_snapshots'])) self.assertEqual(self.g_snapshots_array[2].id, res_dict['group_snapshots'][0]['id']) self.assertEqual(self.g_snapshots_array[1].id, res_dict['group_snapshots'][1]['id']) self.assertIsNotNone(res_dict['group_snapshot_links'][0]['href']) if is_detail: self.assertIn('description', res_dict['group_snapshots'][0].keys()) else: self.assertNotIn('description', res_dict['group_snapshots'][0].keys()) group_snapshot.destroy() @ddt.data(mv.get_prior_version(mv.RESOURCE_FILTER), mv.RESOURCE_FILTER, mv.LIKE_FILTER) @mock.patch('cinder.api.common.reject_invalid_filters') def test_group_snapshot_list_with_general_filter(self, version, mock_update): url = '/v3/%s/group_snapshots' % fake.PROJECT_ID req = fakes.HTTPRequest.blank(url, version=version, use_admin_context=False) self.controller.index(req) if version != mv.get_prior_version(mv.RESOURCE_FILTER): support_like = True if version == mv.LIKE_FILTER else False mock_update.assert_called_once_with(req.environ['cinder.context'], mock.ANY, 'group_snapshot', support_like) @ddt.data(False, True) def test_list_group_snapshot_with_filter(self, is_detail): url = ('/v3/%s/group_snapshots?' 'all_tenants=True&id=%s') % (fake.PROJECT_ID, self.g_snapshots_array[0].id) if is_detail: url = ('/v3/%s/group_snapshots/detail?' 
'all_tenants=True&id=%s') % (fake.PROJECT_ID, self.g_snapshots_array[0].id) req = fakes.HTTPRequest.blank(url, version=mv.GROUP_SNAPSHOT_PAGINATION, use_admin_context=True) if is_detail: res_dict = self.controller.detail(req) else: res_dict = self.controller.index(req) self.assertEqual(1, len(res_dict)) self.assertEqual(1, len(res_dict['group_snapshots'])) self.assertEqual(self.g_snapshots_array[0].id, res_dict['group_snapshots'][0]['id']) if is_detail: self.assertIn('description', res_dict['group_snapshots'][0].keys()) else: self.assertNotIn('description', res_dict['group_snapshots'][0].keys()) @ddt.data({'is_detail': True, 'version': mv.GROUP_SNAPSHOTS}, {'is_detail': False, 'version': mv.GROUP_SNAPSHOTS}, {'is_detail': True, 'version': mv.POOL_FILTER}, {'is_detail': False, 'version': mv.POOL_FILTER},) @ddt.unpack def test_list_group_snapshot_with_filter_previous_version(self, is_detail, version): url = ('/v3/%s/group_snapshots?' 'all_tenants=True&id=%s') % (fake.PROJECT_ID, self.g_snapshots_array[0].id) if is_detail: url = ('/v3/%s/group_snapshots/detail?' 'all_tenants=True&id=%s') % (fake.PROJECT_ID, self.g_snapshots_array[0].id) req = fakes.HTTPRequest.blank(url, version=version, use_admin_context=True) if is_detail: res_dict = self.controller.detail(req) else: res_dict = self.controller.index(req) self.assertEqual(1, len(res_dict)) self.assertEqual(3, len(res_dict['group_snapshots'])) @ddt.data(False, True) def test_list_group_snapshot_with_sort(self, is_detail): url = '/v3/%s/group_snapshots?sort=id:asc' % fake.PROJECT_ID if is_detail: url = ('/v3/%s/group_snapshots/detail?sort=id:asc' % fake.PROJECT_ID) req = fakes.HTTPRequest.blank(url, version=mv.GROUP_SNAPSHOT_PAGINATION) expect_result = [snapshot.id for snapshot in self.g_snapshots_array] expect_result.sort() if is_detail: res_dict = self.controller.detail(req) else: res_dict = self.controller.index(req) self.assertEqual(1, len(res_dict)) self.assertEqual(3, len(res_dict['group_snapshots'])) self.assertEqual(expect_result[0], res_dict['group_snapshots'][0]['id']) self.assertEqual(expect_result[1], res_dict['group_snapshots'][1]['id']) self.assertEqual(expect_result[2], res_dict['group_snapshots'][2]['id']) if is_detail: self.assertIn('description', res_dict['group_snapshots'][0].keys()) else: self.assertNotIn('description', res_dict['group_snapshots'][0].keys()) def test_show_group_snapshot_with_group_snapshot_not_found(self): req = fakes.HTTPRequest.blank('/v3/%s/group_snapshots/%s' % (fake.PROJECT_ID, fake.WILL_NOT_BE_FOUND_ID), version=mv.GROUP_SNAPSHOTS) self.assertRaises(exception.GroupSnapshotNotFound, self.controller.show, req, fake.WILL_NOT_BE_FOUND_ID) def test_show_group_snapshot_with_project_id(self): group_snapshot = utils.create_group_snapshot( self.context, group_id=self.group.id) req = fakes.HTTPRequest.blank( '/v3/%s/group_snapshots/%s' % (fake.PROJECT_ID, group_snapshot.id), version=mv.GROUP_GROUPSNAPSHOT_PROJECT_ID, use_admin_context=True) res_dict = self.controller.show(req, group_snapshot.id) self.assertEqual(1, len(res_dict)) self.assertEqual('test_group_snapshot', res_dict['group_snapshot']['name']) self.assertEqual(fake.PROJECT_ID, res_dict['group_snapshot']['project_id']) group_snapshot.destroy() def test_show_group_snapshot_without_project_id(self): group_snapshot = utils.create_group_snapshot( self.context, group_id=self.group.id) # using mv.TRANSFER_WITH_HISTORY (3.57) to test the # project_id field is not in response before mv 3.58 req = fakes.HTTPRequest.blank( '/v3/%s/group_snapshots/%s' % 
(fake.PROJECT_ID, group_snapshot.id), version=mv.TRANSFER_WITH_HISTORY, use_admin_context=True) res_dict = self.controller.show(req, group_snapshot.id) self.assertEqual(1, len(res_dict)) self.assertEqual('test_group_snapshot', res_dict['group_snapshot']['name']) self.assertNotIn('project_id', res_dict['group_snapshot']) group_snapshot.destroy() @ddt.data(True, False) def test_list_group_snapshots_json(self, is_detail): if is_detail: request_url = '/v3/%s/group_snapshots/detail' else: request_url = '/v3/%s/group_snapshots' req = fakes.HTTPRequest.blank(request_url % fake.PROJECT_ID, version=mv.GROUP_SNAPSHOTS) if is_detail: res_dict = self.controller.detail(req) else: res_dict = self.controller.index(req) self.assertEqual(1, len(res_dict)) self.assertEqual(3, len(res_dict['group_snapshots'])) for index, snapshot in enumerate(self.g_snapshots_array): self.assertEqual(snapshot.id, res_dict['group_snapshots'][2 - index]['id']) self.assertIsNotNone( res_dict['group_snapshots'][2 - index]['name']) if is_detail: self.assertIn('description', res_dict['group_snapshots'][2 - index].keys()) else: self.assertNotIn('description', res_dict['group_snapshots'][2 - index].keys()) @ddt.data(True, False) def test_list_group_snapshots_with_project_id(self, is_detail): if is_detail: request_url = '/v3/%s/group_snapshots/detail' else: request_url = '/v3/%s/group_snapshots' req = fakes.HTTPRequest.blank( request_url % fake.PROJECT_ID, version=mv.GROUP_GROUPSNAPSHOT_PROJECT_ID, use_admin_context=True) if is_detail: res_dict = self.controller.detail(req) else: res_dict = self.controller.index(req) self.assertEqual(1, len(res_dict)) self.assertEqual(3, len(res_dict['group_snapshots'])) for group in res_dict['group_snapshots']: if is_detail: self.assertIsNotNone(group['project_id']) else: self.assertNotIn('project_id', group) @mock.patch('cinder.db.volume_type_get') @mock.patch('cinder.quota.VolumeTypeQuotaEngine.reserve') def test_create_group_snapshot_json(self, mock_quota, mock_vol_type): body = {"group_snapshot": {"name": "group_snapshot1", "description": "Group Snapshot 1", "group_id": self.group.id}} req = fakes.HTTPRequest.blank('/v3/%s/group_snapshots' % fake.PROJECT_ID, version=mv.GROUP_SNAPSHOTS) res_dict = self.controller.create(req, body=body) self.assertEqual(1, len(res_dict)) self.assertIn('id', res_dict['group_snapshot']) group_snapshot = objects.GroupSnapshot.get_by_id( context.get_admin_context(), res_dict['group_snapshot']['id']) group_snapshot.destroy() @mock.patch('cinder.db.volume_type_get') def test_create_group_snapshot_when_volume_in_error_status( self, mock_vol_type): group = utils.create_group( self.context, group_type_id=fake.GROUP_TYPE_ID, volume_type_ids=[fake.VOLUME_TYPE_ID],) volume_id = utils.create_volume( self.context, status='error', group_id=group.id, volume_type_id=fake.VOLUME_TYPE_ID)['id'] body = {"group_snapshot": {"name": "group_snapshot1", "description": "Group Snapshot 1", "group_id": group.id}} req = fakes.HTTPRequest.blank('/v3/%s/group_snapshots' % fake.PROJECT_ID, version=mv.GROUP_SNAPSHOTS) self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create, req, body=body) group.destroy() db.volume_destroy(context.get_admin_context(), volume_id) def test_create_group_snapshot_with_no_body(self): # omit body from the request req = fakes.HTTPRequest.blank('/v3/%s/group_snapshots' % fake.PROJECT_ID, version=mv.GROUP_SNAPSHOTS) self.assertRaises(exception.ValidationError, self.controller.create, req, body=None) def test_create_group_snapshot_with_empty_body(self): # empty 
body in the request req = fakes.HTTPRequest.blank('/v3/%s/group_snapshots' % fake.PROJECT_ID, version=mv.GROUP_SNAPSHOTS) body = {"group_snapshot": {}} self.assertRaises(exception.ValidationError, self.controller.create, req, body=body) @mock.patch.object(group_api.API, 'create_group_snapshot', side_effect=exception.InvalidGroupSnapshot( reason='Invalid group snapshot')) def test_create_with_invalid_group_snapshot(self, mock_create_group_snap): body = {"group_snapshot": {"name": "group_snapshot1", "description": "Group Snapshot 1", "group_id": self.group.id}} req = fakes.HTTPRequest.blank('/v3/%s/group_snapshots' % fake.PROJECT_ID, version=mv.GROUP_SNAPSHOTS) self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create, req, body=body) @mock.patch.object(group_api.API, 'create_group_snapshot', side_effect=exception.GroupSnapshotNotFound( group_snapshot_id='invalid_id')) def test_create_with_group_snapshot_not_found(self, mock_create_grp_snap): body = {"group_snapshot": {"name": "group_snapshot1", "description": "Group Snapshot 1", "group_id": self.group.id}} req = fakes.HTTPRequest.blank('/v3/%s/group_snapshots' % fake.PROJECT_ID, version=mv.GROUP_SNAPSHOTS) self.assertRaises(exception.GroupSnapshotNotFound, self.controller.create, req, body=body) def test_create_group_snapshot_from_empty_group(self): empty_group = utils.create_group( self.context, group_type_id=fake.GROUP_TYPE_ID, volume_type_ids=[fake.VOLUME_TYPE_ID]) body = {"group_snapshot": {"name": "group_snapshot1", "description": "Group Snapshot 1", "group_id": empty_group.id}} req = fakes.HTTPRequest.blank('/v3/%s/group_snapshots' % fake.PROJECT_ID, version=mv.GROUP_SNAPSHOTS) self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create, req, body=body) empty_group.destroy() def test_delete_group_snapshot_available(self): group_snapshot = utils.create_group_snapshot( self.context, group_id=self.group.id, status=fields.GroupSnapshotStatus.AVAILABLE) req = fakes.HTTPRequest.blank('/v3/%s/group_snapshots/%s' % (fake.PROJECT_ID, group_snapshot.id), version=mv.GROUP_SNAPSHOTS) res_dict = self.controller.delete(req, group_snapshot.id) group_snapshot = objects.GroupSnapshot.get_by_id(self.context, group_snapshot.id) self.assertEqual(HTTPStatus.ACCEPTED, res_dict.status_int) self.assertEqual(fields.GroupSnapshotStatus.DELETING, group_snapshot.status) group_snapshot.destroy() def test_delete_group_snapshot_available_used_as_source(self): group_snapshot = utils.create_group_snapshot( self.context, group_id=self.group.id, status=fields.GroupSnapshotStatus.AVAILABLE) group2 = utils.create_group( self.context, status='creating', group_snapshot_id=group_snapshot.id, group_type_id=fake.GROUP_TYPE_ID, volume_type_ids=[fake.VOLUME_TYPE_ID],) req = fakes.HTTPRequest.blank('/v3/%s/group_snapshots/%s' % (fake.PROJECT_ID, group_snapshot.id), version=mv.GROUP_SNAPSHOTS) self.assertRaises(webob.exc.HTTPBadRequest, self.controller.delete, req, group_snapshot.id) group_snapshot.destroy() group2.destroy() def test_delete_group_snapshot_with_group_snapshot_NotFound(self): req = fakes.HTTPRequest.blank('/v3/%s/group_snapshots/%s' % (fake.PROJECT_ID, fake.WILL_NOT_BE_FOUND_ID), version=mv.GROUP_SNAPSHOTS) self.assertRaises(exception.GroupSnapshotNotFound, self.controller.delete, req, fake.WILL_NOT_BE_FOUND_ID) def test_delete_group_snapshot_with_invalid_group_snapshot(self): group_snapshot = utils.create_group_snapshot( self.context, group_id=self.group.id, status='invalid') req = fakes.HTTPRequest.blank('/v3/%s/group_snapshots/%s' % 
(fake.PROJECT_ID, group_snapshot.id), version=mv.GROUP_SNAPSHOTS) self.assertRaises(webob.exc.HTTPBadRequest, self.controller.delete, req, group_snapshot.id) group_snapshot.destroy() def test_delete_group_snapshot_policy_not_authorized(self): group_snapshot = utils.create_group_snapshot( self.context, group_id=self.group.id, status=fields.GroupSnapshotStatus.AVAILABLE) req = fakes.HTTPRequest.blank('/v3/%s/group_snapshots/%s/' % (fake.PROJECT_ID, group_snapshot.id), version=mv.GROUP_SNAPSHOTS, use_admin_context=False) rules = { group_snapshots_policy.DELETE_POLICY: base_policy.RULE_ADMIN_API } policy.set_rules(oslo_policy.Rules.from_dict(rules)) self.addCleanup(policy.reset) self.assertRaises(exception.PolicyNotAuthorized, self.controller.delete, req, group_snapshot.id) @ddt.data((mv.GROUP_TYPE, 'fake_snapshot_001', fields.GroupSnapshotStatus.AVAILABLE, exception.VersionNotFoundForAPIMethod), (mv.get_prior_version(mv.GROUP_SNAPSHOT_RESET_STATUS), 'fake_snapshot_001', fields.GroupSnapshotStatus.AVAILABLE, exception.VersionNotFoundForAPIMethod), (mv.GROUP_SNAPSHOT_RESET_STATUS, 'fake_snapshot_001', fields.GroupSnapshotStatus.AVAILABLE, exception.GroupSnapshotNotFound)) @ddt.unpack def test_reset_group_snapshot_status_illegal(self, version, group_snapshot_id, status, exceptions): req = fakes.HTTPRequest.blank('/v3/%s/group_snapshots/%s/action' % (fake.PROJECT_ID, group_snapshot_id), version=version) body = {"reset_status": { "status": status }} self.assertRaises(exceptions, self.controller.reset_status, req, group_snapshot_id, body=body) def test_reset_group_snapshot_status_invalid_status(self): group_snapshot = utils.create_group_snapshot( self.context, group_id=self.group.id, status=fields.GroupSnapshotStatus.CREATING) req = fakes.HTTPRequest.blank('/v3/%s/group_snapshots/%s/action' % (fake.PROJECT_ID, group_snapshot.id), version=mv.GROUP_SNAPSHOT_RESET_STATUS) body = {"reset_status": { "status": "invalid_test_status" }} self.assertRaises(exception.InvalidGroupSnapshotStatus, self.controller.reset_status, req, group_snapshot.id, body=body) group_snapshot.destroy() def test_reset_group_snapshot_status(self): group_snapshot = utils.create_group_snapshot( self.context, group_id=self.group.id, status=fields.GroupSnapshotStatus.CREATING) req = fakes.HTTPRequest.blank('/v3/%s/group_snapshots/%s/action' % (fake.PROJECT_ID, group_snapshot.id), version=mv.GROUP_SNAPSHOT_RESET_STATUS) body = {"reset_status": { "status": fields.GroupSnapshotStatus.AVAILABLE }} response = self.controller.reset_status(req, group_snapshot.id, body=body) g_snapshot = objects.GroupSnapshot.get_by_id(self.context, group_snapshot.id) self.assertEqual(HTTPStatus.ACCEPTED, response.status_int) self.assertEqual(fields.GroupSnapshotStatus.AVAILABLE, g_snapshot.status) group_snapshot.destroy() @mock.patch('cinder.db.volume_type_get') @mock.patch('cinder.quota.VolumeTypeQuotaEngine.reserve') def test_create_group_snapshot_with_null_validate( self, mock_quota, mock_vol_type): body = {"group_snapshot": {"name": None, "description": None, "group_id": self.group.id}} req = fakes.HTTPRequest.blank('/v3/%s/group_snapshots' % self.context.project_id, version=mv.GROUP_SNAPSHOTS) res_dict = self.controller.create(req, body=body) self.assertIn('group_snapshot', res_dict) self.assertIsNone(res_dict['group_snapshot']['name']) group_snapshot = objects.GroupSnapshot.get_by_id( context.get_admin_context(), res_dict['group_snapshot']['id']) group_snapshot.destroy() ././@PaxHeader0000000000000000000000000000002600000000000011453 
xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/api/v3/test_group_specs.py0000664000175000017500000002345400000000000023527 0ustar00zuulzuul00000000000000# Copyright 2017 Intel Corporation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from unittest import mock import webob from cinder.api import microversions as mv from cinder.api.v3 import group_specs as v3_group_specs from cinder import context from cinder import db from cinder import exception from cinder import rpc from cinder.tests.unit.api import fakes from cinder.tests.unit import fake_constants as fake from cinder.tests.unit import test fake_group_specs = { 'key1': 'value1', 'key2': 'value2' } create_fake_group_specs = { 'group_specs': { 'key1': 'value1', 'key2': 'value2' } } update_fake_group_specs = { 'id': 'any_string' } incorrect_fake_group_specs = { 'group_specs': { 'key#': 'value1', 'key2': 'value2' } } class GroupSpecsTestCase(test.TestCase): """test cases for the group specs API""" def setUp(self): super(GroupSpecsTestCase, self).setUp() self.controller = v3_group_specs.GroupTypeSpecsController() self.ctxt = context.RequestContext( user_id=fake.USER_ID, project_id=fake.PROJECT_ID, is_admin=True) @mock.patch.object(db, 'group_type_get', return_value={}) @mock.patch.object(db, 'group_type_specs_get', return_value=fake_group_specs) def test_group_types_index(self, mock_group_type_specs_get, mock_group_type_get): req = fakes.HTTPRequest.blank('v3/%s/group_specs' % fake.PROJECT_ID, use_admin_context=True, version=mv.GROUP_TYPE) req.environ['cinder.context'] = self.ctxt res_dict = self.controller.index(req, fake.GROUP_ID) group_specs_dict = res_dict['group_specs'] mock_group_type_specs_get.assert_called() self.assertEqual('value1', group_specs_dict['key1']) self.assertEqual('value2', group_specs_dict['key2']) @mock.patch.object(rpc, 'get_notifier') @mock.patch.object(db, 'group_type_get', return_value={}) @mock.patch.object(db, 'group_type_specs_update_or_create', return_value={}) def test_group_types_create(self, mock_update_or_create, mock_group_type_get, mock_rpc_notifier): req = fakes.HTTPRequest.blank('v3/%s/group_specs' % fake.PROJECT_ID, use_admin_context=True, version=mv.GROUP_TYPE) self.controller.create(req, fake.GROUP_ID, body=create_fake_group_specs) self.assertTrue(mock_rpc_notifier.called) @mock.patch.object(rpc, 'get_notifier') @mock.patch.object(db, 'group_type_get', return_value={}) @mock.patch.object(db, 'group_type_specs_get', return_value=fake_group_specs) @mock.patch.object(db, 'group_type_specs_update_or_create', return_value={}) def test_group_types_update(self, mock_update_or_create, mock_typ_specs_get, mock_group_type_get, mock_rpc_notifier): req = fakes.HTTPRequest.blank('v3/%s/group_specs' % fake.PROJECT_ID, use_admin_context=True, version=mv.GROUP_TYPE) self.controller.update(req, fake.GROUP_TYPE_ID, 'id', body=update_fake_group_specs) self.assertTrue(mock_rpc_notifier.called) @mock.patch.object(db, 'group_type_specs_get', return_value=fake_group_specs) 
@mock.patch.object(db, 'group_type_get', return_value={}) def test_group_types_show(self, mock_group_type_get, mock_fake_group_specs): req = fakes.HTTPRequest.blank('v3/%s/group_specs' % fake.PROJECT_ID, use_admin_context=True, version=mv.GROUP_TYPE) res_dict = self.controller.show(req, fake.GROUP_TYPE_ID, 'key1') self.assertEqual('value1', res_dict['key1']) @mock.patch.object(rpc, 'get_notifier') @mock.patch.object(db, 'group_type_specs_delete', return_value={}) @mock.patch.object(db, 'group_type_get', return_value={}) def test_group_types_delete(self, mock_group_type_get, mock_group_spec_delete, rpc_notifier_mock): req = fakes.HTTPRequest.blank('v3/%s/group_specs' % fake.PROJECT_ID, use_admin_context=True, version=mv.GROUP_TYPE) self.controller.delete(req, fake.GROUP_TYPE_ID, 'key1') self.assertTrue(rpc_notifier_mock.called) @mock.patch.object(rpc, 'get_notifier') @mock.patch.object(db, 'group_type_specs_update_or_create', return_value={}) def test_check_type_should_raise_exception(self, mock_db_update_or_create, mock_rpc_notifier): req = fakes.HTTPRequest.blank('v3/%s/group_specs' % fake.PROJECT_ID, use_admin_context=True, version=mv.GROUP_TYPE) self.assertRaises(webob.exc.HTTPNotFound, self.controller.create, req, fake.GROUP_ID, body=create_fake_group_specs) @mock.patch.object(rpc, 'get_notifier') @mock.patch.object(db, 'group_type_get', return_value={}) def test_delete_should_raise_exception(self, mock_group_type_get, mock_get_notifier): req = fakes.HTTPRequest.blank('v3/%s/group_specs' % fake.PROJECT_ID, use_admin_context=True, version=mv.GROUP_TYPE) self.assertRaises(webob.exc.HTTPNotFound, self.controller.delete, req, fake.GROUP_TYPE_ID, 'key1') @mock.patch.object(db, 'group_type_get', return_value={}) def test_update_should_raise_exceptions(self, mock_group_type_get): req = fakes.HTTPRequest.blank('v3/%s/group_specs' % fake.PROJECT_ID, use_admin_context=True, version=mv.GROUP_TYPE) self.assertRaises(exception.ValidationError, self.controller.update, req, fake.GROUP_TYPE_ID, 'id', body=None) self.assertRaises(exception.ValidationError, self.controller.update, req, fake.GROUP_TYPE_ID, 'id', body=fake_group_specs) self.assertRaises(exception.ValidationError, self.controller.update, req, fake.GROUP_TYPE_ID, 'key1', body=fake_group_specs) @mock.patch.object(db, 'group_type_specs_get', return_value=fake_group_specs) @mock.patch.object(db, 'group_type_get', return_value={}) def test_show_should_raise_exception(self, mock_group_type_get, mock_group_type_specs_get): req = fakes.HTTPRequest.blank('v3/%s/group_specs' % fake.PROJECT_ID, use_admin_context=True, version=mv.GROUP_TYPE) self.assertRaises(webob.exc.HTTPNotFound, self.controller.show, req, fake.GROUP_TYPE_ID, 'key') @mock.patch.object(rpc, 'get_notifier') @mock.patch.object(db, 'group_type_get', return_value={}) @mock.patch.object(db, 'group_type_specs_update_or_create', return_value={}) def test_check_key_name_should_raise_exception(self, mock_update_or_create, mock_group_type_get, mock_rpc_notifier): req = fakes.HTTPRequest.blank('v3/%s/group_specs' % fake.PROJECT_ID, use_admin_context=True, version=mv.GROUP_TYPE) self.assertRaises(exception.ValidationError, self.controller.create, req, fake.GROUP_ID, body=incorrect_fake_group_specs) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/api/v3/test_group_types.py0000664000175000017500000006612400000000000023557 0ustar00zuulzuul00000000000000# Copyright 2016 EMC Corporation # All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from unittest import mock import uuid import ddt from oslo_utils import strutils from oslo_utils import timeutils import webob from cinder.api import microversions as mv from cinder.api.v3 import group_specs as v3_group_specs from cinder.api.v3 import group_types as v3_group_types from cinder.api.v3.views import group_types as views_types from cinder import context from cinder import exception from cinder.tests.unit.api import fakes from cinder.tests.unit import fake_constants as fake from cinder.tests.unit import test from cinder.volume import group_types IN_USE_GROUP_TYPE = fake.GROUP_TYPE3_ID def stub_group_type(id): specs = { "key1": "value1", "key2": "value2", "key3": "value3", "key4": "value4", "key5": "value5" } return dict( id=id, name='group_type_%s' % id, description='group_type_desc_%s' % id, group_specs=specs, ) def return_group_types_get_all_types(context, filters=None, marker=None, limit=None, sort_keys=None, sort_dirs=None, offset=None, list_result=False): result = dict(group_type_1=stub_group_type(1), group_type_2=stub_group_type(2), group_type_3=stub_group_type(3) ) if list_result: return list(result.values()) return result def return_empty_group_types_get_all_types(context, filters=None, marker=None, limit=None, sort_keys=None, sort_dirs=None, offset=None, list_result=False): if list_result: return [] return {} def return_group_types_get_group_type(context, id): if id == fake.WILL_NOT_BE_FOUND_ID: raise exception.GroupTypeNotFound(group_type_id=id) return stub_group_type(id) def return_group_types_get_default(): return stub_group_type(1) def return_group_types_get_default_not_found(): return {} def return_group_types_with_groups_destroy(context, id): if id == IN_USE_GROUP_TYPE: raise exception.GroupTypeInUse(group_type_id=id) @ddt.ddt class GroupTypesApiTest(test.TestCase): def _create_group_type(self, group_type_name, group_specs=None, is_public=True, projects=None): return group_types.create(self.ctxt, group_type_name, group_specs, is_public, projects).get('id') def setUp(self): super(GroupTypesApiTest, self).setUp() self.controller = v3_group_types.GroupTypesController() self.specs_controller = v3_group_specs.GroupTypeSpecsController() self.ctxt = context.RequestContext(user_id=fake.USER_ID, project_id=fake.PROJECT_ID, is_admin=True) self.user_ctxt = context.RequestContext(user_id=fake.USER2_ID, project_id=fake.PROJECT2_ID, is_admin=False) self.type_id1 = self._create_group_type('group_type1', {'key1': 'value1'}) self.type_id2 = self._create_group_type('group_type2', {'key2': 'value2'}) self.type_id3 = self._create_group_type('group_type3', {'key3': 'value3'}, False, [fake.PROJECT_ID]) self.type_id0 = group_types.get_default_cgsnapshot_type()['id'] @ddt.data('0', 'f', 'false', 'off', 'n', 'no', '1', 't', 'true', 'on', 'y', 'yes') @mock.patch.object(group_types, "get_group_type_by_name") @mock.patch.object(group_types, "create") @mock.patch("cinder.api.openstack.wsgi.Request.cache_resource") 
@mock.patch("cinder.api.views.types.ViewBuilder.show") def test_create_group_type_with_valid_is_public_in_string( self, is_public, mock_show, mock_cache_resource, mock_create, mock_get): boolean_is_public = strutils.bool_from_string(is_public) req = fakes.HTTPRequest.blank('/v3/%s/types' % fake.PROJECT_ID, version=mv.GROUP_TYPE) req.environ['cinder.context'] = self.ctxt body = {"group_type": {"is_public": is_public, "name": "group_type1", "description": None}} self.controller.create(req, body=body) mock_create.assert_called_once_with( self.ctxt, 'group_type1', {}, boolean_is_public, description=None) @mock.patch.object(group_types, "get_group_type_by_name") @mock.patch.object(group_types, "create") @mock.patch("cinder.api.openstack.wsgi.Request.cache_resource") @mock.patch("cinder.api.views.types.ViewBuilder.show") def test_create_group_type_with_group_specs_null( self, mock_show, mock_cache_resource, mock_create, mock_get): req = fakes.HTTPRequest.blank('/v3/%s/types' % fake.PROJECT_ID, version=mv.GROUP_TYPE) req.environ['cinder.context'] = self.ctxt body = {"group_type": {"name": "group_type1", "group_specs": None}} self.controller.create(req, body=body) mock_create.assert_called_once_with( self.ctxt, 'group_type1', None, True, description=None) @ddt.data(fake.GROUP_TYPE_ID, IN_USE_GROUP_TYPE) def test_group_type_destroy(self, grp_type_id): grp_type = {'id': grp_type_id, 'name': 'grp' + grp_type_id} self.mock_object(group_types, 'get_group_type', return_value=grp_type) self.mock_object(group_types, 'destroy', return_group_types_with_groups_destroy) mock_notify_info = self.mock_object( v3_group_types.GroupTypesController, '_notify_group_type_info') mock_notify_error = self.mock_object( v3_group_types.GroupTypesController, '_notify_group_type_error') req = fakes.HTTPRequest.blank('/v3/%s/group_types/%s' % ( fake.PROJECT_ID, grp_type_id), version=mv.GROUP_TYPE) req.environ['cinder.context'] = self.ctxt if grp_type_id == IN_USE_GROUP_TYPE: self.assertRaises(webob.exc.HTTPBadRequest, self.controller.delete, req, grp_type_id) mock_notify_error.assert_called_once_with( self.ctxt, 'group_type.delete', mock.ANY, group_type=grp_type) else: self.controller.delete(req, grp_type_id) mock_notify_info.assert_called_once_with( self.ctxt, 'group_type.delete', grp_type) def test_group_types_index(self): self.mock_object(group_types, 'get_all_group_types', return_group_types_get_all_types) req = fakes.HTTPRequest.blank('/v3/%s/group_types' % fake.PROJECT_ID, use_admin_context=True, version=mv.GROUP_TYPE) res_dict = self.controller.index(req) self.assertEqual(3, len(res_dict['group_types'])) expected_names = ['group_type_1', 'group_type_2', 'group_type_3'] actual_names = map(lambda e: e['name'], res_dict['group_types']) self.assertEqual(set(expected_names), set(actual_names)) for entry in res_dict['group_types']: self.assertEqual('value1', entry['group_specs']['key1']) def test_group_types_index_no_data(self): self.mock_object(group_types, 'get_all_group_types', return_empty_group_types_get_all_types) req = fakes.HTTPRequest.blank('/v3/%s/group_types' % fake.PROJECT_ID, version=mv.GROUP_TYPE) res_dict = self.controller.index(req) self.assertEqual(0, len(res_dict['group_types'])) def test_group_types_index_with_limit(self): req = fakes.HTTPRequest.blank('/v3/%s/group_types?limit=1' % fake.PROJECT_ID, version=mv.GROUP_TYPE) req.environ['cinder.context'] = self.ctxt res = self.controller.index(req) self.assertEqual(1, len(res['group_types'])) self.assertEqual(self.type_id3, res['group_types'][0]['id']) 
expect_next_link = ('http://localhost/v3/%s/group_types?limit=1' '&marker=%s' % (fake.PROJECT_ID, res['group_types'][0]['id'])) self.assertEqual(expect_next_link, res['group_type_links'][0]['href']) def test_group_types_index_with_offset(self): req = fakes.HTTPRequest.blank( '/v3/%s/group_types?offset=1' % fake.PROJECT_ID, version=mv.GROUP_TYPE) req.environ['cinder.context'] = self.ctxt res = self.controller.index(req) self.assertEqual(3, len(res['group_types'])) def test_group_types_index_with_offset_out_of_range(self): url = '/v3/%s/group_types?offset=424366766556787' % fake.PROJECT_ID req = fakes.HTTPRequest.blank(url, version=mv.GROUP_TYPE) self.assertRaises(webob.exc.HTTPBadRequest, self.controller.index, req) def test_group_types_index_with_limit_and_offset(self): req = fakes.HTTPRequest.blank( '/v3/%s/group_types?limit=2&offset=1' % fake.PROJECT_ID, version=mv.GROUP_TYPE) req.environ['cinder.context'] = self.ctxt res = self.controller.index(req) self.assertEqual(2, len(res['group_types'])) self.assertEqual(self.type_id2, res['group_types'][0]['id']) self.assertEqual(self.type_id1, res['group_types'][1]['id']) def test_group_types_index_with_limit_and_marker(self): req = fakes.HTTPRequest.blank('/v3/%s/group_types?limit=1' '&marker=%s' % (fake.PROJECT_ID, self.type_id2), version=mv.GROUP_TYPE) req.environ['cinder.context'] = self.ctxt res = self.controller.index(req) self.assertEqual(1, len(res['group_types'])) self.assertEqual(self.type_id1, res['group_types'][0]['id']) def test_group_types_index_with_valid_filter(self): req = fakes.HTTPRequest.blank( '/v3/%s/group_types?is_public=True' % fake.PROJECT_ID, version=mv.GROUP_TYPE) req.environ['cinder.context'] = self.ctxt res = self.controller.index(req) self.assertEqual(4, len(res['group_types'])) self.assertEqual(self.type_id3, res['group_types'][0]['id']) self.assertEqual(self.type_id2, res['group_types'][1]['id']) self.assertEqual(self.type_id1, res['group_types'][2]['id']) self.assertEqual(self.type_id0, res['group_types'][3]['id']) def test_group_types_index_with_invalid_filter(self): req = fakes.HTTPRequest.blank( '/v3/%s/group_types?id=%s' % (fake.PROJECT_ID, self.type_id1), version=mv.GROUP_TYPE) req.environ['cinder.context'] = self.ctxt res = self.controller.index(req) self.assertEqual(4, len(res['group_types'])) def test_group_types_index_with_sort_keys(self): req = fakes.HTTPRequest.blank('/v3/%s/group_types?sort=id' % fake.PROJECT_ID, version=mv.GROUP_TYPE) req.environ['cinder.context'] = self.ctxt res = self.controller.index(req) expect_result = [self.type_id0, self.type_id1, self.type_id2, self.type_id3] expect_result.sort(reverse=True) self.assertEqual(4, len(res['group_types'])) self.assertEqual(expect_result[0], res['group_types'][0]['id']) self.assertEqual(expect_result[1], res['group_types'][1]['id']) self.assertEqual(expect_result[2], res['group_types'][2]['id']) self.assertEqual(expect_result[3], res['group_types'][3]['id']) def test_group_types_index_with_sort_and_limit(self): req = fakes.HTTPRequest.blank( '/v3/%s/group_types?sort=id&limit=2' % fake.PROJECT_ID, version=mv.GROUP_TYPE) req.environ['cinder.context'] = self.ctxt res = self.controller.index(req) expect_result = [self.type_id0, self.type_id1, self.type_id2, self.type_id3] expect_result.sort(reverse=True) self.assertEqual(2, len(res['group_types'])) self.assertEqual(expect_result[0], res['group_types'][0]['id']) self.assertEqual(expect_result[1], res['group_types'][1]['id']) def test_group_types_index_with_sort_keys_and_sort_dirs(self): req = 
fakes.HTTPRequest.blank( '/v3/%s/group_types?sort=id:asc' % fake.PROJECT_ID, version=mv.GROUP_TYPE) req.environ['cinder.context'] = self.ctxt res = self.controller.index(req) expect_result = [self.type_id0, self.type_id1, self.type_id2, self.type_id3] expect_result.sort() self.assertEqual(4, len(res['group_types'])) self.assertEqual(expect_result[0], res['group_types'][0]['id']) self.assertEqual(expect_result[1], res['group_types'][1]['id']) self.assertEqual(expect_result[2], res['group_types'][2]['id']) self.assertEqual(expect_result[3], res['group_types'][3]['id']) @ddt.data('0', 'f', 'false', 'off', 'n', 'no', '1', 't', 'true', 'on', 'y', 'yes') @mock.patch.object(group_types, "get_group_type") @mock.patch.object(group_types, "update") @mock.patch("cinder.api.openstack.wsgi.Request.cache_resource") @mock.patch("cinder.api.views.types.ViewBuilder.show") def test_update_group_type_with_valid_is_public_in_string( self, is_public, mock_show, mock_cache_resource, mock_update, mock_get): type_id = str(uuid.uuid4()) req = fakes.HTTPRequest.blank( '/v3/%s/types/%s' % (fake.PROJECT_ID, type_id), version=mv.GROUP_TYPE) req.environ['cinder.context'] = self.ctxt boolean_is_public = strutils.bool_from_string(is_public) body = {"group_type": {"is_public": is_public, "name": "group_type1"}} self.controller.update(req, type_id, body=body) mock_update.assert_called_once_with( self.ctxt, type_id, 'group_type1', None, is_public=boolean_is_public) def test_update_group_type_with_name_null(self): req = fakes.HTTPRequest.blank( '/v3/%s/types/%s' % (fake.PROJECT_ID, fake.GROUP_TYPE_ID), version=mv.GROUP_TYPE) req.environ['cinder.context'] = self.ctxt body = {"group_type": {"name": None}} self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update, req, fake.GROUP_TYPE_ID, body=body) @ddt.data({"group_type": {"name": None, "description": "description"}}, {"group_type": {"name": "test", "is_public": True}}, {"group_type": {"description": None, "is_public": True}}) def test_update_group_type(self, body): req = fakes.HTTPRequest.blank( '/v3/%s/types/%s' % (fake.PROJECT_ID, fake.GROUP_TYPE_ID), version=mv.GROUP_TYPE) group_type_1 = group_types.create(self.ctxt, 'group_type') req.environ['cinder.context'] = self.ctxt res = self.controller.update(req, group_type_1.get('id'), body=body) expected_name = body['group_type'].get('name') if expected_name is not None: self.assertEqual(expected_name, res['group_type']['name']) expected_is_public = body['group_type'].get('is_public') if expected_is_public is not None: self.assertEqual(expected_is_public, res['group_type']['is_public']) self.assertEqual(body['group_type'].get('description'), res['group_type']['description']) def test_group_types_show(self): self.mock_object(group_types, 'get_group_type', return_group_types_get_group_type) type_id = str(uuid.uuid4()) req = fakes.HTTPRequest.blank('/v3/%s/group_types/' % fake.PROJECT_ID + type_id, version=mv.GROUP_TYPE) res_dict = self.controller.show(req, type_id) self.assertEqual(1, len(res_dict)) self.assertEqual(type_id, res_dict['group_type']['id']) type_name = 'group_type_' + type_id self.assertEqual(type_name, res_dict['group_type']['name']) def test_group_types_show_pre_microversion(self): self.mock_object(group_types, 'get_group_type', return_group_types_get_group_type) type_id = uuid.uuid4() req = fakes.HTTPRequest.blank( '/v3/%s/group_types/%s' % (fake.PROJECT_ID, type_id), version=mv.get_prior_version(mv.GROUP_TYPE)) self.assertRaises(exception.VersionNotFoundForAPIMethod, self.controller.show, req, type_id) 
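# A minimal, readable sketch (added for illustration; not part of the original
# module) of the microversion-gating idiom the preceding test exercises:
# the request is pinned to the version just before the feature was introduced
# via mv.get_prior_version(), and the controller method is expected to reject
# it. It uses only names already imported in this module (uuid, fakes, fake,
# mv, exception).
#
#     type_id = str(uuid.uuid4())
#     req = fakes.HTTPRequest.blank(
#         '/v3/%s/group_types/%s' % (fake.PROJECT_ID, type_id),
#         version=mv.get_prior_version(mv.GROUP_TYPE))
#     self.assertRaises(exception.VersionNotFoundForAPIMethod,
#                       self.controller.show, req, type_id)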
def test_group_types_show_not_found(self): self.mock_object(group_types, 'get_group_type', return_group_types_get_group_type) req = fakes.HTTPRequest.blank('/v3/%s/group_types/%s' % (fake.PROJECT_ID, fake.WILL_NOT_BE_FOUND_ID), version=mv.GROUP_TYPE) self.assertRaises(webob.exc.HTTPNotFound, self.controller.show, req, fake.WILL_NOT_BE_FOUND_ID) def test_get_default(self): self.mock_object(group_types, 'get_default_group_type', return_group_types_get_default) req = fakes.HTTPRequest.blank('/v3/%s/group_types/default' % fake.PROJECT_ID, version=mv.GROUP_TYPE) req.method = 'GET' res_dict = self.controller.show(req, 'default') self.assertEqual(1, len(res_dict)) self.assertEqual('group_type_1', res_dict['group_type']['name']) self.assertEqual('group_type_desc_1', res_dict['group_type']['description']) def test_get_default_not_found(self): self.mock_object(group_types, 'get_default_group_type', return_group_types_get_default_not_found) req = fakes.HTTPRequest.blank('/v3/%s/group_types/default' % fake.PROJECT_ID, version=mv.GROUP_TYPE) req.method = 'GET' self.assertRaises(webob.exc.HTTPNotFound, self.controller.show, req, 'default') def test_view_builder_show(self): view_builder = views_types.ViewBuilder() now = timeutils.utcnow().isoformat() raw_group_type = dict( name='new_type', description='new_type_desc', is_public=True, deleted=False, created_at=now, updated_at=now, group_specs={}, deleted_at=None, id=42, ) request = fakes.HTTPRequest.blank("/v3", version=mv.GROUP_TYPE) output = view_builder.show(request, raw_group_type) self.assertIn('group_type', output) expected_group_type = dict( name='new_type', description='new_type_desc', is_public=True, id=42, ) self.assertDictEqual(expected_group_type, output['group_type']) def test_view_builder_show_admin(self): view_builder = views_types.ViewBuilder() now = timeutils.utcnow().isoformat() raw_group_type = dict( name='new_type', description='new_type_desc', is_public=True, deleted=False, created_at=now, updated_at=now, group_specs={}, deleted_at=None, id=42, ) request = fakes.HTTPRequest.blank("/v3", use_admin_context=True, version=mv.GROUP_TYPE) output = view_builder.show(request, raw_group_type) self.assertIn('group_type', output) expected_group_type = dict( name='new_type', description='new_type_desc', is_public=True, group_specs={}, id=42, ) self.assertDictEqual(expected_group_type, output['group_type']) def __test_view_builder_show_qos_specs_id_policy(self): with mock.patch.object(context.RequestContext, 'authorize', side_effect=[False, True]): view_builder = views_types.ViewBuilder() now = timeutils.utcnow().isoformat() raw_group_type = dict( name='new_type', description='new_type_desc', is_public=True, deleted=False, created_at=now, updated_at=now, deleted_at=None, id=42, ) request = fakes.HTTPRequest.blank("/v3", version=mv.GROUP_TYPE) output = view_builder.show(request, raw_group_type) self.assertIn('group_type', output) expected_group_type = dict( name='new_type', description='new_type_desc', is_public=True, id=42, ) self.assertDictEqual(expected_group_type, output['group_type']) def test_view_builder_show_group_specs_policy(self): with mock.patch.object(context.RequestContext, 'authorize', side_effect=[True, False]): view_builder = views_types.ViewBuilder() now = timeutils.utcnow().isoformat() raw_group_type = dict( name='new_type', description='new_type_desc', is_public=True, deleted=False, created_at=now, updated_at=now, group_specs={}, deleted_at=None, id=42, ) request = fakes.HTTPRequest.blank("/v3", version=mv.GROUP_TYPE) output = 
view_builder.show(request, raw_group_type) self.assertIn('group_type', output) expected_group_type = dict( name='new_type', description='new_type_desc', group_specs={}, is_public=True, id=42, ) self.assertDictEqual(expected_group_type, output['group_type']) def test_view_builder_show_pass_all_policy(self): with mock.patch.object(context.RequestContext, 'authorize', side_effect=[True, False]): view_builder = views_types.ViewBuilder() now = timeutils.utcnow().isoformat() raw_group_type = dict( name='new_type', description='new_type_desc', is_public=True, deleted=False, created_at=now, updated_at=now, group_specs={}, deleted_at=None, id=42, ) request = fakes.HTTPRequest.blank("/v3", version=mv.GROUP_TYPE) output = view_builder.show(request, raw_group_type) self.assertIn('group_type', output) expected_group_type = dict( name='new_type', description='new_type_desc', group_specs={}, is_public=True, id=42, ) self.assertDictEqual(expected_group_type, output['group_type']) def test_view_builder_list(self): view_builder = views_types.ViewBuilder() now = timeutils.utcnow().isoformat() raw_group_types = [] for i in range(0, 10): raw_group_types.append( dict( name='new_type', description='new_type_desc', is_public=True, deleted=False, created_at=now, updated_at=now, group_specs={}, deleted_at=None, id=42 + i ) ) request = fakes.HTTPRequest.blank("/v3", version=mv.GROUP_TYPE) output = view_builder.index(request, raw_group_types) self.assertIn('group_types', output) for i in range(0, 10): expected_group_type = dict( name='new_type', description='new_type_desc', is_public=True, id=42 + i ) self.assertDictEqual(expected_group_type, output['group_types'][i]) def test_view_builder_list_admin(self): view_builder = views_types.ViewBuilder() now = timeutils.utcnow().isoformat() raw_group_types = [] for i in range(0, 10): raw_group_types.append( dict( name='new_type', description='new_type_desc', is_public=True, deleted=False, created_at=now, updated_at=now, group_specs={}, deleted_at=None, id=42 + i ) ) request = fakes.HTTPRequest.blank("/v3", use_admin_context=True, version=mv.GROUP_TYPE) output = view_builder.index(request, raw_group_types) self.assertIn('group_types', output) for i in range(0, 10): expected_group_type = dict( name='new_type', description='new_type_desc', is_public=True, group_specs={}, id=42 + i ) self.assertDictEqual(expected_group_type, output['group_types'][i]) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/api/v3/test_groups.py0000664000175000017500000020252500000000000022513 0ustar00zuulzuul00000000000000# Copyright (C) 2016 EMC Corporation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""Tests for group code.""" from http import HTTPStatus from unittest import mock import ddt import webob from cinder.api import microversions as mv from cinder.api.v3 import groups as v3_groups from cinder import context from cinder import db from cinder import exception import cinder.group from cinder import objects from cinder.objects import fields from cinder.tests.unit.api import fakes from cinder.tests.unit.api.v3 import fakes as v3_fakes from cinder.tests.unit import fake_constants as fake from cinder.tests.unit import test from cinder.tests.unit import utils from cinder.volume import api as volume_api INVALID_GROUP_REPLICATION = mv.get_prior_version(mv.GROUP_REPLICATION) @ddt.ddt class GroupsAPITestCase(test.TestCase): """Test Case for groups API.""" def setUp(self): super(GroupsAPITestCase, self).setUp() self.controller = v3_groups.GroupsController() self.group_api = cinder.group.API() self.ctxt = context.RequestContext(fake.USER_ID, fake.PROJECT_ID, auth_token=True, is_admin=True) self.user_ctxt = context.RequestContext( fake.USER_ID, fake.PROJECT_ID, auth_token=True) self.volume_type1 = self._create_volume_type(id=fake.VOLUME_TYPE_ID) self.group1 = self._create_group() self.group2 = self._create_group() self.group3 = self._create_group(ctxt=self.user_ctxt) self.addCleanup(self._cleanup) def _cleanup(self): self.group1.destroy() self.group2.destroy() self.group3.destroy() db.volume_type_destroy(self.ctxt, self.volume_type1.id) def _create_group( self, ctxt=None, name='test_group', description='this is a test group', group_type_id=fake.GROUP_TYPE_ID, volume_type_ids=[fake.VOLUME_TYPE_ID], availability_zone='az1', host='fakehost', status=fields.GroupStatus.CREATING, replication_status=fields.ReplicationStatus.DISABLED, **kwargs): """Create a group object.""" ctxt = ctxt or self.ctxt group = objects.Group(ctxt) group.user_id = fake.USER_ID group.project_id = fake.PROJECT_ID group.availability_zone = availability_zone group.name = name group.description = description group.group_type_id = group_type_id group.volume_type_ids = volume_type_ids group.host = host group.status = status group.replication_status = replication_status group.update(kwargs) group.create() return group def _create_volume_type( self, ctxt=None, id=fake.VOLUME_TYPE_ID, name='test_volume_type', description='this is a test volume type', extra_specs={"test_key": "test_val"}, testcase_instance=None, **kwargs): """Create a volume type.""" ctxt = ctxt or self.ctxt vol_type = utils.create_volume_type( ctxt, testcase_instance=testcase_instance, id=id, name=name, description=description, extra_specs=extra_specs, **kwargs) return vol_type @mock.patch('cinder.objects.volume_type.VolumeTypeList.get_all_by_group') @mock.patch('cinder.objects.volume.VolumeList.get_all_by_generic_group') def test_show_group(self, mock_vol_get_all_by_group, mock_vol_type_get_all_by_group): volume_objs = [objects.Volume(context=self.ctxt, id=i) for i in [fake.VOLUME_ID]] volumes = objects.VolumeList(context=self.ctxt, objects=volume_objs) mock_vol_get_all_by_group.return_value = volumes vol_type_objs = [objects.VolumeType(context=self.ctxt, id=i) for i in [fake.VOLUME_TYPE_ID]] vol_types = objects.VolumeTypeList(context=self.ctxt, objects=vol_type_objs) mock_vol_type_get_all_by_group.return_value = vol_types req = fakes.HTTPRequest.blank('/v3/%s/groups/%s' % (fake.PROJECT_ID, self.group1.id), version=mv.GROUP_VOLUME) res_dict = self.controller.show(req, self.group1.id) self.assertEqual(1, len(res_dict)) self.assertEqual('az1', 
res_dict['group']['availability_zone']) self.assertEqual('this is a test group', res_dict['group']['description']) self.assertEqual('test_group', res_dict['group']['name']) self.assertEqual('creating', res_dict['group']['status']) self.assertEqual([fake.VOLUME_TYPE_ID], res_dict['group']['volume_types']) @ddt.data((mv.get_prior_version(mv.GROUP_VOLUME_LIST), False), (mv.get_prior_version(mv.GROUP_VOLUME_LIST), True), (mv.GROUP_VOLUME_LIST, False), (mv.GROUP_VOLUME_LIST, True)) @ddt.unpack @mock.patch('cinder.objects.volume_type.VolumeTypeList.get_all_by_group') @mock.patch('cinder.objects.volume.VolumeList.get_all_by_generic_group') def test_list_group_with_list_volume(self, version, has_list_volume, mock_vol_get_all_by_group, mock_vol_type_get_all_by_group): volume_objs = [objects.Volume(context=self.ctxt, id=i) for i in [fake.VOLUME_ID]] volumes = objects.VolumeList(context=self.ctxt, objects=volume_objs) mock_vol_get_all_by_group.return_value = volumes vol_type_objs = [objects.VolumeType(context=self.ctxt, id=i) for i in [fake.VOLUME_TYPE_ID]] vol_types = objects.VolumeTypeList(context=self.ctxt, objects=vol_type_objs) mock_vol_type_get_all_by_group.return_value = vol_types if has_list_volume: req = fakes.HTTPRequest.blank( '/v3/%s/groups/detail?list_volume=True' % fake.PROJECT_ID, version=version) else: req = fakes.HTTPRequest.blank('/v3/%s/groups/detail' % fake.PROJECT_ID, version=version) res_dict = self.controller.detail(req) # If the microversion >= 3.25 and "list_volume=True", "volumes" should # be contained in the response body. Else, "volumes" should not be # contained in the response body. self.assertEqual(3, len(res_dict['groups'])) if (version, has_list_volume) == (mv.GROUP_VOLUME_LIST, True): self.assertEqual([fake.VOLUME_ID], res_dict['groups'][0]['volumes']) else: self.assertNotIn('volumes', res_dict['groups'][0]) # "volumes" should not be contained in the response body when list # groups without detail. res_dict = self.controller.index(req) self.assertNotIn('volumes', res_dict['groups'][0]) @mock.patch('cinder.objects.volume_type.VolumeTypeList.get_all_by_group') @mock.patch('cinder.objects.volume.VolumeList.get_all_by_generic_group') def test_show_group_with_list_volume(self, mock_vol_get_all_by_group, mock_vol_type_get_all_by_group): volume_objs = [objects.Volume(context=self.ctxt, id=i) for i in [fake.VOLUME_ID]] volumes = objects.VolumeList(context=self.ctxt, objects=volume_objs) mock_vol_get_all_by_group.return_value = volumes vol_type_objs = [objects.VolumeType(context=self.ctxt, id=i) for i in [fake.VOLUME_TYPE_ID]] vol_types = objects.VolumeTypeList(context=self.ctxt, objects=vol_type_objs) mock_vol_type_get_all_by_group.return_value = vol_types # If the microversion >= 3.25 and "list_volume=True", "volumes" should # be contained in the response body. req = fakes.HTTPRequest.blank('/v3/%s/groups/%s?list_volume=True' % (fake.PROJECT_ID, self.group1.id), version=mv.GROUP_VOLUME_LIST) res_dict = self.controller.show(req, self.group1.id) self.assertEqual(1, len(res_dict)) self.assertEqual([fake.VOLUME_ID], res_dict['group']['volumes']) # If the microversion >= 3.25 but "list_volume" is missing, "volumes" # should not be contained in the response body. 
req = fakes.HTTPRequest.blank('/v3/%s/groups/%s' % (fake.PROJECT_ID, self.group1.id), version=mv.GROUP_VOLUME_LIST) res_dict = self.controller.show(req, self.group1.id) self.assertEqual(1, len(res_dict)) self.assertNotIn('volumes', res_dict['group']) # If the microversion < 3.25, "volumes" should not be contained in the # response body. req = fakes.HTTPRequest.blank( '/v3/%s/groups/%s?list_volume=True' % (fake.PROJECT_ID, self.group1.id), version=mv.get_prior_version(mv.GROUP_VOLUME_LIST)) res_dict = self.controller.show(req, self.group1.id) self.assertEqual(1, len(res_dict)) self.assertNotIn('volumes', res_dict['group']) def test_show_group_with_group_NotFound(self): req = fakes.HTTPRequest.blank('/v3/%s/groups/%s' % (fake.PROJECT_ID, fake.WILL_NOT_BE_FOUND_ID), version=mv.GROUP_VOLUME) self.assertRaises(exception.GroupNotFound, self.controller.show, req, fake.WILL_NOT_BE_FOUND_ID) @ddt.data(mv.get_prior_version(mv.RESOURCE_FILTER), mv.RESOURCE_FILTER, mv.LIKE_FILTER) @mock.patch('cinder.api.common.reject_invalid_filters') def test_group_list_with_general_filter(self, version, mock_update): url = '/v3/%s/groups' % fake.PROJECT_ID req = fakes.HTTPRequest.blank(url, version=version, use_admin_context=False) self.controller.index(req) if version != mv.get_prior_version(mv.RESOURCE_FILTER): support_like = True if version == mv.LIKE_FILTER else False mock_update.assert_called_once_with(req.environ['cinder.context'], mock.ANY, 'group', support_like) def test_list_groups_json(self): self.group2.group_type_id = fake.GROUP_TYPE2_ID # TODO(geguileo): One `volume_type_ids` gets sorted out make proper # changes here # self.group2.volume_type_ids = [fake.VOLUME_TYPE2_ID] self.group2.save() self.group3.group_type_id = fake.GROUP_TYPE3_ID # TODO(geguileo): One `volume_type_ids` gets sorted out make proper # changes here # self.group3.volume_type_ids = [fake.VOLUME_TYPE3_ID] self.group3.save() req = fakes.HTTPRequest.blank('/v3/%s/groups' % fake.PROJECT_ID, version=mv.GROUP_VOLUME) res_dict = self.controller.index(req) self.assertEqual(1, len(res_dict)) self.assertEqual(self.group3.id, res_dict['groups'][0]['id']) self.assertEqual('test_group', res_dict['groups'][0]['name']) self.assertEqual(self.group2.id, res_dict['groups'][1]['id']) self.assertEqual('test_group', res_dict['groups'][1]['name']) self.assertEqual(self.group1.id, res_dict['groups'][2]['id']) self.assertEqual('test_group', res_dict['groups'][2]['name']) @ddt.data(False, True) def test_list_groups_with_limit(self, is_detail): url = '/v3/%s/groups?limit=1' % fake.PROJECT_ID if is_detail: url = '/v3/%s/groups/detail?limit=1' % fake.PROJECT_ID req = fakes.HTTPRequest.blank(url, version=mv.GROUP_VOLUME) if is_detail: res_dict = self.controller.detail(req) else: res_dict = self.controller.index(req) self.assertEqual(2, len(res_dict)) self.assertEqual(1, len(res_dict['groups'])) self.assertEqual(self.group3.id, res_dict['groups'][0]['id']) next_link = ( 'http://localhost/v3/%s/groups?limit=' '1&marker=%s' % (fake.PROJECT_ID, res_dict['groups'][0]['id'])) self.assertEqual(next_link, res_dict['group_links'][0]['href']) if is_detail: self.assertIn('description', res_dict['groups'][0].keys()) @ddt.data(False, True) def test_list_groups_with_offset(self, is_detail): url = '/v3/%s/groups?offset=1' % fake.PROJECT_ID if is_detail: url = '/v3/%s/groups/detail?offset=1' % fake.PROJECT_ID req = fakes.HTTPRequest.blank(url, version=mv.GROUP_VOLUME) res_dict = self.controller.index(req) self.assertEqual(1, len(res_dict)) self.assertEqual(2, 
len(res_dict['groups'])) self.assertEqual(self.group2.id, res_dict['groups'][0]['id']) self.assertEqual(self.group1.id, res_dict['groups'][1]['id']) @ddt.data(False, True) def test_list_groups_with_offset_out_of_range(self, is_detail): url = ('/v3/%s/groups?offset=234523423455454' % fake.PROJECT_ID) if is_detail: url = ('/v3/%s/groups/detail?offset=234523423455454' % fake.PROJECT_ID) req = fakes.HTTPRequest.blank(url, version=mv.GROUP_VOLUME) if is_detail: self.assertRaises(webob.exc.HTTPBadRequest, self.controller.detail, req) else: self.assertRaises(webob.exc.HTTPBadRequest, self.controller.index, req) @ddt.data(False, True) def test_list_groups_with_limit_and_offset(self, is_detail): url = '/v3/%s/groups?limit=2&offset=1' % fake.PROJECT_ID if is_detail: url = ('/v3/%s/groups/detail?limit=2&offset=1' % fake.PROJECT_ID) req = fakes.HTTPRequest.blank(url, version=mv.GROUP_VOLUME) if is_detail: res_dict = self.controller.detail(req) else: res_dict = self.controller.index(req) self.assertEqual(2, len(res_dict)) self.assertEqual(2, len(res_dict['groups'])) self.assertEqual(self.group2.id, res_dict['groups'][0]['id']) self.assertEqual(self.group1.id, res_dict['groups'][1]['id']) if is_detail: self.assertIn('description', res_dict['groups'][0].keys()) @ddt.data(False, True) def test_list_groups_with_filter(self, is_detail): # Create a group with user context url = ('/v3/%s/groups?' 'all_tenants=True&id=%s') % (fake.PROJECT_ID, self.group3.id) if is_detail: url = ('/v3/%s/groups/detail?' 'all_tenants=True&id=%s') % (fake.PROJECT_ID, self.group3.id) req = fakes.HTTPRequest.blank(url, version=mv.GROUP_VOLUME, use_admin_context=True) if is_detail: res_dict = self.controller.detail(req) else: res_dict = self.controller.index(req) self.assertEqual(1, len(res_dict)) self.assertEqual(1, len(res_dict['groups'])) self.assertEqual(self.group3.id, res_dict['groups'][0]['id']) if is_detail: self.assertIn('description', res_dict['groups'][0].keys()) @ddt.data(False, True) def test_list_groups_with_sort(self, is_detail): url = '/v3/%s/groups?sort=id:asc' % fake.PROJECT_ID if is_detail: url = ('/v3/%s/groups/detail?sort=id:asc' % fake.PROJECT_ID) req = fakes.HTTPRequest.blank(url, version=mv.GROUP_VOLUME) expect_result = [self.group1.id, self.group2.id, self.group3.id] expect_result.sort() if is_detail: res_dict = self.controller.detail(req) else: res_dict = self.controller.index(req) self.assertEqual(1, len(res_dict)) self.assertEqual(3, len(res_dict['groups'])) self.assertEqual(expect_result[0], res_dict['groups'][0]['id']) self.assertEqual(expect_result[1], res_dict['groups'][1]['id']) self.assertEqual(expect_result[2], res_dict['groups'][2]['id']) if is_detail: self.assertIn('description', res_dict['groups'][0].keys()) @mock.patch('cinder.objects.volume_type.VolumeTypeList.get_all_by_group') def test_list_groups_detail_json(self, mock_vol_type_get_all_by_group): volume_type_ids = [fake.VOLUME_TYPE_ID, fake.VOLUME_TYPE2_ID] vol_type_objs = [objects.VolumeType(context=self.ctxt, id=i) for i in volume_type_ids] vol_types = objects.VolumeTypeList(context=self.ctxt, objects=vol_type_objs) mock_vol_type_get_all_by_group.return_value = vol_types # TODO(geguileo): One `volume_type_ids` gets sorted out make proper # changes here # self.group1.volume_type_ids = volume_type_ids # self.group1.save() # self.group2.volume_type_ids = volume_type_ids # self.group2.save() # self.group3.volume_type_ids = volume_type_ids # self.group3.save() req = fakes.HTTPRequest.blank('/v3/%s/groups/detail' % fake.PROJECT_ID, 
version=mv.GROUP_VOLUME) res_dict = self.controller.detail(req) self.assertEqual(1, len(res_dict)) index = 0 for group in [self.group3, self.group2, self.group1]: self.assertEqual(group.id, res_dict['groups'][index]['id']) self.assertEqual([fake.VOLUME_TYPE_ID, fake.VOLUME_TYPE2_ID], res_dict['groups'][index]['volume_types']) self.assertEqual('test_group', res_dict['groups'][index]['name']) self.assertTrue({'availability_zone', 'description', 'status'}.issubset( set(res_dict['groups'][index].keys()))) index += 1 @ddt.data(False, True) def test_create_group_json(self, use_group_type_name): # Create volume types and group type vol_type = 'test' vol_type_id = db.volume_type_create( self.ctxt, {'name': vol_type, 'extra_specs': {}}).get('id') grp_type_name = 'test_grp_type' grp_type = db.group_type_create( self.ctxt, {'name': grp_type_name, 'group_specs': {}}).get('id') if use_group_type_name: grp_type = grp_type_name body = {"group": {"name": "group1", "volume_types": [vol_type_id], "group_type": grp_type, "description": "Group 1", }} req = fakes.HTTPRequest.blank('/v3/%s/groups' % fake.PROJECT_ID, version=mv.GROUP_VOLUME) res_dict = self.controller.create(req, body=body) self.assertEqual(1, len(res_dict)) self.assertIn('id', res_dict['group']) group_id = res_dict['group']['id'] objects.Group.get_by_id(self.ctxt, group_id) def test_create_group_with_no_body(self): # omit body from the request req = fakes.HTTPRequest.blank('/v3/%s/groups' % fake.PROJECT_ID, version=mv.GROUP_VOLUME) self.assertRaises(exception.ValidationError, self.controller.create, req, body=None) @ddt.data(("", webob.exc.HTTPBadRequest), (" ", exception.InvalidInput), ("a" * 256, exception.InvalidInput)) @ddt.unpack def test_create_group_with_invalid_availability_zone( self, az_name, exceptions): vol_type = 'test' vol_type_id = db.volume_type_create( self.ctxt, {'name': vol_type, 'extra_specs': {}}).get('id') grp_type_name = 'test_grp_type' grp_type = db.group_type_create( self.ctxt, {'name': grp_type_name, 'group_specs': {}}).get('id') body = {"group": {"name": "group1", "volume_types": [vol_type_id], "group_type": grp_type, "availability_zone": az_name}} req = fakes.HTTPRequest.blank('/v3/%s/groups' % fake.PROJECT_ID, version=mv.GROUP_VOLUME) self.assertRaises(exceptions, self.controller.create, req, body=body) def test_delete_group_available(self): self.group1.status = fields.GroupStatus.AVAILABLE self.group1.save() req = fakes.HTTPRequest.blank('/v3/%s/groups/%s/action' % (fake.PROJECT_ID, self.group1.id), version=mv.GROUP_VOLUME) body = {"delete": {"delete-volumes": False}} res_dict = self.controller.delete_group( req, self.group1.id, body=body) group = objects.Group.get_by_id( self.ctxt, self.group1.id) self.assertEqual(HTTPStatus.ACCEPTED, res_dict.status_int) self.assertEqual(fields.GroupStatus.DELETING, group.status) def test_delete_group_available_no_delete_volumes(self): self.group1.status = fields.GroupStatus.AVAILABLE self.group1.save() req = fakes.HTTPRequest.blank('/v3/%s/groups/%s/action' % (fake.PROJECT_ID, self.group1.id), version=mv.GROUP_VOLUME) body = {"delete": {"delete-volumes": False}} res_dict = self.controller.delete_group( req, self.group1.id, body=body) group = objects.Group.get_by_id( self.ctxt, self.group1.id) self.assertEqual(HTTPStatus.ACCEPTED, res_dict.status_int) self.assertEqual(fields.GroupStatus.DELETING, group.status) def test_delete_group_with_group_NotFound(self): req = fakes.HTTPRequest.blank('/v3/%s/groups/%s/action' % (fake.PROJECT_ID, fake.WILL_NOT_BE_FOUND_ID), 
version=mv.GROUP_VOLUME) body = {"delete": {"delete-volumes": False}} self.assertRaises(exception.GroupNotFound, self.controller.delete_group, req, fake.WILL_NOT_BE_FOUND_ID, body=body) def test_delete_group_with_invalid_group(self): req = fakes.HTTPRequest.blank('/v3/%s/groups/%s/action' % (fake.PROJECT_ID, self.group1.id), version=mv.GROUP_VOLUME) body = {"delete": {"delete-volumes": False}} self.assertRaises(webob.exc.HTTPBadRequest, self.controller.delete_group, req, self.group1.id, body=body) def test_delete_group_invalid_delete_volumes(self): req = fakes.HTTPRequest.blank('/v3/%s/groups/%s/action' % (fake.PROJECT_ID, self.group1.id), version=mv.GROUP_VOLUME) body = {"delete": {"delete-volumes": True}} res_dict = self.controller.delete_group( req, self.group1.id, body=body) group = objects.Group.get_by_id( self.ctxt, self.group1.id) self.assertEqual(HTTPStatus.ACCEPTED, res_dict.status_int) self.assertEqual(fields.GroupStatus.DELETING, group.status) def test_delete_group_no_host(self): self.group1.host = None self.group1.status = fields.GroupStatus.ERROR self.group1.save() req = fakes.HTTPRequest.blank('/v3/%s/groups/%s/action' % (fake.PROJECT_ID, self.group1.id), version=mv.GROUP_VOLUME) body = {"delete": {"delete-volumes": True}} res_dict = self.controller.delete_group( req, self.group1.id, body=body) self.assertEqual(HTTPStatus.ACCEPTED, res_dict.status_int) group = objects.Group.get_by_id( context.get_admin_context(read_deleted='yes'), self.group1.id) self.assertEqual(fields.GroupStatus.DELETED, group.status) self.assertIsNone(group.host) @mock.patch('cinder.quota.GROUP_QUOTAS.reserve', return_value='reservations') @mock.patch('cinder.quota.GROUP_QUOTAS.commit') def test_create_delete_group_update_quota(self, mock_commit, mock_reserve): name = 'mygroup' description = 'group 1' grp_type = {'id': fake.GROUP_TYPE_ID, 'name': 'group_type'} fake_type = {'id': fake.VOLUME_TYPE_ID, 'name': 'fake_type'} self.mock_object(db, 'volume_types_get_by_name_or_id', return_value=[fake_type]) self.mock_object(db, 'group_type_get', return_value=grp_type) self.mock_object(self.group_api, '_cast_create_group') self.mock_object(self.group_api, 'update_quota') group = self.group_api.create(self.ctxt, name, description, grp_type['id'], [fake_type['id']]) # Verify that quota reservation and commit was called mock_reserve.assert_called_once_with(self.ctxt, project_id=self.ctxt.project_id, groups=1) mock_commit.assert_called_once_with(self.ctxt, 'reservations') self.assertEqual(fields.GroupStatus.CREATING, group.status) self.assertIsNone(group.host) group.status = fields.GroupStatus.ERROR self.group_api.delete(self.ctxt, group) self.group_api.update_quota.assert_called_once_with( self.ctxt, group, -1, self.ctxt.project_id) group = objects.Group.get_by_id( context.get_admin_context(read_deleted='yes'), group.id) self.assertEqual(fields.GroupStatus.DELETED, group.status) @mock.patch('cinder.group.api.API.create') def test_create_group_failed_exceeded_quota(self, mock_group_create): mock_group_create.side_effect = exception.GroupLimitExceeded(allowed=1) name = 'group1' body = {"group": {"group_type": fake.GROUP_TYPE_ID, "volume_types": [fake.VOLUME_TYPE_ID], "name": name, "description": "Group 1", }} req = fakes.HTTPRequest.blank('/v3/%s/groups' % fake.PROJECT_ID, version=mv.GROUP_VOLUME) ex = self.assertRaises(exception.GroupLimitExceeded, self.controller.create, req, body=body) self.assertEqual(HTTPStatus.REQUEST_ENTITY_TOO_LARGE, ex.code) def test_delete_group_with_invalid_body(self): self.group1.status = 
fields.GroupStatus.AVAILABLE self.group1.save() req = fakes.HTTPRequest.blank('/v3/%s/groups/%s/action' % (fake.PROJECT_ID, self.group1.id), version=mv.GROUP_VOLUME) body = {"invalid_request_element": {"delete-volumes": False}} self.assertRaises(exception.ValidationError, self.controller.delete_group, req, self.group1.id, body=body) def test_delete_group_with_invalid_delete_volumes_value_in_body(self): self.group1.status = fields.GroupStatus.AVAILABLE self.group1.save() req = fakes.HTTPRequest.blank('/v3/%s/groups/%s/action' % (fake.PROJECT_ID, self.group1.id), version=mv.GROUP_VOLUME) body = {"delete": {"delete-volumes": "abcd"}} self.assertRaises(exception.ValidationError, self.controller.delete_group, req, self.group1.id, body=body) def test_delete_group_with_empty_delete_volumes_value_in_body(self): self.group1.status = fields.GroupStatus.AVAILABLE self.group1.save() req = fakes.HTTPRequest.blank('/v3/%s/groups/%s/action' % (fake.PROJECT_ID, self.group1.id), version=mv.GROUP_VOLUME) body = {"delete": {"delete-volumes": ""}} self.assertRaises(exception.ValidationError, self.controller.delete_group, req, self.group1.id, body=body) def test_delete_group_with_group_snapshot(self): self.group1.status = fields.GroupStatus.AVAILABLE self.group1.save() g_snapshot = utils.create_group_snapshot(self.ctxt, self.group1.id) req = fakes.HTTPRequest.blank('/v3/%s/groups/%s/action' % (fake.PROJECT_ID, self.group1.id), version=mv.GROUP_VOLUME) body = {"delete": {"delete-volumes": True}} self.assertRaises(webob.exc.HTTPBadRequest, self.controller.delete_group, req, self.group1.id, body=body) g_snapshot.destroy() res_dict = self.controller.delete_group( req, self.group1.id, body=body) group = objects.Group.get_by_id( self.ctxt, self.group1.id) self.assertEqual(HTTPStatus.ACCEPTED, res_dict.status_int) self.assertEqual(fields.GroupStatus.DELETING, group.status) def test_delete_group_delete_volumes(self): self.group1.status = fields.GroupStatus.AVAILABLE self.group1.save() vol = utils.create_volume(self.ctxt, group_id=self.group1.id) req = fakes.HTTPRequest.blank('/v3/%s/groups/%s/action' % (fake.PROJECT_ID, self.group1.id), version=mv.GROUP_VOLUME) body = {"delete": {"delete-volumes": True}} res_dict = self.controller.delete_group( req, self.group1.id, body=body) group = objects.Group.get_by_id( self.ctxt, self.group1.id) self.assertEqual(HTTPStatus.ACCEPTED, res_dict.status_int) self.assertEqual(fields.GroupStatus.DELETING, group.status) vol.destroy() def test_delete_group_delete_volumes_with_attached_volumes(self): self.group1.status = fields.GroupStatus.AVAILABLE self.group1.save() vol = utils.create_volume(self.ctxt, group_id=self.group1.id, attach_status='attached') req = fakes.HTTPRequest.blank('/v3/%s/groups/%s/action' % (fake.PROJECT_ID, self.group1.id), version=mv.GROUP_VOLUME) body = {"delete": {"delete-volumes": True}} self.assertRaises(webob.exc.HTTPBadRequest, self.controller.delete_group, req, self.group1.id, body=body) vol.destroy() def test_delete_group_delete_volumes_with_snapshots(self): self.group1.status = fields.GroupStatus.AVAILABLE self.group1.save() vol = utils.create_volume(self.ctxt, group_id=self.group1.id) utils.create_snapshot(self.ctxt, vol.id) req = fakes.HTTPRequest.blank('/v3/%s/groups/%s/action' % (fake.PROJECT_ID, self.group1.id), version=mv.GROUP_VOLUME) body = {"delete": {"delete-volumes": True}} self.assertRaises(webob.exc.HTTPBadRequest, self.controller.delete_group, req, self.group1.id, body=body) vol.destroy() def 
test_delete_group_delete_volumes_with_deleted_snapshots(self): self.group1.status = fields.GroupStatus.AVAILABLE self.group1.save() vol = utils.create_volume(self.ctxt, group_id=self.group1.id) utils.create_snapshot(self.ctxt, vol.id, status=fields.SnapshotStatus.DELETED, deleted=True) req = fakes.HTTPRequest.blank('/v3/%s/groups/%s/action' % (fake.PROJECT_ID, self.group1.id), version=mv.GROUP_VOLUME) body = {"delete": {"delete-volumes": True}} res_dict = self.controller.delete_group( req, self.group1.id, body=body) group = objects.Group.get_by_id( self.ctxt, self.group1.id) self.assertEqual(HTTPStatus.ACCEPTED, res_dict.status_int) self.assertEqual(fields.GroupStatus.DELETING, group.status) vol.destroy() def test_create_group_failed_no_group_type(self): name = 'group1' body = {"group": {"volume_types": [fake.VOLUME_TYPE_ID], "name": name, "description": "Group 1", }} req = fakes.HTTPRequest.blank('/v3/%s/groups' % fake.PROJECT_ID, version=mv.GROUP_VOLUME) self.assertRaises(exception.ValidationError, self.controller.create, req, body=body) @ddt.data(None, "", " ", "a" * 256) def test_create_group_failed_invalid_group_type(self, group_type): name = 'group1' body = {"group": {"volume_types": [fake.VOLUME_TYPE_ID], "name": name, "description": "Group 1", "group_type": group_type}} req = fakes.HTTPRequest.blank('/v3/%s/groups' % fake.PROJECT_ID, version=mv.GROUP_VOLUME) self.assertRaises(exception.ValidationError, self.controller.create, req, body=body) def test_create_group_failed_no_volume_types(self): name = 'group1' body = {"group": {"group_type": fake.GROUP_TYPE_ID, "name": name, "description": "Group 1", }} req = fakes.HTTPRequest.blank('/v3/%s/groups' % fake.PROJECT_ID, version=mv.GROUP_VOLUME) self.assertRaises(exception.ValidationError, self.controller.create, req, body=body) def test_update_group_success(self): volume_type_id = fake.VOLUME_TYPE_ID self.group1.status = fields.GroupStatus.AVAILABLE self.group1.host = 'test_host' # TODO(geguileo): One `volume_type_ids` gets sorted out make proper # changes here # self.group1.volume_type_ids = [volume_type_id] self.group1.save() remove_volume = utils.create_volume( self.ctxt, volume_type_id=volume_type_id, group_id=self.group1.id) remove_volume2 = utils.create_volume( self.ctxt, volume_type_id=volume_type_id, group_id=self.group1.id, status='error') remove_volume3 = utils.create_volume( self.ctxt, volume_type_id=volume_type_id, group_id=self.group1.id, status='error_deleting') self.assertEqual(fields.GroupStatus.AVAILABLE, self.group1.status) group_volumes = db.volume_get_all_by_generic_group( self.ctxt.elevated(), self.group1.id) group_vol_ids = [group_vol['id'] for group_vol in group_volumes] self.assertIn(remove_volume.id, group_vol_ids) self.assertIn(remove_volume2.id, group_vol_ids) self.assertIn(remove_volume3.id, group_vol_ids) add_volume = utils.create_volume( self.ctxt, volume_type_id=volume_type_id) add_volume2 = utils.create_volume( self.ctxt, volume_type_id=volume_type_id) req = fakes.HTTPRequest.blank('/v3/%s/groups/%s/update' % (fake.PROJECT_ID, self.group1.id), version=mv.GROUP_VOLUME) name = 'newgroup' description = 'New Group Description' add_volumes = add_volume.id + "," + add_volume2.id remove_volumes = ','.join( [remove_volume.id, remove_volume2.id, remove_volume3.id]) body = {"group": {"name": name, "description": description, "add_volumes": add_volumes, "remove_volumes": remove_volumes, }} res_dict = self.controller.update( req, self.group1.id, body=body) group = objects.Group.get_by_id( self.ctxt, self.group1.id) 
self.assertEqual(HTTPStatus.ACCEPTED, res_dict.status_int) self.assertEqual(fields.GroupStatus.UPDATING, group.status) remove_volume.destroy() remove_volume2.destroy() remove_volume3.destroy() add_volume.destroy() add_volume2.destroy() @ddt.data(fields.GroupStatus.CREATING, fields.GroupStatus.UPDATING) def test_update_group_any_state(self, status): self.group1.status = status req = fakes.HTTPRequest.blank('/v3/%s/groups/%s/update' % (fake.PROJECT_ID, self.group1.id), version=mv.GROUP_VOLUME) body = {"group": {"name": "new name", "description": "new description", "add_volumes": None, "remove_volumes": None, }} res_dict = self.controller.update( req, self.group1.id, body=body) self.assertEqual(HTTPStatus.ACCEPTED, res_dict.status_int) group = objects.Group.get_by_id(self.ctxt, self.group1.id) self.assertEqual("new name", group.name) self.assertEqual("new description", group.description) def test_update_group_add_volume_not_found(self): self.group1.status = fields.GroupStatus.AVAILABLE self.group1.save() req = fakes.HTTPRequest.blank('/v3/%s/groups/%s/update' % (fake.PROJECT_ID, self.group1.id), version=mv.GROUP_VOLUME) body = {"group": {"name": None, "description": None, "add_volumes": "fake-volume-uuid", "remove_volumes": None, }} self.assertRaises(exception.InvalidVolume, self.controller.update, req, self.group1.id, body=body) def test_update_group_remove_volume_not_found(self): self.group1.status = fields.GroupStatus.AVAILABLE self.group1.save() req = fakes.HTTPRequest.blank('/v3/%s/groups/%s/update' % (fake.PROJECT_ID, self.group1.id), version=mv.GROUP_VOLUME) body = {"group": {"name": None, "description": "new description", "add_volumes": None, "remove_volumes": "fake-volume-uuid", }} self.assertRaises(exception.InvalidVolume, self.controller.update, req, self.group1.id, body=body) def test_update_group_empty_parameters(self): self.group1.status = fields.GroupStatus.AVAILABLE self.group1.save() req = fakes.HTTPRequest.blank('/v3/%s/groups/%s/update' % (fake.PROJECT_ID, self.group1.id), version=mv.GROUP_VOLUME) body = {"group": {"name": None, "description": None, "add_volumes": None, "remove_volumes": None, }} self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update, req, self.group1.id, body=body) def test_update_group_add_volume_invalid_state(self): self.group1.status = fields.GroupStatus.AVAILABLE self.group1.save() add_volume = utils.create_volume( self.ctxt, volume_type_id=fake.VOLUME_TYPE_ID, status='wrong_status') req = fakes.HTTPRequest.blank('/v3/%s/groups/%s/update' % (fake.PROJECT_ID, self.group1.id), version=mv.GROUP_VOLUME) add_volumes = add_volume.id body = {"group": {"name": "group1", "description": "", "add_volumes": add_volumes, "remove_volumes": None, }} self.assertRaises(exception.InvalidVolume, self.controller.update, req, self.group1.id, body=body) add_volume.destroy() def test_update_group_add_volume_invalid_volume_type(self): self.group1.status = fields.GroupStatus.AVAILABLE self.group1.save() wrong_type = fake.VOLUME_TYPE2_ID add_volume = utils.create_volume( self.ctxt, volume_type_id=wrong_type) req = fakes.HTTPRequest.blank('/v3/%s/groups/%s/update' % (fake.PROJECT_ID, self.group1.id), version=mv.GROUP_VOLUME) add_volumes = add_volume.id body = {"group": {"name": "group1", "description": "", "add_volumes": add_volumes, "remove_volumes": None, }} self.assertRaises(exception.InvalidVolume, self.controller.update, req, self.group1.id, body=body) add_volume.destroy() def test_update_group_add_volume_already_in_group(self): self.group1.status = 
fields.GroupStatus.AVAILABLE self.group1.save() add_volume = utils.create_volume( self.ctxt, group_id=fake.GROUP2_ID) req = fakes.HTTPRequest.blank('/v3/%s/groups/%s/update' % (fake.PROJECT_ID, self.group1.id), version=mv.GROUP_VOLUME) add_volumes = add_volume.id body = {"group": {"name": "group1", "description": "", "add_volumes": add_volumes, "remove_volumes": None, }} self.assertRaises(exception.InvalidVolume, self.controller.update, req, self.group1.id, body=body) add_volume.destroy() @ddt.data(fields.GroupStatus.CREATING, fields.GroupStatus.UPDATING) def test_update_group_invalid_state(self, status): self.group1.status = status add_volume = utils.create_volume( self.ctxt, volume_type_id=self.volume_type1.id, host=self.group1.host) req = fakes.HTTPRequest.blank('/v3/%s/groups/%s/update' % (fake.PROJECT_ID, self.group1.id), version=mv.GROUP_VOLUME) body = {"group": {"name": "new name", "description": None, "add_volumes": add_volume.id, "remove_volumes": None, }} self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update, req, self.group1.id, body=body) vol = objects.Volume.get_by_id(self.ctxt, add_volume.id) self.assertEqual(add_volume.status, vol.status) add_volume.destroy() @ddt.data((mv.GROUP_TYPE, 'fake_group_001', fields.GroupStatus.AVAILABLE, exception.VersionNotFoundForAPIMethod), (mv.GROUP_SNAPSHOT_RESET_STATUS, 'fake_group_001', fields.GroupStatus.AVAILABLE, exception.VersionNotFoundForAPIMethod), (mv.GROUP_VOLUME_RESET_STATUS, 'fake_group_001', fields.GroupStatus.AVAILABLE, exception.GroupNotFound), (mv.GROUP_VOLUME_RESET_STATUS, None, 'invalid_test_status', exception.InvalidGroupStatus), (mv.GROUP_VOLUME_RESET_STATUS, 'fake_group_001', None, exception.ValidationError) ) @ddt.unpack def test_reset_group_status_illegal(self, version, group_id, status, exceptions): g_id = group_id or self.group2.id req = fakes.HTTPRequest.blank('/v3/%s/groups/%s/action' % (fake.PROJECT_ID, g_id), version=version) body = {"reset_status": { "status": status }} self.assertRaises(exceptions, self.controller.reset_status, req, g_id, body=body) def test_reset_group_without_status(self): g_id = self.group2.id req = fakes.HTTPRequest.blank('/v3/%s/groups/%s/action' % (fake.PROJECT_ID, g_id), version=mv.GROUP_VOLUME_RESET_STATUS) body = {"reset_status": {}} self.assertRaises(exception.ValidationError, self.controller.reset_status, req, g_id, body=body) def test_reset_group_status(self): req = fakes.HTTPRequest.blank('/v3/%s/groups/%s/action' % (fake.PROJECT_ID, self.group2.id), version=mv.GROUP_VOLUME_RESET_STATUS) req.environ['cinder.context'] = self.ctxt body = {"reset_status": { "status": fields.GroupStatus.AVAILABLE }} response = self.controller.reset_status(req, self.group2.id, body=body) group = objects.Group.get_by_id(self.ctxt, self.group2.id) self.assertEqual(HTTPStatus.ACCEPTED, response.status_int) self.assertEqual(fields.GroupStatus.AVAILABLE, group.status) @ddt.data(True, False) @mock.patch('cinder.scheduler.rpcapi.SchedulerAPI.validate_host_capacity') def test_create_group_from_src_snap(self, valid_host, mock_validate_host): self.mock_object(volume_api.API, "create", v3_fakes.fake_volume_create) group = utils.create_group(self.ctxt, group_type_id=fake.GROUP_TYPE_ID, volume_type_ids=[fake.VOLUME_TYPE_ID]) volume = utils.create_volume( self.ctxt, group_id=group.id, volume_type_id=fake.VOLUME_TYPE_ID) group_snapshot = utils.create_group_snapshot( self.ctxt, group_id=group.id, group_type_id=group.group_type_id) snapshot = utils.create_snapshot( self.ctxt, volume.id, 
group_snapshot_id=group_snapshot.id, status=fields.SnapshotStatus.AVAILABLE, volume_type_id=volume.volume_type_id) mock_validate_host.return_value = valid_host test_grp_name = 'test grp' body = {"create-from-src": {"name": test_grp_name, "description": "Group 1", "group_snapshot_id": group_snapshot.id}} req = fakes.HTTPRequest.blank('/v3/%s/groups/action' % fake.PROJECT_ID, version=mv.GROUP_SNAPSHOTS) if valid_host: res_dict = self.controller.create_from_src(req, body=body) self.assertIn('id', res_dict['group']) self.assertEqual(test_grp_name, res_dict['group']['name']) grp_ref = objects.Group.get_by_id( self.ctxt.elevated(), res_dict['group']['id']) else: self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create_from_src, req, body=body) groups = objects.GroupList.get_all_by_project(self.ctxt, fake.PROJECT_ID) grp_ref = objects.Group.get_by_id( self.ctxt.elevated(), groups[0]['id']) grp_ref.destroy() snapshot.destroy() volume.destroy() group.destroy() group_snapshot.destroy() @ddt.data(True, False) @mock.patch('cinder.scheduler.rpcapi.SchedulerAPI.validate_host_capacity') def test_create_group_from_src_grp(self, host_valid, mock_validate_host): self.mock_object(volume_api.API, "create", v3_fakes.fake_volume_create) source_grp = utils.create_group(self.ctxt, group_type_id=fake.GROUP_TYPE_ID, volume_type_ids=[fake.VOLUME_TYPE_ID]) volume = utils.create_volume( self.ctxt, group_id=source_grp.id, volume_type_id=fake.VOLUME_TYPE_ID) mock_validate_host.return_value = host_valid test_grp_name = 'test cg' body = {"create-from-src": {"name": test_grp_name, "description": "Consistency Group 1", "source_group_id": source_grp.id}} req = fakes.HTTPRequest.blank('/v3/%s/groups/action' % fake.PROJECT_ID, version=mv.GROUP_SNAPSHOTS) if host_valid: res_dict = self.controller.create_from_src(req, body=body) self.assertIn('id', res_dict['group']) self.assertEqual(test_grp_name, res_dict['group']['name']) grp = objects.Group.get_by_id( self.ctxt, res_dict['group']['id']) grp.destroy() volume.destroy() source_grp.destroy() else: self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create_from_src, req, body=body) groups = objects.GroupList.get_all_by_project(self.ctxt, fake.PROJECT_ID) grp = objects.Group.get_by_id( self.ctxt.elevated(), groups[0]['id']) grp.destroy() volume.destroy() source_grp.destroy() @mock.patch('cinder.volume.volume_utils.is_replicated_spec', return_value=True) @mock.patch('cinder.volume.volume_utils.is_group_a_type', return_value=True) def test_enable_replication(self, mock_rep_grp_type, mock_rep_vol_type): req = fakes.HTTPRequest.blank('/v3/%s/groups/%s/action' % (fake.PROJECT_ID, self.group3.id), version=mv.GROUP_REPLICATION) self.group3.status = fields.GroupStatus.AVAILABLE self.group3.save() body = {"enable_replication": {}} response = self.controller.enable_replication(req, self.group3.id, body=body) group = objects.Group.get_by_id(self.ctxt, self.group3.id) self.assertEqual(202, response.status_int) self.assertEqual(fields.GroupStatus.AVAILABLE, group.status) self.assertEqual(fields.ReplicationStatus.ENABLING, group.replication_status) @ddt.data((True, False), (False, True), (False, False)) @ddt.unpack @mock.patch('cinder.volume.volume_utils.is_replicated_spec') @mock.patch('cinder.volume.volume_utils.is_group_a_type') def test_enable_replication_wrong_type(self, is_grp_rep_type, is_vol_rep_type, mock_rep_grp_type, mock_rep_vol_type): mock_rep_grp_type.return_value = is_grp_rep_type mock_rep_vol_type.return_value = is_vol_rep_type req = 
fakes.HTTPRequest.blank('/v3/%s/groups/%s/action' % (fake.PROJECT_ID, self.group3.id), version=mv.GROUP_REPLICATION) self.group3.status = fields.GroupStatus.AVAILABLE self.group3.save() body = {"enable_replication": {}} self.assertRaises(webob.exc.HTTPBadRequest, self.controller.enable_replication, req, self.group3.id, body=body) @mock.patch('cinder.volume.volume_utils.is_replicated_spec', return_value=False) @mock.patch('cinder.volume.volume_utils.is_group_a_type', return_value=True) def test_enable_replication_wrong_group_type(self, mock_rep_grp_type, mock_rep_vol_type): req = fakes.HTTPRequest.blank('/v3/%s/groups/%s/action' % (fake.PROJECT_ID, self.group3.id), version=mv.GROUP_REPLICATION) self.group3.status = fields.GroupStatus.AVAILABLE self.group3.save() body = {"enable_replication": {}} self.assertRaises(webob.exc.HTTPBadRequest, self.controller.enable_replication, req, self.group3.id, body=body) @mock.patch('cinder.volume.volume_utils.is_replicated_spec', return_value=True) @mock.patch('cinder.volume.volume_utils.is_group_a_type', return_value=True) @ddt.data((mv.GROUP_REPLICATION, True, fields.GroupStatus.CREATING, webob.exc.HTTPBadRequest), (mv.GROUP_REPLICATION, False, fields.GroupStatus.AVAILABLE, exception.GroupNotFound), (INVALID_GROUP_REPLICATION, True, fields.GroupStatus.AVAILABLE, exception.VersionNotFoundForAPIMethod), ) @ddt.unpack def test_enable_replication_negative(self, version, not_fake, status, exceptions, mock_rep_grp_type, mock_rep_vol_type): if not_fake: group_id = self.group3.id else: group_id = fake.GROUP_ID req = fakes.HTTPRequest.blank('/v3/%s/groups/%s/action' % (fake.PROJECT_ID, group_id), version=version) if not_fake: self.group3.status = status self.group3.save() body = {"enable_replication": {}} self.assertRaises(exceptions, self.controller.enable_replication, req, group_id, body=body) @mock.patch('cinder.volume.volume_utils.is_replicated_spec', return_value=True) @mock.patch('cinder.volume.volume_utils.is_group_a_type', return_value=True) def test_disable_replication(self, mock_rep_grp_type, mock_rep_vol_type): req = fakes.HTTPRequest.blank('/v3/%s/groups/%s/action' % (fake.PROJECT_ID, self.group3.id), version=mv.GROUP_REPLICATION) self.group3.status = fields.GroupStatus.AVAILABLE self.group3.replication_status = fields.ReplicationStatus.ENABLED self.group3.save() body = {"disable_replication": {}} response = self.controller.disable_replication(req, self.group3.id, body=body) group = objects.Group.get_by_id(self.ctxt, self.group3.id) self.assertEqual(202, response.status_int) self.assertEqual(fields.GroupStatus.AVAILABLE, group.status) self.assertEqual(fields.ReplicationStatus.DISABLING, group.replication_status) @mock.patch('cinder.volume.volume_utils.is_replicated_spec', return_value=True) @mock.patch('cinder.volume.volume_utils.is_group_a_type', return_value=True) @ddt.data((mv.GROUP_REPLICATION, True, fields.GroupStatus.CREATING, fields.ReplicationStatus.ENABLED, webob.exc.HTTPBadRequest), (mv.GROUP_REPLICATION, True, fields.GroupStatus.AVAILABLE, fields.ReplicationStatus.DISABLED, webob.exc.HTTPBadRequest), (mv.GROUP_REPLICATION, False, fields.GroupStatus.AVAILABLE, fields.ReplicationStatus.DISABLED, exception.GroupNotFound), (INVALID_GROUP_REPLICATION, True, fields.GroupStatus.AVAILABLE, fields.ReplicationStatus.ENABLED, exception.VersionNotFoundForAPIMethod), ) @ddt.unpack def test_disable_replication_negative(self, version, not_fake, status, rep_status, exceptions, mock_rep_grp_type, mock_rep_vol_type): if not_fake: group_id = self.group3.id 
else: group_id = fake.GROUP_ID req = fakes.HTTPRequest.blank('/v3/%s/groups/%s/action' % (fake.PROJECT_ID, group_id), version=version) if not_fake: self.group3.status = status self.group3.replication_status = rep_status self.group3.save() body = {"disable_replication": {}} self.assertRaises(exceptions, self.controller.disable_replication, req, group_id, body=body) @mock.patch('cinder.volume.volume_utils.is_replicated_spec', return_value=True) @mock.patch('cinder.volume.volume_utils.is_group_a_type', return_value=True) def test_failover_replication(self, mock_rep_grp_type, mock_rep_vol_type): req = fakes.HTTPRequest.blank('/v3/%s/groups/%s/action' % (fake.PROJECT_ID, self.group3.id), version=mv.GROUP_REPLICATION) self.group3.status = fields.GroupStatus.AVAILABLE self.group3.replication_status = fields.ReplicationStatus.ENABLED self.group3.save() body = {"failover_replication": {}} response = self.controller.failover_replication(req, self.group3.id, body=body) group = objects.Group.get_by_id(self.ctxt, self.group3.id) self.assertEqual(202, response.status_int) self.assertEqual(fields.GroupStatus.AVAILABLE, group.status) self.assertEqual(fields.ReplicationStatus.FAILING_OVER, group.replication_status) @mock.patch('cinder.volume.volume_utils.is_replicated_spec', return_value=True) @mock.patch('cinder.volume.volume_utils.is_group_a_type', return_value=True) @ddt.data((mv.GROUP_REPLICATION, True, fields.GroupStatus.CREATING, fields.ReplicationStatus.ENABLED, webob.exc.HTTPBadRequest), (mv.GROUP_REPLICATION, True, fields.GroupStatus.AVAILABLE, fields.ReplicationStatus.DISABLED, webob.exc.HTTPBadRequest), (mv.GROUP_REPLICATION, False, fields.GroupStatus.AVAILABLE, fields.ReplicationStatus.DISABLED, exception.GroupNotFound), (INVALID_GROUP_REPLICATION, True, fields.GroupStatus.AVAILABLE, fields.ReplicationStatus.ENABLED, exception.VersionNotFoundForAPIMethod), ) @ddt.unpack def test_failover_replication_negative(self, version, not_fake, status, rep_status, exceptions, mock_rep_grp_type, mock_rep_vol_type): if not_fake: group_id = self.group3.id else: group_id = fake.GROUP_ID req = fakes.HTTPRequest.blank('/v3/%s/groups/%s/action' % (fake.PROJECT_ID, group_id), version=version) if not_fake: self.group3.status = status self.group3.replication_status = rep_status self.group3.save() body = {"failover_replication": {}} self.assertRaises(exceptions, self.controller.failover_replication, req, group_id, body=body) @mock.patch('cinder.volume.volume_utils.is_replicated_spec', return_value=True) @mock.patch('cinder.volume.volume_utils.is_group_a_type', return_value=True) @mock.patch('cinder.volume.rpcapi.VolumeAPI.list_replication_targets') def test_list_replication_targets(self, mock_list_rep_targets, mock_rep_grp_type, mock_rep_vol_type): req = fakes.HTTPRequest.blank('/v3/%s/groups/%s/action' % (fake.PROJECT_ID, self.group3.id), version=mv.GROUP_REPLICATION) targets = { 'replication_targets': [ {'backend_id': 'lvm_backend_1'} ] } mock_list_rep_targets.return_value = targets self.group3.status = fields.GroupStatus.AVAILABLE self.group3.save() body = {"list_replication_targets": {}} response = self.controller.list_replication_targets( req, self.group3.id, body=body) self.assertIn('replication_targets', response) self.assertEqual('lvm_backend_1', response['replication_targets'][0]['backend_id']) def test_show_group_with_project_id(self): # If the microversion >= 3.58 and "is_admin=True", "project_id" should # be contained in the response body. 
req = fakes.HTTPRequest.blank( '/v3/%s/groups/%s' % (fake.PROJECT_ID, self.group1.id), version=mv.GROUP_GROUPSNAPSHOT_PROJECT_ID, use_admin_context=True) res_dict = self.controller.show(req, self.group1.id) self.assertEqual(1, len(res_dict)) self.assertEqual(fake.PROJECT_ID, res_dict['group']['project_id']) # If the microversion < 3.58, "project_id" should not be # contained in the response body. req = fakes.HTTPRequest.blank( '/v3/%s/groups/%s' % (fake.PROJECT_ID, self.group1.id), version=mv.get_prior_version(mv.GROUP_GROUPSNAPSHOT_PROJECT_ID), use_admin_context=True) res_dict = self.controller.show(req, self.group1.id) self.assertEqual(1, len(res_dict)) self.assertNotIn('project_id', res_dict['group']) def test_list_groups_with_project_id(self): self.group1.group_type_id = fake.GROUP_TYPE_ID self.group1.save() self.group2.group_type_id = fake.GROUP_TYPE2_ID self.group2.save() req = fakes.HTTPRequest.blank( '/v3/%s/groups/detail' % self.ctxt.project_id, version=mv.GROUP_GROUPSNAPSHOT_PROJECT_ID, use_admin_context=True) res_dict = self.controller.detail(req) self.assertEqual(1, len(res_dict)) self.assertEqual(self.group1.project_id, res_dict['groups'][0]['project_id']) self.assertEqual(self.group2.project_id, res_dict['groups'][1]['project_id']) def test_show_group_without_project_id(self): # If the microversion >= 3.58 and "is_admin=False", "project_id" should # not be contained in the response body. req = fakes.HTTPRequest.blank( '/v3/%s/groups/%s' % (fake.PROJECT_ID, self.group3.id), version=mv.GROUP_GROUPSNAPSHOT_PROJECT_ID) res_dict = self.controller.show(req, self.group1.id) self.assertEqual(1, len(res_dict)) self.assertNotIn('project_id', res_dict['group']) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/api/v3/test_limits.py0000664000175000017500000000614300000000000022473 0ustar00zuulzuul00000000000000# Copyright 2017 Huawei Corporation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
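# NOTE: LimitsControllerTest below exercises the v3 /limits endpoint.
# From the LIMITS_ADMIN_FILTER microversion onward an admin request may
# pass ?project_id=<id> to read another project's limits; earlier
# microversions and non-admin callers always get their own project's data.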
from unittest import mock

import ddt

from cinder.api import microversions as mv
from cinder.api.openstack import api_version_request as api_version
from cinder.api.v3 import limits
from cinder.tests.unit.api import fakes
from cinder.tests.unit import fake_constants as fake
from cinder.tests.unit import test

LIMITS_FILTER = mv.LIMITS_ADMIN_FILTER
PRE_LIMITS_FILTER = mv.get_prior_version(LIMITS_FILTER)


@ddt.ddt
class LimitsControllerTest(test.TestCase):
    def setUp(self):
        super(LimitsControllerTest, self).setUp()
        self.controller = limits.LimitsController()

    @ddt.data((PRE_LIMITS_FILTER, True), (PRE_LIMITS_FILTER, False),
              (LIMITS_FILTER, True), (LIMITS_FILTER, False))
    @mock.patch('cinder.quota.VolumeTypeQuotaEngine.get_project_quotas')
    def test_get_limit_with_project_id(self, ver_project, mock_get_quotas):
        max_ver, has_project = ver_project
        req = fakes.HTTPRequest.blank('/v3/limits', use_admin_context=True)
        if has_project:
            req = fakes.HTTPRequest.blank(
                '/v3/limits?project_id=%s' % fake.UUID1,
                use_admin_context=True)
        req.api_version_request = api_version.APIVersionRequest(max_ver)

        def get_project_quotas(context, project_id, quota_class=None,
                               defaults=True, usages=True):
            if project_id == fake.UUID1:
                return {"gigabytes": {'limit': 5}}
            return {"gigabytes": {'limit': 10}}
        mock_get_quotas.side_effect = get_project_quotas

        resp_dict = self.controller.index(req)
        # If the caller is an admin, the request is at LIMITS_FILTER or
        # later, and a project_id filter is given, cinder returns the
        # specified project's quota.
        if max_ver == LIMITS_FILTER and has_project:
            self.assertEqual(
                5, resp_dict['limits']['absolute']['maxTotalVolumeGigabytes'])
        else:
            self.assertEqual(
                10, resp_dict['limits']['absolute']['maxTotalVolumeGigabytes'])

        # A non-admin caller always gets its own project's quota.
        req = fakes.HTTPRequest.blank('/v3/limits', use_admin_context=False)
        if has_project:
            req = fakes.HTTPRequest.blank(
                '/v3/limits?project_id=%s' % fake.UUID1,
                use_admin_context=False)
        req.api_version_request = api_version.APIVersionRequest(max_ver)
        resp_dict = self.controller.index(req)

        self.assertEqual(
            10, resp_dict['limits']['absolute']['maxTotalVolumeGigabytes'])
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/api/v3/test_messages.py0000664000175000017500000001405600000000000023003 0ustar00zuulzuul00000000000000
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
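# NOTE: MessageApiTest below covers the v3 user messages API (show, delete
# and index), including microversion gating and the generic resource-filter
# handling applied to message listings.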
from http import HTTPStatus from unittest import mock import ddt from cinder.api import extensions from cinder.api import microversions as mv from cinder.api.v3 import messages from cinder import context from cinder import exception from cinder.message import api as message_api from cinder.message import message_field from cinder.tests.unit.api import fakes from cinder.tests.unit.api.v3 import fakes as v3_fakes from cinder.tests.unit import test NS = '{http://docs.openstack.org/api/openstack-block-storage/3.0/content}' @ddt.ddt class MessageApiTest(test.TestCase): def setUp(self): super(MessageApiTest, self).setUp() self.ext_mgr = extensions.ExtensionManager() self.ext_mgr.extensions = {} self.controller = messages.MessagesController(self.ext_mgr) self.maxDiff = None self.ctxt = context.RequestContext('admin', 'fakeproject', True) def _expected_message_from_controller(self, id): message = v3_fakes.fake_message(id) links = [ {'href': 'http://localhost/v3/fakeproject/messages/%s' % id, 'rel': 'self'}, {'href': 'http://localhost/fakeproject/messages/%s' % id, 'rel': 'bookmark'}, ] return { 'message': { 'id': message.get('id'), 'user_message': "%s:%s" % ( message_field.translate_action(message.get('action_id')), message_field.translate_detail(message.get('detail_id'))), 'request_id': message.get('request_id'), 'event_id': message.get('event_id'), 'created_at': message.get('created_at'), 'message_level': message.get('message_level'), 'guaranteed_until': message.get('expires_at'), 'links': links, } } def test_show(self): self.mock_object(message_api.API, 'get', v3_fakes.fake_message_get) req = fakes.HTTPRequest.blank( '/v3/fakeproject/messages/%s' % fakes.FAKE_UUID, version=mv.MESSAGES) req.environ['cinder.context'] = self.ctxt res_dict = self.controller.show(req, fakes.FAKE_UUID) ex = self._expected_message_from_controller(fakes.FAKE_UUID) self.assertEqual(ex, res_dict) def test_show_not_found(self): self.mock_object(message_api.API, 'get', side_effect=exception.MessageNotFound( message_id=fakes.FAKE_UUID)) req = fakes.HTTPRequest.blank( '/v3/messages/%s' % fakes.FAKE_UUID, version=mv.MESSAGES) req.environ['cinder.context'] = self.ctxt self.assertRaises(exception.MessageNotFound, self.controller.show, req, fakes.FAKE_UUID) def test_show_pre_microversion(self): self.mock_object(message_api.API, 'get', v3_fakes.fake_message_get) req = fakes.HTTPRequest.blank('/v3/messages/%s' % fakes.FAKE_UUID, version=mv.BASE_VERSION) req.environ['cinder.context'] = self.ctxt self.assertRaises(exception.VersionNotFoundForAPIMethod, self.controller.show, req, fakes.FAKE_UUID) def test_delete(self): self.mock_object(message_api.API, 'get', v3_fakes.fake_message_get) self.mock_object(message_api.API, 'delete') req = fakes.HTTPRequest.blank( '/v3/messages/%s' % fakes.FAKE_UUID, version=mv.MESSAGES) req.environ['cinder.context'] = self.ctxt resp = self.controller.delete(req, fakes.FAKE_UUID) self.assertEqual(HTTPStatus.NO_CONTENT, resp.status_int) self.assertTrue(message_api.API.delete.called) def test_delete_not_found(self): self.mock_object(message_api.API, 'get', side_effect=exception.MessageNotFound( message_id=fakes.FAKE_UUID)) req = fakes.HTTPRequest.blank( '/v3/messages/%s' % fakes.FAKE_UUID, version=mv.MESSAGES) self.assertRaises(exception.MessageNotFound, self.controller.delete, req, fakes.FAKE_UUID) @ddt.data(mv.get_prior_version(mv.RESOURCE_FILTER), mv.RESOURCE_FILTER, mv.LIKE_FILTER) @mock.patch('cinder.api.common.reject_invalid_filters') def test_message_list_with_general_filter(self, version, 
mock_update): url = '/v3/%s/messages' % fakes.FAKE_UUID req = fakes.HTTPRequest.blank(url, version=version, use_admin_context=False) self.controller.index(req) if version != mv.get_prior_version(mv.RESOURCE_FILTER): support_like = True if version == mv.LIKE_FILTER else False mock_update.assert_called_once_with(req.environ['cinder.context'], mock.ANY, 'message', support_like) def test_index(self): self.mock_object(message_api.API, 'get_all', return_value=[v3_fakes.fake_message(fakes.FAKE_UUID)]) req = fakes.HTTPRequest.blank( '/v3/fakeproject/messages/%s' % fakes.FAKE_UUID, version=mv.MESSAGES) req.environ['cinder.context'] = self.ctxt res_dict = self.controller.index(req) ex = self._expected_message_from_controller(fakes.FAKE_UUID) expected = { 'messages': [ex['message']] } self.assertDictEqual(expected, res_dict) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/api/v3/test_resource_filters.py0000664000175000017500000000461500000000000024553 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Tests for resource filters API.""" from unittest import mock import ddt from cinder.api import microversions as mv from cinder.api.v3 import resource_filters as v3_filters from cinder.tests.unit.api import fakes from cinder.tests.unit import fake_constants as fake from cinder.tests.unit import test @ddt.ddt class ResourceFiltersAPITestCase(test.TestCase): """Test Case for filter API.""" def setUp(self): super(ResourceFiltersAPITestCase, self).setUp() self.controller = v3_filters.ResourceFiltersController() @ddt.data({'filters': {'volume': ['key1']}, 'resource': 'volume', 'expected_filters': [{'resource': 'volume', 'filters': ['key1']}]}, {'filters': {'volume': ['key1'], 'snapshot': ['key2']}, 'resource': None, 'expected_filters': [{'resource': 'volume', 'filters': ['key1']}, {'resource': 'snapshot', 'filters': ['key2']}]}, {'filters': {'volume': ['key1', 'key2']}, 'resource': 'snapshot', 'expected_filters': []}) @ddt.unpack def test_get_allowed_filters(self, filters, resource, expected_filters): request_url = '/v3/%s/resource_filters' % fake.PROJECT_ID if resource is not None: request_url += '?resource=%s' % resource req = fakes.HTTPRequest.blank(request_url, version=mv.RESOURCE_FILTER_CONFIG) with mock.patch('cinder.api.common._FILTERS_COLLECTION', filters): result = self.controller.index(req) self.assertCountEqual(list(result), ['resource_filters']) self.assertCountEqual(expected_filters, result['resource_filters']) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/api/v3/test_snapshot_manage.py0000664000175000017500000002174300000000000024344 0ustar00zuulzuul00000000000000# Copyright (c) 2016 Stratoscale, Ltd. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from http import HTTPStatus from unittest import mock from urllib.parse import urlencode import ddt from oslo_config import cfg from oslo_serialization import jsonutils import webob from cinder.api import microversions as mv from cinder.api.v3 import router as router_v3 from cinder.common import constants from cinder import context from cinder import objects from cinder.tests.unit.api.contrib import test_snapshot_manage as test_contrib from cinder.tests.unit.api import fakes from cinder.tests.unit import fake_constants as fake from cinder.tests.unit import fake_service from cinder.tests.unit import test CONF = cfg.CONF def app(): # no auth, just let environ['cinder.context'] pass through api = router_v3.APIRouter() mapper = fakes.urlmap.URLMap() mapper['/v3'] = api return mapper @ddt.ddt @mock.patch('cinder.volume.api.API.get', test_contrib.volume_get) class SnapshotManageTest(test.TestCase): """Test cases for cinder/api/v3/snapshot_manage.py""" def setUp(self): super(SnapshotManageTest, self).setUp() self._admin_ctxt = context.RequestContext(fake.USER_ID, fake.PROJECT_ID, True) def _get_resp_post(self, body, version=mv.MANAGE_EXISTING_LIST): """Helper to execute a POST manageable_snapshots API call.""" req = webob.Request.blank('/v3/%s/manageable_snapshots' % fake.PROJECT_ID) req.method = 'POST' req.headers = mv.get_mv_header(version) req.headers['Content-Type'] = 'application/json' req.environ['cinder.context'] = self._admin_ctxt req.body = jsonutils.dump_as_bytes(body) res = req.get_response(app()) return res @mock.patch( 'cinder.scheduler.rpcapi.SchedulerAPI.manage_existing_snapshot') @mock.patch('cinder.volume.api.API.create_snapshot_in_db', return_value=mock.MagicMock(id=fake.SNAPSHOT_ID)) @mock.patch('cinder.objects.service.Service.get_by_id') def test_manage_snapshot_route(self, mock_service_get, mock_create_snapshot, mock_rpcapi): """Test call to manage snapshot. There is currently no change between the API in contrib and the API in v3, so here we simply check that the call is routed properly, rather than copying all the tests. 
""" mock_service_get.return_value = fake_service.fake_service_obj( self._admin_ctxt, binary=constants.VOLUME_BINARY) body = {'snapshot': {'volume_id': fake.VOLUME_ID, 'ref': {'fake_ref': "fake_val"}}} res = self._get_resp_post(body) self.assertEqual(HTTPStatus.ACCEPTED, res.status_int, res) def test_manage_snapshot_previous_version(self): body = {'snapshot': {'volume_id': fake.VOLUME_ID, 'ref': 'fake_ref'}} res = self._get_resp_post( body, version=mv.get_prior_version(mv.MANAGE_EXISTING_LIST)) self.assertEqual(HTTPStatus.NOT_FOUND, res.status_int, res) def _get_resp_get(self, host, detailed, paging, version=mv.MANAGE_EXISTING_LIST, **kwargs): """Helper to execute a GET os-snapshot-manage API call.""" params = {'host': host} if host else {} params.update(kwargs) if paging: params.update({'marker': '1234', 'limit': 10, 'offset': 4, 'sort': 'reference:asc'}) query_string = "?%s" % urlencode(params) detail = "" if detailed: detail = "/detail" req = webob.Request.blank('/v3/%s/manageable_snapshots%s%s' % (fake.PROJECT_ID, detail, query_string)) req.method = 'GET' req.headers = mv.get_mv_header(version) req.headers['Content-Type'] = 'application/json' req.environ['cinder.context'] = self._admin_ctxt res = req.get_response(app()) return res @mock.patch('cinder.volume.api.API.get_manageable_snapshots', wraps=test_contrib.api_get_manageable_snapshots) def test_get_manageable_snapshots_route(self, mock_api_manageable): """Test call to get manageable volumes. There is currently no change between the API in contrib and the API in v3, so here we simply check that the call is routed properly, rather than copying all the tests. """ res = self._get_resp_get('fakehost', False, False) self.assertEqual(HTTPStatus.OK, res.status_int) def test_get_manageable_snapshots_previous_version(self): res = self._get_resp_get( 'fakehost', False, False, version=mv.get_prior_version(mv.MANAGE_EXISTING_LIST)) self.assertEqual(HTTPStatus.NOT_FOUND, res.status_int) @mock.patch('cinder.volume.api.API.get_manageable_snapshots', wraps=test_contrib.api_get_manageable_snapshots) def test_get_manageable_snapshots_detail_route(self, mock_api_manageable): """Test call to get manageable volumes (detailed). There is currently no change between the API in contrib and the API in v3, so here we simply check that the call is routed properly, rather than copying all the tests. 
""" res = self._get_resp_get('fakehost', True, True) self.assertEqual(HTTPStatus.OK, res.status_int) def test_get_manageable_snapshots_detail_previous_version(self): res = self._get_resp_get( 'fakehost', True, True, version=mv.get_prior_version(mv.MANAGE_EXISTING_LIST)) self.assertEqual(HTTPStatus.NOT_FOUND, res.status_int) @ddt.data((True, True, 'detail_list'), (True, False, 'summary_list'), (False, True, 'detail_list'), (False, False, 'summary_list')) @ddt.unpack @mock.patch('cinder.objects.Service.is_up', True) @mock.patch('cinder.volume.rpcapi.VolumeAPI._get_cctxt') @mock.patch('cinder.objects.Service.get_by_id') def test_get_manageable_detail(self, clustered, is_detail, view_method, get_service_mock, get_cctxt_mock): if clustered: host = None cluster_name = 'mycluster' version = mv.MANAGE_EXISTING_CLUSTER kwargs = {'cluster': cluster_name} else: host = 'fakehost' cluster_name = None version = mv.MANAGE_EXISTING_LIST kwargs = {} service = objects.Service(disabled=False, host='fakehost', cluster_name=cluster_name) get_service_mock.return_value = service snaps = [mock.sentinel.snap1, mock.sentinel.snap2] get_cctxt_mock.return_value.call.return_value = snaps view_data = {'manageable-snapshots': [{'vol': 'mock.sentinel.snap1'}, {'vol': 'mock.sentinel.snap2'}]} view_path = ('cinder.api.views.manageable_snapshots.ViewBuilder.' + view_method) with mock.patch(view_path, return_value=view_data) as detail_view_mock: res = self._get_resp_get(host, is_detail, False, version=version, **kwargs) self.assertEqual(HTTPStatus.OK, res.status_int) get_cctxt_mock.assert_called_once_with(service.service_topic_queue, version=('3.10', '3.0')) get_cctxt_mock.return_value.call.assert_called_once_with( mock.ANY, 'get_manageable_snapshots', marker=None, limit=CONF.osapi_max_limit, offset=0, sort_keys=['reference'], sort_dirs=['desc'], want_objects=True) detail_view_mock.assert_called_once_with(mock.ANY, snaps, len(snaps)) get_service_mock.assert_called_once_with( mock.ANY, None, host=host, binary=constants.VOLUME_BINARY, cluster_name=cluster_name) @ddt.data(mv.MANAGE_EXISTING_LIST, mv.MANAGE_EXISTING_CLUSTER) def test_get_manageable_missing_host(self, version): res = self._get_resp_get(None, True, False, version=version) self.assertEqual(HTTPStatus.BAD_REQUEST, res.status_int) def test_get_manageable_both_host_cluster(self): res = self._get_resp_get('host', True, False, version=mv.MANAGE_EXISTING_CLUSTER, cluster='cluster') self.assertEqual(HTTPStatus.BAD_REQUEST, res.status_int) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/api/v3/test_snapshot_metadata.py0000664000175000017500000006660400000000000024701 0ustar00zuulzuul00000000000000# Copyright 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from http import HTTPStatus from unittest import mock import uuid import ddt from oslo_serialization import jsonutils import webob from cinder.api import extensions from cinder.api.v2 import snapshots from cinder.api.v3 import snapshot_metadata from cinder import context import cinder.db from cinder import exception from cinder.objects import fields from cinder.scheduler import rpcapi as scheduler_rpcapi from cinder.tests.unit.api import fakes from cinder.tests.unit.api.v3 import fakes as v3_fakes from cinder.tests.unit import fake_constants as fake from cinder.tests.unit import fake_snapshot from cinder.tests.unit import fake_volume from cinder.tests.unit import test from cinder import volume def return_create_snapshot_metadata(context, snapshot_id, metadata, delete): return fake_snapshot_metadata() def return_create_snapshot_metadata_insensitive(context, snapshot_id, metadata, delete): return fake_snapshot_metadata_insensitive() def return_new_snapshot_metadata(context, snapshot_id, metadata, delete): return fake_new_snapshot_metadata() def fake_snapshot_metadata(): metadata = { "key1": "value1", "key2": "value2", "key3": "value3", } return metadata def fake_snapshot_metadata_insensitive(): metadata = { "key1": "value1", "key2": "value2", "key3": "value3", "KEY4": "value4", } return metadata def fake_new_snapshot_metadata(): metadata = { 'key10': 'value10', 'key99': 'value99', 'KEY20': 'value20', } return metadata def return_snapshot(context, snapshot_id): return {'id': '0cc3346e-9fef-4445-abe6-5d2b2690ec64', 'name': 'fake', 'status': 'available', 'snapshot_metadata': {}} # First argument needs to be self to receive the context argument in the right # variable, as this'll be used to replace the original API.get method which # receives self as the first argument. 
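# NOTE: fake_get below returns a minimal detached, available volume object,
# so the snapshot metadata tests never have to touch a real volume backend.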
def fake_get(self, context, *args, **kwargs): vol = {'id': fake.VOLUME_ID, 'size': 100, 'name': 'fake', 'host': 'fake-host', 'status': 'available', 'encryption_key_id': None, 'volume_type_id': fake.VOLUME_TYPE_ID, 'migration_status': None, 'availability_zone': 'fake-zone', 'attach_status': fields.VolumeAttachStatus.DETACHED, 'metadata': {}} return fake_volume.fake_volume_obj(context, **vol) def return_snapshot_nonexistent(context, snapshot_id): raise exception.SnapshotNotFound(snapshot_id=snapshot_id) @ddt.ddt class SnapshotMetaDataTest(test.TestCase): def setUp(self): super(SnapshotMetaDataTest, self).setUp() self.volume_api = cinder.volume.api.API() self.mock_object(volume.api.API, 'get', fake_get) self.mock_object(cinder.db.sqlalchemy.api, 'volume_type_get', v3_fakes.fake_volume_type_get) self.mock_object(scheduler_rpcapi.SchedulerAPI, 'create_snapshot') self.mock_object(cinder.db, 'snapshot_get', return_snapshot) self.mock_object(self.volume_api, 'update_snapshot_metadata') self.patch('cinder.objects.volume.Volume.refresh') self.patch('cinder.quota.QuotaEngine.reserve') self.ext_mgr = extensions.ExtensionManager() self.ext_mgr.extensions = {} self.snapshot_controller = snapshots.SnapshotsController(self.ext_mgr) self.controller = snapshot_metadata.Controller() self.req_id = str(uuid.uuid4()) self.url = '/v3/%s/snapshots/%s/metadata' % ( fake.PROJECT_ID, self.req_id) self.ctx = context.RequestContext(fake.USER_ID, fake.PROJECT_ID, True) snap = {"volume_id": fake.VOLUME_ID, "display_name": "Volume Test Name", "description": "Volume Test Desc", "metadata": {}} body = {"snapshot": snap} req = fakes.HTTPRequest.blank('/v3/snapshots') req.environ['cinder.context'] = self.ctx self.snapshot_controller.create(req, body=body) @mock.patch('cinder.objects.Snapshot.get_by_id') def test_index(self, snapshot_get_by_id): snapshot = { 'id': self.req_id, 'expected_attrs': ['metadata'] } snapshot_obj = fake_snapshot.fake_snapshot_obj(self.ctx, **snapshot) snapshot_obj['metadata'] = {'key1': 'value1', 'key2': 'value2', 'key3': 'value3'} snapshot_get_by_id.return_value = snapshot_obj req = fakes.HTTPRequest.blank(self.url) req.environ['cinder.context'] = self.ctx res_dict = self.controller.index(req, self.req_id) expected = { 'metadata': { 'key1': 'value1', 'key2': 'value2', 'key3': 'value3', }, } self.assertEqual(expected, res_dict) @mock.patch('cinder.objects.Snapshot.get_by_id') def test_index_nonexistent_snapshot(self, snapshot_get_by_id): snapshot_get_by_id.side_effect = \ exception.SnapshotNotFound(snapshot_id=self.req_id) req = fakes.HTTPRequest.blank(self.url) req.environ['cinder.context'] = self.ctx self.assertRaises(exception.SnapshotNotFound, self.controller.index, req, self.url) @mock.patch('cinder.objects.Snapshot.get_by_id') def test_index_no_data(self, snapshot_get_by_id): snapshot = { 'id': self.req_id, 'expected_attrs': ['metadata'] } snapshot_obj = fake_snapshot.fake_snapshot_obj(self.ctx, **snapshot) snapshot_get_by_id.return_value = snapshot_obj req = fakes.HTTPRequest.blank(self.url) req.environ['cinder.context'] = self.ctx res_dict = self.controller.index(req, self.req_id) expected = {'metadata': {}} self.assertEqual(expected, res_dict) @mock.patch('cinder.objects.Snapshot.get_by_id') def test_show(self, snapshot_get_by_id): snapshot = { 'id': self.req_id, 'expected_attrs': ['metadata'] } snapshot_obj = fake_snapshot.fake_snapshot_obj(self.ctx, **snapshot) snapshot_obj['metadata'] = {'key2': 'value2'} snapshot_get_by_id.return_value = snapshot_obj req = 
fakes.HTTPRequest.blank(self.url + '/key2') req.environ['cinder.context'] = self.ctx res_dict = self.controller.show(req, self.req_id, 'key2') expected = {'meta': {'key2': 'value2'}} self.assertEqual(expected, res_dict) @mock.patch('cinder.objects.Snapshot.get_by_id') def test_show_nonexistent_snapshot(self, snapshot_get_by_id): snapshot_get_by_id.side_effect = \ exception.SnapshotNotFound(snapshot_id=self.req_id) req = fakes.HTTPRequest.blank(self.url + '/key2') req.environ['cinder.context'] = self.ctx self.assertRaises(exception.SnapshotNotFound, self.controller.show, req, self.req_id, 'key2') @mock.patch('cinder.objects.Snapshot.get_by_id') def test_show_meta_not_found(self, snapshot_get_by_id): snapshot = { 'id': self.req_id, 'expected_attrs': ['metadata'] } snapshot_obj = fake_snapshot.fake_snapshot_obj(self.ctx, **snapshot) snapshot_get_by_id.return_value = snapshot_obj req = fakes.HTTPRequest.blank(self.url + '/key6') req.environ['cinder.context'] = self.ctx self.assertRaises(exception.SnapshotMetadataNotFound, self.controller.show, req, self.req_id, 'key6') @mock.patch('cinder.db.snapshot_metadata_delete') @mock.patch('cinder.objects.Snapshot.get_by_id') def test_delete(self, snapshot_get_by_id, snapshot_metadata_delete): snapshot = { 'id': self.req_id, 'expected_attrs': ['metadata'] } snapshot_obj = fake_snapshot.fake_snapshot_obj(self.ctx, **snapshot) snapshot_obj['metadata'] = {'key2': 'value2'} snapshot_get_by_id.return_value = snapshot_obj req = fakes.HTTPRequest.blank(self.url + '/key2') req.environ['cinder.context'] = self.ctx req.method = 'DELETE' res = self.controller.delete(req, self.req_id, 'key2') self.assertEqual(HTTPStatus.OK, res.status_int) def test_delete_nonexistent_snapshot(self): self.mock_object(cinder.db, 'snapshot_get', return_snapshot_nonexistent) req = fakes.HTTPRequest.blank(self.url + '/key1') req.environ['cinder.context'] = self.ctx req.method = 'DELETE' self.assertRaises(exception.SnapshotNotFound, self.controller.delete, req, self.req_id, 'key1') @mock.patch('cinder.objects.Snapshot.get_by_id') def test_delete_meta_not_found(self, snapshot_get_by_id): snapshot = { 'id': self.req_id, 'expected_attrs': ['metadata'] } snapshot_obj = fake_snapshot.fake_snapshot_obj(self.ctx, **snapshot) snapshot_get_by_id.return_value = snapshot_obj req = fakes.HTTPRequest.blank(self.url + '/key6') req.environ['cinder.context'] = self.ctx req.method = 'DELETE' self.assertRaises(exception.SnapshotMetadataNotFound, self.controller.delete, req, self.req_id, 'key6') @mock.patch('cinder.db.snapshot_update') @mock.patch('cinder.objects.Volume.get_by_id') @mock.patch('cinder.objects.Snapshot.get_by_id') def test_create(self, snapshot_get_by_id, volume_get_by_id, snapshot_update): snapshot = { 'id': self.req_id, 'expected_attrs': ['metadata'] } snapshot_obj = fake_snapshot.fake_snapshot_obj(self.ctx, **snapshot) fake_volume_obj = fake_volume.fake_volume_obj(self.ctx) snapshot_get_by_id.return_value = snapshot_obj volume_get_by_id.return_value = fake_volume_obj self.mock_object(cinder.db, 'snapshot_metadata_update', return_create_snapshot_metadata) req = fakes.HTTPRequest.blank('/v3/snapshot_metadata') req.environ['cinder.context'] = self.ctx req.method = 'POST' req.content_type = "application/json" body = {"metadata": {"key1": "value1", "key2": "value2", "key3": "value3"}} req.body = jsonutils.dump_as_bytes(body) res_dict = self.controller.create(req, self.req_id, body) self.assertEqual(body, res_dict) @mock.patch('cinder.db.snapshot_update') 
@mock.patch('cinder.objects.Snapshot.get_by_id') def test_create_with_keys_in_uppercase_and_lowercase( self, snapshot_get_by_id, snapshot_update): snapshot = { 'id': self.req_id, 'expected_attrs': ['metadata'] } snapshot_obj = fake_snapshot.fake_snapshot_obj(self.ctx, **snapshot) snapshot_get_by_id.return_value = snapshot_obj # if the keys in uppercase_and_lowercase, should return the one # which server added self.mock_object(cinder.db, 'snapshot_metadata_update', return_create_snapshot_metadata_insensitive) req = fakes.HTTPRequest.blank('/v3/snapshot_metadata') req.environ['cinder.context'] = self.ctx req.method = 'POST' req.content_type = "application/json" body = {"metadata": {"key1": "value1", "KEY1": "value1", "key2": "value2", "KEY2": "value2", "key3": "value3", "KEY4": "value4"}} expected = {"metadata": {"key1": "value1", "key2": "value2", "key3": "value3", "KEY4": "value4"}} req.body = jsonutils.dump_as_bytes(body) res_dict = self.controller.create(req, self.req_id, body) self.assertEqual(expected, res_dict) def test_create_empty_body(self): self.mock_object(cinder.db, 'snapshot_metadata_update', return_create_snapshot_metadata) req = fakes.HTTPRequest.blank(self.url) req.environ['cinder.context'] = self.ctx req.method = 'POST' req.headers["content-type"] = "application/json" self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create, req, self.req_id, None) def test_create_item_empty_key(self): self.mock_object(cinder.db, 'snapshot_metadata_update', return_create_snapshot_metadata) req = fakes.HTTPRequest.blank(self.url + '/key1') req.environ['cinder.context'] = self.ctx req.method = 'PUT' body = {"meta": {"": "value1"}} req.body = jsonutils.dump_as_bytes(body) req.headers["content-type"] = "application/json" self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create, req, self.req_id, body) def test_create_item_key_too_long(self): self.mock_object(cinder.db, 'snapshot_metadata_update', return_create_snapshot_metadata) req = fakes.HTTPRequest.blank(self.url + '/key1') req.environ['cinder.context'] = self.ctx req.method = 'PUT' body = {"meta": {("a" * 260): "value1"}} req.body = jsonutils.dump_as_bytes(body) req.headers["content-type"] = "application/json" self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create, req, self.req_id, body) def test_create_nonexistent_snapshot(self): self.mock_object(cinder.db, 'snapshot_get', return_snapshot_nonexistent) self.mock_object(cinder.db, 'snapshot_metadata_update', return_create_snapshot_metadata) req = fakes.HTTPRequest.blank('/v3/snapshot_metadata') req.environ['cinder.context'] = self.ctx req.method = 'POST' req.content_type = "application/json" body = {"metadata": {"key9": "value9"}} req.body = jsonutils.dump_as_bytes(body) self.assertRaises(exception.SnapshotNotFound, self.controller.create, req, self.req_id, body) @mock.patch('cinder.db.snapshot_update') @mock.patch('cinder.objects.Snapshot.get_by_id') def test_update_all(self, snapshot_get_by_id, snapshot_update): snapshot = { 'id': self.req_id, 'expected_attrs': [] } snapshot_obj = fake_snapshot.fake_snapshot_obj(self.ctx, **snapshot) snapshot_get_by_id.return_value = snapshot_obj self.mock_object(cinder.db, 'snapshot_metadata_update', return_new_snapshot_metadata) req = fakes.HTTPRequest.blank(self.url) req.environ['cinder.context'] = self.ctx req.method = 'PUT' req.content_type = "application/json" expected = { 'metadata': { 'key10': 'value10', 'key99': 'value99', 'KEY20': 'value20', }, } req.body = jsonutils.dump_as_bytes(expected) res_dict = 
self.controller.update_all(req, self.req_id, expected) self.assertEqual(expected, res_dict) @mock.patch('cinder.db.snapshot_update', return_value={'key10': 'value10', 'key99': 'value99', 'KEY20': 'value20'}) @mock.patch('cinder.objects.Snapshot.get_by_id') def test_update_all_with_keys_in_uppercase_and_lowercase( self, snapshot_get_by_id, snapshot_update): snapshot = { 'id': self.req_id, 'expected_attrs': ['metadata'] } snapshot_obj = fake_snapshot.fake_snapshot_obj(self.ctx, **snapshot) snapshot_get_by_id.return_value = snapshot_obj self.mock_object(cinder.db, 'snapshot_metadata_update', return_new_snapshot_metadata) req = fakes.HTTPRequest.blank(self.url) req.environ['cinder.context'] = self.ctx req.method = 'PUT' req.content_type = "application/json" body = { 'metadata': { 'key10': 'value10', 'KEY10': 'value10', 'key99': 'value99', 'KEY20': 'value20', }, } expected = { 'metadata': { 'key10': 'value10', 'key99': 'value99', 'KEY20': 'value20', }, } req.body = jsonutils.dump_as_bytes(expected) res_dict = self.controller.update_all(req, self.req_id, body) self.assertEqual(expected, res_dict) @mock.patch('cinder.db.snapshot_update') @mock.patch('cinder.objects.Snapshot.get_by_id') def test_update_all_empty_container(self, snapshot_get_by_id, snapshot_update): snapshot = { 'id': self.req_id, 'expected_attrs': [] } snapshot_obj = fake_snapshot.fake_snapshot_obj(self.ctx, **snapshot) snapshot_get_by_id.return_value = snapshot_obj self.mock_object(cinder.db, 'snapshot_metadata_update', return_value={}) req = fakes.HTTPRequest.blank(self.url) req.environ['cinder.context'] = self.ctx req.method = 'PUT' req.content_type = "application/json" expected = {'metadata': {}} req.body = jsonutils.dump_as_bytes(expected) res_dict = self.controller.update_all(req, self.req_id, expected) self.assertEqual(expected, res_dict) def test_update_all_malformed_container(self): self.mock_object(cinder.db, 'snapshot_metadata_update', return_create_snapshot_metadata) req = fakes.HTTPRequest.blank(self.url) req.environ['cinder.context'] = self.ctx req.method = 'PUT' req.content_type = "application/json" expected = {'meta': {}} req.body = jsonutils.dump_as_bytes(expected) self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update_all, req, self.req_id, expected) def test_update_all_malformed_data(self): self.mock_object(cinder.db, 'snapshot_metadata_update', return_create_snapshot_metadata) req = fakes.HTTPRequest.blank(self.url) req.environ['cinder.context'] = self.ctx req.method = 'PUT' req.content_type = "application/json" expected = {'metadata': ['asdf']} req.body = jsonutils.dump_as_bytes(expected) self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update_all, req, self.req_id, expected) def test_update_all_nonexistent_snapshot(self): self.mock_object(cinder.db, 'snapshot_get', return_snapshot_nonexistent) req = fakes.HTTPRequest.blank(self.url) req.environ['cinder.context'] = self.ctx req.method = 'PUT' req.content_type = "application/json" body = {'metadata': {'key10': 'value10'}} req.body = jsonutils.dump_as_bytes(body) self.assertRaises(exception.SnapshotNotFound, self.controller.update_all, req, '100', body) @mock.patch('cinder.db.snapshot_metadata_update', return_value=dict()) @mock.patch('cinder.db.snapshot_update') @mock.patch('cinder.objects.Snapshot.get_by_id') def test_update_item(self, snapshot_get_by_id, snapshot_update, snapshot_metadata_update): snapshot = { 'id': self.req_id, 'expected_attrs': ['metadata'] } snapshot_obj = fake_snapshot.fake_snapshot_obj(self.ctx, **snapshot) 
snapshot_get_by_id.return_value = snapshot_obj req = fakes.HTTPRequest.blank(self.url + '/key1') req.environ['cinder.context'] = self.ctx req.method = 'PUT' body = {"meta": {"key1": "value1"}} req.body = jsonutils.dump_as_bytes(body) req.headers["content-type"] = "application/json" res_dict = self.controller.update(req, self.req_id, 'key1', body) expected = {'meta': {'key1': 'value1'}} self.assertEqual(expected, res_dict) def test_update_item_nonexistent_snapshot(self): self.mock_object(cinder.db, 'snapshot_get', return_snapshot_nonexistent) req = fakes.HTTPRequest.blank( '/v3/%s/snapshots/asdf/metadata/key1' % fake.PROJECT_ID) req.environ['cinder.context'] = self.ctx req.method = 'PUT' body = {"meta": {"key1": "value1"}} req.body = jsonutils.dump_as_bytes(body) req.headers["content-type"] = "application/json" self.assertRaises(exception.SnapshotNotFound, self.controller.update, req, self.req_id, 'key1', body) def test_update_item_empty_body(self): self.mock_object(cinder.db, 'snapshot_metadata_update', return_create_snapshot_metadata) req = fakes.HTTPRequest.blank(self.url + '/key1') req.environ['cinder.context'] = self.ctx req.method = 'PUT' req.headers["content-type"] = "application/json" self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update, req, self.req_id, 'key1', None) @mock.patch('cinder.db.sqlalchemy.api._snapshot_get') @mock.patch('cinder.db.snapshot_metadata_update', autospec=True) def test_update_item_empty_key(self, metadata_update, snapshot_get): snapshot_get.side_effect = return_snapshot req = fakes.HTTPRequest.blank(self.url + '/key1') req.environ['cinder.context'] = self.ctx req.method = 'PUT' body = {"meta": {"": "value1"}} req.body = jsonutils.dump_as_bytes(body) req.headers["content-type"] = "application/json" self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update, req, self.req_id, '', body) @mock.patch('cinder.objects.Snapshot.get_by_id') def test_update_item_key_too_long(self, snapshot_get_by_id): snapshot = { 'id': self.req_id, 'expected_attrs': ['metadata'] } snapshot_obj = fake_snapshot.fake_snapshot_obj(self.ctx, **snapshot) snapshot_get_by_id.return_value = snapshot_obj self.mock_object(cinder.db, 'snapshot_metadata_update', return_create_snapshot_metadata) req = fakes.HTTPRequest.blank(self.url + '/key1') req.environ['cinder.context'] = self.ctx req.method = 'PUT' body = {"meta": {("a" * 260): "value1"}} req.body = jsonutils.dump_as_bytes(body) req.headers["content-type"] = "application/json" self.assertRaises(webob.exc.HTTPRequestEntityTooLarge, self.controller.update, req, self.req_id, ("a" * 260), body) @mock.patch('cinder.objects.Snapshot.get_by_id') def test_update_item_value_too_long(self, snapshot_get_by_id): snapshot = { 'id': self.req_id, 'expected_attrs': ['metadata'] } snapshot_obj = fake_snapshot.fake_snapshot_obj(self.ctx, **snapshot) snapshot_get_by_id.return_value = snapshot_obj self.mock_object(cinder.db, 'snapshot_metadata_update', return_create_snapshot_metadata) req = fakes.HTTPRequest.blank(self.url + '/key1') req.environ['cinder.context'] = self.ctx req.method = 'PUT' body = {"meta": {"key1": ("a" * 260)}} req.body = jsonutils.dump_as_bytes(body) req.headers["content-type"] = "application/json" self.assertRaises(webob.exc.HTTPRequestEntityTooLarge, self.controller.update, req, self.req_id, "key1", body) @ddt.data({"meta": {"key1": "value1", "key2": "value2"}}, {"meta": {"key1": None}}) @mock.patch('cinder.objects.Snapshot.get_by_id') def test_update_invalid_metadata(self, body, snapshot_get_by_id): snapshot = { 
'id': self.req_id, 'expected_attrs': ['metadata'] } self.mock_object(cinder.db, 'snapshot_metadata_update', return_create_snapshot_metadata) snapshot_obj = fake_snapshot.fake_snapshot_obj(self.ctx, **snapshot) snapshot_get_by_id.return_value = snapshot_obj req = fakes.HTTPRequest.blank(self.url + '/key1') req.environ['cinder.context'] = self.ctx req.method = 'PUT' req.body = jsonutils.dump_as_bytes(body) req.headers["content-type"] = "application/json" self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update, req, self.req_id, 'key1', body) def test_update_item_body_uri_mismatch(self): self.mock_object(cinder.db, 'snapshot_metadata_update', return_create_snapshot_metadata) req = fakes.HTTPRequest.blank(self.url + '/bad') req.environ['cinder.context'] = self.ctx req.method = 'PUT' body = {"meta": {"key1": "value1"}} req.body = jsonutils.dump_as_bytes(body) req.headers["content-type"] = "application/json" self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update, req, self.req_id, 'bad', body) @ddt.data({"metadata": {"a" * 260: "value1"}}, {"metadata": {"key": "v" * 260}}, {"metadata": {"": "value1"}}, {"metadata": {"key": None}}) @mock.patch('cinder.objects.Snapshot.get_by_id') def test_invalid_metadata_items_on_create(self, data, snapshot_get_by_id): snapshot = { 'id': self.req_id, 'expected_attrs': ['metadata'] } snapshot_obj = fake_snapshot.fake_snapshot_obj(self.ctx, **snapshot) snapshot_get_by_id.return_value = snapshot_obj self.mock_object(cinder.db, 'snapshot_metadata_update', return_create_snapshot_metadata) req = fakes.HTTPRequest.blank(self.url) req.environ['cinder.context'] = self.ctx req.method = 'POST' req.headers["content-type"] = "application/json" exc = webob.exc.HTTPBadRequest if (len(list(data['metadata'].keys())[0]) > 255 or (list(data['metadata'].values())[0] is not None and len(list(data['metadata'].values())[0]) > 255)): exc = webob.exc.HTTPRequestEntityTooLarge req.body = jsonutils.dump_as_bytes(data) self.assertRaises(exc, self.controller.create, req, self.req_id, data) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/api/v3/test_snapshots.py0000664000175000017500000004707300000000000023223 0ustar00zuulzuul00000000000000# Copyright 2016 EMC Corporation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from unittest import mock import ddt from oslo_utils import strutils from webob import exc from cinder.api import microversions as mv from cinder.api.v3 import snapshots from cinder import context from cinder import db from cinder import exception from cinder.objects import fields from cinder.scheduler import rpcapi as scheduler_rpcapi from cinder.tests.unit.api import fakes from cinder.tests.unit.api.v2 import fakes as v2_fakes from cinder.tests.unit import fake_constants as fake from cinder.tests.unit import fake_snapshot from cinder.tests.unit import fake_volume from cinder.tests.unit import test from cinder.tests.unit import utils as test_utils from cinder import volume UUID = '00000000-0000-0000-0000-000000000001' INVALID_UUID = '00000000-0000-0000-0000-000000000002' def fake_get(self, context, *args, **kwargs): vol = {'id': fake.VOLUME_ID, 'size': 100, 'name': 'fake', 'host': 'fake-host', 'status': 'available', 'encryption_key_id': None, 'migration_status': None, 'availability_zone': 'fake-zone', 'attach_status': 'detached', 'metadata': {}, 'volume_type_id': fake.VOLUME_TYPE_ID} return fake_volume.fake_volume_obj(context, **vol) def create_snapshot_query_with_metadata(metadata_query_string, api_microversion): """Helper to create metadata querystring with microversion""" req = fakes.HTTPRequest.blank('/v3/snapshots?metadata=' + metadata_query_string) req.headers = mv.get_mv_header(api_microversion) req.api_version_request = mv.get_api_version(api_microversion) return req @ddt.ddt class SnapshotApiTest(test.TestCase): def setUp(self): super(SnapshotApiTest, self).setUp() self.mock_object(volume.api.API, 'get', fake_get) self.mock_object(db.sqlalchemy.api, 'volume_type_get', v2_fakes.fake_volume_type_get) self.patch('cinder.quota.QUOTAS.reserve') self.mock_object(scheduler_rpcapi.SchedulerAPI, 'create_snapshot') self.controller = snapshots.SnapshotsController() self.ctx = context.RequestContext(fake.USER_ID, fake.PROJECT_ID, True) @ddt.data(mv.GROUP_SNAPSHOTS, mv.get_prior_version(mv.GROUP_SNAPSHOTS), mv.SNAPSHOT_LIST_USER_ID) @mock.patch('cinder.db.snapshot_metadata_get', return_value=dict()) @mock.patch('cinder.objects.Volume.get_by_id') @mock.patch('cinder.objects.Snapshot.get_by_id') def test_snapshot_show(self, max_ver, snapshot_get_by_id, volume_get_by_id, snapshot_metadata_get): snapshot = { 'id': UUID, 'volume_id': fake.VOLUME_ID, 'status': fields.SnapshotStatus.AVAILABLE, 'volume_size': 100, 'display_name': 'Default name', 'display_description': 'Default description', 'expected_attrs': ['metadata'], 'group_snapshot_id': None, } snapshot_obj = fake_snapshot.fake_snapshot_obj(self.ctx, **snapshot) fake_volume_obj = fake_volume.fake_volume_obj(self.ctx) snapshot_get_by_id.return_value = snapshot_obj volume_get_by_id.return_value = fake_volume_obj req = fakes.HTTPRequest.blank('/v3/snapshots/%s' % UUID) req.environ['cinder.context'] = self.ctx req.api_version_request = mv.get_api_version(max_ver) resp_dict = self.controller.show(req, UUID) self.assertIn('snapshot', resp_dict) self.assertEqual(UUID, resp_dict['snapshot']['id']) self.assertIn('updated_at', resp_dict['snapshot']) if max_ver == mv.SNAPSHOT_LIST_USER_ID: self.assertIn('user_id', resp_dict['snapshot']) elif max_ver == mv.GROUP_SNAPSHOTS: self.assertIn('group_snapshot_id', resp_dict['snapshot']) self.assertNotIn('user_id', resp_dict['snapshot']) else: self.assertNotIn('group_snapshot_id', resp_dict['snapshot']) self.assertNotIn('user_id', resp_dict['snapshot']) @ddt.data( (True, True, mv.USE_QUOTA), (True, False, 
        mv.USE_QUOTA),
        (False, True, mv.get_prior_version(mv.USE_QUOTA)),
        (False, False, mv.get_prior_version(mv.USE_QUOTA)),
    )
    @ddt.unpack
    def test_snapshot_show_with_use_quota(self, present, value, microversion):
        volume = test_utils.create_volume(self.ctx, host='test_host1',
                                          cluster_name='cluster1',
                                          availability_zone='nova1')
        snapshot = test_utils.create_snapshot(self.ctx, volume.id,
                                              use_quota=value)
        url = '/v3/snapshots?%s' % snapshot.id
        req = fakes.HTTPRequest.blank(url, version=microversion)
        res_dict = self.controller.show(req, snapshot.id)['snapshot']
        if present:
            self.assertIs(value, res_dict['consumes_quota'])
        else:
            self.assertNotIn('consumes_quota', res_dict)

    def test_snapshot_show_invalid_id(self):
        snapshot_id = INVALID_UUID
        req = fakes.HTTPRequest.blank('/v3/snapshots/%s' % snapshot_id)
        self.assertRaises(exception.SnapshotNotFound,
                          self.controller.show, req, snapshot_id)

    def _create_snapshot(self, name=None, metadata=None):
        """Create a test snapshot with the provided metadata."""
        req = fakes.HTTPRequest.blank('/v3/snapshots')
        req.environ['cinder.context'] = self.ctx
        snap = {"volume_id": fake.VOLUME_ID,
                "display_name": name or "Volume Test Name",
                "description": "Volume Test Desc"}
        if metadata:
            snap["metadata"] = metadata
        body = {"snapshot": snap}
        self.controller.create(req, body=body)

    @ddt.data(('host', 'test_host1', True, mv.RESOURCE_FILTER),
              ('cluster_name', 'cluster1', True, mv.RESOURCE_FILTER),
              ('availability_zone', 'nova1', False, mv.RESOURCE_FILTER),
              ('consumes_quota', 'true', False, mv.USE_QUOTA))
    @ddt.unpack
    def test_snapshot_list_with_filter(self, filter_name, filter_value,
                                       is_admin_user, microversion):
        volume1 = test_utils.create_volume(self.ctx, host='test_host1',
                                           cluster_name='cluster1',
                                           availability_zone='nova1')
        volume2 = test_utils.create_volume(self.ctx, host='test_host2',
                                           cluster_name='cluster2',
                                           availability_zone='nova2')
        snapshot1 = test_utils.create_snapshot(self.ctx, volume1.id,
                                               use_quota=True)
        test_utils.create_snapshot(self.ctx, volume2.id, use_quota=False)
        url = '/v3/snapshots?%s=%s' % (filter_name, filter_value)
        # Generic filtering was introduced in microversion '3.31', and
        # 'availability_zone' filtering is supported through it.
req = fakes.HTTPRequest.blank(url, use_admin_context=is_admin_user, version=microversion) res_dict = self.controller.detail(req) self.assertEqual(1, len(res_dict['snapshots'])) self.assertEqual(snapshot1.id, res_dict['snapshots'][0]['id']) def _create_multiple_snapshots_with_different_project(self): volume1 = test_utils.create_volume(self.ctx, project=fake.PROJECT_ID) volume2 = test_utils.create_volume(self.ctx, project=fake.PROJECT2_ID) test_utils.create_snapshot( context.RequestContext(fake.USER_ID, fake.PROJECT_ID, True), volume1.id) test_utils.create_snapshot( context.RequestContext(fake.USER_ID, fake.PROJECT_ID, True), volume1.id) test_utils.create_snapshot( context.RequestContext(fake.USER_ID, fake.PROJECT2_ID, True), volume2.id) @ddt.data('snapshots', 'snapshots/detail') def test_list_snapshot_with_count_param_version_not_matched(self, action): self._create_multiple_snapshots_with_different_project() is_detail = True if 'detail' in action else False req = fakes.HTTPRequest.blank("/v3/%s?with_count=True" % action) req.headers = mv.get_mv_header( mv.get_prior_version(mv.SUPPORT_COUNT_INFO)) req.api_version_request = mv.get_api_version( mv.get_prior_version(mv.SUPPORT_COUNT_INFO)) ctxt = context.RequestContext(fake.USER_ID, fake.PROJECT_ID, True) req.environ['cinder.context'] = ctxt res_dict = self.controller._items(req, is_detail=is_detail) self.assertNotIn('count', res_dict) @ddt.data({'method': 'snapshots', 'display_param': 'True'}, {'method': 'snapshots', 'display_param': 'False'}, {'method': 'snapshots', 'display_param': '1'}, {'method': 'snapshots/detail', 'display_param': 'True'}, {'method': 'snapshots/detail', 'display_param': 'False'}, {'method': 'snapshots/detail', 'display_param': '1'} ) @ddt.unpack def test_list_snapshot_with_count_param(self, method, display_param): self._create_multiple_snapshots_with_different_project() is_detail = True if 'detail' in method else False show_count = strutils.bool_from_string(display_param, strict=True) # Request with 'with_count' and 'limit' req = fakes.HTTPRequest.blank( "/v3/%s?with_count=%s&limit=1" % (method, display_param)) req.headers = mv.get_mv_header(mv.SUPPORT_COUNT_INFO) req.api_version_request = mv.get_api_version(mv.SUPPORT_COUNT_INFO) ctxt = context.RequestContext(fake.USER_ID, fake.PROJECT_ID, False) req.environ['cinder.context'] = ctxt res_dict = self.controller._items(req, is_detail=is_detail) self.assertEqual(1, len(res_dict['snapshots'])) if show_count: self.assertEqual(2, res_dict['count']) else: self.assertNotIn('count', res_dict) # Request with 'with_count' req = fakes.HTTPRequest.blank( "/v3/%s?with_count=%s" % (method, display_param)) req.headers = mv.get_mv_header(mv.SUPPORT_COUNT_INFO) req.api_version_request = mv.get_api_version(mv.SUPPORT_COUNT_INFO) ctxt = context.RequestContext(fake.USER_ID, fake.PROJECT_ID, False) req.environ['cinder.context'] = ctxt res_dict = self.controller._items(req, is_detail=is_detail) self.assertEqual(2, len(res_dict['snapshots'])) if show_count: self.assertEqual(2, res_dict['count']) else: self.assertNotIn('count', res_dict) # Request with admin context and 'all_tenants' req = fakes.HTTPRequest.blank( "/v3/%s?with_count=%s&all_tenants=1" % (method, display_param)) req.headers = mv.get_mv_header(mv.SUPPORT_COUNT_INFO) req.api_version_request = mv.get_api_version(mv.SUPPORT_COUNT_INFO) ctxt = context.RequestContext(fake.USER_ID, fake.PROJECT_ID, True) req.environ['cinder.context'] = ctxt res_dict = self.controller._items(req, is_detail=is_detail) self.assertEqual(3, 
                         len(res_dict['snapshots']))
        if show_count:
            self.assertEqual(3, res_dict['count'])
        else:
            self.assertNotIn('count', res_dict)

    @mock.patch('cinder.objects.volume.Volume.refresh')
    def test_snapshot_list_with_sort_name(self, mock_refresh):
        self._create_snapshot(name='test1')
        self._create_snapshot(name='test2')
        req = fakes.HTTPRequest.blank(
            '/v3/snapshots?sort_key=name',
            version=mv.get_prior_version(mv.SNAPSHOT_SORT))
        self.assertRaises(exception.InvalidInput,
                          self.controller.detail, req)
        req = fakes.HTTPRequest.blank('/v3/snapshots?sort_key=name',
                                      version=mv.SNAPSHOT_SORT)
        res_dict = self.controller.detail(req)
        self.assertEqual(2, len(res_dict['snapshots']))
        self.assertEqual('test2', res_dict['snapshots'][0]['name'])
        self.assertEqual('test1', res_dict['snapshots'][1]['name'])

    @mock.patch('cinder.objects.volume.Volume.refresh')
    def test_snapshot_list_with_one_metadata_in_filter(self, mock_refresh):
        # Create snapshot with metadata key1: val1
        metadata = {"key1": "val1"}
        self._create_snapshot(metadata=metadata)
        # Create request with metadata filter key1: val1
        req = create_snapshot_query_with_metadata(
            '{"key1":"val1"}', mv.SNAPSHOT_LIST_METADATA_FILTER)
        # query controller with above request
        res_dict = self.controller.detail(req)
        # verify 1 snapshot is returned
        self.assertEqual(1, len(res_dict['snapshots']))
        # verify that the metadata of the returned snapshot is key1: val1
        self.assertDictEqual({"key1": "val1"},
                             res_dict['snapshots'][0]['metadata'])
        # Create request with metadata filter key2: val2
        req = create_snapshot_query_with_metadata(
            '{"key2":"val2"}', mv.SNAPSHOT_LIST_METADATA_FILTER)
        # query controller with above request
        res_dict = self.controller.detail(req)
        # verify no snapshot is returned
        self.assertEqual(0, len(res_dict['snapshots']))

    @mock.patch('cinder.objects.volume.Volume.refresh')
    def test_snapshot_list_with_multiple_metadata_in_filter(self,
                                                            mock_refresh):
        # Create snapshot with metadata key1: val1, key11: val11
        metadata = {"key1": "val1", "key11": "val11"}
        self._create_snapshot(metadata=metadata)
        # Create request with metadata filter key1: val1, key11: val11
        req = create_snapshot_query_with_metadata(
            '{"key1":"val1", "key11":"val11"}',
            mv.SNAPSHOT_LIST_METADATA_FILTER)
        # query controller with above request
        res_dict = self.controller.detail(req)
        # verify 1 snapshot is returned
        self.assertEqual(1, len(res_dict['snapshots']))
        # verify that the metadata of the returned snapshot matches
        self.assertDictEqual({"key1": "val1", "key11": "val11"},
                             res_dict['snapshots'][0]['metadata'])
        # Create request with metadata filter key1: val1
        req = create_snapshot_query_with_metadata(
            '{"key1":"val1"}', mv.SNAPSHOT_LIST_METADATA_FILTER)
        # query controller with above request
        res_dict = self.controller.detail(req)
        # verify 1 snapshot is returned
        self.assertEqual(1, len(res_dict['snapshots']))
        # verify that the metadata of the returned snapshot matches
        self.assertDictEqual({"key1": "val1", "key11": "val11"},
                             res_dict['snapshots'][0]['metadata'])
        # Create request with metadata filter key2: val2
        req = create_snapshot_query_with_metadata(
            '{"key2":"val2"}', mv.SNAPSHOT_LIST_METADATA_FILTER)
        # query controller with above request
        res_dict = self.controller.detail(req)
        # verify no snapshot is returned
        self.assertEqual(0, len(res_dict['snapshots']))

    @ddt.data(mv.get_prior_version(mv.RESOURCE_FILTER),
              mv.RESOURCE_FILTER, mv.LIKE_FILTER)
    @mock.patch('cinder.api.common.reject_invalid_filters')
    def test_snapshot_list_with_general_filter(self, version, mock_update):
        url =
'/v3/%s/snapshots' % fake.PROJECT_ID req = fakes.HTTPRequest.blank(url, version=version, use_admin_context=False) self.controller.index(req) if version != mv.get_prior_version(mv.RESOURCE_FILTER): support_like = True if version == mv.LIKE_FILTER else False mock_update.assert_called_once_with(req.environ['cinder.context'], mock.ANY, 'snapshot', support_like) @mock.patch('cinder.objects.volume.Volume.refresh') def test_snapshot_list_with_metadata_unsupported_microversion( self, mock_refresh): # Create snapshot with metadata key1: value1 metadata = {"key1": "val1"} self._create_snapshot(metadata=metadata) # Create request with metadata filter key2: value2 req = create_snapshot_query_with_metadata( '{"key2":"val2"}', mv.get_prior_version(mv.SNAPSHOT_LIST_METADATA_FILTER)) # query controller with above request res_dict = self.controller.detail(req) # verify some snapshot is returned self.assertNotEqual(0, len(res_dict['snapshots'])) @mock.patch('cinder.volume.api.API.create_snapshot') def test_snapshot_create_allow_in_use(self, mock_create): req = create_snapshot_query_with_metadata( '{"key2": "val2"}', mv.SNAPSHOT_IN_USE) body = {'snapshot': {'volume_id': fake.VOLUME_ID}} self.controller.create(req, body=body) self.assertIn('allow_in_use', mock_create.call_args_list[0][1]) self.assertTrue(mock_create.call_args_list[0][1]['allow_in_use']) @mock.patch('cinder.volume.api.API.create_snapshot') def test_snapshot_create_allow_in_use_negative(self, mock_create): req = create_snapshot_query_with_metadata( '{"key2": "val2"}', mv.get_prior_version(mv.SNAPSHOT_IN_USE)) body = {'snapshot': {'volume_id': fake.VOLUME_ID}} self.controller.create(req, body=body) self.assertNotIn('allow_in_use', mock_create.call_args_list[0][1]) @ddt.data(False, 'false', 'f', 'no', 'n', '0', 'off') @mock.patch('cinder.volume.api.API.create_snapshot') def test_snapshot_create_force_false(self, force_flag, mock_create): snapshot_name = 'Snapshot Test Name' snapshot_description = 'Snapshot Test Desc' snapshot = { "volume_id": fake.VOLUME_ID, "force": force_flag, "name": snapshot_name, "description": snapshot_description } body = dict(snapshot=snapshot) req = create_snapshot_query_with_metadata( '{"key2": "val2"}', mv.SNAPSHOT_IN_USE) self.assertRaises(exc.HTTPBadRequest, self.controller.create, req, body=body) mock_create.assert_not_called() # prevent regression -- shouldn't raise for pre-mv-3.66 req = create_snapshot_query_with_metadata( '{"key2": "val2"}', mv.get_prior_version(mv.SNAPSHOT_IN_USE)) self.controller.create(req, body=body) # ... but also shouldn't allow an in-use snapshot self.assertNotIn('allow_in_use', mock_create.call_args_list[0][1]) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/api/v3/test_types.py0000664000175000017500000001265700000000000022345 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from cinder.api import microversions as mv from cinder.api.v3 import types from cinder import context from cinder import db from cinder import exception from cinder import objects from cinder.tests.unit.api import fakes from cinder.tests.unit import fake_constants as fake from cinder.tests.unit import test from cinder.volume import volume_types class VolumeTypesApiTest(test.TestCase): def _create_volume_type(self, ctxt, volume_type_name, extra_specs=None, is_public=True, projects=None): vol_type = objects.VolumeType(ctxt, name=volume_type_name, is_public=is_public, description='', extra_specs=extra_specs, projects=projects) vol_type.create() return vol_type def setUp(self): super(VolumeTypesApiTest, self).setUp() self.controller = types.VolumeTypesController() self.ctxt = context.RequestContext(user_id=fake.USER_ID, project_id=fake.PROJECT_ID, is_admin=True) self.type1 = self._create_volume_type( self.ctxt, 'volume_type1', {'key1': 'value1', 'RESKEY:availability_zones': 'az1,az2'}) self.type2 = self._create_volume_type( self.ctxt, 'volume_type2', {'key2': 'value2', 'RESKEY:availability_zones': 'az1,az3'}) self.type3 = self._create_volume_type( self.ctxt, 'volume_type3', {'key3': 'value3'}, False, [fake.PROJECT_ID]) self.addCleanup(self._cleanup) def _cleanup(self): self.type1.destroy() self.type2.destroy() self.type3.destroy() def test_volume_types_index_with_extra_specs(self): def _get_volume_types(extra_specs, use_admin_context=True, microversion=mv.SUPPORT_VOLUME_TYPE_FILTER): req = fakes.HTTPRequest.blank( '/v3/%s/types?extra_specs=%s' % (fake.PROJECT_ID, extra_specs), use_admin_context=use_admin_context) req.api_version_request = mv.get_api_version(microversion) res_dict = self.controller.index(req) return res_dict['volume_types'] # since __DEFAULT__ type always exists, total number of volume types # is total_types_created + 1. 
In this case it's 4 volume_types = _get_volume_types('{"key1":"value1"}', use_admin_context=False, microversion=mv.get_prior_version( mv.SUPPORT_VOLUME_TYPE_FILTER)) self.assertEqual(4, len(volume_types)) # Test filter volume type with extra specs volume_types = _get_volume_types('{"key1":"value1"}') self.assertEqual(1, len(volume_types)) self.assertDictEqual({'key1': 'value1', 'RESKEY:availability_zones': 'az1,az2'}, volume_types[0]['extra_specs']) # Test filter volume type with 'availability_zones' volume_types = _get_volume_types('{"RESKEY:availability_zones":"az1"}') self.assertEqual(2, len(volume_types)) self.assertEqual( ['volume_type1', 'volume_type2'], sorted([az['name'] for az in volume_types])) # Test ability for non-admin to filter with user visible extra specs volume_types = _get_volume_types('{"RESKEY:availability_zones":"az1"}', use_admin_context=False) self.assertEqual(2, len(volume_types)) self.assertEqual( ['volume_type1', 'volume_type2'], sorted([az['name'] for az in volume_types])) # Test inability for non-admin to filter with sensitive extra specs volume_types = _get_volume_types('{"key1":"value1"}', use_admin_context=False) self.assertEqual(0, len(volume_types)) def test_delete_non_project_default_type(self): type = self._create_volume_type(self.ctxt, 'type1') db.project_default_volume_type_set( self.ctxt, fake.VOLUME_TYPE_ID, fake.PROJECT_ID) volume_types.destroy(self.ctxt, type.id) self.assertRaises(exception.VolumeTypeNotFound, volume_types.get_by_name_or_id, self.ctxt, type.id) def test_cannot_delete_project_default_type(self): default_type = db.project_default_volume_type_set( self.ctxt, fake.VOLUME_TYPE_ID, fake.PROJECT_ID) self.assertRaises(exception.VolumeTypeDefaultDeletionError, volume_types.destroy, self.ctxt, default_type['volume_type_id']) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/api/v3/test_types_orig.py0000664000175000017500000004051500000000000023357 0ustar00zuulzuul00000000000000# Copyright 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from unittest import mock import ddt from oslo_utils import timeutils import webob from cinder.api.v3 import types from cinder.api.v3.views import types as views_types from cinder import context from cinder import exception from cinder.policies import type_extra_specs as extra_specs_policy from cinder.policies import volume_type as type_policy from cinder.tests.unit.api import fakes from cinder.tests.unit import fake_constants as fake from cinder.tests.unit import test from cinder.volume import volume_types def fake_volume_type(id): specs = { "key1": "value1", "key2": "value2", "key3": "value3", "key4": "value4", "key5": "value5" } return dict( id=id, name='vol_type_%s' % id, description='vol_type_desc_%s' % id, extra_specs=specs, ) def return_volume_types_get_all_types(context, filters=None, marker=None, limit=None, sort_keys=None, sort_dirs=None, offset=None, list_result=False): result = dict(vol_type_1=fake_volume_type(1), vol_type_2=fake_volume_type(2), vol_type_3=fake_volume_type(3) ) if list_result: return list(result.values()) return result def return_empty_volume_types_get_all_types(context, filters=None, marker=None, limit=None, sort_keys=None, sort_dirs=None, offset=None, list_result=False): if list_result: return [] return {} def return_volume_types_get_volume_type(context, id): if id == fake.WILL_NOT_BE_FOUND_ID: raise exception.VolumeTypeNotFound(volume_type_id=id) return fake_volume_type(id) def return_volume_types_get_default(context): return fake_volume_type(1) @ddt.ddt class VolumeTypesApiTest(test.TestCase): def _create_volume_type(self, volume_type_name, extra_specs=None, is_public=True, projects=None): return volume_types.create(self.ctxt, volume_type_name, extra_specs, is_public, projects).get('id') def setUp(self): super(VolumeTypesApiTest, self).setUp() self.controller = types.VolumeTypesController() self.ctxt = context.RequestContext(user_id=fake.USER_ID, project_id=fake.PROJECT_ID, is_admin=True) self.mock_authorize = self.patch( 'cinder.context.RequestContext.authorize') # since __DEFAULT__ type always exists, total number of volume types # is total_types_created + 1. 
In this case it's 4 self.type_id1 = self._create_volume_type('volume_type1', {'key1': 'value1'}) self.type_id2 = self._create_volume_type('volume_type2', {'key2': 'value2'}) self.type_id3 = self._create_volume_type('volume_type3', {'key3': 'value3'}, False, [fake.PROJECT_ID]) self.default_type = volume_types.get_default_volume_type()['id'] self.vol_type = volume_types.get_by_name_or_id( context.get_admin_context(), '__DEFAULT__')['id'] def test_volume_types_index(self): self.mock_object(volume_types, 'get_all_types', return_volume_types_get_all_types) req = fakes.HTTPRequest.blank('/v3/%s/types' % fake.PROJECT_ID, use_admin_context=True) res_dict = self.controller.index(req) self.assertEqual(3, len(res_dict['volume_types'])) expected_names = ['vol_type_1', 'vol_type_2', 'vol_type_3'] actual_names = map(lambda e: e['name'], res_dict['volume_types']) self.assertEqual(set(expected_names), set(actual_names)) for entry in res_dict['volume_types']: self.assertEqual('value1', entry['extra_specs']['key1']) self.mock_authorize.assert_any_call(type_policy.GET_ALL_POLICY) def test_volume_types_index_no_data(self): self.mock_object(volume_types, 'get_all_types', return_empty_volume_types_get_all_types) req = fakes.HTTPRequest.blank('/v3/%s/types' % fake.PROJECT_ID) res_dict = self.controller.index(req) self.assertEqual(0, len(res_dict['volume_types'])) def test_volume_types_index_with_limit(self): req = fakes.HTTPRequest.blank('/v3/%s/types?limit=1' % fake.PROJECT_ID) req.environ['cinder.context'] = self.ctxt res = self.controller.index(req) self.assertEqual(1, len(res['volume_types'])) self.assertEqual(self.type_id3, res['volume_types'][0]['id']) expect_next_link = ('http://localhost/v3/%s/types?limit=1' '&marker=%s' % (fake.PROJECT_ID, res['volume_types'][0]['id'])) self.assertEqual(expect_next_link, res['volume_type_links'][0]['href']) def test_volume_types_index_with_offset(self): req = fakes.HTTPRequest.blank( '/v3/%s/types?offset=1' % fake.PROJECT_ID) req.environ['cinder.context'] = self.ctxt res = self.controller.index(req) self.assertEqual(3, len(res['volume_types'])) def test_volume_types_index_with_offset_out_of_range(self): url = '/v3/%s/types?offset=424366766556787' % fake.PROJECT_ID req = fakes.HTTPRequest.blank(url) self.assertRaises(webob.exc.HTTPBadRequest, self.controller.index, req) def test_volume_types_index_with_limit_and_offset(self): req = fakes.HTTPRequest.blank( '/v3/%s/types?limit=2&offset=1' % fake.PROJECT_ID) req.environ['cinder.context'] = self.ctxt res = self.controller.index(req) self.assertEqual(2, len(res['volume_types'])) self.assertEqual(self.type_id2, res['volume_types'][0]['id']) self.assertEqual(self.type_id1, res['volume_types'][1]['id']) def test_volume_types_index_with_limit_and_marker(self): req = fakes.HTTPRequest.blank('/v3/%s/types?limit=1' '&marker=%s' % (fake.PROJECT_ID, self.type_id2)) req.environ['cinder.context'] = self.ctxt res = self.controller.index(req) self.assertEqual(1, len(res['volume_types'])) self.assertEqual(self.type_id1, res['volume_types'][0]['id']) def test_volume_types_index_with_valid_filter(self): req = fakes.HTTPRequest.blank( '/v3/%s/types?is_public=True' % fake.PROJECT_ID) req.environ['cinder.context'] = self.ctxt res = self.controller.index(req) self.assertEqual(4, len(res['volume_types'])) self.assertEqual(self.type_id3, res['volume_types'][0]['id']) self.assertEqual(self.type_id2, res['volume_types'][1]['id']) self.assertEqual(self.type_id1, res['volume_types'][2]['id']) def test_volume_types_index_with_invalid_filter(self): req 
= fakes.HTTPRequest.blank( '/v3/%s/types?id=%s' % (fake.PROJECT_ID, self.type_id1)) req.environ['cinder.context'] = context.RequestContext( user_id=fake.USER_ID, project_id=fake.PROJECT_ID, is_admin=False) res = self.controller.index(req) self.assertEqual(4, len(res['volume_types'])) def test_volume_types_index_with_sort_keys(self): req = fakes.HTTPRequest.blank('/v3/%s/types?sort=id' % fake.PROJECT_ID) req.environ['cinder.context'] = self.ctxt res = self.controller.index(req) expect_result = [self.default_type, self.type_id1, self.type_id2, self.type_id3] expect_result.sort(reverse=True) self.assertEqual(4, len(res['volume_types'])) self.assertEqual(expect_result[0], res['volume_types'][0]['id']) self.assertEqual(expect_result[1], res['volume_types'][1]['id']) self.assertEqual(expect_result[2], res['volume_types'][2]['id']) self.assertEqual(expect_result[3], res['volume_types'][3]['id']) def test_volume_types_index_with_sort_and_limit(self): req = fakes.HTTPRequest.blank( '/v3/%s/types?sort=id&limit=2' % fake.PROJECT_ID) req.environ['cinder.context'] = self.ctxt res = self.controller.index(req) expect_result = [self.default_type, self.type_id1, self.type_id2, self.type_id3] expect_result.sort(reverse=True) self.assertEqual(2, len(res['volume_types'])) self.assertEqual(expect_result[0], res['volume_types'][0]['id']) self.assertEqual(expect_result[1], res['volume_types'][1]['id']) def test_volume_types_index_with_sort_keys_and_sort_dirs(self): req = fakes.HTTPRequest.blank( '/v3/%s/types?sort=id:asc' % fake.PROJECT_ID) req.environ['cinder.context'] = self.ctxt res = self.controller.index(req) expect_result = [self.default_type, self.type_id1, self.type_id2, self.type_id3] expect_result.sort() self.assertEqual(4, len(res['volume_types'])) self.assertEqual(expect_result[0], res['volume_types'][0]['id']) self.assertEqual(expect_result[1], res['volume_types'][1]['id']) self.assertEqual(expect_result[2], res['volume_types'][2]['id']) self.assertEqual(expect_result[3], res['volume_types'][3]['id']) def test_volume_types_show(self): self.mock_object(volume_types, 'get_volume_type', return_volume_types_get_volume_type) type_id = fake.VOLUME_TYPE_ID req = fakes.HTTPRequest.blank('/v3/%s/types/' % fake.PROJECT_ID + type_id) res_dict = self.controller.show(req, type_id) self.assertEqual(1, len(res_dict)) self.assertEqual(type_id, res_dict['volume_type']['id']) type_name = 'vol_type_' + type_id self.assertEqual(type_name, res_dict['volume_type']['name']) self.mock_authorize.assert_any_call( type_policy.GET_POLICY, target_obj=mock.ANY) def test_volume_types_show_not_found(self): self.mock_object(volume_types, 'get_volume_type', return_volume_types_get_volume_type) req = fakes.HTTPRequest.blank('/v3/%s/types/%s' % (fake.PROJECT_ID, fake.WILL_NOT_BE_FOUND_ID)) self.assertRaises(exception.VolumeTypeNotFound, self.controller.show, req, fake.WILL_NOT_BE_FOUND_ID) def test_get_default(self): self.mock_object(volume_types, 'get_default_volume_type', return_volume_types_get_default) req = fakes.HTTPRequest.blank('/v3/%s/types/default' % fake.PROJECT_ID) req.method = 'GET' res_dict = self.controller.show(req, 'default') self.assertEqual(1, len(res_dict)) self.assertEqual('vol_type_1', res_dict['volume_type']['name']) self.assertEqual('vol_type_desc_1', res_dict['volume_type']['description']) def test_get_default_not_found(self): self.mock_object(volume_types, 'get_default_volume_type', return_value={}) req = fakes.HTTPRequest.blank('/v3/%s/types/default' % fake.PROJECT_ID) req.method = 'GET' 
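        # With no default volume type configured, showing 'default' should
        # raise VolumeTypeNotFound.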
self.assertRaises(exception.VolumeTypeNotFound, self.controller.show, req, 'default') @ddt.data( { 'extra_spec_policy': False, 'read_sensitive_policy': False, 'qos_policy': False, }, { 'extra_spec_policy': True, 'read_sensitive_policy': False, 'qos_policy': False, }, { 'extra_spec_policy': True, 'read_sensitive_policy': True, 'qos_policy': False, }, { 'extra_spec_policy': False, 'read_sensitive_policy': False, 'qos_policy': True, }, { 'extra_spec_policy': True, 'read_sensitive_policy': True, 'qos_policy': True, }, ) @ddt.unpack def test_view_builder_show(self, extra_spec_policy, read_sensitive_policy, qos_policy): # This function returns the authorization result supplied by the # DDT data for the associated policy. def authorize(policy, fatal): policy_data = { type_policy.EXTRA_SPEC_POLICY: extra_spec_policy, extra_specs_policy.READ_SENSITIVE_POLICY: ( read_sensitive_policy), type_policy.QOS_POLICY: qos_policy, } return policy_data[policy] view_builder = views_types.ViewBuilder() now = timeutils.utcnow().isoformat() raw_volume_type = dict( name='new_type', description='new_type_desc', qos_specs_id='new_id', is_public=True, deleted=False, created_at=now, updated_at=now, extra_specs={'multiattach': True, 'sensitive': 'secret'}, deleted_at=None, id=42, ) request = fakes.HTTPRequest.blank("/v3") with mock.patch('cinder.context.RequestContext.authorize', side_effect=authorize): output = view_builder.show(request, raw_volume_type) self.assertIn('volume_type', output) expected_volume_type = dict( name='new_type', description='new_type_desc', is_public=True, id=42, ) if extra_spec_policy: expected_volume_type['extra_specs'] = {'multiattach': True} if read_sensitive_policy: expected_volume_type['extra_specs']['sensitive'] = 'secret' if qos_policy: expected_volume_type['qos_specs_id'] = 'new_id' self.assertDictEqual(expected_volume_type, output['volume_type']) @ddt.data(False, True) def test_view_builder_list(self, is_admin): view_builder = views_types.ViewBuilder() self.mock_authorize.return_value = is_admin now = timeutils.utcnow().isoformat() raw_volume_types = [] for i in range(0, 10): raw_volume_types.append( dict( name='new_type', description='new_type_desc', qos_specs_id='new_id', is_public=True, deleted=False, created_at=now, updated_at=now, extra_specs={'multiattach': True, 'sensitive': 'secret'}, deleted_at=None, id=42 + i ) ) request = fakes.HTTPRequest.blank("/v3") output = view_builder.index(request, raw_volume_types) self.assertIn('volume_types', output) for i in range(0, 10): expected_volume_type = dict( name='new_type', description='new_type_desc', is_public=True, id=42 + i ) if is_admin: expected_volume_type['qos_specs_id'] = 'new_id' expected_volume_type['extra_specs'] = {'multiattach': True, 'sensitive': 'secret'} self.assertDictEqual(expected_volume_type, output['volume_types'][i]) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/api/v3/test_volume_manage.py0000664000175000017500000002153500000000000024013 0ustar00zuulzuul00000000000000# Copyright (c) 2016 Stratoscale, Ltd. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from http import HTTPStatus from unittest import mock from urllib.parse import urlencode import ddt from oslo_config import cfg from oslo_serialization import jsonutils import webob from cinder.api import microversions as mv from cinder.api.v3 import router as router_v3 from cinder.common import constants from cinder import context from cinder import objects from cinder.tests.unit.api.contrib import test_volume_manage as test_contrib from cinder.tests.unit.api import fakes from cinder.tests.unit import fake_constants as fake from cinder.tests.unit import test CONF = cfg.CONF def app(): # no auth, just let environ['cinder.context'] pass through api = router_v3.APIRouter() mapper = fakes.urlmap.URLMap() mapper['/v3'] = api return mapper @ddt.ddt @mock.patch('cinder.objects.service.Service.get_by_host_and_topic', test_contrib.service_get) @mock.patch('cinder.volume.volume_types.get_default_volume_type', test_contrib.vt_get_default_volume_type) @mock.patch('cinder.volume.volume_types.get_volume_type_by_name', test_contrib.vt_get_volume_type_by_name) @mock.patch('cinder.volume.volume_types.get_volume_type', test_contrib.vt_get_volume_type) class VolumeManageTest(test.TestCase): """Test cases for cinder/api/v3/volume_manage.py""" def setUp(self): super(VolumeManageTest, self).setUp() self._admin_ctxt = context.RequestContext(fake.USER_ID, fake.PROJECT_ID, True) def _get_resp_post(self, body, version=mv.MANAGE_EXISTING_LIST): """Helper to execute a POST manageable_volumes API call.""" req = webob.Request.blank('/v3/%s/manageable_volumes' % fake.PROJECT_ID) req.method = 'POST' req.headers = mv.get_mv_header(version) req.headers['Content-Type'] = 'application/json' req.environ['cinder.context'] = self._admin_ctxt req.body = jsonutils.dump_as_bytes(body) res = req.get_response(app()) return res @mock.patch('cinder.volume.api.API.manage_existing', wraps=test_contrib.api_manage) @mock.patch( 'cinder.api.openstack.wsgi.Controller.validate_name_and_description') def test_manage_volume_route(self, mock_validate, mock_api_manage): """Test call to manage volume. There is currently no change between the API in contrib and the API in v3, so here we simply check that the call is routed properly, rather than copying all the tests. 
""" body = {'volume': {'host': 'host_ok', 'ref': 'fake_ref'}} res = self._get_resp_post(body) self.assertEqual(HTTPStatus.ACCEPTED, res.status_int, res) def test_manage_volume_previous_version(self): body = {'volume': {'host': 'host_ok', 'ref': 'fake_ref'}} res = self._get_resp_post(body) self.assertEqual(HTTPStatus.BAD_REQUEST, res.status_int, res) def _get_resp_get(self, host, detailed, paging, version=mv.MANAGE_EXISTING_LIST, **kwargs): """Helper to execute a GET os-volume-manage API call.""" params = {'host': host} if host else {} params.update(kwargs) if paging: params.update({'marker': '1234', 'limit': 10, 'offset': 4, 'sort': 'reference:asc'}) query_string = "?%s" % urlencode(params) detail = "" if detailed: detail = "/detail" req = webob.Request.blank('/v3/%s/manageable_volumes%s%s' % (fake.PROJECT_ID, detail, query_string)) req.method = 'GET' req.headers = mv.get_mv_header(version) req.headers['Content-Type'] = 'application/json' req.environ['cinder.context'] = self._admin_ctxt res = req.get_response(app()) return res @mock.patch('cinder.volume.api.API.get_manageable_volumes', wraps=test_contrib.api_get_manageable_volumes) def test_get_manageable_volumes_route(self, mock_api_manageable): """Test call to get manageable volumes. There is currently no change between the API in contrib and the API in v3, so here we simply check that the call is routed properly, rather than copying all the tests. """ res = self._get_resp_get('fakehost', False, True) self.assertEqual(HTTPStatus.OK, res.status_int) def test_get_manageable_volumes_previous_version(self): res = self._get_resp_get( 'fakehost', False, True, version=mv.get_prior_version(mv.MANAGE_EXISTING_LIST)) self.assertEqual(HTTPStatus.NOT_FOUND, res.status_int) @mock.patch('cinder.volume.api.API.get_manageable_volumes', wraps=test_contrib.api_get_manageable_volumes) def test_get_manageable_volumes_detail_route(self, mock_api_manageable): """Test call to get manageable volumes (detailed). There is currently no change between the API in contrib and the API in v3, so here we simply check that the call is routed properly, rather than copying all the tests. """ res = self._get_resp_get('fakehost', True, False) self.assertEqual(HTTPStatus.OK, res.status_int) def test_get_manageable_volumes_detail_previous_version(self): res = self._get_resp_get( 'fakehost', True, False, version=mv.get_prior_version(mv.MANAGE_EXISTING_LIST)) self.assertEqual(HTTPStatus.NOT_FOUND, res.status_int) @ddt.data((True, True, 'detail_list'), (True, False, 'summary_list'), (False, True, 'detail_list'), (False, False, 'summary_list')) @ddt.unpack @mock.patch('cinder.objects.Service.is_up', True) @mock.patch('cinder.volume.rpcapi.VolumeAPI._get_cctxt') @mock.patch('cinder.objects.Service.get_by_id') def test_get_manageable_detail(self, clustered, is_detail, view_method, get_service_mock, get_cctxt_mock): if clustered: host = None cluster_name = 'mycluster' version = mv.MANAGE_EXISTING_CLUSTER kwargs = {'cluster': cluster_name} else: host = 'fakehost' cluster_name = None version = mv.MANAGE_EXISTING_LIST kwargs = {} service = objects.Service(disabled=False, host='fakehost', cluster_name=cluster_name) get_service_mock.return_value = service volumes = [mock.sentinel.volume1, mock.sentinel.volume2] get_cctxt_mock.return_value.call.return_value = volumes view_data = {'manageable-volumes': [{'vol': str(v)} for v in volumes]} view_path = ('cinder.api.views.manageable_volumes.ViewBuilder.' 
+ view_method) with mock.patch(view_path, return_value=view_data) as detail_view_mock: res = self._get_resp_get(host, is_detail, False, version=version, **kwargs) self.assertEqual(HTTPStatus.OK, res.status_int) get_cctxt_mock.assert_called_once_with(service.service_topic_queue, version=('3.10', '3.0')) get_cctxt_mock.return_value.call.assert_called_once_with( mock.ANY, 'get_manageable_volumes', marker=None, limit=CONF.osapi_max_limit, offset=0, sort_keys=['reference'], sort_dirs=['desc'], want_objects=True) detail_view_mock.assert_called_once_with(mock.ANY, volumes, len(volumes)) get_service_mock.assert_called_once_with( mock.ANY, None, host=host, binary=constants.VOLUME_BINARY, cluster_name=cluster_name) @ddt.data(mv.MANAGE_EXISTING_LIST, mv.MANAGE_EXISTING_CLUSTER) def test_get_manageable_missing_host(self, version): res = self._get_resp_get(None, True, False, version=version) self.assertEqual(HTTPStatus.BAD_REQUEST, res.status_int) def test_get_manageable_both_host_cluster(self): res = self._get_resp_get('host', True, False, version=mv.MANAGE_EXISTING_CLUSTER, cluster='cluster') self.assertEqual(HTTPStatus.BAD_REQUEST, res.status_int) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/api/v3/test_volume_metadata.py0000664000175000017500000002475700000000000024354 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from unittest import mock import uuid from oslo_config import cfg from oslo_serialization import jsonutils from cinder.api import extensions from cinder.api import microversions as mv from cinder.api.v3 import volume_metadata from cinder.api.v3 import volumes from cinder.backup import rpcapi as backup_rpcapi from cinder import db from cinder import exception from cinder.objects import base as obj_base from cinder.scheduler import rpcapi as scheduler_rpcapi from cinder.tests.unit.api import fakes from cinder.tests.unit.api.v2 import fakes as v2_fakes from cinder.tests.unit.api.v2 import test_volume_metadata as v2_test from cinder.tests.unit import fake_constants as fake from cinder.tests.unit import fake_volume from cinder.tests.unit import test from cinder import volume from cinder.volume import api as volume_api CONF = cfg.CONF def return_create_volume_metadata(context, volume_id, metadata, delete, meta_type): return stub_volume_metadata() def return_new_volume_metadata(context, volume_id, metadata, delete, meta_type): return stub_new_volume_metadata() def return_create_volume_metadata_insensitive(context, snapshot_id, metadata, delete, meta_type): return stub_volume_metadata_insensitive() def return_volume_metadata(context, volume_id): return stub_volume_metadata() def return_empty_volume_metadata(context, volume_id): return {} def return_empty_container_metadata(context, volume_id, metadata, delete, meta_type): return {} def stub_volume_metadata(): metadata = { "key1": "value1", "key2": "value2", "key3": "value3", } return metadata def stub_new_volume_metadata(): metadata = { 'key10': 'value10', 'key99': 'value99', 'KEY20': 'value20', } return metadata def stub_volume_metadata_insensitive(): metadata = { "key1": "value1", "key2": "value2", "key3": "value3", "KEY4": "value4", } return metadata def get_volume(*args, **kwargs): vol = {'name': 'fake', 'metadata': {}, 'project_id': fake.PROJECT_ID} return fake_volume.fake_volume_obj(args[0], **vol) def return_volume_nonexistent(*args, **kwargs): raise exception.VolumeNotFound('bogus test message') def fake_update_volume_metadata(self, context, volume, diff): pass class VolumeMetaDataTest(test.TestCase): def setUp(self): super(VolumeMetaDataTest, self).setUp() self.volume_api = volume_api.API() self.mock_object(volume.api.API, 'get', get_volume) self.mock_object(db, 'volume_metadata_get', return_volume_metadata) self.patch( 'cinder.db.service_get_all', autospec=True, return_value=v2_fakes.fake_service_get_all_by_topic(None, None)) self.mock_object(self.volume_api, 'update_volume_metadata', fake_update_volume_metadata) self.ext_mgr = extensions.ExtensionManager() self.ext_mgr.extensions = {} self.patch( 'cinder.objects.Service.get_minimum_obj_version', return_value=obj_base.OBJ_VERSIONS.get_current()) def _get_minimum_rpc_version_mock(ctxt, binary): binary_map = { 'cinder-backup': backup_rpcapi.BackupAPI, 'cinder-scheduler': scheduler_rpcapi.SchedulerAPI, } return binary_map[binary].RPC_API_VERSION self.patch('cinder.objects.Service.get_minimum_rpc_version', side_effect=_get_minimum_rpc_version_mock) self.volume_controller = volumes.VolumeController(self.ext_mgr) self.controller = volume_metadata.Controller() self.req_id = str(uuid.uuid4()) self.url = '/v3/%s/volumes/%s/metadata' % ( fake.PROJECT_ID, self.req_id) vol = {"size": 100, "display_name": "Volume Test Name", "display_description": "Volume Test Desc", "availability_zone": "zone1:host1", "metadata": {}} body = {"volume": vol} req = fakes.HTTPRequest.blank('/v3/%s/volumes' % 
fake.PROJECT_ID) self.volume_controller.create(req, body=body) def test_index(self): req = fakes.HTTPRequest.blank(self.url, version=mv.ETAGS) data = self.controller.index(req, self.req_id) expected = { 'metadata': { 'key1': 'value1', 'key2': 'value2', 'key3': 'value3', }, } result = jsonutils.loads(data.body) self.assertDictEqual(expected, result) def test_index_nonexistent_volume(self): self.mock_object(db, 'volume_metadata_get', return_volume_nonexistent) req = fakes.HTTPRequest.blank(self.url, version=mv.ETAGS) self.assertRaises(exception.VolumeNotFound, self.controller.index, req, self.url) def test_index_no_data(self): self.mock_object(db, 'volume_metadata_get', return_empty_volume_metadata) req = fakes.HTTPRequest.blank(self.url, version=mv.ETAGS) data = self.controller.index(req, self.req_id) expected = {'metadata': {}} result = jsonutils.loads(data.body) self.assertDictEqual(expected, result) def test_validate_etag_true(self): self.mock_object(db, 'volume_metadata_get', return_value={'key1': 'vanue1', 'key2': 'value2'}) req = fakes.HTTPRequest.blank(self.url, version=mv.ETAGS) req.environ['cinder.context'] = mock.Mock() req.if_match.etags = ['d5103bf7b26ff0310200d110da3ed186'] self.assertTrue(self.controller._validate_etag(req, self.req_id)) @mock.patch.object(db, 'volume_metadata_update') def test_update_all(self, metadata_update): fake_volume = {'id': self.req_id, 'status': 'available'} fake_context = mock.Mock() metadata_update.side_effect = return_new_volume_metadata req = fakes.HTTPRequest.blank(self.url, version=mv.ETAGS) req.method = 'PUT' req.content_type = "application/json" expected = { 'metadata': { 'key10': 'value10', 'key99': 'value99', 'KEY20': 'value20', }, } req.body = jsonutils.dump_as_bytes(expected) req.environ['cinder.context'] = fake_context with mock.patch.object(self.controller.volume_api, 'get') as get_volume: get_volume.return_value = fake_volume res_dict = self.controller.update_all(req, self.req_id, body=expected) self.assertEqual(expected, res_dict) get_volume.assert_called_once_with(fake_context, self.req_id) @mock.patch.object(db, 'volume_metadata_update') def test_update_item(self, metadata_update): fake_volume = {'id': self.req_id, 'status': 'available'} fake_context = mock.Mock() metadata_update.side_effect = return_create_volume_metadata req = fakes.HTTPRequest.blank(self.url + '/key1', version=mv.ETAGS) req.method = 'PUT' body = {"meta": {"key1": "value1"}} req.body = jsonutils.dump_as_bytes(body) req.headers["content-type"] = "application/json" req.environ['cinder.context'] = fake_context with mock.patch.object(self.controller.volume_api, 'get') as get_volume: get_volume.return_value = fake_volume res_dict = self.controller.update(req, self.req_id, 'key1', body=body) expected = {'meta': {'key1': 'value1'}} self.assertEqual(expected, res_dict) get_volume.assert_called_once_with(fake_context, self.req_id) def test_create_metadata_keys_value_none(self): self.mock_object(db, 'volume_metadata_update', return_create_volume_metadata) req = fakes.HTTPRequest.blank(self.url, version=mv.ETAGS) req.method = 'POST' req.headers["content-type"] = "application/json" body = {"meta": {"key": None}} self.assertRaises(exception.ValidationError, self.controller.create, req, self.req_id, body=body) def test_update_items_value_none(self): self.mock_object(db, 'volume_metadata_update', return_create_volume_metadata) req = fakes.HTTPRequest.blank(self.url + '/key1', version=mv.ETAGS) req.method = 'PUT' body = {"metadata": {"key": None}} req.body = 
jsonutils.dump_as_bytes(body) req.headers["content-type"] = "application/json" self.assertRaises(exception.ValidationError, self.controller.create, req, self.req_id, body=body) class VolumeMetaDataTestNoMicroversion(v2_test.VolumeMetaDataTest): """Volume metadata tests with no microversion provided.""" def setUp(self): super(VolumeMetaDataTestNoMicroversion, self).setUp() self.patch( 'cinder.objects.Service.get_minimum_obj_version', return_value=obj_base.OBJ_VERSIONS.get_current()) def _get_minimum_rpc_version_mock(ctxt, binary): binary_map = { 'cinder-backup': backup_rpcapi.BackupAPI, 'cinder-scheduler': scheduler_rpcapi.SchedulerAPI, } return binary_map[binary].RPC_API_VERSION self.patch('cinder.objects.Service.get_minimum_rpc_version', side_effect=_get_minimum_rpc_version_mock) self.volume_controller = volumes.VolumeController(self.ext_mgr) self.controller = volume_metadata.Controller() self.url = '/v3/%s/volumes/%s/metadata' % ( fake.PROJECT_ID, self.req_id) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/api/v3/test_volume_transfer.py0000664000175000017500000005435400000000000024414 0ustar00zuulzuul00000000000000# Copyright 2018 FiberHome Telecommunication Technologies CO.,LTD # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""Tests for volume transfer code.""" from http import HTTPStatus from unittest import mock import ddt from oslo_serialization import jsonutils import webob from cinder.api.contrib import volume_transfer from cinder.api import microversions as mv from cinder.api.v3 import volume_transfer as volume_transfer_v3 from cinder import context from cinder import db from cinder import exception from cinder.objects import fields from cinder import quota from cinder.tests.unit.api import fakes from cinder.tests.unit.api.v2 import fakes as v2_fakes from cinder.tests.unit import fake_constants as fake from cinder.tests.unit import test from cinder.tests.unit import utils as test_utils import cinder.transfer @ddt.ddt class VolumeTransferAPITestCase(test.TestCase): """Test Case for transfers V3 API.""" microversion = mv.TRANSFER_WITH_SNAPSHOTS expect_transfer_history = False DETAIL_LEN = 6 SUMMARY_LEN = 4 def setUp(self): super(VolumeTransferAPITestCase, self).setUp() self.volume_transfer_api = cinder.transfer.API() self.controller = volume_transfer.VolumeTransferController() self.v3_controller = volume_transfer_v3.VolumeTransferController() self.user_ctxt = context.RequestContext( fake.USER_ID, fake.PROJECT_ID, auth_token=True, is_admin=True) def _create_transfer(self, volume_id=fake.VOLUME_ID, display_name='test_transfer'): """Create a transfer object.""" transfer = self.volume_transfer_api.create(context.get_admin_context(), volume_id, display_name) self.addCleanup(db.transfer_destroy, context.get_admin_context(), transfer['id']) return transfer def _create_volume(self, display_name='test_volume', display_description='this is a test volume', status='available', size=1, project_id=fake.PROJECT_ID, attach_status=fields.VolumeAttachStatus.DETACHED): """Create a volume object.""" vol = {} vol['host'] = 'fake_host' vol['size'] = size vol['user_id'] = fake.USER_ID vol['project_id'] = project_id vol['status'] = status vol['display_name'] = display_name vol['display_description'] = display_description vol['attach_status'] = attach_status vol['availability_zone'] = 'fake_zone' vol['volume_type_id'] = fake.VOLUME_TYPE_ID volume_id = db.volume_create(context.get_admin_context(), vol)['id'] self.addCleanup(db.volume_destroy, context.get_admin_context(), volume_id) return volume_id def _check_history_in_res(self, transfer_dict): tx_history_keys = ['source_project_id', 'destination_project_id', 'accepted'] if self.expect_transfer_history: for key in tx_history_keys: self.assertIn(key, transfer_dict) else: for key in tx_history_keys: self.assertNotIn(key, transfer_dict) def test_show_transfer(self): volume_id = self._create_volume(size=5) transfer = self._create_transfer(volume_id) req = webob.Request.blank('/v3/%s/volume-transfers/%s' % ( fake.PROJECT_ID, transfer['id'])) req.method = 'GET' req.headers = mv.get_mv_header(self.microversion) req.headers['Content-Type'] = 'application/json' res = req.get_response(fakes.wsgi_app( fake_auth_context=self.user_ctxt)) res_dict = jsonutils.loads(res.body) self.assertEqual(HTTPStatus.OK, res.status_int) self.assertEqual('test_transfer', res_dict['transfer']['name']) self.assertEqual(transfer['id'], res_dict['transfer']['id']) self.assertEqual(volume_id, res_dict['transfer']['volume_id']) def test_list_transfers(self): volume_id_1 = self._create_volume(size=5) volume_id_2 = self._create_volume(size=5) transfer1 = self._create_transfer(volume_id_1) transfer2 = self._create_transfer(volume_id_2) req = webob.Request.blank('/v3/%s/volume-transfers' % fake.PROJECT_ID) req.method = 
'GET' req.headers = mv.get_mv_header(self.microversion) req.headers['Content-Type'] = 'application/json' res = req.get_response(fakes.wsgi_app( fake_auth_context=self.user_ctxt)) res_dict = jsonutils.loads(res.body) self.assertEqual(HTTPStatus.OK, res.status_int) self.assertEqual(self.SUMMARY_LEN, len(res_dict['transfers'][0])) self.assertEqual(transfer1['id'], res_dict['transfers'][0]['id']) self.assertEqual('test_transfer', res_dict['transfers'][0]['name']) self.assertEqual(self.SUMMARY_LEN, len(res_dict['transfers'][1])) self.assertEqual(transfer2['id'], res_dict['transfers'][1]['id']) self.assertEqual('test_transfer', res_dict['transfers'][1]['name']) def test_list_transfers_with_limit(self): volume_id_1 = self._create_volume(size=5) volume_id_2 = self._create_volume(size=5) self._create_transfer(volume_id_1) self._create_transfer(volume_id_2) url = '/v3/%s/volume-transfers?limit=1' % fake.PROJECT_ID req = fakes.HTTPRequest.blank(url, version=mv.SUPPORT_TRANSFER_PAGINATION, use_admin_context=True) res_dict = self.v3_controller.index(req) self.assertEqual(1, len(res_dict['transfers'])) def test_list_transfers_with_marker(self): volume_id_1 = self._create_volume(size=5) volume_id_2 = self._create_volume(size=5) transfer1 = self._create_transfer(volume_id_1) transfer2 = self._create_transfer(volume_id_2) url = '/v3/%s/volume-transfers?marker=%s' % (fake.PROJECT_ID, transfer2['id']) req = fakes.HTTPRequest.blank(url, version=mv.SUPPORT_TRANSFER_PAGINATION, use_admin_context=True) res_dict = self.v3_controller.index(req) self.assertEqual(1, len(res_dict['transfers'])) self.assertEqual(transfer1['id'], res_dict['transfers'][0]['id']) @ddt.data("desc", "asc") def test_list_transfers_with_sort(self, sort_dir): volume_id_1 = self._create_volume(size=5) volume_id_2 = self._create_volume(size=5) transfer1 = self._create_transfer(volume_id_1) transfer2 = self._create_transfer(volume_id_2) url = '/v3/%s/volume-transfers?sort_key=id&sort_dir=%s' % ( fake.PROJECT_ID, sort_dir) req = fakes.HTTPRequest.blank(url, version=mv.SUPPORT_TRANSFER_PAGINATION, use_admin_context=True) res_dict = self.v3_controller.index(req) self.assertEqual(2, len(res_dict['transfers'])) order_ids = sorted([transfer1['id'], transfer2['id']]) expect_result = order_ids[1] if sort_dir == "desc" else order_ids[0] self.assertEqual(expect_result, res_dict['transfers'][0]['id']) def test_list_transfers_detail(self): volume_id_1 = self._create_volume(size=5) volume_id_2 = self._create_volume(size=5) transfer1 = self._create_transfer(volume_id_1) transfer2 = self._create_transfer(volume_id_2) req = webob.Request.blank('/v3/%s/volume-transfers/detail' % fake.PROJECT_ID) req.method = 'GET' req.headers = mv.get_mv_header(self.microversion) req.headers['Content-Type'] = 'application/json' req.headers['Accept'] = 'application/json' res = req.get_response(fakes.wsgi_app( fake_auth_context=self.user_ctxt)) res_dict = jsonutils.loads(res.body) self.assertEqual(HTTPStatus.OK, res.status_int) self.assertEqual(self.DETAIL_LEN, len(res_dict['transfers'][0])) self.assertEqual('test_transfer', res_dict['transfers'][0]['name']) self.assertEqual(transfer1['id'], res_dict['transfers'][0]['id']) self.assertEqual(volume_id_1, res_dict['transfers'][0]['volume_id']) self._check_history_in_res(res_dict['transfers'][0]) self.assertEqual(self.DETAIL_LEN, len(res_dict['transfers'][1])) self.assertEqual('test_transfer', res_dict['transfers'][1]['name']) self.assertEqual(transfer2['id'], res_dict['transfers'][1]['id']) self.assertEqual(volume_id_2, 
res_dict['transfers'][1]['volume_id']) self._check_history_in_res(res_dict['transfers'][1]) def test_list_transfers_detail_with_no_snapshots(self): volume_id_1 = self._create_volume(size=5) volume_id_2 = self._create_volume(size=5) transfer1 = self._create_transfer(volume_id_1) transfer2 = self._create_transfer(volume_id_2) req = webob.Request.blank('/v3/%s/volume-transfers/detail' % fake.PROJECT_ID) req.method = 'GET' req.headers = mv.get_mv_header(self.microversion) req.headers['Content-Type'] = 'application/json' req.headers['Accept'] = 'application/json' res = req.get_response(fakes.wsgi_app( fake_auth_context=self.user_ctxt)) res_dict = jsonutils.loads(res.body) self.assertEqual(HTTPStatus.OK, res.status_int) self.assertEqual(self.DETAIL_LEN, len(res_dict['transfers'][0])) self.assertEqual('test_transfer', res_dict['transfers'][0]['name']) self.assertEqual(transfer1['id'], res_dict['transfers'][0]['id']) self.assertEqual(volume_id_1, res_dict['transfers'][0]['volume_id']) self.assertEqual(False, res_dict['transfers'][0]['no_snapshots']) self.assertEqual(self.DETAIL_LEN, len(res_dict['transfers'][1])) self.assertEqual('test_transfer', res_dict['transfers'][1]['name']) self.assertEqual(transfer2['id'], res_dict['transfers'][1]['id']) self.assertEqual(volume_id_2, res_dict['transfers'][1]['volume_id']) self.assertEqual(False, res_dict['transfers'][1]['no_snapshots']) def test_create_transfer(self): volume_id = self._create_volume(status='available', size=5) body = {"transfer": {"name": "transfer1", "volume_id": volume_id}} req = webob.Request.blank('/v3/%s/volume-transfers' % fake.PROJECT_ID) req.method = 'POST' req.headers = mv.get_mv_header(self.microversion) req.headers['Content-Type'] = 'application/json' req.body = jsonutils.dump_as_bytes(body) res = req.get_response(fakes.wsgi_app( fake_auth_context=self.user_ctxt)) res_dict = jsonutils.loads(res.body) self.assertEqual(HTTPStatus.ACCEPTED, res.status_int) self.assertIn('id', res_dict['transfer']) self.assertIn('auth_key', res_dict['transfer']) self.assertIn('created_at', res_dict['transfer']) self.assertIn('name', res_dict['transfer']) self.assertIn('volume_id', res_dict['transfer']) self._check_history_in_res(res_dict['transfer']) def test_create_transfer_with_no_snapshots(self): volume_id = self._create_volume(status='available', size=5) body = {"transfer": {"name": "transfer1", "volume_id": volume_id, 'no_snapshots': True}} req = webob.Request.blank('/v3/%s/volume-transfers' % fake.PROJECT_ID) req.method = 'POST' req.headers = mv.get_mv_header(self.microversion) req.headers['Content-Type'] = 'application/json' req.body = jsonutils.dump_as_bytes(body) res = req.get_response(fakes.wsgi_app( fake_auth_context=self.user_ctxt)) res_dict = jsonutils.loads(res.body) self.assertEqual(HTTPStatus.ACCEPTED, res.status_int) self.assertIn('id', res_dict['transfer']) self.assertIn('auth_key', res_dict['transfer']) self.assertIn('created_at', res_dict['transfer']) self.assertIn('name', res_dict['transfer']) self.assertIn('volume_id', res_dict['transfer']) self.assertIn('no_snapshots', res_dict['transfer']) self._check_history_in_res(res_dict['transfer']) def test_delete_transfer_awaiting_transfer(self): volume_id = self._create_volume() transfer = self.volume_transfer_api.create(context.get_admin_context(), volume_id, 'test_transfer') req = webob.Request.blank('/v3/%s/volume-transfers/%s' % ( fake.PROJECT_ID, transfer['id'])) req.method = 'DELETE' req.headers = mv.get_mv_header(self.microversion) req.headers['Content-Type'] = 
'application/json' res = req.get_response(fakes.wsgi_app( fake_auth_context=self.user_ctxt)) self.assertEqual(HTTPStatus.ACCEPTED, res.status_int) # verify transfer has been deleted req = webob.Request.blank('/v3/%s/volume-transfers/%s' % ( fake.PROJECT_ID, transfer['id'])) req.method = 'GET' req.headers['Content-Type'] = 'application/json' res = req.get_response(fakes.wsgi_app( fake_auth_context=self.user_ctxt)) res_dict = jsonutils.loads(res.body) self.assertEqual(HTTPStatus.NOT_FOUND, res.status_int) self.assertEqual(HTTPStatus.NOT_FOUND, res_dict['itemNotFound']['code']) self.assertEqual('Transfer %s could not be found.' % transfer['id'], res_dict['itemNotFound']['message']) self.assertEqual(db.volume_get(context.get_admin_context(), volume_id)['status'], 'available') @mock.patch.object(quota.QUOTAS, 'reserve') @mock.patch.object(db, 'volume_type_get', v2_fakes.fake_volume_type_get) def test_accept_transfer_volume_id_specified(self, type_get): volume_id = self._create_volume() transfer = self.volume_transfer_api.create(context.get_admin_context(), volume_id, 'test_transfer') svc = self.start_service('volume', host='fake_host') body = {"accept": {"auth_key": transfer['auth_key']}} req = webob.Request.blank('/v3/%s/volume-transfers/%s/accept' % ( fake.PROJECT_ID, transfer['id'])) req.method = 'POST' req.headers = mv.get_mv_header(self.microversion) req.headers['Content-Type'] = 'application/json' req.body = jsonutils.dump_as_bytes(body) res = req.get_response(fakes.wsgi_app( fake_auth_context=self.user_ctxt)) res_dict = jsonutils.loads(res.body) self.assertEqual(HTTPStatus.ACCEPTED, res.status_int) self.assertEqual(transfer['id'], res_dict['transfer']['id']) self.assertEqual(volume_id, res_dict['transfer']['volume_id']) # cleanup svc.stop() class VolumeTransferAPITestCase357(VolumeTransferAPITestCase): microversion = mv.TRANSFER_WITH_HISTORY DETAIL_LEN = 9 expect_transfer_history = True @ddt.ddt class VolumeTransferEncryptedAPITestCase(test.TestCase): # NOTE: # - The TRANSFER_ENCRYPTED_VOLUME microversion is only relevant when # creating a volume transfer. The microversion specified when accepting # or deleting a transfer is not relevant. # - The tests take advantage of the fact that a project_id is no longer # required in API URLs. 
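    # - For orientation, the create request exercised by the tests below
    #   looks roughly like this (illustrative sketch only; the real header
    #   value comes from mv.get_mv_header(mv.TRANSFER_ENCRYPTED_VOLUME)):
    #
    #       POST /v3/volume-transfers
    #       OpenStack-API-Version: volume <TRANSFER_ENCRYPTED_VOLUME>
    #       {"transfer": {"name": "transfer1", "volume_id": "<volume uuid>"}}
    #
    #   With a prior microversion the same request against an encrypted
    #   volume is rejected with HTTP 400 ("transferring encrypted volume
    #   is not supported"), which
    #   test_create_transfer_encrypted_volume_not_supported verifies.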
def setUp(self): super(VolumeTransferEncryptedAPITestCase, self).setUp() self.volume_transfer_api = cinder.transfer.API() self.controller = volume_transfer_v3.VolumeTransferController() self.user_ctxt = context.RequestContext( fake.USER_ID, fake.PROJECT_ID, auth_token=True) self.admin_ctxt = context.get_admin_context() def _create_volume(self, encryption_key_id): vol_type = test_utils.create_volume_type(self.admin_ctxt, name='fake_vol_type', testcase_instance=self) volume = test_utils.create_volume(self.user_ctxt, volume_type_id=vol_type.id, testcase_instance=self, encryption_key_id=encryption_key_id) return volume @mock.patch('cinder.keymgr.transfer.transfer_create') def _create_transfer(self, volume_id, mock_key_transfer_create): transfer = self.volume_transfer_api.create(self.admin_ctxt, volume_id, display_name='test', allow_encrypted=True) return transfer @ddt.data(None, fake.ENCRYPTION_KEY_ID) @mock.patch('cinder.keymgr.transfer.transfer_create') def test_create_transfer(self, encryption_key_id, mock_key_transfer_create): volume = self._create_volume(encryption_key_id) body = {"transfer": {"name": "transfer1", "volume_id": volume.id}} req = webob.Request.blank('/v3/volume-transfers') req.method = 'POST' req.headers = mv.get_mv_header(mv.TRANSFER_ENCRYPTED_VOLUME) req.headers['Content-Type'] = 'application/json' req.body = jsonutils.dump_as_bytes(body) res = req.get_response(fakes.wsgi_app( fake_auth_context=self.user_ctxt)) self.assertEqual(HTTPStatus.ACCEPTED, res.status_int) call_count = 0 if encryption_key_id is None else 1 self.assertEqual(mock_key_transfer_create.call_count, call_count) def test_create_transfer_encrypted_volume_not_supported(self): volume = self._create_volume(fake.ENCRYPTION_KEY_ID) body = {"transfer": {"name": "transfer1", "volume_id": volume.id}} req = webob.Request.blank('/v3/volume-transfers') req.method = 'POST' req.headers = mv.get_mv_header( mv.get_prior_version(mv.TRANSFER_ENCRYPTED_VOLUME)) req.headers['Content-Type'] = 'application/json' req.body = jsonutils.dump_as_bytes(body) res = req.get_response(fakes.wsgi_app( fake_auth_context=self.user_ctxt)) res_dict = jsonutils.loads(res.body) self.assertEqual(HTTPStatus.BAD_REQUEST, res.status_int) self.assertEqual(('Invalid volume: ' 'transferring encrypted volume is not supported'), res_dict['badRequest']['message']) @mock.patch('cinder.keymgr.transfer.transfer_create', side_effect=exception.KeyManagerError('whoops!')) def test_create_transfer_key_transfer_failed(self, mock_key_transfer_create): volume = self._create_volume(fake.ENCRYPTION_KEY_ID) body = {"transfer": {"name": "transfer1", "volume_id": volume.id}} req = webob.Request.blank('/v3/volume-transfers') req.method = 'POST' req.headers = mv.get_mv_header(mv.TRANSFER_ENCRYPTED_VOLUME) req.headers['Content-Type'] = 'application/json' req.body = jsonutils.dump_as_bytes(body) res = req.get_response(fakes.wsgi_app( fake_auth_context=self.user_ctxt)) self.assertEqual(HTTPStatus.INTERNAL_SERVER_ERROR, res.status_int) @ddt.data(None, fake.ENCRYPTION_KEY_ID) @mock.patch('cinder.keymgr.transfer.transfer_accept') @mock.patch('cinder.volume.api.API.accept_transfer') def test_accept_transfer(self, encryption_key_id, mock_volume_accept_transfer, mock_key_transfer_accept): volume = self._create_volume(encryption_key_id) transfer = self._create_transfer(volume.id) body = {"accept": {"auth_key": transfer['auth_key']}} req = webob.Request.blank('/v3/volume-transfers/%s/accept' % ( transfer['id'])) req.method = 'POST' req.headers['Content-Type'] = 'application/json' 
req.body = jsonutils.dump_as_bytes(body) res = req.get_response(fakes.wsgi_app( fake_auth_context=self.user_ctxt)) self.assertEqual(HTTPStatus.ACCEPTED, res.status_int) call_count = 0 if encryption_key_id is None else 1 self.assertEqual(mock_key_transfer_accept.call_count, call_count) @ddt.data(None, fake.ENCRYPTION_KEY_ID) @mock.patch('cinder.keymgr.transfer.transfer_delete') def test_delete_transfer(self, encryption_key_id, mock_key_transfer_delete): volume = self._create_volume(encryption_key_id) transfer = self._create_transfer(volume.id) req = webob.Request.blank('/v3/volume-transfers/%s' % ( transfer['id'])) req.method = 'DELETE' req.headers['Content-Type'] = 'application/json' res = req.get_response(fakes.wsgi_app( fake_auth_context=self.user_ctxt)) self.assertEqual(HTTPStatus.ACCEPTED, res.status_int) call_count = 0 if encryption_key_id is None else 1 self.assertEqual(mock_key_transfer_delete.call_count, call_count) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/api/v3/test_volumes.py0000664000175000017500000016074300000000000022673 0ustar00zuulzuul00000000000000# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import datetime from http import HTTPStatus import json from unittest import mock import ddt import fixtures import iso8601 from oslo_serialization import jsonutils from oslo_utils import strutils from oslo_utils import timeutils import webob from cinder.api import api_utils from cinder.api import common from cinder.api import extensions from cinder.api import microversions as mv from cinder.api.v2.views.volumes import ViewBuilder from cinder.api.v3 import volumes from cinder.backup import api as backup_api from cinder.common import constants as cinder_constants from cinder import context from cinder import db from cinder import exception from cinder.group import api as group_api from cinder import objects from cinder.objects import fields from cinder.policies import volumes as policy from cinder.tests.unit.api import fakes from cinder.tests.unit.api.v2 import fakes as v2_fakes from cinder.tests.unit.api.v2 import test_volumes as v2_test_volumes from cinder.tests.unit import fake_constants as fake from cinder.tests.unit.image import fake as fake_image from cinder.tests.unit import test from cinder.tests.unit import utils as test_utils from cinder.volume import api as volume_api from cinder.volume import api as vol_get DEFAULT_AZ = "zone1:host1" # DDT data for testing whether an 'encryption_key_id' should appear in a # volume's or backup's details (also used by test_backups.py). 
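# Each entry is unpacked into keyword arguments by ddt, roughly as follows
# (illustrative sketch; see test_volume_show_with_encryption_key_id below
# for the real consumer):
#
#     @ddt.data(*ENCRYPTION_KEY_ID_IN_DETAILS)
#     @ddt.unpack
#     def test_show(self, expected_in_details, encryption_key_id, version):
#         ...
#
# i.e. one test run per dict, with the dict keys passed as parameters.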
ENCRYPTION_KEY_ID_IN_DETAILS = { 'expected_in_details': True, 'encryption_key_id': fake.ENCRYPTION_KEY_ID, 'version': mv.ENCRYPTION_KEY_ID_IN_DETAILS, }, { # No encryption ID to display 'expected_in_details': False, 'encryption_key_id': None, 'version': mv.ENCRYPTION_KEY_ID_IN_DETAILS, }, { # Fixed key ID should not be displayed 'expected_in_details': False, 'encryption_key_id': cinder_constants.FIXED_KEY_ID, 'version': mv.ENCRYPTION_KEY_ID_IN_DETAILS, }, { # Unsupported microversion 'expected_in_details': False, 'encryption_key_id': fake.ENCRYPTION_KEY_ID, 'version': mv.get_prior_version(mv.ENCRYPTION_KEY_ID_IN_DETAILS), } @ddt.ddt class VolumeApiTest(test.TestCase): def setUp(self): super(VolumeApiTest, self).setUp() self.ext_mgr = extensions.ExtensionManager() self.ext_mgr.extensions = {} fake_image.mock_image_service(self) self.controller = volumes.VolumeController(self.ext_mgr) self.flags(host='fake') self.ctxt = context.RequestContext(fake.USER_ID, fake.PROJECT_ID, True) # This will be cleaned up by the NestedTempfile fixture in base class self.tmp_path = self.useFixture(fixtures.TempDir()).path def test_check_volume_filters_called(self): # Clear the filters collection to make sure the filters collection # cache can be reloaded using tmp filter file. common._FILTERS_COLLECTION = None with mock.patch.object(vol_get.API, 'check_volume_filters') as volume_get: req = fakes.HTTPRequest.blank('/v3/volumes?bootable=True') req.method = 'GET' req.content_type = 'application/json' req.headers = mv.get_mv_header(mv.BASE_VERSION) req.environ['cinder.context'].is_admin = True tmp_filter_file = self.tmp_path + '/resource_filters_tests.json' self.override_config('resource_query_filters_file', tmp_filter_file) with open(tmp_filter_file, 'w') as f: f.write(json.dumps({"volume": ['bootable']})) self.controller.index(req) filters = req.params.copy() volume_get.assert_called_with(filters, False) # Reset the CONF.resource_query_filters_file and clear the filters # collection to avoid leaking other cases, and it will be re-loaded # from CONF.resource_query_filters_file in next call. self._reset_filter_file() def test_check_volume_filters_strict_called(self): # Clear the filters collection to make sure the filters collection # cache can be reloaded using tmp filter file. common._FILTERS_COLLECTION = None with mock.patch.object(vol_get.API, 'check_volume_filters') as volume_get: req = fakes.HTTPRequest.blank('/v3/volumes?bootable=True') req.method = 'GET' req.content_type = 'application/json' req.headers = mv.get_mv_header(mv.VOLUME_LIST_BOOTABLE) req.environ['cinder.context'].is_admin = True req.api_version_request = mv.get_api_version( mv.VOLUME_LIST_BOOTABLE) tmp_filter_file = self.tmp_path + '/resource_filters_tests.json' self.override_config('resource_query_filters_file', tmp_filter_file) with open(tmp_filter_file, 'w') as f: f.write(json.dumps({"volume": ['bootable']})) self.controller.index(req) filters = req.params.copy() volume_get.assert_called_with(filters, True) # Reset the CONF.resource_query_filters_file and clear the filters # collection to avoid leaking other cases, and it will be re-loaded # from CONF.resource_query_filters_file in next call. 
self._reset_filter_file() def _create_volume_with_glance_metadata(self): basetime = timeutils.utcnow() td = datetime.timedelta(minutes=1) vol1 = db.volume_create(self.ctxt, {'display_name': 'test1', 'created_at': basetime - 3 * td, 'updated_at': basetime - 2 * td, 'project_id': self.ctxt.project_id, 'volume_type_id': fake.VOLUME_TYPE_ID, 'id': fake.VOLUME_ID}) db.volume_glance_metadata_create(self.ctxt, vol1.id, 'image_name', 'imageTestOne') vol2 = db.volume_create(self.ctxt, {'display_name': 'test2', 'created_at': basetime - td, 'updated_at': basetime, 'project_id': self.ctxt.project_id, 'volume_type_id': fake.VOLUME_TYPE_ID, 'id': fake.VOLUME2_ID}) db.volume_glance_metadata_create(self.ctxt, vol2.id, 'image_name', 'imageTestTwo') db.volume_glance_metadata_create(self.ctxt, vol2.id, 'disk_format', 'qcow2') return [vol1, vol2] def _create_volume_with_group(self): vol1 = db.volume_create(self.ctxt, {'display_name': 'test1', 'project_id': self.ctxt.project_id, 'group_id': fake.GROUP_ID, 'volume_type_id': fake.VOLUME_TYPE_ID}) vol2 = db.volume_create(self.ctxt, {'display_name': 'test2', 'project_id': self.ctxt.project_id, 'group_id': fake.GROUP2_ID, 'volume_type_id': fake.VOLUME_TYPE_ID}) return [vol1, vol2] def _create_multiple_volumes_with_different_project(self): # Create volumes in project 1 db.volume_create(self.ctxt, {'display_name': 'test1', 'project_id': fake.PROJECT_ID, 'volume_type_id': fake.VOLUME_TYPE_ID, 'id': fake.VOLUME_ID}) db.volume_create(self.ctxt, {'display_name': 'test2', 'project_id': fake.PROJECT_ID, 'volume_type_id': fake.VOLUME_TYPE_ID, 'id': fake.VOLUME2_ID}) # Create volume in project 2 db.volume_create(self.ctxt, {'display_name': 'test3', 'project_id': fake.PROJECT2_ID, 'volume_type_id': fake.VOLUME_TYPE_ID, 'id': fake.VOLUME3_ID}) def test_volume_index_filter_by_glance_metadata(self): vols = self._create_volume_with_glance_metadata() req = fakes.HTTPRequest.blank("/v3/volumes?glance_metadata=" "{'image_name': 'imageTestOne'}") req.headers = mv.get_mv_header(mv.VOLUME_LIST_GLANCE_METADATA) req.api_version_request = mv.get_api_version( mv.VOLUME_LIST_GLANCE_METADATA) req.environ['cinder.context'] = self.ctxt res_dict = self.controller.index(req) volumes = res_dict['volumes'] self.assertEqual(1, len(volumes)) self.assertEqual(vols[0].id, volumes[0]['id']) def test_volume_index_filter_by_glance_metadata_in_unsupport_version(self): self._create_volume_with_glance_metadata() req = fakes.HTTPRequest.blank("/v3/volumes?glance_metadata=" "{'image_name': 'imageTestOne'}") req.headers = mv.get_mv_header(mv.BASE_VERSION) req.api_version_request = mv.get_api_version(mv.BASE_VERSION) req.environ['cinder.context'] = self.ctxt res_dict = self.controller.index(req) volumes = res_dict['volumes'] self.assertEqual(2, len(volumes)) def test_volume_index_filter_by_group_id(self): vols = self._create_volume_with_group() req = fakes.HTTPRequest.blank(("/v3/volumes?group_id=%s") % fake.GROUP_ID) req.headers = mv.get_mv_header(mv.VOLUME_LIST_GROUP) req.api_version_request = mv.get_api_version(mv.VOLUME_LIST_GROUP) req.environ['cinder.context'] = self.ctxt res_dict = self.controller.index(req) volumes = res_dict['volumes'] self.assertEqual(1, len(volumes)) self.assertEqual(vols[0].id, volumes[0]['id']) @ddt.data('volumes', 'volumes/detail') def test_list_volume_with_count_param_version_not_matched(self, action): self._create_multiple_volumes_with_different_project() is_detail = True if 'detail' in action else False req = fakes.HTTPRequest.blank("/v3/%s?with_count=True" % action) req.headers 
= mv.get_mv_header( mv.get_prior_version(mv.SUPPORT_COUNT_INFO)) req.api_version_request = mv.get_api_version( mv.get_prior_version(mv.SUPPORT_COUNT_INFO)) ctxt = context.RequestContext(fake.USER_ID, fake.PROJECT_ID, True) req.environ['cinder.context'] = ctxt res_dict = self.controller._get_volumes(req, is_detail=is_detail) self.assertNotIn('count', res_dict) @ddt.data({'method': 'volumes', 'display_param': 'True'}, {'method': 'volumes', 'display_param': 'False'}, {'method': 'volumes', 'display_param': '1'}, {'method': 'volumes/detail', 'display_param': 'True'}, {'method': 'volumes/detail', 'display_param': 'False'}, {'method': 'volumes/detail', 'display_param': '1'} ) @ddt.unpack def test_list_volume_with_count_param(self, method, display_param): self._create_multiple_volumes_with_different_project() self.mock_object(ViewBuilder, '_get_volume_type', v2_fakes.fake_volume_type_name_get) is_detail = True if 'detail' in method else False show_count = strutils.bool_from_string(display_param, strict=True) # Request with 'with_count' and 'limit' req = fakes.HTTPRequest.blank( "/v3/%s?with_count=%s&limit=1" % (method, display_param)) req.headers = mv.get_mv_header(mv.SUPPORT_COUNT_INFO) req.api_version_request = mv.get_api_version(mv.SUPPORT_COUNT_INFO) ctxt = context.RequestContext(fake.USER_ID, fake.PROJECT_ID, False) req.environ['cinder.context'] = ctxt res_dict = self.controller._get_volumes(req, is_detail=is_detail) self.assertEqual(1, len(res_dict['volumes'])) if show_count: self.assertEqual(2, res_dict['count']) else: self.assertNotIn('count', res_dict) # Request with 'with_count' req = fakes.HTTPRequest.blank( "/v3/%s?with_count=%s" % (method, display_param)) req.headers = mv.get_mv_header(mv.SUPPORT_COUNT_INFO) req.api_version_request = mv.get_api_version(mv.SUPPORT_COUNT_INFO) ctxt = context.RequestContext(fake.USER_ID, fake.PROJECT_ID, False) req.environ['cinder.context'] = ctxt res_dict = self.controller._get_volumes(req, is_detail=is_detail) self.assertEqual(2, len(res_dict['volumes'])) if show_count: self.assertEqual(2, res_dict['count']) else: self.assertNotIn('count', res_dict) # Request with admin context and 'all_tenants' req = fakes.HTTPRequest.blank( "/v3/%s?with_count=%s&all_tenants=1" % (method, display_param)) req.headers = mv.get_mv_header(mv.SUPPORT_COUNT_INFO) req.api_version_request = mv.get_api_version(mv.SUPPORT_COUNT_INFO) ctxt = context.RequestContext(fake.USER_ID, fake.PROJECT_ID, True) req.environ['cinder.context'] = ctxt res_dict = self.controller._get_volumes(req, is_detail=is_detail) self.assertEqual(3, len(res_dict['volumes'])) if show_count: self.assertEqual(3, res_dict['count']) else: self.assertNotIn('count', res_dict) def test_list_volume_with_multiple_filters(self): metadata = {'key_X': 'value_X'} self._create_multiple_volumes_with_different_project() test_utils.create_volume(self.ctxt, metadata=metadata) self.mock_object(ViewBuilder, '_get_volume_type', v2_fakes.fake_volume_type_name_get) # Request with 'all_tenants' and 'metadata' req = fakes.HTTPRequest.blank( "/v3/volumes/detail?all_tenants=1" "&metadata=%7B%27key_X%27%3A+%27value_X%27%7D") ctxt = context.RequestContext(fake.USER_ID, fake.PROJECT_ID, False) req.environ['cinder.context'] = ctxt res_dict = self.controller._get_volumes(req, is_detail=True) self.assertEqual(1, len(res_dict['volumes'])) self.assertEqual(metadata, res_dict['volumes'][0]['metadata']) def test_list_volume_with_filter_and_paginate(self): self._create_multiple_volumes_with_different_project() test_utils.create_volume(self.ctxt) 
self.mock_object(ViewBuilder, '_get_volume_type', v2_fakes.fake_volume_type_name_get) req = fakes.HTTPRequest.blank( "/v3/volumes/detail?all_tenants=1" "&sort=bootable:asc&with_count=True&limit=5" "&marker=" + fake.VOLUME_ID) ctxt = context.RequestContext(fake.USER_ID, fake.PROJECT_ID, False) req.environ['cinder.context'] = ctxt res_dict = self.controller._get_volumes(req, is_detail=True) self.assertEqual(2, len(res_dict['volumes'])) def test_volume_index_filter_by_group_id_in_unsupport_version(self): self._create_volume_with_group() req = fakes.HTTPRequest.blank(("/v3/volumes?group_id=%s") % fake.GROUP_ID) req.headers = mv.get_mv_header(mv.BACKUP_UPDATE) req.api_version_request = mv.get_api_version(mv.BACKUP_UPDATE) req.environ['cinder.context'] = self.ctxt res_dict = self.controller.index(req) volumes = res_dict['volumes'] self.assertEqual(2, len(volumes)) @ddt.data(('true', 0), ('false', 1)) @ddt.unpack def test_volume_list_with_quota_filter(self, use_quota, expected_index): volumes = (test_utils.create_volume(self.ctxt, host='test_host1', cluster_name='cluster1', volume_type_id=None, use_quota=True, availability_zone='nova1'), test_utils.create_volume(self.ctxt, host='test_host2', cluster_name='cluster2', volume_type_id=None, use_quota=False, availability_zone='nova2')) req = fakes.HTTPRequest.blank( '/v3/volumes?consumes_quota=%s' % use_quota, version=mv.USE_QUOTA) res_dict = self.controller.detail(req) self.assertEqual(1, len(res_dict['volumes'])) self.assertEqual(volumes[expected_index].id, res_dict['volumes'][0]['id']) def test_volume_list_without_quota_filter(self): num_vols = 4 vol_ids = set() # Half of the volumes will use quota, the other half won't for i in range(num_vols): vol = test_utils.create_volume(self.ctxt, use_quota=bool(i % 2), host='test_host', cluster_name='cluster', volume_type_id=None, availability_zone='nova1') vol_ids.add(vol.id) req = fakes.HTTPRequest.blank('/v3/volumes', version=mv.USE_QUOTA) res_dict = self.controller.detail(req) res_vol_ids = {v['id'] for v in res_dict['volumes']} self.assertEqual(num_vols, len(res_vol_ids)) self.assertEqual(vol_ids, res_vol_ids) def _fake_volumes_summary_request(self, version=mv.VOLUME_SUMMARY, all_tenant=False, is_admin=False): req_url = '/v3/volumes/summary' if all_tenant: req_url += '?all_tenants=True' req = fakes.HTTPRequest.blank(req_url, use_admin_context=is_admin) req.headers = mv.get_mv_header(version) req.api_version_request = mv.get_api_version(version) return req @mock.patch.object(db.sqlalchemy.api, '_volume_type_get_full', autospec=True) @mock.patch.object(volume_api.API, 'get_snapshot', autospec=True) @mock.patch.object(volume_api.API, 'create', autospec=True) @mock.patch( 'cinder.api.openstack.wsgi.Controller.validate_name_and_description') def test_volume_create_with_snapshot_image(self, mock_validate, create, get_snapshot, volume_type_get): create.side_effect = v2_fakes.fake_volume_api_create get_snapshot.side_effect = v2_fakes.fake_snapshot_get volume_type_get.side_effect = v2_fakes.fake_volume_type_get vol = self._vol_in_request_body( image_id="b0a599e0-41d7-3582-b260-769f443c862a") snapshot_id = fake.SNAPSHOT_ID ex = self._expected_vol_from_controller(snapshot_id=snapshot_id) body = {"volume": vol} req = fakes.HTTPRequest.blank('/v3/%s/volumes' % fake.PROJECT_ID) req.headers = mv.get_mv_header(mv.SUPPORT_NOVA_IMAGE) req.api_version_request = mv.get_api_version(mv.SUPPORT_NOVA_IMAGE) res_dict = self.controller.create(req, body=body) self.assertEqual(ex, res_dict) context = req.environ['cinder.context'] 
get_snapshot.assert_called_once_with(self.controller.volume_api, context, snapshot_id) kwargs = self._expected_volume_api_create_kwargs( v2_fakes.fake_snapshot(snapshot_id)) create.assert_called_once_with( self.controller.volume_api, context, vol['size'], v2_fakes.DEFAULT_VOL_NAME, v2_fakes.DEFAULT_VOL_DESCRIPTION, **kwargs) def test_volumes_summary_in_unsupport_version(self): """Function call to test summary volumes API in unsupported version""" req = self._fake_volumes_summary_request( version=mv.get_prior_version(mv.VOLUME_SUMMARY)) self.assertRaises(exception.VersionNotFoundForAPIMethod, self.controller.summary, req) def test_volumes_summary_in_supported_version(self): """Function call to test the summary volumes API for version v3.""" req = self._fake_volumes_summary_request() res_dict = self.controller.summary(req) expected = {'volume-summary': {'total_size': 0.0, 'total_count': 0}} self.assertEqual(expected, res_dict) vol = v2_test_volumes.VolumeApiTest._vol_in_request_body( availability_zone="nova") body = {"volume": vol} req = fakes.HTTPRequest.blank('/v3/volumes') res_dict = self.controller.create(req, body=body) req = self._fake_volumes_summary_request() res_dict = self.controller.summary(req) expected = {'volume-summary': {'total_size': 1.0, 'total_count': 1}} self.assertEqual(expected, res_dict) @ddt.data( (mv.get_prior_version(mv.VOLUME_SUMMARY_METADATA), {'volume-summary': {'total_size': 0.0, 'total_count': 0}}), (mv.VOLUME_SUMMARY_METADATA, {'volume-summary': {'total_size': 0.0, 'total_count': 0, 'metadata': {}}})) @ddt.unpack def test_volume_summary_empty(self, summary_api_version, expect_result): req = self._fake_volumes_summary_request(version=summary_api_version) res_dict = self.controller.summary(req) self.assertEqual(expect_result, res_dict) @ddt.data( (mv.get_prior_version(mv.VOLUME_SUMMARY_METADATA), {'volume-summary': {'total_size': 2, 'total_count': 2}}), (mv.VOLUME_SUMMARY_METADATA, {'volume-summary': {'total_size': 2, 'total_count': 2, 'metadata': { 'name': ['test_name1', 'test_name2'], 'age': ['test_age']}}})) @ddt.unpack def test_volume_summary_return_metadata(self, summary_api_version, expect_result): test_utils.create_volume(self.ctxt, metadata={'name': 'test_name1', 'age': 'test_age'}) test_utils.create_volume(self.ctxt, metadata={'name': 'test_name2', 'age': 'test_age'}) ctxt2 = context.RequestContext(fake.USER_ID, fake.PROJECT2_ID, True) test_utils.create_volume(ctxt2, metadata={'name': 'test_name3'}) req = self._fake_volumes_summary_request(version=summary_api_version) res_dict = self.controller.summary(req) self.assertEqual(expect_result, res_dict) @ddt.data( (mv.get_prior_version(mv.VOLUME_SUMMARY_METADATA), {'volume-summary': {'total_size': 2, 'total_count': 2}}), (mv.VOLUME_SUMMARY_METADATA, {'volume-summary': {'total_size': 2, 'total_count': 2, 'metadata': { 'name': ['test_name1', 'test_name2'], 'age': ['test_age']}}})) @ddt.unpack def test_volume_summary_return_metadata_all_tenant( self, summary_api_version, expect_result): test_utils.create_volume(self.ctxt, metadata={'name': 'test_name1', 'age': 'test_age'}) ctxt2 = context.RequestContext(fake.USER_ID, fake.PROJECT2_ID, True) test_utils.create_volume(ctxt2, metadata={'name': 'test_name2', 'age': 'test_age'}) req = self._fake_volumes_summary_request(version=summary_api_version, all_tenant=True, is_admin=True) res_dict = self.controller.summary(req) self.assertEqual(expect_result, res_dict) def _vol_in_request_body(self, size=v2_fakes.DEFAULT_VOL_SIZE, name=v2_fakes.DEFAULT_VOL_NAME, 
description=v2_fakes.DEFAULT_VOL_DESCRIPTION, availability_zone=DEFAULT_AZ, snapshot_id=None, source_volid=None, consistencygroup_id=None, volume_type=None, image_ref=None, image_id=None, group_id=None, backup_id=None): vol = {"size": size, "name": name, "description": description, "availability_zone": availability_zone, "snapshot_id": snapshot_id, "source_volid": source_volid, "consistencygroup_id": consistencygroup_id, "volume_type": volume_type, "group_id": group_id, } if image_id is not None: vol['image_id'] = image_id elif image_ref is not None: vol['imageRef'] = image_ref elif backup_id is not None: vol['backup_id'] = backup_id return vol def _expected_vol_from_controller( self, size=v2_fakes.DEFAULT_VOL_SIZE, availability_zone=DEFAULT_AZ, description=v2_fakes.DEFAULT_VOL_DESCRIPTION, name=v2_fakes.DEFAULT_VOL_NAME, consistencygroup_id=None, source_volid=None, snapshot_id=None, metadata=None, attachments=None, volume_type=v2_fakes.DEFAULT_VOL_TYPE, status=v2_fakes.DEFAULT_VOL_STATUS, with_migration_status=False, group_id=None, req_version=None): metadata = metadata or {} attachments = attachments or [] volume = {'volume': {'attachments': attachments, 'availability_zone': availability_zone, 'bootable': 'false', 'consistencygroup_id': consistencygroup_id, 'group_id': group_id, 'created_at': datetime.datetime( 1900, 1, 1, 1, 1, 1, tzinfo=iso8601.UTC), 'updated_at': datetime.datetime( 1900, 1, 1, 1, 1, 1, tzinfo=iso8601.UTC), 'description': description, 'id': v2_fakes.DEFAULT_VOL_ID, 'links': [{'href': 'http://localhost/v3/%s/volumes/%s' % ( fake.PROJECT_ID, fake.VOLUME_ID), 'rel': 'self'}, {'href': 'http://localhost/%s/volumes/%s' % ( fake.PROJECT_ID, fake.VOLUME_ID), 'rel': 'bookmark'}], 'metadata': metadata, 'name': name, 'replication_status': 'disabled', 'multiattach': False, 'size': size, 'snapshot_id': snapshot_id, 'source_volid': source_volid, 'status': status, 'user_id': fake.USER_ID, 'volume_type': volume_type, 'encrypted': False}} if with_migration_status: volume['volume']['migration_status'] = None # Remove group_id if max version is less than GROUP_VOLUME. if req_version and req_version.matches( None, mv.get_prior_version(mv.GROUP_VOLUME)): volume['volume'].pop('group_id') return volume def _expected_volume_api_create_kwargs(self, snapshot=None, availability_zone=DEFAULT_AZ, source_volume=None, test_group=None, req_version=None): volume = { 'metadata': None, 'snapshot': snapshot, 'source_volume': source_volume, 'consistencygroup': None, 'availability_zone': availability_zone, 'scheduler_hints': None, 'group': test_group, } # Remove group_id if max version is less than GROUP_VOLUME. 
if req_version and req_version.matches( None, mv.get_prior_version(mv.GROUP_VOLUME)): volume.pop('group') return volume @ddt.data((mv.GROUP_VOLUME, {'display_name': ' test name ', 'display_description': ' test desc ', 'size': 1}), (mv.get_prior_version(mv.GROUP_VOLUME), {'name': ' test name ', 'description': ' test desc ', 'size': 1}), ('3.0', {'name': 'test name', 'description': 'test desc', 'size': 1, 'user_id': 'teapot', 'project_id': 'kettle', 'status': 'confused'})) @ddt.unpack def test_volume_create(self, max_ver, volume_body): self.mock_object(volume_api.API, 'get', v2_fakes.fake_volume_get) self.mock_object(volume_api.API, "create", v2_fakes.fake_volume_api_create) self.mock_object(db.sqlalchemy.api, '_volume_type_get_full', v2_fakes.fake_volume_type_get) req = fakes.HTTPRequest.blank('/v3/volumes') req.api_version_request = mv.get_api_version(max_ver) body = {'volume': volume_body} res_dict = self.controller.create(req, body=body) ex = self._expected_vol_from_controller( req_version=req.api_version_request, name='test name', description='test desc') self.assertEqual(ex['volume']['name'], res_dict['volume']['name']) self.assertEqual(ex['volume']['description'], res_dict['volume']['description']) def test_volume_create_extra_params(self): self.mock_object(volume_api.API, 'get', v2_fakes.fake_volume_get) self.mock_object(volume_api.API, "create", v2_fakes.fake_volume_api_create) self.mock_object(db.sqlalchemy.api, '_volume_type_get_full', v2_fakes.fake_volume_type_get) req = fakes.HTTPRequest.blank('/v3/volumes') req.api_version_request = mv.get_api_version( mv.SUPPORT_VOLUME_SCHEMA_CHANGES) body = {'volume': { 'name': 'test name', 'description': 'test desc', 'size': 1, 'user_id': 'teapot', 'project_id': 'kettle', 'status': 'confused'}} self.assertRaises(exception.ValidationError, self.controller.create, req, body=body) @ddt.data(mv.get_prior_version(mv.VOLUME_DELETE_FORCE), mv.VOLUME_DELETE_FORCE) @mock.patch('cinder.context.RequestContext.authorize') def test_volume_delete_with_force(self, request_version, mock_authorize): mock_delete = self.mock_object(volume_api.API, "delete") self.mock_object(volume_api.API, 'get', return_value="fake_volume") req = fakes.HTTPRequest.blank('/v3/volumes/fake_id?force=True') req.api_version_request = mv.get_api_version(request_version) self.controller.delete(req, 'fake_id') context = req.environ['cinder.context'] if request_version == mv.VOLUME_DELETE_FORCE: mock_authorize.assert_called_with(policy.FORCE_DELETE_POLICY, target_obj="fake_volume") mock_delete.assert_called_with(context, "fake_volume", cascade=False, force=True) else: mock_authorize.assert_not_called() mock_delete.assert_called_with(context, "fake_volume", cascade=False, force=False) @ddt.data(mv.GROUP_SNAPSHOTS, mv.get_prior_version(mv.GROUP_SNAPSHOTS)) @mock.patch.object(group_api.API, 'get') @mock.patch.object(db.sqlalchemy.api, '_volume_type_get_full', autospec=True) @mock.patch.object(volume_api.API, 'get_snapshot', autospec=True) @mock.patch.object(volume_api.API, 'create', autospec=True) def test_volume_creation_from_snapshot(self, max_ver, create, get_snapshot, volume_type_get, group_get): create.side_effect = v2_fakes.fake_volume_api_create get_snapshot.side_effect = v2_fakes.fake_snapshot_get volume_type_get.side_effect = v2_fakes.fake_volume_type_get fake_group = { 'id': fake.GROUP_ID, 'group_type_id': fake.GROUP_TYPE_ID, 'name': 'fake_group' } group_get.return_value = fake_group snapshot_id = fake.SNAPSHOT_ID vol = self._vol_in_request_body(snapshot_id=snapshot_id, 
group_id=fake.GROUP_ID) body = {"volume": vol} req = fakes.HTTPRequest.blank('/v3/%s/volumes' % fake.PROJECT_ID) req.api_version_request = mv.get_api_version(max_ver) res_dict = self.controller.create(req, body=body) ex = self._expected_vol_from_controller( snapshot_id=snapshot_id, req_version=req.api_version_request) self.assertEqual(ex, res_dict) context = req.environ['cinder.context'] get_snapshot.assert_called_once_with(self.controller.volume_api, context, snapshot_id) kwargs = self._expected_volume_api_create_kwargs( v2_fakes.fake_snapshot(snapshot_id), test_group=fake_group, req_version=req.api_version_request) create.assert_called_once_with( self.controller.volume_api, context, vol['size'], v2_fakes.DEFAULT_VOL_NAME, v2_fakes.DEFAULT_VOL_DESCRIPTION, **kwargs) @ddt.data(mv.VOLUME_CREATE_FROM_BACKUP, mv.get_prior_version(mv.VOLUME_CREATE_FROM_BACKUP)) @mock.patch.object(db.sqlalchemy.api, '_volume_type_get_full', autospec=True) @mock.patch.object(backup_api.API, 'get', autospec=True) @mock.patch.object(volume_api.API, 'create', autospec=True) def test_volume_creation_from_backup(self, max_ver, create, get_backup, volume_type_get): create.side_effect = v2_fakes.fake_volume_api_create get_backup.side_effect = v2_fakes.fake_backup_get volume_type_get.side_effect = v2_fakes.fake_volume_type_get backup_id = fake.BACKUP_ID req = fakes.HTTPRequest.blank('/v3/%s/volumes' % fake.PROJECT_ID) req.api_version_request = mv.get_api_version(max_ver) if max_ver == mv.VOLUME_CREATE_FROM_BACKUP: vol = self._vol_in_request_body(backup_id=backup_id) else: vol = self._vol_in_request_body() body = {"volume": vol} res_dict = self.controller.create(req, body=body) ex = self._expected_vol_from_controller( req_version=req.api_version_request) self.assertEqual(ex, res_dict) context = req.environ['cinder.context'] kwargs = self._expected_volume_api_create_kwargs( req_version=req.api_version_request) if max_ver >= mv.VOLUME_CREATE_FROM_BACKUP: get_backup.assert_called_once_with(self.controller.backup_api, context, backup_id) kwargs.update({'backup': v2_fakes.fake_backup_get(None, context, backup_id)}) create.assert_called_once_with( self.controller.volume_api, context, vol['size'], v2_fakes.DEFAULT_VOL_NAME, v2_fakes.DEFAULT_VOL_DESCRIPTION, **kwargs) def test_volume_creation_with_scheduler_hints(self): vol = self._vol_in_request_body(availability_zone=None) vol.pop('group_id') body = {"volume": vol, "OS-SCH-HNT:scheduler_hints": { 'different_host': [fake.UUID1, fake.UUID2]}} req = webob.Request.blank('/v3/%s/volumes' % fake.PROJECT_ID) req.method = 'POST' req.headers['Content-Type'] = 'application/json' req.body = jsonutils.dump_as_bytes(body) res = req.get_response(fakes.wsgi_app( fake_auth_context=self.ctxt)) res_dict = jsonutils.loads(res.body) self.assertEqual(HTTPStatus.ACCEPTED, res.status_int) self.assertIn('id', res_dict['volume']) @ddt.data('fake_host', '', 1234, ' ') def test_volume_creation_invalid_scheduler_hints(self, invalid_hints): vol = self._vol_in_request_body() vol.pop('group_id') body = {"volume": vol, "OS-SCH-HNT:scheduler_hints": { 'different_host': invalid_hints}} req = fakes.HTTPRequest.blank('/v3/volumes') self.assertRaises(exception.ValidationError, self.controller.create, req, body=body) @ddt.data({'size': 'a'}, {'size': ''}, {'size': 0}, {'size': 2 ** 31}) def test_volume_creation_fails_with_invalid_parameters( self, vol_body): body = {"volume": vol_body} req = fakes.HTTPRequest.blank('/v3/volumes') self.assertRaises(exception.ValidationError, self.controller.create, req, body=body) 
def test_volume_creation_fails_with_additional_properties(self): body = {"volume": {"size": 1, "user_id": fake.USER_ID, "project_id": fake.PROJECT_ID}} req = fakes.HTTPRequest.blank('/v3/volumes') req.api_version_request = mv.get_api_version( mv.SUPPORT_VOLUME_SCHEMA_CHANGES) self.assertRaises(exception.ValidationError, self.controller.create, req, body=body) def test_volume_update_without_vol_data(self): body = {"volume": {}} req = fakes.HTTPRequest.blank('/v3/volumes/%s' % fake.VOLUME_ID) req.api_version_request = mv.get_api_version( mv.SUPPORT_VOLUME_SCHEMA_CHANGES) self.assertRaises(exception.ValidationError, self.controller.update, req, fake.VOLUME_ID, body=body) @ddt.data({'s': 'ea895e29-8485-4930-bbb8-c5616a309c0e'}, ['ea895e29-8485-4930-bbb8-c5616a309c0e'], 42) def test_volume_creation_fails_with_invalid_snapshot_type(self, value): snapshot_id = value vol = self._vol_in_request_body(snapshot_id=snapshot_id) body = {"volume": vol} req = fakes.HTTPRequest.blank('/v3/volumes') # Raise 400 when snapshot has not uuid type. self.assertRaises(exception.ValidationError, self.controller.create, req, body=body) @ddt.data({'source_volid': 1}, {'source_volid': []}, {'consistencygroup_id': 1}, {'consistencygroup_id': []}) def test_volume_creation_fails_with_invalid_uuids(self, updated_uuids): vol = self._vol_in_request_body() vol.update(updated_uuids) body = {"volume": vol} req = fakes.HTTPRequest.blank('/v2/volumes') # Raise 400 for resource requested with invalid uuids. self.assertRaises(exception.ValidationError, self.controller.create, req, body=body) @ddt.data(mv.get_prior_version(mv.RESOURCE_FILTER), mv.RESOURCE_FILTER, mv.LIKE_FILTER) @mock.patch.object(volume_api.API, 'check_volume_filters', mock.Mock()) @mock.patch.object(api_utils, 'add_visible_admin_metadata', mock.Mock()) @mock.patch('cinder.api.common.reject_invalid_filters') def test_list_volume_with_general_filter(self, version, mock_update): req = fakes.HTTPRequest.blank('/v3/volumes', version=version) self.controller.index(req) if version >= mv.RESOURCE_FILTER: support_like = True if version == mv.LIKE_FILTER else False mock_update.assert_called_once_with(req.environ['cinder.context'], mock.ANY, 'volume', support_like) @ddt.data({'admin': True, 'version': mv.VOLUME_DETAIL_PROVIDER_ID}, {'admin': False, 'version': mv.VOLUME_DETAIL_PROVIDER_ID}, {'admin': True, 'version': mv.get_prior_version(mv.VOLUME_DETAIL_PROVIDER_ID)}, {'admin': False, 'version': mv.get_prior_version(mv.VOLUME_DETAIL_PROVIDER_ID)}) @ddt.unpack def test_volume_show_provider_id(self, admin, version): self.mock_object(volume_api.API, 'get', v2_fakes.fake_volume_api_get) self.mock_object(db.sqlalchemy.api, '_volume_type_get_full', v2_fakes.fake_volume_type_get) req = fakes.HTTPRequest.blank('/v3/volumes/%s' % fake.VOLUME_ID, version=version) if admin: admin_ctx = context.RequestContext(fake.USER_ID, fake.PROJECT_ID, True) req.environ['cinder.context'] = admin_ctx res_dict = self.controller.show(req, fake.VOLUME_ID) req_version = req.api_version_request # provider_id is in view if min version is greater than or equal to # VOLUME_DETAIL_PROVIDER_ID for admin. 
if req_version.matches(mv.VOLUME_DETAIL_PROVIDER_ID, None) and admin: self.assertIn('provider_id', res_dict['volume']) else: self.assertNotIn('provider_id', res_dict['volume']) @ddt.data(*ENCRYPTION_KEY_ID_IN_DETAILS) @ddt.unpack def test_volume_show_with_encryption_key_id(self, expected_in_details, encryption_key_id, version): volume = test_utils.create_volume(self.ctxt, testcase_instance=self, volume_type_id=None, encryption_key_id=encryption_key_id) req = fakes.HTTPRequest.blank('/v3/volumes/%s' % volume.id, version=version) volume_details = self.controller.show(req, volume.id)['volume'] if expected_in_details: self.assertIn('encryption_key_id', volume_details) else: self.assertNotIn('encryption_key_id', volume_details) @ddt.data( (True, True, mv.USE_QUOTA), (True, False, mv.USE_QUOTA), (False, True, mv.get_prior_version(mv.USE_QUOTA)), (False, False, mv.get_prior_version(mv.USE_QUOTA)), ) @ddt.unpack def test_volume_show_with_use_quota(self, present, value, microversion): volume = test_utils.create_volume(self.ctxt, volume_type_id=None, use_quota=value) req = fakes.HTTPRequest.blank('/v3/volumes/%s' % volume.id, version=microversion) volume_details = self.controller.show(req, volume.id)['volume'] if present: self.assertIs(value, volume_details['consumes_quota']) else: self.assertNotIn('consumes_quota', volume_details) def _fake_create_volume(self, size=1): vol = { 'display_name': 'fake_volume1', 'status': 'available', 'size': size } volume = objects.Volume(context=self.ctxt, **vol) volume.create() return volume def _fake_create_snapshot(self, volume_id, volume_size=1): snap = { 'display_name': 'fake_snapshot1', 'status': 'available', 'volume_id': volume_id, 'volume_size': volume_size } snapshot = objects.Snapshot(context=self.ctxt, **snap) snapshot.create() return snapshot @mock.patch.object(objects.Volume, 'get_latest_snapshot') @mock.patch.object(volume_api.API, 'get_volume') def test_volume_revert_with_snapshot_not_found(self, mock_volume, mock_latest): fake_volume = self._fake_create_volume() mock_volume.return_value = fake_volume mock_latest.side_effect = exception.VolumeSnapshotNotFound(volume_id= 'fake_id') req = fakes.HTTPRequest.blank('/v3/volumes/fake_id/revert') req.headers = mv.get_mv_header(mv.VOLUME_REVERT) req.api_version_request = mv.get_api_version( mv.VOLUME_REVERT) self.assertRaises(webob.exc.HTTPBadRequest, self.controller.revert, req, 'fake_id', {'revert': {'snapshot_id': 'fake_snapshot_id'}}) @mock.patch.object(objects.Volume, 'get_latest_snapshot') @mock.patch.object(volume_api.API, 'get_volume') def test_volume_revert_with_snapshot_not_match(self, mock_volume, mock_latest): fake_volume = self._fake_create_volume() mock_volume.return_value = fake_volume fake_snapshot = self._fake_create_snapshot(fake.UUID1) mock_latest.return_value = fake_snapshot req = fakes.HTTPRequest.blank('/v3/volumes/fake_id/revert') req.headers = mv.get_mv_header(mv.VOLUME_REVERT) req.api_version_request = mv.get_api_version( mv.VOLUME_REVERT) self.assertRaises(webob.exc.HTTPBadRequest, self.controller.revert, req, 'fake_id', {'revert': {'snapshot_id': 'fake_snapshot_id'}}) @mock.patch.object(objects.Volume, 'get_latest_snapshot') @mock.patch('cinder.objects.base.' 
'CinderPersistentObject.update_single_status_where') @mock.patch.object(volume_api.API, 'get_volume') def test_volume_revert_update_status_failed(self, mock_volume, mock_update, mock_latest): fake_volume = self._fake_create_volume() fake_snapshot = self._fake_create_snapshot(fake_volume['id']) mock_volume.return_value = fake_volume mock_latest.return_value = fake_snapshot req = fakes.HTTPRequest.blank('/v3/volumes/%s/revert' % fake_volume['id']) req.headers = mv.get_mv_header(mv.VOLUME_REVERT) req.api_version_request = mv.get_api_version( mv.VOLUME_REVERT) req.environ['cinder.context'] = self.ctxt # update volume's status failed mock_update.side_effect = [False, True] self.assertRaises(webob.exc.HTTPConflict, self.controller.revert, req, fake_volume['id'], {'revert': {'snapshot_id': fake_snapshot['id']}}) # update snapshot's status failed mock_update.side_effect = [True, False] self.assertRaises(webob.exc.HTTPConflict, self.controller.revert, req, fake_volume['id'], {'revert': {'snapshot_id': fake_snapshot['id']}}) @mock.patch.object(objects.Volume, 'get_latest_snapshot') @mock.patch.object(volume_api.API, 'get_volume') def test_volume_revert_with_not_equal_size(self, mock_volume, mock_latest): fake_volume = self._fake_create_volume(size=2) fake_snapshot = self._fake_create_snapshot(fake_volume['id'], volume_size=1) mock_volume.return_value = fake_volume mock_latest.return_value = fake_snapshot req = fakes.HTTPRequest.blank('/v3/volumes/%s/revert' % fake_volume['id']) req.headers = mv.get_mv_header(mv.VOLUME_REVERT) req.api_version_request = mv.get_api_version( mv.VOLUME_REVERT) self.assertRaises(webob.exc.HTTPBadRequest, self.controller.revert, req, fake_volume['id'], {'revert': {'snapshot_id': fake_snapshot['id']}}) def test_view_get_attachments(self): fake_volume = self._fake_create_volume() fake_volume['attach_status'] = fields.VolumeAttachStatus.ATTACHING att_time = datetime.datetime(2017, 8, 31, 21, 55, 7, tzinfo=iso8601.UTC) a1 = { 'id': fake.UUID1, 'volume_id': fake.UUID2, 'instance': None, 'attached_host': None, 'mountpoint': None, 'attach_time': None, 'attach_status': fields.VolumeAttachStatus.ATTACHING } a2 = { 'id': fake.UUID3, 'volume_id': fake.UUID4, 'instance_uuid': fake.UUID5, 'attached_host': 'host1', 'mountpoint': 'na', 'attach_time': att_time, 'attach_status': fields.VolumeAttachStatus.ATTACHED } attachment1 = objects.VolumeAttachment(self.ctxt, **a1) attachment2 = objects.VolumeAttachment(self.ctxt, **a2) atts = {'objects': [attachment1, attachment2]} attachments = objects.VolumeAttachmentList(self.ctxt, **atts) fake_volume['volume_attachment'] = attachments # get_attachments should only return attachments with the # attached status = ATTACHED attachments = ViewBuilder()._get_attachments(fake_volume, True) self.assertEqual(1, len(attachments)) self.assertEqual(fake.UUID3, attachments[0]['attachment_id']) self.assertEqual(fake.UUID4, attachments[0]['volume_id']) self.assertEqual(fake.UUID5, attachments[0]['server_id']) self.assertEqual('host1', attachments[0]['host_name']) self.assertEqual('na', attachments[0]['device']) self.assertEqual(att_time, attachments[0]['attached_at']) # When admin context is false (non-admin), host_name will be None attachments = ViewBuilder()._get_attachments(fake_volume, False) self.assertIsNone(attachments[0]['host_name']) @ddt.data(('created_at=gt:', 0), ('created_at=lt:', 2)) @ddt.unpack def test_volume_index_filter_by_created_at_with_gt_and_lt(self, change, expect_result): self._create_volume_with_glance_metadata() change_time = 
timeutils.utcnow() + datetime.timedelta(minutes=1) req = fakes.HTTPRequest.blank(("/v3/volumes?%s%s") % (change, change_time)) req.environ['cinder.context'] = self.ctxt req.headers = mv.get_mv_header(mv.VOLUME_TIME_COMPARISON_FILTER) req.api_version_request = mv.get_api_version( mv.VOLUME_TIME_COMPARISON_FILTER) res_dict = self.controller.index(req) volumes = res_dict['volumes'] self.assertEqual(expect_result, len(volumes)) @ddt.data(('updated_at=gt:', 0), ('updated_at=lt:', 1)) @ddt.unpack def test_vol_filter_by_updated_at_with_gt_and_lt(self, change, result): vols = self._create_volume_with_glance_metadata() change_time = vols[1].updated_at req = fakes.HTTPRequest.blank(("/v3/volumes?%s%s") % (change, change_time)) req.environ['cinder.context'] = self.ctxt req.headers = mv.get_mv_header(mv.VOLUME_TIME_COMPARISON_FILTER) req.api_version_request = mv.get_api_version( mv.VOLUME_TIME_COMPARISON_FILTER) res_dict = self.controller.index(req) volumes = res_dict['volumes'] self.assertEqual(result, len(volumes)) @ddt.data(('updated_at=eq:', 1, fake.VOLUME2_ID), ('updated_at=neq:', 1, fake.VOLUME_ID)) @ddt.unpack def test_vol_filter_by_updated_at_with_eq_and_neq(self, change, result, expected_volume_id): vols = self._create_volume_with_glance_metadata() change_time = vols[1].updated_at req = fakes.HTTPRequest.blank(("/v3/volumes?%s%s") % (change, change_time)) req.environ['cinder.context'] = self.ctxt req.headers = mv.get_mv_header(mv.VOLUME_TIME_COMPARISON_FILTER) req.api_version_request = mv.get_api_version( mv.VOLUME_TIME_COMPARISON_FILTER) res_dict = self.controller.index(req) volumes = res_dict['volumes'] self.assertEqual(result, len(volumes)) self.assertEqual(expected_volume_id, volumes[0]['id']) @ddt.data('created_at', 'updated_at') def test_volume_filter_by_time_with_invaild_time(self, change): self._create_volume_with_glance_metadata() change_time = '123' req = fakes.HTTPRequest.blank(("/v3/volumes?%s=%s") % (change, change_time)) req.environ['cinder.context'] = self.ctxt req.headers = mv.get_mv_header(mv.VOLUME_TIME_COMPARISON_FILTER) req.api_version_request = mv.get_api_version( mv.VOLUME_TIME_COMPARISON_FILTER) self.assertRaises(webob.exc.HTTPBadRequest, self.controller.index, req) def test_volume_index_filter_by_time_with_lte_and_gte(self): vols = self._create_volume_with_glance_metadata() change_since = vols[1].updated_at change_before = timeutils.utcnow() + datetime.timedelta(minutes=1) req = fakes.HTTPRequest.blank(("/v3/volumes?updated_at=lte:%s&" "updated_at=gte:%s") % (change_before, change_since)) req.environ['cinder.context'] = self.ctxt req.headers = mv.get_mv_header(mv.VOLUME_TIME_COMPARISON_FILTER) req.api_version_request = mv.get_api_version( mv.VOLUME_TIME_COMPARISON_FILTER) res_dict = self.controller.index(req) volumes = res_dict['volumes'] self.assertEqual(1, len(volumes)) self.assertEqual(vols[1].id, volumes[0]['id']) def test_create_volume_with_multiattach_param(self): """Tests creating a volume with multiattach=True but no multiattach volume type. This test verifies that providing the multiattach parameter will error out the request since it is removed and the recommended way is to create a multiattach volume using a multiattach volume type. 
""" req = fakes.HTTPRequest.blank('/v3/volumes') body = {'volume': { 'name': 'test name', 'description': 'test desc', 'size': 1, 'multiattach': True}} exc = self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create, req, body=body) self.assertIn("multiattach parameter has been removed", exc.explanation) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/api/v3/test_workers.py0000664000175000017500000001766400000000000022700 0ustar00zuulzuul00000000000000# Copyright (c) 2016 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from http import HTTPStatus from unittest import mock import ddt from oslo_serialization import jsonutils import webob from cinder.api import microversions as mv from cinder.api.v3 import router as router_v3 from cinder.api.v3 import workers from cinder.common import constants from cinder import context from cinder import objects from cinder.tests.unit.api import fakes from cinder.tests.unit import fake_constants as fake from cinder.tests.unit import test SERVICES = ( [objects.Service(id=1, host='host1', binary=constants.VOLUME_BINARY, cluster_name='mycluster'), objects.Service(id=2, host='host2', binary=constants.VOLUME_BINARY, cluster_name='mycluster')], [objects.Service(id=3, host='host3', binary=constants.VOLUME_BINARY, cluster_name='mycluster'), objects.Service(id=4, host='host4', binary=constants.VOLUME_BINARY, cluster_name='mycluster')], ) def app(): # no auth, just let environ['cinder.context'] pass through api = router_v3.APIRouter() mapper = fakes.urlmap.URLMap() mapper['/v3'] = api return mapper @ddt.ddt class WorkersTestCase(test.TestCase): """Tes Case for the cleanup of Workers entries.""" def setUp(self): super(WorkersTestCase, self).setUp() self.context = context.RequestContext(user_id=None, project_id=fake.PROJECT_ID, is_admin=True, read_deleted='no', overwrite=False) self.controller = workers.create_resource() def _get_resp_post(self, body, version=mv.WORKERS_CLEANUP, ctxt=None): """Helper to execute a POST workers API call.""" req = webob.Request.blank('/v3/%s/workers/cleanup' % fake.PROJECT_ID) req.method = 'POST' req.headers['Content-Type'] = 'application/json' req.headers['OpenStack-API-Version'] = 'volume ' + version req.environ['cinder.context'] = ctxt or self.context req.body = jsonutils.dump_as_bytes(body) res = req.get_response(app()) return res @mock.patch('cinder.scheduler.rpcapi.SchedulerAPI.work_cleanup') def test_cleanup_old_api_version(self, rpc_mock): res = self._get_resp_post({}, mv.get_prior_version(mv.WORKERS_CLEANUP)) self.assertEqual(HTTPStatus.NOT_FOUND, res.status_code) rpc_mock.assert_not_called() @mock.patch('cinder.scheduler.rpcapi.SchedulerAPI.work_cleanup') def test_cleanup_not_authorized(self, rpc_mock): ctxt = context.RequestContext(user_id=None, project_id=fake.PROJECT_ID, is_admin=False, read_deleted='no', overwrite=False) res = self._get_resp_post({}, ctxt=ctxt) self.assertEqual(HTTPStatus.FORBIDDEN, 
res.status_code) rpc_mock.assert_not_called() @ddt.data({'binary': 'nova-scheduler'}, {'disabled': 'sure'}, {'is_up': 'nop'}, {'resource_type': 'service'}, {'resource_id': 'non UUID'}, {'is_up': 11}, {'disabled': 11}, {'is_up': ' true '}, {'disabled': ' false '}) @mock.patch('cinder.scheduler.rpcapi.SchedulerAPI.work_cleanup') def test_cleanup_wrong_param(self, body, rpc_mock): res = self._get_resp_post(body) self.assertEqual(HTTPStatus.BAD_REQUEST, res.status_code) expected = 'Invalid input' self.assertIn(expected, res.json['badRequest']['message']) rpc_mock.assert_not_called() @ddt.data({'fake_key': 'value'}) @mock.patch('cinder.scheduler.rpcapi.SchedulerAPI.work_cleanup') def test_cleanup_with_additional_properties(self, body, rpc_mock): res = self._get_resp_post(body) self.assertEqual(HTTPStatus.BAD_REQUEST, res.status_code) expected = 'Additional properties are not allowed' self.assertIn(expected, res.json['badRequest']['message']) rpc_mock.assert_not_called() def _expected_services(self, cleaning, unavailable): def service_view(service): return {'id': service.id, 'host': service.host, 'binary': service.binary, 'cluster_name': service.cluster_name} return {'cleaning': [service_view(s) for s in cleaning], 'unavailable': [service_view(s) for s in unavailable]} @ddt.data({'service_id': 10}, {'binary': 'cinder-volume'}, {'binary': 'cinder-scheduler'}, {'disabled': 'false'}, {'is_up': 'no'}, {'resource_type': 'Volume'}, {'resource_id': fake.VOLUME_ID, 'host': 'host@backend'}, {'host': 'host@backend#pool'}, {'cluster_name': 'cluster@backend'}, {'cluster_name': 'cluster@backend#pool'}, {'service_id': None}, {'cluster_name': None}, {'host': None}, {'resource_type': ''}, {'resource_type': None}, {'resource_id': None}) @mock.patch('cinder.scheduler.rpcapi.SchedulerAPI.work_cleanup', return_value=SERVICES) def test_cleanup_params(self, body, rpc_mock): res = self._get_resp_post(body) self.assertEqual(HTTPStatus.ACCEPTED, res.status_code) rpc_mock.assert_called_once_with(self.context, mock.ANY) cleanup_request = rpc_mock.call_args[0][1] for key, value in body.items(): if key in ('disabled', 'is_up'): if value is not None: value = value == 'true' self.assertEqual(value, getattr(cleanup_request, key)) self.assertEqual(self._expected_services(*SERVICES), res.json) @mock.patch('cinder.db.worker_get_all', return_value=[mock.Mock(service_id=1, resource_type='Volume')]) @mock.patch('cinder.scheduler.rpcapi.SchedulerAPI.work_cleanup', return_value=SERVICES) def test_cleanup_missing_location_ok(self, rpc_mock, worker_mock): res = self._get_resp_post({'resource_id': fake.VOLUME_ID}) self.assertEqual(HTTPStatus.ACCEPTED, res.status_code) rpc_mock.assert_called_once_with(self.context, mock.ANY) cleanup_request = rpc_mock.call_args[0][1] self.assertEqual(fake.VOLUME_ID, cleanup_request.resource_id) self.assertEqual(1, cleanup_request.service_id) self.assertEqual('Volume', cleanup_request.resource_type) self.assertEqual(self._expected_services(*SERVICES), res.json) @mock.patch('cinder.scheduler.rpcapi.SchedulerAPI.work_cleanup') def test_cleanup_missing_location_fail_none(self, rpc_mock): res = self._get_resp_post({'resource_id': fake.VOLUME_ID}) self.assertEqual(HTTPStatus.BAD_REQUEST, res.status_code) self.assertIn('Invalid input', res.json['badRequest']['message']) rpc_mock.assert_not_called() @mock.patch('cinder.scheduler.rpcapi.SchedulerAPI.work_cleanup', return_value=[1, 2]) def test_cleanup_missing_location_fail_multiple(self, rpc_mock): res = self._get_resp_post({'resource_id': fake.VOLUME_ID}) 
self.assertEqual(HTTPStatus.BAD_REQUEST, res.status_code) self.assertIn('Invalid input', res.json['badRequest']['message']) rpc_mock.assert_not_called() ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315577.1911192 cinder-27.0.0/cinder/tests/unit/api/views/0000775000175000017500000000000000000000000020362 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/api/views/__init__.py0000664000175000017500000000000000000000000022461 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/api/views/test_versions.py0000664000175000017500000001223000000000000023641 0ustar00zuulzuul00000000000000# Copyright 2015 Clinton Knight # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import copy import ddt from cinder.api.views import versions from cinder.tests.unit import test class FakeRequest(object): def __init__(self, application_url): self.application_url = application_url URL_BASE = 'http://localhost/volume/' URL_BASE_NO_SLASH = 'http://localhost/volume' FAKE_HREF = URL_BASE + 'v1/' FAKE_VERSIONS = { "v1.0": { "id": "v1.0", "status": "CURRENT", "version": "1.1", "min_version": "1.0", "updated": "2015-07-30T11:33:21Z", "links": [ { "rel": "describedby", "type": "text/html", "href": 'http://docs.openstack.org/', }, ], "media-types": [ { "base": "application/json", "type": "application/vnd.openstack.share+json;version=1", } ], }, } FAKE_LINKS = [ { "rel": "describedby", "type": "text/html", "href": 'http://docs.openstack.org/', }, { 'rel': 'self', 'href': FAKE_HREF }, ] @ddt.ddt class ViewBuilderTestCase(test.TestCase): def _get_builder(self): request = FakeRequest(URL_BASE) return versions.get_view_builder(request) def _get_builder_no_slash(self): request = FakeRequest(URL_BASE_NO_SLASH) return versions.get_view_builder(request) def test_build_versions(self): self.mock_object(versions.ViewBuilder, '_build_links', return_value=FAKE_LINKS) fake_versions = copy.deepcopy(FAKE_VERSIONS) result = self._get_builder().build_versions(fake_versions) result_no_slash = self._get_builder_no_slash().build_versions( fake_versions) expected = {'versions': list(fake_versions.values())} expected['versions'][0]['links'] = FAKE_LINKS self.assertEqual(expected, result) self.assertEqual(expected, result_no_slash) def test_build_version(self): self.mock_object(versions.ViewBuilder, '_build_links', return_value=FAKE_LINKS) result = self._get_builder()._build_version(FAKE_VERSIONS['v1.0']) result_no_slash = self._get_builder_no_slash()._build_version( FAKE_VERSIONS['v1.0']) expected = copy.deepcopy(FAKE_VERSIONS['v1.0']) expected['links'] = FAKE_LINKS self.assertEqual(expected, result) self.assertEqual(expected, result_no_slash) def test_build_links(self): self.mock_object(versions.ViewBuilder, '_generate_href', return_value=FAKE_HREF) result = 
self._get_builder()._build_links(FAKE_VERSIONS['v1.0']) result_no_slash = self._get_builder_no_slash()._build_links( FAKE_VERSIONS['v1.0']) self.assertEqual(FAKE_LINKS, result) self.assertEqual(FAKE_LINKS, result_no_slash) def test_generate_href_defaults(self): result = self._get_builder()._generate_href() result_no_slash = self._get_builder_no_slash()._generate_href() self.assertEqual(URL_BASE + 'v3/', result) self.assertEqual(URL_BASE + 'v3/', result_no_slash) @ddt.data( ('v2', None, URL_BASE + 'v2/'), ('/v2/', None, URL_BASE + 'v2/'), ('/v2/', 'fake_path', URL_BASE + 'v2/fake_path'), ('/v2/', '/fake_path/', URL_BASE + 'v2/fake_path/'), ) @ddt.unpack def test_generate_href_no_path(self, version, path, expected): result = self._get_builder()._generate_href(version=version, path=path) result_no_slash = self._get_builder_no_slash()._generate_href( version=version, path=path) self.assertEqual(expected, result) self.assertEqual(expected, result_no_slash) @ddt.data( ('http://1.1.1.1/', 'http://1.1.1.1/'), ('http://localhost/', 'http://localhost/'), ('http://localhost/volume/', 'http://localhost/volume/'), ('http://1.1.1.1/v1/', 'http://1.1.1.1/'), ('http://1.1.1.1/volume/v1/', 'http://1.1.1.1/volume/'), ('http://1.1.1.1/v1', 'http://1.1.1.1/'), ('http://1.1.1.1/v11', 'http://1.1.1.1/'), ) @ddt.unpack def test_get_base_url_without_version(self, base_url, base_url_no_version): request = FakeRequest(base_url) builder = versions.get_view_builder(request) result = builder._get_base_url_without_version() self.assertEqual(base_url_no_version, result) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315577.1911192 cinder-27.0.0/cinder/tests/unit/attachments/0000775000175000017500000000000000000000000020767 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/attachments/__init__.py0000664000175000017500000000000000000000000023066 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/attachments/test_attachments_api.py0000664000175000017500000006201500000000000025550 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
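# NOTE: informal summary only, not part of the upstream test module.  The
# cases below exercise the attachment workflow exposed by
# cinder.volume.api.API; roughly (variable names here are illustrative):
#
#     api = volume_api.API()
#     attachment = api.attachment_create(ctxt, vref, instance_uuid)
#     #   -> attachment is left in 'reserved' attach_status
#     api.attachment_update(ctxt, attachment, connector)
#     #   -> the backend's connection_info is stored on the attachment
#     api.attachment_delete(ctxt, attachment)
#     #   -> a merely reserved attachment is deleted without any RPC call
#
# The assertions in each test are the authoritative description of the
# behaviour; this sketch is only a reading aid.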
from unittest import mock from cinder.compute import nova from cinder import context from cinder import db from cinder import exception from cinder import objects from cinder.tests.unit.api.v2 import fakes as v2_fakes from cinder.tests.unit import fake_constants as fake from cinder.tests.unit import fake_volume from cinder.tests.unit import test from cinder.tests.unit import utils as tests_utils from cinder.volume import api as volume_api from cinder.volume import configuration as conf class AttachmentManagerTestCase(test.TestCase): """Attachment related test for volume/api.py.""" def setUp(self): """Setup test class.""" super(AttachmentManagerTestCase, self).setUp() self.configuration = mock.Mock(conf.Configuration) self.context = context.get_admin_context() self.context.user_id = fake.USER_ID self.project_id = fake.PROJECT3_ID self.context.project_id = self.project_id self.volume_api = volume_api.API() self.user_context = context.RequestContext( user_id=fake.USER_ID, project_id=fake.PROJECT3_ID) def test_attachment_create_no_connector(self): """Test attachment_create no connector.""" volume_params = {'status': 'available'} vref = tests_utils.create_volume(self.context, **volume_params) aref = self.volume_api.attachment_create(self.context, vref, fake.UUID2) self.assertEqual(fake.UUID2, aref.instance_uuid) self.assertIsNone(aref.attach_time) self.assertEqual('reserved', aref.attach_status) self.assertEqual('null', aref.attach_mode) self.assertEqual(vref.id, aref.volume_id) self.assertEqual({}, aref.connection_info) @mock.patch('cinder.volume.rpcapi.VolumeAPI.attachment_update') def test_attachment_create_with_connector(self, mock_rpc_attachment_update): """Test attachment_create with connector.""" volume_params = {'status': 'available'} connection_info = {'fake_key': 'fake_value', 'fake_key2': ['fake_value1', 'fake_value2']} mock_rpc_attachment_update.return_value = connection_info vref = tests_utils.create_volume(self.context, **volume_params) connector = {'fake': 'connector'} attachment = self.volume_api.attachment_create(self.context, vref, fake.UUID2, connector) mock_rpc_attachment_update.assert_called_once_with(self.context, mock.ANY, connector, mock.ANY) new_attachment = objects.VolumeAttachment.get_by_id(self.context, attachment.id) self.assertEqual(connection_info, new_attachment.connection_info) @mock.patch.object(volume_api.API, 'attachment_deletion_allowed') @mock.patch('cinder.volume.rpcapi.VolumeAPI.attachment_delete') def test_attachment_delete_reserved(self, mock_rpc_attachment_delete, mock_allowed): """Test attachment_delete with reserved.""" mock_allowed.return_value = None volume_params = {'status': 'available'} vref = tests_utils.create_volume(self.context, **volume_params) aref = self.volume_api.attachment_create(self.context, vref, fake.UUID2) aobj = objects.VolumeAttachment.get_by_id(self.context, aref.id) self.assertEqual('reserved', aref.attach_status) self.assertEqual(vref.id, aref.volume_id) self.volume_api.attachment_delete(self.context, aobj) mock_allowed.assert_called_once_with(self.context, aobj) # Since it's just reserved and never finalized, we should never make an # rpc call mock_rpc_attachment_delete.assert_not_called() @mock.patch.object(volume_api.API, 'attachment_deletion_allowed') @mock.patch('cinder.volume.rpcapi.VolumeAPI.attachment_delete') @mock.patch('cinder.volume.rpcapi.VolumeAPI.attachment_update') def test_attachment_create_update_and_delete( self, mock_rpc_attachment_update, mock_rpc_attachment_delete, mock_allowed): """Test 
attachment_delete.""" mock_allowed.return_value = None volume_params = {'status': 'available'} connection_info = {'fake_key': 'fake_value', 'fake_key2': ['fake_value1', 'fake_value2']} mock_rpc_attachment_update.return_value = connection_info vref = tests_utils.create_volume(self.context, **volume_params) aref = self.volume_api.attachment_create(self.context, vref, fake.UUID2) aref = objects.VolumeAttachment.get_by_id(self.context, aref.id) vref = objects.Volume.get_by_id(self.context, vref.id) connector = {'fake': 'connector', 'host': 'somehost'} self.volume_api.attachment_update(self.context, aref, connector) aref = objects.VolumeAttachment.get_by_id(self.context, aref.id) self.assertEqual(connection_info, aref.connection_info) # We mock the actual call that updates the status # so force it here values = {'volume_id': vref.id, 'volume_host': vref.host, 'attach_status': 'attached', 'instance_uuid': fake.UUID2} aref = db.volume_attach(self.context, values) aref = objects.VolumeAttachment.get_by_id(self.context, aref.id) self.assertEqual(vref.id, aref.volume_id) self.volume_api.attachment_delete(self.context, aref) mock_allowed.assert_called_once_with(self.context, aref) mock_rpc_attachment_delete.assert_called_once_with(self.context, aref.id, mock.ANY) def test_additional_attachment_create_no_connector(self): """Test attachment_create no connector.""" volume_params = {'status': 'available'} vref = tests_utils.create_volume(self.context, **volume_params) aref = self.volume_api.attachment_create(self.context, vref, fake.UUID2) self.assertEqual(fake.UUID2, aref.instance_uuid) self.assertIsNone(aref.attach_time) self.assertEqual('reserved', aref.attach_status) self.assertEqual('null', aref.attach_mode) self.assertEqual(vref.id, aref.volume_id) self.assertEqual({}, aref.connection_info) self.assertRaises(exception.InvalidVolume, self.volume_api.attachment_create, self.context, vref, fake.UUID1) self.volume_api.attachment_create(self.context, vref, fake.UUID2) vref = objects.Volume.get_by_id(self.context, vref.id) self.assertEqual(2, len(vref.volume_attachment)) @mock.patch.object(volume_api.API, 'attachment_deletion_allowed') @mock.patch('cinder.volume.rpcapi.VolumeAPI.attachment_update') def test_attachment_create_reserve_delete( self, mock_rpc_attachment_update, mock_allowed): mock_allowed.return_value = None volume_params = {'status': 'available'} connector = { "initiator": "iqn.1993-08.org.debian:01:cad181614cec", "ip": "192.168.1.20", "platform": "x86_64", "host": "tempest-1", "os_type": "linux2", "multipath": False} connection_info = {'fake_key': 'fake_value', 'fake_key2': ['fake_value1', 'fake_value2']} mock_rpc_attachment_update.return_value = connection_info vref = tests_utils.create_volume(self.context, **volume_params) aref = self.volume_api.attachment_create(self.context, vref, fake.UUID2, connector=connector) vref = objects.Volume.get_by_id(self.context, vref.id) # Need to set the status here because our mock isn't doing it for us vref.status = 'in-use' vref.save() # Now a second attachment acting as a reserve self.volume_api.attachment_create(self.context, vref, fake.UUID2) # We should now be able to delete the original attachment that gave us # 'in-use' status, and in turn we should revert to the outstanding # attachments reserve self.volume_api.attachment_delete(self.context, aref) mock_allowed.assert_called_once_with(self.context, aref) vref = objects.Volume.get_by_id(self.context, vref.id) self.assertEqual('reserved', vref.status) @mock.patch.object(volume_api.API, 
'attachment_deletion_allowed') def test_reserve_reserve_delete(self, mock_allowed): """Test that we keep reserved status across multiple reserves.""" mock_allowed.return_value = None volume_params = {'status': 'available'} vref = tests_utils.create_volume(self.context, **volume_params) aref = self.volume_api.attachment_create(self.context, vref, fake.UUID2) vref = objects.Volume.get_by_id(self.context, vref.id) self.assertEqual('reserved', vref.status) self.volume_api.attachment_create(self.context, vref, fake.UUID2) vref = objects.Volume.get_by_id(self.context, vref.id) self.assertEqual('reserved', vref.status) self.volume_api.attachment_delete(self.context, aref) mock_allowed.assert_called_once_with(self.context, aref) vref = objects.Volume.get_by_id(self.context, vref.id) self.assertEqual('reserved', vref.status) self.assertEqual(1, len(vref.volume_attachment)) def test_attachment_create_readonly_volume(self): """Test attachment_create on a readonly volume.""" volume_params = {'status': 'available'} vref = tests_utils.create_volume(self.context, **volume_params) self.volume_api.update_readonly_flag(self.context, vref, True) aref = self.volume_api.attachment_create(self.context, vref, fake.UUID2) self.assertEqual(fake.UUID2, aref.instance_uuid) self.assertIsNone(aref.attach_time) self.assertEqual('reserved', aref.attach_status) self.assertEqual('ro', aref.attach_mode) self.assertEqual(vref.id, aref.volume_id) self.assertEqual({}, aref.connection_info) def test_attachment_create_volume_in_error_state(self): """Test attachment_create volume in error state.""" volume_params = {'status': 'available'} vref = tests_utils.create_volume(self.context, **volume_params) vref.status = "error" self.assertRaises(exception.InvalidVolume, self.volume_api.attachment_create, self.context, vref, fake.UUID2) def test_attachment_update_volume_in_error_state(self): """Test attachment_update volumem in error state.""" volume_params = {'status': 'available'} vref = tests_utils.create_volume(self.context, **volume_params) aref = self.volume_api.attachment_create(self.context, vref, fake.UUID2) self.assertEqual(fake.UUID2, aref.instance_uuid) self.assertIsNone(aref.attach_time) self.assertEqual('reserved', aref.attach_status) self.assertEqual(vref.id, aref.volume_id) self.assertEqual({}, aref.connection_info) vref.status = 'error' vref.save() connector = {'fake': 'connector', 'host': 'somehost'} caught_exc = self.assertRaises( exception.ResourceConflict, self.volume_api.attachment_update, self.context, aref, connector) self.assertEqual(409, caught_exc.code) @mock.patch('cinder.db.sqlalchemy.api.volume_attachment_update', return_value={}) @mock.patch('cinder.volume.rpcapi.VolumeAPI.attachment_update', return_value={}) @mock.patch.object(db.sqlalchemy.api, '_volume_type_get', v2_fakes.fake_volume_type_get) def test_attachment_update_duplicate(self, mock_va_update, mock_db_upd): volume_params = {'status': 'available'} vref = tests_utils.create_volume(self.context, deleted=0, **volume_params) tests_utils.attach_volume(self.context, vref.id, fake.UUID1, 'somehost', 'somemountpoint') # Update volume with another attachment tests_utils.attach_volume(self.context, vref.id, fake.UUID2, 'somehost2', 'somemountpoint2') vref.refresh() # This attachment will collide with the first connector = {'host': 'somehost'} vref.volume_attachment[0]['connector'] = {'host': 'somehost'} vref.volume_attachment[0]['connection_info'] = {'c': 'd'} with mock.patch('cinder.objects.Volume.get_by_id', return_value=vref): with 
mock.patch.object(self.volume_api.volume_rpcapi, 'attachment_update') as m_au: caught_exc = self.assertRaises( exception.ResourceConflict, self.volume_api.attachment_update, self.context, vref.volume_attachment[1], connector) self.assertEqual(409, caught_exc.code) m_au.assert_not_called() mock_va_update.assert_not_called() mock_db_upd.assert_not_called() def test_attachment_create_creating_volume(self): """Test attachment_create on a creating volume.""" volume_params = {'status': 'creating'} vref = tests_utils.create_volume(self.context, **volume_params) self.assertRaises(exception.InvalidVolume, self.volume_api.attachment_create, self.context, vref, fake.UUID1) def _get_attachment(self, with_instance_id=True): volume = fake_volume.fake_volume_obj(self.context, id=fake.VOLUME_ID) volume.volume_attachment = objects.VolumeAttachmentList() attachment = fake_volume.volume_attachment_ovo( self.context, volume_id=fake.VOLUME_ID, instance_uuid=fake.INSTANCE_ID if with_instance_id else None, connection_info='{"a": 1}') attachment.volume = volume return attachment @mock.patch('cinder.compute.nova.API.get_server_volume') def test_attachment_deletion_allowed_service_call(self, mock_get_server): """Service calls are never redirected.""" self.context.service_roles = ['reader', 'service'] attachment = self._get_attachment() self.volume_api.attachment_deletion_allowed(self.context, attachment) mock_get_server.assert_not_called() @mock.patch('cinder.compute.nova.API.get_server_volume') def test_attachment_deletion_allowed_service_call_different_service_name( self, mock_get_server): """Service calls are never redirected and role can be different. In this test we support 2 different service roles, the standard service and a custom one called captain_awesome, and passing the custom one works as expected. 
""" self.override_config('service_token_roles', ['service', 'captain_awesome'], group='keystone_authtoken') self.context.service_roles = ['reader', 'captain_awesome'] attachment = self._get_attachment() self.volume_api.attachment_deletion_allowed(self.context, attachment) mock_get_server.assert_not_called() @mock.patch('cinder.compute.nova.API.get_server_volume') def test_attachment_deletion_allowed_no_instance(self, mock_get_server): """Attachments with no instance id are never redirected.""" attachment = self._get_attachment(with_instance_id=False) self.volume_api.attachment_deletion_allowed(self.context, attachment) mock_get_server.assert_not_called() @mock.patch('cinder.compute.nova.API.get_server_volume') def test_attachment_deletion_allowed_no_conn_info(self, mock_get_server): """Attachments with no connection information are never redirected.""" attachment = self._get_attachment(with_instance_id=False) attachment.connection_info = None self.volume_api.attachment_deletion_allowed(self.context, attachment) mock_get_server.assert_not_called() def test_attachment_deletion_allowed_no_attachment(self): """For users don't allow operation with no attachment reference.""" self.assertRaises(exception.ConflictNovaUsingAttachment, self.volume_api.attachment_deletion_allowed, self.context, None) @mock.patch('cinder.objects.VolumeAttachment.get_by_id', side_effect=exception.VolumeAttachmentNotFound(filter='')) def test_attachment_deletion_allowed_attachment_id_not_found(self, mock_get): """For users don't allow if attachment cannot be found.""" attachment = self._get_attachment(with_instance_id=False) attachment.connection_info = None self.assertRaises(exception.ConflictNovaUsingAttachment, self.volume_api.attachment_deletion_allowed, self.context, fake.ATTACHMENT_ID) mock_get.assert_called_once_with(self.context, fake.ATTACHMENT_ID) def test_attachment_deletion_allowed_volume_no_attachments(self): """For users allow if volume has no attachments.""" volume = tests_utils.create_volume(self.context) self.volume_api.attachment_deletion_allowed(self.context, None, volume) def test_attachment_deletion_allowed_multiple_attachment(self): """For users don't allow if volume has multiple attachments.""" attachment = self._get_attachment() volume = attachment.volume volume.volume_attachment = objects.VolumeAttachmentList( objects=[attachment, attachment]) self.assertRaises(exception.ConflictNovaUsingAttachment, self.volume_api.attachment_deletion_allowed, self.context, None, volume) @mock.patch('cinder.compute.nova.API.get_server_volume') def test_attachment_deletion_allowed_vm_not_found(self, mock_get_server): """Don't reject if instance doesn't exist""" mock_get_server.side_effect = nova.API.NotFound(404) attachment = self._get_attachment() self.volume_api.attachment_deletion_allowed(self.context, attachment) mock_get_server.assert_called_once_with(self.context, fake.INSTANCE_ID, fake.VOLUME_ID) @mock.patch('cinder.compute.nova.API.get_server_volume') def test_attachment_deletion_allowed_attachment_from_volume( self, mock_get_server): """Don't reject if instance doesn't exist""" mock_get_server.side_effect = nova.API.NotFound(404) attachment = self._get_attachment() volume = attachment.volume volume.volume_attachment = objects.VolumeAttachmentList( objects=[attachment]) self.volume_api.attachment_deletion_allowed(self.context, None, volume) mock_get_server.assert_called_once_with(self.context, fake.INSTANCE_ID, volume.id) @mock.patch('cinder.objects.VolumeAttachment.get_by_id') def 
test_attachment_deletion_allowed_mismatched_volume_and_attach_id( self, mock_get_attatchment): """Reject if volume and attachment don't match.""" attachment = self._get_attachment() volume = attachment.volume volume.volume_attachment = objects.VolumeAttachmentList( objects=[attachment]) attachment2 = self._get_attachment() attachment2.volume_id = attachment.volume.id = fake.VOLUME2_ID self.assertRaises(exception.InvalidInput, self.volume_api.attachment_deletion_allowed, self.context, attachment2.id, volume) mock_get_attatchment.assert_called_once_with(self.context, attachment2.id) @mock.patch('cinder.objects.VolumeAttachment.get_by_id') @mock.patch('cinder.compute.nova.API.get_server_volume') def test_attachment_deletion_allowed_not_found_attachment_id( self, mock_get_server, mock_get_attachment): """Don't reject if instance doesn't exist""" mock_get_server.side_effect = nova.API.NotFound(404) mock_get_attachment.return_value = self._get_attachment() self.volume_api.attachment_deletion_allowed(self.context, fake.ATTACHMENT_ID) mock_get_attachment.assert_called_once_with(self.context, fake.ATTACHMENT_ID) mock_get_server.assert_called_once_with(self.context, fake.INSTANCE_ID, fake.VOLUME_ID) @mock.patch('cinder.compute.nova.API.get_server_volume') def test_attachment_deletion_allowed_mismatch_id(self, mock_get_server): """Don't reject if attachment id on nova doesn't match""" mock_get_server.return_value.attachment_id = fake.ATTACHMENT2_ID attachment = self._get_attachment() self.volume_api.attachment_deletion_allowed(self.context, attachment) mock_get_server.assert_called_once_with(self.context, fake.INSTANCE_ID, fake.VOLUME_ID) @mock.patch('cinder.compute.nova.API.get_server_volume') def test_attachment_deletion_allowed_user_call_fails(self, mock_get_server): """Fail user calls""" attachment = self._get_attachment() mock_get_server.return_value.attachment_id = attachment.id self.assertRaises(exception.ConflictNovaUsingAttachment, self.volume_api.attachment_deletion_allowed, self.context, attachment) mock_get_server.assert_called_once_with(self.context, fake.INSTANCE_ID, fake.VOLUME_ID) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/attachments/test_attachments_manager.py0000664000175000017500000003227600000000000026417 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
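# NOTE: informal summary only, not part of the upstream test module.  These
# tests cover the same attachment workflow as test_attachments_api.py but one
# layer down, at cinder.volume.manager.VolumeManager: attachment_update()
# returns the driver connection info dict (see the 'driver_volume_type' /
# 'access_mode' expectations below), and attachment_delete() decides whether
# to terminate the connection and remove the export, while leaving the
# resource status untouched (that part is handled at the API layer).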
from unittest import mock import ddt from oslo_config import cfg from oslo_utils import importutils from cinder import context from cinder import db from cinder.objects import fields from cinder.objects import volume_attachment from cinder.tests.unit.api.v2 import fakes as v2_fakes from cinder.tests.unit import fake_constants as fake from cinder.tests.unit import test from cinder.tests.unit import utils as tests_utils from cinder.volume import configuration as conf CONF = cfg.CONF @ddt.ddt class AttachmentManagerTestCase(test.TestCase): """Attachment related test for volume.manager.py.""" def setUp(self): """Setup test class.""" super(AttachmentManagerTestCase, self).setUp() self.manager = importutils.import_object(CONF.volume_manager) self.mock_object(self.manager, '_driver_shares_targets', return_value=False) self.configuration = mock.Mock(conf.Configuration) self.context = context.get_admin_context() self.context.user_id = fake.USER_ID self.project_id = fake.PROJECT3_ID self.context.project_id = self.project_id self.manager.driver.set_initialized() self.manager.stats = {'allocated_capacity_gb': 100, 'pools': {}} @ddt.data(False, True) @mock.patch.object(db.sqlalchemy.api, '_volume_type_get', v2_fakes.fake_volume_type_get) @mock.patch('cinder.db.sqlalchemy.api.volume_type_qos_specs_get') @mock.patch('cinder.volume.volume_types.get_volume_type_extra_specs') def test_attachment_update(self, enforce_mpath, get_extra_specs, mock_type_get): """Test attachment_update.""" volume_params = {'status': 'available'} connector = { "initiator": "iqn.1993-08.org.debian:01:cad181614cec", "ip": "192.168.1.20", "platform": "x86_64", "host": "tempest-1", "os_type": "linux2", "multipath": False, "enforce_multipath": enforce_mpath} vref = tests_utils.create_volume(self.context, **volume_params) self.manager.create_volume(self.context, vref) values = {'volume_id': vref.id, 'attached_host': vref.host, 'attach_status': 'reserved', 'instance_uuid': fake.UUID1, 'attach_mode': 'rw'} attachment_ref = db.volume_attach(self.context, values) with mock.patch.object( self.manager, '_notify_about_volume_usage'): expected = { 'encrypted': False, 'qos_specs': None, 'cacheable': False, 'access_mode': 'rw', 'driver_volume_type': 'iscsi', 'attachment_id': attachment_ref.id, 'enforce_multipath': enforce_mpath} get_extra_specs.return_value = {} self.assertEqual(expected, self.manager.attachment_update( self.context, vref, connector, attachment_ref.id)) expected = { 'encrypted': False, 'qos_specs': None, 'cacheable': True, 'access_mode': 'rw', 'driver_volume_type': 'iscsi', 'attachment_id': attachment_ref.id, 'enforce_multipath': enforce_mpath} get_extra_specs.return_value = {'cacheable': ' True'} self.assertEqual(expected, self.manager.attachment_update( self.context, vref, connector, attachment_ref.id)) new_attachment_ref = db.volume_attachment_get(self.context, attachment_ref.id) self.assertEqual(attachment_ref.instance_uuid, new_attachment_ref['instance_uuid']) self.assertEqual(connector['host'], new_attachment_ref['attached_host']) self.assertEqual('na', new_attachment_ref['mountpoint']) self.assertEqual('rw', new_attachment_ref['attach_mode']) new_volume_ref = db.volume_get(self.context, vref.id) self.assertEqual('attaching', new_volume_ref.status) self.assertEqual(fields.VolumeAttachStatus.ATTACHING, new_volume_ref.attach_status) def test_attachment_delete(self): """Test attachment_delete.""" volume_params = {'status': 'available'} vref = tests_utils.create_volume(self.context, **volume_params) 
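# The volume record created above is run through the manager's create flow
# (against the fake driver) before an attachment row is reserved for it.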
self.manager.create_volume(self.context, vref) values = {'volume_id': vref.id, 'volume_host': vref.host, 'attach_status': 'reserved', 'instance_uuid': fake.UUID1} attachment_ref = db.volume_attach(self.context, values) attachment_ref = db.volume_attachment_get( self.context, attachment_ref['id']) vref.refresh() expected_status = (vref.status, vref.attach_status, attachment_ref.attach_status) self.manager.attachment_delete(self.context, attachment_ref['id'], vref) # Manager doesn't change the resource status. It is changed on the API attachment_ref = db.volume_attachment_get(self.context, attachment_ref.id) vref.refresh() self.assertEqual( expected_status, (vref.status, vref.attach_status, attachment_ref.attach_status)) def test_attachment_delete_remove_export_fail(self): """attachment_delete removes attachment on remove_export failure.""" self.mock_object(self.manager.driver, 'remove_export', side_effect=Exception) # Report that the connection is not shared self.mock_object(self.manager, '_connection_terminate', return_value=False) vref = tests_utils.create_volume(self.context, status='in-use', attach_status='attached') values = {'volume_id': vref.id, 'volume_host': vref.host, 'attach_status': 'reserved', 'instance_uuid': fake.UUID1} attach = db.volume_attach(self.context, values) # Confirm the volume OVO has the attachment before the deletion vref.refresh() expected_vol_status = (vref.status, vref.attach_status) self.assertEqual(1, len(vref.volume_attachment)) self.manager.attachment_delete(self.context, attach.id, vref) # Manager doesn't change the resource status. It is changed on the API attachment = db.volume_attachment_get(self.context, attach.id) self.assertEqual(attach.attach_status, attachment.attach_status) vref = db.volume_get(self.context, vref.id) self.assertEqual(expected_vol_status, (vref.status, vref.attach_status)) def test_attachment_delete_multiple_attachments(self): volume_params = {'status': 'available'} vref = tests_utils.create_volume(self.context, **volume_params) attachment1 = volume_attachment.VolumeAttachment() attachment2 = volume_attachment.VolumeAttachment() attachment1.id = fake.UUID1 attachment2.id = fake.UUID2 @mock.patch('cinder.objects.VolumeAttachment.get_by_id', side_effect=[attachment1, attachment2]) @mock.patch.object(self.context, 'elevated') @mock.patch.object(self.manager, '_notify_about_volume_usage') @mock.patch.object(self.manager, '_connection_terminate') @mock.patch.object(self.manager.driver, 'remove_export') def _test(mock_rm_export, mock_con_term, mock_notify, mock_elevated, mock_get_attachment): mock_elevated.return_value = self.context mock_con_term.return_value = False # test single attachment. This should call # detach and remove_export vref.volume_attachment.objects.append(attachment1) self.manager.attachment_delete(self.context, attachment1.id, vref) mock_elevated.assert_called_once_with() mock_notify.assert_called_once_with(self.context, vref, "detach.start") mock_con_term.assert_called_once_with(self.context, vref, attachment1) mock_rm_export.assert_called_once_with(self.context, vref) # test more than 1 attachment. 
This should skip # detach and remove_export mock_con_term.return_value = True vref.volume_attachment.objects.append(attachment2) mock_elevated.reset_mock() mock_notify.reset_mock() mock_con_term.reset_mock() mock_rm_export.reset_mock() self.manager.attachment_delete(self.context, attachment2.id, vref) mock_elevated.assert_not_called() mock_notify.assert_called_once_with(self.context, vref, "detach.start") mock_con_term.assert_called_once_with(self.context, vref, attachment2) mock_rm_export.assert_not_called() _test() def test_connection_terminate_no_connector_force_false(self): # Tests that calling _connection_terminate with an attachment that # does not have a connector will not call the driver and return None # if the force flag is False. attachment = mock.MagicMock(connector={}) with mock.patch.object(self.manager.driver, '_initialized', create=True, new=True): with mock.patch.object(self.manager.driver, 'terminate_connection') as term_conn: has_shared_connection = self.manager._connection_terminate( self.context, mock.sentinel.volume, attachment) self.assertIsNone(has_shared_connection) term_conn.assert_not_called() def test_connection_terminate_no_connector_force_true(self): # Tests that calling _connection_terminate with an attachment that # does not have a connector will call the driver when force is True. volume = mock.MagicMock() attachment = mock.MagicMock(connector={}) with mock.patch.object(self.manager.driver, '_initialized', create=True, new=True): with mock.patch.object(self.manager.driver, 'terminate_connection') as term_conn: has_shared_connection = self.manager._connection_terminate( self.context, volume, attachment, force=True) self.assertFalse(has_shared_connection) term_conn.assert_called_once_with(volume, {}, force=True) @mock.patch('cinder.objects.VolumeAttachment.get_by_id') @mock.patch('cinder.volume.manager.VolumeManager.' '_notify_about_volume_usage') @mock.patch('cinder.volume.manager.VolumeManager._connection_terminate', return_value=None) @mock.patch('cinder.db.volume_detached') @mock.patch('cinder.db.volume_admin_metadata_delete') def test_attachment_delete_none_shared_connection(self, mock_meta_del, mock_vol_detached, mock_conn_term, mock_notify, mock_get_attachment): # Tests that attachment_delete does not call remove_export # if _connection_terminate returns None indicating there is nothing # to consider for the export. 
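# (A None result corresponds to the "no connector, force=False" case covered
# by test_connection_terminate_no_connector_force_false above, where the
# driver is never asked to terminate anything.)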
volume = mock.MagicMock() with mock.patch.object(self.manager.driver, '_initialized', create=True, new=True): with mock.patch.object(self.manager.driver, 'remove_export') as remove_export: self.manager.attachment_delete( self.context, mock.sentinel.attachment_id, volume) mock_get_attachment.assert_called_once_with( self.context, mock.sentinel.attachment_id) remove_export.assert_not_called() ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315577.1911192 cinder-27.0.0/cinder/tests/unit/backup/0000775000175000017500000000000000000000000017721 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/backup/__init__.py0000664000175000017500000000000000000000000022020 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315577.1951194 cinder-27.0.0/cinder/tests/unit/backup/drivers/0000775000175000017500000000000000000000000021377 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/backup/drivers/__init__.py0000664000175000017500000000000000000000000023476 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/backup/drivers/test_backup_ceph.py0000664000175000017500000022340500000000000025262 0ustar00zuulzuul00000000000000# Copyright 2013 Canonical Ltd. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Tests for Ceph backup service.""" import hashlib import json import os import subprocess import tempfile import threading from unittest import mock import ddt from os_brick.initiator import linuxrbd from oslo_concurrency import processutils from oslo_config import cfg from oslo_serialization import jsonutils from oslo_utils import units from cinder.backup import driver from cinder.backup.drivers import ceph from cinder import context from cinder import db from cinder import exception from cinder.i18n import _ from cinder import objects from cinder.tests.unit import fake_constants as fake from cinder.tests.unit import test import cinder.volume.drivers.rbd as rbd_driver # This is used to collect raised exceptions so that tests may check what was # raised. # NOTE: this must be initialised in test setUp(). RAISED_EXCEPTIONS = [] CONF = cfg.CONF class MockException(Exception): def __init__(self, *args, **kwargs): RAISED_EXCEPTIONS.append(self.__class__) class MockImageNotFoundException(MockException): """Used as mock for rbd.ImageNotFound.""" class MockImageBusyException(MockException): """Used as mock for rbd.ImageBusy.""" class MockObjectNotFoundException(MockException): """Used as mock for rados.MockObjectNotFoundException.""" def common_mocks(f): """Decorator to set mocks common to all tests. 
The point of doing these mocks here is so that we don't accidentally set mocks that can't/don't get unset. """ def _common_inner_inner1(inst, *args, **kwargs): # NOTE(dosaboy): mock Popen to, by default, raise Exception in order to # ensure that any test ending up in a subprocess fails # if not properly mocked. @mock.patch('subprocess.Popen', spec=True) # NOTE(dosaboy): mock out eventlet.sleep() so that it does nothing. @mock.patch('eventlet.sleep', spec=True) @mock.patch('time.time', spec=True) # NOTE(dosaboy): set spec to empty object so that hasattr calls return # False by default. @mock.patch('cinder.backup.drivers.ceph.rbd') @mock.patch('cinder.backup.drivers.ceph.rados') def _common_inner_inner2(mock_rados, mock_rbd, mock_time, mock_sleep, mock_popen): mock_time.side_effect = inst.time_inc mock_popen.side_effect = Exception inst.mock_rados = mock_rados inst.mock_rbd = mock_rbd inst.mock_rbd.ImageBusy = MockImageBusyException inst.mock_rbd.ImageNotFound = MockImageNotFoundException inst.mock_rados.ObjectNotFound = MockObjectNotFoundException inst.service.rbd = inst.mock_rbd inst.service.rados = inst.mock_rados return f(inst, *args, **kwargs) return _common_inner_inner2() return _common_inner_inner1 @ddt.ddt class BackupCephTestCase(test.TestCase): """Test case for ceph backup driver.""" def _create_volume_db_entry(self, id, size): vol = {'id': id, 'size': size, 'status': 'available', 'volume_type_id': self.vt['id']} return db.volume_create(self.ctxt, vol)['id'] def _create_backup_db_entry(self, backupid, volid, size, userid=fake.USER_ID, projectid=fake.PROJECT_ID): backup = {'id': backupid, 'size': size, 'volume_id': volid, 'user_id': userid, 'project_id': projectid} return db.backup_create(self.ctxt, backup)['id'] def _create_parent_backup_object(self): tmp_backup_id = fake.BACKUP3_ID self._create_backup_db_entry(tmp_backup_id, self.volume_id, self.volume_size) tmp_backup = objects.Backup.get_by_id(self.ctxt, tmp_backup_id) tmp_backup.service_metadata = 'mock_base_name' return tmp_backup def time_inc(self): self.counter += 1 return self.counter def _get_wrapped_rbd_io(self, rbd_image): rbd_meta = linuxrbd.RBDImageMetadata(rbd_image, 'pool_foo', 'user_foo', 'conf_foo') return linuxrbd.RBDVolumeIOWrapper(rbd_meta) def _setup_mock_popen(self, retval=None, p1hook=None, p2hook=None): class MockPopen(object): hooks = [p2hook, p1hook] def __init__(mock_inst, cmd, *args, **kwargs): self.callstack.append('popen_init') mock_inst.stdout = mock.Mock() mock_inst.stdout.close = mock.Mock() mock_inst.stdout.close.side_effect = \ lambda *args: self.callstack.append('stdout_close') mock_inst.returncode = 0 hook = mock_inst.__class__.hooks.pop() if hook is not None: hook() def communicate(mock_inst): self.callstack.append('communicate') return retval def wait(mock_inst): self.callstack.append('wait') return retval subprocess.Popen.side_effect = MockPopen def setUp(self): global RAISED_EXCEPTIONS RAISED_EXCEPTIONS = [] super(BackupCephTestCase, self).setUp() self.ctxt = context.get_admin_context() # Create volume. self.volume_size = 1 self.volume_id = fake.VOLUME_ID self._create_volume_db_entry(self.volume_id, self.volume_size) self.volume = db.volume_get(self.ctxt, self.volume_id) # Create backup of volume. 
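# (setUp builds a small backup hierarchy below: self.backup, a parent backup
# object, and self.alt_backup whose parent points back at self.backup; the
# incremental/differential paths exercised later rely on that relationship.)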
self.backup_id = fake.BACKUP_ID self._create_backup_db_entry(self.backup_id, self.volume_id, self.volume_size) self.backup = objects.Backup.get_by_id(self.ctxt, self.backup_id) self.backup.container = "backups" # Create parent backup of volume self.parent_backup = self._create_parent_backup_object() # Create alternate backup with parent self.alt_backup_id = fake.BACKUP2_ID self._create_backup_db_entry(self.alt_backup_id, self.volume_id, self.volume_size) self.alt_backup = objects.Backup.get_by_id(self.ctxt, self.alt_backup_id) base_name = "volume-%s.backup.%s" % (self.volume_id, self.backup_id) self.alt_backup.container = "backups" self.alt_backup.parent = self.backup self.alt_backup.parent.service_metadata = '{"base": "%s"}' % base_name # Create alternate volume. self.alt_volume_id = fake.VOLUME2_ID self._create_volume_db_entry(self.alt_volume_id, self.volume_size) self.alt_volume = db.volume_get(self.ctxt, self.alt_volume_id) self.chunk_size = 1024 self.num_chunks = 128 self.data_length = self.num_chunks * self.chunk_size self.checksum = hashlib.sha256() # Create a file with some data in it. self.volume_file = tempfile.NamedTemporaryFile() self.addCleanup(self.volume_file.close) for _i in range(0, self.num_chunks): data = os.urandom(self.chunk_size) self.checksum.update(data) self.volume_file.write(data) self.volume_file.seek(0) # Always trigger an exception if a command is executed since it should # always be dealt with gracefully. At time of writing on rbd # export/import-diff is executed and if they fail we expect to find # alternative means of backing up. mock_exec = mock.Mock() mock_exec.side_effect = processutils.ProcessExecutionError self.service = ceph.CephBackupDriver(self.ctxt, execute=mock_exec) # Ensure that time.time() always returns more than the last time it was # called to avoid div by zero errors. self.counter = float(0) self.callstack = [] @common_mocks def test_get_rbd_support(self): del self.service.rbd.RBD_FEATURE_LAYERING del self.service.rbd.RBD_FEATURE_STRIPINGV2 del self.service.rbd.RBD_FEATURE_EXCLUSIVE_LOCK del self.service.rbd.RBD_FEATURE_JOURNALING del self.service.rbd.RBD_FEATURE_OBJECT_MAP del self.service.rbd.RBD_FEATURE_FAST_DIFF self.assertFalse(hasattr(self.service.rbd, 'RBD_FEATURE_LAYERING')) self.assertFalse(hasattr(self.service.rbd, 'RBD_FEATURE_STRIPINGV2')) self.assertFalse(hasattr(self.service.rbd, 'RBD_FEATURE_EXCLUSIVE_LOCK')) self.assertFalse(hasattr(self.service.rbd, 'RBD_FEATURE_JOURNALING')) self.assertFalse(hasattr(self.service.rbd, 'RBD_FEATURE_OBJECT_MAP')) self.assertFalse(hasattr(self.service.rbd, 'RBD_FEATURE_FAST_DIFF')) oldformat, features = self.service._get_rbd_support() self.assertTrue(oldformat) self.assertEqual(0, features) self.service.rbd.RBD_FEATURE_LAYERING = 1 oldformat, features = self.service._get_rbd_support() self.assertFalse(oldformat) self.assertEqual(1, features) self.service.rbd.RBD_FEATURE_STRIPINGV2 = 2 oldformat, features = self.service._get_rbd_support() self.assertFalse(oldformat) self.assertEqual(1 | 2, features) # initially, backup_ceph_image_journals = False. test that # the flags are defined, but that they are not returned. self.service.rbd.RBD_FEATURE_EXCLUSIVE_LOCK = 4 oldformat, features = self.service._get_rbd_support() self.assertFalse(oldformat) self.assertEqual(1 | 2, features) self.service.rbd.RBD_FEATURE_JOURNALING = 64 oldformat, features = self.service._get_rbd_support() self.assertFalse(oldformat) self.assertEqual(1 | 2, features) # test that the config setting properly sets the FEATURE bits. 
# because journaling requires exclusive-lock, these are set # at the same time. CONF.set_override("backup_ceph_image_journals", True) oldformat, features = self.service._get_rbd_support() self.assertFalse(oldformat) self.assertEqual(1 | 2 | 4 | 64, features) # # test that FAST_DIFF is enabled if supported by RBD # this also enables OBJECT_MAP as required by Ceph # self.service.rbd.RBD_FEATURE_OBJECT_MAP = 8 self.service.rbd.RBD_FEATURE_FAST_DIFF = 16 oldformat, features = self.service._get_rbd_support() self.assertFalse(oldformat) self.assertEqual(1 | 2 | 4 | 8 | 16 | 64, features) @common_mocks def test_get_backup_snap_name(self): snap_name = 'backup.%s.snap.3824923.1412' % (fake.VOLUME3_ID) def get_backup_snaps(inst, *args): return [{'name': 'backup.%s.snap.6423868.2342' % (fake.UUID1), 'backup_id': fake.BACKUP2_ID}, {'name': snap_name, 'backup_id': self.backup_id}] with mock.patch.object(self.service, 'get_backup_snaps'): name = self.service._get_backup_snap_name(self.service.rbd.Image(), 'base_foo', self.backup_id) self.assertIsNone(name) with mock.patch.object(self.service, 'get_backup_snaps') as \ mock_get_backup_snaps: mock_get_backup_snaps.side_effect = get_backup_snaps name = self.service._get_backup_snap_name(self.service.rbd.Image(), 'base_foo', self.backup_id) self.assertEqual(snap_name, name) self.assertTrue(mock_get_backup_snaps.called) @common_mocks def test_get_backup_snaps(self): image = self.mock_rbd.Image.return_value image.list_snaps.return_value = [ {'name': 'backup.%s.snap.6423868.2342' % (fake.UUID1)}, {'name': 'backup.%s.wambam.6423868.2342' % (fake.UUID2)}, {'name': 'backup.%s.snap.1321319.3235' % (fake.UUID3)}, {'name': 'bbbackup.%s.snap.1321319.3235' % (fake.UUID4)}, {'name': 'backup.%s.snap.3824923.1412' % (fake.UUID5)}] snaps = self.service.get_backup_snaps(image) self.assertEqual(3, len(snaps)) @common_mocks def test_transfer_data_from_rbd_to_file(self): def fake_read(offset, length): self.volume_file.seek(offset) return self.volume_file.read(length) self.mock_rbd.Image.return_value.read.side_effect = fake_read self.mock_rbd.Image.return_value.size.return_value = self.data_length with tempfile.NamedTemporaryFile() as test_file: self.volume_file.seek(0) rbd_io = self._get_wrapped_rbd_io(self.service.rbd.Image()) self.service._transfer_data(rbd_io, 'src_foo', test_file, 'dest_foo', self.data_length) checksum = hashlib.sha256() test_file.seek(0) for _c in range(0, self.num_chunks): checksum.update(test_file.read(self.chunk_size)) # Ensure the files are equal self.assertEqual(checksum.digest(), self.checksum.digest()) @common_mocks def test_transfer_data_from_rbd_to_rbd(self): def fake_read(offset, length): self.volume_file.seek(offset) return self.volume_file.read(length) def mock_write_data(data, offset): checksum.update(data) test_file.write(data) rbd1 = mock.Mock() rbd1.read.side_effect = fake_read rbd1.size.return_value = os.fstat(self.volume_file.fileno()).st_size rbd2 = mock.Mock() rbd2.write.side_effect = mock_write_data with tempfile.NamedTemporaryFile() as test_file: self.volume_file.seek(0) checksum = hashlib.sha256() src_rbd_io = self._get_wrapped_rbd_io(rbd1) dest_rbd_io = self._get_wrapped_rbd_io(rbd2) self.service._transfer_data(src_rbd_io, 'src_foo', dest_rbd_io, 'dest_foo', self.data_length) # Ensure the files are equal self.assertEqual(checksum.digest(), self.checksum.digest()) @common_mocks def test_transfer_data_from_file_to_rbd(self): def mock_write_data(data, offset): checksum.update(data) test_file.write(data) 
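# Routing the mocked image's write() through mock_write_data (above) captures
# everything the driver sends to RBD, so its checksum can be compared with
# the source file's checksum at the end of the test.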
self.mock_rbd.Image.return_value.write.side_effect = mock_write_data with tempfile.NamedTemporaryFile() as test_file: self.volume_file.seek(0) checksum = hashlib.sha256() rbd_io = self._get_wrapped_rbd_io(self.service.rbd.Image()) self.service._transfer_data(self.volume_file, 'src_foo', rbd_io, 'dest_foo', self.data_length) # Ensure the files are equal self.assertEqual(checksum.digest(), self.checksum.digest()) @common_mocks def test_transfer_data_from_file_to_file(self): with tempfile.NamedTemporaryFile() as test_file: self.volume_file.seek(0) checksum = hashlib.sha256() self.service._transfer_data(self.volume_file, 'src_foo', test_file, 'dest_foo', self.data_length) checksum = hashlib.sha256() test_file.seek(0) for _c in range(0, self.num_chunks): checksum.update(test_file.read(self.chunk_size)) # Ensure the files are equal self.assertEqual(checksum.digest(), self.checksum.digest()) @common_mocks def test_backup_volume_from_file(self): checksum = hashlib.sha256() thread_dict = {} def mock_write_data(data, offset): checksum.update(data) thread_dict['thread'] = threading.current_thread() test_file.write(data) self.service.rbd.Image.return_value.write.side_effect = mock_write_data with mock.patch.object(self.service, '_backup_metadata'): with mock.patch.object(self.service, '_discard_bytes'): with tempfile.NamedTemporaryFile() as test_file: self.service.backup(self.alt_backup, self.volume_file) # Ensure the files are equal self.assertEqual(checksum.digest(), self.checksum.digest()) self.assertTrue(self.service.rbd.Image.return_value.write.called) self.assertNotEqual(threading.current_thread(), thread_dict['thread']) @common_mocks def test_get_backup_base_name_without_backup_param(self): """Test _get_backup_base_name without backup.""" name = self.service._get_backup_base_name(self.volume_id) self.assertEqual("volume-%s.backup.base" % (self.volume_id), name) @common_mocks def test_get_backup_base_name_w_backup_and_no_parent(self): """Test _get_backup_base_name with backup and no parent.""" name = self.service._get_backup_base_name(self.volume_id, self.backup) self.assertEqual("volume-%s.backup.%s" % (self.volume_id, self.backup.id), name) @common_mocks def test_get_backup_base_name_w_backup_and_parent(self): """Test _get_backup_base_name with backup and parent.""" name = self.service._get_backup_base_name(self.volume_id, self.alt_backup) base_name = json.loads(self.alt_backup.parent.service_metadata) self.assertEqual(base_name["base"], name) @common_mocks @mock.patch('fcntl.fcntl', spec=True) def test_backup_volume_from_rbd(self, mock_fnctl): """Test full RBD backup generated successfully.""" backup_name = self.service._get_backup_base_name(self.volume_id, self.alt_backup) def mock_write_data(): self.volume_file.seek(0) data = self.volume_file.read(self.data_length) self.callstack.append('write') checksum.update(data) test_file.write(data) def mock_read_data(): self.callstack.append('read') return self.volume_file.read(self.data_length) self._setup_mock_popen(['out', 'err'], p1hook=mock_read_data, p2hook=mock_write_data) self.mock_rbd.RBD.list = mock.Mock() self.mock_rbd.RBD.list.return_value = [backup_name] with mock.patch.object(self.service, '_backup_metadata'): with mock.patch.object(self.service, 'get_backup_snaps') as \ mock_get_backup_snaps: with mock.patch.object(self.service, '_full_backup') as \ mock_full_backup: with mock.patch.object(self.service, '_try_delete_base_image'): with tempfile.NamedTemporaryFile() as test_file: checksum = hashlib.sha256() image = 
self.service.rbd.Image() meta = linuxrbd.RBDImageMetadata(image, 'pool_foo', 'user_foo', 'conf_foo') rbdio = linuxrbd.RBDVolumeIOWrapper(meta) mock_get_backup_snaps.return_value = ( [{'name': 'backup.mock.snap.153464362.12'}, {'name': 'backup.mock.snap.15341241.90'}, {'name': 'backup.mock.snap.199994362.10'}]) output = self.service.backup(self.alt_backup, rbdio) base_name = '{"base": "%s"}' % backup_name service_meta = {'service_metadata': base_name} self.assertDictEqual(service_meta, output) self.assertEqual(['popen_init', 'read', 'popen_init', 'write', 'stdout_close', 'communicate', 'wait'], self.callstack) self.assertFalse(mock_full_backup.called) self.assertFalse(mock_get_backup_snaps.called) # Ensure the files are equal self.assertEqual(checksum.digest(), self.checksum.digest()) @common_mocks def test_backup_snapshot_lifecycle(self): with mock.patch.object(self.service, '_rbd_diff_transfer'), \ mock.patch.object(self.service, "get_backup_snaps") \ as mock_get_backup_snaps: CONF.set_override('backup_ceph_max_snapshots', 1) mocked_snaps = [ {'name': 'backup.mock.snap.153464362.12'}, {'name': 'backup.mock.snap.225341241.90'}, {'name': 'backup.mock.snap.399994362.10'}] mock_get_backup_snaps.return_value = mocked_snaps self.mock_rbd.RBD.remove_snap = mock.Mock() image = self.service.rbd.Image() meta = linuxrbd.RBDImageMetadata(image, 'pool_foo', 'user_foo', 'conf_foo') rbdio = linuxrbd.RBDVolumeIOWrapper(meta) rbdio.seek(0) self.service._backup_rbd(self.backup, rbdio, self.volume.name, self.volume.size) self.assertEqual(2, self.mock_rbd.Image.return_value. remove_snap.call_count) expected_calls = [mock.call('backup.mock.snap.153464362.12'), mock.call('backup.mock.snap.225341241.90')] self.mock_rbd.Image.return_value.remove_snap.\ assert_has_calls(expected_calls) @common_mocks def test_backup_volume_from_rbd_set_parent_id(self): with mock.patch.object(self.service, '_backup_rbd') as \ mock_backup_rbd, mock.patch.object(self.service, '_backup_metadata'): mock_backup_rbd.return_value = {'service_metadata': 'base_name'} image = self.service.rbd.Image() meta = linuxrbd.RBDImageMetadata(image, 'pool_foo', 'user_foo', 'conf_foo') rbdio = linuxrbd.RBDVolumeIOWrapper(meta) output = self.service.backup(self.backup, rbdio) self.assertDictEqual({'service_metadata': 'base_name'}, output) @common_mocks def test_backup_volume_from_rbd_got_exception(self): base_name = self.service._get_backup_base_name(self.volume_id, self.alt_backup) self.mock_rbd.RBD().list.return_value = [base_name] with mock.patch.object(self.service, 'get_backup_snaps'), \ mock.patch.object(self.service, '_rbd_diff_transfer') as \ mock_rbd_diff_transfer: def mock_rbd_diff_transfer_side_effect(src_name, src_pool, dest_name, dest_pool, src_user, src_conf, dest_user, dest_conf, src_snap, from_snap): raise exception.BackupRBDOperationFailed(_('mock')) # Raise a pseudo exception.BackupRBDOperationFailed. 
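# (The test below expects this simulated failure to propagate out of
# service.backup() as BackupRBDOperationFailed.)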
mock_rbd_diff_transfer.side_effect \ = mock_rbd_diff_transfer_side_effect with mock.patch.object(self.service, '_full_backup'), \ mock.patch.object(self.service, '_try_delete_base_image'): with mock.patch.object(self.service, '_backup_metadata'): with mock.patch.object(self.service, 'get_backup_snaps') as \ mock_get_backup_snaps: image = self.service.rbd.Image() meta = linuxrbd.RBDImageMetadata(image, 'pool_foo', 'user_foo', 'conf_foo') rbdio = linuxrbd.RBDVolumeIOWrapper(meta) mock_get_backup_snaps.return_value = ( [{'name': 'backup.mock.snap.153464362.12', 'backup_id': 'mock_parent_id'}, {'name': 'backup.mock.snap.199994362.10', 'backup_id': 'mock'}]) self.assertRaises(exception.BackupRBDOperationFailed, self.service.backup, self.alt_backup, rbdio) @common_mocks def test_backup_rbd_set_parent_id(self): base_name = self.service._get_backup_base_name(self.volume_id, self.alt_backup) vol_name = self.volume.name vol_length = self.volume.size self.mock_rbd.RBD().list.return_value = [base_name] with mock.patch.object(self.service, '_snap_exists'), \ mock.patch.object(self.service, '_get_backup_snap_name') as \ mock_get_backup_snap_name, \ mock.patch.object(self.service, '_rbd_diff_transfer'): image = self.service.rbd.Image() mock_get_backup_snap_name.return_value = 'mock_snap_name' meta = linuxrbd.RBDImageMetadata(image, 'pool_foo', 'user_foo', 'conf_foo') rbdio = linuxrbd.RBDVolumeIOWrapper(meta) rbdio.seek(0) output = self.service._backup_rbd(self.alt_backup, rbdio, vol_name, vol_length) base_name = '{"base": "%s"}' % base_name self.assertEqual({'service_metadata': base_name}, output) self.backup.parent_id = None @common_mocks def test_backup_rbd_without_parent_id(self): full_backup_name = self.service._get_backup_base_name(self.volume_id, self.alt_backup) vol_name = self.volume.name vol_length = self.volume.size with mock.patch.object(self.service, '_rbd_diff_transfer'), \ mock.patch.object(self.service, '_create_base_image') as \ mock_create_base_image, mock.patch.object( rbd_driver, 'RADOSClient') as mock_rados_client: client = mock.Mock() mock_rados_client.return_value.__enter__.return_value = client image = self.service.rbd.Image() meta = linuxrbd.RBDImageMetadata(image, 'pool_foo', 'user_foo', 'conf_foo') rbdio = linuxrbd.RBDVolumeIOWrapper(meta) rbdio.seek(0) output = self.service._backup_rbd(self.alt_backup, rbdio, vol_name, vol_length) mock_create_base_image.assert_called_with(full_backup_name, vol_length, client) base_name = '{"base": "%s"}' % full_backup_name self.assertEqual({'service_metadata': base_name}, output) @common_mocks @mock.patch('fcntl.fcntl', spec=True) def test_backup_volume_from_rbd_fail(self, mock_fnctl): """Test of when an exception occurs in an exception handler. In _backup_rbd(), after an exception.BackupRBDOperationFailed occurs in self._rbd_diff_transfer(), we want to check the process when the second exception occurs in self._try_delete_base_image(). 
""" backup_name = self.service._get_backup_base_name(self.volume_id, self.alt_backup) def mock_write_data(): self.volume_file.seek(0) data = self.volume_file.read(self.data_length) self.callstack.append('write') checksum.update(data) test_file.write(data) def mock_read_data(): self.callstack.append('read') return self.volume_file.read(self.data_length) self._setup_mock_popen(['out', 'err'], p1hook=mock_read_data, p2hook=mock_write_data) self.mock_rbd.RBD.list = mock.Mock() self.mock_rbd.RBD.list.return_value = [backup_name] with mock.patch.object(self.service, 'get_backup_snaps') as \ mock_get_backup_snaps: mock_get_backup_snaps.return_value = ( [{'name': 'backup.mock.snap.153464362.12'}, {'name': 'backup.mock.snap.199994362.10'}]) with mock.patch.object(self.service, '_rbd_diff_transfer') as \ mock_rbd_diff_transfer: def mock_rbd_diff_transfer_side_effect(src_name, src_pool, dest_name, dest_pool, src_user, src_conf, dest_user, dest_conf, src_snap, from_snap): raise exception.BackupRBDOperationFailed(_('mock')) # Raise a pseudo exception.BackupRBDOperationFailed. mock_rbd_diff_transfer.side_effect \ = mock_rbd_diff_transfer_side_effect with mock.patch.object(self.service, '_full_backup'), \ mock.patch.object(self.service, '_try_delete_base_image') as \ mock_try_delete_base_image: def mock_try_delete_base_image_side_effect(backup_id, base_name): raise self.service.rbd.ImageNotFound(_('mock')) # Raise a pesudo exception rbd.ImageNotFound. mock_try_delete_base_image.side_effect \ = mock_try_delete_base_image_side_effect with mock.patch.object(self.service, '_backup_metadata'): with tempfile.NamedTemporaryFile() as test_file: checksum = hashlib.sha256() image = self.service.rbd.Image() meta = linuxrbd.RBDImageMetadata(image, 'pool_foo', 'user_foo', 'conf_foo') rbdio = linuxrbd.RBDVolumeIOWrapper(meta) # We expect that the second exception is # notified. self.assertRaises( self.service.rbd.ImageNotFound, self.service.backup, self.alt_backup, rbdio) @common_mocks @mock.patch('fcntl.fcntl', spec=True) def test_backup_volume_from_rbd_fail2(self, mock_fnctl): """Test of when an exception occurs in an exception handler. In backup(), after an exception.BackupOperationError occurs in self._backup_metadata(), we want to check the process when the second exception occurs in self.delete_backup(). """ backup_name = self.service._get_backup_base_name(self.volume_id, self.alt_backup) def mock_write_data(): self.volume_file.seek(0) data = self.volume_file.read(self.data_length) self.callstack.append('write') checksum.update(data) test_file.write(data) def mock_read_data(): self.callstack.append('read') return self.volume_file.read(self.data_length) self._setup_mock_popen(['out', 'err'], p1hook=mock_read_data, p2hook=mock_write_data) self.mock_rbd.RBD.list = mock.Mock() self.mock_rbd.RBD.list.return_value = [backup_name] with mock.patch.object(self.service, 'get_backup_snaps') as \ mock_get_backup_snaps: mock_get_backup_snaps.return_value = ( [{'name': 'backup.mock.snap.153464362.12'}, {'name': 'backup.mock.snap.199994362.10'}]) with mock.patch.object(self.service, '_rbd_diff_transfer'), \ mock.patch.object(self.service, '_full_backup'), \ mock.patch.object(self.service, '_backup_metadata') as \ mock_backup_metadata: def mock_backup_metadata_side_effect(backup): raise exception.BackupOperationError(_('mock')) # Raise a pseudo exception.BackupOperationError. 
mock_backup_metadata.side_effect = ( mock_backup_metadata_side_effect) with mock.patch.object(self.service, 'delete_backup') as \ mock_delete: def mock_delete_side_effect(backup): raise self.service.rbd.ImageBusy() # Raise a pseudo exception rbd.ImageBusy. mock_delete.side_effect = mock_delete_side_effect with tempfile.NamedTemporaryFile() as test_file: checksum = hashlib.sha256() image = self.service.rbd.Image() meta = linuxrbd.RBDImageMetadata(image, 'pool_foo', 'user_foo', 'conf_foo') rbdio = linuxrbd.RBDVolumeIOWrapper(meta) # We expect that the second exception is # notified. self.assertRaises( self.service.rbd.ImageBusy, self.service.backup, self.alt_backup, rbdio) @common_mocks def test_backup_rbd_from_snap(self): backup_name = self.service._get_backup_base_name(self.volume_id) vol_name = self.volume['name'] vol_length = self.service._get_volume_size_bytes(self.volume) self.mock_rbd.RBD().list = mock.Mock() self.mock_rbd.RBD().list.return_value = ['mock'] with mock.patch.object(self.service, '_get_new_snap_name') as \ mock_get_new_snap_name: with mock.patch.object(self.service, 'get_backup_snaps') as \ mock_get_backup_snaps: with mock.patch.object(self.service, '_rbd_diff_transfer') as \ mock_rbd_diff_transfer: with mock.patch.object(self.service, '_get_backup_base_name') as \ mock_get_backup_base_name: mock_get_backup_base_name.return_value = ( backup_name) mock_get_backup_snaps.return_value = ( [{'name': 'backup.mock.snap.153464362.12'}, {'name': 'backup.mock.snap.15341241.90'}, {'name': 'backup.mock.snap.199994362.10'}]) mock_get_new_snap_name.return_value = 'new_snap' image = self.service.rbd.Image() meta = linuxrbd.RBDImageMetadata(image, 'pool_foo', 'user_foo', 'conf_foo') rbdio = linuxrbd.RBDVolumeIOWrapper(meta) rbdio.seek(0) self.service._backup_rbd(self.backup, rbdio, vol_name, vol_length) mock_rbd_diff_transfer.assert_called_with( vol_name, 'pool_foo', backup_name, self.backup.container, src_user='user_foo', src_conf='conf_foo', dest_conf='/etc/ceph/ceph.conf', dest_user='cinder', src_snap='new_snap', from_snap=None) @common_mocks def test_backup_rbd_from_snap2(self): base_name = self.service._get_backup_base_name(self.volume_id, self.alt_backup) vol_name = self.volume['name'] vol_length = self.service._get_volume_size_bytes(self.volume) self.mock_rbd.RBD().list = mock.Mock() self.mock_rbd.RBD().list.return_value = [base_name] with mock.patch.object(self.service, '_get_backup_base_name') as \ mock_get_backup_base_name: with mock.patch.object(self.service, '_rbd_diff_transfer') as \ mock_rbd_diff_transfer: with mock.patch.object(self.service, '_get_new_snap_name') as \ mock_get_new_snap_name: mock_get_backup_base_name.return_value = base_name mock_get_new_snap_name.return_value = 'new_snap' image = self.service.rbd.Image() meta = linuxrbd.RBDImageMetadata(image, 'pool_foo', 'user_foo', 'conf_foo') rbdio = linuxrbd.RBDVolumeIOWrapper(meta) rbdio.seek(0) self.service._backup_rbd(self.alt_backup, rbdio, vol_name, vol_length) mock_rbd_diff_transfer.assert_called_with( vol_name, 'pool_foo', base_name, self.backup.container, src_user='user_foo', src_conf='conf_foo', dest_conf='/etc/ceph/ceph.conf', dest_user='cinder', src_snap='new_snap', from_snap=None) @common_mocks def test_backup_vol_length_0(self): volume_id = fake.VOLUME4_ID self._create_volume_db_entry(volume_id, 0) backup_id = fake.BACKUP4_ID self._create_backup_db_entry(backup_id, volume_id, 1) backup = objects.Backup.get_by_id(self.ctxt, backup_id) self.assertRaises(exception.InvalidParameterValue, self.service.backup, 
backup, self.volume_file) @common_mocks def test_backup_with_container_name(self): volume_size = self.volume_size * units.Gi backup_id = fake.BACKUP4_ID self._create_backup_db_entry(backup_id, self.volume_id, 1) backup = objects.Backup.get_by_id(self.ctxt, backup_id) backup.container = "test" with mock.patch.object( self.service, '_full_backup', side_effect=exception.BackupOperationError()) as mock_full: self.assertRaises(exception.BackupOperationError, self.service.backup, backup, self.volume_file) mock_full.assert_called_once_with(backup, self.volume_file, self.volume.name, volume_size) @common_mocks def test_restore(self): backup_name = self.service._get_backup_base_name(self.volume_id, self.alt_backup) self.mock_rbd.RBD.return_value.list.return_value = [backup_name] thread_dict = {} def mock_read_data(offset, length): thread_dict['thread'] = threading.current_thread() return self.volume_file.read(self.data_length) self.mock_rbd.Image.return_value.read.side_effect = mock_read_data self.mock_rbd.Image.return_value.size.return_value = \ self.chunk_size * self.num_chunks with mock.patch.object(self.service, '_restore_metadata') as \ mock_restore_metadata: with mock.patch.object(self.service, '_discard_bytes') as \ mock_discard_bytes: with tempfile.NamedTemporaryFile() as test_file: self.volume_file.seek(0) self.service.restore(self.alt_backup, self.volume_id, test_file, False) checksum = hashlib.sha256() test_file.seek(0) for _c in range(0, self.num_chunks): checksum.update(test_file.read(self.chunk_size)) # Ensure the files are equal self.assertEqual(checksum.digest(), self.checksum.digest()) self.assertTrue(mock_restore_metadata.called) self.assertTrue(mock_discard_bytes.called) self.assertTrue(mock_discard_bytes.called) self.assertTrue(self.service.rbd.Image.return_value.read.called) self.assertNotEqual(threading.current_thread(), thread_dict['thread']) @common_mocks def test_full_restore_without_snapshot_id_nor_src_snap(self): length = 1024 volume_is_new = True src_snap = '' with tempfile.NamedTemporaryFile() as dest_file: with mock.patch.object(self.service, '_transfer_data') as mock_transfer_data, \ mock.patch.object(self.service, '_get_backup_base_name') as mock_getbasename: self.service._full_restore(self.backup, dest_file, length, volume_is_new, src_snap) mock_getbasename.assert_called_once_with(self.volume_id, backup=self.backup) mock_transfer_data.assert_called_once() @common_mocks def test_full_restore_without_snapshot_id_w_src_snap(self): length = 1024 volume_is_new = True src_snap = 'random_snap' with tempfile.NamedTemporaryFile() as dest_file: with mock.patch.object(self.service, '_transfer_data') as mock_transfer_data, \ mock.patch.object(self.service, '_get_backup_base_name') as mock_getbasename: self.service._full_restore(self.backup, dest_file, length, volume_is_new, src_snap) mock_getbasename.assert_called_once_with(self.volume_id, backup=self.backup) mock_transfer_data.assert_called_once() @common_mocks def test_full_restore_with_snapshot_id(self): length = 1024 volume_is_new = True src_snap = '' # Create alternate backup with snapshot_id backup_id = fake.BACKUP4_ID self._create_backup_db_entry(backup_id, self.volume_id, self.volume_size) backup = objects.Backup.get_by_id(self.ctxt, backup_id) backup.snapshot_id = 'random_snap_id' backup.container = "backups" backup.parent = self.backup backup.parent.service_metadata = '{"base": "random"}' with tempfile.NamedTemporaryFile() as dest_file: with mock.patch.object(self.service, '_transfer_data') as mock_transfer_data, \ 
mock.patch.object(self.service, '_get_backup_base_name') as mock_getbasename: self.service._full_restore(backup, dest_file, length, volume_is_new, src_snap) mock_getbasename.assert_called_once_with(self.volume_id) mock_transfer_data.assert_called_once() @common_mocks def test_full_restore_with_image_not_found(self): length = 1024 volume_is_new = True src_snap = None with tempfile.NamedTemporaryFile() as dest_file: with mock.patch.object(self.service, '_get_backup_base_name') as mock_name, \ mock.patch('eventlet.tpool.Proxy') as mock_proxy: self.mock_rbd.Image.side_effect = self.mock_rbd.ImageNotFound self.assertRaises(self.mock_rbd.ImageNotFound, self.service._full_restore, self.backup, dest_file, length, volume_is_new, src_snap) # Check that the _get_backup_base_name was called # twice due to the exception self.assertEqual(mock_name.call_count, 2) self.assertEqual(mock_proxy.call_count, 2) @common_mocks def test_discard_bytes(self): # Lower the chunksize to a memory manageable number thread_dict = {} self.service.chunk_size = 1024 image = self.mock_rbd.Image.return_value wrapped_rbd = self._get_wrapped_rbd_io(image) def mock_discard(offset, length): thread_dict['thread'] = threading.current_thread() return self.mock_rbd.Image.discard(offset, length) self.mock_rbd.Image.return_value.discard.side_effect = mock_discard self.service._discard_bytes(wrapped_rbd, 0, 0) self.assertEqual(0, image.discard.call_count) image.discard.reset_mock() self.service._discard_bytes(wrapped_rbd, 0, 1234) self.assertEqual(1, image.discard.call_count) image.discard.assert_has_calls([mock.call(0, 1234)]) image.discard.reset_mock() limit = 2 * units.Gi - 1 self.service._discard_bytes(wrapped_rbd, 0, limit) self.assertEqual(1, image.discard.call_count) image.discard.assert_has_calls([mock.call(0, 2147483647)]) image.discard.reset_mock() self.service._discard_bytes(wrapped_rbd, 0, limit * 2 + 1234) self.assertEqual(3, image.discard.call_count) image.discard.assert_has_calls([mock.call(0, 2147483647), mock.call(2147483647, 2147483647), mock.call(4294967294, 1234)]) image.reset_mock() # Test discard with no remainder with mock.patch.object(self.service, '_file_is_rbd') as \ mock_file_is_rbd: mock_file_is_rbd.return_value = False self.service._discard_bytes(wrapped_rbd, 0, self.service.chunk_size * 2) self.assertEqual(2, image.write.call_count) self.assertEqual(2, image.flush.call_count) self.assertFalse(image.discard.called) zeroes = bytearray(self.service.chunk_size) image.write.assert_has_calls([mock.call(zeroes, 0), mock.call(zeroes, self.chunk_size)]) self.assertNotEqual(threading.current_thread(), thread_dict['thread']) image.reset_mock() image.write.reset_mock() # Now test with a remainder. 
with mock.patch.object(self.service, '_file_is_rbd') as \ mock_file_is_rbd: mock_file_is_rbd.return_value = False self.service._discard_bytes(wrapped_rbd, 0, (self.service.chunk_size * 2) + 1) self.assertEqual(3, image.write.call_count) self.assertEqual(3, image.flush.call_count) self.assertFalse(image.discard.called) image.write.assert_has_calls([mock.call(zeroes, self.chunk_size * 2), mock.call(zeroes, self.chunk_size * 3), mock.call(bytearray(1), self.chunk_size * 4)]) @common_mocks def test_delete_backup_snapshot(self): snap_name = 'backup.%s.snap.3824923.1412' % fake.UUID1 base_name = self.service._get_backup_base_name(self.volume_id) self.mock_rbd.RBD.remove_snap = mock.Mock() thread_dict = {} def mock_side_effect(snap): thread_dict['thread'] = threading.current_thread() self.mock_rbd.Image.return_value.remove_snap.side_effect = \ mock_side_effect with mock.patch.object(self.service, '_get_backup_snap_name') as \ mock_get_backup_snap_name: mock_get_backup_snap_name.return_value = snap_name with mock.patch.object(self.service, 'get_backup_snaps') as \ mock_get_backup_snaps: mock_get_backup_snaps.return_value = None rem = self.service._delete_backup_snapshot(self.mock_rados, base_name, self.backup_id) self.assertTrue(mock_get_backup_snap_name.called) self.assertTrue(mock_get_backup_snaps.called) self.assertEqual((snap_name, 0), rem) self.assertNotEqual(threading.current_thread(), thread_dict['thread']) @common_mocks @mock.patch('cinder.backup.drivers.ceph.VolumeMetadataBackup', spec=True) def test_try_delete_base_image_diff_format(self, mock_meta_backup): backup_name = self.service._get_backup_base_name(self.volume_id, self.alt_backup) self.mock_rbd.RBD.return_value.list.return_value = [backup_name] with mock.patch.object(self.service, '_delete_backup_snapshot') as \ mock_del_backup_snap: snap_name = self.service._get_new_snap_name(self.alt_backup_id) mock_del_backup_snap.return_value = (snap_name, 0) self.service.delete_backup(self.alt_backup) self.assertTrue(mock_del_backup_snap.called) self.assertTrue(self.mock_rbd.RBD.return_value.list.called) self.assertTrue(self.mock_rbd.RBD.return_value.remove.called) @common_mocks @mock.patch('cinder.backup.drivers.ceph.VolumeMetadataBackup', spec=True) def test_try_delete_base_image(self, mock_meta_backup): backup_name = self.service._get_backup_base_name(self.volume_id, self.alt_backup) thread_dict = {} def mock_side_effect(ioctx, base_name): thread_dict['thread'] = threading.current_thread() self.mock_rbd.RBD.return_value.list.return_value = [backup_name] self.mock_rbd.RBD.return_value.remove.side_effect = mock_side_effect with mock.patch.object(self.service, 'get_backup_snaps'): self.service.delete_backup(self.alt_backup) self.assertTrue(self.mock_rbd.RBD.return_value.remove.called) self.assertNotEqual(threading.current_thread(), thread_dict['thread']) @common_mocks def test_try_delete_base_image_busy(self): """This should induce retries then raise rbd.ImageBusy.""" backup_name = self.service._get_backup_base_name(self.volume_id, self.alt_backup) rbd = self.mock_rbd.RBD.return_value rbd.list.return_value = [backup_name] rbd.remove.side_effect = self.mock_rbd.ImageBusy with mock.patch.object(self.service, 'get_backup_snaps') as \ mock_get_backup_snaps: self.assertRaises(self.mock_rbd.ImageBusy, self.service._try_delete_base_image, self.alt_backup) self.assertTrue(mock_get_backup_snaps.called) self.assertTrue(rbd.list.called) self.assertTrue(rbd.remove.called) self.assertIn(MockImageBusyException, RAISED_EXCEPTIONS) @common_mocks 
@mock.patch('cinder.backup.drivers.ceph.VolumeMetadataBackup', spec=True) def test_delete_image_not_found(self, mock_meta_backup): with mock.patch.object(self.service, '_try_delete_base_image') as \ mock_del_base: mock_del_base.side_effect = self.mock_rbd.ImageNotFound # ImageNotFound exception is caught so that db entry can be cleared self.service.delete_backup(self.backup) self.assertEqual([MockImageNotFoundException], RAISED_EXCEPTIONS) @common_mocks @mock.patch('cinder.backup.drivers.ceph.VolumeMetadataBackup', spec=True) def test_delete_pool_not_found(self, mock_meta_backup): with mock.patch.object( self.service, '_try_delete_base_image') as mock_del_base: mock_del_base.side_effect = self.mock_rados.ObjectNotFound # ObjectNotFound exception is caught so that db entry can be # cleared self.service.delete_backup(self.backup) self.assertEqual([MockObjectNotFoundException], RAISED_EXCEPTIONS) mock_del_base.assert_called_once_with(self.backup) mock_meta_backup.return_value.remove_if_exists.assert_not_called() @common_mocks def test_diff_restore_allowed_with_image_not_exists(self): """Test diff restore not allowed when backup not diff-format.""" not_allowed = (False, None) backup_base = 'backup.base' rbd_io = self._get_wrapped_rbd_io(self.service.rbd.Image()) args_vols_different = [backup_base, self.backup, self.alt_volume, rbd_io, self.mock_rados] with mock.patch.object(self.service, '_rbd_image_exists') as \ mock_rbd_image_exists: mock_rbd_image_exists.return_value = (False, backup_base) resp = self.service._diff_restore_allowed(*args_vols_different) self.assertEqual(not_allowed, resp) mock_rbd_image_exists.assert_called_once_with( backup_base, self.backup['volume_id'], self.mock_rados) @common_mocks def test_diff_restore_allowed_with_no_restore_point(self): """Test diff restore not allowed when no restore point found. Detail conditions: 1. backup base is diff-format 2. restore point does not exist """ not_allowed = (False, None) backup_base = 'backup.base' rbd_io = self._get_wrapped_rbd_io(self.service.rbd.Image()) args_vols_different = [backup_base, self.backup, self.alt_volume, rbd_io, self.mock_rados] with mock.patch.object(self.service, '_rbd_image_exists') as \ mock_rbd_image_exists: mock_rbd_image_exists.return_value = (True, backup_base) with mock.patch.object(self.service, '_get_restore_point') as \ mock_get_restore_point: mock_get_restore_point.return_value = None args = args_vols_different resp = self.service._diff_restore_allowed(*args) self.assertEqual(not_allowed, resp) self.assertTrue(mock_rbd_image_exists.called) mock_get_restore_point.assert_called_once_with( backup_base, self.backup['id']) @common_mocks def test_diff_restore_allowed_with_not_rbd(self): """Test diff restore not allowed when destination volume is not rbd. Detail conditions: 1. backup base is diff-format 2. restore point exists 3. destination volume is not an rbd. 
""" backup_base = 'backup.base' restore_point = 'backup.snap.1' rbd_io = self._get_wrapped_rbd_io(self.service.rbd.Image()) args_vols_different = [backup_base, self.backup, self.alt_volume, rbd_io, self.mock_rados] with mock.patch.object(self.service, '_rbd_image_exists') as \ mock_rbd_image_exists: mock_rbd_image_exists.return_value = (True, backup_base) with mock.patch.object(self.service, '_get_restore_point') as \ mock_get_restore_point: mock_get_restore_point.return_value = restore_point with mock.patch.object(self.service, '_file_is_rbd') as \ mock_file_is_rbd: mock_file_is_rbd.return_value = False args = args_vols_different resp = self.service._diff_restore_allowed(*args) self.assertEqual((False, restore_point), resp) self.assertTrue(mock_rbd_image_exists.called) self.assertTrue(mock_get_restore_point.called) mock_file_is_rbd.assert_called_once_with( rbd_io) @common_mocks def test_diff_restore_allowed_with_same_volume(self): """Test diff restore not allowed when volumes are same. Detail conditions: 1. backup base is diff-format 2. restore point exists 3. destination volume is an rbd 4. source and destination volumes are the same """ backup_base = 'backup.base' restore_point = 'backup.snap.1' rbd_io = self._get_wrapped_rbd_io(self.service.rbd.Image()) args_vols_same = [backup_base, self.backup, self.volume, rbd_io, self.mock_rados] with mock.patch.object(self.service, '_rbd_image_exists') as \ mock_rbd_image_exists: mock_rbd_image_exists.return_value = (True, backup_base) with mock.patch.object(self.service, '_get_restore_point') as \ mock_get_restore_point: mock_get_restore_point.return_value = restore_point with mock.patch.object(self.service, '_file_is_rbd') as \ mock_file_is_rbd: mock_file_is_rbd.return_value = True resp = self.service._diff_restore_allowed(*args_vols_same) self.assertEqual((False, restore_point), resp) self.assertTrue(mock_rbd_image_exists.called) self.assertTrue(mock_get_restore_point.called) self.assertTrue(mock_file_is_rbd.called) @common_mocks def test_diff_restore_allowed_with_has_extents(self): """Test diff restore not allowed when destination volume has data. Detail conditions: 1. backup base is diff-format 2. restore point exists 3. destination volume is an rbd 4. source and destination volumes are different 5. destination volume has data on it - full copy is mandated """ backup_base = 'backup.base' restore_point = 'backup.snap.1' rbd_io = self._get_wrapped_rbd_io(self.service.rbd.Image()) args_vols_different = [backup_base, self.backup, self.alt_volume, rbd_io, self.mock_rados] with mock.patch.object(self.service, '_rbd_image_exists') as \ mock_rbd_image_exists: mock_rbd_image_exists.return_value = (True, backup_base) with mock.patch.object(self.service, '_get_restore_point') as \ mock_get_restore_point: mock_get_restore_point.return_value = restore_point with mock.patch.object(self.service, '_file_is_rbd') as \ mock_file_is_rbd: mock_file_is_rbd.return_value = True with mock.patch.object(self.service, '_rbd_has_extents') \ as mock_rbd_has_extents: mock_rbd_has_extents.return_value = True args = args_vols_different resp = self.service._diff_restore_allowed(*args) self.assertEqual((False, restore_point), resp) self.assertTrue(mock_rbd_image_exists.called) self.assertTrue(mock_get_restore_point.called) self.assertTrue(mock_file_is_rbd.called) mock_rbd_has_extents.assert_called_once_with( rbd_io.rbd_image) @common_mocks def test_diff_restore_allowed_with_no_extents(self): """Test diff restore allowed when no data in destination volume. 
Detail conditions: 1. backup base is diff-format 2. restore point exists 3. destination volume is an rbd 4. source and destination volumes are different 5. destination volume no data on it """ backup_base = 'backup.base' restore_point = 'backup.snap.1' rbd_io = self._get_wrapped_rbd_io(self.service.rbd.Image()) args_vols_different = [backup_base, self.backup, self.alt_volume, rbd_io, self.mock_rados] with mock.patch.object(self.service, '_rbd_image_exists') as \ mock_rbd_image_exists: mock_rbd_image_exists.return_value = (True, backup_base) with mock.patch.object(self.service, '_get_restore_point') as \ mock_get_restore_point: mock_get_restore_point.return_value = restore_point with mock.patch.object(self.service, '_file_is_rbd') as \ mock_file_is_rbd: mock_file_is_rbd.return_value = True with mock.patch.object(self.service, '_rbd_has_extents') \ as mock_rbd_has_extents: mock_rbd_has_extents.return_value = False args = args_vols_different resp = self.service._diff_restore_allowed(*args) self.assertEqual((True, restore_point), resp) self.assertTrue(mock_rbd_image_exists.called) self.assertTrue(mock_get_restore_point.called) self.assertTrue(mock_file_is_rbd.called) self.assertTrue(mock_rbd_has_extents.called) @common_mocks @mock.patch('fcntl.fcntl', spec=True) def test_piped_execute(self, mock_fcntl): mock_fcntl.return_value = 0 self._setup_mock_popen(['out', 'err']) self.service._piped_execute(['foo'], ['bar']) self.assertEqual(['popen_init', 'popen_init', 'stdout_close', 'communicate', 'wait'], self.callstack) @common_mocks def test_restore_metdata(self): version = 2 def mock_read(*args): base_tag = driver.BackupMetadataAPI.TYPE_TAG_VOL_BASE_META glance_tag = driver.BackupMetadataAPI.TYPE_TAG_VOL_GLANCE_META return jsonutils.dumps({base_tag: {'image_name': 'image.base'}, glance_tag: {'image_name': 'image.glance'}, 'version': version}).encode('utf-8') self.mock_rados.Object.return_value.read.side_effect = mock_read self.service._restore_metadata(self.backup, self.volume_id) self.assertTrue(self.mock_rados.Object.return_value.stat.called) self.assertTrue(self.mock_rados.Object.return_value.read.called) version = 3 try: self.service._restore_metadata(self.backup, self.volume_id) except exception.BackupOperationError as exc: msg = _("Metadata restore failed due to incompatible version") self.assertEqual(msg, str(exc)) else: # Force a test failure self.assertFalse(True) @common_mocks @mock.patch('cinder.backup.drivers.ceph.VolumeMetadataBackup', spec=True) def test_backup_metadata_already_exists(self, mock_meta_backup): def mock_set(json_meta): msg = (_("Metadata backup object '%s' already exists") % ("backup.%s.meta" % (self.backup_id))) raise exception.VolumeMetadataBackupExists(msg) mock_meta_backup.return_value.set = mock.Mock() mock_meta_backup.return_value.set.side_effect = mock_set with mock.patch.object(self.service, 'get_metadata') as \ mock_get_metadata: mock_get_metadata.return_value = "some.json.metadata" try: self.service._backup_metadata(self.backup) except exception.BackupOperationError as e: msg = (_("Failed to backup volume metadata - Metadata backup " "object 'backup.%s.meta' already exists") % (self.backup_id)) self.assertEqual(msg, str(e)) else: # Make the test fail self.assertFalse(True) self.assertFalse(mock_meta_backup.set.called) @common_mocks def test_backup_metadata_error(self): """Ensure that delete_backup() is called if the metadata backup fails. Also ensure that the exception is propagated to the caller. 
""" with mock.patch.object(self.service, '_backup_metadata') as \ mock_backup_metadata: mock_backup_metadata.side_effect = exception.BackupOperationError with mock.patch.object(self.service, '_get_volume_size_bytes'): with mock.patch.object(self.service, '_file_is_rbd', return_value=False): with mock.patch.object(self.service, '_full_backup'): with mock.patch.object(self.service, 'delete_backup') as \ mock_delete: self.assertRaises(exception.BackupOperationError, self.service.backup, self.backup, mock.Mock(), backup_metadata=True) self.assertTrue(mock_delete.called) @common_mocks def test_restore_invalid_metadata_version(self): def mock_read(*args): base_tag = driver.BackupMetadataAPI.TYPE_TAG_VOL_BASE_META glance_tag = driver.BackupMetadataAPI.TYPE_TAG_VOL_GLANCE_META return jsonutils.dumps({base_tag: {'image_name': 'image.base'}, glance_tag: {'image_name': 'image.glance'}, 'version': 3}).encode('utf-8') self.mock_rados.Object.return_value.read.side_effect = mock_read with mock.patch.object(ceph.VolumeMetadataBackup, '_exists') as \ mock_exists: mock_exists.return_value = True self.assertRaises(exception.BackupOperationError, self.service._restore_metadata, self.backup, self.volume_id) self.assertTrue(mock_exists.called) self.assertTrue(self.mock_rados.Object.return_value.read.called) @ddt.data((None, False), ([{'name': 'test'}], False), ([{'name': 'test'}, {'name': 'fake'}], True)) @ddt.unpack @common_mocks def test__snap_exists(self, snapshots, snap_exist): client = mock.Mock() thread_dict = {} with mock.patch.object(self.service.rbd.Image(), 'list_snaps') as snaps: snaps.return_value = snapshots def mock_side_effect(): thread_dict['thread'] = threading.current_thread() return snaps.return_value snaps.side_effect = mock_side_effect exist = self.service._snap_exists(None, 'fake', client) self.assertEqual(snap_exist, exist) self.assertNotEqual(thread_dict['thread'], threading.current_thread()) def common_meta_backup_mocks(f): """Decorator to set mocks common to all metadata backup tests. The point of doing these mocks here is so that we don't accidentally set mocks that can't/don't get unset. 
""" def _common_inner_inner1(inst, *args, **kwargs): @mock.patch('cinder.backup.drivers.ceph.rbd') @mock.patch('cinder.backup.drivers.ceph.rados') def _common_inner_inner2(mock_rados, mock_rbd): inst.mock_rados = mock_rados inst.mock_rbd = mock_rbd inst.mock_rados.ObjectNotFound = MockObjectNotFoundException return f(inst, *args, **kwargs) return _common_inner_inner2() return _common_inner_inner1 class VolumeMetadataBackupTestCase(test.TestCase): def setUp(self): global RAISED_EXCEPTIONS RAISED_EXCEPTIONS = [] super(VolumeMetadataBackupTestCase, self).setUp() self.backup_id = fake.BACKUP_ID self.mb = ceph.VolumeMetadataBackup(mock.Mock(), self.backup_id) @common_meta_backup_mocks def test_name(self): self.assertEqual('backup.%s.meta' % (self.backup_id), self.mb.name) @common_meta_backup_mocks def test_exists(self): thread_dict = {} def mock_side_effect(): thread_dict['thread'] = threading.current_thread() # True self.mock_rados.Object.return_value.stat.side_effect = mock_side_effect self.assertTrue(self.mb.exists) self.assertTrue(self.mock_rados.Object.return_value.stat.called) self.mock_rados.Object.return_value.reset_mock() self.assertNotEqual(thread_dict['thread'], threading.current_thread()) # False self.mock_rados.Object.return_value.stat.side_effect = ( self.mock_rados.ObjectNotFound) self.assertFalse(self.mb.exists) self.assertTrue(self.mock_rados.Object.return_value.stat.called) self.assertEqual([MockObjectNotFoundException], RAISED_EXCEPTIONS) @common_meta_backup_mocks def test_set(self): obj_data = [] called = [] thread_dict = {} def mock_read(*args): called.append('read') self.assertEqual(1, len(obj_data)) return obj_data[0] def _mock_write(data): obj_data.append(data) called.append('write') thread_dict['thread'] = threading.current_thread() self.mb.get = mock.Mock() self.mb.get.side_effect = mock_read serialized_meta_1 = jsonutils.dumps({'foo': 'bar'}) serialized_meta_2 = jsonutils.dumps({'doo': 'dah'}) with mock.patch.object(ceph.VolumeMetadataBackup, 'set') as mock_write: mock_write.side_effect = _mock_write self.mb.set(serialized_meta_1) self.assertEqual(serialized_meta_1, self.mb.get()) self.assertTrue(self.mb.get.called) self.mb._exists = mock.Mock() self.mb._exists.return_value = True # use the unmocked set() method. self.assertRaises(exception.VolumeMetadataBackupExists, self.mb.set, serialized_meta_2) # check the meta obj state has not changed. 
self.assertEqual(serialized_meta_1, self.mb.get()) self.assertEqual(['write', 'read', 'read'], called) self.mb._exists.return_value = False self.mb.set(serialized_meta_2) self.assertNotEqual(thread_dict['thread'], threading.current_thread) @common_meta_backup_mocks def test_get(self): self.mock_rados.Object.return_value.stat.side_effect = ( self.mock_rados.ObjectNotFound) self.mock_rados.Object.return_value.read.return_value = ( 'meta'.encode('utf-8')) self.assertIsNone(self.mb.get()) self.mock_rados.Object.return_value.stat.side_effect = None self.assertEqual('meta', self.mb.get()) @common_meta_backup_mocks def remove_if_exists(self): thread_dict = {} def mock_side_effect(): thread_dict['thread'] = threading.current_thread() with mock.patch.object(self.mock_rados.Object, 'remove') as \ mock_remove: mock_remove.side_effect = self.mock_rados.ObjectNotFound self.mb.remove_if_exists() self.assertEqual([MockObjectNotFoundException], RAISED_EXCEPTIONS) self.mock_rados.Object.remove.side_effect = mock_side_effect self.mb.remove_if_exists() self.assertEqual([], RAISED_EXCEPTIONS) self.assertNotEqual(thread_dict['thread'], threading.current_thread) cinder-27.0.0/cinder/tests/unit/backup/drivers/test_backup_driver_base.py # Copyright 2013 Canonical Ltd. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Tests for the backup service base driver. 
""" from unittest import mock from oslo_serialization import jsonutils from cinder.backup import driver from cinder import context from cinder import db from cinder import exception from cinder import objects from cinder.tests.unit.backup import fake_service from cinder.tests.unit import fake_constants as fake from cinder.tests.unit import test class BackupBaseDriverTestCase(test.TestCase): def _create_volume_db_entry(self, id, size): vol = {'id': id, 'size': size, 'status': 'available', 'volume_type_id': self.vt['id']} return db.volume_create(self.ctxt, vol)['id'] def _create_backup_db_entry(self, backupid, volid, size, userid=fake.USER_ID, projectid=fake.PROJECT_ID): backup = {'id': backupid, 'size': size, 'volume_id': volid, 'user_id': userid, 'project_id': projectid} return db.backup_create(self.ctxt, backup)['id'] def setUp(self): super(BackupBaseDriverTestCase, self).setUp() self.ctxt = context.get_admin_context() self.volume_id = fake.VOLUME_ID self.backup_id = fake.BACKUP_ID self._create_backup_db_entry(self.backup_id, self.volume_id, 1) self._create_volume_db_entry(self.volume_id, 1) self.backup = objects.Backup.get_by_id(self.ctxt, self.backup_id) self.driver = fake_service.FakeBackupService(self.ctxt) def test_get_metadata(self): json_metadata = self.driver.get_metadata(self.volume_id) metadata = jsonutils.loads(json_metadata) self.assertEqual(2, metadata['version']) def test_put_metadata(self): metadata = {'version': 1} self.driver.put_metadata(self.volume_id, jsonutils.dumps(metadata)) def test_get_put_metadata(self): json_metadata = self.driver.get_metadata(self.volume_id) self.driver.put_metadata(self.volume_id, json_metadata) def test_export_record(self): export_record = self.driver.export_record(self.backup) self.assertDictEqual({}, export_record) def test_import_record(self): export_record = {'key1': 'value1'} self.assertIsNone(self.driver.import_record(self.backup, export_record)) class BackupMetadataAPITestCase(test.TestCase): def _create_volume_db_entry(self, id, size, display_name, display_description): vol = {'id': id, 'size': size, 'status': 'available', 'display_name': display_name, 'display_description': display_description, 'volume_type_id': self.vt['id']} return db.volume_create(self.ctxt, vol)['id'] def setUp(self): super(BackupMetadataAPITestCase, self).setUp() self.ctxt = context.get_admin_context() self.volume_id = fake.VOLUME2_ID self.backup_id = fake.BACKUP2_ID self.volume_display_name = 'vol-1' self.volume_display_description = 'test vol' self._create_volume_db_entry(self.volume_id, 1, self.volume_display_name, self.volume_display_description) self.bak_meta_api = driver.BackupMetadataAPI(self.ctxt) def _add_metadata(self, vol_meta=False, vol_glance_meta=False): if vol_meta: # Add some VolumeMetadata db.volume_metadata_update(self.ctxt, self.volume_id, {'fee': 'fi'}, False) db.volume_metadata_update(self.ctxt, self.volume_id, {'fo': 'fum'}, False) if vol_glance_meta: # Add some GlanceMetadata db.volume_glance_metadata_create(self.ctxt, self.volume_id, 'disk_format', 'bare') db.volume_glance_metadata_create(self.ctxt, self.volume_id, 'container_type', 'ovf') def test_get(self): # Volume won't have anything other than base by default meta = self.bak_meta_api.get(self.volume_id) s1 = set(jsonutils.loads(meta).keys()) s2 = ['version', self.bak_meta_api.TYPE_TAG_VOL_BASE_META] self.assertEqual(set(), s1.symmetric_difference(s2)) self._add_metadata(vol_glance_meta=True) meta = self.bak_meta_api.get(self.volume_id) s1 = set(jsonutils.loads(meta).keys()) s2 = 
['version', self.bak_meta_api.TYPE_TAG_VOL_BASE_META, self.bak_meta_api.TYPE_TAG_VOL_GLANCE_META] self.assertEqual(set(), s1.symmetric_difference(s2)) self._add_metadata(vol_meta=True) meta = self.bak_meta_api.get(self.volume_id) s1 = set(jsonutils.loads(meta).keys()) s2 = ['version', self.bak_meta_api.TYPE_TAG_VOL_BASE_META, self.bak_meta_api.TYPE_TAG_VOL_GLANCE_META, self.bak_meta_api.TYPE_TAG_VOL_META] self.assertEqual(set(), s1.symmetric_difference(s2)) def test_put(self): meta = self.bak_meta_api.get(self.volume_id) self.bak_meta_api.put(self.volume_id, meta) self._add_metadata(vol_glance_meta=True) meta = self.bak_meta_api.get(self.volume_id) self.bak_meta_api.put(self.volume_id, meta) self._add_metadata(vol_meta=True) meta = self.bak_meta_api.get(self.volume_id) self.bak_meta_api.put(self.volume_id, meta) def test_put_invalid_version(self): container = jsonutils.dumps({'version': 3}) self.assertRaises(exception.BackupMetadataUnsupportedVersion, self.bak_meta_api.put, self.volume_id, container) def test_v1_restore_factory(self): fact = self.bak_meta_api._v1_restore_factory() keys = [self.bak_meta_api.TYPE_TAG_VOL_BASE_META, self.bak_meta_api.TYPE_TAG_VOL_META, self.bak_meta_api.TYPE_TAG_VOL_GLANCE_META] self.assertEqual(set([]), set(keys).symmetric_difference(set(fact.keys()))) meta_container = {self.bak_meta_api.TYPE_TAG_VOL_BASE_META: {'display_name': 'my-backed-up-volume', 'display_description': 'backed up description'}, self.bak_meta_api.TYPE_TAG_VOL_META: {}, self.bak_meta_api.TYPE_TAG_VOL_GLANCE_META: {}} # Emulate restore to new volume volume_id = fake.VOLUME3_ID vol_name = 'restore_backup_%s' % (self.backup_id) self._create_volume_db_entry(volume_id, 1, vol_name, 'fake volume') for f in fact: func = fact[f][0] fields = fact[f][1] func(meta_container[f], volume_id, fields) vol = db.volume_get(self.ctxt, volume_id) self.assertEqual('my-backed-up-volume', vol['display_name']) self.assertEqual('backed up description', vol['display_description']) def test_v1_restore_factory_no_restore_name(self): fact = self.bak_meta_api._v1_restore_factory() keys = [self.bak_meta_api.TYPE_TAG_VOL_BASE_META, self.bak_meta_api.TYPE_TAG_VOL_META, self.bak_meta_api.TYPE_TAG_VOL_GLANCE_META] self.assertEqual(set([]), set(keys).symmetric_difference(set(fact.keys()))) meta_container = {self.bak_meta_api.TYPE_TAG_VOL_BASE_META: {'display_name': 'my-backed-up-volume', 'display_description': 'backed up description'}, self.bak_meta_api.TYPE_TAG_VOL_META: {}, self.bak_meta_api.TYPE_TAG_VOL_GLANCE_META: {}} for f in fact: func = fact[f][0] fields = fact[f][1] func(meta_container[f], self.volume_id, fields) vol = db.volume_get(self.ctxt, self.volume_id) self.assertEqual(self.volume_display_name, vol['display_name']) self.assertEqual(self.volume_display_description, vol['display_description']) def test_v2_restore_factory(self): fact = self.bak_meta_api._v2_restore_factory() keys = [self.bak_meta_api.TYPE_TAG_VOL_BASE_META, self.bak_meta_api.TYPE_TAG_VOL_META, self.bak_meta_api.TYPE_TAG_VOL_GLANCE_META] self.assertEqual(set([]), set(keys).symmetric_difference(set(fact.keys()))) meta_container = {self.bak_meta_api.TYPE_TAG_VOL_BASE_META: {'encryption_key_id': '123', 'volume_type_id': self.vt.get('id'), 'display_name': 'vol-2', 'display_description': 'description'}, self.bak_meta_api.TYPE_TAG_VOL_META: {}, self.bak_meta_api.TYPE_TAG_VOL_GLANCE_META: {}} for f in fact: func = fact[f][0] fields = fact[f][1] func(meta_container[f], self.volume_id, fields) vol = db.volume_get(self.ctxt, self.volume_id) 
self.assertEqual(self.volume_display_name, vol['display_name']) self.assertEqual(self.volume_display_description, vol['display_description']) self.assertEqual('123', vol['encryption_key_id']) def test_restore_vol_glance_meta(self): # Fields is an empty list for _restore_vol_glance_meta method. fields = [] container = {} self.bak_meta_api._save_vol_glance_meta(container, self.volume_id) self.bak_meta_api._restore_vol_glance_meta(container, self.volume_id, fields) self._add_metadata(vol_glance_meta=True) self.bak_meta_api._save_vol_glance_meta(container, self.volume_id) self.bak_meta_api._restore_vol_glance_meta(container, self.volume_id, fields) def test_restore_vol_meta(self): # Fields is an empty list for _restore_vol_meta method. fields = [] container = {} self.bak_meta_api._save_vol_meta(container, self.volume_id) # Extract volume metadata from container. metadata = container.get('volume-metadata', {}) self.bak_meta_api._restore_vol_meta(metadata, self.volume_id, fields) self._add_metadata(vol_meta=True) self.bak_meta_api._save_vol_meta(container, self.volume_id) # Extract volume metadata from container. metadata = container.get('volume-metadata', {}) self.bak_meta_api._restore_vol_meta(metadata, self.volume_id, fields) def test_restore_vol_base_meta(self): # Fields is a list with 'encryption_key_id' for # _restore_vol_base_meta method. fields = ['encryption_key_id'] container = {} self.bak_meta_api._save_vol_base_meta(container, self.volume_id) self.bak_meta_api._restore_vol_base_meta(container, self.volume_id, fields) def _create_encrypted_volume_db_entry(self, id, type_id, encrypted): if encrypted: key_id = fake.ENCRYPTION_KEY_ID vol = {'id': id, 'size': 1, 'status': 'available', 'volume_type_id': type_id, 'encryption_key_id': key_id} else: vol = {'id': id, 'size': 1, 'status': 'available', 'volume_type_id': type_id, 'encryption_key_id': None} return db.volume_create(self.ctxt, vol)['id'] def test_restore_encrypted_vol_to_different_volume_type(self): fields = ['encryption_key_id'] container = {} # Create an encrypted volume enc_vol1_id = self._create_encrypted_volume_db_entry(fake.VOLUME4_ID, 'enc_vol_type', True) # Create a second encrypted volume, of a different volume type enc_vol2_id = self._create_encrypted_volume_db_entry(fake.VOLUME5_ID, 'enc_vol_type2', True) # Backup the first volume and attempt to restore to the second self.bak_meta_api._save_vol_base_meta(container, enc_vol1_id) self.assertRaises(exception.EncryptedBackupOperationFailed, self.bak_meta_api._restore_vol_base_meta, container[self.bak_meta_api.TYPE_TAG_VOL_BASE_META], enc_vol2_id, fields) def test_restore_unencrypted_vol_to_different_volume_type(self): fields = ['encryption_key_id'] container = {} # Create an unencrypted volume vol1_id = self._create_encrypted_volume_db_entry(fake.VOLUME6_ID, 'vol_type1', False) # Create a second unencrypted volume, of a different volume type vol2_id = self._create_encrypted_volume_db_entry(fake.VOLUME7_ID, 'vol_type2', False) # Backup the first volume and restore to the second self.bak_meta_api._save_vol_base_meta(container, vol1_id) self.bak_meta_api._restore_vol_base_meta( container[self.bak_meta_api.TYPE_TAG_VOL_BASE_META], vol2_id, fields) self.assertNotEqual( db.volume_get(self.ctxt, vol1_id)['volume_type_id'], db.volume_get(self.ctxt, vol2_id)['volume_type_id']) def test_restore_encrypted_vol_to_same_volume_type(self): fields = ['encryption_key_id'] container = {} # Create an encrypted volume enc_vol1_id = self._create_encrypted_volume_db_entry(fake.VOLUME8_ID, 
'enc_vol_type', True) # Create an encrypted volume of the same type enc_vol2_id = self._create_encrypted_volume_db_entry(fake.VOLUME9_ID, 'enc_vol_type', True) # Backup the first volume and restore to the second self.bak_meta_api._save_vol_base_meta(container, enc_vol1_id) self.bak_meta_api._restore_vol_base_meta( container[self.bak_meta_api.TYPE_TAG_VOL_BASE_META], enc_vol2_id, fields) def test_filter(self): metadata = {'a': 1, 'b': 2, 'c': 3} self.assertEqual(metadata, self.bak_meta_api._filter(metadata, [])) self.assertEqual({'b': 2}, self.bak_meta_api._filter(metadata, ['b'])) self.assertEqual({}, self.bak_meta_api._filter(metadata, ['d'])) self.assertEqual({'a': 1, 'b': 2}, self.bak_meta_api._filter(metadata, ['a', 'b'])) def test_save_vol_glance_meta(self): container = {} self.bak_meta_api._save_vol_glance_meta(container, self.volume_id) def test_save_vol_meta(self): container = {} self.bak_meta_api._save_vol_meta(container, self.volume_id) def test_save_vol_base_meta(self): container = {} self.bak_meta_api._save_vol_base_meta(container, self.volume_id) def test_is_serializable(self): data = {'foo': 'bar'} if self.bak_meta_api._is_serializable(data): jsonutils.dumps(data) def test_is_not_serializable(self): data = {'foo': 'bar'} with mock.patch.object(jsonutils, 'dumps') as mock_dumps: mock_dumps.side_effect = TypeError self.assertFalse(self.bak_meta_api._is_serializable(data)) mock_dumps.assert_called_once_with(data) cinder-27.0.0/cinder/tests/unit/backup/drivers/test_backup_glusterfs.py # Copyright (c) 2013 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""Tests for GlusterFS backup driver.""" import os from unittest import mock from os_brick.remotefs import remotefs as remotefs_brick from cinder.backup.drivers import glusterfs from cinder import context from cinder import exception from cinder.tests.unit import test from cinder import utils FAKE_BACKUP_MOUNT_POINT_BASE = '/fake/mount-point-base' FAKE_HOST = 'fake_host' FAKE_VOL_NAME = 'backup_vol' FAKE_BACKUP_SHARE = '%s:%s' % (FAKE_HOST, FAKE_VOL_NAME) FAKE_BACKUP_PATH = os.path.join(FAKE_BACKUP_MOUNT_POINT_BASE, 'e51e43e3c63fd5770e90e58e2eafc709') class BackupGlusterfsShareTestCase(test.TestCase): def setUp(self): super(BackupGlusterfsShareTestCase, self).setUp() self.ctxt = context.get_admin_context() def test_check_configuration(self): self.override_config('glusterfs_backup_share', FAKE_BACKUP_SHARE) self.mock_object(glusterfs.GlusterfsBackupDriver, '_init_backup_repo_path', return_value=FAKE_BACKUP_PATH) driver = glusterfs.GlusterfsBackupDriver(self.ctxt) driver.check_for_setup_error() def test_check_configuration_no_backup_share(self): self.override_config('glusterfs_backup_share', None) self.mock_object(glusterfs.GlusterfsBackupDriver, '_init_backup_repo_path', return_value=FAKE_BACKUP_PATH) driver = glusterfs.GlusterfsBackupDriver(self.ctxt) self.assertRaises(exception.InvalidConfigurationValue, driver.check_for_setup_error) def test_init_backup_repo_path(self): self.override_config('glusterfs_backup_share', FAKE_BACKUP_SHARE) self.override_config('glusterfs_backup_mount_point', FAKE_BACKUP_MOUNT_POINT_BASE) mock_remotefsclient = mock.Mock() mock_remotefsclient.get_mount_point = mock.Mock( return_value=FAKE_BACKUP_PATH) self.mock_object(glusterfs.GlusterfsBackupDriver, 'check_for_setup_error') self.mock_object(remotefs_brick, 'RemoteFsClient', return_value=mock_remotefsclient) self.mock_object(os, 'getegid', return_value=333333) self.mock_object(utils, 'get_file_gid', return_value=333333) self.mock_object(utils, 'get_file_mode', return_value=00000) self.mock_object(utils, 'get_root_helper') with mock.patch.object(glusterfs.GlusterfsBackupDriver, '_init_backup_repo_path'): driver = glusterfs.GlusterfsBackupDriver(self.ctxt) self.mock_object(driver, '_execute') path = driver._init_backup_repo_path() self.assertEqual(FAKE_BACKUP_PATH, path) utils.get_root_helper.assert_called_once_with() mock_remotefsclient.mount.assert_called_once_with(FAKE_BACKUP_SHARE) mock_remotefsclient.get_mount_point.assert_called_once_with( FAKE_BACKUP_SHARE) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/backup/drivers/test_backup_google.py0000664000175000017500000006534700000000000025630 0ustar00zuulzuul00000000000000# Copyright (C) 2012 Hewlett-Packard Development Company, L.P. # Copyright (C) 2016 Vedams Inc. # Copyright (C) 2016 Google Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""Tests for Google Backup code.""" import bz2 import filecmp import hashlib import os import shutil import tempfile import threading from unittest import mock import zlib from eventlet import tpool from oslo_utils import units import zstd from cinder.backup.drivers import gcs as google_dr from cinder import context from cinder import db from cinder import exception from cinder.i18n import _ from cinder import objects from cinder.tests.unit.backup import fake_google_client from cinder.tests.unit.backup import fake_google_client2 from cinder.tests.unit import fake_constants as fake from cinder.tests.unit import test class FakeMD5(object): def __init__(self, *args, **kwargs): pass @classmethod def digest(cls): return 'gcscindermd5' @classmethod def hexdigest(cls): return 'gcscindermd5' class FakeObjectName(object): @classmethod def _fake_generate_object_name_prefix(cls, backup): az = 'az_fake' backup_name = '%s_backup_%s' % (az, backup.id) volume = 'volume_%s' % (backup.volume_id) prefix = volume + '_' + backup_name return prefix def gcs_client(func): @mock.patch.object(google_dr.discovery, 'build', fake_google_client.FakeGoogleDiscovery.Build) @mock.patch.object(google_dr, 'GoogleMediaIoBaseDownload', fake_google_client.FakeGoogleMediaIoBaseDownload) @mock.patch.object(hashlib, 'md5', FakeMD5) def func_wrapper(self, *args, **kwargs): if google_dr.service_account: with mock.patch.object(google_dr.service_account.Credentials, 'from_service_account_file', fake_google_client.FakeGoogleCredentials): return func(self, *args, **kwargs) return func(self, *args, **kwargs) return func_wrapper def gcs_client2(func): @mock.patch.object(google_dr.discovery, 'build', fake_google_client2.FakeGoogleDiscovery.Build) @mock.patch.object(google_dr, 'GoogleMediaIoBaseDownload', fake_google_client2.FakeGoogleMediaIoBaseDownload) @mock.patch.object(google_dr.GoogleBackupDriver, '_generate_object_name_prefix', FakeObjectName._fake_generate_object_name_prefix) @mock.patch.object(hashlib, 'md5', FakeMD5) def func_wrapper(self, *args, **kwargs): if google_dr.service_account: with mock.patch.object(google_dr.service_account.Credentials, 'from_service_account_file', fake_google_client.FakeGoogleCredentials): return func(self, *args, **kwargs) return func(self, *args, **kwargs) return func_wrapper def fake_backup_metadata(self, backup, object_meta): raise exception.BackupDriverException(reason=_('fake')) def fake_delete(self, backup): raise exception.BackupOperationError() def _fake_delete_object(self, bucket_name, object_name): raise AssertionError('delete_object method should not be called.') class GoogleBackupDriverTestCase(test.TestCase): """Test Case for Google""" _DEFAULT_VOLUME_ID = 'c7eb81f4-bec6-4730-a60f-8888885874df' def _create_volume_db_entry(self, volume_id=_DEFAULT_VOLUME_ID): vol = {'id': volume_id, 'size': 1, 'status': 'available', 'volume_type_id': self.vt['id']} return db.volume_create(self.ctxt, vol)['id'] def _create_backup_db_entry(self, volume_id=_DEFAULT_VOLUME_ID, container=google_dr.CONF.backup_gcs_bucket, parent_id=None, status=None, service_metadata=None): try: db.volume_get(self.ctxt, volume_id) except exception.NotFound: self._create_volume_db_entry(volume_id=volume_id) kwargs = {'size': 1, 'container': container, 'volume_id': volume_id, 'parent_id': parent_id, 'user_id': fake.USER_ID, 'project_id': fake.PROJECT_ID, 'status': status, 'service_metadata': service_metadata, } backup = objects.Backup(context=self.ctxt, **kwargs) backup.create() return backup def 
_write_effective_compression_file(self, data_size): """Ensure file contents can be effectively compressed.""" self.volume_file.seek(0) self.volume_file.write(bytes([65] * data_size)) self.volume_file.seek(0) def setUp(self): super(GoogleBackupDriverTestCase, self).setUp() self.flags(backup_gcs_bucket='gcscinderbucket') self.flags(backup_gcs_credential_file='test-file') self.flags(backup_gcs_project_id='test-gcs') self.ctxt = context.get_admin_context() self.volume_file = tempfile.NamedTemporaryFile() self.temp_dir = tempfile.mkdtemp() self.addCleanup(self.volume_file.close) # Remove tempdir. self.addCleanup(shutil.rmtree, self.temp_dir) self.size_volume_file = 0 for _i in range(0, 64): self.volume_file.write(os.urandom(units.Ki)) self.size_volume_file += 1024 # Note(yikun): It mocks out the backup notifier to avoid to leak # notifications into other test. notify_patcher = mock.patch( 'cinder.volume.volume_utils.notify_about_backup_usage') notify_patcher.start() self.addCleanup(notify_patcher.stop) @gcs_client def test_backup(self): volume_id = 'b09b1ad4-5f0e-4d3f-8b9e-0000004f5ec2' container_name = 'test-bucket' backup = self._create_backup_db_entry(volume_id=volume_id, container=container_name) service = google_dr.GoogleBackupDriver(self.ctxt) self.volume_file.seek(0) result = service.backup(backup, self.volume_file) self.assertIsNone(result) @gcs_client def test_backup_uncompressed(self): volume_id = '2b9f10a3-42b4-4fdf-b316-000000ceb039' backup = self._create_backup_db_entry(volume_id=volume_id) self.flags(backup_compression_algorithm='none') service = google_dr.GoogleBackupDriver(self.ctxt) self.volume_file.seek(0) service.backup(backup, self.volume_file) @gcs_client def test_backup_bz2(self): volume_id = 'dc0fee35-b44e-4f13-80d6-000000e1b50c' backup = self._create_backup_db_entry(volume_id=volume_id) self.flags(backup_compression_algorithm='bz2') service = google_dr.GoogleBackupDriver(self.ctxt) self._write_effective_compression_file(self.size_volume_file) service.backup(backup, self.volume_file) @gcs_client def test_backup_zlib(self): volume_id = '5cea0535-b6fb-4531-9a38-000000bea094' backup = self._create_backup_db_entry(volume_id=volume_id) self.flags(backup_compression_algorithm='zlib') service = google_dr.GoogleBackupDriver(self.ctxt) self._write_effective_compression_file(self.size_volume_file) service.backup(backup, self.volume_file) @gcs_client def test_backup_zstd(self): volume_id = '471910a0-a197-4259-9c50-0fc3d6a07dbc' backup = self._create_backup_db_entry(volume_id=volume_id) self.flags(backup_compression_algorithm='zstd') service = google_dr.GoogleBackupDriver(self.ctxt) self._write_effective_compression_file(self.size_volume_file) service.backup(backup, self.volume_file) @gcs_client def test_backup_default_container(self): volume_id = '9552017f-c8b9-4e4e-a876-00000053349c' backup = self._create_backup_db_entry(volume_id=volume_id, container=None) service = google_dr.GoogleBackupDriver(self.ctxt) self.volume_file.seek(0) service.backup(backup, self.volume_file) self.assertEqual('gcscinderbucket', backup.container) @gcs_client def test_backup_proxy_configured(self): # Configuration overwrites enviromental variable proxy_cfg = "http://myproxy.example.com" os.environ['http_proxy'] = proxy_cfg + '_fake' google_dr.CONF.set_override("backup_gcs_proxy_url", proxy_cfg) google_dr.GoogleBackupDriver(self.ctxt) self.assertEqual(proxy_cfg, os.environ.get('http_proxy')) @gcs_client @mock.patch('cinder.backup.drivers.gcs.GoogleBackupDriver.' 
'_send_progress_end') @mock.patch('cinder.backup.drivers.gcs.GoogleBackupDriver.' '_send_progress_notification') def test_backup_default_container_notify(self, _send_progress, _send_progress_end): volume_id = '87dd0eed-2598-4ebd-8ebb-000000ac578a' backup = self._create_backup_db_entry(volume_id=volume_id, container=None) # If the backup_object_number_per_notification is set to 1, # the _send_progress method will be called for sure. google_dr.CONF.set_override("backup_object_number_per_notification", 1) google_dr.CONF.set_override("backup_gcs_enable_progress_timer", False) service = google_dr.GoogleBackupDriver(self.ctxt) self.volume_file.seek(0) service.backup(backup, self.volume_file) self.assertTrue(_send_progress.called) self.assertTrue(_send_progress_end.called) # If the backup_object_number_per_notification is increased to # another value, the _send_progress method will not be called. _send_progress.reset_mock() _send_progress_end.reset_mock() google_dr.CONF.set_override("backup_object_number_per_notification", 10) service = google_dr.GoogleBackupDriver(self.ctxt) self.volume_file.seek(0) service.backup(backup, self.volume_file) self.assertFalse(_send_progress.called) self.assertTrue(_send_progress_end.called) # If the timer is enabled, the _send_progress will be called, # since the timer can trigger the progress notification. _send_progress.reset_mock() _send_progress_end.reset_mock() google_dr.CONF.set_override("backup_object_number_per_notification", 10) google_dr.CONF.set_override("backup_gcs_enable_progress_timer", True) service = google_dr.GoogleBackupDriver(self.ctxt) self.volume_file.seek(0) service.backup(backup, self.volume_file) self.assertTrue(_send_progress.called) self.assertTrue(_send_progress_end.called) @gcs_client def test_backup_custom_container(self): volume_id = '1da9859e-77e5-4731-bd58-000000ca119e' container_name = 'fake99' backup = self._create_backup_db_entry(volume_id=volume_id, container=container_name) service = google_dr.GoogleBackupDriver(self.ctxt) self.volume_file.seek(0) service.backup(backup, self.volume_file) self.assertEqual(container_name, backup.container) @gcs_client2 def test_backup_shafile(self): volume_id = '6465dad4-22af-48f7-8a1a-000000218907' container_name = self.temp_dir.replace(tempfile.gettempdir() + '/', '', 1) backup = self._create_backup_db_entry(volume_id=volume_id, container=container_name) service = google_dr.GoogleBackupDriver(self.ctxt) self.volume_file.seek(0) service.backup(backup, self.volume_file) self.assertEqual(container_name, backup.container) # Verify sha contents content1 = service._read_sha256file(backup) self.assertEqual(64 * units.Ki / content1['chunk_size'], len(content1['sha256s'])) @gcs_client2 def test_backup_cmp_shafiles(self): volume_id = '1a99ac67-c534-4fe3-b472-0000001785e2' container_name = self.temp_dir.replace(tempfile.gettempdir() + '/', '', 1) backup = self._create_backup_db_entry(volume_id=volume_id, container=container_name) service1 = google_dr.GoogleBackupDriver(self.ctxt) self.volume_file.seek(0) service1.backup(backup, self.volume_file) self.assertEqual(container_name, backup.container) # Create incremental backup with no change to contents deltabackup = self._create_backup_db_entry(volume_id=volume_id, container=container_name, parent_id=backup.id) service2 = google_dr.GoogleBackupDriver(self.ctxt) self.volume_file.seek(0) service2.backup(deltabackup, self.volume_file) self.assertEqual(container_name, deltabackup.container) # Compare shas from both files content1 = 
service1._read_sha256file(backup) content2 = service2._read_sha256file(deltabackup) self.assertEqual(len(content1['sha256s']), len(content2['sha256s'])) self.assertEqual(set(content1['sha256s']), set(content2['sha256s'])) @gcs_client2 def test_backup_delta_two_objects_change(self): volume_id = '30dab288-265a-4583-9abe-000000d42c67' self.flags(backup_gcs_object_size=8 * units.Ki) self.flags(backup_gcs_block_size=units.Ki) container_name = self.temp_dir.replace(tempfile.gettempdir() + '/', '', 1) backup = self._create_backup_db_entry(volume_id=volume_id, container=container_name) service1 = google_dr.GoogleBackupDriver(self.ctxt) self.volume_file.seek(0) service1.backup(backup, self.volume_file) self.assertEqual(container_name, backup.container) # Create incremental backup with no change to contents self.volume_file.seek(2 * 8 * units.Ki) self.volume_file.write(os.urandom(units.Ki)) self.volume_file.seek(4 * 8 * units.Ki) self.volume_file.write(os.urandom(units.Ki)) deltabackup = self._create_backup_db_entry(volume_id=volume_id, container=container_name, parent_id=backup.id) service2 = google_dr.GoogleBackupDriver(self.ctxt) self.volume_file.seek(0) service2.backup(deltabackup, self.volume_file) self.assertEqual(container_name, deltabackup.container) content1 = service1._read_sha256file(backup) content2 = service2._read_sha256file(deltabackup) # Verify that two shas are changed at index 16 and 32 self.assertNotEqual(content1['sha256s'][16], content2['sha256s'][16]) self.assertNotEqual(content1['sha256s'][32], content2['sha256s'][32]) @gcs_client2 def test_backup_delta_two_blocks_in_object_change(self): volume_id = 'b943e84f-aa67-4331-9ab2-000000cf19ba' self.flags(backup_gcs_object_size=8 * units.Ki) self.flags(backup_gcs_block_size=units.Ki) container_name = self.temp_dir.replace(tempfile.gettempdir() + '/', '', 1) backup = self._create_backup_db_entry(volume_id=volume_id, container=container_name) service1 = google_dr.GoogleBackupDriver(self.ctxt) self.volume_file.seek(0) service1.backup(backup, self.volume_file) self.assertEqual(container_name, backup.container) # Create incremental backup with no change to contents self.volume_file.seek(16 * units.Ki) self.volume_file.write(os.urandom(units.Ki)) self.volume_file.seek(20 * units.Ki) self.volume_file.write(os.urandom(units.Ki)) deltabackup = self._create_backup_db_entry(volume_id=volume_id, container=container_name, parent_id=backup.id) service2 = google_dr.GoogleBackupDriver(self.ctxt) self.volume_file.seek(0) service2.backup(deltabackup, self.volume_file) self.assertEqual(container_name, deltabackup.container) # Verify that two shas are changed at index 16 and 20 content1 = service1._read_sha256file(backup) content2 = service2._read_sha256file(deltabackup) self.assertNotEqual(content1['sha256s'][16], content2['sha256s'][16]) self.assertNotEqual(content1['sha256s'][20], content2['sha256s'][20]) @gcs_client def test_create_backup_fail(self): volume_id = 'b09b1ad4-5f0e-4d3f-8b9e-0000004f5ec3' container_name = 'gcs_api_failure' backup = self._create_backup_db_entry(volume_id=volume_id, container=container_name) service = google_dr.GoogleBackupDriver(self.ctxt) self.volume_file.seek(0) self.assertRaises(google_dr.GCSApiFailure, service.backup, backup, self.volume_file) @gcs_client def test_create_backup_fail2(self): volume_id = 'b09b1ad4-5f0e-4d3f-8b9e-0000004f5ec4' container_name = 'gcs_oauth2_failure' backup = self._create_backup_db_entry(volume_id=volume_id, container=container_name) service = google_dr.GoogleBackupDriver(self.ctxt) 
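        # Note on the fixture, assuming the fake client wired in by the
        # @gcs_client decorator keys off the container name: a bucket called
        # 'gcs_oauth2_failure' makes the mocked API raise GCSOAuth2Failure,
        # which backup() is expected to propagate unchanged.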
self.volume_file.seek(0) self.assertRaises(google_dr.GCSOAuth2Failure, service.backup, backup, self.volume_file) @gcs_client @mock.patch.object(google_dr.GoogleBackupDriver, '_backup_metadata', fake_backup_metadata) def test_backup_backup_metadata_fail(self): """Test of when an exception occurs in backup(). In backup(), after an exception occurs in self._backup_metadata(), we want to check the process of an exception handler. """ volume_id = '020d9142-339c-4876-a445-000000f1520c' backup = self._create_backup_db_entry(volume_id=volume_id) self.flags(backup_compression_algorithm='none') service = google_dr.GoogleBackupDriver(self.ctxt) self.volume_file.seek(0) # We expect that an exception be notified directly. self.assertRaises(exception.BackupDriverException, service.backup, backup, self.volume_file) @gcs_client @mock.patch.object(google_dr.GoogleBackupDriver, '_backup_metadata', fake_backup_metadata) @mock.patch.object(google_dr.GoogleBackupDriver, 'delete_backup', fake_delete) def test_backup_backup_metadata_fail2(self): """Test of when an exception occurs in an exception handler. In backup(), after an exception occurs in self._backup_metadata(), we want to check the process when the second exception occurs in self.delete_backup(). """ volume_id = '2164421d-f181-4db7-b9bd-000000eeb628' backup = self._create_backup_db_entry(volume_id=volume_id) self.flags(backup_compression_algorithm='none') service = google_dr.GoogleBackupDriver(self.ctxt) self.volume_file.seek(0) # We expect that the second exception is notified. self.assertRaises(exception.BackupOperationError, service.backup, backup, self.volume_file) @gcs_client def test_restore(self): volume_id = 'c2a81f09-f480-4325-8424-00000071685b' backup = self._create_backup_db_entry( volume_id=volume_id, status=objects.fields.BackupStatus.RESTORING) service = google_dr.GoogleBackupDriver(self.ctxt) with tempfile.NamedTemporaryFile() as volume_file: service.restore(backup, volume_id, volume_file, False) @gcs_client def test_restore_fail(self): volume_id = 'c2a81f09-f480-4325-8424-00000071685b' container_name = 'gcs_connection_failure' backup = self._create_backup_db_entry(volume_id=volume_id, container=container_name) service = google_dr.GoogleBackupDriver(self.ctxt) with tempfile.NamedTemporaryFile() as volume_file: self.assertRaises(google_dr.GCSConnectionFailure, service.restore, backup, volume_id, volume_file, False) @gcs_client2 def test_restore_delta(self): volume_id = '04d83506-bcf7-4ff5-9c65-00000051bd2e' self.flags(backup_gcs_object_size=8 * units.Ki) self.flags(backup_gcs_block_size=units.Ki) container_name = self.temp_dir.replace(tempfile.gettempdir() + '/', '', 1) backup = self._create_backup_db_entry(volume_id=volume_id, container=container_name) service1 = google_dr.GoogleBackupDriver(self.ctxt) self.volume_file.seek(0) service1.backup(backup, self.volume_file) # Create incremental backup with no change to contents self.volume_file.seek(16 * units.Ki) self.volume_file.write(os.urandom(units.Ki)) self.volume_file.seek(20 * units.Ki) self.volume_file.write(os.urandom(units.Ki)) deltabackup = self._create_backup_db_entry( volume_id=volume_id, status=objects.fields.BackupStatus.RESTORING, container=container_name, parent_id=backup.id) self.volume_file.seek(0) service2 = google_dr.GoogleBackupDriver(self.ctxt) service2.backup(deltabackup, self.volume_file, True) with tempfile.NamedTemporaryFile() as restored_file: service2.restore(deltabackup, volume_id, restored_file, False) self.assertTrue(filecmp.cmp(self.volume_file.name, 
restored_file.name)) @gcs_client def test_delete(self): volume_id = '9ab256c8-3175-4ad8-baa1-0000007f9d31' object_prefix = 'test_prefix' backup = self._create_backup_db_entry(volume_id=volume_id, service_metadata=object_prefix) service = google_dr.GoogleBackupDriver(self.ctxt) service.delete_backup(backup) @gcs_client @mock.patch.object(google_dr.GoogleBackupDriver, 'delete_object', _fake_delete_object) def test_delete_without_object_prefix(self): volume_id = 'ee30d649-72a6-49a5-b78d-000000edb6b1' backup = self._create_backup_db_entry(volume_id=volume_id) service = google_dr.GoogleBackupDriver(self.ctxt) service.delete_backup(backup) @gcs_client def test_get_compressor(self): service = google_dr.GoogleBackupDriver(self.ctxt) compressor = service._get_compressor('None') self.assertIsNone(compressor) compressor = service._get_compressor('zlib') self.assertEqual(zlib, compressor) self.assertIsInstance(compressor, tpool.Proxy) compressor = service._get_compressor('bz2') self.assertEqual(bz2, compressor) self.assertIsInstance(compressor, tpool.Proxy) compressor = service._get_compressor('zstd') self.assertEqual(zstd, compressor) self.assertIsInstance(compressor, tpool.Proxy) self.assertRaises(ValueError, service._get_compressor, 'fake') @gcs_client def test_prepare_output_data_effective_compression(self): """Test compression works on a native thread.""" # Use dictionary to share data between threads thread_dict = {} original_compress = zlib.compress def my_compress(data): thread_dict['compress'] = threading.current_thread() return original_compress(data) self.mock_object(zlib, 'compress', side_effect=my_compress) service = google_dr.GoogleBackupDriver(self.ctxt) # Set up buffer of 128 zeroed bytes fake_data = b'\0' * 128 result = service._prepare_output_data(fake_data) self.assertEqual('zlib', result[0]) self.assertGreater(len(fake_data), len(result[1])) self.assertNotEqual(threading.current_thread(), thread_dict['compress']) @gcs_client def test_prepare_output_data_no_compression(self): self.flags(backup_compression_algorithm='none') service = google_dr.GoogleBackupDriver(self.ctxt) # Set up buffer of 128 zeroed bytes fake_data = b'\0' * 128 result = service._prepare_output_data(fake_data) self.assertEqual('none', result[0]) self.assertEqual(fake_data, result[1]) @gcs_client def test_prepare_output_data_ineffective_compression(self): service = google_dr.GoogleBackupDriver(self.ctxt) # Set up buffer of 128 zeroed bytes fake_data = b'\0' * 128 # Pre-compress so that compression in the driver will be ineffective. 
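        # Already-compressed data is essentially incompressible: a second zlib
        # pass cannot make it smaller, so _prepare_output_data is expected to
        # notice that compression gains nothing and store the chunk as-is,
        # reporting the algorithm as 'none'.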
already_compressed_data = service.compressor.compress(fake_data) result = service._prepare_output_data(already_compressed_data) self.assertEqual('none', result[0]) self.assertEqual(already_compressed_data, result[1]) @mock.patch.object(google_dr, 'gexceptions', mock.Mock()) @mock.patch.object(google_dr.discovery, 'build') @mock.patch.object(google_dr, 'service_account') def test_client_setup(self, account, build): google_dr.CONF.set_override('backup_gcs_credential_file', 'credentials_file') google_dr.GoogleBackupDriver(self.ctxt) create_creds = account.Credentials.from_service_account_file create_creds.assert_called_once_with('credentials_file') build.assert_called_once_with('storage', 'v1', cache_discovery=False, credentials=create_creds.return_value) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/backup/drivers/test_backup_nfs.py0000664000175000017500000011777300000000000025143 0ustar00zuulzuul00000000000000# Copyright (C) 2015 Tom Barron # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Tests for Backup NFS driver.""" import bz2 import filecmp import hashlib import os import shutil import stat import tempfile import threading from unittest import mock import zlib import ddt from eventlet import tpool from os_brick import exception as brick_exception from os_brick.remotefs import remotefs as remotefs_brick from oslo_config import cfg import zstd from cinder.backup.drivers import nfs from cinder import context from cinder import db from cinder import exception from cinder.i18n import _ from cinder import objects from cinder.tests.unit import fake_constants as fake from cinder.tests.unit import test CONF = cfg.CONF FAKE_BACKUP_MOUNT_POINT_BASE = '/fake/mount-point-base' FAKE_HOST = 'fake_host' FAKE_EXPORT_PATH = 'fake/export/path' FAKE_BACKUP_SHARE = '%s:/%s' % (FAKE_HOST, FAKE_EXPORT_PATH) FAKE_BACKUP_PATH = os.path.join(FAKE_BACKUP_MOUNT_POINT_BASE, FAKE_EXPORT_PATH) FAKE_BACKUP_ID = fake.BACKUP_ID FAKE_BACKUP_ID_PART1 = fake.BACKUP_ID[:2] FAKE_BACKUP_ID_PART2 = fake.BACKUP_ID[2:4] FAKE_BACKUP_ID_REST = fake.BACKUP_ID[4:] UPDATED_CONTAINER_NAME = os.path.join(FAKE_BACKUP_ID_PART1, FAKE_BACKUP_ID_PART2, FAKE_BACKUP_ID) FAKE_EGID = 1234 @ddt.ddt class BackupNFSShareTestCase(test.TestCase): def setUp(self): super(BackupNFSShareTestCase, self).setUp() self.ctxt = context.get_admin_context() self.mock_object(nfs, 'LOG') # Note(yikun): It mocks out the backup notifier to avoid to leak # notifications into other test. 
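        # Using patcher.start() plus addCleanup(patcher.stop) keeps the patch
        # active for the whole test and guarantees it is undone even if the
        # rest of setUp or the test itself raises.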
notify_patcher = mock.patch( 'cinder.volume.volume_utils.notify_about_backup_usage') notify_patcher.start() self.addCleanup(notify_patcher.stop) def test_check_configuration_no_backup_share(self): self.override_config('backup_share', None) self.mock_object(nfs.NFSBackupDriver, '_init_backup_repo_path', return_value=FAKE_BACKUP_PATH) driver = nfs.NFSBackupDriver(self.ctxt) self.assertRaises(exception.InvalidConfigurationValue, driver.check_for_setup_error) @mock.patch('os.getegid', return_value=FAKE_EGID) @mock.patch('cinder.utils.get_file_gid') @mock.patch('cinder.utils.get_file_mode') @ddt.data((FAKE_EGID, 0), (FAKE_EGID, stat.S_IWGRP), (6666, 0), (6666, stat.S_IWGRP)) @ddt.unpack def test_init_backup_repo_path(self, file_gid, file_mode, mock_get_file_mode, mock_get_file_gid, mock_getegid): self.override_config('backup_share', FAKE_BACKUP_SHARE) self.override_config('backup_mount_point_base', FAKE_BACKUP_MOUNT_POINT_BASE) mock_remotefsclient = mock.Mock() mock_remotefsclient.get_mount_point = mock.Mock( return_value=FAKE_BACKUP_PATH) self.mock_object(nfs.NFSBackupDriver, 'check_for_setup_error') self.mock_object(remotefs_brick, 'RemoteFsClient', return_value=mock_remotefsclient) with mock.patch.object(nfs.NFSBackupDriver, '_init_backup_repo_path'): driver = nfs.NFSBackupDriver(self.ctxt) mock_get_file_gid.return_value = file_gid mock_get_file_mode.return_value = file_mode mock_execute = self.mock_object(driver, '_execute') path = driver._init_backup_repo_path() self.assertEqual(FAKE_BACKUP_PATH, path) mock_remotefsclient.mount.assert_called_once_with(FAKE_BACKUP_SHARE) mock_remotefsclient.get_mount_point.assert_called_once_with( FAKE_BACKUP_SHARE) mock_execute_calls = [] if file_gid != FAKE_EGID: mock_execute_calls.append( mock.call('chgrp', '-R', FAKE_EGID, path, root_helper=driver._root_helper, run_as_root=True)) if not (file_mode & stat.S_IWGRP): mock_execute_calls.append( mock.call('chmod', '-R', 'g+w', path, root_helper=driver._root_helper, run_as_root=True)) mock_execute.assert_has_calls(mock_execute_calls, any_order=True) self.assertEqual(len(mock_execute_calls), mock_execute.call_count) def test_init_backup_repo_path_unconfigured(self): """RemoteFsClient is not created if backup_share unset""" self.override_config('backup_share', None) mock_remotefsclient = mock.Mock() self.mock_object(remotefs_brick, 'RemoteFsClient') driver = nfs.NFSBackupDriver(self.ctxt) driver._init_backup_repo_path() self.assertEqual(0, mock_remotefsclient.call_count) @mock.patch('time.sleep') def test_init_backup_repo_path_mount_retry(self, mock_sleep): self.override_config('backup_share', FAKE_BACKUP_SHARE) self.override_config('backup_mount_attempts', 2) mock_remotefsclient = mock.Mock() self.mock_object(remotefs_brick, 'RemoteFsClient', return_value=mock_remotefsclient) mock_remotefsclient.mount.side_effect = [ brick_exception.BrickException] * 2 with mock.patch.object(nfs.NFSBackupDriver, '_init_backup_repo_path'): driver = nfs.NFSBackupDriver(self.ctxt) self.assertRaises(brick_exception.BrickException, driver._init_backup_repo_path) self.assertEqual([mock.call(FAKE_BACKUP_SHARE), mock.call(FAKE_BACKUP_SHARE)], mock_remotefsclient.mount.call_args_list) def fake_md5(arg, usedforsecurity=False): class result(object): def hexdigest(self): return 'fake-md5-sum' ret = result() return ret class BackupNFSTestCase(test.TestCase): """Test Cases for NFS backup driver.""" _DEFAULT_VOLUME_ID = fake.VOLUME_ID def _create_volume_db_entry(self, volume_id=_DEFAULT_VOLUME_ID): vol = {'id': volume_id, 'size': 1, 
'status': 'available', 'volume_type_id': self.vt['id']} return db.volume_create(self.ctxt, vol)['id'] def _create_backup_db_entry(self, volume_id=_DEFAULT_VOLUME_ID, container='test-container', backup_id=fake.BACKUP_ID, parent_id=None, status=None): try: db.volume_get(self.ctxt, volume_id) except exception.NotFound: self._create_volume_db_entry(volume_id=volume_id) backup = {'id': backup_id, 'size': 1, 'container': container, 'volume_id': volume_id, 'parent_id': parent_id, 'user_id': fake.USER_ID, 'project_id': fake.PROJECT_ID, 'status': status, } return db.backup_create(self.ctxt, backup)['id'] def _write_effective_compression_file(self, data_size): """Ensure file contents can be effectively compressed.""" self.volume_file.seek(0) self.volume_file.write(bytes([65] * data_size)) self.volume_file.seek(0) def _store_thread(self, *args, **kwargs): self.thread_dict['thread'] = threading.current_thread() return self.thread_original_method(*args, **kwargs) def setUp(self): super(BackupNFSTestCase, self).setUp() self.ctxt = context.get_admin_context() self.mock_object(hashlib, 'md5', fake_md5) self.volume_file = tempfile.NamedTemporaryFile() self.temp_dir = tempfile.mkdtemp() self.addCleanup(self.volume_file.close) self.override_config('backup_share', FAKE_BACKUP_SHARE) self.override_config('backup_mount_point_base', FAKE_BACKUP_MOUNT_POINT_BASE) self.override_config('backup_file_size', 52428800) self.mock_object(nfs.NFSBackupDriver, '_init_backup_repo_path', return_value=self.temp_dir) # Remove tempdir. self.addCleanup(shutil.rmtree, self.temp_dir) self.size_volume_file = 0 for _i in range(0, 32): self.volume_file.write(os.urandom(1024)) self.size_volume_file += 1024 # Use dictionary to share data between threads self.thread_dict = {} # Note(yikun): It mocks out the backup notifier to avoid to leak # notifications into other test. 
notify_patcher = mock.patch( 'cinder.volume.volume_utils.notify_about_backup_usage') notify_patcher.start() self.addCleanup(notify_patcher.stop) def test_backup_uncompressed(self): volume_id = fake.VOLUME_ID self._create_backup_db_entry(volume_id=volume_id) self.flags(backup_compression_algorithm='none') service = nfs.NFSBackupDriver(self.ctxt) self.volume_file.seek(0) backup = objects.Backup.get_by_id(self.ctxt, fake.BACKUP_ID) service.backup(backup, self.volume_file) def test_backup_bz2(self): volume_id = fake.VOLUME_ID self._create_backup_db_entry(volume_id=volume_id) self.flags(backup_compression_algorithm='bz2') service = nfs.NFSBackupDriver(self.ctxt) self._write_effective_compression_file(self.size_volume_file) backup = objects.Backup.get_by_id(self.ctxt, fake.BACKUP_ID) service.backup(backup, self.volume_file) def test_backup_zlib(self): volume_id = fake.VOLUME_ID self._create_backup_db_entry(volume_id=volume_id) self.flags(backup_compression_algorithm='zlib') service = nfs.NFSBackupDriver(self.ctxt) self._write_effective_compression_file(self.size_volume_file) backup = objects.Backup.get_by_id(self.ctxt, fake.BACKUP_ID) service.backup(backup, self.volume_file) def test_backup_zstd(self): volume_id = fake.VOLUME_ID self._create_backup_db_entry(volume_id=volume_id) self.flags(backup_compression_algorithm='zstd') service = nfs.NFSBackupDriver(self.ctxt) self._write_effective_compression_file(self.size_volume_file) backup = objects.Backup.get_by_id(self.ctxt, fake.BACKUP_ID) service.backup(backup, self.volume_file) def test_backup_default_container(self): volume_id = fake.VOLUME_ID self._create_backup_db_entry(volume_id=volume_id, container=None, backup_id=FAKE_BACKUP_ID) service = nfs.NFSBackupDriver(self.ctxt) self.volume_file.seek(0) backup = objects.Backup.get_by_id(self.ctxt, FAKE_BACKUP_ID) service.backup(backup, self.volume_file) backup = objects.Backup.get_by_id(self.ctxt, FAKE_BACKUP_ID) self.assertEqual(backup['container'], UPDATED_CONTAINER_NAME) def test_backup_cancel(self): """Test the backup abort mechanism when backup is force deleted.""" count = set() def my_refresh(): # This refresh method will abort the backup after 1 chunk count.add(len(count) + 1) if len(count) == 2: backup.destroy() original_refresh() volume_id = fake.VOLUME_ID self._create_backup_db_entry(volume_id=volume_id, container=None, backup_id=FAKE_BACKUP_ID) service = nfs.NFSBackupDriver(self.ctxt) self.volume_file.seek(0) backup = objects.Backup.get_by_id(self.ctxt, FAKE_BACKUP_ID) original_refresh = backup.refresh # We cannot mock refresh method in backup object directly because # mock will raise AttributeError on context manager exit. with mock.patch('cinder.objects.base.CinderPersistentObject.refresh', side_effect=my_refresh), \ mock.patch.object(service, 'delete_object', side_effect=service.delete_object) as delete: # Driver shouldn't raise the NotFound exception service.backup(backup, self.volume_file) # Ensure we called the delete_backup method when abort is detected self.assertEqual(1, delete.call_count) @mock.patch('cinder.backup.drivers.posix.PosixBackupDriver.' 'update_container_name', return_value='testcontainer1') @mock.patch('cinder.backup.drivers.nfs.NFSBackupDriver.' '_send_progress_end') @mock.patch('cinder.backup.drivers.nfs.NFSBackupDriver.' '_send_progress_notification') def test_backup_container_notify_1(self, _send_progress, _send_progress_end, _mock_update_container_name): # This unit test writes data to disk. It should be # updated to not do that. 
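        # It writes because setUp mocks _init_backup_repo_path to return
        # self.temp_dir, so service.backup() below creates real backup object
        # files under that temporary directory.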
volume_id = fake.VOLUME_ID self._create_backup_db_entry(volume_id=volume_id, container='testcontainer1') # If the backup_object_number_per_notification is set to 1, # the _send_progress method will be called for sure. _send_progress.reset_mock() _send_progress_end.reset_mock() CONF.set_override("backup_object_number_per_notification", 1) CONF.set_override("backup_enable_progress_timer", False) service = nfs.NFSBackupDriver(self.ctxt) self.volume_file.seek(0) backup = objects.Backup.get_by_id(self.ctxt, fake.BACKUP_ID) service.backup(backup, self.volume_file) self.assertTrue(_send_progress.called) self.assertTrue(_send_progress_end.called) @mock.patch('cinder.backup.drivers.posix.PosixBackupDriver.' 'update_container_name', return_value='testcontainer2') @mock.patch('cinder.backup.drivers.nfs.NFSBackupDriver.' '_send_progress_end') @mock.patch('cinder.backup.drivers.nfs.NFSBackupDriver.' '_send_progress_notification') def test_backup_container_notify_2(self, _send_progress, _send_progress_end, _mock_update_container_name): # This unit test writes data to disk. It should be # updated to not do that. volume_id = fake.VOLUME_ID self._create_backup_db_entry(volume_id=volume_id, container='testcontainer2') # If the backup_object_number_per_notification is increased to # another value, the _send_progress method will not be called. _send_progress.reset_mock() _send_progress_end.reset_mock() CONF.set_override("backup_object_number_per_notification", 10) CONF.set_override("backup_enable_progress_timer", False) service = nfs.NFSBackupDriver(self.ctxt) self.volume_file.seek(0) backup = objects.Backup.get_by_id(self.ctxt, fake.BACKUP_ID) service.backup(backup, self.volume_file) self.assertFalse(_send_progress.called) self.assertTrue(_send_progress_end.called) @mock.patch('cinder.backup.drivers.posix.PosixBackupDriver.' 'update_container_name', return_value='testcontainer3') @mock.patch('cinder.backup.drivers.nfs.NFSBackupDriver.' '_send_progress_end') @mock.patch('cinder.backup.drivers.nfs.NFSBackupDriver.' '_send_progress_notification') def test_backup_container_notify_3(self, _send_progress, _send_progress_end, _mock_update_container_name): # This unit test writes data to disk. It should be # updated to not do that. volume_id = fake.VOLUME_ID self._create_backup_db_entry(volume_id=volume_id, container='testcontainer3') # If the timer is enabled, the _send_progress will be called, # since the timer can trigger the progress notification. 
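        # (backup_enable_progress_timer presumably arms a periodic timer in
        # the chunked backup driver that emits progress notifications on an
        # interval, independent of how many objects have been written.)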
_send_progress.reset_mock() _send_progress_end.reset_mock() CONF.set_override("backup_object_number_per_notification", 10) CONF.set_override("backup_enable_progress_timer", True) service = nfs.NFSBackupDriver(self.ctxt) self.volume_file.seek(0) backup = objects.Backup.get_by_id(self.ctxt, fake.BACKUP_ID) service.backup(backup, self.volume_file) self.assertTrue(_send_progress.called) self.assertTrue(_send_progress_end.called) def test_backup_custom_container(self): volume_id = fake.VOLUME_ID container_name = 'fake99' self._create_backup_db_entry(volume_id=volume_id, container=container_name) service = nfs.NFSBackupDriver(self.ctxt) self.volume_file.seek(0) backup = objects.Backup.get_by_id(self.ctxt, fake.BACKUP_ID) service.backup(backup, self.volume_file) backup = objects.Backup.get_by_id(self.ctxt, fake.BACKUP_ID) self.assertEqual(backup['container'], container_name) def test_backup_shafile(self): volume_id = fake.VOLUME_ID def _fake_generate_object_name_prefix(self, backup): az = 'az_fake' backup_name = '%s_backup_%s' % (az, backup['id']) volume = 'volume_%s' % (backup['volume_id']) prefix = volume + '_' + backup_name return prefix self.mock_object(nfs.NFSBackupDriver, '_generate_object_name_prefix', _fake_generate_object_name_prefix) container_name = self.temp_dir.replace(tempfile.gettempdir() + '/', '', 1) self._create_backup_db_entry(volume_id=volume_id, container=container_name) service = nfs.NFSBackupDriver(self.ctxt) self.volume_file.seek(0) backup = objects.Backup.get_by_id(self.ctxt, fake.BACKUP_ID) service.backup(backup, self.volume_file) backup = objects.Backup.get_by_id(self.ctxt, fake.BACKUP_ID) self.assertEqual(backup['container'], container_name) # Verify sha contents content1 = service._read_sha256file(backup) self.assertEqual(32 * 1024 / content1['chunk_size'], len(content1['sha256s'])) def test_backup_cmp_shafiles(self): volume_id = fake.VOLUME_ID def _fake_generate_object_name_prefix(self, backup): az = 'az_fake' backup_name = '%s_backup_%s' % (az, backup['id']) volume = 'volume_%s' % (backup['volume_id']) prefix = volume + '_' + backup_name return prefix self.mock_object(nfs.NFSBackupDriver, '_generate_object_name_prefix', _fake_generate_object_name_prefix) container_name = self.temp_dir.replace(tempfile.gettempdir() + '/', '', 1) self._create_backup_db_entry(volume_id=volume_id, container=container_name, backup_id=fake.BACKUP_ID) service = nfs.NFSBackupDriver(self.ctxt) self.volume_file.seek(0) backup = objects.Backup.get_by_id(self.ctxt, fake.BACKUP_ID) service.backup(backup, self.volume_file) backup = objects.Backup.get_by_id(self.ctxt, fake.BACKUP_ID) self.assertEqual(backup['container'], container_name) # Create incremental backup with no change to contents self._create_backup_db_entry(volume_id=volume_id, container=container_name, backup_id=fake.BACKUP2_ID, parent_id=fake.BACKUP_ID) service = nfs.NFSBackupDriver(self.ctxt) self.volume_file.seek(0) deltabackup = objects.Backup.get_by_id(self.ctxt, fake.BACKUP2_ID) service.backup(deltabackup, self.volume_file) deltabackup = objects.Backup.get_by_id(self.ctxt, fake.BACKUP2_ID) self.assertEqual(deltabackup['container'], container_name) # Compare shas from both files content1 = service._read_sha256file(backup) content2 = service._read_sha256file(deltabackup) self.assertEqual(len(content1['sha256s']), len(content2['sha256s'])) self.assertEqual(set(content1['sha256s']), set(content2['sha256s'])) def test_backup_delta_two_objects_change(self): volume_id = fake.VOLUME_ID def _fake_generate_object_name_prefix(self, 
backup): az = 'az_fake' backup_name = '%s_backup_%s' % (az, backup['id']) volume = 'volume_%s' % (backup['volume_id']) prefix = volume + '_' + backup_name return prefix self.mock_object(nfs.NFSBackupDriver, '_generate_object_name_prefix', _fake_generate_object_name_prefix) self.flags(backup_file_size=(8 * 1024)) self.flags(backup_sha_block_size_bytes=1024) container_name = self.temp_dir.replace(tempfile.gettempdir() + '/', '', 1) self._create_backup_db_entry(volume_id=volume_id, container=container_name, backup_id=fake.BACKUP_ID) service = nfs.NFSBackupDriver(self.ctxt) self.volume_file.seek(0) backup = objects.Backup.get_by_id(self.ctxt, fake.BACKUP_ID) service.backup(backup, self.volume_file) backup = objects.Backup.get_by_id(self.ctxt, fake.BACKUP_ID) self.assertEqual(backup['container'], container_name) # Create incremental backup with no change to contents self.volume_file.seek(16 * 1024) self.volume_file.write(os.urandom(1024)) self.volume_file.seek(20 * 1024) self.volume_file.write(os.urandom(1024)) self._create_backup_db_entry(volume_id=volume_id, container=container_name, backup_id=fake.BACKUP2_ID, parent_id=fake.BACKUP_ID) service = nfs.NFSBackupDriver(self.ctxt) self.volume_file.seek(0) deltabackup = objects.Backup.get_by_id(self.ctxt, fake.BACKUP2_ID) service.backup(deltabackup, self.volume_file) deltabackup = objects.Backup.get_by_id(self.ctxt, fake.BACKUP2_ID) self.assertEqual(deltabackup['container'], container_name) content1 = service._read_sha256file(backup) content2 = service._read_sha256file(deltabackup) # Verify that two shas are changed at index 16 and 20 self.assertNotEqual(content1['sha256s'][16], content2['sha256s'][16]) self.assertNotEqual(content1['sha256s'][20], content2['sha256s'][20]) def test_backup_delta_two_blocks_in_object_change(self): volume_id = fake.VOLUME_ID def _fake_generate_object_name_prefix(self, backup): az = 'az_fake' backup_name = '%s_backup_%s' % (az, backup['id']) volume = 'volume_%s' % (backup['volume_id']) prefix = volume + '_' + backup_name return prefix self.mock_object(nfs.NFSBackupDriver, '_generate_object_name_prefix', _fake_generate_object_name_prefix) self.flags(backup_file_size=(8 * 1024)) self.flags(backup_sha_block_size_bytes=1024) container_name = self.temp_dir.replace(tempfile.gettempdir() + '/', '', 1) self._create_backup_db_entry(volume_id=volume_id, container=container_name, backup_id=fake.BACKUP_ID) service = nfs.NFSBackupDriver(self.ctxt) self.volume_file.seek(0) backup = objects.Backup.get_by_id(self.ctxt, fake.BACKUP_ID) service.backup(backup, self.volume_file) backup = objects.Backup.get_by_id(self.ctxt, fake.BACKUP_ID) self.assertEqual(backup['container'], container_name) # Create incremental backup with no change to contents self.volume_file.seek(16 * 1024) self.volume_file.write(os.urandom(1024)) self.volume_file.seek(20 * 1024) self.volume_file.write(os.urandom(1024)) self._create_backup_db_entry(volume_id=volume_id, container=container_name, backup_id=fake.BACKUP2_ID, parent_id=fake.BACKUP_ID) service = nfs.NFSBackupDriver(self.ctxt) self.volume_file.seek(0) deltabackup = objects.Backup.get_by_id(self.ctxt, fake.BACKUP2_ID) service.backup(deltabackup, self.volume_file) deltabackup = objects.Backup.get_by_id(self.ctxt, fake.BACKUP2_ID) self.assertEqual(deltabackup['container'], container_name) # Verify that two shas are changed at index 16 and 20 content1 = service._read_sha256file(backup) content2 = service._read_sha256file(deltabackup) self.assertNotEqual(content1['sha256s'][16], content2['sha256s'][16]) 
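        # Index arithmetic: with backup_sha_block_size_bytes = 1024, a write
        # at byte offset N dirties sha block N // 1024, so the writes at
        # 16 * 1024 and 20 * 1024 above change exactly entries 16 and 20.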
self.assertNotEqual(content1['sha256s'][20], content2['sha256s'][20]) def test_backup_backup_metadata_fail(self): """Test of when an exception occurs in backup(). In backup(), after an exception occurs in self._backup_metadata(), we want to check the process of an exception handler. """ volume_id = fake.VOLUME_ID self._create_backup_db_entry(volume_id=volume_id) self.flags(backup_compression_algorithm='none') service = nfs.NFSBackupDriver(self.ctxt) self.volume_file.seek(0) backup = objects.Backup.get_by_id(self.ctxt, fake.BACKUP_ID) def fake_backup_metadata(self, backup, object_meta): raise exception.BackupDriverException(reason=_('fake')) # Raise a pseudo exception.BackupDriverException. self.mock_object(nfs.NFSBackupDriver, '_backup_metadata', fake_backup_metadata) # We expect that an exception be notified directly. self.assertRaises(exception.BackupDriverException, service.backup, backup, self.volume_file) def test_backup_backup_metadata_fail2(self): """Test of when an exception occurs in an exception handler. In backup(), after an exception occurs in self._backup_metadata(), we want to check the process when the second exception occurs in self.delete_backup(). """ volume_id = fake.VOLUME_ID self._create_backup_db_entry(volume_id=volume_id) self.flags(backup_compression_algorithm='none') service = nfs.NFSBackupDriver(self.ctxt) self.volume_file.seek(0) backup = objects.Backup.get_by_id(self.ctxt, fake.BACKUP_ID) def fake_backup_metadata(self, backup, object_meta): raise exception.BackupDriverException(reason=_('fake')) # Raise a pseudo exception.BackupDriverException. self.mock_object(nfs.NFSBackupDriver, '_backup_metadata', fake_backup_metadata) def fake_delete(self, backup): raise exception.BackupOperationError() # Raise a pseudo exception.BackupOperationError. self.mock_object(nfs.NFSBackupDriver, 'delete_backup', fake_delete) # We expect that the second exception is notified. 
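        # Flow: backup() hits BackupDriverException from the patched
        # _backup_metadata, its cleanup path calls delete_backup, and the
        # patched delete_backup raises BackupOperationError; that later
        # exception is the one that reaches the caller.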
self.assertRaises(exception.BackupOperationError, service.backup, backup, self.volume_file) def test_restore_uncompressed(self): volume_id = fake.VOLUME_ID self._create_backup_db_entry(volume_id=volume_id) self.flags(backup_compression_algorithm='none') self.flags(backup_sha_block_size_bytes=32) service = nfs.NFSBackupDriver(self.ctxt) self.volume_file.seek(0) backup = objects.Backup.get_by_id(self.ctxt, fake.BACKUP_ID) service.backup(backup, self.volume_file) with tempfile.NamedTemporaryFile() as restored_file: backup = objects.Backup.get_by_id(self.ctxt, fake.BACKUP_ID) backup.status = objects.fields.BackupStatus.RESTORING backup.save() service.restore(backup, volume_id, restored_file, False) self.assertTrue(filecmp.cmp(self.volume_file.name, restored_file.name)) def test_restore_bz2(self): self.thread_original_method = bz2.decompress volume_id = fake.VOLUME_ID self.mock_object(bz2, 'decompress', side_effect=self._store_thread) self._create_backup_db_entry(volume_id=volume_id) self.flags(backup_compression_algorithm='bz2') file_size = 1024 * 3 self.flags(backup_file_size=file_size) self.flags(backup_sha_block_size_bytes=1024) service = nfs.NFSBackupDriver(self.ctxt) self._write_effective_compression_file(file_size) backup = objects.Backup.get_by_id(self.ctxt, fake.BACKUP_ID) service.backup(backup, self.volume_file) with tempfile.NamedTemporaryFile() as restored_file: backup = objects.Backup.get_by_id(self.ctxt, fake.BACKUP_ID) backup.status = objects.fields.BackupStatus.RESTORING backup.save() service.restore(backup, volume_id, restored_file, False) self.assertTrue(filecmp.cmp(self.volume_file.name, restored_file.name)) self.assertNotEqual(threading.current_thread(), self.thread_dict['thread']) def test_restore_zlib(self): self.thread_original_method = zlib.decompress self.mock_object(zlib, 'decompress', side_effect=self._store_thread) volume_id = fake.VOLUME_ID self._create_backup_db_entry(volume_id=volume_id) self.flags(backup_compression_algorithm='zlib') file_size = 1024 * 3 self.flags(backup_file_size=file_size) self.flags(backup_sha_block_size_bytes=1024) service = nfs.NFSBackupDriver(self.ctxt) self._write_effective_compression_file(file_size) backup = objects.Backup.get_by_id(self.ctxt, fake.BACKUP_ID) backup.status = objects.fields.BackupStatus.RESTORING backup.save() service.backup(backup, self.volume_file) with tempfile.NamedTemporaryFile() as restored_file: backup = objects.Backup.get_by_id(self.ctxt, fake.BACKUP_ID) service.restore(backup, volume_id, restored_file, False) self.assertTrue(filecmp.cmp(self.volume_file.name, restored_file.name)) self.assertNotEqual(threading.current_thread(), self.thread_dict['thread']) def test_restore_zstd(self): self.thread_original_method = zstd.decompress self.mock_object(zstd, 'decompress', side_effect=self._store_thread) volume_id = fake.VOLUME_ID self._create_backup_db_entry(volume_id=volume_id) self.flags(backup_compression_algorithm='zstd') file_size = 1024 * 3 self.flags(backup_file_size=file_size) self.flags(backup_sha_block_size_bytes=1024) service = nfs.NFSBackupDriver(self.ctxt) self._write_effective_compression_file(file_size) backup = objects.Backup.get_by_id(self.ctxt, fake.BACKUP_ID) backup.status = objects.fields.BackupStatus.RESTORING backup.save() service.backup(backup, self.volume_file) with tempfile.NamedTemporaryFile() as restored_file: backup = objects.Backup.get_by_id(self.ctxt, fake.BACKUP_ID) service.restore(backup, volume_id, restored_file, False) self.assertTrue(filecmp.cmp(self.volume_file.name, 
restored_file.name)) self.assertNotEqual(threading.current_thread(), self.thread_dict['thread']) def test_restore_abort_delta(self): volume_id = fake.VOLUME_ID count = set() def _fake_generate_object_name_prefix(self, backup): az = 'az_fake' backup_name = '%s_backup_%s' % (az, backup['id']) volume = 'volume_%s' % (backup['volume_id']) prefix = volume + '_' + backup_name return prefix def my_refresh(): # This refresh method will abort the backup after 1 chunk count.add(len(count) + 1) if len(count) == 2: backup.status = objects.fields.BackupStatus.AVAILABLE backup.save() original_refresh() self.mock_object(nfs.NFSBackupDriver, '_generate_object_name_prefix', _fake_generate_object_name_prefix) self.flags(backup_file_size=(1024 * 8)) self.flags(backup_sha_block_size_bytes=1024) container_name = self.temp_dir.replace(tempfile.gettempdir() + '/', '', 1) self._create_backup_db_entry(volume_id=volume_id, container=container_name, backup_id=fake.BACKUP_ID) service = nfs.NFSBackupDriver(self.ctxt) self.volume_file.seek(0) backup = objects.Backup.get_by_id(self.ctxt, fake.BACKUP_ID) service.backup(backup, self.volume_file) # Create incremental backup with no change to contents self.volume_file.seek(16 * 1024) self.volume_file.write(os.urandom(1024)) self.volume_file.seek(20 * 1024) self.volume_file.write(os.urandom(1024)) self._create_backup_db_entry( volume_id=volume_id, status=objects.fields.BackupStatus.RESTORING, container=container_name, backup_id=fake.BACKUP2_ID, parent_id=fake.BACKUP_ID) self.volume_file.seek(0) deltabackup = objects.Backup.get_by_id(self.ctxt, fake.BACKUP2_ID) service.backup(deltabackup, self.volume_file, True) deltabackup = objects.Backup.get_by_id(self.ctxt, fake.BACKUP2_ID) backup = objects.Backup.get_by_id(self.ctxt, fake.BACKUP2_ID) original_refresh = backup.refresh with tempfile.NamedTemporaryFile() as restored_file, \ mock.patch('cinder.objects.Backup.refresh', side_effect=my_refresh): self.assertRaises(exception.BackupRestoreCancel, service.restore, backup, volume_id, restored_file, False) def test_restore_delta(self): volume_id = fake.VOLUME_ID def _fake_generate_object_name_prefix(self, backup): az = 'az_fake' backup_name = '%s_backup_%s' % (az, backup['id']) volume = 'volume_%s' % (backup['volume_id']) prefix = volume + '_' + backup_name return prefix self.mock_object(nfs.NFSBackupDriver, '_generate_object_name_prefix', _fake_generate_object_name_prefix) self.flags(backup_file_size=(1024 * 8)) self.flags(backup_sha_block_size_bytes=1024) container_name = self.temp_dir.replace(tempfile.gettempdir() + '/', '', 1) self._create_backup_db_entry(volume_id=volume_id, container=container_name, backup_id=fake.BACKUP_ID) service = nfs.NFSBackupDriver(self.ctxt) self.volume_file.seek(0) backup = objects.Backup.get_by_id(self.ctxt, fake.BACKUP_ID) service.backup(backup, self.volume_file) # Create incremental backup with no change to contents self.volume_file.seek(16 * 1024) self.volume_file.write(os.urandom(1024)) self.volume_file.seek(20 * 1024) self.volume_file.write(os.urandom(1024)) self._create_backup_db_entry( volume_id=volume_id, status=objects.fields.BackupStatus.RESTORING, container=container_name, backup_id=fake.BACKUP2_ID, parent_id=fake.BACKUP_ID) self.volume_file.seek(0) deltabackup = objects.Backup.get_by_id(self.ctxt, fake.BACKUP2_ID) service.backup(deltabackup, self.volume_file, True) deltabackup = objects.Backup.get_by_id(self.ctxt, fake.BACKUP2_ID) with tempfile.NamedTemporaryFile() as restored_file: backup = objects.Backup.get_by_id(self.ctxt, 
fake.BACKUP2_ID) service.restore(backup, volume_id, restored_file, False) self.assertTrue(filecmp.cmp(self.volume_file.name, restored_file.name)) def test_delete(self): volume_id = fake.VOLUME_ID self._create_backup_db_entry(volume_id=volume_id) service = nfs.NFSBackupDriver(self.ctxt) backup = objects.Backup.get_by_id(self.ctxt, fake.BACKUP_ID) service.delete_backup(backup) def test_get_compressor(self): service = nfs.NFSBackupDriver(self.ctxt) compressor = service._get_compressor('None') self.assertIsNone(compressor) compressor = service._get_compressor('zlib') self.assertEqual(compressor, zlib) self.assertIsInstance(compressor, tpool.Proxy) compressor = service._get_compressor('bz2') self.assertEqual(compressor, bz2) self.assertIsInstance(compressor, tpool.Proxy) compressor = service._get_compressor('zstd') self.assertEqual(zstd, compressor) self.assertIsInstance(compressor, tpool.Proxy) self.assertRaises(ValueError, service._get_compressor, 'fake') def create_buffer(self, size): # Set up buffer of zeroed bytes return bytearray(size) def test_prepare_output_data_effective_compression(self): """Test compression works on a native thread.""" self.thread_original_method = zlib.compress self.mock_object(zlib, 'compress', side_effect=self._store_thread) service = nfs.NFSBackupDriver(self.ctxt) fake_data = self.create_buffer(128) result = service._prepare_output_data(fake_data) self.assertEqual('zlib', result[0]) self.assertGreater(len(fake_data), len(result[1])) self.assertNotEqual(threading.current_thread(), self.thread_dict['thread']) def test_prepare_output_data_no_compresssion(self): self.flags(backup_compression_algorithm='none') service = nfs.NFSBackupDriver(self.ctxt) fake_data = self.create_buffer(128) result = service._prepare_output_data(fake_data) self.assertEqual('none', result[0]) self.assertEqual(fake_data, result[1]) def test_prepare_output_data_ineffective_compression(self): service = nfs.NFSBackupDriver(self.ctxt) fake_data = self.create_buffer(128) # Pre-compress so that compression in the driver will be ineffective. already_compressed_data = service.compressor.compress(fake_data) result = service._prepare_output_data(already_compressed_data) self.assertEqual('none', result[0]) self.assertEqual(already_compressed_data, result[1]) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/backup/drivers/test_backup_posix.py0000664000175000017500000003111500000000000025500 0ustar00zuulzuul00000000000000# Copyright (c) 2015 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""Tests for Posix backup driver.""" import builtins import os import shutil import tempfile from unittest import mock import uuid from cinder.backup.drivers import posix from cinder.common import config from cinder import context from cinder import objects from cinder.tests.unit import fake_constants as fake from cinder.tests.unit import test CONF = config.CONF FAKE_FILE_SIZE = 52428800 FAKE_SHA_BLOCK_SIZE_BYTES = 1024 FAKE_BACKUP_ENABLE_PROGRESS_TIMER = True FAKE_CONTAINER = 'fake/container' FAKE_BACKUP_ID = fake.BACKUP_ID FAKE_BACKUP_ID_PART1 = fake.BACKUP_ID[:2] FAKE_BACKUP_ID_PART2 = fake.BACKUP_ID[2:4] FAKE_BACKUP_ID_REST = fake.BACKUP_ID[4:] FAKE_BACKUP = {'id': FAKE_BACKUP_ID, 'container': None} UPDATED_CONTAINER_NAME = os.path.join(FAKE_BACKUP_ID_PART1, FAKE_BACKUP_ID_PART2, FAKE_BACKUP_ID) FAKE_BACKUP_MOUNT_POINT_BASE = '/fake/mount-point-base' FAKE_EXPORT_PATH = 'fake/export/path' FAKE_BACKUP_POSIX_PATH = os.path.join(FAKE_BACKUP_MOUNT_POINT_BASE, FAKE_EXPORT_PATH) FAKE_PREFIX = 'prefix-' FAKE_CONTAINER_ENTRIES = [FAKE_PREFIX + 'one', FAKE_PREFIX + 'two', 'three'] EXPECTED_CONTAINER_ENTRIES = [FAKE_PREFIX + 'one', FAKE_PREFIX + 'two'] FAKE_OBJECT_NAME = 'fake-object-name' FAKE_OBJECT_PATH = os.path.join(FAKE_BACKUP_POSIX_PATH, FAKE_CONTAINER, FAKE_OBJECT_NAME) class PosixBackupDriverTestCase(test.TestCase): def setUp(self): super(PosixBackupDriverTestCase, self).setUp() self.ctxt = context.get_admin_context() self.override_config('backup_file_size', FAKE_FILE_SIZE) self.override_config('backup_sha_block_size_bytes', FAKE_SHA_BLOCK_SIZE_BYTES) self.override_config('backup_enable_progress_timer', FAKE_BACKUP_ENABLE_PROGRESS_TIMER) self.override_config('backup_posix_path', FAKE_BACKUP_POSIX_PATH) self.mock_object(posix, 'LOG') self.driver = posix.PosixBackupDriver(self.ctxt) def test_init(self): drv = posix.PosixBackupDriver(self.ctxt) self.assertEqual(FAKE_BACKUP_POSIX_PATH, drv.backup_path) def test_update_container_name_container_passed(self): result = self.driver.update_container_name(FAKE_BACKUP, FAKE_CONTAINER) self.assertEqual(FAKE_CONTAINER, result) def test_update_container_na_container_passed(self): result = self.driver.update_container_name(FAKE_BACKUP, None) self.assertEqual(UPDATED_CONTAINER_NAME, result) def test_put_container(self): self.mock_object(os.path, 'exists', return_value=False) self.mock_object(os, 'makedirs') self.mock_object(os, 'chmod') path = os.path.join(self.driver.backup_path, FAKE_CONTAINER) self.driver.put_container(FAKE_CONTAINER) os.path.exists.assert_called_once_with(path) os.makedirs.assert_called_once_with(path) os.chmod.assert_called_once_with(path, 0o770) def test_put_container_already_exists(self): self.mock_object(os.path, 'exists', return_value=True) self.mock_object(os, 'makedirs') self.mock_object(os, 'chmod') path = os.path.join(self.driver.backup_path, FAKE_CONTAINER) self.driver.put_container(FAKE_CONTAINER) os.path.exists.assert_called_once_with(path) self.assertEqual(0, os.makedirs.call_count) self.assertEqual(0, os.chmod.call_count) def test_put_container_exception(self): self.mock_object(os.path, 'exists', return_value=False) self.mock_object(os, 'makedirs', side_effect=OSError) self.mock_object(os, 'chmod') path = os.path.join(self.driver.backup_path, FAKE_CONTAINER) self.assertRaises(OSError, self.driver.put_container, FAKE_CONTAINER) os.path.exists.assert_called_once_with(path) os.makedirs.assert_called_once_with(path) self.assertEqual(0, os.chmod.call_count) def test_get_container_entries(self): self.mock_object(os, 
'listdir', return_value=FAKE_CONTAINER_ENTRIES) result = self.driver.get_container_entries(FAKE_CONTAINER, FAKE_PREFIX) self.assertEqual(EXPECTED_CONTAINER_ENTRIES, result) def test_get_container_entries_no_list(self): self.mock_object(os, 'listdir', return_value=[]) result = self.driver.get_container_entries(FAKE_CONTAINER, FAKE_PREFIX) self.assertEqual([], result) def test_get_container_entries_no_match(self): self.mock_object(os, 'listdir', return_value=FAKE_CONTAINER_ENTRIES) result = self.driver.get_container_entries(FAKE_CONTAINER, FAKE_PREFIX + 'garbage') self.assertEqual([], result) def test_get_object_writer(self): self.mock_object(builtins, 'open', mock.mock_open()) self.mock_object(os, 'chmod') self.driver.get_object_writer(FAKE_CONTAINER, FAKE_OBJECT_NAME) os.chmod.assert_called_once_with(FAKE_OBJECT_PATH, 0o660) builtins.open.assert_called_once_with(FAKE_OBJECT_PATH, 'wb') def test_get_object_reader(self): self.mock_object(builtins, 'open', mock.mock_open()) self.driver.get_object_reader(FAKE_CONTAINER, FAKE_OBJECT_NAME) builtins.open.assert_called_once_with(FAKE_OBJECT_PATH, 'rb') def test_delete_object(self): self.mock_object(os, 'remove') self.driver.delete_object(FAKE_CONTAINER, FAKE_OBJECT_NAME) @mock.patch.object(posix.timeutils, 'utcnow') def test_generate_object_name_prefix(self, utcnow_mock): timestamp = '20170518102205' utcnow_mock.return_value.strftime.return_value = timestamp backup = objects.Backup(self.ctxt, volume_id=fake.VOLUME_ID, id=fake.BACKUP_ID) res = self.driver._generate_object_name_prefix(backup) expected = 'volume_%s_%s_backup_%s' % (backup.volume_id, timestamp, backup.id) self.assertEqual(expected, res) class PosixBackupTestWithData(test.TestCase): def _create_volume_db_entry(self, display_name='test_volume', display_description='this is a test volume', status='backing-up', previous_status='available', size=1, host='testhost', encryption_key_id=None, project_id=None): """Create a volume entry in the DB. Return the entry ID """ vol = {} vol['size'] = size vol['host'] = host vol['user_id'] = fake.USER_ID vol['project_id'] = project_id or fake.PROJECT_ID vol['status'] = status vol['display_name'] = display_name vol['display_description'] = display_description vol['attach_status'] = objects.fields.VolumeAttachStatus.DETACHED vol['availability_zone'] = '1' vol['previous_status'] = previous_status vol['encryption_key_id'] = encryption_key_id vol['volume_type_id'] = fake.VOLUME_TYPE_ID volume = objects.Volume(context=self.ctxt, **vol) volume.create() return volume.id def _create_backup_db_entry(self, volume_id=str(uuid.uuid4()), restore_volume_id=None, display_name='test_backup', display_description='this is a test backup', container='volumebackups', status=objects.fields.BackupStatus.CREATING, size=1, object_count=0, project_id=str(uuid.uuid4()), service=None, temp_volume_id=None, temp_snapshot_id=None, snapshot_id=None, metadata=None, parent_id=None, encryption_key_id=None): """Create a backup entry in the DB. 
Return the entry ID """ kwargs = {} kwargs['volume_id'] = volume_id kwargs['restore_volume_id'] = restore_volume_id kwargs['user_id'] = str(uuid.uuid4()) kwargs['project_id'] = project_id kwargs['host'] = 'testhost' kwargs['availability_zone'] = '1' kwargs['display_name'] = display_name kwargs['display_description'] = display_description kwargs['container'] = container kwargs['status'] = status kwargs['fail_reason'] = '' kwargs['service'] = service or CONF.backup_driver kwargs['snapshot_id'] = snapshot_id kwargs['parent_id'] = parent_id kwargs['size'] = size kwargs['object_count'] = object_count kwargs['temp_volume_id'] = temp_volume_id kwargs['temp_snapshot_id'] = temp_snapshot_id kwargs['metadata'] = metadata or {} kwargs['encryption_key_id'] = encryption_key_id backup = objects.Backup(context=self.ctxt, **kwargs) backup.create() return backup def setUp(self): super(PosixBackupTestWithData, self).setUp() self.tempdir = tempfile.mkdtemp() self.addCleanup(shutil.rmtree, self.tempdir) backup_path = os.path.join(self.tempdir, "backup-dir") os.mkdir(backup_path) self.ctxt = context.get_admin_context() self.override_config('backup_file_size', FAKE_FILE_SIZE) self.override_config('backup_sha_block_size_bytes', FAKE_SHA_BLOCK_SIZE_BYTES) self.override_config('backup_enable_progress_timer', FAKE_BACKUP_ENABLE_PROGRESS_TIMER) self.override_config('backup_posix_path', backup_path) self.mock_object(posix, 'LOG') self.driver = posix.PosixBackupDriver(self.ctxt) mock_volume_filename = "restore-volume" self.vol_path = os.path.join(self.tempdir, mock_volume_filename) def test_restore_backup_with_sparseness(self): """Test a sparse backup restoration.""" vol_size = 1 vol_id = self._create_volume_db_entry(status='restoring-backup', size=vol_size) chunk_size = 1024 * 1024 obj_data = b'01234567890123456789' backup = self._create_backup_db_entry( volume_id=vol_id, status=objects.fields.BackupStatus.RESTORING) with tempfile.NamedTemporaryFile() as volume_file: # First, we create a fake volume with a hole. Although we know that # the driver only detects zeroes, we create a real file with a hole # as a way to future-proof this a little. Also, it's easier. # Miraclously, tmpfs supports files with actual holes. volume_file.seek(3 * chunk_size) volume_file.write(obj_data) # And then, we immediately run a backup on the fake volume. # We don't attempt to re-create the backup volume by hand. volume_file.seek(0) self.driver.backup(backup, volume_file) # Next, we restore, excercising the code under test. with open(self.vol_path, 'wb') as volume_file: self.driver.restore(backup, vol_id, volume_file, True) # Finally, we examine the fake volume into which we restored. with open(self.vol_path, 'rb') as volume_file: volume_file.seek(3 * chunk_size) question_data = volume_file.read(len(obj_data)) self.assertEqual(obj_data, question_data) statb = os.stat(self.vol_path) self.assertLess(statb.st_blocks * 512, (3 * chunk_size + 512) / 512) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/backup/drivers/test_backup_s3.py0000664000175000017500000006075200000000000024674 0ustar00zuulzuul00000000000000# Copyright (C) 2020 leafcloud b.v. # Copyright (C) 2020 FUJITSU LIMITED # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Tests for Backup s3 code.""" import bz2 import filecmp import hashlib import os import shutil import tempfile import threading from unittest import mock import zlib from eventlet import tpool from moto import mock_aws from oslo_utils import units from cinder.backup.drivers import s3 as s3_dr from cinder import context from cinder import db from cinder import exception from cinder.i18n import _ from cinder import objects from cinder.tests.unit.backup import fake_s3_client from cinder.tests.unit import fake_constants as fake from cinder.tests.unit import test class FakeMD5(object): def __init__(self, *args, **kwargs): pass @classmethod def digest(cls): return 's3cindermd5'.encode('utf-8') @classmethod def hexdigest(cls): return 's3cindermd5' def s3_client(func): @mock.patch.object(s3_dr.boto3, 'client', fake_s3_client.FakeS3Boto3.Client) @mock.patch.object(hashlib, 'md5', FakeMD5) def func_wrapper(self, *args, **kwargs): return func(self, *args, **kwargs) return func_wrapper def fake_backup_metadata(self, backup, object_meta): raise exception.BackupDriverException(reason=_('fake')) def fake_delete(self, backup): raise exception.BackupOperationError() def _fake_delete_object(self, bucket_name, object_name): raise AssertionError('delete_object method should not be called.') class BackupS3TestCase(test.TestCase): """Test Case for s3.""" _DEFAULT_VOLUME_ID = 'c7eb81f4-bec6-4730-a60f-8888885874df' def _create_volume_db_entry(self, volume_id=_DEFAULT_VOLUME_ID): vol = {'id': volume_id, 'size': 1, 'status': 'available', 'volume_type_id': self.vt['id']} return db.volume_create(self.ctxt, vol)['id'] def _create_backup_db_entry(self, volume_id=_DEFAULT_VOLUME_ID, container=s3_dr.CONF.backup_s3_store_bucket, parent_id=None, status=None, service_metadata=None): try: db.volume_get(self.ctxt, volume_id) except exception.NotFound: self._create_volume_db_entry(volume_id=volume_id) kwargs = {'size': 1, 'container': container, 'volume_id': volume_id, 'parent_id': parent_id, 'user_id': fake.USER_ID, 'project_id': fake.PROJECT_ID, 'status': status, 'service_metadata': service_metadata, } backup = objects.Backup(context=self.ctxt, **kwargs) backup.create() return backup def _write_effective_compression_file(self, data_size): """Ensure file contents can be effectively compressed.""" self.volume_file.seek(0) self.volume_file.write(bytes([65] * data_size)) self.volume_file.seek(0) def setUp(self): super(BackupS3TestCase, self).setUp() self.ctxt = context.get_admin_context() self.volume_file = tempfile.NamedTemporaryFile() self.temp_dir = tempfile.mkdtemp() self.addCleanup(self.volume_file.close) # Remove tempdir. 
self.addCleanup(shutil.rmtree, self.temp_dir) self.size_volume_file = 0 for _i in range(0, 64): self.volume_file.write(os.urandom(units.Ki)) self.size_volume_file += 1024 notify_patcher = mock.patch( 'cinder.volume.volume_utils.notify_about_backup_usage') notify_patcher.start() self.addCleanup(notify_patcher.stop) self.flags(backup_s3_endpoint_url=None) self.flags(backup_s3_store_access_key='s3cinderaccesskey') self.flags(backup_s3_store_secret_key='s3cindersecretkey') self.flags(backup_s3_sse_customer_key='s3aeskey') @mock_aws def test_backup_correctly_configured(self): self.service = s3_dr.S3BackupDriver(self.ctxt) self.assertIsInstance(self.service, s3_dr.S3BackupDriver) @mock_aws def test_backup(self): volume_id = 'b09b1ad4-5f0e-4d3f-8b9e-0000004f5ec2' container_name = 'test-bucket' backup = self._create_backup_db_entry(volume_id=volume_id, container=container_name) service = s3_dr.S3BackupDriver(self.ctxt) self.volume_file.seek(0) result = service.backup(backup, self.volume_file) self.assertIsNone(result) @mock_aws def test_backup_uncompressed(self): volume_id = '2b9f10a3-42b4-4fdf-b316-000000ceb039' backup = self._create_backup_db_entry(volume_id=volume_id) self.flags(backup_compression_algorithm='none') service = s3_dr.S3BackupDriver(self.ctxt) self.volume_file.seek(0) service.backup(backup, self.volume_file) @mock_aws def test_backup_bz2(self): volume_id = 'dc0fee35-b44e-4f13-80d6-000000e1b50c' backup = self._create_backup_db_entry(volume_id=volume_id) self.flags(backup_compression_algorithm='bz2') service = s3_dr.S3BackupDriver(self.ctxt) self._write_effective_compression_file(self.size_volume_file) service.backup(backup, self.volume_file) @mock_aws def test_backup_zlib(self): volume_id = '5cea0535-b6fb-4531-9a38-000000bea094' backup = self._create_backup_db_entry(volume_id=volume_id) self.flags(backup_compression_algorithm='zlib') service = s3_dr.S3BackupDriver(self.ctxt) self._write_effective_compression_file(self.size_volume_file) service.backup(backup, self.volume_file) @mock_aws def test_backup_zstd(self): volume_id = '471910a0-a197-4259-9c50-0fc3d6a07dbc' backup = self._create_backup_db_entry(volume_id=volume_id) self.flags(backup_compression_algorithm='zstd') service = s3_dr.S3BackupDriver(self.ctxt) self._write_effective_compression_file(self.size_volume_file) service.backup(backup, self.volume_file) @mock_aws def test_backup_default_container(self): volume_id = '9552017f-c8b9-4e4e-a876-00000053349c' backup = self._create_backup_db_entry(volume_id=volume_id, container=None) service = s3_dr.S3BackupDriver(self.ctxt) self.volume_file.seek(0) service.backup(backup, self.volume_file) self.assertEqual('volumebackups', backup.container) @mock_aws def test_backup_custom_container(self): volume_id = '1da9859e-77e5-4731-bd58-000000ca119e' container_name = 'fake99' backup = self._create_backup_db_entry(volume_id=volume_id, container=container_name) service = s3_dr.S3BackupDriver(self.ctxt) self.volume_file.seek(0) service.backup(backup, self.volume_file) self.assertEqual(container_name, backup.container) @mock_aws def test_backup_shafile(self): volume_id = '6465dad4-22af-48f7-8a1a-000000218907' backup = self._create_backup_db_entry(volume_id=volume_id) service = s3_dr.S3BackupDriver(self.ctxt) self.volume_file.seek(0) service.backup(backup, self.volume_file) # Verify sha contents content1 = service._read_sha256file(backup) self.assertEqual(64 * units.Ki / content1['chunk_size'], len(content1['sha256s'])) @mock_aws def test_backup_cmp_shafiles(self): volume_id = 
'1a99ac67-c534-4fe3-b472-0000001785e2' backup = self._create_backup_db_entry(volume_id=volume_id) service1 = s3_dr.S3BackupDriver(self.ctxt) self.volume_file.seek(0) service1.backup(backup, self.volume_file) # Create incremental backup with no change to contents deltabackup = self._create_backup_db_entry(volume_id=volume_id, container=None, parent_id=backup.id) service2 = s3_dr.S3BackupDriver(self.ctxt) self.volume_file.seek(0) service2.backup(deltabackup, self.volume_file) # Compare shas from both files content1 = service1._read_sha256file(backup) content2 = service2._read_sha256file(deltabackup) self.assertEqual(len(content1['sha256s']), len(content2['sha256s'])) self.assertEqual(set(content1['sha256s']), set(content2['sha256s'])) @mock_aws def test_backup_delta_two_objects_change(self): volume_id = '30dab288-265a-4583-9abe-000000d42c67' self.flags(backup_s3_object_size=8 * units.Ki) self.flags(backup_s3_block_size=units.Ki) backup = self._create_backup_db_entry(volume_id=volume_id) service1 = s3_dr.S3BackupDriver(self.ctxt) self.volume_file.seek(0) service1.backup(backup, self.volume_file) # Create incremental backup with no change to contents self.volume_file.seek(2 * 8 * units.Ki) self.volume_file.write(os.urandom(units.Ki)) self.volume_file.seek(4 * 8 * units.Ki) self.volume_file.write(os.urandom(units.Ki)) deltabackup = self._create_backup_db_entry(volume_id=volume_id, container=None, parent_id=backup.id) service2 = s3_dr.S3BackupDriver(self.ctxt) self.volume_file.seek(0) service2.backup(deltabackup, self.volume_file) content1 = service1._read_sha256file(backup) content2 = service2._read_sha256file(deltabackup) # Verify that two shas are changed at index 16 and 32 self.assertNotEqual(content1['sha256s'][16], content2['sha256s'][16]) self.assertNotEqual(content1['sha256s'][32], content2['sha256s'][32]) @mock_aws def test_backup_delta_two_blocks_in_object_change(self): volume_id = 'b943e84f-aa67-4331-9ab2-000000cf19ba' self.flags(backup_s3_object_size=8 * units.Ki) self.flags(backup_s3_block_size=units.Ki) backup = self._create_backup_db_entry(volume_id=volume_id) service1 = s3_dr.S3BackupDriver(self.ctxt) self.volume_file.seek(0) service1.backup(backup, self.volume_file) # Create incremental backup with no change to contents self.volume_file.seek(16 * units.Ki) self.volume_file.write(os.urandom(units.Ki)) self.volume_file.seek(20 * units.Ki) self.volume_file.write(os.urandom(units.Ki)) deltabackup = self._create_backup_db_entry(volume_id=volume_id, container=None, parent_id=backup.id) service2 = s3_dr.S3BackupDriver(self.ctxt) self.volume_file.seek(0) service2.backup(deltabackup, self.volume_file) # Verify that two shas are changed at index 16 and 20 content1 = service1._read_sha256file(backup) content2 = service2._read_sha256file(deltabackup) self.assertNotEqual(content1['sha256s'][16], content2['sha256s'][16]) self.assertNotEqual(content1['sha256s'][20], content2['sha256s'][20]) @mock_aws @mock.patch('cinder.backup.drivers.s3.S3BackupDriver.' '_send_progress_end') @mock.patch('cinder.backup.drivers.s3.S3BackupDriver.' '_send_progress_notification') def test_backup_default_container_notify(self, _send_progress, _send_progress_end): volume_id = '87dd0eed-2598-4ebd-8ebb-000000ac578a' backup = self._create_backup_db_entry(volume_id=volume_id, container=None) # If the backup_object_number_per_notification is set to 1, # the _send_progress method will be called for sure. 
s3_dr.CONF.set_override("backup_object_number_per_notification", 1) s3_dr.CONF.set_override("backup_s3_enable_progress_timer", False) service = s3_dr.S3BackupDriver(self.ctxt) self.volume_file.seek(0) service.backup(backup, self.volume_file) self.assertTrue(_send_progress.called) self.assertTrue(_send_progress_end.called) # If the backup_object_number_per_notification is increased to # another value, the _send_progress method will not be called. _send_progress.reset_mock() _send_progress_end.reset_mock() s3_dr.CONF.set_override("backup_object_number_per_notification", 10) service = s3_dr.S3BackupDriver(self.ctxt) self.volume_file.seek(0) service.backup(backup, self.volume_file) self.assertFalse(_send_progress.called) self.assertTrue(_send_progress_end.called) # If the timer is enabled, the _send_progress will be called, # since the timer can trigger the progress notification. _send_progress.reset_mock() _send_progress_end.reset_mock() s3_dr.CONF.set_override("backup_object_number_per_notification", 10) s3_dr.CONF.set_override("backup_s3_enable_progress_timer", True) service = s3_dr.S3BackupDriver(self.ctxt) self.volume_file.seek(0) service.backup(backup, self.volume_file) self.assertTrue(_send_progress.called) self.assertTrue(_send_progress_end.called) @mock_aws @mock.patch.object(s3_dr.S3BackupDriver, '_backup_metadata', fake_backup_metadata) def test_backup_backup_metadata_fail(self): """Test of when an exception occurs in backup(). In backup(), after an exception occurs in self._backup_metadata(), we want to check the process of an exception handler. """ volume_id = '020d9142-339c-4876-a445-000000f1520c' backup = self._create_backup_db_entry(volume_id=volume_id) self.flags(backup_compression_algorithm='none') service = s3_dr.S3BackupDriver(self.ctxt) self.volume_file.seek(0) # We expect that an exception be notified directly. self.assertRaises(exception.BackupDriverException, service.backup, backup, self.volume_file) @mock_aws @mock.patch.object(s3_dr.S3BackupDriver, '_backup_metadata', fake_backup_metadata) @mock.patch.object(s3_dr.S3BackupDriver, 'delete_backup', fake_delete) def test_backup_backup_metadata_fail2(self): """Test of when an exception occurs in an exception handler. In backup(), after an exception occurs in self._backup_metadata(), we want to check the process when the second exception occurs in self.delete_backup(). """ volume_id = '2164421d-f181-4db7-b9bd-000000eeb628' backup = self._create_backup_db_entry(volume_id=volume_id) self.flags(backup_compression_algorithm='none') service = s3_dr.S3BackupDriver(self.ctxt) self.volume_file.seek(0) # We expect that the second exception is notified. 
self.assertRaises(exception.BackupOperationError, service.backup, backup, self.volume_file) @mock_aws def test_delete(self): volume_id = '9ab256c8-3175-4ad8-baa1-0000007f9d31' object_prefix = 'test_prefix' backup = self._create_backup_db_entry(volume_id=volume_id, service_metadata=object_prefix) service = s3_dr.S3BackupDriver(self.ctxt) service.delete_backup(backup) @mock_aws @mock.patch.object(s3_dr.S3BackupDriver, 'delete_object', _fake_delete_object) def test_delete_without_object_prefix(self): volume_id = 'ee30d649-72a6-49a5-b78d-000000edb6b1' backup = self._create_backup_db_entry(volume_id=volume_id) service = s3_dr.S3BackupDriver(self.ctxt) service.delete_backup(backup) @mock_aws def test_get_compressor(self): service = s3_dr.S3BackupDriver(self.ctxt) compressor = service._get_compressor('None') self.assertIsNone(compressor) compressor = service._get_compressor('zlib') self.assertEqual(zlib, compressor) self.assertIsInstance(compressor, tpool.Proxy) compressor = service._get_compressor('bz2') self.assertEqual(bz2, compressor) self.assertIsInstance(compressor, tpool.Proxy) self.assertRaises(ValueError, service._get_compressor, 'fake') @mock_aws def test_prepare_output_data_effective_compression(self): """Test compression works on a native thread.""" # Use dictionary to share data between threads thread_dict = {} original_compress = zlib.compress def my_compress(data): thread_dict['compress'] = threading.current_thread() return original_compress(data) self.mock_object(zlib, 'compress', side_effect=my_compress) service = s3_dr.S3BackupDriver(self.ctxt) # Set up buffer of 128 zeroed bytes fake_data = b'\0' * 128 result = service._prepare_output_data(fake_data) self.assertEqual('zlib', result[0]) self.assertGreater(len(fake_data), len(result[1])) self.assertNotEqual(threading.current_thread(), thread_dict['compress']) @mock_aws def test_prepare_output_data_no_compression(self): self.flags(backup_compression_algorithm='none') service = s3_dr.S3BackupDriver(self.ctxt) # Set up buffer of 128 zeroed bytes fake_data = b'\0' * 128 result = service._prepare_output_data(fake_data) self.assertEqual('none', result[0]) self.assertEqual(fake_data, result[1]) @mock_aws def test_prepare_output_data_ineffective_compression(self): service = s3_dr.S3BackupDriver(self.ctxt) # Set up buffer of 128 zeroed bytes fake_data = b'\0' * 128 # Pre-compress so that compression in the driver will be ineffective. already_compressed_data = service.compressor.compress(fake_data) result = service._prepare_output_data(already_compressed_data) self.assertEqual('none', result[0]) self.assertEqual(already_compressed_data, result[1]) @mock_aws def test_no_config_option(self): # With no config option to connect driver should raise exception. 
self.flags(backup_s3_endpoint_url=None) self.flags(backup_s3_store_access_key=None) self.flags(backup_s3_store_secret_key=None) self.assertRaises(exception.InvalidConfigurationValue, s3_dr.S3BackupDriver.check_for_setup_error, self) @s3_client def test_create_backup_fail(self): volume_id = 'b09b1ad4-5f0e-4d3f-8b9e-0000004f5ec3' container_name = 's3_api_failure' backup = self._create_backup_db_entry(volume_id=volume_id, container=container_name) service = s3_dr.S3BackupDriver(self.ctxt) self.volume_file.seek(0) self.assertRaises(s3_dr.S3ClientError, service.backup, backup, self.volume_file) @s3_client def test_create_backup_faili2(self): volume_id = '2a59c20e-0b79-4f57-aa63-5be208df48f6' container_name = 's3_connection_error' backup = self._create_backup_db_entry(volume_id=volume_id, container=container_name) service = s3_dr.S3BackupDriver(self.ctxt) self.volume_file.seek(0) self.assertRaises(s3_dr.S3ConnectionFailure, service.backup, backup, self.volume_file) @mock_aws def test_restore(self): volume_id = 'c2a81f09-f480-4325-8424-00000071685b' backup = self._create_backup_db_entry( volume_id=volume_id, status=objects.fields.BackupStatus.RESTORING) service = s3_dr.S3BackupDriver(self.ctxt) self.volume_file.seek(0) service.backup(backup, self.volume_file) with tempfile.NamedTemporaryFile() as volume_file: service.restore(backup, volume_id, volume_file, False) @mock_aws def test_restore_delta(self): volume_id = '04d83506-bcf7-4ff5-9c65-00000051bd2e' self.flags(backup_s3_object_size=8 * units.Ki) self.flags(backup_s3_block_size=units.Ki) backup = self._create_backup_db_entry(volume_id=volume_id) service1 = s3_dr.S3BackupDriver(self.ctxt) self.volume_file.seek(0) service1.backup(backup, self.volume_file) # Create incremental backup with no change to contents self.volume_file.seek(16 * units.Ki) self.volume_file.write(os.urandom(units.Ki)) self.volume_file.seek(20 * units.Ki) self.volume_file.write(os.urandom(units.Ki)) deltabackup = self._create_backup_db_entry( volume_id=volume_id, status=objects.fields.BackupStatus.RESTORING, parent_id=backup.id) self.volume_file.seek(0) service2 = s3_dr.S3BackupDriver(self.ctxt) service2.backup(deltabackup, self.volume_file, True) with tempfile.NamedTemporaryFile() as restored_file: service2.restore(deltabackup, volume_id, restored_file, False) self.assertTrue(filecmp.cmp(self.volume_file.name, restored_file.name)) @s3_client def test_restore_fail(self): volume_id = '651496c7-0d8b-45f3-bfe8-9ef6ad30910f' container_name = 's3_api_failure' backup = self._create_backup_db_entry(volume_id=volume_id, container=container_name) service = s3_dr.S3BackupDriver(self.ctxt) with tempfile.NamedTemporaryFile() as volume_file: self.assertRaises(s3_dr.S3ClientError, service.restore, backup, volume_id, volume_file, False) @s3_client def test_restore_faili2(self): volume_id = '87f3f2c2-1a79-48c1-9d98-47c4cab7bf00' container_name = 's3_connection_error' backup = self._create_backup_db_entry(volume_id=volume_id, container=container_name) service = s3_dr.S3BackupDriver(self.ctxt) with tempfile.NamedTemporaryFile() as volume_file: self.assertRaises(s3_dr.S3ConnectionFailure, service.restore, backup, volume_id, volume_file, False) @mock_aws def test_backup_md5_validation(self): volume_id = 'c0a79eb2-ef56-4de2-b3b9-3861fcdf7fad' self.flags(backup_s3_md5_validation=True) backup = self._create_backup_db_entry(volume_id=volume_id) service = s3_dr.S3BackupDriver(self.ctxt) self.volume_file.seek(0) service.backup(backup, self.volume_file) @mock_aws def test_backup_sse(self): volume_id = 
'c0a79eb2-ef56-4de2-b3b9-3861fcdf7fad' self.flags(backup_s3_sse_customer_algorithm='AES256') self.flags(backup_s3_sse_customer_key='sse_key') backup = self._create_backup_db_entry(volume_id=volume_id) service = s3_dr.S3BackupDriver(self.ctxt) self.volume_file.seek(0) service.backup(backup, self.volume_file) @mock_aws def test_restore_sse(self): volume_id = 'c0a79eb2-ef56-4de2-b3b9-3861fcdf7fad' self.flags(backup_s3_sse_customer_algorithm='AES256') self.flags(backup_s3_sse_customer_key='sse_key') backup = self._create_backup_db_entry( volume_id=volume_id, status=objects.fields.BackupStatus.RESTORING) service = s3_dr.S3BackupDriver(self.ctxt) self.volume_file.seek(0) service.backup(backup, self.volume_file) with tempfile.NamedTemporaryFile() as volume_file: service.restore(backup, volume_id, volume_file, False) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/backup/drivers/test_backup_swift.py0000664000175000017500000013737000000000000025504 0ustar00zuulzuul00000000000000# Copyright (C) 2012 Hewlett-Packard Development Company, L.P. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Tests for Backup swift code.""" import bz2 import filecmp import hashlib import os import shutil import tempfile import threading from unittest import mock import zlib import ddt from eventlet import tpool from oslo_config import cfg from swiftclient import client as swift import zstd from cinder.backup import chunkeddriver from cinder.backup.drivers import swift as swift_dr from cinder import context from cinder import db from cinder import exception from cinder.i18n import _ from cinder import objects from cinder import service_auth from cinder.tests.unit.backup import fake_swift_client from cinder.tests.unit.backup import fake_swift_client2 from cinder.tests.unit import fake_constants as fake from cinder.tests.unit import test CONF = cfg.CONF ANY = mock.ANY def fake_md5(arg, usedforsecurity=False): class result(object): def hexdigest(self): return 'fake-md5-sum' ret = result() return ret @ddt.ddt class BackupSwiftTestCase(test.TestCase): """Test Case for swift.""" _DEFAULT_VOLUME_ID = 'c7eb81f4-bec6-4730-a60f-8888885874df' def _create_volume_db_entry(self, volume_id=_DEFAULT_VOLUME_ID): vol = {'id': volume_id, 'size': 1, 'status': 'available', 'volume_type_id': self.vt['id']} return db.volume_create(self.ctxt, vol)['id'] def _create_backup_db_entry(self, volume_id=_DEFAULT_VOLUME_ID, container='test-container', backup_id=fake.BACKUP_ID, parent_id=None, service_metadata=None): try: db.volume_get(self.ctxt, volume_id) except exception.NotFound: self._create_volume_db_entry(volume_id=volume_id) backup = {'id': backup_id, 'size': 1, 'container': container, 'volume_id': volume_id, 'parent_id': parent_id, 'user_id': fake.USER_ID, 'project_id': fake.PROJECT_ID, 'service_metadata': service_metadata, } return db.backup_create(self.ctxt, backup)['id'] def _write_effective_compression_file(self, data_size): """Ensure file 
contents can be effectively compressed.""" self.volume_file.seek(0) self.volume_file.write(bytes([65] * data_size)) self.volume_file.seek(0) def setUp(self): super(BackupSwiftTestCase, self).setUp() service_catalog = [{u'type': u'object-store', u'name': u'swift', u'endpoints': [{ u'publicURL': u'http://example.com'}]}, {u'type': u'identity', u'name': u'keystone', u'endpoints': [{ u'publicURL': u'http://example.com'}]}] self.ctxt = context.RequestContext(user_id=fake.USER_ID, is_admin=True, service_catalog=service_catalog) self.mock_object(swift, 'Connection', fake_swift_client.FakeSwiftClient.Connection) self.mock_object(hashlib, 'md5', fake_md5) self.volume_file = tempfile.NamedTemporaryFile() self.temp_dir = tempfile.mkdtemp() self.addCleanup(self.volume_file.close) # Remove tempdir. self.addCleanup(shutil.rmtree, self.temp_dir) self.size_volume_file = 0 for _i in range(0, 64): self.volume_file.write(os.urandom(1024)) self.size_volume_file += 1024 notify_patcher = mock.patch( 'cinder.volume.volume_utils.notify_about_backup_usage') notify_patcher.start() self.addCleanup(notify_patcher.stop) def test_backup_swift_url(self): self.ctxt.service_catalog = [{u'type': u'object-store', u'name': u'swift', u'endpoints': [{ u'adminURL': u'http://example.com'}]}, {u'type': u'identity', u'name': u'keystone', u'endpoints': [{ u'publicURL': u'http://example.com'}]}] self.assertRaises(exception.BackupDriverException, swift_dr.SwiftBackupDriver, self.ctxt) def test_backup_swift_auth_url(self): self.ctxt.service_catalog = [{u'type': u'object-store', u'name': u'swift', u'endpoints': [{ u'publicURL': u'http://example.com'}]}, {u'type': u'identity', u'name': u'keystone', u'endpoints': [{ u'adminURL': u'http://example.com'}]}] self.override_config("backup_swift_auth", "single_user") self.override_config("backup_swift_user", "fake_user") self.assertRaises(exception.BackupDriverException, swift_dr.SwiftBackupDriver, self.ctxt) def test_backup_swift_url_conf(self): self.ctxt.service_catalog = [{u'type': u'object-store', u'name': u'swift', u'endpoints': [{ u'adminURL': u'http://example.com'}]}, {u'type': u'identity', u'name': u'keystone', u'endpoints': [{ u'publicURL': u'http://example.com'}]}] self.ctxt.project_id = fake.PROJECT_ID self.override_config("backup_swift_url", "http://public.example.com/") backup = swift_dr.SwiftBackupDriver(self.ctxt) self.assertEqual("%s%s" % (CONF.backup_swift_url, self.ctxt.project_id), backup.swift_url) def test_backup_swift_url_conf_nocatalog(self): self.ctxt.service_catalog = [] self.ctxt.project_id = fake.PROJECT_ID self.override_config("backup_swift_url", "http://public.example.com/") backup = swift_dr.SwiftBackupDriver(self.ctxt) self.assertEqual("%s%s" % (CONF.backup_swift_url, self.ctxt.project_id), backup.swift_url) def test_backup_swift_auth_url_conf(self): self.ctxt.service_catalog = [{u'type': u'object-store', u'name': u'swift', u'endpoints': [{ u'publicURL': u'http://example.com'}]}, {u'type': u'identity', u'name': u'keystone', u'endpoints': [{ u'adminURL': u'http://example.com'}]}] self.ctxt.project_id = fake.PROJECT_ID self.override_config("backup_swift_auth_url", "http://public.example.com") self.override_config("backup_swift_auth", "single_user") self.override_config("backup_swift_user", "fake_user") backup = swift_dr.SwiftBackupDriver(self.ctxt) self.assertEqual(CONF.backup_swift_auth_url, backup.auth_url) def test_backup_swift_info(self): self.override_config("swift_catalog_info", "dummy") self.assertRaises(exception.BackupDriverException, 
swift_dr.SwiftBackupDriver, self.ctxt) @ddt.data( {'auth': 'single_user', 'insecure': True}, {'auth': 'single_user', 'insecure': False}, {'auth': 'per_user', 'insecure': True}, {'auth': 'per_user', 'insecure': False}, ) @ddt.unpack def test_backup_swift_auth_insecure(self, auth, insecure): self.override_config("backup_swift_auth_insecure", insecure) self.override_config('backup_swift_auth', auth) if auth == 'single_user': self.override_config('backup_swift_user', 'swift-user') mock_connection = self.mock_object(swift, 'Connection') swift_dr.SwiftBackupDriver(self.ctxt) if auth == 'single_user': mock_connection.assert_called_once_with(insecure=insecure, authurl=ANY, auth_version=ANY, tenant_name=ANY, user=ANY, key=ANY, os_options={}, retries=ANY, starting_backoff=ANY, cacert=ANY) else: mock_connection.assert_called_once_with(insecure=insecure, retries=ANY, preauthurl=ANY, preauthtoken=ANY, starting_backoff=ANY, cacert=ANY) @ddt.data( {'auth_version': '3', 'user_domain': 'UserDomain', 'project': 'Project', 'project_domain': 'ProjectDomain'}, {'auth_version': '3', 'user_domain': None, 'project': 'Project', 'project_domain': 'ProjectDomain'}, {'auth_version': '3', 'user_domain': 'UserDomain', 'project': None, 'project_domain': 'ProjectDomain'}, {'auth_version': '3', 'user_domain': 'UserDomain', 'project': 'Project', 'project_domain': None}, {'auth_version': '3', 'user_domain': None, 'project': None, 'project_domain': None}, ) @ddt.unpack def test_backup_swift_auth_v3_single_user(self, auth_version, user_domain, project, project_domain): self.override_config('backup_swift_auth', 'single_user') self.override_config('backup_swift_user', 'swift-user') self.override_config('backup_swift_auth_version', auth_version) self.override_config('backup_swift_user_domain', user_domain) self.override_config('backup_swift_project', project) self.override_config('backup_swift_project_domain', project_domain) os_options = {} if user_domain is not None: os_options['user_domain_name'] = user_domain if project is not None: os_options['project_name'] = project if project_domain is not None: os_options['project_domain_name'] = project_domain mock_connection = self.mock_object(swift, 'Connection') swift_dr.SwiftBackupDriver(self.ctxt) mock_connection.assert_called_once_with(insecure=ANY, authurl=ANY, auth_version=auth_version, tenant_name=ANY, user=ANY, key=ANY, os_options=os_options, retries=ANY, starting_backoff=ANY, cacert=ANY) def _test_backup_swift_service_auth_headers_no_impact(self): service = swift_dr.SwiftBackupDriver(self.ctxt) self.assertIsNone(service._headers()) current = {'some': 'header'} self.assertEqual(service._headers(current), current) def test_backup_swift_service_auth_headers_disabled(self): self._test_backup_swift_service_auth_headers_no_impact() def test_backup_swift_service_auth_headers_partial_enabled(self): self.override_config('send_service_user_token', True, group='service_user') self._test_backup_swift_service_auth_headers_no_impact() @mock.patch.object(service_auth, 'get_service_auth_plugin') def test_backup_swift_service_auth_headers_enabled(self, mock_plugin): class FakeServiceAuthPlugin: def get_token(self, session): return "fake" self.override_config('send_service_user_token', True, group='service_user') self.override_config('backup_swift_service_auth', True) mock_plugin.return_value = FakeServiceAuthPlugin() service = swift_dr.SwiftBackupDriver(self.ctxt) expected = {'X-Service-Token': 'fake'} self.assertEqual(service._headers(), expected) expected = {'X-Service-Token': 'fake', 
'some': 'header'} self.assertEqual(service._headers({'some': 'header'}), expected) @mock.patch.object(fake_swift_client.FakeSwiftConnection, 'put_container') def test_default_backup_swift_create_storage_policy(self, mock_put): service = swift_dr.SwiftBackupDriver(self.ctxt) service.put_container('missing_container') mock_put.assert_called_once_with('missing_container', headers=None) @mock.patch.object(fake_swift_client.FakeSwiftConnection, 'put_container') def test_backup_swift_create_storage_policy(self, mock_put): self.override_config('backup_swift_create_storage_policy', 'mypolicy') service = swift_dr.SwiftBackupDriver(self.ctxt) service.put_container('missing_container') mock_put.assert_called_once_with( 'missing_container', headers={'X-Storage-Policy': 'mypolicy'} ) def test_default_backup_swift_create_storage_policy_put_socket_error(self): service = swift_dr.SwiftBackupDriver(self.ctxt) self.assertRaises(exception.SwiftConnectionFailed, service.put_container, 'missing_container_socket_error_on_put') def test_default_backup_swift_create_storage_policy_head_error(self): service = swift_dr.SwiftBackupDriver(self.ctxt) self.assertRaises(exception.SwiftConnectionFailed, service.put_container, 'unauthorized_container') def test_backup_swift_create_storage_policy_head_error(self): self.override_config('backup_swift_create_storage_policy', 'mypolicy') service = swift_dr.SwiftBackupDriver(self.ctxt) self.assertRaises(exception.SwiftConnectionFailed, service.put_container, 'unauthorized_container') def test_default_backup_swift_create_storage_policy_head_sockerr(self): service = swift_dr.SwiftBackupDriver(self.ctxt) self.assertRaises(exception.SwiftConnectionFailed, service.put_container, 'socket_error_on_head') def test_backup_swift_create_storage_policy_head_socket_error(self): self.override_config('backup_swift_create_storage_policy', 'mypolicy') service = swift_dr.SwiftBackupDriver(self.ctxt) self.assertRaises(exception.SwiftConnectionFailed, service.put_container, 'socket_error_on_head') def test_backup_uncompressed(self): volume_id = '2b9f10a3-42b4-4fdf-b316-000000ceb039' self._create_backup_db_entry(volume_id=volume_id) self.flags(backup_compression_algorithm='none') service = swift_dr.SwiftBackupDriver(self.ctxt) self.volume_file.seek(0) backup = objects.Backup.get_by_id(self.ctxt, fake.BACKUP_ID) service.backup(backup, self.volume_file) def test_backup_uncompressed_casing(self): volume_id = '2b9f10a3-42b4-dead-b316-000000ceb039' self._create_backup_db_entry(volume_id=volume_id) self.flags(backup_compression_algorithm='None') service = swift_dr.SwiftBackupDriver(self.ctxt) self.volume_file.seek(0) backup = objects.Backup.get_by_id(self.ctxt, fake.BACKUP_ID) service.backup(backup, self.volume_file) def test_backup_bz2(self): volume_id = 'dc0fee35-b44e-4f13-80d6-000000e1b50c' self._create_backup_db_entry(volume_id=volume_id) self.flags(backup_compression_algorithm='bz2') service = swift_dr.SwiftBackupDriver(self.ctxt) self._write_effective_compression_file(self.size_volume_file) backup = objects.Backup.get_by_id(self.ctxt, fake.BACKUP_ID) service.backup(backup, self.volume_file) def test_backup_zlib(self): volume_id = '5cea0535-b6fb-4531-9a38-000000bea094' self._create_backup_db_entry(volume_id=volume_id) self.flags(backup_compression_algorithm='zlib') service = swift_dr.SwiftBackupDriver(self.ctxt) self._write_effective_compression_file(self.size_volume_file) backup = objects.Backup.get_by_id(self.ctxt, fake.BACKUP_ID) service.backup(backup, self.volume_file) def test_backup_zstd(self): 
volume_id = '471910a0-a197-4259-9c50-0fc3d6a07dbc' self._create_backup_db_entry(volume_id=volume_id) self.flags(backup_compression_algorithm='zstd') service = swift_dr.SwiftBackupDriver(self.ctxt) self._write_effective_compression_file(self.size_volume_file) backup = objects.Backup.get_by_id(self.ctxt, fake.BACKUP_ID) service.backup(backup, self.volume_file) @mock.patch.object(db, 'backup_update', wraps=db.backup_update) def test_backup_default_container(self, backup_update_mock): volume_id = '9552017f-c8b9-4e4e-a876-00000053349c' self._create_backup_db_entry(volume_id=volume_id, container=None) service = swift_dr.SwiftBackupDriver(self.ctxt) self.volume_file.seek(0) backup = objects.Backup.get_by_id(self.ctxt, fake.BACKUP_ID) service.backup(backup, self.volume_file) backup = objects.Backup.get_by_id(self.ctxt, fake.BACKUP_ID) self.assertEqual('volumebackups', backup['container']) self.assertEqual(3, backup_update_mock.call_count) @mock.patch.object(db, 'backup_update', wraps=db.backup_update) def test_backup_db_container(self, backup_update_mock): volume_id = '9552017f-c8b9-4e4e-a876-00000053349c' self._create_backup_db_entry(volume_id=volume_id, container='existing_name') service = swift_dr.SwiftBackupDriver(self.ctxt) self.volume_file.seek(0) backup = objects.Backup.get_by_id(self.ctxt, fake.BACKUP_ID) service.backup(backup, self.volume_file) backup = objects.Backup.get_by_id(self.ctxt, fake.BACKUP_ID) self.assertEqual('existing_name', backup['container']) # Make sure we are not making a DB update when we are using the same # value that's already in the DB. self.assertEqual(2, backup_update_mock.call_count) @mock.patch.object(db, 'backup_update', wraps=db.backup_update) def test_backup_driver_container(self, backup_update_mock): volume_id = '9552017f-c8b9-4e4e-a876-00000053349c' self._create_backup_db_entry(volume_id=volume_id, container=None) service = swift_dr.SwiftBackupDriver(self.ctxt) self.volume_file.seek(0) backup = objects.Backup.get_by_id(self.ctxt, fake.BACKUP_ID) with mock.patch.object(service, 'update_container_name', return_value='driver_name'): service.backup(backup, self.volume_file) backup = objects.Backup.get_by_id(self.ctxt, fake.BACKUP_ID) self.assertEqual('driver_name', backup['container']) self.assertEqual(3, backup_update_mock.call_count) @mock.patch('cinder.backup.drivers.swift.SwiftBackupDriver.' '_send_progress_end') @mock.patch('cinder.backup.drivers.swift.SwiftBackupDriver.' '_send_progress_notification') def test_backup_default_container_notify(self, _send_progress, _send_progress_end): volume_id = '87dd0eed-2598-4ebd-8ebb-000000ac578a' self._create_backup_db_entry(volume_id=volume_id, container=None) # If the backup_object_number_per_notification is set to 1, # the _send_progress method will be called for sure. CONF.set_override("backup_object_number_per_notification", 1) CONF.set_override("backup_swift_enable_progress_timer", False) service = swift_dr.SwiftBackupDriver(self.ctxt) self.volume_file.seek(0) backup = objects.Backup.get_by_id(self.ctxt, fake.BACKUP_ID) service.backup(backup, self.volume_file) self.assertTrue(_send_progress.called) self.assertTrue(_send_progress_end.called) # If the backup_object_number_per_notification is increased to # another value, the _send_progress method will not be called. 
_send_progress.reset_mock() _send_progress_end.reset_mock() CONF.set_override("backup_object_number_per_notification", 10) service = swift_dr.SwiftBackupDriver(self.ctxt) self.volume_file.seek(0) backup = objects.Backup.get_by_id(self.ctxt, fake.BACKUP_ID) service.backup(backup, self.volume_file) self.assertFalse(_send_progress.called) self.assertTrue(_send_progress_end.called) # If the timer is enabled, the _send_progress will be called, # since the timer can trigger the progress notification. _send_progress.reset_mock() _send_progress_end.reset_mock() CONF.set_override("backup_object_number_per_notification", 10) CONF.set_override("backup_swift_enable_progress_timer", True) service = swift_dr.SwiftBackupDriver(self.ctxt) self.volume_file.seek(0) backup = objects.Backup.get_by_id(self.ctxt, fake.BACKUP_ID) service.backup(backup, self.volume_file) self.assertTrue(_send_progress.called) self.assertTrue(_send_progress_end.called) def test_backup_custom_container(self): volume_id = '1da9859e-77e5-4731-bd58-000000ca119e' container_name = 'fake99' self._create_backup_db_entry(volume_id=volume_id, container=container_name) service = swift_dr.SwiftBackupDriver(self.ctxt) self.volume_file.seek(0) backup = objects.Backup.get_by_id(self.ctxt, fake.BACKUP_ID) service.backup(backup, self.volume_file) backup = objects.Backup.get_by_id(self.ctxt, fake.BACKUP_ID) self.assertEqual(container_name, backup['container']) def test_backup_shafile(self): volume_id = '6465dad4-22af-48f7-8a1a-000000218907' def _fake_generate_object_name_prefix(self, backup): az = 'az_fake' backup_name = '%s_backup_%s' % (az, backup['id']) volume = 'volume_%s' % (backup['volume_id']) prefix = volume + '_' + backup_name return prefix self.mock_object(swift_dr.SwiftBackupDriver, '_generate_object_name_prefix', _fake_generate_object_name_prefix) container_name = self.temp_dir.replace(tempfile.gettempdir() + '/', '', 1) self._create_backup_db_entry(volume_id=volume_id, container=container_name) self.mock_object(swift, 'Connection', fake_swift_client2.FakeSwiftClient2.Connection) service = swift_dr.SwiftBackupDriver(self.ctxt) self.volume_file.seek(0) backup = objects.Backup.get_by_id(self.ctxt, fake.BACKUP_ID) service.backup(backup, self.volume_file) backup = objects.Backup.get_by_id(self.ctxt, fake.BACKUP_ID) self.assertEqual(container_name, backup['container']) # Verify sha contents content1 = service._read_sha256file(backup) self.assertEqual(64 * 1024 / content1['chunk_size'], len(content1['sha256s'])) def test_backup_cmp_shafiles(self): volume_id = '1a99ac67-c534-4fe3-b472-0000001785e2' def _fake_generate_object_name_prefix(self, backup): az = 'az_fake' backup_name = '%s_backup_%s' % (az, backup['id']) volume = 'volume_%s' % (backup['volume_id']) prefix = volume + '_' + backup_name return prefix self.mock_object(swift_dr.SwiftBackupDriver, '_generate_object_name_prefix', _fake_generate_object_name_prefix) container_name = self.temp_dir.replace(tempfile.gettempdir() + '/', '', 1) self._create_backup_db_entry(volume_id=volume_id, container=container_name, backup_id=fake.BACKUP_ID) self.mock_object(swift, 'Connection', fake_swift_client2.FakeSwiftClient2.Connection) service = swift_dr.SwiftBackupDriver(self.ctxt) self.volume_file.seek(0) backup = objects.Backup.get_by_id(self.ctxt, fake.BACKUP_ID) service.backup(backup, self.volume_file) backup = objects.Backup.get_by_id(self.ctxt, fake.BACKUP_ID) self.assertEqual(container_name, backup['container']) # Create incremental backup with no change to contents 
self._create_backup_db_entry(volume_id=volume_id, container=container_name, backup_id=fake.BACKUP2_ID, parent_id=fake.BACKUP_ID) self.mock_object(swift, 'Connection', fake_swift_client2.FakeSwiftClient2.Connection) service = swift_dr.SwiftBackupDriver(self.ctxt) self.volume_file.seek(0) deltabackup = objects.Backup.get_by_id(self.ctxt, fake.BACKUP2_ID) service.backup(deltabackup, self.volume_file) deltabackup = objects.Backup.get_by_id(self.ctxt, fake.BACKUP2_ID) self.assertEqual(container_name, deltabackup['container']) # Compare shas from both files content1 = service._read_sha256file(backup) content2 = service._read_sha256file(deltabackup) self.assertEqual(len(content1['sha256s']), len(content2['sha256s'])) self.assertEqual(set(content1['sha256s']), set(content2['sha256s'])) def test_backup_delta_two_objects_change(self): volume_id = '30dab288-265a-4583-9abe-000000d42c67' def _fake_generate_object_name_prefix(self, backup): az = 'az_fake' backup_name = '%s_backup_%s' % (az, backup['id']) volume = 'volume_%s' % (backup['volume_id']) prefix = volume + '_' + backup_name return prefix self.mock_object(swift_dr.SwiftBackupDriver, '_generate_object_name_prefix', _fake_generate_object_name_prefix) self.flags(backup_swift_object_size=8 * 1024) self.flags(backup_swift_block_size=1024) container_name = self.temp_dir.replace(tempfile.gettempdir() + '/', '', 1) self._create_backup_db_entry(volume_id=volume_id, container=container_name, backup_id=fake.BACKUP_ID) self.mock_object(swift, 'Connection', fake_swift_client2.FakeSwiftClient2.Connection) service = swift_dr.SwiftBackupDriver(self.ctxt) self.volume_file.seek(0) backup = objects.Backup.get_by_id(self.ctxt, fake.BACKUP_ID) service.backup(backup, self.volume_file) backup = objects.Backup.get_by_id(self.ctxt, fake.BACKUP_ID) self.assertEqual(container_name, backup['container']) # Create incremental backup with no change to contents self.volume_file.seek(2 * 8 * 1024) self.volume_file.write(os.urandom(1024)) self.volume_file.seek(4 * 8 * 1024) self.volume_file.write(os.urandom(1024)) self._create_backup_db_entry(volume_id=volume_id, container=container_name, backup_id=fake.BACKUP2_ID, parent_id=fake.BACKUP_ID) self.mock_object(swift, 'Connection', fake_swift_client2.FakeSwiftClient2.Connection) service = swift_dr.SwiftBackupDriver(self.ctxt) self.volume_file.seek(0) deltabackup = objects.Backup.get_by_id(self.ctxt, fake.BACKUP2_ID) service.backup(deltabackup, self.volume_file) deltabackup = objects.Backup.get_by_id(self.ctxt, fake.BACKUP2_ID) self.assertEqual(container_name, deltabackup['container']) content1 = service._read_sha256file(backup) content2 = service._read_sha256file(deltabackup) # Verify that two shas are changed at index 16 and 32 self.assertNotEqual(content1['sha256s'][16], content2['sha256s'][16]) self.assertNotEqual(content1['sha256s'][32], content2['sha256s'][32]) def test_backup_delta_two_blocks_in_object_change(self): volume_id = 'b943e84f-aa67-4331-9ab2-000000cf19ba' def _fake_generate_object_name_prefix(self, backup): az = 'az_fake' backup_name = '%s_backup_%s' % (az, backup['id']) volume = 'volume_%s' % (backup['volume_id']) prefix = volume + '_' + backup_name return prefix self.mock_object(swift_dr.SwiftBackupDriver, '_generate_object_name_prefix', _fake_generate_object_name_prefix) self.flags(backup_swift_object_size=8 * 1024) self.flags(backup_swift_block_size=1024) container_name = self.temp_dir.replace(tempfile.gettempdir() + '/', '', 1) self._create_backup_db_entry(volume_id=volume_id, container=container_name, 
backup_id=fake.BACKUP_ID) self.mock_object(swift, 'Connection', fake_swift_client2.FakeSwiftClient2.Connection) service = swift_dr.SwiftBackupDriver(self.ctxt) self.volume_file.seek(0) backup = objects.Backup.get_by_id(self.ctxt, fake.BACKUP_ID) service.backup(backup, self.volume_file) backup = objects.Backup.get_by_id(self.ctxt, fake.BACKUP_ID) self.assertEqual(container_name, backup['container']) # Create incremental backup with no change to contents self.volume_file.seek(16 * 1024) self.volume_file.write(os.urandom(1024)) self.volume_file.seek(20 * 1024) self.volume_file.write(os.urandom(1024)) self._create_backup_db_entry(volume_id=volume_id, container=container_name, backup_id=fake.BACKUP2_ID, parent_id=fake.BACKUP_ID) self.mock_object(swift, 'Connection', fake_swift_client2.FakeSwiftClient2.Connection) service = swift_dr.SwiftBackupDriver(self.ctxt) self.volume_file.seek(0) deltabackup = objects.Backup.get_by_id(self.ctxt, fake.BACKUP2_ID) service.backup(deltabackup, self.volume_file) deltabackup = objects.Backup.get_by_id(self.ctxt, fake.BACKUP2_ID) self.assertEqual(container_name, deltabackup['container']) # Verify that two shas are changed at index 16 and 20 content1 = service._read_sha256file(backup) content2 = service._read_sha256file(deltabackup) self.assertNotEqual(content1['sha256s'][16], content2['sha256s'][16]) self.assertNotEqual(content1['sha256s'][20], content2['sha256s'][20]) def test_create_backup_put_object_wraps_socket_error(self): volume_id = 'c09b1ad4-5f0e-4d3f-8b9e-0000004caec8' container_name = 'socket_error_on_put' self._create_backup_db_entry(volume_id=volume_id, container=container_name) service = swift_dr.SwiftBackupDriver(self.ctxt) self.volume_file.seek(0) backup = objects.Backup.get_by_id(self.ctxt, fake.BACKUP_ID) self.assertRaises(exception.SwiftConnectionFailed, service.backup, backup, self.volume_file) def test_backup_backup_metadata_fail(self): """Test of when an exception occurs in backup(). In backup(), after an exception occurs in self._backup_metadata(), we want to check the process of an exception handler. """ volume_id = '020d9142-339c-4876-a445-000000f1520c' self._create_backup_db_entry(volume_id=volume_id) self.flags(backup_compression_algorithm='none') service = swift_dr.SwiftBackupDriver(self.ctxt) self.volume_file.seek(0) backup = objects.Backup.get_by_id(self.ctxt, fake.BACKUP_ID) def fake_backup_metadata(self, backup, object_meta): raise exception.BackupDriverException(reason=_('fake')) # Raise a pseudo exception.BackupDriverException. self.mock_object(swift_dr.SwiftBackupDriver, '_backup_metadata', fake_backup_metadata) # We expect that an exception be notified directly. self.assertRaises(exception.BackupDriverException, service.backup, backup, self.volume_file) def test_backup_backup_metadata_fail2(self): """Test of when an exception occurs in an exception handler. In backup(), after an exception occurs in self._backup_metadata(), we want to check the process when the second exception occurs in self.delete_backup(). """ volume_id = '2164421d-f181-4db7-b9bd-000000eeb628' self._create_backup_db_entry(volume_id=volume_id) self.flags(backup_compression_algorithm='none') service = swift_dr.SwiftBackupDriver(self.ctxt) self.volume_file.seek(0) backup = objects.Backup.get_by_id(self.ctxt, fake.BACKUP_ID) def fake_backup_metadata(self, backup, object_meta): raise exception.BackupDriverException(reason=_('fake')) # Raise a pseudo exception.BackupDriverException. 
self.mock_object(swift_dr.SwiftBackupDriver, '_backup_metadata', fake_backup_metadata) def fake_delete(self, backup): raise exception.BackupOperationError() # Raise a pseudo exception.BackupOperationError. self.mock_object(swift_dr.SwiftBackupDriver, 'delete_backup', fake_delete) # We expect that the second exception is notified. self.assertRaises(exception.BackupOperationError, service.backup, backup, self.volume_file) def test_restore(self): volume_id = 'c2a81f09-f480-4325-8424-00000071685b' self._create_backup_db_entry(volume_id=volume_id) service = swift_dr.SwiftBackupDriver(self.ctxt) with tempfile.NamedTemporaryFile() as volume_file: backup = objects.Backup.get_by_id(self.ctxt, fake.BACKUP_ID) backup.status = objects.fields.BackupStatus.RESTORING backup.save() service.restore(backup, volume_id, volume_file, False) def test_restore_delta(self): volume_id = '04d83506-bcf7-4ff5-9c65-00000051bd2e' def _fake_generate_object_name_prefix(self, backup): az = 'az_fake' backup_name = '%s_backup_%s' % (az, backup['id']) volume = 'volume_%s' % (backup['volume_id']) prefix = volume + '_' + backup_name return prefix self.mock_object(swift_dr.SwiftBackupDriver, '_generate_object_name_prefix', _fake_generate_object_name_prefix) self.flags(backup_swift_object_size=8 * 1024) self.flags(backup_swift_block_size=1024) container_name = self.temp_dir.replace(tempfile.gettempdir() + '/', '', 1) self._create_backup_db_entry(volume_id=volume_id, container=container_name, backup_id=fake.BACKUP_ID) self.mock_object(swift, 'Connection', fake_swift_client2.FakeSwiftClient2.Connection) service = swift_dr.SwiftBackupDriver(self.ctxt) self.volume_file.seek(0) backup = objects.Backup.get_by_id(self.ctxt, fake.BACKUP_ID) service.backup(backup, self.volume_file) # Create incremental backup with no change to contents self.volume_file.seek(16 * 1024) self.volume_file.write(os.urandom(1024)) self.volume_file.seek(20 * 1024) self.volume_file.write(os.urandom(1024)) self._create_backup_db_entry(volume_id=volume_id, container=container_name, backup_id=fake.BACKUP2_ID, parent_id=fake.BACKUP_ID) self.volume_file.seek(0) deltabackup = objects.Backup.get_by_id(self.ctxt, fake.BACKUP2_ID) service.backup(deltabackup, self.volume_file, True) deltabackup = objects.Backup.get_by_id(self.ctxt, fake.BACKUP2_ID) with tempfile.NamedTemporaryFile() as restored_file: backup = objects.Backup.get_by_id(self.ctxt, fake.BACKUP2_ID) backup.status = objects.fields.BackupStatus.RESTORING backup.save() service.restore(backup, volume_id, restored_file, False) self.assertTrue(filecmp.cmp(self.volume_file.name, restored_file.name)) def test_restore_wraps_socket_error(self): volume_id = 'c1160de7-2774-4f20-bf14-0000001ac139' container_name = 'socket_error_on_get' self._create_backup_db_entry(volume_id=volume_id, container=container_name) service = swift_dr.SwiftBackupDriver(self.ctxt) with tempfile.NamedTemporaryFile() as volume_file: backup = objects.Backup.get_by_id(self.ctxt, fake.BACKUP_ID) self.assertRaises(exception.SwiftConnectionFailed, service.restore, backup, volume_id, volume_file, False) def test_restore_unsupported_version(self): volume_id = '390db8c1-32d3-42ca-82c9-00000010c703' container_name = 'unsupported_version' self._create_backup_db_entry(volume_id=volume_id, container=container_name) service = swift_dr.SwiftBackupDriver(self.ctxt) with tempfile.NamedTemporaryFile() as volume_file: backup = objects.Backup.get_by_id(self.ctxt, fake.BACKUP_ID) self.assertRaises(exception.InvalidBackup, service.restore, backup, volume_id, 
volume_file, False) def test_delete(self): volume_id = '9ab256c8-3175-4ad8-baa1-0000007f9d31' object_prefix = 'test_prefix' self._create_backup_db_entry(volume_id=volume_id, service_metadata=object_prefix) service = swift_dr.SwiftBackupDriver(self.ctxt) backup = objects.Backup.get_by_id(self.ctxt, fake.BACKUP_ID) service.delete_backup(backup) def test_delete_not_found(self): volume_id = '9ab256c8-3175-4ad8-baa1-0000007f9d31' container_name = 'not_found_on_delete' object_prefix = 'test_prefix' self._create_backup_db_entry(volume_id=volume_id, container=container_name, service_metadata=object_prefix) service = swift_dr.SwiftBackupDriver(self.ctxt) backup = objects.Backup.get_by_id(self.ctxt, fake.BACKUP_ID) service.delete_backup(backup) def test_delete_wraps_socket_error(self): volume_id = 'f74cb6fa-2900-40df-87ac-0000000f72ea' container_name = 'socket_error_on_delete' object_prefix = 'test_prefix' self._create_backup_db_entry(volume_id=volume_id, container=container_name, service_metadata=object_prefix) service = swift_dr.SwiftBackupDriver(self.ctxt) backup = objects.Backup.get_by_id(self.ctxt, fake.BACKUP_ID) self.assertRaises(exception.SwiftConnectionFailed, service.delete_backup, backup) def test_delete_without_object_prefix(self): volume_id = 'ee30d649-72a6-49a5-b78d-000000edb6b1' def _fake_delete_object(self, container, object_name): raise AssertionError('delete_object method should not be called.') self.mock_object(swift_dr.SwiftBackupDriver, 'delete_object', _fake_delete_object) self._create_backup_db_entry(volume_id=volume_id) service = swift_dr.SwiftBackupDriver(self.ctxt) backup = objects.Backup.get_by_id(self.ctxt, fake.BACKUP_ID) service.delete_backup(backup) def test_get_compressor(self): service = swift_dr.SwiftBackupDriver(self.ctxt) compressor = service._get_compressor('None') self.assertIsNone(compressor) compressor = service._get_compressor('zlib') self.assertEqual(zlib, compressor) self.assertIsInstance(compressor, tpool.Proxy) compressor = service._get_compressor('bz2') self.assertEqual(bz2, compressor) self.assertIsInstance(compressor, tpool.Proxy) compressor = service._get_compressor('zstd') self.assertEqual(zstd, compressor) self.assertIsInstance(compressor, tpool.Proxy) self.assertRaises(ValueError, service._get_compressor, 'fake') def test_prepare_output_data_effective_compression(self): """Test compression works on a native thread.""" # Use dictionary to share data between threads thread_dict = {} original_compress = zlib.compress def my_compress(data): thread_dict['compress'] = threading.current_thread() return original_compress(data) self.mock_object(zlib, 'compress', side_effect=my_compress) service = swift_dr.SwiftBackupDriver(self.ctxt) # Set up buffer of 128 zeroed bytes fake_data = b'\0' * 128 result = service._prepare_output_data(fake_data) self.assertEqual('zlib', result[0]) self.assertGreater(len(fake_data), len(result[1])) self.assertNotEqual(threading.current_thread(), thread_dict['compress']) def test_prepare_output_data_no_compresssion(self): self.flags(backup_compression_algorithm='none') service = swift_dr.SwiftBackupDriver(self.ctxt) # Set up buffer of 128 zeroed bytes fake_data = b'\0' * 128 result = service._prepare_output_data(fake_data) self.assertEqual('none', result[0]) self.assertEqual(fake_data, result[1]) def test_prepare_output_data_ineffective_compression(self): service = swift_dr.SwiftBackupDriver(self.ctxt) # Set up buffer of 128 zeroed bytes fake_data = b'\0' * 128 # Pre-compress so that compression in the driver will be ineffective. 
already_compressed_data = service.compressor.compress(fake_data) result = service._prepare_output_data(already_compressed_data) self.assertEqual('none', result[0]) self.assertEqual(already_compressed_data, result[1]) @mock.patch('cinder.backup.drivers.swift.SwiftBackupDriver.initialize') def test_no_user_context(self, mock_initialize): # With no user_id the driver should not initialize itself. admin_context = context.get_admin_context() swift_dr.SwiftBackupDriver(admin_context) mock_initialize.assert_not_called() class WindowsBackupSwiftTestCase(BackupSwiftTestCase): # We're running all the parent class tests, while doing # some patching in order to simulate Windows behavior. def setUp(self): self._mock_utilsfactory = mock.Mock() platform_patcher = mock.patch('sys.platform', 'win32') platform_patcher.start() self.addCleanup(platform_patcher.stop) super(WindowsBackupSwiftTestCase, self).setUp() read = self.volume_file.read def win32_read(sz): # We're simulating the Windows behavior. if self.volume_file.tell() > fake_get_size(): raise IOError() return read(sz) read_patcher = mock.patch.object( self.volume_file, 'read', win32_read) read_patcher.start() self.addCleanup(read_patcher.stop) def fake_get_size(*args, **kwargs): pos = self.volume_file.tell() sz = self.volume_file.seek(0, 2) self.volume_file.seek(pos) return sz self._disk_size_getter_mocker = mock.patch.object( swift_dr.SwiftBackupDriver, '_get_win32_phys_disk_size', fake_get_size) self._disk_size_getter_mocker.start() self.addCleanup(self._disk_size_getter_mocker.stop) def test_invalid_chunk_size(self): self.flags(backup_swift_object_size=1000) # We expect multiples of 4096 self.assertRaises(exception.InvalidConfigurationValue, swift_dr.SwiftBackupDriver, self.ctxt) @mock.patch.object(chunkeddriver, 'os_win_utilsfactory', create=True) def test_get_phys_disk_size(self, mock_utilsfactory): # We're patching this method in setUp, so we need to # retrieve the original one. Note that we'll get an unbound # method. service = swift_dr.SwiftBackupDriver(self.ctxt) get_disk_size = self._disk_size_getter_mocker.temp_original disk_utils = mock_utilsfactory.get_diskutils.return_value disk_utils.get_device_number_from_device_name.return_value = ( mock.sentinel.dev_num) disk_utils.get_disk_size.return_value = mock.sentinel.disk_size disk_size = get_disk_size(service, mock.sentinel.disk_path) self.assertEqual(mock.sentinel.disk_size, disk_size) disk_utils.get_device_number_from_device_name.assert_called_once_with( mock.sentinel.disk_path) disk_utils.get_disk_size.assert_called_once_with( mock.sentinel.dev_num) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/backup/fake_backup.py0000664000175000017500000000411500000000000022527 0ustar00zuulzuul00000000000000# Copyright (c) 2015 Intel Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
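# Editorial note (illustrative sketch, not part of the cinder source tree):
# the _prepare_output_data tests above (effective / no / ineffective
# compression) exercise a per-chunk decision made by the chunked backup
# drivers: keep the compressed payload only when compression actually
# shrank the chunk, otherwise store the raw bytes and record the algorithm
# as 'none'.  The helper below is a simplified, hypothetical version of that
# decision; the name prepare_output_data and its signature are assumptions
# for illustration only.
def prepare_output_data(data, compressor, algorithm):
    """Return (algorithm_used, payload) for a single backup chunk."""
    if compressor is None or algorithm == 'none':
        return 'none', data
    compressed = compressor.compress(data)
    # Ineffective compression (e.g. already-compressed input): keep raw data.
    if len(compressed) >= len(data):
        return 'none', data
    return algorithm, compressed
# Example: prepare_output_data(b'\0' * 128, zlib, 'zlib') returns a 'zlib'
# payload, while feeding it already-compressed bytes falls back to 'none'.
# In the real drivers the compressor module is additionally wrapped in
# eventlet's tpool.Proxy so the CPU-bound compress() call runs on a native
# thread, which is what test_prepare_output_data_effective_compression
# asserts above.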
from oslo_versionedobjects import fields from cinder import objects from cinder.objects import fields as c_fields from cinder.tests.unit import fake_constants as fake def fake_db_backup(**updates): db_backup = { 'id': fake.BACKUP_ID, 'user_id': fake.USER_ID, 'project_id': fake.PROJECT_ID, 'volume_id': fake.VOLUME_ID, 'status': c_fields.BackupStatus.CREATING, 'host': 'fake_host', 'display_name': 'fake_name', 'size': 5, 'display_description': 'fake_description', 'service_metadata': 'fake_metadata', 'service': 'fake_service', 'object_count': 5, 'num_dependent_backups': 0, 'backup_metadata': [ {'key': 'key1', 'value': 'value1'}, {'key': 'key2', 'value': 'value2'} ], } for name, field in objects.Backup.fields.items(): if name in db_backup: continue if field.nullable: db_backup[name] = None elif field.default != fields.UnspecifiedDefault: db_backup[name] = field.default else: raise Exception('fake_db_backup needs help with %s' % name) if updates: db_backup.update(updates) return db_backup def fake_backup_obj(context, **updates): return objects.Backup._from_db_object(context, objects.Backup(), fake_db_backup(**updates), expected_attrs=['metadata']) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/backup/fake_google_client.py0000664000175000017500000001100700000000000024072 0ustar00zuulzuul00000000000000# Copyright (C) 2012 Hewlett-Packard Development Company, L.P. # Copyright (C) 2016 Vedams Inc. # Copyright (C) 2016 Google Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
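# Editorial note (hedged usage sketch, not part of the cinder source tree):
# fake_db_backup() above fills in every objects.Backup field the caller does
# not supply -- nullable fields become None, fields with a declared default
# get that default -- and then applies the caller's keyword overrides on top.
# The function below only illustrates how a unit test would typically consume
# fake_backup_obj(); the override values are arbitrary examples.
def _example_fake_backup_usage():
    from cinder import context
    from cinder.objects import fields as c_fields
    from cinder.tests.unit.backup import fake_backup

    ctxt = context.get_admin_context()
    backup = fake_backup.fake_backup_obj(
        ctxt,
        status=c_fields.BackupStatus.AVAILABLE,  # override default status
        size=10)                                 # arbitrary example size
    # Every field not overridden here was filled in by fake_db_backup().
    assert backup.size == 10
    return backup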
import json import os import zlib from google.auth import exceptions as gexceptions from googleapiclient import errors from oslo_utils import units class FakeGoogleObjectInsertExecute(object): def __init__(self, *args, **kwargs): self.container_name = kwargs['bucket'] def execute(self, *args, **kwargs): if self.container_name == 'gcs_api_failure': raise errors.Error return {u'md5Hash': u'Z2NzY2luZGVybWQ1'} class FakeGoogleObjectListExecute(object): def __init__(self, *args, **kwargs): self.container_name = kwargs['bucket'] def execute(self, *args, **kwargs): if self.container_name == 'gcs_connection_failure': raise Exception return {'items': [{'name': 'backup_001'}, {'name': 'backup_002'}, {'name': 'backup_003'}]} class FakeGoogleBucketListExecute(object): def __init__(self, *args, **kwargs): self.container_name = kwargs['prefix'] def execute(self, *args, **kwargs): if self.container_name == 'gcs_oauth2_failure': raise gexceptions.DefaultCredentialsError return {u'items': [{u'name': u'gcscinderbucket'}, {u'name': u'gcsbucket'}]} class FakeGoogleBucketInsertExecute(object): def execute(self, *args, **kwargs): pass class FakeMediaObject(object): def __init__(self, *args, **kwargs): self.bucket_name = kwargs['bucket'] self.object_name = kwargs['object'] class FakeGoogleObject(object): def insert(self, *args, **kwargs): return FakeGoogleObjectInsertExecute(*args, **kwargs) def get_media(self, *args, **kwargs): return FakeMediaObject(*args, **kwargs) def list(self, *args, **kwargs): return FakeGoogleObjectListExecute(*args, **kwargs) class FakeGoogleBucket(object): def list(self, *args, **kwargs): return FakeGoogleBucketListExecute(*args, **kwargs) def insert(self, *args, **kwargs): return FakeGoogleBucketInsertExecute() class FakeGoogleDiscovery(object): """Logs calls instead of executing.""" def __init__(self, *args, **kwargs): pass @classmethod def Build(cls, *args, **kargs): return FakeDiscoveryBuild() class FakeDiscoveryBuild(object): """Logging calls instead of executing.""" def __init__(self, *args, **kwargs): pass def objects(self): return FakeGoogleObject() def buckets(self): return FakeGoogleBucket() class FakeGoogleCredentials(object): def __init__(self, *args, **kwargs): pass @classmethod def from_stream(cls, *args, **kwargs): pass class FakeGoogleMediaIoBaseDownload(object): def __init__(self, fh, req, chunksize=None): if 'metadata' in req.object_name: metadata = {} metadata['version'] = '1.0.0' metadata['backup_id'] = 123 metadata['volume_id'] = 123 metadata['backup_name'] = 'fake backup' metadata['backup_description'] = 'fake backup description' metadata['created_at'] = '2016-01-09 11:20:54,805' metadata['objects'] = [{ 'backup_001': {'compression': 'zlib', 'length': 10, 'offset': 0}, 'backup_002': {'compression': 'zlib', 'length': 10, 'offset': 10}, 'backup_003': {'compression': 'zlib', 'length': 10, 'offset': 20} }] metadata_json = json.dumps(metadata, sort_keys=True, indent=2) metadata_json = metadata_json.encode('utf-8') fh.write(metadata_json) else: fh.write(zlib.compress(os.urandom(units.Mi))) def next_chunk(self, **kwargs): return (100, True) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/backup/fake_google_client2.py0000664000175000017500000000722200000000000024160 0ustar00zuulzuul00000000000000# Copyright (C) 2012 Hewlett-Packard Development Company, L.P. # Copyright (C) 2016 Vedams Inc. # Copyright (C) 2016 Google Inc. # All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import os import tempfile class FakeGoogleObjectInsertExecute(object): def execute(self, *args, **kwargs): return {u'md5Hash': u'Z2NzY2luZGVybWQ1'} class FakeGoogleObjectListExecute(object): def __init__(self, *args, **kwargs): self.bucket_name = kwargs['bucket'] self.prefix = kwargs['prefix'] def execute(self, *args, **kwargs): bucket_dir = tempfile.gettempdir() + '/' + self.bucket_name fake_body = [] for f in os.listdir(bucket_dir): try: f.index(self.prefix) fake_body.append({'name': f}) except Exception: pass return {'items': fake_body} class FakeGoogleBucketListExecute(object): def execute(self, *args, **kwargs): return {u'items': [{u'name': u'gcscinderbucket'}, {u'name': u'gcsbucket'}]} class FakeGoogleBucketInsertExecute(object): def execute(self, *args, **kwargs): pass class FakeMediaObject(object): def __init__(self, *args, **kwargs): self.bucket_name = kwargs['bucket'] self.object_name = kwargs['object'] class FakeGoogleObject(object): def insert(self, *args, **kwargs): object_path = (tempfile.gettempdir() + '/' + kwargs['bucket'] + '/' + kwargs['name']) kwargs['media_body']._fd.getvalue() with open(object_path, 'wb') as object_file: kwargs['media_body']._fd.seek(0) object_file.write(kwargs['media_body']._fd.read()) return FakeGoogleObjectInsertExecute() def get_media(self, *args, **kwargs): return FakeMediaObject(*args, **kwargs) def list(self, *args, **kwargs): return FakeGoogleObjectListExecute(*args, **kwargs) class FakeGoogleBucket(object): def list(self, *args, **kwargs): return FakeGoogleBucketListExecute() def insert(self, *args, **kwargs): return FakeGoogleBucketInsertExecute() class FakeGoogleDiscovery(object): """Logs calls instead of executing.""" def __init__(self, *args, **kwargs): pass @classmethod def Build(cls, *args, **kargs): return FakeDiscoveryBuild() class FakeDiscoveryBuild(object): """Logging calls instead of executing.""" def __init__(self, *args, **kwargs): pass def objects(self): return FakeGoogleObject() def buckets(self): return FakeGoogleBucket() class FakeGoogleCredentials(object): def __init__(self, *args, **kwargs): pass @classmethod def from_stream(cls, *args, **kwargs): pass class FakeGoogleMediaIoBaseDownload(object): def __init__(self, fh, req, chunksize=None): object_path = (tempfile.gettempdir() + '/' + req.bucket_name + '/' + req.object_name) with open(object_path, 'rb') as object_file: fh.write(object_file.read()) def next_chunk(self, **kwargs): return (100, True) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/backup/fake_s3_client.py0000664000175000017500000000444700000000000023155 0ustar00zuulzuul00000000000000# Copyright (C) 2020 leafcloud b.v. # Copyright (C) 2020 FUJITSU LIMITED # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from botocore.exceptions import ClientError from botocore.exceptions import ConnectionError class FakeS3Boto3(object): """Logs calls instead of executing.""" def __init__(self, *args, **kwargs): pass @classmethod def Client(cls, *args, **kargs): return FakeBoto3Client() class FakeBoto3Client(object): """Logging calls instead of executing.""" def __init__(self, *args, **kwargs): pass def list_objects(self, *args, **kwargs): return {u'Contents': [{u'Key': u'backup_001'}, {u'Key': u'backup_002'}, {u'Key': u'backup_003'}]} def list_buckets(self, *args, **kwargs): return {u'Buckets': [{u'Name': u's3cinderbucket'}, {u'Name': u's3bucket'}]} def head_bucket(self, *args, **kwargs): pass def get_object(self, Bucket, *args, **kwargs): if Bucket == 's3_api_failure': raise ClientError( error_response={ 'Error': {'Code': 'MyCode', 'Message': 'MyMessage'}}, operation_name='myoperation') if Bucket == 's3_connection_error': raise ConnectionError(error='MyMessage') def create_bucket(self, *args, **kwargs): pass def put_object(self, Bucket, *args, **kwargs): if Bucket == 's3_api_failure': raise ClientError( error_response={ 'Error': {'Code': 'MyCode', 'Message': 'MyMessage'}}, operation_name='myoperation') if Bucket == 's3_connection_error': raise ConnectionError(error='MyMessage') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/backup/fake_service.py0000664000175000017500000000231600000000000022723 0ustar00zuulzuul00000000000000# Copyright (C) 2012 Hewlett-Packard Development Company, L.P. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from cinder.backup import driver class FakeBackupService(driver.BackupDriver): def __init__(self, context): super().__init__(context) def backup(self, backup, volume_file): pass def restore(self, backup, volume_id, volume_file, volume_is_new): pass def delete_backup(self, backup): # if backup has magic name of 'fail_on_delete' # we raise an error - useful for some tests - # otherwise we return without error if backup['display_name'] == 'fail_on_delete': raise IOError('fake') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/backup/fake_swift_client.py0000664000175000017500000001055000000000000023754 0ustar00zuulzuul00000000000000# Copyright (C) 2012 Hewlett-Packard Development Company, L.P. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from http import client as http_client import json import os import socket import zlib from swiftclient import client as swift from swiftclient import exceptions as swift_exc class FakeSwiftClient(object): """Logs calls instead of executing.""" def __init__(self, *args, **kwargs): pass @classmethod def Connection(cls, *args, **kargs): return FakeSwiftConnection() class FakeSwiftConnection(object): """Logging calls instead of executing.""" def __init__(self, *args, **kwargs): pass def head_container(self, container, headers=None): if container in ['missing_container', 'missing_container_socket_error_on_put']: raise swift.ClientException('fake exception', http_status=http_client.NOT_FOUND) elif container == 'unauthorized_container': raise swift.ClientException('fake exception', http_status=http_client.UNAUTHORIZED) elif container == 'socket_error_on_head': raise socket.error(111, 'ECONNREFUSED') pass def put_container(self, container, headers=None): if container == 'missing_container_socket_error_on_put': raise socket.error(111, 'ECONNREFUSED') def get_container(self, container, headers=None, **kwargs): fake_header = None fake_body = [{'name': 'backup_001'}, {'name': 'backup_002'}, {'name': 'backup_003'}] return fake_header, fake_body def head_object(self, container, name, headers=None): return {'etag': 'fake-md5-sum'} def get_object(self, container, name, headers=None): if container == 'socket_error_on_get': raise socket.error(111, 'ECONNREFUSED') if 'metadata' in name: fake_object_header = None metadata = {} if container == 'unsupported_version': metadata['version'] = '9.9.9' else: metadata['version'] = '1.0.0' metadata['backup_id'] = 123 metadata['volume_id'] = 123 metadata['backup_name'] = 'fake backup' metadata['backup_description'] = 'fake backup description' metadata['created_at'] = '2013-02-19 11:20:54,805' metadata['objects'] = [{ 'backup_001': {'compression': 'zlib', 'length': 10, 'offset': 0}, 'backup_002': {'compression': 'zlib', 'length': 10, 'offset': 10}, 'backup_003': {'compression': 'zlib', 'length': 10, 'offset': 20} }] metadata_json = json.dumps(metadata, sort_keys=True, indent=2) metadata_json = metadata_json.encode('utf-8') fake_object_body = metadata_json return (fake_object_header, fake_object_body) fake_header = None fake_object_body = os.urandom(1024 * 1024) return (fake_header, zlib.compress(fake_object_body)) def put_object(self, container, name, reader, content_length=None, etag=None, chunk_size=None, content_type=None, headers=None, query_string=None): if container == 'socket_error_on_put': raise socket.error(111, 'ECONNREFUSED') return 'fake-md5-sum' def delete_object(self, container, name, headers=None): if container == 'socket_error_on_delete': raise socket.error(111, 'ECONNREFUSED') if container == 'not_found_on_delete': raise swift_exc.ClientException( msg='404 Not Found', http_status=404, http_reason='Not Found') pass ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/backup/fake_swift_client2.py0000664000175000017500000000602600000000000024041 
0ustar00zuulzuul00000000000000# Copyright (C) 2012 Hewlett-Packard Development Company, L.P. # Copyright (C) 2014 TrilioData, Inc # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import hashlib from http import client as http_client import os import socket import tempfile from swiftclient import client as swift class FakeSwiftClient2(object): def __init__(self, *args, **kwargs): pass @classmethod def Connection(cls, *args, **kargs): return FakeSwiftConnection2() class FakeSwiftConnection2(object): def __init__(self, *args, **kwargs): self.tempdir = tempfile.mkdtemp() def head_container(self, container, headers=None): if container == 'missing_container': raise swift.ClientException('fake exception', http_status=http_client.NOT_FOUND) elif container == 'unauthorized_container': raise swift.ClientException('fake exception', http_status=http_client.UNAUTHORIZED) elif container == 'socket_error_on_head': raise socket.error(111, 'ECONNREFUSED') def put_container(self, container, headers=None): pass def get_container(self, container, headers=None, **kwargs): fake_header = None container_dir = tempfile.gettempdir() + '/' + container fake_body = [] for f in os.listdir(container_dir): try: f.index(kwargs['prefix']) fake_body.append({'name': f}) except Exception: pass return fake_header, fake_body def head_object(self, container, name, headers=None): return {'etag': 'fake-md5-sum'} def get_object(self, container, name, headers=None): if container == 'socket_error_on_get': raise socket.error(111, 'ECONNREFUSED') object_path = tempfile.gettempdir() + '/' + container + '/' + name with open(object_path, 'rb') as object_file: return (None, object_file.read()) def put_object(self, container, name, reader, content_length=None, etag=None, chunk_size=None, content_type=None, headers=None, query_string=None): object_path = tempfile.gettempdir() + '/' + container + '/' + name with open(object_path, 'wb') as object_file: object_file.write(reader.read()) return hashlib.md5(reader.read(), usedforsecurity=False).hexdigest() def delete_object(self, container, name, headers=None): pass ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/backup/test_backup.py0000664000175000017500000033704200000000000022610 0ustar00zuulzuul00000000000000# Copyright (C) 2012 Hewlett-Packard Development Company, L.P. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
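# A minimal illustrative sketch of how the fake Swift connection defined in
# fake_swift_client.py above drives its error paths: FakeSwiftConnection keys
# simulated failures purely on the container name a test passes in, so backup
# error handling can be exercised without a real Swift endpoint. The helper
# name below is hypothetical and only demonstrates that convention.
def _example_fake_swift_error_paths():
    from cinder.tests.unit.backup.fake_swift_client import FakeSwiftConnection

    conn = FakeSwiftConnection()
    # 'missing_container' simulates a 404 from Swift.
    try:
        conn.head_container('missing_container')
    except Exception as exc:
        print('head_container raised: %s' % exc)
    # Any other container name succeeds; get_container always lists the same
    # three fake backup objects.
    _, listing = conn.get_container('volumebackups')
    assert [o['name'] for o in listing] == ['backup_001', 'backup_002',
                                            'backup_003']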
"""Tests for Backup code.""" import copy import os from unittest import mock import uuid import ddt from eventlet import tpool from os_brick.initiator.connectors import fake as fake_connectors from oslo_config import cfg from oslo_db import exception as db_exc from oslo_service import loopingcall from oslo_utils import importutils from oslo_utils import timeutils import cinder from cinder.backup import api from cinder.backup import manager from cinder import context from cinder import db from cinder import exception from cinder.message import message_field from cinder import objects from cinder.objects import fields from cinder import quota from cinder.tests import fake_driver from cinder.tests.unit.api.v2 import fakes as v2_fakes from cinder.tests.unit import fake_constants as fake from cinder.tests.unit import known_issues as issues from cinder.tests.unit import test from cinder.tests.unit import utils from cinder.volume import rpcapi as volume_rpcapi CONF = cfg.CONF class FakeBackupException(Exception): pass class BaseBackupTest(test.TestCase): def setUp(self): super(BaseBackupTest, self).setUp() self.backup_mgr = importutils.import_object(CONF.backup_manager) self.backup_mgr.host = 'testhost' self.backup_mgr.is_initialized = True self.ctxt = context.get_admin_context() paths = ['cinder.volume.rpcapi.VolumeAPI.delete_snapshot', 'cinder.volume.rpcapi.VolumeAPI.delete_volume', 'cinder.volume.rpcapi.VolumeAPI.detach_volume', 'cinder.volume.rpcapi.VolumeAPI.' 'secure_file_operations_enabled'] self.volume_patches = {} self.volume_mocks = {} for path in paths: name = path.split('.')[-1] self.volume_patches[name] = mock.patch(path) self.volume_mocks[name] = self.volume_patches[name].start() self.addCleanup(self.volume_patches[name].stop) def _create_backup_db_entry(self, volume_id=str(uuid.uuid4()), restore_volume_id=None, display_name='test_backup', display_description='this is a test backup', container='volumebackups', status=fields.BackupStatus.CREATING, size=1, object_count=0, project_id=str(uuid.uuid4()), service=None, temp_volume_id=None, temp_snapshot_id=None, snapshot_id=None, metadata=None, parent_id=None, availability_zone='1', encryption_key_id=None): """Create a backup entry in the DB. Return the entry ID """ kwargs = {} kwargs['volume_id'] = volume_id kwargs['restore_volume_id'] = restore_volume_id kwargs['user_id'] = str(uuid.uuid4()) kwargs['project_id'] = project_id kwargs['host'] = 'testhost' kwargs['availability_zone'] = availability_zone kwargs['display_name'] = display_name kwargs['display_description'] = display_description kwargs['container'] = container kwargs['status'] = status kwargs['fail_reason'] = '' kwargs['service'] = service or CONF.backup_driver kwargs['snapshot_id'] = snapshot_id kwargs['parent_id'] = parent_id kwargs['size'] = size kwargs['object_count'] = object_count kwargs['temp_volume_id'] = temp_volume_id kwargs['temp_snapshot_id'] = temp_snapshot_id kwargs['metadata'] = metadata or {} kwargs['encryption_key_id'] = encryption_key_id backup = objects.Backup(context=self.ctxt, **kwargs) backup.create() return backup def _create_volume_db_entry(self, display_name='test_volume', display_description='this is a test volume', status='backing-up', previous_status='available', size=1, host='testhost', encryption_key_id=None, project_id=None): """Create a volume entry in the DB. 
Return the entry ID """ vol = {} vol['size'] = size vol['host'] = host vol['user_id'] = fake.USER_ID vol['project_id'] = project_id or fake.PROJECT_ID vol['status'] = status vol['display_name'] = display_name vol['display_description'] = display_description vol['attach_status'] = fields.VolumeAttachStatus.DETACHED vol['availability_zone'] = '1' vol['previous_status'] = previous_status vol['encryption_key_id'] = encryption_key_id vol['volume_type_id'] = fake.VOLUME_TYPE_ID volume = objects.Volume(context=self.ctxt, **vol) volume.create() return volume.id def _create_snapshot_db_entry(self, display_name='test_snapshot', display_description='test snapshot', status=fields.SnapshotStatus.AVAILABLE, size=1, volume_id=str(uuid.uuid4()), provider_location=None): """Create a snapshot entry in the DB. Return the entry ID. """ kwargs = {} kwargs['size'] = size kwargs['user_id'] = fake.USER_ID kwargs['project_id'] = fake.PROJECT_ID kwargs['status'] = status kwargs['display_name'] = display_name kwargs['display_description'] = display_description kwargs['volume_id'] = volume_id kwargs['cgsnapshot_id'] = None kwargs['volume_size'] = size kwargs['metadata'] = {} kwargs['provider_location'] = provider_location kwargs['volume_type_id'] = fake.VOLUME_TYPE_ID snapshot_obj = objects.Snapshot(context=self.ctxt, **kwargs) snapshot_obj.create() return snapshot_obj def _create_volume_attach(self, volume_id): values = {'volume_id': volume_id, 'attach_status': fields.VolumeAttachStatus.ATTACHED, } attachment = db.volume_attach(self.ctxt, values) db.volume_attached(self.ctxt, attachment['id'], None, 'testhost', '/dev/vd0') def _create_exported_record_entry(self, vol_size=1, exported_id=None): """Create backup metadata export entry.""" vol_id = self._create_volume_db_entry(status='available', size=vol_size) backup = self._create_backup_db_entry( status=fields.BackupStatus.AVAILABLE, volume_id=vol_id) if exported_id is not None: backup.id = exported_id export = self.backup_mgr.export_record(self.ctxt, backup) return export def _create_export_record_db_entry(self, volume_id=str(uuid.uuid4()), status=fields.BackupStatus.CREATING, project_id=str(uuid.uuid4()), backup_id=None): """Create a backup entry in the DB. Return the entry ID """ kwargs = {} kwargs['volume_id'] = volume_id kwargs['user_id'] = fake.USER_ID kwargs['project_id'] = project_id kwargs['status'] = status if backup_id: kwargs['id'] = backup_id backup = objects.BackupImport(context=self.ctxt, **kwargs) backup.create() return backup @ddt.ddt class BackupTestCase(BaseBackupTest): """Test Case for backups.""" @mock.patch.object(cinder.tests.fake_driver.FakeLoggingVolumeDriver, 'set_initialized') @mock.patch.object(cinder.tests.fake_driver.FakeLoggingVolumeDriver, 'do_setup') @mock.patch.object(cinder.tests.fake_driver.FakeLoggingVolumeDriver, 'check_for_setup_error') @mock.patch.object(cinder.db.sqlalchemy.api, '_volume_type_get_by_name', v2_fakes.fake_volume_type_get) @mock.patch('cinder.context.get_admin_context') def test_init_host(self, mock_get_admin_context, mock_check, mock_setup, mock_set_initialized): """Test stuck volumes and backups. 
Make sure stuck volumes and backups are reset to correct states when backup_manager.init_host() is called """ def get_admin_context(): return self.ctxt self.override_config('backup_service_inithost_offload', False) self.override_config('periodic_interval', 0) vol1_id = self._create_volume_db_entry() self._create_volume_attach(vol1_id) db.volume_update(self.ctxt, vol1_id, {'status': 'backing-up'}) vol2_id = self._create_volume_db_entry() self._create_volume_attach(vol2_id) db.volume_update(self.ctxt, vol2_id, {'status': 'restoring-backup'}) vol3_id = self._create_volume_db_entry() db.volume_update(self.ctxt, vol3_id, {'status': 'available'}) vol4_id = self._create_volume_db_entry() db.volume_update(self.ctxt, vol4_id, {'status': 'backing-up'}) temp_vol_id = self._create_volume_db_entry() db.volume_update(self.ctxt, temp_vol_id, {'status': 'available'}) vol5_id = self._create_volume_db_entry() db.volume_update(self.ctxt, vol5_id, {'status': 'backing-up'}) temp_snap = self._create_snapshot_db_entry() temp_snap.status = fields.SnapshotStatus.AVAILABLE temp_snap.save() backup1 = self._create_backup_db_entry( status=fields.BackupStatus.CREATING, volume_id=vol1_id) backup2 = self._create_backup_db_entry( status=fields.BackupStatus.RESTORING, restore_volume_id=vol2_id) backup3 = self._create_backup_db_entry( status=fields.BackupStatus.DELETING, volume_id=vol3_id) self._create_backup_db_entry(status=fields.BackupStatus.CREATING, volume_id=vol4_id, temp_volume_id=temp_vol_id) self._create_backup_db_entry(status=fields.BackupStatus.CREATING, volume_id=vol5_id, temp_snapshot_id=temp_snap.id) mock_get_admin_context.side_effect = get_admin_context self.volume = importutils.import_object(CONF.volume_manager) self.backup_mgr.init_host() vol1 = db.volume_get(self.ctxt, vol1_id) self.assertEqual('available', vol1['status']) vol2 = db.volume_get(self.ctxt, vol2_id) self.assertEqual('error_restoring', vol2['status']) vol3 = db.volume_get(self.ctxt, vol3_id) self.assertEqual('available', vol3['status']) vol4 = db.volume_get(self.ctxt, vol4_id) self.assertEqual('available', vol4['status']) vol5 = db.volume_get(self.ctxt, vol5_id) self.assertEqual('available', vol5['status']) backup1 = db.backup_get(self.ctxt, backup1.id) self.assertEqual(fields.BackupStatus.ERROR, backup1['status']) backup2 = db.backup_get(self.ctxt, backup2.id) self.assertEqual(fields.BackupStatus.AVAILABLE, backup2['status']) self.assertRaises(exception.BackupNotFound, db.backup_get, self.ctxt, backup3.id) temp_vol = objects.Volume.get_by_id(self.ctxt, temp_vol_id) self.volume_mocks['delete_volume'].assert_called_once_with( self.ctxt, temp_vol) self.assertTrue(self.volume_mocks['detach_volume'].called) @mock.patch('cinder.objects.backup.BackupList.get_all_by_host') @mock.patch('cinder.manager.ThreadPoolManager._add_to_threadpool') def test_init_host_with_service_inithost_offload(self, mock_add_threadpool, mock_get_all_by_host): vol1_id = self._create_volume_db_entry() db.volume_update(self.ctxt, vol1_id, {'status': 'available'}) backup1 = self._create_backup_db_entry( status=fields.BackupStatus.DELETING, volume_id=vol1_id) vol2_id = self._create_volume_db_entry() db.volume_update(self.ctxt, vol2_id, {'status': 'available'}) backup2 = self._create_backup_db_entry( status=fields.BackupStatus.DELETING, volume_id=vol2_id) mock_get_all_by_host.return_value = [backup1, backup2] self.backup_mgr.init_host() calls = [mock.call(self.backup_mgr.delete_backup, mock.ANY, backup1), mock.call(self.backup_mgr.delete_backup, mock.ANY, backup2)] 
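        # init_host should offload each DELETING backup to the thread pool
        # via _add_to_threadpool instead of deleting it inline; the call
        # assertions below verify exactly that.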
mock_add_threadpool.assert_has_calls(calls, any_order=True) # 3 calls because 1 is always made to handle encryption key migration. self.assertEqual(3, mock_add_threadpool.call_count) @mock.patch('cinder.keymgr.migration.migrate_fixed_key') @mock.patch('cinder.objects.BackupList.get_all_by_host') @mock.patch('cinder.manager.ThreadPoolManager._add_to_threadpool') def test_init_host_key_migration(self, mock_add_threadpool, mock_get_all_by_host, mock_migrate_fixed_key): self.backup_mgr.init_host() mock_add_threadpool.assert_called_once_with( mock_migrate_fixed_key, backups=mock_get_all_by_host()) @mock.patch('oslo_service.loopingcall.FixedIntervalLoopingCall') @ddt.data(123456, 654321) def test_setup_backup_backend_uses_new_config( self, new_cfg_value, mock_FILC): # previously used CONF.periodic_interval; see Bug #1828748 new_cfg_name = 'backup_driver_init_check_interval' self.addCleanup(CONF.clear_override, new_cfg_name) CONF.set_override(new_cfg_name, new_cfg_value) mock_init_loop = mock.MagicMock() mock_init_loop.start.side_effect = loopingcall.LoopingCallDone() mock_FILC.return_value = mock_init_loop self.backup_mgr.setup_backup_backend(self.ctxt) mock_init_loop.start.assert_called_once_with(interval=new_cfg_value) @mock.patch('cinder.objects.service.Service.get_minimum_rpc_version') @mock.patch('cinder.objects.service.Service.get_minimum_obj_version') @mock.patch('cinder.rpc.LAST_RPC_VERSIONS', {'cinder-backup': '1.3', 'cinder-volume': '1.7'}) def test_reset(self, get_min_obj, get_min_rpc): old_version = objects.base.OBJ_VERSIONS.versions[-2] with mock.patch('cinder.rpc.LAST_OBJ_VERSIONS', {'cinder-volume': old_version, 'cinder-scheduler': old_version, 'cinder-backup': old_version}): backup_mgr = manager.BackupManager() backup_rpcapi = backup_mgr.backup_rpcapi volume_rpcapi = backup_mgr.volume_rpcapi self.assertEqual('1.3', backup_rpcapi.client.version_cap) self.assertEqual(old_version, backup_rpcapi.client.serializer._base.version_cap) self.assertEqual('1.7', volume_rpcapi.client.version_cap) self.assertEqual(old_version, volume_rpcapi.client.serializer._base.version_cap) get_min_obj.return_value = objects.base.OBJ_VERSIONS.get_current() backup_mgr.reset() backup_rpcapi = backup_mgr.backup_rpcapi volume_rpcapi = backup_mgr.volume_rpcapi self.assertEqual(get_min_rpc.return_value, backup_rpcapi.client.version_cap) self.assertEqual(get_min_obj.return_value, backup_rpcapi.client.serializer._base.version_cap) self.assertIsNone(backup_rpcapi.client.serializer._base.manifest) self.assertEqual(get_min_rpc.return_value, volume_rpcapi.client.version_cap) self.assertEqual(get_min_obj.return_value, volume_rpcapi.client.serializer._base.version_cap) self.assertIsNone(volume_rpcapi.client.serializer._base.manifest) @ddt.data(True, False) def test_is_working(self, initialized): self.backup_mgr.is_initialized = initialized self.assertEqual(initialized, self.backup_mgr.is_working()) def test_cleanup_incomplete_backup_operations(self): """Test cleanup resilience when some are incomplete.""" # For correct operation, this test relies on the DB being # pre-populated by a properly complete backup. 
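        # Every status in the list below (including None) is treated as an
        # incomplete operation and must be handed to both cleanup helpers;
        # the AVAILABLE backup created afterwards is the control entry that
        # cleanup must leave untouched.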
fake_incomplete_backup_list = [ self._create_backup_db_entry(status=s) for s in (None, fields.BackupStatus.CREATING, fields.BackupStatus.DELETING, fields.BackupStatus.RESTORING, fields.BackupStatus.ERROR, fields.BackupStatus.ERROR_DELETING, fields.BackupStatus.DELETED) ] self._create_backup_db_entry(status=fields.BackupStatus.AVAILABLE) mock_backup_cleanup = self.mock_object( self.backup_mgr, '_cleanup_one_backup') mock_temp_cleanup = self.mock_object( self.backup_mgr, '_cleanup_temp_volumes_snapshots_for_one_backup') result = self.backup_mgr._cleanup_incomplete_backup_operations( self.ctxt) self.assertIsNone(result) self.assertEqual(len(fake_incomplete_backup_list), mock_backup_cleanup.call_count) for b in fake_incomplete_backup_list: mock_backup_cleanup.assert_any_call(self.ctxt, b) self.assertEqual(len(fake_incomplete_backup_list), mock_temp_cleanup.call_count) for b in fake_incomplete_backup_list: mock_temp_cleanup.assert_any_call(self.ctxt, b) def test_cleanup_incomplete_backup_operations_with_exceptions(self): """Test cleanup resilience in the face of exceptions.""" fake_backup_list = [ self._create_backup_db_entry(status=s) for s in (fields.BackupStatus.CREATING, fields.BackupStatus.DELETING, fields.BackupStatus.RESTORING, fields.BackupStatus.ERROR, fields.BackupStatus.ERROR_DELETING, fields.BackupStatus.DELETED) ] self._create_backup_db_entry(status=fields.BackupStatus.AVAILABLE) mock_backup_cleanup = self.mock_object( self.backup_mgr, '_cleanup_one_backup') mock_backup_cleanup.side_effect = [Exception] mock_temp_cleanup = self.mock_object( self.backup_mgr, '_cleanup_temp_volumes_snapshots_for_one_backup') mock_temp_cleanup.side_effect = [Exception] self.assertIsNone( self.backup_mgr._cleanup_incomplete_backup_operations( self.ctxt)) self.assertEqual(len(fake_backup_list), mock_backup_cleanup.call_count) self.assertEqual(len(fake_backup_list), mock_temp_cleanup.call_count) @mock.patch('cinder.objects.BackupList') @mock.patch.object(manager.BackupManager, '_cleanup_one_backup') @mock.patch.object(manager.BackupManager, '_cleanup_temp_volumes_snapshots_for_one_backup') def test_cleanup_non_primary_process(self, temp_cleanup_mock, backup_cleanup_mock, backup_ovo_mock): """Test cleanup doesn't run on non primary processes.""" self.backup_mgr._process_number = 2 self.backup_mgr._cleanup_incomplete_backup_operations(self.ctxt) backup_ovo_mock.get_all_by_host.assert_not_called() backup_cleanup_mock.assert_not_called() temp_cleanup_mock.assert_not_called() def test_cleanup_one_backing_up_volume(self): """Test cleanup_one_volume for volume status 'backing-up'.""" volume_id = self._create_volume_db_entry(status='backing-up', previous_status='available') volume = db.volume_get(self.ctxt, volume_id) self.backup_mgr._cleanup_one_volume(self.ctxt, volume_id) volume = db.volume_get(self.ctxt, volume_id) self.assertEqual('available', volume['status']) def test_cleanup_one_backing_up_snapshot(self): """Test cleanup_one_snapshot for snapshot status 'backing-up'.""" volume_id = str(uuid.uuid4()) snapshot_entry = self._create_snapshot_db_entry(status='backing-up', volume_id=volume_id) self.backup_mgr._cleanup_one_snapshot(self.ctxt, snapshot_entry.id) snapshot = db.snapshot_get(self.ctxt, snapshot_entry.id) self.assertEqual('available', snapshot['status']) def test_cleanup_one_restoring_backup_volume(self): """Test cleanup_one_volume for volume status 'restoring-backup'.""" volume_id = self._create_volume_db_entry(status='restoring-backup') volume = db.volume_get(self.ctxt, volume_id) 
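        # _cleanup_one_volume should roll a 'backing-up' volume back to its
        # previous_status ('available' here), which the re-read below checks.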
self.backup_mgr._cleanup_one_volume(self.ctxt, volume_id) volume = db.volume_get(self.ctxt, volume_id) self.assertEqual('error_restoring', volume['status']) @ddt.data(fields.BackupStatus.CREATING, fields.BackupStatus.RESTORING) def test_cleanup_one_backup_with_deleted_volume(self, backup_status): """Test cleanup_one_backup for non-existing volume.""" volume_id = str(uuid.uuid4()) backup = self._create_backup_db_entry( status=backup_status, volume_id=volume_id, restore_volume_id=volume_id ) mock_log = self.mock_object(manager, 'LOG') self.backup_mgr._cleanup_one_backup(self.ctxt, backup) mock_log.info.assert_called_with( 'Volume %s does not exist anymore. Ignoring.', volume_id ) @ddt.data(fields.BackupStatus.CREATING, fields.BackupStatus.RESTORING) def test_cleanup_one_backup_with_deleted_snapshot(self, backup_status): """Test cleanup_one_backup for non-existing volume.""" volume_id = str(uuid.uuid4()) snapshot_id = str(uuid.uuid4()) backup = self._create_backup_db_entry( status=backup_status, volume_id=volume_id, restore_volume_id=volume_id, snapshot_id=snapshot_id ) mock_log = self.mock_object(manager, 'LOG') self.backup_mgr._cleanup_one_backup(self.ctxt, backup) mock_log.info.assert_called_with( 'Snapshot %s does not exist anymore. Ignoring.', snapshot_id ) def test_cleanup_one_creating_backup(self): """Test cleanup_one_backup for volume status 'creating'.""" vol1_id = self._create_volume_db_entry() self._create_volume_attach(vol1_id) db.volume_update(self.ctxt, vol1_id, {'status': 'backing-up', }) backup = self._create_backup_db_entry( status=fields.BackupStatus.CREATING, volume_id=vol1_id) self.backup_mgr._cleanup_one_backup(self.ctxt, backup) self.assertEqual(fields.BackupStatus.ERROR, backup.status) volume = objects.Volume.get_by_id(self.ctxt, vol1_id) self.assertEqual('available', volume.status) def test_cleanup_one_restoring_backup(self): """Test cleanup_one_backup for volume status 'restoring'.""" vol1_id = self._create_volume_db_entry() db.volume_update(self.ctxt, vol1_id, {'status': 'restoring-backup', }) backup = self._create_backup_db_entry( status=fields.BackupStatus.RESTORING, restore_volume_id=vol1_id) self.backup_mgr._cleanup_one_backup(self.ctxt, backup) self.assertEqual(fields.BackupStatus.AVAILABLE, backup.status) volume = objects.Volume.get_by_id(self.ctxt, vol1_id) self.assertEqual('error_restoring', volume.status) def test_cleanup_one_deleting_backup(self): """Test cleanup_one_backup for backup status 'deleting'.""" self.override_config('backup_service_inithost_offload', False) backup = self._create_backup_db_entry( status=fields.BackupStatus.DELETING) self.backup_mgr._cleanup_one_backup(self.ctxt, backup) self.assertRaises(exception.BackupNotFound, db.backup_get, self.ctxt, backup.id) def test_cleanup_one_deleting_encrypted_backup(self): """Test cleanup of backup status 'deleting' (encrypted).""" self.override_config('backup_service_inithost_offload', False) backup = self._create_backup_db_entry( status=fields.BackupStatus.DELETING, encryption_key_id=fake.ENCRYPTION_KEY_ID) self.backup_mgr._cleanup_one_backup(self.ctxt, backup) backup = db.backup_get(self.ctxt, backup.id) self.assertIsNotNone(backup) self.assertEqual(fields.BackupStatus.ERROR_DELETING, backup.status) def test_detach_all_attachments_handles_exceptions(self): """Test detach_all_attachments with exceptions.""" mock_log = self.mock_object(manager, 'LOG') self.volume_mocks['detach_volume'].side_effect = [Exception] fake_attachments = [ { 'id': fake.ATTACHMENT_ID, 'attached_host': 'testhost', 
'instance_uuid': None, }, { 'id': fake.ATTACHMENT2_ID, 'attached_host': 'testhost', 'instance_uuid': None, } ] fake_volume = { 'id': fake.VOLUME3_ID, 'volume_attachment': fake_attachments } self.backup_mgr._detach_all_attachments(self.ctxt, fake_volume) self.assertEqual(len(fake_attachments), mock_log.exception.call_count) @ddt.data(KeyError, exception.VolumeNotFound) def test_cleanup_temp_volumes_snapshots_for_one_backup_volume_not_found( self, err): """Ensure we handle missing volume for a backup.""" mock_volume_get = self.mock_object(db, 'volume_get') mock_volume_get.side_effect = [err] backup = self._create_backup_db_entry( status=fields.BackupStatus.CREATING) self.assertIsNone( self.backup_mgr._cleanup_temp_volumes_snapshots_for_one_backup( self.ctxt, backup)) def test_cleanup_temp_snapshot_for_one_backup_not_found(self): """Ensure we handle missing temp snapshot for a backup.""" vol1_id = self._create_volume_db_entry() self._create_volume_attach(vol1_id) db.volume_update(self.ctxt, vol1_id, {'status': 'backing-up'}) backup = self._create_backup_db_entry( status=fields.BackupStatus.ERROR, volume_id=vol1_id, temp_snapshot_id=fake.SNAPSHOT_ID) self.assertIsNone( self.backup_mgr._cleanup_temp_volumes_snapshots_for_one_backup( self.ctxt, backup)) self.assertFalse(self.volume_mocks['delete_snapshot'].called) self.assertIsNone(backup.temp_snapshot_id) backup.destroy() db.volume_destroy(self.ctxt, vol1_id) def test_cleanup_temp_volume_for_one_backup_not_found(self): """Ensure we handle missing temp volume for a backup.""" vol1_id = self._create_volume_db_entry() self._create_volume_attach(vol1_id) db.volume_update(self.ctxt, vol1_id, {'status': 'backing-up'}) backup = self._create_backup_db_entry(status=fields.BackupStatus.ERROR, volume_id=vol1_id, temp_volume_id=fake.VOLUME4_ID) self.assertIsNone( self.backup_mgr._cleanup_temp_volumes_snapshots_for_one_backup( self.ctxt, backup)) self.assertFalse(self.volume_mocks['delete_volume'].called) self.assertIsNone(backup.temp_volume_id) backup.destroy() db.volume_destroy(self.ctxt, vol1_id) def test_create_backup_with_bad_volume_status(self): """Test creating a backup from a volume with a bad status.""" vol_id = self._create_volume_db_entry( status='restoring-backup', size=1) backup = self._create_backup_db_entry(volume_id=vol_id) self.assertRaises(exception.InvalidVolume, self.backup_mgr.create_backup, self.ctxt, backup) def test_create_backup_with_bad_backup_status(self): """Test creating a backup with a backup with a bad status.""" vol_id = self._create_volume_db_entry(size=1) backup = self._create_backup_db_entry( status=fields.BackupStatus.AVAILABLE, volume_id=vol_id) self.assertRaises(exception.InvalidBackup, self.backup_mgr.create_backup, self.ctxt, backup) def test_create_backup_with_error(self): """Test error handling when error occurs during backup creation.""" vol_id = self._create_volume_db_entry(size=1) backup = self._create_backup_db_entry(volume_id=vol_id) mock_run_backup = self.mock_object(self.backup_mgr, '_start_backup') mock_run_backup.side_effect = FakeBackupException(str(uuid.uuid4())) self.assertRaises(FakeBackupException, self.backup_mgr.create_backup, self.ctxt, backup) vol = db.volume_get(self.ctxt, vol_id) self.assertEqual('available', vol['status']) self.assertEqual('error_backing-up', vol['previous_status']) backup = db.backup_get(self.ctxt, backup.id) self.assertEqual(fields.BackupStatus.ERROR, backup['status']) self.assertTrue(mock_run_backup.called) @mock.patch('cinder.backup.manager.BackupManager._start_backup') def 
test_create_backup_aborted(self, start_backup_mock): """Test error handling when abort occurs during backup creation.""" def my_start_backup(*args, **kwargs): backup.destroy() with backup.as_read_deleted(): original_refresh() start_backup_mock.side_effect = my_start_backup vol_id = self._create_volume_db_entry(size=1) backup = self._create_backup_db_entry(volume_id=vol_id) original_refresh = backup.refresh vol = objects.Volume.get_by_id(self.ctxt, vol_id) self.backup_mgr.create_backup(self.ctxt, backup) vol = objects.Volume.get_by_id(self.ctxt, vol_id) self.backup_mgr._finish_backup(self.ctxt, backup, vol, {}) self.assertTrue(start_backup_mock.called) vol = objects.Volume.get_by_id(self.ctxt, vol_id) self.assertEqual('available', vol.status) self.assertEqual('backing-up', vol['previous_status']) # Make sure we didn't set the backup to available after it was deleted with backup.as_read_deleted(): backup.refresh() self.assertEqual(fields.BackupStatus.DELETED, backup.status) @mock.patch('cinder.backup.manager.BackupManager._start_backup', side_effect=FakeBackupException(str(uuid.uuid4()))) @mock.patch.object(db, 'volume_update') def test_create_backup_aborted_volume_not_found(self, vol_up_mock, start_backup_mock): """Test error handling when backup fails and volume does not exist.""" vol_id = self._create_volume_db_entry(size=1) backup = self._create_backup_db_entry(volume_id=vol_id) vol_up_mock.side_effect = exception.VolumeNotFound(volume_id=vol_id) self.assertRaises(FakeBackupException, self.backup_mgr.create_backup, self.ctxt, backup) backup.refresh() self.assertEqual(fields.BackupStatus.ERROR, backup.status) self.assertTrue(start_backup_mock.called) self.assertTrue(vol_up_mock.called) @mock.patch('cinder.backup.manager.BackupManager._start_backup', side_effect=FakeBackupException(str(uuid.uuid4()))) def test_create_backup_with_snapshot_error(self, mock_start_backup): """Test error handling when error occurs during backup creation.""" vol_id = self._create_volume_db_entry(size=1) snapshot = self._create_snapshot_db_entry(status='backing-up', volume_id=vol_id) backup = self._create_backup_db_entry(volume_id=vol_id, snapshot_id=snapshot.id) self.assertRaises(FakeBackupException, self.backup_mgr.create_backup, self.ctxt, backup) snapshot.refresh() self.assertEqual('available', snapshot.status) backup.refresh() self.assertEqual(fields.BackupStatus.ERROR, backup.status) self.assertTrue(mock_start_backup.called) @mock.patch('cinder.volume.volume_utils.brick_get_connector_properties') @mock.patch('cinder.volume.rpcapi.VolumeAPI.get_backup_device') @mock.patch('cinder.utils.temporary_chown') @mock.patch('builtins.open', wraps=open) @mock.patch.object(os.path, 'isdir', return_value=False) def test_create_backup(self, mock_isdir, mock_open, mock_temporary_chown, mock_get_backup_device, mock_get_conn): """Test normal backup creation.""" vol_size = 1 vol_id = self._create_volume_db_entry(size=vol_size) backup = self._create_backup_db_entry(volume_id=vol_id) vol = objects.Volume.get_by_id(self.ctxt, vol_id) backup_device_dict = {'backup_device': vol, 'secure_enabled': False, 'is_snapshot': False, } mock_backup_device = ( objects.BackupDeviceInfo.from_primitive(backup_device_dict, self.ctxt, ['admin_metadata', 'metadata'])) attach_info = {'device': {'path': '/dev/null'}} mock_detach_device = self.mock_object(self.backup_mgr, '_detach_device') mock_attach_device = self.mock_object(self.backup_mgr, '_attach_device') mock_attach_device.return_value = attach_info properties = {} mock_get_conn.return_value 
= properties self.backup_mgr.create_backup(self.ctxt, backup) self.backup_mgr.continue_backup(self.ctxt, backup, mock_backup_device) mock_temporary_chown.assert_called_once_with('/dev/null') mock_attach_device.assert_called_once_with(self.ctxt, vol, properties, False) mock_get_backup_device.assert_called_once_with(self.ctxt, backup, vol) mock_get_conn.assert_called_once_with(False, enforce_multipath=False) mock_detach_device.assert_called_once_with(self.ctxt, attach_info, vol, properties, False, force=True, ignore_errors=True) mock_open.assert_called_once_with('/dev/null', 'rb') vol = objects.Volume.get_by_id(self.ctxt, vol_id) self.assertEqual('available', vol['status']) self.assertEqual('backing-up', vol['previous_status']) backup = db.backup_get(self.ctxt, backup.id) self.assertEqual(fields.BackupStatus.AVAILABLE, backup['status']) self.assertEqual(vol_size, backup['size']) self.assertIsNone(backup.encryption_key_id) @mock.patch('cinder.volume.volume_utils.brick_get_connector_properties') @mock.patch('cinder.volume.rpcapi.VolumeAPI.get_backup_device') @mock.patch('cinder.utils.temporary_chown') @mock.patch('builtins.open') @mock.patch.object(os.path, 'isdir', return_value=True) def test_create_backup_set_parent_id_to_none(self, mock_isdir, mock_open, mock_chown, mock_backup_device, mock_brick): vol_size = 1 vol_id = self._create_volume_db_entry(size=vol_size) backup = self._create_backup_db_entry(volume_id=vol_id, parent_id='mock') with mock.patch.object(self.backup_mgr, 'service') as \ mock_service: mock_service.return_value.backup.return_value = ( {'parent_id': None}) with mock.patch.object(self.backup_mgr, '_detach_device'): device_path = '/fake/disk/path/' attach_info = {'device': {'path': device_path}} mock_attach_device = self.mock_object(self.backup_mgr, '_attach_device') mock_attach_device.return_value = attach_info properties = {} mock_brick.return_value = properties mock_open.return_value = open('/dev/null', 'rb') mock_brick.return_value = properties self.backup_mgr.create_backup(self.ctxt, backup) self.backup_mgr.continue_backup(self.ctxt, backup, mock_backup_device) backup = db.backup_get(self.ctxt, backup.id) self.assertEqual(fields.BackupStatus.AVAILABLE, backup.status) self.assertEqual(vol_size, backup.size) self.assertIsNone(backup.parent_id) @mock.patch('cinder.volume.volume_utils.brick_get_connector_properties') @mock.patch('cinder.volume.rpcapi.VolumeAPI.get_backup_device') @mock.patch('cinder.utils.temporary_chown') @mock.patch('builtins.open') @mock.patch.object(os.path, 'isdir', return_value=True) def test_create_backup_set_parent_id(self, mock_isdir, mock_open, mock_chown, mock_backup_device, mock_brick): vol_size = 1 vol_id = self._create_volume_db_entry(size=vol_size) backup = self._create_backup_db_entry(volume_id=vol_id) parent_backup = self._create_backup_db_entry(size=vol_size) with mock.patch.object(self.backup_mgr, 'service') as \ mock_service: mock_service.return_value.backup.return_value = ( {'parent_id': parent_backup.id}) with mock.patch.object(self.backup_mgr, '_detach_device'): device_path = '/fake/disk/path/' attach_info = {'device': {'path': device_path}} mock_attach_device = self.mock_object(self.backup_mgr, '_attach_device') mock_attach_device.return_value = attach_info properties = {} mock_brick.return_value = properties mock_open.return_value = open('/dev/null', 'rb') mock_brick.return_value = properties self.backup_mgr.create_backup(self.ctxt, backup) self.backup_mgr.continue_backup(self.ctxt, backup, mock_backup_device) backup = 
db.backup_get(self.ctxt, backup.id) self.assertEqual(fields.BackupStatus.AVAILABLE, backup.status) self.assertEqual(vol_size, backup.size) self.assertEqual(parent_backup.id, backup.parent_id) @mock.patch('cinder.volume.volume_utils.brick_get_connector_properties') @mock.patch('cinder.volume.rpcapi.VolumeAPI.get_backup_device') @mock.patch('cinder.utils.temporary_chown') @mock.patch('builtins.open') @mock.patch.object(os.path, 'isdir', return_value=True) def test_create_backup_fail_with_excep(self, mock_isdir, mock_open, mock_chown, mock_backup_device, mock_brick): vol_id = self._create_volume_db_entry() backup = self._create_backup_db_entry(volume_id=vol_id) # These are set in create_backup, but we are calling # continue_backup self.ctxt.message_resource_id = backup.id self.ctxt.message_resource_type = message_field.Resource.VOLUME_BACKUP self.ctxt.message_action = message_field.Action.BACKUP_CREATE with mock.patch.object(self.backup_mgr, 'service') as \ mock_service: mock_service.return_value.backup.side_effect = ( FakeBackupException('fake')) with mock.patch.object(self.backup_mgr, '_detach_device'): device_path = '/fake/disk/path/' attach_info = {'device': {'path': device_path}} mock_attach_device = self.mock_object(self.backup_mgr, '_attach_device') mock_attach_device.return_value = attach_info properties = {} mock_brick.return_value = properties mock_open.return_value = open('/dev/null', 'rb') mock_brick.return_value = properties self.assertRaises(FakeBackupException, self.backup_mgr.continue_backup, self.ctxt, backup, mock_backup_device) vol = db.volume_get(self.ctxt, vol_id) self.assertEqual('available', vol.status) self.assertEqual('error_backing-up', vol.previous_status) backup = db.backup_get(self.ctxt, backup.id) self.assertEqual(fields.BackupStatus.ERROR, backup.status) @mock.patch('cinder.backup.manager.BackupManager._finish_backup') @mock.patch('cinder.volume.volume_utils.brick_get_connector_properties') @mock.patch('cinder.volume.rpcapi.VolumeAPI.get_backup_device') @mock.patch('cinder.utils.temporary_chown') @mock.patch('builtins.open') @mock.patch.object(os.path, 'isdir', return_value=True) def test_run_backup_with_dir_device_path(self, mock_isdir, mock_open, mock_chown, mock_backup_device, mock_brick, mock_finish): backup_service = mock.Mock() backup_service.backup = mock.Mock( return_value=mock.sentinel.backup_update) self.backup_mgr.service = lambda x: backup_service vol_id = self._create_volume_db_entry() backup = self._create_backup_db_entry(volume_id=vol_id) volume = objects.Volume.get_by_id(self.ctxt, vol_id) # device_path is represented by a directory device_path = '/fake/disk/path/' attach_info = {'device': {'path': device_path}} self.backup_mgr._attach_device = mock.Mock( return_value=attach_info) self.backup_mgr._detach_device = mock.Mock() self.backup_mgr.continue_backup(self.ctxt, backup, mock_backup_device) mock_chown.assert_not_called() mock_open.assert_not_called() backup_service.backup.assert_called_once_with( backup, device_path) mock_finish.assert_called_once_with(self.ctxt, backup, volume, mock.sentinel.backup_update) @mock.patch('cinder.backup.manager.BackupManager._start_backup') @ddt.data((fields.SnapshotStatus.BACKING_UP, 'available'), (fields.SnapshotStatus.BACKING_UP, 'in-use'), (fields.SnapshotStatus.AVAILABLE, 'available'), (fields.SnapshotStatus.AVAILABLE, 'in-use')) @ddt.unpack def test_create_backup_with_snapshot(self, snapshot_status, volume_status, mock_start_backup): vol_id = self._create_volume_db_entry(status=volume_status) snapshot = 
self._create_snapshot_db_entry(volume_id=vol_id, status=snapshot_status) backup = self._create_backup_db_entry(volume_id=vol_id, snapshot_id=snapshot.id) if snapshot_status == fields.SnapshotStatus.BACKING_UP: self.backup_mgr.create_backup(self.ctxt, backup) vol = objects.Volume.get_by_id(self.ctxt, vol_id) self.backup_mgr._finish_backup(self.ctxt, backup, vol, {}) vol = objects.Volume.get_by_id(self.ctxt, vol_id) snapshot = objects.Snapshot.get_by_id(self.ctxt, snapshot.id) self.assertEqual(volume_status, vol.status) self.assertEqual(fields.SnapshotStatus.AVAILABLE, snapshot.status) else: self.assertRaises(exception.InvalidSnapshot, self.backup_mgr.create_backup, self.ctxt, backup) @mock.patch('cinder.volume.rpcapi.VolumeAPI.remove_export_snapshot') @mock.patch('cinder.volume.volume_utils.brick_get_connector_properties') @mock.patch('cinder.volume.rpcapi.VolumeAPI.get_backup_device') @mock.patch('cinder.utils.temporary_chown') @mock.patch('builtins.open') @mock.patch.object(os.path, 'isdir', return_value=False) def test_create_backup_with_temp_snapshot(self, mock_isdir, mock_open, mock_temporary_chown, mock_get_backup_device, mock_get_conn, mock_remove_export_snapshot): """Test backup in-use volume using temp snapshot.""" self.override_config('backup_use_same_host', True) vol_size = 1 vol_id = self._create_volume_db_entry(size=vol_size, previous_status='in-use') backup = self._create_backup_db_entry(volume_id=vol_id) snap = self._create_snapshot_db_entry(volume_id=vol_id) vol = objects.Volume.get_by_id(self.ctxt, vol_id) mock_backup_device = ( objects.BackupDeviceInfo.from_primitive({ 'backup_device': snap, 'secure_enabled': False, 'is_snapshot': True, }, self.ctxt, expected_attrs=['metadata'])) attach_info = { 'device': {'path': '/dev/null'}, 'conn': {'data': {}}, 'connector': fake_connectors.FakeConnector(None)} mock_terminate_connection_snapshot = self.mock_object( volume_rpcapi.VolumeAPI, 'terminate_connection_snapshot') mock_initialize_connection_snapshot = self.mock_object( volume_rpcapi.VolumeAPI, 'initialize_connection_snapshot') mock_connect_device = self.mock_object( manager.BackupManager, '_connect_device') mock_connect_device.return_value = attach_info properties = {} mock_get_conn.return_value = properties mock_open.return_value = open('/dev/null', 'rb') self.backup_mgr.create_backup(self.ctxt, backup) self.backup_mgr.continue_backup(self.ctxt, backup, mock_backup_device) mock_temporary_chown.assert_called_once_with('/dev/null') mock_initialize_connection_snapshot.assert_called_once_with( self.ctxt, snap, properties) mock_get_backup_device.assert_called_once_with(self.ctxt, backup, vol) mock_get_conn.assert_called_once_with(False, enforce_multipath=False) mock_terminate_connection_snapshot.assert_called_once_with( self.ctxt, snap, properties, force=True) mock_remove_export_snapshot.assert_called_once_with( self.ctxt, mock.ANY, sync=True) vol = objects.Volume.get_by_id(self.ctxt, vol_id) self.assertEqual('in-use', vol['status']) self.assertEqual('backing-up', vol['previous_status']) backup = objects.Backup.get_by_id(self.ctxt, backup.id) self.assertEqual(fields.BackupStatus.AVAILABLE, backup.status) self.assertEqual(vol_size, backup.size) @mock.patch.object(fake_driver.FakeLoggingVolumeDriver, 'create_snapshot') def test_create_temp_snapshot(self, mock_create_snapshot): volume_manager = importutils.import_object(CONF.volume_manager) volume_manager.driver.set_initialized() vol_size = 1 vol_id = self._create_volume_db_entry(size=vol_size, previous_status='in-use') vol = 
objects.Volume.get_by_id(self.ctxt, vol_id) mock_create_snapshot.return_value = {'provider_id': 'fake_provider_id'} temp_snap = volume_manager.driver._create_temp_snapshot( self.ctxt, vol) self.assertEqual('available', temp_snap['status']) self.assertEqual('fake_provider_id', temp_snap['provider_id']) @mock.patch.object(fake_driver.FakeLoggingVolumeDriver, 'create_cloned_volume') def test_create_temp_cloned_volume(self, mock_create_cloned_volume): volume_manager = importutils.import_object(CONF.volume_manager) volume_manager.driver.set_initialized() vol_size = 1 vol_id = self._create_volume_db_entry(size=vol_size, previous_status='in-use') vol = objects.Volume.get_by_id(self.ctxt, vol_id) mock_create_cloned_volume.return_value = {'provider_id': 'fake_provider_id'} temp_vol = volume_manager.driver._create_temp_cloned_volume( self.ctxt, vol, status=fields.VolumeStatus.BACKING_UP) self.assertEqual(fields.VolumeStatus.BACKING_UP, temp_vol['status']) self.assertEqual('fake_provider_id', temp_vol['provider_id']) @mock.patch.object(fake_driver.FakeLoggingVolumeDriver, 'create_volume_from_snapshot') def test_create_temp_volume_from_snapshot(self, mock_create_vol_from_snap): volume_manager = importutils.import_object(CONF.volume_manager) volume_manager.driver.set_initialized() vol_size = 1 vol_id = self._create_volume_db_entry(size=vol_size, previous_status='in-use') vol = objects.Volume.get_by_id(self.ctxt, vol_id) snap = self._create_snapshot_db_entry(volume_id=vol_id) mock_create_vol_from_snap.return_value = {'provider_id': 'fake_provider_id'} temp_vol = volume_manager.driver._create_temp_volume_from_snapshot( self.ctxt, vol, snap, status=fields.VolumeStatus.BACKING_UP) self.assertEqual(fields.VolumeStatus.BACKING_UP, temp_vol['status']) self.assertEqual('fake_provider_id', temp_vol['provider_id']) @mock.patch('cinder.volume.volume_utils.notify_about_backup_usage') def test_create_backup_with_notify(self, notify): """Test normal backup creation with notifications.""" vol_size = 1 vol_id = self._create_volume_db_entry(size=vol_size) backup = self._create_backup_db_entry(volume_id=vol_id) self.mock_object(self.backup_mgr, '_start_backup') self.backup_mgr.create_backup(self.ctxt, backup) self.assertEqual(1, notify.call_count) @mock.patch('cinder.volume.rpcapi.VolumeAPI.get_backup_device') @mock.patch('cinder.volume.volume_utils.clone_encryption_key') @mock.patch('cinder.volume.volume_utils.brick_get_connector_properties') def test_create_backup_encrypted_volume(self, mock_connector_properties, mock_clone_encryption_key, mock_get_backup_device): """Test backup of encrypted volume. Test whether the volume's encryption key ID is cloned and saved in the backup. 
""" vol_id = self._create_volume_db_entry(encryption_key_id=fake.UUID1) backup = self._create_backup_db_entry(volume_id=vol_id) self.mock_object(self.backup_mgr, '_detach_device') mock_attach_device = self.mock_object(self.backup_mgr, '_attach_device') mock_attach_device.return_value = {'device': {'path': '/dev/null'}} mock_clone_encryption_key.return_value = fake.UUID2 self.backup_mgr.create_backup(self.ctxt, backup) mock_clone_encryption_key.assert_called_once_with(self.ctxt, mock.ANY, fake.UUID1) backup = db.backup_get(self.ctxt, backup.id) self.assertEqual(fake.UUID2, backup.encryption_key_id) @mock.patch('cinder.volume.rpcapi.VolumeAPI.get_backup_device') @mock.patch('cinder.volume.volume_utils.clone_encryption_key') @mock.patch('cinder.volume.volume_utils.brick_get_connector_properties') def test_create_backup_encrypted_volume_again(self, mock_connector_properties, mock_clone_encryption_key, mock_get_backup_device): """Test backup of encrypted volume. Test when the backup already has a clone of the volume's encryption key ID. """ vol_id = self._create_volume_db_entry(encryption_key_id=fake.UUID1) backup = self._create_backup_db_entry(volume_id=vol_id, encryption_key_id=fake.UUID2) self.mock_object(self.backup_mgr, '_detach_device') mock_attach_device = self.mock_object(self.backup_mgr, '_attach_device') mock_attach_device.return_value = {'device': {'path': '/dev/null'}} self.backup_mgr.create_backup(self.ctxt, backup) mock_clone_encryption_key.assert_not_called() def test_restore_backup_with_bad_volume_status(self): """Test error handling. Test error handling when restoring a backup to a volume with a bad status. """ vol_id = self._create_volume_db_entry(status='available', size=1) backup = self._create_backup_db_entry(volume_id=vol_id) self.assertRaises(exception.InvalidVolume, self.backup_mgr.restore_backup, self.ctxt, backup, vol_id, False) backup = db.backup_get(self.ctxt, backup.id) vol = db.volume_get(self.ctxt, vol_id) self.assertEqual('error_restoring', vol['status']) self.assertEqual(fields.BackupStatus.AVAILABLE, backup['status']) def test_restore_backup_with_bad_backup_status(self): """Test error handling. Test error handling when restoring a backup with a backup with a bad status. 
""" vol_id = self._create_volume_db_entry(status='restoring-backup', size=1) backup = self._create_backup_db_entry( status=fields.BackupStatus.AVAILABLE, volume_id=vol_id) self.assertRaises(exception.InvalidBackup, self.backup_mgr.restore_backup, self.ctxt, backup, vol_id, False) vol = db.volume_get(self.ctxt, vol_id) self.assertEqual('error', vol['status']) backup = db.backup_get(self.ctxt, backup.id) self.assertEqual(fields.BackupStatus.ERROR, backup['status']) def test_restore_backup_with_driver_error(self): """Test error handling when an error occurs during backup restore.""" vol_id = self._create_volume_db_entry(status='restoring-backup', size=1) backup = self._create_backup_db_entry( status=fields.BackupStatus.RESTORING, volume_id=vol_id) mock_run_restore = self.mock_object( self.backup_mgr, '_run_restore') mock_run_restore.side_effect = FakeBackupException('fake') self.assertRaises(FakeBackupException, self.backup_mgr.restore_backup, self.ctxt, backup, vol_id, False) vol = db.volume_get(self.ctxt, vol_id) self.assertEqual('error_restoring', vol['status']) backup = db.backup_get(self.ctxt, backup.id) self.assertEqual(fields.BackupStatus.AVAILABLE, backup['status']) self.assertTrue(mock_run_restore.called) def test_restore_backup_with_driver_cancellation(self): """Test error handling when a restore is cancelled.""" vol_id = self._create_volume_db_entry(status='restoring-backup', size=1) backup = self._create_backup_db_entry( status=fields.BackupStatus.RESTORING, volume_id=vol_id) mock_run_restore = self.mock_object( self.backup_mgr, '_run_restore') mock_run_restore.side_effect = exception.BackupRestoreCancel( vol_id=vol_id, back_id=backup.id) # We shouldn't raise an exception on the call, it's OK to cancel self.backup_mgr.restore_backup(self.ctxt, backup, vol_id, False) vol = objects.Volume.get_by_id(self.ctxt, vol_id) self.assertEqual('error', vol.status) backup.refresh() self.assertEqual(fields.BackupStatus.AVAILABLE, backup.status) self.assertTrue(mock_run_restore.called) def test_restore_backup_with_creating_volume(self): """Test restore backup with a creating volume.""" vol_id = self._create_volume_db_entry( status=fields.VolumeStatus.CREATING, size=1) backup = self._create_backup_db_entry( status=fields.BackupStatus.RESTORING, volume_id=vol_id) mock_run_restore = self.mock_object( self.backup_mgr, '_run_restore') self.backup_mgr.restore_backup(self.ctxt, backup, vol_id, False) vol = objects.Volume.get_by_id(self.ctxt, vol_id) self.assertEqual(fields.VolumeStatus.AVAILABLE, vol.status) self.assertIsNotNone(vol.launched_at) backup.refresh() self.assertEqual(fields.BackupStatus.AVAILABLE, backup.status) self.assertTrue(mock_run_restore.called) def test_restore_backup_canceled_with_creating_volume(self): """Test restore backup with a creating volume.""" vol_id = self._create_volume_db_entry( status=fields.VolumeStatus.CREATING, size=1) backup = self._create_backup_db_entry( status=fields.BackupStatus.RESTORING, volume_id=vol_id) mock_run_restore = self.mock_object( self.backup_mgr, '_run_restore') mock_run_restore.side_effect = exception.BackupRestoreCancel( vol_id=vol_id, back_id=backup.id) # We shouldn't raise an exception on the call, it's OK to cancel self.backup_mgr.restore_backup(self.ctxt, backup, vol_id, False) vol = objects.Volume.get_by_id(self.ctxt, vol_id) self.assertEqual(fields.VolumeStatus.ERROR, vol.status) backup.refresh() self.assertEqual(fields.BackupStatus.AVAILABLE, backup.status) self.assertTrue(mock_run_restore.called) def 
test_restore_backup_with_bad_service(self): """Test error handling. Test error handling when attempting a restore of a backup with a different service to that used to create the backup. """ vol_id = self._create_volume_db_entry(status='restoring-backup', size=1) service = 'cinder.tests.backup.bad_service' backup = self._create_backup_db_entry( status=fields.BackupStatus.RESTORING, volume_id=vol_id, service=service) self.assertRaises(exception.InvalidBackup, self.backup_mgr.restore_backup, self.ctxt, backup, vol_id, False) vol = db.volume_get(self.ctxt, vol_id) self.assertEqual('error', vol['status']) backup = db.backup_get(self.ctxt, backup.id) self.assertEqual(fields.BackupStatus.AVAILABLE, backup['status']) @mock.patch('cinder.volume.volume_utils.brick_get_connector_properties') @mock.patch('cinder.utils.temporary_chown') @mock.patch('builtins.open', wraps=open) @mock.patch.object(os.path, 'isdir', return_value=False) @ddt.data({'os_name': 'nt', 'exp_open_mode': 'rb+'}, {'os_name': 'posix', 'exp_open_mode': 'wb'}) @ddt.unpack def test_restore_backup(self, mock_isdir, mock_open, mock_temporary_chown, mock_get_conn, os_name, exp_open_mode): """Test normal backup restoration.""" vol_size = 1 vol_id = self._create_volume_db_entry(status='restoring-backup', size=vol_size) backup = self._create_backup_db_entry( status=fields.BackupStatus.RESTORING, volume_id=vol_id) properties = {} mock_get_conn.return_value = properties mock_secure_enabled = ( self.volume_mocks['secure_file_operations_enabled']) mock_secure_enabled.return_value = False vol = objects.Volume.get_by_id(self.ctxt, vol_id) attach_info = {'device': {'path': '/dev/null'}} mock_detach_device = self.mock_object(self.backup_mgr, '_detach_device') mock_attach_device = self.mock_object(self.backup_mgr, '_attach_device') mock_attach_device.return_value = attach_info with mock.patch('os.name', os_name): self.backup_mgr.restore_backup(self.ctxt, backup, vol_id, False) mock_open.assert_called_once_with('/dev/null', exp_open_mode) mock_temporary_chown.assert_called_once_with('/dev/null') mock_get_conn.assert_called_once_with(False, enforce_multipath=False) vol.status = 'available' vol.obj_reset_changes() mock_secure_enabled.assert_called_once_with(self.ctxt, vol) mock_attach_device.assert_called_once_with(self.ctxt, vol, properties) mock_detach_device.assert_called_once_with(self.ctxt, attach_info, vol, properties, force=True) vol = objects.Volume.get_by_id(self.ctxt, vol_id) self.assertEqual('available', vol['status']) backup = db.backup_get(self.ctxt, backup.id) self.assertNotEqual(backup.id, vol.metadata.get('src_backup_id')) self.assertEqual(fields.BackupStatus.AVAILABLE, backup['status']) @mock.patch('cinder.volume.volume_utils.brick_get_connector_properties') @mock.patch('cinder.utils.temporary_chown') @mock.patch('builtins.open', wraps=open) @mock.patch.object(os.path, 'isdir', return_value=False) @ddt.data({'os_name': 'nt', 'exp_open_mode': 'rb+'}, {'os_name': 'posix', 'exp_open_mode': 'wb'}) @ddt.unpack def test_restore_backup_new_volume(self, mock_isdir, mock_open, mock_temporary_chown, mock_get_conn, os_name, exp_open_mode): """Test normal backup restoration.""" vol_size = 1 vol_id = self._create_volume_db_entry( status='restoring-backup', size=vol_size) backup = self._create_backup_db_entry( status=fields.BackupStatus.RESTORING, volume_id=vol_id) vol2_id = self._create_volume_db_entry( status='restoring-backup', size=vol_size) backup2 = self._create_backup_db_entry( status=fields.BackupStatus.RESTORING, volume_id=vol2_id) vol2 = 
objects.Volume.get_by_id(self.ctxt, vol2_id) properties = {} mock_get_conn.return_value = properties mock_secure_enabled = ( self.volume_mocks['secure_file_operations_enabled']) mock_secure_enabled.return_value = False new_vol_id = self._create_volume_db_entry( status='restoring-backup', size=vol_size) vol = objects.Volume.get_by_id(self.ctxt, new_vol_id) attach_info = {'device': {'path': '/dev/null'}} mock_attach_device = self.mock_object(self.backup_mgr, '_attach_device') self.mock_object(self.backup_mgr, '_detach_device') mock_attach_device.return_value = attach_info with mock.patch('os.name', os_name): self.backup_mgr.restore_backup(self.ctxt, backup, new_vol_id, False) backup.status = "restoring" db.backup_update(self.ctxt, backup.id, {"status": "restoring"}) vol.status = 'available' vol.obj_reset_changes() with mock.patch('os.name', os_name): self.backup_mgr.restore_backup(self.ctxt, backup, vol2_id, False) vol2.refresh() old_src_backup_id = vol2.metadata["src_backup_id"] self.assertEqual(backup.id, old_src_backup_id) vol2.status = 'restoring-backup' db.volume_update(self.ctxt, vol2.id, {"status": "restoring-backup"}) vol2.obj_reset_changes() with mock.patch('os.name', os_name): self.backup_mgr.restore_backup(self.ctxt, backup2, vol2_id, False) vol2.status = 'available' vol2.obj_reset_changes() vol.refresh() vol2.refresh() self.assertEqual('available', vol.status) backup.refresh() self.assertEqual(backup.id, vol.metadata["src_backup_id"]) self.assertNotEqual(old_src_backup_id, vol2.metadata["src_backup_id"]) self.assertEqual(backup2.id, vol2.metadata["src_backup_id"]) self.assertEqual(fields.BackupStatus.AVAILABLE, backup['status']) @mock.patch('cinder.volume.volume_utils.notify_about_backup_usage') def test_restore_backup_with_notify(self, notify): """Test normal backup restoration with notifications.""" vol_size = 1 vol_id = self._create_volume_db_entry(status='restoring-backup', size=vol_size) backup = self._create_backup_db_entry( status=fields.BackupStatus.RESTORING, volume_id=vol_id) self.backup_mgr._run_restore = mock.Mock() self.backup_mgr.restore_backup(self.ctxt, backup, vol_id, False) self.assertEqual(2, notify.call_count) @mock.patch('cinder.volume.volume_utils.clone_encryption_key') @mock.patch('cinder.volume.volume_utils.delete_encryption_key') @mock.patch( 'cinder.tests.unit.backup.fake_service.FakeBackupService.restore') @mock.patch('cinder.volume.volume_utils.brick_get_connector_properties') def test_restore_backup_encrypted_volume(self, mock_connector_properties, mock_backup_driver_restore, mock_delete_encryption_key, mock_clone_encryption_key): """Test restore of encrypted volume. Test restoring a volume from its own backup. In this situation, the volume's encryption key ID shouldn't change. 
""" vol_id = self._create_volume_db_entry(status='restoring-backup', encryption_key_id=fake.UUID1) backup = self._create_backup_db_entry( volume_id=vol_id, status=fields.BackupStatus.RESTORING, encryption_key_id=fake.UUID2) self.mock_object(self.backup_mgr, '_detach_device') mock_attach_device = self.mock_object(self.backup_mgr, '_attach_device') mock_attach_device.return_value = {'device': {'path': '/dev/null'}} self.backup_mgr.restore_backup(self.ctxt, backup, vol_id, False) volume = db.volume_get(self.ctxt, vol_id) self.assertEqual(fake.UUID1, volume.encryption_key_id) mock_clone_encryption_key.assert_not_called() mock_delete_encryption_key.assert_not_called() @mock.patch('cinder.volume.volume_utils.clone_encryption_key') @mock.patch('cinder.volume.volume_utils.delete_encryption_key') @mock.patch( 'cinder.tests.unit.backup.fake_service.FakeBackupService.restore') @mock.patch('cinder.volume.volume_utils.brick_get_connector_properties') def test_restore_backup_new_encrypted_volume(self, mock_connector_properties, mock_backup_driver_restore, mock_delete_encryption_key, mock_clone_encryption_key): """Test restore of encrypted volume. Test handling of encryption key IDs when retoring to another encrypted volume, i.e. a volume whose key ID is different from the volume originally backed up. - The volume's prior encryption key ID is deleted. - The volume is assigned a fresh clone of the backup's encryption key ID. """ vol_id = self._create_volume_db_entry(status='restoring-backup', encryption_key_id=fake.UUID1) backup = self._create_backup_db_entry( volume_id=vol_id, status=fields.BackupStatus.RESTORING, encryption_key_id=fake.UUID2) self.mock_object(self.backup_mgr, '_detach_device') mock_attach_device = self.mock_object(self.backup_mgr, '_attach_device') mock_attach_device.return_value = {'device': {'path': '/dev/null'}} mock_clone_encryption_key.return_value = fake.UUID3 # Mimic the driver's side effect where it updates the volume's # metadata. For backups of encrypted volumes, this will essentially # overwrite the volume's encryption key ID prior to the restore. def restore_side_effect(backup, volume_id, volume_file, volume_is_new): db.volume_update(self.ctxt, volume_id, {'encryption_key_id': fake.UUID4}) mock_backup_driver_restore.side_effect = restore_side_effect self.backup_mgr.restore_backup(self.ctxt, backup, vol_id, False) # Volume's original encryption key ID should be deleted mock_delete_encryption_key.assert_called_once_with(self.ctxt, mock.ANY, fake.UUID1) # Backup's encryption key ID should have been cloned mock_clone_encryption_key.assert_called_once_with(self.ctxt, mock.ANY, fake.UUID2) # Volume should have the cloned backup key ID volume = db.volume_get(self.ctxt, vol_id) self.assertEqual(fake.UUID3, volume.encryption_key_id) # Backup's key ID should not have changed backup = db.backup_get(self.ctxt, backup.id) self.assertEqual(fake.UUID2, backup.encryption_key_id) @mock.patch('cinder.volume.volume_utils.clone_encryption_key') @mock.patch('cinder.volume.volume_utils.delete_encryption_key') @mock.patch( 'cinder.tests.unit.backup.fake_service.FakeBackupService.restore') @mock.patch('cinder.volume.volume_utils.brick_get_connector_properties') def test_restore_backup_glean_key_id(self, mock_connector_properties, mock_backup_driver_restore, mock_delete_encryption_key, mock_clone_encryption_key): """Test restore of encrypted volume. Test restoring a backup that was created prior to when the encryption key ID is saved in the backup DB. 
The backup encryption key ID is gleaned from the restored volume. """ vol_id = self._create_volume_db_entry(status='restoring-backup', encryption_key_id=fake.UUID1) backup = self._create_backup_db_entry( volume_id=vol_id, status=fields.BackupStatus.RESTORING) self.mock_object(self.backup_mgr, '_detach_device') mock_attach_device = self.mock_object(self.backup_mgr, '_attach_device') mock_attach_device.return_value = {'device': {'path': '/dev/null'}} mock_clone_encryption_key.return_value = fake.UUID3 # Mimic the driver's side effect where it updates the volume's # metadata. For backups of encrypted volumes, this will essentially # overwrite the volume's encryption key ID prior to the restore. def restore_side_effect(backup, volume_id, volume_file, volume_is_new): db.volume_update(self.ctxt, volume_id, {'encryption_key_id': fake.UUID4}) mock_backup_driver_restore.side_effect = restore_side_effect self.backup_mgr.restore_backup(self.ctxt, backup, vol_id, False) # Volume's original encryption key ID should be deleted mock_delete_encryption_key.assert_called_once_with(self.ctxt, mock.ANY, fake.UUID1) # Backup's encryption key ID should have been cloned from # the value restored from the metadata. mock_clone_encryption_key.assert_called_once_with(self.ctxt, mock.ANY, fake.UUID4) # Volume should have the cloned backup key ID volume = db.volume_get(self.ctxt, vol_id) self.assertEqual(fake.UUID3, volume.encryption_key_id) # Backup's key ID should have been gleaned from value restored # from the backup's metadata backup = db.backup_get(self.ctxt, backup.id) self.assertEqual(fake.UUID4, backup.encryption_key_id) def test_delete_backup_with_bad_backup_status(self): """Test error handling. Test error handling when deleting a backup with a backup with a bad status. """ vol_id = self._create_volume_db_entry(size=1) backup = self._create_backup_db_entry( status=fields.BackupStatus.AVAILABLE, volume_id=vol_id) self.assertRaises(exception.InvalidBackup, self.backup_mgr.delete_backup, self.ctxt, backup) backup = db.backup_get(self.ctxt, backup.id) self.assertEqual(fields.BackupStatus.ERROR, backup['status']) def test_delete_backup_with_error(self): """Test error handling when an error occurs during backup deletion.""" vol_id = self._create_volume_db_entry(size=1) backup = self._create_backup_db_entry( status=fields.BackupStatus.DELETING, display_name='fail_on_delete', volume_id=vol_id) self.assertRaises(IOError, self.backup_mgr.delete_backup, self.ctxt, backup) backup = db.backup_get(self.ctxt, backup.id) self.assertEqual(fields.BackupStatus.ERROR, backup['status']) def test_delete_backup_with_bad_service(self): """Test error handling. Test error handling when attempting a delete of a backup with a different service to that used to create the backup. """ vol_id = self._create_volume_db_entry(size=1) service = 'cinder.tests.backup.bad_service' backup = self._create_backup_db_entry( status=fields.BackupStatus.DELETING, volume_id=vol_id, service=service) self.assertRaises(exception.InvalidBackup, self.backup_mgr.delete_backup, self.ctxt, backup) backup = db.backup_get(self.ctxt, backup.id) self.assertEqual(fields.BackupStatus.ERROR, backup['status']) def test_delete_backup_with_no_service(self): """Test error handling. 
Test error handling when attempting a delete of a backup with no service defined for that backup; relates to bug #1162908 """ vol_id = self._create_volume_db_entry(size=1) backup = self._create_backup_db_entry( status=fields.BackupStatus.DELETING, volume_id=vol_id) backup.service = None backup.save() self.backup_mgr.delete_backup(self.ctxt, backup) def test_delete_backup(self): """Test normal backup deletion.""" vol_id = self._create_volume_db_entry(size=1) backup = self._create_backup_db_entry( status=fields.BackupStatus.DELETING, volume_id=vol_id, service='cinder.tests.unit.backup.fake_service.FakeBackupService') self.backup_mgr.delete_backup(self.ctxt, backup) self.assertRaises(exception.BackupNotFound, db.backup_get, self.ctxt, backup.id) ctxt_read_deleted = context.get_admin_context('yes') backup = db.backup_get(ctxt_read_deleted, backup.id) self.assertTrue(backup.deleted) self.assertGreaterEqual(timeutils.utcnow(), backup.deleted_at) self.assertEqual(fields.BackupStatus.DELETED, backup.status) @mock.patch('cinder.volume.volume_utils.delete_encryption_key') def test_delete_backup_of_encrypted_volume(self, mock_delete_encryption_key): """Test deletion of backup of encrypted volume""" vol_id = self._create_volume_db_entry( encryption_key_id=fake.UUID1) backup = self._create_backup_db_entry( volume_id=vol_id, status=fields.BackupStatus.DELETING, encryption_key_id=fake.UUID2) self.backup_mgr.delete_backup(self.ctxt, backup) mock_delete_encryption_key.assert_called_once_with(self.ctxt, mock.ANY, fake.UUID2) ctxt_read_deleted = context.get_admin_context('yes') backup = db.backup_get(ctxt_read_deleted, backup.id) self.assertTrue(backup.deleted) self.assertIsNone(backup.encryption_key_id) @mock.patch('cinder.volume.volume_utils.notify_about_backup_usage') def test_delete_backup_with_notify(self, notify): """Test normal backup deletion with notifications.""" vol_id = self._create_volume_db_entry(size=1) backup = self._create_backup_db_entry( status=fields.BackupStatus.DELETING, volume_id=vol_id) self.backup_mgr.delete_backup(self.ctxt, backup) self.assertEqual(2, notify.call_count) def test_list_backup(self): project_id = fake.PROJECT_ID backups = db.backup_get_all_by_project(self.ctxt, project_id) self.assertEqual(0, len(backups)) self._create_backup_db_entry() b2 = self._create_backup_db_entry(project_id=project_id) backups = db.backup_get_all_by_project(self.ctxt, project_id) self.assertEqual(1, len(backups)) self.assertEqual(b2.id, backups[0].id) def test_backup_get_all_by_project_with_deleted(self): """Test deleted backups. Test deleted backups don't show up in backup_get_all_by_project. Unless context.read_deleted is 'yes'. """ project_id = fake.PROJECT2_ID backups = db.backup_get_all_by_project(self.ctxt, project_id) self.assertEqual(0, len(backups)) backup_keep = self._create_backup_db_entry(project_id=project_id) backup = self._create_backup_db_entry(project_id=project_id) db.backup_destroy(self.ctxt, backup.id) backups = db.backup_get_all_by_project(self.ctxt, project_id) self.assertEqual(1, len(backups)) self.assertEqual(backup_keep.id, backups[0].id) ctxt_read_deleted = context.get_admin_context('yes') backups = db.backup_get_all_by_project(ctxt_read_deleted, project_id) self.assertEqual(2, len(backups)) def test_backup_get_all_by_host_with_deleted(self): """Test deleted backups. Test deleted backups don't show up in backup_get_all_by_host. 
Unless context.read_deleted is 'yes' """ backups = db.backup_get_all_by_host(self.ctxt, 'testhost') self.assertEqual(0, len(backups)) backup_keep = self._create_backup_db_entry() backup = self._create_backup_db_entry() db.backup_destroy(self.ctxt, backup.id) backups = db.backup_get_all_by_host(self.ctxt, 'testhost') self.assertEqual(1, len(backups)) self.assertEqual(backup_keep.id, backups[0].id) ctxt_read_deleted = context.get_admin_context('yes') backups = db.backup_get_all_by_host(ctxt_read_deleted, 'testhost') self.assertEqual(2, len(backups)) def test_export_record_with_bad_service(self): """Test error handling. Test error handling when attempting an export of a backup record with a different service to that used to create the backup. """ vol_id = self._create_volume_db_entry(size=1) service = 'cinder.tests.backup.bad_service' backup = self._create_backup_db_entry( status=fields.BackupStatus.AVAILABLE, volume_id=vol_id, service=service) self.assertRaises(exception.InvalidBackup, self.backup_mgr.export_record, self.ctxt, backup) def test_export_record_with_bad_backup_status(self): """Test error handling. Test error handling when exporting a backup record with a backup with a bad status. """ vol_id = self._create_volume_db_entry(status='available', size=1) backup = self._create_backup_db_entry(status=fields.BackupStatus.ERROR, volume_id=vol_id) self.assertRaises(exception.InvalidBackup, self.backup_mgr.export_record, self.ctxt, backup) def test_export_record(self): """Test normal backup record export.""" service = 'cinder.tests.unit.backup.fake_service.FakeBackupService' vol_size = 1 vol_id = self._create_volume_db_entry(status='available', size=vol_size) backup = self._create_backup_db_entry( status=fields.BackupStatus.AVAILABLE, volume_id=vol_id, service=service) export = self.backup_mgr.export_record(self.ctxt, backup) self.assertEqual(service, export['backup_service']) self.assertIn('backup_url', export) def test_import_record_with_verify_not_implemented(self): """Test normal backup record import. Test the case when import succeeds for the case that the driver does not support verify. """ vol_size = 1 backup_id = fake.BACKUP4_ID export = self._create_exported_record_entry(vol_size=vol_size, exported_id=backup_id) imported_record = self._create_export_record_db_entry( backup_id=backup_id) backup_hosts = [] self.backup_mgr.import_record(self.ctxt, imported_record, export['backup_service'], export['backup_url'], backup_hosts) backup = db.backup_get(self.ctxt, imported_record.id) self.assertEqual(fields.BackupStatus.AVAILABLE, backup['status']) self.assertEqual(vol_size, backup['size']) def test_import_record_with_wrong_id(self): """Test normal backup record import. Test the case when import succeeds for the case that the driver does not support verify. """ vol_size = 1 export = self._create_exported_record_entry(vol_size=vol_size) imported_record = self._create_export_record_db_entry() backup_hosts = [] self.assertRaises(exception.InvalidBackup, self.backup_mgr.import_record, self.ctxt, imported_record, export['backup_service'], export['backup_url'], backup_hosts) def test_import_record_with_bad_service(self): """Test error handling. Test error handling when attempting an import of a backup record with a different service to that used to create the backup. 
""" export = self._create_exported_record_entry() export['backup_service'] = 'cinder.tests.unit.backup.bad_service' imported_record = self._create_export_record_db_entry() # Test the case where the additional hosts list is empty backup_hosts = [] self.assertRaises(exception.ServiceNotFound, self.backup_mgr.import_record, self.ctxt, imported_record, export['backup_service'], export['backup_url'], backup_hosts) # Test that the import backup keeps calling other hosts to find a # suitable host for the backup service backup_hosts = ['fake1', 'fake2'] backup_hosts_expect = list(backup_hosts) BackupAPI_import = 'cinder.backup.rpcapi.BackupAPI.import_record' with mock.patch(BackupAPI_import) as _mock_backup_import: self.backup_mgr.import_record(self.ctxt, imported_record, export['backup_service'], export['backup_url'], backup_hosts) next_host = backup_hosts_expect.pop() _mock_backup_import.assert_called_once_with( self.ctxt, next_host, imported_record, export['backup_service'], export['backup_url'], backup_hosts_expect) def test_import_record_with_invalid_backup(self): """Test error handling. Test error handling when attempting an import of a backup record where the backup driver returns an exception. """ export = self._create_exported_record_entry() backup_driver = self.backup_mgr.service(self.ctxt) _mock_record_import_class = ('%s.%s.%s' % (backup_driver.__module__, backup_driver.__class__.__name__, 'import_record')) imported_record = self._create_export_record_db_entry() backup_hosts = [] with mock.patch(_mock_record_import_class) as _mock_record_import: _mock_record_import.side_effect = FakeBackupException('fake') self.assertRaises(exception.InvalidBackup, self.backup_mgr.import_record, self.ctxt, imported_record, export['backup_service'], export['backup_url'], backup_hosts) self.assertTrue(_mock_record_import.called) backup = db.backup_get(self.ctxt, imported_record.id) self.assertEqual(fields.BackupStatus.ERROR, backup['status']) def test_not_supported_driver_to_force_delete(self): """Test force delete check method for not supported drivers.""" self.override_config('backup_driver', 'cinder.backup.drivers.ceph.CephBackupDriver') self.backup_mgr = importutils.import_object(CONF.backup_manager) result = self.backup_mgr.check_support_to_force_delete(self.ctxt) self.assertFalse(result) @mock.patch('cinder.backup.drivers.nfs.NFSBackupDriver.' '_init_backup_repo_path', return_value=None) @mock.patch('cinder.backup.drivers.nfs.NFSBackupDriver.' 'check_for_setup_error', return_value=None) def test_check_support_to_force_delete(self, mock_check_configuration, mock_init_backup_repo_path): """Test force delete check method for supported drivers.""" self.override_config('backup_driver', 'cinder.backup.drivers.nfs.NFSBackupDriver') self.backup_mgr = importutils.import_object(CONF.backup_manager) result = self.backup_mgr.check_support_to_force_delete(self.ctxt) self.assertTrue(result) def test_backup_has_dependent_backups(self): """Test backup has dependent backups. Test the query of has_dependent_backups in backup object is correct. 
""" vol_size = 1 vol_id = self._create_volume_db_entry(size=vol_size) backup = self._create_backup_db_entry(volume_id=vol_id) self.assertFalse(backup.has_dependent_backups) @test.testtools.skipIf(issues.TPOOL_KILLALL_ISSUE, 'tpool.killall bug') def test_default_tpool_size(self): """Test we can set custom tpool size.""" tpool._nthreads = 20 self.assertListEqual([], tpool._threads) self.backup_mgr = importutils.import_object(CONF.backup_manager) self.assertEqual(60, tpool._nthreads) self.assertListEqual([], tpool._threads) @test.testtools.skipIf(issues.TPOOL_KILLALL_ISSUE, 'tpool.killall bug') def test_tpool_size(self): """Test we can set custom tpool size.""" self.assertNotEqual(100, tpool._nthreads) self.assertListEqual([], tpool._threads) self.override_config('backup_native_threads_pool_size', 100) self.backup_mgr = importutils.import_object(CONF.backup_manager) self.assertEqual(100, tpool._nthreads) self.assertListEqual([], tpool._threads) @mock.patch('cinder.backup.manager.BackupManager._run_restore') def test_backup_max_operations_restore(self, mock_restore): mock_sem = self.mock_object(self.backup_mgr, '_semaphore') vol_id = self._create_volume_db_entry( status=fields.VolumeStatus.RESTORING_BACKUP) backup = self._create_backup_db_entry( volume_id=vol_id, status=fields.BackupStatus.RESTORING) self.backup_mgr.restore_backup(self.ctxt, backup, vol_id, False) self.assertEqual(1, mock_sem.__enter__.call_count) self.assertEqual(1, mock_restore.call_count) self.assertEqual(1, mock_sem.__exit__.call_count) @mock.patch('cinder.backup.manager.BackupManager._start_backup') def test_backup_max_operations_backup(self, mock_backup): mock_sem = self.mock_object(self.backup_mgr, '_semaphore') vol_id = self._create_volume_db_entry( status=fields.VolumeStatus.BACKING_UP) backup = self._create_backup_db_entry( volume_id=vol_id, status=fields.BackupStatus.CREATING) self.backup_mgr.create_backup(self.ctxt, backup) self.assertEqual(1, mock_sem.__enter__.call_count) self.assertEqual(1, mock_backup.call_count) self.assertEqual(1, mock_sem.__exit__.call_count) @ddt.ddt class BackupAPITestCase(BaseBackupTest): def setUp(self): super(BackupAPITestCase, self).setUp() self.api = api.API() def test_get_all_wrong_all_tenants_value(self): self.assertRaises(exception.InvalidParameterValue, self.api.get_all, self.ctxt, {'all_tenants': 'bad'}) @mock.patch.object(objects, 'BackupList') def test_get_all_no_all_tenants_value(self, mock_backuplist): result = self.api.get_all(self.ctxt, {'key': 'value'}) self.assertFalse(mock_backuplist.get_all.called) self.assertEqual(mock_backuplist.get_all_by_project.return_value, result) mock_backuplist.get_all_by_project.assert_called_once_with( self.ctxt, self.ctxt.project_id, {'key': 'value'}, None, None, None, None, None) @mock.patch.object(objects, 'BackupList') @ddt.data(False, 'false', '0', 0, 'no') def test_get_all_false_value_all_tenants( self, false_value, mock_backuplist): result = self.api.get_all(self.ctxt, {'all_tenants': false_value, 'key': 'value'}) self.assertFalse(mock_backuplist.get_all.called) self.assertEqual(mock_backuplist.get_all_by_project.return_value, result) mock_backuplist.get_all_by_project.assert_called_once_with( self.ctxt, self.ctxt.project_id, {'key': 'value'}, None, None, None, None, None) @mock.patch.object(objects, 'BackupList') @ddt.data(True, 'true', '1', 1, 'yes') def test_get_all_true_value_all_tenants( self, true_value, mock_backuplist): result = self.api.get_all(self.ctxt, {'all_tenants': true_value, 'key': 'value'}) 
self.assertFalse(mock_backuplist.get_all_by_project.called) self.assertEqual(mock_backuplist.get_all.return_value, result) mock_backuplist.get_all.assert_called_once_with( self.ctxt, {'key': 'value'}, None, None, None, None, None) @mock.patch.object(objects, 'BackupList') def test_get_all_true_value_all_tenants_non_admin(self, mock_backuplist): ctxt = context.RequestContext(uuid.uuid4(), uuid.uuid4()) result = self.api.get_all(ctxt, {'all_tenants': '1', 'key': 'value'}) self.assertFalse(mock_backuplist.get_all.called) self.assertEqual(mock_backuplist.get_all_by_project.return_value, result) mock_backuplist.get_all_by_project.assert_called_once_with( ctxt, ctxt.project_id, {'key': 'value'}, None, None, None, None, None) @mock.patch.object(api.API, '_get_available_backup_service_host', return_value='fake_host') @mock.patch.object(quota.QUOTAS, 'commit') @mock.patch.object(quota.QUOTAS, 'rollback') @mock.patch.object(quota.QUOTAS, 'reserve') @mock.patch.object(db, 'backup_create', side_effect=db_exc.DBError()) def test_create_when_failed_to_create_backup_object( self, mock_create, mock_reserve, mock_rollback, mock_commit, mock_get_service): # Create volume in admin context volume_id = utils.create_volume(self.ctxt)['id'] # Will try to backup from a different context new_context = copy.copy(self.ctxt) new_context.user_id = fake.USER3_ID new_context.project_id = fake.USER3_ID # name the reservation so we can check it later mock_reserve.return_value = 'fake-reservation' # The opposite side of this test case is a "NotImplementedError: # Cannot load 'id' in the base class" being raised. # More detailed, in the try clause, if backup.create() failed # with DB exception, backup.id won't be assigned. However, # in the except clause, backup.destroy() is invoked to do cleanup, # which internally tries to access backup.id. self.assertRaises(db_exc.DBError, self.api.create, context=new_context, name="test_backup", description="test backup description", volume_id=volume_id, container='volumebackups') # make sure quotas are behaving as expected when backup.create() fails mock_reserve.assert_called_once() mock_rollback.assert_called_with(new_context, 'fake-reservation') mock_commit.assert_not_called() @mock.patch.object(api.API, '_get_available_backup_service_host', return_value='fake_host') @mock.patch.object(quota.QUOTAS, 'commit') @mock.patch.object(quota.QUOTAS, 'rollback') @mock.patch.object(quota.QUOTAS, 'reserve') @mock.patch.object(objects.Backup, '__init__', side_effect=exception.InvalidInput( reason='Failed to new')) def test_create_when_failed_to_new_backup_object( self, mock_new, mock_reserve, mock_rollback, mock_commit, mock_get_service): volume_id = utils.create_volume(self.ctxt)['id'] # name the reservation so we can check it later mock_reserve.return_value = 'fake-reservation' # The opposite side of this test case is that a "UnboundLocalError: # local variable 'backup' referenced before assignment" is raised. # More detailed, in the try clause, backup = objects.Backup(...) # raises exception, so 'backup' is not assigned. But in the except # clause, 'backup' is referenced to invoke cleanup methods. 
self.assertRaises(exception.InvalidInput, self.api.create, context=self.ctxt, name="test_backup", description="test backup description", volume_id=volume_id, container='volumebackups') # make sure quotas are behaving as expected when objects.Backup() fails mock_reserve.assert_called_once() mock_rollback.assert_called_with(self.ctxt, 'fake-reservation') mock_commit.assert_not_called() @mock.patch.object(api.API, '_get_available_backup_service_host', return_value='fake_host') @mock.patch('cinder.backup.rpcapi.BackupAPI.create_backup') def test_create_backup_from_snapshot_with_volume_in_use( self, mock_create, mock_get_service): self.ctxt.user_id = 'fake_user' self.ctxt.project_id = 'fake_project' volume_id = self._create_volume_db_entry(status='in-use') snapshot = self._create_snapshot_db_entry(volume_id=volume_id) backup = self.api.create(self.ctxt, None, None, volume_id, None, snapshot_id=snapshot.id) self.assertEqual(fields.BackupStatus.CREATING, backup.status) volume = objects.Volume.get_by_id(self.ctxt, volume_id) snapshot = objects.Snapshot.get_by_id(self.ctxt, snapshot.id) self.assertEqual(fields.SnapshotStatus.BACKING_UP, snapshot.status) self.assertEqual('in-use', volume.status) @mock.patch.object(api.API, '_get_available_backup_service_host', return_value='fake_host') @mock.patch('cinder.backup.rpcapi.BackupAPI.create_backup') @ddt.data(True, False) def test_create_backup_resource_status(self, is_snapshot, mock_create, mock_get_service): self.ctxt.user_id = 'fake_user' self.ctxt.project_id = 'fake_project' volume_id = self._create_volume_db_entry(status='available') snapshot = self._create_snapshot_db_entry(volume_id=volume_id) if is_snapshot: self.api.create(self.ctxt, None, None, volume_id, None, snapshot_id=snapshot.id) volume = objects.Volume.get_by_id(self.ctxt, volume_id) snapshot = objects.Snapshot.get_by_id(self.ctxt, snapshot.id) self.assertEqual('backing-up', snapshot.status) self.assertEqual('available', volume.status) else: self.api.create(self.ctxt, None, None, volume_id, None) volume = objects.Volume.get_by_id(self.ctxt, volume_id) snapshot = objects.Snapshot.get_by_id(self.ctxt, snapshot.id) self.assertEqual('available', snapshot.status) self.assertEqual('backing-up', volume.status) @mock.patch('cinder.backup.api.API._get_available_backup_service_host') @mock.patch('cinder.backup.rpcapi.BackupAPI.restore_backup') def test_restore_volume(self, mock_rpcapi_restore, mock_get_backup_host): volume_id = self._create_volume_db_entry(status='available', size=1) backup = self._create_backup_db_entry(size=1, status='available') mock_get_backup_host.return_value = 'testhost' self.api.restore(self.ctxt, backup.id, volume_id) backup = objects.Backup.get_by_id(self.ctxt, backup.id) self.assertEqual(volume_id, backup.restore_volume_id) @mock.patch.object(objects.Backup, 'decode_record') @mock.patch.object(quota.QUOTAS, 'commit') @mock.patch.object(quota.QUOTAS, 'rollback') @mock.patch.object(quota.QUOTAS, 'reserve') def test__get_import_backup_invalid_backup( self, mock_reserve, mock_rollback, mock_commit, mock_decode): backup = self._create_backup_db_entry(size=1, status='available') mock_decode.return_value = {'id': backup.id, 'project_id': backup.project_id, 'user_id': backup.user_id, 'volume_id': backup.volume_id, 'size': 1} self.assertRaises(exception.InvalidBackup, self.api._get_import_backup, self.ctxt, 'fake_backup_url') # make sure Bug #1965847 has been fixed backup = db.backup_get(self.ctxt, backup.id) self.assertNotEqual(fields.BackupStatus.DELETED, backup.status) # the 
fix for Bug #1965847 changed the workflow in the method # under test, so we check that none of this stuff happens any more mock_reserve.assert_not_called() mock_rollback.assert_not_called() mock_commit.assert_not_called() @mock.patch.object(objects.Backup, 'decode_record') @mock.patch.object(quota.QUOTAS, 'commit') @mock.patch.object(quota.QUOTAS, 'rollback') @mock.patch.object(quota.QUOTAS, 'reserve') def test__get_import_backup_reuse_backup( self, mock_reserve, mock_rollback, mock_commit, mock_decode): backup = self._create_backup_db_entry( size=1, status=fields.BackupStatus.DELETED) mock_decode.return_value = {'id': backup.id, 'project_id': backup.project_id, 'user_id': backup.user_id, 'volume_id': backup.volume_id, 'size': 1} mock_reserve.return_value = 'fake_reservation' self.ctxt.user_id = 'fake_user' self.ctxt.project_id = 'fake_project' # check pre-conditions self.assertNotEqual(self.ctxt.user_id, backup.user_id) self.assertNotEqual(self.ctxt.project_id, backup.project_id) self.assertEqual(fields.BackupStatus.DELETED, backup.status) self.api._get_import_backup(self.ctxt, 'fake_backup_url') # check post-conditions backup = db.backup_get(self.ctxt, backup.id) self.assertEqual(self.ctxt.user_id, backup.user_id) self.assertEqual(self.ctxt.project_id, backup.project_id) self.assertNotEqual(fields.BackupStatus.DELETED, backup.status) mock_reserve.assert_called_with( self.ctxt, backups=1, backup_gigabytes=1) mock_commit.assert_called_with(self.ctxt, 'fake_reservation') mock_rollback.assert_not_called() @mock.patch.object(objects.BackupImport, '__init__') @mock.patch.object(objects.BackupImport, 'get_by_id') @mock.patch.object(objects.Backup, 'decode_record') @mock.patch.object(quota.QUOTAS, 'commit') @mock.patch.object(quota.QUOTAS, 'rollback') @mock.patch.object(quota.QUOTAS, 'reserve') def test__get_import_backup_rollback_situation( self, mock_reserve, mock_rollback, mock_commit, mock_decode, mock_get_by_id, mock_imp_init): mock_decode.return_value = {'id': fake.BACKUP_ID, 'project_id': fake.PROJECT_ID, 'user_id': fake.USER_ID, 'volume_id': fake.VOLUME_ID, 'size': 1} # we won't find a backup, so we'll need to create one mock_get_by_id.side_effect = exception.BackupNotFound( backup_id=fake.BACKUP_ID) # we should make a reservation ... mock_reserve.return_value = 'fake_reservation' # ... 
but will fail to create and will have to roll back mock_imp_init.side_effect = FakeBackupException, self.assertRaises(FakeBackupException, self.api._get_import_backup, self.ctxt, 'fake_backup_url') mock_reserve.assert_called_with( self.ctxt, backups=1, backup_gigabytes=1) mock_commit.assert_not_called() mock_rollback.assert_called_with(self.ctxt, "fake_reservation") @mock.patch('cinder.objects.BackupList.get_all_by_volume') @mock.patch.object(quota.QUOTAS, 'rollback') @mock.patch.object(quota.QUOTAS, 'reserve') def test_create_backup_failed_with_empty_backup_objects( self, mock_reserve, mock_rollback, mock_get_backups): backups = mock.Mock() backups.objects = [] mock_get_backups.return_value = backups is_incremental = True self.ctxt.user_id = 'fake_user' self.ctxt.project_id = 'fake_project' mock_reserve.return_value = 'fake_reservation' volume_id = self._create_volume_db_entry(status='available', host='testhost#rbd', size=1, project_id="vol_proj_id") self.assertRaises(exception.InvalidBackup, self.api.create, self.ctxt, None, None, volume_id, None, incremental=is_incremental) mock_rollback.assert_called_with(self.ctxt, "fake_reservation") mock_get_backups.assert_called_once_with( self.ctxt, volume_id, 'vol_proj_id', filters={'project_id': 'fake_project'}) @mock.patch('cinder.db.backup_get_all_by_volume', return_value=[v2_fakes.fake_backup('fake-1')]) @mock.patch('cinder.backup.rpcapi.BackupAPI.create_backup') @mock.patch.object(api.API, '_get_available_backup_service_host', return_value='fake_host') @mock.patch.object(quota.QUOTAS, 'rollback') @mock.patch.object(quota.QUOTAS, 'reserve') def test_create_backup_failed_with_backup_status_not_available( self, mock_reserve, mock_rollback, mock_get_service, mock_createi, mock_get_backups): is_incremental = True self.ctxt.user_id = 'fake_user' self.ctxt.project_id = 'fake_project' mock_reserve.return_value = 'fake_reservation' volume_id = self._create_volume_db_entry(status='available', host='testhost#rbd', size=1) self.assertRaises(exception.InvalidBackup, self.api.create, self.ctxt, None, None, volume_id, None, incremental=is_incremental) mock_rollback.assert_called_with(self.ctxt, "fake_reservation") @mock.patch('cinder.scheduler.rpcapi.SchedulerAPI.create_backup') @mock.patch.object(api.API, '_get_available_backup_service_host', return_value='fake_host') @mock.patch.object(quota.QUOTAS, 'rollback') @mock.patch.object(quota.QUOTAS, 'reserve') def test_create_backup_with_specified_az( self, mock_reserve, mock_rollback, mock_get_service, mock_create): self.ctxt.user_id = 'fake_user' self.ctxt.project_id = 'fake_project' mock_reserve.return_value = 'fake_reservation' mock_get_service.return_value = 'host_1' volume_id = self._create_volume_db_entry(status='available', host='testhost#rbd', size=1) backup = self.api.create(self.ctxt, name='test', description='test', volume_id=volume_id, container='test', availability_zone='test_az') self.assertEqual('test_az', backup.availability_zone) def test_create_backup_set_az_if_empty(self): '''Test populating the availability_zone field if it was empty''' vol_size = 1 vol_id = self._create_volume_db_entry(size=vol_size) backup = self._create_backup_db_entry(volume_id=vol_id, parent_id='mock', availability_zone=None) with mock.patch.object(self.backup_mgr, '_start_backup') as \ mock__start_backup: self.backup_mgr.az = 'test_az' self.backup_mgr.create_backup(self.ctxt, backup) mock__start_backup.assert_called_once() backup = db.backup_get(self.ctxt, backup.id) self.assertEqual('test_az', 
backup.availability_zone) def test_create_backup_set_az_if_provided(self): '''Test backup availability_zone field remains populated''' vol_size = 1 vol_id = self._create_volume_db_entry(size=vol_size) backup = self._create_backup_db_entry(volume_id=vol_id, parent_id='mock', availability_zone='backup_az') with mock.patch.object(self.backup_mgr, '_start_backup') as \ mock__start_backup: self.backup_mgr.az = 'test_az' self.backup_mgr.create_backup(self.ctxt, backup) mock__start_backup.assert_called_once() backup = db.backup_get(self.ctxt, backup.id) self.assertEqual('backup_az', backup.availability_zone) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/backup/test_backup_messages.py0000664000175000017500000006645600000000000024507 0ustar00zuulzuul00000000000000# Copyright 2021, Red Hat Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Tests for User Facing Messages in Backup Operations.""" from unittest import mock from cinder.backup import manager as backup_manager from cinder import exception from cinder.message import message_field from cinder.scheduler import manager as sch_manager from cinder.tests.unit import fake_constants as fake from cinder.tests.unit import test class BackupUserMessagesTest(test.TestCase): @mock.patch('cinder.db.volume_update') @mock.patch('cinder.objects.volume.Volume.get_by_id') @mock.patch('cinder.message.api.API.create_from_request_context') @mock.patch('cinder.backup.manager.BackupManager._start_backup') @mock.patch('cinder.backup.manager.BackupManager.is_working') @mock.patch('cinder.backup.manager.BackupManager.' '_notify_about_backup_usage') def test_backup_create_invalid_status( self, mock_notify, mock_working, mock_run, mock_msg_create, mock_get_vol, mock_vol_update): manager = backup_manager.BackupManager() fake_context = mock.MagicMock() fake_backup = mock.MagicMock( id=fake.BACKUP_ID, status='available', volume_id=fake.VOLUME_ID, snapshot_id=None) mock_vol = mock.MagicMock() mock_vol.__getitem__.side_effect = {'status': 'backing-up'}.__getitem__ mock_get_vol.return_value = mock_vol self.assertRaises( exception.InvalidBackup, manager.create_backup, fake_context, fake_backup) self.assertEqual(message_field.Action.BACKUP_CREATE, fake_context.message_action) self.assertEqual(message_field.Resource.VOLUME_BACKUP, fake_context.message_resource_type) self.assertEqual(fake_backup.id, fake_context.message_resource_id) mock_msg_create.assert_called_with( fake_context, detail=message_field.Detail.BACKUP_INVALID_STATE) @mock.patch('cinder.db.volume_update') @mock.patch('cinder.objects.volume.Volume.get_by_id') @mock.patch('cinder.message.api.API.create_from_request_context') @mock.patch('cinder.backup.manager.BackupManager._start_backup') @mock.patch('cinder.backup.manager.BackupManager.is_working') @mock.patch('cinder.backup.manager.BackupManager.' 
'_notify_about_backup_usage') def test_backup_create_service_down( self, mock_notify, mock_working, mock_run, mock_msg_create, mock_get_vol, mock_vol_update): manager = backup_manager.BackupManager() fake_context = mock.MagicMock() fake_backup = mock.MagicMock( id=fake.BACKUP_ID, status='creating', volume_id=fake.VOLUME_ID, snapshot_id=None) mock_vol = mock.MagicMock() mock_vol.__getitem__.side_effect = {'status': 'backing-up'}.__getitem__ mock_get_vol.return_value = mock_vol mock_working.return_value = False mock_run.side_effect = exception.InvalidBackup(reason='test reason') self.assertRaises( exception.InvalidBackup, manager.create_backup, fake_context, fake_backup) self.assertEqual(message_field.Action.BACKUP_CREATE, fake_context.message_action) self.assertEqual(message_field.Resource.VOLUME_BACKUP, fake_context.message_resource_type) self.assertEqual(fake_backup.id, fake_context.message_resource_id) mock_msg_create.assert_called_with( fake_context, detail=message_field.Detail.BACKUP_SERVICE_DOWN) @mock.patch('cinder.db.volume_update') @mock.patch('cinder.objects.volume.Volume.get_by_id') @mock.patch('cinder.message.api.API.create_from_request_context') @mock.patch('cinder.backup.manager.BackupManager.is_working') @mock.patch('cinder.backup.manager.BackupManager.' '_notify_about_backup_usage') @mock.patch( 'cinder.backup.manager.volume_utils.brick_get_connector_properties') @mock.patch('cinder.volume.rpcapi.VolumeAPI.get_backup_device') @mock.patch('cinder.backup.manager.BackupManager.' '_cleanup_temp_volumes_snapshots_when_backup_created') @mock.patch('cinder.backup.manager.BackupManager._attach_device') def test_backup_create_attach_error( self, mock_attach, mock_cleanup, mock_get_bak_dev, mock_get_conn, mock_notify, mock_working, mock_msg_create, mock_get_vol, mock_vol_update): manager = backup_manager.BackupManager() fake_context = mock.MagicMock() fake_backup = mock.MagicMock( id=fake.BACKUP_ID, status='creating', volume_id=fake.VOLUME_ID, snapshot_id=None) mock_vol = mock.MagicMock() mock_vol.__getitem__.side_effect = {'status': 'backing-up'}.__getitem__ mock_get_vol.return_value = mock_vol mock_working.return_value = True backup_device = mock.MagicMock() mock_attach.side_effect = exception.InvalidVolume(reason="test reason") self.assertRaises(exception.InvalidVolume, manager.continue_backup, fake_context, fake_backup, backup_device) mock_msg_create.assert_called_with( fake_context, detail=message_field.Detail.ATTACH_ERROR) @mock.patch('cinder.db.volume_update') @mock.patch('cinder.objects.volume.Volume.get_by_id') @mock.patch('cinder.message.api.API.create_from_request_context') @mock.patch('cinder.backup.manager.BackupManager.is_working') @mock.patch('cinder.backup.manager.BackupManager.' '_notify_about_backup_usage') @mock.patch( 'cinder.backup.manager.volume_utils.brick_get_connector_properties') @mock.patch('cinder.volume.rpcapi.VolumeAPI.get_backup_device') @mock.patch('cinder.backup.manager.BackupManager.' 
'_cleanup_temp_volumes_snapshots_when_backup_created') @mock.patch('cinder.backup.manager.BackupManager._attach_device') @mock.patch( 'cinder.tests.unit.backup.fake_service.FakeBackupService.backup') @mock.patch('cinder.backup.manager.open') @mock.patch('cinder.backup.manager.BackupManager._detach_device') def test_backup_create_driver_error( self, mock_detach, mock_open, mock_backup, mock_attach, mock_cleanup, mock_get_bak_dev, mock_get_conn, mock_notify, mock_working, mock_msg_create, mock_get_vol, mock_vol_update): manager = backup_manager.BackupManager() fake_context = mock.MagicMock() fake_backup = mock.MagicMock( id=fake.BACKUP_ID, status='creating', volume_id=fake.VOLUME_ID, snapshot_id=None) mock_vol = mock.MagicMock() mock_vol.__getitem__.side_effect = {'status': 'backing-up'}.__getitem__ mock_get_vol.return_value = mock_vol mock_working.return_value = True backup_device = mock.MagicMock() mock_attach.return_value = {'device': {'path': '/dev/sdb'}} mock_backup.side_effect = exception.InvalidBackup(reason="test reason") self.assertRaises(exception.InvalidBackup, manager.continue_backup, fake_context, fake_backup, backup_device) mock_msg_create.assert_called_with( fake_context, detail=message_field.Detail.BACKUP_CREATE_DRIVER_ERROR) @mock.patch('cinder.db.volume_update') @mock.patch('cinder.objects.volume.Volume.get_by_id') @mock.patch('cinder.message.api.API.create_from_request_context') @mock.patch('cinder.backup.manager.BackupManager.is_working') @mock.patch('cinder.backup.manager.BackupManager.' '_notify_about_backup_usage') @mock.patch( 'cinder.backup.manager.volume_utils.brick_get_connector_properties') @mock.patch('cinder.volume.rpcapi.VolumeAPI.get_backup_device') @mock.patch('cinder.backup.manager.BackupManager.' '_cleanup_temp_volumes_snapshots_when_backup_created') @mock.patch('cinder.backup.manager.BackupManager._attach_device') @mock.patch( 'cinder.tests.unit.backup.fake_service.FakeBackupService.backup') @mock.patch('cinder.backup.manager.open') @mock.patch('cinder.backup.manager.BackupManager._detach_device') def test_backup_create_detach_error( self, mock_detach, mock_open, mock_backup, mock_attach, mock_cleanup, mock_get_bak_dev, mock_get_conn, mock_notify, mock_working, mock_msg_create, mock_get_vol, mock_vol_update): manager = backup_manager.BackupManager() fake_context = mock.MagicMock() fake_backup = mock.MagicMock( id=fake.BACKUP_ID, status='creating', volume_id=fake.VOLUME_ID, snapshot_id=None) mock_vol = mock.MagicMock() mock_vol.__getitem__.side_effect = {'status': 'backing-up'}.__getitem__ mock_get_vol.return_value = mock_vol mock_working.return_value = True backup_device = mock.MagicMock() mock_attach.return_value = {'device': {'path': '/dev/sdb'}} mock_detach.side_effect = exception.InvalidVolume(reason="test reason") self.assertRaises(exception.InvalidVolume, manager.continue_backup, fake_context, fake_backup, backup_device) mock_msg_create.assert_called_with( fake_context, detail=message_field.Detail.DETACH_ERROR) @mock.patch('cinder.db.volume_update') @mock.patch('cinder.objects.volume.Volume.get_by_id') @mock.patch('cinder.message.api.API.create_from_request_context') @mock.patch('cinder.backup.manager.BackupManager.is_working') @mock.patch('cinder.backup.manager.BackupManager.' '_notify_about_backup_usage') @mock.patch( 'cinder.backup.manager.volume_utils.brick_get_connector_properties') @mock.patch('cinder.volume.rpcapi.VolumeAPI.get_backup_device') @mock.patch('cinder.backup.manager.BackupManager.' 
'_cleanup_temp_volumes_snapshots_when_backup_created') @mock.patch('cinder.backup.manager.BackupManager._attach_device') @mock.patch( 'cinder.tests.unit.backup.fake_service.FakeBackupService.backup') @mock.patch('cinder.backup.manager.open') @mock.patch('cinder.backup.manager.BackupManager._detach_device') def test_backup_create_cleanup_error( self, mock_detach, mock_open, mock_backup, mock_attach, mock_cleanup, mock_get_bak_dev, mock_get_conn, mock_notify, mock_working, mock_msg_create, mock_get_vol, mock_vol_update): manager = backup_manager.BackupManager() fake_context = mock.MagicMock() fake_backup = mock.MagicMock( id=fake.BACKUP_ID, status='creating', volume_id=fake.VOLUME_ID, snapshot_id=None) mock_vol = mock.MagicMock() mock_vol.__getitem__.side_effect = {'status': 'backing-up'}.__getitem__ mock_get_vol.return_value = mock_vol mock_working.return_value = True backup_device = mock.MagicMock() mock_attach.return_value = {'device': {'path': '/dev/sdb'}} mock_cleanup.side_effect = exception.InvalidVolume( reason="test reason") self.assertRaises(exception.InvalidVolume, manager.continue_backup, fake_context, fake_backup, backup_device) mock_msg_create.assert_called_with( fake_context, detail=message_field.Detail.BACKUP_CREATE_CLEANUP_ERROR) @mock.patch('cinder.scheduler.host_manager.HostManager.' '_choose_backend_filters') @mock.patch('cinder.scheduler.host_manager.HostManager.' '_get_available_backup_service_host') @mock.patch('cinder.volume.volume_utils.update_backup_error') @mock.patch('cinder.db.volume_update') @mock.patch('cinder.db.volume_get') @mock.patch('cinder.message.api.API.create') def test_backup_create_scheduling_error( self, mock_msg_create, mock_get_vol, mock_vol_update, mock_update_error, mock_get_backup_host, mock_choose_filters): mock_choose_filters.return_value = ['AvailabilityZoneFilter'] manager = sch_manager.SchedulerManager() fake_context = mock.MagicMock() fake_backup = mock.MagicMock(id=fake.BACKUP_ID, volume_id=fake.VOLUME_ID, host=None) mock_get_vol.return_value = mock.MagicMock() exception.ServiceNotFound(service_id='cinder-backup') mock_get_backup_host.side_effect = exception.ServiceNotFound( service_id='cinder-backup') manager.create_backup(fake_context, fake_backup) mock_msg_create.assert_called_once_with( fake_context, action=message_field.Action.BACKUP_CREATE, resource_type=message_field.Resource.VOLUME_BACKUP, resource_uuid=fake_backup.id, detail=message_field.Detail.BACKUP_SCHEDULE_ERROR) @mock.patch('cinder.db.volume_update') @mock.patch('cinder.message.api.API.create_from_request_context') @mock.patch( 'cinder.backup.manager.BackupManager._notify_about_backup_usage') def test_backup_delete_invalid_state( self, mock_notify, mock_msg_create, mock_vol_update): manager = backup_manager.BackupManager() fake_context = mock.MagicMock() fake_backup = mock.MagicMock( id=fake.BACKUP_ID, status='available', volume_id=fake.VOLUME_ID, snapshot_id=None) self.assertRaises( exception.InvalidBackup, manager.delete_backup, fake_context, fake_backup) self.assertEqual(message_field.Action.BACKUP_DELETE, fake_context.message_action) self.assertEqual(message_field.Resource.VOLUME_BACKUP, fake_context.message_resource_type) self.assertEqual(fake_backup.id, fake_context.message_resource_id) mock_msg_create.assert_called_with( fake_context, detail=message_field.Detail.BACKUP_INVALID_STATE) @mock.patch('cinder.db.volume_update') @mock.patch('cinder.message.api.API.create_from_request_context') @mock.patch('cinder.backup.manager.BackupManager.is_working') @mock.patch( 
'cinder.backup.manager.BackupManager._notify_about_backup_usage') def test_backup_delete_service_down( self, mock_notify, mock_working, mock_msg_create, mock_vol_update): manager = backup_manager.BackupManager() fake_context = mock.MagicMock() fake_backup = mock.MagicMock( id=fake.BACKUP_ID, status='deleting', volume_id=fake.VOLUME_ID, snapshot_id=None) mock_working.return_value = False self.assertRaises( exception.InvalidBackup, manager.delete_backup, fake_context, fake_backup) self.assertEqual(message_field.Action.BACKUP_DELETE, fake_context.message_action) self.assertEqual(message_field.Resource.VOLUME_BACKUP, fake_context.message_resource_type) self.assertEqual(fake_backup.id, fake_context.message_resource_id) mock_msg_create.assert_called_with( fake_context, detail=message_field.Detail.BACKUP_SERVICE_DOWN) @mock.patch('cinder.db.volume_update') @mock.patch('cinder.message.api.API.create_from_request_context') @mock.patch('cinder.backup.manager.BackupManager._is_our_backup') @mock.patch('cinder.backup.manager.BackupManager.is_working') @mock.patch( 'cinder.backup.manager.BackupManager._notify_about_backup_usage') def test_backup_delete_driver_error( self, mock_notify, mock_working, mock_our_back, mock_msg_create, mock_vol_update): manager = backup_manager.BackupManager() fake_context = mock.MagicMock() fake_backup = mock.MagicMock( id=fake.BACKUP_ID, status='deleting', volume_id=fake.VOLUME_ID, snapshot_id=None) fake_backup.__getitem__.side_effect = ( {'display_name': 'fail_on_delete'}.__getitem__) mock_working.return_value = True mock_our_back.return_value = True self.assertRaises( IOError, manager.delete_backup, fake_context, fake_backup) self.assertEqual(message_field.Action.BACKUP_DELETE, fake_context.message_action) self.assertEqual(message_field.Resource.VOLUME_BACKUP, fake_context.message_resource_type) self.assertEqual(fake_backup.id, fake_context.message_resource_id) mock_msg_create.assert_called_with( fake_context, detail=message_field.Detail.BACKUP_DELETE_DRIVER_ERROR) @mock.patch('cinder.db.volume_update') @mock.patch('cinder.objects.volume.Volume.get_by_id') @mock.patch('cinder.message.api.API.create') @mock.patch('cinder.backup.manager.BackupManager.' '_notify_about_backup_usage') def test_backup_restore_volume_invalid_state( self, mock_notify, mock_msg_create, mock_get_vol, mock_vol_update): manager = backup_manager.BackupManager() fake_context = mock.MagicMock() fake_backup = mock.MagicMock( id=fake.BACKUP_ID, status='creating', volume_id=fake.VOLUME_ID, snapshot_id=None) fake_backup.__getitem__.side_effect = ( {'status': 'restoring', 'size': 1}.__getitem__) mock_vol = mock.MagicMock() mock_vol.__getitem__.side_effect = ( {'id': fake.VOLUME_ID, 'status': 'available', 'size': 1}.__getitem__) mock_get_vol.return_value = mock_vol self.assertRaises( exception.InvalidVolume, manager.restore_backup, fake_context, fake_backup, fake.VOLUME_ID, False) mock_msg_create.assert_called_once_with( fake_context, action=message_field.Action.BACKUP_RESTORE, resource_type=message_field.Resource.VOLUME_BACKUP, resource_uuid=mock_vol.id, detail=message_field.Detail.VOLUME_INVALID_STATE) @mock.patch('cinder.db.volume_update') @mock.patch('cinder.objects.volume.Volume.get_by_id') @mock.patch('cinder.message.api.API.create_from_request_context') @mock.patch('cinder.backup.manager.BackupManager.' 
'_notify_about_backup_usage') def test_backup_restore_backup_invalid_state( self, mock_notify, mock_msg_create, mock_get_vol, mock_vol_update): manager = backup_manager.BackupManager() fake_context = mock.MagicMock() fake_backup = mock.MagicMock( id=fake.BACKUP_ID, status='creating', volume_id=fake.VOLUME_ID, snapshot_id=None) fake_backup.__getitem__.side_effect = ( {'status': 'available', 'size': 1}.__getitem__) mock_vol = mock.MagicMock() mock_vol.__getitem__.side_effect = ( {'status': 'restoring-backup', 'size': 1}.__getitem__) mock_get_vol.return_value = mock_vol self.assertRaises( exception.InvalidBackup, manager.restore_backup, fake_context, fake_backup, fake.VOLUME_ID, False) self.assertEqual(message_field.Action.BACKUP_RESTORE, fake_context.message_action) self.assertEqual(message_field.Resource.VOLUME_BACKUP, fake_context.message_resource_type) self.assertEqual(fake_backup.id, fake_context.message_resource_id) mock_msg_create.assert_called_with( fake_context, detail=message_field.Detail.BACKUP_INVALID_STATE) @mock.patch('cinder.db.volume_update') @mock.patch('cinder.objects.volume.Volume.get_by_id') @mock.patch('cinder.message.api.API.create_from_request_context') @mock.patch('cinder.backup.manager.BackupManager._is_our_backup') @mock.patch('cinder.backup.manager.BackupManager.is_working') @mock.patch('cinder.backup.manager.BackupManager.' '_notify_about_backup_usage') @mock.patch( 'cinder.backup.manager.volume_utils.brick_get_connector_properties') @mock.patch( 'cinder.volume.rpcapi.VolumeAPI.secure_file_operations_enabled') @mock.patch('cinder.backup.manager.BackupManager._attach_device') @mock.patch('cinder.backup.manager.BackupManager._detach_device') def test_backup_restore_attach_error( self, mock_detach, mock_attach, mock_sec_opts, mock_get_conn, mock_notify, mock_working, mock_our_back, mock_msg_create, mock_get_vol, mock_vol_update): manager = backup_manager.BackupManager() fake_context = mock.MagicMock() fake_backup = mock.MagicMock( id=fake.BACKUP_ID, status='creating', volume_id=fake.VOLUME_ID, snapshot_id=None) fake_backup.__getitem__.side_effect = ( {'status': 'restoring', 'size': 1}.__getitem__) mock_vol = mock.MagicMock() mock_vol.__getitem__.side_effect = ( {'status': 'restoring-backup', 'size': 1}.__getitem__) mock_get_vol.return_value = mock_vol mock_working.return_value = True mock_our_back.return_value = True mock_attach.side_effect = exception.InvalidBackup( reason="test reason") self.assertRaises( exception.InvalidBackup, manager.restore_backup, fake_context, fake_backup, fake.VOLUME_ID, False) self.assertEqual(message_field.Action.BACKUP_RESTORE, fake_context.message_action) self.assertEqual(message_field.Resource.VOLUME_BACKUP, fake_context.message_resource_type) self.assertEqual(fake_backup.id, fake_context.message_resource_id) mock_msg_create.assert_called_with( fake_context, detail=message_field.Detail.ATTACH_ERROR) @mock.patch('cinder.db.volume_update') @mock.patch('cinder.objects.volume.Volume.get_by_id') @mock.patch('cinder.message.api.API.create_from_request_context') @mock.patch('cinder.backup.manager.BackupManager._is_our_backup') @mock.patch('cinder.backup.manager.BackupManager.is_working') @mock.patch('cinder.backup.manager.BackupManager.' 
'_notify_about_backup_usage') @mock.patch( 'cinder.backup.manager.volume_utils.brick_get_connector_properties') @mock.patch( 'cinder.volume.rpcapi.VolumeAPI.secure_file_operations_enabled') @mock.patch('cinder.backup.manager.BackupManager._attach_device') @mock.patch('cinder.backup.manager.open') @mock.patch( 'cinder.tests.unit.backup.fake_service.FakeBackupService.restore') @mock.patch('cinder.backup.manager.BackupManager._detach_device') def test_backup_restore_driver_error( self, mock_detach, mock_restore, mock_open, mock_attach, mock_sec_opts, mock_get_conn, mock_notify, mock_working, mock_our_back, mock_msg_create, mock_get_vol, mock_vol_update): manager = backup_manager.BackupManager() fake_context = mock.MagicMock() fake_backup = mock.MagicMock( id=fake.BACKUP_ID, status='creating', volume_id=fake.VOLUME_ID, snapshot_id=None) fake_backup.__getitem__.side_effect = ( {'status': 'restoring', 'size': 1}.__getitem__) mock_vol = mock.MagicMock() mock_vol.__getitem__.side_effect = ( {'status': 'restoring-backup', 'size': 1}.__getitem__) mock_get_vol.return_value = mock_vol mock_working.return_value = True mock_our_back.return_value = True mock_attach.return_value = {'device': {'path': '/dev/sdb'}} mock_restore.side_effect = exception.InvalidBackup( reason="test reason") self.assertRaises( exception.InvalidBackup, manager.restore_backup, fake_context, fake_backup, fake.VOLUME_ID, False) self.assertEqual(message_field.Action.BACKUP_RESTORE, fake_context.message_action) self.assertEqual(message_field.Resource.VOLUME_BACKUP, fake_context.message_resource_type) self.assertEqual(fake_backup.id, fake_context.message_resource_id) mock_msg_create.assert_called_with( fake_context, detail=message_field.Detail.BACKUP_RESTORE_ERROR) @mock.patch('cinder.db.volume_update') @mock.patch('cinder.objects.volume.Volume.get_by_id') @mock.patch('cinder.message.api.API.create_from_request_context') @mock.patch('cinder.backup.manager.BackupManager._is_our_backup') @mock.patch('cinder.backup.manager.BackupManager.is_working') @mock.patch('cinder.backup.manager.BackupManager.' 
'_notify_about_backup_usage') @mock.patch( 'cinder.backup.manager.volume_utils.brick_get_connector_properties') @mock.patch( 'cinder.volume.rpcapi.VolumeAPI.secure_file_operations_enabled') @mock.patch('cinder.backup.manager.BackupManager._attach_device') @mock.patch('cinder.backup.manager.open') @mock.patch( 'cinder.tests.unit.backup.fake_service.FakeBackupService.restore') @mock.patch('cinder.backup.manager.BackupManager._detach_device') def test_backup_restore_detach_error( self, mock_detach, mock_restore, mock_open, mock_attach, mock_sec_opts, mock_get_conn, mock_notify, mock_working, mock_our_back, mock_msg_create, mock_get_vol, mock_vol_update): manager = backup_manager.BackupManager() fake_context = mock.MagicMock() fake_backup = mock.MagicMock( id=fake.BACKUP_ID, status='creating', volume_id=fake.VOLUME_ID, snapshot_id=None) fake_backup.__getitem__.side_effect = ( {'status': 'restoring', 'size': 1}.__getitem__) mock_vol = mock.MagicMock() mock_vol.__getitem__.side_effect = ( {'status': 'restoring-backup', 'size': 1}.__getitem__) mock_get_vol.return_value = mock_vol mock_working.return_value = True mock_our_back.return_value = True mock_attach.return_value = {'device': {'path': '/dev/sdb'}} mock_detach.side_effect = exception.InvalidBackup( reason="test reason") self.assertRaises( exception.InvalidBackup, manager.restore_backup, fake_context, fake_backup, fake.VOLUME_ID, False) self.assertEqual(message_field.Action.BACKUP_RESTORE, fake_context.message_action) self.assertEqual(message_field.Resource.VOLUME_BACKUP, fake_context.message_resource_type) self.assertEqual(fake_backup.id, fake_context.message_resource_id) mock_msg_create.assert_called_with( fake_context, detail=message_field.Detail.DETACH_ERROR) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/backup/test_chunkeddriver.py0000664000175000017500000004735300000000000024203 0ustar00zuulzuul00000000000000# All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""Tests for the base chunkedbackupdriver class.""" import json from unittest import mock from oslo_config import cfg from oslo_utils import units from cinder.backup import chunkeddriver as cbd from cinder import context from cinder import exception from cinder import objects from cinder.objects import fields from cinder.tests.unit import fake_constants as fake from cinder.tests.unit import test CONF = cfg.CONF TEST_DATA = ('abcdefhijklmnopqrstuvwxyz' * 10).encode('utf-8') class ConcreteChunkedDriver(cbd.ChunkedBackupDriver): def __init__(self, ctxt): super(ConcreteChunkedDriver, self).__init__( ctxt, 1, 1, 'container', False) def _generate_object_name_prefix(self, backup): return 'test-' def delete_object(self, container, object_name): return True def get_container_entries(self, container, prefix): return ['{}{}'.format(container, prefix)] def get_extra_metadata(self, backup, volume): return "{}extra_metadata".format(volume.id) def get_object_reader(self, *args, **kwargs): return TestObjectReader(*args, **kwargs) def get_object_writer(self, *args, **kwargs): return TestObjectWriter(self, *args, **kwargs) def put_container(self, bucket): pass def update_container_name(self, backup, bucket): return None class TestObjectWriter(object): def __init__(self, container, filename, extra_metadata=None): self.container = container self.filename = filename self.extra_metadata = extra_metadata self.written_data = None self.write_count = 0 def __enter__(self): return self def __exit__(self, type, value, traceback): pass def write(self, data): self.written_data = data self.write_count += 1 class TestObjectReader(object): def __init__(self, container, filename, extra_metadata=None): self.container = container self.filename = filename self.extra_metadata = extra_metadata self.written_data = None metadata = {} metadata['version'] = 1 metadata['backup_id'] = 'backupid' metadata['volume_id'] = 'volumeid' metadata['backup_name'] = 'backup_name' metadata['backup_description'] = 'backup_description' metadata['objects'] = ['obj1'] metadata['parent_id'] = 'parent_id' metadata['extra_metadata'] = 'extra_metadata' metadata['chunk_size'] = 1 metadata['sha256s'] = ['sha'] metadata['volume_meta'] = json.dumps(metadata) metadata['version'] = '1.0.0' self.metadata = metadata def __enter__(self): return self def __exit__(self, type, value, traceback): pass def read(self): return json.dumps(self.metadata).encode('utf-8') class ChunkedDriverTestCase(test.TestCase): def _create_backup_db_entry(self, volume_id=fake.VOLUME_ID, restore_volume_id=None, display_name='test_backup', display_description='this is a test backup', container='volumebackups', status=fields.BackupStatus.CREATING, size=1, object_count=0, project_id=fake.PROJECT_ID, service=None, temp_volume_id=None, temp_snapshot_id=None, snapshot_id=None, metadata=None, parent_id=None, encryption_key_id=None): """Create a backup entry in the DB. 
Return the entry ID """ kwargs = {} kwargs['volume_id'] = volume_id kwargs['restore_volume_id'] = restore_volume_id kwargs['user_id'] = fake.USER_ID kwargs['project_id'] = project_id kwargs['host'] = 'testhost' kwargs['availability_zone'] = '1' kwargs['display_name'] = display_name kwargs['display_description'] = display_description kwargs['container'] = container kwargs['status'] = status kwargs['fail_reason'] = '' kwargs['service'] = service or CONF.backup_driver kwargs['snapshot_id'] = snapshot_id kwargs['parent_id'] = parent_id kwargs['size'] = size kwargs['object_count'] = object_count kwargs['temp_volume_id'] = temp_volume_id kwargs['temp_snapshot_id'] = temp_snapshot_id kwargs['metadata'] = metadata or {} kwargs['encryption_key_id'] = encryption_key_id kwargs['service_metadata'] = 'test_metadata' backup = objects.Backup(context=self.ctxt, **kwargs) backup.create() return backup def _create_volume_db_entry(self, display_name='test_volume', display_description='this is a test volume', status='backing-up', previous_status='available', size=1, host='testhost', encryption_key_id=None): """Create a volume entry in the DB. Return the entry ID """ vol = {} vol['size'] = size vol['host'] = host vol['user_id'] = fake.USER_ID vol['project_id'] = fake.PROJECT_ID vol['status'] = status vol['display_name'] = display_name vol['display_description'] = display_description vol['attach_status'] = fields.VolumeAttachStatus.DETACHED vol['availability_zone'] = '1' vol['previous_status'] = previous_status vol['encryption_key_id'] = encryption_key_id volume = objects.Volume(context=self.ctxt, **vol) volume.create() return volume.id def setUp(self): super(ChunkedDriverTestCase, self).setUp() self.ctxt = context.get_admin_context() self.driver = ConcreteChunkedDriver(self.ctxt) self.driver.compressor = None self.volume = self._create_volume_db_entry() self.backup = self._create_backup_db_entry(volume_id=self.volume) def test_get_compressor_none(self): for algo in ['None', 'Off', 'No']: self.assertIsNone(self.driver._get_compressor(algo)) def test_get_compressor_zlib(self): for algo in ['zlib', 'gzip']: self.assertIn('zlib', str(self.driver._get_compressor(algo))) def test_get_compressor_bz(self): for algo in ['bz2', 'bzip2']: self.assertIn('bz', str(self.driver._get_compressor(algo))) def test_get_compressor_zstd(self): self.assertIn('zstd', str(self.driver._get_compressor('zstd'))) def test_get_compressor_invalid(self): self.assertRaises(ValueError, self.driver._get_compressor, 'winzip') def test_create_container(self): self.assertEqual(self.backup.container, self.driver._create_container(self.backup)) def test_create_container_default(self): self.backup.container = None self.assertEqual('container', self.driver._create_container(self.backup)) def test_create_container_new_container(self): with mock.patch.object(self.driver, 'update_container_name', return_value='new_and_improved'): self.assertEqual('new_and_improved', self.driver._create_container(self.backup)) def test_generate_object_names(self): obj_names = self.driver._generate_object_names(self.backup) self.assertTrue(len(obj_names) == 1) self.assertEqual('{}{}'.format(self.backup.container, self.backup.service_metadata), obj_names[0]) def test_metadata_filename(self): filename = self.driver._metadata_filename(self.backup) self.assertEqual('{}_metadata'.format(self.backup.service_metadata), filename) def test_sha256_filename(self): filename = self.driver._sha256_filename(self.backup) 
self.assertEqual('{}_sha256file'.format(self.backup.service_metadata), filename) def test_write_metadata(self): obj_writer = TestObjectWriter('', '') with mock.patch.object(self.driver, 'get_object_writer', return_value=obj_writer): self.driver._write_metadata(self.backup, 'volid', 'contain_name', ['obj1'], 'volume_meta', extra_metadata='extra_metadata') self.assertIsNotNone(obj_writer.written_data) written_data = obj_writer.written_data.decode('utf-8') metadata = json.loads(written_data) self.assertEqual(self.driver.DRIVER_VERSION, metadata.get('version')) self.assertEqual(self.backup.id, metadata.get('backup_id')) self.assertEqual('volid', metadata.get('volume_id')) self.assertEqual(self.backup.display_name, metadata.get('backup_name')) self.assertEqual(self.backup.display_description, metadata.get('backup_description')) self.assertEqual(['obj1'], metadata.get('objects')) self.assertEqual(self.backup.parent_id, metadata.get('parent_id')) self.assertEqual('volume_meta', metadata.get('volume_meta')) self.assertEqual('extra_metadata', metadata.get('extra_metadata')) def test_write_sha256file(self): obj_writer = TestObjectWriter('', '') with mock.patch.object(self.driver, 'get_object_writer', return_value=obj_writer): self.driver._write_sha256file(self.backup, 'volid', 'contain_name', ['sha']) self.assertIsNotNone(obj_writer.written_data) written_data = obj_writer.written_data.decode('utf-8') metadata = json.loads(written_data) self.assertEqual(self.driver.DRIVER_VERSION, metadata.get('version')) self.assertEqual(self.backup.id, metadata.get('backup_id')) self.assertEqual('volid', metadata.get('volume_id')) self.assertEqual(self.backup.display_name, metadata.get('backup_name')) self.assertEqual(self.backup.display_description, metadata.get('backup_description')) self.assertEqual(self.driver.sha_block_size_bytes, metadata.get('chunk_size')) self.assertEqual(['sha'], metadata.get('sha256s')) def test_read_metadata(self): obj_reader = TestObjectReader('', '') with mock.patch.object(self.driver, 'get_object_reader', return_value=obj_reader): metadata = self.driver._read_metadata(self.backup) self.assertIsNotNone(obj_reader.metadata) expected = obj_reader.metadata self.assertEqual(expected['version'], metadata['version']) self.assertEqual(expected['backup_id'], metadata['backup_id']) self.assertEqual(expected['volume_id'], metadata['volume_id']) self.assertEqual(expected['backup_name'], metadata['backup_name']) self.assertEqual(expected['backup_description'], metadata['backup_description']) self.assertEqual(expected['objects'], metadata['objects']) self.assertEqual(expected['parent_id'], metadata['parent_id']) self.assertEqual(expected['volume_meta'], metadata['volume_meta']) self.assertEqual(expected['extra_metadata'], metadata['extra_metadata']) def test_read_sha256file(self): obj_reader = TestObjectReader('', '') with mock.patch.object(self.driver, 'get_object_reader', return_value=obj_reader): metadata = self.driver._read_sha256file(self.backup) self.assertIsNotNone(obj_reader.metadata) expected = obj_reader.metadata self.assertEqual(expected['version'], metadata['version']) self.assertEqual(expected['backup_id'], metadata['backup_id']) self.assertEqual(expected['volume_id'], metadata['volume_id']) self.assertEqual(expected['backup_name'], metadata['backup_name']) self.assertEqual(expected['backup_description'], metadata['backup_description']) self.assertEqual(expected['chunk_size'], metadata['chunk_size']) self.assertEqual(expected['sha256s'], metadata['sha256s']) def 
test_prepare_backup(self): (object_meta, object_sha256, extra_metadata, container, volume_size_bytes) = self.driver._prepare_backup(self.backup) self.assertDictEqual({'id': 1, 'list': [], 'prefix': 'test-', 'volume_meta': None, 'extra_metadata': "{}extra_metadata".format( self.volume), }, object_meta) self.assertDictEqual({'id': 1, 'sha256s': [], 'prefix': 'test-', }, object_sha256) self.assertEqual(extra_metadata, object_meta['extra_metadata']) self.assertEqual(self.backup.container, container) self.assertEqual(self.backup.size * units.Gi, volume_size_bytes) def test_prepare_backup_invalid_size(self): volume = self._create_volume_db_entry(size=0) backup = self._create_backup_db_entry(volume_id=volume) self.assertRaises(exception.InvalidVolume, self.driver._prepare_backup, backup) def test_backup_chunk(self): (object_meta, object_sha256, extra_metadata, container, volume_size_bytes) = self.driver._prepare_backup(self.backup) obj_writer = TestObjectWriter('', '') with mock.patch.object(self.driver, 'get_object_writer', return_value=obj_writer): self.driver._backup_chunk(self.backup, self.backup.container, TEST_DATA, 0, object_meta, extra_metadata) self.assertEqual(TEST_DATA, obj_writer.written_data) self.assertEqual(1, len(object_meta['list'])) self.assertEqual(2, object_meta['id']) chunk = object_meta['list'][0]['test--00001'] self.assertEqual('b4bc937908ab6be6039b6d4141200de8', chunk['md5']) self.assertEqual(0, chunk['offset']) self.assertEqual(len(TEST_DATA), chunk['length']) def test_finalize_backup(self): (object_meta, object_sha256, extra_metadata, container, volume_size_bytes) = self.driver._prepare_backup(self.backup) obj_writer = TestObjectWriter('', '') with mock.patch.object(self.driver, 'get_object_writer', return_value=obj_writer): self.driver._backup_chunk(self.backup, self.backup.container, TEST_DATA, 0, object_meta, extra_metadata) self.driver._finalize_backup(self.backup, self.backup.container, object_meta, object_sha256) self.assertEqual(1, self.backup.object_count) def test_backup_metadata(self): object_meta = {} self.driver._backup_metadata(self.backup, object_meta) self.assertIn('volume_meta', object_meta.keys()) # Too much that we mostly don't care about for UT purposes. 
Just spot # check a few things metadata = json.loads(object_meta['volume_meta']) self.assertIn('volume-base-metadata', metadata.keys()) self.assertEqual(self.volume, metadata['volume-base-metadata']['id']) self.assertEqual(1, metadata['volume-base-metadata']['size']) self.assertEqual('test_volume', metadata['volume-base-metadata']['display_name']) self.assertEqual('testhost', metadata['volume-base-metadata']['host']) @mock.patch('cinder.volume.volume_utils.notify_about_backup_usage') def test_send_progress_end(self, mock_notify): obj_meta = {} self.driver._send_progress_end(self.ctxt, self.backup, obj_meta) self.assertEqual(100, obj_meta.get('backup_percent', 0)) self.assertTrue(mock_notify.called) @mock.patch('cinder.volume.volume_utils.notify_about_backup_usage') def test_send_progress_notification(self, mock_notify): obj_meta = {} self.driver._send_progress_notification( self.ctxt, self.backup, obj_meta, 1, 2) self.assertEqual(50, obj_meta.get('backup_percent', 0)) self.assertTrue(mock_notify.called) @mock.patch('cinder.tests.unit.fake_notifier.FakeNotifier._notify') def test_backup(self, mock_notify): volume_file = mock.Mock() volume_file.tell.side_effect = [0, len(TEST_DATA)] volume_file.read.side_effect = [TEST_DATA, b''] obj_writer = TestObjectWriter('', '') with mock.patch.object(self.driver, 'get_object_writer', return_value=obj_writer): self.driver.backup(self.backup, volume_file) self.assert_notify_called(mock_notify, (['INFO', 'backup.createprogress'],)) def test_backup_invalid_size(self): self.driver.chunk_size_bytes = 999 self.driver.sha_block_size_bytes = 1024 self.assertRaises(exception.InvalidBackup, self.driver.backup, self.backup, mock.Mock()) def test_restore(self): volume_file = mock.Mock() restore_test = mock.Mock() self.driver._restore_v1 = restore_test # Create a second backup backup = self._create_backup_db_entry( self.volume, parent_id=self.backup.id) with mock.patch.object(self.driver, 'put_metadata') as mock_put: self.driver.restore(backup, self.volume, volume_file, False) self.assertEqual(2, mock_put.call_count) restore_test.assert_called() def test_delete_backup(self): with mock.patch.object(self.driver, 'delete_object') as mock_delete: self.driver.delete_backup(self.backup) mock_delete.assert_called() self.assertEqual(1, mock_delete.call_count) mock_delete.assert_called_once_with( self.backup.container, self.backup.container + self.backup.service_metadata) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/backup/test_rpcapi.py0000664000175000017500000001157200000000000022616 0ustar00zuulzuul00000000000000# Copyright (c) 2015 Intel Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
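# --- Illustrative helper (an assumption, added for clarity; not code from the
# tree): the object-name convention asserted by the chunked-driver tests
# above, where the per-backup metadata and sha256 index objects derive from
# backup.service_metadata and data chunks append a zero-padded counter to the
# object-name prefix.
def _example_chunked_object_names(service_metadata, prefix, chunk_id):
    metadata_obj = '%s_metadata' % service_metadata
    sha256_obj = '%s_sha256file' % service_metadata
    chunk_obj = '%s-%05d' % (prefix, chunk_id)  # e.g. 'test--00001'
    return metadata_obj, sha256_obj, chunk_obj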
""" Unit Tests for cinder.backup.rpcapi """ from unittest import mock from cinder.backup import rpcapi as backup_rpcapi from cinder import objects from cinder.tests.unit.backup import fake_backup from cinder.tests.unit import fake_constants as fake from cinder.tests.unit import test class BackupRPCAPITestCase(test.RPCAPITestCase): def setUp(self): super(BackupRPCAPITestCase, self).setUp() self.rpcapi = backup_rpcapi.BackupAPI self.fake_backup_obj = fake_backup.fake_backup_obj(self.context) self.can_send_version_mock = self.patch( 'oslo_messaging.RPCClient.can_send_version', return_value=True) def test_create_backup(self): self._test_rpc_api('create_backup', rpc_method='cast', server=self.fake_backup_obj.host, backup=self.fake_backup_obj) def test_restore_backup(self): self._test_rpc_api('restore_backup', rpc_method='cast', server='fake_volume_host', backup_host='fake_volume_host', backup=self.fake_backup_obj, volume_id=fake.VOLUME_ID, volume_is_new=True) with mock.patch('cinder.rpc.LAST_RPC_VERSIONS', {'cinder-backup': '2.0'}): self._test_rpc_api('restore_backup', rpc_method='cast', server='fake_volume_host', backup_host='fake_volume_host', backup=self.fake_backup_obj, volume_id=fake.VOLUME_ID, volume_is_new=False) def test_delete_backup(self): self._test_rpc_api('delete_backup', rpc_method='cast', server=self.fake_backup_obj.host, backup=self.fake_backup_obj) def test_export_record(self): self._test_rpc_api('export_record', rpc_method='call', server=self.fake_backup_obj.host, backup=self.fake_backup_obj, retval={'backup_service': 'fake_backup_driver', 'backup_url': 'http://fake_url'}) def test_import_record(self): self._test_rpc_api('import_record', rpc_method='cast', server='fake_volume_host', host='fake_volume_host', backup=self.fake_backup_obj, backup_service='fake_service', backup_url='fake_url', backup_hosts=['fake_host1', 'fake_host2']) def test_reset_status(self): self._test_rpc_api('reset_status', rpc_method='cast', server=self.fake_backup_obj.host, backup=self.fake_backup_obj, status='error') def test_check_support_to_force_delete(self): self._test_rpc_api('check_support_to_force_delete', rpc_method='call', server='fake_volume_host', host='fake_volume_host', retval=True) @mock.patch('oslo_messaging.RPCClient.can_send_version', mock.Mock()) def test_set_log_levels(self): service = objects.Service(self.context, host='host1') self._test_rpc_api('set_log_levels', rpc_method='cast', server=service.host, service=service, log_request='log_request', version='2.1') @mock.patch('oslo_messaging.RPCClient.can_send_version', mock.Mock()) def test_get_log_levels(self): service = objects.Service(self.context, host='host1') self._test_rpc_api('get_log_levels', rpc_method='call', server=service.host, service=service, log_request='log_request', version='2.1') ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315577.1951194 cinder-27.0.0/cinder/tests/unit/brick/0000775000175000017500000000000000000000000017546 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/brick/__init__.py0000664000175000017500000000000000000000000021645 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/brick/fake_lvm.py0000664000175000017500000000333600000000000021711 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 
2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. class FakeBrickLVM(object): """Logs and records calls, for unit tests.""" def __init__(self, vg_name, create, pv_list, vtype, execute=None): super(FakeBrickLVM, self).__init__() self.vg_size = '5.00' self.vg_free_space = '5.00' self.vg_name = vg_name def supports_thin_provisioning(): return False def get_volumes(self): return ['fake-volume'] def get_volume(self, name): return ['name'] def get_all_physical_volumes(vg_name=None): return [] def update_volume_group_info(self): pass def create_thin_pool(self, name=None, size_str=0): pass def create_volume(self, name, size_str, lv_type='default', mirror_count=0): pass def create_lv_snapshot(self, name, source_lv_name, lv_type='default'): pass def delete(self, name): pass def revert(self, snapshot_name): pass def deactivate_lv(self, name): pass def lv_has_snapshot(self, name): return False def activate_lv(self, lv, is_snapshot=False, permanent=False): pass def rename_volume(self, lv_name, new_name): pass ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/brick/test_brick_lvm.py0000664000175000017500000005070500000000000023136 0ustar00zuulzuul00000000000000# Copyright 2012 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from unittest import mock import ddt from os_brick import executor as os_brick_executor from oslo_concurrency import processutils from cinder.brick.local_dev import lvm as brick from cinder import exception from cinder.tests.unit import test from cinder.volume import configuration as conf @ddt.ddt class BrickLvmTestCase(test.TestCase): def setUp(self): if not hasattr(self, 'configuration'): self.configuration = mock.Mock(conf.Configuration) self.configuration.lvm_suppress_fd_warnings = False self.configuration.volume_group_name = 'fake-vg' super(BrickLvmTestCase, self).setUp() self.mock_object(processutils, 'execute', self.fake_execute) self.vg = brick.LVM( self.configuration.volume_group_name, 'sudo', False, None, 'default', self.fake_execute, suppress_fd_warn=self.configuration.lvm_suppress_fd_warnings) def failed_fake_execute(obj, *cmd, **kwargs): return ("\n", "fake-error") def fake_pretend_lvm_version(obj, *cmd, **kwargs): return (" LVM version: 2.03.00 (2012-03-06)\n", "") def fake_old_lvm_version(obj, *cmd, **kwargs): # Does not support thin prov or snap activation return (" LVM version: 2.02.65(2) (2012-03-06)\n", "") def fake_customised_lvm_version(obj, *cmd, **kwargs): return (" LVM version: 2.02.100(2)-RHEL6 (2013-09-12)\n", "") def fake_execute(obj, *cmd, **kwargs): # noqa if obj.configuration.lvm_suppress_fd_warnings: _lvm_prefix = 'env LC_ALL=C LVM_SUPPRESS_FD_WARNINGS=1 ' else: _lvm_prefix = 'env LC_ALL=C ' cmd_string = ' '.join(cmd) data = "\n" if (_lvm_prefix + 'vgs --noheadings --unit=g -o name' == cmd_string): data = " fake-vg\n" data += " some-other-vg\n" elif (_lvm_prefix + 'vgs --noheadings -o name fake-vg' == cmd_string): data = " fake-vg\n" elif _lvm_prefix + 'lvm version' in cmd_string: data = " LVM version: 2.03.07(2) (2019-11-30)\n" elif _lvm_prefix + 'vgs --noheadings -o uuid fake-vg' in cmd_string: data = " kVxztV-dKpG-Rz7E-xtKY-jeju-QsYU-SLG6Z1\n" elif (_lvm_prefix + 'vgs --noheadings --unit=g ' '-o name,size,free,lv_count,uuid ' '--separator : --nosuffix' in cmd_string): data = (" test-prov-cap-vg-unit:10.00:10.00:0:" "mXzbuX-dKpG-Rz7E-xtKY-jeju-QsYU-SLG8Z4\n") if 'test-prov-cap-vg-unit' in cmd_string: return (data, "") data = (" test-prov-cap-vg-no-unit:10.00:10.00:0:" "mXzbuX-dKpG-Rz7E-xtKY-jeju-QsYU-SLG8Z4\n") if 'test-prov-cap-vg-no-unit' in cmd_string: return (data, "") data = " fake-vg:10.00:10.00:0:"\ "kVxztV-dKpG-Rz7E-xtKY-jeju-QsYU-SLG6Z1\n" if 'fake-vg' in cmd_string: return (data, "") data += " fake-vg-2:10.00:10.00:0:"\ "lWyauW-dKpG-Rz7E-xtKY-jeju-QsYU-SLG7Z2\n" data += " fake-vg-3:10.00:10.00:0:"\ "mXzbuX-dKpG-Rz7E-xtKY-jeju-QsYU-SLG8Z3\n" elif (_lvm_prefix + 'lvs --noheadings ' '--unit=g -o vg_name,name,size --nosuffix --readonly ' 'fake-vg/lv-nothere' in cmd_string): raise processutils.ProcessExecutionError( stderr="One or more specified logical volume(s) not found.") elif (_lvm_prefix + 'lvs --noheadings ' '--unit=g -o vg_name,name,size --nosuffix --readonly ' 'fake-vg/lv-newerror' in cmd_string): raise processutils.ProcessExecutionError( stderr="Failed to find logical volume \"fake-vg/lv-newerror\"") elif (_lvm_prefix + 'lvs --noheadings ' '--unit=g -o vg_name,name,size' in cmd_string): if 'fake-unknown' in cmd_string: raise processutils.ProcessExecutionError( stderr="One or more volume(s) not found." 
) if 'test-prov-cap-vg-unit' in cmd_string: data = " fake-vg test-prov-cap-pool-unit 9.50g\n" data += " fake-vg fake-volume-1 1.00g\n" data += " fake-vg fake-volume-2 2.00g\n" elif 'test-prov-cap-vg-no-unit' in cmd_string: data = " fake-vg test-prov-cap-pool-no-unit 9.50\n" data += " fake-vg fake-volume-1 1.00\n" data += " fake-vg fake-volume-2 2.00\n" elif 'test-found-lv-name' in cmd_string: data = " fake-vg test-found-lv-name 9.50\n" else: data = " fake-vg fake-1 1.00g\n" data += " fake-vg fake-2 1.00g\n" elif (_lvm_prefix + 'lvdisplay --noheading -C -o Attr' in cmd_string): if 'test-volumes' in cmd_string: data = ' wi-a-' elif 'snapshot' in cmd_string: data = ' swi-a-s--' elif 'open' in cmd_string: data = ' -wi-ao---' else: data = ' owi-a-' elif (_lvm_prefix + 'lvdisplay --noheading -C -o Origin' in cmd_string): if 'snapshot' in cmd_string: data = ' fake-volume-1' else: data = ' ' elif _lvm_prefix + 'pvs --noheadings' in cmd_string: data = " fake-vg|/dev/sda|10.00|1.00\n" data += " fake-vg|/dev/sdb|10.00|1.00\n" data += " fake-vg|/dev/sdc|10.00|8.99\n" data += " fake-vg-2|/dev/sdd|10.00|9.99\n" if '--ignoreskippedcluster' not in cmd_string: raise processutils.ProcessExecutionError( stderr="Skipping clustered volume group", stdout=data, exit_code=5 ) elif _lvm_prefix + 'lvs --noheadings --unit=g' \ ' -o size,data_percent --separator :' in cmd_string: if 'test-prov-cap-pool' in cmd_string: data = " 9.5:20\n" else: data = " 9:12\n" elif 'lvcreate -T -L ' in cmd_string: pass elif 'lvcreate -T -V ' in cmd_string: pass elif 'lvcreate -n ' in cmd_string: pass elif 'lvcreate --name ' in cmd_string: pass elif 'lvextend -L ' in cmd_string: pass else: raise AssertionError('unexpected command called: %s' % cmd_string) return (data, "") def test_create_lv_snapshot(self): self.assertIsNone(self.vg.create_lv_snapshot('snapshot-1', 'fake-1')) with mock.patch.object(self.vg, 'get_volume', return_value=None): try: self.vg.create_lv_snapshot('snapshot-1', 'fake-non-existent') except exception.VolumeDeviceNotFound as e: self.assertEqual('fake-non-existent', e.kwargs['device']) else: self.fail("Exception not raised") def test_vg_exists(self): self.assertTrue(self.vg._vg_exists()) def test_get_all_volumes(self): out = self.vg.get_volumes() self.assertEqual('fake-1', out[0]['name']) self.assertEqual('1.00g', out[0]['size']) self.assertEqual('fake-vg', out[0]['vg']) def test_get_volume(self): self.assertEqual('fake-1', self.vg.get_volume('fake-1')['name']) def test_get_volume_none(self): self.assertIsNone(self.vg.get_volume('fake-unknown')) def test_get_lv_info_notfound(self): # lv-nothere will raise lvm < 2.102.112 exception self.assertEqual( [], self.vg.get_lv_info( 'sudo', vg_name='fake-vg', lv_name='lv-nothere') ) # lv-newerror will raise lvm > 2.102.112 exception self.assertEqual( [], self.vg.get_lv_info( 'sudo', vg_name='fake-vg', lv_name='lv-newerror') ) def test_get_lv_info_found(self): lv_info = [{'size': '9.50', 'name': 'test-found-lv-name', 'vg': 'fake-vg'}] self.assertEqual( lv_info, self.vg.get_lv_info( 'sudo', vg_name='fake-vg', lv_name='test-found-lv-name') ) def test_get_lv_info_no_lv_name(self): lv_info = [{'name': 'fake-1', 'size': '1.00g', 'vg': 'fake-vg'}, {'name': 'fake-2', 'size': '1.00g', 'vg': 'fake-vg'}] self.assertEqual( lv_info, self.vg.get_lv_info( 'sudo', vg_name='fake-vg') ) @mock.patch('tenacity.nap.sleep', mock.Mock()) @mock.patch.object(brick.putils, 'execute') def test_get_lv_info_retry(self, exec_mock): exec_mock.side_effect = ( processutils.ProcessExecutionError('', '', 
exit_code=139), ('vg name size', ''), ) self.assertEqual( [{'name': 'name', 'vg': 'vg', 'size': 'size'}], self.vg.get_lv_info('sudo', vg_name='vg', lv_name='name') ) self.assertEqual(2, exec_mock.call_count) args = ['env', 'LC_ALL=C', 'lvs', '--noheadings', '--unit=g', '-o', 'vg_name,name,size', '--nosuffix', '--readonly', 'vg/name'] if self.configuration.lvm_suppress_fd_warnings: args.insert(2, 'LVM_SUPPRESS_FD_WARNINGS=1') lvs_call = mock.call(*args, root_helper='sudo', run_as_root=True) exec_mock.assert_has_calls([lvs_call, lvs_call]) @mock.patch('tenacity.nap.sleep', mock.Mock()) @mock.patch.object(os_brick_executor.Executor, '_execute') def test_get_thin_pool_free_space_retry(self, exec_mock): exec_mock.side_effect = ( processutils.ProcessExecutionError('', '', exit_code=139), ('15.84:50', ''), ) self.assertEqual( 7.92, self.vg._get_thin_pool_free_space('vg', 'thinpool') ) self.assertEqual(2, exec_mock.call_count) args = ['env', 'LC_ALL=C', 'lvs', '--noheadings', '--unit=g', '-o', 'size,data_percent', '--separator', ':', '--nosuffix', '/dev/vg/thinpool'] if self.configuration.lvm_suppress_fd_warnings: args.insert(2, 'LVM_SUPPRESS_FD_WARNINGS=1') lvs_call = mock.call(*args, root_helper='sudo', run_as_root=True) exec_mock.assert_has_calls([lvs_call, lvs_call]) def test_get_all_physical_volumes(self): # Filtered VG version pvs = self.vg.get_all_physical_volumes('sudo', 'fake-vg') self.assertEqual(3, len(pvs)) # Non-Filtered, all VG's pvs = self.vg.get_all_physical_volumes('sudo') self.assertEqual(4, len(pvs)) def test_get_volume_groups(self): self.assertEqual(3, len(self.vg.get_all_volume_groups('sudo'))) self.assertEqual(1, len(self.vg.get_all_volume_groups('sudo', 'fake-vg'))) def test_thin_support(self): # lvm.supports_thin() is a static method and doesn't # use the self._executor fake we pass in on init # so we need to stub processutils.execute appropriately self.assertTrue(self.vg.supports_thin_provisioning('sudo')) with mock.patch.object(processutils, 'execute', self.fake_pretend_lvm_version): self.assertTrue(self.vg.supports_thin_provisioning('sudo')) with mock.patch.object(processutils, 'execute', self.fake_old_lvm_version): self.assertFalse(self.vg.supports_thin_provisioning('sudo')) with mock.patch.object(processutils, 'execute', self.fake_customised_lvm_version): self.assertTrue(self.vg.supports_thin_provisioning('sudo')) def test_snapshot_lv_activate_support(self): self.vg._supports_snapshot_lv_activation = None self.assertTrue(self.vg.supports_snapshot_lv_activation) self.vg._supports_snapshot_lv_activation = None with mock.patch.object(processutils, 'execute', self.fake_old_lvm_version): self.assertFalse(self.vg.supports_snapshot_lv_activation) self.vg._supports_snapshot_lv_activation = None def test_lvchange_ignskipact_support_yes(self): """Tests if lvchange -K is available via a lvm2 version check.""" self.vg._supports_lvchange_ignoreskipactivation = None with mock.patch.object(processutils, 'execute', self.fake_pretend_lvm_version): self.assertTrue(self.vg.supports_lvchange_ignoreskipactivation) self.vg._supports_lvchange_ignoreskipactivation = None with mock.patch.object(processutils, 'execute', self.fake_old_lvm_version): self.assertFalse(self.vg.supports_lvchange_ignoreskipactivation) self.vg._supports_lvchange_ignoreskipactivation = None def test_pvs_ignoreskippedcluster_support(self): """Tests if lvm support ignoreskippedcluster option.""" brick.LVM._supports_pvs_ignoreskippedcluster = None with mock.patch.object(processutils, 'execute', 
self.fake_pretend_lvm_version): self.assertTrue(brick.LVM.supports_pvs_ignoreskippedcluster( 'sudo')) brick.LVM._supports_pvs_ignoreskippedcluster = None with mock.patch.object(processutils, 'execute', self.fake_old_lvm_version): self.assertFalse(brick.LVM.supports_pvs_ignoreskippedcluster( 'sudo')) brick.LVM._supports_pvs_ignoreskippedcluster = None def test_thin_pool_creation(self): # The size of fake-vg volume group is 10g, so the calculated thin # pool size should be 9.5g (95% of 10g). self.assertEqual("9.5g", self.vg.create_thin_pool()) # Passing a size parameter should result in a thin pool of that exact # size. for size in ("1g", "1.2g", "1.75g"): self.assertEqual(size, self.vg.create_thin_pool(size_str=size)) def test_thin_pool_provisioned_capacity(self): self.vg.vg_thin_pool = "test-prov-cap-pool-unit" self.vg.vg_name = 'test-prov-cap-vg-unit' self.assertEqual( "9.5g", self.vg.create_thin_pool(name=self.vg.vg_thin_pool)) self.assertEqual(9.50, self.vg.vg_thin_pool_size) self.assertEqual(7.6, self.vg.vg_thin_pool_free_space) self.assertEqual(3.0, self.vg.vg_provisioned_capacity) self.vg.vg_thin_pool = "test-prov-cap-pool-no-unit" self.vg.vg_name = 'test-prov-cap-vg-no-unit' self.assertEqual( "9.5g", self.vg.create_thin_pool(name=self.vg.vg_thin_pool)) self.assertEqual(9.50, self.vg.vg_thin_pool_size) self.assertEqual(7.6, self.vg.vg_thin_pool_free_space) self.assertEqual(3.0, self.vg.vg_provisioned_capacity) def test_thin_pool_free_space(self): # The size of fake-vg-pool is 9g and the allocated data sums up to # 12% so the calculated free space should be 7.92 self.assertEqual(float("7.92"), self.vg._get_thin_pool_free_space("fake-vg", "fake-vg-pool")) def test_volume_create_after_thin_creation(self): """Test self.vg.vg_thin_pool is set to pool_name See bug #1220286 for more info. 
""" vg_name = "vg-name" pool_name = vg_name + "-pool" self.vg.create_thin_pool(pool_name, "1G") with mock.patch.object(self.vg, '_execute', return_value=(0, 0)): self.vg.create_volume("test", "1G", lv_type='thin') if self.configuration.lvm_suppress_fd_warnings is False: self.vg._execute.assert_called_once_with( 'env', 'LC_ALL=C', 'lvcreate', '-T', '-V', '1G', '-n', 'test', 'fake-vg/vg-name-pool', root_helper='sudo', run_as_root=True) else: self.vg._execute.assert_called_once_with( 'env', 'LC_ALL=C', 'LVM_SUPPRESS_FD_WARNINGS=1', 'lvcreate', '-T', '-V', '1G', '-n', 'test', 'fake-vg/vg-name-pool', root_helper='sudo', run_as_root=True) self.assertEqual(pool_name, self.vg.vg_thin_pool) def test_volume_create_when_executor_failed(self): def fail(*args, **kwargs): raise processutils.ProcessExecutionError() self.vg._execute = fail with mock.patch.object(self.vg, 'get_all_volume_groups') as m_gavg: self.assertRaises( processutils.ProcessExecutionError, self.vg.create_volume, "test", "1G" ) m_gavg.assert_called() def test_lv_has_snapshot(self): self.assertTrue(self.vg.lv_has_snapshot('fake-vg')) self.assertFalse(self.vg.lv_has_snapshot('test-volumes')) def test_lv_is_snapshot(self): self.assertTrue(self.vg.lv_is_snapshot('fake-snapshot')) self.assertFalse(self.vg.lv_is_snapshot('test-volumes')) def test_lv_is_open(self): self.assertTrue(self.vg.lv_is_open('fake-open')) self.assertFalse(self.vg.lv_is_open('fake-snapshot')) def test_lv_get_origin(self): self.assertEqual('fake-volume-1', self.vg.lv_get_origin('fake-snapshot')) self.assertIsNone(self.vg.lv_get_origin('test-volumes')) def test_activate_lv(self): with mock.patch.object(self.vg, '_execute'): self.vg._supports_lvchange_ignoreskipactivation = True self.vg._execute('lvchange', '-a', 'y', '--yes', '-K', 'fake-vg/my-lv', root_helper='sudo', run_as_root=True) self.vg.activate_lv('my-lv') def test_get_mirrored_available_capacity(self): self.assertEqual(2.0, self.vg.vg_mirror_free_space(1)) @ddt.data(True, False) def test_lv_extend(self, has_snapshot): with mock.patch.object(self.vg, '_execute', return_value=('', '')): with mock.patch.object(self.vg, 'lv_has_snapshot'): self.vg.deactivate_lv = mock.MagicMock() self.vg.activate_lv = mock.MagicMock() self.vg.lv_has_snapshot.return_value = has_snapshot self.vg.extend_volume("test", "2G") self.vg.lv_has_snapshot.assert_called_once_with("test") if has_snapshot: self.vg.activate_lv.assert_called_once_with("test") self.vg.deactivate_lv.assert_called_once_with("test") else: self.vg.activate_lv.assert_not_called() self.vg.deactivate_lv.assert_not_called() def test_lv_deactivate(self): with mock.patch.object(self.vg, '_execute', return_value=(0, 0)): is_active_mock = mock.Mock() is_active_mock.return_value = False self.vg._lv_is_active = is_active_mock self.vg.create_volume('test', '1G') self.vg.deactivate_lv('test') @mock.patch('time.sleep') def test_lv_deactivate_timeout(self, _mock_sleep): with mock.patch.object(self.vg, '_execute', return_value=(0, 0)): is_active_mock = mock.Mock() is_active_mock.return_value = True self.vg._lv_is_active = is_active_mock self.vg.create_volume('test', '1G') self.assertRaises(exception.VolumeNotDeactivated, self.vg.deactivate_lv, 'test') class BrickLvmTestCaseIgnoreFDWarnings(BrickLvmTestCase): def setUp(self): self.configuration = mock.Mock(conf.Configuration) self.configuration.lvm_suppress_fd_warnings = True super(BrickLvmTestCaseIgnoreFDWarnings, self).setUp() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 
cinder-27.0.0/cinder/tests/unit/cast_as_call.py0000664000175000017500000000223600000000000021441 0ustar00zuulzuul00000000000000# Copyright 2013 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from unittest import mock def mock_cast_as_call(obj=None): """Use this to mock `cast` as calls. :param obj: Either an instance of RPCClient or an instance of _Context. """ orig_prepare = obj.prepare def prepare(*args, **kwargs): cctxt = orig_prepare(*args, **kwargs) mock_cast_as_call(obj=cctxt) # woo, recurse! return cctxt prepare_patch = mock.patch.object(obj, 'prepare').start() prepare_patch.side_effect = prepare cast_patch = mock.patch.object(obj, 'cast').start() cast_patch.side_effect = obj.call ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315577.1951194 cinder-27.0.0/cinder/tests/unit/cmd/0000775000175000017500000000000000000000000017217 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/cmd/__init__.py0000664000175000017500000000000000000000000021316 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/cmd/test_status.py0000664000175000017500000002740700000000000022165 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""Unit tests for the cinder-status CLI interfaces.""" from unittest import mock import uuid import ddt from oslo_config import cfg from oslo_upgradecheck import upgradecheck as uc import testtools import cinder.backup.manager # noqa from cinder.cmd import status from cinder import context from cinder import db from cinder.db.sqlalchemy import api as sqla_api from cinder import exception from cinder.tests.unit import fake_constants as fakes from cinder.tests.unit import test import cinder.volume.manager as volume_manager CONF = cfg.CONF @ddt.ddt class TestCinderStatus(testtools.TestCase): """Test cases for the cinder-status upgrade check command.""" def _setup_database(self): CONF.set_default('connection', 'sqlite://', 'database') CONF.set_default('sqlite_synchronous', False, 'database') self.useFixture(test.Database()) sqla_api._GET_METHODS = {} self.addCleanup(CONF.reset) def setUp(self): super(TestCinderStatus, self).setUp() self.checks = status.Checks() # Make sure configuration is initialized try: CONF([], project='cinder') except cfg.RequiredOptError: # Doesn't matter in this situation pass # Make sure our expected path is returned patcher = mock.patch.object(CONF, 'find_file') self.addCleanup(patcher.stop) self.find_file = patcher.start() self.find_file.return_value = '/etc/cinder/' self._setup_database() self.context = context.get_admin_context() def _set_config(self, key, value, group=None): CONF.set_override(key, value, group=group) self.addCleanup(CONF.clear_override, key, group=group) def _set_backup_driver(self, driver_path): CONF.set_override('backup_driver', driver_path) self.addCleanup(CONF.clear_override, 'backup_driver') def _set_volume_driver(self, volume_driver, enabled_backend): CONF.register_opts(volume_manager.volume_backend_opts, group=enabled_backend) CONF.set_override('enabled_backends', enabled_backend) CONF.set_override('volume_driver', volume_driver, group=enabled_backend) self.addCleanup(CONF.clear_override, 'volume_driver', group=enabled_backend) self.addCleanup(CONF.clear_override, 'enabled_backends') def test_check_backup_module(self): self._set_config( 'backup_driver', 'cinder.backup.drivers.swift.SwiftBackupDriver') result = self.checks._check_backup_module() self.assertEqual(uc.Code.SUCCESS, result.code) def test_check_backup_module_not_class(self): self._set_config('backup_driver', 'cinder.backup.drivers.swift') result = self.checks._check_backup_module() self.assertEqual(uc.Code.FAILURE, result.code) self.assertIn('requires the full path', result.details) def test_check_policy_file(self): with mock.patch.object(self.checks, '_file_exists') as fe: fe.return_value = False result = self.checks._check_policy_file() self.assertEqual(uc.Code.SUCCESS, result.code) def test_check_policy_file_exists(self): with mock.patch.object(self.checks, '_file_exists') as fe: fe.return_value = True result = self.checks._check_policy_file() self.assertEqual(uc.Code.WARNING, result.code) self.assertIn('policy.json file is present', result.details) def test_check_policy_file_custom_path(self): policy_path = '/my/awesome/configs/policy.yaml' self._set_config('policy_file', policy_path, group='oslo_policy') with mock.patch.object(self.checks, '_file_exists') as fe: fe.return_value = False result = self.checks._check_policy_file() fe.assert_called_with(policy_path) self.assertEqual(uc.Code.WARNING, result.code) self.assertIn(policy_path, result.details) def test_check_policy_file_custom_file(self): policy_path = 'mypolicy.yaml' self._set_config('policy_file', policy_path, 
group='oslo_policy') with mock.patch.object(self.checks, '_file_exists') as fe: fe.return_value = False result = self.checks._check_policy_file() fe.assert_called_with('/etc/cinder/%s' % policy_path) self.assertEqual(uc.Code.WARNING, result.code) self.assertIn(policy_path, result.details) def test_check_periodic_interval_default(self): # default value is 60 self._set_config('periodic_interval', 60) result = self.checks._check_periodic_interval() self.assertEqual(uc.Code.SUCCESS, result.code) def test_check_periodic_interval_not_default(self): # default value is 60 self._set_config('periodic_interval', 22) result = self.checks._check_periodic_interval() self.assertEqual(uc.Code.WARNING, result.code) self.assertIn('New configuration options have been introduced', result.details) @ddt.data(['cinder.quota.DbQuotaDriver', True], ['cinder.quota.NestedDbQuotaDriver', False]) @ddt.unpack def test_nested_quota_driver(self, driver, should_pass): self._set_config('quota_driver', driver) result = self.checks._check_nested_quota() if should_pass: expected = uc.Code.SUCCESS else: expected = uc.Code.FAILURE self.assertEqual(expected, result.code) def test_check_legacy_win_conf(self): self._set_volume_driver( 'cinder.volume.drivers.windows.iscsi.WindowsISCSIDriver', 'winiscsi') result = self.checks._check_legacy_windows_config() self.assertEqual(uc.Code.SUCCESS, result.code) def test_check_legacy_win_conf_fail(self): self._set_volume_driver( 'cinder.volume.drivers.windows.windows.WindowsDriver', 'winiscsi') result = self.checks._check_legacy_windows_config() self.assertEqual(uc.Code.FAILURE, result.code) self.assertIn('Please update to use', result.details) def test_check_legacy_win_conf_no_drivers(self): self._set_config('enabled_backends', None) result = self.checks._check_legacy_windows_config() self.assertEqual(uc.Code.SUCCESS, result.code) def test_check_removed_drivers(self): self._set_volume_driver( 'cinder.volume.drivers.lvm.LVMVolumeDriver', 'winiscsi') result = self.checks._check_removed_drivers() self.assertEqual(uc.Code.SUCCESS, result.code) @ddt.data('cinder.volume.drivers.coprhd.fc.EMCCoprHDFCDriver', 'cinder.volume.drivers.coprhd.iscsi.EMCCoprHDISCSIDriver', 'cinder.volume.drivers.coprhd.scaleio.EMCCoprHDScaleIODriver', 'cinder.volume.drivers.disco.disco.DiscoDriver', 'cinder.volume.drivers.hgst.HGSTDriver', 'cinder.volume.drivers.hpe.hpe_lefthand_iscsi.' 
'HPELeftHandISCSIDriver', 'cinder.volume.drivers.sheepdog.SheepdogDriver', 'cinder.volume.drivers.zfssa.zfssaiscsi.ZFSSAISCSIDriver', 'cinder.volume.drivers.zfssa.zfssanfs.ZFSSANFSDriver') def test_check_removed_drivers_fail(self, volume_driver): self._set_volume_driver( volume_driver, 'testDriver') result = self.checks._check_removed_drivers() self.assertEqual(uc.Code.FAILURE, result.code) self.assertIn(volume_driver, result.details) # Check for singular version of result message self.assertIn('This driver has been removed', result.details) def test_check_multiple_removed_drivers_fail(self): d1 = 'cinder.volume.drivers.coprhd.fc.EMCCoprHDFCDriver' d3 = 'cinder.volume.drivers.coprhd.scaleio.EMCCoprHDScaleIODriver' d5 = 'cinder.volume.drivers.hgst.HGSTDriver' d2 = 'cinder.volume.drivers.foo.iscsi.FooDriver' d4 = 'cinder.volume.drivers.bar.fc.BarFCDriver' self._set_volume_driver(d1, 'b1') self._set_volume_driver(d2, 'b2') self._set_volume_driver(d3, 'b3') self._set_volume_driver(d4, 'b4') self._set_volume_driver(d5, 'b5') CONF.set_override('enabled_backends', 'b1,b2,b3,b4,b5') result = self.checks._check_removed_drivers() self.assertEqual(uc.Code.FAILURE, result.code) self.assertIn(d1, result.details) self.assertIn(d3, result.details) self.assertIn(d5, result.details) self.assertNotIn(d2, result.details) self.assertNotIn(d4, result.details) # check for plural version of result message self.assertIn('The following drivers', result.details) def test_check_removed_drivers_no_drivers(self): self._set_config('enabled_backends', None) result = self.checks._check_removed_drivers() self.assertEqual(uc.Code.SUCCESS, result.code) @staticmethod def uuid(): return str(uuid.uuid4()) def _create_service(self, **values): values.setdefault('uuid', self.uuid()) db.service_create(self.context, values) def _create_volume(self, **values): values.setdefault('id', self.uuid()) values.setdefault('service_uuid', self.uuid()) try: db.volume_create(self.context, values) # Support setting deleted on creation except exception.VolumeNotFound: if values.get('deleted') is not True: raise def test__check_service_uuid_ok(self): self._create_service() self._create_service() self._create_volume(volume_type_id=fakes.VOLUME_TYPE_ID) # Confirm that we ignored deleted entries self._create_volume(service_uuid=None, deleted=True, volume_type_id=fakes.VOLUME_TYPE_ID) result = self.checks._check_service_uuid() self.assertEqual(uc.Code.SUCCESS, result.code) def test__check_service_uuid_fail_service(self): self._create_service() self._create_service(uuid=None) self._create_volume(volume_type_id=fakes.VOLUME_TYPE_ID) result = self.checks._check_service_uuid() self.assertEqual(uc.Code.FAILURE, result.code) def test__check_service_uuid_fail_volume(self): self._create_service() self._create_volume(service_uuid=None, volume_type_id=fakes.VOLUME_TYPE_ID) result = self.checks._check_service_uuid() self.assertEqual(uc.Code.FAILURE, result.code) def test__check_attachment_specs_ok(self): attach_uuid = self.uuid() # Confirm that we ignore deleted attachment specs db.attachment_specs_update_or_create(self.context, attach_uuid, {'k': 'v'}) db.attachment_specs_delete(self.context, attach_uuid, 'k') result = self.checks._check_attachment_specs() self.assertEqual(uc.Code.SUCCESS, result.code) def test__check_attachment_specs_fail(self): db.attachment_specs_update_or_create(self.context, self.uuid(), {'k': 'v', 'k2': 'v2'}) result = self.checks._check_attachment_specs() self.assertEqual(uc.Code.FAILURE, result.code) 
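# --- Hedged sketch (not from the Cinder tree): the general shape of an
# oslo.upgradecheck check as exercised by the TestCinderStatus cases above.
# The check name, method, and comment text are made up for illustration.
from oslo_upgradecheck import upgradecheck as uc


class ExampleChecks(uc.UpgradeCommands):

    def _check_example(self):
        # A real check inspects configuration or the database and returns
        # uc.Code.WARNING or uc.Code.FAILURE with a details string when
        # something needs operator attention.
        return uc.Result(uc.Code.SUCCESS)

    _upgrade_checks = (('example check', _check_example),)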
././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315577.1991193 cinder-27.0.0/cinder/tests/unit/compute/0000775000175000017500000000000000000000000020130 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/compute/__init__.py0000664000175000017500000000000000000000000022227 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/compute/test_nova.py0000664000175000017500000003461500000000000022515 0ustar00zuulzuul00000000000000# Copyright 2013 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from unittest import mock import ddt from keystoneauth1 import exceptions as ks_exc from keystoneauth1 import loading as ks_loading from novaclient import exceptions as nova_exceptions from oslo_config import cfg from cinder.compute import nova from cinder import context from cinder.message import message_field from cinder.tests.unit import test CONF = cfg.CONF class NovaClientTestCase(test.TestCase): def setUp(self): super(NovaClientTestCase, self).setUp() # Register the Password auth plugin options, # so we can use CONF.set_override # reset() first, otherwise already registered CLI options will # prevent unregister in tearDown() # Use CONF.set_override(), because we'll unregister the opts, # no need (and not possible) to cleanup. 
CONF.reset() self.password_opts = \ ks_loading.get_auth_plugin_conf_options('password') CONF.register_opts(self.password_opts, group='nova') CONF.set_override('auth_url', 'http://keystonehost:5000', group='nova') CONF.set_override('username', 'adminuser', group='nova') CONF.set_override('password', 'strongpassword', group='nova') self.ctx = context.RequestContext('regularuser', 'e3f0833dc08b4cea', auth_token='token', is_admin=False) self.ctx.service_catalog = \ [{'type': 'compute', 'name': 'nova', 'endpoints': [{'publicURL': 'http://novahost:8774/v2/e3f0833dc08b4cea'}]}, {'type': 'identity', 'name': 'keystone', 'endpoints': [{'publicURL': 'http://keystonehostfromsc:5000/v3'}]}] self.override_config('auth_type', 'password', group='nova') self.override_config('cafile', 'my.ca', group='nova') def tearDown(self): super(NovaClientTestCase, self).tearDown() CONF.unregister_opts(self.password_opts, group='nova') @mock.patch('novaclient.api_versions.APIVersion') @mock.patch('novaclient.client.Client') @mock.patch('keystoneauth1.identity.Token') @mock.patch('keystoneauth1.session.Session') def test_nova_client_regular(self, p_session, p_token_plugin, p_client, p_api_version): self.override_config('token_auth_url', 'http://keystonehost:5000', group='nova') nova.novaclient(self.ctx) p_token_plugin.assert_called_once_with( auth_url='http://keystonehost:5000', token='token', project_name=None, project_domain_id=None ) p_client.assert_called_once_with( p_api_version(nova.NOVA_API_VERSION), session=p_session.return_value, region_name=None, insecure=False, endpoint_type='public', cacert='my.ca', global_request_id=self.ctx.request_id, timeout=None, extensions=nova.nova_extensions) @mock.patch('novaclient.api_versions.APIVersion') @mock.patch('novaclient.client.Client') @mock.patch('keystoneauth1.identity.Token') @mock.patch('keystoneauth1.session.Session') def test_nova_client_regular_service_catalog(self, p_session, p_token_plugin, p_client, p_api_version): nova.novaclient(self.ctx) p_token_plugin.assert_called_once_with( auth_url='http://keystonehostfromsc:5000/v3', token='token', project_name=None, project_domain_id=None ) p_client.assert_called_once_with( p_api_version(nova.NOVA_API_VERSION), session=p_session.return_value, region_name=None, insecure=False, endpoint_type='public', cacert='my.ca', global_request_id=self.ctx.request_id, timeout=None, extensions=nova.nova_extensions) @mock.patch('novaclient.api_versions.APIVersion') @mock.patch('novaclient.client.Client') @mock.patch('keystoneauth1.identity.Password') @mock.patch('keystoneauth1.session.Session') def test_nova_client_privileged_user(self, p_session, p_password_plugin, p_client, p_api_version): nova.novaclient(self.ctx, privileged_user=True) p_password_plugin.assert_called_once() self.assertEqual('adminuser', p_password_plugin.call_args[1]['username']) self.assertEqual('http://keystonehost:5000', p_password_plugin.call_args[1]['auth_url']) p_client.assert_called_once_with( p_api_version(nova.NOVA_API_VERSION), session=p_session.return_value, region_name=None, insecure=False, endpoint_type='public', cacert='my.ca', global_request_id=self.ctx.request_id, timeout=None, extensions=nova.nova_extensions) @mock.patch('novaclient.api_versions.APIVersion') @mock.patch('novaclient.client.Client') @mock.patch('keystoneauth1.identity.Password') @mock.patch('keystoneauth1.session.Session') def test_nova_client_privileged_user_custom_auth_url(self, p_session, p_password_plugin, p_client, p_api_version): CONF.set_override('auth_url', 
'http://privatekeystonehost:5000', group='nova') nova.novaclient(self.ctx, privileged_user=True) p_password_plugin.assert_called_once() self.assertEqual('http://privatekeystonehost:5000', p_password_plugin.call_args[1]['auth_url']) self.assertEqual('adminuser', p_password_plugin.call_args[1]['username']) p_client.assert_called_once_with( p_api_version(nova.NOVA_API_VERSION), session=p_session.return_value, region_name=None, insecure=False, endpoint_type='public', cacert='my.ca', global_request_id=self.ctx.request_id, timeout=None, extensions=nova.nova_extensions) @mock.patch('novaclient.api_versions.APIVersion') @mock.patch('novaclient.client.Client') @mock.patch('keystoneauth1.identity.Password') @mock.patch('keystoneauth1.session.Session') def test_nova_client_custom_region(self, p_session, p_password_plugin, p_client, p_api_version): CONF.set_override('region_name', 'farfaraway', group='nova') nova.novaclient(self.ctx, privileged_user=True) # This doesn't impact the password plugin, just make sure it was called # with expected default values p_password_plugin.assert_called_once() self.assertEqual('http://keystonehost:5000', p_password_plugin.call_args[1]['auth_url']) self.assertEqual('adminuser', p_password_plugin.call_args[1]['username']) p_client.assert_called_once_with( p_api_version(nova.NOVA_API_VERSION), session=p_session.return_value, region_name='farfaraway', insecure=False, endpoint_type='public', cacert='my.ca', global_request_id=self.ctx.request_id, timeout=None, extensions=nova.nova_extensions) def test_get_identity_endpoint_from_sc_endpoint_not_found(self): ctxt = context.get_admin_context() self.assertRaises(ks_exc.EndpointNotFound, nova._get_identity_endpoint_from_sc, ctxt) class FakeNovaClient(object): class ServerExternalEvents(object): def __getattr__(self, item): return None class Volumes(object): def __getattr__(self, item): return None def __init__(self): self.server_external_events = self.ServerExternalEvents() self.volumes = self.Volumes() def create_volume_snapshot(self, *args, **kwargs): pass def delete_volume_snapshot(self, *args, **kwargs): pass @ddt.ddt class NovaApiTestCase(test.TestCase): def setUp(self): super(NovaApiTestCase, self).setUp() self.api = nova.API() self.novaclient = FakeNovaClient() self.ctx = context.get_admin_context() def test_update_server_volume(self): with mock.patch.object(nova, 'novaclient') as mock_novaclient, \ mock.patch.object(self.novaclient.volumes, 'update_server_volume') as \ mock_update_server_volume: mock_novaclient.return_value = self.novaclient self.api.update_server_volume(self.ctx, 'server_id', 'attach_id', 'new_volume_id') mock_novaclient.assert_called_once_with(self.ctx, privileged_user=True) mock_update_server_volume.assert_called_once_with( 'server_id', 'attach_id', 'new_volume_id' ) def test_extend_volume(self): server_ids = ['server-id-1', 'server-id-2'] with mock.patch.object(nova, 'novaclient') as mock_novaclient, \ mock.patch.object(self.novaclient.server_external_events, 'create') as mock_create_event: mock_novaclient.return_value = self.novaclient mock_create_event.return_value = [] result = self.api.extend_volume(self.ctx, server_ids, 'volume_id') self.assertTrue(result) mock_novaclient.assert_called_once_with(self.ctx, privileged_user=True, api_version='2.51') mock_create_event.assert_called_once_with([ {'name': 'volume-extended', 'server_uuid': 'server-id-1', 'tag': 'volume_id'}, {'name': 'volume-extended', 'server_uuid': 'server-id-2', 'tag': 'volume_id'}, ]) @ddt.data(nova_exceptions.NotFound, Exception, 
'illegal_list', [{'code': None}]) @mock.patch('cinder.message.api.API.create') def test_extend_volume_failed(self, nova_result, mock_create): server_ids = ['server-id-1', 'server-id-2'] with mock.patch.object(nova, 'novaclient') as mock_novaclient, \ mock.patch.object(self.novaclient.server_external_events, 'create') as mock_create_event: mock_novaclient.return_value = self.novaclient mock_create_event.side_effect = [nova_result] result = self.api.extend_volume(self.ctx, server_ids, 'volume_id') self.assertFalse(result) mock_novaclient.assert_called_once_with(self.ctx, privileged_user=True, api_version='2.51') mock_create.assert_called_once_with( self.ctx, message_field.Action.EXTEND_VOLUME, resource_uuid='volume_id', detail=message_field.Detail.NOTIFY_COMPUTE_SERVICE_FAILED) mock_create_event.assert_called_once_with([ {'name': 'volume-extended', 'server_uuid': 'server-id-1', 'tag': 'volume_id'}, {'name': 'volume-extended', 'server_uuid': 'server-id-2', 'tag': 'volume_id'}, ]) def test_reimage_volume(self): server_ids = ['server-id-1', 'server-id-2'] with mock.patch.object(nova, 'novaclient') as mock_novaclient, \ mock.patch.object(self.novaclient.server_external_events, 'create') as mock_create_event: mock_novaclient.return_value = self.novaclient mock_create_event.return_value = [] result = self.api.reimage_volume(self.ctx, server_ids, 'volume_id') self.assertTrue(result) mock_novaclient.assert_called_once_with(self.ctx, privileged_user=True, api_version='2.93') mock_create_event.assert_called_once_with([ {'name': 'volume-reimaged', 'server_uuid': 'server-id-1', 'tag': 'volume_id'}, {'name': 'volume-reimaged', 'server_uuid': 'server-id-2', 'tag': 'volume_id'}, ]) @ddt.data(nova_exceptions.NotFound, Exception, 'illegal_list', [{'code': None}]) @mock.patch('cinder.message.api.API.create') def test_reimage_volume_failed(self, nova_result, mock_create): server_ids = ['server-id-1', 'server-id-2'] with mock.patch.object(nova, 'novaclient') as mock_novaclient, \ mock.patch.object(self.novaclient.server_external_events, 'create') as mock_create_event: mock_novaclient.return_value = self.novaclient mock_create_event.side_effect = [nova_result] result = self.api.reimage_volume(self.ctx, server_ids, 'volume_id') self.assertFalse(result) mock_novaclient.assert_called_once_with(self.ctx, privileged_user=True, api_version='2.93') mock_create.assert_called_once_with( self.ctx, message_field.Action.REIMAGE_VOLUME, resource_uuid='volume_id', detail=message_field.Detail.REIMAGE_VOLUME_FAILED) mock_create_event.assert_called_once_with([ {'name': 'volume-reimaged', 'server_uuid': 'server-id-1', 'tag': 'volume_id'}, {'name': 'volume-reimaged', 'server_uuid': 'server-id-2', 'tag': 'volume_id'}, ]) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/conf_fixture.py0000664000175000017500000000522300000000000021523 0ustar00zuulzuul00000000000000# Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import os from oslo_config import cfg from cinder.volume import configuration CONF = cfg.CONF CONF.import_opt('policy_file', 'cinder.policy', group='oslo_policy') CONF.import_opt('volume_driver', 'cinder.volume.manager', group=configuration.SHARED_CONF_GROUP) CONF.import_opt('backup_driver', 'cinder.backup.manager') CONF.import_opt('backend', 'cinder.keymgr', group='key_manager') CONF.import_opt('scheduler_driver', 'cinder.scheduler.manager') def_vol_type = '__DEFAULT__' def set_defaults(conf): conf.set_default('default_volume_type', def_vol_type) conf.set_default('volume_driver', 'cinder.tests.fake_driver.FakeLoggingVolumeDriver', group=configuration.SHARED_CONF_GROUP) conf.set_default('target_helper', 'fake') conf.set_default('transport_url', 'fake:/') conf.set_default('connection', 'sqlite://', group='database') conf.set_default('sqlite_synchronous', False, group='database') conf.set_default('policy_file', '/this/path/does/not/exist', group='oslo_policy') conf.set_default('backup_driver', 'cinder.tests.unit.backup.fake_service.FakeBackupService') conf.set_default('backend', 'castellan.tests.unit.key_manager.mock_key_manager.' 'MockKeyManager', group='key_manager') conf.set_default('scheduler_driver', 'cinder.scheduler.filter_scheduler.FilterScheduler') conf.set_default('state_path', os.path.abspath( os.path.join(os.path.dirname(__file__), '..', '..', '..'))) conf.set_default('policy_dirs', [], group='oslo_policy') # This is where we don't authenticate conf.set_default('auth_strategy', 'noauth') conf.set_default('auth_url', 'fake', 'keystone_authtoken') # we use "fake" and "openstack" as project ID in a number of tests conf.set_default('project_id_regex', r"[0-9a-fopnstk\-]+") ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315577.1991193 cinder-27.0.0/cinder/tests/unit/consistencygroup/0000775000175000017500000000000000000000000022072 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/consistencygroup/__init__.py0000664000175000017500000000000000000000000024171 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/consistencygroup/fake_cgsnapshot.py0000664000175000017500000000346500000000000025613 0ustar00zuulzuul00000000000000# Copyright 2016 EMC Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
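# --- Editor's annotation (not part of the original module) ------------------
# fake_db_cgsnapshot() builds a plausible DB row for a CGSnapshot: it seeds a
# few fake IDs and then fills every remaining field declared on
# objects.CGSnapshot with None (when nullable) or the field's declared
# default, raising for anything it cannot guess so the fixture fails loudly
# when the model grows. fake_cgsnapshot_obj() turns that row into a versioned
# object via CGSnapshot._from_db_object(). A hedged usage sketch (the keyword
# override shown is illustrative only):
#
#   snap = fake_cgsnapshot_obj(ctxt, id=fake.CGSNAPSHOT_ID)
#   assert snap.project_id == fake.PROJECT_ID
# -----------------------------------------------------------------------------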
from oslo_versionedobjects import fields from cinder import objects from cinder.tests.unit import fake_constants as fake def fake_db_cgsnapshot(**updates): db_values = { 'id': fake.CGSNAPSHOT_ID, 'consistencygroup_id': fake.CONSISTENCY_GROUP_ID, 'user_id': fake.USER_ID, 'project_id': fake.PROJECT_ID, } for name, field in objects.CGSnapshot.fields.items(): if name in db_values: continue if field.nullable: db_values[name] = None elif field.default != fields.UnspecifiedDefault: db_values[name] = field.default else: raise Exception('fake_db_snapshot needs help with %s' % name) if updates: db_values.update(updates) return db_values def fake_cgsnapshot_obj(context, **updates): expected_attrs = updates.pop('expected_attrs', None) return objects.CGSnapshot._from_db_object(context, objects.CGSnapshot(), fake_db_cgsnapshot( **updates), expected_attrs=expected_attrs) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/consistencygroup/fake_consistencygroup.py0000664000175000017500000000336500000000000027057 0ustar00zuulzuul00000000000000# Copyright 2015 SimpliVity Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_versionedobjects import fields from cinder import objects from cinder.tests.unit import fake_constants as fake def fake_db_consistencygroup(**updates): db_values = { 'id': fake.CONSISTENCY_GROUP_ID, 'user_id': fake.USER_ID, 'project_id': fake.PROJECT_ID, 'host': 'FakeHost', 'volumes': [], } for name, field in objects.ConsistencyGroup.fields.items(): if name in db_values: continue if field.nullable: db_values[name] = None elif field.default != fields.UnspecifiedDefault: db_values[name] = field.default else: raise Exception('fake_db_consistencygroup needs help with %s' % name) if updates: db_values.update(updates) return db_values def fake_consistencyobject_obj(context, **updates): return objects.ConsistencyGroup._from_db_object(context, objects.ConsistencyGroup(), fake_db_consistencygroup( **updates)) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315577.1991193 cinder-27.0.0/cinder/tests/unit/db/0000775000175000017500000000000000000000000017041 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/db/__init__.py0000664000175000017500000000000000000000000021140 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/db/test_cluster.py0000664000175000017500000003066500000000000022145 0ustar00zuulzuul00000000000000# Copyright (c) 2016 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Tests for cluster table related operations.""" from unittest import mock from sqlalchemy.orm import exc from cinder import db from cinder import exception from cinder.tests.unit import test_db_api from cinder.tests.unit import utils class ClusterTestCase(test_db_api.BaseTest): """Unit tests for cinder.db.api.cluster_*.""" def test_cluster_create_and_get(self): """Basic cluster creation test.""" values = utils.default_cluster_values() cluster = db.cluster_create(self.ctxt, values) values['last_heartbeat'] = None self.assertEqual(0, cluster.race_preventer) for k, v in values.items(): self.assertEqual(v, getattr(cluster, k)) db_cluster = db.cluster_get(self.ctxt, cluster.id, services_summary=True) for k, v in values.items(): self.assertEqual(v, getattr(db_cluster, k)) self.assertEqual(0, db_cluster.race_preventer) def test_cluster_create_cfg_disabled(self): """Test that create uses enable_new_services configuration option.""" self.override_config('enable_new_services', False) cluster = utils.create_cluster(self.ctxt, disabled=None) self.assertTrue(cluster.disabled) def test_cluster_create_disabled_preference(self): """Test that provided disabled value has highest priority on create.""" self.override_config('enable_new_services', False) cluster = utils.create_cluster(self.ctxt) self.assertFalse(cluster.disabled) def test_cluster_create_duplicate(self): """Test that unique constraints are working. To remove potential races on creation we have a constraint set on name and race_preventer fields, and we set value on creation to 0, so 2 clusters with the same name will fail this constraint. On deletion we change this field to the same value as the id which will be unique and will not conflict with the creation of another cluster with the same name. """ cluster = utils.create_cluster(self.ctxt) self.assertRaises(exception.ClusterExists, utils.create_cluster, self.ctxt, name=cluster.name) def test_cluster_create_not_duplicate(self): """Test that unique constraints will work with delete operation. To remove potential races on creation we have a constraint set on name and race_preventer fields, and we set value on creation to 0, so 2 clusters with the same name will fail this constraint. On deletion we change this field to the same value as the id which will be unique and will not conflict with the creation of another cluster with the same name. 
""" cluster = utils.create_cluster(self.ctxt) self.assertIsNone(db.cluster_destroy(self.ctxt, cluster.id)) self.assertIsNotNone(utils.create_cluster(self.ctxt, name=cluster.name)) def test_cluster_get_fail(self): """Test that cluster get will fail if the cluster doesn't exists.""" utils.create_cluster(self.ctxt, name='cluster@backend') self.assertRaises(exception.ClusterNotFound, db.cluster_get, self.ctxt, 'name=cluster@backend2') def test_cluster_get_by_name(self): """Getting a cluster by name will include backends if not specified.""" cluster = utils.create_cluster(self.ctxt, name='cluster@backend') # Get without the backend db_cluster = db.cluster_get(self.ctxt, name='cluster') self.assertEqual(cluster.id, db_cluster.id) # Get with the backend detail db_cluster = db.cluster_get(self.ctxt, name='cluster@backend') self.assertEqual(cluster.id, db_cluster.id) def test_cluster_get_without_summary(self): """Test getting cluster without summary information.""" cluster = utils.create_cluster(self.ctxt) db_cluster = db.cluster_get(self.ctxt, cluster.id) self.assertRaises(exc.DetachedInstanceError, getattr, db_cluster, 'num_hosts') self.assertRaises(exc.DetachedInstanceError, getattr, db_cluster, 'num_down_hosts') self.assertIsNone(db_cluster.last_heartbeat) def test_cluster_get_with_summary_empty_cluster(self): """Test getting empty cluster with summary information.""" cluster = utils.create_cluster(self.ctxt) db_cluster = db.cluster_get(self.ctxt, cluster.id, services_summary=True) self.assertEqual(0, db_cluster.num_hosts) self.assertEqual(0, db_cluster.num_down_hosts) self.assertIsNone(db_cluster.last_heartbeat) def test_cluster_get_with_summary(self): """Test getting cluster with summary information.""" cluster, svcs = utils.create_populated_cluster(self.ctxt, 3, 1) db_cluster = db.cluster_get(self.ctxt, cluster.id, services_summary=True) self.assertEqual(3, db_cluster.num_hosts) self.assertEqual(1, db_cluster.num_down_hosts) self.assertEqual(svcs[1].updated_at, db_cluster.last_heartbeat) def test_cluster_get_is_up_on_empty_cluster(self): """Test is_up filter works on empty clusters.""" cluster = utils.create_cluster(self.ctxt) db_cluster = db.cluster_get(self.ctxt, cluster.id, is_up=False) self.assertEqual(cluster.id, db_cluster.id) self.assertRaises(exception.ClusterNotFound, db.cluster_get, self.ctxt, cluster.id, is_up=True) def test_cluster_get_services_on_empty_cluster(self): """Test get_services filter works on empty clusters.""" cluster = utils.create_cluster(self.ctxt) db_cluster = db.cluster_get(self.ctxt, cluster.id, get_services=True) self.assertEqual(cluster.id, db_cluster.id) self.assertListEqual([], db_cluster.services) def test_cluster_get_services(self): """Test services is properly populated on non empty cluster.""" # We create another cluster to see we do the selection correctly utils.create_populated_cluster(self.ctxt, 2, name='cluster2') # We create our cluster with 2 up nodes and 1 down cluster, svcs = utils.create_populated_cluster(self.ctxt, 3, 1) # Add a deleted service to the cluster db.service_create(self.ctxt, {'cluster_name': cluster.name, 'deleted': True}) db_cluster = db.cluster_get(self.ctxt, name=cluster.name, get_services=True) self.assertEqual(3, len(db_cluster.services)) self.assertSetEqual({svc.id for svc in svcs}, {svc.id for svc in db_cluster.services}) def test_cluster_get_is_up_all_are_down(self): """Test that is_up filter works when all services are down.""" cluster, svcs = utils.create_populated_cluster(self.ctxt, 3, 3) 
self.assertRaises(exception.ClusterNotFound, db.cluster_get, self.ctxt, cluster.id, is_up=True) db_cluster = db.cluster_get(self.ctxt, name=cluster.name, is_up=False) self.assertEqual(cluster.id, db_cluster.id) def test_cluster_get_by_num_down_hosts(self): """Test cluster_get by subquery field num_down_hosts.""" cluster, svcs = utils.create_populated_cluster(self.ctxt, 3, 2) result = db.cluster_get(self.ctxt, num_down_hosts=2) self.assertEqual(cluster.id, result.id) def test_cluster_get_by_num_hosts(self): """Test cluster_get by subquery field num_hosts.""" cluster, svcs = utils.create_populated_cluster(self.ctxt, 3, 2) result = db.cluster_get(self.ctxt, num_hosts=3) self.assertEqual(cluster.id, result.id) def test_cluster_destroy(self): """Test basic cluster destroy.""" cluster = utils.create_cluster(self.ctxt) # On creation race_preventer is marked with a 0 self.assertEqual(0, cluster.race_preventer) db.cluster_destroy(self.ctxt, cluster.id) db_cluster = db.cluster_get(self.ctxt, cluster.id, read_deleted='yes') self.assertTrue(db_cluster.deleted) self.assertIsNotNone(db_cluster.deleted_at) # On deletion race_preventer is marked with the id self.assertEqual(cluster.id, db_cluster.race_preventer) def test_cluster_destroy_non_existent(self): """Test destroying non existent cluster.""" self.assertRaises(exception.ClusterNotFound, db.cluster_destroy, self.ctxt, 0) def test_cluster_destroy_has_services(self): """Test that we cannot delete a cluster with non deleted services.""" cluster, svcs = utils.create_populated_cluster(self.ctxt, 3, 1) self.assertRaises(exception.ClusterHasHosts, db.cluster_destroy, self.ctxt, cluster.id) def test_cluster_update_non_existent(self): """Test that we raise an exception on updating non existent cluster.""" self.assertRaises(exception.ClusterNotFound, db.cluster_update, self.ctxt, 0, {'disabled': True}) def test_cluster_update(self): """Test basic cluster update.""" cluster = utils.create_cluster(self.ctxt) self.assertFalse(cluster.disabled) db.cluster_update(self.ctxt, cluster.id, {'disabled': True}) db_cluster = db.cluster_get(self.ctxt, cluster.id) self.assertTrue(db_cluster.disabled) def test_cluster_get_all_empty(self): """Test basic empty cluster get_all.""" self.assertListEqual([], db.cluster_get_all(self.ctxt)) def test_cluster_get_all_matches(self): """Basic test of get_all with a matching filter.""" cluster1, svcs = utils.create_populated_cluster(self.ctxt, 3, 1) cluster2, svcs = utils.create_populated_cluster(self.ctxt, 3, 2, name='cluster2') cluster3, svcs = utils.create_populated_cluster(self.ctxt, 3, 3, name='cluster3') expected = {cluster1.id, cluster2.id} result = db.cluster_get_all(self.ctxt, is_up=True) self.assertEqual(len(expected), len(result)) self.assertSetEqual(expected, {cluster.id for cluster in result}) def test_cluster_get_all_no_match(self): """Basic test of get_all with a non matching filter.""" cluster1, svcs = utils.create_populated_cluster(self.ctxt, 3, 3) result = db.cluster_get_all(self.ctxt, is_up=True) self.assertListEqual([], result) @mock.patch('cinder.db.sqlalchemy.api._cluster_query') def test_cluster_get_all_passes_parameters(self, cluster_query_mock): """Test that get_all passes all parameters. Since we have already tested all filters and parameters with cluster_get method all we have to do for get_all is to check that we are passing them to the query building method. 
""" args = (mock.sentinel.read_deleted, mock.sentinel.get_services, mock.sentinel.services_summary, mock.sentinel.is_up, mock.sentinel.name_match_level) filters = {'session': mock.sentinel.session, 'name': mock.sentinel.name, 'disabled': mock.sentinel.disabled, 'disabled_reason': mock.sentinel.disabled_reason, 'race_preventer': mock.sentinel.race_preventer, 'last_heartbeat': mock.sentinel.last_heartbeat, 'num_hosts': mock.sentinel.num_hosts, 'num_down_hosts': mock.sentinel.num_down_hosts} db.cluster_get_all(self.ctxt, *args, **filters) cluster_query_mock.assert_called_once_with(self.ctxt, *args, **filters) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/db/test_default_types.py0000664000175000017500000001256200000000000023330 0ustar00zuulzuul00000000000000# Copyright 2020 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Tests for default volume types.""" from cinder import context from cinder import db from cinder.tests.unit import fake_constants as fake from cinder.tests.unit import test class DefaultVolumeTypesTestCase(test.TestCase): """DB tests for default volume types.""" def setUp(self): super(DefaultVolumeTypesTestCase, self).setUp() self.ctxt = context.RequestContext(user_id=fake.USER_ID, project_id=fake.PROJECT_ID, is_admin=True) def test_default_type_set(self): default_type = db.project_default_volume_type_set( self.ctxt, fake.VOLUME_TYPE_ID, fake.PROJECT_ID) self.assertEqual(fake.PROJECT_ID, default_type.project_id) self.assertEqual(fake.VOLUME_TYPE_ID, default_type.volume_type_id) db.project_default_volume_type_unset(self.ctxt, default_type.project_id) def test_default_type_get(self): db.project_default_volume_type_set(self.ctxt, fake.VOLUME_TYPE_ID, fake.PROJECT_ID) default_type = db.project_default_volume_type_get( self.ctxt, project_id=fake.PROJECT_ID) self.assertEqual(fake.PROJECT_ID, default_type.project_id) self.assertEqual(fake.VOLUME_TYPE_ID, default_type.volume_type_id) db.project_default_volume_type_unset(self.ctxt, default_type.project_id) def test_get_all_projects_by_default_type(self): db.project_default_volume_type_set(self.ctxt, fake.VOLUME_TYPE_ID, fake.PROJECT_ID) default_type = db.get_all_projects_with_default_type( self.ctxt, volume_type_id=fake.VOLUME_TYPE_ID) self.assertEqual(1, len(default_type)) self.assertEqual(fake.PROJECT_ID, default_type[0].project_id) def test_default_type_get_all(self): db.project_default_volume_type_set(self.ctxt, fake.VOLUME_TYPE_ID, fake.PROJECT_ID) db.project_default_volume_type_set(self.ctxt, fake.VOLUME_TYPE2_ID, fake.PROJECT2_ID) default_types = db.project_default_volume_type_get(self.ctxt) self.assertEqual(2, len(default_types)) db.project_default_volume_type_unset(self.ctxt, default_types[0].project_id) db.project_default_volume_type_unset(self.ctxt, default_types[1].project_id) def test_default_type_delete(self): db.project_default_volume_type_set(self.ctxt, fake.VOLUME_TYPE_ID, fake.PROJECT_ID) default_types 
= db.project_default_volume_type_get(self.ctxt) self.assertEqual(1, len(default_types)) db.project_default_volume_type_unset(self.ctxt, default_types[0].project_id) default_types = db.project_default_volume_type_get(self.ctxt) self.assertEqual(0, len(default_types)) def test_default_type_update(self): default_type = db.project_default_volume_type_set( self.ctxt, fake.VOLUME_TYPE_ID, fake.PROJECT_ID) self.assertEqual(fake.PROJECT_ID, default_type.project_id) self.assertEqual(fake.VOLUME_TYPE_ID, default_type.volume_type_id) # update to type 2 db.project_default_volume_type_set(self.ctxt, fake.VOLUME_TYPE2_ID, fake.PROJECT_ID) default_type = db.project_default_volume_type_get( self.ctxt, project_id=fake.PROJECT_ID) self.assertEqual(fake.PROJECT_ID, default_type.project_id) self.assertEqual(fake.VOLUME_TYPE2_ID, default_type.volume_type_id) # update to type 3 db.project_default_volume_type_set(self.ctxt, fake.VOLUME_TYPE3_ID, fake.PROJECT_ID) default_type = db.project_default_volume_type_get( self.ctxt, project_id=fake.PROJECT_ID) self.assertEqual(fake.PROJECT_ID, default_type.project_id) self.assertEqual(fake.VOLUME_TYPE3_ID, default_type.volume_type_id) # back to original db.project_default_volume_type_set(self.ctxt, fake.VOLUME_TYPE_ID, fake.PROJECT_ID) default_type = db.project_default_volume_type_get( self.ctxt, project_id=fake.PROJECT_ID) self.assertEqual(fake.PROJECT_ID, default_type.project_id) self.assertEqual(fake.VOLUME_TYPE_ID, default_type.volume_type_id) db.project_default_volume_type_unset(self.ctxt, default_type.project_id) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/db/test_migration.py0000664000175000017500000000424400000000000022447 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
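# --- Editor's annotation (not part of the original module) ------------------
# These tests exercise the thin wrappers in cinder.db.migration:
#   * db_sync() rejects legacy (pre-alembic) version strings, locates the
#     alembic config via _find_alembic_conf(), points 'sqlalchemy.url' at the
#     engine URL and delegates to _upgrade_alembic();
#   * db_version() reports the current revision through alembic's
#     MigrationContext.
# The operator-facing equivalents would be something like (shown for
# orientation only):
#
#   $ cinder-manage db sync
#   $ cinder-manage db version
# -----------------------------------------------------------------------------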
from unittest import mock from alembic.runtime import migration as alembic_migration from oslotest import base as test_base from cinder.db import migration from cinder.db.sqlalchemy import api as db_api class TestDBSync(test_base.BaseTestCase): def test_db_sync_legacy_version(self): """We don't allow users to request legacy versions.""" self.assertRaises(ValueError, migration.db_sync, '402') @mock.patch.object(migration, '_upgrade_alembic') @mock.patch.object(migration, '_find_alembic_conf') @mock.patch.object(db_api, 'get_engine') def test_db_sync(self, mock_get_engine, mock_find_conf, mock_upgrade): migration.db_sync() mock_get_engine.assert_called_once_with() mock_find_conf.assert_called_once_with() mock_find_conf.return_value.set_main_option.assert_called_once_with( 'sqlalchemy.url', str(mock_get_engine.return_value.url), ) mock_upgrade.assert_called_once_with( mock_get_engine.return_value, mock_find_conf.return_value, None, ) @mock.patch.object(alembic_migration.MigrationContext, 'configure') @mock.patch.object(db_api, 'get_engine') class TestDBVersion(test_base.BaseTestCase): def test_db_version(self, mock_get_engine, mock_m_context_configure): """Database is controlled by alembic.""" ret = migration.db_version() mock_m_context = mock_m_context_configure.return_value self.assertEqual( mock_m_context.get_current_revision.return_value, ret, ) mock_get_engine.assert_called_once_with() mock_m_context_configure.assert_called_once() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/db/test_migrations.py0000664000175000017500000004635400000000000022642 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Tests for database migrations. For each database backend supported by cinder, the test case runs a series of test cases to ensure that migrations work properly and that no data loss occurs if possible. """ import functools from unittest import mock from alembic import command as alembic_api from alembic import script as alembic_script import fixtures from oslo_db.sqlalchemy import enginefacade from oslo_db.sqlalchemy import test_fixtures from oslo_db.sqlalchemy import test_migrations from oslo_db.sqlalchemy import utils as db_utils from oslo_log.fixture import logging_error as log_fixture from oslotest import base as test_base import sqlalchemy from cinder.db import migration from cinder.db.sqlalchemy import api from cinder.db.sqlalchemy import models from cinder.tests import fixtures as cinder_fixtures def prevent_drop_alter(func): """Decorator to prevent dropping or altering tables and columns. With rolling upgrades we shouldn't blindly allow dropping or altering tables and columns, since that can easily break them. Dropping and altering should be done in a backward-compatible manner. A more detailed explanation is provided in Cinder's developer documentation. 
To properly work, the first parameter of the decorated method must be a class or instance with the DROP_ALTER_EXCEPTIONS and FORBIDDEN_METHODS attribute, and the second parameter must be the version (legacy migrations) or revision (alembic migrations). Reviewers should be very careful when adding exceptions to DROP_ALTER_EXCEPTIONS and make sure that in the previous release there was nothing using that column, not even an ORM model (unless the whole ORM model was not being used) """ @functools.wraps(func) def wrapper(self, revision, *args, **kwargs): exceptions = getattr(self, 'DROP_ALTER_EXCEPTIONS', []) do_ban = revision not in exceptions patchers = [] if do_ban: forbidden = getattr(self, 'FORBIDDEN_METHODS', []) for path in forbidden: txt = (f'Migration {revision}: Operation {path}() is not ' 'allowed in a DB migration') patcher = mock.patch(path, autospec=True, side_effect=Exception(txt)) patcher.start() patchers.append(patcher) try: return func(self, revision, *args, **kwargs) finally: for patcher in patchers: patcher.stop() return wrapper class CinderModelsMigrationsSync(test_migrations.ModelsMigrationsSync): """Test sqlalchemy-migrate migrations.""" # Migrations can take a long time, particularly on underpowered CI nodes. # Give them some breathing room. TIMEOUT_SCALING_FACTOR = 4 def setUp(self): # Ensure BaseTestCase's ConfigureLogging fixture is disabled since # we're using our own (StandardLogging). with fixtures.EnvironmentVariable('OS_LOG_CAPTURE', '0'): super().setUp() self.useFixture(log_fixture.get_logging_handle_error_fixture()) self.useFixture(cinder_fixtures.WarningsFixture()) self.useFixture(cinder_fixtures.StandardLogging()) self.engine = enginefacade.writer.get_engine() self.patch(api, 'get_engine', self.get_engine) def db_sync(self, engine): migration.db_sync(engine=self.engine) def get_engine(self): return self.engine def get_metadata(self): return models.BASE.metadata def filter_metadata_diff(self, diff): """Filter out allowed differences between DB ORM model and actual DB We want to keep the DB ORM (models.py) and DB tables in sync, so the test_models_sync test checks for discrepancies between them. Due to the rolling upgrades feature there are cases where we will have the model and DB out of sync for 1 release, as we stop using it in the ORM first and then remove it from the DB in the next, so we use this method to allow such discrepancies. We must add a TODO item on the filtering code with the release the filtering must be removed. 
The diff parameter is a list of diff directives, which can be a tuple or a list of tuples: https://alembic.sqlalchemy.org/en/latest/api/autogenerate.html#getting-diffs """ # TODO: (D Release) Remove this method and its usage def ignore_leftover_nested_quota(element): operation = element[0] if operation == 'remove_column': table, column = element[2], element[3].name return (table, column) in (('quotas', 'allocated'), ('reservations', 'allocated_id')) if operation == 'remove_index': return element[1].name == 'ix_reservations_allocated_id' if operation == 'remove_fk': return (element[1].table.name == 'reservations' and element[1].column_keys == ['allocated_id']) return False def include_element(element): """Determine whether diff element should be excluded.""" if element[0] == 'modify_nullable': table_name, column = element[2], element[3] return (table_name, column) not in { # NOTE(stephenfin): This field has nullable=True set, but # since it's also a primary key (primary_key=True) the # resulting schema will still end up being "NOT NULL". This # weird combination was deemed necessary because MySQL will # otherwise set this to "NOT NULL DEFAULT ''" which may be # harmless but is inconsistent with other models. See the # migration for more information. ('encryption', 'encryption_id'), # NOTE(stephenfin): The nullability of these fields is # dependent on the backend, for some reason ('encryption', 'provider'), ('encryption', 'control_location'), } if ignore_leftover_nested_quota(element): return False return True def filter_elements(diff_directive): """Return only the elements that should not be ignored. It may return None or [] when all elements from the directive have been filtered out. """ if isinstance(diff_directive, list): return [element for element in diff_directive if include_element(element)] if include_element(diff_directive): return diff_directive return None result = [] for diff_directive in diff: remaining = filter_elements(diff_directive) if remaining: result.append(remaining) return result class TestModelsSyncSQLite( CinderModelsMigrationsSync, test_fixtures.OpportunisticDBTestMixin, test_base.BaseTestCase, ): pass class TestModelsSyncMySQL( CinderModelsMigrationsSync, test_fixtures.OpportunisticDBTestMixin, test_base.BaseTestCase, ): FIXTURE = test_fixtures.MySQLOpportunisticFixture class TestModelsSyncPostgreSQL( CinderModelsMigrationsSync, test_fixtures.OpportunisticDBTestMixin, test_base.BaseTestCase, ): FIXTURE = test_fixtures.PostgresqlOpportunisticFixture class MigrationsWalk( test_fixtures.OpportunisticDBTestMixin, test_base.BaseTestCase, ): # Migrations can take a long time, particularly on underpowered CI nodes. # Give them some breathing room. TIMEOUT_SCALING_FACTOR = 4 BOOL_TYPE = sqlalchemy.types.BOOLEAN # NOTE: List of migrations where we allow dropping/altring things. # Reviewers: DO NOT ALLOW THINGS TO BE ADDED HERE WITHOUT CARE, and make # sure that in the previous release there was nothing using that column, # not even an ORM model (unless the whole ORM model was not being used) # See prevent_drop_alter method docstring. DROP_ALTER_EXCEPTIONS = [ # Drops and alters from initial migration have already been accepted '921e1a36b076', # Making shared_targets explicitly nullable (DB already allowed it) 'c92a3e68beed', # Migration 89aa6f9639f9 doesn't fail because it's for a SQLAlquemy # internal table, and we only check Cinder's tables. # Increasing resource column max length to 300 is acceptable, since # it's a backward compatible change. 
'b8660621f1b9', # Making use_quota non-nullable is acceptable since on the last release # we added an online migration to set the value, but we also provide # a default on the OVO, the ORM, and the DB engine. '9ab1b092a404', # Removing allocated_id and allocated columns is acceptable now since # we stopped using them in the code on the previous release. # TODO: (D Release) Uncomment next line # 'afd7494d43b7', ] FORBIDDEN_METHODS = ('alembic.operations.Operations.alter_column', 'alembic.operations.Operations.drop_column', 'alembic.operations.Operations.drop_table', 'alembic.operations.BatchOperations.alter_column', 'alembic.operations.BatchOperations.drop_column') VARCHAR_TYPE = sqlalchemy.types.VARCHAR def setUp(self): super().setUp() self.engine = enginefacade.writer.get_engine() self.patch(api, 'get_engine', lambda: self.engine) self.config = migration._find_alembic_conf() self.init_version = '921e1a36b076' @prevent_drop_alter def _migrate_up(self, revision, connection): check_method = getattr(self, f'_check_{revision}', None) if revision != self.init_version: # no tests for the initial revision self.assertIsNotNone( check_method, f"API DB Migration {revision} doesn't have a test; add one" ) pre_upgrade = getattr(self, f'_pre_upgrade_{revision}', None) if pre_upgrade: pre_upgrade(connection) alembic_api.upgrade(self.config, revision) if check_method: check_method(connection) def test_single_base_revision(self): """Ensure we only have a single base revision. There's no good reason for us to have diverging history, so validate that only one base revision exists. This will prevent simple errors where people forget to specify the base revision. If this fail for your change, look for migrations that do not have a 'revises' line in them. """ script = alembic_script.ScriptDirectory.from_config(self.config) self.assertEqual(1, len(script.get_bases())) def test_single_head_revision(self): """Ensure we only have a single head revision. There's no good reason for us to have diverging history, so validate that only one head revision exists. This will prevent merge conflicts adding additional head revision points. If this fail for your change, look for migrations with the same 'revises' line in them. 
""" script = alembic_script.ScriptDirectory.from_config(self.config) self.assertEqual(1, len(script.get_heads())) def test_walk_versions(self): with self.engine.begin() as connection: self.config.attributes['connection'] = connection script = alembic_script.ScriptDirectory.from_config(self.config) revisions = list(script.walk_revisions()) # Need revisions from older to newer so the walk works as intended revisions.reverse() for revision_script in revisions: self._migrate_up(revision_script.revision, connection) def test_db_version_alembic(self): migration.db_sync() head = alembic_script.ScriptDirectory.from_config( self.config, ).get_current_head() self.assertEqual(head, migration.db_version()) def _pre_upgrade_c92a3e68beed(self, connection): """Test shared_targets is nullable.""" table = db_utils.get_table(connection, 'volumes') self._previous_type = type(table.c.shared_targets.type) def _check_c92a3e68beed(self, connection): """Test shared_targets is nullable.""" table = db_utils.get_table(connection, 'volumes') self.assertIn('shared_targets', table.c) # Type hasn't changed self.assertIsInstance(table.c.shared_targets.type, self._previous_type) # But it's nullable self.assertTrue(table.c.shared_targets.nullable) def _check_daa98075b90d(self, connection): """Test resources have indexes.""" for table in ('groups', 'group_snapshots', 'volumes', 'snapshots', 'backups'): db_utils.index_exists(connection, table, f'{table}_deleted_project_id_idx') db_utils.index_exists(connection, 'volumes', 'volumes_deleted_host_idx') def _check_89aa6f9639f9(self, connection): # the table only existed on legacy deployments: there's no way to check # for its removal without creating it first, which is dumb pass def _pre_upgrade_b8660621f1b9(self, connection): """Test resource columns were limited to 255 chars before.""" for table_name in ('quotas', 'quota_classes', 'reservations'): table = db_utils.get_table(connection, table_name) self.assertIn('resource', table.c) self.assertIsInstance(table.c.resource.type, self.VARCHAR_TYPE) self.assertEqual(255, table.c.resource.type.length) def _check_b8660621f1b9(self, connection): """Test resource columns can be up to 300 chars.""" for table_name in ('quotas', 'quota_classes', 'reservations'): table = db_utils.get_table(connection, table_name) self.assertIn('resource', table.c) self.assertIsInstance(table.c.resource.type, self.VARCHAR_TYPE) self.assertEqual(300, table.c.resource.type.length) def _check_9ab1b092a404(self, connection): """Test use_quota is non-nullable.""" volumes = db_utils.get_table(connection, 'volumes') self.assertFalse(volumes.c.use_quota.nullable) snapshots = db_utils.get_table(connection, 'snapshots') self.assertFalse(snapshots.c.use_quota.nullable) def _check_b7b88f50aab5(self, connection): """Test consistencygroups quota was removed.""" quota_classes = db_utils.get_table(connection, 'quota_classes') res = connection.execute( quota_classes.select().where( sqlalchemy.and_( quota_classes.c.resource == 'consistencygroups', ~quota_classes.c.deleted, quota_classes.c.class_name == 'default') )).all() self.assertListEqual([], res) def _check_9c74c1c6971f(self, connection): """Test backup related quota was added.""" quota_classes = db_utils.get_table(connection, 'quota_classes') res = connection.execute( sqlalchemy.select(quota_classes.c.resource).where( sqlalchemy.and_( quota_classes.c.resource.startswith('backup'), ~quota_classes.c.deleted, quota_classes.c.class_name == 'default') )).all() self.assertEqual(2, len(res)) self.assertEqual({'backups', 
'backup_gigabytes'}, {r[0] for r in res}) # TODO: (D Release) Uncomment method _check_afd7494d43b7 and create a # migration with hash afd7494d43b7 using the following command: # $ tox -e venv -- alembic -c cinder/db/alembic.ini revision \ # --rev-id afd7494d43b7 -m 'drop quota leftovers' # Then replace the upgrade method in file # cinder/db/migrations/versions/afd7494d43b7_drop_quota_leftovers.py with # the uncommented upgrade method below, removing the unused sqlalchemy # import and adding "from oslo_db.sqlalchemy import utils as db_utils" # # def _check_afd7494d43b7(self, connection): # """Test drop allocated related columns.""" # reservations = db_utils.get_table(connection, 'reservations') # self.assertNotIn('allocated_id', reservations.c) # quotas = db_utils.get_table(connection, 'quotas') # self.assertNotIn('allocated', quotas.c) # # def upgrade(): # connection = op.get_bind() # # SQLite doesn't support dropping columns, so we use a workaround # if connection.engine.name == 'sqlite': # with op.batch_alter_table('reservations') as batch_op: # batch_op.drop_index(op.f('ix_reservations_allocated_id')) # batch_op.drop_column('allocated_id') # with op.batch_alter_table('quotas') as batch_op: # batch_op.drop_column('allocated') # # else: # # The foreign key is unnamed and Cinder doesn't set a naming # # convention, so the name was decided by the DB engine on # # creation, and each engine uses a different convention, so we # # find out the name. # fk_name = db_utils.get_foreign_key_constraint_name( # connection, 'reservations', 'allocated_id') # op.drop_constraint(fk_name, 'reservations', type_='foreignkey') # # # Find out the name of the index as well # indexes_names = db_utils.get_indexes(connection, 'reservations') # index_name = [idx['name'] # for idx in indexes_names # if idx['column_names'] == ['allocated_id']] # # There HAS to be an index, but just to be safe... # if index_name: # # Use op.f to indicate that the index name already has the # # naming convention applied to it. # op.drop_index(op.f(index_name[0]), table_name='reservations') # # op.drop_column('reservations', 'allocated_id') # op.drop_column('quotas', 'allocated') # class TestMigrationsWalkSQLite( MigrationsWalk, test_fixtures.OpportunisticDBTestMixin, test_base.BaseTestCase, ): pass class TestMigrationsWalkMySQL( MigrationsWalk, test_fixtures.OpportunisticDBTestMixin, test_base.BaseTestCase, ): FIXTURE = test_fixtures.MySQLOpportunisticFixture class TestMigrationsWalkPostgreSQL( MigrationsWalk, test_fixtures.OpportunisticDBTestMixin, test_base.BaseTestCase, ): FIXTURE = test_fixtures.PostgresqlOpportunisticFixture ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/db/test_name_id.py0000664000175000017500000000446400000000000022056 0ustar00zuulzuul00000000000000# Copyright 2013 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
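# --- Editor's annotation (not part of the original module) ------------------
# A volume row carries both 'id' and '_name_id'; the backend-facing name is
# rendered from CONF.volume_name_template using name_id, so the name can track
# a different backend object after migration while the API id stays the same.
# The check these tests repeat is essentially:
#
#   expected_name = CONF.volume_name_template % vol_ref['name_id']
#   assert expected_name == vol_ref['name']
# -----------------------------------------------------------------------------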
"""Tests for volume name_id.""" from oslo_config import cfg from cinder import context from cinder import db from cinder.tests.unit import fake_constants as fake from cinder.tests.unit import test from cinder.tests.unit import utils as testutils CONF = cfg.CONF class NameIDsTestCase(test.TestCase): """Test cases for naming volumes with name_id.""" def setUp(self): super(NameIDsTestCase, self).setUp() self.ctxt = context.RequestContext(user_id=fake.USER_ID, project_id=fake.PROJECT_ID) def test_name_id_same(self): """New volume should have same 'id' and 'name_id'.""" vol_ref = testutils.create_volume(self.ctxt, size=1) self.assertEqual(vol_ref['name_id'], vol_ref['id']) expected_name = CONF.volume_name_template % vol_ref['id'] self.assertEqual(expected_name, vol_ref['name']) def test_name_id_diff(self): """Change name ID to mimic volume after migration.""" vol_ref = testutils.create_volume(self.ctxt, size=1, _name_id=fake.VOLUME2_ID) vol_ref = db.volume_get(self.ctxt, vol_ref['id']) expected_name = CONF.volume_name_template % fake.VOLUME2_ID self.assertEqual(expected_name, vol_ref['name']) def test_name_id_snapshot_volume_name(self): """Make sure snapshot['volume_name'] is updated.""" vol_ref = testutils.create_volume(self.ctxt, size=1, _name_id=fake.VOLUME2_ID) snap_ref = testutils.create_snapshot(self.ctxt, vol_ref['id']) expected_name = CONF.volume_name_template % fake.VOLUME2_ID self.assertEqual(expected_name, snap_ref['volume_name']) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/db/test_orm_relationships.py0000664000175000017500000000341700000000000024220 0ustar00zuulzuul00000000000000# Copyright 2020 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""Tests for code that makes assumptions about ORM relationships.""" from sqlalchemy_utils import functions as saf from cinder.db.sqlalchemy import api as db_api from cinder.db.sqlalchemy import models from cinder.tests.unit import test class VolumeRelationshipsTestCase(test.TestCase): """Test cases for Volume ORM model relationshps.""" def test_volume_dependent_models_list(self): """Make sure the volume dependent tables list is accurate.""" # Addresses LP Bug #1542169 volume_declarative_base = saf.get_declarative_base(models.Volume) volume_fks = saf.get_referencing_foreign_keys(models.Volume) dependent_tables = [] for table, fks in saf.group_foreign_keys(volume_fks): dependent_tables.append(table) found_dependent_models = [] for table in dependent_tables: found_dependent_models.append(saf.get_class_by_table( volume_declarative_base, table)) self.assertEqual(len(found_dependent_models), len(db_api.VOLUME_DEPENDENT_MODELS)) for model in found_dependent_models: self.assertIn(model, db_api.VOLUME_DEPENDENT_MODELS) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/db/test_purge.py0000664000175000017500000004307700000000000021607 0ustar00zuulzuul00000000000000# Copyright (C) 2015 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""Tests for db purge.""" import datetime import uuid from oslo_db import exception as db_exc from oslo_db.sqlalchemy import utils as sqlalchemyutils from oslo_utils import timeutils from sqlalchemy.dialects import sqlite from cinder import context from cinder import db from cinder.db.sqlalchemy import api as db_api from cinder import exception from cinder.tests.unit import test class PurgeDeletedTest(test.TestCase): def setUp(self): super(PurgeDeletedTest, self).setUp() self.context = context.get_admin_context() # enable foreign keys self.engine = db_api.get_engine() if self.engine.url.get_dialect() == sqlite.dialect: with self.engine.connect() as conn: conn.connection.execute("PRAGMA foreign_keys = ON") self.volumes = sqlalchemyutils.get_table( self.engine, "volumes") # The volume_metadata table has a FK of volume_id self.vm = sqlalchemyutils.get_table( self.engine, "volume_metadata") self.vol_types = sqlalchemyutils.get_table( self.engine, "volume_types") # The volume_type_projects table has a FK of volume_type_id self.vol_type_proj = sqlalchemyutils.get_table( self.engine, "volume_type_projects") self.snapshots = sqlalchemyutils.get_table( self.engine, "snapshots") self.sm = sqlalchemyutils.get_table( self.engine, "snapshot_metadata") self.vgm = sqlalchemyutils.get_table( self.engine, "volume_glance_metadata") self.qos = sqlalchemyutils.get_table( self.engine, "quality_of_service_specs") self.uuidstrs = [] for unused in range(6): self.uuidstrs.append(uuid.uuid4().hex) # Add 6 rows to table with db_api.main_context_manager.writer.using(self.context): for uuidstr in self.uuidstrs: ins_stmt = self.volumes.insert().values(id=uuidstr, volume_type_id=uuidstr) self.context.session.execute(ins_stmt) ins_stmt = self.vm.insert().values(volume_id=uuidstr) self.context.session.execute(ins_stmt) ins_stmt = self.vgm.insert().values( volume_id=uuidstr, key='image_name', value='test') self.context.session.execute(ins_stmt) ins_stmt = self.vol_types.insert().values(id=uuidstr) self.context.session.execute(ins_stmt) ins_stmt = self.vol_type_proj.insert().\ values(volume_type_id=uuidstr) self.context.session.execute(ins_stmt) ins_stmt = self.snapshots.insert().values( id=uuidstr, volume_id=uuidstr, volume_type_id=uuidstr) self.context.session.execute(ins_stmt) ins_stmt = self.sm.insert().values(snapshot_id=uuidstr) self.context.session.execute(ins_stmt) ins_stmt = self.vgm.insert().values( snapshot_id=uuidstr, key='image_name', value='test') self.context.session.execute(ins_stmt) ins_stmt = self.qos.insert().values( id=uuidstr, key='QoS_Specs_Name', value='test') self.context.session.execute(ins_stmt) ins_stmt = self.vol_types.insert().values( id=uuid.uuid4().hex, qos_specs_id=uuidstr) self.context.session.execute(ins_stmt) ins_stmt = self.qos.insert().values( id=uuid.uuid4().hex, specs_id=uuidstr, key='desc', value='test') self.context.session.execute(ins_stmt) # Set 5 of them deleted # 2 are 60 days ago, 2 are 20 days ago, one is just now. 
now = timeutils.utcnow() old = timeutils.utcnow() - datetime.timedelta(days=20) older = timeutils.utcnow() - datetime.timedelta(days=60) make_vol_now = self.volumes.update().\ where(self.volumes.c.id.in_(self.uuidstrs[0:1]))\ .values(deleted_at=now, deleted=True) make_vol_old = self.volumes.update().\ where(self.volumes.c.id.in_(self.uuidstrs[1:3]))\ .values(deleted_at=old, deleted=True) make_vol_older = self.volumes.update().\ where(self.volumes.c.id.in_(self.uuidstrs[4:6]))\ .values(deleted_at=older, deleted=True) make_vol_meta_now = self.vm.update().\ where(self.vm.c.volume_id.in_(self.uuidstrs[0:1]))\ .values(deleted_at=now, deleted=True) make_vol_meta_old = self.vm.update().\ where(self.vm.c.volume_id.in_(self.uuidstrs[1:3]))\ .values(deleted_at=old, deleted=True) make_vol_meta_older = self.vm.update().\ where(self.vm.c.volume_id.in_(self.uuidstrs[4:6]))\ .values(deleted_at=older, deleted=True) make_vol_types_now = self.vol_types.update().\ where(self.vol_types.c.id.in_(self.uuidstrs[0:1]))\ .values(deleted_at=now, deleted=True) make_vol_types_old = self.vol_types.update().\ where(self.vol_types.c.id.in_(self.uuidstrs[1:3]))\ .values(deleted_at=old, deleted=True) make_vol_types_older = self.vol_types.update().\ where(self.vol_types.c.id.in_(self.uuidstrs[4:6]))\ .values(deleted_at=older, deleted=True) make_vol_type_proj_now = self.vol_type_proj.update().\ where(self.vol_type_proj.c.volume_type_id.in_(self.uuidstrs[0:1]))\ .values(deleted_at=now, deleted=True) make_vol_type_proj_old = self.vol_type_proj.update().\ where(self.vol_type_proj.c.volume_type_id.in_(self.uuidstrs[1:3]))\ .values(deleted_at=old, deleted=True) make_vol_type_proj_older = self.vol_type_proj.update().\ where(self.vol_type_proj.c.volume_type_id.in_(self.uuidstrs[4:6]))\ .values(deleted_at=older, deleted=True) make_snap_now = self.snapshots.update().\ where(self.snapshots.c.id.in_(self.uuidstrs[0:1]))\ .values(deleted_at=now, deleted=True) make_snap_old = self.snapshots.update().\ where(self.snapshots.c.id.in_(self.uuidstrs[1:3]))\ .values(deleted_at=old, deleted=True) make_snap_older = self.snapshots.update().\ where(self.snapshots.c.id.in_(self.uuidstrs[4:6]))\ .values(deleted_at=older, deleted=True) make_snap_meta_now = self.sm.update().\ where(self.sm.c.snapshot_id.in_(self.uuidstrs[0:1]))\ .values(deleted_at=now, deleted=True) make_snap_meta_old = self.sm.update().\ where(self.sm.c.snapshot_id.in_(self.uuidstrs[1:3]))\ .values(deleted_at=old, deleted=True) make_snap_meta_older = self.sm.update().\ where(self.sm.c.snapshot_id.in_(self.uuidstrs[4:6]))\ .values(deleted_at=older, deleted=True) make_vol_glance_meta_now = self.vgm.update().\ where(self.vgm.c.volume_id.in_(self.uuidstrs[0:1]))\ .values(deleted_at=now, deleted=True) make_vol_glance_meta_old = self.vgm.update().\ where(self.vgm.c.volume_id.in_(self.uuidstrs[1:3]))\ .values(deleted_at=old, deleted=True) make_vol_glance_meta_older = self.vgm.update().\ where(self.vgm.c.volume_id.in_(self.uuidstrs[4:6]))\ .values(deleted_at=older, deleted=True) make_snap_glance_meta_now = self.vgm.update().\ where(self.vgm.c.snapshot_id.in_(self.uuidstrs[0:1]))\ .values(deleted_at=now, deleted=True) make_snap_glance_meta_old = self.vgm.update().\ where(self.vgm.c.snapshot_id.in_(self.uuidstrs[1:3]))\ .values(deleted_at=old, deleted=True) make_snap_glance_meta_older = self.vgm.update().\ where(self.vgm.c.snapshot_id.in_(self.uuidstrs[4:6]))\ .values(deleted_at=older, deleted=True) make_qos_now = self.qos.update().where( 
self.qos.c.id.in_(self.uuidstrs[0:1])).values(deleted_at=now, deleted=True) make_qos_old = self.qos.update().where( self.qos.c.id.in_(self.uuidstrs[1:3])).values(deleted_at=old, deleted=True) make_qos_older = self.qos.update().where( self.qos.c.id.in_(self.uuidstrs[4:6])).values(deleted_at=older, deleted=True) make_qos_child_record_now = self.qos.update().where( self.qos.c.specs_id.in_(self.uuidstrs[0:1])).values( deleted_at=now, deleted=True) make_qos_child_record_old = self.qos.update().where( self.qos.c.specs_id.in_(self.uuidstrs[1:3])).values( deleted_at=old, deleted=True) make_qos_child_record_older = self.qos.update().where( self.qos.c.specs_id.in_(self.uuidstrs[4:6])).values( deleted_at=older, deleted=True) make_vol_types1_now = self.vol_types.update().where( self.vol_types.c.qos_specs_id.in_(self.uuidstrs[0:1])).values( deleted_at=now, deleted=True) make_vol_types1_old = self.vol_types.update().where( self.vol_types.c.qos_specs_id.in_(self.uuidstrs[1:3])).values( deleted_at=old, deleted=True) make_vol_types1_older = self.vol_types.update().where( self.vol_types.c.qos_specs_id.in_(self.uuidstrs[4:6])).values( deleted_at=older, deleted=True) with db_api.main_context_manager.writer.using(self.context): self.context.session.execute(make_vol_now) self.context.session.execute(make_vol_old) self.context.session.execute(make_vol_older) self.context.session.execute(make_vol_meta_now) self.context.session.execute(make_vol_meta_old) self.context.session.execute(make_vol_meta_older) self.context.session.execute(make_vol_types_now) self.context.session.execute(make_vol_types_old) self.context.session.execute(make_vol_types_older) self.context.session.execute(make_vol_type_proj_now) self.context.session.execute(make_vol_type_proj_old) self.context.session.execute(make_vol_type_proj_older) self.context.session.execute(make_snap_now) self.context.session.execute(make_snap_old) self.context.session.execute(make_snap_older) self.context.session.execute(make_snap_meta_now) self.context.session.execute(make_snap_meta_old) self.context.session.execute(make_snap_meta_older) self.context.session.execute(make_vol_glance_meta_now) self.context.session.execute(make_vol_glance_meta_old) self.context.session.execute(make_vol_glance_meta_older) self.context.session.execute(make_snap_glance_meta_now) self.context.session.execute(make_snap_glance_meta_old) self.context.session.execute(make_snap_glance_meta_older) self.context.session.execute(make_qos_now) self.context.session.execute(make_qos_old) self.context.session.execute(make_qos_older) self.context.session.execute(make_qos_child_record_now) self.context.session.execute(make_qos_child_record_old) self.context.session.execute(make_qos_child_record_older) self.context.session.execute(make_vol_types1_now) self.context.session.execute(make_vol_types1_old) self.context.session.execute(make_vol_types1_older) def test_purge_deleted_rows_in_zero_age_in(self): # Purge at age_in_days=0, should delete one more row db.purge_deleted_rows(self.context, age_in_days=0) with db_api.main_context_manager.writer.using(self.context): vol_rows = self.context.session.query(self.volumes).count() vol_meta_rows = self.context.session.query(self.vm).count() vol_type_rows = self.context.session.query(self.vol_types).count() vol_type_proj_rows = self.context.session.query( self.vol_type_proj, ).count() snap_rows = self.context.session.query(self.snapshots).count() snap_meta_rows = self.context.session.query(self.sm).count() vol_glance_meta_rows = 
self.context.session.query(self.vgm).count() qos_rows = self.context.session.query(self.qos).count() # Verify that we only have 1 rows now self.assertEqual(1, vol_rows) self.assertEqual(1, vol_meta_rows) self.assertEqual(3, vol_type_rows) self.assertEqual(1, vol_type_proj_rows) self.assertEqual(1, snap_rows) self.assertEqual(1, snap_meta_rows) self.assertEqual(2, vol_glance_meta_rows) self.assertEqual(2, qos_rows) def test_purge_deleted_rows_old(self): # Purge at 30 days old, should only delete 2 rows db.purge_deleted_rows(self.context, age_in_days=30) with db_api.main_context_manager.writer.using(self.context): vol_rows = self.context.session.query(self.volumes).count() vol_meta_rows = self.context.session.query(self.vm).count() vol_type_rows = self.context.session.query(self.vol_types).count() vol_type_proj_rows = self.context.session.query( self.vol_type_proj, ).count() snap_rows = self.context.session.query(self.snapshots).count() snap_meta_rows = self.context.session.query(self.sm).count() vol_glance_meta_rows = self.context.session.query(self.vgm).count() qos_rows = self.context.session.query(self.qos).count() # Verify that we only deleted 2 self.assertEqual(4, vol_rows) self.assertEqual(4, vol_meta_rows) self.assertEqual(9, vol_type_rows) self.assertEqual(4, vol_type_proj_rows) self.assertEqual(4, snap_rows) self.assertEqual(4, snap_meta_rows) self.assertEqual(8, vol_glance_meta_rows) self.assertEqual(8, qos_rows) def test_purge_deleted_rows_older(self): # Purge at 10 days old now, should delete 2 more rows db.purge_deleted_rows(self.context, age_in_days=10) with db_api.main_context_manager.writer.using(self.context): vol_rows = self.context.session.query(self.volumes).count() vol_meta_rows = self.context.session.query(self.vm).count() vol_type_rows = self.context.session.query(self.vol_types).count() vol_type_proj_rows = self.context.session.query( self.vol_type_proj, ).count() snap_rows = self.context.session.query(self.snapshots).count() snap_meta_rows = self.context.session.query(self.sm).count() vol_glance_meta_rows = self.context.session.query(self.vgm).count() qos_rows = self.context.session.query(self.qos).count() # Verify that we only have 2 rows now self.assertEqual(2, vol_rows) self.assertEqual(2, vol_meta_rows) self.assertEqual(5, vol_type_rows) self.assertEqual(2, vol_type_proj_rows) self.assertEqual(2, snap_rows) self.assertEqual(2, snap_meta_rows) self.assertEqual(4, vol_glance_meta_rows) self.assertEqual(4, qos_rows) def test_purge_deleted_rows_bad_args(self): # Test with no age argument self.assertRaises(TypeError, db.purge_deleted_rows, self.context) # Test purge with non-integer self.assertRaises(exception.InvalidParameterValue, db.purge_deleted_rows, self.context, age_in_days='ten') def test_purge_deleted_rows_integrity_failure(self): # add new entry in volume and volume_admin_metadata for # integrity check uuid_str = uuid.uuid4().hex ins_stmt = self.volumes.insert().values(id=uuid_str, volume_type_id=uuid_str) with db_api.main_context_manager.writer.using(self.context): self.context.session.execute(ins_stmt) ins_stmt = self.vm.insert().values(volume_id=uuid_str) with db_api.main_context_manager.writer.using(self.context): self.context.session.execute(ins_stmt) # set volume record to deleted 20 days ago old = timeutils.utcnow() - datetime.timedelta(days=20) make_old = self.volumes.update().where( self.volumes.c.id.in_([uuid_str])).values(deleted_at=old, deleted=True) with db_api.main_context_manager.writer.using(self.context): 
self.context.session.execute(make_old) # Verify that purge_deleted_rows fails due to Foreign Key constraint self.assertRaises(db_exc.DBReferenceError, db.purge_deleted_rows, self.context, age_in_days=10) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/db/test_qos_specs.py0000664000175000017500000002126300000000000022455 0ustar00zuulzuul00000000000000# Copyright (C) 2013 eBay Inc. # Copyright (C) 2013 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Tests for quality_of_service_specs table.""" import time from cinder import context from cinder import db from cinder import exception from cinder.tests.unit import fake_constants as fake from cinder.tests.unit import test from cinder.volume import volume_types def fake_qos_specs_get_by_name(context, name, session=None, inactive=False): pass class QualityOfServiceSpecsTableTestCase(test.TestCase): """Test case for QualityOfServiceSpecs model.""" def setUp(self): super(QualityOfServiceSpecsTableTestCase, self).setUp() self.ctxt = context.RequestContext(user_id=fake.USER_ID, project_id=fake.PROJECT_ID, is_admin=True) def _create_qos_specs(self, name, consumer='back-end', values=None): """Create a transfer object.""" if values is None: values = {'key1': 'value1', 'key2': 'value2'} specs = {'name': name, 'consumer': consumer, 'specs': values} return db.qos_specs_create(self.ctxt, specs)['id'] def test_qos_specs_create(self): # If there is qos specs with the same name exists, # a QoSSpecsExists exception will be raised. 
name = 'QoSSpecsCreationTest' self._create_qos_specs(name) self.assertRaises(exception.QoSSpecsExists, db.qos_specs_create, self.ctxt, dict(name=name)) specs_id = self._create_qos_specs('NewName') query_id = db.qos_specs_get_by_name( self.ctxt, 'NewName')['id'] self.assertEqual(specs_id, query_id) def test_qos_specs_get(self): qos_spec = {'name': 'Name1', 'consumer': 'front-end', 'specs': {'key1': 'foo', 'key2': 'bar'}} specs_id = self._create_qos_specs(qos_spec['name'], qos_spec['consumer'], qos_spec['specs']) fake_id = fake.WILL_NOT_BE_FOUND_ID self.assertRaises(exception.QoSSpecsNotFound, db.qos_specs_get, self.ctxt, fake_id) specs_returned = db.qos_specs_get(self.ctxt, specs_id) qos_spec['created_at'] = specs_returned['created_at'] qos_spec['id'] = specs_id self.assertDictEqual(qos_spec, specs_returned) def test_qos_specs_get_all(self): qos_list = [ {'name': 'Name1', 'consumer': 'front-end', 'specs': {'key1': 'v1', 'key2': 'v2'}}, {'name': 'Name2', 'consumer': 'back-end', 'specs': {'key1': 'v3', 'key2': 'v4'}}, {'name': 'Name3', 'consumer': 'back-end', 'specs': {'key1': 'v5', 'key2': 'v6'}}] for index, qos in enumerate(qos_list): qos['id'] = self._create_qos_specs(qos['name'], qos['consumer'], qos['specs']) specs = db.qos_specs_get(self.ctxt, qos['id']) qos_list[index]['created_at'] = specs['created_at'] specs_list_returned = db.qos_specs_get_all(self.ctxt) self.assertEqual(len(qos_list), len(specs_list_returned), "Unexpected number of qos specs records") for expected_qos in qos_list: self.assertIn(expected_qos, specs_list_returned) def test_qos_specs_delete(self): name = str(int(time.time())) specs_id = self._create_qos_specs(name) db.qos_specs_delete(self.ctxt, specs_id) self.assertRaises(exception.QoSSpecsNotFound, db.qos_specs_get, self.ctxt, specs_id) def test_qos_specs_item_delete(self): name = str(int(time.time())) value = dict(foo='Foo', bar='Bar') specs_id = self._create_qos_specs(name, 'front-end', value) del value['foo'] expected = {'name': name, 'id': specs_id, 'consumer': 'front-end', 'specs': value} db.qos_specs_item_delete(self.ctxt, specs_id, 'foo') specs = db.qos_specs_get(self.ctxt, specs_id) expected['created_at'] = specs['created_at'] self.assertDictEqual(expected, specs) def test_associate_type_with_qos(self): self.assertRaises(exception.VolumeTypeNotFound, db.volume_type_qos_associate, self.ctxt, fake.VOLUME_ID, fake.QOS_SPEC_ID) type_id = volume_types.create(self.ctxt, 'TypeName')['id'] specs_id = self._create_qos_specs('FakeQos') db.volume_type_qos_associate(self.ctxt, type_id, specs_id) res = db.qos_specs_associations_get(self.ctxt, specs_id) self.assertEqual(1, len(res)) self.assertEqual(type_id, res[0]['id']) self.assertEqual(specs_id, res[0]['qos_specs_id']) def test_qos_associations_get(self): self.assertRaises(exception.QoSSpecsNotFound, db.qos_specs_associations_get, self.ctxt, fake.WILL_NOT_BE_FOUND_ID) type_id = volume_types.create(self.ctxt, 'TypeName')['id'] specs_id = self._create_qos_specs('FakeQos') res = db.qos_specs_associations_get(self.ctxt, specs_id) self.assertEqual(0, len(res)) db.volume_type_qos_associate(self.ctxt, type_id, specs_id) res = db.qos_specs_associations_get(self.ctxt, specs_id) self.assertEqual(1, len(res)) self.assertEqual(type_id, res[0]['id']) self.assertEqual(specs_id, res[0]['qos_specs_id']) type0_id = volume_types.create(self.ctxt, 'Type0Name')['id'] db.volume_type_qos_associate(self.ctxt, type0_id, specs_id) res = db.qos_specs_associations_get(self.ctxt, specs_id) self.assertEqual(2, len(res)) self.assertEqual(specs_id, 
res[0]['qos_specs_id']) self.assertEqual(specs_id, res[1]['qos_specs_id']) def test_qos_specs_disassociate(self): type_id = volume_types.create(self.ctxt, 'TypeName')['id'] specs_id = self._create_qos_specs('FakeQos') db.volume_type_qos_associate(self.ctxt, type_id, specs_id) res = db.qos_specs_associations_get(self.ctxt, specs_id) self.assertEqual(type_id, res[0]['id']) self.assertEqual(specs_id, res[0]['qos_specs_id']) db.qos_specs_disassociate(self.ctxt, specs_id, type_id) res = db.qos_specs_associations_get(self.ctxt, specs_id) self.assertEqual(0, len(res)) res = db.volume_type_get(self.ctxt, type_id) self.assertIsNone(res['qos_specs_id']) def test_qos_specs_disassociate_all(self): specs_id = self._create_qos_specs('FakeQos') type1_id = volume_types.create(self.ctxt, 'Type1Name')['id'] type2_id = volume_types.create(self.ctxt, 'Type2Name')['id'] type3_id = volume_types.create(self.ctxt, 'Type3Name')['id'] db.volume_type_qos_associate(self.ctxt, type1_id, specs_id) db.volume_type_qos_associate(self.ctxt, type2_id, specs_id) db.volume_type_qos_associate(self.ctxt, type3_id, specs_id) res = db.qos_specs_associations_get(self.ctxt, specs_id) self.assertEqual(3, len(res)) db.qos_specs_disassociate_all(self.ctxt, specs_id) res = db.qos_specs_associations_get(self.ctxt, specs_id) self.assertEqual(0, len(res)) def test_qos_specs_update(self): name = 'FakeName' specs_id = self._create_qos_specs(name) value = {'consumer': 'both', 'specs': {'key2': 'new_value2', 'key3': 'value3'}} self.assertRaises(exception.QoSSpecsNotFound, db.qos_specs_update, self.ctxt, fake.WILL_NOT_BE_FOUND_ID, value) db.qos_specs_update(self.ctxt, specs_id, value) specs = db.qos_specs_get(self.ctxt, specs_id) value['created_at'] = specs['created_at'] self.assertEqual('new_value2', specs['specs']['key2']) self.assertEqual('value3', specs['specs']['key3']) self.assertEqual('both', specs['consumer']) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/db/test_reset_backend.py0000664000175000017500000000526000000000000023246 0ustar00zuulzuul00000000000000# Copyright (c) 2018 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
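# A minimal usage sketch for the API exercised below (illustrative only;
# the arguments mirror the calls made in these tests):
#
#   db.reset_active_backend(ctxt, True, 'fake-backend-id', service.host)
#
# The call is only expected to succeed for a service that is both disabled
# and frozen; the tests below check that an enabled service raises
# ServiceNotFound and a disabled-but-unfrozen service raises
# ServiceUnavailable.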
"""Tests for resetting active backend replication parameters.""" from cinder import db from cinder import exception from cinder.tests.unit import test_db_api from cinder.tests.unit import utils class ResetActiveBackendCase(test_db_api.BaseTest): """Unit tests for cinder.db.api.reset_active_backend.""" def test_enabled_service(self): """Test that enabled services cannot be queried.""" service_overrides = {'topic': 'cinder-volume'} service = utils.create_service(self.ctxt, values=service_overrides) self.assertRaises(exception.ServiceNotFound, db.reset_active_backend, self.ctxt, True, 'fake-backend-id', service.host) def test_disabled_service(self): """Test that non-frozen services are rejected.""" service_overrides = {'topic': 'cinder-volume', 'disabled': True} service = utils.create_service(self.ctxt, values=service_overrides) self.assertRaises(exception.ServiceUnavailable, db.reset_active_backend, self.ctxt, True, 'fake-backend-id', service.host) def test_disabled_and_frozen_service(self): """Test that disabled and frozen services are updated correctly.""" service_overrides = {'topic': 'cinder-volume', 'disabled': True, 'frozen': True, 'replication_status': 'failed-over', 'active_backend_id': 'seconary'} service = utils.create_service(self.ctxt, values=service_overrides) db.reset_active_backend(self.ctxt, True, 'fake-backend-id', service.host) db_service = db.service_get(self.ctxt, service.id) self.assertFalse(db_service.disabled) self.assertEqual('', db_service.disabled_reason) self.assertIsNone(db_service.active_backend_id) self.assertEqual('enabled', db_service.replication_status) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/db/test_transfers.py0000664000175000017500000002063100000000000022463 0ustar00zuulzuul00000000000000# Copyright (C) 2013 Hewlett-Packard Development Company, L.P. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Tests for transfers table.""" from cinder import context from cinder import db from cinder.db.sqlalchemy import api as db_api from cinder.db.sqlalchemy import models from cinder import exception from cinder.tests.unit import fake_constants as fake from cinder.tests.unit import test from cinder.tests.unit import utils class TransfersTableTestCase(test.TestCase): """Test case for transfers model.""" def setUp(self): super(TransfersTableTestCase, self).setUp() self.ctxt = context.RequestContext(user_id=fake.USER_ID, project_id=fake.PROJECT_ID) def _create_transfer(self, volume_id=None, source_project_id=None): """Create a transfer object.""" transfer = {'display_name': 'display_name', 'salt': 'salt', 'crypt_hash': 'crypt_hash'} if volume_id is not None: transfer['volume_id'] = volume_id if source_project_id is not None: transfer['source_project_id'] = source_project_id return db.transfer_create(self.ctxt, transfer)['id'] def test_transfer_create(self): # If the volume_id is Null a KeyError exception will be raised. 
self.assertRaises(KeyError, self._create_transfer) volume_id = utils.create_volume(self.ctxt)['id'] self._create_transfer(volume_id) def test_transfer_create_not_available(self): volume_id = utils.create_volume(self.ctxt, size=1, status='notavailable')['id'] self.assertRaises(exception.InvalidVolume, self._create_transfer, volume_id) def test_transfer_get(self): volume_id1 = utils.create_volume(self.ctxt)['id'] xfer_id1 = self._create_transfer(volume_id1) xfer = db.transfer_get(self.ctxt, xfer_id1) self.assertEqual(volume_id1, xfer.volume_id, "Unexpected volume_id") nctxt = context.RequestContext(user_id='new_user_id', project_id='new_project_id') self.assertRaises(exception.TransferNotFound, db.transfer_get, nctxt, xfer_id1) xfer = db.transfer_get(nctxt.elevated(), xfer_id1) self.assertEqual(volume_id1, xfer.volume_id, "Unexpected volume_id") def test_transfer_get_all(self): volume_id1 = utils.create_volume(self.ctxt)['id'] volume_id2 = utils.create_volume(self.ctxt)['id'] self._create_transfer(volume_id1) self._create_transfer(volume_id2) self.assertRaises(exception.NotAuthorized, db.transfer_get_all, self.ctxt) xfer = db.transfer_get_all(context.get_admin_context()) self.assertEqual(2, len(xfer), "Unexpected number of transfer records") xfer = db.transfer_get_all_by_project(self.ctxt, self.ctxt.project_id) self.assertEqual(2, len(xfer), "Unexpected number of transfer records") nctxt = context.RequestContext(user_id=fake.USER2_ID, project_id=fake.PROJECT2_ID) self.assertRaises(exception.NotAuthorized, db.transfer_get_all_by_project, nctxt, self.ctxt.project_id) xfer = db.transfer_get_all_by_project(nctxt.elevated(), self.ctxt.project_id) self.assertEqual(2, len(xfer), "Unexpected number of transfer records") def test_transfer_destroy(self): volume_id = utils.create_volume(self.ctxt)['id'] volume_id2 = utils.create_volume(self.ctxt)['id'] xfer_id1 = self._create_transfer(volume_id) xfer_id2 = self._create_transfer(volume_id2) xfer = db.transfer_get_all(context.get_admin_context()) self.assertEqual(2, len(xfer), "Unexpected number of transfer records") self.assertFalse(xfer[0]['deleted'], "Deleted flag is set") db.transfer_destroy(self.ctxt, xfer_id1) xfer = db.transfer_get_all(context.get_admin_context()) self.assertEqual(1, len(xfer), "Unexpected number of transfer records") self.assertEqual(xfer[0]['id'], xfer_id2, "Unexpected value for Transfer id") nctxt = context.RequestContext(user_id=fake.USER2_ID, project_id=fake.PROJECT2_ID) self.assertRaises(exception.TransferNotFound, db.transfer_destroy, nctxt, xfer_id2) db.transfer_destroy(nctxt.elevated(), xfer_id2) xfer = db.transfer_get_all(context.get_admin_context()) self.assertEqual(0, len(xfer), "Unexpected number of transfer records") def test_transfer_accept(self): volume = utils.create_volume(self.ctxt) xfer_id = self._create_transfer(volume['id'], volume['project_id']) nctxt = context.RequestContext(user_id=fake.USER2_ID, project_id=fake.PROJECT2_ID) xfer = db.transfer_get(nctxt.elevated(), xfer_id) self.assertEqual(volume.project_id, xfer['source_project_id']) self.assertFalse(xfer['accepted']) self.assertIsNone(xfer['destination_project_id']) db.transfer_accept(nctxt.elevated(), xfer_id, fake.USER2_ID, fake.PROJECT2_ID) nctxt_admin = nctxt.elevated() with db_api.main_context_manager.reader.using(nctxt_admin): xfer = db_api.model_query( nctxt_admin, models.Transfer, read_deleted='yes' ).filter_by(id=xfer_id).first() self.assertEqual(volume.project_id, xfer['source_project_id']) self.assertTrue(xfer['accepted']) 
self.assertEqual(fake.PROJECT2_ID, xfer['destination_project_id']) def test_transfer_accept_with_snapshots(self): volume_id = utils.create_volume(self.ctxt)['id'] snapshot_id1 = utils.create_snapshot(self.ctxt, volume_id, status='available')['id'] snapshot_id2 = utils.create_snapshot(self.ctxt, volume_id, status='available')['id'] xfer_id = self._create_transfer(volume_id) nctxt = context.RequestContext(user_id=fake.USER2_ID, project_id=fake.PROJECT2_ID) db.transfer_accept(nctxt.elevated(), xfer_id, fake.USER2_ID, fake.PROJECT2_ID) self.assertEqual(fake.PROJECT2_ID, db.snapshot_get(nctxt, snapshot_id1)['project_id']) self.assertEqual(fake.PROJECT2_ID, db.snapshot_get(nctxt, snapshot_id2)['project_id']) def test_transfer_accept_with_snapshots_invalid_status(self): volume_id = utils.create_volume(self.ctxt)['id'] snapshot_id1 = utils.create_snapshot(self.ctxt, volume_id, status='available')['id'] snapshot_id2 = utils.create_snapshot(self.ctxt, volume_id)['id'] xfer_id = self._create_transfer(volume_id) nctxt = context.RequestContext(user_id=fake.USER2_ID, project_id=fake.PROJECT2_ID) self.assertRaises(exception.InvalidSnapshot, db.transfer_accept, nctxt.elevated(), xfer_id, fake.USER2_ID, fake.PROJECT2_ID) self.assertEqual(fake.PROJECT_ID, db.snapshot_get(self.ctxt, snapshot_id1)['project_id']) self.assertEqual(fake.PROJECT_ID, db.snapshot_get(self.ctxt, snapshot_id2)['project_id']) self.assertEqual('awaiting-transfer', db.volume_get(self.ctxt, volume_id)['status']) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/db/test_volume_type.py0000664000175000017500000001511500000000000023025 0ustar00zuulzuul00000000000000# Copyright 2016 Intel Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""Tests for volume type.""" from cinder import context from cinder import db from cinder import exception from cinder.tests.unit import fake_constants as fake from cinder.tests.unit import test from cinder.tests.unit import utils from cinder.volume import volume_types class VolumeTypeTestCase(test.TestCase): """Test cases for volume type.""" def setUp(self): super(VolumeTypeTestCase, self).setUp() self.ctxt = context.RequestContext(user_id=fake.USER_ID, project_id=fake.PROJECT_ID, is_admin=True) def test_volume_type_delete(self): volume_type = db.volume_type_create(self.ctxt, {'name': 'fake volume type'}) volume_types.destroy(self.ctxt, volume_type['id']) self.assertRaises(exception.VolumeTypeNotFound, volume_types.get_by_name_or_id, self.ctxt, volume_type['id']) def test_volume_db_delete_last_type(self): default = volume_types.get_default_volume_type() self.assertRaises(exception.VolumeTypeDeletionError, db.volume_type_destroy, self.ctxt, default['id']) def test_volume_type_delete_with_volume_in_use(self): volume_type = db.volume_type_create(self.ctxt, {'name': 'fake volume type'}) volume = db.volume_create(self.ctxt, {'volume_type_id': volume_type['id']}) self.assertRaises(exception.VolumeTypeInUse, volume_types.destroy, self.ctxt, volume_type['id']) db.volume_destroy(self.ctxt, volume['id']) volume_types.destroy(self.ctxt, volume_type['id']) def test_volume_type_delete_with_group_in_use(self): volume_type = db.volume_type_create(self.ctxt, {'name': 'fake volume type'}) group = db.group_create(self.ctxt, {}) db.group_volume_type_mapping_create(self.ctxt, group['id'], volume_type['id']) self.assertRaises(exception.VolumeTypeInUse, volume_types.destroy, self.ctxt, volume_type['id']) db.group_destroy(self.ctxt, group['id']) volume_types.destroy(self.ctxt, volume_type['id']) def test_volume_type_mark_in_use_exists(self): volume_type = db.volume_type_create( self.ctxt, {'name': 'fake volume type'}, ) group = db.group_create(self.ctxt, {}) db.group_volume_type_mapping_create( self.ctxt, group['id'], volume_type['id'], ) self.assertRaises( exception.GroupVolumeTypeMappingExists, db.group_volume_type_mapping_create, self.ctxt, group['id'], volume_type['id'], ) def test_volume_type_delete_with_consistencygroups_in_use(self): volume_type = db.volume_type_create(self.ctxt, {'name': 'fake volume type'}) consistency_group1 = db.consistencygroup_create(self.ctxt, {'volume_type_id': volume_type['id']}) consistency_group2 = db.consistencygroup_create(self.ctxt, {'volume_type_id': volume_type['id']}) self.assertRaises(exception.VolumeTypeInUse, volume_types.destroy, self.ctxt, volume_type['id']) db.consistencygroup_destroy(self.ctxt, consistency_group1['id']) self.assertRaises(exception.VolumeTypeInUse, volume_types.destroy, self.ctxt, volume_type['id']) db.consistencygroup_destroy(self.ctxt, consistency_group2['id']) volume_types.destroy(self.ctxt, volume_type['id']) def test_volume_type_update(self): vol_type_ref = volume_types.create(self.ctxt, 'fake volume type') updates = dict(name='test_volume_type_update', description=None, is_public=None) db.volume_type_update(self.ctxt, vol_type_ref.id, updates) updated_vol_type = db.volume_type_get(self.ctxt, vol_type_ref.id) self.assertEqual('test_volume_type_update', updated_vol_type['name']) volume_types.destroy(self.ctxt, vol_type_ref.id) def test_volume_type_get_with_qos_specs(self): """Ensure volume types get can load qos_specs.""" qos_data = {'name': 'qos', 'consumer': 'front-end', 'specs': {'key': 'value', 'key2': 'value2'}} qos = 
utils.create_qos(self.ctxt, **qos_data) vol_type = db.volume_type_create(self.ctxt, {'name': 'my-vol-type', 'qos_specs_id': qos['id']}) db_vol_type = db.volume_type_get(self.ctxt, vol_type.id, expected_fields=['qos_specs']) expected = {('QoS_Specs_Name', 'qos'), ('consumer', 'front-end'), ('key', 'value'), ('key2', 'value2')} actual = {(spec.key, spec.value) for spec in db_vol_type['qos_specs']} self.assertEqual(expected, actual) def test_volume_type_get_with_projects(self): """Ensure volume types get can load projects.""" projects = [fake.PROJECT_ID, fake.PROJECT2_ID, fake.PROJECT3_ID] vol_type = db.volume_type_create(self.ctxt, {'name': 'my-vol-type'}, projects=projects) db_vol_type = db.volume_type_get(self.ctxt, vol_type.id, expected_fields=['projects']) self.assertEqual(set(projects), set(db_vol_type['projects'])) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/fake_cluster.py0000664000175000017500000000424200000000000021477 0ustar00zuulzuul00000000000000# Copyright (c) 2016 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_utils import timeutils from oslo_versionedobjects import fields from cinder.db.sqlalchemy import models from cinder import objects def cluster_basic_fields(): """Return basic fields for a cluster.""" return { 'id': 1, 'created_at': timeutils.utcnow(with_timezone=False), 'deleted': False, 'name': 'cluster_name', 'binary': 'cinder-volume', 'race_preventer': 0, } def fake_cluster_orm(**updates): """Create a fake ORM cluster instance.""" db_cluster = fake_db_cluster(**updates) del db_cluster['services'] cluster = models.Cluster(**db_cluster) return cluster def fake_db_cluster(**updates): """Helper method for fake_cluster_orm. Creates a complete dictionary filling missing fields based on the Cluster field definition (defaults and nullable). """ db_cluster = cluster_basic_fields() for name, field in objects.Cluster.fields.items(): if name in db_cluster: continue if field.default != fields.UnspecifiedDefault: db_cluster[name] = field.default elif field.nullable: db_cluster[name] = None else: raise Exception('fake_db_cluster needs help with %s.' % name) if updates: db_cluster.update(updates) return db_cluster def fake_cluster_ovo(context, **updates): """Create a fake Cluster versioned object.""" return objects.Cluster._from_db_object(context, objects.Cluster(), fake_cluster_orm(**updates)) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/fake_constants.py0000664000175000017500000001214400000000000022032 0ustar00zuulzuul00000000000000# Copyright 2016 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. ACTION_FAILED_ID = 'f26f181d-7891-4720-b022-b074ec1733ef' ACTION2_FAILED_ID = '02f53bd8-3514-485b-ba60-2722ef09c016' ALREADY_EXISTS_ID = '8f7495fe-5e44-4f33-81af-4b28e9b2952f' ATTACHMENT_ID = '4dc3bb12-ad75-41b9-ab2c-7609e743e600' ATTACHMENT2_ID = 'ac2439fe-c071-468f-94e3-547bedb95de0' BACKUP_ID = '707844eb-6d8a-4ac1-8b98-618e1c0b3a3a' BACKUP2_ID = '40e8462a-c9d8-462f-a810-b732a1790535' BACKUP3_ID = '30ae7641-017e-4221-a642-855687c8bd71' BACKUP4_ID = '23f8605b-8273-4f49-9b3d-1eeca81a63c2' BACKUP5_ID = '50c97b22-51ea-440b-8d01-ded20a55d7e0' CGSNAPSHOT_ID = '5e34cce3-bc97-46b7-a127-5cfb95ef445d' CGSNAPSHOT_NAME = 'cgsnapshot-5e34cce3-bc97-46b7-a127-5cfb95ef445d' CGSNAPSHOT2_ID = '5c36d762-d6ba-4f04-bd07-88a298cc410a' CGSNAPSHOT3_ID = '5f392156-fc03-492a-9cb8-e46a7eedaf33' CONSISTENCY_GROUP_ID = 'f18abf73-79ee-4f2b-8d4f-1c044148f117' CONSISTENCY_GROUP2_ID = '8afc8952-9dce-4228-9f8a-706c5cb5fc82' ENCRYPTION_KEY_ID = 'e8387001-745d-45d0-9e4e-0473815ef09a' ENCRYPTION_KEY2_ID = 'fa0dc8ce-79a4-4162-846f-c731b99f3113' ENCRYPTION_TYPE_ID = 'af2ae9b8-f40a-4cbc-9f51-b54eb5469405' IMAGE_ID = 'e79161cd-5f9d-4007-8823-81a807a64332' INSTANCE_ID = 'fa617131-cdbc-45dc-afff-f21f17ae054e' IN_USE_ID = '8ee42073-4ac2-4099-8c7a-d416630e6aee' INVALID_ID = 'f45dcab0-ff2a-46ec-b3b7-74d6f4bb0027' KEY_ID = '9112ecec-fb9d-4299-a948-ffb52650a5b5' OBJECT_ID = 'd7c5b12f-d57d-4762-99ab-db5f62ae3569' OBJECT2_ID = '51f5b8fa-c13c-48ba-8c9d-b470466cbc9c' OBJECT3_ID = '7bf5ffa9-18a2-4b64-aab4-0798b53ee4e7' PROJECT_ID = '89afd400-b646-4bbc-b12b-c0a4d63e5bd3' PROJECT2_ID = '452ebfbc-55d9-402a-87af-65061916c24b' PROJECT3_ID = 'f6c912d7-bf30-4b12-af81-a9e0b2f85f85' DOMAIN_ID = 'e747b880-4565-4d18-b8e2-310bdec83759' PROVIDER_ID = '60087173-e899-470a-9e3a-ba4cffa3e3e3' PROVIDER2_ID = '1060eccd-64bb-4ed2-86ce-aeaf135a97b8' PROVIDER3_ID = '63736819-1c95-440e-a873-b9d685afede5' PROVIDER4_ID = '7db06e02-26b6-4282-945d-7f6c9347a7b0' QOS_SPEC_ID = 'fc0f7527-79d7-44be-a4f6-3b24db8e11ac' QOS_SPEC2_ID = 'c561b69d-98d9-478c-815b-6de11f5a09c9' QOS_SPEC3_ID = '6034720b-f586-4302-a1eb-fe30672069f6' RAISE_ID = 'a56762e1-4a30-4008-b997-5a438ec9c457' REQUEST_ID = '253c2a22-931e-4104-a9ab-1d70071e4bd4' SNAPSHOT_ID = '253b2878-ec60-4793-ad19-e65496ec7aab' SNAPSHOT_NAME = 'snapshot-253b2878-ec60-4793-ad19-e65496ec7aab' SNAPSHOT2_ID = 'c02c44fa-5665-4a26-9e66-2ebaf25e5d2d' SNAPSHOT3_ID = '454f9970-1e05-4193-a3ed-5c390c3faa18' UPDATE_FAILED_ID = '110b29df-5e0f-4dbb-840c-ef5963d06933' USER_ID = 'c853ca26-e8ea-4797-8a52-ee124a013d0e' USER2_ID = '95f7b7ed-bd7f-426e-b05f-f1ffeb4f09df' USER3_ID = '5f590c70-7f2b-4240-a9b2-a37d343e2a63' VOLUME_ID = '1e5177e7-95e5-4a0f-b170-e45f4b469f6a' VOLUME_NAME = 'volume-1e5177e7-95e5-4a0f-b170-e45f4b469f6a' VOLUME2_ID = '43a09914-e495-475f-b862-0bda3c8918e4' VOLUME2_NAME = 'volume-43a09914-e495-475f-b862-0bda3c8918e4' VOLUME3_ID = '1b1cf149-219c-44ac-aee3-13121a7f86a7' VOLUME3_NAME = 'volume-1b1cf149-219c-44ac-aee3-13121a7f86a7' VOLUME4_ID = '904d4602-4301-4e9b-8df1-8133b51904e6' VOLUME4_NAME = 'volume-904d4602-4301-4e9b-8df1-8133b51904e6' VOLUME5_ID = '17b0e01d-3d2d-4c31-a1aa-c962420bc3dc' VOLUME5_NAME = 
'volume-17b0e01d-3d2d-4c31-a1aa-c962420bc3dc' VOLUME6_ID = '84375761-46e0-4df2-a567-02f0113428d7' VOLUME7_ID = '4d6722d1-fafb-455c-9a1c-bc542841c408' VOLUME8_ID = '439965c7-2ce5-4dff-81fe-549007b2b9da' VOLUME9_ID = '9bcc62a8-d407-4711-8471-8b9010ae10a3' VOLUME_NAME_ID = 'ee73d33c-52ed-4cb7-a8a9-2687c1205c22' VOLUME2_NAME_ID = '63fbdd21-03bc-4309-b867-2893848f86af' VOLUME_TYPE_ID = '4e9e6d23-eed0-426d-b90a-28f87a94b6fe' VOLUME_TYPE_NAME = 'vol_type_name' VOLUME_TYPE2_ID = 'c4daaf47-c530-4901-b28e-f5f0a359c4e6' VOLUME_TYPE3_ID = 'a3d55d15-eeb1-4816-ada9-bf82decc09b3' VOLUME_TYPE4_ID = '69943076-754d-4da8-8718-0b0117e9cab1' VOLUME_TYPE5_ID = '1c450d81-8aab-459e-b338-a6569139b835' WILL_NOT_BE_FOUND_ID = 'ce816f65-c5aa-46d6-bd62-5272752d584a' GROUP_TYPE_ID = '29514915-5208-46ab-9ece-1cc4688ad0c1' GROUP_TYPE2_ID = 'f8645498-1323-47a2-9442-5c57724d2e3c' GROUP_TYPE3_ID = '1b7915f4-b899-4510-9eff-bd67508c3334' GROUP_ID = '9a965cc6-ee3a-468d-a721-cebb193f696f' GROUP2_ID = '40a85639-abc3-4461-9230-b131abd8ee07' GROUP3_ID = '1078414b-380c-474c-bf76-57e2c235841c' GROUP_SNAPSHOT_ID = '1e2ab152-44f0-11e6-819f-000c29d19d84' GROUP_SNAPSHOT2_ID = '33e2ff04-44f0-11e6-819f-000c29d19d84' # I don't care what it's used for, I just want a damn UUID UUID1 = '84d0c5f7-2349-401c-8672-f76214d13cab' UUID2 = '25406d50-e645-4e62-a9ef-1f53f9cba13f' UUID3 = '29c80662-3a9f-4844-a585-55cd3cd180b5' UUID4 = '4cd72b2b-5a4f-4f24-93dc-7c0212002916' UUID5 = '0a574d83-cacf-42b9-8f9f-8f4faa6d4746' ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/fake_group.py0000664000175000017500000000467300000000000021162 0ustar00zuulzuul00000000000000# Copyright 2016 EMC Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_versionedobjects import fields from cinder import objects from cinder.tests.unit import fake_constants as fake def fake_db_group(**updates): db_group = { 'id': fake.GROUP_ID, 'name': 'group-1', 'status': 'available', 'user_id': fake.USER_ID, 'project_id': fake.PROJECT_ID, 'group_type_id': fake.GROUP_TYPE_ID, 'group_snapshot_id': None, 'source_group_id': None, } for name, field in objects.Group.fields.items(): if name in db_group: continue if field.nullable: db_group[name] = None elif field.default != fields.UnspecifiedDefault: db_group[name] = field.default else: raise Exception('fake_db_group needs help with %s.' % name) if updates: db_group.update(updates) return db_group def fake_db_group_type(**updates): db_group_type = { 'id': fake.GROUP_TYPE_ID, 'name': 'type-1', 'description': 'A fake group type', 'is_public': True, 'projects': [], 'group_specs': {}, } for name, field in objects.GroupType.fields.items(): if name in db_group_type: continue if field.nullable: db_group_type[name] = None elif field.default != fields.UnspecifiedDefault: db_group_type[name] = field.default else: raise Exception('fake_db_group_type needs help with %s.' 
% name) if updates: db_group_type.update(updates) return db_group_type def fake_group_obj(context, **updates): return objects.Group._from_db_object( context, objects.Group(), fake_db_group(**updates)) def fake_group_type_obj(context, **updates): return objects.GroupType._from_db_object( context, objects.GroupType(), fake_db_group_type(**updates)) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/fake_group_snapshot.py0000664000175000017500000000332000000000000023065 0ustar00zuulzuul00000000000000# Copyright 2016 EMC Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_versionedobjects import fields from cinder import objects from cinder.tests.unit import fake_constants as fake def fake_db_group_snapshot(**updates): db_group_snapshot = { 'id': fake.GROUP_SNAPSHOT_ID, 'name': 'group-1', 'status': 'available', 'user_id': fake.USER_ID, 'project_id': fake.PROJECT_ID, 'group_type_id': fake.GROUP_TYPE_ID, 'group_id': fake.GROUP_ID, } for name, field in objects.GroupSnapshot.fields.items(): if name in db_group_snapshot: continue if field.nullable: db_group_snapshot[name] = None elif field.default != fields.UnspecifiedDefault: db_group_snapshot[name] = field.default else: raise Exception('fake_db_group_snapshot needs help with %s.' % name) if updates: db_group_snapshot.update(updates) return db_group_snapshot def fake_group_snapshot_obj(context, **updates): return objects.GroupSnapshot._from_db_object( context, objects.GroupSnapshot(), fake_db_group_snapshot(**updates)) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/fake_notifier.py0000664000175000017500000000605500000000000021641 0ustar00zuulzuul00000000000000# Copyright 2014 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
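# A rough usage sketch for the fakes defined below (illustrative only, and
# assuming rpc.NOTIFIER has been initialized by the test base class):
#
#   fake_notifier.mock_notifier(self)   # swap in FakeNotifier instances
#   ...run code that emits notifications through rpc.NOTIFIER...
#   self.assertEqual(1, rpc.NOTIFIER.get_notification_count())
#
# Each FakeNotifier records its messages in self.notifications instead of
# sending them over the message bus.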
import collections import functools import json import oslo_messaging as messaging from cinder import rpc FakeMessage = collections.namedtuple('Message', ['publisher_id', 'priority', 'event_type', 'payload']) class FakeNotifier(object): def __init__(self, transport, publisher_id=None, serializer=None, driver=None, topic=None, retry=None): self.transport = transport self.publisher_id = publisher_id for priority in ['debug', 'info', 'warn', 'error', 'critical']: setattr(self, priority, functools.partial(self._notify, priority.upper())) self._serializer = serializer or messaging.serializer.NoOpSerializer() self._topic = topic self.retry = retry self.notifications = [] def prepare(self, publisher_id=None): if publisher_id is None: publisher_id = self.publisher_id return self.__class__(self.transport, publisher_id, self._serializer) def get_notification_count(self): return len(self.notifications) def _notify(self, priority, ctxt, event_type, payload): payload = self._serializer.serialize_entity(ctxt, payload) # NOTE(sileht): simulate the kombu serializer # this permit to raise an exception if something have not # been serialized correctly json.dumps(payload) msg = dict(publisher_id=self.publisher_id, priority=priority, event_type=event_type, payload=payload) self.notifications.append(msg) def reset(self): del self.notifications[:] def mock_notifier(testcase): testcase.mock_object(messaging, 'Notifier', FakeNotifier) if rpc.NOTIFIER: serializer = getattr(rpc.NOTIFIER, '_serializer', None) testcase.mock_object(rpc, 'NOTIFIER', FakeNotifier(rpc.NOTIFIER.transport, rpc.NOTIFIER.publisher_id, serializer=serializer)) def get_fake_notifier(service=None, host=None, publisher_id=None): if not publisher_id: publisher_id = "%s.%s" % (service, host) serializer = getattr(rpc.NOTIFIER, '_serializer', None) notifier = FakeNotifier(None, publisher_id=publisher_id, serializer=serializer) return notifier.prepare(publisher_id=publisher_id) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/fake_objects.py0000664000175000017500000000576000000000000021455 0ustar00zuulzuul00000000000000# Copyright (c) 2016 Red Hat Inc. # Copyright (c) 2016 Intel Corp. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
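# Rough intent of the fakes below (a hedged reading of the version maps):
# ChildObject gained 'text' in 1.1 and 'integer' in 1.2, so backporting a
# newer primitive through obj_make_compatible() to an older target version
# drops those fields again.  MyHistory stitches the fake objects into a
# CinderObjectVersionsHistory, presumably for the object version-pinning
# tests.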
from oslo_utils import versionutils from cinder import objects @objects.base.CinderObjectRegistry.register_if(False) class ChildObject(objects.base.CinderObject): VERSION = '1.2' fields = { 'scheduled_at': objects.base.fields.DateTimeField(nullable=True), 'uuid': objects.base.fields.UUIDField(), 'text': objects.base.fields.StringField(nullable=True), 'integer': objects.base.fields.IntegerField(nullable=True), } def obj_make_compatible(self, primitive, target_version): super(ChildObject, self).obj_make_compatible(primitive, target_version) target_version = versionutils.convert_version_to_tuple(target_version) if target_version < (1, 1): primitive.pop('text', None) if target_version < (1, 2): primitive.pop('integer', None) @objects.base.CinderObjectRegistry.register_if(False) class ParentObject(objects.base.CinderObject): VERSION = '1.1' fields = { 'uuid': objects.base.fields.UUIDField(), 'child': objects.base.fields.ObjectField('ChildObject', nullable=True), 'scheduled_at': objects.base.fields.DateTimeField(nullable=True), } def obj_make_compatible(self, primitive, target_version): super(ParentObject, self).obj_make_compatible(primitive, target_version) target_version = versionutils.convert_version_to_tuple(target_version) if target_version < (1, 1): primitive.pop('scheduled_at', None) @objects.base.CinderObjectRegistry.register_if(False) class ParentObjectList(objects.base.CinderObject, objects.base.ObjectListBase): VERSION = ParentObject.VERSION fields = { 'objects': objects.base.fields.ListOfObjectsField('ParentObject'), } class MyHistory(objects.base.CinderObjectVersionsHistory): linked_objects = {'ParentObject': 'ParentObjectList'} def __init__(self): self.versions = ['1.0'] self['1.0'] = {'ChildObject': '1.0'} self.add('1.1', {'ChildObject': '1.1'}) self.add('1.2', {'ParentObject': '1.0'}) self.add('1.3', {'ParentObjectList': '1.0'}) self.add('1.4', {'ParentObject': '1.1'}) self.add('1.5', {'ParentObjectList': '1.1'}) self.add('1.6', {'ChildObject': '1.2'}) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/fake_service.py0000664000175000017500000000402500000000000021455 0ustar00zuulzuul00000000000000# Copyright 2015 Intel Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
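# Typical use of the helpers below (illustrative only):
#
#   service = fake_service_obj(ctxt, host='host@backend',
#                              topic='cinder-volume')
#
# Unspecified fields are filled in from the objects.Service field defaults
# (or None when nullable), matching the pattern of the other fake_* modules
# in this package.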
from oslo_utils import timeutils from oslo_versionedobjects import fields from cinder.db.sqlalchemy import models from cinder import objects def fake_service_orm(**updates): """Create a fake ORM service instance.""" db_service = fake_db_service(**updates) service = models.Service(**db_service) return service def fake_db_service(**updates): NOW = timeutils.utcnow().replace(microsecond=0) db_service = { 'created_at': NOW, 'updated_at': NOW, 'deleted_at': None, 'deleted': False, 'id': 123, 'uuid': 'ce59413f-4061-425c-9ad0-3479bd102ab2', 'host': 'fake-host', 'binary': 'fake-service', 'topic': 'fake-service-topic', 'report_count': 1, 'disabled': False, 'disabled_reason': None, 'modified_at': NOW, } for name, field in objects.Service.fields.items(): if name in db_service: continue if field.nullable: db_service[name] = None elif field.default != fields.UnspecifiedDefault: db_service[name] = field.default else: raise Exception('fake_db_service needs help with %s.' % name) if updates: db_service.update(updates) return db_service def fake_service_obj(context, **updates): return objects.Service._from_db_object(context, objects.Service(), fake_db_service(**updates)) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/fake_snapshot.py0000664000175000017500000000415100000000000021654 0ustar00zuulzuul00000000000000# Copyright 2015 SimpliVity Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_versionedobjects import fields from cinder.objects import fields as c_fields from cinder.objects import snapshot from cinder.tests.unit import fake_constants as fake def fake_db_snapshot(**updates): db_snapshot = { 'id': fake.SNAPSHOT_ID, 'volume_id': fake.VOLUME_ID, 'status': c_fields.SnapshotStatus.CREATING, 'progress': '0%', 'volume_size': 1, 'display_name': 'fake_name', 'display_description': 'fake_description', 'metadata': {}, 'snapshot_metadata': [], } for name, field in snapshot.Snapshot.fields.items(): if name in db_snapshot: continue if field.nullable: db_snapshot[name] = None elif field.default != fields.UnspecifiedDefault: db_snapshot[name] = field.default else: raise Exception('fake_db_snapshot needs help with %s' % name) if updates: db_snapshot.update(updates) return db_snapshot def fake_snapshot_obj(context, **updates): expected_attrs = updates.pop('expected_attrs', None) or [] if 'volume' in updates and 'volume' not in expected_attrs: expected_attrs.append('volume') if 'context' in updates and 'context' not in expected_attrs: expected_attrs.append('context') return snapshot.Snapshot._from_db_object(context, snapshot.Snapshot(), fake_db_snapshot(**updates), expected_attrs=expected_attrs) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/fake_utils.py0000664000175000017500000000522700000000000021162 0ustar00zuulzuul00000000000000# Copyright (c) 2011 Citrix Systems, Inc. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """This modules stubs out functions in cinder.utils.""" import re from eventlet import greenthread _fake_execute_repliers = [] _fake_execute_log = [] def fake_execute_get_log(): return _fake_execute_log def fake_execute_clear_log(): global _fake_execute_log _fake_execute_log = [] def fake_execute_set_repliers(repliers): """Allows the client to configure replies to commands.""" global _fake_execute_repliers _fake_execute_repliers = repliers def fake_execute_default_reply_handler(*ignore_args, **ignore_kwargs): """A reply handler for commands that haven't been added to the reply list. Returns empty strings for stdout and stderr. """ return '', '' def fake_execute(*cmd_parts, **kwargs): """This function stubs out execute. It optionally executes a preconfigued function to return expected data. """ global _fake_execute_repliers process_input = kwargs.get('process_input', None) check_exit_code = kwargs.get('check_exit_code', 0) delay_on_retry = kwargs.get('delay_on_retry', True) attempts = kwargs.get('attempts', 1) run_as_root = kwargs.get('run_as_root', False) cmd_str = ' '.join(str(part) for part in cmd_parts) _fake_execute_log.append(cmd_str) reply_handler = fake_execute_default_reply_handler for fake_replier in _fake_execute_repliers: if re.match(fake_replier[0], cmd_str): reply_handler = fake_replier[1] break if isinstance(reply_handler, str): # If the reply handler is a string, return it as stdout reply = reply_handler, '' else: # Alternative is a function, so call it reply = reply_handler(cmd_parts, process_input=process_input, delay_on_retry=delay_on_retry, attempts=attempts, run_as_root=run_as_root, check_exit_code=check_exit_code) # Replicate the sleep call in the real function greenthread.sleep(0) return reply ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/fake_volume.py0000664000175000017500000001145500000000000021331 0ustar00zuulzuul00000000000000# Copyright 2015 SimpliVity Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
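# Typical use of the helpers below (illustrative only):
#
#   vol = fake_volume_obj(ctxt, id=fake.VOLUME2_ID, size=10)
#
# fake_db_volume() fills any unspecified Volume fields from their defaults
# (or None when nullable), and fake_volume_obj() wraps the result in an
# objects.Volume with 'metadata', 'admin_metadata' and 'volume_attachment'
# loaded by default.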
from oslo_utils.uuidutils import is_uuid_like from oslo_versionedobjects import fields from cinder.db.sqlalchemy import models from cinder import objects from cinder.objects import fields as c_fields from cinder.tests.unit import fake_constants as fake def fake_db_volume(**updates): db_volume = { 'id': fake.VOLUME_ID, 'size': 1, 'name': 'volume-%s' % fake.VOLUME_ID, 'availability_zone': 'fake_availability_zone', 'status': 'available', 'attach_status': c_fields.VolumeAttachStatus.DETACHED, 'previous_status': None, 'volume_attachment': [], 'volume_metadata': [], 'volume_admin_metadata': [], 'volume_glance_metadata': [], 'snapshots': [], } for name, field in objects.Volume.fields.items(): if name in db_volume: continue if field.nullable: db_volume[name] = None elif field.default != fields.UnspecifiedDefault: db_volume[name] = field.default else: raise Exception('fake_db_volume needs help with %s.' % name) if updates: db_volume.update(updates) return db_volume def fake_db_volume_type(**updates): db_volume_type = { 'id': fake.VOLUME_TYPE_ID, 'name': 'type-1', 'description': 'A fake volume type', 'is_public': True, 'projects': [], 'extra_specs': {}, } for name, field in objects.VolumeType.fields.items(): if name in db_volume_type: continue if field.nullable: db_volume_type[name] = None elif field.default != fields.UnspecifiedDefault: db_volume_type[name] = field.default else: raise Exception('fake_db_volume_type needs help with %s.' % name) if updates: db_volume_type.update(updates) return db_volume_type def fake_db_volume_attachment(**updates): db_volume_attachment = { 'id': fake.ATTACHMENT_ID, 'volume_id': fake.VOLUME_ID, 'volume': fake_db_volume(), } for name, field in objects.VolumeAttachment.fields.items(): if name in db_volume_attachment: continue if field.nullable: db_volume_attachment[name] = None elif field.default != fields.UnspecifiedDefault: db_volume_attachment[name] = field.default else: raise Exception( 'fake_db_volume_attachment needs help with %s.' 
% name) if updates: db_volume_attachment.update(updates) return db_volume_attachment def fake_volume_obj(context, **updates): if updates.get('encryption_key_id'): assert is_uuid_like(updates['encryption_key_id']) updates['volume_attachment'] = updates.get('volume_attachment') or [] expected_attrs = updates.pop('expected_attrs', ['metadata', 'admin_metadata', 'volume_attachment']) vol = objects.Volume._from_db_object(context, objects.Volume(), fake_db_volume(**updates), expected_attrs=expected_attrs) return vol def fake_volume_type_obj(context, **updates): return objects.VolumeType._from_db_object( context, objects.VolumeType(), fake_db_volume_type(**updates)) def fake_volume_attachment_obj(context, **updates): return objects.VolumeAttachment._from_db_object( context, objects.VolumeAttachment(), fake_db_volume_attachment(**updates)) def volume_db_obj(**updates): """Return a volume ORM object.""" updates.setdefault('id', fake.VOLUME_ID) updates.setdefault('size', 1) return models.Volume(**updates) def volume_attachment_db_obj(**updates): updates.setdefault('id', fake.ATTACHMENT_ID) updates.setdefault('volume_id', fake.VOLUME_ID) updates.setdefault('volume', volume_db_obj()) return models.VolumeAttachment(**updates) def volume_attachment_ovo(context, **updates): orm = volume_attachment_db_obj(**updates) return objects.VolumeAttachment._from_db_object(context, objects.VolumeAttachment(), orm) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315577.2031193 cinder-27.0.0/cinder/tests/unit/group/0000775000175000017500000000000000000000000017610 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/group/__init__.py0000664000175000017500000000000000000000000021707 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/group/test_groups_api.py0000664000175000017500000012515100000000000023376 0ustar00zuulzuul00000000000000# Copyright (C) 2016 EMC Corporation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Tests for group API. 
""" from unittest import mock import ddt from cinder import context from cinder import exception import cinder.group from cinder import objects from cinder.objects import fields from cinder.policies import group_snapshots as g_snap_policies from cinder import quota from cinder.tests.unit import fake_constants as fake from cinder.tests.unit import fake_volume from cinder.tests.unit import test from cinder.tests.unit import utils GROUP_QUOTAS = quota.GROUP_QUOTAS @ddt.ddt class GroupAPITestCase(test.TestCase): """Test Case for group API.""" def setUp(self): super(GroupAPITestCase, self).setUp() self.group_api = cinder.group.API() self.ctxt = context.RequestContext(fake.USER_ID, fake.PROJECT_ID, auth_token=True, is_admin=True) self.user_ctxt = context.RequestContext( fake.USER_ID, fake.PROJECT_ID, auth_token=True) @mock.patch('cinder.objects.Group.get_by_id') def test_get(self, mock_group_get): fake_group = {'name': 'fake_group'} mock_group_get.return_value = fake_group grp = self.group_api.get(self.ctxt, fake.GROUP_ID) self.assertEqual(fake_group, grp) @ddt.data(True, False) @mock.patch('cinder.objects.GroupList.get_all') @mock.patch('cinder.objects.GroupList.get_all_by_project') def test_get_all(self, is_admin, mock_get_all_by_project, mock_get_all): self.group_api.LOG = mock.Mock() fake_groups = ['fake_group1', 'fake_group2'] fake_groups_by_project = ['fake_group1'] mock_get_all.return_value = fake_groups mock_get_all_by_project.return_value = fake_groups_by_project if is_admin: grps = self.group_api.get_all(self.ctxt, filters={'all_tenants': True}) self.assertEqual(fake_groups, grps) else: grps = self.group_api.get_all(self.user_ctxt) self.assertEqual(fake_groups_by_project, grps) @mock.patch('cinder.volume.rpcapi.VolumeAPI.delete_group') @mock.patch('cinder.db.volume_get_all_by_generic_group') @mock.patch('cinder.db.volumes_update') @mock.patch('cinder.group.api.API._cast_create_group') @mock.patch('cinder.group.api.API.update_quota') @mock.patch('cinder.objects.Group') @mock.patch('cinder.db.group_type_get') @mock.patch('cinder.db.volume_types_get_by_name_or_id') def test_create_delete(self, mock_volume_types_get, mock_group_type_get, mock_group, mock_update_quota, mock_cast_create_group, mock_volumes_update, mock_volume_get_all, mock_rpc_delete_group): mock_volume_types_get.return_value = [{'id': fake.VOLUME_TYPE_ID}] mock_group_type_get.return_value = {'id': fake.GROUP_TYPE_ID} name = "test_group" description = "this is a test group" grp = utils.create_group(self.ctxt, group_type_id=fake.GROUP_TYPE_ID, volume_type_ids=[fake.VOLUME_TYPE_ID], availability_zone='nova', host=None, name=name, description=description, status=fields.GroupStatus.CREATING) mock_group.return_value = grp ret_group = self.group_api.create(self.ctxt, name, description, fake.GROUP_TYPE_ID, [fake.VOLUME_TYPE_ID], availability_zone='nova') self.assertEqual(grp.obj_to_primitive(), ret_group.obj_to_primitive()) ret_group.host = "test_host@fakedrv#fakepool" ret_group.status = fields.GroupStatus.AVAILABLE ret_group.assert_not_frozen = mock.Mock(return_value=True) ret_group.group_snapshots = [] self.group_api.delete(self.ctxt, ret_group, delete_volumes=True) mock_volume_get_all.assert_called_once_with(mock.ANY, ret_group.id) mock_volumes_update.assert_called_once_with(self.ctxt, []) mock_rpc_delete_group.assert_called_once_with(self.ctxt, ret_group) @mock.patch('cinder.group.api.API._cast_create_group') @mock.patch('cinder.group.api.API.update_quota') @mock.patch('cinder.objects.Group') 
@mock.patch('cinder.db.group_type_get_by_name') @mock.patch('cinder.db.volume_types_get_by_name_or_id') def test_create_with_group_name(self, mock_volume_types_get, mock_group_type_get, mock_group, mock_update_quota, mock_cast_create_group): mock_volume_types_get.return_value = [{'id': fake.VOLUME_TYPE_ID}] mock_group_type_get.return_value = {'id': fake.GROUP_TYPE_ID} name = "test_group" description = "this is a test group" grp = utils.create_group(self.ctxt, group_type_id=fake.GROUP_TYPE_ID, volume_type_ids=[fake.VOLUME_TYPE_ID], availability_zone='nova', host=None, name=name, description=description, status=fields.GroupStatus.CREATING) mock_group.return_value = grp ret_group = self.group_api.create(self.ctxt, name, description, "fake-grouptype-name", [fake.VOLUME_TYPE_ID], availability_zone='nova') self.assertEqual(grp.obj_to_primitive(), ret_group.obj_to_primitive()) mock_group_type_get.assert_called_once_with(self.ctxt, "fake-grouptype-name") @mock.patch('cinder.group.api.API._cast_create_group') @mock.patch('cinder.group.api.API.update_quota') @mock.patch('cinder.db.group_type_get') @mock.patch('cinder.db.group_type_get_by_name') @mock.patch('cinder.db.volume_types_get_by_name_or_id') def test_create_with_uuid_format_group_type_name( self, mock_volume_types_get, mock_group_type_get_by_name, mock_group_type_get, mock_update_quota, mock_cast_create_group): uuid_format_type_name = fake.UUID1 mock_volume_types_get.return_value = [{'id': fake.VOLUME_TYPE_ID}] mock_group_type_get.side_effect = exception.GroupTypeNotFound( group_type_id=uuid_format_type_name) mock_group_type_get_by_name.return_value = {'id': fake.GROUP_TYPE_ID} ret_group = self.group_api.create(self.ctxt, "test_group", '', uuid_format_type_name, [fake.VOLUME_TYPE_ID], availability_zone='nova') self.assertEqual(ret_group["group_type_id"], fake.GROUP_TYPE_ID) @mock.patch('cinder.group.api.API._cast_create_group') @mock.patch('cinder.group.api.API.update_quota') @mock.patch('cinder.db.group_type_get_by_name') @mock.patch('cinder.db.sqlalchemy.api._volume_type_get') @mock.patch('cinder.db.sqlalchemy.api._volume_type_get_by_name') def test_create_with_uuid_format_volume_type_name( self, mock_vol_t_get_by_name, mock_vol_types_get_by_id, mock_group_type_get, mock_update_quota, mock_cast_create_group): uuid_format_name = fake.UUID1 mock_group_type_get.return_value = {'id': fake.GROUP_TYPE_ID} volume_type = {'id': fake.VOLUME_TYPE_ID, 'name': uuid_format_name} mock_vol_types_get_by_id.side_effect = exception.VolumeTypeNotFound( volume_type_id=uuid_format_name) mock_vol_t_get_by_name.return_value = volume_type group = self.group_api.create(self.ctxt, "test_group", "this is a test group", "fake-grouptype-name", [uuid_format_name], availability_zone='nova') self.assertEqual(group["volume_type_ids"], [volume_type['id']]) @mock.patch('cinder.group.api.API._cast_create_group') @mock.patch('cinder.group.api.API.update_quota') @mock.patch('cinder.db.group_type_get_by_name') @mock.patch('cinder.db.volume_types_get_by_name_or_id') def test_create_with_multi_types(self, mock_volume_types_get, mock_group_type_get, mock_update_quota, mock_cast_create_group): volume_types = [{'id': fake.VOLUME_TYPE_ID}, {'id': fake.VOLUME_TYPE2_ID}] mock_volume_types_get.return_value = volume_types mock_group_type_get.return_value = {'id': fake.GROUP_TYPE_ID} volume_type_names = ['fake-volume-type1', 'fake-volume-type2'] name = "test_group" description = "this is a test group" group = self.group_api.create(self.ctxt, name, description, "fake-grouptype-name", 
volume_type_names, availability_zone='nova') self.assertEqual(group["volume_type_ids"], [t['id'] for t in volume_types]) self.assertEqual(group["group_type_id"], fake.GROUP_TYPE_ID) mock_group_type_get.assert_called_once_with(self.ctxt, "fake-grouptype-name") mock_volume_types_get.assert_called_once_with(mock.ANY, volume_type_names) @mock.patch('oslo_utils.timeutils.utcnow') @mock.patch('cinder.objects.Group') def test_reset_status(self, mock_group, mock_time_util): mock_time_util.return_value = "time_now" self.group_api.reset_status(self.ctxt, mock_group, fields.GroupStatus.AVAILABLE) update_field = {'updated_at': "time_now", 'status': fields.GroupStatus.AVAILABLE} mock_group.update.assert_called_once_with(update_field) mock_group.save.assert_called_once_with() @mock.patch.object(GROUP_QUOTAS, "reserve") @mock.patch('cinder.objects.Group') @mock.patch('cinder.db.group_type_get_by_name') @mock.patch('cinder.db.volume_types_get_by_name_or_id') def test_create_group_failed_update_quota(self, mock_volume_types_get, mock_group_type_get, mock_group, mock_group_quota_reserve): mock_volume_types_get.return_value = [{'id': fake.VOLUME_TYPE_ID}] mock_group_type_get.return_value = {'id': fake.GROUP_TYPE_ID} fake_overs = ['groups'] fake_quotas = {'groups': 1} fake_usages = {'groups': {'reserved': 0, 'in_use': 1}} mock_group_quota_reserve.side_effect = exception.OverQuota( overs=fake_overs, quotas=fake_quotas, usages=fake_usages) name = "test_group" description = "this is a test group" grp = utils.create_group(self.ctxt, group_type_id=fake.GROUP_TYPE_ID, volume_type_ids=[fake.VOLUME_TYPE_ID], availability_zone='nova', host=None, name=name, description=description, status=fields.GroupStatus.CREATING) mock_group.return_value = grp self.assertRaises(exception.GroupLimitExceeded, self.group_api.create, self.ctxt, name, description, "fake-grouptype-name", [fake.VOLUME_TYPE_ID], availability_zone='nova') @mock.patch('cinder.objects.Group') @mock.patch('cinder.db.volume_get') def test__validate_add_volumes(self, mock_volume_get, mock_group): grp = utils.create_group(self.ctxt, group_type_id=fake.GROUP_TYPE_ID, volume_type_ids=[fake.VOLUME_TYPE_ID], availability_zone='nova', host=None, name="name", description="description", status=fields.GroupStatus.CREATING) mock_group.return_value = grp fake_volume_obj = fake_volume.fake_volume_obj(self.ctxt) mock_volume_get.return_value = fake_volume_obj self.assertRaises(exception.InvalidVolume, self.group_api._validate_add_volumes, self.ctxt, [], ['123456789'], grp) @ddt.data(['test_host@fakedrv#fakepool', 'test_host@fakedrv#fakepool'], ['test_host@fakedrv#fakepool', 'test_host2@fakedrv#fakepool']) @mock.patch('cinder.volume.rpcapi.VolumeAPI.update_group') @mock.patch('cinder.db.volume_get_all_by_generic_group') @mock.patch('cinder.group.api.API._cast_create_group') @mock.patch('cinder.group.api.API.update_quota') @mock.patch('cinder.objects.Group') @mock.patch('cinder.db.group_type_get') @mock.patch('cinder.db.volume_types_get_by_name_or_id') def test_update(self, hosts, mock_volume_types_get, mock_group_type_get, mock_group, mock_update_quota, mock_cast_create_group, mock_volume_get_all, mock_rpc_update_group): vol_type_dict = {'id': fake.VOLUME_TYPE_ID, 'name': 'fake_volume_type'} vol_type = objects.VolumeType(self.ctxt, **vol_type_dict) mock_volume_types_get.return_value = [{'id': fake.VOLUME_TYPE_ID}] mock_group_type_get.return_value = {'id': fake.GROUP_TYPE_ID} name = "test_group" description = "this is a test group" grp = utils.create_group(self.ctxt, 
group_type_id=fake.GROUP_TYPE_ID, volume_type_ids=[fake.VOLUME_TYPE_ID], availability_zone='nova', host=None, name=name, description=description, status=fields.GroupStatus.CREATING) mock_group.return_value = grp ret_group = self.group_api.create(self.ctxt, name, description, fake.GROUP_TYPE_ID, [fake.VOLUME_TYPE_ID], availability_zone='nova') self.assertEqual(grp.obj_to_primitive(), ret_group.obj_to_primitive()) ret_group.volume_types = [vol_type] ret_group.host = hosts[0] # set resource_backend directly because ret_group # is instance of MagicMock ret_group.resource_backend = 'fake-cluster' ret_group.status = fields.GroupStatus.AVAILABLE ret_group.id = fake.GROUP_ID vol1 = utils.create_volume( self.ctxt, host=hosts[1], availability_zone=ret_group.availability_zone, volume_type_id=fake.VOLUME_TYPE_ID, cluster_name='fake-cluster') vol2 = utils.create_volume( self.ctxt, host=hosts[1], availability_zone=ret_group.availability_zone, volume_type_id=fake.VOLUME_TYPE_ID, group_id=fake.GROUP_ID, cluster_name='fake-cluster') vol2_dict = { 'id': vol2.id, 'group_id': fake.GROUP_ID, 'volume_type_id': fake.VOLUME_TYPE_ID, 'availability_zone': ret_group.availability_zone, 'host': hosts[1], 'status': 'available', } mock_volume_get_all.return_value = [vol2_dict] new_name = "new_group_name" new_desc = "this is a new group" self.group_api.update(self.ctxt, ret_group, new_name, new_desc, vol1.id, vol2.id) mock_volume_get_all.assert_called_once_with(mock.ANY, ret_group.id) mock_rpc_update_group.assert_called_once_with(self.ctxt, ret_group, add_volumes=vol1.id, remove_volumes=vol2.id) @mock.patch('cinder.objects.GroupSnapshot.get_by_id') @mock.patch('cinder.context.RequestContext.authorize') def test_get_group_snapshot(self, mock_authorize, mock_group_snap): fake_group_snap = 'fake_group_snap' mock_group_snap.return_value = fake_group_snap grp_snap = self.group_api.get_group_snapshot( self.ctxt, fake.GROUP_SNAPSHOT_ID) self.assertEqual(fake_group_snap, grp_snap) mock_authorize.assert_called_once_with( g_snap_policies.GET_POLICY, target_obj=fake_group_snap) @ddt.data(True, False) @mock.patch('cinder.objects.GroupSnapshotList.get_all') @mock.patch('cinder.objects.GroupSnapshotList.get_all_by_project') def test_get_all_group_snapshots(self, is_admin, mock_get_all_by_project, mock_get_all): fake_group_snaps = ['fake_group_snap1', 'fake_group_snap2'] fake_group_snaps_by_project = ['fake_group_snap1'] mock_get_all.return_value = fake_group_snaps mock_get_all_by_project.return_value = fake_group_snaps_by_project if is_admin: grp_snaps = self.group_api.get_all_group_snapshots( self.ctxt, filters={'all_tenants': True}) self.assertEqual(fake_group_snaps, grp_snaps) else: grp_snaps = self.group_api.get_all_group_snapshots( self.user_ctxt) self.assertEqual(fake_group_snaps_by_project, grp_snaps) @mock.patch('cinder.objects.GroupSnapshot') def test_update_group_snapshot(self, mock_group_snap): grp_snap_update = {"name": "new_name", "description": "This is a new description"} self.group_api.update_group_snapshot(self.ctxt, mock_group_snap, grp_snap_update) mock_group_snap.update.assert_called_once_with(grp_snap_update) mock_group_snap.save.assert_called_once_with() @mock.patch('cinder.volume.rpcapi.VolumeAPI.delete_group_snapshot') @mock.patch('cinder.volume.rpcapi.VolumeAPI.create_group_snapshot') @mock.patch('cinder.volume.api.API.create_snapshots_in_db') @mock.patch('cinder.objects.Group') @mock.patch('cinder.objects.GroupSnapshot') @mock.patch('cinder.objects.SnapshotList.get_all_for_group_snapshot') def 
test_create_delete_group_snapshot(self, mock_snap_get_all, mock_group_snap, mock_group, mock_create_in_db, mock_create_api, mock_delete_api): name = "fake_name" description = "fake description" mock_group.id = fake.GROUP_ID mock_group.group_type_id = fake.GROUP_TYPE_ID mock_group.assert_not_frozen = mock.Mock(return_value=True) mock_group.volumes = [] ret_group_snap = self.group_api.create_group_snapshot( self.ctxt, mock_group, name, description) mock_snap_get_all.return_value = [] options = {'group_id': fake.GROUP_ID, 'user_id': self.ctxt.user_id, 'project_id': self.ctxt.project_id, 'status': "creating", 'name': name, 'description': description, 'group_type_id': fake.GROUP_TYPE_ID} mock_group_snap.assert_called_once_with(self.ctxt, **options) ret_group_snap.create.assert_called_once_with() mock_create_in_db.assert_called_once_with(self.ctxt, [], ret_group_snap.name, ret_group_snap.description, None, ret_group_snap.id) mock_create_api.assert_called_once_with(self.ctxt, ret_group_snap) ret_group_snap.assert_not_frozen = mock.Mock(return_value=True) self.group_api.delete_group_snapshot(self.ctxt, ret_group_snap) mock_delete_api.assert_called_once_with(mock.ANY, ret_group_snap) @mock.patch('cinder.volume.api.API.delete') @mock.patch('cinder.objects.VolumeType.get_by_name_or_id') @mock.patch('cinder.db.group_volume_type_mapping_create') @mock.patch('cinder.volume.api.API.create') @mock.patch('cinder.objects.GroupSnapshot.get_by_id') @mock.patch('cinder.objects.SnapshotList.get_all_for_group_snapshot') @mock.patch('cinder.volume.rpcapi.VolumeAPI.create_group_from_src') @mock.patch('cinder.objects.VolumeList.get_all_by_generic_group') def test_create_group_from_snap_volume_failed( self, mock_volume_get_all, mock_rpc_create_group_from_src, mock_snap_get_all, mock_group_snap_get, mock_volume_api_create, mock_mapping_create, mock_get_volume_type, mock_volume_delete): mock_volume_api_create.side_effect = [exception.CinderException] vol_type = fake_volume.fake_volume_type_obj( self.ctxt, id=fake.VOLUME_TYPE_ID, name='fake_volume_type') mock_get_volume_type.return_value = vol_type grp_snap = utils.create_group_snapshot( self.ctxt, fake.GROUP_ID, group_type_id=fake.GROUP_TYPE_ID, status=fields.GroupStatus.CREATING) mock_group_snap_get.return_value = grp_snap vol1 = utils.create_volume( self.ctxt, availability_zone='nova', volume_type_id=vol_type['id'], group_id=fake.GROUP_ID) snap = utils.create_snapshot(self.ctxt, vol1.id, volume_type_id=vol_type['id'], status=fields.GroupStatus.CREATING) mock_snap_get_all.return_value = [snap] name = "test_group" description = "this is a test group" grp = utils.create_group(self.ctxt, group_type_id=fake.GROUP_TYPE_ID, volume_type_ids=[vol_type['id']], availability_zone='nova', name=name, description=description, group_snapshot_id=grp_snap.id, status=fields.GroupStatus.CREATING) vol2 = utils.create_volume( self.ctxt, availability_zone=grp.availability_zone, volume_type_id=vol_type['id'], group_id=grp.id, snapshot_id=snap.id) mock_volume_get_all.return_value = [vol2] self.assertRaises( exception.CinderException, self.group_api._create_group_from_group_snapshot, self.ctxt, grp, grp_snap.id) mock_volume_api_create.assert_called_once_with( self.ctxt, 1, None, None, availability_zone=grp.availability_zone, group_snapshot=grp_snap, group=grp, snapshot=snap, volume_type=vol_type) mock_rpc_create_group_from_src.assert_not_called() mock_volume_delete.assert_called_once_with(self.ctxt, vol2) vol2.destroy() grp.destroy() snap.destroy() vol1.destroy() grp_snap.destroy() 
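# --- Illustrative sketch (hypothetical names, not Cinder's actual code) ---
# The two tests around this point pin down the same contract from both
# sides: creating a group from a group snapshot makes one volume per
# snapshot, and if any creation raises, the volumes already created are
# deleted and create_group_from_src is never cast to the backend.  A
# generic rollback loop with that shape looks roughly like this; the
# create_volume/delete_volume callables are assumptions for the sketch.
def _create_all_or_cleanup(snapshots, create_volume, delete_volume):
    created = []
    try:
        for snap in snapshots:
            created.append(create_volume(snap))
    except Exception:
        # Undo partial work before re-raising, mirroring what the
        # failure-path test asserts (delete called, RPC not called).
        for vol in created:
            delete_volume(vol)
        raise
    return created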
@mock.patch('cinder.group.api.API._update_volumes_host') @mock.patch('cinder.objects.VolumeType.get_by_name_or_id') @mock.patch('cinder.db.group_volume_type_mapping_create') @mock.patch('cinder.volume.api.API.create') @mock.patch('cinder.objects.GroupSnapshot.get_by_id') @mock.patch('cinder.objects.SnapshotList.get_all_for_group_snapshot') @mock.patch('cinder.volume.rpcapi.VolumeAPI.create_group_from_src') @mock.patch('cinder.objects.VolumeList.get_all_by_generic_group') def test_create_group_from_snap(self, mock_volume_get_all, mock_rpc_create_group_from_src, mock_snap_get_all, mock_group_snap_get, mock_volume_api_create, mock_mapping_create, mock_get_volume_type, mock_update_volumes_host): vol_type = fake_volume.fake_volume_type_obj( self.ctxt, id=fake.VOLUME_TYPE_ID, name='fake_volume_type') mock_get_volume_type.return_value = vol_type grp_snap = utils.create_group_snapshot( self.ctxt, fake.GROUP_ID, group_type_id=fake.GROUP_TYPE_ID, status=fields.GroupStatus.CREATING) mock_group_snap_get.return_value = grp_snap vol1 = utils.create_volume( self.ctxt, availability_zone='nova', volume_type_id=vol_type['id'], group_id=fake.GROUP_ID) snap = utils.create_snapshot(self.ctxt, vol1.id, volume_type_id=vol_type['id'], status=fields.GroupStatus.CREATING) mock_snap_get_all.return_value = [snap] name = "test_group" description = "this is a test group" grp = utils.create_group(self.ctxt, group_type_id=fake.GROUP_TYPE_ID, volume_type_ids=[vol_type['id']], availability_zone='nova', name=name, description=description, group_snapshot_id=grp_snap.id, status=fields.GroupStatus.CREATING) vol2 = utils.create_volume( self.ctxt, availability_zone=grp.availability_zone, volume_type_id=vol_type['id'], group_id=grp.id, snapshot_id=snap.id) mock_volume_get_all.return_value = [vol2] self.group_api._create_group_from_group_snapshot(self.ctxt, grp, grp_snap.id) mock_volume_api_create.assert_called_once_with( self.ctxt, 1, None, None, availability_zone=grp.availability_zone, group_snapshot=grp_snap, group=grp, snapshot=snap, volume_type=vol_type) mock_rpc_create_group_from_src.assert_called_once_with( self.ctxt, grp, grp_snap) mock_update_volumes_host.assert_called_once_with( self.ctxt, grp ) vol2.destroy() grp.destroy() snap.destroy() vol1.destroy() grp_snap.destroy() @mock.patch('cinder.group.api.API._update_volumes_host') @mock.patch('cinder.objects.VolumeType.get_by_name_or_id') @mock.patch('cinder.db.group_volume_type_mapping_create') @mock.patch('cinder.volume.api.API.create') @mock.patch('cinder.objects.Group.get_by_id') @mock.patch('cinder.volume.rpcapi.VolumeAPI.create_group_from_src') @mock.patch('cinder.objects.VolumeList.get_all_by_generic_group') def test_create_group_from_group(self, mock_volume_get_all, mock_rpc_create_group_from_src, mock_group_get, mock_volume_api_create, mock_mapping_create, mock_get_volume_type, mock_update_volumes_host): vol_type = fake_volume.fake_volume_type_obj( self.ctxt, id=fake.VOLUME_TYPE_ID, name='fake_volume_type') mock_get_volume_type.return_value = vol_type grp = utils.create_group(self.ctxt, group_type_id=fake.GROUP_TYPE_ID, volume_type_ids=[vol_type['id']], availability_zone='nova', status=fields.GroupStatus.CREATING) mock_group_get.return_value = grp vol = utils.create_volume( self.ctxt, availability_zone=grp.availability_zone, volume_type_id=fake.VOLUME_TYPE_ID, group_id=grp.id) mock_volume_get_all.return_value = [vol] grp2 = utils.create_group(self.ctxt, group_type_id=fake.GROUP_TYPE_ID, volume_type_ids=[vol_type['id']], availability_zone='nova', 
source_group_id=grp.id, status=fields.GroupStatus.CREATING) vol2 = utils.create_volume( self.ctxt, availability_zone=grp.availability_zone, volume_type_id=vol_type['id'], group_id=grp2.id, source_volid=vol.id) self.group_api._create_group_from_source_group(self.ctxt, grp2, grp.id) mock_volume_api_create.assert_called_once_with( self.ctxt, 1, None, None, availability_zone=grp.availability_zone, source_group=grp, group=grp2, source_volume=vol, volume_type=vol_type) mock_rpc_create_group_from_src.assert_called_once_with( self.ctxt, grp2, None, grp) mock_update_volumes_host.assert_called_once_with( self.ctxt, grp2 ) vol2.destroy() grp2.destroy() vol.destroy() grp.destroy() @mock.patch('cinder.volume.api.API.delete') @mock.patch('cinder.objects.VolumeType.get_by_name_or_id') @mock.patch('cinder.db.group_volume_type_mapping_create') @mock.patch('cinder.volume.api.API.create') @mock.patch('cinder.objects.Group.get_by_id') @mock.patch('cinder.volume.rpcapi.VolumeAPI.create_group_from_src') @mock.patch('cinder.objects.VolumeList.get_all_by_generic_group') def test_create_group_from_group_create_volume_failed( self, mock_volume_get_all, mock_rpc_create_group_from_src, mock_group_get, mock_volume_api_create, mock_mapping_create, mock_get_volume_type, mock_volume_delete): vol_type = fake_volume.fake_volume_type_obj( self.ctxt, id=fake.VOLUME_TYPE_ID, name='fake_volume_type') mock_get_volume_type.return_value = vol_type grp = utils.create_group(self.ctxt, group_type_id=fake.GROUP_TYPE_ID, volume_type_ids=[vol_type['id']], availability_zone='nova', status=fields.GroupStatus.CREATING) mock_group_get.return_value = grp vol1 = utils.create_volume( self.ctxt, availability_zone=grp.availability_zone, volume_type_id=fake.VOLUME_TYPE_ID, group_id=grp.id) vol2 = utils.create_volume( self.ctxt, availability_zone=grp.availability_zone, volume_type_id=fake.VOLUME_TYPE_ID, group_id=grp.id) mock_volume_get_all.side_effect = [[vol1, vol2], [vol1]] grp2 = utils.create_group(self.ctxt, group_type_id=fake.GROUP_TYPE_ID, volume_type_ids=[vol_type['id']], availability_zone='nova', source_group_id=grp.id, status=fields.GroupStatus.CREATING) mock_volume_api_create.side_effect = [None, exception.CinderException] self.assertRaises( exception.CinderException, self.group_api._create_group_from_source_group, self.ctxt, grp2, grp.id) mock_rpc_create_group_from_src.assert_not_called() mock_volume_delete.assert_called_once_with(self.ctxt, vol1) grp2.destroy() vol2.destroy() vol1.destroy() grp.destroy() @mock.patch('cinder.group.api.API._create_group_from_group_snapshot') @mock.patch('cinder.group.api.API._create_group_from_source_group') @mock.patch('cinder.group.api.API.update_quota') @mock.patch('cinder.objects.GroupSnapshot.get_by_id') @mock.patch('cinder.objects.SnapshotList.get_all_for_group_snapshot') @mock.patch('cinder.scheduler.rpcapi.SchedulerAPI.validate_host_capacity') def test_create_from_src(self, mock_validate_host, mock_snap_get_all, mock_group_snap_get, mock_update_quota, mock_create_from_group, mock_create_from_snap): name = "test_group" description = "this is a test group" grp = utils.create_group(self.ctxt, group_type_id=fake.GROUP_TYPE_ID, volume_type_ids=[fake.VOLUME_TYPE_ID], availability_zone='nova', name=name, description=description, status=fields.GroupStatus.AVAILABLE,) vol1 = utils.create_volume( self.ctxt, availability_zone='nova', volume_type_id=fake.VOLUME_TYPE_ID, group_id=grp.id) snap = utils.create_snapshot(self.ctxt, vol1.id, volume_type_id=fake.VOLUME_TYPE_ID, 
status=fields.SnapshotStatus.AVAILABLE) mock_snap_get_all.return_value = [snap] mock_validate_host.return_host = True grp_snap = utils.create_group_snapshot( self.ctxt, grp.id, group_type_id=fake.GROUP_TYPE_ID, status=fields.GroupStatus.AVAILABLE) mock_group_snap_get.return_value = grp_snap grp2 = utils.create_group(self.ctxt, group_type_id=fake.GROUP_TYPE_ID, volume_type_ids=[fake.VOLUME_TYPE_ID], availability_zone='nova', name=name, description=description, status=fields.GroupStatus.CREATING, group_snapshot_id=grp_snap.id) with mock.patch('cinder.objects.Group') as mock_group: mock_group.return_value = grp2 with mock.patch('cinder.objects.group.Group.create'): ret_group = self.group_api.create_from_src( self.ctxt, name, description, group_snapshot_id=grp_snap.id, source_group_id=None) self.assertEqual(grp2.obj_to_primitive(), ret_group.obj_to_primitive()) mock_create_from_snap.assert_called_once_with( self.ctxt, grp2, grp_snap.id) snap.destroy() grp_snap.destroy() vol1.destroy() grp.destroy() grp2.destroy() @mock.patch('oslo_utils.timeutils.utcnow') @mock.patch('cinder.objects.GroupSnapshot') def test_reset_group_snapshot_status(self, mock_group_snapshot, mock_time_util): mock_time_util.return_value = "time_now" self.group_api.reset_group_snapshot_status( self.ctxt, mock_group_snapshot, fields.GroupSnapshotStatus.ERROR) update_field = {'updated_at': "time_now", 'status': fields.GroupSnapshotStatus.ERROR} mock_group_snapshot.update.assert_called_once_with(update_field) mock_group_snapshot.save.assert_called_once_with() @mock.patch('cinder.scheduler.rpcapi.SchedulerAPI.validate_host_capacity') def test_create_group_from_src_frozen(self, mock_validate_host): service = utils.create_service(self.ctxt, {'frozen': True}) group = utils.create_group(self.ctxt, host=service.host, group_type_id='gt') mock_validate_host.return_value = True group_api = cinder.group.api.API() self.assertRaises(exception.InvalidInput, group_api.create_from_src, self.ctxt, 'group', 'desc', group_snapshot_id=None, source_group_id=group.id) @mock.patch('cinder.objects.volume.Volume.host', new_callable=mock.PropertyMock) @mock.patch('cinder.objects.volume.Volume.cluster_name', new_callable=mock.PropertyMock) @mock.patch('cinder.objects.VolumeList.get_all_by_generic_group') def test_update_volumes_host(self, mock_volume_get_all, mock_cluster_name, mock_host): vol_type = utils.create_volume_type(self.ctxt, name='test_vol_type') grp = utils.create_group(self.ctxt, group_type_id=fake.GROUP_TYPE_ID, volume_type_ids=[vol_type['id']], availability_zone='nova', status=fields.GroupStatus.CREATING, cluster_name='fake_cluster') vol1 = utils.create_volume( self.ctxt, availability_zone=grp.availability_zone, volume_type_id=fake.VOLUME_TYPE_ID, group_id=grp.id) mock_volume = mock.Mock() mock_volume_get_all.return_value = [mock_volume] group_api = cinder.group.api.API() group_api._update_volumes_host(None, grp) mock_cluster_name.assert_called() mock_host.assert_called() self.assertEqual(grp.host, mock_volume.host) self.assertEqual(grp.cluster_name, mock_volume.cluster_name) mock_volume.save.assert_called_once_with() vol1.destroy() grp.destroy() def test_delete_group_frozen(self): service = utils.create_service(self.ctxt, {'frozen': True}) group = utils.create_group(self.ctxt, host=service.host, group_type_id='gt') group_api = cinder.group.api.API() self.assertRaises(exception.InvalidInput, group_api.delete, self.ctxt, group) def test_create_group_snapshot_frozen(self): service = utils.create_service(self.ctxt, {'frozen': True}) group = 
utils.create_group(self.ctxt, host=service.host, group_type_id='gt') group_api = cinder.group.api.API() self.assertRaises(exception.InvalidInput, group_api.create_group_snapshot, self.ctxt, group, 'group_snapshot', 'desc') def test_delete_group_snapshot_frozen(self): service = utils.create_service(self.ctxt, {'frozen': True}) group = utils.create_group(self.ctxt, host=service.host, group_type_id='gt') gsnap = utils.create_group_snapshot(self.ctxt, group.id) group_api = cinder.group.api.API() self.assertRaises(exception.InvalidInput, group_api.delete_group_snapshot, self.ctxt, gsnap) @mock.patch('cinder.volume.volume_types.get_volume_type_qos_specs', return_value={'qos_specs': {}}) @mock.patch('cinder.scheduler.rpcapi.SchedulerAPI.create_group') def test_cast_create_group(self, mock_create_group, mock_get_volume_type_qos_specs): vol_type = utils.create_volume_type(self.ctxt, name='test_vol_type') encryption_key_id = mock.sentinel.encryption_key_id description = mock.sentinel.description name = mock.sentinel.name req_spec = {'volume_type': vol_type, 'encryption_key_id': encryption_key_id, 'description': description, 'name': name} grp_name = "test_group" grp_description = "this is a test group" grp_spec = {'name': grp_name, 'description': grp_description} grp = utils.create_group(self.ctxt, group_type_id=fake.GROUP_TYPE_ID, volume_type_ids=[vol_type.id], availability_zone='nova') grp_filter_properties = mock.sentinel.group_filter_properties filter_properties_list = mock.sentinel.filter_properties_list self.group_api._cast_create_group(self.ctxt, grp, grp_spec, [req_spec], grp_filter_properties, filter_properties_list) mock_get_volume_type_qos_specs.assert_called_once_with(vol_type.id) exp_vol_properties = { 'size': 0, 'user_id': self.ctxt.user_id, 'project_id': self.ctxt.project_id, 'status': 'creating', 'attach_status': 'detached', 'encryption_key_id': encryption_key_id, 'display_description': description, 'display_name': name, 'volume_type_id': vol_type.id, 'group_type_id': grp.group_type_id, 'availability_zone': grp.availability_zone } exp_req_spec = { 'volume_type': vol_type, 'encryption_key_id': encryption_key_id, 'description': description, 'name': name, 'volume_properties': exp_vol_properties, 'qos_specs': None } exp_grp_properties = { 'size': 0, 'user_id': self.ctxt.user_id, 'project_id': self.ctxt.project_id, 'status': 'creating', 'display_description': grp_description, 'display_name': grp_name, 'group_type_id': grp.group_type_id, } exp_grp_spec = { 'name': grp_name, 'description': grp_description, 'volume_properties': exp_grp_properties, 'qos_specs': None } mock_create_group.assert_called_once_with( self.ctxt, grp, group_spec=exp_grp_spec, request_spec_list=[exp_req_spec], group_filter_properties=grp_filter_properties, filter_properties_list=filter_properties_list) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/group/test_groups_manager.py0000664000175000017500000013345000000000000024240 0ustar00zuulzuul00000000000000# Copyright (C) 2016 EMC Corporation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from unittest import mock import ddt from oslo_config import cfg from oslo_utils import importutils from cinder import context from cinder import db from cinder import exception from cinder import objects from cinder.objects import fields from cinder import quota from cinder.tests.unit import fake_constants as fake from cinder.tests.unit import fake_group from cinder.tests.unit import fake_group_snapshot from cinder.tests.unit import fake_snapshot from cinder.tests.unit import fake_volume from cinder.tests.unit import test from cinder.tests.unit import utils as tests_utils from cinder.volume import api as volume_api from cinder.volume import configuration as conf from cinder.volume import driver from cinder.volume import volume_utils GROUP_QUOTAS = quota.GROUP_QUOTAS CONF = cfg.CONF @ddt.ddt class GroupManagerTestCase(test.TestCase): def setUp(self): super(GroupManagerTestCase, self).setUp() self.volume = importutils.import_object(CONF.volume_manager) self.mock_object(self.volume, '_driver_shares_targets', return_value=False) self.configuration = mock.Mock(conf.Configuration) self.context = context.get_admin_context() self.context.user_id = fake.USER_ID self.project_id = fake.PROJECT3_ID self.context.project_id = self.project_id self.volume.driver.set_initialized() self.volume.stats = {'allocated_capacity_gb': 0, 'pools': {}} self.volume_api = volume_api.API() def test_delete_volume_in_group(self): """Test deleting a volume that's tied to a group fails.""" volume_params = {'status': 'available', 'group_id': fake.GROUP_ID} volume = tests_utils.create_volume(self.context, **volume_params) self.assertRaises(exception.InvalidVolume, self.volume_api.delete, self.context, volume) @mock.patch( 'cinder.tests.fake_driver.FakeLoggingVolumeDriver.' 'create_cloned_volume') @mock.patch( 'cinder.tests.fake_driver.FakeLoggingVolumeDriver.' 'create_volume_from_snapshot') @mock.patch('cinder.tests.fake_driver.FakeLoggingVolumeDriver.' 
'create_volume') def test_create_vol_with_group_id_driver_exception(self, mock_create_volume, mock_create_from_snap, mock_create_cloned_vol): """Test create a volume with group_id but driver exception.""" # create_raw_volume with group id, but driver exception mock_create_volume.side_effect = exception.CinderException group = tests_utils.create_group( self.context, availability_zone=CONF.storage_availability_zone, volume_type_ids=[fake.VOLUME_TYPE_ID], group_type_id=fake.GROUP_TYPE_ID, host=CONF.host) self.volume.create_group(self.context, group) volume = tests_utils.create_volume( self.context, group_id=group.id, volume_type_id=fake.VOLUME_TYPE_ID, status='available', host=group.host) self.assertRaises(exception.CinderException, self.volume.create_volume, self.context, volume) self.assertIsNone(volume.consistencygroup_id) # create volume from_snapshot with group id but driver exception mock_create_from_snap.side_effect = exception.CinderException snapshot = tests_utils.create_snapshot(self.context, volume.id) volume2 = tests_utils.create_volume( self.context, group_id=group.id, snapshot_id=snapshot.id, status='available', host=group.host, volume_type_id=fake.VOLUME_TYPE_ID) self.assertRaises(exception.CinderException, self.volume.create_volume, self.context, volume2) self.assertIsNone(volume2.consistencygroup_id) # create cloned volume with group_id but driver exception mock_create_cloned_vol.side_effect = exception.CinderException volume3 = tests_utils.create_volume( self.context, group_id=group.id, source_volid=volume.id, status='available', host=group.host, volume_type_id=fake.VOLUME_TYPE_ID) self.assertRaises(exception.CinderException, self.volume.create_volume, self.context, volume3) self.assertIsNone(volume3.consistencygroup_id) @mock.patch.object(GROUP_QUOTAS, "reserve", return_value=["RESERVATION"]) @mock.patch.object(GROUP_QUOTAS, "commit") @mock.patch.object(GROUP_QUOTAS, "rollback") @mock.patch.object(driver.VolumeDriver, "delete_group", return_value=({'status': ( fields.GroupStatus.DELETED)}, [])) def test_create_delete_group(self, fake_delete_grp, fake_rollback, fake_commit, fake_reserve): """Test group can be created and deleted.""" def fake_driver_create_grp(context, group): """Make sure that the pool is part of the host.""" self.assertIn('host', group) host = group.host pool = volume_utils.extract_host(host, level='pool') self.assertEqual('fakepool', pool) return {'status': fields.GroupStatus.AVAILABLE} self.mock_object(self.volume.driver, 'create_group', fake_driver_create_grp) group = tests_utils.create_group( self.context, availability_zone=CONF.storage_availability_zone, volume_type_ids=[fake.VOLUME_TYPE_ID], host='fakehost@fakedrv#fakepool', group_type_id=fake.GROUP_TYPE_ID) group = objects.Group.get_by_id(self.context, group.id) self.assertEqual(0, len(self.notifier.notifications), self.notifier.notifications) self.volume.create_group(self.context, group) self.assertEqual(2, len(self.notifier.notifications), self.notifier.notifications) msg = self.notifier.notifications[0] self.assertEqual('group.create.start', msg['event_type']) expected = { 'status': fields.GroupStatus.AVAILABLE, 'name': 'test_group', 'availability_zone': 'nova', 'tenant_id': self.context.project_id, 'created_at': mock.ANY, 'user_id': fake.USER_ID, 'group_id': group.id, 'group_type': fake.GROUP_TYPE_ID } self.assertDictEqual(expected, msg['payload']) msg = self.notifier.notifications[1] self.assertEqual('group.create.end', msg['event_type']) self.assertDictEqual(expected, msg['payload']) 
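# --- Illustrative sketch of the host string convention used below ---
# The fake create_group() in test_create_delete_group checks that the
# scheduler-chosen host carries a pool suffix.  Cinder host strings follow
# 'host@backend#pool' and volume_utils.extract_host() slices them by
# level; a small standalone parser with the same observable behaviour for
# the happy path (an assumption for illustration, not the real helper):
def _split_backend_host(host):
    """Return (host, backend, pool) parsed from 'host@backend#pool'."""
    backend_part, _, pool = host.partition('#')
    bare_host, _, _backend = backend_part.partition('@')
    return bare_host, backend_part, pool or None

assert _split_backend_host('fakehost@fakedrv#fakepool') == (
    'fakehost', 'fakehost@fakedrv', 'fakepool')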
self.assertEqual( group.id, objects.Group.get_by_id(context.get_admin_context(), group.id).id) self.volume.delete_group(self.context, group) grp = objects.Group.get_by_id( context.get_admin_context(read_deleted='yes'), group.id) self.assertEqual(fields.GroupStatus.DELETED, grp.status) self.assertEqual(4, len(self.notifier.notifications), self.notifier.notifications) msg = self.notifier.notifications[2] self.assertEqual('group.delete.start', msg['event_type']) self.assertDictEqual(expected, msg['payload']) msg = self.notifier.notifications[3] self.assertEqual('group.delete.end', msg['event_type']) expected['status'] = fields.GroupStatus.DELETED self.assertDictEqual(expected, msg['payload']) self.assertRaises(exception.NotFound, objects.Group.get_by_id, self.context, group.id) @ddt.data(('', [], 0, None, True), ('1,2', ['available', 'in-use'], 2, None, True), ('1,2,3', ['available', 'in-use', 'error_deleting'], 3, None, False), ('1,2', ['wrong_status', 'available'], 0, exception.InvalidVolume, True), ('1,2', ['available', exception.VolumeNotFound], 0, exception.VolumeNotFound, True)) @ddt.unpack @mock.patch('cinder.objects.Volume.get_by_id') def test__collect_volumes_for_group(self, add_volumes, returned, expected, raise_error, add, mock_get): side_effect = [] class FakeVolume(object): def __init__(self, status): self.status = status self.id = fake.UUID1 for value in returned: if isinstance(value, str): value = FakeVolume(value) else: value = value(volume_id=fake.UUID1) side_effect.append(value) mock_get.side_effect = side_effect group = tests_utils.create_group( self.context, availability_zone=CONF.storage_availability_zone, volume_type_ids=[fake.VOLUME_TYPE_ID], group_type_id=fake.GROUP_TYPE_ID, host=CONF.host) with mock.patch.object(self.volume, '_check_is_our_resource', mock.Mock()) as mock_check: if raise_error: self.assertRaises(raise_error, self.volume._collect_volumes_for_group, None, group, add_volumes, add) else: result = self.volume._collect_volumes_for_group(None, group, add_volumes, add=add) if add: self.assertEqual(expected, mock_check.call_count) self.assertEqual(expected, len(result)) @ddt.data((False, fake.GROUP_TYPE_ID), (True, fake.GROUP_TYPE_ID), (True, fake.GROUP_TYPE2_ID)) @ddt.unpack @mock.patch('cinder.volume.group_types.get_default_cgsnapshot_type', return_value={'id': fake.GROUP_TYPE2_ID}) @mock.patch.object(GROUP_QUOTAS, "reserve", return_value=["RESERVATION"]) @mock.patch.object(GROUP_QUOTAS, "commit") @mock.patch.object(GROUP_QUOTAS, "rollback") @mock.patch.object(driver.VolumeDriver, "create_group", return_value={'status': 'available'}) @mock.patch('cinder.volume.manager.VolumeManager._update_group_generic') @mock.patch.object(driver.VolumeDriver, 'update_consistencygroup') @mock.patch.object(driver.VolumeDriver, "update_group") def test_update_group(self, raise_error, type_id, fake_update_grp, fake_update_cg, fake_generic_update, fake_create_grp, fake_rollback, fake_commit, fake_reserve, fake_get_type): """Test group can be updated.""" group = tests_utils.create_group( self.context, availability_zone=CONF.storage_availability_zone, volume_type_ids=[fake.VOLUME_TYPE_ID], group_type_id=type_id, host=CONF.host) self.volume.create_group(self.context, group) volume = tests_utils.create_volume( self.context, group_id=group.id, volume_type_id=fake.VOLUME_TYPE_ID, status='available', host=group.host) self.volume.create_volume(self.context, volume) volume2 = tests_utils.create_volume( self.context, group_id=None, volume_type_id=fake.VOLUME_TYPE_ID, status='available', 
host=group.host) self.volume.create_volume(self.context, volume) driver_result = ({'status': fields.GroupStatus.AVAILABLE}, [{'id': volume2.id, 'status': 'available'}], [{'id': volume.id, 'status': 'available'}]) if raise_error: fake_update_grp.side_effect = [NotImplementedError] fake_update_cg.return_value = driver_result fake_generic_update.return_value = driver_result else: fake_update_grp.return_value = driver_result with mock.patch.object( self.volume, '_convert_group_to_cg', mock.Mock()) as mock_convert, mock.patch.object( self.volume, '_remove_consistencygroup_id_from_volumes', mock.Mock()): mock_convert.return_value = ('fake_cg', [volume]) self.volume.update_group(self.context, group, add_volumes=volume2.id, remove_volumes=volume.id) if raise_error: if type_id == fake.GROUP_TYPE2_ID: fake_update_cg.assert_called_once_with( self.context, 'fake_cg', add_volumes=mock.ANY, remove_volumes=[volume]) else: fake_generic_update.assert_called_once_with( self.context, group, add_volumes=mock.ANY, remove_volumes=mock.ANY) grp = objects.Group.get_by_id(self.context, group.id) expected = { 'status': fields.GroupStatus.AVAILABLE, 'name': 'test_group', 'availability_zone': 'nova', 'tenant_id': self.context.project_id, 'created_at': mock.ANY, 'user_id': fake.USER_ID, 'group_id': group.id, 'group_type': type_id } self.assertEqual(fields.GroupStatus.AVAILABLE, grp.status) self.assertEqual(10, len(self.notifier.notifications), self.notifier.notifications) msg = self.notifier.notifications[6] self.assertEqual('group.update.start', msg['event_type']) self.assertDictEqual(expected, msg['payload']) msg = self.notifier.notifications[8] self.assertEqual('group.update.end', msg['event_type']) self.assertDictEqual(expected, msg['payload']) grpvolumes = db.volume_get_all_by_generic_group(self.context, group.id) grpvol_ids = [grpvol['id'] for grpvol in grpvolumes] # Verify volume is removed. self.assertNotIn(volume.id, grpvol_ids) # Verify volume is added. self.assertIn(volume2.id, grpvol_ids) volume3 = tests_utils.create_volume( self.context, group_id=None, host=group.host, volume_type_id=fake.VOLUME_TYPE_ID, status='wrong-status') volume_id3 = volume3['id'] volume_get_orig = self.volume.db.volume_get self.volume.db.volume_get = mock.Mock( return_value={'status': 'wrong_status', 'id': volume_id3}) # Try to add a volume in wrong status self.assertRaises(exception.InvalidVolume, self.volume.update_group, self.context, group, add_volumes=volume_id3, remove_volumes=None) self.volume.db.volume_get.reset_mock() self.volume.db.volume_get = volume_get_orig def test_update_group_vol_not_found(self): """Test add non existent volume to group""" group = tests_utils.create_group( self.context, availability_zone=CONF.storage_availability_zone, volume_type_ids=[fake.VOLUME_TYPE_ID], group_type_id=fake.GROUP_TYPE_ID, host=CONF.host) self.volume.create_group(self.context, group) self.assertRaises(exception.VolumeNotFound, self.volume.update_group, self.context, group, add_volumes=fake.VOLUME_ID, remove_volumes=fake.VOLUME2_ID) @mock.patch('cinder.db.sqlalchemy.api.' 'volume_glance_metadata_copy_to_volume') @mock.patch('cinder.db.sqlalchemy.api.' 
'volume_glance_metadata_copy_from_volume_to_volume') @mock.patch.object(driver.VolumeDriver, "create_group", return_value={'status': 'available'}) @mock.patch.object(driver.VolumeDriver, "delete_group", return_value=({'status': 'deleted'}, [])) @mock.patch.object(driver.VolumeDriver, "create_group_snapshot", return_value={'status': 'available'}) @mock.patch.object(driver.VolumeDriver, "delete_group_snapshot", return_value=({'status': 'deleted'}, [])) @mock.patch.object(driver.VolumeDriver, "create_group_from_src", return_value=(None, None)) @mock.patch('cinder.volume.drivers.lvm.LVMVolumeDriver.' 'create_volume_from_snapshot') @mock.patch('cinder.volume.drivers.lvm.LVMVolumeDriver.' 'create_cloned_volume') def test_create_group_from_src(self, mock_create_cloned_vol, mock_create_vol_from_snap, mock_create_from_src, mock_delete_grpsnap, mock_create_grpsnap, mock_delete_grp, mock_create_grp, mock_metadata_copy_volume_to_volume, mock_metadata_copy_to_volume): """Test group can be created and deleted.""" group = tests_utils.create_group( self.context, availability_zone=CONF.storage_availability_zone, status=fields.GroupStatus.AVAILABLE, volume_type_ids=[fake.VOLUME_TYPE_ID], group_type_id=fake.GROUP_TYPE_ID, host=CONF.host) volume = tests_utils.create_volume( self.context, group_id=group.id, status='available', multiattach=True, bootable=True, host=group.host, volume_type_id=fake.VOLUME_TYPE_ID, size=1) volume_id = volume['id'] group_snapshot_returns = self._create_group_snapshot(group.id, [volume_id]) group_snapshot = group_snapshot_returns[0] snapshot_id = group_snapshot_returns[1][0]['id'] # Create group from source group snapshot. group2 = tests_utils.create_group( self.context, availability_zone=CONF.storage_availability_zone, group_snapshot_id=group_snapshot.id, volume_type_ids=[fake.VOLUME_TYPE_ID], group_type_id=fake.GROUP_TYPE_ID, host=CONF.host) group2 = objects.Group.get_by_id(self.context, group2.id) volume2 = tests_utils.create_volume( self.context, group_id=group2.id, snapshot_id=snapshot_id, status='available', host=group2.host, volume_type_id=fake.VOLUME_TYPE_ID) self.volume.create_volume(self.context, volume2) self.volume.create_group_from_src( self.context, group2, group_snapshot=group_snapshot) grp2 = objects.Group.get_by_id(self.context, group2.id) expected = { 'status': fields.GroupStatus.AVAILABLE, 'name': 'test_group', 'availability_zone': 'nova', 'tenant_id': self.context.project_id, 'created_at': mock.ANY, 'user_id': fake.USER_ID, 'group_id': group2.id, 'group_type': fake.GROUP_TYPE_ID, } self.assertEqual(fields.GroupStatus.AVAILABLE, grp2.status) self.assertEqual(group2.id, grp2['id']) self.assertEqual(group_snapshot.id, grp2['group_snapshot_id']) self.assertIsNone(grp2['source_group_id']) msg = self.notifier.notifications[2] self.assertEqual('group.create.start', msg['event_type']) self.assertDictEqual(expected, msg['payload']) msg = self.notifier.notifications[4] self.assertEqual('group.create.end', msg['event_type']) self.assertDictEqual(expected, msg['payload']) if len(self.notifier.notifications) > 6: self.assertFalse(self.notifier.notifications[6], self.notifier.notifications) self.assertEqual(6, len(self.notifier.notifications), self.notifier.notifications) self.volume.delete_group(self.context, group2) if len(self.notifier.notifications) > 9: self.assertFalse(self.notifier.notifications[10], self.notifier.notifications) self.assertEqual(9, len(self.notifier.notifications), self.notifier.notifications) msg = self.notifier.notifications[6] 
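# --- Illustrative sketch (simplified stand-in, not the real FakeNotifier) ---
# The notification assertions in this test rely on every group operation
# emitting a '<resource>.<action>.start' / '<resource>.<action>.end' pair in
# order, so absolute indexes into the notification list identify which
# operation a payload belongs to.  A toy notifier with the same pairing
# behaviour:
class _PairNotifier(object):
    def __init__(self):
        self.notifications = []

    def emit(self, resource, action, payload):
        for phase in ('start', 'end'):
            self.notifications.append(
                {'event_type': '%s.%s.%s' % (resource, action, phase),
                 'payload': payload})

_n = _PairNotifier()
_n.emit('group', 'create', {'group_id': 'g1'})
_n.emit('group', 'delete', {'group_id': 'g1'})
assert _n.notifications[2]['event_type'] == 'group.delete.start'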
self.assertEqual('group.delete.start', msg['event_type']) expected['status'] = fields.GroupStatus.AVAILABLE self.assertDictEqual(expected, msg['payload']) msg = self.notifier.notifications[8] self.assertEqual('group.delete.end', msg['event_type']) expected['status'] = fields.GroupStatus.DELETED self.assertDictEqual(expected, msg['payload']) grp2 = objects.Group.get_by_id( context.get_admin_context(read_deleted='yes'), group2.id) self.assertEqual(fields.GroupStatus.DELETED, grp2.status) self.assertRaises(exception.NotFound, objects.Group.get_by_id, self.context, group2.id) # Create group from source group group3 = tests_utils.create_group( self.context, availability_zone=CONF.storage_availability_zone, source_group_id=group.id, volume_type_ids=[fake.VOLUME_TYPE_ID], group_type_id=fake.GROUP_TYPE_ID, host=CONF.host) volume3 = tests_utils.create_volume( self.context, group_id=group3.id, source_volid=volume_id, status='available', host=group3.host, volume_type_id=fake.VOLUME_TYPE_ID) self.volume.create_volume(self.context, volume3) self.volume.create_group_from_src( self.context, group3, source_group=group) grp3 = objects.Group.get_by_id(self.context, group3.id) vol3 = objects.Volume.get_by_id(self.context, volume3.id) self.assertEqual(fields.GroupStatus.AVAILABLE, grp3.status) self.assertEqual(group3.id, grp3.id) self.assertEqual(group.id, grp3.source_group_id) self.assertIsNone(grp3.group_snapshot_id) self.assertEqual(volume.multiattach, vol3.multiattach) self.assertEqual(volume.bootable, vol3.bootable) self.volume.delete_group_snapshot(self.context, group_snapshot) self.volume.delete_group(self.context, group) def test_sort_snapshots(self): vol1 = {'id': fake.VOLUME_ID, 'name': 'volume 1', 'snapshot_id': fake.SNAPSHOT_ID, 'group_id': fake.GROUP_ID} vol2 = {'id': fake.VOLUME2_ID, 'name': 'volume 2', 'snapshot_id': fake.SNAPSHOT2_ID, 'group_id': fake.GROUP_ID} vol3 = {'id': fake.VOLUME3_ID, 'name': 'volume 3', 'snapshot_id': fake.SNAPSHOT3_ID, 'group_id': fake.GROUP_ID} snp1 = {'id': fake.SNAPSHOT_ID, 'name': 'snap 1', 'group_snapshot_id': fake.GROUP_ID} snp2 = {'id': fake.SNAPSHOT2_ID, 'name': 'snap 2', 'group_snapshot_id': fake.GROUP_ID} snp3 = {'id': fake.SNAPSHOT3_ID, 'name': 'snap 3', 'group_snapshot_id': fake.GROUP_ID} snp1_obj = fake_snapshot.fake_snapshot_obj(self.context, **snp1) snp2_obj = fake_snapshot.fake_snapshot_obj(self.context, **snp2) snp3_obj = fake_snapshot.fake_snapshot_obj(self.context, **snp3) volumes = [] snapshots = [] volumes.append(vol1) volumes.append(vol2) volumes.append(vol3) snapshots.append(snp2_obj) snapshots.append(snp3_obj) snapshots.append(snp1_obj) i = 0 for vol in volumes: snap = snapshots[i] i += 1 self.assertNotEqual(vol['snapshot_id'], snap.id) sorted_snaps = self.volume._sort_snapshots(volumes, snapshots) i = 0 for vol in volumes: snap = sorted_snaps[i] i += 1 self.assertEqual(vol['snapshot_id'], snap.id) snapshots[2]['id'] = fake.WILL_NOT_BE_FOUND_ID self.assertRaises(exception.SnapshotNotFound, self.volume._sort_snapshots, volumes, snapshots) self.assertRaises(exception.InvalidInput, self.volume._sort_snapshots, volumes, []) def test_sort_source_vols(self): vol1 = {'id': '1', 'name': 'volume 1', 'source_volid': '1', 'group_id': '2'} vol2 = {'id': '2', 'name': 'volume 2', 'source_volid': '2', 'group_id': '2'} vol3 = {'id': '3', 'name': 'volume 3', 'source_volid': '3', 'group_id': '2'} src_vol1 = {'id': '1', 'name': 'source vol 1', 'group_id': '1'} src_vol2 = {'id': '2', 'name': 'source vol 2', 'group_id': '1'} src_vol3 = {'id': '3', 'name': 'source 
vol 3', 'group_id': '1'} volumes = [] src_vols = [] volumes.append(vol1) volumes.append(vol2) volumes.append(vol3) src_vols.append(src_vol2) src_vols.append(src_vol3) src_vols.append(src_vol1) i = 0 for vol in volumes: src_vol = src_vols[i] i += 1 self.assertNotEqual(vol['source_volid'], src_vol['id']) sorted_src_vols = self.volume._sort_source_vols(volumes, src_vols) i = 0 for vol in volumes: src_vol = sorted_src_vols[i] i += 1 self.assertEqual(vol['source_volid'], src_vol['id']) src_vols[2]['id'] = '9999' self.assertRaises(exception.VolumeNotFound, self.volume._sort_source_vols, volumes, src_vols) self.assertRaises(exception.InvalidInput, self.volume._sort_source_vols, volumes, []) def _create_group_snapshot(self, group_id, volume_ids, size='0'): """Create a group_snapshot object.""" grpsnap = objects.GroupSnapshot(self.context) grpsnap.user_id = fake.USER_ID grpsnap.project_id = fake.PROJECT_ID grpsnap.group_id = group_id grpsnap.status = fields.GroupStatus.CREATING grpsnap.create() # Create snapshot list for volume_id in volume_ids: snaps = [] snap = objects.Snapshot(context.get_admin_context()) snap.volume_size = size snap.user_id = fake.USER_ID snap.project_id = fake.PROJECT_ID snap.volume_id = volume_id snap.status = fields.SnapshotStatus.AVAILABLE snap.group_snapshot_id = grpsnap.id snap.create() snaps.append(snap) return grpsnap, snaps @ddt.data((CONF.host, None), (CONF.host + 'fake', 'mycluster')) @ddt.unpack @mock.patch('cinder.tests.unit.fake_notifier.FakeNotifier._notify') @mock.patch('cinder.volume.driver.VolumeDriver.create_group', autospec=True, return_value={'status': 'available'}) @mock.patch('cinder.volume.driver.VolumeDriver.delete_group', autospec=True, return_value=({'status': 'deleted'}, [])) @mock.patch('cinder.volume.driver.VolumeDriver.create_group_snapshot', autospec=True, return_value=({'status': 'available'}, [])) @mock.patch('cinder.volume.driver.VolumeDriver.delete_group_snapshot', autospec=True, return_value=({'status': 'deleted'}, [])) def test_create_delete_group_snapshot(self, host, cluster, mock_del_grpsnap, mock_create_grpsnap, mock_del_grp, _mock_create_grp, mock_notify): """Test group_snapshot can be created and deleted.""" original_add_volume_type_opts = quota.QUOTAS.add_volume_type_opts quota.QUOTAS.add_volume_type_opts = mock.MagicMock( side_effect=original_add_volume_type_opts ) self.volume.cluster = cluster group = tests_utils.create_group( self.context, cluster_name=cluster, availability_zone=CONF.storage_availability_zone, volume_type_ids=[fake.VOLUME_TYPE_ID], group_type_id=fake.GROUP_TYPE_ID, host=host) volume = tests_utils.create_volume( self.context, group_id=group.id, host=group.host, cluster_name=group.cluster_name, volume_type_id=fake.VOLUME_TYPE_ID) self.volume.create_volume(self.context, volume) self.assert_notify_called(mock_notify, (['INFO', 'volume.create.start'], ['INFO', 'volume.create.end'])) group_snapshot_returns = self._create_group_snapshot(group.id, [volume.id]) group_snapshot = group_snapshot_returns[0] self.volume.create_group_snapshot(self.context, group_snapshot) self.assertEqual(group_snapshot.id, objects.GroupSnapshot.get_by_id( context.get_admin_context(), group_snapshot.id).id) self.assert_notify_called(mock_notify, (['INFO', 'volume.create.start'], ['INFO', 'volume.create.end'], ['INFO', 'group_snapshot.create.start'], ['INFO', 'snapshot.create.start'], ['INFO', 'group_snapshot.create.end'], ['INFO', 'snapshot.create.end'])) self.volume.delete_group_snapshot(self.context, group_snapshot) reserve_opts = { 
'snapshots': -1, 'gigabytes': 0 } quota.QUOTAS.add_volume_type_opts.assert_called() self.assertEqual( (reserve_opts, fake.VOLUME_TYPE_ID), quota.QUOTAS.add_volume_type_opts.call_args[0][1:] ) quota.QUOTAS.add_volume_type_opts = original_add_volume_type_opts self.assert_notify_called(mock_notify, (['INFO', 'volume.create.start'], ['INFO', 'volume.create.end'], ['INFO', 'group_snapshot.create.start'], ['INFO', 'snapshot.create.start'], ['INFO', 'group_snapshot.create.end'], ['INFO', 'snapshot.create.end'], ['INFO', 'group_snapshot.delete.start'], ['INFO', 'snapshot.delete.start'], ['INFO', 'group_snapshot.delete.end'], ['INFO', 'snapshot.delete.end'])) grpsnap = objects.GroupSnapshot.get_by_id( context.get_admin_context(read_deleted='yes'), group_snapshot.id) self.assertEqual('deleted', grpsnap.status) self.assertRaises(exception.NotFound, objects.GroupSnapshot.get_by_id, self.context, group_snapshot.id) self.volume.delete_group(self.context, group) self.assertTrue(mock_create_grpsnap.called) self.assertTrue(mock_del_grpsnap.called) self.assertTrue(mock_del_grp.called) @mock.patch('cinder.volume.driver.VolumeDriver.create_group', return_value={'status': 'available'}) @mock.patch('cinder.volume.driver.VolumeDriver.delete_group', return_value=({'status': 'deleted'}, [])) def test_delete_group_correct_host(self, mock_del_grp, _mock_create_grp): """Test group can be deleted. Test group can be deleted when volumes are on the correct volume node. """ group = tests_utils.create_group( self.context, availability_zone=CONF.storage_availability_zone, volume_type_ids=[fake.VOLUME_TYPE_ID], group_type_id=fake.GROUP_TYPE_ID) volume = tests_utils.create_volume( self.context, group_id=group.id, host='host1@backend1#pool1', status='creating', volume_type_id=fake.VOLUME_TYPE_ID, size=1) self.volume.host = 'host1@backend1' self.volume.create_volume(self.context, volume) self.volume.delete_group(self.context, group) grp = objects.Group.get_by_id( context.get_admin_context(read_deleted='yes'), group.id) self.assertEqual(fields.GroupStatus.DELETED, grp.status) self.assertRaises(exception.NotFound, objects.Group.get_by_id, self.context, group.id) self.assertTrue(mock_del_grp.called) @mock.patch('cinder.volume.driver.VolumeDriver.create_group', mock.Mock(return_value={'status': 'available'})) @mock.patch('cinder.volume.driver.VolumeDriver.delete_group', return_value=({'status': 'deleted'}, [])) def test_delete_group_cluster(self, mock_del_grp): """Test group can be deleted on another service in the cluster.""" cluster_name = 'cluster@backend1' self.volume.host = 'host2@backend1' self.volume.cluster = cluster_name group = tests_utils.create_group( self.context, host=CONF.host + 'fake', cluster_name=cluster_name, availability_zone=CONF.storage_availability_zone, volume_type_ids=[fake.VOLUME_TYPE_ID], group_type_id=fake.GROUP_TYPE_ID) volume = tests_utils.create_volume( self.context, group_id=group.id, host='host1@backend1#pool1', cluster_name=cluster_name, status='creating', volume_type_id=fake.VOLUME_TYPE_ID, size=1) self.volume.host = 'host2@backend1' self.volume.create_volume(self.context, volume) self.volume.delete_group(self.context, group) grp = objects.Group.get_by_id( context.get_admin_context(read_deleted='yes'), group.id) self.assertEqual(fields.GroupStatus.DELETED, grp.status) self.assertRaises(exception.NotFound, objects.Group.get_by_id, self.context, group.id) self.assertTrue(mock_del_grp.called) @mock.patch('cinder.volume.driver.VolumeDriver.create_group', return_value={'status': 'available'}) def 
test_delete_group_wrong_host(self, *_mock_create_grp): """Test group cannot be deleted. Test group cannot be deleted when volumes in the group are not local to the volume node. """ group = tests_utils.create_group( self.context, availability_zone=CONF.storage_availability_zone, volume_type_ids=[fake.VOLUME_TYPE_ID], group_type_id=fake.GROUP_TYPE_ID) volume = tests_utils.create_volume( self.context, group_id=group.id, host='host1@backend1#pool1', status='creating', volume_type_id=fake.VOLUME_TYPE_ID, size=1) self.volume.host = 'host1@backend2' self.volume.create_volume(self.context, volume) self.assertRaises(exception.Invalid, self.volume.delete_group, self.context, group) grp = objects.Group.get_by_id(self.context, group.id) # Group is not deleted self.assertEqual(fields.GroupStatus.AVAILABLE, grp.status) def test_create_volume_with_group_invalid_type(self): """Test volume creation with group & invalid volume type.""" db_vol_type = db.volume_type_get_by_name(context.get_admin_context(), '__DEFAULT__') grp = tests_utils.create_group( self.context, availability_zone=CONF.storage_availability_zone, status=fields.GroupStatus.AVAILABLE, volume_type_ids=[db_vol_type['id']], group_type_id=fake.GROUP_TYPE_ID, host=CONF.host) fake_type = fake_volume.fake_volume_type_obj( self.context, id=fake.VOLUME_TYPE_ID, name='fake') # Volume type must be provided when creating a volume in a # group. self.assertRaises(exception.InvalidInput, self.volume_api.create, self.context, 1, 'vol1', 'volume 1', group=grp) # Volume type must be valid. self.assertRaises(exception.InvalidInput, self.volume_api.create, self.context, 1, 'vol1', 'volume 1', volume_type=fake_type, group=grp) @mock.patch('cinder.volume.driver.VolumeDriver.create_group_snapshot', autospec=True, return_value=({'status': 'available'}, [])) def test_create_group_snapshot_with_bootable_volumes(self, mock_create_grpsnap): """Test group_snapshot can be created and deleted.""" group = tests_utils.create_group( self.context, availability_zone=CONF.storage_availability_zone, volume_type_ids=[fake.VOLUME_TYPE_ID], group_type_id=fake.GROUP_TYPE_ID, host=CONF.host) volume = tests_utils.create_volume( self.context, group_id=group.id, host=group.host, volume_type_id=fake.VOLUME_TYPE_ID) self.volume.create_volume(self.context, volume) # Create a bootable volume bootable_vol_params = {'status': 'creating', 'host': CONF.host, 'size': 1, 'bootable': True} bootable_vol = tests_utils.create_volume(self.context, group_id=group.id, **bootable_vol_params) # Create a common volume self.volume.create_volume(self.context, bootable_vol) volume_ids = [volume.id, bootable_vol.id] group_snapshot_returns = self._create_group_snapshot(group.id, volume_ids) group_snapshot = group_snapshot_returns[0] self.volume.create_group_snapshot(self.context, group_snapshot) self.assertEqual(group_snapshot.id, objects.GroupSnapshot.get_by_id( context.get_admin_context(), group_snapshot.id).id) self.assertTrue(mock_create_grpsnap.called) @mock.patch( 'cinder.tests.fake_driver.FakeLoggingVolumeDriver.create_snapshot') def test_create_group_snapshot_generic(self, mock_create_snap): grp_snp = {'id': fake.GROUP_SNAPSHOT_ID, 'group_id': fake.GROUP_ID, 'name': 'group snap 1'} snp1 = {'id': fake.SNAPSHOT_ID, 'name': 'snap 1', 'group_snapshot_id': fake.GROUP_SNAPSHOT_ID, 'volume_id': fake.VOLUME_ID} snp2 = {'id': fake.SNAPSHOT2_ID, 'name': 'snap 2', 'group_snapshot_id': fake.GROUP_SNAPSHOT_ID, 'volume_id': fake.VOLUME2_ID} snp1_obj = fake_snapshot.fake_snapshot_obj(self.context, **snp1) snp2_obj = 
fake_snapshot.fake_snapshot_obj(self.context, **snp2) snapshots = [] snapshots.append(snp1_obj) snapshots.append(snp2_obj) driver_update = {'test_snap_key': 'test_val'} mock_create_snap.return_value = driver_update model_update, snapshot_model_updates = ( self.volume._create_group_snapshot_generic( self.context, grp_snp, snapshots)) for update in snapshot_model_updates: self.assertEqual(driver_update['test_snap_key'], update['test_snap_key']) @mock.patch( 'cinder.tests.fake_driver.FakeLoggingVolumeDriver.' 'create_volume_from_snapshot') @mock.patch( 'cinder.tests.fake_driver.FakeLoggingVolumeDriver.' 'create_cloned_volume') def test_create_group_from_src_generic(self, mock_create_clone, mock_create_vol_from_snap): grp = {'id': fake.GROUP_ID, 'name': 'group 1'} grp_snp = {'id': fake.GROUP_SNAPSHOT_ID, 'group_id': fake.GROUP_ID, 'name': 'group snap 1'} grp2 = {'id': fake.GROUP2_ID, 'name': 'group 2', 'group_snapshot_id': fake.GROUP_SNAPSHOT_ID} vol1 = {'id': fake.VOLUME_ID, 'name': 'volume 1', 'group_id': fake.GROUP_ID} vol2 = {'id': fake.VOLUME2_ID, 'name': 'volume 2', 'group_id': fake.GROUP_ID} snp1 = {'id': fake.SNAPSHOT_ID, 'name': 'snap 1', 'group_snapshot_id': fake.GROUP_SNAPSHOT_ID, 'volume_id': fake.VOLUME_ID} snp2 = {'id': fake.SNAPSHOT2_ID, 'name': 'snap 2', 'group_snapshot_id': fake.GROUP_SNAPSHOT_ID, 'volume_id': fake.VOLUME2_ID} snp1_obj = fake_snapshot.fake_snapshot_obj(self.context, **snp1) snp2_obj = fake_snapshot.fake_snapshot_obj(self.context, **snp2) snapshots = [] snapshots.append(snp1_obj) snapshots.append(snp2_obj) vol3 = {'id': fake.VOLUME3_ID, 'name': 'volume 3', 'snapshot_id': fake.SNAPSHOT_ID, 'group_id': fake.GROUP2_ID} vol4 = {'id': fake.VOLUME4_ID, 'name': 'volume 4', 'snapshot_id': fake.SNAPSHOT2_ID, 'group_id': fake.GROUP2_ID} vol3_obj = fake_volume.fake_volume_obj(self.context, **vol3) vol4_obj = fake_volume.fake_volume_obj(self.context, **vol4) vols2 = [] vols2.append(vol3_obj) vols2.append(vol4_obj) grp2_obj = fake_group.fake_group_obj(self.context, **grp2) grp_snp_obj = fake_group_snapshot.fake_group_snapshot_obj( self.context, **grp_snp) driver_update = {'test_key': 'test_val'} mock_create_vol_from_snap.return_value = driver_update model_update, vol_model_updates = ( self.volume._create_group_from_src_generic( self.context, grp2_obj, vols2, grp_snp_obj, snapshots)) for update in vol_model_updates: self.assertEqual(driver_update['test_key'], update['test_key']) vol1_obj = fake_volume.fake_volume_obj(self.context, **vol1) vol2_obj = fake_volume.fake_volume_obj(self.context, **vol2) vols = [] vols.append(vol1_obj) vols.append(vol2_obj) grp_obj = fake_group.fake_group_obj(self.context, **grp) grp3 = {'id': fake.GROUP3_ID, 'name': 'group 3', 'source_group_id': fake.GROUP_ID} grp3_obj = fake_group.fake_group_obj(self.context, **grp3) vol5 = {'id': fake.VOLUME5_ID, 'name': 'volume 5', 'source_volid': fake.VOLUME_ID, 'group_id': fake.GROUP3_ID} vol6 = {'id': fake.VOLUME6_ID, 'name': 'volume 6', 'source_volid': fake.VOLUME2_ID, 'group_id': fake.GROUP3_ID} vol5_obj = fake_volume.fake_volume_obj(self.context, **vol5) vol6_obj = fake_volume.fake_volume_obj(self.context, **vol6) vols3 = [] vols3.append(vol5_obj) vols3.append(vol6_obj) driver_update = {'test_key2': 'test_val2'} mock_create_clone.return_value = driver_update model_update, vol_model_updates = ( self.volume._create_group_from_src_generic( self.context, grp3_obj, vols3, None, None, grp_obj, vols)) for update in vol_model_updates: self.assertEqual(driver_update['test_key2'], update['test_key2']) 
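# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the upstream test module): the group tests
# above replace every backend call with mock.patch(..., autospec=True) plus a
# canned model update, so the manager logic runs against no real storage. A
# minimal standalone version of that stubbing pattern looks roughly like this;
# the helper name _example_stubbed_create_group is hypothetical.
from unittest import mock


def _example_stubbed_create_group():
    """Sketch only: call the patched driver method and get the canned update."""
    with mock.patch('cinder.volume.driver.VolumeDriver.create_group',
                    autospec=True,
                    return_value={'status': 'available'}) as fake_create:
        from cinder.volume import driver

        # Any code path that ends up in driver.create_group() now receives
        # the canned model update instead of touching a real backend.
        model_update = driver.VolumeDriver.create_group(
            mock.sentinel.driver, mock.sentinel.context, mock.sentinel.group)
        assert model_update == {'status': 'available'}
        fake_create.assert_called_once_with(
            mock.sentinel.driver, mock.sentinel.context, mock.sentinel.group)
# ---------------------------------------------------------------------------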
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/group/test_groups_manager_replication.py0000664000175000017500000001272000000000000026625 0ustar00zuulzuul00000000000000# Copyright (C) 2017 Dell Inc. or its subsidiaries. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from unittest import mock import ddt from oslo_config import cfg from oslo_utils import importutils from cinder import context from cinder import exception from cinder import objects from cinder.objects import fields from cinder import quota from cinder.tests.unit import fake_constants as fake from cinder.tests.unit import test from cinder.tests.unit import utils as tests_utils from cinder.volume import api as volume_api from cinder.volume import configuration as conf from cinder.volume import driver from cinder.volume import volume_utils GROUP_QUOTAS = quota.GROUP_QUOTAS CONF = cfg.CONF @ddt.ddt class GroupManagerTestCase(test.TestCase): def setUp(self): super(GroupManagerTestCase, self).setUp() self.volume = importutils.import_object(CONF.volume_manager) self.configuration = mock.Mock(conf.Configuration) self.context = context.get_admin_context() self.context.user_id = fake.USER_ID self.project_id = fake.PROJECT3_ID self.context.project_id = self.project_id self.volume.driver.set_initialized() self.volume.stats = {'allocated_capacity_gb': 0, 'pools': {}} self.volume_api = volume_api.API() @mock.patch.object(GROUP_QUOTAS, "reserve", return_value=["RESERVATION"]) @mock.patch.object(GROUP_QUOTAS, "commit") @mock.patch.object(GROUP_QUOTAS, "rollback") @mock.patch.object(driver.VolumeDriver, "delete_group", return_value=({'status': ( fields.GroupStatus.DELETED)}, [])) @mock.patch.object(driver.VolumeDriver, "enable_replication", return_value=(None, [])) @mock.patch.object(driver.VolumeDriver, "disable_replication", return_value=(None, [])) @mock.patch.object(driver.VolumeDriver, "failover_replication", return_value=(None, [])) def test_replication_group(self, fake_failover_rep, fake_disable_rep, fake_enable_rep, fake_delete_grp, fake_rollback, fake_commit, fake_reserve): """Test enable, disable, and failover replication for group.""" def fake_driver_create_grp(context, group): """Make sure that the pool is part of the host.""" self.assertIn('host', group) host = group.host pool = volume_utils.extract_host(host, level='pool') self.assertEqual('fakepool', pool) return {'status': fields.GroupStatus.AVAILABLE, 'replication_status': fields.ReplicationStatus.DISABLING} self.mock_object(self.volume.driver, 'create_group', fake_driver_create_grp) group = tests_utils.create_group( self.context, availability_zone=CONF.storage_availability_zone, volume_type_ids=[fake.VOLUME_TYPE_ID], host='fakehost@fakedrv#fakepool', group_type_id=fake.GROUP_TYPE_ID) group = objects.Group.get_by_id(self.context, group.id) self.volume.create_group(self.context, group) self.assertEqual( group.id, objects.Group.get_by_id(context.get_admin_context(), group.id).id) 
self.volume.disable_replication(self.context, group) group = objects.Group.get_by_id( context.get_admin_context(), group.id) self.assertEqual(fields.ReplicationStatus.DISABLED, group.replication_status) group.replication_status = fields.ReplicationStatus.ENABLING group.save() self.volume.enable_replication(self.context, group) group = objects.Group.get_by_id( context.get_admin_context(), group.id) self.assertEqual(fields.ReplicationStatus.ENABLED, group.replication_status) group.replication_status = fields.ReplicationStatus.FAILING_OVER group.save() self.volume.failover_replication(self.context, group) group = objects.Group.get_by_id( context.get_admin_context(), group.id) self.assertEqual(fields.ReplicationStatus.FAILED_OVER, group.replication_status) targets = self.volume.list_replication_targets(self.context, group) self.assertIn('replication_targets', targets) self.volume.delete_group(self.context, group) grp = objects.Group.get_by_id( context.get_admin_context(read_deleted='yes'), group.id) self.assertEqual(fields.GroupStatus.DELETED, grp.status) self.assertRaises(exception.NotFound, objects.Group.get_by_id, self.context, group.id) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315577.2031193 cinder-27.0.0/cinder/tests/unit/image/0000775000175000017500000000000000000000000017536 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/image/__init__.py0000664000175000017500000000000000000000000021635 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315577.2031193 cinder-27.0.0/cinder/tests/unit/image/accelerators/0000775000175000017500000000000000000000000022205 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/image/accelerators/__init__.py0000664000175000017500000000000000000000000024304 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/image/accelerators/test_qat_gzip.py0000664000175000017500000002065000000000000025437 0ustar00zuulzuul00000000000000# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
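# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the upstream test module): every test below
# patches cinder.utils.execute together with the QAT and GZIP is_accel_exist()
# probes, drives ImageAccel, and then checks which shell commands would have
# been issued. A condensed, hedged version of the "QAT preferred" compress
# path; the helper name _example_prefer_qat_compress is hypothetical.
def _example_prefer_qat_compress():
    """Sketch only: with QAT reported present, qzip is used instead of gzip."""
    from unittest import mock

    from cinder.image import accelerator

    with mock.patch('cinder.utils.execute') as fake_exec, \
            mock.patch('cinder.image.accelerators.qat.AccelQAT.'
                       'is_accel_exist', return_value=True), \
            mock.patch('cinder.image.accelerators.gzip.AccelGZIP.'
                       'is_accel_exist', return_value=True):
        accel = accelerator.ImageAccel('fake_path', 'fake_path')
        accel.compress_img(run_as_root=True)
        # The qzip invocation mirrors the expected calls asserted below.
        fake_exec.assert_any_call('qzip', '-k', 'fake_path', '-o', 'fake_path',
                                  run_as_root=True)
# ---------------------------------------------------------------------------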
from unittest import mock from cinder import exception from cinder.image import accelerator from cinder.tests.unit import test class TestAccelerators(test.TestCase): @mock.patch('cinder.utils.execute') @mock.patch('cinder.image.accelerators.qat.AccelQAT.is_accel_exist', return_value = True) @mock.patch('cinder.image.accelerators.gzip.AccelGZIP.is_accel_exist', return_value = True) # Compress test, QAT and GZIP available def test_compress_img_prefer_qat_when_available(self, mock_gzip_exist, mock_qat_exist, mock_exec): source = 'fake_path' dest = 'fake_path' accel = accelerator.ImageAccel(source, dest) accel.compress_img(run_as_root=True) expected = [ mock.call('qzip', '-k', dest, '-o', dest, run_as_root=True), mock.call('mv', dest + '.gz', dest, run_as_root=True) ] mock_exec.assert_has_calls(expected) @mock.patch('cinder.utils.execute') @mock.patch('cinder.image.accelerators.qat.AccelQAT.is_accel_exist', return_value = False) @mock.patch('cinder.image.accelerators.gzip.AccelGZIP.is_accel_exist', return_value = True) # Compress test, QAT not available but GZIP available def test_compress_img_qat_accel_not_exist_gzip_exist(self, mock_gzip_exist, mock_qat_exist, mock_exec): source = 'fake_path' dest = 'fake_path' accel = accelerator.ImageAccel(source, dest) accel.compress_img(run_as_root=True) not_called = mock.call('qzip', '-k', dest, '-o', dest, run_as_root=True) self.assertNotIn(not_called, mock_exec.call_args_list) expected = [ mock.call('gzip', '-k', dest, run_as_root=True), mock.call('mv', dest + '.gz', dest, run_as_root=True) ] mock_exec.assert_has_calls(expected) @mock.patch('cinder.utils.execute') @mock.patch('cinder.image.accelerators.qat.AccelQAT.is_accel_exist', return_value = True) @mock.patch('cinder.image.accelerators.gzip.AccelGZIP.is_accel_exist', return_value = False) # Compress test, QAT available but GZIP not available def test_compress_img_prefer_qat_without_gzip(self, mock_gzip_exist, mock_qat_exist, mock_exec): source = 'fake_path' dest = 'fake_path' accel = accelerator.ImageAccel(source, dest) accel.compress_img(run_as_root=True) expected = [ mock.call('qzip', '-k', dest, '-o', dest, run_as_root=True), mock.call('mv', dest + '.gz', dest, run_as_root=True) ] mock_exec.assert_has_calls(expected) @mock.patch('cinder.utils.execute') @mock.patch('cinder.image.accelerators.qat.AccelQAT.is_accel_exist', return_value = False) @mock.patch('cinder.image.accelerators.gzip.AccelGZIP.is_accel_exist', return_value = False) # Compress test, no accelerator available def test_compress_img_no_accel_exist(self, mock_gzip_exist, mock_qat_exist, mock_exec): source = 'fake_path' dest = 'fake_path' self.assertRaises(exception.CinderException, accelerator.ImageAccel, source, dest) @mock.patch('cinder.utils.execute') @mock.patch('cinder.image.accelerators.qat.AccelQAT.is_accel_exist', return_value = True) @mock.patch('cinder.image.accelerators.gzip.AccelGZIP.is_accel_exist', return_value = True) # Decompress test, QAT and GZIP available def test_decompress_img_prefer_qat_when_available(self, mock_gzip_exist, mock_qat_exist, mock_exec): source = 'fake_path' dest = 'fake_path' accel = accelerator.ImageAccel(source, dest) accel.decompress_img(run_as_root=True) expected = [ mock.call('mv', source, source + '.gz', run_as_root=True), mock.call('qzip', '-d', source + '.gz', run_as_root=True) ] mock_exec.assert_has_calls(expected) @mock.patch('cinder.utils.execute') @mock.patch('cinder.image.accelerators.qat.AccelQAT.is_accel_exist', return_value = False) 
@mock.patch('cinder.image.accelerators.gzip.AccelGZIP.is_accel_exist', return_value = True) # Decompress test, QAT not available but GZIP available def test_decompress_img_qat_accel_not_exist_gzip_exist(self, mock_gzip_exist, mock_qat_exist, mock_exec): source = 'fake_path' dest = 'fake_path' accel = accelerator.ImageAccel(source, dest) accel.decompress_img(run_as_root=True) not_called = mock.call('qzip', '-d', source + '.gz', run_as_root=True) self.assertNotIn(not_called, mock_exec.call_args_list) expected = [ mock.call('mv', source, source + '.gz', run_as_root=True), mock.call('gzip', '-d', source + '.gz', run_as_root=True) ] mock_exec.assert_has_calls(expected) @mock.patch('cinder.utils.execute') @mock.patch('cinder.image.accelerators.qat.AccelQAT.is_accel_exist', return_value = True) @mock.patch('cinder.image.accelerators.gzip.AccelGZIP.is_accel_exist', return_value = False) # Decompress test, QAT available but GZIP not available def test_decompress_img_prefer_qat_without_gzip(self, mock_gzip_exist, mock_qat_exist, mock_exec): source = 'fake_path' dest = 'fake_path' accel = accelerator.ImageAccel(source, dest) accel.decompress_img(run_as_root=True) expected = [ mock.call('mv', source, source + '.gz', run_as_root=True), mock.call('qzip', '-d', source + '.gz', run_as_root=True) ] mock_exec.assert_has_calls(expected) @mock.patch('cinder.utils.execute') @mock.patch('cinder.image.accelerators.qat.AccelQAT.is_accel_exist', return_value = False) @mock.patch('cinder.image.accelerators.gzip.AccelGZIP.is_accel_exist', return_value = False) # Decompress test, no accelerator available def test_decompress_img_no_accel_exist(self, mock_gzip_exist, mock_qat_exist, mock_exec): source = 'fake_path' dest = 'fake_path' self.assertRaises(exception.CinderException, accelerator.ImageAccel, source, dest) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/image/fake.py0000664000175000017500000002370100000000000021021 0ustar00zuulzuul00000000000000# Copyright 2011 Justin Santa Barbara # Copyright 2012 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Implementation of a fake image service.""" import copy import datetime from unittest import mock import uuid from cinder import exception import cinder.image.glance from cinder.tests.unit import fake_constants class _FakeImageService(object): """Mock (fake) image service for unit testing.""" def __init__(self): self.images = {} # NOTE(justinsb): The OpenStack API can't upload an image? # So, make sure we've got one.. 
timestamp = datetime.datetime(2011, 1, 1, 1, 2, 3) image1 = {'id': fake_constants.IMAGE_ID, 'name': 'fakeimage123456', 'created_at': timestamp, 'updated_at': timestamp, 'deleted_at': None, 'deleted': False, 'status': 'active', 'visibility': 'private', 'protected': False, 'container_format': 'raw', 'disk_format': 'raw', 'properties': {'kernel_id': 'nokernel', 'ramdisk_id': 'nokernel', 'architecture': 'x86_64'}, 'size': 12345678} image2 = {'id': 'a2459075-d96c-40d5-893e-577ff92e721c', 'name': 'fakeimage123456', 'created_at': timestamp, 'updated_at': timestamp, 'deleted_at': None, 'deleted': False, 'status': 'active', 'visibility': 'public', 'protected': True, 'container_format': 'ami', 'disk_format': 'ami', 'properties': {'kernel_id': 'nokernel', 'ramdisk_id': 'nokernel'}, 'size': 1} image3 = {'id': '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6', 'name': 'fakeimage123456', 'created_at': timestamp, 'updated_at': timestamp, 'deleted_at': None, 'deleted': False, 'status': 'active', 'visibility': 'public', 'protected': True, 'container_format': None, 'disk_format': None, 'properties': {'kernel_id': 'nokernel', 'ramdisk_id': 'nokernel'}, 'size': 1000000000000} image4 = {'id': 'cedef40a-ed67-4d10-800e-17455edce175', 'name': 'fakeimage123456', 'created_at': timestamp, 'updated_at': timestamp, 'deleted_at': None, 'deleted': False, 'status': 'active', 'visibility': 'public', 'protected': True, 'container_format': 'ami', 'disk_format': 'ami', 'properties': {'kernel_id': 'nokernel', 'ramdisk_id': 'nokernel'}, 'size': 20000000} image5 = {'id': 'c905cedb-7281-47e4-8a62-f26bc5fc4c77', 'name': 'fakeimage123456', 'created_at': timestamp, 'updated_at': timestamp, 'deleted_at': None, 'deleted': False, 'status': 'active', 'visibility': 'public', 'protected': True, 'container_format': 'ami', 'disk_format': 'ami', 'properties': { 'kernel_id': '155d900f-4e14-4e4c-a73d-069cbf4541e6', 'ramdisk_id': None}, 'size': 50000} image6 = {'id': 'a440c04b-79fa-479c-bed1-0b816eaec379', 'name': 'fakeimage6', 'created_at': timestamp, 'updated_at': timestamp, 'deleted_at': None, 'deleted': False, 'status': 'active', 'visibility': 'public', 'protected': False, 'container_format': 'ova', 'disk_format': 'vhd', 'properties': {'kernel_id': 'nokernel', 'ramdisk_id': 'nokernel', 'architecture': 'x86_64', 'auto_disk_config': 'False'}, 'size': 7777777} image7 = {'id': '70a599e0-31e7-49b7-b260-868f441e862b', 'name': 'fakeimage7', 'created_at': timestamp, 'updated_at': timestamp, 'deleted_at': None, 'deleted': False, 'status': 'active', 'visibility': 'public', 'protected': False, 'container_format': 'ova', 'disk_format': 'vhd', 'properties': {'kernel_id': 'nokernel', 'ramdisk_id': 'nokernel', 'architecture': 'x86_64', 'auto_disk_config': 'True'}, 'size': 1234000000} image8 = {'id': 'b0a599e0-41d7-3582-b260-769f443c862a', 'name': 'fakeimage8', 'created_at': timestamp, 'updated_at': timestamp, 'deleted_at': None, 'deleted': False, 'status': 'active', 'is_public': False, 'container_format': 'bare', 'disk_format': 'raw', 'properties': {'block_device_mapping': [ {'boot_index': 0, 'source_type': 'snapshot', 'snapshot_id': fake_constants.SNAPSHOT_ID}], 'ramdisk_id': 'nokernel', 'architecture': 'x86_64', 'auto_disk_config': 'True'}} self.create(None, image1) self.create(None, image2) self.create(None, image3) self.create(None, image4) self.create(None, image5) self.create(None, image6) self.create(None, image7) self.create(None, image8) self._imagedata = {} self.temp_images = mock.MagicMock() super(_FakeImageService, self).__init__() # TODO(bcwaldon): 
implement optional kwargs such as limit, sort_dir def detail(self, context, **kwargs): """Return list of detailed image information.""" return copy.deepcopy(self.images.values()) def download(self, context, image_id, data): self.show(context, image_id) data.write(self._imagedata.get(image_id, '')) def show(self, context, image_id): """Get data about specified image. Returns a dict containing image data for the given opaque image id. """ image = self.images.get(str(image_id)) if image: return copy.deepcopy(image) raise exception.ImageNotFound(image_id=image_id) def create(self, context, metadata, data=None): """Store the image data and return the new image id. :raises Duplicate: if the image already exist. """ image_id = str(metadata.get('id', uuid.uuid4())) metadata['id'] = image_id if image_id in self.images: raise exception.Duplicate() self.images[image_id] = copy.deepcopy(metadata) if data: self._imagedata[image_id] = data.read() return self.images[image_id] def update(self, context, image_id, metadata, data=None, purge_props=False, store_id=None, base_image_ref=None): """Replace the contents of the given image with the new data. :raises ImageNotFound: if the image does not exist. """ if not self.images.get(image_id): raise exception.ImageNotFound(image_id=image_id) if purge_props: self.images[image_id] = copy.deepcopy(metadata) else: image = self.images[image_id] try: image['properties'].update(metadata.pop('properties')) except Exception: pass image.update(metadata) return self.images[image_id] def delete(self, context, image_id): """Delete the given image. :raises ImageNotFound: if the image does not exist. """ removed = self.images.pop(image_id, None) if not removed: raise exception.ImageNotFound(image_id=image_id) def get_location(self, context, image_id): if image_id in self.images: return 'fake_location' return None def add_location(self, context, image_id, url, metadata): self.update(context, image_id, {'locations': [{'url': url, 'metadata': metadata}]}) return True _fakeImageService = _FakeImageService() def FakeImageService(): return _fakeImageService def FakeImageService_reset(): global _fakeImageService _fakeImageService = _FakeImageService() def mock_image_service(testcase): testcase.mock_object(cinder.image.glance, 'get_remote_image_service', lambda x, y: (FakeImageService(), y)) testcase.mock_object(cinder.image.glance, 'get_default_image_service', mock.Mock(side_effect=FakeImageService)) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/image/glance_stubs.py0000664000175000017500000001212400000000000022561 0ustar00zuulzuul00000000000000# Copyright (c) 2011 Citrix Systems, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
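# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the upstream module): the StubGlanceClient
# defined below is normally wired into GlanceImageService by patching
# cinder.image.glance._create_glance_client, as the glance unit tests later in
# this tree do. The helper name make_stubbed_image_service is hypothetical.
def make_stubbed_image_service(stub_client):
    """Sketch only: return a GlanceImageService backed by ``stub_client``."""
    from unittest import mock

    from cinder.image import glance

    def _fake_create_glance_client(context, netloc, use_ssl,
                                   privileged_user=False):
        return stub_client

    # Real tests use self.mock_object() so the patch is undone automatically;
    # in this sketch the patch is simply left active for the process lifetime.
    mock.patch.object(glance, '_create_glance_client',
                      _fake_create_glance_client).start()
    client_wrapper = glance.GlanceClientWrapper('fake', 'fake_host', 9292)
    return glance.GlanceImageService(client=client_wrapper)
# ---------------------------------------------------------------------------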
import glanceclient.exc NOW_GLANCE_FORMAT = "2010-10-11T10:30:22" IMAGE_ATTRIBUTES = ['size', 'disk_format', 'owner', 'container_format', 'checksum', 'id', 'name', 'created_at', 'updated_at', 'deleted', 'status', 'min_disk', 'min_ram', 'visibility', 'protected'] class StubGlanceClient(object): def __init__(self, images=None): self._images = [] _images = images or [] map(lambda image: self.create(**image), _images) # NOTE(bcwaldon): HACK to get client.images.* to work self.images = lambda: None for fn in ('list', 'get', 'data', 'create', 'update', 'upload', 'delete', 'get_image_locations'): setattr(self.images, fn, getattr(self, fn)) self.schemas = lambda: None setattr(self.schemas, 'get', getattr(self, 'schemas_get')) # TODO(bcwaldon): implement filters def list(self, filters=None, marker=None, limit=30): if marker is None: index = 0 else: for index, image in enumerate(self._images): if image.id == str(marker): index += 1 break else: raise glanceclient.exc.BadRequest('Marker not found') return self._images[index:index + limit] def get(self, image_id): for image in self._images: if image.id == str(image_id): return image raise glanceclient.exc.NotFound(image_id) def get_image_locations(self, image_id): direct_url = None locations = [] for image in self._images: if image.id == str(image_id): if getattr(image, 'direct_url', None): direct_url = image.direct_url if getattr(image, 'locations', []): locations = image.locations if direct_url and direct_url not in locations: locations.append({'url': direct_url}) return locations raise glanceclient.exc.NotFound(image_id) def data(self, image_id): image = self.get(image_id) if getattr(image, 'size', 0): return ['*' * image.size] else: return [] def create(self, **metadata): metadata['created_at'] = NOW_GLANCE_FORMAT metadata['updated_at'] = NOW_GLANCE_FORMAT self._images.append(FakeImage(metadata)) try: image_id = str(metadata['id']) except KeyError: # auto-generate an id if one wasn't provided image_id = str(len(self._images)) self._images[-1].id = image_id return self._images[-1] def update(self, image_id, **metadata): for i, image in enumerate(self._images): if image.id == str(image_id): for k, v in metadata.items(): if k == 'data': setattr(self._images[i], 'size', len(v)) else: setattr(self._images[i], k, v) return self._images[i] raise glanceclient.exc.NotFound(image_id) def delete(self, image_id): for i, image in enumerate(self._images): if image.id == image_id: del self._images[i] return raise glanceclient.exc.NotFound(image_id) def upload(self, image_id, data): for i, image in enumerate(self._images): if image.id == image_id: setattr(self._images[i], 'size', len(data)) return raise glanceclient.exc.NotFound(image_id) def schemas_get(self, schema_name): if schema_name != 'image': raise glanceclient.exc.NotFound() return FakeSchema() class FakeImage(object): def __init__(self, metadata): raw = dict.fromkeys(IMAGE_ATTRIBUTES) raw.update(metadata) self.__dict__['raw'] = raw def __getattr__(self, key): try: return self.__dict__['raw'][key] except KeyError: raise AttributeError(key) def __setattr__(self, key, value): try: self.__dict__['raw'][key] = value except KeyError: raise AttributeError(key) def keys(self): return self.__dict__['raw'].keys() class FakeSchema(object): def is_base_property(self, key): if key in IMAGE_ATTRIBUTES: return True else: return False ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 
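# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the upstream tree): the accelerator tests
# in the module that follows gate every engine call on
# ImageAccel.is_engine_ready(). A condensed, hedged version of that check,
# using only unittest.mock; the helper name _example_engine_gating is
# hypothetical.
from unittest import mock


def _example_engine_gating(engine_ready):
    """Sketch only: engine calls happen iff the engine reports ready."""
    from cinder.image import accelerator

    with mock.patch('cinder.image.accelerator.ImageAccel._get_engine') as ge, \
            mock.patch('cinder.image.accelerator.ImageAccel.is_engine_ready',
                       return_value=engine_ready):
        engine = mock.Mock()
        ge.return_value = engine
        accel = accelerator.ImageAccel(mock.sentinel.src, mock.sentinel.dest)
        accel.compress_img(run_as_root=False)
        if engine_ready:
            engine.compress_img.assert_called()
        else:
            engine.compress_img.assert_not_called()
# ---------------------------------------------------------------------------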
cinder-27.0.0/cinder/tests/unit/image/test_accelerator.py0000664000175000017500000000713400000000000023440 0ustar00zuulzuul00000000000000# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from unittest import mock from cinder.image import accelerator from cinder.tests.unit import test class fakeEngine(object): def __init__(self): pass def compress_img(self, src, dest, run_as_root): pass def decompress_img(self, src, dest, run_as_root): pass class TestAccelerator(test.TestCase): @mock.patch('cinder.image.accelerator.ImageAccel._get_engine') @mock.patch('cinder.image.accelerator.ImageAccel.is_engine_ready', return_value = True) def test_compress_img_engine_ready(self, mock_accel_engine_ready, mock_get_engine): source = mock.sentinel.source dest = mock.sentinel.dest run_as_root = mock.sentinel.run_as_root mock_engine = mock.Mock(spec=fakeEngine) mock_get_engine.return_value = mock_engine accel = accelerator.ImageAccel(source, dest) accel.compress_img(run_as_root=run_as_root) mock_engine.compress_img.assert_called() @mock.patch('cinder.image.accelerator.ImageAccel._get_engine') @mock.patch('cinder.image.accelerator.ImageAccel.is_engine_ready', return_value = False) def test_compress_img_engine_not_ready(self, mock_accel_engine_ready, mock_get_engine): source = mock.sentinel.source dest = mock.sentinel.dest run_as_root = mock.sentinel.run_as_root mock_engine = mock.Mock(spec=fakeEngine) mock_get_engine.return_value = mock_engine accel = accelerator.ImageAccel(source, dest) accel.compress_img(run_as_root=run_as_root) mock_engine.compress_img.assert_not_called() @mock.patch('cinder.image.accelerator.ImageAccel._get_engine') @mock.patch('cinder.image.accelerator.ImageAccel.is_engine_ready', return_value = True) def test_decompress_img_engine_ready(self, mock_accel_engine_ready, mock_get_engine): source = mock.sentinel.source dest = mock.sentinel.dest run_as_root = mock.sentinel.run_as_root mock_engine = mock.Mock(spec=fakeEngine) mock_get_engine.return_value = mock_engine accel = accelerator.ImageAccel(source, dest) accel.decompress_img(run_as_root=run_as_root) mock_engine.decompress_img.assert_called() @mock.patch('cinder.image.accelerator.ImageAccel._get_engine') @mock.patch('cinder.image.accelerator.ImageAccel.is_engine_ready', return_value = False) def test_decompress_img_engine_not_ready(self, mock_accel_engine_ready, mock_get_engine): source = mock.sentinel.source dest = mock.sentinel.dest run_as_root = mock.sentinel.run_as_root mock_engine = mock.Mock(spec=fakeEngine) mock_get_engine.return_value = mock_engine accel = accelerator.ImageAccel(source, dest) accel.decompress_img(run_as_root=run_as_root) mock_engine.decompress_img.assert_not_called() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/image/test_cache.py0000664000175000017500000003044000000000000022213 0ustar00zuulzuul00000000000000# Copyright (C) 2015 Pure Storage, Inc. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from datetime import timedelta from unittest import mock import ddt from oslo_utils import timeutils from cinder import context as ctxt from cinder.db.sqlalchemy import models from cinder.image import cache as image_cache from cinder import objects from cinder.tests.unit import fake_constants as fake from cinder.tests.unit import test @ddt.ddt class ImageVolumeCacheTestCase(test.TestCase): def setUp(self): super(ImageVolumeCacheTestCase, self).setUp() self.mock_db = mock.Mock() self.mock_volume_api = mock.Mock() self.context = ctxt.get_admin_context() self.volume = models.Volume() vol_params = {'id': fake.VOLUME_ID, 'host': 'foo@bar#whatever', 'cluster_name': 'cluster', 'size': 0} self.volume.update(vol_params) self.volume_ovo = objects.Volume(self.context, **vol_params) def _build_cache(self, max_gb=0, max_count=0, clone_across_pools=False): cache = image_cache.ImageVolumeCache(self.mock_db, self.mock_volume_api, max_gb, max_count, clone_across_pools) cache.notifier = self.notifier return cache def _build_entry(self, size=10): entry = { 'id': 1, 'host': 'test@foo#bar', 'cluster_name': 'cluster@foo#bar', 'image_id': 'c7a8b8d4-e519-46c7-a0df-ddf1b9b9fff2', 'image_updated_at': timeutils.utcnow(with_timezone=True), 'volume_id': '70a599e0-31e7-49b7-b260-868f441e862b', 'size': size, 'last_used': timeutils.utcnow(with_timezone=True) } return entry def test_get_by_image_volume(self): cache = self._build_cache() ret = {'id': 1} volume_id = '70a599e0-31e7-49b7-b260-868f441e862b' self.mock_db.image_volume_cache_get_by_volume_id.return_value = ret entry = cache.get_by_image_volume(self.context, volume_id) self.assertEqual(ret, entry) self.mock_db.image_volume_cache_get_by_volume_id.return_value = None entry = cache.get_by_image_volume(self.context, volume_id) self.assertIsNone(entry) def test_evict(self): cache = self._build_cache() entry = self._build_entry() cache.evict(self.context, entry) self.mock_db.image_volume_cache_delete.assert_called_once_with( self.context, entry['volume_id'] ) msg = self.notifier.notifications[0] self.assertEqual('image_volume_cache.evict', msg['event_type']) self.assertEqual('INFO', msg['priority']) self.assertEqual(entry['host'], msg['payload']['host']) self.assertEqual(entry['image_id'], msg['payload']['image_id']) self.assertEqual(1, len(self.notifier.notifications)) @ddt.data((True, True), (True, False), (False, True), (False, False)) @ddt.unpack def test_get_entry(self, clustered, clone_across_pools): cache = self._build_cache(clone_across_pools=clone_across_pools) entry = self._build_entry() image_meta = { 'is_public': True, 'owner': '70a599e0-31e7-49b7-b260-868f441e862b', 'properties': { 'virtual_size': '1.7' }, 'updated_at': entry['image_updated_at'] } (self.mock_db. 
image_volume_cache_get_and_update_last_used.return_value) = entry if not clustered: self.volume_ovo.cluster_name = None expect = {} if clone_across_pools else {'host': self.volume.host} else: expect = {'cluster_name': self.volume.cluster_name} found_entry = cache.get_entry(self.context, self.volume_ovo, entry['image_id'], image_meta) self.assertDictEqual(entry, found_entry) (self.mock_db. image_volume_cache_get_and_update_last_used.assert_called_once_with)( self.context, entry['image_id'], **expect ) msg = self.notifier.notifications[0] self.assertEqual('image_volume_cache.hit', msg['event_type']) self.assertEqual('INFO', msg['priority']) self.assertEqual(entry['host'], msg['payload']['host']) self.assertEqual(entry['image_id'], msg['payload']['image_id']) self.assertEqual(1, len(self.notifier.notifications)) def test_get_entry_not_exists(self): cache = self._build_cache() image_meta = { 'is_public': True, 'owner': '70a599e0-31e7-49b7-b260-868f441e862b', 'properties': { 'virtual_size': '1.7' }, 'updated_at': timeutils.utcnow(with_timezone=True) } image_id = 'c7a8b8d4-e519-46c7-a0df-ddf1b9b9fff2' (self.mock_db. image_volume_cache_get_and_update_last_used.return_value) = None found_entry = cache.get_entry(self.context, self.volume_ovo, image_id, image_meta) self.assertIsNone(found_entry) msg = self.notifier.notifications[0] self.assertEqual('image_volume_cache.miss', msg['event_type']) self.assertEqual('INFO', msg['priority']) self.assertEqual(self.volume.host, msg['payload']['host']) self.assertEqual(image_id, msg['payload']['image_id']) self.assertEqual(1, len(self.notifier.notifications)) @mock.patch('cinder.objects.Volume.get_by_id') def test_get_entry_needs_update(self, mock_volume_by_id): cache = self._build_cache() entry = self._build_entry() image_meta = { 'is_public': True, 'owner': '70a599e0-31e7-49b7-b260-868f441e862b', 'properties': { 'virtual_size': '1.7' }, 'updated_at': entry['image_updated_at'] + timedelta(hours=2) } (self.mock_db. image_volume_cache_get_and_update_last_used.return_value) = entry mock_volume = mock.MagicMock() mock_volume_by_id.return_value = mock_volume found_entry = cache.get_entry(self.context, self.volume_ovo, entry['image_id'], image_meta) # Expect that the cache entry is not returned and the image-volume # for it is deleted. 
self.assertIsNone(found_entry) self.mock_volume_api.delete.assert_called_with(self.context, mock_volume) msg = self.notifier.notifications[0] self.assertEqual('image_volume_cache.miss', msg['event_type']) self.assertEqual('INFO', msg['priority']) self.assertEqual(self.volume.host, msg['payload']['host']) self.assertEqual(entry['image_id'], msg['payload']['image_id']) self.assertEqual(1, len(self.notifier.notifications)) def test_create_cache_entry(self): cache = self._build_cache() entry = self._build_entry() image_meta = { 'updated_at': entry['image_updated_at'] } self.mock_db.image_volume_cache_create.return_value = entry created_entry = cache.create_cache_entry(self.context, self.volume_ovo, entry['image_id'], image_meta) self.assertEqual(entry, created_entry) self.mock_db.image_volume_cache_create.assert_called_once_with( self.context, self.volume_ovo.host, self.volume_ovo.cluster_name, entry['image_id'], entry['image_updated_at'].replace(tzinfo=None), self.volume_ovo.id, self.volume_ovo.size ) def test_ensure_space_unlimited(self): cache = self._build_cache(max_gb=0, max_count=0) has_space = cache.ensure_space(self.context, self.volume) self.assertTrue(has_space) self.volume.size = 500 has_space = cache.ensure_space(self.context, self.volume) self.assertTrue(has_space) def test_ensure_space_no_entries(self): cache = self._build_cache(max_gb=100, max_count=10) self.mock_db.image_volume_cache_get_all.return_value = [] self.volume_ovo.size = 5 has_space = cache.ensure_space(self.context, self.volume_ovo) self.assertTrue(has_space) self.volume_ovo.size = 101 has_space = cache.ensure_space(self.context, self.volume_ovo) self.assertFalse(has_space) def test_ensure_space_need_gb(self): cache = self._build_cache(max_gb=30, max_count=0) mock_delete = mock.patch.object(cache, 'delete_cached_volume').start() entries = [] entry1 = self._build_entry(size=12) entries.append(entry1) entry2 = self._build_entry(size=5) entries.append(entry2) entry3 = self._build_entry(size=10) entries.append(entry3) self.mock_db.image_volume_cache_get_all.return_value = entries self.volume_ovo.size = 15 has_space = cache.ensure_space(self.context, self.volume_ovo) self.assertTrue(has_space) self.assertEqual(2, mock_delete.call_count) mock_delete.assert_any_call(self.context, entry2, mock.ANY) mock_delete.assert_any_call(self.context, entry3, mock.ANY) self.mock_db.image_volume_cache_get_all.assert_called_with( self.context, cluster_name=self.volume_ovo.cluster_name) def test_ensure_space_need_count(self): cache = self._build_cache(max_gb=0, max_count=2) mock_delete = mock.patch.object(cache, 'delete_cached_volume').start() entries = [] entry1 = self._build_entry(size=10) entries.append(entry1) entry2 = self._build_entry(size=5) entries.append(entry2) self.mock_db.image_volume_cache_get_all.return_value = entries self.volume_ovo.size = 12 has_space = cache.ensure_space(self.context, self.volume_ovo) self.assertTrue(has_space) self.assertEqual(1, mock_delete.call_count) mock_delete.assert_any_call(self.context, entry2, mock.ANY) def test_ensure_space_need_gb_and_count(self): cache = self._build_cache(max_gb=30, max_count=3) mock_delete = mock.patch.object(cache, 'delete_cached_volume').start() entries = [] entry1 = self._build_entry(size=10) entries.append(entry1) entry2 = self._build_entry(size=5) entries.append(entry2) entry3 = self._build_entry(size=12) entries.append(entry3) self.mock_db.image_volume_cache_get_all.return_value = entries self.volume_ovo.size = 16 has_space = cache.ensure_space(self.context, 
self.volume_ovo) self.assertTrue(has_space) self.assertEqual(2, mock_delete.call_count) mock_delete.assert_any_call(self.context, entry2, mock.ANY) mock_delete.assert_any_call(self.context, entry3, mock.ANY) def test_ensure_space_cant_free_enough_gb(self): cache = self._build_cache(max_gb=30, max_count=10) mock_delete = mock.patch.object(cache, 'delete_cached_volume').start() entries = list(self._build_entry(size=25)) self.mock_db.image_volume_cache_get_all.return_value = entries self.volume_ovo.size = 50 has_space = cache.ensure_space(self.context, self.volume_ovo) self.assertFalse(has_space) mock_delete.assert_not_called() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/image/test_format_inspector.py0000664000175000017500000004701200000000000024531 0ustar00zuulzuul00000000000000# Copyright 2020 Red Hat, Inc # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import io import os import re import struct import subprocess import tempfile from unittest import mock from oslo_utils import units from cinder.image import format_inspector from cinder.tests.unit import test def get_size_from_qemu_img(filename): output = subprocess.check_output('qemu-img info "%s"' % filename, shell=True) for line in output.split(b'\n'): m = re.search(b'^virtual size: .* .([0-9]+) bytes', line.strip()) if m: return int(m.group(1)) raise Exception('Could not find virtual size with qemu-img') class TestFormatInspectors(test.TestCase): def setUp(self): super(TestFormatInspectors, self).setUp() self._created_files = [] def tearDown(self): super(TestFormatInspectors, self).tearDown() for fn in self._created_files: try: os.remove(fn) except Exception: pass def _create_img(self, fmt, size, subformat=None, options=None, backing_file=None): if fmt == 'vhd': # QEMU calls the vhd format vpc fmt = 'vpc' if options is None: options = {} opt = '' prefix = 'glance-unittest-formatinspector-' if subformat: options['subformat'] = subformat prefix += subformat + '-' if options: opt += '-o ' + ','.join('%s=%s' % (k, v) for k, v in options.items()) if backing_file is not None: opt += ' -b %s -F raw' % backing_file fn = tempfile.mktemp(prefix=prefix, suffix='.%s' % fmt) self._created_files.append(fn) subprocess.check_output( 'qemu-img create -f %s %s %s %i' % (fmt, opt, fn, size), shell=True) return fn def _create_allocated_vmdk(self, size_mb, subformat=None): # We need a "big" VMDK file to exercise some parts of the code of the # format_inspector. A way to create one is to first create an empty # file, and then to convert it with the -S 0 option. 
if subformat is None: # Matches qemu-img default, see `qemu-img convert -O vmdk -o help` subformat = 'monolithicSparse' prefix = 'glance-unittest-formatinspector-%s-' % subformat fn = tempfile.mktemp(prefix=prefix, suffix='.vmdk') self._created_files.append(fn) raw = tempfile.mktemp(prefix=prefix, suffix='.raw') self._created_files.append(raw) # Create a file with pseudo-random data, otherwise it will get # compressed in the streamOptimized format subprocess.check_output( 'dd if=/dev/urandom of=%s bs=1M count=%i' % (raw, size_mb), shell=True) # Convert it to VMDK subprocess.check_output( 'qemu-img convert -f raw -O vmdk -o subformat=%s -S 0 %s %s' % ( subformat, raw, fn), shell=True) return fn def _test_format_at_block_size(self, format_name, img, block_size): fmt = format_inspector.get_inspector(format_name)() self.assertIsNotNone(fmt, 'Did not get format inspector for %s' % ( format_name)) wrapper = format_inspector.InfoWrapper(open(img, 'rb'), fmt) while True: chunk = wrapper.read(block_size) if not chunk: break wrapper.close() return fmt def _test_format_at_image_size(self, format_name, image_size, subformat=None): img = self._create_img(format_name, image_size, subformat=subformat) # Some formats have internal alignment restrictions making this not # always exactly like image_size, so get the real value for comparison virtual_size = get_size_from_qemu_img(img) # Read the format in various sizes, some of which will read whole # sections in a single read, others will be completely unaligned, etc. for block_size in (64 * units.Ki, 512, 17, 1 * units.Mi): fmt = self._test_format_at_block_size(format_name, img, block_size) self.assertTrue(fmt.format_match, 'Failed to match %s at size %i block %i' % ( format_name, image_size, block_size)) self.assertEqual(virtual_size, fmt.virtual_size, ('Failed to calculate size for %s at size %i ' 'block %i') % (format_name, image_size, block_size)) memory = sum(fmt.context_info.values()) self.assertLess(memory, 512 * units.Ki, 'Format used more than 512KiB of memory: %s' % ( fmt.context_info)) def _test_format(self, format_name, subformat=None): # Try a few different image sizes, including some odd and very small # sizes for image_size in (512, 513, 2057, 7): self._test_format_at_image_size(format_name, image_size * units.Mi, subformat=subformat) def test_qcow2(self): self._test_format('qcow2') def test_vhd(self): self._test_format('vhd') def test_vhdx(self): self._test_format('vhdx') def test_vmdk(self): self._test_format('vmdk') def test_vmdk_stream_optimized(self): self._test_format('vmdk', 'streamOptimized') def test_from_file_reads_minimum(self): img = self._create_img('qcow2', 10 * units.Mi) file_size = os.stat(img).st_size fmt = format_inspector.QcowInspector.from_file(img) # We know everything we need from the first 512 bytes of a QCOW image, # so make sure that we did not read the whole thing when we inspect # a local file. 
self.assertLess(fmt.actual_size, file_size) def test_qed_always_unsafe(self): img = self._create_img('qed', 10 * units.Mi) fmt = format_inspector.get_inspector('qed').from_file(img) self.assertTrue(fmt.format_match) self.assertFalse(fmt.safety_check()) def _test_vmdk_bad_descriptor_offset(self, subformat=None): format_name = 'vmdk' image_size = 10 * units.Mi descriptorOffsetAddr = 0x1c BAD_ADDRESS = 0x400 img = self._create_img(format_name, image_size, subformat=subformat) # Corrupt the header fd = open(img, 'r+b') fd.seek(descriptorOffsetAddr) fd.write(struct.pack(' ImageService - This is needed so we can support multiple ImageServices (Glance, Local, etc) 2. ImageService -> API - This is needed so we can support multiple APIs (OpenStack, EC2) """ NOW_GLANCE_OLD_FORMAT = "2010-10-11T10:30:22" NOW_GLANCE_FORMAT = "2010-10-11T10:30:22.000000" class tzinfo(datetime.tzinfo): @staticmethod def utcoffset(*args, **kwargs): return datetime.timedelta() NOW_DATETIME = datetime.datetime(2010, 10, 11, 10, 30, 22, tzinfo=tzinfo()) def setUp(self): super(TestGlanceImageService, self).setUp() client = glance_stubs.StubGlanceClient() service_catalog = [{u'type': u'image', u'name': u'glance', u'endpoints': [{ u'publicURL': u'http://example.com:9292'}]}] self.service = self._create_image_service(client) self.context = context.RequestContext('fake', 'fake', auth_token=True) self.context.service_catalog = service_catalog self.mock_object(glance.time, 'sleep', return_value=None) def _create_image_service(self, client): def _fake_create_glance_client( context, netloc, use_ssl, privileged_user=False): return client self.mock_object(glance, '_create_glance_client', _fake_create_glance_client) client_wrapper = glance.GlanceClientWrapper('fake', 'fake_host', 9292) return glance.GlanceImageService(client=client_wrapper) @staticmethod def _make_fixture(**kwargs): fixture = {'name': None, 'properties': {}, 'status': None, 'visibility': None, 'protected': None} fixture.update(kwargs) return fixture @staticmethod def _make_image_member_fixtures(**kwargs): fixtures = [] fixture = {'status': None, 'image_id': None, 'member_id': None, 'created_at': '2018-03-14T21:48:13Z', 'updated_at': '2018-03-14T21:50:51Z', 'schema': '/v2/schemas/member'} fixture.update(kwargs) fixtures.append(fixture) return fixtures def _make_datetime_fixture(self): return self._make_fixture(created_at=self.NOW_GLANCE_FORMAT, updated_at=self.NOW_GLANCE_FORMAT, deleted_at=self.NOW_GLANCE_FORMAT) def test_list_members(self): fixture = {'status': None, 'image_id': None, 'member_id': None, 'created_at': '2018-03-14T21:48:13Z', 'updated_at': '2018-03-14T21:50:51Z', 'schema': '/v2/schemas/member'} image_id = '97c1ef11-3a64-4756-9f8c-7f9fb5abe09f' member_id = '50fcc79f25524744a2c34682a1a74914' fixture['status'] = 'accepted' fixture['image_id'] = image_id fixture['member_id'] = member_id with mock.patch.object(self.service, '_client') as client_mock: client_mock.call.return_value = self._make_image_member_fixtures( image_id=image_id, member_id=member_id, status='accepted') result = self.service.list_members(self.context, image_id) self.assertEqual([fixture], result) client_mock.call.assert_called_once_with(self.context, 'list', controller='image_members', image_id=image_id) def test_get_api_servers(self): result = glance.get_api_servers(self.context) expected = (u'example.com:9292', False) self.assertEqual(expected, next(result)) def test_get_api_servers_not_mounted_at_root_and_ssl(self): service_catalog = [{u'type': u'image', u'name': u'glance', 
u'endpoints': [{ u'publicURL': u'https://example.com/image'}]}] self.context = context.RequestContext('fake', 'fake', auth_token=True) self.context.service_catalog = service_catalog result = glance.get_api_servers(self.context) expected = (u'example.com/image', True) self.assertEqual(expected, next(result)) def test_create_with_instance_id(self): """Ensure instance_id is persisted as an image-property.""" fixture = {'name': 'test image', 'is_public': False, 'protected': False, 'properties': {'instance_id': '42', 'user_id': 'fake'}} image_id = self.service.create(self.context, fixture)['id'] image_meta = self.service.show(self.context, image_id) expected = { 'id': image_id, 'name': 'test image', 'protected': False, 'size': None, 'min_disk': None, 'min_ram': None, 'disk_format': None, 'container_format': None, 'checksum': None, 'created_at': self.NOW_DATETIME, 'updated_at': self.NOW_DATETIME, 'deleted': None, 'status': None, 'properties': {'instance_id': '42', 'is_public': False, 'user_id': 'fake'}, 'owner': None, 'visibility': None, } self.assertDictEqual(expected, image_meta) image_metas = self.service.detail(self.context) self.assertDictEqual(expected, image_metas[0]) def test_create_without_instance_id(self): """Test Creating images without instance_id. Ensure we can create an image without having to specify an instance_id. Public images are an example of an image not tied to an instance. """ fixture = {'name': 'test image', 'is_public': False, 'protected': False} image_id = self.service.create(self.context, fixture)['id'] expected = { 'id': image_id, 'name': 'test image', 'protected': False, 'size': None, 'min_disk': None, 'min_ram': None, 'disk_format': None, 'container_format': None, 'checksum': None, 'created_at': self.NOW_DATETIME, 'updated_at': self.NOW_DATETIME, 'deleted': None, 'status': None, 'properties': {'is_public': False}, 'owner': None, 'visibility': None, } actual = self.service.show(self.context, image_id) self.assertDictEqual(expected, actual) def test_create_without_is_public(self): """Test Creating images without is_public. Ensure we can create an image without is_public attribute. 
""" fixture = {'name': 'test image', 'protected': False} image_id = self.service.create(self.context, fixture)['id'] expected = { 'id': image_id, 'name': 'test image', 'protected': False, 'size': None, 'min_disk': None, 'min_ram': None, 'disk_format': None, 'container_format': None, 'checksum': None, 'created_at': self.NOW_DATETIME, 'updated_at': self.NOW_DATETIME, 'deleted': None, 'status': None, 'properties': {}, 'owner': None, 'visibility': None, } actual = self.service.show(self.context, image_id) self.assertDictEqual(expected, actual) def test_show_shared_image_membership_success(self): """Test Create Shared Image Membership Success Ensure we can get access to a shared image """ fixture = {'name': 'test image', 'is_public': False, 'protected': False, 'visibility': 'shared'} # pid = self.context.project_id image_id = self.service.create(self.context, fixture)['id'] image = { 'id': image_id, 'name': 'test image', 'protected': False, 'size': None, 'min_disk': None, 'min_ram': None, 'disk_format': None, 'container_format': None, 'checksum': None, 'created_at': self.NOW_DATETIME, 'updated_at': self.NOW_DATETIME, 'deleted': None, 'status': None, 'properties': {'is_public': False}, 'owner': None, 'visibility': None, } member_id = '50fcc79f25524744a2c34682a1a74914' with mock.patch.object(self.service, '_client') as client_mock: with mock.patch.object( self.service, '_translate_from_glance') as tg_mock: tg_mock.return_value = {} mock_image = mock.Mock() mock_image.is_public = False mock_image.properties = {'is_public': False} mock_image.visibility = 'shared' mock_image.keys.return_value = image.keys() client_mock.call.side_effect = [ mock_image, self._make_image_member_fixtures(image_id=image_id, member_id=member_id, status='accepted')] self.context.project_id = member_id self.context.is_admin = False self.context.user_id = image_id self.context.auth_token = False self.service.show(self.context, image_id) def test_show_shared_image_membership_fail_status(self): """Test Create Shared Image Membership Failure Ensure we can't get access to a shared image with the wrong membership status (in this case 'pending') """ fixture = {'name': 'test image', 'is_public': False, 'protected': False, 'visibility': 'shared'} # pid = self.context.project_id image_id = self.service.create(self.context, fixture)['id'] image = { 'id': image_id, 'name': 'test image', 'protected': False, 'size': None, 'min_disk': None, 'min_ram': None, 'disk_format': None, 'container_format': None, 'checksum': None, 'created_at': self.NOW_DATETIME, 'updated_at': self.NOW_DATETIME, 'deleted': None, 'status': None, 'properties': {'is_public': False}, 'owner': None, 'visibility': None, } member_id = '50fcc79f25524744a2c34682a1a74914' with mock.patch.object(self.service, '_client') as client_mock: with mock.patch.object( self.service, '_translate_from_glance') as tg_mock: tg_mock.return_value = {} mock_image = mock.Mock() mock_image.is_public = False mock_image.properties = {'is_public': False} mock_image.visibility = 'shared' mock_image.keys.return_value = image.keys() client_mock.call.side_effect = [ mock_image, self._make_image_member_fixtures(image_id=image_id, member_id=member_id, status='pending')] self.context.project_id = member_id self.context.is_admin = False self.context.user_id = image_id self.context.auth_token = False self.assertRaises(exception.ImageNotFound, self.service.show, self.context, image_id) def test_create(self): fixture = self._make_fixture(name='test image') num_images = len(self.service.detail(self.context)) 
image_id = self.service.create(self.context, fixture)['id'] self.assertIsNotNone(image_id) self.assertEqual(num_images + 1, len(self.service.detail(self.context))) def test_create_and_show_non_existing_image(self): fixture = self._make_fixture(name='test image') image_id = self.service.create(self.context, fixture)['id'] self.assertIsNotNone(image_id) self.assertRaises(exception.ImageNotFound, self.service.show, self.context, 'bad image id') def test_detail_private_image(self): fixture = self._make_fixture(name='test image') fixture['visibility'] = 'private' fixture['protected'] = False properties = {'owner_id': 'proj1'} fixture['properties'] = properties self.service.create(self.context, fixture) proj = self.context.project_id self.context.project_id = 'proj1' image_metas = self.service.detail(self.context) self.context.project_id = proj self.assertEqual(1, len(image_metas)) self.assertEqual('test image', image_metas[0]['name']) self.assertEqual('private', image_metas[0]['visibility']) def test_detail_v2(self): """Check we don't send is_public key by default with Glance v2.""" with mock.patch.object(self.service, '_client') as client_mock: client_mock.return_value = [] result = self.service.detail(self.context) self.assertListEqual([], result) client_mock.call.assert_called_once_with(self.context, 'list') def test_detail_marker(self): fixtures = [] ids = [] for i in range(10): fixture = self._make_fixture(name='TestImage %d' % (i)) fixtures.append(fixture) ids.append(self.service.create(self.context, fixture)['id']) image_metas = self.service.detail(self.context, marker=ids[1]) self.assertEqual(8, len(image_metas)) i = 2 for meta in image_metas: expected = { 'id': ids[i], 'status': None, 'protected': None, 'name': 'TestImage %d' % (i), 'properties': {'properties': {}}, 'size': None, 'min_disk': None, 'min_ram': None, 'disk_format': None, 'container_format': None, 'checksum': None, 'created_at': self.NOW_DATETIME, 'updated_at': self.NOW_DATETIME, 'deleted': None, 'owner': None, 'visibility': None, } self.assertDictEqual(expected, meta) i = i + 1 def test_detail_limit(self): fixtures = [] ids = [] for i in range(10): fixture = self._make_fixture(name='TestImage %d' % (i)) fixtures.append(fixture) ids.append(self.service.create(self.context, fixture)['id']) image_metas = self.service.detail(self.context, limit=5) self.assertEqual(5, len(image_metas)) def test_detail_default_limit(self): fixtures = [] ids = [] for i in range(10): fixture = self._make_fixture(name='TestImage %d' % (i)) fixtures.append(fixture) ids.append(self.service.create(self.context, fixture)['id']) image_metas = self.service.detail(self.context) for i, meta in enumerate(image_metas): self.assertEqual(meta['name'], 'TestImage %d' % (i)) def test_detail_marker_and_limit(self): fixtures = [] ids = [] for i in range(10): fixture = self._make_fixture(name='TestImage %d' % (i)) fixtures.append(fixture) ids.append(self.service.create(self.context, fixture)['id']) image_metas = self.service.detail(self.context, marker=ids[3], limit=5) self.assertEqual(5, len(image_metas)) i = 4 for meta in image_metas: expected = { 'id': ids[i], 'status': None, 'protected': None, 'name': 'TestImage %d' % (i), 'properties': {'properties': {}}, 'size': None, 'min_disk': None, 'min_ram': None, 'disk_format': None, 'container_format': None, 'checksum': None, 'created_at': self.NOW_DATETIME, 'updated_at': self.NOW_DATETIME, 'deleted': None, 'owner': None, 'visibility': None, } self.assertDictEqual(expected, meta) i = i + 1 def 
test_detail_invalid_marker(self): fixtures = [] ids = [] for i in range(10): fixture = self._make_fixture(name='TestImage %d' % (i)) fixtures.append(fixture) ids.append(self.service.create(self.context, fixture)['id']) self.assertRaises(exception.Invalid, self.service.detail, self.context, marker='invalidmarker') def test_update(self): fixture = self._make_fixture(name='test image') image = self.service.create(self.context, fixture) image_id = image['id'] fixture['name'] = 'new image name' self.service.update(self.context, image_id, fixture) new_image_data = self.service.show(self.context, image_id) self.assertEqual('new image name', new_image_data['name']) def test_update_with_data(self): fixture = self._make_fixture(name='test image') image = self.service.create(self.context, fixture) image_id = image['id'] fixture['name'] = 'new image name' data = '*' * 256 self.service.update(self.context, image_id, fixture, data=data) new_image_data = self.service.show(self.context, image_id) self.assertEqual(256, new_image_data['size']) self.assertEqual('new image name', new_image_data['name']) @mock.patch.object(glance.GlanceImageService, '_translate_from_glance') @mock.patch.object(glance.GlanceImageService, 'show') def test_update_purge_props(self, show, translate_from_glance): image_id = mock.sentinel.image_id client = mock.Mock(call=mock.Mock()) service = glance.GlanceImageService(client=client) image_meta = {'properties': {'k1': 'v1'}} show.return_value = {'properties': {'k2': 'v2'}} translate_from_glance.return_value = image_meta.copy() ret = service.update(self.context, image_id, image_meta) self.assertDictEqual(image_meta, ret) client.call.assert_called_once_with( self.context, 'update', image_id, k1='v1', remove_props=['k2']) @mock.patch.object(glance.GlanceImageService, '_translate_from_glance') @mock.patch.object(glance.GlanceImageService, 'show') def test_update_base_image_ref(self, show, translate_from_glance): image_id = mock.sentinel.image_id client = mock.Mock(call=mock.Mock()) service = glance.GlanceImageService(client=client) data = '*' * 256 show.return_value = {} translate_from_glance.return_value = {} service.update(self.context, image_id, {}, data, base_image_ref=123) calls = [mock.call.call( self.context, 'upload', image_id, data, base_image_ref=123), mock.call.call(self.context, 'get', image_id)] client.assert_has_calls(calls, any_order=True) def test_call_with_additional_headers(self): glance_wrapper = glance.GlanceClientWrapper() fake_client = mock.Mock() self.mock_object(glance_wrapper, 'client', fake_client) glance_wrapper.call(self.context, 'upload', {}, store_id='xyz', base_image_ref=123) self.assertDictEqual({ 'x-image-meta-store': 'xyz', 'x-openstack-base-image-ref': 123}, fake_client.http_client.additional_headers) def test_delete(self): fixture1 = self._make_fixture(name='test image 1') fixture2 = self._make_fixture(name='test image 2') fixtures = [fixture1, fixture2] num_images = len(self.service.detail(self.context)) self.assertEqual(0, num_images) ids = [] for fixture in fixtures: new_id = self.service.create(self.context, fixture)['id'] ids.append(new_id) num_images = len(self.service.detail(self.context)) self.assertEqual(2, num_images) self.service.delete(self.context, ids[0]) num_images = len(self.service.detail(self.context)) self.assertEqual(1, num_images) def test_show_passes_through_to_client(self): fixture = self._make_fixture(name='image1', is_public=True) image_id = self.service.create(self.context, fixture)['id'] image_meta = 
self.service.show(self.context, image_id) expected = { 'id': image_id, 'name': 'image1', 'protected': None, 'size': None, 'min_disk': None, 'min_ram': None, 'disk_format': None, 'container_format': None, 'checksum': None, 'created_at': self.NOW_DATETIME, 'updated_at': self.NOW_DATETIME, 'deleted': None, 'status': None, 'properties': {'is_public': True, 'properties': {}}, 'owner': None, 'visibility': None } self.assertEqual(expected, image_meta) def test_show_passes_when_is_admin_in_the_context(self): fixture = self._make_fixture(name='image2') image_id = self.service.create(self.context, fixture)['id'] self.context.auth_token = False self.context.is_admin = True image_meta = self.service.show(self.context, image_id) expected = { 'id': image_id, 'name': 'image2', 'protected': None, 'size': None, 'min_disk': None, 'min_ram': None, 'disk_format': None, 'container_format': None, 'checksum': None, 'created_at': self.NOW_DATETIME, 'updated_at': self.NOW_DATETIME, 'deleted': None, 'status': None, 'properties': {'properties': {}}, 'owner': None, 'visibility': None } self.assertEqual(expected, image_meta) def test_show_passes_when_is_public_in_visibility_param(self): fixture = self._make_fixture(name='image3') fixture['visibility'] = 'public' image_id = self.service.create(self.context, fixture)['id'] self.context.auth_token = False self.context.is_admin = False image_meta = self.service.show(self.context, image_id) expected = { 'id': image_id, 'name': 'image3', 'protected': None, 'size': None, 'min_disk': None, 'min_ram': None, 'disk_format': None, 'container_format': None, 'checksum': None, 'created_at': self.NOW_DATETIME, 'updated_at': self.NOW_DATETIME, 'deleted': None, 'status': None, 'properties': {'properties': {}}, 'owner': None, 'visibility': 'public' } self.assertEqual(expected, image_meta) def test_show_raises_when_no_authtoken_in_the_context(self): fixture = self._make_fixture(name='image1', is_public=False, protected=False) image_id = self.service.create(self.context, fixture)['id'] self.context.auth_token = False self.assertRaises(exception.ImageNotFound, self.service.show, self.context, image_id) def test_show_raises_when_no_is_admin_in_the_context(self): fixture = self._make_fixture(name='image2', is_public=False, protected=False) image_id = self.service.create(self.context, fixture)['id'] self.context.auth_token = False self.context.is_admin = False self.assertRaises(exception.ImageNotFound, self.service.show, self.context, image_id) def test_show_raises_when_is_private_in_visibility_param(self): fixture = self._make_fixture(name='image3', protected=False) fixture['visibility'] = 'private' image_id = self.service.create(self.context, fixture)['id'] self.context.auth_token = False self.context.is_admin = False self.assertRaises(exception.ImageNotFound, self.service.show, self.context, image_id) def test_detail_passes_through_to_client(self): fixture = self._make_fixture(name='image10', is_public=True) image_id = self.service.create(self.context, fixture)['id'] image_metas = self.service.detail(self.context) expected = [ { 'id': image_id, 'name': 'image10', 'protected': None, 'size': None, 'min_disk': None, 'min_ram': None, 'disk_format': None, 'container_format': None, 'checksum': None, 'created_at': self.NOW_DATETIME, 'updated_at': self.NOW_DATETIME, 'deleted': None, 'status': None, 'properties': {'is_public': True, 'properties': {}}, 'owner': None, 'visibility': None }, ] self.assertEqual(expected, image_metas) def test_show_makes_datetimes(self): fixture = 
self._make_datetime_fixture() image_id = self.service.create(self.context, fixture)['id'] image_meta = self.service.show(self.context, image_id) self.assertEqual(self.NOW_DATETIME, image_meta['created_at']) self.assertEqual(self.NOW_DATETIME, image_meta['updated_at']) def test_detail_makes_datetimes(self): fixture = self._make_datetime_fixture() self.service.create(self.context, fixture) image_meta = self.service.detail(self.context)[0] self.assertEqual(self.NOW_DATETIME, image_meta['created_at']) self.assertEqual(self.NOW_DATETIME, image_meta['updated_at']) @mock.patch.object(glance.GlanceClientWrapper, 'call') def test_add_location(self, mock_call): image_id = mock.sentinel.image_id service = glance.GlanceImageService(client=mock_call) url = 'cinder://fake-store/c984be2b-8789-4b9e-bf71-19164f537e63' metadata = {'store': 'fake-store'} service.add_location(self.context, image_id, url, metadata) mock_call.assert_called_once_with( self.context, 'add_image_location', image_id, url, metadata, privileged_user=True) @mock.patch.object(glance.GlanceClientWrapper, 'call') def test_add_location_old(self, mock_call): mock_call.side_effect = [glanceclient.exc.HTTPNotImplemented, None] image_id = mock.sentinel.image_id service = glance.GlanceImageService(client=mock_call) url = 'cinder://fake-store/c984be2b-8789-4b9e-bf71-19164f537e63' metadata = {'store': 'fake-store'} service.add_location(self.context, image_id, url, metadata) calls = [ mock.call.call( self.context, 'add_image_location', image_id, url, metadata, privileged_user=True), mock.call.call( self.context, 'add_location', image_id, url, metadata, privileged_user=False)] mock_call.assert_has_calls(calls) @mock.patch.object(glance.GlanceClientWrapper, 'call') def test_get_location(self, mock_call): url = 'cinder://fake-store/c984be2b-8789-4b9e-bf71-19164f537e63' meta = {'store': 'fake-store'} locations = [{'url': url, 'metadata': meta}] mock_call.return_value = iter(locations) image_id = mock.sentinel.image_id service = glance.GlanceImageService(client=mock_call) direct_url, locations = service.get_location(self.context, image_id) mock_call.assert_called_once_with(self.context, 'get_image_locations', image_id, privileged_user=True) self.assertIsNone(direct_url) self.assertEqual(1, len(locations)) self.assertEqual(url, locations[0]['url']) self.assertEqual(meta, locations[0]['metadata']) @mock.patch.object(glance.GlanceClientWrapper, 'call') def test_get_location_old_not_implemented(self, mock_call): class ImageMeta: def __init__(self, image_meta): self.__dict__.update(image_meta) url = 'cinder://fake-store/c984be2b-8789-4b9e-bf71-19164f537e63' meta = {'store': 'fake-store'} loc = {'url': url, 'metadata': meta} get_loc = ImageMeta({'direct_url': url, 'locations': loc}) mock_call.side_effect = [glanceclient.exc.HTTPNotImplemented, get_loc] image_id = mock.sentinel.image_id service = glance.GlanceImageService(client=mock_call) direct_url, locations = service.get_location(self.context, image_id) calls = [ mock.call.call(self.context, 'get_image_locations', image_id, privileged_user=True), mock.call.call(self.context, 'get', image_id, privileged_user=False)] mock_call.assert_has_calls(calls) self.assertEqual(url, direct_url) self.assertEqual(loc, locations) @mock.patch.object(glance.GlanceClientWrapper, 'call') def test_get_location_old_forbidden(self, mock_call): class ImageMeta: def __init__(self, image_meta): self.__dict__.update(image_meta) url = 'cinder://fake-store/c984be2b-8789-4b9e-bf71-19164f537e63' meta = {'store': 'fake-store'} loc = 
{'url': url, 'metadata': meta} get_loc = ImageMeta({'direct_url': url, 'locations': loc}) mock_call.side_effect = [glanceclient.exc.HTTPForbidden, get_loc] image_id = mock.sentinel.image_id service = glance.GlanceImageService(client=mock_call) direct_url, locations = service.get_location(self.context, image_id) calls = [ mock.call.call(self.context, 'get_image_locations', image_id, privileged_user=True), mock.call.call(self.context, 'get', image_id, privileged_user=False)] mock_call.assert_has_calls(calls) self.assertEqual(url, direct_url) self.assertEqual(loc, locations) def test_download_with_retries(self): tries = [0] class MyGlanceStubClient(glance_stubs.StubGlanceClient): """A client that fails the first time, then succeeds.""" def get(self, image_id): if tries[0] == 0: tries[0] = 1 raise glanceclient.exc.ServiceUnavailable('') else: return {} client = MyGlanceStubClient() service = self._create_image_service(client) image_id = 1 # doesn't matter writer = NullWriter() # When retries are disabled, we should get an exception self.flags(glance_num_retries=0) self.assertRaises(exception.GlanceConnectionFailed, service.download, self.context, image_id, writer) # Now lets enable retries. No exception should happen now. tries = [0] self.flags(glance_num_retries=1) service.download(self.context, image_id, writer) def test_download_no_data(self): class MyGlanceStubClient(glance_stubs.StubGlanceClient): """Returns None instead of an iterator.""" def data(self, image_id): return None client = MyGlanceStubClient() service = self._create_image_service(client) image_id = 'fake-image-uuid' e = self.assertRaises(exception.ImageDownloadFailed, service.download, self.context, image_id) self.assertIn('image contains no data', str(e)) self.assertIn(image_id, str(e)) def test_client_forbidden_converts_to_imagenotauthed(self): class MyGlanceStubClient(glance_stubs.StubGlanceClient): """A client that raises a Forbidden exception.""" def get(self, image_id): raise glanceclient.exc.Forbidden(image_id) client = MyGlanceStubClient() service = self._create_image_service(client) image_id = 1 # doesn't matter writer = NullWriter() self.assertRaises(exception.ImageNotAuthorized, service.download, self.context, image_id, writer) def test_client_translated_exc_includes_original_traceback(self): image_id = 1 # doesn't matter original_exc = glanceclient.exc.Forbidden(image_id) class MyGlanceStubClient(glance_stubs.StubGlanceClient): def get(self, image_id): raise original_exc client = MyGlanceStubClient() service = self._create_image_service(client) writer = NullWriter() exc = self.assertRaises(exception.ImageNotAuthorized, service.download, self.context, image_id, writer) original = traceback.extract_tb(original_exc.__traceback__) original.reverse() received = traceback.extract_tb(exc.__traceback__) received.reverse() # verify that we have the same traceback as original_exc for orig, recd in zip(original, received): self.assertEqual(orig, recd) # note that the received exception contains more frames in # its traceback than the original self.assertGreater(len(received), len(original)) def test_plain_translated_exc_includes_original_traceback(self): original_exc = glanceclient.exc.Forbidden() class MyGlanceStubClient(glance_stubs.StubGlanceClient): def list(self): raise original_exc client = MyGlanceStubClient() service = self._create_image_service(client) exc = self.assertRaises(exception.NotAuthorized, service.detail, self.context) original = traceback.extract_tb(original_exc.__traceback__) original.reverse() received 
= traceback.extract_tb(exc.__traceback__) received.reverse() # verify that we have the same traceback as original_exc for orig, recd in zip(original, received): self.assertEqual(orig, recd) # note that the received exception contains more frames in # its traceback than the original self.assertGreater(len(received), len(original)) def test_client_httpforbidden_converts_to_imagenotauthed(self): class MyGlanceStubClient(glance_stubs.StubGlanceClient): """A client that raises a HTTPForbidden exception.""" def get(self, image_id): raise glanceclient.exc.HTTPForbidden(image_id) client = MyGlanceStubClient() service = self._create_image_service(client) image_id = 1 # doesn't matter writer = NullWriter() self.assertRaises(exception.ImageNotAuthorized, service.download, self.context, image_id, writer) def test_client_notfound_converts_to_imagenotfound(self): class MyGlanceStubClient(glance_stubs.StubGlanceClient): """A client that raises a NotFound exception.""" def get(self, image_id): raise glanceclient.exc.NotFound(image_id) client = MyGlanceStubClient() service = self._create_image_service(client) image_id = 1 # doesn't matter writer = NullWriter() self.assertRaises(exception.ImageNotFound, service.download, self.context, image_id, writer) def test_client_httpnotfound_converts_to_imagenotfound(self): class MyGlanceStubClient(glance_stubs.StubGlanceClient): """A client that raises a HTTPNotFound exception.""" def get(self, image_id): raise glanceclient.exc.HTTPNotFound(image_id) client = MyGlanceStubClient() service = self._create_image_service(client) image_id = 1 # doesn't matter writer = NullWriter() self.assertRaises(exception.ImageNotFound, service.download, self.context, image_id, writer) @mock.patch('builtins.open', new_callable=mock.mock_open) @mock.patch('shutil.copyfileobj') @mock.patch('cinder.image.glance.get_api_servers', return_value=itertools.cycle([(False, 'localhost:9292')])) def test_download_from_direct_file(self, api_servers, mock_copyfileobj, mock_open): fixture = self._make_fixture(name='test image', locations=[{'url': 'file:///tmp/test'}]) image_id = self.service.create(self.context, fixture)['id'] writer = NullWriter() self.flags(allowed_direct_url_schemes=['file']) self.service.download(self.context, image_id, writer) mock_copyfileobj.assert_called_once_with(mock.ANY, writer) mock_open.assert_called_once_with('/tmp/test', 'rb') @mock.patch('builtins.open', new_callable=mock.mock_open) @mock.patch('shutil.copyfileobj') @mock.patch('cinder.image.glance.get_api_servers', return_value=itertools.cycle([(False, 'localhost:9292')])) def test_download_from_direct_file_non_file(self, api_servers, mock_copyfileobj, mock_open): fixture = self._make_fixture(name='test image', direct_url='swift+http://test/image') image_id = self.service.create(self.context, fixture)['id'] writer = NullWriter() self.flags(allowed_direct_url_schemes=['file']) self.service.download(self.context, image_id, writer) self.assertIsNone(mock_copyfileobj.call_args) def test_glance_client_image_id(self): fixture = self._make_fixture(name='test image') image_id = self.service.create(self.context, fixture)['id'] (_service, same_id) = glance.get_remote_image_service(self.context, image_id) self.assertEqual(same_id, image_id) def test_glance_client_image_ref(self): fixture = self._make_fixture(name='test image') image_id = self.service.create(self.context, fixture)['id'] image_url = 'http://something-less-likely/%s' % image_id (service, same_id) = glance.get_remote_image_service(self.context, image_url) 
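        # get_remote_image_service() accepts a full image URL as well as a
        # bare image ID; the assertions below check that the returned service
        # targets the URL's netloc and that the image ID is parsed from the
        # path, including the IPv6 host forms exercised in the loop.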
self.assertEqual(same_id, image_id) self.assertEqual('something-less-likely', service._client.netloc) for ipv6_url in ('[::1]', '::1', '[::1]:444'): image_url = 'http://%s/%s' % (ipv6_url, image_id) (service, same_id) = glance.get_remote_image_service(self.context, image_url) self.assertEqual(same_id, image_id) self.assertEqual(ipv6_url, service._client.netloc) def test_extracting_missing_attributes(self): """Verify behavior from glance objects that are missing attributes This fakes the image class and is missing the checksum and name attribute as the client would return if they're not set in the database. Regression test for bug #1308058. """ def _extract_attributes(image): IMAGE_ATTRIBUTES = ('size', 'disk_format', 'owner', 'container_format', 'status', 'id', 'name', 'created_at', 'updated_at', 'deleted', 'deleted_at', 'checksum', 'min_disk', 'min_ram', 'protected', 'visibility', 'cinder_encryption_key_id') output = {} for attr in IMAGE_ATTRIBUTES: if attr == 'deleted_at' and not output['deleted']: output[attr] = None elif attr == 'checksum' and output['status'] != 'active': output[attr] = None else: output[attr] = getattr(image, attr, None) output['properties'] = getattr(image, 'properties', {}) return output class MyFakeGlanceImage(glance_stubs.FakeImage): def __init__(self, metadata): IMAGE_ATTRIBUTES = ['size', 'disk_format', 'owner', 'container_format', 'id', 'created_at', 'updated_at', 'deleted', 'status', 'min_disk', 'min_ram', 'is_public', 'visibility', 'protected'] raw = dict.fromkeys(IMAGE_ATTRIBUTES) raw.update(metadata) self.__dict__['raw'] = raw metadata = { 'id': 1, 'created_at': self.NOW_DATETIME, 'updated_at': self.NOW_DATETIME, } image = MyFakeGlanceImage(metadata) actual = _extract_attributes(image) expected = { 'id': 1, 'name': None, 'protected': None, 'size': None, 'min_disk': None, 'min_ram': None, 'disk_format': None, 'container_format': None, 'checksum': None, 'created_at': self.NOW_DATETIME, 'updated_at': self.NOW_DATETIME, 'deleted_at': None, 'deleted': None, 'status': None, 'properties': {}, 'owner': None, 'visibility': None, 'cinder_encryption_key_id': None } self.assertEqual(expected, actual) @mock.patch('cinder.image.glance.CONF') def test_v2_passes_visibility_param(self, config): config.glance_num_retries = 0 metadata = { 'id': 1, 'size': 2, 'visibility': 'public', } image = glance_stubs.FakeImage(metadata) client = glance_stubs.StubGlanceClient() service = self._create_image_service(client) service._image_schema = glance_stubs.FakeSchema() actual = service._translate_from_glance('fake_context', image) expected = { 'id': 1, 'name': None, 'visibility': 'public', 'protected': None, 'size': 2, 'min_disk': None, 'min_ram': None, 'disk_format': None, 'container_format': None, 'checksum': None, 'deleted': None, 'status': None, 'properties': {}, 'owner': None, 'created_at': None, 'updated_at': None } self.assertEqual(expected, actual) @mock.patch('cinder.image.glance.CONF') def test_extracting_v2_boot_properties(self, config): config.glance_num_retries = 0 metadata = { 'id': 1, 'size': 2, 'min_disk': 2, 'min_ram': 2, 'kernel_id': 'foo', 'ramdisk_id': 'bar', } image = glance_stubs.FakeImage(metadata) client = glance_stubs.StubGlanceClient() service = self._create_image_service(client) service._image_schema = glance_stubs.FakeSchema() actual = service._translate_from_glance('fake_context', image) expected = { 'id': 1, 'name': None, 'visibility': None, 'protected': None, 'size': 2, 'min_disk': 2, 'min_ram': 2, 'disk_format': None, 'container_format': None, 'checksum': 
None, 'deleted': None, 'status': None, 'properties': {'kernel_id': 'foo', 'ramdisk_id': 'bar'}, 'owner': None, 'created_at': None, 'updated_at': None } self.assertEqual(expected, actual) def test_translate_to_glance(self): client = glance_stubs.StubGlanceClient() service = self._create_image_service(client) metadata = { 'id': 1, 'size': 2, 'min_disk': 2, 'min_ram': 2, 'cinder_encryption_key_deletion_policy': 'outer', # note that a key duplicated in the 'properties' dict # will overwrite the "outer" value 'properties': {'kernel_id': 'foo', 'ramdisk_id': 'bar', 'x_billinginfo': '123', 'cinder_encryption_key_deletion_policy': 'NOPE'}, } actual = service._translate_to_glance(metadata) expected = { 'id': 1, 'size': 2, 'min_disk': 2, 'min_ram': 2, 'cinder_encryption_key_deletion_policy': 'NOPE', 'kernel_id': 'foo', 'ramdisk_id': 'bar', 'x_billinginfo': '123', } self.assertEqual(expected, actual) def test_translate_to_glance_no_properties_element(self): """Show _translate does not remove arbitrary flat properties""" client = glance_stubs.StubGlanceClient() service = self._create_image_service(client) metadata = { 'id': 1, 'cinder_encryption_key_deletion_policy': 'baz', 'size': 2, 'min_disk': 2, 'min_ram': 2, 'kernel_id': 'foo', 'ramdisk_id': 'bar', 'x_billinginfo': '123', } actual = service._translate_to_glance(metadata) expected = { 'id': 1, 'cinder_encryption_key_deletion_policy': 'baz', 'size': 2, 'min_disk': 2, 'min_ram': 2, 'kernel_id': 'foo', 'ramdisk_id': 'bar', 'x_billinginfo': '123', } self.assertEqual(expected, actual) @mock.patch('cinder.image.glance.glanceclient.Client') @mock.patch('cinder.image.glance.get_api_servers', return_value=itertools.cycle([(False, 'localhost:9292')])) def test_call_glance_over_quota(self, api_servers, _mockglanceclient): """Test glance version set by arg to GlanceClientWrapper""" glance_wrapper = glance.GlanceClientWrapper() fake_client = mock.Mock() fake_client.images.method = mock.Mock( side_effect=glanceclient.exc.HTTPOverLimit) self.mock_object(glance_wrapper, 'client', fake_client) self.assertRaises(exception.ImageLimitExceeded, glance_wrapper.call, 'fake_context', 'method') def _create_failing_glance_client(info): class MyGlanceStubClient(glance_stubs.StubGlanceClient): """A client that fails the first time, then succeeds.""" def get(self, image_id): info['num_calls'] += 1 if info['num_calls'] == 1: raise glanceclient.exc.ServiceUnavailable('') return {} return MyGlanceStubClient() class TestGlanceImageServiceClient(test.TestCase): def setUp(self): super(TestGlanceImageServiceClient, self).setUp() self.context = context.RequestContext('fake', 'fake', auth_token=True) self.mock_object(glance.time, 'sleep', return_value=None) service_auth.reset_globals() @mock.patch('cinder.service_auth.get_auth_plugin') @mock.patch.object(ks_session.Session, 'load_from_options') def test_create_glance_client_with_protocol_http( self, mock_load, mock_get_auth_plugin): glance._SESSION = None self.flags(auth_strategy='keystone') self.flags(glance_request_timeout=None) class MyGlanceStubClient(test.TestCase): def __init__(self, version, *args, **kwargs): self.assertEqual('2', version) self.assertEqual("http://fake_host:9292", args[0]) self.assertNotIn('timeout', kwargs) self.assertIn("session", kwargs) self.assertIn("auth", kwargs) config_options = {'insecure': False, 'cacert': None, 'key': None, 'cert': None, 'timeout': None, 'split_loggers': False} mock_get_auth_plugin.return_value = context._ContextAuthPlugin mock_load.return_value = session.Session 
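        # Install the stub in place of glanceclient.Client; its __init__
        # asserts that the wrapper requests API version '2', builds an
        # http:// endpoint for the given host, passes a keystone session and
        # auth plugin, and omits 'timeout' when glance_request_timeout is
        # unset.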
self.mock_object(glance.glanceclient, 'Client', MyGlanceStubClient) client = glance._create_glance_client(self.context, 'fake_host:9292', False) self.assertIsInstance(client, MyGlanceStubClient) mock_get_auth_plugin.assert_called_once_with(self.context, auth=None) mock_load.assert_called_once_with(**config_options) @mock.patch('cinder.service_auth.get_auth_plugin') @mock.patch.object(ks_session.Session, 'load_from_options') def test_create_glance_client_with_protocol_https( self, mock_load, mock_get_auth_plugin): glance._SESSION = None self.flags(auth_strategy='keystone') self.flags(glance_request_timeout=60) self.flags( glance_ca_certificates_file='/opt/stack/data/ca-bundle.pem') self.flags(glance_certfile='/opt/stack/data/cert.pem') self.flags(glance_keyfile='/opt/stack/data/key.pem') class MyGlanceStubClient(test.TestCase): def __init__(self, version, *args, **kwargs): self.assertEqual('2', version) self.assertEqual("https://fake_host:9292", args[0]) self.assertNotIn('timeout', kwargs) self.assertIn("session", kwargs) self.assertIn("auth", kwargs) config_options = {'insecure': False, 'cacert': '/opt/stack/data/ca-bundle.pem', 'cert': '/opt/stack/data/cert.pem', 'key': '/opt/stack/data/key.pem', 'timeout': 60, 'split_loggers': False} mock_get_auth_plugin.return_value = context._ContextAuthPlugin mock_load.return_value = session.Session self.mock_object(glance.glanceclient, 'Client', MyGlanceStubClient) client = glance._create_glance_client(self.context, 'fake_host:9292', True) self.assertIsInstance(client, MyGlanceStubClient) mock_get_auth_plugin.assert_called_once_with(self.context, auth=None) mock_load.assert_called_once_with(**config_options) def test_create_glance_client_auth_strategy_noauth_with_protocol_https( self): self.flags(auth_strategy='noauth') self.flags(glance_request_timeout=60) self.flags(glance_api_insecure=False) self.flags( glance_ca_certificates_file='/opt/stack/data/ca-bundle.pem') class MyGlanceStubClient(test.TestCase): def __init__(self, version, *args, **kwargs): self.assertEqual('2', version) self.assertEqual('https://fake_host:9292', args[0]) self.assertEqual(60, kwargs['timeout']) self.assertNotIn("session", kwargs) self.assertNotIn("auth", kwargs) self.assertEqual( '/opt/stack/data/ca-bundle.pem', kwargs['cacert']) self.assertEqual(False, kwargs['insecure']) self.mock_object(glance.glanceclient, 'Client', MyGlanceStubClient) client = glance._create_glance_client(self.context, 'fake_host:9292', True) self.assertIsInstance(client, MyGlanceStubClient) def test_create_glance_client_auth_strategy_noauth_with_protocol_http( self): self.flags(auth_strategy='noauth') self.flags(glance_request_timeout=None) class MyGlanceStubClient(test.TestCase): def __init__(self, version, *args, **kwargs): self.assertEqual('2', version) self.assertEqual("http://fake_host:9292", args[0]) self.assertNotIn('timeout', kwargs) self.assertNotIn("session", kwargs) self.assertNotIn("auth", kwargs) self.mock_object(glance.glanceclient, 'Client', MyGlanceStubClient) client = glance._create_glance_client(self.context, 'fake_host:9292', False) self.assertIsInstance(client, MyGlanceStubClient) @mock.patch('cinder.service_auth.get_auth_plugin') @mock.patch.object(ksloading, 'load_auth_from_conf_options') def test_create_glance_client_with_privileged_user( self, mock_load, mock_get_auth_plugin): self.flags(auth_strategy='keystone') self.flags(auth_type='password', group='glance') mock_load.return_value = 'fake_auth_plugin' glance.GlanceClientWrapper(self.context, 'fake_host:9292', True, True) 
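        # The positional arguments above are an assumption about the wrapper's
        # signature (roughly: context, netloc, use_ssl, privileged_user); what
        # this test relies on is the final flag requesting a privileged
        # service user, so the [glance] auth plugin loaded from config should
        # be used, as the assertions below verify.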
mock_load.assert_called_once() mock_get_auth_plugin.assert_called_once_with( self.context, auth='fake_auth_plugin') ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315577.2071195 cinder-27.0.0/cinder/tests/unit/keymgr/0000775000175000017500000000000000000000000017752 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/keymgr/__init__.py0000664000175000017500000000000000000000000022051 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/keymgr/fake.py0000664000175000017500000000155400000000000021237 0ustar00zuulzuul00000000000000# Copyright 2011 Justin Santa Barbara # Copyright 2012 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Implementation of a fake key manager.""" from castellan.tests.unit.key_manager import mock_key_manager def fake_api(configuration=None): return mock_key_manager.MockKeyManager(configuration) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/keymgr/test_conf_key_mgr.py0000664000175000017500000001056000000000000024027 0ustar00zuulzuul00000000000000# Copyright (c) 2013 The Johns Hopkins University/Applied Physics Laboratory # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Test cases for the conf key manager. 
""" import binascii from castellan.common.objects import symmetric_key as key from oslo_config import cfg from cinder import context from cinder import exception from cinder.keymgr import conf_key_mgr from cinder.tests.unit import test CONF = cfg.CONF CONF.import_opt('fixed_key', 'cinder.keymgr.conf_key_mgr', group='key_manager') class ConfKeyManagerTestCase(test.TestCase): def __init__(self, *args, **kwargs): super(ConfKeyManagerTestCase, self).__init__(*args, **kwargs) self._hex_key = '1' * 64 def _create_key_manager(self): CONF.set_default('fixed_key', default=self._hex_key, group='key_manager') return conf_key_mgr.ConfKeyManager(CONF) def setUp(self): super(ConfKeyManagerTestCase, self).setUp() self.key_mgr = self._create_key_manager() self.ctxt = context.RequestContext('fake', 'fake') self.key_id = '00000000-0000-0000-0000-000000000000' encoded = bytes(binascii.unhexlify(self._hex_key)) self.key = key.SymmetricKey('AES', len(encoded) * 8, encoded) def test___init__(self): self.assertEqual(self.key_id, self.key_mgr.key_id) def test_create_key(self): key_id_1 = self.key_mgr.create_key(self.ctxt) key_id_2 = self.key_mgr.create_key(self.ctxt) # ensure that the UUIDs are the same self.assertEqual(key_id_1, key_id_2) def test_create_null_context(self): self.assertRaises(exception.NotAuthorized, self.key_mgr.create_key, None) def test_create_key_pair(self): self.assertRaises(NotImplementedError, self.key_mgr.create_key_pair, self.ctxt) def test_create_key_pair_null_context(self): self.assertRaises(NotImplementedError, self.key_mgr.create_key_pair, None) def test_store_key(self): key_id = self.key_mgr.store(self.ctxt, self.key) actual_key = self.key_mgr.get(self.ctxt, key_id) self.assertEqual(self.key, actual_key) def test_store_null_context(self): self.assertRaises(exception.NotAuthorized, self.key_mgr.store, None, self.key) def test_store_key_invalid(self): encoded = bytes(binascii.unhexlify('0' * 64)) inverse_key = key.SymmetricKey('AES', len(encoded) * 8, encoded) self.assertRaises(exception.KeyManagerError, self.key_mgr.store, self.ctxt, inverse_key) def test_delete_key(self): key_id = self.key_mgr.create_key(self.ctxt) self.key_mgr.delete(self.ctxt, key_id) # cannot delete key -- might have lingering references self.assertEqual(self.key, self.key_mgr.get(self.ctxt, self.key_id)) def test_delete_null_context(self): self.assertRaises(exception.NotAuthorized, self.key_mgr.delete, None, None) def test_delete_unknown_key(self): self.assertRaises(exception.KeyManagerError, self.key_mgr.delete, self.ctxt, None) def test_get_key(self): self.assertEqual(self.key, self.key_mgr.get(self.ctxt, self.key_id)) def test_get_null_context(self): self.assertRaises(exception.NotAuthorized, self.key_mgr.get, None, None) def test_get_unknown_key(self): self.assertRaises(KeyError, self.key_mgr.get, self.ctxt, None) def test_list(self): keys = self.key_mgr.list(self.ctxt) self.assertEqual(0, len(keys)) def test_list_null_context(self): self.assertRaises(exception.NotAuthorized, self.key_mgr.list, None) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/keymgr/test_migration.py0000664000175000017500000003021100000000000023351 0ustar00zuulzuul00000000000000# Copyright 2017 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Tests for encryption key migration.""" from unittest import mock from oslo_config import cfg from cinder import db from cinder.keymgr import migration from cinder import objects from cinder.tests.unit import fake_constants as fake from cinder.tests.unit import utils as tests_utils from cinder.tests.unit import volume as base CONF = cfg.CONF FIXED_KEY_ID = '00000000-0000-0000-0000-000000000000' class KeyMigrationTestCase(base.BaseVolumeTestCase): def setUp(self): super(KeyMigrationTestCase, self).setUp() self.conf = CONF self.fixed_key = '1' * 64 try: self.conf.import_opt(name='fixed_key', module_str='cinder.keymgr.conf_key_mgr', group='key_manager') except cfg.DuplicateOptError: pass self.conf.set_override('fixed_key', self.fixed_key, group='key_manager') self.conf.set_override('backend', 'barbican', group='key_manager') self.my_vols = [] self.my_baks = [] def tearDown(self): for vol in objects.VolumeList.get_all(self.context): self.volume.delete_volume(self.context, vol) for bak in objects.BackupList.get_all(self.context): bak.destroy() super(KeyMigrationTestCase, self).tearDown() def create_volume(self, key_id=FIXED_KEY_ID): vol = tests_utils.create_volume(self.context, host=self.conf.host) self.volume.create_volume(self.context, vol) if key_id: vol.encryption_key_id = key_id vol.save() vol.refresh() self.my_vols.append(vol) return vol def create_backup(self, volume_id=fake.VOLUME_ID, key_id=FIXED_KEY_ID): bak = tests_utils.create_backup(self.context, volume_id=volume_id, host=self.conf.host) if key_id: bak.encryption_key_id = key_id bak.save() self.my_baks = objects.BackupList.get_all_by_host(self.context, self.conf.host) bak.refresh() return bak @mock.patch('cinder.keymgr.migration.KeyMigrator._migrate_keys') @mock.patch('cinder.keymgr.migration.KeyMigrator._log_migration_status') def test_no_fixed_key(self, mock_log_migration_status, mock_migrate_keys): self.create_volume() self.conf.set_override('fixed_key', None, group='key_manager') migration.migrate_fixed_key(self.my_vols, self.my_baks, conf=self.conf) mock_migrate_keys.assert_not_called() mock_log_migration_status.assert_not_called() @mock.patch('cinder.keymgr.migration.KeyMigrator._migrate_keys') @mock.patch('cinder.keymgr.migration.KeyMigrator._log_migration_status') def test_using_conf_key_manager(self, mock_log_migration_status, mock_migrate_keys): self.create_volume() self.conf.set_override('backend', 'some.ConfKeyManager', group='key_manager') migration.migrate_fixed_key(self.my_vols, self.my_baks, conf=self.conf) mock_migrate_keys.assert_not_called() mock_log_migration_status.assert_not_called() @mock.patch('cinder.keymgr.migration.KeyMigrator._migrate_keys') @mock.patch('cinder.keymgr.migration.KeyMigrator._log_migration_status') def test_using_barbican_module_path(self, mock_log_migration_status, mock_migrate_keys): # Verify the long-hand method of specifying the Barbican backend # is properly parsed. 
self.create_volume() self.conf.set_override( 'backend', 'castellan.key_manager.barbican_key_manager.BarbicanKeyManager', group='key_manager') migration.migrate_fixed_key(self.my_vols, self.my_baks, conf=self.conf) mock_migrate_keys.assert_called_once_with(self.my_vols, self.my_baks) mock_log_migration_status.assert_called_once_with() @mock.patch('cinder.keymgr.migration.KeyMigrator._migrate_keys') @mock.patch('cinder.keymgr.migration.KeyMigrator._log_migration_status') def test_using_unsupported_key_manager(self, mock_log_migration_status, mock_migrate_keys): self.create_volume() self.conf.set_override('backend', 'some.OtherKeyManager', group='key_manager') migration.migrate_fixed_key(self.my_vols, self.my_baks, conf=self.conf) mock_migrate_keys.assert_not_called() mock_log_migration_status.assert_called_once_with() @mock.patch('cinder.keymgr.migration.KeyMigrator._migrate_keys') @mock.patch('cinder.keymgr.migration.KeyMigrator._log_migration_status') def test_no_volumes(self, mock_log_migration_status, mock_migrate_keys): migration.migrate_fixed_key(self.my_vols, self.my_baks, conf=self.conf) mock_migrate_keys.assert_not_called() mock_log_migration_status.assert_called_once_with() @mock.patch('cinder.keymgr.migration.KeyMigrator._migrate_encryption_key') @mock.patch('barbicanclient.client.Client') def test_fail_no_barbican_client(self, mock_barbican_client, mock_migrate_encryption_key): self.create_volume() mock_barbican_client.side_effect = Exception migration.migrate_fixed_key(self.my_vols, self.my_baks, conf=self.conf) mock_migrate_encryption_key.assert_not_called() @mock.patch('cinder.keymgr.migration.KeyMigrator._migrate_encryption_key') @mock.patch('barbicanclient.client.Client') def test_fail_too_many_errors(self, mock_barbican_client, mock_migrate_encryption_key): for n in range(0, (migration.MAX_KEY_MIGRATION_ERRORS + 3)): self.create_volume() mock_migrate_encryption_key.side_effect = Exception migration.migrate_fixed_key(self.my_vols, self.my_baks, conf=self.conf) self.assertEqual(mock_migrate_encryption_key.call_count, (migration.MAX_KEY_MIGRATION_ERRORS + 1)) @mock.patch('cinder.keymgr.migration.KeyMigrator._migrate_keys') def test_migration_status_more_to_migrate(self, mock_migrate_keys): mock_log = self.mock_object(migration, 'LOG') self.create_volume() migration.migrate_fixed_key(self.my_vols, self.my_baks, conf=self.conf) # Look for one warning (more volumes to migrate) and one info (no # backups to migrate) log messages. self.assertEqual(mock_log.warning.call_count, 1) self.assertEqual(mock_log.info.call_count, 1) @mock.patch('cinder.keymgr.migration.KeyMigrator._migrate_keys') def test_migration_status_all_done(self, mock_migrate_keys): mock_log = self.mock_object(migration, 'LOG') self.create_volume(key_id=fake.ENCRYPTION_KEY_ID) migration.migrate_fixed_key(self.my_vols, self.my_baks, conf=self.conf) # Look for two info (no volumes to migrate, no backups to migrate) # and no warning log messages. mock_log.warning.assert_not_called() self.assertEqual(mock_log.info.call_count, 2) @mock.patch( 'cinder.keymgr.migration.KeyMigrator._update_encryption_key_id') @mock.patch('barbicanclient.client.Client') def test_fixed_key_migration(self, mock_barbican_client, mock_update_encryption_key_id): # Create two volumes with fixed key ID that needs to be migrated, and # a couple of volumes with key IDs that don't need to be migrated, # or no key ID. 
vol_1 = self.create_volume() self.create_volume(key_id=fake.UUID1) self.create_volume(key_id=None) vol_2 = self.create_volume() self.create_volume(key_id=fake.UUID2) # Create a few backups self.create_backup(key_id=None) self.create_backup(key_id=fake.UUID3) bak_1 = self.create_backup() self.create_backup(key_id=fake.UUID4) bak_2 = self.create_backup() migration.migrate_fixed_key(self.my_vols, self.my_baks, conf=self.conf) calls = [mock.call(vol_1), mock.call(vol_2), mock.call(bak_1), mock.call(bak_2)] mock_update_encryption_key_id.assert_has_calls(calls, any_order=True) self.assertEqual(mock_update_encryption_key_id.call_count, len(calls)) @mock.patch('barbicanclient.client.Client') def test_get_barbican_key_id(self, mock_barbican_client): vol = self.create_volume() # Barbican's secret.store() returns a URI that contains the # secret's key ID at the end. secret_ref = 'http://some/path/' + fake.ENCRYPTION_KEY_ID mock_secret = mock.MagicMock() mock_secret.store.return_value = secret_ref mock_barbican_client.return_value.secrets.create.return_value \ = mock_secret migration.migrate_fixed_key(self.my_vols, self.my_baks, conf=self.conf) mock_acls_create = mock_barbican_client.return_value.acls.create mock_acls_create.assert_called_once_with(entity_ref=secret_ref, users=[fake.USER_ID]) mock_acls_create.return_value.submit.assert_called_once_with() vol_db = db.volume_get(self.context, vol.id) self.assertEqual(fake.ENCRYPTION_KEY_ID, vol_db['encryption_key_id']) @mock.patch('cinder.keymgr.migration.KeyMigrator._get_barbican_key_id') @mock.patch('barbicanclient.client.Client') def test_update_volume_encryption_key_id(self, mock_barbican_client, mock_get_barbican_key_id): vol = self.create_volume() snap_ids = [fake.SNAPSHOT_ID, fake.SNAPSHOT2_ID, fake.SNAPSHOT3_ID] for snap_id in snap_ids: tests_utils.create_snapshot(self.context, vol.id, id=snap_id) mock_get_barbican_key_id.return_value = fake.ENCRYPTION_KEY_ID migration.migrate_fixed_key(self.my_vols, self.my_baks, conf=self.conf) vol_db = db.volume_get(self.context, vol.id) self.assertEqual(fake.ENCRYPTION_KEY_ID, vol_db['encryption_key_id']) for snap_id in snap_ids: snap_db = db.snapshot_get(self.context, snap_id) self.assertEqual(fake.ENCRYPTION_KEY_ID, snap_db['encryption_key_id']) @mock.patch('cinder.keymgr.migration.KeyMigrator._get_barbican_key_id') @mock.patch('barbicanclient.client.Client') def test_update_backup_encryption_key_id(self, mock_barbican_client, mock_get_barbican_key_id): bak = self.create_backup() mock_get_barbican_key_id.return_value = fake.ENCRYPTION_KEY_ID migration.migrate_fixed_key(self.my_vols, self.my_baks, conf=self.conf) bak_db = db.backup_get(self.context, bak.id) self.assertEqual(fake.ENCRYPTION_KEY_ID, bak_db['encryption_key_id']) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/keymgr/test_transfer.py0000664000175000017500000001554000000000000023214 0ustar00zuulzuul00000000000000# Copyright 2022 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. """Tests for encryption key transfer.""" from unittest import mock from castellan.common.credentials import keystone_password from oslo_config import cfg from cinder.common import constants from cinder import context from cinder.keymgr import conf_key_mgr from cinder.keymgr import transfer from cinder import objects from cinder.tests.unit import fake_constants as fake from cinder.tests.unit import test from cinder.tests.unit import utils as test_utils CONF = cfg.CONF ENCRYPTION_SECRET = 'the_secret' CINDER_USERNAME = 'cinder' CINDER_PASSWORD = 'key_transfer_test' class KeyTransferTestCase(test.TestCase): OLD_ENCRYPTION_KEY_ID = fake.ENCRYPTION_KEY_ID NEW_ENCRYPTION_KEY_ID = fake.ENCRYPTION_KEY2_ID key_manager_class = ('castellan.key_manager.barbican_key_manager.' 'BarbicanKeyManager') def setUp(self): super(KeyTransferTestCase, self).setUp() self.conf = CONF self.conf.set_override('backend', self.key_manager_class, group='key_manager') self.conf.set_override('username', CINDER_USERNAME, group='keystone_authtoken') self.conf.set_override('password', CINDER_PASSWORD, group='keystone_authtoken') self.context = context.RequestContext(fake.USER_ID, fake.PROJECT_ID) def _create_volume_and_snapshots(self): volume = test_utils.create_volume( self.context, testcase_instance=self, encryption_key_id=self.OLD_ENCRYPTION_KEY_ID) _ = test_utils.create_snapshot( self.context, volume.id, display_name='snap_1', testcase_instance=self, encryption_key_id=self.OLD_ENCRYPTION_KEY_ID) _ = test_utils.create_snapshot( self.context, volume.id, display_name='snap_2', testcase_instance=self, encryption_key_id=self.OLD_ENCRYPTION_KEY_ID) return volume def _verify_service_context(self, mocked_call): service_context = mocked_call.call_args.args[0] self.assertIsInstance(service_context, keystone_password.KeystonePassword) self.assertEqual(service_context.username, CINDER_USERNAME) self.assertEqual(service_context.password, CINDER_PASSWORD) def _verify_encryption_key_id(self, volume_id, encryption_key_id): volume = objects.Volume.get_by_id(self.context, volume_id) self.assertEqual(volume.encryption_key_id, encryption_key_id) snapshots = objects.snapshot.SnapshotList.get_all_for_volume( self.context, volume.id) self.assertEqual(len(snapshots), 2) for snapshot in snapshots: self.assertEqual(snapshot.encryption_key_id, encryption_key_id) def _test_transfer_from_user_to_cinder(self, transfer_fn): volume = self._create_volume_and_snapshots() with mock.patch( self.key_manager_class + '.get', return_value=ENCRYPTION_SECRET) as mock_key_get, \ mock.patch( self.key_manager_class + '.store', return_value=self.NEW_ENCRYPTION_KEY_ID) as mock_key_store, \ mock.patch( self.key_manager_class + '.delete') as mock_key_delete: transfer_fn(self.context, volume) # Verify the user's context was used to fetch and delete the # volume's current key ID. mock_key_get.assert_called_once_with( self.context, self.OLD_ENCRYPTION_KEY_ID) mock_key_delete.assert_called_once_with( self.context, self.OLD_ENCRYPTION_KEY_ID) # Verify the cinder service created the new key ID. mock_key_store.assert_called_once_with( mock.ANY, ENCRYPTION_SECRET) self._verify_service_context(mock_key_store) # Verify the volume (and its snaps) reference the new key ID. 
self._verify_encryption_key_id(volume.id, self.NEW_ENCRYPTION_KEY_ID) def _test_transfer_from_cinder_to_user(self, transfer_fn): volume = self._create_volume_and_snapshots() with mock.patch( self.key_manager_class + '.get', return_value=ENCRYPTION_SECRET) as mock_key_get, \ mock.patch( self.key_manager_class + '.store', return_value=self.NEW_ENCRYPTION_KEY_ID) as mock_key_store, \ mock.patch( self.key_manager_class + '.delete') as mock_key_delete: transfer_fn(self.context, volume) # Verify the cinder service was used to fetch and delete the # volume's current key ID. mock_key_get.assert_called_once_with( mock.ANY, self.OLD_ENCRYPTION_KEY_ID) self._verify_service_context(mock_key_get) mock_key_delete.assert_called_once_with( mock.ANY, self.OLD_ENCRYPTION_KEY_ID) self._verify_service_context(mock_key_delete) # Verify the user's context created the new key ID. mock_key_store.assert_called_once_with( self.context, ENCRYPTION_SECRET) # Verify the volume (and its snaps) reference the new key ID. self._verify_encryption_key_id(volume.id, self.NEW_ENCRYPTION_KEY_ID) def test_transfer_create(self): self._test_transfer_from_user_to_cinder(transfer.transfer_create) def test_transfer_accept(self): self._test_transfer_from_cinder_to_user(transfer.transfer_accept) def test_transfer_delete(self): self._test_transfer_from_cinder_to_user(transfer.transfer_delete) class KeyTransferFixedKeyTestCase(KeyTransferTestCase): OLD_ENCRYPTION_KEY_ID = constants.FIXED_KEY_ID NEW_ENCRYPTION_KEY_ID = constants.FIXED_KEY_ID key_manager_class = 'cinder.keymgr.conf_key_mgr.ConfKeyManager' def setUp(self): super(KeyTransferFixedKeyTestCase, self).setUp() self.conf.register_opts(conf_key_mgr.key_mgr_opts, group='key_manager') self.conf.set_override('fixed_key', 'df393fca58657e6dc76a6fea31c3e7e0', group='key_manager') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/known_issues.py0000664000175000017500000000215300000000000021556 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # KNOWN ISSUES RUNNING UNIT TESTS # We've seen tpool.killall method freeze everything. The issue seems to be # resolved by calling killall during the cleanup after stopping all remaining # looping calls, but we cannot be 100% of it, so we have this flag to quickly # disable the cleanup and the tests that would break with the change if # necessary. # If we find that an stestr child runner is blocking we can trigger the Guru # Meditation Report (kill -USR2 ) and look if a Green Thread is # stuck on tpool.killall. 
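# As a rough example (assuming the oslo.reports signal handler is active in
# the child runner), something like `kill -USR2 <pid-of-child-runner>` should
# dump the report so the green thread tracebacks can be inspected.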
TPOOL_KILLALL_ISSUE = False ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315577.2071195 cinder-27.0.0/cinder/tests/unit/message/0000775000175000017500000000000000000000000020100 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/message/__init__.py0000664000175000017500000000000000000000000022177 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/message/test_api.py0000664000175000017500000005174000000000000022271 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import datetime from unittest import mock from oslo_config import cfg from oslo_utils import timeutils from cinder.api import extensions from cinder.api import microversions as mv from cinder.api.openstack import api_version_request as api_version from cinder.api.v3 import messages from cinder import context from cinder import exception from cinder.message import api as message_api from cinder.message import message_field from cinder.tests.unit.api import fakes import cinder.tests.unit.fake_constants as fake_constants from cinder.tests.unit import test from cinder.tests.unit import utils CONF = cfg.CONF version_header_name = 'OpenStack-API-Version' class MessageApiTest(test.TestCase): def setUp(self): super(MessageApiTest, self).setUp() self.message_api = message_api.API() self.mock_object(self.message_api, 'db') self.ctxt = context.RequestContext('admin', 'fakeproject', True) self.ctxt.request_id = 'fakerequestid' self.ext_mgr = extensions.ExtensionManager() self.ext_mgr.extensions = {} self.controller = messages.MessagesController(self.ext_mgr) @mock.patch('oslo_utils.timeutils.utcnow') def test_create(self, mock_utcnow): CONF.set_override('message_ttl', 300) mock_utcnow.return_value = datetime.datetime.utcnow() expected_expires_at = timeutils.utcnow() + datetime.timedelta( seconds=300) expected_message_record = { 'project_id': 'fakeproject', 'request_id': 'fakerequestid', 'resource_type': 'fake_resource_type', 'resource_uuid': None, 'action_id': message_field.Action.SCHEDULE_ALLOCATE_VOLUME[0], 'detail_id': message_field.Detail.UNKNOWN_ERROR[0], 'message_level': 'ERROR', 'expires_at': expected_expires_at, 'event_id': "VOLUME_fake_resource_type_001_001", } self.message_api.create(self.ctxt, message_field.Action.SCHEDULE_ALLOCATE_VOLUME, detail=message_field.Detail.UNKNOWN_ERROR, resource_type="fake_resource_type") self.message_api.db.message_create.assert_called_once_with( self.ctxt, expected_message_record) mock_utcnow.assert_called_with() @mock.patch('oslo_utils.timeutils.utcnow') def test_create_with_minimum_args(self, mock_utcnow): CONF.set_override('message_ttl', 300) mock_utcnow.return_value = datetime.datetime.utcnow() expected_expires_at = timeutils.utcnow() + datetime.timedelta( seconds=300) expected_message_record = { 
'project_id': 'fakeproject', 'request_id': 'fakerequestid', 'resource_type': message_field.Resource.VOLUME, 'resource_uuid': None, 'action_id': message_field.Action.SCHEDULE_ALLOCATE_VOLUME[0], 'detail_id': message_field.Detail.UNKNOWN_ERROR[0], 'message_level': 'ERROR', 'expires_at': expected_expires_at, 'event_id': "VOLUME_VOLUME_001_001", } self.message_api.create( self.ctxt, action=message_field.Action.SCHEDULE_ALLOCATE_VOLUME) self.message_api.db.message_create.assert_called_once_with( self.ctxt, expected_message_record) mock_utcnow.assert_called_with() @mock.patch('oslo_utils.timeutils.utcnow') def test_create_with_no_detail(self, mock_utcnow): # Should get Detail.UNKNOWN_ERROR CONF.set_override('message_ttl', 300) mock_utcnow.return_value = datetime.datetime.utcnow() expected_expires_at = timeutils.utcnow() + datetime.timedelta( seconds=300) expected_message_record = { 'project_id': 'fakeproject', 'request_id': 'fakerequestid', 'resource_type': 'fake_resource_type', 'resource_uuid': None, 'action_id': message_field.Action.SCHEDULE_ALLOCATE_VOLUME[0], 'detail_id': message_field.Detail.UNKNOWN_ERROR[0], 'message_level': 'ERROR', 'expires_at': expected_expires_at, 'event_id': "VOLUME_fake_resource_type_001_001", } self.message_api.create( self.ctxt, action=message_field.Action.SCHEDULE_ALLOCATE_VOLUME, resource_type="fake_resource_type") self.message_api.db.message_create.assert_called_once_with( self.ctxt, expected_message_record) mock_utcnow.assert_called_with() @mock.patch('oslo_utils.timeutils.utcnow') def test_create_with_detail_only(self, mock_utcnow): CONF.set_override('message_ttl', 300) mock_utcnow.return_value = datetime.datetime.utcnow() expected_expires_at = timeutils.utcnow() + datetime.timedelta( seconds=300) expected_message_record = { 'project_id': 'fakeproject', 'request_id': 'fakerequestid', 'resource_type': 'fake_resource_type', 'resource_uuid': None, 'action_id': message_field.Action.SCHEDULE_ALLOCATE_VOLUME[0], # this doesn't make sense for this Action, but that's the point 'detail_id': message_field.Detail.FAILED_TO_UPLOAD_VOLUME[0], 'message_level': 'ERROR', 'expires_at': expected_expires_at, 'event_id': "VOLUME_fake_resource_type_001_004", } self.message_api.create( self.ctxt, action=message_field.Action.SCHEDULE_ALLOCATE_VOLUME, detail=message_field.Detail.FAILED_TO_UPLOAD_VOLUME, resource_type="fake_resource_type") self.message_api.db.message_create.assert_called_once_with( self.ctxt, expected_message_record) mock_utcnow.assert_called_with() @mock.patch('oslo_utils.timeutils.utcnow') def test_create_passed_exception_no_detail(self, mock_utcnow): # Detail should be automatically supplied based on the # message_field.Detail.EXCEPTION_DETAIL_MAPPINGS CONF.set_override('message_ttl', 300) mock_utcnow.return_value = datetime.datetime.utcnow() expected_expires_at = timeutils.utcnow() + datetime.timedelta( seconds=300) expected_message_record = { 'project_id': 'fakeproject', 'request_id': 'fakerequestid', 'resource_type': 'fake_resource_type', 'resource_uuid': None, 'action_id': message_field.Action.SCHEDULE_ALLOCATE_VOLUME[0], # this is determined by the exception we'll be passing 'detail_id': message_field.Detail.NOT_ENOUGH_SPACE_FOR_IMAGE[0], 'message_level': 'ERROR', 'expires_at': expected_expires_at, 'event_id': "VOLUME_fake_resource_type_001_007", } exc = exception.ImageTooBig(image_id='fake_image', reason='MYOB') self.message_api.create( self.ctxt, action=message_field.Action.SCHEDULE_ALLOCATE_VOLUME, exception=exc, resource_type="fake_resource_type") 
self.message_api.db.message_create.assert_called_once_with( self.ctxt, expected_message_record) mock_utcnow.assert_called_with() @mock.patch('oslo_utils.timeutils.utcnow') def test_create_passed_unmapped_exception_no_detail(self, mock_utcnow): CONF.set_override('message_ttl', 300) mock_utcnow.return_value = datetime.datetime.utcnow() expected_expires_at = timeutils.utcnow() + datetime.timedelta( seconds=300) expected_message_record = { 'project_id': 'fakeproject', 'request_id': 'fakerequestid', 'resource_type': 'fake_resource_type', 'resource_uuid': None, 'action_id': message_field.Action.COPY_IMAGE_TO_VOLUME[0], 'detail_id': message_field.Detail.UNKNOWN_ERROR[0], 'message_level': 'ERROR', 'expires_at': expected_expires_at, 'event_id': "VOLUME_fake_resource_type_005_001", } exc = exception.ImageUnacceptable(image_id='fake_image', reason='MYOB') self.message_api.create( self.ctxt, action=message_field.Action.COPY_IMAGE_TO_VOLUME, exception=exc, resource_type="fake_resource_type") self.message_api.db.message_create.assert_called_once_with( self.ctxt, expected_message_record) mock_utcnow.assert_called_with() @mock.patch('oslo_utils.timeutils.utcnow') def test_create_passed_mapped_exception_and_detail(self, mock_utcnow): # passed Detail should be ignored because this is a mapped exception CONF.set_override('message_ttl', 300) mock_utcnow.return_value = datetime.datetime.utcnow() expected_expires_at = timeutils.utcnow() + datetime.timedelta( seconds=300) expected_message_record = { 'project_id': 'fakeproject', 'request_id': 'fakerequestid', 'resource_type': 'fake_resource_type', 'resource_uuid': None, 'action_id': message_field.Action.UPDATE_ATTACHMENT[0], 'detail_id': message_field.Detail.NOT_ENOUGH_SPACE_FOR_IMAGE[0], 'message_level': 'ERROR', 'expires_at': expected_expires_at, 'event_id': "VOLUME_fake_resource_type_004_007", } exc = exception.ImageTooBig(image_id='fake_image', reason='MYOB') self.message_api.create( self.ctxt, action=message_field.Action.UPDATE_ATTACHMENT, detail=message_field.Detail.VOLUME_ATTACH_MODE_INVALID, exception=exc, resource_type="fake_resource_type") self.message_api.db.message_create.assert_called_once_with( self.ctxt, expected_message_record) mock_utcnow.assert_called_with() @mock.patch('oslo_utils.timeutils.utcnow') def test_create_passed_unmapped_exception_and_detail(self, mock_utcnow): # passed Detail should be honored CONF.set_override('message_ttl', 300) mock_utcnow.return_value = datetime.datetime.utcnow() expected_expires_at = timeutils.utcnow() + datetime.timedelta( seconds=300) expected_message_record = { 'project_id': 'fakeproject', 'request_id': 'fakerequestid', 'resource_type': 'fake_resource_type', 'resource_uuid': None, 'action_id': message_field.Action.UPDATE_ATTACHMENT[0], 'detail_id': message_field.Detail.VOLUME_ATTACH_MODE_INVALID[0], 'message_level': 'ERROR', 'expires_at': expected_expires_at, 'event_id': "VOLUME_fake_resource_type_004_005", } exc = ValueError('bogus error') self.message_api.create( self.ctxt, action=message_field.Action.UPDATE_ATTACHMENT, detail=message_field.Detail.VOLUME_ATTACH_MODE_INVALID, exception=exc, resource_type="fake_resource_type") self.message_api.db.message_create.assert_called_once_with( self.ctxt, expected_message_record) mock_utcnow.assert_called_with() def test_create_swallows_exception(self): self.mock_object(self.message_api.db, 'create', side_effect=Exception()) self.message_api.create(self.ctxt, message_field.Action.ATTACH_VOLUME, "fake_resource") 
self.message_api.db.message_create.assert_called_once_with( self.ctxt, mock.ANY) @mock.patch('oslo_utils.timeutils.utcnow') def test_create_from_request_context(self, mock_utcnow): CONF.set_override('message_ttl', 300) mock_utcnow.return_value = datetime.datetime.utcnow() expected_expires_at = timeutils.utcnow() + datetime.timedelta( seconds=300) self.ctxt.message_resource_id = 'fake-uuid' self.ctxt.message_resource_type = 'fake_resource_type' self.ctxt.message_action = message_field.Action.BACKUP_CREATE expected_message_record = { 'project_id': 'fakeproject', 'request_id': 'fakerequestid', 'resource_type': 'fake_resource_type', 'resource_uuid': 'fake-uuid', 'action_id': message_field.Action.BACKUP_CREATE[0], 'detail_id': message_field.Detail.BACKUP_INVALID_STATE[0], 'message_level': 'ERROR', 'expires_at': expected_expires_at, 'event_id': "VOLUME_fake_resource_type_013_017", } self.message_api.create_from_request_context( self.ctxt, detail=message_field.Detail.BACKUP_INVALID_STATE) self.message_api.db.message_create.assert_called_once_with( self.ctxt, expected_message_record) mock_utcnow.assert_called_with() def test_get(self): self.message_api.get(self.ctxt, 'fake_id') self.message_api.db.message_get.assert_called_once_with(self.ctxt, 'fake_id') def test_get_all(self): self.message_api.get_all(self.ctxt) self.message_api.db.message_get_all.assert_called_once_with( self.ctxt, filters={}, limit=None, marker=None, offset=None, sort_dirs=None, sort_keys=None) def test_delete(self): admin_context = mock.Mock() self.mock_object(self.ctxt, 'elevated', return_value=admin_context) self.message_api.delete(self.ctxt, 'fake_id') self.message_api.db.message_destroy.assert_called_once_with( admin_context, 'fake_id') def test_cleanup_expired_messages(self): admin_context = mock.Mock() self.mock_object(self.ctxt, 'elevated', return_value=admin_context) self.message_api.cleanup_expired_messages(self.ctxt) self.message_api.db.cleanup_expired_messages.assert_called_once_with( admin_context) def create_message_for_tests(self): """Create messages to test pagination functionality""" utils.create_message( self.ctxt, action=message_field.Action.ATTACH_VOLUME) utils.create_message( self.ctxt, action=message_field.Action.SCHEDULE_ALLOCATE_VOLUME) utils.create_message( self.ctxt, action=message_field.Action.COPY_VOLUME_TO_IMAGE) utils.create_message( self.ctxt, action=message_field.Action.COPY_VOLUME_TO_IMAGE) def test_get_all_messages_with_limit(self): self.create_message_for_tests() url = '/v3/messages?limit=1' req = fakes.HTTPRequest.blank(url) req.method = 'GET' req.content_type = 'application/json' req.headers = mv.get_mv_header(mv.MESSAGES_PAGINATION) req.api_version_request = mv.get_api_version(mv.RESOURCE_FILTER) req.environ['cinder.context'].is_admin = True res = self.controller.index(req) self.assertEqual(1, len(res['messages'])) url = '/v3/messages?limit=3' req = fakes.HTTPRequest.blank(url) req.method = 'GET' req.content_type = 'application/json' req.headers = mv.get_mv_header(mv.MESSAGES_PAGINATION) req.api_version_request = mv.get_api_version(mv.RESOURCE_FILTER) req.environ['cinder.context'].is_admin = True res = self.controller.index(req) self.assertEqual(3, len(res['messages'])) def test_get_all_messages_with_limit_wrong_version(self): self.create_message_for_tests() PRE_MESSAGES_PAGINATION = mv.get_prior_version(mv.MESSAGES_PAGINATION) url = '/v3/messages?limit=1' req = fakes.HTTPRequest.blank(url) req.method = 'GET' req.content_type = 'application/json' req.headers = 
mv.get_mv_header(PRE_MESSAGES_PAGINATION) req.api_version_request = mv.get_api_version(PRE_MESSAGES_PAGINATION) req.environ['cinder.context'].is_admin = True res = self.controller.index(req) self.assertEqual(4, len(res['messages'])) def test_get_all_messages_with_offset(self): self.create_message_for_tests() url = '/v3/messages?offset=1' req = fakes.HTTPRequest.blank(url) req.method = 'GET' req.content_type = 'application/json' req.headers = mv.get_mv_header(mv.MESSAGES_PAGINATION) req.api_version_request = mv.get_api_version(mv.MESSAGES_PAGINATION) req.environ['cinder.context'].is_admin = True res = self.controller.index(req) self.assertEqual(3, len(res['messages'])) def test_get_all_messages_with_limit_and_offset(self): self.create_message_for_tests() url = '/v3/messages?limit=2&offset=1' req = fakes.HTTPRequest.blank(url) req.method = 'GET' req.content_type = 'application/json' req.headers = mv.get_mv_header(mv.MESSAGES_PAGINATION) req.api_version_request = mv.get_api_version(mv.MESSAGES_PAGINATION) req.environ['cinder.context'].is_admin = True res = self.controller.index(req) self.assertEqual(2, len(res['messages'])) def test_get_all_messages_with_filter(self): self.create_message_for_tests() url = '/v3/messages?action_id=%s' % ( message_field.Action.ATTACH_VOLUME[0]) req = fakes.HTTPRequest.blank(url) req.method = 'GET' req.content_type = 'application/json' req.headers = mv.get_mv_header(mv.MESSAGES_PAGINATION) req.api_version_request = mv.get_api_version(mv.MESSAGES_PAGINATION) req.environ['cinder.context'].is_admin = True res = self.controller.index(req) self.assertEqual(1, len(res['messages'])) def test_get_all_messages_with_sort(self): self.create_message_for_tests() url = '/v3/messages?sort=event_id:asc' req = fakes.HTTPRequest.blank(url) req.method = 'GET' req.content_type = 'application/json' req.headers = mv.get_mv_header(mv.MESSAGES_PAGINATION) req.api_version_request = mv.get_api_version(mv.MESSAGES_PAGINATION) req.environ['cinder.context'].is_admin = True res = self.controller.index(req) expect_result = [ "VOLUME_VOLUME_001_002", "VOLUME_VOLUME_002_002", "VOLUME_VOLUME_003_002", "VOLUME_VOLUME_003_002", ] expect_result.sort() self.assertEqual(4, len(res['messages'])) self.assertEqual(expect_result[0], res['messages'][0]['event_id']) self.assertEqual(expect_result[1], res['messages'][1]['event_id']) self.assertEqual(expect_result[2], res['messages'][2]['event_id']) self.assertEqual(expect_result[3], res['messages'][3]['event_id']) def test_get_all_messages_paging(self): self.create_message_for_tests() # first request of this test url = '/v3/%s/messages?limit=2' % fake_constants.PROJECT_ID req = fakes.HTTPRequest.blank(url) req.method = 'GET' req.content_type = 'application/json' req.headers = mv.get_mv_header(mv.MESSAGES_PAGINATION) req.api_version_request = mv.get_api_version(mv.RESOURCE_FILTER) req.environ['cinder.context'].is_admin = True res = self.controller.index(req) self.assertEqual(2, len(res['messages'])) next_link = ('http://localhost/v3/%s/messages?limit=' '2&marker=%s') % (fake_constants.PROJECT_ID, res['messages'][1]['id']) self.assertEqual(next_link, res['messages_links'][0]['href']) # Second request in this test # Test for second page using marker (res['messages][0]['id']) # values fetched in first request with limit 2 in this test url = '/v3/%s/messages?limit=1&marker=%s' % ( fake_constants.PROJECT_ID, res['messages'][0]['id']) req = fakes.HTTPRequest.blank(url) req.method = 'GET' req.content_type = 'application/json' req.headers = 
mv.get_mv_header(mv.MESSAGES_PAGINATION) req.api_version_request = api_version.max_api_version() req.environ['cinder.context'].is_admin = True result = self.controller.index(req) self.assertEqual(1, len(result['messages'])) # checking second message of first request in this test with first # message of second request. (to test paging mechanism) self.assertEqual(res['messages'][1], result['messages'][0]) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/message/test_defined_messages.py0000664000175000017500000000333100000000000024776 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from cinder.message import defined_messages from cinder.tests.unit import test class DefinedMessagesTest(test.TestCase): def test_event_id_formats(self): """Assert all cinder event ids start with VOLUME_.""" for attr_name in dir(defined_messages.EventIds): if not attr_name.startswith('_'): value = getattr(defined_messages.EventIds, attr_name) self.assertTrue(value.startswith('VOLUME_')) def test_unique_event_ids(self): """Assert that no event_id is duplicated.""" event_ids = [] for attr_name in dir(defined_messages.EventIds): if not attr_name.startswith('_'): value = getattr(defined_messages.EventIds, attr_name) event_ids.append(value) self.assertEqual(len(event_ids), len(set(event_ids))) def test_event_id_has_message(self): for attr_name in dir(defined_messages.EventIds): if not attr_name.startswith('_'): value = getattr(defined_messages.EventIds, attr_name) msg = defined_messages.event_id_message_map.get(value) self.assertGreater(len(msg), 1) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/message/test_message_field.py0000664000175000017500000001044100000000000024300 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
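# Unit tests for the Action and Detail identifiers defined in
# cinder.message.message_field and for their translation helpers.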
import inspect from itertools import chain import ddt from cinder import exception from cinder.message import message_field from cinder.tests.unit import test @ddt.ddt class MessageFieldTest(test.TestCase): def test_unique_action_ids(self): """Assert that no action_id is duplicated.""" action_ids = [x[0] for x in message_field.Action.ALL] self.assertEqual(len(action_ids), len(set(action_ids))) def test_all_action_fields_in_ALL(self): """Assert that all and only defined fields are in the ALL tuple""" defined_fields = [k for k in message_field.Action.__dict__.keys() if k != 'ALL' and not k.startswith('__')] for d in defined_fields: self.assertIn(getattr(message_field.Action, d), message_field.Action.ALL) self.assertEqual(len(message_field.Action.ALL), len(defined_fields)) def test_unique_detail_ids(self): """Assert that no detail_id is duplicated.""" detail_ids = [x[0] for x in message_field.Detail.ALL] self.assertEqual(len(detail_ids), len(set(detail_ids))) def test_all_detail_fields_in_ALL(self): """Assert that all and only defined fields are in the ALL tuple""" defined_fields = [k for k in message_field.Detail.__dict__.keys() if k != 'ALL' and not k.startswith('__') and k != 'EXCEPTION_DETAIL_MAPPINGS'] for d in defined_fields: self.assertIn(getattr(message_field.Detail, d), message_field.Detail.ALL) self.assertEqual(len(message_field.Detail.ALL), len(defined_fields)) known_exceptions = [ name for name, _ in inspect.getmembers(exception, inspect.isclass)] mapped_exceptions = list(chain.from_iterable( message_field.Detail.EXCEPTION_DETAIL_MAPPINGS.values())) @ddt.idata(mapped_exceptions) def test_exception_detail_map_no_unknown_exceptions(self, exc): """Assert that only known exceptions are in the map.""" self.assertIn(exc, self.known_exceptions) @ddt.ddt class MessageFieldFunctionsTest(test.TestCase): @ddt.data({'id': '001', 'content': 'schedule allocate volume'}, {'id': '002', 'content': 'attach volume'}, {'id': 'invalid', 'content': None}) @ddt.unpack def test_translate_action(self, id, content): result = message_field.translate_action(id) if content is None: content = 'unknown action' self.assertEqual(content, result) @ddt.data({'id': '001', 'content': 'An unknown error occurred.'}, {'id': '002', 'content': 'Driver is not initialized at present.'}, {'id': 'invalid', 'content': None}) @ddt.unpack def test_translate_detail(self, id, content): result = message_field.translate_detail(id) if content is None: content = 'An unknown error occurred.' 
self.assertEqual(content, result) @ddt.data({'exception': exception.DriverNotInitialized(), 'detail': '', 'expected': '002'}, {'exception': exception.CinderException(), 'detail': '', 'expected': '001'}, {'exception': exception.CinderException(), 'detail': message_field.Detail.QUOTA_EXCEED, 'expected': '007'}, {'exception': '', 'detail': message_field.Detail.QUOTA_EXCEED, 'expected': '007'}) @ddt.unpack def translate_detail_id(self, exception, detail, expected): result = message_field.translate_detail_id(exception, detail) self.assertEqual(expected, result) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315577.2071195 cinder-27.0.0/cinder/tests/unit/monkey_patch_example/0000775000175000017500000000000000000000000022650 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/monkey_patch_example/__init__.py0000664000175000017500000000213000000000000024755 0ustar00zuulzuul00000000000000# Copyright 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Example Module for testing utils.monkey_patch().""" CALLED_FUNCTION = [] def example_decorator(name, function): """decorator for notify which is used from utils.monkey_patch(). :param name: name of the function :param function: - object of the function :returns: function -- decorated function """ def wrapped_func(*args, **kwarg): CALLED_FUNCTION.append(name) return function(*args, **kwarg) return wrapped_func ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/monkey_patch_example/example_a.py0000664000175000017500000000162500000000000025161 0ustar00zuulzuul00000000000000# Copyright 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Example Module A for testing utils.monkey_patch().""" def example_function_a(): return 'Example function' class ExampleClassA(object): def example_method(self): return 'Example method' def example_method_add(self, arg1, arg2): return arg1 + arg2 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/monkey_patch_example/example_b.py0000664000175000017500000000162600000000000025163 0ustar00zuulzuul00000000000000# Copyright 2011 OpenStack Foundation # All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Example Module B for testing utils.monkey_patch().""" def example_function_b(): return 'Example function' class ExampleClassB(object): def example_method(self): return 'Example method' def example_method_add(self, arg1, arg2): return arg1 + arg2 ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315577.2151196 cinder-27.0.0/cinder/tests/unit/objects/0000775000175000017500000000000000000000000020105 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/objects/__init__.py0000664000175000017500000000442200000000000022220 0ustar00zuulzuul00000000000000# Copyright 2015 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_utils import timeutils from cinder import context from cinder import exception from cinder.objects import base as obj_base from cinder.tests.unit import test class BaseObjectsTestCase(test.TestCase): def setUp(self, *args, **kwargs): super(BaseObjectsTestCase, self).setUp(*args, **kwargs) self.user_id = 'fake-user' self.project_id = 'fake-project' self.context = context.RequestContext(self.user_id, self.project_id, is_admin=False) # We only test local right now. # TODO(mriedem): Testing remote would be nice... self.assertIsNone(obj_base.CinderObject.indirection_api) # TODO(mriedem): Replace this with # oslo_versionedobjects.fixture.compare_obj when that is in a released # version of o.vo. 
@staticmethod def _compare(test, db, obj): for field, value in db.items(): try: getattr(obj, field) except (AttributeError, exception.CinderException, NotImplementedError): # NotImplementedError: ignore "Cannot load 'projects' in the # base class" error continue obj_field = getattr(obj, field) if field in ('modified_at', 'created_at', 'updated_at', 'deleted_at', 'last_heartbeat') and db[field]: test.assertEqual(db[field], timeutils.normalize_time(obj_field)) elif isinstance(obj_field, obj_base.ObjectListBase): test.assertEqual(db[field], obj_field.objects) else: test.assertEqual(db[field], obj_field) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/objects/test_backup.py0000664000175000017500000004515100000000000022771 0ustar00zuulzuul00000000000000# Copyright 2015 Intel Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from unittest import mock from zoneinfo import ZoneInfo from oslo_utils import timeutils from cinder.db.sqlalchemy import models from cinder import exception from cinder import objects from cinder.objects import fields from cinder.tests.unit import fake_constants as fake from cinder.tests.unit import fake_snapshot from cinder.tests.unit import fake_volume from cinder.tests.unit import objects as test_objects from cinder.tests.unit import utils fake_backup = { 'id': fake.BACKUP_ID, 'volume_id': fake.VOLUME_ID, 'status': fields.BackupStatus.CREATING, 'size': 1, 'display_name': 'fake_name', 'display_description': 'fake_description', 'user_id': fake.USER_ID, 'project_id': fake.PROJECT_ID, 'temp_volume_id': None, 'temp_snapshot_id': None, 'snapshot_id': None, 'data_timestamp': None, 'restore_volume_id': None, 'backup_metadata': {}, } vol_props = {'status': 'available', 'size': 1} fake_vol = fake_volume.fake_db_volume(**vol_props) snap_props = {'status': fields.BackupStatus.AVAILABLE, 'volume_id': fake_vol['id'], 'expected_attrs': ['metadata']} fake_snap = fake_snapshot.fake_db_snapshot(**snap_props) class TestBackup(test_objects.BaseObjectsTestCase): @mock.patch('cinder.db.get_by_id', return_value=fake_backup) def test_get_by_id(self, backup_get): backup = objects.Backup.get_by_id(self.context, fake.USER_ID) self._compare(self, fake_backup, backup) backup_get.assert_called_once_with(self.context, models.Backup, fake.USER_ID) @mock.patch('cinder.db.sqlalchemy.api.model_query') def test_get_by_id_no_existing_id(self, model_query): query = mock.Mock() filter_by = mock.Mock() query_options = mock.Mock() filter_by.first.return_value = None query_options.filter_by.return_value = filter_by query.options.return_value = query_options model_query.return_value = query self.assertRaises(exception.BackupNotFound, objects.Backup.get_by_id, self.context, 123) @mock.patch('cinder.db.backup_create', return_value=fake_backup) def test_create(self, backup_create): backup = objects.Backup(context=self.context) backup.create() self.assertEqual(fake_backup['id'], backup.id) self.assertEqual(fake_backup['volume_id'], 
backup.volume_id) @mock.patch('cinder.db.backup_update') def test_save(self, backup_update): backup = objects.Backup._from_db_object( self.context, objects.Backup(), fake_backup) backup.display_name = 'foobar' backup.save() backup_update.assert_called_once_with(self.context, backup.id, {'display_name': 'foobar'}) @mock.patch('cinder.db.backup_metadata_update', return_value={'key1': 'value1'}) @mock.patch('cinder.db.backup_update') def test_save_with_metadata(self, backup_update, metadata_update): backup = objects.Backup._from_db_object( self.context, objects.Backup(), fake_backup) backup.metadata = {'key1': 'value1'} self.assertEqual({'metadata': {'key1': 'value1'}}, backup.obj_get_changes()) backup.save() metadata_update.assert_called_once_with(self.context, backup.id, {'key1': 'value1'}, True) @mock.patch('oslo_utils.timeutils.utcnow', return_value=timeutils.utcnow()) @mock.patch('cinder.db.sqlalchemy.api.backup_destroy') def test_destroy(self, backup_destroy, utcnow_mock): backup_destroy.return_value = { 'status': fields.BackupStatus.DELETED, 'deleted': True, 'deleted_at': utcnow_mock.return_value} backup = objects.Backup(context=self.context, id=fake.BACKUP_ID) backup.destroy() self.assertTrue(backup_destroy.called) admin_context = backup_destroy.call_args[0][0] self.assertTrue(admin_context.is_admin) self.assertTrue(backup.deleted) self.assertEqual(fields.BackupStatus.DELETED, backup.status) self.assertEqual( utcnow_mock.return_value.replace(tzinfo=ZoneInfo('UTC')), backup.deleted_at) def test_obj_field_temp_volume_snapshot_id(self): backup = objects.Backup(context=self.context, temp_volume_id='2', temp_snapshot_id='3') self.assertEqual('2', backup.temp_volume_id) self.assertEqual('3', backup.temp_snapshot_id) def test_obj_field_snapshot_id(self): backup = objects.Backup(context=self.context, snapshot_id='2') self.assertEqual('2', backup.snapshot_id) def test_obj_field_restore_volume_id(self): backup = objects.Backup(context=self.context, restore_volume_id='2') self.assertEqual('2', backup.restore_volume_id) def test_obj_field_metadata(self): backup = objects.Backup(context=self.context, metadata={'test_key': 'test_value'}) self.assertEqual({'test_key': 'test_value'}, backup.metadata) @mock.patch('cinder.objects.backup.Backup.get_by_id', return_value=None) def test_obj_field_parent(self, mock_lzy_ld): backup = objects.Backup(context=self.context, parent_id=None) self.assertIsNone(backup.parent) # Bug #1862635: should trigger a lazy load backup = objects.Backup(context=self.context, parent_id=fake.UUID5) _ = backup.parent mock_lzy_ld.assert_called_once() def test_import_record(self): utils.replace_obj_loader(self, objects.Backup) backup = objects.Backup(context=self.context, id=fake.BACKUP_ID, parent_id=None, num_dependent_backups=0) export_string = backup.encode_record() imported_backup = objects.Backup.decode_record(export_string) # Make sure we don't lose data when converting from string self.assertDictEqual(self._expected_backup(backup), imported_backup) @mock.patch('cinder.db.get_by_id', return_value=fake_backup) def test_import_record_w_parent(self, backup_get): full_backup = objects.Backup.get_by_id(self.context, fake.USER_ID) self._compare(self, fake_backup, full_backup) utils.replace_obj_loader(self, objects.Backup) incr_backup = objects.Backup(context=self.context, id=fake.BACKUP2_ID, parent=full_backup, parent_id=full_backup['id'], num_dependent_backups=0) export_string = incr_backup.encode_record() imported_backup = objects.Backup.decode_record(export_string) # Make sure we 
don't lose data when converting from string self.assertDictEqual(self._expected_backup(incr_backup), imported_backup) def test_import_record_additional_info(self): utils.replace_obj_loader(self, objects.Backup) backup = objects.Backup(context=self.context, id=fake.BACKUP_ID, parent_id=None, num_dependent_backups=0) extra_info = {'driver': {'key1': 'value1', 'key2': 'value2'}} extra_info_copy = extra_info.copy() export_string = backup.encode_record(extra_info=extra_info) imported_backup = objects.Backup.decode_record(export_string) # Dictionary passed should not be modified self.assertDictEqual(extra_info_copy, extra_info) # Make sure we don't lose data when converting from string and that # extra info is still there expected = self._expected_backup(backup) expected['extra_info'] = extra_info self.assertDictEqual(expected, imported_backup) def _expected_backup(self, backup): record = {name: field.to_primitive(backup, name, getattr(backup, name)) for name, field in backup.fields.items() if name != 'parent'} return record def test_import_record_additional_info_cant_overwrite(self): utils.replace_obj_loader(self, objects.Backup) backup = objects.Backup(context=self.context, id=fake.BACKUP_ID, parent_id=None, num_dependent_backups=0) export_string = backup.encode_record(id='fake_id') imported_backup = objects.Backup.decode_record(export_string) # Make sure the extra_info can't overwrite basic data self.assertDictEqual(self._expected_backup(backup), imported_backup) def test_import_record_decoding_error(self): export_string = '123456' self.assertRaises(exception.InvalidInput, objects.Backup.decode_record, export_string) def test_import_record_parsing_error(self): export_string = '' self.assertRaises(exception.InvalidInput, objects.Backup.decode_record, export_string) @mock.patch('cinder.db.sqlalchemy.api.backup_get') def test_refresh(self, backup_get): db_backup1 = fake_backup.copy() db_backup2 = db_backup1.copy() db_backup2['display_name'] = 'foobar' # On the second backup_get, return the backup with an updated # display_name backup_get.side_effect = [db_backup1, db_backup2] backup = objects.Backup.get_by_id(self.context, fake.BACKUP_ID) self._compare(self, db_backup1, backup) # display_name was updated, so a backup refresh should have a new value # for that field backup.refresh() self._compare(self, db_backup2, backup) backup_get.assert_has_calls([mock.call(self.context, fake.BACKUP_ID), mock.call.__bool__(), mock.call(self.context, fake.BACKUP_ID)]) class TestBackupList(test_objects.BaseObjectsTestCase): @mock.patch('cinder.db.backup_get_all', return_value=[fake_backup]) def test_get_all(self, backup_get_all): backups = objects.BackupList.get_all(self.context) self.assertEqual(1, len(backups)) TestBackup._compare(self, fake_backup, backups[0]) @mock.patch('cinder.db.backup_get_all_by_project', return_value=[fake_backup]) def test_get_all_by_project(self, get_all_by_project): backups = objects.BackupList.get_all_by_project( self.context, self.project_id) self.assertEqual(1, len(backups)) TestBackup._compare(self, fake_backup, backups[0]) @mock.patch('cinder.db.backup_get_all_by_host', return_value=[fake_backup]) def test_get_all_by_host(self, get_all_by_host): backups = objects.BackupList.get_all_by_host(self.context, "fake_host") self.assertEqual(1, len(backups)) TestBackup._compare(self, fake_backup, backups[0]) @mock.patch('cinder.db.backup_get_all', return_value=[fake_backup]) def test_get_all_tenants(self, backup_get_all): search_opts = {'all_tenants': 1} backups = 
objects.BackupList.get_all(self.context, search_opts) self.assertEqual(1, len(backups)) TestBackup._compare(self, fake_backup, backups[0]) @mock.patch('cinder.db.backup_get_all_by_volume', return_value=[fake_backup]) def test_get_all_by_volume(self, get_all_by_volume): backups = objects.BackupList.get_all_by_volume( self.context, fake.VOLUME_ID, 'fake_proj') self.assertEqual(1, len(backups)) get_all_by_volume.assert_called_once_with(self.context, fake.VOLUME_ID, 'fake_proj', None) TestBackup._compare(self, fake_backup, backups[0]) class BackupDeviceInfoTestCase(test_objects.BaseObjectsTestCase): def setUp(self): super(BackupDeviceInfoTestCase, self).setUp() self.vol_obj = fake_volume.fake_volume_obj(self.context, **vol_props) self.snap_obj = fake_snapshot.fake_snapshot_obj(self.context, **snap_props) self.backup_device_dict = {'secure_enabled': False, 'is_snapshot': False, } @mock.patch('cinder.db.volume_get', return_value=fake_vol) def test_from_primitive_with_volume(self, mock_fake_vol): vol_obj = self.vol_obj self.backup_device_dict['backup_device'] = vol_obj backup_device_info = objects.BackupDeviceInfo.from_primitive( self.backup_device_dict, self.context) self.assertFalse(backup_device_info.is_snapshot) self.assertEqual(self.backup_device_dict['secure_enabled'], backup_device_info.secure_enabled) self.assertEqual(vol_obj, backup_device_info.volume) self.backup_device_dict['backup_device'] = fake_vol backup_device_info = objects.BackupDeviceInfo.from_primitive( self.backup_device_dict, self.context) vol_obj_from_db = objects.Volume._from_db_object(self.context, objects.Volume(), fake_vol) self.assertEqual(vol_obj_from_db, backup_device_info.volume) @mock.patch('cinder.db.snapshot_get', return_value=fake_snap) def test_from_primitive_with_snapshot(self, mock_fake_snap): snap_obj = self.snap_obj self.backup_device_dict['is_snapshot'] = True self.backup_device_dict['backup_device'] = snap_obj backup_device_info = objects.BackupDeviceInfo.from_primitive( self.backup_device_dict, self.context, expected_attrs=['metadata']) self.assertTrue(backup_device_info.is_snapshot) self.assertEqual(self.backup_device_dict['secure_enabled'], backup_device_info.secure_enabled) self.assertEqual(snap_obj, backup_device_info.snapshot) self.backup_device_dict['backup_device'] = fake_snap backup_device_info = objects.BackupDeviceInfo.from_primitive( self.backup_device_dict, self.context, expected_attrs=['metadata']) self.assertEqual(snap_obj, backup_device_info.snapshot) @mock.patch('cinder.db.volume_get', return_value=fake_vol) def test_to_primitive_with_volume(self, mock_fake_vol): vol_obj = self.vol_obj self.backup_device_dict['backup_device'] = fake_vol backup_device_info = objects.BackupDeviceInfo() backup_device_info.volume = vol_obj backup_device_info.secure_enabled = ( self.backup_device_dict['secure_enabled']) backup_device_ret_dict = backup_device_info.to_primitive(self.context) self.assertEqual(self.backup_device_dict['secure_enabled'], backup_device_ret_dict['secure_enabled']) self.assertFalse(backup_device_ret_dict['is_snapshot']) self.assertEqual(self.backup_device_dict['backup_device'], backup_device_ret_dict['backup_device']) @mock.patch('cinder.db.snapshot_get', return_value=fake_snap) def test_to_primitive_with_snapshot(self, mock_fake_snap): snap_obj = self.snap_obj backup_device_info = objects.BackupDeviceInfo() backup_device_info.snapshot = snap_obj backup_device_info.secure_enabled = ( self.backup_device_dict['secure_enabled']) backup_device_ret_dict = 
backup_device_info.to_primitive(self.context) self.assertEqual(self.backup_device_dict['secure_enabled'], backup_device_ret_dict['secure_enabled']) self.assertTrue(backup_device_ret_dict['is_snapshot']) # NOTE(sborkows): since volume in sqlalchemy snapshot is a sqlalchemy # object too, to compare snapshots we need to convert their volumes to # dicts. snap_actual_dict = fake_snap snap_ref_dict = backup_device_ret_dict['backup_device'] snap_actual_dict['volume'] = self.vol_obj.obj_to_primitive() snap_ref_dict['volume'] = snap_ref_dict['volume'] self.assertEqual(snap_actual_dict, snap_ref_dict) def test_is_snapshot_both_volume_and_snapshot_raises_error(self): snap = self.snap_obj vol = self.vol_obj backup_device_info = objects.BackupDeviceInfo() backup_device_info.snapshot = snap backup_device_info.volume = vol backup_device_info.secure_enabled = ( self.backup_device_dict['secure_enabled']) self.assertRaises(exception.ProgrammingError, getattr, backup_device_info, 'is_snapshot') def test_is_snapshot_neither_volume_nor_snapshot_raises_error(self): backup_device_info = objects.BackupDeviceInfo() backup_device_info.secure_enabled = ( self.backup_device_dict['secure_enabled']) self.assertRaises(exception.ProgrammingError, getattr, backup_device_info, 'is_snapshot') def test_device_obj_with_volume(self): vol = self.vol_obj backup_device_info = objects.BackupDeviceInfo() backup_device_info.volume = vol backup_device_info.secure_enabled = ( self.backup_device_dict['secure_enabled']) backup_device_obj = backup_device_info.device_obj self.assertIsInstance(backup_device_obj, objects.Volume) self.assertEqual(vol, backup_device_obj) def test_device_obj_with_snapshot(self): snap = self.snap_obj backup_device_info = objects.BackupDeviceInfo() backup_device_info.snapshot = snap backup_device_info.secure_enabled = ( self.backup_device_dict['secure_enabled']) backup_device_obj = backup_device_info.device_obj self.assertIsInstance(backup_device_obj, objects.Snapshot) self.assertEqual(snap, backup_device_obj) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/objects/test_base.py0000664000175000017500000011557700000000000022450 0ustar00zuulzuul00000000000000# Copyright 2015 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
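# Unit tests for cinder.objects.base: object version history, CinderObject
# change tracking, comparable objects, and conditional_update behaviour.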
import datetime from unittest import mock import uuid import ddt from iso8601 import iso8601 from oslo_versionedobjects import fields from sqlalchemy import sql from cinder import context from cinder import db from cinder.db.sqlalchemy import models from cinder import exception from cinder import objects from cinder.objects import fields as c_fields from cinder.tests.unit import fake_constants as fake from cinder.tests.unit import fake_objects from cinder.tests.unit import objects as test_objects from cinder.tests.unit import test class TestCinderObjectVersionHistory(test_objects.BaseObjectsTestCase): def test_add(self): history = test_objects.obj_base.CinderObjectVersionsHistory() first_version = history.versions[0] v10 = {'Backup': '2.0'} v11 = {'Backup': '2.1'} history.add('1.0', v10) history.add('1.1', v11) # We have 3 elements because we have the liberty version by default self.assertEqual(2 + 1, len(history)) expected_v10 = history[first_version].copy() expected_v10.update(v10) expected_v11 = history[first_version].copy() expected_v11.update(v11) self.assertEqual('1.1', history.get_current()) self.assertEqual(expected_v11, history.get_current_versions()) self.assertEqual(expected_v10, history['1.0']) def test_add_existing(self): history = test_objects.obj_base.CinderObjectVersionsHistory() history.add('1.0', {'Backup': '1.0'}) self.assertRaises(exception.ProgrammingError, history.add, '1.0', {'Backup': '1.0'}) @ddt.ddt class TestCinderObject(test_objects.BaseObjectsTestCase): """Tests methods from CinderObject.""" def setUp(self): super(TestCinderObject, self).setUp() self.obj = fake_objects.ChildObject( scheduled_at=None, uuid=uuid.uuid4(), text='text') self.obj.obj_reset_changes() def test_cinder_obj_get_changes_no_changes(self): self.assertDictEqual({}, self.obj.cinder_obj_get_changes()) def test_cinder_obj_get_changes_other_changes(self): self.obj.text = 'text2' self.assertDictEqual({'text': 'text2'}, self.obj.cinder_obj_get_changes()) def test_cinder_obj_get_changes_datetime_no_tz(self): now = datetime.datetime.utcnow() self.obj.scheduled_at = now self.assertDictEqual({'scheduled_at': now}, self.obj.cinder_obj_get_changes()) def test_cinder_obj_get_changes_datetime_tz_utc(self): now_tz = iso8601.parse_date('2015-06-26T22:00:01Z') now = now_tz.replace(tzinfo=None) self.obj.scheduled_at = now_tz self.assertDictEqual({'scheduled_at': now}, self.obj.cinder_obj_get_changes()) def test_cinder_obj_get_changes_datetime_tz_non_utc_positive(self): now_tz = iso8601.parse_date('2015-06-26T22:00:01+01') now = now_tz.replace(tzinfo=None) - datetime.timedelta(hours=1) self.obj.scheduled_at = now_tz self.assertDictEqual({'scheduled_at': now}, self.obj.cinder_obj_get_changes()) def test_cinder_obj_get_changes_datetime_tz_non_utc_negative(self): now_tz = iso8601.parse_date('2015-06-26T10:00:01-05') now = now_tz.replace(tzinfo=None) + datetime.timedelta(hours=5) self.obj.scheduled_at = now_tz self.assertDictEqual({'scheduled_at': now}, self.obj.cinder_obj_get_changes()) @mock.patch('cinder.objects.base.CinderPersistentObject.get_by_id') def test_refresh(self, get_by_id): @objects.base.CinderObjectRegistry.register_if(False) class MyTestObject(objects.base.CinderObject, objects.base.CinderObjectDictCompat, objects.base.CinderComparableObject, objects.base.CinderPersistentObject): fields = {'id': fields.UUIDField(), 'name': fields.StringField()} test_obj = MyTestObject(id=fake.OBJECT_ID, name='foo') refresh_obj = MyTestObject(id=fake.OBJECT_ID, name='bar') get_by_id.return_value = refresh_obj 
test_obj.refresh() self._compare(self, refresh_obj, test_obj) @mock.patch('cinder.objects.base.CinderPersistentObject.get_by_id') def test_refresh_readonly(self, get_by_id_mock): @objects.base.CinderObjectRegistry.register_if(False) class MyTestObject(objects.base.CinderObject, objects.base.CinderObjectDictCompat, objects.base.CinderComparableObject, objects.base.CinderPersistentObject): fields = {'id': fields.UUIDField(), 'name': fields.StringField(read_only=True)} test_obj = MyTestObject(id=fake.OBJECT_ID, name='foo') refresh_obj = MyTestObject(id=fake.OBJECT_ID, name='bar') get_by_id_mock.return_value = refresh_obj test_obj.refresh() self._compare(self, refresh_obj, test_obj) def test_refresh_no_id_field(self): @objects.base.CinderObjectRegistry.register_if(False) class MyTestObjectNoId(objects.base.CinderObject, objects.base.CinderObjectDictCompat, objects.base.CinderComparableObject, objects.base.CinderPersistentObject): fields = {'uuid': fields.UUIDField()} test_obj = MyTestObjectNoId(uuid=fake.OBJECT_ID, name='foo') self.assertRaises(NotImplementedError, test_obj.refresh) @mock.patch('cinder.objects.base.objects', mock.Mock()) def test_cls_init(self): """Test that class init method gets called on registration.""" @objects.base.CinderObjectRegistry.register class MyTestObject(objects.base.CinderObject, objects.base.CinderPersistentObject): cinder_ovo_cls_init = mock.Mock() MyTestObject.cinder_ovo_cls_init.assert_called_once_with() def test_as_read_deleted_default(self): volume = objects.Volume(context=self.context) self.assertEqual('no', volume._context.read_deleted) with volume.as_read_deleted(): self.assertEqual('yes', volume._context.read_deleted) self.assertEqual('no', volume._context.read_deleted) @ddt.data('yes', 'no', 'only') def test_as_read_deleted_modes(self, mode): volume = objects.Volume(context=self.context) self.assertEqual('no', volume._context.read_deleted) with volume.as_read_deleted(mode=mode): self.assertEqual(mode, volume._context.read_deleted) self.assertEqual('no', volume._context.read_deleted) class TestCinderComparableObject(test_objects.BaseObjectsTestCase): def test_comparable_objects(self): @objects.base.CinderObjectRegistry.register class MyComparableObj(objects.base.CinderObject, objects.base.CinderObjectDictCompat, objects.base.CinderComparableObject): fields = {'foo': fields.Field(fields.Integer())} class NonVersionedObject(object): pass obj1 = MyComparableObj(foo=1) obj2 = MyComparableObj(foo=1) obj3 = MyComparableObj(foo=2) obj4 = NonVersionedObject() self.assertTrue(obj1 == obj2) self.assertFalse(obj1 == obj3) self.assertFalse(obj1 == obj4) self.assertIsNotNone(obj1) @ddt.ddt class TestCinderObjectConditionalUpdate(test.TestCase): def setUp(self): super(TestCinderObjectConditionalUpdate, self).setUp() self.context = context.get_admin_context() def _create_volume(self): vol = { 'display_description': 'Test Desc', 'size': 1, 'status': 'available', 'availability_zone': 'az', 'host': 'dummy', 'attach_status': c_fields.VolumeAttachStatus.DETACHED, } volume = objects.Volume(context=self.context, **vol) volume.create() return volume def _create_snapshot(self, volume): snapshot = objects.Snapshot(context=self.context, volume_id=volume.id) snapshot.create() return snapshot def _check_volume(self, volume, status, size, reload=False, dirty_keys=(), **kwargs): if reload: volume = objects.Volume.get_by_id(self.context, volume.id) self.assertEqual(status, volume.status) self.assertEqual(size, volume.size) dirty = volume.cinder_obj_get_changes() 
self.assertEqual(list(dirty_keys), list(dirty.keys())) for key, value in kwargs.items(): self.assertEqual(value, getattr(volume, key)) def test_conditional_update_non_iterable_expected(self): volume = self._create_volume() # We also check that we can check for None values self.assertTrue(volume.conditional_update( {'status': 'deleting', 'size': 2}, {'status': 'available', 'migration_status': None})) # Check that the object in memory has been updated self._check_volume(volume, 'deleting', 2) # Check that the volume in the DB also has been updated self._check_volume(volume, 'deleting', 2, True) def test_conditional_update_non_iterable_expected_model_field(self): volume = self._create_volume() # We also check that we can check for None values self.assertTrue(volume.conditional_update( {'status': 'deleting', 'size': 2, 'previous_status': volume.model.status}, {'status': 'available', 'migration_status': None})) # Check that the object in memory has been updated self._check_volume(volume, 'deleting', 2, previous_status='available') # Check that the volume in the DB also has been updated self._check_volume(volume, 'deleting', 2, True, previous_status='available') def test_conditional_update_non_iterable_expected_save_all(self): volume = self._create_volume() volume.size += 1 # We also check that we can check for not None values self.assertTrue(volume.conditional_update( {'status': 'deleting'}, {'status': 'available', 'availability_zone': volume.Not(None)}, save_all=True)) # Check that the object in memory has been updated and that the size # is not a dirty key self._check_volume(volume, 'deleting', 2) # Check that the volume in the DB also has been updated self._check_volume(volume, 'deleting', 2, True) def test_conditional_update_non_iterable_expected_dont_save_all(self): volume = self._create_volume() volume.size += 1 self.assertTrue(volume.conditional_update( {'status': 'deleting'}, {'status': 'available'}, save_all=False)) # Check that the object in memory has been updated with the new status # but that size has not been saved and is a dirty key self._check_volume(volume, 'deleting', 2, False, ['size']) # Check that the volume in the DB also has been updated but not the # size self._check_volume(volume, 'deleting', 1, True) def test_conditional_update_fail_non_iterable_expected_save_all(self): volume = self._create_volume() volume.size += 1 self.assertFalse(volume.conditional_update( {'status': 'available'}, {'status': 'deleting'}, save_all=True)) # Check that the object in memory has not been updated and that the # size is still a dirty key self._check_volume(volume, 'available', 2, False, ['size']) # Check that the volume in the DB hasn't been updated self._check_volume(volume, 'available', 1, True) def test_default_conditional_update_non_iterable_expected(self): volume = self._create_volume() self.assertTrue(volume.conditional_update({'status': 'deleting'})) # Check that the object in memory has been updated self._check_volume(volume, 'deleting', 1) # Check that the volume in the DB also has been updated self._check_volume(volume, 'deleting', 1, True) def test_default_conditional_fail_update_non_iterable_expected(self): volume_in_db = self._create_volume() volume = objects.Volume.get_by_id(self.context, volume_in_db.id) volume_in_db.size += 1 volume_in_db.save() # This will fail because size in DB is different self.assertFalse(volume.conditional_update({'status': 'deleting'})) # Check that the object in memory has not been updated self._check_volume(volume, 'available', 1) # Check that the 
volume in the DB hasn't changed the status but has # the size we changed before the conditional update self._check_volume(volume_in_db, 'available', 2, True) def test_default_conditional_update_non_iterable_expected_with_dirty(self): volume_in_db = self._create_volume() volume = objects.Volume.get_by_id(self.context, volume_in_db.id) volume_in_db.size += 1 volume_in_db.save() volume.size = 33 # This will fail because even though we have excluded the size from # the default condition when we dirtied it in the volume object, we # still have the last update timestamp that will be included in the # condition self.assertFalse(volume.conditional_update({'status': 'deleting'})) # Check that the object in memory has not been updated self._check_volume(volume, 'available', 33, False, ['size']) # Check that the volume in the DB hasn't changed the status but has # the size we changed before the conditional update self._check_volume(volume_in_db, 'available', 2, True) def test_conditional_update_negated_non_iterable_expected(self): volume = self._create_volume() self.assertTrue(volume.conditional_update( {'status': 'deleting', 'size': 2}, {'status': db.Not('in-use'), 'size': db.Not(2)})) # Check that the object in memory has been updated self._check_volume(volume, 'deleting', 2) # Check that the volume in the DB also has been updated self._check_volume(volume, 'deleting', 2, True) def test_conditional_update_non_iterable_expected_filter(self): # Volume we want to change volume = self._create_volume() # Another volume that has no snapshots volume2 = self._create_volume() # A volume with snapshots volume3 = self._create_volume() self._create_snapshot(volume3) # Update only it it has no snapshot filters = (~sql.exists().where( models.Snapshot.volume_id == models.Volume.id),) self.assertTrue(volume.conditional_update( {'status': 'deleting', 'size': 2}, {'status': 'available'}, filters)) # Check that the object in memory has been updated self._check_volume(volume, 'deleting', 2) # Check that the volume in the DB also has been updated self._check_volume(volume, 'deleting', 2, True) # Check that the other volumes in the DB haven't changed self._check_volume(volume2, 'available', 1, True) self._check_volume(volume3, 'available', 1, True) def test_conditional_update_iterable_expected(self): volume = self._create_volume() self.assertTrue(volume.conditional_update( {'status': 'deleting', 'size': 20}, {'status': ('error', 'available'), 'size': range(10)})) # Check that the object in memory has been updated self._check_volume(volume, 'deleting', 20) # Check that the volume in the DB also has been updated self._check_volume(volume, 'deleting', 20, True) def test_conditional_update_negated_iterable_expected(self): volume = self._create_volume() self.assertTrue(volume.conditional_update( {'status': 'deleting', 'size': 20}, {'status': db.Not(('creating', 'in-use')), 'size': range(10)})) # Check that the object in memory has been updated self._check_volume(volume, 'deleting', 20) # Check that the volume in the DB also has been updated self._check_volume(volume, 'deleting', 20, True) def test_conditional_update_fail_non_iterable_expected(self): volume = self._create_volume() self.assertFalse(volume.conditional_update( {'status': 'deleting'}, {'status': 'available', 'size': 2})) # Check that the object in memory hasn't changed self._check_volume(volume, 'available', 1) # Check that the volume in the DB hasn't changed either self._check_volume(volume, 'available', 1, True) def 
test_conditional_update_fail_negated_non_iterable_expected(self): volume = self._create_volume() result = volume.conditional_update({'status': 'deleting'}, {'status': db.Not('in-use'), 'size': 2}) self.assertFalse(result) # Check that the object in memory hasn't changed self._check_volume(volume, 'available', 1) # Check that the volume in the DB hasn't changed either self._check_volume(volume, 'available', 1, True) def test_conditional_update_fail_iterable_expected(self): volume = self._create_volume() self.assertFalse(volume.conditional_update( {'status': 'available'}, {'status': ('error', 'creating'), 'size': range(2, 10)})) # Check that the object in memory hasn't changed self._check_volume(volume, 'available', 1) # Check that the volume in the DB hasn't changed either self._check_volume(volume, 'available', 1, True) def test_conditional_update_fail_negated_iterable_expected(self): volume = self._create_volume() self.assertFalse(volume.conditional_update( {'status': 'error'}, {'status': db.Not(('available', 'in-use')), 'size': range(2, 10)})) # Check that the object in memory hasn't changed self._check_volume(volume, 'available', 1) # Check that the volume in the DB hasn't changed either self._check_volume(volume, 'available', 1, True) def test_conditional_update_fail_non_iterable_expected_filter(self): # Volume we want to change volume = self._create_volume() self._create_snapshot(volume) # A volume that has no snapshots volume2 = self._create_volume() # Another volume with snapshots volume3 = self._create_volume() self._create_snapshot(volume3) # Update only it it has no snapshot filters = (~sql.exists().where( models.Snapshot.volume_id == models.Volume.id),) self.assertFalse(volume.conditional_update( {'status': 'deleting', 'size': 2}, {'status': 'available'}, filters)) # Check that the object in memory hasn't been updated self._check_volume(volume, 'available', 1) # Check that no volume in the DB also has been updated self._check_volume(volume, 'available', 1, True) self._check_volume(volume2, 'available', 1, True) self._check_volume(volume3, 'available', 1, True) def test_conditional_update_non_iterable_case_value(self): # Volume we want to change and has snapshots volume = self._create_volume() self._create_snapshot(volume) # Filter that checks if a volume has snapshots has_snapshot_filter = sql.exists().where( models.Snapshot.volume_id == models.Volume.id) # We want the updated value to depend on whether it has snapshots or # not case_values = volume.Case([(has_snapshot_filter, 'has-snapshot')], else_='no-snapshot') self.assertTrue(volume.conditional_update({'status': case_values}, {'status': 'available'})) # Check that the object in memory has been updated self._check_volume(volume, 'has-snapshot', 1) # Check that the volume in the DB also has been updated self._check_volume(volume, 'has-snapshot', 1, True) def test_conditional_update_non_iterable_case_value_else(self): # Volume we want to change volume = self._create_volume() # Filter that checks if a volume has snapshots has_snapshot_filter = sql.exists().where( models.Snapshot.volume_id == models.Volume.id) # We want the updated value to depend on whether it has snapshots or # not case_values = volume.Case([(has_snapshot_filter, 'has-snapshot')], else_='no-snapshot') self.assertTrue(volume.conditional_update({'status': case_values}, {'status': 'available'})) # Check that the object in memory has been updated self._check_volume(volume, 'no-snapshot', 1) # Check that the volume in the DB also has been updated 
self._check_volume(volume, 'no-snapshot', 1, True) def test_conditional_update_non_iterable_case_value_fail(self): # Volume we want to change doesn't have snapshots volume = self._create_volume() # Filter that checks if a volume has snapshots has_snapshot_filter = sql.exists().where( models.Snapshot.volume_id == models.Volume.id) # We want the updated value to depend on whether it has snapshots or # not case_values = volume.Case([(has_snapshot_filter, 'has-snapshot')], else_='no-snapshot') # We won't update because volume status is available self.assertFalse(volume.conditional_update({'status': case_values}, {'status': 'deleting'})) # Check that the object in memory has not been updated self._check_volume(volume, 'available', 1) # Check that the volume in the DB also hasn't been updated either self._check_volume(volume, 'available', 1, True) def test_conditional_update_iterable_with_none_expected(self): volume = self._create_volume() # We also check that we can check for None values in an iterable self.assertTrue(volume.conditional_update( {'status': 'deleting'}, {'status': (None, 'available'), 'migration_status': (None, 'finished')})) # Check that the object in memory has been updated self._check_volume(volume, 'deleting', 1) # Check that the volume in the DB also has been updated self._check_volume(volume, 'deleting', 1, True) def test_conditional_update_iterable_with_not_none_expected(self): volume = self._create_volume() # We also check that we can check for None values in a negated iterable self.assertTrue(volume.conditional_update( {'status': 'deleting'}, {'status': volume.Not((None, 'in-use'))})) # Check that the object in memory has been updated self._check_volume(volume, 'deleting', 1) # Check that the volume in the DB also has been updated self._check_volume(volume, 'deleting', 1, True) def test_conditional_update_iterable_with_not_includes_null(self): volume = self._create_volume() # We also check that negation includes None values by default like we # do in Python and not like MySQL does self.assertTrue(volume.conditional_update( {'status': 'deleting'}, {'status': 'available', 'migration_status': volume.Not(('migrating', 'error'))})) # Check that the object in memory has been updated self._check_volume(volume, 'deleting', 1) # Check that the volume in the DB also has been updated self._check_volume(volume, 'deleting', 1, True) def test_conditional_update_iterable_with_not_includes_null_fails(self): volume = self._create_volume() # We also check that negation excludes None values if we ask it to self.assertFalse(volume.conditional_update( {'status': 'deleting'}, {'status': 'available', 'migration_status': volume.Not(('migrating', 'error'), auto_none=False)})) # Check that the object in memory has not been updated self._check_volume(volume, 'available', 1, False) # Check that the volume in the DB hasn't been updated self._check_volume(volume, 'available', 1, True) def test_conditional_update_use_operation_in_value(self): volume = self._create_volume() expected_size = volume.size + 1 # We also check that using fields in requested changes will work as # expected self.assertTrue(volume.conditional_update( {'status': 'deleting', 'size': volume.model.size + 1}, {'status': 'available'})) # Check that the object in memory has been updated self._check_volume(volume, 'deleting', expected_size, False) # Check that the volume in the DB has also been updated self._check_volume(volume, 'deleting', expected_size, True) def test_conditional_update_auto_order(self): volume = 
self._create_volume() has_snapshot_filter = sql.exists().where( models.Snapshot.volume_id == models.Volume.id) case_values = volume.Case([(has_snapshot_filter, 'has-snapshot')], else_='no-snapshot') values = {'status': 'deleting', 'previous_status': volume.model.status, 'migration_status': case_values} with mock.patch('cinder.db.sqlalchemy.api.model_query') as model_query: update = model_query.return_value.filter.return_value.update update.return_value = 0 self.assertFalse(volume.conditional_update( values, {'status': 'available'})) # We check that we are passing values to update to SQLAlchemy in the # right order self.assertEqual(1, update.call_count) self.assertListEqual( [('previous_status', volume.model.status), ('migration_status', mock.ANY), ('status', 'deleting')], list(update.call_args[0][0])) self.assertDictEqual( {'synchronize_session': False, 'update_args': {'preserve_parameter_order': True}}, update.call_args[1]) def test_conditional_update_force_order(self): volume = self._create_volume() has_snapshot_filter = sql.exists().where( models.Snapshot.volume_id == models.Volume.id) case_values = volume.Case([(has_snapshot_filter, 'has-snapshot')], else_='no-snapshot') values = {'status': 'deleting', 'previous_status': volume.model.status, 'migration_status': case_values} order = ['status'] with mock.patch('cinder.db.sqlalchemy.api.model_query') as model_query: update = model_query.return_value.filter.return_value.update update.return_value = 0 self.assertFalse(volume.conditional_update( values, {'status': 'available'}, order=order)) # We check that we are passing values to update to SQLAlchemy in the # right order self.assertEqual(1, update.call_count) self.assertListEqual( [('status', 'deleting'), ('previous_status', volume.model.status), ('migration_status', mock.ANY)], list(update.call_args[0][0])) self.assertDictEqual( {'synchronize_session': False, 'update_args': {'preserve_parameter_order': True}}, update.call_args[1]) def test_conditional_update_no_order(self): volume = self._create_volume() values = {'status': 'deleting', 'previous_status': 'available', 'migration_status': None} with mock.patch('cinder.db.sqlalchemy.api.model_query') as model_query: update = model_query.return_value.filter.return_value.update update.return_value = 0 self.assertFalse(volume.conditional_update( values, {'status': 'available'})) # Check that arguments passed to SQLAlchemy's update are correct (order # is not relevant). self.assertEqual(1, update.call_count) arg = update.call_args[0][0] self.assertIsInstance(arg, dict) self.assertEqual(set(values.keys()), set(arg.keys())) def test_conditional_update_multitable_fail(self): volume = self._create_volume() self.assertRaises(exception.ProgrammingError, volume.conditional_update, {'status': 'deleting', objects.Snapshot.model.status: 'available'}, {'status': 'available'}) def test_conditional_update_multitable_fail_fields_different_models(self): volume = self._create_volume() self.assertRaises(exception.ProgrammingError, volume.conditional_update, {objects.Backup.model.status: 'available', objects.Snapshot.model.status: 'available'}) @ddt.data(('available', 'error', None), ('error', 'rolling_back', [{'fake_filter': 'faked'}])) @ddt.unpack @mock.patch('cinder.objects.base.' 
'CinderPersistentObject.conditional_update') def test_update_status_where(self, value, expected, filters, mock_update): volume = self._create_volume() if filters: volume.update_single_status_where(value, expected, filters) mock_update.assert_called_with({'status': value}, {'status': expected}, filters) else: volume.update_single_status_where(value, expected) mock_update.assert_called_with({'status': value}, {'status': expected}, ()) class TestCinderDictObject(test_objects.BaseObjectsTestCase): @objects.base.CinderObjectRegistry.register_if(False) class TestDictObject(objects.base.CinderObjectDictCompat, objects.base.CinderObject): obj_extra_fields = ['foo'] fields = { 'abc': fields.StringField(nullable=True), 'def': fields.IntegerField(nullable=True), } @property def foo(self): return 42 def test_dict_objects(self): obj = self.TestDictObject() self.assertNotIn('non_existing', obj) self.assertEqual('val', obj.get('abc', 'val')) self.assertNotIn('abc', obj) obj.abc = 'val2' self.assertEqual('val2', obj.get('abc', 'val')) self.assertEqual(42, obj.get('foo')) self.assertEqual(42, obj.get('foo', None)) self.assertIn('foo', obj) self.assertIn('abc', obj) self.assertNotIn('def', obj) @mock.patch('cinder.objects.base.OBJ_VERSIONS', fake_objects.MyHistory()) class TestCinderObjectSerializer(test_objects.BaseObjectsTestCase): BACKPORT_MSG = ('Backporting %(obj_name)s from version %(src_vers)s to ' 'version %(dst_vers)s') def setUp(self): super(TestCinderObjectSerializer, self).setUp() self.obj = fake_objects.ChildObject(scheduled_at=None, uuid=uuid.uuid4(), text='text', integer=1) self.parent = fake_objects.ParentObject(uuid=uuid.uuid4(), child=self.obj, scheduled_at=None) self.parent_list = fake_objects.ParentObjectList(objects=[self.parent]) def test_serialize_init_current_has_no_manifest(self): """Test that pinned to current version we have no manifest.""" serializer = objects.base.CinderObjectSerializer('1.6') # Serializer should not have a manifest self.assertIsNone(serializer.manifest) def test_serialize_init_no_cap_has_no_manifest(self): """Test that without cap we have no manifest.""" serializer = objects.base.CinderObjectSerializer() # Serializer should not have a manifest self.assertIsNone(serializer.manifest) def test_serialize_init_pinned_has_manifest(self): """Test that pinned to older version we have manifest.""" objs_version = '1.5' serializer = objects.base.CinderObjectSerializer(objs_version) # Serializer should have the right manifest self.assertDictEqual(fake_objects.MyHistory()[objs_version], serializer.manifest) def test_serialize_entity_unknown_version(self): """Test that bad cap version will prevent serializer creation.""" self.assertRaises(exception.CappedVersionUnknown, objects.base.CinderObjectSerializer, '0.9') @mock.patch('cinder.objects.base.LOG.debug') def test_serialize_entity_basic_no_backport(self, log_debug_mock): """Test single element serializer with no backport.""" serializer = objects.base.CinderObjectSerializer('1.6') primitive = serializer.serialize_entity(self.context, self.obj) self.assertEqual('1.2', primitive['versioned_object.version']) data = primitive['versioned_object.data'] self.assertEqual(1, data['integer']) self.assertEqual('text', data['text']) log_debug_mock.assert_not_called() @mock.patch('cinder.objects.base.LOG.debug') def test_serialize_entity_basic_backport(self, log_debug_mock): """Test single element serializer with backport.""" serializer = objects.base.CinderObjectSerializer('1.5') primitive = serializer.serialize_entity(self.context, 
self.obj) self.assertEqual('1.1', primitive['versioned_object.version']) data = primitive['versioned_object.data'] self.assertNotIn('integer', data) self.assertEqual('text', data['text']) log_debug_mock.assert_called_once_with(self.BACKPORT_MSG, {'obj_name': 'ChildObject', 'src_vers': '1.2', 'dst_vers': '1.1'}) @mock.patch('cinder.objects.base.LOG.debug') def test_serialize_entity_full_no_backport(self, log_debug_mock): """Test related elements serialization with no backport.""" serializer = objects.base.CinderObjectSerializer('1.6') primitive = serializer.serialize_entity(self.context, self.parent_list) self.assertEqual('1.1', primitive['versioned_object.version']) parent = primitive['versioned_object.data']['objects'][0] self.assertEqual('1.1', parent['versioned_object.version']) child = parent['versioned_object.data']['child'] self.assertEqual('1.2', child['versioned_object.version']) log_debug_mock.assert_not_called() @mock.patch('cinder.objects.base.LOG.debug') def test_serialize_entity_full_backport_last_children(self, log_debug_mock): """Test related elements serialization with backport of the last child. Test that using the manifest we properly backport a child object even when all its parents have not changed their version. """ serializer = objects.base.CinderObjectSerializer('1.5') primitive = serializer.serialize_entity(self.context, self.parent_list) self.assertEqual('1.1', primitive['versioned_object.version']) parent = primitive['versioned_object.data']['objects'][0] self.assertEqual('1.1', parent['versioned_object.version']) # Only the child has been backported child = parent['versioned_object.data']['child'] self.assertEqual('1.1', child['versioned_object.version']) # Check that the backport has been properly done data = child['versioned_object.data'] self.assertNotIn('integer', data) self.assertEqual('text', data['text']) log_debug_mock.assert_called_once_with(self.BACKPORT_MSG, {'obj_name': 'ChildObject', 'src_vers': '1.2', 'dst_vers': '1.1'}) @mock.patch('cinder.objects.base.LOG.debug') def test_serialize_entity_full_backport(self, log_debug_mock): """Test backport of the whole tree of related elements.""" serializer = objects.base.CinderObjectSerializer('1.3') primitive = serializer.serialize_entity(self.context, self.parent_list) # List has been backported self.assertEqual('1.0', primitive['versioned_object.version']) parent = primitive['versioned_object.data']['objects'][0] # Parent has been backported as well self.assertEqual('1.0', parent['versioned_object.version']) # And the backport has been properly done data = parent['versioned_object.data'] self.assertNotIn('scheduled_at', data) # And child as well child = parent['versioned_object.data']['child'] self.assertEqual('1.1', child['versioned_object.version']) # Check that the backport has been properly done data = child['versioned_object.data'] self.assertNotIn('integer', data) self.assertEqual('text', data['text']) log_debug_mock.assert_has_calls([ mock.call(self.BACKPORT_MSG, {'obj_name': 'ParentObjectList', 'src_vers': '1.1', 'dst_vers': '1.0'}), mock.call(self.BACKPORT_MSG, {'obj_name': 'ParentObject', 'src_vers': '1.1', 'dst_vers': '1.0'}), mock.call(self.BACKPORT_MSG, {'obj_name': 'ChildObject', 'src_vers': '1.2', 'dst_vers': '1.1'})]) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/objects/test_cgsnapshot.py0000664000175000017500000002001200000000000023662 0ustar00zuulzuul00000000000000# Copyright 2015 Intel 
Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from unittest import mock from zoneinfo import ZoneInfo from oslo_utils import timeutils from cinder import exception from cinder import objects from cinder.tests.unit import fake_constants as fake from cinder.tests.unit import objects as test_objects from cinder.tests.unit.objects.test_consistencygroup import \ fake_consistencygroup fake_cgsnapshot = { 'id': fake.CGSNAPSHOT_ID, 'user_id': fake.USER_ID, 'project_id': fake.PROJECT_ID, 'name': 'fake_name', 'description': 'fake_description', 'status': 'creating', 'consistencygroup_id': fake.CONSISTENCY_GROUP_ID, } class TestCGSnapshot(test_objects.BaseObjectsTestCase): @mock.patch('cinder.db.sqlalchemy.api.cgsnapshot_get', return_value=fake_cgsnapshot) def test_get_by_id(self, cgsnapshot_get): cgsnapshot = objects.CGSnapshot.get_by_id(self.context, fake.CGSNAPSHOT_ID) self._compare(self, fake_cgsnapshot, cgsnapshot) @mock.patch('cinder.db.cgsnapshot_create', return_value=fake_cgsnapshot) def test_create(self, cgsnapshot_create): fake_cgsnap = fake_cgsnapshot.copy() del fake_cgsnap['id'] cgsnapshot = objects.CGSnapshot(context=self.context, **fake_cgsnap) cgsnapshot.create() self._compare(self, fake_cgsnapshot, cgsnapshot) def test_create_with_id_except_exception(self): cgsnapshot = objects.CGSnapshot(context=self.context, **{'id': fake.CONSISTENCY_GROUP_ID}) self.assertRaises(exception.ObjectActionError, cgsnapshot.create) @mock.patch('cinder.db.cgsnapshot_update') def test_save(self, cgsnapshot_update): cgsnapshot = objects.CGSnapshot._from_db_object( self.context, objects.CGSnapshot(), fake_cgsnapshot) cgsnapshot.status = 'active' cgsnapshot.save() cgsnapshot_update.assert_called_once_with(self.context, cgsnapshot.id, {'status': 'active'}) @mock.patch('cinder.db.consistencygroup_update', return_value=fake_consistencygroup) @mock.patch('cinder.db.cgsnapshot_update') def test_save_with_consistencygroup(self, cgsnapshot_update, cgsnapshot_cg_update): consistencygroup = objects.ConsistencyGroup._from_db_object( self.context, objects.ConsistencyGroup(), fake_consistencygroup) cgsnapshot = objects.CGSnapshot._from_db_object( self.context, objects.CGSnapshot(), fake_cgsnapshot) cgsnapshot.name = 'foobar' cgsnapshot.consistencygroup = consistencygroup self.assertEqual({'name': 'foobar', 'consistencygroup': consistencygroup}, cgsnapshot.obj_get_changes()) self.assertRaises(exception.ObjectActionError, cgsnapshot.save) @mock.patch('oslo_utils.timeutils.utcnow', return_value=timeutils.utcnow()) @mock.patch('cinder.db.sqlalchemy.api.cgsnapshot_destroy') def test_destroy(self, cgsnapshot_destroy, utcnow_mock): cgsnapshot_destroy.return_value = { 'status': 'deleted', 'deleted': True, 'deleted_at': utcnow_mock.return_value} cgsnapshot = objects.CGSnapshot(context=self.context, id=fake.CGSNAPSHOT_ID) cgsnapshot.destroy() self.assertTrue(cgsnapshot_destroy.called) admin_context = cgsnapshot_destroy.call_args[0][0] self.assertTrue(admin_context.is_admin) self.assertTrue(cgsnapshot.deleted) 
self.assertEqual('deleted', cgsnapshot.status) self.assertEqual( utcnow_mock.return_value.replace(tzinfo=ZoneInfo('UTC')), cgsnapshot.deleted_at) @mock.patch('cinder.objects.consistencygroup.ConsistencyGroup.get_by_id') @mock.patch('cinder.objects.snapshot.SnapshotList.get_all_for_cgsnapshot') def test_obj_load_attr(self, snapshotlist_get_for_cgs, consistencygroup_get_by_id): cgsnapshot = objects.CGSnapshot._from_db_object( self.context, objects.CGSnapshot(), fake_cgsnapshot) # Test consistencygroup lazy-loaded field consistencygroup = objects.ConsistencyGroup( context=self.context, id=fake.CONSISTENCY_GROUP_ID) consistencygroup_get_by_id.return_value = consistencygroup self.assertEqual(consistencygroup, cgsnapshot.consistencygroup) consistencygroup_get_by_id.assert_called_once_with( self.context, cgsnapshot.consistencygroup_id) # Test snapshots lazy-loaded field snapshots_objs = [objects.Snapshot(context=self.context, id=i) for i in [fake.SNAPSHOT_ID, fake.SNAPSHOT2_ID, fake.SNAPSHOT3_ID]] snapshots = objects.SnapshotList(context=self.context, objects=snapshots_objs) snapshotlist_get_for_cgs.return_value = snapshots self.assertEqual(snapshots, cgsnapshot.snapshots) snapshotlist_get_for_cgs.assert_called_once_with( self.context, cgsnapshot.id) @mock.patch('cinder.db.sqlalchemy.api.cgsnapshot_get') def test_refresh(self, cgsnapshot_get): db_cgsnapshot1 = fake_cgsnapshot.copy() db_cgsnapshot2 = db_cgsnapshot1.copy() db_cgsnapshot2['description'] = 'foobar' # On the second cgsnapshot_get, return the CGSnapshot with an updated # description cgsnapshot_get.side_effect = [db_cgsnapshot1, db_cgsnapshot2] cgsnapshot = objects.CGSnapshot.get_by_id(self.context, fake.CGSNAPSHOT_ID) self._compare(self, db_cgsnapshot1, cgsnapshot) # description was updated, so a CGSnapshot refresh should have a new # value for that field cgsnapshot.refresh() self._compare(self, db_cgsnapshot2, cgsnapshot) cgsnapshot_get.assert_has_calls([mock.call(self.context, fake.CGSNAPSHOT_ID), mock.call.__bool__(), mock.call(self.context, fake.CGSNAPSHOT_ID)]) class TestCGSnapshotList(test_objects.BaseObjectsTestCase): @mock.patch('cinder.db.cgsnapshot_get_all', return_value=[fake_cgsnapshot]) def test_get_all(self, cgsnapshot_get_all): cgsnapshots = objects.CGSnapshotList.get_all(self.context) self.assertEqual(1, len(cgsnapshots)) TestCGSnapshot._compare(self, fake_cgsnapshot, cgsnapshots[0]) @mock.patch('cinder.db.cgsnapshot_get_all_by_project', return_value=[fake_cgsnapshot]) def test_get_all_by_project(self, cgsnapshot_get_all_by_project): cgsnapshots = objects.CGSnapshotList.get_all_by_project( self.context, self.project_id) self.assertEqual(1, len(cgsnapshots)) TestCGSnapshot._compare(self, fake_cgsnapshot, cgsnapshots[0]) @mock.patch('cinder.db.cgsnapshot_get_all_by_group', return_value=[fake_cgsnapshot]) def test_get_all_by_group(self, cgsnapshot_get_all_by_group): cgsnapshots = objects.CGSnapshotList.get_all_by_group( self.context, self.project_id) self.assertEqual(1, len(cgsnapshots)) TestCGSnapshot._compare(self, fake_cgsnapshot, cgsnapshots[0]) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/objects/test_cleanable.py0000664000175000017500000004211700000000000023431 0ustar00zuulzuul00000000000000# Copyright (c) 2016 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import inspect from unittest import mock from cinder import context from cinder import exception from cinder.objects import cleanable from cinder import service from cinder.tests.unit import objects as test_objects from cinder.volume import rpcapi # NOTE(geguileo): We use Backup because we have version changes from 1.0 to 1.3 class Backup(cleanable.CinderCleanableObject): def __init__(self, *args, **kwargs): super(Backup, self).__init__(*args) for attr, value in kwargs.items(): setattr(self, attr, value) @staticmethod def _is_cleanable(status, obj_version): if obj_version and obj_version < 1003: return False return status == 'cleanable' class TestCleanable(test_objects.BaseObjectsTestCase): MOCK_WORKER = False def setUp(self): super(TestCleanable, self).setUp() self.context = context.RequestContext(self.user_id, self.project_id, is_admin=True) def test_get_rpc_api(self): """Test get_rpc_api.""" vol_rpcapi = cleanable.CinderCleanableObject.get_rpc_api() self.assertEqual(rpcapi.VolumeAPI, vol_rpcapi) def set_version(self, version): self.patch('cinder.volume.rpcapi.VolumeAPI.determine_obj_version_cap', mock.Mock(return_value='1.0')) self.patch('cinder.objects.base.OBJ_VERSIONS', {'1.0': {'Backup': version}}) def test_get_pinned_version(self): """Test that we get the pinned version for this specific object.""" self.set_version('1.3') version = Backup.get_pinned_version() self.assertEqual(1003, version) def test_is_cleanable_pinned_pinned_too_old(self): """Test is_cleanable with pinned version with uncleanable version.""" self.set_version('1.0') backup = Backup(status='cleanable') self.assertFalse(backup.is_cleanable(pinned=True)) def test_is_cleanable_pinned_result_true(self): """Test with pinned version with cleanable version and status.""" self.set_version('1.3') backup = Backup(status='cleanable') self.assertTrue(backup.is_cleanable(pinned=True)) def test_is_cleanable_pinned_result_false(self): """Test with pinned version with cleanable version but not status.""" self.set_version('1.0') backup = Backup(status='not_cleanable') self.assertFalse(backup.is_cleanable(pinned=True)) def test_is_cleanable_unpinned_result_false(self): """Test unpinned version with old version and non cleanable status.""" self.set_version('1.0') backup = Backup(status='not_cleanable') self.assertFalse(backup.is_cleanable(pinned=False)) def test_is_cleanable_unpinned_result_true(self): """Test unpinned version with old version and cleanable status.""" self.set_version('1.0') backup = Backup(status='cleanable') self.assertTrue(backup.is_cleanable(pinned=False)) @mock.patch('cinder.db.worker_create', autospec=True) def test_create_worker(self, mock_create): """Test worker creation as if it were from an rpc call.""" self.set_version('1.3') mock_create.return_value = mock.sentinel.worker backup = Backup(_context=self.context, status='cleanable', id=mock.sentinel.id) res = backup.create_worker() self.assertTrue(res) mock_create.assert_called_once_with(self.context, status='cleanable', resource_type='Backup', resource_id=mock.sentinel.id) @mock.patch('cinder.db.worker_create', autospec=True) def 
test_create_worker_pinned_too_old(self, mock_create):
        """Test worker creation when we are pinned with an old version."""
        self.set_version('1.0')
        mock_create.return_value = mock.sentinel.worker
        backup = Backup(_context=self.context, status='cleanable',
                        id=mock.sentinel.id)
        res = backup.create_worker()
        self.assertFalse(res)
        self.assertFalse(mock_create.called)

    @mock.patch('cinder.db.worker_create', autospec=True)
    def test_create_worker_non_cleanable(self, mock_create):
        """Test worker creation when status is non cleanable."""
        self.set_version('1.3')
        mock_create.return_value = mock.sentinel.worker
        backup = Backup(_context=self.context, status='non_cleanable',
                        id=mock.sentinel.id)
        res = backup.create_worker()
        self.assertFalse(res)
        self.assertFalse(mock_create.called)

    @mock.patch('cinder.db.worker_update', autospec=True)
    @mock.patch('cinder.db.worker_create', autospec=True)
    def test_create_worker_already_exists(self, mock_create, mock_update):
        """Test worker creation when a worker for the resource exists."""
        self.set_version('1.3')
        mock_create.side_effect = exception.WorkerExists(type='type', id='id')
        backup = Backup(_context=self.context, status='cleanable',
                        id=mock.sentinel.id)
        res = backup.create_worker()
        self.assertTrue(res)
        self.assertTrue(mock_create.called)
        mock_update.assert_called_once_with(
            self.context, None,
            filters={'resource_type': 'Backup',
                     'resource_id': mock.sentinel.id},
            service_id=None, status='cleanable')

    @mock.patch('cinder.db.worker_update', autospec=True)
    @mock.patch('cinder.db.worker_create', autospec=True)
    def test_create_worker_cleaning(self, mock_create, mock_update):
        """Test worker creation on race condition.

        Test that we still create an entry if there is a rare race condition
        that the entry gets removed from the DB between our failure to create
        it and our try to update the entry.
""" self.set_version('1.3') mock_create.side_effect = [ exception.WorkerExists(type='type', id='id'), mock.sentinel.worker] mock_update.side_effect = exception.WorkerNotFound backup = Backup(_context=self.context, status='cleanable', id=mock.sentinel.id) self.assertTrue(backup.create_worker()) self.assertEqual(2, mock_create.call_count) self.assertTrue(mock_update.called) @mock.patch('cinder.db.worker_update', autospec=True) @mock.patch('cinder.db.worker_get', autospec=True) def test_set_worker(self, mock_get, mock_update): """Test set worker for a normal job received from an rpc call.""" service.Service.service_id = mock.sentinel.service_id mock_get.return_value.cleaning = False backup = Backup(_context=self.context, status=mock.sentinel.status, id=mock.sentinel.id) backup.set_worker() mock_get.assert_called_once_with(self.context, resource_type='Backup', resource_id=mock.sentinel.id) worker = mock_get.return_value mock_update.assert_called_once_with( self.context, worker.id, filters={'service_id': worker.service_id, 'status': worker.status, 'race_preventer': worker.race_preventer, 'updated_at': worker.updated_at}, service_id=mock.sentinel.service_id, status=mock.sentinel.status, orm_worker=worker) self.assertEqual(worker, backup.worker) @mock.patch('cinder.db.worker_create', autospec=True) @mock.patch('cinder.db.worker_get', autospec=True) def test_set_worker_direct(self, mock_get, mock_create): """Test set worker for direct call (non rpc call).""" mock_get.side_effect = exception.WorkerNotFound service_id = mock.sentinel.service_id service.Service.service_id = service_id mock_create.return_value = mock.Mock(service_id=service_id, status=mock.sentinel.status, deleted=False, cleaning=False) backup = Backup(_context=self.context, status=mock.sentinel.status, id=mock.sentinel.id) backup.set_worker() mock_get.assert_called_once_with(self.context, resource_type='Backup', resource_id=mock.sentinel.id) mock_create.assert_called_once_with(self.context, status=mock.sentinel.status, resource_type='Backup', resource_id=mock.sentinel.id, service_id=service_id) self.assertEqual(mock_create.return_value, backup.worker) @mock.patch('cinder.db.worker_update', autospec=True) @mock.patch('cinder.db.worker_get', autospec=True) def test_set_worker_claim_from_another_host(self, mock_get, mock_update): """Test set worker when the job was started on another failed host.""" service_id = mock.sentinel.service_id service.Service.service_id = service_id worker = mock.Mock(service_id=mock.sentinel.service_id2, status=mock.sentinel.status, cleaning=False, updated_at=mock.sentinel.updated_at) mock_get.return_value = worker backup = Backup(_context=self.context, status=mock.sentinel.status, id=mock.sentinel.id) backup.set_worker() mock_update.assert_called_once_with( self.context, worker.id, filters={'service_id': mock.sentinel.service_id2, 'status': mock.sentinel.status, 'race_preventer': worker.race_preventer, 'updated_at': mock.sentinel.updated_at}, service_id=service_id, status=mock.sentinel.status, orm_worker=worker) self.assertEqual(worker, backup.worker) @mock.patch('cinder.db.worker_create', autospec=True) @mock.patch('cinder.db.worker_get', autospec=True) def test_set_worker_race_condition_fail(self, mock_get, mock_create): """Test we cannot claim a work if we lose race condition.""" service.Service.service_id = mock.sentinel.service_id mock_get.side_effect = exception.WorkerNotFound mock_create.side_effect = exception.WorkerExists(type='type', id='id') backup = Backup(_context=self.context, 
status=mock.sentinel.status, id=mock.sentinel.id) self.assertRaises(exception.CleanableInUse, backup.set_worker) self.assertTrue(mock_get.called) self.assertTrue(mock_create.called) @mock.patch('cinder.db.worker_update', autospec=True) @mock.patch('cinder.db.worker_get', autospec=True) def test_set_worker_claim_fail_after_get(self, mock_get, mock_update): """Test we don't have race condition if worker changes after get.""" service.Service.service_id = mock.sentinel.service_id worker = mock.Mock(service_id=mock.sentinel.service_id2, status=mock.sentinel.status, deleted=False, cleaning=False) mock_get.return_value = worker mock_update.side_effect = exception.WorkerNotFound backup = Backup(_context=self.context, status=mock.sentinel.status, id=mock.sentinel.id) self.assertRaises(exception.CleanableInUse, backup.set_worker) self.assertTrue(mock_get.called) self.assertTrue(mock_update.called) @mock.patch('cinder.db.worker_destroy') def test_unset_worker(self, destroy_mock): backup = Backup(_context=self.context, status=mock.sentinel.status, id=mock.sentinel.id) worker = mock.Mock() backup.worker = worker backup.unset_worker() destroy_mock.assert_called_once_with(self.context, id=worker.id, status=worker.status, service_id=worker.service_id) self.assertIsNone(backup.worker) @mock.patch('cinder.db.worker_destroy') def test_unset_worker_not_set(self, destroy_mock): backup = Backup(_context=self.context, status=mock.sentinel.status, id=mock.sentinel.id) backup.unset_worker() self.assertFalse(destroy_mock.called) @mock.patch('cinder.db.worker_update', autospec=True) @mock.patch('cinder.db.worker_get', autospec=True) def test_set_workers_no_arguments(self, mock_get, mock_update): """Test set workers decorator without arguments.""" @Backup.set_workers def my_function(arg1, arg2, kwarg1=None, kwarg2=True): return arg1, arg2, kwarg1, kwarg2 # Decorator with no args must preserve the method's signature self.assertEqual('my_function', my_function.__name__) call_args = inspect.getcallargs( my_function, mock.sentinel.arg1, mock.sentinel.arg2, mock.sentinel.kwargs1, kwarg2=mock.sentinel.kwarg2) expected = {'arg1': mock.sentinel.arg1, 'arg2': mock.sentinel.arg2, 'kwarg1': mock.sentinel.kwargs1, 'kwarg2': mock.sentinel.kwarg2} self.assertDictEqual(expected, call_args) service.Service.service_id = mock.sentinel.service_id mock_get.return_value.cleaning = False backup = Backup(_context=self.context, status='cleanable', id=mock.sentinel.id) backup2 = Backup(_context=self.context, status='non-cleanable', id=mock.sentinel.id2) res = my_function(backup, backup2) self.assertEqual((backup, backup2, None, True), res) mock_get.assert_called_once_with(self.context, resource_type='Backup', resource_id=mock.sentinel.id) worker = mock_get.return_value mock_update.assert_called_once_with( self.context, worker.id, filters={'service_id': worker.service_id, 'status': worker.status, 'race_preventer': worker.race_preventer, 'updated_at': worker.updated_at}, service_id=mock.sentinel.service_id, status='cleanable', orm_worker=worker) self.assertEqual(worker, backup.worker) @mock.patch('cinder.db.worker_update', autospec=True) @mock.patch('cinder.db.worker_get', autospec=True) def test_set_workers_with_arguments(self, mock_get, mock_update): """Test set workers decorator with an argument.""" @Backup.set_workers('arg2', 'kwarg1') def my_function(arg1, arg2, kwarg1=None, kwarg2=True): return arg1, arg2, kwarg1, kwarg2 # Decorator with args must preserve the method's signature self.assertEqual('my_function', my_function.__name__) 
call_args = inspect.getcallargs( my_function, mock.sentinel.arg1, mock.sentinel.arg2, mock.sentinel.kwargs1, kwarg2=mock.sentinel.kwarg2) expected = {'arg1': mock.sentinel.arg1, 'arg2': mock.sentinel.arg2, 'kwarg1': mock.sentinel.kwargs1, 'kwarg2': mock.sentinel.kwarg2} self.assertDictEqual(expected, call_args) service.Service.service_id = mock.sentinel.service_id mock_get.return_value.cleaning = False backup = Backup(_context=self.context, status='cleanable', id=mock.sentinel.id) backup2 = Backup(_context=self.context, status='non-cleanable', id=mock.sentinel.id2) backup3 = Backup(_context=self.context, status='cleanable', id=mock.sentinel.id3) res = my_function(backup, backup2, backup3) self.assertEqual((backup, backup2, backup3, True), res) mock_get.assert_called_once_with(self.context, resource_type='Backup', resource_id=mock.sentinel.id3) worker = mock_get.return_value mock_update.assert_called_once_with( self.context, worker.id, filters={'service_id': worker.service_id, 'status': worker.status, 'race_preventer': worker.race_preventer, 'updated_at': worker.updated_at}, service_id=mock.sentinel.service_id, status='cleanable', orm_worker=worker) self.assertEqual(worker, backup3.worker) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/objects/test_cleanup_request.py0000664000175000017500000000537000000000000024722 0ustar00zuulzuul00000000000000# Copyright (c) 2016 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from unittest import mock from oslo_utils import timeutils from cinder import objects from cinder.tests.unit import fake_constants as fake from cinder.tests.unit import objects as test_objects class TestCleanupRequest(test_objects.BaseObjectsTestCase): all_fields = ('service_id', 'cluster_name', 'host', 'binary', 'service_id', 'is_up', 'disabled', 'resource_id', 'resource_type', 'until') default = {'is_up': False} def setUp(self): super(TestCleanupRequest, self).setUp() self.fields = dict(service_id=1, cluster_name='cluster_name', host='host_name', binary='binary_name', is_up=False, resource_id=fake.VOLUME_ID, resource_type='Volume', until=timeutils.utcnow(with_timezone=True), disabled=True) def _req_as_dict(self, req): return {field: getattr(req, field) for field in self.all_fields} def _req_default(self, field): return self.default.get(field, None) def test_init_all_set(self): """Test __init__ when setting all field values.""" req = objects.CleanupRequest(mock.sentinel.context, **self.fields) self.assertDictEqual(self.fields, self._req_as_dict(req)) def test_init_default(self): """Test __init__ when one field is missing.""" for field in self.fields: fields = self.fields.copy() del fields[field] req = objects.CleanupRequest(mock.sentinel.context, **fields) fields[field] = self._req_default(field) self.assertDictEqual(fields, self._req_as_dict(req)) def test_init_defaults(self): """Test __init__ when only one field is set.""" all_defaults = {field: self._req_default(field) for field in self.all_fields} for field in self.fields: fields = {field: self.fields[field]} req = objects.CleanupRequest(mock.sentinel.context, **fields) expected = all_defaults.copy() expected.update(fields) self.assertDictEqual(expected, self._req_as_dict(req)) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/objects/test_cluster.py0000664000175000017500000001461400000000000023205 0ustar00zuulzuul00000000000000# Copyright (c) 2016 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from unittest import mock import ddt from oslo_utils import timeutils import cinder.db from cinder.db.sqlalchemy import models from cinder import objects from cinder.tests.unit import fake_cluster from cinder.tests.unit import objects as test_objects from cinder import utils def _get_filters_sentinel(): return {'session': mock.sentinel.session, 'read_deleted': mock.sentinel.read_deleted, 'get_services': mock.sentinel.get_services, 'services_summary': mock.sentinel.services_summary, 'name': mock.sentinel.name, 'binary': mock.sentinel.binary, 'is_up': mock.sentinel.is_up, 'disabled': mock.sentinel.disabled, 'disabled_reason': mock.sentinel.disabled_reason, 'race_preventer': mock.sentinel.race_preventer, 'last_heartbeat': mock.sentinel.last_heartbeat, 'num_hosts': mock.sentinel.num_hosts, 'name_match_level': mock.sentinel.name_match_level, 'num_down_hosts': mock.sentinel.num_down_hosts} @ddt.ddt class TestCluster(test_objects.BaseObjectsTestCase): """Test Cluster Versioned Object methods.""" cluster = fake_cluster.fake_cluster_orm() @mock.patch('cinder.db.sqlalchemy.api.cluster_get', return_value=cluster) def test_get_by_id(self, cluster_get_mock): filters = _get_filters_sentinel() cluster = objects.Cluster.get_by_id(self.context, mock.sentinel.cluster_id, **filters) self.assertIsInstance(cluster, objects.Cluster) self._compare(self, self.cluster, cluster) cluster_get_mock.assert_called_once_with(self.context, mock.sentinel.cluster_id, **filters) @mock.patch('cinder.db.sqlalchemy.api.cluster_create', return_value=cluster) def test_create(self, cluster_create_mock): cluster = objects.Cluster(context=self.context, name='cluster_name') cluster.create() self.assertEqual(self.cluster.id, cluster.id) cluster_create_mock.assert_called_once_with(self.context, {'name': 'cluster_name'}) @mock.patch('cinder.db.sqlalchemy.api.cluster_update', return_value=cluster) def test_save(self, cluster_update_mock): cluster = fake_cluster.fake_cluster_ovo(self.context) cluster.disabled = True cluster.save() cluster_update_mock.assert_called_once_with(self.context, cluster.id, {'disabled': True}) @mock.patch('cinder.db.sqlalchemy.api.cluster_destroy') def test_destroy(self, cluster_destroy_mock): cluster = fake_cluster.fake_cluster_ovo(self.context) cluster.destroy() cluster_destroy_mock.assert_called_once_with(mock.ANY, cluster.id) @mock.patch('cinder.db.sqlalchemy.api.cluster_get', return_value=cluster) def test_refresh(self, cluster_get_mock): cluster = fake_cluster.fake_cluster_ovo(self.context) cluster.refresh() cluster_get_mock.assert_called_once_with(self.context, cluster.id) def test_is_up_no_last_hearbeat(self): cluster = fake_cluster.fake_cluster_ovo(self.context, last_heartbeat=None) self.assertFalse(bool(cluster.is_up)) def test_is_up(self): cluster = fake_cluster.fake_cluster_ovo( self.context, last_heartbeat=timeutils.utcnow(with_timezone=True)) self.assertTrue(cluster.is_up) def test_is_up_limit(self): limit_expired = (utils.service_expired_time(True) + timeutils.datetime.timedelta(seconds=1)) cluster = fake_cluster.fake_cluster_ovo(self.context, last_heartbeat=limit_expired) self.assertTrue(cluster.is_up) def test_is_up_down(self): expired_time = (utils.service_expired_time(True) - timeutils.datetime.timedelta(seconds=1)) cluster = fake_cluster.fake_cluster_ovo(self.context, last_heartbeat=expired_time) self.assertFalse(cluster.is_up) @mock.patch.object(cinder.db, 'conditional_update') def test_reset_service_replication(self, mock_update): cluster = fake_cluster.fake_cluster_ovo(self.context) 
cluster.reset_service_replication() mock_update.assert_called_with(self.context, models.Service, {'replication_status': 'enabled', 'active_backend_id': None}, {'cluster_name': cluster.name}) class TestClusterList(test_objects.BaseObjectsTestCase): """Test ClusterList Versioned Object methods.""" @mock.patch('cinder.db.sqlalchemy.api.cluster_get_all') def test_cluster_get_all(self, cluster_get_all_mock): orm_values = [ fake_cluster.fake_cluster_orm(), fake_cluster.fake_cluster_orm(id=2, name='cluster_name2'), ] cluster_get_all_mock.return_value = orm_values filters = _get_filters_sentinel() result = objects.ClusterList.get_all(self.context, **filters) cluster_get_all_mock.assert_called_once_with( self.context, filters.pop('is_up'), filters.pop('get_services'), filters.pop('services_summary'), filters.pop('read_deleted'), filters.pop('name_match_level'), **filters) self.assertEqual(2, len(result)) for i in range(len(result)): self.assertIsInstance(result[i], objects.Cluster) self._compare(self, orm_values[i], result[i]) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/objects/test_consistencygroup.py0000664000175000017500000003426500000000000025146 0ustar00zuulzuul00000000000000# Copyright 2015 Yahoo Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from unittest import mock from zoneinfo import ZoneInfo from oslo_utils import timeutils from cinder import exception from cinder import objects from cinder.objects import fields from cinder.tests.unit import fake_constants as fake from cinder.tests.unit import fake_volume from cinder.tests.unit import objects as test_objects fake_consistencygroup = { 'id': fake.CONSISTENCY_GROUP_ID, 'user_id': fake.USER_ID, 'project_id': fake.PROJECT_ID, 'host': 'fake_host', 'availability_zone': 'fake_az', 'name': 'fake_name', 'description': 'fake_description', 'volume_type_id': fake.VOLUME_TYPE_ID, 'status': fields.ConsistencyGroupStatus.CREATING, 'cgsnapshot_id': fake.CGSNAPSHOT_ID, 'source_cgid': None, } fake_cgsnapshot = { 'id': fake.CGSNAPSHOT_ID, 'user_id': fake.USER_ID, 'project_id': fake.PROJECT_ID, 'name': 'fake_name', 'description': 'fake_description', 'status': 'creating', 'consistencygroup_id': fake.CONSISTENCY_GROUP_ID, } fake_group = { 'id': fake.GROUP_ID, 'user_id': fake.USER_ID, 'project_id': fake.PROJECT_ID, 'host': 'fake_host', 'availability_zone': 'fake_az', 'name': 'fake_name', 'description': 'fake_description', 'group_type_id': fake.GROUP_TYPE_ID, 'status': fields.GroupStatus.CREATING, } class TestConsistencyGroup(test_objects.BaseObjectsTestCase): @mock.patch('cinder.db.sqlalchemy.api.consistencygroup_get', return_value=fake_consistencygroup) def test_get_by_id(self, consistencygroup_get): consistencygroup = objects.ConsistencyGroup.get_by_id( self.context, fake.CONSISTENCY_GROUP_ID) self._compare(self, fake_consistencygroup, consistencygroup) consistencygroup_get.assert_called_once_with( self.context, fake.CONSISTENCY_GROUP_ID) @mock.patch('cinder.db.sqlalchemy.api.model_query') def test_get_by_id_no_existing_id(self, model_query): model_query().filter_by().first.return_value = None self.assertRaises(exception.ConsistencyGroupNotFound, objects.ConsistencyGroup.get_by_id, self.context, 123) @mock.patch('cinder.db.consistencygroup_create', return_value=fake_consistencygroup) def test_create(self, consistencygroup_create): fake_cg = fake_consistencygroup.copy() del fake_cg['id'] consistencygroup = objects.ConsistencyGroup(context=self.context, **fake_cg) consistencygroup.create() self._compare(self, fake_consistencygroup, consistencygroup) @mock.patch('cinder.db.group_create', return_value=fake_group) def test_create_from_group(self, group_create): fake_grp = fake_group.copy() del fake_grp['id'] group = objects.Group(context=self.context, **fake_grp) group.create() volumes_objs = [objects.Volume(context=self.context, id=i) for i in [fake.VOLUME_ID, fake.VOLUME2_ID, fake.VOLUME3_ID]] volumes = objects.VolumeList(objects=volumes_objs) group.volumes = volumes consistencygroup = objects.ConsistencyGroup() consistencygroup.from_group(group) self.assertEqual(group.id, consistencygroup.id) self.assertEqual(group.name, consistencygroup.name) def test_create_with_id_except_exception(self, ): consistencygroup = objects.ConsistencyGroup( context=self.context, **{'id': fake.CONSISTENCY_GROUP_ID}) self.assertRaises(exception.ObjectActionError, consistencygroup.create) @mock.patch('cinder.db.consistencygroup_update') def test_save(self, consistencygroup_update): consistencygroup = objects.ConsistencyGroup._from_db_object( self.context, objects.ConsistencyGroup(), fake_consistencygroup) consistencygroup.status = fields.ConsistencyGroupStatus.AVAILABLE consistencygroup.save() consistencygroup_update.assert_called_once_with( self.context, consistencygroup.id, {'status': 
fields.ConsistencyGroupStatus.AVAILABLE}) def test_save_with_cgsnapshots(self): consistencygroup = objects.ConsistencyGroup._from_db_object( self.context, objects.ConsistencyGroup(), fake_consistencygroup) cgsnapshots_objs = [objects.CGSnapshot(context=self.context, id=i) for i in [fake.CGSNAPSHOT_ID, fake.CGSNAPSHOT2_ID, fake.CGSNAPSHOT3_ID]] cgsnapshots = objects.CGSnapshotList(objects=cgsnapshots_objs) consistencygroup.name = 'foobar' consistencygroup.cgsnapshots = cgsnapshots self.assertEqual({'name': 'foobar', 'cgsnapshots': cgsnapshots}, consistencygroup.obj_get_changes()) self.assertRaises(exception.ObjectActionError, consistencygroup.save) def test_save_with_volumes(self): consistencygroup = objects.ConsistencyGroup._from_db_object( self.context, objects.ConsistencyGroup(), fake_consistencygroup) volumes_objs = [objects.Volume(context=self.context, id=i) for i in [fake.VOLUME_ID, fake.VOLUME2_ID, fake.VOLUME3_ID]] volumes = objects.VolumeList(objects=volumes_objs) consistencygroup.name = 'foobar' consistencygroup.volumes = volumes self.assertEqual({'name': 'foobar', 'volumes': volumes}, consistencygroup.obj_get_changes()) self.assertRaises(exception.ObjectActionError, consistencygroup.save) @mock.patch('cinder.objects.cgsnapshot.CGSnapshotList.get_all_by_group') @mock.patch('cinder.objects.volume.VolumeList.get_all_by_group') def test_obj_load_attr(self, mock_vol_get_all_by_group, mock_cgsnap_get_all_by_group): consistencygroup = objects.ConsistencyGroup._from_db_object( self.context, objects.ConsistencyGroup(), fake_consistencygroup) # Test cgsnapshots lazy-loaded field cgsnapshots_objs = [objects.CGSnapshot(context=self.context, id=i) for i in [fake.CGSNAPSHOT_ID, fake.CGSNAPSHOT2_ID, fake.CGSNAPSHOT3_ID]] cgsnapshots = objects.CGSnapshotList(context=self.context, objects=cgsnapshots_objs) mock_cgsnap_get_all_by_group.return_value = cgsnapshots self.assertEqual(cgsnapshots, consistencygroup.cgsnapshots) mock_cgsnap_get_all_by_group.assert_called_once_with( self.context, consistencygroup.id) # Test volumes lazy-loaded field volume_objs = [objects.Volume(context=self.context, id=i) for i in [fake.VOLUME_ID, fake.VOLUME2_ID, fake.VOLUME3_ID]] volumes = objects.VolumeList(context=self.context, objects=volume_objs) mock_vol_get_all_by_group.return_value = volumes self.assertEqual(volumes, consistencygroup.volumes) mock_vol_get_all_by_group.assert_called_once_with(self.context, consistencygroup.id) @mock.patch('oslo_utils.timeutils.utcnow', return_value=timeutils.utcnow()) @mock.patch('cinder.db.sqlalchemy.api.consistencygroup_destroy') def test_destroy(self, consistencygroup_destroy, utcnow_mock): consistencygroup_destroy.return_value = { 'status': fields.ConsistencyGroupStatus.DELETED, 'deleted': True, 'deleted_at': utcnow_mock.return_value} consistencygroup = objects.ConsistencyGroup( context=self.context, id=fake.CONSISTENCY_GROUP_ID) consistencygroup.destroy() self.assertTrue(consistencygroup_destroy.called) admin_context = consistencygroup_destroy.call_args[0][0] self.assertTrue(admin_context.is_admin) self.assertTrue(consistencygroup.deleted) self.assertEqual(fields.ConsistencyGroupStatus.DELETED, consistencygroup.status) self.assertEqual( utcnow_mock.return_value.replace(tzinfo=ZoneInfo('UTC')), consistencygroup.deleted_at) @mock.patch('cinder.db.sqlalchemy.api.consistencygroup_get') def test_refresh(self, consistencygroup_get): db_cg1 = fake_consistencygroup.copy() db_cg2 = db_cg1.copy() db_cg2['description'] = 'foobar' # On the second consistencygroup_get, return the 
ConsistencyGroup with # an updated description consistencygroup_get.side_effect = [db_cg1, db_cg2] cg = objects.ConsistencyGroup.get_by_id(self.context, fake.CONSISTENCY_GROUP_ID) self._compare(self, db_cg1, cg) # description was updated, so a ConsistencyGroup refresh should have a # new value for that field cg.refresh() self._compare(self, db_cg2, cg) consistencygroup_get.assert_has_calls([ mock.call( self.context, fake.CONSISTENCY_GROUP_ID), mock.call.__bool__(), mock.call( self.context, fake.CONSISTENCY_GROUP_ID)]) def test_from_db_object_with_all_expected_attributes(self): expected_attrs = ['volumes', 'cgsnapshots'] db_volumes = [fake_volume.fake_db_volume(admin_metadata={}, volume_metadata={})] db_cgsnaps = [fake_cgsnapshot.copy()] db_cg = fake_consistencygroup.copy() db_cg['volumes'] = db_volumes db_cg['cgsnapshots'] = db_cgsnaps cg = objects.ConsistencyGroup._from_db_object( self.context, objects.ConsistencyGroup(), db_cg, expected_attrs) self.assertEqual(len(db_volumes), len(cg.volumes)) self._compare(self, db_volumes[0], cg.volumes[0]) self.assertEqual(len(db_cgsnaps), len(cg.cgsnapshots)) self._compare(self, db_cgsnaps[0], cg.cgsnapshots[0]) class TestConsistencyGroupList(test_objects.BaseObjectsTestCase): @mock.patch('cinder.db.consistencygroup_get_all', return_value=[fake_consistencygroup]) def test_get_all(self, consistencygroup_get_all): consistencygroups = objects.ConsistencyGroupList.get_all(self.context) self.assertEqual(1, len(consistencygroups)) TestConsistencyGroup._compare(self, fake_consistencygroup, consistencygroups[0]) @mock.patch('cinder.db.consistencygroup_get_all_by_project', return_value=[fake_consistencygroup]) def test_get_all_by_project(self, consistencygroup_get_all_by_project): consistencygroups = objects.ConsistencyGroupList.get_all_by_project( self.context, self.project_id) self.assertEqual(1, len(consistencygroups)) TestConsistencyGroup._compare(self, fake_consistencygroup, consistencygroups[0]) @mock.patch('cinder.db.consistencygroup_get_all', return_value=[fake_consistencygroup]) def test_get_all_with_pagination(self, consistencygroup_get_all): consistencygroups = objects.ConsistencyGroupList.get_all( self.context, filters={'id': 'fake'}, marker=None, limit=1, offset=None, sort_keys='id', sort_dirs='asc') self.assertEqual(1, len(consistencygroups)) consistencygroup_get_all.assert_called_once_with( self.context, filters={'id': 'fake'}, marker=None, limit=1, offset=None, sort_keys='id', sort_dirs='asc') TestConsistencyGroup._compare(self, fake_consistencygroup, consistencygroups[0]) @mock.patch('cinder.db.consistencygroup_get_all_by_project', return_value=[fake_consistencygroup]) def test_get_all_by_project_with_pagination( self, consistencygroup_get_all_by_project): consistencygroups = objects.ConsistencyGroupList.get_all_by_project( self.context, self.project_id, filters={'id': 'fake'}, marker=None, limit=1, offset=None, sort_keys='id', sort_dirs='asc') self.assertEqual(1, len(consistencygroups)) consistencygroup_get_all_by_project.assert_called_once_with( self.context, self.project_id, filters={'id': 'fake'}, marker=None, limit=1, offset=None, sort_keys='id', sort_dirs='asc') TestConsistencyGroup._compare(self, fake_consistencygroup, consistencygroups[0]) @mock.patch('cinder.db.consistencygroup_include_in_cluster') def test_include_in_cluster(self, include_mock): filters = {'host': mock.sentinel.host, 'cluster_name': mock.sentinel.cluster_name} cluster = 'new_cluster' objects.ConsistencyGroupList.include_in_cluster(self.context, cluster, **filters) 
include_mock.assert_called_once_with(self.context, cluster, True, **filters) @mock.patch('cinder.db.consistencygroup_include_in_cluster') def test_include_in_cluster_specify_partial(self, include_mock): filters = {'host': mock.sentinel.host, 'cluster_name': mock.sentinel.cluster_name} cluster = 'new_cluster' objects.ConsistencyGroupList.include_in_cluster( self.context, cluster, mock.sentinel.partial_rename, **filters) include_mock.assert_called_once_with( self.context, cluster, mock.sentinel.partial_rename, **filters) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/objects/test_fields.py0000664000175000017500000001722200000000000022770 0ustar00zuulzuul00000000000000# Copyright 2015 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from cinder.objects import fields from cinder.tests.unit import test class FakeFieldType(fields.FieldType): def coerce(self, obj, attr, value): return '*%s*' % value def to_primitive(self, obj, attr, value): return '!%s!' % value def from_primitive(self, obj, attr, value): return value[1:-1] class TestField(test.TestCase): def setUp(self): super(TestField, self).setUp() self.field = fields.Field(FakeFieldType()) self.coerce_good_values = [('foo', '*foo*')] self.coerce_bad_values = [] self.to_primitive_values = [('foo', '!foo!')] self.from_primitive_values = [('!foo!', 'foo')] def test_coerce_good_values(self): for in_val, out_val in self.coerce_good_values: self.assertEqual(out_val, self.field.coerce('obj', 'attr', in_val)) def test_coerce_bad_values(self): for in_val in self.coerce_bad_values: self.assertRaises((TypeError, ValueError), self.field.coerce, 'obj', 'attr', in_val) def test_to_primitive(self): for in_val, prim_val in self.to_primitive_values: self.assertEqual(prim_val, self.field.to_primitive('obj', 'attr', in_val)) def test_from_primitive(self): class ObjectLikeThing(object): _context = 'context' for prim_val, out_val in self.from_primitive_values: self.assertEqual(out_val, self.field.from_primitive( ObjectLikeThing, 'attr', prim_val)) def test_stringify(self): self.assertEqual('123', self.field.stringify(123)) class TestBackupStatus(TestField): def setUp(self): super(TestBackupStatus, self).setUp() self.field = fields.BackupStatusField() self.coerce_good_values = [('error', fields.BackupStatus.ERROR), ('error_deleting', fields.BackupStatus.ERROR_DELETING), ('creating', fields.BackupStatus.CREATING), ('available', fields.BackupStatus.AVAILABLE), ('deleting', fields.BackupStatus.DELETING), ('deleted', fields.BackupStatus.DELETED), ('restoring', fields.BackupStatus.RESTORING)] self.coerce_bad_values = ['acme'] self.to_primitive_values = self.coerce_good_values[0:1] self.from_primitive_values = self.coerce_good_values[0:1] def test_stringify(self): self.assertEqual("'error'", self.field.stringify('error')) def test_stringify_invalid(self): self.assertRaises(ValueError, self.field.stringify, 'not_a_status') class TestConsistencyGroupStatus(TestField): def setUp(self): 
super(TestConsistencyGroupStatus, self).setUp() self.field = fields.ConsistencyGroupStatusField() self.coerce_good_values = [ ('error', fields.ConsistencyGroupStatus.ERROR), ('available', fields.ConsistencyGroupStatus.AVAILABLE), ('creating', fields.ConsistencyGroupStatus.CREATING), ('deleting', fields.ConsistencyGroupStatus.DELETING), ('deleted', fields.ConsistencyGroupStatus.DELETED), ('updating', fields.ConsistencyGroupStatus.UPDATING), ('error_deleting', fields.ConsistencyGroupStatus.ERROR_DELETING)] self.coerce_bad_values = ['acme'] self.to_primitive_values = self.coerce_good_values[0:1] self.from_primitive_values = self.coerce_good_values[0:1] def test_stringify(self): self.assertEqual("'error'", self.field.stringify('error')) def test_stringify_invalid(self): self.assertRaises(ValueError, self.field.stringify, 'not_a_status') class TestSnapshotStatus(TestField): def setUp(self): super(TestSnapshotStatus, self).setUp() self.field = fields.SnapshotStatusField() self.coerce_good_values = [ ('error', fields.SnapshotStatus.ERROR), ('available', fields.SnapshotStatus.AVAILABLE), ('creating', fields.SnapshotStatus.CREATING), ('deleting', fields.SnapshotStatus.DELETING), ('deleted', fields.SnapshotStatus.DELETED), ('updating', fields.SnapshotStatus.UPDATING), ('error_deleting', fields.SnapshotStatus.ERROR_DELETING)] self.coerce_bad_values = ['acme'] self.to_primitive_values = self.coerce_good_values[0:1] self.from_primitive_values = self.coerce_good_values[0:1] def test_stringify(self): self.assertEqual("'error'", self.field.stringify('error')) def test_stringify_invalid(self): self.assertRaises(ValueError, self.field.stringify, 'not_a_status') class TestVolumeAttachStatus(TestField): def setUp(self): super(TestVolumeAttachStatus, self).setUp() self.field = fields.VolumeAttachStatusField() self.coerce_good_values = [('attaching', fields.VolumeAttachStatus.ATTACHING), ('attached', fields.VolumeAttachStatus.ATTACHED), ('detached', fields.VolumeAttachStatus.DETACHED), ('error_attaching', fields.VolumeAttachStatus.ERROR_ATTACHING), ('error_detaching', fields.VolumeAttachStatus.ERROR_DETACHING)] self.coerce_bad_values = ['acme'] self.to_primitive_values = self.coerce_good_values[0:1] self.from_primitive_values = self.coerce_good_values[0:1] def test_stringify(self): self.assertEqual("'attaching'", self.field.stringify('attaching')) def test_stringify_invalid(self): self.assertRaises(ValueError, self.field.stringify, 'not_a_status') class TestVolumeMigrationStatus(TestField): def setUp(self): super(TestVolumeMigrationStatus, self).setUp() self.field = fields.VolumeMigrationStatusField() self.coerce_good_values = [ ('migrating', fields.VolumeMigrationStatus.MIGRATING), ('error', fields.VolumeMigrationStatus.ERROR), ('success', fields.VolumeMigrationStatus.SUCCESS), ('completing', fields.VolumeMigrationStatus.COMPLETING), ('starting', fields.VolumeMigrationStatus.STARTING), ('none', fields.VolumeMigrationStatus.NONE), ] self.coerce_bad_values = ['available'] self.to_primitive_values = self.coerce_good_values self.from_primitive_values = self.coerce_good_values def test_stringify(self): self.assertEqual("'migrating'", self.field.stringify('migrating')) def test_stringify_invalid(self): self.assertRaises(ValueError, self.field.stringify, 'not_a_status') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/objects/test_group.py0000664000175000017500000002665100000000000022664 0ustar00zuulzuul00000000000000# 
Copyright 2016 EMC Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from unittest import mock import ddt from cinder import exception from cinder import objects from cinder.objects import fields from cinder.tests.unit import fake_constants as fake from cinder.tests.unit import fake_volume from cinder.tests.unit import objects as test_objects fake_group = { 'id': fake.GROUP_ID, 'user_id': fake.USER_ID, 'project_id': fake.PROJECT_ID, 'host': 'fake_host', 'availability_zone': 'fake_az', 'name': 'fake_name', 'description': 'fake_description', 'group_type_id': fake.GROUP_TYPE_ID, 'status': fields.GroupStatus.CREATING, } @ddt.ddt class TestGroup(test_objects.BaseObjectsTestCase): @mock.patch('cinder.db.sqlalchemy.api.group_get', return_value=fake_group) def test_get_by_id(self, group_get): group = objects.Group.get_by_id( self.context, fake.GROUP_ID) self._compare(self, fake_group, group) group_get.assert_called_once_with( self.context, fake.GROUP_ID) @mock.patch('cinder.db.sqlalchemy.api.model_query') def test_get_by_id_no_existing_id(self, model_query): model_query().filter_by().first.return_value = None self.assertRaises(exception.GroupNotFound, objects.Group.get_by_id, self.context, 123) @mock.patch('cinder.db.group_create', return_value=fake_group) def test_create(self, group_create): fake_grp = fake_group.copy() del fake_grp['id'] group = objects.Group(context=self.context, **fake_grp) group.create() self._compare(self, fake_group, group) def test_create_with_id_except_exception(self, ): group = objects.Group( context=self.context, **{'id': fake.GROUP_ID}) self.assertRaises(exception.ObjectActionError, group.create) @mock.patch('cinder.db.group_update') def test_save(self, group_update): group = objects.Group._from_db_object( self.context, objects.Group(), fake_group) group.status = fields.GroupStatus.AVAILABLE group.save() group_update.assert_called_once_with( self.context, group.id, {'status': fields.GroupStatus.AVAILABLE}) def test_save_with_volumes(self): group = objects.Group._from_db_object( self.context, objects.Group(), fake_group) volumes_objs = [objects.Volume(context=self.context, id=i) for i in [fake.VOLUME_ID, fake.VOLUME2_ID, fake.VOLUME3_ID]] volumes = objects.VolumeList(objects=volumes_objs) group.name = 'foobar' group.volumes = volumes self.assertEqual({'name': 'foobar', 'volumes': volumes}, group.obj_get_changes()) self.assertRaises(exception.ObjectActionError, group.save) @mock.patch('cinder.objects.volume_type.VolumeTypeList.get_all_by_group') @mock.patch('cinder.objects.volume.VolumeList.get_all_by_generic_group') def test_obj_load_attr(self, mock_vol_get_all_by_group, mock_vol_type_get_all_by_group): group = objects.Group._from_db_object( self.context, objects.Group(), fake_group) # Test volumes lazy-loaded field volume_objs = [objects.Volume(context=self.context, id=i) for i in [fake.VOLUME_ID, fake.VOLUME2_ID, fake.VOLUME3_ID]] volumes = objects.VolumeList(context=self.context, objects=volume_objs) mock_vol_get_all_by_group.return_value = volumes 
self.assertEqual(volumes, group.volumes) mock_vol_get_all_by_group.assert_called_once_with(self.context, group.id) @mock.patch('cinder.db.group_destroy') def test_destroy(self, group_destroy): group = objects.Group( context=self.context, id=fake.GROUP_ID) group.destroy() self.assertTrue(group_destroy.called) admin_context = group_destroy.call_args[0][0] self.assertTrue(admin_context.is_admin) @mock.patch('cinder.db.sqlalchemy.api.group_get') def test_refresh(self, group_get): db_group1 = fake_group.copy() db_group2 = db_group1.copy() db_group2['description'] = 'foobar' # On the second group_get, return the Group with # an updated description group_get.side_effect = [db_group1, db_group2] group = objects.Group.get_by_id(self.context, fake.GROUP_ID) self._compare(self, db_group1, group) # description was updated, so a Group refresh should have a # new value for that field group.refresh() self._compare(self, db_group2, group) group_get.assert_has_calls([ mock.call( self.context, fake.GROUP_ID), mock.call.__bool__(), mock.call( self.context, fake.GROUP_ID)]) def test_from_db_object_with_all_expected_attributes(self): expected_attrs = ['volumes'] db_volumes = [fake_volume.fake_db_volume(admin_metadata={}, volume_metadata={})] db_group = fake_group.copy() db_group['volumes'] = db_volumes group = objects.Group._from_db_object( self.context, objects.Group(), db_group, expected_attrs) self.assertEqual(len(db_volumes), len(group.volumes)) self._compare(self, db_volumes[0], group.volumes[0]) @mock.patch('cinder.volume.group_types.get_group_type_specs') def test_is_replicated_true(self, mock_get_specs): mock_get_specs.return_value = ' True' group = objects.Group(self.context, group_type_id=fake.GROUP_TYPE_ID) self.assertTrue(group.is_replicated) @ddt.data(' False', None, 'notASpecValueWeCareAbout') def test_is_replicated_false(self, spec_value): with mock.patch('cinder.volume.group_types' '.get_group_type_specs') as mock_get_specs: mock_get_specs.return_value = spec_value group = objects.Group(self.context, group_type_id=fake.GROUP_TYPE_ID) # NOTE(xyang): Changed the following from self.assertFalse( # group.is_replicated) to self.assertEqual(False, # group.is_replicated) to address a review comment. This way this # test will still pass even if is_replicated is a method and not # a property. 
self.assertEqual(False, group.is_replicated) @ddt.ddt class TestGroupList(test_objects.BaseObjectsTestCase): @mock.patch('cinder.db.group_get_all', return_value=[fake_group]) def test_get_all(self, group_get_all): groups = objects.GroupList.get_all(self.context) self.assertEqual(1, len(groups)) TestGroup._compare(self, fake_group, groups[0]) @mock.patch('cinder.db.group_get_all_by_project', return_value=[fake_group]) def test_get_all_by_project(self, group_get_all_by_project): groups = objects.GroupList.get_all_by_project( self.context, self.project_id) self.assertEqual(1, len(groups)) TestGroup._compare(self, fake_group, groups[0]) @mock.patch('cinder.db.group_get_all', return_value=[fake_group]) def test_get_all_with_pagination(self, group_get_all): groups = objects.GroupList.get_all( self.context, filters={'id': 'fake'}, marker=None, limit=1, offset=None, sort_keys='id', sort_dirs='asc') self.assertEqual(1, len(groups)) group_get_all.assert_called_once_with( self.context, filters={'id': 'fake'}, marker=None, limit=1, offset=None, sort_keys='id', sort_dirs='asc') TestGroup._compare(self, fake_group, groups[0]) @mock.patch('cinder.db.group_get_all_by_project', return_value=[fake_group]) def test_get_all_by_project_with_pagination( self, group_get_all_by_project): groups = objects.GroupList.get_all_by_project( self.context, self.project_id, filters={'id': 'fake'}, marker=None, limit=1, offset=None, sort_keys='id', sort_dirs='asc') self.assertEqual(1, len(groups)) group_get_all_by_project.assert_called_once_with( self.context, self.project_id, filters={'id': 'fake'}, marker=None, limit=1, offset=None, sort_keys='id', sort_dirs='asc') TestGroup._compare(self, fake_group, groups[0]) @ddt.data({'cluster_name': 'fake_cluster'}, {'host': 'fake_host'}) @mock.patch('cinder.volume.group_types.get_group_type_specs') @mock.patch('cinder.db.group_get_all') def test_get_all_replicated(self, filters, mock_get_groups, mock_get_specs): mock_get_specs.return_value = ' True' fake_group2 = fake_group.copy() fake_group2['id'] = fake.GROUP2_ID fake_group2['cluster_name'] = 'fake_cluster' if filters.get('cluster_name'): mock_get_groups.return_value = [fake_group2] else: mock_get_groups.return_value = [fake_group] res = objects.GroupList.get_all_replicated(self.context, filters=filters) self.assertEqual(1, len(res)) if filters.get('cluster_name'): self.assertEqual(fake.GROUP2_ID, res[0].id) self.assertEqual('fake_cluster', res[0].cluster_name) else: self.assertEqual(fake.GROUP_ID, res[0].id) self.assertIsNone(res[0].cluster_name) @mock.patch('cinder.db.group_include_in_cluster') def test_include_in_cluster(self, include_mock): filters = {'host': mock.sentinel.host, 'cluster_name': mock.sentinel.cluster_name} cluster = 'new_cluster' objects.GroupList.include_in_cluster(self.context, cluster, **filters) include_mock.assert_called_once_with(self.context, cluster, True, **filters) @mock.patch('cinder.db.group_include_in_cluster') def test_include_in_cluster_specify_partial(self, include_mock): filters = {'host': mock.sentinel.host, 'cluster_name': mock.sentinel.cluster_name} cluster = 'new_cluster' objects.GroupList.include_in_cluster(self.context, cluster, mock.sentinel.partial_rename, **filters) include_mock.assert_called_once_with(self.context, cluster, mock.sentinel.partial_rename, **filters) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/objects/test_group_snapshot.py0000664000175000017500000002054200000000000024574 
0ustar00zuulzuul00000000000000# Copyright 2016 EMC Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from unittest import mock from zoneinfo import ZoneInfo from oslo_utils import timeutils from cinder import exception from cinder import objects from cinder.objects import fields from cinder.tests.unit import fake_constants as fake from cinder.tests.unit import objects as test_objects from cinder.tests.unit.objects.test_group import fake_group fake_group_snapshot = { 'id': fake.GROUP_SNAPSHOT_ID, 'user_id': fake.USER_ID, 'project_id': fake.PROJECT_ID, 'name': 'fake_name', 'description': 'fake_description', 'status': fields.GroupSnapshotStatus.CREATING, 'group_id': fake.GROUP_ID, } class TestGroupSnapshot(test_objects.BaseObjectsTestCase): @mock.patch('cinder.db.sqlalchemy.api.group_snapshot_get', return_value=fake_group_snapshot) def test_get_by_id(self, group_snapshot_get): group_snapshot = objects.GroupSnapshot.get_by_id( self.context, fake.GROUP_SNAPSHOT_ID) self._compare(self, fake_group_snapshot, group_snapshot) @mock.patch('cinder.db.group_snapshot_create', return_value=fake_group_snapshot) def test_create(self, group_snapshot_create): fake_group_snap = fake_group_snapshot.copy() del fake_group_snap['id'] group_snapshot = objects.GroupSnapshot(context=self.context, **fake_group_snap) group_snapshot.create() self._compare(self, fake_group_snapshot, group_snapshot) def test_create_with_id_except_exception(self): group_snapshot = objects.GroupSnapshot( context=self.context, **{'id': fake.GROUP_ID}) self.assertRaises(exception.ObjectActionError, group_snapshot.create) @mock.patch('cinder.db.group_snapshot_update') def test_save(self, group_snapshot_update): group_snapshot = objects.GroupSnapshot._from_db_object( self.context, objects.GroupSnapshot(), fake_group_snapshot) group_snapshot.status = 'active' group_snapshot.save() group_snapshot_update.assert_called_once_with(self.context, group_snapshot.id, {'status': 'active'}) @mock.patch('cinder.db.group_update', return_value=fake_group) @mock.patch('cinder.db.group_snapshot_update') def test_save_with_group(self, group_snapshot_update, group_snapshot_cg_update): group = objects.Group._from_db_object( self.context, objects.Group(), fake_group) group_snapshot = objects.GroupSnapshot._from_db_object( self.context, objects.GroupSnapshot(), fake_group_snapshot) group_snapshot.name = 'foobar' group_snapshot.group = group self.assertEqual({'name': 'foobar', 'group': group}, group_snapshot.obj_get_changes()) self.assertRaises(exception.ObjectActionError, group_snapshot.save) @mock.patch('oslo_utils.timeutils.utcnow', return_value=timeutils.utcnow()) @mock.patch('cinder.db.sqlalchemy.api.group_snapshot_destroy') def test_destroy(self, group_snapshot_destroy, utcnow_mock): group_snapshot_destroy.return_value = { 'status': fields.GroupSnapshotStatus.DELETED, 'deleted': True, 'deleted_at': utcnow_mock.return_value} group_snapshot = objects.GroupSnapshot(context=self.context, id=fake.GROUP_SNAPSHOT_ID) group_snapshot.destroy() 
self.assertTrue(group_snapshot_destroy.called) admin_context = group_snapshot_destroy.call_args[0][0] self.assertTrue(admin_context.is_admin) self.assertTrue(group_snapshot.deleted) self.assertEqual(fields.GroupSnapshotStatus.DELETED, group_snapshot.status) self.assertEqual( utcnow_mock.return_value.replace(tzinfo=ZoneInfo('UTC')), group_snapshot.deleted_at) @mock.patch('cinder.objects.group.Group.get_by_id') @mock.patch( 'cinder.objects.snapshot.SnapshotList.get_all_for_group_snapshot') def test_obj_load_attr(self, snapshotlist_get_for_cgs, group_get_by_id): group_snapshot = objects.GroupSnapshot._from_db_object( self.context, objects.GroupSnapshot(), fake_group_snapshot) # Test group lazy-loaded field group = objects.Group( context=self.context, id=fake.GROUP_ID) group_get_by_id.return_value = group self.assertEqual(group, group_snapshot.group) group_get_by_id.assert_called_once_with( self.context, group_snapshot.group_id) # Test snapshots lazy-loaded field snapshots_objs = [objects.Snapshot(context=self.context, id=i) for i in [fake.SNAPSHOT_ID, fake.SNAPSHOT2_ID, fake.SNAPSHOT3_ID]] snapshots = objects.SnapshotList(context=self.context, objects=snapshots_objs) snapshotlist_get_for_cgs.return_value = snapshots self.assertEqual(snapshots, group_snapshot.snapshots) snapshotlist_get_for_cgs.assert_called_once_with( self.context, group_snapshot.id) @mock.patch('cinder.db.sqlalchemy.api.group_snapshot_get') def test_refresh(self, group_snapshot_get): db_group_snapshot1 = fake_group_snapshot.copy() db_group_snapshot2 = db_group_snapshot1.copy() db_group_snapshot2['description'] = 'foobar' # On the second group_snapshot_get, return the GroupSnapshot with an # updated description group_snapshot_get.side_effect = [db_group_snapshot1, db_group_snapshot2] group_snapshot = objects.GroupSnapshot.get_by_id( self.context, fake.GROUP_SNAPSHOT_ID) self._compare(self, db_group_snapshot1, group_snapshot) # description was updated, so a GroupSnapshot refresh should have a new # value for that field group_snapshot.refresh() self._compare(self, db_group_snapshot2, group_snapshot) group_snapshot_get.assert_has_calls( [mock.call(self.context, fake.GROUP_SNAPSHOT_ID), mock.call.__bool__(), mock.call(self.context, fake.GROUP_SNAPSHOT_ID)]) class TestGroupSnapshotList(test_objects.BaseObjectsTestCase): @mock.patch('cinder.db.group_snapshot_get_all', return_value=[fake_group_snapshot]) def test_get_all(self, group_snapshot_get_all): group_snapshots = objects.GroupSnapshotList.get_all(self.context) self.assertEqual(1, len(group_snapshots)) TestGroupSnapshot._compare(self, fake_group_snapshot, group_snapshots[0]) @mock.patch('cinder.db.group_snapshot_get_all_by_project', return_value=[fake_group_snapshot]) def test_get_all_by_project(self, group_snapshot_get_all_by_project): group_snapshots = objects.GroupSnapshotList.get_all_by_project( self.context, self.project_id) self.assertEqual(1, len(group_snapshots)) TestGroupSnapshot._compare(self, fake_group_snapshot, group_snapshots[0]) @mock.patch('cinder.db.group_snapshot_get_all_by_group', return_value=[fake_group_snapshot]) def test_get_all_by_group(self, group_snapshot_get_all_by_group): group_snapshots = objects.GroupSnapshotList.get_all_by_group( self.context, self.project_id) self.assertEqual(1, len(group_snapshots)) TestGroupSnapshot._compare(self, fake_group_snapshot, group_snapshots[0]) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 
cinder-27.0.0/cinder/tests/unit/objects/test_group_type.py0000664000175000017500000001324500000000000023720 0ustar00zuulzuul00000000000000# Copyright 2016 EMC Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from unittest import mock from cinder import objects from cinder.tests.unit import fake_constants as fake from cinder.tests.unit import fake_group from cinder.tests.unit import objects as test_objects class TestGroupType(test_objects.BaseObjectsTestCase): @mock.patch('cinder.db.sqlalchemy.api._group_type_get_full') def test_get_by_id(self, group_type_get): db_group_type = fake_group.fake_db_group_type() group_type_get.return_value = db_group_type group_type = objects.GroupType.get_by_id(self.context, fake.GROUP_TYPE_ID) self._compare(self, db_group_type, group_type) @mock.patch('cinder.volume.group_types.create') def test_create(self, group_type_create): db_group_type = fake_group.fake_db_group_type() group_type_create.return_value = db_group_type group_type = objects.GroupType(context=self.context) group_type.name = db_group_type['name'] group_type.group_specs = db_group_type['group_specs'] group_type.is_public = db_group_type['is_public'] group_type.projects = db_group_type['projects'] group_type.description = db_group_type['description'] group_type.create() group_type_create.assert_called_once_with( self.context, db_group_type['name'], db_group_type['group_specs'], db_group_type['is_public'], db_group_type['projects'], db_group_type['description']) @mock.patch('cinder.volume.group_types.update') def test_save(self, group_type_update): db_group_type = fake_group.fake_db_group_type() group_type = objects.GroupType._from_db_object(self.context, objects.GroupType(), db_group_type) group_type.description = 'foobar' group_type.save() group_type_update.assert_called_once_with(self.context, group_type.id, group_type.name, group_type.description) @mock.patch('cinder.volume.group_types.destroy') def test_destroy(self, group_type_destroy): db_group_type = fake_group.fake_db_group_type() group_type = objects.GroupType._from_db_object(self.context, objects.GroupType(), db_group_type) group_type.destroy() self.assertTrue(group_type_destroy.called) admin_context = group_type_destroy.call_args[0][0] self.assertTrue(admin_context.is_admin) @mock.patch('cinder.db.sqlalchemy.api._group_type_get_full') def test_refresh(self, group_type_get): db_type1 = fake_group.fake_db_group_type() db_type2 = db_type1.copy() db_type2['description'] = 'foobar' # updated description group_type_get.side_effect = [db_type1, db_type2] group_type = objects.GroupType.get_by_id(self.context, fake.GROUP_TYPE_ID) self._compare(self, db_type1, group_type) # description was updated, so a group type refresh should have a new # value for that field group_type.refresh() self._compare(self, db_type2, group_type) group_type_get.assert_has_calls([mock.call(self.context, fake.GROUP_TYPE_ID), mock.call.__bool__(), mock.call(self.context, fake.GROUP_TYPE_ID)]) class TestGroupTypeList(test_objects.BaseObjectsTestCase): 
@mock.patch('cinder.volume.group_types.get_all_group_types') def test_get_all(self, get_all_types): db_group_type = fake_group.fake_db_group_type() get_all_types.return_value = {db_group_type['name']: db_group_type} group_types = objects.GroupTypeList.get_all(self.context) self.assertEqual(1, len(group_types)) TestGroupType._compare(self, db_group_type, group_types[0]) @mock.patch('cinder.volume.group_types.get_all_group_types') def test_get_all_with_pagination(self, get_all_types): db_group_type = fake_group.fake_db_group_type() get_all_types.return_value = {db_group_type['name']: db_group_type} group_types = objects.GroupTypeList.get_all(self.context, filters={'is_public': True}, marker=None, limit=1, sort_keys='id', sort_dirs='desc', offset=None) self.assertEqual(1, len(group_types)) TestGroupType._compare(self, db_group_type, group_types[0]) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/objects/test_manageable_volumes_snapshots.py0000664000175000017500000001013500000000000027446 0ustar00zuulzuul00000000000000# Copyright 2016 Intel Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import ddt from cinder import objects from cinder.tests.unit import objects as test_objects @ddt.ddt class TestManageableResources(test_objects.BaseObjectsTestCase): def resource_test(self, resource, resource_type): if resource_type == "manageable_volume_obj": resource.manageable_volume_obj.wrong_key elif resource_type == "manageable_snapshot_obj": resource.manageable_snapshot_obj.wrong_key def setUp(self): super(TestManageableResources, self).setUp() self.manageable_volume_dict = [ {'cinder_id': 'e334aab4-c987-4eb0-9c81-d4a773b4f7a6', 'extra_info': None, 'reason_not_safe': 'already managed', 'reference': {'source-name': 'volume-e334aab4-c987-4eb0-9c81-d4a773b4f7a6'}, 'safe_to_manage': False, 'size': 1, 'foo': 'bar'}, {'cinder_id': 'da25ac53-3fe0-4f56-9369-4d289d8902fd', 'extra_info': None, 'reason_not_safe': 'already managed', 'reference': {'source-name': 'volume-da25ac53-3fe0-4f56-9369-4d289d8902fd'}, 'safe_to_manage': False, 'size': 2} ] self.manageable_snapshot_dict = [ {'cinder_id': 'e334aab4-c987-4eb0-9c81-d4a773b4f7a6', 'reference': {'source-name': 'volume-e334aab4-c987-4eb0-9c81-d4a773b4f7a6'}, 'extra_info': None, 'reason_not_safe': 'already managed', 'source_reference': {'source-name': 'volume-e334aab4-c987-4eb0-9c81-d4a773b4f7a6'}, 'safe_to_manage': False, 'size': 1, 'foo': 'bar'}, {'cinder_id': 'da25ac53-3fe0-4f56-9369-4d289d8902fd', 'reference': {'source-name': 'volume-da25ac53-3fe0-4f56-9369-4d289d8902fd'}, 'extra_info': None, 'reason_not_safe': 'already managed', 'source_reference': {'source-name': 'da25ac53-3fe0-4f56-9369-4d289d8902fd'}, 'safe_to_manage': False, 'size': 2} ] vol_mang_list = (objects.ManageableVolumeList.from_primitives (self.context, self.manageable_volume_dict)) self.manageable_volume_obj_list = vol_mang_list snap_mang_list = (objects.ManageableSnapshotList.from_primitives (self.context, 
self.manageable_snapshot_dict)) self.manageable_snapshot_obj_list = snap_mang_list self.manageable_volume_obj = self.manageable_volume_obj_list[0] self.manageable_snapshot_obj = self.manageable_snapshot_obj_list[0] @ddt.data('manageable_volume_obj', 'manageable_snapshot_obj') def test_extra_info(self, obj): # Making sure that any new key assignment gets stored in extra_info # field of manageable_volume_object & manageable_snapshot_object self.assertEqual( 'bar', getattr(self, obj).extra_info['foo']) @ddt.data('manageable_volume_obj', 'manageable_snapshot_obj') def test_extra_info_wrong_key(self, obj): # Making sure referring an attribute before setting it raises an # Attribute Error for manageable_volume_object & # manageable_snapshot_object getattr(self, obj).foo = "test" self.assertRaises(AttributeError, self.resource_test, self, obj) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/objects/test_objects.py0000664000175000017500000002016300000000000023151 0ustar00zuulzuul00000000000000# Copyright 2015 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_versionedobjects import fixture from cinder import db from cinder import objects from cinder.objects import base from cinder.tests.unit import test # NOTE: The hashes in this list should only be changed if they come with a # corresponding version bump in the affected objects. 
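# NOTE: editor's illustrative sketch, not part of the original Cinder test
# module. The object_data map below is normally regenerated rather than
# edited by hand; the hypothetical helper here shows one way to do that with
# the same oslo.versionedobjects checker used by TestObjectVersions further
# down, assuming ObjectVersionChecker exposes get_hashes(). It reuses the
# ``fixture`` and ``base`` imports from the top of this module.
def _current_object_hashes():
    """Return the current {object_name: version_hash} map (editor's sketch).

    Diffing the result against ``object_data`` below is a quick way to see
    exactly which hashes moved when test_versions reports a mismatch.
    """
    checker = fixture.ObjectVersionChecker(
        base.CinderObjectRegistry.obj_classes())
    return checker.get_hashes()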
object_data = { 'Backup': '1.7-fffdbcd5da3c30750916fa2cc0e8ffb5', 'BackupDeviceInfo': '1.0-74b3950676c690538f4bc6796bd0042e', 'BackupImport': '1.7-fffdbcd5da3c30750916fa2cc0e8ffb5', 'BackupList': '1.0-15ecf022a68ddbb8c2a6739cfc9f8f5e', 'CleanupRequest': '1.0-e7c688b893e1d5537ccf65cc3eb10a28', 'Cluster': '1.1-e2c533eb8cdd8d229b6c45c6cf3a9e2c', 'ClusterList': '1.0-15ecf022a68ddbb8c2a6739cfc9f8f5e', 'CGSnapshot': '1.1-3212ac2b4c2811b7134fb9ba2c49ff74', 'CGSnapshotList': '1.0-15ecf022a68ddbb8c2a6739cfc9f8f5e', 'ConsistencyGroup': '1.4-7bf01a79b82516639fc03cd3ab6d9c01', 'ConsistencyGroupList': '1.1-15ecf022a68ddbb8c2a6739cfc9f8f5e', 'LogLevel': '1.0-7a8200b6b5063b33ec7b569dc6be66d2', 'LogLevelList': '1.0-15ecf022a68ddbb8c2a6739cfc9f8f5e', 'ManageableSnapshot': '1.0-5be933366eb17d12db0115c597158d0d', 'ManageableSnapshotList': '1.0-15ecf022a68ddbb8c2a6739cfc9f8f5e', 'ManageableVolume': '1.0-5fd0152237ec9dfb7b5c7095b8b09ffa', 'ManageableVolumeList': '1.0-15ecf022a68ddbb8c2a6739cfc9f8f5e', 'QualityOfServiceSpecs': '1.0-0b212e0a86ee99092229874e03207fe8', 'QualityOfServiceSpecsList': '1.0-15ecf022a68ddbb8c2a6739cfc9f8f5e', 'RequestSpec': '1.5-2f6efbb86107ee70cc1bb07f4bdb4ec7', 'Service': '1.6-e881b6b324151dd861e09cdfffcdaccd', 'ServiceList': '1.1-15ecf022a68ddbb8c2a6739cfc9f8f5e', 'Snapshot': '1.6-457ae45840b208c8fdfe399daaf1f745', 'SnapshotList': '1.0-15ecf022a68ddbb8c2a6739cfc9f8f5e', 'Volume': '1.9-37de6d473e44d3f9f6d946fe93a3cece', 'VolumeList': '1.1-15ecf022a68ddbb8c2a6739cfc9f8f5e', 'VolumeAttachment': '1.3-e6a3f7c5590d19f1e3ff6f819fbe6593', 'VolumeAttachmentList': '1.1-15ecf022a68ddbb8c2a6739cfc9f8f5e', 'VolumeProperties': '1.1-cadac86b2bdc11eb79d1dcea988ff9e8', 'VolumeType': '1.3-a5d8c3473db9bc3bbcdbab9313acf4d1', 'VolumeTypeList': '1.1-15ecf022a68ddbb8c2a6739cfc9f8f5e', 'GroupType': '1.0-d4a7b272199d0b0d6fc3ceed58539d30', 'GroupTypeList': '1.0-15ecf022a68ddbb8c2a6739cfc9f8f5e', 'Group': '1.2-2ade6acf2e55687b980048fc3f51dad9', 'GroupList': '1.0-15ecf022a68ddbb8c2a6739cfc9f8f5e', 'GroupSnapshot': '1.0-9af3e994e889cbeae4427c3e351fa91d', 'GroupSnapshotList': '1.0-15ecf022a68ddbb8c2a6739cfc9f8f5e', } class TestObjectVersions(test.TestCase): def test_versions(self): checker = fixture.ObjectVersionChecker( base.CinderObjectRegistry.obj_classes()) expected, actual = checker.test_hashes(object_data) self.assertEqual(expected, actual, "Some objects have changed; please make sure the " "versions have been bumped and backporting " "compatibility code has been added to " "obj_make_compatible if necessary, and then update " "their hashes in the object_data map in this test " "module. 
If we don't need to add backporting code " "then it means we also don't need the version bump " "and we just have to change the hash in this module.") def test_versions_history(self): # If we inserted a fake element in history, remove it so we don't fail if base.OBJ_VERSIONS.get_current() == self.FAKE_OVO_HISTORY_VERSION: fake_version = base.OBJ_VERSIONS.versions.pop(-1) del base.OBJ_VERSIONS[fake_version] classes = base.CinderObjectRegistry.obj_classes() versions = base.OBJ_VERSIONS.get_current_versions() expected = {} actual = {} for name, cls in classes.items(): if name not in versions: expected[name] = cls[0].VERSION elif cls[0].VERSION != versions[name]: expected[name] = cls[0].VERSION actual[name] = versions[name] self.assertEqual(expected, actual, 'Some objects versions have changed; please make ' 'sure a new objects history version was added in ' 'cinder.objects.base.OBJ_VERSIONS.') def test_object_nullable_match_db(self): # This test is to keep nullable of every field in corresponding # db model and object match. def _check_table_matched(db_model, cls): for column in db_model.__table__.columns: # Ignore columns that aren't reflected by the model if column.name not in cls.fields: continue # NOTE(jdg): Model and Object don't match intentionally here if name == 'Service' and column.name == 'uuid': continue # NOTE(xyang): Skip the comparison of the colume name # group_type_id in table Group because group_type_id # is in the object Group but it is stored in a different # table in the database, not in the Group table. if name == 'Group' and column.name == 'group_type_id': continue # TODO(stephenfin): Model and Object don't match here, but it # wasn't intentional if (name, column.name) in { ('Backup', 'project_id'), ('Backup', 'user_id'), ('BackupImport', 'project_id'), ('BackupImport', 'user_id'), ('CGSnapshot', 'project_id'), ('CGSnapshot', 'user_id'), ('CGSnapshot', 'consistencygroup_id'), ('ConsistencyGroup', 'project_id'), ('ConsistencyGroup', 'user_id'), ('Group', 'user_id'), ('Group', 'project_id'), ('GroupType', 'name'), ('Service', 'frozen'), ('Snapshot', 'volume_id'), ('Snapshot', 'volume_type_id'), ('Volume', 'volume_type_id'), }: continue self.assertEqual( column.nullable, cls.fields[column.name].nullable, 'Column %(c)s in table %(t)s not match.' % { 'c': column.name, 't': name, }, ) classes = base.CinderObjectRegistry.obj_classes() for name, cls in classes.items(): if issubclass(cls[0], base.CinderPersistentObject): db_model = db.get_model_for_versioned_object(cls[0]) _check_table_matched(db_model, cls[0]) def test_obj_make_compatible(self): # Go through all of the object classes and run obj_to_primitive() with # a target version of all previous minor versions. It doesn't test # the converted data, but at least ensures the method doesn't blow # up on something simple. init_args = {} init_kwargs = {objects.Snapshot: {'context': 'ctxt'}, objects.Backup: {'context': 'ctxt'}, objects.BackupImport: {'context': 'ctxt'}} checker = fixture.ObjectVersionChecker( base.CinderObjectRegistry.obj_classes()) checker.test_compatibility_routines(init_args=init_args, init_kwargs=init_kwargs) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/objects/test_qos.py0000664000175000017500000001273200000000000022325 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from unittest import mock from zoneinfo import ZoneInfo from oslo_utils import timeutils from cinder.db.sqlalchemy import models from cinder import exception from cinder import objects from cinder.tests.unit import fake_constants as fake from cinder.tests.unit import objects as test_objects fake_qos = {'consumer': 'front-end', 'id': fake.OBJECT_ID, 'name': 'qos_name', 'specs': {'key1': 'val1', 'key2': 'val2'}} fake_qos_no_id = fake_qos.copy() del fake_qos_no_id['id'] class TestQos(test_objects.BaseObjectsTestCase): @mock.patch('cinder.db.get_by_id', return_value=fake_qos) def test_get_by_id(self, qos_get): qos_object = objects.QualityOfServiceSpecs.get_by_id( self.context, fake.OBJECT_ID) self._compare(self, fake_qos, qos_object) qos_get.assert_called_once_with( self.context, models.QualityOfServiceSpecs, fake.OBJECT_ID) @mock.patch('cinder.db.qos_specs_create', return_value={'name': 'qos_name', 'id': fake.OBJECT_ID}) def test_create(self, qos_fake_create): qos_object = objects.QualityOfServiceSpecs( self.context, **fake_qos_no_id) qos_object.create() self._compare(self, fake_qos, qos_object) # Fail to create a second time self.assertRaises(exception.ObjectActionError, qos_object.create) self.assertEqual(1, len(qos_fake_create.mock_calls)) @mock.patch('cinder.db.qos_specs_item_delete') @mock.patch('cinder.db.qos_specs_update') def test_save(self, qos_fake_update, qos_fake_delete): qos_dict = fake_qos.copy() qos_dict['specs']['key_to_remove1'] = 'val' qos_dict['specs']['key_to_remove2'] = 'val' qos_object = objects.QualityOfServiceSpecs._from_db_object( self.context, objects.QualityOfServiceSpecs(), qos_dict) qos_object.specs['key1'] = 'val1' qos_object.save() # No values have changed so no updates should be made self.assertFalse(qos_fake_update.called) qos_object.consumer = 'back-end' qos_object.specs['key1'] = 'val2' qos_object.specs['new_key'] = 'val3' del qos_object.specs['key_to_remove1'] del qos_object.specs['key_to_remove2'] qos_object.save() qos_fake_update.assert_called_once_with( self.context, fake.OBJECT_ID, {'specs': {'key1': 'val2', 'new_key': 'val3'}, 'consumer': 'back-end'}) qos_fake_delete.assert_has_calls([ mock.call(self.context, fake.OBJECT_ID, 'key_to_remove1'), mock.call(self.context, fake.OBJECT_ID, 'key_to_remove2')], any_order=True) @mock.patch('oslo_utils.timeutils.utcnow', return_value=timeutils.utcnow()) @mock.patch('cinder.objects.VolumeTypeList.get_all_types_for_qos', return_value=None) @mock.patch('cinder.db.sqlalchemy.api.qos_specs_delete') def test_destroy_no_vol_types(self, qos_fake_delete, fake_get_vol_types, utcnow_mock): qos_fake_delete.return_value = { 'deleted': True, 'deleted_at': utcnow_mock.return_value} qos_object = objects.QualityOfServiceSpecs._from_db_object( self.context, objects.QualityOfServiceSpecs(), fake_qos) qos_object.destroy() qos_fake_delete.assert_called_once_with(mock.ANY, fake_qos['id']) self.assertTrue(qos_object.deleted) self.assertEqual( utcnow_mock.return_value.replace(tzinfo=ZoneInfo('UTC')), qos_object.deleted_at) @mock.patch('cinder.db.sqlalchemy.api.qos_specs_delete') @mock.patch('cinder.db.qos_specs_disassociate_all') 
@mock.patch('cinder.objects.VolumeTypeList.get_all_types_for_qos') def test_destroy_with_vol_types(self, fake_get_vol_types, qos_fake_disassociate, qos_fake_delete): qos_object = objects.QualityOfServiceSpecs._from_db_object( self.context, objects.QualityOfServiceSpecs(), fake_qos) fake_get_vol_types.return_value = objects.VolumeTypeList( objects=[objects.VolumeType(id=fake.VOLUME_TYPE_ID)]) self.assertRaises(exception.QoSSpecsInUse, qos_object.destroy) qos_object.destroy(force=True) qos_fake_delete.assert_called_once_with(mock.ANY, fake_qos['id']) qos_fake_disassociate.assert_called_once_with( self.context, fake_qos['id']) @mock.patch('cinder.objects.VolumeTypeList.get_all_types_for_qos', return_value=None) @mock.patch('cinder.db.get_by_id', return_value=fake_qos) def test_get_volume_type(self, fake_get_by_id, fake_get_vol_types): qos_object = objects.QualityOfServiceSpecs.get_by_id( self.context, fake.OBJECT_ID) self.assertFalse(fake_get_vol_types.called) # Access lazy-loadable attribute qos_object.volume_types self.assertTrue(fake_get_vol_types.called) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/objects/test_service.py0000664000175000017500000002476200000000000023171 0ustar00zuulzuul00000000000000# Copyright 2015 Intel Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
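# NOTE: editor's illustrative sketch, not part of the original Cinder test
# module. The NOTE(mdovgal) comment in test_service_is_up below explains why
# a real timestamp must not be passed to @ddt.data; the hypothetical,
# standard-library-only example here shows the underlying reason: decorator
# arguments are evaluated once, when the module is imported, not when the
# generated test later runs.
def _decorator_argument_evaluation_example():
    """Show that a value captured in a decorator argument goes stale."""
    import datetime
    import functools

    captured_at_definition = datetime.datetime.utcnow()

    def fake_data(value):
        # Stand-in for ddt.data: ``value`` is already fixed here, long
        # before the decorated callable is ever invoked.
        def decorator(func):
            @functools.wraps(func)
            def wrapper():
                return func(value)
            return wrapper
        return decorator

    @fake_data(captured_at_definition)
    def test_like_callable(timestamp):
        # By the time this runs, ``timestamp`` can be arbitrarily old, which
        # is what makes utcnow()-based ddt.data values unreliable.
        return datetime.datetime.utcnow() - timestamp

    return test_like_callable()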
import datetime from unittest import mock from zoneinfo import ZoneInfo import ddt from oslo_utils import timeutils from cinder import exception from cinder import objects from cinder.tests.unit import fake_cluster from cinder.tests.unit import fake_service from cinder.tests.unit import objects as test_objects @ddt.ddt class TestService(test_objects.BaseObjectsTestCase): @mock.patch('cinder.db.sqlalchemy.api.service_get') def test_get_by_id(self, service_get): db_service = fake_service.fake_db_service() service_get.return_value = db_service service = objects.Service.get_by_id(self.context, 1) self._compare(self, db_service, service) service_get.assert_called_once_with(self.context, 1) @ddt.data(True, False) @mock.patch('cinder.db.service_get') def test_get_by_host_and_topic(self, show_disabled, service_get): db_service = fake_service.fake_db_service() service_get.return_value = db_service service = objects.Service.get_by_host_and_topic( self.context, 'fake-host', 'fake-topic', disabled=show_disabled) self._compare(self, db_service, service) service_get.assert_called_once_with( self.context, disabled=show_disabled, host='fake-host', topic='fake-topic') @mock.patch('cinder.db.service_get') def test_get_by_args(self, service_get): db_service = fake_service.fake_db_service() service_get.return_value = db_service service = objects.Service.get_by_args( self.context, 'fake-host', 'fake-key') self._compare(self, db_service, service) service_get.assert_called_once_with( self.context, host='fake-host', binary='fake-key') @mock.patch('cinder.db.service_create') def test_create(self, service_create): db_service = fake_service.fake_db_service() service_create.return_value = db_service service = objects.Service(context=self.context) service.create() self.assertEqual(db_service['id'], service.id) service_create.assert_called_once_with(self.context, {'uuid': mock.ANY}) @mock.patch('cinder.db.service_update') def test_save(self, service_update): db_service = fake_service.fake_db_service() service = objects.Service._from_db_object( self.context, objects.Service(), db_service) service.topic = 'foobar' service.save() service_update.assert_called_once_with(self.context, service.id, {'topic': 'foobar'}, True) @mock.patch('oslo_utils.timeutils.utcnow', return_value=timeutils.utcnow()) @mock.patch('cinder.db.sqlalchemy.api.service_destroy') def test_destroy(self, service_destroy, utcnow_mock): service_destroy.return_value = { 'deleted': True, 'deleted_at': utcnow_mock.return_value} db_service = fake_service.fake_db_service() service = objects.Service._from_db_object( self.context, objects.Service(), db_service) with mock.patch.object(service._context, 'elevated') as elevated_ctx: service.destroy() service_destroy.assert_called_once_with(elevated_ctx(), 123) self.assertTrue(service.deleted) self.assertEqual( utcnow_mock.return_value.replace(tzinfo=ZoneInfo('UTC')), service.deleted_at) @mock.patch('cinder.db.sqlalchemy.api.service_get') def test_refresh(self, service_get): db_service1 = fake_service.fake_db_service() db_service2 = db_service1.copy() db_service2['availability_zone'] = 'foobar' # On the second service_get, return the service with an updated # availability_zone service_get.side_effect = [db_service1, db_service2] service = objects.Service.get_by_id(self.context, 123) self._compare(self, db_service1, service) # availability_zone was updated, so a service refresh should have a # new value for that field service.refresh() self._compare(self, db_service2, service) 
service_get.assert_has_calls([mock.call(self.context, 123), mock.call.__bool__(), mock.call(self.context, 123)]) @mock.patch('cinder.db.service_get_all') def test_get_minimum_version(self, service_get_all): services_update = [ {'rpc_current_version': '1.0', 'object_current_version': '1.3'}, {'rpc_current_version': '1.1', 'object_current_version': '1.2'}, {'rpc_current_version': '2.0', 'object_current_version': '2.5'}, ] expected = ('1.0', '1.2') services = [fake_service.fake_db_service(**s) for s in services_update] service_get_all.return_value = services min_rpc = objects.Service.get_minimum_rpc_version(self.context, 'foo') self.assertEqual(expected[0], min_rpc) min_obj = objects.Service.get_minimum_obj_version(self.context, 'foo') self.assertEqual(expected[1], min_obj) service_get_all.assert_has_calls( [mock.call(self.context, binary='foo', disabled=None)] * 2) @mock.patch('cinder.db.service_get_all') def test_get_minimum_version_liberty(self, service_get_all): services_update = [ {'rpc_current_version': '1.0', 'object_current_version': '1.3'}, {'rpc_current_version': '1.1', 'object_current_version': None}, {'rpc_current_version': None, 'object_current_version': '2.5'}, ] services = [fake_service.fake_db_service(**s) for s in services_update] service_get_all.return_value = services self.assertRaises(exception.ServiceTooOld, objects.Service.get_minimum_rpc_version, self.context, 'foo') self.assertRaises(exception.ServiceTooOld, objects.Service.get_minimum_obj_version, self.context, 'foo') @mock.patch('cinder.db.service_get_all') def test_get_minimum_version_no_binary(self, service_get_all): services_update = [ {'rpc_current_version': '1.0', 'object_current_version': '1.3'}, {'rpc_current_version': '1.1', 'object_current_version': '1.2'}, {'rpc_current_version': '2.0', 'object_current_version': '2.5'}, ] services = [fake_service.fake_db_service(**s) for s in services_update] service_get_all.return_value = services min_obj = objects.Service.get_minimum_obj_version(self.context) self.assertEqual('1.2', min_obj) service_get_all.assert_called_once_with(self.context, binary=None, disabled=None) @mock.patch('cinder.db.sqlalchemy.api.cluster_get') def test_lazy_loading_cluster_field(self, cluster_get): cluster_orm = fake_cluster.fake_cluster_orm(name='mycluster') cluster_get.return_value = cluster_orm cluster = objects.Cluster._from_db_object(self.context, objects.Cluster(), cluster_orm) service = fake_service.fake_service_obj(self.context, cluster_name='mycluster') self.assertEqual(cluster, service.cluster) cluster_get.assert_called_once_with(self.context, None, name='mycluster') def test_service_is_up(self): # NOTE(mdovgal): don't use @ddt.data with the real timestamp value # for this test. # When using ddt decorators ddt.data seems to have been calculated # not at the time of test's execution but at the tests's beginning. # And this one depends on utcnow func. So it won't be utcnow at the # execution moment and the result will be unexpected. 
down_time = 5 self.flags(service_down_time=down_time) # test if service is up service = fake_service.fake_service_obj(self.context) self.assertTrue(service.is_up) service.updated_at = timeutils.utcnow() self.assertTrue(service.is_up) # test is service is down now past_time = timeutils.utcnow() - datetime.timedelta(seconds=64) service.updated_at = past_time self.assertFalse(service.is_up) class TestServiceList(test_objects.BaseObjectsTestCase): @mock.patch('cinder.db.service_get_all') def test_get_all(self, service_get_all): db_service = fake_service.fake_db_service() service_get_all.return_value = [db_service] filters = {'host': 'host', 'binary': 'foo', 'disabled': False} services = objects.ServiceList.get_all(self.context, filters) service_get_all.assert_called_once_with(self.context, **filters) self.assertEqual(1, len(services)) TestService._compare(self, db_service, services[0]) @mock.patch('cinder.db.service_get_all') def test_get_all_by_topic(self, service_get_all): db_service = fake_service.fake_db_service() service_get_all.return_value = [db_service] services = objects.ServiceList.get_all_by_topic( self.context, 'foo', 'bar') service_get_all.assert_called_once_with( self.context, topic='foo', disabled='bar') self.assertEqual(1, len(services)) TestService._compare(self, db_service, services[0]) @mock.patch('cinder.db.service_get_all') def test_get_all_by_binary(self, service_get_all): db_service = fake_service.fake_db_service() service_get_all.return_value = [db_service] services = objects.ServiceList.get_all_by_binary( self.context, 'foo', 'bar') service_get_all.assert_called_once_with( self.context, binary='foo', disabled='bar') self.assertEqual(1, len(services)) TestService._compare(self, db_service, services[0]) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/objects/test_snapshot.py0000664000175000017500000004036600000000000023366 0ustar00zuulzuul00000000000000# Copyright 2015 SimpliVity Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
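# NOTE: editor's illustrative sketch, not part of the original Cinder test
# module. test_refresh below (and its counterparts in the other object test
# modules) patches a DB getter with a two-element side_effect so the first
# call returns the original row and the second call, made by refresh(),
# returns an updated copy. The hypothetical, self-contained example here
# shows that unittest.mock behaviour in isolation.
def _side_effect_refresh_example():
    """Demonstrate successive side_effect return values (editor's sketch)."""
    from unittest import mock

    getter = mock.Mock(side_effect=[{'display_name': 'old'},
                                    {'display_name': 'new'}])
    first = getter('ctxt', 'fake-id')    # the initial get_by_id() read
    second = getter('ctxt', 'fake-id')   # the re-read done by refresh()
    assert first['display_name'] == 'old'
    assert second['display_name'] == 'new'
    getter.assert_has_calls([mock.call('ctxt', 'fake-id'),
                             mock.call('ctxt', 'fake-id')])
    return first, second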
import copy from unittest import mock from zoneinfo import ZoneInfo import ddt from oslo_utils import timeutils from cinder.db.sqlalchemy import models from cinder import exception from cinder import objects from cinder.objects import fields from cinder.tests.unit import fake_constants as fake from cinder.tests.unit import fake_snapshot from cinder.tests.unit import fake_volume from cinder.tests.unit import objects as test_objects fake_db_snapshot = fake_snapshot.fake_db_snapshot( cgsnapshot_id=fake.CGSNAPSHOT_ID) del fake_db_snapshot['metadata'] del fake_db_snapshot['volume'] # NOTE(andrey-mp): make Snapshot object here to check object algorithms fake_snapshot_obj = { 'id': fake.SNAPSHOT_ID, 'volume_id': fake.VOLUME_ID, 'status': fields.SnapshotStatus.CREATING, 'progress': '0%', 'volume_size': 1, 'display_name': 'fake_name', 'display_description': 'fake_description', 'metadata': {}, } @ddt.ddt class TestSnapshot(test_objects.BaseObjectsTestCase): @mock.patch('cinder.db.get_by_id', return_value=fake_db_snapshot) def test_get_by_id(self, snapshot_get): snapshot = objects.Snapshot.get_by_id(self.context, 1) self._compare(self, fake_snapshot_obj, snapshot) snapshot_get.assert_called_once_with(self.context, models.Snapshot, 1) @mock.patch('cinder.db.sqlalchemy.api.model_query') def test_get_by_id_no_existing_id(self, model_query): query = model_query().options().options().filter_by().first query.return_value = None self.assertRaises(exception.SnapshotNotFound, objects.Snapshot.get_by_id, self.context, 123) def test_reset_changes(self): snapshot = objects.Snapshot() snapshot.metadata = {'key1': 'value1'} self.assertEqual({}, snapshot._orig_metadata) snapshot.obj_reset_changes(['metadata']) self.assertEqual({'key1': 'value1'}, snapshot._orig_metadata) @mock.patch('cinder.db.snapshot_create', return_value=fake_db_snapshot) def test_create(self, snapshot_create): snapshot = objects.Snapshot(context=self.context) snapshot.create() self.assertEqual(fake_snapshot_obj['id'], snapshot.id) self.assertEqual(fake_snapshot_obj['volume_id'], snapshot.volume_id) @mock.patch('cinder.db.snapshot_create') def test_create_with_provider_id(self, snapshot_create): snapshot_create.return_value = copy.deepcopy(fake_db_snapshot) snapshot_create.return_value['provider_id'] = fake.PROVIDER_ID snapshot = objects.Snapshot(context=self.context) snapshot.create() self.assertEqual(fake.PROVIDER_ID, snapshot.provider_id) @mock.patch('cinder.db.snapshot_update') def test_save(self, snapshot_update): snapshot = objects.Snapshot._from_db_object( self.context, objects.Snapshot(), fake_db_snapshot) snapshot.display_name = 'foobar' snapshot.save() snapshot_update.assert_called_once_with(self.context, snapshot.id, {'display_name': 'foobar'}) @mock.patch('cinder.db.snapshot_metadata_update', return_value={'key1': 'value1'}) @mock.patch('cinder.db.snapshot_update') def test_save_with_metadata(self, snapshot_update, snapshot_metadata_update): snapshot = objects.Snapshot._from_db_object( self.context, objects.Snapshot(), fake_db_snapshot) snapshot.display_name = 'foobar' snapshot.metadata = {'key1': 'value1'} self.assertEqual({'display_name': 'foobar', 'metadata': {'key1': 'value1'}}, snapshot.obj_get_changes()) snapshot.save() snapshot_update.assert_called_once_with(self.context, snapshot.id, {'display_name': 'foobar'}) snapshot_metadata_update.assert_called_once_with(self.context, fake.SNAPSHOT_ID, {'key1': 'value1'}, True) @mock.patch('oslo_utils.timeutils.utcnow', return_value=timeutils.utcnow()) 
@mock.patch('cinder.db.sqlalchemy.api.snapshot_destroy') def test_destroy(self, snapshot_destroy, utcnow_mock): snapshot_destroy.return_value = { 'status': 'deleted', 'deleted': True, 'deleted_at': utcnow_mock.return_value} snapshot = objects.Snapshot(context=self.context, id=fake.SNAPSHOT_ID) snapshot.destroy() self.assertTrue(snapshot_destroy.called) admin_context = snapshot_destroy.call_args[0][0] self.assertTrue(admin_context.is_admin) self.assertTrue(snapshot.deleted) self.assertEqual(fields.SnapshotStatus.DELETED, snapshot.status) self.assertEqual( utcnow_mock.return_value.replace(tzinfo=ZoneInfo('UTC')), snapshot.deleted_at) @mock.patch('cinder.db.snapshot_metadata_delete') def test_delete_metadata_key(self, snapshot_metadata_delete): snapshot = objects.Snapshot(self.context, id=fake.SNAPSHOT_ID) snapshot.metadata = {'key1': 'value1', 'key2': 'value2'} self.assertEqual({}, snapshot._orig_metadata) snapshot.delete_metadata_key(self.context, 'key2') self.assertEqual({'key1': 'value1'}, snapshot.metadata) snapshot_metadata_delete.assert_called_once_with(self.context, fake.SNAPSHOT_ID, 'key2') def test_obj_fields(self): volume = objects.Volume(context=self.context, id=fake.VOLUME_ID, _name_id=fake.VOLUME_NAME_ID) snapshot = objects.Snapshot(context=self.context, id=fake.VOLUME_ID, volume=volume) self.assertEqual(['name', 'volume_name'], snapshot.obj_extra_fields) self.assertEqual('snapshot-%s' % fake.VOLUME_ID, snapshot.name) self.assertEqual('volume-%s' % fake.VOLUME_NAME_ID, snapshot.volume_name) @mock.patch('cinder.objects.volume.Volume.get_by_id') @mock.patch('cinder.objects.cgsnapshot.CGSnapshot.get_by_id') def test_obj_load_attr(self, cgsnapshot_get_by_id, volume_get_by_id): snapshot = objects.Snapshot._from_db_object( self.context, objects.Snapshot(), fake_db_snapshot) # Test volume lazy-loaded field volume = objects.Volume(context=self.context, id=fake.VOLUME_ID) volume_get_by_id.return_value = volume self.assertEqual(volume, snapshot.volume) volume_get_by_id.assert_called_once_with(self.context, snapshot.volume_id) self.assertEqual(snapshot.metadata, {}) # Test cgsnapshot lazy-loaded field cgsnapshot = objects.CGSnapshot(context=self.context, id=fake.CGSNAPSHOT_ID) cgsnapshot_get_by_id.return_value = cgsnapshot self.assertEqual(cgsnapshot, snapshot.cgsnapshot) cgsnapshot_get_by_id.assert_called_once_with(self.context, snapshot.cgsnapshot_id) @mock.patch('cinder.objects.cgsnapshot.CGSnapshot.get_by_id') def test_obj_load_attr_cgroup_not_exist(self, cgsnapshot_get_by_id): fake_non_cg_db_snapshot = fake_snapshot.fake_db_snapshot( cgsnapshot_id=None) snapshot = objects.Snapshot._from_db_object( self.context, objects.Snapshot(), fake_non_cg_db_snapshot) self.assertIsNone(snapshot.cgsnapshot) cgsnapshot_get_by_id.assert_not_called() @mock.patch('cinder.objects.group_snapshot.GroupSnapshot.get_by_id') def test_obj_load_attr_group_not_exist(self, group_snapshot_get_by_id): fake_non_cg_db_snapshot = fake_snapshot.fake_db_snapshot( group_snapshot_id=None) snapshot = objects.Snapshot._from_db_object( self.context, objects.Snapshot(), fake_non_cg_db_snapshot) self.assertIsNone(snapshot.group_snapshot) group_snapshot_get_by_id.assert_not_called() @mock.patch('cinder.db.snapshot_data_get_for_project') def test_snapshot_data_get_for_project(self, snapshot_data_get): snapshot = objects.Snapshot._from_db_object( self.context, objects.Snapshot(), fake_db_snapshot) volume_type_id = mock.sentinel.volume_type_id snapshot.snapshot_data_get_for_project(self.context, self.project_id, volume_type_id) 
snapshot_data_get.assert_called_once_with(self.context, self.project_id, volume_type_id, host=None) @mock.patch('cinder.db.sqlalchemy.api.snapshot_get') def test_refresh(self, snapshot_get): db_snapshot1 = fake_snapshot.fake_db_snapshot() db_snapshot2 = db_snapshot1.copy() db_snapshot2['display_name'] = 'foobar' # On the second snapshot_get, return the snapshot with an updated # display_name snapshot_get.side_effect = [db_snapshot1, db_snapshot2] snapshot = objects.Snapshot.get_by_id(self.context, fake.SNAPSHOT_ID) self._compare(self, db_snapshot1, snapshot) # display_name was updated, so a snapshot refresh should have a new # value for that field snapshot.refresh() self._compare(self, db_snapshot2, snapshot) snapshot_get.assert_has_calls([ mock.call(self.context, fake.SNAPSHOT_ID), mock.call.__bool__(), mock.call(self.context, fake.SNAPSHOT_ID)]) class TestSnapshotList(test_objects.BaseObjectsTestCase): @mock.patch('cinder.objects.volume.Volume.get_by_id') @mock.patch('cinder.db.snapshot_get_all', return_value=[fake_db_snapshot]) def test_get_all(self, snapshot_get_all, volume_get_by_id): fake_volume_obj = fake_volume.fake_volume_obj(self.context) volume_get_by_id.return_value = fake_volume_obj search_opts = mock.sentinel.search_opts snapshots = objects.SnapshotList.get_all( self.context, search_opts) self.assertEqual(1, len(snapshots)) TestSnapshot._compare(self, fake_snapshot_obj, snapshots[0]) snapshot_get_all.assert_called_once_with(self.context, search_opts, None, None, None, None, None) @mock.patch('cinder.objects.Volume.get_by_id') @mock.patch('cinder.db.snapshot_get_all_by_host', return_value=[fake_db_snapshot]) def test_get_by_host(self, get_by_host, volume_get_by_id): fake_volume_obj = fake_volume.fake_volume_obj(self.context) volume_get_by_id.return_value = fake_volume_obj snapshots = objects.SnapshotList.get_by_host( self.context, 'fake-host') self.assertEqual(1, len(snapshots)) TestSnapshot._compare(self, fake_snapshot_obj, snapshots[0]) @mock.patch('cinder.objects.volume.Volume.get_by_id') @mock.patch('cinder.db.snapshot_get_all_by_project', return_value=[fake_db_snapshot]) def test_get_all_by_project(self, get_all_by_project, volume_get_by_id): fake_volume_obj = fake_volume.fake_volume_obj(self.context) volume_get_by_id.return_value = fake_volume_obj search_opts = mock.sentinel.search_opts snapshots = objects.SnapshotList.get_all_by_project( self.context, self.project_id, search_opts) self.assertEqual(1, len(snapshots)) TestSnapshot._compare(self, fake_snapshot_obj, snapshots[0]) get_all_by_project.assert_called_once_with(self.context, self.project_id, search_opts, None, None, None, None, None) @mock.patch('cinder.objects.volume.Volume.get_by_id') @mock.patch('cinder.db.snapshot_get_all_for_volume', return_value=[fake_db_snapshot]) def test_get_all_for_volume(self, get_all_for_volume, volume_get_by_id): fake_volume_obj = fake_volume.fake_volume_obj(self.context) volume_get_by_id.return_value = fake_volume_obj snapshots = objects.SnapshotList.get_all_for_volume( self.context, fake_volume_obj.id) self.assertEqual(1, len(snapshots)) TestSnapshot._compare(self, fake_snapshot_obj, snapshots[0]) @mock.patch('cinder.objects.volume.Volume.get_by_id') @mock.patch('cinder.db.snapshot_get_all_active_by_window', return_value=[fake_db_snapshot]) def test_get_all_active_by_window(self, get_all_active_by_window, volume_get_by_id): fake_volume_obj = fake_volume.fake_volume_obj(self.context) volume_get_by_id.return_value = fake_volume_obj snapshots = 
objects.SnapshotList.get_all_active_by_window( self.context, mock.sentinel.begin, mock.sentinel.end) self.assertEqual(1, len(snapshots)) TestSnapshot._compare(self, fake_snapshot_obj, snapshots[0]) @mock.patch('cinder.objects.volume.Volume.get_by_id') @mock.patch('cinder.db.snapshot_get_all_for_cgsnapshot', return_value=[fake_db_snapshot]) def test_get_all_for_cgsnapshot(self, get_all_for_cgsnapshot, volume_get_by_id): fake_volume_obj = fake_volume.fake_volume_obj(self.context) volume_get_by_id.return_value = fake_volume_obj snapshots = objects.SnapshotList.get_all_for_cgsnapshot( self.context, mock.sentinel.cgsnapshot_id) self.assertEqual(1, len(snapshots)) TestSnapshot._compare(self, fake_snapshot_obj, snapshots[0]) @mock.patch('cinder.objects.volume.Volume.get_by_id') @mock.patch('cinder.db.snapshot_get_all') def test_get_all_without_metadata(self, snapshot_get_all, volume_get_by_id): fake_volume_obj = fake_volume.fake_volume_obj(self.context) volume_get_by_id.return_value = fake_volume_obj snapshot = copy.deepcopy(fake_db_snapshot) del snapshot['snapshot_metadata'] snapshot_get_all.return_value = [snapshot] search_opts = mock.sentinel.search_opts self.assertRaises(exception.MetadataAbsent, objects.SnapshotList.get_all, self.context, search_opts) @mock.patch('cinder.objects.volume.Volume.get_by_id') @mock.patch('cinder.db.snapshot_get_all') def test_get_all_with_metadata(self, snapshot_get_all, volume_get_by_id): fake_volume_obj = fake_volume.fake_volume_obj(self.context) volume_get_by_id.return_value = fake_volume_obj db_snapshot = copy.deepcopy(fake_db_snapshot) db_snapshot['snapshot_metadata'] = [{'key': 'fake_key', 'value': 'fake_value'}] snapshot_get_all.return_value = [db_snapshot] search_opts = mock.sentinel.search_opts snapshots = objects.SnapshotList.get_all( self.context, search_opts) self.assertEqual(1, len(snapshots)) snapshot_obj = copy.deepcopy(fake_snapshot_obj) snapshot_obj['metadata'] = {'fake_key': 'fake_value'} TestSnapshot._compare(self, snapshot_obj, snapshots[0]) snapshot_get_all.assert_called_once_with(self.context, search_opts, None, None, None, None, None) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/objects/test_volume.py0000664000175000017500000010020100000000000023017 0ustar00zuulzuul00000000000000# Copyright 2015 SimpliVity Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
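#
# Unit tests for the Volume versioned object and VolumeList.
#
# The common pattern in this module: mock the database layer
# (cinder.db / cinder.db.sqlalchemy.api), build a fake DB row with the
# cinder.tests.unit.fake_volume helpers, then verify that the resulting
# objects.Volume round-trips that row via the shared _compare() helper.
# Lazy-loaded attributes (metadata, glance/admin metadata, volume_type,
# consistencygroup, snapshots, volume_attachment) are exercised by mocking
# the corresponding getter and simply touching the attribute.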
from unittest import mock from zoneinfo import ZoneInfo import ddt from oslo_utils import timeutils from cinder import context from cinder import exception from cinder import objects from cinder.objects import fields from cinder.tests.unit.consistencygroup import fake_consistencygroup from cinder.tests.unit import fake_constants as fake from cinder.tests.unit import fake_snapshot from cinder.tests.unit import fake_volume from cinder.tests.unit import objects as test_objects fake_group = { 'id': fake.GROUP_ID, 'user_id': fake.USER_ID, 'project_id': fake.PROJECT_ID, 'host': 'fake_host', 'availability_zone': 'fake_az', 'name': 'fake_name', 'description': 'fake_description', 'group_type_id': fake.GROUP_TYPE_ID, 'status': fields.GroupStatus.CREATING, } @ddt.ddt class TestVolume(test_objects.BaseObjectsTestCase): @staticmethod def _compare(test, db, obj): db = {k: v for k, v in db.items() if not k.endswith('metadata') or k.startswith('volume')} test_objects.BaseObjectsTestCase._compare(test, db, obj) @mock.patch('cinder.db.sqlalchemy.api.volume_get') def test_get_by_id(self, volume_get): db_volume = fake_volume.fake_db_volume() volume_get.return_value = db_volume volume = objects.Volume.get_by_id(self.context, fake.VOLUME_ID) volume_get.assert_called_once_with(self.context, fake.VOLUME_ID) self._compare(self, db_volume, volume) @mock.patch('cinder.db.sqlalchemy.api.model_query') def test_get_by_id_no_existing_id(self, model_query): pf = (model_query().options().options().options().options().options(). options()) pf.filter_by().first.return_value = None self.assertRaises(exception.VolumeNotFound, objects.Volume.get_by_id, self.context, 123) @mock.patch('cinder.db.volume_create') def test_create(self, volume_create): db_volume = fake_volume.fake_db_volume() volume_create.return_value = db_volume volume = objects.Volume(context=self.context) volume.create() self.assertEqual(db_volume['id'], volume.id) @mock.patch('cinder.db.volume_update') @ddt.data(False, True) def test_save(self, test_cg, volume_update): db_volume = fake_volume.fake_db_volume() volume = objects.Volume._from_db_object(self.context, objects.Volume(), db_volume) volume.display_name = 'foobar' if test_cg: volume.consistencygroup = None volume.save() volume_update.assert_called_once_with(self.context, volume.id, {'display_name': 'foobar'}) def test_save_error(self): db_volume = fake_volume.fake_db_volume() volume = objects.Volume._from_db_object(self.context, objects.Volume(), db_volume) volume.display_name = 'foobar' volume.consistencygroup = ( fake_consistencygroup.fake_consistencyobject_obj(self.context)) self.assertRaises(exception.ObjectActionError, volume.save) @mock.patch('cinder.db.volume_metadata_update', return_value={'key1': 'value1'}) @mock.patch('cinder.db.volume_update') def test_save_with_metadata(self, volume_update, metadata_update): db_volume = fake_volume.fake_db_volume() volume = objects.Volume._from_db_object(self.context, objects.Volume(), db_volume) volume.display_name = 'foobar' volume.metadata = {'key1': 'value1'} self.assertEqual({'display_name': 'foobar', 'metadata': {'key1': 'value1'}}, volume.obj_get_changes()) volume.save() volume_update.assert_called_once_with(self.context, volume.id, {'display_name': 'foobar'}) metadata_update.assert_called_once_with(self.context, volume.id, {'key1': 'value1'}, True) @mock.patch('cinder.db.volume_admin_metadata_update', return_value={'key1': 'value1'}) @mock.patch('cinder.db.volume_update') def test_save_with_admin_metadata(self, volume_update, admin_metadata_update): # 
Test with no admin context db_volume = fake_volume.fake_db_volume() volume = objects.Volume._from_db_object(self.context, objects.Volume(), db_volume) volume.admin_metadata = {'key1': 'value1'} volume.save() self.assertFalse(admin_metadata_update.called) # Test with admin context admin_context = context.RequestContext(self.user_id, self.project_id, is_admin=True) volume = objects.Volume._from_db_object(admin_context, objects.Volume(), db_volume) volume.admin_metadata = {'key1': 'value1'} volume.save() admin_metadata_update.assert_called_once_with( admin_context, volume.id, {'key1': 'value1'}, True) def test_save_with_glance_metadata(self): db_volume = fake_volume.fake_db_volume() volume = objects.Volume._from_db_object(self.context, objects.Volume(), db_volume) volume.display_name = 'foobar' volume.glance_metadata = {'key1': 'value1'} self.assertRaises(exception.ObjectActionError, volume.save) def test_save_with_consistencygroup(self): db_volume = fake_volume.fake_db_volume() volume = objects.Volume._from_db_object(self.context, objects.Volume(), db_volume) volume.display_name = 'foobar' volume.consistencygroup = objects.ConsistencyGroup() self.assertRaises(exception.ObjectActionError, volume.save) def test_save_with_snapshots(self): db_volume = fake_volume.fake_db_volume() volume = objects.Volume._from_db_object(self.context, objects.Volume(), db_volume) volume.display_name = 'foobar' volume.snapshots = objects.SnapshotList() self.assertRaises(exception.ObjectActionError, volume.save) @mock.patch('oslo_utils.timeutils.utcnow', return_value=timeutils.utcnow()) @mock.patch('cinder.db.sqlalchemy.api.volume_destroy') def test_destroy(self, volume_destroy, utcnow_mock): volume_destroy.return_value = { 'status': 'deleted', 'deleted': True, 'deleted_at': utcnow_mock.return_value} db_volume = fake_volume.fake_db_volume() volume = objects.Volume._from_db_object(self.context, objects.Volume(), db_volume) volume.destroy() self.assertTrue(volume_destroy.called) admin_context = volume_destroy.call_args[0][0] self.assertTrue(admin_context.is_admin) self.assertTrue(volume.deleted) self.assertEqual('deleted', volume.status) self.assertEqual( utcnow_mock.return_value.replace(tzinfo=ZoneInfo('UTC')), volume.deleted_at) self.assertIsNone(volume.migration_status) def test_obj_fields(self): volume = objects.Volume(context=self.context, id=fake.VOLUME_ID, name_id=fake.VOLUME_NAME_ID) self.assertEqual(['name', 'name_id', 'volume_metadata', 'volume_admin_metadata', 'volume_glance_metadata'], volume.obj_extra_fields) self.assertEqual('volume-%s' % fake.VOLUME_NAME_ID, volume.name) self.assertEqual(fake.VOLUME_NAME_ID, volume.name_id) def test_obj_field_previous_status(self): volume = objects.Volume(context=self.context, previous_status='backing-up') self.assertEqual('backing-up', volume.previous_status) @mock.patch('cinder.db.volume_metadata_delete') def test_delete_metadata_key(self, metadata_delete): volume = objects.Volume(self.context, id=fake.VOLUME_ID) volume.metadata = {'key1': 'value1', 'key2': 'value2'} self.assertEqual({}, volume._orig_metadata) volume.delete_metadata_key('key2') self.assertEqual({'key1': 'value1'}, volume.metadata) metadata_delete.assert_called_once_with(self.context, fake.VOLUME_ID, 'key2') @mock.patch('cinder.db.volume_metadata_get') @mock.patch('cinder.db.volume_glance_metadata_get') @mock.patch('cinder.db.volume_admin_metadata_get') @mock.patch('cinder.objects.volume_type.VolumeType.get_by_id') @mock.patch('cinder.objects.volume_attachment.VolumeAttachmentList.' 
'get_all_by_volume_id') @mock.patch('cinder.objects.consistencygroup.ConsistencyGroup.get_by_id') @mock.patch('cinder.objects.snapshot.SnapshotList.get_all_for_volume') def test_obj_load_attr(self, mock_sl_get_all_for_volume, mock_cg_get_by_id, mock_va_get_all_by_vol, mock_vt_get_by_id, mock_admin_metadata_get, mock_glance_metadata_get, mock_metadata_get): fake_db_volume = fake_volume.fake_db_volume( consistencygroup_id=fake.CONSISTENCY_GROUP_ID) volume = objects.Volume._from_db_object( self.context, objects.Volume(), fake_db_volume) # Test metadata lazy-loaded field metadata = {'foo': 'bar'} mock_metadata_get.return_value = metadata self.assertEqual(metadata, volume.metadata) mock_metadata_get.assert_called_once_with(self.context, volume.id) # Test glance_metadata lazy-loaded field glance_metadata = [{'key': 'foo', 'value': 'bar'}] mock_glance_metadata_get.return_value = glance_metadata self.assertEqual({'foo': 'bar'}, volume.glance_metadata) mock_glance_metadata_get.assert_called_once_with( self.context, volume.id) # Test volume_type lazy-loaded field # Case1. volume.volume_type_id = None self.assertIsNone(volume.volume_type) # Case2. volume2.volume_type_id = 1 fake2 = fake_volume.fake_db_volume() fake2.update({'volume_type_id': fake.VOLUME_ID}) volume2 = objects.Volume._from_db_object( self.context, objects.Volume(), fake2) volume_type = objects.VolumeType(context=self.context, id=fake.VOLUME_TYPE_ID) mock_vt_get_by_id.return_value = volume_type self.assertEqual(volume_type, volume2.volume_type) mock_vt_get_by_id.assert_called_once_with(self.context, volume2.volume_type_id) # Test consistencygroup lazy-loaded field consistencygroup = objects.ConsistencyGroup( context=self.context, id=fake.CONSISTENCY_GROUP_ID) mock_cg_get_by_id.return_value = consistencygroup self.assertEqual(consistencygroup, volume.consistencygroup) mock_cg_get_by_id.assert_called_once_with(self.context, volume.consistencygroup_id) # Test snapshots lazy-loaded field snapshots = objects.SnapshotList(context=self.context, id=fake.SNAPSHOT_ID) mock_sl_get_all_for_volume.return_value = snapshots self.assertEqual(snapshots, volume.snapshots) mock_sl_get_all_for_volume.assert_called_once_with(self.context, volume.id) # Test volume_attachment lazy-loaded field va_objs = [objects.VolumeAttachment(context=self.context, id=i) for i in [fake.OBJECT_ID, fake.OBJECT2_ID, fake.OBJECT3_ID]] va_list = objects.VolumeAttachmentList(context=self.context, objects=va_objs) mock_va_get_all_by_vol.return_value = va_list self.assertEqual(va_list, volume.volume_attachment) mock_va_get_all_by_vol.assert_called_once_with(self.context, volume.id) # Test admin_metadata lazy-loaded field - user context adm_metadata = {'bar': 'foo'} mock_admin_metadata_get.return_value = adm_metadata self.assertEqual({}, volume.admin_metadata) self.assertFalse(mock_admin_metadata_get.called) # Test admin_metadata lazy-loaded field - admin context adm_context = self.context.elevated() volume = objects.Volume._from_db_object(adm_context, objects.Volume(), fake_volume.fake_db_volume()) adm_metadata = {'bar': 'foo'} mock_admin_metadata_get.return_value = adm_metadata self.assertEqual(adm_metadata, volume.admin_metadata) mock_admin_metadata_get.assert_called_once_with(adm_context, volume.id) @mock.patch('cinder.objects.consistencygroup.ConsistencyGroup.get_by_id') def test_obj_load_attr_cgroup_not_exist(self, mock_cg_get_by_id): fake_db_volume = fake_volume.fake_db_volume(consistencygroup_id=None) volume = objects.Volume._from_db_object( self.context, objects.Volume(), 
fake_db_volume) self.assertIsNone(volume.consistencygroup) mock_cg_get_by_id.assert_not_called() @mock.patch('cinder.objects.group.Group.get_by_id') def test_obj_load_attr_group_not_exist(self, mock_group_get_by_id): fake_db_volume = fake_volume.fake_db_volume(group_id=None) volume = objects.Volume._from_db_object( self.context, objects.Volume(), fake_db_volume) self.assertIsNone(volume.group) mock_group_get_by_id.assert_not_called() def test_from_db_object_with_all_expected_attributes(self): expected_attrs = ['metadata', 'admin_metadata', 'glance_metadata', 'volume_type', 'volume_attachment', 'consistencygroup'] db_metadata = [{'key': 'foo', 'value': 'bar'}] db_admin_metadata = [{'key': 'admin_foo', 'value': 'admin_bar'}] db_glance_metadata = [{'key': 'glance_foo', 'value': 'glance_bar'}] db_volume_type = fake_volume.fake_db_volume_type() db_volume_attachments = fake_volume.volume_attachment_db_obj() db_consistencygroup = fake_consistencygroup.fake_db_consistencygroup() db_snapshots = fake_snapshot.fake_db_snapshot() db_volume = fake_volume.fake_db_volume( volume_metadata=db_metadata, volume_admin_metadata=db_admin_metadata, volume_glance_metadata=db_glance_metadata, volume_type=db_volume_type, volume_attachment=[db_volume_attachments], consistencygroup=db_consistencygroup, snapshots=[db_snapshots], ) volume = objects.Volume._from_db_object(self.context, objects.Volume(), db_volume, expected_attrs) self.assertEqual({'foo': 'bar'}, volume.metadata) self.assertEqual({'admin_foo': 'admin_bar'}, volume.admin_metadata) self.assertEqual({'glance_foo': 'glance_bar'}, volume.glance_metadata) self._compare(self, db_volume_type, volume.volume_type) self._compare(self, db_volume_attachments, volume.volume_attachment) self._compare(self, db_consistencygroup, volume.consistencygroup) self._compare(self, db_snapshots, volume.snapshots) @mock.patch('cinder.db.volume_glance_metadata_get', return_value={}) @mock.patch('cinder.db.sqlalchemy.api.volume_get') def test_refresh(self, volume_get, volume_metadata_get): db_volume1 = fake_volume.fake_db_volume() db_volume2 = db_volume1.copy() db_volume2['display_name'] = 'foobar' # On the second volume_get, return the volume with an updated # display_name volume_get.side_effect = [db_volume1, db_volume2] volume = objects.Volume.get_by_id(self.context, fake.VOLUME_ID) self._compare(self, db_volume1, volume) # display_name was updated, so a volume refresh should have a new value # for that field volume.refresh() self._compare(self, db_volume2, volume) volume_get.assert_has_calls([mock.call(self.context, fake.VOLUME_ID), mock.call.__bool__(), mock.call(self.context, fake.VOLUME_ID)]) def test_metadata_aliases(self): volume = objects.Volume(context=self.context) # metadata<->volume_metadata volume.metadata = {'abc': 'def'} self.assertEqual([{'key': 'abc', 'value': 'def'}], volume.volume_metadata) md = [{'key': 'def', 'value': 'abc'}] volume.volume_metadata = md self.assertEqual({'def': 'abc'}, volume.metadata) # admin_metadata<->volume_admin_metadata volume.admin_metadata = {'foo': 'bar'} self.assertEqual([{'key': 'foo', 'value': 'bar'}], volume.volume_admin_metadata) volume.volume_admin_metadata = [{'key': 'xyz', 'value': '42'}] self.assertEqual({'xyz': '42'}, volume.admin_metadata) # glance_metadata<->volume_glance_metadata volume.glance_metadata = {'jkl': 'mno'} self.assertEqual([{'key': 'jkl', 'value': 'mno'}], volume.volume_glance_metadata) volume.volume_glance_metadata = [{'key': 'prs', 'value': 'tuw'}] self.assertEqual({'prs': 'tuw'}, volume.glance_metadata) 
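# finish_volume_migration() copies the source volume's attributes onto the
# destination at the end of a migration. The ddt-driven test below focuses
# on what must NOT be copied blindly: the destination keeps its own
# volume_type_id, the use_quota flags are not swapped (that would corrupt
# quota accounting), the destination's _name_id is pointed at the source
# volume's id, and its migration_status ends up as 'deleting'.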
@mock.patch('cinder.db.volume_metadata_update', return_value={}) @mock.patch('cinder.db.volume_update') @ddt.data({'src_vol_type_id': fake.VOLUME_TYPE_ID, 'dest_vol_type_id': fake.VOLUME_TYPE2_ID}, {'src_vol_type_id': None, 'dest_vol_type_id': fake.VOLUME_TYPE2_ID}) @ddt.unpack def test_finish_volume_migration(self, volume_update, metadata_update, src_vol_type_id, dest_vol_type_id): src_volume_db = fake_volume.fake_db_volume( **{'id': fake.VOLUME_ID, 'volume_type_id': src_vol_type_id, 'use_quota': True}) if src_vol_type_id: src_volume_db['volume_type'] = fake_volume.fake_db_volume_type( id=src_vol_type_id) dest_volume_db = fake_volume.fake_db_volume( **{'id': fake.VOLUME2_ID, 'volume_type_id': dest_vol_type_id, 'use_quota': False}) if dest_vol_type_id: dest_volume_db['volume_type'] = fake_volume.fake_db_volume_type( id=dest_vol_type_id) expected_attrs = objects.Volume._get_expected_attrs(self.context) src_volume = objects.Volume._from_db_object( self.context, objects.Volume(), src_volume_db, expected_attrs=expected_attrs) dest_volume = objects.Volume._from_db_object( self.context, objects.Volume(), dest_volume_db, expected_attrs=expected_attrs) updated_dest_volume = src_volume.finish_volume_migration( dest_volume) self.assertEqual('deleting', updated_dest_volume.migration_status) self.assertEqual('migration src for ' + src_volume.id, updated_dest_volume.display_description) self.assertEqual(src_volume.id, updated_dest_volume._name_id) self.assertTrue(volume_update.called) volume_update.assert_has_calls([ mock.call(self.context, src_volume.id, mock.ANY), mock.call(self.context, dest_volume.id, mock.ANY)]) ctxt, vol_id, updates = volume_update.call_args[0] self.assertNotIn('volume_type', updates) # Ensure that the destination volume type has not been overwritten self.assertEqual(dest_vol_type_id, getattr(updated_dest_volume, 'volume_type_id')) # Ignore these attributes, since they were updated by # finish_volume_migration ignore_keys = ('id', 'provider_location', '_name_id', 'migration_status', 'display_description', 'status', 'volume_glance_metadata', 'volume_type', 'use_quota', 'volume_attachment') dest_vol_dict = {k: updated_dest_volume[k] for k in updated_dest_volume.keys() if k not in ignore_keys} src_vol_dict = {k: src_volume[k] for k in src_volume.keys() if k not in ignore_keys} self.assertEqual(src_vol_dict, dest_vol_dict) # use_quota must not have been switched, we'll mess our quota otherwise self.assertTrue(src_volume.use_quota) self.assertFalse(updated_dest_volume.use_quota) def test_volume_with_metadata_serialize_deserialize_no_changes(self): updates = {'volume_glance_metadata': [{'key': 'foo', 'value': 'bar'}], 'expected_attrs': ['glance_metadata']} volume = fake_volume.fake_volume_obj(self.context, **updates) serializer = objects.base.CinderObjectSerializer() serialized_volume = serializer.serialize_entity(self.context, volume) volume = serializer.deserialize_entity(self.context, serialized_volume) self.assertDictEqual({}, volume.obj_get_changes()) @mock.patch('cinder.db.volume_admin_metadata_update') @mock.patch('cinder.db.sqlalchemy.api.volume_attach') def test_begin_attach(self, volume_attach, metadata_update): volume = fake_volume.fake_volume_obj(self.context, use_quota=True) db_attachment = fake_volume.volume_attachment_db_obj( volume_id=volume.id, attach_status=fields.VolumeAttachStatus.ATTACHING) volume_attach.return_value = db_attachment metadata_update.return_value = {'attached_mode': 'rw'} with mock.patch.object(self.context, 'elevated') as mock_elevated: 
mock_elevated.return_value = context.get_admin_context() attachment = volume.begin_attach("rw") self.assertIsInstance(attachment, objects.VolumeAttachment) self.assertEqual(volume.id, attachment.volume_id) self.assertEqual(fields.VolumeAttachStatus.ATTACHING, attachment.attach_status) metadata_update.assert_called_once_with(self.context.elevated(), volume.id, {'attached_mode': u'rw'}, True) self.assertEqual('rw', volume.admin_metadata['attached_mode']) @mock.patch('cinder.db.volume_admin_metadata_delete') @mock.patch('cinder.db.sqlalchemy.api.volume_detached') @mock.patch('cinder.objects.volume_attachment.VolumeAttachmentList.' 'get_all_by_volume_id') def test_volume_detached_with_attachment( self, volume_attachment_get, volume_detached, metadata_delete): va_objs = [objects.VolumeAttachment(context=self.context, id=i, volume_id=fake.VOLUME_ID) for i in [fake.OBJECT_ID, fake.OBJECT2_ID, fake.OBJECT3_ID]] # As changes are not saved, we need reset it here. Later changes # will be checked. for obj in va_objs: obj.obj_reset_changes() va_list = objects.VolumeAttachmentList(context=self.context, objects=va_objs) va_list.obj_reset_changes() volume_attachment_get.return_value = va_list admin_context = context.get_admin_context() volume = fake_volume.fake_volume_obj( admin_context, id=fake.VOLUME_ID, volume_attachment=va_list, volume_admin_metadata=[{'key': 'attached_mode', 'value': 'rw'}]) self.assertEqual(3, len(volume.volume_attachment)) volume_detached.return_value = ({'status': 'in-use'}, {'attached_mode': 'rw'}) with mock.patch.object(admin_context, 'elevated') as mock_elevated: mock_elevated.return_value = admin_context volume.finish_detach(fake.OBJECT_ID) volume_detached.assert_called_once_with(admin_context, volume.id, fake.OBJECT_ID) metadata_delete.assert_called_once_with(admin_context, volume.id, 'attached_mode') self.assertEqual('in-use', volume.status) self.assertEqual({}, volume.cinder_obj_get_changes()) self.assertEqual(2, len(volume.volume_attachment)) self.assertNotIn('attached_mode', volume.admin_metadata) @mock.patch('cinder.db.volume_admin_metadata_delete') @mock.patch('cinder.db.sqlalchemy.api.volume_detached') @mock.patch('cinder.objects.volume_attachment.VolumeAttachmentList.' 
'get_all_by_volume_id') def test_volume_detached_without_attachment( self, volume_attachment_get, volume_detached, metadata_delete): admin_context = context.get_admin_context() volume = fake_volume.fake_volume_obj( admin_context, volume_admin_metadata=[{'key': 'attached_mode', 'value': 'rw'}]) self.assertEqual([], volume.volume_attachment.objects) volume_detached.return_value = ({'status': 'in-use'}, None) with mock.patch.object(admin_context, 'elevated') as mock_elevated: mock_elevated.return_value = admin_context volume.finish_detach(fake.OBJECT_ID) metadata_delete.assert_called_once_with(admin_context, volume.id, 'attached_mode') volume_detached.assert_called_once_with(admin_context, volume.id, fake.OBJECT_ID) self.assertEqual('in-use', volume.status) self.assertEqual({}, volume.cinder_obj_get_changes()) self.assertFalse(volume_attachment_get.called) @ddt.data(True, False) def test_is_replicated(self, result): volume_type = fake_volume.fake_volume_type_obj(self.context) volume = fake_volume.fake_volume_obj( self.context, volume_type_id=volume_type.id) volume.volume_type = volume_type with mock.patch.object(volume_type, 'is_replicated', return_value=result) as is_replicated: self.assertEqual(result, volume.is_replicated()) is_replicated.assert_called_once_with() def test_is_replicated_no_type(self): volume = fake_volume.fake_volume_obj( self.context, volume_type_id=None, volume_type=None) self.assertFalse(bool(volume.is_replicated())) @ddt.data((None, False), ('error', False), ('success', False), ('target:123456', True)) @ddt.unpack def test_is_migration_target(self, migration_status, expected): volume = fake_volume.fake_volume_obj(self.context, migration_status=migration_status) self.assertIs(expected, volume.is_migration_target()) @ddt.ddt class TestVolumeList(test_objects.BaseObjectsTestCase): @mock.patch('cinder.db.volume_get_all') def test_get_all(self, volume_get_all): db_volume = fake_volume.fake_db_volume() volume_get_all.return_value = [db_volume] volumes = objects.VolumeList.get_all(self.context, mock.sentinel.marker, mock.sentinel.limit, mock.sentinel.sort_key, mock.sentinel.sort_dir) self.assertEqual(1, len(volumes)) TestVolume._compare(self, db_volume, volumes[0]) @mock.patch('cinder.db.volume_get_all_by_host') def test_get_by_host(self, get_all_by_host): db_volume = fake_volume.fake_db_volume() get_all_by_host.return_value = [db_volume] volumes = objects.VolumeList.get_all_by_host( self.context, 'fake-host') self.assertEqual(1, len(volumes)) TestVolume._compare(self, db_volume, volumes[0]) @mock.patch('cinder.db.volume_get_all_by_group') def test_get_by_group(self, get_all_by_group): db_volume = fake_volume.fake_db_volume() get_all_by_group.return_value = [db_volume] volumes = objects.VolumeList.get_all_by_group( self.context, 'fake-host') self.assertEqual(1, len(volumes)) TestVolume._compare(self, db_volume, volumes[0]) @mock.patch('cinder.db.volume_get_all_by_project') def test_get_by_project(self, get_all_by_project): db_volume = fake_volume.fake_db_volume() get_all_by_project.return_value = [db_volume] volumes = objects.VolumeList.get_all_by_project( self.context, mock.sentinel.project_id, mock.sentinel.marker, mock.sentinel.limit, mock.sentinel.sorted_keys, mock.sentinel.sorted_dirs, mock.sentinel.filters) self.assertEqual(1, len(volumes)) TestVolume._compare(self, db_volume, volumes[0]) @ddt.data(['name_id'], ['__contains__']) def test_get_by_project_with_sort_key(self, sort_keys): fake_volume.fake_db_volume() self.assertRaises(exception.InvalidInput, 
objects.VolumeList.get_all_by_project, self.context, self.context.project_id, sort_keys=sort_keys) @mock.patch('cinder.db.volume_include_in_cluster') def test_include_in_cluster(self, include_mock): filters = {'host': mock.sentinel.host, 'cluster_name': mock.sentinel.cluster_name} cluster = 'new_cluster' objects.VolumeList.include_in_cluster(self.context, cluster, **filters) include_mock.assert_called_once_with(self.context, cluster, True, **filters) @mock.patch('cinder.db.volume_include_in_cluster') def test_include_in_cluster_specify_partial(self, include_mock): filters = {'host': mock.sentinel.host, 'cluster_name': mock.sentinel.cluster_name} cluster = 'new_cluster' objects.VolumeList.include_in_cluster(self.context, cluster, mock.sentinel.partial_rename, **filters) include_mock.assert_called_once_with(self.context, cluster, mock.sentinel.partial_rename, **filters) @mock.patch('cinder.db.group_create', return_value=fake_group) def test_populate_consistencygroup(self, mock_db_grp_create): db_volume = fake_volume.fake_db_volume() volume = objects.Volume._from_db_object(self.context, objects.Volume(), db_volume) fake_grp = fake_group.copy() del fake_grp['id'] group = objects.Group(context=self.context, **fake_grp) group.create() volume.group_id = group.id volume.group = group volume.populate_consistencygroup() self.assertEqual(volume.group_id, volume.consistencygroup_id) self.assertEqual(volume.group.id, volume.consistencygroup.id) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/objects/test_volume_attachment.py0000664000175000017500000002216000000000000025236 0ustar00zuulzuul00000000000000# Copyright 2015 SimpliVity Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
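#
# Unit tests for the VolumeAttachment versioned object and
# VolumeAttachmentList. As in the other object tests, the database layer is
# mocked out and fake rows come from cinder.tests.unit.fake_volume. Notable
# cases below: finish_attach() updating mountpoint, instance_uuid and
# attach_status; lazy-loading of the related Volume; and the on-load
# migration of legacy attachment_specs rows into the attachment's
# connector column.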
from unittest import mock import ddt from sqlalchemy.orm import attributes from cinder import db from cinder import objects from cinder.objects import fields from cinder.tests.unit import fake_constants as fake from cinder.tests.unit import fake_volume from cinder.tests.unit import objects as test_objects @ddt.ddt class TestVolumeAttachment(test_objects.BaseObjectsTestCase): @mock.patch('cinder.db.sqlalchemy.api.volume_attachment_get') def test_get_by_id(self, volume_attachment_get): db_attachment = fake_volume.volume_attachment_db_obj() attachment_obj = fake_volume.volume_attachment_ovo(self.context) volume_attachment_get.return_value = db_attachment attachment = objects.VolumeAttachment.get_by_id(self.context, fake.ATTACHMENT_ID) self._compare(self, attachment_obj, attachment) @mock.patch.object(objects.Volume, 'get_by_id') def test_lazy_load_volume(self, volume_get_mock): volume = objects.Volume(self.context, id=fake.VOLUME_ID) volume_get_mock.return_value = volume attach = objects.VolumeAttachment(self.context, id=fake.ATTACHMENT_ID, volume_id=volume.id) r = attach.volume self.assertEqual(volume, r) volume_get_mock.assert_called_once_with(self.context, volume.id) def test_from_db_object_no_volume(self): original_get = attributes.InstrumentedAttribute.__get__ def my_get(get_self, instance, owner): self.assertNotEqual('volume', get_self.key) return original_get(get_self, instance, owner) # Volume field is not loaded attach = fake_volume.models.VolumeAttachment(id=fake.ATTACHMENT_ID, volume_id=fake.VOLUME_ID) patch_str = 'sqlalchemy.orm.attributes.InstrumentedAttribute.__get__' with mock.patch(patch_str, side_effect=my_get): objects.VolumeAttachment._from_db_object( self.context, objects.VolumeAttachment(), attach) @mock.patch('cinder.db.volume_attachment_update') def test_save(self, volume_attachment_update): attachment = fake_volume.volume_attachment_ovo(self.context) attachment.attach_status = fields.VolumeAttachStatus.ATTACHING attachment.save() volume_attachment_update.assert_called_once_with( self.context, attachment.id, {'attach_status': fields.VolumeAttachStatus.ATTACHING}) @mock.patch('cinder.db.sqlalchemy.api.volume_attachment_get') def test_refresh(self, attachment_get): db_attachment1 = fake_volume.volume_attachment_db_obj() attachment_obj1 = fake_volume.volume_attachment_ovo(self.context) db_attachment2 = fake_volume.volume_attachment_db_obj() db_attachment2.mountpoint = '/dev/sdc' attachment_obj2 = fake_volume.volume_attachment_ovo( self.context, mountpoint='/dev/sdc') # On the second volume_attachment_get, return the volume attachment # with an updated mountpoint attachment_get.side_effect = [db_attachment1, db_attachment2] attachment = objects.VolumeAttachment.get_by_id(self.context, fake.ATTACHMENT_ID) self._compare(self, attachment_obj1, attachment) # mountpoint was updated, so a volume attachment refresh should have a # new value for that field attachment.refresh() self._compare(self, attachment_obj2, attachment) attachment_get.assert_has_calls([mock.call(self.context, fake.ATTACHMENT_ID), mock.call.__bool__(), mock.call(self.context, fake.ATTACHMENT_ID)]) @mock.patch('cinder.db.sqlalchemy.api.volume_attached') def test_volume_attached(self, volume_attached): attachment = fake_volume.volume_attachment_ovo(self.context) updated_values = {'mountpoint': '/dev/sda', 'attach_status': fields.VolumeAttachStatus.ATTACHED, 'instance_uuid': fake.INSTANCE_ID} volume_attached.return_value = (fake_volume.fake_db_volume(), updated_values) volume = 
attachment.finish_attach(fake.INSTANCE_ID, 'fake_host', '/dev/sda', 'rw') self.assertIsInstance(volume, objects.Volume) volume_attached.assert_called_once_with(mock.ANY, attachment.id, fake.INSTANCE_ID, 'fake_host', '/dev/sda', 'rw', True) self.assertEqual('/dev/sda', attachment.mountpoint) self.assertEqual(fake.INSTANCE_ID, attachment.instance_uuid) self.assertEqual(fields.VolumeAttachStatus.ATTACHED, attachment.attach_status) def test_migrate_attachment_specs(self): # Create an attachment. attachment = objects.VolumeAttachment( self.context, attach_status='attaching', volume_id=fake.VOLUME_ID) attachment.create() # Create some attachment_specs. Note that the key and value have to # be strings, the table doesn't handle things like a wwpns list # for a fibrechannel connector. connector = {'host': '127.0.0.1'} db.attachment_specs_update_or_create( self.context, attachment.id, connector) # Now get the volume attachment object from the database and make # sure the connector was migrated from the attachment_specs table # to the volume_attachment table and the specs were deleted. attachment = objects.VolumeAttachment.get_by_id( self.context, attachment.id) self.assertIn('connector', attachment) self.assertDictEqual(connector, attachment.connector) self.assertEqual(0, len(db.attachment_specs_get( self.context, attachment.id))) # Make sure we can store a fibrechannel type connector that has a wwpns # list value. connector['wwpns'] = ['21000024ff34c92d', '21000024ff34c92c'] attachment.connector = connector attachment.save() # Get the object from the DB again and make sure the connector is # there. attachment = objects.VolumeAttachment.get_by_id( self.context, attachment.id) self.assertIn('connector', attachment) self.assertDictEqual(connector, attachment.connector) class TestVolumeAttachmentList(test_objects.BaseObjectsTestCase): @mock.patch('cinder.db.volume_attachment_get_all_by_volume_id') def test_get_all_by_volume_id(self, get_used_by_volume_id): db_attachment = fake_volume.volume_attachment_db_obj() get_used_by_volume_id.return_value = [db_attachment] attachment_obj = fake_volume.volume_attachment_ovo(self.context) attachments = objects.VolumeAttachmentList.get_all_by_volume_id( self.context, mock.sentinel.volume_id) self.assertEqual(1, len(attachments)) self._compare(self, attachment_obj, attachments[0]) @mock.patch('cinder.db.volume_attachment_get_all_by_host') def test_get_all_by_host(self, get_by_host): db_attachment = fake_volume.volume_attachment_db_obj() attachment_obj = fake_volume.volume_attachment_ovo(self.context) get_by_host.return_value = [db_attachment] attachments = objects.VolumeAttachmentList.get_all_by_host( self.context, mock.sentinel.host) self.assertEqual(1, len(attachments)) self._compare(self, attachment_obj, attachments[0]) @mock.patch('cinder.db.volume_attachment_get_all_by_instance_uuid') def test_get_all_by_instance_uuid(self, get_by_instance_uuid): db_attachment = fake_volume.volume_attachment_db_obj() get_by_instance_uuid.return_value = [db_attachment] attachment_obj = fake_volume.volume_attachment_ovo(self.context) attachments = objects.VolumeAttachmentList.get_all_by_instance_uuid( self.context, mock.sentinel.uuid) self.assertEqual(1, len(attachments)) self._compare(self, attachment_obj, attachments[0]) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/objects/test_volume_type.py0000664000175000017500000003034100000000000024067 0ustar00zuulzuul00000000000000# Copyright 
2015 SimpliVity Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from unittest import mock from zoneinfo import ZoneInfo import ddt from oslo_utils import timeutils from cinder import db from cinder.db.sqlalchemy import models from cinder import objects from cinder.tests.unit.api.v2 import fakes as v2_fakes from cinder.tests.unit import fake_constants as fake from cinder.tests.unit import fake_volume from cinder.tests.unit import objects as test_objects @ddt.ddt class TestVolumeType(test_objects.BaseObjectsTestCase): @mock.patch('cinder.db.sqlalchemy.api._volume_type_get_full') def test_get_by_id(self, volume_type_get): db_volume_type = fake_volume.fake_db_volume_type() volume_type_get.return_value = db_volume_type volume_type = objects.VolumeType.get_by_id(self.context, fake.VOLUME_TYPE_ID) self._compare(self, db_volume_type, volume_type) @mock.patch('cinder.db.sqlalchemy.api._volume_type_get_full') def test_get_by_id_with_projects(self, volume_type_get): projects = [models.VolumeTypeProjects(project_id=fake.PROJECT_ID), models.VolumeTypeProjects(project_id=fake.PROJECT2_ID)] db_volume_type = fake_volume.fake_db_volume_type(projects=projects) volume_type_get.return_value = db_volume_type volume_type = objects.VolumeType.get_by_id(self.context, fake.VOLUME_TYPE_ID) db_volume_type['projects'] = [p.project_id for p in projects] self._compare(self, db_volume_type, volume_type) @mock.patch('cinder.db.sqlalchemy.api._volume_type_get_full') def test_get_by_id_with_string_projects(self, volume_type_get): projects = [fake.PROJECT_ID, fake.PROJECT2_ID] db_volume_type = fake_volume.fake_db_volume_type(projects=projects) volume_type_get.return_value = db_volume_type volume_type = objects.VolumeType.get_by_id(self.context, fake.VOLUME_TYPE_ID) self._compare(self, db_volume_type, volume_type) @mock.patch('cinder.db.sqlalchemy.api._volume_type_get_full') def test_get_by_id_null_spec(self, volume_type_get): db_volume_type = fake_volume.fake_db_volume_type( extra_specs={'foo': None}) volume_type_get.return_value = db_volume_type volume_type = objects.VolumeType.get_by_id(self.context, fake.VOLUME_TYPE_ID) self._compare(self, db_volume_type, volume_type) @mock.patch('cinder.volume.volume_types.get_by_name_or_id') def test_get_by_name_or_id(self, volume_type_get): db_volume_type = fake_volume.fake_db_volume_type() volume_type_get.return_value = db_volume_type volume_type = objects.VolumeType.get_by_name_or_id( self.context, fake.VOLUME_TYPE_ID) self._compare(self, db_volume_type, volume_type) @mock.patch('cinder.volume.volume_types.create') def test_create(self, volume_type_create): db_volume_type = fake_volume.fake_db_volume_type() volume_type_create.return_value = db_volume_type volume_type = objects.VolumeType(context=self.context) volume_type.name = db_volume_type['name'] volume_type.extra_specs = db_volume_type['extra_specs'] volume_type.is_public = db_volume_type['is_public'] volume_type.projects = db_volume_type['projects'] volume_type.description = db_volume_type['description'] volume_type.create() 
volume_type_create.assert_called_once_with( self.context, db_volume_type['name'], db_volume_type['extra_specs'], db_volume_type['is_public'], db_volume_type['projects'], db_volume_type['description']) @mock.patch('cinder.volume.volume_types.update') def test_save(self, volume_type_update): db_volume_type = fake_volume.fake_db_volume_type() volume_type = objects.VolumeType._from_db_object(self.context, objects.VolumeType(), db_volume_type) volume_type.description = 'foobar' volume_type.save() volume_type_update.assert_called_once_with(self.context, volume_type.id, volume_type.name, volume_type.description) @mock.patch('oslo_utils.timeutils.utcnow', return_value=timeutils.utcnow()) @mock.patch('cinder.db.sqlalchemy.api.volume_type_destroy') @mock.patch.object(db.sqlalchemy.api, 'volume_type_get', v2_fakes.fake_volume_type_get) def test_destroy(self, volume_type_destroy, utcnow_mock): volume_type_destroy.return_value = { 'deleted': True, 'deleted_at': utcnow_mock.return_value} db_volume_type = fake_volume.fake_db_volume_type() volume_type = objects.VolumeType._from_db_object(self.context, objects.VolumeType(), db_volume_type) volume_type.destroy() self.assertTrue(volume_type_destroy.called) admin_context = volume_type_destroy.call_args[0][0] self.assertTrue(admin_context.is_admin) self.assertTrue(volume_type.deleted) self.assertEqual( utcnow_mock.return_value.replace(tzinfo=ZoneInfo('UTC')), volume_type.deleted_at) @mock.patch('cinder.db.sqlalchemy.api._volume_type_get_full') def test_refresh(self, volume_type_get): db_type1 = fake_volume.fake_db_volume_type() db_type2 = db_type1.copy() db_type2['description'] = 'foobar' # updated description volume_type_get.side_effect = [db_type1, db_type2] volume_type = objects.VolumeType.get_by_id(self.context, fake.VOLUME_TYPE_ID) self._compare(self, db_type1, volume_type) # description was updated, so a volume type refresh should have a new # value for that field volume_type.refresh() self._compare(self, db_type2, volume_type) volume_type_get.assert_has_calls([mock.call(self.context, fake.VOLUME_TYPE_ID), mock.call.__bool__(), mock.call(self.context, fake.VOLUME_TYPE_ID)]) @mock.patch('cinder.objects.QualityOfServiceSpecs.get_by_id') @mock.patch('cinder.db.sqlalchemy.api._volume_type_get') def test_lazy_loading_qos(self, get_mock, qos_get_mock): qos_get_mock.return_value = objects.QualityOfServiceSpecs( id=fake.QOS_SPEC_ID) vol_type = fake_volume.fake_db_volume_type( qos_specs_id=fake.QOS_SPEC_ID) get_mock.return_value = vol_type volume_type = objects.VolumeType.get_by_id(self.context, vol_type['id']) self._compare(self, qos_get_mock.return_value, volume_type.qos_specs) qos_get_mock.assert_called_once_with(self.context, fake.QOS_SPEC_ID) @mock.patch('cinder.db.volume_type_access_get_all') @mock.patch('cinder.db.sqlalchemy.api._volume_type_get') def test_lazy_loading_projects(self, get_mock, get_projects_mock): vol_type = fake_volume.fake_db_volume_type( qos_specs_id=fake.QOS_SPEC_ID) get_mock.return_value = vol_type projects = [models.VolumeTypeProjects(project_id=fake.PROJECT_ID), models.VolumeTypeProjects(project_id=fake.PROJECT2_ID)] get_projects_mock.return_value = projects volume_type = objects.VolumeType.get_by_id(self.context, vol_type['id']) # Simulate this type has been loaded by a volume get_all method del volume_type.projects self.assertEqual([p.project_id for p in projects], volume_type.projects) get_projects_mock.assert_called_once_with(self.context, vol_type['id']) @mock.patch('cinder.db.volume_type_extra_specs_get') 
@mock.patch('cinder.db.sqlalchemy.api._volume_type_get') def test_lazy_loading_extra_specs(self, get_mock, get_specs_mock): get_specs_mock.return_value = {'key': 'value', 'key2': 'value2'} vol_type = fake_volume.fake_db_volume_type( qos_specs_id=fake.QOS_SPEC_ID) get_mock.return_value = vol_type volume_type = objects.VolumeType.get_by_id(self.context, vol_type['id']) # Simulate this type has been loaded by a volume get_all method del volume_type.extra_specs self.assertEqual(get_specs_mock.return_value, volume_type.extra_specs) get_specs_mock.assert_called_once_with(self.context, vol_type['id']) @ddt.data(' True', ' true', ' yes') def test_is_replicated_true(self, enabled): volume_type = fake_volume.fake_volume_type_obj( self.context, extra_specs={'replication_enabled': enabled}) self.assertTrue(volume_type.is_replicated()) def test_is_replicated_no_specs(self): volume_type = fake_volume.fake_volume_type_obj( self.context, extra_specs={}) self.assertFalse(bool(volume_type.is_replicated())) @ddt.data(' False', ' false', ' f', 'baddata', 'bad data') def test_is_replicated_specs_false(self, not_enabled): volume_type = fake_volume.fake_volume_type_obj( self.context, extra_specs={'replication_enabled': not_enabled}) self.assertFalse(volume_type.is_replicated()) @ddt.data(' False', ' false', ' f') def test_is_multiattach_specs_false(self, false): volume_type = fake_volume.fake_volume_type_obj( self.context, extra_specs={'multiattach': false}) self.assertFalse(volume_type.is_multiattach()) @ddt.data(' True', ' True') def test_is_multiattach_specs_true(self, true): volume_type = fake_volume.fake_volume_type_obj( self.context, extra_specs={'multiattach': true}) self.assertTrue(volume_type.is_multiattach()) class TestVolumeTypeList(test_objects.BaseObjectsTestCase): @mock.patch('cinder.volume.volume_types.get_all_types') def test_get_all(self, get_all_types): db_volume_type = fake_volume.fake_db_volume_type() get_all_types.return_value = {db_volume_type['name']: db_volume_type} volume_types = objects.VolumeTypeList.get_all(self.context) self.assertEqual(1, len(volume_types)) TestVolumeType._compare(self, db_volume_type, volume_types[0]) @mock.patch('cinder.volume.volume_types.get_all_types') def test_get_all_with_pagination(self, get_all_types): db_volume_type = fake_volume.fake_db_volume_type() get_all_types.return_value = {db_volume_type['name']: db_volume_type} volume_types = objects.VolumeTypeList.get_all(self.context, filters={'is_public': True}, marker=None, limit=1, sort_keys='id', sort_dirs='desc', offset=None) self.assertEqual(1, len(volume_types)) TestVolumeType._compare(self, db_volume_type, volume_types[0]) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315577.2191195 cinder-27.0.0/cinder/tests/unit/policies/0000775000175000017500000000000000000000000020263 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/policies/__init__.py0000664000175000017500000000000000000000000022362 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/policies/base.py0000664000175000017500000001754500000000000021563 0ustar00zuulzuul00000000000000# Copyright 2021 Red Hat, Inc. # All Rights Reserved. # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_log import log as logging from oslo_utils.fixture import uuidsentinel as uuids from cinder import context as cinder_context from cinder import exception from cinder.tests import fixtures from cinder.tests.unit import test LOG = logging.getLogger(__name__) # The list of users, with characterstics/persona implied by the name, # are declared statically for use as DDT data. all_users = [ 'legacy_admin', 'legacy_owner', 'system_admin', # NOTE: Xena does not support these system scoped personae. They need # to be tested in Yoga when support is added for system scope. # 'system_member', # 'system_reader', # 'system_foo', 'project_admin', 'project_member', 'project_reader', 'project_foo', 'other_project_member', 'other_project_reader', ] class BasePolicyTest(test.TestCase): def setUp(self, enforce_scope, enforce_new_defaults, *args, **kwargs): super().setUp(*args, **kwargs) self.enforce_scope = enforce_scope self.enforce_new_defaults = enforce_new_defaults self.override_config('enforce_scope', enforce_scope, 'oslo_policy') self.override_config('enforce_new_defaults', enforce_new_defaults, 'oslo_policy') self.policy = self.useFixture(fixtures.PolicyFixture()) self.admin_project_id = uuids.admin_project_id self.project_id = uuids.project_id self.project_id_other = uuids.project_id_other self.context_details = { 'legacy_admin': dict( project_id=self.admin_project_id, roles=['admin', 'member', 'reader'], ), 'legacy_owner': dict( project_id=self.project_id, roles=[], ), 'system_admin': dict( roles=['admin', 'member', 'reader'], # NOTE: The system_admin in Xena is project scoped, and will # change in Yoga when support is added for system scope. project_id=self.admin_project_id, # system_scope='all', ), 'project_admin': dict( project_id=self.project_id, roles=['admin', 'member', 'reader'], ), 'project_member': dict( project_id=self.project_id, roles=['member', 'reader'], ), 'project_reader': dict( project_id=self.project_id, roles=['reader'], ), 'project_foo': dict( project_id=self.project_id, roles=['foo'], ), 'other_project_member': dict( project_id=self.project_id_other, roles=['member', 'reader'], ), 'other_project_reader': dict( project_id=self.project_id_other, roles=['reader'], ), } # These context objects are useful for subclasses to create test # resources (e.g. volumes). Subclasses may create additional # contexts as needed. self.project_admin_context = self.create_context('project_admin') self.project_member_context = self.create_context('project_member') def is_authorized(self, user_id, authorized_users, unauthorized_users): if user_id in authorized_users: return True elif user_id in unauthorized_users: return False else: msg = ('"%s" must be either an authorized or unauthorized user.' % (user_id)) raise exception.CinderException(message=msg) def create_context(self, user_id): try: details = self.context_details[user_id] except KeyError: msg = ('No context details defined for user_id "%s".' 
% (user_id)) raise exception.CinderException(message=msg) return cinder_context.RequestContext(user_id=user_id, **details) def common_policy_check(self, user_id, authorized_users, unauthorized_users, unauthorized_exceptions, rule_name, func, req, *args, **kwargs): req.environ['cinder.context'] = self.create_context(user_id) fatal = kwargs.pop('fatal', True) def ensure_raises(req, *args, **kwargs): try: func(req, *args, **kwargs) except exception.NotAuthorized as exc: # In case of multi-policy APIs, PolicyNotAuthorized can be # raised from either of the policy so checking the error # message, which includes the rule name, can mismatch. Tests # verifying the multi policy can pass rule_name as None to # skip the error message assert. if (isinstance(exc, exception.PolicyNotAuthorized) and rule_name is not None): self.assertEqual( "Policy doesn't allow %s to be performed." % rule_name, exc.args[0]) except Exception as exc: self.assertIn(type(exc), unauthorized_exceptions) else: msg = ('"%s" was authorized for "%s" policy when it should ' 'be unauthorized.' % (user_id, rule_name)) raise exception.CinderException(message=msg) return None if self.is_authorized(user_id, authorized_users, unauthorized_users): # Verify the context having allowed scope and roles pass # the policy check. LOG.info('Testing authorized "%s"', user_id) # noqa: ignore=C309 response = func(req, *args, **kwargs) else: # Verify the context not having allowed scope or roles fail # the policy check. LOG.info('Testing unauthorized "%s"', user_id) # noqa: ignore=C309 if not fatal: try: response = func(req, *args, **kwargs) # We need to ignore the PolicyNotAuthorized # exception here so that we can add the correct response # in unauthorize_response for the case of fatal=False. # This handle the case of multi policy checks where tests # are verifying the second policy via the response of # fatal-False and ignoring the response checks where the # first policy itself fail to pass (even test override the # first policy to allow for everyone but still, scope # checks can leads to PolicyNotAuthorized error). # For example: flavor extra specs policy for GET flavor # API. In that case, flavor extra spec policy is checked # after the GET flavor policy. So any context failing on # GET flavor will raise the PolicyNotAuthorized and for # that case we do not have any way to verify the flavor # extra specs so skip that context to check in test. except exception.PolicyNotAuthorized: pass else: response = ensure_raises(req, *args, **kwargs) return response ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/policies/test_attachments.py0000664000175000017500000002332100000000000024210 0ustar00zuulzuul00000000000000# Copyright 2021 Red Hat, Inc. # All Rights Reserved. # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
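#
# Policy tests for the attachments API. Each test runs once per persona in
# base.all_users (via ddt) and delegates the authorized/unauthorized
# bookkeeping to BasePolicyTest.common_policy_check(). AttachmentsPolicyTest
# runs with deprecated policy rules still enabled
# (enforce_new_defaults=False); AttachmentsPolicySecureRbacTest at the end
# of the module repeats the same tests with the new secure-RBAC defaults
# enforced and correspondingly stricter authorized/unauthorized user lists.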
from unittest import mock import ddt from cinder.api import microversions as mv from cinder.api.v3 import attachments from cinder import exception from cinder.policies import attachments as attachments_policies from cinder.tests.unit.api import fakes as fake_api from cinder.tests.unit import fake_constants as fake from cinder.tests.unit.policies import base from cinder.tests.unit import utils as test_utils from cinder.volume import manager as volume_manager @ddt.ddt class AttachmentsPolicyTest(base.BasePolicyTest): authorized_users = [ 'legacy_admin', 'legacy_owner', 'system_admin', 'project_admin', 'project_member', 'project_reader', 'project_foo', ] unauthorized_users = [ 'system_member', 'system_reader', 'system_foo', 'other_project_member', 'other_project_reader', ] # Basic policy test is without enforcing scope (which cinder doesn't # yet support) and deprecated rules enabled. def setUp(self, enforce_scope=False, enforce_new_defaults=False, *args, **kwargs): super().setUp(enforce_scope, enforce_new_defaults, *args, **kwargs) self.controller = attachments.AttachmentsController() self.manager = volume_manager.VolumeManager() self.manager.driver = mock.MagicMock() self.manager.driver.initialize_connection = mock.MagicMock() self.manager.driver.initialize_connection.side_effect = ( self._initialize_connection) self.api_path = '/v3/%s/attachments' % (self.project_id) self.api_version = mv.NEW_ATTACH self.mock_is_service = self.patch( 'cinder.volume.api.API.is_service_request', return_value=True) def _initialize_connection(self, volume, connector): return {'data': connector} def _create_attachment(self): vol_type = test_utils.create_volume_type(self.project_admin_context, name='fake_vol_type', testcase_instance=self) volume = test_utils.create_volume(self.project_member_context, volume_type_id=vol_type.id, admin_metadata={ 'attached_mode': 'ro' }, testcase_instance=self) volume = test_utils.attach_volume(self.project_member_context, volume.id, fake.INSTANCE_ID, 'fake_host', 'fake_mountpoint') return volume.volume_attachment[0].id @ddt.data(*base.all_users) def test_create_attachment_policy(self, user_id): volume = test_utils.create_volume(self.project_member_context, testcase_instance=self) rule_name = attachments_policies.CREATE_POLICY url = self.api_path req = fake_api.HTTPRequest.blank(url, version=self.api_version) req.method = 'POST' body = { "attachment": { "instance_uuid": fake.INSTANCE_ID, "volume_uuid": volume.id, } } # Some context return HTTP 404 (rather than 403). unauthorized_exceptions = [ exception.VolumeNotFound, ] self.common_policy_check(user_id, self.authorized_users, self.unauthorized_users, unauthorized_exceptions, rule_name, self.controller.create, req, body=body) @ddt.data(*base.all_users) @mock.patch('cinder.volume.rpcapi.VolumeAPI.attachment_update') def test_update_attachment_policy(self, user_id, mock_attachment_update): # Redirect the RPC call directly to the volume manager. 
def attachment_update(*args): return self.manager.attachment_update(*args) mock_attachment_update.side_effect = attachment_update rule_name = attachments_policies.UPDATE_POLICY attachment_id = self._create_attachment() url = '%s/%s' % (self.api_path, attachment_id) req = fake_api.HTTPRequest.blank(url, version=self.api_version) req.method = 'PUT' body = { "attachment": { "connector": { "initiator": "iqn.1993-08.org.debian: 01: cad181614cec", "ip": "192.168.1.20", "platform": "x86_64", "host": "tempest-1", "os_type": "linux2", "multipath": False, "mountpoint": "/dev/vdb", "mode": "ro" } } } unauthorized_exceptions = [] self.common_policy_check(user_id, self.authorized_users, self.unauthorized_users, unauthorized_exceptions, rule_name, self.controller.update, req, id=attachment_id, body=body) @ddt.data(*base.all_users) @mock.patch('cinder.volume.rpcapi.VolumeAPI.attachment_delete') def test_delete_attachment_policy(self, user_id, mock_attachment_delete): # Redirect the RPC call directly to the volume manager. def attachment_delete(*args): return self.manager.attachment_delete(*args) mock_attachment_delete.side_effect = attachment_delete rule_name = attachments_policies.DELETE_POLICY attachment_id = self._create_attachment() url = '%s/%s' % (self.api_path, attachment_id) req = fake_api.HTTPRequest.blank(url, version=self.api_version) req.method = 'DELETE' unauthorized_exceptions = [] self.common_policy_check(user_id, self.authorized_users, self.unauthorized_users, unauthorized_exceptions, rule_name, self.controller.delete, req, id=attachment_id) @ddt.data(*base.all_users) def test_complete_attachment_policy(self, user_id): rule_name = attachments_policies.COMPLETE_POLICY attachment_id = self._create_attachment() url = '%s/%s/action' % (self.api_path, attachment_id) req = fake_api.HTTPRequest.blank(url, version=mv.NEW_ATTACH_COMPLETION) req.method = 'POST' body = { "os-complete": {} } unauthorized_exceptions = [ exception.VolumeNotFound, ] self.common_policy_check(user_id, self.authorized_users, self.unauthorized_users, unauthorized_exceptions, rule_name, self.controller.complete, req, id=attachment_id, body=body) @ddt.data(*base.all_users) def test_multiattach_bootable_volume_policy(self, user_id): volume = test_utils.create_volume(self.project_member_context, multiattach=True, status='in-use', bootable=True, testcase_instance=self) rule_name = attachments_policies.MULTIATTACH_BOOTABLE_VOLUME_POLICY url = self.api_path req = fake_api.HTTPRequest.blank(url, version=self.api_version) req.method = 'POST' body = { "attachment": { "instance_uuid": fake.INSTANCE_ID, "volume_uuid": volume.id, } } # Relax the CREATE_POLICY in order to get past that check, which takes # place prior to checking the MULTIATTACH_BOOTABLE_VOLUME_POLICY. self.policy.set_rules({attachments_policies.CREATE_POLICY: ""}, overwrite=False) unauthorized_exceptions = [ exception.VolumeNotFound, ] self.common_policy_check(user_id, self.authorized_users, self.unauthorized_users, unauthorized_exceptions, rule_name, self.controller.create, req, body=body) class AttachmentsPolicySecureRbacTest(AttachmentsPolicyTest): authorized_users = [ 'legacy_admin', 'system_admin', 'project_admin', 'project_member', ] unauthorized_users = [ 'legacy_owner', 'system_member', 'system_reader', 'system_foo', 'project_reader', 'project_foo', 'other_project_member', 'other_project_reader', ] def setUp(self, *args, **kwargs): # Test secure RBAC by disabling deprecated policy rules (scope # is still not enabled). 
super().setUp(enforce_scope=False, enforce_new_defaults=True, *args, **kwargs) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/policies/test_backups.py0000664000175000017500000003646700000000000023344 0ustar00zuulzuul00000000000000# All Rights Reserved. # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from unittest import mock import ddt from cinder.api import microversions as mv from cinder.api.v3 import backups from cinder import exception from cinder.objects import fields from cinder.policies import backups as backups_policies from cinder.tests.unit.api import fakes as fake_api from cinder.tests.unit.policies import base from cinder.tests.unit import utils as test_utils @ddt.ddt class BackupsPolicyTest(base.BasePolicyTest): authorized_readers = [ 'legacy_admin', 'legacy_owner', 'system_admin', 'project_admin', 'project_member', 'project_reader', 'project_foo', ] unauthorized_readers = [ 'system_member', 'system_reader', 'system_foo', 'other_project_member', 'other_project_reader', ] authorized_members = [ 'legacy_admin', 'legacy_owner', 'system_admin', 'project_admin', 'project_member', 'project_reader', 'project_foo', ] unauthorized_members = [ 'system_member', 'system_reader', 'system_foo', 'other_project_member', 'other_project_reader', ] authorized_admins = [ 'legacy_admin', 'system_admin', 'project_admin', ] unauthorized_admins = [ 'legacy_owner', 'system_member', 'system_reader', 'system_foo', 'project_member', 'project_reader', 'project_foo', 'other_project_member', 'other_project_reader', ] def setUp(self, enforce_scope=False, enforce_new_defaults=False, *args, **kwargs): super().setUp(enforce_scope, enforce_new_defaults, *args, **kwargs) self.override_config('backup_use_same_host', True) self.controller = backups.BackupsController() self.api_path = '/v3/%s/backups' % (self.project_id) self.api_version = mv.BASE_VERSION def _create_backup(self): backup = test_utils.create_backup(self.project_member_context, status=fields.BackupStatus.AVAILABLE, size=1) self.addCleanup(backup.destroy) return backup @ddt.data(*base.all_users) def test_get_all_backups_policy(self, user_id): self._create_backup() rule_name = backups_policies.GET_ALL_POLICY url = self.api_path req = fake_api.HTTPRequest.blank(url, version=self.api_version) # Generally, any logged in user can list all backups. authorized_users = [user_id] unauthorized_users = [] # The exception is when deprecated rules are disabled, in which case # roles are enforced. Users without the 'reader' role should be # blocked. if self.enforce_new_defaults: context = self.create_context(user_id) if 'reader' not in context.roles: authorized_users = [] unauthorized_users = [user_id] response = self.common_policy_check(user_id, authorized_users, unauthorized_users, [], rule_name, self.controller.index, req) # For some users, even if they're authorized, the list of backups # will be empty if they are not in the backup's project. 
empty_response_users = [ *self.unauthorized_readers, # legacy_admin and system_admin do not have a project_id, and # so the list of backups returned will be empty. 'legacy_admin', 'system_admin', ] backups = response['backups'] if response else [] backup_count = 0 if user_id in empty_response_users else 1 self.assertEqual(backup_count, len(backups)) @ddt.data(*base.all_users) def test_get_backup_policy(self, user_id): backup_id = self._create_backup().id rule_name = backups_policies.GET_POLICY url = '%s/%s' % (self.api_path, backup_id) req = fake_api.HTTPRequest.blank(url, version=self.api_version) unauthorized_exceptions = [ exception.BackupNotFound, ] self.common_policy_check(user_id, self.authorized_readers, self.unauthorized_readers, unauthorized_exceptions, rule_name, self.controller.show, req, id=backup_id) @ddt.data(*base.all_users) def test_create_backup_policy(self, user_id): volume = test_utils.create_volume(self.project_member_context, testcase_instance=self) rule_name = backups_policies.CREATE_POLICY url = self.api_path req = fake_api.HTTPRequest.blank(url, version=self.api_version) req.method = 'POST' body = { "backup": { "container": None, "description": None, "name": "backup001", "volume_id": volume.id, } } unauthorized_exceptions = [ exception.VolumeNotFound, ] self.common_policy_check(user_id, self.authorized_members, self.unauthorized_members, unauthorized_exceptions, rule_name, self.controller.create, req, body=body) @ddt.data(*base.all_users) def test_update_backup_policy(self, user_id): backup_id = self._create_backup().id rule_name = backups_policies.UPDATE_POLICY url = '%s/%s' % (self.api_path, backup_id) req = fake_api.HTTPRequest.blank(url, version=mv.BACKUP_UPDATE) req.method = 'PUT' body = { "backup": { "name": "backup666", } } # Relax the GET_POLICY in order to get past that check. self.policy.set_rules({backups_policies.GET_POLICY: ""}, overwrite=False) unauthorized_exceptions = [ exception.BackupNotFound, ] self.common_policy_check(user_id, self.authorized_members, self.unauthorized_members, unauthorized_exceptions, rule_name, self.controller.update, req, id=backup_id, body=body) @ddt.data(*base.all_users) @mock.patch('cinder.backup.api.API._is_backup_service_enabled', return_value=True) def test_delete_backup_policy(self, user_id, mock_backup_service_enabled): backup_id = self._create_backup().id rule_name = backups_policies.DELETE_POLICY url = '%s/%s' % (self.api_path, backup_id) req = fake_api.HTTPRequest.blank(url, version=self.api_version) req.method = 'DELETE' # Relax the GET_POLICY in order to get past that check. 
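# (The delete controller looks up the backup first, which enforces
# GET_POLICY, so that rule is relaxed to ensure this test exercises
# DELETE_POLICY itself.)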
self.policy.set_rules({backups_policies.GET_POLICY: ""}, overwrite=False) unauthorized_exceptions = [ exception.BackupNotFound, ] self.common_policy_check(user_id, self.authorized_members, self.unauthorized_members, unauthorized_exceptions, rule_name, self.controller.delete, req, id=backup_id) @ddt.data(*base.all_users) @mock.patch('cinder.backup.api.API._is_backup_service_enabled', return_value=True) @mock.patch('cinder.backup.rpcapi.BackupAPI.restore_backup') def test_restore_backup_policy(self, user_id, mock_backup_restore, mock_backup_service_enabled): backup_id = self._create_backup().id volume = test_utils.create_volume(self.project_member_context, testcase_instance=self) rule_name = backups_policies.RESTORE_POLICY url = '%s/%s' % (self.api_path, backup_id) req = fake_api.HTTPRequest.blank(url, version=self.api_version) req.method = 'POST' body = { "restore": { "volume_id": volume.id } } # Relax the GET_POLICY in order to get past that check. self.policy.set_rules({backups_policies.GET_POLICY: ""}, overwrite=False) unauthorized_exceptions = [ exception.BackupNotFound, ] self.common_policy_check(user_id, self.authorized_members, self.unauthorized_members, unauthorized_exceptions, rule_name, self.controller.restore, req, id=backup_id, body=body) @ddt.data(*base.all_users) @mock.patch('cinder.backup.api.API._list_backup_hosts') @mock.patch('cinder.backup.api.API._get_import_backup') @mock.patch('cinder.backup.rpcapi.BackupAPI.import_record') def test_import_backup_policy(self, user_id, mock_import_record, mock_get_import_backup, mock_list_backup_hosts): def _list_backup_hosts(*args): return ['backup-host'] def _get_import_backup(*args): return self._create_backup() mock_list_backup_hosts.side_effect = _list_backup_hosts mock_get_import_backup.side_effect = _get_import_backup rule_name = backups_policies.IMPORT_POLICY url = '%s/import_record' % (self.api_path) req = fake_api.HTTPRequest.blank(url, version=self.api_version) req.method = 'PUT' body = { "backup-record": { "backup_service": "backup-host", "backup_url": "eyJzdGF0" } } unauthorized_exceptions = [] self.common_policy_check(user_id, self.authorized_admins, self.unauthorized_admins, unauthorized_exceptions, rule_name, self.controller.import_record, req, body=body) @ddt.data(*base.all_users) @mock.patch('cinder.backup.api.API._get_available_backup_service_host', return_value='backup-host') @mock.patch('cinder.backup.rpcapi.BackupAPI.export_record', return_value={ "backup_service": "backup-host", "backup_url": "eyJzdGF0" }) def test_export_backup_policy(self, user_id, mock_export_record, mock_get_backup_service_host): backup_id = self._create_backup().id rule_name = backups_policies.EXPORT_POLICY url = '%s/%s/export_record' % (self.api_path, backup_id) req = fake_api.HTTPRequest.blank(url, version=self.api_version) # Relax the GET_POLICY in order to get past that check. self.policy.set_rules({backups_policies.GET_POLICY: ""}, overwrite=False) unauthorized_exceptions = [ exception.BackupNotFound, ] self.common_policy_check(user_id, self.authorized_admins, self.unauthorized_admins, unauthorized_exceptions, rule_name, self.controller.export_record, req, id=backup_id) @ddt.data(*base.all_users) def test_backup_attributes_policy(self, user_id): backup_id = self._create_backup().id # Although we're testing the BACKUP_ATTRIBUTES_POLICY, unauthorized # readers will (correctly) fail on the GET_POLICY. For authorized # readers, later we'll test the response to verify the # BACKUP_ATTRIBUTES_POLICY is properly enforced. 
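# The request below uses the BACKUP_PROJECT_USER_ID microversion so that
# the view builder attempts to include the user_id attribute, which is
# what triggers the BACKUP_ATTRIBUTES_POLICY check.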
rule_name = backups_policies.GET_POLICY url = '%s/%s' % (self.api_path, backup_id) req = fake_api.HTTPRequest.blank(url, version=mv.BACKUP_PROJECT_USER_ID) unauthorized_exceptions = [ exception.BackupNotFound, ] response = self.common_policy_check(user_id, self.authorized_readers, self.unauthorized_readers, unauthorized_exceptions, rule_name, self.controller.show, req, id=backup_id) if user_id in self.authorized_readers: # Check whether the backup record includes a user_id. Only # authorized_admins should see one. backup_user_id = response['backup'].get('user_id', None) if user_id in self.authorized_admins: self.assertIsNotNone(backup_user_id) else: self.assertIsNone(backup_user_id) class BackupsPolicySecureRbacTest(BackupsPolicyTest): authorized_readers = [ 'legacy_admin', 'system_admin', 'project_admin', 'project_member', 'project_reader', ] unauthorized_readers = [ 'legacy_owner', 'system_member', 'system_reader', 'system_foo', 'project_foo', 'other_project_member', 'other_project_reader', ] authorized_members = [ 'legacy_admin', 'system_admin', 'project_admin', 'project_member', ] unauthorized_members = [ 'legacy_owner', 'system_member', 'system_reader', 'system_foo', 'project_reader', 'project_foo', 'other_project_member', 'other_project_reader', ] # NOTE(Xena): The authorized_admins and unauthorized_admins are the same # as the BackupsPolicyTest's. This is because in Xena the "admin only" # rules are the legacy RULE_ADMIN_API. This will change in Yoga, when # RULE_ADMIN_API will be deprecated in favor of the SYSTEM_ADMIN rule that # is scope based. authorized_admins = [ 'legacy_admin', 'system_admin', 'project_admin', ] unauthorized_admins = [ 'legacy_owner', 'system_member', 'system_reader', 'system_foo', 'project_member', 'project_reader', 'project_foo', 'other_project_member', 'other_project_reader', ] def setUp(self, *args, **kwargs): # Test secure RBAC by disabling deprecated policy rules (scope # is still not enabled). super().setUp(enforce_scope=False, enforce_new_defaults=True, *args, **kwargs) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/policies/test_base.py0000664000175000017500000000713500000000000022614 0ustar00zuulzuul00000000000000# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
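# These are functional-style policy tests: CinderPolicyTests below builds
# real HTTP requests and runs them through fakes.wsgi_app() under several
# prebuilt request contexts (project admin, other-project admin, plain
# user, system admin).  An illustrative sketch of how a subclass would
# typically use the helper (the path shown is hypothetical):
#
#     response = self._get_request_response(
#         self.user_context, '/v3/%s/volumes' % self.project_id, 'GET')
#     self.assertEqual(200, response.status_int)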
from oslo_serialization import jsonutils import webob from cinder.api import microversions as mv from cinder import context as cinder_context from cinder import objects from cinder.tests.unit.api import fakes from cinder.tests.unit import fake_constants from cinder.tests.unit.image import fake as fake_image from cinder.tests.unit import test class CinderPolicyTests(test.TestCase): def setUp(self): super(CinderPolicyTests, self).setUp() self.project_id = fake_constants.PROJECT_ID self.other_project_id = fake_constants.PROJECT2_ID self.admin_context = cinder_context.RequestContext( user_id=fake_constants.USER_ID, project_id=self.project_id, roles=['admin'] ) self.other_admin_context = cinder_context.RequestContext( user_id=fake_constants.USER_ID, project_id=self.other_project_id, roles=['admin'] ) self.user_context = cinder_context.RequestContext( user_id=fake_constants.USER2_ID, project_id=self.project_id, roles=['non-admin'] ) self.other_user_context = cinder_context.RequestContext( user_id=fake_constants.USER3_ID, project_id=self.other_project_id, roles=['non-admin'] ) self.system_admin_context = cinder_context.RequestContext( user_id=fake_constants.USER_ID, project_id=self.project_id, roles=['admin'], system_scope='all') fake_image.mock_image_service(self) def _get_request_response(self, context, path, method, body=None, microversion=mv.BASE_VERSION): request = webob.Request.blank(path) request.content_type = 'application/json' request.headers = mv.get_mv_header(microversion) request.method = method if body: request.headers["content-type"] = "application/json" request.body = jsonutils.dump_as_bytes(body) return request.get_response( fakes.wsgi_app(fake_auth_context=context) ) def _create_fake_volume(self, context, status=None, attach_status=None, metadata=None, admin_metadata=None): vol = { 'display_name': 'fake_volume1', 'status': 'available', 'project_id': context.project_id } if status: vol['status'] = status if attach_status: vol['attach_status'] = attach_status if metadata: vol['metadata'] = metadata if admin_metadata: vol['admin_metadata'] = admin_metadata volume = objects.Volume(context=context, **vol) volume.create() return volume def _create_fake_type(self, context): vol_type = { 'name': 'fake_volume1', 'extra_specs': {}, 'is_public': True, 'projects': [], 'description': 'A fake volume type' } volume_type = objects.VolumeType(context=context, **vol_type) volume_type.create() return volume_type ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/policies/test_default_volume_types.py0000664000175000017500000002745600000000000026151 0ustar00zuulzuul00000000000000# Copyright 2020 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from http import HTTPStatus from unittest import mock import uuid import ddt from webob import exc from cinder.api import api_utils from cinder.api import microversions as mv from cinder.api.v3 import default_types from cinder import db from cinder.policies import default_types as default_type_policies from cinder.tests.unit.api import fakes as fake_api from cinder.tests.unit import fake_constants from cinder.tests.unit.policies import base from cinder.tests.unit.policies import test_base from cinder.tests.unit import utils as test_utils class FakeProject(object): def __init__(self, id=None, name=None): if id: self.id = id else: self.id = uuid.uuid4().hex self.name = name self.description = 'fake project description' self.domain_id = 'default' class DefaultVolumeTypesPolicyTests(test_base.CinderPolicyTests): class FakeDefaultType: project_id = fake_constants.PROJECT_ID volume_type_id = fake_constants.VOLUME_TYPE_ID def setUp(self): super(DefaultVolumeTypesPolicyTests, self).setUp() self.volume_type = self._create_fake_type(self.admin_context) self.project = FakeProject() # Need to mock out Keystone so the functional tests don't require other # services _keystone_client = mock.MagicMock() _keystone_client.version = 'v3' _keystone_client.projects.get.side_effect = self._get_project _keystone_client_get = mock.patch( 'cinder.api.api_utils._keystone_client', lambda *args, **kwargs: _keystone_client) _keystone_client_get.start() self.addCleanup(_keystone_client_get.stop) def _get_project(self, project_id, *args, **kwargs): return self.project def test_system_admin_can_set_default(self): system_admin_context = self.system_admin_context path = '/v3/default-types/%s' % system_admin_context.project_id body = { 'default_type': {"volume_type": self.volume_type.id} } response = self._get_request_response(system_admin_context, path, 'PUT', body=body, microversion= mv.DEFAULT_TYPE_OVERRIDES) self.assertEqual(HTTPStatus.OK, response.status_int) def test_project_admin_can_set_default(self): admin_context = self.admin_context path = '/v3/default-types/%s' % admin_context.project_id body = { 'default_type': {"volume_type": self.volume_type.id} } response = self._get_request_response(admin_context, path, 'PUT', body=body, microversion= mv.DEFAULT_TYPE_OVERRIDES) self.assertEqual(HTTPStatus.OK, response.status_int) @mock.patch.object(db, 'project_default_volume_type_get', return_value=FakeDefaultType()) def test_system_admin_can_get_default(self, mock_default_get): system_admin_context = self.system_admin_context path = '/v3/default-types/%s' % system_admin_context.project_id response = self._get_request_response(system_admin_context, path, 'GET', microversion= mv.DEFAULT_TYPE_OVERRIDES) self.assertEqual(HTTPStatus.OK, response.status_int) def test_project_admin_can_get_default(self): admin_context = self.admin_context path = '/v3/default-types/%s' % admin_context.project_id body = { 'default_type': {"volume_type": self.volume_type.id} } self._get_request_response(admin_context, path, 'PUT', body=body, microversion= mv.DEFAULT_TYPE_OVERRIDES) path = '/v3/default-types/%s' % admin_context.project_id response = self._get_request_response(admin_context, path, 'GET', microversion= mv.DEFAULT_TYPE_OVERRIDES) self.assertEqual(HTTPStatus.OK, response.status_int) def test_system_admin_can_get_all_default(self): system_admin_context = self.system_admin_context path = '/v3/default-types' response = self._get_request_response(system_admin_context, path, 'GET', microversion= mv.DEFAULT_TYPE_OVERRIDES) 
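# Listing every project's default type override is an administrative
# operation, so the system-scoped admin context is expected to get a
# 200 back here.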
self.assertEqual(HTTPStatus.OK, response.status_int) def test_system_admin_can_unset_default(self): system_admin_context = self.system_admin_context path = '/v3/default-types/%s' % system_admin_context.project_id response = self._get_request_response(system_admin_context, path, 'DELETE', microversion= mv.DEFAULT_TYPE_OVERRIDES) self.assertEqual(HTTPStatus.NO_CONTENT, response.status_int) def test_project_admin_can_unset_default(self): admin_context = self.admin_context path = '/v3/default-types/%s' % admin_context.project_id response = self._get_request_response(admin_context, path, 'DELETE', microversion= mv.DEFAULT_TYPE_OVERRIDES) self.assertEqual(HTTPStatus.NO_CONTENT, response.status_int) @ddt.ddt class DefaultVolumeTypesPolicyTest(base.BasePolicyTest): authorized_admins = [ 'system_admin', 'legacy_admin', 'project_admin', ] unauthorized_admins = [ 'legacy_owner', 'system_member', 'system_reader', 'system_foo', 'project_member', 'project_reader', 'project_foo', 'other_project_member', 'other_project_reader', ] # Basic policy test is without enforcing scope (which cinder doesn't # yet support) and deprecated rules enabled. def setUp(self, enforce_scope=False, enforce_new_defaults=False, *args, **kwargs): super().setUp(enforce_scope, enforce_new_defaults, *args, **kwargs) self.controller = default_types.DefaultTypesController() self.api_path = '/v3/default-types/%s' % (self.project_id) self.api_version = mv.DEFAULT_TYPE_OVERRIDES def _create_volume_type(self): vol_type = test_utils.create_volume_type(self.project_admin_context, name='fake_vol_type', testcase_instance=self) return vol_type @ddt.data(*base.all_users) @mock.patch.object(api_utils, 'get_project') def test_default_type_set_policy(self, user_id, fake_project): vol_type = self._create_volume_type() fake_project.return_value = FakeProject(id=self.project_id) rule_name = default_type_policies.CREATE_UPDATE_POLICY url = self.api_path req = fake_api.HTTPRequest.blank(url, version=self.api_version) req.method = 'POST' body = {"default_type": {"volume_type": vol_type.id}} unauthorized_exceptions = [exc.HTTPForbidden] self.common_policy_check(user_id, self.authorized_admins, self.unauthorized_admins, unauthorized_exceptions, rule_name, self.controller.create_update, req, id=vol_type.id, body=body) @ddt.data(*base.all_users) @mock.patch.object(default_types.db, 'project_default_volume_type_get') @mock.patch.object(api_utils, 'get_project') def test_default_type_get_policy(self, user_id, fake_project, mock_default_get): fake_project.return_value = FakeProject(id=self.project_id) rule_name = default_type_policies.GET_POLICY url = self.api_path req = fake_api.HTTPRequest.blank(url, version=self.api_version) unauthorized_exceptions = [exc.HTTPForbidden] self.common_policy_check(user_id, self.authorized_admins, self.unauthorized_admins, unauthorized_exceptions, rule_name, self.controller.detail, req, id=self.project_id) @ddt.data(*base.all_users) @mock.patch.object(default_types.db, 'project_default_volume_type_get') def test_default_type_get_all_policy(self, user_id, mock_default_get): rule_name = default_type_policies.GET_ALL_POLICY url = self.api_path req = fake_api.HTTPRequest.blank(url, version=self.api_version) unauthorized_exceptions = [exc.HTTPForbidden] # NOTE: The users 'legacy_admin' and 'project_admin' pass for # GET_ALL_POLICY since with enforce_new_defaults=False, we have # a logical OR between old policy and new one hence RULE_ADMIN_API # allows them to pass self.common_policy_check(user_id, self.authorized_admins, 
self.unauthorized_admins, unauthorized_exceptions, rule_name, self.controller.index, req) @ddt.data(*base.all_users) @mock.patch.object(api_utils, 'get_project') @mock.patch.object(default_types.db, 'project_default_volume_type_get') def test_default_type_unset_policy(self, user_id, mock_default_unset, fake_project): fake_project.return_value = FakeProject(id=self.project_id) rule_name = default_type_policies.DELETE_POLICY url = self.api_path req = fake_api.HTTPRequest.blank(url, version=self.api_version) req.method = 'DELETE' unauthorized_exceptions = [exc.HTTPForbidden] self.common_policy_check(user_id, self.authorized_admins, self.unauthorized_admins, unauthorized_exceptions, rule_name, self.controller.delete, req, id=self.project_id) class DefaultVolumeTypesPolicySecureRbacTest(DefaultVolumeTypesPolicyTest): authorized_admins = [ 'legacy_admin', 'system_admin', 'project_admin', ] unauthorized_admins = [ 'legacy_owner', 'system_member', 'system_reader', 'system_foo', 'project_member', 'project_reader', 'project_foo', 'other_project_member', 'other_project_reader', ] def setUp(self, *args, **kwargs): # Test secure RBAC by disabling deprecated policy rules (scope # is still not enabled). super().setUp(enforce_scope=False, enforce_new_defaults=True, *args, **kwargs) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/policies/test_group_actions.py0000664000175000017500000003515200000000000024556 0ustar00zuulzuul00000000000000# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from unittest import mock import ddt from cinder.api import microversions as mv from cinder.api.v3 import groups from cinder import exception from cinder.objects import fields from cinder.policies import group_actions as group_policies from cinder.tests.unit.api import fakes as fake_api from cinder.tests.unit.policies import base from cinder.tests.unit import utils as test_utils from cinder.volume import group_types @ddt.ddt class GroupActionPolicyTest(base.BasePolicyTest): sysadmins = [ 'legacy_admin', 'project_admin', 'system_admin', ] non_sysadmins = [ 'legacy_owner', 'project_member', 'project_reader', 'project_foo', 'system_member', 'system_reader', 'system_foo', 'other_project_member', 'other_project_reader', ] authorized_users = [ 'legacy_admin', 'legacy_owner', 'system_admin', 'project_admin', 'project_member', 'project_reader', 'project_foo', ] unauthorized_users = [ 'system_member', 'system_reader', 'system_foo', 'other_project_member', 'other_project_reader', ] authorized_members = [ 'legacy_admin', 'legacy_owner', 'system_admin', 'project_admin', 'project_member', 'project_reader', 'project_foo', ] unauthorized_members = [ 'system_member', 'system_reader', 'system_foo', 'other_project_member', 'other_project_reader', ] # Basic policy test is without enforcing scope (which cinder doesn't # yet support) and deprecated rules enabled. 
def setUp(self, enforce_scope=False, enforce_new_defaults=False, *args, **kwargs): super().setUp(enforce_scope, enforce_new_defaults, *args, **kwargs) self.controller = groups.GroupsController() self.api_path = '/v3/%s/groups' % (self.project_id) self.api_version = mv.GROUP_REPLICATION self.group_type = group_types.create(self.project_admin_context, 'group_type_name', {'key3': 'value3'}, is_public=True) # not surprisingly, to do a group action you need to get a # group, so relax the group:get policy so that these tests # will check the group action policy we're interested in self.policy.set_rules({"group:get": ""}, overwrite=False) def _create_group(self, group_status=fields.GroupStatus.AVAILABLE): volume_type = test_utils.create_volume_type(self.project_admin_context, name="test") group = test_utils.create_group(self.project_admin_context, status=group_status, group_type_id=self.group_type.id, volume_type_ids=[volume_type.id]) test_utils.create_volume(self.project_member_context, group_id=group.id, testcase_instance=self, volume_type_id=volume_type.id) return group.id @ddt.data(*base.all_users) @mock.patch('cinder.group.api.API.enable_replication') def test_enable_group_replication_policy(self, user_id, mock_enable_replication): """Test enable group replication policy.""" # FIXME: this is a very fragile approach def fake_enable_rep(context, group): context.authorize(group_policies.ENABLE_REP, target_obj=group) volume_type = test_utils.create_volume_type(self.project_admin_context, name='test_group_policy') group = test_utils.create_group(self.project_admin_context, status=fields.GroupStatus.AVAILABLE, group_type_id=self.group_type.id, volume_type_ids=[volume_type.id]) test_utils.create_volume(self.project_member_context, group_id=group.id, testcase_instance=self, volume_type_id=volume_type.id) mock_enable_replication.side_effect = fake_enable_rep self.group_type.status = 'enabled' rule_name = group_policies.ENABLE_REP version = mv.GROUP_REPLICATION url = '%s/%s/action' % (self.api_path, group.id) req = fake_api.HTTPRequest.blank(url, version=version) req.method = 'POST' body = { "enable_replication": {} } unauthorized_exceptions = [exception.GroupNotFound] self.common_policy_check(user_id, self.authorized_members, self.unauthorized_members, unauthorized_exceptions, rule_name, self.controller.enable_replication, req, id=group.id, body=body) group.destroy() @ddt.data(*base.all_users) @mock.patch('cinder.group.api.API.disable_replication') def test_disable_group_replication_policy(self, user_id, mock_disable_replication): """Test disable group replication policy.""" # FIXME: this is a very fragile approach def fake_disable_rep(context, group): context.authorize(group_policies.DISABLE_REP, target_obj=group) volume_type = test_utils.create_volume_type(self.project_admin_context, name='test_group_policy') group = test_utils.create_group(self.project_admin_context, status=fields.GroupStatus.AVAILABLE, group_type_id=self.group_type.id, volume_type_ids=[volume_type.id]) test_utils.create_volume(self.project_member_context, group_id=group.id, testcase_instance=self, volume_type_id=volume_type.id) mock_disable_replication.side_effect = fake_disable_rep rule_name = group_policies.DISABLE_REP version = mv.GROUP_REPLICATION url = '%s/%s/action' % (self.api_path, group.id) req = fake_api.HTTPRequest.blank(url, version=version) req.method = 'POST' body = { "disable_replication": {} } unauthorized_exceptions = [exception.GroupNotFound] self.common_policy_check(user_id, self.authorized_members, 
self.unauthorized_members, unauthorized_exceptions, rule_name, self.controller.disable_replication, req, id=group.id, body=body) group.destroy() @ddt.data(*base.all_users) def test_reset_status_group_policy(self, user_id): """Test reset status of group policy.""" rule_name = group_policies.RESET_STATUS group_id = self._create_group(group_status=fields.GroupStatus.ERROR) url = '%s/%s/action' % (self.api_path, group_id) version = mv.GROUP_VOLUME_RESET_STATUS req = fake_api.HTTPRequest.blank(url, version=version) req.method = 'POST' body = { "reset_status": { "status": "available" } } unauthorized_exceptions = [exception.GroupNotFound] self.common_policy_check(user_id, self.sysadmins, self.non_sysadmins, unauthorized_exceptions, rule_name, self.controller.reset_status, req, id=group_id, body=body) @ddt.data(*base.all_users) def test_delete_group_policy(self, user_id): """Test delete group policy.""" volume_type = test_utils.create_volume_type(self.project_admin_context, name='test_group_policy') group_1 = test_utils.create_group(self.project_admin_context, status=fields.GroupStatus.AVAILABLE, group_type_id=self.group_type.id, volume_type_ids=[volume_type.id]) rule_name = group_policies.DELETE_POLICY url = '%s/%s' % (self.api_path, group_1.id) req = fake_api.HTTPRequest.blank(url, version=mv.GROUP_VOLUME) req.method = 'POST' body = { "delete": { "delete-volumes": "false" } } unauthorized_exceptions = [exception.GroupNotFound] self.common_policy_check(user_id, self.authorized_members, self.unauthorized_members, unauthorized_exceptions, rule_name, self.controller.delete_group, req, id=group_1.id, body=body) @ddt.data(*base.all_users) @mock.patch('cinder.group.api.API.failover_replication') def test_fail_over_replication_group_policy(self, user_id, mock_failover_replication): """Test fail over replication group policy.""" # FIXME: this is a very fragile approach def fake_failover_rep(context, group, allow_attached_volume=False, secondary_backend_id=None): context.authorize(group_policies.FAILOVER_REP, target_obj=group) volume_type = test_utils.create_volume_type(self.project_admin_context, name='test_group_policy') group_2 = test_utils.create_group(self.project_admin_context, status=fields.GroupStatus.AVAILABLE, group_type_id=self.group_type.id, volume_type_ids=[volume_type.id]) mock_failover_replication.side_effect = fake_failover_rep rule_name = group_policies.FAILOVER_REP url = '%s/%s' % (self.api_path, group_2.id) req = fake_api.HTTPRequest.blank(url, version=mv.GROUP_REPLICATION) req.method = 'POST' body = { "failover_replication": { "allow_attached_volume": "true", "secondary_backend_id": "vendor-id-1" } } unauthorized_exceptions = [exception.GroupNotFound] self.common_policy_check(user_id, self.authorized_members, self.unauthorized_members, unauthorized_exceptions, rule_name, self.controller.failover_replication, req, id=group_2.id, body=body) @ddt.data(*base.all_users) @mock.patch('cinder.group.api.API.list_replication_targets') def test_list_replication_targets_group_policy(self, user_id, mock_list_targets): """Test list replication targets for a group policy.""" # FIXME: this is a very fragile approach def fake_list_targets(context, group): context.authorize(group_policies.LIST_REP, target_obj=group) volume_type = test_utils.create_volume_type(self.project_admin_context, name='test_group_policy') group_2 = test_utils.create_group(self.project_admin_context, status=fields.GroupStatus.AVAILABLE, group_type_id=self.group_type.id, volume_type_ids=[volume_type.id]) 
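# As in the other replication tests above, the group API call itself is
# mocked and the fake side effect only performs the policy authorization,
# so the LIST_REP rule can be checked without a replication-capable
# backend.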
mock_list_targets.side_effect = fake_list_targets rule_name = group_policies.LIST_REP url = '%s/%s/action' % (self.api_path, group_2.id) req = fake_api.HTTPRequest.blank(url, version=mv.GROUP_REPLICATION) req.method = 'POST' body = {"list_replication_targets": {}} unauthorized_exceptions = [exception.GroupNotFound] self.common_policy_check(user_id, self.authorized_members, self.unauthorized_members, unauthorized_exceptions, rule_name, self.controller.list_replication_targets, req, id=group_2.id, body=body) group_2.destroy() class GroupActionPolicySecureRbacTest(GroupActionPolicyTest): sysadmins = [ 'legacy_admin', 'system_admin', 'project_admin', ] non_sysadmins = [ 'legacy_owner', 'project_member', 'system_member', 'system_reader', 'system_foo', 'project_reader', 'project_foo', 'other_project_member', 'other_project_reader', ] authorized_users = [ 'legacy_admin', 'system_admin', 'project_admin', 'project_member', ] unauthorized_users = [ 'legacy_owner', 'system_member', 'system_reader', 'system_foo', 'project_reader', 'project_foo', 'other_project_member', 'other_project_reader', ] authorized_members = authorized_users unauthorized_members = unauthorized_users def setUp(self, *args, **kwargs): # Test secure RBAC by disabling deprecated policy rules (scope # is still not enabled). super().setUp(enforce_scope=False, enforce_new_defaults=True, *args, **kwargs) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/policies/test_group_snapshots.py0000664000175000017500000003225300000000000025137 0ustar00zuulzuul00000000000000# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
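# Most of the checks below go through the GroupSnapshotsController, but
# update_group_snapshot has no REST entry point in the v3 API, so its
# UPDATE_POLICY is exercised directly against cinder.group.api.API.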
from unittest import mock import ddt from cinder.api import microversions as mv from cinder.api.v3 import group_snapshots from cinder import exception from cinder.group import api as group_api from cinder.objects import fields from cinder.policies import group_snapshots as group_snap_policies from cinder.policies import groups as group_policies from cinder.tests.unit.api import fakes as fake_api from cinder.tests.unit import fake_constants as fake from cinder.tests.unit.policies import base from cinder.tests.unit import utils as test_utils from cinder.volume import manager as volume_manager @ddt.ddt class GroupSnapshotsPolicyTest(base.BasePolicyTest): authorized_users = [ 'legacy_admin', 'legacy_owner', 'system_admin', 'project_admin', 'project_member', 'project_reader', 'project_foo', ] unauthorized_users = [ 'system_member', 'system_reader', 'system_foo', 'other_project_member', 'other_project_reader', ] authorized_members = authorized_users unauthorized_members = unauthorized_users authorized_readers = authorized_users unauthorized_readers = unauthorized_users sysadmins = [ 'legacy_admin', 'project_admin', 'system_admin', ] non_sysadmins = [ 'legacy_owner', 'project_member', 'project_reader', 'project_foo', 'system_member', 'system_reader', 'system_foo', 'other_project_member', 'other_project_reader', ] # Basic policy test is without enforcing scope (which cinder doesn't # yet support) and deprecated rules enabled. def setUp(self, enforce_scope=False, enforce_new_defaults=False, *args, **kwargs): super().setUp(enforce_scope, enforce_new_defaults, *args, **kwargs) self.controller = group_snapshots.GroupSnapshotsController() self.manager = volume_manager.VolumeManager() self.api_path = '/v3/%s/group_snapshots' % (self.project_id) self.api_version = mv.GROUP_GROUPSNAPSHOT_PROJECT_ID def _create_group_snapshot( self, snap_status=fields.GroupSnapshotStatus.AVAILABLE): volume_type = test_utils.create_volume_type(self.project_admin_context, name="test") group = test_utils.create_group(self.project_admin_context, status=fields.GroupStatus.AVAILABLE, group_type_id=fake.GROUP_TYPE_ID, volume_type_ids=[volume_type.id]) test_utils.create_volume(self.project_member_context, group_id=group.id, testcase_instance=self, volume_type_id=volume_type.id) return test_utils.create_group_snapshot( self.project_admin_context, group_id=group.id, status=snap_status, group_type_id=group.group_type_id) def _create_group_snap_array(self): group = test_utils.create_group(self.project_admin_context, status=fields.GroupStatus.AVAILABLE, group_type_id=fake.GROUP_TYPE_ID, volume_type_ids=[fake.VOLUME_TYPE_ID]) test_utils.create_volume(self.project_member_context, group_id=group.id, volume_type_id=fake.VOLUME_TYPE_ID) g_snapshots_array = [ test_utils.create_group_snapshot( self.project_admin_context, group_id=group.id, group_type_id=group.group_type_id) for _ in range(3)] return g_snapshots_array @ddt.data(*base.all_users) def test_create_group_snapshot_policy(self, user_id): """Test create a group snapshot.""" volume_type = test_utils.create_volume_type(self.project_admin_context, name='test') group = test_utils.create_group(self.project_admin_context, status=fields.GroupStatus.AVAILABLE, group_type_id=fake.GROUP_TYPE_ID, volume_type_ids=[volume_type.id]) test_utils.create_volume(self.project_member_context, group_id=group.id, testcase_instance=self, volume_type_id=volume_type.id) rule_name = group_snap_policies.CREATE_POLICY version = mv.GROUP_GROUPSNAPSHOT_PROJECT_ID url = self.api_path req = 
fake_api.HTTPRequest.blank(url, version=version) req.method = 'POST' body = { "group_snapshot": { "name": "my_group_snapshot", "description": "My group snapshot", "group_id": group.id, } } unauthorized_exceptions = [exception.GroupNotFound] # Relax the group:get policy in order to get past that check. self.policy.set_rules({group_policies.GET_POLICY: ""}, overwrite=False) self.common_policy_check(user_id, self.authorized_users, self.unauthorized_users, unauthorized_exceptions, rule_name, self.controller.create, req, body=body) group.destroy() @ddt.data(*base.all_users) def test_update_group_snapshot_policy(self, user_id): # This call is not implemented in the Block Storage API v3 # so we need to test group_snap_policies.UPDATE_POLICY directly # against the group API group_snapshot = self._create_group_snapshot() api = group_api.API() ctxt = self.create_context(user_id) if user_id in self.authorized_members: api.update_group_snapshot(ctxt, group_snapshot, {}) elif user_id in self.unauthorized_members: self.assertRaises(exception.PolicyNotAuthorized, api.update_group_snapshot, ctxt, group_snapshot, {}) else: self.fail(f'{user_id} not in authorized or unauthorized members') @ddt.data(*base.all_users) def test_delete_group_snapshot_policy(self, user_id): """Delete group snapshot.""" # Redirect the RPC call directly to the volume manager. rule_name = group_snap_policies.DELETE_POLICY group_snapshot = self._create_group_snapshot() url = '%s/%s' % (self.api_path, group_snapshot.id) req = fake_api.HTTPRequest.blank(url, version=self.api_version) req.method = 'DELETE' unauthorized_exceptions = [exception.GroupSnapshotNotFound, exception.InvalidGroupSnapshot] # Relax the GET_POLICY in order to get past that check. self.policy.set_rules({group_snap_policies.GET_POLICY: ""}, overwrite=False) self.common_policy_check(user_id, self.authorized_members, self.unauthorized_members, unauthorized_exceptions, rule_name, self.controller.delete, req, id=group_snapshot.id) @ddt.data(*base.all_users) def test_get_all_group_snapshot_policy(self, user_id): """List group snapshots.""" self._create_group_snap_array() rule_name = group_snap_policies.GET_ALL_POLICY url = '%s/detail' % (self.api_path) version = mv.GROUP_SNAPSHOTS req = fake_api.HTTPRequest.blank(url, version=version) unauthorized_exceptions = [] # NOTE: we intentionally don't use the un/authorized_readers # lists in this function because get-all doesn't have a target # to authorize against # # legacy: any logged in user can list all group snapshots # (project-specific filtering happens later) authorized_users = [user_id] unauthorized_users = [] # ... 
unless deprecated rules are not allowed, then you # must have the 'reader' role to read if self.enforce_new_defaults: context = self.create_context(user_id) if 'reader' not in context.roles: authorized_users = [] unauthorized_users = [user_id] self.common_policy_check(user_id, authorized_users, unauthorized_users, unauthorized_exceptions, rule_name, self.controller.detail, req) @ddt.data(*base.all_users) def test_get_group_snapshot_policy(self, user_id): """Show group snapshot.""" group_snapshot = self._create_group_snapshot() rule_name = group_snap_policies.GET_POLICY url = '%s/%s' % (self.api_path, group_snapshot.id) req = fake_api.HTTPRequest.blank(url, version=self.api_version) unauthorized_exceptions = [exception.GroupSnapshotNotFound] self.common_policy_check(user_id, self.authorized_readers, self.unauthorized_readers, unauthorized_exceptions, rule_name, self.controller.show, req, id=group_snapshot.id) @ddt.data(*base.all_users) @mock.patch('cinder.api.v3.views.group_snapshots.ViewBuilder.detail') @mock.patch('cinder.group.api.API.get_group_snapshot') def test_group_snapshot_project_attribute_policy(self, user_id, mock_api, mock_view): """Test show group snapshot with project attributes.""" # FIXME: kind of fragile, but I'm beginning to like this approach def mock_view_detail(request, group_snapshot): context = request.environ['cinder.context'] context.authorize( group_snap_policies.GROUP_SNAPSHOT_ATTRIBUTES_POLICY) group_snapshot = self._create_group_snapshot() group_snapshot_id = group_snapshot.id mock_api.return_value = group_snapshot mock_view.side_effect = mock_view_detail rule_name = group_snap_policies.GROUP_SNAPSHOT_ATTRIBUTES_POLICY url = '%s/%s' % (self.api_path, group_snapshot_id) req = fake_api.HTTPRequest.blank(url, version=self.api_version) unauthorized_exceptions = [exception.GroupSnapshotNotFound] self.common_policy_check(user_id, self.sysadmins, self.non_sysadmins, unauthorized_exceptions, rule_name, self.controller.show, req, id=group_snapshot_id) class GroupSnapshotsPolicySecureRbacTest(GroupSnapshotsPolicyTest): authorized_users = [ 'legacy_admin', 'system_admin', 'project_admin', 'project_member', ] unauthorized_users = [ 'legacy_owner', 'system_member', 'system_reader', 'system_foo', 'project_reader', 'project_foo', 'other_project_member', 'other_project_reader', ] authorized_members = authorized_users unauthorized_members = unauthorized_users authorized_readers = [ 'legacy_admin', 'system_admin', 'project_admin', 'project_member', 'project_reader', ] unauthorized_readers = [ 'legacy_owner', 'system_member', 'system_reader', 'system_foo', 'project_reader', 'project_foo', 'other_project_member', 'other_project_reader', ] def setUp(self, *args, **kwargs): # Test secure RBAC by disabling deprecated policy rules (scope # is still not enabled). super().setUp(enforce_scope=False, enforce_new_defaults=True, *args, **kwargs) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/policies/test_group_types.py0000664000175000017500000000442500000000000024261 0ustar00zuulzuul00000000000000# Copyright 2021 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import ddt from cinder.api import microversions as mv from cinder.api.v3 import group_types from cinder.policies import group_types as group_type_policies from cinder.tests.unit.api import fakes as fake_api from cinder.tests.unit.policies import base @ddt.ddt class GroupTypesPolicyTest(base.BasePolicyTest): authorized_admins = [ 'system_admin', 'legacy_admin', 'project_admin', ] unauthorized_admins = [ 'legacy_owner', 'system_member', 'system_reader', 'system_foo', 'project_member', 'project_reader', 'project_foo', 'other_project_member', 'other_project_reader', ] def setUp(self, enforce_scope=False, enforce_new_defaults=False, *args, **kwargs): super().setUp(enforce_scope, enforce_new_defaults, *args, **kwargs) self.controller = group_types.GroupTypesController() self.api_path = '/v3/%s/group_types' % (self.project_id) self.api_version = mv.GROUP_TYPE @ddt.data(*base.all_users) def test_create_group_type_policy(self, user_id): rule_name = group_type_policies.CREATE_POLICY url = self.api_path req = fake_api.HTTPRequest.blank(url, version=self.api_version) req.method = 'POST' body = {"group_type": {"name": "test-group-type"}} unauthorized_exceptions = [] self.common_policy_check(user_id, self.authorized_admins, self.unauthorized_admins, unauthorized_exceptions, rule_name, self.controller.create, req, body=body) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/policies/test_groups.py0000664000175000017500000002230500000000000023215 0ustar00zuulzuul00000000000000# Copyright 2021 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from unittest import mock import ddt from cinder.api import microversions as mv from cinder.api.v3 import groups from cinder.objects import group as group_obj from cinder.policies import groups as group_policies from cinder.tests.unit.api import fakes as fake_api from cinder.tests.unit import fake_constants from cinder.tests.unit.policies import base from cinder.tests.unit import utils as test_utils @ddt.ddt class GroupsPolicyTest(base.BasePolicyTest): unauthorized_readers = [ 'system_member', 'system_reader', 'system_foo', 'other_project_member', 'other_project_reader', ] authorized_members = [ 'legacy_admin', 'legacy_owner', 'system_admin', 'project_admin', 'project_member', 'project_reader', 'project_foo', ] unauthorized_members = [ 'other_project_member', 'other_project_reader', 'system_member', 'system_reader', 'system_foo', ] authorized_show = authorized_members unauthorized_show = unauthorized_members create_authorized_users = [ 'legacy_admin', 'legacy_owner', 'system_admin', 'project_admin', 'project_member', 'project_reader', 'project_foo', # The other_* users are allowed because we don't have any check # mechanism in the code to validate this, these are validated on # the WSGI layer 'other_project_member', 'other_project_reader', ] create_unauthorized_users = [ 'system_member', 'system_reader', 'system_foo', ] def setUp(self, enforce_scope=False, enforce_new_defaults=False, *args, **kwargs): super().setUp(enforce_scope, enforce_new_defaults, *args, **kwargs) self.controller = groups.GroupsController() self.api_path = '/v3/%s/groups' % (self.project_id) self.api_version = mv.GROUP_VOLUME def _create_volume_type(self): vol_type = test_utils.create_volume_type(self.project_admin_context, name='fake_vol_type') return vol_type @ddt.data(*base.all_users) @mock.patch('cinder.group.api.GROUP_QUOTAS') @mock.patch('cinder.db.group_type_get') def test_create_group_policy(self, user_id, mock_get_type, mock_quotas): vol_type = self._create_volume_type() grp_type = {'id': fake_constants.GROUP_TYPE_ID, 'name': 'group_type'} mock_get_type.return_value = grp_type rule_name = group_policies.CREATE_POLICY url = self.api_path req = fake_api.HTTPRequest.blank(url, version=self.api_version) req.method = 'POST' body = {"group": {"group_type": fake_constants.GROUP_TYPE_ID, "volume_types": [vol_type.id], "name": "test-group"}} unauthorized_exceptions = [] self.common_policy_check(user_id, self.create_authorized_users, self.create_unauthorized_users, unauthorized_exceptions, rule_name, self.controller.create, req, body=body) @ddt.data(*base.all_users) @mock.patch('cinder.group.api.GROUP_QUOTAS') @mock.patch.object(group_obj.Group, 'get_by_id') def test_get_group_policy(self, user_id, mock_get, mock_quotas): group = test_utils.create_group( self.project_admin_context, group_type_id=fake_constants.GROUP_TYPE_ID) mock_get.return_value = group rule_name = group_policies.GET_POLICY url = '%s/%ss' % (self.api_path, group.id) req = fake_api.HTTPRequest.blank(url, version=self.api_version) unauthorized_exceptions = [] self.common_policy_check(user_id, self.authorized_show, self.unauthorized_show, unauthorized_exceptions, rule_name, self.controller.show, req, id=group.id) @ddt.data(*base.all_users) def test_get_all_groups_policy(self, user_id): test_utils.create_group( self.project_admin_context, group_type_id=fake_constants.GROUP_TYPE_ID) rule_name = group_policies.GET_ALL_POLICY url = self.api_path req = fake_api.HTTPRequest.blank(url, version=self.api_version) # Generally, any logged in user can list all 
groups. authorized_users = [user_id] unauthorized_users = [] # The exception is when deprecated rules are disabled, in which case # roles are enforced. Users without the 'reader' role should be # blocked. if self.enforce_new_defaults: context = self.create_context(user_id) if 'reader' not in context.roles: authorized_users = [] unauthorized_users = [user_id] response = self.common_policy_check(user_id, authorized_users, unauthorized_users, [], rule_name, self.controller.index, req) # For some users, even if they're authorized, the list of volumes # will be empty if they are not in the volume's project. empty_response_users = [ *self.unauthorized_readers, # legacy_admin and system_admin do not have a project_id, and # so the list of backups returned will be empty. 'legacy_admin', 'system_admin', ] groups = response['groups'] if response else [] group_count = 0 if user_id in empty_response_users else 1 self.assertEqual(group_count, len(groups)) @ddt.data(*base.all_users) @mock.patch('cinder.group.api.GROUP_QUOTAS') @mock.patch.object(group_obj.Group, 'get_by_id') def test_delete_group_policy(self, user_id, mock_get, mock_quotas): group = test_utils.create_group( self.project_admin_context, group_type_id=fake_constants.GROUP_TYPE_ID) mock_get.return_value = group rule_name = group_policies.UPDATE_POLICY url = '%s/%s' % (self.api_path, group.id) req = fake_api.HTTPRequest.blank(url, version=self.api_version) req.method = 'PUT' body = {"group": {"name": "test-update-group"}} unauthorized_exceptions = [] # need to get past the GET_POLICY check self.policy.set_rules({group_policies.GET_POLICY: ""}, overwrite=False) self.common_policy_check(user_id, self.authorized_members, self.unauthorized_members, unauthorized_exceptions, rule_name, self.controller.update, req, id=group.id, body=body) class GroupsPolicySecureRbacTest(GroupsPolicyTest): unauthorized_readers = [ 'legacy_owner', 'system_member', 'system_reader', 'system_foo', 'other_project_member', 'other_project_reader', 'project_foo', ] authorized_show = [ 'legacy_admin', 'system_admin', 'project_admin', 'project_member', 'project_reader', ] unauthorized_show = [ 'legacy_owner', 'project_foo', 'system_member', 'system_reader', 'system_foo', 'other_project_member', 'other_project_reader', ] authorized_members = [ 'legacy_admin', 'system_admin', 'project_admin', 'project_member', ] unauthorized_members = [ 'legacy_owner', 'project_reader', 'project_foo', 'system_member', 'system_reader', 'system_foo', 'other_project_member', 'other_project_reader', ] create_authorized_users = [ 'legacy_admin', 'system_admin', 'project_admin', 'project_member', 'other_project_member', ] create_unauthorized_users = [ 'legacy_owner', 'system_member', 'system_reader', 'system_foo', 'other_project_reader', 'project_foo', 'project_reader', ] def setUp(self, *args, **kwargs): # Test secure RBAC by disabling deprecated policy rules (scope # is still not enabled). super().setUp(enforce_scope=False, enforce_new_defaults=True, *args, **kwargs) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/policies/test_limits.py0000664000175000017500000001041100000000000023172 0ustar00zuulzuul00000000000000# Copyright 2021 Red Hat, Inc. # All Rights Reserved. # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from unittest import mock import ddt from cinder.api.contrib import used_limits from cinder.api import microversions as mv from cinder.api.v3 import limits from cinder.policies import limits as policy from cinder.tests.unit.api import fakes as fake_api from cinder.tests.unit.policies import base @ddt.ddt class LimitsPolicyTest(base.BasePolicyTest): authorized_readers = [ 'legacy_admin', 'legacy_owner', 'system_admin', 'project_admin', 'project_member', 'project_reader', 'project_foo', # The other_* users are allowed because we don't have any check # mechanism in the code to validate the project_id, which is # validated at the WSGI layer. 'other_project_member', 'other_project_reader', ] unauthorized_readers = [ 'system_member', 'system_reader', 'system_foo', ] unauthorized_exceptions = [] # Basic policy test is without enforcing scope (which cinder doesn't # yet support) and deprecated rules enabled. def setUp(self, enforce_scope=False, enforce_new_defaults=False, *args, **kwargs): super().setUp(enforce_scope, enforce_new_defaults, *args, **kwargs) self.limits_controller = limits.LimitsController() self.used_limits_controller = used_limits.UsedLimitsController() self.api_path = '/v3/%s/limits' % (self.project_id) self.api_version = mv.BASE_VERSION @ddt.data(*base.all_users) def test_extend_limit_attribute_policy(self, user_id): rule_name = policy.EXTEND_LIMIT_ATTRIBUTE_POLICY url = self.api_path # Create a resp_obj (necessary for the UsedLimitsController) by # requesting the limits via the LimitsController, which actually # generates the response. req = fake_api.HTTPRequest.blank(url, version=self.api_version) req.environ['cinder.context'] = self.project_admin_context limits = self.limits_controller.index(req)['limits'] resp_obj = mock.MagicMock(obj={'limits': limits}) # This proves the LimitsController's response doesn't include any # "used" entries (e.g. totalVolumesUsed). self.assertNotIn('totalVolumesUsed', limits['absolute'].keys()) # Now hit the UsedLimitsController and see if it adds "used" # limits to the resp_obj. req = fake_api.HTTPRequest.blank(url, version=self.api_version) self.common_policy_check(user_id, self.authorized_readers, self.unauthorized_readers, self.unauthorized_exceptions, rule_name, self.used_limits_controller.index, req, resp_obj=resp_obj, fatal=False) if user_id in self.authorized_readers: self.assertIn('totalVolumesUsed', limits['absolute'].keys()) else: self.assertNotIn('totalVolumesUsed', limits['absolute'].keys()) class LimitsPolicySecureRbacTest(LimitsPolicyTest): authorized_readers = [ 'legacy_admin', 'system_admin', 'project_admin', 'project_member', 'project_reader', 'other_project_member', 'other_project_reader', ] unauthorized_readers = [ 'legacy_owner', 'system_member', 'system_reader', 'system_foo', 'project_foo', ] def setUp(self, *args, **kwargs): # Test secure RBAC by disabling deprecated policy rules (scope # is still not enabled). 
super().setUp(enforce_scope=False, enforce_new_defaults=True, *args, **kwargs) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/policies/test_messages.py0000664000175000017500000001403700000000000023510 0ustar00zuulzuul00000000000000# Copyright 2021 Red Hat, Inc. # All Rights Reserved. # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import ddt from cinder.api import extensions from cinder.api import microversions as mv from cinder.api.v3 import messages from cinder.db import api as db_api from cinder import exception from cinder.policies import messages as messages_policies from cinder.tests.unit.api import fakes as fake_api from cinder.tests.unit import fake_constants as fake from cinder.tests.unit.policies import base @ddt.ddt class MessagesPolicyTest(base.BasePolicyTest): authorized_users = [ 'legacy_admin', 'legacy_owner', 'system_admin', 'project_admin', 'project_member', 'project_reader', 'project_foo', ] unauthorized_users = [ 'system_member', 'system_reader', 'system_foo', 'other_project_member', 'other_project_reader', ] # Basic policy tests are without scope and deprecated rules enabled. def setUp(self, enforce_scope=False, enforce_new_defaults=False, *args, **kwargs): super().setUp(enforce_scope, enforce_new_defaults, *args, **kwargs) self.ext_mgr = extensions.ExtensionManager() self.controller = messages.MessagesController(self.ext_mgr) self.api_path = '/v3/%s/messages' % (self.project_id) self.api_version = mv.MESSAGES def _create_message(self): message_values = { 'id': fake.UUID1, 'event_id': 'VOLUME_000001', 'message_level': 'ERROR', 'project_id': self.project_id, } db_api.message_create(self.project_member_context, message_values) return message_values['id'] @ddt.data(*base.all_users) def test_get_message_policy(self, user_id): message_id = self._create_message() rule_name = messages_policies.GET_POLICY url = '%s/%s' % (self.api_path, message_id) req = fake_api.HTTPRequest.blank(url, version=self.api_version) unauthorized_exceptions = [ exception.MessageNotFound, ] self.common_policy_check(user_id, self.authorized_users, self.unauthorized_users, unauthorized_exceptions, rule_name, self.controller.show, req, id=message_id) @ddt.data(*base.all_users) def test_get_all_message_policy(self, user_id): self._create_message() rule_name = messages_policies.GET_ALL_POLICY url = self.api_path req = fake_api.HTTPRequest.blank(url, version=self.api_version) # The GET_ALL_POLICY is an interesting test case, primarily because # the policy check passes regardless of whether the project_id # in the request matches the one in the context. This is OK because # the WSGI controller ensures the context can access the project_id # in the request. So in Xena, where scope is not supported, all users # will tend to pass the policy check regardless of their project_id. authorized_users = [user_id] unauthorized_users = [] # The exception is when deprecated rules are disabled, in which case # roles are enforced.
Users without the 'reader' role should be # blocked. if self.enforce_new_defaults: context = self.create_context(user_id) if 'reader' not in context.roles: authorized_users = [] unauthorized_users = [user_id] unauthorized_exceptions = [] self.common_policy_check(user_id, authorized_users, unauthorized_users, unauthorized_exceptions, rule_name, self.controller.index, req) @ddt.data(*base.all_users) def test_delete_message_policy(self, user_id): message_id = self._create_message() rule_name = messages_policies.DELETE_POLICY url = '%s/%s' % (self.api_path, message_id) req = fake_api.HTTPRequest.blank(url, version=self.api_version) # The project_reader should not be able to delete a message unless # the deprecated policy rules are enabled. if user_id == 'project_reader' and self.enforce_new_defaults: unauthorized_users = [user_id] authorized_users = [] else: authorized_users = self.authorized_users unauthorized_users = self.unauthorized_users unauthorized_exceptions = [ exception.MessageNotFound, ] self.common_policy_check(user_id, authorized_users, unauthorized_users, unauthorized_exceptions, rule_name, self.controller.delete, req, id=message_id) class MessagesPolicySecureRbacTest(MessagesPolicyTest): authorized_users = [ 'legacy_admin', 'system_admin', 'project_admin', 'project_member', 'project_reader', ] unauthorized_users = [ 'legacy_owner', 'system_member', 'system_reader', 'system_foo', 'project_foo', 'other_project_member', 'other_project_reader', ] def setUp(self, *args, **kwargs): # Test secure RBAC by disabling deprecated policy rules (scope # is still not enabled). super().setUp(enforce_scope=False, enforce_new_defaults=True, *args, **kwargs) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/policies/test_quota_class.py0000664000175000017500000000722500000000000024220 0ustar00zuulzuul00000000000000# Copyright 2021 Red Hat, Inc. # All Rights Reserved. # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import ddt from cinder.api.contrib import quota_classes from cinder.api import microversions as mv from cinder.policies import quota_class as policy from cinder.tests.unit.api import fakes as fake_api from cinder.tests.unit.policies import base @ddt.ddt class QuotaClassPolicyTest(base.BasePolicyTest): authorized_admins = [ 'legacy_admin', 'system_admin', 'project_admin', ] unauthorized_admins = [ 'legacy_owner', 'system_member', 'system_reader', 'system_foo', 'project_member', 'project_reader', 'project_foo', 'other_project_member', 'other_project_reader', ] unauthorized_exceptions = [] # Basic policy test is without enforcing scope (which cinder doesn't # yet support) and deprecated rules enabled. 
def setUp(self, enforce_scope=False, enforce_new_defaults=False, *args, **kwargs): super().setUp(enforce_scope, enforce_new_defaults, *args, **kwargs) self.controller = quota_classes.QuotaClassSetsController() self.api_path = '/v3/os-quota-class-sets' self.api_version = mv.BASE_VERSION @ddt.data(*base.all_users) def test_get_policy(self, user_id): rule_name = policy.GET_POLICY req = fake_api.HTTPRequest.blank(self.api_path, version=self.api_version) self.common_policy_check(user_id, self.authorized_admins, self.unauthorized_admins, self.unauthorized_exceptions, rule_name, self.controller.show, req, id='my_class') @ddt.data(*base.all_users) def test_update_policy(self, user_id): rule_name = policy.UPDATE_POLICY req = fake_api.HTTPRequest.blank(self.api_path, version=self.api_version) req.method = 'PUT' body = { "quota_class_set": { "groups": 11, "volumes": 5, "backups": 4 } } self.common_policy_check(user_id, self.authorized_admins, self.unauthorized_admins, self.unauthorized_exceptions, rule_name, self.controller.update, req, id='my_class', body=body) class QuotaClassPolicySecureRbacTest(QuotaClassPolicyTest): # NOTE(Xena): The authorized_admins and unauthorized_admins are the same # as the QuotasPolicyTest's. This is because in Xena the "admin only" # rules are the legacy RULE_ADMIN_API. This will change in Yoga, when # RULE_ADMIN_API will be deprecated in favor of the SYSTEM_ADMIN rule that # is scope based. def setUp(self, *args, **kwargs): # Test secure RBAC by disabling deprecated policy rules (scope # is still not enabled). super().setUp(enforce_scope=False, enforce_new_defaults=True, *args, **kwargs) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/policies/test_quotas.py0000664000175000017500000001161200000000000023211 0ustar00zuulzuul00000000000000# Copyright 2021 Red Hat, Inc. # All Rights Reserved. # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import ddt from cinder.api.contrib import quotas from cinder.api import microversions as mv from cinder.policies import quotas as policy from cinder.tests.unit.api import fakes as fake_api from cinder.tests.unit.policies import base @ddt.ddt class QuotasPolicyTest(base.BasePolicyTest): authorized_users = [ 'legacy_admin', 'legacy_owner', 'system_admin', 'project_admin', 'project_member', 'project_reader', 'project_foo', ] unauthorized_users = [ 'system_member', 'system_reader', 'system_foo', 'other_project_member', 'other_project_reader', ] authorized_admins = [ 'legacy_admin', 'system_admin', 'project_admin', ] unauthorized_admins = [ 'legacy_owner', 'system_member', 'system_reader', 'system_foo', 'project_member', 'project_reader', 'project_foo', 'other_project_member', 'other_project_reader', ] unauthorized_exceptions = [] # Basic policy test is without enforcing scope (which cinder doesn't # yet support) and deprecated rules enabled. 
def setUp(self, enforce_scope=False, enforce_new_defaults=False, *args, **kwargs): super().setUp(enforce_scope, enforce_new_defaults, *args, **kwargs) self.controller = quotas.QuotaSetsController() self.api_path = '/v3/os-quota-sets' self.api_version = mv.BASE_VERSION @ddt.data(*base.all_users) def test_show_policy(self, user_id): rule_name = policy.SHOW_POLICY req = fake_api.HTTPRequest.blank(self.api_path, version=self.api_version) self.common_policy_check(user_id, self.authorized_users, self.unauthorized_users, self.unauthorized_exceptions, rule_name, self.controller.show, req, id=self.project_id) @ddt.data(*base.all_users) def test_update_policy(self, user_id): rule_name = policy.UPDATE_POLICY req = fake_api.HTTPRequest.blank(self.api_path, version=self.api_version) req.method = 'PUT' body = { "quota_set": { "groups": 11, "volumes": 5, "backups": 4 } } self.common_policy_check(user_id, self.authorized_admins, self.unauthorized_admins, self.unauthorized_exceptions, rule_name, self.controller.update, req, id=self.project_id, body=body) @ddt.data(*base.all_users) def test_delete_policy(self, user_id): rule_name = policy.DELETE_POLICY req = fake_api.HTTPRequest.blank(self.api_path, version=self.api_version) req.method = 'DELETE' self.common_policy_check(user_id, self.authorized_admins, self.unauthorized_admins, self.unauthorized_exceptions, rule_name, self.controller.delete, req, id=self.project_id) class QuotasPolicySecureRbacTest(QuotasPolicyTest): authorized_users = [ 'legacy_admin', 'system_admin', 'project_admin', 'project_member', 'project_reader', ] unauthorized_users = [ 'legacy_owner', 'system_member', 'system_foo', 'project_foo', 'other_project_member', 'other_project_reader', ] # NOTE(Xena): The authorized_admins and unauthorized_admins are the same # as the QuotasPolicyTest's. This is because in Xena the "admin only" # rules are the legacy RULE_ADMIN_API. This will change in Yoga, when # RULE_ADMIN_API will be deprecated in favor of the SYSTEM_ADMIN rule that # is scope based. def setUp(self, *args, **kwargs): # Test secure RBAC by disabling deprecated policy rules (scope # is still not enabled). super().setUp(enforce_scope=False, enforce_new_defaults=True, *args, **kwargs) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/policies/test_snapshot_actions.py0000664000175000017500000001473400000000000025264 0ustar00zuulzuul00000000000000# Copyright 2021 Red Hat, Inc. # All Rights Reserved. # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
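# ---------------------------------------------------------------------------
# Illustrative sketch (not taken from the cinder source): the NOTE(Xena)
# comments above describe "admin only" APIs that still default to the legacy
# RULE_ADMIN_API check.  With oslo.policy that situation is usually expressed
# by attaching a DeprecatedRule to the new default; while a deployment leaves
# enforce_new_defaults disabled, oslo.policy evaluates the new check string OR
# the deprecated one, which is why the legacy personas keep passing in the
# non-SecureRbac test classes.  The rule names, check strings and paths below
# are assumptions made up for the example, not cinder's actual definitions.
# ---------------------------------------------------------------------------
from oslo_policy import policy as base_policy

_legacy_admin_api = base_policy.DeprecatedRule(
    name='example:admin_api',                    # hypothetical legacy rule
    check_str='role:admin',
    deprecated_reason='Replaced by a project-scoped admin default.',
    deprecated_since='Xena',
)

example_policies = [
    base_policy.DocumentedRuleDefault(
        name='example:update_quota',             # hypothetical rule name
        check_str='role:admin and project_id:%(project_id)s',
        description='Example admin-only API protected by a new default '
                    'while the legacy admin-api check is still honoured.',
        operations=[{'method': 'PUT',
                     'path': '/os-quota-sets/{project_id}'}],
        deprecated_rule=_legacy_admin_api,
    ),
]


def list_rules():
    # Entry-point style used by oslo.policy based projects to expose defaults.
    return example_policies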
import ddt from cinder.api.contrib import admin_actions from cinder.api.contrib import snapshot_actions from cinder.api import microversions as mv from cinder import exception from cinder.policies import snapshot_actions as policy from cinder.policies import snapshots as snapshots_policy from cinder.tests.unit.api import fakes as fake_api from cinder.tests.unit.policies import base from cinder.tests.unit import utils as test_utils @ddt.ddt class SnapshotActionsPolicyTest(base.BasePolicyTest): authorized_members = [ 'legacy_admin', 'legacy_owner', 'system_admin', 'project_admin', 'project_member', 'project_reader', 'project_foo', ] unauthorized_members = [ 'system_member', 'system_reader', 'system_foo', 'other_project_member', 'other_project_reader', ] authorized_admins = [ 'legacy_admin', 'system_admin', 'project_admin', ] unauthorized_admins = [ 'legacy_owner', 'system_member', 'system_reader', 'system_foo', 'project_member', 'project_reader', 'project_foo', 'other_project_member', 'other_project_reader', ] # DB validations will throw SnapshotNotFound for some contexts unauthorized_exceptions = [ exception.SnapshotNotFound, ] # Basic policy test is without enforcing scope (which cinder doesn't # yet support) and deprecated rules enabled. def setUp(self, enforce_scope=False, enforce_new_defaults=False, *args, **kwargs): super().setUp(enforce_scope, enforce_new_defaults, *args, **kwargs) self.controller = snapshot_actions.SnapshotActionsController() self.admin_controller = admin_actions.SnapshotAdminController() self.api_path = '/v3/%s/snapshots' % (self.project_id) self.api_version = mv.BASE_VERSION # Relax the snapshots GET_POLICY in order to get past that check. self.policy.set_rules({snapshots_policy.GET_POLICY: ""}, overwrite=False) def _create_snapshot(self, **kwargs): vol_type = test_utils.create_volume_type(self.project_admin_context, name='fake_vol_type', testcase_instance=self) volume = test_utils.create_volume(self.project_member_context, volume_type_id=vol_type.id, testcase_instance=self) snapshot = test_utils.create_snapshot(self.project_member_context, volume_id=volume.id, testcase_instance=self, **kwargs) return snapshot @ddt.data(*base.all_users) def test_reset_status_policy(self, user_id): snapshot = self._create_snapshot(status='error') rule_name = policy.RESET_STATUS_POLICY url = '%s/%s/action' % (self.api_path, snapshot.id) req = fake_api.HTTPRequest.blank(url, version=self.api_version) req.method = 'POST' body = { "os-reset_status": { "status": "available", } } self.common_policy_check(user_id, self.authorized_admins, self.unauthorized_admins, self.unauthorized_exceptions, rule_name, self.admin_controller._reset_status, req, id=snapshot.id, body=body) @ddt.data(*base.all_users) def test_update_status_policy(self, user_id): snapshot = self._create_snapshot(status='creating') rule_name = policy.UPDATE_STATUS_POLICY url = '%s/%s/action' % (self.api_path, snapshot.id) req = fake_api.HTTPRequest.blank(url, version=self.api_version) req.method = 'POST' body = { "os-update_snapshot_status": { "status": "error" } } self.common_policy_check(user_id, self.authorized_members, self.unauthorized_members, self.unauthorized_exceptions, rule_name, self.controller._update_snapshot_status, req, id=snapshot.id, body=body) @ddt.data(*base.all_users) def test_force_delete_policy(self, user_id): snapshot = self._create_snapshot(status='error') rule_name = policy.FORCE_DELETE_POLICY url = '%s/%s/action' % (self.api_path, snapshot.id) req = fake_api.HTTPRequest.blank(url, 
version=self.api_version) req.method = 'POST' body = { "os-force_delete": {} } self.common_policy_check(user_id, self.authorized_admins, self.unauthorized_admins, self.unauthorized_exceptions, rule_name, self.admin_controller._force_delete, req, id=snapshot.id, body=body) class SnapshotActionsPolicySecureRbacTest(SnapshotActionsPolicyTest): authorized_members = [ 'legacy_admin', 'system_admin', 'project_admin', 'project_member', ] unauthorized_members = [ 'legacy_owner', 'system_member', 'system_reader', 'system_foo', 'project_reader', 'project_foo', 'other_project_member', 'other_project_reader', ] def setUp(self, *args, **kwargs): # Test secure RBAC by disabling deprecated policy rules (scope # is still not enabled). super().setUp(enforce_scope=False, enforce_new_defaults=True, *args, **kwargs) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/policies/test_snapshot_metadata.py0000664000175000017500000001545200000000000025402 0ustar00zuulzuul00000000000000# Copyright 2021 Red Hat, Inc. # All Rights Reserved. # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import ddt from cinder.api import microversions as mv from cinder.api.v3 import snapshot_metadata from cinder import exception from cinder.policies import snapshot_metadata as policy from cinder.policies import snapshots as snapshots_policy from cinder.tests.unit.api import fakes as fake_api from cinder.tests.unit.policies import base from cinder.tests.unit import utils as test_utils @ddt.ddt class SnapshotMetadataPolicyTest(base.BasePolicyTest): authorized_readers = [ 'legacy_admin', 'legacy_owner', 'system_admin', 'project_admin', 'project_member', 'project_reader', 'project_foo', ] unauthorized_readers = [ 'system_member', 'system_reader', 'system_foo', 'other_project_member', 'other_project_reader', ] authorized_members = [ 'legacy_admin', 'legacy_owner', 'system_admin', 'project_admin', 'project_member', 'project_reader', 'project_foo', ] unauthorized_members = [ 'system_member', 'system_reader', 'system_foo', 'other_project_member', 'other_project_reader', ] # DB validations will throw SnapshotNotFound for some contexts unauthorized_exceptions = [ exception.SnapshotNotFound, ] # Basic policy test is without enforcing scope (which cinder doesn't # yet support) and deprecated rules enabled. def setUp(self, enforce_scope=False, enforce_new_defaults=False, *args, **kwargs): super().setUp(enforce_scope, enforce_new_defaults, *args, **kwargs) self.controller = snapshot_metadata.Controller() self.api_path = '/v3/%s/snapshots' % (self.project_id) self.api_version = mv.BASE_VERSION self.vol_type = test_utils.create_volume_type( self.project_admin_context, name='fake_vol_type', testcase_instance=self) # Relax the snapshots GET_POLICY in order to get past that check. 
self.policy.set_rules({snapshots_policy.GET_POLICY: ""}, overwrite=False) def _create_volume(self, **kwargs): volume = test_utils.create_volume(self.project_member_context, volume_type_id=self.vol_type.id, testcase_instance=self, **kwargs) return volume def _create_snapshot(self, **kwargs): volume = self._create_volume(**kwargs) snapshot = test_utils.create_snapshot(self.project_member_context, volume_id=volume.id, testcase_instance=self, **kwargs) return snapshot @ddt.data(*base.all_users) def test_get_policy(self, user_id): metadata = {'inside': 'out'} snapshot = self._create_snapshot(metadata=metadata) rule_name = policy.GET_POLICY url = '%s/%s/metadata' % (self.api_path, snapshot.id) req = fake_api.HTTPRequest.blank(url, version=self.api_version) response = self.common_policy_check( user_id, self.authorized_readers, self.unauthorized_readers, self.unauthorized_exceptions, rule_name, self.controller.show, req, snapshot_id=snapshot.id, id='inside') if user_id in self.authorized_readers: self.assertDictEqual(metadata, response['meta']) @ddt.data(*base.all_users) def test_update_policy(self, user_id): snapshot = self._create_snapshot() rule_name = policy.UPDATE_POLICY url = '%s/%s/metadata' % (self.api_path, snapshot.id) req = fake_api.HTTPRequest.blank(url, version=self.api_version) req.method = 'POST' metadata = { 'inside': 'out', 'outside': 'in' } body = { "metadata": {**metadata} } response = self.common_policy_check( user_id, self.authorized_members, self.unauthorized_members, self.unauthorized_exceptions, rule_name, self.controller.update_all, req, snapshot_id=snapshot.id, body=body) if user_id in self.authorized_members: self.assertDictEqual(metadata, response['metadata']) @ddt.data(*base.all_users) def test_delete_policy(self, user_id): metadata = {'inside': 'out'} snapshot = self._create_snapshot(metadata=metadata) rule_name = policy.DELETE_POLICY url = '%s/%s/metadata/inside' % (self.api_path, snapshot.id) req = fake_api.HTTPRequest.blank(url, version=self.api_version) req.method = 'DELETE' # Relax the GET_POLICY in order to get past that check. self.policy.set_rules({policy.GET_POLICY: ""}, overwrite=False) self.common_policy_check(user_id, self.authorized_members, self.unauthorized_members, self.unauthorized_exceptions, rule_name, self.controller.delete, req, snapshot_id=snapshot.id, id='inside') class SnapshotMetadataPolicySecureRbacTest(SnapshotMetadataPolicyTest): authorized_readers = [ 'legacy_admin', 'system_admin', 'project_admin', 'project_member', 'project_reader', ] unauthorized_readers = [ 'legacy_owner', 'system_member', 'system_reader', 'system_foo', 'project_foo', 'other_project_member', 'other_project_reader', ] authorized_members = [ 'legacy_admin', 'system_admin', 'project_admin', 'project_member', ] unauthorized_members = [ 'legacy_owner', 'system_member', 'system_reader', 'system_foo', 'project_reader', 'project_foo', 'other_project_member', 'other_project_reader', ] def setUp(self, *args, **kwargs): # Test secure RBAC by disabling deprecated policy rules (scope # is still not enabled). super().setUp(enforce_scope=False, enforce_new_defaults=True, *args, **kwargs) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/policies/test_snapshots.py0000664000175000017500000002512400000000000023722 0ustar00zuulzuul00000000000000# Copyright 2021 Red Hat, Inc. # All Rights Reserved. 
# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from unittest import mock import ddt from cinder.api.contrib import extended_snapshot_attributes as snapshot_attr from cinder.api import microversions as mv from cinder.api.v3 import snapshots from cinder import exception from cinder.policies import snapshots as policy from cinder.tests.unit.api import fakes as fake_api from cinder.tests.unit.policies import base from cinder.tests.unit import utils as test_utils @ddt.ddt class SnapshotsPolicyTest(base.BasePolicyTest): authorized_readers = [ 'legacy_admin', 'legacy_owner', 'system_admin', 'project_admin', 'project_member', 'project_reader', 'project_foo', ] unauthorized_readers = [ 'system_member', 'system_reader', 'system_foo', 'other_project_member', 'other_project_reader', ] authorized_members = [ 'legacy_admin', 'legacy_owner', 'system_admin', 'project_admin', 'project_member', 'project_reader', 'project_foo', ] unauthorized_members = [ 'system_member', 'system_reader', 'system_foo', 'other_project_member', 'other_project_reader', ] authorized_admins = [ 'legacy_admin', 'system_admin', 'project_admin', ] unauthorized_admins = [ 'legacy_owner', 'system_member', 'system_reader', 'system_foo', 'project_member', 'project_reader', 'project_foo', 'other_project_member', 'other_project_reader', ] # DB validations will throw SnapshotNotFound for some contexts unauthorized_exceptions = [ exception.SnapshotNotFound, ] # Basic policy test is without enforcing scope (which cinder doesn't # yet support) and deprecated rules enabled. def setUp(self, enforce_scope=False, enforce_new_defaults=False, *args, **kwargs): super().setUp(enforce_scope, enforce_new_defaults, *args, **kwargs) self.controller = snapshots.SnapshotsController() self.api_path = '/v3/%s/snapshots' % (self.project_id) self.api_version = mv.BASE_VERSION self.vol_type = test_utils.create_volume_type( self.project_admin_context, name='fake_vol_type', testcase_instance=self) def _create_volume(self, **kwargs): volume = test_utils.create_volume(self.project_member_context, volume_type_id=self.vol_type.id, testcase_instance=self, **kwargs) return volume def _create_snapshot(self, **kwargs): volume = self._create_volume(**kwargs) snapshot = test_utils.create_snapshot(self.project_member_context, volume_id=volume.id, testcase_instance=self, **kwargs) return snapshot @ddt.data(*base.all_users) def test_get_all_policy(self, user_id): self._create_snapshot() rule_name = policy.GET_ALL_POLICY url = self.api_path req = fake_api.HTTPRequest.blank(url, version=self.api_version) # Generally, any logged in user can list all volumes. authorized_readers = [user_id] unauthorized_readers = [] # The exception is when deprecated rules are disabled, in which case # roles are enforced. Users without the 'reader' role should be # blocked. 
if self.enforce_new_defaults: context = self.create_context(user_id) if 'reader' not in context.roles: authorized_readers = [] unauthorized_readers = [user_id] response = self.common_policy_check(user_id, authorized_readers, unauthorized_readers, self.unauthorized_exceptions, rule_name, self.controller.index, req) # For some users, even if they're authorized, the list of snapshots # will be empty if they are not in the snapshot's project. empty_response_users = [ *self.unauthorized_readers, # legacy_admin and system_admin do not have a project_id, and # so the list of snapshots returned will be empty. 'legacy_admin', 'system_admin', ] snapshots = response['snapshots'] if response else [] snapshot_count = 0 if user_id in empty_response_users else 1 self.assertEqual(snapshot_count, len(snapshots)) @ddt.data(*base.all_users) def test_get_policy(self, user_id): snapshot = self._create_snapshot() rule_name = policy.GET_POLICY url = '%s/%s' % (self.api_path, snapshot.id) req = fake_api.HTTPRequest.blank(url, version=self.api_version) self.common_policy_check(user_id, self.authorized_readers, self.unauthorized_readers, self.unauthorized_exceptions, rule_name, self.controller.show, req, id=snapshot.id) @ddt.data(*base.all_users) def test_extend_attribute_policy(self, user_id): snapshot = self._create_snapshot() rule_name = policy.EXTEND_ATTRIBUTE url = '%s/%s' % (self.api_path, snapshot.id) req = fake_api.HTTPRequest.blank(url, version=self.api_version) snapshot_dict = snapshot.obj_to_primitive()['versioned_object.data'] req.get_db_snapshot = mock.MagicMock() req.get_db_snapshot.return_value = snapshot_dict resp_obj = mock.MagicMock(obj={'snapshot': snapshot_dict}) self.assertNotIn('os-extended-snapshot-attributes:project_id', snapshot_dict.keys()) controller = snapshot_attr.ExtendedSnapshotAttributesController() self.common_policy_check(user_id, self.authorized_readers, self.unauthorized_readers, self.unauthorized_exceptions, rule_name, controller.show, req, resp_obj=resp_obj, id=snapshot.id, fatal=False) if user_id in self.authorized_readers: self.assertIn('os-extended-snapshot-attributes:project_id', snapshot_dict.keys()) @ddt.data(*base.all_users) def test_create_policy(self, user_id): volume = self._create_volume() rule_name = policy.CREATE_POLICY url = self.api_path req = fake_api.HTTPRequest.blank(url, version=self.api_version) req.method = 'POST' body = { "snapshot": { "name": "snap-001", "volume_id": volume.id, } } unauthorized_exceptions = [ exception.VolumeNotFound, ] self.common_policy_check(user_id, self.authorized_members, self.unauthorized_members, unauthorized_exceptions, rule_name, self.controller.create, req, body=body) @ddt.data(*base.all_users) def test_update_policy(self, user_id): snapshot = self._create_snapshot() rule_name = policy.UPDATE_POLICY url = '%s/%s' % (self.api_path, snapshot.id) req = fake_api.HTTPRequest.blank(url, version=self.api_version) req.method = 'PUT' body = { "snapshot": { "description": "This is yet another snapshot." } } # Relax the GET_POLICY in order to get past that check.
self.policy.set_rules({policy.GET_POLICY: ""}, overwrite=False) self.common_policy_check(user_id, self.authorized_members, self.unauthorized_members, self.unauthorized_exceptions, rule_name, self.controller.update, req, id=snapshot.id, body=body) @ddt.data(*base.all_users) def test_delete_policy(self, user_id): snapshot = self._create_snapshot(status='available') rule_name = policy.DELETE_POLICY url = '%s/%s' % (self.api_path, snapshot.id) req = fake_api.HTTPRequest.blank(url, version=self.api_version) req.method = 'DELETE' # Relax the GET_POLICY in order to get past that check. self.policy.set_rules({policy.GET_POLICY: ""}, overwrite=False) self.common_policy_check(user_id, self.authorized_members, self.unauthorized_members, self.unauthorized_exceptions, rule_name, self.controller.delete, req, id=snapshot.id) class SnapshotsPolicySecureRbacTest(SnapshotsPolicyTest): authorized_readers = [ 'legacy_admin', 'system_admin', 'project_admin', 'project_member', 'project_reader', ] unauthorized_readers = [ 'legacy_owner', 'system_member', 'system_reader', 'system_foo', 'project_foo', 'other_project_member', 'other_project_reader', ] authorized_members = [ 'legacy_admin', 'system_admin', 'project_admin', 'project_member', ] unauthorized_members = [ 'legacy_owner', 'system_member', 'system_reader', 'system_foo', 'project_reader', 'project_foo', 'other_project_member', 'other_project_reader', ] def setUp(self, *args, **kwargs): # Test secure RBAC by disabling deprecated policy rules (scope # is still not enabled). super().setUp(enforce_scope=False, enforce_new_defaults=True, *args, **kwargs) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/policies/test_type_extra_specs.py0000664000175000017500000002641100000000000025261 0ustar00zuulzuul00000000000000# Copyright (c) 2021 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
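# ---------------------------------------------------------------------------
# Illustrative sketch (not from the cinder source): several tests above relax
# a prerequisite rule with self.policy.set_rules({...: ""}, overwrite=False)
# so that the rule actually under test is reached.  Two oslo.policy details
# make that work: an empty check string always passes, and overwrite=False
# merges the relaxed rule into the rules already in effect instead of
# replacing them.  The enforcer setup, rule name and credentials below are
# assumptions for demonstration only.
# ---------------------------------------------------------------------------
from oslo_config import cfg
from oslo_policy import policy as oslo_policy

conf = cfg.ConfigOpts()
enforcer = oslo_policy.Enforcer(conf)

# Start from a restrictive rule, then relax it the way the tests do.
enforcer.set_rules(oslo_policy.Rules.from_dict({'example:get': 'role:admin'}),
                   overwrite=True, use_conf=False)
enforcer.set_rules(oslo_policy.Rules.from_dict({'example:get': ''}),
                   overwrite=False, use_conf=False)

creds = {'roles': ['reader'], 'project_id': 'p1', 'user_id': 'u1'}
# The empty check string means every caller now passes this particular rule.
assert enforcer.enforce('example:get', {}, creds)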
import ddt from cinder.api.contrib import types_extra_specs from cinder.api import microversions as mv from cinder.api.v3 import types from cinder.policies import type_extra_specs as policy from cinder.policies import volume_type as type_policy from cinder.tests.unit.api import fakes as fake_api from cinder.tests.unit.policies import base from cinder.tests.unit import utils as test_utils @ddt.ddt class TypeExtraSpecsPolicyTest(base.BasePolicyTest): """Verify extra specs policy settings for the types API""" # Deprecated check_str="" allows anyone to read extra specs authorized_readers = [ 'legacy_admin', 'legacy_owner', 'system_admin', 'project_admin', 'project_member', 'project_reader', 'project_foo', 'system_member', 'system_reader', 'system_foo', 'other_project_member', 'other_project_reader', ] unauthorized_readers = [ ] authorized_admins = [ 'legacy_admin', 'system_admin', 'project_admin', ] unauthorized_admins = [ 'legacy_owner', 'system_member', 'system_reader', 'system_foo', 'project_member', 'project_reader', 'project_foo', 'other_project_member', 'other_project_reader', ] unauthorized_exceptions = [] # Basic policy test is without enforcing scope (which cinder doesn't # yet support) and deprecated rules enabled. def setUp(self, enforce_scope=False, enforce_new_defaults=False, *args, **kwargs): super().setUp(enforce_scope, enforce_new_defaults, *args, **kwargs) self.controller = types_extra_specs.VolumeTypeExtraSpecsController() self.api_path = '/v3/%s/types' % (self.project_id) self.api_version = mv.BASE_VERSION @ddt.data(*base.all_users) def test_get_all_policy(self, user_id): vol_type = test_utils.create_volume_type(self.project_admin_context, testcase_instance=self, name='fake_vol_type', extra_specs={'foo': 'bar'}) rule_name = policy.GET_ALL_POLICY url = '%s/%s/extra_specs' % (self.api_path, vol_type.id) req = fake_api.HTTPRequest.blank(url, version=self.api_version) self.common_policy_check(user_id, self.authorized_readers, self.unauthorized_readers, self.unauthorized_exceptions, rule_name, self.controller.index, req, type_id=vol_type.id) @ddt.data(*base.all_users) def test_get_policy(self, user_id): vol_type = test_utils.create_volume_type(self.project_admin_context, testcase_instance=self, name='fake_vol_type', extra_specs={'foo': 'bar'}) rule_name = policy.GET_POLICY url = '%s/%s/extra_specs/foo' % (self.api_path, vol_type.id) req = fake_api.HTTPRequest.blank(url, version=self.api_version) # Relax the READ_SENSITIVE_POLICY policy so that any user is able # to "see" the spec. 
self.policy.set_rules({policy.READ_SENSITIVE_POLICY: ""}, overwrite=False) self.common_policy_check(user_id, self.authorized_readers, self.unauthorized_readers, self.unauthorized_exceptions, rule_name, self.controller.show, req, type_id=vol_type.id, id='foo') @ddt.data(*base.all_users) def test_create_policy(self, user_id): vol_type = test_utils.create_volume_type(self.project_admin_context, testcase_instance=self, name='fake_vol_type') rule_name = policy.CREATE_POLICY url = '%s/%s/extra_specs' % (self.api_path, vol_type.id) req = fake_api.HTTPRequest.blank(url, version=self.api_version) req.method = 'POST' body = { "extra_specs": { "foo": "bar", } } self.common_policy_check(user_id, self.authorized_admins, self.unauthorized_admins, self.unauthorized_exceptions, rule_name, self.controller.create, req, type_id=vol_type.id, body=body) @ddt.data(*base.all_users) def test_update_policy(self, user_id): vol_type = test_utils.create_volume_type(self.project_admin_context, testcase_instance=self, name='fake_vol_type', extra_specs={'foo': 'bar'}) rule_name = policy.UPDATE_POLICY url = '%s/%s/extra_specs/foo' % (self.api_path, vol_type.id) req = fake_api.HTTPRequest.blank(url, version=self.api_version) req.method = 'PUT' body = {"foo": "zap"} self.common_policy_check(user_id, self.authorized_admins, self.unauthorized_admins, self.unauthorized_exceptions, rule_name, self.controller.update, req, type_id=vol_type.id, id='foo', body=body) @ddt.data(*base.all_users) def test_delete_policy(self, user_id): vol_type = test_utils.create_volume_type(self.project_admin_context, testcase_instance=self, name='fake_vol_type', extra_specs={'foo': 'bar'}) rule_name = policy.DELETE_POLICY url = '%s/%s/extra_specs/foo' % (self.api_path, vol_type.id) req = fake_api.HTTPRequest.blank(url, version=self.api_version) req.method = 'DELETE' self.common_policy_check(user_id, self.authorized_admins, self.unauthorized_admins, self.unauthorized_exceptions, rule_name, self.controller.delete, req, type_id=vol_type.id, id='foo') @ddt.data(*base.all_users) def test_read_sensitive_policy(self, user_id): # The 'multiattach' extra spec is user visible, and the # 'sensitive' extra spec should not be user visible. extra_specs = { 'multiattach': ' True', 'sensitive': 'secret', } vol_type = test_utils.create_volume_type(self.project_admin_context, testcase_instance=self, name='fake_vol_type', extra_specs=extra_specs) rule_name = policy.READ_SENSITIVE_POLICY url = '%s/%s' % (self.api_path, vol_type.id) req = fake_api.HTTPRequest.blank(url, version=self.api_version) # Relax these policies in order to get past those checks. self.policy.set_rules({type_policy.GET_POLICY: ""}, overwrite=False) self.policy.set_rules({type_policy.EXTRA_SPEC_POLICY: ""}, overwrite=False) # With the relaxed policies, all users are authorized because # failing the READ_SENSITIVE_POLICY policy check is not fatal. 
authorized_users = [user_id] unauthorized_users = [] controller = types.VolumeTypesController() response = self.common_policy_check(user_id, authorized_users, unauthorized_users, self.unauthorized_exceptions, rule_name, controller.show, req, id=vol_type.id) if user_id in self.authorized_admins: # Admins should see all extra specs expected = extra_specs else: # Non-admins should only see user visible extra specs expected = {'multiattach': ' True'} self.assertDictEqual(expected, response['volume_type']['extra_specs']) class TypeExtraSpecsPolicySecureRbacTest(TypeExtraSpecsPolicyTest): authorized_readers = [ 'legacy_admin', 'system_admin', 'project_admin', 'project_member', 'project_reader', 'system_member', 'system_reader', 'other_project_member', 'other_project_reader', ] unauthorized_readers = [ # These are unauthorized because they don't have the reader role 'legacy_owner', 'project_foo', 'system_foo', ] # NOTE(Xena): The authorized_admins and unauthorized_admins are the same # as the TypeExtraSpecsPolicyTest. This is because in Xena the "admin only" # rules are the legacy RULE_ADMIN_API. This will change in Yoga, when # RULE_ADMIN_API will be deprecated in favor of the SYSTEM_ADMIN rule that # is scope based. authorized_admins = [ 'legacy_admin', 'system_admin', 'project_admin', ] unauthorized_admins = [ 'legacy_owner', 'system_member', 'system_reader', 'system_foo', 'project_member', 'project_reader', 'project_foo', 'other_project_member', 'other_project_reader', ] def setUp(self, *args, **kwargs): # Test secure RBAC by disabling deprecated policy rules (scope # is still not enabled). super().setUp(enforce_scope=False, enforce_new_defaults=True, *args, **kwargs) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/policies/test_volume.py0000664000175000017500000010062000000000000023202 0ustar00zuulzuul00000000000000# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
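# ---------------------------------------------------------------------------
# Illustrative sketch (not cinder's implementation): test_read_sensitive_policy
# above asserts that callers who fail the READ_SENSITIVE_POLICY check only see
# the user visible extra specs, while admins see the full set.  A minimal
# version of that filtering is sketched below; the allow-list contents and the
# helper name are assumptions chosen for the example.
# ---------------------------------------------------------------------------
USER_VISIBLE_EXTRA_SPECS = frozenset(['multiattach', 'replication_enabled'])


def filter_extra_specs(extra_specs, may_read_sensitive):
    """Return all extra specs, or only the user visible subset."""
    if may_read_sensitive:
        return dict(extra_specs)
    return {key: value for key, value in extra_specs.items()
            if key in USER_VISIBLE_EXTRA_SPECS}


_specs = {'multiattach': ' True', 'sensitive': 'secret'}
assert filter_extra_specs(_specs, may_read_sensitive=True) == _specs
assert filter_extra_specs(_specs, may_read_sensitive=False) == {
    'multiattach': ' True'}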
from http import HTTPStatus from unittest import mock import ddt from cinder.api.contrib import volume_encryption_metadata from cinder.api.contrib import volume_tenant_attribute from cinder.api.v3 import volumes from cinder import exception from cinder.policies import volumes as volume_policies from cinder.tests.unit.api import fakes as fake_api from cinder.tests.unit import fake_constants from cinder.tests.unit.policies import base from cinder.tests.unit.policies import test_base from cinder.tests.unit import utils as test_utils from cinder.volume import api as volume_api # TODO(yikun): The below policy test cases should be added: # * HOST_ATTRIBUTE_POLICY # * MIG_ATTRIBUTE_POLICY class VolumePolicyTests(test_base.CinderPolicyTests): def test_admin_can_create_volume(self): admin_context = self.admin_context path = '/v3/%(project_id)s/volumes' % { 'project_id': admin_context.project_id } body = {"volume": {"size": 1}} response = self._get_request_response(admin_context, path, 'POST', body=body) self.assertEqual(HTTPStatus.ACCEPTED, response.status_int) def test_nonadmin_user_can_create_volume(self): user_context = self.user_context path = '/v3/%(project_id)s/volumes' % { 'project_id': user_context.project_id } body = {"volume": {"size": 1}} response = self._get_request_response(user_context, path, 'POST', body=body) self.assertEqual(HTTPStatus.ACCEPTED, response.status_int) def test_admin_can_create_volume_from_image(self): admin_context = self.admin_context path = '/v3/%(project_id)s/volumes' % { 'project_id': admin_context.project_id } body = {"volume": {"size": 1, "image_id": fake_constants.IMAGE_ID}} response = self._get_request_response(admin_context, path, 'POST', body=body) self.assertEqual(HTTPStatus.ACCEPTED, response.status_int) def test_nonadmin_user_can_create_volume_from_image(self): user_context = self.user_context path = '/v3/%(project_id)s/volumes' % { 'project_id': user_context.project_id } body = {"volume": {"size": 1, "image_id": fake_constants.IMAGE_ID}} response = self._get_request_response(user_context, path, 'POST', body=body) self.assertEqual(HTTPStatus.ACCEPTED, response.status_int) @mock.patch.object(volume_api.API, 'get_volume') def test_admin_can_show_volumes(self, mock_volume): # Make sure administrators are authorized to list volumes admin_context = self.admin_context volume = self._create_fake_volume(admin_context) mock_volume.return_value = volume path = '/v3/%(project_id)s/volumes/%(volume_id)s' % { 'project_id': admin_context.project_id, 'volume_id': volume.id } response = self._get_request_response(admin_context, path, 'GET') self.assertEqual(HTTPStatus.OK, response.status_int) self.assertEqual(response.json_body['volume']['id'], volume.id) @mock.patch.object(volume_api.API, 'get_volume') def test_owner_can_show_volumes(self, mock_volume): # Make sure owners are authorized to list their volumes user_context = self.user_context volume = self._create_fake_volume(user_context) mock_volume.return_value = volume path = '/v3/%(project_id)s/volumes/%(volume_id)s' % { 'project_id': user_context.project_id, 'volume_id': volume.id } response = self._get_request_response(user_context, path, 'GET') self.assertEqual(HTTPStatus.OK, response.status_int) self.assertEqual(response.json_body['volume']['id'], volume.id) @mock.patch.object(volume_api.API, 'get_volume') def test_owner_cannot_show_volumes_for_others(self, mock_volume): # Make sure volumes are only exposed to their owners owner_context = self.user_context non_owner_context = self.other_user_context volume = 
self._create_fake_volume(owner_context) mock_volume.return_value = volume path = '/v3/%(project_id)s/volumes/%(volume_id)s' % { 'project_id': non_owner_context.project_id, 'volume_id': volume.id } response = self._get_request_response(non_owner_context, path, 'GET') # NOTE(lbragstad): Technically, this user isn't supposed to see this # volume, because they didn't create it and it lives in a different # project. Does cinder return a 404 in cases like this? Or is a 403 # expected? self.assertEqual(HTTPStatus.NOT_FOUND, response.status_int) def test_admin_can_get_all_volumes_detail(self): # Make sure administrators are authorized to list volumes admin_context = self.admin_context volume = self._create_fake_volume(admin_context) path = '/v3/%(project_id)s/volumes/detail' % { 'project_id': admin_context.project_id } response = self._get_request_response(admin_context, path, 'GET') self.assertEqual(HTTPStatus.OK, response.status_int) res_vol = response.json_body['volumes'][0] self.assertEqual(volume.id, res_vol['id']) def test_owner_can_get_all_volumes_detail(self): # Make sure owners are authorized to list volumes user_context = self.user_context volume = self._create_fake_volume(user_context) path = '/v3/%(project_id)s/volumes/detail' % { 'project_id': user_context.project_id } response = self._get_request_response(user_context, path, 'GET') self.assertEqual(HTTPStatus.OK, response.status_int) res_vol = response.json_body['volumes'][0] self.assertEqual(volume.id, res_vol['id']) @mock.patch.object(volume_api.API, 'get') def test_admin_can_update_volumes(self, mock_volume): admin_context = self.admin_context volume = self._create_fake_volume(admin_context) mock_volume.return_value = volume path = '/v3/%(project_id)s/volumes/%(volume_id)s' % { 'project_id': admin_context.project_id, 'volume_id': volume.id } body = {"volume": {"name": "update_name"}} response = self._get_request_response(admin_context, path, 'PUT', body=body) self.assertEqual(HTTPStatus.OK, response.status_int) @mock.patch.object(volume_api.API, 'get') def test_owner_can_update_volumes(self, mock_volume): user_context = self.user_context volume = self._create_fake_volume(user_context) mock_volume.return_value = volume path = '/v3/%(project_id)s/volumes/%(volume_id)s' % { 'project_id': user_context.project_id, 'volume_id': volume.id } body = {"volume": {"name": "update_name"}} response = self._get_request_response(user_context, path, 'PUT', body=body) self.assertEqual(HTTPStatus.OK, response.status_int) @mock.patch.object(volume_api.API, 'get') def test_owner_cannot_update_volumes_for_others(self, mock_volume): owner_context = self.user_context non_owner_context = self.other_user_context volume = self._create_fake_volume(owner_context) mock_volume.return_value = volume path = '/v3/%(project_id)s/volumes/%(volume_id)s' % { 'project_id': non_owner_context.project_id, 'volume_id': volume.id } body = {"volume": {"name": "update_name"}} response = self._get_request_response(non_owner_context, path, 'PUT', body=body) self.assertEqual(HTTPStatus.FORBIDDEN, response.status_int) @mock.patch.object(volume_api.API, 'get') def test_owner_can_delete_volumes(self, mock_volume): user_context = self.user_context volume = self._create_fake_volume(user_context) mock_volume.return_value = volume path = '/v3/%(project_id)s/volumes/%(volume_id)s' % { 'project_id': user_context.project_id, 'volume_id': volume.id } response = self._get_request_response(user_context, path, 'DELETE') self.assertEqual(HTTPStatus.ACCEPTED, response.status_int) 
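# ---------------------------------------------------------------------------
# Illustrative summary (not part of the suite): the legacy VolumePolicyTests
# in this module assert raw HTTP status codes rather than policy rule names.
# The expectations they encode are restated below as a small lookup table so
# the owner / non-owner split is easy to see; the table is a readability aid,
# not an exhaustive API contract.
# ---------------------------------------------------------------------------
from http import HTTPStatus

EXPECTED_STATUS = {
    # (operation, caller) -> status code asserted by the surrounding tests
    ('create volume', 'owner or admin'): HTTPStatus.ACCEPTED,
    ('show volume', 'owner or admin'): HTTPStatus.OK,
    ('show volume', 'non-owner'): HTTPStatus.NOT_FOUND,  # hidden, not merely denied
    ('update volume', 'owner or admin'): HTTPStatus.OK,
    ('update volume', 'non-owner'): HTTPStatus.FORBIDDEN,
    ('delete volume', 'owner or admin'): HTTPStatus.ACCEPTED,
    ('delete volume', 'non-owner'): HTTPStatus.FORBIDDEN,
    ('volume metadata CRUD', 'owner or admin'): HTTPStatus.OK,
    ('volume metadata CRUD', 'non-owner'): HTTPStatus.FORBIDDEN,
}

assert EXPECTED_STATUS[('show volume', 'non-owner')] is HTTPStatus.NOT_FOUND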
@mock.patch.object(volume_api.API, 'get') def test_admin_can_delete_volumes(self, mock_volume): admin_context = self.admin_context volume = self._create_fake_volume(admin_context) mock_volume.return_value = volume path = '/v3/%(project_id)s/volumes/%(volume_id)s' % { 'project_id': admin_context.project_id, 'volume_id': volume.id } response = self._get_request_response(admin_context, path, 'DELETE') self.assertEqual(HTTPStatus.ACCEPTED, response.status_int) @mock.patch.object(volume_api.API, 'get') def test_owner_cannot_delete_volumes_for_others(self, mock_volume): owner_context = self.user_context non_owner_context = self.other_user_context volume = self._create_fake_volume(owner_context) mock_volume.return_value = volume path = '/v3/%(project_id)s/volumes/%(volume_id)s' % { 'project_id': non_owner_context.project_id, 'volume_id': volume.id } response = self._get_request_response(non_owner_context, path, 'DELETE') self.assertEqual(HTTPStatus.FORBIDDEN, response.status_int) @mock.patch.object(volume_api.API, 'get_volume') def test_admin_can_show_tenant_id_in_volume(self, mock_volume): # Make sure administrators are authorized to show tenant_id admin_context = self.admin_context volume = self._create_fake_volume(admin_context) mock_volume.return_value = volume path = '/v3/%(project_id)s/volumes/%(volume_id)s' % { 'project_id': admin_context.project_id, 'volume_id': volume.id } response = self._get_request_response(admin_context, path, 'GET') self.assertEqual(HTTPStatus.OK, response.status_int) res_vol = response.json_body['volume'] self.assertEqual(admin_context.project_id, res_vol['os-vol-tenant-attr:tenant_id']) @mock.patch.object(volume_api.API, 'get_volume') def test_owner_can_show_tenant_id_in_volume(self, mock_volume): # Make sure owners are authorized to show tenant_id in volume user_context = self.user_context volume = self._create_fake_volume(user_context) mock_volume.return_value = volume path = '/v3/%(project_id)s/volumes/%(volume_id)s' % { 'project_id': user_context.project_id, 'volume_id': volume.id } response = self._get_request_response(user_context, path, 'GET') self.assertEqual(HTTPStatus.OK, response.status_int) res_vol = response.json_body['volume'] self.assertEqual(user_context.project_id, res_vol['os-vol-tenant-attr:tenant_id']) def test_admin_can_show_tenant_id_in_volume_detail(self): # Make sure admins are authorized to show tenant_id in volume detail admin_context = self.admin_context self._create_fake_volume(admin_context) path = '/v3/%(project_id)s/volumes/detail' % { 'project_id': admin_context.project_id } response = self._get_request_response(admin_context, path, 'GET') self.assertEqual(HTTPStatus.OK, response.status_int) res_vol = response.json_body['volumes'][0] # Make sure owners are authorized to show tenant_id self.assertEqual(admin_context.project_id, res_vol['os-vol-tenant-attr:tenant_id']) def test_owner_can_show_tenant_id_in_volume_detail(self): # Make sure owners are authorized to show tenant_id in volume detail user_context = self.user_context self._create_fake_volume(user_context) path = '/v3/%(project_id)s/volumes/detail' % { 'project_id': user_context.project_id } response = self._get_request_response(user_context, path, 'GET') self.assertEqual(HTTPStatus.OK, response.status_int) res_vol = response.json_body['volumes'][0] # Make sure owners are authorized to show tenant_id self.assertEqual(user_context.project_id, res_vol['os-vol-tenant-attr:tenant_id']) def test_admin_can_create_metadata(self): admin_context = self.admin_context volume = 
self._create_fake_volume(admin_context, metadata={"k": "v"}) path = '/v3/%(project_id)s/volumes/%(volume_id)s/metadata' % { 'project_id': admin_context.project_id, 'volume_id': volume.id } body = {"metadata": {"k1": "v1"}} response = self._get_request_response(admin_context, path, 'POST', body=body) self.assertEqual(HTTPStatus.OK, response.status_int) def test_admin_can_get_metadata(self): admin_context = self.admin_context volume = self._create_fake_volume(admin_context, metadata={"k": "v"}) path = '/v3/%(project_id)s/volumes/%(volume_id)s/metadata' % { 'project_id': admin_context.project_id, 'volume_id': volume.id } response = self._get_request_response(admin_context, path, 'GET') self.assertEqual(HTTPStatus.OK, response.status_int) res_meta = response.json_body['metadata'] self.assertIn('k', res_meta) self.assertEqual('v', res_meta['k']) def test_admin_can_update_metadata(self): admin_context = self.admin_context volume = self._create_fake_volume(admin_context, metadata={"k": "v"}) path = '/v3/%(project_id)s/volumes/%(volume_id)s/metadata' % { 'project_id': admin_context.project_id, 'volume_id': volume.id } body = {"metadata": {"k": "v2"}} response = self._get_request_response(admin_context, path, 'PUT', body=body) self.assertEqual(HTTPStatus.OK, response.status_int) res_meta = response.json_body['metadata'] self.assertIn('k', res_meta) self.assertEqual('v2', res_meta['k']) def test_admin_can_delete_metadata(self): admin_context = self.admin_context volume = self._create_fake_volume(admin_context, metadata={"k": "v"}) path = '/v3/%(project_id)s/volumes/%(volume_id)s/metadata/%(key)s' % { 'project_id': admin_context.project_id, 'volume_id': volume.id, 'key': 'k' } response = self._get_request_response(admin_context, path, 'DELETE') self.assertEqual(HTTPStatus.OK, response.status_int) def test_owner_can_create_metadata(self): user_context = self.user_context volume = self._create_fake_volume(user_context, metadata={"k": "v"}) path = '/v3/%(project_id)s/volumes/%(volume_id)s/metadata' % { 'project_id': user_context.project_id, 'volume_id': volume.id } body = {"metadata": {"k1": "v1"}} response = self._get_request_response(user_context, path, 'POST', body=body) self.assertEqual(HTTPStatus.OK, response.status_int) def test_owner_can_get_metadata(self): user_context = self.user_context volume = self._create_fake_volume(user_context, metadata={"k": "v"}) path = '/v3/%(project_id)s/volumes/%(volume_id)s/metadata' % { 'project_id': user_context.project_id, 'volume_id': volume.id } response = self._get_request_response(user_context, path, 'GET') self.assertEqual(HTTPStatus.OK, response.status_int) res_meta = response.json_body['metadata'] self.assertIn('k', res_meta) self.assertEqual('v', res_meta['k']) def test_owner_can_update_metadata(self): user_context = self.user_context volume = self._create_fake_volume(user_context, metadata={"k": "v"}) path = '/v3/%(project_id)s/volumes/%(volume_id)s/metadata' % { 'project_id': user_context.project_id, 'volume_id': volume.id } body = {"metadata": {"k": "v2"}} response = self._get_request_response(user_context, path, 'PUT', body=body) self.assertEqual(HTTPStatus.OK, response.status_int) res_meta = response.json_body['metadata'] self.assertIn('k', res_meta) self.assertEqual('v2', res_meta['k']) def test_owner_can_delete_metadata(self): user_context = self.user_context volume = self._create_fake_volume(user_context, metadata={"k": "v"}) path = '/v3/%(project_id)s/volumes/%(volume_id)s/metadata/%(key)s' % { 'project_id': user_context.project_id, 'volume_id': 
volume.id, 'key': 'k' } response = self._get_request_response(user_context, path, 'DELETE') self.assertEqual(HTTPStatus.OK, response.status_int) @mock.patch.object(volume_api.API, 'get') def test_owner_cannot_create_metadata_for_others(self, mock_volume): owner_context = self.user_context non_owner_context = self.other_user_context volume = self._create_fake_volume(owner_context, metadata={"k": "v"}) mock_volume.return_value = volume path = '/v3/%(project_id)s/volumes/%(volume_id)s/metadata' % { 'project_id': non_owner_context.project_id, 'volume_id': volume.id } body = {"metadata": {"k1": "v1"}} response = self._get_request_response(non_owner_context, path, 'POST', body=body) self.assertEqual(HTTPStatus.FORBIDDEN, response.status_int) @mock.patch.object(volume_api.API, 'get') def test_owner_cannot_get_metadata_for_others(self, mock_volume): owner_context = self.user_context non_owner_context = self.other_user_context volume = self._create_fake_volume(owner_context, metadata={"k": "v"}) mock_volume.return_value = volume path = '/v3/%(project_id)s/volumes/%(volume_id)s/metadata' % { 'project_id': non_owner_context.project_id, 'volume_id': volume.id } response = self._get_request_response(non_owner_context, path, 'GET') self.assertEqual(HTTPStatus.FORBIDDEN, response.status_int) @mock.patch.object(volume_api.API, 'get') def test_owner_cannot_update_metadata_for_others(self, mock_volume): owner_context = self.user_context non_owner_context = self.other_user_context volume = self._create_fake_volume(owner_context, metadata={"k": "v"}) mock_volume.return_value = volume path = '/v3/%(project_id)s/volumes/%(volume_id)s/metadata' % { 'project_id': non_owner_context.project_id, 'volume_id': volume.id } body = {"metadata": {"k": "v2"}} response = self._get_request_response(non_owner_context, path, 'PUT', body=body) self.assertEqual(HTTPStatus.FORBIDDEN, response.status_int) @mock.patch.object(volume_api.API, 'get') def test_owner_cannot_delete_metadata_for_others(self, mock_volume): owner_context = self.user_context non_owner_context = self.other_user_context volume = self._create_fake_volume(owner_context, metadata={"k": "v"}) mock_volume.return_value = volume path = '/v3/%(project_id)s/volumes/%(volume_id)s/metadata/%(key)s' % { 'project_id': non_owner_context.project_id, 'volume_id': volume.id, 'key': 'k' } response = self._get_request_response(non_owner_context, path, 'DELETE') self.assertEqual(HTTPStatus.FORBIDDEN, response.status_int) @ddt.ddt class VolumesPolicyTest(base.BasePolicyTest): authorized_readers = [ 'legacy_admin', 'legacy_owner', 'system_admin', 'project_admin', 'project_member', 'project_reader', 'project_foo', ] unauthorized_readers = [ 'system_member', 'system_reader', 'system_foo', 'other_project_member', 'other_project_reader', ] authorized_members = [ 'legacy_admin', 'legacy_owner', 'system_admin', 'project_admin', 'project_member', 'project_reader', 'project_foo', ] unauthorized_members = [ 'system_member', 'system_reader', 'system_foo', 'other_project_member', 'other_project_reader', ] create_authorized_users = [ 'legacy_admin', 'legacy_owner', 'system_admin', 'project_admin', 'project_member', 'project_reader', 'project_foo', # The other_* users are allowed because we don't have any check # mechanism in the code to validate this, these are validated on # the WSGI layer 'other_project_member', 'other_project_reader', ] create_unauthorized_users = [ 'system_member', 'system_reader', 'system_foo', ] # Basic policy test is without enforcing scope (which cinder doesn't # yet 
support) and deprecated rules enabled. def setUp(self, enforce_scope=False, enforce_new_defaults=False, *args, **kwargs): super().setUp(enforce_scope, enforce_new_defaults, *args, **kwargs) self.controller = volumes.VolumeController(mock.MagicMock()) self.api_path = '/v3/%s/volumes' % (self.project_id) def _create_volume(self): vol_type = test_utils.create_volume_type(self.project_admin_context, name='fake_vol_type', testcase_instance=self) volume = test_utils.create_volume(self.project_member_context, volume_type_id=vol_type.id, testcase_instance=self) return volume @ddt.data(*base.all_users) def test_create_volume_policy(self, user_id): rule_name = volume_policies.CREATE_POLICY url = self.api_path req = fake_api.HTTPRequest.blank(url) req.method = 'POST' body = {"volume": {"size": 1}} unauthorized_exceptions = [] self.common_policy_check(user_id, self.create_authorized_users, self.create_unauthorized_users, unauthorized_exceptions, rule_name, self.controller.create, req, body=body) @ddt.data(*base.all_users) @mock.patch('cinder.api.v3.volumes.VolumeController._image_uuid_from_ref', return_value=fake_constants.IMAGE_ID) @mock.patch('cinder.api.v3.volumes.VolumeController._get_image_snapshot', return_value=None) @mock.patch('cinder.volume.flows.api.create_volume.' 'ExtractVolumeRequestTask._get_image_metadata', return_value=None) def test_create_volume_from_image_policy( self, user_id, mock_image_from_ref, mock_image_snap, mock_img_meta): rule_name = volume_policies.CREATE_FROM_IMAGE_POLICY url = self.api_path req = fake_api.HTTPRequest.blank(url) req.method = 'POST' body = {"volume": {"size": 1, "image_id": fake_constants.IMAGE_ID}} unauthorized_exceptions = [] self.common_policy_check(user_id, self.create_authorized_users, self.create_unauthorized_users, unauthorized_exceptions, rule_name, self.controller.create, req, body=body) @ddt.data(*base.all_users) def test_create_multiattach_volume_policy(self, user_id): vol_type = test_utils.create_volume_type( self.project_admin_context, name='multiattach_type', extra_specs={'multiattach': ' True'}) rule_name = volume_policies.MULTIATTACH_POLICY url = self.api_path req = fake_api.HTTPRequest.blank(url) req.method = 'POST' body = {"volume": {"size": 1, "volume_type": vol_type.id}} # Relax the CREATE_POLICY in order to get past that check. self.policy.set_rules({volume_policies.CREATE_POLICY: ""}, overwrite=False) unauthorized_exceptions = [] self.common_policy_check(user_id, self.create_authorized_users, self.create_unauthorized_users, unauthorized_exceptions, rule_name, self.controller.create, req, body=body) @ddt.data(*base.all_users) def test_get_volume_policy(self, user_id): volume = self._create_volume() rule_name = volume_policies.GET_POLICY url = '%s/%s' % (self.api_path, volume.id) req = fake_api.HTTPRequest.blank(url) unauthorized_exceptions = [ exception.VolumeNotFound, ] self.common_policy_check(user_id, self.authorized_readers, self.unauthorized_readers, unauthorized_exceptions, rule_name, self.controller.show, req, id=volume.id) @ddt.data(*base.all_users) def test_get_all_volumes_policy(self, user_id): self._create_volume() rule_name = volume_policies.GET_ALL_POLICY url = self.api_path req = fake_api.HTTPRequest.blank(url) # Generally, any logged in user can list all volumes. authorized_users = [user_id] unauthorized_users = [] # The exception is when deprecated rules are disabled, in which case # roles are enforced. Users without the 'reader' role should be # blocked. 
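        # (Under the new defaults this effectively requires at least the
        # 'reader' role on the project; personas such as project_foo and
        # system_foo presumably carry only a custom role, so they are
        # expected to be rejected here.)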
if self.enforce_new_defaults: context = self.create_context(user_id) if 'reader' not in context.roles: authorized_users = [] unauthorized_users = [user_id] response = self.common_policy_check(user_id, authorized_users, unauthorized_users, [], rule_name, self.controller.index, req) # For some users, even if they're authorized, the list of volumes # will be empty if they are not in the volume's project. empty_response_users = [ *self.unauthorized_readers, # legacy_admin and system_admin do not have a project_id, and # so the list of volumes returned will be empty. 'legacy_admin', 'system_admin', ] volumes = response['volumes'] if response else [] volume_count = 0 if user_id in empty_response_users else 1 self.assertEqual(volume_count, len(volumes)) @ddt.data(*base.all_users) @mock.patch('cinder.db.volume_encryption_metadata_get') def test_get_volume_encryption_meta_policy(self, user_id, mock_encrypt_meta): encryption_key_id = fake_constants.ENCRYPTION_KEY_ID mock_encrypt_meta.return_value = ( {'encryption_key_id': encryption_key_id}) controller = ( volume_encryption_metadata.VolumeEncryptionMetadataController()) volume = self._create_volume() rule_name = volume_policies.ENCRYPTION_METADATA_POLICY url = '%s/%s/encryption' % (self.api_path, volume.id) req = fake_api.HTTPRequest.blank(url) unauthorized_exceptions = [ exception.VolumeNotFound, ] resp = self.common_policy_check( user_id, self.authorized_readers, self.unauthorized_readers, unauthorized_exceptions, rule_name, controller.index, req, volume.id) if user_id in self.authorized_readers: self.assertEqual(encryption_key_id, resp['encryption_key_id']) @ddt.data(*base.all_users) def test_get_volume_tenant_attr_policy(self, user_id): controller = volume_tenant_attribute.VolumeTenantAttributeController() volume = self._create_volume() volume = volume.obj_to_primitive()['versioned_object.data'] rule_name = volume_policies.TENANT_ATTRIBUTE_POLICY url = '%s/%s' % (self.api_path, volume['id']) req = fake_api.HTTPRequest.blank(url) req.get_db_volume = mock.MagicMock() req.get_db_volume.return_value = volume resp_obj = mock.MagicMock(obj={'volume': volume}) unauthorized_exceptions = [ exception.VolumeNotFound, ] self.assertNotIn('os-vol-tenant-attr:tenant_id', volume.keys()) self.common_policy_check( user_id, self.authorized_readers, self.unauthorized_readers, unauthorized_exceptions, rule_name, controller.show, req, resp_obj, volume['id'], fatal=False) if user_id in self.authorized_readers: self.assertIn('os-vol-tenant-attr:tenant_id', volume.keys()) @ddt.data(*base.all_users) def test_update_volume_policy(self, user_id): volume = self._create_volume() rule_name = volume_policies.UPDATE_POLICY url = '%s/%s' % (self.api_path, volume.id) body = {"volume": {"name": "update_name"}} req = fake_api.HTTPRequest.blank(url) req.method = 'PUT' unauthorized_exceptions = [ exception.VolumeNotFound, ] self.common_policy_check( user_id, self.authorized_members, self.unauthorized_members, unauthorized_exceptions, rule_name, self.controller.update, req, id=volume.id, body=body) @ddt.data(*base.all_users) def test_delete_volume_policy(self, user_id): volume = self._create_volume() rule_name = volume_policies.DELETE_POLICY url = '%s/%s' % (self.api_path, volume.id) req = fake_api.HTTPRequest.blank(url) req.method = 'DELETE' unauthorized_exceptions = [ exception.VolumeNotFound, ] self.common_policy_check( user_id, self.authorized_members, self.unauthorized_members, unauthorized_exceptions, rule_name, self.controller.delete, req, id=volume.id) class 
VolumesPolicySecureRbacTest(VolumesPolicyTest): create_authorized_users = [ 'legacy_admin', 'system_admin', 'project_admin', 'project_member', 'other_project_member', ] create_unauthorized_users = [ 'legacy_owner', 'system_member', 'system_reader', 'system_foo', 'other_project_reader', 'project_foo', 'project_reader', ] authorized_readers = [ 'legacy_admin', 'system_admin', 'project_admin', 'project_member', 'project_reader', ] unauthorized_readers = [ 'legacy_owner', 'system_member', 'system_reader', 'system_foo', 'project_foo', 'other_project_member', 'other_project_reader', ] authorized_members = [ 'legacy_admin', 'system_admin', 'project_admin', 'project_member', ] unauthorized_members = [ 'legacy_owner', 'system_member', 'system_reader', 'system_foo', 'project_reader', 'project_foo', 'other_project_member', 'other_project_reader', ] def setUp(self, *args, **kwargs): # Test secure RBAC by disabling deprecated policy rules (scope # is still not enabled). super().setUp(enforce_scope=False, enforce_new_defaults=True, *args, **kwargs) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/policies/test_volume_access.py0000664000175000017500000002130500000000000024525 0ustar00zuulzuul00000000000000# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from copy import deepcopy import ddt from cinder.api.contrib import volume_type_access as vta from cinder.api import microversions as mv from cinder import objects from cinder.policies import volume_access as vta_policies from cinder.tests.unit.api.contrib import test_volume_type_access as vta_test from cinder.tests.unit.api import fakes as fake_api from cinder.tests.unit import fake_constants as fake from cinder.tests.unit.policies import base IS_PUBLIC_FIELD = 'os-volume-type-access:is_public' # the original uses a class var and admin context class FakeRequest(vta_test.FakeRequest): def __init__(self, context): self.environ = {"cinder.context": context} FAKE_RESP_OBJ = { 'volume_type': {'id': fake.VOLUME_TYPE_ID}, 'volume_types': [ {'id': fake.VOLUME_TYPE_ID}, {'id': fake.VOLUME_TYPE3_ID} ]} # need an instance var so this will work with ddt class FakeResponse(vta_test.FakeResponse): def __init__(self): self.obj = deepcopy(FAKE_RESP_OBJ) @ddt.ddt class VolumeTypeAccessFieldPolicyTest(base.BasePolicyTest): # NOTE: We are testing directly against the extension controller. # Its call to context.authorize doesn't provide a target, so # "is_admin" or "project_id:%(project_id)s" always matches. authorized_users = [ 'legacy_admin', 'project_admin', 'system_admin', 'legacy_owner', 'project_member', 'project_reader', 'project_foo', 'system_member', 'system_reader', 'system_foo', 'other_project_member', 'other_project_reader', ] # note: authorize is called with fatal=False, so everyone is a winner! everyone = authorized_users # Basic policy test is without enforcing scope (which cinder doesn't # yet support) and deprecated rules enabled. 
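    # In configuration terms, these two flags correspond (roughly) to the
    # [oslo_policy] options of the same names:
    #
    #   [oslo_policy]
    #   enforce_scope = False
    #   enforce_new_defaults = False
    #
    # The *SecureRbacTest variants re-run the same tests with
    # enforce_new_defaults = True, i.e. with the deprecated legacy rules
    # turned off.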
def setUp(self, enforce_scope=False, enforce_new_defaults=False, *args, **kwargs): super().setUp(enforce_scope, enforce_new_defaults, *args, **kwargs) self.controller = vta.VolumeTypeActionController() self.rule_name = vta_policies.TYPE_ACCESS_POLICY self.api_version = mv.BASE_VERSION self.api_path = f'/v3/{self.project_id}/types' @ddt.data(*base.all_users) def test_type_access_policy_types_list(self, user_id): unauthorized_exceptions = None req = FakeRequest(self.create_context(user_id)) resp = FakeResponse() self.common_policy_check(user_id, self.everyone, [], unauthorized_exceptions, self.rule_name, self.controller.index, req, resp) # this is where the real check happens if user_id in self.authorized_users: for vol_type in resp.obj['volume_types']: self.assertIn(IS_PUBLIC_FIELD, vol_type) else: for vol_type in resp.obj['volume_types']: self.assertNotIn(IS_PUBLIC_FIELD, vol_type) @ddt.data(*base.all_users) def test_type_access_policy_type_show(self, user_id): unauthorized_exceptions = None req = FakeRequest(self.create_context(user_id)) resp = FakeResponse() self.common_policy_check(user_id, self.everyone, [], unauthorized_exceptions, self.rule_name, self.controller.show, req, resp, fake.VOLUME_TYPE_ID) if user_id in self.authorized_users: self.assertIn(IS_PUBLIC_FIELD, resp.obj['volume_type']) else: self.assertNotIn(IS_PUBLIC_FIELD, resp.obj['volume_type']) @ddt.data(*base.all_users) def test_type_access_policy_type_create(self, user_id): unauthorized_exceptions = None req = FakeRequest(self.create_context(user_id)) resp = FakeResponse() body = None self.common_policy_check(user_id, self.everyone, [], unauthorized_exceptions, self.rule_name, self.controller.create, req, body, resp) if user_id in self.authorized_users: self.assertIn(IS_PUBLIC_FIELD, resp.obj['volume_type']) else: self.assertNotIn(IS_PUBLIC_FIELD, resp.obj['volume_type']) class VolumeTypeAccessFieldPolicySecureRbacTest( VolumeTypeAccessFieldPolicyTest): # Remember that we are testing directly against the extension controller, # so while the below may seem over-permissive, in real life there is # a more selective check that happens first. authorized_users = [ 'legacy_admin', 'project_admin', 'system_admin', 'project_member', 'system_member', 'other_project_member', ] # this will be anyone without the 'admin' or 'member' role unauthorized_users = [ 'legacy_owner', 'project_foo', 'project_reader', 'system_reader', 'system_foo', 'other_project_reader', ] everyone = authorized_users + unauthorized_users def setUp(self, *args, **kwargs): # Test secure RBAC by disabling deprecated policy rules (scope # is still not enabled). super().setUp(enforce_scope=False, enforce_new_defaults=True, *args, **kwargs) @ddt.ddt class VolumeTypeAccessListProjectsPolicyTest(base.BasePolicyTest): authorized_users = [ 'legacy_admin', 'project_admin', 'system_admin', ] unauthorized_users = [ 'legacy_owner', 'project_member', 'project_reader', 'project_foo', 'system_member', 'system_reader', 'system_foo', 'other_project_member', 'other_project_reader', ] # Basic policy test is without enforcing scope (which cinder doesn't # yet support) and deprecated rules enabled. 
    def setUp(self, enforce_scope=False, enforce_new_defaults=False,
              *args, **kwargs):
        super().setUp(enforce_scope, enforce_new_defaults, *args, **kwargs)
        self.controller = vta.VolumeTypeAccessController()
        self.volume_type = objects.VolumeType(
            self.project_admin_context,
            name='private_volume_type',
            is_public=False,
            description='volume type for srbac testing',
            extra_specs=None,
            projects=[self.project_id, self.project_id_other])
        self.volume_type.create()
        self.addCleanup(self.volume_type.destroy)
        self.api_version = mv.BASE_VERSION
        self.api_path = (f'/v3/{self.project_id}/types/'
                         f'{self.volume_type.id}/os-volume-type-access')

    @ddt.data(*base.all_users)
    def test_type_access_who_policy(self, user_id):
        """Test policy for listing projects with access to a volume type."""
        rule_name = vta_policies.TYPE_ACCESS_WHO_POLICY
        unauthorized_exceptions = None
        req = fake_api.HTTPRequest.blank(self.api_path)
        self.common_policy_check(user_id,
                                 self.authorized_users,
                                 self.unauthorized_users,
                                 unauthorized_exceptions,
                                 rule_name,
                                 self.controller.index,
                                 req,
                                 self.volume_type.id)


class VolumeTypeAccessListProjectsPolicySecureRbacTest(
        VolumeTypeAccessListProjectsPolicyTest):

    def setUp(self, *args, **kwargs):
        # Test secure RBAC by disabling deprecated policy rules (scope
        # is still not enabled).
        super().setUp(enforce_scope=False, enforce_new_defaults=True,
                      *args, **kwargs)

././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/policies/test_volume_actions.py0000664000175000017500000014171200000000000024731 0ustar00zuulzuul00000000000000#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
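# NOTE: the *PolicyTest classes in this module funnel their assertions
# through base.BasePolicyTest.common_policy_check().  As a rough, hedged
# sketch (the helper name, signature and behaviour below are illustrative
# assumptions, not the real base-class code), the pattern it implements
# looks something like this:


def _sketch_common_policy_check(testcase, user_id, authorized, unauthorized,
                                allowed_exceptions, rule_name,
                                func, *args, **kwargs):
    """Illustrative only: approximate shape of common_policy_check()."""
    # Hypothetical helper; the real implementation lives in
    # cinder/tests/unit/policies/base.py and differs in detail.
    from cinder import exception as cinder_exception

    try:
        result = func(*args, **kwargs)
    except cinder_exception.PolicyNotAuthorized:
        # The rule under test (rule_name) rejected this persona.
        testcase.assertIn(user_id, unauthorized)
        return None
    except tuple(allowed_exceptions or ()):
        # e.g. VolumeNotFound from a project-scoped DB lookup that happens
        # before the policy check is reached.
        testcase.assertIn(user_id, unauthorized)
        return None
    testcase.assertIn(user_id, authorized)
    return result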
from http import HTTPStatus from unittest import mock import ddt from cinder.api.contrib import admin_actions from cinder.api.contrib import volume_actions from cinder.api import extensions from cinder.api import microversions as mv from cinder.api.v3 import volumes from cinder import exception from cinder.objects import fields from cinder.policies import volume_actions as policy from cinder.policies import volumes as volume_policy from cinder.tests.unit.api import fakes as fake_api from cinder.tests.unit import fake_constants from cinder.tests.unit.policies import base from cinder.tests.unit.policies import test_base from cinder.tests.unit import utils as test_utils from cinder.volume import api as volume_api from cinder.volume import manager as volume_manager @ddt.ddt class VolumeActionsPolicyTest(base.BasePolicyTest): authorized_users = [ 'legacy_admin', 'legacy_owner', 'system_admin', 'project_admin', 'project_member', 'project_reader', 'project_foo', ] unauthorized_users = [ 'system_member', 'system_reader', 'system_foo', 'other_project_member', 'other_project_reader', ] authorized_admins = [ 'legacy_admin', 'system_admin', 'project_admin', ] unauthorized_admins = [ 'legacy_owner', 'system_member', 'system_reader', 'system_foo', 'project_member', 'project_reader', 'project_foo', 'other_project_member', 'other_project_reader', ] # Basic policy test is without enforcing scope (which cinder doesn't # yet support) and deprecated rules enabled. def setUp(self, enforce_scope=False, enforce_new_defaults=False, *args, **kwargs): super().setUp(enforce_scope, enforce_new_defaults, *args, **kwargs) self.ext_mgr = extensions.ExtensionManager() self.controller = volume_actions.VolumeActionsController(self.ext_mgr) self.admin_controller = admin_actions.VolumeAdminController( self.ext_mgr) self.volume_controller = volumes.VolumeController(self.ext_mgr) self.manager = volume_manager.VolumeManager() self.manager.driver = mock.MagicMock() self.manager.driver.initialize_connection = mock.MagicMock() self.manager.driver.initialize_connection.side_effect = ( self._initialize_connection) self.api_path = '/v3/%s/volumes' % (self.project_id) self.api_version = mv.BASE_VERSION self.mock_is_service = self.patch( 'cinder.volume.api.API.is_service_request', return_value=True) def _initialize_connection(self, volume, connector): return {'data': connector} def _create_volume(self, attached=False, **kwargs): vol_type = test_utils.create_volume_type(self.project_admin_context, name='fake_vol_type', testcase_instance=self) volume = test_utils.create_volume(self.project_member_context, volume_type_id=vol_type.id, testcase_instance=self, **kwargs) if attached: volume = test_utils.attach_volume(self.project_member_context, volume.id, fake_constants.INSTANCE_ID, 'fake_host', 'fake_mountpoint') return volume @ddt.data(*base.all_users) def test_extend_policy(self, user_id): volume = self._create_volume() rule_name = policy.EXTEND_POLICY url = '%s/%s/action' % (self.api_path, volume.id) req = fake_api.HTTPRequest.blank(url, version=self.api_version) req.method = 'POST' body = { "os-extend": { "new_size": 3 } } # DB validations will throw VolumeNotFound for some contexts unauthorized_exceptions = [ exception.VolumeNotFound, ] self.common_policy_check(user_id, self.authorized_users, self.unauthorized_users, unauthorized_exceptions, rule_name, self.controller._extend, req, id=volume.id, body=body) @ddt.data(*base.all_users) def test_extend_attached_policy(self, user_id): volume = self._create_volume(attached=True) rule_name = 
policy.EXTEND_ATTACHED_POLICY url = '%s/%s/action' % (self.api_path, volume.id) req = fake_api.HTTPRequest.blank(url, version=mv.VOLUME_EXTEND_INUSE) req.method = 'POST' body = { "os-extend": { "new_size": 3 } } unauthorized_exceptions = [ exception.VolumeNotFound, ] self.common_policy_check(user_id, self.authorized_users, self.unauthorized_users, unauthorized_exceptions, rule_name, self.controller._extend, req, id=volume.id, body=body) @ddt.data(*base.all_users) def test_revert_policy(self, user_id): volume = self._create_volume() snap = test_utils.create_snapshot( self.project_member_context, volume.id, status=fields.SnapshotStatus.AVAILABLE, testcase_instance=self) rule_name = policy.REVERT_POLICY url = '%s/%s/action' % (self.api_path, volume.id) req = fake_api.HTTPRequest.blank(url, version=mv.VOLUME_REVERT) req.method = 'POST' body = { "revert": { "snapshot_id": snap.id } } # Relax the volume:GET_POLICY in order to get past that check. self.policy.set_rules({volume_policy.GET_POLICY: ""}, overwrite=False) unauthorized_exceptions = [ exception.VolumeNotFound, ] self.common_policy_check(user_id, self.authorized_users, self.unauthorized_users, unauthorized_exceptions, rule_name, self.volume_controller.revert, req, id=volume.id, body=body) @ddt.data(*base.all_users) def test_reset_policy(self, user_id): volume = self._create_volume(attached=True) rule_name = policy.RESET_STATUS url = '%s/%s/action' % (self.api_path, volume.id) req = fake_api.HTTPRequest.blank(url, version=self.api_version) req.method = 'POST' body = { "os-reset_status": { "status": "available", "attach_status": "detached", } } unauthorized_exceptions = [ exception.VolumeNotFound, ] self.common_policy_check(user_id, self.authorized_admins, self.unauthorized_admins, unauthorized_exceptions, rule_name, self.admin_controller._reset_status, req, id=volume.id, body=body) @ddt.data(*base.all_users) def test_retype_policy(self, user_id): volume = self._create_volume() test_utils.create_volume_type(self.project_admin_context, name='another_vol_type', testcase_instance=self) rule_name = policy.RETYPE_POLICY url = '%s/%s/action' % (self.api_path, volume.id) req = fake_api.HTTPRequest.blank(url, version=self.api_version) req.method = 'POST' body = { "os-retype": { "new_type": "another_vol_type", } } unauthorized_exceptions = [ exception.VolumeNotFound, ] self.common_policy_check(user_id, self.authorized_users, self.unauthorized_users, unauthorized_exceptions, rule_name, self.controller._retype, req, id=volume.id, body=body) @ddt.data(*base.all_users) def test_update_readonly_policy(self, user_id): volume = self._create_volume() rule_name = policy.UPDATE_READONLY_POLICY url = '%s/%s/action' % (self.api_path, volume.id) req = fake_api.HTTPRequest.blank(url, version=self.api_version) req.method = 'POST' body = { "os-update_readonly_flag": { "readonly": True } } unauthorized_exceptions = [ exception.VolumeNotFound, ] self.common_policy_check(user_id, self.authorized_users, self.unauthorized_users, unauthorized_exceptions, rule_name, self.controller._volume_readonly_update, req, id=volume.id, body=body) @ddt.data(*base.all_users) def test_force_delete_policy(self, user_id): volume = self._create_volume() rule_name = policy.FORCE_DELETE_POLICY url = '%s/%s/action' % (self.api_path, volume.id) req = fake_api.HTTPRequest.blank(url, version=self.api_version) req.method = 'POST' body = { "os-force_delete": {} } unauthorized_exceptions = [ exception.VolumeNotFound, ] self.common_policy_check(user_id, self.authorized_admins, 
self.unauthorized_admins, unauthorized_exceptions, rule_name, self.admin_controller._force_delete, req, id=volume.id, body=body) @ddt.data(*base.all_users) @mock.patch('cinder.volume.rpcapi.VolumeAPI.detach_volume') @mock.patch('cinder.volume.rpcapi.VolumeAPI.terminate_connection') def test_force_detach_policy(self, user_id, mock_terminate_connection, mock_detach_volume): # Redirect the RPC calls directly to the volume manager. # The volume manager needs the volume.id, not the volume. def detach_volume(ctxt, volume, connector, force=False): return self.manager.detach_volume(ctxt, volume.id, attachment_id=None, volume=None) def terminate_connection(ctxt, volume, connector, force=False): return self.manager.terminate_connection(ctxt, volume.id, connector, force) mock_detach_volume.side_effect = detach_volume mock_terminate_connection.side_effect = terminate_connection volume = self._create_volume(attached=True) rule_name = policy.FORCE_DETACH_POLICY url = '%s/%s/action' % (self.api_path, volume.id) req = fake_api.HTTPRequest.blank(url, version=self.api_version) req.method = 'POST' body = { "os-force_detach": {} } unauthorized_exceptions = [ exception.VolumeNotFound, ] self.common_policy_check(user_id, self.authorized_admins, self.unauthorized_admins, unauthorized_exceptions, rule_name, self.admin_controller._force_detach, req, id=volume.id, body=body) @ddt.data(*base.all_users) @mock.patch('cinder.volume.rpcapi.VolumeAPI.copy_volume_to_image') @mock.patch('cinder.image.glance.GlanceImageService.create') def test_upload_image_policy(self, user_id, mock_image_create, mock_copy_volume_to_image): # Redirect the RPC calls directly to the volume manager. # The volume manager needs the volume.id, not the volume. def copy_volume_to_image(ctxt, volume, image_meta): return self.manager.copy_volume_to_image(ctxt, volume.id, image_meta) mock_copy_volume_to_image.side_effect = copy_volume_to_image volume = self._create_volume(status='available') rule_name = policy.UPLOAD_IMAGE_POLICY url = '%s/%s/action' % (self.api_path, volume.id) req = fake_api.HTTPRequest.blank(url, version=self.api_version) req.method = 'POST' body = { "os-volume_upload_image": { "image_name": "test", } } unauthorized_exceptions = [ exception.VolumeNotFound, ] self.common_policy_check(user_id, self.authorized_users, self.unauthorized_users, unauthorized_exceptions, rule_name, self.controller._volume_upload_image, req, id=volume.id, body=body) @ddt.data(*base.all_users) @mock.patch('cinder.volume.rpcapi.VolumeAPI.copy_volume_to_image') @mock.patch('cinder.image.glance.GlanceImageService.create') def test_upload_public_policy(self, user_id, mock_image_create, mock_copy_volume_to_image): # Redirect the RPC calls directly to the volume manager. # The volume manager needs the volume.id, not the volume. def copy_volume_to_image(ctxt, volume, image_meta): return self.manager.copy_volume_to_image(ctxt, volume.id, image_meta) mock_copy_volume_to_image.side_effect = copy_volume_to_image volume = self._create_volume(status='available') rule_name = policy.UPLOAD_PUBLIC_POLICY url = '%s/%s/action' % (self.api_path, volume.id) req = fake_api.HTTPRequest.blank(url, version=mv.UPLOAD_IMAGE_PARAMS) req.method = 'POST' body = { "os-volume_upload_image": { "image_name": "test", "visibility": "public", } } # Relax the UPLOAD_IMAGE_POLICY in order to get past that check. 
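        # (Setting a rule to the empty string makes it always pass, so only
        # the UPLOAD_PUBLIC_POLICY rule under test is actually exercised.)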
self.policy.set_rules({policy.UPLOAD_IMAGE_POLICY: ""}, overwrite=False) unauthorized_exceptions = [ exception.VolumeNotFound, ] self.common_policy_check(user_id, self.authorized_admins, self.unauthorized_admins, unauthorized_exceptions, rule_name, self.controller._volume_upload_image, req, id=volume.id, body=body) @ddt.data(*base.all_users) @mock.patch('cinder.objects.Service.get_by_id') def test_migrate_policy(self, user_id, mock_get_service_by_id): volume = self._create_volume() rule_name = policy.MIGRATE_POLICY url = '%s/%s/action' % (self.api_path, volume.id) req = fake_api.HTTPRequest.blank(url, version=self.api_version) req.method = 'POST' body = { "os-migrate_volume": { "host": "node1@lvm" } } unauthorized_exceptions = [ exception.VolumeNotFound, ] self.common_policy_check(user_id, self.authorized_admins, self.unauthorized_admins, unauthorized_exceptions, rule_name, self.admin_controller._migrate_volume, req, id=volume.id, body=body) @ddt.data(*base.all_users) def test_migrate_complete_policy(self, user_id): volume = self._create_volume() # Can't use self._create_volume() because it would fail when # trying to create the volume type a second time. new_volume = test_utils.create_volume(self.project_member_context, testcase_instance=self) rule_name = policy.MIGRATE_COMPLETE_POLICY url = '%s/%s/action' % (self.api_path, volume.id) req = fake_api.HTTPRequest.blank(url, version=self.api_version) req.method = 'POST' body = { "os-migrate_volume_completion": { "new_volume": new_volume.id } } unauthorized_exceptions = [ exception.VolumeNotFound, ] self.common_policy_check( user_id, self.authorized_admins, self.unauthorized_admins, unauthorized_exceptions, rule_name, self.admin_controller._migrate_volume_completion, req, id=volume.id, body=body) @ddt.data(*base.all_users) @mock.patch('cinder.volume.rpcapi.VolumeAPI.attach_volume') def test_attach_policy(self, user_id, mock_attach_volume): def attach_volume(context, volume, instance_uuid, host_name, mountpoint, mode): return self.manager.attach_volume(context, volume.id, instance_uuid, host_name, mountpoint, mode) mock_attach_volume.side_effect = attach_volume volume = self._create_volume(status='available') rule_name = policy.ATTACH_POLICY url = '%s/%s/action' % (self.api_path, volume.id) req = fake_api.HTTPRequest.blank(url, version=self.api_version) req.method = 'POST' body = { "os-attach": { "instance_uuid": fake_constants.INSTANCE_ID, "mountpoint": "/dev/vdc" } } unauthorized_exceptions = [ exception.VolumeNotFound, ] self.common_policy_check(user_id, self.authorized_users, self.unauthorized_users, unauthorized_exceptions, rule_name, self.controller._attach, req, id=volume.id, body=body) @ddt.data(*base.all_users) @mock.patch('cinder.volume.rpcapi.VolumeAPI.detach_volume') @mock.patch('cinder.volume.rpcapi.VolumeAPI.terminate_connection') def test_detach_policy(self, user_id, mock_terminate_connection, mock_detach_volume): # Redirect the RPC calls directly to the volume manager. # The volume manager needs the volume.id, not the volume. 
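        # (Routing the calls through self.manager lets the detach path run
        # in-process, without a real cinder-volume service, so the API-side
        # policy check can still be exercised end to end.)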
def detach_volume(ctxt, volume, connector, force=False): return self.manager.detach_volume(ctxt, volume.id, attachment_id=None, volume=None) def terminate_connection(ctxt, volume, connector, force=False): return self.manager.terminate_connection(ctxt, volume.id, connector, force) mock_detach_volume.side_effect = detach_volume mock_terminate_connection.side_effect = terminate_connection volume = self._create_volume(attached=True) rule_name = policy.DETACH_POLICY url = '%s/%s/action' % (self.api_path, volume.id) req = fake_api.HTTPRequest.blank(url, version=self.api_version) req.method = 'POST' body = { "os-detach": { "attachment_id": volume.volume_attachment[0].id } } unauthorized_exceptions = [ exception.VolumeNotFound, ] self.common_policy_check(user_id, self.authorized_users, self.unauthorized_users, unauthorized_exceptions, rule_name, self.controller._detach, req, id=volume.id, body=body) @ddt.data(*base.all_users) def test_begin_detaching_policy(self, user_id): volume = self._create_volume(status='in-use', attach_status='attached') rule_name = policy.BEGIN_DETACHING_POLICY url = '%s/%s/action' % (self.api_path, volume.id) req = fake_api.HTTPRequest.blank(url, version=self.api_version) req.method = 'POST' body = { "os-begin_detaching": {} } unauthorized_exceptions = [ exception.VolumeNotFound, ] self.common_policy_check(user_id, self.authorized_users, self.unauthorized_users, unauthorized_exceptions, rule_name, self.controller._begin_detaching, req, id=volume.id, body=body) @ddt.data(*base.all_users) def test_reserve_policy(self, user_id): volume = self._create_volume(status='available') rule_name = policy.RESERVE_POLICY url = '%s/%s/action' % (self.api_path, volume.id) req = fake_api.HTTPRequest.blank(url, version=self.api_version) req.method = 'POST' body = { "os-reserve": {} } unauthorized_exceptions = [ exception.VolumeNotFound, ] self.common_policy_check(user_id, self.authorized_users, self.unauthorized_users, unauthorized_exceptions, rule_name, self.controller._reserve, req, id=volume.id, body=body) @ddt.data(*base.all_users) def test_unreserve_policy(self, user_id): volume = self._create_volume(status='reserved') rule_name = policy.UNRESERVE_POLICY url = '%s/%s/action' % (self.api_path, volume.id) req = fake_api.HTTPRequest.blank(url, version=self.api_version) req.method = 'POST' body = { "os-unreserve": {} } unauthorized_exceptions = [ exception.VolumeNotFound, ] self.common_policy_check(user_id, self.authorized_users, self.unauthorized_users, unauthorized_exceptions, rule_name, self.controller._unreserve, req, id=volume.id, body=body) @ddt.data(*base.all_users) def test_roll_detaching_policy(self, user_id): volume = self._create_volume(status='detaching') rule_name = policy.ROLL_DETACHING_POLICY url = '%s/%s/action' % (self.api_path, volume.id) req = fake_api.HTTPRequest.blank(url, version=self.api_version) req.method = 'POST' body = { "os-roll_detaching": {} } unauthorized_exceptions = [ exception.VolumeNotFound, ] self.common_policy_check(user_id, self.authorized_users, self.unauthorized_users, unauthorized_exceptions, rule_name, self.controller._roll_detaching, req, id=volume.id, body=body) @ddt.data(*base.all_users) @mock.patch('cinder.volume.rpcapi.VolumeAPI.initialize_connection') def test_initialize_policy(self, user_id, mock_initialize_connection): def initialize_connection(*args): return self.manager.initialize_connection(*args) mock_initialize_connection.side_effect = initialize_connection volume = self._create_volume() rule_name = policy.INITIALIZE_POLICY url = 
'%s/%s/action' % (self.api_path, volume.id)
        req = fake_api.HTTPRequest.blank(url, version=self.api_version)
        req.method = 'POST'
        body = {
            "os-initialize_connection": {
                "connector": {
                    "platform": "x86_64",
                    "host": "node2",
                    "do_local_attach": False,
                    "ip": "192.168.13.101",
                    "os_type": "linux2",
                    "multipath": False,
                    "initiator": "iqn.1994-05.com.redhat:d16cbb5d31e5"
                }
            }
        }
        unauthorized_exceptions = [
            exception.VolumeNotFound,
        ]
        self.common_policy_check(user_id, self.authorized_users,
                                 self.unauthorized_users,
                                 unauthorized_exceptions,
                                 rule_name,
                                 self.controller._initialize_connection,
                                 req, id=volume.id, body=body)

    @ddt.data(*base.all_users)
    @mock.patch('cinder.volume.rpcapi.VolumeAPI.terminate_connection')
    def test_terminate_policy(self, user_id, mock_terminate_connection):
        def terminate_connection(ctxt, volume, connector, force=False):
            return self.manager.terminate_connection(ctxt, volume.id,
                                                     connector, force=False)

        mock_terminate_connection.side_effect = terminate_connection
        volume = self._create_volume()
        rule_name = policy.TERMINATE_POLICY
        url = '%s/%s/action' % (self.api_path, volume.id)
        req = fake_api.HTTPRequest.blank(url, version=self.api_version)
        req.method = 'POST'
        body = {
            "os-terminate_connection": {
                "connector": {
                    "platform": "x86_64",
                    "host": "node2",
                    "do_local_attach": False,
                    "ip": "192.168.13.101",
                    "os_type": "linux2",
                    "multipath": False,
                    "initiator": "iqn.1994-05.com.redhat:d16cbb5d31e5"
                }
            }
        }
        unauthorized_exceptions = [
            exception.VolumeNotFound,
        ]
        self.common_policy_check(user_id, self.authorized_users,
                                 self.unauthorized_users,
                                 unauthorized_exceptions,
                                 rule_name,
                                 self.controller._terminate_connection,
                                 req, id=volume.id, body=body)


class VolumeActionsPolicySecureRbacTest(VolumeActionsPolicyTest):
    authorized_users = [
        'legacy_admin',
        'system_admin',
        'project_admin',
        'project_member',
    ]
    unauthorized_users = [
        'legacy_owner',
        'system_member',
        'system_foo',
        'project_reader',
        'project_foo',
        'other_project_member',
        'other_project_reader',
    ]

    def setUp(self, *args, **kwargs):
        # Test secure RBAC by disabling deprecated policy rules (scope
        # is still not enabled).
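        # Note how the class attributes above drop the read-only personas
        # (e.g. project_reader) from authorized_users: with the deprecated
        # rules disabled, volume actions are expected to require at least
        # the 'member' role.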
super().setUp(enforce_scope=False, enforce_new_defaults=True, *args, **kwargs) class VolumeProtectionTests(test_base.CinderPolicyTests): def test_admin_can_extend_volume(self): admin_context = self.admin_context volume = self._create_fake_volume(admin_context) path = '/v3/%(project_id)s/volumes/%(volume_id)s/action' % { 'project_id': admin_context.project_id, 'volume_id': volume.id } body = {"os-extend": {"new_size": "2"}} response = self._get_request_response(admin_context, path, 'POST', body=body) self.assertEqual(HTTPStatus.ACCEPTED, response.status_int) def test_owner_can_extend_volume(self): user_context = self.user_context volume = self._create_fake_volume(user_context) path = '/v3/%(project_id)s/volumes/%(volume_id)s/action' % { 'project_id': user_context.project_id, 'volume_id': volume.id } body = {"os-extend": {"new_size": "2"}} response = self._get_request_response(user_context, path, 'POST', body=body) self.assertEqual(HTTPStatus.ACCEPTED, response.status_int) @mock.patch.object(volume_api.API, 'get') def test_owner_cannot_extend_volume_for_others(self, mock_volume): user_context = self.user_context non_owner_context = self.other_user_context volume = self._create_fake_volume(user_context) mock_volume.return_value = volume path = '/v3/%(project_id)s/volumes/%(volume_id)s/action' % { 'project_id': non_owner_context.project_id, 'volume_id': volume.id } body = {"os-extend": {"new_size": "2"}} response = self._get_request_response(non_owner_context, path, 'POST', body=body) self.assertEqual(HTTPStatus.FORBIDDEN, response.status_int) def test_admin_can_extend_attached_volume(self): admin_context = self.admin_context volume = self._create_fake_volume(admin_context) path = '/v3/%(project_id)s/volumes/%(volume_id)s/action' % { 'project_id': admin_context.project_id, 'volume_id': volume.id } body = {"os-extend": {"new_size": "2"}} response = self._get_request_response( admin_context, path, 'POST', body=body, microversion=mv.VOLUME_EXTEND_INUSE) self.assertEqual(HTTPStatus.ACCEPTED, response.status_int) def test_owner_can_extend_attached_volume(self): user_context = self.user_context volume = self._create_fake_volume(user_context) path = '/v3/%(project_id)s/volumes/%(volume_id)s/action' % { 'project_id': user_context.project_id, 'volume_id': volume.id } body = {"os-extend": {"new_size": "2"}} response = self._get_request_response( user_context, path, 'POST', body=body, microversion=mv.VOLUME_EXTEND_INUSE) self.assertEqual(HTTPStatus.ACCEPTED, response.status_int) @mock.patch.object(volume_api.API, 'get') def test_owner_cannot_extend_attached_volume_for_others(self, mock_volume): user_context = self.user_context non_owner_context = self.other_user_context volume = self._create_fake_volume(user_context) mock_volume.return_value = volume path = '/v3/%(project_id)s/volumes/%(volume_id)s/action' % { 'project_id': non_owner_context.project_id, 'volume_id': volume.id } body = {"os-extend": {"new_size": "2"}} response = self._get_request_response( non_owner_context, path, 'POST', body=body, microversion=mv.VOLUME_EXTEND_INUSE) self.assertEqual(HTTPStatus.FORBIDDEN, response.status_int) def test_admin_can_retype_volume(self): admin_context = self.admin_context volume = self._create_fake_volume(admin_context) vol_type = self._create_fake_type(admin_context) path = '/v3/%(project_id)s/volumes/%(volume_id)s/action' % { 'project_id': admin_context.project_id, 'volume_id': volume.id } body = {"os-retype": {"new_type": "%s" % vol_type.name, "migration_policy": "never"}} response = 
self._get_request_response( admin_context, path, 'POST', body=body) self.assertEqual(HTTPStatus.ACCEPTED, response.status_int) def test_owner_can_retype_volume(self): user_context = self.user_context volume = self._create_fake_volume(user_context) vol_type = self._create_fake_type(user_context) path = '/v3/%(project_id)s/volumes/%(volume_id)s/action' % { 'project_id': user_context.project_id, 'volume_id': volume.id } body = {"os-retype": {"new_type": "%s" % vol_type.name, "migration_policy": "never"}} response = self._get_request_response( user_context, path, 'POST', body=body) self.assertEqual(HTTPStatus.ACCEPTED, response.status_int) @mock.patch.object(volume_api.API, 'get') def test_owner_cannot_retype_volume_for_others(self, mock_volume): user_context = self.user_context non_owner_context = self.other_user_context volume = self._create_fake_volume(user_context) mock_volume.return_value = volume vol_type = self._create_fake_type(user_context) path = '/v3/%(project_id)s/volumes/%(volume_id)s/action' % { 'project_id': non_owner_context.project_id, 'volume_id': volume.id } body = {"os-retype": {"new_type": "%s" % vol_type.name, "migration_policy": "never"}} response = self._get_request_response( non_owner_context, path, 'POST', body=body) self.assertEqual(HTTPStatus.FORBIDDEN, response.status_int) def test_admin_can_update_readonly(self): admin_context = self.admin_context volume = self._create_fake_volume( admin_context, admin_metadata={"readonly": "False"}) path = '/v3/%(project_id)s/volumes/%(volume_id)s/action' % { 'project_id': admin_context.project_id, 'volume_id': volume.id } body = {"os-update_readonly_flag": {"readonly": "True"}} response = self._get_request_response( admin_context, path, 'POST', body=body) self.assertEqual(HTTPStatus.ACCEPTED, response.status_int) def test_owner_can_update_readonly(self): user_context = self.user_context volume = self._create_fake_volume( user_context, admin_metadata={"readonly": "False"}) path = '/v3/%(project_id)s/volumes/%(volume_id)s/action' % { 'project_id': user_context.project_id, 'volume_id': volume.id } body = {"os-update_readonly_flag": {"readonly": "True"}} response = self._get_request_response( user_context, path, 'POST', body=body) self.assertEqual(HTTPStatus.ACCEPTED, response.status_int) @mock.patch.object(volume_api.API, 'get') def test_owner_cannot_update_readonly_for_others(self, mock_volume): user_context = self.user_context non_owner_context = self.other_user_context volume = self._create_fake_volume( user_context, admin_metadata={"readonly": "False"}) mock_volume.return_value = volume path = '/v3/%(project_id)s/volumes/%(volume_id)s/action' % { 'project_id': non_owner_context.project_id, 'volume_id': volume.id } body = {"os-update_readonly_flag": {"readonly": "True"}} response = self._get_request_response( non_owner_context, path, 'POST', body=body) self.assertEqual(HTTPStatus.FORBIDDEN, response.status_int) @mock.patch.object(volume_api.API, 'get_volume') def test_admin_can_force_delete_volumes(self, mock_volume): # Make sure administrators are authorized to force delete volumes admin_context = self.admin_context volume = self._create_fake_volume(admin_context) mock_volume.return_value = volume path = '/v3/%(project_id)s/volumes/%(volume_id)s/action' % { 'project_id': admin_context.project_id, 'volume_id': volume.id } body = {"os-force_delete": {}} response = self._get_request_response(admin_context, path, 'POST', body=body) self.assertEqual(HTTPStatus.ACCEPTED, response.status_int) @mock.patch.object(volume_api.API, 
'get_volume') def test_nonadmin_cannot_force_delete_volumes(self, mock_volume): # Make sure volumes only can be force deleted by admin user_context = self.user_context volume = self._create_fake_volume(user_context) mock_volume.return_value = volume path = '/v3/%(project_id)s/volumes/%(volume_id)s/action' % { 'project_id': user_context.project_id, 'volume_id': volume.id } body = {"os-force_delete": {}} response = self._get_request_response(user_context, path, 'POST', body=body) self.assertEqual(HTTPStatus.FORBIDDEN, response.status_int) @mock.patch.object(volume_api.volume_rpcapi.VolumeAPI, 'attach_volume') @mock.patch.object(volume_api.volume_rpcapi.VolumeAPI, 'detach_volume') def test_admin_can_attach_detach_volume(self, mock_detach, mock_attach): admin_context = self.admin_context volume = self._create_fake_volume(admin_context) path = '/v3/%(project_id)s/volumes/%(volume_id)s/action' % { 'project_id': admin_context.project_id, 'volume_id': volume.id } body = {"os-attach": {"instance_uuid": fake_constants.UUID1, "mountpoint": "/dev/vdc"}} response = self._get_request_response(admin_context, path, 'POST', body=body) self.assertEqual(HTTPStatus.ACCEPTED, response.status_int) body = {"os-detach": {}} # Detach for user call succeeds because the volume has no attachments response = self._get_request_response(admin_context, path, 'POST', body=body) self.assertEqual(HTTPStatus.ACCEPTED, response.status_int) @mock.patch.object(volume_api.volume_rpcapi.VolumeAPI, 'attach_volume') @mock.patch.object(volume_api.volume_rpcapi.VolumeAPI, 'detach_volume') def test_owner_can_attach_detach_volume(self, mock_detach, mock_attach): user_context = self.user_context volume = self._create_fake_volume(user_context) path = '/v3/%(project_id)s/volumes/%(volume_id)s/action' % { 'project_id': user_context.project_id, 'volume_id': volume.id } body = {"os-attach": {"instance_uuid": fake_constants.UUID1, "mountpoint": "/dev/vdc"}} response = self._get_request_response(user_context, path, 'POST', body=body) self.assertEqual(HTTPStatus.ACCEPTED, response.status_int) # Succeeds for a user call because there are no attachments body = {"os-detach": {}} response = self._get_request_response(user_context, path, 'POST', body=body) self.assertEqual(HTTPStatus.ACCEPTED, response.status_int) @mock.patch.object(volume_api.volume_rpcapi.VolumeAPI, 'attach_volume') @mock.patch.object(volume_api.volume_rpcapi.VolumeAPI, 'detach_volume') @mock.patch.object(volume_api.API, 'get') def test_owner_cannot_attach_detach_volume_for_others(self, mock_volume, mock_detach, mock_attach): user_context = self.user_context non_owner_context = self.other_user_context volume = self._create_fake_volume(user_context) mock_volume.return_value = volume path = '/v3/%(project_id)s/volumes/%(volume_id)s/action' % { 'project_id': non_owner_context.project_id, 'volume_id': volume.id } body = {"os-attach": {"instance_uuid": fake_constants.UUID1, "mountpoint": "/dev/vdc"}} response = self._get_request_response(non_owner_context, path, 'POST', body=body) self.assertEqual(HTTPStatus.FORBIDDEN, response.status_int) body = {"os-detach": {}} response = self._get_request_response(non_owner_context, path, 'POST', body=body) self.assertEqual(HTTPStatus.FORBIDDEN, response.status_int) def test_admin_can_reserve_unreserve_volume(self): admin_context = self.admin_context volume = self._create_fake_volume(admin_context) path = '/v3/%(project_id)s/volumes/%(volume_id)s/action' % { 'project_id': admin_context.project_id, 'volume_id': volume.id } body = {"os-reserve": {}} 
response = self._get_request_response(admin_context, path, 'POST', body=body) self.assertEqual(HTTPStatus.ACCEPTED, response.status_int) body = {"os-unreserve": {}} response = self._get_request_response(admin_context, path, 'POST', body=body) self.assertEqual(HTTPStatus.ACCEPTED, response.status_int) def test_owner_can_reserve_unreserve_volume(self): user_context = self.user_context volume = self._create_fake_volume(user_context) path = '/v3/%(project_id)s/volumes/%(volume_id)s/action' % { 'project_id': user_context.project_id, 'volume_id': volume.id } body = {"os-reserve": {}} response = self._get_request_response(user_context, path, 'POST', body=body) self.assertEqual(HTTPStatus.ACCEPTED, response.status_int) body = {"os-unreserve": {}} response = self._get_request_response(user_context, path, 'POST', body=body) self.assertEqual(HTTPStatus.ACCEPTED, response.status_int) @mock.patch.object(volume_api.API, 'get') def test_owner_cannot_reserve_unreserve_volume_for_others(self, mock_volume): user_context = self.user_context non_owner_context = self.other_user_context volume = self._create_fake_volume(user_context) mock_volume.return_value = volume path = '/v3/%(project_id)s/volumes/%(volume_id)s/action' % { 'project_id': non_owner_context.project_id, 'volume_id': volume.id } body = {"os-attach": {"instance_uuid": fake_constants.UUID1, "mountpoint": "/dev/vdc"}} response = self._get_request_response(non_owner_context, path, 'POST', body=body) self.assertEqual(HTTPStatus.FORBIDDEN, response.status_int) body = {"os-detach": {}} response = self._get_request_response(non_owner_context, path, 'POST', body=body) self.assertEqual(HTTPStatus.FORBIDDEN, response.status_int) @mock.patch.object(volume_api.volume_rpcapi.VolumeAPI, 'initialize_connection') @mock.patch.object(volume_api.volume_rpcapi.VolumeAPI, 'terminate_connection') def test_admin_can_initialize_terminate_conn(self, mock_t, mock_i): admin_context = self.admin_context admin_context.service_roles = ['service'] volume = self._create_fake_volume(admin_context) path = '/v3/%(project_id)s/volumes/%(volume_id)s/action' % { 'project_id': admin_context.project_id, 'volume_id': volume.id } body = {"os-initialize_connection": {'connector': {}}} response = self._get_request_response(admin_context, path, 'POST', body=body) self.assertEqual(HTTPStatus.OK, response.status_int) body = {"os-terminate_connection": {'connector': {}}} response = self._get_request_response(admin_context, path, 'POST', body=body) self.assertEqual(HTTPStatus.ACCEPTED, response.status_int) @mock.patch.object(volume_api.volume_rpcapi.VolumeAPI, 'initialize_connection') @mock.patch.object(volume_api.volume_rpcapi.VolumeAPI, 'terminate_connection') def test_owner_can_initialize_terminate_conn(self, mock_t, mock_i): user_context = self.user_context user_context.service_roles = ['service'] volume = self._create_fake_volume(user_context) path = '/v3/%(project_id)s/volumes/%(volume_id)s/action' % { 'project_id': user_context.project_id, 'volume_id': volume.id } body = {"os-initialize_connection": {'connector': {}}} response = self._get_request_response(user_context, path, 'POST', body=body) self.assertEqual(HTTPStatus.OK, response.status_int) body = {"os-terminate_connection": {'connector': {}}} response = self._get_request_response(user_context, path, 'POST', body=body) self.assertEqual(HTTPStatus.ACCEPTED, response.status_int) @mock.patch.object(volume_api.volume_rpcapi.VolumeAPI, 'initialize_connection') @mock.patch.object(volume_api.volume_rpcapi.VolumeAPI, 'terminate_connection') 
@mock.patch.object(volume_api.API, 'get') def test_owner_cannot_initialize_terminate_conn_for_others(self, mock_volume, mock_t, mock_i): user_context = self.user_context non_owner_context = self.other_user_context volume = self._create_fake_volume(user_context) mock_volume.return_value = volume path = '/v3/%(project_id)s/volumes/%(volume_id)s/action' % { 'project_id': non_owner_context.project_id, 'volume_id': volume.id } body = {"os-initialize_connection": {'connector': {}}} response = self._get_request_response(non_owner_context, path, 'POST', body=body) self.assertEqual(HTTPStatus.FORBIDDEN, response.status_int) body = {"os-terminate_connection": {'connector': {}}} response = self._get_request_response(non_owner_context, path, 'POST', body=body) self.assertEqual(HTTPStatus.FORBIDDEN, response.status_int) def test_admin_can_begin_roll_detaching(self): admin_context = self.admin_context volume = self._create_fake_volume(admin_context, status='in-use', attach_status='attached') path = '/v3/%(project_id)s/volumes/%(volume_id)s/action' % { 'project_id': admin_context.project_id, 'volume_id': volume.id } body = {"os-begin_detaching": {}} response = self._get_request_response(admin_context, path, 'POST', body=body) self.assertEqual(HTTPStatus.ACCEPTED, response.status_int) body = {"os-roll_detaching": {}} response = self._get_request_response(admin_context, path, 'POST', body=body) self.assertEqual(HTTPStatus.ACCEPTED, response.status_int) def test_owner_can_begin_roll_detaching(self): user_context = self.user_context volume = self._create_fake_volume(user_context, status='in-use', attach_status='attached') path = '/v3/%(project_id)s/volumes/%(volume_id)s/action' % { 'project_id': user_context.project_id, 'volume_id': volume.id } body = {"os-begin_detaching": {}} response = self._get_request_response(user_context, path, 'POST', body=body) self.assertEqual(HTTPStatus.ACCEPTED, response.status_int) body = {"os-roll_detaching": {}} response = self._get_request_response(user_context, path, 'POST', body=body) self.assertEqual(HTTPStatus.ACCEPTED, response.status_int) @mock.patch.object(volume_api.API, 'get') def test_owner_cannot_begin_roll_detaching_for_others(self, mock_volume): user_context = self.user_context non_owner_context = self.other_user_context volume = self._create_fake_volume(user_context, status='in-use', attach_status='attached') mock_volume.return_value = volume path = '/v3/%(project_id)s/volumes/%(volume_id)s/action' % { 'project_id': non_owner_context.project_id, 'volume_id': volume.id } body = {"os-begin_detaching": {}} response = self._get_request_response(non_owner_context, path, 'POST', body=body) self.assertEqual(HTTPStatus.FORBIDDEN, response.status_int) body = {"os-roll_detaching": {}} response = self._get_request_response(non_owner_context, path, 'POST', body=body) self.assertEqual(HTTPStatus.FORBIDDEN, response.status_int) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/policies/test_volume_metadata.py0000664000175000017500000004677200000000000025063 0ustar00zuulzuul00000000000000# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from http import HTTPStatus from unittest import mock import ddt from cinder.api.contrib import volume_image_metadata as image_metadata from cinder.api import microversions as mv from cinder.api.v3 import volume_metadata from cinder import db from cinder import exception from cinder.policies import volume_metadata as policy from cinder.tests.unit.api import fakes as fake_api from cinder.tests.unit.policies import base from cinder.tests.unit.policies import test_base from cinder.tests.unit import utils as test_utils from cinder.volume import api as volume_api @ddt.ddt class VolumeMetadataPolicyTest(base.BasePolicyTest): authorized_readers = [ 'legacy_admin', 'legacy_owner', 'system_admin', 'project_admin', 'project_member', 'project_reader', 'project_foo', ] unauthorized_readers = [ 'system_member', 'system_reader', 'system_foo', 'other_project_member', 'other_project_reader', ] authorized_members = [ 'legacy_admin', 'legacy_owner', 'system_admin', 'project_admin', 'project_member', 'project_reader', 'project_foo', ] unauthorized_members = [ 'system_member', 'system_reader', 'system_foo', 'other_project_member', 'other_project_reader', ] authorized_admins = [ 'legacy_admin', 'system_admin', 'project_admin', ] unauthorized_admins = [ 'legacy_owner', 'system_member', 'system_reader', 'system_foo', 'project_member', 'project_reader', 'project_foo', 'other_project_member', 'other_project_reader', ] # DB validations will throw VolumeNotFound for some contexts unauthorized_exceptions = [ exception.VolumeNotFound, ] # Basic policy test is without enforcing scope (which cinder doesn't # yet support) and deprecated rules enabled. 
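    # The unauthorized_exceptions list above matters because the metadata
    # controllers look the target volume up in the DB; for personas in a
    # different project that lookup typically raises VolumeNotFound rather
    # than a policy error, and the check treats both as a denial.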
def setUp(self, enforce_scope=False, enforce_new_defaults=False, *args, **kwargs): super().setUp(enforce_scope, enforce_new_defaults, *args, **kwargs) self.controller = volume_metadata.Controller() self.image_controller = image_metadata.VolumeImageMetadataController() self.api_path = '/v3/%s/volumes' % (self.project_id) self.api_version = mv.BASE_VERSION def _create_volume(self, image_metadata=None, **kwargs): vol_type = test_utils.create_volume_type(self.project_admin_context, name='fake_vol_type', testcase_instance=self) volume = test_utils.create_volume(self.project_member_context, volume_type_id=vol_type.id, testcase_instance=self, **kwargs) for (k, v) in (image_metadata.items() if image_metadata else []): db.volume_glance_metadata_create(self.project_admin_context, volume.id, k, v) return volume @ddt.data(*base.all_users) def test_get_policy(self, user_id): volume = self._create_volume() rule_name = policy.GET_POLICY url = '%s/%s/metadata' % (self.api_path, volume.id) req = fake_api.HTTPRequest.blank(url, version=self.api_version) self.common_policy_check(user_id, self.authorized_readers, self.unauthorized_readers, self.unauthorized_exceptions, rule_name, self.controller.index, req, volume_id=volume.id) @ddt.data(*base.all_users) def test_create_policy(self, user_id): volume = self._create_volume() rule_name = policy.CREATE_POLICY url = '%s/%s/metadata' % (self.api_path, volume.id) req = fake_api.HTTPRequest.blank(url, version=self.api_version) req.method = 'POST' body = { "metadata": { "name": "metadata0" } } self.common_policy_check(user_id, self.authorized_members, self.unauthorized_members, self.unauthorized_exceptions, rule_name, self.controller.create, req, volume_id=volume.id, body=body) @ddt.data(*base.all_users) def test_update_policy(self, user_id): volume = self._create_volume(metadata={"foo": "bar"}) rule_name = policy.UPDATE_POLICY url = '%s/%s/metadata' % (self.api_path, volume.id) req = fake_api.HTTPRequest.blank(url, version=self.api_version) req.method = 'PUT' body = { # Not sure why, but the API code expects the body to contain # a "meta" (not "metadata") dict. "meta": { "foo": "zap" } } self.common_policy_check(user_id, self.authorized_members, self.unauthorized_members, self.unauthorized_exceptions, rule_name, self.controller.update, req, volume_id=volume.id, id='foo', body=body) @ddt.data(*base.all_users) def test_delete_policy(self, user_id): volume = self._create_volume(metadata={"foo": "bar"}) rule_name = policy.DELETE_POLICY url = '%s/%s/metadata/foo' % (self.api_path, volume.id) req = fake_api.HTTPRequest.blank(url, version=self.api_version) req.method = 'DELETE' # Relax the GET_POLICY in order to get past that check. 
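        # (The delete handler appears to load the existing metadata first,
        # which enforces GET_POLICY; relaxing it means the assertion below
        # only exercises the DELETE_POLICY rule under test.)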
self.policy.set_rules({policy.GET_POLICY: ""}, overwrite=False) self.common_policy_check(user_id, self.authorized_members, self.unauthorized_members, self.unauthorized_exceptions, rule_name, self.controller.delete, req, volume_id=volume.id, id='foo') @ddt.data(*base.all_users) def test_image_metadata_show_policy(self, user_id): image_metadata = { "up": "down", "left": "right" } volume = self._create_volume(image_metadata) volume = volume.obj_to_primitive()['versioned_object.data'] rule_name = policy.IMAGE_METADATA_SHOW_POLICY url = '%s/%s' % (self.api_path, volume['id']) req = fake_api.HTTPRequest.blank(url, version=self.api_version) req.get_db_volume = mock.MagicMock() req.get_db_volume.return_value = volume resp_obj = mock.MagicMock(obj={'volume': volume}) self.assertNotIn('volume_image_metadata', volume.keys()) self.common_policy_check(user_id, self.authorized_readers, self.unauthorized_readers, self.unauthorized_exceptions, rule_name, self.image_controller.show, req, resp_obj, id=volume['id'], fatal=False) if user_id in self.authorized_readers: self.assertDictEqual(image_metadata, volume['volume_image_metadata']) @ddt.data(*base.all_users) def test_image_metadata_set_policy(self, user_id): volume = self._create_volume() rule_name = policy.IMAGE_METADATA_SET_POLICY url = '%s/%s/action' % (self.api_path, volume.id) req = fake_api.HTTPRequest.blank(url, version=self.api_version) req.method = 'POST' body = { "os-set_image_metadata": { "metadata": { "image_name": "my_image", } } } self.common_policy_check(user_id, self.authorized_members, self.unauthorized_members, self.unauthorized_exceptions, rule_name, self.image_controller.create, req, id=volume.id, body=body) @ddt.data(*base.all_users) def test_image_metadata_remove_policy(self, user_id): volume = self._create_volume(image_metadata={"foo": "bar"}) rule_name = policy.IMAGE_METADATA_REMOVE_POLICY url = '%s/%s/action' % (self.api_path, volume.id) req = fake_api.HTTPRequest.blank(url, version=self.api_version) req.method = 'POST' body = { "os-unset_image_metadata": { "key": "foo" } } self.common_policy_check(user_id, self.authorized_members, self.unauthorized_members, self.unauthorized_exceptions, rule_name, self.image_controller.delete, req, id=volume.id, body=body) # NOTE(abishop): # The following code is a work in progress, and work is deferred until # Yoga. This is because the UPDATE_ADMIN_METADATA_POLICY rule is # unchanged in Xena (it's RULE_ADMIN_API). This test will be necessary # when RULE_ADMIN_API is deprecated in Yoga. # # @ddt.data(*base.all_users) # def test_update_admin_metadata_policy(self, user_id): # volume = self._create_volume() # rule_name = policy.UPDATE_ADMIN_METADATA_POLICY # url = '%s/%s/action' % (self.api_path, volume.id) # req = fake_api.HTTPRequest.blank(url, version=self.api_version) # req.method = 'POST' # body = { # "os-update_readonly_flag": { # "readonly": True # } # } # # # Only this test needs a VolumeActionsController # ext_mgr = extensions.ExtensionManager() # controller = volume_actions.VolumeActionsController(ext_mgr) # # # Relax the UPDATE_READONLY_POLICY in order to get past that check. 
# self.policy.set_rules({va_policy.UPDATE_READONLY_POLICY: ""}, # overwrite=False) # # self.common_policy_check(user_id, self.authorized_admins, # self.unauthorized_admins, # self.unauthorized_exceptions, # rule_name, # controller._volume_readonly_update, req, # id=volume.id, body=body) class VolumeMetadataPolicySecureRbacTest(VolumeMetadataPolicyTest): authorized_readers = [ 'legacy_admin', 'system_admin', 'project_admin', 'project_member', 'project_reader', ] unauthorized_readers = [ 'legacy_owner', 'system_member', 'system_reader', 'system_foo', 'project_foo', 'other_project_member', 'other_project_reader', ] authorized_members = [ 'legacy_admin', 'system_admin', 'project_admin', 'project_member', ] unauthorized_members = [ 'legacy_owner', 'system_member', 'system_reader', 'system_foo', 'project_reader', 'project_foo', 'other_project_member', 'other_project_reader', ] def setUp(self, *args, **kwargs): # Test secure RBAC by disabling deprecated policy rules (scope # is still not enabled). super().setUp(enforce_scope=False, enforce_new_defaults=True, *args, **kwargs) class VolumePolicyTests(test_base.CinderPolicyTests): def test_admin_can_get_metadata(self): admin_context = self.admin_context volume = self._create_fake_volume(admin_context, metadata={"k": "v"}) path = '/v3/%(project_id)s/volumes/%(volume_id)s/metadata' % { 'project_id': admin_context.project_id, 'volume_id': volume.id } response = self._get_request_response(admin_context, path, 'GET') self.assertEqual(HTTPStatus.OK, response.status_int) res_meta = response.json_body['metadata'] self.assertIn('k', res_meta) self.assertEqual('v', res_meta['k']) def test_owner_can_get_metadata(self): user_context = self.user_context volume = self._create_fake_volume(user_context, metadata={"k": "v"}) path = '/v3/%(project_id)s/volumes/%(volume_id)s/metadata' % { 'project_id': user_context.project_id, 'volume_id': volume.id } response = self._get_request_response(user_context, path, 'GET') self.assertEqual(HTTPStatus.OK, response.status_int) res_meta = response.json_body['metadata'] self.assertIn('k', res_meta) self.assertEqual('v', res_meta['k']) @mock.patch.object(volume_api.API, 'get') def test_owner_cannot_get_metadata_for_others(self, mock_volume): owner_context = self.user_context non_owner_context = self.other_user_context volume = self._create_fake_volume(owner_context, metadata={"k": "v"}) mock_volume.return_value = volume path = '/v3/%(project_id)s/volumes/%(volume_id)s/metadata' % { 'project_id': non_owner_context.project_id, 'volume_id': volume.id } response = self._get_request_response(non_owner_context, path, 'GET') self.assertEqual(HTTPStatus.FORBIDDEN, response.status_int) def test_admin_can_create_metadata(self): admin_context = self.admin_context volume = self._create_fake_volume(admin_context, metadata={"k": "v"}) path = '/v3/%(project_id)s/volumes/%(volume_id)s/metadata' % { 'project_id': admin_context.project_id, 'volume_id': volume.id } body = {"metadata": {"k1": "v1"}} response = self._get_request_response(admin_context, path, 'POST', body=body) self.assertEqual(HTTPStatus.OK, response.status_int) def test_owner_can_create_metadata(self): user_context = self.user_context volume = self._create_fake_volume(user_context, metadata={"k": "v"}) path = '/v3/%(project_id)s/volumes/%(volume_id)s/metadata' % { 'project_id': user_context.project_id, 'volume_id': volume.id } body = {"metadata": {"k1": "v1"}} response = self._get_request_response(user_context, path, 'POST', body=body) self.assertEqual(HTTPStatus.OK, 
response.status_int) @mock.patch.object(volume_api.API, 'get') def test_owner_cannot_create_metadata_for_others(self, mock_volume): owner_context = self.user_context non_owner_context = self.other_user_context volume = self._create_fake_volume(owner_context, metadata={"k": "v"}) mock_volume.return_value = volume path = '/v3/%(project_id)s/volumes/%(volume_id)s/metadata' % { 'project_id': non_owner_context.project_id, 'volume_id': volume.id } body = {"metadata": {"k1": "v1"}} response = self._get_request_response(non_owner_context, path, 'POST', body=body) self.assertEqual(HTTPStatus.FORBIDDEN, response.status_int) def test_admin_can_delete_metadata(self): admin_context = self.admin_context volume = self._create_fake_volume(admin_context, metadata={"k": "v"}) path = '/v3/%(project_id)s/volumes/%(volume_id)s/metadata/%(key)s' % { 'project_id': admin_context.project_id, 'volume_id': volume.id, 'key': 'k' } response = self._get_request_response(admin_context, path, 'DELETE') self.assertEqual(HTTPStatus.OK, response.status_int) def test_owner_can_delete_metadata(self): user_context = self.user_context volume = self._create_fake_volume(user_context, metadata={"k": "v"}) path = '/v3/%(project_id)s/volumes/%(volume_id)s/metadata/%(key)s' % { 'project_id': user_context.project_id, 'volume_id': volume.id, 'key': 'k' } response = self._get_request_response(user_context, path, 'DELETE') self.assertEqual(HTTPStatus.OK, response.status_int) @mock.patch.object(volume_api.API, 'get') def test_owner_cannot_delete_metadata_for_others(self, mock_volume): owner_context = self.user_context non_owner_context = self.other_user_context volume = self._create_fake_volume(owner_context, metadata={"k": "v"}) mock_volume.return_value = volume path = '/v3/%(project_id)s/volumes/%(volume_id)s/metadata/%(key)s' % { 'project_id': non_owner_context.project_id, 'volume_id': volume.id, 'key': 'k' } response = self._get_request_response(non_owner_context, path, 'DELETE') self.assertEqual(HTTPStatus.FORBIDDEN, response.status_int) def test_admin_can_update_metadata(self): admin_context = self.admin_context volume = self._create_fake_volume(admin_context, metadata={"k": "v"}) path = '/v3/%(project_id)s/volumes/%(volume_id)s/metadata' % { 'project_id': admin_context.project_id, 'volume_id': volume.id } body = {"metadata": {"k": "v2"}} response = self._get_request_response(admin_context, path, 'PUT', body=body) self.assertEqual(HTTPStatus.OK, response.status_int) res_meta = response.json_body['metadata'] self.assertIn('k', res_meta) self.assertEqual('v2', res_meta['k']) def test_owner_can_update_metadata(self): user_context = self.user_context volume = self._create_fake_volume(user_context, metadata={"k": "v"}) path = '/v3/%(project_id)s/volumes/%(volume_id)s/metadata' % { 'project_id': user_context.project_id, 'volume_id': volume.id } body = {"metadata": {"k": "v2"}} response = self._get_request_response(user_context, path, 'PUT', body=body) self.assertEqual(HTTPStatus.OK, response.status_int) res_meta = response.json_body['metadata'] self.assertIn('k', res_meta) self.assertEqual('v2', res_meta['k']) @mock.patch.object(volume_api.API, 'get') def test_owner_cannot_update_metadata_for_others(self, mock_volume): owner_context = self.user_context non_owner_context = self.other_user_context volume = self._create_fake_volume(owner_context, metadata={"k": "v"}) mock_volume.return_value = volume path = '/v3/%(project_id)s/volumes/%(volume_id)s/metadata' % { 'project_id': non_owner_context.project_id, 'volume_id': volume.id } body = 
{"metadata": {"k": "v2"}} response = self._get_request_response(non_owner_context, path, 'PUT', body=body) self.assertEqual(HTTPStatus.FORBIDDEN, response.status_int) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/policies/test_volume_transfers.py0000664000175000017500000002331500000000000025276 0ustar00zuulzuul00000000000000# Copyright 2021 Red Hat, Inc. # All Rights Reserved. # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from unittest import mock import ddt from cinder.api.v3 import volume_transfer from cinder import context from cinder import exception from cinder.policies import volume_transfer as vol_transfer_policies from cinder.tests.unit.api import fakes as fake_api from cinder.tests.unit.policies import base from cinder.tests.unit import utils as test_utils import cinder.transfer from cinder.volume import api as vol_api from cinder.volume import volume_utils @ddt.ddt class VolumeTransferPolicyTest(base.BasePolicyTest): authorized_readers = [ 'legacy_admin', 'legacy_owner', 'system_admin', 'project_admin', 'project_member', 'project_reader', 'project_foo', ] unauthorized_readers = [ 'system_member', 'system_reader', 'system_foo', 'other_project_member', 'other_project_reader', ] authorized_members = [ 'legacy_admin', 'legacy_owner', 'system_admin', 'project_admin', 'project_member', 'project_reader', 'project_foo', ] unauthorized_members = [ 'system_member', 'system_reader', 'system_foo', 'other_project_member', 'other_project_reader', ] accept_authorized_users = [ 'legacy_admin', 'legacy_owner', 'system_admin', 'project_admin', 'project_member', 'project_reader', 'project_foo', 'other_project_member', 'other_project_reader', ] accept_unauthorized_users = [ 'system_member', 'system_reader', 'system_foo', ] # Basic policy test is without enforcing scope (which cinder doesn't # yet support) and deprecated rules enabled. 
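    # NOTE: as in the volume metadata tests above, setUp() is parameterized so
    # that VolumeTransferPolicySecureRbacTest below can disable the deprecated
    # policy rules.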
def setUp(self, enforce_scope=False, enforce_new_defaults=False, *args, **kwargs): super().setUp(enforce_scope, enforce_new_defaults, *args, **kwargs) self.controller = volume_transfer.VolumeTransferController() self.api_path = '/v3/%s/os-volume-transfer' % (self.project_id) self.volume_transfer_api = cinder.transfer.API() def _create_volume(self): vol_type = test_utils.create_volume_type(self.project_admin_context, name='fake_vol_type', testcase_instance=self) volume = test_utils.create_volume(self.project_member_context, volume_type_id=vol_type.id, testcase_instance=self) return volume def _create_volume_transfer(self, volume=None): if not volume: volume = self._create_volume() return self.volume_transfer_api.create(context.get_admin_context(), volume.id, 'test-transfer') @ddt.data(*base.all_users) def test_create_volume_transfer_policy(self, user_id): volume = self._create_volume() rule_name = vol_transfer_policies.CREATE_POLICY url = self.api_path req = fake_api.HTTPRequest.blank(url) req.method = 'POST' body = {"transfer": {'volume_id': volume.id}} unauthorized_exceptions = [ exception.VolumeNotFound ] self.common_policy_check(user_id, self.authorized_members, self.unauthorized_members, unauthorized_exceptions, rule_name, self.controller.create, req, body=body) @ddt.data(*base.all_users) def test_get_volume_transfer_policy(self, user_id): vol_transfer = self._create_volume_transfer() rule_name = vol_transfer_policies.GET_POLICY url = '%s/%s' % (self.api_path, vol_transfer['id']) req = fake_api.HTTPRequest.blank(url) unauthorized_exceptions = [ exception.TransferNotFound ] self.common_policy_check(user_id, self.authorized_readers, self.unauthorized_readers, unauthorized_exceptions, rule_name, self.controller.show, req, id=vol_transfer['id']) @ddt.data(*base.all_users) def test_get_all_volumes_policy(self, user_id): self._create_volume_transfer() rule_name = vol_transfer_policies.GET_ALL_POLICY url = self.api_path req = fake_api.HTTPRequest.blank(url) # Generally, any logged in user can list all transfers. authorized_users = [user_id] unauthorized_users = [] # The exception is when deprecated rules are disabled, in which case # roles are enforced. Users without the 'reader' role should be # blocked. if self.enforce_new_defaults: context = self.create_context(user_id) if 'reader' not in context.roles: authorized_users = [] unauthorized_users = [user_id] response = self.common_policy_check(user_id, authorized_users, unauthorized_users, [], rule_name, self.controller.index, req) # For some users, even if they're authorized, the list of volumes # will be empty if they are not in the volume's project. empty_response_users = [ *self.unauthorized_readers, # legacy_admin and system_admin do not have a project_id, and # so the list of backups returned will be empty. 
'legacy_admin', 'system_admin', ] transfers = response['transfers'] if response else [] transfer_count = 0 if user_id in empty_response_users else 1 self.assertEqual(transfer_count, len(transfers)) @ddt.data(*base.all_users) @mock.patch.object(volume_utils, 'notify_about_volume_usage') def test_delete_volume_transfer_policy(self, user_id, mock_notify): vol_transfer = self._create_volume_transfer() rule_name = vol_transfer_policies.DELETE_POLICY url = '%s/%s' % (self.api_path, vol_transfer['id']) req = fake_api.HTTPRequest.blank(url) req.method = 'DELETE' unauthorized_exceptions = [ exception.TransferNotFound ] self.common_policy_check(user_id, self.authorized_members, self.unauthorized_members, unauthorized_exceptions, rule_name, self.controller.delete, req, id=vol_transfer['id']) @ddt.data(*base.all_users) @mock.patch('cinder.transfer.api.QUOTAS') @mock.patch.object(volume_utils, 'notify_about_volume_usage') def test_accept_volume_transfer_policy(self, user_id, mock_notify, mock_quotas): vol_transfer = self._create_volume_transfer() rule_name = vol_transfer_policies.ACCEPT_POLICY url = '%s/%s/accept' % (self.api_path, vol_transfer['id']) req = fake_api.HTTPRequest.blank(url) req.method = 'POST' body = {"accept": {'auth_key': vol_transfer['auth_key']}} unauthorized_exceptions = [ exception.TransferNotFound ] with mock.patch.object(vol_api.API, 'accept_transfer'): self.common_policy_check(user_id, self.accept_authorized_users, self.accept_unauthorized_users, unauthorized_exceptions, rule_name, self.controller.accept, req, id=vol_transfer['id'], body=body) class VolumeTransferPolicySecureRbacTest(VolumeTransferPolicyTest): authorized_readers = [ 'legacy_admin', 'system_admin', 'project_admin', 'project_member', 'project_reader', ] unauthorized_readers = [ 'legacy_owner', 'system_member', 'system_reader', 'system_foo', 'project_foo', 'other_project_member', 'other_project_reader', ] authorized_members = [ 'legacy_admin', 'system_admin', 'project_admin', 'project_member', ] unauthorized_members = [ 'legacy_owner', 'system_member', 'system_reader', 'system_foo', 'project_reader', 'project_foo', 'other_project_member', 'other_project_reader', ] # This is a special case since other project member should be # allowed to accept the transfer of a volume accept_authorized_users = authorized_members.copy() accept_authorized_users.append('other_project_member') accept_unauthorized_users = unauthorized_members.copy() accept_unauthorized_users.remove('other_project_member') def setUp(self, *args, **kwargs): # Test secure RBAC by disabling deprecated policy rules (scope # is still not enabled). super().setUp(enforce_scope=False, enforce_new_defaults=True, *args, **kwargs) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/policies/test_volume_type.py0000664000175000017500000003204200000000000024245 0ustar00zuulzuul00000000000000# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import http.client from unittest import mock import ddt from cinder.api.contrib import volume_type_encryption as vol_type_enc from cinder.api import microversions as mv from cinder.api.v3 import types from cinder import db from cinder.policies import volume_type as type_policy from cinder.tests.unit.api import fakes as fake_api from cinder.tests.unit import fake_constants from cinder.tests.unit.policies import base from cinder.tests.unit.policies import test_base from cinder.tests.unit import utils as test_utils @ddt.ddt class VolumeTypePolicyTest(base.BasePolicyTest): """Verify default policy settings for the types API""" # legacy: everyone can make these calls authorized_readers = [ 'legacy_admin', 'legacy_owner', 'system_admin', 'project_admin', 'project_member', 'project_reader', 'project_foo', 'system_member', 'system_reader', 'system_foo', 'other_project_member', 'other_project_reader', ] unauthorized_readers = [] unauthorized_exceptions = [] # Basic policy test is without enforcing scope (which cinder doesn't # yet support) and deprecated rules enabled. def setUp(self, enforce_scope=False, enforce_new_defaults=False, *args, **kwargs): super().setUp(enforce_scope, enforce_new_defaults, *args, **kwargs) self.controller = types.VolumeTypesController() self.api_path = '/v3/%s/types' % (self.project_id) self.api_version = mv.BASE_VERSION @ddt.data(*base.all_users) def test_type_get_all_policy(self, user_id): rule_name = type_policy.GET_ALL_POLICY url = self.api_path req = fake_api.HTTPRequest.blank(url, version=self.api_version) self.common_policy_check(user_id, self.authorized_readers, self.unauthorized_readers, self.unauthorized_exceptions, rule_name, self.controller.index, req) @ddt.data(*base.all_users) def test_type_get_policy(self, user_id): vol_type = test_utils.create_volume_type(self.project_admin_context, testcase_instance=self, name='fake_vol_type') rule_name = type_policy.GET_POLICY url = '%s/%s' % (self.api_path, vol_type.id) req = fake_api.HTTPRequest.blank(url, version=self.api_version) self.common_policy_check(user_id, self.authorized_readers, self.unauthorized_readers, self.unauthorized_exceptions, rule_name, self.controller.show, req, id=vol_type.id) @ddt.data(*base.all_users) def test_extra_spec_policy(self, user_id): vol_type = test_utils.create_volume_type( self.project_admin_context, testcase_instance=self, name='fake_vol_type', extra_specs={'multiattach': ' True'}) rule_name = type_policy.EXTRA_SPEC_POLICY url = '%s/%s' % (self.api_path, vol_type.id) req = fake_api.HTTPRequest.blank(url, version=self.api_version) # Relax the GET_POLICY in order to get past that check. self.policy.set_rules({type_policy.GET_POLICY: ""}, overwrite=False) # With the relaxed GET_POLICY, all users are authorized because # failing the policy check is not fatal. authorized_readers = [user_id] unauthorized_readers = [] response = self.common_policy_check(user_id, authorized_readers, unauthorized_readers, self.unauthorized_exceptions, rule_name, self.controller.show, req, id=vol_type.id) # Check whether the response should contain extra_specs. The logic # is a little unusual: # - The new rule is SYSTEM_READER_OR_PROJECT_READER (i.e. users # with the 'reader' role) # - The deprecated rule is RULE_ADMIN_API (i.e. 
users with the # 'admin' role) context = self.create_context(user_id) if 'reader' in context.roles or 'admin' in context.roles: self.assertIn('extra_specs', response['volume_type']) else: self.assertNotIn('extra_specs', response['volume_type']) class VolumeTypePolicySecureRbacTest(VolumeTypePolicyTest): authorized_readers = [ 'legacy_admin', 'system_admin', 'project_admin', 'project_member', 'project_reader', 'system_member', 'system_reader', 'other_project_member', 'other_project_reader', ] unauthorized_readers = [ 'legacy_owner', 'project_foo', 'system_foo', ] unauthorized_exceptions = [] def setUp(self, *args, **kwargs): # Test secure RBAC by disabling deprecated policy rules (scope # is still not enabled). super().setUp(enforce_scope=False, enforce_new_defaults=True, *args, **kwargs) class VolumeTypeEncryptionTypePolicyTests(test_base.CinderPolicyTests): """Verify default policy settings for encryption types in the types API""" def setUp(self): super(VolumeTypeEncryptionTypePolicyTests, self).setUp() self.volume_type = self._create_fake_type(self.admin_context) def test_admin_can_create_volume_type_encryption_type(self): admin_context = self.admin_context path = '/v3/%(project_id)s/types/%(type_id)s/encryption' % { 'project_id': admin_context.project_id, 'type_id': self.volume_type.id } body = {"encryption": {"key_size": 128, "provider": "luks", "control_location": "front-end", "cipher": "aes-xts-plain64"}} response = self._get_request_response(admin_context, path, 'POST', body=body) self.assertEqual(http.client.OK, response.status_int) def test_nonadmin_cannot_create_volume_type_encryption_type(self): self.assertTrue(self.volume_type.is_public) path = '/v3/%(project_id)s/types/%(type_id)s/encryption' % { 'project_id': self.user_context.project_id, 'type_id': self.volume_type.id } body = {"encryption": {"key_size": 128, "provider": "luks", "control_location": "front-end", "cipher": "aes-xts-plain64"}} response = self._get_request_response(self.user_context, path, 'POST', body=body) self.assertEqual(http.client.FORBIDDEN, response.status_int) @mock.patch.object(vol_type_enc.VolumeTypeEncryptionController, '_get_volume_type_encryption') def test_admin_can_show_volume_type_encryption_type(self, mock_get_enc): mock_get_enc.return_value = { 'cipher': 'aes-xts-plain64', 'control_location': 'front-end', 'encryption_id': fake_constants.ENCRYPTION_TYPE_ID, 'key_size': 128, 'provider': 'luks', 'volume_type_id': self.volume_type.id} admin_context = self.admin_context path = '/v3/%(project_id)s/types/%(type_id)s/encryption' % { 'project_id': admin_context.project_id, 'type_id': self.volume_type.id } response = self._get_request_response(admin_context, path, 'GET') self.assertEqual(http.client.OK, response.status_int) def test_nonadmin_cannot_show_volume_type_encryption_type(self): self.assertTrue(self.volume_type.is_public) path = '/v3/%(project_id)s/types/%(type_id)s/encryption' % { 'project_id': self.user_context.project_id, 'type_id': self.volume_type.id } response = self._get_request_response(self.user_context, path, 'GET') self.assertEqual(http.client.FORBIDDEN, response.status_int) @mock.patch.object(vol_type_enc.VolumeTypeEncryptionController, '_get_volume_type_encryption') def test_admin_can_show_volume_type_encryption_spec_item( self, mock_get_enc): enc_specs = { 'cipher': 'aes-xts-plain64', 'control_location': 'front-end', 'encryption_id': fake_constants.ENCRYPTION_TYPE_ID, 'key_size': 128, 'provider': 'foobar', 'volume_type_id': self.volume_type.id} mock_get_enc.return_value = enc_specs 
admin_context = self.admin_context path = '/v3/%(project_id)s/types/%(type_id)s/encryption/%(item)s' % { 'project_id': admin_context.project_id, 'type_id': self.volume_type.id, 'item': 'provider' } response = self._get_request_response(admin_context, path, 'GET') self.assertEqual(http.client.OK, response.status_int) def test_nonadmin_cannot_show_volume_type_encryption_spec_item(self): self.assertTrue(self.volume_type.is_public) path = '/v3/%(project_id)s/types/%(type_id)s/encryption/%(item)s' % { 'project_id': self.user_context.project_id, 'type_id': self.volume_type.id, 'item': 'control_location' } response = self._get_request_response(self.user_context, path, 'GET') self.assertEqual(http.client.FORBIDDEN, response.status_int) @mock.patch.object(db, 'volume_type_encryption_delete', return_value=None) def test_admin_can_delete_volume_type_encryption_type( self, mock_db_delete): admin_context = self.admin_context path = '/v3/%(project_id)s/types/%(type_id)s/encryption/%(enc_id)s' % { 'project_id': admin_context.project_id, 'type_id': self.volume_type.id, 'enc_id': fake_constants.ENCRYPTION_TYPE_ID } response = self._get_request_response(admin_context, path, 'DELETE') self.assertEqual(http.client.ACCEPTED, response.status_int) def test_nonadmin_cannot_delete_volume_type_encryption_type(self): self.assertTrue(self.volume_type.is_public) path = '/v3/%(project_id)s/types/%(type_id)s/encryption/%(enc_id)s' % { 'project_id': self.user_context.project_id, 'type_id': self.volume_type.id, 'enc_id': fake_constants.ENCRYPTION_TYPE_ID } response = self._get_request_response(self.user_context, path, 'DELETE') self.assertEqual(http.client.FORBIDDEN, response.status_int) @mock.patch.object(db, 'volume_type_encryption_update', return_value=None) def test_admin_can_update_volume_type_encryption_type( self, mock_db_update): admin_context = self.admin_context req_body = {"encryption": {"key_size": 64, "control_location": "back-end"}} path = '/v3/%(project_id)s/types/%(type_id)s/encryption/%(enc_id)s' % { 'project_id': admin_context.project_id, 'type_id': self.volume_type.id, 'enc_id': fake_constants.ENCRYPTION_TYPE_ID } response = self._get_request_response(admin_context, path, 'PUT', body=req_body) self.assertEqual(http.client.OK, response.status_int) def test_nonadmin_cannot_update_volume_type_encryption_type(self): self.assertTrue(self.volume_type.is_public) req_body = {"encryption": {"key_size": 64, "control_location": "back-end"}} path = '/v3/%(project_id)s/types/%(type_id)s/encryption/%(enc_id)s' % { 'project_id': self.user_context.project_id, 'type_id': self.volume_type.id, 'enc_id': fake_constants.ENCRYPTION_TYPE_ID } response = self._get_request_response(self.user_context, path, 'PUT', body=req_body) self.assertEqual(http.client.FORBIDDEN, response.status_int) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/policy.yaml0000664000175000017500000000110000000000000020627 0ustar00zuulzuul00000000000000# Default rule for most non-Admin APIs. "admin_or_owner": "is_admin:True or project_id:%(project_id)s" # Default rule for most Admin APIs. "admin_api": "is_admin:True" # Reset status of group snapshot. # POST /group_snapshots/{g_snapshot_id}/action (reset_status) "group:reset_group_snapshot_status": "" # List all services. # GET /os-services "volume_extension:services:index": "" # Update service, including failover_host, thaw, freeze, disable, # enable, set-log and get-log actions. 
# PUT /os-services/{action} #"volume_extension:services:update": "rule:admin_api" ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315577.2231195 cinder-27.0.0/cinder/tests/unit/privsep/0000775000175000017500000000000000000000000020144 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/privsep/__init__.py0000664000175000017500000000000000000000000022243 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315577.2231195 cinder-27.0.0/cinder/tests/unit/privsep/targets/0000775000175000017500000000000000000000000021615 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/privsep/targets/__init__.py0000664000175000017500000000000000000000000023714 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/privsep/targets/fake_nvmet_lib.py0000664000175000017500000000303600000000000025136 0ustar00zuulzuul00000000000000# Copyright 2022 Red Hat, Inc # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ FAKE the nvmet library if it's not installed. This must be imported before cinder/volume/targets/nvmet.py and cinder/privsep/targets/nvmet.py """ import sys from unittest import mock from cinder import exception mock_nvmet_lib = mock.Mock(name='nvmet', Root=type('Root', (mock.Mock, ), {}), Subsystem=type('Subsystem', (mock.Mock, ), {}), Port=type('Port', (mock.Mock, ), {}), Namespace=type('Namespace', (mock.Mock, ), {'MAX_NSID': 8192}), Host=type('Host', (mock.Mock, ), {}), ANAGroup=type('ANAGroup', (mock.Mock, ), {}), Referral=type('Referral', (mock.Mock, ), {}), nvme=mock.Mock(CFSNotFound=exception.NotFound)) sys.modules['nvmet'] = mock_nvmet_lib reset_mock = mock_nvmet_lib.reset_mock ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/privsep/targets/test_nvmet.py0000664000175000017500000004471500000000000024372 0ustar00zuulzuul00000000000000# Copyright 2022 Red Hat, Inc # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import unittest from unittest import mock import ddt from cinder import exception from cinder.tests.unit.privsep.targets import fake_nvmet_lib from cinder.tests.unit import test # This must go after fake_nvmet_lib has been imported (thus the noqa) from cinder.privsep.targets import nvmet # noqa @ddt.ddt class TestSerialize(test.TestCase): def setUp(self): super().setUp() fake_nvmet_lib.reset_mock() def test_tuple(self): """Test serialization of a tuple.""" instance = (1, 'string') res = nvmet.serialize(instance) self.assertEqual(('tuple', instance), res) @ddt.data(1, 1.1, 'string', None, [1, 2, 'string']) def test_others(self, instance): """Test normal Python instances that should not be modified.""" res = nvmet.serialize(instance) self.assertEqual(instance, res) def test_root(self): instance = nvmet.Root() res = nvmet.serialize(instance) self.assertEqual(('Root', {}), res) def test_host(self): instance = nvmet.Host(nqn='_nqn') res = nvmet.serialize(instance) self.assertEqual(('Host', {'nqn': '_nqn', 'mode': 'lookup'}), res) def test_subsystem(self): instance = nvmet.Subsystem(nqn='_nqn') res = nvmet.serialize(instance) self.assertEqual(('Subsystem', {'nqn': '_nqn', 'mode': 'lookup'}), res) def test_namespace(self): subsys = nvmet.Subsystem(nqn='_nqn') instance = nvmet.Namespace(subsystem=subsys, nsid='_nsid') res = nvmet.serialize(instance) # Subsystem is a recursive serialization expected = ( 'Namespace', {'subsystem': ('Subsystem', {'nqn': '_nqn', 'mode': 'lookup'}), 'nsid': '_nsid', 'mode': 'lookup'}) self.assertEqual(expected, res) def test_port(self): instance = nvmet.Port(portid='_portid') res = nvmet.serialize(instance) expected = ('Port', {'portid': '_portid', 'mode': 'lookup'}) self.assertEqual(expected, res) def test_Referral(self): port = nvmet.Port(portid='_portid') # name is a Mock attribute, so we'll use it as instance.name instance = nvmet.Referral(port=port, name='_name') res = nvmet.serialize(instance) # Port is a recursive serialization expected = ( 'Referral', {'port': ('Port', {'portid': '_portid', 'mode': 'lookup'}), 'name': instance.name, 'mode': 'lookup'}) self.assertEqual(expected, res) def test_ANAGroup(self): port = nvmet.Port(portid='_portid') instance = nvmet.ANAGroup(port=port, grpid='_grpid') res = nvmet.serialize(instance) expected = ( 'ANAGroup', {'port': ('Port', {'portid': '_portid', 'mode': 'lookup'}), 'grpid': '_grpid', 'mode': 'lookup'}) self.assertEqual(expected, res) @ddt.ddt class TestDeserialize(test.TestCase): def test_deserialize_tuple(self): """Test serialization of a tuple.""" expected = (1, 'string') data = ('tuple', expected) res = nvmet.deserialize(data) self.assertEqual(expected, res) @ddt.data(1, 1.1, 'string', None, [1, 2, 'string']) def test_deserialize_others(self, data): """Test normal Python instances that should not be modified.""" res = nvmet.deserialize(data) self.assertEqual(data, res) def test_deserialize_root(self): data = ('Root', {}) res = nvmet.deserialize(data) self.assertIsInstance(res, nvmet.nvmet.Root) def test_deserialize_host(self): data = ('Host', {'nqn': '_nqn', 'mode': 'lookup'}) host = nvmet.deserialize(data) self.assertIsInstance(host, nvmet.nvmet.Host) self.assertEqual('_nqn', host.nqn) self.assertEqual('lookup', host.mode) def test_deserialize_subsystem(self): data = ('Subsystem', {'nqn': '_nqn', 'mode': 'lookup'}) subsys = nvmet.deserialize(data) self.assertIsInstance(subsys, nvmet.nvmet.Subsystem) self.assertEqual('_nqn', subsys.nqn) self.assertEqual('lookup', subsys.mode) def test_deserialize_namespace(self): 
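        # Deserializing a Namespace also rebuilds its nested Subsystem from the
        # recursively serialized ('Subsystem', {...}) tuple, as verified below.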
data = ('Namespace', {'subsystem': ('Subsystem', {'nqn': '_nqn', 'mode': 'lookup'}), 'nsid': '_nsid', 'mode': 'lookup'}) ns = nvmet.deserialize(data) self.assertIsInstance(ns, nvmet.nvmet.Namespace) self.assertEqual('_nsid', ns.nsid) self.assertEqual('lookup', ns.mode) self.assertIsInstance(ns.subsystem, nvmet.nvmet.Subsystem) self.assertEqual('_nqn', ns.subsystem.nqn) self.assertEqual('lookup', ns.subsystem.mode) def test_deserialize_port(self): data = ('Port', {'portid': '_portid', 'mode': 'lookup'}) port = nvmet.deserialize(data) self.assertIsInstance(port, nvmet.nvmet.Port) self.assertEqual('_portid', port.portid) self.assertEqual('lookup', port.mode) def test_deserialize_Referral(self): data = ('Referral', {'port': ('Port', {'portid': '_portid', 'mode': 'lookup'}), 'name': '1', 'mode': 'lookup'}) ref = nvmet.deserialize(data) self.assertIsInstance(ref, nvmet.nvmet.Referral) self.assertEqual('1', ref._mock_name) # Because name is used by Mock self.assertEqual('lookup', ref.mode) self.assertIsInstance(ref.port, nvmet.nvmet.Port) self.assertEqual('_portid', ref.port.portid) self.assertEqual('lookup', ref.port.mode) def test_deserialize_ANAGroup(self): data = ('ANAGroup', {'port': ('Port', {'portid': '_portid', 'mode': 'lookup'}), 'grpid': '_grpid', 'mode': 'lookup'}) ana = nvmet.deserialize(data) self.assertIsInstance(ana, nvmet.nvmet.ANAGroup) self.assertEqual('_grpid', ana.grpid) self.assertEqual('lookup', ana.mode) self.assertIsInstance(ana.port, nvmet.nvmet.Port) self.assertEqual('_portid', ana.port.portid) self.assertEqual('lookup', ana.port.mode) @mock.patch.object(nvmet, 'deserialize') def test_deserialize_params(self, mock_deserialize): mock_deserialize.side_effect = [11, 22, 33, 55, 77] args = [1, 2, 3] kwargs = {'4': 5, '6': 7} res_args, res_kwargs = nvmet.deserialize_params(args, kwargs) self.assertEqual(5, mock_deserialize.call_count) mock_deserialize.assert_has_calls((mock.call(1), mock.call(2), mock.call(3), mock.call(5), mock.call(7))) self.assertEqual([11, 22, 33], res_args) self.assertEqual({'4': 55, '6': 77}, res_kwargs) class TestPrivsep(test.TestCase): @mock.patch.object(nvmet.LOG, 'error') def test__nvmet_setup_failure(self, mock_log): self.assertRaises(exception.CinderException, nvmet._nvmet_setup_failure, mock.sentinel.message) mock_log.assert_called_once_with(mock.sentinel.message) @mock.patch.object(nvmet, '_privsep_setup') def test_privsep_setup(self, mock_setup): args = [mock.sentinel.arg1, mock.sentinel.arg2] kwargs = {'kwarg1': mock.sentinel.kwarg1} res = nvmet.privsep_setup('MyClass', err_func=None, *args, **kwargs) mock_setup.assert_called_once_with('MyClass', *args, **kwargs) self.assertEqual(mock_setup.return_value, res) @mock.patch.object(nvmet, '_privsep_setup') def test_privsep_setup_err_func_as_arg_none(self, mock_setup): exc = exception.CinderException('ouch') mock_setup.side_effect = exc args = [mock.sentinel.arg1, mock.sentinel.arg2, None] kwargs = {'kwarg1': mock.sentinel.kwarg1} # NOTE: testtools.TestCase were Cinder's tests inherit from masks the # unittest's assertRaises that supports context manager usage, so we # address it directly. 
with unittest.TestCase.assertRaises(self, exception.CinderException) as cm: nvmet.privsep_setup('MyClass', *args, **kwargs) self.assertEqual(exc, cm.exception) mock_setup.assert_called_once_with('MyClass', *args[:-1], **kwargs) @mock.patch.object(nvmet, '_privsep_setup') def test_privsep_setup_err_func_as_arg(self, mock_setup): def err_func(msg): raise exception.VolumeDriverException() mock_setup.side_effect = exception.CinderException('ouch') args = [mock.sentinel.arg1, mock.sentinel.arg2, err_func] self.assertRaises(exception.VolumeDriverException, nvmet.privsep_setup, 'MyClass', *args) mock_setup.assert_called_once_with('MyClass', *args[:-1]) # We mock the privsep context mode to fake that we are not the client @mock.patch('cinder.privsep.sys_admin_pctxt.client_mode', False) @mock.patch.object(nvmet, 'deserialize_params') @mock.patch.object(nvmet.nvmet, 'MyClass') def test__privsep_setup(self, mock_class, mock_deserialize): args = (1, 2, 3) kwargs = {'4': 5, '6': 7} deserialized_args = (11, 22, 33) deserialized_kwargs = {'4': 55, '6': 77} expected_args = deserialized_args[:] expected_kwargs = deserialized_kwargs.copy() expected_kwargs['err_func'] = nvmet._nvmet_setup_failure mock_deserialize.return_value = (deserialized_args, deserialized_kwargs) res = nvmet._privsep_setup('MyClass', *args, **kwargs) mock_deserialize.assert_called_once_with(args, kwargs) mock_class.setup.assert_called_once_with(*expected_args, **expected_kwargs) self.assertEqual(mock_class.setup.return_value, res) # We mock the privsep context mode to fake that we are not the client @mock.patch('cinder.privsep.sys_admin_pctxt.client_mode', False) @mock.patch.object(nvmet, 'deserialize') @mock.patch.object(nvmet, 'deserialize_params') def test_do_privsep_call(self, mock_deserialize_params, mock_deserialize): args = (1, 2, 3) kwargs = {'4': 5, '6': 7} deserialized_args = (11, 22, 33) deserialized_kwargs = {'4': 55, '6': 77} mock_deserialize_params.return_value = (deserialized_args, deserialized_kwargs) res = nvmet.do_privsep_call(mock.sentinel.instance, 'method_name', *args, **kwargs) mock_deserialize.assert_called_once_with(mock.sentinel.instance) mock_deserialize_params.assert_called_once_with(args, kwargs) mock_method = mock_deserialize.return_value.method_name mock_method.assert_called_once_with(*deserialized_args, **deserialized_kwargs) self.assertEqual(mock_method.return_value, res) @ddt.ddt class TestNvmetClasses(test.TestCase): @ddt.data('Host', 'Referral', 'ANAGroup') def test_same_classes(self, cls_name): self.assertEqual(getattr(nvmet, cls_name), getattr(nvmet.nvmet, cls_name)) def test_subsystem_init(self): subsys = nvmet.Subsystem('nqn') self.assertIsInstance(subsys, nvmet.nvmet.Subsystem) self.assertIsInstance(subsys, nvmet.Subsystem) self.assertEqual('nqn', subsys.nqn) self.assertEqual('lookup', subsys.mode) @mock.patch.object(nvmet, 'privsep_setup') def test_subsystem_setup(self, mock_setup): nvmet.Subsystem.setup(mock.sentinel.t, mock.sentinel.err_func) mock_setup.assert_called_once_with('Subsystem', mock.sentinel.t, mock.sentinel.err_func) @mock.patch.object(nvmet, 'privsep_setup') def test_subsystem_setup_no_err_func(self, mock_setup): nvmet.Subsystem.setup(mock.sentinel.t) mock_setup.assert_called_once_with('Subsystem', mock.sentinel.t, None) @mock.patch.object(nvmet, 'serialize') @mock.patch.object(nvmet, 'do_privsep_call') def test_subsystem_delete(self, mock_privsep, mock_serialize): subsys = nvmet.Subsystem('nqn') subsys.delete() mock_serialize.assert_called_once_with(subsys) 
mock_privsep.assert_called_once_with(mock_serialize.return_value, 'delete') @mock.patch('os.listdir', return_value=['/path/namespaces/1', '/path/namespaces/2']) @mock.patch.object(nvmet, 'Namespace') def test_subsystem_namespaces(self, mock_nss, mock_listdir): subsys = nvmet.Subsystem(mock.sentinel.nqn) subsys.path = '/path' # Set by the parent nvmet library Root class res = list(subsys.namespaces) self.assertEqual([mock_nss.return_value, mock_nss.return_value], res) mock_listdir.assert_called_once_with('/path/namespaces/') self.assertEqual(2, mock_nss.call_count) mock_nss.assert_has_calls((mock.call(subsys, '1'), mock.call(subsys, '2'))) def test_port_init(self): port = nvmet.Port('portid') self.assertIsInstance(port, nvmet.nvmet.Port) self.assertIsInstance(port, nvmet.Port) self.assertEqual('portid', port.portid) self.assertEqual('lookup', port.mode) @mock.patch.object(nvmet, 'serialize') @mock.patch.object(nvmet, 'privsep_setup') def test_port_setup(self, mock_setup, mock_serialize): nvmet.Port.setup(mock.sentinel.root, mock.sentinel.n, mock.sentinel.err_func) mock_serialize.assert_called_once_with(mock.sentinel.root) mock_setup.assert_called_once_with('Port', mock_serialize.return_value, mock.sentinel.n, mock.sentinel.err_func) @mock.patch.object(nvmet, 'serialize') @mock.patch.object(nvmet, 'privsep_setup') def test_port_setup_no_err_func(self, mock_setup, mock_serialize): nvmet.Port.setup(mock.sentinel.root, mock.sentinel.n) mock_serialize.assert_called_once_with(mock.sentinel.root) mock_setup.assert_called_once_with('Port', mock_serialize.return_value, mock.sentinel.n, None) @mock.patch.object(nvmet, 'serialize') @mock.patch.object(nvmet, 'do_privsep_call') def test_port_add_subsystem(self, mock_privsep, mock_serialize): port = nvmet.Port('portid') port.add_subsystem(mock.sentinel.nqn) mock_serialize.assert_called_once_with(port) mock_privsep.assert_called_once_with(mock_serialize.return_value, 'add_subsystem', mock.sentinel.nqn) @mock.patch.object(nvmet, 'serialize') @mock.patch.object(nvmet, 'do_privsep_call') def test_port_remove_subsystem(self, mock_privsep, mock_serialize): port = nvmet.Port('portid') port.remove_subsystem(mock.sentinel.nqn) mock_serialize.assert_called_once_with(port) mock_privsep.assert_called_once_with(mock_serialize.return_value, 'remove_subsystem', mock.sentinel.nqn) @mock.patch.object(nvmet, 'serialize') @mock.patch.object(nvmet, 'do_privsep_call') def test_port_delete(self, mock_privsep, mock_serialize): port = nvmet.Port('portid') port.delete() mock_serialize.assert_called_once_with(port) mock_privsep.assert_called_once_with(mock_serialize.return_value, 'delete') @mock.patch('os.listdir', return_value=['/path/ports/1', '/path/ports/2']) @mock.patch.object(nvmet, 'Port') def test_root_ports(self, mock_port, mock_listdir): r = nvmet.Root() r.path = '/path' # This is set by the parent nvmet library Root class res = list(r.ports) self.assertEqual([mock_port.return_value, mock_port.return_value], res) mock_listdir.assert_called_once_with('/path/ports/') self.assertEqual(2, mock_port.call_count) mock_port.assert_has_calls((mock.call('1'), mock.call('2'))) def test_namespace_init(self): ns = nvmet.Namespace('subsystem', 'nsid') self.assertIsInstance(ns, nvmet.nvmet.Namespace) self.assertIsInstance(ns, nvmet.Namespace) self.assertEqual('subsystem', ns.subsystem) self.assertEqual('nsid', ns.nsid) self.assertEqual('lookup', ns.mode) @mock.patch.object(nvmet, 'serialize') @mock.patch.object(nvmet, 'privsep_setup') def test_namespace_setup(self, mock_setup, 
mock_serialize): nvmet.Namespace.setup(mock.sentinel.subsys, mock.sentinel.n) mock_serialize.assert_called_once_with(mock.sentinel.subsys) mock_setup.assert_called_once_with('Namespace', mock_serialize.return_value, mock.sentinel.n, None) @mock.patch.object(nvmet, 'serialize') @mock.patch.object(nvmet, 'do_privsep_call') def test_namespace_delete(self, mock_privsep, mock_serialize): ns = nvmet.Namespace('subsystem', 'nsid') ns.delete() mock_serialize.assert_called_once_with(ns) mock_privsep.assert_called_once_with(mock_serialize.return_value, 'delete') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/privsep/test_format_inspector.py0000664000175000017500000001107200000000000025134 0ustar00zuulzuul00000000000000# Copyright 2024 Red Hat, Inc # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from unittest import mock from cinder.privsep import format_inspector as pfi from cinder.tests.unit import test class TestFormatInspectorHelper(test.TestCase): @mock.patch('cinder.image.format_inspector.detect_file_format') def test_get_format_if_safe__happy_path(self, mock_detect): mock_inspector = mock.MagicMock() mock_inspector.__str__.return_value = 'mock_fmt' mock_safety = mock_inspector.safety_check mock_safety.return_value = True mock_backing = mock_inspector.safety_check_allow_backing_file mock_detect.return_value = mock_inspector test_path = mock.sentinel.path fmt_name = pfi._get_format_if_safe(path=test_path, allow_qcow2_backing_file=False) self.assertEqual(fmt_name, 'mock_fmt') mock_safety.assert_called_once_with() mock_backing.assert_not_called() @mock.patch('cinder.image.format_inspector.detect_file_format') def test_get_format_if_safe__allow_backing(self, mock_detect): mock_inspector = mock.MagicMock() mock_inspector.__str__.return_value = 'qcow2' mock_safety = mock_inspector.safety_check mock_safety.return_value = False mock_backing = mock_inspector.safety_check_allow_backing_file mock_backing.return_value = True mock_detect.return_value = mock_inspector test_path = mock.sentinel.path fmt_name = pfi._get_format_if_safe(path=test_path, allow_qcow2_backing_file=True) self.assertEqual(fmt_name, 'qcow2') mock_safety.assert_called_once_with() mock_backing.assert_called_once_with() @mock.patch('cinder.image.format_inspector.detect_file_format') def test_get_format_if_safe__backing_fail(self, mock_detect): """backing flag should only work for qcow2""" mock_inspector = mock.MagicMock() mock_inspector.__str__.return_value = 'mock_fmt' mock_safety = mock_inspector.safety_check mock_safety.return_value = False mock_backing = mock_inspector.safety_check_allow_backing_file mock_detect.return_value = mock_inspector test_path = mock.sentinel.path fmt_name = pfi._get_format_if_safe(path=test_path, allow_qcow2_backing_file=True) self.assertIsNone(fmt_name) mock_safety.assert_called_once_with() mock_backing.assert_not_called() @mock.patch('cinder.image.format_inspector.detect_file_format') def 
test_get_format_if_safe__allow_backing_but_other_problem( self, mock_detect): mock_inspector = mock.MagicMock() mock_inspector.__str__.return_value = 'qcow2' mock_safety = mock_inspector.safety_check mock_safety.return_value = False mock_backing = mock_inspector.safety_check_allow_backing_file mock_backing.return_value = False mock_detect.return_value = mock_inspector test_path = mock.sentinel.path fmt_name = pfi._get_format_if_safe(path=test_path, allow_qcow2_backing_file=True) self.assertIsNone(fmt_name) mock_safety.assert_called_once_with() mock_backing.assert_called_once_with() @mock.patch('cinder.image.format_inspector.detect_file_format') def test_get_format_if_safe__unsafe(self, mock_detect): mock_inspector = mock.MagicMock() mock_inspector.__str__.return_value = 'mock_fmt' mock_safety = mock_inspector.safety_check mock_safety.return_value = False mock_backing = mock_inspector.safety_check_allow_backing_file mock_detect.return_value = mock_inspector test_path = mock.sentinel.path fmt_name = pfi._get_format_if_safe(path=test_path, allow_qcow2_backing_file=False) self.assertIsNone(fmt_name) mock_safety.assert_called_once_with() mock_backing.assert_not_called() ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315577.2271197 cinder-27.0.0/cinder/tests/unit/scheduler/0000775000175000017500000000000000000000000020432 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/scheduler/__init__.py0000664000175000017500000000000000000000000022531 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/scheduler/fake_hosts.py0000664000175000017500000000315600000000000023137 0ustar00zuulzuul00000000000000# Copyright 2012 Intel Inc, OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Fakes For filters tests. """ class FakeHostManager(object): """Defines fake hosts. 
host1: free_ram_mb=1024-512-512=0, free_disk_gb=1024-512-512=0 host2: free_ram_mb=2048-512=1536 free_disk_gb=2048-512=1536 host3: free_ram_mb=4096-1024=3072 free_disk_gb=4096-1024=3072 host4: free_ram_mb=8192 free_disk_gb=8192 """ def __init__(self): self.service_states = { 'host1': { 'compute': {'host_memory_free': 1073741824}, }, 'host2': { 'compute': {'host_memory_free': 2147483648}, }, 'host3': { 'compute': {'host_memory_free': 3221225472}, }, 'host4': { 'compute': {'host_memory_free': 999999999}, }, } class FakeHostState(object): def __init__(self, host, attribute_dict): self.host = host for (key, val) in attribute_dict.items(): setattr(self, key, val) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/scheduler/fakes.py0000664000175000017500000003136000000000000022100 0ustar00zuulzuul00000000000000# Copyright 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Fakes For Scheduler tests. """ import copy from oslo_utils import importutils from oslo_utils import timeutils from oslo_utils import uuidutils from cinder import context as cinder_context from cinder.scheduler import filter_scheduler from cinder.scheduler import filters from cinder.scheduler import host_manager from cinder.tests.unit.scheduler import helpers from cinder.volume import volume_utils UTC_NOW = timeutils.utcnow() SERVICE_STATES = { 'host1': {'total_capacity_gb': 1024, 'free_capacity_gb': 1024, 'allocated_capacity_gb': 0, 'provisioned_capacity_gb': 0, 'max_over_subscription_ratio': '1.0', 'thin_provisioning_support': False, 'thick_provisioning_support': True, 'reserved_percentage': 10, 'volume_backend_name': 'lvm1', 'timestamp': UTC_NOW, 'multiattach': True, 'online_extend_support': True, 'uuid': 'a3a593da-7f8d-4bb7-8b4c-f2bc1e0b4824'}, 'host2': {'total_capacity_gb': 2048, 'free_capacity_gb': 300, 'allocated_capacity_gb': 1748, 'provisioned_capacity_gb': 1748, 'max_over_subscription_ratio': '1.5', 'thin_provisioning_support': True, 'thick_provisioning_support': False, 'reserved_percentage': 10, 'volume_backend_name': 'lvm2', 'timestamp': UTC_NOW, 'online_extend_support': False, 'uuid': '4200b32b-0bf9-436c-86b2-0675f6ac218e'}, 'host3': {'total_capacity_gb': 512, 'free_capacity_gb': 256, 'allocated_capacity_gb': 256, 'provisioned_capacity_gb': 256, 'max_over_subscription_ratio': '2.0', 'thin_provisioning_support': False, 'thick_provisioning_support': True, 'reserved_percentage': 0, 'volume_backend_name': 'lvm3', 'timestamp': UTC_NOW, 'uuid': '6d91e7f5-ca17-4e3b-bf4f-19ca77166dd7'}, 'host4': {'total_capacity_gb': 2048, 'free_capacity_gb': 200, 'allocated_capacity_gb': 1848, 'provisioned_capacity_gb': 2047, 'max_over_subscription_ratio': '1.0', 'thin_provisioning_support': True, 'thick_provisioning_support': False, 'reserved_percentage': 5, 'volume_backend_name': 'lvm4', 'timestamp': UTC_NOW, 'consistent_group_snapshot_enabled': True, 'uuid': '18417850-2ca9-43d1-9619-ae16bfb0f655'}, 'host5': 
{'total_capacity_gb': 'infinite', 'free_capacity_gb': 'unknown', 'allocated_capacity_gb': 1548, 'provisioned_capacity_gb': 1548, 'max_over_subscription_ratio': '1.0', 'thin_provisioning_support': True, 'thick_provisioning_support': False, 'reserved_percentage': 5, 'timestamp': UTC_NOW, 'uuid': 'f838f35c-4035-464f-9792-ce60e390c13d'}, } SERVICE_STATES_WITH_POOLS = { 'host1@BackendA': { 'uuid': 'a3a593da-7f8d-4bb7-8b4c-f2bc1e0b4824', 'replication_enabled': False, 'driver_version': '1.0.0', 'volume_backend_name': 'BackendA', 'pools': [ { 'total_capacity_gb': 1024, 'free_capacity_gb': 1024, 'allocated_capacity_gb': 0, 'provisioned_capacity_gb': 0, 'max_over_subscription_ratio': '1.0', 'thin_provisioning_support': False, 'thick_provisioning_support': True, 'reserved_percentage': 15, 'pool_name': 'openstack_iscsi_1', }, { 'total_capacity_gb': 2048, 'free_capacity_gb': 1008, 'allocated_capacity_gb': 0, 'provisioned_capacity_gb': 0, 'max_over_subscription_ratio': '1.0', 'thin_provisioning_support': True, 'thick_provisioning_support': False, 'reserved_percentage': 15, 'pool_name': 'openstack_iscsi_2', }, ], 'storage_protocol': 'iSCSI', 'timestamp': UTC_NOW, }, 'host1@BackendB': { 'replication_enabled': True, 'driver_version': '1.5.0', 'volume_backend_name': 'BackendB', 'uuid': '4200b32b-0bf9-436c-86b2-0675f6ac218e', 'pools': [ { 'total_capacity_gb': 2048, 'free_capacity_gb': 300, 'allocated_capacity_gb': 1748, 'provisioned_capacity_gb': 1748, 'max_over_subscription_ratio': '1.5', 'thin_provisioning_support': True, 'thick_provisioning_support': False, 'reserved_percentage': 10, 'pool_name': 'openstack_nfs_1', }, { 'total_capacity_gb': 512, 'free_capacity_gb': 256, 'allocated_capacity_gb': 256, 'provisioned_capacity_gb': 256, 'max_over_subscription_ratio': '2.0', 'thin_provisioning_support': True, 'thick_provisioning_support': False, 'reserved_percentage': 10, 'pool_name': 'openstack_nfs_2', }, ], 'storage_protocol': 'nfs', 'timestamp': UTC_NOW, }, 'host2@BackendX': { 'replication_enabled': False, 'driver_version': '3.5.1', 'total_capacity_gb': 512, 'free_capacity_gb': 256, 'allocated_capacity_gb': 256, 'provisioned_capacity_gb': 256, 'max_over_subscription_ratio': '2.0', 'thin_provisioning_support': False, 'thick_provisioning_support': True, 'reserved_percentage': 0, 'volume_backend_name': 'BackendX', 'storage_protocol': 'iSCSI', 'timestamp': UTC_NOW, 'uuid': '6d91e7f5-ca17-4e3b-bf4f-19ca77166dd7' }, 'host3@BackendY': { 'replication_enabled': True, 'driver_version': '1.5.0', 'volume_backend_name': 'BackendY', 'uuid': '18417850-2ca9-43d1-9619-ae16bfb0f655', 'pools': [ { 'total_capacity_gb': 'infinite', 'free_capacity_gb': 'unknown', 'allocated_capacity_gb': 170, 'provisioned_capacity_gb': 170, 'max_over_subscription_ratio': '1.0', 'thin_provisioning_support': False, 'thick_provisioning_support': True, 'QoS_support': True, 'reserved_percentage': 0, 'pool_name': 'openstack_fcp_1', }, { 'total_capacity_gb': 'infinite', 'free_capacity_gb': 'unknown', 'allocated_capacity_gb': 1548, 'provisioned_capacity_gb': 1548, 'max_over_subscription_ratio': '1.0', 'thin_provisioning_support': True, 'thick_provisioning_support': False, 'QoS_support': True, 'reserved_percentage': 0, 'pool_name': 'openstack_fcp_2', }, ], 'storage_protocol': 'fc', 'timestamp': UTC_NOW, } } class FakeFilterScheduler(filter_scheduler.FilterScheduler): def __init__(self, *args, **kwargs): # note: the following call will inject a host_manager into this # object whose class is determined by the config option # 'scheduler_host_manager' 
super(FakeFilterScheduler, self).__init__(*args, **kwargs) class FakeHostManager(host_manager.HostManager): def __init__(self, multibackend_with_pools=False): # For testing, we don't call __init__() on the super class; instead we # do explicit initialization of our fake instance. Differences noted # below. # For testing, set the service_states directly self.service_states = copy.deepcopy( SERVICE_STATES_WITH_POOLS if multibackend_with_pools else SERVICE_STATES ) self.backend_state_map: dict[str, host_manager.BackendState] = {} self.backup_service_states = {} self.filter_handler = filters.BackendFilterHandler('cinder.scheduler.' 'filters') # for testing, set the filter_classes directly instead of # calling the filter_handler to discover them self.filter_classes = helpers.ALL_FILTER_CLASSES[:] # for testing, use specific values instead of config settings self.enabled_filters = self._choose_backend_filters( helpers.DEFAULT_SCHEDULER_FILTERS[:]) self.weight_handler = importutils.import_object( 'cinder.scheduler.weights.OrderedHostWeightHandler', 'cinder.scheduler.weights') # for testing, set the weigher classes instead of discovering them self.weight_classes = helpers.ALL_WEIGHER_CLASSES[:] self._no_capabilities_backends = set() # Services without capabilities self._update_backend_state_map(cinder_context.get_admin_context()) self.service_states_last_update = {} class FakeBackendState(host_manager.BackendState): def __init__(self, host, attribute_dict): super(FakeBackendState, self).__init__(host, None) for (key, val) in attribute_dict.items(): setattr(self, key, val) class FakeNovaClient(object): class Server(object): def __init__(self, host): self.uuid = uuidutils.generate_uuid() self.host = host setattr(self, 'OS-EXT-SRV-ATTR:host', host) class ServerManager(object): def __init__(self): self._servers = [] def create(self, host): self._servers.append(FakeNovaClient.Server(host)) return self._servers[-1].uuid def get(self, server_uuid): for s in self._servers: if s.uuid == server_uuid: return s return None def list(self, detailed=True, search_opts=None): matching = list(self._servers) if search_opts: for opt, val in search_opts.items(): matching = [m for m in matching if getattr(m, opt, None) == val] return matching class ListExtResource(object): def __init__(self, ext_name): self.name = ext_name class ListExtManager(object): def __init__(self, ext_srv_attr=True): self.ext_srv_attr = ext_srv_attr def show_all(self): if self.ext_srv_attr: return [ FakeNovaClient.ListExtResource('ExtendedServerAttributes')] return [] def __init__(self, ext_srv_attr=True): self.servers = FakeNovaClient.ServerManager() self.list_extensions = FakeNovaClient.ListExtManager( ext_srv_attr=ext_srv_attr) def mock_host_manager_db_calls(mock_obj, backends_with_pools=False, disabled=None): service_states = ( SERVICE_STATES_WITH_POOLS if backends_with_pools else SERVICE_STATES ) services = [] az_map = { 'host1': 'zone1', 'host2': 'zone1', 'host3': 'zone2', 'host4': 'zone3', 'host5': 'zone3', } sid = 0 for svc, state in service_states.items(): sid += 1 services.append( { 'id': sid, 'host': svc, 'availability_zone': az_map[volume_utils.extract_host(svc, 'host')], 'topic': 'volume', 'disabled': False, 'updated_at': timeutils.utcnow(), 'uuid': state.get('uuid', uuidutils.generate_uuid()), } ) if disabled is None: mock_obj.return_value = services else: mock_obj.return_value = [service for service in services if service['disabled'] == disabled] ././@PaxHeader0000000000000000000000000000002600000000000011453 
xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/scheduler/helpers.py0000664000175000017500000000450400000000000022451 0ustar00zuulzuul00000000000000# Copyright 2024 Red Hat, Inc # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from cinder.scheduler.filters import affinity_filter from cinder.scheduler.filters import availability_zone_filter from cinder.scheduler.filters import capabilities_filter from cinder.scheduler.filters import capacity_filter from cinder.scheduler.filters import driver_filter from cinder.scheduler.filters import ignore_attempted_hosts_filter from cinder.scheduler.filters import instance_locality_filter from cinder.scheduler.filters import json_filter from cinder.scheduler.weights import capacity from cinder.scheduler.weights import chance from cinder.scheduler.weights import goodness from cinder.scheduler.weights import stochastic from cinder.scheduler.weights import volume_number ALL_FILTER_CLASSES = [ availability_zone_filter.AvailabilityZoneFilter, capabilities_filter.CapabilitiesFilter, capacity_filter.CapacityFilter, affinity_filter.DifferentBackendFilter, driver_filter.DriverFilter, instance_locality_filter.InstanceLocalityFilter, ignore_attempted_hosts_filter.IgnoreAttemptedHostsFilter, json_filter.JsonFilter, affinity_filter.SameBackendFilter, ] ALL_FILTERS = [filter_cls.__name__ for filter_cls in ALL_FILTER_CLASSES] DEFAULT_SCHEDULER_FILTER_CLASSES = [ availability_zone_filter.AvailabilityZoneFilter, capabilities_filter.CapabilitiesFilter, capacity_filter.CapacityFilter, ] DEFAULT_SCHEDULER_FILTERS = [ filter_cls.__name__ for filter_cls in DEFAULT_SCHEDULER_FILTER_CLASSES] ALL_WEIGHER_CLASSES = [ capacity.AllocatedCapacityWeigher, capacity.CapacityWeigher, chance.ChanceWeigher, goodness.GoodnessWeigher, stochastic.StochasticHostWeightHandler, volume_number.VolumeNumberWeigher, ] ALL_WEIGHERS = [weigher.__name__ for weigher in ALL_WEIGHER_CLASSES] ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/scheduler/test_allocated_capacity_weigher.py0000664000175000017500000001007700000000000027367 0ustar00zuulzuul00000000000000# Copyright 2013 eBay Inc. # # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Tests For Allocated Capacity Weigher. 
""" from unittest import mock from cinder.common import constants from cinder import context from cinder.scheduler import weights from cinder.tests.unit.scheduler import fakes from cinder.tests.unit import test from cinder.volume import volume_utils class AllocatedCapacityWeigherTestCase(test.TestCase): def setUp(self): super(AllocatedCapacityWeigherTestCase, self).setUp() self.host_manager = fakes.FakeHostManager() self.weight_handler = weights.OrderedHostWeightHandler( 'cinder.scheduler.weights') def _get_weighed_host(self, hosts, weight_properties=None): if weight_properties is None: weight_properties = {} return self.weight_handler.get_weighed_objects( [weights.capacity.AllocatedCapacityWeigher], hosts, weight_properties)[0] @mock.patch('cinder.db.sqlalchemy.api.service_get_all') def _get_all_backends(self, _mock_service_get_all, disabled=False): ctxt = context.get_admin_context() fakes.mock_host_manager_db_calls(_mock_service_get_all, disabled=disabled) host_states = self.host_manager.get_all_backend_states(ctxt) _mock_service_get_all.assert_called_once_with( ctxt, None, # backend_match_level topic=constants.VOLUME_TOPIC, frozen=False, disabled=disabled) return host_states def test_default_of_spreading_first(self): hostinfo_list = self._get_all_backends() # host1: allocated_capacity_gb=0, weight=0 Norm=0.0 # host2: allocated_capacity_gb=1748, weight=-1748 # host3: allocated_capacity_gb=256, weight=-256 # host4: allocated_capacity_gb=1848, weight=-1848 Norm=-1.0 # host5: allocated_capacity_gb=1548, weight=-1540 # so, host1 should win: weighed_host = self._get_weighed_host(hostinfo_list) self.assertEqual(0.0, weighed_host.weight) self.assertEqual( 'host1', volume_utils.extract_host(weighed_host.obj.host)) def test_capacity_weight_multiplier1(self): self.flags(allocated_capacity_weight_multiplier=1.0) hostinfo_list = self._get_all_backends() # host1: allocated_capacity_gb=0, weight=0 Norm=0.0 # host2: allocated_capacity_gb=1748, weight=1748 # host3: allocated_capacity_gb=256, weight=256 # host4: allocated_capacity_gb=1848, weight=1848 Norm=1.0 # host5: allocated_capacity_gb=1548, weight=1540 # so, host4 should win: weighed_host = self._get_weighed_host(hostinfo_list) self.assertEqual(1.0, weighed_host.weight) self.assertEqual( 'host4', volume_utils.extract_host(weighed_host.obj.host)) def test_capacity_weight_multiplier2(self): self.flags(allocated_capacity_weight_multiplier=-2.0) hostinfo_list = self._get_all_backends() # host1: allocated_capacity_gb=0, weight=0 Norm=0.0 # host2: allocated_capacity_gb=1748, weight=-3496 # host3: allocated_capacity_gb=256, weight=-512 # host4: allocated_capacity_gb=1848, weight=-3696 Norm=-2.0 # host5: allocated_capacity_gb=1548, weight=-3080 # so, host1 should win: weighed_host = self._get_weighed_host(hostinfo_list) self.assertEqual(0.0, weighed_host.weight) self.assertEqual( 'host1', volume_utils.extract_host(weighed_host.obj.host)) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/scheduler/test_base_filter.py0000664000175000017500000001323700000000000024330 0ustar00zuulzuul00000000000000# Copyright (c) 2013 OpenStack Foundation. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. from unittest import mock from cinder.scheduler import base_filter from cinder.tests.unit import test class TestBaseFilter(test.TestCase): def setUp(self): super(TestBaseFilter, self).setUp() self.filter = base_filter.BaseFilter() def test_filter_one_is_called(self): filters = [1, 2, 3, 4] filter_properties = {'x': 'y'} self.filter._filter_one = mock.Mock() self.filter._filter_one.side_effect = [False, True, True, False] calls = [mock.call(i, filter_properties) for i in filters] result = list(self.filter.filter_all(filters, filter_properties)) self.assertEqual([2, 3], result) self.filter._filter_one.assert_has_calls(calls) class FakeExtension(object): def __init__(self, plugin): self.plugin = plugin class BaseFakeFilter(base_filter.BaseFilter): pass class FakeFilter1(BaseFakeFilter): """Derives from BaseFakeFilter and has a fake entry point defined. Entry point is returned by fake ExtensionManager. Should be included in the output of all_classes. """ pass class FakeFilter2(BaseFakeFilter): """Derives from BaseFakeFilter but has no entry point. Should be not included in all_classes. """ pass class FakeFilter3(base_filter.BaseFilter): """Does not derive from BaseFakeFilter. Should not be included. """ pass class FakeFilter4(BaseFakeFilter): """Derives from BaseFakeFilter and has an entry point. Should be included. """ pass class FakeFilter5(BaseFakeFilter): """Derives from BaseFakeFilter but has no entry point. Should not be included. 
""" run_filter_once_per_request = True pass class FilterA(base_filter.BaseFilter): def filter_all(self, list_objs, filter_properties): # return all but the first object return list_objs[1:] class FilterB(base_filter.BaseFilter): def filter_all(self, list_objs, filter_properties): # return an empty list return None class FakeExtensionManager(list): def __init__(self, namespace): classes = [FakeFilter1, FakeFilter3, FakeFilter4] exts = map(FakeExtension, classes) super(FakeExtensionManager, self).__init__(exts) self.namespace = namespace class TestBaseFilterHandler(test.TestCase): def setUp(self): super(TestBaseFilterHandler, self).setUp() self.mock_object(base_filter.base_handler.extension, 'ExtensionManager', FakeExtensionManager) self.handler = base_filter.BaseFilterHandler(BaseFakeFilter, 'fake_filters') def test_get_all_classes(self): # In order for a FakeFilter to be returned by get_all_classes, it has # to comply with these rules: # * It must be derived from BaseFakeFilter # AND # * It must have a python entrypoint assigned (returned by # FakeExtensionManager) expected = [FakeFilter1, FakeFilter4] result = self.handler.get_all_classes() self.assertEqual(expected, result) def _get_filtered_objects(self, filter_classes, index=0): filter_objs_initial = [1, 2, 3, 4] filter_properties = {'x': 'y'} return self.handler.get_filtered_objects(filter_classes, filter_objs_initial, filter_properties, index) @mock.patch.object(FakeFilter4, 'filter_all') @mock.patch.object(FakeFilter3, 'filter_all', return_value=None) def test_get_filtered_objects_return_none(self, fake3_filter_all, fake4_filter_all): filter_classes = [FakeFilter1, FakeFilter2, FakeFilter3, FakeFilter4] result = self._get_filtered_objects(filter_classes) self.assertIsNone(result) self.assertFalse(fake4_filter_all.called) def test_get_filtered_objects(self): filter_objs_expected = [1, 2, 3, 4] filter_classes = [FakeFilter1, FakeFilter2, FakeFilter3, FakeFilter4] result = self._get_filtered_objects(filter_classes) self.assertEqual(filter_objs_expected, result) def test_get_filtered_objects_with_filter_run_once(self): filter_objs_expected = [1, 2, 3, 4] filter_classes = [FakeFilter5] with mock.patch.object(FakeFilter5, 'filter_all', return_value=filter_objs_expected ) as fake5_filter_all: result = self._get_filtered_objects(filter_classes) self.assertEqual(filter_objs_expected, result) self.assertEqual(1, fake5_filter_all.call_count) result = self._get_filtered_objects(filter_classes, index=1) self.assertEqual(filter_objs_expected, result) self.assertEqual(1, fake5_filter_all.call_count) result = self._get_filtered_objects(filter_classes, index=2) self.assertEqual(filter_objs_expected, result) self.assertEqual(1, fake5_filter_all.call_count) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/scheduler/test_capacity_weigher.py0000664000175000017500000004154600000000000025364 0ustar00zuulzuul00000000000000# Copyright 2011-2012 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. """Tests For Capacity Weigher.""" from datetime import datetime from unittest import mock import ddt from cinder.common import constants from cinder import context from cinder.scheduler import weights from cinder.tests.unit.scheduler import fakes from cinder.tests.unit import test from cinder.volume import volume_utils @ddt.ddt class CapacityWeigherTestCase(test.TestCase): def setUp(self): super(CapacityWeigherTestCase, self).setUp() self.host_manager = fakes.FakeHostManager() self.weight_handler = weights.OrderedHostWeightHandler( 'cinder.scheduler.weights') def _get_weighed_hosts(self, hosts, weight_properties=None): if weight_properties is None: weight_properties = {'size': 1} return self.weight_handler.get_weighed_objects( [weights.capacity.CapacityWeigher], hosts, weight_properties) @mock.patch('cinder.db.sqlalchemy.api.service_get_all') def _get_all_backends(self, _mock_service_get_all, disabled=False): ctxt = context.get_admin_context() fakes.mock_host_manager_db_calls(_mock_service_get_all, disabled=disabled) backend_states = self.host_manager.get_all_backend_states(ctxt) _mock_service_get_all.assert_called_once_with( ctxt, None, # backend_match_level topic=constants.VOLUME_TOPIC, frozen=False, disabled=disabled) return backend_states # If thin and thin_provisioning_support are True, # use the following formula: # free = (total * host_state.max_over_subscription_ratio # - host_state.provisioned_capacity_gb # - math.floor(total * reserved)) # Otherwise, use the following formula: # free = free_space - math.floor(total * reserved) @ddt.data( {'volume_type': {'extra_specs': {'provisioning:type': 'thin'}}, 'winner': 'host2'}, {'volume_type': {'extra_specs': {'provisioning:type': 'thick'}}, 'winner': 'host1'}, {'volume_type': {'extra_specs': {}}, 'winner': 'host2'}, {'volume_type': {}, 'winner': 'host2'}, {'volume_type': None, 'winner': 'host2'}, ) @ddt.unpack def test_default_of_spreading_first(self, volume_type, winner): backend_info_list = self._get_all_backends() # Results for the 1st test # {'provisioning:type': 'thin'}: # host1: thin_provisioning_support = False # free_capacity_gb=1024, # free=1024-math.floor(1024*0.1)=922 # Norm=0.837837837838 # host2: thin_provisioning_support = True # free_capacity_gb=300, # free=2048*1.5-1748-math.floor(2048*0.1)=1120 # Norm=1.0 # host3: thin_provisioning_support = False # free_capacity_gb=512, free=256-512*0=256 # Norm=0.292383292383 # host4: thin_provisioning_support = True # free_capacity_gb=200, # free=2048*1.0-2047-math.floor(2048*0.05)=-101 # Norm=0.0 # host5: free_capacity_gb=unknown free=-1 # Norm=0.0819000819001 # so, host2 should win: weight_properties = { 'size': 1, 'volume_type': volume_type, } weighed_host = self._get_weighed_hosts( backend_info_list, weight_properties=weight_properties)[0] self.assertEqual(1.0, weighed_host.weight) self.assertEqual(winner, volume_utils.extract_host(weighed_host.obj.host)) @ddt.data( {'volume_type': {'extra_specs': {'provisioning:type': 'thin'}}, 'winner': 'host4'}, {'volume_type': {'extra_specs': {'provisioning:type': 'thick'}}, 'winner': 'host4'}, {'volume_type': {'extra_specs': {}}, 'winner': 'host4'}, {'volume_type': {}, 'winner': 'host4'}, {'volume_type': None, 'winner': 'host4'}, ) @ddt.unpack def test_capacity_weight_multiplier1(self, volume_type, winner): self.flags(capacity_weight_multiplier=-1.0) backend_info_list = self._get_all_backends() # Results for the 1st test # 
{'provisioning:type': 'thin'}: # host1: thin_provisioning_support = False # free_capacity_gb=1024, # free=-(1024-math.floor(1024*0.1))=-922 # Norm=-0.00829542413701 # host2: thin_provisioning_support = True # free_capacity_gb=300, # free=-(2048*1.5-1748-math.floor(2048*0.1))=-1120 # Norm=-0.00990099009901 # host3: thin_provisioning_support = False # free_capacity_gb=512, free=-(256-512*0)=-256 # Norm=--0.002894884083 # host4: thin_provisioning_support = True # free_capacity_gb=200, # free=-(2048*1.0-2047-math.floor(2048*0.05))=101 # Norm=0.0 # host5: free_capacity_gb=unknown free=-float('inf') # Norm=-1.0 # so, host4 should win: weight_properties = { 'size': 1, 'volume_type': volume_type, } weighed_host = self._get_weighed_hosts( backend_info_list, weight_properties=weight_properties) weighed_host = weighed_host[0] self.assertEqual(0.0, weighed_host.weight) self.assertEqual(winner, volume_utils.extract_host(weighed_host.obj.host)) @ddt.data( {'volume_type': {'extra_specs': {'provisioning:type': 'thin'}}, 'winner': 'host2'}, {'volume_type': {'extra_specs': {'provisioning:type': 'thick'}}, 'winner': 'host1'}, {'volume_type': {'extra_specs': {}}, 'winner': 'host2'}, {'volume_type': {}, 'winner': 'host2'}, {'volume_type': None, 'winner': 'host2'}, ) @ddt.unpack def test_capacity_weight_multiplier2(self, volume_type, winner): self.flags(capacity_weight_multiplier=2.0) backend_info_list = self._get_all_backends() # Results for the 1st test # {'provisioning:type': 'thin'}: # host1: thin_provisioning_support = False # free_capacity_gb=1024, # free=(1024-math.floor(1024*0.1))*2=1844 # Norm=1.67567567568 # host2: thin_provisioning_support = True # free_capacity_gb=300, # free=(2048*1.5-1748-math.floor(2048*0.1))*2=2240 # Norm=2.0 # host3: thin_provisioning_support = False # free_capacity_gb=512, free=(256-512*0)*2=512 # Norm=0.584766584767 # host4: thin_provisioning_support = True # free_capacity_gb=200, # free=(2048*1.0-2047-math.floor(2048*0.05))*2=-202 # Norm=0.0 # host5: free_capacity_gb=unknown free=-2 # Norm=0.1638001638 # so, host2 should win: weight_properties = { 'size': 1, 'volume_type': volume_type, } weighed_host = self._get_weighed_hosts( backend_info_list, weight_properties=weight_properties)[0] self.assertEqual(1.0 * 2, weighed_host.weight) self.assertEqual(winner, volume_utils.extract_host(weighed_host.obj.host)) def test_capacity_weight_no_unknown_or_infinite(self): self.flags(capacity_weight_multiplier=-1.0) del self.host_manager.service_states['host5'] backend_info_list = self._get_all_backends() # host1: thin_provisioning_support = False # free_capacity_gb=1024, # free=(1024-math.floor(1024*0.1))=-922 # Norm=-0.837837837838 # host2: thin_provisioning_support = True # free_capacity_gb=300, # free=(2048*1.5-1748-math.floor(2048*0.1))=-1120 # Norm=-1.0 # host3: thin_provisioning_support = False # free_capacity_gb=512, free=(256-512*0)=-256 # Norm=-0.292383292383 # host4: thin_provisioning_support = True # free_capacity_gb=200, # free=(2048*1.0-2047-math.floor(2048*0.05))=101 # Norm=0.0 # so, host4 should win: weighed_hosts = self._get_weighed_hosts(backend_info_list) best_host = weighed_hosts[0] self.assertEqual(0.0, best_host.weight) self.assertEqual('host4', volume_utils.extract_host(best_host.obj.host)) # and host2 is the worst: worst_host = weighed_hosts[-1] self.assertEqual(-1.0, worst_host.weight) self.assertEqual('host2', volume_utils.extract_host(worst_host.obj.host)) def test_capacity_weight_free_unknown(self): self.flags(capacity_weight_multiplier=-1.0) 
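# ---------------------------------------------------------------------------
# The 'unknown'/'infinite' capacity tests in this file (the first of which
# starts just above) all pin capacity_weight_multiplier to -1.0 and expect the
# one backend whose free or total capacity cannot be trusted to normalize to
# the worst weight (-1.0) while host4 stays at 0.0.  A small illustrative
# predicate (a labelled stand-in, not cinder code) for what makes a
# capability report untrustworthy in these tests:
def capacity_unreportable(stats):
    """True when a backend reports 'unknown' or 'infinite' capacity."""
    return (stats.get('free_capacity_gb') in ('unknown', 'infinite') or
            stats.get('total_capacity_gb') in ('unknown', 'infinite'))

assert capacity_unreportable({'total_capacity_gb': 3000,
                              'free_capacity_gb': 'unknown'})
assert capacity_unreportable({'total_capacity_gb': 'infinite',
                              'free_capacity_gb': 3000})
assert not capacity_unreportable({'total_capacity_gb': 2048,
                                  'free_capacity_gb': 200})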
self.host_manager.service_states['host5'] = { 'total_capacity_gb': 3000, 'free_capacity_gb': 'unknown', 'allocated_capacity_gb': 1548, 'provisioned_capacity_gb': 1548, 'max_over_subscription_ratio': 1.0, 'thin_provisioning_support': True, 'thick_provisioning_support': False, 'reserved_percentage': 5, 'timestamp': datetime.utcnow()} backend_info_list = self._get_all_backends() # host1: thin_provisioning_support = False # free_capacity_gb=1024, # free=(1024-math.floor(1024*0.1))=-922 # Norm= -0.00829542413701 # host2: thin_provisioning_support = True # free_capacity_gb=300, # free=(2048*1.5-1748-math.floor(2048*0.1))=-1120 # Norm=-0.00990099009901 # host3: thin_provisioning_support = False # free_capacity_gb=512, free=(256-512*0)=-256 # Norm=-0.002894884083 # host4: thin_provisioning_support = True # free_capacity_gb=200, # free=(2048*1.0-2047-math.floor(2048*0.05))=101 # Norm=0.0 # host5: free_capacity_gb=unknown free=3000 # Norm=-1.0 # so, host4 should win: weighed_hosts = self._get_weighed_hosts(backend_info_list) best_host = weighed_hosts[0] self.assertEqual(0.0, best_host.weight) self.assertEqual('host4', volume_utils.extract_host(best_host.obj.host)) # and host5 is the worst: worst_host = weighed_hosts[-1] self.assertEqual(-1.0, worst_host.weight) self.assertEqual('host5', volume_utils.extract_host(worst_host.obj.host)) def test_capacity_weight_cap_unknown(self): self.flags(capacity_weight_multiplier=-1.0) self.host_manager.service_states['host5'] = { 'total_capacity_gb': 'unknown', 'free_capacity_gb': 3000, 'allocated_capacity_gb': 1548, 'provisioned_capacity_gb': 1548, 'max_over_subscription_ratio': 1.0, 'thin_provisioning_support': True, 'thick_provisioning_support': False, 'reserved_percentage': 5, 'timestamp': datetime.utcnow()} backend_info_list = self._get_all_backends() # host1: thin_provisioning_support = False # free_capacity_gb=1024, # free=(1024-math.floor(1024*0.1))=-922 # Norm= -0.00829542413701 # host2: thin_provisioning_support = True # free_capacity_gb=300, # free=(2048*1.5-1748-math.floor(2048*0.1))=-1120 # Norm=-0.00990099009901 # host3: thin_provisioning_support = False # free_capacity_gb=512, free=(256-512*0)=-256 # Norm=-0.002894884083 # host4: thin_provisioning_support = True # free_capacity_gb=200, # free=(2048*1.0-2047-math.floor(2048*0.05))=101 # Norm=0.0 # host5: free_capacity_gb=3000 free=unknown # Norm=-1.0 # so, host4 should win: weighed_hosts = self._get_weighed_hosts(backend_info_list) best_host = weighed_hosts[0] self.assertEqual(0.0, best_host.weight) self.assertEqual('host4', volume_utils.extract_host(best_host.obj.host)) # and host5 is the worst: worst_host = weighed_hosts[-1] self.assertEqual(-1.0, worst_host.weight) self.assertEqual('host5', volume_utils.extract_host(worst_host.obj.host)) def test_capacity_weight_free_infinite(self): self.flags(capacity_weight_multiplier=-1.0) self.host_manager.service_states['host5'] = { 'total_capacity_gb': 3000, 'free_capacity_gb': 'infinite', 'allocated_capacity_gb': 1548, 'provisioned_capacity_gb': 1548, 'max_over_subscription_ratio': 1.0, 'thin_provisioning_support': True, 'thick_provisioning_support': False, 'reserved_percentage': 5, 'timestamp': datetime.utcnow()} backend_info_list = self._get_all_backends() # host1: thin_provisioning_support = False # free_capacity_gb=1024, # free=(1024-math.floor(1024*0.1))=-922 # Norm= -0.00829542413701 # host2: thin_provisioning_support = True # free_capacity_gb=300, # free=(2048*1.5-1748-math.floor(2048*0.1))=-1120 # Norm=-0.00990099009901 # host3: 
thin_provisioning_support = False # free_capacity_gb=512, free=(256-512*0)=-256 # Norm=-0.002894884083 # host4: thin_provisioning_support = True # free_capacity_gb=200, # free=(2048*1.0-2047-math.floor(2048*0.05))=101 # Norm=0.0 # host5: free_capacity_gb=infinite free=3000 # Norm=-1.0 # so, host4 should win: weighed_hosts = self._get_weighed_hosts(backend_info_list) best_host = weighed_hosts[0] self.assertEqual(0.0, best_host.weight) self.assertEqual('host4', volume_utils.extract_host(best_host.obj.host)) # and host5 is the worst: worst_host = weighed_hosts[-1] self.assertEqual(-1.0, worst_host.weight) self.assertEqual('host5', volume_utils.extract_host(worst_host.obj.host)) def test_capacity_weight_cap_infinite(self): self.flags(capacity_weight_multiplier=-1.0) self.host_manager.service_states['host5'] = { 'total_capacity_gb': 'infinite', 'free_capacity_gb': 3000, 'allocated_capacity_gb': 1548, 'provisioned_capacity_gb': 1548, 'max_over_subscription_ratio': 1.0, 'thin_provisioning_support': True, 'thick_provisioning_support': False, 'reserved_percentage': 5, 'timestamp': datetime.utcnow()} backend_info_list = self._get_all_backends() # host1: thin_provisioning_support = False # free_capacity_gb=1024, # free=(1024-math.floor(1024*0.1))=-922 # Norm= -0.00829542413701 # host2: thin_provisioning_support = True # free_capacity_gb=300, # free=(2048*1.5-1748-math.floor(2048*0.1))=-1120 # Norm=-0.00990099009901 # host3: thin_provisioning_support = False # free_capacity_gb=512, free=(256-512*0)=-256 # Norm=-0.002894884083 # host4: thin_provisioning_support = True # free_capacity_gb=200, # free=(2048*1.0-2047-math.floor(2048*0.05))=101 # Norm=0.0 # host5: free_capacity_gb=3000 free=infinite # Norm=-1.0 # so, host4 should win: weighed_hosts = self._get_weighed_hosts(backend_info_list) best_host = weighed_hosts[0] self.assertEqual(0.0, best_host.weight) self.assertEqual('host4', volume_utils.extract_host(best_host.obj.host)) # and host5 is the worst: worst_host = weighed_hosts[-1] self.assertEqual(-1.0, worst_host.weight) self.assertEqual('host5', volume_utils.extract_host(worst_host.obj.host)) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/scheduler/test_chance_weigher.py0000664000175000017500000000510400000000000024776 0ustar00zuulzuul00000000000000# Copyright (C) 2013 Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Tests For Chance Weigher. 
""" from unittest import mock from cinder.scheduler import host_manager from cinder.scheduler.weights import chance from cinder.tests.unit.scheduler import fakes from cinder.tests.unit import test class ChanceWeigherTestCase(test.TestCase): def fake_random(self, reset=False): if reset: self.not_random_float = 0.0 else: self.not_random_float += 1.0 return self.not_random_float @mock.patch('random.random') def test_chance_weigher(self, _mock_random): # stub random.random() to verify the ChanceWeigher # is using random.random() (repeated calls to weigh should # return incrementing weights) weigher = chance.ChanceWeigher() _mock_random.side_effect = self.fake_random self.fake_random(reset=True) host_state = {'host': 'host.example.com', 'free_capacity_gb': 99999} weight = weigher._weigh_object(host_state, None) self.assertEqual(1.0, weight) weight = weigher._weigh_object(host_state, None) self.assertEqual(2.0, weight) weight = weigher._weigh_object(host_state, None) self.assertEqual(3.0, weight) def test_host_manager_choosing_chance_weigher(self): # ensure HostManager can find the ChanceWeigher hm = fakes.FakeHostManager() weighers = hm._choose_backend_weighers('ChanceWeigher') self.assertEqual(1, len(weighers)) self.assertEqual(weighers[0], chance.ChanceWeigher) def test_use_of_chance_weigher_via_host_manager(self): # ensure we don't lose any hosts when weighing with # the ChanceWeigher hm = fakes.FakeHostManager() fake_backends = [host_manager.BackendState('fake_be%s' % x, None) for x in range(1, 5)] weighed_backends = hm.get_weighed_backends(fake_backends, {}, 'ChanceWeigher') self.assertEqual(4, len(weighed_backends)) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/scheduler/test_filter_scheduler.py0000664000175000017500000006515400000000000025401 0ustar00zuulzuul00000000000000# Copyright 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Tests For Filter Scheduler.""" from unittest import mock import ddt from cinder import context from cinder import exception from cinder import objects from cinder.scheduler import filter_scheduler from cinder.scheduler import host_manager from cinder.tests.unit import fake_constants as fake from cinder.tests.unit.scheduler import fakes from cinder.tests.unit.scheduler import test_scheduler from cinder.volume import volume_utils @ddt.ddt class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase): """Test case for Filter Scheduler.""" driver_cls = filter_scheduler.FilterScheduler def test_create_group_no_hosts(self): # Ensure empty hosts result in NoValidBackend exception. 
sched = fakes.FakeFilterScheduler() fake_context = context.RequestContext('user', 'project') request_spec = {'volume_properties': {'project_id': 1, 'size': 0}, 'volume_type': {'name': 'Type1', 'extra_specs': {}}} request_spec2 = {'volume_properties': {'project_id': 1, 'size': 0}, 'volume_type': {'name': 'Type2', 'extra_specs': {}}} request_spec_list = [request_spec, request_spec2] group_spec = {'group_type': {'name': 'GrpType'}, 'volume_properties': {'project_id': 1, 'size': 0}} self.assertRaises(exception.NoValidBackend, sched.schedule_create_group, fake_context, 'faki-id1', group_spec, request_spec_list, {}, []) @ddt.data( {'capabilities:consistent_group_snapshot_enabled': ' True'}, {'consistent_group_snapshot_enabled': ' True'} ) @mock.patch('cinder.db.service_get_all') def test_schedule_group(self, specs, _mock_service_get_all): # Make sure _schedule_group() can find host successfully. sched = fakes.FakeFilterScheduler() fake_context = context.RequestContext('user', 'project', is_admin=True) fakes.mock_host_manager_db_calls(_mock_service_get_all) request_spec = {'volume_properties': {'project_id': 1, 'size': 0}, 'volume_type': {'name': 'Type1', 'extra_specs': specs}} request_spec2 = {'volume_properties': {'project_id': 1, 'size': 0}, 'volume_type': {'name': 'Type2', 'extra_specs': specs}} request_spec_list = [request_spec, request_spec2] group_spec = {'group_type': {'name': 'GrpType'}, 'volume_properties': {'project_id': 1, 'size': 0}} weighed_host = sched._schedule_generic_group(fake_context, group_spec, request_spec_list, {}, []) self.assertIsNotNone(weighed_host.obj) self.assertTrue(_mock_service_get_all.called) def test_create_volume_no_hosts(self): # Ensure empty hosts/child_zones result in NoValidBackend exception. sched = fakes.FakeFilterScheduler() fake_context = context.RequestContext('user', 'project') request_spec = {'volume_properties': {'project_id': 1, 'size': 1}, 'volume_type': {'name': 'LVM_iSCSI'}, 'volume_id': fake.VOLUME_ID} request_spec = objects.RequestSpec.from_primitives(request_spec) self.assertRaises(exception.NoValidBackend, sched.schedule_create_volume, fake_context, request_spec, {}) def test_create_volume_no_hosts_invalid_req(self): sched = fakes.FakeFilterScheduler() fake_context = context.RequestContext('user', 'project') # request_spec is missing 'volume_id' request_spec = {'volume_properties': {'project_id': 1, 'size': 1}, 'volume_type': {'name': 'LVM_iSCSI'}} request_spec = objects.RequestSpec.from_primitives(request_spec) self.assertRaises(exception.NoValidBackend, sched.schedule_create_volume, fake_context, request_spec, {}) def test_create_volume_no_volume_type(self): sched = fakes.FakeFilterScheduler() fake_context = context.RequestContext('user', 'project') # request_spec is missing 'volume_type' request_spec = {'volume_properties': {'project_id': 1, 'size': 1}, 'volume_id': fake.VOLUME_ID} request_spec = objects.RequestSpec.from_primitives(request_spec) self.assertRaises(exception.NoValidBackend, sched.schedule_create_volume, fake_context, request_spec, {}) @mock.patch('cinder.scheduler.host_manager.HostManager.' 'get_all_backend_states') def test_create_volume_non_admin(self, _mock_get_all_backend_states): # Test creating a volume locally using create_volume, passing # a non-admin context. DB actions should work. self.was_admin = False def fake_get(ctxt): # Make sure this is called with admin context, even though # we're using user context below. 
self.was_admin = ctxt.is_admin return {} sched = fakes.FakeFilterScheduler() _mock_get_all_backend_states.side_effect = fake_get fake_context = context.RequestContext('user', 'project') request_spec = {'volume_properties': {'project_id': 1, 'size': 1}, 'volume_type': {'name': 'LVM_iSCSI'}, 'volume_id': fake.VOLUME_ID} request_spec = objects.RequestSpec.from_primitives(request_spec) self.assertRaises(exception.NoValidBackend, sched.schedule_create_volume, fake_context, request_spec, {}) self.assertTrue(self.was_admin) @mock.patch('cinder.db.service_get_all') def test_schedule_happy_day(self, _mock_service_get_all): # Make sure there's nothing glaringly wrong with _schedule() # by doing a happy day pass through. sched = fakes.FakeFilterScheduler() fake_context = context.RequestContext('user', 'project', is_admin=True) fakes.mock_host_manager_db_calls(_mock_service_get_all) request_spec = {'volume_type': {'name': 'LVM_iSCSI'}, 'volume_properties': {'project_id': 1, 'size': 1}} request_spec = objects.RequestSpec.from_primitives(request_spec) weighed_host = sched._schedule(fake_context, request_spec, {}) self.assertIsNotNone(weighed_host.obj) self.assertTrue(_mock_service_get_all.called) @ddt.data(('host10@BackendA', True), ('host10@BackendB#openstack_nfs_1', True), ('host10', False)) @ddt.unpack @mock.patch('cinder.db.service_get_all') def test_create_volume_host_different_with_resource_backend( self, resource_backend, multibackend_with_pools, _mock_service_get_all): sched = fakes.FakeFilterScheduler() sched.host_manager = fakes.FakeHostManager( multibackend_with_pools=multibackend_with_pools) fakes.mock_host_manager_db_calls( _mock_service_get_all, backends_with_pools=multibackend_with_pools) fake_context = context.RequestContext('user', 'project') request_spec = {'volume_properties': {'project_id': 1, 'size': 1}, 'volume_type': {'name': 'LVM_iSCSI'}, 'resource_backend': resource_backend} weighed_host = sched._schedule(fake_context, request_spec, {}) self.assertIsNone(weighed_host) @ddt.data(('host1@BackendA', True), ('host1@BackendB#openstack_nfs_1', True), ('host1', False)) @ddt.unpack @mock.patch('cinder.db.service_get_all') def test_create_volume_host_same_as_resource(self, resource_backend, multibackend_with_pools, _mock_service_get_all): # Ensure we don't clear the host whose backend is same as # requested backend (ex: create from source-volume/snapshot, # or create within a group) sched = fakes.FakeFilterScheduler() sched.host_manager = fakes.FakeHostManager( multibackend_with_pools=multibackend_with_pools) fakes.mock_host_manager_db_calls( _mock_service_get_all, backends_with_pools=multibackend_with_pools) fake_context = context.RequestContext('user', 'project') request_spec = {'volume_properties': {'project_id': 1, 'size': 1}, 'volume_type': {'name': 'LVM_iSCSI'}, 'resource_backend': resource_backend} weighed_host = sched._schedule(fake_context, request_spec, {}) self.assertIn(resource_backend, weighed_host.obj.host) def test_max_attempts(self): self.flags(scheduler_max_attempts=4) sched = fakes.FakeFilterScheduler() self.assertEqual(4, sched._max_attempts()) def test_invalid_max_attempts(self): self.flags(scheduler_max_attempts=0) self.assertRaises(exception.InvalidParameterValue, fakes.FakeFilterScheduler) def test_retry_disabled(self): # Retry info should not get populated when re-scheduling is off. 
self.flags(scheduler_max_attempts=1) sched = fakes.FakeFilterScheduler() request_spec = {'volume_type': {'name': 'LVM_iSCSI'}, 'volume_properties': {'project_id': 1, 'size': 1}} request_spec = objects.RequestSpec.from_primitives(request_spec) filter_properties = {} sched._schedule(self.context, request_spec, filter_properties=filter_properties) # Should not have retry info in the populated filter properties. self.assertNotIn("retry", filter_properties) def test_retry_attempt_one(self): # Test retry logic on initial scheduling attempt. self.flags(scheduler_max_attempts=2) sched = fakes.FakeFilterScheduler() request_spec = {'volume_type': {'name': 'LVM_iSCSI'}, 'volume_properties': {'project_id': 1, 'size': 1}} request_spec = objects.RequestSpec.from_primitives(request_spec) filter_properties = {} sched._schedule(self.context, request_spec, filter_properties=filter_properties) num_attempts = filter_properties['retry']['num_attempts'] self.assertEqual(1, num_attempts) def test_retry_attempt_two(self): # Test retry logic when re-scheduling. self.flags(scheduler_max_attempts=2) sched = fakes.FakeFilterScheduler() request_spec = {'volume_type': {'name': 'LVM_iSCSI'}, 'volume_properties': {'project_id': 1, 'size': 1}} request_spec = objects.RequestSpec.from_primitives(request_spec) retry = dict(num_attempts=1) filter_properties = dict(retry=retry) sched._schedule(self.context, request_spec, filter_properties=filter_properties) num_attempts = filter_properties['retry']['num_attempts'] self.assertEqual(2, num_attempts) def test_retry_exceeded_max_attempts(self): # Test for necessary explosion when max retries is exceeded. self.flags(scheduler_max_attempts=2) sched = fakes.FakeFilterScheduler() request_spec = {'volume_type': {'name': 'LVM_iSCSI'}, 'volume_properties': {'project_id': 1, 'size': 1}} request_spec = objects.RequestSpec.from_primitives(request_spec) retry = dict(num_attempts=2) filter_properties = dict(retry=retry) self.assertRaises(exception.NoValidBackend, sched._schedule, self.context, request_spec, filter_properties=filter_properties) def test_retry_revert_consumed_capacity(self): sched = fakes.FakeFilterScheduler() request_spec = {'volume_type': {'name': 'LVM_iSCSI'}, 'volume_properties': {'project_id': 1, 'size': 2}} request_spec = objects.RequestSpec.from_primitives(request_spec) retry = dict(num_attempts=1, backends=['fake_backend_name']) filter_properties = dict(retry=retry) with mock.patch.object( sched.host_manager, 'revert_volume_consumed_capacity') as mock_revert: sched._schedule(self.context, request_spec, filter_properties=filter_properties) mock_revert.assert_called_once_with('fake_backend_name', 2) def test_add_retry_backend(self): retry = dict(num_attempts=1, backends=[]) filter_properties = dict(retry=retry) backend = "fakehost" sched = fakes.FakeFilterScheduler() sched._add_retry_backend(filter_properties, backend) backends = filter_properties['retry']['backends'] self.assertListEqual([backend], backends) def test_post_select_populate(self): # Test addition of certain filter props after a node is selected. 
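# ---------------------------------------------------------------------------
# A simplified, self-contained sketch of the retry bookkeeping exercised by
# the retry tests above.  The names here are local stand-ins, not cinder code;
# in the scheduler the equivalent logic lives in FilterScheduler (see
# _add_retry_backend above) and failures raise exception.NoValidBackend.
class NoValidBackend(Exception):
    pass

def populate_retry(filter_properties, max_attempts):
    if max_attempts == 1:
        return                      # re-scheduling disabled: no 'retry' key
    retry = filter_properties.setdefault(
        'retry', {'num_attempts': 0, 'backends': []})
    retry['num_attempts'] += 1
    if retry['num_attempts'] > max_attempts:
        raise NoValidBackend()

props = {}
populate_retry(props, max_attempts=1)
assert 'retry' not in props                  # matches test_retry_disabled
populate_retry(props, max_attempts=2)
assert props['retry']['num_attempts'] == 1   # matches test_retry_attempt_one
populate_retry(props, max_attempts=2)
assert props['retry']['num_attempts'] == 2   # matches test_retry_attempt_two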
retry = {'backends': [], 'num_attempts': 1} filter_properties = {'retry': retry} sched = fakes.FakeFilterScheduler() backend_state = host_manager.BackendState('host', None) backend_state.total_capacity_gb = 1024 sched._post_select_populate_filter_properties(filter_properties, backend_state) self.assertEqual('host', filter_properties['retry']['backends'][0]) self.assertEqual(1024, backend_state.total_capacity_gb) def _backend_passes_filters_setup(self, mock_obj): sched = fakes.FakeFilterScheduler() fake_context = context.RequestContext('user', 'project', is_admin=True) fakes.mock_host_manager_db_calls(mock_obj) return (sched, fake_context) @ddt.data(None, {'name': 'LVM_iSCSI'}) @mock.patch('cinder.db.service_get_all') def test_backend_passes_filters_happy_day(self, volume_type, _mock_service_get_topic): """Do a successful pass through of with backend_passes_filters().""" sched, ctx = self._backend_passes_filters_setup( _mock_service_get_topic) request_spec = {'volume_id': fake.VOLUME_ID, 'volume_type': volume_type, 'volume_properties': {'project_id': 1, 'size': 1, 'multiattach': True}} request_spec = objects.RequestSpec.from_primitives(request_spec) ret_host = sched.backend_passes_filters(ctx, 'host1#lvm1', request_spec, {}) self.assertEqual('host1', volume_utils.extract_host(ret_host.host)) self.assertTrue(_mock_service_get_topic.called) @mock.patch('cinder.db.service_get_all') def test_backend_passes_filters_default_pool_happy_day( self, _mock_service_get_topic): """Do a successful pass through of with backend_passes_filters().""" sched, ctx = self._backend_passes_filters_setup( _mock_service_get_topic) request_spec = {'volume_id': fake.VOLUME_ID, 'volume_type': {'name': 'LVM_iSCSI'}, 'volume_properties': {'project_id': 1, 'size': 1}} request_spec = objects.RequestSpec.from_primitives(request_spec) ret_host = sched.backend_passes_filters(ctx, 'host5#_pool0', request_spec, {}) self.assertEqual('host5', volume_utils.extract_host(ret_host.host)) self.assertTrue(_mock_service_get_topic.called) @mock.patch('cinder.db.service_get_all') def test_backend_passes_filters_without_pool(self, mock_service_get_all): """Do a successful pass through of with backend_passes_filters().""" sched, ctx = self._backend_passes_filters_setup(mock_service_get_all) request_spec = {'volume_id': fake.VOLUME_ID, 'volume_type': {'name': 'LVM_iSCSI'}, 'volume_properties': {'project_id': 1, 'size': 1}} request_spec = objects.RequestSpec.from_primitives(request_spec) ret_host = sched.backend_passes_filters(ctx, 'host1', request_spec, {}) self.assertEqual('host1', volume_utils.extract_host(ret_host.host)) self.assertTrue(mock_service_get_all.called) @mock.patch('cinder.db.service_get_all') def test_backend_passes_filters_no_capacity(self, _mock_service_get_topic): """Fail the host due to insufficient capacity.""" sched, ctx = self._backend_passes_filters_setup( _mock_service_get_topic) request_spec = {'volume_id': fake.VOLUME_ID, 'volume_type': {'name': 'LVM_iSCSI'}, 'volume_properties': {'project_id': 1, 'size': 1024}} request_spec = objects.RequestSpec.from_primitives(request_spec) self.assertRaises(exception.NoValidBackend, sched.backend_passes_filters, ctx, 'host1#lvm1', request_spec, {}) self.assertTrue(_mock_service_get_topic.called) @mock.patch('cinder.db.service_get_all') def test_backend_passes_filters_online_extend_support_happy_day( self, _mock_service_get_topic): """Do a successful online extend with backend_passes_filters().""" sched, ctx = self._backend_passes_filters_setup( _mock_service_get_topic) 
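# ---------------------------------------------------------------------------
# The backend strings these tests pass around ('host1#lvm1', 'host5#_pool0',
# 'host1@BackendB#openstack_nfs_1') follow cinder's 'host@backend#pool'
# convention, and the assertions use volume_utils.extract_host() to strip the
# suffixes.  A rough local stand-in (illustration only, it does not replicate
# extract_host's level/default-pool options):
def host_only(backend_host):
    """Return the bare host portion of a 'host@backend#pool' style string."""
    return backend_host.split('#')[0].split('@')[0]

assert host_only('host1#lvm1') == 'host1'
assert host_only('host1@BackendB#openstack_nfs_1') == 'host1'
assert host_only('host5') == 'host5'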
request_spec = {'volume_id': fake.VOLUME_ID, 'volume_type': {'name': 'LVM_iSCSI'}, 'volume_properties': {'project_id': 1, 'size': 1, 'attach_status': 'attached'}, 'operation': 'extend_volume'} request_spec = objects.RequestSpec.from_primitives(request_spec) # host1#lvm1 has online_extend_support = True sched.backend_passes_filters(ctx, 'host1#lvm1', request_spec, {}) self.assertTrue(_mock_service_get_topic.called) @mock.patch('cinder.db.service_get_all') def test_backend_passes_filters_no_online_extend_support( self, _mock_service_get_topic): """Fail the host due to lack of online extend support.""" sched, ctx = self._backend_passes_filters_setup( _mock_service_get_topic) request_spec = {'volume_id': fake.VOLUME_ID, 'volume_type': {'name': 'LVM_iSCSI'}, 'volume_properties': {'project_id': 1, 'size': 1, 'attach_status': 'attached'}, 'operation': 'extend_volume'} request_spec = objects.RequestSpec.from_primitives(request_spec) # host2#lvm2 has online_extend_support = False self.assertRaises(exception.NoValidBackend, sched.backend_passes_filters, ctx, 'host2#lvm2', request_spec, {}) self.assertTrue(_mock_service_get_topic.called) @mock.patch('cinder.db.service_get_all') def test_retype_policy_never_migrate_pass(self, _mock_service_get_topic): # Retype should pass if current host passes filters and # policy=never. host4 doesn't have enough space to hold an additional # 200GB, but it is already the host of this volume and should not be # counted twice. sched, ctx = self._backend_passes_filters_setup( _mock_service_get_topic) extra_specs = {'volume_backend_name': 'lvm4'} request_spec = {'volume_id': fake.VOLUME_ID, 'volume_type': {'name': 'LVM_iSCSI', 'extra_specs': extra_specs}, 'volume_properties': {'project_id': 1, 'size': 200, 'host': 'host4#lvm4'}} request_spec = objects.RequestSpec.from_primitives(request_spec) host_state = sched.find_retype_backend(ctx, request_spec, filter_properties={}, migration_policy='never') self.assertEqual('host4', volume_utils.extract_host(host_state.host)) @mock.patch('cinder.db.service_get_all') def test_retype_with_pool_policy_never_migrate_pass( self, _mock_service_get_topic): # Retype should pass if current host passes filters and # policy=never. host4 doesn't have enough space to hold an additional # 200GB, but it is already the host of this volume and should not be # counted twice. sched, ctx = self._backend_passes_filters_setup( _mock_service_get_topic) extra_specs = {'volume_backend_name': 'lvm3'} request_spec = {'volume_id': fake.VOLUME_ID, 'volume_type': {'name': 'LVM_iSCSI', 'extra_specs': extra_specs}, 'volume_properties': {'project_id': 1, 'size': 200, 'host': 'host3#lvm3'}} request_spec = objects.RequestSpec.from_primitives(request_spec) host_state = sched.find_retype_backend(ctx, request_spec, filter_properties={}, migration_policy='never') self.assertEqual('host3#lvm3', host_state.host) @mock.patch('cinder.db.service_get_all') def test_retype_policy_never_migrate_fail(self, _mock_service_get_topic): # Retype should fail if current host doesn't pass filters and # policy=never. 
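# ---------------------------------------------------------------------------
# A compact sketch of the retype placement contract documented by the retype
# tests around this point: with migration_policy='never' the volume's current
# backend must itself pass the filters (and its existing usage is not counted
# twice), otherwise NoValidBackend is raised; with 'on-demand' another
# filtered backend may be chosen, and it only fails when nothing fits.  The
# helper below is a local stand-in, not cinder code, and it assumes the
# current backend is preferred whenever it still qualifies.
def pick_retype_backend(current, passing_backends, migration_policy):
    """Return the backend a retyped volume should land on, or None."""
    if current in passing_backends:
        return current
    if migration_policy == 'never':
        return None                 # not allowed to migrate
    return passing_backends[0] if passing_backends else None

assert pick_retype_backend('host4', ['host4'], 'never') == 'host4'
assert pick_retype_backend('host4', ['host1'], 'never') is None
assert pick_retype_backend('host4', ['host1'], 'on-demand') == 'host1'
assert pick_retype_backend('host4', [], 'on-demand') is None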
sched, ctx = self._backend_passes_filters_setup( _mock_service_get_topic) extra_specs = {'volume_backend_name': 'lvm1'} request_spec = {'volume_id': fake.VOLUME_ID, 'volume_type': {'name': 'LVM_iSCSI', 'extra_specs': extra_specs}, 'volume_properties': {'project_id': 1, 'size': 200, 'host': 'host4'}} request_spec = objects.RequestSpec.from_primitives(request_spec) self.assertRaises(exception.NoValidBackend, sched.find_retype_backend, ctx, request_spec, filter_properties={}, migration_policy='never') @mock.patch('cinder.db.service_get_all') def test_retype_policy_demand_migrate_pass(self, _mock_service_get_topic): # Retype should pass if current host fails filters but another host # is suitable when policy=on-demand. sched, ctx = self._backend_passes_filters_setup( _mock_service_get_topic) extra_specs = {'volume_backend_name': 'lvm1'} request_spec = {'volume_id': fake.VOLUME_ID, 'volume_type': {'name': 'LVM_iSCSI', 'extra_specs': extra_specs}, 'volume_properties': {'project_id': 1, 'size': 200, 'host': 'host4'}} request_spec = objects.RequestSpec.from_primitives(request_spec) host_state = sched.find_retype_backend(ctx, request_spec, filter_properties={}, migration_policy='on-demand') self.assertEqual('host1', volume_utils.extract_host(host_state.host)) @mock.patch('cinder.db.service_get_all') def test_retype_policy_demand_migrate_fail(self, _mock_service_get_topic): # Retype should fail if current host doesn't pass filters and # no other suitable candidates exist even if policy=on-demand. sched, ctx = self._backend_passes_filters_setup( _mock_service_get_topic) extra_specs = {'volume_backend_name': 'lvm1'} request_spec = {'volume_id': fake.VOLUME_ID, 'volume_type': {'name': 'LVM_iSCSI', 'extra_specs': extra_specs}, 'volume_properties': {'project_id': 1, 'size': 2048, 'host': 'host4'}} request_spec = objects.RequestSpec.from_primitives(request_spec) self.assertRaises(exception.NoValidBackend, sched.find_retype_backend, ctx, request_spec, filter_properties={}, migration_policy='on-demand') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/scheduler/test_goodness_weigher.py0000664000175000017500000001565500000000000025412 0ustar00zuulzuul00000000000000# Copyright (C) 2014 Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Tests For Goodness Weigher. 
""" from cinder.scheduler.weights import goodness from cinder.tests.unit.scheduler import fakes from cinder.tests.unit import test class GoodnessWeigherTestCase(test.TestCase): def test_goodness_weigher_with_no_goodness_function(self): weigher = goodness.GoodnessWeigher() host_state = fakes.FakeBackendState('host1', { 'host': 'host.example.com', 'capabilities': { 'foo': '50' } }) weight_properties = {} weight = weigher._weigh_object(host_state, weight_properties) self.assertEqual(0, weight) def test_goodness_weigher_passing_host(self): weigher = goodness.GoodnessWeigher() host_state = fakes.FakeBackendState('host1', { 'host': 'host.example.com', 'capabilities': { 'goodness_function': '100' } }) host_state_2 = fakes.FakeBackendState('host2', { 'host': 'host2.example.com', 'capabilities': { 'goodness_function': '0' } }) host_state_3 = fakes.FakeBackendState('host3', { 'host': 'host3.example.com', 'capabilities': { 'goodness_function': '100 / 2' } }) weight_properties = {} weight = weigher._weigh_object(host_state, weight_properties) self.assertEqual(100, weight) weight = weigher._weigh_object(host_state_2, weight_properties) self.assertEqual(0, weight) weight = weigher._weigh_object(host_state_3, weight_properties) self.assertEqual(50, weight) def test_goodness_weigher_capabilities_substitution(self): weigher = goodness.GoodnessWeigher() host_state = fakes.FakeBackendState('host1', { 'host': 'host.example.com', 'capabilities': { 'foo': 50, 'goodness_function': '10 + capabilities.foo' } }) weight_properties = {} weight = weigher._weigh_object(host_state, weight_properties) self.assertEqual(60, weight) def test_goodness_weigher_extra_specs_substitution(self): weigher = goodness.GoodnessWeigher() host_state = fakes.FakeBackendState('host1', { 'host': 'host.example.com', 'capabilities': { 'goodness_function': '10 + extra.foo' } }) weight_properties = { 'volume_type': { 'extra_specs': { 'foo': 50 } } } weight = weigher._weigh_object(host_state, weight_properties) self.assertEqual(60, weight) def test_goodness_weigher_volume_substitution(self): weigher = goodness.GoodnessWeigher() host_state = fakes.FakeBackendState('host1', { 'host': 'host.example.com', 'capabilities': { 'goodness_function': '10 + volume.foo' } }) weight_properties = { 'request_spec': { 'volume_properties': { 'foo': 50 } } } weight = weigher._weigh_object(host_state, weight_properties) self.assertEqual(60, weight) def test_goodness_weigher_qos_substitution(self): weigher = goodness.GoodnessWeigher() host_state = fakes.FakeBackendState('host1', { 'host': 'host.example.com', 'capabilities': { 'goodness_function': '10 + qos.foo' } }) weight_properties = { 'qos_specs': { 'foo': 50 } } weight = weigher._weigh_object(host_state, weight_properties) self.assertEqual(60, weight) def test_goodness_weigher_stats_substitution(self): weigher = goodness.GoodnessWeigher() host_state = fakes.FakeBackendState('host1', { 'host': 'host.example.com', 'capabilities': { 'goodness_function': 'stats.free_capacity_gb > 20' }, 'free_capacity_gb': 50 }) weight_properties = {} weight = weigher._weigh_object(host_state, weight_properties) self.assertEqual(100, weight) def test_goodness_weigher_invalid_substitution(self): weigher = goodness.GoodnessWeigher() host_state = fakes.FakeBackendState('host1', { 'host': 'host.example.com', 'capabilities': { 'goodness_function': '10 + stats.my_val' }, 'foo': 50 }) weight_properties = {} weight = weigher._weigh_object(host_state, weight_properties) self.assertEqual(0, weight) def 
test_goodness_weigher_host_rating_out_of_bounds(self): weigher = goodness.GoodnessWeigher() host_state = fakes.FakeBackendState('host1', { 'host': 'host.example.com', 'capabilities': { 'goodness_function': '-10' } }) host_state_2 = fakes.FakeBackendState('host2', { 'host': 'host2.example.com', 'capabilities': { 'goodness_function': '200' } }) weight_properties = {} weight = weigher._weigh_object(host_state, weight_properties) self.assertEqual(0, weight) weight = weigher._weigh_object(host_state_2, weight_properties) self.assertEqual(0, weight) def test_goodness_weigher_invalid_goodness_function(self): weigher = goodness.GoodnessWeigher() host_state = fakes.FakeBackendState('host1', { 'host': 'host.example.com', 'capabilities': { 'goodness_function': '50 / 0' } }) weight_properties = {} weight = weigher._weigh_object(host_state, weight_properties) self.assertEqual(0, weight) def test_goodness_weigher_untyped_volume(self): weigher = goodness.GoodnessWeigher() host_state = fakes.FakeBackendState('host1', { 'host': 'host.example.com', 'capabilities': { 'goodness_function': '67' } }) weight_properties = { 'volume_type': None, } weight = weigher._weigh_object(host_state, weight_properties) self.assertEqual(67, weight) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/scheduler/test_host_filters.py0000664000175000017500000023664700000000000024572 0ustar00zuulzuul00000000000000# Copyright 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
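# ---------------------------------------------------------------------------
# The GoodnessWeigher tests above pin down the contract for goodness_function
# results: the expression is evaluated with the capabilities./extra./volume./
# qos./stats. substitutions shown, and the result must land in 0..100;
# anything out of range or that fails to evaluate ('50 / 0', unknown stats
# variables) scores 0.  A tiny sketch of that accept-or-discard rule (local
# helper, not cinder code):
def goodness_score(result):
    """Accept an evaluated goodness result only if it falls within 0..100."""
    if not isinstance(result, (int, float)) or not 0 <= result <= 100:
        return 0
    return result

assert goodness_score(100) == 100    # 'goodness_function': '100'
assert goodness_score(50) == 50      # '100 / 2'
assert goodness_score(-10) == 0      # out of bounds, discarded
assert goodness_score(200) == 0      # out of bounds, discarded
assert goodness_score(None) == 0     # evaluation failed, discarded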
"""Tests For Scheduler Host Filters.""" from unittest import mock import ddt from oslo_serialization import jsonutils from requests import exceptions as request_exceptions from cinder.compute import nova from cinder import context from cinder import db from cinder import exception from cinder.scheduler import filters from cinder.scheduler.filters import extra_specs_ops from cinder.tests.unit import fake_constants as fake from cinder.tests.unit.scheduler import fakes from cinder.tests.unit.scheduler import helpers from cinder.tests.unit import test from cinder.tests.unit import utils class BackendFiltersTestCase(test.TestCase): """Test case for backend filters.""" def setUp(self): super(BackendFiltersTestCase, self).setUp() self.context = context.RequestContext(fake.USER_ID, fake.PROJECT_ID) # we're testing that the filters work, not that they are retrievable # so pre-populate this convenient class map self.class_map = {} for cls in helpers.ALL_FILTER_CLASSES: self.class_map[cls.__name__] = cls @ddt.ddt @mock.patch('cinder.objects.service.Service.is_up', new_callable=mock.PropertyMock) class CapacityFilterTestCase(BackendFiltersTestCase): def setUp(self): super(CapacityFilterTestCase, self).setUp() self.json_query = jsonutils.dumps( ['and', ['>=', '$free_capacity_gb', 1024], ['>=', '$total_capacity_gb', 10 * 1024]]) def test_filter_passes(self, _mock_serv_is_up): _mock_serv_is_up.return_value = True filt_cls = self.class_map['CapacityFilter']() filter_properties = {'size': 100, 'request_spec': {'volume_id': fake.VOLUME_ID}} service = {'disabled': False} host = fakes.FakeBackendState('host1', {'total_capacity_gb': 500, 'free_capacity_gb': 200, 'updated_at': None, 'service': service}) self.assertTrue(filt_cls.backend_passes(host, filter_properties)) def test_filter_passes_without_volume_id(self, _mock_serv_is_up): _mock_serv_is_up.return_value = True filter_cls = self.class_map['CapacityFilter']() filter_properties = {'size': 100} service = {'disabled': False} host = fakes.FakeBackendState('host1', {'total_capacity_gb': 500, 'free_capacity_gb': 200, 'updated_at': None, 'service': service}) self.assertTrue(filter_cls.backend_passes(host, filter_properties)) def test_filter_current_backend_passes(self, _mock_serv_is_up): _mock_serv_is_up.return_value = True filt_cls = self.class_map['CapacityFilter']() filter_properties = {'size': 100, 'vol_exists_on': 'host1', 'request_spec': {'volume_id': fake.VOLUME_ID}} service = {'disabled': False} host = fakes.FakeBackendState('host1', {'total_capacity_gb': 100, 'free_capacity_gb': 10, 'updated_at': None, 'service': service}) self.assertTrue(filt_cls.backend_passes(host, filter_properties)) def test_filter_fails(self, _mock_serv_is_up): _mock_serv_is_up.return_value = True filt_cls = self.class_map['CapacityFilter']() filter_properties = {'size': 121, 'request_spec': {'volume_id': fake.VOLUME_ID}} service = {'disabled': False} host = fakes.FakeBackendState('host1', {'total_capacity_gb': 200, 'free_capacity_gb': 120, 'reserved_percentage': 20, 'updated_at': None, 'service': service}) self.assertFalse(filt_cls.backend_passes(host, filter_properties)) def test_filter_fails_free_capacity_None(self, _mock_serv_is_up): _mock_serv_is_up.return_value = True filt_cls = self.class_map['CapacityFilter']() filter_properties = {'size': 100, 'request_spec': {'volume_id': fake.VOLUME_ID}} service = {'disabled': False} host = fakes.FakeBackendState('host1', {'free_capacity_gb': None, 'updated_at': None, 'service': service}) self.assertFalse(filt_cls.backend_passes(host, 
filter_properties)) def test_filter_with_size_0(self, _mock_serv_is_up): _mock_serv_is_up.return_value = True filt_cls = self.class_map['CapacityFilter']() filter_properties = {'size': 0, 'request_spec': {'volume_id': fake.VOLUME_ID}} service = {'disabled': False} host = fakes.FakeBackendState('host1', {'total_capacity_gb': 500, 'free_capacity_gb': 200, 'provisioned_capacity_gb': 1500, 'max_over_subscription_ratio': 2.0, 'reserved_percentage': 5, 'thin_provisioning_support': True, 'thick_provisioning_support': False, 'updated_at': None, 'service': service}) self.assertTrue(filt_cls.backend_passes(host, filter_properties)) def test_filter_passes_infinite(self, _mock_serv_is_up): _mock_serv_is_up.return_value = True filt_cls = self.class_map['CapacityFilter']() filter_properties = {'size': 100, 'request_spec': {'volume_id': fake.VOLUME_ID}} service = {'disabled': False} host = fakes.FakeBackendState('host1', {'free_capacity_gb': 'infinite', 'updated_at': None, 'service': service}) self.assertTrue(filt_cls.backend_passes(host, filter_properties)) def test_filter_extend_request(self, _mock_serv_is_up): _mock_serv_is_up.return_value = True filt_cls = self.class_map['CapacityFilter']() filter_properties = {'new_size': 100, 'size': 50, 'request_spec': {'volume_id': fake.VOLUME_ID}} service = {'disabled': False} host = fakes.FakeBackendState('host1', {'free_capacity_gb': 200, 'updated_at': None, 'total_capacity_gb': 500, 'service': service}) self.assertTrue(filt_cls.backend_passes(host, filter_properties)) def test_filter_extend_request_negative(self, _mock_serv_is_up): _mock_serv_is_up.return_value = True filt_cls = self.class_map['CapacityFilter']() filter_properties = {'size': 50, 'request_spec': {'volume_id': fake.VOLUME_ID}} service = {'disabled': False} host = fakes.FakeBackendState('host1', {'free_capacity_gb': 49, 'updated_at': None, 'total_capacity_gb': 500, 'service': service}) self.assertFalse(filt_cls.backend_passes(host, filter_properties)) def test_filter_passes_unknown(self, _mock_serv_is_up): _mock_serv_is_up.return_value = True filt_cls = self.class_map['CapacityFilter']() filter_properties = {'size': 100, 'request_spec': {'volume_id': fake.VOLUME_ID}} service = {'disabled': False} host = fakes.FakeBackendState('host1', {'free_capacity_gb': 'unknown', 'updated_at': None, 'service': service}) self.assertTrue(filt_cls.backend_passes(host, filter_properties)) def test_filter_passes_total_infinite(self, _mock_serv_is_up): _mock_serv_is_up.return_value = True filt_cls = self.class_map['CapacityFilter']() filter_properties = {'size': 100, 'request_spec': {'volume_id': fake.VOLUME_ID}} service = {'disabled': False} host = fakes.FakeBackendState('host1', {'free_capacity_gb': 'infinite', 'total_capacity_gb': 'infinite', 'reserved_percentage': 0, 'updated_at': None, 'service': service}) self.assertTrue(filt_cls.backend_passes(host, filter_properties)) def test_filter_passes_total_unknown(self, _mock_serv_is_up): _mock_serv_is_up.return_value = True filt_cls = self.class_map['CapacityFilter']() filter_properties = {'size': 100, 'request_spec': {'volume_id': fake.VOLUME_ID}} service = {'disabled': False} host = fakes.FakeBackendState('host1', {'free_capacity_gb': 'unknown', 'total_capacity_gb': 'unknown', 'reserved_percentage': 0, 'updated_at': None, 'service': service}) self.assertTrue(filt_cls.backend_passes(host, filter_properties)) def test_filter_fails_total_infinite(self, _mock_serv_is_up): _mock_serv_is_up.return_value = True filt_cls = self.class_map['CapacityFilter']() 
filter_properties = {'size': 100, 'request_spec': {'volume_id': fake.VOLUME_ID}} service = {'disabled': False} host = fakes.FakeBackendState('host1', {'total_capacity_gb': 'infinite', 'reserved_percentage': 5, 'updated_at': None, 'service': service}) self.assertFalse(filt_cls.backend_passes(host, filter_properties)) def test_filter_fails_total_unknown(self, _mock_serv_is_up): _mock_serv_is_up.return_value = True filt_cls = self.class_map['CapacityFilter']() filter_properties = {'size': 100, 'request_spec': {'volume_id': fake.VOLUME_ID}} service = {'disabled': False} host = fakes.FakeBackendState('host1', {'total_capacity_gb': 'unknown', 'reserved_percentage': 5, 'updated_at': None, 'service': service}) self.assertFalse(filt_cls.backend_passes(host, filter_properties)) def test_filter_fails_total_zero(self, _mock_serv_is_up): _mock_serv_is_up.return_value = True filt_cls = self.class_map['CapacityFilter']() filter_properties = {'size': 100, 'request_spec': {'volume_id': fake.VOLUME_ID}} service = {'disabled': False} host = fakes.FakeBackendState('host1', {'total_capacity_gb': 0, 'reserved_percentage': 5, 'updated_at': None, 'service': service}) self.assertFalse(filt_cls.backend_passes(host, filter_properties)) def test_filter_thin_true_passes(self, _mock_serv_is_up): _mock_serv_is_up.return_value = True filt_cls = self.class_map['CapacityFilter']() filter_properties = {'size': 100, 'capabilities:thin_provisioning_support': ' True', 'capabilities:thick_provisioning_support': ' False', 'request_spec': {'volume_id': fake.VOLUME_ID}} service = {'disabled': False} host = fakes.FakeBackendState('host1', {'total_capacity_gb': 500, 'free_capacity_gb': 200, 'provisioned_capacity_gb': 500, 'max_over_subscription_ratio': 2.0, 'reserved_percentage': 5, 'thin_provisioning_support': True, 'thick_provisioning_support': False, 'updated_at': None, 'service': service}) self.assertTrue(filt_cls.backend_passes(host, filter_properties)) def test_filter_thin_true_passes2(self, _mock_serv_is_up): _mock_serv_is_up.return_value = True filt_cls = self.class_map['CapacityFilter']() filter_properties = {'size': 2400, 'capabilities:thin_provisioning_support': ' True', 'capabilities:thick_provisioning_support': ' False', 'request_spec': {'volume_id': fake.VOLUME_ID}} service = {'disabled': False} host = fakes.FakeBackendState('host1', {'total_capacity_gb': 500, 'free_capacity_gb': 200, 'provisioned_capacity_gb': 7000, 'max_over_subscription_ratio': 20, 'reserved_percentage': 5, 'thin_provisioning_support': True, 'thick_provisioning_support': False, 'updated_at': None, 'service': service}) self.assertTrue(filt_cls.backend_passes(host, filter_properties)) def test_filter_thin_false_passes(self, _mock_serv_is_up): _mock_serv_is_up.return_value = True filt_cls = self.class_map['CapacityFilter']() filter_properties = {'size': 100, 'capabilities:thin_provisioning_support': ' False', 'capabilities:thick_provisioning_support': ' True', 'request_spec': {'volume_id': fake.VOLUME_ID}} service = {'disabled': False} # If "thin_provisioning_support" is False, # "max_over_subscription_ratio" will be ignored. 
host = fakes.FakeBackendState('host1', {'total_capacity_gb': 500, 'free_capacity_gb': 200, 'provisioned_capacity_gb': 300, 'max_over_subscription_ratio': 1.0, 'reserved_percentage': 5, 'thin_provisioning_support': False, 'thick_provisioning_support': True, 'updated_at': None, 'service': service}) self.assertTrue(filt_cls.backend_passes(host, filter_properties)) def test_filter_over_subscription_less_than_1(self, _mock_serv_is_up): _mock_serv_is_up.return_value = True filt_cls = self.class_map['CapacityFilter']() filter_properties = {'size': 200, 'capabilities:thin_provisioning_support': ' True', 'capabilities:thick_provisioning_support': ' False', 'request_spec': {'volume_id': fake.VOLUME_ID}} service = {'disabled': False} host = fakes.FakeBackendState('host1', {'total_capacity_gb': 500, 'free_capacity_gb': 100, 'provisioned_capacity_gb': 400, 'max_over_subscription_ratio': 0.8, 'reserved_percentage': 0, 'thin_provisioning_support': True, 'thick_provisioning_support': False, 'updated_at': None, 'service': service}) self.assertFalse(filt_cls.backend_passes(host, filter_properties)) def test_filter_over_subscription_equal_to_1(self, _mock_serv_is_up): _mock_serv_is_up.return_value = True filt_cls = self.class_map['CapacityFilter']() filter_properties = {'size': 150, 'capabilities:thin_provisioning_support': ' True', 'capabilities:thick_provisioning_support': ' False', 'request_spec': {'volume_id': fake.VOLUME_ID}} service = {'disabled': False} host = fakes.FakeBackendState('host1', {'total_capacity_gb': 500, 'free_capacity_gb': 200, 'provisioned_capacity_gb': 400, 'max_over_subscription_ratio': 1.0, 'reserved_percentage': 0, 'thin_provisioning_support': True, 'thick_provisioning_support': False, 'updated_at': None, 'service': service}) self.assertFalse(filt_cls.backend_passes(host, filter_properties)) def test_filter_over_subscription_fails(self, _mock_serv_is_up): _mock_serv_is_up.return_value = True filt_cls = self.class_map['CapacityFilter']() filter_properties = {'size': 100, 'capabilities:thin_provisioning_support': ' True', 'capabilities:thick_provisioning_support': ' False', 'request_spec': {'volume_id': fake.VOLUME_ID}} service = {'disabled': False} host = fakes.FakeBackendState('host1', {'total_capacity_gb': 500, 'free_capacity_gb': 200, 'provisioned_capacity_gb': 700, 'max_over_subscription_ratio': 1.5, 'reserved_percentage': 5, 'thin_provisioning_support': True, 'thick_provisioning_support': False, 'updated_at': None, 'service': service}) self.assertFalse(filt_cls.backend_passes(host, filter_properties)) def test_filter_over_subscription_fails2(self, _mock_serv_is_up): _mock_serv_is_up.return_value = True filt_cls = self.class_map['CapacityFilter']() filter_properties = {'size': 2000, 'capabilities:thin_provisioning_support': ' True', 'capabilities:thick_provisioning_support': ' False', 'request_spec': {'volume_id': fake.VOLUME_ID}} service = {'disabled': False} host = fakes.FakeBackendState('host1', {'total_capacity_gb': 500, 'free_capacity_gb': 30, 'provisioned_capacity_gb': 9000, 'max_over_subscription_ratio': 20, 'reserved_percentage': 0, 'thin_provisioning_support': True, 'thick_provisioning_support': False, 'updated_at': None, 'service': service}) self.assertFalse(filt_cls.backend_passes(host, filter_properties)) def test_filter_reserved_thin_true_fails(self, _mock_serv_is_up): _mock_serv_is_up.return_value = True filt_cls = self.class_map['CapacityFilter']() filter_properties = {'size': 100, 'capabilities:thin_provisioning_support': ' True', 
'capabilities:thick_provisioning_support': ' False', 'request_spec': {'volume_id': fake.VOLUME_ID}} service = {'disabled': False} host = fakes.FakeBackendState('host1', {'total_capacity_gb': 500, 'free_capacity_gb': 100, 'provisioned_capacity_gb': 1000, 'max_over_subscription_ratio': 2.0, 'reserved_percentage': 5, 'thin_provisioning_support': True, 'thick_provisioning_support': False, 'updated_at': None, 'service': service}) self.assertFalse(filt_cls.backend_passes(host, filter_properties)) def test_filter_reserved_thin_false_fails(self, _mock_serv_is_up): _mock_serv_is_up.return_value = True filt_cls = self.class_map['CapacityFilter']() filter_properties = {'size': 100, 'capabilities:thin_provisioning_support': ' False', 'capabilities:thick_provisioning_support': ' True', 'request_spec': {'volume_id': fake.VOLUME_ID}} service = {'disabled': False} # If "thin_provisioning_support" is False, # "max_over_subscription_ratio" will be ignored. host = fakes.FakeBackendState('host1', {'total_capacity_gb': 500, 'free_capacity_gb': 100, 'provisioned_capacity_gb': 400, 'max_over_subscription_ratio': 1.0, 'reserved_percentage': 5, 'thin_provisioning_support': False, 'thick_provisioning_support': True, 'updated_at': None, 'service': service}) self.assertFalse(filt_cls.backend_passes(host, filter_properties)) def test_filter_reserved_thin_thick_true_fails(self, _mock_serv_is_up): _mock_serv_is_up.return_value = True filt_cls = self.class_map['CapacityFilter']() filter_properties = {'size': 151, 'capabilities:thin_provisioning_support': ' True', 'capabilities:thick_provisioning_support': ' True', 'request_spec': {'volume_id': fake.VOLUME_ID}} service = {'disabled': False} host = fakes.FakeBackendState('host1', {'total_capacity_gb': 500, 'free_capacity_gb': 0, 'provisioned_capacity_gb': 800, 'max_over_subscription_ratio': 2.0, 'reserved_percentage': 5, 'thin_provisioning_support': True, 'thick_provisioning_support': True, 'updated_at': None, 'service': service}) self.assertFalse(filt_cls.backend_passes(host, filter_properties)) def test_filter_reserved_thin_thick_true_passes(self, _mock_serv_is_up): _mock_serv_is_up.return_value = True filt_cls = self.class_map['CapacityFilter']() filter_properties = {'size': 100, 'capabilities:thin_provisioning_support': ' True', 'capabilities:thick_provisioning_support': ' True', 'request_spec': {'volume_id': fake.VOLUME_ID}} service = {'disabled': False} host = fakes.FakeBackendState('host1', {'total_capacity_gb': 500, 'free_capacity_gb': 125, 'provisioned_capacity_gb': 400, 'max_over_subscription_ratio': 2.0, 'reserved_percentage': 5, 'thin_provisioning_support': True, 'thick_provisioning_support': True, 'updated_at': None, 'service': service}) self.assertTrue(filt_cls.backend_passes(host, filter_properties)) def test_filter_reserved_thin_true_passes(self, _mock_serv_is_up): _mock_serv_is_up.return_value = True filt_cls = self.class_map['CapacityFilter']() filter_properties = {'size': 100, 'capabilities:thin_provisioning_support': ' True', 'capabilities:thick_provisioning_support': ' False', 'request_spec': {'volume_id': fake.VOLUME_ID}} service = {'disabled': False} host = fakes.FakeBackendState('host1', {'total_capacity_gb': 500, 'free_capacity_gb': 80, 'provisioned_capacity_gb': 600, 'max_over_subscription_ratio': 2.0, 'reserved_percentage': 5, 'thin_provisioning_support': True, 'thick_provisioning_support': False, 'updated_at': None, 'service': service}) self.assertTrue(filt_cls.backend_passes(host, filter_properties)) def 
test_filter_reserved_thin_thick_true_fails2(self, _mock_serv_is_up): _mock_serv_is_up.return_value = True filt_cls = self.class_map['CapacityFilter']() filter_properties = {'size': 100, 'capabilities:thin_provisioning_support': ' True', 'capabilities:thick_provisioning_support': ' True', 'request_spec': {'volume_id': fake.VOLUME_ID}} service = {'disabled': False} host = fakes.FakeBackendState('host1', {'total_capacity_gb': 500, 'free_capacity_gb': 99, 'provisioned_capacity_gb': 1000, 'max_over_subscription_ratio': 2.0, 'reserved_percentage': 5, 'thin_provisioning_support': True, 'thick_provisioning_support': True, 'updated_at': None, 'service': service}) self.assertFalse(filt_cls.backend_passes(host, filter_properties)) def test_filter_reserved_thin_thick_true_passes2(self, _mock_serv_is_up): _mock_serv_is_up.return_value = True filt_cls = self.class_map['CapacityFilter']() filter_properties = {'size': 100, 'capabilities:thin_provisioning_support': ' True', 'capabilities:thick_provisioning_support': ' True', 'request_spec': {'volume_id': fake.VOLUME_ID}} service = {'disabled': False} host = fakes.FakeBackendState('host1', {'total_capacity_gb': 500, 'free_capacity_gb': 100, 'provisioned_capacity_gb': 400, 'max_over_subscription_ratio': 2.0, 'reserved_percentage': 0, 'thin_provisioning_support': True, 'thick_provisioning_support': True, 'updated_at': None, 'service': service}) self.assertTrue(filt_cls.backend_passes(host, filter_properties)) @ddt.data( {'volume_type': {'extra_specs': {'provisioning:type': 'thick'}}}, {'volume_type': {'extra_specs': {'provisioning:type': 'thin'}}}, {'volume_type': {'extra_specs': {}}}, {'volume_type': {}}, {'volume_type': None}, ) @ddt.unpack def test_filter_provisioning_type(self, _mock_serv_is_up, volume_type): _mock_serv_is_up.return_value = True filt_cls = self.class_map['CapacityFilter']() filter_properties = {'size': 100, 'volume_type': volume_type, 'request_spec': {'volume_id': fake.VOLUME_ID}} service = {'disabled': False} host = fakes.FakeBackendState('host1', {'total_capacity_gb': 500, 'free_capacity_gb': 100, 'provisioned_capacity_gb': 400, 'max_over_subscription_ratio': 2.0, 'reserved_percentage': 0, 'thin_provisioning_support': True, 'thick_provisioning_support': True, 'updated_at': None, 'service': service}) self.assertTrue(filt_cls.backend_passes(host, filter_properties)) class AffinityFilterTestCase(BackendFiltersTestCase): @mock.patch('cinder.objects.service.Service.is_up', new_callable=mock.PropertyMock) def test_different_filter_passes(self, _mock_serv_is_up): _mock_serv_is_up.return_value = True filt_cls = self.class_map['DifferentBackendFilter']() service = {'disabled': False} host = fakes.FakeBackendState('host1:pool0', {'free_capacity_gb': '1000', 'updated_at': None, 'service': service}) volume = utils.create_volume(self.context, host='host1:pool1') vol_id = volume.id filter_properties = {'context': self.context.elevated(), 'scheduler_hints': {'different_host': [vol_id], }, 'request_spec': {'volume_id': fake.VOLUME_ID}} self.assertTrue(filt_cls.backend_passes(host, filter_properties)) @mock.patch('cinder.objects.service.Service.is_up', new_callable=mock.PropertyMock) def test_different_filter_legacy_volume_hint_passes( self, _mock_serv_is_up): _mock_serv_is_up.return_value = True filt_cls = self.class_map['DifferentBackendFilter']() service = {'disabled': False} host = fakes.FakeBackendState('host1:pool0', {'free_capacity_gb': '1000', 'updated_at': None, 'service': service}) volume = utils.create_volume(self.context, host='host1') 
vol_id = volume.id filter_properties = {'context': self.context.elevated(), 'scheduler_hints': {'different_host': [vol_id], }, 'request_spec': {'volume_id': fake.VOLUME_ID}} self.assertTrue(filt_cls.backend_passes(host, filter_properties)) def test_different_filter_non_list_fails(self): filt_cls = self.class_map['DifferentBackendFilter']() host = fakes.FakeBackendState('host2', {}) volume = utils.create_volume(self.context, host='host2') vol_id = volume.id filter_properties = {'context': self.context.elevated(), 'scheduler_hints': { 'different_host': vol_id}} self.assertFalse(filt_cls.backend_passes(host, filter_properties)) def test_different_filter_fails(self): filt_cls = self.class_map['DifferentBackendFilter']() host = fakes.FakeBackendState('host1', {}) volume = utils.create_volume(self.context, host='host1') vol_id = volume.id filter_properties = {'context': self.context.elevated(), 'scheduler_hints': {'different_host': [vol_id], }, 'request_spec': {'volume_id': fake.VOLUME_ID}} self.assertFalse(filt_cls.backend_passes(host, filter_properties)) def test_different_filter_handles_none(self): filt_cls = self.class_map['DifferentBackendFilter']() host = fakes.FakeBackendState('host1', {}) filter_properties = {'context': self.context.elevated(), 'scheduler_hints': None, 'request_spec': {'volume_id': fake.VOLUME_ID}} self.assertTrue(filt_cls.backend_passes(host, filter_properties)) def test_different_filter_handles_deleted_instance(self): filt_cls = self.class_map['DifferentBackendFilter']() host = fakes.FakeBackendState('host1', {}) volume = utils.create_volume(self.context, host='host1') vol_id = volume.id db.volume_destroy(utils.get_test_admin_context(), vol_id) filter_properties = {'context': self.context.elevated(), 'scheduler_hints': { 'different_host': [vol_id], }} self.assertTrue(filt_cls.backend_passes(host, filter_properties)) def test_different_filter_fail_nonuuid_hint(self): filt_cls = self.class_map['DifferentBackendFilter']() host = fakes.FakeBackendState('host1', {}) filter_properties = {'context': self.context.elevated(), 'scheduler_hints': { 'different_host': "NOT-a-valid-UUID", }} self.assertFalse(filt_cls.backend_passes(host, filter_properties)) def test_different_filter_handles_multiple_uuids(self): filt_cls = self.class_map['DifferentBackendFilter']() host = fakes.FakeBackendState('host1#pool0', {}) volume1 = utils.create_volume(self.context, host='host1:pool1') vol_id1 = volume1.id volume2 = utils.create_volume(self.context, host='host1:pool3') vol_id2 = volume2.id filter_properties = {'context': self.context.elevated(), 'scheduler_hints': { 'different_host': [vol_id1, vol_id2], }} self.assertTrue(filt_cls.backend_passes(host, filter_properties)) def test_different_filter_handles_invalid_uuids(self): filt_cls = self.class_map['DifferentBackendFilter']() host = fakes.FakeBackendState('host1', {}) volume = utils.create_volume(self.context, host='host2') vol_id = volume.id filter_properties = {'context': self.context.elevated(), 'scheduler_hints': { 'different_host': [vol_id, "NOT-a-valid-UUID"], }} self.assertFalse(filt_cls.backend_passes(host, filter_properties)) def test_same_filter_no_list_passes(self): filt_cls = self.class_map['SameBackendFilter']() host = fakes.FakeBackendState('host1', {}) volume = utils.create_volume(self.context, host='host1') vol_id = volume.id filter_properties = {'context': self.context.elevated(), 'scheduler_hints': { 'same_host': vol_id}} self.assertTrue(bool(filt_cls.backend_passes(host, filter_properties))) def 
test_same_filter_passes(self): filt_cls = self.class_map['SameBackendFilter']() host = fakes.FakeBackendState('host1#pool0', {}) volume = utils.create_volume(self.context, host='host1#pool0') vol_id = volume.id filter_properties = {'context': self.context.elevated(), 'scheduler_hints': { 'same_host': [vol_id], }} self.assertTrue(bool(filt_cls.backend_passes(host, filter_properties))) def test_same_filter_legacy_vol_fails(self): filt_cls = self.class_map['SameBackendFilter']() host = fakes.FakeBackendState('host1#pool0', {}) volume = utils.create_volume(self.context, host='host1') vol_id = volume.id filter_properties = {'context': self.context.elevated(), 'scheduler_hints': { 'same_host': [vol_id], }} result = filt_cls.backend_passes(host, filter_properties) self.assertEqual([], result.objects) def test_same_filter_fails(self): filt_cls = self.class_map['SameBackendFilter']() host = fakes.FakeBackendState('host1#pool0', {}) volume = utils.create_volume(self.context, host='host1#pool1') vol_id = volume.id filter_properties = {'context': self.context.elevated(), 'scheduler_hints': { 'same_host': [vol_id], }} result = filt_cls.backend_passes(host, filter_properties) self.assertEqual([], result.objects) def test_same_filter_vol_list_pass(self): filt_cls = self.class_map['SameBackendFilter']() host = fakes.FakeBackendState('host1', {}) volume1 = utils.create_volume(self.context, host='host1') vol_id1 = volume1.id volume2 = utils.create_volume(self.context, host='host2') vol_id2 = volume2.id filter_properties = {'context': self.context.elevated(), 'scheduler_hints': { 'same_host': [vol_id1, vol_id2], }} self.assertTrue(bool(filt_cls.backend_passes(host, filter_properties))) def test_same_filter_handles_none(self): filt_cls = self.class_map['SameBackendFilter']() host = fakes.FakeBackendState('host1', {}) filter_properties = {'context': self.context.elevated(), 'scheduler_hints': None} self.assertTrue(filt_cls.backend_passes(host, filter_properties)) def test_same_filter_handles_deleted_instance(self): filt_cls = self.class_map['SameBackendFilter']() host = fakes.FakeBackendState('host1', {}) volume = utils.create_volume(self.context, host='host2') vol_id = volume.id db.volume_destroy(utils.get_test_admin_context(), vol_id) filter_properties = {'context': self.context.elevated(), 'scheduler_hints': { 'same_host': [vol_id], }} result = filt_cls.backend_passes(host, filter_properties) self.assertEqual([], result.objects) def test_same_filter_fail_nonuuid_hint(self): filt_cls = self.class_map['SameBackendFilter']() host = fakes.FakeBackendState('host1', {}) filter_properties = {'context': self.context.elevated(), 'scheduler_hints': { 'same_host': "NOT-a-valid-UUID", }} self.assertFalse(filt_cls.backend_passes(host, filter_properties)) class DriverFilterTestCase(BackendFiltersTestCase): def test_passing_function(self): filt_cls = self.class_map['DriverFilter']() host1 = fakes.FakeBackendState( 'host1', { 'capabilities': { 'filter_function': '1 == 1', } }) filter_properties = {'volume_type': {}} self.assertTrue(filt_cls.backend_passes(host1, filter_properties)) def test_failing_function(self): filt_cls = self.class_map['DriverFilter']() host1 = fakes.FakeBackendState( 'host1', { 'capabilities': { 'filter_function': '1 == 2', } }) filter_properties = {'volume_type': {}} self.assertFalse(filt_cls.backend_passes(host1, filter_properties)) def test_no_filter_function(self): filt_cls = self.class_map['DriverFilter']() host1 = fakes.FakeBackendState( 'host1', { 'capabilities': { 'filter_function': None, } }) 
filter_properties = {'volume_type': {}} self.assertTrue(filt_cls.backend_passes(host1, filter_properties)) def test_not_implemented(self): filt_cls = self.class_map['DriverFilter']() host1 = fakes.FakeBackendState( 'host1', { 'capabilities': {} }) filter_properties = {'volume_type': {}} self.assertTrue(filt_cls.backend_passes(host1, filter_properties)) def test_no_volume_extra_specs(self): filt_cls = self.class_map['DriverFilter']() host1 = fakes.FakeBackendState( 'host1', { 'capabilities': { 'filter_function': '1 == 1', } }) filter_properties = {'volume_type': {}} self.assertTrue(filt_cls.backend_passes(host1, filter_properties)) def test_function_extra_spec_replacement(self): filt_cls = self.class_map['DriverFilter']() host1 = fakes.FakeBackendState( 'host1', { 'capabilities': { 'filter_function': 'extra.var == 1', } }) filter_properties = { 'volume_type': { 'extra_specs': { 'var': 1, } } } self.assertTrue(filt_cls.backend_passes(host1, filter_properties)) def test_function_stats_replacement(self): filt_cls = self.class_map['DriverFilter']() host1 = fakes.FakeBackendState( 'host1', { 'total_capacity_gb': 100, 'capabilities': { 'filter_function': 'stats.total_capacity_gb < 200', } }) filter_properties = {'volume_type': {}} self.assertTrue(filt_cls.backend_passes(host1, filter_properties)) def test_function_volume_replacement(self): filt_cls = self.class_map['DriverFilter']() host1 = fakes.FakeBackendState( 'host1', { 'capabilities': { 'filter_function': 'volume.size < 5', } }) filter_properties = { 'request_spec': { 'volume_properties': { 'size': 1 } } } self.assertTrue(filt_cls.backend_passes(host1, filter_properties)) def test_function_qos_spec_replacement(self): filt_cls = self.class_map['DriverFilter']() host1 = fakes.FakeBackendState( 'host1', { 'capabilities': { 'filter_function': 'qos.var == 1', } }) filter_properties = { 'qos_specs': { 'var': 1 } } self.assertTrue(filt_cls.backend_passes(host1, filter_properties)) def test_function_exception_caught(self): filt_cls = self.class_map['DriverFilter']() host1 = fakes.FakeBackendState( 'host1', { 'capabilities': { 'filter_function': '1 / 0 == 0', } }) filter_properties = {} self.assertFalse(filt_cls.backend_passes(host1, filter_properties)) def test_function_empty_qos(self): filt_cls = self.class_map['DriverFilter']() host1 = fakes.FakeBackendState( 'host1', { 'capabilities': { 'filter_function': 'qos.maxiops == 1', } }) filter_properties = { 'qos_specs': None } self.assertFalse(filt_cls.backend_passes(host1, filter_properties)) def test_capabilities(self): filt_cls = self.class_map['DriverFilter']() host1 = fakes.FakeBackendState( 'host1', { 'capabilities': { 'foo': 10, 'filter_function': 'capabilities.foo == 10', }, }) filter_properties = {} self.assertTrue(filt_cls.backend_passes(host1, filter_properties)) def test_wrong_capabilities(self): filt_cls = self.class_map['DriverFilter']() host1 = fakes.FakeBackendState( 'host1', { 'capabilities': { 'bar': 10, 'filter_function': 'capabilities.foo == 10', }, }) filter_properties = {} self.assertFalse(filt_cls.backend_passes(host1, filter_properties)) class InstanceLocalityFilterTestCase(BackendFiltersTestCase): def setUp(self): super(InstanceLocalityFilterTestCase, self).setUp() self.context.service_catalog = \ [{'type': 'compute', 'name': 'nova', 'endpoints': [{'publicURL': 'http://novahost:8774/v2/e3f0833dc08b4cea'}]}, {'type': 'identity', 'name': 'keystone', 'endpoints': [{'publicURL': 'http://keystonehost:5000/v2.0'}]}] @mock.patch('novaclient.client.discover_extensions') 
@mock.patch('cinder.compute.nova.novaclient') def test_same_host(self, _mock_novaclient, fake_extensions): _mock_novaclient.return_value = fakes.FakeNovaClient() fake_extensions.return_value = ( fakes.FakeNovaClient().list_extensions.show_all()) filt_cls = self.class_map['InstanceLocalityFilter']() host = fakes.FakeBackendState('host1', {}) uuid = nova.novaclient(context=self.context).servers.create('host1') filter_properties = {'context': self.context, 'scheduler_hints': {'local_to_instance': uuid}, 'request_spec': {'volume_id': fake.VOLUME_ID}} self.assertTrue(filt_cls.backend_passes(host, filter_properties)) @mock.patch('novaclient.client.discover_extensions') @mock.patch('cinder.compute.nova.novaclient') def test_different_host(self, _mock_novaclient, fake_extensions): _mock_novaclient.return_value = fakes.FakeNovaClient() fake_extensions.return_value = ( fakes.FakeNovaClient().list_extensions.show_all()) filt_cls = self.class_map['InstanceLocalityFilter']() host = fakes.FakeBackendState('host1', {}) uuid = nova.novaclient(context=self.context).servers.create('host2') filter_properties = {'context': self.context, 'scheduler_hints': {'local_to_instance': uuid}, 'request_spec': {'volume_id': fake.VOLUME_ID}} self.assertFalse(filt_cls.backend_passes(host, filter_properties)) def test_handles_none(self): filt_cls = self.class_map['InstanceLocalityFilter']() host = fakes.FakeBackendState('host1', {}) filter_properties = {'context': self.context, 'scheduler_hints': None, 'request_spec': {'volume_id': fake.VOLUME_ID}} self.assertTrue(filt_cls.backend_passes(host, filter_properties)) def test_invalid_uuid(self): filt_cls = self.class_map['InstanceLocalityFilter']() host = fakes.FakeBackendState('host1', {}) filter_properties = {'context': self.context, 'scheduler_hints': {'local_to_instance': 'e29b11d4-not-valid-a716'}, 'request_spec': {'volume_id': fake.VOLUME_ID}} self.assertRaises(exception.InvalidUUID, filt_cls.backend_passes, host, filter_properties) @mock.patch('cinder.compute.nova.novaclient') def test_nova_down_does_not_alter_other_filters(self, _mock_novaclient): # Simulate Nova API is not available _mock_novaclient.side_effect = Exception filt_cls = self.class_map['InstanceLocalityFilter']() host = fakes.FakeBackendState('host1', {}) filter_properties = {'context': self.context, 'size': 100, 'request_spec': {'volume_id': fake.VOLUME_ID}} self.assertTrue(filt_cls.backend_passes(host, filter_properties)) @mock.patch('cinder.compute.nova.novaclient') def test_nova_timeout(self, mock_novaclient): # Simulate a HTTP timeout mock_get = mock_novaclient.return_value.servers.get mock_get.side_effect = request_exceptions.Timeout filt_cls = self.class_map['InstanceLocalityFilter']() host = fakes.FakeBackendState('host1', {}) filter_properties = \ {'context': self.context, 'scheduler_hints': {'local_to_instance': 'e29b11d4-15ef-34a9-a716-598a6f0b5467'}, 'request_spec': {'volume_id': fake.VOLUME_ID}} self.assertRaises(exception.APITimeout, filt_cls.backend_passes, host, filter_properties) class TestFilter(filters.BaseBackendFilter): pass class TestBogusFilter(object): """Class that doesn't inherit from BaseBackendFilter.""" pass @ddt.ddt class ExtraSpecsOpsTestCase(test.TestCase): def _do_extra_specs_ops_test(self, value, req, matches): assertion = self.assertTrue if matches else self.assertFalse assertion(extra_specs_ops.match(value, req)) def test_extra_specs_fails_with_bogus_ops(self): self._do_extra_specs_ops_test( value='4', req='> 2', matches=False) @ddt.data({'value': '1', 'req': '1', 
'matches': True}, {'value': '', 'req': '1', 'matches': False}, {'value': '3', 'req': '1', 'matches': False}, {'value': '222', 'req': '2', 'matches': False}) @ddt.unpack def test_extra_specs_matches_simple(self, value, req, matches): self._do_extra_specs_ops_test( value=value, req=req, matches=matches) @ddt.data({'value': '123', 'req': '= 123', 'matches': True}, {'value': '124', 'req': '= 123', 'matches': True}, {'value': '34', 'req': '= 234', 'matches': False}, {'value': '34', 'req': '=', 'matches': False}) @ddt.unpack def test_extra_specs_matches_with_op_eq(self, value, req, matches): self._do_extra_specs_ops_test( value=value, req=req, matches=matches) @ddt.data({'value': '2', 'req': '<= 10', 'matches': True}, {'value': '3', 'req': '<= 2', 'matches': False}, {'value': '3', 'req': '>= 1', 'matches': True}, {'value': '2', 'req': '>= 3', 'matches': False}) @ddt.unpack def test_extra_specs_matches_with_op_not_eq(self, value, req, matches): self._do_extra_specs_ops_test( value=value, req=req, matches=matches) @ddt.data({'value': '123', 'req': 's== 123', 'matches': True}, {'value': '1234', 'req': 's== 123', 'matches': False}, {'value': '1234', 'req': 's!= 123', 'matches': True}, {'value': '123', 'req': 's!= 123', 'matches': False}) @ddt.unpack def test_extra_specs_matches_with_op_seq(self, value, req, matches): self._do_extra_specs_ops_test( value=value, req=req, matches=matches) @ddt.data({'value': '1000', 'req': 's>= 234', 'matches': False}, {'value': '1234', 'req': 's<= 1000', 'matches': False}, {'value': '2', 'req': 's< 12', 'matches': False}, {'value': '12', 'req': 's> 2', 'matches': False}) @ddt.unpack def test_extra_specs_fails_with_op_not_seq(self, value, req, matches): self._do_extra_specs_ops_test( value=value, req=req, matches=matches) @ddt.data({'value': '12311321', 'req': ' 11', 'matches': True}, {'value': '12311321', 'req': ' 12311321', 'matches': True}, {'value': '12311321', 'req': ' 12311321 ', 'matches': True}, {'value': '12310321', 'req': ' 11', 'matches': False}, {'value': '12310321', 'req': ' 11 ', 'matches': False}) @ddt.unpack def test_extra_specs_matches_with_op_in(self, value, req, matches): self._do_extra_specs_ops_test( value=value, req=req, matches=matches) @ddt.data({'value': True, 'req': ' True', 'matches': True}, {'value': False, 'req': ' False', 'matches': True}, {'value': False, 'req': ' Nonsense', 'matches': True}, {'value': True, 'req': ' False', 'matches': False}, {'value': False, 'req': ' True', 'matches': False}) @ddt.unpack def test_extra_specs_matches_with_op_is(self, value, req, matches): self._do_extra_specs_ops_test( value=value, req=req, matches=matches) @ddt.data({'value': '12', 'req': ' 11 12', 'matches': True}, {'value': '12', 'req': ' 11 12 ', 'matches': True}, {'value': '13', 'req': ' 11 12', 'matches': False}, {'value': '13', 'req': ' 11 12 ', 'matches': False}) @ddt.unpack def test_extra_specs_matches_with_op_or(self, value, req, matches): self._do_extra_specs_ops_test( value=value, req=req, matches=matches) @ddt.data({'value': None, 'req': None, 'matches': True}, {'value': 'foo', 'req': None, 'matches': False}) @ddt.unpack def test_extra_specs_matches_none_req(self, value, req, matches): self._do_extra_specs_ops_test( value=value, req=req, matches=matches) @ddt.ddt class BasicFiltersTestCase(BackendFiltersTestCase): """Test case for host filters.""" def setUp(self): super(BasicFiltersTestCase, self).setUp() self.json_query = jsonutils.dumps( ['and', ['>=', '$free_ram_mb', 1024], ['>=', '$free_disk_mb', 200 * 1024]]) def test_all_filters(self): 
# Double check at least a couple of known filters exist self.assertIn('JsonFilter', self.class_map) self.assertIn('CapabilitiesFilter', self.class_map) self.assertIn('AvailabilityZoneFilter', self.class_map) self.assertIn('IgnoreAttemptedHostsFilter', self.class_map) def _do_test_type_filter_extra_specs(self, ecaps, especs, passes): filt_cls = self.class_map['CapabilitiesFilter']() capabilities = {'enabled': True} capabilities.update(ecaps) service = {'disabled': False} filter_properties = {'resource_type': {'name': 'fake_type', 'extra_specs': especs}, 'request_spec': {'volume_id': fake.VOLUME_ID}} host = fakes.FakeBackendState('host1', {'free_capacity_gb': 1024, 'capabilities': capabilities, 'service': service}) assertion = self.assertTrue if passes else self.assertFalse assertion(filt_cls.backend_passes(host, filter_properties)) def test_capability_filter_passes_extra_specs_simple(self): self._do_test_type_filter_extra_specs( ecaps={'opt1': '1', 'opt2': '2'}, especs={'opt1': '1', 'opt2': '2'}, passes=True) def test_capability_filter_fails_extra_specs_simple(self): self._do_test_type_filter_extra_specs( ecaps={'opt1': '1', 'opt2': '2'}, especs={'opt1': '1', 'opt2': '222'}, passes=False) def test_capability_filter_passes_extra_specs_complex(self): self._do_test_type_filter_extra_specs( ecaps={'opt1': 10, 'opt2': 5}, especs={'opt1': '>= 2', 'opt2': '<= 8'}, passes=True) def test_capability_filter_fails_extra_specs_complex(self): self._do_test_type_filter_extra_specs( ecaps={'opt1': 10, 'opt2': 5}, especs={'opt1': '>= 2', 'opt2': '>= 8'}, passes=False) def test_capability_filter_passes_extra_specs_list_simple(self): self._do_test_type_filter_extra_specs( ecaps={'opt1': ['1', '2'], 'opt2': '2'}, especs={'opt1': '1', 'opt2': '2'}, passes=True) @ddt.data(' True', ' False') def test_capability_filter_passes_extra_specs_list_complex(self, opt1): self._do_test_type_filter_extra_specs( ecaps={'opt1': [True, False], 'opt2': ['1', '2']}, especs={'opt1': opt1, 'opt2': '<= 8'}, passes=True) def test_capability_filter_fails_extra_specs_list_simple(self): self._do_test_type_filter_extra_specs( ecaps={'opt1': ['1', '2'], 'opt2': ['2']}, especs={'opt1': '3', 'opt2': '2'}, passes=False) def test_capability_filter_fails_extra_specs_list_complex(self): self._do_test_type_filter_extra_specs( ecaps={'opt1': [True, False], 'opt2': ['1', '2']}, especs={'opt1': 'fake', 'opt2': '<= 8'}, passes=False) def test_capability_filter_passes_scope_extra_specs(self): self._do_test_type_filter_extra_specs( ecaps={'scope_lv1': {'opt1': 10}}, especs={'capabilities:scope_lv1:opt1': '>= 2'}, passes=True) def test_capability_filter_passes_fakescope_extra_specs(self): self._do_test_type_filter_extra_specs( ecaps={'scope_lv1': {'opt1': 10}, 'opt2': 5}, especs={'scope_lv1:opt1': '= 2', 'opt2': '>= 3'}, passes=True) def test_capability_filter_fails_scope_extra_specs(self): self._do_test_type_filter_extra_specs( ecaps={'scope_lv1': {'opt1': 10}}, especs={'capabilities:scope_lv1:opt1': '<= 2'}, passes=False) def test_capability_filter_passes_multi_level_scope_extra_specs(self): self._do_test_type_filter_extra_specs( ecaps={'scope_lv0': {'scope_lv1': {'scope_lv2': {'opt1': 10}}}}, especs={'capabilities:scope_lv0:scope_lv1:scope_lv2:opt1': '>= 2'}, passes=True) def test_capability_filter_fails_unenough_level_scope_extra_specs(self): self._do_test_type_filter_extra_specs( ecaps={'scope_lv0': {'scope_lv1': None}}, especs={'capabilities:scope_lv0:scope_lv1:scope_lv2:opt1': '>= 2'}, passes=False) def 
test_capability_filter_fails_wrong_scope_extra_specs(self): self._do_test_type_filter_extra_specs( ecaps={'scope_lv0': {'opt1': 10}}, especs={'capabilities:scope_lv1:opt1': '>= 2'}, passes=False) def test_capability_filter_passes_none_extra_specs(self): self._do_test_type_filter_extra_specs( ecaps={'scope_lv0': {'opt1': None}}, especs={'capabilities:scope_lv0:opt1': None}, passes=True) def test_capability_filter_fails_none_extra_specs(self): self._do_test_type_filter_extra_specs( ecaps={'scope_lv0': {'opt1': 10}}, especs={'capabilities:scope_lv0:opt1': None}, passes=False) def test_capability_filter_fails_none_caps(self): self._do_test_type_filter_extra_specs( ecaps={'scope_lv0': {'opt1': None}}, especs={'capabilities:scope_lv0:opt1': 'foo'}, passes=False) def test_capability_filter_passes_multi_level_scope_extra_specs_list(self): self._do_test_type_filter_extra_specs( ecaps={ 'scope_lv0': { 'scope_lv1': { 'scope_lv2': { 'opt1': [True, False], }, }, }, }, especs={ 'capabilities:scope_lv0:scope_lv1:scope_lv2:opt1': ' True', }, passes=True) def test_capability_filter_fails_multi_level_scope_extra_specs_list(self): self._do_test_type_filter_extra_specs( ecaps={ 'scope_lv0': { 'scope_lv1': { 'scope_lv2': { 'opt1': [True, False], 'opt2': ['1', '2'], }, }, }, }, especs={ 'capabilities:scope_lv0:scope_lv1:scope_lv2:opt1': ' True', 'capabilities:scope_lv0:scope_lv1:scope_lv2:opt2': '3', }, passes=False) def test_capability_filter_fails_wrong_scope_extra_specs_list(self): self._do_test_type_filter_extra_specs( ecaps={'scope_lv0': {'opt1': [True, False]}}, especs={'capabilities:scope_lv1:opt1': ' True'}, passes=False) def test_json_filter_passes(self): filt_cls = self.class_map['JsonFilter']() filter_properties = {'resource_type': {'memory_mb': 1024, 'root_gb': 200, 'ephemeral_gb': 0}, 'scheduler_hints': {'query': self.json_query}, 'request_spec': {'volume_id': fake.VOLUME_ID}} capabilities = {'enabled': True} host = fakes.FakeBackendState('host1', {'free_ram_mb': 1024, 'free_disk_mb': 200 * 1024, 'capabilities': capabilities}) self.assertTrue(filt_cls.backend_passes(host, filter_properties)) def test_json_filter_passes_with_no_query(self): filt_cls = self.class_map['JsonFilter']() filter_properties = {'resource_type': {'memory_mb': 1024, 'root_gb': 200, 'ephemeral_gb': 0}, 'request_spec': {'volume_id': fake.VOLUME_ID}} capabilities = {'enabled': True} host = fakes.FakeBackendState('host1', {'free_ram_mb': 0, 'free_disk_mb': 0, 'capabilities': capabilities}) self.assertTrue(filt_cls.backend_passes(host, filter_properties)) def test_json_filter_fails_on_memory(self): filt_cls = self.class_map['JsonFilter']() filter_properties = {'resource_type': {'memory_mb': 1024, 'root_gb': 200, 'ephemeral_gb': 0}, 'scheduler_hints': {'query': self.json_query}, 'request_spec': {'volume_id': fake.VOLUME_ID}} capabilities = {'enabled': True} host = fakes.FakeBackendState('host1', {'free_ram_mb': 1023, 'free_disk_mb': 200 * 1024, 'capabilities': capabilities}) self.assertFalse(filt_cls.backend_passes(host, filter_properties)) def test_json_filter_fails_on_disk(self): filt_cls = self.class_map['JsonFilter']() filter_properties = {'resource_type': {'memory_mb': 1024, 'root_gb': 200, 'ephemeral_gb': 0}, 'scheduler_hints': {'query': self.json_query}, 'request_spec': {'volume_id': fake.VOLUME_ID}} capabilities = {'enabled': True} host = fakes.FakeBackendState('host1', {'free_ram_mb': 1024, 'free_disk_mb': (200 * 1024) - 1, 'capabilities': capabilities}) self.assertFalse(filt_cls.backend_passes(host, filter_properties)) def 
test_json_filter_fails_on_caps_disabled(self): filt_cls = self.class_map['JsonFilter']() json_query = jsonutils.dumps( ['and', ['>=', '$free_ram_mb', 1024], ['>=', '$free_disk_mb', 200 * 1024], '$capabilities.enabled']) filter_properties = {'resource_type': {'memory_mb': 1024, 'root_gb': 200, 'ephemeral_gb': 0}, 'scheduler_hints': {'query': json_query}, 'request_spec': {'volume_id': fake.VOLUME_ID}} capabilities = {'enabled': False} host = fakes.FakeBackendState('host1', {'free_ram_mb': 1024, 'free_disk_mb': 200 * 1024, 'capabilities': capabilities}) self.assertFalse(filt_cls.backend_passes(host, filter_properties)) def test_json_filter_fails_on_service_disabled(self): filt_cls = self.class_map['JsonFilter']() json_query = jsonutils.dumps( ['and', ['>=', '$free_ram_mb', 1024], ['>=', '$free_disk_mb', 200 * 1024], ['not', '$service.disabled']]) filter_properties = {'resource_type': {'memory_mb': 1024, 'local_gb': 200}, 'scheduler_hints': {'query': json_query}, 'request_spec': {'volume_id': fake.VOLUME_ID}} capabilities = {'enabled': True} host = fakes.FakeBackendState('host1', {'free_ram_mb': 1024, 'free_disk_mb': 200 * 1024, 'capabilities': capabilities}) self.assertFalse(filt_cls.backend_passes(host, filter_properties)) def test_json_filter_happy_day(self): """Test json filter more thoroughly.""" filt_cls = self.class_map['JsonFilter']() raw = ['and', '$capabilities.enabled', ['=', '$capabilities.opt1', 'match'], ['or', ['and', ['<', '$free_ram_mb', 30], ['<', '$free_disk_mb', 300]], ['and', ['>', '$free_ram_mb', 30], ['>', '$free_disk_mb', 300]]]] filter_properties = { 'scheduler_hints': { 'query': jsonutils.dumps(raw), }, 'request_spec': {'volume_id': fake.VOLUME_ID} } # Passes capabilities = {'enabled': True, 'opt1': 'match'} service = {'disabled': False} host = fakes.FakeBackendState('host1', {'free_ram_mb': 10, 'free_disk_mb': 200, 'capabilities': capabilities, 'service': service}) self.assertTrue(filt_cls.backend_passes(host, filter_properties)) # Passes capabilities = {'enabled': True, 'opt1': 'match'} service = {'disabled': False} host = fakes.FakeBackendState('host1', {'free_ram_mb': 40, 'free_disk_mb': 400, 'capabilities': capabilities, 'service': service}) self.assertTrue(filt_cls.backend_passes(host, filter_properties)) # Fails due to capabilities being disabled capabilities = {'enabled': False, 'opt1': 'match'} service = {'disabled': False} host = fakes.FakeBackendState('host1', {'free_ram_mb': 40, 'free_disk_mb': 400, 'capabilities': capabilities, 'service': service}) self.assertFalse(filt_cls.backend_passes(host, filter_properties)) # Fails due to being exact memory/disk we don't want capabilities = {'enabled': True, 'opt1': 'match'} service = {'disabled': False} host = fakes.FakeBackendState('host1', {'free_ram_mb': 30, 'free_disk_mb': 300, 'capabilities': capabilities, 'service': service}) self.assertFalse(filt_cls.backend_passes(host, filter_properties)) # Fails due to memory lower but disk higher capabilities = {'enabled': True, 'opt1': 'match'} service = {'disabled': False} host = fakes.FakeBackendState('host1', {'free_ram_mb': 20, 'free_disk_mb': 400, 'capabilities': capabilities, 'service': service}) self.assertFalse(filt_cls.backend_passes(host, filter_properties)) # Fails due to capabilities 'opt1' not equal capabilities = {'enabled': True, 'opt1': 'no-match'} service = {'enabled': True} host = fakes.FakeBackendState('host1', {'free_ram_mb': 20, 'free_disk_mb': 400, 'capabilities': capabilities, 'service': service}) self.assertFalse(filt_cls.backend_passes(host, 
filter_properties)) def test_json_filter_basic_operators(self): filt_cls = self.class_map['JsonFilter']() host = fakes.FakeBackendState('host1', {'capabilities': {'enabled': True}}) # (operator, arguments, expected_result) ops_to_test = [ ['=', [1, 1], True], ['=', [1, 2], False], ['<', [1, 2], True], ['<', [1, 1], False], ['<', [2, 1], False], ['>', [2, 1], True], ['>', [2, 2], False], ['>', [2, 3], False], ['<=', [1, 2], True], ['<=', [1, 1], True], ['<=', [2, 1], False], ['>=', [2, 1], True], ['>=', [2, 2], True], ['>=', [2, 3], False], ['in', [1, 1], True], ['in', [1, 1, 2, 3], True], ['in', [4, 1, 2, 3], False], ['not', [True], False], ['not', [False], True], ['or', [True, False], True], ['or', [False, False], False], ['and', [True, True], True], ['and', [False, False], False], ['and', [True, False], False], # Nested ((True or False) and (2 > 1)) == Passes ['and', [['or', True, False], ['>', 2, 1]], True]] for (op, args, expected) in ops_to_test: raw = [op] + args filter_properties = { 'scheduler_hints': { 'query': jsonutils.dumps(raw), }, 'request_spec': {'volume_id': fake.VOLUME_ID} } self.assertEqual(expected, filt_cls.backend_passes(host, filter_properties)) # This results in [False, True, False, True] and if any are True # then it passes... raw = ['not', True, False, True, False] filter_properties = { 'scheduler_hints': { 'query': jsonutils.dumps(raw), }, } self.assertTrue(filt_cls.backend_passes(host, filter_properties)) # This results in [False, False, False] and if any are True # then it passes...which this doesn't raw = ['not', True, True, True] filter_properties = { 'scheduler_hints': { 'query': jsonutils.dumps(raw), }, } self.assertFalse(filt_cls.backend_passes(host, filter_properties)) def test_json_filter_unknown_operator_raises(self): filt_cls = self.class_map['JsonFilter']() raw = ['!=', 1, 2] filter_properties = { 'scheduler_hints': { 'query': jsonutils.dumps(raw), }, } host = fakes.FakeBackendState('host1', {'capabilities': {'enabled': True}}) self.assertRaises(KeyError, filt_cls.backend_passes, host, filter_properties) def test_json_filter_empty_filters_pass(self): filt_cls = self.class_map['JsonFilter']() host = fakes.FakeBackendState('host1', {'capabilities': {'enabled': True}}) raw = [] filter_properties = { 'scheduler_hints': { 'query': jsonutils.dumps(raw), }, } self.assertTrue(filt_cls.backend_passes(host, filter_properties)) raw = {} filter_properties = { 'scheduler_hints': { 'query': jsonutils.dumps(raw), }, } self.assertTrue(filt_cls.backend_passes(host, filter_properties)) def test_json_filter_invalid_num_arguments_fails(self): filt_cls = self.class_map['JsonFilter']() host = fakes.FakeBackendState('host1', {'capabilities': {'enabled': True}}) raw = ['>', ['and', ['or', ['not', ['<', ['>=', ['<=', ['in', ]]]]]]]] filter_properties = { 'scheduler_hints': { 'query': jsonutils.dumps(raw), }, } self.assertFalse(filt_cls.backend_passes(host, filter_properties)) raw = ['>', 1] filter_properties = { 'scheduler_hints': { 'query': jsonutils.dumps(raw), }, } self.assertFalse(filt_cls.backend_passes(host, filter_properties)) def test_json_filter_unknown_variable_ignored(self): filt_cls = self.class_map['JsonFilter']() host = fakes.FakeBackendState('host1', {'capabilities': {'enabled': True}}) raw = ['=', '$........', 1, 1] filter_properties = { 'scheduler_hints': { 'query': jsonutils.dumps(raw), }, } self.assertTrue(filt_cls.backend_passes(host, filter_properties)) raw = ['=', '$foo', 2, 2] filter_properties = { 'scheduler_hints': { 'query': jsonutils.dumps(raw), }, } 
self.assertTrue(filt_cls.backend_passes(host, filter_properties)) @staticmethod def _make_zone_request(zone, is_admin=False): ctxt = context.RequestContext('fake', 'fake', is_admin=is_admin) return { 'context': ctxt, 'request_spec': { 'resource_properties': { 'availability_zone': zone } } } def test_availability_zone_filter_same(self): filt_cls = self.class_map['AvailabilityZoneFilter']() service = {'availability_zone': 'nova'} request = self._make_zone_request('nova') host = fakes.FakeBackendState('host1', {'service': service}) self.assertTrue(filt_cls.backend_passes(host, request)) def test_availability_zone_filter_with_AZs(self): filt_cls = self.class_map['AvailabilityZoneFilter']() ctxt = context.RequestContext('fake', 'fake', is_admin=False) request = { 'context': ctxt, 'request_spec': {'availability_zones': ['nova1', 'nova2']} } host1 = fakes.FakeBackendState( 'host1', {'service': {'availability_zone': 'nova1'}}) host2 = fakes.FakeBackendState( 'host2', {'service': {'availability_zone': 'nova2'}}) host3 = fakes.FakeBackendState( 'host3', {'service': {'availability_zone': 'nova3'}}) self.assertTrue(filt_cls.backend_passes(host1, request)) self.assertTrue(filt_cls.backend_passes(host2, request)) self.assertFalse(filt_cls.backend_passes(host3, request)) def test_availability_zone_filter_different(self): filt_cls = self.class_map['AvailabilityZoneFilter']() service = {'availability_zone': 'nova'} request = self._make_zone_request('bad') host = fakes.FakeBackendState('host1', {'service': service}) self.assertFalse(filt_cls.backend_passes(host, request)) def test_availability_zone_filter_empty(self): filt_cls = self.class_map['AvailabilityZoneFilter']() service = {'availability_zone': 'nova'} request = {} host = fakes.FakeBackendState('host1', {'service': service}) self.assertTrue(filt_cls.backend_passes(host, request)) def test_ignore_attempted_hosts_filter_disabled(self): # Test case where re-scheduling is disabled. filt_cls = self.class_map['IgnoreAttemptedHostsFilter']() host = fakes.FakeBackendState('host1', {}) filter_properties = {} self.assertTrue(filt_cls.backend_passes(host, filter_properties)) def test_ignore_attempted_hosts_filter_pass(self): # Node not previously tried. filt_cls = self.class_map['IgnoreAttemptedHostsFilter']() host = fakes.FakeBackendState('host1', {}) attempted = dict(num_attempts=2, hosts=['host2']) filter_properties = dict(retry=attempted) self.assertTrue(filt_cls.backend_passes(host, filter_properties)) def test_ignore_attempted_hosts_filter_fail(self): # Node was already tried. filt_cls = self.class_map['IgnoreAttemptedHostsFilter']() host = fakes.FakeBackendState('host1', {}) attempted = dict(num_attempts=2, backends=['host1']) filter_properties = dict(retry=attempted) self.assertFalse(filt_cls.backend_passes(host, filter_properties)) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/scheduler/test_host_manager.py0000664000175000017500000021126300000000000024517 0ustar00zuulzuul00000000000000# Copyright (c) 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Tests For HostManager.""" from datetime import datetime from datetime import timedelta from unittest import mock import ddt from oslo_config import cfg from oslo_serialization import jsonutils from oslo_utils import timeutils from cinder.common import constants from cinder import context from cinder import db from cinder import exception from cinder import objects from cinder.scheduler import filters from cinder.scheduler import host_manager from cinder.tests.unit import fake_constants as fake from cinder.tests.unit.objects import test_service from cinder.tests.unit.scheduler import helpers from cinder.tests.unit import test CONF = cfg.CONF class FakeFilterClass1(filters.BaseBackendFilter): def backend_passes(self, host_state, filter_properties): pass class FakeFilterClass2(filters.BaseBackendFilter): def backend_passes(self, host_state, filter_properties): pass class FakeFilterClass3(filters.BaseHostFilter): def host_passes(self, host_state, filter_properties): return host_state.get('volume_backend_name') == \ filter_properties.get('volume_type')['volume_backend_name'] @ddt.ddt class HostManagerTestCase(test.TestCase): """Test case for HostManager class.""" def setUp(self): super(HostManagerTestCase, self).setUp() with mock.patch('cinder.scheduler.filters.BackendFilterHandler.' 'get_all_classes', return_value=helpers.ALL_FILTER_CLASSES[:]): self.host_manager = host_manager.HostManager() self.fake_backends = [host_manager.BackendState('fake_be%s' % x, None) for x in range(1, 5)] # For a second scheduler service. with mock.patch('cinder.scheduler.filters.BackendFilterHandler.' 'get_all_classes', return_value=helpers.ALL_FILTER_CLASSES[:]): self.host_manager_1 = host_manager.HostManager() @mock.patch( 'cinder.scheduler.filters.BackendFilterHandler.get_all_classes') def test_initialize_with_default_filters(self, mock_get_all_classes): self.flags(scheduler_default_filters= 'FakeFilterClass1,FakeFilterClass2') mock_get_all_classes.return_value = [ FakeFilterClass1, FakeFilterClass2, FakeFilterClass3] h_manager = host_manager.HostManager() self.assertListEqual([FakeFilterClass1, FakeFilterClass2], h_manager.enabled_filters) def test_choose_backend_filters_not_found(self): self.host_manager.filter_classes = [FakeFilterClass1, FakeFilterClass2] self.assertRaises(exception.SchedulerHostFilterNotFound, self.host_manager._choose_backend_filters, 'FakeFilterClass3') def test_choose_backend_filters(self): self.host_manager.filter_classes = [FakeFilterClass1, FakeFilterClass2] # Test 'volume' returns 1 correct function filter_classes = self.host_manager._choose_backend_filters( 'FakeFilterClass2') self.assertEqual(1, len(filter_classes)) self.assertEqual('FakeFilterClass2', filter_classes[0].__name__) @mock.patch('cinder.scheduler.host_manager.HostManager.' 
'_choose_backend_filters') def test_get_filtered_backends(self, _mock_choose_backend_filters): filter_class = FakeFilterClass1 mock_func = mock.Mock() mock_func.return_value = True filter_class._filter_one = mock_func _mock_choose_backend_filters.return_value = [filter_class] fake_properties = {'moo': 1, 'cow': 2} expected = [] for fake_backend in self.fake_backends: expected.append(mock.call(fake_backend, fake_properties)) host_manager1 = host_manager.HostManager() result = host_manager1.get_filtered_backends(self.fake_backends, fake_properties) self.assertEqual(expected, mock_func.call_args_list) self.assertEqual(set(self.fake_backends), set(result)) @mock.patch( 'cinder.scheduler.host_manager.HostManager._is_just_initialized') @mock.patch('cinder.scheduler.host_manager.HostManager._get_updated_pools') @mock.patch('oslo_utils.timeutils.utcnow') def test_update_service_capabilities(self, _mock_utcnow, _mock_get_updated_pools, _mock_is_just_initialized): service_states = self.host_manager.service_states self.assertDictEqual({}, service_states) _mock_is_just_initialized.return_value = False _mock_utcnow.side_effect = [31338, 31339] _mock_get_updated_pools.return_value = [] timestamp = jsonutils.to_primitive(datetime.utcnow()) host1_volume_capabs = dict(free_capacity_gb=4321, timestamp=timestamp) host1_old_volume_capabs = dict(free_capacity_gb=1, timestamp=timestamp) host2_volume_capabs = dict(free_capacity_gb=5432) host3_volume_capabs = dict(free_capacity_gb=6543) service_name = 'volume' # The host manager receives a deserialized timestamp timestamp = datetime.strptime(timestamp, timeutils.PERFECT_TIME_FORMAT) self.host_manager.update_service_capabilities(service_name, 'host1', host1_volume_capabs, None, timestamp) # It'll ignore older updates old_timestamp = timestamp - timedelta(hours=1) self.host_manager.update_service_capabilities(service_name, 'host1', host1_old_volume_capabs, None, old_timestamp) self.host_manager.update_service_capabilities(service_name, 'host2', host2_volume_capabs, None, None) self.host_manager.update_service_capabilities(service_name, 'host3', host3_volume_capabs, None, None) # Make sure dictionary isn't re-assigned self.assertEqual(service_states, self.host_manager.service_states) host1_volume_capabs['timestamp'] = timestamp host2_volume_capabs['timestamp'] = 31338 host3_volume_capabs['timestamp'] = 31339 expected = {'host1': host1_volume_capabs, 'host2': host2_volume_capabs, 'host3': host3_volume_capabs} self.assertDictEqual(expected, service_states) @mock.patch( 'cinder.scheduler.host_manager.HostManager._is_just_initialized') @mock.patch( 'cinder.scheduler.host_manager.HostManager.get_usage_and_notify') @mock.patch('oslo_utils.timeutils.utcnow') def test_update_and_notify_service_capabilities_case1( self, _mock_utcnow, _mock_get_usage_and_notify, _mock_is_just_initialized): _mock_utcnow.side_effect = [31337, 31338, 31339] _mock_is_just_initialized.return_value = False service_name = 'volume' capab1 = {'pools': [{ 'pool_name': 'pool1', 'thick_provisioning_support': True, 'thin_provisioning_support': False, 'total_capacity_gb': 10, 'free_capacity_gb': 10, 'max_over_subscription_ratio': '1', 'provisioned_capacity_gb': 0, 'allocated_capacity_gb': 0, 'reserved_percentage': 0}]} # Run 1: # capa: capa1 # S0: update_service_capabilities() # S0: notify_service_capabilities() # S1: update_service_capabilities() # # notify capab1 to ceilometer by S0 # # S0: update_service_capabilities() self.host_manager.update_service_capabilities(service_name, 'host1', capab1, None, 
None) self.assertDictEqual(dict(dict(timestamp=31337), **capab1), self.host_manager.service_states['host1']) # S0: notify_service_capabilities() self.host_manager.notify_service_capabilities(service_name, 'host1', capab1, None) self.assertDictEqual(dict(dict(timestamp=31337), **capab1), self.host_manager.service_states['host1']) self.assertDictEqual( dict(dict(timestamp=31338), **capab1), self.host_manager.service_states_last_update['host1']) # notify capab1 to ceilometer by S0 self.assertEqual(1, _mock_get_usage_and_notify.call_count) # S1: update_service_capabilities() self.host_manager_1.update_service_capabilities(service_name, 'host1', capab1, None, None) self.assertDictEqual(dict(dict(timestamp=31339), **capab1), self.host_manager_1.service_states['host1']) @mock.patch( 'cinder.scheduler.host_manager.HostManager._is_just_initialized') @mock.patch( 'cinder.scheduler.host_manager.HostManager.get_usage_and_notify') @mock.patch('oslo_utils.timeutils.utcnow') def test_update_and_notify_service_capabilities_case2( self, _mock_utcnow, _mock_get_usage_and_notify, _mock_is_just_initialized): _mock_utcnow.side_effect = [31340, 31341, 31342] _mock_is_just_initialized.return_value = False service_name = 'volume' capab1 = {'pools': [{ 'pool_name': 'pool1', 'thick_provisioning_support': True, 'thin_provisioning_support': False, 'total_capacity_gb': 10, 'free_capacity_gb': 10, 'max_over_subscription_ratio': '1', 'provisioned_capacity_gb': 0, 'allocated_capacity_gb': 0, 'reserved_percentage': 0}]} self.host_manager.service_states['host1'] = ( dict(dict(timestamp=31337), **capab1)) self.host_manager.service_states_last_update['host1'] = ( dict(dict(timestamp=31338), **capab1)) self.host_manager_1.service_states['host1'] = ( dict(dict(timestamp=31339), **capab1)) # Run 2: # capa: capa1 # S0: update_service_capabilities() # S1: update_service_capabilities() # S1: notify_service_capabilities() # # Don't notify capab1 to ceilometer. # S0: update_service_capabilities() self.host_manager.update_service_capabilities(service_name, 'host1', capab1, None, None) self.assertDictEqual(dict(dict(timestamp=31340), **capab1), self.host_manager.service_states['host1']) self.assertDictEqual( dict(dict(timestamp=31338), **capab1), self.host_manager.service_states_last_update['host1']) # S1: update_service_capabilities() self.host_manager_1.update_service_capabilities(service_name, 'host1', capab1, None, None) self.assertDictEqual(dict(dict(timestamp=31341), **capab1), self.host_manager_1.service_states['host1']) self.assertDictEqual( dict(dict(timestamp=31339), **capab1), self.host_manager_1.service_states_last_update['host1']) # S1: notify_service_capabilities() self.host_manager_1.notify_service_capabilities(service_name, 'host1', capab1, None) self.assertDictEqual(dict(dict(timestamp=31341), **capab1), self.host_manager_1.service_states['host1']) self.assertDictEqual( self.host_manager_1.service_states_last_update['host1'], dict(dict(timestamp=31339), **capab1)) # Don't notify capab1 to ceilometer. 
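# No usage notification was emitted in this run: S0 only updated, and S1's notify saw capabilities identical to its pre-seeded last update.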
self.assertEqual(0, _mock_get_usage_and_notify.call_count) @mock.patch( 'cinder.scheduler.host_manager.HostManager.get_usage_and_notify') @mock.patch('oslo_utils.timeutils.utcnow') def test_update_and_notify_service_capabilities_case3( self, _mock_utcnow, _mock_get_usage_and_notify): _mock_utcnow.side_effect = [31343, 31344, 31345] service_name = 'volume' capab1 = {'pools': [{ 'pool_name': 'pool1', 'thick_provisioning_support': True, 'thin_provisioning_support': False, 'total_capacity_gb': 10, 'free_capacity_gb': 10, 'max_over_subscription_ratio': '1', 'provisioned_capacity_gb': 0, 'allocated_capacity_gb': 0, 'reserved_percentage': 0}]} self.host_manager.service_states['host1'] = ( dict(dict(timestamp=31340), **capab1)) self.host_manager.service_states_last_update['host1'] = ( dict(dict(timestamp=31338), **capab1)) self.host_manager_1.service_states['host1'] = ( dict(dict(timestamp=31341), **capab1)) self.host_manager_1.service_states_last_update['host1'] = ( dict(dict(timestamp=31339), **capab1)) # Run 3: # capa: capab1 # S0: notify_service_capabilities() # S0: update_service_capabilities() # S1: update_service_capabilities() # # Don't notify capab1 to ceilometer. # S0: notify_service_capabilities() self.host_manager.notify_service_capabilities(service_name, 'host1', capab1, None) self.assertDictEqual( dict(dict(timestamp=31338), **capab1), self.host_manager.service_states_last_update['host1']) self.assertDictEqual(dict(dict(timestamp=31340), **capab1), self.host_manager.service_states['host1']) # Don't notify capab1 to ceilometer. self.assertEqual(0, _mock_get_usage_and_notify.call_count) # S0: update_service_capabilities() self.host_manager.update_service_capabilities(service_name, 'host1', capab1, None, None) self.assertDictEqual( dict(dict(timestamp=31340), **capab1), self.host_manager.service_states_last_update['host1']) self.assertDictEqual(dict(dict(timestamp=31344), **capab1), self.host_manager.service_states['host1']) # S1: update_service_capabilities() self.host_manager_1.update_service_capabilities(service_name, 'host1', capab1, None, None) self.assertDictEqual(dict(dict(timestamp=31345), **capab1), self.host_manager_1.service_states['host1']) self.assertDictEqual( dict(dict(timestamp=31341), **capab1), self.host_manager_1.service_states_last_update['host1']) @mock.patch( 'cinder.scheduler.host_manager.HostManager.get_usage_and_notify') @mock.patch('oslo_utils.timeutils.utcnow') def test_update_and_notify_service_capabilities_case4( self, _mock_utcnow, _mock_get_usage_and_notify): _mock_utcnow.side_effect = [31346, 31347, 31348] service_name = 'volume' capab1 = {'pools': [{ 'pool_name': 'pool1', 'thick_provisioning_support': True, 'thin_provisioning_support': False, 'total_capacity_gb': 10, 'free_capacity_gb': 10, 'max_over_subscription_ratio': '1', 'provisioned_capacity_gb': 0, 'allocated_capacity_gb': 0, 'reserved_percentage': 0}]} self.host_manager.service_states['host1'] = ( dict(dict(timestamp=31344), **capab1)) self.host_manager.service_states_last_update['host1'] = ( dict(dict(timestamp=31340), **capab1)) self.host_manager_1.service_states['host1'] = ( dict(dict(timestamp=31345), **capab1)) self.host_manager_1.service_states_last_update['host1'] = ( dict(dict(timestamp=31341), **capab1)) capab2 = {'pools': [{ 'pool_name': 'pool1', 'thick_provisioning_support': True, 'thin_provisioning_support': False, 'total_capacity_gb': 10, 'free_capacity_gb': 9, 'max_over_subscription_ratio': '1', 'provisioned_capacity_gb': 1, 'allocated_capacity_gb': 1, 'reserved_percentage': 0}]} # 
Run 4: # capa: capab2 # S0: update_service_capabilities() # S1: notify_service_capabilities() # S1: update_service_capabilities() # # notify capab2 to ceilometer. # S0: update_service_capabilities() self.host_manager.update_service_capabilities(service_name, 'host1', capab2, None, None) self.assertDictEqual( dict(dict(timestamp=31340), **capab1), self.host_manager.service_states_last_update['host1']) self.assertDictEqual(dict(dict(timestamp=31346), **capab2), self.host_manager.service_states['host1']) # S1: notify_service_capabilities() self.host_manager_1.notify_service_capabilities(service_name, 'host1', capab2, None) self.assertDictEqual(dict(dict(timestamp=31345), **capab1), self.host_manager_1.service_states['host1']) self.assertDictEqual( dict(dict(timestamp=31347), **capab2), self.host_manager_1.service_states_last_update['host1']) # notify capab2 to ceilometer. self.assertLess(0, _mock_get_usage_and_notify.call_count) # S1: update_service_capabilities() self.host_manager_1.update_service_capabilities(service_name, 'host1', capab2, None, None) self.assertDictEqual(dict(dict(timestamp=31348), **capab2), self.host_manager_1.service_states['host1']) self.assertDictEqual( dict(dict(timestamp=31347), **capab2), self.host_manager_1.service_states_last_update['host1']) @mock.patch( 'cinder.scheduler.host_manager.HostManager.get_usage_and_notify') @mock.patch('oslo_utils.timeutils.utcnow') def test_update_and_notify_service_capabilities_case5( self, _mock_utcnow, _mock_get_usage_and_notify): _mock_utcnow.side_effect = [31349, 31350, 31351] service_name = 'volume' capab1 = {'pools': [{ 'pool_name': 'pool1', 'thick_provisioning_support': True, 'thin_provisioning_support': False, 'total_capacity_gb': 10, 'free_capacity_gb': 10, 'max_over_subscription_ratio': '1', 'provisioned_capacity_gb': 0, 'allocated_capacity_gb': 0, 'reserved_percentage': 0}]} capab2 = {'pools': [{ 'pool_name': 'pool1', 'thick_provisioning_support': True, 'thin_provisioning_support': False, 'total_capacity_gb': 10, 'free_capacity_gb': 9, 'max_over_subscription_ratio': '1', 'provisioned_capacity_gb': 1, 'allocated_capacity_gb': 1, 'reserved_percentage': 0}]} self.host_manager.service_states['host1'] = ( dict(dict(timestamp=31346), **capab2)) self.host_manager.service_states_last_update['host1'] = ( dict(dict(timestamp=31340), **capab1)) self.host_manager_1.service_states['host1'] = ( dict(dict(timestamp=31348), **capab2)) self.host_manager_1.service_states_last_update['host1'] = ( dict(dict(timestamp=31347), **capab2)) # Run 5: # capa: capab2 # S0: notify_service_capabilities() # S0: update_service_capabilities() # S1: update_service_capabilities() # # This is the special case that is not handled. # 1) capab is changed (from capab1 to capab2) # 2) S1 has already notified capab2 in Run 4. # 3) S0 just got update_service_capabilities() in Run 4. # 4) S0 got notify_service_capabilities() immediately in next run, # here is Run 5. # S0 has no way to know whether the other scheduler (here, S1) has # already notified the changed capab2, so S0 assumes it is its own # turn to notify the changed capab2. # In this case, we have notified the same capabilities twice. # # S0: notify_service_capabilities() self.host_manager.notify_service_capabilities(service_name, 'host1', capab2, None) self.assertDictEqual( dict(dict(timestamp=31349), **capab2), self.host_manager.service_states_last_update['host1']) self.assertDictEqual(dict(dict(timestamp=31346), **capab2), self.host_manager.service_states['host1']) # S0 notify capab2 to ceilometer. 
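# S0 re-notifies capab2 even though S1 already reported it in the simulated Run 4; this is the duplicate notification described above.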
self.assertLess(0, _mock_get_usage_and_notify.call_count) # S0: update_service_capabilities() self.host_manager.update_service_capabilities(service_name, 'host1', capab2, None, None) self.assertDictEqual( dict(dict(timestamp=31349), **capab2), self.host_manager.service_states_last_update['host1']) self.assertDictEqual(dict(dict(timestamp=31350), **capab2), self.host_manager.service_states['host1']) # S1: update_service_capabilities() self.host_manager_1.update_service_capabilities(service_name, 'host1', capab2, None, None) self.assertDictEqual( dict(dict(timestamp=31348), **capab2), self.host_manager_1.service_states_last_update['host1']) self.assertDictEqual(dict(dict(timestamp=31351), **capab2), self.host_manager_1.service_states['host1']) @mock.patch('cinder.objects.service.Service.is_up', new_callable=mock.PropertyMock) @mock.patch('cinder.db.service_get_all') def test_has_all_capabilities(self, _mock_service_get_all, _mock_service_is_up): _mock_service_is_up.return_value = True services = [ dict(id=1, host='host1', topic='volume', disabled=False, availability_zone='zone1', updated_at=timeutils.utcnow(), uuid='a3a593da-7f8d-4bb7-8b4c-f2bc1e0b4824'), dict(id=2, host='host2', topic='volume', disabled=False, availability_zone='zone1', updated_at=timeutils.utcnow(), uuid='4200b32b-0bf9-436c-86b2-0675f6ac218e'), dict(id=3, host='host3', topic='volume', disabled=False, availability_zone='zone1', updated_at=timeutils.utcnow(), uuid='6d91e7f5-ca17-4e3b-bf4f-19ca77166dd7'), ] _mock_service_get_all.return_value = services # Create host_manager again to let db.service_get_all mock run with mock.patch('cinder.scheduler.filters.BackendFilterHandler.' 'get_all_classes', return_value=helpers.ALL_FILTER_CLASSES[:]): self.host_manager = host_manager.HostManager() self.assertFalse(self.host_manager.has_all_capabilities()) timestamp = jsonutils.to_primitive(datetime.utcnow()) host1_volume_capabs = dict(free_capacity_gb=4321) host2_volume_capabs = dict(free_capacity_gb=5432) host3_volume_capabs = dict(free_capacity_gb=6543) service_name = 'volume' self.host_manager.update_service_capabilities(service_name, 'host1', host1_volume_capabs, None, timestamp) self.assertFalse(self.host_manager.has_all_capabilities()) self.host_manager.update_service_capabilities(service_name, 'host2', host2_volume_capabs, None, timestamp) self.assertFalse(self.host_manager.has_all_capabilities()) self.host_manager.update_service_capabilities(service_name, 'host3', host3_volume_capabs, None, timestamp) self.assertTrue(self.host_manager.has_all_capabilities()) @mock.patch('cinder.objects.service.Service.is_up', new_callable=mock.PropertyMock) @mock.patch('cinder.db.service_get_all') def test_first_receive_capabilities_case1(self, _mock_service_get_all, _mock_service_is_up): # No volume service startup self.assertFalse(self.host_manager.first_receive_capabilities()) services = [ dict(id=1, host='host1', topic='volume', disabled=False, availability_zone='zone1', updated_at=timeutils.utcnow(), uuid='06acda71-b3b4-4f1b-8d87-db5c47e7ebd2', ) ] _mock_service_get_all.return_value = services _mock_service_is_up.return_value = True timestamp = jsonutils.to_primitive(datetime.utcnow()) host1_volume_capabs = dict(free_capacity_gb=4321) service_name = 'volume' self.host_manager.update_service_capabilities(service_name, 'host1', host1_volume_capabs, None, timestamp) self.assertTrue(self.host_manager.first_receive_capabilities()) @mock.patch('cinder.objects.service.Service.is_up', new_callable=mock.PropertyMock) 
@mock.patch('cinder.db.service_get_all') def test_first_receive_capabilities_case2(self, _mock_service_get_all, _mock_service_is_up): _mock_service_is_up.return_value = True services = [ dict(id=1, host='host1', topic='volume', disabled=False, availability_zone='zone1', updated_at=timeutils.utcnow(), uuid='36ede0e2-1b3c-41b0-9cd3-66e1f56dc959'), dict(id=2, host='host2', topic='volume', disabled=False, availability_zone='zone1', updated_at=timeutils.utcnow(), uuid='b124e8dc-bf5f-4923-802d-27153ac7fe56'), dict(id=3, host='host3', topic='volume', disabled=False, availability_zone='zone1', updated_at=timeutils.utcnow(), uuid='4d0b1c5e-ce3c-424e-b2f4-a09a0f54d328'), ] _mock_service_get_all.return_value = services # Create host_manager again to let db.service_get_all mock run with mock.patch('cinder.scheduler.filters.BackendFilterHandler.' 'get_all_classes', return_value=helpers.ALL_FILTER_CLASSES[:]): self.host_manager = host_manager.HostManager() self.assertFalse(self.host_manager.first_receive_capabilities()) timestamp = jsonutils.to_primitive(datetime.utcnow()) host1_volume_capabs = dict(free_capacity_gb=4321) host2_volume_capabs = dict(free_capacity_gb=5432) host3_volume_capabs = dict(free_capacity_gb=6543) service_name = 'volume' self.host_manager.update_service_capabilities(service_name, 'host1', host1_volume_capabs, None, timestamp) self.assertFalse(self.host_manager.first_receive_capabilities()) self.host_manager.update_service_capabilities(service_name, 'host2', host2_volume_capabs, None, timestamp) self.assertFalse(self.host_manager.first_receive_capabilities()) self.host_manager.update_service_capabilities(service_name, 'host3', host3_volume_capabs, None, timestamp) self.assertTrue(self.host_manager.first_receive_capabilities()) @mock.patch('cinder.db.service_get_all') @mock.patch('cinder.objects.service.Service.is_up', new_callable=mock.PropertyMock) @mock.patch('oslo_utils.timeutils.utcnow') def test_update_and_get_pools(self, _mock_utcnow, _mock_service_is_up, _mock_service_get_all): """Test interaction between update and get_pools This test verifies that each time that get_pools is called it gets the latest copy of service_capabilities, which is timestamped with the current date/time. """ context = 'fake_context' dates = [datetime.fromtimestamp(400), datetime.fromtimestamp(401), datetime.fromtimestamp(402)] _mock_utcnow.side_effect = dates services = [ # This is the first call to utcnow() dict(id=1, host='host1', topic='volume', disabled=False, availability_zone='zone1', updated_at=timeutils.utcnow(), uuid='6d91e7f5-ca17-4e3b-bf4f-19ca77166dd7',) ] mocked_service_states = { 'host1': dict(volume_backend_name='AAA', total_capacity_gb=512, free_capacity_gb=200, timestamp=dates[1], reserved_percentage=0), } _mock_service_get_all.return_value = services _mock_service_is_up.return_value = True _mock_warning = mock.Mock() host_manager.LOG.warn = _mock_warning host_volume_capabs = dict(free_capacity_gb=4321) service_name = 'volume' with mock.patch.dict(self.host_manager.service_states, mocked_service_states): self.host_manager.update_service_capabilities(service_name, 'host1', host_volume_capabs, None, None) res = self.host_manager.get_pools(context) self.assertEqual(1, len(res)) self.assertEqual(dates[1], res[0]['capabilities']['timestamp']) @mock.patch('cinder.objects.Service.is_up', True) def test_get_all_backend_states_cluster(self): """Test get_all_backend_states when we have clustered services. 
Confirm that clustered services are grouped and that only the latest of the capability reports is relevant. """ ctxt = context.RequestContext(fake.USER_ID, fake.PROJECT_ID, True) cluster_name = 'cluster' db.cluster_create(ctxt, {'name': cluster_name, 'binary': constants.VOLUME_BINARY}) services = ( db.service_create(ctxt, {'host': 'clustered_host_1', 'topic': constants.VOLUME_TOPIC, 'binary': constants.VOLUME_BINARY, 'cluster_name': cluster_name, 'created_at': timeutils.utcnow()}), # Even if this service is disabled, since it belongs to an enabled # cluster, it's not really disabled. db.service_create(ctxt, {'host': 'clustered_host_2', 'topic': constants.VOLUME_TOPIC, 'binary': constants.VOLUME_BINARY, 'disabled': True, 'cluster_name': cluster_name, 'created_at': timeutils.utcnow()}), db.service_create(ctxt, {'host': 'clustered_host_3', 'topic': constants.VOLUME_TOPIC, 'binary': constants.VOLUME_BINARY, 'cluster_name': cluster_name, 'created_at': timeutils.utcnow()}), db.service_create(ctxt, {'host': 'non_clustered_host', 'topic': constants.VOLUME_TOPIC, 'binary': constants.VOLUME_BINARY, 'created_at': timeutils.utcnow()}), # This service has no capabilities db.service_create(ctxt, {'host': 'no_capabilities_host', 'topic': constants.VOLUME_TOPIC, 'binary': constants.VOLUME_BINARY, 'created_at': timeutils.utcnow()}), ) capabilities = ((1, {'free_capacity_gb': 1000}), # This is the capacity that will be selected for the # cluster because is the one with the latest timestamp. (3, {'free_capacity_gb': 2000}), (2, {'free_capacity_gb': 3000}), (1, {'free_capacity_gb': 4000})) for i in range(len(capabilities)): self.host_manager.update_service_capabilities( 'volume', services[i].host, capabilities[i][1], services[i].cluster_name, capabilities[i][0]) res = self.host_manager.get_all_backend_states(ctxt) result = {(s.cluster_name or s.host, s.free_capacity_gb) for s in res} expected = {(cluster_name + '#_pool0', 2000), ('non_clustered_host#_pool0', 4000)} self.assertSetEqual(expected, result) @mock.patch('cinder.db.service_get_all') @mock.patch('cinder.objects.service.Service.is_up', new_callable=mock.PropertyMock) def test_get_all_backend_states(self, _mock_service_is_up, _mock_service_get_all): context = 'fake_context' timestamp = datetime.utcnow() topic = constants.VOLUME_TOPIC services = [ dict(id=1, host='host1', topic='volume', disabled=False, availability_zone='zone1', updated_at=timeutils.utcnow(), binary=None, deleted=False, created_at=None, modified_at=None, report_count=0, deleted_at=None, disabled_reason=None, uuid='a3a593da-7f8d-4bb7-8b4c-f2bc1e0b4824'), dict(id=2, host='host2', topic='volume', disabled=False, availability_zone='zone1', updated_at=timeutils.utcnow(), binary=None, deleted=False, created_at=None, modified_at=None, report_count=0, deleted_at=None, disabled_reason=None, uuid='4200b32b-0bf9-436c-86b2-0675f6ac218e'), dict(id=3, host='host3', topic='volume', disabled=False, availability_zone='zone2', updated_at=timeutils.utcnow(), binary=None, deleted=False, created_at=None, modified_at=None, report_count=0, deleted_at=None, disabled_reason=None, uuid='6d91e7f5-ca17-4e3b-bf4f-19ca77166dd7'), dict(id=4, host='host4', topic='volume', disabled=False, availability_zone='zone3', updated_at=timeutils.utcnow(), binary=None, deleted=False, created_at=None, modified_at=None, report_count=0, deleted_at=None, disabled_reason=None, uuid='18417850-2ca9-43d1-9619-ae16bfb0f655'), ] service_objs = [] for db_service in services: service_obj = objects.Service() 
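# Service versioned objects corresponding to the DB records above; used below to build the expected Service.is_up call list.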
service_objs.append(objects.Service._from_db_object(context, service_obj, db_service)) service_states = { 'host1': dict(volume_backend_name='AAA', total_capacity_gb=512, free_capacity_gb=200, timestamp=timestamp, reserved_percentage=0, provisioned_capacity_gb=312), 'host2': dict(volume_backend_name='BBB', total_capacity_gb=256, free_capacity_gb=100, timestamp=timestamp, reserved_percentage=0, provisioned_capacity_gb=156), 'host3': dict(volume_backend_name='CCC', total_capacity_gb=10000, free_capacity_gb=700, timestamp=timestamp, reserved_percentage=0, provisioned_capacity_gb=9300), } # First test: service.is_up is always True, host5 is disabled, # host4 has no capabilities self.host_manager.service_states = service_states _mock_service_get_all.return_value = services _mock_service_is_up.return_value = True _mock_warning = mock.Mock() host_manager.LOG.warning = _mock_warning # Get all states self.host_manager.get_all_backend_states(context) _mock_service_get_all.assert_called_with(context, disabled=False, frozen=False, topic=topic) # verify that Service.is_up was called for each srv expected = [mock.call() for s in service_objs] self.assertEqual(expected, _mock_service_is_up.call_args_list) # Get backend_state_map and make sure we have the first 3 hosts backend_state_map = self.host_manager.backend_state_map self.assertEqual(3, len(backend_state_map)) for i in range(3): volume_node = services[i] host = volume_node['host'] test_service.TestService._compare(self, volume_node, backend_state_map[host].service) # Second test: Now service.is_up returns False for host3 _mock_service_is_up.reset_mock() _mock_service_is_up.side_effect = [True, True, False, True] _mock_service_get_all.reset_mock() _mock_warning.reset_mock() # Get all states, make sure host 3 is reported as down self.host_manager.get_all_backend_states(context) _mock_service_get_all.assert_called_with(context, disabled=False, frozen=False, topic=topic) self.assertEqual(expected, _mock_service_is_up.call_args_list) self.assertGreater(_mock_warning.call_count, 0) # Get backend_state_map and make sure we have the first 2 hosts (host3 # is down, host4 is missing capabilities) backend_state_map = self.host_manager.backend_state_map self.assertEqual(2, len(backend_state_map)) for i in range(2): volume_node = services[i] host = volume_node['host'] test_service.TestService._compare(self, volume_node, backend_state_map[host].service) @mock.patch('cinder.db.service_get_all') @mock.patch('cinder.objects.service.Service.is_up', new_callable=mock.PropertyMock) def test_get_pools(self, _mock_service_is_up, _mock_service_get_all): context = 'fake_context' timestamp = datetime.utcnow() services = [ dict(id=1, host='host1', topic='volume', disabled=False, availability_zone='zone1', updated_at=timeutils.utcnow(), uuid='a3a593da-7f8d-4bb7-8b4c-f2bc1e0b4824'), dict(id=2, host='host2@back1', topic='volume', disabled=False, availability_zone='zone1', updated_at=timeutils.utcnow(), uuid='4200b32b-0bf9-436c-86b2-0675f6ac218e'), dict(id=3, host='host2@back2', topic='volume', disabled=False, availability_zone='zone2', updated_at=timeutils.utcnow(), uuid='6d91e7f5-ca17-4e3b-bf4f-19ca77166dd7'), ] mocked_service_states = { 'host1': dict(volume_backend_name='AAA', total_capacity_gb=512, free_capacity_gb=200, timestamp=timestamp, reserved_percentage=0, provisioned_capacity_gb=312), 'host2@back1': dict(volume_backend_name='BBB', total_capacity_gb=256, free_capacity_gb=100, timestamp=timestamp, reserved_percentage=0, provisioned_capacity_gb=156), 'host2@back2': 
dict(volume_backend_name='CCC', total_capacity_gb=10000, free_capacity_gb=700, timestamp=timestamp, reserved_percentage=0, provisioned_capacity_gb=9300), } _mock_service_get_all.return_value = services _mock_service_is_up.return_value = True _mock_warning = mock.Mock() host_manager.LOG.warn = _mock_warning with mock.patch.dict(self.host_manager.service_states, mocked_service_states): res = self.host_manager.get_pools(context) # check if get_pools returns all 3 pools self.assertEqual(3, len(res)) expected = [ { 'name': 'host1#AAA', 'capabilities': { 'timestamp': timestamp, 'volume_backend_name': 'AAA', 'free_capacity_gb': 200, 'driver_version': None, 'total_capacity_gb': 512, 'reserved_percentage': 0, 'vendor_name': None, 'storage_protocol': None, 'provisioned_capacity_gb': 312}, }, { 'name': 'host2@back1#BBB', 'capabilities': { 'timestamp': timestamp, 'volume_backend_name': 'BBB', 'free_capacity_gb': 100, 'driver_version': None, 'total_capacity_gb': 256, 'reserved_percentage': 0, 'vendor_name': None, 'storage_protocol': None, 'provisioned_capacity_gb': 156}, }, { 'name': 'host2@back2#CCC', 'capabilities': { 'timestamp': timestamp, 'volume_backend_name': 'CCC', 'free_capacity_gb': 700, 'driver_version': None, 'total_capacity_gb': 10000, 'reserved_percentage': 0, 'vendor_name': None, 'storage_protocol': None, 'provisioned_capacity_gb': 9300}, } ] def sort_func(data): return data['name'] self.assertEqual(len(expected), len(res)) self.assertEqual(sorted(expected, key=sort_func), sorted(res, key=sort_func)) def test_get_usage(self): host = "host1@backend1" timestamp = 40000 volume_stats1 = {'pools': [ {'pool_name': 'pool1', 'total_capacity_gb': 30.01, 'free_capacity_gb': 28.01, 'allocated_capacity_gb': 2.0, 'provisioned_capacity_gb': 2.0, 'max_over_subscription_ratio': '1.0', 'thin_provisioning_support': False, 'thick_provisioning_support': True, 'reserved_percentage': 5}, {'pool_name': 'pool2', 'total_capacity_gb': 20.01, 'free_capacity_gb': 18.01, 'allocated_capacity_gb': 2.0, 'provisioned_capacity_gb': 2.0, 'max_over_subscription_ratio': '2.0', 'thin_provisioning_support': True, 'thick_provisioning_support': False, 'reserved_percentage': 5}]} updated_pools1 = [{'pool_name': 'pool1', 'total_capacity_gb': 30.01, 'free_capacity_gb': 28.01, 'allocated_capacity_gb': 2.0, 'provisioned_capacity_gb': 2.0, 'max_over_subscription_ratio': '1.0', 'thin_provisioning_support': False, 'thick_provisioning_support': True, 'reserved_percentage': 5}, {'pool_name': 'pool2', 'total_capacity_gb': 20.01, 'free_capacity_gb': 18.01, 'allocated_capacity_gb': 2.0, 'provisioned_capacity_gb': 2.0, 'max_over_subscription_ratio': '2.0', 'thin_provisioning_support': True, 'thick_provisioning_support': False, 'reserved_percentage': 5}] volume_stats2 = {'pools': [ {'pool_name': 'pool1', 'total_capacity_gb': 30.01, 'free_capacity_gb': 28.01, 'allocated_capacity_gb': 2.0, 'provisioned_capacity_gb': 2.0, 'max_over_subscription_ratio': '2.0', 'thin_provisioning_support': True, 'thick_provisioning_support': False, 'reserved_percentage': 0}, {'pool_name': 'pool2', 'total_capacity_gb': 20.01, 'free_capacity_gb': 18.01, 'allocated_capacity_gb': 2.0, 'provisioned_capacity_gb': 2.0, 'max_over_subscription_ratio': '2.0', 'thin_provisioning_support': True, 'thick_provisioning_support': False, 'reserved_percentage': 5}]} updated_pools2 = [{'pool_name': 'pool1', 'total_capacity_gb': 30.01, 'free_capacity_gb': 28.01, 'allocated_capacity_gb': 2.0, 'provisioned_capacity_gb': 2.0, 'max_over_subscription_ratio': '2.0', 
'thin_provisioning_support': True, 'thick_provisioning_support': False, 'reserved_percentage': 0}] expected1 = [ {"name_to_id": 'host1@backend1#pool1', "type": "pool", "total": 30.01, "free": 28.01, "allocated": 2.0, "provisioned": 2.0, "virtual_free": 27.01, "reported_at": 40000}, {"name_to_id": 'host1@backend1#pool2', "type": "pool", "total": 20.01, "free": 18.01, "allocated": 2.0, "provisioned": 2.0, "virtual_free": 36.02, "reported_at": 40000}, {"name_to_id": 'host1@backend1', "type": "backend", "total": 50.02, "free": 46.02, "allocated": 4.0, "provisioned": 4.0, "virtual_free": 63.03, "reported_at": 40000}] expected2 = [ {"name_to_id": 'host1@backend1#pool1', "type": "pool", "total": 30.01, "free": 28.01, "allocated": 2.0, "provisioned": 2.0, "virtual_free": 58.02, "reported_at": 40000}, {"name_to_id": 'host1@backend1', "type": "backend", "total": 50.02, "free": 46.02, "allocated": 4.0, "provisioned": 4.0, "virtual_free": 94.04, "reported_at": 40000}] def sort_func(data): return data['name_to_id'] res1 = self.host_manager._get_usage(volume_stats1, updated_pools1, host, timestamp) self.assertEqual(len(expected1), len(res1)) self.assertEqual(sorted(expected1, key=sort_func), sorted(res1, key=sort_func)) res2 = self.host_manager._get_usage(volume_stats2, updated_pools2, host, timestamp) self.assertEqual(len(expected2), len(res2)) self.assertEqual(sorted(expected2, key=sort_func), sorted(res2, key=sort_func)) @mock.patch('cinder.db.service_get_all') @mock.patch('cinder.objects.service.Service.is_up', new_callable=mock.PropertyMock) def test_get_pools_filter_name(self, _mock_service_is_up, _mock_service_get_all_by_topic): context = 'fake_context' services = [ dict(id=1, host='host1', topic='volume', disabled=False, availability_zone='zone1', updated_at=timeutils.utcnow(), uuid='a3a593da-7f8d-4bb7-8b4c-f2bc1e0b4824'), dict(id=2, host='host2@back1', topic='volume', disabled=False, availability_zone='zone1', updated_at=timeutils.utcnow(), uuid='4200b32b-0bf9-436c-86b2-0675f6ac218e') ] mocked_service_states = { 'host1': dict(volume_backend_name='AAA', total_capacity_gb=512, free_capacity_gb=200, timestamp=None, reserved_percentage=0, provisioned_capacity_gb=312), 'host2@back1': dict(volume_backend_name='BBB', total_capacity_gb=256, free_capacity_gb=100, timestamp=None, reserved_percentage=0, provisioned_capacity_gb=156) } _mock_service_get_all_by_topic.return_value = services _mock_service_is_up.return_value = True _mock_warning = mock.Mock() host_manager.LOG.warn = _mock_warning with mock.patch.dict(self.host_manager.service_states, mocked_service_states): filters = {'name': 'host1#AAA'} res = self.host_manager.get_pools(context, filters=filters) expected = [ { 'name': 'host1#AAA', 'capabilities': { 'timestamp': None, 'volume_backend_name': 'AAA', 'free_capacity_gb': 200, 'driver_version': None, 'total_capacity_gb': 512, 'reserved_percentage': 0, 'vendor_name': None, 'storage_protocol': None, 'provisioned_capacity_gb': 312}, } ] self.assertEqual(expected, res) @mock.patch('cinder.scheduler.host_manager.HostManager.' 
'_choose_backend_filters') def test_get_pools_filtered_by_volume_type(self, _mock_choose_backend_filters): context = 'fake_context' filter_class = FakeFilterClass3 _mock_choose_backend_filters.return_value = [filter_class] hosts = { 'host1': {'volume_backend_name': 'AAA', 'total_capacity_gb': 512, 'free_capacity_gb': 200, 'timestamp': None, 'reserved_percentage': 0, 'provisioned_capacity_gb': 312}, 'host2@back1': {'volume_backend_name': 'BBB', 'total_capacity_gb': 256, 'free_capacity_gb': 100, 'timestamp': None, 'reserved_percentage': 0, 'provisioned_capacity_gb': 156}} mock_warning = mock.Mock() host_manager.LOG.warn = mock_warning mock_volume_type = { 'volume_backend_name': 'AAA', 'qos_specs': 'BBB', } host_manager1 = host_manager.HostManager() res = host_manager1._filter_pools_by_volume_type(context, mock_volume_type, hosts) expected = {'host1': {'volume_backend_name': 'AAA', 'total_capacity_gb': 512, 'free_capacity_gb': 200, 'timestamp': None, 'reserved_percentage': 0, 'provisioned_capacity_gb': 312}} self.assertEqual(expected, res) @mock.patch('cinder.db.service_get_all') @mock.patch('cinder.objects.service.Service.is_up', new_callable=mock.PropertyMock) def test_get_pools_filter_multiattach(self, _mock_service_is_up, _mock_service_get_all_by_topic): context = 'fake_context' services = [ dict(id=1, host='host1', topic='volume', disabled=False, availability_zone='zone1', updated_at=timeutils.utcnow(), uuid='a3a593da-7f8d-4bb7-8b4c-f2bc1e0b4824'), dict(id=2, host='host2@back1', topic='volume', disabled=False, availability_zone='zone1', updated_at=timeutils.utcnow(), uuid='4200b32b-0bf9-436c-86b2-0675f6ac218e') ] mocked_service_states = { 'host1': dict(volume_backend_name='AAA', total_capacity_gb=512, free_capacity_gb=200, timestamp=None, reserved_percentage=0, multiattach=True), 'host2@back1': dict(volume_backend_name='BBB', total_capacity_gb=256, free_capacity_gb=100, timestamp=None, reserved_percentage=0, multiattach=False) } _mock_service_get_all_by_topic.return_value = services _mock_service_is_up.return_value = True _mock_warning = mock.Mock() host_manager.LOG.warn = _mock_warning with mock.patch.dict(self.host_manager.service_states, mocked_service_states): filters_t = {'multiattach': 'true'} filters_f = {'multiattach': False} res_t = self.host_manager.get_pools(context, filters=filters_t) res_f = self.host_manager.get_pools(context, filters=filters_f) expected_t = [ { 'name': 'host1#AAA', 'capabilities': { 'timestamp': None, 'volume_backend_name': 'AAA', 'free_capacity_gb': 200, 'driver_version': None, 'total_capacity_gb': 512, 'reserved_percentage': 0, 'vendor_name': None, 'storage_protocol': None, 'multiattach': True}, } ] expected_f = [ { 'name': 'host2@back1#BBB', 'capabilities': { 'timestamp': None, 'volume_backend_name': 'BBB', 'free_capacity_gb': 100, 'driver_version': None, 'total_capacity_gb': 256, 'reserved_percentage': 0, 'vendor_name': None, 'storage_protocol': None, 'multiattach': False}, } ] self.assertEqual(expected_t, res_t) self.assertEqual(expected_f, res_f) @ddt.data( (None, None, True), (None, 'value', False), ('cap', None, False), (False, 'True', False), (True, 'True', True), (True, True, True), (False, 'false', True), (1.1, '1.1', True), (0, '0', True), (1.1, '1.11', False), ('str', 'str', True), ('str1', 'str2', False), ('str', 'StR', False), ([], [], True), (['hdd', 'ssd'], ['ssd'], False), (['hdd', 'ssd'], ['ssd', 'hdd'], False), (['hdd', 'ssd'], "['hdd', 'ssd']", True), ({}, {}, True), ({'a': 'a', 'b': 'b'}, {'b': 'b', 'a': 'a'}, True), ({'a': 'a', 'b': 
'b'}, {'b': 'b'}, False), ({'a': 'a'}, "{'a': 'a'}", True), ) @ddt.unpack def test_equal_after_convert(self, cap, value, ret_value): self.assertEqual(ret_value, self.host_manager._equal_after_convert(cap, value)) class BackendStateTestCase(test.TestCase): """Test case for BackendState class.""" def test_update_from_volume_capability_nopool(self): fake_backend = host_manager.BackendState('be1', None) self.assertIsNone(fake_backend.free_capacity_gb) volume_capability = {'total_capacity_gb': 1024, 'free_capacity_gb': 512, 'provisioned_capacity_gb': 512, 'reserved_percentage': 0, 'timestamp': None} fake_backend.update_from_volume_capability(volume_capability) # Backend level stats remain uninitialized self.assertEqual(0, fake_backend.total_capacity_gb) self.assertIsNone(fake_backend.free_capacity_gb) # Pool stats has been updated self.assertEqual(1024, fake_backend.pools['_pool0'].total_capacity_gb) self.assertEqual(512, fake_backend.pools['_pool0'].free_capacity_gb) self.assertEqual(512, fake_backend.pools['_pool0'].provisioned_capacity_gb) # Test update for existing host state volume_capability.update(dict(total_capacity_gb=1000)) fake_backend.update_from_volume_capability(volume_capability) self.assertEqual(1000, fake_backend.pools['_pool0'].total_capacity_gb) # Test update for existing host state with different backend name volume_capability.update(dict(volume_backend_name='magic')) fake_backend.update_from_volume_capability(volume_capability) self.assertEqual(1000, fake_backend.pools['magic'].total_capacity_gb) self.assertEqual(512, fake_backend.pools['magic'].free_capacity_gb) self.assertEqual(512, fake_backend.pools['magic'].provisioned_capacity_gb) # 'pool0' becomes nonactive pool, and is deleted self.assertRaises(KeyError, lambda: fake_backend.pools['pool0']) def test_update_from_volume_capability_with_pools(self): fake_backend = host_manager.BackendState('host1', None) self.assertIsNone(fake_backend.free_capacity_gb) capability = { 'volume_backend_name': 'Local iSCSI', 'vendor_name': 'OpenStack', 'driver_version': '1.0.1', 'storage_protocol': 'iSCSI', 'pools': [ {'pool_name': '1st pool', 'total_capacity_gb': 500, 'free_capacity_gb': 230, 'allocated_capacity_gb': 270, 'provisioned_capacity_gb': 270, 'QoS_support': 'False', 'reserved_percentage': 0, 'dying_disks': 100, 'super_hero_1': 'spider-man', 'super_hero_2': 'flash', 'super_hero_3': 'neoncat', }, {'pool_name': '2nd pool', 'total_capacity_gb': 1024, 'free_capacity_gb': 1024, 'allocated_capacity_gb': 0, 'provisioned_capacity_gb': 0, 'QoS_support': 'False', 'reserved_percentage': 0, 'dying_disks': 200, 'super_hero_1': 'superman', 'super_hero_2': 'Hulk', } ], 'timestamp': None, } fake_backend.update_from_volume_capability(capability) self.assertEqual('Local iSCSI', fake_backend.volume_backend_name) # Storage protocol is changed to include its variants self.assertEqual(['iSCSI', 'iscsi'], fake_backend.storage_protocol) self.assertEqual('OpenStack', fake_backend.vendor_name) self.assertEqual('1.0.1', fake_backend.driver_version) # Backend level stats remain uninitialized self.assertEqual(0, fake_backend.total_capacity_gb) self.assertIsNone(fake_backend.free_capacity_gb) # Pool stats has been updated self.assertEqual(2, len(fake_backend.pools)) self.assertEqual(500, fake_backend.pools['1st pool'].total_capacity_gb) self.assertEqual(230, fake_backend.pools['1st pool'].free_capacity_gb) self.assertEqual( 270, fake_backend.pools['1st pool'].provisioned_capacity_gb) self.assertEqual( 1024, fake_backend.pools['2nd pool'].total_capacity_gb) 
self.assertEqual(1024, fake_backend.pools['2nd pool'].free_capacity_gb) self.assertEqual( 0, fake_backend.pools['2nd pool'].provisioned_capacity_gb) capability = { 'volume_backend_name': 'Local iSCSI', 'vendor_name': 'OpenStack', 'driver_version': '1.0.2', 'storage_protocol': 'iSCSI', 'pools': [ {'pool_name': '3rd pool', 'total_capacity_gb': 10000, 'free_capacity_gb': 10000, 'allocated_capacity_gb': 0, 'provisioned_capacity_gb': 0, 'QoS_support': 'False', 'reserved_percentage': 0, }, ], 'timestamp': None, } # test update BackendState Record fake_backend.update_from_volume_capability(capability) self.assertEqual('1.0.2', fake_backend.driver_version) # Non-active pool stats has been removed self.assertEqual(1, len(fake_backend.pools)) self.assertRaises(KeyError, lambda: fake_backend.pools['1st pool']) self.assertRaises(KeyError, lambda: fake_backend.pools['2nd pool']) self.assertEqual(10000, fake_backend.pools['3rd pool'].total_capacity_gb) self.assertEqual(10000, fake_backend.pools['3rd pool'].free_capacity_gb) self.assertEqual( 0, fake_backend.pools['3rd pool'].provisioned_capacity_gb) def test_update_from_volume_infinite_capability(self): fake_backend = host_manager.BackendState('host1', None) self.assertIsNone(fake_backend.free_capacity_gb) volume_capability = {'total_capacity_gb': 'infinite', 'free_capacity_gb': 'infinite', 'reserved_percentage': 0, 'timestamp': None} fake_backend.update_from_volume_capability(volume_capability) # Backend level stats remain uninitialized self.assertEqual(0, fake_backend.total_capacity_gb) self.assertIsNone(fake_backend.free_capacity_gb) # Pool stats has been updated self.assertEqual( 'infinite', fake_backend.pools['_pool0'].total_capacity_gb) self.assertEqual( 'infinite', fake_backend.pools['_pool0'].free_capacity_gb) def test_update_from_volume_unknown_capability(self): fake_backend = host_manager.BackendState('host1', None) self.assertIsNone(fake_backend.free_capacity_gb) volume_capability = {'total_capacity_gb': 'infinite', 'free_capacity_gb': 'unknown', 'reserved_percentage': 0, 'timestamp': None} fake_backend.update_from_volume_capability(volume_capability) # Backend level stats remain uninitialized self.assertEqual(0, fake_backend.total_capacity_gb) self.assertIsNone(fake_backend.free_capacity_gb) # Pool stats has been updated self.assertEqual( 'infinite', fake_backend.pools['_pool0'].total_capacity_gb) self.assertEqual( 'unknown', fake_backend.pools['_pool0'].free_capacity_gb) def test_update_from_empty_volume_capability(self): fake_backend = host_manager.BackendState('host1', None) vol_cap = {'timestamp': None} fake_backend.update_from_volume_capability(vol_cap) self.assertEqual(0, fake_backend.total_capacity_gb) self.assertIsNone(fake_backend.free_capacity_gb) # Pool stats has been updated self.assertEqual(0, fake_backend.pools['_pool0'].total_capacity_gb) self.assertEqual(0, fake_backend.pools['_pool0'].free_capacity_gb) self.assertEqual(0, fake_backend.pools['_pool0'].provisioned_capacity_gb) def test_filter_goodness_default_capabilities(self): self.addCleanup(CONF.clear_override, 'filter_function') self.addCleanup(CONF.clear_override, 'goodness_function') CONF.set_override('filter_function', '2') CONF.set_override('goodness_function', '4') capability = { 'filter_function': CONF.filter_function, 'goodness_function': CONF.goodness_function, 'timestamp': None, 'pools': [{'pool_name': 'fake_pool'}] } fake_backend = host_manager.BackendState('host1', None) fake_backend.update_from_volume_capability(capability) pool_cap = 
fake_backend.pools['fake_pool'] self.assertEqual('2', pool_cap.filter_function) self.assertEqual('4', pool_cap.goodness_function) def test_append_backend_info_custom_capabilities(self): pool_cap = { 'pool_name': 'pool1', 'total_capacity_gb': 30.01, 'free_capacity_gb': 28.01, 'allocated_capacity_gb': 2.0, 'provisioned_capacity_gb': 2.0, 'max_over_subscription_ratio': '1.0', 'thin_provisioning_support': False, 'thick_provisioning_support': True, 'reserved_percentage': 5 } volume_capability = {'filter_function': 5, 'goodness_function': 10} fake_backend = host_manager.BackendState( 'host1', None, capabilities=volume_capability) fake_backend._append_backend_info(pool_cap) self.assertEqual(5, pool_cap['filter_function']) self.assertEqual(10, pool_cap['goodness_function']) class PoolStateTestCase(test.TestCase): """Test case for BackendState class.""" def test_update_from_volume_capability(self): fake_pool = host_manager.PoolState('host1', None, None, 'pool0') self.assertIsNone(fake_pool.free_capacity_gb) volume_capability = {'total_capacity_gb': 1024, 'free_capacity_gb': 512, 'reserved_percentage': 0, 'provisioned_capacity_gb': 512, 'timestamp': None, 'cap1': 'val1', 'cap2': 'val2'} fake_pool.update_from_volume_capability(volume_capability) self.assertEqual('host1#pool0', fake_pool.host) self.assertEqual('pool0', fake_pool.pool_name) self.assertEqual(1024, fake_pool.total_capacity_gb) self.assertEqual(512, fake_pool.free_capacity_gb) self.assertEqual(512, fake_pool.provisioned_capacity_gb) self.assertDictEqual(volume_capability, dict(fake_pool.capabilities)) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/scheduler/test_rpcapi.py0000664000175000017500000003417000000000000023326 0ustar00zuulzuul00000000000000 # Copyright 2012, Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""Unit Tests for cinder.scheduler.rpcapi.""" from datetime import datetime from unittest import mock import ddt from cinder import exception from cinder import objects from cinder.scheduler import rpcapi as scheduler_rpcapi from cinder.tests.unit.backup import fake_backup from cinder.tests.unit import fake_constants from cinder.tests.unit import fake_snapshot from cinder.tests.unit import fake_volume from cinder.tests.unit import test @ddt.ddt class SchedulerRPCAPITestCase(test.RPCAPITestCase): def setUp(self): super(SchedulerRPCAPITestCase, self).setUp() self.rpcapi = scheduler_rpcapi.SchedulerAPI self.base_version = '3.0' self.volume_id = fake_constants.VOLUME_ID self.fake_volume = fake_volume.fake_volume_obj( self.context, expected_attrs=['metadata', 'admin_metadata', 'glance_metadata']) self.fake_snapshot = fake_snapshot.fake_snapshot_obj( self.context) self.fake_rs_obj = objects.RequestSpec.from_primitives({}) self.fake_rs_dict = {'volume_id': self.volume_id} self.fake_fp_dict = {'availability_zone': 'fake_az'} self.fake_backup_dict = fake_backup.fake_backup_obj(self.context) @ddt.data('3.0', '3.3') @mock.patch('oslo_messaging.RPCClient.can_send_version') def test_update_service_capabilities(self, version, can_send_version): can_send_version.side_effect = lambda x: x == version self._test_rpc_api('update_service_capabilities', rpc_method='cast', service_name='fake_name', host='fake_host', cluster_name='cluster_name', capabilities={}, fanout=True, version=version, timestamp='123') can_send_version.assert_called_once_with('3.3') @ddt.data('3.0', '3.10') @mock.patch('oslo_messaging.RPCClient.can_send_version') def test_create_volume(self, version, can_send_version): can_send_version.side_effect = lambda x: x == version create_worker_mock = self.mock_object(self.fake_volume, 'create_worker') self._test_rpc_api('create_volume', rpc_method='cast', volume=self.fake_volume, snapshot_id=fake_constants.SNAPSHOT_ID, image_id=fake_constants.IMAGE_ID, backup_id=fake_constants.BACKUP_ID, request_spec=self.fake_rs_obj, filter_properties=self.fake_fp_dict) create_worker_mock.assert_called_once() can_send_version.assert_called_once_with('3.10') @mock.patch('oslo_messaging.RPCClient.can_send_version', return_value=True) def test_create_snapshot(self, can_send_version_mock): self._test_rpc_api('create_snapshot', rpc_method='cast', volume='fake_volume', snapshot='fake_snapshot', backend='fake_backend', request_spec={'snapshot_id': self.fake_snapshot.id}, filter_properties=None) @mock.patch('oslo_messaging.RPCClient.can_send_version', return_value=False) def test_create_snapshot_capped(self, can_send_version_mock): self.assertRaises(exception.ServiceTooOld, self._test_rpc_api, 'create_snapshot', rpc_method='cast', volume=self.fake_volume, snapshot=self.fake_snapshot, backend='fake_backend', request_spec=self.fake_rs_obj, version='3.5') @mock.patch('oslo_messaging.RPCClient.can_send_version', return_value=True) def test_manage_existing_snapshot(self, can_send_version_mock): self._test_rpc_api('manage_existing_snapshot', rpc_method='cast', volume='fake_volume', snapshot='fake_snapshot', ref='fake_ref', request_spec={'snapshot_id': self.fake_snapshot.id}, filter_properties=None) @mock.patch('oslo_messaging.RPCClient.can_send_version', return_value=False) def test_manage_existing_snapshot_capped(self, can_send_version_mock): self.assertRaises(exception.ServiceTooOld, self._test_rpc_api, 'manage_existing_snapshot', rpc_method='cast', volume=self.fake_volume, snapshot=self.fake_snapshot, ref='fake_ref', 
request_spec={'snapshot_id': self.fake_snapshot.id, 'ref': 'fake_ref'}, filter_properties=None, version='3.10') @mock.patch('oslo_messaging.RPCClient.can_send_version', return_value=True) def test_notify_service_capabilities_backend(self, can_send_version_mock): """Test sending new backend by RPC instead of old host parameter.""" capabilities = {'host': 'fake_host', 'total': '10.01', } with mock.patch('oslo_utils.timeutils.utcnow', return_value=datetime(1970, 1, 1)): self._test_rpc_api('notify_service_capabilities', rpc_method='cast', service_name='fake_name', backend='fake_host', capabilities=capabilities, timestamp='1970-01-01T00:00:00.000000', version='3.5') @mock.patch('oslo_messaging.RPCClient.can_send_version', side_effect=(True, False)) def test_notify_service_capabilities_host(self, can_send_version_mock): """Test sending old host RPC parameter instead of backend.""" capabilities = {'host': 'fake_host', 'total': '10.01', } self._test_rpc_api('notify_service_capabilities', rpc_method='cast', service_name='fake_name', server='fake_host', expected_kwargs_diff={'host': 'fake_host'}, backend='fake_host', capabilities=capabilities, version='3.1') @mock.patch('oslo_messaging.RPCClient.can_send_version', return_value=False) def test_notify_service_capabilities_capped(self, can_send_version_mock): capabilities = {'host': 'fake_host', 'total': '10.01', } self.assertRaises(exception.ServiceTooOld, self._test_rpc_api, 'notify_service_capabilities', rpc_method='cast', service_name='fake_name', backend='fake_host', server='fake_host', # ignore_for_method=['host'], # ignore_for_rpc=['backend'], capabilities=capabilities, version='3.1') @mock.patch('oslo_messaging.RPCClient.can_send_version') def test_migrate_volume(self, can_send_version): create_worker_mock = self.mock_object(self.fake_volume, 'create_worker') self._test_rpc_api('migrate_volume', rpc_method='cast', backend='host', force_copy=True, request_spec='fake_request_spec', filter_properties='filter_properties', volume=self.fake_volume, version='3.3') create_worker_mock.assert_not_called() def test_retype(self): self._test_rpc_api('retype', rpc_method='cast', request_spec=self.fake_rs_dict, filter_properties=self.fake_fp_dict, volume=self.fake_volume) def test_manage_existing(self): self._test_rpc_api('manage_existing', rpc_method='cast', request_spec=self.fake_rs_dict, filter_properties=self.fake_fp_dict, volume=self.fake_volume) @mock.patch('oslo_messaging.RPCClient.can_send_version', return_value=False) def test_extend_volume_capped(self, can_send_version_mock): self.assertRaises(exception.ServiceTooOld, self._test_rpc_api, 'extend_volume', rpc_method='cast', request_spec='fake_request_spec', filter_properties='filter_properties', volume=self.fake_volume, new_size=4, reservations=['RESERVATIONS'], version='3.0') @mock.patch('oslo_messaging.RPCClient.can_send_version', return_value=True) def test_extend_volume(self, can_send_version_mock): create_worker_mock = self.mock_object(self.fake_volume, 'create_worker') self._test_rpc_api('extend_volume', rpc_method='cast', request_spec='fake_request_spec', filter_properties='filter_properties', volume=self.fake_volume, new_size=4, reservations=['RESERVATIONS']) create_worker_mock.assert_not_called() def test_get_pools(self): self._test_rpc_api('get_pools', rpc_method='call', filters=None, retval=[{ 'name': 'fake_pool', 'capabilities': {}, }]) def test_create_group(self): self._test_rpc_api('create_group', rpc_method='cast', group='group', group_spec=self.fake_rs_dict, 
request_spec_list=[self.fake_rs_dict], group_filter_properties=[self.fake_fp_dict], filter_properties_list=[self.fake_fp_dict]) @ddt.data(('work_cleanup', 'myhost', None), ('work_cleanup', 'myhost', 'mycluster'), ('do_cleanup', 'myhost', None), ('do_cleanup', 'myhost', 'mycluster')) @ddt.unpack @mock.patch('cinder.rpc.get_client') def test_cleanup(self, method, host, cluster, get_client): cleanup_request = objects.CleanupRequest(self.context, host=host, cluster_name=cluster) rpcapi = scheduler_rpcapi.SchedulerAPI() getattr(rpcapi, method)(self.context, cleanup_request) prepare = get_client.return_value.prepare prepare.assert_called_once_with( version='3.4') rpc_call = 'cast' if method == 'do_cleanup' else 'call' getattr(prepare.return_value, rpc_call).assert_called_once_with( self.context, method, cleanup_request=cleanup_request) @ddt.data('do_cleanup', 'work_cleanup') def test_cleanup_too_old(self, method): cleanup_request = objects.CleanupRequest(self.context) rpcapi = scheduler_rpcapi.SchedulerAPI() with mock.patch.object(rpcapi.client, 'can_send_version', return_value=False) as can_send_mock: self.assertRaises(exception.ServiceTooOld, getattr(rpcapi, method), self.context, cleanup_request) can_send_mock.assert_called_once_with('3.4') @mock.patch('oslo_messaging.RPCClient.can_send_version', mock.Mock()) def test_set_log_levels(self): service = objects.Service(self.context, host='host1') self._test_rpc_api('set_log_levels', rpc_method='cast', server=service.host, service=service, log_request='log_request', version='3.7') @mock.patch('oslo_messaging.RPCClient.can_send_version', mock.Mock()) def test_get_log_levels(self): service = objects.Service(self.context, host='host1') self._test_rpc_api('get_log_levels', rpc_method='call', server=service.host, service=service, log_request='log_request', version='3.7') @mock.patch('oslo_messaging.RPCClient.can_send_version') def test_create_backup(self, can_send_version): self._test_rpc_api('create_backup', rpc_method='cast', backup=self.fake_backup_dict) can_send_version.assert_called_once_with('3.12') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/scheduler/test_scheduler.py0000664000175000017500000010713100000000000024024 0ustar00zuulzuul00000000000000# Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""Tests For Scheduler.""" import collections import copy from datetime import datetime from unittest import mock import ddt from oslo_config import cfg from cinder.common import constants from cinder import context from cinder import exception from cinder.message import message_field from cinder import objects from cinder.scheduler import driver from cinder.scheduler import manager from cinder.tests.unit.backup import fake_backup from cinder.tests.unit import fake_constants as fake from cinder.tests.unit import fake_volume from cinder.tests.unit.scheduler import fakes as fake_scheduler from cinder.tests.unit import test from cinder.tests.unit import utils as tests_utils CONF = cfg.CONF @ddt.ddt class SchedulerManagerTestCase(test.TestCase): """Test case for scheduler manager.""" manager_cls = manager.SchedulerManager driver_cls = driver.Scheduler driver_cls_name = 'cinder.scheduler.driver.Scheduler' class AnException(Exception): pass def setUp(self): super(SchedulerManagerTestCase, self).setUp() self.flags(scheduler_driver=self.driver_cls_name) # the host_manager injected into the scheduler driver is controlled # via configuration self.flags(scheduler_host_manager='cinder.tests.unit.scheduler.fakes.' 'FakeHostManager') self.manager = self.manager_cls() self.manager._startup_delay = False self.context = context.get_admin_context() self.topic = 'fake_topic' self.fake_args = (1, 2, 3) self.fake_kwargs = {'cat': 'meow', 'dog': 'woof'} def test_1_correct_init(self): # Correct scheduler driver manager = self.manager self.assertIsInstance(manager.driver, self.driver_cls) @mock.patch('cinder.scheduler.driver.Scheduler.is_first_receive') @mock.patch('eventlet.sleep') @mock.patch('cinder.volume.rpcapi.VolumeAPI.publish_service_capabilities') def test_init_host_with_rpc_delay_after_3_tries(self, publish_capabilities_mock, sleep_mock, is_first_receive_mock): self.manager._startup_delay = True is_first_receive_mock.side_effect = [False, False, True] self.manager.init_host_with_rpc() publish_capabilities_mock.assert_called_once_with(mock.ANY) calls = [mock.call(1)] * 2 sleep_mock.assert_has_calls(calls) self.assertEqual(2, sleep_mock.call_count) self.assertFalse(self.manager._startup_delay) @mock.patch('cinder.scheduler.driver.Scheduler.is_first_receive') @mock.patch('eventlet.sleep') @mock.patch('cinder.volume.rpcapi.VolumeAPI.publish_service_capabilities') @ddt.data(71, 17) def test_init_host_with_rpc_delay_uses_new_config( self, new_cfg_value, publish_capabilities_mock, sleep_mock, is_first_receive_mock): # previously used CONF.periodic_interval; see Bug #1828748 new_cfg_name = 'scheduler_driver_init_wait_time' self.addCleanup(CONF.clear_override, new_cfg_name) CONF.set_override(new_cfg_name, new_cfg_value) is_first_receive_mock.return_value = False self.manager.init_host_with_rpc() self.assertEqual(new_cfg_value, sleep_mock.call_count) @mock.patch('cinder.scheduler.driver.Scheduler.backend_passes_filters') @mock.patch( 'cinder.scheduler.host_manager.BackendState.consume_from_volume') @mock.patch('cinder.volume.rpcapi.VolumeAPI.manage_existing_snapshot') def test_manage_existing_snapshot(self, mock_manage_existing_snapshot, mock_consume, mock_backend_passes): volume = fake_volume.fake_volume_obj(self.context, **{'size': 1}) fake_backend = fake_scheduler.FakeBackendState('host1', {}) mock_backend_passes.return_value = fake_backend self.manager.manage_existing_snapshot(self.context, volume, 'fake_snapshot', 'fake_ref', None) mock_consume.assert_called_once_with({'size': 1}) 
mock_manage_existing_snapshot.assert_called_once_with( self.context, 'fake_snapshot', 'fake_ref', volume.service_topic_queue) @mock.patch('cinder.objects.service.Service.get_minimum_rpc_version') @mock.patch('cinder.objects.service.Service.get_minimum_obj_version') @mock.patch('cinder.rpc.LAST_RPC_VERSIONS', {'cinder-volume': '1.3'}) def test_reset(self, get_min_obj, get_min_rpc): old_version = objects.base.OBJ_VERSIONS.versions[-2] with mock.patch('cinder.rpc.LAST_OBJ_VERSIONS', {'cinder-volume': old_version, 'cinder-scheduler': old_version, 'cinder-backup': old_version}): mgr = self.manager_cls() volume_rpcapi = mgr.driver.volume_rpcapi self.assertEqual('1.3', volume_rpcapi.client.version_cap) self.assertEqual(old_version, volume_rpcapi.client.serializer._base.version_cap) get_min_obj.return_value = self.latest_ovo_version mgr.reset() volume_rpcapi = mgr.driver.volume_rpcapi self.assertEqual(get_min_rpc.return_value, volume_rpcapi.client.version_cap) self.assertEqual(get_min_obj.return_value, volume_rpcapi.client.serializer._base.version_cap) self.assertIsNone(volume_rpcapi.client.serializer._base.manifest) @mock.patch('cinder.message.api.API.cleanup_expired_messages') def test_clean_expired_messages(self, mock_clean): self.manager._clean_expired_messages(self.context) mock_clean.assert_called_once_with(self.context) @mock.patch('cinder.scheduler.driver.Scheduler.backend_passes_filters') @mock.patch( 'cinder.scheduler.host_manager.BackendState.consume_from_volume') @mock.patch('cinder.volume.rpcapi.VolumeAPI.extend_volume') def test_extend_volume(self, mock_extend, mock_consume, mock_backend_passes): volume = fake_volume.fake_volume_obj(self.context, **{'size': 1}) fake_backend = fake_scheduler.FakeBackendState('host1', {}) mock_backend_passes.return_value = fake_backend self.manager.extend_volume(self.context, volume, 2, 'fake_reservation') mock_consume.assert_called_once_with({'size': 1}) mock_extend.assert_called_once_with( self.context, volume, 2, 'fake_reservation') @ddt.data({'key': 'value'}, objects.RequestSpec(volume_id=fake.VOLUME2_ID)) def test_append_operation_decorator(self, rs): @manager.append_operation_type() def _fake_schedule_method1(request_spec=None): return request_spec @manager.append_operation_type(name='_fake_schedule_method22') def _fake_schedule_method2(request_spec=None): return request_spec @manager.append_operation_type() def _fake_schedule_method3(request_spec2=None): return request_spec2 result1 = _fake_schedule_method1(request_spec=copy.deepcopy(rs)) result2 = _fake_schedule_method2(request_spec=copy.deepcopy(rs)) result3 = _fake_schedule_method3(request_spec2=copy.deepcopy(rs)) self.assertEqual('_fake_schedule_method1', result1['operation']) self.assertEqual('_fake_schedule_method22', result2['operation']) self.assertEqual(rs, result3) @ddt.data([{'key1': 'value1'}, {'key1': 'value2'}], [objects.RequestSpec(volume_id=fake.VOLUME_ID), objects.RequestSpec(volume_id=fake.VOLUME2_ID)]) def test_append_operation_decorator_with_list(self, rs_list): @manager.append_operation_type() def _fake_schedule_method(request_spec_list=None): return request_spec_list result1 = _fake_schedule_method(request_spec_list=rs_list) for rs in result1: self.assertEqual('_fake_schedule_method', rs['operation']) @ddt.data('available', 'in-use') @mock.patch('cinder.scheduler.driver.Scheduler.backend_passes_filters') @mock.patch( 'cinder.scheduler.host_manager.BackendState.consume_from_volume') @mock.patch('cinder.volume.rpcapi.VolumeAPI.extend_volume') 
@mock.patch('cinder.quota.QUOTAS.rollback') @mock.patch('cinder.message.api.API.create') def test_extend_volume_no_valid_host(self, status, mock_create, mock_rollback, mock_extend, mock_consume, mock_backend_passes): volume = fake_volume.fake_volume_obj(self.context, **{'size': 1, 'previous_status': status}) no_valid_backend = exception.NoValidBackend(reason='') mock_backend_passes.side_effect = [no_valid_backend] with mock.patch.object(self.manager, '_set_volume_state_and_notify') as mock_notify: self.manager.extend_volume(self.context, volume, 2, 'fake_reservation') mock_notify.assert_called_once_with( 'extend_volume', {'volume_state': {'status': status, 'previous_status': None}}, self.context, no_valid_backend, None) mock_rollback.assert_called_once_with( self.context, 'fake_reservation', project_id=volume.project_id) mock_consume.assert_not_called() mock_extend.assert_not_called() mock_create.assert_called_once_with( self.context, message_field.Action.EXTEND_VOLUME, resource_uuid=volume.id, exception=no_valid_backend) @mock.patch('cinder.quota.QuotaEngine.expire') def test_clean_expired_reservation(self, mock_clean): self.manager._clean_expired_reservation(self.context) mock_clean.assert_called_once_with(self.context) @mock.patch('cinder.scheduler.driver.Scheduler.' 'update_service_capabilities') def test_update_service_capabilities_empty_dict(self, _mock_update_cap): # Test no capabilities passes empty dictionary service = 'fake_service' host = 'fake_host' self.manager.update_service_capabilities(self.context, service_name=service, host=host) _mock_update_cap.assert_called_once_with(service, host, {}, None, None) @mock.patch('cinder.scheduler.driver.Scheduler.' 'update_service_capabilities') def test_update_service_capabilities_correct(self, _mock_update_cap): # Test capabilities passes correctly service = 'fake_service' host = 'fake_host' capabilities = {'fake_capability': 'fake_value'} self.manager.update_service_capabilities(self.context, service_name=service, host=host, capabilities=capabilities) _mock_update_cap.assert_called_once_with(service, host, capabilities, None, None) @mock.patch('cinder.scheduler.driver.Scheduler.' 'notify_service_capabilities') def test_notify_service_capabilities_no_timestamp(self, _mock_notify_cap): """Test old interface that receives host.""" service = 'volume' host = 'fake_host' capabilities = {'fake_capability': 'fake_value'} self.manager.notify_service_capabilities(self.context, service_name=service, host=host, capabilities=capabilities) _mock_notify_cap.assert_called_once_with(service, host, capabilities, None) @mock.patch('cinder.scheduler.driver.Scheduler.' 'notify_service_capabilities') def test_notify_service_capabilities_timestamp(self, _mock_notify_cap): """Test new interface that receives backend and timestamp.""" service = 'volume' backend = 'fake_cluster' capabilities = {'fake_capability': 'fake_value'} timestamp = '1970-01-01T00:00:00.000000' self.manager.notify_service_capabilities(self.context, service_name=service, backend=backend, capabilities=capabilities, timestamp=timestamp) _mock_notify_cap.assert_called_once_with(service, backend, capabilities, datetime(1970, 1, 1)) @mock.patch('cinder.scheduler.driver.Scheduler.schedule_create_volume') @mock.patch('cinder.message.api.API.create') @mock.patch('cinder.db.volume_update') def test_create_volume_exception_puts_volume_in_error_state( self, _mock_volume_update, _mock_message_create, _mock_sched_create): # Test NoValidBackend exception behavior for create_volume. 
# Puts the volume in 'error' state and eats the exception. _mock_sched_create.side_effect = exception.NoValidBackend(reason="") volume = fake_volume.fake_volume_obj(self.context, use_quota=True) request_spec = {'volume_id': volume.id, 'volume': {'id': volume.id, '_name_id': None, 'metadata': {}, 'admin_metadata': {}, 'glance_metadata': {}}} request_spec_obj = objects.RequestSpec.from_primitives(request_spec) self.manager.create_volume(self.context, volume, request_spec=request_spec_obj, filter_properties={}) _mock_volume_update.assert_called_once_with(self.context, volume.id, {'status': 'error'}) _mock_sched_create.assert_called_once_with(self.context, request_spec_obj, {}) _mock_message_create.assert_called_once_with( self.context, message_field.Action.SCHEDULE_ALLOCATE_VOLUME, resource_uuid=volume.id, exception=mock.ANY) @mock.patch('cinder.scheduler.driver.Scheduler.schedule_create_volume') @mock.patch('eventlet.sleep') def test_create_volume_no_delay(self, _mock_sleep, _mock_sched_create): volume = fake_volume.fake_volume_obj(self.context) request_spec = {'volume_id': volume.id} request_spec_obj = objects.RequestSpec.from_primitives(request_spec) self.manager.create_volume(self.context, volume, request_spec=request_spec_obj, filter_properties={}) _mock_sched_create.assert_called_once_with(self.context, request_spec_obj, {}) self.assertFalse(_mock_sleep.called) @mock.patch('cinder.scheduler.driver.Scheduler.schedule_create_volume') @mock.patch('eventlet.sleep') def test_create_volume_set_worker(self, _mock_sleep, _mock_sched_create): """Make sure that the worker is created when creating a volume.""" volume = tests_utils.create_volume(self.context, status='creating') request_spec = {'volume_id': volume.id} self.manager.create_volume(self.context, volume, request_spec=request_spec, filter_properties={}) volume.set_worker.assert_called_once_with() @mock.patch('cinder.scheduler.driver.Scheduler.schedule_create_volume') @mock.patch('cinder.scheduler.driver.Scheduler.is_ready') @mock.patch('eventlet.sleep') def test_create_volume_delay_scheduled_after_3_tries(self, _mock_sleep, _mock_is_ready, _mock_sched_create): self.manager._startup_delay = True volume = fake_volume.fake_volume_obj(self.context) request_spec = {'volume_id': volume.id} request_spec_obj = objects.RequestSpec.from_primitives(request_spec) _mock_is_ready.side_effect = [False, False, True] self.manager.create_volume(self.context, volume, request_spec=request_spec_obj, filter_properties={}) _mock_sched_create.assert_called_once_with(self.context, request_spec_obj, {}) calls = [mock.call(1)] * 2 _mock_sleep.assert_has_calls(calls) self.assertEqual(2, _mock_sleep.call_count) @mock.patch('cinder.scheduler.driver.Scheduler.schedule_create_volume') @mock.patch('cinder.scheduler.driver.Scheduler.is_ready') @mock.patch('eventlet.sleep') def test_create_volume_delay_scheduled_in_1_try(self, _mock_sleep, _mock_is_ready, _mock_sched_create): self.manager._startup_delay = True volume = fake_volume.fake_volume_obj(self.context) request_spec = {'volume_id': volume.id} request_spec_obj = objects.RequestSpec.from_primitives(request_spec) _mock_is_ready.return_value = True self.manager.create_volume(self.context, volume, request_spec=request_spec_obj, filter_properties={}) _mock_sched_create.assert_called_once_with(self.context, request_spec_obj, {}) self.assertFalse(_mock_sleep.called) @mock.patch('cinder.db.volume_get') @mock.patch('cinder.scheduler.driver.Scheduler.backend_passes_filters') @mock.patch('cinder.db.volume_update') def 
test_migrate_volume_exception_returns_volume_state( self, _mock_volume_update, _mock_backend_passes, _mock_volume_get): # Test NoValidBackend exception behavior for migrate_volume_to_host. # Puts the volume in 'error_migrating' state and eats the exception. fake_updates = {'migration_status': 'error'} self._test_migrate_volume_exception_returns_volume_state( _mock_volume_update, _mock_backend_passes, _mock_volume_get, 'available', fake_updates) @mock.patch('cinder.db.volume_get') @mock.patch('cinder.scheduler.driver.Scheduler.backend_passes_filters') @mock.patch('cinder.db.volume_update') def test_migrate_volume_exception_returns_volume_state_maintenance( self, _mock_volume_update, _mock_backend_passes, _mock_volume_get): fake_updates = {'status': 'available', 'migration_status': 'error'} self._test_migrate_volume_exception_returns_volume_state( _mock_volume_update, _mock_backend_passes, _mock_volume_get, 'maintenance', fake_updates) def _test_migrate_volume_exception_returns_volume_state( self, _mock_volume_update, _mock_backend_passes, _mock_volume_get, status, fake_updates): volume = tests_utils.create_volume(self.context, status=status, previous_status='available') fake_volume_id = volume.id request_spec = {'volume_id': fake_volume_id} _mock_backend_passes.side_effect = exception.NoValidBackend(reason="") _mock_volume_get.return_value = volume self.manager.migrate_volume_to_host(self.context, volume, 'host', True, request_spec=request_spec, filter_properties={}) _mock_volume_update.assert_called_once_with(self.context, fake_volume_id, fake_updates) _mock_backend_passes.assert_called_once_with(self.context, 'host', request_spec, {}) @mock.patch('cinder.db.volume_update') @mock.patch('cinder.db.volume_attachment_get_all_by_volume_id') @mock.patch('cinder.quota.QUOTAS.rollback') def test_retype_volume_exception_returns_volume_state( self, quota_rollback, _mock_vol_attachment_get, _mock_vol_update): # Test NoValidBackend exception behavior for retype. # Puts the volume in original state and eats the exception. 
volume = tests_utils.create_volume(self.context, status='retyping', previous_status='in-use') instance_uuid = '12345678-1234-5678-1234-567812345678' volume_attach = tests_utils.attach_volume(self.context, volume.id, instance_uuid, None, '/dev/fake') _mock_vol_attachment_get.return_value = [volume_attach] reservations = mock.sentinel.reservations request_spec = {'volume_id': volume.id, 'volume_type': {'id': 3}, 'migration_policy': 'on-demand', 'quota_reservations': reservations} _mock_vol_update.return_value = {'status': 'in-use'} _mock_find_retype_backend = mock.Mock( side_effect=exception.NoValidBackend(reason="")) orig_retype = self.manager.driver.find_retype_backend self.manager.driver.find_retype_backend = _mock_find_retype_backend self.manager.retype(self.context, volume, request_spec=request_spec, filter_properties={}) _mock_find_retype_backend.assert_called_once_with(self.context, request_spec, {}, 'on-demand') quota_rollback.assert_called_once_with(self.context, reservations) _mock_vol_update.assert_called_once_with(self.context, volume.id, {'status': 'in-use'}) self.manager.driver.find_retype_host = orig_retype def test_do_cleanup(self): vol = tests_utils.create_volume(self.context, status='creating') self.manager._do_cleanup(self.context, vol) vol.refresh() self.assertEqual('error', vol.status) @mock.patch('cinder.scheduler.rpcapi.SchedulerAPI' '.determine_rpc_version_cap', mock.Mock(return_value='2.0')) def test_upgrading_cloud(self): self.assertTrue(self.manager.upgrading_cloud) @mock.patch('cinder.scheduler.rpcapi.SchedulerAPI' '.determine_rpc_version_cap') def test_upgrading_cloud_not(self, cap_mock): cap_mock.return_value = self.manager.RPC_API_VERSION self.assertFalse(self.manager.upgrading_cloud) def test_cleanup_destination_scheduler(self): service = objects.Service(id=1, host='hostname', binary='cinder-scheduler') result = self.manager._cleanup_destination(None, service) expected = self.manager.sch_api.do_cleanup, None, service.host self.assertEqual(expected, result) def test_cleanup_destination_volume(self): service = objects.Service(id=1, host='hostname', cluster_name=None, binary=constants.VOLUME_BINARY) result = self.manager._cleanup_destination(None, service) expected = self.manager.volume_api.do_cleanup, service, service.host self.assertEqual(expected, result) def test_cleanup_destination_volume_cluster_cache_hit(self): cluster = objects.Cluster(id=1, name='mycluster', binary=constants.VOLUME_BINARY) service = objects.Service(id=2, host='hostname', cluster_name=cluster.name, binary=constants.VOLUME_BINARY) cluster_cache = {'cinder-volume': {'mycluster': cluster}} result = self.manager._cleanup_destination(cluster_cache, service) expected = self.manager.volume_api.do_cleanup, cluster, cluster.name self.assertEqual(expected, result) @mock.patch('cinder.objects.Cluster.get_by_id') def test_cleanup_destination_volume_cluster_cache_miss(self, get_mock): cluster = objects.Cluster(id=1, name='mycluster', binary=constants.VOLUME_BINARY) service = objects.Service(self.context, id=2, host='hostname', cluster_name=cluster.name, binary=constants.VOLUME_BINARY) get_mock.return_value = cluster cluster_cache = collections.defaultdict(dict) result = self.manager._cleanup_destination(cluster_cache, service) expected = self.manager.volume_api.do_cleanup, cluster, cluster.name self.assertEqual(expected, result) @mock.patch('cinder.scheduler.manager.SchedulerManager.upgrading_cloud') def test_work_cleanup_upgrading(self, upgrading_mock): cleanup_request = 
objects.CleanupRequest(host='myhost') upgrading_mock.return_value = True self.assertRaises(exception.UnavailableDuringUpgrade, self.manager.work_cleanup, self.context, cleanup_request) @mock.patch('cinder.objects.Cluster.is_up', True) @mock.patch('cinder.objects.Service.is_up', False) @mock.patch('cinder.scheduler.rpcapi.SchedulerAPI.do_cleanup') @mock.patch('cinder.volume.rpcapi.VolumeAPI.do_cleanup') @mock.patch('cinder.objects.ServiceList.get_all') def test_work_cleanup(self, get_mock, vol_clean_mock, sch_clean_mock): args = dict(service_id=1, cluster_name='cluster_name', host='host', binary=constants.VOLUME_BINARY, is_up=False, disabled=True, resource_id=fake.VOLUME_ID, resource_type='Volume') cluster = objects.Cluster(id=1, name=args['cluster_name'], binary=constants.VOLUME_BINARY) services = [objects.Service(self.context, id=2, host='hostname', cluster_name=cluster.name, binary=constants.VOLUME_BINARY, cluster=cluster), objects.Service(self.context, id=3, host='hostname', cluster_name=None, binary=constants.SCHEDULER_BINARY), objects.Service(self.context, id=4, host='hostname', cluster_name=None, binary=constants.VOLUME_BINARY)] get_mock.return_value = services cleanup_request = objects.CleanupRequest(self.context, **args) res = self.manager.work_cleanup(self.context, cleanup_request) self.assertEqual((services[:2], services[2:]), res) self.assertEqual(1, vol_clean_mock.call_count) self.assertEqual(1, sch_clean_mock.call_count) @mock.patch('cinder.backup.rpcapi.BackupAPI.create_backup') @mock.patch('cinder.objects.backup.Backup.save') @mock.patch('cinder.scheduler.driver.Scheduler.get_backup_host') @mock.patch('cinder.db.volume_get') def test_create_backup(self, mock_volume_get, mock_host, mock_save, mock_create): volume = fake_volume.fake_db_volume() mock_volume_get.return_value = volume mock_host.return_value = 'cinder-backup' backup = fake_backup.fake_backup_obj(self.context, host=None) self.manager.create_backup(self.context, backup=backup) mock_save.assert_called_once() mock_host.assert_called_once_with(volume, None) mock_volume_get.assert_called_once_with(self.context, backup.volume_id) mock_create.assert_called_once_with(self.context, backup) @mock.patch('cinder.backup.rpcapi.BackupAPI.create_backup') @mock.patch('cinder.objects.backup.Backup.save') @mock.patch('cinder.scheduler.driver.Scheduler.get_backup_host') @mock.patch('cinder.db.volume_get') def test_create_backup_with_host(self, mock_volume_get, mock_host, mock_save, mock_create): volume = fake_volume.fake_db_volume() mock_volume_get.return_value = volume mock_host.return_value = 'cinder-backup' backup = fake_backup.fake_backup_obj(self.context, host='testhost') self.manager.create_backup(self.context, backup=backup) # With the ready-made host, we should skip # looking up and updating the host: mock_save.assert_not_called() mock_host.assert_not_called() mock_volume_get.assert_called_once_with(self.context, backup.volume_id) mock_create.assert_called_once_with(self.context, backup) @mock.patch('cinder.volume.volume_utils.update_backup_error') @mock.patch('cinder.scheduler.driver.Scheduler.get_backup_host') @mock.patch('cinder.db.volume_get') @mock.patch('cinder.db.volume_update') def test_create_backup_no_service(self, mock_volume_update, mock_volume_get, mock_host, mock_error): volume = fake_volume.fake_db_volume() volume['status'] = 'backing-up' volume['previous_status'] = 'available' mock_volume_get.return_value = volume mock_host.side_effect = exception.ServiceNotFound( service_id='cinder-volume') backup = 
fake_backup.fake_backup_obj(self.context, host=None) self.manager.create_backup(self.context, backup=backup) mock_host.assert_called_once_with(volume, None) mock_volume_get.assert_called_once_with(self.context, backup.volume_id) mock_volume_update.assert_called_once_with( self.context, backup.volume_id, {'status': 'available', 'previous_status': 'backing-up'}) mock_error.assert_called_once_with( backup, 'Service not found for creating backup.') def test_get_az(self): volume = fake_volume.fake_db_volume() volume['status'] = 'backing-up' volume['previous_status'] = 'available' # this next line looks a little suspect, but the FakeHostManager # is a HostManager and does not override any relevant functions hm = fake_scheduler.FakeHostManager() az = hm.get_az(volume, availability_zone='test_az') self.assertEqual('test_az', az) class SchedulerTestCase(test.TestCase): """Test case for base scheduler driver class.""" # So we can subclass this test and re-use tests if we need. driver_cls = driver.Scheduler def setUp(self): super(SchedulerTestCase, self).setUp() # the host_manager injected into the scheduler driver is controlled # via configuration self.flags(scheduler_host_manager='cinder.tests.unit.scheduler.fakes.' 'FakeHostManager') self.driver = self.driver_cls() self.context = context.RequestContext(fake.USER_ID, fake.PROJECT_ID) self.topic = 'fake_topic' @mock.patch('cinder.scheduler.driver.Scheduler.' 'update_service_capabilities') def test_update_service_capabilities(self, _mock_update_cap): service_name = 'fake_service' host = 'fake_host' capabilities = {'fake_capability': 'fake_value'} self.driver.update_service_capabilities(service_name, host, capabilities, None) _mock_update_cap.assert_called_once_with(service_name, host, capabilities, None) @mock.patch('cinder.scheduler.host_manager.HostManager.' 'has_all_capabilities', return_value=False) def test_is_ready(self, _mock_has_caps): ready = self.driver.is_ready() _mock_has_caps.assert_called_once_with() self.assertFalse(ready) class SchedulerDriverBaseTestCase(SchedulerTestCase): """Test schedule driver class. Test cases for base scheduler driver class methods that will fail if the driver is changed. 
""" def test_unimplemented_schedule(self): fake_args = (1, 2, 3) fake_kwargs = {'cat': 'meow'} self.assertRaises(NotImplementedError, self.driver.schedule, self.context, self.topic, 'schedule_something', *fake_args, **fake_kwargs) class SchedulerDriverModuleTestCase(test.TestCase): """Test case for scheduler driver module methods.""" def setUp(self): super(SchedulerDriverModuleTestCase, self).setUp() self.context = context.RequestContext(fake.USER_ID, fake.PROJECT_ID) @mock.patch('cinder.db.volume_update') @mock.patch('cinder.objects.volume.Volume.get_by_id') def test_volume_host_update_db(self, _mock_volume_get, _mock_vol_update): volume = fake_volume.fake_volume_obj(self.context, use_quota=True) _mock_volume_get.return_value = volume driver.volume_update_db(self.context, volume.id, 'fake_host', 'fake_cluster') scheduled_at = volume.scheduled_at.replace(tzinfo=None) _mock_volume_get.assert_called_once_with(self.context, volume.id) _mock_vol_update.assert_called_once_with( self.context, volume.id, {'host': 'fake_host', 'cluster_name': 'fake_cluster', 'scheduled_at': scheduled_at, 'availability_zone': None}) @mock.patch('cinder.db.volume_update') @mock.patch('cinder.objects.volume.Volume.get_by_id') def test_volume_host_update_db_vol_present(self, _mock_volume_get, _mock_vol_update): volume = fake_volume.fake_volume_obj(self.context, use_quota=True) driver.volume_update_db(self.context, volume.id, 'fake_host', 'fake_cluster', volume=volume) scheduled_at = volume.scheduled_at.replace(tzinfo=None) _mock_volume_get.assert_not_called() _mock_vol_update.assert_called_once_with( self.context, volume.id, {'host': 'fake_host', 'cluster_name': 'fake_cluster', 'scheduled_at': scheduled_at, 'availability_zone': None}) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/scheduler/test_scheduler_options.py0000664000175000017500000001171100000000000025575 0ustar00zuulzuul00000000000000# Copyright 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Tests For PickledScheduler. """ import datetime import io from oslo_serialization import jsonutils from cinder.scheduler import scheduler_options from cinder.tests.unit import test class FakeSchedulerOptions(scheduler_options.SchedulerOptions): def __init__(self, last_checked, now, file_old, file_now, data, filedata): super(FakeSchedulerOptions, self).__init__() # Change internals ... self.last_modified = file_old self.last_checked = last_checked self.data = data # For overrides ... 
self._time_now = now self._file_now = file_now self._file_data = filedata self.file_was_loaded = False def _get_file_timestamp(self, filename): return self._file_now def _get_file_handle(self, filename): self.file_was_loaded = True return io.StringIO(self._file_data) def _get_time_now(self): return self._time_now class SchedulerOptionsTestCase(test.TestCase): def test_get_configuration_first_time_no_flag(self): last_checked = None now = datetime.datetime(2012, 1, 1, 1, 1, 1) file_old = None file_now = datetime.datetime(2012, 1, 1, 1, 1, 1) data = dict(a=1, b=2, c=3) jdata = jsonutils.dumps(data) fake = FakeSchedulerOptions(last_checked, now, file_old, file_now, {}, jdata) self.assertEqual({}, fake.get_configuration()) self.assertFalse(fake.file_was_loaded) def test_get_configuration_first_time_empty_file(self): last_checked = None now = datetime.datetime(2012, 1, 1, 1, 1, 1) file_old = None file_now = datetime.datetime(2012, 1, 1, 1, 1, 1) jdata = "" fake = FakeSchedulerOptions(last_checked, now, file_old, file_now, {}, jdata) self.assertEqual({}, fake.get_configuration('foo.json')) self.assertTrue(fake.file_was_loaded) def test_get_configuration_first_time_happy_day(self): last_checked = None now = datetime.datetime(2012, 1, 1, 1, 1, 1) file_old = None file_now = datetime.datetime(2012, 1, 1, 1, 1, 1) data = dict(a=1, b=2, c=3) jdata = jsonutils.dumps(data) fake = FakeSchedulerOptions(last_checked, now, file_old, file_now, {}, jdata) self.assertEqual(data, fake.get_configuration('foo.json')) self.assertTrue(fake.file_was_loaded) def test_get_configuration_second_time_no_change(self): last_checked = datetime.datetime(2011, 1, 1, 1, 1, 1) now = datetime.datetime(2012, 1, 1, 1, 1, 1) file_old = datetime.datetime(2012, 1, 1, 1, 1, 1) file_now = datetime.datetime(2012, 1, 1, 1, 1, 1) data = dict(a=1, b=2, c=3) jdata = jsonutils.dumps(data) fake = FakeSchedulerOptions(last_checked, now, file_old, file_now, data, jdata) self.assertEqual(data, fake.get_configuration('foo.json')) self.assertFalse(fake.file_was_loaded) def test_get_configuration_second_time_too_fast(self): last_checked = datetime.datetime(2011, 1, 1, 1, 1, 1) now = datetime.datetime(2011, 1, 1, 1, 1, 2) file_old = datetime.datetime(2012, 1, 1, 1, 1, 1) file_now = datetime.datetime(2013, 1, 1, 1, 1, 1) old_data = dict(a=1, b=2, c=3) data = dict(a=11, b=12, c=13) jdata = jsonutils.dumps(data) fake = FakeSchedulerOptions(last_checked, now, file_old, file_now, old_data, jdata) self.assertEqual(old_data, fake.get_configuration('foo.json')) self.assertFalse(fake.file_was_loaded) def test_get_configuration_second_time_change(self): last_checked = datetime.datetime(2011, 1, 1, 1, 1, 1) now = datetime.datetime(2012, 1, 1, 1, 1, 1) file_old = datetime.datetime(2012, 1, 1, 1, 1, 1) file_now = datetime.datetime(2013, 1, 1, 1, 1, 1) old_data = dict(a=1, b=2, c=3) data = dict(a=11, b=12, c=13) jdata = jsonutils.dumps(data) fake = FakeSchedulerOptions(last_checked, now, file_old, file_now, old_data, jdata) self.assertEqual(data, fake.get_configuration('foo.json')) self.assertTrue(fake.file_was_loaded) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/scheduler/test_stochastic_weight_handler.py0000664000175000017500000000372700000000000027264 0ustar00zuulzuul00000000000000# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Tests for stochastic weight handler.""" import random import ddt from cinder.scheduler import base_weight from cinder.scheduler.weights.stochastic import StochasticHostWeightHandler from cinder.tests.unit import test @ddt.ddt class StochasticWeightHandlerTestCase(test.TestCase): """Test case for StochasticHostWeightHandler.""" @ddt.data( (0.0, 'A'), (0.1, 'A'), (0.2, 'B'), (0.3, 'B'), (0.4, 'B'), (0.5, 'B'), (0.6, 'B'), (0.7, 'C'), (0.8, 'C'), (0.9, 'C'), ) @ddt.unpack def test_get_weighed_objects_correct(self, rand_value, expected_obj): self.mock_object(random, 'random', return_value=rand_value) class MapWeigher(base_weight.BaseWeigher): minval = 0 maxval = 100 def _weigh_object(self, obj, weight_map): return weight_map[obj] weight_map = {'A': 1, 'B': 3, 'C': 2} objs = sorted(weight_map.keys()) weigher_classes = [MapWeigher] handler = StochasticHostWeightHandler('fake_namespace') weighted_objs = handler.get_weighed_objects(weigher_classes, objs, weight_map) winner = weighted_objs[0].obj self.assertEqual(expected_obj, winner) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/scheduler/test_volume_number_weigher.py0000664000175000017500000001046600000000000026443 0ustar00zuulzuul00000000000000# Copyright 2014 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Tests For Volume Number Weigher. 
""" from unittest import mock from cinder.common import constants from cinder import context from cinder.db.sqlalchemy import api from cinder.scheduler import weights from cinder.tests.unit import fake_constants from cinder.tests.unit.scheduler import fakes from cinder.tests.unit import test from cinder.volume import volume_utils def fake_volume_data_get_for_host(context, host, count_only=False): host = volume_utils.extract_host(host) if host == 'host1': return 1 elif host == 'host2': return 2 elif host == 'host3': return 3 elif host == 'host4': return 4 elif host == 'host5': return 5 else: return 6 class VolumeNumberWeigherTestCase(test.TestCase): def setUp(self): super(VolumeNumberWeigherTestCase, self).setUp() uid = fake_constants.USER_ID pid = fake_constants.PROJECT_ID self.context = context.RequestContext(user_id=uid, project_id=pid, is_admin=False, read_deleted="no", overwrite=False) self.host_manager = fakes.FakeHostManager() self.weight_handler = weights.OrderedHostWeightHandler( 'cinder.scheduler.weights') def _get_weighed_host(self, hosts, weight_properties=None): if weight_properties is None: weight_properties = {'context': self.context} return self.weight_handler.get_weighed_objects( [weights.volume_number.VolumeNumberWeigher], hosts, weight_properties)[0] @mock.patch('cinder.db.sqlalchemy.api.service_get_all') def _get_all_backends(self, _mock_service_get_all, disabled=False): ctxt = context.get_admin_context() fakes.mock_host_manager_db_calls(_mock_service_get_all, disabled=disabled) backend_states = self.host_manager.get_all_backend_states(ctxt) _mock_service_get_all.assert_called_once_with( ctxt, None, # backend_match_level topic=constants.VOLUME_TOPIC, frozen=False, disabled=disabled) return backend_states def test_volume_number_weight_multiplier1(self): self.flags(volume_number_multiplier=-1.0) backend_info_list = self._get_all_backends() # host1: 1 volume Norm=0.0 # host2: 2 volumes # host3: 3 volumes # host4: 4 volumes # host5: 5 volumes Norm=-1.0 # so, host1 should win: with mock.patch.object(api, 'volume_data_get_for_host', fake_volume_data_get_for_host): weighed_host = self._get_weighed_host(backend_info_list) self.assertEqual(0.0, weighed_host.weight) self.assertEqual('host1', volume_utils.extract_host(weighed_host.obj.host)) def test_volume_number_weight_multiplier2(self): self.flags(volume_number_multiplier=1.0) backend_info_list = self._get_all_backends() # host1: 1 volume Norm=0 # host2: 2 volumes # host3: 3 volumes # host4: 4 volumes # host5: 5 volumes Norm=1 # so, host5 should win: with mock.patch.object(api, 'volume_data_get_for_host', fake_volume_data_get_for_host): weighed_host = self._get_weighed_host(backend_info_list) self.assertEqual(1.0, weighed_host.weight) self.assertEqual('host5', volume_utils.extract_host(weighed_host.obj.host)) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/scheduler/test_weights.py0000664000175000017500000000361200000000000023517 0ustar00zuulzuul00000000000000# Copyright 2011-2012 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Tests For Scheduler weights. """ from cinder.scheduler import base_weight from cinder.tests.unit import test class TestWeightHandler(test.TestCase): def test_no_multiplier(self): class FakeWeigher(base_weight.BaseWeigher): def _weigh_object(self, *args, **kwargs): pass self.assertEqual(1.0, FakeWeigher().weight_multiplier()) def test_no_weight_object(self): class FakeWeigher(base_weight.BaseWeigher): def weight_multiplier(self, *args, **kwargs): pass self.assertRaises(TypeError, FakeWeigher) def test_normalization(self): # weight_list, expected_result, minval, maxval map_ = ( ((), (), None, None), ((0.0, 0.0), (0.0, 0.0), None, None), ((1.0, 1.0), (0.0, 0.0), None, None), ((20.0, 50.0), (0.0, 1.0), None, None), ((20.0, 50.0), (0.0, 0.375), None, 100.0), ((20.0, 50.0), (0.4, 1.0), 0.0, None), ((20.0, 50.0), (0.2, 0.5), 0.0, 100.0), ) for seq, result, minval, maxval in map_: ret = base_weight.normalize(seq, minval=minval, maxval=maxval) self.assertEqual(result, tuple(ret)) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315577.2311196 cinder-27.0.0/cinder/tests/unit/targets/0000775000175000017500000000000000000000000020125 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/targets/__init__.py0000664000175000017500000000000000000000000022224 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/targets/targets_fixture.py0000664000175000017500000001107200000000000023717 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
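# --- Illustrative sketch (not the actual cinder.scheduler.base_weight code) --
# The test_normalization table above describes plain min-max normalization:
# each weight maps to (w - minval) / (maxval - minval), with the bounds taken
# from the sequence itself when not supplied and a constant sequence collapsing
# to all zeros. normalize_sketch below is an invented helper that reproduces
# those expected tuples.

def normalize_sketch(weight_list, minval=None, maxval=None):
    if not weight_list:
        return ()
    if minval is None:
        minval = min(weight_list)
    if maxval is None:
        maxval = max(weight_list)
    weight_range = maxval - minval
    if weight_range == 0:
        # Degenerate case, e.g. (1.0, 1.0) -> (0.0, 0.0) in the table above.
        return (0.0,) * len(weight_list)
    return tuple((w - minval) / weight_range for w in weight_list)


# Examples matching rows of the table above:
#   normalize_sketch((20.0, 50.0))                           -> (0.0, 1.0)
#   normalize_sketch((20.0, 50.0), maxval=100.0)             -> (0.0, 0.375)
#   normalize_sketch((20.0, 50.0), minval=0.0, maxval=100.0) -> (0.2, 0.5)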
import copy import os import shutil import tempfile from unittest import mock from oslo_utils import fileutils from oslo_utils import timeutils from cinder.tests.unit import test from cinder.volume import configuration as conf class TargetDriverFixture(test.TestCase): def setUp(self): super(TargetDriverFixture, self).setUp() self.configuration = conf.Configuration(None) self.configuration.append_config_values = mock.Mock(return_value=0) self.configuration.safe_get = mock.Mock(side_effect=self.fake_safe_get) self.configuration.target_ip_address = '10.9.8.7' self.configuration.target_port = 3260 self.fake_volumes_dir = tempfile.mkdtemp() fileutils.ensure_tree(self.fake_volumes_dir) self.fake_project_id = 'ed2c1fd4-5fc0-11e4-aa15-123b93f75cba' self.fake_project_id_2 = 'ed2c1fd4-5fc0-11e4-aa15-123b93f75cba' self.fake_volume_id = 'ed2c2222-5fc0-11e4-aa15-123b93f75cba' self.addCleanup(self._cleanup) self.testvol =\ {'project_id': self.fake_project_id, 'name': 'testvol', 'size': 1, 'id': self.fake_volume_id, 'volume_type_id': None, 'provider_location': '10.10.7.1:3260 ' 'iqn.2010-10.org.openstack:' 'volume-%s 0' % self.fake_volume_id, 'provider_auth': 'CHAP stack-1-a60e2611875f40199931f2' 'c76370d66b 2FE0CQ8J196R', 'provider_geometry': '512 512', 'created_at': timeutils.utcnow(), 'host': 'fake_host@lvm#lvm'} self.testvol_no_prov_loc = copy.copy(self.testvol) self.testvol_no_prov_loc['provider_location'] = None self.iscsi_target_prefix = 'iqn.2010-10.org.openstack:' self.target_string = ('127.0.0.1:3260,1 ' + self.iscsi_target_prefix + 'volume-%s' % self.testvol['id']) self.testvol_2 =\ {'project_id': self.fake_project_id_2, 'name': 'testvol2', 'size': 1, 'id': self.fake_volume_id, 'volume_type_id': None, 'provider_location': ('%(ip)s:%(port)d%(iqn)svolume-%(vol)s 2' % {'ip': self.configuration.target_ip_address, 'port': self.configuration.target_port, 'iqn': self.iscsi_target_prefix, 'vol': self.fake_volume_id}), 'provider_auth': 'CHAP stack-1-a60e2611875f40199931f2' 'c76370d66b 2FE0CQ8J196R', 'provider_geometry': '512 512', 'created_at': timeutils.utcnow(), 'host': 'fake_host@lvm#lvm'} self.expected_iscsi_properties = \ {'auth_method': 'CHAP', 'auth_password': '2FE0CQ8J196R', 'auth_username': 'stack-1-a60e2611875f40199931f2c76370d66b', 'encrypted': False, 'logical_block_size': '512', 'physical_block_size': '512', 'target_discovered': False, 'target_iqn': 'iqn.2010-10.org.openstack:volume-%s' % self.fake_volume_id, 'target_lun': 0, 'target_portal': '10.10.7.1:3260', 'volume_id': self.fake_volume_id} self.VOLUME_ID = '83c2e877-feed-46be-8435-77884fe55b45' self.VOLUME_NAME = 'volume-' + self.VOLUME_ID self.test_vol = (self.iscsi_target_prefix + self.VOLUME_NAME) def _cleanup(self): if os.path.exists(self.fake_volumes_dir): shutil.rmtree(self.fake_volumes_dir) def fake_safe_get(self, value): if value == 'volumes_dir': return self.fake_volumes_dir elif value == 'target_protocol': return self.configuration.target_protocol elif value == 'target_prefix': return self.iscsi_target_prefix ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/targets/test_base_iscsi_driver.py0000664000175000017500000002070600000000000025222 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from unittest import mock import ddt from oslo_config import cfg from cinder import context from cinder import exception from cinder.tests.unit.targets import targets_fixture as tf from cinder import utils from cinder.volume import configuration as conf from cinder.volume.targets import fake from cinder.volume.targets import iscsi class FakeIncompleteDriver(iscsi.ISCSITarget): def null_method(): pass @ddt.ddt class TestBaseISCSITargetDriver(tf.TargetDriverFixture): def setUp(self): super(TestBaseISCSITargetDriver, self).setUp() self.target = fake.FakeTarget(root_helper=utils.get_root_helper(), configuration=self.configuration) self.target.db = mock.MagicMock( volume_get=mock.MagicMock(return_value={'provider_auth': 'CHAP otzL 234Z'})) def test_abc_methods_not_present_fails(self): configuration = conf.Configuration(cfg.StrOpt('target_prefix', default='foo', help='you wish')) self.assertRaises(TypeError, FakeIncompleteDriver, configuration=configuration) def test_get_iscsi_properties(self): self.assertEqual(self.expected_iscsi_properties, self.target._get_iscsi_properties(self.testvol)) def test_get_iscsi_properties_multiple_targets(self): testvol = self.testvol.copy() expected_iscsi_properties = self.expected_iscsi_properties.copy() iqn = expected_iscsi_properties['target_iqn'] testvol.update( {'provider_location': '10.10.7.1:3260;10.10.8.1:3260 ' 'iqn.2010-10.org.openstack:' 'volume-%s 0' % self.fake_volume_id}) expected_iscsi_properties.update( {'target_portals': ['10.10.7.1:3260', '10.10.8.1:3260'], 'target_iqns': [iqn, iqn], 'target_luns': [0, 0]}) self.assertEqual(expected_iscsi_properties, self.target._get_iscsi_properties(testvol)) def test_build_iscsi_auth_string(self): auth_string = 'chap chap-user chap-password' self.assertEqual(auth_string, self.target._iscsi_authentication('chap', 'chap-user', 'chap-password')) def test_do_iscsi_discovery(self): with mock.patch.object(self.configuration, 'safe_get', return_value='127.0.0.1'), \ mock.patch('cinder.utils.execute', return_value=(self.target_string, '')): self.assertEqual(self.target_string, self.target._do_iscsi_discovery(self.testvol)) def test_remove_export(self): with mock.patch.object(self.target, '_get_target_and_lun') as \ mock_get_target, \ mock.patch.object(self.target, 'show_target'), \ mock.patch.object(self.target, 'remove_iscsi_target') as \ mock_remove_target: mock_get_target.return_value = (0, 1) iscsi_target, lun = mock_get_target.return_value ctxt = context.get_admin_context() self.target.remove_export(ctxt, self.testvol) mock_remove_target.assert_called_once_with( iscsi_target, lun, 'ed2c2222-5fc0-11e4-aa15-123b93f75cba', 'testvol') def test_remove_export_notfound(self): with mock.patch.object(self.target, '_get_target_and_lun') as \ mock_get_target, \ mock.patch.object(self.target, 'show_target'), \ mock.patch.object(self.target, 'remove_iscsi_target'): mock_get_target.side_effect = exception.NotFound ctxt = context.get_admin_context() self.assertIsNone(self.target.remove_export(ctxt, self.testvol)) def test_remove_export_show_error(self): with mock.patch.object(self.target, '_get_target_and_lun') as \ 
mock_get_target, \ mock.patch.object(self.target, 'show_target') as mshow, \ mock.patch.object(self.target, 'remove_iscsi_target'): mock_get_target.return_value = (0, 1) iscsi_target, lun = mock_get_target.return_value mshow.side_effect = Exception ctxt = context.get_admin_context() self.assertIsNone(self.target.remove_export(ctxt, self.testvol)) def test_initialize_connection(self): expected = {'driver_volume_type': 'iscsi', 'data': self.expected_iscsi_properties} self.assertEqual(expected, self.target.initialize_connection(self.testvol, {})) def test_validate_connector(self): bad_connector = {'no_initiator': 'nada'} self.assertRaises(exception.InvalidConnectorException, self.target.validate_connector, bad_connector) connector = {'initiator': 'fake_init'} self.assertTrue(bool(self.target.validate_connector), connector) def test_show_target_error(self): self.assertRaises(exception.InvalidParameterValue, self.target.show_target, 0, None) with mock.patch.object(self.target, '_get_target') as mock_get_target: mock_get_target.side_effect = exception.NotFound() self.assertRaises(exception.NotFound, self.target.show_target, 0, self.expected_iscsi_properties['target_iqn']) def test_iscsi_location(self): location = self.target._iscsi_location('portal', 1, 'target', 2) self.assertEqual('portal:3260,1 target 2', location) location = self.target._iscsi_location('portal', 1, 'target', 2, ['portal2']) self.assertEqual('portal:3260;portal2:3260,1 target 2', location) def test_iscsi_location_IPv6(self): ip = 'fd00:fd00:fd00:3000::12' ip2 = 'fd00:fd00:fd00:3000::13' location = self.target._iscsi_location(ip, 1, 'target', 2) self.assertEqual('[%s]:3260,1 target 2' % ip, location) location = self.target._iscsi_location(ip, 1, 'target', 2, [ip2]) self.assertEqual('[%s]:3260;[%s]:3260,1 target 2' % (ip, ip2), location) # Mix of IPv6 (already with square brackets) and IPv4 ip = '[' + ip + ']' location = self.target._iscsi_location(ip, 1, 'target', 2, ['192.168.1.1']) self.assertEqual(ip + ':3260;192.168.1.1:3260,1 target 2', location) def test_get_target_chap_auth(self): ctxt = context.get_admin_context() self.assertEqual(('otzL', '234Z'), self.target._get_target_chap_auth(ctxt, self.testvol)) self.target.db.volume_get.assert_called_once_with( ctxt, self.testvol['id']) def test_are_same_connector(self): res = self.target.are_same_connector({'initiator': 'iqn'}, {'initiator': 'iqn'}) self.assertTrue(res) @ddt.data(({}, {}), ({}, {'initiator': 'iqn'}), ({'initiator': 'iqn'}, {}), ({'initiator': 'iqn1'}, {'initiator': 'iqn2'})) @ddt.unpack def test_are_same_connector_different(self, a_conn_props, b_conn_props): res = self.target.are_same_connector(a_conn_props, b_conn_props) self.assertFalse(bool(res)) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/targets/test_cxt_driver.py0000664000175000017500000001777200000000000023725 0ustar00zuulzuul00000000000000# Copyright 2015 Chelsio Communications Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. import os import sys from unittest import mock from cinder import context from cinder.tests.unit.targets import targets_fixture as tf from cinder.tests.unit import test from cinder import utils from cinder.volume.targets import cxt class TestCxtAdmDriver(tf.TargetDriverFixture): def setUp(self): super(TestCxtAdmDriver, self).setUp() self.cxt_subdir = cxt.CxtAdm.cxt_subdir self.target = cxt.CxtAdm(root_helper=utils.get_root_helper(), configuration=self.configuration) self.VG = 'stack-volumes-lvmdriver-1' self.fake_iscsi_scan = \ ('\n' 'TARGET: iqn.2010-10.org.openstack:%(vol)s, id=1, login_ip=0\n' ' PortalGroup=1@10.9.8.7:3260,timeout=0\n' ' TargetDevice=/dev/%(vg)s/%(vol)s' ',BLK,PROD=CHISCSI ' 'Target,SN=0N0743000000000,ID=0D074300000000000000000,' 'WWN=:W00743000000000\n' % {'vol': self.VOLUME_NAME, 'vg': self.VG}) def test_get_target(self): with mock.patch.object(self.target, '_get_volumes_dir', return_value=self.fake_volumes_dir), \ mock.patch('cinder.utils.execute', return_value=(self.fake_iscsi_scan, None)) as m_exec: self.assertEqual( '1', self.target._get_target( 'iqn.2010-10.org.openstack:volume-%s' % self.VOLUME_ID ) ) self.assertTrue(m_exec.called) @test.testtools.skipIf(sys.platform == "darwin", "SKIP on OSX") @mock.patch('cinder.volume.targets.cxt.CxtAdm._get_target', return_value=1) @mock.patch('cinder.utils.execute') def test_create_iscsi_target(self, mock_execute, mock_get_targ): mock_execute.return_value = ('', '') with mock.patch.object(self.target, '_get_volumes_dir') as mock_get: mock_get.return_value = self.fake_volumes_dir self.assertEqual( 1, self.target.create_iscsi_target( self.test_vol, 1, 0, self.fake_volumes_dir, portals_ips=[self.configuration.target_ip_address])) self.assertTrue(mock_get.called) self.assertTrue(mock_execute.called) self.assertTrue(mock_get_targ.called) @test.testtools.skipIf(sys.platform == "darwin", "SKIP on OSX") @mock.patch('cinder.volume.targets.cxt.CxtAdm._get_target', return_value=1) @mock.patch('cinder.utils.execute', return_value=('fake out', 'fake err')) def test_create_iscsi_target_port_ips(self, mock_execute, mock_get_targ): ips = ['10.0.0.15', '127.0.0.1'] port = 3261 mock_execute.return_value = ('', '') with mock.patch.object(self.target, '_get_volumes_dir') as mock_get: mock_get.return_value = self.fake_volumes_dir test_vol = 'iqn.2010-10.org.openstack:'\ 'volume-83c2e877-feed-46be-8435-77884fe55b45' self.assertEqual( 1, self.target.create_iscsi_target( test_vol, 1, 0, self.fake_volumes_dir, portals_port=port, portals_ips=ips)) self.assertTrue(mock_get.called) self.assertTrue(mock_execute.called) self.assertTrue(mock_get_targ.called) file_path = os.path.join(self.fake_volumes_dir, test_vol.split(':')[1]) expected_cfg = { 'name': test_vol, 'device': self.fake_volumes_dir, 'ips': ','.join(map(lambda ip: '%s:%s' % (ip, port), ips)), 'spaces': ' ' * 14, 'spaces2': ' ' * 23} expected_file = ('\n%(spaces)starget:' '\n%(spaces2)sTargetName=%(name)s' '\n%(spaces2)sTargetDevice=%(device)s' '\n%(spaces2)sPortalGroup=1@%(ips)s' '\n%(spaces)s ') % expected_cfg with open(file_path, 'r') as cfg_file: result = cfg_file.read() self.assertEqual(expected_file, result) @test.testtools.skipIf(sys.platform == "darwin", "SKIP on OSX") @mock.patch('cinder.volume.targets.cxt.CxtAdm._get_target', return_value=1) @mock.patch('cinder.utils.execute', return_value=('fake out', 'fake err')) def test_create_iscsi_target_already_exists(self, mock_execute, 
mock_get_targ): with mock.patch.object(self.target, '_get_volumes_dir') as mock_get: mock_get.return_value = self.fake_volumes_dir self.assertEqual( 1, self.target.create_iscsi_target( self.test_vol, 1, 0, self.fake_volumes_dir, portals_ips=[self.configuration.target_ip_address])) self.assertTrue(mock_get.called) self.assertTrue(mock_get_targ.called) self.assertTrue(mock_execute.called) @test.testtools.skipIf(sys.platform == "darwin", "SKIP on OSX") @mock.patch('cinder.volume.targets.cxt.CxtAdm._get_target', return_value=1) @mock.patch('cinder.utils.execute') @mock.patch.object(cxt.CxtAdm, '_get_target_chap_auth') def test_create_export(self, mock_chap, mock_execute, mock_get_targ): mock_execute.return_value = ('', '') mock_chap.return_value = ('QZJbisGmn9AL954FNF4D', 'P68eE7u9eFqDGexd28DQ') with mock.patch.object(self.target, '_get_volumes_dir') as mock_get: mock_get.return_value = self.fake_volumes_dir expected_result = {'location': '10.9.8.7:3260,1 ' 'iqn.2010-10.org.openstack:testvol 0', 'auth': 'CHAP ' 'QZJbisGmn9AL954FNF4D P68eE7u9eFqDGexd28DQ'} ctxt = context.get_admin_context() self.assertEqual(expected_result, self.target.create_export(ctxt, self.testvol, self.fake_volumes_dir)) self.assertTrue(mock_get.called) self.assertTrue(mock_execute.called) @mock.patch('cinder.volume.targets.cxt.CxtAdm._get_target_chap_auth') def test_ensure_export(self, mock_get_chap): fake_creds = ('asdf', 'qwert') mock_get_chap.return_value = fake_creds ctxt = context.get_admin_context() with mock.patch.object(self.target, 'create_iscsi_target'): self.target.ensure_export(ctxt, self.testvol, self.fake_volumes_dir) self.target.create_iscsi_target.assert_called_once_with( 'iqn.2010-10.org.openstack:testvol', 1, 0, self.fake_volumes_dir, fake_creds, check_exit_code=False, old_name=None, portals_ips=[self.configuration.target_ip_address], portals_port=self.configuration.target_port) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/targets/test_iser_driver.py0000664000175000017500000000534600000000000024063 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
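# --- Illustrative sketch (simplified stand-ins, not the actual
# cinder.volume.targets code) ---
# The target-driver tests above build and consume 'CHAP <user> <password>'
# strings: create_export returns the credentials as a single auth string, and
# _get_target_chap_auth later recovers the (username, password) pair from the
# volume's provider_auth. The helpers below are invented for this sketch.

def build_provider_auth_sketch(username, password):
    return 'CHAP %s %s' % (username, password)


def parse_provider_auth_sketch(provider_auth):
    if not provider_auth:
        return None
    method, username, password = provider_auth.split(' ', 2)
    if method != 'CHAP':
        raise ValueError('unsupported auth method: %s' % method)
    return username, password


# parse_provider_auth_sketch('CHAP otzL 234Z') -> ('otzL', '234Z'), matching
# the expectation in test_get_target_chap_auth above.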
from unittest import mock from cinder.tests.unit.targets import targets_fixture as tf from cinder import utils from cinder.volume.targets import lio from cinder.volume.targets import tgt class TestIserTgtDriver(tf.TargetDriverFixture): """Unit tests for the iSER TGT flow""" def setUp(self): super(TestIserTgtDriver, self).setUp() self.configuration.target_protocol = 'iser' self.target = tgt.TgtAdm(root_helper=utils.get_root_helper(), configuration=self.configuration) def test_iscsi_protocol(self): self.assertEqual('iser', self.target.iscsi_protocol) @mock.patch.object(tgt.TgtAdm, '_get_iscsi_properties') def test_initialize_connection(self, mock_get_iscsi): connector = {'initiator': 'fake_init'} mock_get_iscsi.return_value = {} expected_return = {'driver_volume_type': 'iser', 'data': {}} self.assertEqual(expected_return, self.target.initialize_connection(self.testvol, connector)) class TestIserLioAdmDriver(tf.TargetDriverFixture): """Unit tests for the iSER LIO flow""" def setUp(self): super(TestIserLioAdmDriver, self).setUp() self.configuration.target_protocol = 'iser' with mock.patch.object(lio.LioAdm, '_verify_rtstool'): self.target = lio.LioAdm(root_helper=utils.get_root_helper(), configuration=self.configuration) self.target.db = mock.MagicMock( volume_get=lambda x, y: {'provider_auth': 'IncomingUser foo bar'}) def test_iscsi_protocol(self): self.assertEqual('iser', self.target.iscsi_protocol) @mock.patch('cinder.utils.execute') @mock.patch.object(lio.LioAdm, '_get_iscsi_properties') def test_initialize_connection(self, mock_get_iscsi, mock_execute): connector = {'initiator': 'fake_init'} mock_get_iscsi.return_value = {} ret = self.target.initialize_connection(self.testvol, connector) driver_volume_type = ret['driver_volume_type'] self.assertEqual('iser', driver_volume_type) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/targets/test_lio_driver.py0000664000175000017500000003623600000000000023706 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
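# NOTE: the LIO driver tests below mock out the ``cinder-rtstool`` calls
# (``cinder.utils.execute`` / ``LioAdm._execute``) and the
# ``_persist_configuration`` helper, then assert on the exact command
# lines, e.g. ('cinder-rtstool', 'create', <volumes_dir>, <iqn>, '', '',
# False) for target creation and ('cinder-rtstool', 'delete', <iqn>) for
# removal; persisting the configuration is only expected when the command
# succeeds.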
from unittest import mock from oslo_concurrency import processutils as putils from cinder import context from cinder import exception from cinder.tests.unit.targets import targets_fixture as tf from cinder import utils from cinder.volume.targets import lio class TestLioAdmDriver(tf.TargetDriverFixture): def setUp(self): super(TestLioAdmDriver, self).setUp() with mock.patch.object(lio.LioAdm, '_verify_rtstool'): self.target = lio.LioAdm(root_helper=utils.get_root_helper(), configuration=self.configuration) @mock.patch.object(lio.LioAdm, '_execute', side_effect=lio.LioAdm._execute) @mock.patch.object(lio.LioAdm, '_persist_configuration') @mock.patch('cinder.utils.execute') def test_get_target(self, mexecute, mpersist_cfg, mlock_exec): mexecute.return_value = (self.test_vol, None) self.assertEqual(self.test_vol, self.target._get_target(self.test_vol)) self.assertFalse(mpersist_cfg.called) expected_args = ('cinder-rtstool', 'get-targets') mlock_exec.assert_called_once_with(*expected_args, run_as_root=True) mexecute.assert_called_once_with(*expected_args, run_as_root=True) def test_get_iscsi_target(self): ctxt = context.get_admin_context() expected = 0 self.assertEqual(expected, self.target._get_iscsi_target(ctxt, self.testvol['id'])) def test_get_target_and_lun(self): lun = 0 iscsi_target = 0 ctxt = context.get_admin_context() expected = (iscsi_target, lun) self.assertEqual(expected, self.target._get_target_and_lun(ctxt, self.testvol)) @mock.patch.object(lio.LioAdm, '_execute', side_effect=lio.LioAdm._execute) @mock.patch.object(lio.LioAdm, '_persist_configuration') @mock.patch('cinder.utils.execute') @mock.patch.object(lio.LioAdm, '_get_target') def test_create_iscsi_target(self, mget_target, mexecute, mpersist_cfg, mlock_exec): mget_target.return_value = 1 # create_iscsi_target sends volume_name instead of volume_id on error self.assertEqual( 1, self.target.create_iscsi_target( self.test_vol, 1, 0, self.fake_volumes_dir)) mpersist_cfg.assert_called_once_with(self.VOLUME_NAME) mexecute.assert_called_once_with( 'cinder-rtstool', 'create', self.fake_volumes_dir, self.test_vol, '', '', self.target.iscsi_protocol == 'iser', run_as_root=True) @mock.patch.object(lio.LioAdm, '_execute', side_effect=lio.LioAdm._execute) @mock.patch.object(lio.LioAdm, '_persist_configuration') @mock.patch.object(utils, 'execute') @mock.patch.object(lio.LioAdm, '_get_target', return_value=1) def test_create_iscsi_target_port_ip(self, mget_target, mexecute, mpersist_cfg, mlock_exec): ip = '10.0.0.15' port = 3261 self.assertEqual( 1, self.target.create_iscsi_target( name=self.test_vol, tid=1, lun=0, path=self.fake_volumes_dir, **{'portals_port': port, 'portals_ips': [ip]})) expected_args = ( 'cinder-rtstool', 'create', self.fake_volumes_dir, self.test_vol, '', '', self.target.iscsi_protocol == 'iser', '-p%s' % port, '-a' + ip) mlock_exec.assert_any_call(*expected_args, run_as_root=True) mexecute.assert_any_call(*expected_args, run_as_root=True) mpersist_cfg.assert_called_once_with(self.VOLUME_NAME) @mock.patch.object(lio.LioAdm, '_execute', side_effect=lio.LioAdm._execute) @mock.patch.object(lio.LioAdm, '_persist_configuration') @mock.patch.object(utils, 'execute') @mock.patch.object(lio.LioAdm, '_get_target', return_value=1) def test_create_iscsi_target_port_ips(self, mget_target, mexecute, mpersist_cfg, mlock_exec): test_vol = 'iqn.2010-10.org.openstack:' + self.VOLUME_NAME ips = ['10.0.0.15', '127.0.0.1'] port = 3261 self.assertEqual( 1, self.target.create_iscsi_target( name=test_vol, tid=1, lun=0, 
path=self.fake_volumes_dir, **{'portals_port': port, 'portals_ips': ips})) expected_args = ( 'cinder-rtstool', 'create', self.fake_volumes_dir, test_vol, '', '', self.target.iscsi_protocol == 'iser', '-p%s' % port, '-a' + ','.join(ips)) mlock_exec.assert_any_call(*expected_args, run_as_root=True) mexecute.assert_any_call(*expected_args, run_as_root=True) mpersist_cfg.assert_called_once_with(self.VOLUME_NAME) @mock.patch.object(lio.LioAdm, '_execute', side_effect=lio.LioAdm._execute) @mock.patch.object(lio.LioAdm, '_persist_configuration') @mock.patch('cinder.utils.execute', side_effect=putils.ProcessExecutionError) @mock.patch.object(lio.LioAdm, '_get_target') def test_create_iscsi_target_already_exists(self, mget_target, mexecute, mpersist_cfg, mlock_exec): chap_auth = ('foo', 'bar') self.assertRaises(exception.ISCSITargetCreateFailed, self.target.create_iscsi_target, self.test_vol, 1, 0, self.fake_volumes_dir, chap_auth) self.assertFalse(mpersist_cfg.called) expected_args = ('cinder-rtstool', 'create', self.fake_volumes_dir, self.test_vol, chap_auth[0], chap_auth[1], False) mlock_exec.assert_called_once_with(*expected_args, run_as_root=True) mexecute.assert_called_once_with(*expected_args, run_as_root=True) @mock.patch.object(lio.LioAdm, '_execute', side_effect=lio.LioAdm._execute) @mock.patch.object(lio.LioAdm, '_persist_configuration') @mock.patch('cinder.utils.execute') def test_remove_iscsi_target(self, mexecute, mpersist_cfg, mlock_exec): # Test the normal case self.target.remove_iscsi_target(0, 0, self.testvol['id'], self.testvol['name']) expected_args = ('cinder-rtstool', 'delete', self.iscsi_target_prefix + self.testvol['name']) mlock_exec.assert_called_once_with(*expected_args, run_as_root=True) mexecute.assert_called_once_with(*expected_args, run_as_root=True) mpersist_cfg.assert_called_once_with(self.fake_volume_id) # Test the failure case: putils.ProcessExecutionError mlock_exec.reset_mock() mpersist_cfg.reset_mock() mexecute.side_effect = putils.ProcessExecutionError self.assertRaises(exception.ISCSITargetRemoveFailed, self.target.remove_iscsi_target, 0, 0, self.testvol['id'], self.testvol['name']) mlock_exec.assert_called_once_with(*expected_args, run_as_root=True) # Ensure there have been no calls to persist configuration self.assertFalse(mpersist_cfg.called) @mock.patch.object(lio.LioAdm, '_get_targets') @mock.patch.object(lio.LioAdm, '_execute', side_effect=lio.LioAdm._execute) @mock.patch('cinder.utils.execute') def test_ensure_export(self, mock_exec, mock_execute, mock_get_targets): ctxt = context.get_admin_context() mock_get_targets.return_value = None self.target.ensure_export(ctxt, self.testvol, self.fake_volumes_dir) expected_args = ('cinder-rtstool', 'restore') mock_exec.assert_called_once_with(*expected_args, run_as_root=True) @mock.patch.object(lio.LioAdm, '_get_targets') @mock.patch.object(lio.LioAdm, '_restore_configuration') def test_ensure_export_target_exist(self, mock_restore, mock_get_targets): ctxt = context.get_admin_context() mock_get_targets.return_value = 'target' self.target.ensure_export(ctxt, self.testvol, self.fake_volumes_dir) self.assertFalse(mock_restore.called) @mock.patch.object(lio.LioAdm, '_execute', side_effect=lio.LioAdm._execute) @mock.patch.object(lio.LioAdm, '_persist_configuration') @mock.patch('cinder.utils.execute') @mock.patch.object(lio.LioAdm, '_get_iscsi_properties') def test_initialize_connection(self, mock_get_iscsi, mock_execute, mpersist_cfg, mlock_exec): target_id = self.iscsi_target_prefix + 'volume-' + 
self.fake_volume_id connector = {'initiator': 'fake_init'} # Test the normal case mock_get_iscsi.return_value = 'foo bar' expected_return = {'driver_volume_type': 'iscsi', 'data': 'foo bar'} self.assertEqual(expected_return, self.target.initialize_connection(self.testvol, connector)) expected_args = ('cinder-rtstool', 'add-initiator', target_id, self.expected_iscsi_properties['auth_username'], '2FE0CQ8J196R', connector['initiator']) mlock_exec.assert_called_once_with(*expected_args, run_as_root=True) mock_execute.assert_called_once_with(*expected_args, run_as_root=True) mpersist_cfg.assert_called_once_with(self.fake_volume_id) # Test the failure case: putils.ProcessExecutionError mlock_exec.reset_mock() mpersist_cfg.reset_mock() mock_execute.side_effect = putils.ProcessExecutionError self.assertRaises(exception.ISCSITargetAttachFailed, self.target.initialize_connection, self.testvol, connector) mlock_exec.assert_called_once_with(*expected_args, run_as_root=True) # Ensure there have been no calls to persist configuration self.assertFalse(mpersist_cfg.called) @mock.patch.object(lio.LioAdm, '_execute', side_effect=lio.LioAdm._execute) @mock.patch.object(lio.LioAdm, '_persist_configuration') @mock.patch('cinder.utils.execute') def test_terminate_connection(self, mock_execute, mpersist_cfg, mlock_exec): target_id = self.iscsi_target_prefix + 'volume-' + self.fake_volume_id connector = {'initiator': 'fake_init'} self.target.terminate_connection(self.testvol, connector) expected_args = ('cinder-rtstool', 'delete-initiator', target_id, connector['initiator']) mlock_exec.assert_called_once_with(*expected_args, run_as_root=True) mock_execute.assert_called_once_with(*expected_args, run_as_root=True) mpersist_cfg.assert_called_once_with(self.fake_volume_id) @mock.patch.object(lio.LioAdm, '_execute', side_effect=lio.LioAdm._execute) @mock.patch.object(lio.LioAdm, '_persist_configuration') @mock.patch('cinder.utils.execute') def test_terminate_connection_no_prov_loc(self, mock_execute, mpersist_cfg, mlock_exec): """terminate_connection does nothing if provider_location is None""" connector = {'initiator': 'fake_init'} self.target.terminate_connection(self.testvol_no_prov_loc, connector) mlock_exec.assert_not_called() mock_execute.assert_not_called() mpersist_cfg.assert_not_called() @mock.patch.object(lio.LioAdm, '_execute', side_effect=lio.LioAdm._execute) @mock.patch.object(lio.LioAdm, '_persist_configuration') @mock.patch('cinder.utils.execute') def test_terminate_connection_fail(self, mock_execute, mpersist_cfg, mlock_exec): target_id = self.iscsi_target_prefix + 'volume-' + self.fake_volume_id mock_execute.side_effect = putils.ProcessExecutionError connector = {'initiator': 'fake_init'} self.assertRaises(exception.ISCSITargetDetachFailed, self.target.terminate_connection, self.testvol, connector) mlock_exec.assert_called_once_with('cinder-rtstool', 'delete-initiator', target_id, connector['initiator'], run_as_root=True) self.assertFalse(mpersist_cfg.called) def test_iscsi_protocol(self): self.assertEqual('iscsi', self.target.iscsi_protocol) @mock.patch.object(lio.LioAdm, '_get_target_and_lun', return_value=(1, 2)) @mock.patch.object(lio.LioAdm, 'create_iscsi_target', return_value=3) @mock.patch.object(lio.LioAdm, '_get_target_chap_auth', return_value=(mock.sentinel.user, mock.sentinel.pwd)) def test_create_export(self, mock_chap, mock_create, mock_get_target): ctxt = context.get_admin_context() result = self.target.create_export(ctxt, self.testvol_2, self.fake_volumes_dir) loc = (u'%(ip)s:%(port)d,3 
%(prefix)s%(name)s 2' % {'ip': self.configuration.target_ip_address, 'port': self.configuration.target_port, 'prefix': self.iscsi_target_prefix, 'name': self.testvol_2['name']}) expected_result = { 'location': loc, 'auth': 'CHAP %s %s' % (mock.sentinel.user, mock.sentinel.pwd), } self.assertEqual(expected_result, result) mock_create.assert_called_once_with( self.iscsi_target_prefix + self.testvol_2['name'], 1, 2, self.fake_volumes_dir, (mock.sentinel.user, mock.sentinel.pwd), portals_ips=[self.configuration.target_ip_address], portals_port=self.configuration.target_port) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/targets/test_nvmeof_driver.py0000664000175000017500000002552100000000000024410 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from unittest import mock import ddt from oslo_utils import timeutils from cinder import context from cinder import exception from cinder.tests.unit.targets import targets_fixture as tf from cinder import utils from cinder.volume.targets import nvmeof class FakeNVMeOFDriver(nvmeof.NVMeOF): def __init__(self, *args, **kwargs): super(FakeNVMeOFDriver, self).__init__(*args, **kwargs) def delete_nvmeof_target(self, target_name): pass @ddt.ddt class TestNVMeOFDriver(tf.TargetDriverFixture): def setUp(self): super(TestNVMeOFDriver, self).setUp() self.configuration.target_protocol = 'nvmet_rdma' self.target = FakeNVMeOFDriver(root_helper=utils.get_root_helper(), configuration=self.configuration) self.target_ip = self.configuration.target_ip_address self.target_port = self.configuration.target_port self.nvmet_subsystem_name = self.configuration.target_prefix self.nvmet_ns_id = self.configuration.nvmet_ns_id self.nvmet_port_id = self.configuration.nvmet_port_id self.nvme_transport_type = 'rdma' self.fake_volume_id = 'c446b9a2-c968-4260-b95f-a18a7b41c004' self.testvol_path = ( '/dev/stack-volumes-lvmdriver-1/volume-%s' % self.fake_volume_id) self.fake_project_id = 'ed2c1fd4-5555-1111-aa15-123b93f75cba' self.testvol = ( {'project_id': self.fake_project_id, 'name': 'testvol', 'size': 1, 'id': self.fake_volume_id, 'volume_type_id': None, 'provider_location': self.target.get_nvmeof_location( "ngn.%s-%s" % ( self.nvmet_subsystem_name, self.fake_volume_id), [self.target_ip], self.target_port, self.nvme_transport_type, self.nvmet_ns_id ), 'provider_auth': None, 'provider_geometry': None, 'created_at': timeutils.utcnow(), 'host': 'fake_host@lvm#lvm'}) @mock.patch.object(nvmeof.NVMeOF, '_get_connection_properties_from_vol') def test_initialize_connection(self, mock_get_conn): mock_connector = {'initiator': 'fake_init'} mock_testvol = self.testvol expected_return = { 'driver_volume_type': 'nvmeof', 'data': mock_get_conn.return_value } self.assertEqual(expected_return, self.target.initialize_connection(mock_testvol, mock_connector)) mock_get_conn.assert_called_once_with(mock_testvol) @mock.patch.object(FakeNVMeOFDriver, 'create_nvmeof_target') def 
test_create_export(self, mock_create_nvme_target): ctxt = context.get_admin_context() self.target.create_export(ctxt, self.testvol, self.testvol_path) mock_create_nvme_target.assert_called_once_with( self.fake_volume_id, self.configuration.target_prefix, [self.target_ip], self.target_port, self.nvme_transport_type, self.nvmet_port_id, self.nvmet_ns_id, self.testvol_path ) @mock.patch.object(FakeNVMeOFDriver, 'delete_nvmeof_target') def test_remove_export(self, mock_delete_nvmeof_target): ctxt = context.get_admin_context() self.target.remove_export(ctxt, self.testvol) mock_delete_nvmeof_target.assert_called_once_with( self.testvol ) @mock.patch.object(nvmeof.NVMeOF, '_get_nvme_uuid') @mock.patch.object(nvmeof.NVMeOF, '_get_connection_properties') def test__get_connection_properties(self, mock_get_conn_props, mock_uuid): """Test connection properties from a volume.""" res = self.target._get_connection_properties_from_vol(self.testvol) self.assertEqual(mock_get_conn_props.return_value, res) mock_uuid.assert_called_once_with(self.testvol) mock_get_conn_props.assert_called_once_with( f'ngn.{self.nvmet_subsystem_name}-{self.fake_volume_id}', [self.target_ip], str(self.target_port), self.nvme_transport_type, str(self.nvmet_ns_id), mock_uuid.return_value) @mock.patch.object(nvmeof.NVMeOF, '_get_nvme_uuid') @mock.patch.object(nvmeof.NVMeOF, '_get_connection_properties') def test__get_connection_properties_multiple_addresses( self, mock_get_conn_props, mock_uuid): """Test connection properties from a volume with multiple ips.""" self.testvol['provider_location'] = self.target.get_nvmeof_location( f"ngn.{self.nvmet_subsystem_name}-{self.fake_volume_id}", [self.target_ip, '127.0.0.1'], self.target_port, self.nvme_transport_type, self.nvmet_ns_id ) res = self.target._get_connection_properties_from_vol(self.testvol) self.assertEqual(mock_get_conn_props.return_value, res) mock_uuid.assert_called_once_with(self.testvol) mock_get_conn_props.assert_called_once_with( f'ngn.{self.nvmet_subsystem_name}-{self.fake_volume_id}', [self.target_ip, '127.0.0.1'], str(self.target_port), self.nvme_transport_type, str(self.nvmet_ns_id), mock_uuid.return_value) def test__get_connection_properties_old(self): """Test connection properties with the old NVMe-oF format.""" nqn = f'ngn.{self.nvmet_subsystem_name}-{self.fake_volume_id}' expected_return = { 'target_portal': self.target_ip, 'target_port': str(self.target_port), 'nqn': nqn, 'transport_type': self.nvme_transport_type, 'ns_id': str(self.nvmet_ns_id) } res = self.target._get_connection_properties(nqn, [self.target_ip], str(self.target_port), self.nvme_transport_type, str(self.nvmet_ns_id), mock.sentinel.uuid) self.assertEqual(expected_return, res) @ddt.data(('rdma', 'RoCEv2'), ('tcp', 'tcp')) @ddt.unpack def test__get_connection_properties_new( self, transport, expected_transport): """Test connection properties with the new NVMe-oF format.""" nqn = f'ngn.{self.nvmet_subsystem_name}-{self.fake_volume_id}' self.configuration.nvmeof_conn_info_version = 2 expected_return = { 'target_nqn': nqn, 'vol_uuid': mock.sentinel.uuid, 'ns_id': str(self.nvmet_ns_id), 'portals': [(self.target_ip, str(self.target_port), expected_transport)], } res = self.target._get_connection_properties(nqn, [self.target_ip], str(self.target_port), transport, str(self.nvmet_ns_id), mock.sentinel.uuid) self.assertEqual(expected_return, res) @ddt.data({'nqn': 'fake-nqn'}, {'nqn': 'fake-nqn', 'initiator': 'fake-iqn'}) def test_validate_connector(self, mock_connector): 
self.assertTrue(self.target.validate_connector(mock_connector)) @ddt.data({'initiator': 'fake-iqn'}, {}) def test_validate_connector_not_found(self, mock_connector): self.assertRaises(exception.InvalidConnectorException, self.target.validate_connector, mock_connector) def test_invalid_target_protocol(self): self.configuration.target_protocol = 'iser' self.assertRaises(nvmeof.UnsupportedNVMETProtocol, FakeNVMeOFDriver, root_helper=utils.get_root_helper(), configuration=self.configuration) def test_invalid_secondary_ips_old_conn_info_combination(self): """Secondary IPS are only supported with new connection information.""" self.configuration.target_secondary_ip_addresses = ['127.0.0.1'] self.configuration.nvmeof_conn_info_version = 1 self.assertRaises(exception.InvalidConfigurationValue, FakeNVMeOFDriver, root_helper=utils.get_root_helper(), configuration=self.configuration) def test_valid_secondary_ips_old_conn_info_combination(self): """Secondary IPS are supported with new connection information.""" self.configuration.target_secondary_ip_addresses = ['127.0.0.1'] self.configuration.nvmeof_conn_info_version = 2 FakeNVMeOFDriver(root_helper=utils.get_root_helper(), configuration=self.configuration) def test_are_same_connector(self): res = self.target.are_same_connector({'nqn': 'nvme'}, {'nqn': 'nvme'}) self.assertTrue(res) @ddt.data(({}, {}), ({}, {'nqn': 'nvmE'}), ({'nqn': 'nvmeE'}, {}), ({'nqn': 'nvme1'}, {'nqn': 'nvme2'})) @ddt.unpack def test_are_same_connector_different(self, a_conn_props, b_conn_props): res = self.target.are_same_connector(a_conn_props, b_conn_props) self.assertFalse(bool(res)) def test_get_nvmeof_location(self): """Serialize connection information into location.""" result = self.target.get_nvmeof_location( 'ngn.subsys_name-vol_id', ['127.0.0.1'], 4420, 'tcp', 10) expected = '127.0.0.1:4420 tcp ngn.subsys_name-vol_id 10' self.assertEqual(expected, result) def test_get_nvmeof_location_multiple_ips(self): """Serialize connection information with multiple ips into location.""" result = self.target.get_nvmeof_location( 'ngn.subsys_name-vol_id', ['127.0.0.1', '192.168.1.1'], 4420, 'tcp', 10) expected = '127.0.0.1,192.168.1.1:4420 tcp ngn.subsys_name-vol_id 10' self.assertEqual(expected, result) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/targets/test_nvmet_driver.py0000664000175000017500000007412400000000000024252 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
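# NOTE: the NVMET target tests below rely on the fake nvmet library
# fixture (``fake_nvmet_lib``) being imported before the privsep-backed
# ``cinder.privsep.targets.nvmet`` module, so subsystem, namespace and
# port manipulation can be exercised without a real kernel nvmet setup.
# Shared-target behaviour is toggled per test with
# ``self.mock_object(self.target, 'share_targets', True)``.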
from unittest import mock import ddt from cinder import exception from cinder.tests.unit.privsep.targets import fake_nvmet_lib from cinder.tests.unit.targets import targets_fixture as tf from cinder import utils from cinder.volume.targets import nvmet # This must go after fake_nvmet_lib has been imported (thus the noqa) from cinder.privsep.targets import nvmet as priv_nvmet # noqa @ddt.ddt class TestNVMETDriver(tf.TargetDriverFixture): def setUp(self): super(TestNVMETDriver, self).setUp() self.configuration.target_prefix = 'nvme-subsystem-1' self.configuration.target_protocol = 'nvmet_rdma' self.target = nvmet.NVMET(root_helper=utils.get_root_helper(), configuration=self.configuration) self.target.share_targets = False fake_nvmet_lib.reset_mock() def test_supports_shared(self): self.assertTrue(self.target.SHARED_TARGET_SUPPORT) @mock.patch.object(nvmet.nvmeof.NVMeOF, 'initialize_connection') @mock.patch.object(nvmet.NVMET, '_map_volume') def test_initialize_connection_non_shared(self, mock_map, mock_init_conn): """Non shared initialize doesn't do anything (calls NVMeOF).""" res = self.target.initialize_connection(mock.sentinel.volume, mock.sentinel.connector) self.assertEqual(mock_init_conn.return_value, res) mock_init_conn.assert_called_once_with(mock.sentinel.volume, mock.sentinel.connector) mock_map.assert_not_called() @mock.patch.object(nvmet.NVMET, '_get_nvme_uuid') @mock.patch('os.path.exists') @mock.patch.object(nvmet.NVMET, '_get_connection_properties') @mock.patch.object(nvmet.nvmeof.NVMeOF, 'initialize_connection') @mock.patch.object(nvmet.NVMET, '_map_volume') def test_initialize_connection_shared( self, mock_map, mock_init_conn, mock_get_conn_props, mock_exists, mock_uuid): """When sharing, the initialization maps the volume.""" self.mock_object(self.target, 'share_targets', True) mock_map.return_value = (mock.sentinel.nqn, mock.sentinel.nsid) vol = mock.Mock() res = self.target.initialize_connection(vol, mock.sentinel.connector) expected = {'driver_volume_type': 'nvmeof', 'data': mock_get_conn_props.return_value} self.assertEqual(expected, res) mock_init_conn.assert_not_called() mock_exists.assert_called_once_with(vol.provider_location) mock_map.assert_called_once_with(vol, vol.provider_location, mock.sentinel.connector) mock_uuid.assert_called_once_with(vol) mock_get_conn_props.assert_called_once_with( mock.sentinel.nqn, self.target.target_ips, self.target.target_port, self.target.nvme_transport_type, mock.sentinel.nsid, mock_uuid.return_value) @mock.patch.object(nvmet.NVMET, '_get_nvme_uuid') @mock.patch('os.path.exists', return_value=False) @mock.patch.object(nvmet.NVMET, '_get_connection_properties') @mock.patch.object(nvmet.nvmeof.NVMeOF, 'initialize_connection') @mock.patch.object(nvmet.NVMET, '_map_volume') def test_initialize_connection_shared_no_path( self, mock_map, mock_init_conn, mock_get_conn_props, mock_exists, mock_uuid): """Fails if the provided path is not present in the system.""" self.mock_object(self.target, 'share_targets', True) mock_map.return_value = (mock.sentinel.nqn, mock.sentinel.nsid) vol = mock.Mock() self.assertRaises(exception.InvalidConfigurationValue, self.target.initialize_connection, vol, mock.sentinel.connector) mock_init_conn.assert_not_called() mock_exists.assert_called_once_with(vol.provider_location) mock_map.assert_not_called() mock_uuid.assert_not_called() mock_get_conn_props.assert_not_called() @mock.patch.object(nvmet.NVMET, 'get_nvmeof_location') @mock.patch.object(nvmet.NVMET, '_map_volume') def test_create_export(self, mock_map, 
mock_location): """When not sharing, the export maps the volume.""" mock_map.return_value = (mock.sentinel.nqn, mock.sentinel.nsid) res = self.target.create_export(mock.sentinel.context, mock.sentinel.vol, mock.sentinel.volume_path) self.assertEqual({'location': mock_location.return_value, 'auth': ''}, res) mock_map.assert_called_once_with(mock.sentinel.vol, mock.sentinel.volume_path) mock_location.assert_called_once_with(mock.sentinel.nqn, self.target.target_ips, self.target.target_port, self.target.nvme_transport_type, mock.sentinel.nsid) @mock.patch.object(nvmet.NVMET, 'get_nvmeof_location') @mock.patch.object(nvmet.NVMET, '_map_volume') def test_create_export_shared(self, mock_map, mock_location): """When sharing, the export just stores the volume path as location.""" self.mock_object(self.target, 'share_targets', True) res = self.target.create_export(mock.sentinel.context, mock.sentinel.vol, mock.sentinel.volume_path) self.assertEqual({'location': mock.sentinel.volume_path, 'auth': ''}, res) mock_map.assert_not_called() mock_location.assert_not_called() @mock.patch('oslo_concurrency.lockutils.lock') @mock.patch.object(nvmet.NVMET, '_get_nvme_uuid') @mock.patch.object(nvmet.NVMET, '_ensure_port_exports') @mock.patch.object(nvmet.NVMET, '_ensure_subsystem_exists') @mock.patch.object(nvmet.NVMET, '_get_target_nqn') def test__map_volume(self, mock_nqn, mock_subsys, mock_port, mock_uuid, mock_lock): """Normal volume mapping.""" vol = mock.Mock() res = self.target._map_volume(vol, mock.sentinel.volume_path, mock.sentinel.connector) expected = (mock_nqn.return_value, mock_subsys.return_value) self.assertEqual(res, expected) mock_nqn.assert_called_once_with(vol.id, mock.sentinel.connector) mock_uuid.assert_called_once_with(vol) mock_subsys.assert_called_once_with(mock_nqn.return_value, mock.sentinel.volume_path, mock_uuid.return_value) mock_port.assert_called_once_with(mock_nqn.return_value, self.target.target_ips, self.target.target_port, self.target.nvme_transport_type, self.target.nvmet_port_id) mock_lock.assert_called() @ddt.data((ValueError, None), (None, IndexError)) @ddt.unpack @mock.patch('oslo_concurrency.lockutils.lock') @mock.patch.object(nvmet.NVMET, '_get_nvme_uuid') @mock.patch.object(nvmet.NVMET, '_ensure_port_exports') @mock.patch.object(nvmet.NVMET, '_ensure_subsystem_exists') @mock.patch.object(nvmet.NVMET, '_get_target_nqn') def test__map_volume_error(self, subsys_effect, port_effect, mock_nqn, mock_subsys, mock_port, mock_uuid, mock_lock): """Failing create target executing subsystem or port creation.""" mock_subsys.side_effect = subsys_effect mock_port.side_effect = port_effect mock_nqn.return_value = mock.sentinel.nqn mock_uuid.return_value = mock.sentinel.uuid vol = mock.Mock() self.assertRaises(nvmet.NVMETTargetAddError, self.target._map_volume, vol, mock.sentinel.volume_path, mock.sentinel.connector) mock_nqn.assert_called_once_with(vol.id, mock.sentinel.connector) mock_uuid.assert_called_once_with(vol) mock_subsys.assert_called_once_with(mock.sentinel.nqn, mock.sentinel.volume_path, mock.sentinel.uuid) if subsys_effect: mock_port.assert_not_called() else: mock_port.assert_called_once_with(mock.sentinel.nqn, self.target.target_ips, self.target.target_port, self.target.nvme_transport_type, self.target.nvmet_port_id) mock_lock.assert_called() @mock.patch.object(nvmet.NVMET, '_ensure_namespace_exists') @mock.patch.object(priv_nvmet, 'Subsystem') def test__ensure_subsystem_exists_already_exists(self, mock_subsys, mock_namespace): """Skip subsystem creation if already 
exists.""" nqn = 'nqn.nvme-subsystem-1-uuid' res = self.target._ensure_subsystem_exists(nqn, mock.sentinel.vol_path, mock.sentinel.uuid) self.assertEqual(mock_namespace.return_value, res) mock_subsys.assert_called_once_with(nqn) mock_subsys.setup.assert_not_called() mock_namespace.assert_called_once_with(mock_subsys.return_value, mock.sentinel.vol_path, mock.sentinel.uuid) @mock.patch.object(nvmet.NVMET, '_ensure_namespace_exists') @mock.patch('oslo_utils.uuidutils.generate_uuid') @mock.patch.object(priv_nvmet, 'Subsystem') def test__ensure_subsystem_exists(self, mock_subsys, mock_uuid, mock_namespace): """Create subsystem when it doesn't exist.""" mock_subsys.side_effect = priv_nvmet.NotFound mock_uuid.return_value = 'uuid' nqn = 'nqn.nvme-subsystem-1-uuid' self.target._ensure_subsystem_exists(nqn, mock.sentinel.vol_path, mock.sentinel.uuid) mock_subsys.assert_called_once_with(nqn) expected_section = { 'allowed_hosts': [], 'attr': {'allow_any_host': '1'}, 'namespaces': [{'device': {'nguid': 'uuid', 'uuid': mock.sentinel.uuid, 'path': mock.sentinel.vol_path}, 'enable': 1, 'nsid': self.target.nvmet_ns_id}], 'nqn': nqn } mock_subsys.setup.assert_called_once_with(expected_section) mock_namespace.assert_not_called() @mock.patch('oslo_utils.uuidutils.generate_uuid') def test__namespace_dict(self, mock_uuid): """For not shared nguid is randomly generated.""" res = self.target._namespace_dict(mock.sentinel.uuid, mock.sentinel.volume_path, mock.sentinel.ns_id) expected = {"device": {"nguid": str(mock_uuid.return_value), "uuid": mock.sentinel.uuid, "path": mock.sentinel.volume_path}, "enable": 1, "nsid": mock.sentinel.ns_id} self.assertEqual(expected, res) mock_uuid.assert_called_once() @mock.patch('oslo_utils.uuidutils.generate_uuid') def test__namespace_dict_shared(self, mock_uuid): """For shared uuid = nguid.""" self.mock_object(self.target, 'share_targets', True) res = self.target._namespace_dict(mock.sentinel.uuid, mock.sentinel.volume_path, mock.sentinel.ns_id) expected = {"device": {"nguid": mock.sentinel.uuid, "uuid": mock.sentinel.uuid, "path": mock.sentinel.volume_path}, "enable": 1, "nsid": mock.sentinel.ns_id} self.assertEqual(expected, res) mock_uuid.assert_not_called def test__ensure_namespace_exist_exists(self): """Nothing to do if the namespace is already mapped.""" base_path = '/dev/stack-volumes-lvmdriver-1/volume-' volume_path = f'{base_path}uuid2' subsys = mock.Mock() ns_other = mock.Mock(**{'get_attr.return_value': f'{base_path}uuid1'}) ns_found = mock.Mock(**{'get_attr.return_value': volume_path}) # nw_other appears twice to confirm we stop when found subsys.namespaces = [ns_other, ns_found, ns_other] res = self.target._ensure_namespace_exists(subsys, volume_path, mock.sentinel.uuid) self.assertEqual(ns_found.nsid, res) ns_other.get_attr.assert_called_once_with('device', 'path') ns_found.get_attr.assert_called_once_with('device', 'path') @mock.patch.object(priv_nvmet, 'Namespace') @mock.patch.object(nvmet.NVMET, '_namespace_dict') @mock.patch.object(nvmet.NVMET, '_get_available_namespace_id') def test__ensure_namespace_exist_create(self, mock_get_nsid, mock_ns_dict, mock_ns): """Create the namespace when the path is not mapped yet.""" base_path = '/dev/stack-volumes-lvmdriver-1/volume-' subsys = mock.Mock() ns_other = mock.Mock(**{'get_attr.return_value': f'{base_path}uuid1'}) subsys.namespaces = [ns_other] res = self.target._ensure_namespace_exists(subsys, mock.sentinel.volume_path, mock.sentinel.uuid) self.assertEqual(mock_get_nsid.return_value, res) 
ns_other.get_attr.assert_called_once_with('device', 'path') mock_get_nsid.assert_called_once_with(subsys) mock_ns_dict.assert_called_once_with(mock.sentinel.uuid, mock.sentinel.volume_path, mock_get_nsid.return_value) mock_ns.setup.assert_called_once_with(subsys, mock_ns_dict.return_value) def test__get_available_namespace_id(self): """For non shared we always return the value from the config.""" res = self.target._get_available_namespace_id(mock.Mock()) self.assertEqual(self.target.nvmet_ns_id, res) def test__get_available_namespace_id_none_used(self): """For shared, on empty subsystem return the configured value.""" self.mock_object(self.target, 'share_targets', True) subsys = mock.Mock(namespaces=[]) res = self.target._get_available_namespace_id(subsys) self.assertEqual(self.target.nvmet_ns_id, res) def test__get_available_namespace_id_no_gaps(self): """For shared, if there are no gaps in ids return next.""" self.mock_object(self.target, 'share_targets', True) expected = self.target.nvmet_ns_id + 2 subsys = mock.Mock(namespaces=[mock.Mock(nsid=expected - 1), mock.Mock(nsid=expected - 2)]) res = self.target._get_available_namespace_id(subsys) self.assertEqual(expected, res) def test__get_available_namespace_id_gap_value(self): """For shared, if there is a gap any of them is valid.""" self.mock_object(self.target, 'share_targets', True) lower = self.target.nvmet_ns_id subsys = mock.Mock(namespaces=[mock.Mock(nsid=lower + 3), mock.Mock(nsid=lower)]) res = self.target._get_available_namespace_id(subsys) self.assertTrue(res in [lower + 2, lower + 1]) @mock.patch.object(priv_nvmet, 'Port') def test__ensure_port_exports_already_does(self, mock_port): """Skips port creation and subsystem export since they both exist.""" nqn = 'nqn.nvme-subsystem-1-uuid' port_id = 1 mock_port.return_value.subsystems = [nqn] self.target._ensure_port_exports(nqn, [mock.sentinel.addr], mock.sentinel.port, mock.sentinel.transport, port_id) mock_port.assert_called_once_with(port_id) mock_port.setup.assert_not_called() mock_port.return_value.add_subsystem.assert_not_called() @mock.patch.object(priv_nvmet, 'Port') def test__ensure_port_exports_port_exists_not_exported(self, mock_port): """Skips port creation if exists but exports subsystem.""" nqn = 'nqn.nvme-subsystem-1-vol-2-uuid' port_id = 1 mock_port.return_value.subsystems = ['nqn.nvme-subsystem-1-vol-1-uuid'] self.target._ensure_port_exports(nqn, [mock.sentinel.addr], mock.sentinel.port, mock.sentinel.transport, port_id) mock_port.assert_called_once_with(port_id) mock_port.setup.assert_not_called() mock_port.return_value.add_subsystem.assert_called_once_with(nqn) @mock.patch.object(priv_nvmet, 'Port') def test__ensure_port_exports_port(self, mock_port): """Creates the port and export the subsystem when they don't exist.""" nqn = 'nqn.nvme-subsystem-1-vol-2-uuid' port_id = 1 mock_port.side_effect = priv_nvmet.NotFound self.target._ensure_port_exports(nqn, [mock.sentinel.addr, mock.sentinel.addr2], mock.sentinel.port, mock.sentinel.transport, port_id) new_port1 = {'addr': {'adrfam': 'ipv4', 'traddr': mock.sentinel.addr, 'treq': 'not specified', 'trsvcid': mock.sentinel.port, 'trtype': mock.sentinel.transport}, 'portid': port_id, 'referrals': [], 'subsystems': [nqn]} new_port2 = new_port1.copy() new_port2['portid'] = port_id + 1 new_port2['addr'] = new_port1['addr'].copy() new_port2['addr']['traddr'] = mock.sentinel.addr2 self.assertEqual(2, mock_port.call_count) self.assertEqual(2, mock_port.setup.call_count) mock_port.assert_has_calls([ mock.call(port_id), 
mock.call.setup(self.target._nvmet_root, new_port1), mock.call(port_id + 1), mock.call.setup(self.target._nvmet_root, new_port2) ]) mock_port.return_value.assert_not_called() @mock.patch.object(nvmet.NVMET, '_locked_unmap_volume') def test_terminate_connection(self, mock_unmap): """For non shared there's nothing to do.""" self.target.terminate_connection(mock.sentinel.vol, mock.sentinel.connector) mock_unmap.assert_not_called() @mock.patch.object(nvmet.NVMET, '_locked_unmap_volume') def test_terminate_connection_shared(self, mock_unmap): """For shared the volume must be unmapped.""" self.mock_object(self.target, 'share_targets', True) vol = mock.Mock() self.target.terminate_connection(vol, mock.sentinel.connector) mock_unmap.assert_called_once_with(vol, mock.sentinel.connector) @mock.patch.object(nvmet.NVMET, '_locked_unmap_volume') def test_remove_export(self, mock_unmap): """For non shared the volume must be unmapped.""" vol = mock.Mock() self.target.remove_export(mock.sentinel.context, vol) mock_unmap.assert_called_once_with(vol) @mock.patch.object(nvmet.NVMET, '_locked_unmap_volume') def test_remove_export_shared(self, mock_unmap): """For shared there's nothing to do.""" self.mock_object(self.target, 'share_targets', True) self.target.remove_export(mock.sentinel.context, mock.sentinel.vol) mock_unmap.assert_not_called() @mock.patch('oslo_concurrency.lockutils.lock') @mock.patch.object(nvmet.NVMET, '_get_nqns_for_location', return_value=[]) @mock.patch.object(nvmet.NVMET, '_get_target_nqn') @mock.patch.object(nvmet.NVMET, '_unmap_volume') def test__locked_unmap_volume_no_nqn(self, mock_unmap, mock_nqn, mock_nqns, mock_lock): """Nothing to do with no subsystem when sharing and no connector.""" self.mock_object(self.target, 'share_targets', True) vol = mock.Mock() self.target._locked_unmap_volume(vol, connector=None) mock_lock.assert_called() mock_nqn.assert_not_called() mock_nqns.assert_called_once_with(vol.provider_location) mock_unmap.assert_not_called() @mock.patch('oslo_concurrency.lockutils.lock') @mock.patch.object(nvmet.NVMET, '_get_nqns_for_location') @mock.patch.object(nvmet.NVMET, '_get_target_nqn') @mock.patch.object(nvmet.NVMET, '_unmap_volume') def test__locked_unmap_volume_non_shared(self, mock_unmap, mock_nqn, mock_nqns, mock_lock): """Unmap locked with non sharing and no connector.""" vol = mock.Mock() self.target._locked_unmap_volume(vol, connector=None) mock_lock.assert_called() mock_nqn.assert_called_once_with(vol.id, None) mock_nqns.assert_not_called() mock_unmap.assert_called_once_with(vol, mock_nqn.return_value) @mock.patch('oslo_concurrency.lockutils.lock') @mock.patch.object(nvmet.NVMET, '_get_nqns_for_location') @mock.patch.object(nvmet.NVMET, '_get_target_nqn') @mock.patch.object(nvmet.NVMET, '_unmap_volume') def test__locked_unmap_volume_shared_multiple(self, mock_unmap, mock_nqn, mock_nqns, mock_lock): """Unmap locked with sharing and no connector, having multiple nqns.""" self.mock_object(self.target, 'share_targets', True) vol = mock.Mock() mock_nqns.return_value = [mock.sentinel.nqn1, mock.sentinel.nqn2] self.target._locked_unmap_volume(vol, connector=None) mock_lock.assert_called() mock_nqn.assert_not_called() mock_nqns.assert_called_once_with(vol.provider_location) expected = [mock.call(vol, mock.sentinel.nqn1), mock.call(vol, mock.sentinel.nqn2)] mock_unmap.assert_has_calls(expected) self.assertEqual(2, mock_unmap.call_count) @mock.patch.object(nvmet.NVMET, '_get_target_nqn') @mock.patch.object(priv_nvmet, 'Subsystem') def 
test__unmap_volume_no_subsys(self, mock_subsys, mock_nqn): """Nothing to do it there is no subsystem.""" mock_subsys.side_effect = priv_nvmet.NotFound vol = mock.Mock() # This port is used just to confirm we don't reach that part port = mock.Mock(subsystems=[mock.sentinel.port]) self.mock_object(priv_nvmet.Root, 'ports', [port]) self.target._unmap_volume(vol, mock.sentinel.nqn) mock_subsys.assert_called_once_with(mock.sentinel.nqn) port.remove_subsystem.assert_not_called() @mock.patch.object(priv_nvmet, 'Subsystem') def test__unmap_volume_not_shared(self, mock_subsys): """Non shared assumes the subsystem is empty.""" vol = mock.Mock() # The ns is used to confirm we don't check it ns = mock.Mock(**{'get_attr.return_value': vol.provider_location}) subsys = mock_subsys.return_value subsys.nqn = mock.sentinel.nqn subsys.namespaces = [ns] port = mock.Mock(subsystems=[subsys.nqn]) self.mock_object(priv_nvmet.Root, 'ports', [port]) self.target._unmap_volume(vol, mock.sentinel.nqn) mock_subsys.assert_called_once_with(mock.sentinel.nqn) ns.get_attr.assert_not_called() ns.delete.assert_not_called() port.remove_subsystem.assert_called_once_with(mock.sentinel.nqn) subsys.delete.assert_called_once_with() @mock.patch.object(priv_nvmet, 'Subsystem') def test__unmap_volume_shared_more_ns(self, mock_subsys): """For shared don't unexport subsys if there are other ns.""" self.mock_object(self.target, 'share_targets', True) vol = mock.Mock() ns = mock.Mock(**{'get_attr.return_value': vol.provider_location}) subsys = mock_subsys.return_value subsys.namespaces = [ns] # Use this port to confirm we don't reach that point port = mock.Mock(subsystems=[subsys]) self.mock_object(priv_nvmet.Root, 'ports', [port]) self.target._unmap_volume(vol, mock.sentinel.nqn) mock_subsys.assert_called_once_with(mock.sentinel.nqn) ns.get_attr.assert_called_once_with('device', 'path') ns.delete.assert_called_once_with() port.remove_subsystem.assert_not_called() mock_subsys.return_value.delete.assert_not_called() @mock.patch('oslo_concurrency.lockutils.lock') @mock.patch.object(nvmet.NVMET, '_get_target_nqn') @mock.patch.object(priv_nvmet, 'Subsystem') def test__unmap_volume_shared_last_ns(self, mock_subsys, mock_nqn, mock_lock): """For shared unexport subsys if there are no other ns.""" self.mock_object(self.target, 'share_targets', True) vol = mock.Mock() ns = mock.Mock(**{'get_attr.return_value': vol.provider_location}) nss = [ns] ns.delete.side_effect = nss.clear subsys = mock_subsys.return_value subsys.nqn = mock.sentinel.nqn subsys.namespaces = nss port = mock.Mock(subsystems=[subsys.nqn]) self.mock_object(priv_nvmet.Root, 'ports', [port]) self.target._unmap_volume(vol, mock.sentinel.nqn) mock_subsys.assert_called_once_with(mock.sentinel.nqn) ns.get_attr.assert_called_once_with('device', 'path') ns.delete.assert_called_once_with() port.remove_subsystem.assert_called_once_with(mock.sentinel.nqn) mock_subsys.return_value.delete.assert_called_once_with() def test__get_target_nqn(self): """Non shared uses volume id for subsystem name.""" res = self.target._get_target_nqn('volume_id', None) self.assertEqual('nqn.nvme-subsystem-1-volume_id', res) def test__get_target_nqn_shared(self): """Shared uses connector's hostname for subsystem name.""" self.mock_object(self.target, 'share_targets', True) res = self.target._get_target_nqn('volume_id', {'host': 'localhost'}) self.assertEqual('nqn.nvme-subsystem-1-localhost', res) def test__get_nvme_uuid(self): vol = mock.Mock() res = self.target._get_nvme_uuid(vol) self.assertEqual(vol.name_id, 
res) def test__get_nqns_for_location_no_subsystems(self): self.mock_object(self.target._nvmet_root, 'subsystems', iter([])) res = self.target._get_nqns_for_location(mock.sentinel.location) self.assertListEqual([], res) def test__get_nqns_for_location_no_subsystems_found(self): ns1 = mock.Mock(**{'get_attr.return_value': mock.sentinel.location1}) subsys1 = mock.Mock(namespaces=iter([ns1])) ns2 = mock.Mock(**{'get_attr.return_value': mock.sentinel.location2}) subsys2 = mock.Mock(namespaces=iter([ns2])) subsys = iter([subsys1, subsys2]) self.mock_object(self.target._nvmet_root, 'subsystems', subsys) res = self.target._get_nqns_for_location(mock.sentinel.location3) self.assertListEqual([], res) ns1.get_attr.assert_called_once_with('device', 'path') ns2.get_attr.assert_called_once_with('device', 'path') def test__get_nqns_for_location_subsystems_found(self): ns1 = mock.Mock(**{'get_attr.return_value': mock.sentinel.location1}) subsys1 = mock.Mock(namespaces=iter([ns1])) ns2 = mock.Mock(**{'get_attr.return_value': mock.sentinel.location2}) ns1b = mock.Mock(**{'get_attr.return_value': mock.sentinel.location1}) ns3 = mock.Mock(**{'get_attr.return_value': mock.sentinel.location3}) subsys2 = mock.Mock(namespaces=iter([ns2, ns1b, ns3])) ns4 = mock.Mock(**{'get_attr.return_value': mock.sentinel.location4}) subsys3 = mock.Mock(namespaces=iter([ns4])) subsys4 = mock.Mock(namespaces=iter([])) subsys = iter([subsys1, subsys2, subsys3, subsys4]) self.mock_object(self.target._nvmet_root, 'subsystems', subsys) res = self.target._get_nqns_for_location(mock.sentinel.location1) self.assertListEqual([subsys1.nqn, subsys2.nqn, subsys4.nqn], res) ns1.get_attr.assert_called_once_with('device', 'path') ns2.get_attr.assert_called_once_with('device', 'path') ns1b.get_attr.assert_called_once_with('device', 'path') ns3.get_attr.assert_not_called() ns4.get_attr.assert_called_once_with('device', 'path') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/targets/test_scst_driver.py0000664000175000017500000002613100000000000024070 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
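# NOTE: the SCST tests below feed canned scstadmin output (the target scan,
# attribute scan and group listing strings built in setUp) into the parsing
# helpers, and check that create_export/ensure_export produce the expected
# '<ip>:3260,<tid> <iqn> <lun>' location and 'CHAP <user> <password>' auth
# strings.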
from unittest import mock from cinder import context from cinder.tests.unit.targets import targets_fixture as tf from cinder import utils from cinder.volume.targets import scst from cinder.volume import volume_utils class TestSCSTAdmDriver(tf.TargetDriverFixture): def setUp(self): super(TestSCSTAdmDriver, self).setUp() self.target = scst.SCSTAdm(root_helper=utils.get_root_helper(), configuration=self.configuration) self.fake_iscsi_scan = \ ('Collecting current configuration: done.\n' 'Driver Target\n' '----------------------------------------------\n' 'iscsi iqn.2010-10.org.openstack:' 'volume-ed2c2222-5fc0-11e4-aa15-123b93f75cba\n' 'All done.\n') self.fake_iscsi_attribute_scan = \ ('Collecting current configuration: done.\n' 'Attribute Value Writable KEY\n' '------------------------------------------\n' 'rel_tgt_id 1 Yes Yes\n' 'Dynamic attributes available\n' '----------------------------\n' 'IncomingUser\n' 'OutgoingUser\n' 'allowed_portal\n' 'LUN CREATE attributes available\n' '-------------------------------\n' 'read_only\n' 'All done.\n') self.fake_list_group = \ ('org.openstack:volume-vedams\n' 'Collecting current configuration: done.\n' 'Driver: iscsi\n' 'Target: iqn.2010-10.org.openstack:volume-vedams\n' 'Driver/target \'iscsi/iqn.2010-10.org.openstack:volume-vedams\'' 'has no associated LUNs.\n' 'Group: iqn.1993-08.org.debian:01:626bf14ebdc\n' 'Assigned LUNs:\n' 'LUN Device\n' '------------------\n' '1 1b67387810256\n' '2 2a0f1cc9cd595\n' 'Assigned Initiators:\n' 'Initiator\n' '-------------------------------------\n' 'iqn.1993-08.org.debian:01:626bf14ebdc\n' 'All done.\n') self.target.db = mock.MagicMock( volume_get=lambda x, y: {'provider_auth': 'IncomingUser foo bar'}) @mock.patch.object(utils, 'execute') @mock.patch.object(scst.SCSTAdm, '_target_attribute') @mock.patch.object(scst.SCSTAdm, 'scst_execute') def test_get_target(self, mock_execute, mock_target_attribute, mock_scst_execute): mock_target_attribute.return_value = 1 mock_execute.return_value = (self.fake_iscsi_scan, None) expected = 1 self.assertEqual(expected, self.target._get_target( 'iqn.2010-10.org.openstack:' 'volume-ed2c2222-5fc0-11e4-aa15-123b93f75cba')) @mock.patch('cinder.privsep.targets.scst.run_scstadmin') def test_target_attribute(self, mock_privsep): mock_privsep.return_value = (self.fake_iscsi_attribute_scan, None) self.assertEqual(str(1), self.target._target_attribute( 'iqn.2010-10.org.openstack:' 'volume-ed2c2222-5fc0-11e4-aa15-123b93f75cba')) def test_single_lun_get_target_and_lun(self): ctxt = context.get_admin_context() self.assertEqual((0, 1), self.target._get_target_and_lun( ctxt, self.testvol)) @mock.patch.object(utils, 'execute') @mock.patch.object(scst.SCSTAdm, '_get_group') @mock.patch.object(scst.SCSTAdm, 'scst_execute') def test_multi_lun_get_target_and_lun(self, mock_execute, mock_get_group, mock_scst_execute): mock_execute.return_value = (self.fake_list_group, None) mock_get_group.return_value = self.fake_list_group ctxt = context.get_admin_context() with mock.patch.object(self.target, 'target_name', return_value='iqn.2010-10.org.openstack:' 'volume-vedams'): self.assertEqual((0, 3), self.target._get_target_and_lun( ctxt, self.testvol)) @mock.patch.object(utils, 'execute') @mock.patch.object(scst.SCSTAdm, '_get_target') @mock.patch.object(scst.SCSTAdm, 'scst_execute') def test_create_iscsi_target(self, mock_execute, mock_get_target, mock_scst_execute): mock_execute.return_value = (None, None) mock_get_target.return_value = 1 self.assertEqual(1, self.target.create_iscsi_target( 
'iqn.2010-10.org.openstack:' 'volume-ed2c2222-5fc0-11e4-aa15-123b93f75cba', 'vol1', 0, 1, self.fake_volumes_dir)) @mock.patch.object(utils, 'execute') @mock.patch.object(scst.SCSTAdm, '_get_target') @mock.patch.object(scst.SCSTAdm, 'scst_execute') def test_create_export(self, mock_execute, mock_get_target, mock_scst_execute): mock_execute.return_value = (None, None) mock_scst_execute.return_value = (None, None) mock_get_target.return_value = 1 def _fake_get_target_and_lun(*args, **kwargs): return 0, 1 def _fake_iscsi_location(*args, **kwargs): return '10.9.8.7:3260,1 iqn.2010-10.org.openstack:' \ 'volume-ed2c2222-5fc0-11e4-aa15-123b93f75cba 1' def _fake_get_target_chap_auth(*args, **kwargs): return ('QZJbisGmn9AL954FNF4D', 'P68eE7u9eFqDGexd28DQ') ctxt = context.get_admin_context() expected_result = {'location': '10.9.8.7:3260,1 ' 'iqn.2010-10.org.openstack:' 'volume-ed2c2222-5fc0-11e4-aa15-123b93f75cba 1', 'auth': 'CHAP ' 'QZJbisGmn9AL954FNF4D P68eE7u9eFqDGexd28DQ'} with mock.patch.object(self.target, '_get_target_and_lun', side_effect=_fake_get_target_and_lun), \ mock.patch.object(self.target, '_get_target_chap_auth', side_effect=_fake_get_target_chap_auth), \ mock.patch.object(self.target, 'initiator_iqn', return_value='iqn.1993-08.org.debian:' '01:626bf14ebdc'), \ mock.patch.object(self.target, '_iscsi_location', side_effect=_fake_iscsi_location), \ mock.patch.object(self.target, 'target_driver', return_value='iscsi'), \ mock.patch.object(volume_utils, 'generate_username', side_effect= lambda: 'QZJbisGmn9AL954FNF4D'), \ mock.patch.object(volume_utils, 'generate_password', side_effect=lambda: 'P68eE7u9eFqDGexd28DQ'): self.assertEqual(expected_result, self.target.create_export(ctxt, self.testvol, self.fake_volumes_dir)) @mock.patch('cinder.utils.execute') @mock.patch.object(scst.SCSTAdm, '_get_target') @mock.patch.object(scst.SCSTAdm, 'scst_execute') def test_ensure_export(self, mock_execute, mock_get_target, mock_scst_execute): mock_execute.return_value = (None, None) mock_scst_execute.return_value = (None, None) mock_get_target.return_value = 1 ctxt = context.get_admin_context() def _fake_get_target_and_lun(*args, **kwargs): return 0, 1 def _fake_get_target_chap_auth(*args, **kwargs): return ('QZJbisGmn9AL954FNF4D', 'P68eE7u9eFqDGexd28DQ') with mock.patch.object(self.target, 'create_iscsi_target'), \ mock.patch.object(self.target, '_get_target_chap_auth', side_effect=_fake_get_target_chap_auth), \ mock.patch.object(self.target, '_get_target_and_lun', side_effect=_fake_get_target_and_lun): self.target.ensure_export(ctxt, self.testvol, self.fake_volumes_dir) self.target.create_iscsi_target.assert_called_once_with( 'iqn.2010-10.org.openstack:testvol', 'ed2c2222-5fc0-11e4-aa15-123b93f75cba', 0, 1, self.fake_volumes_dir, _fake_get_target_chap_auth()) @mock.patch('cinder.utils.execute') @mock.patch.object(scst.SCSTAdm, '_get_target') @mock.patch.object(scst.SCSTAdm, 'scst_execute') def test_ensure_export_chap(self, mock_execute, mock_get_target, mock_scst_execute): mock_execute.return_value = (None, None) mock_scst_execute.return_value = (None, None) mock_get_target.return_value = 1 ctxt = context.get_admin_context() def _fake_get_target_and_lun(*args, **kwargs): return 0, 1 def _fake_get_target_chap_auth(*args, **kwargs): return None with mock.patch.object(self.target, 'create_iscsi_target'), \ mock.patch.object(self.target, '_get_target_chap_auth', side_effect=_fake_get_target_chap_auth), \ mock.patch.object(self.target, '_get_target_and_lun', side_effect=_fake_get_target_and_lun): 
self.target.ensure_export(ctxt, self.testvol, self.fake_volumes_dir) self.target.create_iscsi_target.assert_called_once_with( 'iqn.2010-10.org.openstack:testvol', 'ed2c2222-5fc0-11e4-aa15-123b93f75cba', 0, 1, self.fake_volumes_dir, None) def test_iscsi_location(self): location = self.target._iscsi_location('portal', 1, 'target', 2) self.assertEqual('portal:3260,1 target 2', location) def test_iscsi_location_IPv6(self): ip = 'fd00:fd00:fd00:3000::12' location = self.target._iscsi_location(ip, 1, 'target', 2) self.assertEqual('[%s]:3260,1 target 2' % ip, location) ip = '[' + ip + ']' location = self.target._iscsi_location(ip, 1, 'target', 2) self.assertEqual(ip + ':3260,1 target 2', location) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/targets/test_spdknvmf.py0000664000175000017500000003426000000000000023373 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import copy import json from unittest import mock import requests from cinder.tests.unit import test from cinder.volume import configuration as conf from cinder.volume.targets import spdknvmf as spdknvmf_driver BDEVS = [{ "num_blocks": 4096000, "name": "Nvme0n1", "driver_specific": { "nvme": { "trid": { "trtype": "PCIe", "traddr": "0000:00:04.0" }, "ns_data": { "id": 1 }, "pci_address": "0000:00:04.0", "vs": { "nvme_version": "1.1" }, "ctrlr_data": { "firmware_revision": "1.0", "serial_number": "deadbeef", "oacs": { "ns_manage": 0, "security": 0, "firmware": 0, "format": 0 }, "vendor_id": "0x8086", "model_number": "QEMU NVMe Ctrl" }, "csts": { "rdy": 1, "cfs": 0 } } }, "supported_io_types": { "reset": True, "nvme_admin": True, "unmap": False, "read": True, "write_zeroes": False, "write": True, "flush": True, "nvme_io": True }, "claimed": False, "block_size": 512, "product_name": "NVMe disk", "aliases": ["Nvme0n1"] }, { "num_blocks": 8192, "uuid": "70efd305-4e66-49bd-99ff-faeda5c3052d", "aliases": [ "Nvme0n1p0" ], "driver_specific": { "lvol": { "base_bdev": "Nvme0n1", "lvol_store_uuid": "58b17014-d4a1-4f85-9761-093643ed18f1", "thin_provision": False } }, "supported_io_types": { "reset": True, "nvme_admin": False, "unmap": True, "read": True, "write_zeroes": True, "write": True, "flush": False, "nvme_io": False }, "claimed": False, "block_size": 4096, "product_name": "Split Disk", "name": "Nvme0n1p0" }, { "num_blocks": 8192, "uuid": "70efd305-4e66-49bd-99ff-faeda5c3052d", "aliases": [ "Nvme0n1p1" ], "driver_specific": { "lvol": { "base_bdev": "Nvme0n1", "lvol_store_uuid": "58b17014-d4a1-4f85-9761-093643ed18f1", "thin_provision": False } }, "supported_io_types": { "reset": True, "nvme_admin": False, "unmap": True, "read": True, "write_zeroes": True, "write": True, "flush": False, "nvme_io": False }, "claimed": False, "block_size": 4096, "product_name": "Split Disk", "name": "Nvme0n1p1" }, { "num_blocks": 8192, "uuid": "70efd305-4e66-49bd-99ff-faeda5c3052d", "aliases": [ "lvs_test/lvol0" ], "driver_specific": { "lvol": { "base_bdev": 
"Malloc0", "lvol_store_uuid": "58b17014-d4a1-4f85-9761-093643ed18f1", "thin_provision": False } }, "supported_io_types": { "reset": True, "nvme_admin": False, "unmap": True, "read": True, "write_zeroes": True, "write": True, "flush": False, "nvme_io": False }, "claimed": False, "block_size": 4096, "product_name": "Logical Volume", "name": "58b17014-d4a1-4f85-9761-093643ed18f1_4294967297" }, { "num_blocks": 8192, "uuid": "8dec1964-d533-41df-bea7-40520efdb416", "aliases": [ "lvs_test/lvol1" ], "driver_specific": { "lvol": { "base_bdev": "Malloc0", "lvol_store_uuid": "58b17014-d4a1-4f85-9761-093643ed18f1", "thin_provision": True } }, "supported_io_types": { "reset": True, "nvme_admin": False, "unmap": True, "read": True, "write_zeroes": True, "write": True, "flush": False, "nvme_io": False }, "claimed": False, "block_size": 4096, "product_name": "Logical Volume", "name": "58b17014-d4a1-4f85-9761-093643ed18f1_4294967298" }] NVMF_SUBSYSTEMS = [{ "listen_addresses": [], "subtype": "Discovery", "nqn": "nqn.2014-08.org.nvmexpress.discovery", "hosts": [], "allow_any_host": True }, { "listen_addresses": [], "subtype": "NVMe", "hosts": [{ "nqn": "nqn.2016-06.io.spdk:init" }], "namespaces": [{ "bdev_name": "Nvme0n1p0", "nsid": 1, "name": "Nvme0n1p0" }], "allow_any_host": False, "serial_number": "SPDK00000000000001", "nqn": "nqn.2016-06.io.spdk:cnode1" }, { "listen_addresses": [], "subtype": "NVMe", "hosts": [], "namespaces": [{ "bdev_name": "Nvme1n1p0", "nsid": 1, "name": "Nvme1n1p0" }], "allow_any_host": True, "serial_number": "SPDK00000000000002", "nqn": "nqn.2016-06.io.spdk:cnode2" }] class JSONRPCException(Exception): def __init__(self, message): self.message = message class JSONRPCClient(object): def __init__(self, addr=None, port=None): self.methods = {"bdev_get_bdevs": self.get_bdevs, "construct_nvmf_subsystem": self.construct_nvmf_subsystem, "nvmf_delete_subsystem": self.delete_nvmf_subsystem, "nvmf_create_subsystem": self.nvmf_subsystem_create, "nvmf_subsystem_add_listener": self.nvmf_subsystem_add_listener, "nvmf_subsystem_add_ns": self.nvmf_subsystem_add_ns, "nvmf_get_subsystems": self.get_nvmf_subsystems} self.bdevs = copy.deepcopy(BDEVS) self.nvmf_subsystems = copy.deepcopy(NVMF_SUBSYSTEMS) def __del__(self): pass def get_bdevs(self, params=None): if params and 'name' in params: for bdev in self.bdevs: for alias in bdev['aliases']: if params['name'] in alias: return json.dumps({"result": [bdev]}) if bdev['name'] == params['name']: return json.dumps({"result": [bdev]}) return json.dumps({"error": "Not found"}) return json.dumps({"result": self.bdevs}) def get_nvmf_subsystems(self, params=None): return json.dumps({"result": self.nvmf_subsystems}) def construct_nvmf_subsystem(self, params=None): nvmf_subsystem = { "listen_addresses": [], "subtype": "NVMe", "hosts": [], "namespaces": [{ "bdev_name": "Nvme1n1p0", "nsid": 1, "name": "Nvme1n1p0" }], "allow_any_host": True, "serial_number": params['serial_number'], "nqn": params['nqn'] } self.nvmf_subsystems.append(nvmf_subsystem) return json.dumps({"result": nvmf_subsystem}) def delete_nvmf_subsystem(self, params=None): found_id = -1 i = 0 for nvmf_subsystem in self.nvmf_subsystems: if nvmf_subsystem['nqn'] == params['nqn']: found_id = i i += 1 if found_id != -1: del self.nvmf_subsystems[found_id] return json.dumps({"result": {}}) def nvmf_subsystem_create(self, params=None): nvmf_subsystem = { "namespaces": [], "nqn": params['nqn'], "serial_number": "S0000000000000000001", "allow_any_host": False, "subtype": "NVMe", "hosts": [], 
"listen_addresses": [] } self.nvmf_subsystems.append(nvmf_subsystem) return json.dumps({"result": nvmf_subsystem}) def nvmf_subsystem_add_listener(self, params=None): for nvmf_subsystem in self.nvmf_subsystems: if nvmf_subsystem['nqn'] == params['nqn']: nvmf_subsystem['listen_addresses'].append( params['listen_address'] ) return json.dumps({"result": ""}) def nvmf_subsystem_add_ns(self, params=None): for nvmf_subsystem in self.nvmf_subsystems: if nvmf_subsystem['nqn'] == params['nqn']: nvmf_subsystem['namespaces'].append( params['namespace'] ) return json.dumps({"result": ""}) def call(self, method, params=None): req = {} req['jsonrpc'] = '2.0' req['method'] = method req['id'] = 1 if (params): req['params'] = params response = json.loads(self.methods[method](params)) if not response: return {} if 'error' in response: msg = "\n".join(["Got JSON-RPC error response", "request:", json.dumps(req, indent=2), "response:", json.dumps(response['error'], indent=2)]) raise JSONRPCException(msg) return response['result'] class Target(object): def __init__(self, name="Nvme0n1p0"): self.name = name class SpdkNvmfDriverTestCase(test.TestCase): def setUp(self): super(SpdkNvmfDriverTestCase, self).setUp() self.configuration = mock.Mock(conf.Configuration) self.configuration.target_ip_address = '192.168.0.1' self.configuration.target_secondary_ip_addresses = [] self.configuration.target_port = '4420' self.configuration.target_prefix = "" self.configuration.nvmet_port_id = "1" self.configuration.nvmet_ns_id = "fake_id" self.configuration.nvmet_subsystem_name = "nqn.2014-08.io.spdk" self.configuration.target_protocol = "nvmet_rdma" self.configuration.spdk_rpc_ip = "127.0.0.1" self.configuration.spdk_rpc_port = 8000 self.configuration.spdk_rpc_protocol = "https" self.configuration.spdk_rpc_username = "user" self.configuration.spdk_rpc_password = "password" self.configuration.driver_ssl_cert_verify = False self.driver = spdknvmf_driver.SpdkNvmf(configuration= self.configuration) self.jsonrpcclient = JSONRPCClient() def get_item(self): return {'result': 'test_result'} def test__rpc_parameters(self): url = ('%(protocol)s://%(ip)s:%(port)s/' % {'protocol': self.configuration.spdk_rpc_protocol, 'ip': self.configuration.spdk_rpc_ip, 'port': self.configuration.spdk_rpc_port}) auth = (self.configuration.spdk_rpc_username, self.configuration.spdk_rpc_password) verify = self.configuration.driver_ssl_cert_verify requests.post = mock.MagicMock() setattr(requests.post.service.__getitem__, 'side_effect', self.get_item) self.driver._rpc_call("bdev_get_bdevs") requests.post.assert_called_once_with(url, auth=auth, data=mock.ANY, timeout=mock.ANY, verify=verify) def test__get_spdk_volume_name(self): with mock.patch.object(self.driver, "_rpc_call", self.jsonrpcclient.call): bdevs = self.driver._rpc_call("bdev_get_bdevs") bdev_name = bdevs[0]['name'] volume_name = self.driver._get_spdk_volume_name(bdev_name) self.assertEqual(bdev_name, volume_name) volume_name = self.driver._get_spdk_volume_name("fake") self.assertIsNone(volume_name) def test__get_nqn_with_volume_name(self): with mock.patch.object(self.driver, "_rpc_call", self.jsonrpcclient.call): nqn = self.driver._get_nqn_with_volume_name("Nvme0n1p0") nqn_tmp = self.driver._rpc_call("nvmf_get_subsystems")[1]['nqn'] self.assertEqual(nqn, nqn_tmp) nqn = self.driver._get_nqn_with_volume_name("fake") self.assertIsNone(nqn) def test__get_first_free_node(self): with mock.patch.object(self.driver, "_rpc_call", self.jsonrpcclient.call): free_node = self.driver._get_first_free_node() 
self.assertEqual(3, free_node) def test_create_nvmeof_target(self): with mock.patch.object(self.driver, "_rpc_call", self.jsonrpcclient.call): subsystems_first = self.driver._rpc_call("nvmf_get_subsystems") self.driver.create_nvmeof_target("Nvme0n1p1", "nqn.2016-06.io.spdk", "192.168.0.1", 4420, "rdma", -1, -1, "") subsystems_last = self.driver._rpc_call("nvmf_get_subsystems") self.assertEqual(len(subsystems_first) + 1, len(subsystems_last)) def test_delete_nvmeof_target(self): with mock.patch.object(self.driver, "_rpc_call", self.jsonrpcclient.call): subsystems_first = self.driver._rpc_call("nvmf_get_subsystems") target = Target() self.driver.delete_nvmeof_target(target) subsystems_last = self.driver._rpc_call("nvmf_get_subsystems") self.assertEqual(len(subsystems_first) - 1, len(subsystems_last)) target.name = "fake" self.driver.delete_nvmeof_target(target) self.assertEqual(len(subsystems_first) - 1, len(subsystems_last)) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/targets/test_tgt_driver.py0000664000175000017500000004252000000000000023712 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import os import sys import time from unittest import mock from oslo_concurrency import processutils as putils from cinder import context from cinder import exception from cinder.tests.unit.targets import targets_fixture as tf from cinder.tests.unit import test from cinder import utils from cinder.volume.targets import tgt from cinder.volume import volume_utils class TestTgtAdmDriver(tf.TargetDriverFixture): def setUp(self): super(TestTgtAdmDriver, self).setUp() self.configuration.get = mock.Mock(side_effect=self.fake_get) self.target = tgt.TgtAdm(root_helper=utils.get_root_helper(), configuration=self.configuration) self.testvol_path = \ '/dev/stack-volumes-lvmdriver-1/%s' % self.VOLUME_NAME self.fake_iscsi_scan =\ ('Target 1: %(test_vol)s\n' ' System information:\n' ' Driver: iscsi\n' ' State: ready\n' ' I_T nexus information:\n' ' LUN information:\n' ' LUN: 0\n' ' Type: controller\n' ' SCSI ID: IET 00010000\n' ' SCSI SN: beaf10\n' ' Size: 0 MB, Block size: 1\n' ' Online: Yes\n' ' Removable media: No\n' ' Prevent removal: No\n' ' Readonly: No\n' ' SWP: No\n' ' Thin-provisioning: No\n' ' Backing store type: null\n' ' Backing store path: None\n' ' Backing store flags:\n' ' LUN: 1\n' ' Type: disk\n' ' SCSI ID: IET 00010001\n' ' SCSI SN: beaf11\n' ' Size: 1074 MB, Block size: 512\n' ' Online: Yes\n' ' Removable media: No\n' ' Prevent removal: No\n' ' Readonly: No\n' ' SWP: No\n' ' Thin-provisioning: No\n' ' Backing store type: rdwr\n' ' Backing store path: %(bspath)s\n' ' Backing store flags:\n' ' Account information:\n' ' mDVpzk8cZesdahJC9h73\n' ' ACL information:\n' ' ALL"\n' % {'test_vol': self.test_vol, 'bspath': self.testvol_path}) self.patch('time.sleep') def fake_get(self, value, default): if value in ('iscsi_target_flags', 'iscsi_write_cache'): return getattr(self, value, default) def 
test_iscsi_protocol(self): self.assertEqual('iscsi', self.target.iscsi_protocol) def test_get_target(self): with mock.patch('cinder.privsep.targets.tgt.tgtadmin_show', return_value=(self.fake_iscsi_scan, None)): iqn = self.test_vol self.assertEqual('1', self.target._get_target(iqn)) def test_verify_backing_lun(self): iqn = self.test_vol with mock.patch('cinder.privsep.targets.tgt.tgtadmin_show', return_value=(self.fake_iscsi_scan, None)): self.assertTrue(self.target._verify_backing_lun(iqn, '1')) # Test the failure case bad_scan = self.fake_iscsi_scan.replace('LUN: 1', 'LUN: 3') with mock.patch('cinder.privsep.targets.tgt.tgtadmin_show', return_value=(bad_scan, None)): self.assertFalse(self.target._verify_backing_lun(iqn, '1')) @mock.patch.object(time, 'sleep') @mock.patch('cinder.privsep.targets.tgt.tgtadm_create') def test_recreate_backing_lun(self, mock_privsep, mock_sleep): mock_privsep.return_value = ('out', 'err') self.target._recreate_backing_lun(self.test_vol, '1', self.testvol['name'], self.testvol_path) mock_privsep.assert_called_once_with('1', self.testvol_path) # Test the failure case mock_privsep.side_effect = putils.ProcessExecutionError self.assertIsNone( self.target._recreate_backing_lun(self.test_vol, '1', self.testvol['name'], self.testvol_path)) def test_get_iscsi_target(self): ctxt = context.get_admin_context() expected = 0 self.assertEqual(expected, self.target._get_iscsi_target(ctxt, self.testvol['id'])) def test_get_target_and_lun(self): lun = 1 iscsi_target = 0 ctxt = context.get_admin_context() expected = (iscsi_target, lun) self.assertEqual(expected, self.target._get_target_and_lun(ctxt, self.testvol)) @test.testtools.skipIf(sys.platform == "darwin", "SKIP on OSX") def test_create_iscsi_target(self): with mock.patch('cinder.privsep.targets.tgt.tgtadm_show', return_value=('', '')), \ mock.patch.object(self.target, '_get_target', side_effect=lambda x: 1), \ mock.patch('cinder.privsep.targets.tgt.tgtadmin_update', return_value=('', '')), \ mock.patch.object(self.target, '_verify_backing_lun', side_effect=lambda x, y: True): self.assertEqual( 1, self.target.create_iscsi_target( self.test_vol, 1, 0, self.fake_volumes_dir)) @test.testtools.skipIf(sys.platform == "darwin", "SKIP on OSX") def test_create_iscsi_target_content(self): self.iscsi_target_flags = 'foo' self.iscsi_write_cache = 'bar' mock_open = mock.mock_open() with mock.patch('cinder.privsep.targets.tgt.tgtadm_show', return_value=('', '')), \ mock.patch.object(self.target, '_get_target', side_effect=lambda x: 1), \ mock.patch.object(self.target, '_verify_backing_lun', side_effect=lambda x, y: True), \ mock.patch('cinder.privsep.targets.tgt.tgtadmin_update', return_value=('', '')), \ mock.patch('cinder.volume.targets.tgt.open', mock_open, create=True): self.assertEqual( 1, self.target.create_iscsi_target( self.test_vol, 1, 0, self.testvol_path, chap_auth=('chap_foo', 'chap_bar'))) @test.testtools.skipIf(sys.platform == "darwin", "SKIP on OSX") def test_create_iscsi_target_already_exists(self): def _fake_execute(*args, **kwargs): if 'update' in args: raise putils.ProcessExecutionError( exit_code=1, stdout='', stderr='target already exists', cmd='tgtad --lld iscsi --op show --mode target') else: return 'fake out', 'fake err' with mock.patch.object(self.target, '_get_target', side_effect=lambda x: 1), \ mock.patch.object(self.target, '_verify_backing_lun', side_effect=lambda x, y: True), \ mock.patch('cinder.privsep.targets.tgt.tgtadmin_update', return_value=('', '')), \ 
mock.patch('cinder.privsep.targets.tgt.tgtadm_show', _fake_execute): self.assertEqual( 1, self.target.create_iscsi_target( self.test_vol, 1, 0, self.fake_volumes_dir)) @mock.patch('os.path.isfile', return_value=True) @mock.patch('os.path.exists', return_value=True) @mock.patch('cinder.privsep.targets.tgt.tgtadmin_delete') @mock.patch('os.unlink', return_value=None) def test_delete_target_not_found(self, mock_unlink, mock_exec, mock_pathexists, mock_isfile): def _fake_execute(*args, **kwargs): raise putils.ProcessExecutionError( exit_code=1, stdout='', stderr='can\'t find the target', cmd='tgt-admin --force --delete') def _fake_execute_wrong_message(*args, **kwargs): raise putils.ProcessExecutionError( exit_code=1, stdout='', stderr='this is not the error you are looking for', cmd='tgt-admin --force --delete') mock_exec.side_effect = _fake_execute with mock.patch.object(self.target, '_get_target', return_value=False): self.assertIsNone(self.target.remove_iscsi_target( 1, 0, self.VOLUME_ID, self.VOLUME_NAME)) mock_exec.side_effect = _fake_execute_wrong_message self.assertRaises(exception.ISCSITargetRemoveFailed, self.target.remove_iscsi_target, 1, 0, self.VOLUME_ID, self.VOLUME_NAME) @mock.patch('os.path.isfile', return_value=True) @mock.patch('os.path.exists', return_value=True) @mock.patch('cinder.privsep.targets.tgt.tgtadmin_delete') @mock.patch('os.unlink', return_value=None) def test_delete_target_acl_not_found(self, mock_unlink, mock_exec, mock_pathexists, mock_isfile): def _fake_execute(*args, **kwargs): raise putils.ProcessExecutionError( exit_code=1, stdout='', stderr='this access control rule does not exist', cmd='tgt-admin --force --delete') def _fake_execute_wrong_message(*args, **kwargs): raise putils.ProcessExecutionError( exit_code=1, stdout='', stderr='this is not the error you are looking for', cmd='tgt-admin --force --delete') mock_exec.side_effect = _fake_execute with mock.patch.object(self.target, '_get_target', return_value=False): self.assertIsNone(self.target.remove_iscsi_target( 1, 0, self.VOLUME_ID, self.VOLUME_NAME)) mock_exec.side_effect = _fake_execute_wrong_message self.assertRaises(exception.ISCSITargetRemoveFailed, self.target.remove_iscsi_target, 1, 0, self.VOLUME_ID, self.VOLUME_NAME) @mock.patch.object(tgt.TgtAdm, '_get_iscsi_properties') def test_initialize_connection(self, mock_get_iscsi): connector = {'initiator': 'fake_init'} # Test the normal case mock_get_iscsi.return_value = 'foo bar' expected_return = {'driver_volume_type': 'iscsi', 'data': 'foo bar'} self.assertEqual(expected_return, self.target.initialize_connection(self.testvol, connector)) @mock.patch('cinder.utils.execute') @mock.patch.object(tgt.TgtAdm, '_get_target') @mock.patch.object(os.path, 'exists') @mock.patch.object(os.path, 'isfile') @mock.patch.object(os, 'unlink') @mock.patch('cinder.privsep.targets.tgt.tgtadmin_delete') def test_remove_iscsi_target(self, mock_delete, mock_unlink, mock_isfile, mock_path_exists, mock_get_target, mock_execute): # Test the failure case: path does not exist mock_path_exists.return_value = None self.assertIsNone(self.target.remove_iscsi_target( 0, 1, self.testvol['id'], self.testvol['name'])) # Test the normal case mock_path_exists.return_value = True mock_isfile.return_value = True self.target.remove_iscsi_target(0, 1, self.testvol['id'], self.testvol['name']) mock_delete.assert_called_with( self.iscsi_target_prefix + self.testvol['name']) @test.testtools.skipIf(sys.platform == "darwin", "SKIP on OSX") def test_create_export(self): expected_result = 
{'location': '10.9.8.7:3260,1 ' + self.iscsi_target_prefix + self.testvol['name'] + ' 1', 'auth': 'CHAP QZJb P68e'} with mock.patch('cinder.privsep.targets.tgt.tgtadm_show', return_value=('', '')), \ mock.patch.object(self.target, '_get_target', side_effect=lambda x: 1), \ mock.patch.object(self.target, '_verify_backing_lun', side_effect=lambda x, y: True), \ mock.patch.object(self.target, '_get_target_chap_auth', side_effect=lambda x, y: None) as m_chap, \ mock.patch.object(volume_utils, 'generate_username', side_effect=lambda: 'QZJb'), \ mock.patch('cinder.privsep.targets.tgt.tgtadmin_update', return_value=('', '')), \ mock.patch.object(volume_utils, 'generate_password', side_effect=lambda: 'P68e'): ctxt = context.get_admin_context() self.assertEqual(expected_result, self.target.create_export(ctxt, self.testvol, self.fake_volumes_dir)) m_chap.side_effect = lambda x, y: ('otzL', '234Z') expected_result['auth'] = ('CHAP otzL 234Z') self.assertEqual(expected_result, self.target.create_export(ctxt, self.testvol, self.fake_volumes_dir)) @mock.patch.object(tgt.TgtAdm, '_get_target_chap_auth') @mock.patch.object(tgt.TgtAdm, 'create_iscsi_target') def test_ensure_export(self, _mock_create, mock_get_chap): ctxt = context.get_admin_context() mock_get_chap.return_value = ('foo', 'bar') self.target.ensure_export(ctxt, self.testvol, self.fake_volumes_dir) _mock_create.assert_called_once_with( self.iscsi_target_prefix + self.testvol['name'], 0, 1, self.fake_volumes_dir, ('foo', 'bar'), check_exit_code=False, old_name=None, portals_ips=[self.configuration.target_ip_address], portals_port=self.configuration.target_port) @test.testtools.skipIf(sys.platform == "darwin", "SKIP on OSX") def test_create_iscsi_target_retry(self): with mock.patch('cinder.privsep.targets.tgt.tgtadm_show', return_value=('', '')), \ mock.patch.object(self.target, '_get_target', side_effect=[None, None, 1]) as get_target, \ mock.patch('cinder.privsep.targets.tgt.tgtadmin_update', return_value=('', '')), \ mock.patch.object(self.target, '_verify_backing_lun', side_effect=lambda x, y: True): self.assertEqual( 1, self.target.create_iscsi_target( self.test_vol, 1, 0, self.fake_volumes_dir)) # 3 - default retries count value for utils.retry self.assertEqual(3, get_target.call_count) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/test.py0000664000175000017500000005561000000000000020014 0ustar00zuulzuul00000000000000# Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Base classes for our unit tests. Allows overriding of CONF for use of fakes, and some black magic for inline callbacks. 
""" import copy import logging import os from unittest import mock import uuid from eventlet import tpool import fixtures from keystonemiddleware import auth_token from oslo_concurrency import lockutils from oslo_config import cfg from oslo_config import fixture as config_fixture from oslo_log.fixture import logging_error as log_fixture import oslo_messaging from oslo_messaging import conffixture as messaging_conffixture from oslo_serialization import jsonutils from oslo_utils import strutils from oslo_utils import timeutils import testtools from cinder.api import common as api_common from cinder.common import config from cinder import context from cinder import coordination from cinder.db import migration from cinder.db.sqlalchemy import api as sqla_api from cinder import i18n from cinder.objects import base as objects_base from cinder import rpc from cinder import service from cinder.tests import fixtures as cinder_fixtures from cinder.tests import unit as cinder_unit from cinder.tests.unit import conf_fixture from cinder.tests.unit import fake_notifier from cinder.tests.unit import known_issues as issues from cinder.volume import configuration from cinder.volume import driver as vol_driver from cinder.volume import volume_types from cinder.volume import volume_utils CONF = config.CONF _DB_CACHE = None DB_SCHEMA = None class TestingException(Exception): pass class Database(fixtures.Fixture): def __init__(self): super().__init__() # Suppress logging for test runs alembic_logger = logging.getLogger('alembic.runtime.migration') alembic_logger.setLevel(logging.WARNING) db_logger = logging.getLogger('cinder.db.migration') db_logger.setLevel(logging.WARNING) def setUp(self): super().setUp() engine = sqla_api.get_engine() engine.dispose() self._cache_schema() conn = engine.connect() conn.connection.executescript(DB_SCHEMA) self.addCleanup(self.cleanup) def _cache_schema(self): global DB_SCHEMA if not DB_SCHEMA: engine = sqla_api.get_engine() conn = engine.connect() migration.db_sync() DB_SCHEMA = "".join(line for line in conn.connection.iterdump()) engine.dispose() def cleanup(self): engine = sqla_api.get_engine() engine.dispose() class TestCase(testtools.TestCase): """Test case base class for all unit tests.""" SOURCE_TREE_ROOT = os.path.abspath( os.path.join( os.path.dirname(__file__), '../../../')) POLICY_PATH = os.path.join(SOURCE_TREE_ROOT, 'cinder/tests/unit/policy.yaml') RESOURCE_FILTER_FILENAME = 'etc/cinder/resource_filters.json' RESOURCE_FILTER_PATH = os.path.join(SOURCE_TREE_ROOT, RESOURCE_FILTER_FILENAME) MOCK_WORKER = True MOCK_TOOZ = True FAKE_OVO_HISTORY_VERSION = '9999.999' def __init__(self, *args, **kwargs): super(TestCase, self).__init__(*args, **kwargs) # Suppress some log messages during test runs castellan_logger = logging.getLogger('castellan') castellan_logger.setLevel(logging.ERROR) stevedore_logger = logging.getLogger('stevedore') stevedore_logger.setLevel(logging.ERROR) def _get_joined_notifier(self, *args, **kwargs): # We create a new fake notifier but we join the notifications with # the default notifier notifier = fake_notifier.get_fake_notifier(*args, **kwargs) notifier.notifications = self.notifier.notifications return notifier def _reset_filter_file(self): self.override_config('resource_query_filters_file', self.RESOURCE_FILTER_PATH) api_common._FILTERS_COLLECTION = None def setUp(self): """Run before each test method to initialize test environment.""" super(TestCase, self).setUp() # Create default notifier self.notifier = fake_notifier.get_fake_notifier() # 
Mock rpc get notifier with fake notifier method that joins all # notifications with the default notifier self.patch('cinder.rpc.get_notifier', side_effect=self._get_joined_notifier) # Protect against any case where someone doesn't directly patch a retry # decorated call. self.patch('tenacity.nap.sleep') if self.MOCK_WORKER: # Mock worker creation for all tests that don't care about it clean_path = 'cinder.objects.cleanable.CinderCleanableObject.%s' for method in ('create_worker', 'set_worker', 'unset_worker'): self.patch(clean_path % method, return_value=None) if self.MOCK_TOOZ: self.patch('cinder.coordination.Coordinator.start') self.patch('cinder.coordination.Coordinator.stop') self.patch('cinder.coordination.Coordinator.get_lock') # Unit tests do not need to use lazy gettext i18n.enable_lazy(False) test_timeout = os.environ.get('OS_TEST_TIMEOUT', 0) try: test_timeout = int(test_timeout) except ValueError: # If timeout value is invalid do not set a timeout. test_timeout = 0 if test_timeout > 0: self.useFixture(fixtures.Timeout(test_timeout, gentle=True)) self.useFixture(fixtures.NestedTempfile()) self.useFixture(fixtures.TempHomeDir()) environ_enabled = (lambda var_name: strutils.bool_from_string(os.environ.get(var_name))) if environ_enabled('OS_STDOUT_CAPTURE'): stdout = self.useFixture(fixtures.StringStream('stdout')).stream self.useFixture(fixtures.MonkeyPatch('sys.stdout', stdout)) if environ_enabled('OS_STDERR_CAPTURE'): stderr = self.useFixture(fixtures.StringStream('stderr')).stream self.useFixture(fixtures.MonkeyPatch('sys.stderr', stderr)) self.useFixture(log_fixture.get_logging_handle_error_fixture()) self.useFixture(cinder_fixtures.StandardLogging()) rpc.add_extra_exmods("cinder.tests.unit") self.addCleanup(rpc.clear_extra_exmods) self.addCleanup(rpc.cleanup) self.messaging_conf = messaging_conffixture.ConfFixture(CONF) self.messaging_conf.transport_url = 'fake:/' self.messaging_conf.response_timeout = 15 self.useFixture(self.messaging_conf) # Load oslo_messaging_notifications config group so we can set an # override to prevent notifications from being ignored due to the # short-circuit mechanism. oslo_messaging.get_notification_transport(CONF) # We need to use a valid driver for the notifications, so we use test. self.override_config('driver', ['test'], group='oslo_messaging_notifications') rpc.init(CONF) # NOTE(geguileo): This is required because _determine_obj_version_cap # and _determine_rpc_version_cap functions in cinder.rpc.RPCAPI cache # versions in LAST_RPC_VERSIONS and LAST_OBJ_VERSIONS so we may have # weird interactions between tests if we don't clear them before each # test. rpc.LAST_OBJ_VERSIONS = {} rpc.LAST_RPC_VERSIONS = {} # Init AuthProtocol to register some base options first, such as # auth_url. auth_token.AuthProtocol('fake_app', {'auth_type': 'password', 'auth_url': 'fake_url'}) conf_fixture.set_defaults(CONF) CONF([], default_config_files=[]) # NOTE(vish): We need a better method for creating fixtures for tests # now that we have some required db setup for the system # to work properly. self.start = timeutils.utcnow() CONF.set_default('connection', 'sqlite://', 'database') CONF.set_default('sqlite_synchronous', False, 'database') self.useFixture(Database()) # NOTE(blk-u): WarningsFixture must be after the Database fixture # because sqlalchemy-migrate messes with the warnings filters. self.useFixture(cinder_fixtures.WarningsFixture()) # NOTE(danms): Make sure to reset us back to non-remote objects # for each test to avoid interactions. 
Also, backup the object # registry. objects_base.CinderObject.indirection_api = None self._base_test_obj_backup = copy.copy( objects_base.CinderObjectRegistry._registry._obj_classes) self.addCleanup(self._restore_obj_registry) self.addCleanup(CONF.reset) self.addCleanup(self._common_cleanup) self._services = [] fake_notifier.mock_notifier(self) # This will be cleaned up by the NestedTempfile fixture lock_path = self.useFixture(fixtures.TempDir()).path self.fixture = self.useFixture( config_fixture.Config(lockutils.CONF)) self.fixture.config(lock_path=lock_path, group='oslo_concurrency') lockutils.set_defaults(lock_path) self.override_config('policy_file', os.path.join( os.path.abspath( os.path.dirname(__file__) ), self.POLICY_PATH), group='oslo_policy') self.override_config('resource_query_filters_file', self.RESOURCE_FILTER_PATH) self._disable_osprofiler() # NOTE(geguileo): This is required because common get_by_id method in # cinder.db.sqlalchemy.api caches get methods and if we use a mocked # get method in one test it would carry on to the next test. So we # clear out the cache. sqla_api._GET_METHODS = {} self.override_config('backend_url', 'file://' + lock_path, group='coordination') coordination.COORDINATOR.start() self.addCleanup(coordination.COORDINATOR.stop) # NOTE(mikal): make sure we don't load a privsep helper accidentally self.useFixture(cinder_fixtures.PrivsepNoHelperFixture()) # NOTE: This volume type is created to avoid failure at database since # volume_type_id is non-nullable for volumes and snapshots self.vt = volume_types.get_default_volume_type() # Create fake RPC history if we don't have enough to do tests obj_versions = objects_base.OBJ_VERSIONS if len(obj_versions) == 1: vol_vers = obj_versions.get_current_versions()['Volume'].split('.') new_volume_version = '%s.%s' % (vol_vers[0], int(vol_vers[1]) + 1) obj_versions.add(self.FAKE_OVO_HISTORY_VERSION, {'Volume': new_volume_version}) self.latest_ovo_version = obj_versions.get_current() def _restore_obj_registry(self): objects_base.CinderObjectRegistry._registry._obj_classes = \ self._base_test_obj_backup def _disable_osprofiler(self): """Disable osprofiler. osprofiler should not run for unit tests. """ def side_effect(value): return value mock_decorator = mock.MagicMock(side_effect=side_effect) p = mock.patch("osprofiler.profiler.trace_cls", return_value=mock_decorator) p.start() def _common_cleanup(self): """Runs after each test method to tear down test environment.""" # Stop any services (this stops RPC handlers) for x in self._services: try: x.stop() except Exception: pass # Stop any looping call that has not yet been stopped cinder_unit.stop_looping_calls() # Delete attributes that don't start with _ so they don't pin # memory around unnecessarily for the duration of the test # suite for key in [k for k in self.__dict__ if k[0] != '_']: del self.__dict__[key] if not issues.TPOOL_KILLALL_ISSUE: # Ensure we have the default tpool size value and we don't carry # threads from other test runs. 
tpool.killall() tpool._nthreads = 20 def override_config(self, name, override, group=None): """Cleanly override CONF variables.""" CONF.set_override(name, override, group) self.addCleanup(CONF.clear_override, name, group) def flags(self, **kw): """Override CONF variables for a test.""" group = kw.pop('group', None) for k, v in kw.items(): self.override_config(k, v, group) def start_service(self, name, host=None, **kwargs): host = host if host else uuid.uuid4().hex kwargs.setdefault('host', host) kwargs.setdefault('binary', 'cinder-%s' % name) svc = service.Service.create(**kwargs) svc.start() self._services.append(svc) return svc def mock_object(self, obj, attr_name, *args, **kwargs): """Use python mock to mock an object attribute Mocks the specified objects attribute with the given value. Automatically performs 'addCleanup' for the mock. """ patcher = mock.patch.object(obj, attr_name, *args, **kwargs) result = patcher.start() self.addCleanup(patcher.stop) return result def patch(self, path, *args, **kwargs): """Use python mock to mock a path with automatic cleanup.""" patcher = mock.patch(path, *args, **kwargs) result = patcher.start() self.addCleanup(patcher.stop) return result # Useful assertions def assert_notify_called(self, mock_notify, calls, any_order=False): if any_order is True: for c in calls: # mock_notify.call_args_list = [ # mock.call('INFO', 'volume.retype', ...), # mock.call('WARN', 'cinder.fire', ...)] # m = mock_notify.call_args_list # m[0] = Call # m[0][0] = tuple('INFO', , 'volume.retype', ...) if not any(m for m in mock_notify.call_args_list if (m[0][0] == c[0] # 'INFO' and m[0][2] == c[1])): # 'volume.retype' raise AssertionError("notify call not found: %s" % c) return for i in range(0, len(calls)): mock_call = mock_notify.call_args_list[i] call = calls[i] posargs = mock_call[0] self.assertEqual(call[0], posargs[0]) self.assertEqual(call[1], posargs[2]) def assertTrue(self, x, *args, **kwargs): """Assert that value is True. If original behavior is required we will need to do: assertTrue(bool(result)) """ # assertTrue uses msg but assertIs uses message keyword argument args = list(args) msg = kwargs.pop('msg', args.pop(0) if args else '') kwargs.setdefault('message', msg) self.assertIs(True, x, *args, **kwargs) def assertFalse(self, x, *args, **kwargs): """Assert that value is False. If original behavior is required we will need to do: assertFalse(bool(result)) """ # assertTrue uses msg but assertIs uses message keyword argument args = list(args) msg = kwargs.pop('msg', args.pop(0) if args else '') kwargs.setdefault('message', msg) self.assertIs(False, x, *args, **kwargs) def stub_out(self, old, new): """Replace a function for the duration of the test. Use the monkey patch fixture to replace a function for the duration of a test. Useful when you want to provide fake methods instead of mocks during testing. This should be used instead of self.stubs.Set (which is based on mox) going forward. """ self.useFixture(fixtures.MonkeyPatch(old, new)) def _set_unique_fqdn_override(self, value, in_shared): """Override the unique_fqdn_network configuration option. Meant for driver tests that use a Mock for their driver configuration instead of a real Oslo Conf. 
""" # Since we don't use a real oslo config for the driver we don't get # the default initialization, so create a group and register the option cfg.CONF.register_group(cfg.OptGroup('driver_cfg')) new_config = configuration.Configuration([], config_group='driver_cfg') new_config.append_config_values(vol_driver.fqdn_opts) # Now we override the value for this test group = configuration.SHARED_CONF_GROUP if in_shared else 'driver_cfg' self.addCleanup(CONF.clear_override, 'unique_fqdn_network', group=group) cfg.CONF.set_override('unique_fqdn_network', value, group=group) return new_config class ModelsObjectComparatorMixin(object): def _dict_from_object(self, obj, ignored_keys): if ignored_keys is None: ignored_keys = [] obj = jsonutils.to_primitive(obj) # Convert to dict first. items = obj.items() return {k: v for k, v in items if k not in ignored_keys} def _assertEqualObjects(self, obj1, obj2, ignored_keys=None): obj1 = self._dict_from_object(obj1, ignored_keys) obj2 = self._dict_from_object(obj2, ignored_keys) self.assertEqual( len(obj1), len(obj2), "Keys mismatch: %s" % str( set(obj1.keys()) ^ set(obj2.keys()))) for key, value in obj1.items(): self.assertEqual(value, obj2[key]) def _assertEqualListsOfObjects(self, objs1, objs2, ignored_keys=None, msg=None): def obj_to_dict(o): return self._dict_from_object(o, ignored_keys) objs1 = map(obj_to_dict, objs1) objs2 = list(map(obj_to_dict, objs2)) # We don't care about the order of the lists, as long as they are in for obj1 in objs1: self.assertIn(obj1, objs2) objs2.remove(obj1) self.assertEqual([], objs2) def _assertEqualListsOfPrimitivesAsSets(self, primitives1, primitives2): self.assertEqual(len(primitives1), len(primitives2)) for primitive in primitives1: self.assertIn(primitive, primitives2) for primitive in primitives2: self.assertIn(primitive, primitives1) class RPCAPITestCase(TestCase, ModelsObjectComparatorMixin): def setUp(self): super(RPCAPITestCase, self).setUp() self.context = context.get_admin_context() self.rpcapi = None self.base_version = '2.0' def _test_rpc_api(self, method, rpc_method, server=None, fanout=False, version=None, expected_method=None, expected_kwargs_diff=None, retval=None, expected_retval=None, **kwargs): """Runs a test against RPC API method. :param method: Name of RPC API method. :param rpc_method: Expected RPC message type (cast or call). :param server: Expected hostname. :param fanout: True if expected call/cast should be fanout. :param version: Expected autocalculated RPC API version. :param expected_method: Expected RPC method name. :param expected_kwargs_diff: Map of expected changes between keyword arguments passed into the method and sent over RPC. :param retval: Value returned by RPC call/cast. :param expected_retval: Expected RPC API response (if different than retval). :param kwargs: Parameters passed into the RPC API method. """ rpcapi = self.rpcapi() expected_kwargs_diff = expected_kwargs_diff or {} version = version or self.base_version topic = None if server is not None: backend = volume_utils.extract_host(server) server = volume_utils.extract_host(server, 'host') topic = 'cinder-volume.%s' % backend if expected_method is None: expected_method = method if expected_retval is None: expected_retval = retval target = { "server": server, "fanout": fanout, "version": version, "topic": topic, } # Initially we expect that we'll pass same arguments to RPC API method # and RPC call/cast... expected_msg = copy.deepcopy(kwargs) # ... but here we're taking exceptions into account. 
expected_msg.update(expected_kwargs_diff) def _fake_prepare_method(*args, **kwds): # This is checking if target will be properly created. for kwd in kwds: self.assertEqual(target[kwd], kwds[kwd]) return rpcapi.client def _fake_rpc_method(*args, **kwargs): # This checks if positional arguments passed to RPC method match. self.assertEqual((self.context, expected_method), args) # This checks if keyword arguments passed to RPC method match. for kwarg, value in kwargs.items(): # Getting possible changes into account. if isinstance(value, objects_base.CinderObject): # We need to compare objects differently. self._assertEqualObjects(expected_msg[kwarg], value) else: self.assertEqual(expected_msg[kwarg], value) # Returning fake value we're supposed to return. if retval: return retval # Enable mocks that will check everything and run RPC method. with mock.patch.object(rpcapi.client, "prepare", side_effect=_fake_prepare_method): with mock.patch.object(rpcapi.client, rpc_method, side_effect=_fake_rpc_method): real_retval = getattr(rpcapi, method)(self.context, **kwargs) self.assertEqual(expected_retval, real_retval) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/test_api.py0000664000175000017500000000512300000000000020637 0ustar00zuulzuul00000000000000 # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Unit tests for the API endpoint.""" from http import client as http_client import io import webob class FakeHttplibSocket(object): """A fake socket implementation for http_client.HTTPResponse, trivial.""" def __init__(self, response_string): self.response_string = response_string self._buffer = io.StringIO(response_string) def makefile(self, _mode, _other): """Returns the socket's internal buffer.""" return self._buffer class FakeHttplibConnection(object): """A fake http_client.HTTPConnection for boto. requests made via this connection actually get translated and routed into our WSGI app, we then wait for the response and turn it back into the http_client.HTTPResponse that boto expects. """ def __init__(self, app, host, is_secure=False): self.app = app self.host = host def request(self, method, path, data, headers): req = webob.Request.blank(path) req.method = method req.body = data req.headers = headers req.headers['Accept'] = 'text/html' req.host = self.host # Call the WSGI app, get the HTTP response resp = str(req.get_response(self.app)) # For some reason, the response doesn't have "HTTP/1.0 " prepended; I # guess that's a function the web server usually provides. 
resp = "HTTP/1.0 %s" % resp self.sock = FakeHttplibSocket(resp) self.http_response = http_client.HTTPResponse(self.sock) # NOTE(vish): boto is accessing private variables for some reason self._HTTPConnection__response = self.http_response self.http_response.begin() def getresponse(self): return self.http_response def getresponsebody(self): return self.sock.response_string def close(self): """Required for compatibility with boto/tornado.""" pass ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/test_api_urlmap.py0000664000175000017500000002016500000000000022222 0ustar00zuulzuul00000000000000# Copyright (c) 2013 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Tests for cinder.api.urlmap.py """ from unittest import mock from cinder.api import urlmap from cinder.tests.unit import test class TestParseFunctions(test.TestCase): def test_unquote_header_value_without_quotes(self): arg = 'TestString' result = urlmap.unquote_header_value(arg) self.assertEqual(arg, result) def test_unquote_header_value_with_quotes(self): result = urlmap.unquote_header_value('"TestString"') self.assertEqual('TestString', result) def test_parse_list_header(self): arg = 'token, "quoted value"' result = urlmap.parse_list_header(arg) self.assertEqual(['token', 'quoted value'], result) def test_parse_options_header(self): result = urlmap.parse_options_header('Content-Type: text/html;' ' mimetype=text/html') self.assertEqual(('Content-Type:', {'mimetype': 'text/html'}), result) def test_parse_options_header_without_value(self): result = urlmap.parse_options_header(None) self.assertEqual(('', {}), result) class TestAccept(test.TestCase): def test_best_match_ValueError(self): arg = 'text/html; q=some_invalud_value' accept = urlmap.Accept(arg) self.assertEqual((None, {}), accept.best_match(['text/html'])) def test_best_match(self): arg = '*/*; q=0.7, application/json; q=0.7, text/html; q=-0.8' accept = urlmap.Accept(arg) self.assertEqual(('application/json', {'q': '0.7'}), accept.best_match(['application/json', 'text/html'])) def test_match_mask_one_asterisk(self): arg = 'text/*; q=0.7' accept = urlmap.Accept(arg) self.assertEqual(('text/html', {'q': '0.7'}), accept.best_match(['text/html'])) def test_match_mask_two_asterisk(self): arg = '*/*; q=0.7' accept = urlmap.Accept(arg) self.assertEqual(('text/html', {'q': '0.7'}), accept.best_match(['text/html'])) def test_match_mask_no_asterisk(self): arg = 'application/json; q=0.7' accept = urlmap.Accept(arg) self.assertEqual((None, {}), accept.best_match(['text/html'])) def test_content_type_params(self): arg = "application/json; q=0.2," \ " text/html; q=0.3" accept = urlmap.Accept(arg) self.assertEqual({'q': '0.2'}, accept.content_type_params('application/json')) def test_content_type_params_wrong_content_type(self): arg = 'text/html; q=0.1' accept = urlmap.Accept(arg) self.assertEqual({}, accept.content_type_params('application/json')) class 
TestUrlMapFactory(test.TestCase): def setUp(self): super(TestUrlMapFactory, self).setUp() self.global_conf = {'not_found_app': 'app_global', 'domain hoobar.com port 10 /': 'some_app_global'} self.loader = mock.Mock() def test_not_found_app_in_local_conf(self): local_conf = {'not_found_app': 'app_local', 'domain foobar.com port 20 /': 'some_app_local'} self.loader.get_app.side_effect = ['app_local_loader', 'some_app_loader'] calls = [mock.call('app_local', global_conf=self.global_conf), mock.call('some_app_local', global_conf=self.global_conf)] expected_urlmap = urlmap.URLMap(not_found_app='app_local_loader') expected_urlmap['http://foobar.com:20'] = 'some_app_loader' self.assertEqual(expected_urlmap, urlmap.urlmap_factory(self.loader, self.global_conf, **local_conf)) self.loader.get_app.assert_has_calls(calls) def test_not_found_app_not_in_local_conf(self): local_conf = {'domain foobar.com port 20 /': 'some_app_local'} self.loader.get_app.side_effect = ['app_global_loader', 'some_app_returned_by_loader'] calls = [mock.call('app_global', global_conf=self.global_conf), mock.call('some_app_local', global_conf=self.global_conf)] expected_urlmap = urlmap.URLMap(not_found_app='app_global_loader') expected_urlmap['http://foobar.com:20'] = 'some_app_returned'\ '_by_loader' self.assertEqual(expected_urlmap, urlmap.urlmap_factory(self.loader, self.global_conf, **local_conf)) self.loader.get_app.assert_has_calls(calls) def test_not_found_app_is_none(self): local_conf = {'not_found_app': None, 'domain foobar.com port 20 /': 'some_app_local'} self.loader.get_app.return_value = 'some_app_returned_by_loader' expected_urlmap = urlmap.URLMap(not_found_app=None) expected_urlmap['http://foobar.com:20'] = 'some_app_returned'\ '_by_loader' self.assertEqual(expected_urlmap, urlmap.urlmap_factory(self.loader, self.global_conf, **local_conf)) self.loader.get_app.assert_called_once_with( 'some_app_local', global_conf=self.global_conf) class TestURLMap(test.TestCase): def setUp(self): super(TestURLMap, self).setUp() self.urlmap = urlmap.URLMap() def test_match_with_applications(self): self.urlmap[('http://10.20.30.40:50', '/path/somepath')] = 'app' self.assertEqual((None, None), self.urlmap._match('20.30.40.50', '20', 'path/somepath')) def test_match_without_applications(self): self.assertEqual((None, None), self.urlmap._match('host', 20, 'app_url/somepath')) def test_match_path_info_equals_app_url(self): self.urlmap[('http://20.30.40.50:60', '/app_url/somepath')] = 'app' self.assertEqual(('app', '/app_url/somepath'), self.urlmap._match('http://20.30.40.50', '60', '/app_url/somepath')) def test_match_path_info_equals_app_url_many_app(self): self.urlmap[('http://20.30.40.50:60', '/path')] = 'app1' self.urlmap[('http://20.30.40.50:60', '/path/somepath')] = 'app2' self.urlmap[('http://20.30.40.50:60', '/path/somepath/elsepath')] = \ 'app3' self.assertEqual(('app3', '/path/somepath/elsepath'), self.urlmap._match('http://20.30.40.50', '60', '/path/somepath/elsepath')) def test_path_strategy_wrong_path_info(self): self.assertEqual((None, None, None), self.urlmap._path_strategy('http://10.20.30.40', '50', '/resource')) def test_path_strategy_wrong_mime_type(self): self.urlmap[('http://10.20.30.40:50', '/path/elsepath/')] = 'app' with mock.patch.object(self.urlmap, '_munge_path') as mock_munge_path: mock_munge_path.return_value = 'value' self.assertEqual( (None, 'value', '/path/elsepath'), self.urlmap._path_strategy('http://10.20.30.40', '50', '/path/elsepath/resource.abc')) mock_munge_path.assert_called_once_with( 'app', 
'/path/elsepath/resource.abc', '/path/elsepath') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/test_cleanable_manager.py0000664000175000017500000002422600000000000023473 0ustar00zuulzuul00000000000000# Copyright (c) 2016 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from unittest import mock from oslo_utils import timeutils from cinder import context from cinder import db from cinder import manager from cinder import objects from cinder.tests.unit import fake_constants from cinder.tests.unit import test from cinder.tests.unit import utils class FakeManager(manager.CleanableManager): def __init__(self, service_id=None, keep_after_clean=False): if service_id: self.service_id = service_id self.keep_after_clean = keep_after_clean def _do_cleanup(self, ctxt, vo_resource): vo_resource.status += '_cleaned' vo_resource.save() return self.keep_after_clean class TestCleanableManager(test.TestCase): def setUp(self): super(TestCleanableManager, self).setUp() self.user_id = fake_constants.USER_ID self.project_id = fake_constants.PROJECT_ID self.context = context.RequestContext(self.user_id, self.project_id, is_admin=True) self.service = db.service_create(self.context, {}) @mock.patch('cinder.manager.CleanableManager.do_cleanup', autospec=True) def test_init_host_with_service(self, mock_cleanup): mngr = FakeManager() self.assertFalse(hasattr(mngr, 'service_id')) mngr.init_host(service_id=self.service.id) self.assertEqual(self.service.id, mngr.service_id) mock_cleanup.assert_called_once_with(mngr, mock.ANY, mock.ANY) clean_req = mock_cleanup.call_args[0][2] self.assertIsInstance(clean_req, objects.CleanupRequest) self.assertEqual(self.service.id, clean_req.service_id) def test_do_cleanup(self): """Basic successful cleanup.""" vol = utils.create_volume(self.context, status='creating') db.worker_create(self.context, status='creating', resource_type='Volume', resource_id=vol.id, service_id=self.service.id) clean_req = objects.CleanupRequest(service_id=self.service.id) mngr = FakeManager(self.service.id) mngr.do_cleanup(self.context, clean_req) self.assertListEqual([], db.worker_get_all(self.context)) vol.refresh() self.assertEqual('creating_cleaned', vol.status) def test_do_cleanup_not_cleaning_already_claimed(self): """Basic cleanup that doesn't touch already cleaning works.""" vol = utils.create_volume(self.context, status='creating') worker1 = db.worker_create(self.context, status='creating', resource_type='Volume', resource_id=vol.id, service_id=self.service.id) worker1 = db.worker_get(self.context, id=worker1.id) vol2 = utils.create_volume(self.context, status='deleting') worker2 = db.worker_create(self.context, status='deleting', resource_type='Volume', resource_id=vol2.id, service_id=self.service.id + 1) worker2 = db.worker_get(self.context, id=worker2.id) # Simulate that the change to vol2 worker happened between # worker_get_all and trying to claim a work for cleanup 
worker2.service_id = self.service.id clean_req = objects.CleanupRequest(service_id=self.service.id) mngr = FakeManager(self.service.id) with mock.patch('cinder.db.worker_get_all') as get_all_mock: get_all_mock.return_value = [worker1, worker2] mngr.do_cleanup(self.context, clean_req) workers = db.worker_get_all(self.context) self.assertEqual(1, len(workers)) self.assertEqual(worker2.id, workers[0].id) vol.refresh() self.assertEqual('creating_cleaned', vol.status) vol2.refresh() self.assertEqual('deleting', vol2.status) def test_do_cleanup_not_cleaning_already_claimed_by_us(self): """Basic cleanup that doesn't touch other thread's claimed works.""" now = timeutils.utcnow() delta = timeutils.datetime.timedelta(seconds=1) original_time = now - delta # Creating the worker in the future, and then changing the in-memory # value of worker2.updated_at to an earlier time, we effectively # simulate that the worker entry was created in the past and that it # has been just updated between worker_get_all and trying # to claim a work for cleanup other_thread_claimed_time = now + delta vol = utils.create_volume(self.context, status='creating') worker1 = db.worker_create(self.context, status='creating', resource_type='Volume', resource_id=vol.id, service_id=self.service.id, updated_at=original_time) worker1 = db.worker_get(self.context, id=worker1.id) vol2 = utils.create_volume(self.context, status='deleting') worker2 = db.worker_create(self.context, status='deleting', resource_type='Volume', resource_id=vol2.id, service_id=self.service.id, updated_at=other_thread_claimed_time) worker2 = db.worker_get(self.context, id=worker2.id) # This with the mock below simulates worker2 was created in the past # and updated right between worker_get_all and worker_claim_for_cleanup worker2.updated_at = original_time clean_req = objects.CleanupRequest(service_id=self.service.id) mngr = FakeManager(self.service.id) with mock.patch('cinder.manager.timeutils.utcnow', return_value=now), \ mock.patch('cinder.db.worker_get_all') as get_all_mock: get_all_mock.return_value = [worker1, worker2] mngr.do_cleanup(self.context, clean_req) workers = db.worker_get_all(self.context) self.assertEqual(1, len(workers)) self.assertEqual(worker2.id, workers[0].id) vol.refresh() self.assertEqual('creating_cleaned', vol.status) vol2.refresh() self.assertEqual('deleting', vol2.status) def test_do_cleanup_resource_deleted(self): """Cleanup on a resource that's been already deleted.""" vol = utils.create_volume(self.context, status='creating') db.worker_create(self.context, status='creating', resource_type='Volume', resource_id=vol.id, service_id=self.service.id) vol.destroy() clean_req = objects.CleanupRequest(service_id=self.service.id) mngr = FakeManager(self.service.id) mngr.do_cleanup(self.context, clean_req) workers = db.worker_get_all(self.context) self.assertListEqual([], workers) def test_do_cleanup_resource_on_another_service(self): """Cleanup on a resource that's been claimed by other service.""" vol = utils.create_volume(self.context, status='deleting') db.worker_create(self.context, status='deleting', resource_type='Volume', resource_id=vol.id, service_id=self.service.id + 1) clean_req = objects.CleanupRequest(service_id=self.service.id) mngr = FakeManager(self.service.id) mngr.do_cleanup(self.context, clean_req) workers = db.worker_get_all(self.context) self.assertEqual(1, len(workers)) vol.refresh() self.assertEqual('deleting', vol.status) def test_do_cleanup_resource_changed_status(self): """Cleanup on a resource that's changed 
status.""" vol = utils.create_volume(self.context, status='available') db.worker_create(self.context, status='creating', resource_type='Volume', resource_id=vol.id, service_id=self.service.id) clean_req = objects.CleanupRequest(service_id=self.service.id) mngr = FakeManager(self.service.id) mngr.do_cleanup(self.context, clean_req) workers = db.worker_get_all(self.context) self.assertListEqual([], workers) vol.refresh() self.assertEqual('available', vol.status) def test_do_cleanup_keep_worker(self): """Cleanup on a resource that will remove worker when cleaning up.""" vol = utils.create_volume(self.context, status='deleting') db.worker_create(self.context, status='deleting', resource_type='Volume', resource_id=vol.id, service_id=self.service.id) clean_req = objects.CleanupRequest(service_id=self.service.id) mngr = FakeManager(self.service.id, keep_after_clean=True) mngr.do_cleanup(self.context, clean_req) workers = db.worker_get_all(self.context) self.assertEqual(1, len(workers)) vol.refresh() self.assertEqual('deleting_cleaned', vol.status) @mock.patch.object(FakeManager, '_do_cleanup', side_effect=Exception) def test_do_cleanup_revive_on_cleanup_fail(self, mock_clean): """Cleanup will revive a worker if cleanup fails.""" vol = utils.create_volume(self.context, status='creating') db.worker_create(self.context, status='creating', resource_type='Volume', resource_id=vol.id, service_id=self.service.id) clean_req = objects.CleanupRequest(service_id=self.service.id) mngr = FakeManager(self.service.id) mngr.do_cleanup(self.context, clean_req) workers = db.worker_get_all(self.context) self.assertEqual(1, len(workers)) vol.refresh() self.assertEqual('creating', vol.status) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/test_cmd.py0000664000175000017500000034102400000000000020634 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import collections import datetime import errno import io import re import sys import time from unittest import mock import ddt import fixtures import iso8601 from oslo_concurrency import processutils from oslo_config import cfg from oslo_db import exception as oslo_exception from oslo_utils import timeutils # Prevent load failures on macOS if sys.platform == 'darwin': rtslib_fb = mock.MagicMock() cinder_rtstool = mock.MagicMock() else: import rtslib_fb from cinder.cmd import api as cinder_api from cinder.cmd import backup as cinder_backup from cinder.cmd import manage as cinder_manage if sys.platform != 'darwin': from cinder.cmd import rtstool as cinder_rtstool from cinder.cmd import scheduler as cinder_scheduler from cinder.cmd import volume as cinder_volume from cinder.cmd import volume_usage_audit from cinder.common import constants from cinder import context from cinder.db.sqlalchemy import api as sqlalchemy_api from cinder import exception from cinder.objects import fields from cinder.tests.unit import fake_cluster from cinder.tests.unit import fake_constants as fake from cinder.tests.unit import fake_service from cinder.tests.unit import fake_volume from cinder.tests.unit import test from cinder.tests.unit import utils from cinder import version from cinder.volume import rpcapi CONF = cfg.CONF class TestCinderApiCmd(test.TestCase): """Unit test cases for python modules under cinder/cmd.""" def setUp(self): super(TestCinderApiCmd, self).setUp() sys.argv = ['cinder-api'] @mock.patch('cinder.service.WSGIService') @mock.patch('cinder.service.process_launcher') @mock.patch('cinder.rpc.init') @mock.patch('cinder.utils.monkey_patch') @mock.patch('oslo_log.log.setup') def test_main(self, log_setup, monkey_patch, rpc_init, process_launcher, wsgi_service): launcher = process_launcher.return_value server = wsgi_service.return_value server.workers = mock.sentinel.worker_count cinder_api.main() self.assertEqual('cinder', CONF.project) self.assertEqual(CONF.version, version.version_string()) log_setup.assert_called_once_with(CONF, "cinder") monkey_patch.assert_called_once_with() rpc_init.assert_called_once_with(CONF) process_launcher.assert_called_once_with() wsgi_service.assert_called_once_with('osapi_volume') launcher.launch_service.assert_called_once_with( server, workers=server.workers) launcher.wait.assert_called_once_with() class TestCinderBackupCmd(test.TestCase): def setUp(self): super(TestCinderBackupCmd, self).setUp() sys.argv = ['cinder-backup'] @mock.patch('cinder.utils.semaphore_factory') @mock.patch('cinder.cmd.backup._launch_backup_process') @mock.patch('cinder.service.wait') @mock.patch('cinder.service.serve') @mock.patch('cinder.service.Service.create') @mock.patch('cinder.utils.monkey_patch') @mock.patch('oslo_log.log.setup') def test_main(self, log_setup, monkey_patch, service_create, service_serve, service_wait, launch_mock, mock_semaphore): server = service_create.return_value cinder_backup.main() self.assertEqual('cinder', CONF.project) self.assertEqual(CONF.version, version.version_string()) log_setup.assert_called_once_with(CONF, "cinder") monkey_patch.assert_called_once_with() service_create.assert_called_once_with( binary='cinder-backup', coordination=True, service_name='backup', process_number=1, semaphore=mock_semaphore.return_value) service_serve.assert_called_once_with(server) service_wait.assert_called_once_with() launch_mock.assert_not_called() @mock.patch('cinder.utils.Semaphore') @mock.patch('cinder.service.get_launcher') 
@mock.patch('cinder.service.Service.create') @mock.patch('cinder.utils.monkey_patch') @mock.patch('oslo_log.log.setup') def test_main_multiprocess(self, log_setup, monkey_patch, service_create, get_launcher, mock_semaphore): if processutils.get_worker_count() < 2: raise test.testtools.TestCase.skipException( 'requires more than 1 cpu (to set backup_workers >1)') CONF.set_override('backup_workers', 2) mock_semaphore.side_effect = [mock.sentinel.semaphore1, mock.sentinel.semaphore2] cinder_backup.main() self.assertEqual('cinder', CONF.project) self.assertEqual(CONF.version, version.version_string()) # Both calls must receive the same semaphore c1 = mock.call(binary=constants.BACKUP_BINARY, coordination=True, process_number=1, semaphore=mock.sentinel.semaphore1, service_name='backup') c2 = mock.call(binary=constants.BACKUP_BINARY, coordination=True, process_number=2, semaphore=mock.sentinel.semaphore1, service_name='backup') service_create.assert_has_calls([c1, c2]) launcher = get_launcher.return_value self.assertEqual(2, launcher.launch_service.call_count) launcher.wait.assert_called_once_with() class TestCinderSchedulerCmd(test.TestCase): def setUp(self): super(TestCinderSchedulerCmd, self).setUp() sys.argv = ['cinder-scheduler'] @mock.patch('cinder.service.wait') @mock.patch('cinder.service.serve') @mock.patch('cinder.service.Service.create') @mock.patch('cinder.utils.monkey_patch') @mock.patch('oslo_log.log.setup') def test_main(self, log_setup, monkey_patch, service_create, service_serve, service_wait): server = service_create.return_value cinder_scheduler.main() self.assertEqual('cinder', CONF.project) self.assertEqual(CONF.version, version.version_string()) log_setup.assert_called_once_with(CONF, "cinder") monkey_patch.assert_called_once_with() service_create.assert_called_once_with(binary='cinder-scheduler') service_serve.assert_called_once_with(server) service_wait.assert_called_once_with() class TestCinderVolumeCmdPosix(test.TestCase): def setUp(self): super(TestCinderVolumeCmdPosix, self).setUp() sys.argv = ['cinder-volume'] self.patch('os.name', 'posix') @mock.patch('cinder.service.get_launcher') @mock.patch('cinder.service.Service.create') @mock.patch('cinder.utils.monkey_patch') @mock.patch('oslo_log.log.setup') def test_main(self, log_setup, monkey_patch, service_create, get_launcher): CONF.set_override('enabled_backends', None) self.assertRaises(SystemExit, cinder_volume.main) self.assertFalse(service_create.called) @mock.patch('cinder.service.get_launcher') @mock.patch('cinder.service.Service.create') @mock.patch('cinder.utils.monkey_patch') @mock.patch('oslo_log.log.setup') def test_main_with_backends(self, log_setup, monkey_patch, service_create, get_launcher): backends = ['', 'backend1', 'backend2', ''] CONF.set_override('enabled_backends', backends) CONF.set_override('host', 'host') CONF.set_override('cluster', None) launcher = get_launcher.return_value cinder_volume.main() self.assertEqual('cinder', CONF.project) self.assertEqual(CONF.version, version.version_string()) log_setup.assert_called_once_with(CONF, "cinder") monkey_patch.assert_called_once_with() get_launcher.assert_called_once_with() c1 = mock.call(binary=constants.VOLUME_BINARY, host='host@backend1', service_name='backend1', coordination=True, cluster=None) c2 = mock.call(binary=constants.VOLUME_BINARY, host='host@backend2', service_name='backend2', coordination=True, cluster=None) service_create.assert_has_calls([c1, c2]) self.assertEqual(2, launcher.launch_service.call_count) 
launcher.wait.assert_called_once_with() @ddt.ddt @test.testtools.skipIf(sys.platform == 'darwin', 'Not supported on macOS') class TestCinderVolumeCmdWin32(test.TestCase): def setUp(self): super(TestCinderVolumeCmdWin32, self).setUp() sys.argv = ['cinder-volume'] self._mock_win32_proc_launcher = mock.Mock() self.patch('os.name', 'nt') self.patch('cinder.service.WindowsProcessLauncher', lambda *args, **kwargs: self._mock_win32_proc_launcher) @mock.patch('cinder.service.get_launcher') @mock.patch('cinder.service.Service.create') @mock.patch('cinder.utils.monkey_patch') @mock.patch('oslo_log.log.setup') def test_main(self, log_setup, monkey_patch, service_create, get_launcher): CONF.set_override('enabled_backends', None) self.assertRaises(SystemExit, cinder_volume.main) self.assertFalse(service_create.called) self.assertFalse(self._mock_win32_proc_launcher.called) @mock.patch('cinder.service.get_launcher') @mock.patch('cinder.service.Service.create') @mock.patch('cinder.utils.monkey_patch') @mock.patch('oslo_log.log.setup') def test_main_invalid_backend(self, log_setup, monkey_patch, service_create, get_launcher): CONF.set_override('enabled_backends', 'backend1') CONF.set_override('backend_name', 'backend2') self.assertRaises(exception.InvalidInput, cinder_volume.main) self.assertFalse(service_create.called) self.assertFalse(self._mock_win32_proc_launcher.called) @mock.patch('cinder.utils.monkey_patch') @mock.patch('oslo_log.log.setup') @ddt.data({}, {'binary_path': 'cinder-volume-script.py', 'exp_py_executable': True}) @ddt.unpack def test_main_with_multiple_backends(self, log_setup, monkey_patch, binary_path='cinder-volume', exp_py_executable=False): # If multiple backends are used, we expect the Windows process # launcher to be used in order to create the child processes. backends = ['', 'backend1', 'backend2', ''] CONF.set_override('enabled_backends', backends) CONF.set_override('host', 'host') launcher = self._mock_win32_proc_launcher # Depending on the setuptools version, '-script.py' and '.exe' # binary path extensions may be trimmed. We need to take this # into consideration when building the command that will be # used to spawn child subprocesses. sys.argv = [binary_path] cinder_volume.main() self.assertEqual('cinder', CONF.project) self.assertEqual(CONF.version, version.version_string()) log_setup.assert_called_once_with(CONF, "cinder") monkey_patch.assert_called_once_with() exp_cmd_prefix = [sys.executable] if exp_py_executable else [] exp_cmds = [ exp_cmd_prefix + sys.argv + ['--backend_name=%s' % backend_name] for backend_name in ['backend1', 'backend2']] launcher.add_process.assert_has_calls( [mock.call(exp_cmd) for exp_cmd in exp_cmds]) launcher.wait.assert_called_once_with() @mock.patch('cinder.service.get_launcher') @mock.patch('cinder.service.Service.create') @mock.patch('cinder.utils.monkey_patch') @mock.patch('oslo_log.log.setup') def test_main_with_multiple_backends_child( self, log_setup, monkey_patch, service_create, get_launcher): # We're testing the code expected to be run within child processes. 
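        # The parent process spawns one child per backend and passes a
        # --backend_name argument on the command line (see
        # test_main_with_multiple_backends above). Here we simulate that
        # child invocation by appending the flag to sys.argv and verify that
        # a single service is created for just that backend.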
backends = ['', 'backend1', 'backend2', ''] CONF.set_override('enabled_backends', backends) CONF.set_override('host', 'host') CONF.set_override('cluster', None) launcher = get_launcher.return_value sys.argv += ['--backend_name', 'backend2'] cinder_volume.main() self.assertEqual('cinder', CONF.project) self.assertEqual(CONF.version, version.version_string()) log_setup.assert_called_once_with(CONF, "cinder") monkey_patch.assert_called_once_with() service_create.assert_called_once_with( binary=constants.VOLUME_BINARY, host='host@backend2', service_name='backend2', coordination=True, cluster=None) launcher.launch_service.assert_called_once_with( service_create.return_value) @mock.patch('cinder.service.get_launcher') @mock.patch('cinder.service.Service.create') @mock.patch('cinder.utils.monkey_patch') @mock.patch('oslo_log.log.setup') def test_main_with_single_backend( self, log_setup, monkey_patch, service_create, get_launcher): # We're expecting the service to be run within the same process. CONF.set_override('enabled_backends', ['backend2']) CONF.set_override('host', 'host') CONF.set_override('cluster', None) launcher = get_launcher.return_value cinder_volume.main() self.assertEqual('cinder', CONF.project) self.assertEqual(CONF.version, version.version_string()) log_setup.assert_called_once_with(CONF, "cinder") monkey_patch.assert_called_once_with() service_create.assert_called_once_with( binary=constants.VOLUME_BINARY, host='host@backend2', service_name='backend2', coordination=True, cluster=None) launcher.launch_service.assert_called_once_with( service_create.return_value) @ddt.ddt class TestCinderManageCmd(test.TestCase): def setUp(self): super(TestCinderManageCmd, self).setUp() sys.argv = ['cinder-manage'] def _test_purge_invalid_age_in_days(self, age_in_days): db_cmds = cinder_manage.DbCommands() ex = self.assertRaises(SystemExit, db_cmds.purge, age_in_days) self.assertEqual(1, ex.code) @mock.patch('cinder.objects.ServiceList.get_all') @mock.patch('cinder.db.migration.db_sync') def test_db_commands_sync(self, db_sync, service_get_mock): version = 11 db_cmds = cinder_manage.DbCommands() db_cmds.sync(version=version) db_sync.assert_called_once_with(version) service_get_mock.assert_not_called() @mock.patch('cinder.objects.Service.save') @mock.patch('cinder.objects.ServiceList.get_all') @mock.patch('cinder.db.migration.db_sync') def test_db_commands_sync_bump_versions(self, db_sync, service_get_mock, service_save): ctxt = context.get_admin_context() services = [fake_service.fake_service_obj(ctxt, binary='cinder-' + binary, rpc_current_version='0.1', object_current_version='0.2') for binary in ('volume', 'scheduler', 'backup')] service_get_mock.return_value = services version = 11 db_cmds = cinder_manage.DbCommands() db_cmds.sync(version=version, bump_versions=True) db_sync.assert_called_once_with(version) self.assertEqual(3, service_save.call_count) for service in services: self.assertEqual(cinder_manage.RPC_VERSIONS[service.binary], service.rpc_current_version) self.assertEqual(cinder_manage.OVO_VERSION, service.object_current_version) @mock.patch('cinder.db.migration.db_version') def test_db_commands_version(self, db_version): db_cmds = cinder_manage.DbCommands() with mock.patch('sys.stdout', new=io.StringIO()): db_cmds.version() self.assertEqual(1, db_version.call_count) def test_db_commands_upgrade_out_of_range(self): version = 2147483647 db_cmds = cinder_manage.DbCommands() exit = self.assertRaises(SystemExit, db_cmds.sync, version + 1) self.assertEqual(1, exit.code) 
@mock.patch('cinder.db.migration.db_sync') def test_db_commands_script_not_present(self, db_sync): db_sync.side_effect = oslo_exception.DBMigrationError(None) db_cmds = cinder_manage.DbCommands() exit = self.assertRaises(SystemExit, db_cmds.sync, 101) self.assertEqual(1, exit.code) @mock.patch('cinder.cmd.manage.DbCommands.online_migrations', (mock.Mock(side_effect=((2, 2), (0, 0)), __name__='foo'),)) def test_db_commands_online_data_migrations(self): db_cmds = cinder_manage.DbCommands() exit = self.assertRaises(SystemExit, db_cmds.online_data_migrations) self.assertEqual(0, exit.code) cinder_manage.DbCommands.online_migrations[0].assert_has_calls( (mock.call(mock.ANY, 50),) * 2) def _fake_db_command(self, migrations=None): if migrations is None: mock_mig_1 = mock.MagicMock(__name__="mock_mig_1") mock_mig_2 = mock.MagicMock(__name__="mock_mig_2") mock_mig_1.return_value = (5, 4) mock_mig_2.return_value = (6, 6) migrations = (mock_mig_1, mock_mig_2) class _CommandSub(cinder_manage.DbCommands): online_migrations = migrations return _CommandSub @mock.patch('cinder.context.get_admin_context') def test_online_migrations(self, mock_get_context): self.useFixture(fixtures.MonkeyPatch('sys.stdout', io.StringIO())) ctxt = mock_get_context.return_value db_cmds = self._fake_db_command() command = db_cmds() exit = self.assertRaises(SystemExit, command.online_data_migrations, 10) self.assertEqual(1, exit.code) command.online_migrations[0].assert_has_calls([mock.call(ctxt, 10)]) command.online_migrations[1].assert_has_calls([mock.call(ctxt, 6)]) output = sys.stdout.getvalue() matches = re.findall( '5 rows matched query mock_mig_1, 4 migrated', output, re.MULTILINE) self.assertEqual(len(matches), 1) matches = re.findall( '6 rows matched query mock_mig_2, 6 migrated', output, re.MULTILINE) self.assertEqual(len(matches), 1) matches = re.findall( 'mock_mig_1 .* 5 .* 4', output, re.MULTILINE) self.assertEqual(len(matches), 1) matches = re.findall( 'mock_mig_2 .* 6 .* 6', output, re.MULTILINE) self.assertEqual(len(matches), 1) @mock.patch('cinder.context.get_admin_context') def test_online_migrations_no_max_count(self, mock_get_context): self.useFixture(fixtures.MonkeyPatch('sys.stdout', io.StringIO())) fake_remaining = [120] def fake_migration(context, count): self.assertEqual(mock_get_context.return_value, context) found = 120 done = min(fake_remaining[0], count) fake_remaining[0] -= done return found, done command_cls = self._fake_db_command((fake_migration,)) command = command_cls() exit = self.assertRaises(SystemExit, command.online_data_migrations, None) self.assertEqual(0, exit.code) output = sys.stdout.getvalue() self.assertIn('Running batches of 50 until complete.', output) matches = re.findall( '120 rows matched query fake_migration, 50 migrated', output, re.MULTILINE) self.assertEqual(len(matches), 2) matches = re.findall( '120 rows matched query fake_migration, 20 migrated', output, re.MULTILINE) self.assertEqual(len(matches), 1) matches = re.findall( '120 rows matched query fake_migration, 0 migrated', output, re.MULTILINE) self.assertEqual(len(matches), 1) matches = re.findall( 'fake_migration .* 120 .* 120', output, re.MULTILINE) self.assertEqual(len(matches), 1) @mock.patch('cinder.context.get_admin_context') def test_online_migrations_error(self, mock_get_context): self.useFixture(fixtures.MonkeyPatch('sys.stdout', io.StringIO())) good_remaining = [50] def good_migration(context, count): self.assertEqual(mock_get_context.return_value, context) found = 50 done = min(good_remaining[0], count) 
good_remaining[0] -= done return found, done bad_migration = mock.MagicMock() bad_migration.side_effect = test.TestingException bad_migration.__name__ = 'bad_migration' command_cls = self._fake_db_command((bad_migration, good_migration)) command = command_cls() # bad_migration raises an exception, but it could be because # good_migration had not completed yet. We should get 1 in this case, # because some work was done, and the command should be reiterated. exit = self.assertRaises(SystemExit, command.online_data_migrations, max_count=50) self.assertEqual(1, exit.code) # When running this for the second time, there's no work left for # good_migration to do, but bad_migration still fails - should # get 2 this time. exit = self.assertRaises(SystemExit, command.online_data_migrations, max_count=50) self.assertEqual(2, exit.code) # When --max_count is not used, we should get 2 if all possible # migrations completed but some raise exceptions good_remaining = [50] exit = self.assertRaises(SystemExit, command.online_data_migrations, None) self.assertEqual(2, exit.code) @mock.patch('cinder.cmd.manage.DbCommands.online_migrations', (mock.Mock(side_effect=((2, 2), (0, 0)), __name__='foo'),)) def test_db_commands_online_data_migrations_ignore_state_and_max(self): db_cmds = cinder_manage.DbCommands() exit = self.assertRaises(SystemExit, db_cmds.online_data_migrations, 2) self.assertEqual(1, exit.code) cinder_manage.DbCommands.online_migrations[0].assert_called_once_with( mock.ANY, 2) @mock.patch('cinder.cmd.manage.DbCommands.online_migrations', (mock.Mock(side_effect=((2, 2), (0, 0)), __name__='foo'),)) def test_db_commands_online_data_migrations_max_negative(self): db_cmds = cinder_manage.DbCommands() exit = self.assertRaises(SystemExit, db_cmds.online_data_migrations, -1) self.assertEqual(127, exit.code) cinder_manage.DbCommands.online_migrations[0].assert_not_called() @mock.patch('cinder.db.reset_active_backend') @mock.patch('cinder.context.get_admin_context') def test_db_commands_reset_active_backend(self, admin_ctxt_mock, reset_backend_mock): db_cmds = cinder_manage.DbCommands() db_cmds.reset_active_backend(True, 'fake-backend-id', 'fake-host') reset_backend_mock.assert_called_with(admin_ctxt_mock.return_value, True, 'fake-backend-id', 'fake-host') @mock.patch('cinder.version.version_string') def test_versions_commands_list(self, version_string): version_cmds = cinder_manage.VersionCommands() with mock.patch('sys.stdout', new=io.StringIO()): version_cmds.list() version_string.assert_called_once_with() @mock.patch('cinder.version.version_string') def test_versions_commands_call(self, version_string): version_cmds = cinder_manage.VersionCommands() with mock.patch('sys.stdout', new=io.StringIO()): version_cmds.__call__() version_string.assert_called_once_with() def test_purge_with_negative_age_in_days(self): age_in_days = -1 self._test_purge_invalid_age_in_days(age_in_days) def test_purge_exceeded_age_in_days_limit(self): age_in_days = int(time.time() / 86400) + 1 self._test_purge_invalid_age_in_days(age_in_days) @mock.patch('cinder.db.sqlalchemy.api.purge_deleted_rows') @mock.patch('cinder.context.get_admin_context') def test_purge_less_than_age_in_days_limit(self, get_admin_context, purge_deleted_rows): age_in_days = int(time.time() / 86400) - 1 ctxt = context.RequestContext(fake.USER_ID, fake.PROJECT_ID, is_admin=True) get_admin_context.return_value = ctxt purge_deleted_rows.return_value = None db_cmds = cinder_manage.DbCommands() db_cmds.purge(age_in_days) 
get_admin_context.assert_called_once_with() purge_deleted_rows.assert_called_once_with( ctxt, age_in_days=age_in_days) @mock.patch('cinder.db.service_get_all') @mock.patch('cinder.context.get_admin_context') def test_host_commands_list(self, get_admin_context, service_get_all): get_admin_context.return_value = mock.sentinel.ctxt service_get_all.return_value = [ {'host': 'fake-host', 'availability_zone': 'fake-az', 'uuid': 'a3a593da-7f8d-4bb7-8b4c-f2bc1e0b4824'}] with mock.patch('sys.stdout', new=io.StringIO()) as fake_out: expected_out = ("%(host)-25s\t%(zone)-15s\n" % {'host': 'host', 'zone': 'zone'}) expected_out += ("%(host)-25s\t%(availability_zone)-15s\n" % {'host': 'fake-host', 'availability_zone': 'fake-az'}) host_cmds = cinder_manage.HostCommands() host_cmds.list() get_admin_context.assert_called_once_with() service_get_all.assert_called_once_with(mock.sentinel.ctxt) self.assertEqual(expected_out, fake_out.getvalue()) @mock.patch('cinder.db.service_get_all') @mock.patch('cinder.context.get_admin_context') def test_host_commands_list_with_zone(self, get_admin_context, service_get_all): get_admin_context.return_value = mock.sentinel.ctxt service_get_all.return_value = [ {'host': 'fake-host', 'availability_zone': 'fake-az1', 'uuid': 'a3a593da-7f8d-4bb7-8b4c-f2bc1e0b4824'}, {'host': 'fake-host', 'availability_zone': 'fake-az2', 'uuid': '4200b32b-0bf9-436c-86b2-0675f6ac218e'}] with mock.patch('sys.stdout', new=io.StringIO()) as fake_out: expected_out = ("%(host)-25s\t%(zone)-15s\n" % {'host': 'host', 'zone': 'zone'}) expected_out += ("%(host)-25s\t%(availability_zone)-15s\n" % {'host': 'fake-host', 'availability_zone': 'fake-az1'}) host_cmds = cinder_manage.HostCommands() host_cmds.list(zone='fake-az1') get_admin_context.assert_called_once_with() service_get_all.assert_called_once_with(mock.sentinel.ctxt) self.assertEqual(expected_out, fake_out.getvalue()) @mock.patch('cinder.db.sqlalchemy.api.volume_get') @mock.patch('cinder.context.get_admin_context') @mock.patch('cinder.rpc.get_client') @mock.patch('cinder.rpc.init') def test_volume_commands_delete(self, rpc_init, get_client, get_admin_context, volume_get): ctxt = context.RequestContext('admin', 'fake', True) get_admin_context.return_value = ctxt mock_client = mock.MagicMock() cctxt = mock.MagicMock() mock_client.prepare.return_value = cctxt get_client.return_value = mock_client host = 'fake@host' db_volume = {'host': host + '#pool1'} volume = fake_volume.fake_db_volume(**db_volume) volume_obj = fake_volume.fake_volume_obj(ctxt, **volume) volume_id = volume['id'] volume_get.return_value = volume volume_cmds = cinder_manage.VolumeCommands() volume_cmds._client = mock_client volume_cmds.delete(volume_id) volume_get.assert_called_once_with(ctxt, volume_id) mock_client.prepare.assert_called_once_with( server="fake", topic="cinder-volume.fake@host", version="3.0") cctxt.cast.assert_called_once_with( ctxt, 'delete_volume', cascade=False, unmanage_only=False, volume=volume_obj) @mock.patch('cinder.db.volume_destroy') @mock.patch('cinder.db.sqlalchemy.api.volume_get') @mock.patch('cinder.context.get_admin_context') @mock.patch('cinder.rpc.init') def test_volume_commands_delete_no_host(self, rpc_init, get_admin_context, volume_get, volume_destroy): ctxt = context.RequestContext(fake.USER_ID, fake.PROJECT_ID, is_admin=True) get_admin_context.return_value = ctxt volume = fake_volume.fake_db_volume() volume_id = volume['id'] volume_get.return_value = volume with mock.patch('sys.stdout', new=io.StringIO()) as fake_out: expected_out = ('Volume not 
yet assigned to host.\n' 'Deleting volume from database and skipping' ' rpc.\n') volume_cmds = cinder_manage.VolumeCommands() volume_cmds.delete(volume_id) get_admin_context.assert_called_once_with() volume_get.assert_called_once_with(ctxt, volume_id) self.assertTrue(volume_destroy.called) admin_context = volume_destroy.call_args[0][0] self.assertTrue(admin_context.is_admin) self.assertEqual(expected_out, fake_out.getvalue()) @mock.patch('cinder.db.volume_destroy') @mock.patch('cinder.db.sqlalchemy.api.volume_get') @mock.patch('cinder.context.get_admin_context') @mock.patch('cinder.rpc.init') def test_volume_commands_delete_volume_in_use(self, rpc_init, get_admin_context, volume_get, volume_destroy): ctxt = context.RequestContext(fake.USER_ID, fake.PROJECT_ID) get_admin_context.return_value = ctxt db_volume = {'status': 'in-use', 'host': 'fake-host'} volume = fake_volume.fake_db_volume(**db_volume) volume_id = volume['id'] volume_get.return_value = volume with mock.patch('sys.stdout', new=io.StringIO()) as fake_out: expected_out = ('Volume is in-use.\n' 'Detach volume from instance and then try' ' again.\n') volume_cmds = cinder_manage.VolumeCommands() volume_cmds.delete(volume_id) volume_get.assert_called_once_with(ctxt, volume_id) self.assertEqual(expected_out, fake_out.getvalue()) def test_config_commands_list(self): with mock.patch('sys.stdout', new=io.StringIO()) as fake_out: expected_out = '' for key, value in CONF.items(): expected_out += '%s = %s' % (key, value) + '\n' config_cmds = cinder_manage.ConfigCommands() config_cmds.list() self.assertEqual(expected_out, fake_out.getvalue()) def test_config_commands_list_param(self): with mock.patch('sys.stdout', new=io.StringIO()) as fake_out: CONF.set_override('host', 'fake') expected_out = 'host = fake\n' config_cmds = cinder_manage.ConfigCommands() config_cmds.list(param='host') self.assertEqual(expected_out, fake_out.getvalue()) @mock.patch('cinder.db.backup_get_all') @mock.patch('cinder.context.get_admin_context') def test_backup_commands_list(self, get_admin_context, backup_get_all): ctxt = context.RequestContext(fake.USER_ID, fake.PROJECT_ID) get_admin_context.return_value = ctxt backup = {'id': fake.BACKUP_ID, 'user_id': fake.USER_ID, 'project_id': fake.PROJECT_ID, 'host': 'fake-host', 'display_name': 'fake-display-name', 'container': 'fake-container', 'status': fields.BackupStatus.AVAILABLE, 'size': 123, 'object_count': 1, 'volume_id': fake.VOLUME_ID, 'backup_metadata': {}, } backup_get_all.return_value = [backup] with mock.patch('sys.stdout', new=io.StringIO()) as fake_out: hdr = ('%-32s\t%-32s\t%-32s\t%-24s\t%-24s\t%-12s\t%-12s\t%-12s' '\t%-12s') header = hdr % ('ID', 'User ID', 'Project ID', 'Host', 'Name', 'Container', 'Status', 'Size', 'Object Count') res = ('%-32s\t%-32s\t%-32s\t%-24s\t%-24s\t%-12s\t%-12s\t%-12d' '\t%-12s') resource = res % (backup['id'], backup['user_id'], backup['project_id'], backup['host'], backup['display_name'], backup['container'], backup['status'], backup['size'], 1) expected_out = header + '\n' + resource + '\n' backup_cmds = cinder_manage.BackupCommands() backup_cmds.list() get_admin_context.assert_called_once_with() backup_get_all.assert_called_once_with(ctxt, None, None, None, None, None, None) self.assertEqual(expected_out, fake_out.getvalue()) @mock.patch('cinder.db.backup_update') @mock.patch('cinder.db.backup_get_all_by_host') @mock.patch('cinder.context.get_admin_context') def test_update_backup_host(self, get_admin_context, backup_get_by_host, backup_update): ctxt = 
context.RequestContext(fake.USER_ID, fake.PROJECT_ID) get_admin_context.return_value = ctxt backup = {'id': fake.BACKUP_ID, 'user_id': fake.USER_ID, 'project_id': fake.PROJECT_ID, 'host': 'fake-host', 'display_name': 'fake-display-name', 'container': 'fake-container', 'status': fields.BackupStatus.AVAILABLE, 'size': 123, 'object_count': 1, 'volume_id': fake.VOLUME_ID, 'backup_metadata': {}, } backup_get_by_host.return_value = [backup] backup_cmds = cinder_manage.BackupCommands() backup_cmds.update_backup_host('fake_host', 'fake_host2') get_admin_context.assert_called_once_with() backup_get_by_host.assert_called_once_with(ctxt, 'fake_host') backup_update.assert_called_once_with(ctxt, fake.BACKUP_ID, {'host': 'fake_host2'}) @mock.patch('cinder.db.consistencygroup_update') @mock.patch('cinder.db.consistencygroup_get_all') @mock.patch('cinder.context.get_admin_context') def test_update_consisgroup_host(self, get_admin_context, consisgroup_get_all, consisgroup_update): ctxt = context.RequestContext(fake.USER_ID, fake.PROJECT_ID) get_admin_context.return_value = ctxt consisgroup = {'id': fake.CONSISTENCY_GROUP_ID, 'user_id': fake.USER_ID, 'project_id': fake.PROJECT_ID, 'host': 'fake-host', 'status': fields.ConsistencyGroupStatus.AVAILABLE } consisgroup_get_all.return_value = [consisgroup] consisgrup_cmds = cinder_manage.ConsistencyGroupCommands() consisgrup_cmds.update_cg_host('fake_host', 'fake_host2') get_admin_context.assert_called_once_with() consisgroup_get_all.assert_called_once_with( ctxt, filters={'host': 'fake_host'}, limit=None, marker=None, offset=None, sort_dirs=None, sort_keys=None) consisgroup_update.assert_called_once_with( ctxt, fake.CONSISTENCY_GROUP_ID, {'host': 'fake_host2'}) @mock.patch('cinder.objects.service.Service.is_up', new_callable=mock.PropertyMock) @mock.patch('cinder.db.service_get_all') @mock.patch('cinder.context.get_admin_context') def _test_service_commands_list(self, service, get_admin_context, service_get_all, service_is_up): ctxt = context.RequestContext(fake.USER_ID, fake.PROJECT_ID) get_admin_context.return_value = ctxt service_get_all.return_value = [service] service_is_up.return_value = True with mock.patch('sys.stdout', new=io.StringIO()) as fake_out: format = "%-16s %-36s %-16s %-10s %-5s %-20s %-12s %-15s %-36s" print_format = format % ('Binary', 'Host', 'Zone', 'Status', 'State', 'Updated At', 'RPC Version', 'Object Version', 'Cluster') rpc_version = service['rpc_current_version'] object_version = service['object_current_version'] cluster = service.get('cluster_name', '') service_format = format % (service['binary'], service['host'], service['availability_zone'], 'enabled', ':-)', service['updated_at'], rpc_version, object_version, cluster) expected_out = print_format + '\n' + service_format + '\n' service_cmds = cinder_manage.ServiceCommands() service_cmds.list() self.assertEqual(expected_out, fake_out.getvalue()) get_admin_context.assert_called_with() service_get_all.assert_called_with(ctxt) def test_service_commands_list(self): service = {'binary': 'cinder-binary', 'host': 'fake-host.fake-domain', 'availability_zone': 'fake-zone', 'updated_at': '2014-06-30 11:22:33', 'disabled': False, 'rpc_current_version': '1.1', 'object_current_version': '1.1', 'cluster_name': 'my_cluster', 'uuid': 'a3a593da-7f8d-4bb7-8b4c-f2bc1e0b4824'} for binary in ('volume', 'scheduler', 'backup'): service['binary'] = 'cinder-%s' % binary self._test_service_commands_list(service) def test_service_commands_list_no_updated_at_or_cluster(self): service = {'binary': 
'cinder-binary', 'host': 'fake-host.fake-domain', 'availability_zone': 'fake-zone', 'updated_at': None, 'disabled': False, 'rpc_current_version': '1.1', 'object_current_version': '1.1', 'uuid': 'a3a593da-7f8d-4bb7-8b4c-f2bc1e0b4824'} for binary in ('volume', 'scheduler', 'backup'): service['binary'] = 'cinder-%s' % binary self._test_service_commands_list(service) @ddt.data(('foobar', 'foobar'), ('-foo bar', 'foo bar'), ('--foo bar', 'foo bar'), ('--foo-bar', 'foo_bar'), ('---foo-bar', '_foo_bar')) @ddt.unpack def test_get_arg_string(self, arg, expected): self.assertEqual(expected, cinder_manage.get_arg_string(arg)) def test_fetch_func_args(self): @cinder_manage.args('--full-rename') @cinder_manage.args('--different-dest', dest='my_dest') @cinder_manage.args('current') def my_func(): pass expected = {'full_rename': mock.sentinel.full_rename, 'my_dest': mock.sentinel.my_dest, 'current': mock.sentinel.current} with mock.patch.object(cinder_manage, 'CONF') as mock_conf: mock_conf.category = mock.Mock(**expected) self.assertDictEqual(expected, cinder_manage.fetch_func_args(my_func)) def test_args_decorator(self): @cinder_manage.args('host-name') @cinder_manage.args('cluster-name', metavar='cluster') @cinder_manage.args('--debug') def my_func(): pass expected = [ (['host_name'], {'metavar': 'host-name'}), (['cluster_name'], {'metavar': 'cluster'}), (['--debug'], {})] self.assertEqual(expected, my_func.args) @mock.patch('cinder.context.get_admin_context') @mock.patch('cinder.db.cluster_get_all') def tests_cluster_commands_list(self, get_all_mock, get_admin_mock, ): now = timeutils.utcnow() cluster = fake_cluster.fake_cluster_orm(num_hosts=4, num_down_hosts=2, created_at=now, last_heartbeat=now) get_all_mock.return_value = [cluster] ctxt = context.RequestContext(fake.USER_ID, fake.PROJECT_ID) get_admin_mock.return_value = ctxt with mock.patch('sys.stdout', new=io.StringIO()) as fake_out: format_ = "%-36s %-16s %-10s %-5s %-20s %-7s %-12s %-20s" print_format = format_ % ('Name', 'Binary', 'Status', 'State', 'Heartbeat', 'Hosts', 'Down Hosts', 'Updated At') cluster_format = format_ % (cluster.name, cluster.binary, 'enabled', ':-)', cluster.last_heartbeat, cluster.num_hosts, cluster.num_down_hosts, None) expected_out = print_format + '\n' + cluster_format + '\n' cluster_cmds = cinder_manage.ClusterCommands() cluster_cmds.list() self.assertEqual(expected_out, fake_out.getvalue()) get_admin_mock.assert_called_with() get_all_mock.assert_called_with(ctxt, is_up=None, get_services=False, services_summary=True, read_deleted='no') @mock.patch('cinder.db.sqlalchemy.api.cluster_get', auto_specs=True) @mock.patch('cinder.context.get_admin_context') def test_cluster_commands_remove_not_found(self, admin_ctxt_mock, cluster_get_mock): cluster_get_mock.side_effect = exception.ClusterNotFound(id=1) cluster_commands = cinder_manage.ClusterCommands() exit = cluster_commands.remove(False, 'abinary', 'acluster') self.assertEqual(2, exit) cluster_get_mock.assert_called_once_with(admin_ctxt_mock.return_value, None, name='acluster', binary='abinary', get_services=False) @mock.patch('cinder.db.sqlalchemy.api.service_destroy', auto_specs=True) @mock.patch('cinder.db.sqlalchemy.api.cluster_destroy', auto_specs=True) @mock.patch('cinder.db.sqlalchemy.api.cluster_get', auto_specs=True) @mock.patch('cinder.context.get_admin_context') def test_cluster_commands_remove_fail_has_hosts(self, admin_ctxt_mock, cluster_get_mock, cluster_destroy_mock, service_destroy_mock): cluster = fake_cluster.fake_cluster_ovo(mock.Mock()) 
cluster_get_mock.return_value = cluster cluster_destroy_mock.side_effect = exception.ClusterHasHosts(id=1) cluster_commands = cinder_manage.ClusterCommands() exit = cluster_commands.remove(False, 'abinary', 'acluster') self.assertEqual(2, exit) cluster_get_mock.assert_called_once_with(admin_ctxt_mock.return_value, None, name='acluster', binary='abinary', get_services=False) cluster_destroy_mock.assert_called_once_with( admin_ctxt_mock.return_value.elevated.return_value, cluster.id) service_destroy_mock.assert_not_called() @mock.patch('cinder.db.sqlalchemy.api.service_destroy', auto_specs=True) @mock.patch('cinder.db.sqlalchemy.api.cluster_destroy', auto_specs=True) @mock.patch('cinder.db.sqlalchemy.api.cluster_get', auto_specs=True) @mock.patch('cinder.context.get_admin_context') def test_cluster_commands_remove_success_no_hosts(self, admin_ctxt_mock, cluster_get_mock, cluster_destroy_mock, service_destroy_mock): cluster = fake_cluster.fake_cluster_orm() cluster_get_mock.return_value = cluster cluster_commands = cinder_manage.ClusterCommands() exit = cluster_commands.remove(False, 'abinary', 'acluster') self.assertIsNone(exit) cluster_get_mock.assert_called_once_with(admin_ctxt_mock.return_value, None, name='acluster', binary='abinary', get_services=False) cluster_destroy_mock.assert_called_once_with( admin_ctxt_mock.return_value.elevated.return_value, cluster.id) service_destroy_mock.assert_not_called() @mock.patch('cinder.db.sqlalchemy.api.service_destroy', auto_specs=True) @mock.patch('cinder.db.sqlalchemy.api.cluster_destroy', auto_specs=True) @mock.patch('cinder.db.sqlalchemy.api.cluster_get', auto_specs=True) @mock.patch('cinder.context.get_admin_context') def test_cluster_commands_remove_recursive(self, admin_ctxt_mock, cluster_get_mock, cluster_destroy_mock, service_destroy_mock): cluster = fake_cluster.fake_cluster_orm() cluster.services = [fake_service.fake_service_orm()] cluster_get_mock.return_value = cluster cluster_commands = cinder_manage.ClusterCommands() exit = cluster_commands.remove(True, 'abinary', 'acluster') self.assertIsNone(exit) cluster_get_mock.assert_called_once_with(admin_ctxt_mock.return_value, None, name='acluster', binary='abinary', get_services=True) cluster_destroy_mock.assert_called_once_with( admin_ctxt_mock.return_value.elevated.return_value, cluster.id) service_destroy_mock.assert_called_once_with( admin_ctxt_mock.return_value.elevated.return_value, cluster.services[0]['id']) @mock.patch('cinder.db.sqlalchemy.api.volume_include_in_cluster', auto_specs=True, return_value=1) @mock.patch('cinder.db.sqlalchemy.api.consistencygroup_include_in_cluster', auto_specs=True, return_value=2) @mock.patch('cinder.context.get_admin_context') def test_cluster_commands_rename(self, admin_ctxt_mock, volume_include_mock, cg_include_mock): """Test that cluster rename changes volumes and cgs.""" current_cluster_name = mock.sentinel.old_cluster_name new_cluster_name = mock.sentinel.new_cluster_name partial = mock.sentinel.partial cluster_commands = cinder_manage.ClusterCommands() exit = cluster_commands.rename(partial, current_cluster_name, new_cluster_name) self.assertIsNone(exit) volume_include_mock.assert_called_once_with( admin_ctxt_mock.return_value, new_cluster_name, partial, cluster_name=current_cluster_name) cg_include_mock.assert_called_once_with( admin_ctxt_mock.return_value, new_cluster_name, partial, cluster_name=current_cluster_name) @mock.patch('cinder.db.sqlalchemy.api.volume_include_in_cluster', auto_specs=True, return_value=0) 
@mock.patch('cinder.db.sqlalchemy.api.consistencygroup_include_in_cluster', auto_specs=True, return_value=0) @mock.patch('cinder.context.get_admin_context') def test_cluster_commands_rename_no_changes(self, admin_ctxt_mock, volume_include_mock, cg_include_mock): """Test that we return an error when cluster rename has no effect.""" cluster_commands = cinder_manage.ClusterCommands() exit = cluster_commands.rename(False, 'cluster', 'new_cluster') self.assertEqual(2, exit) @mock.patch('cinder.objects.Cluster.get_by_id') @mock.patch('cinder.context.get_admin_context') def test_main_remove_cluster(self, get_admin_mock, get_cluster_mock): script_name = 'cinder-manage' sys.argv = [script_name, 'cluster', 'remove', 'abinary', 'acluster'] self.mock_object(cinder_manage, 'CONF', cfg.ConfigOpts()) cinder_manage.main() expected_argument = (['cluster_name'], {'type': str, 'help': 'Cluster to delete.', 'metavar': 'cluster-name'}) self.assertIn(expected_argument, cinder_manage.CONF.category.action_fn.args) self.assertTrue(hasattr(cinder_manage.CONF.category, 'cluster_name')) get_admin_mock.assert_called_with() get_cluster_mock.assert_called_with(get_admin_mock.return_value, None, name='acluster', binary='abinary', get_services=False) cluster = get_cluster_mock.return_value cluster.destroy.assert_called() @mock.patch('oslo_config.cfg.ConfigOpts.register_cli_opt') def test_main_argv_lt_2(self, register_cli_opt): script_name = 'cinder-manage' sys.argv = [script_name] CONF(sys.argv[1:], project='cinder', version=version.version_string()) with mock.patch('sys.stdout', new=io.StringIO()): exit = self.assertRaises(SystemExit, cinder_manage.main) self.assertTrue(register_cli_opt.called) self.assertEqual(2, exit.code) def test_main_missing_action(self): sys.argv = ['cinder-manage', 'backup'] self.mock_object(cinder_manage, 'CONF', cfg.ConfigOpts()) stdout = io.StringIO() with mock.patch('sys.stdout', new=stdout): exit = self.assertRaises(SystemExit, cinder_manage.main) self.assertEqual(2, exit.code) stdout.seek(0) output = stdout.read() self.assertTrue(output.startswith('usage: ')) @mock.patch('oslo_config.cfg.ConfigOpts.__call__') @mock.patch('oslo_log.log.setup') @mock.patch('oslo_config.cfg.ConfigOpts.register_cli_opt') def test_main_sudo_failed(self, register_cli_opt, log_setup, config_opts_call): script_name = 'cinder-manage' sys.argv = [script_name, 'fake_category', 'fake_action'] config_opts_call.side_effect = cfg.ConfigFilesNotFoundError( mock.sentinel._namespace) with mock.patch('sys.stdout', new=io.StringIO()): exit = self.assertRaises(SystemExit, cinder_manage.main) self.assertTrue(register_cli_opt.called) config_opts_call.assert_called_once_with( sys.argv[1:], project='cinder', version=version.version_string()) self.assertFalse(log_setup.called) self.assertEqual(2, exit.code) @mock.patch('oslo_config.cfg.ConfigOpts.__call__') @mock.patch('oslo_config.cfg.ConfigOpts.register_cli_opt') def test_main(self, register_cli_opt, config_opts_call): script_name = 'cinder-manage' sys.argv = [script_name, 'config', 'list'] action_fn = mock.MagicMock() CONF.category = mock.MagicMock(action_fn=action_fn) cinder_manage.main() self.assertTrue(register_cli_opt.called) config_opts_call.assert_called_once_with( sys.argv[1:], project='cinder', version=version.version_string()) self.assertTrue(action_fn.called) @mock.patch('oslo_config.cfg.ConfigOpts.__call__') @mock.patch('oslo_log.log.setup') @mock.patch('oslo_config.cfg.ConfigOpts.register_cli_opt') def test_main_invalid_dir(self, register_cli_opt, log_setup, 
config_opts_call): script_name = 'cinder-manage' fake_dir = 'fake-dir' invalid_dir = 'Invalid directory:' sys.argv = [script_name, '--config-dir', fake_dir] config_opts_call.side_effect = cfg.ConfigDirNotFoundError(fake_dir) with mock.patch('sys.stdout', new=io.StringIO()) as fake_out: exit = self.assertRaises(SystemExit, cinder_manage.main) self.assertTrue(register_cli_opt.called) config_opts_call.assert_called_once_with( sys.argv[1:], project='cinder', version=version.version_string()) self.assertIn(invalid_dir, fake_out.getvalue()) self.assertIn(fake_dir, fake_out.getvalue()) self.assertFalse(log_setup.called) self.assertEqual(2, exit.code) @mock.patch('cinder.db') def test_remove_service_failure(self, mock_db): mock_db.service_destroy.side_effect = SystemExit(1) service_commands = cinder_manage.ServiceCommands() exit = service_commands.remove('abinary', 'ahost') self.assertEqual(2, exit) @mock.patch('cinder.db.service_destroy') @mock.patch( 'cinder.db.service_get', return_value = {'id': '12', 'uuid': 'a3a593da-7f8d-4bb7-8b4c-f2bc1e0b4824'}) def test_remove_service_success(self, mock_get_by_args, mock_service_destroy): service_commands = cinder_manage.ServiceCommands() self.assertIsNone(service_commands.remove('abinary', 'ahost')) @mock.patch('glob.glob') def test_util__get_resources_locks(self, mock_glob): cinder_manage.cfg.CONF.set_override('lock_path', '/locks', group='oslo_concurrency') cinder_manage.cfg.CONF.set_override('backend_url', 'file:///dlm', group='coordination') vol1 = fake.VOLUME_ID vol2 = fake.VOLUME2_ID snap = fake.SNAPSHOT_ID attach = fake.ATTACHMENT_ID files = [ 'cinder-something', # Non UUID files are ignored f'/locks/cinder-{vol1}-delete_volume', f'/locks/cinder-{vol2}-delete_volume', f'/locks/cinder-{vol2}', f'/locks/cinder-{vol2}-detach_volume', f'/locks/cinder-{snap}-delete_snapshot', '/locks/cinder-cleanup_incomplete_backups_12345', '/locks/cinder-unrelated-backup-named-file', ] dlm_locks = [ f'/dlm/cinder-attachment_update-{vol2}-{attach}', ] mock_glob.side_effect = [files, dlm_locks] commands = cinder_manage.UtilCommands() res = commands._get_resources_locks() self.assertEqual(2, mock_glob.call_count) mock_glob.assert_has_calls([ mock.call('/locks/cinder-*'), mock.call('/dlm/cinder-*') ]) expected_vols = { vol1: [f'/locks/cinder-{vol1}-delete_volume'], vol2: [f'/locks/cinder-{vol2}-delete_volume', f'/locks/cinder-{vol2}', f'/locks/cinder-{vol2}-detach_volume', f'/dlm/cinder-attachment_update-{vol2}-{attach}'], } expected_snaps = { snap: [f'/locks/cinder-{snap}-delete_snapshot'] } expected_backups = { '12345': ['/locks/cinder-cleanup_incomplete_backups_12345'] } expected = (expected_vols, expected_snaps, expected_backups) self.assertEqual(expected, res) @mock.patch.object(cinder_manage, 'open') def test__exclude_running_backups(self, mock_open): mock_running = mock.mock_open(read_data='cinder-backup --config-file ' '/etc/cinder/cinder.conf') file_running = mock_running.return_value.__enter__.return_value mock_other = mock.mock_open(read_data='python') file_other = mock_other.return_value.__enter__.return_value mock_open.side_effect = (FileNotFoundError, mock_running.return_value, mock_other.return_value, ValueError) backups = {'12341': '/locks/cinder-cleanup_incomplete_backups_12341', '12342': '/locks/cinder-cleanup_incomplete_backups_12342', '12343': '/locks/cinder-cleanup_incomplete_backups_12343', '12344': '/locks/cinder-cleanup_incomplete_backups_12344'} expected = {'12341': '/locks/cinder-cleanup_incomplete_backups_12341', '12343': 
'/locks/cinder-cleanup_incomplete_backups_12343'} commands = cinder_manage.UtilCommands() res = commands._exclude_running_backups(backups) self.assertIsNone(res) self.assertEqual(expected, backups) self.assertEqual(4, mock_open.call_count) mock_open.assert_has_calls([mock.call('/proc/12341/cmdline', 'r'), mock.call('/proc/12342/cmdline', 'r'), mock.call('/proc/12343/cmdline', 'r'), mock.call('/proc/12344/cmdline', 'r')]) file_running.read.assert_called_once_with() file_other.read.assert_called_once_with() @ddt.data(True, False) @mock.patch.object(cinder_manage, 'print') @mock.patch.object(cinder_manage.os, 'remove') @mock.patch.object(cinder_manage.UtilCommands, '_exclude_running_backups') @mock.patch('cinder.objects.Snapshot.exists') @mock.patch('cinder.objects.Volume.exists') @mock.patch.object(cinder_manage.UtilCommands, '_get_resources_locks') @mock.patch.object(cinder_manage.context, 'get_admin_context') def test_clean_locks(self, online, mock_ctxt, mock_get_locks, mock_vol_exists, mock_snap_exists, mock_exclude_backs, mock_remove, mock_print): vol1_files = [f'/locks/cinder-{fake.VOLUME_ID}-delete_volume'] vol2_files = [f'/locks/cinder-{fake.VOLUME2_ID}-delete_volume', f'/locks/cinder-{fake.VOLUME2_ID}', f'/locks/cinder-{fake.VOLUME2_ID}-detach_volume', f'/dlm/cinder-attachment_update-{fake.VOLUME2_ID}-' f'{fake.ATTACHMENT_ID}'] vols = collections.OrderedDict(((fake.VOLUME_ID, vol1_files), (fake.VOLUME2_ID, vol2_files))) snap_files = [f'/locks/cinder-{fake.SNAPSHOT_ID}-delete_snapshot'] snaps = {fake.SNAPSHOT_ID: snap_files} back_files = ['/locks/cinder-cleanup_incomplete_backups_12345'] backs = {'12345': back_files} mock_get_locks.return_value = (vols, snaps, backs) mock_vol_exists.side_effect = (True, False) mock_snap_exists.return_value = False mock_remove.side_effect = [None, errno.ENOENT, None, None, errno.ENOENT, ValueError, None] commands = cinder_manage.UtilCommands() commands.clean_locks(online=online) mock_ctxt.assert_called_once_with() mock_get_locks.assert_called_once_with() expected_calls = ([mock.call(v) for v in vol1_files] + [mock.call(v) for v in vol2_files] + [mock.call(s) for s in snap_files] + [mock.call(b) for b in back_files]) if online: self.assertEqual(2, mock_vol_exists.call_count) mock_vol_exists.assert_has_calls( (mock.call(mock_ctxt.return_value, fake.VOLUME_ID), mock.call(mock_ctxt.return_value, fake.VOLUME2_ID))) mock_snap_exists.assert_called_once_with(mock_ctxt.return_value, fake.SNAPSHOT_ID) mock_exclude_backs.assert_called_once_with(backs) # If services are online we'll check resources that still exist # and then we won't delete those that do. In this case the files # for the first volume. 
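            # mock_vol_exists reports the first volume as still present
            # (side_effect (True, False)), and that volume only has a single
            # lock file, so its expected os.remove call is dropped below.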
del expected_calls[0] else: mock_vol_exists.assert_not_called() mock_snap_exists.assert_not_called() mock_exclude_backs.assert_not_called() self.assertEqual(len(expected_calls), mock_remove.call_count) mock_remove.assert_has_calls(expected_calls) # Only the ValueError exception should be logged self.assertEqual(1, mock_print.call_count) @test.testtools.skipIf(sys.platform == 'darwin', 'Not supported on macOS') class TestCinderRtstoolCmd(test.TestCase): def setUp(self): super(TestCinderRtstoolCmd, self).setUp() sys.argv = ['cinder-rtstool'] self.INITIATOR_IQN = 'iqn.2015.12.com.example.openstack.i:UNIT1' self.TARGET_IQN = 'iqn.2015.12.com.example.openstack.i:TARGET1' @mock.patch.object(rtslib_fb.root, 'RTSRoot') def test_create_rtslib_error(self, rtsroot): rtsroot.side_effect = rtslib_fb.utils.RTSLibError() with mock.patch('sys.stdout', new=io.StringIO()): self.assertRaises(rtslib_fb.utils.RTSLibError, cinder_rtstool.create, mock.sentinel.backing_device, mock.sentinel.name, mock.sentinel.userid, mock.sentinel.password, mock.sentinel.iser_enabled) def _test_create_rtslib_error_network_portal(self, ip): with mock.patch.object(rtslib_fb, 'NetworkPortal') as network_portal, \ mock.patch.object(rtslib_fb, 'LUN') as lun, \ mock.patch.object(rtslib_fb, 'TPG') as tpg, \ mock.patch.object(rtslib_fb, 'FabricModule') as fabric_mod, \ mock.patch.object(rtslib_fb, 'Target') as target, \ mock.patch.object(rtslib_fb, 'BlockStorageObject') as \ block_storage_object, \ mock.patch.object(rtslib_fb.root, 'RTSRoot') as rts_root: root_new = mock.MagicMock(storage_objects=mock.MagicMock()) rts_root.return_value = root_new block_storage_object.return_value = mock.sentinel.so_new target.return_value = mock.sentinel.target_new fabric_mod.return_value = mock.sentinel.fabric_new tpg_new = tpg.return_value lun.return_value = mock.sentinel.lun_new if ip == '0.0.0.0': network_portal.side_effect = rtslib_fb.utils.RTSLibError() self.assertRaises(rtslib_fb.utils.RTSLibError, cinder_rtstool.create, mock.sentinel.backing_device, mock.sentinel.name, mock.sentinel.userid, mock.sentinel.password, mock.sentinel.iser_enabled) else: cinder_rtstool.create(mock.sentinel.backing_device, mock.sentinel.name, mock.sentinel.userid, mock.sentinel.password, mock.sentinel.iser_enabled) rts_root.assert_called_once_with() block_storage_object.assert_called_once_with( name=mock.sentinel.name, dev=mock.sentinel.backing_device) target.assert_called_once_with(mock.sentinel.fabric_new, mock.sentinel.name, 'create') fabric_mod.assert_called_once_with('iscsi') tpg.assert_called_once_with(mock.sentinel.target_new, mode='create') tpg_new.set_attribute.assert_called_once_with('authentication', '1') lun.assert_called_once_with(tpg_new, storage_object=mock.sentinel.so_new) self.assertEqual(1, tpg_new.enable) if ip == '::0': ip = '[::0]' network_portal.assert_any_call(tpg_new, ip, 3260, mode='any') def test_create_rtslib_error_network_portal_ipv4(self): with mock.patch('sys.stdout', new=io.StringIO()): self._test_create_rtslib_error_network_portal('0.0.0.0') def test_create_rtslib_error_network_portal_ipv6(self): with mock.patch('sys.stdout', new=io.StringIO()): self._test_create_rtslib_error_network_portal('::0') def _test_create(self, ip): with mock.patch.object(rtslib_fb, 'NetworkPortal') as network_portal, \ mock.patch.object(rtslib_fb, 'LUN') as lun, \ mock.patch.object(rtslib_fb, 'TPG') as tpg, \ mock.patch.object(rtslib_fb, 'FabricModule') as fabric_mod, \ mock.patch.object(rtslib_fb, 'Target') as target, \ mock.patch.object(rtslib_fb, 
'BlockStorageObject') as \ block_storage_object, \ mock.patch.object(rtslib_fb.root, 'RTSRoot') as rts_root: root_new = mock.MagicMock(storage_objects=mock.MagicMock()) rts_root.return_value = root_new block_storage_object.return_value = mock.sentinel.so_new target.return_value = mock.sentinel.target_new fabric_mod.return_value = mock.sentinel.fabric_new tpg_new = tpg.return_value lun.return_value = mock.sentinel.lun_new cinder_rtstool.create(mock.sentinel.backing_device, mock.sentinel.name, mock.sentinel.userid, mock.sentinel.password, mock.sentinel.iser_enabled) rts_root.assert_called_once_with() block_storage_object.assert_called_once_with( name=mock.sentinel.name, dev=mock.sentinel.backing_device) target.assert_called_once_with(mock.sentinel.fabric_new, mock.sentinel.name, 'create') fabric_mod.assert_called_once_with('iscsi') tpg.assert_called_once_with(mock.sentinel.target_new, mode='create') tpg_new.set_attribute.assert_called_once_with('authentication', '1') lun.assert_called_once_with(tpg_new, storage_object=mock.sentinel.so_new) self.assertEqual(1, tpg_new.enable) if ip == '::0': ip = '[::0]' network_portal.assert_any_call(tpg_new, ip, 3260, mode='any') def test_create_ipv4(self): self._test_create('0.0.0.0') def test_create_ipv6(self): self._test_create('::0') def _test_create_ips_and_port(self, mock_rtslib, port, ips, expected_ips): mock_rtslib.BlockStorageObject.return_value = mock.sentinel.bso mock_rtslib.Target.return_value = mock.sentinel.target_new mock_rtslib.FabricModule.return_value = mock.sentinel.iscsi_fabric tpg_new = mock_rtslib.TPG.return_value cinder_rtstool.create(mock.sentinel.backing_device, mock.sentinel.name, mock.sentinel.userid, mock.sentinel.password, mock.sentinel.iser_enabled, portals_ips=ips, portals_port=port) mock_rtslib.Target.assert_called_once_with(mock.sentinel.iscsi_fabric, mock.sentinel.name, 'create') mock_rtslib.TPG.assert_called_once_with(mock.sentinel.target_new, mode='create') mock_rtslib.LUN.assert_called_once_with( tpg_new, storage_object=mock.sentinel.bso) mock_rtslib.NetworkPortal.assert_has_calls( map(lambda ip: mock.call(tpg_new, ip, port, mode='any'), expected_ips), any_order=True ) @mock.patch.object(cinder_rtstool, 'rtslib_fb', autospec=True) def test_create_ips_and_port_ipv4(self, mock_rtslib): ips = ['10.0.0.2', '10.0.0.3', '10.0.0.4'] port = 3261 self._test_create_ips_and_port(mock_rtslib, port, ips, ips) @mock.patch.object(cinder_rtstool, 'rtslib_fb', autospec=True) def test_create_ips_and_port_ipv6(self, mock_rtslib): ips = ['fe80::fc16:3eff:fecb:ad2f'] expected_ips = ['[fe80::fc16:3eff:fecb:ad2f]'] port = 3261 self._test_create_ips_and_port(mock_rtslib, port, ips, expected_ips) @mock.patch.object(rtslib_fb.root, 'RTSRoot') def test_add_initiator_rtslib_error(self, rtsroot): rtsroot.side_effect = rtslib_fb.utils.RTSLibError() with mock.patch('sys.stdout', new=io.StringIO()): self.assertRaises(rtslib_fb.utils.RTSLibError, cinder_rtstool.add_initiator, mock.sentinel.target_iqn, self.INITIATOR_IQN, mock.sentinel.userid, mock.sentinel.password) @mock.patch.object(rtslib_fb.root, 'RTSRoot') def test_add_initiator_rtstool_error(self, rtsroot): rtsroot.targets.return_value = {} self.assertRaises(cinder_rtstool.RtstoolError, cinder_rtstool.add_initiator, mock.sentinel.target_iqn, self.INITIATOR_IQN, mock.sentinel.userid, mock.sentinel.password) @mock.patch.object(rtslib_fb, 'MappedLUN') @mock.patch.object(rtslib_fb, 'NodeACL') @mock.patch.object(rtslib_fb.root, 'RTSRoot') def test_add_initiator_acl_exists(self, rtsroot, node_acl, 
mapped_lun): target_iqn = mock.MagicMock() target_iqn.tpgs.return_value = \ [{'node_acls': self.INITIATOR_IQN}] acl = mock.MagicMock(node_wwn=self.INITIATOR_IQN) tpg = mock.MagicMock(node_acls=[acl]) tpgs = iter([tpg]) target = mock.MagicMock(tpgs=tpgs, wwn=self.TARGET_IQN) rtsroot.return_value = mock.MagicMock(targets=[target]) cinder_rtstool.add_initiator(self.TARGET_IQN, self.INITIATOR_IQN, mock.sentinel.userid, mock.sentinel.password) self.assertFalse(node_acl.called) self.assertFalse(mapped_lun.called) @mock.patch.object(rtslib_fb, 'MappedLUN') @mock.patch.object(rtslib_fb, 'NodeACL') @mock.patch.object(rtslib_fb.root, 'RTSRoot') def test_add_initiator_acl_exists_case_1(self, rtsroot, node_acl, mapped_lun): """Ensure initiator iqns are handled in a case-insensitive manner.""" target_iqn = mock.MagicMock() target_iqn.tpgs.return_value = \ [{'node_acls': self.INITIATOR_IQN.lower()}] acl = mock.MagicMock(node_wwn=self.INITIATOR_IQN) tpg = mock.MagicMock(node_acls=[acl]) tpgs = iter([tpg]) target = mock.MagicMock(tpgs=tpgs, wwn=target_iqn) rtsroot.return_value = mock.MagicMock(targets=[target]) cinder_rtstool.add_initiator(target_iqn, self.INITIATOR_IQN, mock.sentinel.userid, mock.sentinel.password) self.assertFalse(node_acl.called) self.assertFalse(mapped_lun.called) @mock.patch.object(rtslib_fb, 'MappedLUN') @mock.patch.object(rtslib_fb, 'NodeACL') @mock.patch.object(rtslib_fb.root, 'RTSRoot') def test_add_initiator_acl_exists_case_2(self, rtsroot, node_acl, mapped_lun): """Ensure initiator iqns are handled in a case-insensitive manner.""" iqn_lower = self.INITIATOR_IQN.lower() target_iqn = mock.MagicMock() target_iqn.tpgs.return_value = \ [{'node_acls': self.INITIATOR_IQN}] acl = mock.MagicMock(node_wwn=iqn_lower) tpg = mock.MagicMock(node_acls=[acl]) tpgs = iter([tpg]) target = mock.MagicMock(tpgs=tpgs, wwn=target_iqn) rtsroot.return_value = mock.MagicMock(targets=[target]) cinder_rtstool.add_initiator(target_iqn, self.INITIATOR_IQN, mock.sentinel.userid, mock.sentinel.password) self.assertFalse(node_acl.called) self.assertFalse(mapped_lun.called) @mock.patch.object(rtslib_fb, 'MappedLUN') @mock.patch.object(rtslib_fb, 'NodeACL') @mock.patch.object(rtslib_fb.root, 'RTSRoot') def test_add_initiator(self, rtsroot, node_acl, mapped_lun): target_iqn = mock.MagicMock() target_iqn.tpgs.return_value = \ [{'node_acls': self.INITIATOR_IQN}] tpg = mock.MagicMock() tpgs = iter([tpg]) target = mock.MagicMock(tpgs=tpgs, wwn=target_iqn) rtsroot.return_value = mock.MagicMock(targets=[target]) acl_new = mock.MagicMock(chap_userid=mock.sentinel.userid, chap_password=mock.sentinel.password) node_acl.return_value = acl_new cinder_rtstool.add_initiator(target_iqn, self.INITIATOR_IQN, mock.sentinel.userid, mock.sentinel.password) node_acl.assert_called_once_with(tpg, self.INITIATOR_IQN, mode='create') mapped_lun.assert_called_once_with(acl_new, 0, tpg_lun=0) @mock.patch.object(rtslib_fb.root, 'RTSRoot') def test_get_targets(self, rtsroot): target = mock.MagicMock() target.dump.return_value = {'wwn': 'fake-wwn'} rtsroot.return_value = mock.MagicMock(targets=[target]) with mock.patch('sys.stdout', new=io.StringIO()) as fake_out: cinder_rtstool.get_targets() self.assertEqual(str(target.wwn), fake_out.getvalue().strip()) @mock.patch.object(rtslib_fb.root, 'RTSRoot') def test_delete(self, rtsroot): target = mock.MagicMock(wwn=mock.sentinel.iqn) storage_object = mock.MagicMock() name = mock.PropertyMock(return_value=mock.sentinel.iqn) type(storage_object).name = name rtsroot.return_value = mock.MagicMock( 
targets=[target], storage_objects=[storage_object]) cinder_rtstool.delete(mock.sentinel.iqn) target.delete.assert_called_once_with() storage_object.delete.assert_called_once_with() @mock.patch.object(rtslib_fb, 'MappedLUN') @mock.patch.object(rtslib_fb, 'NodeACL') @mock.patch.object(rtslib_fb.root, 'RTSRoot') def test_delete_initiator(self, rtsroot, node_acl, mapped_lun): target_iqn = mock.MagicMock() target_iqn.tpgs.return_value = \ [{'node_acls': self.INITIATOR_IQN}] acl = mock.MagicMock(node_wwn=self.INITIATOR_IQN) tpg = mock.MagicMock(node_acls=[acl]) tpgs = iter([tpg]) target = mock.MagicMock(tpgs=tpgs, wwn=target_iqn) rtsroot.return_value = mock.MagicMock(targets=[target]) cinder_rtstool.delete_initiator(target_iqn, self.INITIATOR_IQN) @mock.patch.object(rtslib_fb, 'MappedLUN') @mock.patch.object(rtslib_fb, 'NodeACL') @mock.patch.object(rtslib_fb.root, 'RTSRoot') def test_delete_initiator_case(self, rtsroot, node_acl, mapped_lun): """Ensure iqns are handled in a case-insensitive manner.""" initiator_iqn_lower = self.INITIATOR_IQN.lower() target_iqn = mock.MagicMock() target_iqn.tpgs.return_value = \ [{'node_acls': initiator_iqn_lower}] acl = mock.MagicMock(node_wwn=self.INITIATOR_IQN) tpg = mock.MagicMock(node_acls=[acl]) tpgs = iter([tpg]) target = mock.MagicMock(tpgs=tpgs, wwn=target_iqn) rtsroot.return_value = mock.MagicMock(targets=[target]) cinder_rtstool.delete_initiator(target_iqn, self.INITIATOR_IQN) @mock.patch.object(cinder_rtstool, 'os', autospec=True) @mock.patch.object(cinder_rtstool, 'rtslib_fb', autospec=True) def test_save_with_filename(self, mock_rtslib, mock_os): filename = mock.sentinel.filename cinder_rtstool.save_to_file(filename) rtsroot = mock_rtslib.root.RTSRoot rtsroot.assert_called_once_with() self.assertEqual(0, mock_os.path.dirname.call_count) self.assertEqual(0, mock_os.path.exists.call_count) self.assertEqual(0, mock_os.makedirs.call_count) rtsroot.return_value.save_to_file.assert_called_once_with(filename) @mock.patch.object(cinder_rtstool, 'os', **{'path.exists.return_value': True, 'path.dirname.return_value': mock.sentinel.dirname}) @mock.patch.object(cinder_rtstool, 'rtslib_fb', **{'root.default_save_file': mock.sentinel.filename}) def test_save(self, mock_rtslib, mock_os): """Test that we check path exists with default file.""" cinder_rtstool.save_to_file(None) rtsroot = mock_rtslib.root.RTSRoot rtsroot.assert_called_once_with() rtsroot.return_value.save_to_file.assert_called_once_with( mock.sentinel.filename) mock_os.path.dirname.assert_called_once_with(mock.sentinel.filename) mock_os.path.exists.assert_called_once_with(mock.sentinel.dirname) self.assertEqual(0, mock_os.makedirs.call_count) @mock.patch.object(cinder_rtstool, 'os', **{'path.exists.return_value': False, 'path.dirname.return_value': mock.sentinel.dirname}) @mock.patch.object(cinder_rtstool, 'rtslib_fb', **{'root.default_save_file': mock.sentinel.filename}) def test_save_no_targetcli(self, mock_rtslib, mock_os): """Test that we create path if it doesn't exist with default file.""" cinder_rtstool.save_to_file(None) rtsroot = mock_rtslib.root.RTSRoot rtsroot.assert_called_once_with() rtsroot.return_value.save_to_file.assert_called_once_with( mock.sentinel.filename) mock_os.path.dirname.assert_called_once_with(mock.sentinel.filename) mock_os.path.exists.assert_called_once_with(mock.sentinel.dirname) mock_os.makedirs.assert_called_once_with(mock.sentinel.dirname, 0o755) @mock.patch.object(cinder_rtstool, 'os', autospec=True) @mock.patch.object(cinder_rtstool, 'rtslib_fb', autospec=True) 
def test_save_error_creating_dir(self, mock_rtslib, mock_os): mock_os.path.dirname.return_value = 'dirname' mock_os.path.exists.return_value = False mock_os.makedirs.side_effect = OSError('error') regexp = (r'targetcli not installed and could not create default ' r'directory \(dirname\): error$') self.assertRaisesRegex(cinder_rtstool.RtstoolError, regexp, cinder_rtstool.save_to_file, None) @mock.patch.object(cinder_rtstool, 'os', autospec=True) @mock.patch.object(cinder_rtstool, 'rtslib_fb', autospec=True) def test_save_error_saving(self, mock_rtslib, mock_os): save = mock_rtslib.root.RTSRoot.return_value.save_to_file save.side_effect = OSError('error') regexp = r'Could not save configuration to myfile: error' self.assertRaisesRegex(cinder_rtstool.RtstoolError, regexp, cinder_rtstool.save_to_file, 'myfile') @mock.patch.object(cinder_rtstool, 'rtslib_fb', **{'root.default_save_file': mock.sentinel.filename}) def test_restore(self, mock_rtslib): """Test that we restore target configuration with default file.""" cinder_rtstool.restore_from_file(None) rtsroot = mock_rtslib.root.RTSRoot rtsroot.assert_called_once_with() rtsroot.return_value.restore_from_file.assert_called_once_with( mock.sentinel.filename) @mock.patch.object(cinder_rtstool, 'rtslib_fb') def test_restore_with_file(self, mock_rtslib): """Test that we restore target configuration with specified file.""" cinder_rtstool.restore_from_file('saved_file') rtsroot = mock_rtslib.root.RTSRoot rtsroot.return_value.restore_from_file.assert_called_once_with( 'saved_file') @mock.patch('cinder.cmd.rtstool.restore_from_file') def test_restore_error(self, restore_from_file): """Test that we fail to restore target configuration.""" restore_from_file.side_effect = OSError self.assertRaises(OSError, cinder_rtstool.restore_from_file, mock.sentinel.filename) def test_usage(self): with mock.patch('sys.stdout', new=io.StringIO()): exit = self.assertRaises(SystemExit, cinder_rtstool.usage) self.assertEqual(1, exit.code) @mock.patch('cinder.cmd.rtstool.usage') def test_main_argc_lt_2(self, usage): usage.side_effect = SystemExit(1) sys.argv = ['cinder-rtstool'] exit = self.assertRaises(SystemExit, cinder_rtstool.usage) self.assertTrue(usage.called) self.assertEqual(1, exit.code) def test_main_create_argv_lt_6(self): sys.argv = ['cinder-rtstool', 'create'] self._test_main_check_argv() def test_main_create_argv_gt_7(self): sys.argv = ['cinder-rtstool', 'create', 'fake-arg1', 'fake-arg2', 'fake-arg3', 'fake-arg4', 'fake-arg5', 'fake-arg6'] self._test_main_check_argv() def test_main_add_initiator_argv_lt_6(self): sys.argv = ['cinder-rtstool', 'add-initiator'] self._test_main_check_argv() def test_main_delete_argv_lt_3(self): sys.argv = ['cinder-rtstool', 'delete'] self._test_main_check_argv() def test_main_no_action(self): sys.argv = ['cinder-rtstool'] self._test_main_check_argv() def _test_main_check_argv(self): with mock.patch('cinder.cmd.rtstool.usage') as usage: usage.side_effect = SystemExit(1) sys.argv = ['cinder-rtstool', 'create'] exit = self.assertRaises(SystemExit, cinder_rtstool.main) self.assertTrue(usage.called) self.assertEqual(1, exit.code) @mock.patch('cinder.cmd.rtstool.save_to_file') def test_main_save(self, mock_save): sys.argv = ['cinder-rtstool', 'save'] rc = cinder_rtstool.main() mock_save.assert_called_once_with(None) self.assertEqual(0, rc) @mock.patch('cinder.cmd.rtstool.save_to_file') def test_main_save_with_file(self, mock_save): sys.argv = ['cinder-rtstool', 'save', mock.sentinel.filename] rc = cinder_rtstool.main() 
mock_save.assert_called_once_with(mock.sentinel.filename) self.assertEqual(0, rc) def test_main_create(self): with mock.patch('cinder.cmd.rtstool.create') as create: sys.argv = ['cinder-rtstool', 'create', mock.sentinel.backing_device, mock.sentinel.name, mock.sentinel.userid, mock.sentinel.password, mock.sentinel.iser_enabled, str(mock.sentinel.initiator_iqns)] rc = cinder_rtstool.main() create.assert_called_once_with( mock.sentinel.backing_device, mock.sentinel.name, mock.sentinel.userid, mock.sentinel.password, mock.sentinel.iser_enabled, initiator_iqns=str(mock.sentinel.initiator_iqns)) self.assertEqual(0, rc) @mock.patch('cinder.cmd.rtstool.create') def test_main_create_ips_and_port(self, mock_create): sys.argv = ['cinder-rtstool', 'create', mock.sentinel.backing_device, mock.sentinel.name, mock.sentinel.userid, mock.sentinel.password, mock.sentinel.iser_enabled, str(mock.sentinel.initiator_iqns), '-p3261', '-aip1,ip2,ip3'] rc = cinder_rtstool.main() mock_create.assert_called_once_with( mock.sentinel.backing_device, mock.sentinel.name, mock.sentinel.userid, mock.sentinel.password, mock.sentinel.iser_enabled, initiator_iqns=str(mock.sentinel.initiator_iqns), portals_ips=['ip1', 'ip2', 'ip3'], portals_port=3261) self.assertEqual(0, rc) def test_main_add_initiator(self): with mock.patch('cinder.cmd.rtstool.add_initiator') as add_initiator: sys.argv = ['cinder-rtstool', 'add-initiator', mock.sentinel.target_iqn, mock.sentinel.userid, mock.sentinel.password, mock.sentinel.initiator_iqns] rc = cinder_rtstool.main() add_initiator.assert_called_once_with( mock.sentinel.target_iqn, mock.sentinel.initiator_iqns, mock.sentinel.userid, mock.sentinel.password) self.assertEqual(0, rc) def test_main_get_targets(self): with mock.patch('cinder.cmd.rtstool.get_targets') as get_targets: sys.argv = ['cinder-rtstool', 'get-targets'] rc = cinder_rtstool.main() get_targets.assert_called_once_with() self.assertEqual(0, rc) def test_main_delete(self): with mock.patch('cinder.cmd.rtstool.delete') as delete: sys.argv = ['cinder-rtstool', 'delete', mock.sentinel.iqn] rc = cinder_rtstool.main() delete.assert_called_once_with(mock.sentinel.iqn) self.assertEqual(0, rc) @mock.patch.object(cinder_rtstool, 'verify_rtslib') def test_main_verify(self, mock_verify_rtslib): sys.argv = ['cinder-rtstool', 'verify'] rc = cinder_rtstool.main() mock_verify_rtslib.assert_called_once_with() self.assertEqual(0, rc) class TestCinderVolumeUsageAuditCmd(test.TestCase): def setUp(self): super(TestCinderVolumeUsageAuditCmd, self).setUp() sys.argv = ['cinder-volume-usage-audit'] @mock.patch('cinder.utils.last_completed_audit_period') @mock.patch('cinder.rpc.init') @mock.patch('cinder.version.version_string') @mock.patch('oslo_log.log.getLogger') @mock.patch('oslo_log.log.setup') @mock.patch('cinder.context.get_admin_context') def test_main_time_error(self, get_admin_context, log_setup, get_logger, version_string, rpc_init, last_completed_audit_period): CONF.set_override('start_time', '2014-01-01 01:00:00') CONF.set_override('end_time', '2013-01-01 01:00:00') last_completed_audit_period.return_value = (mock.sentinel.begin, mock.sentinel.end) exit = self.assertRaises(SystemExit, volume_usage_audit.main) get_admin_context.assert_called_once_with() self.assertEqual('cinder', CONF.project) self.assertEqual(CONF.version, version.version_string()) log_setup.assert_called_once_with(CONF, "cinder") get_logger.assert_called_once_with('cinder') self.assertEqual(-1, exit.code) rpc_init.assert_called_once_with(CONF) 
last_completed_audit_period.assert_called_once_with() @mock.patch('cinder.volume.volume_utils.notify_about_volume_usage') @mock.patch('cinder.objects.volume.VolumeList.get_all_active_by_window') @mock.patch('cinder.utils.last_completed_audit_period') @mock.patch('cinder.rpc.init') @mock.patch('cinder.version.version_string') @mock.patch('oslo_log.log.getLogger') @mock.patch('oslo_log.log.setup') @mock.patch('cinder.context.get_admin_context') def test_main_send_create_volume_error(self, get_admin_context, log_setup, get_logger, version_string, rpc_init, last_completed_audit_period, volume_get_all_active_by_window, notify_about_volume_usage): CONF.set_override('send_actions', True) CONF.set_override('start_time', '2014-01-01 01:00:00') CONF.set_override('end_time', '2014-02-02 02:00:00') begin = datetime.datetime(2014, 1, 1, 1, 0, tzinfo=iso8601.UTC) end = datetime.datetime(2014, 2, 2, 2, 0, tzinfo=iso8601.UTC) ctxt = context.RequestContext(fake.USER_ID, fake.PROJECT_ID) get_admin_context.return_value = ctxt last_completed_audit_period.return_value = (begin, end) volume1_created = datetime.datetime(2014, 1, 1, 2, 0, tzinfo=iso8601.UTC) volume1_deleted = datetime.datetime(2014, 1, 1, 3, 0, tzinfo=iso8601.UTC) volume1 = mock.MagicMock(id=fake.VOLUME_ID, project_id=fake.PROJECT_ID, created_at=volume1_created, deleted_at=volume1_deleted) volume_get_all_active_by_window.return_value = [volume1] extra_info = { 'audit_period_beginning': str(begin), 'audit_period_ending': str(end), } local_extra_info = { 'audit_period_beginning': str(volume1.created_at), 'audit_period_ending': str(volume1.created_at), } def _notify_about_volume_usage(*args, **kwargs): if 'create.end' in args: raise Exception() else: pass notify_about_volume_usage.side_effect = _notify_about_volume_usage volume_usage_audit.main() get_admin_context.assert_called_once_with() self.assertEqual('cinder', CONF.project) self.assertEqual(CONF.version, version.version_string()) log_setup.assert_called_once_with(CONF, "cinder") get_logger.assert_called_once_with('cinder') rpc_init.assert_called_once_with(CONF) last_completed_audit_period.assert_called_once_with() volume_get_all_active_by_window.assert_called_once_with(ctxt, begin, end) notify_about_volume_usage.assert_has_calls([ mock.call(ctxt, volume1, 'exists', extra_usage_info=extra_info), mock.call(ctxt, volume1, 'create.start', extra_usage_info=local_extra_info), mock.call(ctxt, volume1, 'create.end', extra_usage_info=local_extra_info) ]) @mock.patch('cinder.volume.volume_utils.notify_about_volume_usage') @mock.patch('cinder.objects.volume.VolumeList.get_all_active_by_window') @mock.patch('cinder.utils.last_completed_audit_period') @mock.patch('cinder.rpc.init') @mock.patch('cinder.version.version_string') @mock.patch('oslo_log.log.getLogger') @mock.patch('oslo_log.log.setup') @mock.patch('cinder.context.get_admin_context') def test_main_send_delete_volume_error(self, get_admin_context, log_setup, get_logger, version_string, rpc_init, last_completed_audit_period, volume_get_all_active_by_window, notify_about_volume_usage): CONF.set_override('send_actions', True) CONF.set_override('start_time', '2014-01-01 01:00:00') CONF.set_override('end_time', '2014-02-02 02:00:00') begin = datetime.datetime(2014, 1, 1, 1, 0, tzinfo=iso8601.UTC) end = datetime.datetime(2014, 2, 2, 2, 0, tzinfo=iso8601.UTC) ctxt = context.RequestContext(fake.USER_ID, fake.PROJECT_ID) get_admin_context.return_value = ctxt last_completed_audit_period.return_value = (begin, end) volume1_created = datetime.datetime(2014, 
1, 1, 2, 0, tzinfo=iso8601.UTC) volume1_deleted = datetime.datetime(2014, 1, 1, 3, 0, tzinfo=iso8601.UTC) volume1 = mock.MagicMock(id=fake.VOLUME_ID, project_id=fake.PROJECT_ID, created_at=volume1_created, deleted_at=volume1_deleted) volume_get_all_active_by_window.return_value = [volume1] extra_info = { 'audit_period_beginning': str(begin), 'audit_period_ending': str(end), } local_extra_info_create = { 'audit_period_beginning': str(volume1.created_at), 'audit_period_ending': str(volume1.created_at), } local_extra_info_delete = { 'audit_period_beginning': str(volume1.deleted_at), 'audit_period_ending': str(volume1.deleted_at), } def _notify_about_volume_usage(*args, **kwargs): if 'delete.end' in args: raise Exception() else: pass notify_about_volume_usage.side_effect = _notify_about_volume_usage volume_usage_audit.main() get_admin_context.assert_called_once_with() self.assertEqual('cinder', CONF.project) self.assertEqual(CONF.version, version.version_string()) log_setup.assert_called_once_with(CONF, "cinder") get_logger.assert_called_once_with('cinder') rpc_init.assert_called_once_with(CONF) last_completed_audit_period.assert_called_once_with() volume_get_all_active_by_window.assert_called_once_with(ctxt, begin, end) notify_about_volume_usage.assert_has_calls([ mock.call(ctxt, volume1, 'exists', extra_usage_info=extra_info), mock.call(ctxt, volume1, 'create.start', extra_usage_info=local_extra_info_create), mock.call(ctxt, volume1, 'create.end', extra_usage_info=local_extra_info_create), mock.call(ctxt, volume1, 'delete.start', extra_usage_info=local_extra_info_delete), mock.call(ctxt, volume1, 'delete.end', extra_usage_info=local_extra_info_delete) ]) @mock.patch('cinder.volume.volume_utils.notify_about_snapshot_usage') @mock.patch('cinder.objects.snapshot.SnapshotList.' 
'get_all_active_by_window') @mock.patch('cinder.volume.volume_utils.notify_about_volume_usage') @mock.patch('cinder.objects.volume.VolumeList.get_all_active_by_window') @mock.patch('cinder.utils.last_completed_audit_period') @mock.patch('cinder.rpc.init') @mock.patch('cinder.version.version_string') @mock.patch('oslo_log.log.getLogger') @mock.patch('oslo_log.log.setup') @mock.patch('cinder.context.get_admin_context') def test_main_send_snapshot_error(self, get_admin_context, log_setup, get_logger, version_string, rpc_init, last_completed_audit_period, volume_get_all_active_by_window, notify_about_volume_usage, snapshot_get_all_active_by_window, notify_about_snapshot_usage): CONF.set_override('send_actions', True) CONF.set_override('start_time', '2014-01-01 01:00:00') CONF.set_override('end_time', '2014-02-02 02:00:00') begin = datetime.datetime(2014, 1, 1, 1, 0, tzinfo=iso8601.UTC) end = datetime.datetime(2014, 2, 2, 2, 0, tzinfo=iso8601.UTC) ctxt = context.RequestContext(fake.USER_ID, fake.PROJECT_ID) get_admin_context.return_value = ctxt last_completed_audit_period.return_value = (begin, end) snapshot1_created = datetime.datetime(2014, 1, 1, 2, 0, tzinfo=iso8601.UTC) snapshot1_deleted = datetime.datetime(2014, 1, 1, 3, 0, tzinfo=iso8601.UTC) snapshot1 = mock.MagicMock(id=fake.VOLUME_ID, project_id=fake.PROJECT_ID, created_at=snapshot1_created, deleted_at=snapshot1_deleted) volume_get_all_active_by_window.return_value = [] snapshot_get_all_active_by_window.return_value = [snapshot1] extra_info = { 'audit_period_beginning': str(begin), 'audit_period_ending': str(end), } local_extra_info_create = { 'audit_period_beginning': str(snapshot1.created_at), 'audit_period_ending': str(snapshot1.created_at), } local_extra_info_delete = { 'audit_period_beginning': str(snapshot1.deleted_at), 'audit_period_ending': str(snapshot1.deleted_at), } def _notify_about_snapshot_usage(*args, **kwargs): # notify_about_snapshot_usage raises an exception, but does not # block raise Exception() notify_about_snapshot_usage.side_effect = _notify_about_snapshot_usage volume_usage_audit.main() get_admin_context.assert_called_once_with() self.assertEqual('cinder', CONF.project) self.assertEqual(CONF.version, version.version_string()) log_setup.assert_called_once_with(CONF, "cinder") get_logger.assert_called_once_with('cinder') rpc_init.assert_called_once_with(CONF) last_completed_audit_period.assert_called_once_with() volume_get_all_active_by_window.assert_called_once_with(ctxt, begin, end) self.assertFalse(notify_about_volume_usage.called) notify_about_snapshot_usage.assert_has_calls([ mock.call(ctxt, snapshot1, 'exists', extra_info), mock.call(ctxt, snapshot1, 'create.start', extra_usage_info=local_extra_info_create), mock.call(ctxt, snapshot1, 'delete.start', extra_usage_info=local_extra_info_delete) ]) @mock.patch('cinder.volume.volume_utils.notify_about_backup_usage') @mock.patch('cinder.objects.backup.BackupList.get_all_active_by_window') @mock.patch('cinder.volume.volume_utils.notify_about_volume_usage') @mock.patch('cinder.objects.volume.VolumeList.get_all_active_by_window') @mock.patch('cinder.utils.last_completed_audit_period') @mock.patch('cinder.rpc.init') @mock.patch('cinder.version.version_string') @mock.patch('cinder.context.get_admin_context') def test_main_send_backup_error(self, get_admin_context, version_string, rpc_init, last_completed_audit_period, volume_get_all_active_by_window, notify_about_volume_usage, backup_get_all_active_by_window, notify_about_backup_usage): 
CONF.set_override('send_actions', True) CONF.set_override('start_time', '2014-01-01 01:00:00') CONF.set_override('end_time', '2014-02-02 02:00:00') begin = datetime.datetime(2014, 1, 1, 1, 0, tzinfo=iso8601.UTC) end = datetime.datetime(2014, 2, 2, 2, 0, tzinfo=iso8601.UTC) ctxt = context.RequestContext('fake-user', 'fake-project') get_admin_context.return_value = ctxt last_completed_audit_period.return_value = (begin, end) backup1_created = datetime.datetime(2014, 1, 1, 2, 0, tzinfo=iso8601.UTC) backup1_deleted = datetime.datetime(2014, 1, 1, 3, 0, tzinfo=iso8601.UTC) backup1 = mock.MagicMock(id=fake.BACKUP_ID, project_id=fake.PROJECT_ID, created_at=backup1_created, deleted_at=backup1_deleted) volume_get_all_active_by_window.return_value = [] backup_get_all_active_by_window.return_value = [backup1] extra_info = { 'audit_period_beginning': str(begin), 'audit_period_ending': str(end), } local_extra_info_create = { 'audit_period_beginning': str(backup1.created_at), 'audit_period_ending': str(backup1.created_at), } local_extra_info_delete = { 'audit_period_beginning': str(backup1.deleted_at), 'audit_period_ending': str(backup1.deleted_at), } notify_about_backup_usage.side_effect = Exception() volume_usage_audit.main() get_admin_context.assert_called_once_with() self.assertEqual('cinder', CONF.project) self.assertEqual(CONF.version, version.version_string()) rpc_init.assert_called_once_with(CONF) last_completed_audit_period.assert_called_once_with() volume_get_all_active_by_window.assert_called_once_with(ctxt, begin, end) self.assertFalse(notify_about_volume_usage.called) notify_about_backup_usage.assert_any_call(ctxt, backup1, 'exists', extra_info) notify_about_backup_usage.assert_any_call( ctxt, backup1, 'create.start', extra_usage_info=local_extra_info_create) notify_about_backup_usage.assert_any_call( ctxt, backup1, 'delete.start', extra_usage_info=local_extra_info_delete) @mock.patch('cinder.volume.volume_utils.notify_about_backup_usage') @mock.patch('cinder.objects.backup.BackupList.get_all_active_by_window') @mock.patch('cinder.volume.volume_utils.notify_about_snapshot_usage') @mock.patch('cinder.objects.snapshot.SnapshotList.' 
'get_all_active_by_window') @mock.patch('cinder.volume.volume_utils.notify_about_volume_usage') @mock.patch('cinder.objects.volume.VolumeList.get_all_active_by_window') @mock.patch('cinder.utils.last_completed_audit_period') @mock.patch('cinder.rpc.init') @mock.patch('cinder.version.version_string') @mock.patch('oslo_log.log.getLogger') @mock.patch('oslo_log.log.setup') @mock.patch('cinder.context.get_admin_context') def test_main(self, get_admin_context, log_setup, get_logger, version_string, rpc_init, last_completed_audit_period, volume_get_all_active_by_window, notify_about_volume_usage, snapshot_get_all_active_by_window, notify_about_snapshot_usage, backup_get_all_active_by_window, notify_about_backup_usage): CONF.set_override('send_actions', True) CONF.set_override('start_time', '2014-01-01 01:00:00') CONF.set_override('end_time', '2014-02-02 02:00:00') begin = datetime.datetime(2014, 1, 1, 1, 0, tzinfo=iso8601.UTC) end = datetime.datetime(2014, 2, 2, 2, 0, tzinfo=iso8601.UTC) ctxt = context.RequestContext(fake.USER_ID, fake.PROJECT_ID) get_admin_context.return_value = ctxt last_completed_audit_period.return_value = (begin, end) volume1_created = datetime.datetime(2014, 1, 1, 2, 0, tzinfo=iso8601.UTC) volume1_deleted = datetime.datetime(2014, 1, 1, 3, 0, tzinfo=iso8601.UTC) volume1 = mock.MagicMock(id=fake.VOLUME_ID, project_id=fake.PROJECT_ID, created_at=volume1_created, deleted_at=volume1_deleted) volume_get_all_active_by_window.return_value = [volume1] extra_info = { 'audit_period_beginning': str(begin), 'audit_period_ending': str(end), } extra_info_volume_create = { 'audit_period_beginning': str(volume1.created_at), 'audit_period_ending': str(volume1.created_at), } extra_info_volume_delete = { 'audit_period_beginning': str(volume1.deleted_at), 'audit_period_ending': str(volume1.deleted_at), } snapshot1_created = datetime.datetime(2014, 1, 1, 2, 0, tzinfo=iso8601.UTC) snapshot1_deleted = datetime.datetime(2014, 1, 1, 3, 0, tzinfo=iso8601.UTC) snapshot1 = mock.MagicMock(id=fake.VOLUME_ID, project_id=fake.PROJECT_ID, created_at=snapshot1_created, deleted_at=snapshot1_deleted) snapshot_get_all_active_by_window.return_value = [snapshot1] extra_info_snapshot_create = { 'audit_period_beginning': str(snapshot1.created_at), 'audit_period_ending': str(snapshot1.created_at), } extra_info_snapshot_delete = { 'audit_period_beginning': str(snapshot1.deleted_at), 'audit_period_ending': str(snapshot1.deleted_at), } backup1_created = datetime.datetime(2014, 1, 1, 2, 0, tzinfo=iso8601.UTC) backup1_deleted = datetime.datetime(2014, 1, 1, 3, 0, tzinfo=iso8601.UTC) backup1 = mock.MagicMock(id=fake.BACKUP_ID, project_id=fake.PROJECT_ID, created_at=backup1_created, deleted_at=backup1_deleted) backup_get_all_active_by_window.return_value = [backup1] extra_info_backup_create = { 'audit_period_beginning': str(backup1.created_at), 'audit_period_ending': str(backup1.created_at), } extra_info_backup_delete = { 'audit_period_beginning': str(backup1.deleted_at), 'audit_period_ending': str(backup1.deleted_at), } volume_usage_audit.main() get_admin_context.assert_called_once_with() self.assertEqual('cinder', CONF.project) self.assertEqual(CONF.version, version.version_string()) log_setup.assert_called_once_with(CONF, "cinder") get_logger.assert_called_once_with('cinder') rpc_init.assert_called_once_with(CONF) last_completed_audit_period.assert_called_once_with() volume_get_all_active_by_window.assert_called_once_with(ctxt, begin, end) notify_about_volume_usage.assert_has_calls([ mock.call(ctxt, volume1, 'exists', 
                      extra_usage_info=extra_info),
            mock.call(ctxt, volume1, 'create.start',
                      extra_usage_info=extra_info_volume_create),
            mock.call(ctxt, volume1, 'create.end',
                      extra_usage_info=extra_info_volume_create),
            mock.call(ctxt, volume1, 'delete.start',
                      extra_usage_info=extra_info_volume_delete),
            mock.call(ctxt, volume1, 'delete.end',
                      extra_usage_info=extra_info_volume_delete)
        ])
        notify_about_snapshot_usage.assert_has_calls([
            mock.call(ctxt, snapshot1, 'exists', extra_info),
            mock.call(ctxt, snapshot1, 'create.start',
                      extra_usage_info=extra_info_snapshot_create),
            mock.call(ctxt, snapshot1, 'create.end',
                      extra_usage_info=extra_info_snapshot_create),
            mock.call(ctxt, snapshot1, 'delete.start',
                      extra_usage_info=extra_info_snapshot_delete),
            mock.call(ctxt, snapshot1, 'delete.end',
                      extra_usage_info=extra_info_snapshot_delete)
        ])
        notify_about_backup_usage.assert_has_calls([
            mock.call(ctxt, backup1, 'exists', extra_info),
            mock.call(ctxt, backup1, 'create.start',
                      extra_usage_info=extra_info_backup_create),
            mock.call(ctxt, backup1, 'create.end',
                      extra_usage_info=extra_info_backup_create),
            mock.call(ctxt, backup1, 'delete.start',
                      extra_usage_info=extra_info_backup_delete),
            mock.call(ctxt, backup1, 'delete.end',
                      extra_usage_info=extra_info_backup_delete)
        ])


class TestVolumeSharedTargetsOnlineMigration(test.TestCase):
    """Unit tests for the shared_targets online data migration."""

    def setUp(self):
        super(TestVolumeSharedTargetsOnlineMigration, self).setUp()

        def _get_minimum_rpc_version_mock(ctxt, binary):
            binary_map = {
                'cinder-volume': rpcapi.VolumeAPI,
            }
            return binary_map[binary].RPC_API_VERSION

        self.patch('cinder.objects.Service.get_minimum_rpc_version',
                   side_effect=_get_minimum_rpc_version_mock)

        ctxt = context.get_admin_context()
        # default value in db for shared_targets on a volume
        # is True, so don't need to set it here explicitly
        for i in range(3):
            sqlalchemy_api.volume_create(
                ctxt,
                {'host': 'host1@lvm-driver1#lvm-driver1',
                 'service_uuid': 'f080f895-cff2-4eb3-9c61-050c060b59ad',
                 'volume_type_id': fake.VOLUME_TYPE_ID})

        values = {
            'host': 'host1@lvm-driver1',
            'binary': constants.VOLUME_BINARY,
            'topic': constants.VOLUME_TOPIC,
            'uuid': 'f080f895-cff2-4eb3-9c61-050c060b59ad'}
        utils.create_service(ctxt, values)
        self.ctxt = ctxt


cinder-27.0.0/cinder/tests/unit/test_context.py

# Copyright 2011 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
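
# NOTE: the test cases in this module are parametrized with the ddt
# library: @ddt.data supplies one entry per generated test and @ddt.unpack
# splits tuple/dict entries into arguments. A minimal, self-contained
# sketch of the pattern (the class name and data values here are purely
# illustrative, not part of this module):
#
#     @ddt.ddt
#     class ExampleTestCase(test.TestCase):
#
#         @ddt.data(('yes', True), ('no', False))
#         @ddt.unpack
#         def test_pairs(self, value, expected):
#             self.assertEqual(expected, value == 'yes')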
from unittest import mock import ddt from oslo_policy import policy as oslo_policy from cinder import context from cinder.objects import base as objects_base from cinder import policy from cinder.tests.unit import test @ddt.ddt class ContextTestCase(test.TestCase): def test_request_context_sets_is_admin(self): ctxt = context.RequestContext('111', '222', roles=['admin', 'weasel']) self.assertTrue(ctxt.is_admin) def test_request_context_sets_is_admin_upcase(self): ctxt = context.RequestContext('111', '222', roles=['Admin', 'weasel']) self.assertTrue(ctxt.is_admin) def test_request_context_read_deleted(self): ctxt = context.RequestContext('111', '222', read_deleted='yes') self.assertEqual('yes', ctxt.read_deleted) ctxt.read_deleted = 'no' self.assertEqual('no', ctxt.read_deleted) def test_request_context_read_deleted_invalid(self): self.assertRaises(ValueError, context.RequestContext, '111', '222', read_deleted=True) ctxt = context.RequestContext('111', '222') self.assertRaises(ValueError, setattr, ctxt, 'read_deleted', True) def test_request_context_elevated(self): user_context = context.RequestContext( 'fake_user', 'fake_project', is_admin=False) self.assertFalse(user_context.is_admin) admin_context = user_context.elevated() self.assertFalse(user_context.is_admin) self.assertTrue(admin_context.is_admin) self.assertNotIn('admin', user_context.roles) self.assertIn('admin', admin_context.roles) def test_service_catalog_nova_and_swift(self): service_catalog = [ {u'type': u'compute', u'name': u'nova'}, {u'type': u's3', u'name': u's3'}, {u'type': u'image', u'name': u'glance'}, {u'type': u'volume', u'name': u'cinder'}, {u'type': u'ec2', u'name': u'ec2'}, {u'type': u'object-store', u'name': u'swift'}, {u'type': u'identity', u'name': u'keystone'}, {u'type': None, u'name': u'S_withtypeNone'}, {u'type': u'co', u'name': u'S_partofcompute'}] compute_catalog = [{u'type': u'compute', u'name': u'nova'}] object_catalog = [{u'name': u'swift', u'type': u'object-store'}] ctxt = context.RequestContext('111', '222', service_catalog=service_catalog) self.assertEqual(4, len(ctxt.service_catalog)) return_compute = [v for v in ctxt.service_catalog if v['type'] == u'compute'] return_object = [v for v in ctxt.service_catalog if v['type'] == u'object-store'] self.assertEqual(compute_catalog, return_compute) self.assertEqual(object_catalog, return_object) def test_user_identity(self): ctx = context.RequestContext("user", "tenant", domain_id="domain", user_domain_id="user-domain", project_domain_id="project-domain") self.assertEqual('user tenant domain user-domain project-domain', ctx.to_dict()["user_identity"]) @ddt.data(('ec729e9946bc43c39ece6dfa7de70eea', 'c466a48309794261b64a4f02cfcc3d64'), ('ec729e9946bc43c39ece6dfa7de70eea', None), (None, 'c466a48309794261b64a4f02cfcc3d64'), (None, None)) @ddt.unpack @mock.patch('cinder.context.CONF') def test_cinder_internal_context(self, project_id, user_id, mock_conf): mock_conf.cinder_internal_tenant_project_id = project_id mock_conf.cinder_internal_tenant_user_id = user_id ctx = context.get_internal_tenant_context() if project_id is None or user_id is None: self.assertIsNone(ctx) else: self.assertEqual(user_id, ctx.user_id) self.assertEqual(project_id, ctx.project_id) def test_request_context_no_roles(self): ctxt = context.RequestContext('111', '222') self.assertEqual([], ctxt.roles) def test_request_context_with_roles(self): roles = ['alpha', 'beta'] ctxt = context.RequestContext('111', '222', roles=roles) self.assertEqual(roles, ctxt.roles) @ddt.ddt class 
ContextAuthorizeTestCase(test.TestCase):

    def setUp(self):
        super(ContextAuthorizeTestCase, self).setUp()
        rules = [
            oslo_policy.RuleDefault("test:something",
                                    "project_id:%(project_id)s"),
        ]
        policy.reset()
        policy.init()
        # before a policy rule can be used, its default has to be registered.
        policy._ENFORCER.register_defaults(rules)
        self.context = context.RequestContext(user_id='me',
                                              project_id='my_project')
        self.addCleanup(policy.reset)

    def _dict_target_obj(project_id):
        return {
            'user_id': 'me',
            'project_id': project_id,
        }

    def _real_target_obj(project_id):
        target_obj = objects_base.CinderObject()
        target_obj.user_id = 'me'
        target_obj.project_id = project_id
        return target_obj

    @ddt.data(
        {
            # PASS: target inherits 'my_project' from target_obj dict
            'target': None,
            'target_obj': _dict_target_obj('my_project'),
            'expected': True,
        },
        {
            # FAIL: target inherits 'other_project' from target_obj dict
            'target': None,
            'target_obj': _dict_target_obj('other_project'),
            'expected': False,
        },
        {
            # PASS: target inherits 'my_project' from target_obj object
            'target': None,
            'target_obj': _real_target_obj('my_project'),
            'expected': True,
        },
        {
            # FAIL: target inherits 'other_project' from target_obj object
            'target': None,
            'target_obj': _real_target_obj('other_project'),
            'expected': False,
        },
        {
            # PASS: target specifies 'my_project'
            'target': {'project_id': 'my_project'},
            'target_obj': None,
            'expected': True,
        },
        {
            # FAIL: target specifies 'other_project'
            'target': {'project_id': 'other_project'},
            'target_obj': None,
            'expected': False,
        },
        {
            # PASS: target inherits 'my_project' from the context
            'target': None,
            'target_obj': None,
            'expected': True,
        },
    )
    @ddt.unpack
    def test_authorize(self, target, target_obj, expected):
        result = self.context.authorize("test:something", target, target_obj,
                                        fatal=False)
        self.assertEqual(result, expected)


cinder-27.0.0/cinder/tests/unit/test_coordination.py

# Copyright 2015 Intel
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
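
# NOTE: the @coordination.synchronized tests below build lock names from
# str.format-style templates such as 'lock-{f_name}-{foo.val}-{bar[val]}',
# where f_name is the decorated function's name and the remaining fields
# are resolved from the call arguments. A standard-library-only sketch of
# that name expansion (the values 7 and 8 mirror the mocks used in the
# tests, the _Foo class is only illustrative):
#
#     class _Foo(object):
#         val = 7
#
#     template = 'lock-{f_name}-{foo.val}-{bar[val]}'
#     template.format(f_name='func', foo=_Foo(), bar={'val': 8})
#     # -> 'lock-func-7-8'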
import errno import inspect from unittest import mock import tooz.coordination import tooz.locking from cinder import coordination from cinder.tests.unit import test class Locked(Exception): pass class MockToozLock(tooz.locking.Lock): active_locks = set() def acquire(self, blocking=True): if self.name not in self.active_locks: self.active_locks.add(self.name) return True elif not blocking: return False else: raise Locked def release(self): self.active_locks.remove(self.name) class CoordinatorTestCase(test.TestCase): MOCK_TOOZ = False @mock.patch('cinder.coordination.cfg.CONF.coordination.backend_url') @mock.patch('cinder.coordination.Coordinator._get_file_path') @mock.patch('tooz.coordination.get_coordinator') def test_coordinator_start(self, get_coordinator, mock_get_file_path, mock_backend_url): crd = get_coordinator.return_value agent = coordination.Coordinator() self.assertIsNone(agent._file_path) agent.start() self.assertTrue(get_coordinator.called) self.assertTrue(crd.start.called) agent.start() crd.start.assert_called_once_with(start_heart=True) mock_get_file_path.assert_called_once_with(mock_backend_url) self.assertEqual(mock_get_file_path.return_value, agent._file_path) @mock.patch('tooz.coordination.get_coordinator') def test_coordinator_stop(self, get_coordinator): crd = get_coordinator.return_value agent = coordination.Coordinator() agent.start() self.assertIsNotNone(agent.coordinator) agent.stop() self.assertTrue(crd.stop.called) self.assertIsNone(agent.coordinator) agent.stop() crd.stop.assert_called_once_with() @mock.patch('tooz.coordination.get_coordinator') def test_coordinator_lock(self, get_coordinator): crd = get_coordinator.return_value crd.get_lock.side_effect = lambda n: MockToozLock(n) agent1 = coordination.Coordinator() agent1.start() agent2 = coordination.Coordinator() agent2.start() lock_name = 'lock' expected_name = lock_name.encode('ascii') self.assertNotIn(expected_name, MockToozLock.active_locks) with agent1.get_lock(lock_name): self.assertIn(expected_name, MockToozLock.active_locks) self.assertRaises(Locked, agent1.get_lock(lock_name).acquire) self.assertRaises(Locked, agent2.get_lock(lock_name).acquire) self.assertNotIn(expected_name, MockToozLock.active_locks) @mock.patch('tooz.coordination.get_coordinator') def test_coordinator_offline(self, get_coordinator): crd = get_coordinator.return_value crd.start.side_effect = tooz.coordination.ToozConnectionError('err') agent = coordination.Coordinator() self.assertRaises(tooz.coordination.ToozError, agent.start) self.assertFalse(agent.started) def test_get_file_path(self): backend_url = 'file:///opt/stack/data/cinder' res = coordination.COORDINATOR._get_file_path(backend_url) self.assertEqual('/opt/stack/data/cinder/cinder-', res) def test_get_file_path_non_file(self): backend_url = 'etcd3+http://192.168.1.95:2379' res = coordination.COORDINATOR._get_file_path(backend_url) self.assertIsNone(res) @mock.patch('cinder.coordination.COORDINATOR._file_path', None) @mock.patch('glob.glob') @mock.patch('os.remove') def test_remove_lock_non_file_lock(self, mock_remove, mock_glob): coordination.COORDINATOR.remove_lock('lock-file') mock_glob.assert_not_called() mock_remove.assert_not_called() @mock.patch('cinder.coordination.COORDINATOR._file_path', '/data/cinder-') @mock.patch('glob.glob') @mock.patch('os.remove') def test_remove_lock(self, mock_remove, mock_glob): mock_glob.return_value = ['/data/cinder-attachment_update-UUID-1', '/data/cinder-attachment_update-UUID-2'] 
coordination.COORDINATOR.remove_lock('attachment_update-UUID-*') mock_glob.assert_called_once_with( '/data/cinder-attachment_update-UUID-*') self.assertEqual(2, mock_remove.call_count) mock_remove.assert_has_calls( [mock.call('/data/cinder-attachment_update-UUID-1'), mock.call('/data/cinder-attachment_update-UUID-2')]) @mock.patch('cinder.coordination.COORDINATOR._file_path', '/data/cinder-') @mock.patch('cinder.coordination.LOG.warning') @mock.patch('glob.glob') @mock.patch('os.remove') def test_remove_lock_missing_file(self, mock_remove, mock_glob, mock_log): mock_glob.return_value = ['/data/cinder-attachment_update-UUID-1', '/data/cinder-attachment_update-UUID-2'] mock_remove.side_effect = [OSError(errno.ENOENT, ''), None] coordination.COORDINATOR.remove_lock('attachment_update-UUID-*') mock_glob.assert_called_once_with( '/data/cinder-attachment_update-UUID-*') self.assertEqual(2, mock_remove.call_count) mock_remove.assert_has_calls( [mock.call('/data/cinder-attachment_update-UUID-1'), mock.call('/data/cinder-attachment_update-UUID-2')]) mock_log.assert_not_called() @mock.patch('cinder.coordination.COORDINATOR._file_path', '/data/cinder-') @mock.patch('cinder.coordination.LOG.warning') @mock.patch('glob.glob') @mock.patch('os.remove') def test_remove_lock_unknown_failure(self, mock_remove, mock_glob, mock_log): mock_glob.return_value = ['/data/cinder-attachment_update-UUID-1', '/data/cinder-attachment_update-UUID-2'] mock_remove.side_effect = [ValueError(), None] coordination.COORDINATOR.remove_lock('attachment_update-UUID-*') mock_glob.assert_called_once_with( '/data/cinder-attachment_update-UUID-*') self.assertEqual(2, mock_remove.call_count) mock_remove.assert_has_calls( [mock.call('/data/cinder-attachment_update-UUID-1'), mock.call('/data/cinder-attachment_update-UUID-2')]) self.assertEqual(1, mock_log.call_count) class CoordinationTestCase(test.TestCase): @mock.patch.object(coordination.COORDINATOR, 'get_lock') def test_synchronized(self, get_lock): @coordination.synchronized('lock-{f_name}-{foo.val}-{bar[val]}') def func(foo, bar): pass foo = mock.Mock() foo.val = 7 bar = mock.MagicMock() bar.__getitem__.return_value = 8 func(foo, bar) get_lock.assert_called_with('lock-func-7-8') self.assertEqual(['foo', 'bar'], inspect.getfullargspec(func)[0]) @mock.patch('cinder.coordination.COORDINATOR.remove_lock') def test_synchronized_remove(self, mock_remove): coordination.synchronized_remove(mock.sentinel.glob_name) mock_remove.assert_called_once_with(mock.sentinel.glob_name) @mock.patch('cinder.coordination.COORDINATOR.remove_lock') def test_synchronized_remove_custom_coordinator(self, mock_remove): coordinator = mock.Mock() coordination.synchronized_remove(mock.sentinel.glob_name, coordinator) coordinator.remove_lock.assert_called_once_with( mock.sentinel.glob_name) @mock.patch.object(coordination.COORDINATOR, 'get_lock') def test_synchronized_multiple_templates(self, get_lock): """Test locks requested in the right order and duplicates removed.""" locks = ['lock-{f_name}-%s-{foo.val}-{bar[val]}' % i for i in range(3)] expect = [f'lock-func-{i}-7-8' for i in range(3)] @coordination.synchronized(locks[1], locks[0], locks[1], locks[2]) def func(foo, bar): pass foo = mock.Mock(val=7) bar = mock.MagicMock() bar.__getitem__.return_value = 8 func(foo, bar) self.assertEqual(len(expect), get_lock.call_count) get_lock.assert_has_calls([mock.call(lock) for lock in expect]) self.assertEqual(len(expect), get_lock.return_value.acquire.call_count) get_lock.return_value.acquire.assert_has_calls( 
            [mock.call(True)] * len(expect))
        self.assertEqual(['foo', 'bar'], inspect.getfullargspec(func)[0])

    @mock.patch('oslo_utils.timeutils.now', side_effect=[1, 2])
    def test___acquire(self, mock_now):
        lock = mock.Mock()
        # Using getattr to avoid AttributeError: module 'cinder.coordination'
        # has no attribute '_CoordinationTestCase__acquire'
        res = getattr(coordination, '__acquire')(lock,
                                                 mock.sentinel.blocking,
                                                 mock.sentinel.f_name)
        self.assertEqual(2, res)
        self.assertEqual(2, mock_now.call_count)
        mock_now.assert_has_calls([mock.call(), mock.call()])
        lock.acquire.assert_called_once_with(mock.sentinel.blocking)

    @mock.patch('oslo_utils.timeutils.now')
    def test___acquire_propagates_exception(self, mock_now):
        lock = mock.Mock()
        lock.acquire.side_effect = ValueError
        # Using getattr to avoid AttributeError: module 'cinder.coordination'
        # has no attribute '_CoordinationTestCase__acquire'
        self.assertRaises(ValueError,
                          getattr(coordination, '__acquire'),
                          lock, mock.sentinel.blocking, mock.sentinel.f_name)
        mock_now.assert_called_once_with()
        lock.acquire.assert_called_once_with(mock.sentinel.blocking)

    @mock.patch('oslo_utils.timeutils.now', return_value=2)
    def test___release(self, mock_now):
        lock = mock.Mock()
        # Using getattr to avoid AttributeError: module 'cinder.coordination'
        # has no attribute '_CoordinationTestCase__release'
        getattr(coordination, '__release')(lock, 1, mock.sentinel.f_name)
        mock_now.assert_called_once_with()
        lock.release.assert_called_once_with()

    @mock.patch('oslo_utils.timeutils.now')
    def test___release_ignores_exception(self, mock_now):
        lock = mock.Mock()
        lock.release.side_effect = ValueError
        # Using getattr to avoid AttributeError: module 'cinder.coordination'
        # has no attribute '_CoordinationTestCase__release'
        getattr(coordination, '__release')(lock, 1, mock.sentinel.f_name)
        mock_now.assert_not_called()
        lock.release.assert_called_once_with()


cinder-27.0.0/cinder/tests/unit/test_db_api.py

# Copyright 2014 IBM Corp.
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
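
# NOTE: DBCommonFilterTestCase below exercises the inexact-filter
# convention used by the DB API: a trailing '~' on a filter key asks for a
# substring (SQL LIKE) match rather than an exact one. Roughly, for a
# volume whose display_name is 'fake_name_001' (a sketch based on the
# tests below, not an additional test case):
#
#     db.volume_get_all(ctxt, filters={'display_name': 'fake_name'})
#     # -> no rows: the exact match fails
#     db.volume_get_all(ctxt, filters={'display_name~': 'fake_name'})
#     # -> one row: behaves like LIKE '%fake_name%'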
"""Unit tests for cinder.db.api.""" import datetime import enum from unittest import mock from unittest.mock import call import ddt from oslo_config import cfg import oslo_db from oslo_utils import timeutils from oslo_utils import uuidutils from sqlalchemy.sql import operators from cinder.api import common from cinder import context from cinder import db from cinder.db.sqlalchemy import api as sqlalchemy_api from cinder.db.sqlalchemy import models from cinder import exception from cinder import objects from cinder.objects import fields from cinder import quota from cinder.tests.unit import fake_constants as fake from cinder.tests.unit import test from cinder.tests.unit import utils CONF = cfg.CONF THREE = 3 THREE_HUNDREDS = 300 ONE_HUNDREDS = 100 UTC_NOW = timeutils.utcnow() def _quota_reserve(context, project_id, **resource_dict): """Create sample Quota, QuotaUsage and Reservation objects. There is no method db.quota_usage_create(), so we have to use db.quota_reserve() for creating QuotaUsage objects. Returns reservations uuids. """ def get_sync(resource, usage): def sync(elevated, project_id, session): return {resource: usage} return sync if not resource_dict: resource_dict = {'volumes': 1, 'gigabytes': 2} quotas = {} resources = {} deltas = {} for resource, value in resource_dict.items(): quota_obj = db.quota_create(context, project_id, resource, value) quotas[resource] = quota_obj.hard_limit resources[resource] = quota.ReservableResource(resource, '_sync_%s' % resource) deltas[resource] = value return db.quota_reserve( context, resources, quotas, deltas, datetime.datetime.utcnow(), until_refresh=None, max_age=datetime.timedelta(days=1), project_id=project_id ) class BaseTest(test.TestCase, test.ModelsObjectComparatorMixin): def setUp(self): super(BaseTest, self).setUp() self.ctxt = context.get_admin_context() @ddt.ddt class DBCommonFilterTestCase(BaseTest): def setUp(self): super(DBCommonFilterTestCase, self).setUp() self.fake_volume = db.volume_create( self.ctxt, {'display_name': 'fake_name', 'volume_type_id': fake.VOLUME_TYPE_ID}) self.fake_group = utils.create_group( self.ctxt, group_type_id=fake.GROUP_TYPE_ID, volume_type_ids=[fake.VOLUME_TYPE_ID]) @mock.patch('sqlalchemy.orm.query.Query.filter') def test__process_model_like_filter(self, mock_filter): filters = { 'display_name': 'fake_name', 'display_description': 'fake_description', 'host': 123, 'status': [], } with sqlalchemy_api.main_context_manager.writer.using(self.ctxt): query = self.ctxt.session.query(models.Volume) mock_filter.return_value = query with mock.patch.object(operators.Operators, 'op') as mock_op: def fake_operator(value): return value mock_op.return_value = fake_operator sqlalchemy_api._process_model_like_filter( models.Volume, query, filters, ) calls = [ call('%fake_description%'), call('%fake_name%'), call('%123%'), ] mock_filter.assert_has_calls(calls, any_order=True) @ddt.data({'handler': [db.volume_create, db.volume_get_all], 'column': 'display_name', 'resource': 'volume'}, {'handler': [db.snapshot_create, db.snapshot_get_all], 'column': 'display_name', 'resource': 'snapshot'}, {'handler': [db.message_create, db.message_get_all], 'column': 'message_level', 'resource': 'message'}, {'handler': [db.backup_create, db.backup_get_all], 'column': 'display_name', 'resource': 'backup'}, {'handler': [db.group_create, db.group_get_all], 'column': 'name', 'resource': 'group'}, {'handler': [utils.create_group_snapshot, db.group_snapshot_get_all], 'column': 'name', 'resource': 'group_snapshot'}) @ddt.unpack def 
test_resource_get_all_like_filter(self, handler, column, resource): for index in ['001', '002']: option = {column: "fake_%s_%s" % (column, index)} if resource in ['volume', 'snapshot']: option['volume_type_id'] = fake.VOLUME_TYPE_ID if resource in ['snapshot', 'backup']: option['volume_id'] = self.fake_volume.id if resource in ['message']: option['project_id'] = fake.PROJECT_ID option['event_id'] = fake.UUID1 if resource in ['group_snapshot']: handler[0](self.ctxt, self.fake_group.id, name="fake_%s_%s" % (column, index)) else: handler[0](self.ctxt, option) # test exact match exact_filter = {column: 'fake_%s' % column} resources = handler[1](self.ctxt, filters=exact_filter) self.assertEqual(0, len(resources)) # test inexact match inexact_filter = {"%s~" % column: 'fake_%s' % column} resources = handler[1](self.ctxt, filters=inexact_filter) self.assertEqual(2, len(resources)) @ddt.ddt class DBAPIServiceTestCase(BaseTest): """Unit tests for cinder.db.api.service_*.""" def test_service_create(self): # Add a cluster value to the service values = {'cluster_name': 'cluster'} service = utils.create_service(self.ctxt, values) self.assertIsNotNone(service['id']) expected = utils.default_service_values() expected.update(values) for key, value in expected.items(): self.assertEqual(value, service[key]) def test_service_destroy(self): service1 = utils.create_service(self.ctxt, {}) service2 = utils.create_service(self.ctxt, {'host': 'fake_host2'}) self.assertDictEqual( {'deleted': True, 'deleted_at': mock.ANY}, db.service_destroy(self.ctxt, service1['id'])) self.assertRaises(exception.ServiceNotFound, db.service_get, self.ctxt, service1['id']) self._assertEqualObjects( service2, db.service_get(self.ctxt, service2['id'])) def test_service_update(self): service = utils.create_service(self.ctxt, {}) new_values = { 'host': 'fake_host1', 'binary': 'fake_binary1', 'topic': 'fake_topic1', 'report_count': 4, 'disabled': True } db.service_update(self.ctxt, service['id'], new_values) updated_service = db.service_get(self.ctxt, service['id']) for key, value in new_values.items(): self.assertEqual(value, updated_service[key]) def test_service_update_not_found_exception(self): self.assertRaises(exception.ServiceNotFound, db.service_update, self.ctxt, 100500, {}) def test_service_get(self): service1 = utils.create_service(self.ctxt, {}) real_service1 = db.service_get(self.ctxt, service1['id']) self._assertEqualObjects(service1, real_service1) def test_service_get_by_cluster(self): service = utils.create_service(self.ctxt, {'cluster_name': 'cluster@backend'}) # Search with an exact match real_service = db.service_get(self.ctxt, cluster_name='cluster@backend') self._assertEqualObjects(service, real_service) # Search without the backend real_service = db.service_get(self.ctxt, cluster_name='cluster') self._assertEqualObjects(service, real_service) def test_service_get_not_found_exception(self): self.assertRaises(exception.ServiceNotFound, db.service_get, self.ctxt, 100500) def test_service_get_by_host_and_topic(self): service1 = utils.create_service(self.ctxt, {'host': 'host1', 'topic': 'topic1'}) real_service1 = db.service_get(self.ctxt, host='host1', topic='topic1') self._assertEqualObjects(service1, real_service1) @ddt.data('disabled', 'frozen') def test_service_get_all_boolean_by_cluster(self, field_name): values = [ # Enabled/Unfrozen services {'host': 'host1', 'binary': 'b1', field_name: False}, {'host': 'host2', 'binary': 'b1', field_name: False, 'cluster_name': 'enabled_unfrozen_cluster'}, {'host': 'host3', 
'binary': 'b1', field_name: True, 'cluster_name': 'enabled_unfrozen_cluster'}, # Disabled/Frozen services {'host': 'host4', 'binary': 'b1', field_name: True}, {'host': 'host5', 'binary': 'b1', field_name: False, 'cluster_name': 'disabled_frozen_cluster'}, {'host': 'host6', 'binary': 'b1', field_name: True, 'cluster_name': 'disabled_frozen_cluster'}, ] db.cluster_create(self.ctxt, {'name': 'enabled_unfrozen_cluster', 'binary': 'b1', field_name: False}), db.cluster_create(self.ctxt, {'name': 'disabled_frozen_cluster', 'binary': 'b1', field_name: True}), services = [utils.create_service(self.ctxt, vals) for vals in values] false_services = db.service_get_all(self.ctxt, **{field_name: False}) true_services = db.service_get_all(self.ctxt, **{field_name: True}) self.assertSetEqual({s.host for s in services[:3]}, {s.host for s in false_services}) self.assertSetEqual({s.host for s in services[3:]}, {s.host for s in true_services}) def test_service_get_all(self): expired = (datetime.datetime.utcnow() - datetime.timedelta(seconds=CONF.service_down_time + 1)) db.cluster_create(self.ctxt, {'name': 'cluster_disabled', 'binary': 'fake_binary', 'disabled': True}) db.cluster_create(self.ctxt, {'name': 'cluster_enabled', 'binary': 'fake_binary', 'disabled': False}) values = [ # Now we are updating updated_at at creation as well so this one # is up. {'host': 'host1', 'binary': 'b1', 'created_at': expired}, {'host': 'host1@ceph', 'binary': 'b2'}, {'host': 'host2', 'binary': 'b2'}, {'disabled': False, 'cluster_name': 'cluster_enabled'}, {'disabled': True, 'cluster_name': 'cluster_enabled'}, {'disabled': False, 'cluster_name': 'cluster_disabled'}, {'disabled': True, 'cluster_name': 'cluster_disabled'}, {'disabled': True, 'created_at': expired, 'updated_at': expired}, ] services = [utils.create_service(self.ctxt, vals) for vals in values] disabled_services = services[-3:] non_disabled_services = services[:-3] up_services = services[:7] down_services = [services[7]] expected = services[:2] expected_bin = services[1:3] compares = [ (services, db.service_get_all(self.ctxt)), (expected, db.service_get_all(self.ctxt, host='host1')), (expected_bin, db.service_get_all(self.ctxt, binary='b2')), (disabled_services, db.service_get_all(self.ctxt, disabled=True)), (non_disabled_services, db.service_get_all(self.ctxt, disabled=False)), (up_services, db.service_get_all(self.ctxt, is_up=True)), (down_services, db.service_get_all(self.ctxt, is_up=False)), ] for i, comp in enumerate(compares): self._assertEqualListsOfObjects(*comp, msg='Error comparing %s' % i) def test_service_get_all_by_topic(self): values = [ {'host': 'host1', 'topic': 't1'}, {'host': 'host2', 'topic': 't1'}, {'host': 'host4', 'disabled': True, 'topic': 't1'}, {'host': 'host3', 'topic': 't2'} ] services = [utils.create_service(self.ctxt, vals) for vals in values] expected = services[:3] real = db.service_get_all(self.ctxt, topic='t1') self._assertEqualListsOfObjects(expected, real) def test_service_get_all_by_binary(self): values = [ {'host': 'host1', 'binary': 'b1'}, {'host': 'host2', 'binary': 'b1'}, {'host': 'host4', 'disabled': True, 'binary': 'b1'}, {'host': 'host3', 'binary': 'b2'} ] services = [utils.create_service(self.ctxt, vals) for vals in values] expected = services[:3] real = db.service_get_all(self.ctxt, binary='b1') self._assertEqualListsOfObjects(expected, real) def test_service_get_by_args(self): values = [ {'host': 'host1', 'binary': 'a'}, {'host': 'host2', 'binary': 'b'} ] services = [utils.create_service(self.ctxt, vals) for vals in 
values] service1 = db.service_get(self.ctxt, host='host1', binary='a') self._assertEqualObjects(services[0], service1) service2 = db.service_get(self.ctxt, host='host2', binary='b') self._assertEqualObjects(services[1], service2) def test_service_get_all_by_cluster(self): values = [ {'host': 'host1', 'cluster_name': 'cluster'}, {'host': 'host2', 'cluster_name': 'cluster'}, {'host': 'host3', 'cluster_name': 'cluster@backend'}, {'host': 'host4', 'cluster_name': 'cluster2'}, ] services = [utils.create_service(self.ctxt, vals) for vals in values] expected = services[:3] real = db.service_get_all(self.ctxt, cluster_name='cluster') self._assertEqualListsOfObjects(expected, real) def test_service_get_all_by_host_or_cluster(self): values = [ {'host': 'host1', 'cluster_name': 'cluster'}, {'host': 'host2', 'cluster_name': 'host1'}, {'host': 'host3', 'cluster_name': 'cluster@backend'}, {'host': 'host4', 'cluster_name': 'cluster2'}, ] services = [utils.create_service(self.ctxt, vals) for vals in values] expected = services[0:2] real = db.service_get_all(self.ctxt, host_or_cluster='host1') self._assertEqualListsOfObjects(expected, real) def test_service_get_by_args_not_found_exception(self): self.assertRaises(exception.ServiceNotFound, db.service_get, self.ctxt, host='non-exists-host', binary='a') @mock.patch('sqlalchemy.orm.query.Query.filter_by') def test_service_get_by_args_with_case_insensitive(self, filter_by): CONF.set_default('connection', 'mysql://', 'database') db.service_get(self.ctxt, host='host', binary='a') self.assertNotEqual(0, filter_by.call_count) self.assertEqual(1, filter_by.return_value.filter.call_count) or_op = filter_by.return_value.filter.call_args[0][0].clauses[0] self.assertIsInstance(or_op, sqlalchemy_api.sql.elements.BinaryExpression) binary_op = or_op.right self.assertIsInstance(binary_op, sqlalchemy_api.sql.functions.Function) self.assertEqual('binary', binary_op.name) @ddt.ddt class DBAPIVolumeTestCase(BaseTest): """Unit tests for cinder.db.api.volume_*.""" def test_volume_create(self): volume = db.volume_create( self.ctxt, {'host': 'host1', 'volume_type_id': fake.VOLUME_TYPE_ID}) self.assertTrue(uuidutils.is_uuid_like(volume['id'])) self.assertEqual('host1', volume.host) def test_volume_attached_invalid_uuid(self): self.assertRaises(exception.InvalidUUID, db.volume_attached, self.ctxt, 42, 'invalid-uuid', None, '/tmp') def test_volume_attached_to_instance(self): volume = db.volume_create( self.ctxt, {'host': 'host1', 'volume_type_id': fake.VOLUME_TYPE_ID}) instance_uuid = fake.INSTANCE_ID values = {'volume_id': volume['id'], 'instance_uuid': instance_uuid, 'attach_status': fields.VolumeAttachStatus.ATTACHING, } attachment = db.volume_attach(self.ctxt, values) volume_db, updated_values = db.volume_attached( self.ctxt, attachment['id'], instance_uuid, None, '/tmp') expected_updated_values = { 'mountpoint': '/tmp', 'attach_status': fields.VolumeAttachStatus.ATTACHED, 'instance_uuid': instance_uuid, 'attached_host': None, 'attach_time': mock.ANY, 'attach_mode': 'rw'} self.assertDictEqual(expected_updated_values, updated_values) volume = db.volume_get(self.ctxt, volume['id']) attachment = db.volume_attachment_get(self.ctxt, attachment['id']) self._assertEqualObjects(volume, volume_db, ignored_keys='volume_attachment') self._assertEqualListsOfObjects(volume.volume_attachment, volume_db.volume_attachment, 'volume') self.assertEqual('in-use', volume['status']) self.assertEqual('/tmp', attachment['mountpoint']) self.assertEqual(fields.VolumeAttachStatus.ATTACHED, 
attachment['attach_status']) self.assertEqual(instance_uuid, attachment['instance_uuid']) self.assertIsNone(attachment['attached_host']) self.assertEqual(volume.project_id, attachment['volume']['project_id']) def test_volume_attached_to_host(self): volume = db.volume_create( self.ctxt, {'host': 'host1', 'volume_type_id': fake.VOLUME_TYPE_ID}) host_name = 'fake_host' values = {'volume_id': volume['id'], 'attached_host': host_name, 'attach_status': fields.VolumeAttachStatus.ATTACHING, } attachment = db.volume_attach(self.ctxt, values) volume_db, updated_values = db.volume_attached( self.ctxt, attachment['id'], None, host_name, '/tmp') expected_updated_values = { 'mountpoint': '/tmp', 'attach_status': fields.VolumeAttachStatus.ATTACHED, 'instance_uuid': None, 'attached_host': host_name, 'attach_time': mock.ANY, 'attach_mode': 'rw'} self.assertDictEqual(expected_updated_values, updated_values) volume = db.volume_get(self.ctxt, volume['id']) self._assertEqualObjects(volume, volume_db, ignored_keys='volume_attachment') self._assertEqualListsOfObjects(volume.volume_attachment, volume_db.volume_attachment, 'volume') attachment = db.volume_attachment_get(self.ctxt, attachment['id']) self.assertEqual('in-use', volume['status']) self.assertEqual('/tmp', attachment['mountpoint']) self.assertEqual(fields.VolumeAttachStatus.ATTACHED, attachment['attach_status']) self.assertIsNone(attachment['instance_uuid']) self.assertEqual(attachment['attached_host'], host_name) self.assertEqual(volume.project_id, attachment['volume']['project_id']) def test_volume_data_get_for_host(self): for i in range(THREE): for j in range(THREE): db.volume_create( self.ctxt, {'host': 'h%d' % i, 'size': ONE_HUNDREDS, 'volume_type_id': fake.VOLUME_TYPE_ID}) for i in range(THREE): self.assertEqual((THREE, THREE_HUNDREDS), db.volume_data_get_for_host( self.ctxt, 'h%d' % i)) def test_volume_data_get_for_host_for_multi_backend(self): for i in range(THREE): for j in range(THREE): db.volume_create( self.ctxt, {'host': 'h%d@lvmdriver-1#lvmdriver-1' % i, 'size': ONE_HUNDREDS, 'volume_type_id': fake.VOLUME_TYPE_ID}) for i in range(THREE): self.assertEqual((THREE, THREE_HUNDREDS), db.volume_data_get_for_host( self.ctxt, 'h%d@lvmdriver-1' % i)) def test_volume_data_get_for_project(self): for i in range(THREE): for j in range(THREE): db.volume_create( self.ctxt, {'project_id': 'p%d' % i, 'size': ONE_HUNDREDS, 'host': 'h-%d-%d' % (i, j), 'volume_type_id': fake.VOLUME_TYPE_ID}) for i in range(THREE): self.assertEqual((THREE, THREE_HUNDREDS), db.volume_data_get_for_project( self.ctxt, 'p%d' % i)) @mock.patch.object(sqlalchemy_api, '_volume_data_get_for_project') def test_volume_data_get_for_project_migrating(self, mock_vol_data): expected = (mock.sentinel.count, mock.sentinel.gb) mock_vol_data.return_value = expected res = db.volume_data_get_for_project(self.ctxt, mock.sentinel.project_id, mock.sentinel.host) self.assertEqual(expected, res) mock_vol_data.assert_called_once_with(self.ctxt, mock.sentinel.project_id, host=mock.sentinel.host, skip_internal=False) @ddt.data((True, THREE_HUNDREDS, THREE), (False, THREE_HUNDREDS + 2 * ONE_HUNDREDS, THREE + 2)) @ddt.unpack def test__volume_data_get_for_project_migrating(self, skip_internal, gigabytes, count): for i in range(2): db.volume_create(self.ctxt, {'project_id': 'project', 'size': ONE_HUNDREDS, 'host': 'h-%d' % i, 'volume_type_id': fake.VOLUME_TYPE_ID}) # This volume is migrating and will be counted db.volume_create(self.ctxt, {'project_id': 'project', 'size': ONE_HUNDREDS, 'host': 'h-%d' % i, 
'volume_type_id': fake.VOLUME_TYPE_ID, 'migration_status': 'migrating'}) # This one will not be counted db.volume_create(self.ctxt, {'project_id': 'project', 'size': ONE_HUNDREDS, 'host': 'h-%d' % i, 'volume_type_id': fake.VOLUME_TYPE_ID, 'migration_status': 'target:vol-id', 'use_quota': False}) # This one will not be counted db.volume_create(self.ctxt, {'project_id': 'project', 'size': ONE_HUNDREDS, 'host': 'h-%d' % i, 'volume_type_id': fake.VOLUME_TYPE_ID, 'use_quota': False}) with sqlalchemy_api.main_context_manager.reader.using(self.ctxt): result = sqlalchemy_api._volume_data_get_for_project( self.ctxt, 'project', skip_internal=skip_internal) self.assertEqual((count, gigabytes), result) @ddt.data((True, THREE_HUNDREDS, THREE), (False, THREE_HUNDREDS + ONE_HUNDREDS, THREE + 1)) @ddt.unpack def test__volume_data_get_for_project_temporary(self, skip_internal, gigabytes, count): for i in range(3): db.volume_create(self.ctxt, {'project_id': 'project', 'size': ONE_HUNDREDS, 'host': 'h-%d' % i, 'volume_type_id': fake.VOLUME_TYPE_ID}) # This is a temporary volume db.volume_create(self.ctxt, {'project_id': 'project', 'size': ONE_HUNDREDS, 'host': 'h-%d' % i, 'volume_type_id': fake.VOLUME_TYPE_ID, 'use_quota': False}) with sqlalchemy_api.main_context_manager.reader.using(self.ctxt): result = sqlalchemy_api._volume_data_get_for_project( self.ctxt, 'project', skip_internal=skip_internal) self.assertEqual((count, gigabytes), result) def test_volume_data_get_for_project_with_host(self): db.volume_create(self.ctxt, {'project_id': fake.PROJECT_ID, 'size': 100, 'host': 'host1', 'volume_type_id': fake.VOLUME_TYPE_ID}) db.volume_create(self.ctxt, {'project_id': fake.PROJECT2_ID, 'size': 200, 'host': 'host1', 'volume_type_id': fake.VOLUME_TYPE_ID}) db.volume_create(self.ctxt, {'project_id': fake.PROJECT2_ID, 'size': 300, 'host': 'host2', 'volume_type_id': fake.VOLUME_TYPE_ID}) resp = db.volume_data_get_for_project(self.ctxt, fake.PROJECT2_ID, host='host2') self.assertEqual((1, 300), resp) def test_volume_detached_from_instance(self): volume = db.volume_create(self.ctxt, {'volume_type_id': fake.VOLUME_TYPE_ID}) instance_uuid = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa' values = {'volume_id': volume['id'], 'instance_uuid': instance_uuid, 'attach_status': fields.VolumeAttachStatus.ATTACHING, } attachment = db.volume_attach(self.ctxt, values) db.volume_attached(self.ctxt, attachment.id, instance_uuid, None, '/tmp') volume_updates, attachment_updates = ( db.volume_detached(self.ctxt, volume.id, attachment.id)) expected_attachment = { 'attach_status': fields.VolumeAttachStatus.DETACHED, 'detach_time': mock.ANY, 'deleted': True, 'deleted_at': mock.ANY, } self.assertDictEqual(expected_attachment, attachment_updates) expected_volume = { 'status': 'available', 'attach_status': fields.VolumeAttachStatus.DETACHED, } self.assertDictEqual(expected_volume, volume_updates) volume = db.volume_get(self.ctxt, volume.id) self.assertRaises(exception.VolumeAttachmentNotFound, db.volume_attachment_get, self.ctxt, attachment.id) self.assertEqual('available', volume.status) def test_volume_detached_two_attachments(self): volume = db.volume_create(self.ctxt, {'volume_type_id': fake.VOLUME_TYPE_ID}) instance_uuid = fake.INSTANCE_ID values = {'volume_id': volume.id, 'instance_uuid': instance_uuid, 'attach_status': fields.VolumeAttachStatus.ATTACHING, } attachment = db.volume_attach(self.ctxt, values) db.volume_attached(self.ctxt, attachment.id, instance_uuid, None, '/tmp') values2 = {'volume_id': volume.id, 'instance_uuid': 
fake.OBJECT_ID, 'attach_status': fields.VolumeAttachStatus.ATTACHING, } attachment2 = db.volume_attach(self.ctxt, values2) db.volume_attached(self.ctxt, attachment2.id, instance_uuid, None, '/tmp') volume_updates, attachment_updates = ( db.volume_detached(self.ctxt, volume.id, attachment.id)) expected_attachment = { 'attach_status': fields.VolumeAttachStatus.DETACHED, 'detach_time': mock.ANY, 'deleted': True, 'deleted_at': mock.ANY, } self.assertDictEqual(expected_attachment, attachment_updates) expected_volume = { 'status': 'in-use', 'attach_status': fields.VolumeAttachStatus.ATTACHED, } self.assertDictEqual(expected_volume, volume_updates) volume = db.volume_get(self.ctxt, volume.id) self.assertRaises(exception.VolumeAttachmentNotFound, db.volume_attachment_get, self.ctxt, attachment.id) self.assertEqual('in-use', volume.status) def test_volume_detached_invalid_attachment(self): volume = db.volume_create(self.ctxt, {'volume_type_id': fake.VOLUME_TYPE_ID}) # detach it again volume_updates, attachment_updates = ( db.volume_detached(self.ctxt, volume.id, fake.ATTACHMENT_ID)) self.assertIsNone(attachment_updates) expected_volume = { 'status': 'available', 'attach_status': fields.VolumeAttachStatus.DETACHED, } self.assertDictEqual(expected_volume, volume_updates) volume = db.volume_get(self.ctxt, volume.id) self.assertEqual('available', volume.status) def test_volume_detached_from_host(self): volume = db.volume_create(self.ctxt, {'volume_type_id': fake.VOLUME_TYPE_ID}) host_name = 'fake_host' values = {'volume_id': volume.id, 'attach_host': host_name, 'attach_status': fields.VolumeAttachStatus.ATTACHING, } attachment = db.volume_attach(self.ctxt, values) db.volume_attached(self.ctxt, attachment.id, None, host_name, '/tmp') volume_updates, attachment_updates = ( db.volume_detached(self.ctxt, volume.id, attachment.id)) expected_attachment = { 'attach_status': fields.VolumeAttachStatus.DETACHED, 'detach_time': mock.ANY, 'deleted': True, 'deleted_at': mock.ANY} self.assertDictEqual(expected_attachment, attachment_updates) expected_volume = { 'status': 'available', 'attach_status': fields.VolumeAttachStatus.DETACHED, } self.assertDictEqual(expected_volume, volume_updates) volume = db.volume_get(self.ctxt, volume.id) self.assertRaises(exception.VolumeAttachmentNotFound, db.volume_attachment_get, self.ctxt, attachment.id) self.assertEqual('available', volume.status) def test_volume_get(self): volume = db.volume_create(self.ctxt, {'volume_type_id': fake.VOLUME_TYPE_ID}) self._assertEqualObjects(volume, db.volume_get(self.ctxt, volume['id'])) @mock.patch('oslo_utils.timeutils.utcnow', return_value=UTC_NOW) def test_volume_destroy(self, utcnow_mock): volume = db.volume_create(self.ctxt, {'volume_type_id': fake.VOLUME_TYPE_ID}) self.assertDictEqual( {'status': 'deleted', 'deleted': True, 'deleted_at': UTC_NOW, 'migration_status': None}, db.volume_destroy(self.ctxt, volume['id'])) self.assertRaises(exception.VolumeNotFound, db.volume_get, self.ctxt, volume['id']) @mock.patch('cinder.db.sqlalchemy.api.model_query') def test_volume_destroy_deletes_dependent_data(self, mock_model_query): """Addresses LP Bug #1542169.""" db.volume_destroy(self.ctxt, fake.VOLUME_ID) expected_call_count = 1 + len(sqlalchemy_api.VOLUME_DEPENDENT_MODELS) self.assertEqual(expected_call_count, mock_model_query.call_count) def test_volume_get_all(self): volumes = [db.volume_create(self.ctxt, {'host': 'h%d' % i, 'size': i, 'volume_type_id': fake.VOLUME_TYPE_ID}) for i in range(3)] self._assertEqualListsOfObjects(volumes, 
db.volume_get_all( self.ctxt, None, None, ['host'], None)) @ddt.data('cluster_name', 'host') def test_volume_get_all_filter_host_and_cluster(self, field): volumes = [] for i in range(2): for value in ('host%d@backend#pool', 'host%d@backend', 'host%d'): kwargs = {field: value % i} volumes.append(utils.create_volume(self.ctxt, **kwargs)) for i in range(3): filters = {field: getattr(volumes[i], field)} result = db.volume_get_all(self.ctxt, filters=filters) self.assertEqual(i + 1, len(result)) self.assertSetEqual({v.id for v in volumes[:i + 1]}, {v.id for v in result}) def test_volume_get_all_marker_passed(self): volumes = [ db.volume_create( self.ctxt, {'id': 1, 'volume_type_id': fake.VOLUME_TYPE_ID}), db.volume_create( self.ctxt, {'id': 2, 'volume_type_id': fake.VOLUME_TYPE_ID}), db.volume_create( self.ctxt, {'id': 3, 'volume_type_id': fake.VOLUME_TYPE_ID}), db.volume_create( self.ctxt, {'id': 4, 'volume_type_id': fake.VOLUME_TYPE_ID}), ] self._assertEqualListsOfObjects(volumes[2:], db.volume_get_all( self.ctxt, 2, 2, ['id'], ['asc'])) def test_volume_get_all_by_host(self): volumes = [] for i in range(3): volumes.append([db.volume_create( self.ctxt, {'host': 'h%d' % i, 'volume_type_id': fake.VOLUME_TYPE_ID}) for j in range(3)]) for i in range(3): self._assertEqualListsOfObjects(volumes[i], db.volume_get_all_by_host( self.ctxt, 'h%d' % i)) def test_volume_get_all_by_host_with_pools(self): volumes = [] vol_on_host_wo_pool = [db.volume_create( self.ctxt, {'host': 'foo', 'volume_type_id': fake.VOLUME_TYPE_ID}) for j in range(3)] vol_on_host_w_pool = [db.volume_create( self.ctxt, {'host': 'foo#pool0', 'volume_type_id': fake.VOLUME_TYPE_ID})] volumes.append((vol_on_host_wo_pool + vol_on_host_w_pool)) # insert an additional record that doesn't belong to the same # host as 'foo' and verify that it is not included in the result db.volume_create(self.ctxt, {'host': 'foobar', 'volume_type_id': fake.VOLUME_TYPE_ID}) self._assertEqualListsOfObjects(volumes[0], db.volume_get_all_by_host( self.ctxt, 'foo')) def test_volume_get_all_by_host_with_filters(self): v1 = db.volume_create( self.ctxt, {'host': 'h1', 'display_name': 'v1', 'status': 'available', 'volume_type_id': fake.VOLUME_TYPE_ID}) v2 = db.volume_create( self.ctxt, {'host': 'h1', 'display_name': 'v2', 'status': 'available', 'volume_type_id': fake.VOLUME_TYPE_ID}) v3 = db.volume_create( self.ctxt, {'host': 'h2', 'display_name': 'v1', 'status': 'available', 'volume_type_id': fake.VOLUME_TYPE_ID}) self._assertEqualListsOfObjects( [v1], db.volume_get_all_by_host(self.ctxt, 'h1', filters={'display_name': 'v1'})) self._assertEqualListsOfObjects( [v1, v2], db.volume_get_all_by_host( self.ctxt, 'h1', filters={'display_name': ['v1', 'v2', 'foo']})) self._assertEqualListsOfObjects( [v1, v2], db.volume_get_all_by_host(self.ctxt, 'h1', filters={'status': 'available'})) self._assertEqualListsOfObjects( [v3], db.volume_get_all_by_host(self.ctxt, 'h2', filters={'display_name': 'v1'})) # No match vols = db.volume_get_all_by_host(self.ctxt, 'h1', filters={'status': 'foo'}) self.assertEqual([], vols) # Bogus filter, should return empty list vols = db.volume_get_all_by_host(self.ctxt, 'h1', filters={'foo': 'bar'}) self.assertEqual([], vols) def test_volume_get_all_by_group(self): volumes = [] for i in range(3): volumes.append([db.volume_create(self.ctxt, { 'consistencygroup_id': 'g%d' % i, 'volume_type_id': fake.VOLUME_TYPE_ID}) for j in range(3)]) for i in range(3): self._assertEqualListsOfObjects(volumes[i], db.volume_get_all_by_group( self.ctxt, 'g%d' % i)) def 
test_volume_get_all_by_group_with_filters(self): v1 = db.volume_create(self.ctxt, {'consistencygroup_id': 'g1', 'display_name': 'v1', 'volume_type_id': fake.VOLUME_TYPE_ID}) v2 = db.volume_create(self.ctxt, {'consistencygroup_id': 'g1', 'display_name': 'v2', 'volume_type_id': fake.VOLUME_TYPE_ID}) v3 = db.volume_create(self.ctxt, {'consistencygroup_id': 'g2', 'display_name': 'v1', 'volume_type_id': fake.VOLUME_TYPE_ID}) self._assertEqualListsOfObjects( [v1], db.volume_get_all_by_group(self.ctxt, 'g1', filters={'display_name': 'v1'})) self._assertEqualListsOfObjects( [v1, v2], db.volume_get_all_by_group(self.ctxt, 'g1', filters={'display_name': ['v1', 'v2']})) self._assertEqualListsOfObjects( [v3], db.volume_get_all_by_group(self.ctxt, 'g2', filters={'display_name': 'v1'})) # No match vols = db.volume_get_all_by_group(self.ctxt, 'g1', filters={'display_name': 'foo'}) self.assertEqual([], vols) # Bogus filter, should return empty list vols = db.volume_get_all_by_group(self.ctxt, 'g1', filters={'foo': 'bar'}) self.assertEqual([], vols) def test_volume_update_all_by_service(self): volume_service_uuid = '918f24b6-c4c9-48e6-86c6-6871e91f4779' alt_vol_service_uuid = '4b3356a0-31e1-4cec-af1c-07e1e0d7dcf0' service_uuid_1 = 'c7b169f8-8da6-4330-b462-0467069371e2' service_uuid_2 = '38d41b71-2f4e-4d3e-8206-d51ace608bca' host = 'fake_host' alt_host = 'alt_fake_host' binary = 'cinder-volume' # Create 3 volumes with host 'fake_host' for i in range(3): db.volume_create(self.ctxt, { 'service_uuid': volume_service_uuid, 'host': host, 'volume_type_id': fake.VOLUME_TYPE_ID}) # Create 2 volumes with host 'alt_fake_host' for i in range(2): db.volume_create(self.ctxt, { 'service_uuid': alt_vol_service_uuid, 'host': alt_host, 'volume_type_id': fake.VOLUME_TYPE_ID}) # Create service entry for 'fake_host' utils.create_service( self.ctxt, {'uuid': service_uuid_1, 'host': host, 'binary': binary}) # Create service entry for 'alt_fake_host' utils.create_service( self.ctxt, {'uuid': service_uuid_2, 'host': alt_host, 'binary': binary}) db.volume_update_all_by_service(self.ctxt) volumes = db.volume_get_all(self.ctxt) for volume in volumes: if volume.host == host: self.assertEqual(service_uuid_1, volume.service_uuid) elif volume.host == alt_host: self.assertEqual(service_uuid_2, volume.service_uuid) def test_volume_get_all_by_project(self): volumes = [] for i in range(3): volumes.append([db.volume_create(self.ctxt, { 'project_id': 'p%d' % i, 'volume_type_id': fake.VOLUME_TYPE_ID}) for j in range(3)]) for i in range(3): self._assertEqualListsOfObjects(volumes[i], db.volume_get_all_by_project( self.ctxt, 'p%d' % i, None, None, ['host'], None)) def test_volume_get_by_name(self): db.volume_create(self.ctxt, {'display_name': 'vol1', 'volume_type_id': fake.VOLUME_TYPE_ID}) db.volume_create(self.ctxt, {'display_name': 'vol2', 'volume_type_id': fake.VOLUME_TYPE_ID}) db.volume_create(self.ctxt, {'display_name': 'vol3', 'volume_type_id': fake.VOLUME_TYPE_ID}) # no name filter volumes = db.volume_get_all(self.ctxt, None, None, ['created_at'], ['asc']) self.assertEqual(3, len(volumes)) # filter on name volumes = db.volume_get_all(self.ctxt, None, None, ['created_at'], ['asc'], {'display_name': 'vol2'}) self.assertEqual(1, len(volumes)) self.assertEqual('vol2', volumes[0]['display_name']) # filter no match volumes = db.volume_get_all(self.ctxt, None, None, ['created_at'], ['asc'], {'display_name': 'vol4'}) self.assertEqual(0, len(volumes)) def test_volume_list_by_status(self): db.volume_create(self.ctxt, {'display_name': 'vol1', 
'status': 'available', 'volume_type_id': fake.VOLUME_TYPE_ID}) db.volume_create(self.ctxt, {'display_name': 'vol2', 'status': 'available', 'volume_type_id': fake.VOLUME_TYPE_ID}) db.volume_create(self.ctxt, {'display_name': 'vol3', 'status': 'in-use', 'volume_type_id': fake.VOLUME_TYPE_ID}) # no status filter volumes = db.volume_get_all(self.ctxt, None, None, ['created_at'], ['asc']) self.assertEqual(3, len(volumes)) # single match volumes = db.volume_get_all(self.ctxt, None, None, ['created_at'], ['asc'], {'status': 'in-use'}) self.assertEqual(1, len(volumes)) self.assertEqual('in-use', volumes[0]['status']) # multiple match volumes = db.volume_get_all(self.ctxt, None, None, ['created_at'], ['asc'], {'status': 'available'}) self.assertEqual(2, len(volumes)) for volume in volumes: self.assertEqual('available', volume['status']) # multiple filters volumes = db.volume_get_all(self.ctxt, None, None, ['created_at'], ['asc'], {'status': 'available', 'display_name': 'vol1'}) self.assertEqual(1, len(volumes)) self.assertEqual('vol1', volumes[0]['display_name']) self.assertEqual('available', volumes[0]['status']) # no match volumes = db.volume_get_all(self.ctxt, None, None, ['created_at'], ['asc'], {'status': 'in-use', 'display_name': 'vol1'}) self.assertEqual(0, len(volumes)) def _assertEqualsVolumeOrderResult(self, correct_order, limit=None, sort_keys=None, sort_dirs=None, filters=None, project_id=None, marker=None, match_keys=['id', 'display_name', 'volume_metadata', 'created_at']): """Verifies that volumes are returned in the correct order.""" if project_id: result = db.volume_get_all_by_project(self.ctxt, project_id, marker, limit, sort_keys=sort_keys, sort_dirs=sort_dirs, filters=filters) else: result = db.volume_get_all(self.ctxt, marker, limit, sort_keys=sort_keys, sort_dirs=sort_dirs, filters=filters) self.assertEqual(len(correct_order), len(result)) for vol1, vol2 in zip(result, correct_order): for key in match_keys: val1 = vol1.get(key) val2 = vol2.get(key) # metadata is a dict, compare the 'key' and 'value' of each if key == 'volume_metadata': self.assertEqual(len(val1), len(val2)) val1_dict = {x.key: x.value for x in val1} val2_dict = {x.key: x.value for x in val2} self.assertDictEqual(val1_dict, val2_dict) else: self.assertEqual(val1, val2) return result def test_volume_get_by_filter(self): """Verifies that all filtering is done at the DB layer.""" vols = [] vols.extend([db.volume_create(self.ctxt, {'project_id': 'g1', 'display_name': 'name_%d' % i, 'size': 1, 'volume_type_id': fake.VOLUME_TYPE_ID}) for i in range(2)]) vols.extend([db.volume_create(self.ctxt, {'project_id': 'g1', 'display_name': 'name_%d' % i, 'size': 2, 'volume_type_id': fake.VOLUME_TYPE_ID}) for i in range(2)]) vols.extend([db.volume_create(self.ctxt, {'project_id': 'g1', 'display_name': 'name_%d' % i, 'volume_type_id': fake.VOLUME_TYPE_ID}) for i in range(2)]) vols.extend([db.volume_create(self.ctxt, {'project_id': 'g2', 'display_name': 'name_%d' % i, 'size': 1, 'volume_type_id': fake.VOLUME_TYPE_ID}) for i in range(2)]) # By project, filter on size and name filters = {'size': '1'} correct_order = [vols[1], vols[0]] self._assertEqualsVolumeOrderResult(correct_order, filters=filters, project_id='g1') filters = {'size': '1', 'display_name': 'name_1'} correct_order = [vols[1]] self._assertEqualsVolumeOrderResult(correct_order, filters=filters, project_id='g1') # Remove project scope filters = {'size': '1'} correct_order = [vols[7], vols[6], vols[1], vols[0]] self._assertEqualsVolumeOrderResult(correct_order, 
filters=filters) filters = {'size': '1', 'display_name': 'name_1'} correct_order = [vols[7], vols[1]] self._assertEqualsVolumeOrderResult(correct_order, filters=filters) # Remove size constraint filters = {'display_name': 'name_1'} correct_order = [vols[5], vols[3], vols[1]] self._assertEqualsVolumeOrderResult(correct_order, filters=filters, project_id='g1') correct_order = [vols[7], vols[5], vols[3], vols[1]] self._assertEqualsVolumeOrderResult(correct_order, filters=filters) # Verify bogus values return nothing filters = {'display_name': 'name_1', 'bogus_value': 'foo'} self._assertEqualsVolumeOrderResult([], filters=filters, project_id='g1') self._assertEqualsVolumeOrderResult([], project_id='bogus') self._assertEqualsVolumeOrderResult([], filters=filters) self._assertEqualsVolumeOrderResult([], filters={'metadata': 'not valid'}) self._assertEqualsVolumeOrderResult([], filters={'metadata': ['not', 'valid']}) # Verify that relationship property keys return nothing, these # exist on the Volumes model but are not columns filters = {'volume_type': 'bogus_type'} self._assertEqualsVolumeOrderResult([], filters=filters) def test_volume_get_all_filters_limit(self): vol1 = db.volume_create(self.ctxt, {'display_name': 'test1', 'volume_type_id': fake.VOLUME_TYPE_ID}) vol2 = db.volume_create(self.ctxt, {'display_name': 'test2', 'volume_type_id': fake.VOLUME_TYPE_ID}) vol3 = db.volume_create(self.ctxt, {'display_name': 'test2', 'metadata': {'key1': 'val1'}, 'volume_type_id': fake.VOLUME_TYPE_ID}) vol4 = db.volume_create(self.ctxt, {'display_name': 'test3', 'metadata': {'key1': 'val1', 'key2': 'val2'}, 'volume_type_id': fake.VOLUME_TYPE_ID}) vol5 = db.volume_create(self.ctxt, {'display_name': 'test3', 'metadata': {'key2': 'val2', 'key3': 'val3'}, 'host': 'host5', 'volume_type_id': fake.VOLUME_TYPE_ID}) db.volume_admin_metadata_update(self.ctxt, vol5.id, {"readonly": "True"}, False) vols = [vol5, vol4, vol3, vol2, vol1] # Ensure we have 5 total instances self._assertEqualsVolumeOrderResult(vols) # No filters, test limit self._assertEqualsVolumeOrderResult(vols[:1], limit=1) self._assertEqualsVolumeOrderResult(vols[:4], limit=4) # Just the test2 volumes filters = {'display_name': 'test2'} self._assertEqualsVolumeOrderResult([vol3, vol2], filters=filters) self._assertEqualsVolumeOrderResult([vol3], limit=1, filters=filters) self._assertEqualsVolumeOrderResult([vol3, vol2], limit=2, filters=filters) self._assertEqualsVolumeOrderResult([vol3, vol2], limit=100, filters=filters) # metadata filters filters = {'metadata': {'key1': 'val1'}} self._assertEqualsVolumeOrderResult([vol4, vol3], filters=filters) self._assertEqualsVolumeOrderResult([vol4], limit=1, filters=filters) self._assertEqualsVolumeOrderResult([vol4, vol3], limit=10, filters=filters) filters = {'metadata': {'readonly': 'True'}} self._assertEqualsVolumeOrderResult([vol5], filters=filters) filters = {'metadata': {'key1': 'val1', 'key2': 'val2'}} self._assertEqualsVolumeOrderResult([vol4], filters=filters) self._assertEqualsVolumeOrderResult([vol4], limit=1, filters=filters) # No match filters = {'metadata': {'key1': 'val1', 'key2': 'val2', 'key3': 'val3'}} self._assertEqualsVolumeOrderResult([], filters=filters) filters = {'metadata': {'key1': 'val1', 'key2': 'bogus'}} self._assertEqualsVolumeOrderResult([], filters=filters) filters = {'metadata': {'key1': 'val1', 'key2': 'val1'}} self._assertEqualsVolumeOrderResult([], filters=filters) # Combination filters = {'display_name': 'test2', 'metadata': {'key1': 'val1'}} 
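# NOTE: the metadata filters above and below assume subset semantics: a
# volume matches only when every requested key/value pair is present in its
# metadata, while extra keys on the volume are ignored. A minimal sketch of
# that check, for illustration only (the real filtering is done inside
# sqlalchemy_api, not with this hypothetical helper):
#
#     def _metadata_is_subset(volume_meta, wanted):
#         return all(volume_meta.get(k) == v for k, v in wanted.items())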
self._assertEqualsVolumeOrderResult([vol3], filters=filters) self._assertEqualsVolumeOrderResult([vol3], limit=1, filters=filters) self._assertEqualsVolumeOrderResult([vol3], limit=100, filters=filters) filters = {'display_name': 'test3', 'metadata': {'key2': 'val2', 'key3': 'val3'}, 'host': 'host5'} self._assertEqualsVolumeOrderResult([vol5], filters=filters) self._assertEqualsVolumeOrderResult([vol5], limit=1, filters=filters) def test_volume_get_no_migration_targets(self): """Verifies the unique 'no_migration_targets'=True filter. This filter returns volumes with either a NULL 'migration_status' or a non-NULL value that does not start with 'target:'. """ vol1 = db.volume_create(self.ctxt, {'display_name': 'test1', 'volume_type_id': fake.VOLUME_TYPE_ID}) vol2 = db.volume_create(self.ctxt, {'display_name': 'test2', 'migration_status': 'bogus', 'volume_type_id': fake.VOLUME_TYPE_ID}) vol3 = db.volume_create(self.ctxt, {'display_name': 'test3', 'migration_status': 'btarget:', 'volume_type_id': fake.VOLUME_TYPE_ID}) vol4 = db.volume_create(self.ctxt, {'display_name': 'test4', 'migration_status': 'target:', 'volume_type_id': fake.VOLUME_TYPE_ID}) # Ensure we have 4 total instances, default sort of created_at (desc) self._assertEqualsVolumeOrderResult([vol4, vol3, vol2, vol1]) # Apply the unique filter filters = {'no_migration_targets': True} self._assertEqualsVolumeOrderResult([vol3, vol2, vol1], filters=filters) self._assertEqualsVolumeOrderResult([vol3, vol2], limit=2, filters=filters) filters = {'no_migration_targets': True, 'display_name': 'test4'} self._assertEqualsVolumeOrderResult([], filters=filters) def test_volume_get_all_by_filters_sort_keys(self): # Volumes that will reply to the query test_h1_avail = db.volume_create( self.ctxt, {'display_name': 'test', 'status': 'available', 'host': 'h1', 'volume_type_id': fake.VOLUME_TYPE_ID}) test_h1_error = db.volume_create( self.ctxt, {'display_name': 'test', 'status': 'error', 'host': 'h1', 'volume_type_id': fake.VOLUME_TYPE_ID}) test_h1_error2 = db.volume_create( self.ctxt, {'display_name': 'test', 'status': 'error', 'host': 'h1', 'volume_type_id': fake.VOLUME_TYPE_ID}) test_h2_avail = db.volume_create( self.ctxt, {'display_name': 'test', 'status': 'available', 'host': 'h2', 'volume_type_id': fake.VOLUME_TYPE_ID}) test_h2_error = db.volume_create( self.ctxt, {'display_name': 'test', 'status': 'error', 'host': 'h2', 'volume_type_id': fake.VOLUME_TYPE_ID}) test_h2_error2 = db.volume_create( self.ctxt, {'display_name': 'test', 'status': 'error', 'host': 'h2', 'volume_type_id': fake.VOLUME_TYPE_ID}) # Other volumes in the DB, will not match name filter other_error = db.volume_create( self.ctxt, {'display_name': 'other', 'status': 'error', 'host': 'a', 'volume_type_id': fake.VOLUME_TYPE_ID}) other_active = db.volume_create( self.ctxt, {'display_name': 'other', 'status': 'available', 'host': 'a', 'volume_type_id': fake.VOLUME_TYPE_ID}) filters = {'display_name': 'test'} # Verify different sort key/direction combinations sort_keys = ['host', 'status', 'created_at'] sort_dirs = ['asc', 'asc', 'asc'] correct_order = [test_h1_avail, test_h1_error, test_h1_error2, test_h2_avail, test_h2_error, test_h2_error2] self._assertEqualsVolumeOrderResult(correct_order, filters=filters, sort_keys=sort_keys, sort_dirs=sort_dirs) sort_dirs = ['asc', 'desc', 'asc'] correct_order = [test_h1_error, test_h1_error2, test_h1_avail, test_h2_error, test_h2_error2, test_h2_avail] self._assertEqualsVolumeOrderResult(correct_order, filters=filters, sort_keys=sort_keys, 
sort_dirs=sort_dirs) sort_dirs = ['desc', 'desc', 'asc'] correct_order = [test_h2_error, test_h2_error2, test_h2_avail, test_h1_error, test_h1_error2, test_h1_avail] self._assertEqualsVolumeOrderResult(correct_order, filters=filters, sort_keys=sort_keys, sort_dirs=sort_dirs) # created_at is added by default if not supplied, descending order sort_keys = ['host', 'status'] sort_dirs = ['desc', 'desc'] correct_order = [test_h2_error2, test_h2_error, test_h2_avail, test_h1_error2, test_h1_error, test_h1_avail] self._assertEqualsVolumeOrderResult(correct_order, filters=filters, sort_keys=sort_keys, sort_dirs=sort_dirs) sort_dirs = ['asc', 'asc'] correct_order = [test_h1_avail, test_h1_error, test_h1_error2, test_h2_avail, test_h2_error, test_h2_error2] self._assertEqualsVolumeOrderResult(correct_order, filters=filters, sort_keys=sort_keys, sort_dirs=sort_dirs) # Remove name filter correct_order = [other_active, other_error, test_h1_avail, test_h1_error, test_h1_error2, test_h2_avail, test_h2_error, test_h2_error2] self._assertEqualsVolumeOrderResult(correct_order, sort_keys=sort_keys, sort_dirs=sort_dirs) # No sort data, default sort of created_at, id (desc) correct_order = [other_active, other_error, test_h2_error2, test_h2_error, test_h2_avail, test_h1_error2, test_h1_error, test_h1_avail] self._assertEqualsVolumeOrderResult(correct_order) def test_volume_get_all_by_filters_sort_keys_paginate(self): """Verifies sort order with pagination.""" # Volumes that will reply to the query test1_avail = db.volume_create( self.ctxt, {'display_name': 'test', 'size': 1, 'status': 'available', 'volume_type_id': fake.VOLUME_TYPE_ID}) test1_error = db.volume_create( self.ctxt, {'display_name': 'test', 'size': 1, 'status': 'error', 'volume_type_id': fake.VOLUME_TYPE_ID}) test1_error2 = db.volume_create( self.ctxt, {'display_name': 'test', 'size': 1, 'status': 'error', 'volume_type_id': fake.VOLUME_TYPE_ID}) test2_avail = db.volume_create( self.ctxt, {'display_name': 'test', 'size': 2, 'status': 'available', 'volume_type_id': fake.VOLUME_TYPE_ID}) test2_error = db.volume_create( self.ctxt, {'display_name': 'test', 'size': 2, 'status': 'error', 'volume_type_id': fake.VOLUME_TYPE_ID}) test2_error2 = db.volume_create( self.ctxt, {'display_name': 'test', 'size': 2, 'status': 'error', 'volume_type_id': fake.VOLUME_TYPE_ID}) # Other volumes in the DB, will not match name filter db.volume_create(self.ctxt, {'display_name': 'other', 'volume_type_id': fake.VOLUME_TYPE_ID}) db.volume_create(self.ctxt, {'display_name': 'other', 'volume_type_id': fake.VOLUME_TYPE_ID}) filters = {'display_name': 'test'} # Common sort information for every query sort_keys = ['size', 'status', 'created_at'] sort_dirs = ['asc', 'desc', 'asc'] # Overall correct volume order based on the sort keys correct_order = [test1_error, test1_error2, test1_avail, test2_error, test2_error2, test2_avail] # For limits of 1, 2, and 3, verify that the volumes returned are in the # correct sorted order, updating the marker to get the next correct page for limit in range(1, 4): marker = None # Include the maximum number of volumes (i.e., 6) to ensure that # the last query (with marker pointing to the last volume) # returns 0 volumes for i in range(0, 7, limit): if i == len(correct_order): correct = [] else: correct = correct_order[i:i + limit] vols = self._assertEqualsVolumeOrderResult( correct, filters=filters, sort_keys=sort_keys, sort_dirs=sort_dirs, limit=limit, marker=marker) if correct: marker = vols[-1]['id'] self.assertEqual(correct[-1]['id'], marker) def 
test_volume_get_all_invalid_sort_key(self): for keys in (['foo'], ['display_name', 'foo']): self.assertRaises(exception.InvalidInput, db.volume_get_all, self.ctxt, None, None, sort_keys=keys) def test_volume_update(self): volume = db.volume_create(self.ctxt, {'host': 'h1', 'volume_type_id': fake.VOLUME_TYPE_ID}) db.volume_update(self.ctxt, volume.id, {'host': 'h2', 'metadata': {'m1': 'v1'}}) volume = db.volume_get(self.ctxt, volume.id) self.assertEqual('h2', volume.host) self.assertEqual(1, len(volume.volume_metadata)) db_metadata = volume.volume_metadata[0] self.assertEqual('m1', db_metadata.key) self.assertEqual('v1', db_metadata.value) def test_volume_update_nonexistent(self): self.assertRaises(exception.VolumeNotFound, db.volume_update, self.ctxt, 42, {}) def test_volume_metadata_get(self): metadata = {'a': 'b', 'c': 'd'} db.volume_create(self.ctxt, {'id': 1, 'metadata': metadata, 'volume_type_id': fake.VOLUME_TYPE_ID}) self.assertEqual(metadata, db.volume_metadata_get(self.ctxt, 1)) def test_volume_metadata_update(self): metadata1 = {'a': '1', 'c': '2'} metadata2 = {'a': '3', 'd': '5'} should_be = {'a': '3', 'c': '2', 'd': '5'} db.volume_create(self.ctxt, {'id': 1, 'metadata': metadata1, 'volume_type_id': fake.VOLUME_TYPE_ID}) db_meta = db.volume_metadata_update(self.ctxt, 1, metadata2, False) self.assertEqual(should_be, db_meta) @mock.patch.object(db.sqlalchemy.api, '_volume_glance_metadata_key_to_id', return_value='1') def test_volume_glance_metadata_key_to_id_called(self, metadata_key_to_id_mock): image_metadata = {'abc': '123'} # create volume with metadata. db.volume_create(self.ctxt, {'id': 1, 'metadata': image_metadata, 'volume_type_id': fake.VOLUME_TYPE_ID}) # delete metadata associated with the volume. db.volume_metadata_delete(self.ctxt, 1, 'abc', meta_type=common.METADATA_TYPES.image) # assert _volume_glance_metadata_key_to_id() was called exactly once metadata_key_to_id_mock.assert_called_once_with(self.ctxt, 1, 'abc') def test_case_sensitive_glance_metadata_delete(self): user_metadata = {'a': '1', 'c': '2'} image_metadata = {'abc': '123', 'ABC': '123'} # create volume with metadata. db.volume_create(self.ctxt, {'id': 1, 'metadata': user_metadata, 'volume_type_id': fake.VOLUME_TYPE_ID}) # delete user metadata associated with the volume. db.volume_metadata_delete(self.ctxt, 1, 'c', meta_type=common.METADATA_TYPES.user) user_metadata.pop('c') self.assertEqual(user_metadata, db.volume_metadata_get(self.ctxt, 1)) # create image metadata associated with the volume. db.volume_metadata_update( self.ctxt, 1, image_metadata, False, meta_type=common.METADATA_TYPES.image) # delete image metadata associated with the volume. db.volume_metadata_delete( self.ctxt, 1, 'abc', meta_type=common.METADATA_TYPES.image) image_metadata.pop('abc') # parse the result to build the dict. rows = db.volume_glance_metadata_get(self.ctxt, 1) result = {} for row in rows: result[row['key']] = row['value'] self.assertEqual(image_metadata, result) def test_volume_metadata_update_with_metatype(self): user_metadata1 = {'a': '1', 'c': '2'} user_metadata2 = {'a': '3', 'd': '5'} expected1 = {'a': '3', 'c': '2', 'd': '5'} image_metadata1 = {'e': '1', 'f': '2'} image_metadata2 = {'e': '3', 'g': '5'} expected2 = {'e': '3', 'f': '2', 'g': '5'} FAKE_METADATA_TYPE = enum.Enum('METADATA_TYPES', 'fake_type') db.volume_create(self.ctxt, {'id': 1, 'metadata': user_metadata1, 'volume_type_id': fake.VOLUME_TYPE_ID}) # update user metadata associated with volume. 
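# NOTE: the meta_type argument selects which collection is touched:
# common.METADATA_TYPES.user updates the volume's user metadata, while
# common.METADATA_TYPES.image updates its image (glance) metadata. The two
# are stored separately, which is why the expected dicts in this test never
# mix keys from the user_metadata* and image_metadata* fixtures.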
db_meta = db.volume_metadata_update( self.ctxt, 1, user_metadata2, False, meta_type=common.METADATA_TYPES.user) self.assertEqual(expected1, db_meta) # create image metadata associated with volume. db_meta = db.volume_metadata_update( self.ctxt, 1, image_metadata1, False, meta_type=common.METADATA_TYPES.image) self.assertEqual(image_metadata1, db_meta) # update image metadata associated with volume. db_meta = db.volume_metadata_update( self.ctxt, 1, image_metadata2, False, meta_type=common.METADATA_TYPES.image) self.assertEqual(expected2, db_meta) # update volume with invalid metadata type. self.assertRaises(exception.InvalidMetadataType, db.volume_metadata_update, self.ctxt, 1, image_metadata1, False, FAKE_METADATA_TYPE.fake_type) @ddt.data(common.METADATA_TYPES.user, common.METADATA_TYPES.image) @mock.patch.object(timeutils, 'utcnow') @mock.patch.object(sqlalchemy_api, 'resource_exists') @mock.patch.object(sqlalchemy_api, '_conditional_update') @mock.patch.object(sqlalchemy_api, '_volume_x_metadata_get_query') def test_volume_metadata_delete_deleted_at_updated(self, meta_type, mock_query, mock_update, mock_resource, mock_utc): mock_query.all.return_value = {} mock_utc.return_value = 'fake_time' db.volume_metadata_update(self.ctxt, 1, {}, True, meta_type=meta_type) mock_update.assert_called_once_with(mock.ANY, mock.ANY, {'deleted': True, 'deleted_at': 'fake_time'}, mock.ANY) def test_volume_metadata_update_delete(self): metadata1 = {'a': '1', 'c': '2'} metadata2 = {'a': '3', 'd': '4'} should_be = metadata2 db.volume_create(self.ctxt, {'id': 1, 'metadata': metadata1, 'volume_type_id': fake.VOLUME_TYPE_ID}) db_meta = db.volume_metadata_update(self.ctxt, 1, metadata2, True) self.assertEqual(should_be, db_meta) def test_volume_metadata_delete(self): metadata = {'a': 'b', 'c': 'd'} db.volume_create(self.ctxt, {'id': 1, 'metadata': metadata, 'volume_type_id': fake.VOLUME_TYPE_ID}) db.volume_metadata_delete(self.ctxt, 1, 'c') metadata.pop('c') self.assertEqual(metadata, db.volume_metadata_get(self.ctxt, 1)) def test_volume_metadata_delete_with_metatype(self): user_metadata = {'a': '1', 'c': '2'} image_metadata = {'e': '1', 'f': '2'} FAKE_METADATA_TYPE = enum.Enum('METADATA_TYPES', 'fake_type') # test that user metadata is deleted when meta_type is specified. db.volume_create(self.ctxt, {'id': 1, 'metadata': user_metadata, 'volume_type_id': fake.VOLUME_TYPE_ID}) db.volume_metadata_delete(self.ctxt, 1, 'c', meta_type=common.METADATA_TYPES.user) user_metadata.pop('c') self.assertEqual(user_metadata, db.volume_metadata_get(self.ctxt, 1)) # update the image metadata associated with the volume. db.volume_metadata_update( self.ctxt, 1, image_metadata, False, meta_type=common.METADATA_TYPES.image) # test that image metadata is deleted when meta_type is specified. db.volume_metadata_delete(self.ctxt, 1, 'e', meta_type=common.METADATA_TYPES.image) image_metadata.pop('e') # parse the result to build the dict. rows = db.volume_glance_metadata_get(self.ctxt, 1) result = {} for row in rows: result[row['key']] = row['value'] self.assertEqual(image_metadata, result) # delete volume with invalid metadata type. 
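# NOTE: FAKE_METADATA_TYPE above is a locally built enum that is not part of
# common.METADATA_TYPES, so passing it as meta_type is expected to be
# rejected with InvalidMetadataType rather than being treated as user or
# image metadata.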
self.assertRaises(exception.InvalidMetadataType, db.volume_metadata_delete, self.ctxt, 1, 'f', FAKE_METADATA_TYPE.fake_type) def test_volume_glance_metadata_create(self): volume = db.volume_create(self.ctxt, {'host': 'h1', 'volume_type_id': fake.VOLUME_TYPE_ID}) db.volume_glance_metadata_create(self.ctxt, volume['id'], 'image_name', u'\xe4\xbd\xa0\xe5\xa5\xbd') glance_meta = db.volume_glance_metadata_get(self.ctxt, volume['id']) for meta_entry in glance_meta: if meta_entry.key == 'image_name': image_name = meta_entry.value self.assertEqual(u'\xe4\xbd\xa0\xe5\xa5\xbd', image_name) def test_volume_glance_metadata_create_idempotency(self): volume = db.volume_create(self.ctxt, {'host': 'h1', 'volume_type_id': fake.VOLUME_TYPE_ID}) db.volume_glance_metadata_create(self.ctxt, volume['id'], 'image_name', u'\xe4\xbd\xa0\xe5\xa5\xbd') db.volume_glance_metadata_create(self.ctxt, volume['id'], 'image_name', u'\xe4\xbd\xa0\xe5\xa5\xbd') glance_meta = db.volume_glance_metadata_get(self.ctxt, volume['id']) self.assertEqual(1, len(glance_meta)) def test_volume_glance_metadata_create_immutability(self): volume = db.volume_create(self.ctxt, {'host': 'h1', 'volume_type_id': fake.VOLUME_TYPE_ID}) db.volume_glance_metadata_create(self.ctxt, volume['id'], 'image_name', u'\xe4\xbd\xa0\xe5\xa5\xbd') self.assertRaises(exception.GlanceMetadataExists, db.volume_glance_metadata_create, self.ctxt, volume['id'], 'image_name', 'new_meta') def test_volume_glance_metadata_bulk_create(self): volume = db.volume_create(self.ctxt, {'host': 'h1', 'volume_type_id': fake.VOLUME_TYPE_ID}) metadata = {'foo': 'bar', 'baz': 'qux'} db.volume_glance_metadata_bulk_create(self.ctxt, volume['id'], metadata) glance_meta = db.volume_glance_metadata_get(self.ctxt, volume['id']) glance_meta = {m.key: m.value for m in glance_meta} self.assertEqual(metadata, glance_meta) def test_volume_glance_metadata_bulk_create_idempotency(self): volume = db.volume_create(self.ctxt, {'host': 'h1', 'volume_type_id': fake.VOLUME_TYPE_ID}) metadata = {'foo': 'bar', 'baz': 'qux'} db.volume_glance_metadata_bulk_create(self.ctxt, volume['id'], metadata) db.volume_glance_metadata_bulk_create(self.ctxt, volume['id'], metadata) glance_meta = db.volume_glance_metadata_get(self.ctxt, volume['id']) glance_meta = {m.key: m.value for m in glance_meta} self.assertEqual(metadata, glance_meta) self.assertEqual(2, len(glance_meta)) def test_volume_glance_metadata_bulk_create_immutability(self): volume = db.volume_create(self.ctxt, {'host': 'h1', 'volume_type_id': fake.VOLUME_TYPE_ID}) metadata = {'foo': 'bar', 'baz': 'qux'} db.volume_glance_metadata_bulk_create(self.ctxt, volume['id'], metadata) metadata['foo'] = 'new_meta' self.assertRaises(exception.GlanceMetadataExists, db.volume_glance_metadata_bulk_create, self.ctxt, volume['id'], metadata) def test_volume_glance_metadata_list_get(self): """Test volume_glance_metadata_list_get in DB API.""" db.volume_create(self.ctxt, {'id': 'fake1', 'status': 'available', 'host': 'test', 'provider_location': '', 'size': 1, 'volume_type_id': fake.VOLUME_TYPE_ID}) db.volume_glance_metadata_create(self.ctxt, 'fake1', 'key1', 'value1') db.volume_glance_metadata_create(self.ctxt, 'fake1', 'key2', 'value2') db.volume_create(self.ctxt, {'id': 'fake2', 'status': 'available', 'host': 'test', 'provider_location': '', 'size': 1, 'volume_type_id': fake.VOLUME_TYPE_ID}) db.volume_glance_metadata_create(self.ctxt, 'fake2', 'key3', 'value3') db.volume_glance_metadata_create(self.ctxt, 'fake2', 'key4', 'value4') expect_result = [{'volume_id': 
'fake1', 'key': 'key1', 'value': 'value1'}, {'volume_id': 'fake1', 'key': 'key2', 'value': 'value2'}, {'volume_id': 'fake2', 'key': 'key3', 'value': 'value3'}, {'volume_id': 'fake2', 'key': 'key4', 'value': 'value4'}] self._assertEqualListsOfObjects(expect_result, db.volume_glance_metadata_list_get( self.ctxt, ['fake1', 'fake2']), ignored_keys=['id', 'snapshot_id', 'created_at', 'deleted', 'deleted_at', 'updated_at']) def _create_volume_with_image_metadata(self): vol1 = db.volume_create(self.ctxt, {'display_name': 'test1', 'volume_type_id': fake.VOLUME_TYPE_ID}) db.volume_glance_metadata_create(self.ctxt, vol1.id, 'image_name', 'imageTestOne') db.volume_glance_metadata_create(self.ctxt, vol1.id, 'test_image_key', 'test_image_value') vol2 = db.volume_create(self.ctxt, {'display_name': 'test2', 'volume_type_id': fake.VOLUME_TYPE_ID}) db.volume_glance_metadata_create(self.ctxt, vol2.id, 'image_name', 'imageTestTwo') db.volume_glance_metadata_create(self.ctxt, vol2.id, 'disk_format', 'qcow2') return [vol1, vol2] def test_volume_get_all_by_image_name_and_key(self): vols = self._create_volume_with_image_metadata() filters = {'glance_metadata': {'image_name': 'imageTestOne', 'test_image_key': 'test_image_value'}} volumes = db.volume_get_all(self.ctxt, None, None, ['created_at'], ['desc'], filters=filters) self._assertEqualListsOfObjects([vols[0]], volumes) def test_volume_get_all_by_image_name_and_disk_format(self): vols = self._create_volume_with_image_metadata() filters = {'glance_metadata': {'image_name': 'imageTestTwo', 'disk_format': 'qcow2'}} volumes = db.volume_get_all(self.ctxt, None, None, ['created_at'], ['desc'], filters=filters) self._assertEqualListsOfObjects([vols[1]], volumes) def test_volume_get_all_by_invalid_image_metadata(self): # Test with invalid image metadata self._create_volume_with_image_metadata() filters = {'glance_metadata': {'invalid_key': 'invalid_value', 'test_image_key': 'test_image_value'}} volumes = db.volume_get_all(self.ctxt, None, None, ['created_at'], ['desc'], filters=filters) self._assertEqualListsOfObjects([], volumes) def _create_volumes_to_test_include_in(self): """Helper method for test_volume_include_in_* tests.""" return [ db.volume_create(self.ctxt, {'host': 'host1@backend1#pool1', 'cluster_name': 'cluster1@backend1#pool1', 'volume_type_id': fake.VOLUME_TYPE_ID}), db.volume_create(self.ctxt, {'host': 'host1@backend2#pool2', 'cluster_name': 'cluster1@backend2#pool2', 'volume_type_id': fake.VOLUME_TYPE_ID}), db.volume_create(self.ctxt, {'host': 'host2@backend#poo1', 'cluster_name': 'cluster2@backend#pool', 'volume_type_id': fake.VOLUME_TYPE_ID}), ] @ddt.data('host1@backend1#pool1', 'host1@backend1') def test_volume_include_in_cluster_by_host(self, host): """Basic volume include test filtering by host and with full rename.""" vol = self._create_volumes_to_test_include_in()[0] cluster_name = 'my_cluster' result = db.volume_include_in_cluster(self.ctxt, cluster_name, partial_rename=False, host=host) self.assertEqual(1, result) db_vol = db.volume_get(self.ctxt, vol.id) self.assertEqual(cluster_name, db_vol.cluster_name) def test_volume_include_in_cluster_by_host_multiple(self): """Partial cluster rename filtering with host level info.""" vols = self._create_volumes_to_test_include_in()[0:2] host = 'host1' cluster_name = 'my_cluster' result = db.volume_include_in_cluster(self.ctxt, cluster_name, partial_rename=True, host=host) self.assertEqual(2, result) db_vols = [db.volume_get(self.ctxt, vols[0].id), db.volume_get(self.ctxt, vols[1].id)] for i in 
range(2): self.assertEqual(cluster_name + vols[i].host[len(host):], db_vols[i].cluster_name) @ddt.data('cluster1@backend1#pool1', 'cluster1@backend1') def test_volume_include_in_cluster_by_cluster_name(self, cluster_name): """Basic volume include test filtering by cluster with full rename.""" vol = self._create_volumes_to_test_include_in()[0] new_cluster_name = 'cluster_new@backend1#pool' result = db.volume_include_in_cluster(self.ctxt, new_cluster_name, partial_rename=False, cluster_name=cluster_name) self.assertEqual(1, result) db_vol = db.volume_get(self.ctxt, vol.id) self.assertEqual(new_cluster_name, db_vol.cluster_name) def test_volume_include_in_cluster_by_cluster_multiple(self): """Partial rename filtering with cluster with host level info.""" vols = self._create_volumes_to_test_include_in()[0:2] cluster_name = 'cluster1' new_cluster_name = 'my_cluster' result = db.volume_include_in_cluster(self.ctxt, new_cluster_name, partial_rename=True, cluster_name=cluster_name) self.assertEqual(2, result) db_vols = [db.volume_get(self.ctxt, vols[0].id), db.volume_get(self.ctxt, vols[1].id)] for i in range(2): self.assertEqual( new_cluster_name + vols[i].cluster_name[len(cluster_name):], db_vols[i].cluster_name) @ddt.ddt class DBAPISnapshotTestCase(BaseTest): """Tests for cinder.db.api.snapshot_*.""" def test_snapshot_data_get_for_project(self): actual = db.snapshot_data_get_for_project(self.ctxt, 'project1') self.assertEqual((0, 0), actual) db.volume_create(self.ctxt, {'id': 1, 'project_id': 'project1', 'size': 42, 'volume_type_id': fake.VOLUME_TYPE_ID}) db.snapshot_create(self.ctxt, {'id': 1, 'volume_id': 1, 'project_id': 'project1', 'volume_size': 42, 'volume_type_id': fake.VOLUME_TYPE_ID}) actual = db.snapshot_data_get_for_project(self.ctxt, 'project1') self.assertEqual((1, 42), actual) @ddt.data({'time_collection': [1, 2, 3], 'latest': 1}, {'time_collection': [4, 2, 6], 'latest': 2}, {'time_collection': [8, 2, 1], 'latest': 1}) @ddt.unpack def test_snapshot_get_latest_for_volume(self, time_collection, latest): def hours_ago(hour): return timeutils.utcnow() - datetime.timedelta( hours=hour) db.volume_create(self.ctxt, {'id': 1, 'volume_type_id': fake.VOLUME_TYPE_ID}) for snapshot in time_collection: db.snapshot_create(self.ctxt, {'id': snapshot, 'volume_id': 1, 'display_name': 'one', 'created_at': hours_ago(snapshot), 'status': fields.SnapshotStatus.AVAILABLE, 'volume_type_id': fake.VOLUME_TYPE_ID}) snapshot = db.snapshot_get_latest_for_volume(self.ctxt, 1) self.assertEqual(str(latest), snapshot['id']) def test_snapshot_get_latest_for_volume_not_found(self): db.volume_create(self.ctxt, {'id': 1, 'volume_type_id': fake.VOLUME_TYPE_ID}) for t_id in [2, 3]: db.snapshot_create(self.ctxt, {'id': t_id, 'volume_id': t_id, 'display_name': 'one', 'status': fields.SnapshotStatus.AVAILABLE, 'volume_type_id': fake.VOLUME_TYPE_ID}) self.assertRaises(exception.VolumeSnapshotNotFound, db.snapshot_get_latest_for_volume, self.ctxt, 1) def test_snapshot_get_all_by_filter(self): db.volume_create(self.ctxt, {'id': 1, 'volume_type_id': fake.VOLUME_TYPE_ID}) db.volume_create(self.ctxt, {'id': 2, 'volume_type_id': fake.VOLUME_TYPE_ID}) snapshot1 = db.snapshot_create(self.ctxt, {'id': 1, 'volume_id': 1, 'display_name': 'one', 'status': fields.SnapshotStatus.AVAILABLE, 'volume_type_id': fake.VOLUME_TYPE_ID} ) snapshot2 = db.snapshot_create(self.ctxt, {'id': 2, 'volume_id': 1, 'display_name': 'two', 'status': fields.SnapshotStatus.CREATING, 'volume_type_id': fake.VOLUME_TYPE_ID} ) snapshot3 = 
db.snapshot_create(self.ctxt, {'id': 3, 'volume_id': 2, 'display_name': 'three', 'status': fields.SnapshotStatus.AVAILABLE, 'volume_type_id': fake.VOLUME_TYPE_ID} ) # no filter filters = {} snapshots = db.snapshot_get_all(self.ctxt, filters=filters) self.assertEqual(3, len(snapshots)) # single match filters = {'display_name': 'two'} self._assertEqualListsOfObjects([snapshot2], db.snapshot_get_all( self.ctxt, filters), ignored_keys=['metadata', 'volume']) filters = {'volume_id': 2} self._assertEqualListsOfObjects([snapshot3], db.snapshot_get_all( self.ctxt, filters), ignored_keys=['metadata', 'volume']) # filter no match filters = {'volume_id': 5} self._assertEqualListsOfObjects([], db.snapshot_get_all( self.ctxt, filters), ignored_keys=['metadata', 'volume']) filters = {'status': fields.SnapshotStatus.ERROR} self._assertEqualListsOfObjects([], db.snapshot_get_all( self.ctxt, filters), ignored_keys=['metadata', 'volume']) # multiple match filters = {'volume_id': 1} self._assertEqualListsOfObjects([snapshot1, snapshot2], db.snapshot_get_all( self.ctxt, filters), ignored_keys=['metadata', 'volume']) filters = {'status': fields.SnapshotStatus.AVAILABLE} self._assertEqualListsOfObjects([snapshot1, snapshot3], db.snapshot_get_all( self.ctxt, filters), ignored_keys=['metadata', 'volume']) filters = {'volume_id': 1, 'status': fields.SnapshotStatus.AVAILABLE} self._assertEqualListsOfObjects([snapshot1], db.snapshot_get_all( self.ctxt, filters), ignored_keys=['metadata', 'volume']) filters = {'fake_key': 'fake'} self._assertEqualListsOfObjects([], db.snapshot_get_all( self.ctxt, filters), ignored_keys=['metadata', 'volume']) @ddt.data('cluster_name', 'host') def test_snapshot_get_all_filter_host_and_cluster(self, field): volumes = [] snapshots = [] for i in range(2): for value in ('host%d@backend#pool', 'host%d@backend', 'host%d'): kwargs = {field: value % i} vol = utils.create_volume(self.ctxt, **kwargs) volumes.append(vol) snapshots.append(utils.create_snapshot(self.ctxt, vol.id)) for i in range(3): filters = {field: getattr(volumes[i], field)} result = db.snapshot_get_all(self.ctxt, filters=filters) self.assertEqual(i + 1, len(result)) self.assertSetEqual({s.id for s in snapshots[:i + 1]}, {s.id for s in result}) def test_snapshot_get_all_by_host(self): db.volume_create(self.ctxt, {'id': 1, 'host': 'host1', 'volume_type_id': fake.VOLUME_TYPE_ID}) db.volume_create(self.ctxt, {'id': 2, 'host': 'host2', 'volume_type_id': fake.VOLUME_TYPE_ID}) snapshot1 = db.snapshot_create(self.ctxt, {'id': 1, 'volume_id': 1, 'volume_type_id': fake.VOLUME_TYPE_ID} ) snapshot2 = db.snapshot_create(self.ctxt, {'id': 2, 'volume_id': 2, 'status': fields.SnapshotStatus.ERROR, 'volume_type_id': fake.VOLUME_TYPE_ID} ) self._assertEqualListsOfObjects([snapshot1], db.snapshot_get_all_by_host( self.ctxt, 'host1'), ignored_keys='volume') self._assertEqualListsOfObjects([snapshot2], db.snapshot_get_all_by_host( self.ctxt, 'host2'), ignored_keys='volume') self._assertEqualListsOfObjects( [], db.snapshot_get_all_by_host(self.ctxt, 'host2', { 'status': fields.SnapshotStatus.AVAILABLE}), ignored_keys='volume') self._assertEqualListsOfObjects( [snapshot2], db.snapshot_get_all_by_host(self.ctxt, 'host2', { 'status': fields.SnapshotStatus.ERROR}), ignored_keys='volume') self._assertEqualListsOfObjects([], db.snapshot_get_all_by_host( self.ctxt, 'host2', {'fake_key': 'fake'}), ignored_keys='volume') # If host is None or empty string, empty list should be returned. 
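# NOTE: a falsy host is assumed to short-circuit before any query is issued;
# a minimal sketch of such a guard (illustrative only, the actual check lives
# in the sqlalchemy_api implementation):
#
#     if not host:
#         return []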
self.assertEqual([], db.snapshot_get_all_by_host(self.ctxt, None)) self.assertEqual([], db.snapshot_get_all_by_host(self.ctxt, '')) def test_snapshot_get_all_by_host_with_pools(self): db.volume_create(self.ctxt, {'id': 1, 'host': 'host1#pool1', 'volume_type_id': fake.VOLUME_TYPE_ID}) db.volume_create(self.ctxt, {'id': 2, 'host': 'host1#pool2', 'volume_type_id': fake.VOLUME_TYPE_ID}) snapshot1 = db.snapshot_create(self.ctxt, {'id': 1, 'volume_id': 1, 'volume_type_id': fake.VOLUME_TYPE_ID} ) snapshot2 = db.snapshot_create(self.ctxt, {'id': 2, 'volume_id': 2, 'volume_type_id': fake.VOLUME_TYPE_ID} ) self._assertEqualListsOfObjects([snapshot1, snapshot2], db.snapshot_get_all_by_host( self.ctxt, 'host1'), ignored_keys='volume') self._assertEqualListsOfObjects([snapshot1], db.snapshot_get_all_by_host( self.ctxt, 'host1#pool1'), ignored_keys='volume') self._assertEqualListsOfObjects([], db.snapshot_get_all_by_host( self.ctxt, 'host1#pool0'), ignored_keys='volume') def test_snapshot_get_all_by_project(self): db.volume_create(self.ctxt, {'id': 1, 'volume_type_id': fake.VOLUME_TYPE_ID}) db.volume_create(self.ctxt, {'id': 2, 'volume_type_id': fake.VOLUME_TYPE_ID}) snapshot1 = db.snapshot_create(self.ctxt, {'id': 1, 'volume_id': 1, 'project_id': 'project1', 'volume_type_id': fake.VOLUME_TYPE_ID} ) snapshot2 = db.snapshot_create( self.ctxt, {'id': 2, 'volume_id': 2, 'status': fields.SnapshotStatus.ERROR, 'project_id': 'project2', 'volume_type_id': fake.VOLUME_TYPE_ID}) self._assertEqualListsOfObjects([snapshot1], db.snapshot_get_all_by_project( self.ctxt, 'project1'), ignored_keys='volume') self._assertEqualListsOfObjects([snapshot2], db.snapshot_get_all_by_project( self.ctxt, 'project2'), ignored_keys='volume') self._assertEqualListsOfObjects( [], db.snapshot_get_all_by_project( self.ctxt, 'project2', {'status': fields.SnapshotStatus.AVAILABLE}), ignored_keys='volume') self._assertEqualListsOfObjects( [snapshot2], db.snapshot_get_all_by_project( self.ctxt, 'project2', { 'status': fields.SnapshotStatus.ERROR}), ignored_keys='volume') self._assertEqualListsOfObjects([], db.snapshot_get_all_by_project( self.ctxt, 'project2', {'fake_key': 'fake'}), ignored_keys='volume') def test_snapshot_get_all_by_project_with_host(self): db.volume_create(self.ctxt, {'id': 1, 'host': 'host1', 'size': 1, 'project_id': fake.PROJECT_ID, 'volume_type_id': fake.VOLUME_TYPE_ID}) db.volume_create(self.ctxt, {'id': 2, 'host': 'host1', 'size': 2, 'project_id': fake.PROJECT2_ID, 'volume_type_id': fake.VOLUME_TYPE_ID}) db.volume_create(self.ctxt, {'id': 3, 'host': 'host2', 'size': 3, 'project_id': fake.PROJECT2_ID, 'volume_type_id': fake.VOLUME_TYPE_ID}) db.snapshot_create(self.ctxt, {'id': 1, 'volume_id': 1, 'project_id': fake.PROJECT_ID, 'volume_size': 1, 'volume_type_id': fake.VOLUME_TYPE_ID}) db.snapshot_create(self.ctxt, {'id': 2, 'volume_id': 2, 'project_id': fake.PROJECT2_ID, 'volume_size': 2, 'volume_type_id': fake.VOLUME_TYPE_ID}) db.snapshot_create(self.ctxt, {'id': 3, 'volume_id': 3, 'project_id': fake.PROJECT2_ID, 'volume_size': 3, 'volume_type_id': fake.VOLUME_TYPE_ID}) resp = db.snapshot_data_get_for_project(self.ctxt, fake.PROJECT2_ID, host='host2') self.assertEqual((1, 3), resp) def test_snapshot_metadata_get(self): metadata = {'a': 'b', 'c': 'd'} db.volume_create(self.ctxt, {'id': 1, 'volume_type_id': fake.VOLUME_TYPE_ID}) db.snapshot_create(self.ctxt, {'id': 1, 'volume_id': 1, 'metadata': metadata, 'volume_type_id': fake.VOLUME_TYPE_ID}) self.assertEqual(metadata, db.snapshot_metadata_get(self.ctxt, 1)) def 
test_snapshot_metadata_update(self): metadata1 = {'a': '1', 'c': '2'} metadata2 = {'a': '3', 'd': '5'} should_be = {'a': '3', 'c': '2', 'd': '5'} db.volume_create(self.ctxt, {'id': 1, 'volume_type_id': fake.VOLUME_TYPE_ID}) db.snapshot_create(self.ctxt, {'id': 1, 'volume_id': 1, 'metadata': metadata1, 'volume_type_id': fake.VOLUME_TYPE_ID}) db_meta = db.snapshot_metadata_update(self.ctxt, 1, metadata2, False) self.assertEqual(should_be, db_meta) def test_snapshot_metadata_update_delete(self): metadata1 = {'a': '1', 'c': '2'} metadata2 = {'a': '3', 'd': '5'} should_be = metadata2 db.volume_create(self.ctxt, {'id': 1, 'volume_type_id': fake.VOLUME_TYPE_ID}) db.snapshot_create(self.ctxt, {'id': 1, 'volume_id': 1, 'metadata': metadata1, 'volume_type_id': fake.VOLUME_TYPE_ID}) db_meta = db.snapshot_metadata_update(self.ctxt, 1, metadata2, True) self.assertEqual(should_be, db_meta) @mock.patch.object(timeutils, 'utcnow') @mock.patch.object(sqlalchemy_api, 'resource_exists') @mock.patch.object(sqlalchemy_api, '_snapshot_metadata_get') @mock.patch.object(sqlalchemy_api, '_snapshot_metadata_get_item') def test_snapshot_metadata_delete_deleted_at_updated(self, mock_metadata_item, mock_metadata, mock_resource, mock_utc): fake_metadata = {'fake_key1': 'fake_value1'} mock_item = mock.Mock() mock_metadata.return_value = fake_metadata mock_utc.return_value = 'fake_time' mock_metadata_item.side_effect = [mock_item] db.snapshot_metadata_update(self.ctxt, 1, {}, True) mock_item.update.assert_called_once_with({'deleted': True, 'deleted_at': 'fake_time'}) def test_snapshot_metadata_delete(self): metadata = {'a': '1', 'c': '2'} should_be = {'a': '1'} db.volume_create(self.ctxt, {'id': 1, 'volume_type_id': fake.VOLUME_TYPE_ID}) db.snapshot_create(self.ctxt, {'id': 1, 'volume_id': 1, 'metadata': metadata, 'volume_type_id': fake.VOLUME_TYPE_ID}) db.snapshot_metadata_delete(self.ctxt, 1, 'c') self.assertEqual(should_be, db.snapshot_metadata_get(self.ctxt, 1)) @ddt.data((True, (THREE, THREE_HUNDREDS)), (False, (THREE + 1, THREE_HUNDREDS + ONE_HUNDREDS))) @ddt.unpack def test__snapshot_data_get_for_project_temp(self, skip_internal, expected): vol = db.volume_create(self.ctxt, {'project_id': 'project', 'size': 1, 'volume_type_id': fake.VOLUME_TYPE_ID}) # Normal snapshots are always counted db.snapshot_create( self.ctxt, {'project_id': 'project', 'volume_id': vol.id, 'volume_type_id': vol.volume_type_id, 'display_name': 'user snapshot', 'volume_size': ONE_HUNDREDS}) # Old revert temp snapshots are counted, since display_name can be # forged by users db.snapshot_create( self.ctxt, {'project_id': 'project', 'volume_id': vol.id, 'volume_type_id': vol.volume_type_id, 'display_name': '[revert] volume 123 backup snapshot', 'volume_size': ONE_HUNDREDS}) # Old backup temp snapshots are counted, since display_name can be # forged by users db.snapshot_create( self.ctxt, {'project_id': 'project', 'volume_id': vol.id, 'volume_type_id': vol.volume_type_id, 'display_name': 'backup-snap-123', 'volume_size': ONE_HUNDREDS}) # This one will not be counted if skipping internal db.snapshot_create( self.ctxt, {'project_id': 'project', 'volume_id': vol.id, 'volume_type_id': vol.volume_type_id, 'display_name': 'new type of temp snapshot', 'use_quota': False, 'volume_size': ONE_HUNDREDS}) with sqlalchemy_api.main_context_manager.reader.using(self.ctxt): result = sqlalchemy_api._snapshot_data_get_for_project( self.ctxt, 'project', skip_internal=skip_internal) self.assertEqual(expected, result) @ddt.ddt class 
DBAPIConsistencygroupTestCase(BaseTest): def _create_cgs_to_test_include_in(self): """Helper method for test_consistencygroup_include_in_* tests.""" return [ db.consistencygroup_create( self.ctxt, {'host': 'host1@backend1#pool1', 'cluster_name': 'cluster1@backend1#pool1'}), db.consistencygroup_create( self.ctxt, {'host': 'host1@backend2#pool2', 'cluster_name': 'cluster1@backend2#pool1'}), db.consistencygroup_create( self.ctxt, {'host': 'host2@backend#poo1', 'cluster_name': 'cluster2@backend#pool'}), ] @ddt.data('host1@backend1#pool1', 'host1@backend1') def test_consistencygroup_include_in_cluster_by_host(self, host): """Basic CG include test filtering by host and with full rename.""" cg = self._create_cgs_to_test_include_in()[0] cluster_name = 'my_cluster' result = db.consistencygroup_include_in_cluster(self.ctxt, cluster_name, partial_rename=False, host=host) self.assertEqual(1, result) db_cg = db.consistencygroup_get(self.ctxt, cg.id) self.assertEqual(cluster_name, db_cg.cluster_name) def test_consistencygroup_include_in_cluster_by_host_multiple(self): """Partial cluster rename filtering with host level info.""" cgs = self._create_cgs_to_test_include_in()[0:2] host = 'host1' cluster_name = 'my_cluster' result = db.consistencygroup_include_in_cluster(self.ctxt, cluster_name, partial_rename=True, host=host) self.assertEqual(2, result) db_cgs = [db.consistencygroup_get(self.ctxt, cgs[0].id), db.consistencygroup_get(self.ctxt, cgs[1].id)] for i in range(2): self.assertEqual(cluster_name + cgs[i].host[len(host):], db_cgs[i].cluster_name) @ddt.data('cluster1@backend1#pool1', 'cluster1@backend1') def test_consistencygroup_include_in_cluster_by_cluster_name(self, cluster_name): """Basic CG include test filtering by cluster with full rename.""" cg = self._create_cgs_to_test_include_in()[0] new_cluster_name = 'cluster_new@backend1#pool' result = db.consistencygroup_include_in_cluster( self.ctxt, new_cluster_name, partial_rename=False, cluster_name=cluster_name) self.assertEqual(1, result) db_cg = db.consistencygroup_get(self.ctxt, cg.id) self.assertEqual(new_cluster_name, db_cg.cluster_name) def test_consistencygroup_include_in_cluster_by_cluster_multiple(self): """Partial rename filtering with cluster with host level info.""" cgs = self._create_cgs_to_test_include_in()[0:2] cluster_name = 'cluster1' new_cluster_name = 'my_cluster' result = db.consistencygroup_include_in_cluster( self.ctxt, new_cluster_name, partial_rename=True, cluster_name=cluster_name) self.assertEqual(2, result) db_cgs = [db.consistencygroup_get(self.ctxt, cgs[0].id), db.consistencygroup_get(self.ctxt, cgs[1].id)] for i in range(2): self.assertEqual( new_cluster_name + cgs[i].cluster_name[len(cluster_name):], db_cgs[i].cluster_name) class DBAPICgsnapshotTestCase(BaseTest): """Tests for cinder.db.api.cgsnapshot_*.""" def _cgsnapshot_create(self, values): return utils.create_cgsnapshot(self.ctxt, return_vo=False, **values) def test_cgsnapshot_get_all_by_filter(self): cgsnapshot1 = self._cgsnapshot_create( {'id': fake.CGSNAPSHOT_ID, 'consistencygroup_id': fake.CONSISTENCY_GROUP_ID}) cgsnapshot2 = self._cgsnapshot_create( {'id': fake.CGSNAPSHOT2_ID, 'consistencygroup_id': fake.CONSISTENCY_GROUP_ID}) cgsnapshot3 = self._cgsnapshot_create( {'id': fake.CGSNAPSHOT3_ID, 'consistencygroup_id': fake.CONSISTENCY_GROUP2_ID}) tests = [ ({'consistencygroup_id': fake.CONSISTENCY_GROUP_ID}, [cgsnapshot1, cgsnapshot2]), ({'id': fake.CGSNAPSHOT3_ID}, [cgsnapshot3]), ({'fake_key': 'fake'}, []) ] # no filter filters = None cgsnapshots = 
db.cgsnapshot_get_all(self.ctxt, filters=filters) self.assertEqual(3, len(cgsnapshots)) for filters, expected in tests: self._assertEqualListsOfObjects(expected, db.cgsnapshot_get_all( self.ctxt, filters)) def test_cgsnapshot_get_all_by_group(self): cgsnapshot1 = self._cgsnapshot_create( {'id': fake.CGSNAPSHOT_ID, 'consistencygroup_id': fake.CONSISTENCY_GROUP_ID}) cgsnapshot2 = self._cgsnapshot_create( {'id': fake.CGSNAPSHOT2_ID, 'consistencygroup_id': fake.CONSISTENCY_GROUP_ID}) self._cgsnapshot_create( {'id': fake.CGSNAPSHOT3_ID, 'consistencygroup_id': fake.CONSISTENCY_GROUP2_ID}) tests = [ ({'consistencygroup_id': fake.CONSISTENCY_GROUP_ID}, [cgsnapshot1, cgsnapshot2]), ({'id': fake.CGSNAPSHOT3_ID}, []), ({'consistencygroup_id': fake.CONSISTENCY_GROUP2_ID}, []), (None, [cgsnapshot1, cgsnapshot2]), ] for filters, expected in tests: self._assertEqualListsOfObjects(expected, db.cgsnapshot_get_all_by_group( self.ctxt, fake.CONSISTENCY_GROUP_ID, filters)) db.cgsnapshot_destroy(self.ctxt, '1') db.cgsnapshot_destroy(self.ctxt, '2') db.cgsnapshot_destroy(self.ctxt, '3') def test_cgsnapshot_get_all_by_project(self): cgsnapshot1 = self._cgsnapshot_create( {'id': fake.CGSNAPSHOT_ID, 'consistencygroup_id': fake.CONSISTENCY_GROUP_ID, 'project_id': fake.PROJECT_ID}) cgsnapshot2 = self._cgsnapshot_create( {'id': fake.CGSNAPSHOT2_ID, 'consistencygroup_id': fake.CONSISTENCY_GROUP_ID, 'project_id': fake.PROJECT_ID}) tests = [ ({'id': fake.CGSNAPSHOT_ID}, [cgsnapshot1]), ({'consistencygroup_id': fake.CONSISTENCY_GROUP_ID}, [cgsnapshot1, cgsnapshot2]), ({'fake_key': 'fake'}, []) ] for filters, expected in tests: self._assertEqualListsOfObjects(expected, db.cgsnapshot_get_all_by_project( self.ctxt, fake.PROJECT_ID, filters)) class DBAPIVolumeTypeTestCase(BaseTest): """Tests for the db.api.volume_type_* methods.""" def test_volume_type_create__exists(self): vt = db.volume_type_create(self.ctxt, {'name': 'n2'}) self.assertRaises( exception.VolumeTypeExists, db.volume_type_create, self.ctxt, {'name': 'n2', 'id': vt['id']}, ) def test_volume_type_access_add_remove(self): vt = db.volume_type_create(self.ctxt, {'name': 'n2'}) db.volume_type_access_add(self.ctxt, vt['id'], 'fake_project') vtas = db.volume_type_access_get_all(self.ctxt, vt['id']) self.assertEqual(1, len(vtas)) db.volume_type_access_remove(self.ctxt, vt['id'], 'fake_project') vtas = db.volume_type_access_get_all(self.ctxt, vt['id']) self.assertEqual(0, len(vtas)) def test_volume_type_access_add__exists(self): vt = db.volume_type_create(self.ctxt, {'name': 'n2'}) db.volume_type_access_add(self.ctxt, vt['id'], 'fake_project') vtas = db.volume_type_access_get_all(self.ctxt, vt['id']) self.assertEqual(1, len(vtas)) self.assertRaises( exception.VolumeTypeAccessExists, db.volume_type_access_add, self.ctxt, vt['id'], 'fake_project', ) def test_volume_type_access_remove__high_id(self): vt = db.volume_type_create(self.ctxt, {'name': 'n2'}) vta = db.volume_type_access_add(self.ctxt, vt['id'], 'fake_project') vtas = db.volume_type_access_get_all(self.ctxt, vt['id']) self.assertEqual(1, len(vtas)) # NOTE(dulek): Bug 1496747 uncovered problems when deleting accesses # with id column higher than 128. This is regression test for that # case. 
with sqlalchemy_api.main_context_manager.writer.using(self.ctxt): vta.id = 150 vta.save(self.ctxt.session) db.volume_type_access_remove(self.ctxt, vt['id'], 'fake_project') vtas = db.volume_type_access_get_all(self.ctxt, vt['id']) self.assertEqual(0, len(vtas)) def test_get_volume_type_extra_specs(self): # Ensure that volume type extra specs can be accessed after # the DB session is closed. vt_extra_specs = {'mock_key': 'mock_value'} vt = db.volume_type_create(self.ctxt, {'name': 'n2', 'extra_specs': vt_extra_specs}) volume_ref = db.volume_create(self.ctxt, {'volume_type_id': vt.id}) with sqlalchemy_api.main_context_manager.reader.using(self.ctxt): volume = sqlalchemy_api._volume_get(self.ctxt, volume_ref.id) actual_specs = {} for spec in volume.volume_type.extra_specs: actual_specs[spec.key] = spec.value self.assertEqual(vt_extra_specs, actual_specs) class DBAPIEncryptionTestCase(BaseTest): """Tests for the db.api.volume_(type_)?encryption_* methods.""" _ignored_keys = [ 'deleted', 'deleted_at', 'created_at', 'updated_at', 'encryption_id', ] def setUp(self): super(DBAPIEncryptionTestCase, self).setUp() self.created = \ [db.volume_type_encryption_create(self.ctxt, values['volume_type_id'], values) for values in self._get_values()] def _get_values(self, one=False, updated=False): base_values = { 'cipher': 'fake_cipher', 'key_size': 256, 'provider': 'fake_provider', 'volume_type_id': 'fake_type', 'control_location': 'front-end', } updated_values = { 'cipher': 'fake_updated_cipher', 'key_size': 512, 'provider': 'fake_updated_provider', 'volume_type_id': 'fake_type', 'control_location': 'front-end', } if one: return base_values if updated: values = updated_values else: values = base_values def compose(val, step): if isinstance(val, str): step = str(step) return val + step return [{k: compose(v, i) for k, v in values.items()} for i in range(1, 4)] def test_volume_type_encryption_create(self): values = self._get_values() for i, encryption in enumerate(self.created): self._assertEqualObjects(values[i], encryption, self._ignored_keys) def test_volume_type_encryption_update(self): for values in self._get_values(updated=True): db.volume_type_encryption_update(self.ctxt, values['volume_type_id'], values) db_enc = db.volume_type_encryption_get(self.ctxt, values['volume_type_id']) self._assertEqualObjects(values, db_enc, self._ignored_keys) def test_volume_type_encryption_get(self): for encryption in self.created: encryption_get = \ db.volume_type_encryption_get(self.ctxt, encryption['volume_type_id']) self._assertEqualObjects(encryption, encryption_get, self._ignored_keys) def test_volume_type_encryption_update_with_no_create(self): self.assertRaises(exception.VolumeTypeEncryptionNotFound, db.volume_type_encryption_update, self.ctxt, 'fake_no_create_type', {'cipher': 'fake_updated_cipher'}) def test_volume_type_encryption_delete(self): values = { 'cipher': 'fake_cipher', 'key_size': 256, 'provider': 'fake_provider', 'volume_type_id': 'fake_type', 'control_location': 'front-end', } encryption = db.volume_type_encryption_create(self.ctxt, 'fake_type', values) self._assertEqualObjects(values, encryption, self._ignored_keys) db.volume_type_encryption_delete(self.ctxt, encryption['volume_type_id']) encryption_get = \ db.volume_type_encryption_get(self.ctxt, encryption['volume_type_id']) self.assertIsNone(encryption_get) def test_volume_type_encryption_delete_no_create(self): self.assertRaises(exception.VolumeTypeEncryptionNotFound, db.volume_type_encryption_delete, self.ctxt, 'fake_no_create_type') def 
test_volume_encryption_get(self): # normal volume -- metadata should be None volume = db.volume_create(self.ctxt, { 'volume_type_id': fake.VOLUME_TYPE_ID}) values = db.volume_encryption_metadata_get(self.ctxt, volume.id) self.assertEqual({'encryption_key_id': None}, values) # encrypted volume -- metadata should match volume type volume_type = self.created[0] volume = db.volume_create(self.ctxt, {'volume_type_id': volume_type['volume_type_id']}) values = db.volume_encryption_metadata_get(self.ctxt, volume.id) expected = { 'encryption_key_id': volume.encryption_key_id, 'control_location': volume_type['control_location'], 'cipher': volume_type['cipher'], 'key_size': volume_type['key_size'], 'provider': volume_type['provider'], } self.assertEqual(expected, values) class DBAPIReservationTestCase(BaseTest): """Tests for db.api.reservation_* methods.""" def setUp(self): super(DBAPIReservationTestCase, self).setUp() self.values = { 'uuid': 'sample-uuid', 'project_id': 'project1', 'resource': 'resource', 'delta': 42, 'expire': (datetime.datetime.utcnow() + datetime.timedelta(days=1)), 'usage': {'id': 1} } def test__get_reservation_resources(self): reservations = _quota_reserve(self.ctxt, 'project1') expected = ['gigabytes', 'volumes'] with sqlalchemy_api.main_context_manager.reader.using(self.ctxt): resources = sqlalchemy_api._get_reservation_resources( self.ctxt, reservations) self.assertEqual(expected, sorted(resources)) def test_reservation_commit(self): reservations = _quota_reserve(self.ctxt, 'project1') expected = {'project_id': 'project1', 'volumes': {'reserved': 1, 'in_use': 0}, 'gigabytes': {'reserved': 2, 'in_use': 0}, } self.assertEqual(expected, db.quota_usage_get_all_by_project( self.ctxt, 'project1')) db.reservation_commit(self.ctxt, reservations, 'project1') expected = {'project_id': 'project1', 'volumes': {'reserved': 0, 'in_use': 1}, 'gigabytes': {'reserved': 0, 'in_use': 2}, } self.assertEqual(expected, db.quota_usage_get_all_by_project( self.ctxt, 'project1')) def test_reservation_commit_negative_reservation(self): """Verify we can't make reservations negative on commit.""" project = 'project1' reservations = _quota_reserve(self.ctxt, project, volumes=2) # Force a smaller reserved value in quota_usages table vol_usage = db.quota_usage_get(self.ctxt, project, 'volumes') with sqlalchemy_api.main_context_manager.writer.using(self.ctxt): vol_usage.reserved -= 1 vol_usage.save(self.ctxt.session) # When committing 2 volumes from reserved to used reserved should not # go from 1 to -1 but from 1 to 0, but in-use should still increase by # 2 db.reservation_commit(self.ctxt, reservations, project) expected = {'project_id': project, 'volumes': {'reserved': 0, 'in_use': 2}} self.assertEqual(expected, db.quota_usage_get_all_by_project(self.ctxt, project)) def test_reservation_commit_negative_in_use(self): """Verify we can't make in-use negative on commit.""" project = 'project1' reservations = _quota_reserve(self.ctxt, project, volumes=-2) # Force a smaller in_use than the one the reservation will decrease vol_usage = db.quota_usage_get(self.ctxt, 'project1', 'volumes') with sqlalchemy_api.main_context_manager.writer.using(self.ctxt): vol_usage.in_use = 1 vol_usage.save(self.ctxt.session) # When committing -2 volumes from reserved to in-use they should not # make in-use go from 1 to -1, but from 1 to 0 db.reservation_commit(self.ctxt, reservations, project) expected = {'project_id': project, 'volumes': {'reserved': 0, 'in_use': 0}} self.assertEqual(expected, 
db.quota_usage_get_all_by_project(self.ctxt, project)) def test_reservation_rollback(self): reservations = _quota_reserve(self.ctxt, 'project1') expected = {'project_id': 'project1', 'volumes': {'reserved': 1, 'in_use': 0}, 'gigabytes': {'reserved': 2, 'in_use': 0}, } self.assertEqual(expected, db.quota_usage_get_all_by_project( self.ctxt, 'project1')) db.reservation_rollback(self.ctxt, reservations, 'project1') expected = {'project_id': 'project1', 'volumes': {'reserved': 0, 'in_use': 0}, 'gigabytes': {'reserved': 0, 'in_use': 0}, } self.assertEqual(expected, db.quota_usage_get_all_by_project( self.ctxt, 'project1')) def test_reservation_rollback_negative(self): """Verify we can't make reservations negative on rollback.""" project = 'project1' reservations = _quota_reserve(self.ctxt, project, volumes=2) # Force a smaller reserved value in quota_usages table vol_usage = db.quota_usage_get(self.ctxt, project, 'volumes') with sqlalchemy_api.main_context_manager.writer.using(self.ctxt): vol_usage.reserved -= 1 vol_usage.save(self.ctxt.session) # When rolling back 2 volumes from reserved when there's only 1 in the # quota usage's reserved field, reserved should not go from 1 to -1 # but from 1 to 0 db.reservation_rollback(self.ctxt, reservations, project) expected = {'project_id': project, 'volumes': {'reserved': 0, 'in_use': 0}} self.assertEqual(expected, db.quota_usage_get_all_by_project(self.ctxt, project)) def test_reservation_expire(self): self.values['expire'] = datetime.datetime.utcnow() + \ datetime.timedelta(days=1) _quota_reserve(self.ctxt, 'project1') db.reservation_expire(self.ctxt) expected = {'project_id': 'project1', 'gigabytes': {'reserved': 0, 'in_use': 0}, 'volumes': {'reserved': 0, 'in_use': 0}} self.assertEqual(expected, db.quota_usage_get_all_by_project( self.ctxt, 'project1')) def test_reservation_expire_negative(self): """Verify we can't make reservation negative on expiration.""" project = 'project1' _quota_reserve(self.ctxt, project, volumes=2) # Force a smaller reserved value in quota_usages table vol_usage = db.quota_usage_get(self.ctxt, project, 'volumes') with sqlalchemy_api.main_context_manager.writer.using(self.ctxt): vol_usage.reserved -= 1 vol_usage.save(self.ctxt.session) # When expiring 2 volumes from reserved when there's only 1 in the # quota usage's reserved field, reserved should not go from 1 to -1 # but from 1 to 0 db.reservation_expire(self.ctxt) expected = {'project_id': project, 'volumes': {'reserved': 0, 'in_use': 0}} self.assertEqual(expected, db.quota_usage_get_all_by_project(self.ctxt, project)) @mock.patch('time.sleep', mock.Mock()) def test_quota_reserve_create_usages_race(self): """Test we retry when there is a race in creation.""" orig_get_usages = sqlalchemy_api._get_quota_usages counter = 0 # we want to simulate a duplicate request, so we fake out the first two # attempts to get usages from the database def fake_get_usages(*args, **kwargs): nonlocal counter if counter > 2: return orig_get_usages(*args, **kwargs) counter += 1 return [] resources = quota.QUOTAS.resources quotas = {'volumes': 5} deltas = {'volumes': 2} project_id = 'project1' expire = timeutils.utcnow() + datetime.timedelta(seconds=3600) with mock.patch.object( sqlalchemy_api, '_get_quota_usages', side_effect=fake_get_usages, ): sqlalchemy_api.quota_reserve(self.ctxt, resources, quotas, deltas, expire, 0, 0, project_id=project_id) # Confirm that regardless of who created the DB entry the values are # updated usages = sqlalchemy_api.quota_usage_get_all_by_project(self.ctxt, 
project_id) expected = {'project_id': project_id, 'volumes': {'in_use': 0, 'reserved': deltas['volumes']}} self.assertEqual(expected, usages) class DBAPIMessageTestCase(BaseTest): """Tests for message operations""" def setUp(self): super(DBAPIMessageTestCase, self).setUp() self.context = context.get_admin_context() def _create_fake_messages(self, m_id, time): db.message_create(self.context, {'id': m_id, 'event_id': m_id, 'message_level': 'error', 'project_id': 'fake_id', 'expires_at': time}) def test_cleanup_expired_messages(self): now = timeutils.utcnow() # message expired 1 day ago self._create_fake_messages( uuidutils.generate_uuid(), now - datetime.timedelta(days=1)) # message expired now self._create_fake_messages( uuidutils.generate_uuid(), now) # message expires 1 day in the future self._create_fake_messages( uuidutils.generate_uuid(), now + datetime.timedelta(days=1)) with mock.patch.object(timeutils, 'utcnow') as mock_time_now: mock_time_now.return_value = now db.cleanup_expired_messages(self.context) messages = db.message_get_all(self.context) self.assertEqual(2, len(messages)) class DBAPIQuotaClassTestCase(BaseTest): """Tests for db.api.quota_class_* methods.""" def setUp(self): super(DBAPIQuotaClassTestCase, self).setUp() self.sample_qc = db.quota_class_create(self.ctxt, 'test_qc', 'test_resource', 42) def test_quota_class_get(self): qc = db.quota_class_get(self.ctxt, 'test_qc', 'test_resource') self._assertEqualObjects(self.sample_qc, qc) @mock.patch('oslo_utils.timeutils.utcnow', return_value=UTC_NOW) def test_quota_class_destroy(self, utcnow_mock): self.assertDictEqual( {'deleted': True, 'deleted_at': UTC_NOW}, db.quota_class_destroy(self.ctxt, 'test_qc', 'test_resource')) self.assertRaises(exception.QuotaClassNotFound, db.quota_class_get, self.ctxt, 'test_qc', 'test_resource') def test_quota_class_get_not_found(self): self.assertRaises(exception.QuotaClassNotFound, db.quota_class_get, self.ctxt, 'nonexistent', 'nonexistent') def test_quota_class_get_all_by_name(self): db.quota_class_create(self.ctxt, 'test2', 'res1', 43) db.quota_class_create(self.ctxt, 'test2', 'res2', 44) self.assertEqual({'class_name': 'test_qc', 'test_resource': 42}, db.quota_class_get_all_by_name(self.ctxt, 'test_qc')) self.assertEqual({'class_name': 'test2', 'res1': 43, 'res2': 44}, db.quota_class_get_all_by_name(self.ctxt, 'test2')) def test_quota_class_update(self): db.quota_class_update(self.ctxt, 'test_qc', 'test_resource', 43) updated = db.quota_class_get(self.ctxt, 'test_qc', 'test_resource') self.assertEqual(43, updated['hard_limit']) def test_quota_class_update_resource(self): old = db.quota_class_get(self.ctxt, 'test_qc', 'test_resource') db.quota_class_update_resource(self.ctxt, 'test_resource', 'test_resource1') new = db.quota_class_get(self.ctxt, 'test_qc', 'test_resource1') self.assertEqual(old.id, new.id) self.assertEqual('test_resource1', new.resource) def test_quota_class_destroy_all_by_name(self): db.quota_class_create(self.ctxt, 'test2', 'res1', 43) db.quota_class_create(self.ctxt, 'test2', 'res2', 44) db.quota_class_destroy_all_by_name(self.ctxt, 'test2') self.assertEqual({'class_name': 'test2'}, db.quota_class_get_all_by_name(self.ctxt, 'test2')) class DBAPIQuotaTestCase(BaseTest): """Tests for db.api.quota_* methods.""" def test_quota_create(self): quota = db.quota_create(self.ctxt, 'project1', 'resource', 99) self.assertEqual('resource', quota.resource) self.assertEqual(99, quota.hard_limit) self.assertEqual('project1', quota.project_id) def test_quota_get(self): quota = 
db.quota_create(self.ctxt, 'project1', 'resource', 99) quota_db = db.quota_get(self.ctxt, 'project1', 'resource') self._assertEqualObjects(quota, quota_db) def test_quota_get_all_by_project(self): for i in range(3): for j in range(3): db.quota_create(self.ctxt, 'proj%d' % i, 'res%d' % j, j) for i in range(3): quotas_db = db.quota_get_all_by_project(self.ctxt, 'proj%d' % i) self.assertEqual({'project_id': 'proj%d' % i, 'res0': 0, 'res1': 1, 'res2': 2}, quotas_db) def test_quota_update(self): db.quota_create(self.ctxt, 'project1', 'resource1', 41) db.quota_update(self.ctxt, 'project1', 'resource1', 42) quota = db.quota_get(self.ctxt, 'project1', 'resource1') self.assertEqual(42, quota.hard_limit) self.assertEqual('resource1', quota.resource) self.assertEqual('project1', quota.project_id) def test_quota_update_resource(self): old = db.quota_create(self.ctxt, 'project1', 'resource1', 41) db.quota_update_resource(self.ctxt, 'resource1', 'resource2') new = db.quota_get(self.ctxt, 'project1', 'resource2') self.assertEqual(old.id, new.id) self.assertEqual('resource2', new.resource) def test_quota_update_nonexistent(self): self.assertRaises(exception.ProjectQuotaNotFound, db.quota_update, self.ctxt, 'project1', 'resource1', 42) def test_quota_get_nonexistent(self): self.assertRaises(exception.ProjectQuotaNotFound, db.quota_get, self.ctxt, 'project1', 'resource1') def test_quota_reserve(self): reservations = _quota_reserve(self.ctxt, 'project1') self.assertEqual(2, len(reservations)) quota_usage = db.quota_usage_get_all_by_project(self.ctxt, 'project1') self.assertEqual({'project_id': 'project1', 'gigabytes': {'reserved': 2, 'in_use': 0}, 'volumes': {'reserved': 1, 'in_use': 0}}, quota_usage) def test__get_quota_usages(self): _quota_reserve(self.ctxt, 'project1') with sqlalchemy_api.main_context_manager.reader.using(self.ctxt): quota_usage = sqlalchemy_api._get_quota_usages( self.ctxt, 'project1') self.assertEqual(['gigabytes', 'volumes'], sorted(quota_usage.keys())) def test__get_quota_usages_with_resources(self): _quota_reserve(self.ctxt, 'project1') with sqlalchemy_api.main_context_manager.reader.using(self.ctxt): quota_usage = sqlalchemy_api._get_quota_usages( self.ctxt, 'project1', resources=['volumes']) self.assertEqual(['volumes'], list(quota_usage.keys())) @mock.patch('oslo_utils.timeutils.utcnow', return_value=UTC_NOW) def test_quota_destroy(self, utcnow_mock): db.quota_create(self.ctxt, 'project1', 'resource1', 41) self.assertDictEqual( {'deleted': True, 'deleted_at': UTC_NOW}, db.quota_destroy(self.ctxt, 'project1', 'resource1')) self.assertRaises(exception.ProjectQuotaNotFound, db.quota_get, self.ctxt, 'project1', 'resource1') def test_quota_destroy_by_project(self): # Create limits, reservations and usage for project project = 'project1' _quota_reserve(self.ctxt, project) expected_usage = {'project_id': project, 'volumes': {'reserved': 1, 'in_use': 0}, 'gigabytes': {'reserved': 2, 'in_use': 0}} expected = {'project_id': project, 'gigabytes': 2, 'volumes': 1} # Check that quotas are there self.assertEqual(expected, db.quota_get_all_by_project(self.ctxt, project)) self.assertEqual(expected_usage, db.quota_usage_get_all_by_project(self.ctxt, project)) # Destroy only the limits db.quota_destroy_by_project(self.ctxt, project) # Confirm that limits have been removed self.assertEqual({'project_id': project}, db.quota_get_all_by_project(self.ctxt, project)) # But that usage and reservations are the same self.assertEqual(expected_usage, db.quota_usage_get_all_by_project(self.ctxt, project)) def 
test_quota_destroy_sqlalchemy_all_by_project_(self): # Create limits, reservations and usage for project project = 'project1' _quota_reserve(self.ctxt, project) expected_usage = {'project_id': project, 'volumes': {'reserved': 1, 'in_use': 0}, 'gigabytes': {'reserved': 2, 'in_use': 0}} expected = {'project_id': project, 'gigabytes': 2, 'volumes': 1} expected_result = {'project_id': project} # Check that quotas are there self.assertEqual(expected, db.quota_get_all_by_project(self.ctxt, project)) self.assertEqual(expected_usage, db.quota_usage_get_all_by_project(self.ctxt, project)) # Destroy all quotas using SQLAlchemy Implementation sqlalchemy_api.quota_destroy_all_by_project(self.ctxt, project, only_quotas=False) # Check that all quotas have been deleted self.assertEqual(expected_result, db.quota_get_all_by_project(self.ctxt, project)) self.assertEqual(expected_result, db.quota_usage_get_all_by_project(self.ctxt, project)) def test_quota_usage_get_nonexistent(self): self.assertRaises(exception.QuotaUsageNotFound, db.quota_usage_get, self.ctxt, 'p1', 'nonexitent_resource') def test_quota_usage_get(self): _quota_reserve(self.ctxt, 'p1') quota_usage = db.quota_usage_get(self.ctxt, 'p1', 'gigabytes') expected = {'resource': 'gigabytes', 'project_id': 'p1', 'in_use': 0, 'reserved': 2, 'total': 2} for key, value in expected.items(): self.assertEqual(value, quota_usage[key], key) def test_quota_usage_get_all_by_project(self): _quota_reserve(self.ctxt, 'p1') expected = {'project_id': 'p1', 'volumes': {'in_use': 0, 'reserved': 1}, 'gigabytes': {'in_use': 0, 'reserved': 2}} self.assertEqual(expected, db.quota_usage_get_all_by_project( self.ctxt, 'p1')) def test__quota_usage_create(self): # the actual _quota_usage_create method isn't wrapped in a decorator so # we create a closure to mimic this @sqlalchemy_api.main_context_manager.writer def _quota_usage_create(context, *args, **kwargs): return sqlalchemy_api._quota_usage_create(context, *args, **kwargs) usage = _quota_usage_create( self.ctxt, 'project1', 'resource', in_use=10, reserved=0, until_refresh=None, ) self.assertEqual('project1', usage.project_id) self.assertEqual('resource', usage.resource) self.assertEqual(10, usage.in_use) self.assertEqual(0, usage.reserved) self.assertIsNone(usage.until_refresh) def test__quota_usage_create_duplicate(self): # the actual _quota_usage_create method isn't wrapped in a decorator so # we create a closure to mimic this @sqlalchemy_api.main_context_manager.writer def _quota_usage_create(context, *args, **kwargs): return sqlalchemy_api._quota_usage_create(context, *args, **kwargs) kwargs = { 'project_id': 'project1', 'resource': 'resource', 'in_use': 10, 'reserved': 0, 'until_refresh': None, } _quota_usage_create(self.ctxt, **kwargs) self.assertRaises( oslo_db.exception.DBDuplicateEntry, _quota_usage_create, self.ctxt, **kwargs) class DBAPIBackupTestCase(BaseTest): """Tests for db.api.backup_* methods.""" _ignored_keys = ['id', 'deleted', 'deleted_at', 'created_at', 'updated_at', 'data_timestamp', 'backup_metadata'] def setUp(self): super(DBAPIBackupTestCase, self).setUp() self.created = [db.backup_create(self.ctxt, values) for values in self._get_values()] def _get_values(self, one=False): base_values = { 'user_id': fake.USER_ID, 'project_id': fake.PROJECT_ID, 'volume_id': 'volume', 'host': 'host', 'availability_zone': 'zone', 'display_name': 'display', 'display_description': 'description', 'container': 'container', 'status': 'status', 'fail_reason': 'test', 'service_metadata': 'metadata', 'service': 
'service', 'parent_id': "parent_id", 'size': 1000, 'object_count': 100, 'temp_volume_id': 'temp_volume_id', 'temp_snapshot_id': 'temp_snapshot_id', 'num_dependent_backups': 0, 'snapshot_id': 'snapshot_id', 'encryption_key_id': 'encryption_key_id', 'restore_volume_id': 'restore_volume_id'} if one: return base_values def compose(val, step): if isinstance(val, bool): return val if isinstance(val, str): step = str(step) return val + step return [{k: compose(v, i) for k, v in base_values.items()} for i in range(1, 4)] def test_backup_create(self): values = self._get_values() for i, backup in enumerate(self.created): self.assertEqual(36, len(backup['id'])) # dynamic UUID self._assertEqualObjects(values[i], backup, self._ignored_keys) def test_backup_get(self): for backup in self.created: backup_get = db.backup_get(self.ctxt, backup['id']) self._assertEqualObjects(backup, backup_get) def test_backup_get_deleted(self): backup_dic = {'user_id': fake.USER_ID, 'project_id': fake.PROJECT_ID, 'volume_id': fake.VOLUME_ID, 'size': 1, 'object_count': 1} backup = objects.Backup(self.ctxt, **backup_dic) backup.create() backup.destroy() backup_get = db.backup_get(self.ctxt, backup.id, read_deleted='yes') self.assertEqual(backup.id, backup_get.id) def tests_backup_get_all(self): all_backups = db.backup_get_all(self.ctxt) self._assertEqualListsOfObjects(self.created, all_backups) def tests_backup_get_all_by_filter(self): filters = {'status': self.created[1]['status']} filtered_backups = db.backup_get_all(self.ctxt, filters=filters) self._assertEqualListsOfObjects([self.created[1]], filtered_backups) filters = {'display_name': self.created[1]['display_name']} filtered_backups = db.backup_get_all(self.ctxt, filters=filters) self._assertEqualListsOfObjects([self.created[1]], filtered_backups) filters = {'volume_id': self.created[1]['volume_id']} filtered_backups = db.backup_get_all(self.ctxt, filters=filters) self._assertEqualListsOfObjects([self.created[1]], filtered_backups) filters = {'fake_key': 'fake'} filtered_backups = db.backup_get_all(self.ctxt, filters=filters) self._assertEqualListsOfObjects([], filtered_backups) def tests_backup_get_all_by_filter_metadata(self): backups = self._get_values() for i in range(3): backup = backups[i] backup['metadata'] = {'fake_key': 'fake' + str(i)} created = [db.backup_create(self.ctxt, values) for values in backups] filters = {'metadata': created[1]['metadata']} filtered_backups = db.backup_get_all(self.ctxt, filters=filters) self.assertEqual(len([created[1]]), len(filtered_backups)) def test_backup_get_all_by_host(self): byhost = db.backup_get_all_by_host(self.ctxt, self.created[1]['host']) self._assertEqualObjects(self.created[1], byhost[0]) def test_backup_get_all_by_project(self): byproj = db.backup_get_all_by_project(self.ctxt, self.created[1]['project_id']) self._assertEqualObjects(self.created[1], byproj[0]) byproj = db.backup_get_all_by_project(self.ctxt, self.created[1]['project_id'], {'fake_key': 'fake'}) self._assertEqualListsOfObjects([], byproj) @mock.patch.object(sqlalchemy_api, 'authorize_project_context') def test_backup_get_all_by_volume(self, mock_authorize): byvol = db.backup_get_all_by_volume( self.ctxt, self.created[1]['volume_id'], 'fake_proj') self._assertEqualObjects(self.created[1], byvol[0]) byvol = db.backup_get_all_by_volume(self.ctxt, self.created[1]['volume_id'], 'fake_proj', {'fake_key': 'fake'}) self._assertEqualListsOfObjects([], byvol) mock_authorize.assert_has_calls([ mock.call(self.ctxt, 'fake_proj'), mock.call(self.ctxt, 'fake_proj') 
]) def test_backup_update_nonexistent(self): self.assertRaises(exception.BackupNotFound, db.backup_update, self.ctxt, 'nonexistent', {}) def test_backup_update(self): updated_values = self._get_values(one=True) update_id = self.created[1]['id'] db.backup_update(self.ctxt, update_id, updated_values) updated_backup = db.backup_get(self.ctxt, update_id) self._assertEqualObjects(updated_values, updated_backup, self._ignored_keys) def test_backup_update_with_fail_reason_truncation(self): updated_values = self._get_values(one=True) fail_reason = '0' * 512 updated_values['fail_reason'] = fail_reason update_id = self.created[1]['id'] db.backup_update(self.ctxt, update_id, updated_values) updated_backup = db.backup_get(self.ctxt, update_id) updated_values['fail_reason'] = fail_reason[:255] self._assertEqualObjects(updated_values, updated_backup, self._ignored_keys) @mock.patch('oslo_utils.timeutils.utcnow', return_value=UTC_NOW) def test_backup_destroy(self, utcnow_mock): for backup in self.created: self.assertDictEqual( {'status': fields.BackupStatus.DELETED, 'deleted': True, 'deleted_at': UTC_NOW}, db.backup_destroy(self.ctxt, backup['id'])) self.assertEqual([], db.backup_get_all(self.ctxt)) def test_backup_not_found(self): self.assertRaises(exception.BackupNotFound, db.backup_get, self.ctxt, 'notinbase') class DBAPIProcessSortParamTestCase(test.TestCase): def test_process_sort_params_defaults(self): """Verifies default sort parameters.""" sort_keys, sort_dirs = sqlalchemy_api.process_sort_params([], []) self.assertEqual(['created_at', 'id'], sort_keys) self.assertEqual(['asc', 'asc'], sort_dirs) sort_keys, sort_dirs = sqlalchemy_api.process_sort_params(None, None) self.assertEqual(['created_at', 'id'], sort_keys) self.assertEqual(['asc', 'asc'], sort_dirs) def test_process_sort_params_override_default_keys(self): """Verifies that the default keys can be overridden.""" sort_keys, sort_dirs = sqlalchemy_api.process_sort_params( [], [], default_keys=['key1', 'key2', 'key3']) self.assertEqual(['key1', 'key2', 'key3'], sort_keys) self.assertEqual(['asc', 'asc', 'asc'], sort_dirs) def test_process_sort_params_override_default_dir(self): """Verifies that the default direction can be overridden.""" sort_keys, sort_dirs = sqlalchemy_api.process_sort_params( [], [], default_dir='dir1') self.assertEqual(['created_at', 'id'], sort_keys) self.assertEqual(['dir1', 'dir1'], sort_dirs) def test_process_sort_params_override_default_key_and_dir(self): """Verifies that the default key and dir can be overridden.""" sort_keys, sort_dirs = sqlalchemy_api.process_sort_params( [], [], default_keys=['key1', 'key2', 'key3'], default_dir='dir1') self.assertEqual(['key1', 'key2', 'key3'], sort_keys) self.assertEqual(['dir1', 'dir1', 'dir1'], sort_dirs) sort_keys, sort_dirs = sqlalchemy_api.process_sort_params( [], [], default_keys=[], default_dir='dir1') self.assertEqual([], sort_keys) self.assertEqual([], sort_dirs) def test_process_sort_params_non_default(self): """Verifies that non-default keys are added correctly.""" sort_keys, sort_dirs = sqlalchemy_api.process_sort_params( ['key1', 'key2'], ['asc', 'desc']) self.assertEqual(['key1', 'key2', 'created_at', 'id'], sort_keys) # First sort_dir in list is used when adding the default keys self.assertEqual(['asc', 'desc', 'asc', 'asc'], sort_dirs) def test_process_sort_params_default(self): """Verifies that default keys are added correctly.""" sort_keys, sort_dirs = sqlalchemy_api.process_sort_params( ['id', 'key2'], ['asc', 'desc']) self.assertEqual(['id', 'key2', 
'created_at'], sort_keys) self.assertEqual(['asc', 'desc', 'asc'], sort_dirs) # Include default key value, rely on default direction sort_keys, sort_dirs = sqlalchemy_api.process_sort_params( ['id', 'key2'], []) self.assertEqual(['id', 'key2', 'created_at'], sort_keys) self.assertEqual(['asc', 'asc', 'asc'], sort_dirs) def test_process_sort_params_default_dir(self): """Verifies that the default dir is applied to all keys.""" # Direction is set, ignore default dir sort_keys, sort_dirs = sqlalchemy_api.process_sort_params( ['id', 'key2'], ['desc'], default_dir='dir') self.assertEqual(['id', 'key2', 'created_at'], sort_keys) self.assertEqual(['desc', 'desc', 'desc'], sort_dirs) # But should be used if no direction is set sort_keys, sort_dirs = sqlalchemy_api.process_sort_params( ['id', 'key2'], [], default_dir='dir') self.assertEqual(['id', 'key2', 'created_at'], sort_keys) self.assertEqual(['dir', 'dir', 'dir'], sort_dirs) def test_process_sort_params_unequal_length(self): """Verifies that a sort direction list is applied correctly.""" sort_keys, sort_dirs = sqlalchemy_api.process_sort_params( ['id', 'key2', 'key3'], ['desc']) self.assertEqual(['id', 'key2', 'key3', 'created_at'], sort_keys) self.assertEqual(['desc', 'desc', 'desc', 'desc'], sort_dirs) # Default direction is the first key in the list sort_keys, sort_dirs = sqlalchemy_api.process_sort_params( ['id', 'key2', 'key3'], ['desc', 'asc']) self.assertEqual(['id', 'key2', 'key3', 'created_at'], sort_keys) self.assertEqual(['desc', 'asc', 'desc', 'desc'], sort_dirs) sort_keys, sort_dirs = sqlalchemy_api.process_sort_params( ['id', 'key2', 'key3'], ['desc', 'asc', 'asc']) self.assertEqual(['id', 'key2', 'key3', 'created_at'], sort_keys) self.assertEqual(['desc', 'asc', 'asc', 'desc'], sort_dirs) def test_process_sort_params_extra_dirs_lengths(self): """InvalidInput raised if more directions are given.""" self.assertRaises(exception.InvalidInput, sqlalchemy_api.process_sort_params, ['key1', 'key2'], ['asc', 'desc', 'desc']) def test_process_sort_params_invalid_sort_dir(self): """InvalidInput raised if invalid directions are given.""" for dirs in [['foo'], ['asc', 'foo'], ['asc', 'desc', 'foo']]: self.assertRaises(exception.InvalidInput, sqlalchemy_api.process_sort_params, ['key'], dirs) class DBAPIDriverInitiatorDataTestCase(BaseTest): initiator = 'iqn.1993-08.org.debian:01:222' namespace = 'test_ns' def test_insert(self): key = 'key1' value = 'foo' db.driver_initiator_data_insert_by_key( self.ctxt, self.initiator, self.namespace, key, value, ) data = db.driver_initiator_data_get( self.ctxt, self.initiator, self.namespace, ) self.assertEqual(data[0].key, key) self.assertEqual(data[0].value, value) def test_insert_already_exists(self): key = 'key1' value = 'foo' db.driver_initiator_data_insert_by_key( self.ctxt, self.initiator, self.namespace, key, value, ) self.assertRaises( exception.DriverInitiatorDataExists, db.driver_initiator_data_insert_by_key, self.ctxt, self.initiator, self.namespace, key, value, ) @ddt.ddt class DBAPIImageVolumeCacheEntryTestCase(BaseTest): def _validate_entry(self, entry, host, cluster_name, image_id, image_updated_at, volume_id, size): self.assertIsNotNone(entry) self.assertIsNotNone(entry['id']) self.assertEqual(host, entry['host']) self.assertEqual(cluster_name, entry['cluster_name']) self.assertEqual(image_id, entry['image_id']) self.assertEqual(image_updated_at, entry['image_updated_at']) self.assertEqual(volume_id, entry['volume_id']) self.assertEqual(size, entry['size']) 
self.assertIsNotNone(entry['last_used']) def test_create_delete_query_cache_entry(self): host = 'abc@123#poolz' cluster_name = 'def@123#poolz' image_id = 'c06764d7-54b0-4471-acce-62e79452a38b' image_updated_at = datetime.datetime.utcnow() volume_id = 'e0e4f819-24bb-49e6-af1e-67fb77fc07d1' size = 6 entry = db.image_volume_cache_create(self.ctxt, host, cluster_name, image_id, image_updated_at, volume_id, size) self._validate_entry(entry, host, cluster_name, image_id, image_updated_at, volume_id, size) entry = db.image_volume_cache_get_and_update_last_used(self.ctxt, image_id, host=host) self._validate_entry(entry, host, cluster_name, image_id, image_updated_at, volume_id, size) entry = db.image_volume_cache_get_by_volume_id(self.ctxt, volume_id) self._validate_entry(entry, host, cluster_name, image_id, image_updated_at, volume_id, size) db.image_volume_cache_delete(self.ctxt, entry['volume_id']) entry = db.image_volume_cache_get_and_update_last_used(self.ctxt, image_id, host=host) self.assertIsNone(entry) def test_cache_entry_get_multiple(self): host = 'abc@123#poolz' cluster_name = 'def@123#poolz' image_id = 'c06764d7-54b0-4471-acce-62e79452a38b' image_updated_at = datetime.datetime.utcnow() volume_id = 'e0e4f819-24bb-49e6-af1e-67fb77fc07d1' size = 6 entries = [] for i in range(0, 3): entries.append(db.image_volume_cache_create(self.ctxt, host, cluster_name, image_id, image_updated_at, volume_id, size)) # It is considered OK for the cache to have multiple of the same # entries. Expect only a single one from the query. entry = db.image_volume_cache_get_and_update_last_used(self.ctxt, image_id, host=host) self._validate_entry(entry, host, cluster_name, image_id, image_updated_at, volume_id, size) # We expect to get the same one on subsequent queries due to the # last_used field being updated each time and ordering by it. 
entry_id = entry['id'] entry = db.image_volume_cache_get_and_update_last_used(self.ctxt, image_id, host=host) self._validate_entry(entry, host, cluster_name, image_id, image_updated_at, volume_id, size) self.assertEqual(entry_id, entry['id']) # Cleanup for entry in entries: db.image_volume_cache_delete(self.ctxt, entry['volume_id']) def test_cache_entry_get_none(self): host = 'abc@123#poolz' image_id = 'c06764d7-54b0-4471-acce-62e79452a38b' entry = db.image_volume_cache_get_and_update_last_used(self.ctxt, image_id, host=host) self.assertIsNone(entry) def test_cache_entry_get_by_volume_id_none(self): volume_id = 'e0e4f819-24bb-49e6-af1e-67fb77fc07d1' entry = db.image_volume_cache_get_by_volume_id(self.ctxt, volume_id) self.assertIsNone(entry) def test_cache_entry_get_all_for_host(self): host = 'abc@123#poolz' image_updated_at = datetime.datetime.utcnow() size = 6 entries = [] for i in range(0, 3): entries.append(db.image_volume_cache_create(self.ctxt, host, 'cluster-%s' % i, 'image-' + str(i), image_updated_at, 'vol-' + str(i), size)) other_entry = db.image_volume_cache_create(self.ctxt, 'someOtherHost', 'someOtherCluster', 'image-12345', image_updated_at, 'vol-1234', size) found_entries = db.image_volume_cache_get_all(self.ctxt, host=host) self.assertIsNotNone(found_entries) self.assertEqual(len(entries), len(found_entries)) for found_entry in found_entries: for entry in entries: if found_entry['id'] == entry['id']: self._validate_entry(found_entry, entry['host'], entry['cluster_name'], entry['image_id'], entry['image_updated_at'], entry['volume_id'], entry['size']) # Cleanup db.image_volume_cache_delete(self.ctxt, other_entry['volume_id']) for entry in entries: db.image_volume_cache_delete(self.ctxt, entry['volume_id']) def test_cache_entry_get_all_for_host_none(self): host = 'abc@123#poolz' entries = db.image_volume_cache_get_all(self.ctxt, host=host) self.assertEqual([], entries) @ddt.data('host1@backend1#pool1', 'host1@backend1') def test_cache_entry_include_in_cluster_by_host(self, host): """Basic cache include test filtering by host and with full rename.""" image_updated_at = datetime.datetime.utcnow() image_cache = ( db.image_volume_cache_create( self.ctxt, 'host1@backend1#pool1', 'cluster1@backend1#pool1', 'image-1', image_updated_at, 'vol-1', 6), db.image_volume_cache_create( self.ctxt, 'host1@backend2#pool2', 'cluster1@backend2#pool2', 'image-2', image_updated_at, 'vol-2', 6), db.image_volume_cache_create( self.ctxt, 'host2@backend#pool', 'cluster2@backend#pool', 'image-3', image_updated_at, 'vol-3', 6), ) cluster_name = 'my_cluster' result = db.image_volume_cache_include_in_cluster(self.ctxt, cluster_name, partial_rename=False, host=host) self.assertEqual(1, result) db_image_cache = db.image_volume_cache_get_by_volume_id( self.ctxt, image_cache[0].volume_id) self.assertEqual(cluster_name, db_image_cache.cluster_name) class DBAPIGenericTestCase(BaseTest): def test_resource_exists_volume(self): # NOTE(geguileo): We create 2 volumes in this test (even if the second # one is not being used) to confirm that the DB exists subquery is # properly formulated and doesn't result in multiple rows, as such # case would raise an exception when converting the result to an # scalar. 
db.volume_create(self.ctxt, {'id': fake.VOLUME_ID, 'volume_type_id': fake.VOLUME_TYPE_ID}) db.volume_create(self.ctxt, {'id': fake.VOLUME2_ID, 'volume_type_id': fake.VOLUME_TYPE_ID}) model = db.get_model_for_versioned_object(objects.Volume) res = sqlalchemy_api.resource_exists(self.ctxt, model, fake.VOLUME_ID) self.assertTrue(res, msg="Couldn't find existing Volume") def test_resource_exists_volume_fails(self): db.volume_create(self.ctxt, {'id': fake.VOLUME_ID, 'volume_type_id': fake.VOLUME_TYPE_ID}) model = db.get_model_for_versioned_object(objects.Volume) res = sqlalchemy_api.resource_exists(self.ctxt, model, fake.VOLUME2_ID) self.assertFalse(res, msg='Found nonexistent Volume') def test_resource_exists_snapshot(self): # Read NOTE in test_resource_exists_volume on why we create 2 snapshots vol = db.volume_create(self.ctxt, {'id': fake.VOLUME_ID, 'volume_type_id': fake.VOLUME_TYPE_ID}) db.snapshot_create(self.ctxt, {'id': fake.SNAPSHOT_ID, 'volume_id': vol.id, 'volume_type_id': fake.VOLUME_TYPE_ID}) db.snapshot_create(self.ctxt, {'id': fake.SNAPSHOT2_ID, 'volume_id': vol.id, 'volume_type_id': fake.VOLUME_TYPE_ID}) model = db.get_model_for_versioned_object(objects.Snapshot) res = sqlalchemy_api.resource_exists(self.ctxt, model, fake.SNAPSHOT_ID) self.assertTrue(res, msg="Couldn't find existing Snapshot") def test_resource_exists_snapshot_fails(self): vol = db.volume_create(self.ctxt, {'id': fake.VOLUME_ID, 'volume_type_id': fake.VOLUME_TYPE_ID}) db.snapshot_create(self.ctxt, {'id': fake.SNAPSHOT_ID, 'volume_id': vol.id, 'volume_type_id': fake.VOLUME_TYPE_ID}) model = db.get_model_for_versioned_object(objects.Snapshot) res = sqlalchemy_api.resource_exists(self.ctxt, model, fake.SNAPSHOT2_ID) self.assertFalse(res, msg='Found nonexistent Snapshot') def test_resource_exists_volume_project_separation(self): user_context = context.RequestContext(fake.USER_ID, fake.PROJECT_ID, is_admin=False) user2_context = context.RequestContext(fake.USER2_ID, fake.PROJECT2_ID, is_admin=False) volume = db.volume_create(user_context, {'project_id': fake.PROJECT_ID, 'volume_type_id': fake.VOLUME_TYPE_ID}) model = db.get_model_for_versioned_object(objects.Volume) # Owner can find it res = sqlalchemy_api.resource_exists(user_context, model, volume.id) self.assertTrue(res, msg='Owner cannot find its own Volume') # Non admin user that is not the owner cannot find it res = sqlalchemy_api.resource_exists(user2_context, model, volume.id) self.assertFalse(res, msg="Non admin user can find somebody else's " "volume") # Admin can find it res = sqlalchemy_api.resource_exists(self.ctxt, model, volume.id) self.assertTrue(res, msg="Admin cannot find the volume") def test_resource_exists_snapshot_project_separation(self): user_context = context.RequestContext(fake.USER_ID, fake.PROJECT_ID, is_admin=False) user2_context = context.RequestContext(fake.USER2_ID, fake.PROJECT2_ID, is_admin=False) vol = db.volume_create(user_context, {'project_id': fake.PROJECT_ID, 'volume_type_id': fake.VOLUME_TYPE_ID}) snap = db.snapshot_create(self.ctxt, {'project_id': fake.PROJECT_ID, 'volume_id': vol.id, 'volume_type_id': fake.VOLUME_TYPE_ID}) model = db.get_model_for_versioned_object(objects.Snapshot) # Owner can find it res = sqlalchemy_api.resource_exists(user_context, model, snap.id) self.assertTrue(res, msg='Owner cannot find its own Snapshot') # Non admin user that is not the owner cannot find it res = sqlalchemy_api.resource_exists(user2_context, model, snap.id) self.assertFalse(res, msg="Non admin user can find somebody else's " 
"Snapshot") # Admin can find it res = sqlalchemy_api.resource_exists(self.ctxt, model, snap.id) self.assertTrue(res, msg="Admin cannot find the Snapshot") class EngineFacadeTestCase(BaseTest): """Tests for message operations""" def setUp(self): super(EngineFacadeTestCase, self).setUp() self.user_id = fake.USER_ID self.project_id = fake.PROJECT_ID self.context = context.RequestContext(self.user_id, self.project_id) def test_use_single_context_session_writer(self): # Checks that session in context would not be overwritten by # annotation @sqlalchemy_api.main_context_manager.writer if annotation # is used twice. @sqlalchemy_api.main_context_manager.writer def fake_parent_method(context): session = context.session return fake_child_method(context), session @sqlalchemy_api.main_context_manager.writer def fake_child_method(context): session = context.session sqlalchemy_api.model_query(context, models.Volume) return session parent_session, child_session = fake_parent_method(self.context) self.assertEqual(parent_session, child_session) def test_use_single_context_session_reader(self): # Checks that session in context would not be overwritten by # annotation @sqlalchemy_api.main_context_manager.reader if annotation # is used twice. @sqlalchemy_api.main_context_manager.reader def fake_parent_method(context): session = context.session return fake_child_method(context), session @sqlalchemy_api.main_context_manager.reader def fake_child_method(context): session = context.session sqlalchemy_api.model_query(context, models.Volume) return session parent_session, child_session = fake_parent_method(self.context) self.assertEqual(parent_session, child_session) @ddt.ddt class DBAPIBackendTestCase(BaseTest): @ddt.data((True, True), (True, False), (False, True), (False, False)) @ddt.unpack def test_is_backend_frozen_service(self, frozen, pool): service = utils.create_service(self.ctxt, {'frozen': frozen}) utils.create_service(self.ctxt, {'host': service.host + '2', 'frozen': not frozen}) host = service.host if pool: host += '#poolname' self.assertEqual(frozen, db.is_backend_frozen(self.ctxt, host, service.cluster_name)) @ddt.data((True, True), (True, False), (False, True), (False, False)) @ddt.unpack def test_is_backend_frozen_cluster(self, frozen, pool): cluster = utils.create_cluster(self.ctxt, frozen=frozen) utils.create_service(self.ctxt, {'frozen': frozen, 'host': 'hostA', 'cluster_name': cluster.name}) service = utils.create_service(self.ctxt, {'frozen': not frozen, 'host': 'hostB', 'cluster_name': cluster.name}) utils.create_populated_cluster(self.ctxt, 3, 0, frozen=not frozen, name=cluster.name + '2') host = service.host cluster = service.cluster_name if pool: host += '#poolname' cluster += '#poolname' self.assertEqual(frozen, db.is_backend_frozen(self.ctxt, host, cluster)) @ddt.ddt class DBAPIGroupTypeTestCase(BaseTest): """Tests for the db.api.group_type_* methods.""" def test_group_type_create__exists(self): gt = db.group_type_create(self.ctxt, {'name': 'n2'}) self.assertRaises( exception.GroupTypeExists, db.group_type_create, self.ctxt, {'name': gt['name'], 'id': gt['id']}, ) def test_volume_type_access_add_remove(self): gt = db.group_type_create(self.ctxt, {'name': 'n2'}) db.group_type_access_add(self.ctxt, gt['id'], 'fake_project') gtas = db.group_type_access_get_all(self.ctxt, gt['id']) self.assertEqual(1, len(gtas)) db.group_type_access_remove(self.ctxt, gt['id'], 'fake_project') gtas = db.group_type_access_get_all(self.ctxt, gt['id']) self.assertEqual(0, len(gtas)) def 
test_group_type_access_add__exists(self): gt = db.group_type_create(self.ctxt, {'name': 'my_group_type'}) db.group_type_access_add(self.ctxt, gt['id'], 'fake_project') gtas = db.group_type_access_get_all(self.ctxt, gt['id']) self.assertEqual(1, len(gtas)) self.assertRaises( exception.GroupTypeAccessExists, db.group_type_access_add, self.ctxt, gt['id'], 'fake_project', ) def test_group_get_all_by_host(self): grp_type = db.group_type_create(self.ctxt, {'name': 'my_group_type'}) groups = [] backend = 'host1@lvm' for i in range(3): groups.append([db.group_create( self.ctxt, {'host': '%(b)s%(n)d' % {'b': backend, 'n': i}, 'group_type_id': grp_type['id']}) for j in range(3)]) for i in range(3): host = '%(b)s%(n)d' % {'b': backend, 'n': i} filters = {'host': host, 'backend_match_level': 'backend'} grps = db.group_get_all( self.ctxt, filters=filters) self._assertEqualListsOfObjects(groups[i], grps) for grp in grps: db.group_destroy(self.ctxt, grp['id']) db.group_type_destroy(self.ctxt, grp_type['id']) def test_group_get_all_by_host_with_pools(self): grp_type = db.group_type_create(self.ctxt, {'name': 'my_group_type'}) groups = [] backend = 'host1@lvm' pool = '%s#pool1' % backend grp_on_host_wo_pool = [db.group_create( self.ctxt, {'host': backend, 'group_type_id': grp_type['id']}) for j in range(3)] grp_on_host_w_pool = [db.group_create( self.ctxt, {'host': pool, 'group_type_id': grp_type['id']})] groups.append(grp_on_host_wo_pool + grp_on_host_w_pool) # insert an additional record that doesn't belongs to the same # host as 'foo' and test if it is included in the result grp_foobar = db.group_create(self.ctxt, {'host': '%sfoo' % backend, 'group_type_id': grp_type['id']}) filters = {'host': backend, 'backend_match_level': 'backend'} grps = db.group_get_all(self.ctxt, filters=filters) self._assertEqualListsOfObjects(groups[0], grps) for grp in grps: db.group_destroy(self.ctxt, grp['id']) db.group_destroy(self.ctxt, grp_foobar['id']) db.group_type_destroy(self.ctxt, grp_type['id']) def _create_gs_to_test_include_in(self): """Helper method for test_group_include_in_* tests.""" return [ db.group_create( self.ctxt, {'host': 'host1@backend1#pool1', 'cluster_name': 'cluster1@backend1#pool1'}), db.group_create( self.ctxt, {'host': 'host1@backend2#pool2', 'cluster_name': 'cluster1@backend2#pool1'}), db.group_create( self.ctxt, {'host': 'host2@backend#poo1', 'cluster_name': 'cluster2@backend#pool'}), ] @ddt.data('host1@backend1#pool1', 'host1@backend1') def test_group_include_in_cluster_by_host(self, host): group = self._create_gs_to_test_include_in()[0] cluster_name = 'my_cluster' result = db.group_include_in_cluster(self.ctxt, cluster_name, partial_rename=False, host=host) self.assertEqual(1, result) db_group = db.group_get(self.ctxt, group.id) self.assertEqual(cluster_name, db_group.cluster_name) def test_group_include_in_cluster_by_host_multiple(self): groups = self._create_gs_to_test_include_in()[0:2] host = 'host1' cluster_name = 'my_cluster' result = db.group_include_in_cluster(self.ctxt, cluster_name, partial_rename=True, host=host) self.assertEqual(2, result) db_group = [db.group_get(self.ctxt, groups[0].id), db.group_get(self.ctxt, groups[1].id)] for i in range(2): self.assertEqual(cluster_name + groups[i].host[len(host):], db_group[i].cluster_name) @ddt.data('cluster1@backend1#pool1', 'cluster1@backend1') def test_group_include_in_cluster_by_cluster_name(self, cluster_name): group = self._create_gs_to_test_include_in()[0] new_cluster_name = 'cluster_new@backend1#pool' result = 
db.group_include_in_cluster(self.ctxt, new_cluster_name, partial_rename=False, cluster_name=cluster_name) self.assertEqual(1, result) db_group = db.group_get(self.ctxt, group.id) self.assertEqual(new_cluster_name, db_group.cluster_name) def test_group_include_in_cluster_by_cluster_multiple(self): groups = self._create_gs_to_test_include_in()[0:2] cluster_name = 'cluster1' new_cluster_name = 'my_cluster' result = db.group_include_in_cluster(self.ctxt, new_cluster_name, partial_rename=True, cluster_name=cluster_name) self.assertEqual(2, result) db_groups = [db.group_get(self.ctxt, groups[0].id), db.group_get(self.ctxt, groups[1].id)] for i in range(2): self.assertEqual( new_cluster_name + groups[i].cluster_name[len(cluster_name):], db_groups[i].cluster_name) class OnlineMigrationTestCase(BaseTest): # TODO: (D Release) remove method and this comment @mock.patch.object(sqlalchemy_api, 'remove_temporary_admin_metadata_data_migration') def test_db_remove_temporary_admin_metadata_data_migration(self, migration_mock): """Test that DB layer method properly calls implementation layer.""" params = (mock.sentinel.ctxt, mock.sentinel.max_count) db.remove_temporary_admin_metadata_data_migration(*params) migration_mock.assert_called_once_with(*params) # TODO: (D Release) remove method and this comment @mock.patch.object(sqlalchemy_api, 'models') @mock.patch.object(sqlalchemy_api, 'model_query') def test_remove_temporary_admin_metadata_data_migration_mocked( self, query_mock, models_mock): """Test method implementation.""" # Call DB API layer directly to test return values total, updated = db.remove_temporary_admin_metadata_data_migration( self.ctxt, mock.sentinel.max_count) self.assertEqual(2, query_mock.call_count) query_mock.assert_called_with(self.ctxt, models_mock.VolumeAdminMetadata) filter_by = query_mock.return_value.filter_by filter_by.assert_called_once_with(key='temporary') query = filter_by.return_value query.count.assert_called_once_with() query.limit.assert_called_once_with(mock.sentinel.max_count) subquery = query.limit.return_value.subquery subquery.assert_called_once_with() del_vals_mock = models_mock.VolumeAdminMetadata.delete_values del_vals_mock.assert_called_once_with() filter_subquery = query_mock.return_value.filter filter_subquery.assert_called_once_with( models_mock.VolumeAdminMetadata.id.in_(filter_subquery)) update = filter_subquery.return_value.update update_args = {'synchronize_session': False} update.assert_called_once_with(del_vals_mock.return_value, **update_args) # TODO: (D Release) remove method and this comment def test_remove_temporary_admin_metadata_data_migration(self): """Test migration's full implementation.""" if not utils.is_db_dialect('mysql'): raise test.testtools.TestCase.skipException( 'Only MySQL supports UPDATE on a LIMIT query') vol1_admin_meta = {'temporary_not': 'false'} vol1 = utils.create_volume(self.ctxt, display_name='normal', admin_metadata=vol1_admin_meta) vol2_meta = {'temporary': True} vol2 = utils.create_volume(self.ctxt, display_name='metadata', metadata=vol2_meta) vol3_admin_meta = {'temporary': 'true', 'temp': 'true'} vol3 = utils.create_volume( self.ctxt, display_name='admin_metadata', use_quota=False, admin_metadata=vol3_admin_meta) # Call DB API layer directly to test return values total, updated = db.remove_temporary_admin_metadata_data_migration( self.ctxt, 4) self.assertEqual(1, updated) vol1.refresh() self.assertEqual(({}, vol1_admin_meta), (vol1.metadata, vol1.admin_metadata)) vol2.refresh() self.assertEqual((vol2_meta, {}), 
(vol2.metadata, vol2.admin_metadata)) vol3.refresh() vol3_admin_meta.pop('temporary') self.assertEqual(({}, vol3_admin_meta), (vol3.metadata, vol2.admin_metadata)) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/test_db_worker_api.py0000664000175000017500000002521000000000000022674 0ustar00zuulzuul00000000000000# Copyright (c) 2016 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Unit tests for cinder.db.api.Worker""" import time import uuid from oslo_db import exception as db_exception from cinder import context from cinder import db from cinder import exception from cinder.tests.unit import fake_constants as fake from cinder.tests.unit import test class DBAPIWorkerTestCase(test.TestCase, test.ModelsObjectComparatorMixin): worker_fields = {'resource_type': 'Volume', 'resource_id': fake.VOLUME_ID, 'status': 'creating'} def _uuid(self): return str(uuid.uuid4()) def setUp(self): super(DBAPIWorkerTestCase, self).setUp() self.ctxt = context.get_admin_context() def test_worker_create_and_get(self): """Test basic creation of a worker record.""" worker = db.worker_create(self.ctxt, **self.worker_fields) db_worker = db.worker_get(self.ctxt, id=worker.id) self._assertEqualObjects(worker, db_worker) def test_worker_create_unique_constrains(self): """Test when we use an already existing resource type and id.""" db.worker_create(self.ctxt, **self.worker_fields) self.assertRaises(exception.WorkerExists, db.worker_create, self.ctxt, resource_type=self.worker_fields['resource_type'], resource_id=self.worker_fields['resource_id'], status='not_' + self.worker_fields['status']) def test_worker_create_missing_required_field(self): """Try creating a worker with a missing required field.""" for field in self.worker_fields: params = self.worker_fields.copy() del params[field] self.assertRaises(db_exception.DBError, db.worker_create, self.ctxt, **params) def test_worker_create_invalid_field(self): """Try creating a worker with a non existent db field.""" self.assertRaises(TypeError, db.worker_create, self.ctxt, myfield='123', **self.worker_fields) def test_worker_get_non_existent(self): """Check basic non existent worker record get method.""" db.worker_create(self.ctxt, **self.worker_fields) self.assertRaises(exception.WorkerNotFound, db.worker_get, self.ctxt, service_id='1', **self.worker_fields) def _create_workers(self, num, read_back=False, **fields): workers = [] base_params = self.worker_fields.copy() base_params.update(fields) for i in range(num): params = base_params.copy() params['resource_id'] = self._uuid() workers.append(db.worker_create(self.ctxt, **params)) if read_back: for i in range(len(workers)): workers[i] = db.worker_get(self.ctxt, id=workers[i].id) return workers def test_worker_get_all(self): """Test basic get_all method.""" self._create_workers(1) service = db.service_create(self.ctxt, {}) workers = self._create_workers(3, service_id=service.id) db_workers = 
db.worker_get_all(self.ctxt, service_id=service.id) self._assertEqualListsOfObjects(workers, db_workers) def test_worker_get_all_until(self): """Test get_all until a specific time.""" workers = self._create_workers(3, read_back=True) timestamp = workers[-1].updated_at time.sleep(0.1) self._create_workers(3) db_workers = db.worker_get_all(self.ctxt, until=timestamp) self._assertEqualListsOfObjects(workers, db_workers) def test_worker_get_all_returns_empty(self): """Test that get_all returns an empty list when there's no results.""" self._create_workers(3, deleted=True) db_workers = db.worker_get_all(self.ctxt) self.assertListEqual([], db_workers) def test_worker_update_not_exists(self): """Test worker update when the worker doesn't exist.""" self.assertRaises(exception.WorkerNotFound, db.worker_update, self.ctxt, 1) def test_worker_update(self): """Test basic worker update.""" worker = self._create_workers(1)[0] worker = db.worker_get(self.ctxt, id=worker.id) res = db.worker_update(self.ctxt, worker.id, service_id=1) self.assertEqual(1, res) worker.service_id = 1 db_worker = db.worker_get(self.ctxt, id=worker.id) self._assertEqualObjects(worker, db_worker, ['updated_at', 'race_preventer']) self.assertEqual(worker.race_preventer + 1, db_worker.race_preventer) def test_worker_update_update_orm(self): """Test worker update updating the worker orm object.""" worker = self._create_workers(1)[0] res = db.worker_update(self.ctxt, worker.id, orm_worker=worker, service_id=1) self.assertEqual(1, res) db_worker = db.worker_get(self.ctxt, id=worker.id) # If we are updating the ORM object we don't ignore the update_at field # because it will get updated in the ORM instance. self._assertEqualObjects(worker, db_worker) def test_worker_destroy(self): """Test that worker destroy really deletes the DB entry.""" worker = self._create_workers(1)[0] res = db.worker_destroy(self.ctxt, id=worker.id) self.assertEqual(1, res) db_workers = db.worker_get_all(self.ctxt, read_deleted='yes') self.assertListEqual([], db_workers) def test_worker_destroy_non_existent(self): """Test that worker destroy returns 0 when entry doesn't exist.""" res = db.worker_destroy(self.ctxt, id=100) self.assertEqual(0, res) def test_worker_claim(self): """Test worker claim of normal DB entry.""" service_id = 1 worker = db.worker_create(self.ctxt, resource_type='Volume', resource_id=fake.VOLUME_ID, status='deleting') res = db.worker_claim_for_cleanup(self.ctxt, service_id, worker) self.assertEqual(1, res) db_worker = db.worker_get(self.ctxt, id=worker.id) self._assertEqualObjects(worker, db_worker, ['updated_at']) self.assertEqual(service_id, db_worker.service_id) self.assertEqual(worker.service_id, db_worker.service_id) def test_worker_claim_fails_status_change(self): """Test that claim fails if the work entry has changed its status.""" worker = db.worker_create(self.ctxt, resource_type='Volume', resource_id=fake.VOLUME_ID, status='deleting') worker.status = 'creating' res = db.worker_claim_for_cleanup(self.ctxt, 1, worker) self.assertEqual(0, res) db_worker = db.worker_get(self.ctxt, id=worker.id) self._assertEqualObjects(worker, db_worker, ['status']) self.assertIsNone(db_worker.service_id) def test_worker_claim_fails_service_change(self): """Test that claim fails on worker service change.""" failed_service = 1 working_service = 2 this_service = 3 worker = db.worker_create(self.ctxt, resource_type='Volume', resource_id=fake.VOLUME_ID, status='deleting', service_id=working_service) worker.service_id = failed_service res = 
db.worker_claim_for_cleanup(self.ctxt, this_service, worker) self.assertEqual(0, res) db_worker = db.worker_get(self.ctxt, id=worker.id) self.assertEqual(working_service, db_worker.service_id) def test_worker_claim_same_service(self): """Test worker claim of a DB entry that has our service_id.""" service_id = 1 worker = db.worker_create(self.ctxt, resource_type='Volume', resource_id=fake.VOLUME_ID, status='deleting', service_id=service_id) # Read from DB to get updated_at field worker = db.worker_get(self.ctxt, id=worker.id) claimed_worker = db.worker_get(self.ctxt, id=worker.id) res = db.worker_claim_for_cleanup(self.ctxt, service_id, claimed_worker) self.assertEqual(1, res) db_worker = db.worker_get(self.ctxt, id=worker.id) self._assertEqualObjects(claimed_worker, db_worker) self._assertEqualObjects(worker, db_worker, ['updated_at', 'race_preventer']) self.assertNotEqual(worker.updated_at, db_worker.updated_at) self.assertEqual(worker.race_preventer + 1, db_worker.race_preventer) def test_worker_claim_fails_this_service_claimed(self): """Test claim fails when worker was already claimed by this service.""" service_id = 1 worker = db.worker_create(self.ctxt, resource_type='Volume', resource_id=fake.VOLUME_ID, status='creating', service_id=service_id) # Read it back to have the updated_at value worker = db.worker_get(self.ctxt, id=worker.id) claimed_worker = db.worker_get(self.ctxt, id=worker.id) time.sleep(0.1) # Simulate that this service starts processing this entry res = db.worker_claim_for_cleanup(self.ctxt, service_id, claimed_worker) self.assertEqual(1, res) res = db.worker_claim_for_cleanup(self.ctxt, service_id, worker) self.assertEqual(0, res) db_worker = db.worker_get(self.ctxt, id=worker.id) self._assertEqualObjects(claimed_worker, db_worker) self._assertEqualObjects(worker, db_worker, ['updated_at', 'race_preventer']) self.assertNotEqual(worker.updated_at, db_worker.updated_at) self.assertEqual(worker.race_preventer + 1, db_worker.race_preventer) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/test_evaluator.py0000664000175000017500000001503500000000000022073 0ustar00zuulzuul00000000000000# Copyright (c) 2014 Hewlett-Packard Development Company, L.P. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
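# Editorial usage sketch, not part of the original module: based only on the
# behaviour exercised by the tests below, cinder.scheduler.evaluator is driven
# roughly like this (values shown are the ones the tests assert):
#
#   from cinder.scheduler.evaluator import evaluator
#
#   evaluator.evaluate("1 + 1")                 # -> 2
#   evaluator.evaluate("(1 < 2) ? 5 : 10")      # -> 5 (ternary conditional)
#   # Dotted names are resolved from keyword arguments passed to evaluate():
#   evaluator.evaluate("stats.iops + request.iops",
#                      stats={'iops': 1000}, request={'iops': 500})  # -> 1500
#
# Parse problems (unknown variables, malformed expressions, division by zero)
# surface as exception.EvaluatorParseException, as the tests below verify.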
from cinder import exception from cinder.scheduler.evaluator import evaluator from cinder.tests.unit import test class EvaluatorTestCase(test.TestCase): def test_simple_integer(self): self.assertEqual(2, evaluator.evaluate("1+1")) self.assertEqual(9, evaluator.evaluate("2+3+4")) self.assertEqual(23, evaluator.evaluate("11+12")) self.assertEqual(30, evaluator.evaluate("5*6")) self.assertEqual(2, evaluator.evaluate("22/11")) self.assertEqual(38, evaluator.evaluate("109-71")) self.assertEqual( 493, evaluator.evaluate("872 - 453 + 44 / 22 * 4 + 66")) def test_simple_float(self): self.assertEqual(2.0, evaluator.evaluate("1.0 + 1.0")) self.assertEqual(2.5, evaluator.evaluate("1.5 + 1.0")) self.assertEqual(3.0, evaluator.evaluate("1.5 * 2.0")) def test_int_float_mix(self): self.assertEqual(2.5, evaluator.evaluate("1.5 + 1")) self.assertEqual(4.25, evaluator.evaluate("8.5 / 2")) self.assertEqual(5.25, evaluator.evaluate("10/4+0.75 + 2")) def test_negative_numbers(self): self.assertEqual(-2, evaluator.evaluate("-2")) self.assertEqual(-1, evaluator.evaluate("-2+1")) self.assertEqual(3, evaluator.evaluate("5+-2")) def test_exponent(self): self.assertEqual(8, evaluator.evaluate("2^3")) self.assertEqual(-8, evaluator.evaluate("-2 ^ 3")) self.assertEqual(15.625, evaluator.evaluate("2.5 ^ 3")) self.assertEqual(8, evaluator.evaluate("4 ^ 1.5")) def test_function(self): self.assertEqual(5, evaluator.evaluate("abs(-5)")) self.assertEqual(2, evaluator.evaluate("abs(2)")) self.assertEqual(1, evaluator.evaluate("min(1, 100)")) self.assertEqual(100, evaluator.evaluate("max(1, 100)")) def test_parentheses(self): self.assertEqual(1, evaluator.evaluate("(1)")) self.assertEqual(-1, evaluator.evaluate("(-1)")) self.assertEqual(2, evaluator.evaluate("(1+1)")) self.assertEqual(15, evaluator.evaluate("(1+2) * 5")) self.assertEqual(3, evaluator.evaluate("(1+2)*(3-1)/((1+(2-1)))")) self.assertEqual( -8.0, evaluator. evaluate("((1.0 / 0.5) * (2)) *(-2)")) def test_comparisons(self): self.assertTrue(evaluator.evaluate("1 < 2")) self.assertTrue(evaluator.evaluate("2 > 1")) self.assertTrue(evaluator.evaluate("2 != 1")) self.assertFalse(evaluator.evaluate("1 > 2")) self.assertFalse(evaluator.evaluate("2 < 1")) self.assertFalse(evaluator.evaluate("2 == 1")) self.assertTrue(evaluator.evaluate("(1 == 1) == !(1 == 2)")) def test_logic_ops(self): self.assertTrue(evaluator.evaluate("(1 == 1) AND (2 == 2)")) self.assertTrue(evaluator.evaluate("(1 == 1) and (2 == 2)")) self.assertTrue(evaluator.evaluate("(1 == 1) && (2 == 2)")) self.assertFalse(evaluator.evaluate("(1 == 1) && (5 == 2)")) self.assertTrue(evaluator.evaluate("(1 == 1) OR (5 == 2)")) self.assertTrue(evaluator.evaluate("(1 == 1) or (5 == 2)")) self.assertTrue(evaluator.evaluate("(1 == 1) || (5 == 2)")) self.assertFalse(evaluator.evaluate("(5 == 1) || (5 == 2)")) self.assertFalse(evaluator.evaluate("(1 == 1) AND NOT (2 == 2)")) self.assertFalse(evaluator.evaluate("(1 == 1) AND not (2 == 2)")) self.assertFalse(evaluator.evaluate("(1 == 1) AND !(2 == 2)")) self.assertTrue(evaluator.evaluate("(1 == 1) AND NOT (5 == 2)")) self.assertTrue(evaluator.evaluate("(1 == 1) OR NOT (2 == 2) " "AND (5 == 5)")) def test_ternary_conditional(self): self.assertEqual(5, evaluator.evaluate("(1 < 2) ? 5 : 10")) self.assertEqual(10, evaluator.evaluate("(1 > 2) ? 
5 : 10")) def test_variables_dict(self): stats = {'iops': 1000, 'usage': 0.65, 'count': 503, 'free_space': 407} request = {'iops': 500, 'size': 4} self.assertEqual(1500, evaluator.evaluate("stats.iops + request.iops", stats=stats, request=request)) def test_missing_var(self): stats = {'iops': 1000, 'usage': 0.65, 'count': 503, 'free_space': 407} request = {'iops': 500, 'size': 4} self.assertRaises(exception.EvaluatorParseException, evaluator.evaluate, "foo.bob + 5", stats=stats, request=request) self.assertRaises(exception.EvaluatorParseException, evaluator.evaluate, "stats.bob + 5", stats=stats, request=request) self.assertRaises(exception.EvaluatorParseException, evaluator.evaluate, "fake.var + 1", stats=stats, request=request, fake=None) def test_bad_expression(self): self.assertRaises(exception.EvaluatorParseException, evaluator.evaluate, "1/*1") def test_nonnumber_comparison(self): nonnumber = {'test': 'foo'} request = {'test': 'bar'} self.assertTrue( evaluator.evaluate("nonnumber.test != request.test", nonnumber=nonnumber, request=request)) self.assertFalse( evaluator.evaluate("nonnumber.test == request.test", nonnumber=nonnumber, request=request)) def test_div_zero(self): self.assertRaises(exception.EvaluatorParseException, evaluator.evaluate, "7 / 0") def test_deep_function(self): """Ensures that maximum recursion depth is not exceeded.""" self.assertGreater(evaluator.evaluate( '(((1 + max(1 + (10 / 20), 2, 3)) / 100) + 1)'), 1) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/test_exception.py0000664000175000017500000001302700000000000022066 0ustar00zuulzuul00000000000000 # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from http import client as http_client from unittest import mock import fixtures import webob.util from cinder import exception from cinder.tests.unit import test class CinderExceptionReraiseFormatError(object): real_log_exception = exception.CinderException._log_exception @classmethod def patch(cls): exception.CinderException._log_exception = cls._wrap_log_exception @staticmethod def _wrap_log_exception(self): CinderExceptionReraiseFormatError.real_log_exception(self) raise # NOTE(melwitt) This needs to be done at import time in order to also catch # CinderException format errors that are in mock decorators. In these cases, # the errors will be raised during test listing, before tests actually run. 
CinderExceptionReraiseFormatError.patch() class CinderExceptionTestCase(test.TestCase): def test_default_error_msg(self): class FakeCinderException(exception.CinderException): message = "default message" exc = FakeCinderException() self.assertEqual('default message', str(exc)) def test_error_msg(self): self.assertEqual('test', str(exception.CinderException('test'))) def test_default_error_msg_with_kwargs(self): class FakeCinderException(exception.CinderException): message = "default message: %(code)s" exc = FakeCinderException(code=int(http_client.INTERNAL_SERVER_ERROR)) self.assertEqual('default message: 500', str(exc)) def test_error_msg_exception_with_kwargs(self): # NOTE(dprince): disable format errors for this test self.useFixture(fixtures.MonkeyPatch( 'cinder.exception.CinderException._log_exception', CinderExceptionReraiseFormatError.real_log_exception)) class FakeCinderException(exception.CinderException): message = "default message: %(misspelled_code)s" exc = FakeCinderException(code=http_client.INTERNAL_SERVER_ERROR) self.assertEqual('default message: %(misspelled_code)s', str(exc)) def test_default_error_code(self): class FakeCinderException(exception.CinderException): code = http_client.NOT_FOUND exc = FakeCinderException() self.assertEqual(http_client.NOT_FOUND, exc.kwargs['code']) def test_error_code_from_kwarg(self): class FakeCinderException(exception.CinderException): code = http_client.INTERNAL_SERVER_ERROR exc = FakeCinderException(code=http_client.NOT_FOUND) self.assertEqual(http_client.NOT_FOUND, exc.kwargs['code']) def test_error_msg_is_exception_to_string(self): msg = 'test message' exc1 = Exception(msg) exc2 = exception.CinderException(exc1) self.assertEqual(msg, exc2.msg) def test_exception_kwargs_to_string(self): msg = 'test message' exc1 = Exception(msg) exc2 = exception.CinderException(kwarg1=exc1) self.assertEqual(msg, exc2.kwargs['kwarg1']) def test_message_in_format_string(self): class FakeCinderException(exception.CinderException): message = 'FakeCinderException: %(message)s' exc = FakeCinderException(message='message') self.assertEqual('FakeCinderException: message', str(exc)) def test_message_and_kwarg_in_format_string(self): class FakeCinderException(exception.CinderException): message = 'Error %(code)d: %(message)s' exc = FakeCinderException(message='message', code=http_client.NOT_FOUND) self.assertEqual('Error 404: message', str(exc)) def test_message_is_exception_in_format_string(self): class FakeCinderException(exception.CinderException): message = 'Exception: %(message)s' msg = 'test message' exc1 = Exception(msg) exc2 = FakeCinderException(message=exc1) self.assertEqual('Exception: test message', str(exc2)) class CinderConvertedExceptionTestCase(test.TestCase): def test_default_args(self): exc = exception.ConvertedException() self.assertNotEqual('', exc.title) self.assertEqual(http_client.INTERNAL_SERVER_ERROR, exc.code) self.assertEqual('', exc.explanation) def test_standard_status_code(self): with mock.patch.dict(webob.util.status_reasons, {http_client.OK: 'reason'}): exc = exception.ConvertedException(code=int(http_client.OK)) self.assertEqual('reason', exc.title) @mock.patch.dict(webob.util.status_reasons, { http_client.INTERNAL_SERVER_ERROR: 'reason'}) def test_generic_status_code(self): with mock.patch.dict(webob.util.status_generic_reasons, {5: 'generic_reason'}): exc = exception.ConvertedException(code=599) self.assertEqual('generic_reason', exc.title) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 
mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/test_fixtures.py0000664000175000017500000000503500000000000021741 0ustar00zuulzuul00000000000000# Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import logging as pylogging import fixtures as fx from oslo_log import log as logging import testtools from cinder.tests import fixtures class TestLogging(testtools.TestCase): def test_default_logging(self): stdlog = self.useFixture(fixtures.StandardLogging()) root = logging.getLogger() root.logger.setLevel(pylogging.INFO) # there should be a null handler as well at DEBUG self.assertEqual(2, len(root.handlers), root.handlers) log = logging.getLogger(__name__) log.info("at info") log.debug("at debug") self.assertIn("at info", stdlog.logger.output) self.assertNotIn("at debug", stdlog.logger.output) # broken debug messages should still explode, even though we # aren't logging them in the regular handler self.assertRaises(TypeError, log.warning, "this is broken %s %s", "foo") # and, ensure that one of the terrible log messages isn't # output at info warn_log = logging.getLogger('migrate.versioning.api') warn_log.info("warn_log at info, should be skipped") warn_log.error("warn_log at error") self.assertIn("warn_log at error", stdlog.logger.output) self.assertNotIn("warn_log at info", stdlog.logger.output) def test_debug_logging(self): self.useFixture(fx.EnvironmentVariable('OS_DEBUG', '1')) stdlog = self.useFixture(fixtures.StandardLogging()) root = logging.getLogger() root.logger.setLevel(pylogging.INFO) # there should no longer be a null handler self.assertEqual(1, len(root.handlers), root.handlers) log = logging.getLogger(__name__) log.info("at info") log.debug("at debug") self.assertIn("at info", stdlog.logger.output) self.assertNotIn("at debug", stdlog.logger.output) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/test_hacking.py0000664000175000017500000003465200000000000021503 0ustar00zuulzuul00000000000000# Copyright 2014 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import textwrap from unittest import mock import ddt import pycodestyle from cinder.tests.hacking import checks from cinder.tests.unit import test @ddt.ddt class HackingTestCase(test.TestCase): """This class tests cinder's hacking checks. 
This class ensures that Cinder's hacking checks are working by passing strings to the check methods like the pep8/flake8 parser would. The parser loops over each line in the file and then passes the parameters to the check method. The parameter names in the check method dictate what type of object is passed to the check method. The parameter types are:: logical_line: A processed line with the following modifications: - Multi-line statements converted to a single line. - Stripped left and right. - Contents of strings replaced with "xxx" of same length. - Comments removed. physical_line: Raw line of text from the input file. lines: a list of the raw lines from the input file tokens: the tokens that contribute to this logical line line_number: line number in the input file total_lines: number of lines in the input file blank_lines: blank lines before this one indent_char: indentation character in this file (" " or "\t") indent_level: indentation (with tabs expanded to multiples of 8) previous_indent_level: indentation on previous line previous_logical: previous logical line filename: Path of the file being run through pep8 When running a test on a check method the return will be False/None if there is no violation in the sample input. If there is an error a tuple is returned with a position in the line, and a message. So to check the result just assertTrue if the check is expected to fail and assertFalse if it should pass. """ def test_no_translate_logs(self): self.assertEqual(1, len(list(checks.no_translate_logs( "LOG.debug(_('foo'))", "cinder/scheduler/foo.py")))) self.assertEqual(1, len(list(checks.no_translate_logs( "LOG.error(_('foo'))", "cinder/scheduler/foo.py")))) self.assertEqual(1, len(list(checks.no_translate_logs( "LOG.info(_('foo'))", "cinder/scheduler/foo.py")))) self.assertEqual(1, len(list(checks.no_translate_logs( "LOG.warning(_('foo'))", "cinder/scheduler/foo.py")))) self.assertEqual(1, len(list(checks.no_translate_logs( "LOG.exception(_('foo'))", "cinder/scheduler/foo.py")))) self.assertEqual(1, len(list(checks.no_translate_logs( "LOG.critical(_('foo'))", "cinder/scheduler/foo.py")))) def test_check_explicit_underscore_import(self): self.assertEqual(1, len(list(checks.check_explicit_underscore_import( "LOG.info(_('My info message'))", "cinder.tests.unit/other_files.py")))) self.assertEqual(1, len(list(checks.check_explicit_underscore_import( "msg = _('My message')", "cinder.tests.unit/other_files.py")))) self.assertEqual(0, len(list(checks.check_explicit_underscore_import( "from cinder.i18n import _", "cinder.tests.unit/other_files.py")))) self.assertEqual(0, len(list(checks.check_explicit_underscore_import( "LOG.info(_('My info message'))", "cinder.tests.unit/other_files.py")))) self.assertEqual(0, len(list(checks.check_explicit_underscore_import( "msg = _('My message')", "cinder.tests.unit/other_files.py")))) self.assertEqual(0, len(list(checks.check_explicit_underscore_import( "from cinder.i18n import _", "cinder.tests.unit/other_files2.py")))) self.assertEqual(0, len(list(checks.check_explicit_underscore_import( "msg = _('My message')", "cinder.tests.unit/other_files2.py")))) self.assertEqual(0, len(list(checks.check_explicit_underscore_import( "_ = translations.ugettext", "cinder.tests.unit/other_files3.py")))) self.assertEqual(0, len(list(checks.check_explicit_underscore_import( "msg = _('My message')", "cinder.tests.unit/other_files3.py")))) # Complete code coverage by falling through all checks self.assertEqual(0, len(list(checks.check_explicit_underscore_import( 
"LOG.info('My info message')", "cinder.tests.unit/other_files4.py")))) self.assertEqual(1, len(list(checks.check_explicit_underscore_import( "msg = _('My message')", "cinder.tests.unit/other_files5.py")))) # We are patching pycodestyle/pep8 so that only the check under test is # actually installed. # TODO(eharney): don't patch private members of external libraries @mock.patch('pycodestyle._checks', {'physical_line': {}, 'logical_line': {}, 'tree': {}}) def _run_check(self, code, checker, filename=None): pycodestyle.register_check(checker) lines = textwrap.dedent(code).strip().splitlines(True) checker = pycodestyle.Checker(filename=filename, lines=lines) checker.check_all() checker.report._deferred_print.sort() return checker.report._deferred_print def _assert_has_errors(self, code, checker, expected_errors=None, filename=None): actual_errors = [e[:3] for e in self._run_check(code, checker, filename)] self.assertEqual(expected_errors or [], actual_errors) def _assert_has_no_errors(self, code, checker, filename=None): self._assert_has_errors(code, checker, filename=filename) def test_logging_format_args(self): checker = checks.CheckLoggingFormatArgs code = """ import logging LOG = logging.getLogger() LOG.info("Message without a second argument.") LOG.critical("Message with %s arguments.", 'two') LOG.debug("Volume %s caught fire and is at %d degrees C and" " climbing.", 'volume1', 500) """ self._assert_has_no_errors(code, checker) code = """ import logging LOG = logging.getLogger() LOG.{0}("Volume %s caught fire and is at %d degrees C and " "climbing.", ('volume1', 500)) """ # We don't assert on specific column numbers since there is a small # change in calculation between =py38 for method in checker.LOG_METHODS: self._assert_has_errors(code.format(method), checker, expected_errors=[(4, mock.ANY, 'C310')]) code = """ import logging LOG = logging.getLogger() LOG.log(logging.DEBUG, "Volume %s caught fire and is at %d" " degrees C and climbing.", ('volume1', 500)) """ # We don't assert on specific column numbers since there is a small # change in calculation between =py38 self._assert_has_errors(code, checker, expected_errors=[(4, mock.ANY, 'C310')]) def test_opt_type_registration_args(self): checker = checks.CheckOptRegistrationArgs code = """ CONF.register_opts([opt1, opt2, opt3]) CONF.register_opts((opt4, opt5)) CONF.register_opt(lonely_opt) CONF.register_opts([OPT1, OPT2], group="group_of_opts") CONF.register_opt(single_opt, group=blah) """ self._assert_has_no_errors(code, checker) code = """ CONF.register_opt([opt4, opt5, opt6]) CONF.register_opt((opt7, opt8)) CONF.register_opts(lonely_opt) CONF.register_opt((an_opt, another_opt)) """ # We don't assert on specific column numbers since there is a small # change in calculation between =py38 self._assert_has_errors(code, checker, expected_errors=[(1, 18, 'C311'), (2, mock.ANY, 'C311'), (3, mock.ANY, 'C311'), (4, mock.ANY, 'C311')]) code = """ CONF.register_opt(single_opt) CONF.register_opts(other_opt) CONF.register_opt(multiple_opts) tuple_opts = (one_opt, two_opt) CONF.register_opts(tuple_opts) """ self._assert_has_errors(code, checker, expected_errors=[(2, 19, 'C311'), (3, 18, 'C311')]) def test_no_mutable_default_args(self): self.assertEqual(0, len(list(checks.no_mutable_default_args( "def foo (bar):")))) self.assertEqual(1, len(list(checks.no_mutable_default_args( "def foo (bar=[]):")))) self.assertEqual(1, len(list(checks.no_mutable_default_args( "def foo (bar={}):")))) def test_no_log_warn(self): code = """ LOG.warn("LOG.warn is 
deprecated") """ errors = [(1, 0, 'C338')] self._assert_has_errors(code, checks.no_log_warn, expected_errors=errors) code = """ LOG.warning("LOG.warn is deprecated") """ self._assert_has_no_errors(code, checks.no_log_warn) def test_check_datetime_now(self): self.assertEqual(1, len(list(checks.check_datetime_now( "datetime.now", False)))) self.assertEqual(0, len(list(checks.check_datetime_now( "timeutils.utcnow", False)))) def test_check_datetime_now_noqa(self): self.assertEqual(0, len(list(checks.check_datetime_now( "datetime.now() # noqa", True)))) def test_no_print_statements(self): self.assertEqual(0, len(list(checks.check_no_print_statements( "a line with no print statement", "cinder/file.py", False)))) self.assertEqual(1, len(list(checks.check_no_print_statements( "print('My print statement')", "cinder/file.py", False)))) self.assertEqual(0, len(list(checks.check_no_print_statements( "print('My print statement in cinder/cmd, which is ok.')", "cinder/cmd/file.py", False)))) self.assertEqual(0, len(list(checks.check_no_print_statements( "print('My print statement that I just must have.')", "cinder.tests.unit/file.py", True)))) self.assertEqual(1, len(list(checks.check_no_print_statements( "print ('My print with space')", "cinder/volume/anotherFile.py", False)))) def test_dict_constructor_with_list_copy(self): self.assertEqual(1, len(list(checks.dict_constructor_with_list_copy( " dict([(i, connect_info[i])")))) self.assertEqual(1, len(list(checks.dict_constructor_with_list_copy( " attrs = dict([(k, _from_json(v))")))) self.assertEqual(1, len(list(checks.dict_constructor_with_list_copy( " type_names = dict((value, key) for key, value in")))) self.assertEqual(1, len(list(checks.dict_constructor_with_list_copy( " dict((value, key) for key, value in")))) self.assertEqual(1, len(list(checks.dict_constructor_with_list_copy( "foo(param=dict((k, v) for k, v in bar.items()))")))) self.assertEqual(1, len(list(checks.dict_constructor_with_list_copy( " dict([[i,i] for i in range(3)])")))) self.assertEqual(1, len(list(checks.dict_constructor_with_list_copy( " dd = dict([i,i] for i in range(3))")))) self.assertEqual(0, len(list(checks.dict_constructor_with_list_copy( " dict()")))) self.assertEqual(0, len(list(checks.dict_constructor_with_list_copy( " create_kwargs = dict(snapshot=snapshot,")))) self.assertEqual(0, len(list(checks.dict_constructor_with_list_copy( " self._render_dict(xml, data_el, data.__dict__)")))) def test_validate_assertTrue(self): test_value = True self.assertEqual(0, len(list(checks.validate_assertTrue( "assertTrue(True)", 'cinder/volume/stuff/a_file.py')))) self.assertEqual(0, len(list(checks.validate_assertTrue( "assertTrue(True)", 'cinder/tests/unit/test_file.py')))) self.assertEqual(0, len(list(checks.validate_assertTrue( "assertEqual(True, %s)" % test_value, 'cinder/volume/stuff/a_file.py')))) self.assertEqual(1, len(list(checks.validate_assertTrue( "assertEqual(True, %s)" % test_value, 'cinder/tests/unit/test_file.py')))) @ddt.unpack @ddt.data( (1, 'LOG.info', "cinder/tests/unit/fake.py", False), (1, 'LOG.warning', "cinder/tests/fake.py", False), (1, 'LOG.error', "cinder/tests/fake.py", False), (1, 'LOG.exception', "cinder/tests/fake.py", False), (1, 'LOG.debug', "cinder/tests/fake.py", False), (0, 'LOG.info.assert_called_once_with', "cinder/tests/fake.py", False), (0, 'some.LOG.error.call', "cinder/tests/fake.py", False), (0, 'LOG.warning', "cinder/tests/unit/fake.py", True)) def test_no_test_log(self, first, second, third, fourth): self.assertEqual(first, 
len(list(checks.no_test_log( "%s('arg')" % second, third, fourth)))) @ddt.unpack @ddt.data( (1, 'import mock'), (0, 'from unittest import mock'), (1, 'from mock import patch'), (0, 'from unittest.mock import patch')) def test_no_third_party_mock(self, err_count, line): self.assertEqual(err_count, len(list(checks.no_third_party_mock( line)))) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/test_image_utils.py0000664000175000017500000040306000000000000022372 0ustar00zuulzuul00000000000000# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Unit tests for image utils.""" import errno import math from unittest import mock import cryptography import ddt from oslo_concurrency import processutils from oslo_utils import imageutils from oslo_utils import units from cinder import exception from cinder.image import image_utils from cinder.tests.unit import fake_constants as fake from cinder.tests.unit import test from cinder.volume import throttling class TestQemuImgInfo(test.TestCase): @mock.patch('cinder.privsep.format_inspector.get_format_if_safe') @mock.patch('os.name', new='posix') @mock.patch('oslo_utils.imageutils.QemuImgInfo') @mock.patch('cinder.utils.execute') def test_qemu_img_info(self, mock_exec, mock_info, mock_detect): mock_out = mock.sentinel.out mock_err = mock.sentinel.err test_path = mock.sentinel.path mock_exec.return_value = (mock_out, mock_err) mock_detect.return_value = 'mock_fmt' output = image_utils.qemu_img_info(test_path) mock_exec.assert_called_once_with( 'env', 'LC_ALL=C', 'qemu-img', 'info', '-f', 'mock_fmt', '--output=json', test_path, run_as_root=True, prlimit=image_utils.QEMU_IMG_LIMITS) self.assertEqual(mock_info.return_value, output) mock_detect.assert_called_once_with(path=test_path, allow_qcow2_backing_file=False) @mock.patch('cinder.privsep.format_inspector.get_format_if_safe') @mock.patch('os.name', new='posix') @mock.patch('oslo_utils.imageutils.QemuImgInfo') @mock.patch('cinder.utils.execute') def test_qemu_img_info_qcow2_backing_ok( self, mock_exec, mock_info, mock_detect): mock_out = mock.sentinel.out mock_err = mock.sentinel.err test_path = mock.sentinel.path mock_exec.return_value = (mock_out, mock_err) mock_detect.return_value = 'qcow2' output = image_utils.qemu_img_info( test_path, allow_qcow2_backing_file=True) mock_exec.assert_called_once_with( 'env', 'LC_ALL=C', 'qemu-img', 'info', '-f', 'qcow2', '--output=json', test_path, run_as_root=True, prlimit=image_utils.QEMU_IMG_LIMITS) self.assertEqual(mock_info.return_value, output) mock_detect.assert_called_once_with(path=test_path, allow_qcow2_backing_file=True) @mock.patch('cinder.privsep.format_inspector.get_format_if_safe') @mock.patch('os.name', new='posix') @mock.patch('oslo_utils.imageutils.QemuImgInfo') @mock.patch('cinder.utils.execute') def test_qemu_img_info_raw_not_luks(self, mock_exec, mock_info, mock_detect): """To determine if a raw image is luks, we call qemu-img twice.""" mock_out = mock.sentinel.out mock_err = 
mock.sentinel.err test_path = mock.sentinel.path mock_exec.side_effect = [(mock_out, mock_err), # it's not luks, so raise an error processutils.ProcessExecutionError] mock_detect.return_value = 'raw' mock_data = mock.Mock() mock_data.file_format = 'raw' mock_info.return_value = mock_data first = mock.call( 'env', 'LC_ALL=C', 'qemu-img', 'info', '-f', 'raw', '--output=json', test_path, run_as_root=True, prlimit=image_utils.QEMU_IMG_LIMITS) second = mock.call( 'env', 'LC_ALL=C', 'qemu-img', 'info', '-f', 'luks', '--output=json', test_path, run_as_root=True, prlimit=image_utils.QEMU_IMG_LIMITS) output = image_utils.qemu_img_info(test_path) mock_exec.assert_has_calls([first, second]) mock_info.assert_called_once() self.assertEqual(mock_info.return_value, output) mock_detect.assert_called_once_with(path=test_path, allow_qcow2_backing_file=False) @mock.patch('cinder.privsep.format_inspector.get_format_if_safe') @mock.patch('os.name', new='posix') @mock.patch('oslo_utils.imageutils.QemuImgInfo') @mock.patch('cinder.utils.execute') def test_qemu_img_info_luks(self, mock_exec, mock_info, mock_detect): # the format_inspector will identify the image as raw, but # we will ask qemu-img for a second opinion, and it say luks mock_out = mock.sentinel.out mock_err = mock.sentinel.err test_path = mock.sentinel.path mock_exec.return_value = (mock_out, mock_err) mock_detect.return_value = 'raw' mock_data1 = mock.Mock(name='first_time') mock_data1.file_format = 'raw' mock_data2 = mock.Mock(name='second_time') mock_data2.file_format = 'luks' mock_info.side_effect = [mock_data1, mock_data2] first = mock.call( 'env', 'LC_ALL=C', 'qemu-img', 'info', '-f', 'raw', '--output=json', test_path, run_as_root=True, prlimit=image_utils.QEMU_IMG_LIMITS) second = mock.call( 'env', 'LC_ALL=C', 'qemu-img', 'info', '-f', 'luks', '--output=json', test_path, run_as_root=True, prlimit=image_utils.QEMU_IMG_LIMITS) output = image_utils.qemu_img_info(test_path) mock_exec.assert_has_calls([first, second]) self.assertEqual(2, mock_info.call_count) self.assertEqual(mock_data2, output) mock_detect.assert_called_once_with(path=test_path, allow_qcow2_backing_file=False) @mock.patch('cinder.privsep.format_inspector.get_format_if_safe') @mock.patch('os.name', new='posix') @mock.patch('oslo_utils.imageutils.QemuImgInfo') @mock.patch('cinder.utils.execute') def test_qemu_img_info_not_root(self, mock_exec, mock_info, mock_detect): mock_out = mock.sentinel.out mock_err = mock.sentinel.err test_path = mock.sentinel.path mock_exec.return_value = (mock_out, mock_err) mock_detect.return_value = 'mock_fmt' output = image_utils.qemu_img_info(test_path, force_share=False, run_as_root=False) mock_exec.assert_called_once_with( 'env', 'LC_ALL=C', 'qemu-img', 'info', '-f', 'mock_fmt', '--output=json', test_path, run_as_root=False, prlimit=image_utils.QEMU_IMG_LIMITS) self.assertEqual(mock_info.return_value, output) mock_detect.assert_called_once_with(path=test_path, allow_qcow2_backing_file=False) @mock.patch('cinder.privsep.format_inspector.get_format_if_safe') @mock.patch('cinder.image.image_utils.os') @mock.patch('oslo_utils.imageutils.QemuImgInfo') @mock.patch('cinder.utils.execute') def test_qemu_img_info_on_nt(self, mock_exec, mock_info, mock_os, mock_detect): mock_out = mock.sentinel.out mock_err = mock.sentinel.err test_path = mock.sentinel.path mock_exec.return_value = (mock_out, mock_err) mock_os.name = 'nt' mock_detect.return_value = 'mock_fmt' output = image_utils.qemu_img_info(test_path) mock_exec.assert_called_once_with( 'qemu-img', 'info', 
'-f', 'mock_fmt', '--output=json', test_path, run_as_root=True, prlimit=image_utils.QEMU_IMG_LIMITS) self.assertEqual(mock_info.return_value, output) mock_detect.assert_called_once_with(path=test_path, allow_qcow2_backing_file=False) @mock.patch('cinder.privsep.format_inspector.get_format_if_safe') @mock.patch('os.name', new='posix') @mock.patch('cinder.utils.execute') def test_qemu_img_info_malicious(self, mock_exec, mock_detect): mock_out = mock.sentinel.out mock_err = mock.sentinel.err test_path = mock.sentinel.path mock_exec.return_value = (mock_out, mock_err) mock_detect.return_value = None self.assertRaises(exception.Invalid, image_utils.qemu_img_info, test_path, force_share=False, run_as_root=False) mock_exec.assert_not_called() mock_detect.assert_called_once_with(path=test_path, allow_qcow2_backing_file=False) @mock.patch('cinder.utils.execute') def test_get_qemu_img_version(self, mock_exec): mock_out = "qemu-img version 2.0.0" mock_err = mock.sentinel.err mock_exec.return_value = (mock_out, mock_err) expected_version = [2, 0, 0] version = image_utils.get_qemu_img_version() mock_exec.assert_called_once_with('qemu-img', '--version', check_exit_code=False) self.assertEqual(expected_version, version) self.assertEqual(1, mock_exec.call_count) version = image_utils.get_qemu_img_version() # verify that cached value was used instead of calling execute self.assertEqual(expected_version, version) self.assertEqual(1, mock_exec.call_count) @mock.patch.object(image_utils, 'get_qemu_img_version') def test_validate_qemu_img_version(self, mock_get_qemu_img_version): fake_current_version = [1, 8] mock_get_qemu_img_version.return_value = fake_current_version minimum_version = '1.8' image_utils.check_qemu_img_version(minimum_version) mock_get_qemu_img_version.assert_called_once_with() @mock.patch.object(image_utils, 'get_qemu_img_version') def _test_validate_unsupported_qemu_img_version(self, mock_get_qemu_img_version, current_version=None): mock_get_qemu_img_version.return_value = current_version minimum_version = '2.0' self.assertRaises(exception.VolumeBackendAPIException, image_utils.check_qemu_img_version, minimum_version) mock_get_qemu_img_version.assert_called_once_with() def test_validate_qemu_img_version_not_installed(self): self._test_validate_unsupported_qemu_img_version() def test_validate_older_qemu_img_version(self): self._test_validate_unsupported_qemu_img_version( current_version=[1, 8]) @ddt.ddt class TestConvertImage(test.TestCase): @mock.patch('cinder.image.image_utils.qemu_img_info') @mock.patch('cinder.utils.execute') @mock.patch('cinder.utils.is_blk_device', return_value=True) def test_defaults_block_dev_with_size_info(self, mock_isblk, mock_exec, mock_info): source = mock.sentinel.source dest = mock.sentinel.dest out_format = mock.sentinel.out_format mock_info.return_value.virtual_size = 1048576 throttle = throttling.Throttle(prefix=['cgcmd']) with mock.patch('cinder.volume.volume_utils.check_for_odirect_support', return_value=True): output = image_utils.convert_image(source, dest, out_format, throttle=throttle) self.assertIsNone(output) mock_exec.assert_called_once_with('cgcmd', 'qemu-img', 'convert', '-O', out_format, '-t', 'none', source, dest, run_as_root=True) mock_exec.reset_mock() with mock.patch('cinder.volume.volume_utils.check_for_odirect_support', return_value=False): output = image_utils.convert_image(source, dest, out_format) self.assertIsNone(output) mock_exec.assert_called_once_with('qemu-img', 'convert', '-O', out_format, source, dest, run_as_root=True) 
@mock.patch('cinder.image.image_utils.qemu_img_info') @mock.patch('cinder.utils.execute') @mock.patch('cinder.utils.is_blk_device', return_value=True) def test_defaults_block_dev_without_size_info(self, mock_isblk, mock_exec, mock_info): source = mock.sentinel.source dest = mock.sentinel.dest out_format = mock.sentinel.out_format mock_info.return_value.file_format = 'qcow2' mock_info.return_value.virtual_size = 1048576 mock_info.return_value.format_specific = {'data': {}} throttle = throttling.Throttle(prefix=['cgcmd']) with mock.patch('cinder.volume.volume_utils.check_for_odirect_support', return_value=True): output = image_utils.convert_image(source, dest, out_format, throttle=throttle) my_call = mock.call(source, run_as_root=True) mock_info.assert_has_calls([my_call, my_call]) self.assertIsNone(output) mock_exec.assert_called_once_with('cgcmd', 'qemu-img', 'convert', '-O', out_format, '-t', 'none', source, dest, run_as_root=True) mock_exec.reset_mock() with mock.patch('cinder.volume.volume_utils.check_for_odirect_support', return_value=False): output = image_utils.convert_image(source, dest, out_format) self.assertIsNone(output) mock_exec.assert_called_once_with('qemu-img', 'convert', '-O', out_format, source, dest, run_as_root=True) @mock.patch('cinder.volume.volume_utils.check_for_odirect_support', return_value=True) @mock.patch('cinder.image.image_utils.qemu_img_info') @mock.patch('cinder.utils.execute') @mock.patch('cinder.utils.is_blk_device', return_value=False) def test_defaults_not_block_dev_with_size_info(self, mock_isblk, mock_exec, mock_info, mock_odirect): source = mock.sentinel.source dest = mock.sentinel.dest out_format = mock.sentinel.out_format out_subformat = 'fake_subformat' mock_info.return_value.virtual_size = 1048576 output = image_utils.convert_image(source, dest, out_format, out_subformat=out_subformat) self.assertIsNone(output) mock_exec.assert_called_once_with('qemu-img', 'convert', '-O', out_format, '-o', 'subformat=%s' % out_subformat, source, dest, run_as_root=True) @mock.patch('cinder.volume.volume_utils.check_for_odirect_support', return_value=True) @mock.patch('cinder.image.image_utils.qemu_img_info') @mock.patch('cinder.utils.execute') @mock.patch('cinder.utils.is_blk_device', return_value=False) def test_defaults_not_block_dev_without_size_info(self, mock_isblk, mock_exec, mock_info, mock_odirect): source = mock.sentinel.source dest = mock.sentinel.dest out_format = mock.sentinel.out_format out_subformat = 'fake_subformat' output = image_utils.convert_image(source, dest, out_format, out_subformat=out_subformat) self.assertIsNone(output) mock_exec.assert_called_once_with('qemu-img', 'convert', '-O', out_format, '-o', 'subformat=%s' % out_subformat, source, dest, run_as_root=True) @mock.patch('cinder.image.image_utils.qemu_img_info') @mock.patch('cinder.utils.execute') @mock.patch('cinder.utils.is_blk_device', return_value=True) def test_defaults_block_dev_ami_img(self, mock_isblk, mock_exec, mock_info): source = mock.sentinel.source dest = mock.sentinel.dest out_format = mock.sentinel.out_format mock_info.return_value.virtual_size = 1048576 with mock.patch('cinder.volume.volume_utils.check_for_odirect_support', return_value=True): output = image_utils.convert_image(source, dest, out_format, src_format='AMI') self.assertIsNone(output) mock_exec.assert_called_once_with('qemu-img', 'convert', '-O', out_format, '-t', 'none', source, dest, run_as_root=True) @mock.patch('cinder.image.image_utils.qemu_img_info') @mock.patch('cinder.utils.execute') 
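    # Editorial sketch (not part of the original suite): judging only from the
    # assertions in the surrounding tests, image_utils.convert_image() builds a
    # qemu-img invocation of roughly this shape:
    #
    #   qemu-img convert -O <out_format> [-t none] [-o subformat=<sub>] [-c] \
    #       [-S 0] <source> <dest>
    #
    # where '-t none' appears only when the destination is a block device and
    # O_DIRECT is supported, '-o subformat=...' only when an output subformat
    # is requested, '-c' only when compression is enabled, and '-S 0' only when
    # sparse writes are disabled.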
@mock.patch('cinder.utils.is_blk_device', return_value=False) @mock.patch('cinder.volume.volume_utils.check_for_odirect_support') def test_convert_to_vhd(self, mock_check_odirect, mock_isblk, mock_exec, mock_info): source = mock.sentinel.source dest = mock.sentinel.dest out_format = "vhd" mock_info.return_value.virtual_size = 1048576 output = image_utils.convert_image(source, dest, out_format) self.assertIsNone(output) # Qemu uses the legacy "vpc" format name, instead of "vhd". mock_exec.assert_called_once_with('qemu-img', 'convert', '-O', 'vpc', source, dest, run_as_root=True) @ddt.data(True, False) @mock.patch('cinder.image.image_utils.qemu_img_info') @mock.patch('cinder.utils.execute') @mock.patch('cinder.utils.is_blk_device', return_value=False) def test_convert_to_qcow2(self, compress_option, mock_isblk, mock_exec, mock_info): self.override_config('image_compress_on_upload', compress_option) source = mock.sentinel.source dest = mock.sentinel.dest out_format = 'qcow2' mock_info.return_value.virtual_size = 1048576 image_utils.convert_image(source, dest, out_format, compress=True) exec_args = ['qemu-img', 'convert', '-O', 'qcow2'] if compress_option: exec_args.append('-c') exec_args.extend((source, dest)) mock_exec.assert_called_once_with(*exec_args, run_as_root=True) @mock.patch('cinder.image.image_utils.qemu_img_info') @mock.patch('cinder.utils.execute') @mock.patch('cinder.utils.is_blk_device', return_value=False) def test_convert_disable_sparse(self, mock_isblk, mock_exec, mock_info): source = mock.sentinel.source dest = mock.sentinel.dest out_format = mock.sentinel.out_format mock_info.return_value.virtual_size = 1048576 output = image_utils.convert_image(source, dest, out_format, disable_sparse=True) self.assertIsNone(output) mock_exec.assert_called_once_with('qemu-img', 'convert', '-O', out_format, '-S', '0', source, dest, run_as_root=True) @mock.patch('cinder.volume.volume_utils.check_for_odirect_support', return_value=True) @mock.patch('cinder.image.image_utils.qemu_img_info') @mock.patch('cinder.utils.execute') @mock.patch('cinder.utils.is_blk_device', return_value=False) @mock.patch('os.path.dirname', return_value='fakedir') @mock.patch('os.path.ismount', return_value=True) @mock.patch('oslo_utils.fileutils.ensure_tree') @mock.patch('cinder.image.image_utils.utils.tempdir') @mock.patch.object(image_utils.LOG, 'error') def test_not_enough_conversion_space(self, mock_log, mock_tempdir, mock_make, mock_ismount, mock_dirname, mock_isblk, mock_exec, mock_info, mock_odirect): source = mock.sentinel.source self.flags(image_conversion_dir='fakedir') dest = ['fakedir'] out_format = mock.sentinel.out_format mock_exec.side_effect = processutils.ProcessExecutionError( stderr='No space left on device') self.assertRaises(processutils.ProcessExecutionError, image_utils.convert_image, source, dest, out_format) mock_log.assert_called_with('Insufficient free space on fakedir for' ' image conversion.') @mock.patch('cinder.image.image_utils.qemu_img_info') @mock.patch('cinder.utils.execute') @mock.patch('cinder.image.image_utils._get_qemu_convert_cmd') @mock.patch('cinder.utils.is_blk_device', return_value=False) @mock.patch.object(image_utils.LOG, 'info') @mock.patch.object(image_utils.LOG, 'debug') def test__convert_image_no_virt_size(self, mock_debug_log, mock_info_log, mock_isblk, mock_cmd, mock_execute, mock_info): """Make sure we don't try to do math with a None value""" prefix = ('cgexec', '-g', 'blkio:cg') source = '/source' dest = '/dest' out_format = 'unspecified' # 1. 
no qemu_img_info passed in and qemu_img_info() raises exc mock_info.side_effect = processutils.ProcessExecutionError image_utils._convert_image(prefix, source, dest, out_format) mock_debug_log.assert_not_called() log_msg = mock_info_log.call_args.args[0] self.assertIn("image size is unavailable", log_msg) mock_info.reset_mock(side_effect=True) mock_info_log.reset_mock() # 2. no qemu_img_info passed in, returned obj has no virtual_size mock_info.return_value = imageutils.QemuImgInfo() image_utils._convert_image(prefix, source, dest, out_format) mock_debug_log.assert_not_called() log_msg = mock_info_log.call_args.args[0] self.assertIn("image size is unavailable", log_msg) mock_info.reset_mock(return_value=True) mock_info_log.reset_mock() # 3. no qemu_img_info passed in, returned obj has virtual_size mock_info.return_value = imageutils.QemuImgInfo( '{"virtual-size": 1073741824}', format='json') image_utils._convert_image(prefix, source, dest, out_format) log_msg = mock_debug_log.call_args.args[0] self.assertIn("Image conversion details", log_msg) log_msg = mock_info_log.call_args.args[0] self.assertIn("Converted", log_msg) mock_info.reset_mock() mock_debug_log.reset_mock() mock_info_log.reset_mock() # 4. qemu_img_info passed in but without virtual_size src_img_info = imageutils.QemuImgInfo() image_utils._convert_image(prefix, source, dest, out_format, src_img_info=src_img_info) mock_info.assert_not_called() mock_debug_log.assert_not_called() log_msg = mock_info_log.call_args.args[0] self.assertIn("image size is unavailable", log_msg) mock_info_log.reset_mock() # 5. qemu_img_info passed in with virtual_size src_img_info = imageutils.QemuImgInfo('{"virtual-size": 1073741824}', format='json') image_utils._convert_image(prefix, source, dest, out_format, src_img_info=src_img_info) mock_info.assert_not_called() log_msg = mock_debug_log.call_args.args[0] self.assertIn("Image conversion details", log_msg) log_msg = mock_info_log.call_args.args[0] self.assertIn("Converted", log_msg) @ddt.ddt class TestResizeImage(test.TestCase): @mock.patch('cinder.utils.execute') @ddt.data(None, 'raw', 'qcow2') def test_defaults(self, file_format, mock_exec): source = mock.sentinel.source size = mock.sentinel.size output = image_utils.resize_image(source, size, file_format=file_format) self.assertIsNone(output) if file_format: mock_exec.assert_called_once_with( 'qemu-img', 'resize', '-f', file_format, source, 'sentinel.sizeG', run_as_root=False) else: mock_exec.assert_called_once_with('qemu-img', 'resize', source, 'sentinel.sizeG', run_as_root=False) @mock.patch('cinder.utils.execute') @ddt.data(None, 'raw', 'qcow2') def test_run_as_root(self, file_format, mock_exec): source = mock.sentinel.source size = mock.sentinel.size output = image_utils.resize_image(source, size, run_as_root=True, file_format=file_format) self.assertIsNone(output) if file_format: mock_exec.assert_called_once_with( 'qemu-img', 'resize', '-f', file_format, source, 'sentinel.sizeG', run_as_root=True) else: mock_exec.assert_called_once_with('qemu-img', 'resize', source, 'sentinel.sizeG', run_as_root=True) class TestFetch(test.TestCase): @mock.patch('eventlet.tpool.Proxy') @mock.patch('os.stat') @mock.patch('cinder.image.image_utils.fileutils') def test_defaults(self, mock_fileutils, mock_stat, mock_proxy): ctxt = mock.sentinel.context image_service = mock.Mock() image_id = mock.sentinel.image_id path = 'test_path' _user_id = mock.sentinel._user_id _project_id = mock.sentinel._project_id mock_open = mock.mock_open() mock_stat.return_value.st_size = 
1048576 with mock.patch('cinder.image.image_utils.open', new=mock_open, create=True): output = image_utils.fetch(ctxt, image_service, image_id, path, _user_id, _project_id) self.assertIsNone(output) mock_proxy.assert_called_once_with(mock_open.return_value) image_service.download.assert_called_once_with(ctxt, image_id, mock_proxy.return_value) mock_open.assert_called_once_with(path, 'wb') mock_fileutils.remove_path_on_error.assert_called_once_with(path) (mock_fileutils.remove_path_on_error.return_value.__enter__ .assert_called_once_with()) (mock_fileutils.remove_path_on_error.return_value.__exit__ .assert_called_once_with(None, None, None)) def test_fetch_enospc(self): context = mock.sentinel.context image_service = mock.Mock() image_id = mock.sentinel.image_id e = exception.ImageTooBig(image_id=image_id, reason = "fake") e.errno = errno.ENOSPC image_service.download.side_effect = e path = '/test_path' _user_id = mock.sentinel._user_id _project_id = mock.sentinel._project_id with mock.patch('cinder.image.image_utils.open', new=mock.mock_open(), create=True): self.assertRaises(exception.ImageTooBig, image_utils.fetch, context, image_service, image_id, path, _user_id, _project_id) def test_fetch_ioerror(self): context = mock.sentinel.context image_service = mock.Mock() image_id = mock.sentinel.image_id e = IOError() e.errno = errno.ECONNRESET e.strerror = 'Some descriptive message' image_service.download.side_effect = e path = '/test_path' _user_id = mock.sentinel._user_id _project_id = mock.sentinel._project_id with mock.patch('cinder.image.image_utils.open', new=mock.mock_open(), create=True): self.assertRaisesRegex(exception.ImageDownloadFailed, e.strerror, image_utils.fetch, context, image_service, image_id, path, _user_id, _project_id) class MockVerifier(object): def update(self, data): return def verify(self): return True class BadVerifier(object): def update(self, data): return def verify(self): raise cryptography.exceptions.InvalidSignature( 'Invalid signature.' 
) class TestVerifyImageSignature(test.TestCase): @mock.patch('cinder.image.image_utils.open', new_callable=mock.mock_open) @mock.patch('cursive.signature_utils.get_verifier') @mock.patch('oslo_utils.fileutils.remove_path_on_error') def test_image_signature_verify_failed(self, mock_remove, mock_get, mock_open): ctxt = mock.sentinel.context metadata = {'name': 'test image', 'is_public': False, 'protected': False, 'properties': {'img_signature_certificate_uuid': 'fake_uuid', 'img_signature_hash_method': 'SHA-256', 'img_signature': 'signature', 'img_signature_key_type': 'RSA-PSS'}} class FakeImageService(object): def show(self, context, image_id): return metadata self.flags(verify_glance_signatures='enabled') mock_get.return_value = BadVerifier() self.assertRaises(exception.ImageSignatureVerificationException, image_utils.verify_glance_image_signature, ctxt, FakeImageService(), 'fake_id', 'fake_path') mock_get.assert_called_once_with( context=ctxt, img_signature_certificate_uuid='fake_uuid', img_signature_hash_method='SHA-256', img_signature='signature', img_signature_key_type='RSA-PSS') @mock.patch('cursive.signature_utils.get_verifier') def test_image_signature_metadata_missing(self, mock_get): ctxt = mock.sentinel.context metadata = {'name': 'test image', 'is_public': False, 'protected': False, 'properties': {}} class FakeImageService(object): def show(self, context, image_id): return metadata self.flags(verify_glance_signatures='enabled') result = image_utils.verify_glance_image_signature( ctxt, FakeImageService(), 'fake_id', 'fake_path') self.assertFalse(result) mock_get.assert_not_called() @mock.patch('cursive.signature_utils.get_verifier') def test_image_signature_metadata_incomplete(self, mock_get): ctxt = mock.sentinel.context metadata = {'name': 'test image', 'is_public': False, 'protected': False, 'properties': {'img_signature_certificate_uuid': None, 'img_signature_hash_method': 'SHA-256', 'img_signature': 'signature', 'img_signature_key_type': 'RSA-PSS'}} class FakeImageService(object): def show(self, context, image_id): return metadata self.flags(verify_glance_signatures='enabled') self.assertRaises(exception.InvalidSignatureImage, image_utils.verify_glance_image_signature, ctxt, FakeImageService(), 'fake_id', 'fake_path') mock_get.assert_not_called() @mock.patch('cinder.image.image_utils.open', new_callable=mock.mock_open) @mock.patch('eventlet.tpool.execute') @mock.patch('cursive.signature_utils.get_verifier') @mock.patch('oslo_utils.fileutils.remove_path_on_error') def test_image_signature_verify_success(self, mock_remove, mock_get, mock_exec, mock_open): ctxt = mock.sentinel.context metadata = {'name': 'test image', 'is_public': False, 'protected': False, 'properties': {'img_signature_certificate_uuid': 'fake_uuid', 'img_signature_hash_method': 'SHA-256', 'img_signature': 'signature', 'img_signature_key_type': 'RSA-PSS'}} class FakeImageService(object): def show(self, context, image_id): return metadata self.flags(verify_glance_signatures='enabled') mock_get.return_value = MockVerifier() result = image_utils.verify_glance_image_signature( ctxt, FakeImageService(), 'fake_id', 'fake_path') self.assertTrue(result) mock_exec.assert_called_once_with( image_utils._verify_image, mock_open.return_value.__enter__.return_value, mock_get.return_value) mock_get.assert_called_once_with( context=ctxt, img_signature_certificate_uuid='fake_uuid', img_signature_hash_method='SHA-256', img_signature='signature', img_signature_key_type='RSA-PSS') class TestVerifyImage(test.TestCase): 
@mock.patch('cinder.image.image_utils.qemu_img_info') @mock.patch('cinder.image.image_utils.fileutils') @mock.patch('cinder.image.image_utils.fetch') def test_defaults(self, mock_fetch, mock_fileutils, mock_info): ctxt = mock.sentinel.context image_service = mock.Mock() image_id = mock.sentinel.image_id dest = mock.sentinel.dest mock_data = mock_info.return_value mock_data.file_format = 'test_format' mock_data.backing_file = None output = image_utils.fetch_verify_image(ctxt, image_service, image_id, dest) self.assertIsNone(output) mock_fetch.assert_called_once_with(ctxt, image_service, image_id, dest, None, None) mock_info.assert_called_once_with(dest, run_as_root=True, force_share=False) mock_fileutils.remove_path_on_error.assert_called_once_with(dest) (mock_fileutils.remove_path_on_error.return_value.__enter__ .assert_called_once_with()) (mock_fileutils.remove_path_on_error.return_value.__exit__ .assert_called_once_with(None, None, None)) @mock.patch('cinder.image.image_utils.check_virtual_size') @mock.patch('cinder.image.image_utils.check_available_space') @mock.patch('cinder.image.image_utils.qemu_img_info') @mock.patch('cinder.image.image_utils.fileutils') @mock.patch('cinder.image.image_utils.fetch') def test_kwargs(self, mock_fetch, mock_fileutils, mock_info, mock_check_space, mock_check_size): ctxt = mock.sentinel.context image_service = FakeImageService() image_id = mock.sentinel.image_id dest = mock.sentinel.dest mock_data = mock_info.return_value mock_data.file_format = 'test_format' mock_data.backing_file = None mock_data.virtual_size = 1 output = image_utils.fetch_verify_image( ctxt, image_service, image_id, dest) self.assertIsNone(output) mock_fetch.assert_called_once_with(ctxt, image_service, image_id, dest, None, None) mock_fileutils.remove_path_on_error.assert_called_once_with(dest) (mock_fileutils.remove_path_on_error.return_value.__enter__ .assert_called_once_with()) (mock_fileutils.remove_path_on_error.return_value.__exit__ .assert_called_once_with(None, None, None)) @mock.patch('cinder.image.image_utils.qemu_img_info') @mock.patch('cinder.image.image_utils.fileutils') @mock.patch('cinder.image.image_utils.fetch') def test_format_error(self, mock_fetch, mock_fileutils, mock_info): ctxt = mock.sentinel.context image_service = mock.Mock() image_id = mock.sentinel.image_id dest = mock.sentinel.dest mock_data = mock_info.return_value mock_data.file_format = None mock_data.backing_file = None self.assertRaises(exception.ImageUnacceptable, image_utils.fetch_verify_image, ctxt, image_service, image_id, dest) @mock.patch('cinder.image.image_utils.qemu_img_info') @mock.patch('cinder.image.image_utils.fileutils') @mock.patch('cinder.image.image_utils.fetch') def test_backing_file_error(self, mock_fetch, mock_fileutils, mock_info): ctxt = mock.sentinel.context image_service = mock.Mock() image_id = mock.sentinel.image_id dest = mock.sentinel.dest mock_data = mock_info.return_value mock_data.file_format = 'test_format' mock_data.backing_file = 'test_backing_file' self.assertRaises(exception.ImageUnacceptable, image_utils.fetch_verify_image, ctxt, image_service, image_id, dest) class TestTemporaryDir(test.TestCase): @mock.patch('oslo_utils.fileutils.ensure_tree') @mock.patch('cinder.image.image_utils.utils.tempdir') def test_conv_dir_exists(self, mock_tempdir, mock_make): self.flags(image_conversion_dir='fake_conv_dir') output = image_utils.temporary_dir() self.assertTrue(mock_make.called) mock_tempdir.assert_called_once_with(dir='fake_conv_dir') self.assertEqual(output, 
mock_tempdir.return_value) @mock.patch('oslo_utils.fileutils.ensure_tree') @mock.patch('cinder.image.image_utils.utils.tempdir') def test_create_conv_dir(self, mock_tempdir, mock_make): self.flags(image_conversion_dir='fake_conv_dir') output = image_utils.temporary_dir() mock_make.assert_called_once_with('fake_conv_dir') mock_tempdir.assert_called_once_with(dir='fake_conv_dir') self.assertEqual(output, mock_tempdir.return_value) @mock.patch('oslo_utils.fileutils.ensure_tree') @mock.patch('cinder.image.image_utils.utils.tempdir') def test_no_conv_dir(self, mock_tempdir, mock_make): self.flags(image_conversion_dir=None) output = image_utils.temporary_dir() self.assertTrue(mock_make.called) mock_tempdir.assert_called_once_with(dir=None) self.assertEqual(output, mock_tempdir.return_value) @ddt.ddt class TestUploadVolume(test.TestCase): @ddt.data((mock.sentinel.disk_format, mock.sentinel.disk_format, True), (mock.sentinel.disk_format, mock.sentinel.disk_format, False), ('ploop', 'parallels', True), ('ploop', 'parallels', False)) @mock.patch('eventlet.tpool.Proxy') @mock.patch('cinder.image.image_utils.open', new_callable=mock.mock_open) @mock.patch('cinder.image.image_utils.qemu_img_info') @mock.patch('cinder.image.image_utils.convert_image') @mock.patch('cinder.image.image_utils.temporary_file') @mock.patch('cinder.image.image_utils.os') def test_diff_format(self, image_format, mock_os, mock_temp, mock_convert, mock_info, mock_open, mock_proxy): input_format, output_format, do_compress = image_format ctxt = mock.sentinel.context image_service = mock.Mock() image_meta = {'id': 'test_id', 'disk_format': input_format, 'container_format': mock.sentinel.container_format} volume_path = mock.sentinel.volume_path mock_os.name = 'posix' data = mock_info.return_value data.file_format = output_format data.backing_file = None temp_file = mock_temp.return_value.__enter__.return_value output = image_utils.upload_volume(ctxt, image_service, image_meta, volume_path, compress=do_compress) self.assertIsNone(output) mock_convert.assert_called_once_with(volume_path, temp_file, output_format, run_as_root=True, compress=do_compress, image_id=image_meta['id'], data=data) mock_info.assert_called_with(temp_file, run_as_root=True) self.assertEqual(2, mock_info.call_count) mock_open.assert_called_once_with(temp_file, 'rb') mock_proxy.assert_called_once_with( mock_open.return_value.__enter__.return_value) image_service.update.assert_called_once_with( ctxt, image_meta['id'], {}, mock_proxy.return_value, store_id=None, base_image_ref=None) @mock.patch('eventlet.tpool.Proxy') @mock.patch('cinder.image.image_utils.utils.temporary_chown') @mock.patch('cinder.image.image_utils.open', new_callable=mock.mock_open) @mock.patch('cinder.image.image_utils.qemu_img_info') @mock.patch('cinder.image.image_utils.convert_image') @mock.patch('cinder.image.image_utils.temporary_file') @mock.patch('cinder.image.image_utils.os') def test_same_format(self, mock_os, mock_temp, mock_convert, mock_info, mock_open, mock_chown, mock_proxy): ctxt = mock.sentinel.context image_service = mock.Mock() image_meta = {'id': 'test_id', 'disk_format': 'raw', 'container_format': mock.sentinel.container_format} volume_path = mock.sentinel.volume_path mock_os.name = 'posix' mock_os.access.return_value = False output = image_utils.upload_volume(ctxt, image_service, image_meta, volume_path) self.assertIsNone(output) self.assertFalse(mock_convert.called) self.assertFalse(mock_info.called) mock_chown.assert_called_once_with(volume_path) 
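        # The source is already 'raw', so upload_volume() performs no
        # conversion here: os.access() was mocked to deny read access, so the
        # path is temporarily chowned, opened read-only, wrapped in an
        # eventlet tpool proxy, and streamed straight to Glance.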
mock_open.assert_called_once_with(volume_path, 'rb') mock_proxy.assert_called_once_with( mock_open.return_value.__enter__.return_value) image_service.update.assert_called_once_with( ctxt, image_meta['id'], {}, mock_proxy.return_value, store_id=None, base_image_ref=None) @mock.patch('eventlet.tpool.Proxy') @mock.patch('cinder.image.image_utils.utils.temporary_chown') @mock.patch('cinder.image.image_utils.open', new_callable=mock.mock_open) @mock.patch('cinder.image.image_utils.qemu_img_info') @mock.patch('cinder.image.image_utils.convert_image') @mock.patch('cinder.image.image_utils.temporary_file') @mock.patch('cinder.image.image_utils.os') def test_same_format_fd(self, mock_os, mock_temp, mock_convert, mock_info, mock_open, mock_chown, mock_proxy): ctxt = mock.sentinel.context image_service = mock.Mock() image_meta = {'id': 'test_id', 'disk_format': 'raw', 'container_format': mock.sentinel.container_format} mock_os.name = 'posix' mock_os.access.return_value = False output = image_utils.upload_volume(ctxt, image_service, image_meta, None, volume_fd=mock.sentinel.volume_fd) self.assertIsNone(output) self.assertFalse(mock_convert.called) self.assertFalse(mock_info.called) mock_chown.assert_not_called() mock_open.assert_not_called() mock_proxy.assert_called_once_with(mock.sentinel.volume_fd) image_service.update.assert_called_once_with( ctxt, image_meta['id'], {}, mock_proxy.return_value, store_id=None, base_image_ref=None) @mock.patch('cinder.image.accelerator.ImageAccel._get_engine') @mock.patch('cinder.image.accelerator.ImageAccel.is_engine_ready', return_value = True) @mock.patch('eventlet.tpool.Proxy') @mock.patch('cinder.image.image_utils.utils.temporary_chown') @mock.patch('cinder.image.image_utils.open', new_callable=mock.mock_open) @mock.patch('cinder.image.image_utils.qemu_img_info') @mock.patch('cinder.image.image_utils.convert_image') @mock.patch('cinder.image.image_utils.temporary_file') @mock.patch('cinder.image.image_utils.os') def test_same_format_compressed(self, mock_os, mock_temp, mock_convert, mock_info, mock_open, mock_chown, mock_proxy, mock_engine_ready, mock_get_engine): class fakeEngine(object): def __init__(self): pass def compress_img(self, src, dest, run_as_root): pass ctxt = mock.sentinel.context image_service = mock.Mock() image_meta = {'id': 'test_id', 'disk_format': 'raw', 'container_format': 'compressed'} self.flags(allow_compression_on_image_upload=True) volume_path = mock.sentinel.volume_path mock_os.name = 'posix' data = mock_info.return_value data.file_format = 'raw' data.backing_file = None temp_file = mock_temp.return_value.__enter__.return_value mock_engine = mock.Mock(spec=fakeEngine) mock_get_engine.return_value = mock_engine output = image_utils.upload_volume(ctxt, image_service, image_meta, volume_path) self.assertIsNone(output) mock_convert.assert_called_once_with(volume_path, temp_file, 'raw', compress=True, run_as_root=True, image_id=image_meta['id'], data=data) mock_info.assert_called_with(temp_file, run_as_root=True) self.assertEqual(2, mock_info.call_count) mock_open.assert_called_once_with(temp_file, 'rb') mock_proxy.assert_called_once_with( mock_open.return_value.__enter__.return_value) image_service.update.assert_called_once_with( ctxt, image_meta['id'], {}, mock_proxy.return_value, store_id=None, base_image_ref=None) mock_engine.compress_img.assert_called() @mock.patch('eventlet.tpool.Proxy') @mock.patch('cinder.image.image_utils.utils.temporary_chown') @mock.patch('cinder.image.image_utils.open', new_callable=mock.mock_open) 
@mock.patch('cinder.image.image_utils.qemu_img_info') @mock.patch('cinder.image.image_utils.convert_image') @mock.patch('cinder.image.image_utils.temporary_file') @mock.patch('cinder.image.image_utils.os') def test_same_format_on_nt(self, mock_os, mock_temp, mock_convert, mock_info, mock_open, mock_chown, mock_proxy): ctxt = mock.sentinel.context image_service = mock.Mock() image_meta = {'id': 'test_id', 'disk_format': 'raw', 'container_format': 'bare'} volume_path = mock.sentinel.volume_path mock_os.name = 'nt' mock_os.access.return_value = False output = image_utils.upload_volume(ctxt, image_service, image_meta, volume_path) self.assertIsNone(output) self.assertFalse(mock_convert.called) self.assertFalse(mock_info.called) mock_open.assert_called_once_with(volume_path, 'rb') mock_proxy.assert_called_once_with( mock_open.return_value.__enter__.return_value) image_service.update.assert_called_once_with( ctxt, image_meta['id'], {}, mock_proxy.return_value, store_id=None, base_image_ref=None) @mock.patch('cinder.image.accelerator.ImageAccel._get_engine') @mock.patch('cinder.image.accelerator.ImageAccel.is_engine_ready', return_value = True) @mock.patch('eventlet.tpool.Proxy') @mock.patch('cinder.image.image_utils.utils.temporary_chown') @mock.patch('cinder.image.image_utils.open', new_callable=mock.mock_open) @mock.patch('cinder.image.image_utils.qemu_img_info') @mock.patch('cinder.image.image_utils.convert_image') @mock.patch('cinder.image.image_utils.temporary_file') @mock.patch('cinder.image.image_utils.os') def test_same_format_on_nt_compressed(self, mock_os, mock_temp, mock_convert, mock_info, mock_open, mock_chown, mock_proxy, mock_engine_ready, mock_get_engine): class fakeEngine(object): def __init__(self): pass def compress_img(self, src, dest, run_as_root): pass ctxt = mock.sentinel.context image_service = mock.Mock() image_meta = {'id': 'test_id', 'disk_format': 'raw', 'container_format': 'compressed'} self.flags(allow_compression_on_image_upload=True) volume_path = mock.sentinel.volume_path mock_os.name = 'posix' data = mock_info.return_value data.file_format = 'raw' data.backing_file = None temp_file = mock_temp.return_value.__enter__.return_value mock_engine = mock.Mock(spec=fakeEngine) mock_get_engine.return_value = mock_engine output = image_utils.upload_volume(ctxt, image_service, image_meta, volume_path) self.assertIsNone(output) mock_convert.assert_called_once_with(volume_path, temp_file, 'raw', compress=True, run_as_root=True, image_id=image_meta['id'], data=data) mock_info.assert_called_with(temp_file, run_as_root=True) self.assertEqual(2, mock_info.call_count) mock_open.assert_called_once_with(temp_file, 'rb') mock_proxy.assert_called_once_with( mock_open.return_value.__enter__.return_value) image_service.update.assert_called_once_with( ctxt, image_meta['id'], {}, mock_proxy.return_value, store_id=None, base_image_ref=None) mock_engine.compress_img.assert_called() @mock.patch('cinder.image.image_utils.qemu_img_info') @mock.patch('cinder.image.image_utils.convert_image') @mock.patch('cinder.image.image_utils.temporary_file') @mock.patch('cinder.image.image_utils.os') def test_convert_error(self, mock_os, mock_temp, mock_convert, mock_info): ctxt = mock.sentinel.context image_service = mock.Mock() image_meta = {'id': 'test_id', 'disk_format': mock.sentinel.disk_format, 'container_format': mock.sentinel.container_format} volume_path = mock.sentinel.volume_path mock_os.name = 'posix' data = mock_info.return_value data.file_format = mock.sentinel.other_disk_format 
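        # The post-conversion qemu_img_info() check reports a format that
        # differs from the requested disk_format, so upload_volume() must
        # raise ImageUnacceptable and never call image_service.update().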
data.backing_file = None temp_file = mock_temp.return_value.__enter__.return_value self.assertRaises(exception.ImageUnacceptable, image_utils.upload_volume, ctxt, image_service, image_meta, volume_path) mock_convert.assert_called_once_with(volume_path, temp_file, mock.sentinel.disk_format, run_as_root=True, compress=True, image_id=image_meta['id'], data=data) mock_info.assert_called_with(temp_file, run_as_root=True) self.assertEqual(2, mock_info.call_count) self.assertFalse(image_service.update.called) @mock.patch('eventlet.tpool.Proxy') @mock.patch('cinder.image.image_utils.utils.temporary_chown') @mock.patch('cinder.image.image_utils.open', new_callable=mock.mock_open) @mock.patch('cinder.image.image_utils.qemu_img_info') @mock.patch('cinder.image.image_utils.convert_image') @mock.patch('cinder.image.image_utils.temporary_file') @mock.patch('cinder.image.image_utils.os') def test_base_image_ref(self, mock_os, mock_temp, mock_convert, mock_info, mock_open, mock_chown, mock_proxy): ctxt = mock.sentinel.context image_service = mock.Mock() image_meta = {'id': 'test_id', 'disk_format': 'raw', 'container_format': mock.sentinel.container_format} volume_path = mock.sentinel.volume_path mock_os.name = 'posix' mock_os.access.return_value = False image_utils.upload_volume(ctxt, image_service, image_meta, volume_path, base_image_ref='xyz') mock_open.assert_called_once_with(volume_path, 'rb') image_service.update.assert_called_once_with( ctxt, image_meta['id'], {}, mock_proxy.return_value, store_id=None, base_image_ref='xyz') class TestFetchToVhd(test.TestCase): @mock.patch('cinder.image.image_utils.fetch_to_volume_format') def test_defaults(self, mock_fetch_to): ctxt = mock.sentinel.context image_service = mock.sentinel.image_service image_id = mock.sentinel.image_id dest = mock.sentinel.dest blocksize = mock.sentinel.blocksize out_subformat = 'fake_subformat' output = image_utils.fetch_to_vhd(ctxt, image_service, image_id, dest, blocksize, volume_subformat=out_subformat) self.assertIsNone(output) mock_fetch_to.assert_called_once_with(ctxt, image_service, image_id, dest, 'vpc', blocksize, volume_subformat=out_subformat, user_id=None, project_id=None, run_as_root=True, disable_sparse=False) @mock.patch('cinder.image.image_utils.check_available_space') @mock.patch('cinder.image.image_utils.fetch_to_volume_format') def test_kwargs(self, mock_fetch_to, mock_check_space): ctxt = mock.sentinel.context image_service = mock.sentinel.image_service image_id = mock.sentinel.image_id dest = mock.sentinel.dest blocksize = mock.sentinel.blocksize user_id = mock.sentinel.user_id project_id = mock.sentinel.project_id run_as_root = mock.sentinel.run_as_root out_subformat = 'fake_subformat' output = image_utils.fetch_to_vhd(ctxt, image_service, image_id, dest, blocksize, user_id=user_id, project_id=project_id, run_as_root=run_as_root, volume_subformat=out_subformat) self.assertIsNone(output) mock_fetch_to.assert_called_once_with(ctxt, image_service, image_id, dest, 'vpc', blocksize, volume_subformat=out_subformat, user_id=user_id, project_id=project_id, run_as_root=run_as_root, disable_sparse=False) class TestFetchToRaw(test.TestCase): @mock.patch('cinder.image.image_utils.fetch_to_volume_format') def test_defaults(self, mock_fetch_to): ctxt = mock.sentinel.context image_service = mock.sentinel.image_service image_id = mock.sentinel.image_id dest = mock.sentinel.dest blocksize = mock.sentinel.blocksize output = image_utils.fetch_to_raw(ctxt, image_service, image_id, dest, blocksize) self.assertIsNone(output) 
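        # The assertion below treats fetch_to_raw() as a thin wrapper, the
        # same delegation pattern the TestFetchToVhd cases above expect from
        # fetch_to_vhd() (which forwards 'vpc', qemu-img's name for the VHD
        # format).  A minimal sketch of that assumed shape, not the real
        # implementation:
        #
        #     def fetch_to_raw(ctxt, image_service, image_id, dest,
        #                      blocksize, **kwargs):
        #         return fetch_to_volume_format(ctxt, image_service, image_id,
        #                                       dest, 'raw', blocksize,
        #                                       **kwargs)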
mock_fetch_to.assert_called_once_with(ctxt, image_service, image_id, dest, 'raw', blocksize, user_id=None, project_id=None, size=None, run_as_root=True, disable_sparse=False) @mock.patch('cinder.image.image_utils.check_available_space') @mock.patch('cinder.image.image_utils.fetch_to_volume_format') def test_kwargs(self, mock_fetch_to, mock_check_space): ctxt = mock.sentinel.context image_service = mock.sentinel.image_service image_id = mock.sentinel.image_id dest = mock.sentinel.dest blocksize = mock.sentinel.blocksize user_id = mock.sentinel.user_id project_id = mock.sentinel.project_id size = mock.sentinel.size run_as_root = mock.sentinel.run_as_root output = image_utils.fetch_to_raw(ctxt, image_service, image_id, dest, blocksize, user_id=user_id, project_id=project_id, size=size, run_as_root=run_as_root) self.assertIsNone(output) mock_fetch_to.assert_called_once_with(ctxt, image_service, image_id, dest, 'raw', blocksize, user_id=user_id, size=size, project_id=project_id, run_as_root=run_as_root, disable_sparse=False) class FakeImageService(object): def __init__(self, image_service=None, disk_format='raw'): self.temp_images = None self.disk_format = disk_format def show(self, context, image_id): return {'size': 2 * units.Gi, 'disk_format': self.disk_format, 'container_format': 'bare', 'status': 'active'} @ddt.ddt(testNameFormat=ddt.TestNameFormat.INDEX_ONLY) class TestFetchToVolumeFormat(test.TestCase): @mock.patch('cinder.image.image_utils.check_available_space') @mock.patch('cinder.image.image_utils.convert_image') @mock.patch('cinder.image.image_utils.volume_utils.copy_volume') @mock.patch( 'cinder.image.image_utils.replace_xenserver_image_with_coalesced_vhd') @mock.patch('cinder.image.image_utils.is_xenserver_format', return_value=False) @mock.patch('cinder.image.image_utils.fetch') @mock.patch('cinder.image.image_utils.qemu_img_info') @mock.patch('cinder.image.image_utils.temporary_file') def test_defaults(self, mock_temp, mock_info, mock_fetch, mock_is_xen, mock_repl_xen, mock_copy, mock_convert, mock_check_space): ctxt = mock.sentinel.context ctxt.user_id = mock.sentinel.user_id image_service = FakeImageService() image_id = mock.sentinel.image_id dest = mock.sentinel.dest volume_format = mock.sentinel.volume_format out_subformat = None blocksize = mock.sentinel.blocksize disk_format = 'raw' data = mock_info.return_value data.file_format = disk_format data.backing_file = None data.virtual_size = 1234 tmp = mock_temp.return_value.__enter__.return_value output = image_utils.fetch_to_volume_format(ctxt, image_service, image_id, dest, volume_format, blocksize) self.assertIsNone(output) mock_temp.assert_called_once_with(prefix='image_download_%s_' % image_id) mock_info.assert_has_calls([ mock.call(tmp, force_share=False, run_as_root=True), mock.call(tmp, run_as_root=True)]) mock_fetch.assert_called_once_with(ctxt, image_service, image_id, tmp, None, None) self.assertFalse(mock_repl_xen.called) self.assertFalse(mock_copy.called) mock_convert.assert_called_once_with(tmp, dest, volume_format, out_subformat=out_subformat, run_as_root=True, src_format=disk_format, image_id=image_id, data=data, disable_sparse=False) @mock.patch('cinder.image.image_utils.check_virtual_size') @mock.patch('cinder.image.image_utils.check_available_space') @mock.patch('cinder.image.image_utils.convert_image') @mock.patch('cinder.image.image_utils.volume_utils.copy_volume') @mock.patch( 'cinder.image.image_utils.replace_xenserver_image_with_coalesced_vhd') @mock.patch('cinder.image.image_utils.is_xenserver_format', 
return_value=False) @mock.patch('cinder.image.image_utils.fetch') @mock.patch('cinder.image.image_utils.qemu_img_info') @mock.patch('cinder.image.image_utils.temporary_file') def test_kwargs(self, mock_temp, mock_info, mock_fetch, mock_is_xen, mock_repl_xen, mock_copy, mock_convert, mock_check_space, mock_check_size): ctxt = mock.sentinel.context disk_format = 'ploop' qemu_img_format = image_utils.QEMU_IMG_FORMAT_MAP[disk_format] image_service = FakeImageService(disk_format=disk_format) image_id = mock.sentinel.image_id dest = mock.sentinel.dest volume_format = mock.sentinel.volume_format out_subformat = None blocksize = mock.sentinel.blocksize ctxt.user_id = user_id = mock.sentinel.user_id project_id = mock.sentinel.project_id size = 4321 run_as_root = mock.sentinel.run_as_root data = mock_info.return_value data.file_format = qemu_img_format data.backing_file = None data.virtual_size = 1234 tmp = mock_temp.return_value.__enter__.return_value output = image_utils.fetch_to_volume_format( ctxt, image_service, image_id, dest, volume_format, blocksize, user_id=user_id, project_id=project_id, size=size, run_as_root=run_as_root) self.assertIsNone(output) mock_temp.assert_called_once_with(prefix='image_download_%s_' % image_id) mock_info.assert_has_calls([ mock.call(tmp, force_share=False, run_as_root=run_as_root), mock.call(tmp, run_as_root=run_as_root)]) mock_fetch.assert_called_once_with(ctxt, image_service, image_id, tmp, user_id, project_id) self.assertFalse(mock_repl_xen.called) self.assertFalse(mock_copy.called) mock_convert.assert_called_once_with(tmp, dest, volume_format, out_subformat=out_subformat, run_as_root=run_as_root, src_format=qemu_img_format, image_id=image_id, data=data, disable_sparse=False) mock_check_size.assert_called_once_with(data.virtual_size, size, image_id) @ddt.data(('raw', 'qcow2', False), ('raw', 'raw', False), ('raw', 'raw', True)) def test_check_image_conversion(self, conversion_opts): image_disk_format, volume_format, image_conversion_disable = \ conversion_opts self.flags(image_conversion_disable=image_conversion_disable) self.assertIsNone(image_utils.check_image_conversion_disable( image_disk_format, volume_format, fake.IMAGE_ID)) @ddt.data((True, 'volume can only be uploaded in the format'), (False, 'must use an image with the disk_format property')) def test_check_image_conversion_disable(self, info): # NOTE: the error message is different depending on direction, # where True means upload direction, message_fragment = info self.flags(image_conversion_disable=True) exc = self.assertRaises(exception.ImageConversionNotAllowed, image_utils.check_image_conversion_disable, 'foo', 'bar', fake.IMAGE_ID, upload=direction) if direction: self.assertIn(message_fragment, str(exc)) else: self.assertIn(message_fragment, str(exc)) @mock.patch('cinder.image.image_utils.check_virtual_size') @mock.patch('cinder.image.image_utils.check_available_space') @mock.patch('cinder.image.image_utils.convert_image') @mock.patch('cinder.image.image_utils.volume_utils.copy_volume') @mock.patch( 'cinder.image.image_utils.replace_xenserver_image_with_coalesced_vhd') @mock.patch('cinder.image.image_utils.is_xenserver_format', return_value=True) @mock.patch('cinder.image.image_utils.fetch') @mock.patch('cinder.image.image_utils.qemu_img_info') @mock.patch('cinder.image.image_utils.temporary_file') def test_convert_from_vhd(self, mock_temp, mock_info, mock_fetch, mock_is_xen, mock_repl_xen, mock_copy, mock_convert, mock_check_space, mock_check_size): ctxt = mock.sentinel.context image_id = 
mock.sentinel.image_id dest = mock.sentinel.dest volume_format = mock.sentinel.volume_format out_subformat = None blocksize = mock.sentinel.blocksize ctxt.user_id = user_id = mock.sentinel.user_id project_id = mock.sentinel.project_id size = 4321 run_as_root = mock.sentinel.run_as_root disk_format = 'vhd' data = mock_info.return_value data.file_format = image_utils.QEMU_IMG_FORMAT_MAP[disk_format] data.backing_file = None data.virtual_size = 1234 tmp = mock_temp.return_value.__enter__.return_value image_service = FakeImageService(disk_format=disk_format) expect_format = 'vpc' output = image_utils.fetch_to_volume_format( ctxt, image_service, image_id, dest, volume_format, blocksize, user_id=user_id, project_id=project_id, size=size, run_as_root=run_as_root) self.assertIsNone(output) mock_temp.assert_called_once_with(prefix='image_download_%s_' % image_id) mock_info.assert_has_calls([ mock.call(tmp, force_share=False, run_as_root=run_as_root), mock.call(tmp, run_as_root=run_as_root)]) mock_fetch.assert_called_once_with(ctxt, image_service, image_id, tmp, user_id, project_id) mock_repl_xen.assert_called_once_with(tmp) self.assertFalse(mock_copy.called) mock_convert.assert_called_once_with(tmp, dest, volume_format, out_subformat=out_subformat, run_as_root=run_as_root, src_format=expect_format, image_id=image_id, data=data, disable_sparse=False) @mock.patch('cinder.image.image_utils.check_virtual_size') @mock.patch('cinder.image.image_utils.check_available_space') @mock.patch('cinder.image.image_utils.convert_image') @mock.patch('cinder.image.image_utils.volume_utils.copy_volume') @mock.patch('cinder.image.image_utils.is_xenserver_format', return_value=False) @mock.patch('cinder.image.image_utils.fetch') @mock.patch('cinder.image.image_utils.qemu_img_info') @mock.patch('cinder.image.image_utils.temporary_file') def test_convert_from_iso(self, mock_temp, mock_info, mock_fetch, mock_is_xen, mock_copy, mock_convert, mock_check_space, mock_check_size): ctxt = mock.sentinel.context image_id = mock.sentinel.image_id dest = mock.sentinel.dest volume_format = mock.sentinel.volume_format out_subformat = None blocksize = mock.sentinel.blocksize ctxt.user_id = user_id = mock.sentinel.user_id project_id = mock.sentinel.project_id size = 4321 run_as_root = mock.sentinel.run_as_root disk_format = 'iso' data = mock_info.return_value data.file_format = image_utils.QEMU_IMG_FORMAT_MAP[disk_format] data.backing_file = None data.virtual_size = 1234 tmp = mock_temp.return_value.__enter__.return_value image_service = FakeImageService(disk_format=disk_format) expect_format = 'raw' output = image_utils.fetch_to_volume_format( ctxt, image_service, image_id, dest, volume_format, blocksize, user_id=user_id, project_id=project_id, size=size, run_as_root=run_as_root) self.assertIsNone(output) mock_temp.assert_called_once_with(prefix='image_download_%s_' % image_id) mock_info.assert_has_calls([ mock.call(tmp, force_share=False, run_as_root=run_as_root), mock.call(tmp, run_as_root=run_as_root)]) mock_fetch.assert_called_once_with(ctxt, image_service, image_id, tmp, user_id, project_id) self.assertFalse(mock_copy.called) mock_convert.assert_called_once_with(tmp, dest, volume_format, out_subformat=out_subformat, run_as_root=run_as_root, src_format=expect_format, image_id=image_id, data=data, disable_sparse=False) @mock.patch('cinder.image.image_utils.check_available_space', new=mock.Mock()) @mock.patch('cinder.image.image_utils.is_xenserver_format', new=mock.Mock(return_value=False)) 
@mock.patch('cinder.image.image_utils.convert_image') @mock.patch('cinder.image.image_utils.volume_utils.copy_volume') @mock.patch( 'cinder.image.image_utils.replace_xenserver_image_with_coalesced_vhd') @mock.patch('cinder.image.image_utils.fetch') @mock.patch('cinder.image.image_utils.qemu_img_info') @mock.patch('cinder.image.image_utils.temporary_file') def test_temporary_images(self, mock_temp, mock_info, mock_fetch, mock_repl_xen, mock_copy, mock_convert): ctxt = mock.sentinel.context ctxt.user_id = mock.sentinel.user_id disk_format = 'ploop' qemu_img_format = image_utils.QEMU_IMG_FORMAT_MAP[disk_format] image_service = FakeImageService(disk_format=disk_format) image_id = mock.sentinel.image_id dest = mock.sentinel.dest volume_format = mock.sentinel.volume_format out_subformat = None blocksize = mock.sentinel.blocksize data = mock_info.return_value data.file_format = qemu_img_format data.backing_file = None data.virtual_size = 1234 tmp = mock.sentinel.tmp dummy = mock.sentinel.dummy mock_temp.return_value.__enter__.side_effect = [tmp, dummy] with image_utils.TemporaryImages.fetch(image_service, ctxt, image_id) as tmp_img: self.assertEqual(tmp_img, tmp) output = image_utils.fetch_to_volume_format(ctxt, image_service, image_id, dest, volume_format, blocksize, disable_sparse=False) self.assertIsNone(output) self.assertEqual(2, mock_temp.call_count) mock_info.assert_has_calls([ mock.call(tmp, force_share=False, run_as_root=True), mock.call(dummy, force_share=False, run_as_root=True), mock.call(tmp, run_as_root=True)]) mock_fetch.assert_called_once_with(ctxt, image_service, image_id, tmp, None, None) self.assertFalse(mock_repl_xen.called) self.assertFalse(mock_copy.called) mock_convert.assert_called_once_with(tmp, dest, volume_format, out_subformat=out_subformat, run_as_root=True, src_format=qemu_img_format, image_id=image_id, data=data, disable_sparse=False) @mock.patch('cinder.image.image_utils.convert_image') @mock.patch('cinder.image.image_utils.volume_utils.copy_volume') @mock.patch( 'cinder.image.image_utils.replace_xenserver_image_with_coalesced_vhd') @mock.patch('cinder.image.image_utils.is_xenserver_format', return_value=False) @mock.patch('cinder.image.image_utils.fetch') @mock.patch('cinder.image.image_utils.qemu_img_info', side_effect=processutils.ProcessExecutionError) @mock.patch('cinder.image.image_utils.temporary_file') def test_no_qemu_img_and_is_raw(self, mock_temp, mock_info, mock_fetch, mock_is_xen, mock_repl_xen, mock_copy, mock_convert): ctxt = mock.sentinel.context image_service = mock.Mock(temp_images=None) image_id = mock.sentinel.image_id dest = mock.sentinel.dest volume_format = mock.sentinel.volume_format blocksize = mock.sentinel.blocksize ctxt.user_id = user_id = mock.sentinel.user_id project_id = mock.sentinel.project_id size = 4321 run_as_root = mock.sentinel.run_as_root tmp = mock_temp.return_value.__enter__.return_value image_service.show.return_value = {'disk_format': 'raw', 'size': 41126400} image_size_m = math.ceil(float(41126400) / units.Mi) output = image_utils.fetch_to_volume_format( ctxt, image_service, image_id, dest, volume_format, blocksize, user_id=user_id, project_id=project_id, size=size, run_as_root=run_as_root) self.assertIsNone(output) image_service.show.assert_called_once_with(ctxt, image_id) mock_temp.assert_called_once_with(prefix='image_download_%s_' % image_id) mock_info.assert_called_once_with(tmp, force_share=False, run_as_root=run_as_root) mock_fetch.assert_called_once_with(ctxt, image_service, image_id, tmp, user_id, project_id) 
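        # qemu_img_info() failed (ProcessExecutionError) but Glance reports
        # the image as raw, so fetch_to_volume_format() falls back to a plain
        # block copy sized from the Glance metadata instead of converting.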
self.assertFalse(mock_repl_xen.called) mock_copy.assert_called_once_with(tmp, dest, image_size_m, blocksize) self.assertFalse(mock_convert.called) @mock.patch('cinder.image.image_utils.convert_image') @mock.patch('cinder.image.image_utils.volume_utils.copy_volume') @mock.patch( 'cinder.image.image_utils.replace_xenserver_image_with_coalesced_vhd') @mock.patch('cinder.image.image_utils.is_xenserver_format', return_value=False) @mock.patch('cinder.image.image_utils.fetch') @mock.patch('cinder.image.image_utils.qemu_img_info', side_effect=processutils.ProcessExecutionError) @mock.patch('cinder.image.image_utils.temporary_file') def test_no_qemu_img_not_raw(self, mock_temp, mock_info, mock_fetch, mock_is_xen, mock_repl_xen, mock_copy, mock_convert): ctxt = mock.sentinel.context image_service = mock.Mock() image_id = mock.sentinel.image_id dest = mock.sentinel.dest volume_format = mock.sentinel.volume_format blocksize = mock.sentinel.blocksize user_id = mock.sentinel.user_id project_id = mock.sentinel.project_id size = 4321 run_as_root = mock.sentinel.run_as_root tmp = mock_temp.return_value.__enter__.return_value image_service.show.return_value = {'disk_format': 'not_raw'} self.assertRaises( exception.ImageUnacceptable, image_utils.fetch_to_volume_format, ctxt, image_service, image_id, dest, volume_format, blocksize, user_id=user_id, project_id=project_id, size=size, run_as_root=run_as_root) image_service.show.assert_called_once_with(ctxt, image_id) mock_temp.assert_called_once_with(prefix='image_download_%s_' % image_id) mock_info.assert_called_once_with(tmp, force_share=False, run_as_root=run_as_root) self.assertFalse(mock_fetch.called) self.assertFalse(mock_repl_xen.called) self.assertFalse(mock_copy.called) self.assertFalse(mock_convert.called) @mock.patch('cinder.image.image_utils.check_virtual_size') @mock.patch('cinder.image.image_utils.convert_image') @mock.patch('cinder.image.image_utils.volume_utils.copy_volume') @mock.patch( 'cinder.image.image_utils.replace_xenserver_image_with_coalesced_vhd') @mock.patch('cinder.image.image_utils.is_xenserver_format', return_value=False) @mock.patch('cinder.image.image_utils.fetch') @mock.patch('cinder.image.image_utils.qemu_img_info') @mock.patch('cinder.image.image_utils.temporary_file') def test_size_error(self, mock_temp, mock_info, mock_fetch, mock_is_xen, mock_repl_xen, mock_copy, mock_convert, mock_check_size): ctxt = mock.sentinel.context image_service = mock.Mock(temp_images=None) image_id = mock.sentinel.image_id dest = mock.sentinel.dest volume_format = mock.sentinel.volume_format blocksize = mock.sentinel.blocksize ctxt.user_id = user_id = mock.sentinel.user_id project_id = mock.sentinel.project_id size = 1234 run_as_root = mock.sentinel.run_as_root data = mock_info.return_value data.file_format = volume_format data.backing_file = None data.virtual_size = int(1234.5 * units.Gi) tmp = mock_temp.return_value.__enter__.return_value image_service.show.return_value = {'disk_format': 'raw'} mock_check_size.side_effect = exception.ImageUnacceptable( image_id='fake_image_id', reason='test') self.assertRaises( exception.ImageUnacceptable, image_utils.fetch_to_volume_format, ctxt, image_service, image_id, dest, volume_format, blocksize, user_id=user_id, project_id=project_id, size=size, run_as_root=run_as_root) image_service.show.assert_called_once_with(ctxt, image_id) mock_temp.assert_called_once_with(prefix='image_download_%s_' % image_id) mock_info.assert_has_calls([ mock.call(tmp, force_share=False, run_as_root=run_as_root), mock.call(tmp, 
run_as_root=run_as_root)]) mock_fetch.assert_called_once_with(ctxt, image_service, image_id, tmp, user_id, project_id) self.assertFalse(mock_repl_xen.called) self.assertFalse(mock_copy.called) self.assertFalse(mock_convert.called) @mock.patch('cinder.image.image_utils.convert_image') @mock.patch('cinder.image.image_utils.volume_utils.copy_volume') @mock.patch( 'cinder.image.image_utils.replace_xenserver_image_with_coalesced_vhd') @mock.patch('cinder.image.image_utils.is_xenserver_format', return_value=False) @mock.patch('cinder.image.image_utils.fetch') @mock.patch('cinder.image.image_utils.qemu_img_info') @mock.patch('cinder.image.image_utils.temporary_file') def test_qemu_img_parse_error(self, mock_temp, mock_info, mock_fetch, mock_is_xen, mock_repl_xen, mock_copy, mock_convert): ctxt = mock.sentinel.context image_service = mock.Mock(temp_images=None) image_id = mock.sentinel.image_id dest = mock.sentinel.dest volume_format = mock.sentinel.volume_format blocksize = mock.sentinel.blocksize ctxt.user_id = user_id = mock.sentinel.user_id project_id = mock.sentinel.project_id size = 4321 run_as_root = mock.sentinel.run_as_root data = mock_info.return_value data.file_format = None data.backing_file = None data.virtual_size = 1234 tmp = mock_temp.return_value.__enter__.return_value image_service.show.return_value = {'disk_format': 'raw'} self.assertRaises( exception.ImageUnacceptable, image_utils.fetch_to_volume_format, ctxt, image_service, image_id, dest, volume_format, blocksize, user_id=user_id, project_id=project_id, size=size, run_as_root=run_as_root) image_service.show.assert_called_once_with(ctxt, image_id) mock_temp.assert_called_once_with(prefix='image_download_%s_' % image_id) mock_info.assert_has_calls([ mock.call(tmp, force_share=False, run_as_root=run_as_root), mock.call(tmp, run_as_root=run_as_root)]) mock_fetch.assert_called_once_with(ctxt, image_service, image_id, tmp, user_id, project_id) self.assertFalse(mock_repl_xen.called) self.assertFalse(mock_copy.called) self.assertFalse(mock_convert.called) @mock.patch('cinder.image.image_utils.convert_image') @mock.patch('cinder.image.image_utils.volume_utils.copy_volume') @mock.patch( 'cinder.image.image_utils.replace_xenserver_image_with_coalesced_vhd') @mock.patch('cinder.image.image_utils.is_xenserver_format', return_value=False) @mock.patch('cinder.image.image_utils.fetch') @mock.patch('cinder.image.image_utils.qemu_img_info') @mock.patch('cinder.image.image_utils.temporary_file') def test_backing_file_error(self, mock_temp, mock_info, mock_fetch, mock_is_xen, mock_repl_xen, mock_copy, mock_convert): ctxt = mock.sentinel.context image_service = mock.Mock(temp_images=None) image_id = mock.sentinel.image_id dest = mock.sentinel.dest volume_format = mock.sentinel.volume_format blocksize = mock.sentinel.blocksize ctxt.user_id = user_id = mock.sentinel.user_id project_id = mock.sentinel.project_id size = 4321 run_as_root = mock.sentinel.run_as_root image_service.show.return_value = {'disk_format': 'raw'} data = mock_info.return_value data.file_format = volume_format data.backing_file = mock.sentinel.backing_file data.virtual_size = 1234 tmp = mock_temp.return_value.__enter__.return_value self.assertRaises( exception.ImageUnacceptable, image_utils.fetch_to_volume_format, ctxt, image_service, image_id, dest, volume_format, blocksize, user_id=user_id, project_id=project_id, size=size, run_as_root=run_as_root) image_service.show.assert_called_once_with(ctxt, image_id) mock_temp.assert_called_once_with(prefix='image_download_%s_' % 
image_id) mock_info.assert_has_calls([ mock.call(tmp, force_share=False, run_as_root=run_as_root), mock.call(tmp, run_as_root=run_as_root)]) mock_fetch.assert_called_once_with(ctxt, image_service, image_id, tmp, user_id, project_id) self.assertFalse(mock_repl_xen.called) self.assertFalse(mock_copy.called) self.assertFalse(mock_convert.called) @mock.patch('cinder.image.image_utils.check_virtual_size') @mock.patch('cinder.image.image_utils.check_available_space') @mock.patch('cinder.image.image_utils.convert_image') @mock.patch('cinder.image.image_utils.volume_utils.copy_volume') @mock.patch( 'cinder.image.image_utils.replace_xenserver_image_with_coalesced_vhd') @mock.patch('cinder.image.image_utils.is_xenserver_format', return_value=True) @mock.patch('cinder.image.image_utils.fetch') @mock.patch('cinder.image.image_utils.qemu_img_info') @mock.patch('cinder.image.image_utils.temporary_file') def test_xenserver_to_vhd(self, mock_temp, mock_info, mock_fetch, mock_is_xen, mock_repl_xen, mock_copy, mock_convert, mock_check_space, mock_check_size): ctxt = mock.sentinel.context disk_format = 'vhd' qemu_img_format = image_utils.QEMU_IMG_FORMAT_MAP[disk_format] image_service = FakeImageService(disk_format=disk_format) image_id = mock.sentinel.image_id dest = mock.sentinel.dest volume_format = mock.sentinel.volume_format blocksize = mock.sentinel.blocksize ctxt.user_id = user_id = mock.sentinel.user_id project_id = mock.sentinel.project_id size = 4321 run_as_root = mock.sentinel.run_as_root data = mock_info.return_value data.file_format = qemu_img_format data.backing_file = None data.virtual_size = 1234 tmp = mock_temp.return_value.__enter__.return_value output = image_utils.fetch_to_volume_format( ctxt, image_service, image_id, dest, volume_format, blocksize, user_id=user_id, project_id=project_id, size=size, run_as_root=run_as_root) self.assertIsNone(output) mock_temp.assert_called_once_with(prefix='image_download_%s_' % image_id) mock_info.assert_has_calls([ mock.call(tmp, force_share=False, run_as_root=run_as_root), mock.call(tmp, run_as_root=run_as_root)]) mock_fetch.assert_called_once_with(ctxt, image_service, image_id, tmp, user_id, project_id) mock_repl_xen.assert_called_once_with(tmp) self.assertFalse(mock_copy.called) mock_convert.assert_called_once_with(tmp, dest, volume_format, out_subformat=None, run_as_root=run_as_root, src_format=qemu_img_format, image_id=image_id, data=data, disable_sparse=False) @mock.patch('cinder.image.image_utils.fetch') @mock.patch('cinder.image.image_utils.qemu_img_info', side_effect=processutils.ProcessExecutionError) @mock.patch('cinder.image.image_utils.temporary_file') def test_no_qemu_img_fetch_verify_image(self, mock_temp, mock_info, mock_fetch): ctxt = mock.sentinel.context image_service = mock.Mock(temp_images=None) image_id = mock.sentinel.image_id dest = mock.sentinel.dest ctxt.user_id = mock.sentinel.user_id image_service.show.return_value = {'disk_format': 'raw', 'size': 41126400} image_utils.fetch_verify_image( ctxt, image_service, image_id, dest) image_service.show.assert_called_once_with(ctxt, image_id) mock_info.assert_called_once_with(dest, force_share=False, run_as_root=True) mock_fetch.assert_called_once_with(ctxt, image_service, image_id, dest, None, None) @mock.patch('cinder.image.image_utils.qemu_img_info', side_effect=processutils.ProcessExecutionError) @mock.patch('cinder.image.image_utils.temporary_file') def test_get_qemu_data_returns_none(self, mock_temp, mock_info): image_id = mock.sentinel.image_id dest = mock.sentinel.dest 
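        # get_qemu_data() tolerates a broken qemu-img only when image
        # metadata is present and already reports the image as raw; in that
        # case it returns None, while the next two tests expect
        # ImageUnacceptable when the metadata does not say raw.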
run_as_root = mock.sentinel.run_as_root disk_format_raw = True has_meta = True output = image_utils.get_qemu_data(image_id, has_meta, disk_format_raw, dest, run_as_root=run_as_root) self.assertIsNone(output) @mock.patch('cinder.image.image_utils.qemu_img_info', side_effect=processutils.ProcessExecutionError) @mock.patch('cinder.image.image_utils.temporary_file') def test_get_qemu_data_with_image_meta_exception(self, mock_temp, mock_info): image_id = mock.sentinel.image_id dest = mock.sentinel.dest run_as_root = mock.sentinel.run_as_root disk_format_raw = False has_meta = True self.assertRaises( exception.ImageUnacceptable, image_utils.get_qemu_data, image_id, has_meta, disk_format_raw, dest, run_as_root=run_as_root) @mock.patch('cinder.image.image_utils.qemu_img_info', side_effect=processutils.ProcessExecutionError) @mock.patch('cinder.image.image_utils.temporary_file') def test_get_qemu_data_without_image_meta_except(self, mock_temp, mock_info): image_id = mock.sentinel.image_id dest = mock.sentinel.dest run_as_root = mock.sentinel.run_as_root disk_format_raw = False has_meta = False self.assertRaises( exception.ImageUnacceptable, image_utils.get_qemu_data, image_id, has_meta, disk_format_raw, dest, run_as_root=run_as_root) @mock.patch('cinder.image.accelerator.is_gzip_compressed', return_value = True) @mock.patch('cinder.image.accelerator.ImageAccel._get_engine') @mock.patch('cinder.image.accelerator.ImageAccel.is_engine_ready', return_value = True) @mock.patch('cinder.image.image_utils.check_available_space') @mock.patch('cinder.image.image_utils.convert_image') @mock.patch('cinder.image.image_utils.volume_utils.copy_volume') @mock.patch( 'cinder.image.image_utils.replace_xenserver_image_with_coalesced_vhd') @mock.patch('cinder.image.image_utils.is_xenserver_format', return_value=False) @mock.patch('cinder.image.image_utils.fetch') @mock.patch('cinder.image.image_utils.qemu_img_info') @mock.patch('cinder.image.image_utils.temporary_file') # FIXME: what 'defaults' are we talking about here? By default # compression is not enabled! 
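    # In this test the compression path is exercised explicitly: the
    # allow_compression_on_image_upload flag is enabled below and the fake
    # image service reports container_format 'compressed', while the other
    # fetch_to_volume_format() arguments keep their defaults.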
def test_defaults_compressed(self, mock_temp, mock_info, mock_fetch, mock_is_xen, mock_repl_xen, mock_copy, mock_convert, mock_check_space, mock_engine_ready, mock_get_engine, mock_gzip_compressed): class fakeEngine(object): def __init__(self): pass def decompress_img(self, src, dest, run_as_root): pass class FakeImageService(object): def __init__(self, image_service=None, disk_format='raw'): self.temp_images = None self.disk_format = disk_format def show(self, context, image_id): return {'size': 2 * units.Gi, 'disk_format': self.disk_format, 'container_format': 'compressed', 'status': 'active'} self.flags(allow_compression_on_image_upload=True) ctxt = mock.sentinel.context ctxt.user_id = mock.sentinel.user_id disk_format = 'ploop' qemu_img_format = image_utils.QEMU_IMG_FORMAT_MAP[disk_format] image_service = FakeImageService(disk_format=disk_format) image_id = mock.sentinel.image_id dest = mock.sentinel.dest volume_format = mock.sentinel.volume_format out_subformat = None blocksize = mock.sentinel.blocksize data = mock_info.return_value data.file_format = qemu_img_format data.backing_file = None data.virtual_size = 1234 tmp = mock_temp.return_value.__enter__.return_value mock_engine = mock.Mock(spec=fakeEngine) mock_get_engine.return_value = mock_engine output = image_utils.fetch_to_volume_format(ctxt, image_service, image_id, dest, volume_format, blocksize) self.assertIsNone(output) mock_temp.assert_called_once_with(prefix='image_download_%s_' % image_id) mock_info.assert_has_calls([ mock.call(tmp, force_share=False, run_as_root=True), mock.call(tmp, run_as_root=True)]) mock_fetch.assert_called_once_with(ctxt, image_service, image_id, tmp, None, None) self.assertFalse(mock_repl_xen.called) self.assertFalse(mock_copy.called) mock_convert.assert_called_once_with(tmp, dest, volume_format, out_subformat=out_subformat, run_as_root=True, src_format=qemu_img_format, image_id=image_id, data=data, disable_sparse=False) mock_engine.decompress_img.assert_called() class TestXenserverUtils(test.TestCase): def test_is_xenserver_format(self): image_meta1 = {'disk_format': 'vhd', 'container_format': 'ovf'} self.assertTrue(image_utils.is_xenserver_format(image_meta1)) image_meta2 = {'disk_format': 'test_disk_format', 'container_format': 'test_cont_format'} self.assertFalse(image_utils.is_xenserver_format(image_meta2)) @mock.patch('cinder.image.image_utils.utils.execute') def test_extract_targz(self, mock_exec): name = mock.sentinel.archive_name target = mock.sentinel.target output = image_utils.extract_targz(name, target) mock_exec.assert_called_once_with('tar', '-xzf', name, '-C', target) self.assertIsNone(output) class TestVhdUtils(test.TestCase): @mock.patch('cinder.image.image_utils.utils.execute') def test_set_vhd_parent(self, mock_exec): vhd_path = mock.sentinel.vhd_path parentpath = mock.sentinel.parentpath output = image_utils.set_vhd_parent(vhd_path, parentpath) mock_exec.assert_called_once_with('vhd-util', 'modify', '-n', vhd_path, '-p', parentpath) self.assertIsNone(output) @mock.patch('cinder.image.image_utils.set_vhd_parent') def test_fix_vhd_chain(self, mock_set_parent): vhd_chain = (mock.sentinel.first, mock.sentinel.second, mock.sentinel.third, mock.sentinel.fourth, mock.sentinel.fifth) output = image_utils.fix_vhd_chain(vhd_chain) self.assertIsNone(output) mock_set_parent.assert_has_calls([ mock.call(mock.sentinel.first, mock.sentinel.second), mock.call(mock.sentinel.second, mock.sentinel.third), mock.call(mock.sentinel.third, mock.sentinel.fourth), mock.call(mock.sentinel.fourth, 
mock.sentinel.fifth)]) @mock.patch('cinder.image.image_utils.utils.execute', return_value=(98765.43210, mock.sentinel.error)) def test_get_vhd_size(self, mock_exec): vhd_path = mock.sentinel.vhd_path output = image_utils.get_vhd_size(vhd_path) mock_exec.assert_called_once_with('vhd-util', 'query', '-n', vhd_path, '-v') self.assertEqual(98765, output) @mock.patch('cinder.image.image_utils.utils.execute') def test_resize_vhd(self, mock_exec): vhd_path = mock.sentinel.vhd_path size = 387549349 journal = mock.sentinel.journal output = image_utils.resize_vhd(vhd_path, size, journal) self.assertIsNone(output) mock_exec.assert_called_once_with('vhd-util', 'resize', '-n', vhd_path, '-s', str(size), '-j', journal) @mock.patch('cinder.image.image_utils.utils.execute') def test_coalesce_vhd(self, mock_exec): vhd_path = mock.sentinel.vhd_path output = image_utils.coalesce_vhd(vhd_path) self.assertIsNone(output) mock_exec.assert_called_once_with('vhd-util', 'coalesce', '-n', vhd_path) @mock.patch('cinder.image.image_utils.temporary_dir') @mock.patch('cinder.image.image_utils.coalesce_vhd') @mock.patch('cinder.image.image_utils.resize_vhd') @mock.patch('cinder.image.image_utils.get_vhd_size') @mock.patch('cinder.image.image_utils.utils.execute') def test_coalesce_chain(self, mock_exec, mock_size, mock_resize, mock_coal, mock_temp): vhd_chain = (mock.sentinel.first, mock.sentinel.second, mock.sentinel.third, mock.sentinel.fourth, mock.sentinel.fifth) # os.path.join does not work with MagicMock objects on Windows. mock_temp.return_value.__enter__.return_value = 'fake_temp_dir' output = image_utils.coalesce_chain(vhd_chain) self.assertEqual(mock.sentinel.fifth, output) mock_size.assert_has_calls([ mock.call(mock.sentinel.first), mock.call(mock.sentinel.second), mock.call(mock.sentinel.third), mock.call(mock.sentinel.fourth)]) mock_resize.assert_has_calls([ mock.call(mock.sentinel.second, mock_size.return_value, mock.ANY), mock.call(mock.sentinel.third, mock_size.return_value, mock.ANY), mock.call(mock.sentinel.fourth, mock_size.return_value, mock.ANY), mock.call(mock.sentinel.fifth, mock_size.return_value, mock.ANY)]) mock_coal.assert_has_calls([ mock.call(mock.sentinel.first), mock.call(mock.sentinel.second), mock.call(mock.sentinel.third), mock.call(mock.sentinel.fourth)]) @mock.patch('cinder.image.image_utils.os.path') def test_discover_vhd_chain(self, mock_path): directory = '/some/test/directory' mock_path.join.side_effect = lambda x, y: '/'.join((x, y)) mock_path.exists.side_effect = (True, True, True, False) output = image_utils.discover_vhd_chain(directory) expected_output = ['/some/test/directory/0.vhd', '/some/test/directory/1.vhd', '/some/test/directory/2.vhd'] self.assertEqual(expected_output, output) @mock.patch('cinder.image.image_utils.temporary_dir') @mock.patch('cinder.image.image_utils.os.rename') @mock.patch('cinder.image.image_utils.fileutils.delete_if_exists') @mock.patch('cinder.image.image_utils.coalesce_chain') @mock.patch('cinder.image.image_utils.fix_vhd_chain') @mock.patch('cinder.image.image_utils.discover_vhd_chain') @mock.patch('cinder.image.image_utils.extract_targz') def test_replace_xenserver_image_with_coalesced_vhd( self, mock_targz, mock_discover, mock_fix, mock_coal, mock_delete, mock_rename, mock_temp): image_file = mock.sentinel.image_file tmp = mock_temp.return_value.__enter__.return_value output = image_utils.replace_xenserver_image_with_coalesced_vhd( image_file) self.assertIsNone(output) mock_targz.assert_called_once_with(image_file, tmp) 
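        # Expected flow: the XenServer tarball is extracted into a temporary
        # directory, the VHD chain is discovered and repaired, coalesced into
        # a single VHD, the original image file is deleted, and the coalesced
        # result is renamed into its place.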
mock_discover.assert_called_once_with(tmp) mock_fix.assert_called_once_with(mock_discover.return_value) mock_coal.assert_called_once_with(mock_discover.return_value) mock_delete.assert_called_once_with(image_file) mock_rename.assert_called_once_with(mock_coal.return_value, image_file) class TestCreateTemporaryFile(test.TestCase): @mock.patch('cinder.image.image_utils.os.close') @mock.patch('cinder.image.image_utils.os.makedirs') @mock.patch('cinder.image.image_utils.tempfile.mkstemp') def test_create_temporary_file_no_dir(self, mock_mkstemp, mock_dirs, mock_close): self.flags(image_conversion_dir=None) fd = mock.sentinel.file_descriptor path = mock.sentinel.absolute_pathname mock_mkstemp.return_value = (fd, path) output = image_utils.create_temporary_file() self.assertEqual(path, output) mock_mkstemp.assert_called_once_with(dir=None) mock_close.assert_called_once_with(fd) @mock.patch('cinder.image.image_utils.os.close') @mock.patch('cinder.image.image_utils.os.makedirs') @mock.patch('cinder.image.image_utils.tempfile.mkstemp') def test_create_temporary_file_with_dir(self, mock_mkstemp, mock_dirs, mock_close): conv_dir = 'fake_conv_dir' self.flags(image_conversion_dir=conv_dir) fd = mock.sentinel.file_descriptor path = mock.sentinel.absolute_pathname mock_mkstemp.return_value = (fd, path) output = image_utils.create_temporary_file() self.assertEqual(path, output) self.assertTrue(mock_dirs.called) mock_mkstemp.assert_called_once_with(dir=conv_dir) mock_close.assert_called_once_with(fd) @mock.patch('cinder.image.image_utils.os.close') @mock.patch('cinder.image.image_utils.fileutils.ensure_tree') @mock.patch('cinder.image.image_utils.tempfile.mkstemp') def test_create_temporary_file_and_dir(self, mock_mkstemp, mock_dirs, mock_close): conv_dir = 'fake_conv_dir' self.flags(image_conversion_dir=conv_dir) fd = mock.sentinel.file_descriptor path = mock.sentinel.absolute_pathname mock_mkstemp.return_value = (fd, path) output = image_utils.create_temporary_file() self.assertEqual(path, output) mock_dirs.assert_called_once_with(conv_dir) mock_mkstemp.assert_called_once_with(dir=conv_dir) mock_close.assert_called_once_with(fd) @mock.patch('cinder.image.image_utils.os.remove') @mock.patch('cinder.image.image_utils.os.path.join') @mock.patch('cinder.image.image_utils.os.listdir') @mock.patch('cinder.image.image_utils.os.path.exists', return_value=True) def test_cleanup_temporary_file(self, mock_path, mock_listdir, mock_join, mock_remove): mock_listdir.return_value = ['tmphost@backend1', 'tmphost@backend2'] conv_dir = 'fake_conv_dir' self.flags(image_conversion_dir=conv_dir) mock_join.return_value = '/test/tmp/tmphost@backend1' image_utils.cleanup_temporary_file('host@backend1') mock_listdir.assert_called_once_with(conv_dir) mock_remove.assert_called_once_with('/test/tmp/tmphost@backend1') @mock.patch('cinder.image.image_utils.os.remove') @mock.patch('cinder.image.image_utils.os.listdir') @mock.patch('cinder.image.image_utils.os.path.exists', return_value=False) def test_cleanup_temporary_file_with_not_exist_path(self, mock_path, mock_listdir, mock_remove): conv_dir = 'fake_conv_dir' self.flags(image_conversion_dir=conv_dir) image_utils.cleanup_temporary_file('host@backend1') self.assertFalse(mock_listdir.called) self.assertFalse(mock_remove.called) @mock.patch('cinder.image.image_utils.os.remove') @mock.patch('cinder.image.image_utils.os.path.join') @mock.patch('cinder.image.image_utils.os.listdir') @mock.patch('cinder.image.image_utils.os.path.exists', return_value=True) def 
test_cleanup_temporary_file_with_exception(self, mock_path, mock_listdir, mock_join, mock_remove): mock_listdir.return_value = ['tmphost@backend1', 'tmphost@backend2'] conv_dir = 'fake_conv_dir' self.flags(image_conversion_dir=conv_dir) mock_join.return_value = '/test/tmp/tmphost@backend1' mock_remove.side_effect = OSError image_utils.cleanup_temporary_file('host@backend1') mock_listdir.assert_called_once_with(conv_dir) mock_remove.assert_called_once_with('/test/tmp/tmphost@backend1') class TestTemporaryFileContextManager(test.TestCase): @mock.patch('cinder.image.image_utils.create_temporary_file', return_value=mock.sentinel.temporary_file) @mock.patch('cinder.image.image_utils.fileutils.delete_if_exists') def test_temporary_file(self, mock_delete, mock_create): with image_utils.temporary_file() as tmp_file: self.assertEqual(mock.sentinel.temporary_file, tmp_file) self.assertFalse(mock_delete.called) mock_delete.assert_called_once_with(mock.sentinel.temporary_file) class TestImageUtils(test.TestCase): def test_get_virtual_size(self): image_id = fake.IMAGE_ID virtual_size = 1073741824 volume_size = 2 virt_size = image_utils.check_virtual_size(virtual_size, volume_size, image_id) self.assertEqual(1, virt_size) def test_get_bigger_virtual_size(self): image_id = fake.IMAGE_ID virtual_size = 3221225472 volume_size = 2 self.assertRaises(exception.ImageUnacceptable, image_utils.check_virtual_size, virtual_size, volume_size, image_id) def test_decode_cipher(self): expected = {'cipher_alg': 'aes-256', 'cipher_mode': 'xts', 'ivgen_alg': 'essiv'} result = image_utils.decode_cipher('aes-xts-essiv', 256) self.assertEqual(expected, result) def test_decode_cipher_invalid(self): self.assertRaises(exception.InvalidVolumeType, image_utils.decode_cipher, 'aes', 256) @ddt.ddt(testNameFormat=ddt.TestNameFormat.INDEX_ONLY) class TestQcow2ImageChecks(test.TestCase): def setUp(self): super(TestQcow2ImageChecks, self).setUp() # Test data from: # $ qemu-img create -f qcow2 fake.qcow2 1M # $ qemu-img info -f qcow2 fake.qcow2 --output=json qemu_img_info = ''' { "virtual-size": 1048576, "filename": "fake.qcow2", "cluster-size": 65536, "format": "qcow2", "actual-size": 200704, "format-specific": { "type": "qcow2", "data": { "compat": "1.1", "compression-type": "zlib", "lazy-refcounts": false, "refcount-bits": 16, "corrupt": false, "extended-l2": false } }, "dirty-flag": false }''' self.qdata = imageutils.QemuImgInfo(qemu_img_info, format='json') def test_check_qcow2_image_no_problem(self): image_utils.check_qcow2_image(fake.IMAGE_ID, self.qdata) def test_check_qcow2_image_with_datafile(self): self.qdata.format_specific['data']['data-file'] = '/not/good' e = self.assertRaises(exception.ImageUnacceptable, image_utils.check_qcow2_image, fake.IMAGE_ID, self.qdata) self.assertIn('not allowed to have a data file', str(e)) def test_check_qcow2_image_with_backing_file(self): # qcow2 backing file is done as a separate check because # cinder has legitimate uses for a qcow2 with backing file self.qdata.backing_file = '/this/is/ok' image_utils.check_qcow2_image(fake.IMAGE_ID, self.qdata) def test_check_qcow2_image_no_barf_bad_data(self): # should never happen, but you never know ... 
del self.qdata.format_specific['data'] e = self.assertRaises(exception.ImageUnacceptable, image_utils.check_qcow2_image, fake.IMAGE_ID, self.qdata) self.assertIn('Cannot determine format-specific', str(e)) self.qdata.format_specific = None e = self.assertRaises(exception.ImageUnacceptable, image_utils.check_qcow2_image, fake.IMAGE_ID, self.qdata) self.assertIn('Cannot determine format-specific', str(e)) @ddt.ddt(testNameFormat=ddt.TestNameFormat.INDEX_ONLY) class TestVmdkImageChecks(test.TestCase): def setUp(self): super(TestVmdkImageChecks, self).setUp() # Test data from: # $ qemu-img create -f vmdk fake.vmdk 1M -o subformat=monolithicSparse # $ qemu-img info -f vmdk --output=json fake.vmdk # # What qemu-img calls the "subformat" is called the "createType" in # vmware-speak and it's found at "/format-specific/data/create-type". qemu_img_info = ''' { "virtual-size": 1048576, "filename": "fake.vmdk", "cluster-size": 65536, "format": "vmdk", "actual-size": 12288, "format-specific": { "type": "vmdk", "data": { "cid": 1200165687, "parent-cid": 4294967295, "create-type": "monolithicSparse", "extents": [ { "virtual-size": 1048576, "filename": "fake.vmdk", "cluster-size": 65536, "format": "" } ] } }, "dirty-flag": false }''' self.qdata = imageutils.QemuImgInfo(qemu_img_info, format='json') self.qdata_data = self.qdata.format_specific['data'] # we will populate this in each test self.qdata_data["create-type"] = None @ddt.data('monolithicSparse', 'streamOptimized') def test_check_vmdk_image_default_config(self, subformat): # none of these should raise self.qdata_data["create-type"] = subformat image_utils.check_vmdk_image(fake.IMAGE_ID, self.qdata) @ddt.data('monolithicFlat', 'twoGbMaxExtentFlat') def test_check_vmdk_image_negative_default_config(self, subformat): self.qdata_data["create-type"] = subformat self.assertRaises(exception.ImageUnacceptable, image_utils.check_vmdk_image, fake.IMAGE_ID, self.qdata) def test_check_vmdk_image_handles_missing_info(self): expected = 'Unable to determine VMDK createType' # remove create-type del self.qdata_data['create-type'] iue = self.assertRaises(exception.ImageUnacceptable, image_utils.check_vmdk_image, fake.IMAGE_ID, self.qdata) self.assertIn(expected, str(iue)) # remove entire data section del self.qdata_data iue = self.assertRaises(exception.ImageUnacceptable, image_utils.check_vmdk_image, fake.IMAGE_ID, self.qdata) self.assertIn(expected, str(iue)) # oslo.utils.imageutils guarantees that format_specific is # defined, so let's see what happens when it's empty self.qdata.format_specific = None iue = self.assertRaises(exception.ImageUnacceptable, image_utils.check_vmdk_image, fake.IMAGE_ID, self.qdata) self.assertIn('no format-specific information is available', str(iue)) def test_check_vmdk_image_positive(self): allowed = 'twoGbMaxExtentFlat' self.flags(vmdk_allowed_types=['garbage', allowed]) self.qdata_data["create-type"] = allowed image_utils.check_vmdk_image(fake.IMAGE_ID, self.qdata) @ddt.data('monolithicSparse', 'streamOptimized') def test_check_vmdk_image_negative(self, subformat): allow_list = ['vmfs', 'filler'] self.assertNotIn(subformat, allow_list) self.flags(vmdk_allowed_types=allow_list) self.qdata_data["create-type"] = subformat self.assertRaises(exception.ImageUnacceptable, image_utils.check_vmdk_image, fake.IMAGE_ID, self.qdata) @ddt.data('monolithicSparse', 'streamOptimized', 'twoGbMaxExtentFlat') def test_check_vmdk_image_negative_empty_list(self, subformat): # anything should raise allow_list = [] 
self.flags(vmdk_allowed_types=allow_list) self.qdata_data["create-type"] = subformat self.assertRaises(exception.ImageUnacceptable, image_utils.check_vmdk_image, fake.IMAGE_ID, self.qdata) # OK, now that we know the function works properly, let's make sure # it's called in all the situations where Bug #1996188 indicates that # we need this check @mock.patch('cinder.image.image_utils.check_vmdk_image') @mock.patch('cinder.image.image_utils.qemu_img_info') @mock.patch('cinder.image.image_utils.fileutils') @mock.patch('cinder.image.image_utils.fetch') def test_vmdk_subformat_checked_fetch_verify_image( self, mock_fetch, mock_fileutils, mock_info, mock_check): ctxt = mock.sentinel.context image_service = mock.Mock() image_id = mock.sentinel.image_id dest = mock.sentinel.dest mock_info.return_value = self.qdata mock_check.side_effect = exception.ImageUnacceptable( image_id=image_id, reason='mock check') iue = self.assertRaises(exception.ImageUnacceptable, image_utils.fetch_verify_image, ctxt, image_service, image_id, dest) self.assertIn('mock check', str(iue)) mock_check.assert_called_with(image_id, self.qdata) @mock.patch('cinder.image.image_utils.check_vmdk_image') @mock.patch('cinder.image.image_utils.qemu_img_info') @mock.patch('cinder.image.image_utils.get_qemu_data') @mock.patch('cinder.image.image_utils.check_image_conversion_disable') def test_vmdk_subformat_checked_fetch_to_volume_format( self, mock_convert, mock_qdata, mock_info, mock_check): ctxt = mock.sentinel.context image_service = mock.Mock() image_meta = {'disk_format': 'vmdk'} image_service.show.return_value = image_meta image_id = mock.sentinel.image_id dest = mock.sentinel.dest volume_format = mock.sentinel.volume_format blocksize = 1024 self.flags(allow_compression_on_image_upload=False) mock_qdata.return_value = self.qdata mock_info.return_value = self.qdata mock_check.side_effect = exception.ImageUnacceptable( image_id=image_id, reason='mock check') iue = self.assertRaises(exception.ImageUnacceptable, image_utils.fetch_to_volume_format, ctxt, image_service, image_id, dest, volume_format, blocksize) self.assertIn('mock check', str(iue)) mock_check.assert_called_with(image_id, self.qdata) @mock.patch('cinder.image.image_utils.check_vmdk_image') @mock.patch('cinder.image.image_utils.qemu_img_info') @mock.patch('cinder.image.image_utils.check_image_conversion_disable') def test_vmdk_subformat_checked_upload_volume( self, mock_convert, mock_info, mock_check): ctxt = mock.sentinel.context image_service = mock.Mock() image_meta = {'disk_format': 'vmdk'} image_id = mock.sentinel.image_id image_meta['id'] = image_id self.flags(allow_compression_on_image_upload=False) mock_info.return_value = self.qdata mock_check.side_effect = exception.ImageUnacceptable( image_id=image_id, reason='mock check') iue = self.assertRaises(exception.ImageUnacceptable, image_utils.upload_volume, ctxt, image_service, image_meta, volume_path=mock.sentinel.volume_path, volume_format=mock.sentinel.volume_format) self.assertIn('mock check', str(iue)) mock_check.assert_called_with(image_id, self.qdata) @mock.patch('cinder.image.image_utils.check_vmdk_image') @mock.patch('cinder.image.image_utils.qemu_img_info') def test_vmdk_checked_convert_image_no_src_format( self, mock_info, mock_check): source = mock.sentinel.source dest = mock.sentinel.dest out_format = mock.sentinel.out_format mock_info.return_value = self.qdata image_id = 'internal image' mock_check.side_effect = exception.ImageUnacceptable( image_id=image_id, reason='mock check') 
self.assertRaises(exception.ImageUnacceptable, image_utils.convert_image, source, dest, out_format) mock_check.assert_called_with(image_id, self.qdata) @ddt.ddt(testNameFormat=ddt.TestNameFormat.INDEX_ONLY) class TestImageFormatCheck(test.TestCase): def setUp(self): super(TestImageFormatCheck, self).setUp() qemu_img_info = ''' { "virtual-size": 1048576, "filename": "whatever.img", "cluster-size": 65536, "format": "qcow2", "actual-size": 200704, "format-specific": { "type": "qcow2", "data": { "compat": "1.1", "compression-type": "zlib", "lazy-refcounts": false, "refcount-bits": 16, "corrupt": false, "extended-l2": false } }, "dirty-flag": false }''' self.qdata = imageutils.QemuImgInfo(qemu_img_info, format='json') @mock.patch('cinder.image.image_utils.check_qcow2_image') @mock.patch('cinder.image.image_utils.check_vmdk_image') @mock.patch('cinder.image.image_utils.qemu_img_info') def test_check_image_format_defaults(self, mock_info, mock_vmdk, mock_qcow2): """Doesn't blow up when only the mandatory arg is passed.""" src = mock.sentinel.src mock_info.return_value = self.qdata expected_image_id = 'internal image' # empty file_format should raise self.qdata.file_format = None iue = self.assertRaises(exception.ImageUnacceptable, image_utils.check_image_format, src) self.assertIn(expected_image_id, str(iue)) mock_info.assert_called_with(src, run_as_root=True) # a VMDK should trigger an additional check mock_info.reset_mock() self.qdata.file_format = 'vmdk' image_utils.check_image_format(src) mock_vmdk.assert_called_with(expected_image_id, self.qdata) # Bug #2059809: a qcow2 should trigger an additional check mock_info.reset_mock() self.qdata.file_format = 'qcow2' image_utils.check_image_format(src) mock_qcow2.assert_called_with(expected_image_id, self.qdata) @mock.patch('cinder.image.image_utils.qemu_img_info') def test_check_image_format_uses_passed_data(self, mock_info): src = mock.sentinel.src image_utils.check_image_format(src, data=self.qdata) mock_info.assert_not_called() @mock.patch('cinder.image.image_utils.qemu_img_info') def test_check_image_format_mismatch(self, mock_info): src = mock.sentinel.src mock_info.return_value = self.qdata self.qdata.file_format = 'fake_format' src_format = 'qcow2' iue = self.assertRaises(exception.ImageUnacceptable, image_utils.check_image_format, src, src_format=src_format) self.assertIn(src_format, str(iue)) self.assertIn('different format', str(iue)) @ddt.data('AMI', 'ami') @mock.patch('cinder.image.image_utils.qemu_img_info') def test_check_image_format_AMI(self, ami, mock_info): """Mismatch OK in this case, see change Icde4c0f936ce.""" src = mock.sentinel.src mock_info.return_value = self.qdata self.qdata.file_format = 'raw' src_format = ami image_utils.check_image_format(src, src_format=src_format) @mock.patch('cinder.image.image_utils._convert_image') @mock.patch('cinder.image.image_utils.check_image_format') def test_check_image_format_called_by_convert_image( self, mock_check, mock__convert): """Make sure the function we've been testing is actually called.""" src = mock.sentinel.src dest = mock.sentinel.dest out_fmt = mock.sentinel.out_fmt image_utils.convert_image(src, dest, out_fmt) mock_check.assert_called_once_with(src, None, None, None, True) @ddt.ddt class TestFilterReservedNamespaces(test.TestCase): def setUp(self): super(TestFilterReservedNamespaces, self).setUp() self.mock_object(image_utils, 'LOG', side_effect=image_utils.LOG) def test_filter_out_reserved_namespaces_metadata_with_empty_metadata(self): metadata_for_test = None 
method_return = image_utils.filter_out_reserved_namespaces_metadata( metadata_for_test) self.assertEqual({}, method_return) image_utils.LOG.debug.assert_has_calls( [mock.call("No metadata to be filtered.")] ) @ddt.data( # remove default keys ({"some_key": 13, "other_key": "test", "os_glance_key": "this should be removed", "os_glance_key2": "this should also be removed"}, None, []), # remove nothing ({"some_key": 13, "other_key": "test"}, None, []), # custom config empty ({"some_key": 13, "other_key": "test", "os_glance_key": "this should be removed", "os_glance_key2": "this should also be removed"}, [], []), # custom config ({"some_key": 13, "other_key": "test", "os_glance_key": "this should be removed", "os_glance_key2": "this should also be removed", "custom_key": "this should be removed", "another_custom_key": "this should also be removed"}, ['custom_key', 'another_custom_key'], ['custom_key', 'another_custom_key'])) @ddt.unpack def test_filter_out_reserved_namespaces_metadata( self, metadata_for_test, config, keys_to_pop): hardcoded_keys = image_utils.GLANCE_RESERVED_NAMESPACES keys_to_pop = hardcoded_keys + keys_to_pop if config: self.override_config('reserved_image_namespaces', config) expected_result = {"some_key": 13, "other_key": "test"} method_return = image_utils.filter_out_reserved_namespaces_metadata( metadata_for_test) self.assertEqual(expected_result, method_return) image_utils.LOG.debug.assert_has_calls([ mock.call("The metadata set [%s] was filtered using the reserved " "name spaces [%s], and the result is [%s].", metadata_for_test, keys_to_pop, expected_result) ]) @ddt.data( # remove default keys ({"some_key": 13, "other_key": "test", "os_glance_key": "this should be removed", "os_glance_key2": "this should also be removed", "properties": {"os_glance_key3": "this should be removed", "os_glance_key4": "this should also be removed", "another_key": "foobar"} }, None, []), # remove nothing ({"some_key": 13, "other_key": "test", "properties": {"another_key": "foobar"}}, None, []), # custom config empty ({"some_key": 13, "other_key": "test", "os_glance_key": "this should be removed", "os_glance_key2": "this should also be removed", "properties": {"os_glance_key3": "this should be removed", "os_glance_key4": "this should also be removed", "another_key": "foobar"} }, [], []), # custom config ({"some_key": 13, "other_key": "test", "os_glance_key": "this should be removed", "os_glance_key2": "this should also be removed", "properties": {"os_glance_key3": "this should be removed", "os_glance_key4": "this should also be removed", "custom_key": "this should be removed", "another_custom_key": "this should also be removed", "another_key": "foobar"}, }, ['custom_key', 'another_custom_key'], ['custom_key', 'another_custom_key'])) @ddt.unpack def test_filter_out_reserved_namespaces_metadata_properties( self, metadata_for_test, config, keys_to_pop): hardcoded_keys = image_utils.GLANCE_RESERVED_NAMESPACES keys_to_pop = hardcoded_keys + keys_to_pop if config: self.override_config('reserved_image_namespaces', config) expected_result = { "some_key": 13, "other_key": "test", "properties": { "another_key": "foobar" } } method_return = image_utils.filter_out_reserved_namespaces_metadata( metadata_for_test) self.assertEqual(expected_result, method_return) image_utils.LOG.debug.assert_has_calls([ mock.call("The metadata set [%s] was filtered using the reserved " "name spaces [%s], and the result is [%s].", metadata_for_test, keys_to_pop, expected_result) ]) 
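
# ---------------------------------------------------------------------------
# Editor's note -- illustrative sketch only, not part of the original module.
# The TestFilterReservedNamespaces cases above assert that image metadata keys
# living in reserved namespaces (the hard-coded GLANCE_RESERVED_NAMESPACES
# plus any names from the ``reserved_image_namespaces`` option) are stripped,
# both at the top level and inside the nested ``properties`` dict. A minimal
# standalone approximation of that behaviour, using a stand-in namespace name
# rather than Cinder's real configuration, could look like this:
def _strip_reserved_namespaces_sketch(metadata, reserved=('os_glance',)):
    """Drop keys that start with any reserved namespace (sketch only)."""
    if not metadata:
        return {}
    filtered = {k: v for k, v in metadata.items()
                if not k.startswith(tuple(reserved))}
    props = filtered.get('properties')
    if isinstance(props, dict):
        filtered['properties'] = {k: v for k, v in props.items()
                                  if not k.startswith(tuple(reserved))}
    return filtered
# Example:
#     _strip_reserved_namespaces_sketch(
#         {'some_key': 13, 'os_glance_key': 'x'})  # -> {'some_key': 13}
# ---------------------------------------------------------------------------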
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0
cinder-27.0.0/cinder/tests/unit/test_interface.py0000664000175000017500000000453200000000000022031 0ustar00zuulzuul00000000000000
# Copyright (c) 2019, Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from unittest import mock

from cinder.interface import util
from cinder.tests.unit import test


class GetDriversTestCase(test.TestCase):
    def test_get_volume_drivers(self):
        # Just ensure that it doesn't raise an exception
        drivers = util.get_volume_drivers()
        self.assertNotEqual(0, len(drivers))
        for driver in drivers:
            self.assertIsInstance(driver, util.DriverInfo)

    @mock.patch('cinder.volume.drivers.lvm.LVMVolumeDriver.get_driver_options')
    def test_get_volume_drivers_fail(self, driver_opt):
        driver_opt.side_effect = ValueError
        self.assertRaises(ValueError, util.get_volume_drivers)

    def test_get_backup_drivers(self):
        # Just ensure that it doesn't raise an exception
        drivers = util.get_backup_drivers()
        self.assertNotEqual(0, len(drivers))
        for driver in drivers:
            self.assertIsInstance(driver, util.DriverInfo)

    @mock.patch('cinder.backup.drivers.ceph.CephBackupDriver.'
                'get_driver_options')
    def test_get_backup_drivers_fail(self, driver_opt):
        driver_opt.side_effect = ValueError
        self.assertRaises(ValueError, util.get_backup_drivers)

    def test_get_fczm_drivers(self):
        # Just ensure that it doesn't raise an exception
        drivers = util.get_fczm_drivers()
        self.assertNotEqual(0, len(drivers))
        for driver in drivers:
            self.assertIsInstance(driver, util.DriverInfo)

    @mock.patch('cinder.zonemanager.drivers.cisco.cisco_fc_zone_driver.'
                'CiscoFCZoneDriver.get_driver_options')
    def test_get_fczm_drivers_fail(self, driver_opt):
        driver_opt.side_effect = ValueError
        self.assertRaises(ValueError, util.get_fczm_drivers)
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0
cinder-27.0.0/cinder/tests/unit/test_manager.py0000664000175000017500000000430400000000000021500 0ustar00zuulzuul00000000000000
# Copyright (c) 2017 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
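# Editor's note -- descriptive comment added for clarity; not part of the
# original test_manager.py. The FakeManager defined below subclasses
# manager.CleanableManager and overrides _do_cleanup(): it marks the passed
# resource as cleaned and returns its keep_after_clean flag, which (judging
# by the name and by how the fake is used) appears to tell the cleanup
# machinery whether the corresponding cleanup entry should be kept once the
# resource has been handled.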
from unittest import mock

from cinder import manager
from cinder import objects
from cinder.tests.unit import test


class FakeManager(manager.CleanableManager):
    def __init__(self, service_id=None, keep_after_clean=False):
        if service_id:
            self.service_id = service_id
        self.keep_after_clean = keep_after_clean

    def _do_cleanup(self, ctxt, vo_resource):
        vo_resource.status += '_cleaned'
        vo_resource.save()
        return self.keep_after_clean


class TestManager(test.TestCase):
    @mock.patch('cinder.utils.set_log_levels')
    def test_set_log_levels(self, set_log_mock):
        service = manager.Manager()
        log_request = objects.LogLevel(prefix='sqlalchemy.', level='debug')
        service.set_log_levels(mock.sentinel.context, log_request)
        set_log_mock.assert_called_once_with(log_request.prefix,
                                             log_request.level)

    @mock.patch('cinder.utils.get_log_levels')
    def test_get_log_levels(self, get_log_mock):
        get_log_mock.return_value = {'cinder': 'DEBUG', 'cinder.api': 'ERROR'}
        service = manager.Manager()
        log_request = objects.LogLevel(prefix='sqlalchemy.')
        result = service.get_log_levels(mock.sentinel.context, log_request)
        get_log_mock.assert_called_once_with(log_request.prefix)
        expected = (objects.LogLevel(prefix='cinder', level='DEBUG'),
                    objects.LogLevel(prefix='cinder.api', level='ERROR'))
        self.assertEqual(set(str(r) for r in result.objects),
                         set(str(e) for e in expected))
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0
cinder-27.0.0/cinder/tests/unit/test_paginate_query.py0000664000175000017500000000361700000000000023111 0ustar00zuulzuul00000000000000
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from cinder.common import sqlalchemyutils
from cinder import context
from cinder.db.sqlalchemy import api as db_api
from cinder.db.sqlalchemy import models
from cinder.tests.unit import fake_constants as fake
from cinder.tests.unit import test


class TestPaginateQuery(test.TestCase):
    def setUp(self):
        super(TestPaginateQuery, self).setUp()
        self.ctxt = context.RequestContext(fake.USER_ID, fake.PROJECT_ID,
                                           auth_token=True,
                                           is_admin=True)
        with db_api.main_context_manager.reader.using(self.ctxt):
            self.query = db_api._volume_get_query(self.ctxt)
        self.model = models.Volume

    def test_paginate_query_marker_null(self):
        marker_object = self.model()
        self.assertIsNone(marker_object.display_name)
        self.assertIsNone(marker_object.updated_at)
        marker_object.size = 1
        # There is no error raised here.
        sqlalchemyutils.paginate_query(self.query, self.model, 10,
                                       sort_keys=['display_name',
                                                  'updated_at', 'size'],
                                       marker=marker_object,
                                       sort_dirs=['desc', 'asc', 'desc'])
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0
cinder-27.0.0/cinder/tests/unit/test_policy.py0000664000175000017500000001734500000000000021376 0ustar00zuulzuul00000000000000
# Copyright (c) 2017 Huawei Technologies Co., Ltd.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import os.path from unittest import mock from oslo_config import cfg from oslo_config import fixture as config_fixture from oslo_policy import policy as oslo_policy from cinder import context from cinder import exception from cinder import policy from cinder.tests.unit import test from cinder import utils CONF = cfg.CONF class PolicyFileTestCase(test.TestCase): def setUp(self): super(PolicyFileTestCase, self).setUp() self.context = context.get_admin_context() self.target = {} self.fixture = self.useFixture(config_fixture.Config(CONF)) self.addCleanup(policy.reset) def test_modified_policy_reloads(self): with utils.tempdir() as tmpdir: tmpfilename = os.path.join(tmpdir, 'policy') self.fixture.config(policy_file=tmpfilename, group='oslo_policy') policy.reset() policy.init() rule = oslo_policy.RuleDefault('example:test', "") policy._ENFORCER.register_defaults([rule]) action = "example:test" with open(tmpfilename, "w") as policyfile: policyfile.write('{"example:test": ""}') policy.authorize(self.context, action, self.target) with open(tmpfilename, "w") as policyfile: policyfile.write('{"example:test": "!"}') policy._ENFORCER.load_rules(True) self.assertRaises(exception.PolicyNotAuthorized, policy.authorize, self.context, action, self.target) class PolicyTestCase(test.TestCase): def setUp(self): super(PolicyTestCase, self).setUp() rules = [ oslo_policy.RuleDefault("true", '@'), oslo_policy.RuleDefault("test:allowed", '@'), oslo_policy.RuleDefault("test:denied", "!"), oslo_policy.RuleDefault("test:my_file", "role:compute_admin or " "project_id:%(project_id)s"), oslo_policy.RuleDefault("test:early_and_fail", "! and @"), oslo_policy.RuleDefault("test:early_or_success", "@ or !"), oslo_policy.RuleDefault("test:lowercase_admin", "role:admin"), oslo_policy.RuleDefault("test:uppercase_admin", "role:ADMIN"), oslo_policy.RuleDefault("old_action_not_default", "@"), oslo_policy.RuleDefault("new_action", "@"), oslo_policy.RuleDefault("old_action_default", "rule:admin_api"), ] policy.reset() policy.init() # before a policy rule can be used, its default has to be registered. 
policy._ENFORCER.register_defaults(rules) self.context = context.RequestContext('fake', 'fake', roles=['member']) self.target = {} self.addCleanup(policy.reset) def test_authorize_nonexistent_action_throws(self): action = "test:noexist" self.assertRaises(oslo_policy.PolicyNotRegistered, policy.authorize, self.context, action, self.target) def test_authorize_bad_action_throws(self): action = "test:denied" self.assertRaises(exception.PolicyNotAuthorized, policy.authorize, self.context, action, self.target) def test_authorize_bad_action_noraise(self): action = "test:denied" result = policy.authorize(self.context, action, self.target, False) self.assertFalse(result) def test_authorize_good_action(self): action = "test:allowed" result = policy.authorize(self.context, action, self.target) self.assertTrue(result) def test_templatized_authorization(self): target_mine = {'project_id': 'fake'} target_not_mine = {'project_id': 'another'} action = "test:my_file" policy.authorize(self.context, action, target_mine) self.assertRaises(exception.PolicyNotAuthorized, policy.authorize, self.context, action, target_not_mine) def test_early_AND_authorization(self): action = "test:early_and_fail" self.assertRaises(exception.PolicyNotAuthorized, policy.authorize, self.context, action, self.target) def test_early_OR_authorization(self): action = "test:early_or_success" policy.authorize(self.context, action, self.target) def test_ignore_case_role_check(self): lowercase_action = "test:lowercase_admin" uppercase_action = "test:uppercase_admin" admin_context = context.RequestContext('admin', 'fake', roles=['AdMiN']) policy.authorize(admin_context, lowercase_action, self.target) policy.authorize(admin_context, uppercase_action, self.target) def test_enforce_properly_handles_invalid_scope_exception(self): self.fixture.config(enforce_scope=True, group='oslo_policy') project_context = context.RequestContext(project_id='fake-project-id', roles=['bar']) policy.reset() policy.init() rule = oslo_policy.RuleDefault('foo', 'role:bar', scope_types=['system']) policy._ENFORCER.register_defaults([rule]) self.assertRaises(exception.PolicyNotAuthorized, policy.enforce, project_context, 'foo', {}) def test_enforce_does_not_raise_forbidden(self): self.fixture.config(enforce_scope=False, group='oslo_policy') project_context = context.RequestContext(project_id='fake-project-id', roles=['bar']) policy.reset() policy.init() rule = oslo_policy.RuleDefault('foo', 'role:bar', scope_types=['system']) policy._ENFORCER.register_defaults([rule]) self.assertTrue(policy.enforce(project_context, 'foo', {})) def test_enforce_passes_context_objects_to_enforcement(self): fake_context = context.RequestContext(roles=['foo']) action = 'foo' target = {} with mock.patch.object(policy._ENFORCER, 'enforce') as fake_enforce: policy.enforce(fake_context, action, target) fake_enforce.assert_called_once_with( action, target, fake_context, do_raise=True, exc=exception.PolicyNotAuthorized, action=action) def test_authorize_passes_context_objects_to_enforcement(self): fake_context = context.RequestContext(project_id='fake-project-id', user_id='fake-user-id', roles=['foo']) action = 'foo' target = {'project_id': 'fake-project-id', 'user_id': 'fake-user-id'} with mock.patch.object(policy._ENFORCER, 'authorize') as fake_authz: fake_context.authorize('foo') fake_authz.assert_called_once_with( action, target, fake_context, do_raise=True, exc=exception.PolicyNotAuthorized, action=action) ././@PaxHeader0000000000000000000000000000002600000000000011453 
xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/test_qos_specs.py0000664000175000017500000004003200000000000022063 0ustar00zuulzuul00000000000000 # Copyright (c) 2013 eBay Inc. # Copyright (c) 2013 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Unit Tests for qos specs internal API.""" import time from unittest import mock from oslo_db import exception as db_exc from oslo_utils import timeutils from cinder import context from cinder import db from cinder import exception from cinder.tests.unit import fake_constants as fake from cinder.tests.unit import test from cinder.tests.unit import utils as test_utils from cinder.volume import qos_specs from cinder.volume import volume_types def fake_db_qos_specs_create(context, values): if values['name'] == 'DupQoSName': raise exception.QoSSpecsExists(specs_id=values['name']) elif values['name'] == 'FailQoSName': raise db_exc.DBError() pass def fake_db_get_vol_type(vol_type_number=1): return {'name': 'type-' + str(vol_type_number), 'id': fake.QOS_SPEC_ID, 'updated_at': None, 'created_at': None, 'deleted_at': None, 'description': 'desc', 'deleted': False, 'is_public': True, 'projects': [], 'qos_specs_id': fake.QOS_SPEC_ID, 'extra_specs': None} class QoSSpecsTestCase(test.TestCase): """Test cases for qos specs code.""" def setUp(self): super(QoSSpecsTestCase, self).setUp() self.ctxt = context.get_admin_context() def _create_qos_specs(self, name, consumer='back-end', values=None): """Create a transfer object.""" if values is None: values = {'key1': 'value1', 'key2': 'value2'} specs = {'name': name, 'consumer': consumer, 'specs': values} return db.qos_specs_create(self.ctxt, specs)['id'] def test_create(self): input = {'key1': 'value1', 'key2': 'value2', 'key3': 'value3'} ref = qos_specs.create(self.ctxt, 'FakeName', input) specs_obj = qos_specs.get_qos_specs(self.ctxt, ref['id']) specs_obj_dic = {'consumer': specs_obj['consumer'], 'id': specs_obj['id'], 'name': specs_obj['name'], 'specs': specs_obj['specs']} expected = {'consumer': 'back-end', 'id': ref['id'], 'name': 'FakeName', 'specs': input} self.assertDictEqual(expected, specs_obj_dic) # qos specs must have unique name self.assertRaises(exception.QoSSpecsExists, qos_specs.create, self.ctxt, 'FakeName', input) # consumer must be one of: front-end, back-end, both input['consumer'] = 'fake' self.assertRaises(exception.InvalidQoSSpecs, qos_specs.create, self.ctxt, 'QoSName', input) del input['consumer'] self.mock_object(db, 'qos_specs_create', fake_db_qos_specs_create) # able to catch DBError self.assertRaises(exception.QoSSpecsCreateFailed, qos_specs.create, self.ctxt, 'FailQoSName', input) def test_update(self): def fake_db_update(context, specs_id, values): raise db_exc.DBError() qos = {'consumer': 'back-end', 'specs': {'key1': 'value1'}} # qos specs must exists self.assertRaises(exception.QoSSpecsNotFound, qos_specs.update, self.ctxt, 'fake_id', qos['specs']) specs_id = self._create_qos_specs('Name', qos['consumer'], qos['specs']) qos_specs.update(self.ctxt, 
specs_id, {'key1': 'newvalue1', 'key2': 'value2'}) specs = qos_specs.get_qos_specs(self.ctxt, specs_id) self.assertEqual('newvalue1', specs['specs']['key1']) self.assertEqual('value2', specs['specs']['key2']) # consumer must be one of: front-end, back-end, both self.assertRaises(exception.InvalidQoSSpecs, qos_specs.update, self.ctxt, specs_id, {'consumer': 'not-real'}) self.mock_object(db, 'qos_specs_update', fake_db_update) self.assertRaises(exception.QoSSpecsUpdateFailed, qos_specs.update, self.ctxt, specs_id, {'key': 'new_key'}) def test_delete(self): qos_id = self._create_qos_specs('my_qos') def fake_db_associations_get(context, id): vol_types = [] if id == qos_id: vol_types = [fake_db_get_vol_type(id)] return vol_types def fake_db_delete(context, id): return {'deleted': True, 'deleted_at': timeutils.utcnow()} def fake_disassociate_all(context, id): pass self.mock_object(db, 'qos_specs_associations_get', fake_db_associations_get) self.mock_object(qos_specs, 'disassociate_all', fake_disassociate_all) self.mock_object(db, 'qos_specs_delete', fake_db_delete) self.assertRaises(exception.InvalidQoSSpecs, qos_specs.delete, self.ctxt, None) self.assertRaises(exception.QoSSpecsNotFound, qos_specs.delete, self.ctxt, 'NotFound') self.assertRaises(exception.QoSSpecsInUse, qos_specs.delete, self.ctxt, qos_id) # able to delete in-use qos specs if force=True qos_specs.delete(self.ctxt, qos_id, force=True) # Can delete without forcing when no volume types qos_id_with_no_vol_types = self._create_qos_specs('no_vol_types') qos_specs.delete(self.ctxt, qos_id_with_no_vol_types, force=False) def test_delete_keys(self): def fake_db_qos_delete_key(context, id, key): if key == 'NotFound': raise exception.QoSSpecsKeyNotFound(specs_id=id, specs_key=key) else: pass value = {'foo': 'Foo', 'bar': 'Bar', 'zoo': 'tiger'} name = 'QoSName' consumer = 'front-end' specs_id = self._create_qos_specs(name, consumer, value) qos_specs.delete_keys(self.ctxt, specs_id, ['foo', 'bar']) del value['foo'] del value['bar'] expected = {'name': name, 'id': specs_id, 'consumer': consumer, 'specs': value} specs = qos_specs.get_qos_specs(self.ctxt, specs_id) specs_dic = {'consumer': specs['consumer'], 'id': specs['id'], 'name': specs['name'], 'specs': specs['specs']} self.assertDictEqual(expected, specs_dic) self.mock_object(db, 'qos_specs_item_delete', fake_db_qos_delete_key) self.assertRaises(exception.InvalidQoSSpecs, qos_specs.delete_keys, self.ctxt, None, []) self.assertRaises(exception.QoSSpecsNotFound, qos_specs.delete_keys, self.ctxt, 'NotFound', []) self.assertRaises(exception.QoSSpecsKeyNotFound, qos_specs.delete_keys, self.ctxt, specs_id, ['NotFound']) self.assertRaises(exception.QoSSpecsKeyNotFound, qos_specs.delete_keys, self.ctxt, specs_id, ['foo', 'bar', 'NotFound']) @mock.patch.object(db, 'qos_specs_associations_get') def test_get_associations(self, mock_qos_specs_associations_get): vol_types = [fake_db_get_vol_type(x) for x in range(2)] mock_qos_specs_associations_get.return_value = vol_types specs_id = self._create_qos_specs('new_spec') res = qos_specs.get_associations(self.ctxt, specs_id) for vol_type in vol_types: expected_type = { 'association_type': 'volume_type', 'id': vol_type['id'], 'name': vol_type['name'] } self.assertIn(expected_type, res) e = exception.QoSSpecsNotFound(specs_id='Trouble') mock_qos_specs_associations_get.side_effect = e self.assertRaises(exception.CinderException, qos_specs.get_associations, self.ctxt, 'Trouble') def test_associate_qos_with_type(self): def fake_qos_specs_get(context, id): if 
id == 'NotFound': raise exception.QoSSpecsNotFound(specs_id=id) else: pass def fake_db_associate(context, id, type_id): if id == 'Trouble': raise db_exc.DBError() elif type_id == 'NotFound': raise exception.VolumeTypeNotFound(volume_type_id=type_id) pass def fake_vol_type_qos_get(type_id): if type_id == 'Invalid': return {'qos_specs': {'id': 'Invalid'}} else: return {'qos_specs': None} type_ref = volume_types.create(self.ctxt, 'TypeName') specs_id = self._create_qos_specs('QoSName') qos_specs.associate_qos_with_type(self.ctxt, specs_id, type_ref['id']) res = qos_specs.get_associations(self.ctxt, specs_id) self.assertEqual(1, len(res)) self.assertEqual('TypeName', res[0]['name']) self.assertEqual(type_ref['id'], res[0]['id']) self.mock_object(db, 'qos_specs_associate', fake_db_associate) self.mock_object(qos_specs, 'get_qos_specs', fake_qos_specs_get) self.mock_object(volume_types, 'get_volume_type_qos_specs', fake_vol_type_qos_get) self.assertRaises(exception.VolumeTypeNotFound, qos_specs.associate_qos_with_type, self.ctxt, 'specs-id', 'NotFound') self.assertRaises(exception.QoSSpecsAssociateFailed, qos_specs.associate_qos_with_type, self.ctxt, 'Trouble', 'id') self.assertRaises(exception.QoSSpecsNotFound, qos_specs.associate_qos_with_type, self.ctxt, 'NotFound', 'id') self.assertRaises(exception.InvalidVolumeType, qos_specs.associate_qos_with_type, self.ctxt, 'specs-id', 'Invalid') def test_disassociate_qos_specs(self): def fake_db_disassociate(context, id, type_id): raise db_exc.DBError() type_ref = volume_types.create(self.ctxt, 'TypeName') specs_id = self._create_qos_specs('QoSName') qos_specs.associate_qos_with_type(self.ctxt, specs_id, type_ref['id']) res = qos_specs.get_associations(self.ctxt, specs_id) self.assertEqual(1, len(res)) qos_specs.disassociate_qos_specs(self.ctxt, specs_id, type_ref['id']) res = qos_specs.get_associations(self.ctxt, specs_id) self.assertEqual(0, len(res)) self.assertRaises(exception.VolumeTypeNotFound, qos_specs.disassociate_qos_specs, self.ctxt, specs_id, 'NotFound') # Verify we can disassociate specs from volume_type even if they are # not associated with no error qos_specs.disassociate_qos_specs(self.ctxt, specs_id, type_ref['id']) qos_specs.associate_qos_with_type(self.ctxt, specs_id, type_ref['id']) self.mock_object(db, 'qos_specs_disassociate', fake_db_disassociate) self.assertRaises(exception.QoSSpecsDisassociateFailed, qos_specs.disassociate_qos_specs, self.ctxt, specs_id, type_ref['id']) def test_disassociate_all(self): def fake_db_disassociate_all(context, id): if id == 'Trouble': raise db_exc.DBError() pass def fake_qos_specs_get(context, id): if id == 'NotFound': raise exception.QoSSpecsNotFound(specs_id=id) else: pass type1_ref = volume_types.create(self.ctxt, 'TypeName1') type2_ref = volume_types.create(self.ctxt, 'TypeName2') specs_id = self._create_qos_specs('QoSName') qos_specs.associate_qos_with_type(self.ctxt, specs_id, type1_ref['id']) qos_specs.associate_qos_with_type(self.ctxt, specs_id, type2_ref['id']) res = qos_specs.get_associations(self.ctxt, specs_id) self.assertEqual(2, len(res)) qos_specs.disassociate_all(self.ctxt, specs_id) res = qos_specs.get_associations(self.ctxt, specs_id) self.assertEqual(0, len(res)) self.mock_object(db, 'qos_specs_disassociate_all', fake_db_disassociate_all) self.mock_object(qos_specs, 'get_qos_specs', fake_qos_specs_get) self.assertRaises(exception.QoSSpecsDisassociateFailed, qos_specs.disassociate_all, self.ctxt, 'Trouble') def test_get_all_specs(self): qos_specs_list = [{'name': 'Specs1', 
'created_at': None, 'updated_at': None, 'deleted_at': None, 'deleted': None, 'consumer': 'both', 'specs': {'key1': 'value1', 'key2': 'value2', 'key3': 'value3'}}, {'name': 'Specs2', 'created_at': None, 'updated_at': None, 'deleted_at': None, 'deleted': None, 'consumer': 'both', 'specs': {'key1': 'value1', 'key2': 'value2', 'key3': 'value3', 'key4': 'value4'}}] for index, qos_specs_dict in enumerate(qos_specs_list): qos_specs_id = self._create_qos_specs( qos_specs_dict['name'], qos_specs_dict['consumer'], qos_specs_dict['specs']) qos_specs_dict['id'] = qos_specs_id specs = db.qos_specs_get(self.ctxt, qos_specs_id) qos_specs_list[index]['created_at'] = test_utils.time_format( specs['created_at']) res = qos_specs.get_all_specs(self.ctxt) self.assertEqual(len(qos_specs_list), len(res)) qos_res_simple_dict = [] # Need to make list of dictionaries instead of VOs for assertIn to work for qos in res: qos_res_simple_dict.append( qos.obj_to_primitive()['versioned_object.data']) for qos_spec in qos_specs_list: self.assertIn(qos_spec, qos_res_simple_dict) def test_get_qos_specs(self): one_time_value = str(int(time.time())) specs = {'key1': one_time_value, 'key2': 'value2', 'key3': 'value3'} qos_id = self._create_qos_specs('Specs1', 'both', specs) specs = qos_specs.get_qos_specs(self.ctxt, qos_id) self.assertEqual(one_time_value, specs['specs']['key1']) self.assertRaises(exception.InvalidQoSSpecs, qos_specs.get_qos_specs, self.ctxt, None) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/test_quota.py0000664000175000017500000025020600000000000021223 0ustar00zuulzuul00000000000000 # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import collections import datetime from unittest import mock import ddt from oslo_config import cfg from oslo_utils import timeutils from cinder import backup from cinder.backup import api as backup_api from cinder import context from cinder import db from cinder.db.sqlalchemy import api as sqa_api from cinder.db.sqlalchemy import models as sqa_models from cinder import exception from cinder import objects from cinder.objects import fields from cinder import quota from cinder import quota_utils from cinder.scheduler import rpcapi as scheduler_rpcapi from cinder.tests.unit import fake_constants as fake from cinder.tests.unit import fake_snapshot from cinder.tests.unit import fake_volume import cinder.tests.unit.image.fake from cinder.tests.unit import test from cinder.tests.unit import utils as tests_utils from cinder import volume CONF = cfg.CONF class QuotaIntegrationTestCase(test.TestCase): def setUp(self): objects.register_all() super(QuotaIntegrationTestCase, self).setUp() self.volume_type_name = CONF.default_volume_type self.volume_type = objects.VolumeType.get_by_name_or_id( context.get_admin_context(), identity=self.volume_type_name) self.flags(quota_volumes=2, quota_snapshots=2, quota_gigabytes=20, quota_backups=2, quota_backup_gigabytes=20) self.user_id = fake.USER_ID self.project_id = fake.PROJECT_ID self.context = context.RequestContext(self.user_id, self.project_id, is_admin=True) # Destroy the 'default' quota_class in the database to avoid # conflicts with the test cases here that are setting up their own # defaults. db.quota_class_destroy_all_by_name(self.context, 'default') self.addCleanup(cinder.tests.unit.image.fake.FakeImageService_reset) def _create_volume(self, size=1): """Create a test volume.""" vol = {} vol['user_id'] = self.user_id vol['project_id'] = self.project_id vol['size'] = size vol['status'] = 'available' vol['volume_type_id'] = self.volume_type['id'] vol['host'] = 'fake_host' vol['availability_zone'] = 'fake_zone' vol['attach_status'] = fields.VolumeAttachStatus.DETACHED volume = objects.Volume(context=self.context, **vol) volume.create() return volume def _create_snapshot(self, volume): snapshot = objects.Snapshot(self.context) snapshot.user_id = self.user_id or fake.USER_ID snapshot.project_id = self.project_id or fake.PROJECT_ID snapshot.volume_id = volume['id'] snapshot.volume_size = volume['size'] snapshot.status = fields.SnapshotStatus.AVAILABLE snapshot.create() return snapshot def _create_backup(self, volume): backup = {} backup['user_id'] = self.user_id backup['project_id'] = self.project_id backup['volume_id'] = volume['id'] backup['volume_size'] = volume['size'] backup['status'] = fields.BackupStatus.AVAILABLE return db.backup_create(self.context, backup) def test_volume_size_limit_exceeds(self): resource = 'volumes_%s' % self.volume_type_name db.quota_class_create(self.context, 'default', resource, 1) flag_args = { 'quota_volumes': 10, 'quota_gigabytes': 1000, 'per_volume_size_limit': 5 } self.flags(**flag_args) self.assertRaises(exception.VolumeSizeExceedsLimit, volume.API().create, self.context, 10, '', '',) def test_too_many_volumes(self): volume_ids = [] for _i in range(CONF.quota_volumes): vol_ref = self._create_volume() volume_ids.append(vol_ref['id']) ex = self.assertRaises(exception.VolumeLimitExceeded, volume.API().create, self.context, 1, '', '', volume_type=self.volume_type) msg = ("Maximum number of volumes allowed (%d) exceeded for" " quota 'volumes'." 
% CONF.quota_volumes) self.assertEqual(msg, str(ex)) for volume_id in volume_ids: db.volume_destroy(self.context, volume_id) def test_too_many_volumes_of_type(self): resource = 'volumes_%s' % self.volume_type_name db.quota_class_create(self.context, 'default', resource, 1) flag_args = { 'quota_volumes': 2000, 'quota_gigabytes': 2000 } self.flags(**flag_args) vol_ref = self._create_volume() ex = self.assertRaises(exception.VolumeLimitExceeded, volume.API().create, self.context, 1, '', '', volume_type=self.volume_type) msg = ("Maximum number of volumes allowed (1) exceeded for" " quota '%s'." % resource) self.assertEqual(msg, str(ex)) vol_ref.destroy() def test__snapshots_quota_value(self): test_volume1 = tests_utils.create_volume( self.context, status='available', host=CONF.host, volume_type_id=self.vt['id']) test_volume2 = tests_utils.create_volume( self.context, status='available', host=CONF.host, volume_type_id=self.vt['id']) volume_api = volume.API() volume_api.create_snapshots_in_db(self.context, [test_volume1, test_volume2], 'fake_name', 'fake_description', fake.CONSISTENCY_GROUP_ID) usages = db.quota_usage_get_all_by_project(self.context, self.project_id) self.assertEqual(2, usages['snapshots']['in_use']) def test_too_many_snapshots_of_type(self): resource = 'snapshots_%s' % self.volume_type_name db.quota_class_create(self.context, 'default', resource, 1) flag_args = { 'quota_volumes': 2000, 'quota_gigabytes': 2000, } self.flags(**flag_args) vol_ref = self._create_volume() snap_ref = self._create_snapshot(vol_ref) self.assertRaises(exception.SnapshotLimitExceeded, volume.API().create_snapshot, self.context, vol_ref, '', '') snap_ref.destroy() vol_ref.destroy() def test_too_many_backups(self): resource = 'backups' db.quota_class_create(self.context, 'default', resource, 1) flag_args = { 'quota_backups': 2000, 'quota_backup_gigabytes': 2000 } self.flags(**flag_args) vol_ref = self._create_volume() backup_ref = self._create_backup(vol_ref) with mock.patch.object(backup_api.API, '_get_available_backup_service_host') as \ mock__get_available_backup_service: mock__get_available_backup_service.return_value = 'host' self.assertRaises(exception.BackupLimitExceeded, backup.API().create, self.context, 'name', 'description', vol_ref['id'], 'container', False, None) db.backup_destroy(self.context, backup_ref['id']) db.volume_destroy(self.context, vol_ref['id']) def test_too_many_gigabytes(self): volume_ids = [] vol_ref = self._create_volume(size=20) volume_ids.append(vol_ref['id']) raised_exc = self.assertRaises( exception.VolumeSizeExceedsAvailableQuota, volume.API().create, self.context, 1, '', '', volume_type=self.volume_type) expected = exception.VolumeSizeExceedsAvailableQuota( requested=1, quota=20, consumed=20) self.assertEqual(str(expected), str(raised_exc)) for volume_id in volume_ids: db.volume_destroy(self.context, volume_id) def test_too_many_combined_gigabytes(self): vol_ref = self._create_volume(size=10) snap_ref = self._create_snapshot(vol_ref) self.assertRaises(exception.QuotaError, volume.API().create_snapshot, self.context, vol_ref, '', '') usages = db.quota_usage_get_all_by_project(self.context, self.project_id) self.assertEqual(20, usages['gigabytes']['in_use']) snap_ref.destroy() vol_ref.destroy() def test_too_many_combined_backup_gigabytes(self): vol_ref = self._create_volume(size=10000) backup_ref = self._create_backup(vol_ref) with mock.patch.object(backup_api.API, '_get_available_backup_service_host') as \ mock__get_available_backup_service: 
mock__get_available_backup_service.return_value = 'host' self.assertRaises( exception.VolumeBackupSizeExceedsAvailableQuota, backup.API().create, context=self.context, name='name', description='description', volume_id=vol_ref['id'], container='container', incremental=False) db.backup_destroy(self.context, backup_ref['id']) vol_ref.destroy() def test_no_snapshot_gb_quota_flag(self): self.mock_object(scheduler_rpcapi.SchedulerAPI, 'create_snapshot') self.flags(quota_volumes=2, quota_snapshots=2, quota_gigabytes=20, no_snapshot_gb_quota=True) vol_ref = self._create_volume(size=10) snap_ref = self._create_snapshot(vol_ref) snap_ref2 = volume.API().create_snapshot(self.context, vol_ref, '', '') # Make sure the snapshot volume_size isn't included in usage. vol_ref2 = volume.API().create(self.context, 10, '', '') usages = db.quota_usage_get_all_by_project(self.context, self.project_id) self.assertEqual(20, usages['gigabytes']['in_use']) self.assertEqual(0, usages['gigabytes']['reserved']) snap_ref.destroy() snap_ref2.destroy() vol_ref.destroy() vol_ref2.destroy() def test_backup_gb_quota_flag(self): self.flags(quota_volumes=2, quota_snapshots=2, quota_backups=2, quota_gigabytes=20 ) vol_ref = self._create_volume(size=10) backup_ref = self._create_backup(vol_ref) with mock.patch.object(backup_api.API, '_get_available_backup_service_host') as \ mock_mock__get_available_backup_service: mock_mock__get_available_backup_service.return_value = 'host' backup_ref2 = backup.API().create(self.context, 'name', 'description', vol_ref['id'], 'container', False, None) # Make sure the backup volume_size isn't included in usage. vol_ref2 = volume.API().create(self.context, 10, '', '') usages = db.quota_usage_get_all_by_project(self.context, self.project_id) self.assertEqual(20, usages['gigabytes']['in_use']) self.assertEqual(0, usages['gigabytes']['reserved']) db.backup_destroy(self.context, backup_ref['id']) db.backup_destroy(self.context, backup_ref2['id']) vol_ref.destroy() vol_ref2.destroy() def test_too_many_gigabytes_of_type(self): resource = 'gigabytes_%s' % self.volume_type_name db.quota_class_create(self.context, 'default', resource, 10) flag_args = { 'quota_volumes': 2000, 'quota_gigabytes': 2000, } self.flags(**flag_args) vol_ref = self._create_volume(size=10) raised_exc = self.assertRaises( exception.VolumeSizeExceedsAvailableQuota, volume.API().create, self.context, 1, '', '', volume_type=self.volume_type) expected = exception.VolumeSizeExceedsAvailableQuota( requested=1, quota=10, consumed=10, name=resource) self.assertEqual(str(expected), str(raised_exc)) vol_ref.destroy() class FakeContext(object): def __init__(self, project_id, quota_class): self.is_admin = False self.user_id = 'fake_user' self.project_id = project_id self.quota_class = quota_class def elevated(self): elevated = self.__class__(self.project_id, self.quota_class) elevated.is_admin = True return elevated class FakeDriver(object): def __init__(self, by_project=None, by_class=None, reservations=None): self.called = [] self.by_project = by_project or {} self.by_class = by_class or {} self.reservations = reservations or [] def get_by_project(self, context, project_id, resource): self.called.append(('get_by_project', context, project_id, resource)) try: return self.by_project[project_id][resource] except KeyError: raise exception.ProjectQuotaNotFound(project_id=project_id) def get_by_class(self, context, quota_class, resource): self.called.append(('get_by_class', context, quota_class, resource)) try: return 
self.by_class[quota_class][resource] except KeyError: raise exception.QuotaClassNotFound(class_name=quota_class) def get_default(self, context, resource, parent_project_id=None): self.called.append(('get_default', context, resource, parent_project_id)) return resource.default def get_defaults(self, context, resources, parent_project_id=None): self.called.append(('get_defaults', context, resources, parent_project_id)) return resources def get_class_quotas(self, context, resources, quota_class, defaults=True): self.called.append(('get_class_quotas', context, resources, quota_class, defaults)) return resources def get_project_quotas(self, context, resources, project_id, quota_class=None, defaults=True, usages=True): self.called.append(('get_project_quotas', context, resources, project_id, quota_class, defaults, usages)) return resources def limit_check(self, context, resources, values, project_id=None): self.called.append(('limit_check', context, resources, values, project_id)) def reserve(self, context, resources, deltas, expire=None, project_id=None): self.called.append(('reserve', context, resources, deltas, expire, project_id)) return self.reservations def commit(self, context, reservations, project_id=None): self.called.append(('commit', context, reservations, project_id)) def rollback(self, context, reservations, project_id=None): self.called.append(('rollback', context, reservations, project_id)) def destroy_by_project(self, context, project_id): self.called.append(('destroy_by_project', context, project_id)) def expire(self, context): self.called.append(('expire', context)) class BaseResourceTestCase(test.TestCase): def test_no_flag(self): resource = quota.BaseResource('test_resource') self.assertEqual('test_resource', resource.name) self.assertIsNone(resource.flag) self.assertEqual(-1, resource.default) def test_with_flag(self): # We know this flag exists, so use it... 
self.flags(quota_volumes=10) resource = quota.BaseResource('test_resource', 'quota_volumes') self.assertEqual('test_resource', resource.name) self.assertEqual('quota_volumes', resource.flag) self.assertEqual(10, resource.default) def test_with_flag_no_quota(self): self.flags(quota_volumes=-1) resource = quota.BaseResource('test_resource', 'quota_volumes') self.assertEqual('test_resource', resource.name) self.assertEqual('quota_volumes', resource.flag) self.assertEqual(-1, resource.default) def test_quota_no_project_no_class(self): self.flags(quota_volumes=10) resource = quota.BaseResource('test_resource', 'quota_volumes') driver = FakeDriver() context = FakeContext(None, None) quota_value = resource.quota(driver, context) self.assertEqual(10, quota_value) def test_quota_with_project_no_class(self): self.flags(quota_volumes=10) resource = quota.BaseResource('test_resource', 'quota_volumes') driver = FakeDriver( by_project=dict( test_project=dict(test_resource=15), )) context = FakeContext('test_project', None) quota_value = resource.quota(driver, context) self.assertEqual(15, quota_value) def test_quota_no_project_with_class(self): self.flags(quota_volumes=10) resource = quota.BaseResource('test_resource', 'quota_volumes') driver = FakeDriver( by_class=dict( test_class=dict(test_resource=20), )) context = FakeContext(None, 'test_class') quota_value = resource.quota(driver, context) self.assertEqual(20, quota_value) def test_quota_with_project_with_class(self): self.flags(quota_volumes=10) resource = quota.BaseResource('test_resource', 'quota_volumes') driver = FakeDriver(by_project=dict( test_project=dict(test_resource=15), ), by_class=dict(test_class=dict(test_resource=20), )) context = FakeContext('test_project', 'test_class') quota_value = resource.quota(driver, context) self.assertEqual(15, quota_value) def test_quota_override_project_with_class(self): self.flags(quota_volumes=10) resource = quota.BaseResource('test_resource', 'quota_volumes') driver = FakeDriver(by_project=dict( test_project=dict(test_resource=15), override_project=dict(test_resource=20), )) context = FakeContext('test_project', 'test_class') quota_value = resource.quota(driver, context, project_id='override_project') self.assertEqual(20, quota_value) def test_quota_override_subproject_no_class(self): self.flags(quota_volumes=10) resource = quota.BaseResource('test_resource', 'quota_volumes', parent_project_id='test_parent_project') driver = FakeDriver() context = FakeContext('test_project', None) quota_value = resource.quota(driver, context) self.assertEqual(0, quota_value) def test_quota_with_project_override_class(self): self.flags(quota_volumes=10) resource = quota.BaseResource('test_resource', 'quota_volumes') driver = FakeDriver(by_class=dict( test_class=dict(test_resource=15), override_class=dict(test_resource=20), )) context = FakeContext('test_project', 'test_class') quota_value = resource.quota(driver, context, quota_class='override_class') self.assertEqual(20, quota_value) class VolumeTypeResourceTestCase(test.TestCase): def test_name_and_flag(self): volume_type_name = 'foo' volume = {'name': volume_type_name, 'id': 'myid'} resource = quota.VolumeTypeResource('volumes', volume) self.assertEqual('volumes_%s' % volume_type_name, resource.name) self.assertIsNone(resource.flag) self.assertEqual(-1, resource.default) class QuotaEngineTestCase(test.TestCase): def test_init(self): quota_obj = quota.QuotaEngine() self.assertEqual({}, quota_obj.resources) self.assertIsInstance(quota_obj._driver, quota.DbQuotaDriver) 
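    # Editor's note -- descriptive comment added for clarity; not in the
    # original source. The next two tests exercise how QuotaEngine resolves
    # its ``quota_driver_class`` argument: based on the assertions below, a
    # dotted-path string such as 'cinder.tests.unit.test_quota.FakeDriver'
    # appears to be imported and instantiated, while an object passed in
    # directly appears to be stored on ``_driver`` as-is.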
def test_init_override_string(self): quota_obj = quota.QuotaEngine( quota_driver_class='cinder.tests.unit.test_quota.FakeDriver') self.assertEqual({}, quota_obj.resources) self.assertIsInstance(quota_obj._driver, FakeDriver) def test_init_override_obj(self): quota_obj = quota.QuotaEngine(quota_driver_class=FakeDriver) self.assertEqual({}, quota_obj.resources) self.assertEqual(FakeDriver, quota_obj._driver) def test_register_resource(self): quota_obj = quota.QuotaEngine() resource = quota.BaseResource('test_resource') quota_obj.register_resource(resource) self.assertEqual(dict(test_resource=resource), quota_obj.resources) def test_register_resources(self): quota_obj = quota.QuotaEngine() resources = [ quota.BaseResource('test_resource1'), quota.BaseResource('test_resource2'), quota.BaseResource('test_resource3'), ] quota_obj.register_resources(resources) self.assertEqual(dict(test_resource1=resources[0], test_resource2=resources[1], test_resource3=resources[2], ), quota_obj.resources) def test_get_by_project(self): context = FakeContext('test_project', 'test_class') driver = FakeDriver( by_project=dict( test_project=dict(test_resource=42))) quota_obj = quota.QuotaEngine(quota_driver_class=driver) result = quota_obj.get_by_project(context, 'test_project', 'test_resource') self.assertEqual([('get_by_project', context, 'test_project', 'test_resource'), ], driver.called) self.assertEqual(42, result) def test_get_by_class(self): context = FakeContext('test_project', 'test_class') driver = FakeDriver( by_class=dict( test_class=dict(test_resource=42))) quota_obj = quota.QuotaEngine(quota_driver_class=driver) result = quota_obj.get_by_class(context, 'test_class', 'test_resource') self.assertEqual([('get_by_class', context, 'test_class', 'test_resource'), ], driver.called) self.assertEqual(42, result) def _make_quota_obj(self, driver): quota_obj = quota.QuotaEngine(quota_driver_class=driver) resources = [ quota.BaseResource('test_resource4'), quota.BaseResource('test_resource3'), quota.BaseResource('test_resource2'), quota.BaseResource('test_resource1'), ] quota_obj.register_resources(resources) return quota_obj def test_get_defaults(self): context = FakeContext(None, None) parent_project_id = None driver = FakeDriver() quota_obj = self._make_quota_obj(driver) result = quota_obj.get_defaults(context) self.assertEqual([('get_defaults', context, quota_obj.resources, parent_project_id), ], driver.called) self.assertEqual(quota_obj.resources, result) def test_get_class_quotas(self): context = FakeContext(None, None) driver = FakeDriver() quota_obj = self._make_quota_obj(driver) result1 = quota_obj.get_class_quotas(context, 'test_class') result2 = quota_obj.get_class_quotas(context, 'test_class', False) self.assertEqual([ ('get_class_quotas', context, quota_obj.resources, 'test_class', True), ('get_class_quotas', context, quota_obj.resources, 'test_class', False), ], driver.called) self.assertEqual(quota_obj.resources, result1) self.assertEqual(quota_obj.resources, result2) def test_get_project_quotas(self): context = FakeContext(None, None) driver = FakeDriver() quota_obj = self._make_quota_obj(driver) result1 = quota_obj.get_project_quotas(context, 'test_project') result2 = quota_obj.get_project_quotas(context, 'test_project', quota_class='test_class', defaults=False, usages=False) self.assertEqual([ ('get_project_quotas', context, quota_obj.resources, 'test_project', None, True, True), ('get_project_quotas', context, quota_obj.resources, 'test_project', 'test_class', False, False), ], driver.called) 
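# The fake driver used by these engine tests records every call it receives
# and hands back the resource mapping it was given, so the results checked
# below are expected to be the engine's own resource dictionary.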
self.assertEqual(quota_obj.resources, result1) self.assertEqual(quota_obj.resources, result2) def test_get_subproject_quotas(self): context = FakeContext(None, None) driver = FakeDriver() quota_obj = self._make_quota_obj(driver) result1 = quota_obj.get_project_quotas(context, 'test_project') result2 = quota_obj.get_project_quotas(context, 'test_project', quota_class='test_class', defaults=False, usages=False) self.assertEqual([ ('get_project_quotas', context, quota_obj.resources, 'test_project', None, True, True), ('get_project_quotas', context, quota_obj.resources, 'test_project', 'test_class', False, False), ], driver.called) self.assertEqual(quota_obj.resources, result1) self.assertEqual(quota_obj.resources, result2) def test_limit_check(self): context = FakeContext(None, None) driver = FakeDriver() quota_obj = self._make_quota_obj(driver) quota_obj.limit_check(context, test_resource1=4, test_resource2=3, test_resource3=2, test_resource4=1) self.assertEqual([ ('limit_check', context, quota_obj.resources, dict( test_resource1=4, test_resource2=3, test_resource3=2, test_resource4=1,), None), ], driver.called) def test_reserve(self): context = FakeContext(None, None) driver = FakeDriver(reservations=['resv-01', 'resv-02', 'resv-03', 'resv-04', ]) quota_obj = self._make_quota_obj(driver) result1 = quota_obj.reserve(context, test_resource1=4, test_resource2=3, test_resource3=2, test_resource4=1) result2 = quota_obj.reserve(context, expire=3600, test_resource1=1, test_resource2=2, test_resource3=3, test_resource4=4) result3 = quota_obj.reserve(context, project_id='fake_project', test_resource1=1, test_resource2=2, test_resource3=3, test_resource4=4) self.assertEqual([ ('reserve', context, quota_obj.resources, dict( test_resource1=4, test_resource2=3, test_resource3=2, test_resource4=1, ), None, None), ('reserve', context, quota_obj.resources, dict( test_resource1=1, test_resource2=2, test_resource3=3, test_resource4=4, ), 3600, None), ('reserve', context, quota_obj.resources, dict( test_resource1=1, test_resource2=2, test_resource3=3, test_resource4=4, ), None, 'fake_project'), ], driver.called) self.assertEqual(['resv-01', 'resv-02', 'resv-03', 'resv-04', ], result1) self.assertEqual(['resv-01', 'resv-02', 'resv-03', 'resv-04', ], result2) self.assertEqual(['resv-01', 'resv-02', 'resv-03', 'resv-04', ], result3) def test_commit(self): context = FakeContext(None, None) driver = FakeDriver() quota_obj = self._make_quota_obj(driver) quota_obj.commit(context, ['resv-01', 'resv-02', 'resv-03']) self.assertEqual([('commit', context, ['resv-01', 'resv-02', 'resv-03'], None), ], driver.called) def test_rollback(self): context = FakeContext(None, None) driver = FakeDriver() quota_obj = self._make_quota_obj(driver) quota_obj.rollback(context, ['resv-01', 'resv-02', 'resv-03']) self.assertEqual([('rollback', context, ['resv-01', 'resv-02', 'resv-03'], None), ], driver.called) def test_destroy_by_project(self): context = FakeContext(None, None) driver = FakeDriver() quota_obj = self._make_quota_obj(driver) quota_obj.destroy_by_project(context, 'test_project') self.assertEqual([('destroy_by_project', context, 'test_project'), ], driver.called) def test_expire(self): context = FakeContext(None, None) driver = FakeDriver() quota_obj = self._make_quota_obj(driver) quota_obj.expire(context) self.assertEqual([('expire', context), ], driver.called) def test_resource_names(self): quota_obj = self._make_quota_obj(None) self.assertEqual(['test_resource1', 'test_resource2', 'test_resource3', 'test_resource4'], 
quota_obj.resource_names) class VolumeTypeQuotaEngineTestCase(test.TestCase): def test_default_resources(self): def fake_vtga(context, inactive=False, filters=None): return {} self.mock_object(db, 'volume_type_get_all', fake_vtga) engine = quota.VolumeTypeQuotaEngine() self.assertEqual(['backup_gigabytes', 'backups', 'gigabytes', 'per_volume_gigabytes', 'snapshots', 'volumes'], engine.resource_names) def test_volume_type_resources(self): ctx = context.RequestContext('admin', 'admin', is_admin=True) vtype = db.volume_type_create(ctx, {'name': 'type1'}) vtype2 = db.volume_type_create(ctx, {'name': 'type_2'}) def fake_vtga(context, inactive=False, filters=None): return { 'type1': { 'id': vtype['id'], 'name': 'type1', 'extra_specs': {}, }, 'type_2': { 'id': vtype['id'], 'name': 'type_2', 'extra_specs': {}, }, } self.mock_object(db, 'volume_type_get_all', fake_vtga) engine = quota.VolumeTypeQuotaEngine() self.assertEqual(['backup_gigabytes', 'backups', 'gigabytes', 'gigabytes_type1', 'gigabytes_type_2', 'per_volume_gigabytes', 'snapshots', 'snapshots_type1', 'snapshots_type_2', 'volumes', 'volumes_type1', 'volumes_type_2', ], engine.resource_names) db.volume_type_destroy(ctx, vtype['id']) db.volume_type_destroy(ctx, vtype2['id']) def test_update_quota_resource(self): ctx = context.RequestContext('admin', 'admin', is_admin=True) engine = quota.VolumeTypeQuotaEngine() engine.update_quota_resource(ctx, 'type1', 'type2') class DbQuotaDriverBaseTestCase(test.TestCase): def setUp(self): super(DbQuotaDriverBaseTestCase, self).setUp() self.flags(quota_volumes=10, quota_snapshots=10, quota_gigabytes=1000, quota_backups=10, quota_backup_gigabytes=1000, reservation_expire=86400, until_refresh=0, max_age=0, ) # These can be used for expected defaults for child/non-child self._default_quotas = dict( volumes=10, snapshots=10, gigabytes=1000, backups=10, backup_gigabytes=1000, per_volume_gigabytes=-1) self.calls = [] patcher = mock.patch.object(timeutils, 'utcnow') self.addCleanup(patcher.stop) self.mock_utcnow = patcher.start() self.mock_utcnow.return_value = datetime.datetime.utcnow() def _mock_quota_class_get_default(self): # Mock quota_class_get_default def fake_qcgd(context): self.calls.append('quota_class_get_defaults') return dict(volumes=10, snapshots=10, gigabytes=1000, backups=10, backup_gigabytes=1000 ) self.mock_object(db, 'quota_class_get_defaults', fake_qcgd) def _mock_volume_type_get_all(self): def fake_vtga(context, inactive=False, filters=None): return {} self.mock_object(db, 'volume_type_get_all', fake_vtga) def _mock_quota_class_get_all_by_name(self): # Mock quota_class_get_all_by_name def fake_qcgabn(context, quota_class): self.calls.append('quota_class_get_all_by_name') self.assertEqual('test_class', quota_class) return dict(gigabytes=500, volumes=10, snapshots=10, backups=10, backup_gigabytes=500) self.mock_object(db, 'quota_class_get_all_by_name', fake_qcgabn) class DbQuotaDriverTestCase(DbQuotaDriverBaseTestCase): def setUp(self): super(DbQuotaDriverTestCase, self).setUp() self.driver = quota.DbQuotaDriver() def test_get_defaults(self): # Use our pre-defined resources self._mock_quota_class_get_default() self._mock_volume_type_get_all() result = self.driver.get_defaults(None, quota.QUOTAS.resources) self.assertEqual( dict( volumes=10, snapshots=10, gigabytes=1000, backups=10, backup_gigabytes=1000, per_volume_gigabytes=-1), result) def test_get_class_quotas(self): self._mock_quota_class_get_all_by_name() self._mock_volume_type_get_all() ctxt = context.RequestContext('admin', 'admin', 
is_admin=True) result = self.driver.get_class_quotas( ctxt, quota.QUOTAS.resources, 'test_class') self.assertEqual(['quota_class_get_all_by_name'], self.calls) self.assertEqual(dict(volumes=10, gigabytes=500, snapshots=10, backups=10, backup_gigabytes=500, per_volume_gigabytes=-1), result) def test_get_class_quotas_no_defaults(self): self._mock_quota_class_get_all_by_name() ctxt = context.RequestContext('admin', 'admin', is_admin=True) result = self.driver.get_class_quotas( ctxt, quota.QUOTAS.resources, 'test_class', False) self.assertEqual(['quota_class_get_all_by_name'], self.calls) self.assertEqual(dict(volumes=10, gigabytes=500, snapshots=10, backups=10, backup_gigabytes=500), result) def _mock_get_by_project(self): def fake_qgabp(context, project_id): self.calls.append('quota_get_all_by_project') self.assertEqual('test_project', project_id) return dict(volumes=10, gigabytes=50, reserved=0, snapshots=10, backups=10, backup_gigabytes=50) def fake_qugabp(context, project_id): self.calls.append('quota_usage_get_all_by_project') self.assertEqual('test_project', project_id) return dict(volumes=dict(in_use=2, reserved=0), snapshots=dict(in_use=2, reserved=0), gigabytes=dict(in_use=10, reserved=0), backups=dict(in_use=2, reserved=0), backup_gigabytes=dict(in_use=10, reserved=0) ) self.mock_object(db, 'quota_get_all_by_project', fake_qgabp) self.mock_object(db, 'quota_usage_get_all_by_project', fake_qugabp) self._mock_quota_class_get_all_by_name() self._mock_quota_class_get_default() def test_get_project_quotas(self): self._mock_get_by_project() self._mock_volume_type_get_all() result = self.driver.get_project_quotas( FakeContext('test_project', 'test_class'), quota.QUOTAS.resources, 'test_project') self.assertEqual(['quota_get_all_by_project', 'quota_usage_get_all_by_project', 'quota_class_get_all_by_name', 'quota_class_get_defaults', ], self.calls) self.assertEqual(dict(volumes=dict(limit=10, in_use=2, reserved=0, ), snapshots=dict(limit=10, in_use=2, reserved=0, ), gigabytes=dict(limit=50, in_use=10, reserved=0, ), backups=dict(limit=10, in_use=2, reserved=0, ), backup_gigabytes=dict(limit=50, in_use=10, reserved=0, ), per_volume_gigabytes=dict(in_use=0, limit=-1, reserved= 0) ), result) @mock.patch('cinder.quota.db.quota_get_all_by_project') @mock.patch('cinder.quota.db.quota_class_get_defaults') def test_get_project_quotas_lazy_load_defaults( self, mock_defaults, mock_quotas): defaults = self._default_quotas volume_types = volume.volume_types.get_all_types( context.get_admin_context()) for vol_type in volume_types: defaults['volumes_' + vol_type] = -1 defaults['snapshots_' + vol_type] = -1 defaults['gigabytes_' + vol_type] = -1 mock_quotas.return_value = defaults self.driver.get_project_quotas( FakeContext('test_project', None), quota.QUOTAS.resources, 'test_project', usages=False) # Shouldn't load a project's defaults if all the quotas are already # defined in the DB self.assertFalse(mock_defaults.called) mock_quotas.return_value = {} self.driver.get_project_quotas( FakeContext('test_project', None), quota.QUOTAS.resources, 'test_project', usages=False) self.assertTrue(mock_defaults.called) def test_get_project_quotas_alt_context_no_class(self): self._mock_get_by_project() self._mock_volume_type_get_all() result = self.driver.get_project_quotas( FakeContext('other_project', 'other_class'), quota.QUOTAS.resources, 'test_project') self.assertEqual(['quota_get_all_by_project', 'quota_usage_get_all_by_project', 'quota_class_get_defaults', ], self.calls) 
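# With a context that belongs to another project and no explicit quota_class
# argument, the per-class lookup is skipped: the call list above contains
# only the project quotas, the usages and the class defaults.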
self.assertEqual(dict(volumes=dict(limit=10, in_use=2, reserved=0, ), snapshots=dict(limit=10, in_use=2, reserved=0, ), gigabytes=dict(limit=50, in_use=10, reserved=0, ), backups=dict(limit=10, in_use=2, reserved=0, ), backup_gigabytes=dict(limit=50, in_use=10, reserved=0, ), per_volume_gigabytes=dict(in_use=0, limit=-1, reserved=0) ), result) def test_get_project_quotas_alt_context_with_class(self): self._mock_get_by_project() self._mock_volume_type_get_all() result = self.driver.get_project_quotas( FakeContext('other_project', 'other_class'), quota.QUOTAS.resources, 'test_project', quota_class='test_class') self.assertEqual(['quota_get_all_by_project', 'quota_usage_get_all_by_project', 'quota_class_get_all_by_name', 'quota_class_get_defaults', ], self.calls) self.assertEqual(dict(volumes=dict(limit=10, in_use=2, reserved=0, ), snapshots=dict(limit=10, in_use=2, reserved=0, ), gigabytes=dict(limit=50, in_use=10, reserved=0, ), backups=dict(limit=10, in_use=2, reserved=0, ), backup_gigabytes=dict(limit=50, in_use=10, reserved=0, ), per_volume_gigabytes=dict(in_use=0, limit=-1, reserved= 0)), result) def test_get_project_quotas_no_defaults(self): self._mock_get_by_project() self._mock_volume_type_get_all() result = self.driver.get_project_quotas( FakeContext('test_project', 'test_class'), quota.QUOTAS.resources, 'test_project', defaults=False) self.assertEqual(['quota_get_all_by_project', 'quota_usage_get_all_by_project', 'quota_class_get_all_by_name'], self.calls) self.assertEqual(dict(backups=dict(limit=10, in_use=2, reserved=0, ), backup_gigabytes=dict(limit=50, in_use=10, reserved=0, ), gigabytes=dict(limit=50, in_use=10, reserved=0, ), snapshots=dict(limit=10, in_use=2, reserved=0, ), volumes=dict(limit=10, in_use=2, reserved=0, ), ), result) def test_get_project_quotas_no_usages(self): self._mock_get_by_project() self._mock_volume_type_get_all() result = self.driver.get_project_quotas( FakeContext('test_project', 'test_class'), quota.QUOTAS.resources, 'test_project', usages=False) self.assertEqual(['quota_get_all_by_project', 'quota_class_get_all_by_name', 'quota_class_get_defaults', ], self.calls) self.assertEqual(dict(volumes=dict(limit=10, ), snapshots=dict(limit=10, ), backups=dict(limit=10, ), gigabytes=dict(limit=50, ), backup_gigabytes=dict(limit=50, ), per_volume_gigabytes=dict(limit=-1, )), result) def _mock_get_project_quotas(self): def fake_get_project_quotas(context, resources, project_id, quota_class=None, defaults=True, usages=True, parent_project_id=None): self.calls.append('get_project_quotas') return {k: dict(limit=v.default) for k, v in resources.items()} self.mock_object(self.driver, 'get_project_quotas', fake_get_project_quotas) def test_get_quotas_has_sync_unknown(self): self._mock_get_project_quotas() self.assertRaises(exception.QuotaResourceUnknown, self.driver._get_quotas, None, quota.QUOTAS.resources, ['unknown'], True) self.assertEqual([], self.calls) def test_get_quotas_no_sync_unknown(self): self._mock_get_project_quotas() self.assertRaises(exception.QuotaResourceUnknown, self.driver._get_quotas, None, quota.QUOTAS.resources, ['unknown'], False) self.assertEqual([], self.calls) def test_get_quotas_has_sync_no_sync_resource(self): self._mock_get_project_quotas() self.assertRaises(exception.QuotaResourceUnknown, self.driver._get_quotas, None, quota.QUOTAS.resources, ['metadata_items'], True) self.assertEqual([], self.calls) def test_get_quotas_no_sync_has_sync_resource(self): self._mock_get_project_quotas() self.assertRaises(exception.QuotaResourceUnknown, 
self.driver._get_quotas, None, quota.QUOTAS.resources, ['volumes'], False) self.assertEqual([], self.calls) def test_get_quotas_has_sync(self): self._mock_get_project_quotas() result = self.driver._get_quotas(FakeContext('test_project', 'test_class'), quota.QUOTAS.resources, ['volumes', 'gigabytes'], True) self.assertEqual(['get_project_quotas'], self.calls) self.assertEqual(dict(volumes=10, gigabytes=1000, ), result) def _mock_quota_reserve(self): def fake_quota_reserve(context, resources, quotas, deltas, expire, until_refresh, max_age, project_id=None): self.calls.append(('quota_reserve', expire, until_refresh, max_age)) return ['resv-1', 'resv-2', 'resv-3'] self.mock_object(db, 'quota_reserve', fake_quota_reserve) def test_reserve_bad_expire(self): self._mock_get_project_quotas() self._mock_quota_reserve() self.assertRaises(exception.InvalidReservationExpiration, self.driver.reserve, FakeContext('test_project', 'test_class'), quota.QUOTAS.resources, dict(volumes=2), expire='invalid') self.assertEqual([], self.calls) def test_reserve_default_expire(self): self._mock_get_project_quotas() self._mock_quota_reserve() result = self.driver.reserve(FakeContext('test_project', 'test_class'), quota.QUOTAS.resources, dict(volumes=2)) expire = timeutils.utcnow() + datetime.timedelta(seconds=86400) self.assertEqual(['get_project_quotas', ('quota_reserve', expire, 0, 0), ], self.calls) self.assertEqual(['resv-1', 'resv-2', 'resv-3'], result) def test_reserve_int_expire(self): self._mock_get_project_quotas() self._mock_quota_reserve() result = self.driver.reserve(FakeContext('test_project', 'test_class'), quota.QUOTAS.resources, dict(volumes=2), expire=3600) expire = timeutils.utcnow() + datetime.timedelta(seconds=3600) self.assertEqual(['get_project_quotas', ('quota_reserve', expire, 0, 0), ], self.calls) self.assertEqual(['resv-1', 'resv-2', 'resv-3'], result) def test_reserve_timedelta_expire(self): self._mock_get_project_quotas() self._mock_quota_reserve() expire_delta = datetime.timedelta(seconds=60) result = self.driver.reserve(FakeContext('test_project', 'test_class'), quota.QUOTAS.resources, dict(volumes=2), expire=expire_delta) expire = timeutils.utcnow() + expire_delta self.assertEqual(['get_project_quotas', ('quota_reserve', expire, 0, 0), ], self.calls) self.assertEqual(['resv-1', 'resv-2', 'resv-3'], result) def test_reserve_datetime_expire(self): self._mock_get_project_quotas() self._mock_quota_reserve() expire = timeutils.utcnow() + datetime.timedelta(seconds=120) result = self.driver.reserve(FakeContext('test_project', 'test_class'), quota.QUOTAS.resources, dict(volumes=2), expire=expire) self.assertEqual(['get_project_quotas', ('quota_reserve', expire, 0, 0), ], self.calls) self.assertEqual(['resv-1', 'resv-2', 'resv-3'], result) def test_reserve_until_refresh(self): self._mock_get_project_quotas() self._mock_quota_reserve() self.flags(until_refresh=500) expire = timeutils.utcnow() + datetime.timedelta(seconds=120) result = self.driver.reserve(FakeContext('test_project', 'test_class'), quota.QUOTAS.resources, dict(volumes=2), expire=expire) self.assertEqual(['get_project_quotas', ('quota_reserve', expire, 500, 0), ], self.calls) self.assertEqual(['resv-1', 'resv-2', 'resv-3'], result) def test_reserve_max_age(self): self._mock_get_project_quotas() self._mock_quota_reserve() self.flags(max_age=86400) expire = timeutils.utcnow() + datetime.timedelta(seconds=120) result = self.driver.reserve(FakeContext('test_project', 'test_class'), quota.QUOTAS.resources, dict(volumes=2), 
expire=expire) self.assertEqual(['get_project_quotas', ('quota_reserve', expire, 0, 86400), ], self.calls) self.assertEqual(['resv-1', 'resv-2', 'resv-3'], result) def _mock_quota_destroy_by_project(self): def fake_quota_destroy_by_project(context, project_id): self.calls.append(('quota_destroy_by_project', project_id)) return None self.mock_object(sqa_api, 'quota_destroy_by_project', fake_quota_destroy_by_project) def test_destroy_quota_by_project(self): self._mock_quota_destroy_by_project() self.driver.destroy_by_project(FakeContext('test_project', 'test_class'), 'test_project') self.assertEqual([('quota_destroy_by_project', ('test_project')), ], self.calls) class FakeSession(object): def begin(self): return self def __enter__(self): return self def __exit__(self, exc_type, exc_value, exc_traceback): return False def query(self, *args, **kwargs): pass def rollback(self): pass def commit(self): pass class FakeUsage(sqa_models.QuotaUsage): def save(self, *args, **kwargs): pass class QuotaReserveSqlAlchemyTestCase(test.TestCase): # cinder.db.sqlalchemy.api.quota_reserve is so complex it needs its # own test case, and since it's a quota manipulator, this is the # best place to put it... def setUp(self): super(QuotaReserveSqlAlchemyTestCase, self).setUp() self.sync_called = set() def make_sync(res_name): def fake_sync(context, project_id, volume_type_id=None, volume_type_name=None, session=None): self.sync_called.add(res_name) if res_name in self.usages: if self.usages[res_name].in_use < 0: return {res_name: 2} else: return {res_name: self.usages[res_name].in_use - 1} return {res_name: 0} return fake_sync self.resources = {} QUOTA_SYNC_FUNCTIONS = {} for res_name in ('volumes', 'gigabytes'): res = quota.ReservableResource(res_name, '_sync_%s' % res_name) QUOTA_SYNC_FUNCTIONS['_sync_%s' % res_name] = make_sync(res_name) self.resources[res_name] = res self.mock_object(sqa_api, 'QUOTA_SYNC_FUNCTIONS', QUOTA_SYNC_FUNCTIONS) self.expire = timeutils.utcnow() + datetime.timedelta(seconds=3600) self.usages = {} self.usages_created = {} self.reservations_created = {} def fake_get_quota_usages(context, project_id, resources=None): return self.usages.copy() def fake_quota_usage_create(context, project_id, resource, in_use, reserved, until_refresh): quota_usage_ref = self._make_quota_usage( project_id, resource, in_use, reserved, until_refresh, timeutils.utcnow(), timeutils.utcnow()) self.usages_created[resource] = quota_usage_ref return quota_usage_ref def fake_reservation_create(context, uuid, usage_id, project_id, resource, delta, expire): reservation_ref = self._make_reservation( uuid, usage_id, project_id, resource, delta, expire, timeutils.utcnow(), timeutils.utcnow()) self.reservations_created[resource] = reservation_ref return reservation_ref self.mock_object(sqa_api, '_get_quota_usages', fake_get_quota_usages) self.mock_object(sqa_api, '_quota_usage_create', fake_quota_usage_create) self.mock_object(sqa_api, '_reservation_create', fake_reservation_create) patcher = mock.patch.object(timeutils, 'utcnow') self.addCleanup(patcher.stop) self.mock_utcnow = patcher.start() self.mock_utcnow.return_value = datetime.datetime.utcnow() def _make_quota_usage(self, project_id, resource, in_use, reserved, until_refresh, created_at, updated_at): quota_usage_ref = FakeUsage() quota_usage_ref.id = len(self.usages) + len(self.usages_created) quota_usage_ref.project_id = project_id quota_usage_ref.resource = resource quota_usage_ref.in_use = in_use quota_usage_ref.reserved = reserved quota_usage_ref.until_refresh 
= until_refresh quota_usage_ref.created_at = created_at quota_usage_ref.updated_at = updated_at quota_usage_ref.deleted_at = None quota_usage_ref.deleted = False return quota_usage_ref def init_usage(self, project_id, resource, in_use, reserved, until_refresh=None, created_at=None, updated_at=None): if created_at is None: created_at = timeutils.utcnow() if updated_at is None: updated_at = timeutils.utcnow() quota_usage_ref = self._make_quota_usage(project_id, resource, in_use, reserved, until_refresh, created_at, updated_at) self.usages[resource] = quota_usage_ref def compare_usage(self, usage_dict, expected): for usage in expected: resource = usage['resource'] for key, value in usage.items(): actual = getattr(usage_dict[resource], key) self.assertEqual(value, actual, "%s != %s on usage for resource %s" % (actual, value, resource)) def _make_reservation(self, uuid, usage_id, project_id, resource, delta, expire, created_at, updated_at): reservation_ref = sqa_models.Reservation() reservation_ref.id = len(self.reservations_created) reservation_ref.uuid = uuid reservation_ref.usage_id = usage_id reservation_ref.project_id = project_id reservation_ref.resource = resource reservation_ref.delta = delta reservation_ref.expire = expire reservation_ref.created_at = created_at reservation_ref.updated_at = updated_at reservation_ref.deleted_at = None reservation_ref.deleted = False return reservation_ref def compare_reservation(self, reservations, expected): reservations = set(reservations) for resv in expected: resource = resv['resource'] resv_obj = self.reservations_created[resource] self.assertIn(resv_obj.uuid, reservations) reservations.discard(resv_obj.uuid) for key, value in resv.items(): actual = getattr(resv_obj, key) self.assertEqual(value, actual, "%s != %s on reservation for resource %s" % (actual, value, resource)) self.assertEqual(0, len(reservations)) @mock.patch.object(sqa_api, '_reservation_create') @mock.patch.object(sqa_api, '_get_sync_updates') @mock.patch.object(sqa_api, '_quota_usage_create') @mock.patch.object(sqa_api, '_get_quota_usages') def test_quota_reserve_create_usages(self, usages_mock, quota_create_mock, sync_mock, reserve_mock): project_id = 'test_project' ctxt = context.RequestContext('admin', project_id, is_admin=True) quotas = collections.OrderedDict([('volumes', 5), ('gigabytes', 10 * 1024)]) deltas = collections.OrderedDict([('volumes', 2), ('gigabytes', 2 * 1024)]) sync_mock.side_effect = [{'volumes': 2}, {'gigabytes': 2 * 1024}] vol_usage = self._make_quota_usage(project_id, 'volumes', 2, 0, None, None, None) gb_usage = self._make_quota_usage(project_id, 'gigabytes', 2 * 1024, 0, None, None, None) usages_mock.side_effect = [ {}, collections.OrderedDict([('volumes', vol_usage), ('gigabytes', gb_usage)]) ] reservations = [mock.Mock(), mock.Mock()] reserve_mock.side_effect = reservations result = sqa_api.quota_reserve(ctxt, self.resources, quotas, deltas, self.expire, 0, 0) self.assertEqual([r.uuid for r in reservations], result) usages_mock.assert_has_calls([ mock.call(mock.ANY, project_id, resources=deltas.keys()), mock.call(mock.ANY, project_id, resources=deltas.keys()) ]) sync_mock.assert_has_calls([ mock.call(mock.ANY, project_id, self.resources, 'volumes'), mock.call(mock.ANY, project_id, self.resources, 'gigabytes'), ]) quota_create_mock.assert_has_calls([ mock.call(mock.ANY, project_id, 'volumes', 2, 0, None), mock.call(mock.ANY, project_id, 'gigabytes', 2 * 1024, 0, None) ]) reserve_mock.assert_has_calls([ mock.call(mock.ANY, mock.ANY, vol_usage, 
project_id, 'volumes', 2, mock.ANY), mock.call(mock.ANY, mock.ANY, gb_usage, project_id, 'gigabytes', 2 * 1024, mock.ANY), ]) def test_quota_reserve_negative_in_use(self): self.init_usage('test_project', 'volumes', -1, 0, until_refresh=1) self.init_usage('test_project', 'gigabytes', -1, 0, until_refresh=1) ctxt = context.RequestContext('admin', 'test_project', is_admin=True) quotas = dict(volumes=5, gigabytes=10 * 1024, ) deltas = dict(volumes=2, gigabytes=2 * 1024, ) result = sqa_api.quota_reserve(ctxt, self.resources, quotas, deltas, self.expire, 5, 0) self.assertEqual(set(['volumes', 'gigabytes']), self.sync_called) self.compare_usage(self.usages, [dict(resource='volumes', project_id='test_project', in_use=2, reserved=2, until_refresh=5), dict(resource='gigabytes', project_id='test_project', in_use=2, reserved=2 * 1024, until_refresh=5), ]) self.assertEqual({}, self.usages_created) self.compare_reservation(result, [dict(resource='volumes', usage_id=self.usages['volumes'], project_id='test_project', delta=2), dict(resource='gigabytes', usage_id=self.usages['gigabytes'], delta=2 * 1024), ]) def test_quota_reserve_until_refresh(self): self.init_usage('test_project', 'volumes', 3, 0, until_refresh=1) self.init_usage('test_project', 'gigabytes', 3, 0, until_refresh=1) ctxt = context.RequestContext('admin', 'test_project', is_admin=True) quotas = dict(volumes=5, gigabytes=10 * 1024, ) deltas = dict(volumes=2, gigabytes=2 * 1024, ) result = sqa_api.quota_reserve(ctxt, self.resources, quotas, deltas, self.expire, 5, 0) self.assertEqual(set(['volumes', 'gigabytes']), self.sync_called) self.compare_usage(self.usages, [dict(resource='volumes', project_id='test_project', in_use=2, reserved=2, until_refresh=5), dict(resource='gigabytes', project_id='test_project', in_use=2, reserved=2 * 1024, until_refresh=5), ]) self.assertEqual({}, self.usages_created) self.compare_reservation(result, [dict(resource='volumes', usage_id=self.usages['volumes'], project_id='test_project', delta=2), dict(resource='gigabytes', usage_id=self.usages['gigabytes'], delta=2 * 1024), ]) def test_quota_reserve_until_refresh_enable(self): """Test that enabling until_refresh works.""" # Simulate service running with until_refresh disabled self.init_usage('test_project', 'volumes', 3, 0, until_refresh=None) self.init_usage('test_project', 'gigabytes', 100, 0, until_refresh=None) ctxt = context.RequestContext('admin', 'test_project', is_admin=True) quotas = dict(volumes=5, gigabytes=10 * 1024, ) deltas = dict(volumes=2, gigabytes=2 * 1024, ) # Simulate service is now running with until_refresh set to 5 sqa_api.quota_reserve(ctxt, self.resources, quotas, deltas, self.expire, 5, 0) self.compare_usage(self.usages, [dict(resource='volumes', project_id='test_project', in_use=3, reserved=2, until_refresh=5), dict(resource='gigabytes', project_id='test_project', in_use=100, reserved=2 * 1024, until_refresh=5), ]) def test_quota_reserve_until_refresh_disable(self): """Test that disabling until_refresh works.""" # Simulate service running with until_refresh enabled and set to 5 self.init_usage('test_project', 'volumes', 3, 0, until_refresh=5) self.init_usage('test_project', 'gigabytes', 100, 0, until_refresh=5) ctxt = context.RequestContext('admin', 'test_project', is_admin=True) quotas = dict(volumes=5, gigabytes=10 * 1024, ) deltas = dict(volumes=2, gigabytes=2 * 1024, ) # Simulate service is now running with until_refresh disabled sqa_api.quota_reserve(ctxt, self.resources, quotas, deltas, self.expire, None, 0) 
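# Passing None for until_refresh above should clear the value of 5 that was
# stored on both usage rows; the comparison below verifies it is reset to
# None while in_use is left untouched.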
self.compare_usage(self.usages, [dict(resource='volumes', project_id='test_project', in_use=3, reserved=2, until_refresh=None), dict(resource='gigabytes', project_id='test_project', in_use=100, reserved=2 * 1024, until_refresh=None), ]) def test_quota_reserve_max_age(self): max_age = 3600 record_created = (timeutils.utcnow() - datetime.timedelta(seconds=max_age)) self.init_usage('test_project', 'volumes', 3, 0, created_at=record_created, updated_at=record_created) self.init_usage('test_project', 'gigabytes', 3, 0, created_at=record_created, updated_at=record_created) ctxt = context.RequestContext('admin', 'test_project', is_admin=True) quotas = dict(volumes=5, gigabytes=10 * 1024, ) deltas = dict(volumes=2, gigabytes=2 * 1024, ) result = sqa_api.quota_reserve(ctxt, self.resources, quotas, deltas, self.expire, 0, max_age) self.assertEqual(set(['volumes', 'gigabytes']), self.sync_called) self.compare_usage(self.usages, [dict(resource='volumes', project_id='test_project', in_use=2, reserved=2, until_refresh=None), dict(resource='gigabytes', project_id='test_project', in_use=2, reserved=2 * 1024, until_refresh=None), ]) self.assertEqual({}, self.usages_created) self.compare_reservation(result, [dict(resource='volumes', usage_id=self.usages['volumes'], project_id='test_project', delta=2), dict(resource='gigabytes', usage_id=self.usages['gigabytes'], delta=2 * 1024), ]) def test_quota_reserve_max_age_negative(self): max_age = 3600 record_created = (timeutils.utcnow() + datetime.timedelta(seconds=max_age)) self.init_usage('test_project', 'volumes', 3, 0, created_at=record_created, updated_at=record_created) self.init_usage('test_project', 'gigabytes', 3, 0, created_at=record_created, updated_at=record_created) ctxt = context.RequestContext('admin', 'test_project', is_admin=True) quotas = dict(volumes=5, gigabytes=10 * 1024, ) deltas = dict(volumes=2, gigabytes=2 * 1024, ) result = sqa_api.quota_reserve(ctxt, self.resources, quotas, deltas, self.expire, 0, max_age) self.assertEqual(set(), self.sync_called) self.compare_usage(self.usages, [dict(resource='volumes', project_id='test_project', in_use=3, reserved=2, until_refresh=None), dict(resource='gigabytes', project_id='test_project', in_use=3, reserved=2 * 1024, until_refresh=None), ]) self.assertEqual({}, self.usages_created) self.compare_reservation(result, [dict(resource='volumes', usage_id=self.usages['volumes'], project_id='test_project', delta=2), dict(resource='gigabytes', usage_id=self.usages['gigabytes'], delta=2 * 1024), ]) def test_quota_reserve_no_refresh(self): self.init_usage('test_project', 'volumes', 3, 0) self.init_usage('test_project', 'gigabytes', 3, 0) ctxt = context.RequestContext('admin', 'test_project', is_admin=True) quotas = dict(volumes=5, gigabytes=10 * 1024, ) deltas = dict(volumes=2, gigabytes=2 * 1024, ) result = sqa_api.quota_reserve(ctxt, self.resources, quotas, deltas, self.expire, 0, 0) self.assertEqual(set([]), self.sync_called) self.compare_usage(self.usages, [dict(resource='volumes', project_id='test_project', in_use=3, reserved=2, until_refresh=None), dict(resource='gigabytes', project_id='test_project', in_use=3, reserved=2 * 1024, until_refresh=None), ]) self.assertEqual({}, self.usages_created) self.compare_reservation(result, [dict(resource='volumes', usage_id=self.usages['volumes'], project_id='test_project', delta=2), dict(resource='gigabytes', usage_id=self.usages['gigabytes'], delta=2 * 1024), ]) def test_quota_reserve_unders(self): self.init_usage('test_project', 'volumes', 1, 0) 
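# This test reserves with negative deltas (a quota release): the existing
# in_use and reserved values must stay untouched, while reservations carrying
# the negative deltas are still created.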
self.init_usage('test_project', 'gigabytes', 1 * 1024, 0) ctxt = context.RequestContext('admin', 'test_project', is_admin=True) quotas = dict(volumes=5, gigabytes=10 * 1024, ) deltas = dict(volumes=-2, gigabytes=-2 * 1024, ) result = sqa_api.quota_reserve(ctxt, self.resources, quotas, deltas, self.expire, 0, 0) self.assertEqual(set([]), self.sync_called) self.compare_usage(self.usages, [dict(resource='volumes', project_id='test_project', in_use=1, reserved=0, until_refresh=None), dict(resource='gigabytes', project_id='test_project', in_use=1 * 1024, reserved=0, until_refresh=None), ]) self.assertEqual({}, self.usages_created) self.compare_reservation(result, [dict(resource='volumes', usage_id=self.usages['volumes'], project_id='test_project', delta=-2), dict(resource='gigabytes', usage_id=self.usages['gigabytes'], delta=-2 * 1024), ]) def test_quota_reserve_overs(self): self.init_usage('test_project', 'volumes', 4, 0) self.init_usage('test_project', 'gigabytes', 10 * 1024, 0) ctxt = context.RequestContext('admin', 'test_project', is_admin=True) quotas = dict(volumes=5, gigabytes=10 * 1024, ) deltas = dict(volumes=2, gigabytes=2 * 1024, ) self.assertRaises(exception.OverQuota, sqa_api.quota_reserve, ctxt, self.resources, quotas, deltas, self.expire, 0, 0) self.assertEqual(set([]), self.sync_called) self.compare_usage(self.usages, [dict(resource='volumes', project_id='test_project', in_use=4, reserved=0, until_refresh=None), dict(resource='gigabytes', project_id='test_project', in_use=10 * 1024, reserved=0, until_refresh=None), ]) self.assertEqual({}, self.usages_created) self.assertEqual({}, self.reservations_created) def test_quota_reserve_reduction(self): self.init_usage('test_project', 'volumes', 10, 0) self.init_usage('test_project', 'gigabytes', 20 * 1024, 0) ctxt = context.RequestContext('admin', 'test_project', is_admin=True) quotas = dict(volumes=5, gigabytes=10 * 1024, ) deltas = dict(volumes=-2, gigabytes=-2 * 1024, ) result = sqa_api.quota_reserve(ctxt, self.resources, quotas, deltas, self.expire, 0, 0) self.assertEqual(set([]), self.sync_called) self.compare_usage(self.usages, [dict(resource='volumes', project_id='test_project', in_use=10, reserved=0, until_refresh=None), dict(resource='gigabytes', project_id='test_project', in_use=20 * 1024, reserved=0, until_refresh=None), ]) self.assertEqual({}, self.usages_created) self.compare_reservation(result, [dict(resource='volumes', usage_id=self.usages['volumes'], project_id='test_project', delta=-2), dict(resource='gigabytes', usage_id=self.usages['gigabytes'], project_id='test_project', delta=-2 * 1024), ]) @ddt.ddt class QuotaVolumeTypeReservationTestCase(test.TestCase): def setUp(self): super(QuotaVolumeTypeReservationTestCase, self).setUp() self.user_id = fake.USER_ID self.project_id = fake.PROJECT_ID self.context = context.RequestContext( self.user_id, self.project_id, is_admin=True, ) self.volume_type_name = CONF.default_volume_type self.volume_type = db.volume_type_get_by_name( self.context, name=self.volume_type_name, ) @mock.patch.object(quota.QUOTAS, 'reserve') @mock.patch.object(quota.QUOTAS, 'add_volume_type_opts') def test_volume_type_reservation( self, mock_add_volume_type_opts, mock_reserve, ): volume = fake_volume.fake_volume_obj( self.context, name='my_vol_name', id=fake.VOLUME_ID, size=1, project_id='vol_project_id', ) quota_utils.get_volume_type_reservation( self.context, volume, self.volume_type['id'], ) reserve_opts = {'volumes': 1, 'gigabytes': volume.size} mock_add_volume_type_opts.assert_called_once_with( 
self.context, reserve_opts, self.volume_type['id'], ) mock_reserve.assert_called_once_with( self.context, project_id='vol_project_id', gigabytes=1, volumes=1, ) @mock.patch.object(quota.QUOTAS, 'reserve') def test_volume_type_reservation_with_type_only(self, mock_reserve): volume = fake_volume.fake_volume_obj( self.context, name='my_vol_name', id=fake.VOLUME_ID, size=1, project_id='vol_project_id', ) quota_utils.get_volume_type_reservation( self.context, volume, self.volume_type['id'], reserve_vol_type_only=True, ) vtype_volume_quota = "%s_%s" % ('volumes', self.volume_type['name']) vtype_size_quota = "%s_%s" % ('gigabytes', self.volume_type['name']) reserve_opts = { vtype_volume_quota: 1, vtype_size_quota: volume.size, } mock_reserve.assert_called_once_with( self.context, project_id='vol_project_id', **reserve_opts, ) @ddt.data({'count_snaps': True, 'negative': True}, {'count_snaps': True, 'negative': False}, {'count_snaps': False, 'negative': True}, {'count_snaps': False, 'negative': False}) @ddt.unpack @mock.patch.object(quota.QUOTAS, 'reserve') def test_volume_type_reservation_snapshots_with_type_only(self, mock_reserve, count_snaps, negative): """Volume type reservations on volume with snapshots Test that when the volume has snapshots it takes them into account, and even calculates the quota correctly depending on no_snapshot_gb_quota configuration option. It should work for negative and positive quotas. """ self.override_config('no_snapshot_gb_quota', not count_snaps) snaps = [fake_snapshot.fake_db_snapshot(volume_size=1), fake_snapshot.fake_db_snapshot(volume_size=2)] volume = fake_volume.fake_volume_obj( self.context, expected_attrs=['snapshots'], name='my_vol_name', id=fake.VOLUME_ID, size=1, project_id=fake.PROJECT_ID, snapshots=snaps) quota_utils.get_volume_type_reservation( self.context, volume, self.volume_type['id'], reserve_vol_type_only=True, negative=negative) factor = -1 if negative else 1 if count_snaps: snaps_size = (snaps[0]['volume_size'] + snaps[1]['volume_size']) else: snaps_size = 0 vtype_volume_quota = "volumes_%s" % self.volume_type['name'] vtype_snapshot_quota = "snapshots_%s" % self.volume_type['name'] vtype_size_quota = "%s_%s" % ('gigabytes', self.volume_type['name']) reserve_opts = { vtype_volume_quota: factor * 1, vtype_snapshot_quota: factor * 2, vtype_size_quota: factor * (volume['size'] + snaps_size), } mock_reserve.assert_called_once_with( self.context, project_id=fake.PROJECT_ID, **reserve_opts) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/test_quota_utils.py0000664000175000017500000001606000000000000022441 0ustar00zuulzuul00000000000000# Copyright 2016 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
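# Illustrative sketch (not part of the original module): the volume-type
# reservation tests at the end of test_quota.py above build per-type quota
# deltas by suffixing the base resource name with the volume type name and
# applying a sign factor for negative (release) reservations.
# _build_type_reserve_opts is a hypothetical helper that only shows the shape
# of those reserve options.
def _build_type_reserve_opts(type_name, volumes, gigabytes, snapshots=0,
                             negative=False):
    factor = -1 if negative else 1
    opts = {
        'volumes_%s' % type_name: factor * volumes,
        'gigabytes_%s' % type_name: factor * gigabytes,
    }
    if snapshots:
        opts['snapshots_%s' % type_name] = factor * snapshots
    return opts

# e.g. a 1 GB volume of type 'lvm' with two snapshots totalling 3 GB:
assert _build_type_reserve_opts('lvm', 1, 4, snapshots=2) == {
    'volumes_lvm': 1, 'gigabytes_lvm': 4, 'snapshots_lvm': 2}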
from unittest import mock from oslo_config import cfg from oslo_config import fixture as config_fixture from cinder.api import api_utils from cinder import context from cinder import exception from cinder import quota_utils from cinder.tests.unit import fake_constants as fake from cinder.tests.unit import test CONF = cfg.CONF class QuotaUtilsTest(test.TestCase): def setUp(self): super(QuotaUtilsTest, self).setUp() self.auth_url = 'http://localhost:5000' self.context = context.RequestContext('fake_user', 'fake_proj_id') self.fixture = self.useFixture(config_fixture.Config(CONF)) self.fixture.config(auth_url=self.auth_url, group='keystone_authtoken') @mock.patch('keystoneclient.client.Client') @mock.patch('keystoneauth1.session.Session') def test_keystone_client_instantiation(self, ksclient_session, ksclient_class): api_utils._keystone_client(self.context) ksclient_class.assert_called_once_with(auth_url=self.auth_url, session=ksclient_session(), version=(3, 0)) @mock.patch('keystoneclient.client.Client') @mock.patch('keystoneauth1.session.Session') @mock.patch('keystoneauth1.identity.Token') def test_keystone_client_instantiation_system_scope( self, ks_token, ksclient_session, ksclient_class): system_context = context.RequestContext( 'fake_user', 'fake_proj_id', system_scope='all') api_utils._keystone_client(system_context) ks_token.assert_called_once_with( auth_url=self.auth_url, token=system_context.auth_token, system_scope=system_context.system_scope) @mock.patch('keystoneclient.client.Client') @mock.patch('keystoneauth1.session.Session') @mock.patch('keystoneauth1.identity.Token') def test_keystone_client_instantiation_domain_scope( self, ks_token, ksclient_session, ksclient_class): domain_context = context.RequestContext( 'fake_user', 'fake_proj_id', domain_id='default') api_utils._keystone_client(domain_context) ks_token.assert_called_once_with( auth_url=self.auth_url, token=domain_context.auth_token, domain_id=domain_context.domain_id) @mock.patch('keystoneclient.client.Client') @mock.patch('keystoneauth1.session.Session') @mock.patch('keystoneauth1.identity.Token') def test_keystone_client_instantiation_project_scope( self, ks_token, ksclient_session, ksclient_class): project_context = context.RequestContext( 'fake_user', project_id=fake.PROJECT_ID) api_utils._keystone_client(project_context) ks_token.assert_called_once_with( auth_url=self.auth_url, token=project_context.auth_token, project_id=project_context.project_id) def _setup_mock_ksclient(self, mock_client, version='v3', subtree=None, parents=None): keystoneclient = mock_client.return_value keystoneclient.version = version proj = self.FakeProject(self.context.project_id) proj.subtree = subtree if parents: proj.parents = parents proj.parent_id = next(iter(parents.keys())) keystoneclient.projects.get.return_value = proj def _process_reserve_over_quota(self, overs, usages, quotas, expected_ex, resource='volumes'): ctxt = context.get_admin_context() ctxt.project_id = 'fake' size = 1 kwargs = {'overs': overs, 'usages': usages, 'quotas': quotas} exc = exception.OverQuota(**kwargs) self.assertRaises(expected_ex, quota_utils.process_reserve_over_quota, ctxt, exc, resource=resource, size=size) def test_volume_size_exceed_quota(self): overs = ['gigabytes'] usages = {'gigabytes': {'reserved': 1, 'in_use': 9}} quotas = {'gigabytes': 10, 'snapshots': 10} self._process_reserve_over_quota( overs, usages, quotas, exception.VolumeSizeExceedsAvailableQuota) def test_snapshot_limit_exceed_quota(self): overs = ['snapshots'] usages = {'snapshots': 
{'reserved': 1, 'in_use': 9}} quotas = {'gigabytes': 10, 'snapshots': 10} self._process_reserve_over_quota( overs, usages, quotas, exception.SnapshotLimitExceeded, resource='snapshots') def test_backup_gigabytes_exceed_quota(self): overs = ['backup_gigabytes'] usages = {'backup_gigabytes': {'reserved': 1, 'in_use': 9}} quotas = {'backup_gigabytes': 10} self._process_reserve_over_quota( overs, usages, quotas, exception.VolumeBackupSizeExceedsAvailableQuota, resource='backups') def test_backup_limit_quota(self): overs = ['backups'] usages = {'backups': {'reserved': 1, 'in_use': 9}} quotas = {'backups': 9} self._process_reserve_over_quota( overs, usages, quotas, exception.BackupLimitExceeded, resource='backups') def test_volumes_limit_quota(self): overs = ['volumes'] usages = {'volumes': {'reserved': 1, 'in_use': 9}} quotas = {'volumes': 9} self._process_reserve_over_quota( overs, usages, quotas, exception.VolumeLimitExceeded) def test_groups_limit_quota(self): overs = ['groups'] usages = {'groups': {'reserved': 1, 'in_use': 9}} quotas = {'groups': 9} self._process_reserve_over_quota( overs, usages, quotas, exception.GroupLimitExceeded, resource='groups') def test_unknown_quota(self): overs = ['unknown'] usages = {'volumes': {'reserved': 1, 'in_use': 9}} quotas = {'volumes': 9} self._process_reserve_over_quota( overs, usages, quotas, exception.UnexpectedOverQuota) def test_unknown_quota2(self): overs = ['volumes'] usages = {'volumes': {'reserved': 1, 'in_use': 9}} quotas = {'volumes': 9} self._process_reserve_over_quota( overs, usages, quotas, exception.UnexpectedOverQuota, resource='snapshots') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/test_rpc.py0000664000175000017500000001064700000000000020661 0ustar00zuulzuul00000000000000# Copyright 2015 Intel Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
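# Illustrative sketch (not part of the original module): the
# process_reserve_over_quota tests in test_quota_utils.py above expect the
# resource named in an OverQuota error to be translated into a
# resource-specific exception.  _pick_over_quota_exception is a hypothetical
# stand-in that only shows that mapping; the real helper also builds the
# exception keyword arguments.
def _pick_over_quota_exception(over, resource):
    mapping = {
        ('gigabytes', 'volumes'): 'VolumeSizeExceedsAvailableQuota',
        ('snapshots', 'snapshots'): 'SnapshotLimitExceeded',
        ('backup_gigabytes', 'backups'): 'VolumeBackupSizeExceedsAvailableQuota',
        ('backups', 'backups'): 'BackupLimitExceeded',
        ('volumes', 'volumes'): 'VolumeLimitExceeded',
        ('groups', 'groups'): 'GroupLimitExceeded',
    }
    return mapping.get((over, resource), 'UnexpectedOverQuota')

assert _pick_over_quota_exception('volumes', 'volumes') == 'VolumeLimitExceeded'
assert _pick_over_quota_exception('unknown', 'volumes') == 'UnexpectedOverQuota'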
from unittest import mock import ddt from cinder.objects import base from cinder import rpc from cinder.tests.unit import test class FakeAPI(rpc.RPCAPI): RPC_API_VERSION = '1.5' TOPIC = 'cinder-scheduler-topic' BINARY = 'cinder-scheduler' @ddt.ddt class RPCAPITestCase(test.TestCase): """Tests RPCAPI mixin aggregating stuff related to RPC compatibility.""" def setUp(self): super(RPCAPITestCase, self).setUp() # Reset cached version pins rpc.LAST_RPC_VERSIONS = {} rpc.LAST_OBJ_VERSIONS = {} @mock.patch('cinder.objects.Service.get_minimum_rpc_version', return_value='1.2') @mock.patch('cinder.objects.Service.get_minimum_obj_version') @mock.patch('cinder.rpc.get_client') def test_init(self, get_client, get_min_obj, get_min_rpc): def fake_get_client(target, version_cap, serializer): self.assertEqual(FakeAPI.TOPIC, target.topic) self.assertEqual(FakeAPI.RPC_API_VERSION, target.version) self.assertEqual('1.2', version_cap) self.assertEqual(self.latest_ovo_version, serializer.version_cap) get_min_obj.return_value = self.latest_ovo_version get_client.side_effect = fake_get_client FakeAPI() @mock.patch('cinder.objects.Service.get_minimum_rpc_version', return_value=None) @mock.patch('cinder.objects.Service.get_minimum_obj_version', return_value=None) @mock.patch('cinder.objects.base.CinderObjectSerializer') @mock.patch('cinder.rpc.get_client') def test_init_none_caps(self, get_client, serializer, get_min_obj, get_min_rpc): """Test that with no service latest versions are selected.""" FakeAPI() serializer.assert_called_once_with(base.OBJ_VERSIONS.get_current()) get_client.assert_called_once_with(mock.ANY, version_cap=FakeAPI.RPC_API_VERSION, serializer=serializer.return_value) self.assertTrue(get_min_obj.called) self.assertTrue(get_min_rpc.called) @mock.patch('cinder.objects.Service.get_minimum_rpc_version') @mock.patch('cinder.objects.Service.get_minimum_obj_version') @mock.patch('cinder.rpc.get_client') @mock.patch('cinder.rpc.LAST_RPC_VERSIONS', {'cinder-scheduler': '1.4'}) def test_init_cached_caps(self, get_client, get_min_obj, get_min_rpc): def fake_get_client(target, version_cap, serializer): self.assertEqual(FakeAPI.TOPIC, target.topic) self.assertEqual(FakeAPI.RPC_API_VERSION, target.version) self.assertEqual('1.4', version_cap) self.assertEqual(self.latest_ovo_version, serializer.version_cap) get_client.side_effect = fake_get_client with mock.patch('cinder.rpc.LAST_OBJ_VERSIONS', {'cinder-scheduler': self.latest_ovo_version}): FakeAPI() get_min_obj.assert_not_called() get_min_rpc.assert_not_called() @ddt.data([], ['noop'], ['noop', 'noop']) @mock.patch('oslo_messaging.JsonPayloadSerializer', wraps=True) def test_init_no_notifications(self, driver, serializer_mock): """Test short-circuiting notifications with default and noop driver.""" self.override_config('driver', driver, group='oslo_messaging_notifications') rpc.init(test.CONF) self.assertEqual(rpc.utils.DO_NOTHING, rpc.NOTIFIER) serializer_mock.assert_not_called() @mock.patch.object(rpc, 'messaging') def test_init_notifications(self, messaging_mock): rpc.init(test.CONF) self.assertTrue(messaging_mock.JsonPayloadSerializer.called) self.assertTrue(messaging_mock.Notifier.called) self.assertEqual(rpc.NOTIFIER, messaging_mock.Notifier.return_value) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/test_service.py0000664000175000017500000006312000000000000021527 0ustar00zuulzuul00000000000000 # Copyright 2010 United States Government as represented by the 
# Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Unit Tests for remote procedure calls using queue.""" from unittest import mock import ddt from oslo_concurrency import processutils from oslo_config import cfg from oslo_db import exception as db_exc from cinder.common import constants from cinder import context from cinder import db from cinder import exception from cinder import manager from cinder import objects from cinder.objects import fields from cinder import rpc from cinder import service from cinder.tests.unit import test test_service_opts = [ cfg.StrOpt("fake_manager", default="cinder.tests.unit.test_service.FakeManager", help="Manager for testing"), cfg.StrOpt("test_service_listen", help="Host to bind test service to"), cfg.IntOpt("test_service_listen_port", default=0, help="Port number to bind test service to"), ] CONF = cfg.CONF CONF.register_opts(test_service_opts) class FakeManager(manager.Manager): """Fake manager for tests.""" def __init__(self, host=None, service_name=None, cluster=None): super().__init__(host=host, cluster=cluster) def test_method(self): return 'manager' class ExtendedService(service.Service): def test_method(self): return 'service' class ServiceManagerTestCase(test.TestCase): """Test cases for Services.""" def test_message_gets_to_manager(self): serv = service.Service('test', 'test', 'test', 'cinder.tests.unit.test_service.FakeManager') serv.start() self.assertEqual('manager', serv.test_method()) def test_override_manager_method(self): serv = ExtendedService('test', 'test', 'test', 'cinder.tests.unit.test_service.FakeManager') serv.start() self.assertEqual('service', serv.test_method()) @mock.patch('cinder.rpc.LAST_OBJ_VERSIONS', {'test': '1.5'}) @mock.patch('cinder.rpc.LAST_RPC_VERSIONS', {'test': '1.3'}) def test_reset(self): serv = service.Service('test', 'test', 'test', 'cinder.tests.unit.test_service.FakeManager') serv.start() serv.reset() self.assertEqual({}, rpc.LAST_OBJ_VERSIONS) self.assertEqual({}, rpc.LAST_RPC_VERSIONS) def test_start_refresh_service_id(self): serv = service.Service('test', 'test', 'test', 'cinder.tests.unit.test_service.FakeManager') # record the original service id serv_id = serv.service_id self.assertEqual(serv.origin_service_id, service.Service.service_id) # update service id to another value service.Service.service_id = serv_id + 1 # make sure the class attr service_id has been changed self.assertNotEqual(serv.origin_service_id, service.Service.service_id) # call start method serv.start() # After start, the service id is refreshed to the original service_id self.assertEqual(serv_id, service.Service.service_id) class ServiceFlagsTestCase(test.TestCase): def test_service_enabled_on_create_based_on_flag(self): ctxt = context.get_admin_context() self.flags(enable_new_services=True) host = 'foo' binary = 'cinder-fake' cluster = 'cluster' app = service.Service.create(host=host, binary=binary, cluster=cluster) ref = db.service_get(ctxt, app.service_id)
db.service_destroy(ctxt, app.service_id) self.assertFalse(ref.disabled) # Check that the cluster is also enabled db_cluster = objects.ClusterList.get_all(ctxt)[0] self.assertFalse(db_cluster.disabled) db.cluster_destroy(ctxt, db_cluster.id) def test_service_disabled_on_create_based_on_flag(self): ctxt = context.get_admin_context() self.flags(enable_new_services=False) host = 'foo' binary = 'cinder-fake' cluster = 'cluster' app = service.Service.create(host=host, binary=binary, cluster=cluster) ref = db.service_get(ctxt, app.service_id) db.service_destroy(ctxt, app.service_id) self.assertTrue(ref.disabled) # Check that the cluster is also disabled db_cluster = objects.ClusterList.get_all(ctxt)[0] self.assertTrue(db_cluster.disabled) db.cluster_destroy(ctxt, db_cluster.id) @ddt.ddt class ServiceTestCase(test.TestCase): """Test cases for Services.""" def setUp(self): super(ServiceTestCase, self).setUp() self.host = 'foo' self.binary = 'cinder-fake' self.topic = 'fake' self.service_ref = {'host': self.host, 'binary': self.binary, 'topic': self.topic, 'report_count': 0, 'availability_zone': 'nova', 'id': 1, 'uuid': 'a3a593da-7f8d-4bb7-8b4c-f2bc1e0b4824'} self.ctxt = context.get_admin_context() def _check_app(self, app, cluster=None, cluster_exists=None, svc_id=None, added_to_cluster=True): """Check that Service instance and DB service and cluster are ok.""" self.assertIsNotNone(app) # Check that we have the service ID self.assertTrue(hasattr(app, 'service_id')) if svc_id: self.assertEqual(svc_id, app.service_id) # Check that cluster has been properly set self.assertEqual(cluster, app.cluster) # Check that the entry has been really created in the DB svc = objects.Service.get_by_id(self.ctxt, app.service_id) cluster_name = cluster if cluster_exists is not False else None # Check that cluster name matches self.assertEqual(cluster_name, svc.cluster_name) clusters = objects.ClusterList.get_all(self.ctxt) if cluster_name: # Make sure we have created the cluster in the DB self.assertEqual(1, len(clusters)) cluster = clusters[0] self.assertEqual(cluster_name, cluster.name) self.assertEqual(self.binary, cluster.binary) else: # Make sure we haven't created any cluster in the DB self.assertListEqual([], clusters.objects) self.assertEqual(added_to_cluster, app.added_to_cluster) def test_create_with_cluster_not_upgrading(self): """Test DB cluster creation when service is created.""" cluster_name = 'cluster' app = service.Service.create(host=self.host, binary=self.binary, cluster=cluster_name, topic=self.topic) self._check_app(app, cluster_name) def test_create_svc_exists_upgrade_cluster(self): """Test that we update cluster_name field when cfg has changed.""" # Create the service in the DB db_svc = db.service_create(context.get_admin_context(), {'host': self.host, 'binary': self.binary, 'topic': self.topic, 'cluster_name': None}) cluster_name = 'cluster' app = service.Service.create(host=self.host, binary=self.binary, cluster=cluster_name, topic=self.topic) self._check_app(app, cluster_name, svc_id=db_svc.id, added_to_cluster=cluster_name) @mock.patch.object(objects.service.Service, 'get_by_args') @mock.patch.object(objects.service.Service, 'get_by_id') def test_report_state_newly_disconnected(self, get_by_id, get_by_args): get_by_args.side_effect = exception.NotFound() get_by_id.side_effect = db_exc.DBConnectionError() with mock.patch.object(objects.service, 'db') as mock_db: mock_db.service_create.return_value = self.service_ref serv = service.Service( self.host, self.binary, self.topic,
'cinder.tests.unit.test_service.FakeManager' ) serv.start() serv.report_state() self.assertTrue(serv.model_disconnected) self.assertFalse(mock_db.service_update.called) @mock.patch.object(objects.service.Service, 'get_by_args') @mock.patch.object(objects.service.Service, 'get_by_id') def test_report_state_disconnected_DBError(self, get_by_id, get_by_args): get_by_args.side_effect = exception.NotFound() get_by_id.side_effect = db_exc.DBError() with mock.patch.object(objects.service, 'db') as mock_db: mock_db.service_create.return_value = self.service_ref serv = service.Service( self.host, self.binary, self.topic, 'cinder.tests.unit.test_service.FakeManager' ) serv.start() serv.report_state() self.assertTrue(serv.model_disconnected) self.assertFalse(mock_db.service_update.called) @mock.patch('cinder.db.sqlalchemy.api.service_update') @mock.patch('cinder.db.sqlalchemy.api.service_get') def test_report_state_newly_connected(self, get_by_id, service_update): get_by_id.return_value = self.service_ref serv = service.Service( self.host, self.binary, self.topic, 'cinder.tests.unit.test_service.FakeManager' ) serv.start() serv.model_disconnected = True serv.report_state() self.assertFalse(serv.model_disconnected) self.assertTrue(service_update.called) def test_report_state_manager_not_working(self): with mock.patch('cinder.db') as mock_db: mock_db.service_get.return_value = self.service_ref serv = service.Service( self.host, self.binary, self.topic, 'cinder.tests.unit.test_service.FakeManager' ) serv.manager.is_working = mock.Mock(return_value=False) serv.start() serv.report_state() serv.manager.is_working.assert_called_once_with() self.assertFalse(mock_db.service_update.called) def test_service_with_long_report_interval(self): self.override_config('service_down_time', 10) self.override_config('report_interval', 10) service.Service.create( binary="test_service", manager="cinder.tests.unit.test_service.FakeManager") self.assertEqual(25, CONF.service_down_time) @mock.patch.object(rpc, 'get_server') @mock.patch('cinder.db') def test_service_stop_waits_for_rpcserver(self, mock_db, mock_rpc): serv = service.Service( self.host, self.binary, self.topic, 'cinder.tests.unit.test_service.FakeManager' ) serv.start() serv.stop() serv.wait() serv.rpcserver.start.assert_called_once_with() serv.rpcserver.stop.assert_called_once_with() serv.rpcserver.wait.assert_called_once_with() @mock.patch('cinder.service.Service.report_state') @mock.patch('cinder.service.Service.periodic_tasks') @mock.patch.object(rpc, 'get_server') @mock.patch('cinder.db') def test_service_stop_wait(self, mock_db, mock_rpc, mock_periodic, mock_report): """Test that we wait for loopcalls only if stop succeeds.""" serv = service.Service( self.host, self.binary, self.topic, 'cinder.tests.unit.test_service.FakeManager', report_interval=5, periodic_interval=10, ) serv.start() serv.stop() serv.wait() serv.rpcserver.start.assert_called_once_with() serv.rpcserver.stop.assert_called_once_with() serv.rpcserver.wait.assert_called_once_with() @mock.patch('cinder.manager.Manager.init_host') @mock.patch('oslo_messaging.Target') @mock.patch.object(rpc, 'get_server') def _check_rpc_servers_and_init_host(self, app, added_to_cluster, cluster, rpc_mock, target_mock, init_host_mock): app.start() # Since we have created the service entry we call init_host with # added_to_cluster=True init_host_mock.assert_called_once_with( added_to_cluster=added_to_cluster, service_id=self.service_ref['id']) expected_target_calls = [mock.call(topic=self.topic, server=self.host)] 
expected_rpc_calls = [mock.call(target_mock.return_value, mock.ANY, mock.ANY), mock.call().start()] if cluster and added_to_cluster: self.assertIsNotNone(app.cluster_rpcserver) expected_target_calls.append(mock.call( topic=self.topic + '.' + cluster, server=cluster.split('@')[0])) expected_rpc_calls.extend(expected_rpc_calls[:]) # Check that we create message targets for host and cluster target_mock.assert_has_calls(expected_target_calls) # Check we get and start rpc services for host and cluster rpc_mock.assert_has_calls(expected_rpc_calls) self.assertIsNotNone(app.rpcserver) app.stop() @mock.patch('cinder.objects.Service.get_minimum_obj_version') def test_start_rpc_and_init_host_cluster(self, get_min_obj_mock): """Test that with cluster we create the rpc service.""" # cluster was introduced in 1.7, so latest will be enough to test this get_min_obj_mock.return_value = self.latest_ovo_version cluster = 'cluster@backend#pool' self.host = 'host@backend#pool' app = service.Service.create(host=self.host, binary=constants.VOLUME_BINARY, cluster=cluster, topic=self.topic) self._check_rpc_servers_and_init_host(app, True, cluster) @mock.patch('cinder.objects.Cluster.get_by_id') def test_ensure_cluster_exists_no_cluster(self, get_mock): app = service.Service.create(host=self.host, binary=self.binary, topic=self.topic) svc = objects.Service.get_by_id(self.ctxt, app.service_id) app._ensure_cluster_exists(self.ctxt, svc) get_mock.assert_not_called() self.assertEqual({}, svc.cinder_obj_get_changes()) @mock.patch('cinder.objects.Cluster.get_by_id') def test_ensure_cluster_exists_cluster_exists_non_relicated(self, get_mock): cluster = objects.Cluster( name='cluster_name', active_backend_id=None, frozen=False, replication_status=fields.ReplicationStatus.NOT_CAPABLE) get_mock.return_value = cluster app = service.Service.create(host=self.host, binary=self.binary, topic=self.topic) svc = objects.Service.get_by_id(self.ctxt, app.service_id) app.cluster = cluster.name app._ensure_cluster_exists(self.ctxt, svc) get_mock.assert_called_once_with(self.ctxt, None, name=cluster.name, binary=app.binary) self.assertEqual({}, svc.cinder_obj_get_changes()) @mock.patch('cinder.objects.Cluster.get_by_id') def test_ensure_cluster_exists_cluster_change(self, get_mock): """We copy replication fields from the cluster to the service.""" changes = dict(replication_status=fields.ReplicationStatus.FAILED_OVER, active_backend_id='secondary', frozen=True) cluster = objects.Cluster(name='cluster_name', **changes) get_mock.return_value = cluster app = service.Service.create(host=self.host, binary=self.binary, topic=self.topic) svc = objects.Service.get_by_id(self.ctxt, app.service_id) app.cluster = cluster.name app._ensure_cluster_exists(self.ctxt, svc) get_mock.assert_called_once_with(self.ctxt, None, name=cluster.name, binary=app.binary) self.assertEqual(changes, svc.cinder_obj_get_changes()) @mock.patch('cinder.objects.Cluster.get_by_id') def test_ensure_cluster_exists_cluster_no_change(self, get_mock): """Don't copy replication fields from cluster if replication error.""" changes = dict(replication_status=fields.ReplicationStatus.FAILED_OVER, active_backend_id='secondary', frozen=True) cluster = objects.Cluster(name='cluster_name', **changes) get_mock.return_value = cluster app = service.Service.create(host=self.host, binary=self.binary, topic=self.topic) svc = objects.Service.get_by_id(self.ctxt, app.service_id) svc.replication_status = fields.ReplicationStatus.ERROR svc.obj_reset_changes() app.cluster = cluster.name 
app._ensure_cluster_exists(self.ctxt, svc) get_mock.assert_called_once_with(self.ctxt, None, name=cluster.name, binary=app.binary) self.assertEqual({}, svc.cinder_obj_get_changes()) def test_ensure_cluster_exists_cluster_create_replicated_and_non(self): """We use service replication fields to create the cluster.""" changes = dict(replication_status=fields.ReplicationStatus.FAILED_OVER, active_backend_id='secondary', frozen=True) app = service.Service.create(host=self.host, binary=self.binary, topic=self.topic) svc = objects.Service.get_by_id(self.ctxt, app.service_id) for key, value in changes.items(): setattr(svc, key, value) app.cluster = 'cluster_name' app._ensure_cluster_exists(self.ctxt, svc) cluster = objects.Cluster.get_by_id(self.ctxt, None, name=app.cluster) for key, value in changes.items(): self.assertEqual(value, getattr(cluster, key)) class TestWSGIService(test.TestCase): @mock.patch('oslo_service.wsgi.Loader') def test_service_random_port(self, mock_loader): test_service = service.WSGIService("test_service") self.assertEqual(0, test_service.port) test_service.start() self.assertNotEqual(0, test_service.port) test_service.stop() self.assertTrue(mock_loader.called) @mock.patch('oslo_service.wsgi.Loader') def test_reset_pool_size_to_default(self, mock_loader): test_service = service.WSGIService("test_service") test_service.start() # Stopping the service, which in turn sets pool size to 0 test_service.stop() self.assertEqual(0, test_service.server._pool.size) # Resetting pool size to default test_service.reset() test_service.start() self.assertEqual(cfg.CONF.wsgi_default_pool_size, test_service.server._pool.size) self.assertTrue(mock_loader.called) @mock.patch('oslo_service.wsgi.Loader') def test_workers_set_default(self, mock_loader): self.override_config('osapi_volume_listen_port', CONF.test_service_listen_port) test_service = service.WSGIService("osapi_volume") self.assertEqual(processutils.get_worker_count(), test_service.workers) self.assertTrue(mock_loader.called) @mock.patch('oslo_service.wsgi.Loader') def test_workers_set_good_user_setting(self, mock_loader): self.override_config('osapi_volume_listen_port', CONF.test_service_listen_port) self.override_config('osapi_volume_workers', 8) test_service = service.WSGIService("osapi_volume") self.assertEqual(8, test_service.workers) self.assertTrue(mock_loader.called) @mock.patch('oslo_service.wsgi.Loader') def test_workers_set_zero_user_setting(self, mock_loader): self.override_config('osapi_volume_listen_port', CONF.test_service_listen_port) self.override_config('osapi_volume_workers', 0) test_service = service.WSGIService("osapi_volume") # If a value less than 1 is used, defaults to number of procs # available self.assertEqual(processutils.get_worker_count(), test_service.workers) self.assertTrue(mock_loader.called) @mock.patch('oslo_service.wsgi.Loader') def test_workers_set_negative_user_setting(self, mock_loader): self.override_config('osapi_volume_workers', -1) self.assertRaises(exception.InvalidConfigurationValue, service.WSGIService, "osapi_volume") self.assertTrue(mock_loader.called) @mock.patch('oslo_service.wsgi.Server') @mock.patch('oslo_service.wsgi.Loader') def test_ssl_enabled(self, mock_loader, mock_server): self.override_config('osapi_volume_use_ssl', True) service.WSGIService("osapi_volume") mock_server.assert_called_once_with(mock.ANY, mock.ANY, mock.ANY, port=mock.ANY, host=mock.ANY, use_ssl=True) self.assertTrue(mock_loader.called) class OSCompatibilityTestCase(test.TestCase): def 
_test_service_launcher(self, fake_os): # Note(lpetrut): The cinder-volume service needs to be spawned # differently on Windows due to an eventlet bug. For this reason, # we must check the process launcher used. fake_process_launcher = mock.MagicMock() with mock.patch('os.name', fake_os): with mock.patch('cinder.service.process_launcher', fake_process_launcher): launcher = service.get_launcher() if fake_os == 'nt': self.assertEqual(service.Launcher, type(launcher)) else: self.assertEqual(fake_process_launcher(), launcher) def test_process_launcher_on_windows(self): self._test_service_launcher('nt') def test_process_launcher_on_linux(self): self._test_service_launcher('posix') class WindowsProcessLauncherTestCase(test.TestCase): @mock.patch.object(service, 'os_win_utilsfactory', create=True) @mock.patch('oslo_service.service.SignalHandler') def setUp(self, mock_signal_handler_cls, mock_utilsfactory): super(WindowsProcessLauncherTestCase, self).setUp() self._signal_handler = mock_signal_handler_cls.return_value self._processutils = mock_utilsfactory.get_processutils.return_value self._launcher = service.WindowsProcessLauncher() def test_setup_signal_handlers(self): exp_signal_map = {'SIGINT': self._launcher._terminate, 'SIGTERM': self._launcher._terminate} self._signal_handler.add_handler.assert_has_calls( [mock.call(signal, handler) for signal, handler in exp_signal_map.items()], any_order=True) @mock.patch('sys.exit') def test_terminate_handler(self, mock_exit): self._launcher._terminate(mock.sentinel.signum, mock.sentinel.frame) mock_exit.assert_called_once_with(1) @mock.patch('subprocess.Popen') def test_launch(self, mock_popen): mock_workers = [mock.Mock(), mock.Mock(), mock.Mock()] mock_popen.side_effect = mock_workers self._processutils.kill_process_on_job_close.side_effect = [ exception.CinderException, None, None] # We expect the first process to be cleaned up after failing # to setup a job object. self.assertRaises(exception.CinderException, self._launcher.add_process, mock.sentinel.cmd1) mock_workers[0].kill.assert_called_once_with() self._launcher.add_process(mock.sentinel.cmd2) self._launcher.add_process(mock.sentinel.cmd3) mock_popen.assert_has_calls( [mock.call(cmd) for cmd in [mock.sentinel.cmd1, mock.sentinel.cmd2, mock.sentinel.cmd3]]) self._processutils.kill_process_on_job_close.assert_has_calls( [mock.call(worker.pid) for worker in mock_workers[1:]]) self._launcher.wait() wait_processes = self._processutils.wait_for_multiple_processes wait_processes.assert_called_once_with( [worker.pid for worker in mock_workers[1:]], wait_all=True) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/test_service_auth.py0000664000175000017500000000556700000000000022563 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from unittest import mock

from keystoneauth1.identity.generic import password
from keystoneauth1 import loading as ks_loading
from keystoneauth1 import service_token

from cinder import context
from cinder import exception
from cinder import service_auth
from cinder.tests.unit import test


class ServiceAuthTestCase(test.TestCase):
    def setUp(self):
        super(ServiceAuthTestCase, self).setUp()
        self.ctx = context.RequestContext('fake', 'fake')
        service_auth.reset_globals()

    @mock.patch.object(ks_loading, 'load_auth_from_conf_options')
    def test_get_auth_plugin_no_wraps(self, mock_load):
        context = mock.MagicMock()
        context.get_auth_plugin.return_value = "fake"

        result = service_auth.get_auth_plugin(context)

        self.assertEqual("fake", result)
        mock_load.assert_not_called()

    @mock.patch.object(ks_loading, 'load_auth_from_conf_options')
    def test_get_auth_plugin_wraps(self, mock_load):
        self.flags(send_service_user_token=True, group='service_user')

        result = service_auth.get_auth_plugin(self.ctx)

        self.assertIsInstance(result, service_token.ServiceTokenAuthWrapper)
        mock_load.assert_called_once_with(mock.ANY, group='service_user')

    def test_service_auth_requested_but_no_auth_given(self):
        self.flags(send_service_user_token=True, group='service_user')
        self.assertRaises(exception.ServiceUserTokenNoAuth,
                          service_auth.get_auth_plugin, self.ctx)

    @mock.patch.object(ks_loading, 'load_auth_from_conf_options')
    def test_get_auth_plugin_with_auth(self, mock_load):
        self.flags(send_service_user_token=True, group='service_user')
        mock_load.return_value = password.Password

        result = service_auth.get_auth_plugin(
            self.ctx, auth=mock_load.return_value)

        self.assertEqual(mock_load.return_value, result.user_auth)
        self.assertIsInstance(result, service_token.ServiceTokenAuthWrapper)
        mock_load.assert_called_once_with(mock.ANY, group='service_user')

    def test_get_auth_plugin_with_auth_and_service_token_false(self):
        self.flags(send_service_user_token=False, group='service_user')
        n_auth = password.Password

        result = service_auth.get_auth_plugin(self.ctx, auth=n_auth)

        self.assertEqual(n_auth, result)

cinder-27.0.0/cinder/tests/unit/test_setup_profiler.py

# Copyright 2016 Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from unittest import mock

from cinder.common import constants
from cinder import service
from cinder.tests.unit import test


class SetupProfilerTestCase(test.TestCase):
    def setUp(self):
        super(SetupProfilerTestCase, self).setUp()
        service.osprofiler_initializer = mock.MagicMock()
        service.profiler = mock.MagicMock()
        service.profiler_opts = mock.MagicMock()
        service.osprofiler_initializer.init_from_conf = mock.MagicMock()

    def test_profiler_not_present(self):
        service.profiler = None
        service.LOG.debug = mock.MagicMock()
        service.setup_profiler(constants.VOLUME_BINARY, "localhost")
        service.LOG.debug.assert_called_once_with("osprofiler is not present")

    @mock.patch("cinder.service.context")
    def test_profiler_enabled(self, context):
        service.CONF.profiler.enabled = True
        return_value = {"Meaning Of Life": 42}
        context.get_admin_context().to_dict.return_value = return_value
        service.setup_profiler(constants.VOLUME_BINARY, "localhost")
        service.osprofiler_initializer.init_from_conf.assert_called_once_with(
            conf=service.CONF,
            context=return_value,
            project="cinder",
            service=constants.VOLUME_BINARY,
            host="localhost")

    def test_profiler_disabled(self):
        service.CONF.profiler.enabled = False
        service.setup_profiler(constants.VOLUME_BINARY, "localhost")
        service.osprofiler_initializer.init_from_conf.assert_not_called()

cinder-27.0.0/cinder/tests/unit/test_ssh_utils.py

# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from unittest import mock import uuid import paramiko from cinder import exception from cinder import ssh_utils from cinder.tests.unit import test class FakeSock(object): def settimeout(self, timeout): pass class FakeTransport(object): def __init__(self): self.active = True self.sock = FakeSock() def set_keepalive(self, timeout): pass def is_active(self): return self.active class FakeSSHClient(object): def __init__(self): self.id = uuid.uuid4() self.transport = FakeTransport() def set_missing_host_key_policy(self, policy): self.policy = policy def load_system_host_keys(self): self.system_host_keys = 'system_host_keys' def load_host_keys(self, hosts_key_file): self.hosts_key_file = hosts_key_file def connect(self, ip, port=22, username=None, password=None, pkey=None, timeout=10): pass def get_transport(self): return self.transport def get_policy(self): return self.policy def get_host_keys(self): return '127.0.0.1 ssh-rsa deadbeef' def close(self): pass def __call__(self, *args, **kwargs): pass class SSHPoolTestCase(test.TestCase): """Unit test for SSH Connection Pool.""" @mock.patch('paramiko.SSHClient') @mock.patch('os.path.isfile', return_value=True) def test_sshpool_remove(self, mock_isfile, mock_sshclient): ssh_to_remove = mock.MagicMock() mock_sshclient.side_effect = [mock.MagicMock(), ssh_to_remove, mock.MagicMock()] self.override_config('ssh_hosts_key_file', 'dummy') sshpool = ssh_utils.SSHPool("127.0.0.1", 22, 10, "test", password="test", min_size=3, max_size=3) self.assertIn(ssh_to_remove, list(sshpool.free_items)) sshpool.remove(ssh_to_remove) self.assertNotIn(ssh_to_remove, list(sshpool.free_items)) @mock.patch('paramiko.SSHClient') @mock.patch('os.path.isfile', return_value=True) def test_sshpool_remove_object_not_in_pool(self, mock_isfile, mock_sshclient): # create an SSH Client that is not a part of sshpool. 
ssh_to_remove = mock.MagicMock() mock_sshclient.side_effect = [mock.MagicMock(), mock.MagicMock()] self.override_config('ssh_hosts_key_file', 'dummy') sshpool = ssh_utils.SSHPool("127.0.0.1", 22, 10, "test", password="test", min_size=2, max_size=2) listBefore = list(sshpool.free_items) self.assertNotIn(ssh_to_remove, listBefore) sshpool.remove(ssh_to_remove) self.assertEqual(listBefore, list(sshpool.free_items)) @mock.patch('paramiko.SSHClient') @mock.patch('os.path.isfile', return_value=True) def test_ssh_default_hosts_key_file(self, mock_isfile, mock_sshclient): mock_ssh = mock.MagicMock() mock_sshclient.return_value = mock_ssh self.override_config('ssh_hosts_key_file', '/var/lib/cinder/ssh_known_hosts') # create with customized setting sshpool = ssh_utils.SSHPool("127.0.0.1", 22, 10, "test", password="test", min_size=1, max_size=1) host_key_files = sshpool.hosts_key_file self.assertEqual('/var/lib/cinder/ssh_known_hosts', host_key_files) mock_ssh.load_host_keys.assert_called_once_with( '/var/lib/cinder/ssh_known_hosts') @mock.patch('paramiko.SSHClient') @mock.patch('os.path.isfile', return_value=True) def test_ssh_host_key_file_kwargs(self, mock_isfile, mock_sshclient): mock_ssh = mock.MagicMock() mock_sshclient.return_value = mock_ssh self.override_config('ssh_hosts_key_file', '/var/lib/cinder/ssh_known_hosts') # create with customized setting sshpool = ssh_utils.SSHPool("127.0.0.1", 22, 10, "test", password="test", min_size=1, max_size=1, hosts_key_file='dummy_host_keyfile') host_key_files = sshpool.hosts_key_file self.assertIn('dummy_host_keyfile', host_key_files) self.assertIn('/var/lib/cinder/ssh_known_hosts', host_key_files) expected = [ mock.call.load_host_keys('dummy_host_keyfile'), mock.call.load_host_keys('/var/lib/cinder/ssh_known_hosts')] mock_ssh.assert_has_calls(expected, any_order=True) @mock.patch('os.path.isfile', return_value=True) @mock.patch('paramiko.RSAKey.from_private_key_file') @mock.patch('paramiko.SSHClient') def test_single_ssh_connect(self, mock_sshclient, mock_pkey, mock_isfile): self.override_config( 'ssh_hosts_key_file', '/var/lib/cinder/ssh_known_hosts') # create with password sshpool = ssh_utils.SSHPool("127.0.0.1", 22, 10, "test", password="test", min_size=1, max_size=1) with sshpool.item() as ssh: first_id = ssh.id with sshpool.item() as ssh: second_id = ssh.id self.assertEqual(first_id, second_id) self.assertEqual(1, mock_sshclient.return_value.connect.call_count) # create with private key sshpool = ssh_utils.SSHPool("127.0.0.1", 22, 10, "test", privatekey="test", min_size=1, max_size=1) self.assertEqual(2, mock_sshclient.return_value.connect.call_count) # attempt to create with no password or private key self.assertRaises(paramiko.SSHException, ssh_utils.SSHPool, "127.0.0.1", 22, 10, "test", min_size=1, max_size=1) @mock.patch('os.path.isfile', return_value=True) @mock.patch('paramiko.SSHClient') def test_closed_reopened_ssh_connections(self, mock_sshclient, mock_open): mock_sshclient.return_value = FakeSSHClient() sshpool = ssh_utils.SSHPool("127.0.0.1", 22, 10, "test", password="test", min_size=1, max_size=4) with sshpool.item() as ssh: mock_sshclient.reset_mock() first_id = ssh.id with sshpool.item() as ssh: second_id = ssh.id ssh.get_transport().active = False sshpool.remove(ssh) self.assertEqual(first_id, second_id) # create a new client mock_sshclient.return_value = FakeSSHClient() with sshpool.item() as ssh: third_id = ssh.id self.assertNotEqual(first_id, third_id) @mock.patch('paramiko.SSHClient') def test_missing_ssh_hosts_key_config(self, 
mock_sshclient): mock_sshclient.return_value = FakeSSHClient() self.override_config('ssh_hosts_key_file', None) # create with password self.assertRaises(exception.ParameterNotFound, ssh_utils.SSHPool, "127.0.0.1", 22, 10, "test", password="test", min_size=1, max_size=1) @mock.patch('cinder.ssh_utils.open', new_callable=mock.mock_open) @mock.patch('paramiko.SSHClient') def test_create_default_known_hosts_file(self, mock_sshclient, mock_open): mock_sshclient.return_value = FakeSSHClient() self.flags(state_path='/var/lib/cinder', ssh_hosts_key_file='/var/lib/cinder/ssh_known_hosts') default_file = '/var/lib/cinder/ssh_known_hosts' ssh_pool = ssh_utils.SSHPool("127.0.0.1", 22, 10, "test", password="test", min_size=1, max_size=1) with ssh_pool.item() as ssh: mock_open.assert_called_once_with(default_file, 'a') ssh_pool.remove(ssh) @mock.patch('os.path.isfile', return_value=False) @mock.patch('paramiko.SSHClient') def test_ssh_missing_hosts_key_file(self, mock_sshclient, mock_isfile): mock_sshclient.return_value = FakeSSHClient() self.flags(state_path='/var/lib/cinder', ssh_hosts_key_file='/tmp/blah') self.assertRaises(exception.InvalidInput, ssh_utils.SSHPool, "127.0.0.1", 22, 10, "test", password="test", min_size=1, max_size=1) @mock.patch('paramiko.SSHClient') @mock.patch('os.path.isfile', return_value=True) def test_ssh_strict_host_key_policy(self, mock_isfile, mock_sshclient): mock_sshclient.return_value = FakeSSHClient() self.flags(strict_ssh_host_key_policy=True, ssh_hosts_key_file='/var/lib/cinder/ssh_known_hosts') # create with customized setting sshpool = ssh_utils.SSHPool("127.0.0.1", 22, 10, "test", password="test", min_size=1, max_size=1) with sshpool.item() as ssh: self.assertIsInstance(ssh.get_policy(), paramiko.RejectPolicy) @mock.patch('paramiko.SSHClient') @mock.patch('os.path.isfile', return_value=True) def test_ssh_not_strict_host_key_policy(self, mock_isfile, mock_sshclient): mock_sshclient.return_value = FakeSSHClient() self.override_config('strict_ssh_host_key_policy', False) # create with customized setting sshpool = ssh_utils.SSHPool("127.0.0.1", 22, 10, "test", password="test", min_size=1, max_size=1) with sshpool.item() as ssh: self.assertIsInstance(ssh.get_policy(), paramiko.AutoAddPolicy) @mock.patch('paramiko.SSHClient') @mock.patch('cinder.ssh_utils.open', new_callable=mock.mock_open) @mock.patch('os.path.isfile', return_value=False) def test_ssh_timeout(self, mock_isfile, mock_open, mock_sshclient): self.flags(state_path='/var/lib/cinder', ssh_hosts_key_file='/var/lib/cinder/ssh_known_hosts') default_file = '/var/lib/cinder/ssh_known_hosts' sshpool = ssh_utils.SSHPool("127.0.0.1", 22, 10, "test", password="test", min_size=1, max_size=1) mock_open.assert_called_once_with(default_file, 'a') self.assertEqual(1, sshpool.current_size) conn = sshpool.get() conn.connect = mock.MagicMock() # create failed due to time out conn.connect.side_effect = paramiko.SSHException("time out") mock_transport = mock.MagicMock() conn.get_transport.return_value = mock_transport # connection is down mock_transport.is_active.return_value = False sshpool.put(conn) self.assertRaises(paramiko.SSHException, sshpool.get) self.assertEqual(0, sshpool.current_size) @mock.patch('os.path.isfile', return_value=True) @mock.patch('paramiko.RSAKey.from_private_key_file') @mock.patch('paramiko.SSHClient') def test_ssh_put(self, mock_sshclient, mock_pkey, mock_isfile): self.override_config( 'ssh_hosts_key_file', '/var/lib/cinder/ssh_known_hosts') fake_close = mock.MagicMock() fake = FakeSSHClient() 
fake.close = fake_close mock_sshclient.return_value = fake sshpool = ssh_utils.SSHPool("127.0.0.1", 22, 10, "test", password="test", min_size=5, max_size=5) self.assertEqual(5, sshpool.current_size) with sshpool.item(): pass self.assertEqual(5, sshpool.current_size) sshpool.resize(4) with sshpool.item(): pass self.assertEqual(4, sshpool.current_size) fake_close.assert_called_once_with() fake_close.reset_mock() sshpool.resize(3) with sshpool.item(): pass self.assertEqual(3, sshpool.current_size) fake_close.assert_called_once_with() @mock.patch('os.path.isfile', return_value=True) @mock.patch('paramiko.RSAKey.from_private_key_file') @mock.patch('paramiko.SSHClient') def test_ssh_destructor(self, mock_sshclient, mock_pkey, mock_isfile): self.override_config( 'ssh_hosts_key_file', '/var/lib/cinder/ssh_known_hosts') fake_close = mock.MagicMock() fake = FakeSSHClient() fake.close = fake_close mock_sshclient.return_value = fake # create with password sshpool = ssh_utils.SSHPool("127.0.0.1", 22, 10, "test", password="test", min_size=5, max_size=5) self.assertEqual(5, sshpool.current_size) close_expect_calls = [mock.call(), mock.call(), mock.call(), mock.call(), mock.call()] sshpool = ssh_utils.SSHPool("127.0.0.1", 22, 10, "test", password="test", min_size=5, max_size=5) self.assertEqual(fake_close.mock_calls, close_expect_calls) sshpool = None self.assertEqual(fake_close.mock_calls, close_expect_calls + close_expect_calls) @mock.patch('cinder.ssh_utils.paramiko', new=None) def test_missing_paramiko(self): self.assertRaises(exception.RequirementMissing, ssh_utils.SSHPool, '192.0.2.1', 22, 10, 'test', password='hello') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/test_test.py0000664000175000017500000000542100000000000021046 0ustar00zuulzuul00000000000000 # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Tests for the testing base code.""" from unittest import mock from oslo_config import cfg import oslo_messaging as messaging from cinder import rpc from cinder.tests.unit import test class IsolationTestCase(test.TestCase): """Ensure that things are cleaned up after failed tests. These tests don't really do much here, but if isolation fails a bunch of other tests should fail. """ def test_service_isolation(self): self.start_service('volume') def test_rpc_consumer_isolation(self): class NeverCalled(object): def __getattribute__(self, name): if name == 'target' or name == 'oslo_rpc_server_ping': # oslo.messaging 5.31.0 explicitly looks for 'target' # on the endpoint and checks its type, so we can't avoid # it here, just ignore it if that's the case. return self.fail(msg="I should never get called. 
name: %s" % name) server = rpc.get_server(messaging.Target(topic='volume', server=cfg.CONF.host), endpoints=[NeverCalled()]) server.start() class MockAssertTestCase(test.TestCase): """Ensure that valid mock assert methods are used.""" def test_assert_has_calls(self): mock_call = mock.MagicMock(return_value=None) mock_call(1) mock_call(2) mock_call.assert_has_calls([mock.call(1), mock.call(2)]) def test_assert_any_call(self): mock_call = mock.MagicMock(return_value=None) mock_call(1) mock_call(2) mock_call(3) mock_call.assert_any_call(1) def test_assert_called_with(self): mock_call = mock.MagicMock(return_value=None) mock_call(1, 'foo', a='123') mock_call.assert_called_with(1, 'foo', a='123') def test_assert_called_once_with(self): mock_call = mock.MagicMock(return_value=None) mock_call(1, 'foobar', a='123') mock_call.assert_called_once_with(1, 'foobar', a='123') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/test_test_utils.py0000664000175000017500000000310300000000000022261 0ustar00zuulzuul00000000000000# # Copyright 2010 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from cinder.tests.unit import test from cinder.tests.unit import utils as test_utils class TestUtilsTestCase(test.TestCase): def test_get_test_admin_context(self): """get_test_admin_context's return value behaves like admin context.""" ctxt = test_utils.get_test_admin_context() self.assertIsNone(ctxt.project_id) self.assertIsNone(ctxt.user_id) self.assertIsNone(ctxt.domain_id) self.assertIsNone(ctxt.project_domain_id) self.assertIsNone(ctxt.user_domain_id) self.assertIsNone(ctxt.project_name) self.assertIsNone(ctxt.remote_address) self.assertIsNone(ctxt.auth_token) self.assertIsNone(ctxt.quota_class) self.assertIsNotNone(ctxt.request_id) self.assertIsNotNone(ctxt.timestamp) self.assertEqual(['admin'], ctxt.roles) self.assertEqual([], ctxt.service_catalog) self.assertEqual('no', ctxt.read_deleted) self.assertTrue(ctxt.is_admin) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/test_utils.py0000664000175000017500000020416700000000000021237 0ustar00zuulzuul00000000000000# Copyright 2011 Justin Santa Barbara # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import datetime import json import os import sys from unittest import mock import ddt from oslo_utils import timeutils import webob.exc import cinder from cinder.api import api_utils from cinder import context from cinder import exception from cinder.tests.unit import fake_constants as fake from cinder.tests.unit import test from cinder import utils from cinder.volume import volume_utils POOL_CAPS = {'total_capacity_gb': 0, 'free_capacity_gb': 0, 'allocated_capacity_gb': 0, 'provisioned_capacity_gb': 0, 'max_over_subscription_ratio': '1.0', 'thin_provisioning_support': False, 'thick_provisioning_support': True, 'reserved_percentage': 0, 'volume_backend_name': 'lvm1', 'timestamp': timeutils.utcnow(), 'multiattach': True, 'uuid': 'a3a593da-7f8d-4bb7-8b4c-f2bc1e0b4824'} class ExecuteTestCase(test.TestCase): @mock.patch('cinder.utils.processutils.execute') def test_execute(self, mock_putils_exe): output = utils.execute('a', 1, foo='bar') self.assertEqual(mock_putils_exe.return_value, output) mock_putils_exe.assert_called_once_with('a', 1, foo='bar') @mock.patch('cinder.utils.get_root_helper') @mock.patch('cinder.utils.processutils.execute') def test_execute_root(self, mock_putils_exe, mock_get_helper): output = utils.execute('a', 1, foo='bar', run_as_root=True) self.assertEqual(mock_putils_exe.return_value, output) mock_helper = mock_get_helper.return_value mock_putils_exe.assert_called_once_with('a', 1, foo='bar', run_as_root=True, root_helper=mock_helper) @mock.patch('cinder.utils.get_root_helper', autospec=True) @mock.patch('cinder.utils.processutils.execute', autospec=True) def test_execute_root_and_helper(self, mock_putils_exe, mock_get_helper): mock_helper = mock.sentinel output = utils.execute('a', 1, foo='bar', run_as_root=True, root_helper=mock_helper) self.assertEqual(mock_putils_exe.return_value, output) mock_get_helper.assert_not_called() mock_putils_exe.assert_called_once_with('a', 1, foo='bar', run_as_root=True, root_helper=mock_helper) @ddt.ddt class GenericUtilsTestCase(test.TestCase): def test_as_int(self): test_obj_int = '2' test_obj_float = '2.2' for obj in [test_obj_int, test_obj_float]: self.assertEqual(2, utils.as_int(obj)) obj = 'not_a_number' self.assertEqual(obj, utils.as_int(obj)) self.assertRaises(TypeError, utils.as_int, obj, quiet=False) def test_check_exclusive_options(self): utils.check_exclusive_options() utils.check_exclusive_options(something=None, pretty_keys=True, unit_test=True) self.assertRaises(exception.InvalidInput, utils.check_exclusive_options, test=True, unit=False, pretty_keys=True) self.assertRaises(exception.InvalidInput, utils.check_exclusive_options, test=True, unit=False, pretty_keys=False) def test_hostname_unicode_sanitization(self): hostname = u"\u7684.test.example.com" self.assertEqual("test.example.com", volume_utils.sanitize_hostname(hostname)) def test_hostname_sanitize_periods(self): hostname = "....test.example.com..." 
self.assertEqual("test.example.com", volume_utils.sanitize_hostname(hostname)) def test_hostname_sanitize_dashes(self): hostname = "----test.example.com---" self.assertEqual("test.example.com", volume_utils.sanitize_hostname(hostname)) def test_hostname_sanitize_characters(self): hostname = "(#@&$!(@*--#&91)(__=+--test-host.example!!.com-0+" self.assertEqual("91----test-host.example.com-0", volume_utils.sanitize_hostname(hostname)) def test_hostname_translate(self): hostname = "<}\x1fh\x10e\x08l\x02l\x05o\x12!{>" self.assertEqual("hello", volume_utils.sanitize_hostname(hostname)) @mock.patch('os.path.join', side_effect=lambda x, y: '/'.join((x, y))) def test_make_dev_path(self, mock_join): self.assertEqual('/dev/xvda', utils.make_dev_path('xvda')) self.assertEqual('/dev/xvdb1', utils.make_dev_path('xvdb', 1)) self.assertEqual('/foo/xvdc1', utils.make_dev_path('xvdc', 1, '/foo')) @test.testtools.skipIf(sys.platform == "darwin", "SKIP on OSX") @mock.patch('tempfile.NamedTemporaryFile') @mock.patch.object(os, 'open') @mock.patch.object(os, 'fdatasync') @mock.patch.object(os, 'fsync') @mock.patch.object(os, 'rename') @mock.patch.object(os, 'close') @mock.patch.object(os.path, 'isfile') @mock.patch.object(os, 'unlink') def test_write_configfile(self, mock_unlink, mock_isfile, mock_close, mock_rename, mock_fsync, mock_fdatasync, mock_open, mock_tmp): filename = 'foo' directory = '/some/random/path' filepath = os.path.join(directory, filename) expected = ('\n\n' ' backing-store %(bspath)s\n' ' driver iscsi\n' ' incominguser chap_foo chap_bar\n' ' bsoflags foo\n' ' write-cache bar\n' '\n' % {'id': filename, 'bspath': filepath}) # Normal case utils.robust_file_write(directory, filename, expected) mock_open.assert_called_once_with(directory, os.O_DIRECTORY) mock_rename.assert_called_once_with(mock.ANY, filepath) self.assertEqual( expected.encode('utf-8'), mock_tmp.return_value.__enter__.return_value.write.call_args[0][0] ) # Failure to write persistent file. 
tempfile = '/some/tempfile' mock_tmp.return_value.__enter__.return_value.name = tempfile mock_rename.side_effect = OSError self.assertRaises(OSError, utils.robust_file_write, directory, filename, mock.MagicMock()) mock_isfile.assert_called_once_with(tempfile) mock_unlink.assert_called_once_with(tempfile) def test_check_ssh_injection(self): cmd_list = ['ssh', '-D', 'my_name@name_of_remote_computer'] self.assertIsNone(utils.check_ssh_injection(cmd_list)) cmd_list = ['echo', '"quoted arg with space"'] self.assertIsNone(utils.check_ssh_injection(cmd_list)) cmd_list = ['echo', "'quoted arg with space'"] self.assertIsNone(utils.check_ssh_injection(cmd_list)) def test_check_ssh_injection_on_error(self): with_unquoted_space = ['ssh', 'my_name@ name_of_remote_computer'] self.assertRaises(exception.SSHInjectionThreat, utils.check_ssh_injection, with_unquoted_space) with_danger_chars = ['||', 'my_name@name_of_remote_computer'] self.assertRaises(exception.SSHInjectionThreat, utils.check_ssh_injection, with_danger_chars) with_danger_char = [';', 'my_name@name_of_remote_computer'] self.assertRaises(exception.SSHInjectionThreat, utils.check_ssh_injection, with_danger_char) with_special = ['cmd', 'virus;ls'] self.assertRaises(exception.SSHInjectionThreat, utils.check_ssh_injection, with_special) quoted_with_unescaped = ['cmd', '"arg\"withunescaped"'] self.assertRaises(exception.SSHInjectionThreat, utils.check_ssh_injection, quoted_with_unescaped) bad_before_quotes = ['cmd', 'virus;"quoted argument"'] self.assertRaises(exception.SSHInjectionThreat, utils.check_ssh_injection, bad_before_quotes) bad_after_quotes = ['echo', '"quoted argument";rm -rf'] self.assertRaises(exception.SSHInjectionThreat, utils.check_ssh_injection, bad_after_quotes) bad_within_quotes = ['echo', "'quoted argument `rm -rf`'"] self.assertRaises(exception.SSHInjectionThreat, utils.check_ssh_injection, bad_within_quotes) with_multiple_quotes = ['echo', '"quoted";virus;"quoted"'] self.assertRaises(exception.SSHInjectionThreat, utils.check_ssh_injection, with_multiple_quotes) with_multiple_quotes = ['echo', '"quoted";virus;\'quoted\''] self.assertRaises(exception.SSHInjectionThreat, utils.check_ssh_injection, with_multiple_quotes) @mock.patch('os.stat') def test_get_file_mode(self, mock_stat): class stat_result(object): st_mode = 0o777 st_gid = 33333 test_file = '/var/tmp/made_up_file' mock_stat.return_value = stat_result mode = utils.get_file_mode(test_file) self.assertEqual(0o777, mode) mock_stat.assert_called_once_with(test_file) @mock.patch('os.stat') def test_get_file_gid(self, mock_stat): class stat_result(object): st_mode = 0o777 st_gid = 33333 test_file = '/var/tmp/made_up_file' mock_stat.return_value = stat_result gid = utils.get_file_gid(test_file) self.assertEqual(33333, gid) mock_stat.assert_called_once_with(test_file) @mock.patch('cinder.utils.CONF') def test_get_root_helper(self, mock_conf): mock_conf.rootwrap_config = '/path/to/conf' self.assertEqual('sudo cinder-rootwrap /path/to/conf', utils.get_root_helper()) @ddt.data({'path_a': 'test', 'path_b': 'test', 'exp_eq': True}) @ddt.data({'path_a': 'test', 'path_b': 'other', 'exp_eq': False}) @ddt.unpack @mock.patch('os.path.normcase') def test_paths_normcase_equal(self, mock_normcase, path_a, path_b, exp_eq): # os.path.normcase will lower the path string on Windows # while doing nothing on other platforms. 
mock_normcase.side_effect = lambda x: x result = utils.paths_normcase_equal(path_a, path_b) self.assertEqual(exp_eq, result) mock_normcase.assert_has_calls([mock.call(path_a), mock.call(path_b)]) class TemporaryChownTestCase(test.TestCase): @mock.patch('os.stat') @mock.patch('os.getuid', return_value=1234) @mock.patch('cinder.utils.execute') def test_get_uid(self, mock_exec, mock_getuid, mock_stat): mock_stat.return_value.st_uid = 5678 test_filename = 'a_file' with utils.temporary_chown(test_filename): mock_exec.assert_called_once_with('chown', '1234', test_filename, run_as_root=True) mock_getuid.assert_called_once_with() mock_stat.assert_called_once_with(test_filename) calls = [mock.call('chown', '1234', test_filename, run_as_root=True), mock.call('chown', '5678', test_filename, run_as_root=True)] mock_exec.assert_has_calls(calls) @mock.patch('os.stat') @mock.patch('os.getuid', return_value=1234) @mock.patch('cinder.utils.execute') def test_supplied_owner_uid(self, mock_exec, mock_getuid, mock_stat): mock_stat.return_value.st_uid = 5678 test_filename = 'a_file' with utils.temporary_chown(test_filename, owner_uid=9101): mock_exec.assert_called_once_with('chown', '9101', test_filename, run_as_root=True) self.assertFalse(mock_getuid.called) mock_stat.assert_called_once_with(test_filename) calls = [mock.call('chown', '9101', test_filename, run_as_root=True), mock.call('chown', '5678', test_filename, run_as_root=True)] mock_exec.assert_has_calls(calls) @mock.patch('os.stat') @mock.patch('os.getuid', return_value=5678) @mock.patch('cinder.utils.execute') def test_matching_uid(self, mock_exec, mock_getuid, mock_stat): mock_stat.return_value.st_uid = 5678 test_filename = 'a_file' with utils.temporary_chown(test_filename): pass mock_getuid.assert_called_once_with() mock_stat.assert_called_once_with(test_filename) self.assertFalse(mock_exec.called) @mock.patch('os.name', 'nt') @mock.patch('os.stat') @mock.patch('cinder.utils.execute') def test_temporary_chown_win32(self, mock_exec, mock_stat): with utils.temporary_chown(mock.sentinel.path): pass mock_exec.assert_not_called() mock_stat.assert_not_called() class TempdirTestCase(test.TestCase): @mock.patch('tempfile.mkdtemp') @mock.patch('shutil.rmtree') def test_tempdir(self, mock_rmtree, mock_mkdtemp): with utils.tempdir(a='1', b=2) as td: self.assertEqual(mock_mkdtemp.return_value, td) self.assertFalse(mock_rmtree.called) mock_mkdtemp.assert_called_once_with(a='1', b=2) mock_rmtree.assert_called_once_with(mock_mkdtemp.return_value) @mock.patch('tempfile.mkdtemp') @mock.patch('shutil.rmtree', side_effect=OSError) def test_tempdir_error(self, mock_rmtree, mock_mkdtemp): with utils.tempdir(a='1', b=2) as td: self.assertEqual(mock_mkdtemp.return_value, td) self.assertFalse(mock_rmtree.called) mock_mkdtemp.assert_called_once_with(a='1', b=2) mock_rmtree.assert_called_once_with(mock_mkdtemp.return_value) class WalkClassHierarchyTestCase(test.TestCase): def test_walk_class_hierarchy(self): class A(object): pass class B(A): pass class C(A): pass class D(B): pass class E(A): pass class_pairs = zip((D, B, E), api_utils.walk_class_hierarchy(A, encountered=[C])) for actual, expected in class_pairs: self.assertEqual(expected, actual) class_pairs = zip((D, B, C, E), api_utils.walk_class_hierarchy(A)) for actual, expected in class_pairs: self.assertEqual(expected, actual) class GetDiskOfPartitionTestCase(test.TestCase): def test_devpath_is_diskpath(self): devpath = '/some/path' st_mock = mock.Mock() output = utils._get_disk_of_partition(devpath, st_mock) 
self.assertEqual('/some/path', output[0]) self.assertIs(st_mock, output[1]) with mock.patch('os.stat') as mock_stat: devpath = '/some/path' output = utils._get_disk_of_partition(devpath) mock_stat.assert_called_once_with(devpath) self.assertEqual(devpath, output[0]) self.assertIs(mock_stat.return_value, output[1]) @mock.patch('os.stat', side_effect=OSError) def test_stat_oserror(self, mock_stat): st_mock = mock.Mock() devpath = '/some/path1' output = utils._get_disk_of_partition(devpath, st_mock) mock_stat.assert_called_once_with('/some/path') self.assertEqual(devpath, output[0]) self.assertIs(st_mock, output[1]) @mock.patch('stat.S_ISBLK', return_value=True) @mock.patch('os.stat') def test_diskpath_is_block_device(self, mock_stat, mock_isblk): st_mock = mock.Mock() devpath = '/some/path1' output = utils._get_disk_of_partition(devpath, st_mock) self.assertEqual('/some/path', output[0]) self.assertEqual(mock_stat.return_value, output[1]) @mock.patch('stat.S_ISBLK', return_value=False) @mock.patch('os.stat') def test_diskpath_is_not_block_device(self, mock_stat, mock_isblk): st_mock = mock.Mock() devpath = '/some/path1' output = utils._get_disk_of_partition(devpath, st_mock) self.assertEqual(devpath, output[0]) self.assertEqual(st_mock, output[1]) class GetBlkdevMajorMinorTestCase(test.TestCase): @mock.patch('os.stat') def test_get_file_size(self, mock_stat): class stat_result(object): st_mode = 0o777 st_size = 1074253824 test_file = '/var/tmp/made_up_file' mock_stat.return_value = stat_result size = utils.get_file_size(test_file) self.assertEqual(size, stat_result.st_size) mock_stat.assert_called_once_with(test_file) @test.testtools.skipIf(sys.platform == 'darwin', 'Not supported on macOS') @mock.patch('os.stat') def test_get_blkdev_major_minor(self, mock_stat): class stat_result(object): st_mode = 0o60660 st_rdev = os.makedev(253, 7) test_device = '/dev/made_up_blkdev' mock_stat.return_value = stat_result dev = utils.get_blkdev_major_minor(test_device) self.assertEqual('253:7', dev) mock_stat.assert_called_once_with(test_device) @mock.patch('os.stat') @mock.patch.object(utils, 'execute') def _test_get_blkdev_major_minor_file(self, test_partition, mock_exec, mock_stat): mock_exec.return_value = ( 'Filesystem Size Used Avail Use%% Mounted on\n' '%s 4096 2048 2048 50%% /tmp\n' % test_partition, None) test_file = '/tmp/file' test_disk = '/dev/made_up_disk' class stat_result_file(object): st_mode = 0o660 class stat_result_partition(object): st_mode = 0o60660 st_rdev = os.makedev(8, 65) class stat_result_disk(object): st_mode = 0o60660 st_rdev = os.makedev(8, 64) def fake_stat(path): try: return {test_file: stat_result_file, test_partition: stat_result_partition, test_disk: stat_result_disk}[path] except KeyError: raise OSError mock_stat.side_effect = fake_stat dev = utils.get_blkdev_major_minor(test_file) mock_stat.assert_any_call(test_file) mock_exec.assert_called_once_with('df', test_file) if test_partition.startswith('/'): mock_stat.assert_any_call(test_partition) mock_stat.assert_any_call(test_disk) return dev def test_get_blkdev_major_minor_file(self): dev = self._test_get_blkdev_major_minor_file('/dev/made_up_disk1') self.assertEqual('8:64', dev) def test_get_blkdev_major_minor_file_nfs(self): dev = self._test_get_blkdev_major_minor_file('nfs-server:/export/path') self.assertIsNone(dev) @mock.patch('os.stat') @mock.patch('stat.S_ISCHR', return_value=False) @mock.patch('stat.S_ISBLK', return_value=False) def test_get_blkdev_failure(self, mock_isblk, mock_ischr, mock_stat): path = 
'/some/path' self.assertRaises(exception.CinderException, utils.get_blkdev_major_minor, path, lookup_for_file=False) mock_stat.assert_called_once_with(path) mock_isblk.assert_called_once_with(mock_stat.return_value.st_mode) mock_ischr.assert_called_once_with(mock_stat.return_value.st_mode) @mock.patch('os.stat') @mock.patch('stat.S_ISCHR', return_value=True) @mock.patch('stat.S_ISBLK', return_value=False) def test_get_blkdev_is_chr(self, mock_isblk, mock_ischr, mock_stat): path = '/some/path' output = utils.get_blkdev_major_minor(path, lookup_for_file=False) mock_stat.assert_called_once_with(path) mock_isblk.assert_called_once_with(mock_stat.return_value.st_mode) mock_ischr.assert_called_once_with(mock_stat.return_value.st_mode) self.assertIsNone(output) class MonkeyPatchTestCase(test.TestCase): """Unit test for utils.monkey_patch().""" def setUp(self): super(MonkeyPatchTestCase, self).setUp() self.example_package = 'cinder.tests.unit.monkey_patch_example.' self.flags( monkey_patch=True, monkey_patch_modules=[self.example_package + 'example_a' + ':' + self.example_package + 'example_decorator']) def test_monkey_patch(self): utils.monkey_patch() cinder.tests.unit.monkey_patch_example.CALLED_FUNCTION = [] from cinder.tests.unit.monkey_patch_example import example_a from cinder.tests.unit.monkey_patch_example import example_b self.assertEqual('Example function', example_a.example_function_a()) exampleA = example_a.ExampleClassA() exampleA.example_method() ret_a = exampleA.example_method_add(3, 5) self.assertEqual(8, ret_a) self.assertEqual('Example function', example_b.example_function_b()) exampleB = example_b.ExampleClassB() exampleB.example_method() ret_b = exampleB.example_method_add(3, 5) self.assertEqual(8, ret_b) package_a = self.example_package + 'example_a.' self.assertIn(package_a + 'example_function_a', cinder.tests.unit.monkey_patch_example.CALLED_FUNCTION) self.assertIn(package_a + 'ExampleClassA.example_method', cinder.tests.unit.monkey_patch_example.CALLED_FUNCTION) self.assertIn(package_a + 'ExampleClassA.example_method_add', cinder.tests.unit.monkey_patch_example.CALLED_FUNCTION) package_b = self.example_package + 'example_b.' 
self.assertNotIn( package_b + 'example_function_b', cinder.tests.unit.monkey_patch_example.CALLED_FUNCTION) self.assertNotIn( package_b + 'ExampleClassB.example_method', cinder.tests.unit.monkey_patch_example.CALLED_FUNCTION) self.assertNotIn( package_b + 'ExampleClassB.example_method_add', cinder.tests.unit.monkey_patch_example.CALLED_FUNCTION) class AuditPeriodTest(test.TestCase): def setUp(self): super(AuditPeriodTest, self).setUp() test_time = datetime.datetime(second=23, minute=12, hour=8, day=5, month=3, year=2012) patcher = mock.patch.object(timeutils, 'utcnow') self.addCleanup(patcher.stop) self.mock_utcnow = patcher.start() self.mock_utcnow.return_value = test_time def test_hour(self): begin, end = utils.last_completed_audit_period(unit='hour') self.assertEqual(datetime.datetime(hour=7, day=5, month=3, year=2012), begin) self.assertEqual(datetime.datetime(hour=8, day=5, month=3, year=2012), end) def test_hour_with_offset_before_current(self): begin, end = utils.last_completed_audit_period(unit='hour@10') self.assertEqual(datetime.datetime(minute=10, hour=7, day=5, month=3, year=2012), begin) self.assertEqual(datetime.datetime(minute=10, hour=8, day=5, month=3, year=2012), end) def test_hour_with_offset_after_current(self): begin, end = utils.last_completed_audit_period(unit='hour@30') self.assertEqual(datetime.datetime(minute=30, hour=6, day=5, month=3, year=2012), begin) self.assertEqual(datetime.datetime(minute=30, hour=7, day=5, month=3, year=2012), end) def test_day(self): begin, end = utils.last_completed_audit_period(unit='day') self.assertEqual(datetime.datetime(day=4, month=3, year=2012), begin) self.assertEqual(datetime.datetime(day=5, month=3, year=2012), end) def test_day_with_offset_before_current(self): begin, end = utils.last_completed_audit_period(unit='day@6') self.assertEqual(datetime.datetime(hour=6, day=4, month=3, year=2012), begin) self.assertEqual(datetime.datetime(hour=6, day=5, month=3, year=2012), end) def test_day_with_offset_after_current(self): begin, end = utils.last_completed_audit_period(unit='day@10') self.assertEqual(datetime.datetime(hour=10, day=3, month=3, year=2012), begin) self.assertEqual(datetime.datetime(hour=10, day=4, month=3, year=2012), end) def test_month(self): begin, end = utils.last_completed_audit_period(unit='month') self.assertEqual(datetime.datetime(day=1, month=2, year=2012), begin) self.assertEqual(datetime.datetime(day=1, month=3, year=2012), end) def test_month_with_offset_before_current(self): begin, end = utils.last_completed_audit_period(unit='month@2') self.assertEqual(datetime.datetime(day=2, month=2, year=2012), begin) self.assertEqual(datetime.datetime(day=2, month=3, year=2012), end) def test_month_with_offset_after_current(self): begin, end = utils.last_completed_audit_period(unit='month@15') self.assertEqual(datetime.datetime(day=15, month=1, year=2012), begin) self.assertEqual(datetime.datetime(day=15, month=2, year=2012), end) @mock.patch('oslo_utils.timeutils.utcnow', return_value=datetime.datetime(day=1, month=1, year=2012)) def test_month_jan_day_first(self, mock_utcnow): begin, end = utils.last_completed_audit_period(unit='month') self.assertEqual(datetime.datetime(day=1, month=11, year=2011), begin) self.assertEqual(datetime.datetime(day=1, month=12, year=2011), end) @mock.patch('oslo_utils.timeutils.utcnow', return_value=datetime.datetime(day=2, month=1, year=2012)) def test_month_jan_day_not_first(self, mock_utcnow): begin, end = utils.last_completed_audit_period(unit='month') 
self.assertEqual(datetime.datetime(day=1, month=12, year=2011), begin) self.assertEqual(datetime.datetime(day=1, month=1, year=2012), end) def test_year(self): begin, end = utils.last_completed_audit_period(unit='year') self.assertEqual(datetime.datetime(day=1, month=1, year=2011), begin) self.assertEqual(datetime.datetime(day=1, month=1, year=2012), end) def test_year_with_offset_before_current(self): begin, end = utils.last_completed_audit_period(unit='year@2') self.assertEqual(datetime.datetime(day=1, month=2, year=2011), begin) self.assertEqual(datetime.datetime(day=1, month=2, year=2012), end) def test_year_with_offset_after_current(self): begin, end = utils.last_completed_audit_period(unit='year@6') self.assertEqual(datetime.datetime(day=1, month=6, year=2010), begin) self.assertEqual(datetime.datetime(day=1, month=6, year=2011), end) def test_invalid_unit(self): self.assertRaises(ValueError, utils.last_completed_audit_period, unit='invalid_unit') @mock.patch('cinder.utils.CONF') def test_uses_conf_unit(self, mock_conf): mock_conf.volume_usage_audit_period = 'hour' begin1, end1 = utils.last_completed_audit_period() self.assertEqual(60.0 * 60, (end1 - begin1).total_seconds()) mock_conf.volume_usage_audit_period = 'day' begin2, end2 = utils.last_completed_audit_period() self.assertEqual(60.0 * 60 * 24, (end2 - begin2).total_seconds()) class BrickUtils(test.TestCase): """Unit test to test the brick utility wrapper functions.""" @mock.patch('cinder.volume.volume_utils.CONF') @mock.patch('os_brick.initiator.connector.get_connector_properties') @mock.patch('cinder.utils.get_root_helper') def test_brick_get_connector_properties(self, mock_helper, mock_get, mock_conf): mock_conf.my_ip = '1.2.3.4' output = volume_utils.brick_get_connector_properties() mock_helper.assert_called_once_with() mock_get.assert_called_once_with(mock_helper.return_value, '1.2.3.4', False, False) self.assertEqual(mock_get.return_value, output) @mock.patch('os_brick.initiator.connector.InitiatorConnector.factory') @mock.patch('cinder.utils.get_root_helper') def test_brick_get_connector(self, mock_helper, mock_factory): output = volume_utils.brick_get_connector('protocol') mock_helper.assert_called_once_with() self.assertEqual(mock_factory.return_value, output) mock_factory.assert_called_once_with( 'protocol', mock_helper.return_value, driver=None, use_multipath=False, device_scan_attempts=3) @mock.patch('os_brick.encryptors.get_volume_encryptor') @mock.patch('cinder.utils.get_root_helper') def test_brick_attach_volume_encryptor(self, mock_helper, mock_get_encryptor): attach_info = {'device': {'path': 'dev/sda'}, 'conn': {'driver_volume_type': 'iscsi', 'data': {}, }} encryption = {'encryption_key_id': fake.ENCRYPTION_KEY_ID} ctxt = mock.Mock(name='context') mock_encryptor = mock.Mock() mock_get_encryptor.return_value = mock_encryptor volume_utils.brick_attach_volume_encryptor(ctxt, attach_info, encryption) connection_info = attach_info['conn'] connection_info['data']['device_path'] = attach_info['device']['path'] mock_helper.assert_called_once_with() mock_get_encryptor.assert_called_once_with( root_helper=mock_helper.return_value, connection_info=connection_info, keymgr=mock.ANY, **encryption) mock_encryptor.attach_volume.assert_called_once_with( ctxt, **encryption) @mock.patch('os_brick.encryptors.get_volume_encryptor') @mock.patch('cinder.utils.get_root_helper') def test_brick_detach_volume_encryptor(self, mock_helper, mock_get_encryptor): attach_info = {'device': {'path': 'dev/sda'}, 'conn': {'driver_volume_type': 
'iscsi', 'data': {}, }} encryption = {'encryption_key_id': fake.ENCRYPTION_KEY_ID} mock_encryptor = mock.Mock() mock_get_encryptor.return_value = mock_encryptor volume_utils.brick_detach_volume_encryptor(attach_info, encryption) mock_helper.assert_called_once_with() connection_info = attach_info['conn'] connection_info['data']['device_path'] = attach_info['device']['path'] mock_get_encryptor.assert_called_once_with( root_helper=mock_helper.return_value, connection_info=connection_info, keymgr=mock.ANY, **encryption) mock_encryptor.detach_volume.assert_called_once_with(**encryption) class StringLengthTestCase(test.TestCase): def test_check_string_length(self): self.assertIsNone(utils.check_string_length( 'test', 'name', max_length=255)) self.assertRaises(exception.InvalidInput, utils.check_string_length, 11, 'name', max_length=255) self.assertRaises(exception.InvalidInput, utils.check_string_length, '', 'name', min_length=1) self.assertRaises(exception.InvalidInput, utils.check_string_length, 'a' * 256, 'name', max_length=255) self.assertRaises(exception.InvalidInput, utils.check_string_length, dict(), 'name', max_length=255) class AddVisibleAdminMetadataTestCase(test.TestCase): def test_add_visible_admin_metadata_visible_key_only(self): admin_metadata = [{"key": "invisible_key", "value": "invisible_value"}, {"key": "readonly", "value": "visible"}, {"key": "attached_mode", "value": "visible"}] metadata = [{"key": "key", "value": "value"}, {"key": "readonly", "value": "existing"}] volume = {'volume_admin_metadata': admin_metadata, 'volume_metadata': metadata} api_utils.add_visible_admin_metadata(volume) self.assertEqual([{"key": "key", "value": "value"}, {"key": "readonly", "value": "visible"}, {"key": "attached_mode", "value": "visible"}], volume['volume_metadata']) admin_metadata = {"invisible_key": "invisible_value", "readonly": "visible", "attached_mode": "visible"} metadata = {"key": "value", "readonly": "existing"} volume = {'admin_metadata': admin_metadata, 'metadata': metadata} api_utils.add_visible_admin_metadata(volume) self.assertEqual({'key': 'value', 'attached_mode': 'visible', 'readonly': 'visible'}, volume['metadata']) def test_add_visible_admin_metadata_no_visible_keys(self): admin_metadata = [ {"key": "invisible_key1", "value": "invisible_value1"}, {"key": "invisible_key2", "value": "invisible_value2"}, {"key": "invisible_key3", "value": "invisible_value3"}] metadata = [{"key": "key", "value": "value"}] volume = {'volume_admin_metadata': admin_metadata, 'volume_metadata': metadata} api_utils.add_visible_admin_metadata(volume) self.assertEqual([{"key": "key", "value": "value"}], volume['volume_metadata']) admin_metadata = {"invisible_key1": "invisible_value1", "invisible_key2": "invisible_value2", "invisible_key3": "invisible_value3"} metadata = {"key": "value"} volume = {'admin_metadata': admin_metadata, 'metadata': metadata} api_utils.add_visible_admin_metadata(volume) self.assertEqual({'key': 'value'}, volume['metadata']) def test_add_visible_admin_metadata_no_existing_metadata(self): admin_metadata = [{"key": "invisible_key", "value": "invisible_value"}, {"key": "readonly", "value": "visible"}, {"key": "attached_mode", "value": "visible"}] volume = {'volume_admin_metadata': admin_metadata} api_utils.add_visible_admin_metadata(volume) self.assertEqual({'attached_mode': 'visible', 'readonly': 'visible'}, volume['metadata']) admin_metadata = {"invisible_key": "invisible_value", "readonly": "visible", "attached_mode": "visible"} volume = {'admin_metadata': admin_metadata} 
api_utils.add_visible_admin_metadata(volume) self.assertEqual({'attached_mode': 'visible', 'readonly': 'visible'}, volume['metadata']) class InvalidFilterTestCase(test.TestCase): def test_admin_allows_all_options(self): ctxt = mock.Mock(name='context') ctxt.is_admin = True filters = {'allowed1': None, 'allowed2': None, 'not_allowed1': None} fltrs_orig = {'allowed1': None, 'allowed2': None, 'not_allowed1': None} allowed_search_options = ('allowed1', 'allowed2') allowed_orig = ('allowed1', 'allowed2') api_utils.remove_invalid_filter_options(ctxt, filters, allowed_search_options) self.assertEqual(allowed_orig, allowed_search_options) self.assertEqual(fltrs_orig, filters) def test_admin_allows_some_options(self): ctxt = mock.Mock(name='context') ctxt.is_admin = False filters = {'allowed1': None, 'allowed2': None, 'not_allowed1': None} fltrs_orig = {'allowed1': None, 'allowed2': None, 'not_allowed1': None} allowed_search_options = ('allowed1', 'allowed2') allowed_orig = ('allowed1', 'allowed2') api_utils.remove_invalid_filter_options(ctxt, filters, allowed_search_options) self.assertEqual(allowed_orig, allowed_search_options) self.assertNotEqual(fltrs_orig, filters) self.assertEqual(allowed_search_options, tuple(sorted(filters.keys()))) class IsBlkDeviceTestCase(test.TestCase): @mock.patch('stat.S_ISBLK', return_value=True) @mock.patch('os.stat') def test_is_blk_device(self, mock_os_stat, mock_S_ISBLK): dev = 'some_device' self.assertTrue(utils.is_blk_device(dev)) @mock.patch('stat.S_ISBLK', return_value=False) @mock.patch('os.stat') def test_not_is_blk_device(self, mock_os_stat, mock_S_ISBLK): dev = 'not_some_device' self.assertFalse(utils.is_blk_device(dev)) @mock.patch('stat.S_ISBLK', side_effect=Exception) @mock.patch('os.stat') def test_fail_is_blk_device(self, mock_os_stat, mock_S_ISBLK): dev = 'device_exception' self.assertFalse(utils.is_blk_device(dev)) class WrongException(Exception): pass class TestRetryDecorator(test.TestCase): def test_no_retry_required(self): self.counter = 0 with mock.patch('tenacity.nap.sleep') as mock_sleep: @utils.retry(exception.VolumeBackendAPIException, interval=2, retries=3, backoff_rate=2) def succeeds(): self.counter += 1 return 'success' ret = succeeds() self.assertFalse(mock_sleep.called) self.assertEqual('success', ret) self.assertEqual(1, self.counter) def test_no_retry_required_random(self): self.counter = 0 with mock.patch('tenacity.nap.sleep') as mock_sleep: @utils.retry(exception.VolumeBackendAPIException, interval=2, retries=3, backoff_rate=2, wait_random=True) def succeeds(): self.counter += 1 return 'success' ret = succeeds() self.assertFalse(mock_sleep.called) self.assertEqual('success', ret) self.assertEqual(1, self.counter) def test_retries_once(self): self.counter = 0 interval = 2 backoff_rate = 2 retries = 3 with mock.patch('tenacity.nap.sleep') as mock_sleep: @utils.retry(exception.VolumeBackendAPIException, interval, retries, backoff_rate) def fails_once(): self.counter += 1 if self.counter < 2: raise exception.VolumeBackendAPIException(data='fake') else: return 'success' ret = fails_once() self.assertEqual('success', ret) self.assertEqual(2, self.counter) self.assertEqual(1, mock_sleep.call_count) mock_sleep.assert_called_with(interval) def test_retries_once_random(self): self.counter = 0 interval = 2 backoff_rate = 2 retries = 3 with mock.patch('tenacity.nap.sleep') as mock_sleep: @utils.retry(exception.VolumeBackendAPIException, interval, retries, backoff_rate, wait_random=True) def fails_once(): self.counter += 1 if self.counter < 2: 
raise exception.VolumeBackendAPIException(data='fake') else: return 'success' ret = fails_once() self.assertEqual('success', ret) self.assertEqual(2, self.counter) self.assertEqual(1, mock_sleep.call_count) self.assertTrue(mock_sleep.called) def test_limit_is_reached(self): self.counter = 0 retries = 3 interval = 2 backoff_rate = 4 with mock.patch('tenacity.nap.sleep') as mock_sleep: @utils.retry(exception.VolumeBackendAPIException, interval, retries, backoff_rate) def always_fails(): self.counter += 1 raise exception.VolumeBackendAPIException(data='fake') self.assertRaises(exception.VolumeBackendAPIException, always_fails) self.assertEqual(retries, self.counter) expected_sleep_arg = [] for i in range(retries): if i > 0: interval *= (backoff_rate ** (i - 1)) expected_sleep_arg.append(float(interval)) mock_sleep.assert_has_calls( list(map(mock.call, expected_sleep_arg))) def test_wrong_exception_no_retry(self): with mock.patch('tenacity.nap.sleep') as mock_sleep: @utils.retry(exception.VolumeBackendAPIException) def raise_unexpected_error(): raise WrongException("wrong exception") self.assertRaises(WrongException, raise_unexpected_error) self.assertFalse(mock_sleep.called) @mock.patch('tenacity.nap.sleep') def test_retry_exit_code(self, sleep_mock): exit_code = 5 exception = utils.processutils.ProcessExecutionError @utils.retry(retry=utils.retry_if_exit_code, retry_param=exit_code) def raise_retriable_exit_code(): raise exception(exit_code=exit_code) self.assertRaises(exception, raise_retriable_exit_code) self.assertEqual(2, sleep_mock.call_count) sleep_mock.assert_has_calls([mock.call(1), mock.call(2)]) @mock.patch('tenacity.nap.sleep') def test_retry_exit_code_non_retriable(self, sleep_mock): exit_code = 5 exception = utils.processutils.ProcessExecutionError @utils.retry(retry=utils.retry_if_exit_code, retry_param=exit_code) def raise_non_retriable_exit_code(): raise exception(exit_code=exit_code + 1) self.assertRaises(exception, raise_non_retriable_exit_code) sleep_mock.assert_not_called() @ddt.ddt class TestCalculateVirtualFree(test.TestCase): @ddt.data( {'total': 30.01, 'free': 28.01, 'provisioned': 2.0, 'max_ratio': 1.0, 'thin_support': False, 'thick_support': True, 'is_thin_lun': False, 'expected': 27.01}, {'total': 20.01, 'free': 18.01, 'provisioned': 2.0, 'max_ratio': 2.0, 'thin_support': True, 'thick_support': False, 'is_thin_lun': True, 'expected': 36.02}, {'total': 20.01, 'free': 18.01, 'provisioned': 2.0, 'max_ratio': 2.0, 'thin_support': True, 'thick_support': True, 'is_thin_lun': True, 'expected': 36.02}, {'total': 30.01, 'free': 28.01, 'provisioned': 2.0, 'max_ratio': 2.0, 'thin_support': True, 'thick_support': True, 'is_thin_lun': False, 'expected': 27.01}, ) @ddt.unpack def test_utils_calculate_virtual_free_capacity_provision_type( self, total, free, provisioned, max_ratio, thin_support, thick_support, is_thin_lun, expected): host_stat = {'total_capacity_gb': total, 'free_capacity_gb': free, 'provisioned_capacity_gb': provisioned, 'max_over_subscription_ratio': max_ratio, 'thin_provisioning_support': thin_support, 'thick_provisioning_support': thick_support, 'reserved_percentage': 5} free_capacity = utils.calculate_virtual_free_capacity( host_stat['total_capacity_gb'], host_stat['free_capacity_gb'], host_stat['provisioned_capacity_gb'], host_stat['thin_provisioning_support'], host_stat['max_over_subscription_ratio'], host_stat['reserved_percentage'], is_thin_lun) self.assertEqual(expected, free_capacity) @ddt.data( {'total': 30.01, 'free': 28.01, 'provisioned': 2.0, 
'max_ratio': 1.0, 'thin_support': False, 'thick_support': True, 'is_thin_lun': False, 'reserved_percentage': 5, 'expected_total_capacity': 30.01, 'expected_reserved_capacity': 1, 'expected_free_capacity': 28.01, 'expected_total_available_capacity': 29.01, 'expected_virtual_free': 27.01, 'expected_free_percent': 93.11, 'expected_provisioned_type': 'thick', 'expected_provisioned_ratio': 0.07}, {'total': 20.01, 'free': 18.01, 'provisioned': 2.0, 'max_ratio': 2.0, 'thin_support': True, 'thick_support': False, 'is_thin_lun': True, 'reserved_percentage': 10, 'expected_total_capacity': 20.01, 'expected_reserved_capacity': 2, 'expected_free_capacity': 18.01, 'expected_total_available_capacity': 36.02, 'expected_virtual_free': 34.02, 'expected_free_percent': 94.45, 'expected_provisioned_type': 'thin', 'expected_provisioned_ratio': 0.06}, {'total': 20.01, 'free': 18.01, 'provisioned': 2.0, 'max_ratio': 2.0, 'thin_support': True, 'thick_support': True, 'is_thin_lun': True, 'reserved_percentage': 20, 'expected_total_capacity': 20.01, 'expected_reserved_capacity': 4, 'expected_free_capacity': 18.01, 'expected_total_available_capacity': 32.02, 'expected_virtual_free': 30.02, 'expected_free_percent': 93.75, 'expected_provisioned_type': 'thin', 'expected_provisioned_ratio': 0.06}, {'total': 30.01, 'free': 28.01, 'provisioned': 2.0, 'max_ratio': 2.0, 'thin_support': True, 'thick_support': True, 'is_thin_lun': False, 'reserved_percentage': 10, 'expected_total_capacity': 30.01, 'expected_reserved_capacity': 3, 'expected_free_capacity': 28.01, 'expected_total_available_capacity': 27.01, 'expected_virtual_free': 25.01, 'expected_free_percent': 92.6, 'expected_provisioned_type': 'thick', 'expected_provisioned_ratio': 0.07}, ) @ddt.unpack def test_utils_calculate_capacity_factors( self, total, free, provisioned, max_ratio, thin_support, thick_support, is_thin_lun, reserved_percentage, expected_total_capacity, expected_reserved_capacity, expected_free_capacity, expected_total_available_capacity, expected_virtual_free, expected_free_percent, expected_provisioned_type, expected_provisioned_ratio): host_stat = {'total_capacity_gb': total, 'free_capacity_gb': free, 'provisioned_capacity_gb': provisioned, 'max_over_subscription_ratio': max_ratio, 'thin_provisioning_support': thin_support, 'thick_provisioning_support': thick_support, 'reserved_percentage': reserved_percentage} factors = utils.calculate_capacity_factors( host_stat['total_capacity_gb'], host_stat['free_capacity_gb'], host_stat['provisioned_capacity_gb'], host_stat['thin_provisioning_support'], host_stat['max_over_subscription_ratio'], host_stat['reserved_percentage'], is_thin_lun) self.assertEqual(expected_total_capacity, factors['total_capacity']) self.assertEqual(expected_reserved_capacity, factors['reserved_capacity']) self.assertEqual(expected_free_capacity, factors['free_capacity']) self.assertEqual(expected_total_available_capacity, factors['total_available_capacity']) self.assertEqual(expected_virtual_free, factors['virtual_free_capacity']) self.assertEqual(expected_free_percent, factors['free_percent']) self.assertEqual(expected_provisioned_type, factors['provisioned_type']) self.assertEqual(expected_provisioned_ratio, factors['provisioned_ratio']) class Comparable(utils.ComparableMixin): def __init__(self, value): self.value = value def _cmpkey(self): return self.value class TestComparableMixin(test.TestCase): def setUp(self): super(TestComparableMixin, self).setUp() self.one = Comparable(1) self.two = Comparable(2) def test_lt(self): 
self.assertTrue(self.one < self.two) self.assertFalse(self.two < self.one) self.assertFalse(self.one < self.one) def test_le(self): self.assertTrue(self.one <= self.two) self.assertFalse(self.two <= self.one) self.assertTrue(self.one <= self.one) def test_eq(self): self.assertFalse(self.one == self.two) self.assertFalse(self.two == self.one) self.assertTrue(self.one == self.one) def test_ge(self): self.assertFalse(self.one >= self.two) self.assertTrue(self.two >= self.one) self.assertTrue(self.one >= self.one) def test_gt(self): self.assertFalse(self.one > self.two) self.assertTrue(self.two > self.one) self.assertFalse(self.one > self.one) def test_ne(self): self.assertTrue(self.one != self.two) self.assertTrue(self.two != self.one) self.assertFalse(self.one != self.one) def test_compare(self): self.assertEqual(NotImplemented, self.one._compare(1, self.one._cmpkey)) @ddt.ddt class TestValidateInteger(test.TestCase): @ddt.data( (2 ** 31) + 1, # More than max value -12, # Less than min value 2.05, # Float value "12.05", # Float value in string format "should be int", # String u"test" # String in unicode format ) def test_validate_integer_raise_assert(self, value): self.assertRaises(webob.exc.HTTPBadRequest, api_utils.validate_integer, value, 'limit', min_value=-1, max_value=(2 ** 31)) @ddt.data( "123", # integer in string format 123, # integer u"123" # integer in unicode format ) def test_validate_integer(self, value): res = api_utils.validate_integer(value, 'limit', min_value=-1, max_value=(2 ** 31)) self.assertEqual(123, res) @ddt.ddt class TestNotificationShortCircuit(test.TestCase): def test_do_nothing_getter(self): """Test any attribute will always return the same instance (self).""" donothing = utils.DoNothing() self.assertIs(donothing, donothing.anyname) def test_do_nothing_caller(self): """Test calling the object will always return the same instance.""" donothing = utils.DoNothing() self.assertIs(donothing, donothing()) def test_do_nothing_json_serializable(self): """Test calling the object will always return the same instance.""" donothing = utils.DoNothing() self.assertEqual('""', json.dumps(donothing)) @utils.if_notifications_enabled def _decorated_method(self): return mock.sentinel.success def test_if_notification_enabled_when_enabled(self): """Test method is called when notifications are enabled.""" result = self._decorated_method() self.assertEqual(mock.sentinel.success, result) @ddt.data([], ['noop'], ['noop', 'noop']) def test_if_notification_enabled_when_disabled(self, driver): """Test method is not called when notifications are disabled.""" self.override_config('driver', driver, group='oslo_messaging_notifications') result = self._decorated_method() self.assertEqual(utils.DO_NOTHING, result) @ddt.ddt class TestLogLevels(test.TestCase): @ddt.data(None, '', 'wronglevel') def test_get_log_method_invalid(self, level): self.assertRaises(exception.InvalidInput, utils.get_log_method, level) @ddt.data(('info', utils.logging.INFO), ('warning', utils.logging.WARNING), ('INFO', utils.logging.INFO), ('wArNiNg', utils.logging.WARNING), ('error', utils.logging.ERROR), ('debug', utils.logging.DEBUG)) @ddt.unpack def test_get_log_method(self, level, logger): result = utils.get_log_method(level) self.assertEqual(logger, result) def test_get_log_levels(self): levels = utils.get_log_levels('cinder.api') self.assertTrue(len(levels) > 1) self.assertSetEqual({'INFO'}, set(levels.values())) @ddt.data(None, '', 'wronglevel') def test_set_log_levels_invalid(self, level): 
self.assertRaises(exception.InvalidInput, utils.set_log_levels, '', level) def test_set_log_levels(self): prefix = 'cinder.utils' levels = utils.get_log_levels(prefix) utils.set_log_levels(prefix, 'debug') levels = utils.get_log_levels(prefix) self.assertEqual('DEBUG', levels[prefix]) utils.set_log_levels(prefix, 'warning') levels = utils.get_log_levels(prefix) self.assertEqual('WARNING', levels[prefix]) @ddt.ddt class TestCheckMetadataProperties(test.TestCase): @ddt.data( {'a': {'foo': 'bar'}}, # value is a nested dict {'a': 123}, # value is an integer {'a': 123.4}, # value is a float {'a': True}, # value is a bool {'a': ('foo', 'bar')}, # value is a tuple {'a': []}, # value is a list {'a': None} # value is None ) def test_metadata_value_not_string_raise(self, meta): self.assertRaises(exception.InvalidVolumeMetadata, utils.check_metadata_properties, meta) def test_metadata_value_not_dict_raise(self): meta = 123 self.assertRaises(exception.InvalidInput, utils.check_metadata_properties, meta) POOL_CAP1 = {'allocated_capacity_gb': 10, 'provisioned_capacity_gb': 10, 'thin_provisioning_support': False, 'total_capacity_gb': 10, 'free_capacity_gb': 10, 'max_over_subscription_ratio': 1.0} POOL_CAP2 = {'allocated_capacity_gb': 10, 'provisioned_capacity_gb': 10, 'thin_provisioning_support': True, 'total_capacity_gb': 100, 'free_capacity_gb': 95, 'max_over_subscription_ratio': None} POOL_CAP3 = {'allocated_capacity_gb': 0, 'provisioned_capacity_gb': 0, 'thin_provisioning_support': True, 'total_capacity_gb': 100, 'free_capacity_gb': 100, 'max_over_subscription_ratio': 'auto'} POOL_CAP4 = {'allocated_capacity_gb': 100, 'thin_provisioning_support': True, 'total_capacity_gb': 2500, 'free_capacity_gb': 500, 'max_over_subscription_ratio': 'auto'} POOL_CAP5 = {'allocated_capacity_gb': 10000, 'thin_provisioning_support': True, 'total_capacity_gb': 2500, 'free_capacity_gb': 0.1, 'max_over_subscription_ratio': 'auto'} POOL_CAP6 = {'allocated_capacity_gb': 1000, 'provisioned_capacity_gb': 1010, 'thin_provisioning_support': True, 'total_capacity_gb': 2500, 'free_capacity_gb': 2500, 'max_over_subscription_ratio': 'auto'} POOL_CAP7 = {'allocated_capacity_gb': 10, 'provisioned_capacity_gb': 10, 'thin_provisioning_support': True, 'total_capacity_gb': 10, 'free_capacity_gb': 10} POOL_CAP8 = {'allocated_capacity_gb': 10, 'provisioned_capacity_gb': 10, 'thin_provisioning_support': True, 'total_capacity_gb': 10, 'free_capacity_gb': 10, 'max_over_subscription_ratio': '15.5'} POOL_CAP9 = {'allocated_capacity_gb': 10, 'provisioned_capacity_gb': 10, 'thin_provisioning_support': True, 'total_capacity_gb': 10, 'free_capacity_gb': 'unknown', 'max_over_subscription_ratio': '15.5'} POOL_CAP10 = {'allocated_capacity_gb': 10, 'provisioned_capacity_gb': 10, 'thin_provisioning_support': True, 'total_capacity_gb': 'infinite', 'free_capacity_gb': 10, 'max_over_subscription_ratio': '15.5'} @ddt.ddt class TestAutoMaxOversubscriptionRatio(test.TestCase): @ddt.data({'data': POOL_CAP1, 'global_max_over_subscription_ratio': 'auto', 'expected_result': 1.0}, {'data': POOL_CAP2, 'global_max_over_subscription_ratio': 'auto', 'expected_result': 2.67}, {'data': POOL_CAP3, 'global_max_over_subscription_ratio': '20.0', 'expected_result': 20}, {'data': POOL_CAP4, 'global_max_over_subscription_ratio': '20.0', 'expected_result': 1.05}, {'data': POOL_CAP5, 'global_max_over_subscription_ratio': '10.0', 'expected_result': 5.0}, {'data': POOL_CAP6, 'global_max_over_subscription_ratio': '20.0', 'expected_result': 1011.0}, {'data': POOL_CAP7, 
'global_max_over_subscription_ratio': 'auto', 'expected_result': 11.0}, {'data': POOL_CAP8, 'global_max_over_subscription_ratio': '20.0', 'expected_result': 15.5}, {'data': POOL_CAP9, 'global_max_over_subscription_ratio': '20.0', 'expected_result': 1.0}, {'data': POOL_CAP10, 'global_max_over_subscription_ratio': '20.0', 'expected_result': 1.0}, ) @ddt.unpack def test_calculate_max_over_subscription_ratio( self, data, expected_result, global_max_over_subscription_ratio): result = utils.calculate_max_over_subscription_ratio( data, global_max_over_subscription_ratio) # Just for sake of testing we reduce the float precision if result is not None: result = round(result, 2) self.assertEqual(expected_result, result) @ddt.ddt class LimitOperationsTestCase(test.TestCase): @ddt.data(1, 5) @mock.patch('contextlib.suppress') def test_semaphore_factory_no_limit(self, processes, mock_suppress): res = utils.semaphore_factory(0, processes) mock_suppress.assert_called_once_with() self.assertEqual(mock_suppress.return_value, res) @mock.patch('eventlet.Semaphore') def test_semaphore_factory_with_limit(self, mock_semaphore): max_operations = 15 res = utils.semaphore_factory(max_operations, 1) mock_semaphore.assert_called_once_with(max_operations) self.assertEqual(mock_semaphore.return_value, res) @mock.patch('cinder.utils.Semaphore') def test_semaphore_factory_with_limit_and_workers(self, mock_semaphore): max_operations = 15 processes = 5 res = utils.semaphore_factory(max_operations, processes) mock_semaphore.assert_called_once_with(max_operations) self.assertEqual(mock_semaphore.return_value, res) @mock.patch('multiprocessing.Semaphore') @mock.patch('eventlet.tpool.execute') def test_semaphore(self, mock_exec, mock_semaphore): limit = 15 res = utils.Semaphore(limit) self.assertEqual(limit, res.limit) mocked_semaphore = mock_semaphore.return_value self.assertEqual(mocked_semaphore, res.semaphore) mock_semaphore.assert_called_once_with(limit) with res: mock_exec.assert_called_once_with(mocked_semaphore.__enter__) mocked_semaphore.__exit__.assert_not_called() mocked_semaphore.__exit__.assert_called_once_with(None, None, None) class TestKeystoneProjectGet(test.TestCase): class FakeProject(object): def __init__(self, id='foo', name=None): self.id = id self.name = name self.description = 'fake project description' self.domain_id = 'default' @mock.patch('keystoneclient.client.Client') def test_get_project_keystoneclient_v2(self, ksclient_class): self.context = context.RequestContext('fake_user', 'fake_proj_id') keystoneclient = ksclient_class.return_value keystoneclient.version = 'v2.0' returned_project = self.FakeProject(self.context.project_id, 'bar') keystoneclient.projects.get.return_value = returned_project expected_project = api_utils.GenericProjectInfo( self.context.project_id, 'v2.0', domain_id='default', name='bar', description='fake project description') project = api_utils.get_project( self.context, self.context.project_id) self.assertEqual(expected_project.__dict__, project.__dict__) @mock.patch('keystoneclient.client.Client') def test_get_project_keystoneclient_v3(self, ksclient_class): self.context = context.RequestContext('fake_user', 'fake_proj_id') keystoneclient = ksclient_class.return_value keystoneclient.version = 'v3' returned_project = self.FakeProject(self.context.project_id, 'bar') keystoneclient.projects.get.return_value = returned_project expected_project = api_utils.GenericProjectInfo( self.context.project_id, 'v3', domain_id='default', name='bar', description='fake project description') 
        project = api_utils.get_project(
            self.context, self.context.project_id)
        self.assertEqual(expected_project.__dict__, project.__dict__)


class TestCleanFileLocks(test.TestCase):
    @mock.patch('cinder.utils.LOG.warning')
    @mock.patch('cinder.utils.synchronized_remove')
    def test_clean_volume_file_locks(self, mock_remove, mock_log):
        driver = mock.Mock()
        utils.clean_volume_file_locks('UUID', driver)
        self.assertEqual(3, mock_remove.call_count)
        mock_remove.assert_has_calls([mock.call('UUID-delete_volume'),
                                      mock.call('UUID'),
                                      mock.call('UUID-detach_volume')])
        driver.clean_volume_file_locks.assert_called_once_with('UUID')
        mock_log.assert_not_called()

    @mock.patch('cinder.utils.LOG.warning')
    @mock.patch('cinder.utils.synchronized_remove')
    def test_clean_volume_file_locks_errors(self, mock_remove, mock_log):
        driver = mock.Mock()
        driver.clean_volume_file_locks.side_effect = Exception
        mock_remove.side_effect = [None, Exception, None]
        utils.clean_volume_file_locks('UUID', driver)
        self.assertEqual(3, mock_remove.call_count)
        mock_remove.assert_has_calls([mock.call('UUID-delete_volume'),
                                      mock.call('UUID'),
                                      mock.call('UUID-detach_volume')])
        driver.clean_volume_file_locks.assert_called_once_with('UUID')
        self.assertEqual(2, mock_log.call_count)

    @mock.patch('cinder.utils.LOG.warning')
    @mock.patch('cinder.utils.synchronized_remove')
    def test_clean_snapshot_file_locks(self, mock_remove, mock_log):
        driver = mock.Mock()
        utils.clean_snapshot_file_locks('UUID', driver)
        mock_remove.assert_called_once_with('UUID-delete_snapshot')
        driver.clean_snapshot_file_locks.assert_called_once_with('UUID')
        mock_log.assert_not_called()

    @mock.patch('cinder.utils.LOG.warning')
    @mock.patch('cinder.utils.synchronized_remove')
    def test_clean_snapshot_file_locks_failures(self, mock_remove, mock_log):
        driver = mock.Mock()
        driver.clean_snapshot_file_locks.side_effect = Exception
        mock_remove.side_effect = Exception
        utils.clean_snapshot_file_locks('UUID', driver)
        mock_remove.assert_called_once_with('UUID-delete_snapshot')
        driver.clean_snapshot_file_locks.assert_called_once_with('UUID')
        self.assertEqual(2, mock_log.call_count)

    @mock.patch('cinder.coordination.synchronized_remove')
    def test_api_clean_volume_file_locks(self, mock_remove):
        utils.api_clean_volume_file_locks('UUID')
        mock_remove.assert_called_once_with('attachment_update-UUID-*')


# File: cinder-27.0.0/cinder/tests/unit/test_volume_cleanup.py

# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
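# Illustrative sketch (editorial addition, not part of the original tests):
# every case below follows the same recovery pattern.  A volume or snapshot
# is left in a transient status, a worker row records the interrupted
# operation, and init_host() is expected to repair the resource and drop the
# worker row when the service restarts.  The volume_manager/ctxt/service_id/
# host arguments stand in for the fixtures the test class sets up.
def _example_init_host_recovery(volume_manager, ctxt, service_id, host):
    """Sketch of the init_host() cleanup flow exercised by the tests below."""
    from cinder import db
    from cinder.tests.unit import utils as tests_utils

    # A volume interrupted while uploading to Glance ...
    volume = tests_utils.create_volume(ctxt, status='uploading', size=0,
                                       host=host)
    # ... with a worker row marking the in-flight operation.
    db.worker_create(ctxt, resource_type='Volume', resource_id=volume.id,
                     status=volume.status, service_id=service_id)

    # Restarting the service runs the cleanup.
    volume_manager.init_host(service_id=service_id)

    volume.refresh()
    # An idle upload is rolled back to 'available' and the worker row is gone.
    return volume.status, db.worker_get_all(ctxt, read_deleted='yes')
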
from unittest import mock from oslo_config import cfg from cinder import context from cinder import db from cinder import exception from cinder import objects from cinder.objects import fields from cinder import service from cinder.tests.unit.api import fakes from cinder.tests.unit import utils as tests_utils from cinder.tests.unit import volume as base CONF = cfg.CONF class VolumeCleanupTestCase(base.BaseVolumeTestCase): MOCK_WORKER = False def setUp(self): super(VolumeCleanupTestCase, self).setUp() self.service_id = 1 self.mock_object(service.Service, 'service_id', self.service_id) self.patch('cinder.volume.volume_utils.clear_volume', autospec=True) def _assert_workers_are_removed(self): workers = db.worker_get_all(self.context, read_deleted='yes') self.assertListEqual([], workers) def test_init_host_clears_uploads_available_volume(self): """init_host will clean an available volume stuck in uploading.""" volume = tests_utils.create_volume(self.context, status='uploading', size=0, host=CONF.host) db.worker_create(self.context, resource_type='Volume', resource_id=volume.id, status=volume.status, service_id=self.service_id) self.volume.init_host(service_id=service.Service.service_id) volume.refresh() self.assertEqual("available", volume.status) self._assert_workers_are_removed() @mock.patch('cinder.manager.CleanableManager.init_host') def test_init_host_clears_uploads_in_use_volume(self, init_host_mock): """init_host will clean an in-use volume stuck in uploading.""" volume = tests_utils.create_volume(self.context, status='uploading', size=0, host=CONF.host) db.worker_create(self.context, resource_type='Volume', resource_id=volume.id, status=volume.status, service_id=self.service_id) fake_uuid = fakes.get_fake_uuid() tests_utils.attach_volume(self.context, volume.id, fake_uuid, 'fake_host', '/dev/vda') self.volume.init_host(service_id=mock.sentinel.service_id) init_host_mock.assert_called_once_with( service_id=mock.sentinel.service_id, added_to_cluster=None) volume.refresh() self.assertEqual("in-use", volume.status) self._assert_workers_are_removed() @mock.patch('cinder.image.image_utils.cleanup_temporary_file') def test_init_host_clears_downloads(self, mock_cleanup_tmp_file): """Test that init_host will unwedge a volume stuck in downloading.""" volume = tests_utils.create_volume(self.context, status='downloading', size=0, host=CONF.host) db.worker_create(self.context, resource_type='Volume', resource_id=volume.id, status=volume.status, service_id=self.service_id) mock_clear = self.mock_object(self.volume.driver, 'clear_download') self.volume.init_host(service_id=service.Service.service_id) self.assertEqual(1, mock_clear.call_count) self.assertEqual(volume.id, mock_clear.call_args[0][1].id) volume.refresh() self.assertEqual("error", volume['status']) mock_cleanup_tmp_file.assert_called_once_with(CONF.host) self.volume.delete_volume(self.context, volume=volume) self._assert_workers_are_removed() @mock.patch('cinder.image.image_utils.cleanup_temporary_file') def test_init_host_resumes_deletes(self, mock_cleanup_tmp_file): """init_host will resume deleting volume in deleting status.""" volume = tests_utils.create_volume(self.context, status='deleting', size=0, host=CONF.host) db.worker_create(self.context, resource_type='Volume', resource_id=volume.id, status=volume.status, service_id=self.service_id) self.volume.init_host(service_id=service.Service.service_id) self.assertRaises(exception.VolumeNotFound, db.volume_get, context.get_admin_context(), volume.id) 
        mock_cleanup_tmp_file.assert_called_once_with(CONF.host)
        self._assert_workers_are_removed()

    @mock.patch('cinder.image.image_utils.cleanup_temporary_file')
    def test_create_volume_fails_with_creating_and_downloading_status(
            self, mock_cleanup_tmp_file):
        """Test init_host_with_service in case of volume.

        If the volume service goes down while a volume is in the 'creating'
        or 'downloading' status, that status is changed to 'error' after the
        service restarts.
        """
        for status in ('creating', 'downloading'):
            volume = tests_utils.create_volume(self.context, status=status,
                                               size=0, host=CONF.host)
            db.worker_create(self.context, resource_type='Volume',
                             resource_id=volume.id, status=volume.status,
                             service_id=self.service_id)

            self.volume.init_host(service_id=service.Service.service_id)
            volume.refresh()

            self.assertEqual('error', volume['status'])
            self.volume.delete_volume(self.context, volume)
            self.assertTrue(mock_cleanup_tmp_file.called)
            self._assert_workers_are_removed()

    def test_create_snapshot_fails_with_creating_status(self):
        """Test init_host_with_service in case of snapshot.

        If the volume service goes down while a snapshot is in the 'creating'
        status, that status is changed to 'error' after the service restarts.
        """
        volume = tests_utils.create_volume(self.context,
                                           **self.volume_params)
        snapshot = tests_utils.create_snapshot(
            self.context, volume.id, status=fields.SnapshotStatus.CREATING)
        db.worker_create(self.context, resource_type='Snapshot',
                         resource_id=snapshot.id, status=snapshot.status,
                         service_id=self.service_id)

        self.volume.init_host(service_id=service.Service.service_id)

        snapshot_obj = objects.Snapshot.get_by_id(self.context, snapshot.id)
        self.assertEqual(fields.SnapshotStatus.ERROR, snapshot_obj.status)
        self.assertEqual(service.Service.service_id,
                         self.volume.service_id)
        self._assert_workers_are_removed()

        self.volume.delete_snapshot(self.context, snapshot_obj)
        self.volume.delete_volume(self.context, volume)

    def test_init_host_clears_deleting_snapshots(self):
        """Test that init_host will delete a snapshot stuck in deleting."""
        volume = tests_utils.create_volume(self.context, status='deleting',
                                           size=1, host=CONF.host)
        snapshot = tests_utils.create_snapshot(self.context, volume.id,
                                               status='deleting')
        db.worker_create(self.context, resource_type='Volume',
                         resource_id=volume.id, status=volume.status,
                         service_id=self.service_id)

        self.volume.init_host(service_id=self.service_id)
        self.assertRaises(exception.VolumeNotFound, volume.refresh)
        self.assertRaises(exception.SnapshotNotFound, snapshot.refresh)


# File: cinder-27.0.0/cinder/tests/unit/test_volume_configuration.py

# Copyright (c) 2012 Rackspace Hosting
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
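# Illustrative sketch (editorial addition, not part of the original module):
# the Configuration wrapper tested below registers the options it is given
# under the shared 'backend_defaults' group and under the named backend
# group, and a value set on the backend group wins over the shared default.
# 'example_opt' and 'example_backend' are hypothetical names used only here.
def _example_configuration_lookup():
    """Sketch of the lookup order exercised by VolumeConfigurationTest."""
    from oslo_config import cfg
    from cinder.volume import configuration

    opts = [cfg.StrOpt('example_opt', default='DEFAULT')]
    conf = configuration.Configuration(opts, config_group='example_backend')

    default_value = conf.example_opt          # 'DEFAULT', via backend_defaults
    cfg.CONF.set_override('example_opt', 'shared', group='backend_defaults')
    cfg.CONF.set_override('example_opt', 'specific', group='example_backend')
    backend_value = conf.example_opt          # 'specific': the backend group wins
    missing = conf.safe_get('no_such_opt')    # None instead of raising
    return default_value, backend_value, missing
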
"""Tests for the configuration wrapper in volume drivers.""" from oslo_config import cfg from cinder.tests.unit import test from cinder.volume import configuration volume_opts = [ cfg.StrOpt('str_opt', default='STR_OPT'), cfg.BoolOpt('bool_opt', default=False) ] more_volume_opts = [ cfg.IntOpt('int_opt', default=1), ] CONF = cfg.CONF CONF.register_opts(volume_opts) CONF.register_opts(more_volume_opts) class VolumeConfigurationTest(test.TestCase): def test_group_grafts_opts(self): c = configuration.Configuration(volume_opts, config_group='foo') self.assertEqual(c.str_opt, 'STR_OPT') self.assertEqual(c.bool_opt, False) self.assertEqual(c.str_opt, CONF.backend_defaults.str_opt) self.assertEqual(c.bool_opt, CONF.backend_defaults.bool_opt) self.assertIsNone(CONF.foo.str_opt) self.assertIsNone(CONF.foo.bool_opt) def test_opts_no_group(self): c = configuration.Configuration(volume_opts) self.assertEqual(c.str_opt, CONF.str_opt) self.assertEqual(c.bool_opt, CONF.bool_opt) def test_grafting_multiple_opts(self): c = configuration.Configuration(volume_opts, config_group='foo') c.append_config_values(more_volume_opts) self.assertEqual(c.str_opt, 'STR_OPT') self.assertEqual(c.bool_opt, False) self.assertEqual(c.int_opt, 1) # We get the right values, but they are coming from the backend_default # group of CONF no the 'foo' one. self.assertEqual(c.str_opt, CONF.backend_defaults.str_opt) self.assertEqual(c.bool_opt, CONF.backend_defaults.bool_opt) self.assertEqual(c.int_opt, CONF.backend_defaults.int_opt) self.assertIsNone(CONF.foo.str_opt) self.assertIsNone(CONF.foo.bool_opt) self.assertIsNone(CONF.foo.int_opt) def test_safe_get(self): c = configuration.Configuration(volume_opts, config_group='foo') self.assertIsNone(c.safe_get('none_opt')) def test_backend_specific_value(self): c = configuration.Configuration(volume_opts, config_group='foo') # Set some new non-default value CONF.set_override('str_opt', 'bar', group='backend_defaults') actual_value = c.str_opt self.assertEqual('bar', actual_value) CONF.set_override('str_opt', 'notbar', group='foo') actual_value = c.str_opt # Make sure that we pick up the backend value and not the shared group # value... self.assertEqual('notbar', actual_value) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/test_volume_glance_metadata.py0000664000175000017500000002237300000000000024554 0ustar00zuulzuul00000000000000# Copyright (c) 2011 Zadara Storage Inc. # Copyright (c) 2011 OpenStack Foundation # Copyright 2011 University of Southern California # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
""" Unit Tests for volume types extra specs code """ from cinder import context from cinder import db from cinder import exception from cinder import objects from cinder.tests.unit import fake_constants as fake from cinder.tests.unit import test class VolumeGlanceMetadataTestCase(test.TestCase): def setUp(self): super(VolumeGlanceMetadataTestCase, self).setUp() self.ctxt = context.get_admin_context() objects.register_all() def test_vol_glance_metadata_bad_vol_id(self): ctxt = context.get_admin_context() self.assertRaises(exception.VolumeNotFound, db.volume_glance_metadata_create, ctxt, fake.VOLUME_ID, 'key1', 'value1') self.assertRaises(exception.VolumeNotFound, db.volume_glance_metadata_get, ctxt, fake.VOLUME_ID) db.volume_glance_metadata_delete_by_volume(ctxt, fake.VOLUME2_ID) def test_vol_update_glance_metadata(self): ctxt = context.get_admin_context() db.volume_create(ctxt, {'id': fake.VOLUME_ID, 'volume_type_id': fake.VOLUME_TYPE_ID}) db.volume_create(ctxt, {'id': fake.VOLUME2_ID, 'volume_type_id': fake.VOLUME_TYPE_ID}) db.volume_glance_metadata_create(ctxt, fake.VOLUME_ID, 'key1', 'value1') db.volume_glance_metadata_create(ctxt, fake.VOLUME2_ID, 'key1', 'value1') db.volume_glance_metadata_create(ctxt, fake.VOLUME2_ID, 'key2', 'value2') db.volume_glance_metadata_create(ctxt, fake.VOLUME2_ID, 'key3', 123) expected_metadata_1 = {'volume_id': fake.VOLUME_ID, 'key': 'key1', 'value': 'value1'} metadata = db.volume_glance_metadata_get(ctxt, fake.VOLUME_ID) self.assertEqual(1, len(metadata)) for key, value in expected_metadata_1.items(): self.assertEqual(value, metadata[0][key]) expected_metadata_2 = ({'volume_id': fake.VOLUME2_ID, 'key': 'key1', 'value': 'value1'}, {'volume_id': fake.VOLUME2_ID, 'key': 'key2', 'value': 'value2'}, {'volume_id': fake.VOLUME2_ID, 'key': 'key3', 'value': '123'}) metadata = db.volume_glance_metadata_get(ctxt, fake.VOLUME2_ID) self.assertEqual(3, len(metadata)) for expected, meta in zip(expected_metadata_2, metadata): for key, value in expected.items(): self.assertEqual(value, meta[key]) self.assertRaises(exception.GlanceMetadataExists, db.volume_glance_metadata_create, ctxt, fake.VOLUME_ID, 'key1', 'value1a') metadata = db.volume_glance_metadata_get(ctxt, fake.VOLUME_ID) self.assertEqual(1, len(metadata)) for key, value in expected_metadata_1.items(): self.assertEqual(value, metadata[0][key]) def test_vols_get_glance_metadata(self): ctxt = context.get_admin_context() db.volume_create(ctxt, {'id': fake.VOLUME_ID, 'volume_type_id': fake.VOLUME_TYPE_ID}) db.volume_create(ctxt, {'id': fake.VOLUME2_ID, 'volume_type_id': fake.VOLUME_TYPE_ID}) db.volume_create(ctxt, {'id': '3', 'volume_type_id': fake.VOLUME_TYPE_ID}) db.volume_glance_metadata_create(ctxt, fake.VOLUME_ID, 'key1', 'value1') db.volume_glance_metadata_create(ctxt, fake.VOLUME2_ID, 'key2', 'value2') db.volume_glance_metadata_create(ctxt, fake.VOLUME2_ID, 'key22', 'value22') metadata = db.volume_glance_metadata_get_all(ctxt) self.assertEqual(3, len(metadata)) self._assert_metadata_equals(fake.VOLUME_ID, 'key1', 'value1', metadata[0]) self._assert_metadata_equals(fake.VOLUME2_ID, 'key2', 'value2', metadata[1]) self._assert_metadata_equals(fake.VOLUME2_ID, 'key22', 'value22', metadata[2]) def _assert_metadata_equals(self, volume_id, key, value, observed): self.assertEqual(volume_id, observed.volume_id) self.assertEqual(key, observed.key) self.assertEqual(value, observed.value) def test_vol_delete_glance_metadata(self): ctxt = context.get_admin_context() db.volume_create(ctxt, {'id': fake.VOLUME_ID, 
                                'volume_type_id': fake.VOLUME_TYPE_ID})
        db.volume_glance_metadata_delete_by_volume(ctxt, fake.VOLUME_ID)

        db.volume_glance_metadata_create(ctxt, fake.VOLUME_ID,
                                         'key1', 'value1')
        db.volume_glance_metadata_delete_by_volume(ctxt, fake.VOLUME_ID)
        self.assertRaises(exception.GlanceMetadataNotFound,
                          db.volume_glance_metadata_get,
                          ctxt, fake.VOLUME_ID)

    def test_vol_glance_metadata_copy_to_snapshot(self):
        ctxt = context.get_admin_context()
        db.volume_create(ctxt, {'id': fake.VOLUME_ID,
                                'volume_type_id': fake.VOLUME_TYPE_ID})
        snap = objects.Snapshot(ctxt, volume_id=fake.VOLUME_ID)
        snap.create()
        db.volume_glance_metadata_create(ctxt, fake.VOLUME_ID, 'key1',
                                         'value1')
        db.volume_glance_metadata_copy_to_snapshot(ctxt, snap.id,
                                                   fake.VOLUME_ID)

        expected_meta = {'snapshot_id': snap.id,
                         'key': 'key1',
                         'value': 'value1'}

        for meta in db.volume_snapshot_glance_metadata_get(ctxt, snap.id):
            for (key, value) in expected_meta.items():
                self.assertEqual(value, meta[key])
        snap.destroy()

    def test_vol_glance_metadata_copy_from_volume_to_volume(self):
        ctxt = context.get_admin_context()
        db.volume_create(ctxt, {'id': fake.VOLUME_ID,
                                'volume_type_id': fake.VOLUME_TYPE_ID})
        db.volume_create(ctxt, {'id': fake.VOLUME2_ID,
                                'source_volid': fake.VOLUME_ID,
                                'volume_type_id': fake.VOLUME_TYPE_ID})
        db.volume_glance_metadata_create(ctxt, fake.VOLUME_ID, 'key1',
                                         'value1')
        db.volume_glance_metadata_copy_from_volume_to_volume(ctxt,
                                                             fake.VOLUME_ID,
                                                             fake.VOLUME2_ID)

        expected_meta = {'key': 'key1',
                         'value': 'value1'}

        for meta in db.volume_glance_metadata_get(ctxt, fake.VOLUME2_ID):
            for (key, value) in expected_meta.items():
                self.assertEqual(value, meta[key])

    def test_volume_glance_metadata_copy_to_volume(self):
        vol1 = db.volume_create(self.ctxt,
                                {'volume_type_id': fake.VOLUME_TYPE_ID})
        vol2 = db.volume_create(self.ctxt,
                                {'volume_type_id': fake.VOLUME_TYPE_ID})
        db.volume_glance_metadata_create(self.ctxt, vol1['id'], 'm1', 'v1')
        snapshot = objects.Snapshot(self.ctxt, volume_id=vol1['id'])
        snapshot.create()
        db.volume_glance_metadata_copy_to_snapshot(self.ctxt, snapshot.id,
                                                   vol1['id'])
        db.volume_glance_metadata_copy_to_volume(self.ctxt, vol2['id'],
                                                 snapshot.id)
        metadata = db.volume_glance_metadata_get(self.ctxt, vol2['id'])
        metadata = {m['key']: m['value'] for m in metadata}
        self.assertEqual({'m1': 'v1'}, metadata)

    def test_volume_snapshot_glance_metadata_get_nonexistent(self):
        vol = db.volume_create(self.ctxt,
                               {'volume_type_id': fake.VOLUME_TYPE_ID})
        snapshot = objects.Snapshot(self.ctxt, volume_id=vol['id'])
        snapshot.create()
        self.assertRaises(exception.GlanceMetadataNotFound,
                          db.volume_snapshot_glance_metadata_get,
                          self.ctxt, snapshot.id)
        snapshot.destroy()


# File: cinder-27.0.0/cinder/tests/unit/test_volume_throttling.py

# Copyright (c) 2015 Hitachi Data Systems, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
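# Illustrative sketch (editorial addition): the tests below check the command
# prefix and cgroup limits produced by the throttling helpers.  Only the
# Throttle/BlkioCgroup subcommand() API is taken from the tests themselves;
# the cgroup name and the dd command are made up for illustration, and
# BlkioCgroup needs privileged cgroup access in a real deployment.
def _example_throttled_copy_command(src_path, dst_path, bps_limit=None):
    """Sketch of wrapping a volume copy with an optional blkio throttle."""
    from cinder.volume import throttling

    # Without a limit the base Throttle is a no-op (empty prefix); with one,
    # BlkioCgroup programs per-device read/write bps limits and prefixes the
    # command with 'cgexec -g blkio:<group>'.
    throttle = (throttling.BlkioCgroup(bps_limit, 'example_copy_group')
                if bps_limit else throttling.Throttle())
    with throttle.subcommand(src_path, dst_path) as cmd:
        return cmd['prefix'] + ['dd', 'if=' + src_path, 'of=' + dst_path,
                                'bs=1M']
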
"""Tests for volume copy throttling helpers.""" from unittest import mock from cinder.tests.unit import test from cinder import utils from cinder.volume import throttling class ThrottleTestCase(test.TestCase): def test_NoThrottle(self): with throttling.Throttle().subcommand('volume1', 'volume2') as cmd: self.assertEqual([], cmd['prefix']) @mock.patch.object(utils, 'get_blkdev_major_minor') @mock.patch('cinder.privsep.cgroup.cgroup_create') @mock.patch('cinder.privsep.cgroup.cgroup_limit') def test_BlkioCgroup(self, mock_limit, mock_create, mock_major_minor): def fake_get_blkdev_major_minor(path): return {'src_volume1': "253:0", 'dst_volume1': "253:1", 'src_volume2': "253:2", 'dst_volume2': "253:3"}[path] mock_major_minor.side_effect = fake_get_blkdev_major_minor throttle = throttling.BlkioCgroup(1024, 'fake_group') with throttle.subcommand('src_volume1', 'dst_volume1') as cmd: self.assertEqual(['cgexec', '-g', 'blkio:fake_group'], cmd['prefix']) # a nested job with throttle.subcommand('src_volume2', 'dst_volume2') as cmd: self.assertEqual(['cgexec', '-g', 'blkio:fake_group'], cmd['prefix']) mock_create.assert_has_calls([mock.call('fake_group')]) mock_limit.assert_has_calls([ mock.call('fake_group', 'read', '253:0', 1024), mock.call('fake_group', 'write', '253:1', 1024), # a nested job starts; bps limit are set to the half mock.call('fake_group', 'read', '253:0', 512), mock.call('fake_group', 'read', '253:2', 512), mock.call('fake_group', 'write', '253:1', 512), mock.call('fake_group', 'write', '253:3', 512), # a nested job ends; bps limit is resumed mock.call('fake_group', 'read', '253:0', 1024), mock.call('fake_group', 'write', '253:1', 1024)]) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/test_volume_transfer.py0000664000175000017500000005251500000000000023310 0ustar00zuulzuul00000000000000# Copyright (c) 2013 OpenStack Foundation # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""Unit Tests for volume transfers.""" from unittest import mock import ddt from oslo_utils import timeutils from cinder import context from cinder import db from cinder.db.sqlalchemy import api as db_api from cinder.db.sqlalchemy import models from cinder import exception from cinder import objects from cinder import quota from cinder.tests.unit.api.v2 import fakes as v2_fakes from cinder.tests.unit import fake_constants as fake from cinder.tests.unit import test from cinder.tests.unit import utils from cinder.transfer import api as transfer_api QUOTAS = quota.QUOTAS @ddt.ddt class VolumeTransferTestCase(test.TestCase): """Test cases for volume transfer code.""" def setUp(self): super(VolumeTransferTestCase, self).setUp() self.ctxt = context.RequestContext(user_id=fake.USER_ID, project_id=fake.PROJECT_ID) self.updated_at = timeutils.utcnow() def test_transfer_volume_create_delete(self): tx_api = transfer_api.API() volume = utils.create_volume(self.ctxt, updated_at=self.updated_at) with mock.patch('cinder.volume.volume_utils.notify_about_volume_usage' ) as mock_notify: response = tx_api.create(self.ctxt, volume.id, 'Description') calls = [mock.call(self.ctxt, mock.ANY, "transfer.create.start"), mock.call(self.ctxt, mock.ANY, "transfer.create.end")] mock_notify.assert_has_calls(calls) self.assertEqual(2, mock_notify.call_count) volume = objects.Volume.get_by_id(self.ctxt, volume.id) self.assertEqual('awaiting-transfer', volume['status'], 'Unexpected state') with mock.patch('cinder.volume.volume_utils.notify_about_volume_usage' ) as mock_notify: tx_api.delete(self.ctxt, response['id']) calls = [mock.call(self.ctxt, mock.ANY, "transfer.delete.start"), mock.call(self.ctxt, mock.ANY, "transfer.delete.end")] mock_notify.assert_has_calls(calls) self.assertEqual(2, mock_notify.call_count) volume = objects.Volume.get_by_id(self.ctxt, volume.id) self.assertEqual('available', volume['status'], 'Unexpected state') def test_transfer_invalid_volume(self): tx_api = transfer_api.API() volume = utils.create_volume(self.ctxt, status='in-use', updated_at=self.updated_at) self.assertRaises(exception.InvalidVolume, tx_api.create, self.ctxt, volume.id, 'Description') volume = objects.Volume.get_by_id(self.ctxt, volume.id) self.assertEqual('in-use', volume['status'], 'Unexpected state') def test_transfer_invalid_encrypted_volume(self): tx_api = transfer_api.API() volume = utils.create_volume(self.ctxt, updated_at=self.updated_at) db.volume_update(self.ctxt, volume.id, {'encryption_key_id': fake.ENCRYPTION_KEY_ID}) self.assertRaises(exception.InvalidVolume, tx_api.create, self.ctxt, volume.id, 'Description') def test_transfer_accept_invalid_authkey(self): svc = self.start_service('volume', host='test_host') self.addCleanup(svc.stop) tx_api = transfer_api.API() volume = utils.create_volume(self.ctxt, updated_at=self.updated_at) transfer = tx_api.create(self.ctxt, volume.id, 'Description') volume = objects.Volume.get_by_id(self.ctxt, volume.id) self.assertEqual('awaiting-transfer', volume['status'], 'Unexpected state') self.assertRaises(exception.TransferNotFound, tx_api.accept, self.ctxt, '2', transfer['auth_key']) self.assertRaises(exception.InvalidAuthKey, tx_api.accept, self.ctxt, transfer['id'], 'wrong') def test_transfer_accept_invalid_volume(self): svc = self.start_service('volume', host='test_host') self.addCleanup(svc.stop) tx_api = transfer_api.API() volume = utils.create_volume(self.ctxt, updated_at=self.updated_at, volume_type_id=self.vt['id']) with 
mock.patch('cinder.volume.volume_utils.notify_about_volume_usage' ) as mock_notify: transfer = tx_api.create(self.ctxt, volume.id, 'Description') calls = [mock.call(self.ctxt, mock.ANY, "transfer.create.start"), mock.call(self.ctxt, mock.ANY, "transfer.create.end")] mock_notify.assert_has_calls(calls) self.assertEqual(2, mock_notify.call_count) volume = objects.Volume.get_by_id(self.ctxt, volume.id) self.assertEqual('awaiting-transfer', volume['status'], 'Unexpected state') volume.status = 'wrong' volume.save() with mock.patch('cinder.volume.volume_utils.notify_about_volume_usage' ) as mock_notify: self.assertRaises(exception.InvalidVolume, tx_api.accept, self.ctxt, transfer['id'], transfer['auth_key']) # Because the InvalidVolume exception is raised in tx_api, so # there is only transfer.accept.start called and missing # transfer.accept.end. calls = [mock.call(self.ctxt, mock.ANY, "transfer.accept.start")] mock_notify.assert_has_calls(calls) self.assertEqual(1, mock_notify.call_count) volume.status = 'awaiting-transfer' volume.save() def test_transfer_accept_volume_in_consistencygroup(self): svc = self.start_service('volume', host='test_host') self.addCleanup(svc.stop) tx_api = transfer_api.API() consistencygroup = utils.create_consistencygroup(self.ctxt) volume = utils.create_volume(self.ctxt, updated_at=self.updated_at, consistencygroup_id= consistencygroup.id) transfer = tx_api.create(self.ctxt, volume.id, 'Description') self.assertRaises(exception.InvalidVolume, tx_api.accept, self.ctxt, transfer['id'], transfer['auth_key']) @mock.patch.object(QUOTAS, "limit_check") @mock.patch.object(QUOTAS, "reserve") @mock.patch.object(QUOTAS, "add_volume_type_opts") def test_transfer_accept(self, mock_quota_voltype, mock_quota_reserve, mock_quota_limit): svc = self.start_service('volume', host='test_host') self.addCleanup(svc.stop) tx_api = transfer_api.API() volume = utils.create_volume(self.ctxt, volume_type_id=fake.VOLUME_TYPE_ID, updated_at=self.updated_at) with mock.patch('cinder.volume.volume_utils.notify_about_volume_usage' ) as mock_notify: transfer = tx_api.create(self.ctxt, volume.id, 'Description') calls = [mock.call(self.ctxt, mock.ANY, "transfer.create.start"), mock.call(self.ctxt, mock.ANY, "transfer.create.end")] mock_notify.assert_has_calls(calls) self.assertEqual(2, mock_notify.call_count) self.ctxt.user_id = fake.USER2_ID self.ctxt.project_id = fake.PROJECT2_ID with mock.patch('cinder.volume.volume_utils.notify_about_volume_usage' ) as mock_notify: response = tx_api.accept(self.ctxt, transfer['id'], transfer['auth_key']) calls = [mock.call(self.ctxt, mock.ANY, "transfer.accept.start"), mock.call(self.ctxt, mock.ANY, "transfer.accept.end")] mock_notify.assert_has_calls(calls) self.assertEqual(2, mock_notify.call_count) volume = objects.Volume.get_by_id(self.ctxt, volume.id) self.assertEqual(fake.PROJECT2_ID, volume.project_id) self.assertEqual(fake.USER2_ID, volume.user_id) self.assertEqual(response['volume_id'], volume.id, 'Unexpected volume id in response.') self.assertEqual(response['id'], transfer['id'], 'Unexpected transfer id in response.') # Check QUOTAS reservation calls # QUOTAS.add_volume_type_opts reserve_opt = {'volumes': 1, 'gigabytes': 1} release_opt = {'volumes': -1, 'gigabytes': -1} calls = [mock.call(self.ctxt, reserve_opt, fake.VOLUME_TYPE_ID), mock.call(self.ctxt, release_opt, fake.VOLUME_TYPE_ID)] mock_quota_voltype.assert_has_calls(calls) # QUOTAS.reserve calls = [mock.call(mock.ANY, **reserve_opt), mock.call(mock.ANY, project_id=fake.PROJECT_ID, 
**release_opt)] mock_quota_reserve.assert_has_calls(calls) # QUOTAS.limit_check values = {'per_volume_gigabytes': 1} mock_quota_limit.assert_called_once_with(self.ctxt, project_id=fake.PROJECT2_ID, **values) @mock.patch.object(QUOTAS, "reserve") @mock.patch.object(QUOTAS, "add_volume_type_opts") def test_transfer_accept_over_quota(self, mock_quota_voltype, mock_quota_reserve): svc = self.start_service('volume', host='test_host') self.addCleanup(svc.stop) tx_api = transfer_api.API() volume = utils.create_volume(self.ctxt, volume_type_id=fake.VOLUME_TYPE_ID, updated_at=self.updated_at) with mock.patch('cinder.volume.volume_utils.notify_about_volume_usage' ) as mock_notify: transfer = tx_api.create(self.ctxt, volume.id, 'Description') self.assertEqual(2, mock_notify.call_count) fake_overs = ['volumes_lvmdriver-3'] fake_quotas = {'gigabytes_lvmdriver-3': 1, 'volumes_lvmdriver-3': 10} fake_usages = {'gigabytes_lvmdriver-3': {'reserved': 0, 'in_use': 1}, 'volumes_lvmdriver-3': {'reserved': 0, 'in_use': 1}} mock_quota_reserve.side_effect = exception.OverQuota( overs=fake_overs, quotas=fake_quotas, usages=fake_usages) self.ctxt.user_id = fake.USER2_ID self.ctxt.project_id = fake.PROJECT2_ID with mock.patch('cinder.volume.volume_utils.notify_about_volume_usage' ) as mock_notify: self.assertRaises(exception.VolumeLimitExceeded, tx_api.accept, self.ctxt, transfer['id'], transfer['auth_key']) # notification of transfer.accept is sent only after quota check # passes self.assertEqual(0, mock_notify.call_count) @mock.patch.object(QUOTAS, "limit_check") def test_transfer_accept_over_quota_check_limit(self, mock_quota_limit): svc = self.start_service('volume', host='test_host') self.addCleanup(svc.stop) tx_api = transfer_api.API() volume = utils.create_volume(self.ctxt, volume_type_id=fake.VOLUME_TYPE_ID, updated_at=self.updated_at) transfer = tx_api.create(self.ctxt, volume.id, 'Description') fake_overs = ['per_volume_gigabytes'] fake_quotas = {'per_volume_gigabytes': 1} fake_usages = {} mock_quota_limit.side_effect = exception.OverQuota( overs=fake_overs, quotas=fake_quotas, usages=fake_usages) self.ctxt.user_id = fake.USER2_ID self.ctxt.project_id = fake.PROJECT2_ID with mock.patch('cinder.volume.volume_utils.notify_about_volume_usage' ) as mock_notify: self.assertRaises(exception.VolumeSizeExceedsLimit, tx_api.accept, self.ctxt, transfer['id'], transfer['auth_key']) # notification of transfer.accept is sent only after quota check # passes self.assertEqual(0, mock_notify.call_count) def test_transfer_get(self): tx_api = transfer_api.API() volume = utils.create_volume(self.ctxt, updated_at=self.updated_at) transfer = tx_api.create(self.ctxt, volume['id'], 'Description') t = tx_api.get(self.ctxt, transfer['id']) self.assertEqual(t['id'], transfer['id'], 'Unexpected transfer id') ts = tx_api.get_all(self.ctxt) self.assertEqual(1, len(ts), 'Unexpected number of transfers.') nctxt = context.RequestContext(user_id=fake.USER2_ID, project_id=fake.PROJECT2_ID) utils.create_volume(nctxt, updated_at=self.updated_at) self.assertRaises(exception.TransferNotFound, tx_api.get, nctxt, transfer['id']) ts = tx_api.get_all(nctxt) self.assertEqual(0, len(ts), 'Unexpected transfers listed.') @ddt.data({'all_tenants': '1', 'name': 'transfer1'}, {'all_tenants': 'true', 'name': 'transfer1'}, {'all_tenants': 'false', 'name': 'transfer1'}, {'all_tenants': '0', 'name': 'transfer1'}, {'name': 'transfer1'}) @mock.patch.object(context.RequestContext, 'authorize') @mock.patch('cinder.db.transfer_get_all') 
@mock.patch('cinder.db.transfer_get_all_by_project') def test_get_all_transfers_non_admin(self, search_opts, get_all_by_project, get_all, auth_mock): ctxt = context.RequestContext(user_id=None, is_admin=False, project_id=mock.sentinel.project_id, read_deleted='no', overwrite=False) tx_api = transfer_api.API() res = tx_api.get_all(ctxt, mock.sentinel.marker, mock.sentinel.limit, mock.sentinel.sort_keys, mock.sentinel.sort_dirs, search_opts, mock.sentinel.offset) auth_mock.assert_called_once_with(transfer_api.policy.GET_ALL_POLICY) get_all.assert_not_called() get_all_by_project.assert_called_once_with( ctxt, mock.sentinel.project_id, filters={'name': 'transfer1'}, limit=mock.sentinel.limit, marker=mock.sentinel.marker, offset=mock.sentinel.offset, sort_dirs=mock.sentinel.sort_dirs, sort_keys=mock.sentinel.sort_keys) self.assertEqual(get_all_by_project.return_value, res) def test_delete_transfer_with_deleted_volume(self): # create a volume volume = utils.create_volume(self.ctxt, updated_at=self.updated_at) # create a transfer tx_api = transfer_api.API() with mock.patch('cinder.volume.volume_utils.notify_about_volume_usage' ) as mock_notify: transfer = tx_api.create(self.ctxt, volume['id'], 'Description') t = tx_api.get(self.ctxt, transfer['id']) calls = [mock.call(self.ctxt, mock.ANY, "transfer.create.start"), mock.call(self.ctxt, mock.ANY, "transfer.create.end")] mock_notify.assert_has_calls(calls) self.assertEqual(2, mock_notify.call_count) self.assertEqual(t['id'], transfer['id'], 'Unexpected transfer id') # force delete volume volume.destroy() # Make sure transfer has been deleted. self.assertRaises(exception.TransferNotFound, tx_api.get, self.ctxt, transfer['id']) def test_transfer_accept_with_snapshots(self): svc = self.start_service('volume', host='test_host') self.addCleanup(svc.stop) tx_api = transfer_api.API() volume = utils.create_volume(self.ctxt, volume_type_id=fake.VOLUME_TYPE_ID, updated_at=self.updated_at) utils.create_volume_type(self.ctxt.elevated(), id=fake.VOLUME_TYPE_ID, name="test_type") utils.create_snapshot(self.ctxt, volume.id, status='available') with mock.patch('cinder.volume.volume_utils.notify_about_volume_usage' ) as mock_notify: transfer = tx_api.create(self.ctxt, volume.id, 'Description') calls = [mock.call(self.ctxt, mock.ANY, "transfer.create.start"), mock.call(self.ctxt, mock.ANY, "transfer.create.end")] mock_notify.assert_has_calls(calls) # The notify_about_volume_usage is called twice at create(). self.assertEqual(2, mock_notify.call_count) # Get volume and snapshot quota before accept self.ctxt.user_id = fake.USER2_ID self.ctxt.project_id = fake.PROJECT2_ID usages = db.quota_usage_get_all_by_project(self.ctxt, self.ctxt.project_id) self.assertEqual(0, usages.get('volumes', {}).get('in_use', 0)) self.assertEqual(0, usages.get('snapshots', {}).get('in_use', 0)) with mock.patch('cinder.volume.volume_utils.notify_about_volume_usage' ) as mock_notify: tx_api.accept(self.ctxt, transfer['id'], transfer['auth_key']) calls = [mock.call(self.ctxt, mock.ANY, "transfer.accept.start"), mock.call(self.ctxt, mock.ANY, "transfer.accept.end")] mock_notify.assert_has_calls(calls) # The notify_about_volume_usage is called twice at accept(). 
self.assertEqual(2, mock_notify.call_count) volume = objects.Volume.get_by_id(self.ctxt, volume.id) self.assertEqual(fake.PROJECT2_ID, volume.project_id) self.assertEqual(fake.USER2_ID, volume.user_id) # Get volume and snapshot quota after accept self.ctxt.user_id = fake.USER2_ID self.ctxt.project_id = fake.PROJECT2_ID usages = db.quota_usage_get_all_by_project(self.ctxt, self.ctxt.project_id) self.assertEqual(1, usages.get('volumes', {}).get('in_use', 0)) self.assertEqual(1, usages.get('snapshots', {}).get('in_use', 0)) def test_transfer_accept_with_snapshots_invalid(self): svc = self.start_service('volume', host='test_host') self.addCleanup(svc.stop) tx_api = transfer_api.API() volume = utils.create_volume(self.ctxt, volume_type_id=fake.VOLUME_TYPE_ID, updated_at=self.updated_at) utils.create_volume_type(self.ctxt.elevated(), id=fake.VOLUME_TYPE_ID, name="test_type") utils.create_snapshot(self.ctxt, volume.id, status='deleting') self.assertRaises(exception.InvalidSnapshot, tx_api.create, self.ctxt, volume.id, 'Description') @mock.patch('cinder.volume.volume_utils.notify_about_volume_usage') @mock.patch.object(db, 'volume_type_get', v2_fakes.fake_volume_type_get) @mock.patch.object(quota.QUOTAS, 'reserve') def test_transfer_accept_with_detail_records(self, mock_notify, mock_type_get): svc = self.start_service('volume', host='test_host') self.addCleanup(svc.stop) tx_api = transfer_api.API() volume = utils.create_volume(self.ctxt, updated_at=self.updated_at) transfer = tx_api.create(self.ctxt, volume.id, 'Description') self.assertEqual(volume.project_id, transfer['source_project_id']) self.assertIsNone(transfer['destination_project_id']) self.assertFalse(transfer['accepted']) # Get volume and snapshot quota before accept self.ctxt.user_id = fake.USER2_ID self.ctxt.project_id = fake.PROJECT2_ID tx_api.accept(self.ctxt, transfer['id'], transfer['auth_key']) with db_api.main_context_manager.reader.using(self.ctxt): xfer = db_api.model_query( self.ctxt, models.Transfer, read_deleted='yes' ).filter_by(id=transfer['id']).first() self.assertEqual(volume.project_id, xfer['source_project_id']) self.assertTrue(xfer['accepted']) self.assertEqual(fake.PROJECT2_ID, xfer['destination_project_id']) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/test_volume_types.py0000664000175000017500000010352500000000000022626 0ustar00zuulzuul00000000000000# Copyright (c) 2011 Zadara Storage Inc. # Copyright (c) 2011 OpenStack Foundation # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
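# --- Illustrative sketch (not part of the cinder source tree) ---
# The transfer tests above repeatedly patch notify_about_volume_usage and
# then assert on the "<event>.start" / "<event>.end" pair with
# assert_has_calls plus a call_count check.  A minimal standalone version of
# that assertion pattern, using only the standard library (the "ctxt" and
# "volume-1" values are placeholders, not cinder objects), looks like this:
from unittest import mock

notify = mock.MagicMock()
notify("ctxt", "volume-1", "transfer.create.start")
notify("ctxt", "volume-1", "transfer.create.end")
notify.assert_has_calls([
    mock.call("ctxt", "volume-1", "transfer.create.start"),
    mock.call("ctxt", "volume-1", "transfer.create.end"),
])
# call_count pins the total so no unexpected extra notifications slip through.
assert notify.call_count == 2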
"""Unit Tests for volume types code.""" import datetime import time from unittest import mock from oslo_db import exception as db_exc from oslo_utils import uuidutils from cinder import context from cinder import db from cinder.db.sqlalchemy import api as db_api from cinder.db.sqlalchemy import models from cinder import exception from cinder.tests.unit import conf_fixture from cinder.tests.unit import fake_constants as fake from cinder.tests.unit import test from cinder.volume import qos_specs from cinder.volume import volume_types class VolumeTypeTestCase(test.TestCase): """Test cases for volume type code.""" def setUp(self): super(VolumeTypeTestCase, self).setUp() self.ctxt = context.get_admin_context() self.vol_type1_name = str(int(time.time())) self.vol_type1_specs = dict(type="physical drive", drive_type="SAS", size="300", rpm="7200", visible="True") self.vol_type1_description = self.vol_type1_name + '_desc' def test_volume_type_destroy_with_encryption(self): volume_type = volume_types.create(self.ctxt, "type1") volume_type_id = volume_type.get('id') encryption = { 'control_location': 'front-end', 'provider': 'fake_provider', } db_api.volume_type_encryption_create(self.ctxt, volume_type_id, encryption) ret = volume_types.get_volume_type_encryption(self.ctxt, volume_type_id) self.assertIsNotNone(ret) volume_types.destroy(self.ctxt, volume_type_id) ret = volume_types.get_volume_type_encryption(self.ctxt, volume_type_id) self.assertIsNone(ret) def test_get_volume_type_by_name_with_uuid_name(self): """Ensure volume types can be created and found.""" uuid_format_name = uuidutils.generate_uuid() volume_types.create(self.ctxt, uuid_format_name, self.vol_type1_specs, description=self.vol_type1_description) type_ref = volume_types.get_by_name_or_id(self.ctxt, uuid_format_name) self.assertEqual(uuid_format_name, type_ref['name']) def test_volume_type_create_then_destroy(self): """Ensure volume types can be created and deleted.""" project_id = fake.PROJECT_ID prev_all_vtypes = volume_types.get_all_types(self.ctxt) # create type_ref = volume_types.create(self.ctxt, self.vol_type1_name, self.vol_type1_specs, description=self.vol_type1_description, projects=[project_id], is_public=False) new = volume_types.get_volume_type_by_name(self.ctxt, self.vol_type1_name) self.assertEqual(self.vol_type1_description, new['description']) for k, v in self.vol_type1_specs.items(): self.assertEqual(v, new['extra_specs'][k], 'one of fields does not match') new_all_vtypes = volume_types.get_all_types(self.ctxt) self.assertEqual(len(prev_all_vtypes) + 1, len(new_all_vtypes), 'drive type was not created') # Assert that volume type is associated to a project vol_type_access = db.volume_type_access_get_all(self.ctxt, type_ref['id']) self.assertIn(project_id, [a.project_id for a in vol_type_access]) # update new_type_name = self.vol_type1_name + '_updated' new_type_desc = self.vol_type1_description + '_updated' volume_types.update(self.ctxt, type_ref.id, new_type_name, new_type_desc) type_ref_updated = volume_types.get_volume_type(self.ctxt, type_ref.id) self.assertEqual(new_type_name, type_ref_updated['name']) self.assertEqual(new_type_desc, type_ref_updated['description']) # destroy volume_types.destroy(self.ctxt, type_ref['id']) new_all_vtypes = volume_types.get_all_types(self.ctxt) self.assertEqual(prev_all_vtypes, new_all_vtypes, 'drive type was not deleted') # Assert that associated volume type access is deleted successfully # on destroying the volume type with db_api.main_context_manager.reader.using(self.ctxt): 
vol_type_access = db_api._volume_type_access_query( self.ctxt ).filter_by(volume_type_id=type_ref['id']).all() self.assertEqual([], vol_type_access) @mock.patch('cinder.quota.VolumeTypeQuotaEngine.' 'update_quota_resource') def test_update_volume_type_name(self, mock_update_quota): type_ref = volume_types.create(self.ctxt, self.vol_type1_name, self.vol_type1_specs, description=self.vol_type1_description) new_type_name = self.vol_type1_name + '_updated' volume_types.update(self.ctxt, type_ref.id, new_type_name, None) mock_update_quota.assert_called_once_with(self.ctxt, self.vol_type1_name, new_type_name) volume_types.destroy(self.ctxt, type_ref.id) @mock.patch('cinder.quota.VolumeTypeQuotaEngine.' 'update_quota_resource') def test_update_volume_type_name_with_db_error(self, mock_update_quota): type_ref = volume_types.create(self.ctxt, self.vol_type1_name, self.vol_type1_specs, description=self.vol_type1_description) mock_update_quota.side_effect = db_exc.DBError new_type_name = self.vol_type1_name + '_updated' description = 'new_test' is_public = False self.assertRaises(exception.VolumeTypeUpdateFailed, volume_types.update, self.ctxt, type_ref.id, new_type_name, description, is_public) mock_update_quota.assert_called_once_with(self.ctxt, self.vol_type1_name, new_type_name) new = volume_types.get_volume_type_by_name(self.ctxt, self.vol_type1_name) self.assertEqual(self.vol_type1_name, new.get('name')) self.assertEqual(self.vol_type1_description, new.get('description')) self.assertTrue(new.get('is_public')) volume_types.destroy(self.ctxt, type_ref.id) def test_volume_type_create_then_destroy_with_non_admin(self): """Ensure volume types can be created and deleted by a non-admin user. If a non-admin user is authorized at the API layer, volume type operations should be permitted. 
""" prev_all_vtypes = volume_types.get_all_types(self.ctxt) self.ctxt = context.RequestContext('fake', 'fake', is_admin=False) # create type_ref = volume_types.create(self.ctxt, self.vol_type1_name, self.vol_type1_specs, description=self.vol_type1_description) new = volume_types.get_volume_type_by_name(self.ctxt, self.vol_type1_name) self.assertEqual(self.vol_type1_description, new['description']) new_all_vtypes = volume_types.get_all_types(self.ctxt) self.assertEqual(len(prev_all_vtypes) + 1, len(new_all_vtypes), 'drive type was not created') # update new_type_name = self.vol_type1_name + '_updated' new_type_desc = self.vol_type1_description + '_updated' volume_types.update(self.ctxt, type_ref.id, new_type_name, new_type_desc) type_ref_updated = volume_types.get_volume_type(self.ctxt, type_ref.id) self.assertEqual(new_type_name, type_ref_updated['name']) self.assertEqual(new_type_desc, type_ref_updated['description']) # destroy volume_types.destroy(self.ctxt, type_ref['id']) new_all_vtypes = volume_types.get_all_types(self.ctxt) self.assertEqual(prev_all_vtypes, new_all_vtypes, 'drive type was not deleted') def test_create_volume_type_with_invalid_params(self): """Ensure exception will be returned.""" vol_type_invalid_specs = "invalid_extra_specs" self.assertRaises(exception.VolumeTypeCreateFailed, volume_types.create, self.ctxt, self.vol_type1_name, vol_type_invalid_specs) def test_get_all_volume_types(self): """Ensures that all volume types can be retrieved.""" with db_api.main_context_manager.writer.using(self.ctxt): total_volume_types = self.ctxt.session.query( models.VolumeType, ).count() vol_types = volume_types.get_all_types(self.ctxt) self.assertEqual(total_volume_types, len(vol_types)) def test_get_default_volume_type(self): """Ensures default volume type can be retrieved.""" default_vol_type = volume_types.get_default_volume_type() self.assertEqual(conf_fixture.def_vol_type, default_vol_type.get('name')) def test_get_default_volume_type_not_found(self): """Ensure setting non-existent default type raises error.""" self.flags(default_volume_type='fake_type') self.assertRaises(exception.VolumeTypeDefaultMisconfiguredError, volume_types.get_default_volume_type) def test_delete_default_volume_type(self): """Ensures default volume type cannot be deleted.""" default = volume_types.create(self.ctxt, 'default_type') self.flags(default_volume_type='default_type') self.assertRaises(exception.VolumeTypeDefaultDeletionError, volume_types.destroy, self.ctxt, default['id']) def test_delete_when_default_volume_type_not_found(self): """Ensures volume types cannot be deleted until valid default is set. """ default = volume_types.create(self.ctxt, 'default_type') self.flags(default_volume_type='fake_default') self.assertRaises(exception.VolumeTypeDefaultMisconfiguredError, volume_types.destroy, self.ctxt, default['id']) def test_default_volume_type_missing_in_db(self): """Test default volume type is missing in database. Ensures proper exception raised if default volume type is not in database. 
""" default_vol_type = volume_types.get_default_volume_type() self.assertEqual( {'created_at': default_vol_type['created_at'], 'deleted': False, 'deleted_at': None, 'description': u'Default Volume Type', 'extra_specs': {}, 'id': default_vol_type['id'], 'is_public': True, 'name': u'__DEFAULT__', 'qos_specs_id': None, 'updated_at': default_vol_type['updated_at']}, default_vol_type) def test_non_existent_vol_type_shouldnt_delete(self): """Ensures that volume type creation fails with invalid args.""" # create a dummy type as DB requires at least 1 type to perform the # delete operation volume_types.create(self.ctxt, self.vol_type1_name) self.assertRaises(exception.VolumeTypeNotFound, volume_types.destroy, self.ctxt, "sfsfsdfdfs") def test_volume_type_with_volumes_shouldnt_delete(self): """Ensures volume type deletion with associated volumes fail.""" type_ref = volume_types.create(self.ctxt, self.vol_type1_name) db.volume_create(self.ctxt, {'id': '1', 'updated_at': datetime.datetime(1, 1, 1, 1, 1, 1), 'display_description': 'Test Desc', 'size': 20, 'status': 'available', 'volume_type_id': type_ref['id']}) self.assertRaises(exception.VolumeTypeInUse, volume_types.destroy, self.ctxt, type_ref['id']) def test_repeated_vol_types_shouldnt_raise(self): """Ensures that volume duplicates don't raise.""" new_name = self.vol_type1_name + "dup" type_ref = volume_types.create(self.ctxt, new_name) volume_types.destroy(self.ctxt, type_ref['id']) type_ref = volume_types.create(self.ctxt, new_name) def test_invalid_volume_types_params(self): """Ensures that volume type creation fails with invalid args.""" self.assertRaises(exception.InvalidVolumeType, volume_types.destroy, self.ctxt, None) self.assertRaises(exception.InvalidVolumeType, volume_types.get_volume_type, self.ctxt, None) self.assertRaises(exception.InvalidVolumeType, volume_types.get_volume_type_by_name, self.ctxt, None) def test_volume_type_get_by_id_and_name(self): """Ensure volume types get returns same entry.""" volume_types.create(self.ctxt, self.vol_type1_name, self.vol_type1_specs) new = volume_types.get_volume_type_by_name(self.ctxt, self.vol_type1_name) new2 = volume_types.get_volume_type(self.ctxt, new['id']) self.assertEqual(new, new2) def test_volume_type_search_by_extra_spec(self): """Ensure volume types get by extra spec returns correct type.""" volume_types.create(self.ctxt, "type1", {"key1": "val1", "key2": "val2"}) volume_types.create(self.ctxt, "type2", {"key2": "val2", "key3": "val3"}) volume_types.create(self.ctxt, "type3", {"key3": "another_value", "key4": "val4"}) vol_types = volume_types.get_all_types( self.ctxt, filters={'extra_specs': {"key1": "val1"}}) self.assertEqual(1, len(vol_types)) self.assertIn("type1", vol_types.keys()) self.assertEqual({"key1": "val1", "key2": "val2"}, vol_types['type1']['extra_specs']) vol_types = volume_types.get_all_types( self.ctxt, filters={'extra_specs': {"key2": "val2"}}) self.assertEqual(2, len(vol_types)) self.assertIn("type1", vol_types.keys()) self.assertIn("type2", vol_types.keys()) vol_types = volume_types.get_all_types( self.ctxt, filters={'extra_specs': {"key3": "val3"}}) self.assertEqual(1, len(vol_types)) self.assertIn("type2", vol_types.keys()) def test_volume_type_search_by_extra_spec_multiple(self): """Ensure volume types get by extra spec returns correct type.""" volume_types.create(self.ctxt, "type1", {"key1": "val1", "key2": "val2", "key3": "val3"}) volume_types.create(self.ctxt, "type2", {"key2": "val2", "key3": "val3"}) volume_types.create(self.ctxt, "type3", {"key1": 
"val1", "key3": "val3", "key4": "val4"}) vol_types = volume_types.get_all_types( self.ctxt, filters={'extra_specs': {"key1": "val1", "key3": "val3"}}) self.assertEqual(2, len(vol_types)) self.assertIn("type1", vol_types.keys()) self.assertIn("type3", vol_types.keys()) self.assertEqual({"key1": "val1", "key2": "val2", "key3": "val3"}, vol_types['type1']['extra_specs']) self.assertEqual({"key1": "val1", "key3": "val3", "key4": "val4"}, vol_types['type3']['extra_specs']) def test_is_encrypted(self): volume_type = volume_types.create(self.ctxt, "type1") volume_type_id = volume_type.get('id') self.assertFalse(volume_types.is_encrypted(self.ctxt, volume_type_id)) encryption = { 'control_location': 'front-end', 'provider': 'fake_provider', } db_api.volume_type_encryption_create(self.ctxt, volume_type_id, encryption) self.assertTrue(volume_types.is_encrypted(self.ctxt, volume_type_id)) def test_add_access(self): project_id = fake.PROJECT_ID vtype = volume_types.create(self.ctxt, 'type1', is_public=False) vtype_id = vtype.get('id') volume_types.add_volume_type_access(self.ctxt, vtype_id, project_id) vtype_access = db.volume_type_access_get_all(self.ctxt, vtype_id) self.assertIn(project_id, [a.project_id for a in vtype_access]) def test_remove_access(self): project_id = fake.PROJECT_ID vtype = volume_types.create(self.ctxt, 'type1', projects=[project_id], is_public=False) vtype_id = vtype.get('id') volume_types.remove_volume_type_access(self.ctxt, vtype_id, project_id) vtype_access = db.volume_type_access_get_all(self.ctxt, vtype_id) self.assertNotIn(project_id, vtype_access) def test_add_access_with_non_admin(self): self.ctxt = context.RequestContext('fake', 'fake', is_admin=False) project_id = fake.PROJECT_ID vtype = volume_types.create(self.ctxt, 'type1', is_public=False) vtype_id = vtype.get('id') volume_types.add_volume_type_access(self.ctxt, vtype_id, project_id) vtype_access = db.volume_type_access_get_all(self.ctxt.elevated(), vtype_id) self.assertIn(project_id, [a.project_id for a in vtype_access]) def test_remove_access_with_non_admin(self): self.ctxt = context.RequestContext('fake', 'fake', is_admin=False) project_id = fake.PROJECT_ID vtype = volume_types.create(self.ctxt, 'type1', projects=[project_id], is_public=False) vtype_id = vtype.get('id') volume_types.remove_volume_type_access(self.ctxt, vtype_id, project_id) vtype_access = db.volume_type_access_get_all(self.ctxt.elevated(), vtype_id) self.assertNotIn(project_id, vtype_access) def test_get_volume_type_qos_specs(self): qos_ref = qos_specs.create(self.ctxt, 'qos-specs-1', {'k1': 'v1', 'k2': 'v2', 'k3': 'v3'}) type_ref = volume_types.create(self.ctxt, "type1", {"key2": "val2", "key3": "val3"}) res = volume_types.get_volume_type_qos_specs(type_ref['id']) self.assertIsNone(res['qos_specs']) qos_specs.associate_qos_with_type(self.ctxt, qos_ref['id'], type_ref['id']) expected = {'qos_specs': {'id': qos_ref['id'], 'name': 'qos-specs-1', 'consumer': 'back-end', 'specs': { 'k1': 'v1', 'k2': 'v2', 'k3': 'v3'}}} res = volume_types.get_volume_type_qos_specs(type_ref['id']) specs = db.qos_specs_get(self.ctxt, qos_ref['id']) expected['qos_specs']['created_at'] = specs['created_at'] self.assertDictEqual(expected, res) def test_volume_types_diff(self): # type_ref 1 and 2 have the same extra_specs, while 3 has different keyvals1 = {"key1": "val1", "key2": "val2"} keyvals2 = {"key1": "val0", "key2": "val2"} type_ref1 = volume_types.create(self.ctxt, "type1", keyvals1) type_ref2 = volume_types.create(self.ctxt, "type2", keyvals1) type_ref3 = 
volume_types.create(self.ctxt, "type3", keyvals2) # Check equality with only extra_specs diff, same = volume_types.volume_types_diff(self.ctxt, type_ref1['id'], type_ref2['id']) self.assertTrue(same) self.assertEqual(('val1', 'val1'), diff['extra_specs']['key1']) diff, same = volume_types.volume_types_diff(self.ctxt, type_ref1['id'], type_ref3['id']) self.assertFalse(same) self.assertEqual(('val1', 'val0'), diff['extra_specs']['key1']) # qos_ref 1 and 2 have the same specs, while 3 has different qos_keyvals1 = {'k1': 'v1', 'k2': 'v2', 'k3': 'v3', 'k4': 'v4'} qos_keyvals2 = {'k1': 'v0', 'k2': 'v2', 'k3': 'v3'} qos_ref1 = qos_specs.create(self.ctxt, 'qos-specs-1', qos_keyvals1) qos_ref2 = qos_specs.create(self.ctxt, 'qos-specs-2', qos_keyvals1) qos_ref3 = qos_specs.create(self.ctxt, 'qos-specs-3', qos_keyvals2) # Check equality with qos specs too qos_specs.associate_qos_with_type(self.ctxt, qos_ref1['id'], type_ref1['id']) qos_specs.associate_qos_with_type(self.ctxt, qos_ref2['id'], type_ref2['id']) diff, same = volume_types.volume_types_diff(self.ctxt, type_ref1['id'], type_ref2['id']) self.assertTrue(same) for k in ('id', 'name', 'created_at', 'updated_at', 'deleted_at'): self.assertNotIn(k, diff['qos_specs']) self.assertEqual(('val1', 'val1'), diff['extra_specs']['key1']) self.assertEqual(('v1', 'v1'), diff['qos_specs']['k1']) qos_specs.disassociate_qos_specs(self.ctxt, qos_ref2['id'], type_ref2['id']) qos_specs.associate_qos_with_type(self.ctxt, qos_ref3['id'], type_ref2['id']) diff, same = volume_types.volume_types_diff(self.ctxt, type_ref1['id'], type_ref2['id']) self.assertFalse(same) self.assertEqual(('val1', 'val1'), diff['extra_specs']['key1']) self.assertEqual(('v1', 'v0'), diff['qos_specs']['k1']) qos_specs.disassociate_qos_specs(self.ctxt, qos_ref3['id'], type_ref2['id']) qos_specs.associate_qos_with_type(self.ctxt, qos_ref2['id'], type_ref2['id']) # And add encryption for good measure enc_keyvals1 = {'cipher': 'c1', 'key_size': 256, 'provider': 'p1', 'control_location': 'front-end'} enc_keyvals2 = {'cipher': 'c1', 'key_size': 128, 'provider': 'p1', 'control_location': 'front-end'} db.volume_type_encryption_create(self.ctxt, type_ref1['id'], enc_keyvals1) db.volume_type_encryption_create(self.ctxt, type_ref2['id'], enc_keyvals2) diff, same = volume_types.volume_types_diff(self.ctxt, type_ref1['id'], type_ref2['id']) self.assertFalse(same) self.assertEqual(('val1', 'val1'), diff['extra_specs']['key1']) self.assertEqual(('v1', 'v1'), diff['qos_specs']['k1']) self.assertEqual((256, 128), diff['encryption']['key_size']) # Check diff equals type specs when one type is None diff, same = volume_types.volume_types_diff(self.ctxt, None, type_ref1['id']) self.assertFalse(same) self.assertEqual({'key1': (None, 'val1'), 'key2': (None, 'val2')}, diff['extra_specs']) self.assertEqual({'consumer': (None, 'back-end'), 'k1': (None, 'v1'), 'k2': (None, 'v2'), 'k3': (None, 'v3'), 'k4': (None, 'v4')}, diff['qos_specs']) self.assertEqual({'cipher': (None, 'c1'), 'control_location': (None, 'front-end'), 'deleted': (None, False), 'key_size': (None, 256), 'provider': (None, 'p1')}, diff['encryption']) def test_encryption_create(self): volume_type = volume_types.create(self.ctxt, "type1") volume_type_id = volume_type.get('id') encryption = { 'control_location': 'front-end', 'provider': 'fake_provider', } db_api.volume_type_encryption_create(self.ctxt, volume_type_id, encryption) self.assertTrue(volume_types.is_encrypted(self.ctxt, volume_type_id)) def test_get_volume_type_encryption(self): volume_type = 
volume_types.create(self.ctxt, "type1") volume_type_id = volume_type.get('id') encryption = { 'control_location': 'front-end', 'provider': 'fake_provider', } db.volume_type_encryption_create(self.ctxt, volume_type_id, encryption) ret = volume_types.get_volume_type_encryption(self.ctxt, volume_type_id) self.assertIsNotNone(ret) def test_get_volume_type_encryption_without_volume_type_id(self): ret = volume_types.get_volume_type_encryption(self.ctxt, None) self.assertIsNone(ret) def test_check_public_volume_type_failed(self): project_id = fake.PROJECT_ID volume_type = volume_types.create(self.ctxt, "type1") volume_type_id = volume_type.get('id') self.assertRaises(exception.InvalidVolumeType, volume_types.add_volume_type_access, self.ctxt, volume_type_id, project_id) self.assertRaises(exception.InvalidVolumeType, volume_types.remove_volume_type_access, self.ctxt, volume_type_id, project_id) def test_check_private_volume_type(self): volume_type = volume_types.create(self.ctxt, "type1", is_public=False) volume_type_id = volume_type.get('id') self.assertFalse(volume_types.is_public_volume_type(self.ctxt, volume_type_id)) def test_ensure__extra_specs_for_non_admin(self): # non-admin users get extra-specs back in type-get/list etc at DB layer ctxt = context.RequestContext('average-joe', 'd802f078-0af1-4e6b-8c02-7fac8d4339aa', auth_token='token', is_admin=False) volume_types.create(self.ctxt, "type-test", is_public=False) vtype = volume_types.get_volume_type_by_name(ctxt, 'type-test') self.assertIsNotNone(vtype.get('extra_specs', None)) def test_ensure_extra_specs_for_admin(self): # admin users should get extra-specs back in type-get/list etc volume_types.create(self.ctxt, "type-test", is_public=False) vtype = volume_types.get_volume_type_by_name(self.ctxt, 'type-test') self.assertIsNotNone(vtype.get('extra_specs', None)) @mock.patch('cinder.volume.volume_types.get_volume_type_encryption') def _exec_volume_types_encryption_changed(self, enc1, enc2, expected_result, mock_get_encryption): def _get_encryption(ctxt, type_id): if enc1 and enc1['volume_type_id'] == type_id: return enc1 if enc2 and enc2['volume_type_id'] == type_id: return enc2 return None mock_get_encryption.side_effect = _get_encryption actual_result = volume_types.volume_types_encryption_changed( self.ctxt, fake.VOLUME_TYPE_ID, fake.VOLUME_TYPE2_ID) self.assertEqual(expected_result, actual_result) def test_volume_types_encryption_changed(self): enc1 = {'volume_type_id': fake.VOLUME_TYPE_ID, 'cipher': 'fake', 'created_at': 'time1', } enc2 = {'volume_type_id': fake.VOLUME_TYPE2_ID, 'cipher': 'fake', 'created_at': 'time2', } self._exec_volume_types_encryption_changed(enc1, enc2, False) def test_volume_types_encryption_changed2(self): enc1 = {'volume_type_id': fake.VOLUME_TYPE_ID, 'cipher': 'fake1', 'created_at': 'time1', } enc2 = {'volume_type_id': fake.VOLUME_TYPE2_ID, 'cipher': 'fake2', 'created_at': 'time1', } self._exec_volume_types_encryption_changed(enc1, enc2, True) def test_volume_types_encryption_changed3(self): self._exec_volume_types_encryption_changed(None, None, False) def test_volume_types_encryption_changed4(self): enc1 = {'volume_type_id': fake.VOLUME_TYPE_ID, 'cipher': 'fake1', 'created_at': 'time1', } self._exec_volume_types_encryption_changed(enc1, None, True) @mock.patch('cinder.volume.volume_types.CONF') @mock.patch('cinder.volume.volume_types.rpc') def test_notify_about_volume_type_access_usage(self, mock_rpc, mock_conf): mock_conf.host = 'host1' project_id = fake.PROJECT_ID volume_type_id = fake.VOLUME_TYPE_ID output 
= volume_types.notify_about_volume_type_access_usage( mock.sentinel.context, volume_type_id, project_id, 'test_suffix') self.assertIsNone(output) mock_rpc.get_notifier.assert_called_once_with('volume_type_project', 'host1') mock_rpc.get_notifier.return_value.info.assert_called_once_with( mock.sentinel.context, 'volume_type_project.test_suffix', {'volume_type_id': volume_type_id, 'project_id': project_id}) def test_provision_filter_on_size(self): volume_types.create(self.ctxt, "type1", {"key1": "val1", "key2": "val2"}) volume_types.create(self.ctxt, "type2", {volume_types.MIN_SIZE_KEY: "12", "key3": "val3"}) volume_types.create(self.ctxt, "type3", {volume_types.MAX_SIZE_KEY: "99", "key4": "val4"}) volume_types.create(self.ctxt, "type4", {volume_types.MIN_SIZE_KEY: "24", volume_types.MAX_SIZE_KEY: "99", "key4": "val4"}) # Make sure we don't raise if there are no min/max set type1 = volume_types.get_by_name_or_id(self.ctxt, 'type1') volume_types.provision_filter_on_size(self.ctxt, type1, "11") # verify minimum size requirements type2 = volume_types.get_by_name_or_id(self.ctxt, 'type2') self.assertRaises(exception.InvalidInput, volume_types.provision_filter_on_size, self.ctxt, type2, "11") volume_types.provision_filter_on_size(self.ctxt, type2, "12") volume_types.provision_filter_on_size(self.ctxt, type2, "100") # verify max size requirements type3 = volume_types.get_by_name_or_id(self.ctxt, 'type3') self.assertRaises(exception.InvalidInput, volume_types.provision_filter_on_size, self.ctxt, type3, "100") volume_types.provision_filter_on_size(self.ctxt, type3, "99") volume_types.provision_filter_on_size(self.ctxt, type3, "1") # verify min and max type4 = volume_types.get_by_name_or_id(self.ctxt, 'type4') self.assertRaises(exception.InvalidInput, volume_types.provision_filter_on_size, self.ctxt, type4, "20") self.assertRaises(exception.InvalidInput, volume_types.provision_filter_on_size, self.ctxt, type4, "130") volume_types.provision_filter_on_size(self.ctxt, type4, "24") volume_types.provision_filter_on_size(self.ctxt, type4, "99") volume_types.provision_filter_on_size(self.ctxt, type4, "30") ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/test_volume_types_extra_specs.py0000664000175000017500000001176100000000000025226 0ustar00zuulzuul00000000000000# Copyright (c) 2011 Zadara Storage Inc. # Copyright (c) 2011 OpenStack Foundation # Copyright 2011 University of Southern California # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
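# --- Illustrative sketch (not part of the cinder source tree) ---
# The test_provision_filter_on_size cases above exercise per-type size
# bounds stored as extra specs.  The stand-in below shows the shape of that
# check only; the spec key names and the ValueError are assumptions made for
# illustration (cinder defines its own MIN_SIZE_KEY/MAX_SIZE_KEY constants
# and raises InvalidInput).
def check_size_against_type(extra_specs, size,
                            min_key="provisioning:min_vol_size",
                            max_key="provisioning:max_vol_size"):
    """Reject sizes outside the type's optional min/max extra specs."""
    min_size = extra_specs.get(min_key)
    max_size = extra_specs.get(max_key)
    if min_size is not None and int(size) < int(min_size):
        raise ValueError("size is below the volume type minimum")
    if max_size is not None and int(size) > int(max_size):
        raise ValueError("size is above the volume type maximum")


# A size equal to the configured minimum is accepted, mirroring the
# expectations encoded in the tests above.
check_size_against_type({"provisioning:min_vol_size": "12"}, 12)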
""" Unit Tests for volume types extra specs code """ from cinder import context from cinder import db from cinder.tests.unit import test class VolumeTypeExtraSpecsTestCase(test.TestCase): def setUp(self): super(VolumeTypeExtraSpecsTestCase, self).setUp() self.context = context.get_admin_context() self.vol_type1 = dict(name="TEST: Regular volume test") self.vol_type1_specs = dict(vol_extra1="value1", vol_extra2="value2", vol_extra3=3) self.vol_type1['extra_specs'] = self.vol_type1_specs ref = db.volume_type_create(self.context, self.vol_type1) self.addCleanup(db.volume_type_destroy, context.get_admin_context(), self.vol_type1['id']) self.volume_type1_id = ref.id for k, v in self.vol_type1_specs.items(): self.vol_type1_specs[k] = str(v) self.vol_type2_noextra = dict(name="TEST: Volume type without extra") ref = db.volume_type_create(self.context, self.vol_type2_noextra) self.addCleanup(db.volume_type_destroy, context.get_admin_context(), self.vol_type2_noextra['id']) self.vol_type2_id = ref.id def test_volume_type_specs_get(self): expected_specs = self.vol_type1_specs.copy() actual_specs = db.volume_type_extra_specs_get( context.get_admin_context(), self.volume_type1_id) self.assertEqual(expected_specs, actual_specs) def test_volume_type_extra_specs_delete(self): expected_specs = self.vol_type1_specs.copy() del expected_specs['vol_extra2'] db.volume_type_extra_specs_delete(context.get_admin_context(), self.volume_type1_id, 'vol_extra2') actual_specs = db.volume_type_extra_specs_get( context.get_admin_context(), self.volume_type1_id) self.assertEqual(expected_specs, actual_specs) def test_volume_type_extra_specs_update(self): expected_specs = self.vol_type1_specs.copy() expected_specs['vol_extra3'] = "4" db.volume_type_extra_specs_update_or_create( context.get_admin_context(), self.volume_type1_id, dict(vol_extra3=4)) actual_specs = db.volume_type_extra_specs_get( context.get_admin_context(), self.volume_type1_id) self.assertEqual(expected_specs, actual_specs) def test_volume_type_extra_specs_create(self): expected_specs = self.vol_type1_specs.copy() expected_specs['vol_extra4'] = 'value4' expected_specs['vol_extra5'] = 'value5' db.volume_type_extra_specs_update_or_create( context.get_admin_context(), self.volume_type1_id, dict(vol_extra4="value4", vol_extra5="value5")) actual_specs = db.volume_type_extra_specs_get( context.get_admin_context(), self.volume_type1_id) self.assertEqual(expected_specs, actual_specs) def test_volume_type_get_with_extra_specs(self): volume_type = db.volume_type_get( context.get_admin_context(), self.volume_type1_id) self.assertEqual(self.vol_type1_specs, volume_type['extra_specs']) volume_type = db.volume_type_get( context.get_admin_context(), self.vol_type2_id) self.assertEqual({}, volume_type['extra_specs']) def test_volume_type_get_by_name_with_extra_specs(self): volume_type = db.volume_type_get_by_name( context.get_admin_context(), self.vol_type1['name']) self.assertEqual(self.vol_type1_specs, volume_type['extra_specs']) volume_type = db.volume_type_get_by_name( context.get_admin_context(), self.vol_type2_noextra['name']) self.assertEqual({}, volume_type['extra_specs']) def test_volume_type_get_all(self): expected_specs = self.vol_type1_specs.copy() types = db.volume_type_get_all(context.get_admin_context()) self.assertEqual(expected_specs, types[self.vol_type1['name']]['extra_specs']) self.assertEqual({}, types[self.vol_type2_noextra['name']]['extra_specs']) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 
mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/test_volume_utils.py0000664000175000017500000021435100000000000022622 0ustar00zuulzuul00000000000000# Copyright 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Tests For miscellaneous util methods used with volume.""" import datetime import functools import io import time from unittest import mock from castellan import key_manager import ddt from oslo_concurrency import processutils from oslo_config import cfg from oslo_utils import units from cinder import context from cinder import db from cinder.db.sqlalchemy import models from cinder import exception from cinder.objects import fields from cinder.tests.unit.backup import fake_backup from cinder.tests.unit import fake_constants as fake from cinder.tests.unit import fake_group from cinder.tests.unit import fake_snapshot from cinder.tests.unit import fake_volume from cinder.tests.unit.image import fake as fake_image from cinder.tests.unit import test from cinder.tests.unit import utils as test_utils from cinder import utils from cinder.volume import driver from cinder.volume import throttling from cinder.volume import volume_types from cinder.volume import volume_utils CONF = cfg.CONF class NotifyUsageTestCase(test.TestCase): @mock.patch('cinder.volume.volume_utils._usage_from_volume') @mock.patch('cinder.volume.volume_utils.CONF') @mock.patch('cinder.volume.volume_utils.rpc') def test_notify_about_volume_usage(self, mock_rpc, mock_conf, mock_usage): mock_conf.host = 'host1' output = volume_utils.notify_about_volume_usage(mock.sentinel.context, mock.sentinel.volume, 'test_suffix') self.assertIsNone(output) mock_usage.assert_called_once_with(mock.sentinel.context, mock.sentinel.volume) mock_rpc.get_notifier.assert_called_once_with('volume', 'host1') mock_rpc.get_notifier.return_value.info.assert_called_once_with( mock.sentinel.context, 'volume.test_suffix', mock_usage.return_value) @mock.patch('cinder.volume.volume_utils._usage_from_volume') @mock.patch('cinder.volume.volume_utils.CONF') @mock.patch('cinder.volume.volume_utils.rpc') def test_notify_about_volume_usage_with_kwargs(self, mock_rpc, mock_conf, mock_usage): mock_conf.host = 'host1' output = volume_utils.notify_about_volume_usage( mock.sentinel.context, mock.sentinel.volume, 'test_suffix', extra_usage_info={'a': 'b', 'c': 'd'}, host='host2') self.assertIsNone(output) mock_usage.assert_called_once_with(mock.sentinel.context, mock.sentinel.volume, a='b', c='d') mock_rpc.get_notifier.assert_called_once_with('volume', 'host2') mock_rpc.get_notifier.return_value.info.assert_called_once_with( mock.sentinel.context, 'volume.test_suffix', mock_usage.return_value) @mock.patch('cinder.volume.volume_utils._usage_from_snapshot') @mock.patch('cinder.volume.volume_utils.CONF') @mock.patch('cinder.volume.volume_utils.rpc') def test_notify_about_snapshot_usage(self, mock_rpc, mock_conf, mock_usage): mock_conf.host = 'host1' output = volume_utils.notify_about_snapshot_usage( mock.sentinel.context, 
mock.sentinel.snapshot, 'test_suffix') self.assertIsNone(output) mock_usage.assert_called_once_with(mock.sentinel.snapshot, mock.sentinel.context) mock_rpc.get_notifier.assert_called_once_with('snapshot', 'host1') mock_rpc.get_notifier.return_value.info.assert_called_once_with( mock.sentinel.context, 'snapshot.test_suffix', mock_usage.return_value) @mock.patch('cinder.volume.volume_utils._usage_from_snapshot') @mock.patch('cinder.volume.volume_utils.CONF') @mock.patch('cinder.volume.volume_utils.rpc') def test_notify_about_snapshot_usage_with_kwargs(self, mock_rpc, mock_conf, mock_usage): mock_conf.host = 'host1' output = volume_utils.notify_about_snapshot_usage( mock.sentinel.context, mock.sentinel.snapshot, 'test_suffix', extra_usage_info={'a': 'b', 'c': 'd'}, host='host2') self.assertIsNone(output) mock_usage.assert_called_once_with(mock.sentinel.snapshot, mock.sentinel.context, a='b', c='d') mock_rpc.get_notifier.assert_called_once_with('snapshot', 'host2') mock_rpc.get_notifier.return_value.info.assert_called_once_with( mock.sentinel.context, 'snapshot.test_suffix', mock_usage.return_value) @mock.patch('cinder.db.volume_get') def test_usage_from_snapshot(self, volume_get): raw_volume = { 'id': fake.VOLUME_ID, 'availability_zone': 'nova' } ctxt = context.get_admin_context() volume_obj = fake_volume.fake_volume_obj(ctxt, **raw_volume) volume_get.return_value = volume_obj raw_snapshot = { 'project_id': fake.PROJECT_ID, 'user_id': fake.USER_ID, 'volume': volume_obj, 'volume_id': fake.VOLUME_ID, 'volume_size': 1, 'id': fake.SNAPSHOT_ID, 'display_name': '11', 'created_at': '2014-12-11T10:10:00', 'status': fields.SnapshotStatus.ERROR, 'deleted': '', 'snapshot_metadata': [{'key': 'fake_snap_meta_key', 'value': 'fake_snap_meta_value'}], 'expected_attrs': ['metadata'], } snapshot_obj = fake_snapshot.fake_snapshot_obj(ctxt, **raw_snapshot) usage_info = volume_utils._usage_from_snapshot(snapshot_obj, ctxt) expected_snapshot = { 'tenant_id': fake.PROJECT_ID, 'user_id': fake.USER_ID, 'availability_zone': 'nova', 'volume_id': fake.VOLUME_ID, 'volume_size': 1, 'snapshot_id': fake.SNAPSHOT_ID, 'display_name': '11', 'created_at': '2014-12-11T10:10:00+00:00', 'status': fields.SnapshotStatus.ERROR, 'deleted': '', 'metadata': str({'fake_snap_meta_key': u'fake_snap_meta_value'}), } self.assertDictEqual(expected_snapshot, usage_info) @mock.patch('cinder.db.volume_get') def test_usage_from_deleted_snapshot(self, volume_get): raw_volume = { 'id': fake.VOLUME_ID, 'availability_zone': 'nova', 'deleted': 1 } ctxt = context.get_admin_context() volume_obj = fake_volume.fake_volume_obj(ctxt, **raw_volume) volume_get.return_value = volume_obj raw_snapshot = { 'project_id': fake.PROJECT_ID, 'user_id': fake.USER_ID, 'volume': volume_obj, 'volume_id': fake.VOLUME_ID, 'volume_size': 1, 'id': fake.SNAPSHOT_ID, 'display_name': '11', 'created_at': '2014-12-11T10:10:00', 'status': fields.SnapshotStatus.ERROR, 'deleted': '', 'snapshot_metadata': [{'key': 'fake_snap_meta_key', 'value': 'fake_snap_meta_value'}], 'expected_attrs': ['metadata'], } snapshot_obj = fake_snapshot.fake_snapshot_obj(ctxt, **raw_snapshot) usage_info = volume_utils._usage_from_snapshot(snapshot_obj, ctxt) expected_snapshot = { 'tenant_id': fake.PROJECT_ID, 'user_id': fake.USER_ID, 'availability_zone': 'nova', 'volume_id': fake.VOLUME_ID, 'volume_size': 1, 'snapshot_id': fake.SNAPSHOT_ID, 'display_name': '11', 'created_at': mock.ANY, 'status': fields.SnapshotStatus.ERROR, 'deleted': '', 'metadata': str({'fake_snap_meta_key': u'fake_snap_meta_value'}), } 
self.assertDictEqual(expected_snapshot, usage_info) @mock.patch('cinder.db.volume_glance_metadata_get') @mock.patch('cinder.db.volume_attachment_get_all_by_volume_id') def test_usage_from_volume(self, mock_attachment, mock_image_metadata): mock_image_metadata.return_value = {'image_id': 'fake_image_id'} mock_attachment.return_value = [{'instance_uuid': 'fake_instance_id'}] raw_volume = { 'project_id': '12b0330ec2584a', 'user_id': '158cba1b8c2bb6008e', 'host': 'fake_host', 'availability_zone': 'nova', 'volume_type_id': fake.VOLUME_TYPE_ID, 'id': fake.VOLUME_ID, 'size': 1, 'display_name': 'test_volume', 'created_at': datetime.datetime(2015, 1, 1, 1, 1, 1), 'launched_at': datetime.datetime(2015, 1, 1, 1, 1, 1), 'snapshot_id': None, 'replication_status': None, 'replication_extended_status': None, 'replication_driver_data': None, 'status': 'available', 'volume_metadata': {'fake_metadata_key': 'fake_metadata_value'}, } usage_info = volume_utils._usage_from_volume( mock.sentinel.context, raw_volume) expected_volume = { 'tenant_id': '12b0330ec2584a', 'user_id': '158cba1b8c2bb6008e', 'host': 'fake_host', 'availability_zone': 'nova', 'volume_type': fake.VOLUME_TYPE_ID, 'volume_id': fake.VOLUME_ID, 'size': 1, 'display_name': 'test_volume', 'created_at': '2015-01-01T01:01:01', 'launched_at': '2015-01-01T01:01:01', 'snapshot_id': None, 'replication_status': None, 'replication_extended_status': None, 'replication_driver_data': None, 'status': 'available', 'metadata': {'fake_metadata_key': 'fake_metadata_value'}, 'glance_metadata': {'image_id': 'fake_image_id'}, 'volume_attachment': [{'instance_uuid': 'fake_instance_id'}], } self.assertEqual(expected_volume, usage_info) @mock.patch('cinder.volume.volume_utils._usage_from_consistencygroup') @mock.patch('cinder.volume.volume_utils.CONF') @mock.patch('cinder.volume.volume_utils.rpc') def test_notify_about_consistencygroup_usage(self, mock_rpc, mock_conf, mock_usage): mock_conf.host = 'host1' output = volume_utils.notify_about_consistencygroup_usage( mock.sentinel.context, mock.sentinel.consistencygroup, 'test_suffix') self.assertIsNone(output) mock_usage.assert_called_once_with(mock.sentinel.consistencygroup) mock_rpc.get_notifier.assert_called_once_with('consistencygroup', 'host1') mock_rpc.get_notifier.return_value.info.assert_called_once_with( mock.sentinel.context, 'consistencygroup.test_suffix', mock_usage.return_value) @mock.patch('cinder.volume.volume_utils._usage_from_consistencygroup') @mock.patch('cinder.volume.volume_utils.CONF') @mock.patch('cinder.volume.volume_utils.rpc') def test_notify_about_consistencygroup_usage_with_kwargs(self, mock_rpc, mock_conf, mock_usage): mock_conf.host = 'host1' output = volume_utils.notify_about_consistencygroup_usage( mock.sentinel.context, mock.sentinel.consistencygroup, 'test_suffix', extra_usage_info={'a': 'b', 'c': 'd'}, host='host2') self.assertIsNone(output) mock_usage.assert_called_once_with(mock.sentinel.consistencygroup, a='b', c='d') mock_rpc.get_notifier.assert_called_once_with('consistencygroup', 'host2') mock_rpc.get_notifier.return_value.info.assert_called_once_with( mock.sentinel.context, 'consistencygroup.test_suffix', mock_usage.return_value) @mock.patch('cinder.volume.volume_utils._usage_from_cgsnapshot') @mock.patch('cinder.volume.volume_utils.CONF') @mock.patch('cinder.volume.volume_utils.rpc') def test_notify_about_cgsnapshot_usage(self, mock_rpc, mock_conf, mock_usage): mock_conf.host = 'host1' output = volume_utils.notify_about_cgsnapshot_usage( mock.sentinel.context, 
mock.sentinel.cgsnapshot, 'test_suffix') self.assertIsNone(output) mock_usage.assert_called_once_with(mock.sentinel.cgsnapshot) mock_rpc.get_notifier.assert_called_once_with('cgsnapshot', 'host1') mock_rpc.get_notifier.return_value.info.assert_called_once_with( mock.sentinel.context, 'cgsnapshot.test_suffix', mock_usage.return_value) @mock.patch('cinder.volume.volume_utils._usage_from_cgsnapshot') @mock.patch('cinder.volume.volume_utils.CONF') @mock.patch('cinder.volume.volume_utils.rpc') def test_notify_about_cgsnapshot_usage_with_kwargs(self, mock_rpc, mock_conf, mock_usage): mock_conf.host = 'host1' output = volume_utils.notify_about_cgsnapshot_usage( mock.sentinel.context, mock.sentinel.cgsnapshot, 'test_suffix', extra_usage_info={'a': 'b', 'c': 'd'}, host='host2') self.assertIsNone(output) mock_usage.assert_called_once_with(mock.sentinel.cgsnapshot, a='b', c='d') mock_rpc.get_notifier.assert_called_once_with('cgsnapshot', 'host2') mock_rpc.get_notifier.return_value.info.assert_called_once_with( mock.sentinel.context, 'cgsnapshot.test_suffix', mock_usage.return_value) def test_usage_from_backup(self): raw_backup = { 'project_id': fake.PROJECT_ID, 'user_id': fake.USER_ID, 'availability_zone': 'nova', 'id': fake.BACKUP_ID, 'host': 'fake_host', 'display_name': 'test_backup', 'created_at': datetime.datetime(2015, 1, 1, 1, 1, 1), 'status': 'available', 'volume_id': fake.VOLUME_ID, 'size': 1, 'service_metadata': None, 'service': 'cinder.backup.drivers.swift', 'fail_reason': None, 'parent_id': fake.BACKUP2_ID, 'num_dependent_backups': 0, 'snapshot_id': None, } ctxt = context.get_admin_context() backup_obj = fake_backup.fake_backup_obj(ctxt, **raw_backup) # Make it easier to find out differences between raw and expected. expected_backup = raw_backup.copy() expected_backup['tenant_id'] = expected_backup.pop('project_id') expected_backup['backup_id'] = expected_backup.pop('id') expected_backup['created_at'] = '2015-01-01T01:01:01+00:00' usage_info = volume_utils._usage_from_backup(backup_obj) self.assertDictEqual(expected_backup, usage_info) class LVMVolumeDriverTestCase(test.TestCase): def test_convert_blocksize_option(self): # Test valid volume_dd_blocksize bs = volume_utils._check_blocksize('10M') self.assertEqual('10M', bs) bs = volume_utils._check_blocksize('1xBBB') self.assertEqual('1M', bs) # Test 'volume_dd_blocksize' with fraction bs = volume_utils._check_blocksize('1.3M') self.assertEqual('1M', bs) # Test zero-size 'volume_dd_blocksize' bs = volume_utils._check_blocksize('0M') self.assertEqual('1M', bs) # Test negative 'volume_dd_blocksize' bs = volume_utils._check_blocksize('-1M') self.assertEqual('1M', bs) # Test non-digital 'volume_dd_blocksize' bs = volume_utils._check_blocksize('ABM') self.assertEqual('1M', bs) @mock.patch('cinder.volume.volume_utils._usage_from_capacity') @mock.patch('cinder.volume.volume_utils.CONF') @mock.patch('cinder.volume.volume_utils.rpc') def test_notify_about_capacity_usage(self, mock_rpc, mock_conf, mock_usage): mock_conf.host = 'host1' output = volume_utils.notify_about_capacity_usage( mock.sentinel.context, mock.sentinel.capacity, 'test_suffix') self.assertIsNone(output) mock_usage.assert_called_once_with(mock.sentinel.capacity) mock_rpc.get_notifier.assert_called_once_with('capacity', 'host1') mock_rpc.get_notifier.return_value.info.assert_called_once_with( mock.sentinel.context, 'capacity.test_suffix', mock_usage.return_value) @mock.patch('cinder.volume.volume_utils._usage_from_capacity') @mock.patch('cinder.volume.volume_utils.CONF') 
@mock.patch('cinder.volume.volume_utils.rpc') def test_notify_about_capacity_usage_with_kwargs(self, mock_rpc, mock_conf, mock_usage): mock_conf.host = 'host1' output = volume_utils.notify_about_capacity_usage( mock.sentinel.context, mock.sentinel.capacity, 'test_suffix', extra_usage_info={'a': 'b', 'c': 'd'}, host='host2') self.assertIsNone(output) mock_usage.assert_called_once_with(mock.sentinel.capacity, a='b', c='d') mock_rpc.get_notifier.assert_called_once_with('capacity', 'host2') mock_rpc.get_notifier.return_value.info.assert_called_once_with( mock.sentinel.context, 'capacity.test_suffix', mock_usage.return_value) def test_usage_from_capacity(self): test_capacity = { 'name_to_id': 'host1@backend1#pool1', 'type': 'pool', 'total': '10.01', 'free': '8.01', 'allocated': '2', 'provisioned': '2', 'virtual_free': '8.01', 'reported_at': '2014-12-11T10:10:00', } usage_info = volume_utils._usage_from_capacity( test_capacity) expected_capacity = { 'name_to_id': 'host1@backend1#pool1', 'total': '10.01', 'free': '8.01', 'allocated': '2', 'provisioned': '2', 'virtual_free': '8.01', 'reported_at': '2014-12-11T10:10:00', } self.assertEqual(expected_capacity, usage_info) class OdirectSupportTestCase(test.TestCase): @mock.patch('cinder.utils.execute') def test_check_for_odirect_support(self, mock_exec): output = volume_utils.check_for_odirect_support('/dev/abc', '/dev/def') self.assertTrue(output) mock_exec.assert_called_once_with('dd', 'count=0', 'if=/dev/abc', 'of=/dev/def', 'oflag=direct', run_as_root=True) mock_exec.reset_mock() output = volume_utils.check_for_odirect_support('/dev/abc', '/dev/def', 'iflag=direct') self.assertTrue(output) mock_exec.assert_called_once_with('dd', 'count=0', 'if=/dev/abc', 'of=/dev/def', 'iflag=direct', run_as_root=True) mock_exec.reset_mock() output = volume_utils.check_for_odirect_support('/dev/zero', '/dev/def', 'iflag=direct') self.assertFalse(output) mock_exec.reset_mock() output = volume_utils.check_for_odirect_support('/dev/zero', '/dev/def') self.assertTrue(output) mock_exec.assert_called_once_with('dd', 'count=0', 'if=/dev/zero', 'of=/dev/def', 'oflag=direct', run_as_root=True) @mock.patch('cinder.utils.execute', side_effect=processutils.ProcessExecutionError) def test_check_for_odirect_support_error(self, mock_exec): output = volume_utils.check_for_odirect_support('/dev/abc', '/dev/def') self.assertFalse(output) mock_exec.assert_called_once_with('dd', 'count=0', 'if=/dev/abc', 'of=/dev/def', 'oflag=direct', run_as_root=True) mock_exec.reset_mock() output = volume_utils.check_for_odirect_support('/dev/zero', '/dev/def') self.assertFalse(output) mock_exec.assert_called_once_with('dd', 'count=0', 'if=/dev/zero', 'of=/dev/def', 'oflag=direct', run_as_root=True) class ClearVolumeTestCase(test.TestCase): @mock.patch('cinder.volume.volume_utils.copy_volume', return_value=None) @mock.patch('cinder.volume.volume_utils.CONF') def test_clear_volume_conf(self, mock_conf, mock_copy): mock_conf.volume_clear = 'zero' mock_conf.volume_clear_size = 0 mock_conf.volume_dd_blocksize = '1M' mock_conf.volume_clear_ionice = '-c3' output = volume_utils.clear_volume(1024, 'volume_path') self.assertIsNone(output) mock_copy.assert_called_once_with('/dev/zero', 'volume_path', 1024, '1M', sync=True, execute=utils.execute, ionice='-c3', throttle=None, sparse=False) @mock.patch('cinder.volume.volume_utils.copy_volume', return_value=None) @mock.patch('cinder.volume.volume_utils.CONF') def test_clear_volume_args(self, mock_conf, mock_copy): mock_conf.volume_clear = 
'should_override_with_arg' mock_conf.volume_clear_size = 0 mock_conf.volume_dd_blocksize = '1M' mock_conf.volume_clear_ionice = '-c3' output = volume_utils.clear_volume(1024, 'volume_path', 'zero', 1, '-c0') self.assertIsNone(output) mock_copy.assert_called_once_with('/dev/zero', 'volume_path', 1, '1M', sync=True, execute=utils.execute, ionice='-c0', throttle=None, sparse=False) @mock.patch('cinder.volume.volume_utils.CONF') def test_clear_volume_invalid_opt(self, mock_conf): mock_conf.volume_clear = 'non_existent_volume_clearer' mock_conf.volume_clear_size = 0 mock_conf.volume_clear_ionice = None self.assertRaises(exception.InvalidConfigurationValue, volume_utils.clear_volume, 1024, "volume_path") class CopyVolumeTestCase(test.TestCase): @mock.patch('cinder.volume.volume_utils.check_for_odirect_support', return_value=True) @mock.patch('cinder.utils.execute') @mock.patch('cinder.volume.volume_utils.CONF') def test_copy_volume_dd_iflag_and_oflag(self, mock_conf, mock_exec, mock_support): fake_throttle = throttling.Throttle(['fake_throttle']) output = volume_utils.copy_volume('/dev/zero', '/dev/null', 1024, '3M', sync=True, execute=utils.execute, ionice=None, throttle=fake_throttle) self.assertIsNone(output) mock_exec.assert_called_once_with('fake_throttle', 'dd', 'if=/dev/zero', 'of=/dev/null', 'count=%s' % units.Gi, 'bs=3M', 'iflag=count_bytes,direct', 'oflag=direct', run_as_root=True) mock_exec.reset_mock() output = volume_utils.copy_volume('/dev/zero', '/dev/null', 1024, '3M', sync=False, execute=utils.execute, ionice=None, throttle=fake_throttle) self.assertIsNone(output) mock_exec.assert_called_once_with('fake_throttle', 'dd', 'if=/dev/zero', 'of=/dev/null', 'count=%s' % units.Gi, 'bs=3M', 'iflag=count_bytes,direct', 'oflag=direct', run_as_root=True) @mock.patch('cinder.volume.volume_utils.check_for_odirect_support', return_value=False) @mock.patch('cinder.utils.execute') def test_copy_volume_dd_no_iflag_or_oflag(self, mock_exec, mock_support): fake_throttle = throttling.Throttle(['fake_throttle']) output = volume_utils.copy_volume('/dev/zero', '/dev/null', 1024, '3M', sync=True, execute=utils.execute, ionice=None, throttle=fake_throttle) self.assertIsNone(output) mock_exec.assert_called_once_with('fake_throttle', 'dd', 'if=/dev/zero', 'of=/dev/null', 'count=%s' % units.Gi, 'bs=3M', 'iflag=count_bytes', 'conv=fdatasync', run_as_root=True) mock_exec.reset_mock() output = volume_utils.copy_volume('/dev/zero', '/dev/null', 1024, '3M', sync=False, execute=utils.execute, ionice=None, throttle=fake_throttle) self.assertIsNone(output) mock_exec.assert_called_once_with('fake_throttle', 'dd', 'if=/dev/zero', 'of=/dev/null', 'count=%s' % units.Gi, 'bs=3M', 'iflag=count_bytes', run_as_root=True) @mock.patch('cinder.volume.volume_utils.check_for_odirect_support', return_value=False) @mock.patch('cinder.utils.execute') def test_copy_volume_dd_no_throttle(self, mock_exec, mock_support): output = volume_utils.copy_volume('/dev/zero', '/dev/null', 1024, '3M', sync=True, execute=utils.execute, ionice=None) self.assertIsNone(output) mock_exec.assert_called_once_with('dd', 'if=/dev/zero', 'of=/dev/null', 'count=%s' % units.Gi, 'bs=3M', 'iflag=count_bytes', 'conv=fdatasync', run_as_root=True) @mock.patch('cinder.volume.volume_utils.check_for_odirect_support', return_value=False) @mock.patch('cinder.utils.execute') def test_copy_volume_dd_with_ionice(self, mock_exec, mock_support): output = volume_utils.copy_volume('/dev/zero', '/dev/null', 1024, '3M', sync=True, execute=utils.execute, ionice='-c3') 
self.assertIsNone(output) mock_exec.assert_called_once_with('ionice', '-c3', 'dd', 'if=/dev/zero', 'of=/dev/null', 'count=%s' % units.Gi, 'bs=3M', 'iflag=count_bytes', 'conv=fdatasync', run_as_root=True) @mock.patch('cinder.volume.volume_utils.check_for_odirect_support', return_value=False) @mock.patch('cinder.utils.execute') def test_copy_volume_dd_with_sparse(self, mock_exec, mock_support): output = volume_utils.copy_volume('/dev/zero', '/dev/null', 1024, '3M', sync=True, execute=utils.execute, sparse=True) self.assertIsNone(output) mock_exec.assert_called_once_with('dd', 'if=/dev/zero', 'of=/dev/null', 'count=%s' % units.Gi, 'bs=3M', 'iflag=count_bytes', 'conv=fdatasync,sparse', run_as_root=True) @mock.patch('cinder.volume.volume_utils.check_for_odirect_support', return_value=True) @mock.patch('cinder.utils.execute') def test_copy_volume_dd_with_sparse_iflag_and_oflag(self, mock_exec, mock_support): output = volume_utils.copy_volume('/dev/zero', '/dev/null', 1024, '3M', sync=True, execute=utils.execute, sparse=True) self.assertIsNone(output) mock_exec.assert_called_once_with('dd', 'if=/dev/zero', 'of=/dev/null', 'count=%s' % units.Gi, 'bs=3M', 'iflag=count_bytes,direct', 'oflag=direct', 'conv=sparse', run_as_root=True) @mock.patch('cinder.volume.volume_utils._copy_volume_with_file') def test_copy_volume_handles(self, mock_copy): handle1 = io.RawIOBase() handle2 = io.RawIOBase() output = volume_utils.copy_volume(handle1, handle2, 1024, 1) self.assertIsNone(output) mock_copy.assert_called_once_with(handle1, handle2, 1024) @mock.patch('cinder.volume.volume_utils._transfer_data') @mock.patch('cinder.volume.volume_utils._open_volume_with_path') def test_copy_volume_handle_transfer(self, mock_open, mock_transfer): handle = io.RawIOBase() output = volume_utils.copy_volume('/foo/bar', handle, 1024, 1) self.assertIsNone(output) mock_transfer.assert_called_once_with(mock.ANY, mock.ANY, 1073741824, mock.ANY) @ddt.ddt class VolumeUtilsTestCase(test.TestCase): def test_null_safe_str(self): self.assertEqual('', volume_utils.null_safe_str(None)) self.assertEqual('', volume_utils.null_safe_str(False)) self.assertEqual('', volume_utils.null_safe_str(0)) self.assertEqual('', volume_utils.null_safe_str([])) self.assertEqual('', volume_utils.null_safe_str(())) self.assertEqual('', volume_utils.null_safe_str({})) self.assertEqual('', volume_utils.null_safe_str(set())) self.assertEqual('a', volume_utils.null_safe_str('a')) self.assertEqual('1', volume_utils.null_safe_str(1)) self.assertEqual('True', volume_utils.null_safe_str(True)) @mock.patch('cinder.utils.get_root_helper') @mock.patch('cinder.brick.local_dev.lvm.LVM.supports_thin_provisioning') def test_supports_thin_provisioning(self, mock_supports_thin, mock_helper): self.assertEqual(mock_supports_thin.return_value, volume_utils.supports_thin_provisioning()) mock_helper.assert_called_once_with() @mock.patch('cinder.utils.get_root_helper') @mock.patch('cinder.brick.local_dev.lvm.LVM.get_all_physical_volumes') def test_get_all_physical_volumes(self, mock_get_vols, mock_helper): self.assertEqual(mock_get_vols.return_value, volume_utils.get_all_physical_volumes()) mock_helper.assert_called_once_with() @mock.patch('cinder.utils.get_root_helper') @mock.patch('cinder.brick.local_dev.lvm.LVM.get_all_volume_groups') def test_get_all_volume_groups(self, mock_get_groups, mock_helper): self.assertEqual(mock_get_groups.return_value, volume_utils.get_all_volume_groups()) mock_helper.assert_called_once_with() def test_generate_password(self): password = 
volume_utils.generate_password() self.assertTrue(any(c for c in password if c in '23456789')) self.assertTrue(any(c for c in password if c in 'abcdefghijkmnopqrstuvwxyz')) self.assertTrue(any(c for c in password if c in 'ABCDEFGHJKLMNPQRSTUVWXYZ')) self.assertEqual(16, len(password)) self.assertEqual(10, len(volume_utils.generate_password(10))) @mock.patch('cinder.volume.volume_utils.generate_password') def test_generate_username(self, mock_gen_pass): output = volume_utils.generate_username() self.assertEqual(mock_gen_pass.return_value, output) def test_extract_host(self): host = 'Host' # default level is 'backend' self.assertEqual(host, volume_utils.extract_host(host)) self.assertEqual(host, volume_utils.extract_host(host, 'host')) self.assertEqual(host, volume_utils.extract_host(host, 'backend')) # default_pool_name doesn't work for level other than 'pool' self.assertEqual(host, volume_utils.extract_host(host, 'host', True)) self.assertEqual(host, volume_utils.extract_host(host, 'host', False)) self.assertEqual(host, volume_utils.extract_host(host, 'backend', True)) self.assertEqual(host, volume_utils.extract_host(host, 'backend', False)) self.assertIsNone(volume_utils.extract_host(host, 'pool')) self.assertEqual('_pool0', volume_utils.extract_host(host, 'pool', True)) host = 'Host@Backend' self.assertEqual('Host@Backend', volume_utils.extract_host(host)) self.assertEqual('Host', volume_utils.extract_host(host, 'host')) self.assertEqual(host, volume_utils.extract_host(host, 'backend')) self.assertIsNone(volume_utils.extract_host(host, 'pool')) self.assertEqual('_pool0', volume_utils.extract_host(host, 'pool', True)) host = 'Host@Backend#Pool' pool = 'Pool' self.assertEqual('Host@Backend', volume_utils.extract_host(host)) self.assertEqual('Host', volume_utils.extract_host(host, 'host')) self.assertEqual('Host@Backend', volume_utils.extract_host(host, 'backend')) self.assertEqual(pool, volume_utils.extract_host(host, 'pool')) self.assertEqual(pool, volume_utils.extract_host(host, 'pool', True)) host = 'Host#Pool' self.assertEqual('Host', volume_utils.extract_host(host)) self.assertEqual('Host', volume_utils.extract_host(host, 'host')) self.assertEqual('Host', volume_utils.extract_host(host, 'backend')) self.assertEqual(pool, volume_utils.extract_host(host, 'pool')) self.assertEqual(pool, volume_utils.extract_host(host, 'pool', True)) def test_extract_host_none_string(self): self.assertRaises(exception.InvalidVolume, volume_utils.extract_host, None) def test_append_host(self): host = 'Host' pool = 'Pool' expected = 'Host#Pool' self.assertEqual(expected, volume_utils.append_host(host, pool)) pool = None expected = 'Host' self.assertEqual(expected, volume_utils.append_host(host, pool)) host = None pool = 'pool' expected = None self.assertEqual(expected, volume_utils.append_host(host, pool)) host = None pool = None expected = None self.assertEqual(expected, volume_utils.append_host(host, pool)) def test_compare_hosts(self): host_1 = 'fake_host@backend1' host_2 = 'fake_host@backend1#pool1' self.assertTrue(volume_utils.hosts_are_equivalent(host_1, host_2)) host_2 = 'fake_host@backend1' self.assertTrue(volume_utils.hosts_are_equivalent(host_1, host_2)) host_2 = 'fake_host2@backend1' self.assertFalse(volume_utils.hosts_are_equivalent(host_1, host_2)) @mock.patch('cinder.volume.volume_utils.CONF') def test_extract_id_from_volume_name_vol_id_pattern(self, conf_mock): conf_mock.volume_name_template = 'volume-%s' vol_id = 'd8cd1feb-2dcc-404d-9b15-b86fe3bec0a1' vol_name = conf_mock.volume_name_template 
% vol_id result = volume_utils.extract_id_from_volume_name(vol_name) self.assertEqual(vol_id, result) @mock.patch('cinder.volume.volume_utils.CONF') def test_extract_id_from_volume_name_vol_id_vol_pattern(self, conf_mock): conf_mock.volume_name_template = 'volume-%s-volume' vol_id = 'd8cd1feb-2dcc-404d-9b15-b86fe3bec0a1' vol_name = conf_mock.volume_name_template % vol_id result = volume_utils.extract_id_from_volume_name(vol_name) self.assertEqual(vol_id, result) @mock.patch('cinder.volume.volume_utils.CONF') def test_extract_id_from_volume_name_id_vol_pattern(self, conf_mock): conf_mock.volume_name_template = '%s-volume' vol_id = 'd8cd1feb-2dcc-404d-9b15-b86fe3bec0a1' vol_name = conf_mock.volume_name_template % vol_id result = volume_utils.extract_id_from_volume_name(vol_name) self.assertEqual(vol_id, result) @mock.patch('cinder.volume.volume_utils.CONF') def test_extract_id_from_volume_name_no_match(self, conf_mock): conf_mock.volume_name_template = '%s-volume' vol_name = 'd8cd1feb-2dcc-404d-9b15-b86fe3bec0a1' result = volume_utils.extract_id_from_volume_name(vol_name) self.assertIsNone(result) vol_name = 'blahblahblah' result = volume_utils.extract_id_from_volume_name(vol_name) self.assertIsNone(result) @mock.patch('cinder.db.sqlalchemy.api.resource_exists', return_value=True) def test_check_managed_volume_already_managed(self, exists_mock): id_ = 'd8cd1feb-2dcc-404d-9b15-b86fe3bec0a1' result = volume_utils.check_already_managed_volume(id_) self.assertTrue(result) exists_mock.assert_called_once_with(mock.ANY, models.Volume, id_) @mock.patch('cinder.db.sqlalchemy.api.resource_exists', return_value=False) def test_check_managed_volume_not_managed_proper_uuid(self, exists_mock): id_ = 'd8cd1feb-2dcc-404d-9b15-b86fe3bec0a1' result = volume_utils.check_already_managed_volume(id_) self.assertFalse(result) exists_mock.assert_called_once_with(mock.ANY, models.Volume, id_) def test_check_managed_volume_not_managed_invalid_id(self): result = volume_utils.check_already_managed_volume(1) self.assertFalse(result) result = volume_utils.check_already_managed_volume('not-a-uuid') self.assertFalse(result) @mock.patch('cinder.volume.volume_utils.CONF') def test_extract_id_from_snapshot_name(self, conf_mock): conf_mock.snapshot_name_template = '%s-snapshot' snap_id = 'd8cd1feb-2dcc-404d-9b15-b86fe3bec0a1' snap_name = conf_mock.snapshot_name_template % snap_id result = volume_utils.extract_id_from_snapshot_name(snap_name) self.assertEqual(snap_id, result) @mock.patch('cinder.volume.volume_utils.CONF') def test_extract_id_from_snapshot_name_no_match(self, conf_mock): conf_mock.snapshot_name_template = '%s-snapshot' snap_name = 'd8cd1feb-2dcc-404d-9b15-b86fe3bec0a1' result = volume_utils.extract_id_from_snapshot_name(snap_name) self.assertIsNone(result) snap_name = 'blahblahblah' result = volume_utils.extract_id_from_snapshot_name(snap_name) self.assertIsNone(result) @ddt.data({"name": "vol02"}, '{"name": "vol02"}') def test_paginate_entries_list_with_marker(self, marker): entries = [{'reference': {'name': 'vol03'}, 'size': 1}, {'reference': {'name': 'vol01'}, 'size': 3}, {'reference': {'name': 'vol02'}, 'size': 3}, {'reference': {'name': 'vol04'}, 'size': 2}, {'reference': {'name': 'vol06'}, 'size': 3}, {'reference': {'name': 'vol07'}, 'size': 1}, {'reference': {'name': 'vol05'}, 'size': 1}] expected = [{'reference': {'name': 'vol04'}, 'size': 2}, {'reference': {'name': 'vol03'}, 'size': 1}, {'reference': {'name': 'vol05'}, 'size': 1}] res = volume_utils.paginate_entries_list(entries, marker, 3, 1, ['size', 
'reference'], ['desc', 'asc']) self.assertEqual(expected, res) def test_paginate_entries_list_without_marker(self): entries = [{'reference': {'name': 'vol03'}, 'size': 1}, {'reference': {'name': 'vol01'}, 'size': 3}, {'reference': {'name': 'vol02'}, 'size': 3}, {'reference': {'name': 'vol04'}, 'size': 2}, {'reference': {'name': 'vol06'}, 'size': 3}, {'reference': {'name': 'vol07'}, 'size': 1}, {'reference': {'name': 'vol05'}, 'size': 1}] expected = [{'reference': {'name': 'vol07'}, 'size': 1}, {'reference': {'name': 'vol06'}, 'size': 3}, {'reference': {'name': 'vol05'}, 'size': 1}] res = volume_utils.paginate_entries_list(entries, None, 3, None, ['reference'], ['desc']) self.assertEqual(expected, res) def test_paginate_entries_list_marker_invalid_format(self): entries = [{'reference': {'name': 'vol03'}, 'size': 1}, {'reference': {'name': 'vol01'}, 'size': 3}] self.assertRaises(exception.InvalidInput, volume_utils.paginate_entries_list, entries, "invalid_format", 3, None, ['size', 'reference'], ['desc', 'asc']) def test_paginate_entries_list_marker_not_found(self): entries = [{'reference': {'name': 'vol03'}, 'size': 1}, {'reference': {'name': 'vol01'}, 'size': 3}] self.assertRaises(exception.InvalidInput, volume_utils.paginate_entries_list, entries, {'name': 'vol02'}, 3, None, ['size', 'reference'], ['desc', 'asc']) def test_convert_config_string_to_dict(self): test_string = "{'key-1'='val-1' 'key-2'='val-2' 'key-3'='val-3'}" expected_dict = {'key-1': 'val-1', 'key-2': 'val-2', 'key-3': 'val-3'} self.assertEqual( expected_dict, volume_utils.convert_config_string_to_dict(test_string)) @mock.patch('cinder.volume.volume_types.is_encrypted', return_value=False) def test_create_encryption_key_unencrypted(self, is_encrypted): result = volume_utils.create_encryption_key(mock.ANY, mock.ANY, fake.VOLUME_TYPE_ID) self.assertIsNone(result) @mock.patch('cinder.volume.volume_types.is_encrypted', return_value=True) @mock.patch('cinder.volume.volume_types.get_volume_type_encryption') @mock.patch('cinder.keymgr.conf_key_mgr.ConfKeyManager.create_key') def test_create_encryption_key_encrypted(self, create_key, get_volume_type_encryption, is_encryption): enc_spec = {'cipher': 'aes-xts-plain64', 'key_size': 256, 'provider': 'p1', 'control_location': 'front-end', 'encryption_id': 'uuid1'} ctxt = context.get_admin_context() type_ref1 = volume_types.create(ctxt, "type1") encryption = db.volume_type_encryption_create( ctxt, type_ref1['id'], enc_spec) get_volume_type_encryption.return_value = encryption CONF.set_override( 'backend', 'cinder.keymgr.conf_key_mgr.ConfKeyManager', group='key_manager') km = key_manager.API() volume_utils.create_encryption_key(ctxt, km, fake.VOLUME_TYPE_ID) is_encryption.assert_called_once_with(ctxt, fake.VOLUME_TYPE_ID) get_volume_type_encryption.assert_called_once_with( ctxt, fake.VOLUME_TYPE_ID) create_key.assert_called_once_with(ctxt, algorithm='aes', length=256) @mock.patch('cinder.volume.volume_types.is_encrypted', return_value=True) @mock.patch('cinder.volume.volume_types.get_volume_type_encryption') @mock.patch('cinder.keymgr.conf_key_mgr.ConfKeyManager.create_key') def test_create_encryption_key_invalid_spec(self, create_key, get_volume_type_encryption, is_encryption): enc_spec = {'cipher': None, 'key_size': 256, 'provider': 'p1', 'control_location': 'front-end', 'encryption_id': 'uuid1'} ctxt = context.get_admin_context() type_ref1 = volume_types.create(ctxt, "type1") encryption = db.volume_type_encryption_create( ctxt, type_ref1['id'], enc_spec) 
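# --- Illustrative aside (not part of the original test module) ---
# Hedged sketch of the behaviour asserted by the two create_encryption_key
# tests around this point: the volume type's encryption spec provides a
# cipher (e.g. 'aes-xts-plain64') and a key_size, the key manager is asked
# for a key whose algorithm is the first dash-separated token of that
# cipher, and a spec without a cipher is rejected before any key is
# created. The helper below is hypothetical and only restates the mapping
# the assertions check.
def _sketch_key_manager_args(enc_spec):
    cipher = enc_spec.get('cipher')
    if not cipher:
        # mirrors the invalid-spec case, which the test expects to raise
        # before create_key() is ever called
        raise ValueError('encryption spec has no cipher')
    return {'algorithm': cipher.split('-')[0],
            'length': enc_spec.get('key_size')}

# _sketch_key_manager_args({'cipher': 'aes-xts-plain64', 'key_size': 256})
# -> {'algorithm': 'aes', 'length': 256}, matching
# create_key.assert_called_once_with(ctxt, algorithm='aes', length=256).
# --- end aside ---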
get_volume_type_encryption.return_value = encryption CONF.set_override( 'backend', 'cinder.keymgr.conf_key_mgr.ConfKeyManager', group='key_manager') km = key_manager.API() self.assertRaises(exception.Invalid, volume_utils.create_encryption_key, ctxt, km, fake.VOLUME_TYPE_ID) is_encryption.assert_called_once_with(ctxt, fake.VOLUME_TYPE_ID) get_volume_type_encryption.assert_called_once_with( ctxt, fake.VOLUME_TYPE_ID) create_key.assert_not_called() @ddt.data(' True', ' true', ' yes') def test_is_replicated_spec_true(self, enabled): res = volume_utils.is_replicated_spec({'replication_enabled': enabled}) self.assertTrue(res) @ddt.data({}, None, {'key': 'value'}) def test_is_replicated_no_specs(self, extra_specs): res = volume_utils.is_replicated_spec(extra_specs) self.assertFalse(bool(res)) @ddt.data(' False', ' false', ' f', 'baddata', 'bad data') def test_is_replicated_spec_false(self, enabled): res = volume_utils.is_replicated_spec({'replication_enabled': enabled}) self.assertFalse(res) @mock.patch('cinder.db.group_get') def test_group_get_by_id(self, mock_db_group_get): expected = mock.Mock() mock_db_group_get.return_value = expected group_id = fake.GROUP_ID actual = volume_utils.group_get_by_id(group_id) self.assertEqual(expected, actual) @mock.patch('cinder.db.group_get') def test_group_get_by_id_group_not_found(self, mock_db_group_get): group_id = fake.GROUP_ID mock_db_group_get.side_effect = exception.GroupNotFound( group_id=group_id) self.assertRaises( exception.GroupNotFound, volume_utils.group_get_by_id, group_id ) @ddt.data(' False', None, 'notASpecValueWeCareAbout') def test_is_group_a_cg_snapshot_type_is_false(self, spec_value): with mock.patch('cinder.volume.group_types' '.get_group_type_specs') as mock_get_specs: mock_get_specs.return_value = spec_value group = fake_group.fake_group_obj( None, group_type_id=fake.GROUP_TYPE_ID) self.assertFalse(volume_utils.is_group_a_cg_snapshot_type(group)) @mock.patch('cinder.volume.group_types.get_group_type_specs') def test_is_group_a_cg_snapshot_type_is_true(self, mock_get_specs): mock_get_specs.return_value = ' True' group = fake_group.fake_group_obj( None, group_type_id=fake.GROUP_TYPE_ID) self.assertTrue(volume_utils.is_group_a_cg_snapshot_type(group)) @ddt.data({'max_over_subscription_ratio': '10', 'supports_auto': True}, {'max_over_subscription_ratio': 'auto', 'supports_auto': True}, {'max_over_subscription_ratio': 'auto', 'supports_auto': False}, {'max_over_subscription_ratio': '1.2', 'supports_auto': False},) @ddt.unpack def test_get_max_over_subscription_ratio(self, max_over_subscription_ratio, supports_auto): if not supports_auto and max_over_subscription_ratio == 'auto': self.assertRaises(exception.VolumeDriverException, volume_utils.get_max_over_subscription_ratio, max_over_subscription_ratio, supports_auto) elif not supports_auto: mosr = volume_utils.get_max_over_subscription_ratio( max_over_subscription_ratio, supports_auto) self.assertEqual(float(max_over_subscription_ratio), mosr) else: # supports_auto mosr = volume_utils.get_max_over_subscription_ratio( max_over_subscription_ratio, supports_auto) if max_over_subscription_ratio == 'auto': self.assertEqual(max_over_subscription_ratio, mosr) else: self.assertEqual(float(max_over_subscription_ratio), mosr) def test_check_image_metadata(self): image_meta = {'id': 1, 'min_disk': 3, 'status': 'active', 'size': 1 * units.Gi} vol_size = 2 res = self.assertRaises(exception.InvalidInput, volume_utils.check_image_metadata, image_meta, vol_size) self.assertIn("Volume size 2GB cannot be 
smaller than the image " "minDisk size 3GB.", str(res)) image_meta['size'] = 3 * units.Gi res = self.assertRaises(exception.InvalidInput, volume_utils.check_image_metadata, image_meta, vol_size) self.assertIn("Size of specified image 3GB is larger than volume " "size 2GB.", str(res)) image_meta['status'] = 'error' res = self.assertRaises(exception.InvalidInput, volume_utils.check_image_metadata, image_meta, vol_size) self.assertIn("Image 1 is not active.", str(res)) @ddt.data(None, 1, 3) def test_check_image_metadata_virtual_size(self, fake_virtual_size): image_meta = {'id': 1, 'min_disk': 1, 'status': 'active', 'size': 1 * units.Gi, 'virtual_size': fake_virtual_size * units.Gi if fake_virtual_size else None} vol_size = 2 if fake_virtual_size and fake_virtual_size > vol_size: res = self.assertRaises( exception.ImageUnacceptable, volume_utils.check_image_metadata, image_meta, vol_size) self.assertIn("Image virtual size is %(image_size)dGB" " and doesn't fit in a volume of size" " %(volume_size)dGB." % {'image_size': fake_virtual_size, 'volume_size': vol_size}, str(res)) else: with mock.patch( 'cinder.image.image_utils.check_virtual_size') as \ mock_check_virtual_size: volume_utils.check_image_metadata(image_meta, vol_size) if fake_virtual_size: mock_check_virtual_size.assert_called_once_with( image_meta['virtual_size'], vol_size, image_meta['id']) else: mock_check_virtual_size.assert_not_called() def test_enable_volume_bootable(self): ctxt = context.get_admin_context() volume = test_utils.create_volume(ctxt, bootable=False) volume_utils.enable_bootable_flag(volume) self.assertTrue(volume.bootable) def test_get_volume_image_metadata(self): common_meta = {'container_format': 'fake_type', 'disk_format': 'fake_format', 'min_disk': 3, 'min_ram': 1, 'size': 1 * units.Gi} image_meta = {'id': fake.IMAGE_ID, 'other_metada': 'fake'} image_meta.update(common_meta) expected = {'image_id': image_meta['id']} expected.update(common_meta) self.assertEqual( expected, volume_utils.get_volume_image_metadata(fake.IMAGE_ID, image_meta)) @ddt.data(True, False) def test_copy_image_to_volume(self, is_encrypted): ctxt = context.get_admin_context() fake_driver = mock.MagicMock() key = fake.ENCRYPTION_KEY_ID if is_encrypted else None volume = fake_volume.fake_volume_obj(ctxt, encryption_key_id=key) fake_image_service = fake_image.FakeImageService() image_id = fake.IMAGE_ID image_meta = {'id': image_id} image_location = 'abc' volume_utils.copy_image_to_volume(fake_driver, ctxt, volume, image_meta, image_location, fake_image_service) if is_encrypted: fake_driver.copy_image_to_encrypted_volume.assert_called_once_with( ctxt, volume, fake_image_service, image_id, disable_sparse=False) else: fake_driver.copy_image_to_volume.assert_called_once_with( ctxt, volume, fake_image_service, image_id, disable_sparse=False) @ddt.data({'cipher': 'aes-xts-plain64', 'provider': 'luks'}, {'cipher': 'aes-xts-plain64', 'provider': 'nova.volume.encryptors.luks.LuksEncryptor'}) def test_check_encryption_provider(self, encryption_metadata): ctxt = context.get_admin_context() type_ref = volume_types.create(ctxt, "type1") encryption = db.volume_type_encryption_create( ctxt, type_ref['id'], encryption_metadata) with mock.patch( 'cinder.db.sqlalchemy.api.volume_encryption_metadata_get', return_value=encryption): volume_data = {'id': fake.VOLUME_ID, 'volume_type_id': type_ref['id']} ctxt = context.get_admin_context() volume = fake_volume.fake_volume_obj(ctxt, **volume_data) ret = volume_utils.check_encryption_provider( volume, 
mock.sentinel.context) self.assertEqual('aes-xts-plain64', ret['cipher']) self.assertEqual('luks', ret['provider']) def test_check_encryption_provider_invalid(self): encryption_metadata = {'cipher': 'aes-xts-plain64', 'provider': 'invalid'} ctxt = context.get_admin_context() type_ref = volume_types.create(ctxt, "type1") encryption = db.volume_type_encryption_create( ctxt, type_ref['id'], encryption_metadata) with mock.patch( 'cinder.db.sqlalchemy.api.volume_encryption_metadata_get', return_value=encryption): volume_data = {'id': fake.VOLUME_ID, 'volume_type_id': type_ref['id']} ctxt = context.get_admin_context() volume = fake_volume.fake_volume_obj(ctxt, **volume_data) self.assertRaises( exception.VolumeDriverException, volume_utils.check_encryption_provider, volume, mock.sentinel.context) @mock.patch('cinder.volume.volume_utils.CONF.list_all_sections') def test_get_backend_configuration_backend_stanza_not_found(self, mock_conf): mock_conf.return_value = [] self.assertRaises(exception.ConfigNotFound, volume_utils.get_backend_configuration, 'backendA') mock_conf.return_value = ['backendB'] self.assertRaises(exception.ConfigNotFound, volume_utils.get_backend_configuration, 'backendA') @mock.patch('cinder.volume.volume_utils.CONF.list_all_sections') @mock.patch('cinder.volume.configuration.Configuration') def test_get_backend_configuration_backend_opts(self, mock_configuration, mock_conf): mock_conf.return_value = ['backendA'] volume_utils.get_backend_configuration('backendA', ['someFakeOpt']) mock_configuration.assert_called_with(driver.volume_opts, config_group='backendA') mock_configuration.return_value.\ append_config_values.assert_called_with(['someFakeOpt']) @mock.patch('cinder.volume.volume_utils.CONF.list_all_sections') @mock.patch('cinder.volume.configuration.Configuration') def test_get_backend_configuration(self, mock_configuration, mock_conf): mock_conf.return_value = ['backendA'] volume_utils.get_backend_configuration('backendA') mock_configuration.assert_called_with(driver.volume_opts, config_group='backendA') def test_require_driver_initialized(self): driver = mock.Mock() driver.initialized = True volume_utils.require_driver_initialized(driver) driver.initialized = False self.assertRaises(exception.DriverNotInitialized, volume_utils.require_driver_initialized, driver) @ddt.ddt class LogTracingTestCase(test.TestCase): def test_utils_setup_tracing(self): self.mock_object(volume_utils, 'LOG') volume_utils.setup_tracing(None) self.assertFalse(volume_utils.TRACE_API) self.assertFalse(volume_utils.TRACE_METHOD) self.assertEqual(0, volume_utils.LOG.warning.call_count) volume_utils.setup_tracing(['method']) self.assertFalse(volume_utils.TRACE_API) self.assertTrue(volume_utils.TRACE_METHOD) self.assertEqual(0, volume_utils.LOG.warning.call_count) volume_utils.setup_tracing(['method', 'api']) self.assertTrue(volume_utils.TRACE_API) self.assertTrue(volume_utils.TRACE_METHOD) self.assertEqual(0, volume_utils.LOG.warning.call_count) def test_utils_setup_tracing_invalid_key(self): self.mock_object(volume_utils, 'LOG') volume_utils.setup_tracing(['fake']) self.assertFalse(volume_utils.TRACE_API) self.assertFalse(volume_utils.TRACE_METHOD) self.assertEqual(1, volume_utils.LOG.warning.call_count) def test_utils_setup_tracing_valid_and_invalid_key(self): self.mock_object(volume_utils, 'LOG') volume_utils.setup_tracing(['method', 'fake']) self.assertFalse(volume_utils.TRACE_API) self.assertTrue(volume_utils.TRACE_METHOD) self.assertEqual(1, volume_utils.LOG.warning.call_count) def 
test_trace_no_tracing(self): self.mock_object(volume_utils, 'LOG') @volume_utils.trace_method def _trace_test_method(*args, **kwargs): return 'OK' volume_utils.setup_tracing(None) result = _trace_test_method() self.assertEqual('OK', result) self.assertEqual(0, volume_utils.LOG.debug.call_count) def test_utils_trace_method(self): self.mock_object(volume_utils, 'LOG') @volume_utils.trace_method def _trace_test_method(*args, **kwargs): return 'OK' volume_utils.setup_tracing(['method']) result = _trace_test_method() self.assertEqual('OK', result) self.assertEqual(2, volume_utils.LOG.debug.call_count) def test_utils_trace_api(self): self.mock_object(volume_utils, 'LOG') @volume_utils.trace_api def _trace_test_api(*args, **kwargs): return 'OK' volume_utils.setup_tracing(['api']) result = _trace_test_api() self.assertEqual('OK', result) self.assertEqual(2, volume_utils.LOG.debug.call_count) def test_utils_trace_api_filtered(self): self.mock_object(volume_utils, 'LOG') def filter_func(all_args): return False @volume_utils.trace_api(filter_function=filter_func) def _trace_test_api(*args, **kwargs): return 'OK' volume_utils.setup_tracing(['api']) result = _trace_test_api() self.assertEqual('OK', result) self.assertEqual(0, volume_utils.LOG.debug.call_count) def test_utils_trace_filtered(self): self.mock_object(volume_utils, 'LOG') def filter_func(all_args): return False @volume_utils.trace(filter_function=filter_func) def _trace_test(*args, **kwargs): return 'OK' volume_utils.setup_tracing(['api']) result = _trace_test() self.assertEqual('OK', result) self.assertEqual(0, volume_utils.LOG.debug.call_count) def test_utils_trace_method_default_logger(self): mock_log = self.mock_object(volume_utils, 'LOG') @volume_utils.trace_method def _trace_test_method_custom_logger(*args, **kwargs): return 'OK' volume_utils.setup_tracing(['method']) result = _trace_test_method_custom_logger() self.assertEqual('OK', result) self.assertEqual(2, mock_log.debug.call_count) def test_utils_trace_method_inner_decorator(self): mock_logging = self.mock_object(volume_utils, 'logging') mock_log = mock.Mock() mock_log.isEnabledFor = lambda x: True mock_logging.getLogger = mock.Mock(return_value=mock_log) def _test_decorator(f): def blah(*args, **kwargs): return f(*args, **kwargs) return blah @_test_decorator @volume_utils.trace_method def _trace_test_method(*args, **kwargs): return 'OK' volume_utils.setup_tracing(['method']) result = _trace_test_method(self) self.assertEqual('OK', result) self.assertEqual(2, mock_log.debug.call_count) # Ensure the correct function name was logged for call in mock_log.debug.call_args_list: self.assertIn('_trace_test_method', str(call)) self.assertNotIn('blah', str(call)) def test_utils_trace_method_outer_decorator(self): mock_logging = self.mock_object(volume_utils, 'logging') mock_log = mock.Mock() mock_log.isEnabledFor = lambda x: True mock_logging.getLogger = mock.Mock(return_value=mock_log) def _test_decorator(f): def blah(*args, **kwargs): return f(*args, **kwargs) return blah @volume_utils.trace_method @_test_decorator def _trace_test_method(*args, **kwargs): return 'OK' volume_utils.setup_tracing(['method']) result = _trace_test_method(self) self.assertEqual('OK', result) self.assertEqual(2, mock_log.debug.call_count) # Ensure the incorrect function name was logged for call in mock_log.debug.call_args_list: self.assertNotIn('_trace_test_method', str(call)) self.assertIn('blah', str(call)) def test_utils_trace_method_outer_decorator_with_functools(self): mock_log = mock.Mock() 
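# --- Illustrative aside (not part of the original test module) ---
# Why the two "outer decorator" trace tests differ: a plain wrapper
# function hides the wrapped function's __name__, so the trace messages
# name the wrapper ('blah'); when the wrapper is declared with
# functools.wraps, the original __name__ (and other metadata) is copied
# over, so the trace messages name '_trace_test_method' again. Minimal
# standalone sketch:
import functools

def _plain_decorator(f):
    def blah(*args, **kwargs):
        return f(*args, **kwargs)
    return blah

def _wraps_decorator(f):
    @functools.wraps(f)
    def inner(*args, **kwargs):
        return f(*args, **kwargs)
    return inner

def _sample():
    return 'OK'

# _plain_decorator(_sample).__name__ == 'blah'
# _wraps_decorator(_sample).__name__ == '_sample'
# --- end aside ---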
mock_log.isEnabledFor = lambda x: True self.mock_object(utils.logging, 'getLogger', mock_log) mock_log = self.mock_object(volume_utils, 'LOG') def _test_decorator(f): @functools.wraps(f) def wraps(*args, **kwargs): return f(*args, **kwargs) return wraps @volume_utils.trace_method @_test_decorator def _trace_test_method(*args, **kwargs): return 'OK' volume_utils.setup_tracing(['method']) result = _trace_test_method() self.assertEqual('OK', result) self.assertEqual(2, mock_log.debug.call_count) # Ensure the incorrect function name was logged for call in mock_log.debug.call_args_list: self.assertIn('_trace_test_method', str(call)) self.assertNotIn('wraps', str(call)) def test_utils_trace_method_with_exception(self): self.LOG = self.mock_object(volume_utils, 'LOG') @volume_utils.trace_method def _trace_test_method(*args, **kwargs): raise exception.APITimeout('test message') volume_utils.setup_tracing(['method']) self.assertRaises(exception.APITimeout, _trace_test_method) exception_log = self.LOG.debug.call_args_list[1] self.assertIn('exception', str(exception_log)) self.assertIn('test message', str(exception_log)) def test_utils_trace_method_with_time(self): mock_logging = self.mock_object(volume_utils, 'logging') mock_log = mock.Mock() mock_log.isEnabledFor = lambda x: True mock_logging.getLogger = mock.Mock(return_value=mock_log) mock_time = mock.Mock(side_effect=[3.1, 6]) self.mock_object(time, 'time', mock_time) @volume_utils.trace_method def _trace_test_method(*args, **kwargs): return 'OK' volume_utils.setup_tracing(['method']) result = _trace_test_method(self) self.assertEqual('OK', result) return_log = mock_log.debug.call_args_list[1] self.assertIn('2900', str(return_log)) def test_utils_trace_wrapper_class(self): mock_logging = self.mock_object(volume_utils, 'logging') mock_log = mock.Mock() mock_log.isEnabledFor = lambda x: True mock_logging.getLogger = mock.Mock(return_value=mock_log) volume_utils.setup_tracing(['method']) class MyClass(object, metaclass=volume_utils.TraceWrapperMetaclass): def trace_test_method(self): return 'OK' test_class = MyClass() result = test_class.trace_test_method() self.assertEqual('OK', result) self.assertEqual(2, mock_log.debug.call_count) def test_utils_trace_method_with_password_dict(self): mock_logging = self.mock_object(volume_utils, 'logging') mock_log = mock.Mock() mock_log.isEnabledFor = lambda x: True mock_logging.getLogger = mock.Mock(return_value=mock_log) @volume_utils.trace_method def _trace_test_method(*args, **kwargs): return {'something': 'test', 'password': 'Now you see me'} volume_utils.setup_tracing(['method']) result = _trace_test_method(self) expected_unmasked_dict = {'something': 'test', 'password': 'Now you see me'} self.assertEqual(expected_unmasked_dict, result) self.assertEqual(2, mock_log.debug.call_count) self.assertIn("'password': '***'", str(mock_log.debug.call_args_list[1])) def test_utils_trace_method_with_password_str(self): mock_logging = self.mock_object(volume_utils, 'logging') mock_log = mock.Mock() mock_log.isEnabledFor = lambda x: True mock_logging.getLogger = mock.Mock(return_value=mock_log) @volume_utils.trace_method def _trace_test_method(*args, **kwargs): return "'adminPass': 'Now you see me'" volume_utils.setup_tracing(['method']) result = _trace_test_method(self) expected_unmasked_str = "'adminPass': 'Now you see me'" self.assertEqual(expected_unmasked_str, result) self.assertEqual(2, mock_log.debug.call_count) self.assertIn("'adminPass': '***'", str(mock_log.debug.call_args_list[1])) def 
test_utils_trace_method_with_password_in_formal_params(self): mock_logging = self.mock_object(volume_utils, 'logging') mock_log = mock.Mock() mock_log.isEnabledFor = lambda x: True mock_logging.getLogger = mock.Mock(return_value=mock_log) @volume_utils.trace def _trace_test_method(*args, **kwargs): self.assertEqual('verybadpass', kwargs['test_args']['data']['password']) pass test_args = { 'data': { 'password': 'verybadpass' } } _trace_test_method(self, test_args=test_args) self.assertEqual(2, mock_log.debug.call_count) self.assertIn("'password': '***'", str(mock_log.debug.call_args_list[0])) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/utils.py0000664000175000017500000005156600000000000020203 0ustar00zuulzuul00000000000000# Copyright 2011 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # import datetime import functools import socket from unittest import mock import uuid import weakref import fixtures from oslo_config import cfg from oslo_service import loopingcall from oslo_utils import timeutils import oslo_versionedobjects from cinder.common import constants from cinder import context from cinder import db from cinder.db.sqlalchemy import api as sqlalchemy_api from cinder import exception from cinder import objects from cinder.objects import fields from cinder.tests.unit import fake_constants as fake CONF = cfg.CONF def get_test_admin_context(): return context.get_admin_context() def is_db_dialect(dialect_name): db_engine = sqlalchemy_api.get_engine() dialect = db_engine.url.get_dialect() return dialect_name == dialect.name def obj_attr_is_set(obj_class): """Method to allow setting the ID on an OVO on creation.""" original_method = obj_class.obj_attr_is_set def wrapped(self, attr): if attr == 'id' and not hasattr(self, 'id_first_call'): self.id_first_call = False return False else: original_method(self, attr) return wrapped def create_volume(ctxt, host='test_host', display_name='test_volume', display_description='this is a test volume', status='available', size=1, availability_zone='fake_az', replication_status='disabled', testcase_instance=None, id=None, metadata=None, admin_metadata=None, volume_type_id=fake.VOLUME_TYPE2_ID, **kwargs): """Create a volume object in the DB.""" vol = {'size': size, 'host': host, 'user_id': ctxt.user_id, 'project_id': ctxt.project_id, 'status': status, 'display_name': display_name, 'display_description': display_description, 'attach_status': fields.VolumeAttachStatus.DETACHED, 'availability_zone': availability_zone, 'volume_type_id': volume_type_id } if metadata: vol['metadata'] = metadata if admin_metadata: vol['admin_metadata'] = admin_metadata ctxt = ctxt.elevated() for key in kwargs: vol[key] = kwargs[key] vol['replication_status'] = replication_status if id: with mock.patch('cinder.objects.Volume.obj_attr_is_set', obj_attr_is_set(objects.Volume)): volume = objects.Volume(context=ctxt, id=id, **vol) volume.create() else: volume = 
objects.Volume(context=ctxt, **vol) volume.create() # If we get a TestCase instance we add cleanup if testcase_instance: testcase_instance.addCleanup(volume.destroy) return volume def attach_volume(ctxt, volume_id, instance_uuid, attached_host, mountpoint, mode='rw'): if isinstance(volume_id, objects.Volume): volume_ovo = volume_id volume_id = volume_ovo.id else: volume_ovo = None now = timeutils.utcnow() values = {} values['volume_id'] = volume_id values['attached_host'] = attached_host values['mountpoint'] = mountpoint values['attach_time'] = now attachment = db.volume_attach(ctxt.elevated(), values) volume, updated_values = db.volume_attached( ctxt.elevated(), attachment['id'], instance_uuid, attached_host, mountpoint, mode) if volume_ovo: cls = objects.Volume expected_attrs = cls._get_expected_attrs(ctxt) volume = cls._from_db_object(ctxt, cls(ctxt), volume, expected_attrs=expected_attrs) return volume def create_snapshot(ctxt, volume_id, display_name='test_snapshot', display_description='this is a test snapshot', cgsnapshot_id = None, status=fields.SnapshotStatus.CREATING, testcase_instance=None, id=None, **kwargs): vol = db.volume_get(ctxt, volume_id) snap = objects.Snapshot(ctxt) snap.volume_id = volume_id snap.user_id = ctxt.user_id or fake.USER_ID snap.project_id = ctxt.project_id or fake.PROJECT_ID snap.status = status snap.metadata = {} snap.volume_size = vol['size'] snap.display_name = display_name snap.display_description = display_description snap.cgsnapshot_id = cgsnapshot_id if id: with mock.patch('cinder.objects.Snapshot.obj_attr_is_set', obj_attr_is_set(objects.Snapshot)): snap.id = id snap.create() else: snap.create() # We do the update after creating the snapshot in case we want to set # deleted field snap.update(kwargs) snap.save() # If we get a TestCase instance we add cleanup if testcase_instance: testcase_instance.addCleanup(snap.destroy) return snap def create_consistencygroup(ctxt, host='test_host@fakedrv#fakepool', name='test_cg', description='this is a test cg', status=fields.ConsistencyGroupStatus.AVAILABLE, availability_zone='fake_az', volume_type_id=None, cgsnapshot_id=None, source_cgid=None, **kwargs): """Create a consistencygroup object in the DB.""" cg = objects.ConsistencyGroup(ctxt) cg.host = host cg.user_id = ctxt.user_id or fake.USER_ID cg.project_id = ctxt.project_id or fake.PROJECT_ID cg.status = status cg.name = name cg.description = description cg.availability_zone = availability_zone if volume_type_id: cg.volume_type_id = volume_type_id cg.cgsnapshot_id = cgsnapshot_id cg.source_cgid = source_cgid new_id = kwargs.pop('id', None) cg.update(kwargs) cg.create() if new_id and new_id != cg.id: db.consistencygroup_update(ctxt, cg.id, {'id': new_id}) cg = objects.ConsistencyGroup.get_by_id(ctxt, new_id) return cg def create_group(ctxt, host='test_host@fakedrv#fakepool', name='test_group', description='this is a test group', status=fields.GroupStatus.AVAILABLE, availability_zone='fake_az', group_type_id=None, volume_type_ids=None, **kwargs): """Create a group object in the DB.""" grp = objects.Group(ctxt) grp.host = host grp.user_id = ctxt.user_id or fake.USER_ID grp.project_id = ctxt.project_id or fake.PROJECT_ID grp.status = status grp.name = name grp.description = description grp.availability_zone = availability_zone if group_type_id: grp.group_type_id = group_type_id if volume_type_ids: grp.volume_type_ids = volume_type_ids new_id = kwargs.pop('id', None) grp.update(kwargs) grp.create() if new_id and new_id != grp.id: db.group_update(ctxt, grp.id, 
{'id': new_id}) grp = objects.Group.get_by_id(ctxt, new_id) return grp def create_cgsnapshot(ctxt, consistencygroup_id, name='test_cgsnapshot', description='this is a test cgsnapshot', status='creating', recursive_create_if_needed=True, return_vo=True, **kwargs): """Create a cgsnapshot object in the DB.""" values = { 'user_id': ctxt.user_id or fake.USER_ID, 'project_id': ctxt.project_id or fake.PROJECT_ID, 'status': status, 'name': name, 'description': description, 'consistencygroup_id': consistencygroup_id} values.update(kwargs) if recursive_create_if_needed and consistencygroup_id: create_cg = False try: objects.ConsistencyGroup.get_by_id(ctxt, consistencygroup_id) create_vol = not db.volume_get_all_by_group( ctxt, consistencygroup_id) except exception.ConsistencyGroupNotFound: create_cg = True create_vol = True if create_cg: create_consistencygroup(ctxt, id=consistencygroup_id) if create_vol: create_volume(ctxt, consistencygroup_id=consistencygroup_id) cgsnap = db.cgsnapshot_create(ctxt, values) if not return_vo: return cgsnap return objects.CGSnapshot.get_by_id(ctxt, cgsnap.id) def create_group_snapshot(ctxt, group_id, group_type_id=None, name='test_group_snapshot', description='this is a test group snapshot', status='creating', recursive_create_if_needed=True, return_vo=True, **kwargs): """Create a group snapshot object in the DB.""" values = { 'user_id': ctxt.user_id or fake.USER_ID, 'project_id': ctxt.project_id or fake.PROJECT_ID, 'status': status, 'name': name, 'description': description, 'group_id': group_id, 'group_type_id': group_type_id} values.update(kwargs) if recursive_create_if_needed and group_id: create_grp = False try: objects.Group.get_by_id(ctxt, group_id) create_vol = not db.volume_get_all_by_generic_group( ctxt, group_id) except exception.GroupNotFound: create_grp = True create_vol = True if create_grp: create_group(ctxt, id=group_id, group_type_id=group_type_id) if create_vol: create_volume(ctxt, group_id=group_id) if not return_vo: return db.group_snapshot_create(ctxt, values) else: group_snapshot = objects.GroupSnapshot(ctxt) new_id = values.pop('id', None) group_snapshot.update(values) group_snapshot.create() if new_id and new_id != group_snapshot.id: db.group_snapshot_update(ctxt, group_snapshot.id, {'id': new_id}) group_snapshot = objects.GroupSnapshot.get_by_id(ctxt, new_id) return group_snapshot def create_backup(ctxt, volume_id=fake.VOLUME_ID, display_name='test_backup', display_description='This is a test backup', status=fields.BackupStatus.CREATING, parent_id=None, temp_volume_id=None, temp_snapshot_id=None, snapshot_id=None, data_timestamp=None, size=None, container=None, availability_zone=None, host=None, metadata=None, **kwargs): """Create a backup object.""" values = { 'user_id': ctxt.user_id or fake.USER_ID, 'project_id': ctxt.project_id or fake.PROJECT_ID, 'volume_id': volume_id, 'status': status, 'display_name': display_name, 'display_description': display_description, 'container': container or 'fake', 'availability_zone': availability_zone or 'fake', 'service': 'fake', 'size': size or 5 * 1024 * 1024, 'object_count': 22, 'host': host or socket.gethostname(), 'parent_id': parent_id, 'temp_volume_id': temp_volume_id, 'temp_snapshot_id': temp_snapshot_id, 'snapshot_id': snapshot_id, 'data_timestamp': data_timestamp, 'metadata': metadata or {}, } values.update(kwargs) backup = objects.Backup(ctxt, **values) backup.create() if not snapshot_id: backup.data_timestamp = backup.created_at backup.save() return backup def create_message(ctxt, 
project_id='fake_project', request_id='test_backup', resource_type='This is a test backup', resource_uuid='3asf434-3s433df43-434adf3-343df443', action=None, message_level='Error'): """Create a message in the DB.""" expires_at = (timeutils.utcnow() + datetime.timedelta( seconds=30)) message_record = {'project_id': project_id, 'request_id': request_id, 'resource_type': resource_type, 'resource_uuid': resource_uuid, 'action_id': action[0] if action else '', 'event_id': "VOLUME_VOLUME_%s_002" % action[0], 'message_level': message_level, 'expires_at': expires_at} return db.message_create(ctxt, message_record) def create_volume_type(ctxt, testcase_instance=None, **kwargs): vol_type = db.volume_type_create(ctxt, kwargs) # If we get a TestCase instance we add cleanup if testcase_instance: testcase_instance.addCleanup(db.volume_type_destroy, ctxt, vol_type.id) return vol_type def create_encryption(ctxt, vol_type_id, testcase_instance=None, **kwargs): encrypt = db.volume_type_encryption_create(ctxt, vol_type_id, kwargs) # If we get a TestCase instance we add cleanup if testcase_instance: testcase_instance.addCleanup(db.volume_type_encryption_delete, ctxt, vol_type_id) return encrypt def create_qos(ctxt, testcase_instance=None, **kwargs): qos = db.qos_specs_create(ctxt, kwargs) if testcase_instance: testcase_instance.addCleanup(db.qos_specs_delete, ctxt, qos['id']) return qos class ZeroIntervalLoopingCall(loopingcall.FixedIntervalLoopingCall): def start(self, interval, initial_delay=None, stop_on_exception=True): return super(ZeroIntervalLoopingCall, self).start( 0, 0, stop_on_exception) class ZeroIntervalWithTimeoutLoopingCall( loopingcall.FixedIntervalWithTimeoutLoopingCall): def start(self, interval, initial_delay=None, stop_on_exception=True, timeout=0): result = None with mock.patch('time.time', side_effect=[0, (timeout + 1)]): result = super(ZeroIntervalWithTimeoutLoopingCall, self).start( 0, 0, stop_on_exception, timeout) return result def replace_obj_loader(testcase, obj): def fake_obj_load_attr(self, name): # This will raise KeyError for non existing fields as expected field = self.fields[name] if field.default != oslo_versionedobjects.fields.UnspecifiedDefault: value = field.default elif field.nullable: value = None elif isinstance(field, oslo_versionedobjects.fields.StringField): value = '' elif isinstance(field, oslo_versionedobjects.fields.IntegerField): value = 1 elif isinstance(field, oslo_versionedobjects.fields.UUIDField): value = uuid.uuid4() setattr(self, name, value) testcase.addCleanup(setattr, obj, 'obj_load_attr', obj.obj_load_attr) obj.obj_load_attr = fake_obj_load_attr file_spec = None def get_file_spec(): """Return a 'file' spec. This is to be used anywhere that you need to do something such as mock.MagicMock(spec=file) to mock out something with the file attributes. Due to the 'file' built-in method being removed in Python 3 we need to do some special handling for it. """ global file_spec # set on first use if file_spec is None: import _io file_spec = list(set(dir(_io.TextIOWrapper)).union( set(dir(_io.BytesIO)))) def generate_timeout_series(timeout): """Generate a series of times that exceeds the given timeout. Yields a series of fake time.time() floating point numbers such that the difference between each pair in the series just exceeds the timeout value that is passed in. Useful for mocking time.time() in methods that otherwise wait for timeout seconds. 
""" iteration = 0 while True: iteration += 1 yield (iteration * timeout) + iteration def default_service_values(): return { 'host': 'fake_host', 'cluster_name': None, 'binary': 'fake_binary', 'topic': 'fake_topic', 'report_count': 3, 'disabled': False, 'availability_zone': 'nova', } def create_service(ctxt, values=None): values = values or {} v = default_service_values() v.update(values) service = objects.Service(ctxt, **v) service.create() # We need to read the contents from the DB if we have set updated_at # or created_at fields if 'updated_at' in values or 'created_at' in values: service = db.service_get(ctxt, service.id) return service def default_cluster_values(): return { 'name': 'cluster_name', 'binary': constants.VOLUME_BINARY, 'disabled': False, 'disabled_reason': None, 'deleted': False, 'updated_at': None, 'deleted_at': None, } def create_cluster(ctxt, **values): create_values = default_cluster_values() create_values.update(values) cluster = db.cluster_create(ctxt, create_values) return db.cluster_get(ctxt, cluster.id, services_summary=True) def create_populated_cluster(ctxt, num_services, num_down_svcs=0, **values): """Helper method that creates a cluster with up and down services.""" up_time = timeutils.utcnow() down_time = (up_time - datetime.timedelta(seconds=CONF.service_down_time + 1)) cluster = create_cluster(ctxt, **values) svcs = [ db.service_create( ctxt, {'cluster_name': cluster.name, 'host': 'host' + str(i), 'updated_at': down_time if i < num_down_svcs else up_time}) for i in range(num_services) ] return cluster, svcs def set_timeout(timeout): """Timeout decorator for unit test methods. Use this decorator for tests that are expected to pass in very specific amount of time, not common for all other tests. It can have either big or small value. """ def _decorator(f): @functools.wraps(f) def _wrapper(self, *args, **kwargs): self.useFixture(fixtures.Timeout(timeout, gentle=True)) return f(self, *args, **kwargs) return _wrapper return _decorator def time_format(at=None): """Format datetime string to date. :param at: Type is datetime.datetime (example 'datetime.datetime(2017, 12, 24, 22, 11, 32, 6086)') :returns: Format date (example '2017-12-24T22:11:32Z'). """ if not at: at = timeutils.utcnow() date_string = at.strftime("%Y-%m-%dT%H:%M:%S") tz = at.tzname(None) if at.tzinfo else 'UTC' # Need to handle either iso8601 or python UTC format date_string += ('Z' if tz in ['UTC', 'UTC+00:00'] else tz) return date_string class InstanceTracker(object): """Track instances of a given class. Going through the Garbage collection objects searching for instances makes tests take up to 12 times longer. The slower GC code alternative that was compared was something like: for obj in gc.get_objects(): try: if isinstance(obj, cls): except ReferenceError: pass """ def __init__(self, cls): self.cls = cls self.refs = [] self.init_method = getattr(cls, '__init__') setattr(cls, '__init__', self._track_instances()) def _track_instances(self): def track(init_self, *args, **kwargs): # Use weak references so garbage collector doesn't count these # references. 
self.refs.append(weakref.ref(init_self)) return self.init_method(init_self, *args, **kwargs) return track def clear(self): self.refs.clear() @property def instances(self): result = [] for ref in self.refs: inst = ref() # Only return instances that have not been garbage collected if inst is not None: result.append(inst) return result ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315577.2351196 cinder-27.0.0/cinder/tests/unit/volume/0000775000175000017500000000000000000000000017763 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/volume/__init__.py0000664000175000017500000001461500000000000022103 0ustar00zuulzuul00000000000000# Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import os import shutil import tempfile from unittest import mock from oslo_config import cfg from oslo_utils import importutils from stevedore import extension from cinder import context from cinder.image import image_utils from cinder import objects from cinder.tests.unit import fake_constants as fake from cinder.tests.unit.image import fake as fake_image from cinder.tests.unit import test from cinder.tests.unit import utils as tests_utils from cinder.volume import api as volume_api from cinder.volume import configuration as conf CONF = cfg.CONF class BaseVolumeTestCase(test.TestCase): """Test Case for volumes.""" FAKE_UUID = fake.IMAGE_ID def setUp(self, *args, **kwargs): super(BaseVolumeTestCase, self).setUp(*args, **kwargs) self.extension_manager = extension.ExtensionManager( "BaseVolumeTestCase") vol_tmpdir = tempfile.mkdtemp() self.flags(volumes_dir=vol_tmpdir) self.addCleanup(self._cleanup) self.volume = importutils.import_object(CONF.volume_manager) self.mock_object(self.volume, '_driver_shares_targets', return_value=False) self.volume.message_api = mock.Mock() self.configuration = mock.Mock(conf.Configuration) self.context = context.get_admin_context() self.context.user_id = fake.USER_ID # NOTE(mriedem): The id is hard-coded here for tracking race fail # assertions with the notification code, it's part of an # elastic-recheck query so don't remove it or change it. 
self.project_id = '7f265bd4-3a85-465e-a899-5dc4854a86d3' self.user_context = context.RequestContext(user_id=fake.USER_ID, project_id=self.project_id, is_admin=False) self.context.project_id = self.project_id self.volume_params = { 'status': 'creating', 'host': CONF.host, 'size': 1} fake_image.mock_image_service(self) self.mock_object(os.path, 'exists', lambda x: True) self.mock_object(image_utils, 'check_available_space', lambda x, y, z: True) self.volume.driver.set_initialized() self.volume.stats = {'allocated_capacity_gb': 0, 'pools': {}} # keep ordered record of what we execute self.called = [] self.volume_api = volume_api.API() # Don't accidentaly make a call to delete a file from the system self.mock_lock_remove = self.patch('cinder.utils.synchronized_remove') self.mock_dlm_lock_remove = self.patch('cinder.coordination.os.remove') def _cleanup(self): try: shutil.rmtree(CONF.volumes_dir) except OSError: pass def fake_get_all_volume_groups(obj, vg_name=None, no_suffix=True): return [{'name': 'cinder-volumes', 'size': '5.00', 'available': '2.50', 'lv_count': '2', 'uuid': 'vR1JU3-FAKE-C4A9-PQFh-Mctm-9FwA-Xwzc1m'}] @mock.patch('cinder.image.image_utils.TemporaryImages.fetch') @mock.patch('cinder.volume.flows.manager.create_volume.' 'CreateVolumeFromSpecTask._clone_image_volume') def _create_volume_from_image(self, mock_clone_image_volume, mock_fetch_img, fakeout_copy_image_to_volume=False, fakeout_clone_image=False, clone_image_volume=False): """Test function of create_volume_from_image. Test cases call this function to create a volume from image, caller can choose whether to fake out copy_image_to_volume and clone_image, after calling this, test cases should check status of the volume. """ def fake_local_path(volume): return dst_path def fake_copy_image_to_volume(context, volume, image_service, image_id): pass def fake_fetch_to_raw(ctx, image_service, image_id, path, blocksize, size=None, throttle=None, disable_sparse=False): pass def fake_clone_image(ctx, volume_ref, image_location, image_meta, image_service): return {'provider_location': None}, True dst_fd, dst_path = tempfile.mkstemp() os.close(dst_fd) self.mock_object(self.volume.driver, 'local_path', fake_local_path) if fakeout_clone_image: self.mock_object(self.volume.driver, 'clone_image', fake_clone_image) self.mock_object(image_utils, 'fetch_to_raw', fake_fetch_to_raw) if fakeout_copy_image_to_volume: self.mock_object(self.volume.driver, 'copy_image_to_volume', fake_copy_image_to_volume) mock_clone_image_volume.return_value = ({}, clone_image_volume) mock_fetch_img.return_value = mock.MagicMock( spec=tests_utils.get_file_spec()) image_id = 'c905cedb-7281-47e4-8a62-f26bc5fc4c77' volume = tests_utils.create_volume(self.context, **self.volume_params) # creating volume testdata try: request_spec = { 'volume_properties': self.volume_params, 'image_id': image_id, 'image_size': 1 } self.volume.create_volume(self.context, volume, request_spec) finally: # cleanup os.unlink(dst_path) volume = objects.Volume.get_by_id(self.context, volume.id) return volume ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315577.2431197 cinder-27.0.0/cinder/tests/unit/volume/drivers/0000775000175000017500000000000000000000000021441 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/volume/drivers/__init__.py0000664000175000017500000000000000000000000023540 
0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315577.2431197 cinder-27.0.0/cinder/tests/unit/volume/drivers/ceph/0000775000175000017500000000000000000000000022360 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/volume/drivers/ceph/__init__.py0000664000175000017500000000000000000000000024457 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/volume/drivers/ceph/test_rbd_iscsi.py0000664000175000017500000002422100000000000025733 0ustar00zuulzuul00000000000000# Copyright 2012 Josh Durgin # Copyright 2013 Canonical Ltd. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from unittest import mock import ddt from cinder import context from cinder import exception from cinder.tests.unit import fake_constants as fake from cinder.tests.unit import fake_volume from cinder.tests.unit import test import cinder.volume.drivers.ceph.rbd_iscsi as driver # This is used to collect raised exceptions so that tests may check what was # raised. # NOTE: this must be initialised in test setUp(). 
RAISED_EXCEPTIONS = [] @ddt.ddt class RBDISCSITestCase(test.TestCase): def setUp(self): global RAISED_EXCEPTIONS RAISED_EXCEPTIONS = [] super(RBDISCSITestCase, self).setUp() self.context = context.get_admin_context() self.fake_target_iqn = 'iqn.2019-01.com.suse.iscsi-gw:iscsi-igw' self.fake_valid_response = {'status': '200'} self.fake_clients = \ {'response': {'Content-Type': 'application/json', 'Content-Length': '55', 'Server': 'Werkzeug/0.14.1 Python/2.7.15rc1', 'Date': 'Wed, 19 Jun 2019 20:13:18 GMT', 'status': '200', 'content-location': 'http://192.168.121.11:5001/api/clients/' 'XX_REPLACE_ME'}, 'body': {'clients': ['iqn.1993-08.org.debian:01:5d3b9abba13d']}} self.volume_a = fake_volume.fake_volume_obj( self.context, **{'name': u'volume-0000000a', 'id': '4c39c3c7-168f-4b32-b585-77f1b3bf0a38', 'size': 10}) self.volume_b = fake_volume.fake_volume_obj( self.context, **{'name': u'volume-0000000b', 'id': '0c7d1f44-5a06-403f-bb82-ae7ad0d693a6', 'size': 10}) self.volume_c = fake_volume.fake_volume_obj( self.context, **{'name': u'volume-0000000a', 'id': '55555555-222f-4b32-b585-9991b3bf0a99', 'size': 12, 'encryption_key_id': fake.ENCRYPTION_KEY_ID}) def setup_configuration(self): config = mock.MagicMock() config.rbd_cluster_name = 'nondefault' config.rbd_pool = 'rbd' config.rbd_ceph_conf = '/etc/ceph/my_ceph.conf' config.rbd_secret_uuid = None config.rbd_user = 'cinder' config.volume_backend_name = None config.rbd_iscsi_api_user = 'fake_user' config.rbd_iscsi_api_password = 'fake_password' config.rbd_iscsi_api_url = 'http://fake.com:5000' return config @mock.patch('cinder.volume.drivers.rbd.RBDDriver.do_setup', new=mock.MagicMock()) @mock.patch('cinder.volume.drivers.ceph.rbd_iscsi.client') @mock.patch('cinder.volume.drivers.ceph.rbd_iscsi.rbd_iscsi_client') def test_unsupported_client_version(self, m_rbd_iscsi_client, m_client): m_rbd_iscsi_client.version = '0.1.0' m_client.version = '0.1.0' drv = driver.RBDISCSIDriver(configuration=self.setup_configuration()) drv.set_initialized() self.assertRaisesRegex(exception.InvalidInput, 'version', drv.do_setup, None) @ddt.data({'user': None, 'password': 'foo', 'url': 'http://fake.com:5000', 'iqn': None}, {'user': None, 'password': None, 'url': 'http://fake', 'iqn': None}, {'user': None, 'password': None, 'url': None, 'iqn': None}, {'user': 'fake', 'password': 'fake', 'url': None, 'iqn': None}, {'user': 'fake', 'password': 'fake', 'url': 'fake', 'iqn': None}, ) @ddt.unpack @mock.patch('cinder.volume.drivers.ceph.rbd_iscsi.client') @mock.patch('cinder.volume.drivers.ceph.rbd_iscsi.rbd_iscsi_client') def test_min_config(self, m_rbd_iscsi_client, m_client, user, password, url, iqn): config = self.setup_configuration() config.rbd_iscsi_api_user = user config.rbd_iscsi_api_password = password config.rbd_iscsi_api_url = url config.rbd_iscsi_target_iqn = iqn drv = driver.RBDISCSIDriver(configuration=config) drv.set_initialized() with mock.patch('cinder.volume.drivers.rbd.RBDDriver' '.check_for_setup_error'): self.assertRaises(exception.InvalidConfigurationValue, drv.check_for_setup_error) @ddt.data({'response': None}, {'response': {'nothing': 'nothing'}}, {'response': {'status': '300'}}) @ddt.unpack @mock.patch('cinder.volume.drivers.ceph.rbd_iscsi.RBDISCSIDriver.' 
'_create_client') @mock.patch('cinder.volume.drivers.ceph.rbd_iscsi.client') @mock.patch('cinder.volume.drivers.ceph.rbd_iscsi.rbd_iscsi_client') def test_do_setup(self, m_rbd_iscsi_client, m_client, m_create_client, response): m_create_client.return_value.get_api.return_value = (response, None) m_client.version = '3.0.0' m_rbd_iscsi_client.version = '3.0.0' drv = driver.RBDISCSIDriver(configuration=self.setup_configuration()) drv.set_initialized() with mock.patch('cinder.volume.drivers.rbd.RBDDriver.do_setup'): self.assertRaises(exception.InvalidConfigurationValue, drv.do_setup, None) @mock.patch('cinder.volume.drivers.ceph.rbd_iscsi.client') @mock.patch('cinder.volume.drivers.ceph.rbd_iscsi.rbd_iscsi_client') def test_unsupported_version(self, m_rbd_iscsi_client, m_client): m_rbd_iscsi_client.version = '0.1.4' drv = driver.RBDISCSIDriver(configuration=self.setup_configuration()) drv.set_initialized() self.assertRaisesRegex(exception.InvalidInput, 'Invalid rbd_iscsi_client version found', drv._create_client) @ddt.data({'status': '200', 'target_iqn': 'iqn.2019-01.com.suse.iscsi-gw:iscsi-igw', 'clients': ['foo']}, {'status': '300', 'target_iqn': 'iqn.2019-01.com.suse.iscsi-gw:iscsi-igw', 'clients': None} ) @ddt.unpack @mock.patch('cinder.volume.drivers.ceph.rbd_iscsi.RBDISCSIDriver.' '_create_client') @mock.patch('cinder.volume.drivers.ceph.rbd_iscsi.client') @mock.patch('cinder.volume.drivers.ceph.rbd_iscsi.rbd_iscsi_client') def test__get_clients(self, m_rbd_iscsi_client, m_client, m_create_client, status, target_iqn, clients): m_create_client.return_value.get_api.return_value = ( self.fake_valid_response, None) config = self.setup_configuration() config.rbd_iscsi_target_iqn = target_iqn drv = driver.RBDISCSIDriver(configuration=config) drv.set_initialized() response = self.fake_clients['response'] response['status'] = status response['content-location'] = ( response['content-location'].replace('XX_REPLACE_ME', target_iqn)) body = self.fake_clients['body'] m_create_client.return_value.get_clients.return_value = (response, body) with mock.patch('cinder.volume.drivers.rbd.RBDDriver.do_setup'): drv.do_setup(None) if status == '200': actual_response = drv._get_clients() self.assertEqual(actual_response, body) else: # we expect an exception self.assertRaisesRegex(exception.VolumeBackendAPIException, 'Failed to get_clients()', drv._get_clients) @ddt.data({'status': '200', 'body': {'created': 'someday', 'discovery_auth': 'somecrap', 'disks': 'fakedisks', 'gateways': 'fakegws', 'targets': 'faketargets'}}, {'status': '300', 'body': None}) @ddt.unpack @mock.patch('cinder.volume.drivers.ceph.rbd_iscsi.RBDISCSIDriver.' 
'_create_client') @mock.patch('cinder.volume.drivers.ceph.rbd_iscsi.client') @mock.patch('cinder.volume.drivers.ceph.rbd_iscsi.rbd_iscsi_client') def test__get_config(self, m_rbd_iscsi_client, m_client, m_create_client, status, body): m_create_client.return_value.get_api.return_value = ( self.fake_valid_response, None) config = self.setup_configuration() config.rbd_iscsi_target_iqn = self.fake_target_iqn drv = driver.RBDISCSIDriver(configuration=config) drv.set_initialized() response = self.fake_clients['response'] response['status'] = status response['content-location'] = ( response['content-location'].replace('XX_REPLACE_ME', self.fake_target_iqn)) m_create_client.return_value.get_config.return_value = (response, body) with mock.patch('cinder.volume.drivers.rbd.RBDDriver.do_setup'): drv.do_setup(None) if status == '200': actual_response = drv._get_config() self.assertEqual(body, actual_response) else: # we expect an exception self.assertRaisesRegex(exception.VolumeBackendAPIException, 'Failed to get_config()', drv._get_config) ././@PaxHeader0000000000000000000000000000003200000000000011450 xustar000000000000000026 mtime=1759315577.24712 cinder-27.0.0/cinder/tests/unit/volume/drivers/datacore/0000775000175000017500000000000000000000000023223 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/volume/drivers/datacore/__init__.py0000664000175000017500000000000000000000000025322 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/volume/drivers/datacore/test_datacore_api.py0000664000175000017500000007347100000000000027263 0ustar00zuulzuul00000000000000# Copyright (c) 2017 DataCore Software Corp. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""Unit tests for classes that are used to invoke DataCore SANsymphony API.""" from unittest import mock from oslo_utils import netutils from oslo_utils import units import suds from suds.sax import parser from suds import wsdl from cinder.tests.unit import test from cinder.volume.drivers.datacore import api from cinder.volume.drivers.datacore import exception class FakeWebSocketException(Exception): pass class DataCoreClientTestCase(test.TestCase): """Tests for the DataCore SANsymphony client.""" def setUp(self): super(DataCoreClientTestCase, self).setUp() self.mock_storage_services = mock.MagicMock() self.mock_executive_service = mock.MagicMock() self.mock_suds_client = mock.MagicMock() self.mock_object( api.suds_client, 'Client', return_value=self.mock_suds_client) self.mock_channel = mock.MagicMock() mock_websocket = self.mock_object(api, 'websocket') mock_websocket.WebSocketException = FakeWebSocketException mock_websocket.create_connection.return_value = self.mock_channel setattr(self.mock_suds_client.service.__getitem__, 'side_effect', self._get_service_side_effect) # TODO(tkajinam): Because of changes in netaddr 1.0.0, is_valid_ipv6 no # longer "accept" non string object and raises # TypeError. Patch the method until we properly mock # internal behavior to present valid address strings. mock_is_valid_ipv6 = self.mock_object(netutils, 'is_valid_ipv6') mock_is_valid_ipv6.return_value = False self.client = api.DataCoreClient('hostname', 'username', 'password', 1) self.client.API_RETRY_INTERVAL = 0 # Make sure failure logging does not get emitted during testing self.mock_object(api, 'LOG') def _get_service_side_effect(self, service_name): self.assertIn(service_name, [ api.DataCoreClient.STORAGE_SERVICES_BINDING, api.DataCoreClient.EXECUTIVE_SERVICE_BINDING ]) if service_name is api.DataCoreClient.STORAGE_SERVICES_BINDING: return self.mock_storage_services else: return self.mock_executive_service def _assert_storage_services_method_called(self, method_name): return self.mock_storage_services.__getitem__.assert_called_with( method_name) @property def mock_storage_service_context(self): return self.mock_storage_services.__getitem__()() @property def mock_executive_service_context(self): return self.mock_executive_service.__getitem__()() def test_process_request_failed(self): def fail_with_socket_error(): raise FakeWebSocketException() def fail_with_web_fault(message): fault = mock.Mock() fault.faultstring = "General error." document = mock.Mock() raise suds.WebFault(fault, document) self.mock_channel.recv.side_effect = fail_with_socket_error self.assertRaises(exception.DataCoreConnectionException, self.client.get_server_groups) self.mock_channel.recv.side_effect = None (self.mock_storage_service_context.process_reply .side_effect) = fail_with_web_fault self.assertRaises(exception.DataCoreFaultException, self.client.get_server_groups) def test_channel_closing_failed(self): def fail_with_socket_error(): raise FakeWebSocketException() def fail_with_web_fault(message): fault = mock.Mock() fault.faultstring = "General error." 
document = mock.Mock() raise suds.WebFault(fault, document) self.mock_channel.close.side_effect = fail_with_socket_error (self.mock_storage_service_context.process_reply .side_effect) = fail_with_web_fault self.assertRaises(exception.DataCoreFaultException, self.client.get_server_groups) def test_update_api_endpoints(self): def fail_with_socket_error(): try: raise FakeWebSocketException() finally: self.mock_channel.recv.side_effect = None self.mock_channel.recv.side_effect = fail_with_socket_error mock_executive_endpoints = [{ 'network_address': '127.0.0.1:3794', 'http_endpoint': 'http://127.0.0.1:3794/', 'ws_endpoint': 'ws://127.0.0.1:3794/', }] self.mock_object(self.client, '_executive_service_endpoints', mock_executive_endpoints) mock_storage_endpoint = { 'network_address': '127.0.0.1:3794', 'http_endpoint': 'http://127.0.0.1:3794/', 'ws_endpoint': 'ws://127.0.0.1:3794/', } self.mock_object(self.client, '_storage_services_endpoint', mock_storage_endpoint) node = mock.Mock() node.HostAddress = '127.0.0.1:3794' reply = mock.MagicMock() reply.RegionNodeData = [node] self.mock_storage_service_context.process_reply.return_value = reply result = self.client.get_server_groups() self.assertIsNotNone(result) def test_update_api_endpoints_failed(self): def fail_with_socket_error(): try: raise FakeWebSocketException() finally: self.mock_channel.recv.side_effect = None self.mock_channel.recv.side_effect = fail_with_socket_error mock_executive_endpoints = [{ 'network_address': '127.0.0.1:3794', 'http_endpoint': 'http://127.0.0.1:3794/', 'ws_endpoint': 'ws://127.0.0.1:3794/', }] self.mock_object(self.client, '_executive_service_endpoints', mock_executive_endpoints) reply = mock.MagicMock() reply.RegionNodeData = [] self.mock_storage_service_context.process_reply.return_value = reply self.mock_executive_service_context.process_reply.return_value = None result = self.client.get_server_groups() self.assertIsNotNone(result) def test_get_server_groups(self): self.client.get_server_groups() self._assert_storage_services_method_called('GetServerGroups') def test_get_servers(self): self.client.get_servers() self._assert_storage_services_method_called('GetServers') def test_get_disk_pools(self): self.client.get_disk_pools() self._assert_storage_services_method_called('GetDiskPools') def test_get_logical_disks(self): self.client.get_logical_disks() self._assert_storage_services_method_called('GetLogicalDisks') def test_create_pool_logical_disk(self): pool_id = 'pool_id' pool_volume_type = 'Striped' size = 1 * units.Gi min_quota = 1 max_quota = 1 * units.Gi self.client.create_pool_logical_disk( pool_id, pool_volume_type, size, min_quota, max_quota) self._assert_storage_services_method_called('CreatePoolLogicalDisk') def test_delete_logical_disk(self): logical_disk_id = 'disk_id' self.client.delete_logical_disk(logical_disk_id) self._assert_storage_services_method_called('DeleteLogicalDisk') def test_get_logical_disk_chunk_allocation_map(self): logical_disk_id = 'disk_id' self.client.get_logical_disk_chunk_allocation_map(logical_disk_id) self._assert_storage_services_method_called( 'GetLogicalDiskChunkAllocationMap') def test_get_next_virtual_disk_alias(self): base_alias = 'volume' self.client.get_next_virtual_disk_alias(base_alias) self._assert_storage_services_method_called('GetNextVirtualDiskAlias') def test_get_virtual_disks(self): self.client.get_virtual_disks() self._assert_storage_services_method_called('GetVirtualDisks') def test_build_virtual_disk_data(self): disk_alias = 'alias' disk_type = 'Mirrored' 
size = 1 * units.Gi description = 'description' storage_profile_id = 'storage_profile_id' vd_data = self.client.build_virtual_disk_data( disk_alias, disk_type, size, description, storage_profile_id) self.assertEqual(disk_alias, vd_data.Alias) self.assertEqual(size, vd_data.Size.Value) self.assertEqual(description, vd_data.Description) self.assertEqual(storage_profile_id, vd_data.StorageProfileId) self.assertTrue(hasattr(vd_data, 'Type')) self.assertTrue(hasattr(vd_data, 'SubType')) self.assertTrue(hasattr(vd_data, 'DiskStatus')) self.assertTrue(hasattr(vd_data, 'RecoveryPriority')) def test_create_virtual_disk_ex2(self): disk_alias = 'alias' disk_type = 'Mirrored' size = 1 * units.Gi description = 'description' storage_profile_id = 'storage_profile_id' first_disk_id = 'disk_id' second_disk_id = 'disk_id' add_redundancy = True vd_data = self.client.build_virtual_disk_data( disk_alias, disk_type, size, description, storage_profile_id) self.client.create_virtual_disk_ex2( vd_data, first_disk_id, second_disk_id, add_redundancy) self._assert_storage_services_method_called('CreateVirtualDiskEx2') def test_set_virtual_disk_size(self): disk_id = 'disk_id' size = 1 * units.Gi self.client.set_virtual_disk_size(disk_id, size) self._assert_storage_services_method_called('SetVirtualDiskSize') def test_delete_virtual_disk(self): virtual_disk_id = 'disk_id' delete_logical_disks = True self.client.delete_virtual_disk(virtual_disk_id, delete_logical_disks) self._assert_storage_services_method_called('DeleteVirtualDisk') def test_serve_virtual_disks_to_host(self): host_id = 'host_id' disks = ['disk_id'] self.client.serve_virtual_disks_to_host(host_id, disks) self._assert_storage_services_method_called('ServeVirtualDisksToHost') def test_unserve_virtual_disks_from_host(self): host_id = 'host_id' disks = ['disk_id'] self.client.unserve_virtual_disks_from_host(host_id, disks) self._assert_storage_services_method_called( 'UnserveVirtualDisksFromHost') def test_unserve_virtual_disks_from_port(self): port_id = 'port_id' disks = ['disk_id'] self.client.unserve_virtual_disks_from_port(port_id, disks) self._assert_storage_services_method_called( 'UnserveVirtualDisksFromPort') def test_bind_logical_disk(self): disk_id = 'disk_id' logical_disk_id = 'disk_id' role = 'Second' create_mirror_mappings = True create_client_mappings = False add_redundancy = True self.client.bind_logical_disk( disk_id, logical_disk_id, role, create_mirror_mappings, create_client_mappings, add_redundancy) self._assert_storage_services_method_called( 'BindLogicalDisk') def test_get_snapshots(self): self.client.get_snapshots() self._assert_storage_services_method_called('GetSnapshots') def test_create_snapshot(self): disk_id = 'disk_id' name = 'name' description = 'description' pool_id = 'pool_id' snapshot_type = 'Full' duplicate_disk_id = False storage_profile_id = 'profile_id' self.client.create_snapshot( disk_id, name, description, pool_id, snapshot_type, duplicate_disk_id, storage_profile_id) self._assert_storage_services_method_called('CreateSnapshot') def test_delete_snapshot(self): snapshot_id = "snapshot_id" self.client.delete_snapshot(snapshot_id) self._assert_storage_services_method_called('DeleteSnapshot') def test_get_storage_profiles(self): self.client.get_storage_profiles() self._assert_storage_services_method_called('GetStorageProfiles') def test_designate_map_store(self): pool_id = 'pool_id' self.client.designate_map_store(pool_id) self._assert_storage_services_method_called('DesignateMapStore') def 
test_get_performance_by_type(self): types = ['DiskPoolPerformance'] self.client.get_performance_by_type(types) self._assert_storage_services_method_called('GetPerformanceByType') def test_get_ports(self): self.client.get_ports() self._assert_storage_services_method_called('GetPorts') def test_build_scsi_port_data(self): host_id = 'host_id' port_name = 'port_name' port_mode = 'Initiator' port_type = 'iSCSI' port_data = self.client.build_scsi_port_data( host_id, port_name, port_mode, port_type) self.assertEqual(host_id, port_data.HostId) self.assertEqual(port_name, port_data.PortName) self.assertTrue(hasattr(port_data, 'PortMode')) self.assertTrue(hasattr(port_data, 'PortType')) def test_register_port(self): port_data = self.client.build_scsi_port_data( 'host_id', 'port_name', 'initiator', 'iSCSI') self.client.register_port(port_data) self._assert_storage_services_method_called('RegisterPort') def test_assign_port(self): client_id = 'client_id' port_id = 'port_id' self.client.assign_port(client_id, port_id) self._assert_storage_services_method_called('AssignPort') def test_set_server_port_properties(self): port_id = 'port_id' port_properties = mock.MagicMock() self.client.set_server_port_properties(port_id, port_properties) self._assert_storage_services_method_called('SetServerPortProperties') def test_build_access_token(self): initiator_node_name = 'initiator' initiator_username = 'initiator_username' initiator_password = 'initiator_password' mutual_authentication = True target_username = 'target_username' target_password = 'target_password' access_token = self.client.build_access_token( initiator_node_name, initiator_username, initiator_password, mutual_authentication, target_username, target_password) self.assertEqual(initiator_node_name, access_token.InitiatorNodeName) self.assertEqual(initiator_username, access_token.InitiatorUsername) self.assertEqual(initiator_password, access_token.InitiatorPassword) self.assertEqual(mutual_authentication, access_token.MutualAuthentication) self.assertEqual(target_username, access_token.TargetUsername) self.assertEqual(target_password, access_token.TargetPassword) def test_set_access_token(self): port_id = 'port_id' access_token = self.client.build_access_token( 'initiator_name', None, None, False, 'initiator_name', 'password') self.client.set_access_token(port_id, access_token) self._assert_storage_services_method_called('SetAccessToken') def test_get_clients(self): self.client.get_clients() self._assert_storage_services_method_called('GetClients') def test_register_client(self): host_name = 'name' description = 'description' machine_type = 'Other' mode = 'PreferredServer' preferred_server_ids = None self.client.register_client( host_name, description, machine_type, mode, preferred_server_ids) self._assert_storage_services_method_called('RegisterClient') def test_set_client_capabilities(self): client_id = 'client_id' mpio = True alua = True self.client.set_client_capabilities(client_id, mpio, alua) self._assert_storage_services_method_called('SetClientCapabilities') def test_get_target_domains(self): self.client.get_target_domains() self._assert_storage_services_method_called('GetTargetDomains') def test_create_target_domain(self): initiator_host_id = 'host_id' target_host_id = 'host_id' self.client.create_target_domain(initiator_host_id, target_host_id) self._assert_storage_services_method_called('CreateTargetDomain') def test_delete_target_domain(self): domain_id = 'domain_id' self.client.delete_target_domain(domain_id) 
self._assert_storage_services_method_called('DeleteTargetDomain') def test_get_target_devices(self): self.client.get_target_devices() self._assert_storage_services_method_called('GetTargetDevices') def test_build_scsi_port_nexus_data(self): initiator_id = 'initiator_id' target_id = 'target_id' nexus = self.client.build_scsi_port_nexus_data(initiator_id, target_id) self.assertEqual(initiator_id, nexus.InitiatorPortId) self.assertEqual(target_id, nexus.TargetPortId) def test_create_target_device(self): domain_id = 'domain_id' nexus = self.client.build_scsi_port_nexus_data('initiator_id', 'target_id') self.client.create_target_device(domain_id, nexus) self._assert_storage_services_method_called('CreateTargetDevice') def test_delete_target_device(self): device_id = 'device_id' self.client.delete_target_device(device_id) self._assert_storage_services_method_called('DeleteTargetDevice') def test_get_next_free_lun(self): device_id = 'device_id' self.client.get_next_free_lun(device_id) self._assert_storage_services_method_called('GetNextFreeLun') def test_get_logical_units(self): self.client.get_logical_units() self._assert_storage_services_method_called('GetLogicalUnits') def test_map_logical_disk(self): disk_id = 'disk_id' lun = 0 host_id = 'host_id' mapping_type = 'Client' initiator_id = 'initiator_id' target_id = 'target_id' nexus = self.client.build_scsi_port_nexus_data(initiator_id, target_id) self.client.map_logical_disk( disk_id, nexus, lun, host_id, mapping_type) self._assert_storage_services_method_called('MapLogicalDisk') def test_unmap_logical_disk(self): logical_disk_id = 'disk_id' nexus = self.client.build_scsi_port_nexus_data('initiator_id', 'target_id') self.client.unmap_logical_disk(logical_disk_id, nexus) self._assert_storage_services_method_called('UnmapLogicalDisk') FAKE_WSDL_DOCUMENT = """ ws://mns-vsp-001:3794/IExecutiveServiceEx """ class FaultDefinitionsFilterTestCase(test.TestCase): """Tests for the plugin to process the DataCore API WSDL document.""" @staticmethod def _binding_operation_has_fault(document, operation_name): for binding in document.getChildren('binding', wsdl.wsdlns): for operation in binding.getChildren('operation', wsdl.wsdlns): if operation.get('name') == operation_name: fault = operation.getChildren('fault', wsdl.wsdlns) if fault: return True return False @staticmethod def _port_type_operation_has_fault(document, operation_name): for port_type in document.getChildren('portType', wsdl.wsdlns): for operation in port_type.getChildren('operation', wsdl.wsdlns): if operation.get('name') == operation_name: fault = operation.getChildren('fault', wsdl.wsdlns) if fault: return True return False def _operation_has_fault(self, document, operation_name): _binding_has_fault = self._binding_operation_has_fault( document, operation_name) _port_type_has_fault = self._port_type_operation_has_fault( document, operation_name) self.assertEqual(_binding_has_fault, _port_type_has_fault) return _binding_has_fault def test_parsed(self): context = mock.Mock() sax = parser.Parser() wsdl_document = FAKE_WSDL_DOCUMENT if isinstance(wsdl_document, str): wsdl_document = wsdl_document.encode('utf-8') context.document = sax.parse(string=wsdl_document).root() self.assertTrue(self._operation_has_fault(context.document, 'StartExecutive')) self.assertTrue(self._operation_has_fault(context.document, 'StopExecutive')) self.assertTrue(self._operation_has_fault(context.document, 'ExecutiveStarted')) self.assertTrue(self._operation_has_fault(context.document, 'ExecutiveStopped')) plugin = 
api.FaultDefinitionsFilter() plugin.parsed(context) self.assertTrue(self._operation_has_fault(context.document, 'StartExecutive')) self.assertTrue(self._operation_has_fault(context.document, 'StopExecutive')) self.assertFalse(self._operation_has_fault(context.document, 'ExecutiveStarted')) self.assertFalse(self._operation_has_fault(context.document, 'ExecutiveStopped')) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/volume/drivers/datacore/test_datacore_driver.py0000664000175000017500000007652600000000000030011 0ustar00zuulzuul00000000000000# Copyright (c) 2017 DataCore Software Corp. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Unit tests for the base Driver for DataCore SANsymphony storage array.""" import abc import math from unittest import mock from oslo_utils import units from cinder import context from cinder import exception as cinder_exception from cinder.tests.unit import fake_constants from cinder.tests.unit import fake_snapshot from cinder.tests.unit import fake_volume from cinder.tests.unit import utils as testutils from cinder.volume import configuration as conf from cinder.volume.drivers.datacore import driver as datacore_driver from cinder.volume.drivers.datacore import exception as datacore_exception from cinder.volume.drivers.san import san SERVER_GROUPS = [ mock.Mock(Id='server_group_id1', OurGroup=True), mock.Mock(Id='server_group_id2', OurGroup=False), ] SERVERS = [ mock.Mock(Id='server_id1', State='Online'), mock.Mock(Id='server_id2', State='Online'), ] DISK_POOLS = [ mock.Mock(Id='disk_pool_id1', Caption='disk_pool1', ServerId='server_id1', PoolStatus='Running'), mock.Mock(Id='disk_pool_id2', Caption='disk_pool2', ServerId='server_id2', PoolStatus='Running'), mock.Mock(Id='disk_pool_id3', Caption='disk_pool3', ServerId='server_id1', PoolStatus='Offline'), mock.Mock(Id='disk_pool_id4', Caption='disk_pool4', ServerId='server_id2', PoolStatus='Unknown'), ] DISK_POOL_PERFORMANCE = [ mock.Mock(ObjectId='disk_pool_id1', PerformanceData=mock.Mock(BytesTotal=5 * units.Gi, BytesAllocated=2 * units.Gi, BytesAvailable=3 * units.Gi, BytesReserved=0)), mock.Mock(ObjectId='disk_pool_id2', PerformanceData=mock.Mock(BytesTotal=5 * units.Gi, BytesAllocated=3 * units.Gi, BytesAvailable=1 * units.Gi, BytesReserved=1 * units.Gi)), mock.Mock(ObjectId='disk_pool_id3', PerformanceData=None), mock.Mock(ObjectId='disk_pool_id4', PerformanceData=None), ] STORAGE_PROFILES = [ mock.Mock(Id='storage_profile_id1', Caption='storage_profile1'), mock.Mock(Id='storage_profile_id2', Caption='storage_profile2'), mock.Mock(Id='storage_profile_id3', Caption='storage_profile3'), ] VIRTUAL_DISKS = [ mock.Mock(Id='virtual_disk_id1', DiskStatus='Online', IsServed=False, Alias='virtual_disk_id1', Size=mock.Mock(Value=1 * units.Gi), FirstHostId='server_id1'), mock.Mock(Id='virtual_disk_id2', DiskStatus='Failed', IsServed=False, Alias='virtual_disk_id2', Size=mock.Mock(Value=1 * units.Gi), 
FirstHostId='server_id2'), mock.Mock(Id='virtual_disk_id3', DiskStatus='Online', IsServed=True, Alias='virtual_disk_id3', Size=mock.Mock(Value=1 * units.Gi), FirstHostId='server_id1', SecondHostId='server_id2'), mock.Mock(Id='virtual_disk_id4', DiskStatus='Failed', IsServed=False, Alias='virtual_disk_id4', Size=mock.Mock(Value=1 * units.Gi), FirstHostId='server_id1', SecondHostId='server_id2'), ] EXT_VIRTUAL_DISKS = [ mock.Mock(Id='virtual_disk_id1', DiskStatus='Online', IsServed=False, Alias='virtual_disk_id1', Size=mock.Mock(Value=2 * units.Gi), FirstHostId='server_id1'), ] VIRTUAL_DISK_SNAPSHOTS = [ mock.Mock(Id='snapshot_id1', State='Migrated', Failure='NoFailure', DestinationLogicalDiskId='logical_disk_id1'), mock.Mock(Id='snapshot_id2', State='Failed', Failure='NotAccessible', DestinationLogicalDiskId='logical_disk_id2'), mock.Mock(Id='snapshot_id3', State='Migrated', Failure='NoFailure', DestinationLogicalDiskId='logical_disk_id2'), ] LOGICAL_DISKS = [ mock.Mock(Id='logical_disk_id1', VirtualDiskId='virtual_disk_id1', ServerHostId='server_id1', PoolId='disk_pool_id1', Size=mock.Mock(Value=1 * units.Gi)), mock.Mock(Id='logical_disk_id2', VirtualDiskId='virtual_disk_id2', ServerHostId='server_id1', PoolId='disk_pool_id3', Size=mock.Mock(Value=1 * units.Gi)), mock.Mock(Id='logical_disk_id3', VirtualDiskId='virtual_disk_id3', ServerHostId='server_id1', PoolId='disk_pool_id1', Size=mock.Mock(Value=1 * units.Gi)), mock.Mock(Id='logical_disk_id4', VirtualDiskId='virtual_disk_id3', ServerHostId='server_id2', PoolId='disk_pool_id2', Size=mock.Mock(Value=1 * units.Gi)), mock.Mock(Id='logical_disk_id5', VirtualDiskId='virtual_disk_id4', ServerHostId='server_id1', PoolId='disk_pool_id3', Size=mock.Mock(Value=1 * units.Gi)), mock.Mock(Id='logical_disk_id6', VirtualDiskId='virtual_disk_id4', ServerHostId='server_id2', PoolId='disk_pool_id4', Size=mock.Mock(Value=1 * units.Gi)), ] LOGICAL_UNITS = [ mock.Mock(VirtualTargetDeviceId='target_device_id1', LogicalDiskId='logical_disk_id3'), mock.Mock(VirtualTargetDeviceId='target_device_id2', LogicalDiskId='logical_disk_id4'), ] TARGET_DEVICES = [ mock.Mock(Id='target_device_id1', InitiatorPortId='initiator_port_id1'), mock.Mock(Id='target_device_id2', InitiatorPortId='initiator_port_id1'), ] CLIENTS = [ mock.Mock(Id='client_id1', HostName='client_host_name1'), mock.Mock(Id='client_id2', HostName='client_host_name2'), ] class DataCoreVolumeDriverTestCase(object): """Tests for the base Driver for DataCore SANsymphony storage array.""" def setUp(self): super(DataCoreVolumeDriverTestCase, self).setUp() self.mock_client = mock.Mock() self.mock_client.get_servers.return_value = SERVERS self.mock_client.get_disk_pools.return_value = DISK_POOLS (self.mock_client.get_performance_by_type .return_value) = DISK_POOL_PERFORMANCE self.mock_client.get_virtual_disks.return_value = VIRTUAL_DISKS self.mock_client.get_storage_profiles.return_value = STORAGE_PROFILES self.mock_client.get_snapshots.return_value = VIRTUAL_DISK_SNAPSHOTS self.mock_client.get_logical_disks.return_value = LOGICAL_DISKS self.mock_client.get_clients.return_value = CLIENTS self.mock_client.get_server_groups.return_value = SERVER_GROUPS self.mock_object(datacore_driver.api, 'DataCoreClient', return_value=self.mock_client) self.context = context.get_admin_context() self.volume_a = fake_volume.fake_volume_obj( self.context, **{'name': u'volume_1', 'volume_type_id': None, 'id': fake_constants.VOLUME_ID, 'size': 1}) self.volume_ext = fake_volume.fake_volume_obj( self.context, **{'name': u'volume_1', 
'volume_type_id': None, 'id': fake_constants.VOLUME2_ID, 'size': 2}) self.snapshot_a = fake_snapshot.fake_snapshot_obj( self.context, **{'name': u'snapshot_1', 'id': fake_constants.SNAPSHOT_ID, 'size': 1}) @staticmethod @abc.abstractmethod def init_driver(config): raise NotImplementedError() @staticmethod def create_configuration(): config = conf.Configuration(None) config.append_config_values(san.san_opts) config.append_config_values(datacore_driver.datacore_opts) return config def setup_default_configuration(self): config = self.create_configuration() config.volume_backend_name = 'DataCore' config.san_ip = '127.0.0.1' config.san_login = 'dcsadmin' config.san_password = 'password' config.datacore_api_timeout = 0 config.datacore_disk_failed_delay = 0 return config def test_do_setup(self): config = self.setup_default_configuration() self.init_driver(config) def test_do_setup_failed(self): config = self.setup_default_configuration() config.san_ip = None self.assertRaises(cinder_exception.InvalidInput, self.init_driver, config) config = self.setup_default_configuration() config.san_login = None self.assertRaises(cinder_exception.InvalidInput, self.init_driver, config) config = self.setup_default_configuration() config.san_password = None self.assertRaises(cinder_exception.InvalidInput, self.init_driver, config) def test_get_volume_stats(self): aggregation = [(getattr(perf.PerformanceData, 'BytesTotal', 0), getattr(perf.PerformanceData, 'BytesAvailable', 0), getattr(perf.PerformanceData, 'BytesReserved', 0),) for perf in DISK_POOL_PERFORMANCE] total, available, reserved = map(sum, zip(*aggregation)) free = (available + reserved) / units.Gi reserved = 100.0 * reserved / total reserved = math.ceil(reserved) total /= units.Gi provisioned = sum(disk.Size.Value for disk in LOGICAL_DISKS) provisioned /= units.Gi ratio = 2.0 config = self.setup_default_configuration() config.max_over_subscription_ratio = ratio driver = self.init_driver(config) expected_volume_stats = { 'vendor_name': 'DataCore', 'QoS_support': False, 'total_capacity_gb': total, 'free_capacity_gb': free, 'provisioned_capacity_gb': provisioned, 'reserved_percentage': reserved, 'max_over_subscription_ratio': ratio, 'thin_provisioning_support': True, 'thick_provisioning_support': False, 'online_extend_support': False, 'volume_backend_name': driver.get_volume_backend_name(), 'driver_version': driver.get_version(), 'storage_protocol': driver.STORAGE_PROTOCOL, } volume_stats = driver.get_volume_stats(refresh=True) self.assertDictEqual(expected_volume_stats, volume_stats) volume_stats_cached = driver.get_volume_stats(refresh=False) self.assertEqual(volume_stats, volume_stats_cached) def test_create_volume(self): virtual_disk = VIRTUAL_DISKS[0] self.mock_client.create_virtual_disk_ex2.return_value = virtual_disk driver = self.init_driver(self.setup_default_configuration()) volume = self.volume_a result = driver.create_volume(volume) self.assertIn('provider_location', result) self.assertEqual(virtual_disk.Id, result['provider_location']) def test_create_volume_mirrored_disk_type_specified(self): virtual_disk = VIRTUAL_DISKS[2] self.mock_client.create_virtual_disk_ex2.return_value = virtual_disk config = self.setup_default_configuration() config.datacore_disk_type = 'mirrored' driver = self.init_driver(config) volume = self.volume_a result = driver.create_volume(volume) self.assertIn('provider_location', result) self.assertEqual(virtual_disk.Id, result['provider_location']) driver = self.init_driver(self.setup_default_configuration()) 
volume_type = { 'extra_specs': {driver.DATACORE_DISK_TYPE_KEY: 'mirrored'} } get_volume_type = self.mock_object(datacore_driver.volume_types, 'get_volume_type') get_volume_type.return_value = volume_type volume = self.volume_a volume['volume_type_id'] = fake_constants.VOLUME_TYPE_ID result = driver.create_volume(volume) self.assertIn('provider_location', result) self.assertEqual(virtual_disk.Id, result['provider_location']) def test_create_volume_profile_specified(self): virtual_disk = VIRTUAL_DISKS[0] self.mock_client.create_virtual_disk_ex2.return_value = virtual_disk config = self.setup_default_configuration() config.datacore_storage_profile = 'storage_profile1' driver = self.init_driver(config) volume = self.volume_a result = driver.create_volume(volume) self.assertIn('provider_location', result) self.assertEqual(virtual_disk.Id, result['provider_location']) volume_type = { 'extra_specs': { driver.DATACORE_STORAGE_PROFILE_KEY: 'storage_profile2' } } get_volume_type = self.mock_object(datacore_driver.volume_types, 'get_volume_type') get_volume_type.return_value = volume_type volume = self.volume_a volume['volume_type_id'] = fake_constants.VOLUME_TYPE_ID result = driver.create_volume(volume) self.assertIn('provider_location', result) self.assertEqual(virtual_disk.Id, result['provider_location']) def test_create_volume_pool_specified(self): virtual_disk = VIRTUAL_DISKS[0] self.mock_client.create_virtual_disk_ex2.return_value = virtual_disk config = self.setup_default_configuration() config.datacore_disk_pools = ['disk_pool1'] driver = self.init_driver(config) volume = self.volume_a result = driver.create_volume(volume) self.assertIn('provider_location', result) self.assertEqual(virtual_disk.Id, result['provider_location']) volume_type = { 'extra_specs': {driver.DATACORE_DISK_POOLS_KEY: 'disk_pool2'} } get_volume_type = self.mock_object(datacore_driver.volume_types, 'get_volume_type') get_volume_type.return_value = volume_type volume = self.volume_a volume['volume_type_id'] = fake_constants.VOLUME_TYPE_ID result = driver.create_volume(volume) self.assertIn('provider_location', result) self.assertEqual(virtual_disk.Id, result['provider_location']) def test_create_volume_failed(self): def fail_with_datacore_fault(*args): raise datacore_exception.DataCoreFaultException( reason="General error.") (self.mock_client.create_virtual_disk_ex2 .side_effect) = fail_with_datacore_fault driver = self.init_driver(self.setup_default_configuration()) volume = self.volume_a self.assertRaises(datacore_exception.DataCoreFaultException, driver.create_volume, volume) def test_create_volume_unknown_disk_type_specified(self): config = self.setup_default_configuration() config.datacore_disk_type = 'unknown' driver = self.init_driver(config) volume = self.volume_a self.assertRaises(cinder_exception.VolumeDriverException, driver.create_volume, volume) driver = self.init_driver(self.setup_default_configuration()) volume_type = { 'extra_specs': {driver.DATACORE_DISK_TYPE_KEY: 'unknown'} } get_volume_type = self.mock_object(datacore_driver.volume_types, 'get_volume_type') get_volume_type.return_value = volume_type volume = self.volume_a volume['volume_type_id'] = fake_constants.VOLUME_TYPE_ID self.assertRaises(cinder_exception.VolumeDriverException, driver.create_volume, volume) def test_create_volume_unknown_profile_specified(self): config = self.setup_default_configuration() config.datacore_storage_profile = 'unknown' driver = self.init_driver(config) volume = self.volume_a 
self.assertRaises(cinder_exception.VolumeDriverException, driver.create_volume, volume) driver = self.init_driver(self.setup_default_configuration()) volume_type = { 'extra_specs': {driver.DATACORE_STORAGE_PROFILE_KEY: 'unknown'} } get_volume_type = self.mock_object(datacore_driver.volume_types, 'get_volume_type') get_volume_type.return_value = volume_type volume = self.volume_a volume['volume_type_id'] = fake_constants.VOLUME_TYPE_ID self.assertRaises(cinder_exception.VolumeDriverException, driver.create_volume, volume) def test_create_volume_on_failed_pool(self): config = self.setup_default_configuration() config.datacore_disk_pools = ['disk_pool3', 'disk_pool4'] driver = self.init_driver(config) volume = self.volume_a self.assertRaises(cinder_exception.VolumeDriverException, driver.create_volume, volume) @mock.patch('oslo_service.loopingcall.FixedIntervalLoopingCall', new=testutils.ZeroIntervalLoopingCall) def test_create_volume_await_online_timed_out(self): virtual_disk = VIRTUAL_DISKS[1] self.mock_client.create_virtual_disk_ex2.return_value = virtual_disk config = self.setup_default_configuration() driver = self.init_driver(config) volume = self.volume_a self.assertRaises(cinder_exception.VolumeDriverException, driver.create_volume, volume) def test_extend_volume(self): virtual_disk = VIRTUAL_DISKS[0] driver = self.init_driver(self.setup_default_configuration()) volume = self.volume_a volume.provider_location = virtual_disk.Id self.assertIsNone(driver.extend_volume(volume, 2147483648)) def test_extend_volume_failed_not_found(self): driver = self.init_driver(self.setup_default_configuration()) volume = self.volume_a volume.provider_location = 'wrong_virtual_disk_id' self.assertRaises(cinder_exception.VolumeDriverException, driver.extend_volume, volume, 2147483648) def test_delete_volume(self): virtual_disk = VIRTUAL_DISKS[0] driver = self.init_driver(self.setup_default_configuration()) volume = self.volume_a volume.provider_location = virtual_disk.Id driver.delete_volume(volume) def test_delete_volume_assigned(self): self.mock_client.get_logical_disks.return_value = LOGICAL_DISKS self.mock_client.get_logical_units.return_value = LOGICAL_UNITS self.mock_client.get_target_devices.return_value = TARGET_DEVICES driver = self.init_driver(self.setup_default_configuration()) volume = self.volume_a virtual_disk = VIRTUAL_DISKS[2] volume.provider_location = virtual_disk.Id driver.delete_volume(volume) @mock.patch('oslo_service.loopingcall.FixedIntervalLoopingCall', new=testutils.ZeroIntervalLoopingCall) def test_create_snapshot(self): virtual_disk = VIRTUAL_DISKS[0] virtual_disk_snapshot = VIRTUAL_DISK_SNAPSHOTS[0] self.mock_client.create_snapshot.return_value = virtual_disk_snapshot driver = self.init_driver(self.setup_default_configuration()) volume = self.volume_a volume.provider_location = virtual_disk.Id snapshot = self.snapshot_a snapshot.volume = volume result = driver.create_snapshot(snapshot) self.assertIn('provider_location', result) def test_create_snapshot_on_failed_pool(self): virtual_disk = VIRTUAL_DISKS[0] config = self.setup_default_configuration() config.datacore_disk_pools = ['disk_pool3', 'disk_pool4'] driver = self.init_driver(config) volume = self.volume_a volume.provider_location = virtual_disk.Id snapshot = self.snapshot_a snapshot.volume = volume self.assertRaises(cinder_exception.VolumeDriverException, driver.create_snapshot, snapshot) @mock.patch('oslo_service.loopingcall.FixedIntervalLoopingCall', new=testutils.ZeroIntervalLoopingCall) def 
test_create_snapshot_await_migrated_timed_out(self): virtual_disk = VIRTUAL_DISKS[0] virtual_disk_snapshot = VIRTUAL_DISK_SNAPSHOTS[1] self.mock_client.create_snapshot.return_value = virtual_disk_snapshot driver = self.init_driver(self.setup_default_configuration()) volume = self.volume_a volume.provider_location = virtual_disk.Id snapshot = self.snapshot_a snapshot.volume = volume self.assertRaises(cinder_exception.VolumeDriverException, driver.create_snapshot, snapshot) def test_delete_snapshot(self): virtual_disk = VIRTUAL_DISKS[0] driver = self.init_driver(self.setup_default_configuration()) snapshot = self.snapshot_a snapshot.provider_location = virtual_disk.Id driver.delete_snapshot(snapshot) @mock.patch('oslo_service.loopingcall.FixedIntervalLoopingCall', new=testutils.ZeroIntervalLoopingCall) def test_create_volume_from_snapshot(self): virtual_disk = VIRTUAL_DISKS[0] self.mock_client.set_virtual_disk_size.return_value = virtual_disk virtual_disk_snapshot = VIRTUAL_DISK_SNAPSHOTS[0] self.mock_client.create_snapshot.return_value = virtual_disk_snapshot driver = self.init_driver(self.setup_default_configuration()) volume = self.volume_a snapshot = self.snapshot_a snapshot.provider_location = virtual_disk.Id result = driver.create_volume_from_snapshot(volume, snapshot) self.assertIn('provider_location', result) @mock.patch('oslo_service.loopingcall.FixedIntervalLoopingCall', new=testutils.ZeroIntervalLoopingCall) def test_create_volume_from_snapshot_mirrored_disk_type_specified(self): virtual_disk = VIRTUAL_DISKS[0] self.mock_client.set_virtual_disk_size.return_value = virtual_disk virtual_disk_snapshot = VIRTUAL_DISK_SNAPSHOTS[0] self.mock_client.create_snapshot.return_value = virtual_disk_snapshot config = self.setup_default_configuration() config.datacore_disk_type = 'mirrored' driver = self.init_driver(config) volume = self.volume_a snapshot = self.snapshot_a snapshot.provider_location = virtual_disk.Id result = driver.create_volume_from_snapshot(volume, snapshot) self.assertIn('provider_location', result) @mock.patch('oslo_service.loopingcall.FixedIntervalLoopingCall', new=testutils.ZeroIntervalLoopingCall) def test_create_volume_from_snapshot_on_failed_pool(self): virtual_disk = VIRTUAL_DISKS[0] self.mock_client.set_virtual_disk_size.return_value = virtual_disk virtual_disk_snapshot = VIRTUAL_DISK_SNAPSHOTS[0] self.mock_client.create_snapshot.return_value = virtual_disk_snapshot config = self.setup_default_configuration() config.datacore_disk_type = 'mirrored' config.datacore_disk_pools = ['disk_pool1', 'disk_pool4'] driver = self.init_driver(config) volume = self.volume_a snapshot = self.snapshot_a snapshot.provider_location = virtual_disk.Id self.assertRaises(cinder_exception.VolumeDriverException, driver.create_volume_from_snapshot, volume, snapshot) @mock.patch('oslo_service.loopingcall.FixedIntervalLoopingCall', new=testutils.ZeroIntervalLoopingCall) def test_create_volume_from_snapshot_await_online_timed_out(self): virtual_disk = VIRTUAL_DISKS[0] snapshot_virtual_disk = VIRTUAL_DISKS[1] (self.mock_client.set_virtual_disk_size .return_value) = snapshot_virtual_disk virtual_disk_snapshot = VIRTUAL_DISK_SNAPSHOTS[2] self.mock_client.create_snapshot.return_value = virtual_disk_snapshot driver = self.init_driver(self.setup_default_configuration()) volume = self.volume_a snapshot = self.snapshot_a snapshot.provider_location = virtual_disk.Id self.assertRaises(cinder_exception.VolumeDriverException, driver.create_volume_from_snapshot, volume, snapshot) 
@mock.patch('oslo_service.loopingcall.FixedIntervalLoopingCall', new=testutils.ZeroIntervalLoopingCall) def test_create_cloned_volume(self): virtual_disk = VIRTUAL_DISKS[0] self.mock_client.set_virtual_disk_size.return_value = virtual_disk virtual_disk_snapshot = VIRTUAL_DISK_SNAPSHOTS[0] self.mock_client.create_snapshot.return_value = virtual_disk_snapshot driver = self.init_driver(self.setup_default_configuration()) volume = self.volume_a src_vref = self.volume_a src_vref.provider_location = virtual_disk.Id result = driver.create_cloned_volume(volume, src_vref) self.assertIn('provider_location', result) @mock.patch('oslo_service.loopingcall.FixedIntervalLoopingCall', new=testutils.ZeroIntervalLoopingCall) def test_create_cloned_volume_mirrored_disk_type_specified(self): virtual_disk = VIRTUAL_DISKS[0] self.mock_client.set_virtual_disk_size.return_value = virtual_disk virtual_disk_snapshot = VIRTUAL_DISK_SNAPSHOTS[0] self.mock_client.create_snapshot.return_value = virtual_disk_snapshot config = self.setup_default_configuration() config.datacore_disk_type = 'mirrored' driver = self.init_driver(config) volume = self.volume_a src_vref = self.volume_a src_vref.provider_location = virtual_disk.Id result = driver.create_cloned_volume(volume, src_vref) self.assertIn('provider_location', result) @mock.patch('oslo_service.loopingcall.FixedIntervalLoopingCall', new=testutils.ZeroIntervalLoopingCall) def test_create_cloned_volume_on_failed_pool(self): virtual_disk = VIRTUAL_DISKS[0] self.mock_client.set_virtual_disk_size.return_value = virtual_disk virtual_disk_snapshot = VIRTUAL_DISK_SNAPSHOTS[0] self.mock_client.create_snapshot.return_value = virtual_disk_snapshot config = self.setup_default_configuration() config.datacore_disk_type = 'mirrored' config.datacore_disk_pools = ['disk_pool1', 'disk_pool4'] driver = self.init_driver(config) volume = self.volume_a src_vref = self.volume_a src_vref.provider_location = virtual_disk.Id self.assertRaises(cinder_exception.VolumeDriverException, driver.create_cloned_volume, volume, src_vref) @mock.patch('oslo_service.loopingcall.FixedIntervalLoopingCall', new=testutils.ZeroIntervalLoopingCall) def test_create_cloned_volume_await_online_timed_out(self): virtual_disk = VIRTUAL_DISKS[0] snapshot_virtual_disk = VIRTUAL_DISKS[1] (self.mock_client.set_virtual_disk_size .return_value) = snapshot_virtual_disk virtual_disk_snapshot = VIRTUAL_DISK_SNAPSHOTS[2] self.mock_client.create_snapshot.return_value = virtual_disk_snapshot driver = self.init_driver(self.setup_default_configuration()) volume = self.volume_a src_vref = self.volume_a src_vref.provider_location = virtual_disk.Id self.assertRaises(cinder_exception.VolumeDriverException, driver.create_cloned_volume, volume, src_vref) def test_terminate_connection(self): virtual_disk = VIRTUAL_DISKS[0] client = CLIENTS[0] driver = self.init_driver(self.setup_default_configuration()) volume = self.volume_a volume.provider_location = virtual_disk.Id connector = {'host': client.HostName, 'wwpns': ['100000109bddf539']} driver.terminate_connection(volume, connector) def test_terminate_connection_connector_is_none(self): virtual_disk = VIRTUAL_DISKS[0] driver = self.init_driver(self.setup_default_configuration()) volume = self.volume_a volume.provider_location = virtual_disk.Id driver.terminate_connection(volume, None) def test_manage_existing(self): volume = self.volume_a driver = self.init_driver(self.setup_default_configuration()) ret = driver.manage_existing( volume, self.existing_ref) self.assertEqual("virtual_disk_id1", 
ret['provider_location']) def test_manage_existing_get_size(self): volume = self.volume_a driver = self.init_driver(self.setup_default_configuration()) driver.manage_existing_get_size( volume, self.existing_ref) def test_manage_existing_snapshot(self): snapshot = self.snapshot_a driver = self.init_driver(self.setup_default_configuration()) ret = driver.manage_existing_snapshot( snapshot, self.existing_ref) self.assertEqual("virtual_disk_id1", ret['provider_location']) def test_manage_existing_snapshot_get_size(self): snapshot = self.snapshot_a driver = self.init_driver(self.setup_default_configuration()) driver.manage_existing_snapshot_get_size( snapshot, self.existing_ref) def test_create_extended_cloned_volume(self): virtual_disk = EXT_VIRTUAL_DISKS[0] self.mock_client.get_virtual_disks.return_value = EXT_VIRTUAL_DISKS virtual_disk_snapshot = VIRTUAL_DISK_SNAPSHOTS[0] self.mock_client.create_snapshot.return_value = virtual_disk_snapshot driver = self.init_driver(self.setup_default_configuration()) volume = self.volume_ext src_vref = self.volume_a src_vref.provider_location = virtual_disk.Id result = driver.create_cloned_volume(volume, src_vref) self.assertIn('provider_location', result) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/volume/drivers/datacore/test_datacore_fc.py0000664000175000017500000002757600000000000027107 0ustar00zuulzuul00000000000000# Copyright (c) 2017 DataCore Software Corp. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Unit tests for the Fibre Channel Driver for DataCore SANsymphony storage array. 
""" from unittest import mock from cinder import exception as cinder_exception from cinder.tests.unit import test from cinder.tests.unit.volume.drivers.datacore import test_datacore_driver from cinder.volume.drivers.datacore import fc PORTS = [ mock.Mock(Id='initiator_port_id1', PortType='FibreChannel', PortMode='Initiator', PortName='AA-AA-AA-AA-AA-AA-AA-AA', HostId='client_id1'), mock.Mock(Id='initiator_port_id2', PortType='FibreChannel', PortMode='Initiator', PortName='BB-BB-BB-BB-BB-BB-BB-BB'), mock.Mock(__class__=mock.Mock(__name__='ServerFcPortData'), Id='target_port_id1', PortType='FibreChannel', PortMode='Target', PortName='CC-CC-CC-CC-CC-CC-CC-CC', HostId='server_id1', PresenceStatus='Present', ServerPortProperties=mock.Mock(Role="Frontend"), StateInfo=mock.Mock(State="LoopLinkUp") ), mock.Mock(Id='target_port_id2', PortType='FibreChannel', PortMode='Target', PortName='DD-DD-DD-DD-DD-DD-DD-DD', HostId='server_id1', PresenceStatus='Present', ServerPortProperties=mock.Mock(Role="Frontend"), StateInfo=mock.Mock(State="LoopLinkUp")), ] LOGICAL_UNITS = [ mock.Mock(VirtualTargetDeviceId='target_device_id1', Lun=mock.Mock(Quad=4)), mock.Mock(VirtualTargetDeviceId='target_device_id2', Lun=mock.Mock(Quad=3)), mock.Mock(VirtualTargetDeviceId='target_device_id3', Lun=mock.Mock(Quad=2)), mock.Mock(VirtualTargetDeviceId='target_device_id4', Lun=mock.Mock(Quad=1)), ] TARGET_DEVICES = [ mock.Mock(Id='target_device_id1', TargetPortId='target_port_id1', InitiatorPortId='initiator_port_id1'), mock.Mock(Id='target_device_id2', TargetPortId='target_port_id2', InitiatorPortId='initiator_port_id1'), mock.Mock(Id='target_device_id3', TargetPortId='target_port_id2', InitiatorPortId='initiator_port_id1'), mock.Mock(Id='target_device_id4', TargetPortId='target_port_id2', InitiatorPortId='initiator_port_id2'), ] class FibreChannelVolumeDriverTestCase( test_datacore_driver.DataCoreVolumeDriverTestCase, test.TestCase): """Tests for the FC Driver for DataCore SANsymphony storage array.""" existing_ref = { 'source-name': 'virtual_disk_id1'} def setUp(self): super(FibreChannelVolumeDriverTestCase, self).setUp() self.mock_client.get_ports.return_value = PORTS (self.mock_client.build_scsi_port_nexus_data .side_effect) = self._build_nexus_data self.mock_client.map_logical_disk.side_effect = self._map_logical_disk @staticmethod def _build_nexus_data(initiator_port_id, target_port_id): return mock.Mock(InitiatorPortId=initiator_port_id, TargetPortId=target_port_id) @staticmethod def _map_logical_disk(logical_disk_id, nexus, *args): target_device_id = next(( device.Id for device in TARGET_DEVICES if device.TargetPortId == nexus.TargetPortId and device.InitiatorPortId == nexus.InitiatorPortId), None) return next(unit for unit in LOGICAL_UNITS if unit.VirtualTargetDeviceId == target_device_id) @staticmethod def init_driver(config): driver = fc.FibreChannelVolumeDriver(configuration=config) driver.do_setup(None) driver.AWAIT_DISK_ONLINE_INTERVAL = 0 driver.AWAIT_SNAPSHOT_ONLINE_INTERVAL = 0 driver.AWAIT_SNAPSHOT_ONLINE_INITIAL_DELAY = 0 return driver def test_validate_connector(self): driver = self.init_driver(self.setup_default_configuration()) connector = { 'host': 'host_name', 'wwpns': ['AA-AA-AA-AA-AA-AA-AA-AA'], } driver.validate_connector(connector) def test_validate_connector_failed(self): driver = self.init_driver(self.setup_default_configuration()) connector = {} self.assertRaises(cinder_exception.InvalidConnectorException, driver.validate_connector, connector) connector = {'host': 'host_name'} 
self.assertRaises(cinder_exception.InvalidConnectorException, driver.validate_connector, connector) connector = {'wwpns': ['AA-AA-AA-AA-AA-AA-AA-AA']} self.assertRaises(cinder_exception.InvalidConnectorException, driver.validate_connector, connector) def test_initialize_connection(self): self.mock_client.get_logical_units.return_value = [] self.mock_client.get_target_domains.return_value = [] self.mock_client.get_target_devices.return_value = TARGET_DEVICES virtual_disk = test_datacore_driver.VIRTUAL_DISKS[0] client = test_datacore_driver.CLIENTS[0] driver = self.init_driver(self.setup_default_configuration()) volume = self.volume_a volume.provider_location = virtual_disk.Id initiator_wwpns = [port.PortName.replace('-', '').lower() for port in PORTS if port.PortMode == 'Initiator'] connector = { 'host': client.HostName, 'wwpns': initiator_wwpns, } result = driver.initialize_connection(volume, connector) self.assertEqual('fibre_channel', result['driver_volume_type']) target_wwns = [port.PortName.replace('-', '').lower() for port in PORTS if port.PortMode == 'Target'] self.assertIn(result['data']['target_wwn'][0], target_wwns[0]) target_wwn = result['data']['target_wwn'][0] target_port_id = next(( port.Id for port in PORTS if port.PortName.replace('-', '').lower() == target_wwn), None) target_device_id = next(( device.Id for device in TARGET_DEVICES if device.TargetPortId == target_port_id), None) target_lun = next(( unit.Lun.Quad for unit in LOGICAL_UNITS if unit.VirtualTargetDeviceId == target_device_id), None) self.assertEqual(target_lun, result['data']['target_lun']) self.assertFalse(result['data']['target_discovered']) self.assertEqual(volume.id, result['data']['volume_id']) self.assertEqual('rw', result['data']['access_mode']) def test_initialize_connection_unknown_client(self): client = test_datacore_driver.CLIENTS[0] self.mock_client.register_client.return_value = client (self.mock_client.get_clients .return_value) = test_datacore_driver.CLIENTS[1:] self.mock_client.get_logical_units.return_value = [] self.mock_client.get_target_domains.return_value = [] self.mock_client.get_target_devices.return_value = TARGET_DEVICES virtual_disk = test_datacore_driver.VIRTUAL_DISKS[0] driver = self.init_driver(self.setup_default_configuration()) volume = self.volume_a volume.provider_location = virtual_disk.Id initiator_wwpns = [port.PortName.replace('-', '').lower() for port in PORTS if port.PortMode == 'Initiator'] connector = { 'host': client.HostName, 'wwpns': initiator_wwpns, } result = driver.initialize_connection(volume, connector) self.assertEqual('fibre_channel', result['driver_volume_type']) target_wwns = [port.PortName.replace('-', '').lower() for port in PORTS if port.PortMode == 'Target'] self.assertIn(result['data']['target_wwn'][0], target_wwns[0]) target_wwn = result['data']['target_wwn'][0] target_port_id = next(( port.Id for port in PORTS if port.PortName.replace('-', '').lower() == target_wwn), None) target_device_id = next(( device.Id for device in TARGET_DEVICES if device.TargetPortId == target_port_id), None) target_lun = next(( unit.Lun.Quad for unit in LOGICAL_UNITS if unit.VirtualTargetDeviceId == target_device_id), None) self.assertEqual(target_lun, result['data']['target_lun']) self.assertFalse(result['data']['target_discovered']) self.assertEqual(volume.id, result['data']['volume_id']) self.assertEqual('rw', result['data']['access_mode']) def test_initialize_connection_failed_not_found(self): client = test_datacore_driver.CLIENTS[0] driver = 
self.init_driver(self.setup_default_configuration()) volume = self.volume_a volume.provider_location = 'wrong_virtual_disk_id' initiator_wwpns = [port.PortName.replace('-', '').lower() for port in PORTS if port.PortMode == 'Initiator'] connector = { 'host': client.HostName, 'wwpns': initiator_wwpns, } self.assertRaises(cinder_exception.VolumeDriverException, driver.initialize_connection, volume, connector) def test_initialize_connection_failed_initiator_not_found(self): self.mock_client.get_logical_units.return_value = [] self.mock_client.get_target_domains.return_value = [] self.mock_client.get_target_devices.return_value = TARGET_DEVICES virtual_disk = test_datacore_driver.VIRTUAL_DISKS[0] client = test_datacore_driver.CLIENTS[0] driver = self.init_driver(self.setup_default_configuration()) volume = self.volume_a volume.provider_location = virtual_disk.Id connector = { 'host': client.HostName, 'wwpns': ['0000000000000000'], } self.assertRaises(cinder_exception.VolumeDriverException, driver.initialize_connection, volume, connector) def test_initialize_connection_failed_on_serve(self): virtual_disk = test_datacore_driver.VIRTUAL_DISKS[0] client = test_datacore_driver.CLIENTS[0] config = self.setup_default_configuration() driver = self.init_driver(config) volume = self.volume_a volume.provider_location = virtual_disk.Id config.datacore_fc_unallowed_targets = [ port.PortName for port in PORTS if port.PortMode == 'Target' ] initiator_wwpns = [port.PortName.replace('-', '').lower() for port in PORTS if port.PortMode == 'Initiator'] connector = { 'host': client.HostName, 'wwpns': initiator_wwpns, } self.assertRaises(cinder_exception.VolumeDriverException, driver.initialize_connection, volume, connector) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/volume/drivers/datacore/test_datacore_iscsi.py0000664000175000017500000006055400000000000027622 0ustar00zuulzuul00000000000000# Copyright (c) 2017 DataCore Software Corp. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""Unit tests for the iSCSI Driver for DataCore SANsymphony storage array.""" from unittest import mock from cinder import exception as cinder_exception from cinder.tests.unit import test from cinder.tests.unit.volume.drivers.datacore import test_datacore_driver from cinder.tests.unit.volume.drivers.datacore import test_datacore_passwd from cinder.volume.drivers.datacore import exception as datacore_exception from cinder.volume.drivers.datacore import iscsi ISCSI_PORT_STATE_INFO_READY = mock.Mock( PortalsState=mock.Mock( PortalStateInfo=[mock.Mock(State='Ready')] ) ) ISCSI_PORT_CONFIG_INFO = mock.Mock( PortalsConfig=mock.Mock( iScsiPortalConfigInfo=[mock.Mock( Address=mock.Mock(Address='127.0.0.1'), TcpPort='3260')] ) ) PORTS = [ mock.Mock(Id='initiator_port_id1', PortType='iSCSI', PortMode='Initiator', PortName='iqn.1993-08.org.debian:1:1', HostId='client_id1'), mock.Mock(Id='initiator_port_id2', PortType='iSCSI', PortMode='Initiator', PortName='iqn.1993-08.org.debian:1:2'), mock.Mock(__class__=mock.Mock(__name__='ServeriScsiPortData'), Id='target_port_id1', PortType='iSCSI', PortMode='Target', PortName='iqn.2000-08.com.datacore:server-1-1', HostId='server_id1', PresenceStatus='Present', ServerPortProperties=mock.Mock(Role="Frontend", Authentication='None'), IScsiPortStateInfo=ISCSI_PORT_STATE_INFO_READY, PortConfigInfo=ISCSI_PORT_CONFIG_INFO), mock.Mock(Id='target_port_id2', PortType='iSCSI', PortMode='Target', PortName='iqn.2000-08.com.datacore:server-1-2', HostId='server_id1', PresenceStatus='Present', ServerPortProperties=mock.Mock(Role="Frontend", Authentication='None'), IScsiPortStateInfo=ISCSI_PORT_STATE_INFO_READY, PortConfigInfo=ISCSI_PORT_CONFIG_INFO), ] LOGICAL_UNITS = [ mock.Mock(VirtualTargetDeviceId='target_device_id1', Lun=mock.Mock(Quad=4)), mock.Mock(VirtualTargetDeviceId='target_device_id2', Lun=mock.Mock(Quad=3)), mock.Mock(VirtualTargetDeviceId='target_device_id3', Lun=mock.Mock(Quad=2)), mock.Mock(VirtualTargetDeviceId='target_device_id4', Lun=mock.Mock(Quad=1)), ] TARGET_DEVICES = [ mock.Mock(Id='target_device_id1', TargetPortId='target_port_id1', InitiatorPortId='initiator_port_id1'), mock.Mock(Id='target_device_id2', TargetPortId='target_port_id2', InitiatorPortId='initiator_port_id1'), mock.Mock(Id='target_device_id3', TargetPortId='target_port_id2', InitiatorPortId='initiator_port_id1'), mock.Mock(Id='target_device_id4', TargetPortId='target_port_id2', InitiatorPortId='initiator_port_id2'), ] class ISCSIVolumeDriverTestCase( test_datacore_driver.DataCoreVolumeDriverTestCase, test.TestCase): """Tests for the iSCSI Driver for DataCore SANsymphony storage array.""" existing_ref = { 'source-name': 'virtual_disk_id1'} def setUp(self): super(ISCSIVolumeDriverTestCase, self).setUp() self.mock_client.get_ports.return_value = PORTS (self.mock_client.build_scsi_port_nexus_data .side_effect) = self._build_nexus_data self.mock_client.map_logical_disk.side_effect = self._map_logical_disk @staticmethod def _build_nexus_data(initiator_port_id, target_port_id): return mock.Mock(InitiatorPortId=initiator_port_id, TargetPortId=target_port_id) @staticmethod def _map_logical_disk(logical_disk_id, nexus, *args): target_device_id = next(( device.Id for device in TARGET_DEVICES if device.TargetPortId == nexus.TargetPortId and device.InitiatorPortId == nexus.InitiatorPortId), None) return next(unit for unit in LOGICAL_UNITS if unit.VirtualTargetDeviceId == target_device_id) @staticmethod def init_driver(config): driver = iscsi.ISCSIVolumeDriver(configuration=config) 
driver.do_setup(None) driver.AWAIT_DISK_ONLINE_INTERVAL = 0 driver.AWAIT_SNAPSHOT_ONLINE_INTERVAL = 0 driver.AWAIT_SNAPSHOT_ONLINE_INITIAL_DELAY = 0 return driver @staticmethod def create_configuration(): config = super(ISCSIVolumeDriverTestCase, ISCSIVolumeDriverTestCase).create_configuration() config.append_config_values(iscsi.datacore_iscsi_opts) return config def test_do_setup_failed(self): super(ISCSIVolumeDriverTestCase, self).test_do_setup_failed() config = self.setup_default_configuration() config.use_chap_auth = True config.san_ip = '' config.datacore_iscsi_chap_storage = '/var/lib/cinder/.datacore_chap' self.assertRaises(cinder_exception.InvalidInput, self.init_driver, config) def test_validate_connector(self): driver = self.init_driver(self.setup_default_configuration()) connector = { 'host': 'host_name', 'initiator': 'iqn.1993-08.org.debian:1:1', } driver.validate_connector(connector) def test_validate_connector_failed(self): driver = self.init_driver(self.setup_default_configuration()) connector = {} self.assertRaises(cinder_exception.InvalidConnectorException, driver.validate_connector, connector) connector = {'host': 'host_name'} self.assertRaises(cinder_exception.InvalidConnectorException, driver.validate_connector, connector) connector = {'initiator': 'iqn.1993-08.org.debian:1:1'} self.assertRaises(cinder_exception.InvalidConnectorException, driver.validate_connector, connector) def test_initialize_connection(self): self.mock_client.get_logical_units.return_value = [] self.mock_client.get_target_domains.return_value = [] self.mock_client.get_target_devices.return_value = TARGET_DEVICES virtual_disk = test_datacore_driver.VIRTUAL_DISKS[0] client = test_datacore_driver.CLIENTS[0] driver = self.init_driver(self.setup_default_configuration()) volume = self.volume_a volume.provider_location = virtual_disk.Id initiator_iqn = PORTS[0].PortName connector = { 'host': client.HostName, 'initiator': initiator_iqn } result = driver.initialize_connection(volume, connector) self.assertEqual('iscsi', result['driver_volume_type']) target_iqn = [port.PortName for port in PORTS if port.PortMode == 'Target'] self.assertIn(result['data']['target_iqn'], target_iqn) target_iqn = result['data']['target_iqn'] target_port = next(( port for port in PORTS if port.PortName == target_iqn), None) target_device_id = next(( device.Id for device in TARGET_DEVICES if device.TargetPortId == target_port.Id), None) target_lun = next(( unit.Lun.Quad for unit in LOGICAL_UNITS if unit.VirtualTargetDeviceId == target_device_id), None) self.assertEqual(target_lun, result['data']['target_lun']) self.assertEqual('127.0.0.1:3260', result['data']['target_portal']) self.assertFalse(result['data']['target_discovered']) self.assertEqual(volume.id, result['data']['volume_id']) self.assertEqual('rw', result['data']['access_mode']) def test_initialize_connection_unknown_client(self): client = test_datacore_driver.CLIENTS[0] self.mock_client.register_client.return_value = client (self.mock_client.get_clients .return_value) = test_datacore_driver.CLIENTS[1:] self.mock_client.get_logical_units.return_value = [] self.mock_client.get_target_domains.return_value = [] self.mock_client.get_target_devices.return_value = TARGET_DEVICES virtual_disk = test_datacore_driver.VIRTUAL_DISKS[0] client = test_datacore_driver.CLIENTS[0] driver = self.init_driver(self.setup_default_configuration()) volume = self.volume_a volume.provider_location = virtual_disk.Id initiator_iqn = PORTS[0].PortName connector = { 'host': client.HostName, 
'initiator': initiator_iqn } result = driver.initialize_connection(volume, connector) self.assertEqual('iscsi', result['driver_volume_type']) target_iqn = [port.PortName for port in PORTS if port.PortMode == 'Target'] self.assertIn(result['data']['target_iqn'], target_iqn) target_iqn = result['data']['target_iqn'] target_port = next(( port for port in PORTS if port.PortName == target_iqn), None) target_device_id = next(( device.Id for device in TARGET_DEVICES if device.TargetPortId == target_port.Id), None) target_lun = next(( unit.Lun.Quad for unit in LOGICAL_UNITS if unit.VirtualTargetDeviceId == target_device_id), None) self.assertEqual(target_lun, result['data']['target_lun']) self.assertEqual('127.0.0.1:3260', result['data']['target_portal']) self.assertFalse(result['data']['target_discovered']) self.assertEqual(volume.id, result['data']['volume_id']) self.assertEqual('rw', result['data']['access_mode']) def test_initialize_connection_unknown_initiator(self): self.mock_client.register_port.return_value = PORTS[0] self.mock_client.get_ports.return_value = PORTS[1:] self.mock_client.get_logical_units.return_value = [] self.mock_client.get_target_domains.return_value = [] self.mock_client.get_target_devices.return_value = TARGET_DEVICES virtual_disk = test_datacore_driver.VIRTUAL_DISKS[0] client = test_datacore_driver.CLIENTS[0] driver = self.init_driver(self.setup_default_configuration()) volume = self.volume_a volume.provider_location = virtual_disk.Id initiator_iqn = PORTS[0].PortName connector = { 'host': client.HostName, 'initiator': initiator_iqn } result = driver.initialize_connection(volume, connector) self.assertEqual('iscsi', result['driver_volume_type']) target_iqn = [port.PortName for port in PORTS if port.PortMode == 'Target'] self.assertIn(result['data']['target_iqn'], target_iqn) target_iqn = result['data']['target_iqn'] target_port = next(( port for port in PORTS if port.PortName == target_iqn), None) target_device_id = next(( device.Id for device in TARGET_DEVICES if device.TargetPortId == target_port.Id), None) target_lun = next(( unit.Lun.Quad for unit in LOGICAL_UNITS if unit.VirtualTargetDeviceId == target_device_id), None) self.assertEqual(target_lun, result['data']['target_lun']) self.assertEqual('127.0.0.1:3260', result['data']['target_portal']) self.assertFalse(result['data']['target_discovered']) self.assertEqual(volume.id, result['data']['volume_id']) self.assertEqual('rw', result['data']['access_mode']) def test_initialize_connection_failed_not_found(self): client = test_datacore_driver.CLIENTS[0] driver = self.init_driver(self.setup_default_configuration()) volume = self.volume_a volume.provider_location = 'wrong_virtual_disk_id' initiator_iqn = PORTS[0].PortName connector = { 'host': client.HostName, 'initiator': initiator_iqn } self.assertRaises(cinder_exception.VolumeDriverException, driver.initialize_connection, volume, connector) def test_initialize_connection_failed_target_not_found(self): virtual_disk = test_datacore_driver.VIRTUAL_DISKS[0] client = test_datacore_driver.CLIENTS[0] config = self.setup_default_configuration() config.datacore_iscsi_unallowed_targets = [ port.PortName for port in PORTS if port.PortMode == 'Target' ] driver = self.init_driver(config) volume = self.volume_a volume.provider_location = virtual_disk.Id initiator_iqn = PORTS[0].PortName connector = { 'host': client.HostName, 'initiator': initiator_iqn } self.assertRaises(cinder_exception.VolumeDriverException, driver.initialize_connection, volume, connector) def 
test_initialize_connection_failed_on_map(self): def fail_with_datacore_fault(*args): raise datacore_exception.DataCoreFaultException( reason="General error.") (self.mock_client.map_logical_disk .side_effect) = fail_with_datacore_fault self.mock_client.get_logical_units.return_value = [] self.mock_client.get_target_domains.return_value = [] self.mock_client.get_target_devices.return_value = TARGET_DEVICES virtual_disk = test_datacore_driver.VIRTUAL_DISKS[0] client = test_datacore_driver.CLIENTS[0] driver = self.init_driver(self.setup_default_configuration()) volume = self.volume_a volume.provider_location = virtual_disk.Id initiator_iqn = PORTS[0].PortName connector = { 'host': client.HostName, 'initiator': initiator_iqn } self.assertRaises(datacore_exception.DataCoreFaultException, driver.initialize_connection, volume, connector) def test_initialize_connection_chap(self): mock_file_storage = self.mock_object(iscsi.passwd, 'FileStorage') mock_file_storage.return_value = test_datacore_passwd.FakeFileStorage() target_port = mock.Mock( Id='target_port_id1', PortType='iSCSI', PortMode='Target', PortName='iqn.2000-08.com.datacore:server-1-1', HostId='server_id1', PresenceStatus='Present', ServerPortProperties=mock.Mock(Role="Frontend", Authentication='None'), IScsiPortStateInfo=ISCSI_PORT_STATE_INFO_READY, PortConfigInfo=ISCSI_PORT_CONFIG_INFO, iSCSINodes=mock.Mock(Node=[])) ports = PORTS[:2] ports.append(target_port) self.mock_client.get_ports.return_value = ports self.mock_client.get_logical_units.return_value = [] self.mock_client.get_target_domains.return_value = [] self.mock_client.get_target_devices.return_value = TARGET_DEVICES virtual_disk = test_datacore_driver.VIRTUAL_DISKS[0] client = test_datacore_driver.CLIENTS[0] config = self.setup_default_configuration() config.use_chap_auth = True config.datacore_iscsi_chap_storage = 'fake_file_path' driver = self.init_driver(config) volume = self.volume_a volume.provider_location = virtual_disk.Id initiator_iqn = PORTS[0].PortName connector = { 'host': client.HostName, 'initiator': initiator_iqn } result = driver.initialize_connection(volume, connector) self.assertEqual('iscsi', result['driver_volume_type']) target_iqn = [port.PortName for port in PORTS if port.PortMode == 'Target'] self.assertIn(result['data']['target_iqn'], target_iqn) target_iqn = result['data']['target_iqn'] target_port = next(( port for port in PORTS if port.PortName == target_iqn), None) target_device_id = next(( device.Id for device in TARGET_DEVICES if device.TargetPortId == target_port.Id), None) target_lun = next(( unit.Lun.Quad for unit in LOGICAL_UNITS if unit.VirtualTargetDeviceId == target_device_id), None) self.assertEqual(target_lun, result['data']['target_lun']) self.assertEqual('127.0.0.1:3260', result['data']['target_portal']) self.assertFalse(result['data']['target_discovered']) self.assertEqual(volume.id, result['data']['volume_id']) self.assertEqual('rw', result['data']['access_mode']) self.assertEqual('CHAP', result['data']['auth_method']) self.assertEqual(initiator_iqn, result['data']['auth_username']) self.assertIsNotNone(result['data']['auth_password']) def test_initialize_connection_chap_failed_check(self): target_port = mock.Mock( __class__=mock.Mock(__name__='ServeriScsiPortData'), Id='target_port_id2', PortType='iSCSI', PortMode='Target', PortName='iqn.2000-08.com.datacore:server-1-2', HostId='server_id1', PresenceStatus='Present', ServerPortProperties=mock.Mock(Role="Frontend", Authentication='CHAP'), 
IScsiPortStateInfo=ISCSI_PORT_STATE_INFO_READY, PortConfigInfo=ISCSI_PORT_CONFIG_INFO) ports = PORTS[:2] ports.append(target_port) self.mock_client.get_ports.return_value = ports self.mock_client.get_target_devices.return_value = TARGET_DEVICES self.mock_client.get_logical_units.return_value = LOGICAL_UNITS self.mock_client.get_target_domains.return_value = [] virtual_disk = test_datacore_driver.VIRTUAL_DISKS[0] client = test_datacore_driver.CLIENTS[0] driver = self.init_driver(self.setup_default_configuration()) volume = self.volume_a volume.provider_location = virtual_disk.Id initiator_iqn = PORTS[0].PortName connector = { 'host': client.HostName, 'initiator': initiator_iqn } self.assertRaises(cinder_exception.VolumeDriverException, driver.initialize_connection, volume, connector) def test_initialize_connection_chap_failed_on_set_port_properties(self): def fail_with_datacore_fault(*args): raise datacore_exception.DataCoreFaultException( reason="General error.") mock_file_storage = self.mock_object(iscsi.passwd, 'FileStorage') mock_file_storage.return_value = test_datacore_passwd.FakeFileStorage() target_port = mock.Mock( __class__=mock.Mock(__name__='ServeriScsiPortData'), Id='target_port_id1', PortType='iSCSI', PortMode='Target', PortName='iqn.2000-08.com.datacore:server-1-1', HostId='server_id1', PresenceStatus='Present', ServerPortProperties=mock.Mock(Role="Frontend", Authentication='None'), IScsiPortStateInfo=ISCSI_PORT_STATE_INFO_READY, PortConfigInfo=ISCSI_PORT_CONFIG_INFO, iSCSINodes=mock.Mock(Node=[])) ports = PORTS[:2] ports.append(target_port) self.mock_client.get_ports.return_value = ports (self.mock_client.set_server_port_properties .side_effect) = fail_with_datacore_fault self.mock_client.get_logical_units.return_value = [] self.mock_client.get_target_domains.return_value = [] self.mock_client.get_target_devices.return_value = TARGET_DEVICES virtual_disk = test_datacore_driver.VIRTUAL_DISKS[0] client = test_datacore_driver.CLIENTS[0] config = self.setup_default_configuration() config.use_chap_auth = True config.datacore_iscsi_chap_storage = 'fake_file_path' driver = self.init_driver(config) volume = self.volume_a volume.provider_location = virtual_disk.Id initiator_iqn = PORTS[0].PortName connector = { 'host': client.HostName, 'initiator': initiator_iqn } self.assertRaises(datacore_exception.DataCoreFaultException, driver.initialize_connection, volume, connector) def test_initialize_connection_chap_username_password(self): mock_file_storage = self.mock_object(iscsi.passwd, 'FileStorage') mock_file_storage.return_value = test_datacore_passwd.FakeFileStorage() target_port = mock.Mock( Id='target_port_id1', PortType='iSCSI', PortMode='Target', PortName='iqn.2000-08.com.datacore:server-1-1', HostId='server_id1', PresenceStatus='Present', ServerPortProperties=mock.Mock(Role="Frontend", Authentication='None'), IScsiPortStateInfo=ISCSI_PORT_STATE_INFO_READY, PortConfigInfo=ISCSI_PORT_CONFIG_INFO, iSCSINodes=mock.Mock(Node=[])) ports = PORTS[:2] ports.append(target_port) self.mock_client.get_ports.return_value = ports self.mock_client.get_logical_units.return_value = [] self.mock_client.get_target_domains.return_value = [] self.mock_client.get_target_devices.return_value = TARGET_DEVICES virtual_disk = test_datacore_driver.VIRTUAL_DISKS[0] client = test_datacore_driver.CLIENTS[0] config = self.setup_default_configuration() config.use_chap_auth = True config.chap_username = 'datacore' config.chap_password = 'datacore123456' driver = self.init_driver(config) volume = self.volume_a 
volume.provider_location = virtual_disk.Id initiator_iqn = PORTS[0].PortName connector = { 'host': client.HostName, 'initiator': initiator_iqn } result = driver.initialize_connection(volume, connector) self.assertEqual('iscsi', result['driver_volume_type']) target_iqn = [port.PortName for port in PORTS if port.PortMode == 'Target'] self.assertIn(result['data']['target_iqn'], target_iqn) target_iqn = result['data']['target_iqn'] target_port = next(( port for port in PORTS if port.PortName == target_iqn), None) target_device_id = next(( device.Id for device in TARGET_DEVICES if device.TargetPortId == target_port.Id), None) target_lun = next(( unit.Lun.Quad for unit in LOGICAL_UNITS if unit.VirtualTargetDeviceId == target_device_id), None) self.assertEqual(target_lun, result['data']['target_lun']) self.assertEqual('127.0.0.1:3260', result['data']['target_portal']) self.assertFalse(result['data']['target_discovered']) self.assertEqual(volume.id, result['data']['volume_id']) self.assertEqual('rw', result['data']['access_mode']) self.assertEqual('CHAP', result['data']['auth_method']) self.assertEqual('datacore', result['data']['auth_username']) self.assertEqual('datacore123456', result['data']['auth_password']) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/volume/drivers/datacore/test_datacore_passwd.py0000664000175000017500000002355300000000000030007 0ustar00zuulzuul00000000000000# Copyright (c) 2017 DataCore Software Corp. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
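# --- Editor's note: illustrative sketch, not part of the upstream code; kept
# in comments so the module docstring below remains the first statement. ---
# The CHAP tests above swap iscsi.passwd.FileStorage for the FakeFileStorage
# defined in this module, which holds credentials in a nested
# {resource: {user: password}} dict. A minimal stand-alone lookup with the
# same "missing entry returns None" behaviour the password-storage tests
# below rely on (the helper name is hypothetical):
#
#     def get_password(storage, resource, user):
#         return storage.get(resource, {}).get(user)
#
#     get_password({'resource1': {'user1': 's3cret'}}, 'resource1', 'user1')
#     # -> 's3cret'
#     get_password({'resource1': {'user1': 's3cret'}}, 'RESOURCE1', 'user1')
#     # -> None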
"""Unit tests for the password storage.""" import collections import io import json import os import stat from unittest import mock from cinder.tests.unit import test from cinder.volume.drivers.datacore import passwd class FakeFileStorage(object): """Mock FileStorage class.""" def __init__(self): self._storage = { 'resource1': { 'user1': 'resource1-user1', 'user2': 'resource1-user2', }, 'resource2': { 'user1': 'resource2-user1', } } def open(self): return self def load(self): return self._storage def save(self, storage): self._storage = storage def close(self): pass def __enter__(self): return self def __exit__(self, type, value, traceback): self.close() class PasswordFileStorageTestCase(test.TestCase): """Tests for the password storage.""" def test_get_password(self): fake_file_storage = FakeFileStorage() passwords = fake_file_storage.load() resource = next(iter(passwords.keys())) user, expected = next(iter(passwords[resource].items())) self._mock_file_storage(fake_file_storage) password_storage = passwd.PasswordFileStorage('fake_file_path') result = password_storage.get_password(resource, user) self.assertEqual(expected, result) result = password_storage.get_password(resource.upper(), user) self.assertIsNone(result) def test_set_password(self): fake_file_storage = FakeFileStorage() user = 'user3' resource1 = 'resource2' password1 = 'resource2-user3' resource2 = 'resource3' password2 = 'resource3-user3' self._mock_file_storage(fake_file_storage) password_storage = passwd.PasswordFileStorage('fake_file_path') password_storage.set_password(resource1, user, password1) passwords = fake_file_storage.load() self.assertIn(resource1, passwords) self.assertIn(user, passwords[resource1]) self.assertEqual(password1, passwords[resource1][user]) password_storage.set_password(resource2, user, password2) passwords = fake_file_storage.load() self.assertIn(resource2, passwords) self.assertIn(user, passwords[resource2]) self.assertEqual(password2, passwords[resource2][user]) def test_delete_password(self): fake_file_storage = FakeFileStorage() passwords = fake_file_storage.load() resource1, resource2 = 'resource1', 'resource2' user1, res1 = next(iter(passwords[resource1].items())) user2, res2 = next(iter(passwords[resource2].items())) self._mock_file_storage(fake_file_storage) password_storage = passwd.PasswordFileStorage('fake_file_path') password_storage.delete_password(resource1, user1) passwords = fake_file_storage.load() self.assertIn(resource1, passwords) self.assertNotIn(user1, passwords[resource1]) password_storage.delete_password(resource2, user2) passwords = fake_file_storage.load() self.assertNotIn(resource2, passwords) def _mock_file_storage(self, fake_file_storage): self.mock_object(passwd, 'FileStorage', return_value=fake_file_storage) class FileStorageTestCase(test.TestCase): """Test for the file storage.""" def test_open(self): fake_file_path = 'file_storage.data' self.mock_object(passwd.os.path, 'isfile', return_value=True) self.mock_object(passwd.os.path, 'isdir', return_value=True) mock_open = self.mock_object(passwd, 'open', mock.mock_open()) file_storage = passwd.FileStorage(fake_file_path) file_storage.open() mock_open.assert_called_once_with(fake_file_path, 'r+') def test_open_not_existing(self): fake_file_path = '/fake_path/file_storage.data' fake_dir_name = os.path.dirname(fake_file_path) mock_chmod_calls = [ mock.call(fake_dir_name, stat.S_IRWXU | stat.S_IRGRP | stat.S_IXGRP), mock.call(fake_file_path, stat.S_IRUSR | stat.S_IWUSR) ] mock_open_calls = [ mock.call(fake_file_path, 
'w'), mock.call(fake_file_path, 'r+'), ] self.mock_object(passwd.os.path, 'isfile', return_value=False) self.mock_object(passwd.os.path, 'isdir', return_value=False) mock_makedirs = self.mock_object(passwd.os, 'makedirs') mock_chmod = self.mock_object(passwd.os, 'chmod') mock_open = self.mock_object( passwd, 'open', return_value=mock.MagicMock()) file_storage = passwd.FileStorage(fake_file_path) file_storage.open() mock_makedirs.assert_called_with(fake_dir_name) mock_chmod.assert_has_calls(mock_chmod_calls, any_order=True) mock_open.assert_has_calls(mock_open_calls, any_order=True) def test_open_not_closed(self): fake_file_path = 'file_storage.data' fake_file = mock.MagicMock() mock_open_calls = [ mock.call(fake_file_path, 'r+'), mock.call(fake_file_path, 'r+'), ] self.mock_object(passwd.os.path, 'isfile', return_value=True) self.mock_object(passwd.os.path, 'isdir', return_value=True) mock_open = self.mock_object(passwd, 'open', return_value=fake_file) file_storage = passwd.FileStorage(fake_file_path) file_storage.open() file_storage.open() mock_open.assert_has_calls(mock_open_calls) fake_file.close.assert_called_once_with() def test_load(self): passwords = { 'resource1': { 'user1': 'resource1-user1', 'user2': 'resource1-user2', }, 'resource2': { 'user1': 'resource2-user1', 'user2': 'resource2-user2' } } fake_file_name = 'file_storage.data' fake_file_content = json.dumps(passwords) fake_file = self._get_fake_file(fake_file_content) fake_os_stat = self._get_fake_os_stat(1) self._mock_file_open(fake_file, fake_os_stat) file_storage = passwd.FileStorage(fake_file_name) file_storage.open() result = file_storage.load() self.assertEqual(passwords, result) def test_load_empty_file(self): fake_file_name = 'file_storage.data' fake_file = self._get_fake_file() fake_os_stat = self._get_fake_os_stat(0) self._mock_file_open(fake_file, fake_os_stat) file_storage = passwd.FileStorage(fake_file_name) file_storage.open() result = file_storage.load() expected = {} self.assertEqual(expected, result) def test_load_malformed_file(self): fake_file_name = 'file_storage.data' fake_file = self._get_fake_file('[1, 2, 3]') fake_os_stat = self._get_fake_os_stat(1) self._mock_file_open(fake_file, fake_os_stat) file_storage = passwd.FileStorage(fake_file_name) file_storage.open() self.assertRaises(ValueError, file_storage.load) def test_save(self): fake_file_name = 'file_storage.data' fake_file = self._get_fake_file('') fake_os_stat = self._get_fake_os_stat(0) self._mock_file_open(fake_file, fake_os_stat) passwords = { 'resource1': { 'user1': 'resource1-user1', 'user2': 'resource1-user2', }, 'resource2': { 'user1': 'resource2-user1', 'user2': 'resource2-user2' } } fake_file_content = json.dumps(passwords) file_storage = passwd.FileStorage(fake_file_name) file_storage.open() file_storage.save(passwords) self.assertEqual(fake_file_content, fake_file.getvalue()) def test_save_not_dictionary(self): fake_file_name = 'file_storage.data' fake_file = self._get_fake_file('') fake_os_stat = self._get_fake_os_stat(0) self._mock_file_open(fake_file, fake_os_stat) file_storage = passwd.FileStorage(fake_file_name) file_storage.open() self.assertRaises(TypeError, file_storage.save, []) def test_close(self): fake_file_name = 'file_storage.data' fake_file = mock.MagicMock() self.mock_object(passwd.os.path, 'isfile', return_value=True) self.mock_object(passwd.os.path, 'isdir', return_value=True) self.mock_object(passwd, 'open', return_value=fake_file) file_storage = passwd.FileStorage(fake_file_name) file_storage.open() 
file_storage.close() fake_file.close.assert_called_once_with() def _mock_file_open(self, fake_file, fake_os_stat): self.mock_object(passwd.os.path, 'isfile', return_value=True) self.mock_object(passwd.os.path, 'isdir', return_value=True) self.mock_object(passwd.os, 'stat', return_value=fake_os_stat) self.mock_object(passwd, 'open', return_value=fake_file) @staticmethod def _get_fake_file(content=None): return io.StringIO(content) @staticmethod def _get_fake_os_stat(st_size): os_stat = collections.namedtuple('fake_os_stat', ['st_size']) os_stat.st_size = st_size return os_stat ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/volume/drivers/datacore/test_datacore_utils.py0000664000175000017500000000660300000000000027643 0ustar00zuulzuul00000000000000# Copyright (c) 2017 DataCore Software Corp. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Unit tests for utilities and helper functions.""" from cinder.tests.unit import test from cinder.volume.drivers.datacore import utils class GenericUtilsTestCase(test.TestCase): """Tests for the generic utilities and helper functions.""" def test_build_network_address(self): ipv4_address = '127.0.0.1' ipv6_address = '::1' host_name = 'localhost' port = 3498 self.assertEqual('%s:%s' % (ipv4_address, port), utils.build_network_address(ipv4_address, port)) self.assertEqual('[%s]:%s' % (ipv6_address, port), utils.build_network_address(ipv6_address, port)) self.assertEqual('%s:%s' % (host_name, port), utils.build_network_address(host_name, port)) def test_get_first(self): disk_a = {'id': 'disk-a', 'type': 'Single', 'size': 5} disk_b = {'id': 'disk-b', 'type': 'Single', 'size': 1} disk_c = {'id': 'disk-c', 'type': 'Mirrored', 'size': 5} disk_d = {'id': 'disk-d', 'type': 'Single', 'size': 10} test_source = [disk_a, disk_b, disk_c, disk_d] first = utils.get_first(lambda item: item['id'] == 'disk-c', test_source) self.assertEqual(disk_c, first) self.assertRaises(StopIteration, utils.get_first, lambda item: item['type'] == 'Dual', test_source) def test_get_first_or_default(self): disk_a = {'id': 'disk-a', 'type': 'Single', 'size': 5} disk_b = {'id': 'disk-b', 'type': 'Single', 'size': 1} disk_c = {'id': 'disk-c', 'type': 'Mirrored', 'size': 5} disk_d = {'id': 'disk-d', 'type': 'Single', 'size': 10} test_source = [disk_a, disk_b, disk_c, disk_d] first = utils.get_first_or_default(lambda item: item['size'] == 1, test_source, None) self.assertEqual(disk_b, first) default = utils.get_first_or_default(lambda item: item['size'] == 15, test_source, None) self.assertIsNone(default) def test_get_distinct_by(self): disk_a = {'id': 'disk-a', 'type': 'Single', 'size': 5} disk_b = {'id': 'disk-b', 'type': 'Single', 'size': 1} disk_c = {'id': 'disk-c', 'type': 'Mirrored', 'size': 5} disk_d = {'id': 'disk-d', 'type': 'Single', 'size': 10} test_source = [disk_a, disk_b, disk_c, disk_d] distinct_values = utils.get_distinct_by(lambda item: item['type'], test_source) 
self.assertEqual([disk_a, disk_c], distinct_values) ././@PaxHeader0000000000000000000000000000003200000000000011450 xustar000000000000000026 mtime=1759315577.24712 cinder-27.0.0/cinder/tests/unit/volume/drivers/dell_emc/0000775000175000017500000000000000000000000023205 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/volume/drivers/dell_emc/__init__.py0000664000175000017500000000000000000000000025304 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315577.2511199 cinder-27.0.0/cinder/tests/unit/volume/drivers/dell_emc/powerflex/0000775000175000017500000000000000000000000025220 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/volume/drivers/dell_emc/powerflex/__init__.py0000664000175000017500000001706500000000000027342 0ustar00zuulzuul00000000000000# Copyright (c) 2013 - 2015 EMC Corporation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import copy import requests from cinder.tests.unit import test from cinder.tests.unit.volume.drivers.dell_emc.powerflex import mocks from cinder.volume import configuration as conf from cinder.volume.drivers.dell_emc.powerflex import driver class CustomResponseMode(object): """A context manager to define a custom set of per-request response modes. 
Example: with CustomResponseMode(self, **{ 'some/api/path': RESPONSE_MODE.Valid, 'another/api/path': RESPONSE_MODE.BadStatus, 'last/api/path': MockResponse('some data', status_code=403), }): self.assertRaises(SomeException, self.driver.api_call, data) """ def __init__(self, test_instance, **kwargs): self.test_instance = test_instance self.custom_responses = kwargs self.current_responses = None def __enter__(self): self.current_responses = self.test_instance.HTTPS_MOCK_RESPONSES https_responses = copy.deepcopy( self.test_instance.HTTPS_MOCK_RESPONSES ) current_mode = self.test_instance.current_https_response_mode for call, new_mode in self.custom_responses.items(): if isinstance(new_mode, mocks.MockHTTPSResponse): https_responses[current_mode][call] = new_mode else: https_responses[current_mode][call] = \ self.test_instance.get_https_response(call, new_mode) self.test_instance.HTTPS_MOCK_RESPONSES = https_responses def __exit__(self, exc_type, exc_val, exc_tb): self.test_instance.HTTPS_MOCK_RESPONSES = self.current_responses class TestPowerFlexDriver(test.TestCase): """Base ``TestCase`` subclass for the ``PowerFlexDriver``""" RESPONSE_MODE = type(str('ResponseMode'), (object, ), dict( Valid='0', Invalid='1', BadStatus='2', ValidVariant='3', BadStatusWithDetails='4', )) __RESPONSE_MODE_NAMES = { '0': 'Valid', '1': 'Invalid', '2': 'BadStatus', '3': 'ValidVariant', '4': 'BadStatusWithDetails', } BAD_STATUS_RESPONSE = mocks.MockHTTPSResponse( { 'errorCode': 500, 'message': 'BadStatus Response Test', }, 500 ) OLD_VOLUME_NOT_FOUND_ERROR = 78 VOLUME_NOT_FOUND_ERROR = 79 HTTPS_MOCK_RESPONSES = {} __COMMON_HTTPS_MOCK_RESPONSES = { RESPONSE_MODE.Valid: { 'login': 'login_token', 'version': '3.5' }, RESPONSE_MODE.BadStatus: { 'login': mocks.MockHTTPSResponse( { 'errorCode': 403, 'message': 'Bad Login Response Test', }, 403 ), 'version': '3.5' }, } __https_response_mode = RESPONSE_MODE.Valid log = None STORAGE_POOL_ID = str('1') STORAGE_POOL_NAME = 'SP1' PROT_DOMAIN_ID = str('1') PROT_DOMAIN_NAME = 'PD1' STORAGE_POOLS = ['{}:{}'.format(PROT_DOMAIN_NAME, STORAGE_POOL_NAME)] def setUp(self): """Setup a test case environment. Creates a ``PowerFlexDriver`` instance Mocks the ``requests.get/post`` methods to return ``MockHTTPSResponse``'s instead. 
""" super(TestPowerFlexDriver, self).setUp() self.configuration = conf.Configuration(driver.powerflex_opts, conf.SHARED_CONF_GROUP) self._set_overrides() self.driver = mocks.PowerFlexDriver(configuration=self.configuration) self.driver.primary_client = mocks.PowerFlexClient(self.configuration) self.driver.secondary_client = mocks.PowerFlexClient( self.configuration, is_primary=False) self.driver.do_setup({}) self.mock_object(requests, 'get', self.do_request) self.mock_object(requests, 'post', self.do_request) self.driver.primary_client.do_setup() self.driver.secondary_client.do_setup() def _set_overrides(self): # Override the defaults to fake values self.override_config('san_ip', override='127.0.0.1', group=conf.SHARED_CONF_GROUP) self.override_config('powerflex_rest_server_port', override='8888', group=conf.SHARED_CONF_GROUP) self.override_config('san_login', override='test', group=conf.SHARED_CONF_GROUP) self.override_config('san_password', override='pass', group=conf.SHARED_CONF_GROUP) self.override_config('powerflex_storage_pools', override='PD1:SP1', group=conf.SHARED_CONF_GROUP) self.override_config('max_over_subscription_ratio', override=5.0, group=conf.SHARED_CONF_GROUP) self.override_config('powerflex_server_api_version', override='2.0.0', group=conf.SHARED_CONF_GROUP) def do_request(self, url, *args, **kwargs): """Do a fake GET/POST API request. Splits `url` on '/api/' to get the what API call is, then returns the value of `self.HTTPS_MOCK_RESPONSES[][]` converting to a `MockHTTPSResponse` if necessary. :raises test.TestingException: If the current mode/api_call does not exist. :returns MockHTTPSResponse: """ return self.get_https_response(url.split('/api/')[1]) def set_https_response_mode(self, mode=RESPONSE_MODE.Valid): """Set the HTTPS response mode. RESPONSE_MODE.Valid: Respond with valid data RESPONSE_MODE.Invalid: Respond with invalid data RESPONSE_MODE.BadStatus: Response with not-OK status code. RESPONSE_MODE.BadStatusWithDetails: as BadStatus but with "details". """ self.__https_response_mode = mode def get_https_response(self, api_path, mode=None): if mode is None: mode = self.__https_response_mode try: response = self.HTTPS_MOCK_RESPONSES[mode][api_path] except KeyError: try: response = self.__COMMON_HTTPS_MOCK_RESPONSES[mode][api_path] except KeyError: raise test.TestingException( 'Mock API Endpoint not implemented: [{}]{}'.format( self.__RESPONSE_MODE_NAMES[mode], api_path ) ) if not isinstance(response, mocks.MockHTTPSResponse): return mocks.MockHTTPSResponse(response, 200) return response @property def current_https_response_mode(self): return self.__https_response_mode def https_response_mode_name(self, mode): return self.__RESPONSE_MODE_NAMES[mode] def custom_response_mode(self, **kwargs): return CustomResponseMode(self, **kwargs) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/volume/drivers/dell_emc/powerflex/mocks.py0000664000175000017500000000460000000000000026706 0ustar00zuulzuul00000000000000# Copyright (c) 2013 - 2015 EMC Corporation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import json import requests from cinder.volume.drivers.dell_emc.powerflex import driver from cinder.volume.drivers.dell_emc.powerflex import rest_client class PowerFlexDriver(driver.PowerFlexDriver): """Mock PowerFlex Driver class. Provides some fake configuration options """ def do_setup(self, context): self.provisioning_type = ( "thin" if self.configuration.san_thin_provision else "thick" ) self.configuration.max_over_subscription_ratio = ( self.configuration.powerflex_max_over_subscription_ratio ) def local_path(self, volume): pass def reenable_replication(self, context, volume): pass def promote_replica(self, context, volume): pass def unmanage(self, volume): pass class PowerFlexClient(rest_client.RestClient): """Mock PowerFlex Rest Client class. Provides some fake configuration options """ def is_volume_creation_safe(self, _pd, _sp): return True class MockHTTPSResponse(requests.Response): """Mock HTTP Response Defines the https replies from the mocked calls to do_request() """ def __init__(self, content, status_code=200): super(MockHTTPSResponse, self).__init__() if isinstance(content, str): content = content.encode('utf-8') self._content = content self.status_code = status_code def json(self, **kwargs): if isinstance(self._content, (bytes, str)): return super(MockHTTPSResponse, self).json(**kwargs) return self._content @property def text(self): if not isinstance(self._content, (bytes, str)): return json.dumps(self._content) return super(MockHTTPSResponse, self).text ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315577.2511199 cinder-27.0.0/cinder/tests/unit/volume/drivers/dell_emc/powerflex/mockup/0000775000175000017500000000000000000000000026516 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000020600000000000011453 xustar0000000000000000112 path=cinder-27.0.0/cinder/tests/unit/volume/drivers/dell_emc/powerflex/mockup/query_sdc_by_id_response.json 22 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/volume/drivers/dell_emc/powerflex/mockup/query_sdc_by_id_response.js0000664000175000017500000000271200000000000034140 0ustar00zuulzuul00000000000000{ "hostOsFullType": null, "systemId": "2c4a220db6e0520f", "name": null, "mdmConnectionState": "Connected", "softwareVersionInfo": "R5_5.0.0", "peerMdmId": null, "sdtId": null, "sdcApproved": true, "sdcAgentActive": false, "mdmIpAddressesCurrent": false, "sdcIp": "192.168.10.12", "sdcIps": [ "192.168.10.12" ], "osType": "Linux", "perfProfile": "HighPerformance", "socketAllocationFailure": null, "memoryAllocationFailure": null, "versionInfo": "R5_5.0.0", "nqn": null, "maxNumPaths": null, "maxNumSysPorts": null, "sdcType": "AppSdc", "sdcGuid": "028888FA-502A-4FAC-A888-1FA3B256358C", "installedSoftwareVersionInfo": "R5_5.0.0", "kernelVersion": "5.15.179", "kernelBuildNumber": null, "sdcApprovedIps": null, "hostType": "SdcHost", "sdrId": null, "id": "01f7117d0000000b", "links": [ { "rel": "self", "href": "/api/instances/Sdc::01f7117d0000000b" }, { "rel": "/api/Sdc/relationship/Statistics", "href": "/api/instances/Sdc::01f7117d0000000b/relationships/Statistics" }, { "rel": 
"/api/Sdc/relationship/Volume", "href": "/api/instances/Sdc::01f7117d0000000b/relationships/Volume" }, { "rel": "/api/parent/relationship/systemId", "href": "/api/instances/System::2c4a220db6e0520f" } ] }././@PaxHeader0000000000000000000000000000021200000000000011450 xustar0000000000000000116 path=cinder-27.0.0/cinder/tests/unit/volume/drivers/dell_emc/powerflex/mockup/query_sdc_instances_response.json 22 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/volume/drivers/dell_emc/powerflex/mockup/query_sdc_instances_respons0000664000175000017500000000323200000000000034257 0ustar00zuulzuul00000000000000[ { "hostOsFullType": null, "systemId": "2c4a220db6e0520f", "name": null, "mdmConnectionState": "Connected", "softwareVersionInfo": "R5_5.0.0", "peerMdmId": null, "sdtId": null, "sdcApproved": true, "sdcAgentActive": false, "mdmIpAddressesCurrent": false, "sdcIp": "192.168.10.12", "sdcIps": [ "192.168.10.12" ], "osType": "Linux", "perfProfile": "HighPerformance", "socketAllocationFailure": null, "memoryAllocationFailure": null, "versionInfo": "R5_5.0.0", "nqn": null, "maxNumPaths": null, "maxNumSysPorts": null, "sdcType": "AppSdc", "sdcGuid": "028888FA-502A-4FAC-A888-1FA3B256358C", "installedSoftwareVersionInfo": "R5_5.0.0", "kernelVersion": "5.15.179", "kernelBuildNumber": null, "sdcApprovedIps": null, "hostType": "SdcHost", "sdrId": null, "id": "01f7117d0000000b", "links": [ { "rel": "self", "href": "/api/instances/Sdc::01f7117d0000000b" }, { "rel": "/api/Sdc/relationship/Statistics", "href": "/api/instances/Sdc::01f7117d0000000b/relationships/Statistics" }, { "rel": "/api/Sdc/relationship/Volume", "href": "/api/instances/Sdc::01f7117d0000000b/relationships/Volume" }, { "rel": "/api/parent/relationship/systemId", "href": "/api/instances/System::2c4a220db6e0520f" } ] } ]././@PaxHeader0000000000000000000000000000021000000000000011446 xustar0000000000000000114 path=cinder-27.0.0/cinder/tests/unit/volume/drivers/dell_emc/powerflex/mockup/query_sdc_volumes_response.json 22 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/volume/drivers/dell_emc/powerflex/mockup/query_sdc_volumes_response.0000664000175000017500000001044400000000000034210 0ustar00zuulzuul00000000000000[ { "managedBy": "ScaleIO", "originalExpiryTime": 0, "retentionLevels": [], "snplIdOfSourceVolume": null, "volumeReplicationState": "UnmarkedForReplication", "mappedSdcInfo": [ { "limitIops": 0, "limitBwInMbps": 0, "isDirectBufferMapping": false, "sdcId": "01f7117d0000000b", "sdcIp": "192.168.10.12", "sdcName": null, "accessMode": "ReadWrite", "nqn": null, "hostType": "SdcHost" } ], "replicationJournalVolume": false, "replicationTimeStamp": 0, "name": "yian_sdc_1", "creationTime": 1746889338, "storagePoolId": "fa85edfd00000000", "dataLayout": "MediumGranularity", "compressionMethod": "NotApplicable", "vtreeId": "2b713ee600000007", "sizeInKb": 8388608, "volumeClass": "defaultclass", "accessModeLimit": "ReadWrite", "pairIds": null, "volumeType": "ThinProvisioned", "consistencyGroupId": null, "ancestorVolumeId": null, "notGenuineSnapshot": false, "secureSnapshotExpTime": 0, "useRmcache": false, "snplIdOfAutoSnapshot": null, "lockedAutoSnapshot": false, "lockedAutoSnapshotMarkedForRemoval": false, "autoSnapshotGroupId": null, "timeStampIsAccurate": false, "nsid": 12, "id": "694a2d140000000b", "links": [ { "rel": "self", "href": "/api/instances/Volume::694a2d140000000b" }, { "rel": "/api/Volume/relationship/Statistics", "href": "/api/instances/Volume::694a2d140000000b/relationships/Statistics" }, { "rel": 
"/api/parent/relationship/vtreeId", "href": "/api/instances/VTree::2b713ee600000007" }, { "rel": "/api/parent/relationship/storagePoolId", "href": "/api/instances/StoragePool::fa85edfd00000000" } ] }, { "managedBy": "ScaleIO", "originalExpiryTime": 0, "retentionLevels": [], "snplIdOfSourceVolume": null, "volumeReplicationState": "UnmarkedForReplication", "mappedSdcInfo": [ { "limitIops": 0, "limitBwInMbps": 0, "isDirectBufferMapping": false, "sdcId": "01f7117d0000000b", "sdcIp": "192.168.10.12", "sdcName": null, "accessMode": "ReadWrite", "nqn": null, "hostType": "SdcHost" } ], "replicationJournalVolume": false, "replicationTimeStamp": 0, "name": "yian_sdc_0", "creationTime": 1746886943, "storagePoolId": "fa85edfd00000000", "dataLayout": "MediumGranularity", "compressionMethod": "NotApplicable", "vtreeId": "2b713ee500000002", "sizeInKb": 8388608, "volumeClass": "defaultclass", "accessModeLimit": "ReadWrite", "pairIds": null, "volumeType": "ThinProvisioned", "consistencyGroupId": null, "ancestorVolumeId": null, "notGenuineSnapshot": false, "secureSnapshotExpTime": 0, "useRmcache": false, "snplIdOfAutoSnapshot": null, "lockedAutoSnapshot": false, "lockedAutoSnapshotMarkedForRemoval": false, "autoSnapshotGroupId": null, "timeStampIsAccurate": false, "nsid": 10, "id": "694a2d1300000009", "links": [ { "rel": "self", "href": "/api/instances/Volume::694a2d1300000009" }, { "rel": "/api/Volume/relationship/Statistics", "href": "/api/instances/Volume::694a2d1300000009/relationships/Statistics" }, { "rel": "/api/parent/relationship/vtreeId", "href": "/api/instances/VTree::2b713ee500000002" }, { "rel": "/api/parent/relationship/storagePoolId", "href": "/api/instances/StoragePool::fa85edfd00000000" } ] } ]././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/volume/drivers/dell_emc/powerflex/test_attach_detach_volume.py0000664000175000017500000000264100000000000032777 0ustar00zuulzuul00000000000000# Copyright (c) 2016 EMC Corporation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from cinder import context from cinder.tests.unit import fake_constants as fake from cinder.tests.unit import fake_volume from cinder.tests.unit.volume.drivers.dell_emc import powerflex class TestAttachDetachVolume(powerflex.TestPowerFlexDriver): def setUp(self): super(TestAttachDetachVolume, self).setUp() ctx = context.RequestContext('fake', 'fake', auth_token=True) self.fake_path = '/fake/path/vol-xx' self.volume = fake_volume.fake_volume_obj( ctx, **{'provider_id': fake.PROVIDER_ID}) self.driver.connector = FakeConnector() class FakeConnector(object): def connect_volume(self, connection_properties): return {'path': '/fake/path/vol-xx'} def disconnect_volume(self, connection_properties, volume): return None ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/volume/drivers/dell_emc/powerflex/test_create_cloned_volume.py0000664000175000017500000001032100000000000033004 0ustar00zuulzuul00000000000000# Copyright (c) 2013 - 2015 EMC Corporation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import json import urllib.parse from cinder import context from cinder import exception from cinder.tests.unit import fake_constants as fake from cinder.tests.unit import fake_volume from cinder.tests.unit.volume.drivers.dell_emc import powerflex from cinder.tests.unit.volume.drivers.dell_emc.powerflex import mocks from cinder.volume.drivers.dell_emc.powerflex import utils as flex_utils class TestCreateClonedVolume(powerflex.TestPowerFlexDriver): """Test cases for ``PowerFlexDriver.create_cloned_volume()``""" def setUp(self): """Setup a test case environment. Creates fake volume objects and sets up the required API responses. 
""" super(TestCreateClonedVolume, self).setUp() ctx = context.RequestContext('fake', 'fake', auth_token=True) self.src_volume = fake_volume.fake_volume_obj( ctx, **{'provider_id': fake.PROVIDER_ID}) self.src_volume_name_2x_enc = urllib.parse.quote( urllib.parse.quote( flex_utils.id_to_base64(self.src_volume.id) ) ) self.new_volume_extras = { 'volumeIdList': ['cloned'], 'snapshotGroupId': 'cloned_snapshot' } self.new_volume = fake_volume.fake_volume_obj( ctx, **self.new_volume_extras ) self.new_volume_name_2x_enc = urllib.parse.quote( urllib.parse.quote( flex_utils.id_to_base64(self.new_volume.id) ) ) self.HTTPS_MOCK_RESPONSES = { self.RESPONSE_MODE.Valid: { 'types/Volume/instances/getByName::' + self.src_volume_name_2x_enc: self.src_volume.id, 'instances/System/action/snapshotVolumes': '{}'.format( json.dumps(self.new_volume_extras)), 'instances/Volume::cloned/action/setVolumeSize': None }, self.RESPONSE_MODE.BadStatus: { 'instances/System/action/snapshotVolumes': self.BAD_STATUS_RESPONSE, 'types/Volume/instances/getByName::' + self.src_volume['provider_id']: self.BAD_STATUS_RESPONSE, }, self.RESPONSE_MODE.Invalid: { 'types/Volume/instances/getByName::' + self.src_volume_name_2x_enc: None, 'instances/System/action/snapshotVolumes': mocks.MockHTTPSResponse( { 'errorCode': 400, 'message': 'Invalid Volume Snapshot Test' }, 400 ), }, } def test_bad_login(self): self.set_https_response_mode(self.RESPONSE_MODE.BadStatus) self.assertRaises(exception.VolumeBackendAPIException, self.driver.create_cloned_volume, self.new_volume, self.src_volume) def test_invalid_source_volume(self): self.set_https_response_mode(self.RESPONSE_MODE.Invalid) self.assertRaises(exception.VolumeBackendAPIException, self.driver.create_cloned_volume, self.new_volume, self.src_volume) def test_create_cloned_volume(self): self.set_https_response_mode(self.RESPONSE_MODE.Valid) self.driver.create_cloned_volume(self.new_volume, self.src_volume) def test_create_cloned_volume_larger_size(self): self.set_https_response_mode(self.RESPONSE_MODE.Valid) self.new_volume.size = 2 self.driver.create_cloned_volume(self.new_volume, self.src_volume) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/volume/drivers/dell_emc/powerflex/test_create_snapshot.py0000664000175000017500000001236300000000000032020 0ustar00zuulzuul00000000000000# Copyright (c) 2013 - 2015 EMC Corporation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import json import urllib.parse from cinder import context from cinder import db from cinder import exception from cinder.tests.unit import fake_constants as fake from cinder.tests.unit import fake_snapshot from cinder.tests.unit import fake_volume from cinder.tests.unit.volume.drivers.dell_emc import powerflex from cinder.tests.unit.volume.drivers.dell_emc.powerflex import mocks from cinder.volume.drivers.dell_emc.powerflex import rest_client from cinder.volume.drivers.dell_emc.powerflex import utils as flex_utils class TestCreateSnapShot(powerflex.TestPowerFlexDriver): """Test cases for ``PowerFlexDriver.create_snapshot()``""" def return_fake_volume(self, ctx, id): return self.fake_volume def setUp(self): """Setup a test case environment. Creates fake volume and snapshot objects and sets up the required API responses. """ super(TestCreateSnapShot, self).setUp() ctx = context.RequestContext('fake', 'fake', auth_token=True) self.fake_volume = fake_volume.fake_volume_obj( ctx, **{'provider_id': fake.PROVIDER_ID}) self.snapshot = fake_snapshot.fake_snapshot_obj( ctx, **{'volume': self.fake_volume}) self.mock_object(db.sqlalchemy.api, 'volume_get', self.return_fake_volume) snap_vol_id = self.snapshot.volume_id self.volume_name_2x_enc = urllib.parse.quote( urllib.parse.quote(flex_utils.id_to_base64(snap_vol_id)) ) self.snapshot_name_2x_enc = urllib.parse.quote( urllib.parse.quote(flex_utils.id_to_base64(self.snapshot.id)) ) self.snapshot_reply = json.dumps( { 'volumeIdList': ['cloned'], 'snapshotGroupId': 'cloned_snapshot' } ) self.HTTPS_MOCK_RESPONSES = { self.RESPONSE_MODE.Valid: { 'types/Volume/instances/getByName::' + self.volume_name_2x_enc: '"{}"'.format( self.snapshot.volume_id ), 'instances/System/action/snapshotVolumes': self.snapshot_reply, 'types/Volume/instances/getByName::' + self.snapshot_name_2x_enc: self.snapshot.id, }, self.RESPONSE_MODE.BadStatus: { 'types/Volume/instances/getByName::' + self.volume_name_2x_enc: self.BAD_STATUS_RESPONSE, 'types/Volume/instances/getByName::' + self.snapshot_name_2x_enc: self.BAD_STATUS_RESPONSE, 'instances/System/action/snapshotVolumes': self.BAD_STATUS_RESPONSE, }, self.RESPONSE_MODE.BadStatusWithDetails: { 'instances/System/action/snapshotVolumes': mocks.MockHTTPSResponse( { 'errorCode': 0, 'message': 'Error with details', 'details': [ { 'rc': rest_client.TOO_MANY_SNAPS_ERROR, }, ], }, 500 ), }, self.RESPONSE_MODE.Invalid: { 'types/Volume/instances/getByName::' + self.volume_name_2x_enc: None, 'instances/System/action/snapshotVolumes': mocks.MockHTTPSResponse( { 'errorCode': 400, 'message': 'Invalid Volume Snapshot Test' }, 400 ), }, } def test_bad_login(self): self.set_https_response_mode(self.RESPONSE_MODE.BadStatus) self.assertRaises( exception.VolumeBackendAPIException, self.driver.create_snapshot, self.snapshot ) def test_invalid_volume(self): self.set_https_response_mode(self.RESPONSE_MODE.Invalid) self.assertRaises( exception.VolumeBackendAPIException, self.driver.create_snapshot, self.snapshot ) def test_create_snapshot(self): self.set_https_response_mode(self.RESPONSE_MODE.Valid) self.driver.create_snapshot(self.snapshot) def test_create_snapshot_limit_reached(self): self.set_https_response_mode( self.RESPONSE_MODE.BadStatusWithDetails) self.assertRaises( exception.SnapshotLimitReached, self.driver.create_snapshot, self.snapshot ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 
cinder-27.0.0/cinder/tests/unit/volume/drivers/dell_emc/powerflex/test_create_volume.py0000664000175000017500000001156300000000000031471 0ustar00zuulzuul00000000000000# Copyright (c) 2013 - 2015 EMC Corporation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from unittest import mock import ddt import requests.exceptions from cinder import context from cinder import exception from cinder.tests.unit import fake_volume from cinder.tests.unit.volume.drivers.dell_emc import powerflex @ddt.ddt class TestCreateVolume(powerflex.TestPowerFlexDriver): """Test cases for ``PowerFlexDriver.create_volume()``""" def setUp(self): """Setup a test case environment. Creates a fake volume object and sets up the required API responses. """ super(TestCreateVolume, self).setUp() ctx = context.RequestContext('fake', 'fake', auth_token=True) self.volume = fake_volume.fake_volume_obj(ctx) host = 'host@backend#{}:{}'.format( self.PROT_DOMAIN_NAME, self.STORAGE_POOL_NAME) self.volume.host = host self.HTTPS_MOCK_RESPONSES = { self.RESPONSE_MODE.Valid: { 'types/Volume/instances/getByName::' + self.volume.name: '"{}"'.format(self.volume.id), 'types/Volume/instances': {'id': self.volume.id}, 'types/Domain/instances/getByName::' + self.PROT_DOMAIN_NAME: '"{}"'.format(self.PROT_DOMAIN_ID), 'types/Pool/instances/getByName::{},{}'.format( self.PROT_DOMAIN_ID, self.STORAGE_POOL_NAME ): '"{}"'.format(self.STORAGE_POOL_ID), 'instances/ProtectionDomain::{}'.format( self.PROT_DOMAIN_ID ): {'id': self.PROT_DOMAIN_ID}, 'instances/StoragePool::{}'.format( self.STORAGE_POOL_ID ): {'id': self.STORAGE_POOL_ID}, }, self.RESPONSE_MODE.Invalid: { 'types/Domain/instances/getByName::' + self.PROT_DOMAIN_NAME: None, 'types/Pool/instances/getByName::{},{}'.format( self.PROT_DOMAIN_ID, self.STORAGE_POOL_NAME ): None, }, self.RESPONSE_MODE.BadStatus: { 'types/Volume/instances': self.BAD_STATUS_RESPONSE, 'types/Domain/instances/getByName::' + self.PROT_DOMAIN_NAME: self.BAD_STATUS_RESPONSE, 'types/Pool/instances/getByName::{},{}'.format( self.PROT_DOMAIN_ID, self.STORAGE_POOL_NAME ): self.BAD_STATUS_RESPONSE, }, } def test_create_volume(self): """Valid create volume parameters""" self.driver.create_volume(self.volume) def test_create_volume_non_8_gran(self): self.volume.size = 14 model_update = self.driver.create_volume(self.volume) self.assertEqual(16, model_update['size']) def test_create_volume_badstatus_response(self): self.set_https_response_mode(self.RESPONSE_MODE.BadStatus) self.assertRaises(exception.VolumeBackendAPIException, self.test_create_volume) @ddt.data({'provisioning:type': 'thin'}, {'provisioning:type': 'thick'}) def test_create_thin_thick_volume(self, extraspecs): self.driver._get_volumetype_extraspecs = mock.MagicMock() self.driver._get_volumetype_extraspecs.return_value = extraspecs self.driver.create_volume(self.volume) def test_create_volume_bad_provisioning_type(self): extraspecs = {'provisioning:type': 'other'} self.driver._get_volumetype_extraspecs = mock.MagicMock() 
self.driver._get_volumetype_extraspecs.return_value = extraspecs self.assertRaises(exception.VolumeBackendAPIException, self.test_create_volume) @mock.patch("requests.post") def test_volume_post_connect_timeout_request(self, mock_request): mock_request.side_effect = requests.exceptions.ConnectTimeout() self.assertRaises(exception.VolumeBackendAPIException, self.test_create_volume) @mock.patch("requests.post") def test_volume_post_read_timeout_request(self, mock_request): mock_request.side_effect = requests.exceptions.ReadTimeout() self.assertRaises(exception.VolumeBackendAPIException, self.test_create_volume) ././@PaxHeader0000000000000000000000000000020500000000000011452 xustar0000000000000000111 path=cinder-27.0.0/cinder/tests/unit/volume/drivers/dell_emc/powerflex/test_create_volume_from_snapshot.py 22 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/volume/drivers/dell_emc/powerflex/test_create_volume_from_snapshot.p0000664000175000017500000001021400000000000034232 0ustar00zuulzuul00000000000000# Copyright (c) 2013 - 2015 EMC Corporation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import json import urllib.parse from cinder import context from cinder import exception from cinder.tests.unit import fake_snapshot from cinder.tests.unit import fake_volume from cinder.tests.unit.volume.drivers.dell_emc import powerflex from cinder.tests.unit.volume.drivers.dell_emc.powerflex import mocks from cinder.volume.drivers.dell_emc.powerflex import utils as flex_utils class TestCreateVolumeFromSnapShot(powerflex.TestPowerFlexDriver): """Test cases for ``PowerFlexDriver.create_volume_from_snapshot()``""" def setUp(self): """Setup a test case environment. Creates fake volume and snapshot objects and sets up the required API responses. 
""" super(TestCreateVolumeFromSnapShot, self).setUp() ctx = context.RequestContext('fake', 'fake', auth_token=True) self.snapshot = fake_snapshot.fake_snapshot_obj(ctx) self.snapshot_name_2x_enc = urllib.parse.quote( urllib.parse.quote(flex_utils.id_to_base64(self.snapshot.id)) ) self.volume = fake_volume.fake_volume_obj(ctx) self.volume_name_2x_enc = urllib.parse.quote( urllib.parse.quote(flex_utils.id_to_base64(self.volume.id)) ) self.snapshot_reply = json.dumps( { 'volumeIdList': [self.volume.id], 'snapshotGroupId': 'snap_group' } ) self.HTTPS_MOCK_RESPONSES = { self.RESPONSE_MODE.Valid: { 'types/Volume/instances/getByName::' + self.snapshot_name_2x_enc: self.snapshot.id, 'instances/System/action/snapshotVolumes': self.snapshot_reply, 'instances/Volume::{}/action/setVolumeSize'.format( self.volume.id): None, }, self.RESPONSE_MODE.BadStatus: { 'instances/System/action/snapshotVolumes': self.BAD_STATUS_RESPONSE, 'types/Volume/instances/getByName::' + self.snapshot_name_2x_enc: self.BAD_STATUS_RESPONSE, }, self.RESPONSE_MODE.Invalid: { 'instances/System/action/snapshotVolumes': mocks.MockHTTPSResponse( { 'errorCode': self.OLD_VOLUME_NOT_FOUND_ERROR, 'message': 'BadStatus Volume Test', }, 400 ), 'types/Volume/instances/getByName::' + self.snapshot_name_2x_enc: None, }, } def test_bad_login(self): self.set_https_response_mode(self.RESPONSE_MODE.BadStatus) self.assertRaises( exception.VolumeBackendAPIException, self.driver.create_volume_from_snapshot, self.volume, self.snapshot ) def test_invalid_snapshot(self): self.set_https_response_mode(self.RESPONSE_MODE.Invalid) self.assertRaises( exception.VolumeBackendAPIException, self.driver.create_volume_from_snapshot, self.volume, self.snapshot ) def test_create_volume_from_snapshot(self): self.set_https_response_mode(self.RESPONSE_MODE.Valid) self.driver.create_volume_from_snapshot(self.volume, self.snapshot) def test_create_volume_from_snapshot_larger(self): self.set_https_response_mode(self.RESPONSE_MODE.Valid) self.volume.size = 2 self.driver.create_volume_from_snapshot(self.volume, self.snapshot) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/volume/drivers/dell_emc/powerflex/test_delete_snapshot.py0000664000175000017500000001061600000000000032016 0ustar00zuulzuul00000000000000# Copyright (c) 2013 - 2015 EMC Corporation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import urllib.parse from cinder import context from cinder import exception from cinder.tests.unit import fake_constants as fake from cinder.tests.unit.fake_snapshot import fake_snapshot_obj from cinder.tests.unit.fake_volume import fake_volume_obj from cinder.tests.unit.volume.drivers.dell_emc import powerflex from cinder.tests.unit.volume.drivers.dell_emc.powerflex import mocks from cinder.volume import configuration from cinder.volume.drivers.dell_emc.powerflex import utils as flex_utils class TestDeleteSnapShot(powerflex.TestPowerFlexDriver): """Test cases for ``PowerFlexDriver.delete_snapshot()``""" def setUp(self): """Setup a test case environment. Creates fake volume and snapshot objects and sets up the required API responses. """ super(TestDeleteSnapShot, self).setUp() ctx = context.RequestContext('fake', 'fake', auth_token=True) self.fake_volume = fake_volume_obj( ctx, **{'provider_id': fake.PROVIDER_ID}) self.snapshot = fake_snapshot_obj( ctx, **{'volume': self.fake_volume, 'provider_id': fake.SNAPSHOT_ID}) self.snapshot_name_2x_enc = urllib.parse.quote( urllib.parse.quote( flex_utils.id_to_base64(self.snapshot.id) ) ) self.HTTPS_MOCK_RESPONSES = { self.RESPONSE_MODE.Valid: { 'types/Volume/instances/getByName::' + self.snapshot_name_2x_enc: self.snapshot.id, 'instances/Volume::' + self.snapshot.provider_id: {}, 'instances/Volume::{}/action/removeMappedSdc'.format( self.snapshot.provider_id ): self.snapshot.id, 'instances/Volume::{}/action/removeVolume'.format( self.snapshot.provider_id ): self.snapshot.id, }, self.RESPONSE_MODE.BadStatus: { 'instances/Volume::' + self.snapshot.provider_id: self.BAD_STATUS_RESPONSE, 'types/Volume/instances/getByName::' + self.snapshot_name_2x_enc: self.BAD_STATUS_RESPONSE, 'instances/Volume::{}/action/removeVolume'.format( self.snapshot.provider_id ): self.BAD_STATUS_RESPONSE, }, self.RESPONSE_MODE.Invalid: { 'types/Volume/instances/getByName::' + self.snapshot_name_2x_enc: mocks.MockHTTPSResponse( { 'errorCode': self.OLD_VOLUME_NOT_FOUND_ERROR, 'message': 'Test Delete Invalid Snapshot', }, 400 ), 'instances/Volume::{}/action/removeVolume'.format( self.snapshot.provider_id): mocks.MockHTTPSResponse( { 'errorCode': self.OLD_VOLUME_NOT_FOUND_ERROR, 'message': 'Test Delete Invalid Snapshot', }, 400, ) }, } def test_bad_login(self): self.set_https_response_mode(self.RESPONSE_MODE.BadStatus) self.assertRaises(exception.VolumeBackendAPIException, self.driver.delete_snapshot, self.snapshot) def test_delete_invalid_snapshot(self): self.set_https_response_mode(self.RESPONSE_MODE.Valid) self.driver.delete_snapshot(self.snapshot) def test_delete_snapshot(self): """Setting the unmap volume before delete flag for tests """ self.override_config('powerflex_unmap_volume_before_deletion', True, configuration.SHARED_CONF_GROUP) self.set_https_response_mode(self.RESPONSE_MODE.Valid) self.driver.delete_snapshot(self.snapshot) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/volume/drivers/dell_emc/powerflex/test_delete_volume.py0000664000175000017500000000703300000000000031465 0ustar00zuulzuul00000000000000# Copyright (c) 2013 - 2015 EMC Corporation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import urllib.parse from cinder import context from cinder import exception from cinder.tests.unit import fake_constants as fake from cinder.tests.unit import fake_volume from cinder.tests.unit.volume.drivers.dell_emc import powerflex from cinder.tests.unit.volume.drivers.dell_emc.powerflex import mocks from cinder.volume import configuration from cinder.volume.drivers.dell_emc.powerflex import utils as flex_utils class TestDeleteVolume(powerflex.TestPowerFlexDriver): """Test cases for ``PowerFlexDriver.delete_volume()``""" def setUp(self): """Setup a test case environment. Creates a fake volume object and sets up the required API responses. """ super(TestDeleteVolume, self).setUp() ctx = context.RequestContext('fake', 'fake', auth_token=True) self.volume = fake_volume.fake_volume_obj( ctx, **{'provider_id': fake.PROVIDER_ID}) self.volume_name_2x_enc = urllib.parse.quote( urllib.parse.quote(flex_utils.id_to_base64(self.volume.id)) ) self.HTTPS_MOCK_RESPONSES = { self.RESPONSE_MODE.Valid: { 'instances/Volume::' + self.volume.provider_id: {}, 'types/Volume/instances/getByName::' + self.volume_name_2x_enc: self.volume.id, 'instances/Volume::{}/action/removeMappedSdc'.format( self.volume.provider_id): self.volume.provider_id, 'instances/Volume::{}/action/removeVolume'.format( self.volume.provider_id ): self.volume.provider_id, }, self.RESPONSE_MODE.BadStatus: { 'instances/Volume::' + self.volume.provider_id: self.BAD_STATUS_RESPONSE, 'types/Volume/instances/getByName::' + self.volume_name_2x_enc: mocks.MockHTTPSResponse( { 'errorCode': 401, 'message': 'BadStatus Volume Test', }, 401 ), 'instances/Volume::{}/action/removeVolume'.format( self.volume.provider_id ): mocks.MockHTTPSResponse( { 'errorCode': 401, 'message': 'BadStatus Volume Test', }, 401 ), }, } def test_bad_login_and_volume(self): self.set_https_response_mode(self.RESPONSE_MODE.BadStatus) self.assertRaises(exception.VolumeBackendAPIException, self.driver.delete_volume, self.volume) def test_delete_volume(self): """Setting the unmap volume before delete flag for tests """ self.override_config('powerflex_unmap_volume_before_deletion', True, configuration.SHARED_CONF_GROUP) self.driver.delete_volume(self.volume) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/volume/drivers/dell_emc/powerflex/test_extend_volume.py0000664000175000017500000001107400000000000031512 0ustar00zuulzuul00000000000000# Copyright (c) 2013 - 2015 EMC Corporation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
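# --- Illustrative sketch (not part of the cinder source above) ---
# TestDeleteVolume/TestDeleteSnapShot register both 'removeMappedSdc' and
# 'removeVolume' actions and flip 'powerflex_unmap_volume_before_deletion'.
# The helper below is an assumption-level summary of which REST actions the
# tests expect to be reachable, not the driver's real call sequence.
def expected_delete_calls(provider_id, unmap_first):
    calls = []
    if unmap_first:
        # Flag enabled: unmap from SDCs before removal.
        calls.append(
            'instances/Volume::{}/action/removeMappedSdc'.format(provider_id))
    calls.append(
        'instances/Volume::{}/action/removeVolume'.format(provider_id))
    return calls


assert expected_delete_calls('pid', True) == [
    'instances/Volume::pid/action/removeMappedSdc',
    'instances/Volume::pid/action/removeVolume',
]
# --- end sketch ---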
import urllib.parse from cinder import context from cinder import exception from cinder.tests.unit import fake_constants as fake from cinder.tests.unit.fake_volume import fake_volume_obj from cinder.tests.unit.volume.drivers.dell_emc import powerflex from cinder.tests.unit.volume.drivers.dell_emc.powerflex import mocks from cinder.volume import configuration from cinder.volume.drivers.dell_emc.powerflex import utils as flex_utils class TestExtendVolume(powerflex.TestPowerFlexDriver): """Test cases for ``PowerFlexDriver.extend_volume()``""" """ New sizes for the volume. Since PowerFlex has a granularity of 8 GB, multiples of 8 always work. The 7 size should be either rounded up to 8 or raise an exception based on the round_volume_capacity config setting. """ NEW_SIZE = 16 BAD_SIZE = 7 def setUp(self): """Setup a test case environment. Creates fake volume object and sets up the required API responses. """ super(TestExtendVolume, self).setUp() ctx = context.RequestContext('fake', 'fake', auth_token=True) self.volume = fake_volume_obj(ctx, **{'id': fake.VOLUME_ID, 'provider_id': fake.PROVIDER_ID}) self.volume_name_2x_enc = urllib.parse.quote( urllib.parse.quote(flex_utils.id_to_base64(self.volume.id)) ) self.HTTPS_MOCK_RESPONSES = { self.RESPONSE_MODE.Valid: { 'types/Volume/instances/getByName::' + self.volume_name_2x_enc: '"{}"'.format(self.volume.id), 'instances/Volume::{}/action/setVolumeSize'.format( self.volume.provider_id ): mocks.MockHTTPSResponse({}, 200), }, self.RESPONSE_MODE.BadStatus: { 'types/Volume/instances/getByName::' + self.volume_name_2x_enc: self.BAD_STATUS_RESPONSE, 'types/Volume/instances/getByName::' + self.volume_name_2x_enc: self.BAD_STATUS_RESPONSE, 'instances/Volume::{}/action/setVolumeSize'.format( self.volume.provider_id): self.BAD_STATUS_RESPONSE, }, self.RESPONSE_MODE.Invalid: { 'types/Volume/instances/getByName::' + self.volume_name_2x_enc: None, 'instances/Volume::{}/action/setVolumeSize'.format( self.volume.provider_id): mocks.MockHTTPSResponse( { 'errorCode': self.OLD_VOLUME_NOT_FOUND_ERROR, 'message': 'BadStatus Volume Test', }, 400 ), }, } def test_bad_login(self): self.set_https_response_mode(self.RESPONSE_MODE.BadStatus) self.assertRaises(exception.VolumeBackendAPIException, self.driver.extend_volume, self.volume, self.NEW_SIZE) def test_invalid_volume(self): self.set_https_response_mode(self.RESPONSE_MODE.Invalid) self.assertRaises(exception.VolumeBackendAPIException, self.driver.extend_volume, self.volume, self.NEW_SIZE) def test_extend_volume_bad_size_no_round(self): self.override_config('powerflex_round_volume_capacity', False, configuration.SHARED_CONF_GROUP) self.set_https_response_mode(self.RESPONSE_MODE.Valid) self.driver.extend_volume(self.volume, self.BAD_SIZE) def test_extend_volume_bad_size_round(self): self.override_config('powerflex_round_volume_capacity', True, configuration.SHARED_CONF_GROUP) self.driver.extend_volume(self.volume, self.BAD_SIZE) def test_extend_volume(self): self.set_https_response_mode(self.RESPONSE_MODE.Valid) self.driver.extend_volume(self.volume, self.NEW_SIZE) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/volume/drivers/dell_emc/powerflex/test_get_manageable.py0000664000175000017500000001567200000000000031557 0ustar00zuulzuul00000000000000# Copyright (C) 2017 Dell Inc. or its subsidiaries. # All Rights Reserved. 
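# --- Illustrative sketch (not part of the cinder source above) ---
# Worked example of the 8 GiB granularity described in the TestExtendVolume
# docstring: requested sizes are rounded up to the next multiple of 8 when
# 'powerflex_round_volume_capacity' is enabled, otherwise a non-multiple such
# as BAD_SIZE = 7 is rejected. This mirrors the test values only; it is not
# the driver's actual sizing code.
import math


def rounded_size_gb(requested_gb):
    return int(math.ceil(requested_gb / 8.0) * 8)


assert rounded_size_gb(7) == 8     # BAD_SIZE rounds up to 8
assert rounded_size_gb(16) == 16   # NEW_SIZE is already a multiple of 8
assert rounded_size_gb(14) == 16   # matches test_create_volume_non_8_gran
# --- end sketch ---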
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from copy import deepcopy from unittest import mock import ddt from cinder.tests.unit import fake_volume from cinder.tests.unit.volume.drivers.dell_emc import powerflex VOLUME_ID = "abcdabcd-1234-abcd-1234-abcdabcdabcd" PROVIDER_ID = "0000000000000001" MANAGEABLE_FLEX_VOLS = [ { "volumeType": "ThinProvisioned", "storagePoolId": "6c6dc54500000000", "sizeInKb": 8388608, "name": "volume1", "id": PROVIDER_ID, "mappedSdcInfo": [], }, { "volumeType": "ThinProvisioned", "storagePoolId": "6c6dc54500000000", "sizeInKb": 8388608, "name": "volume2", "id": "0000000000000002", "mappedSdcInfo": [], }, { "volumeType": "ThickProvisioned", "storagePoolId": "6c6dc54500000000", "sizeInKb": 8388608, "name": "volume3", "id": "0000000000000003", "mappedSdcInfo": [], } ] POWERFLEX_SNAPSHOT = { "volumeType": "Snapshot", "storagePoolId": "6c6dc54500000000", "sizeInKb": 8388608, "name": "snapshot1", "id": "1000000000000001", "mappedSdcInfo": [], } MANAGEABLE_FLEX_VOL_REFS = [ { 'reference': {'source-id': PROVIDER_ID}, 'size': 8, 'safe_to_manage': True, 'reason_not_safe': None, 'cinder_id': None, 'extra_info': { "volumeType": "ThinProvisioned", "name": "volume1" } }, { 'reference': {'source-id': '0000000000000002'}, 'size': 8, 'safe_to_manage': True, 'reason_not_safe': None, 'cinder_id': None, 'extra_info': { "volumeType": "ThinProvisioned", "name": "volume2" } }, { 'reference': {'source-id': '0000000000000003'}, 'size': 8, 'safe_to_manage': True, 'reason_not_safe': None, 'cinder_id': None, 'extra_info': { "volumeType": "ThickProvisioned", "name": "volume3" } } ] @ddt.ddt class PowerFlexManageableCase(powerflex.TestPowerFlexDriver): def setUp(self): """Setup a test case environment.""" super(PowerFlexManageableCase, self).setUp() self.driver.storage_pools = super().STORAGE_POOLS def _test_get_manageable_things(self, powerflex_objects=MANAGEABLE_FLEX_VOLS, expected_refs=MANAGEABLE_FLEX_VOL_REFS, cinder_objs=list()): marker = mock.Mock() limit = mock.Mock() offset = mock.Mock() sort_keys = mock.Mock() sort_dirs = mock.Mock() self.HTTPS_MOCK_RESPONSES = { self.RESPONSE_MODE.Valid: { 'instances/StoragePool::{}/relationships/Volume'.format( self.STORAGE_POOL_ID ): powerflex_objects, 'types/Pool/instances/getByName::{},{}'.format( self.PROT_DOMAIN_ID, self.STORAGE_POOL_NAME ): '"{}"'.format(self.STORAGE_POOL_ID), 'instances/ProtectionDomain::{}'.format( self.PROT_DOMAIN_ID ): {'id': self.PROT_DOMAIN_ID}, 'instances/StoragePool::{}'.format( self.STORAGE_POOL_ID ): {'id': self.STORAGE_POOL_ID}, 'types/Domain/instances/getByName::' + self.PROT_DOMAIN_NAME: '"{}"'.format(self.PROT_DOMAIN_ID), }, } with mock.patch('cinder.volume.volume_utils.' 'paginate_entries_list') as mpage: test_func = self.driver.get_manageable_volumes test_func(cinder_objs, marker, limit, offset, sort_keys, sort_dirs) mpage.assert_called_once_with( expected_refs, marker, limit, offset, sort_keys, sort_dirs ) def test_get_manageable_volumes(self): """Default success case. 
Given a list of PowerFlex volumes from the REST API, give back a list of volume references. """ self._test_get_manageable_things() def test_get_manageable_volumes_connected_vol(self): """Make sure volumes connected to hosts are flagged as unsafe.""" mapped_sdc = deepcopy(MANAGEABLE_FLEX_VOLS) mapped_sdc[0]['mappedSdcInfo'] = ["host1"] mapped_sdc[1]['mappedSdcInfo'] = ["host1", "host2"] # change up the expected results expected_refs = deepcopy(MANAGEABLE_FLEX_VOL_REFS) for x in range(len(mapped_sdc)): sdc = mapped_sdc[x]['mappedSdcInfo'] if sdc and len(sdc) > 0: expected_refs[x]['safe_to_manage'] = False expected_refs[x]['reason_not_safe'] \ = 'Volume mapped to %d host(s).' % len(sdc) self._test_get_manageable_things(expected_refs=expected_refs, powerflex_objects=mapped_sdc) def test_get_manageable_volumes_already_managed(self): """Make sure volumes already owned by cinder are flagged as unsafe.""" cinder_vol = fake_volume.fake_volume_obj(mock.MagicMock()) cinder_vol.id = VOLUME_ID cinder_vol.provider_id = PROVIDER_ID cinders_vols = [cinder_vol] # change up the expected results expected_refs = deepcopy(MANAGEABLE_FLEX_VOL_REFS) expected_refs[0]['reference'] = {'source-id': PROVIDER_ID} expected_refs[0]['safe_to_manage'] = False expected_refs[0]['reason_not_safe'] = 'Volume already managed.' expected_refs[0]['cinder_id'] = VOLUME_ID self._test_get_manageable_things(expected_refs=expected_refs, cinder_objs=cinders_vols) def test_get_manageable_volumes_no_snapshots(self): """Make sure refs returned do not include snapshots.""" volumes = deepcopy(MANAGEABLE_FLEX_VOLS) volumes.append(POWERFLEX_SNAPSHOT) self._test_get_manageable_things(powerflex_objects=volumes) def test_get_manageable_volumes_no_powerflex_volumes(self): """Expect no refs to be found if no volumes are on PowerFlex.""" self._test_get_manageable_things(powerflex_objects=[], expected_refs=[]) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/volume/drivers/dell_emc/powerflex/test_groups.py0000664000175000017500000002726600000000000030165 0ustar00zuulzuul00000000000000# Copyright (C) 2017 Dell Inc. or its subsidiaries. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import json from unittest import mock from cinder import context from cinder.objects import fields from cinder.tests.unit import fake_constants as fake from cinder.tests.unit import fake_group from cinder.tests.unit import fake_snapshot from cinder.tests.unit import fake_volume from cinder.tests.unit.volume.drivers.dell_emc import powerflex from cinder.tests.unit.volume.drivers.dell_emc.powerflex import mocks class TestGroups(powerflex.TestPowerFlexDriver): """Test cases for ``PowerFlexDriver groups support``""" def setUp(self): """Setup a test case environment. Creates a fake volume object and sets up the required API responses. 
""" super(TestGroups, self).setUp() self.ctx = context.RequestContext('fake', 'fake', auth_token=True) self.fake_grp_snap = {'id': 'group_snap_id', 'name': 'test_group_snapshot', 'group_id': fake.GROUP_ID, 'status': fields.GroupSnapshotStatus.AVAILABLE } self.group = ( fake_group.fake_group_obj( self.ctx, **{'id': fake.GROUP_ID})) fake_volume1 = fake_volume.fake_volume_obj( self.ctx, **{'id': fake.VOLUME_ID, 'provider_id': fake.PROVIDER_ID}) fake_volume2 = fake_volume.fake_volume_obj( self.ctx, **{'id': fake.VOLUME2_ID, 'provider_id': fake.PROVIDER2_ID}) fake_volume3 = fake_volume.fake_volume_obj( self.ctx, **{'id': fake.VOLUME3_ID, 'provider_id': fake.PROVIDER3_ID}) fake_volume4 = fake_volume.fake_volume_obj( self.ctx, **{'id': fake.VOLUME4_ID, 'provider_id': fake.PROVIDER4_ID}) self.volumes = [fake_volume1, fake_volume2] self.volumes2 = [fake_volume3, fake_volume4] fake_snapshot1 = fake_snapshot.fake_snapshot_obj( self.ctx, **{'id': fake.SNAPSHOT_ID, 'volume_id': fake.VOLUME_ID, 'volume': fake_volume1}) fake_snapshot2 = fake_snapshot.fake_snapshot_obj( self.ctx, **{'id': fake.SNAPSHOT2_ID, 'volume_id': fake.VOLUME2_ID, 'volume': fake_volume2}) self.snapshots = [fake_snapshot1, fake_snapshot2] self.snapshot_reply = json.dumps({ 'volumeIdList': ['sid1', 'sid2'], 'snapshotGroupId': 'sgid1'}) self.HTTPS_MOCK_RESPONSES = { self.RESPONSE_MODE.Valid: { 'instances/Volume::' + fake_volume1['provider_id']: {}, 'instances/Volume::' + fake_volume2['provider_id']: {}, 'instances/Volume::{}/action/removeVolume'.format( fake_volume1['provider_id'] ): fake_volume1['provider_id'], 'instances/Volume::{}/action/removeVolume'.format( fake_volume2['provider_id'] ): fake_volume2['provider_id'], 'instances/Volume::{}/action/removeMappedSdc'.format( fake_volume1['provider_id'] ): fake_volume1['provider_id'], 'instances/Volume::{}/action/removeMappedSdc'.format( fake_volume2['provider_id'] ): fake_volume2['provider_id'], 'instances/System/action/snapshotVolumes': self.snapshot_reply, }, self.RESPONSE_MODE.BadStatus: { 'instances/Volume::{}/action/removeVolume'.format( fake_volume1['provider_id'] ): mocks.MockHTTPSResponse( { 'errorCode': 401, 'message': 'BadStatus Volume Test', }, 401 ), 'instances/Volume::{}/action/removeVolume'.format( fake_volume2['provider_id'] ): mocks.MockHTTPSResponse( { 'errorCode': 401, 'message': 'BadStatus Volume Test', }, 401 ), 'instances/System/action/snapshotVolumes': self.BAD_STATUS_RESPONSE }, } @mock.patch('cinder.volume.volume_utils.is_group_a_cg_snapshot_type') def test_create_group(self, is_group_a_cg_snapshot_type): """Test group create. should throw NotImplementedError, is_group_a_cg_snapshot_type=False otherwise returns status of 'available' """ is_group_a_cg_snapshot_type.side_effect = [False, True] self.assertRaises(NotImplementedError, self.driver.create_group, self.ctx, self.group) model_update = self.driver.create_group(self.ctx, self.group) self.assertEqual(fields.GroupStatus.AVAILABLE, model_update['status']) @mock.patch('cinder.volume.volume_utils.is_group_a_cg_snapshot_type') def test_delete_group(self, is_group_a_cg_snapshot_type): """Test group deletion. 
should throw NotImplementedError, is_group_a_cg_snapshot_type=False otherwise returns status of 'deleted' """ is_group_a_cg_snapshot_type.side_effect = [False, True] self.assertRaises(NotImplementedError, self.driver.delete_group, self.ctx, self.group, self.volumes) model_update = self.driver.delete_group(self.ctx, self.group, self.volumes) self.assertEqual(fields.GroupStatus.DELETED, model_update[0]['status']) @mock.patch('cinder.volume.volume_utils.is_group_a_cg_snapshot_type') def test_update_group(self, is_group_a_cg_snapshot_type): """Test updating a group should throw NotImplementedError, is_group_a_cg_snapshot_type=False otherwise returns 'None' for each of the updates """ is_group_a_cg_snapshot_type.side_effect = [False, True] self.assertRaises(NotImplementedError, self.driver.update_group, self.ctx, self.group) mod_up, add_up, remove_up = self.driver.update_group(self.ctx, self.group) self.assertIsNone(mod_up) self.assertIsNone(add_up) self.assertIsNone(remove_up) @mock.patch('cinder.volume.volume_utils.is_group_a_cg_snapshot_type') def test_create_group_from_src_group(self, is_group_a_cg_snapshot_type): """Test creating group from source group should throw NotImplementedError, is_group_a_cg_snapshot_type=False otherwise returns list of volumes in 'available' state """ self.set_https_response_mode(self.RESPONSE_MODE.Valid) is_group_a_cg_snapshot_type.side_effect = [False, True] self.assertRaises(NotImplementedError, self.driver.create_group_from_src, self.ctx, self.group, self.volumes, source_group=self.group, source_vols=self.volumes) result_model_update, result_volumes_model_update = ( self.driver.create_group_from_src( self.ctx, self.group, self.volumes, source_group=self.group, source_vols=self.volumes)) self.assertEqual(fields.GroupStatus.AVAILABLE, result_model_update['status']) self.assertEqual(len(result_volumes_model_update), len(self.volumes)) @mock.patch('cinder.volume.volume_utils.is_group_a_cg_snapshot_type') def test_create_group_from_src_snapshot(self, is_group_a_cg_snapshot_type): """Test creating group from snapshot """ self.set_https_response_mode(self.RESPONSE_MODE.Valid) is_group_a_cg_snapshot_type.side_effect = [False, True] self.assertRaises(NotImplementedError, self.driver.create_group_from_src, self.ctx, self.group, self.volumes, group_snapshot=self.fake_grp_snap, snapshots=self.snapshots) result_model_update, result_volumes_model_update = ( self.driver.create_group_from_src( self.ctx, self.group, self.volumes, group_snapshot=self.fake_grp_snap, snapshots=self.snapshots)) self.assertEqual(fields.GroupStatus.AVAILABLE, result_model_update['status']) self.assertEqual(len(result_volumes_model_update), len(self.volumes)) @mock.patch('cinder.volume.volume_utils.is_group_a_cg_snapshot_type') def test_delete_group_snapshot(self, is_group_a_cg_snapshot_type): """Test deleting group snapshot should throw NotImplementedError, is_group_a_cg_snapshot_type=False otherwise returns model updates """ is_group_a_cg_snapshot_type.side_effect = [False, True] self.set_https_response_mode(self.RESPONSE_MODE.Valid) self.snapshots[0].volume = self.volumes[0] self.snapshots[1].volume = self.volumes[1] self.snapshots[0].provider_id = fake.PROVIDER_ID self.snapshots[1].provider_id = fake.PROVIDER2_ID self.assertRaises(NotImplementedError, self.driver.delete_group_snapshot, self.ctx, self.group, self.snapshots) result_model_update, result_snapshot_model_update = ( self.driver.delete_group_snapshot( self.ctx, self.group, self.snapshots )) 
self.assertEqual(fields.GroupSnapshotStatus.DELETED, result_model_update['status']) self.assertTrue(all(snapshot['status'] == 'deleted' for snapshot in result_snapshot_model_update)) @mock.patch('cinder.volume.volume_utils.is_group_a_cg_snapshot_type') def test_create_group_snapshot(self, is_group_a_cg_snapshot_type): """Test creating group snapshot should throw NotImplementedError, is_group_a_cg_snapshot_type=False otherwise returns model updates """ is_group_a_cg_snapshot_type.side_effect = [False, True] self.set_https_response_mode(self.RESPONSE_MODE.Valid) self.assertRaises(NotImplementedError, self.driver.create_group_snapshot, self.ctx, self.group, self.snapshots) result_model_update, result_snapshot_model_update = ( self.driver.create_group_snapshot( self.ctx, self.group, self.snapshots )) self.assertEqual(fields.GroupSnapshotStatus.AVAILABLE, result_model_update['status']) self.assertTrue(all(snapshot['status'] == 'available' for snapshot in result_snapshot_model_update)) self.assertEqual(len(result_snapshot_model_update), len(self.snapshots)) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/volume/drivers/dell_emc/powerflex/test_initialize_connection.py0000664000175000017500000001200600000000000033210 0ustar00zuulzuul00000000000000# Copyright (c) 2015 EMC Corporation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
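# --- Illustrative sketch (not part of the cinder source above) ---
# Standalone illustration of the mock pattern used throughout TestGroups:
# patching is_group_a_cg_snapshot_type with side_effect=[False, True] makes the
# first call return False (driving the NotImplementedError branch) and the
# second call return True (the normal consistency-group path). This is plain
# unittest.mock behavior, shown here with a generic Mock.
from unittest import mock

checker = mock.Mock(side_effect=[False, True])
assert checker() is False   # first call: non-CG group
assert checker() is True    # second call: CG-enabled group
# --- end sketch ---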
from unittest import mock from cinder import context from cinder.tests.unit import fake_constants as fake from cinder.tests.unit import fake_volume from cinder.tests.unit.volume.drivers.dell_emc import powerflex from cinder.tests.unit.volume.drivers.dell_emc.powerflex import mocks class TestInitializeConnection(powerflex.TestPowerFlexDriver): def setUp(self): """Setup a test case environment.""" super(TestInitializeConnection, self).setUp() self.connector = {'sdc_guid': 'fake_guid'} self.ctx = ( context.RequestContext('fake', 'fake', True, auth_token=True)) self.volume = fake_volume.fake_volume_obj( self.ctx, **{'provider_id': fake.PROVIDER_ID}) self.sdc = { "id": "sdc1", } self.HTTPS_MOCK_RESPONSES = { self.RESPONSE_MODE.Valid: { 'types/Sdc/instances': [{'id': "sdc1", 'sdcGuid': 'fake_guid'}], 'instances/Volume::{}/action/setMappedSdcLimits'.format( self.volume.provider_id ): mocks.MockHTTPSResponse({}, 200), }, } def test_only_qos(self): qos = {'maxIOPS': 1000, 'maxBWS': 2048} extraspecs = {} self._initialize_connection(qos, extraspecs)['data'] self.driver.primary_client.set_sdc_limits.assert_called_once_with( self.volume.provider_id, self.sdc["id"], '2048', '1000') def test_no_qos(self): qos = {} extraspecs = {} self._initialize_connection(qos, extraspecs)['data'] self.driver.primary_client.set_sdc_limits.assert_not_called def test_qos_scaling_and_max(self): qos = {'maxIOPS': 100, 'maxBWS': 2048, 'maxIOPSperGB': 10, 'maxBWSperGB': 128} extraspecs = {} self.volume.size = 8 self._initialize_connection(qos, extraspecs)['data'] self.driver.primary_client.set_sdc_limits.assert_called_once_with( self.volume.provider_id, self.sdc["id"], '1024', '80') self.volume.size = 24 self._initialize_connection(qos, extraspecs)['data'] self.driver.primary_client.set_sdc_limits.assert_called_once_with( self.volume.provider_id, self.sdc["id"], '2048', '100') def test_qos_scaling_no_max(self): qos = {'maxIOPSperGB': 10, 'maxBWSperGB': 128} extraspecs = {} self.volume.size = 8 self._initialize_connection(qos, extraspecs)['data'] self.driver.primary_client.set_sdc_limits.assert_called_once_with( self.volume.provider_id, self.sdc["id"], '1024', '80') def test_qos_round_up(self): qos = {'maxBWS': 2000, 'maxBWSperGB': 100} extraspecs = {} self.volume.size = 8 self._initialize_connection(qos, extraspecs)['data'] self.driver.primary_client.set_sdc_limits.assert_called_once_with( self.volume.provider_id, self.sdc["id"], '1024', None) self.volume.size = 24 self._initialize_connection(qos, extraspecs)['data'] self.driver.primary_client.set_sdc_limits.assert_called_once_with( self.volume.provider_id, self.sdc["id"], '2048', None) def test_vol_id(self): extraspecs = qos = {} connection_properties = ( self._initialize_connection(qos, extraspecs)['data']) self.assertEqual(fake.PROVIDER_ID, connection_properties['scaleIO_volume_id']) def _initialize_connection(self, qos, extraspecs): self.driver._get_volumetype_qos = mock.MagicMock() self.driver._get_volumetype_qos.return_value = qos self.driver._get_volumetype_extraspecs = mock.MagicMock() self.driver._get_volumetype_extraspecs.return_value = extraspecs self.driver._attach_volume_to_host = mock.MagicMock( return_value=None ) self.driver._check_volume_mapped = mock.MagicMock( return_value=None ) self.driver.primary_client.set_sdc_limits = mock.MagicMock() res = self.driver.initialize_connection(self.volume, self.connector) self.driver._get_volumetype_extraspecs.assert_called_once_with( self.volume) self.driver._attach_volume_to_host.assert_called_once_with( self.volume, 
self.sdc['id']) self.driver._check_volume_mapped.assert_called_once_with( self.sdc['id'], self.volume.provider_id) return res ././@PaxHeader0000000000000000000000000000021000000000000011446 xustar0000000000000000114 path=cinder-27.0.0/cinder/tests/unit/volume/drivers/dell_emc/powerflex/test_initialize_connection_snapshot.py 22 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/volume/drivers/dell_emc/powerflex/test_initialize_connection_snapsho0000664000175000017500000001260600000000000034322 0ustar00zuulzuul00000000000000# Copyright (c) 2017 Dell Inc. or its subsidiaries. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from unittest import mock from cinder import context from cinder.tests.unit import fake_constants as fake from cinder.tests.unit import fake_snapshot from cinder.tests.unit import fake_volume from cinder.tests.unit.volume.drivers.dell_emc import powerflex from cinder.tests.unit.volume.drivers.dell_emc.powerflex import mocks class TestInitializeConnectionSnapshot(powerflex.TestPowerFlexDriver): def setUp(self): super(TestInitializeConnectionSnapshot, self).setUp() self.snapshot_id = 'SNAPID' self.ctx = context.RequestContext('fake', 'fake', auth_token=True) self.fake_path = '/fake/path/vol-xx' self.volume = fake_volume.fake_volume_obj( self.ctx, **{'provider_id': fake.PROVIDER_ID}) self.connector = {'sdc_guid': 'fake_guid'} self.sdc = { "id": "sdc1", } self.HTTPS_MOCK_RESPONSES = { self.RESPONSE_MODE.Valid: { 'types/Sdc/instances': [{'id': "sdc1", 'sdcGuid': 'fake_guid'}], 'instances/Volume::{}/action/setMappedSdcLimits'.format( self.snapshot_id ): mocks.MockHTTPSResponse({}, 200), }, } def test_backup_can_use_snapshots(self): """Make sure the driver can use snapshots for backup.""" use_snaps = self.driver.backup_use_temp_snapshot() self.assertTrue(use_snaps) def test_initialize_connection_without_size(self): """Test initializing when we do not know the snapshot size. ScaleIO can determine QOS specs based upon volume/snapshot size The QOS keys should not be returned """ snapshot = fake_snapshot.fake_snapshot_obj( self.ctx, **{'volume': self.volume, 'provider_id': self.snapshot_id}) self.driver._attach_volume_to_host = mock.MagicMock( return_value=None ) self.driver._check_volume_mapped = mock.MagicMock( return_value=None ) props = self.driver.initialize_connection_snapshot( snapshot, self.connector) # validate the volume type self.assertEqual(props['driver_volume_type'], 'scaleio') # make sure a volume name and id exist self.assertIsNotNone(props['data']['scaleIO_volname']) self.assertEqual(self.snapshot_id, props['data']['scaleIO_volume_id']) # make sure QOS properties are not set self.assertNotIn('iopsLimit', props['data']) def test_initialize_connection_with_size(self): """Test initializing when we know the snapshot size. 
PowerFlex can determine QOS specs based upon volume/snapshot size The QOS keys should not be returned """ snapshot = fake_snapshot.fake_snapshot_obj( self.ctx, **{'volume': self.volume, 'provider_id': self.snapshot_id, 'volume_size': 8}) self.driver._attach_volume_to_host = mock.MagicMock( return_value=None ) self.driver._check_volume_mapped = mock.MagicMock( return_value=None ) props = self.driver.initialize_connection_snapshot( snapshot, self.connector) # validate the volume type self.assertEqual(props['driver_volume_type'], 'scaleio') # make sure a volume name and id exist self.assertIsNotNone(props['data']['scaleIO_volname']) self.assertEqual(self.snapshot_id, props['data']['scaleIO_volume_id']) # make sure QOS properties are not set self.assertNotIn('iopsLimit', props['data']) def test_qos_specs(self): """Ensure QOS specs are honored if present.""" qos = {'maxIOPS': 1000, 'maxBWS': 2048} snapshot = fake_snapshot.fake_snapshot_obj( self.ctx, **{'volume': self.volume, 'provider_id': self.snapshot_id, 'volume_size': 8}) extraspecs = {} self.driver._get_volumetype_qos = mock.MagicMock() self.driver._get_volumetype_qos.return_value = qos self.driver._get_volumetype_extraspecs = mock.MagicMock() self.driver._get_volumetype_extraspecs.return_value = extraspecs self.driver._attach_volume_to_host = mock.MagicMock( return_value=None ) self.driver._check_volume_mapped = mock.MagicMock( return_value=None ) self.driver.primary_client.set_sdc_limits = mock.MagicMock() self.driver.initialize_connection_snapshot( snapshot, self.connector) self.driver.primary_client.set_sdc_limits.assert_called_once_with( self.snapshot_id, self.sdc["id"], '2048', '1000') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/volume/drivers/dell_emc/powerflex/test_manage_existing.py0000664000175000017500000001273700000000000032005 0ustar00zuulzuul00000000000000# Copyright (c) 2016 EMC Corporation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from unittest.mock import patch import urllib.parse from cinder import context from cinder import exception from cinder.tests.unit import fake_constants as fake from cinder.tests.unit import fake_volume from cinder.tests.unit.volume.drivers.dell_emc import powerflex from cinder.tests.unit.volume.drivers.dell_emc.powerflex import mocks from cinder.volume.drivers.dell_emc.powerflex import utils as flex_utils from cinder.volume import volume_types class TestManageExisting(powerflex.TestPowerFlexDriver): """Test cases for ``PowerFlexDriver.manage_existing()``""" def setUp(self): """Setup a test case environment. Creates a fake volume object and sets up the required API responses. 
""" super(TestManageExisting, self).setUp() ctx = context.RequestContext('fake', 'fake', auth_token=True) self.volume = fake_volume.fake_volume_obj( ctx, **{'provider_id': fake.PROVIDER_ID}) self.volume_attached = fake_volume.fake_volume_obj( ctx, **{'provider_id': fake.PROVIDER2_ID}) self.volume_no_provider_id = fake_volume.fake_volume_obj(ctx) self.volume_name_2x_enc = urllib.parse.quote( urllib.parse.quote(flex_utils.id_to_base64(self.volume.id)) ) self.HTTPS_MOCK_RESPONSES = { self.RESPONSE_MODE.Valid: { 'instances/Volume::' + self.volume['provider_id']: mocks.MockHTTPSResponse({ 'id': fake.PROVIDER_ID, 'sizeInKb': 8000000, 'mappedSdcInfo': None }, 200) }, self.RESPONSE_MODE.BadStatus: { 'instances/Volume::' + self.volume['provider_id']: mocks.MockHTTPSResponse({ 'errorCode': 401, 'message': 'BadStatus Volume Test', }, 401), 'instances/Volume::' + self.volume_attached['provider_id']: mocks.MockHTTPSResponse({ 'id': fake.PROVIDER2_ID, 'sizeInKb': 8388608, 'mappedSdcInfo': 'Mapped' }, 200) } } def test_no_source_id(self): existing_ref = {'source-name': 'scaleioVolName'} self.assertRaises(exception.ManageExistingInvalidReference, self.driver.manage_existing, self.volume, existing_ref) def test_no_type_id(self): self.volume['volume_type_id'] = None existing_ref = {'source-id': fake.PROVIDER_ID} self.assertRaises(exception.ManageExistingVolumeTypeMismatch, self.driver.manage_existing, self.volume, existing_ref) @patch.object( volume_types, 'get_volume_type', return_value={'extra_specs': {'volume_backend_name': 'ScaleIO'}}) def test_volume_not_found(self, _mock_volume_type): self.volume['volume_type_id'] = fake.VOLUME_TYPE_ID existing_ref = {'source-id': fake.PROVIDER_ID} self.set_https_response_mode(self.RESPONSE_MODE.BadStatus) self.assertRaises(exception.VolumeBackendAPIException, self.driver.manage_existing, self.volume, existing_ref) @patch.object( volume_types, 'get_volume_type', return_value={'extra_specs': {'volume_backend_name': 'ScaleIO'}}) def test_volume_attached(self, _mock_volume_type): self.volume_attached['volume_type_id'] = fake.VOLUME_TYPE_ID existing_ref = {'source-id': fake.PROVIDER2_ID} self.set_https_response_mode(self.RESPONSE_MODE.BadStatus) self.assertRaises(exception.ManageExistingInvalidReference, self.driver.manage_existing, self.volume_attached, existing_ref) @patch.object( volume_types, 'get_volume_type', return_value={'extra_specs': {'volume_backend_name': 'ScaleIO'}}) def test_manage_get_size_calc(self, _mock_volume_type): self.volume['volume_type_id'] = fake.VOLUME_TYPE_ID existing_ref = {'source-id': fake.PROVIDER_ID} self.set_https_response_mode(self.RESPONSE_MODE.Valid) result = self.driver.manage_existing_get_size(self.volume, existing_ref) self.assertEqual(8, result) @patch.object( volume_types, 'get_volume_type', return_value={'extra_specs': {'volume_backend_name': 'ScaleIO'}}) def test_manage_existing_valid(self, _mock_volume_type): self.volume['volume_type_id'] = fake.VOLUME_TYPE_ID existing_ref = {'source-id': fake.PROVIDER_ID} result = self.driver.manage_existing(self.volume, existing_ref) self.assertEqual(fake.PROVIDER_ID, result['provider_id']) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/volume/drivers/dell_emc/powerflex/test_manage_existing_snapshot.py0000664000175000017500000001550000000000000033713 0ustar00zuulzuul00000000000000# Copyright (c) 2016 EMC Corporation. # All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from unittest.mock import patch from cinder import context from cinder import exception from cinder.tests.unit import fake_constants as fake from cinder.tests.unit import fake_snapshot from cinder.tests.unit import fake_volume from cinder.tests.unit.volume.drivers.dell_emc import powerflex from cinder.tests.unit.volume.drivers.dell_emc.powerflex import mocks from cinder.volume import volume_types class TestManageExistingSnapshot(powerflex.TestPowerFlexDriver): """Test cases for ``PowerFlexDriver.manage_existing_snapshot()``""" def setUp(self): """Setup a test case environment. Creates a fake volume object and sets up the required API responses. """ super(TestManageExistingSnapshot, self).setUp() ctx = context.RequestContext('fake', 'fake', auth_token=True) self.volume = fake_volume.fake_volume_obj( ctx, **{'provider_id': fake.PROVIDER_ID}) self.snapshot = fake_snapshot.fake_snapshot_obj( ctx, **{'provider_id': fake.PROVIDER2_ID}) self.snapshot2 = fake_snapshot.fake_snapshot_obj( ctx, **{'provider_id': fake.PROVIDER3_ID}) self.snapshot.volume = self.snapshot2.volume = self.volume self.snapshot['volume_type_id'] = fake.VOLUME_TYPE_ID self.snapshot2['volume_type_id'] = fake.VOLUME_TYPE_ID self.snapshot_attached = fake_snapshot.fake_snapshot_obj( ctx, **{'provider_id': fake.PROVIDER4_ID}) self.HTTPS_MOCK_RESPONSES = { self.RESPONSE_MODE.Valid: { 'instances/Volume::' + self.volume['provider_id']: mocks.MockHTTPSResponse({ 'id': fake.PROVIDER_ID, 'sizeInKb': 8388608, 'mappedSdcInfo': None, 'ancestorVolumeId': None }, 200), 'instances/Volume::' + self.snapshot['provider_id']: mocks.MockHTTPSResponse({ 'id': fake.PROVIDER2_ID, 'sizeInKb': 8000000, 'mappedSdcInfo': None, 'ancestorVolumeId': fake.PROVIDER_ID }, 200), 'instances/Volume::' + self.snapshot2['provider_id']: mocks.MockHTTPSResponse({ 'id': fake.PROVIDER3_ID, 'sizeInKb': 8388608, 'mappedSdcInfo': None, 'ancestorVolumeId': fake.PROVIDER2_ID }, 200) }, self.RESPONSE_MODE.BadStatus: { 'instances/Volume::' + self.snapshot['provider_id']: mocks.MockHTTPSResponse({ 'errorCode': 401, 'message': 'BadStatus Volume Test', }, 401), 'instances/Volume::' + self.snapshot2['provider_id']: mocks.MockHTTPSResponse({ 'id': fake.PROVIDER3_ID, 'sizeInKb': 8388608, 'ancestorVolumeId': fake.PROVIDER2_ID }, 200), 'instances/Volume::' + self.snapshot_attached['provider_id']: mocks.MockHTTPSResponse({ 'id': fake.PROVIDER4_ID, 'sizeInKb': 8388608, 'mappedSdcInfo': 'Mapped', 'ancestorVolumeId': fake.PROVIDER_ID }, 200) } } def test_no_source_id(self): existing_ref = {'source-name': 'scaleioSnapName'} self.assertRaises(exception.ManageExistingInvalidReference, self.driver.manage_existing_snapshot, self.snapshot, existing_ref) @patch.object( volume_types, 'get_volume_type', return_value={'extra_specs': {'volume_backend_name': 'ScaleIO'}}) def test_snapshot_not_found(self, _mock_volume_type): existing_ref = {'source-id': fake.PROVIDER2_ID} self.set_https_response_mode(self.RESPONSE_MODE.BadStatus) 
self.assertRaises(exception.VolumeBackendAPIException, self.driver.manage_existing_snapshot, self.snapshot, existing_ref) @patch.object( volume_types, 'get_volume_type', return_value={'extra_specs': {'volume_backend_name': 'ScaleIO'}}) def test_snapshot_attached(self, _mock_volume_type): self.snapshot_attached['volume_type_id'] = fake.VOLUME_TYPE_ID existing_ref = {'source-id': fake.PROVIDER4_ID} self.set_https_response_mode(self.RESPONSE_MODE.BadStatus) self.assertRaises(exception.ManageExistingInvalidReference, self.driver.manage_existing_snapshot, self.snapshot_attached, existing_ref) @patch.object( volume_types, 'get_volume_type', return_value={'extra_specs': {'volume_backend_name': 'ScaleIO'}}) def test_different_ancestor(self, _mock_volume_type): existing_ref = {'source-id': fake.PROVIDER3_ID} self.set_https_response_mode(self.RESPONSE_MODE.Valid) self.assertRaises(exception.ManageExistingInvalidReference, self.driver.manage_existing_snapshot, self.snapshot2, existing_ref) @patch.object( volume_types, 'get_volume_type', return_value={'extra_specs': {'volume_backend_name': 'ScaleIO'}}) def test_manage_snapshot_get_size_calc(self, _mock_volume_type): existing_ref = {'source-id': fake.PROVIDER2_ID} self.set_https_response_mode(self.RESPONSE_MODE.Valid) result = self.driver.manage_existing_snapshot_get_size( self.snapshot, existing_ref) self.assertEqual(8, result) @patch.object( volume_types, 'get_volume_type', return_value={'extra_specs': {'volume_backend_name': 'ScaleIO'}}) def test_manage_existing_snapshot_valid(self, _mock_volume_type): existing_ref = {'source-id': fake.PROVIDER2_ID} result = self.driver.manage_existing_snapshot( self.snapshot, existing_ref) self.assertEqual(fake.PROVIDER2_ID, result['provider_id']) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/volume/drivers/dell_emc/powerflex/test_migrate_volume.py0000664000175000017500000002314000000000000031650 0ustar00zuulzuul00000000000000# Copyright (c) 2020 Dell Inc. or its subsidiaries. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from unittest import mock import ddt from oslo_service import loopingcall from cinder import context from cinder import exception from cinder.tests.unit import fake_constants as fake from cinder.tests.unit import fake_volume from cinder.tests.unit.volume.drivers.dell_emc import powerflex MIGRATE_VOLUME_PARAMS_CASES = ( # Cases for testing _get_volume_params function. 
# +----------------------------------------------------+------------------+ # |Volume Type|Real provisioning|Conversion|Compression|Pool support thick| # +-----------+-----------------+----------+-----------+-----+------------+ ('ThinProvisioned', 'ThinProvisioned', 'NoConversion', 'None', False), ('ThinProvisioned', 'ThickProvisioned', 'ThickToThin', 'None', True), ('ThickProvisioned', 'ThinProvisioned', 'NoConversion', 'None', False), ('ThickProvisioned', 'ThinProvisioned', 'ThinToThick', 'None', True), ('ThinProvisioned', 'ThinProvisioned', 'NoConversion', 'Normal', False), ('ThinProvisioned', 'ThickProvisioned', 'ThickToThin', 'Normal', False), ('ThinProvisioned', 'ThickProvisioned', 'ThickToThin', 'None', False) ) @ddt.ddt class TestMigrateVolume(powerflex.TestPowerFlexDriver): """Test cases for ``PowerFlexDriver.migrate_volume()``""" def setUp(self): """Setup a test case environment. Creates a fake volume object and sets up the required API responses. """ super(TestMigrateVolume, self).setUp() ctx = context.RequestContext('fake', 'fake', auth_token=True) host = 'host@backend#{}:{}'.format( self.PROT_DOMAIN_NAME, self.STORAGE_POOL_NAME) self.volume = fake_volume.fake_volume_obj( ctx, **{'provider_id': fake.PROVIDER_ID, 'host': host, 'volume_type_id': fake.VOLUME_TYPE_ID}) self.dst_host = {'host': host} self.DST_STORAGE_POOL_NAME = 'SP2' self.DST_STORAGE_POOL_ID = str('2') self.fake_vtree_id = 'c075744900000001' self.migration_success = (True, {}) self.migration_host_assisted = (False, None) self.HTTPS_MOCK_RESPONSES = { self.RESPONSE_MODE.Valid: { 'types/Domain/instances/getByName::{}'.format( self.PROT_DOMAIN_NAME ): '"{}"'.format(self.PROT_DOMAIN_ID), 'types/Pool/instances/getByName::{},{}'.format( self.PROT_DOMAIN_ID, self.STORAGE_POOL_NAME ): '"{}"'.format(self.STORAGE_POOL_ID), 'types/Pool/instances/getByName::{},{}'.format( self.PROT_DOMAIN_ID, self.DST_STORAGE_POOL_NAME ): '"{}"'.format(self.DST_STORAGE_POOL_ID), 'instances/ProtectionDomain::{}'.format( self.PROT_DOMAIN_ID ): {'id': self.PROT_DOMAIN_ID}, 'instances/StoragePool::{}'.format( self.STORAGE_POOL_ID ): {'id': self.STORAGE_POOL_ID, 'zeroPaddingEnabled': True}, 'instances/StoragePool::{}'.format( self.DST_STORAGE_POOL_ID ): {'id': self.DST_STORAGE_POOL_ID, 'zeroPaddingEnabled': True}, 'instances/Volume::{}'.format( self.volume.provider_id ): {'volumeType': 'ThinProvisioned', 'vtreeId': self.fake_vtree_id}, 'instances/Volume::{}/action/migrateVTree'.format( self.volume.provider_id ): {}, 'instances/VTree::{}'.format( self.fake_vtree_id ): {'vtreeMigrationInfo': { 'migrationStatus': 'NotInMigration', 'migrationPauseReason': None}} }, self.RESPONSE_MODE.Invalid: { 'instances/Volume::{}'.format( self.volume.provider_id ): {'vtreeId': self.fake_vtree_id}, 'instances/VTree::{}'.format( self.fake_vtree_id ): {'vtreeMigrationInfo': {'migrationPauseReason': None}} }, self.RESPONSE_MODE.BadStatus: { 'instances/Volume::{}/action/migrateVTree'.format( self.volume.provider_id ): self.BAD_STATUS_RESPONSE }, } self.volumetype_extraspecs_mock = self.mock_object( self.driver, '_get_volumetype_extraspecs', return_value={'provisioning:type': 'thin'} ) self.volume_is_replicated_mock = self.mock_object( self.volume, 'is_replicated', return_value=False ) def test_migrate_volume(self): ret = self.driver.migrate_volume(None, self.volume, self.dst_host) self.assertEqual(self.migration_success, ret) def test_migrate_replicated_volume(self): self.volume_is_replicated_mock.return_value = True self.assertRaises(exception.InvalidVolume, 
self.driver.migrate_volume, None, self.volume, self.dst_host) def test_migrate_volume_crossbackend_not_supported(self): dst_host = {'host': 'host@another_backend#PD1:P1'} ret = self.driver.migrate_volume(None, self.volume, dst_host) self.assertEqual(self.migration_host_assisted, ret) def test_migrate_volume_bad_status_response(self): with self.custom_response_mode( **{'instances/Volume::{}/action/migrateVTree'.format( self.volume.provider_id): self.RESPONSE_MODE.BadStatus} ): self.assertRaises(exception.VolumeBackendAPIException, self.driver.migrate_volume, None, self.volume, self.dst_host) def test_migrate_volume_migration_in_progress(self): with self.custom_response_mode( **{'instances/Volume::{}/action/migrateVTree'.format( self.volume.provider_id): powerflex.mocks.MockHTTPSResponse( { 'errorCode': 717, 'message': 'Migration in progress', }, 500)} ): ret = self.driver.migrate_volume(None, self.volume, self.dst_host) self.assertEqual(self.migration_success, ret) @mock.patch( 'cinder.volume.drivers.dell_emc.powerflex.driver.PowerFlexDriver.' '_wait_for_volume_migration_to_complete', side_effect=loopingcall.LoopingCallTimeOut() ) def test_migrate_volume_migration_in_progress_timeout_expired(self, m): _, upd = self.driver.migrate_volume(None, self.volume, self.dst_host) self.assertEqual('maintenance', upd['status']) def test_migrate_volume_migration_failed(self): with self.custom_response_mode( **{'instances/VTree::{}'.format(self.fake_vtree_id): powerflex.mocks.MockHTTPSResponse( {'vtreeMigrationInfo': {'migrationStatus': 'NotInMigration', 'migrationPauseReason': 'MigrationError'}}, 200)} ): self.assertRaises(exception.VolumeMigrationFailed, self.driver.migrate_volume, None, self.volume, self.dst_host) def test_get_real_provisioning_and_vtree_malformed_response(self): self.set_https_response_mode(self.RESPONSE_MODE.Invalid) self.assertRaises(exception.MalformedResponse, self.driver._get_real_provisioning_and_vtree, self.volume.provider_id) def test_wait_for_volume_migration_to_complete_malformed_response(self): self.set_https_response_mode(self.RESPONSE_MODE.Invalid) self.assertRaises(exception.MalformedResponse, self.driver._wait_for_volume_migration_to_complete, self.fake_vtree_id, self.volume.provider_id) @ddt.data(*MIGRATE_VOLUME_PARAMS_CASES) def test_get_migrate_volume_params(self, data): (vol_type, real_prov, conversion, compression, sup_thick) = data self.mock_object(self.driver, '_get_provisioning_and_compression', return_value=(vol_type, compression)) self.mock_object(self.driver, '_check_pool_support_thick_vols', return_value=sup_thick) domain_name, pool_name = ( self.driver._extract_domain_and_pool_from_host( self.dst_host['host'] ) ) ret = self.driver._get_volume_migration_params(self.volume, domain_name, pool_name, real_prov) self.assertTrue(ret['volTypeConversion'] == conversion) self.assertTrue(ret['compressionMethod'] == compression) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/volume/drivers/dell_emc/powerflex/test_misc.py0000664000175000017500000003465500000000000027601 0ustar00zuulzuul00000000000000# Copyright (c) 2013 - 2015 EMC Corporation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import json from unittest import mock import ddt from cinder import context from cinder import exception from cinder.tests.unit import fake_constants as fake from cinder.tests.unit import fake_volume from cinder.tests.unit.volume.drivers.dell_emc import powerflex from cinder.tests.unit.volume.drivers.dell_emc.powerflex import mocks from cinder.volume import configuration @ddt.ddt class TestMisc(powerflex.TestPowerFlexDriver): DOMAIN_ID = '1' POOL_ID = '1' def setUp(self): """Set up the test case environment. Defines the mock HTTPS responses for the REST API calls. """ super(TestMisc, self).setUp() self.ctx = context.RequestContext('fake', 'fake', auth_token=True) self.volume = fake_volume.fake_volume_obj( self.ctx, **{'name': 'vol1', 'provider_id': fake.PROVIDER_ID} ) self.new_volume = fake_volume.fake_volume_obj( self.ctx, **{'name': 'vol2', 'provider_id': fake.PROVIDER2_ID} ) self.HTTPS_MOCK_RESPONSES = { self.RESPONSE_MODE.Valid: { 'types/Domain/instances/getByName::{}'.format( self.PROT_DOMAIN_NAME ): '"{}"'.format(self.PROT_DOMAIN_ID), 'types/Pool/instances/getByName::{},{}'.format( self.PROT_DOMAIN_ID, self.STORAGE_POOL_NAME ): '"{}"'.format(self.STORAGE_POOL_ID), 'types/StoragePool/instances/action/querySelectedStatistics': { '"{}"'.format(self.STORAGE_POOL_NAME): { 'capacityAvailableForVolumeAllocationInKb': 5000000, 'capacityLimitInKb': 16000000, 'spareCapacityInKb': 6000000, 'thickCapacityInUseInKb': 266, 'thinCapacityAllocatedInKm': 0, 'snapCapacityInUseInKb': 266, }, }, 'instances/Volume::{}/action/setVolumeName'.format( self.volume['provider_id']): self.new_volume['provider_id'], 'instances/Volume::{}/action/setVolumeName'.format( self.new_volume['provider_id']): self.volume['provider_id'], 'version': '"{}"'.format('2.0.1'), 'instances/StoragePool::{}'.format( self.STORAGE_POOL_ID ): { 'name': self.STORAGE_POOL_NAME, 'id': self.STORAGE_POOL_ID, 'protectionDomainId': self.PROT_DOMAIN_ID, 'zeroPaddingEnabled': 'true', }, 'instances/ProtectionDomain::{}'.format( self.PROT_DOMAIN_ID ): { 'name': self.PROT_DOMAIN_NAME, 'id': self.PROT_DOMAIN_ID }, }, self.RESPONSE_MODE.BadStatus: { 'types/Domain/instances/getByName::' + self.PROT_DOMAIN_NAME: self.BAD_STATUS_RESPONSE, 'instances/Volume::{}/action/setVolumeName'.format( self.volume['provider_id']): mocks.MockHTTPSResponse( { 'message': 'Invalid volume.', 'httpStatusCode': 400, 'errorCode': self.VOLUME_NOT_FOUND_ERROR }, 400), }, self.RESPONSE_MODE.Invalid: { 'types/Domain/instances/getByName::' + self.PROT_DOMAIN_NAME: None, 'instances/Volume::{}/action/setVolumeName'.format( self.volume['provider_id']): mocks.MockHTTPSResponse( { 'message': 'Invalid volume.', 'httpStatusCode': 400, 'errorCode': 0 }, 400), }, } def test_valid_configuration(self): self.driver.storage_pools = self.STORAGE_POOLS self.driver.check_for_setup_error() def test_no_storage_pools(self): """No storage pools. 
INVALID Storage pools must be set """ self.driver.storage_pools = None self.assertRaises(exception.InvalidInput, self.driver.check_for_setup_error) def test_invalid_storage_pools(self): """Invalid storage pools data""" self.driver.storage_pools = "test" self.assertRaises(exception.InvalidInput, self.driver.check_for_setup_error) def test_volume_size_round_true(self): self.driver._check_volume_size(1) def test_volume_size_round_false(self): self.override_config('powerflex_round_volume_capacity', False, configuration.SHARED_CONF_GROUP) self.assertRaises(exception.VolumeBackendAPIException, self.driver._check_volume_size, 1) def test_get_volume_stats_bad_status(self): self.driver.storage_pools = self.STORAGE_POOLS self.set_https_response_mode(self.RESPONSE_MODE.BadStatus) self.assertRaises(exception.VolumeBackendAPIException, self.driver.get_volume_stats, True) def test_get_volume_stats_invalid_domain(self): self.driver.storage_pools = self.STORAGE_POOLS self.set_https_response_mode(self.RESPONSE_MODE.Invalid) self.assertRaises(exception.VolumeBackendAPIException, self.driver.get_volume_stats, True) def test_get_volume_stats(self): self.driver.storage_pools = self.STORAGE_POOLS self.driver.get_volume_stats(True) def _setup_valid_variant_property(self, property): """Setup valid response that returns a variety of property name """ self.HTTPS_MOCK_RESPONSES = { self.RESPONSE_MODE.ValidVariant: { 'types/Domain/instances/getByName::{}'.format( self.PROT_DOMAIN_NAME ): '"{}"'.format(self.PROT_DOMAIN_ID), 'types/Pool/instances/getByName::{},{}'.format( self.PROT_DOMAIN_ID, self.STORAGE_POOL_NAME ): '"{}"'.format(self.STORAGE_POOL_ID), 'instances/ProtectionDomain::{}'.format( self.PROT_DOMAIN_ID ): { 'name': self.PROT_DOMAIN_NAME, 'id': self.PROT_DOMAIN_ID }, 'instances/StoragePool::{}'.format( self.STORAGE_POOL_ID ): { 'name': self.STORAGE_POOL_NAME, 'id': self.STORAGE_POOL_ID, 'protectionDomainId': self.PROT_DOMAIN_ID, 'zeroPaddingEnabled': 'true', }, 'types/StoragePool/instances/action/querySelectedStatistics': { '"{}"'.format(self.STORAGE_POOL_NAME): { 'capacityAvailableForVolumeAllocationInKb': 5000000, 'capacityLimitInKb': 16000000, 'spareCapacityInKb': 6000000, 'thickCapacityInUseInKb': 266, 'snapCapacityInUseInKb': 266, property: 0, }, }, 'instances/Volume::{}/action/setVolumeName'.format( self.volume['provider_id']): self.new_volume['provider_id'], 'instances/Volume::{}/action/setVolumeName'.format( self.new_volume['provider_id']): self.volume['provider_id'], 'version': '"{}"'.format('2.0.1'), } } def test_get_volume_stats_with_varying_properties(self): """Test getting volume stats with various property names In SIO 3.0, a property was renamed. The change is backwards compatible for now but this tests ensures that the driver is tolerant of that change """ self.driver.storage_pools = self.STORAGE_POOLS self._setup_valid_variant_property("thinCapacityAllocatedInKb") self.set_https_response_mode(self.RESPONSE_MODE.ValidVariant) self.driver.get_volume_stats(True) self._setup_valid_variant_property("nonexistentProperty") self.set_https_response_mode(self.RESPONSE_MODE.ValidVariant) self.driver.get_volume_stats(True) @mock.patch( 'cinder.volume.drivers.dell_emc.powerflex.rest_client.RestClient.' 
'rename_volume', return_value=None) def test_update_migrated_volume(self, mock_rename): test_vol = self.driver.update_migrated_volume( self.ctx, self.volume, self.new_volume, 'available') mock_rename.assert_called_with(self.new_volume, self.volume['id']) self.assertEqual({'_name_id': None, 'provider_location': None}, test_vol) @mock.patch( 'cinder.volume.drivers.dell_emc.powerflex.rest_client.RestClient.' 'rename_volume', return_value=None) def test_update_unavailable_migrated_volume(self, mock_rename): test_vol = self.driver.update_migrated_volume( self.ctx, self.volume, self.new_volume, 'unavailable') self.assertFalse(mock_rename.called) self.assertEqual({'_name_id': fake.VOLUME_ID, 'provider_location': None}, test_vol) @mock.patch( 'cinder.volume.drivers.dell_emc.powerflex.rest_client.RestClient.' 'rename_volume', side_effect=exception.VolumeBackendAPIException(data='Error!')) def test_fail_update_migrated_volume(self, mock_rename): self.assertRaises( exception.VolumeBackendAPIException, self.driver.update_migrated_volume, self.ctx, self.volume, self.new_volume, 'available' ) mock_rename.assert_called_with(self.volume, "ff" + self.volume['id']) def test_rename_volume(self): rc = self.driver.primary_client.rename_volume( self.volume, self.new_volume['id']) self.assertIsNone(rc) def test_rename_volume_illegal_syntax(self): self.set_https_response_mode(self.RESPONSE_MODE.Invalid) rc = self.driver.primary_client.rename_volume( self.volume, self.new_volume['id']) self.assertIsNone(rc) def test_rename_volume_non_sio(self): self.set_https_response_mode(self.RESPONSE_MODE.BadStatus) rc = self.driver.primary_client.rename_volume( self.volume, self.new_volume['id']) self.assertIsNone(rc) def test_default_provisioning_type_unspecified(self): empty_storage_type = {} provisioning, compression = ( self.driver._get_provisioning_and_compression( empty_storage_type, self.PROT_DOMAIN_NAME, self.STORAGE_POOL_NAME) ) self.assertEqual('ThinProvisioned', provisioning) @ddt.data((True, 'ThinProvisioned'), (False, 'ThickProvisioned')) @ddt.unpack def test_default_provisioning_type_thin(self, config_provisioning_type, expected_provisioning_type): self.override_config('san_thin_provision', config_provisioning_type, configuration.SHARED_CONF_GROUP) self.driver = mocks.PowerFlexDriver(configuration=self.configuration) self.driver.do_setup({}) self.driver.primary_client = mocks.PowerFlexClient(self.configuration) self.driver.primary_client.do_setup() empty_storage_type = {} provisioning, compression = ( self.driver._get_provisioning_and_compression( empty_storage_type, self.PROT_DOMAIN_NAME, self.STORAGE_POOL_NAME) ) self.assertEqual(expected_provisioning_type, provisioning) @mock.patch('cinder.volume.drivers.dell_emc.powerflex.rest_client.' 
'RestClient.query_rest_api_version', return_value="3.0") def test_get_volume_stats_v3(self, mock_version): self.driver.storage_pools = self.STORAGE_POOLS zero_data = { 'types/StoragePool/instances/action/querySelectedStatistics': mocks.MockHTTPSResponse(content=json.dumps( {'"{}"'.format(self.STORAGE_POOL_NAME): { 'snapCapacityInUseInKb': 0, 'thickCapacityInUseInKb': 0, 'netCapacityInUseInKb': 0, 'netUnusedCapacityInKb': 0, 'thinCapacityAllocatedInKb': 0} } )) } with self.custom_response_mode(**zero_data): stats = self.driver.get_volume_stats(True) for s in ["total_capacity_gb", "free_capacity_gb", "provisioned_capacity_gb"]: self.assertEqual(0, stats[s]) data = { 'types/StoragePool/instances/action/querySelectedStatistics': mocks.MockHTTPSResponse(content=json.dumps( {'"{}"'.format(self.STORAGE_POOL_NAME): { 'snapCapacityInUseInKb': 2097152, 'thickCapacityInUseInKb': 67108864, 'netCapacityInUseInKb': 34578432, 'netUnusedCapacityInKb': 102417408, 'thinCapacityAllocatedInKb': 218103808} } )) } with self.custom_response_mode(**data): stats = self.driver.get_volume_stats(True) self.assertEqual(130, stats['total_capacity_gb']) self.assertEqual(97, stats['free_capacity_gb']) self.assertEqual(137, stats['provisioned_capacity_gb']) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/volume/drivers/dell_emc/powerflex/test_powerflex_client.py0000664000175000017500000004335400000000000032213 0ustar00zuulzuul00000000000000# Copyright (c) 2024 Dell Inc. or its subsidiaries. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import http.client as http_client import json import pathlib from unittest import mock import requests.exceptions from requests.models import Response from cinder import exception from cinder.tests.unit import test from cinder.volume import configuration as conf from cinder.volume.drivers.dell_emc.powerflex import driver from cinder.volume.drivers.dell_emc.powerflex import rest_client class TestPowerFlexClient(test.TestCase): params = {'protectionDomainId': '1', 'storagePoolId': '1', 'name': 'HlF355XlSg+xcORfS0afag==', 'volumeType': 'ThinProvisioned', 'volumeSizeInKb': '1048576', 'compressionMethod': 'None'} expected_status_code = 500 status_code_ok = mock.Mock(status_code=http_client.OK) status_code_bad = mock.Mock(status_code=http_client.BAD_REQUEST) response_error = {"errorCode": "123", "message": "Error message"} def setUp(self): super(TestPowerFlexClient, self).setUp() self.configuration = conf.Configuration(driver.powerflex_opts, conf.SHARED_CONF_GROUP) self._set_overrides() self.client = rest_client.RestClient(self.configuration) self.client.do_setup() self.mockup_file_base = ( str(pathlib.Path.cwd()) + "/cinder/tests/unit/volume/drivers/dell_emc/powerflex/mockup/" ) self.sdc_id = "01f7117d0000000b" self.sdc_guid = "028888FA-502A-4FAC-A888-1FA3B256358C" self.volume_id = "3bd1f78800000019" def _set_overrides(self): # Override the defaults to fake values self.override_config('san_ip', override='127.0.0.1', group=conf.SHARED_CONF_GROUP) self.override_config('powerflex_rest_server_port', override='8888', group=conf.SHARED_CONF_GROUP) self.override_config('san_login', override='test', group=conf.SHARED_CONF_GROUP) self.override_config('san_password', override='pass', group=conf.SHARED_CONF_GROUP) self.override_config('powerflex_storage_pools', override='PD1:SP1', group=conf.SHARED_CONF_GROUP) self.override_config('max_over_subscription_ratio', override=5.0, group=conf.SHARED_CONF_GROUP) self.override_config('powerflex_server_api_version', override='2.0.0', group=conf.SHARED_CONF_GROUP) self.override_config('rest_api_connect_timeout', override=120, group=conf.SHARED_CONF_GROUP) self.override_config('rest_api_read_timeout', override=120, group=conf.SHARED_CONF_GROUP) @mock.patch("requests.get") def test_rest_get_request_connect_timeout_exception(self, mock_request): mock_request.side_effect = (requests. exceptions.ConnectTimeout ('Fake Connect Timeout Exception')) r, res = (self.client. execute_powerflex_get_request(url="/version", **{})) self.assertEqual(self.expected_status_code, r.status_code) self.assertEqual(self.expected_status_code, res['errorCode']) (self.assertEqual ('The request to URL /version failed with timeout exception ' 'Fake Connect Timeout Exception', res['message'])) @mock.patch("requests.get") def test_rest_get_request_read_timeout_exception(self, mock_request): mock_request.side_effect = (requests.exceptions.ReadTimeout ('Fake Read Timeout Exception')) r, res = (self.client. 
execute_powerflex_get_request(url="/version", **{})) self.assertEqual(self.expected_status_code, r.status_code) self.assertEqual(self.expected_status_code, res['errorCode']) (self.assertEqual ('The request to URL /version failed with timeout exception ' 'Fake Read Timeout Exception', res['message'])) @mock.patch("requests.post") def test_rest_post_request_connect_timeout_exception(self, mock_request): mock_request.side_effect = (requests.exceptions.ConnectTimeout ('Fake Connect Timeout Exception')) r, res = (self.client.execute_powerflex_post_request (url="/types/Volume/instances", params=self.params, **{})) self.assertEqual(self.expected_status_code, r.status_code) self.assertEqual(self.expected_status_code, res['errorCode']) (self.assertEqual ('The request to URL /types/Volume/instances failed with ' 'timeout exception Fake Connect Timeout Exception', res['message'])) @mock.patch("requests.post") def test_rest_post_request_read_timeout_exception(self, mock_request): mock_request.side_effect = (requests.exceptions.ReadTimeout ('Fake Read Timeout Exception')) r, res = (self.client.execute_powerflex_post_request (url="/types/Volume/instances", params=self.params, **{})) self.assertEqual(self.expected_status_code, r.status_code) self.assertEqual(self.expected_status_code, res['errorCode']) (self.assertEqual ('The request to URL /types/Volume/instances failed with ' 'timeout exception Fake Read Timeout Exception', res['message'])) @mock.patch("requests.get") def test_response_check_read_timeout_exception_1(self, mock_request): r = requests.Response r.status_code = http_client.UNAUTHORIZED mock_request.side_effect = [r, (requests.exceptions.ReadTimeout ('Fake Read Timeout Exception'))] r, res = (self.client. execute_powerflex_get_request(url="/version", **{})) self.assertEqual(self.expected_status_code, r.status_code) self.assertEqual(self.expected_status_code, res['errorCode']) (self.assertEqual ('The request to URL /version failed with ' 'timeout exception Fake Read Timeout Exception', res['message'])) @mock.patch("requests.get") def test_response_check_read_timeout_exception_2(self, mock_request): res1 = requests.Response res1.status_code = http_client.UNAUTHORIZED res2 = Response() res2.status_code = 200 res2._content = str.encode(json.dumps('faketoken')) mock_request.side_effect = [res1, res2, (requests.exceptions.ReadTimeout ('Fake Read Timeout Exception'))] r, res = (self.client. 
execute_powerflex_get_request(url="/version", **{})) self.assertEqual(self.expected_status_code, r.status_code) self.assertEqual(self.expected_status_code, res['errorCode']) (self.assertEqual ('The request to URL /version failed with ' 'timeout exception Fake Read Timeout Exception', res['message'])) @mock.patch("requests.post") @mock.patch("requests.get") def test_response_check_read_timeout_exception_3(self, mock_post_request, mock_get_request): r = requests.Response r.status_code = http_client.UNAUTHORIZED mock_post_request.side_effect = r mock_get_request.side_effect = (requests.exceptions.ReadTimeout ('Fake Read Timeout Exception')) r, res = (self.client.execute_powerflex_post_request (url="/types/Volume/instances", params=self.params, **{})) self.assertEqual(self.expected_status_code, r.status_code) self.assertEqual(self.expected_status_code, res['errorCode']) (self.assertEqual ('The request to URL /types/Volume/instances failed with ' 'timeout exception Fake Read Timeout Exception', res['message'])) def _getJsonFile(self, filename): f = open(self.mockup_file_base + filename) data = json.load(f) f.close() return data def test_query_sdc_id_by_guid_valid(self): response = self._getJsonFile("query_sdc_instances_response.json") with mock.patch.object(self.client, 'execute_powerflex_get_request', return_value=(self.status_code_ok, response)): result = self.client.query_sdc_id_by_guid(self.sdc_guid) self.assertEqual(result, self.sdc_id) self.client.execute_powerflex_get_request.assert_called_with( '/types/Sdc/instances' ) def test_query_sdc_id_by_guid_invalid(self): response = self._getJsonFile("query_sdc_instances_response.json") with mock.patch.object(self.client, 'execute_powerflex_get_request', return_value=(self.status_code_ok, response)): ex = self.assertRaises(exception.VolumeBackendAPIException, self.client.query_sdc_id_by_guid, "invalid_guid") self.assertIn( "Failed to query SDC by guid invalid_guid: Not Found.", ex.msg) def test_query_sdc_id_by_guid_exception(self): with mock.patch.object(self.client, 'execute_powerflex_get_request', return_value=(self.status_code_bad, self.response_error)): ex = self.assertRaises(exception.VolumeBackendAPIException, self.client.query_sdc_id_by_guid, self.sdc_guid) self.assertIn( "Failed to query SDC: Error message.", ex.msg) def test_query_sdc_by_id_success(self): response = self._getJsonFile("query_sdc_by_id_response.json") with mock.patch.object(self.client, 'execute_powerflex_get_request', return_value=(self.status_code_ok, response)): result = self.client.query_sdc_by_id(self.sdc_id) self.assertEqual(result, response) self.client.execute_powerflex_get_request.assert_called_with( '/instances/Sdc::%(sdc_id)s', sdc_id=self.sdc_id ) def test_query_sdc_by_id_failure(self): host_id = "invalid_id" with mock.patch.object(self.client, 'execute_powerflex_get_request', return_value=(self.status_code_bad, self.response_error)): ex = self.assertRaises(exception.VolumeBackendAPIException, self.client.query_sdc_by_id, host_id) self.assertIn( f"Failed to query SDC id {host_id}: Error message.", ex.msg) def test_map_volume_success(self): with mock.patch.object(self.client, 'execute_powerflex_post_request', return_value=(self.status_code_ok, {})): self.client.map_volume(self.volume_id, self.sdc_id) self.client.execute_powerflex_post_request.assert_called_with( f"/instances/Volume::{self.volume_id}/action/addMappedSdc", {"sdcId": self.sdc_id, "allowMultipleMappings": "True"}, ) def test_map_volume_failure(self): with mock.patch.object(self.client, 
'execute_powerflex_post_request', return_value=(self.status_code_bad, self.response_error)): ex = self.assertRaises(exception.VolumeBackendAPIException, self.client.map_volume, self.volume_id, self.sdc_id) self.assertIn( ("Failed to map volume %(vol_id)s to SDC %(sdc_id)s" % {"vol_id": self.volume_id, "sdc_id": self.sdc_id}), ex.msg) def test_unmap_volume_success(self): with mock.patch.object(self.client, 'execute_powerflex_post_request', return_value=(self.status_code_ok, {})): self.client.unmap_volume(self.volume_id, self.sdc_id) self.client.execute_powerflex_post_request.assert_called_with( f"/instances/Volume::{self.volume_id}/action/removeMappedSdc", {"sdcId": self.sdc_id} ) def test_unmap_volume_host_none_success(self): with mock.patch.object(self.client, '_unmap_volume_from_all_sdcs', return_value=None): self.client.unmap_volume(self.volume_id) self.client._unmap_volume_from_all_sdcs.assert_called_with( self.volume_id, ) def test_unmap_volume_failure(self): with mock.patch.object(self.client, 'execute_powerflex_post_request', return_value=(self.status_code_bad, self.response_error)): ex = self.assertRaises(exception.VolumeBackendAPIException, self.client.unmap_volume, self.volume_id, self.sdc_id) self.assertIn( ("Failed to unmap volume %(vol_id)s from SDC %(host_id)s" % {"vol_id": self.volume_id, "host_id": self.sdc_id}), ex.msg) def test_query_sdc_volumes_success(self): response = self._getJsonFile("query_sdc_volumes_response.json") with mock.patch.object(self.client, 'execute_powerflex_get_request', return_value=(self.status_code_ok, response)): result = self.client.query_sdc_volumes(self.sdc_id) self.assertEqual(result, ['694a2d140000000b', '694a2d1300000009']) self.client.execute_powerflex_get_request.assert_called_with( f'/instances/Sdc::{self.sdc_id}/relationships/Volume' ) def test_query_sdc_volumes_failure(self): host_id = "invalid_id" with mock.patch.object(self.client, 'execute_powerflex_get_request', return_value=(self.status_code_bad, self.response_error)): ex = self.assertRaises(exception.VolumeBackendAPIException, self.client.query_sdc_volumes, host_id) self.assertIn( "Failed to query SDC volumes: Error message.", ex.msg) def test_set_sdc_limits_bandwith(self): bandwidth_limit = 100 with mock.patch.object(self.client, 'execute_powerflex_post_request', return_value=(self.status_code_ok, {})): self.client.set_sdc_limits( self.volume_id, self.sdc_id, bandwidth_limit=bandwidth_limit) url = ("/instances/Volume::%(vol_id)s/action/" "setMappedSdcLimits" % {'vol_id': self.volume_id}) params = {'sdcId': self.sdc_id, 'bandwidthLimitInKbps': bandwidth_limit} self.client.execute_powerflex_post_request.assert_called_once_with( url, params) def test_set_sdc_limits_iops(self): iops_limit = 10000 with mock.patch.object(self.client, 'execute_powerflex_post_request', return_value=(self.status_code_ok, {})): self.client.set_sdc_limits( self.volume_id, self.sdc_id, iops_limit=iops_limit) url = ("/instances/Volume::%(vol_id)s/action/" "setMappedSdcLimits" % {'vol_id': self.volume_id}) params = {'sdcId': self.sdc_id, 'iopsLimit': iops_limit} self.client.execute_powerflex_post_request.assert_called_once_with( url, params) def test_set_sdc_limits_failure(self): with mock.patch.object(self.client, 'execute_powerflex_post_request', return_value=(self.status_code_bad, self.response_error)): ex = self.assertRaises(exception.VolumeBackendAPIException, self.client.set_sdc_limits, self.volume_id, self.sdc_id) self.assertIn( "Failed to set SDC limits: Error message.", ex.msg) 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/volume/drivers/dell_emc/powerflex/test_replication.py0000664000175000017500000001226000000000000031143 0ustar00zuulzuul00000000000000# Copyright (c) 2020 Dell Inc. or its subsidiaries. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import ddt from cinder import exception from cinder.tests.unit.volume.drivers.dell_emc import powerflex from cinder.volume import configuration @ddt.ddt class TestReplication(powerflex.TestPowerFlexDriver): """Test cases for PowerFlex replication support.""" def setUp(self): super(TestReplication, self).setUp() self.replication_backend_id = 'powerflex_repl' replication_device = [ { 'backend_id': self.replication_backend_id, 'san_ip': '127.0.0.2', 'san_login': 'test', 'san_password': 'pass' } ] self.override_config('replication_device', override=replication_device, group=configuration.SHARED_CONF_GROUP) self.HTTPS_MOCK_RESPONSES = { self.RESPONSE_MODE.Valid: { 'types/Domain/instances/getByName::' + self.PROT_DOMAIN_NAME: '"{}"'.format(self.PROT_DOMAIN_ID), 'types/Pool/instances/getByName::{},{}'.format( self.PROT_DOMAIN_ID, self.STORAGE_POOL_NAME): '"{}"'.format(self.STORAGE_POOL_ID), 'instances/ProtectionDomain::{}'.format(self.PROT_DOMAIN_ID): {'id': self.PROT_DOMAIN_ID}, 'instances/StoragePool::{}'.format(self.STORAGE_POOL_ID): {'id': self.STORAGE_POOL_ID, 'zeroPaddingEnabled': True}, }, } def test_do_setup_replication_configured(self): super(powerflex.mocks.PowerFlexDriver, self.driver).do_setup({}) self.driver.check_for_setup_error() self.assertTrue(self.driver.secondary_client.is_configured) self.assertTrue(self.driver.replication_enabled) @ddt.data( [ { 'backend_id': 'powerflex_repl1' }, { 'backend_id': 'powerflex_repl2' } ], [ { 'backend_id': 'powerflex_repl1', 'san_ip': '127.0.0.2' }, ] ) def test_do_setup_replication_bad_configuration(self, replication_device): self.override_config('replication_device', override=replication_device, group=configuration.SHARED_CONF_GROUP) self.assertRaises(exception.InvalidInput, super(powerflex.mocks.PowerFlexDriver, self.driver).do_setup, {}) def test_do_setup_already_failed_over(self): self.driver.active_backend_id = 'powerflex_repl' super(powerflex.mocks.PowerFlexDriver, self.driver).do_setup({}) self.driver.check_for_setup_error() self.assertFalse(self.driver.replication_enabled) def test_failover_host(self): self.test_do_setup_replication_configured() self.driver.failover_host({}, [], self.replication_backend_id) self.assertEqual(self.replication_backend_id, self.driver.active_backend_id) def test_failover_host_failback(self): self.test_do_setup_already_failed_over() self.driver.failover_host({}, [], 'default') self.assertEqual('default', self.driver.active_backend_id) @ddt.data("not_valid_target", None) def test_failover_host_secondary_id_invalid(self, secondary_id): self.test_do_setup_replication_configured() self.assertRaises(exception.InvalidReplicationTarget, 
self.driver.failover_host, context={}, volumes=[], secondary_id=secondary_id) def test_failover_aa(self): self.test_do_setup_replication_configured() self.driver.failover({}, [], self.replication_backend_id) self.driver.failover_completed({}, "failed over") self.assertEqual(self.replication_backend_id, self.driver.active_backend_id) def test_failback_aa(self): self.test_do_setup_already_failed_over() self.driver.failover({}, [], 'default') self.driver.failover_completed({}) self.assertEqual('default', self.driver.active_backend_id) def test_failover_completed_invalid(self): self.test_do_setup_replication_configured() self.assertRaises(exception.InvalidReplicationTarget, self.driver.failover_completed, context={}, active_backend_id="not_valid_target") ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/volume/drivers/dell_emc/powerflex/test_revert_volume_to_snapshot.py0000664000175000017500000000777400000000000034167 0ustar00zuulzuul00000000000000# Copyright (c) 2020 Dell Inc. or its subsidiaries. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from unittest import mock from cinder import context from cinder import exception from cinder.tests.unit import fake_constants as fake from cinder.tests.unit import fake_snapshot from cinder.tests.unit import fake_volume from cinder.tests.unit.volume.drivers.dell_emc import powerflex class TestRevertVolume(powerflex.TestPowerFlexDriver): """Test cases for ``PowerFlexDriver.revert_to_snapshot()``""" def setUp(self): """Setup a test case environment. Creates a fake volume object and sets up the required API responses. 
""" super(TestRevertVolume, self).setUp() ctx = context.RequestContext('fake', 'fake', auth_token=True) host = 'host@backend#{}:{}'.format( self.PROT_DOMAIN_NAME, self.STORAGE_POOL_NAME) self.volume = fake_volume.fake_volume_obj( ctx, **{'provider_id': fake.PROVIDER_ID, 'host': host, 'volume_type_id': fake.VOLUME_TYPE_ID, 'size': 8}) self.snapshot = fake_snapshot.fake_snapshot_obj( ctx, **{'volume_id': self.volume.id, 'volume_size': self.volume.size} ) self.HTTPS_MOCK_RESPONSES = { self.RESPONSE_MODE.Valid: { 'instances/Volume::{}/action/overwriteVolumeContent'.format( self.volume.provider_id ): {}, }, self.RESPONSE_MODE.Invalid: { 'version': "2.6", }, self.RESPONSE_MODE.BadStatus: { 'instances/Volume::{}/action/overwriteVolumeContent'.format( self.volume.provider_id ): self.BAD_STATUS_RESPONSE }, } self.volume_is_replicated_mock = self.mock_object( self.volume, 'is_replicated', return_value=False ) def test_revert_to_snapshot(self): self.driver.revert_to_snapshot(None, self.volume, self.snapshot) def test_revert_to_snapshot_badstatus_response(self): self.set_https_response_mode(self.RESPONSE_MODE.BadStatus) self.assertRaises(exception.VolumeBackendAPIException, self.driver.revert_to_snapshot, None, self.volume, self.snapshot) def test_revert_to_snapshot_use_generic(self): self.set_https_response_mode(self.RESPONSE_MODE.Invalid) self.assertRaises(NotImplementedError, self.driver.revert_to_snapshot, None, self.volume, self.snapshot) def test_revert_to_snapshot_replicated_volume(self): self.volume_is_replicated_mock.return_value = True self.assertRaisesRegex( exception.InvalidVolume, 'Reverting replicated volume is not allowed.', self.driver.revert_to_snapshot, None, self.volume, self.snapshot ) def test_revert_to_snapshot_size_not_equal(self): patched_volume = mock.MagicMock() patched_volume.id = self.volume.id patched_volume.size = 16 patched_volume.is_replicated.return_value = False self.assertRaisesRegex( exception.InvalidVolume, ('Volume %s size is not equal to snapshot %s size.' % (self.volume.id, self.snapshot.id)), self.driver.revert_to_snapshot, None, patched_volume, self.snapshot ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/volume/drivers/dell_emc/powerflex/test_sdc.py0000664000175000017500000002370500000000000027411 0ustar00zuulzuul00000000000000# Copyright (c) 2025 Dell Inc. or its subsidiaries. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from unittest import mock import ddt from cinder import context from cinder import exception from cinder.tests.unit import fake_volume from cinder.tests.unit.volume.drivers.dell_emc import powerflex class DictToObject: def __init__(self, dictionary): for key, value in dictionary.items(): setattr(self, key, value) def get(self, key): return self.__dict__.get(key) @ddt.ddt class TestSDC(powerflex.TestPowerFlexDriver): def setUp(self): """Setup a test case environment.""" super(TestSDC, self).setUp() self.client_mock = mock.MagicMock() self.driver._get_client = mock.MagicMock(return_value=self.client_mock) self.connector = { "sdc_guid": "028888FA-502A-4FAC-A888-1FA3B256358C", "host": "hostname"} self.host_id = "13bf228a00010001" self.host = {"name": "hostname"} self.ctx = ( context.RequestContext('fake', 'fake', True, auth_token=True)) self.attachment1 = DictToObject(fake_volume.fake_db_volume_attachment( **{ 'attach_status': 'attached', 'attached_host': self.host['name'] } )) self.attachment2 = DictToObject(fake_volume.fake_db_volume_attachment( **{ 'attach_status': 'attached', 'attached_host': self.host['name'] } )) self.volume = fake_volume.fake_volume_obj( self.ctx, **{'provider_id': '3bd1f78800000019', 'size': 8}) def test_initialize_connection(self): self.driver._initialize_connection = mock.MagicMock() self.driver.initialize_connection(self.volume, self.connector) self.driver._initialize_connection.assert_called_once_with( self.volume, self.connector, self.volume.size) def test__initialize_connection(self): self.client_mock.query_sdc_id_by_guid.return_value = self.host_id self.driver._attach_volume_to_host = mock.MagicMock() self.driver._check_volume_mapped = mock.MagicMock() result = self.driver._initialize_connection( self.volume, self.connector, self.volume.size) self.assertEqual(result['driver_volume_type'], "scaleio") self.driver._attach_volume_to_host.assert_called_with( self.volume, self.host_id) self.driver._check_volume_mapped.assert_called_with( self.host_id, self.volume.provider_id) def test__initialize_connection_no_connector(self): self.assertRaises(exception.InvalidHost, self.driver._initialize_connection, self.volume, {}, self.volume.size) def test__attach_volume_to_host_success(self): self.client_mock.query_sdc_by_id.return_value = self.host self.client_mock.query_volume.return_value = { "mappedSdcInfo": [] } self.client_mock.map_volume.return_value = None self.driver._attach_volume_to_host(self.volume, self.host_id) self.client_mock.query_sdc_by_id.assert_called_once_with( self.host_id) self.client_mock.query_volume.assert_called_once_with( self.volume.provider_id) self.client_mock.map_volume.assert_called_once_with( self.volume.provider_id, self.host_id) def test__attach_volume_to_host_already_attached(self): self.client_mock.query_sdc_by_id.return_value = self.host self.client_mock.query_volume.return_value = { "mappedSdcInfo": [ { "sdcId": self.host_id } ] } self.driver._attach_volume_to_host(self.volume, self.host_id) self.client_mock.query_sdc_by_id.assert_called_once_with( self.host_id) self.client_mock.map_volume.assert_not_called() def test__check_volume_mapped_success(self): self.client_mock.query_sdc_volumes.return_value = [ 'vol1', 'vol2', self.volume.id] self.driver._check_volume_mapped(self.host_id, self.volume.id) self.client_mock.query_sdc_volumes.assert_called_once_with( self.host_id) def test__check_volume_mapped_fail(self): self.client_mock.query_sdc_volumes.return_value = [] self.assertRaises(exception.VolumeBackendAPIException, 
self.driver._check_volume_mapped, self.host_id, self.volume.id) def test__check_volume_mapped_with_retry(self): self.client_mock.query_sdc_volumes.side_effect = [ [], [self.volume.id] ] self.driver._check_volume_mapped('sdc_id', self.volume.id) self.assertEqual(self.client_mock.query_sdc_volumes.call_count, 2) def test_terminate_connection(self): self.driver._terminate_connection = mock.MagicMock() self.driver.terminate_connection(self.volume, self.connector) self.driver._terminate_connection.assert_called_once_with( self.volume, self.connector) def test__terminate_connection_success(self): self.client_mock.query_sdc_id_by_guid.return_value = self.host_id self.driver._detach_volume_from_host = mock.MagicMock( return_value=None) self.driver._terminate_connection(self.volume, self.connector) self.client_mock.query_sdc_id_by_guid.assert_called_once_with( self.connector["sdc_guid"]) self.driver._detach_volume_from_host.assert_called_once_with( self.volume, self.host_id) def test__terminate_connection_no_connector(self): self.assertRaises(exception.InvalidHost, self.driver._terminate_connection, self.volume, {}) def test__terminate_connection_multiattached(self): self.driver._is_multiattached_to_host = mock.MagicMock( return_value=False) self.driver._terminate_connection(self.volume, self.connector) self.client_mock.query_sdc_id_by_guid.assert_not_called() def test__is_multiattached_to_host_false(self): result = self.driver._is_multiattached_to_host( [self.attachment1], self.host['name']) self.assertFalse(result) def test__is_multiattached_to_host_true(self): result = self.driver._is_multiattached_to_host( [self.attachment1, self.attachment2], self.host['name']) self.assertTrue(result) @ddt.data("13bf228a00010001", None) def test__detach_volume_from_host_detached_1(self, host_id): self.client_mock.query_volume.return_value = { "mappedSdcInfo": [] } self.driver._detach_volume_from_host(self.volume, host_id) self.client_mock.unmap_volume.assert_not_called() def test__detach_volume_from_host_detached_2(self): self.client_mock.query_volume.return_value = { "mappedSdcInfo": [ { "sdcId": "fake_id" } ] } self.client_mock.query_sdc_by_id.return_value = self.host self.driver._detach_volume_from_host(self.volume, self.host_id) self.client_mock.query_sdc_by_id.assert_called_once_with(self.host_id) self.client_mock.unmap_volume.assert_not_called() def test__detach_volume_from_host_with_hostid(self): self.client_mock.query_volume.return_value = { "mappedSdcInfo": [ { "sdcId": self.host_id } ] } self.client_mock.query_sdc_by_id.return_value = self.host self.driver._detach_volume_from_host(self.volume, self.host_id) self.client_mock.query_sdc_by_id.assert_called_once_with(self.host_id) self.client_mock.unmap_volume.assert_called_once_with( self.volume.provider_id, self.host_id) def test__detach_volume_from_host_without_hostid(self): self.client_mock.query_volume.return_value = { "mappedSdcInfo": [ { "sdcId": self.host_id } ] } self.client_mock.query_sdc_by_id.return_value = self.host self.driver._detach_volume_from_host(self.volume) self.client_mock.query_sdc_by_id.assert_not_called() self.client_mock.unmap_volume.assert_called_once_with( self.volume.provider_id) def test__check_volume_unmapped_success(self): self.client_mock.query_sdc_volumes.return_value = [] self.driver._check_volume_unmapped(self.host_id, self.volume.id) self.client_mock.query_sdc_volumes.assert_called_once_with( self.host_id) def test__check_volume_unmapped_fail(self): self.client_mock.query_sdc_volumes.return_value = [ 'vol1', 'vol2',
self.volume.id] self.assertRaises(exception.VolumeBackendAPIException, self.driver._check_volume_unmapped, self.host_id, self.volume.id) def test__check_volume_unmapped_with_retry(self): self.client_mock.query_sdc_volumes.side_effect = [ [self.volume.id], [] ] self.driver._check_volume_unmapped('sdc_id', self.volume.id) self.assertEqual(self.client_mock.query_sdc_volumes.call_count, 2) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/volume/drivers/dell_emc/powerflex/test_versions.py0000664000175000017500000000745400000000000030513 0ustar00zuulzuul00000000000000# Copyright (C) 2017 Dell Inc. or its subsidiaries. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from unittest import mock import ddt import requests.exceptions from cinder import exception from cinder.tests.unit.volume.drivers.dell_emc import powerflex @ddt.ddt class TestMultipleVersions(powerflex.TestPowerFlexDriver): version = '1.2.3.4' good_versions = ['1.2.3.4', '101.102.103.104.105.106.107', '1.0' ] bad_versions = ['bad', 'bad.version.number', '1.0b', '.6' ] # Test cases for ``PowerFlexDriver._get_server_api_version()`` def setUp(self): """Setup a test case environment.""" super(TestMultipleVersions, self).setUp() self.HTTPS_MOCK_RESPONSES = { self.RESPONSE_MODE.Valid: { 'version': '"{}"'.format(self.version), }, self.RESPONSE_MODE.Invalid: { 'version': None, }, self.RESPONSE_MODE.BadStatus: { 'version': self.BAD_STATUS_RESPONSE, }, } def test_version_api_fails(self): """version api returns a non-200 response.""" self.set_https_response_mode(self.RESPONSE_MODE.Invalid) self.assertRaises(exception.VolumeBackendAPIException, self.test_version) def test_version(self): """Valid version request.""" self.driver.primary_client.query_rest_api_version(False) def test_version_badstatus_response(self): """Version api returns a bad response.""" self.set_https_response_mode(self.RESPONSE_MODE.BadStatus) self.assertRaises(exception.VolumeBackendAPIException, self.test_version) def setup_response(self): self.HTTPS_MOCK_RESPONSES = { self.RESPONSE_MODE.Valid: { 'version': '"{}"'.format(self.version), }, } def test_version_badversions(self): """Version api returns an invalid version number.""" for vers in self.bad_versions: self.version = vers self.setup_response() self.assertRaises(exception.VolumeBackendAPIException, self.test_version) def test_version_goodversions(self): """Version api returns a valid version number.""" for vers in self.good_versions: self.version = vers self.setup_response() self.driver.primary_client.query_rest_api_version(False) self.assertEqual( self.driver.primary_client.query_rest_api_version(False), vers ) @mock.patch("requests.get") def test_get_version_connect_timeout_request(self, mock_request): mock_request.side_effect = requests.exceptions.ConnectTimeout() self.assertRaises(exception.VolumeBackendAPIException, self.test_version) @mock.patch("requests.get") def test_get_version_read_timeout_request(self, 
mock_request): mock_request.side_effect = requests.exceptions.ReadTimeout() self.assertRaises(exception.VolumeBackendAPIException, self.test_version) ././@PaxHeader0000000000000000000000000000003200000000000011450 xustar000000000000000026 mtime=1759315577.25912 cinder-27.0.0/cinder/tests/unit/volume/drivers/dell_emc/powermax/0000775000175000017500000000000000000000000025047 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/volume/drivers/dell_emc/powermax/__init__.py0000664000175000017500000000000000000000000027146 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/volume/drivers/dell_emc/powermax/powermax_data.py0000664000175000017500000023156300000000000030266 0ustar00zuulzuul00000000000000# Copyright (c) 2020 Dell Inc. or its subsidiaries. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from copy import deepcopy import random from cinder import context from cinder.objects import fields from cinder.objects import group from cinder.objects import group_snapshot from cinder.objects import volume_attachment from cinder.objects import volume_type from cinder.tests.unit import fake_group from cinder.tests.unit import fake_snapshot from cinder.tests.unit import fake_volume from cinder.volume.drivers.dell_emc.powermax import utils CINDER_EMC_CONFIG_DIR = '/etc/cinder/' class PowerMaxData(object): # array info array = '000197800123' uni_array = u'000197800123' array_herc = '000197900123' array_model = 'PowerMax_8000' srp = 'SRP_1' slo = 'Diamond' slo_diamond = 'Diamond' slo_silver = 'Silver' workload = 'DSS' port_group_name_f = 'OS-fibre-PG' port_group_name_i = 'OS-iscsi-PG' port_group_name_nt = 'OS-nvme-tcp-PG' masking_view_name_f = 'OS-HostX-F-OS-fibre-PG-MV' masking_view_name_nt = 'OS-HostX-NT-VME-PG1558b4-MV' masking_view_name_Y_f = 'OS-HostY-F-OS-fibre-PG-MV' masking_view_name_i = 'OS-HostX-SRP_1-I-OS-iscsi-PG-MV' initiatorgroup_name_f = 'OS-HostX-F-IG' initiatorgroup_name_i = 'OS-HostX-I-IG' parent_sg_f = 'OS-HostX-F-OS-fibre-PG-SG' parent_sg_i = 'OS-HostX-I-OS-iscsi-PG-SG' storagegroup_name_f = 'OS-HostX-SRP_1-DiamondDSS-OS-fibre-PG' storagegroup_name_i = 'OS-HostX-SRP_1-Diamond-DSS-OS-iscsi-PG' defaultstoragegroup_name = 'OS-SRP_1-Diamond-DSS-SG' storagegroup_list = [defaultstoragegroup_name] default_sg_no_slo = 'OS-no_SLO-SG' default_sg_compr_disabled = 'OS-SRP_1-Diamond-DSS-CD-SG' default_sg_re_enabled = 'OS-SRP_1-Diamond-DSS-RE-SG' default_sg_no_slo_re_enabled = 'OS-SRP_1-Diamond-NONE-RE-SG' failed_resource = 'OS-failed-resource' fake_host = 'HostX@Backend#Diamond+DSS+SRP_1+000197800123' new_host = 'HostX@Backend#Silver+OLTP+SRP_1+000197800123' none_host = 'HostX@Backend#Diamond+None+SRP_1+000197800123' version = '3.1.0' volume_wwn = '600000345' remote_array = '000197800124' device_id = '00001' device_id2 = '00002' device_id3 = '00003' 
device_id4 = '00004' rdf_group_name_1 = '23_24_007' rdf_group_name_2 = '23_24_008' rdf_group_name_3 = '23_24_009' rdf_group_name_4 = '23_24_010' rdf_group_no_1 = '70' rdf_group_no_2 = '71' rdf_group_no_3 = '72' rdf_group_no_4 = '73' u4p_version = '92' u4p_100_endpoint = '100' storagegroup_name_source = 'Grp_source_sg' storagegroup_name_target = 'Grp_target_sg' group_snapshot_name = 'Grp_snapshot' target_group_name = 'Grp_target' storagegroup_name_with_id = 'GrpId_group_name' rdf_managed_async_grp = 'OS-%s-Asynchronous-rdf-sg' % rdf_group_name_1 default_sg_re_managed_list = [default_sg_re_enabled, rdf_managed_async_grp] volume_id = '2b06255d-f5f0-4520-a953-b029196add6a' no_slo_sg_name = 'OS-HostX-No_SLO-OS-fibre-PG' temp_snapvx = 'temp-00001-snapshot_for_clone' next_gen_ucode = 5978 gvg_group_id = 'test-gvg' sg_tags = 'production,test' snap_id = 118749976833 snap_id_2 = 118749976834 # connector info wwpn1 = '123456789012345' wwpn2 = '123456789054321' wwnn1 = '223456789012345' wwnn2 = '223456789012346' initiator = 'iqn.1993-08.org.debian:01:222' iscsi_dir = 'SE-4E' iscsi_port = '1' ip, ip2 = '123.456.7.8', '123.456.7.9' iqn = 'iqn.1992-04.com.emc:600009700bca30c01e3e012e00000001' iqn2 = 'iqn.1992-04.com.emc:600009700bca30c01e3e012e00000002' nvme_tcp_hostid = '0eaf7037-479c-432b-a3d8-62e2889d768e' nqn = ('nqn.2014-08.org.nvmexpress:uuid:' 'ac353d72-eabe-43c7-926c-f08987a8a553') connector = {'ip': ip, 'initiator': initiator, 'wwpns': [wwpn1, wwpn2], 'wwnns': [wwnn1], 'host': 'HostX', 'nvme_hostid': nvme_tcp_hostid, 'nqn': nqn} connector_without_host_id = {'ip': ip, 'initiator': initiator, 'wwpns': [wwpn1, wwpn2], 'wwnns': [wwnn1], 'host': 'HostX', 'nqn': nqn} fabric_name_prefix = 'fakeFabric' end_point_map = {connector['wwpns'][0]: [wwpn1], connector['wwpns'][1]: [wwpn1]} target_wwns = [wwpn1] target_wwns_multi = [wwnn1, wwnn2] zoning_mappings = { 'array': u'000197800123', 'init_targ_map': end_point_map, 'initiator_group': initiatorgroup_name_f, 'port_group': port_group_name_f, 'target_wwns': target_wwns} zoning_mappings_metro = deepcopy(zoning_mappings) zoning_mappings_metro.update({'metro_port_group': port_group_name_f, 'metro_ig': initiatorgroup_name_f, 'metro_array': remote_array}) device_map = {} for wwn in connector['wwpns']: fabric_name = ''.join([fabric_name_prefix, wwn[-2:]]) target_wwn = wwn[::-1] fabric_map = {'initiator_port_wwn_list': [wwn], 'target_port_wwn_list': [target_wwn] } device_map[fabric_name] = fabric_map iscsi_dir_port = '%(dir)s:%(port)s' % {'dir': iscsi_dir, 'port': iscsi_port} iscsi_dir_virtual_port = '%(dir)s:%(port)s' % {'dir': iscsi_dir, 'port': '000'} iscsi_device_info = {'maskingview': masking_view_name_i, 'ip_and_iqn': [{'ip': ip, 'iqn': initiator, 'physical_port': iscsi_dir_port}], 'is_multipath': True, 'array': array, 'controller': {'host': '10.00.00.00'}, 'hostlunid': 3, 'device_id': device_id} iscsi_device_info_metro = deepcopy(iscsi_device_info) iscsi_device_info_metro['metro_ip_and_iqn'] = [{ 'ip': ip2, 'iqn': iqn2, 'physical_port': iscsi_dir_port}] iscsi_device_info_metro['metro_hostlunid'] = 2 fc_device_info = {'maskingview': masking_view_name_f, 'array': array, 'controller': {'host': '10.00.00.00'}, 'hostlunid': 3} nvme_tcp_device_info = {'array': array, 'device_id': '0027C', 'hostlunid': 1, 'ips': ['172.16.22.1', '172.16.22.2'], 'maskingview': masking_view_name_nt, 'target_nqn': 'nqn.1988-11.com.dell:' 'PowerMax_2500:00:000120001602'} director_port_keys_empty = {'symmetrixPortKey': []} director_port_keys_multiple = {'symmetrixPortKey': [ 
{'directorId': 'SE-1E', 'portId': '1'}, {'directorId': 'SE-1E', 'portId': '2'}]} # snapshot info snapshot_id = '390eeb4d-0f56-4a02-ba14-167167967014' snapshot_display_id = 'my_snap' managed_snap_id = 'OS-390eeb4d-0f56-4a02-ba14-167167967014' test_snapshot_snap_name = 'OS-' + snapshot_id[:6] + snapshot_id[-9:] snap_location = {'snap_name': test_snapshot_snap_name, 'source_id': device_id} # cinder volume info ctx = context.RequestContext('admin', 'fake', True) provider_location = {'array': array, 'device_id': device_id} provider_location2 = {'array': str(array), 'device_id': device_id2} provider_location3 = {'array': str(remote_array), 'device_id': device_id2} provider_location4 = {'array': str(uni_array), 'device_id': device_id} provider_location_clone = {'array': array, 'device_id': device_id, 'snap_name': temp_snapvx, 'source_device_id': device_id} provider_location_snapshot = {'array': array, 'device_id': device_id, 'snap_name': test_snapshot_snap_name, 'source_device_id': device_id} provider_location5 = {'array': remote_array, 'device_id': device_id} replication_update = ( {'replication_status': 'enabled', 'replication_driver_data': str( {'array': remote_array, 'device_id': device_id2})}) legacy_provider_location = { 'classname': 'Symm_StorageVolume', 'keybindings': {'CreationClassName': u'Symm_StorageVolume', 'SystemName': u'SYMMETRIX+000197800123', 'DeviceID': device_id, 'SystemCreationClassName': u'Symm_StorageSystem'}} legacy_provider_location2 = { 'classname': 'Symm_StorageVolume', 'keybindings': {'CreationClassName': u'Symm_StorageVolume', 'SystemName': u'SYMMETRIX+000197800123', 'DeviceID': device_id2, 'SystemCreationClassName': u'Symm_StorageSystem'}} test_volume_type = fake_volume.fake_volume_type_obj( context=ctx ) test_volume = fake_volume.fake_volume_obj( context=ctx, name='vol1', size=2, provider_auth=None, provider_location=str(provider_location), volume_type=test_volume_type, host=fake_host, replication_driver_data=str(provider_location3)) test_rep_volume = fake_volume.fake_volume_obj( context=ctx, name='vol1', size=2, provider_auth=None, provider_location=str(provider_location), volume_type=test_volume_type, host=fake_host, replication_driver_data=str(provider_location3), replication_status=fields.ReplicationStatus.ENABLED) test_attached_volume = fake_volume.fake_volume_obj( id='4732de9b-98a4-4b6d-ae4b-3cafb3d34220', context=ctx, name='vol1', size=0, provider_auth=None, attach_status='attached', provider_location=str(provider_location), host=fake_host, volume_type=test_volume_type, replication_driver_data=str(provider_location3)) test_legacy_vol = fake_volume.fake_volume_obj( context=ctx, name='vol1', size=2, provider_auth=None, provider_location=str(legacy_provider_location), replication_driver_data=str(legacy_provider_location2), host=fake_host, volume_type=test_volume_type) test_clone_volume = fake_volume.fake_volume_obj( context=ctx, name='vol1', size=2, provider_auth=None, provider_location=str(provider_location2), host=fake_host, source_volid=test_volume.id, snapshot_id=snapshot_id, _name_id=test_volume.id) test_volume_snap_manage = fake_volume.fake_volume_obj( context=ctx, name='vol1', size=2, provider_auth=None, display_name='vol1', provider_location=str(provider_location), volume_type=test_volume_type, host=fake_host, replication_driver_data=str(provider_location4)) test_snapshot = fake_snapshot.fake_snapshot_obj( context=ctx, id=snapshot_id, name='my_snap', size=2, provider_location=str(snap_location), host=fake_host, volume=test_volume) test_legacy_snapshot = 
fake_snapshot.fake_snapshot_obj( context=ctx, id=test_volume.id, name='my_snap', size=2, provider_location=str(legacy_provider_location), host=fake_host, volume=test_volume) test_failed_snap = fake_snapshot.fake_snapshot_obj( context=ctx, id='4732de9b-98a4-4b6d-ae4b-3cafb3d34220', name=failed_resource, size=2, provider_location=str(snap_location), host=fake_host, volume=test_volume) test_snapshot_manage = fake_snapshot.fake_snapshot_obj( context=ctx, id=snapshot_id, name='my_snap', size=2, provider_location=str(snap_location), host=fake_host, volume=test_volume_snap_manage, display_name='my_snap') test_volume_attachment = volume_attachment.VolumeAttachment( id='2b06255d-f5f0-4520-a953-b029196add6b', volume_id=test_volume.id, connector=connector, attached_host='HostX') location_info = {'location_info': '000197800123#SRP_1#Diamond#DSS', 'storage_protocol': 'FC'} test_host = {'capabilities': location_info, 'host': fake_host} # replication rep_backend_id_sync = 'rep_backend_id_sync' rep_backend_id_async = 'rep_backend_id_async' rep_backend_id_metro = 'rep_backend_id_metro' rep_backend_id_sync_2 = 'rep_backend_id_sync_2' rep_dev_1 = { utils.BACKEND_ID: rep_backend_id_sync, 'target_device_id': remote_array, 'remote_port_group': port_group_name_f, 'remote_pool': srp, 'rdf_group_label': rdf_group_name_1, 'mode': utils.REP_SYNC, 'allow_extend': True} rep_dev_2 = { utils.BACKEND_ID: rep_backend_id_async, 'target_device_id': remote_array, 'remote_port_group': port_group_name_f, 'remote_pool': srp, 'rdf_group_label': rdf_group_name_2, 'mode': utils.REP_ASYNC, 'allow_extend': True} rep_dev_3 = { utils.BACKEND_ID: rep_backend_id_metro, 'target_device_id': remote_array, 'remote_port_group': port_group_name_f, 'remote_pool': srp, 'rdf_group_label': rdf_group_name_3, 'mode': utils.REP_METRO, 'allow_extend': True} sync_rep_device = [rep_dev_1] async_rep_device = [rep_dev_2] metro_rep_device = [rep_dev_3] multi_rep_device = [rep_dev_1, rep_dev_2, rep_dev_3] rep_config_sync = { utils.BACKEND_ID: rep_backend_id_sync, 'array': remote_array, 'portgroup': port_group_name_f, 'srp': srp, 'rdf_group_label': rdf_group_name_1, 'mode': utils.REP_SYNC, 'allow_extend': True, 'sync_interval': 3, 'sync_retries': 200} rep_config_async = { utils.BACKEND_ID: rep_backend_id_async, 'array': remote_array, 'portgroup': port_group_name_f, 'srp': srp, 'rdf_group_label': rdf_group_name_2, 'mode': utils.REP_ASYNC, 'allow_extend': True, 'sync_interval': 3, 'sync_retries': 200} rep_config_metro = { utils.BACKEND_ID: rep_backend_id_metro, 'array': remote_array, 'portgroup': port_group_name_f, 'srp': srp, 'rdf_group_label': rdf_group_name_3, 'mode': utils.REP_METRO, 'allow_extend': True, 'sync_interval': 3, 'sync_retries': 200} rep_config_sync_2 = { utils.BACKEND_ID: rep_backend_id_sync_2, 'array': remote_array, 'portgroup': port_group_name_f, 'srp': srp, 'rdf_group_label': rdf_group_name_1, 'mode': utils.REP_SYNC, 'allow_extend': True, 'sync_interval': 3, 'sync_retries': 200} sync_rep_config_list = [rep_config_sync] async_rep_config_list = [rep_config_async] metro_rep_config_list = [rep_config_metro] multi_rep_config_list = [rep_config_sync, rep_config_async, rep_config_metro, rep_config_sync_2] # extra-specs vol_type_extra_specs = {'pool_name': u'Diamond+DSS+SRP_1+000197800123'} vol_type_extra_specs_none_pool = { 'pool_name': u'None+NONE+SRP_1+000197800123'} vol_type_extra_specs_optimised_pool = { 'pool_name': u'Optimized+NONE+SRP_1+000197800123'} vol_type_extra_specs_next_gen_pool = { 'pool_name': u'Optimized+SRP_1+000197800123'} 
vol_type_extra_specs_compr_disabled = { 'pool_name': u'Diamond+DSS+SRP_1+000197800123', 'storagetype:disablecompression': 'true'} vol_type_extra_specs_rep_enabled = { 'pool_name': u'Diamond+DSS+SRP_1+000197800123', 'replication_enabled': ' True'} vol_type_extra_specs_rep_enabled_backend_id_sync = { 'pool_name': u'Diamond+DSS+SRP_1+000197800123', 'replication_enabled': ' True', utils.REPLICATION_DEVICE_BACKEND_ID: rep_backend_id_sync} vol_type_extra_specs_rep_enabled_backend_id_sync_2 = { 'pool_name': u'Diamond+DSS+SRP_1+000197800123', 'replication_enabled': ' True', utils.REPLICATION_DEVICE_BACKEND_ID: rep_backend_id_sync_2} vol_type_extra_specs_rep_enabled_backend_id_async = { 'pool_name': u'Diamond+DSS+SRP_1+000197800123', 'replication_enabled': ' True', utils.REPLICATION_DEVICE_BACKEND_ID: rep_backend_id_async} extra_specs = {'pool_name': u'Diamond+DSS+SRP_1+000197800123', 'slo': slo, 'workload': workload, 'srp': srp, 'array': array, 'interval': 3, 'retries': 120} extra_specs_no_workload = { 'pool_name': u'Diamond+NONE+SRP_1+000197800123', 'slo': slo, 'srp': srp, 'workload': 'NONE', 'array': array, 'interval': 3, 'retries': 120} extra_specs_no_pool_name = { 'slo': slo, 'workload': workload, 'srp': srp, 'array': array, 'interval': 3, 'retries': 120} extra_specs_optimized = { 'pool_name': u'Optimized+None+SRP_1+000197800123', 'slo': 'Optimized', 'workload': 'None', 'srp': srp, 'array': array, 'interval': 3, 'retries': 120} vol_type_extra_specs_tags = { 'storagetype:storagegrouptags': u'good, comma, separated,list'} vol_type_extra_specs_tags_bad = { 'storagetype:storagegrouptags': u'B&d, [list]'} extra_specs_port_group_template = deepcopy(extra_specs) extra_specs_port_group_template['port_group_template'] = 'portGroupName' extra_specs_migrate = deepcopy(extra_specs) extra_specs_migrate[utils.PORTGROUPNAME] = port_group_name_f extra_specs_disable_compression = deepcopy(extra_specs) extra_specs_disable_compression[utils.DISABLECOMPRESSION] = 'true' extra_specs_intervals_set = deepcopy(extra_specs) extra_specs_intervals_set['interval'] = 1 extra_specs_intervals_set['retries'] = 1 extra_specs_rep_enabled = deepcopy(extra_specs) extra_specs_rep_enabled['replication_enabled'] = True rep_extra_specs = deepcopy(extra_specs_rep_enabled) rep_extra_specs['array'] = remote_array rep_extra_specs['interval'] = 1 rep_extra_specs['retries'] = 1 rep_extra_specs['srp'] = srp rep_extra_specs['rep_mode'] = 'Synchronous' rep_extra_specs['sync_interval'] = 3 rep_extra_specs['sync_retries'] = 200 rep_extra_specs['rdf_group_label'] = rdf_group_name_1 rep_extra_specs['rdf_group_no'] = rdf_group_no_1 rep_extra_specs[utils.DISABLE_PROTECTED_SNAP] = False rep_extra_specs2 = deepcopy(rep_extra_specs) rep_extra_specs2[utils.PORTGROUPNAME] = port_group_name_f rep_extra_specs3 = deepcopy(rep_extra_specs) rep_extra_specs3['slo'] = slo rep_extra_specs3['workload'] = workload rep_extra_specs4 = deepcopy(rep_extra_specs3) rep_extra_specs4['rdf_group_label'] = rdf_group_name_1 rep_extra_specs5 = deepcopy(rep_extra_specs2) rep_extra_specs5['target_array_model'] = 'VMAX250F' rep_extra_specs5['sync_interval'] = 3 rep_extra_specs5['sync_retries'] = 200 rep_extra_specs6 = deepcopy(rep_extra_specs3) rep_extra_specs6['target_array_model'] = 'PMAX2000' rep_extra_specs_ode = deepcopy(rep_extra_specs2) rep_extra_specs_ode['array'] = array rep_extra_specs_ode.pop('rep_mode') rep_extra_specs_ode['mode'] = 'Metro' rep_extra_specs_legacy = deepcopy(rep_extra_specs_ode) rep_extra_specs_legacy['mode'] = 'Synchronous' 
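# Minimal sketch (illustrative values, not driver fixtures) of why the
# variants above are built with copy.deepcopy rather than a shallow copy:
# the specs dicts carry nested dicts (such as 'qos' or a rep_config), and a
# shallow copy would let a mutation on one variant leak into every other.
from copy import deepcopy

_base_specs = {'slo': 'Diamond', 'rep_config': {'allow_extend': True}}
_variant_specs = deepcopy(_base_specs)
_variant_specs['rep_config']['allow_extend'] = False
assert _base_specs['rep_config']['allow_extend'] is True  # base unaffected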
rep_extra_specs_rep_config = deepcopy(rep_extra_specs6) rep_extra_specs_rep_config[utils.REP_CONFIG] = rep_config_sync rep_extra_specs_rep_config_metro = deepcopy(rep_extra_specs6) rep_extra_specs_rep_config_metro[utils.REP_CONFIG] = rep_config_metro rep_extra_specs_rep_config_metro[utils.REP_MODE] = utils.REP_METRO extra_specs_tags = deepcopy(extra_specs) extra_specs_tags.update({utils.STORAGE_GROUP_TAGS: sg_tags}) extra_specs_qos = deepcopy(extra_specs) qos_dict = { 'total_iops_sec': '4000', 'DistributionType': 'Always'} extra_specs_qos['qos'] = qos_dict rep_extra_specs_mgmt = deepcopy(rep_extra_specs) rep_extra_specs_mgmt['srp'] = srp rep_extra_specs_mgmt['mgmt_sg_name'] = rdf_managed_async_grp rep_extra_specs_mgmt['sg_name'] = default_sg_no_slo_re_enabled rep_extra_specs_mgmt['rdf_group_no'] = rdf_group_no_1 rep_extra_specs_mgmt['rdf_group_label'] = rdf_group_name_1 rep_extra_specs_mgmt['target_array_model'] = array_model rep_extra_specs_mgmt['slo'] = 'Diamond' rep_extra_specs_mgmt['workload'] = 'NONE' rep_extra_specs_mgmt['sync_interval'] = 2 rep_extra_specs_mgmt['sync_retries'] = 200 rep_extra_specs_metro = deepcopy(rep_extra_specs) rep_extra_specs_metro[utils.REP_MODE] = utils.REP_METRO rep_extra_specs_metro[utils.METROBIAS] = True rep_extra_specs_metro['replication_enabled'] = ' True' rep_extra_specs_async = deepcopy(rep_extra_specs) rep_extra_specs_async[utils.REP_MODE] = utils.REP_ASYNC rep_extra_specs_async[utils.METROBIAS] = True rep_extra_specs_async['replication_enabled'] = ' True' rep_config = { 'array': remote_array, 'srp': srp, 'portgroup': port_group_name_i, 'rdf_group_no': rdf_group_no_1, 'sync_retries': 200, 'sync_interval': 1, 'rdf_group_label': rdf_group_name_1, 'allow_extend': True, 'mode': utils.REP_METRO} ex_specs_rep_config = deepcopy(rep_extra_specs_metro) ex_specs_rep_config['array'] = array ex_specs_rep_config['rep_config'] = rep_config ex_specs_rep_config_no_extend = deepcopy(ex_specs_rep_config) ex_specs_rep_config_no_extend['rep_config']['allow_extend'] = False test_volume_type_1 = volume_type.VolumeType( id='2b06255d-f5f0-4520-a953-b029196add6a', name='abc', extra_specs=extra_specs) ex_specs_rep_config_sync = deepcopy(ex_specs_rep_config) ex_specs_rep_config_sync[utils.REP_MODE] = utils.REP_SYNC ex_specs_rep_config_sync[utils.REP_CONFIG]['mode'] = utils.REP_SYNC test_volume_type_list = volume_type.VolumeTypeList( objects=[test_volume_type_1]) test_vol_grp_name_id_only = 'ec870a2f-6bf7-4152-aa41-75aad8e2ea96' test_vol_grp_name = 'Grp_source_sg_%s' % test_vol_grp_name_id_only test_fo_vol_group = 'fo_vol_group_%s' % test_vol_grp_name_id_only test_group_1 = group.Group( context=None, name=storagegroup_name_source, group_id='abc', size=1, id=test_vol_grp_name_id_only, status='available', provider_auth=None, volume_type_ids=['abc'], group_type_id='grptypeid', volume_types=test_volume_type_list, host=fake_host, provider_location=str(provider_location)) test_group_failed = group.Group( context=None, name=failed_resource, group_id='14b8894e-54ec-450a-b168-c172a16ed166', size=1, id='318c721c-51ad-4160-bfe1-ebde2273836f', status='available', provider_auth=None, volume_type_ids=['abc'], group_type_id='grptypeid', volume_types=test_volume_type_list, host=fake_host, provider_location=str(provider_location), replication_status=fields.ReplicationStatus.DISABLED) test_rep_group = fake_group.fake_group_obj( context=ctx, name=storagegroup_name_source, id=test_vol_grp_name_id_only, host=fake_host, replication_status=fields.ReplicationStatus.ENABLED) test_rep_group2 = 
fake_group.fake_group_obj( context=ctx, replication_status=fields.ReplicationStatus.ENABLED) test_group = fake_group.fake_group_obj( context=ctx, name=storagegroup_name_source, id=test_vol_grp_name_id_only, host=fake_host) test_group_without_name = fake_group.fake_group_obj( context=ctx, name=None, id=test_vol_grp_name_id_only, host=fake_host) test_group_snapshot_1 = group_snapshot.GroupSnapshot( context=None, id='6560405d-b89a-4f79-9e81-ad1752f5a139', group_id='876d9fbb-de48-4948-9f82-15c913ed05e7', name=group_snapshot_name, group_type_id='c6934c26-dde8-4bf8-a765-82b3d0130e9f', status='available', group=test_group_1) test_group_snapshot_failed = group_snapshot.GroupSnapshot( context=None, id='0819dd5e-9aa1-4ec7-9dda-c78e51b2ad76', group_id='1fc735cb-d36c-4352-8aa6-dc1e16b5a0a7', name=failed_resource, group_type_id='6b70de13-98c5-46b2-8f24-e4e96a8988fa', status='available', group=test_group_failed) test_volume_group_member = fake_volume.fake_volume_obj( context=ctx, name='vol1', size=2, provider_auth=None, provider_location=str(provider_location), volume_type=test_volume_type, host=fake_host, replication_driver_data=str(provider_location3), group_id=test_vol_grp_name_id_only) # masking view dict masking_view_dict = { 'array': array, 'connector': connector, 'device_id': device_id, 'init_group_name': initiatorgroup_name_f, 'initiator_check': False, 'maskingview_name': masking_view_name_f, 'parent_sg_name': parent_sg_f, 'srp': srp, 'storagetype:disablecompression': False, utils.PORTGROUPNAME: port_group_name_f, 'slo': slo, 'storagegroup_name': storagegroup_name_f, 'volume_name': test_volume.name, 'workload': workload, 'replication_enabled': False, 'used_host_name': 'HostX', 'port_group_label': port_group_name_f} masking_view_dict_no_slo = deepcopy(masking_view_dict) masking_view_dict_no_slo.update( {'slo': None, 'workload': None, 'storagegroup_name': no_slo_sg_name}) masking_view_dict_compression_disabled = deepcopy(masking_view_dict) masking_view_dict_compression_disabled.update( {'storagetype:disablecompression': True, 'storagegroup_name': 'OS-HostX-SRP_1-DiamondDSS-OS-fibre-PG-CD'}) masking_view_dict_replication_enabled = deepcopy(masking_view_dict) masking_view_dict_replication_enabled.update( {'replication_enabled': True, 'storagegroup_name': 'OS-HostX-SRP_1-DiamondDSS-OS-fibre-PG-RE'}) masking_view_dict_multiattach = deepcopy(masking_view_dict) masking_view_dict_multiattach.update( {utils.EXTRA_SPECS: extra_specs, utils.IS_MULTIATTACH: True, utils.OTHER_PARENT_SG: parent_sg_i, utils.FAST_SG: storagegroup_name_i, utils.NO_SLO_SG: no_slo_sg_name}) masking_view_dict_tags = deepcopy(masking_view_dict) masking_view_dict_tags.update( {'tag_list': sg_tags}) # vmax data # sloprovisioning compression_info = {'symmetrixId': ['000197800128']} initiator_group_fc = { 'initiator': [wwpn1], 'hostId': initiatorgroup_name_f, 'maskingview': [masking_view_name_f]} initiator_group_iscsi = { 'initiator': [initiator], 'hostId': initiatorgroup_name_i, 'maskingview': [masking_view_name_i]} initiator_group_empty = { 'hostId': initiatorgroup_name_i, } initiator_group_list = [ initiator_group_fc, initiator_group_iscsi] nvme_tcp_initiator_list = ['OR-1C:001:nqn.2014-08.org.nvmexpress:' 'uuid:ac353d72-eabe-43c7-926c-f08987a8a553:' '0EAF7037479C432BA3D862E2889D768E', 'OR-2C:001:nqn.2014-08.org.nvmexpress:' 'uuid:ac353d72-eabe-43c7-926c-f08987a8a553:' '0EAF7037479C432BA3D862E2889D768E'] initiator_list = [{'host': initiatorgroup_name_f, 'initiatorId': wwpn1, 'maskingview': [masking_view_name_f]}, {'host': 
initiatorgroup_name_i, 'initiatorId': initiator, 'maskingview': [masking_view_name_i]}, {'initiatorId': [ 'FA-1D:4:' + wwpn1, 'SE-4E:0:' + initiator]}] maskingview = [{'maskingViewId': masking_view_name_f, 'portGroupId': port_group_name_f, 'storageGroupId': storagegroup_name_f, 'hostId': initiatorgroup_name_f, 'maskingViewConnection': [ {'host_lun_address': '0003'}]}, {'maskingViewId': masking_view_name_i, 'portGroupId': port_group_name_i, 'storageGroupId': storagegroup_name_i, 'hostId': initiatorgroup_name_i, 'maskingViewConnection': [ {'host_lun_address': '0003'}]}, {}] maskingview_no_lun = { 'maskingViewId': masking_view_name_f, 'portGroupId': port_group_name_f, 'storageGroupId': storagegroup_name_f, 'hostId': initiatorgroup_name_f, 'maskingViewConnection': []} portgroup = [{'portGroupId': port_group_name_f, 'symmetrixPortKey': [ {'directorId': 'FA-1D', 'portId': '4'}], 'maskingview': [masking_view_name_f]}, {'portGroupId': port_group_name_i, 'symmetrixPortKey': [ {'directorId': 'SE-4E', 'portId': '0'}], 'maskingview': [masking_view_name_i]}] port_list = [ {'symmetrixPort': {'num_of_masking_views': 1, 'maskingview': [masking_view_name_f], 'identifier': wwpn1, 'symmetrixPortKey': { 'directorId': 'FA-1D', 'portId': '4'}, 'portgroup': [port_group_name_f]}}, {'symmetrixPort': {'identifier': initiator, 'symmetrixPortKey': { 'directorId': 'SE-4E', 'portId': '0'}, 'ip_addresses': [ip], 'num_of_masking_views': 1, 'maskingview': [masking_view_name_i], 'portgroup': [port_group_name_i]}}] sg_details = [{'srp': srp, 'num_of_vols': 2, 'cap_gb': 2, 'storageGroupId': defaultstoragegroup_name, 'slo': slo, 'workload': workload}, {'srp': srp, 'num_of_vols': 2, 'cap_gb': 2, 'storageGroupId': storagegroup_name_f, 'slo': slo, 'workload': workload, 'maskingview': [masking_view_name_f], 'parent_storage_group': [parent_sg_f]}, {'srp': srp, 'num_of_vols': 2, 'cap_gb': 2, 'storageGroupId': storagegroup_name_i, 'slo': slo, 'workload': workload, 'maskingview': [masking_view_name_i], 'parent_storage_group': [parent_sg_i]}, {'num_of_vols': 2, 'cap_gb': 2, 'storageGroupId': parent_sg_f, 'num_of_child_sgs': 1, 'child_storage_group': [storagegroup_name_f], 'maskingview': [masking_view_name_f]}, {'num_of_vols': 2, 'cap_gb': 2, 'storageGroupId': parent_sg_i, 'num_of_child_sgs': 1, 'child_storage_group': [storagegroup_name_i], 'maskingview': [masking_view_name_i], }, {'srp': srp, 'num_of_vols': 2, 'cap_gb': 2, 'storageGroupId': no_slo_sg_name, 'slo': None, 'workload': None, 'maskingview': [masking_view_name_i], 'parent_storage_group': [parent_sg_i]} ] sg_details_rep = [{'childNames': [], 'numDevicesNonGk': 2, 'isLinkTarget': False, 'rdf': True, 'capacityGB': 2.0, 'name': storagegroup_name_source, 'snapVXSnapshots': ['6560405d-752f5a139'], 'symmetrixId': array, 'numSnapVXSnapshots': 1}] sg_rdf_details = [{'storageGroupName': test_vol_grp_name, 'symmetrixId': array, 'modes': ['Synchronous'], 'rdfGroupNumber': rdf_group_no_1, 'states': ['Synchronized']}, {'storageGroupName': test_fo_vol_group, 'symmetrixId': array, 'modes': ['Synchronous'], 'rdfGroupNumber': rdf_group_no_1, 'states': ['Failed Over']}] sg_rdf_group_details = { "storageGroupName": test_vol_grp_name, "symmetrixId": array, "volumeRdfTypes": ["R1"], "modes": ["Asynchronous"], "totalTracks": 8205, "largerRdfSides": ["Equal"], "rdfGroupNumber": 1, "states": ["suspended"]} sg_list = {'storageGroupId': [storagegroup_name_f, defaultstoragegroup_name]} sg_list_rep = [storagegroup_name_with_id] srp_details = {'srp_capacity': {u'subscribed_total_tb': 93.52, 
u'usable_used_tb': 8.62, u'usable_total_tb': 24.45, u'snapshot_modified_tb': 0.0, u'subscribed_allocated_tb': 18.77, u'snapshot_total_tb': 1.58}, 'srpId': srp, 'reserved_cap_percent': 10} array_info_wl = {'RestServerIp': '1.1.1.1', 'RestServerPort': 3448, 'RestUserName': 'smc', 'RestPassword': 'smc', 'SSLVerify': False, 'SerialNumber': array, 'srpName': 'SRP_1', 'PortGroup': port_group_name_i, 'SLO': 'Diamond', 'Workload': 'OLTP'} array_info_no_wl = {'RestServerIp': '1.1.1.1', 'RestServerPort': 3448, 'RestUserName': 'smc', 'RestPassword': 'smc', 'SSLVerify': False, 'SerialNumber': array, 'srpName': 'SRP_1', 'PortGroup': port_group_name_i, 'SLO': 'Diamond'} volume_details = [{'cap_gb': 2, 'cap_cyl': 1092, 'num_of_storage_groups': 1, 'volumeId': device_id, 'volume_identifier': 'OS-%s' % test_volume.id, 'wwn': volume_wwn, 'snapvx_target': 'false', 'snapvx_source': 'false', 'storageGroupId': [defaultstoragegroup_name, storagegroup_name_f]}, {'cap_gb': 1, 'cap_cyl': 546, 'num_of_storage_groups': 1, 'volumeId': device_id2, 'volume_identifier': 'OS-%s' % test_volume.id, 'wwn': '600012345', 'storageGroupId': [defaultstoragegroup_name, storagegroup_name_f]}, {'cap_gb': 1, 'cap_cyl': 546, 'num_of_storage_groups': 0, 'volumeId': device_id3, 'volume_identifier': '123', 'wwn': '600012345'}, {'cap_gb': 1, 'cap_cyl': 546, 'num_of_storage_groups': 1, 'volumeId': device_id4, 'volume_identifier': 'random_name', 'wwn': '600012345', 'storageGroupId': ['random_sg_1', 'random_sg_2']}, ] volume_details_attached = {'cap_gb': 2, 'num_of_storage_groups': 1, 'volumeId': device_id, 'volume_identifier': 'OS-%s' % test_volume.id, 'wwn': volume_wwn, 'snapvx_target': 'false', 'snapvx_source': 'false', 'storageGroupId': [storagegroup_name_f]} volume_details_no_sg = {'cap_gb': 2, 'num_of_storage_groups': 1, 'volumeId': device_id, 'volume_identifier': 'OS-%s' % test_volume.id, 'wwn': volume_wwn, 'snapvx_target': 'false', 'snapvx_source': 'false', 'storageGroupId': []} volume_details_attached_async = ( {'cap_gb': 2, 'num_of_storage_groups': 1, 'volumeId': device_id, 'volume_identifier': 'OS-%s' % test_volume.id, 'wwn': volume_wwn, 'snapvx_target': 'false', 'snapvx_source': 'false', 'storageGroupId': [ rdf_managed_async_grp, storagegroup_name_f + '-RA']}) volume_details_legacy = {'cap_gb': 2, 'num_of_storage_groups': 1, 'volumeId': device_id, 'volume_identifier': test_volume.id, 'wwn': volume_wwn, 'snapvx_target': 'false', 'snapvx_source': 'false', 'storageGroupId': []} volume_list = [ {'id': '6b70de13-98c5-46b2-8f24-e4e96a8988fa', 'count': 2, 'maxPageSize': 1, 'resultList': {'result': [{'volumeId': device_id}], 'from': 0, 'to': 1}}, {'resultList': {'result': [{'volumeId': device_id2}]}}, {'id': '6b70de13-98c5-46b2-8f24-e4e96a8988fa', 'count': 2, 'maxPageSize': 1, 'resultList': {'result': [{'volumeId': device_id}, {'volumeId': device_id2}], 'from': 0, 'to': 1}}] private_vol_details = { 'id': '6b70de13-98c5-46b2-8f24-e4e96a8988fa', 'count': 2, 'maxPageSize': 1, 'resultList': { 'result': [{ 'timeFinderInfo': { 'snapVXSession': [ {'srcSnapshotGenInfo': [ {'snapshotHeader': { 'snapshotName': 'temp-1', 'device': device_id, 'snapid': snap_id}, 'lnkSnapshotGenInfo': [ {'targetDevice': device_id2, 'state': 'Copied'}]}]}, {'tgtSrcSnapshotGenInfo': { 'snapshotName': 'temp-1', 'targetDevice': device_id2, 'sourceDevice': device_id, 'snapid': snap_id_2, 'state': 'Copied'}}], 'snapVXSrc': 'true', 'snapVXTgt': 'true'}, 'rdfInfo': {'RDFSession': [ {'SRDFStatus': 'Ready', 'pairState': 'Synchronized', 'remoteDeviceID': device_id2, 
'remoteSymmetrixID': remote_array}]}}], 'from': 0, 'to': 1}} # Service Levels / Workloads workloadtype = {'workloadId': ['OLTP', 'OLTP_REP', 'DSS', 'DSS_REP']} srp_slo_details = {'serviceLevelDemand': [ {'serviceLevelId': 'None'}, {'serviceLevelId': 'Diamond'}, {'serviceLevelId': 'Gold'}, {'serviceLevelId': 'Optimized'}]} slo_details = ['None', 'Diamond', 'Gold', 'Optimized'] powermax_slo_details = {'sloId': ['Bronze', 'Diamond', 'Gold', 'Optimized', 'Platinum', 'Silver']} powermax_model_details = {'symmetrixId': array, 'model': 'PowerMax_2000', 'ucode': '5978.1091.1092'} powermax_model_100 = {'symmetrixId': array, 'model': 'PowerMax_2500', 'microcode': '6079.65.0'} vmax_slo_details = {'sloId': ['Diamond', 'Optimized']} vmax_model_details = {'model': 'VMAX450F'} # replication volume_snap_vx = {'snapshotLnks': [], 'snapshotSrcs': [ {'snap_id': snap_id, 'linkedDevices': [ {'targetDevice': device_id2, 'percentageCopied': 100, 'state': 'Copied', 'copy': True, 'defined': True, 'linked': True, 'snap_id': snap_id}], 'snapshotName': test_snapshot_snap_name, 'state': 'Established'}]} capabilities = {'symmetrixCapability': [{'rdfCapable': True, 'snapVxCapable': True, 'symmetrixId': '0001111111'}, {'symmetrixId': array, 'snapVxCapable': True, 'rdfCapable': True}]} group_snap_vx = {'generation': 0, 'isLinked': False, 'numUniqueTracks': 0, 'isRestored': False, 'name': group_snapshot_name, 'numStorageGroupVolumes': 1, 'state': ['Established'], 'timeToLiveExpiryDate': 'N/A', 'isExpired': False, 'numSharedTracks': 0, 'timestamp': '00:30:50 Fri, 02 Jun 2017 IST +0100', 'numSourceVolumes': 1 } group_snap_vx_1 = {'generation': 0, 'isLinked': False, 'numUniqueTracks': 0, 'isRestored': False, 'name': group_snapshot_name, 'numStorageGroupVolumes': 1, 'state': ['Copied'], 'timeToLiveExpiryDate': 'N/A', 'isExpired': False, 'numSharedTracks': 0, 'timestamp': '00:30:50 Fri, 02 Jun 2017 IST +0100', 'numSourceVolumes': 1, 'linkedStorageGroup': {'name': target_group_name, 'percentageCopied': 100}, } grp_snapvx_links = [{'name': target_group_name, 'percentageCopied': 100}, {'name': 'another-target', 'percentageCopied': 90}] rdf_group_list = {'rdfGroupID': [{'rdfgNumber': rdf_group_no_1, 'label': rdf_group_name_1}, {'rdfgNumber': rdf_group_no_2, 'label': rdf_group_name_2}, {'rdfgNumber': rdf_group_no_3, 'label': rdf_group_name_3}, {'rdfgNumber': rdf_group_no_4, 'label': rdf_group_name_4}]} rdf_group_details = {'modes': ['Synchronous'], 'remoteSymmetrix': remote_array, 'label': rdf_group_name_1, 'type': 'Dynamic', 'numDevices': 1, 'remoteRdfgNumber': rdf_group_no_1, 'rdfgNumber': rdf_group_no_1} rdf_group_vol_details = {'remoteRdfGroupNumber': rdf_group_no_1, 'localSymmetrixId': array, 'volumeConfig': 'RDF1+TDEV', 'localRdfGroupNumber': rdf_group_no_1, 'localVolumeName': device_id, 'rdfpairState': 'Synchronized', 'remoteVolumeName': device_id2, 'localVolumeState': 'Ready', 'rdfMode': 'Synchronous', 'remoteVolumeState': 'Write Disabled', 'remoteSymmetrixId': remote_array} rdf_group_vol_details_not_synced = { 'remoteRdfGroupNumber': rdf_group_no_1, 'localSymmetrixId': array, 'volumeConfig': 'RDF1+TDEV', 'localRdfGroupNumber': rdf_group_no_1, 'localVolumeName': device_id, 'rdfpairState': 'syncinprog', 'remoteVolumeName': device_id2, 'localVolumeState': 'Ready', 'rdfMode': 'Synchronous', 'remoteVolumeState': 'Write Disabled', 'remoteSymmetrixId': remote_array} # system job_list = [{'status': 'SUCCEEDED', 'jobId': '12345', 'result': 'created', 'resourceLink': 'storagegroup/%s' % storagegroup_name_f}, {'status': 'RUNNING', 
'jobId': '55555'}, {'status': 'FAILED', 'jobId': '09999'}] symmetrix = [{'symmetrixId': array, 'model': 'VMAX250F', 'ucode': '5977.1091.1092'}, {'symmetrixId': array_herc, 'model': 'PowerMax 2000', 'ucode': '5978.1091.1092'}] version_details = {'version': 'V9.2.0.0'} headroom = {'headroom': [{'headroomCapacity': 20348.29}]} ucode_5978_foxtail = {'ucode': '5978.435.435'} p_vol_rest_response_single = { 'id': 'f3aab01c-a5a8-4fb4-af2b-16ae1c46dc9e_0', 'count': 1, 'expirationTime': 1521650650793, 'maxPageSize': 1000, 'resultList': {'to': 1, 'from': 1, 'result': [ {'volumeHeader': { 'capGB': 1.0, 'capMB': 1026.0, 'volumeId': '00001', 'status': 'Ready', 'configuration': 'TDEV'}}]}} p_vol_rest_response_none = { 'id': 'f3aab01c-a5a8-4fb4-af2b-16ae1c46dc9e_0', 'count': 0, 'expirationTime': 1521650650793, 'maxPageSize': 1000, 'resultList': {'to': 0, 'from': 0, 'result': []}} p_vol_rest_response_iterator_1 = { 'id': 'f3aab01c-a5a8-4fb4-af2b-16ae1c46dc9e_0', 'count': 1500, 'expirationTime': 1521650650793, 'maxPageSize': 1000, 'resultList': {'to': 1, 'from': 1, 'result': [ {'volumeHeader': { 'capGB': 1.0, 'capMB': 1026.0, 'volumeId': '00002', 'status': 'Ready', 'configuration': 'TDEV'}}]}} p_vol_rest_response_iterator_2 = { 'to': 2000, 'from': 1001, 'result': [ {'volumeHeader': { 'capGB': 1.0, 'capMB': 1026.0, 'volumeId': '00001', 'status': 'Ready', 'configuration': 'TDEV'}}]} rest_iterator_resonse_one = { 'to': 1000, 'from': 1, 'result': [ {'volumeHeader': { 'capGB': 1.0, 'capMB': 1026.0, 'volumeId': '00001', 'status': 'Ready', 'configuration': 'TDEV'}}]} rest_iterator_resonse_two = { 'to': 1500, 'from': 1001, 'result': [ {'volumeHeader': { 'capGB': 1.0, 'capMB': 1026.0, 'volumeId': '00002', 'status': 'Ready', 'configuration': 'TDEV'}}]} # COMMON.PY priv_vol_func_response_single = [ {'volumeHeader': { 'private': False, 'capGB': 1.0, 'capMB': 1026.0, 'serviceState': 'Normal', 'emulationType': 'FBA', 'volumeId': '00001', 'status': 'Ready', 'mapped': False, 'numStorageGroups': 0, 'reservationInfo': {'reserved': False}, 'encapsulated': False, 'formattedName': '00001', 'system_resource': False, 'numSymDevMaskingViews': 0, 'nameModifier': "", "userDefinedIdentifier": "N/A", 'configuration': 'TDEV'}, 'maskingInfo': {'masked': False}, 'rdfInfo': { 'dynamicRDF': False, 'RDF': False, 'concurrentRDF': False, 'getDynamicRDFCapability': 'RDF1_Capable', 'RDFA': False}, 'timeFinderInfo': { 'mirror': False, 'snapVXTgt': False, 'cloneTarget': False, 'cloneSrc': False, 'snapVXSrc': True, 'snapVXSession': [ {'srcSnapshotGenInfo': [ {'snapshotHeader': { 'timestamp': 1512763278000, 'expired': False, 'secured': False, 'snapshotName': 'testSnap1', 'device': '00001', 'snapid': snap_id, 'timeToLive': 0, 'generation': 0 }}]}]}}] priv_vol_func_response_multi = [ {'volumeHeader': { 'private': False, 'capGB': 100.0, 'capMB': 102400.0, 'serviceState': 'Normal', 'emulationType': 'FBA', 'volumeId': '00001', 'status': 'Ready', 'numStorageGroups': 0, 'reservationInfo': {'reserved': False}, 'mapped': False, 'encapsulated': False, 'formattedName': '00001', 'system_resource': False, 'numSymDevMaskingViews': 0, 'nameModifier': "", "userDefinedIdentifier": "N/A", 'configuration': 'TDEV'}, 'rdfInfo': { 'dynamicRDF': False, 'RDF': False, 'concurrentRDF': False, 'getDynamicRDFCapability': 'RDF1_Capable', 'RDFA': False}, 'maskingInfo': {'masked': False}, 'timeFinderInfo': { 'mirror': False, 'snapVXTgt': False, 'cloneTarget': False, 'cloneSrc': False, 'snapVXSrc': True, 'snapVXSession': [ {'srcSnapshotGenInfo': [ {'snapshotHeader': { 
'timestamp': 1512763278000, 'expired': False, 'secured': False, 'snapshotName': 'testSnap1', 'device': '00001', 'snapid': snap_id, 'timeToLive': 0, 'generation': 0 }}]}]}}, {'volumeHeader': { 'private': False, 'capGB': 200.0, 'capMB': 204800.0, 'serviceState': 'Normal', 'emulationType': 'FBA', 'volumeId': '00002', 'status': 'Ready', 'numStorageGroups': 0, 'reservationInfo': {'reserved': False}, 'mapped': False, 'encapsulated': False, 'formattedName': '00002', 'system_resource': False, 'numSymDevMaskingViews': 0, 'nameModifier': "", "userDefinedIdentifier": "N/A", 'configuration': 'TDEV'}, 'rdfInfo': { 'dynamicRDF': False, 'RDF': False, 'concurrentRDF': False, 'getDynamicRDFCapability': 'RDF1_Capable', 'RDFA': False}, 'maskingInfo': {'masked': False}, 'timeFinderInfo': { 'mirror': False, 'snapVXTgt': False, 'cloneTarget': False, 'cloneSrc': False, 'snapVXSrc': True, 'snapVXSession': [ {'srcSnapshotGenInfo': [ {'snapshotHeader': { 'timestamp': 1512763278000, 'expired': False, 'secured': False, 'snapshotName': 'testSnap2', 'device': '00002', 'snapid': snap_id, 'timeToLive': 0, 'generation': 0 }}]}]}}, {'volumeHeader': { 'private': False, 'capGB': 300.0, 'capMB': 307200.0, 'serviceState': 'Normal', 'emulationType': 'FBA', 'volumeId': '00003', 'status': 'Ready', 'numStorageGroups': 0, 'reservationInfo': {'reserved': False}, 'mapped': False, 'encapsulated': False, 'formattedName': '00003', 'system_resource': False, 'numSymDevMaskingViews': 0, 'nameModifier': "", "userDefinedIdentifier": "N/A", 'configuration': 'TDEV'}, 'rdfInfo': { 'dynamicRDF': False, 'RDF': False, 'concurrentRDF': False, 'getDynamicRDFCapability': 'RDF1_Capable', 'RDFA': False}, 'maskingInfo': {'masked': False}, 'timeFinderInfo': { 'mirror': False, 'snapVXTgt': False, 'cloneTarget': False, 'cloneSrc': False, 'snapVXSrc': True, 'snapVXSession': [ {'srcSnapshotGenInfo': [ {'snapshotHeader': { 'timestamp': 1512763278000, 'expired': False, 'secured': False, 'snapshotName': 'testSnap3', 'device': '00003', 'snapid': snap_id, 'timeToLive': 0, 'generation': 0 }}]}]}}, {'volumeHeader': { 'private': False, 'capGB': 400.0, 'capMB': 409600.0, 'serviceState': 'Normal', 'emulationType': 'FBA', 'volumeId': '00004', 'status': 'Ready', 'numStorageGroups': 0, 'reservationInfo': {'reserved': False}, 'mapped': False, 'encapsulated': False, 'formattedName': '00004', 'system_resource': False, 'numSymDevMaskingViews': 0, 'nameModifier': "", "userDefinedIdentifier": "N/A", 'configuration': 'TDEV'}, 'rdfInfo': { 'dynamicRDF': False, 'RDF': False, 'concurrentRDF': False, 'getDynamicRDFCapability': 'RDF1_Capable', 'RDFA': False}, 'maskingInfo': {'masked': False}, 'timeFinderInfo': { 'mirror': False, 'snapVXTgt': False, 'cloneTarget': False, 'cloneSrc': False, 'snapVXSrc': True, 'snapVXSession': [ {'srcSnapshotGenInfo': [ {'snapshotHeader': { 'timestamp': 1512763278000, 'expired': False, 'secured': False, 'snapshotName': 'testSnap4', 'device': '00004', 'snapid': snap_id, 'timeToLive': 0, 'generation': 0 }}]}]}}] priv_vol_func_response_multi_invalid = [ {'volumeHeader': { 'private': False, 'capGB': 1.0, 'capMB': 10.0, 'serviceState': 'Normal', 'emulationType': 'FBA', 'volumeId': '00001', 'status': 'Ready', 'mapped': False, 'numStorageGroups': 0, 'reservationInfo': {'reserved': False}, 'encapsulated': False, 'formattedName': '00001', 'system_resource': False, 'numSymDevMaskingViews': 0, 'nameModifier': "", "userDefinedIdentifier": "N/A", 'configuration': 'TDEV'}, 'maskingInfo': {'masked': False}, 'rdfInfo': { 'dynamicRDF': False, 'RDF': False, 
'concurrentRDF': False, 'getDynamicRDFCapability': 'RDF1_Capable', 'RDFA': False}, 'timeFinderInfo': {'snapVXTgt': False, 'snapVXSrc': False}}, {'volumeHeader': { 'private': False, 'capGB': 1.0, 'capMB': 1026.0, 'serviceState': 'Normal', 'emulationType': 'FBA', 'volumeId': '00002', 'status': 'Ready', 'mapped': False, 'numStorageGroups': 0, 'reservationInfo': {'reserved': False}, 'encapsulated': False, 'formattedName': '00002', 'system_resource': False, 'numSymDevMaskingViews': 1, 'nameModifier': "", "userDefinedIdentifier": "N/A", 'configuration': 'TDEV'}, 'maskingInfo': {'masked': False}, 'rdfInfo': { 'dynamicRDF': False, 'RDF': False, 'concurrentRDF': False, 'getDynamicRDFCapability': 'RDF1_Capable', 'RDFA': False}, 'timeFinderInfo': {'snapVXTgt': False, 'snapVXSrc': False}}, {'volumeHeader': { 'private': False, 'capGB': 1.0, 'capMB': 1026.0, 'serviceState': 'Normal', 'emulationType': 'CKD', 'volumeId': '00003', 'status': 'Ready', 'mapped': False, 'numStorageGroups': 0, 'reservationInfo': {'reserved': False}, 'encapsulated': False, 'formattedName': '00003', 'system_resource': False, 'numSymDevMaskingViews': 0, 'nameModifier': "", "userDefinedIdentifier": "N/A", 'configuration': 'TDEV'}, 'maskingInfo': {'masked': False}, 'rdfInfo': { 'dynamicRDF': False, 'RDF': False, 'concurrentRDF': False, 'getDynamicRDFCapability': 'RDF1_Capable', 'RDFA': False}, 'timeFinderInfo': {'snapVXTgt': False, 'snapVXSrc': False}}, {'volumeHeader': { 'private': False, 'capGB': 1.0, 'capMB': 1026.0, 'serviceState': 'Normal', 'emulationType': 'FBA', 'volumeId': '00004', 'status': 'Ready', 'mapped': False, 'numStorageGroups': 0, 'reservationInfo': {'reserved': False}, 'encapsulated': False, 'formattedName': '00004', 'system_resource': False, 'numSymDevMaskingViews': 0, 'nameModifier': "", "userDefinedIdentifier": "N/A", 'configuration': 'TDEV'}, 'maskingInfo': {'masked': False}, 'rdfInfo': { 'dynamicRDF': False, 'RDF': False, 'concurrentRDF': False, 'getDynamicRDFCapability': 'RDF1_Capable', 'RDFA': False}, 'timeFinderInfo': {'snapVXTgt': True, 'snapVXSrc': False}}, {'volumeHeader': { 'private': False, 'capGB': 1.0, 'capMB': 1026.0, 'serviceState': 'Normal', 'emulationType': 'FBA', 'volumeId': '00005', 'status': 'Ready', 'mapped': False, 'numStorageGroups': 0, 'reservationInfo': {'reserved': False}, 'encapsulated': False, 'formattedName': '00005', 'system_resource': False, 'numSymDevMaskingViews': 0, 'nameModifier': 'OS-vol', "userDefinedIdentifier": "OS-vol", 'configuration': 'TDEV'}, 'maskingInfo': {'masked': False}, 'rdfInfo': { 'dynamicRDF': False, 'RDF': False, 'concurrentRDF': False, 'getDynamicRDFCapability': 'RDF1_Capable', 'RDFA': False}, 'timeFinderInfo': {'snapVXTgt': False, 'snapVXSrc': False}}] priv_vol_func_response_multi_sg = deepcopy(priv_vol_func_response_single) priv_vol_func_response_multi_sg[0].get('volumeHeader').update( {'numStorageGroups': 2}) priv_vol_func_response_multi_sg[0].get('volumeHeader').update( {'storageGroup': ['SG1', 'SG2']}) volume_create_info_dict = {utils.ARRAY: array, utils.DEVICE_ID: device_id} volume_info_dict = { 'volume_id': volume_id, 'service_level': 'Diamond', 'masking_view': 'OS-HostX-F-OS-fibre-PG-MV', 'host': fake_host, 'display_name': 'attach_vol_name', 'volume_updated_time': '2018-03-05 20:32:41', 'port_group': 'OS-fibre-PG', 'operation': 'attach', 'srp': 'SRP_1', 'initiator_group': 'OS-HostX-F-IG', 'serial_number': '000197800123', 'parent_storage_group': 'OS-HostX-F-OS-fibre-PG-SG', 'workload': 'DSS', 'child_storage_group': 
'OS-HostX-SRP_1-DiamondDSS-OS-fibre-PG'} add_volume_sg_info_dict = { "storageGroupId": defaultstoragegroup_name, "slo": "Optimized", "service_level": "Optimized", "base_slo_name": "Optimized", "srp": "SRP_1", "slo_compliance": "NONE", "num_of_vols": 39, "num_of_child_sgs": 0, "num_of_parent_sgs": 0, "num_of_masking_views": 0, "num_of_snapshots": 0, "cap_gb": 109.06, "device_emulation": "FBA", "type": "Standalone", "unprotected": "true", "compression": "true", "compressionRatio": "1.0:1", "compression_ratio_to_one": 1, "vp_saved_percent": 99.9 } storage_group_with_tags = deepcopy(add_volume_sg_info_dict) storage_group_with_tags.update({"tags": sg_tags}) data_dict = {volume_id: volume_info_dict} platform = 'Linux-4.4.0-104-generic-x86_64-with-Ubuntu-16.04-xenial' unisphere_version = u'V9.2.0.0' unisphere_version_90 = "V9.0.0.1" unisphere_version_100 = "V10.0.0.0" openstack_release = '12.0.0.0b3.dev401' openstack_version = '12.0.0' python_version = '2.7.12' vmax_driver_version = '4.1' vmax_firmware_version = u'5977.1125.1125' vmax_model = u'VMAX250F' version_dict = { 'unisphere_for_powermax_version': unisphere_version, 'openstack_release': openstack_release, 'openstack_version': openstack_version, 'python_version': python_version, 'powermax_cinder_driver_version': vmax_driver_version, 'openstack_platform': platform, 'storage_firmware_version': vmax_firmware_version, 'serial_number': array, 'storage_model': vmax_model} u4p_failover_config = { 'u4p_failover_backoff_factor': '2', 'u4p_failover_retries': '3', 'u4p_failover_timeout': '10', 'u4p_primary': '10.10.10.10', 'u4p_failover_autofailback': 'True', 'u4p_failover_targets': [ {'san_ip': '10.10.10.11', 'san_api_port': '8443', 'san_login': 'test', 'san_password': 'test', 'driver_ssl_cert_verify': '/path/to/cert', 'driver_ssl_cert_path': 'True'}, {'san_ip': '10.10.10.12', 'san_api_port': '8443', 'san_login': 'test', 'san_password': 'test', 'driver_ssl_cert_verify': 'True'}, {'san_ip': '10.10.10.11', 'san_api_port': '8443', 'san_login': 'test', 'san_password': 'test', 'driver_ssl_cert_verify': '/path/to/cert', 'driver_ssl_cert_path': 'False'}]} u4p_failover_target = [{ 'RestServerIp': '10.10.10.11', 'RestServerPort': '8443', 'RestUserName': 'test', 'RestPassword': 'test', 'SSLVerify': '/path/to/cert', 'SerialNumber': array}, {'RestServerIp': '10.10.10.12', 'RestServerPort': '8443', 'RestUserName': 'test', 'RestPassword': 'test', 'SSLVerify': 'True', 'SerialNumber': array}] snapshot_src_details = {'snapshotSrcs': [{ 'snapshotName': 'temp-000AA-snapshot_for_clone', 'snap_id': snap_id, 'state': 'Established', 'expired': False, 'linkedDevices': [{'targetDevice': device_id2, 'state': 'Copied', 'copy': True}]}, {'snapshotName': 'temp-000AA-snapshot_for_clone', 'snap_id': snap_id_2, 'state': 'Established', 'expired': False, 'linkedDevices': [{'targetDevice': device_id3, 'state': 'Copied', 'copy': True}]}], 'snapshotLnks': []} snapshot_tgt_details = {"snapshotLnks": [{ "linkSourceName": device_id2, "state": "Linked", "copy": False}]} snap_tgt_vol_details = {"timeFinderInfo": {"snapVXSession": [{ "tgtSrcSnapshotGenInfo": { "snapid": snap_id, "expired": True, "snapshotName": "temp-000AA-snapshot_for_clone"}}]}} snap_tgt_session = { 'snapid': snap_id, 'expired': False, 'copy_mode': False, 'snap_name': 'temp-000AA-snapshot_for_clone', 'state': 'Copied', 'source_vol_id': device_id, 'target_vol_id': device_id2} snap_tgt_session_cm_enabled = { 'snapid': snap_id, 'expired': False, 'copy_mode': True, 'snap_name': 'temp-000AA-snapshot_for_clone', 'state': 
'Copied', 'source_vol_id': device_id, 'target_vol_id': device_id2} snap_src_sessions = [ {'snapid': snap_id, 'expired': False, 'copy_mode': False, 'snap_name': 'temp-000AA-snapshot_for_clone', 'state': 'Copied', 'source_vol_id': device_id, 'target_vol_id': device_id3}, {'snapid': snap_id_2, 'expired': False, 'copy_mode': False, 'snap_name': 'temp-000AA-snapshot_for_clone', 'state': 'Copied', 'source_vol_id': device_id, 'target_vol_id': device_id4}] device_label = 'OS-00001' priv_vol_response_rep = { 'volumeHeader': { 'private': False, 'capGB': 1.0, 'capMB': 1026.0, 'serviceState': 'Normal', 'emulationType': 'FBA', 'volumeId': '00001', 'status': 'Ready', 'mapped': False, 'numStorageGroups': 0, 'reservationInfo': {'reserved': False}, 'encapsulated': False, 'formattedName': '00001', 'system_resource': False, 'numSymDevMaskingViews': 0, 'nameModifier': "", 'configuration': 'TDEV', 'userDefinedIdentifier': 'OS-00001'}, 'maskingInfo': {'masked': False}, 'rdfInfo': { 'dynamicRDF': False, 'RDF': True, 'concurrentRDF': False, 'getDynamicRDFCapability': 'RDF1_Capable', 'RDFA': False, 'RDFSession': [ {'SRDFStatus': 'Ready', 'SRDFReplicationMode': 'Synchronized', 'remoteDeviceID': device_id2, 'remoteSymmetrixID': remote_array, 'SRDFGroupNumber': 1, 'SRDFRemoteGroupNumber': 1}]}} priv_vol_response_metro_active_rep = { 'volumeHeader': { 'private': False, 'capGB': 1.0, 'capMB': 1026.0, 'serviceState': 'Normal', 'emulationType': 'FBA', 'volumeId': '00001', 'status': 'Ready', 'mapped': False, 'numStorageGroups': 0, 'reservationInfo': {'reserved': False}, 'encapsulated': False, 'formattedName': '00001', 'system_resource': False, 'numSymDevMaskingViews': 0, 'nameModifier': "", 'configuration': 'TDEV', 'userDefinedIdentifier': 'OS-00001'}, 'maskingInfo': {'masked': False}, 'rdfInfo': { 'dynamicRDF': False, 'RDF': True, 'concurrentRDF': False, 'getDynamicRDFCapability': 'RDF1_Capable', 'RDFA': False, 'RDFSession': [ {'SRDFStatus': 'Ready', 'SRDFReplicationMode': 'Active', 'remoteDeviceID': device_id2, 'remoteSymmetrixID': remote_array, 'SRDFGroupNumber': 1, 'SRDFRemoteGroupNumber': 1}]}} priv_vol_response_no_rep = { 'volumeHeader': { 'private': False, 'capGB': 1.0, 'capMB': 1026.0, 'serviceState': 'Normal', 'emulationType': 'FBA', 'volumeId': '00001', 'status': 'Ready', 'mapped': False, 'numStorageGroups': 0, 'reservationInfo': {'reserved': False}, 'encapsulated': False, 'formattedName': '00001', 'system_resource': False, 'numSymDevMaskingViews': 0, 'nameModifier': "", 'configuration': 'TDEV', 'userDefinedIdentifier': 'OS-00001'}, 'maskingInfo': {'masked': False}, 'rdfInfo': {'RDF': False}} snap_device_label = ('%(dev)s:%(label)s' % {'dev': device_id, 'label': managed_snap_id}) priv_snap_response = { 'deviceName': snap_device_label, 'snapshotLnks': [], 'snapshotSrcs': [ {'snap_id': snap_id, 'linkedDevices': [ {'targetDevice': device_id2, 'percentageCopied': 100, 'state': 'Copied', 'copy': True, 'defined': True, 'linked': True}], 'snapshotName': test_snapshot_snap_name, 'state': 'Established'}]} priv_snap_response_no_label = deepcopy(priv_snap_response) priv_snap_response_no_label.update({'deviceName': device_id}) volume_metadata = { 'DeviceID': device_id, 'ArrayID': array, 'ArrayModel': array_model} # retype metadata dict retype_metadata_dict = { 'device_id': device_id, 'rdf_group_no': '10', 'remote_array': remote_array, 'target_device_id': device_id, 'rep_mode': 'Asynchronous', 'replication_status': 'enabled', 'target_array_model': array_model} retype_metadata_dict2 = { 'default_sg_name': 'default-sg', 
'service_level': 'Diamond' } rep_info_dict = { 'device_id': device_id, 'local_array': array, 'remote_array': remote_array, 'target_device_id': device_id2, 'target_name': 'test_vol', 'rdf_group_no': rdf_group_no_1, 'rep_mode': 'Metro', 'replication_status': 'Enabled', 'rdf_group_label': rdf_group_name_1, 'target_array_model': array_model, 'rdf_mgmt_grp': rdf_managed_async_grp} create_vol_with_replication_payload = { 'executionOption': 'ASYNCHRONOUS', 'editStorageGroupActionParam': { 'expandStorageGroupParam': { 'addVolumeParam': { 'emulation': 'FBA', 'create_new_volumes': 'True', 'volumeAttributes': [ {'num_of_vols': 1, 'volumeIdentifier': { 'identifier_name': ( volume_details[0]['volume_identifier']), 'volumeIdentifierChoice': 'identifier_name'}, 'volume_size': test_volume.size, 'capacityUnit': 'GB'}], 'remoteSymmSGInfoParam': { 'force': 'true', 'remote_symmetrix_1_id': remote_array, 'remote_symmetrix_1_sgs': [ defaultstoragegroup_name]}}}}} r1_sg_list = [default_sg_no_slo_re_enabled, rdf_managed_async_grp] r2_sg_list = deepcopy(r1_sg_list) replication_model = ( {'provider_location': str(provider_location), 'metadata': {'DeviceID': device_id, 'DeviceLabel': 'OS-%s' % volume_id, 'ArrayID': array, 'ArrayModel': array_model, 'ServiceLevel': 'Silver', 'Workload': 'NONE', 'Emulation': 'FBA', 'Configuration': 'TDEV', 'CompressionDisabled': False, 'R2-DeviceID': device_id2, 'R2-ArrayID': remote_array, 'R2-ArrayModel': array_model, 'ReplicationMode': 'Synchronous', 'RDFG-Label': rdf_group_name_1, 'R1-RDFG': rdf_group_no_1, 'R2-RDFG': rdf_group_no_1}}) non_replication_model = ( {'provider_location': str(provider_location), 'metadata': {'DeviceID': device_id, 'DeviceLabel': 'OS-%s' % volume_id, 'ArrayID': array, 'ArrayModel': array_model, 'ServiceLevel': 'Silver', 'Workload': 'NONE', 'Emulation': 'FBA', 'Configuration': 'TDEV', 'CompressionDisabled': False}}) vol_create_desc1 = 'Populating Storage Group(s) with volumes : [00001]' vol_create_desc2 = ('Refresh [Storage Group [OS-SG] ' 'on Symmetrix [000197800123]] ') vol_create_task = [{'execution_order': 1, 'description': vol_create_desc1}, {'execution_order': 2, 'description': vol_create_desc2}] # performance f_date_a = 1593432600000 f_date_b = 1594136400000 l_date = 1594730100000 perf_pb_metric = 'PercentBusy' perf_df_avg = 'Average' perf_port_groups = ['port_group_a', 'port_group_b', 'port_group_c'] perf_ports = ['SE-1E:1', 'SE-1E:2', 'SE-1E:3'] performance_config = { 'load_balance': True, 'load_balance_rt': True, 'perf_registered': True, 'rt_registered': True, 'collection_interval': 5, 'data_format': 'Average', 'look_back': 60, 'look_back_rt': 10, 'port_group_metric': 'PercentBusy', 'port_metric': 'PercentBusy'} array_registration = {"registrationDetailsInfo": [ {"symmetrixId": array, "realtime": True, "message": "Success", "collectionintervalmins": 5, "diagnostic": True}]} array_keys = {"arrayInfo": [ {"symmetrixId": array, "firstAvailableDate": f_date_a, "lastAvailableDate": l_date}, {"symmetrixId": array_herc, "firstAvailableDate": f_date_a, "lastAvailableDate": l_date}, {"symmetrixId": remote_array, "firstAvailableDate": f_date_b, "lastAvailableDate": l_date}]} dummy_performance_data = { "expirationTime": 1594731525645, "count": 10, "maxPageSize": 1000, "id": "3b757302-6e4a-4dbe-887d-e42aed7f5944_0", "resultList": { "result": [ {"PercentBusy": random.uniform(0.0, 100.0), "timestamp": 1593432600000}, {"PercentBusy": random.uniform(0.0, 100.0), "timestamp": 1593432900000}, {"PercentBusy": random.uniform(0.0, 100.0), "timestamp": 1593433200000}, 
{"PercentBusy": random.uniform(0.0, 100.0), "timestamp": 1593433500000}, {"PercentBusy": random.uniform(0.0, 100.0), "timestamp": 1593433800000}, {"PercentBusy": random.uniform(0.0, 100.0), "timestamp": 1593434100000}, {"PercentBusy": random.uniform(0.0, 100.0), "timestamp": 1593434400000}, {"PercentBusy": random.uniform(0.0, 100.0), "timestamp": 1593434700000}, {"PercentBusy": random.uniform(0.0, 100.0), "timestamp": 1593435000000}, {"PercentBusy": random.uniform(0.0, 100.0), "timestamp": 1593435300000}], "from": 1, "to": 10 } } staging_sg = 'STG-myhostB-4732de9b-98a4-4b6d-ae4b-3cafb3d34220-SG' staging_mv1 = 'STG-myhostA-4732de9b-98a4-4b6d-ae4b-3cafb3d34220-MV' staging_mv2 = 'STG-myhostB-4732de9b-98a4-4b6d-ae4b-3cafb3d34220-MV' staging_mvs = [staging_mv1, staging_mv2] legacy_mv1 = 'OS-myhostA-No_SLO-e14f48b8-MV' legacy_mv2 = 'OS-myhostB-No_SLO-e14f48b8-MV' legacy_shared_sg = 'OS-myhostA-No_SLO-SG' legacy_mvs = [legacy_mv1, legacy_mv2] legacy_not_shared_mv = 'OS-myhostA-SRP_1-Diamond-NONE-MV' legacy_not_shared_sg = 'OS-myhostA-SRP_1-Diamond-NONE-SG' snapshot_metadata = {'SnapshotLabel': test_snapshot_snap_name, 'SourceDeviceID': device_id, 'SourceDeviceLabel': device_label, 'SnapIdList': [snap_id]} port_info = { "symmetrixPort": { "director_status": "Online", "maskingview": [ "Test_MV", ], "port_status": "ON", "symmetrixPortKey": { "directorId": "FA-1D", "portId": "4" }, "portgroup": [ "Test_PG" ] } } port_info_off = deepcopy(port_info) port_info_off.update({"symmetrixPort": { "director_status": "Offline", "port_status": "OFF"}}) port_info_no_status = deepcopy(port_info) port_info_no_status.update({"symmetrixPort": { "symmetrixPortKey": { "directorId": "FA-1D", "portId": "4" } }}) port_info_no_details = deepcopy(port_info) port_info_no_details.pop("symmetrixPort") nvme_tcp_discover_json = { "device": "nvme0", "genctr": 48, "records": [ { "trtype": "tcp", "adrfam": "ipv4", "subtype": "nvme subsystem", "treq": "not specified", "portid": 1025, "trsvcid": "4420", "subnqn": "nqn.1988-11.com.dell:PowerMax_2500:00:000120001602", "traddr": "172.16.22.1", "sectype": "none" }, { "trtype": "tcp", "adrfam": "ipv4", "subtype": "nvme subsystem", "treq": "not specified", "portid": 5121, "trsvcid": "4420", "subnqn": "nqn.1988-11.com.dell:PowerMax_2500:00:000120001602", "traddr": "172.16.22.2", "sectype": "none" } ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/volume/drivers/dell_emc/powermax/powermax_fake_objects.py0000664000175000017500000003622500000000000031772 0ustar00zuulzuul00000000000000# Copyright (c) 2020 Dell Inc. or its subsidiaries. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import ast import requests from cinder import exception from cinder.tests.unit.volume.drivers.dell_emc.powermax import ( powermax_data as tpd) class FakeLookupService(object): def get_device_mapping_from_network(self, initiator_wwns, target_wwns): return tpd.PowerMaxData.device_map class FakeResponse(object): def __init__(self, status_code, return_object): self.status_code = status_code self.return_object = return_object def json(self): if self.return_object: return self.return_object else: raise ValueError def get_status_code(self): return self.status_code() def raise_for_status(self): if 200 <= self.status_code <= 204: return False else: return True class FakeRequestsSession(object): def __init__(self, *args, **kwargs): self.data = tpd.PowerMaxData() def request(self, method, url, params=None, data=None, timeout=None): return_object = '' status_code = 200 if method == 'GET': status_code, return_object = self._get_request(url, params) elif method == 'POST' or method == 'PUT': status_code, return_object = self._post_or_put(url, data) elif method == 'DELETE': status_code, return_object = self._delete(url) elif method == 'TIMEOUT': raise requests.Timeout elif method == 'READTIMEOUT' and timeout is not None: raise requests.ReadTimeout elif method == 'CONNECTTIMEOUT' and timeout is not None: raise requests.ConnectTimeout elif method == 'EXCEPTION': raise Exception elif method == 'CONNECTION': raise requests.ConnectionError elif method == 'HTTP': raise requests.HTTPError elif method == 'SSL': raise requests.exceptions.SSLError elif method == 'EXCEPTION': raise exception.VolumeBackendAPIException return FakeResponse(status_code, return_object) def _get_request(self, url, params): status_code = 200 return_object = None if self.data.failed_resource in url: status_code = 500 return_object = self.data.job_list[2] elif 'sloprovisioning' in url: if 'volume' in url: return_object = self._sloprovisioning_volume(url, params) elif 'storagegroup' in url: return_object = self._sloprovisioning_sg(url) elif 'maskingview' in url: return_object = self._sloprovisioning_mv(url) elif 'portgroup' in url: return_object = self._sloprovisioning_pg(url) elif 'host' in url: return_object = self._sloprovisioning_ig(url) elif 'initiator' in url: return_object = self._sloprovisioning_initiator(url) elif 'service_level_demand_report' in url: return_object = self.data.srp_slo_details elif 'srp' in url: return_object = self.data.srp_details elif 'workloadtype' in url: return_object = self.data.workloadtype elif 'compressionCapable' in url: return_object = self.data.compression_info elif 'slo' in url: return_object = self.data.powermax_slo_details elif 'replication' in url: return_object = self._replication(url) elif 'system' in url: if 'director' in url: url_split = url.split('/') if 'port' in url_split[-1]: return_object = self._system_port_list(url) elif url_split[-2] == 'port': return_object = self._system_port_detail(url) else: return_object = self._system(url) elif 'headroom' in url: return_object = self.data.headroom elif 'performance' in url: if 'Array' in url: if 'registrationdetails' in url: return_object = self._performance_registration(url) if 'keys' in url: return_object = self.data.array_keys return status_code, return_object def _sloprovisioning_volume(self, url, params): return_object = self.data.volume_list[2] if '/private' in url: return_object = self.data.private_vol_details elif params: if '1' in params.values() or 'volume_identifier' in params: return_object = self.data.volume_list[0] elif '2' in 
params.values(): return_object = self.data.volume_list[1] else: for vol in self.data.volume_details: if vol['volumeId'] in url: return_object = vol break return return_object def _sloprovisioning_sg(self, url): return_object = self.data.sg_list for sg in self.data.sg_details: if sg['storageGroupId'] in url: return_object = sg break return return_object def _sloprovisioning_mv(self, url): if self.data.masking_view_name_i in url: return_object = self.data.maskingview[1] else: return_object = self.data.maskingview[0] return return_object def _sloprovisioning_pg(self, url): return_object = None for pg in self.data.portgroup: if pg['portGroupId'] in url: return_object = pg break return return_object def _system_port_detail(self, url): return_object = None for port in self.data.port_list: if port['symmetrixPort']['symmetrixPortKey']['directorId'] in url: return_object = port break return return_object @staticmethod def _system_port_list(url): url_split = url.split('/') return {'symmetrixPortKey': [{'directorId': url_split[-2], 'portId': '1'}]} def _sloprovisioning_ig(self, url): return_object = None for ig in self.data.initiator_group_list: if ig['hostId'] in url: return_object = ig break return return_object def _sloprovisioning_initiator(self, url): return_object = self.data.initiator_list[2] if self.data.wwpn1 in url: return_object = self.data.initiator_list[0] elif self.data.initiator in url: return_object = self.data.initiator_list[1] return return_object def _replication(self, url): return_object = None if 'storagegroup' in url: return_object = self._replication_sg(url) elif 'rdf_group' in url: if self.data.device_id in url: return_object = self.data.rdf_group_vol_details elif self.data.rdf_group_no_1 in url: return_object = self.data.rdf_group_details else: return_object = self.data.rdf_group_list elif 'snapshot' in url: return_object = self.data.volume_snap_vx elif 'capabilities' in url: return_object = self.data.capabilities return return_object def _replication_sg(self, url): return_object = None if 'snapid' in url: return_object = self.data.group_snap_vx elif 'rdf_group' in url: for sg in self.data.sg_rdf_details: if sg['storageGroupName'] in url: return_object = sg break elif 'storagegroup' in url: return_object = self.data.sg_details_rep[0] return return_object def _system(self, url): return_object = None if 'job' in url: for job in self.data.job_list: if job['jobId'] in url: return_object = job break elif 'info' in url: return_object = self.data.version_details elif 'tag' in url: return_object = [] else: for symm in self.data.symmetrix: if symm['symmetrixId'] in url: return_object = symm break return return_object @staticmethod def _performance_registration(url): url_split = url.split('/') array_id = url_split[-1] return {"registrationDetailsInfo": [ {"symmetrixId": array_id, "realtime": True, "message": "Success", "collectionintervalmins": 5, "diagnostic": True}]} def _post_or_put(self, url, payload): return_object = self.data.job_list[0] status_code = 201 if 'performance' in url: if 'PortGroup' in url: if 'metrics' in url: return 200, self.data.dummy_performance_data elif 'FEPort' in url: if 'metrics' in url: return 200, self.data.dummy_performance_data elif 'realtime' in url: if 'metrics' in url: return 200, self.data.dummy_performance_data elif self.data.failed_resource in url: status_code = 500 return_object = self.data.job_list[2] elif payload: payload = ast.literal_eval(payload) if self.data.failed_resource in payload.values(): status_code = 500 return_object = 
self.data.job_list[2] if payload.get('executionOption'): status_code = 202 return status_code, return_object def _delete(self, url): if self.data.failed_resource in url: status_code = 500 return_object = self.data.job_list[2] else: status_code = 204 return_object = None return status_code, return_object def session(self): return FakeRequestsSession() def close(self): pass class FakeConfiguration(object): def __init__(self, emc_file=None, volume_backend_name=None, interval=0, retries=0, replication_device=None, **kwargs): self.cinder_dell_emc_config_file = emc_file self.interval = interval self.retries = retries self.volume_backend_name = volume_backend_name self.config_group = volume_backend_name self.filter_function = None self.goodness_function = None self.san_is_local = False self.initiator_check = False self.powermax_service_level = None self.vmax_workload = None self.rest_api_connect_timeout = 30 self.rest_api_read_timeout = 30 if replication_device: self.replication_device = replication_device for key, value in kwargs.items(): if 'san_' in key: self.set_san_config_options(key, value) elif 'powermax_' and '_name_template' in key: self.set_host_name_template_config_options(key, value) elif 'powermax_' in key: self.set_powermax_config_options(key, value) elif 'chap_' in key: self.set_chap_config_options(key, value) elif 'driver_ssl_cert' in key: self.set_ssl_cert_config_options(key, value) elif 'u4p_' in key: self.set_u4p_failover_config_options(key, value) elif 'load_' in key: self.set_performance_config_options(key, value) def set_san_config_options(self, key, value): if key == 'san_login': self.san_login = value elif key == 'san_password': self.san_password = value elif key == 'san_ip': self.san_ip = value elif key == 'san_api_port': self.san_api_port = value def set_powermax_config_options(self, key, value): if key == 'powermax_srp': self.powermax_srp = value elif key == 'powermax_service_level': self.powermax_service_level = value elif key == 'powermax_workload': self.powermax_workload = value elif key == 'powermax_port_groups': self.powermax_port_groups = value elif key == 'powermax_array': self.powermax_array = value def set_chap_config_options(self, key, value): if key == 'use_chap_auth': self.use_chap_auth = value elif key == 'chap_username': self.chap_username = value elif key == 'chap_password': self.chap_password = value def set_ssl_cert_config_options(self, key, value): if key == 'driver_ssl_cert_verify': self.driver_ssl_cert_verify = value elif key == 'driver_ssl_cert_path': self.driver_ssl_cert_path = value def set_u4p_failover_config_options(self, key, value): if key == 'u4p_failover_target': self.u4p_failover_target = value elif key == 'u4p_failover_backoff_factor': self.u4p_failover_backoff_factor = value elif key == 'u4p_failover_retries': self.u4p_failover_retries = value elif key == 'u4p_failover_timeout': self.u4p_failover_timeout = value elif key == 'u4p_primary': self.u4p_primary = value def set_host_name_template_config_options(self, key, value): if key == 'powermax_short_host_name_template': self.powermax_short_host_name_template = value elif key == 'powermax_port_group_name_template': self.powermax_port_group_name_template = value def set_performance_config_options(self, key, value): if key == 'load_balance': self.load_balance = value elif key == 'load_balance_real_time': self.load_balance_real_time = value elif key == 'load_data_format': self.load_data_format = value elif key == 'load_look_back': self.load_look_back = value elif key == 
'load_look_back_real_time': self.load_look_back_real_time = value elif key == 'port_group_load_metric': self.port_group_load_metric = value elif key == 'port_load_metric': self.port_load_metric = value def safe_get(self, key): try: return getattr(self, key) except Exception: return None def append_config_values(self, values): pass def set_rest_api_connect_timeout(self, value): self.rest_api_connect_timeout = value def set_rest_api_read_timeout(self, value): self.rest_api_read_timeout = value ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/volume/drivers/dell_emc/powermax/test_powermax_common.py0000664000175000017500000075334100000000000031707 0ustar00zuulzuul00000000000000# Copyright (c) 2020 Dell Inc. or its subsidiaries. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import ast from copy import deepcopy import json import time from unittest import mock from cinder import exception from cinder.objects import fields from cinder.tests.unit import fake_constants as fake from cinder.tests.unit import fake_snapshot from cinder.tests.unit import fake_volume from cinder.tests.unit import test from cinder.tests.unit.volume.drivers.dell_emc.powermax import ( powermax_data as tpd) from cinder.tests.unit.volume.drivers.dell_emc.powermax import ( powermax_fake_objects as tpfo) from cinder.volume.drivers.dell_emc.powermax import common from cinder.volume.drivers.dell_emc.powermax import fc from cinder.volume.drivers.dell_emc.powermax import masking from cinder.volume.drivers.dell_emc.powermax import metadata from cinder.volume.drivers.dell_emc.powermax import nvme_tcp from cinder.volume.drivers.dell_emc.powermax import provision from cinder.volume.drivers.dell_emc.powermax import rest from cinder.volume.drivers.dell_emc.powermax import utils from cinder.volume import volume_utils class PowerMaxCommonTest(test.TestCase): def setUp(self): self.data = tpd.PowerMaxData() super(PowerMaxCommonTest, self).setUp() self.mock_object(volume_utils, 'get_max_over_subscription_ratio', return_value=1.0) replication_device = self.data.sync_rep_device configuration = tpfo.FakeConfiguration( emc_file=None, volume_backend_name='CommonTests', interval=1, retries=1, san_ip='1.1.1.1', san_login='smc', powermax_array=self.data.array, powermax_srp='SRP_1', san_password='smc', san_api_port=8443, powermax_port_groups=[self.data.port_group_name_f], powermax_port_group_name_template='portGroupName', replication_device=replication_device) self.mock_object(rest.PowerMaxRest, '_establish_rest_session', return_value=tpfo.FakeRequestsSession()) driver = fc.PowerMaxFCDriver(configuration=configuration) driver_nvme_tcp = (nvme_tcp. 
PowerMaxNVMETCPDriver(configuration=configuration)) self.driver = driver self.driver_nvme_tcp = driver_nvme_tcp self.common = self.driver.common self.nvme_tcp_common = self.driver_nvme_tcp.common self.masking = self.common.masking self.provision = self.common.provision self.rest = self.common.rest self.utils = self.common.utils self.mock_object( self.utils, 'get_volumetype_extra_specs', return_value=deepcopy(self.data.vol_type_extra_specs)) self.mock_object(self.rest, 'is_snap_id', True) @mock.patch.object(rest.PowerMaxRest, 'get_array_ucode_version', return_value=tpd.PowerMaxData.next_gen_ucode) @mock.patch.object(rest.PowerMaxRest, 'get_array_model_info', return_value=('PowerMax 2000', True)) @mock.patch.object(rest.PowerMaxRest, 'set_rest_credentials') @mock.patch.object(common.PowerMaxCommon, '_get_slo_workload_combinations', return_value=[]) @mock.patch.object(common.PowerMaxCommon, 'get_attributes_from_cinder_config', side_effect=[[], tpd.PowerMaxData.array_info_wl]) def test_gather_info_tests(self, mck_parse, mck_combo, mck_rest, mck_nextgen, mck_ucode): # Use-Case 1: Gather info no-opts configuration = tpfo.FakeConfiguration( None, 'config_group', None, None) fc.PowerMaxFCDriver(configuration=configuration) # Use-Case 2: Gather info next-gen with ucode/version self.common._gather_info() self.assertTrue(self.common.next_gen) self.assertEqual(self.common.ucode_level, self.data.next_gen_ucode) @mock.patch.object(rest.PowerMaxRest, 'get_array_ucode_version', return_value=tpd.PowerMaxData.next_gen_ucode) @mock.patch.object(rest.PowerMaxRest, 'get_array_model_info', return_value=('PowerMax 2000', True)) @mock.patch.object(rest.PowerMaxRest, 'set_rest_credentials') @mock.patch.object( common.PowerMaxCommon, 'get_attributes_from_cinder_config', return_value={'SerialNumber': tpd.PowerMaxData.array}) @mock.patch.object( common.PowerMaxCommon, '_get_attributes_from_config') def test_gather_info_rep_enabled_duplicate_serial_numbers( self, mck_get_cnf, mck_get_c_cnf, mck_set, mck_model, mck_ucode): self.mock_object(self.common, 'replication_enabled', True) self.mock_object(self.common, 'replication_targets', [self.data.array]) self.assertRaises( exception.InvalidConfigurationValue, self.common._gather_info) @mock.patch.object(common.PowerMaxCommon, '_gather_info') def test_get_attributes_from_config_short_host_template( self, mock_gather): configuration = tpfo.FakeConfiguration( emc_file=None, volume_backend_name='config_group', interval='10', retries='10', replication_device=None, powermax_short_host_name_template='shortHostName') driver = fc.PowerMaxFCDriver(configuration=configuration) driver.common._get_attributes_from_config() self.assertEqual( 'shortHostName', driver.common.powermax_short_host_name_template) @mock.patch.object(common.PowerMaxCommon, '_gather_info') def test_get_attributes_from_config_no_short_host_template( self, mock_gather): configuration = tpfo.FakeConfiguration( emc_file=None, volume_backend_name='config_group', interval='10', retries='10', replication_device=None) driver = fc.PowerMaxFCDriver(configuration=configuration) driver.common._get_attributes_from_config() self.assertIsNone(driver.common.powermax_short_host_name_template) @mock.patch.object(common.PowerMaxCommon, '_gather_info') def test_get_attributes_from_config_port_group_template( self, mock_gather): configuration = tpfo.FakeConfiguration( emc_file=None, volume_backend_name='config_group', interval='10', retries='10', replication_device=None, powermax_port_group_name_template='portGroupName') driver = 
fc.PowerMaxFCDriver(configuration=configuration) driver.common._get_attributes_from_config() self.assertEqual( 'portGroupName', driver.common.powermax_port_group_name_template) @mock.patch.object(common.PowerMaxCommon, '_gather_info') def test_get_attributes_from_config_no_port_group_template( self, mock_gather): configuration = tpfo.FakeConfiguration( emc_file=None, volume_backend_name='config_group', interval='10', retries='10', replication_device=None) driver = fc.PowerMaxFCDriver(configuration=configuration) driver.common._get_attributes_from_config() self.assertIsNone(driver.common.powermax_port_group_name_template) def test_get_slo_workload_combinations_powermax(self): self.mock_object(self.common, 'next_gen', True) self.mock_object(self.common, 'array_model', 'PowerMax_2000') array_info = {} pools = self.common._get_slo_workload_combinations(array_info) self.assertTrue(len(pools) == 24) def test_get_slo_workload_combinations_afa_powermax(self): self.mock_object(self.common, 'next_gen', True) self.mock_object(self.common, 'array_model', 'VMAX250F') array_info = {} pools = self.common._get_slo_workload_combinations(array_info) self.assertTrue(len(pools) == 28) def test_get_slo_workload_combinations_afa_hypermax(self): self.mock_object(self.common, 'next_gen', False) self.mock_object(self.common, 'array_model', 'VMAX250F') array_info = {} pools = self.common._get_slo_workload_combinations(array_info) self.assertTrue(len(pools) == 16) def test_get_slo_workload_combinations_hybrid(self): self.mock_object(self.common, 'next_gen', False) self.mock_object(self.common, 'array_model', 'VMAX100K') array_info = {} pools = self.common._get_slo_workload_combinations(array_info) self.assertTrue(len(pools) == 44) def test_get_slo_workload_combinations_failed(self): self.mock_object(self.common, 'array_model', 'xxxxxx') array_info = {} self.assertRaises( exception.VolumeBackendAPIException, self.common._get_slo_workload_combinations, array_info) @mock.patch.object( common.PowerMaxCommon, 'get_volume_metadata', return_value={'device-meta-key-1': 'device-meta-value-1', 'device-meta-key-2': 'device-meta-value-2'}) def test_create_volume(self, mck_meta): ref_model_update = ( {'provider_location': str(self.data.provider_location), 'metadata': {'device-meta-key-1': 'device-meta-value-1', 'device-meta-key-2': 'device-meta-value-2', 'user-meta-key-1': 'user-meta-value-1', 'user-meta-key-2': 'user-meta-value-2'}}) volume = deepcopy(self.data.test_volume) volume.metadata = {'user-meta-key-1': 'user-meta-value-1', 'user-meta-key-2': 'user-meta-value-2'} model_update = self.common.create_volume(volume) self.assertEqual(ref_model_update, model_update) @mock.patch.object(common.PowerMaxCommon, 'get_volume_metadata', return_value=tpd.PowerMaxData.volume_metadata) def test_create_volume_qos(self, mck_meta): ref_model_update = ( {'provider_location': str(self.data.provider_location), 'metadata': self.data.volume_metadata}) extra_specs = deepcopy(self.data.extra_specs_intervals_set) extra_specs['qos'] = { 'total_iops_sec': '4000', 'DistributionType': 'Always'} with mock.patch.object(self.utils, 'get_volumetype_extra_specs', return_value=extra_specs): model_update = self.common.create_volume(self.data.test_volume) self.assertEqual(ref_model_update, model_update) @mock.patch.object(common.PowerMaxCommon, '_cleanup_device_snapvx') @mock.patch.object(common.PowerMaxCommon, 'get_volume_metadata', return_value='') def test_create_volume_from_snapshot(self, mck_meta, mck_cleanup_snaps): ref_model_update = 
({'provider_location': str( deepcopy(self.data.provider_location_snapshot))}) model_update = self.common.create_volume_from_snapshot( self.data.test_clone_volume, self.data.test_snapshot) self.assertEqual( ast.literal_eval(ref_model_update['provider_location']), ast.literal_eval(model_update['provider_location'])) # Test from legacy snapshot ref_model_update = ( {'provider_location': str( deepcopy(self.data.provider_location_clone))}) model_update = self.common.create_volume_from_snapshot( self.data.test_clone_volume, self.data.test_legacy_snapshot) self.assertEqual( ast.literal_eval(ref_model_update['provider_location']), ast.literal_eval(model_update['provider_location'])) @mock.patch.object(common.PowerMaxCommon, 'gather_replication_updates', return_value=(tpd.PowerMaxData.replication_update, tpd.PowerMaxData.rep_info_dict)) @mock.patch.object(common.PowerMaxCommon, 'srdf_protect_storage_group') @mock.patch.object(provision.PowerMaxProvision, 'create_volume_from_sg', return_value=tpd.PowerMaxData.volume_create_info_dict) @mock.patch.object(common.PowerMaxCommon, 'prepare_replication_details', return_value=(True, tpd.PowerMaxData.rep_extra_specs5, tpd.PowerMaxData.rep_info_dict, True)) def test_create_replication_enabled_volume_first_volume( self, mck_prep, mck_create, mck_protect, mck_updates): array = self.data.array volume = self.data.test_volume volume_name = volume.name volume_size = volume.size rep_extra_specs = self.data.rep_extra_specs rep_extra_specs5 = self.data.rep_extra_specs5 storagegroup_name = self.data.storagegroup_name_f rep_info_dict = self.data.rep_info_dict rep_vol = deepcopy(self.data.volume_create_info_dict) rep_vol.update({'device_uuid': volume_name, 'storage_group': storagegroup_name, 'size': volume_size}) vol, update, info = self.common._create_replication_enabled_volume( array, volume, volume_name, volume_size, rep_extra_specs, storagegroup_name, rep_extra_specs['rep_mode']) mck_prep.assert_called_once_with(self.data.rep_extra_specs) mck_create.assert_called_once_with( array, volume_name, storagegroup_name, volume_size, rep_extra_specs, rep_info_dict) mck_protect.assert_called_once_with( rep_extra_specs, rep_extra_specs5, rep_vol) rep_vol.update({'remote_device_id': self.data.device_id2}) mck_updates.assert_called_once_with( rep_extra_specs, rep_extra_specs5, rep_vol) self.assertEqual(self.data.volume_create_info_dict, vol) self.assertEqual(self.data.replication_update, update) self.assertEqual(self.data.rep_info_dict, info) @mock.patch.object(common.PowerMaxCommon, '_validate_rdfg_status') @mock.patch.object(common.PowerMaxCommon, 'gather_replication_updates', return_value=(tpd.PowerMaxData.replication_update, tpd.PowerMaxData.rep_info_dict)) @mock.patch.object(common.PowerMaxCommon, 'srdf_protect_storage_group') @mock.patch.object(provision.PowerMaxProvision, 'create_volume_from_sg', return_value=tpd.PowerMaxData.volume_create_info_dict) @mock.patch.object(common.PowerMaxCommon, 'prepare_replication_details', side_effect=((False, '', '', True), ('', tpd.PowerMaxData.rep_extra_specs5, tpd.PowerMaxData.rep_info_dict, ''))) def test_create_replication_enabled_volume_not_first_volume( self, mck_prepare, mck_create, mck_protect, mck_updates, mck_valid): array = self.data.array volume = self.data.test_volume volume_name = volume.name volume_size = volume.size rep_extra_specs = self.data.rep_extra_specs rep_extra_specs5 = self.data.rep_extra_specs5 storagegroup_name = self.data.storagegroup_name_f rep_info_dict = self.data.rep_info_dict rep_vol = 
deepcopy(self.data.volume_create_info_dict) rep_vol.update({'device_uuid': volume_name, 'storage_group': storagegroup_name, 'size': volume_size}) vol, update, info = self.common._create_replication_enabled_volume( array, volume, volume_name, volume_size, rep_extra_specs, storagegroup_name, rep_extra_specs['rep_mode']) self.assertEqual(2, mck_prepare.call_count) mck_create.assert_called_once_with( array, volume_name, storagegroup_name, volume_size, rep_extra_specs, rep_info_dict) mck_protect.assert_not_called() mck_valid.assert_called_once_with(array, rep_extra_specs) rep_vol.update({'remote_device_id': self.data.device_id2}) mck_updates.assert_called_once_with( rep_extra_specs, rep_extra_specs5, rep_vol) self.assertEqual(self.data.volume_create_info_dict, vol) self.assertEqual(self.data.replication_update, update) self.assertEqual(self.data.rep_info_dict, info) @mock.patch.object(common.PowerMaxCommon, 'gather_replication_updates', return_value=(tpd.PowerMaxData.replication_update, tpd.PowerMaxData.rep_info_dict)) @mock.patch.object(common.PowerMaxCommon, 'get_and_set_remote_device_uuid', return_value=tpd.PowerMaxData.device_id2) @mock.patch.object(rest.PowerMaxRest, 'srdf_resume_replication') @mock.patch.object( common.PowerMaxCommon, 'configure_volume_replication', return_value=(None, None, None, tpd.PowerMaxData.rep_extra_specs_mgmt, True)) @mock.patch.object(common.PowerMaxCommon, 'srdf_protect_storage_group') @mock.patch.object(provision.PowerMaxProvision, 'create_volume_from_sg', return_value=tpd.PowerMaxData.volume_create_info_dict) @mock.patch.object(common.PowerMaxCommon, 'prepare_replication_details', return_value=(True, {}, {}, False)) def test_create_replication_enabled_volume_not_first_rdfg_volume( self, mck_prepare, mck_create, mck_protect, mck_configure, mck_resume, mck_get_set, mck_updates): array = self.data.array volume = self.data.test_volume volume_name = volume.name volume_size = volume.size rep_extra_specs = self.data.rep_extra_specs storagegroup_name = self.data.storagegroup_name_f self.common._create_replication_enabled_volume( array, volume, volume_name, volume_size, rep_extra_specs, storagegroup_name, rep_extra_specs['rep_mode']) mck_prepare.assert_called_once() mck_protect.assert_not_called() mck_configure.assert_called_once() mck_resume.assert_called_once() @mock.patch.object(common.PowerMaxCommon, '_cleanup_device_snapvx') @mock.patch.object(common.PowerMaxCommon, 'get_volume_metadata', return_value='') def test_cloned_volume(self, mck_meta, mck_cleanup_snaps): array = self.data.array test_volume = self.data.test_clone_volume source_device_id = self.data.device_id extra_specs = self.common._initial_setup(test_volume) ref_model_update = ({'provider_location': str( self.data.provider_location_clone)}) model_update = self.common.create_cloned_volume( self.data.test_clone_volume, self.data.test_volume) self.assertEqual( ast.literal_eval(ref_model_update['provider_location']), ast.literal_eval(model_update['provider_location'])) mck_cleanup_snaps.assert_called_once_with( array, source_device_id, extra_specs) @mock.patch.object(rest.PowerMaxRest, 'get_volume_snapshot_list', return_value=list()) def test_delete_volume(self, mck_get_snaps): with mock.patch.object(self.common, '_delete_volume') as mock_delete: self.common.delete_volume(self.data.test_volume) mock_delete.assert_called_once_with(self.data.test_volume) @mock.patch.object(common.PowerMaxCommon, '_delete_from_srp') @mock.patch.object(common.PowerMaxCommon, '_cleanup_device_snapvx') 
@mock.patch.object(rest.PowerMaxRest, 'get_volume_snap_info', return_value=tpd.PowerMaxData.volume_snap_vx) def test_delete_volume_fail_if_active_snapshots( self, mck_get_snaps, mck_cleanup, mck_delete): array = self.data.array test_volume = self.data.test_volume device_id = self.data.device_id extra_specs = self.common._initial_setup(test_volume) self.assertRaises(exception.VolumeBackendAPIException, self.common._delete_volume, test_volume) mck_cleanup.assert_called_with(array, device_id, extra_specs) mck_delete.assert_not_called() @mock.patch.object(common.PowerMaxCommon, '_delete_from_srp') @mock.patch.object(common.PowerMaxCommon, '_cleanup_device_snapvx') @mock.patch.object( rest.PowerMaxRest, 'find_snap_vx_sessions', return_value=('', tpd.PowerMaxData.snap_tgt_session_cm_enabled)) @mock.patch.object(rest.PowerMaxRest, 'get_volume_snapshot_list', return_value=list()) def test_delete_volume_fail_if_snapvx_target( self, mck_get_snaps, mck_tgt_snap, mck_cleanup, mck_delete): array = self.data.array test_volume = self.data.test_volume device_id = self.data.device_id extra_specs = self.common._initial_setup(test_volume) self.assertRaises(exception.VolumeBackendAPIException, self.common._delete_volume, test_volume) mck_cleanup.assert_called_with(array, device_id, extra_specs) mck_delete.assert_not_called() @mock.patch.object(common.PowerMaxCommon, '_cleanup_device_snapvx') @mock.patch.object( common.PowerMaxCommon, 'get_snapshot_metadata', return_value={'snap-meta-key-1': 'snap-meta-value-1', 'snap-meta-key-2': 'snap-meta-value-2'}) def test_create_snapshot(self, mck_meta, mck_cleanup_snaps): ref_model_update = ( {'provider_location': str(self.data.snap_location), 'metadata': {'snap-meta-key-1': 'snap-meta-value-1', 'snap-meta-key-2': 'snap-meta-value-2', 'user-meta-key-1': 'user-meta-value-1', 'user-meta-key-2': 'user-meta-value-2'}}) snapshot = deepcopy(self.data.test_snapshot_manage) snapshot.metadata = {'user-meta-key-1': 'user-meta-value-1', 'user-meta-key-2': 'user-meta-value-2'} model_update = self.common.create_snapshot( snapshot, self.data.test_volume) self.assertEqual(ref_model_update, model_update) @mock.patch.object( common.PowerMaxCommon, '_parse_snap_info', return_value=(tpd.PowerMaxData.device_id, tpd.PowerMaxData.snap_location['snap_name'], [tpd.PowerMaxData.snap_id])) def test_delete_snapshot(self, mock_parse): snap_name = self.data.snap_location['snap_name'] sourcedevice_id = self.data.snap_location['source_id'] with mock.patch.object( self.provision, 'delete_volume_snap') as mock_delete_snap: self.common.delete_snapshot( self.data.test_snapshot, self.data.test_volume) mock_delete_snap.assert_called_once_with( self.data.array, snap_name, [sourcedevice_id], self.data.snap_id, restored=False) def test_delete_snapshot_not_found(self): with mock.patch.object(self.common, '_parse_snap_info', return_value=(None, 'Something', None)): with mock.patch.object( self.provision, 'delete_volume_snap') as mock_delete_snap: self.common.delete_snapshot(self.data.test_snapshot, self.data.test_volume) mock_delete_snap.assert_not_called() def test_delete_legacy_snap(self): with mock.patch.object(self.common, '_delete_volume') as mock_del: self.common.delete_snapshot(self.data.test_legacy_snapshot, self.data.test_legacy_vol) mock_del.assert_called_once_with(self.data.test_legacy_snapshot) @mock.patch.object(masking.PowerMaxMasking, 'return_volume_to_fast_managed_group') @mock.patch.object(masking.PowerMaxMasking, 'remove_and_reset_members') def test_remove_members(self, mock_rm, mock_return): 
array = self.data.array device_id = self.data.device_id volume = self.data.test_volume volume_name = self.data.test_volume.name extra_specs = self.data.extra_specs self.common._remove_members( array, volume, device_id, extra_specs, self.data.connector, False) mock_rm.assert_called_once_with( array, volume, device_id, volume_name, extra_specs, True, self.data.connector, async_grp=None, host_template=None) @mock.patch.object(masking.PowerMaxMasking, 'return_volume_to_fast_managed_group') @mock.patch.object(masking.PowerMaxMasking, 'remove_and_reset_members') def test_remove_members_multiattach_case(self, mock_rm, mock_return): array = self.data.array device_id = self.data.device_id volume = self.data.test_volume volume_name = self.data.test_volume.name extra_specs = self.data.extra_specs self.common._remove_members( array, volume, device_id, extra_specs, self.data.connector, True) mock_rm.assert_called_once_with( array, volume, device_id, volume_name, extra_specs, False, self.data.connector, async_grp=None, host_template=None) mock_return.assert_called_once() def test_unmap_lun(self): array = self.data.array device_id = self.data.device_id volume = self.data.test_volume extra_specs = deepcopy(self.data.extra_specs_intervals_set) extra_specs[utils.PORTGROUPNAME] = self.data.port_group_name_f connector = self.data.connector with mock.patch.object(self.common, '_remove_members') as mock_remove: self.common._unmap_lun(volume, connector) mock_remove.assert_called_once_with( array, volume, device_id, extra_specs, connector, False, async_grp=None, host_template=None) def test_unmap_lun_force(self): volume = self.data.test_volume extra_specs = deepcopy(self.data.extra_specs_intervals_set) extra_specs[utils.PORTGROUPNAME] = self.data.port_group_name_f connector = deepcopy(self.data.connector) del connector['host'] with mock.patch.object( self.common.utils, 'get_host_short_name') as mock_host: self.common._unmap_lun(volume, connector) mock_host.assert_not_called() @mock.patch.object(common.PowerMaxCommon, '_remove_members') def test_unmap_lun_attachments(self, mock_rm): volume1 = deepcopy(self.data.test_volume) volume1.volume_attachment.objects = [self.data.test_volume_attachment] connector = self.data.connector self.common._unmap_lun(volume1, connector) mock_rm.assert_called_once() mock_rm.reset_mock() volume2 = deepcopy(volume1) volume2.volume_attachment.objects.append( self.data.test_volume_attachment) self.common._unmap_lun(volume2, connector) mock_rm.assert_not_called() def test_unmap_lun_qos(self): array = self.data.array device_id = self.data.device_id volume = self.data.test_volume extra_specs = deepcopy(self.data.extra_specs_intervals_set) extra_specs[utils.PORTGROUPNAME] = self.data.port_group_name_f extra_specs['qos'] = { 'total_iops_sec': '4000', 'DistributionType': 'Always'} connector = self.data.connector with mock.patch.object(self.common, '_remove_members') as mock_remove: with mock.patch.object(self.utils, 'get_volumetype_extra_specs', return_value=extra_specs): self.common._unmap_lun(volume, connector) mock_remove.assert_called_once_with( array, volume, device_id, extra_specs, connector, False, async_grp=None, host_template=None) def test_unmap_lun_not_mapped(self): volume = self.data.test_volume connector = self.data.connector with mock.patch.object(self.common, 'find_host_lun_id', return_value=({}, False)): with mock.patch.object( self.common, '_remove_members') as mock_remove: self.common._unmap_lun(volume, connector) mock_remove.assert_not_called() def 
test_unmap_lun_connector_is_none(self): array = self.data.array device_id = self.data.device_id volume = self.data.test_volume extra_specs = deepcopy(self.data.extra_specs_intervals_set) extra_specs['storagetype:portgroupname'] = ( self.data.port_group_name_f) with mock.patch.object(self.common, '_remove_members') as mock_remove: self.common._unmap_lun(volume, None) mock_remove.assert_called_once_with( array, volume, device_id, extra_specs, None, False, async_grp=None, host_template=None) @mock.patch.object(metadata.PowerMaxVolumeMetadata, 'capture_detach_info') @mock.patch.object(common.PowerMaxCommon, '_remove_members') def test_unmap_lun_multiattach_prints_metadata(self, mck_remove, mck_info): volume = deepcopy(self.data.test_volume) connector = deepcopy(self.data.connector) volume.volume_attachment.objects = [ deepcopy(self.data.test_volume_attachment), deepcopy(self.data.test_volume_attachment)] self.common._unmap_lun(volume, connector) self.assertEqual(0, mck_remove.call_count) self.assertEqual(1, mck_info.call_count) @mock.patch.object(provision.PowerMaxProvision, 'verify_slo_workload') @mock.patch.object(common.PowerMaxCommon, '_remove_members') @mock.patch.object(common.PowerMaxCommon, 'find_host_lun_id', return_value=(tpd.PowerMaxData.iscsi_device_info, False)) @mock.patch.object( common.PowerMaxCommon, '_get_replication_extra_specs', return_value=tpd.PowerMaxData.rep_extra_specs_rep_config) @mock.patch.object( common.PowerMaxCommon, '_initial_setup', return_value=tpd.PowerMaxData.rep_extra_specs_rep_config) def test_unmap_lun_replication_force_flag( self, mck_setup, mck_rep, mck_find, mck_rem, mck_slo): volume = deepcopy(self.data.test_volume) connector = deepcopy(self.data.connector) device_info = self.data.provider_location['device_id'] volume.volume_attachment.objects = [ deepcopy(self.data.test_volume_attachment)] extra_specs = deepcopy(self.data.rep_extra_specs_rep_config) array = extra_specs[utils.ARRAY] extra_specs[utils.FORCE_VOL_EDIT] = True self.common._unmap_lun(volume, connector) mck_rem.assert_called_once_with(array, volume, device_info, extra_specs, connector, False, async_grp=None, host_template=None) @mock.patch.object(utils.PowerMaxUtils, 'is_metro_device', return_value=True) @mock.patch.object(provision.PowerMaxProvision, 'verify_slo_workload') @mock.patch.object(common.PowerMaxCommon, '_remove_members') @mock.patch.object(common.PowerMaxCommon, 'find_host_lun_id', return_value=(tpd.PowerMaxData.iscsi_device_info, False)) @mock.patch.object( common.PowerMaxCommon, '_get_replication_extra_specs', return_value=tpd.PowerMaxData.rep_extra_specs_rep_config_metro) @mock.patch.object( common.PowerMaxCommon, '_initial_setup', return_value=tpd.PowerMaxData.rep_extra_specs_rep_config_metro) def test_unmap_lun_replication_metro( self, mck_setup, mck_rep, mck_find, mck_rem, mck_slo, mck_metro): volume = deepcopy(self.data.test_volume) connector = deepcopy(self.data.connector) volume.volume_attachment.objects = [ deepcopy(self.data.test_volume_attachment)] extra_specs = deepcopy(self.data.rep_extra_specs_rep_config) extra_specs[utils.FORCE_VOL_EDIT] = True self.common._unmap_lun(volume, connector) self.assertEqual(2, mck_rem.call_count) @mock.patch.object(utils.PowerMaxUtils, 'is_metro_device', return_value=True) @mock.patch.object(provision.PowerMaxProvision, 'verify_slo_workload') @mock.patch.object(common.PowerMaxCommon, '_remove_members') @mock.patch.object(common.PowerMaxCommon, 'find_host_lun_id', return_value=(tpd.PowerMaxData.iscsi_device_info, False)) 
@mock.patch.object( common.PowerMaxCommon, '_get_replication_extra_specs', return_value=tpd.PowerMaxData.rep_extra_specs_rep_config_metro) @mock.patch.object( common.PowerMaxCommon, '_initial_setup', return_value=tpd.PowerMaxData.rep_extra_specs_rep_config_metro) def test_unmap_lun_replication_metro_promotion( self, mck_setup, mck_rep, mck_find, mck_rem, mck_slo, mck_metro): volume = deepcopy(self.data.test_volume) connector = deepcopy(self.data.connector) volume.volume_attachment.objects = [ deepcopy(self.data.test_volume_attachment)] extra_specs = deepcopy(self.data.rep_extra_specs_rep_config) extra_specs[utils.FORCE_VOL_EDIT] = True self.mock_object(self.common, 'promotion', True) self.common._unmap_lun(volume, connector) self.assertEqual(1, mck_rem.call_count) @mock.patch.object(common.PowerMaxCommon, '_unmap_lun') @mock.patch.object(metadata.PowerMaxVolumeMetadata, 'capture_detach_info') def test_unmap_lun_promotion_non_replicated_volume( self, mck_unmap, mck_info): volume = deepcopy(self.data.test_volume) connector = deepcopy(self.data.connector) self.common._unmap_lun_promotion(volume, connector) self.assertEqual(0, mck_unmap.call_count) self.assertEqual(0, mck_info.call_count) @mock.patch.object(common.PowerMaxCommon, '_unmap_lun') @mock.patch.object( common.PowerMaxCommon, '_initial_setup', return_value=tpd.PowerMaxData.rep_extra_specs_rep_config_metro) def test_unmap_lun_promotion_replicated_metro_volume( self, mck_setup, mck_unmap): volume = deepcopy(self.data.test_rep_volume) connector = deepcopy(self.data.connector) self.common._unmap_lun_promotion(volume, connector) mck_setup.assert_called_once_with(volume) mck_unmap.assert_called_once_with(volume, connector) @mock.patch.object(metadata.PowerMaxVolumeMetadata, 'capture_detach_info') @mock.patch.object( common.PowerMaxCommon, '_initial_setup', return_value=tpd.PowerMaxData.rep_extra_specs_rep_config) def test_unmap_lun_promotion_replicated_non_metro_volume( self, mck_setup, mck_capture): volume = deepcopy(self.data.test_rep_volume) connector = deepcopy(self.data.connector) extra_specs = self.data.rep_extra_specs_rep_config device_id = self.data.device_id promotion_key = [utils.PMAX_FAILOVER_START_ARRAY_PROMOTION] self.common._unmap_lun_promotion(volume, connector) mck_setup.assert_called_once_with(volume) mck_capture.assert_called_once_with( volume, extra_specs, device_id, promotion_key, promotion_key) def test_initialize_connection_already_mapped(self): volume = self.data.test_volume connector = self.data.connector host_lun = (self.data.maskingview[0]['maskingViewConnection'][0][ 'host_lun_address']) ref_dict = {'hostlunid': int(host_lun, 16), 'maskingview': self.data.masking_view_name_f, 'array': self.data.array, 'device_id': self.data.device_id} device_info_dict = self.common.initialize_connection(volume, connector) self.assertEqual(ref_dict, device_info_dict) def test_initialize_connection_setup_init_conn(self): volume = self.data.test_volume connector = self.data.connector with mock.patch.object( self.common, '_initial_setup', side_effect=self.common._initial_setup) as mck_setup: self.common.initialize_connection(volume, connector) mck_setup.assert_called_once_with(volume, init_conn=True) def test_initialize_connection_already_mapped_next_gen(self): with mock.patch.object(self.rest, 'is_next_gen_array', return_value=True): volume = self.data.test_volume connector = self.data.connector host_lun = (self.data.maskingview[0]['maskingViewConnection'][0][ 'host_lun_address']) ref_dict = {'hostlunid': int(host_lun, 16), 
'maskingview': self.data.masking_view_name_f, 'array': self.data.array, 'device_id': self.data.device_id} device_info_dict = self.common.initialize_connection(volume, connector) self.assertEqual(ref_dict, device_info_dict) @mock.patch.object(common.PowerMaxCommon, 'find_host_lun_id', return_value=({}, False)) @mock.patch.object( common.PowerMaxCommon, '_attach_volume', return_value=({}, tpd.PowerMaxData.port_group_name_f)) def test_initialize_connection_not_mapped(self, mock_attach, mock_id): volume = self.data.test_volume connector = self.data.connector extra_specs = deepcopy(self.data.extra_specs_intervals_set) extra_specs[utils.PORTGROUPNAME] = self.data.port_group_name_f masking_view_dict = self.common._populate_masking_dict( volume, connector, extra_specs) masking_view_dict[utils.IS_MULTIATTACH] = False device_info_dict = self.common.initialize_connection( volume, connector) self.assertEqual({}, device_info_dict) mock_attach.assert_called_once_with( volume, connector, extra_specs, masking_view_dict) @mock.patch.object(rest.PowerMaxRest, 'is_next_gen_array', return_value=True) @mock.patch.object(common.PowerMaxCommon, 'find_host_lun_id', return_value=({}, False)) @mock.patch.object( common.PowerMaxCommon, '_attach_volume', return_value=({}, tpd.PowerMaxData.port_group_name_f)) def test_initialize_connection_not_mapped_next_gen(self, mock_attach, mock_id, mck_gen): volume = self.data.test_volume connector = self.data.connector device_info_dict = self.common.initialize_connection( volume, connector) self.assertEqual({}, device_info_dict) @mock.patch.object( masking.PowerMaxMasking, 'pre_multiattach', return_value=tpd.PowerMaxData.masking_view_dict_multiattach) @mock.patch.object(common.PowerMaxCommon, 'find_host_lun_id', return_value=({}, True)) @mock.patch.object( common.PowerMaxCommon, '_attach_volume', return_value=({}, tpd.PowerMaxData.port_group_name_f)) def test_initialize_connection_multiattach_case( self, mock_attach, mock_id, mock_pre): volume = self.data.test_volume connector = self.data.connector self.common.initialize_connection(volume, connector) mock_attach.assert_called_once() mock_pre.assert_called_once() def test_attach_volume_success(self): volume = self.data.test_volume connector = self.data.connector extra_specs = deepcopy(self.data.extra_specs) extra_specs[utils.PORTGROUPNAME] = self.data.port_group_name_f masking_view_dict = self.common._populate_masking_dict( volume, connector, extra_specs) host_lun = (self.data.maskingview[0]['maskingViewConnection'][0][ 'host_lun_address']) ref_dict = {'hostlunid': int(host_lun, 16), 'maskingview': self.data.masking_view_name_f, 'array': self.data.array, 'device_id': self.data.device_id} with mock.patch.object(self.masking, 'setup_masking_view', return_value={ utils.PORTGROUPNAME: self.data.port_group_name_f}): device_info_dict, pg = self.common._attach_volume( volume, connector, extra_specs, masking_view_dict) self.assertEqual(ref_dict, device_info_dict) @mock.patch.object(masking.PowerMaxMasking, 'check_if_rollback_action_for_masking_required') @mock.patch.object(masking.PowerMaxMasking, 'setup_masking_view', return_value={}) @mock.patch.object(common.PowerMaxCommon, 'find_host_lun_id', return_value=({}, False)) def test_attach_volume_failed(self, mock_lun, mock_setup, mock_rollback): volume = self.data.test_volume connector = self.data.connector extra_specs = deepcopy(self.data.extra_specs) extra_specs[utils.PORTGROUPNAME] = self.data.port_group_name_f masking_view_dict = self.common._populate_masking_dict( volume, connector, 
extra_specs) self.assertRaises(exception.VolumeBackendAPIException, self.common._attach_volume, volume, connector, extra_specs, masking_view_dict) device_id = self.data.device_id (mock_rollback.assert_called_once_with( self.data.array, volume, device_id, {})) def test_terminate_connection(self): volume = self.data.test_volume connector = self.data.connector with mock.patch.object(self.common, '_unmap_lun') as mock_unmap: self.common.terminate_connection(volume, connector) mock_unmap.assert_called_once_with( volume, connector) def test_terminate_connection_promotion(self): volume = self.data.test_volume connector = self.data.connector with mock.patch.object( self.common, '_unmap_lun_promotion') as mock_unmap: self.mock_object(self.common, 'promotion', True) self.common.terminate_connection(volume, connector) mock_unmap.assert_called_once_with( volume, connector) @mock.patch.object(provision.PowerMaxProvision, 'extend_volume') @mock.patch.object(common.PowerMaxCommon, '_extend_vol_validation_checks') def test_extend_vol_no_rep_success(self, mck_val_chk, mck_extend): volume = self.data.test_volume array = self.data.array device_id = self.data.device_id new_size = self.data.test_volume.size ref_extra_specs = deepcopy(self.data.extra_specs_intervals_set) ref_extra_specs[utils.PORTGROUPNAME] = self.data.port_group_name_f self.common.extend_volume(volume, new_size) mck_extend.assert_called_once_with( array, device_id, new_size, ref_extra_specs, None) @mock.patch.object(common.PowerMaxCommon, '_validate_rdfg_status') @mock.patch.object(provision.PowerMaxProvision, 'extend_volume') @mock.patch.object(common.PowerMaxCommon, '_array_ode_capabilities_check', return_value=[True] * 4) @mock.patch.object(common.PowerMaxCommon, 'get_rdf_details', return_value=('1', None)) @mock.patch.object(common.PowerMaxCommon, '_extend_vol_validation_checks') @mock.patch.object(common.PowerMaxCommon, '_initial_setup', return_value=tpd.PowerMaxData.ex_specs_rep_config) def test_extend_vol_rep_success_next_gen( self, mck_setup, mck_val_chk, mck_get_rdf, mck_ode, mck_extend, mck_validate): self.mock_object(self.common, 'next_gen', True) volume = self.data.test_volume array = self.data.array device_id = self.data.device_id new_size = self.data.test_volume.size ref_extra_specs = deepcopy(self.data.ex_specs_rep_config) ref_extra_specs['array'] = self.data.array self.common.extend_volume(volume, new_size) mck_extend.assert_called_once_with( array, device_id, new_size, ref_extra_specs, '1') mck_ode.assert_called_once_with( array, ref_extra_specs[utils.REP_CONFIG], True) mck_validate.assert_called_once_with(array, ref_extra_specs) @mock.patch.object(common.PowerMaxCommon, '_validate_rdfg_status') @mock.patch.object(provision.PowerMaxProvision, 'extend_volume') @mock.patch.object(common.PowerMaxCommon, '_extend_legacy_replicated_vol') @mock.patch.object(common.PowerMaxCommon, '_array_ode_capabilities_check', return_value=[True, True, False, False]) @mock.patch.object(common.PowerMaxCommon, 'get_rdf_details', return_value=('1', None)) @mock.patch.object(common.PowerMaxCommon, '_extend_vol_validation_checks') @mock.patch.object(common.PowerMaxCommon, '_initial_setup', return_value=tpd.PowerMaxData.ex_specs_rep_config) def test_extend_vol_rep_success_next_gen_legacy_r2( self, mck_setup, mck_val_chk, mck_get_rdf, mck_ode, mck_leg_extend, mck_extend, mck_validate): self.mock_object(self.common, 'next_gen', True) self.mock_object(self.common, 'rep_configs', [self.data.rep_config]) volume = self.data.test_volume array = self.data.array 
device_id = self.data.device_id new_size = self.data.test_volume.size ref_extra_specs = deepcopy(self.data.ex_specs_rep_config) ref_extra_specs['array'] = self.data.array self.common.extend_volume(volume, new_size) mck_leg_extend.assert_called_once_with( array, volume, device_id, volume.name, new_size, ref_extra_specs, '1') mck_ode.assert_called_once_with( array, ref_extra_specs[utils.REP_CONFIG], True) mck_extend.assert_not_called() mck_validate.assert_called_once_with(array, ref_extra_specs) @mock.patch.object(common.PowerMaxCommon, '_validate_rdfg_status') @mock.patch.object(provision.PowerMaxProvision, 'extend_volume') @mock.patch.object(common.PowerMaxCommon, '_extend_legacy_replicated_vol') @mock.patch.object(common.PowerMaxCommon, '_array_ode_capabilities_check', return_value=[False, False, False, False]) @mock.patch.object(common.PowerMaxCommon, 'get_rdf_details', return_value=('1', None)) @mock.patch.object(common.PowerMaxCommon, '_extend_vol_validation_checks') @mock.patch.object(common.PowerMaxCommon, '_initial_setup', return_value=tpd.PowerMaxData.ex_specs_rep_config) def test_extend_vol_rep_success_legacy( self, mck_setup, mck_val_chk, mck_get_rdf, mck_ode, mck_leg_extend, mck_extend, mck_validate): self.mock_object(self.common, 'rep_configs', [self.data.rep_config]) self.mock_object(self.common, 'next_gen', False) volume = self.data.test_volume array = self.data.array device_id = self.data.device_id new_size = self.data.test_volume.size ref_extra_specs = deepcopy(self.data.ex_specs_rep_config) ref_extra_specs['array'] = self.data.array self.common.extend_volume(volume, new_size) mck_leg_extend.assert_called_once_with( array, volume, device_id, volume.name, new_size, ref_extra_specs, '1') mck_ode.assert_called_once_with( array, ref_extra_specs[utils.REP_CONFIG], True) mck_extend.assert_not_called() mck_validate.assert_called_once_with(array, ref_extra_specs) @mock.patch.object(common.PowerMaxCommon, '_validate_rdfg_status') @mock.patch.object(common.PowerMaxCommon, '_array_ode_capabilities_check', return_value=[False, False, False, False]) @mock.patch.object(common.PowerMaxCommon, 'get_rdf_details', return_value=('1', None)) @mock.patch.object(common.PowerMaxCommon, '_extend_vol_validation_checks') @mock.patch.object( common.PowerMaxCommon, '_initial_setup', return_value=tpd.PowerMaxData.ex_specs_rep_config_no_extend) def test_extend_vol_rep_success_legacy_allow_extend_false( self, mck_setup, mck_val_chk, mck_get_rdf, mck_ode, mck_validate): self.mock_object(self.common, 'rep_configs', [self.data.rep_config]) self.mock_object(self.common, 'next_gen', False) volume = self.data.test_volume new_size = self.data.test_volume.size self.assertRaises(exception.VolumeBackendAPIException, self.common.extend_volume, volume, new_size) def test_update_volume_stats(self): data = self.common.update_volume_stats() self.assertEqual('CommonTests', data['volume_backend_name']) def test_update_volume_stats_no_wlp(self): with mock.patch.object(self.common, '_update_srp_stats', return_value=('123s#SRP_1#None#None', 100, 90, 90, 10)): data = self.common.update_volume_stats() self.assertEqual('CommonTests', data['volume_backend_name']) def test_update_srp_stats_with_wl(self): with mock.patch.object(self.rest, 'get_srp_by_name', return_value=self.data.srp_details): location_info, __, __, __, __ = self.common._update_srp_stats( self.data.array_info_wl) self.assertEqual(location_info, '000197800123#SRP_1#Diamond#OLTP') def test_update_srp_stats_no_wl(self): with mock.patch.object(self.rest, 
'get_srp_by_name', return_value=self.data.srp_details): location_info, __, __, __, __ = self.common._update_srp_stats( self.data.array_info_no_wl) self.assertEqual(location_info, '000197800123#SRP_1#Diamond') def test_find_device_on_array_success(self): volume = self.data.test_volume extra_specs = self.data.extra_specs ref_device_id = self.data.device_id founddevice_id = self.common._find_device_on_array(volume, extra_specs) self.assertEqual(ref_device_id, founddevice_id) def test_find_device_on_array_provider_location_not_string(self): volume = fake_volume.fake_volume_obj( context='cxt', provider_location=None) extra_specs = self.data.extra_specs founddevice_id = self.common._find_device_on_array( volume, extra_specs) self.assertIsNone(founddevice_id) def test_find_legacy_device_on_array(self): volume = self.data.test_legacy_vol extra_specs = self.data.extra_specs ref_device_id = self.data.device_id founddevice_id = self.common._find_device_on_array(volume, extra_specs) self.assertEqual(ref_device_id, founddevice_id) def test_find_host_lun_id_attached(self): volume = self.data.test_volume extra_specs = self.data.extra_specs host = 'HostX' host_lun = ( self.data.maskingview[0]['maskingViewConnection'][0][ 'host_lun_address']) ref_masked = {'hostlunid': int(host_lun, 16), 'maskingview': self.data.masking_view_name_f, 'array': self.data.array, 'device_id': self.data.device_id} maskedvols, __ = self.common.find_host_lun_id(volume, host, extra_specs) self.assertEqual(ref_masked, maskedvols) def test_find_host_lun_id_not_attached(self): volume = self.data.test_volume extra_specs = self.data.extra_specs host = 'HostX' with mock.patch.object(self.rest, 'find_mv_connections_for_vol', return_value=None): maskedvols, __ = self.common.find_host_lun_id( volume, host, extra_specs) self.assertEqual({}, maskedvols) @mock.patch.object( common.PowerMaxCommon, '_get_masking_views_from_volume', return_value=([tpd.PowerMaxData.masking_view_name_f], [tpd.PowerMaxData.masking_view_name_f, tpd.PowerMaxData.masking_view_name_Y_f])) def test_find_host_lun_id_multiattach(self, mock_mask): volume = self.data.test_volume extra_specs = self.data.extra_specs __, is_multiattach = self.common.find_host_lun_id( volume, 'HostX', extra_specs) self.assertTrue(is_multiattach) @mock.patch.object(rest.PowerMaxRest, 'get_rdf_pair_volume', return_value=tpd.PowerMaxData.rdf_group_vol_details) @mock.patch.object(rest.PowerMaxRest, 'get_volume', return_value=tpd.PowerMaxData.volume_details[0]) def test_find_host_lun_id_rep_extra_specs(self, mock_vol, mock_tgt): self.common.find_host_lun_id( self.data.test_volume, 'HostX', self.data.extra_specs, self.data.rep_extra_specs) mock_tgt.assert_called_once() @mock.patch.object(rest.PowerMaxRest, 'find_mv_connections_for_vol', return_value='1') @mock.patch.object(common.PowerMaxCommon, '_get_masking_views_from_volume', side_effect=[([], ['OS-HostX-I-PG-MV']), (['OS-HostX-I-PG-MV'], ['OS-HostX-I-PG-MV'])]) @mock.patch.object(rest.PowerMaxRest, 'get_volume', return_value=tpd.PowerMaxData.volume_details[0]) def test_find_host_lun_id_backward_compatible( self, mock_vol, mock_mvs, mock_mv_conns): expected_dict = {'hostlunid': '1', 'maskingview': 'OS-HostX-I-PG-MV', 'array': '000197800123', 'device_id': '00001'} self.mock_object(self.common, 'powermax_short_host_name_template', 'shortHostName[:7]finance') masked_vols, is_multiattach = self.common.find_host_lun_id( self.data.test_volume, 'HostX', self.data.extra_specs) self.assertEqual(expected_dict, masked_vols) self.assertFalse(is_multiattach) 
mock_mv_conns.assert_called_once() def test_get_masking_views_from_volume(self): array = self.data.array device_id = self.data.device_id host = 'HostX' ref_mv_list = [self.data.masking_view_name_f] maskingview_list, __ = self.common.get_masking_views_from_volume( array, self.data.test_volume, device_id, host) self.assertEqual(ref_mv_list, maskingview_list) # is metro with mock.patch.object(self.utils, 'is_metro_device', return_value=True): __, is_metro = self.common.get_masking_views_from_volume( array, self.data.test_volume, device_id, host) self.assertTrue(is_metro) def test_get_masking_views_from_volume_wrong_host(self): array = self.data.array device_id = self.data.device_id host = 'DifferentHost' maskingview_list, __ = self.common.get_masking_views_from_volume( array, self.data.test_volume, device_id, host) self.assertEqual([], maskingview_list) def test_find_host_lun_id_no_host_check(self): volume = self.data.test_volume extra_specs = self.data.extra_specs host_lun = (self.data.maskingview[0]['maskingViewConnection'][0][ 'host_lun_address']) ref_masked = {'hostlunid': int(host_lun, 16), 'maskingview': self.data.masking_view_name_f, 'array': self.data.array, 'device_id': self.data.device_id} maskedvols, __ = self.common.find_host_lun_id( volume, None, extra_specs) self.assertEqual(ref_masked, maskedvols) def test_initial_setup_success(self): volume = self.data.test_volume ref_extra_specs = deepcopy(self.data.extra_specs_intervals_set) ref_extra_specs[utils.PORTGROUPNAME] = self.data.port_group_name_f extra_specs = self.common._initial_setup(volume) self.assertEqual(ref_extra_specs, extra_specs) def test_initial_setup_failed(self): volume = self.data.test_volume with mock.patch.object( self.common, 'get_attributes_from_cinder_config', return_value=None): self.assertRaises(exception.VolumeBackendAPIException, self.common._initial_setup, volume) def test_initial_setup_success_specs_init_conn_call(self): volume = self.data.test_volume array_info = self.common.get_attributes_from_cinder_config() extra_specs, __ = self.common._set_config_file_and_get_extra_specs( volume) with mock.patch.object( self.common, '_set_vmax_extra_specs', side_effect=self.common._set_vmax_extra_specs) as mck_specs: self.common._initial_setup(volume, init_conn=True) mck_specs.assert_called_once_with( extra_specs, array_info, True) @mock.patch.object(rest.PowerMaxRest, 'get_rdf_pair_volume', return_value=tpd.PowerMaxData.rdf_group_vol_details) def test_populate_masking_dict(self, mock_tgt): volume = self.data.test_volume connector = self.data.connector extra_specs = deepcopy(self.data.extra_specs) extra_specs[utils.PORTGROUPNAME] = self.data.port_group_name_f extra_specs[utils.WORKLOAD] = self.data.workload ref_mv_dict = self.data.masking_view_dict self.mock_object(self.common, 'next_gen', False) self.mock_object(self.common, 'powermax_port_group_name_template', 'portGroupName') extra_specs.pop(utils.IS_RE, None) masking_view_dict = self.common._populate_masking_dict( volume, connector, extra_specs) self.assertEqual(ref_mv_dict, masking_view_dict) # Metro volume, pass in rep_extra_specs and retrieve target device rep_extra_specs = deepcopy(self.data.rep_extra_specs) rep_extra_specs[utils.PORTGROUPNAME] = self.data.port_group_name_f self.common._populate_masking_dict( volume, connector, extra_specs, rep_extra_specs) mock_tgt.assert_called_once() # device_id is None with mock.patch.object(self.common, '_find_device_on_array', return_value=None): self.assertRaises(exception.VolumeBackendAPIException, 
self.common._populate_masking_dict, volume, connector, extra_specs) def test_populate_masking_dict_no_slo(self): volume = self.data.test_volume connector = self.data.connector extra_specs = {'slo': None, 'workload': None, 'srp': self.data.srp, 'array': self.data.array, utils.PORTGROUPNAME: self.data.port_group_name_f} ref_mv_dict = self.data.masking_view_dict_no_slo masking_view_dict = self.common._populate_masking_dict( volume, connector, extra_specs) self.assertEqual(ref_mv_dict, masking_view_dict) def test_populate_masking_dict_compr_disabled(self): volume = self.data.test_volume connector = self.data.connector extra_specs = deepcopy(self.data.extra_specs) extra_specs[utils.PORTGROUPNAME] = self.data.port_group_name_f extra_specs[utils.DISABLECOMPRESSION] = "true" ref_mv_dict = self.data.masking_view_dict_compression_disabled extra_specs[utils.WORKLOAD] = self.data.workload masking_view_dict = self.common._populate_masking_dict( volume, connector, extra_specs) self.assertEqual(ref_mv_dict, masking_view_dict) def test_populate_masking_dict_next_gen(self): volume = self.data.test_volume connector = self.data.connector extra_specs = deepcopy(self.data.extra_specs) extra_specs[utils.PORTGROUPNAME] = self.data.port_group_name_f self.mock_object(self.common, 'next_gen', True) masking_view_dict = self.common._populate_masking_dict( volume, connector, extra_specs) self.assertEqual('NONE', masking_view_dict[utils.WORKLOAD]) @mock.patch.object(common.PowerMaxCommon, '_cleanup_device_snapvx') def test_create_cloned_volume(self, mck_cleanup_snaps): volume = self.data.test_clone_volume source_volume = self.data.test_volume extra_specs = self.data.extra_specs ref_response = (self.data.provider_location_clone, dict(), dict()) clone_dict, rep_update, rep_info_dict = ( self.common._create_cloned_volume( volume, source_volume, extra_specs)) self.assertEqual(ref_response, (clone_dict, rep_update, rep_info_dict)) @mock.patch.object(common.PowerMaxCommon, '_cleanup_device_snapvx') def test_create_cloned_volume_is_snapshot(self, mck_cleanup_snaps): volume = self.data.test_snapshot source_volume = self.data.test_volume extra_specs = self.data.extra_specs ref_response = (self.data.snap_location, dict(), dict()) clone_dict, rep_update, rep_info_dict = ( self.common._create_cloned_volume( volume, source_volume, extra_specs, True, False)) self.assertEqual(ref_response, (clone_dict, rep_update, rep_info_dict)) @mock.patch.object(common.PowerMaxCommon, '_cleanup_device_snapvx') def test_create_cloned_volume_from_snapshot(self, mck_cleanup_snaps): volume = self.data.test_clone_volume source_volume = self.data.test_snapshot extra_specs = self.data.extra_specs ref_response = (self.data.provider_location_snapshot, dict(), dict()) clone_dict, rep_update, rep_info_dict = ( self.common._create_cloned_volume( volume, source_volume, extra_specs, False, True)) self.assertEqual(ref_response, (clone_dict, rep_update, rep_info_dict)) def test_create_cloned_volume_not_licenced(self): volume = self.data.test_clone_volume source_volume = self.data.test_volume extra_specs = self.data.extra_specs with mock.patch.object(self.rest, 'is_snapvx_licensed', return_value=False): self.assertRaises(exception.VolumeBackendAPIException, self.common._create_cloned_volume, volume, source_volume, extra_specs) @mock.patch.object(common.PowerMaxCommon, '_find_device_on_array') def test_create_cloned_volume_not_licenced_2(self, mock_device): volume = self.data.test_clone_volume source_volume = self.data.test_volume extra_specs = 
self.data.extra_specs
        with mock.patch.object(self.rest, 'is_snapvx_licensed',
                               return_value=False):
            self.assertRaises(exception.VolumeBackendAPIException,
                              self.common._create_cloned_volume,
                              volume, source_volume, extra_specs,
                              False, False)
            mock_device.assert_not_called()

    @mock.patch.object(common.PowerMaxCommon, '_find_device_on_array',
                       return_value=None)
    @mock.patch.object(common.PowerMaxCommon, '_cleanup_device_snapvx')
    def test_create_cloned_volume_source_not_found(
            self, mock_check, mock_device):
        volume = self.data.test_clone_volume
        source_volume = self.data.test_volume
        extra_specs = self.data.extra_specs
        with mock.patch.object(self.rest, 'is_snapvx_licensed',
                               return_value=True):
            self.assertRaises(exception.VolumeBackendAPIException,
                              self.common._create_cloned_volume,
                              volume, source_volume, extra_specs,
                              False, False)
            mock_check.assert_not_called()

    def test_parse_snap_info_found(self):
        ref_device_id = self.data.device_id
        ref_snap_name = self.data.snap_location['snap_name']
        sourcedevice_id, foundsnap_name, __ = self.common._parse_snap_info(
            self.data.array, self.data.test_snapshot)
        self.assertEqual(ref_device_id, sourcedevice_id)
        self.assertEqual(ref_snap_name, foundsnap_name)

    def test_parse_snap_info_not_found(self):
        ref_snap_name = None
        with mock.patch.object(self.rest, 'get_volume_snap',
                               return_value=None):
            __, foundsnap_name, __ = self.common._parse_snap_info(
                self.data.array, self.data.test_snapshot)
            self.assertEqual(ref_snap_name, foundsnap_name)

    def test_parse_snap_info_exception(self):
        with mock.patch.object(
                self.rest, 'get_volume_snaps',
                side_effect=exception.VolumeBackendAPIException):
            __, foundsnap_name, __ = self.common._parse_snap_info(
                self.data.array, self.data.test_snapshot)
            self.assertIsNone(foundsnap_name)

    def test_parse_snap_info_provider_location_not_string(self):
        snapshot = fake_snapshot.fake_snapshot_obj(
            context='ctxt', provider_location={'not': 'string'})
        sourcedevice_id, foundsnap_name, __ = self.common._parse_snap_info(
            self.data.array, snapshot)
        self.assertIsNone(foundsnap_name)

    def test_create_snapshot_success(self):
        array = self.data.array
        snapshot = self.data.test_snapshot
        source_device_id = self.data.device_id
        extra_specs = self.data.extra_specs
        ref_dict = {'snap_name': self.data.test_snapshot_snap_name,
                    'source_id': self.data.device_id}
        snap_dict = self.common._create_snapshot(
            array, snapshot, source_device_id, extra_specs)
        self.assertEqual(ref_dict, snap_dict)

    def test_create_snapshot_exception(self):
        array = self.data.array
        snapshot = self.data.test_snapshot
        source_device_id = self.data.device_id
        extra_specs = self.data.extra_specs
        with mock.patch.object(
                self.provision, 'create_volume_snapvx',
                side_effect=exception.VolumeBackendAPIException):
            self.assertRaises(exception.VolumeBackendAPIException,
                              self.common._create_snapshot, array,
                              snapshot, source_device_id, extra_specs)

    @mock.patch.object(rest.PowerMaxRest, 'get_volume_snapshot_list',
                       return_value=list())
    @mock.patch.object(masking.PowerMaxMasking,
                       'remove_vol_from_storage_group')
    def test_delete_volume_from_srp(self, mock_rm, mock_get_snaps):
        array = self.data.array
        device_id = self.data.device_id
        volume_name = self.data.test_volume.name
        ref_extra_specs = deepcopy(self.data.extra_specs_intervals_set)
        ref_extra_specs[utils.PORTGROUPNAME] = self.data.port_group_name_f
        volume = self.data.test_volume
        with mock.patch.object(self.common, '_cleanup_device_snapvx'):
            with mock.patch.object(
                    self.common, '_delete_from_srp') as mock_delete:
                self.common._delete_volume(volume)
mock_delete.assert_called_once_with( array, device_id, volume_name, ref_extra_specs) def test_delete_volume_not_found(self): volume = self.data.test_volume with mock.patch.object(self.common, '_find_device_on_array', return_value=None): with mock.patch.object( self.common, '_delete_from_srp') as mock_delete: self.common._delete_volume(volume) mock_delete.assert_not_called() def test_create_volume_success(self): volume = self.data.test_volume volume_name = '1' volume_size = self.data.test_volume.size extra_specs = self.data.extra_specs ref_response = (self.data.provider_location, dict(), dict()) with mock.patch.object(self.rest, 'get_volume', return_value=self.data.volume_details[0]): volume_dict, rep_update, rep_info_dict = ( self.common._create_volume( volume, volume_name, volume_size, extra_specs)) self.assertEqual(ref_response, (volume_dict, rep_update, rep_info_dict)) @mock.patch.object(rest.PowerMaxRest, 'find_volume_device_id', return_value=tpd.PowerMaxData.device_id2) @mock.patch.object( common.PowerMaxCommon, '_create_non_replicated_volume', return_value=deepcopy(tpd.PowerMaxData.provider_location)) @mock.patch.object(rest.PowerMaxRest, 'get_volume', return_value=tpd.PowerMaxData.volume_details[0]) def test_create_volume_update_returning_device_id( self, mck_get, mck_create, mck_find): volume = self.data.test_volume volume_name = '1' volume_size = self.data.test_volume.size extra_specs = self.data.extra_specs ref_response = (self.data.provider_location2, dict(), dict()) volume_dict, rep_update, rep_info_dict = ( self.common._create_volume( volume, volume_name, volume_size, extra_specs)) self.assertEqual(ref_response, (volume_dict, rep_update, rep_info_dict)) def test_create_volume_success_next_gen(self): volume = self.data.test_volume volume_name = '1' volume_size = self.data.test_volume.size extra_specs = self.data.extra_specs self.mock_object(self.common, 'next_gen', True) with mock.patch.object( self.utils, 'is_compression_disabled', return_value=True): with mock.patch.object( self.rest, 'get_array_model_info', return_value=('PowerMax 2000', True)): with mock.patch.object( self.masking, 'get_or_create_default_storage_group') as mock_get: self.common._create_volume( volume, volume_name, volume_size, extra_specs) mock_get.assert_called_once_with( extra_specs['array'], extra_specs[utils.SRP], extra_specs[utils.SLO], 'NONE', extra_specs, True, False, None) @mock.patch.object(provision.PowerMaxProvision, 'create_volume_from_sg', side_effect=exception.VolumeBackendAPIException('')) @mock.patch.object(common.PowerMaxCommon, '_cleanup_non_rdf_volume_create_post_failure') @mock.patch.object(rest.PowerMaxRest, 'delete_storage_group') def test_create_volume_failed(self, mck_del, mck_cleanup, mck_create): volume = self.data.test_volume volume_name = self.data.test_volume.name volume_size = self.data.test_volume.size extra_specs = self.data.extra_specs dev1 = self.data.device_id dev2 = self.data.device_id2 with mock.patch.object( self.rest, 'get_volumes_in_storage_group', side_effect=[[dev1], [dev1, dev2]]): self.assertRaises(exception.VolumeBackendAPIException, self.common._create_volume, volume, volume_name, volume_size, extra_specs) mck_cleanup.assert_called_once_with( volume, volume_name, extra_specs, [dev2]) # path 2: no new volumes created with mock.patch.object( self.rest, 'get_volumes_in_storage_group', side_effect=[[], []]): self.assertRaises(exception.VolumeBackendAPIException, self.common._create_volume, volume, volume_name, volume_size, extra_specs) mck_del.assert_called_once() 
@mock.patch.object(common.PowerMaxCommon, 'cleanup_rdf_device_pair') @mock.patch.object( rest.PowerMaxRest, 'is_vol_in_rep_session', return_value=('', '', [ {utils.RDF_GROUP_NO: tpd.PowerMaxData.rdf_group_no_1}])) @mock.patch.object(rest.PowerMaxRest, 'srdf_resume_replication') @mock.patch.object(utils.PowerMaxUtils, 'get_default_storage_group_name', return_value=tpd.PowerMaxData.storagegroup_name_f) @mock.patch.object(common.PowerMaxCommon, 'prepare_replication_details', return_value=('', tpd.PowerMaxData.rep_extra_specs, '', '',)) def test_cleanup_rdf_volume_create_post_failure_sync( self, mck_prep, mck_sg, mck_resume, mck_sess, mck_clean): array = self.data.array volume = self.data.test_volume volume_name = self.data.test_volume.name extra_specs = deepcopy(self.data.extra_specs_rep_enabled) extra_specs[utils.REP_CONFIG] = self.data.rep_config_sync extra_specs['rep_mode'] = utils.REP_SYNC devices = [self.data.device_id] self.common._cleanup_rdf_volume_create_post_failure( volume, volume_name, extra_specs, devices) mck_prep.assert_called_once_with(extra_specs) mck_sg.assert_called_once_with( extra_specs['srp'], extra_specs['slo'], extra_specs['workload'], False, True, extra_specs['rep_mode']) mck_resume.assert_called_once_with( array, self.data.storagegroup_name_f, self.data.rdf_group_no_1, self.data.rep_extra_specs) mck_sess.assert_called_once_with(array, self.data.device_id) mck_clean.assert_called_once_with( array, self.data.rdf_group_no_1, self.data.device_id, extra_specs) @mock.patch.object(common.PowerMaxCommon, 'cleanup_rdf_device_pair') @mock.patch.object( rest.PowerMaxRest, 'is_vol_in_rep_session', return_value=('', '', [ {utils.RDF_GROUP_NO: tpd.PowerMaxData.rdf_group_no_1}])) @mock.patch.object(rest.PowerMaxRest, 'srdf_resume_replication') @mock.patch.object(utils.PowerMaxUtils, 'get_rdf_management_group_name', return_value=tpd.PowerMaxData.storagegroup_name_f) @mock.patch.object(common.PowerMaxCommon, 'prepare_replication_details', return_value=('', tpd.PowerMaxData.rep_extra_specs, '', '',)) def test_cleanup_rdf_volume_create_post_failure_non_sync( self, mck_prep, mck_mgmt, mck_resume, mck_sess, mck_clean): array = self.data.array volume = self.data.test_volume volume_name = self.data.test_volume.name extra_specs = deepcopy(self.data.extra_specs_rep_enabled) extra_specs[utils.REP_CONFIG] = self.data.rep_config_async extra_specs['rep_mode'] = utils.REP_ASYNC devices = [self.data.device_id] self.common._cleanup_rdf_volume_create_post_failure( volume, volume_name, extra_specs, devices) mck_prep.assert_called_once_with(extra_specs) mck_mgmt.assert_called_once_with(extra_specs[utils.REP_CONFIG]) mck_resume.assert_called_once_with( array, self.data.storagegroup_name_f, self.data.rdf_group_no_1, self.data.rep_extra_specs) mck_sess.assert_called_once_with(array, self.data.device_id) mck_clean.assert_called_once_with( array, self.data.rdf_group_no_1, self.data.device_id, extra_specs) @mock.patch.object(common.PowerMaxCommon, '_delete_from_srp') @mock.patch.object(masking.PowerMaxMasking, 'remove_and_reset_members') @mock.patch.object(rest.PowerMaxRest, 'is_vol_in_rep_session', return_value=('', '', False)) @mock.patch.object(rest.PowerMaxRest, 'srdf_resume_replication') @mock.patch.object(utils.PowerMaxUtils, 'get_rdf_management_group_name', return_value=tpd.PowerMaxData.storagegroup_name_f) @mock.patch.object(common.PowerMaxCommon, 'prepare_replication_details', return_value=('', tpd.PowerMaxData.rep_extra_specs, '', '',)) def 
test_cleanup_rdf_volume_create_post_failure_pre_rdf_establish( self, mck_prep, mck_mgmt, mck_resume, mck_sess, mck_rem, mck_del): array = self.data.array volume = self.data.test_volume volume_name = self.data.test_volume.name extra_specs = deepcopy(self.data.extra_specs_rep_enabled) extra_specs[utils.REP_CONFIG] = self.data.rep_config_sync extra_specs['rep_mode'] = utils.REP_ASYNC devices = [self.data.device_id] self.common._cleanup_rdf_volume_create_post_failure( volume, volume_name, extra_specs, devices) mck_prep.assert_called_once_with(extra_specs) mck_mgmt.assert_called_once_with(extra_specs[utils.REP_CONFIG]) mck_resume.assert_called_once_with( array, self.data.storagegroup_name_f, self.data.rdf_group_no_1, self.data.rep_extra_specs) mck_sess.assert_called_once_with(array, self.data.device_id) mck_rem.assert_called_once_with(array, volume, self.data.device_id, volume_name, extra_specs, False) mck_del.assert_called_once_with(array, self.data.device_id, volume_name, extra_specs) @mock.patch.object(common.PowerMaxCommon, '_delete_from_srp') @mock.patch.object(masking.PowerMaxMasking, 'remove_and_reset_members') def test_cleanup_non_rdf_volume_create_post_failure( self, mck_remove, mck_del): array = self.data.array volume = self.data.test_volume volume_name = self.data.test_volume.name extra_specs = self.data.extra_specs devices = [self.data.device_id] self.common._cleanup_non_rdf_volume_create_post_failure( volume, volume_name, extra_specs, devices) mck_remove.assert_called_once_with( array, volume, self.data.device_id, volume_name, extra_specs, False) mck_del.assert_called_once_with( array, self.data.device_id, volume_name, extra_specs) def test_create_volume_incorrect_slo(self): volume = self.data.test_volume volume_name = self.data.test_volume.name volume_size = self.data.test_volume.size extra_specs = {'slo': 'Diamondz', 'workload': 'DSSSS', 'srp': self.data.srp, 'array': self.data.array} self.assertRaises( exception.VolumeBackendAPIException, self.common._create_volume, volume, volume_name, volume_size, extra_specs) def test_set_vmax_extra_specs(self): srp_record = self.common.get_attributes_from_cinder_config() extra_specs = self.common._set_vmax_extra_specs( self.data.vol_type_extra_specs, srp_record) ref_extra_specs = deepcopy(self.data.extra_specs_intervals_set) ref_extra_specs[utils.PORTGROUPNAME] = self.data.port_group_name_f self.assertEqual(ref_extra_specs, extra_specs) def test_set_vmax_extra_specs_no_srp_name(self): srp_record = self.common.get_attributes_from_cinder_config() with mock.patch.object(self.rest, 'get_slo_list', return_value=[]): extra_specs = self.common._set_vmax_extra_specs({}, srp_record) self.assertIsNone(extra_specs['slo']) def test_set_vmax_extra_specs_compr_disabled(self): with mock.patch.object(self.rest, 'is_compression_capable', return_value=True): srp_record = self.common.get_attributes_from_cinder_config() specs = deepcopy(self.data.vol_type_extra_specs_compr_disabled) extra_specs = self.common._set_vmax_extra_specs(specs, srp_record) ref_extra_specs = deepcopy(self.data.extra_specs_intervals_set) ref_extra_specs[utils.PORTGROUPNAME] = self.data.port_group_name_f ref_extra_specs[utils.DISABLECOMPRESSION] = "true" self.assertEqual(ref_extra_specs, extra_specs) def test_set_vmax_extra_specs_compr_disabled_not_compr_capable(self): with mock.patch.object(self.rest, 'is_compression_capable', return_value=False): srp_record = self.common.get_attributes_from_cinder_config() specs = deepcopy(self.data.vol_type_extra_specs_compr_disabled) extra_specs = 
self.common._set_vmax_extra_specs(specs, srp_record) ref_extra_specs = deepcopy(self.data.extra_specs_intervals_set) ref_extra_specs[utils.PORTGROUPNAME] = self.data.port_group_name_f self.assertEqual(ref_extra_specs, extra_specs) def test_set_vmax_extra_specs_portgroup_as_spec(self): srp_record = self.common.get_attributes_from_cinder_config() extra_specs = self.common._set_vmax_extra_specs( {utils.PORTGROUPNAME: 'extra_spec_pg'}, srp_record) self.assertEqual('extra_spec_pg', extra_specs[utils.PORTGROUPNAME]) def test_set_vmax_extra_specs_no_portgroup_set(self): srp_record = { 'srpName': 'SRP_1', 'RestServerIp': '1.1.1.1', 'RestPassword': 'smc', 'SSLCert': None, 'RestServerPort': 8443, 'SSLVerify': False, 'RestUserName': 'smc', 'SerialNumber': '000197800123'} self.assertRaises(exception.VolumeBackendAPIException, self.common._set_vmax_extra_specs, {}, srp_record) def test_set_vmax_extra_specs_next_gen(self): srp_record = self.common.get_attributes_from_cinder_config() self.mock_object(self.common, 'next_gen', True) extra_specs = self.common._set_vmax_extra_specs( self.data.vol_type_extra_specs, srp_record) ref_extra_specs = deepcopy(self.data.extra_specs_intervals_set) ref_extra_specs[utils.PORTGROUPNAME] = self.data.port_group_name_f self.assertEqual('NONE', extra_specs[utils.WORKLOAD]) def test_set_vmax_extra_specs_tags_not_set(self): srp_record = self.common.get_attributes_from_cinder_config() extra_specs = self.common._set_vmax_extra_specs( self.data.vol_type_extra_specs, srp_record) self.assertNotIn('storagetype:storagegrouptags', extra_specs) def test_set_vmax_extra_specs_tags_set_correctly(self): srp_record = self.common.get_attributes_from_cinder_config() extra_specs = self.common._set_vmax_extra_specs( self.data.vol_type_extra_specs_tags, srp_record) self.assertEqual( self.data.vol_type_extra_specs_tags[utils.STORAGE_GROUP_TAGS], extra_specs[utils.STORAGE_GROUP_TAGS]) def test_set_vmax_extra_specs_tags_set_incorrectly(self): srp_record = self.common.get_attributes_from_cinder_config() self.assertRaises(exception.VolumeBackendAPIException, self.common._set_vmax_extra_specs, self.data.vol_type_extra_specs_tags_bad, srp_record) def test_set_vmax_extra_specs_pg_specs_init_conn(self): pool_record = self.common.get_attributes_from_cinder_config() with mock.patch.object( self.common, '_select_port_group_for_extra_specs', side_effect=( self.common._select_port_group_for_extra_specs)) as mck_s: self.common._set_vmax_extra_specs( self.data.vol_type_extra_specs, pool_record, init_conn=True) mck_s.assert_called_once_with( self.data.vol_type_extra_specs, pool_record, True) def test_raise_exception_if_array_not_configured(self): self.driver.configuration.powermax_array = None self.assertRaises(exception.InvalidConfigurationValue, self.common.get_attributes_from_cinder_config) def test_raise_exception_if_srp_not_configured(self): self.driver.configuration.powermax_srp = None self.assertRaises(exception.InvalidConfigurationValue, self.common.get_attributes_from_cinder_config) def test_delete_volume_from_srp_success(self): array = self.data.array device_id = self.data.device_id volume_name = self.data.test_volume.name extra_specs = self.data.extra_specs with mock.patch.object( self.provision, 'delete_volume_from_srp') as mock_del: self.common._delete_from_srp(array, device_id, volume_name, extra_specs) mock_del.assert_called_once_with(array, device_id, volume_name) def test_delete_volume_from_srp_failed(self): self.mock_object(time, 'sleep') array = self.data.array device_id = 
self.data.failed_resource volume_name = self.data.test_volume.name extra_specs = self.data.extra_specs with mock.patch.object( self.masking, 'add_volume_to_default_storage_group') as mock_add: self.assertRaises(exception.VolumeBackendAPIException, self.common._delete_from_srp, array, device_id, volume_name, extra_specs) mock_add.assert_not_called() @mock.patch.object(utils.PowerMaxUtils, 'is_volume_failed_over', side_effect=[True, False]) @mock.patch.object(common.PowerMaxCommon, '_get_replication_extra_specs', return_value=tpd.PowerMaxData.rep_extra_specs) def test_get_target_wwns_from_masking_view(self, mock_rep_specs, mock_fo): ref_wwns = [self.data.wwpn1] for x in range(0, 2): target_wwns = self.common._get_target_wwns_from_masking_view( self.data.device_id, self.data.connector['host'], self.data.extra_specs) self.assertEqual(ref_wwns, target_wwns) def test_get_target_wwns_from_masking_view_no_mv(self): with mock.patch.object(self.common, '_get_masking_views_from_volume', return_value=([], None)): target_wwns = self.common._get_target_wwns_from_masking_view( self.data.device_id, self.data.connector['host'], self.data.extra_specs) self.assertEqual([], target_wwns) @mock.patch.object(common.PowerMaxCommon, '_get_replication_extra_specs', return_value=tpd.PowerMaxData.rep_extra_specs) @mock.patch.object(common.PowerMaxCommon, 'get_rdf_details', return_value=('1', tpd.PowerMaxData.remote_array)) @mock.patch.object(rest.PowerMaxRest, 'get_rdf_pair_volume', return_value=tpd.PowerMaxData.rdf_group_vol_details) @mock.patch.object( common.PowerMaxCommon, '_get_target_wwns_from_masking_view', return_value=[tpd.PowerMaxData.wwnn1]) def test_get_target_wwns( self, mck_wwns, mock_tgt, mock_rdf_grp, mock_specs): __, metro_wwns = self.common.get_target_wwns_from_masking_view( self.data.test_volume, self.data.connector) self.assertEqual([], metro_wwns) # Is metro volume with mock.patch.object(common.PowerMaxCommon, '_initial_setup', return_value=self.data.ex_specs_rep_config): __, metro_wwns = self.common.get_target_wwns_from_masking_view( self.data.test_volume, self.data.connector) self.assertEqual([self.data.wwnn1], metro_wwns) @mock.patch.object(common.PowerMaxCommon, '_get_target_wwns_from_masking_view') @mock.patch.object(utils.PowerMaxUtils, 'get_host_name_label', return_value='my_short_h94485') @mock.patch.object(utils.PowerMaxUtils, 'is_replication_enabled', return_value=False) def test_get_target_wwns_host_override( self, mock_rep_check, mock_label, mock_mv): host_record = {'host': 'my_short_host_name'} connector = deepcopy(self.data.connector) connector.update(host_record) extra_specs = {'pool_name': 'Diamond+DSS+SRP_1+000197800123', 'srp': 'SRP_1', 'array': '000197800123', 'storagetype:portgroupname': 'OS-fibre-PG', 'interval': 1, 'retries': 1, 'slo': 'Diamond', 'workload': 'DSS'} host_template = 'shortHostName[:10]uuid[:5]' self.mock_object(self.common, 'powermax_short_host_name_template', host_template) self.common.get_target_wwns_from_masking_view( self.data.test_volume, connector) mock_label.assert_called_once_with( connector['host'], host_template) mock_mv.assert_called_once_with( self.data.device_id, 'my_short_h94485', extra_specs) def test_get_port_group_from_masking_view(self): array = self.data.array maskingview_name = self.data.masking_view_name_f with mock.patch.object(self.rest, 'get_element_from_masking_view') as mock_get: self.common.get_port_group_from_masking_view( array, maskingview_name) mock_get.assert_called_once_with( array, maskingview_name, portgroup=True) def 
test_get_initiator_group_from_masking_view(self): array = self.data.array maskingview_name = self.data.masking_view_name_f with mock.patch.object( self.rest, 'get_element_from_masking_view') as mock_get: self.common.get_initiator_group_from_masking_view( array, maskingview_name) mock_get.assert_called_once_with( array, maskingview_name, host=True) def test_get_common_masking_views(self): array = self.data.array portgroup_name = self.data.port_group_name_f initiator_group_name = self.data.initiatorgroup_name_f with mock.patch.object( self.rest, 'get_common_masking_views') as mock_get: self.common.get_common_masking_views( array, portgroup_name, initiator_group_name) mock_get.assert_called_once_with( array, portgroup_name, initiator_group_name) def test_get_iscsi_ip_iqn_port(self): self.common.rest.u4p_version = self.data.u4p_100_endpoint phys_port = '%(dir)s:%(port)s' % {'dir': self.data.iscsi_dir, 'port': self.data.iscsi_port} ref_ip_iqn = [{'iqn': self.data.initiator, 'ip': self.data.ip, 'physical_port': phys_port}] director = self.data.portgroup[1]['symmetrixPortKey'][0]['directorId'] port = self.data.portgroup[1]['symmetrixPortKey'][0]['portId'] dirport = "%s:%s" % (director, port) ip_iqn_list = self.common._get_iscsi_ip_iqn_port(self.data.array, dirport) self.assertEqual(ref_ip_iqn, ip_iqn_list) def test_find_ip_and_iqns(self): self.common.rest.u4p_version = self.data.u4p_100_endpoint ref_ip_iqn = [{'iqn': self.data.initiator, 'ip': self.data.ip, 'physical_port': self.data.iscsi_dir_port}] ip_iqn_list = self.common._find_ip_and_iqns( self.data.array, self.data.port_group_name_i) self.assertEqual(ref_ip_iqn, ip_iqn_list) @mock.patch.object(rest.PowerMaxRest, 'get_portgroup', return_value=None) def test_find_ip_and_iqns_no_port_group(self, mock_port): self.assertRaises( exception.VolumeBackendAPIException, self.common._find_ip_and_iqns, self.data.array, self.data.port_group_name_i) def test_create_replica_snap_name(self): array = self.data.array clone_volume = self.data.test_clone_volume source_device_id = self.data.device_id snap_name = self.data.snap_location['snap_name'] ref_response = (self.data.provider_location_snapshot, dict(), dict()) clone_dict, rep_update, rep_info_dict = self.common._create_replica( array, clone_volume, source_device_id, self.data.extra_specs, snap_name) self.assertEqual(ref_response, (clone_dict, rep_update, rep_info_dict)) @mock.patch.object( rest.PowerMaxRest, 'get_slo_list', return_value=['Diamond']) @mock.patch.object( common.PowerMaxCommon, '_create_volume', return_value=(tpd.PowerMaxData.rep_info_dict, tpd.PowerMaxData.replication_update, tpd.PowerMaxData.rep_info_dict)) @mock.patch.object(rest.PowerMaxRest, 'rdf_resume_with_retries') @mock.patch.object(rest.PowerMaxRest, 'srdf_suspend_replication') @mock.patch.object(rest.PowerMaxRest, 'wait_for_rdf_pair_sync') def test_create_replica_rep_enabled( self, mck_wait, mck_susp, mck_res, mck_create, mck_slo): array = self.data.array clone_volume = self.data.test_clone_volume source_device_id = self.data.device_id snap_name = self.data.snap_location['snap_name'] extra_specs = deepcopy(self.data.rep_extra_specs_rep_config) __, rep_extra_specs, __, __ = self.common.prepare_replication_details( extra_specs) rdfg = extra_specs['rdf_group_no'] self.common._create_replica( array, clone_volume, source_device_id, rep_extra_specs, snap_name) mck_wait.assert_called_once_with( array, rdfg, source_device_id, rep_extra_specs) mck_susp.assert_called_once_with( array, rep_extra_specs['sg_name'], rdfg, rep_extra_specs) 
mck_res.assert_called_once_with(array, rep_extra_specs) def test_create_replica_no_snap_name(self): array = self.data.array clone_volume = self.data.test_clone_volume source_device_id = self.data.device_id snap_name = "temp-" + source_device_id + "-snapshot_for_clone" ref_response = (self.data.provider_location_clone, dict(), dict()) with mock.patch.object( self.utils, 'get_temp_snap_name', return_value=snap_name) as mock_get_snap: clone_dict, rep_update, rep_info_dict = ( self.common._create_replica( array, clone_volume, source_device_id, self.data.extra_specs)) self.assertEqual(ref_response, (clone_dict, rep_update, rep_info_dict)) mock_get_snap.assert_called_once_with(source_device_id) def test_create_replica_failed_cleanup_target(self): array = self.data.array clone_volume = self.data.test_clone_volume device_id = self.data.device_id snap_name = self.data.failed_resource clone_name = 'OS-' + clone_volume.id extra_specs = self.data.extra_specs with mock.patch.object( self.common, '_cleanup_target') as mock_cleanup: self.assertRaises( exception.VolumeBackendAPIException, self.common._create_replica, array, clone_volume, device_id, self.data.extra_specs, snap_name) mock_cleanup.assert_called_once_with( array, device_id, device_id, clone_name, snap_name, extra_specs, target_volume=clone_volume) def test_create_replica_failed_no_target(self): array = self.data.array clone_volume = self.data.test_clone_volume source_device_id = self.data.device_id snap_name = self.data.failed_resource with mock.patch.object(self.common, '_create_volume', return_value=({'device_id': None}, {}, {})): with mock.patch.object( self.common, '_cleanup_target') as mock_cleanup: self.assertRaises( exception.VolumeBackendAPIException, self.common._create_replica, array, clone_volume, source_device_id, self.data.extra_specs, snap_name) mock_cleanup.assert_not_called() @mock.patch.object( utils.PowerMaxUtils, 'compare_cylinders', side_effect=exception.VolumeBackendAPIException) def test_create_replica_cylinder_mismatch(self, mock_cyl): array = self.data.array clone_volume = self.data.test_clone_volume source_device_id = self.data.device_id snap_name = self.data.snap_location['snap_name'] clone_name = 'OS-' + clone_volume.id with mock.patch.object( self.common, '_cleanup_target') as mock_cleanup: self.assertRaises( # noqa: H202 Exception, self.common._create_replica, array, clone_volume, source_device_id, self.data.extra_specs, snap_name) # noqa: ignore=H202 mock_cleanup.assert_called_once_with( array, source_device_id, source_device_id, clone_name, snap_name, self.data.extra_specs, target_volume=clone_volume) @mock.patch.object(rest.PowerMaxRest, 'get_storage_groups_from_volume', return_value=[]) @mock.patch.object(rest.PowerMaxRest, 'get_snap_id', return_value=tpd.PowerMaxData.snap_id) @mock.patch.object( masking.PowerMaxMasking, 'remove_and_reset_members') def test_cleanup_target_sync_present( self, mock_remove, mock_snaps, mock_sgs): array = self.data.array clone_volume = self.data.test_clone_volume source_device_id = self.data.device_id target_device_id = self.data.device_id2 snap_name = self.data.failed_resource clone_name = clone_volume.name extra_specs = self.data.extra_specs with mock.patch.object(self.rest, 'get_sync_session', return_value='session'): with mock.patch.object( self.provision, 'unlink_snapvx_tgt_volume') as mock_break: self.common._cleanup_target( array, target_device_id, source_device_id, clone_name, snap_name, extra_specs) mock_break.assert_called_with( array, target_device_id, source_device_id, 
snap_name, extra_specs, self.data.snap_id) @mock.patch.object(rest.PowerMaxRest, 'get_volume_snaps', return_value=[{'snap_name': 'snap_name', 'snap_id': tpd.PowerMaxData.snap_id}]) @mock.patch.object(masking.PowerMaxMasking, 'remove_volume_from_sg') def test_cleanup_target_no_sync(self, mock_remove, mock_snaps): array = self.data.array clone_volume = self.data.test_clone_volume source_device_id = self.data.device_id target_device_id = self.data.device_id2 snap_name = self.data.failed_resource clone_name = clone_volume.name extra_specs = self.data.extra_specs with mock.patch.object(self.rest, 'get_sync_session', return_value=None): with mock.patch.object( self.common, '_delete_from_srp') as mock_delete: self.common._cleanup_target( array, target_device_id, source_device_id, clone_name, snap_name, extra_specs) mock_delete.assert_called_once_with( array, target_device_id, clone_name, extra_specs) @mock.patch.object( common.PowerMaxCommon, 'get_volume_metadata', return_value={'device-meta-key-1': 'device-meta-value-1', 'device-meta-key-2': 'device-meta-value-2'}) def test_manage_existing_success(self, mck_meta): external_ref = {u'source-name': u'00002'} provider_location = {'device_id': u'00002', 'array': u'000197800123'} ref_update = {'provider_location': str(provider_location), 'metadata': {'device-meta-key-1': 'device-meta-value-1', 'device-meta-key-2': 'device-meta-value-2', 'user-meta-key-1': 'user-meta-value-1', 'user-meta-key-2': 'user-meta-value-2'}} volume = deepcopy(self.data.test_volume) volume.metadata = {'user-meta-key-1': 'user-meta-value-1', 'user-meta-key-2': 'user-meta-value-2'} with mock.patch.object( self.common, '_check_lun_valid_for_cinder_management', return_value=('vol1', 'test_sg')): model_update = self.common.manage_existing(volume, external_ref) self.assertEqual(ref_update, model_update) @mock.patch.object( common.PowerMaxCommon, 'get_volume_metadata', return_value='') @mock.patch.object( common.PowerMaxCommon, '_check_lun_valid_for_cinder_management', return_value=('vol1', 'test_sg')) def test_manage_existing_no_fall_through(self, mock_check, mock_get): external_ref = {u'source-name': self.data.device_id} volume = deepcopy(self.data.test_volume) with mock.patch.object( self.common, '_manage_volume_with_uuid', return_value=( self.data.array, self.data.device_id2)) as mock_uuid: self.common.manage_existing(volume, external_ref) mock_uuid.assert_not_called() @mock.patch.object( common.PowerMaxCommon, 'get_volume_metadata', return_value='') @mock.patch.object( common.PowerMaxCommon, '_check_lun_valid_for_cinder_management', return_value=('vol1', 'test_sg')) def test_manage_existing_fall_through(self, mock_check, mock_get): external_ref = {u'source-name': self.data.volume_id} volume = deepcopy(self.data.test_volume) with mock.patch.object( self.common, '_manage_volume_with_uuid', return_value=( self.data.array, self.data.device_id2)) as mock_uuid: self.common.manage_existing(volume, external_ref) mock_uuid.assert_called() @mock.patch.object(rest.PowerMaxRest, 'find_volume_device_id', return_value=tpd.PowerMaxData.device_id2) def test_manage_volume_with_uuid_success(self, mock_dev): external_ref = {u'source-name': self.data.volume_id} volume = deepcopy(self.data.test_volume) array, device_id = self.common._manage_volume_with_uuid( external_ref, volume) self.assertEqual(array, self.data.array) self.assertEqual(device_id, self.data.device_id2) @mock.patch.object(rest.PowerMaxRest, 'find_volume_device_id', return_value=tpd.PowerMaxData.device_id2) def 
test_manage_volume_with_prefix_and_uuid_success(self, mock_dev): source_name = 'OS-' + self.data.volume_id external_ref = {u'source-name': source_name} volume = deepcopy(self.data.test_volume) array, device_id = self.common._manage_volume_with_uuid( external_ref, volume) self.assertEqual(array, self.data.array) self.assertEqual(device_id, self.data.device_id2) def test_manage_volume_with_uuid_exception(self): external_ref = {u'source-name': u'non_compliant_string'} volume = deepcopy(self.data.test_volume) self.assertRaises( exception.VolumeBackendAPIException, self.common._manage_volume_with_uuid, external_ref, volume) @mock.patch.object(rest.PowerMaxRest, 'get_volume_list', return_value=[tpd.PowerMaxData.device_id3]) @mock.patch.object( rest.PowerMaxRest, 'get_masking_views_from_storage_group', return_value=None) @mock.patch.object(rest.PowerMaxRest, 'is_vol_in_rep_session', return_value=(False, False, None)) def test_check_lun_valid_for_cinder_management( self, mock_rep, mock_mv, mock_list): external_ref = {u'source-name': u'00003'} vol, source_sg = self.common._check_lun_valid_for_cinder_management( self.data.array, self.data.device_id3, self.data.test_volume.id, external_ref) self.assertEqual(vol, '123') self.assertIsNone(source_sg) @mock.patch.object(rest.PowerMaxRest, 'get_volume_list', return_value=[tpd.PowerMaxData.device_id4]) @mock.patch.object( rest.PowerMaxRest, 'get_masking_views_from_storage_group', return_value=None) @mock.patch.object(rest.PowerMaxRest, 'is_vol_in_rep_session', return_value=(False, False, None)) def test_check_lun_valid_for_cinder_management_multiple_sg_exception( self, mock_rep, mock_mv, mock_list): external_ref = {u'source-name': u'00004'} self.assertRaises( exception.ManageExistingInvalidReference, self.common._check_lun_valid_for_cinder_management, self.data.array, self.data.device_id4, self.data.test_volume.id, external_ref) @mock.patch.object(rest.PowerMaxRest, 'get_volume_list', return_value=[tpd.PowerMaxData.device_id3]) @mock.patch.object(rest.PowerMaxRest, 'get_volume', side_effect=[None, tpd.PowerMaxData.volume_details[2], tpd.PowerMaxData.volume_details[2], tpd.PowerMaxData.volume_details[1]]) @mock.patch.object( rest.PowerMaxRest, 'get_masking_views_from_storage_group', side_effect=[tpd.PowerMaxData.sg_details[1]['maskingview'], None]) @mock.patch.object( rest.PowerMaxRest, 'get_storage_groups_from_volume', return_value=([tpd.PowerMaxData.defaultstoragegroup_name])) @mock.patch.object(rest.PowerMaxRest, 'is_vol_in_rep_session', side_effect=[(True, False, []), (False, False, None)]) def test_check_lun_valid_for_cinder_management_exception( self, mock_rep, mock_sg, mock_mvs, mock_get_vol, mock_list): external_ref = {u'source-name': u'00003'} for x in range(0, 3): self.assertRaises( exception.ManageExistingInvalidReference, self.common._check_lun_valid_for_cinder_management, self.data.array, self.data.device_id3, self.data.test_volume.id, external_ref) self.assertRaises(exception.ManageExistingAlreadyManaged, self.common._check_lun_valid_for_cinder_management, self.data.array, self.data.device_id3, self.data.test_volume.id, external_ref) @mock.patch.object(rest.PowerMaxRest, 'get_volume_list', return_value=[tpd.PowerMaxData.device_id]) @mock.patch.object( rest.PowerMaxRest, 'get_masking_views_from_storage_group', return_value=None) @mock.patch.object(rest.PowerMaxRest, 'is_vol_in_rep_session', return_value=(False, False, None)) def test_check_lun_valid_for_cinder_management_non_FBA( self, mock_rep, mock_mv, mock_list): external_ref = 
{u'source-name': u'00004'} self.assertRaises( exception.ManageExistingVolumeTypeMismatch, self.common._check_lun_valid_for_cinder_management, self.data.array, self.data.device_id4, self.data.test_volume.id, external_ref) def test_manage_existing_get_size(self): external_ref = {u'source-name': u'00001'} size = self.common.manage_existing_get_size( self.data.test_volume, external_ref) self.assertEqual(2, size) def test_manage_existing_get_size_uuid(self): external_ref = {u'source-name': self.data.volume_id} size = self.common.manage_existing_get_size( self.data.test_volume, external_ref) self.assertEqual(2, size) def test_manage_existing_get_size_prefix_and_uuid(self): source_name = 'volume-' + self.data.volume_id external_ref = {u'source-name': source_name} size = self.common.manage_existing_get_size( self.data.test_volume, external_ref) self.assertEqual(2, size) def test_manage_existing_get_size_invalid_input(self): external_ref = {u'source-name': u'invalid_input'} self.assertRaises(exception.VolumeBackendAPIException, self.common.manage_existing_get_size, self.data.test_volume, external_ref) def test_manage_existing_get_size_exception(self): external_ref = {u'source-name': u'00001'} with mock.patch.object(self.rest, 'get_size_of_device_on_array', return_value=3.5): self.assertRaises(exception.ManageExistingInvalidReference, self.common.manage_existing_get_size, self.data.test_volume, external_ref) @mock.patch.object(rest.PowerMaxRest, 'is_vol_in_rep_session', return_value=(False, False, False)) @mock.patch.object(common.PowerMaxCommon, '_remove_vol_and_cleanup_replication') @mock.patch.object(common.PowerMaxCommon, '_cleanup_device_snapvx') def test_unmanage_success(self, mck_cleanup_snaps, mock_rm, mck_sess): volume = self.data.test_volume with mock.patch.object(self.rest, 'rename_volume') as mock_rename: self.common.unmanage(volume) mock_rename.assert_called_once_with( self.data.array, self.data.device_id, self.data.test_volume.id) # Test for success when create storage group fails with mock.patch.object(self.rest, 'rename_volume') as mock_rename: with mock.patch.object( self.provision, 'create_storage_group', side_effect=exception.VolumeBackendAPIException): self.common.unmanage(volume) mock_rename.assert_called_once_with( self.data.array, self.data.device_id, self.data.test_volume.id) def test_unmanage_device_not_found(self): volume = self.data.test_volume with mock.patch.object(self.common, '_find_device_on_array', return_value=None): with mock.patch.object(self.rest, 'rename_volume') as mock_rename: self.common.unmanage(volume) mock_rename.assert_not_called() @mock.patch.object(rest.PowerMaxRest, 'is_vol_in_rep_session', return_value=(True, True, False)) @mock.patch.object(common.PowerMaxCommon, '_cleanup_device_snapvx') def test_unmanage_temp_snapshot_links(self, mck_cleanup_snaps, mck_sess): volume = self.data.test_volume self.assertRaises(exception.VolumeIsBusy, self.common.unmanage, volume) @mock.patch.object(common.PowerMaxCommon, '_slo_workload_migration') def test_retype(self, mock_migrate): device_id = self.data.device_id volume_name = self.data.test_volume.name extra_specs = deepcopy(self.data.extra_specs_intervals_set) extra_specs[utils.PORTGROUPNAME] = self.data.port_group_name_f volume = self.data.test_volume new_type = {'extra_specs': {}} host = {'host': self.data.new_host} self.common.retype(volume, new_type, host) mock_migrate.assert_called_once_with( device_id, volume, host, volume_name, new_type, extra_specs) with mock.patch.object( self.common, 
'_find_device_on_array', return_value=None): self.assertFalse(self.common.retype(volume, new_type, host)) def test_retype_attached_vol(self): host = {'host': self.data.new_host} new_type = {'extra_specs': {}} with mock.patch.object( self.common, '_find_device_on_array', return_value=True): with mock.patch.object(self.common, '_slo_workload_migration') as mock_retype: self.common.retype(self.data.test_attached_volume, new_type, host) mock_retype.assert_called_once() @mock.patch.object(utils.PowerMaxUtils, 'is_retype_supported', return_value=False) def test_retype_not_supported(self, mck_retype): volume = self.data.test_volume new_type = {'extra_specs': self.data.rep_extra_specs} host = self.data.new_host self.assertFalse(self.common.retype(volume, new_type, host)) @mock.patch.object( common.PowerMaxCommon, '_initial_setup', return_value=tpd.PowerMaxData.rep_extra_specs_rep_config) @mock.patch.object(provision.PowerMaxProvision, 'verify_slo_workload', return_value=(True, True)) @mock.patch.object(common.PowerMaxCommon, '_slo_workload_migration') def test_retype_promotion_extra_spec_update( self, mck_migrate, mck_slo, mck_setup): device_id = self.data.device_id volume_name = self.data.test_rep_volume.name extra_specs = deepcopy(self.data.rep_extra_specs_rep_config) rep_config = extra_specs[utils.REP_CONFIG] rep_extra_specs = self.common._get_replication_extra_specs( extra_specs, rep_config) extra_specs[utils.PORTGROUPNAME] = self.data.port_group_name_f volume = self.data.test_rep_volume new_type = {'extra_specs': {}} host = {'host': self.data.new_host} self.mock_object(self.common, 'promotion', True) self.common.retype(volume, new_type, host) mck_migrate.assert_called_once_with( device_id, volume, host, volume_name, new_type, rep_extra_specs) def test_slo_workload_migration_valid(self): device_id = self.data.device_id volume_name = self.data.test_volume.name extra_specs = self.data.extra_specs new_type = {'extra_specs': self.data.vol_type_extra_specs} volume = self.data.test_volume host = {'host': self.data.new_host} with mock.patch.object(self.common, '_migrate_volume') as mock_migrate: self.common._slo_workload_migration( device_id, volume, host, volume_name, new_type, extra_specs) mock_migrate.assert_called_once_with( extra_specs[utils.ARRAY], volume, device_id, extra_specs[utils.SRP], 'Silver', 'OLTP', volume_name, new_type, extra_specs) def test_slo_workload_migration_not_valid(self): device_id = self.data.device_id volume_name = self.data.test_volume.name extra_specs = self.data.extra_specs volume = self.data.test_volume new_type = {'extra_specs': self.data.vol_type_extra_specs} host = {'host': self.data.new_host} with mock.patch.object( self.common, '_is_valid_for_storage_assisted_migration', return_value=(False, 'Silver', 'OLTP')): migrate_status = self.common._slo_workload_migration( device_id, volume, host, volume_name, new_type, extra_specs) self.assertFalse(migrate_status) def test_slo_workload_migration_same_hosts(self): device_id = self.data.device_id volume_name = self.data.test_volume.name extra_specs = self.data.extra_specs volume = self.data.test_volume host = {'host': self.data.fake_host} new_type = {'extra_specs': {'slo': 'Bronze'}} migrate_status = self.common._slo_workload_migration( device_id, volume, host, volume_name, new_type, extra_specs) self.assertFalse(migrate_status) @mock.patch.object(rest.PowerMaxRest, 'is_compression_capable', return_value=True) def test_slo_workload_migration_same_host_change_compression( self, mock_cap): device_id = self.data.device_id 
volume_name = self.data.test_volume.name extra_specs = self.data.extra_specs volume = self.data.test_volume host = {'host': self.data.fake_host} new_type = {'extra_specs': {utils.DISABLECOMPRESSION: "true"}} with mock.patch.object( self.common, '_is_valid_for_storage_assisted_migration', return_value=(True, self.data.slo, self.data.workload)): with mock.patch.object( self.common, '_migrate_volume') as mock_migrate: migrate_status = self.common._slo_workload_migration( device_id, volume, host, volume_name, new_type, extra_specs) self.assertTrue(bool(migrate_status)) mock_migrate.assert_called_once_with( extra_specs[utils.ARRAY], volume, device_id, extra_specs[utils.SRP], self.data.slo, self.data.workload, volume_name, new_type, extra_specs) @mock.patch.object( common.PowerMaxCommon, 'get_volume_metadata', return_value='') @mock.patch.object( common.PowerMaxCommon, '_retype_volume', return_value=(True, tpd.PowerMaxData.defaultstoragegroup_name)) def test_migrate_volume_success_no_rep(self, mck_retype, mck_get): array_id = self.data.array volume = self.data.test_volume device_id = self.data.device_id srp = self.data.srp target_slo = self.data.slo_silver target_workload = self.data.workload volume_name = volume.name new_type = {'extra_specs': {}} extra_specs = self.data.extra_specs target_extra_specs = { utils.SRP: srp, utils.ARRAY: array_id, utils.SLO: target_slo, utils.WORKLOAD: target_workload, utils.INTERVAL: extra_specs[utils.INTERVAL], utils.RETRIES: extra_specs[utils.RETRIES], utils.DISABLECOMPRESSION: False} success, model_update = self.common._migrate_volume( array_id, volume, device_id, srp, target_slo, target_workload, volume_name, new_type, extra_specs) mck_retype.assert_called_once_with( array_id, srp, device_id, volume, volume_name, extra_specs, target_slo, target_workload, target_extra_specs) self.assertTrue(success) @mock.patch.object(utils.PowerMaxUtils, 'get_rep_config', return_value=tpd.PowerMaxData.rep_config_metro) @mock.patch.object(utils.PowerMaxUtils, 'is_replication_enabled', side_effect=[False, True]) @mock.patch.object(common.PowerMaxCommon, '_validate_rdfg_status') @mock.patch.object(rest.PowerMaxRest, 'get_slo_list', return_value=[]) @mock.patch.object(rest.PowerMaxRest, 'get_volume_snapshot_list', return_value=[{'snapshotName': 'name', 'linkedDevices': 'details'}]) def test_migrate_to_metro_exception_on_linked_snapshot_source( self, mck_get, mck_slo, mck_validate, mck_rep, mck_config): array_id = self.data.array volume = self.data.test_volume device_id = self.data.device_id srp = self.data.srp target_slo = self.data.slo_silver target_workload = self.data.workload volume_name = volume.name target_extra_specs = self.data.rep_extra_specs_rep_config_metro new_type = {'extra_specs': target_extra_specs} extra_specs = self.data.extra_specs self.assertRaises( exception.VolumeBackendAPIException, self.common._migrate_volume, array_id, volume, device_id, srp, target_slo, target_workload, volume_name, new_type, extra_specs) @mock.patch.object(common.PowerMaxCommon, '_cleanup_device_snapvx') @mock.patch.object(utils.PowerMaxUtils, 'get_rep_config', return_value=tpd.PowerMaxData.rep_config_metro) @mock.patch.object(utils.PowerMaxUtils, 'is_replication_enabled', side_effect=[False, True]) @mock.patch.object(common.PowerMaxCommon, '_validate_rdfg_status') @mock.patch.object(rest.PowerMaxRest, 'get_slo_list', return_value=[]) @mock.patch.object(rest.PowerMaxRest, 'get_volume_snapshot_list', return_value=[{'snapshotName': 'name'}]) @mock.patch.object(rest.PowerMaxRest, 
'find_snap_vx_sessions', return_value=('', {'source_vol_id': 'source_vol_id', 'snap_name': 'snap_name'})) def test_migrate_to_metro_exception_on_snapshot_target( self, mck_find, mck_snap, mck_slo, mck_validate, mck_rep, mck_config, mck_cleanup): array_id = self.data.array volume = self.data.test_volume device_id = self.data.device_id srp = self.data.srp target_slo = self.data.slo_silver target_workload = self.data.workload volume_name = volume.name target_extra_specs = self.data.rep_extra_specs_rep_config_metro new_type = {'extra_specs': target_extra_specs} extra_specs = self.data.extra_specs self.assertRaises( exception.VolumeBackendAPIException, self.common._migrate_volume, array_id, volume, device_id, srp, target_slo, target_workload, volume_name, new_type, extra_specs) @mock.patch.object(rest.PowerMaxRest, 'get_storage_group_rdf_group_state', return_value=['activebias']) @mock.patch.object(common.PowerMaxCommon, '_post_retype_srdf_protect_storage_group', return_value=(True, True, True)) @mock.patch.object(utils.PowerMaxUtils, 'get_volume_element_name', return_value=tpd.PowerMaxData.volume_id) @mock.patch.object( common.PowerMaxCommon, 'configure_volume_replication', return_value=('first_vol_in_rdf_group', True, True, tpd.PowerMaxData.rep_extra_specs_mgmt, False)) @mock.patch.object(common.PowerMaxCommon, '_retype_volume') @mock.patch.object(rest.PowerMaxRest, 'srdf_resume_replication') @mock.patch.object( common.PowerMaxCommon, 'break_rdf_device_pair_session', return_value=(tpd.PowerMaxData.rep_extra_specs_mgmt, True)) @mock.patch.object(common.PowerMaxCommon, '_retype_remote_volume') @mock.patch.object(utils.PowerMaxUtils, 'is_replication_enabled', return_value=True) def test_cleanup_on_migrate_failure( self, mck_rep_enabled, mck_retype_remote, mck_break, mck_resume, mck_retype, mck_configure, mck_get_vname, mck_protect, mck_states): rdf_pair_broken = True rdf_pair_created = True vol_retyped = True remote_retyped = True extra_specs = deepcopy(self.data.extra_specs_rep_enabled) target_extra_specs = deepcopy(self.data.extra_specs_rep_enabled) rep_extra_specs = deepcopy(self.data.rep_extra_specs_mgmt) volume = self.data.test_volume volume_name = self.data.volume_id device_id = self.data.device_id source_sg = self.data.storagegroup_name_f array = self.data.array srp = extra_specs[utils.SRP] slo = extra_specs[utils.SLO] workload = extra_specs[utils.WORKLOAD] rep_mode = utils.REP_ASYNC extra_specs[utils.REP_MODE] = rep_mode self.common._cleanup_on_migrate_failure( rdf_pair_broken, rdf_pair_created, vol_retyped, remote_retyped, extra_specs, target_extra_specs, volume, volume_name, device_id, source_sg) mck_rep_enabled.assert_called_once_with(extra_specs) mck_retype_remote.assert_called_once_with( array, volume, device_id, volume_name, rep_mode, True, extra_specs) mck_break.assert_called_once_with( array, device_id, volume_name, extra_specs, volume) mck_resume.assert_called_once_with( array, rep_extra_specs['mgmt_sg_name'], rep_extra_specs['rdf_group_no'], rep_extra_specs) mck_retype.assert_called_once_with( array, srp, device_id, volume, volume_name, target_extra_specs, slo, workload, extra_specs) mck_configure.assert_called_once_with( array, volume, device_id, extra_specs) mck_get_vname.assert_called_once_with(volume.id) mck_protect.assert_called_once_with( array, source_sg, device_id, volume_name, rep_extra_specs, volume) @mock.patch.object( masking.PowerMaxMasking, 'return_volume_to_volume_group') @mock.patch.object( masking.PowerMaxMasking, 'move_volume_between_storage_groups') 
@mock.patch.object(masking.PowerMaxMasking, 'add_child_sg_to_parent_sg') @mock.patch.object(rest.PowerMaxRest, 'create_storage_group') @mock.patch.object(rest.PowerMaxRest, 'get_storage_group_list', return_value=['sg']) def test_cleanup_on_retype_volume_failure_moved_sg( self, mck_get_sgs, mck_create_sg, mck_add_child, mck_move, mck_return): created_child_sg = False add_sg_to_parent = False got_default_sg = False moved_between_sgs = True extra_specs = deepcopy(self.data.extra_specs_rep_enabled) array = extra_specs[utils.ARRAY] source_sg = self.data.storagegroup_name_f parent_sg = self.data.parent_sg_f target_sg_name = self.data.storagegroup_name_i device_id = self.data.device_id volume = self.data.test_volume volume_name = self.data.volume_id self.common._cleanup_on_retype_volume_failure( created_child_sg, add_sg_to_parent, got_default_sg, moved_between_sgs, array, source_sg, parent_sg, target_sg_name, extra_specs, device_id, volume, volume_name) mck_get_sgs.assert_called_once_with(array) mck_create_sg.assert_called_once_with( array, source_sg, extra_specs['srp'], extra_specs['slo'], extra_specs['workload'], extra_specs, False) mck_add_child.assert_called_once_with( array, source_sg, parent_sg, extra_specs) mck_move.assert_called_once_with( array, device_id, target_sg_name, source_sg, extra_specs, force=True, parent_sg=parent_sg) mck_return.assert_called_once_with( array, volume, device_id, volume_name, extra_specs) @mock.patch.object(rest.PowerMaxRest, 'delete_storage_group') @mock.patch.object(rest.PowerMaxRest, 'get_volumes_in_storage_group', return_value=[]) def test_cleanup_on_retype_volume_failure_got_default( self, mck_get_vols, mck_del_sg): created_child_sg = False add_sg_to_parent = False got_default_sg = True moved_between_sgs = False extra_specs = deepcopy(self.data.extra_specs_rep_enabled) array = extra_specs[utils.ARRAY] source_sg = self.data.storagegroup_name_f parent_sg = self.data.parent_sg_f target_sg_name = self.data.storagegroup_name_i device_id = self.data.device_id volume = self.data.test_volume volume_name = self.data.volume_id self.common._cleanup_on_retype_volume_failure( created_child_sg, add_sg_to_parent, got_default_sg, moved_between_sgs, array, source_sg, parent_sg, target_sg_name, extra_specs, device_id, volume, volume_name) mck_get_vols.assert_called_once_with(array, target_sg_name) mck_del_sg.assert_called_once_with(array, target_sg_name) @mock.patch.object(rest.PowerMaxRest, 'delete_storage_group') @mock.patch.object(rest.PowerMaxRest, 'remove_child_sg_from_parent_sg') def test_cleanup_on_retype_volume_failure_created_child( self, mck_remove_child_sg, mck_del_sg): created_child_sg = True add_sg_to_parent = True got_default_sg = False moved_between_sgs = False extra_specs = deepcopy(self.data.extra_specs_rep_enabled) array = extra_specs[utils.ARRAY] source_sg = self.data.storagegroup_name_f parent_sg = self.data.parent_sg_f target_sg_name = self.data.storagegroup_name_i device_id = self.data.device_id volume = self.data.test_volume volume_name = self.data.volume_id self.common._cleanup_on_retype_volume_failure( created_child_sg, add_sg_to_parent, got_default_sg, moved_between_sgs, array, source_sg, parent_sg, target_sg_name, extra_specs, device_id, volume, volume_name) mck_remove_child_sg.assert_called_once_with( array, target_sg_name, parent_sg, extra_specs) mck_del_sg.assert_called_once_with(array, target_sg_name) def test_is_valid_for_storage_assisted_migration_true(self): device_id = self.data.device_id host = {'host': self.data.new_host} volume_name = 
self.data.test_volume.name ref_return = (True, 'Silver', 'OLTP') return_val = self.common._is_valid_for_storage_assisted_migration( device_id, host, self.data.array, self.data.srp, volume_name, False, False, self.data.slo, self.data.workload, False) self.assertEqual(ref_return, return_val) # No current sgs found with mock.patch.object(self.rest, 'get_storage_groups_from_volume', return_value=None): return_val = self.common._is_valid_for_storage_assisted_migration( device_id, host, self.data.array, self.data.srp, volume_name, False, False, self.data.slo, self.data.workload, False) self.assertEqual(ref_return, return_val) host = {'host': 'HostX@Backend#Silver+SRP_1+000197800123'} ref_return = (True, 'Silver', 'NONE') return_val = self.common._is_valid_for_storage_assisted_migration( device_id, host, self.data.array, self.data.srp, volume_name, False, False, self.data.slo, self.data.workload, False) self.assertEqual(ref_return, return_val) def test_is_valid_for_storage_assisted_migration_false(self): device_id = self.data.device_id volume_name = self.data.test_volume.name ref_return = (False, None, None) # IndexError host = {'host': 'HostX@Backend#Silver+SRP_1+000197800123+dummy+data'} return_val = self.common._is_valid_for_storage_assisted_migration( device_id, host, self.data.array, self.data.srp, volume_name, False, False, self.data.slo, self.data.workload, False) self.assertEqual(ref_return, return_val) # Wrong array host2 = {'host': 'HostX@Backend#Silver+OLTP+SRP_1+00012345678'} return_val = self.common._is_valid_for_storage_assisted_migration( device_id, host2, self.data.array, self.data.srp, volume_name, False, False, self.data.slo, self.data.workload, False) self.assertEqual(ref_return, return_val) # Wrong srp host3 = {'host': 'HostX@Backend#Silver+OLTP+SRP_2+000197800123'} return_val = self.common._is_valid_for_storage_assisted_migration( device_id, host3, self.data.array, self.data.srp, volume_name, False, False, self.data.slo, self.data.workload, False) self.assertEqual(ref_return, return_val) # Already in correct sg with mock.patch.object( self.common.provision, 'get_slo_workload_settings_from_storage_group', return_value='Diamond+DSS') as mock_settings: host4 = {'host': self.data.fake_host} return_val = self.common._is_valid_for_storage_assisted_migration( device_id, host4, self.data.array, self.data.srp, volume_name, False, False, self.data.slo, self.data.workload, False) self.assertEqual(ref_return, return_val) mock_settings.assert_called_once() def test_is_valid_for_storage_assisted_migration_next_gen(self): device_id = self.data.device_id host = {'host': self.data.new_host} volume_name = self.data.test_volume.name ref_return = (True, 'Silver', 'NONE') with mock.patch.object(self.rest, 'is_next_gen_array', return_value=True): return_val = self.common._is_valid_for_storage_assisted_migration( device_id, host, self.data.array, self.data.srp, volume_name, False, False, self.data.slo, self.data.workload, False) self.assertEqual(ref_return, return_val) def test_is_valid_for_storage_assisted_migration_promotion_change_comp( self): device_id = self.data.device_id host = {'host': self.data.new_host} volume_name = self.data.test_volume.name ref_return = (False, None, None) self.mock_object(self.common, 'promotion', True) return_val = self.common._is_valid_for_storage_assisted_migration( device_id, host, self.data.array, self.data.srp, volume_name, True, False, self.data.slo_silver, self.data.workload, False) self.assertEqual(ref_return, return_val) def 
test_is_valid_for_storage_assisted_migration_promotion_change_slo( self): device_id = self.data.device_id host = {'host': self.data.new_host} volume_name = self.data.test_volume.name ref_return = (False, None, None) self.mock_object(self.common, 'promotion', True) return_val = self.common._is_valid_for_storage_assisted_migration( device_id, host, self.data.array, self.data.srp, volume_name, False, False, self.data.slo, self.data.workload, False) self.assertEqual(ref_return, return_val) def test_is_valid_for_storage_assisted_migration_promotion_change_workload( self): device_id = self.data.device_id host = {'host': self.data.new_host} volume_name = self.data.test_volume.name ref_return = (False, None, None) self.mock_object(self.common, 'promotion', True) return_val = self.common._is_valid_for_storage_assisted_migration( device_id, host, self.data.array, self.data.srp, volume_name, False, False, self.data.slo_silver, 'fail_workload', False) self.assertEqual(ref_return, return_val) def test_is_valid_for_storage_assisted_migration_promotion_target_not_rep( self): device_id = self.data.device_id host = {'host': self.data.new_host} volume_name = self.data.test_volume.name ref_return = (False, None, None) self.mock_object(self.common, 'promotion', True) return_val = self.common._is_valid_for_storage_assisted_migration( device_id, host, self.data.array, self.data.srp, volume_name, False, False, self.data.slo_silver, 'OLTP', True) self.assertEqual(ref_return, return_val) @mock.patch.object( rest.PowerMaxRest, 'get_storage_groups_from_volume', return_value=tpd.PowerMaxData.default_sg_re_managed_list) def test_is_valid_for_storage_assisted_migration_rep_with_mgmt_group( self, mock_sg_list): device_id = self.data.device_id host = {'host': self.data.fake_host} volume_name = self.data.test_volume.name ref_return = (True, 'Diamond', 'NONE') with mock.patch.object(self.rest, 'is_next_gen_array', return_value=True): return_val = self.common._is_valid_for_storage_assisted_migration( device_id, host, self.data.array, self.data.srp, volume_name, False, False, self.data.slo, self.data.workload, False) self.assertEqual(ref_return, return_val) def test_find_volume_group(self): group = self.data.test_group_1 array = self.data.array volume_group = self.common._find_volume_group(array, group) ref_group = self.data.sg_details_rep[0] self.assertEqual(ref_group, volume_group) def test_get_volume_device_ids(self): array = self.data.array volumes = [self.data.test_volume] ref_device_ids = [self.data.device_id] device_ids = self.common._get_volume_device_ids(volumes, array) self.assertEqual(ref_device_ids, device_ids) @mock.patch.object(common.PowerMaxCommon, '_find_device_on_array', return_value=tpd.PowerMaxData.device_id) def test_get_volume_device_ids_remote_volumes(self, mck_find): array = self.data.array volumes = [self.data.test_rep_volume] ref_device_ids = [self.data.device_id] replication_details = ast.literal_eval( self.data.test_rep_volume.replication_driver_data) remote_array = replication_details.get(utils.ARRAY) specs = {utils.ARRAY: remote_array} device_ids = self.common._get_volume_device_ids(volumes, array, True) self.assertEqual(ref_device_ids, device_ids) mck_find.assert_called_once_with( self.data.test_rep_volume, specs, True) def test_get_members_of_volume_group(self): array = self.data.array group_name = self.data.storagegroup_name_source ref_volumes = [self.data.device_id, self.data.device_id2] member_device_ids = self.common._get_members_of_volume_group( array, group_name) 
self.assertEqual(ref_volumes, member_device_ids) def test_get_members_of_volume_group_empty(self): array = self.data.array group_name = self.data.storagegroup_name_source with mock.patch.object( self.rest, 'get_volumes_in_storage_group', return_value=None): member_device_ids = self.common._get_members_of_volume_group( array, group_name ) self.assertIsNone(member_device_ids) @mock.patch.object(volume_utils, 'is_group_a_cg_snapshot_type', return_value=True) def test_create_group_replica(self, mock_check): source_group = self.data.test_group_1 snap_name = self.data.group_snapshot_name with mock.patch.object( self.common, '_create_group_replica') as mock_create_replica: self.common._create_group_replica( source_group, snap_name) mock_create_replica.assert_called_once_with( source_group, snap_name) def test_create_group_replica_exception(self): source_group = self.data.test_group_failed snap_name = self.data.group_snapshot_name with mock.patch.object( volume_utils, 'is_group_a_cg_snapshot_type', return_value=True): self.assertRaises(exception.VolumeBackendAPIException, self.common._create_group_replica, source_group, snap_name) def test_create_group_snapshot(self): context = None group_snapshot = self.data.test_group_snapshot_1 snapshots = [] ref_model_update = {'status': fields.GroupStatus.AVAILABLE} with mock.patch.object( volume_utils, 'is_group_a_cg_snapshot_type', return_value=True): model_update, snapshots_model_update = ( self.common.create_group_snapshot( context, group_snapshot, snapshots)) self.assertEqual(ref_model_update, model_update) def test_create_group_snapshot_exception(self): context = None group_snapshot = self.data.test_group_snapshot_failed snapshots = [] with mock.patch.object( volume_utils, 'is_group_a_cg_snapshot_type', return_value=True): self.assertRaises(exception.VolumeBackendAPIException, self.common.create_group_snapshot, context, group_snapshot, snapshots) @mock.patch.object(volume_utils, 'is_group_a_cg_snapshot_type', return_value=True) @mock.patch.object(volume_utils, 'is_group_a_type', return_value=False) def test_create_group(self, mock_type, mock_cg_type): ref_model_update = {'status': fields.GroupStatus.AVAILABLE} model_update = self.common.create_group(None, self.data.test_group_1) self.assertEqual(ref_model_update, model_update) @mock.patch.object(provision.PowerMaxProvision, 'create_volume_group', side_effect=exception.CinderException) @mock.patch.object(volume_utils, 'is_group_a_type', return_value=False) def test_create_group_exception(self, mock_type, mock_create): context = None group = self.data.test_group_failed with mock.patch.object( volume_utils, 'is_group_a_cg_snapshot_type', return_value=True): self.assertRaises(exception.VolumeBackendAPIException, self.common.create_group, context, group) def test_delete_group_snapshot(self): group_snapshot = self.data.test_group_snapshot_1 snapshots = [] context = None ref_model_update = {'status': fields.GroupSnapshotStatus.DELETED} with mock.patch.object(volume_utils, 'is_group_a_cg_snapshot_type', return_value=True): model_update, snapshots_model_update = ( self.common.delete_group_snapshot(context, group_snapshot, snapshots)) self.assertEqual(ref_model_update, model_update) def test_delete_group_snapshot_success(self): group_snapshot = self.data.test_group_snapshot_1 snapshots = [] ref_model_update = {'status': fields.GroupSnapshotStatus.DELETED} with mock.patch.object(volume_utils, 'is_group_a_cg_snapshot_type', return_value=True): model_update, snapshots_model_update = ( 
self.common._delete_group_snapshot(group_snapshot, snapshots)) self.assertEqual(ref_model_update, model_update) @mock.patch.object(common.PowerMaxCommon, '_find_volume_group', return_value=None) def test_delete_group_snapshot_not_on_array(self, mock_gvg): group_snapshot = self.data.test_group_snapshot_1 snapshots = [] ref_model_update = ( {'status': fields.GroupSnapshotStatus.DELETED}) with mock.patch.object(volume_utils, 'is_group_a_cg_snapshot_type', return_value=True): model_update, snapshots_model_update = ( self.common._delete_group_snapshot(group_snapshot, snapshots)) self.assertEqual(ref_model_update, model_update) @mock.patch.object(volume_utils, 'is_group_a_type', return_value=False) @mock.patch.object(volume_utils, 'is_group_a_cg_snapshot_type', return_value=True) def test_update_group(self, mock_cg_type, mock_type_check): group = self.data.test_group_1 add_vols = [self.data.test_volume] remove_vols = [] ref_model_update = {'status': fields.GroupStatus.AVAILABLE} model_update, __, __ = self.common.update_group( group, add_vols, remove_vols) self.assertEqual(ref_model_update, model_update) @mock.patch.object(common.PowerMaxCommon, '_find_volume_group', return_value=None) @mock.patch.object(volume_utils, 'is_group_a_cg_snapshot_type', return_value=True) def test_update_group_not_found(self, mock_check, mock_grp): self.assertRaises(exception.GroupNotFound, self.common.update_group, self.data.test_group_1, [], []) @mock.patch.object(common.PowerMaxCommon, '_find_volume_group', side_effect=exception.VolumeBackendAPIException) @mock.patch.object(volume_utils, 'is_group_a_cg_snapshot_type', return_value=True) def test_update_group_exception(self, mock_check, mock_grp): self.assertRaises(exception.VolumeBackendAPIException, self.common.update_group, self.data.test_group_1, [], []) @mock.patch.object(volume_utils, 'is_group_a_type', return_value=False) @mock.patch.object(volume_utils, 'is_group_a_cg_snapshot_type', return_value=True) def test_update_group_remove_volumes(self, mock_cg_type, mock_type_check): group = self.data.test_group_1 add_vols = [] remove_vols = [self.data.test_volume_group_member] ref_model_update = {'status': fields.GroupStatus.AVAILABLE} with mock.patch.object( rest.PowerMaxRest, 'is_volume_in_storagegroup', return_value=False) as mock_exists: model_update, __, __ = self.common.update_group( group, add_vols, remove_vols) mock_exists.assert_called_once() self.assertEqual(ref_model_update, model_update) @mock.patch.object(volume_utils, 'is_group_a_type', return_value=False) @mock.patch.object(volume_utils, 'is_group_a_cg_snapshot_type', return_value=True) def test_update_group_failover_failure( self, mock_cg_type, mock_type_check): group = self.data.test_group_1 add_vols = [] remove_vols = [self.data.test_volume_group_member] self.mock_object(self.common, 'failedover', True) self.assertRaises( exception.VolumeBackendAPIException, self.common.update_group, group, add_vols, remove_vols) @mock.patch.object(volume_utils, 'is_group_a_type', return_value=False) @mock.patch.object(volume_utils, 'is_group_a_cg_snapshot_type', return_value=True) @mock.patch.object(common.PowerMaxCommon, '_update_group_promotion') def test_update_group_during_promotion( self, mck_update, mock_cg_type, mock_type_check): group = self.data.test_group_1 add_vols = [] remove_vols = [self.data.test_volume_group_member] ref_model_update = {'status': fields.GroupStatus.AVAILABLE} self.mock_object(self.common, 'promotion', True) model_update, __, __ = self.common.update_group( group, add_vols, 
remove_vols) mck_update.assert_called_once_with(group, add_vols, remove_vols) self.assertEqual(ref_model_update, model_update) @mock.patch.object(common.PowerMaxCommon, '_find_volume_group', return_value=tpd.PowerMaxData.test_rep_group) @mock.patch.object(rest.PowerMaxRest, 'is_volume_in_storagegroup', return_value=True) @mock.patch.object( common.PowerMaxCommon, '_get_replication_extra_specs', return_value=tpd.PowerMaxData.rep_extra_specs_rep_config) @mock.patch.object( common.PowerMaxCommon, '_initial_setup', return_value=tpd.PowerMaxData.ex_specs_rep_config) @mock.patch.object(volume_utils, 'is_group_a_type', return_value=True) @mock.patch.object(volume_utils, 'is_group_a_cg_snapshot_type', return_value=True) @mock.patch.object( masking.PowerMaxMasking, 'remove_volumes_from_storage_group') def test_update_group_promotion( self, mck_rem, mock_cg_type, mock_type_check, mck_setup, mck_rep, mck_in_sg, mck_group): group = self.data.test_rep_group add_vols = [] remove_vols = [self.data.test_volume_group_member] remote_array = self.data.remote_array device_id = [self.data.device_id] group_name = self.data.storagegroup_name_source interval_retries_dict = {utils.INTERVAL: 1, utils.RETRIES: 1, utils.FORCE_VOL_EDIT: True} self.common._update_group_promotion(group, add_vols, remove_vols) mck_rem.assert_called_once_with( remote_array, device_id, group_name, interval_retries_dict) @mock.patch.object(volume_utils, 'is_group_a_type', return_value=False) @mock.patch.object(volume_utils, 'is_group_a_cg_snapshot_type', return_value=True) def test_update_group_promotion_non_replicated( self, mock_cg_type, mock_type_check): group = self.data.test_group_failed add_vols = [] remove_vols = [self.data.test_volume_group_member] self.assertRaises(exception.VolumeBackendAPIException, self.common._update_group_promotion, group, add_vols, remove_vols) @mock.patch.object(volume_utils, 'is_group_a_type', return_value=True) @mock.patch.object(volume_utils, 'is_group_a_cg_snapshot_type', return_value=True) def test_update_group_promotion_add_volumes( self, mock_cg_type, mock_type_check): group = self.data.test_rep_group add_vols = [self.data.test_volume_group_member] remove_vols = [] self.assertRaises(exception.VolumeBackendAPIException, self.common._update_group_promotion, group, add_vols, remove_vols) @mock.patch.object(rest.PowerMaxRest, 'get_volume_snapshot_list', return_value=list()) @mock.patch.object(volume_utils, 'is_group_a_type', return_value=False) def test_delete_group(self, mock_check, mck_snaps): group = self.data.test_group_1 volumes = [self.data.test_volume] context = None ref_model_update = {'status': fields.GroupStatus.DELETED} with mock.patch.object( volume_utils, 'is_group_a_cg_snapshot_type', return_value=True), mock.patch.object( self.rest, 'get_volumes_in_storage_group', return_value=[]): model_update, __ = self.common.delete_group( context, group, volumes) self.assertEqual(ref_model_update, model_update) @mock.patch.object(rest.PowerMaxRest, 'get_volume_snapshot_list', return_value=list()) @mock.patch.object(volume_utils, 'is_group_a_type', return_value=False) def test_delete_group_success(self, mock_check, mck_get_snaps): group = self.data.test_group_1 volumes = [] ref_model_update = {'status': fields.GroupStatus.DELETED} with mock.patch.object( volume_utils, 'is_group_a_cg_snapshot_type', return_value=True), mock.patch.object( self.rest, 'get_volumes_in_storage_group', return_value=[]): model_update, __ = self.common._delete_group(group, volumes) self.assertEqual(ref_model_update, model_update) 
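    # Note on the stacked mock.patch.object decorators used throughout this
    # class: decorators are applied bottom-up, so the mock created by the
    # decorator nearest the def is passed as the first mock argument. In
    # test_delete_group_snapshot_and_volume_cleanup below, mock_check is the
    # is_group_a_type mock and mock_del is the _delete_from_srp mock.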
@mock.patch.object(common.PowerMaxCommon, '_delete_from_srp') @mock.patch.object(masking.PowerMaxMasking, 'remove_and_reset_members') @mock.patch.object(common.PowerMaxCommon, '_cleanup_device_snapvx') @mock.patch.object(common.PowerMaxCommon, '_get_members_of_volume_group', return_value=[tpd.PowerMaxData.device_id]) @mock.patch.object(rest.PowerMaxRest, 'get_volume_snapshot_list', return_value=[]) @mock.patch.object(volume_utils, 'is_group_a_type', return_value=False) def test_delete_group_snapshot_and_volume_cleanup( self, mock_check, mck_get_snaps, mock_members, mock_cleanup, mock_remove, mock_del): group = self.data.test_group_1 volumes = [fake_volume.fake_volume_obj( context='cxt', provider_location=None)] with mock.patch.object( volume_utils, 'is_group_a_cg_snapshot_type', return_value=True), mock.patch.object( self.rest, 'get_volumes_in_storage_group', return_value=[]): self.common._delete_group(group, volumes) mock_cleanup.assert_called_once() mock_remove.assert_called_once() @mock.patch.object(rest.PowerMaxRest, 'get_volume_snapshot_list', return_value=list()) def test_delete_group_already_deleted(self, mck_get_snaps): group = self.data.test_group_failed ref_model_update = {'status': fields.GroupStatus.DELETED} volumes = [] with mock.patch.object(volume_utils, 'is_group_a_cg_snapshot_type', return_value=True): model_update, __ = self.common._delete_group(group, volumes) self.assertEqual(ref_model_update, model_update) @mock.patch.object(rest.PowerMaxRest, 'get_volume_snapshot_list', return_value=list()) @mock.patch.object(volume_utils, 'is_group_a_type', return_value=False) @mock.patch.object(volume_utils, 'is_group_a_cg_snapshot_type', return_value=True) def test_delete_group_failed( self, mock_check, mock_type_check, mck_get_snaps): group = self.data.test_group_1 volumes = [] ref_model_update = {'status': fields.GroupStatus.ERROR_DELETING} with mock.patch.object( self.rest, 'delete_storage_group', side_effect=exception.VolumeBackendAPIException): model_update, __ = self.common._delete_group( group, volumes) self.assertEqual(ref_model_update, model_update) @mock.patch.object(rest.PowerMaxRest, 'get_volume_snapshot_list', return_value=list()) @mock.patch.object(volume_utils, 'is_group_a_type', return_value=False) @mock.patch.object(volume_utils, 'is_group_a_cg_snapshot_type', return_value=True) @mock.patch.object(rest.PowerMaxRest, 'get_volumes_in_storage_group', return_value=[ tpd.PowerMaxData.test_volume_group_member]) @mock.patch.object(common.PowerMaxCommon, '_get_members_of_volume_group', return_value=[tpd.PowerMaxData.device_id]) @mock.patch.object(common.PowerMaxCommon, '_find_device_on_array', return_value=tpd.PowerMaxData.device_id) @mock.patch.object(masking.PowerMaxMasking, 'remove_volumes_from_storage_group') def test_delete_group_cleanup_snapvx( self, mock_rem, mock_find, mock_mems, mock_vols, mock_chk1, mock_chk2, mck_get_snaps): group = self.data.test_group_1 volumes = [self.data.test_volume_group_member] with mock.patch.object( self.common, '_cleanup_device_snapvx') as mock_cleanup_snapvx: self.common._delete_group(group, volumes) mock_cleanup_snapvx.assert_called_once() @mock.patch.object(rest.PowerMaxRest, 'get_volume_snapshot_list', return_value=[{'snapshotName': 'name'}]) @mock.patch.object(common.PowerMaxCommon, '_cleanup_device_snapvx') def test_delete_group_with_volumes_exception_on_remaining_snapshots( self, mck_cleanup, mck_get): group = self.data.test_group_1 volumes = [self.data.test_volume_group_member] 
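        # get_volume_snapshot_list is mocked to return a remaining snapshot,
        # so deleting the group while source snapshots still exist is
        # expected to raise VolumeBackendAPIException below.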
self.assertRaises(exception.VolumeBackendAPIException, self.common._delete_group, group, volumes) @mock.patch.object(rest.PowerMaxRest, 'find_snap_vx_sessions', return_value=('', {'source_vol_id': 'id', 'snap_name': 'name'})) @mock.patch.object(rest.PowerMaxRest, 'get_volume_snapshot_list', return_value=None) @mock.patch.object(common.PowerMaxCommon, '_cleanup_device_snapvx') def test_delete_group_with_volumes_exception_on_target_links( self, mck_cleanup, mck_get, mck_find): group = self.data.test_group_1 volumes = [self.data.test_volume_group_member] self.assertRaises(exception.VolumeBackendAPIException, self.common._delete_group, group, volumes) @mock.patch.object(rest.PowerMaxRest, 'delete_storage_group') @mock.patch.object(common.PowerMaxCommon, '_failover_replication', return_value=(True, None)) @mock.patch.object(masking.PowerMaxMasking, 'add_volumes_to_storage_group') @mock.patch.object(common.PowerMaxCommon, '_get_volume_device_ids', return_value=[tpd.PowerMaxData.device_id]) @mock.patch.object(provision.PowerMaxProvision, 'create_volume_group') @mock.patch.object(common.PowerMaxCommon, '_initial_setup', return_value=tpd.PowerMaxData.ex_specs_rep_config_sync) def test_update_volume_list_from_sync_vol_list( self, mck_setup, mck_grp, mck_ids, mck_add, mck_fover, mck_del): vol_list = [self.data.test_rep_volume] vol_ids = [self.data.device_id] remote_array = self.data.remote_array temp_group = 'OS-23_24_007-temp-rdf-sg' extra_specs = self.data.ex_specs_rep_config_sync self.common._update_volume_list_from_sync_vol_list(vol_list, None) mck_grp.assert_called_once_with(remote_array, temp_group, extra_specs) mck_ids.assert_called_once_with( vol_list, remote_array, remote_volumes=True) mck_add.assert_called_once_with( remote_array, vol_ids, temp_group, extra_specs) mck_fover.assert_called_once_with( vol_list, None, temp_group, secondary_backend_id=None, host=True) mck_del.assert_called_once_with(remote_array, temp_group) @mock.patch.object( common.PowerMaxCommon, '_remove_vol_and_cleanup_replication') @mock.patch.object( masking.PowerMaxMasking, 'remove_volumes_from_storage_group') def test_rollback_create_group_from_src( self, mock_rm, mock_clean): rollback_dict = { 'target_group_name': self.data.target_group_name, 'snap_name': 'snap1', 'source_group_name': 'src_grp', 'volumes': (self.data.device_id, self.data.extra_specs, self.data.test_volume), 'device_ids': [self.data.device_id], 'interval_retries_dict': self.data.extra_specs} for x in range(0, 2): self.common._rollback_create_group_from_src( self.data.array, rollback_dict) self.assertEqual(2, mock_rm.call_count) def test_get_snap_src_dev_list(self): src_dev_ids = self.common._get_snap_src_dev_list( self.data.array, [self.data.test_snapshot]) ref_dev_ids = [self.data.device_id] self.assertEqual(ref_dev_ids, src_dev_ids) def test_get_clone_vol_info(self): ref_dev_id = self.data.device_id source_vols = [self.data.test_volume, self.data.test_attached_volume] src_snapshots = [self.data.test_snapshot] src_dev_id1, extra_specs1, vol_size1, tgt_vol_name1 = ( self.common._get_clone_vol_info( self.data.test_clone_volume, source_vols, [])) src_dev_id2, extra_specs2, vol_size2, tgt_vol_name2 = ( self.common._get_clone_vol_info( self.data.test_clone_volume, [], src_snapshots)) self.assertEqual(ref_dev_id, src_dev_id1) self.assertEqual(ref_dev_id, src_dev_id2) def test_get_attributes_from_cinder_config_new_and_old(self): kwargs_expected = ( {'RestServerIp': '1.1.1.1', 'RestServerPort': 8443, 'RestUserName': 'smc', 'RestPassword': 'smc', 'SSLVerify': 
False, 'SerialNumber': self.data.array, 'srpName': 'SRP_1', 'PortGroup': [self.data.port_group_name_i], 'RestAPIConnectTimeout': 30, 'RestAPIReadTimeout': 30}) old_conf = tpfo.FakeConfiguration(None, 'CommonTests', 1, 1) configuration = tpfo.FakeConfiguration( None, 'CommonTests', 1, 1, san_ip='1.1.1.1', san_login='smc', powermax_array=self.data.array, powermax_srp='SRP_1', san_password='smc', san_api_port=8443, powermax_port_groups=[self.data.port_group_name_i]) self.mock_object(self.common, 'configuration', configuration) kwargs_returned = self.common.get_attributes_from_cinder_config() self.assertEqual(kwargs_expected, kwargs_returned) self.mock_object(self.common, 'configuration', old_conf) kwargs = self.common.get_attributes_from_cinder_config() self.assertIsNone(kwargs) def test_get_attributes_from_cinder_config_with_port(self): kwargs_expected = ( {'RestServerIp': '1.1.1.1', 'RestServerPort': 3448, 'RestUserName': 'smc', 'RestPassword': 'smc', 'SSLVerify': False, 'SerialNumber': self.data.array, 'srpName': 'SRP_1', 'PortGroup': [self.data.port_group_name_i], 'RestAPIConnectTimeout': 30, 'RestAPIReadTimeout': 30}) configuration = tpfo.FakeConfiguration( None, 'CommonTests', 1, 1, san_ip='1.1.1.1', san_login='smc', powermax_array=self.data.array, powermax_srp='SRP_1', san_password='smc', san_api_port=3448, powermax_port_groups=[self.data.port_group_name_i]) self.mock_object(self.common, 'configuration', configuration) kwargs_returned = self.common.get_attributes_from_cinder_config() self.assertEqual(kwargs_expected, kwargs_returned) def test_get_attributes_from_cinder_config_no_port(self): kwargs_expected = ( {'RestServerIp': '1.1.1.1', 'RestServerPort': 8443, 'RestUserName': 'smc', 'RestPassword': 'smc', 'SSLVerify': False, 'SerialNumber': self.data.array, 'srpName': 'SRP_1', 'PortGroup': [self.data.port_group_name_i], 'RestAPIConnectTimeout': 30, 'RestAPIReadTimeout': 30}) configuration = tpfo.FakeConfiguration( None, 'CommonTests', 1, 1, san_ip='1.1.1.1', san_login='smc', powermax_array=self.data.array, powermax_srp='SRP_1', san_password='smc', powermax_port_groups=[self.data.port_group_name_i]) self.mock_object(self.common, 'configuration', configuration) kwargs_returned = self.common.get_attributes_from_cinder_config() self.assertEqual(kwargs_expected, kwargs_returned) def test_get_ssl_attributes_from_cinder_config(self): conf = tpfo.FakeConfiguration( None, 'CommonTests', 1, 1, san_ip='1.1.1.1', san_login='smc', powermax_array=self.data.array, powermax_srp='SRP_1', san_password='smc', powermax_port_groups=[self.data.port_group_name_i], driver_ssl_cert_verify=True, driver_ssl_cert_path='/path/to/cert') self.mock_object(self.common, 'configuration', conf) conf_returned = self.common.get_attributes_from_cinder_config() self.assertEqual('/path/to/cert', conf_returned['SSLVerify']) conf.driver_ssl_cert_verify = True conf.driver_ssl_cert_path = None conf_returned = self.common.get_attributes_from_cinder_config() self.assertTrue(conf_returned['SSLVerify']) conf.driver_ssl_cert_verify = False conf.driver_ssl_cert_path = None conf_returned = self.common.get_attributes_from_cinder_config() self.assertFalse(conf_returned['SSLVerify']) @mock.patch.object(rest.PowerMaxRest, 'get_size_of_device_on_array', return_value=2.0) def test_manage_snapshot_get_size_success(self, mock_get_size): size = self.common.manage_existing_snapshot_get_size( self.data.test_snapshot) self.assertEqual(2, size) @mock.patch.object(rest.PowerMaxRest, 'get_volume_snaps', return_value=[{'snap_name': 'snap_name', 
'snap_id': tpd.PowerMaxData.snap_id}]) @mock.patch.object( common.PowerMaxCommon, 'get_snapshot_metadata', return_value={'snap-meta-key-1': 'snap-meta-value-1', 'snap-meta-key-2': 'snap-meta-value-2'}) def test_manage_snapshot_success(self, mck_meta, mock_snap): snapshot = deepcopy(self.data.test_snapshot_manage) snapshot.metadata = {'user-meta-key-1': 'user-meta-value-1', 'user-meta-key-2': 'user-meta-value-2'} existing_ref = {u'source-name': u'test_snap'} updates_response = self.common.manage_existing_snapshot( snapshot, existing_ref) prov_loc = {'source_id': self.data.device_id, 'snap_name': 'OS-%s' % existing_ref['source-name']} updates = {'display_name': 'my_snap', 'provider_location': str(prov_loc), 'metadata': {'snap-meta-key-1': 'snap-meta-value-1', 'snap-meta-key-2': 'snap-meta-value-2', 'user-meta-key-1': 'user-meta-value-1', 'user-meta-key-2': 'user-meta-value-2'}} self.assertEqual(updates_response, updates) def test_manage_snapshot_fail_already_managed(self): snapshot = self.data.test_snapshot_manage existing_ref = {u'source-name': u'OS-test_snap'} self.assertRaises(exception.VolumeBackendAPIException, self.common.manage_existing_snapshot, snapshot, existing_ref) @mock.patch.object(utils.PowerMaxUtils, 'is_volume_failed_over', return_value=True) def test_manage_snapshot_fail_vol_failed_over(self, mock_failed): snapshot = self.data.test_snapshot_manage existing_ref = {u'source-name': u'test_snap'} self.assertRaises(exception.VolumeBackendAPIException, self.common.manage_existing_snapshot, snapshot, existing_ref) @mock.patch.object(rest.PowerMaxRest, 'get_volume_snap', return_value=False) def test_manage_snapshot_fail_vol_not_snap_src(self, mock_snap): snapshot = self.data.test_snapshot_manage existing_ref = {u'source-name': u'test_snap'} self.assertRaises(exception.VolumeBackendAPIException, self.common.manage_existing_snapshot, snapshot, existing_ref) @mock.patch.object(utils.PowerMaxUtils, 'modify_snapshot_prefix', side_effect=exception.VolumeBackendAPIException) def test_manage_snapshot_fail_add_prefix(self, mock_mod): snapshot = self.data.test_snapshot_manage existing_ref = {u'source-name': u'test_snap'} self.assertRaises(exception.VolumeBackendAPIException, self.common.manage_existing_snapshot, snapshot, existing_ref) @mock.patch.object(rest.PowerMaxRest, 'get_volume_snaps', return_value=[{'snap_name': 'snap_name', 'snap_id': tpd.PowerMaxData.snap_id}]) def test_get_snap_id_with_uuid_success(self, mock_get_snaps): snap_uuid = '_snapshot-' + fake.SNAPSHOT_ID snap_id, snap_name = self.common._get_snap_id_with_uuid( self.data.array, self.data.device_id, snap_uuid) self.assertEqual(self.data.snap_id, snap_id) self.assertEqual('253b28496ec7aab', snap_name) snap_uuid = fake.SNAPSHOT_ID snap_id, snap_name = self.common._get_snap_id_with_uuid( self.data.array, self.data.device_id, snap_uuid) self.assertEqual(self.data.snap_id, snap_id) self.assertEqual('253b28496ec7aab', snap_name) @mock.patch.object( common.PowerMaxCommon, 'get_snapshot_metadata', return_value={'snap-meta-key-1': 'snap-meta-value-1', 'snap-meta-key-2': 'snap-meta-value-2'}) @mock.patch.object( rest.PowerMaxRest, 'get_volume_snaps', return_value=[{'snap_name': tpd.PowerMaxData.test_snapshot_snap_name, 'snap_id': tpd.PowerMaxData.snap_id}]) def test_manage_existing_snapshot_no_fall_through( self, mock_get_snaps, mock_meta): external_ref = {u'source-name': u'test_snap'} snapshot = deepcopy(self.data.test_snapshot) with mock.patch.object( self.common, '_get_snap_id_with_uuid', return_value=( self.data.snap_id, 
self.data.test_snapshot_snap_name)) as mock_uuid: self.common.manage_existing_snapshot(snapshot, external_ref) mock_uuid.assert_not_called() @mock.patch.object( common.PowerMaxCommon, 'get_snapshot_metadata', return_value={'snap-meta-key-1': 'snap-meta-value-1', 'snap-meta-key-2': 'snap-meta-value-2'}) def test_manage_existing_snapshot_fall_through(self, mock_meta): external_ref = {u'source-name': fake.SNAPSHOT_ID} snapshot = deepcopy(self.data.test_snapshot) with mock.patch.object( self.common, '_get_snap_id_with_uuid', return_value=( self.data.snap_id, self.data.test_snapshot_snap_name)) as mock_uuid: self.common.manage_existing_snapshot(snapshot, external_ref) mock_uuid.assert_called() @mock.patch.object(rest.PowerMaxRest, 'modify_volume_snap') def test_unmanage_snapshot_success(self, mock_mod, ): self.common.unmanage_snapshot(self.data.test_snapshot_manage) mock_mod.assert_called_once() @mock.patch.object(common.PowerMaxCommon, '_cleanup_device_snapvx') @mock.patch.object(rest.PowerMaxRest, 'modify_volume_snap') def test_unmanage_snapshot_no_snapvx_cleanup(self, mock_mod, mock_cleanup): self.common.unmanage_snapshot(self.data.test_snapshot_manage) mock_mod.assert_called_once() mock_cleanup.assert_not_called() @mock.patch.object(utils.PowerMaxUtils, 'is_volume_failed_over', return_value=True) def test_unmanage_snapshot_fail_failover(self, mock_failed): self.assertRaises(exception.VolumeBackendAPIException, self.common.unmanage_snapshot, self.data.test_snapshot_manage) @mock.patch.object(rest.PowerMaxRest, 'modify_volume_snap', side_effect=exception.VolumeBackendAPIException) def test_unmanage_snapshot_fail_rename(self, mock_snap): self.assertRaises(exception.VolumeBackendAPIException, self.common.unmanage_snapshot, self.data.test_snapshot_manage) @mock.patch.object( common.PowerMaxCommon, '_parse_snap_info', return_value=( tpd.PowerMaxData.device_id, tpd.PowerMaxData.snap_location['snap_name'], [tpd.PowerMaxData.snap_id])) @mock.patch.object(provision.PowerMaxProvision, 'delete_volume_snap') @mock.patch.object(provision.PowerMaxProvision, 'is_restore_complete', return_value=True) @mock.patch.object(common.PowerMaxCommon, '_cleanup_device_snapvx') @mock.patch.object(provision.PowerMaxProvision, 'revert_volume_snapshot') def test_revert_to_snapshot(self, mock_revert, mock_clone, mock_complete, mock_delete, mock_parse): volume = self.data.test_volume snapshot = self.data.test_snapshot array = self.data.array device_id = self.data.device_id snap_name = self.data.snap_location['snap_name'] snap_id = self.data.snap_id extra_specs = deepcopy(self.data.extra_specs_intervals_set) extra_specs['storagetype:portgroupname'] = ( self.data.port_group_name_f) self.common.revert_to_snapshot(volume, snapshot) mock_revert.assert_called_once_with( array, device_id, snap_name, snap_id, extra_specs) mock_clone.assert_called_once_with(array, device_id, extra_specs) mock_complete.assert_called_once_with(array, device_id, snap_name, snap_id, extra_specs) mock_delete.assert_called_once_with(array, snap_name, device_id, self.data.snap_id, restored=True) @mock.patch.object(utils.PowerMaxUtils, 'is_replication_enabled', return_value=True) def test_revert_to_snapshot_replicated(self, mock_rep): volume = self.data.test_volume snapshot = self.data.test_snapshot self.assertRaises(exception.VolumeDriverException, self.common.revert_to_snapshot, volume, snapshot) def test_get_initiator_check_flag(self): self.mock_object(self.common.configuration, 'initiator_check', False) initiator_check = 
self.common._get_initiator_check_flag() self.assertFalse(initiator_check) def test_get_initiator_check_flag_true(self): self.mock_object(self.common.configuration, 'initiator_check', True) initiator_check = self.common._get_initiator_check_flag() self.assertTrue(initiator_check) def test_get_manageable_volumes_success(self): marker = limit = offset = sort_keys = sort_dirs = None with mock.patch.object( self.rest, 'get_private_volume_list', return_value=self.data.priv_vol_func_response_single): vols_lists = self.common.get_manageable_volumes( marker, limit, offset, sort_keys, sort_dirs) expected_response = [ {'reference': {'source-id': '00001'}, 'safe_to_manage': True, 'size': 1.0, 'reason_not_safe': None, 'cinder_id': None, 'extra_info': {'config': 'TDEV', 'emulation': 'FBA'}}] self.assertEqual(vols_lists, expected_response) def test_get_manageable_volumes_filters_set(self): marker, limit, offset = '00002', 2, 1 sort_keys, sort_dirs = 'size', 'desc' with mock.patch.object( self.rest, 'get_private_volume_list', return_value=self.data.priv_vol_func_response_multi): vols_lists = self.common.get_manageable_volumes( marker, limit, offset, sort_keys, sort_dirs) expected_response = [ {'reference': {'source-id': '00003'}, 'safe_to_manage': True, 'size': 300, 'reason_not_safe': None, 'cinder_id': None, 'extra_info': {'config': 'TDEV', 'emulation': 'FBA'}}, {'reference': {'source-id': '00004'}, 'safe_to_manage': True, 'size': 400, 'reason_not_safe': None, 'cinder_id': None, 'extra_info': {'config': 'TDEV', 'emulation': 'FBA'}}] self.assertEqual(vols_lists, expected_response) def test_get_manageable_volumes_fail_no_vols(self): marker = limit = offset = sort_keys = sort_dirs = None with mock.patch.object( self.rest, 'get_private_volume_list', return_value=[]): expected_response = [] vol_list = self.common.get_manageable_volumes( marker, limit, offset, sort_keys, sort_dirs) self.assertEqual(vol_list, expected_response) def test_get_manageable_volumes_fail_no_valid_vols(self): marker = limit = offset = sort_keys = sort_dirs = None with mock.patch.object( self.rest, 'get_private_volume_list', return_value=self.data.priv_vol_func_response_multi_invalid): expected_response = [] vol_list = self.common.get_manageable_volumes( marker, limit, offset, sort_keys, sort_dirs) self.assertEqual(vol_list, expected_response) def test_get_manageable_snapshots_success(self): marker = limit = offset = sort_keys = sort_dirs = None with mock.patch.object( self.rest, 'get_private_volume_list', return_value=self.data.priv_vol_func_response_single): snap_list = self.common.get_manageable_snapshots( marker, limit, offset, sort_keys, sort_dirs) expected_response = [{ 'reference': {'source-name': 'testSnap1'}, 'safe_to_manage': True, 'size': 1, 'reason_not_safe': None, 'cinder_id': None, 'extra_info': { 'generation': 0, 'secured': False, 'timeToLive': 'N/A', 'timestamp': mock.ANY, 'snap_id': self.data.snap_id}, 'source_reference': {'source-id': '00001'}}] self.assertEqual(expected_response, snap_list) def test_get_manageable_snapshots_filters_set(self): marker, limit, offset = 'testSnap2', 2, 1 sort_keys, sort_dirs = 'size', 'desc' with mock.patch.object( self.rest, 'get_private_volume_list', return_value=self.data.priv_vol_func_response_multi): vols_lists = self.common.get_manageable_snapshots( marker, limit, offset, sort_keys, sort_dirs) expected_response = [ {'reference': {'source-name': 'testSnap3'}, 'safe_to_manage': True, 'size': 300, 'reason_not_safe': None, 'cinder_id': None, 'extra_info': { 'snap_id': self.data.snap_id, 
'secured': False, 'timeToLive': 'N/A', 'timestamp': mock.ANY, 'generation': 0}, 'source_reference': {'source-id': '00003'}}, {'reference': {'source-name': 'testSnap4'}, 'safe_to_manage': True, 'size': 400, 'reason_not_safe': None, 'cinder_id': None, 'extra_info': { 'snap_id': self.data.snap_id, 'secured': False, 'timeToLive': 'N/A', 'timestamp': mock.ANY, 'generation': 0}, 'source_reference': {'source-id': '00004'}}] self.assertEqual(vols_lists, expected_response) def test_get_manageable_snapshots_fail_no_snaps(self): marker = limit = offset = sort_keys = sort_dirs = None with mock.patch.object(self.rest, 'get_private_volume_list', return_value=[]): expected_response = [] vols_lists = self.common.get_manageable_snapshots( marker, limit, offset, sort_keys, sort_dirs) self.assertEqual(vols_lists, expected_response) def test_get_manageable_snapshots_fail_no_valid_snaps(self): marker = limit = offset = sort_keys = sort_dirs = None with mock.patch.object( self.rest, 'get_private_volume_list', return_value=self.data.priv_vol_func_response_multi_invalid): expected_response = [] vols_lists = self.common.get_manageable_snapshots( marker, limit, offset, sort_keys, sort_dirs) self.assertEqual(vols_lists, expected_response) def test_get_slo_workload_combo_from_cinder_conf(self): self.mock_object(self.common.configuration, 'powermax_service_level', 'Diamond') self.mock_object(self.common.configuration, 'vmax_workload', 'DSS') response1 = self.common.get_attributes_from_cinder_config() self.assertEqual('Diamond', response1['ServiceLevel']) self.assertEqual('DSS', response1['Workload']) # powermax_service_level is already set to Diamond self.mock_object(self.common.configuration, 'vmax_workload', None) response2 = self.common.get_attributes_from_cinder_config() self.assertEqual(self.common.configuration.powermax_service_level, response2['ServiceLevel']) self.assertIsNone(response2['Workload']) expected_response = { 'RestServerIp': '1.1.1.1', 'RestServerPort': 8443, 'RestUserName': 'smc', 'RestPassword': 'smc', 'SSLVerify': False, 'SerialNumber': '000197800123', 'srpName': 'SRP_1', 'PortGroup': ['OS-fibre-PG'], 'RestAPIConnectTimeout': 30, 'RestAPIReadTimeout': 30} self.mock_object(self.common.configuration, 'powermax_service_level', None) self.mock_object(self.common.configuration, 'vmax_workload', 'DSS') response3 = self.common.get_attributes_from_cinder_config() self.assertEqual(expected_response, response3) # powermax_service_level is already set to None self.mock_object(self.common.configuration, 'vmax_workload', None) response4 = self.common.get_attributes_from_cinder_config() self.assertEqual(expected_response, response4) def test_get_u4p_failover_info(self): configuration = tpfo.FakeConfiguration( None, 'CommonTests', 1, 1, san_ip='1.1.1.1', san_login='test', san_password='test', san_api_port=8443, driver_ssl_cert_verify='/path/to/cert', u4p_failover_target=(self.data.u4p_failover_config[ 'u4p_failover_targets']), u4p_failover_backoff_factor='2', u4p_failover_retries='3', u4p_failover_timeout='10', u4p_primary='10.10.10.10', powermax_array=self.data.array, powermax_srp=self.data.srp) self.mock_object(self.common, 'configuration', configuration) self.common._get_u4p_failover_info() self.assertTrue(self.rest.u4p_failover_enabled) self.assertIsNotNone(self.rest.u4p_failover_targets) @mock.patch.object(rest.PowerMaxRest, 'set_u4p_failover_config') def test_get_u4p_failover_info_failover_config(self, mck_set_fo): configuration = tpfo.FakeConfiguration( None, 'CommonTests', 1, 1, san_ip='1.1.1.1', 
san_login='test', san_password='test', san_api_port=8443, driver_ssl_cert_verify='/path/to/cert', u4p_failover_target=(self.data.u4p_failover_config[ 'u4p_failover_targets']), u4p_failover_backoff_factor='2', u4p_failover_retries='3', u4p_failover_timeout='10', u4p_primary='10.10.10.10', powermax_array=self.data.array, powermax_srp=self.data.srp) expected_u4p_failover_config = { 'u4p_failover_targets': [ {'RestServerIp': '10.10.10.11', 'RestServerPort': '8443', 'RestUserName': 'test', 'RestPassword': 'test', 'SSLVerify': 'True', 'SerialNumber': '000197800123'}, {'RestServerIp': '10.10.10.12', 'RestServerPort': '8443', 'RestUserName': 'test', 'RestPassword': 'test', 'SSLVerify': True, 'SerialNumber': '000197800123'}, {'RestServerIp': '10.10.10.11', 'RestServerPort': '8443', 'RestUserName': 'test', 'RestPassword': 'test', 'SSLVerify': 'False', 'SerialNumber': '000197800123'}], 'u4p_failover_backoff_factor': '2', 'u4p_failover_retries': '3', 'u4p_failover_timeout': '10', 'u4p_failover_autofailback': None, 'u4p_primary': { 'RestServerIp': '1.1.1.1', 'RestServerPort': 8443, 'RestUserName': 'test', 'RestPassword': 'test', 'SerialNumber': '000197800123', 'srpName': 'SRP_1', 'PortGroup': None, 'RestAPIConnectTimeout': 30, 'RestAPIReadTimeout': 30, 'SSLVerify': True}} self.mock_object(self.common, 'configuration', configuration) self.common._get_u4p_failover_info() self.assertIsNotNone(self.rest.u4p_failover_targets) mck_set_fo.assert_called_once_with(expected_u4p_failover_config) def test_update_vol_stats_retest_u4p(self): self.mock_object(self.rest, 'u4p_in_failover', True) self.mock_object(self.rest, 'u4p_failover_autofailback', True) with mock.patch.object( self.common, 'retest_primary_u4p') as mock_retest: self.common.update_volume_stats() mock_retest.assert_called_once() self.mock_object(self.rest, 'u4p_in_failover', False) self.mock_object(self.rest, 'u4p_failover_autofailback', False) with mock.patch.object( self.common, 'retest_primary_u4p') as mock_retest: self.common.update_volume_stats() mock_retest.assert_not_called() @mock.patch.object(rest.PowerMaxRest, 'request', return_value=[200, None]) @mock.patch.object( common.PowerMaxCommon, 'get_attributes_from_cinder_config', return_value=tpd.PowerMaxData.u4p_failover_target[0]) def test_retest_primary_u4p(self, mock_primary_u4p, mock_request): self.common.retest_primary_u4p() self.assertFalse(self.rest.u4p_in_failover) @mock.patch.object(rest.PowerMaxRest, 'is_vol_in_rep_session', return_value=(None, False, None)) @mock.patch.object(common.PowerMaxCommon, '_cleanup_device_snapvx') def test_extend_vol_validation_checks_success(self, mck_cleanup, mck_rep): volume = self.data.test_volume array = self.data.array device_id = self.data.device_id new_size = self.data.test_volume.size + 1 extra_specs = self.data.extra_specs self.common._extend_vol_validation_checks( array, device_id, volume.name, extra_specs, volume.size, new_size) @mock.patch.object(rest.PowerMaxRest, 'is_vol_in_rep_session', return_value=(None, False, None)) @mock.patch.object(common.PowerMaxCommon, '_cleanup_device_snapvx') def test_extend_vol_val_check_no_device(self, mck_cleanup, mck_rep): volume = self.data.test_volume array = self.data.array device_id = None new_size = self.data.test_volume.size + 1 extra_specs = self.data.extra_specs self.assertRaises( exception.VolumeBackendAPIException, self.common._extend_vol_validation_checks, array, device_id, volume.name, extra_specs, volume.size, new_size) @mock.patch.object(rest.PowerMaxRest, 'is_vol_in_rep_session', 
return_value=(None, True, None)) @mock.patch.object(common.PowerMaxCommon, '_cleanup_device_snapvx') def test_extend_vol_val_check_snap_src(self, mck_cleanup, mck_rep): volume = self.data.test_volume array = self.data.array device_id = self.data.device_id new_size = self.data.test_volume.size + 1 extra_specs = deepcopy(self.data.extra_specs) self.mock_object(self.common, 'next_gen', False) self.assertRaises( exception.VolumeBackendAPIException, self.common._extend_vol_validation_checks, array, device_id, volume.name, extra_specs, volume.size, new_size) @mock.patch.object(rest.PowerMaxRest, 'is_vol_in_rep_session', return_value=(None, False, None)) @mock.patch.object(common.PowerMaxCommon, '_cleanup_device_snapvx') def test_extend_vol_val_check_wrong_size(self, mck_cleanup, mck_rep): volume = self.data.test_volume array = self.data.array device_id = self.data.device_id new_size = volume.size - 1 extra_specs = self.data.extra_specs self.assertRaises( exception.VolumeBackendAPIException, self.common._extend_vol_validation_checks, array, device_id, volume.name, extra_specs, volume.size, new_size) def test_array_ode_capabilities_check_non_next_gen_local(self): """Rep enabled, neither array next gen, returns F,F,F,F""" array = self.data.powermax_model_details['symmetrixId'] self.mock_object(self.common, 'next_gen', False) (r1_ode, r1_ode_metro, r2_ode, r2_ode_metro) = self.common._array_ode_capabilities_check( array, self.data.rep_config_metro, True) self.assertFalse(r1_ode) self.assertFalse(r1_ode_metro) self.assertFalse(r2_ode) self.assertFalse(r2_ode_metro) @mock.patch.object(rest.PowerMaxRest, 'get_array_detail', return_value={'ucode': '5977.1.1'}) @mock.patch.object(common.PowerMaxCommon, 'get_rdf_details', return_value=(10, tpd.PowerMaxData.remote_array)) def test_array_ode_capabilities_check_next_gen_non_rep_pre_elm( self, mock_rdf, mock_det): """Rep disabled, local array next gen, pre elm, returns T,F,F,F""" array = self.data.powermax_model_details['symmetrixId'] self.mock_object(self.common, 'ucode_level', '5978.1.1') self.mock_object(self.common, 'next_gen', True) (r1_ode, r1_ode_metro, r2_ode, r2_ode_metro) = self.common._array_ode_capabilities_check( array, self.data.rep_config_metro, False) self.assertTrue(r1_ode) self.assertFalse(r1_ode_metro) self.assertFalse(r2_ode) self.assertFalse(r2_ode_metro) @mock.patch.object(rest.PowerMaxRest, 'get_array_detail', return_value={'ucode': '5977.1.1'}) @mock.patch.object(common.PowerMaxCommon, 'get_rdf_details', return_value=(10, tpd.PowerMaxData.remote_array)) def test_array_ode_capabilities_check_next_gen_remote_rep( self, mock_rdf, mock_det): """Rep enabled, remote not next gen, returns T,T,F,F""" array = self.data.powermax_model_details['symmetrixId'] self.mock_object(self.common, 'ucode_level', self.data.powermax_model_details['ucode']) self.mock_object(self.common, 'next_gen', True) (r1_ode, r1_ode_metro, r2_ode, r2_ode_metro) = self.common._array_ode_capabilities_check( array, self.data.rep_config_metro, True) self.assertTrue(r1_ode) self.assertTrue(r1_ode_metro) self.assertFalse(r2_ode) self.assertFalse(r2_ode_metro) @mock.patch.object(rest.PowerMaxRest, 'get_array_detail', return_value={'ucode': '5978.1.1'}) @mock.patch.object(common.PowerMaxCommon, 'get_rdf_details', return_value=(10, tpd.PowerMaxData.remote_array)) def test_array_ode_capabilities_check_next_gen_pre_elm_rep( self, mock_rdf, mock_det): """Rep enabled, both array next gen, tgt<5978.221, returns T,T,T,F""" array = self.data.powermax_model_details['symmetrixId'] 
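        # The remote array's ucode is mocked at 5978.1.1, which is next gen
        # but below the 5978.221 level noted in the docstring, so the target
        # reports ODE support without Metro ODE (T, T, T, F).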
self.mock_object(self.common, 'ucode_level', self.data.powermax_model_details['ucode']) self.mock_object(self.common, 'next_gen', True) (r1_ode, r1_ode_metro, r2_ode, r2_ode_metro) = self.common._array_ode_capabilities_check( array, self.data.rep_config_metro, True) self.assertTrue(r1_ode) self.assertTrue(r1_ode_metro) self.assertTrue(r2_ode) self.assertFalse(r2_ode_metro) @mock.patch.object(rest.PowerMaxRest, 'get_array_detail', return_value=tpd.PowerMaxData.ucode_5978_foxtail) @mock.patch.object(common.PowerMaxCommon, 'get_rdf_details', return_value=(10, tpd.PowerMaxData.remote_array)) def test_array_ode_capabilities_check_next_gen_post_elm_rep( self, mock_rdf, mock_det): """Rep enabled, both array next gen, tgt>5978.221 returns T,T,T,T""" array = self.data.powermax_model_details['symmetrixId'] self.mock_object(self.common, 'ucode_level', self.data.powermax_model_details['ucode']) self.mock_object(self.common, 'next_gen', True) (r1_ode, r1_ode_metro, r2_ode, r2_ode_metro) = self.common._array_ode_capabilities_check( array, self.data.rep_config_metro, True) self.assertTrue(r1_ode) self.assertTrue(r1_ode_metro) self.assertTrue(r2_ode) self.assertTrue(r2_ode_metro) @mock.patch.object(rest.PowerMaxRest, 'srdf_resume_replication') @mock.patch.object(common.PowerMaxCommon, '_protect_storage_group') @mock.patch.object( common.PowerMaxCommon, 'configure_volume_replication', return_value=('first_vol_in_rdf_group', None, None, tpd.PowerMaxData.rep_extra_specs_mgmt, True)) @mock.patch.object(provision.PowerMaxProvision, 'extend_volume') @mock.patch.object(common.PowerMaxCommon, 'break_rdf_device_pair_session') def test_extend_legacy_replicated_vol( self, mck_break, mck_extend, mck_configure, mck_protect, mck_res): volume = self.data.test_volume_group_member array = self.data.array device_id = self.data.device_id new_size = volume.size + 1 extra_specs = self.data.extra_specs rdf_group_no = self.data.rdf_group_no_1 self.common._extend_legacy_replicated_vol( array, volume, device_id, volume.name, new_size, extra_specs, rdf_group_no) mck_protect.assert_called_once() mck_res.assert_called_once() @mock.patch.object( common.PowerMaxCommon, 'break_rdf_device_pair_session', side_effect=exception.VolumeBackendAPIException) def test_extend_legacy_replicated_vol_fail(self, mck_resume): volume = self.data.test_volume_group_member array = self.data.array device_id = self.data.device_id new_size = volume.size + 1 extra_specs = self.data.extra_specs rdf_group_no = self.data.rdf_group_no_1 self.assertRaises( exception.VolumeBackendAPIException, self.common._extend_legacy_replicated_vol, array, device_id, volume.name, extra_specs, volume.size, new_size, rdf_group_no) def test_get_unisphere_port(self): # Test user set port ID configuration = tpfo.FakeConfiguration( None, 'CommonTests', 1, 1, san_ip='1.1.1.1', san_login='smc', powermax_array=self.data.array, powermax_srp='SRP_1', san_password='smc', san_api_port=1234, powermax_port_groups=[self.data.port_group_name_i]) self.mock_object(self.common, 'configuration', configuration) port = self.common._get_unisphere_port() self.assertEqual(1234, port) # Test no set port ID, use default port configuration = tpfo.FakeConfiguration( None, 'CommonTests', 1, 1, san_ip='1.1.1.1', san_login='smc', powermax_array=self.data.array, powermax_srp='SRP_1', san_password='smc', powermax_port_groups=[self.data.port_group_name_i]) self.mock_object(self.common, 'configuration', configuration) ref_port = utils.DEFAULT_PORT port = self.common._get_unisphere_port() self.assertEqual(ref_port, 
port) @mock.patch.object(rest.PowerMaxRest, 'find_snap_vx_sessions', return_value=(None, tpd.PowerMaxData.snap_tgt_session)) @mock.patch.object(rest.PowerMaxRest, 'is_vol_in_rep_session', return_value=(True, False, False)) def test_get_target_source_device(self, mck_rep, mck_find): array = self.data.array tgt_device = self.data.device_id2 src_device = self.common._get_target_source_device(array, tgt_device) self.assertEqual(src_device, self.data.device_id) @mock.patch.object(rest.PowerMaxRest, '_get_private_volume', return_value=tpd.PowerMaxData.priv_vol_response_rep) @mock.patch.object(rest.PowerMaxRest, 'get_array_model_info', return_value=(tpd.PowerMaxData.array_model, None)) @mock.patch.object(rest.PowerMaxRest, 'get_rdf_group', return_value=(tpd.PowerMaxData.rdf_group_details)) def test_get_volume_metadata_rep(self, mck_rdf, mck_model, mck_priv): ref_metadata = { 'DeviceID': self.data.device_id, 'DeviceLabel': self.data.device_label, 'ArrayID': self.data.array, 'ArrayModel': self.data.array_model, 'ServiceLevel': 'None', 'Workload': 'None', 'Emulation': 'FBA', 'Configuration': 'TDEV', 'CompressionDisabled': 'True', 'ReplicationEnabled': 'True', 'R2-DeviceID': self.data.device_id2, 'R2-ArrayID': self.data.remote_array, 'R2-ArrayModel': self.data.array_model, 'ReplicationMode': 'Synchronized', 'RDFG-Label': self.data.rdf_group_name_1, 'R1-RDFG': 1, 'R2-RDFG': 1} array = self.data.array device_id = self.data.device_id act_metadata = self.common.get_volume_metadata(array, device_id) self.assertEqual(ref_metadata, act_metadata) @mock.patch.object(rest.PowerMaxRest, '_get_private_volume', return_value=tpd.PowerMaxData. priv_vol_response_metro_active_rep) @mock.patch.object(rest.PowerMaxRest, 'get_array_model_info', return_value=(tpd.PowerMaxData.array_model, None)) @mock.patch.object(rest.PowerMaxRest, 'get_rdf_group', return_value=(tpd.PowerMaxData.rdf_group_details)) def test_get_volume_metadata_metro_active_rep(self, mck_rdf, mck_model, mck_priv): ref_metadata = { 'DeviceID': self.data.device_id, 'DeviceLabel': self.data.device_label, 'ArrayID': self.data.array, 'ArrayModel': self.data.array_model, 'ServiceLevel': 'None', 'Workload': 'None', 'Emulation': 'FBA', 'Configuration': 'TDEV', 'CompressionDisabled': 'True', 'ReplicationEnabled': 'True', 'R2-DeviceID': self.data.device_id2, 'R2-ArrayID': self.data.remote_array, 'R2-ArrayModel': self.data.array_model, 'ReplicationMode': 'Metro', 'RDFG-Label': self.data.rdf_group_name_1, 'R1-RDFG': 1, 'R2-RDFG': 1} array = self.data.array device_id = self.data.device_id act_metadata = self.common.get_volume_metadata(array, device_id) self.assertEqual(ref_metadata, act_metadata) @mock.patch.object(rest.PowerMaxRest, '_get_private_volume', return_value=tpd.PowerMaxData.priv_vol_response_no_rep) @mock.patch.object(rest.PowerMaxRest, 'get_array_model_info', return_value=(tpd.PowerMaxData.array_model, None)) def test_get_volume_metadata_no_rep(self, mck_model, mck_priv): ref_metadata = { 'DeviceID': self.data.device_id, 'DeviceLabel': self.data.device_label, 'ArrayID': self.data.array, 'ArrayModel': self.data.array_model, 'ServiceLevel': 'None', 'Workload': 'None', 'Emulation': 'FBA', 'Configuration': 'TDEV', 'CompressionDisabled': 'True', 'ReplicationEnabled': 'False'} array = self.data.array device_id = self.data.device_id act_metadata = self.common.get_volume_metadata(array, device_id) self.assertEqual(ref_metadata, act_metadata) def test_get_volume_metadata_device_none(self): ref_metadata = {} array = self.data.array device_id = None act_metadata = 
self.common.get_volume_metadata(array, device_id) self.assertEqual(ref_metadata, act_metadata) @mock.patch.object(rest.PowerMaxRest, 'get_volume_snap_info', return_value=tpd.PowerMaxData.priv_snap_response) def test_get_snapshot_metadata(self, mck_snap): array = self.data.array device_id = self.data.device_id device_label = self.data.managed_snap_id snap_name = self.data.test_snapshot_snap_name ref_metadata = {'SnapshotLabel': snap_name, 'SourceDeviceID': device_id, 'SourceDeviceLabel': device_label, 'SnapIdList': str(self.data.snap_id), 'is_snap_id': True} act_metadata = self.common.get_snapshot_metadata( array, device_id, snap_name) self.assertEqual(ref_metadata, act_metadata) @mock.patch.object( rest.PowerMaxRest, 'get_volume_snap_info', return_value=(tpd.PowerMaxData.priv_snap_response_no_label)) def test_get_snapshot_metadata_no_label(self, mck_snap): array = self.data.array device_id = self.data.device_id snap_name = self.data.test_snapshot_snap_name ref_metadata = {'SnapshotLabel': snap_name, 'SourceDeviceID': device_id, 'SnapIdList': str(self.data.snap_id), 'is_snap_id': True} act_metadata = self.common.get_snapshot_metadata( array, device_id, snap_name) self.assertEqual(ref_metadata, act_metadata) def test_update_metadata(self): model_update = {'provider_location': str( self.data.provider_location)} ref_model_update = ( {'provider_location': str(self.data.provider_location), 'metadata': {'device-meta-key-1': 'device-meta-value-1', 'device-meta-key-2': 'device-meta-value-2', 'user-meta-key-1': 'user-meta-value-1', 'user-meta-key-2': 'user-meta-value-2'}}) existing_metadata = {'user-meta-key-1': 'user-meta-value-1', 'user-meta-key-2': 'user-meta-value-2'} object_metadata = {'device-meta-key-1': 'device-meta-value-1', 'device-meta-key-2': 'device-meta-value-2'} model_update = self.common.update_metadata( model_update, existing_metadata, object_metadata) self.assertEqual(ref_model_update, model_update) def test_update_metadata_no_model(self): model_update = None ref_model_update = ( {'metadata': {'device-meta-key-1': 'device-meta-value-1', 'device-meta-key-2': 'device-meta-value-2', 'user-meta-key-1': 'user-meta-value-1', 'user-meta-key-2': 'user-meta-value-2'}}) existing_metadata = {'user-meta-key-1': 'user-meta-value-1', 'user-meta-key-2': 'user-meta-value-2'} object_metadata = {'device-meta-key-1': 'device-meta-value-1', 'device-meta-key-2': 'device-meta-value-2'} model_update = self.common.update_metadata( model_update, existing_metadata, object_metadata) self.assertEqual(ref_model_update, model_update) def test_update_metadata_no_existing_metadata(self): model_update = {'provider_location': str( self.data.provider_location)} ref_model_update = ( {'provider_location': str(self.data.provider_location), 'metadata': {'device-meta-key-1': 'device-meta-value-1', 'device-meta-key-2': 'device-meta-value-2'}}) existing_metadata = None object_metadata = {'device-meta-key-1': 'device-meta-value-1', 'device-meta-key-2': 'device-meta-value-2'} model_update = self.common.update_metadata( model_update, existing_metadata, object_metadata) self.assertEqual(ref_model_update, model_update) def test_update_metadata_no_object_metadata(self): model_update = {'provider_location': str( self.data.provider_location)} ref_model_update = ( {'provider_location': str(self.data.provider_location), 'metadata': {'user-meta-key-1': 'user-meta-value-1', 'user-meta-key-2': 'user-meta-value-2'}}) existing_metadata = {'user-meta-key-1': 'user-meta-value-1', 'user-meta-key-2': 'user-meta-value-2'} object_metadata = {} 
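        # With empty object metadata, only the existing user metadata should
        # be carried through to the returned model update.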
model_update = self.common.update_metadata( model_update, existing_metadata, object_metadata) self.assertEqual(ref_model_update, model_update) def test_update_metadata_model_list_exception(self): model_update = [{'provider_location': str( self.data.provider_location)}] existing_metadata = None object_metadata = {'device-meta-key-1': 'device-meta-value-1', 'device-meta-key-2': 'device-meta-value-2'} self.assertRaises( exception.VolumeBackendAPIException, self.common.update_metadata, model_update, existing_metadata, object_metadata) def test_remove_stale_data(self): ret_model_update = self.common.remove_stale_data( self.data.replication_model) self.assertEqual(self.data.non_replication_model, ret_model_update) @mock.patch.object(rest.PowerMaxRest, 'get_storage_group', return_value=tpd.PowerMaxData.add_volume_sg_info_dict) def test_get_tags_of_storage_group_none(self, mock_sg): self.assertIsNone(self.common.get_tags_of_storage_group( self.data.array, self.data.defaultstoragegroup_name)) @mock.patch.object(rest.PowerMaxRest, 'get_storage_group', return_value=tpd.PowerMaxData.storage_group_with_tags) def test_get_tags_of_storage_group_exists(self, mock_sg): tag_list = self.common.get_tags_of_storage_group( self.data.array, self.data.defaultstoragegroup_name) self.assertEqual(tpd.PowerMaxData.sg_tags, tag_list) @mock.patch.object(rest.PowerMaxRest, 'get_storage_group', side_effect=exception.APIException) def test_get_tags_of_storage_group_exception(self, mock_sg): self.assertIsNone(self.common.get_tags_of_storage_group( self.data.array, self.data.storagegroup_name_f)) @mock.patch.object(rest.PowerMaxRest, 'add_storage_array_tags') @mock.patch.object(rest.PowerMaxRest, 'get_array_tags', return_value=[]) def test_check_and_add_tags_to_storage_array( self, mock_get_tags, mock_add_tags): array_tag_list = ['OpenStack'] self.common._check_and_add_tags_to_storage_array( self.data.array, array_tag_list, self.data.extra_specs) mock_add_tags.assert_called_with( self.data.array, array_tag_list, self.data.extra_specs) @mock.patch.object(rest.PowerMaxRest, 'add_storage_array_tags') @mock.patch.object(rest.PowerMaxRest, 'get_array_tags', return_value=[]) def test_check_and_add_tags_to_storage_array_add_2_tags( self, mock_get_tags, mock_add_tags): array_tag_list = ['OpenStack', 'Production'] self.common._check_and_add_tags_to_storage_array( self.data.array, array_tag_list, self.data.extra_specs) mock_add_tags.assert_called_with( self.data.array, array_tag_list, self.data.extra_specs) @mock.patch.object(rest.PowerMaxRest, 'add_storage_array_tags') @mock.patch.object(rest.PowerMaxRest, 'get_array_tags', return_value=['Production']) def test_check_and_add_tags_to_storage_array_add_1_tags( self, mock_get_tags, mock_add_tags): array_tag_list = ['OpenStack', 'Production'] add_tag_list = ['OpenStack'] self.common._check_and_add_tags_to_storage_array( self.data.array, array_tag_list, self.data.extra_specs) mock_add_tags.assert_called_with( self.data.array, add_tag_list, self.data.extra_specs) @mock.patch.object(rest.PowerMaxRest, 'add_storage_array_tags') @mock.patch.object(rest.PowerMaxRest, 'get_array_tags', return_value=['openstack']) def test_check_and_add_tags_to_storage_array_already_tagged( self, mock_get_tags, mock_add_tags): array_tag_list = ['OpenStack'] self.common._check_and_add_tags_to_storage_array( self.data.array, array_tag_list, self.data.extra_specs) mock_add_tags.assert_not_called() @mock.patch.object(rest.PowerMaxRest, 'get_array_tags', return_value=[]) def 
test_check_and_add_tags_to_storage_array_invalid_tag( self, mock_get_tags): array_tag_list = ['Open$tack'] self.assertRaises( exception.VolumeBackendAPIException, self.common._check_and_add_tags_to_storage_array, self.data.array, array_tag_list, self.data.extra_specs) def test_validate_storage_group_tag_list_good_tag_list(self): self.common._validate_storage_group_tag_list( self.data.vol_type_extra_specs_tags) @mock.patch.object(utils.PowerMaxUtils, 'verify_tag_list') def test_validate_storage_group_tag_list_no_tag_list( self, mock_verify): self.common._validate_storage_group_tag_list( self.data.extra_specs) mock_verify.assert_not_called() def test_set_config_file_and_get_extra_specs(self): self.mock_object(self.common, 'rep_configs', [{'mode': utils.REP_METRO, utils.METROBIAS: True}]) original_specs = deepcopy(self.data.rep_extra_specs_metro) try: with mock.patch.object( self.utils, 'get_volumetype_extra_specs', return_value=self.data.rep_extra_specs_metro): specs, __ = self.common._set_config_file_and_get_extra_specs( self.data.test_volume, None) self.assertEqual(self.data.rep_extra_specs_metro, specs) finally: self.data.rep_extra_specs_metro = original_specs @mock.patch.object(utils.PowerMaxUtils, 'get_rdf_management_group_name') def test_retype_volume_promotion_get_extra_specs_mgmt_group(self, mck_get): array = self.data.array srp = self.data.srp device_id = self.data.device_id volume = self.data.test_volume volume_name = self.data.volume_id extra_specs = deepcopy(self.data.rep_extra_specs) target_slo = self.data.slo_silver target_workload = self.data.workload target_extra_specs = deepcopy(self.data.extra_specs) target_extra_specs[utils.DISABLECOMPRESSION] = False extra_specs[utils.REP_CONFIG] = self.data.rep_config_async self.mock_object(self.common, 'promotion', True) self.common._retype_volume( array, srp, device_id, volume, volume_name, extra_specs, target_slo, target_workload, target_extra_specs) mck_get.assert_called_once_with(extra_specs[utils.REP_CONFIG]) @mock.patch.object(rest.PowerMaxRest, 'is_volume_in_storagegroup', return_value=True) @mock.patch.object(masking.PowerMaxMasking, 'return_volume_to_volume_group') @mock.patch.object(masking.PowerMaxMasking, 'move_volume_between_storage_groups') @mock.patch.object( masking.PowerMaxMasking, 'get_or_create_default_storage_group', return_value=tpd.PowerMaxData.rdf_managed_async_grp) @mock.patch.object(rest.PowerMaxRest, 'get_volume', return_value=tpd.PowerMaxData.volume_details[0]) @mock.patch.object(utils.PowerMaxUtils, 'get_rdf_management_group_name', return_value=tpd.PowerMaxData.rdf_managed_async_grp) def test_retype_volume_detached( self, mck_get_rdf, mck_get_vol, mck_get_sg, mck_move_vol, mck_return_vol, mck_is_vol): array = self.data.array srp = self.data.srp device_id = self.data.device_id volume = self.data.test_volume volume_name = self.data.volume_id extra_specs = deepcopy(self.data.rep_extra_specs) target_slo = self.data.slo_silver target_workload = self.data.workload target_extra_specs = deepcopy(self.data.rep_extra_specs) target_extra_specs[utils.DISABLECOMPRESSION] = False group_name = self.data.rdf_managed_async_grp extra_specs[utils.REP_CONFIG] = self.data.rep_config_async target_extra_specs[utils.REP_CONFIG] = self.data.rep_config_async success, target_sg_name = self.common._retype_volume( array, srp, device_id, volume, volume_name, extra_specs, target_slo, target_workload, target_extra_specs, remote=True) mck_get_rdf.assert_called_once_with(self.data.rep_config_async) mck_get_vol.assert_called_once_with(array, 
device_id) mck_get_sg.assert_called_once_with( array, srp, target_slo, target_workload, extra_specs, False, True, target_extra_specs['rep_mode']) mck_move_vol.assert_called_once_with( array, device_id, self.data.volume_details[0]['storageGroupId'][0], group_name, extra_specs, force=True, parent_sg=None) mck_return_vol.assert_called_once_with( array, volume, device_id, volume_name, extra_specs) mck_is_vol.assert_called_once_with(array, device_id, group_name) self.assertTrue(success) self.assertEqual(group_name, target_sg_name) @mock.patch.object( utils.PowerMaxUtils, 'get_port_name_label', return_value='my_pg') @mock.patch.object( utils.PowerMaxUtils, 'get_volume_attached_hostname', return_value='HostX') @mock.patch.object( rest.PowerMaxRest, 'is_volume_in_storagegroup', return_value=True) @mock.patch.object( masking.PowerMaxMasking, 'return_volume_to_volume_group') @mock.patch.object( masking.PowerMaxMasking, 'move_volume_between_storage_groups') @mock.patch.object( masking.PowerMaxMasking, 'add_child_sg_to_parent_sg') @mock.patch.object( provision.PowerMaxProvision, 'create_storage_group') @mock.patch.object( rest.PowerMaxRest, 'get_storage_group', side_effect=[None, tpd.PowerMaxData.volume_info_dict]) @mock.patch.object( rest.PowerMaxRest, 'get_volume', return_value=tpd.PowerMaxData.volume_details[0]) @mock.patch.object( utils.PowerMaxUtils, 'get_rdf_management_group_name', return_value=tpd.PowerMaxData.rdf_managed_async_grp) def test_retype_volume_attached( self, mck_get_rdf, mck_get_vol, mck_get_sg, mck_create, mck_add, mck_move_vol, mck_return_vol, mck_is_vol, mck_host, mck_pg): array = self.data.array srp = self.data.srp device_id = self.data.device_id volume = self.data.test_attached_volume volume_name = self.data.volume_id extra_specs = self.data.rep_extra_specs_rep_config target_slo = self.data.slo_silver target_workload = self.data.workload target_extra_specs = deepcopy(self.data.rep_extra_specs) target_extra_specs[utils.DISABLECOMPRESSION] = False target_extra_specs[utils.REP_CONFIG] = self.data.rep_config_sync success, target_sg_name = self.common._retype_volume( array, srp, device_id, volume, volume_name, extra_specs, target_slo, target_workload, target_extra_specs) mck_get_rdf.assert_called_once() mck_get_vol.assert_called_once() mck_create.assert_called_once() mck_add.assert_called_once() mck_move_vol.assert_called_once() mck_return_vol.assert_called_once() mck_is_vol.assert_called_once() self.assertEqual(2, mck_get_sg.call_count) self.assertTrue(success) @mock.patch.object( utils.PowerMaxUtils, 'get_port_name_label', return_value='my_pg') @mock.patch.object( utils.PowerMaxUtils, 'get_volume_attached_hostname', return_value='HostX') @mock.patch.object( rest.PowerMaxRest, 'is_volume_in_storagegroup', return_value=True) @mock.patch.object( masking.PowerMaxMasking, 'return_volume_to_volume_group') @mock.patch.object( masking.PowerMaxMasking, 'move_volume_between_storage_groups') @mock.patch.object( masking.PowerMaxMasking, 'add_child_sg_to_parent_sg') @mock.patch.object( provision.PowerMaxProvision, 'create_storage_group') @mock.patch.object( rest.PowerMaxRest, 'get_storage_group', side_effect=[None, tpd.PowerMaxData.volume_info_dict]) @mock.patch.object( rest.PowerMaxRest, 'get_volume', return_value=tpd.PowerMaxData.volume_details[0]) @mock.patch.object( utils.PowerMaxUtils, 'get_rdf_management_group_name', return_value=tpd.PowerMaxData.rdf_managed_async_grp) def test_retype_volume_attached_metro( self, mck_get_rdf, mck_get_vol, mck_get_sg, mck_create, mck_add, mck_move_vol, 
mck_return_vol, mck_is_vol, mck_host, mck_pg): array = self.data.array srp = self.data.srp device_id = self.data.device_id volume = self.data.test_attached_volume volume_name = self.data.volume_id extra_specs = self.data.rep_extra_specs_rep_config_metro target_slo = self.data.slo_silver target_workload = self.data.workload target_extra_specs = deepcopy(self.data.rep_extra_specs) target_extra_specs[utils.DISABLECOMPRESSION] = False target_extra_specs[utils.REP_CONFIG] = self.data.rep_config_sync success, target_sg_name = self.common._retype_volume( array, srp, device_id, volume, volume_name, extra_specs, target_slo, target_workload, target_extra_specs, remote=True, metro_attach=True) mck_get_rdf.assert_called_once() mck_get_vol.assert_called_once() mck_create.assert_called_once() mck_add.assert_called_once() mck_move_vol.assert_called_once() mck_return_vol.assert_called_once() mck_is_vol.assert_called_once() self.assertEqual(2, mck_get_sg.call_count) self.assertTrue(success) @mock.patch.object( utils.PowerMaxUtils, 'get_volume_attached_hostname', return_value=None) @mock.patch.object( rest.PowerMaxRest, 'get_volume', return_value=tpd.PowerMaxData.volume_details[0]) @mock.patch.object( utils.PowerMaxUtils, 'get_rdf_management_group_name', return_value=tpd.PowerMaxData.rdf_managed_async_grp) def test_retype_volume_attached_no_host_fail( self, mck_get_rdf, mck_get_vol, mck_get_host): array = self.data.array srp = self.data.srp device_id = self.data.device_id volume = self.data.test_attached_volume volume_name = self.data.volume_id extra_specs = deepcopy(self.data.rep_extra_specs_rep_config) extra_specs[utils.PORTGROUPNAME] = self.data.port_group_name_f target_slo = self.data.slo_silver target_workload = self.data.workload target_extra_specs = deepcopy(self.data.rep_extra_specs) target_extra_specs[utils.DISABLECOMPRESSION] = False target_extra_specs[utils.REP_CONFIG] = self.data.rep_config_async success, target_sg_name = self.common._retype_volume( array, srp, device_id, volume, volume_name, extra_specs, target_slo, target_workload, target_extra_specs) mck_get_rdf.assert_called_once() mck_get_vol.assert_called_once() self.assertFalse(success) self.assertIsNone(target_sg_name) @mock.patch.object(rest.PowerMaxRest, 'is_volume_in_storagegroup', return_value=False) @mock.patch.object(masking.PowerMaxMasking, 'return_volume_to_volume_group') @mock.patch.object(masking.PowerMaxMasking, 'move_volume_between_storage_groups') @mock.patch.object( masking.PowerMaxMasking, 'get_or_create_default_storage_group', return_value=tpd.PowerMaxData.rdf_managed_async_grp) @mock.patch.object(rest.PowerMaxRest, 'get_volume', return_value=tpd.PowerMaxData.volume_details[0]) @mock.patch.object(utils.PowerMaxUtils, 'get_rdf_management_group_name', return_value=tpd.PowerMaxData.rdf_managed_async_grp) def test_retype_volume_detached_vol_not_in_sg_fail( self, mck_get_rdf, mck_get_vol, mck_get_sg, mck_move_vol, mck_return_vol, mck_is_vol): array = self.data.array srp = self.data.srp device_id = self.data.device_id volume = self.data.test_volume volume_name = self.data.volume_id extra_specs = deepcopy(self.data.rep_extra_specs) target_slo = self.data.slo_silver target_workload = self.data.workload target_extra_specs = deepcopy(self.data.rep_extra_specs) target_extra_specs[utils.DISABLECOMPRESSION] = False extra_specs[utils.REP_CONFIG] = self.data.rep_config_async target_extra_specs[utils.REP_CONFIG] = self.data.rep_config_async success, target_sg_name = self.common._retype_volume( array, srp, device_id, volume, volume_name, 
extra_specs, target_slo, target_workload, target_extra_specs, remote=True) self.assertFalse(success) self.assertIsNone(target_sg_name) @mock.patch.object( rest.PowerMaxRest, 'rename_volume') @mock.patch.object( rest.PowerMaxRest, 'get_rdf_pair_volume', return_value=tpd.PowerMaxData.rdf_group_vol_details) def test_get_and_set_remote_device_uuid(self, mck_get_pair, mck_rename): extra_specs = self.data.rep_extra_specs rep_extra_specs = self.data.rep_extra_specs_mgmt volume_dict = {'device_id': self.data.device_id, 'device_uuid': self.data.volume_id} remote_vol = self.common.get_and_set_remote_device_uuid( extra_specs, rep_extra_specs, volume_dict) self.assertEqual(remote_vol, self.data.device_id2) @mock.patch.object(utils.PowerMaxUtils, 'get_volume_group_utils', return_value=(None, {'interval': 1, 'retries': 1})) def test_get_volume_group_info(self, mock_group_utils): self.mock_object(self.common, 'interval', 1) self.mock_object(self.common, 'retries', 1) with mock.patch.object( tpfo.FakeConfiguration, 'safe_get') as mock_array: self.common._get_volume_group_info( self.data.test_group_1) mock_group_utils.assert_called_once_with( self.data.test_group_1, self.common.interval, self.common.retries) mock_array.assert_called_once() def test_get_performance_config(self): ref_cinder_conf = tpfo.FakeConfiguration( None, 'ProvisionTests', 1, 1, san_ip='1.1.1.1', san_login='smc', powermax_array=self.data.array, powermax_srp='SRP_1', san_password='smc', san_api_port=8443, powermax_port_groups=[self.data.port_group_name_f], load_balance=True, load_balance_real_time=True, load_data_format='avg', load_look_back=60, load_look_back_real_time=10, port_group_load_metric='PercentBusy', port_load_metric='PercentBusy') ref_perf_conf = self.data.performance_config self.mock_object(volume_utils, 'get_max_over_subscription_ratio') self.mock_object(rest.PowerMaxRest, '_establish_rest_session', return_value=tpfo.FakeRequestsSession()) driver = fc.PowerMaxFCDriver(configuration=ref_cinder_conf) self.assertEqual(ref_perf_conf, driver.common.performance.config) def test_select_port_group_for_extra_specs_volume_type(self): """Test _select_port_group_for_extra_specs PG in volume-type.""" extra_specs = {utils.PORTGROUPNAME: self.data.port_group_name_i} pool_record = {} port_group = self.common._select_port_group_for_extra_specs( extra_specs, pool_record) self.assertEqual(self.data.port_group_name_i, port_group) def test_select_port_group_for_extra_specs_cinder_conf_single(self): """Test _select_port_group_for_extra_specs single PG in cinder conf.""" extra_specs = {} pool_record = {utils.PORT_GROUP: [self.data.port_group_name_i]} port_group = self.common._select_port_group_for_extra_specs( extra_specs, pool_record) self.assertEqual(self.data.port_group_name_i, port_group) def test_select_port_group_for_extra_specs_cinder_conf_multi(self): """Test _select_port_group_for_extra_specs multi PG in cinder conf. Random selection is used, no performance configuration supplied. """ extra_specs = {} pool_record = {utils.PORT_GROUP: self.data.perf_port_groups} port_group = self.common._select_port_group_for_extra_specs( extra_specs, pool_record) self.assertIn(port_group, self.data.perf_port_groups) def test_select_port_group_for_extra_specs_load_balanced(self): """Test _select_port_group_for_extra_specs multi PG in cinder conf. Load balanced selection is used, performance configuration supplied. 
""" extra_specs = {utils.ARRAY: self.data.array} pool_record = {utils.PORT_GROUP: self.data.perf_port_groups} self.mock_object(self.common.performance, 'config', self.data.performance_config) with mock.patch.object( self.common.performance, 'process_port_group_load', side_effect=( self.common.performance.process_port_group_load)) as ( mck_process): port_group = self.common._select_port_group_for_extra_specs( extra_specs, pool_record, init_conn=True) mck_process.assert_called_once_with( self.data.array, self.data.perf_port_groups) self.assertIn(port_group, self.data.perf_port_groups) def test_select_port_group_for_extra_specs_exception(self): """Test _select_port_group_for_extra_specs exception.""" self.assertRaises( exception.VolumeBackendAPIException, self.common._select_port_group_for_extra_specs, {}, {}) @mock.patch.object( common.PowerMaxCommon, '_add_new_volume_to_volume_group', return_value='my_group') @mock.patch.object(volume_utils, 'is_group_a_cg_snapshot_type', return_value=True) def test_add_to_group(self, mock_cond, mock_group): source_volume = self.data.test_volume extra_specs = self.data.extra_specs rep_driver_data = dict() group_name = self.common._add_to_group( source_volume, self.data, source_volume.name, self.data.test_group_1.fields.get('id'), self.data.test_group_1, extra_specs, rep_driver_data) self.assertEqual('my_group', group_name) mock_group.assert_called_once() @mock.patch.object( common.PowerMaxCommon, '_add_new_volume_to_volume_group', return_value='my_group') @mock.patch.object(volume_utils, 'is_group_a_cg_snapshot_type', return_value=True) def test_add_to_group_no_group_obj(self, mock_cond, mock_group): source_volume = self.data.test_volume extra_specs = self.data.extra_specs rep_driver_data = dict() group_name = self.common._add_to_group( source_volume, self.data, source_volume.name, self.data.test_group_1.fields.get('id'), None, extra_specs, rep_driver_data) self.assertIsNone(group_name) mock_group.assert_not_called() @mock.patch.object( common.PowerMaxCommon, '_unlink_and_delete_temporary_snapshots') @mock.patch.object(rest.PowerMaxRest, 'find_snap_vx_sessions', return_value=(None, 'tgt_session')) @mock.patch.object(rest.PowerMaxRest, 'is_vol_in_rep_session', return_value=(True, False, False)) def test_cleanup_device_snapvx(self, mck_is_rep, mck_find, mck_unlink): array = self.data.array device_id = self.data.device_id extra_specs = self.data.extra_specs self.common._cleanup_device_snapvx(array, device_id, extra_specs) mck_unlink.assert_called_once_with('tgt_session', array, extra_specs) @mock.patch.object( common.PowerMaxCommon, '_unlink_and_delete_temporary_snapshots') @mock.patch.object(rest.PowerMaxRest, 'is_vol_in_rep_session', return_value=(False, False, False)) def test_cleanup_device_snapvx_no_sessions(self, mck_is_rep, mck_unlink): array = self.data.array device_id = self.data.device_id extra_specs = self.data.extra_specs self.common._cleanup_device_snapvx(array, device_id, extra_specs) mck_unlink.assert_not_called() @mock.patch.object(common.PowerMaxCommon, '_delete_temp_snapshot') @mock.patch.object(common.PowerMaxCommon, '_unlink_snapshot', return_value=True) def test_unlink_and_delete_temporary_snapshots_session_unlinked( self, mck_unlink, mck_delete): session = self.data.snap_tgt_session array = self.data.array extra_specs = self.data.extra_specs self.common._unlink_and_delete_temporary_snapshots( session, array, extra_specs) mck_unlink.assert_called_once_with(session, array, extra_specs) mck_delete.assert_called_once_with(session, array) 
@mock.patch.object(common.PowerMaxCommon, '_delete_temp_snapshot') @mock.patch.object(common.PowerMaxCommon, '_unlink_snapshot', return_value=False) def test_unlink_and_delete_temporary_snapshots_session_not_unlinked( self, mck_unlink, mck_delete): session = self.data.snap_tgt_session array = self.data.array extra_specs = self.data.extra_specs self.common._unlink_and_delete_temporary_snapshots( session, array, extra_specs) mck_unlink.assert_called_once_with(session, array, extra_specs) mck_delete.assert_not_called() @mock.patch.object(provision.PowerMaxProvision, 'unlink_snapvx_tgt_volume') @mock.patch.object(rest.PowerMaxRest, 'get_volume_snap', side_effect=[tpd.PowerMaxData.priv_snap_response.get( 'snapshotSrcs')[0], None]) def test_unlink_temp_snapshot(self, mck_get, mck_unlink): array = self.data.array extra_specs = self.data.extra_specs session = self.data.snap_tgt_session source = session.get('source_vol_id') target = session.get('target_vol_id') snap_name = session.get('snap_name') snap_id = session.get('snapid') loop = False is_unlinked = self.common._unlink_snapshot(session, array, extra_specs) mck_unlink.assert_called_once_with( array, target, source, snap_name, extra_specs, snap_id, loop) self.assertTrue(is_unlinked) @mock.patch.object(provision.PowerMaxProvision, 'unlink_snapvx_tgt_volume') @mock.patch.object(rest.PowerMaxRest, 'get_volume_snap', return_value=tpd.PowerMaxData.priv_snap_response.get( 'snapshotSrcs')[0]) def test_unlink_temp_snapshot_not_unlinked(self, mck_get, mck_unlink): array = self.data.array extra_specs = self.data.extra_specs session = self.data.snap_tgt_session source = session.get('source_vol_id') target = session.get('target_vol_id') snap_name = session.get('snap_name') snap_id = session.get('snapid') loop = False is_unlinked = self.common._unlink_snapshot(session, array, extra_specs) mck_unlink.assert_called_once_with( array, target, source, snap_name, extra_specs, snap_id, loop) self.assertFalse(is_unlinked) @mock.patch.object(provision.PowerMaxProvision, 'delete_temp_volume_snap') @mock.patch.object(rest.PowerMaxRest, 'get_volume_snap', return_value=dict()) def test_delete_temp_snapshot(self, mck_get, mck_delete): session = self.data.snap_tgt_session array = self.data.array snap_name = session.get('snap_name') source = session.get('source_vol_id') snap_id = session.get('snapid') self.common._delete_temp_snapshot(session, array) mck_delete.assert_called_once_with(array, snap_name, source, snap_id) @mock.patch.object(provision.PowerMaxProvision, 'delete_temp_volume_snap') @mock.patch.object(rest.PowerMaxRest, 'get_volume_snap', return_value={'linkedDevices': 'details'}) def test_delete_temp_snapshot_is_linked(self, mck_get, mck_delete): session = self.data.snap_tgt_session array = self.data.array self.common._delete_temp_snapshot(session, array) mck_delete.assert_not_called() def test_get_replication_flags(self): rf = self.common._get_replication_flags( self.data.extra_specs, self.data.rep_extra_specs) self.assertFalse(rf.was_rep_enabled) self.assertTrue(rf.is_rep_enabled) self.assertFalse(rf.backend_ids_differ) self.assertEqual('Synchronous', rf.rep_mode) self.assertEqual('Diamond', rf.target_extra_specs.get('slo')) @mock.patch.object( common.PowerMaxCommon, 'configure_volume_replication', return_value=('first_vol_in_rdf_group', True, tpd.PowerMaxData.rep_info_dict, tpd.PowerMaxData.rep_extra_specs_mgmt, False)) def test_prep_non_rep_to_rep(self, mck_vol_rep): volume = fake_volume.fake_volume_obj( context='cxt', provider_location=None) nrr = 
self.common._prep_non_rep_to_rep( self.data.array, self.data.device_id, volume, False, True, False, self.data.rep_extra_specs_rep_config) self.assertIsInstance(nrr.model_update, dict) self.assertFalse(nrr.rdf_pair_created) self.assertIsInstance(nrr.rep_extra_specs, dict) self.assertIsInstance(nrr.rep_info_dict, dict) self.assertFalse(nrr.resume_target_sg) self.assertEqual('first_vol_in_rdf_group', nrr.rep_status) @mock.patch.object( common.PowerMaxCommon, 'break_rdf_device_pair_session', return_value=(tpd.PowerMaxData.rep_extra_specs_mgmt, True)) def test_prep_rep_to_non_rep(self, mock_break): volume = fake_volume.fake_volume_obj( context='cxt', provider_location=None) rnr = self.common._prep_rep_to_non_rep( self.data.array, self.data.device_id, 'my_vol', volume, True, False, False, self.data.extra_specs) self.assertIsInstance(rnr.model_update, dict) self.assertIsInstance(rnr.resume_original_sg_dict, dict) self.assertTrue(rnr.rdf_pair_broken) self.assertTrue(rnr.resume_original_sg) self.assertFalse(rnr.is_partitioned) @mock.patch.object(common.PowerMaxCommon, '_cleanup_device_snapvx') @mock.patch.object(rest.PowerMaxRest, 'get_volume_snapshot_list', return_value=[]) @mock.patch.object(rest.PowerMaxRest, 'find_snap_vx_sessions', side_effect=[(None, tpd.PowerMaxData.snap_tgt_session), (None, None)]) def test_cleanup_device_retry(self, mock_snapvx, mock_ss_list, mock_clean): self.common._cleanup_device_retry( self.data.array, self.data.device_id, self.data.extra_specs) self.assertEqual(2, mock_clean.call_count) @mock.patch.object(rest.PowerMaxRest, 'find_volume_device_id', return_value=[tpd.PowerMaxData.device_id, tpd.PowerMaxData.device_id2]) def test_get_device_id_from_identifier_list(self, mock_dev_id): ret_dev = self.common._get_device_id_from_identifier( self.data.array, 'vol', self.data.device_id) self.assertEqual(self.data.device_id, ret_dev) @mock.patch.object(rest.PowerMaxRest, 'find_volume_device_id', return_value=tpd.PowerMaxData.device_id2) def test_get_device_id_from_identifier_wrong(self, mock_dev_id): ret_dev = self.common._get_device_id_from_identifier( self.data.array, 'vol', self.data.device_id) self.assertEqual(self.data.device_id2, ret_dev) @mock.patch.object(rest.PowerMaxRest, 'find_volume_device_id', return_value=tpd.PowerMaxData.device_id) def test_get_device_id_from_identifier_same(self, mock_dev_id): ret_dev = self.common._get_device_id_from_identifier( self.data.array, 'vol', self.data.device_id) self.assertIsNone(ret_dev) @mock.patch.object(rest.PowerMaxRest, 'rename_volume') @mock.patch.object(rest.PowerMaxRest, 'find_volume_identifier', return_value='vol') @mock.patch.object(rest.PowerMaxRest, 'find_volume_device_id', return_value=tpd.PowerMaxData.device_id) def test_reset_identifier_on_rollback_rename( self, mock_dev, mock_ident, mock_rename): self.common._reset_identifier_on_rollback(self.data.array, 'vol') mock_rename.assert_called_once() @mock.patch.object(rest.PowerMaxRest, 'rename_volume') @mock.patch.object(rest.PowerMaxRest, 'find_volume_identifier', return_value='diff_vol_name') @mock.patch.object(rest.PowerMaxRest, 'find_volume_device_id', return_value=tpd.PowerMaxData.device_id) def test_reset_identifier_on_rollback_no_rename( self, mock_dev, mock_ident, mock_rename): self.common._reset_identifier_on_rollback(self.data.array, 'vol') mock_rename.assert_not_called() @mock.patch.object(common.PowerMaxCommon, '_cleanup_device_snapvx') @mock.patch.object( provision.PowerMaxProvision, 'delete_volume_from_srp', side_effect=[exception.VolumeBackendAPIException, 
None]) def test_test_delete_from_srp(self, mock_del, mock_clean): self.common._delete_from_srp( self.data.array, 'vol_name', self.data.device_id, self.data.extra_specs) mock_clean.assert_called_once() @mock.patch.object(rest.PowerMaxRest, 'srdf_create_device_pair', return_value={ 'tgt_device': tpd.PowerMaxData.device_id2}) @mock.patch.object(rest.PowerMaxRest, 'get_rdf_group', return_value=tpd.PowerMaxData.rdf_group_details) @mock.patch.object( common.PowerMaxCommon, '_get_replication_extra_specs', return_value=tpd.PowerMaxData.rep_extra_specs_rep_config) @mock.patch.object(common.PowerMaxCommon, 'get_rdf_details', return_value=(10, tpd.PowerMaxData.remote_array)) def test_configure_volume_replication_srp_same( self, mock_rdf, mock_res, mock_rdf_grp, mock_pair): volume = fake_volume.fake_volume_obj( context='cxt', provider_location=None) with mock.patch.object( self.masking, 'get_or_create_default_storage_group') as mock_sg: self.common.configure_volume_replication( self.data.array, volume, self.data.device_id, self.data.rep_extra_specs_rep_config) mock_sg.assert_called_with( self.data.remote_array, self.data.srp, 'Diamond', 'DSS', self.data.rep_extra_specs_rep_config, False, is_re=True, rep_mode='Synchronous') @mock.patch.object(rest.PowerMaxRest, 'srdf_create_device_pair', return_value={ 'tgt_device': tpd.PowerMaxData.device_id2}) @mock.patch.object(rest.PowerMaxRest, 'get_rdf_group', return_value=tpd.PowerMaxData.rdf_group_details) @mock.patch.object( common.PowerMaxCommon, '_get_replication_extra_specs', return_value=tpd.PowerMaxData.rep_extra_specs_rep_config) @mock.patch.object(common.PowerMaxCommon, 'get_rdf_details', return_value=(10, tpd.PowerMaxData.remote_array)) def test_configure_volume_replication_srp_diff( self, mock_rdf, mock_res, mock_rdf_grp, mock_pair): volume = fake_volume.fake_volume_obj( context='cxt', provider_location=None) rep_extra_specs = deepcopy(self.data.rep_extra_specs_rep_config) rep_extra_specs.update({'srp': 'REMOTE_SRP'}) with mock.patch.object( self.masking, 'get_or_create_default_storage_group') as mock_sg: self.common.configure_volume_replication( self.data.array, volume, self.data.device_id, self.data.rep_extra_specs_rep_config) mock_sg.assert_called_with( self.data.remote_array, self.data.srp, 'Diamond', 'DSS', self.data.rep_extra_specs_rep_config, False, is_re=True, rep_mode='Synchronous') def test_get_connect_timeout_from_cinder_config(self): kwargs_expected = ( {'RestServerIp': '1.1.1.1', 'RestServerPort': 3448, 'RestUserName': 'smc', 'RestPassword': 'smc', 'SSLVerify': False, 'SerialNumber': self.data.array, 'srpName': 'SRP_1', 'PortGroup': [self.data.port_group_name_i], 'RestAPIConnectTimeout': 120, 'RestAPIReadTimeout': 30}) configuration = tpfo.FakeConfiguration( None, 'CommonTests', 1, 1, san_ip='1.1.1.1', san_login='smc', powermax_array=self.data.array, powermax_srp='SRP_1', san_password='smc', san_api_port=3448, powermax_port_groups=[self.data.port_group_name_i]) configuration.set_rest_api_connect_timeout(120) self.mock_object(self.common, 'configuration', configuration) kwargs_returned = self.common.get_attributes_from_cinder_config() self.assertEqual(kwargs_expected, kwargs_returned) def test_get_read_timeout_from_cinder_config(self): kwargs_expected = ( {'RestServerIp': '1.1.1.1', 'RestServerPort': 3448, 'RestUserName': 'smc', 'RestPassword': 'smc', 'SSLVerify': False, 'SerialNumber': self.data.array, 'srpName': 'SRP_1', 'PortGroup': [self.data.port_group_name_i], 'RestAPIConnectTimeout': 30, 'RestAPIReadTimeout': 120}) configuration = 
tpfo.FakeConfiguration( None, 'CommonTests', 1, 1, san_ip='1.1.1.1', san_login='smc', powermax_array=self.data.array, powermax_srp='SRP_1', san_password='smc', san_api_port=3448, powermax_port_groups=[self.data.port_group_name_i]) configuration.set_rest_api_read_timeout(120) self.mock_object(self.common, 'configuration', configuration) kwargs_returned = self.common.get_attributes_from_cinder_config() self.assertEqual(kwargs_expected, kwargs_returned) def test_get_connect_and_read_timeout_from_cinder_config(self): kwargs_expected = ( {'RestServerIp': '1.1.1.1', 'RestServerPort': 3448, 'RestUserName': 'smc', 'RestPassword': 'smc', 'SSLVerify': False, 'SerialNumber': self.data.array, 'srpName': 'SRP_1', 'PortGroup': [self.data.port_group_name_i], 'RestAPIConnectTimeout': 90, 'RestAPIReadTimeout': 90}) configuration = tpfo.FakeConfiguration( None, 'CommonTests', 1, 1, san_ip='1.1.1.1', san_login='smc', powermax_array=self.data.array, powermax_srp='SRP_1', san_password='smc', san_api_port=3448, powermax_port_groups=[self.data.port_group_name_i]) configuration.set_rest_api_connect_timeout(90) configuration.set_rest_api_read_timeout(90) self.mock_object(self.common, 'configuration', configuration) kwargs_returned = self.common.get_attributes_from_cinder_config() self.assertEqual(kwargs_expected, kwargs_returned) @mock.patch.object(common.PowerMaxCommon, '_cleanup_device_snapvx') @mock.patch.object(rest.PowerMaxRest, 'get_volume_snapshot_list', side_effect=([{'snapshotName': 'temp-clone-snapshot'}], [])) @mock.patch.object(rest.PowerMaxRest, 'find_snap_vx_sessions', side_effect=[(None, None)]) def test_cleanup_device_retry_1(self, mock_snapvx, mock_ss_list, mock_clean): self.common._cleanup_device_retry( self.data.array, self.data.device_id, self.data.extra_specs) self.assertEqual(2, mock_ss_list.call_count) self.assertEqual(1, mock_snapvx.call_count) self.assertEqual(2, mock_clean.call_count) @mock.patch.object(common.PowerMaxCommon, '_cleanup_device_snapvx') @mock.patch.object(rest.PowerMaxRest, 'get_volume_snapshot_list', return_value=[{'snapshotName': 'temp-clone-snapshot'}]) @mock.patch.object(rest.PowerMaxRest, 'find_snap_vx_sessions', side_effect=[(None, None)]) def test_cleanup_device_retry_2(self, mock_snapvx, mock_ss_list, mock_clean): self.assertRaises( exception.VolumeBackendAPIException, self.common._cleanup_device_retry, self.data.array, self.data.device_id, self.data.extra_specs) self.assertEqual(7, mock_ss_list.call_count) self.assertEqual(0, mock_snapvx.call_count) self.assertEqual(7, mock_clean.call_count) @mock.patch.object(rest.PowerMaxRest, 'get_port_ids', return_value=['OR-1C:001']) @mock.patch.object(rest.PowerMaxRest, 'get_nvme_tcp_ip_address', return_value=(['10.10.10.1'], ['10.10.10.2'])) @mock.patch.object(common.PowerMaxCommon, 'find_host_lun_id', return_value=(tpd.PowerMaxData.nvme_tcp_device_info, False)) def test_initialize_nvme_connection(self, mock_port, mock_ip, mock_lun_id): volume = self.data.test_volume connector = self.data.connector extra_specs = deepcopy(self.data.extra_specs_intervals_set) extra_specs[utils.PORTGROUPNAME] = self.data.port_group_name_nt with mock.patch.object( self.nvme_tcp_common, '_initial_setup', return_value=extra_specs) as mck_setup: device_info_dict = self.nvme_tcp_common.initialize_connection( volume, connector) self.assertEqual({'array': '000197800123', 'device_id': '0027C', 'hostlunid': 1, 'ips': [['10.10.10.1'], ['10.10.10.2']], 'maskingview': 'OS-HostX-NT-VME-PG1558b4-MV', 'target_nqn': 'nqn.1988-11.com.dell:' 
'PowerMax_2500:00:000120001602'}, device_info_dict) mock_port.assert_called_once() mock_ip.assert_called_once() mock_lun_id.assert_called_once() mck_setup.assert_called_once() def test_get_target_nqn(self): fake_discover_json = json.dumps(self.data.nvme_tcp_discover_json) mock_connector = mock.Mock() mock_connector.run_nvme_cli.return_value = (fake_discover_json, None) ips = [('172.16.22.1', 4420, "tcp")] nqn = self.nvme_tcp_common.get_target_nqn(ips, mock_connector) self.assertEqual( 'nqn.1988-11.com.dell:PowerMax_2500:00:000120001602', nqn ) mock_connector.run_nvme_cli.assert_called_once_with( ['discover', '-t', 'tcp', '-a', '172.16.22.1', '-s', utils.POWERMAX_NVME_TCP_PORT, '-o', 'json'] ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/volume/drivers/dell_emc/powermax/test_powermax_fc.py0000664000175000017500000003775600000000000031014 0ustar00zuulzuul00000000000000# Copyright (c) 2020 Dell Inc. or its subsidiaries. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import copy from unittest import mock from cinder import exception from cinder.tests.unit import test from cinder.tests.unit.volume.drivers.dell_emc.powermax import ( powermax_data as tpd) from cinder.tests.unit.volume.drivers.dell_emc.powermax import ( powermax_fake_objects as tpfo) from cinder.volume.drivers.dell_emc.powermax import common from cinder.volume.drivers.dell_emc.powermax import fc from cinder.volume.drivers.dell_emc.powermax import rest from cinder.volume import volume_utils from cinder.zonemanager import utils as fczm_utils class PowerMaxFCTest(test.TestCase): def setUp(self): self.data = tpd.PowerMaxData() super(PowerMaxFCTest, self).setUp() self.mock_object(volume_utils, 'get_max_over_subscription_ratio') self.configuration = tpfo.FakeConfiguration( None, 'FCTests', 1, 1, san_ip='1.1.1.1', san_login='smc', powermax_array=self.data.array, powermax_srp='SRP_1', san_password='smc', san_api_port=8443, powermax_port_groups=[self.data.port_group_name_i]) self.mock_object(rest.PowerMaxRest, '_establish_rest_session', return_value=tpfo.FakeRequestsSession()) driver = fc.PowerMaxFCDriver(configuration=self.configuration) self.driver = driver self.common = self.driver.common self.masking = self.common.masking self.utils = self.common.utils self.mock_object( self.utils, 'get_volumetype_extra_specs', return_value=copy.deepcopy(self.data.vol_type_extra_specs)) def test_create_volume(self): with mock.patch.object(self.common, 'create_volume') as mock_create: self.driver.create_volume(self.data.test_volume) mock_create.assert_called_once_with(self.data.test_volume) def test_create_volume_from_snapshot(self): volume = self.data.test_clone_volume snapshot = self.data.test_snapshot with mock.patch.object( self.common, 'create_volume_from_snapshot') as mock_create: self.driver.create_volume_from_snapshot(volume, snapshot) mock_create.assert_called_once_with(volume, snapshot) def test_create_cloned_volume(self): volume = 
self.data.test_clone_volume src_volume = self.data.test_volume with mock.patch.object( self.common, 'create_cloned_volume') as mock_create: self.driver.create_cloned_volume(volume, src_volume) mock_create.assert_called_once_with(volume, src_volume) def test_delete_volume(self): with mock.patch.object(self.common, 'delete_volume') as mock_delete: self.driver.delete_volume(self.data.test_volume) mock_delete.assert_called_once_with(self.data.test_volume) def test_create_snapshot(self): with mock.patch.object(self.common, 'create_snapshot') as mock_create: self.driver.create_snapshot(self.data.test_snapshot) mock_create.assert_called_once_with( self.data.test_snapshot, self.data.test_snapshot.volume) def test_delete_snapshot(self): with mock.patch.object(self.common, 'delete_snapshot') as mock_delete: self.driver.delete_snapshot(self.data.test_snapshot) mock_delete.assert_called_once_with( self.data.test_snapshot, self.data.test_snapshot.volume) def test_initialize_connection(self): with mock.patch.object( self.common, 'initialize_connection', return_value=self.data.fc_device_info) as mock_initialize: with mock.patch.object( self.driver, 'populate_data') as mock_populate: self.driver.initialize_connection( self.data.test_volume, self.data.connector) mock_initialize.assert_called_once_with( self.data.test_volume, self.data.connector) mock_populate.assert_called_once_with( self.data.fc_device_info, self.data.test_volume, self.data.connector) def test_populate_data(self): with mock.patch.object(self.driver, '_build_initiator_target_map', return_value=([], {})) as mock_build: ref_data = { 'driver_volume_type': 'fibre_channel', 'data': {'target_lun': self.data.fc_device_info['hostlunid'], 'target_discovered': True, 'target_wwn': [], 'discard': True, 'initiator_target_map': {}}} data = self.driver.populate_data(self.data.fc_device_info, self.data.test_volume, self.data.connector) self.assertEqual(ref_data, data) mock_build.assert_called_once_with( self.data.test_volume, self.data.connector, self.data.fc_device_info) def test_terminate_connection(self): with mock.patch.object( self.common, 'terminate_connection') as mock_terminate: self.driver.terminate_connection( self.data.test_volume, self.data.connector) mock_terminate.assert_called_once_with( self.data.test_volume, self.data.connector) def test_terminate_connection_no_zoning_mappings(self): with mock.patch.object(self.driver, '_get_zoning_mappings', return_value=None): with mock.patch.object( self.common, 'terminate_connection') as mock_terminate: self.driver.terminate_connection(self.data.test_volume, self.data.connector) mock_terminate.assert_not_called() def test_get_zoning_mappings(self): ref_mappings = self.data.zoning_mappings zoning_mappings = self.driver._get_zoning_mappings( self.data.test_volume, copy.deepcopy(self.data.connector)) self.assertEqual(ref_mappings, zoning_mappings) # Legacy vol zoning_mappings2 = self.driver._get_zoning_mappings( self.data.test_legacy_vol, copy.deepcopy(self.data.connector)) self.assertEqual(ref_mappings, zoning_mappings2) def test_get_zoning_mappings_no_mv(self): with mock.patch.object(self.common, 'get_masking_views_from_volume', return_value=(None, False)): zoning_mappings = self.driver._get_zoning_mappings( self.data.test_volume, self.data.connector) self.assertEqual({}, zoning_mappings) @mock.patch.object( common.PowerMaxCommon, 'get_masking_views_from_volume', side_effect = ([(None, False), ([tpd.PowerMaxData.masking_view_name_f], False)])) def test_get_zoning_mappings_retry_backward_compatibility( 
self, mock_views): with mock.patch.object(self.common.utils, 'get_host_name_label', return_value=None) as mock_label: self.driver._get_zoning_mappings( self.data.test_volume, self.data.connector) self.assertEqual(2, mock_label.call_count) self.assertEqual(2, mock_views.call_count) @mock.patch.object( common.PowerMaxCommon, 'get_masking_views_from_volume', return_value=([tpd.PowerMaxData.masking_view_name_f], True)) def test_get_zoning_mappings_metro(self, mock_mv): ref_mappings = self.data.zoning_mappings_metro zoning_mappings = self.driver._get_zoning_mappings( self.data.test_volume, self.data.connector) self.assertEqual(ref_mappings, zoning_mappings) def test_cleanup_zones_other_vols_mapped(self): ref_data = {'driver_volume_type': 'fibre_channel', 'data': {}} data = self.driver._cleanup_zones(self.data.zoning_mappings) self.assertEqual(ref_data, data) def test_cleanup_zones_no_vols_mapped(self): zoning_mappings = self.data.zoning_mappings ref_data = {'driver_volume_type': 'fibre_channel', 'data': {'target_wwn': zoning_mappings['target_wwns'], 'initiator_target_map': zoning_mappings['init_targ_map']}} with mock.patch.object(self.common, 'get_common_masking_views', return_value=[]): data = self.driver._cleanup_zones(self.data.zoning_mappings) self.assertEqual(ref_data, data) def test_build_initiator_target_map_default(self): ref_target_map = {'123456789012345': ['543210987654321'], '123456789054321': ['123450987654321']} with mock.patch.object(fczm_utils, 'create_lookup_service', return_value=tpfo.FakeLookupService()): driver = fc.PowerMaxFCDriver(configuration=self.configuration) with mock.patch.object(driver.common, 'get_target_wwns_from_masking_view', return_value=(self.data.target_wwns, [])): targets, target_map = driver._build_initiator_target_map( self.data.test_volume, self.data.connector) self.assertEqual(ref_target_map, target_map) def test_build_initiator_target_map_load_balanced(self): init_wwns = self.data.connector.get('wwpns') init_a, init_b = init_wwns[0], init_wwns[1] self.driver.performance.config = self.data.performance_config with mock.patch.object( self.common, 'get_target_wwns_from_masking_view', return_value=(self.data.target_wwns_multi, [])): targets, target_map = self.driver._build_initiator_target_map( self.data.test_volume, self.data.connector, device_info=self.data.fc_device_info) self.assertEqual(1, len(target_map.get(init_a))) self.assertEqual(1, len(target_map.get(init_b))) self.assertTrue( len(target_map.get(init_a)) < len(self.data.target_wwns_multi)) self.assertTrue( len(target_map.get(init_b)) < len(self.data.target_wwns_multi)) self.assertEqual(self.data.wwpn1, targets[0]) def test_build_initiator_target_map_load_balanced_exception(self): ref_target_map = {'123456789012345': self.data.target_wwns_multi, '123456789054321': self.data.target_wwns_multi} self.driver.performance.config = self.data.performance_config with mock.patch.object( self.common, 'get_target_wwns_from_masking_view', return_value=(self.data.target_wwns_multi, [])) as mck_wwns: with mock.patch.object( self.driver.performance, 'process_port_load', side_effect=exception.VolumeBackendAPIException('')): targets, target_map = self.driver._build_initiator_target_map( self.data.test_volume, self.data.connector, device_info=self.data.iscsi_device_info) self.assertEqual(ref_target_map, target_map) self.assertEqual(mck_wwns.call_count, 2) def test_extend_volume(self): with mock.patch.object(self.common, 'extend_volume') as mock_extend: self.driver.extend_volume(self.data.test_volume, '3') 
mock_extend.assert_called_once_with(self.data.test_volume, '3') def test_get_volume_stats(self): with mock.patch.object( self.driver, '_update_volume_stats') as mock_update: # with refresh self.driver.get_volume_stats(True) # set fake stats self.driver._stats['driver_version'] = self.driver.VERSION # no refresh self.driver.get_volume_stats() mock_update.assert_called_once_with() def test_update_volume_stats(self): with mock.patch.object(self.common, 'update_volume_stats', return_value={}) as mock_update: self.driver._update_volume_stats() mock_update.assert_called_once_with() def test_check_for_setup_error(self): self.driver.check_for_setup_error() def test_ensure_export(self): self.driver.ensure_export('context', 'volume') def test_create_export(self): self.driver.create_export('context', 'volume', 'connector') def test_remove_export(self): self.driver.remove_export('context', 'volume') def test_check_for_export(self): self.driver.check_for_export('context', 'volume_id') def test_manage_existing(self): with mock.patch.object(self.common, 'manage_existing', return_value={}) as mock_manage: external_ref = {u'source-name': u'00002'} self.driver.manage_existing(self.data.test_volume, external_ref) mock_manage.assert_called_once_with( self.data.test_volume, external_ref) def test_manage_existing_get_size(self): with mock.patch.object(self.common, 'manage_existing_get_size', return_value='1') as mock_manage: external_ref = {u'source-name': u'00002'} self.driver.manage_existing_get_size( self.data.test_volume, external_ref) mock_manage.assert_called_once_with( self.data.test_volume, external_ref) def test_unmanage_volume(self): with mock.patch.object(self.common, 'unmanage', return_value={}) as mock_unmanage: self.driver.unmanage(self.data.test_volume) mock_unmanage.assert_called_once_with( self.data.test_volume) def test_retype(self): host = {'host': self.data.new_host} new_type = {'extra_specs': {}} with mock.patch.object(self.common, 'retype', return_value=True) as mck_retype: self.driver.retype({}, self.data.test_volume, new_type, '', host) mck_retype.assert_called_once_with( self.data.test_volume, new_type, host) def test_failover_host(self): with mock.patch.object( self.common, 'failover', return_value=(self.data.remote_array, [], [])) as mock_fo: self.driver.failover_host(self.data.ctx, [self.data.test_volume]) mock_fo.assert_called_once_with([self.data.test_volume], None, None) def test_enable_replication(self): with mock.patch.object( self.common, 'enable_replication') as mock_er: self.driver.enable_replication( self.data.ctx, self.data.test_group, [self.data.test_volume]) mock_er.assert_called_once() def test_disable_replication(self): with mock.patch.object( self.common, 'disable_replication') as mock_dr: self.driver.disable_replication( self.data.ctx, self.data.test_group, [self.data.test_volume]) mock_dr.assert_called_once() def test_failover_replication(self): with mock.patch.object( self.common, 'failover_replication') as mock_fo: self.driver.failover_replication( self.data.ctx, self.data.test_group, [self.data.test_volume]) mock_fo.assert_called_once() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/volume/drivers/dell_emc/powermax/test_powermax_iscsi.py0000664000175000017500000004522600000000000031525 0ustar00zuulzuul00000000000000# Copyright (c) 2020 Dell Inc. or its subsidiaries. # All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from copy import deepcopy from unittest import mock from cinder import exception from cinder.tests.unit import test from cinder.tests.unit.volume.drivers.dell_emc.powermax import ( powermax_data as tpd) from cinder.tests.unit.volume.drivers.dell_emc.powermax import ( powermax_fake_objects as tpfo) from cinder.volume.drivers.dell_emc.powermax import iscsi from cinder.volume.drivers.dell_emc.powermax import rest from cinder.volume import volume_utils class PowerMaxISCSITest(test.TestCase): def setUp(self): self.data = tpd.PowerMaxData() super(PowerMaxISCSITest, self).setUp() self.mock_object(volume_utils, 'get_max_over_subscription_ratio') configuration = tpfo.FakeConfiguration( None, 'ISCSITests', 1, 1, san_ip='1.1.1.1', san_login='smc', powermax_array=self.data.array, powermax_srp='SRP_1', san_password='smc', san_api_port=8443, powermax_port_groups=[self.data.port_group_name_i]) self.mock_object(rest.PowerMaxRest, '_establish_rest_session', return_value=tpfo.FakeRequestsSession()) driver = iscsi.PowerMaxISCSIDriver(configuration=configuration) self.driver = driver self.common = self.driver.common self.masking = self.common.masking self.utils = self.common.utils self.mock_object( self.utils, 'get_volumetype_extra_specs', return_value=deepcopy(self.data.vol_type_extra_specs)) def test_create_volume(self): with mock.patch.object(self.common, 'create_volume') as mock_create: self.driver.create_volume(self.data.test_volume) mock_create.assert_called_once_with( self.data.test_volume) def test_create_volume_from_snapshot(self): volume = self.data.test_clone_volume snapshot = self.data.test_snapshot with mock.patch.object( self.common, 'create_volume_from_snapshot') as mock_create: self.driver.create_volume_from_snapshot(volume, snapshot) mock_create.assert_called_once_with( volume, snapshot) def test_create_cloned_volume(self): volume = self.data.test_clone_volume src_volume = self.data.test_volume with mock.patch.object( self.common, 'create_cloned_volume') as mock_create: self.driver.create_cloned_volume(volume, src_volume) mock_create.assert_called_once_with(volume, src_volume) def test_delete_volume(self): with mock.patch.object(self.common, 'delete_volume') as mock_delete: self.driver.delete_volume(self.data.test_volume) mock_delete.assert_called_once_with( self.data.test_volume) def test_create_snapshot(self): with mock.patch.object(self.common, 'create_snapshot') as mock_create: self.driver.create_snapshot(self.data.test_snapshot) mock_create.assert_called_once_with( self.data.test_snapshot, self.data.test_snapshot.volume) def test_delete_snapshot(self): with mock.patch.object(self.common, 'delete_snapshot') as mock_delete: self.driver.delete_snapshot(self.data.test_snapshot) mock_delete.assert_called_once_with( self.data.test_snapshot, self.data.test_snapshot.volume) def test_initialize_connection(self): phys_port = '%(dir)s:%(port)s' % {'dir': self.data.iscsi_dir, 'port': self.data.iscsi_port} ref_dict = {'maskingview': self.data.masking_view_name_f, 
'array': self.data.array, 'hostlunid': 3, 'device_id': self.data.device_id, 'ip_and_iqn': [{'ip': self.data.ip, 'iqn': self.data.initiator, 'physical_port': phys_port}], 'is_multipath': False} self.common.rest.u4p_version = self.data.u4p_version with mock.patch.object(self.driver, 'get_iscsi_dict') as mock_get: with mock.patch.object( self.common, 'get_port_group_from_masking_view', return_value=self.data.port_group_name_i): self.driver.initialize_connection(self.data.test_volume, self.data.connector) mock_get.assert_called_once_with( ref_dict, self.data.test_volume) def test_get_iscsi_dict_success(self): self.common.rest.u4p_version = self.data.u4p_version ip_and_iqn = self.common._find_ip_and_iqns( self.data.array, self.data.port_group_name_i) host_lun_id = self.data.iscsi_device_info['hostlunid'] volume = self.data.test_volume device_info = self.data.iscsi_device_info ref_data = {'driver_volume_type': 'iscsi', 'data': {}} with mock.patch.object( self.driver, 'vmax_get_iscsi_properties', return_value={}) as mock_get: data = self.driver.get_iscsi_dict(device_info, volume) self.assertEqual(ref_data, data) mock_get.assert_called_once_with( self.data.array, volume, ip_and_iqn, True, host_lun_id, None, None) def test_get_iscsi_dict_exception(self): device_info = {'ip_and_iqn': ''} self.assertRaises(exception.VolumeBackendAPIException, self.driver.get_iscsi_dict, device_info, self.data.test_volume) def test_get_iscsi_dict_metro(self): self.common.rest.u4p_version = self.data.u4p_version ip_and_iqn = self.common._find_ip_and_iqns( self.data.array, self.data.port_group_name_i) host_lun_id = self.data.iscsi_device_info_metro['hostlunid'] volume = self.data.test_volume device_info = self.data.iscsi_device_info_metro ref_data = {'driver_volume_type': 'iscsi', 'data': {}} with mock.patch.object(self.driver, 'vmax_get_iscsi_properties', return_value={}) as mock_get: data = self.driver.get_iscsi_dict(device_info, volume) self.assertEqual(ref_data, data) mock_get.assert_called_once_with( self.data.array, volume, ip_and_iqn, True, host_lun_id, self.data.iscsi_device_info_metro['metro_ip_and_iqn'], self.data.iscsi_device_info_metro['metro_hostlunid']) def test_vmax_get_iscsi_properties_one_target_no_auth(self): vol = deepcopy(self.data.test_volume) self.common.rest.u4p_version = self.data.u4p_version ip_and_iqn = self.common._find_ip_and_iqns( self.data.array, self.data.port_group_name_i) host_lun_id = self.data.iscsi_device_info['hostlunid'] ref_properties = { 'target_discovered': True, 'target_iqn': ip_and_iqn[0]['iqn'].split(',')[0], 'target_portal': ip_and_iqn[0]['ip'] + ':3260', 'target_lun': host_lun_id, 'discard': True, 'volume_id': self.data.test_volume.id} iscsi_properties = self.driver.vmax_get_iscsi_properties( self.data.array, vol, ip_and_iqn, True, host_lun_id, [], None) self.assertEqual(type(ref_properties), type(iscsi_properties)) self.assertEqual(ref_properties, iscsi_properties) def test_vmax_get_iscsi_properties_multiple_targets_random_select(self): ip_and_iqn = [{'ip': self.data.ip, 'iqn': self.data.initiator}, {'ip': self.data.ip2, 'iqn': self.data.iqn}] host_lun_id = self.data.iscsi_device_info['hostlunid'] iscsi_properties = self.driver.vmax_get_iscsi_properties( self.data.array, self.data.test_volume, ip_and_iqn, True, host_lun_id, [], None) iscsi_tgt_iqn = iscsi_properties.get('target_iqn') iscsi_tgt_portal = iscsi_properties.get('target_portal') self.assertIn(iscsi_tgt_iqn, [self.data.initiator, self.data.iqn]) self.assertIn(iscsi_tgt_portal, [self.data.ip + ":3260", 
self.data.ip2 + ":3260"]) for ip_iqn in ip_and_iqn: if ip_iqn['ip'] + ":3260" == iscsi_tgt_portal: self.assertEqual(iscsi_tgt_iqn, ip_iqn.get('iqn')) def test_vmax_get_iscsi_properties_multiple_targets_load_balance(self): ip_and_iqn = [ {'ip': self.data.ip, 'iqn': self.data.initiator, 'physical_port': self.data.perf_ports[0]}, {'ip': self.data.ip2, 'iqn': self.data.iqn, 'physical_port': self.data.perf_ports[1]}] host_lun_id = self.data.iscsi_device_info['hostlunid'] self.driver.performance.config = self.data.performance_config ref_tgt_map = {} for tgt in ip_and_iqn: ref_tgt_map.update({ tgt['physical_port']: {'ip': tgt['ip'], 'iqn': tgt['iqn']}}) with mock.patch.object( self.driver.performance, 'process_port_load', side_effect=( self.driver.performance.process_port_load)) as mck_p: iscsi_properties = self.driver.vmax_get_iscsi_properties( self.data.array, self.data.test_volume, ip_and_iqn, False, host_lun_id, None, None) mck_p.assert_called_once_with(self.data.array, ref_tgt_map.keys()) iscsi_tgt_iqn = iscsi_properties.get('target_iqn') iscsi_tgt_portal = iscsi_properties.get('target_portal') self.assertIn(iscsi_tgt_iqn, [self.data.initiator, self.data.iqn]) self.assertIn(iscsi_tgt_portal, [self.data.ip + ":3260", self.data.ip2 + ":3260"]) for ip_iqn in ip_and_iqn: if ip_iqn['ip'] + ":3260" == iscsi_tgt_portal: self.assertEqual(iscsi_tgt_iqn, ip_iqn.get('iqn')) def test_vmax_get_iscsi_properties_multiple_targets_load_balance_exc(self): ip_and_iqn = [ {'ip': self.data.ip, 'iqn': self.data.initiator}, {'ip': self.data.ip2, 'iqn': self.data.iqn}] host_lun_id = self.data.iscsi_device_info['hostlunid'] self.driver.performance.config = self.data.performance_config with mock.patch.object( self.driver.performance, 'process_port_load', side_effect=( self.driver.performance.process_port_load)) as mck_p: iscsi_properties = self.driver.vmax_get_iscsi_properties( self.data.array, self.data.test_volume, ip_and_iqn, False, host_lun_id, None, None) mck_p.assert_not_called() iscsi_tgt_iqn = iscsi_properties.get('target_iqn') iscsi_tgt_portal = iscsi_properties.get('target_portal') self.assertIn(iscsi_tgt_iqn, [self.data.initiator, self.data.iqn]) self.assertIn(iscsi_tgt_portal, [self.data.ip + ":3260", self.data.ip2 + ":3260"]) for ip_iqn in ip_and_iqn: if ip_iqn['ip'] + ":3260" == iscsi_tgt_portal: self.assertEqual(iscsi_tgt_iqn, ip_iqn.get('iqn')) def test_vmax_get_iscsi_properties_auth(self): vol = deepcopy(self.data.test_volume) backup_conf = self.common.configuration configuration = tpfo.FakeConfiguration( None, 'ISCSITests', 1, 1, san_ip='1.1.1.1', san_login='smc', powermax_array=self.data.array, powermax_srp='SRP_1', san_password='smc', san_rest_port=8443, use_chap_auth=True, chap_username='auth_username', chap_password='auth_secret', powermax_port_groups=[self.data.port_group_name_i]) self.driver.configuration = configuration ip_and_iqn = [{'ip': self.data.ip, 'iqn': self.data.initiator}, {'ip': self.data.ip, 'iqn': self.data.iqn}] host_lun_id = self.data.iscsi_device_info['hostlunid'] iscsi_properties = self.driver.vmax_get_iscsi_properties( self.data.array, vol, ip_and_iqn, True, host_lun_id, None, None) self.assertIn('auth_method', iscsi_properties.keys()) self.assertIn('auth_username', iscsi_properties.keys()) self.assertIn('auth_password', iscsi_properties.keys()) self.assertEqual('CHAP', iscsi_properties['auth_method']) self.assertEqual('auth_username', iscsi_properties['auth_username']) self.assertEqual('auth_secret', iscsi_properties['auth_password']) self.driver.configuration = backup_conf 
def test_vmax_get_iscsi_properties_metro(self): ip_and_iqn = [{'ip': self.data.ip, 'iqn': self.data.iqn}] total_ip_list = [{'ip': self.data.ip, 'iqn': self.data.iqn}, {'ip': self.data.ip2, 'iqn': self.data.iqn2}] host_lun_id = self.data.iscsi_device_info['hostlunid'] host_lun_id2 = self.data.iscsi_device_info_metro['metro_hostlunid'] ref_properties = { 'target_portals': ( [t['ip'] + ':3260' for t in total_ip_list]), 'target_iqns': ( [t['iqn'].split(',')[0] for t in total_ip_list]), 'target_luns': [host_lun_id, host_lun_id2], 'target_discovered': True, 'target_iqn': ip_and_iqn[0]['iqn'].split(',')[0], 'target_portal': ip_and_iqn[0]['ip'] + ':3260', 'target_lun': host_lun_id, 'discard': True, 'volume_id': self.data.test_volume.id} iscsi_properties = self.driver.vmax_get_iscsi_properties( self.data.array, self.data.test_volume, ip_and_iqn, True, host_lun_id, self.data.iscsi_device_info_metro['metro_ip_and_iqn'], self.data.iscsi_device_info_metro['metro_hostlunid']) self.assertEqual(ref_properties, iscsi_properties) def test_terminate_connection(self): with mock.patch.object( self.common, 'terminate_connection') as mock_terminate: self.driver.terminate_connection(self.data.test_volume, self.data.connector) mock_terminate.assert_called_once_with( self.data.test_volume, self.data.connector) def test_extend_volume(self): with mock.patch.object( self.common, 'extend_volume') as mock_extend: self.driver.extend_volume(self.data.test_volume, '3') mock_extend.assert_called_once_with(self.data.test_volume, '3') def test_get_volume_stats(self): with mock.patch.object( self.driver, '_update_volume_stats') as mock_update: self.driver.get_volume_stats(True) mock_update.assert_called_once_with() def test_update_volume_stats(self): with mock.patch.object(self.common, 'update_volume_stats', return_value={}) as mock_update: self.driver.get_volume_stats() mock_update.assert_called_once_with() def test_check_for_setup_error(self): self.driver.check_for_setup_error() def test_ensure_export(self): self.driver.ensure_export('context', 'volume') def test_create_export(self): self.driver.create_export('context', 'volume', 'connector') def test_remove_export(self): self.driver.remove_export('context', 'volume') def test_check_for_export(self): self.driver.check_for_export('context', 'volume_id') def test_manage_existing(self): with mock.patch.object(self.common, 'manage_existing', return_value={}) as mock_manage: external_ref = {u'source-name': u'00002'} self.driver.manage_existing(self.data.test_volume, external_ref) mock_manage.assert_called_once_with( self.data.test_volume, external_ref) def test_manage_existing_get_size(self): with mock.patch.object(self.common, 'manage_existing_get_size', return_value='1') as mock_manage: external_ref = {u'source-name': u'00002'} self.driver.manage_existing_get_size( self.data.test_volume, external_ref) mock_manage.assert_called_once_with( self.data.test_volume, external_ref) def test_unmanage_volume(self): with mock.patch.object(self.common, 'unmanage', return_value={}) as mock_unmanage: self.driver.unmanage(self.data.test_volume) mock_unmanage.assert_called_once_with(self.data.test_volume) def test_retype(self): host = {'host': self.data.new_host} new_type = {'extra_specs': {}} with mock.patch.object(self.common, 'retype', return_value=True) as mck_retype: self.driver.retype({}, self.data.test_volume, new_type, '', host) mck_retype.assert_called_once_with( self.data.test_volume, new_type, host) def test_failover_host(self): with mock.patch.object(self.common, 'failover', 
return_value=(None, [], [])) as mock_fo: self.driver.failover_host({}, [self.data.test_volume]) mock_fo.assert_called_once_with([self.data.test_volume], None, None) def test_enable_replication(self): with mock.patch.object(self.common, 'enable_replication') as mock_er: self.driver.enable_replication( self.data.ctx, self.data.test_group, [self.data.test_volume]) mock_er.assert_called_once() def test_disable_replication(self): with mock.patch.object(self.common, 'disable_replication') as mock_dr: self.driver.disable_replication( self.data.ctx, self.data.test_group, [self.data.test_volume]) mock_dr.assert_called_once() def test_failover_replication(self): with mock.patch.object(self.common, 'failover_replication') as mock_fo: self.driver.failover_replication( self.data.ctx, self.data.test_group, [self.data.test_volume]) mock_fo.assert_called_once() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/volume/drivers/dell_emc/powermax/test_powermax_masking.py0000664000175000017500000022726400000000000032050 0ustar00zuulzuul00000000000000# Copyright (c) 2020 Dell Inc. or its subsidiaries. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from copy import deepcopy from unittest import mock from cinder import exception from cinder.tests.unit import test from cinder.tests.unit.volume.drivers.dell_emc.powermax import ( powermax_data as tpd) from cinder.tests.unit.volume.drivers.dell_emc.powermax import ( powermax_fake_objects as tpfo) from cinder.volume.drivers.dell_emc.powermax import common from cinder.volume.drivers.dell_emc.powermax import masking from cinder.volume.drivers.dell_emc.powermax import provision from cinder.volume.drivers.dell_emc.powermax import rest from cinder.volume.drivers.dell_emc.powermax import utils from cinder.volume import volume_utils class PowerMaxMaskingTest(test.TestCase): def setUp(self): self.data = tpd.PowerMaxData() super(PowerMaxMaskingTest, self).setUp() self.mock_object(volume_utils, 'get_max_over_subscription_ratio') self.replication_device = self.data.sync_rep_device configuration = tpfo.FakeConfiguration( None, 'MaskingTests', 1, 1, san_ip='1.1.1.1', san_login='smc', powermax_array=self.data.array, powermax_srp='SRP_1', san_password='smc', san_api_port=8443, powermax_port_groups=[self.data.port_group_name_f], replication_device=self.replication_device) self._gather_info = common.PowerMaxCommon._gather_info self.mock_object(common.PowerMaxCommon, '_get_u4p_failover_info') self.mock_object(common.PowerMaxCommon, '_gather_info') self.mock_object(rest.PowerMaxRest, '_establish_rest_session', return_value=tpfo.FakeRequestsSession()) driver = common.PowerMaxCommon( 'iSCSI', self.data.version, configuration=configuration) driver_fc = common.PowerMaxCommon( 'FC', self.data.version, configuration=configuration) driver_nvme_tcp = common.PowerMaxCommon( 'NVMe-TCP', self.data.version, configuration=configuration) self.driver = driver self.driver_fc = driver_fc 
self.driver_nvme_tcp = driver_nvme_tcp self.mask = self.driver.masking self.extra_specs = deepcopy(self.data.extra_specs) self.extra_specs[utils.PORTGROUPNAME] = self.data.port_group_name_i self.maskingviewdict = self.driver._populate_masking_dict( self.data.test_volume, self.data.connector, self.extra_specs) self.maskingviewdict['extra_specs'] = self.extra_specs self.maskingviewdict[utils.IS_MULTIATTACH] = False self.device_id = self.data.device_id self.volume_name = self.data.volume_details[0]['volume_identifier'] def tearDown(self): super(PowerMaxMaskingTest, self).tearDown() common.PowerMaxCommon._gather_info = self._gather_info def test_sanity_port_group_check_none(self): self.assertRaises( exception.VolumeBackendAPIException, self.driver.masking._sanity_port_group_check, None, self.data.array) @mock.patch.object(rest.PowerMaxRest, 'get_portgroup', return_value=None) def test_sanity_port_group_check_invalid_portgroup(self, mock_pg): self.assertRaises( exception.VolumeBackendAPIException, self.driver.masking._sanity_port_group_check, None, self.data.array) @mock.patch.object( masking.PowerMaxMasking, '_check_director_and_port_status') @mock.patch.object(rest.PowerMaxRest, 'get_portgroup', return_value=tpd.PowerMaxData.portgroup) def test_sanity_port_group_check(self, mock_pg, mock_check): self.driver.masking._sanity_port_group_check( self.data.port_group_name_f, self.data.array) @mock.patch.object(masking.PowerMaxMasking, 'get_or_create_masking_view_and_map_lun') def test_setup_masking_view(self, mock_get_or_create_mv): self.driver.masking.setup_masking_view( self.data.array, self.data.test_volume, self.maskingviewdict, self.extra_specs) mock_get_or_create_mv.assert_called_once() @mock.patch.object(masking.PowerMaxMasking, '_validate_attach', return_value=True) @mock.patch.object(masking.PowerMaxMasking, '_check_adding_volume_to_storage_group') @mock.patch.object(masking.PowerMaxMasking, '_move_vol_from_default_sg', return_value=None) @mock.patch.object(masking.PowerMaxMasking, '_get_or_create_masking_view', side_effect=[None, 'Error in masking view retrieval', exception.VolumeBackendAPIException]) @mock.patch.object(rest.PowerMaxRest, 'get_element_from_masking_view', side_effect=[tpd.PowerMaxData.port_group_name_i, Exception('Exception')]) def test_get_or_create_masking_view_and_map_lun( self, mock_masking_view_element, mock_masking, mock_move, mock_add_volume, mock_validate): rollback_dict = ( self.driver.masking.get_or_create_masking_view_and_map_lun( self.data.array, self.data.test_volume, self.maskingviewdict['maskingview_name'], self.maskingviewdict, self.extra_specs)) self.assertEqual(self.maskingviewdict, rollback_dict) self.assertRaises( exception.VolumeBackendAPIException, self.driver.masking.get_or_create_masking_view_and_map_lun, self.data.array, self.data.test_volume, self.maskingviewdict['maskingview_name'], self.maskingviewdict, self.extra_specs) self.maskingviewdict['slo'] = None self.assertRaises( exception.VolumeBackendAPIException, self.driver.masking.get_or_create_masking_view_and_map_lun, self.data.array, self.data.test_volume, self.maskingviewdict['maskingview_name'], self.maskingviewdict, self.extra_specs) @mock.patch.object(masking.PowerMaxMasking, '_check_adding_volume_to_storage_group', return_value=None) @mock.patch.object( rest.PowerMaxRest, 'move_volume_between_storage_groups', side_effect=[None, exception.VolumeBackendAPIException(data='')]) @mock.patch.object(rest.PowerMaxRest, 'is_volume_in_storagegroup', side_effect=[True, False, True]) def 
test_move_vol_from_default_sg( self, mock_volume_in_sg, mock_move_volume, mock_add): msg = None for x in range(0, 2): msg = self.driver.masking._move_vol_from_default_sg( self.data.array, self.device_id, self.volume_name, self.data.defaultstoragegroup_name, self.data.storagegroup_name_i, self.extra_specs) mock_move_volume.assert_called_once() mock_add.assert_called_once() self.assertIsNone(msg) msg = self.driver.masking._move_vol_from_default_sg( self.data.array, self.device_id, self.volume_name, self.data.defaultstoragegroup_name, self.data.storagegroup_name_i, self.extra_specs) self.assertIsNotNone(msg) @mock.patch.object(rest.PowerMaxRest, 'modify_storage_group', return_value=(200, tpfo.tpd.PowerMaxData.job_list[0])) @mock.patch.object(rest.PowerMaxRest, 'remove_child_sg_from_parent_sg') @mock.patch.object(masking.PowerMaxMasking, 'get_parent_sg_from_child', side_effect=[None, tpd.PowerMaxData.parent_sg_f]) @mock.patch.object( rest.PowerMaxRest, 'get_num_vols_in_sg', side_effect=[2, 1, 1]) def test_move_volume_between_storage_groups( self, mock_num, mock_parent, mock_rm, mck_mod): extra_specs = {'pool_name': u'Diamond+DSS+SRP_1+000197800123', 'slo': 'Diamond', 'workload': 'DSS', 'srp': 'SRP_1', 'array': '000197800123', 'interval': 3, 'retries': 120} for x in range(0, 3): self.driver.masking.move_volume_between_storage_groups( self.data.array, self.data.device_id, self.data.storagegroup_name_i, self.data.storagegroup_name_f, extra_specs) mock_rm.assert_called_once() ref_payload = ( {"executionOption": "ASYNCHRONOUS", "editStorageGroupActionParam": { "moveVolumeToStorageGroupParam": { "volumeId": [self.data.device_id], "storageGroupId": self.data.storagegroup_name_f, "force": 'false'}}}) mck_mod.assert_called_with( self.data.array, self.data.storagegroup_name_i, ref_payload) @mock.patch.object(rest.PowerMaxRest, 'move_volume_between_storage_groups') def test_move_volume_between_storage_groups_same_target(self, mock_mv): src = 'OS-SG' tgt = 'OS-SG' self.driver.masking.move_volume_between_storage_groups( self.data.array, self.data.device_id, src, tgt, self.data.extra_specs) mock_mv.assert_not_called() @mock.patch.object(rest.PowerMaxRest, 'remove_child_sg_from_parent_sg') @mock.patch.object(masking.PowerMaxMasking, 'get_parent_sg_from_child', side_effect=[None, tpd.PowerMaxData.parent_sg_f]) @mock.patch.object(rest.PowerMaxRest, 'move_volume_between_storage_groups') @mock.patch.object( rest.PowerMaxRest, 'get_num_vols_in_sg', return_value=1) def test_force_move_volume_between_storage_groups( self, mock_num, mock_move, mock_parent, mock_rm): self.driver.masking.move_volume_between_storage_groups( self.data.array, self.data.device_id, self.data.storagegroup_name_i, self.data.storagegroup_name_f, self.data.extra_specs, force=True) mock_move.assert_called_once_with( self.data.array, self.data.device_id, self.data.storagegroup_name_i, self.data.storagegroup_name_f, self.data.extra_specs, True) @mock.patch.object( masking.PowerMaxMasking, '_check_director_and_port_status') @mock.patch.object(rest.PowerMaxRest, 'get_masking_view', side_effect=[tpd.PowerMaxData.maskingview, tpd.PowerMaxData.maskingview, None]) @mock.patch.object( masking.PowerMaxMasking, '_validate_existing_masking_view', side_effect=[(tpd.PowerMaxData.maskingview[1]['storageGroupId'], None), (None, 'Error Message')]) @mock.patch.object(masking.PowerMaxMasking, '_create_new_masking_view', return_value=None) def test_get_or_create_masking_view(self, mock_create_mv, mock_validate_mv, mock_get_mv, mock_check): for x in range(0, 3): 
self.driver.masking._get_or_create_masking_view( self.data.array, self.maskingviewdict, self.data.defaultstoragegroup_name, self.extra_specs) mock_create_mv.assert_called_once() @mock.patch.object( masking.PowerMaxMasking, '_get_or_create_initiator_group', side_effect=[(None, 'Initiator group error'), (None, None), (None, None), (None, None), (None, None), (None, None), (None, None), (None, None)]) @mock.patch.object( masking.PowerMaxMasking, '_get_or_create_storage_group', side_effect=['Storage group not found', None, 'Storage group not found', 'Storage group not found', None, None, None, None, None, None, None]) @mock.patch.object( masking.PowerMaxMasking, '_move_vol_from_default_sg', side_effect=['Storage group error', None, 'Storage group error', None]) @mock.patch.object( masking.PowerMaxMasking, 'create_masking_view', return_value=None) def test_create_new_masking_view( self, mock_create_mv, mock_move, mock_create_SG, mock_create_IG): for x in range(0, 6): self.driver.masking._create_new_masking_view( self.data.array, self.maskingviewdict, self.maskingviewdict['maskingview_name'], self.data.defaultstoragegroup_name, self.extra_specs) mock_create_mv.assert_called_once() @mock.patch.object( masking.PowerMaxMasking, '_check_existing_storage_group', side_effect=[(tpd.PowerMaxData.storagegroup_name_i, None), (tpd.PowerMaxData.storagegroup_name_i, None), (None, 'Error Checking existing storage group')]) @mock.patch.object( rest.PowerMaxRest, 'get_element_from_masking_view', return_value=tpd.PowerMaxData.port_group_name_i) @mock.patch.object( masking.PowerMaxMasking, '_check_port_group', side_effect=[(None, None), (None, 'Error checking pg')]) @mock.patch.object( masking.PowerMaxMasking, '_check_existing_initiator_group', return_value=(tpd.PowerMaxData.initiatorgroup_name_i, None)) def test_validate_existing_masking_view( self, mock_check_ig, mock_check_pg, mock_get_mv_element, mock_check_sg): for x in range(0, 3): self.driver.masking._validate_existing_masking_view( self.data.array, self.maskingviewdict, self.maskingviewdict['maskingview_name'], self.data.defaultstoragegroup_name, self.extra_specs) self.assertEqual(3, mock_check_sg.call_count) mock_get_mv_element.assert_called_with( self.data.array, self.maskingviewdict['maskingview_name'], portgroup=True) mock_check_ig.assert_called_once() @mock.patch.object( rest.PowerMaxRest, 'get_storage_group', side_effect=[tpd.PowerMaxData.storagegroup_name_i, None, tpd.PowerMaxData.storagegroup_name_i]) @mock.patch.object( provision.PowerMaxProvision, 'create_storage_group', side_effect=[tpd.PowerMaxData.storagegroup_name_i, None]) def test_get_or_create_storage_group(self, mock_sg, mock_get_sg): for x in range(0, 2): self.driver.masking._get_or_create_storage_group( self.data.array, self.maskingviewdict, self.data.storagegroup_name_i, self.extra_specs) self.assertEqual(3, mock_get_sg.call_count) self.assertEqual(1, mock_sg.call_count) @mock.patch.object( rest.PowerMaxRest, 'get_storage_group', side_effect=[None, tpd.PowerMaxData.storagegroup_name_i]) @mock.patch.object( provision.PowerMaxProvision, 'create_storage_group', side_effect=[tpd.PowerMaxData.storagegroup_name_i]) def test_get_or_create_storage_group_is_parent(self, mock_sg, mock_get_sg): self.driver.masking._get_or_create_storage_group( self.data.array, self.maskingviewdict, self.data.storagegroup_name_i, self.extra_specs, True) self.assertEqual(2, mock_get_sg.call_count) self.assertEqual(1, mock_sg.call_count) @mock.patch.object( rest.PowerMaxRest, 'update_storagegroup_qos') 
@mock.patch.object( rest.PowerMaxRest, 'get_storage_group', return_value=tpd.PowerMaxData.storagegroup_name_i) def test_get_or_create_storage_group_is_parent_qos( self, mock_sg, mock_update_sg): self.driver.masking._get_or_create_storage_group( self.data.array, self.maskingviewdict, self.data.storagegroup_name_i, self.data.extra_specs_qos, True) mock_update_sg.assert_not_called() @mock.patch.object( rest.PowerMaxRest, 'update_storagegroup_qos') @mock.patch.object( rest.PowerMaxRest, 'get_storage_group', return_value=tpd.PowerMaxData.storagegroup_name_i) def test_get_or_create_storage_group_is_child_qos( self, mock_sg, mock_update_sg): self.driver.masking._get_or_create_storage_group( self.data.array, self.maskingviewdict, self.data.storagegroup_name_i, self.data.extra_specs_qos, False) mock_update_sg.assert_called_once() @mock.patch.object(masking.PowerMaxMasking, '_move_vol_from_default_sg', return_value=None) @mock.patch.object(masking.PowerMaxMasking, '_get_or_create_storage_group', return_value=None) @mock.patch.object(rest.PowerMaxRest, 'get_element_from_masking_view', return_value=tpd.PowerMaxData.parent_sg_i) @mock.patch.object(rest.PowerMaxRest, 'is_child_sg_in_parent_sg', side_effect=[True, False]) @mock.patch.object(masking.PowerMaxMasking, '_check_add_child_sg_to_parent_sg', return_value=None) def test_check_existing_storage_group_success( self, mock_add_sg, mock_is_child, mock_get_mv_element, mock_create_sg, mock_move): masking_view_dict = deepcopy(self.data.masking_view_dict) masking_view_dict['extra_specs'] = self.data.extra_specs with mock.patch.object( self.driver.rest, 'get_storage_group', side_effect=[tpd.PowerMaxData.parent_sg_i, tpd.PowerMaxData.storagegroup_name_i]): _, msg = (self.driver.masking._check_existing_storage_group( self.data.array, self.maskingviewdict['maskingview_name'], self.data.defaultstoragegroup_name, masking_view_dict, self.data.extra_specs)) self.assertIsNone(msg) mock_create_sg.assert_not_called() with mock.patch.object(self.driver.rest, 'get_storage_group', side_effect=[ tpd.PowerMaxData.parent_sg_i, None]): _, msg = (self.driver.masking._check_existing_storage_group( self.data.array, self.maskingviewdict['maskingview_name'], self.data.defaultstoragegroup_name, masking_view_dict, self.data.extra_specs)) self.assertIsNone(msg) mock_create_sg.assert_called_once_with( self.data.array, masking_view_dict, tpd.PowerMaxData.storagegroup_name_f, self.data.extra_specs) @mock.patch.object(masking.PowerMaxMasking, '_move_vol_from_default_sg', side_effect=[None, 'Error Message']) @mock.patch.object(rest.PowerMaxRest, 'is_child_sg_in_parent_sg', side_effect=[True, False, False]) @mock.patch.object(rest.PowerMaxRest, 'get_element_from_masking_view', return_value=tpd.PowerMaxData.parent_sg_i) @mock.patch.object(rest.PowerMaxRest, 'get_storage_group', side_effect=[ None, tpd.PowerMaxData.parent_sg_i, None, tpd.PowerMaxData.parent_sg_i, None, tpd.PowerMaxData.parent_sg_i, None]) def test_check_existing_storage_group_failed( self, mock_get_sg, mock_get_mv_element, mock_child, mock_move): masking_view_dict = deepcopy(self.data.masking_view_dict) masking_view_dict['extra_specs'] = self.data.extra_specs for x in range(0, 4): _, msg = (self.driver.masking._check_existing_storage_group( self.data.array, self.maskingviewdict['maskingview_name'], self.data.defaultstoragegroup_name, masking_view_dict, self.data.extra_specs)) self.assertIsNotNone(msg) self.assertEqual(7, mock_get_sg.call_count) self.assertEqual(1, mock_move.call_count) @mock.patch.object( 
masking.PowerMaxMasking, '_check_director_and_port_status') @mock.patch.object( rest.PowerMaxRest, 'get_portgroup', side_effect=([tpd.PowerMaxData.port_group_name_i, None])) def test_check_port_group( self, mock_get_pg, mock_check): for x in range(0, 2): _, msg = self.driver.masking._check_port_group( self.data.array, self.maskingviewdict['maskingview_name']) self.assertIsNotNone(msg) self.assertEqual(2, mock_get_pg.call_count) @mock.patch.object( rest.PowerMaxRest, 'get_initiator_group', side_effect=[None, tpd.PowerMaxData.initiator_group_iscsi]) @mock.patch.object( masking.PowerMaxMasking, '_find_initiator_group', side_effect=[tpd.PowerMaxData.initiatorgroup_name_i, None, None]) @mock.patch.object( masking.PowerMaxMasking, '_create_initiator_group', side_effect=([tpd.PowerMaxData.initiatorgroup_name_i, None])) def test_get_or_create_initiator_group( self, mock_create_ig, mock_find_ig, mock_get_ig): self.driver.masking._get_or_create_initiator_group( self.data.array, self.data.initiatorgroup_name_i, self.data.connector, self.extra_specs) mock_create_ig.assert_not_called() found_init_group_name, msg = ( self.driver.masking._get_or_create_initiator_group( self.data.array, self.data.initiatorgroup_name_i, self.data.connector, self.extra_specs)) self.assertIsNone(msg) found_init_group_name, msg = ( self.driver.masking._get_or_create_initiator_group( self.data.array, self.data.initiatorgroup_name_i, self.data.connector, self.extra_specs)) self.assertIsNone(msg) @mock.patch.object( rest.PowerMaxRest, 'get_initiator_group', return_value=tpd.PowerMaxData.initiator_group_iscsi) @mock.patch.object( masking.PowerMaxMasking, '_find_initiator_group', return_value=None) @mock.patch.object( masking.PowerMaxMasking, '_create_initiator_group') def test_get_or_create_initiator_group_not_logged_in( self, mock_create_ig, mock_find_ig, mock_get_ig): found_init_group, msg = ( self.driver.masking._get_or_create_initiator_group( self.data.array, self.data.initiatorgroup_name_i, self.data.connector, self.extra_specs)) mock_create_ig.assert_not_called() self.assertIsNone(msg) @mock.patch.object( rest.PowerMaxRest, 'get_initiator_group', side_effect=[tpd.PowerMaxData.initiator_group_fc, tpd.PowerMaxData.initiator_group_empty]) @mock.patch.object( masking.PowerMaxMasking, '_find_initiator_group', return_value=None) @mock.patch.object( masking.PowerMaxMasking, '_create_initiator_group') def test_get_or_create_initiator_group_not_logged_in_errors( self, mock_create_ig, mock_find_ig, mock_get_ig): found_init_group, msg = ( self.driver.masking._get_or_create_initiator_group( self.data.array, self.data.initiatorgroup_name_i, self.data.connector, self.extra_specs)) expected_msg = ( "Found initiator group OS-HostX-I-IG, but could not find " "initiator_names ['iqn.1993-08.org.debian:01:222'] in " "the login table. The contained initiators ['123456789012345'] " "do match up with those in the connector object. Delete initiator " "group OS-HostX-I-IG and retry.") self.assertEqual(expected_msg, msg) mock_create_ig.assert_not_called() found_init_group, msg = ( self.driver.masking._get_or_create_initiator_group( self.data.array, self.data.initiatorgroup_name_i, self.data.connector, self.extra_specs)) expected_msg = ( "Found initiator group OS-HostX-I-IG, but could not find " "initiator_names ['iqn.1993-08.org.debian:01:222'] in the login " "table. There are no initiators in OS-HostX-I-IG. 
Delete " "initiator group OS-HostX-I-IG and retry.") self.assertEqual(expected_msg, msg) mock_create_ig.assert_not_called() def test_check_existing_initiator_group(self): with mock.patch.object( rest.PowerMaxRest, 'get_element_from_masking_view', return_value=tpd.PowerMaxData.initiatorgroup_name_f): ig_from_mv, msg = ( self.driver.masking._check_existing_initiator_group( self.data.array, self.maskingviewdict['maskingview_name'], self.maskingviewdict, self.data.storagegroup_name_i, self.data.port_group_name_i, self.extra_specs)) self.assertEqual(self.data.initiatorgroup_name_f, ig_from_mv) def test_check_adding_volume_to_storage_group(self): with mock.patch.object( masking.PowerMaxMasking, '_create_initiator_group'): with mock.patch.object( rest.PowerMaxRest, 'is_volume_in_storagegroup', side_effect=[True, False]): msg = ( self.driver.masking._check_adding_volume_to_storage_group( self.data.array, self.device_id, self.data.storagegroup_name_i, self.maskingviewdict[utils.VOL_NAME], self.maskingviewdict[utils.EXTRA_SPECS])) self.assertIsNone(msg) msg = ( self.driver.masking._check_adding_volume_to_storage_group( self.data.array, self.device_id, self.data.storagegroup_name_i, self.maskingviewdict[utils.VOL_NAME], self.maskingviewdict[utils.EXTRA_SPECS])) @mock.patch.object(rest.PowerMaxRest, 'add_vol_to_sg') def test_add_volume_to_storage_group(self, mock_add_volume): self.driver.masking.add_volume_to_storage_group( self.data.array, self.device_id, self.data.storagegroup_name_i, self.volume_name, self.extra_specs) mock_add_volume.assert_called_once() @mock.patch.object(rest.PowerMaxRest, 'remove_vol_from_sg') def test_remove_vol_from_storage_group(self, mock_remove_volume): with mock.patch.object( rest.PowerMaxRest, 'is_volume_in_storagegroup', side_effect=[False, True]): self.driver.masking.remove_vol_from_storage_group( self.data.array, self.device_id, self.data.storagegroup_name_i, self.volume_name, self.extra_specs) mock_remove_volume.assert_called_once() self.assertRaises( exception.VolumeBackendAPIException, self.driver.masking.remove_vol_from_storage_group, self.data.array, self.device_id, self.data.storagegroup_name_i, self.volume_name, self.extra_specs) def test_find_initiator_names(self): foundinitiatornames = self.driver.masking.find_initiator_names( self.data.connector) self.assertEqual(self.data.connector['initiator'], foundinitiatornames[0]) foundinitiatornames = self.driver_fc.masking.find_initiator_names( self.data.connector) self.assertEqual(self.data.connector['wwpns'][0], foundinitiatornames[0]) connector = {'ip': self.data.ip, 'initiator': None, 'host': 'HostX'} self.assertRaises( exception.VolumeBackendAPIException, self.driver.masking.find_initiator_names, connector) self.assertRaises( exception.VolumeBackendAPIException, self.driver_fc.masking.find_initiator_names, connector) def test_find_nvme_tcp_initiator_names(self): foundinitiatornames = (self.driver_nvme_tcp.masking. 
find_initiator_names(self.data.connector)) self.assertEqual('nqn.2014-08.org.nvmexpress:uuid:' 'ac353d72-eabe-43c7-926c-f08987a8a553:' '0eaf7037479c432ba3d862e2889d768e', foundinitiatornames[0]) def test_find_nvme_tcp_initiator_names_negative(self): self.assertRaises( exception.VolumeBackendAPIException, self.driver_nvme_tcp.masking.find_initiator_names, self.data.connector_without_host_id ) def test_find_initiator_group_found(self): with mock.patch.object( rest.PowerMaxRest, 'get_initiator_list', return_value=self.data.initiator_list[2]['initiatorId']): with mock.patch.object( rest.PowerMaxRest, 'get_initiator_group_from_initiator', return_value=self.data.initiator_list): found_init_group_nam = ( self.driver.masking._find_initiator_group( self.data.array, ['FA-1D:4:123456789012345'])) self.assertEqual(self.data.initiator_list, found_init_group_nam) def test_find_initiator_group_not_found(self): with mock.patch.object( rest.PowerMaxRest, 'get_initiator_list', return_value=self.data.initiator_list[2]['initiatorId']): with mock.patch.object( rest.PowerMaxRest, 'get_initiator_group_from_initiator', return_value=None): found_init_group_nam = ( self.driver.masking._find_initiator_group( self.data.array, ['Error'])) self.assertIsNone(found_init_group_nam) def test_create_masking_view(self): with mock.patch.object(rest.PowerMaxRest, 'create_masking_view', side_effect=[None, Exception]): error_message = self.driver.masking.create_masking_view( self.data.array, self.maskingviewdict['maskingview_name'], self.data.storagegroup_name_i, self.data.port_group_name_i, self.data.initiatorgroup_name_i, self.extra_specs) self.assertIsNone(error_message) error_message = self.driver.masking.create_masking_view( self.data.array, self.maskingviewdict['maskingview_name'], self.data.storagegroup_name_i, self.data.port_group_name_i, self.data.initiatorgroup_name_i, self.extra_specs) self.assertIsNotNone(error_message) @mock.patch.object(masking.PowerMaxMasking, '_return_volume_to_fast_managed_group') @mock.patch.object(masking.PowerMaxMasking, '_check_ig_rollback') def test_check_if_rollback_action_for_masking_required( self, mock_check_ig, mock_return): with mock.patch.object(rest.PowerMaxRest, 'get_storage_groups_from_volume', side_effect=[ exception.VolumeBackendAPIException, self.data.storagegroup_list, self.data.storagegroup_list, None, None, ]): self.assertRaises( exception.VolumeBackendAPIException, self.mask.check_if_rollback_action_for_masking_required, self.data.array, self.data.test_volume, self.device_id, self.maskingviewdict) with mock.patch.object(masking.PowerMaxMasking, 'remove_and_reset_members'): self.maskingviewdict[ 'default_sg_name'] = self.data.defaultstoragegroup_name self.mask.check_if_rollback_action_for_masking_required( self.data.array, self.data.test_volume, self.device_id, self.maskingviewdict) # Multiattach case self.mask.check_if_rollback_action_for_masking_required( self.data.array, self.data.test_volume, self.device_id, self.data.masking_view_dict_multiattach) mock_return.assert_called_once() @mock.patch.object(masking.PowerMaxMasking, '_recreate_masking_view') @mock.patch.object(rest.PowerMaxRest, 'get_initiator_group', return_value=True) def test_verify_initiator_group_from_masking_view( self, mock_get_ig, mock_recreate_mv): mv_dict = deepcopy(self.maskingviewdict) mv_dict['initiator_check'] = True self.mask._verify_initiator_group_from_masking_view( self.data.array, mv_dict['maskingview_name'], mv_dict, self.data.initiatorgroup_name_i, self.data.storagegroup_name_i, 
self.data.port_group_name_i, self.extra_specs) mock_recreate_mv.assert_called() @mock.patch.object(masking.PowerMaxMasking, '_recreate_masking_view') @mock.patch.object(rest.PowerMaxRest, 'get_initiator_group', return_value=True) @mock.patch.object( masking.PowerMaxMasking, '_find_initiator_group', return_value=tpd.PowerMaxData.initiatorgroup_name_i) def test_verify_initiator_group_from_masking_view_no_recreate( self, mock_find_ig, mock_get_ig, mock_recreate): mv_dict = deepcopy(self.maskingviewdict) mv_dict['initiator_check'] = False self.assertRaises( exception.VolumeBackendAPIException, self.mask._verify_initiator_group_from_masking_view, self.data.array, mv_dict['maskingview_name'], mv_dict, 'OS-Wrong-Host-I-IG', self.data.storagegroup_name_i, self.data.port_group_name_i, self.extra_specs) mock_recreate.assert_not_called() @mock.patch.object(rest.PowerMaxRest, 'delete_initiator_group') @mock.patch.object(rest.PowerMaxRest, 'get_initiator_group', return_value=True) def test_recreate_masking_view( self, mock_get_ig, mock_delete_ig): ig_from_conn = self.data.initiatorgroup_name_i ig_from_mv = self.data.initiatorgroup_name_i ig_openstack = self.data.initiatorgroup_name_i self.mask._recreate_masking_view( self.data.array, ig_from_conn, ig_from_mv, ig_openstack, self.data.masking_view_name_i, [self.data.initiator], self.data.storagegroup_name_i, self.data.port_group_name_i, self.extra_specs) mock_delete_ig.assert_not_called() @mock.patch.object(rest.PowerMaxRest, 'delete_initiator_group') @mock.patch.object(rest.PowerMaxRest, 'get_initiator_group', return_value=True) def test_recreate_masking_view_no_ig_from_connector( self, mock_get_ig, mock_delete_ig): ig_from_mv = self.data.initiatorgroup_name_i ig_openstack = self.data.initiatorgroup_name_i self.mask._recreate_masking_view( self.data.array, None, ig_from_mv, ig_openstack, self.data.masking_view_name_i, [self.data.initiator], self.data.storagegroup_name_i, self.data.port_group_name_i, self.extra_specs) mock_delete_ig.assert_called() @mock.patch.object(rest.PowerMaxRest, 'create_masking_view') @mock.patch.object(rest.PowerMaxRest, 'get_initiator_group', return_value=True) def test_recreate_masking_view_wrong_host( self, mock_get_ig, mock_create_mv): ig_from_conn = 'OS-Wrong-Host-I-IG' ig_from_mv = self.data.initiatorgroup_name_i ig_openstack = self.data.initiatorgroup_name_i self.mask._recreate_masking_view( self.data.array, ig_from_conn, ig_from_mv, ig_openstack, self.data.masking_view_name_i, [self.data.initiator], self.data.storagegroup_name_i, self.data.port_group_name_i, self.extra_specs) mock_create_mv.assert_called() @mock.patch.object(rest.PowerMaxRest, 'delete_masking_view') @mock.patch.object(rest.PowerMaxRest, 'delete_initiator_group') @mock.patch.object(rest.PowerMaxRest, 'get_initiator_group', return_value=True) @mock.patch.object( masking.PowerMaxMasking, '_find_initiator_group', return_value=tpd.PowerMaxData.initiatorgroup_name_i) def test_recreate_masking_view_delete_mv( self, mock_find_ig, mock_get_ig, mock_delete_ig, mock_delete_mv): mock_delete_mv.side_effect = [None, Exception] mv_dict = deepcopy(self.maskingviewdict) mv_dict['initiator_check'] = True verify_flag = self.mask._verify_initiator_group_from_masking_view( self.data.array, mv_dict['maskingview_name'], mv_dict, 'OS-Wrong-Host-I-IG', self.data.storagegroup_name_i, self.data.port_group_name_i, self.extra_specs) mock_delete_mv.assert_called() self.assertTrue(verify_flag) @mock.patch.object(rest.PowerMaxRest, 'create_initiator_group') def 
test_create_initiator_group(self, mock_create_ig): initiator_names = self.mask.find_initiator_names(self.data.connector) ret_init_group_name = self.mask._create_initiator_group( self.data.array, self.data.initiatorgroup_name_i, initiator_names, self.extra_specs) self.assertEqual(self.data.initiatorgroup_name_i, ret_init_group_name) @mock.patch.object(rest.PowerMaxRest, 'create_initiator_group', side_effect=([exception.VolumeBackendAPIException( masking.CREATE_IG_ERROR)])) def test_create_initiator_group_exception(self, mock_create_ig): initiator_names = self.mask.find_initiator_names(self.data.connector) self.assertRaises( exception.VolumeBackendAPIException, self.mask._create_initiator_group, self.data.array, self.data.initiatorgroup_name_i, initiator_names, self.extra_specs) @mock.patch.object(masking.PowerMaxMasking, '_last_volume_delete_initiator_group') def test_check_ig_rollback(self, mock_last_volume): with mock.patch.object( masking.PowerMaxMasking, '_find_initiator_group', side_effect=[None, 'FAKE-I-IG', self.data.initiatorgroup_name_i]): for x in range(0, 2): self.mask._check_ig_rollback(self.data.array, self.data.initiatorgroup_name_i, self.data.connector) mock_last_volume.assert_not_called() self.mask._check_ig_rollback( self.data.array, self.data.initiatorgroup_name_i, self.data.connector) mock_last_volume.assert_called() @mock.patch.object(masking.PowerMaxMasking, '_cleanup_deletion') def test_remove_and_reset_members(self, mock_cleanup): self.mask.remove_and_reset_members( self.data.array, self.device_id, self.data.test_volume, self.volume_name, self.extra_specs, reset=False) mock_cleanup.assert_called_once() @mock.patch.object( rest.PowerMaxRest, 'get_storage_groups_from_volume', side_effect=[[tpd.PowerMaxData.storagegroup_name_i], [tpd.PowerMaxData.storagegroup_name_i], [tpd.PowerMaxData.storagegroup_name_i, tpd.PowerMaxData.storagegroup_name_f]]) @mock.patch.object(masking.PowerMaxMasking, 'remove_volume_from_sg') @mock.patch.object(masking.PowerMaxMasking, 'add_volume_to_default_storage_group') def test_cleanup_deletion(self, mock_add, mock_remove_vol, mock_get_sg): self.mask._cleanup_deletion( self.data.array, self.data.test_volume, self.device_id, self.volume_name, self.extra_specs, None, True, None) mock_add.assert_not_called() self.mask._cleanup_deletion( self.data.array, self.data.test_volume, self.device_id, self.volume_name, self.extra_specs, self.data.connector, True, None) mock_add.assert_not_called() self.mask._cleanup_deletion( self.data.array, self.data.test_volume, self.device_id, self.volume_name, self.extra_specs, None, True, None) mock_add.assert_called_once_with( self.data.array, self.device_id, self.volume_name, self.extra_specs, volume=self.data.test_volume) @mock.patch.object(masking.PowerMaxMasking, '_last_vol_in_sg') @mock.patch.object(masking.PowerMaxMasking, '_multiple_vols_in_sg') def test_remove_volume_from_sg(self, mock_multiple_vols, mock_last_vol): with mock.patch.object( rest.PowerMaxRest, 'get_masking_views_from_storage_group', return_value=None): with mock.patch.object( rest.PowerMaxRest, 'get_num_vols_in_sg', side_effect=[2, 1]): self.mask.remove_volume_from_sg( self.data.array, self.device_id, self.volume_name, self.data.defaultstoragegroup_name, self.extra_specs) mock_last_vol.assert_not_called() self.mask.remove_volume_from_sg( self.data.array, self.device_id, self.volume_name, self.data.defaultstoragegroup_name, self.extra_specs) mock_last_vol.assert_called() @mock.patch.object(masking.PowerMaxMasking, '_last_vol_in_sg') 
@mock.patch.object(masking.PowerMaxMasking, '_multiple_vols_in_sg') def test_remove_volume_from_sg_2(self, mock_multiple_vols, mock_last_vol): with mock.patch.object(rest.PowerMaxRest, 'is_volume_in_storagegroup', return_value=True): with mock.patch.object( rest.PowerMaxRest, 'get_masking_views_from_storage_group', return_value=[self.data.masking_view_name_i]): with mock.patch.object( rest.PowerMaxRest, 'get_num_vols_in_sg', side_effect=[2, 1]): self.mask.remove_volume_from_sg( self.data.array, self.device_id, self.volume_name, self.data.storagegroup_name_i, self.extra_specs) mock_last_vol.assert_not_called() self.mask.remove_volume_from_sg( self.data.array, self.device_id, self.volume_name, self.data.storagegroup_name_i, self.extra_specs) mock_last_vol.assert_called() @mock.patch.object(masking.PowerMaxMasking, '_last_vol_masking_views', return_value=True) @mock.patch.object(masking.PowerMaxMasking, '_last_vol_no_masking_views', return_value=True) def test_last_vol_in_sg(self, mock_no_mv, mock_mv): mv_list = [self.data.masking_view_name_i, self.data.masking_view_name_f] with mock.patch.object(rest.PowerMaxRest, 'get_masking_views_from_storage_group', side_effect=[mv_list, []]): for x in range(0, 2): self.mask._last_vol_in_sg( self.data.array, self.device_id, self.volume_name, self.data.storagegroup_name_i, self.extra_specs, self.data.connector) self.assertEqual(1, mock_mv.call_count) self.assertEqual(1, mock_no_mv.call_count) @mock.patch.object(masking.PowerMaxMasking, '_remove_last_vol_and_delete_sg') @mock.patch.object(masking.PowerMaxMasking, '_delete_cascaded_storage_groups') @mock.patch.object(rest.PowerMaxRest, 'get_num_vols_in_sg', side_effect=[1, 3]) @mock.patch.object(masking.PowerMaxMasking, 'get_parent_sg_from_child', side_effect=[None, 'parent_sg_name', 'parent_sg_name']) def test_last_vol_no_masking_views( self, mock_get_parent, mock_num_vols, mock_delete_casc, mock_remove): for x in range(0, 3): self.mask._last_vol_no_masking_views( self.data.array, self.data.storagegroup_name_i, self.device_id, self.volume_name, self.extra_specs, False) self.assertEqual(1, mock_delete_casc.call_count) self.assertEqual(2, mock_remove.call_count) @mock.patch.object(masking.PowerMaxMasking, '_remove_last_vol_and_delete_sg') @mock.patch.object(masking.PowerMaxMasking, '_delete_mv_ig_and_sg') @mock.patch.object(masking.PowerMaxMasking, '_get_num_vols_from_mv', side_effect=[(1, 'parent_name'), (3, 'parent_name')]) def test_last_vol_masking_views( self, mock_num_vols, mock_delete_all, mock_remove): for x in range(0, 2): self.mask._last_vol_masking_views( self.data.array, self.data.storagegroup_name_i, [self.data.masking_view_name_i], self.device_id, self.volume_name, self.extra_specs, self.data.connector, True) self.assertEqual(1, mock_delete_all.call_count) self.assertEqual(1, mock_remove.call_count) @mock.patch.object(masking.PowerMaxMasking, 'add_volume_to_default_storage_group') @mock.patch.object(rest.PowerMaxRest, 'get_num_vols_in_sg') @mock.patch.object(masking.PowerMaxMasking, 'remove_vol_from_storage_group') def test_multiple_vols_in_sg(self, mock_remove_vol, mock_get_volumes, mock_add): self.mask._multiple_vols_in_sg( self.data.array, self.device_id, self.data.storagegroup_name_i, self.volume_name, self.extra_specs, False) mock_remove_vol.assert_called_once() self.mask._multiple_vols_in_sg( self.data.array, self.device_id, self.data.storagegroup_name_i, self.volume_name, self.extra_specs, True) mock_add.assert_called_once() @mock.patch.object(rest.PowerMaxRest, 
'get_element_from_masking_view') @mock.patch.object(masking.PowerMaxMasking, '_last_volume_delete_masking_view') @mock.patch.object(masking.PowerMaxMasking, '_last_volume_delete_initiator_group') @mock.patch.object(masking.PowerMaxMasking, '_delete_cascaded_storage_groups') def test_delete_mv_ig_and_sg(self, mock_delete_sg, mock_delete_ig, mock_delete_mv, mock_get_element): self.mask._delete_mv_ig_and_sg( self.data.array, self.data.device_id, self.data.masking_view_name_i, self.data.storagegroup_name_i, self.data.parent_sg_i, self.data.connector, True, self.data.extra_specs) mock_delete_sg.assert_called_once() @mock.patch.object(rest.PowerMaxRest, 'delete_masking_view') def test_last_volume_delete_masking_view(self, mock_delete_mv): self.mask._last_volume_delete_masking_view( self.data.array, self.data.masking_view_name_i) mock_delete_mv.assert_called_once() @mock.patch.object( rest.PowerMaxRest, 'get_storage_groups_from_volume', return_value=list()) @mock.patch.object(masking.PowerMaxMasking, 'return_volume_to_volume_group') @mock.patch.object(rest.PowerMaxRest, 'move_volume_between_storage_groups') @mock.patch.object(masking.PowerMaxMasking, 'get_or_create_default_storage_group') @mock.patch.object(masking.PowerMaxMasking, 'add_volume_to_storage_group') def test_add_volume_to_default_storage_group( self, mock_add_sg, mock_get_sg, mock_move, mock_return, mock_sgs): self.mask.add_volume_to_default_storage_group( self.data.array, self.device_id, self.volume_name, self.extra_specs) mock_add_sg.assert_called_once() self.mask.add_volume_to_default_storage_group( self.data.array, self.device_id, self.volume_name, self.extra_specs, src_sg=self.data.storagegroup_name_i) mock_move.assert_called_once() vol_grp_member = deepcopy(self.data.test_volume) vol_grp_member.group_id = self.data.test_vol_grp_name_id_only self.mask.add_volume_to_default_storage_group( self.data.array, self.device_id, self.volume_name, self.extra_specs, volume=vol_grp_member) mock_return.assert_called_once() def test_add_volume_to_default_storage_group_next_gen(self): extra_specs = deepcopy(self.data.extra_specs) extra_specs.pop(utils.IS_RE, None) with mock.patch.object(rest.PowerMaxRest, 'is_next_gen_array', return_value=True): with mock.patch.object( self.mask, 'get_or_create_default_storage_group') as mock_get: self.mask.add_volume_to_default_storage_group( self.data.array, self.device_id, self.volume_name, extra_specs) mock_get.assert_called_once_with( self.data.array, self.data.srp, extra_specs[utils.SLO], 'NONE', extra_specs, False, False, None) @mock.patch.object(provision.PowerMaxProvision, 'create_storage_group') def test_get_or_create_default_storage_group(self, mock_create_sg): with mock.patch.object( rest.PowerMaxRest, 'get_vmax_default_storage_group', return_value=(None, self.data.storagegroup_name_i)): storage_group_name = self.mask.get_or_create_default_storage_group( self.data.array, self.data.srp, self.data.slo, self.data.workload, self.extra_specs) self.assertEqual(self.data.storagegroup_name_i, storage_group_name) with mock.patch.object( rest.PowerMaxRest, 'get_vmax_default_storage_group', return_value=('test_sg', self.data.storagegroup_name_i)): with mock.patch.object( rest.PowerMaxRest, 'get_masking_views_from_storage_group', return_value=self.data.masking_view_name_i): self.assertRaises( exception.VolumeBackendAPIException, self.mask.get_or_create_default_storage_group, self.data.array, self.data.srp, self.data.slo, self.data.workload, self.extra_specs) @mock.patch.object(masking.PowerMaxMasking, 
'add_volume_to_default_storage_group') @mock.patch.object(rest.PowerMaxRest, 'remove_child_sg_from_parent_sg') @mock.patch.object(rest.PowerMaxRest, 'delete_storage_group') @mock.patch.object(masking.PowerMaxMasking, 'remove_vol_from_storage_group') def test_remove_last_vol_and_delete_sg(self, mock_vol_sg, mock_delete_sg, mock_rm, mock_add): self.mask._remove_last_vol_and_delete_sg( self.data.array, self.device_id, self.volume_name, self.data.storagegroup_name_i, self.extra_specs) self.mask._remove_last_vol_and_delete_sg( self.data.array, self.device_id, self.volume_name, self.data.storagegroup_name_i, self.extra_specs, self.data.parent_sg_i, True) self.assertEqual(2, mock_delete_sg.call_count) self.assertEqual(1, mock_vol_sg.call_count) self.assertEqual(1, mock_rm.call_count) self.assertEqual(1, mock_add.call_count) @mock.patch.object(rest.PowerMaxRest, 'delete_initiator_group') def test_last_volume_delete_initiator_group(self, mock_delete_ig): self.mask._last_volume_delete_initiator_group( self.data.array, self.data.initiatorgroup_name_f, 'Wrong_Host') mock_delete_ig.assert_not_called() self.mask._last_volume_delete_initiator_group( self.data.array, self.data.initiatorgroup_name_f, None) mock_delete_ig.assert_not_called() mv_list = [self.data.masking_view_name_i, self.data.masking_view_name_f] with mock.patch.object( rest.PowerMaxRest, 'get_masking_views_by_initiator_group', side_effect=[mv_list, []]): self.mask._last_volume_delete_initiator_group( self.data.array, self.data.initiatorgroup_name_i, self.data.connector['host']) mock_delete_ig.assert_not_called() self.mask._last_volume_delete_initiator_group( self.data.array, self.data.initiatorgroup_name_i, self.data.connector['host']) mock_delete_ig.assert_called_once() def test_populate_masking_dict_init_check_false(self): extra_specs = deepcopy(self.data.extra_specs) extra_specs[utils.PORTGROUPNAME] = self.data.port_group_name_f connector = self.data.connector with mock.patch.object(self.driver, '_get_initiator_check_flag', return_value=False): masking_view_dict = self.driver._populate_masking_dict( self.data.test_volume, connector, extra_specs) self.assertFalse(masking_view_dict['initiator_check']) def test_populate_masking_dict_init_check_true(self): extra_specs = deepcopy(self.data.extra_specs) extra_specs[utils.PORTGROUPNAME] = self.data.port_group_name_f connector = self.data.connector with mock.patch.object(self.driver, '_get_initiator_check_flag', return_value=True): masking_view_dict = self.driver._populate_masking_dict( self.data.test_volume, connector, extra_specs) self.assertTrue(masking_view_dict['initiator_check']) def test_check_existing_initiator_group_verify_true(self): mv_dict = deepcopy(self.data.masking_view_dict) mv_dict['initiator_check'] = True with mock.patch.object( rest.PowerMaxRest, 'get_element_from_masking_view', return_value=tpd.PowerMaxData.initiatorgroup_name_f): with mock.patch.object( self.mask, '_verify_initiator_group_from_masking_view', return_value=( True, self.data.initiatorgroup_name_f)) as mock_verify: self.mask._check_existing_initiator_group( self.data.array, self.data.masking_view_name_f, mv_dict, self.data.storagegroup_name_f, self.data.port_group_name_f, self.data.extra_specs) mock_verify.assert_called_once_with( self.data.array, self.data.masking_view_name_f, mv_dict, self.data.initiatorgroup_name_f, self.data.storagegroup_name_f, self.data.port_group_name_f, self.data.extra_specs) @mock.patch.object( masking.PowerMaxMasking, 'add_child_sg_to_parent_sg', side_effect=[None, 
exception.VolumeBackendAPIException]) @mock.patch.object(rest.PowerMaxRest, 'is_child_sg_in_parent_sg', side_effect=[True, False, False]) def test_check_add_child_sg_to_parent_sg(self, mock_is_child, mock_add): for x in range(0, 3): message = self.mask._check_add_child_sg_to_parent_sg( self.data.array, self.data.storagegroup_name_i, self.data.parent_sg_i, self.data.extra_specs) self.assertIsNotNone(message) @mock.patch.object(rest.PowerMaxRest, 'add_child_sg_to_parent_sg') @mock.patch.object(rest.PowerMaxRest, 'is_child_sg_in_parent_sg', side_effect=[True, False]) def test_add_child_sg_to_parent_sg(self, mock_is_child, mock_add): for x in range(0, 2): self.mask.add_child_sg_to_parent_sg( self.data.array, self.data.storagegroup_name_i, self.data.parent_sg_i, self.data.extra_specs) self.assertEqual(1, mock_add.call_count) def test_get_parent_sg_from_child(self): with mock.patch.object(self.driver.rest, 'get_storage_group', side_effect=[None, self.data.sg_details[1]]): sg_name = self.mask.get_parent_sg_from_child( self.data.array, self.data.storagegroup_name_i) self.assertIsNone(sg_name) sg_name2 = self.mask.get_parent_sg_from_child( self.data.array, self.data.storagegroup_name_f) self.assertEqual(self.data.parent_sg_f, sg_name2) @mock.patch.object(rest.PowerMaxRest, 'get_element_from_masking_view', return_value='parent_sg') @mock.patch.object(rest.PowerMaxRest, 'get_num_vols_in_sg', return_value=2) def test_get_num_vols_from_mv(self, mock_num, mock_element): num_vols, sg = self.mask._get_num_vols_from_mv( self.data.array, self.data.masking_view_name_f) self.assertEqual(2, num_vols) @mock.patch.object(masking.PowerMaxMasking, 'add_volume_to_default_storage_group') @mock.patch.object(rest.PowerMaxRest, 'delete_storage_group') def test_delete_cascaded(self, mock_delete, mock_add): self.mask._delete_cascaded_storage_groups( self.data.array, self.data.masking_view_name_f, self.data.parent_sg_f, self.data.extra_specs, self.data.device_id, False) self.assertEqual(2, mock_delete.call_count) mock_add.assert_not_called() # Delete legacy masking view, parent sg = child sg mock_delete.reset_mock() self.mask._delete_cascaded_storage_groups( self.data.array, self.data.masking_view_name_f, self.data.masking_view_name_f, self.data.extra_specs, self.data.device_id, True) self.assertEqual(1, mock_delete.call_count) mock_add.assert_called_once() @mock.patch.object(masking.PowerMaxMasking, 'add_volumes_to_storage_group') def test_add_remote_vols_to_volume_group(self, mock_add): self.mask.add_remote_vols_to_volume_group( [self.data.test_volume], self.data.test_rep_group, self.data.rep_extra_specs) mock_add.assert_called_once() @mock.patch.object(masking.PowerMaxMasking, 'add_remote_vols_to_volume_group') @mock.patch.object(masking.PowerMaxMasking, '_check_adding_volume_to_storage_group') @mock.patch.object(volume_utils, 'is_group_a_cg_snapshot_type', return_value=True) @mock.patch.object(volume_utils, 'is_group_a_type', side_effect=[False, False, True, True]) def test_return_volume_to_volume_group(self, mock_type, mock_cg, mock_check, mock_add): vol_grp_member = deepcopy(self.data.test_volume) vol_grp_member.group_id = self.data.test_vol_grp_name_id_only vol_grp_member.group = self.data.test_group for x in range(0, 2): self.mask.return_volume_to_volume_group( self.data.array, vol_grp_member, self.data.device_id, self.data.test_volume.name, self.data.extra_specs) mock_add.assert_called_once() @mock.patch.object(masking.PowerMaxMasking, '_return_volume_to_fast_managed_group') def test_pre_multiattach(self, 
mock_return): mv_dict = self.mask.pre_multiattach( self.data.array, self.data.device_id, self.data.masking_view_dict_multiattach, self.data.extra_specs) mock_return.assert_not_called() self.assertEqual(self.data.storagegroup_name_f, mv_dict[utils.FAST_SG]) with mock.patch.object( self.mask, 'move_volume_between_storage_groups', side_effect=exception.CinderException): self.assertRaises( exception.VolumeBackendAPIException, self.mask.pre_multiattach, self.data.array, self.data.device_id, self.data.masking_view_dict_multiattach, self.data.extra_specs) mock_return.assert_called_once() def test_pre_multiattach_next_gen(self): with mock.patch.object(utils.PowerMaxUtils, 'truncate_string', return_value='DiamondDSS'): self.mask.pre_multiattach( self.data.array, self.data.device_id, self.data.masking_view_dict_multiattach, self.data.extra_specs) utils.PowerMaxUtils.truncate_string.assert_called_once_with( 'DiamondDSS', 10) @mock.patch.object(masking.PowerMaxMasking, '_clean_up_child_storage_group') @mock.patch.object(masking.PowerMaxMasking, 'move_volume_between_storage_groups') @mock.patch.object(masking.PowerMaxMasking, '_return_volume_to_fast_managed_group') def test_pre_multiattach_pool_none_workload(self, mock_return, mck_move, mck_clean): with mock.patch.object(utils.PowerMaxUtils, 'truncate_string', return_value='OptimdNONE'): self.mask.pre_multiattach( self.data.array, self.data.device_id, self.data.masking_view_dict_multiattach, self.data.extra_specs_optimized) utils.PowerMaxUtils.truncate_string.assert_called_once_with( 'OptimizedNONE', 10) @mock.patch.object(masking.PowerMaxMasking, '_clean_up_child_storage_group') @mock.patch.object(masking.PowerMaxMasking, 'move_volume_between_storage_groups') @mock.patch.object(masking.PowerMaxMasking, '_return_volume_to_fast_managed_group') def test_pre_multiattach_pool_no_pool_name(self, mock_return, mck_move, mck_clean): with mock.patch.object(utils.PowerMaxUtils, 'truncate_string', return_value='DiamondDSS'): self.mask.pre_multiattach( self.data.array, self.data.device_id, self.data.masking_view_dict_multiattach, self.data.extra_specs_no_pool_name) utils.PowerMaxUtils.truncate_string.assert_called_once_with( 'DiamondDSS', 10) @mock.patch.object( rest.PowerMaxRest, 'get_storage_group_list', side_effect=[ {'storageGroupId': [tpd.PowerMaxData.no_slo_sg_name]}, {}]) @mock.patch.object(masking.PowerMaxMasking, '_return_volume_to_fast_managed_group') def test_check_return_volume_to_fast_managed_group( self, mock_return, mock_sg): for x in range(0, 2): self.mask.return_volume_to_fast_managed_group( self.data.array, self.data.device_id, self.data.extra_specs) no_slo_specs = deepcopy(self.data.extra_specs) no_slo_specs[utils.SLO] = None self.mask.return_volume_to_fast_managed_group( self.data.array, self.data.device_id, no_slo_specs) mock_return.assert_called_once() @mock.patch.object(masking.PowerMaxMasking, '_move_vol_from_default_sg') @mock.patch.object(masking.PowerMaxMasking, '_clean_up_child_storage_group') @mock.patch.object(masking.PowerMaxMasking, 'add_child_sg_to_parent_sg') @mock.patch.object(masking.PowerMaxMasking, '_get_or_create_storage_group') @mock.patch.object( rest.PowerMaxRest, 'get_storage_groups_from_volume', side_effect=[[tpd.PowerMaxData.no_slo_sg_name], [tpd.PowerMaxData.storagegroup_name_f]]) def test_return_volume_to_fast_managed_group( self, mock_sg, mock_get, mock_add, mock_clean, mock_move): for x in range(0, 2): self.mask._return_volume_to_fast_managed_group( self.data.array, self.data.device_id, self.data.parent_sg_f, 
self.data.storagegroup_name_f, self.data.no_slo_sg_name, self.data.extra_specs) mock_get.assert_called_once() mock_clean.assert_called_once() @mock.patch.object(rest.PowerMaxRest, 'delete_storage_group') @mock.patch.object(rest.PowerMaxRest, 'remove_child_sg_from_parent_sg') @mock.patch.object(rest.PowerMaxRest, 'is_child_sg_in_parent_sg', side_effect=[False, True]) @mock.patch.object(rest.PowerMaxRest, 'get_num_vols_in_sg', side_effect=[2, 0, 0]) @mock.patch.object(rest.PowerMaxRest, 'get_storage_group', side_effect=[ None, 'child_sg', 'child_sg', 'child_sg']) def test_clean_up_child_storage_group( self, mock_sg, mock_num, mock_child, mock_rm, mock_del): # Storage group not found self.mask._clean_up_child_storage_group( self.data.array, self.data.storagegroup_name_f, self.data.parent_sg_f, self.data.extra_specs) mock_num.assert_not_called() # Storage group not empty self.mask._clean_up_child_storage_group( self.data.array, self.data.storagegroup_name_f, self.data.parent_sg_f, self.data.extra_specs) mock_child.assert_not_called() # Storage group not child self.mask._clean_up_child_storage_group( self.data.array, self.data.storagegroup_name_f, self.data.parent_sg_f, self.data.extra_specs) mock_rm.assert_not_called() # Storage group is child, and empty self.mask._clean_up_child_storage_group( self.data.array, self.data.storagegroup_name_f, self.data.parent_sg_f, self.data.extra_specs) mock_rm.assert_called_once() self.assertEqual(2, mock_del.call_count) @mock.patch.object(utils.PowerMaxUtils, 'verify_tag_list') def test_add_tags_to_storage_group_disabled(self, mock_verify): self.mask._add_tags_to_storage_group( self.data.array, self.data.add_volume_sg_info_dict, self.data.extra_specs) mock_verify.assert_not_called() @mock.patch.object(utils.PowerMaxUtils, 'verify_tag_list') def test_add_tags_to_storage_group_enabled(self, mock_verify): self.mask._add_tags_to_storage_group( self.data.array, self.data.add_volume_sg_info_dict, self.data.extra_specs_tags) mock_verify.assert_called() @mock.patch.object(utils.PowerMaxUtils, 'get_new_tags') def test_add_tags_to_storage_group_existing_tags(self, mock_inter): self.mask._add_tags_to_storage_group( self.data.array, self.data.storage_group_with_tags, self.data.extra_specs_tags) mock_inter.assert_called() @mock.patch.object(rest.PowerMaxRest, 'add_storage_group_tag', side_effect=[exception.VolumeBackendAPIException]) def test_add_tags_to_storage_group_exception(self, mock_except): self.mask._add_tags_to_storage_group( self.data.array, self.data.add_volume_sg_info_dict, self.data.extra_specs_tags) mock_except.assert_called() @mock.patch.object(rest.PowerMaxRest, 'get_masking_views_from_storage_group', return_value=[tpd.PowerMaxData.masking_view_name_f]) def test_get_host_and_port_group_labels(self, mock_mv): host_label, port_group_label = ( self.mask._get_host_and_port_group_labels( self.data.array, self.data.parent_sg_f)) self.assertEqual('HostX', host_label) self.assertEqual('OS-fibre-PG', port_group_label) @mock.patch.object(rest.PowerMaxRest, 'get_masking_views_from_storage_group', return_value=['OS-HostX699ea-I-p-name3b02c-MV']) def test_get_host_and_port_group_labels_complex(self, mock_mv): host_label, port_group_label = ( self.mask._get_host_and_port_group_labels( self.data.array, self.data.parent_sg_f)) self.assertEqual('HostX699ea', host_label) self.assertEqual('p-name3b02c', port_group_label) @mock.patch.object(rest.PowerMaxRest, 'get_masking_views_from_storage_group', return_value=['OS-myhost-I-myportgroup-MV']) def 
test_get_host_and_port_group_labels_plain(self, mock_mv): host_label, port_group_label = ( self.mask._get_host_and_port_group_labels( self.data.array, self.data.parent_sg_f)) self.assertEqual('myhost', host_label) self.assertEqual('myportgroup', port_group_label) @mock.patch.object(rest.PowerMaxRest, 'get_masking_views_from_storage_group', return_value=[ 'OS-host-with-dash-I-portgroup-with-dashes-MV']) def test_get_host_and_port_group_labels_dashes(self, mock_mv): host_label, port_group_label = ( self.mask._get_host_and_port_group_labels( self.data.array, self.data.parent_sg_f)) self.assertEqual('host-with-dash', host_label) self.assertEqual('portgroup-with-dashes', port_group_label) @mock.patch.object( rest.PowerMaxRest, 'is_child_sg_in_parent_sg', return_value=False) @mock.patch.object( rest.PowerMaxRest, 'add_child_sg_to_parent_sg') @mock.patch.object( rest.PowerMaxRest, 'get_storage_group', side_effect=[None, tpd.PowerMaxData.sg_details[1], tpd.PowerMaxData.sg_details[2]]) @mock.patch.object( provision.PowerMaxProvision, 'create_storage_group') def test_check_child_storage_group_exists_false( self, mock_create, mock_get, mock_add, mock_check): self.mask._check_child_storage_group_exists( self.data.device_id, self.data.array, self.data.storagegroup_name_i, self.data.extra_specs, self.data.parent_sg_i) mock_create.assert_called_once() mock_add.assert_called_once() @mock.patch.object( rest.PowerMaxRest, 'is_child_sg_in_parent_sg', return_value=True) @mock.patch.object( rest.PowerMaxRest, 'add_child_sg_to_parent_sg') @mock.patch.object( rest.PowerMaxRest, 'get_storage_group', side_effect=[tpd.PowerMaxData.sg_details[1], tpd.PowerMaxData.sg_details[3]]) @mock.patch.object( provision.PowerMaxProvision, 'create_storage_group') def test_check_child_storage_group_exists_true( self, mock_create, mock_get, mock_add, mock_check): self.mask._check_child_storage_group_exists( self.data.device_id, self.data.array, self.data.storagegroup_name_i, self.data.extra_specs, self.data.parent_sg_i) mock_create.assert_not_called mock_add.assert_not_called() @mock.patch.object(rest.PowerMaxRest, 'get_port', return_value=tpd.PowerMaxData.port_info) @mock.patch.object(rest.PowerMaxRest, 'get_port_ids', return_value=['FA-1D:4']) def test_check_director_and_port_status(self, mock_port_ids, mock_port): self.mask._check_director_and_port_status( self.data.array, self.data.port_group_name_f) @mock.patch.object(rest.PowerMaxRest, 'get_port', return_value=tpd.PowerMaxData.port_info_off) @mock.patch.object(rest.PowerMaxRest, 'get_port_ids', return_value=['FA-1D:4']) def test_check_director_and_port_status_invalid_status( self, mock_port_ids, mock_port): exception_message = ( r"The director status is Offline and the port status is OFF for " r"dir:port FA-1D:4.") with self.assertRaisesRegex( exception.VolumeBackendAPIException, exception_message): self.mask._check_director_and_port_status( self.data.array, self.data.port_group_name_f) @mock.patch.object(rest.PowerMaxRest, 'get_port', return_value=tpd.PowerMaxData.port_info_no_status) @mock.patch.object(rest.PowerMaxRest, 'get_port_ids', return_value=['FA-1D:4']) def test_check_director_and_port_status_no_status( self, mock_port_ids, mock_port): exception_message = ( r"Unable to get the director or port status for dir:port " r"FA-1D:4.") with self.assertRaisesRegex( exception.VolumeBackendAPIException, exception_message): self.mask._check_director_and_port_status( self.data.array, self.data.port_group_name_f) @mock.patch.object(rest.PowerMaxRest, 'get_port', 
                       return_value=tpd.PowerMaxData.port_info_no_details)
    @mock.patch.object(rest.PowerMaxRest, 'get_port_ids',
                       return_value=['FA-1D:4'])
    def test_check_director_and_port_status_no_details(
            self, mock_port_ids, mock_port):
        exception_message = (
            r"Unable to get port information for dir:port FA-1D:4.")
        with self.assertRaisesRegex(
                exception.VolumeBackendAPIException, exception_message):
            self.mask._check_director_and_port_status(
                self.data.array, self.data.port_group_name_f)

    @mock.patch.object(
        rest.PowerMaxRest, 'get_storage_groups_from_volume',
        return_value=([tpd.PowerMaxData.storagegroup_name_f]))
    @mock.patch.object(
        rest.PowerMaxRest, 'get_masking_views_from_storage_group',
        return_value=[tpd.PowerMaxData.masking_view_name_f])
    def test_validate_attach(self, mock_sgs, mock_mvs):
        self.assertTrue(self.mask._validate_attach(
            self.data.array, self.data.device_id,
            self.data.storagegroup_name_f, self.data.masking_view_name_f))

    @mock.patch.object(
        rest.PowerMaxRest, 'get_storage_groups_from_volume',
        return_value=([tpd.PowerMaxData.storagegroup_name_f]))
    @mock.patch.object(
        rest.PowerMaxRest, 'get_masking_views_from_storage_group',
        return_value=[])
    def test_validate_attach_no_mvs(self, mock_sgs, mock_mvs):
        self.assertFalse(self.mask._validate_attach(
            self.data.array, self.data.device_id,
            self.data.storagegroup_name_f, self.data.masking_view_name_f))

    @mock.patch.object(
        rest.PowerMaxRest, 'get_storage_groups_from_volume',
        return_value=([tpd.PowerMaxData.defaultstoragegroup_name]))
    @mock.patch.object(
        rest.PowerMaxRest, 'get_masking_views_from_storage_group',
        return_value=[])
    def test_validate_attach_incorrect_sg(self, mock_sgs, mock_mvs):
        self.assertFalse(self.mask._validate_attach(
            self.data.array, self.data.device_id,
            self.data.storagegroup_name_f, self.data.masking_view_name_f))

    def test_find_nvme_tcp_initiator_group_found(self):
        with mock.patch.object(
                rest.PowerMaxRest, 'get_initiator_list',
                return_value=self.data.nvme_tcp_initiator_list):
            with mock.patch.object(
                    rest.PowerMaxRest, 'get_initiator_group_from_initiator',
                    return_value='OS-host001010-NT-IG'):
                found_init_group_name = (
                    self.driver_nvme_tcp.masking._find_initiator_group(
                        self.data.array,
                        ['nqn.2014-08.org.'
                         'nvmexpress:uuid:'
                         'ac353d72-eabe-43c7'
                         '-926c-f08987a8a553:'
                         '0eaf7037479c432ba3d862e2889d768e']))
                self.assertEqual('OS-host001010-NT-IG',
                                 found_init_group_name)

cinder-27.0.0/cinder/tests/unit/volume/drivers/dell_emc/powermax/test_powermax_metadata.py

# Copyright (c) 2020 Dell Inc. or its subsidiaries.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from datetime import datetime
import platform
import time
from unittest import mock

from cinder.objects import fields
from cinder.tests.unit import test
from cinder.tests.unit.volume.drivers.dell_emc.powermax import (
    powermax_data as tpd)
from cinder import version as openstack_version
from cinder.volume.drivers.dell_emc.powermax import metadata
from cinder.volume.drivers.dell_emc.powermax import rest
from cinder.volume.drivers.dell_emc.powermax import utils

mock_time = mock.MagicMock()
mock_time.return_value = time.mktime(datetime(1970, 1, 1).timetuple())


class PowerMaxVolumeMetadataNoDebugTest(test.TestCase):
    def setUp(self):
        self.data = tpd.PowerMaxData()
        super(PowerMaxVolumeMetadataNoDebugTest, self).setUp()
        is_debug = False
        self.volume_metadata = metadata.PowerMaxVolumeMetadata(
            rest.PowerMaxRest, '4.0', is_debug)

    @mock.patch.object(
        metadata.PowerMaxVolumeMetadata, '_fill_volume_trace_dict',
        return_value={})
    def test_gather_volume_info(self, mock_fvtd):
        self.volume_metadata.gather_volume_info(
            self.data.volume_id, 'create', False, volume_size=1)
        mock_fvtd.assert_not_called()


class PowerMaxVolumeMetadataDebugTest(test.TestCase):
    def setUp(self):
        self.data = tpd.PowerMaxData()
        super(PowerMaxVolumeMetadataDebugTest, self).setUp()
        is_debug = True
        self.volume_metadata = metadata.PowerMaxVolumeMetadata(
            rest.PowerMaxRest, '4.1', is_debug)
        self.utils = self.volume_metadata.utils
        self.rest = self.volume_metadata.rest

    @mock.patch.object(
        metadata.PowerMaxVolumeMetadata, '_fill_volume_trace_dict',
        return_value={})
    def test_gather_volume_info(self, mock_fvtd):
        self.volume_metadata.gather_volume_info(
            self.data.volume_id, 'create', False, volume_size=1)
        mock_fvtd.assert_called_once()

    @mock.patch.object(
        metadata.PowerMaxVolumeMetadata, 'update_volume_info_metadata',
        return_value={})
    def test_capture_attach_info(self, mock_uvim):
        self.volume_metadata.capture_attach_info(
            self.data.test_volume, self.data.extra_specs,
            self.data.masking_view_dict, self.data.fake_host, False, False)
        mock_uvim.assert_called_once()

    @mock.patch.object(
        metadata.PowerMaxVolumeMetadata, 'update_volume_info_metadata',
        return_value={})
    def test_capture_attach_info_tags(self, mock_uvim):
        self.volume_metadata.capture_attach_info(
            self.data.test_volume, self.data.extra_specs,
            self.data.masking_view_dict_tags, self.data.fake_host,
            False, False)
        mock_uvim.assert_called_once()

    @mock.patch.object(
        metadata.PowerMaxVolumeMetadata, 'update_volume_info_metadata',
        return_value={})
    def test_capture_create_volume(self, mock_uvim):
        self.volume_metadata.capture_create_volume(
            self.data.device_id, self.data.test_volume, 'test_group',
            'test_group_id', self.data.extra_specs, {}, 'create', None)
        mock_uvim.assert_called_once()

    @mock.patch.object(
        metadata.PowerMaxVolumeMetadata, 'update_volume_info_metadata',
        return_value={})
    def test_capture_delete_info(self, mock_uvim):
        self.volume_metadata.capture_delete_info(self.data.test_volume)
        mock_uvim.assert_called_once()

    @mock.patch.object(
        metadata.PowerMaxVolumeMetadata, 'update_volume_info_metadata',
        return_value={})
    def test_capture_manage_existing(self, mock_uvim):
        self.volume_metadata.capture_manage_existing(
            self.data.test_volume, {}, self.data.device_id,
            self.data.extra_specs)
        mock_uvim.assert_called_once()

    @mock.patch.object(
        metadata.PowerMaxVolumeMetadata, 'update_volume_info_metadata',
        return_value={})
    @mock.patch('time.time', mock_time)
    def test_capture_manage_existing_no_backup_id(self, mock_uvim):
        manage_existing_metadata = (
            {'1e5177e7-95e5-4a0f-b170-e45f4b469f6a': {
                'volume_id':
                    '1e5177e7-95e5-4a0f-b170-e45f4b469f6a',
                'successful_operation': 'manage_existing_volume',
                'volume_size': 2,
                'device_id': '00001',
                'default_sg_name': 'OS-SRP_1-Diamond-NONE-SG',
                'serial_number': '000197800123',
                'service_level': 'Diamond', 'workload': 'None',
                'srp': 'SRP_1',
                'identifier_name':
                    'OS-1e5177e7-95e5-4a0f-b170-e45f4b469f6a',
                'rdf_group_no': '70', 'target_name': 'test_vol',
                'remote_array': '000197800124',
                'target_device_id': '00002',
                'rep_mode': 'Metro', 'replication_status': 'Enabled',
                'rdf_group_label': '23_24_007',
                'volume_updated_time': '1970-01-01 00:00:00'}})
        self.volume_metadata.capture_manage_existing(
            self.data.test_volume, self.data.rep_info_dict,
            self.data.device_id, self.data.extra_specs_no_workload)
        mock_uvim.assert_called_with(manage_existing_metadata, {})

    @mock.patch.object(
        metadata.PowerMaxVolumeMetadata, 'update_volume_info_metadata',
        return_value={})
    def test_capture_failover_volume(self, mock_uvim):
        self.volume_metadata.capture_failover_volume(
            self.data.test_volume, self.data.device_id2,
            self.data.remote_array, self.data.rdf_group_name_1,
            self.data.device_id, self.data.array, self.data.extra_specs,
            True, None, fields.ReplicationStatus.FAILED_OVER,
            utils.REP_SYNC)
        mock_uvim.assert_called_once()

    @mock.patch.object(
        metadata.PowerMaxVolumeMetadata, 'update_volume_info_metadata',
        return_value={})
    def test_capture_modify_group(self, mock_uvim):
        self.volume_metadata.capture_modify_group(
            'test_group', 'test_group_id', [self.data.test_volume], [],
            self.data.array)
        mock_uvim.assert_called_once()

    @mock.patch.object(
        metadata.PowerMaxVolumeMetadata, 'update_volume_info_metadata',
        return_value={})
    def test_capture_extend_info(self, mock_uvim):
        self.volume_metadata.capture_extend_info(
            self.data.test_volume, 5, self.data.device_id,
            self.data.extra_specs, self.data.array)
        mock_uvim.assert_called_once()

    @mock.patch.object(
        metadata.PowerMaxVolumeMetadata, 'update_volume_info_metadata',
        return_value={})
    def test_capture_detach_info(self, mock_uvim):
        self.volume_metadata.capture_detach_info(
            self.data.test_volume, self.data.extra_specs,
            self.data.device_id, None, None)
        mock_uvim.assert_called_once()

    @mock.patch.object(
        metadata.PowerMaxVolumeMetadata, 'update_volume_info_metadata',
        return_value={})
    def test_capture_snapshot_info(self, mock_uvim):
        self.volume_metadata.capture_snapshot_info(
            self.data.test_volume, self.data.extra_specs, 'createSnapshot',
            self.data.snapshot_metadata)
        mock_uvim.assert_called_once()

    @mock.patch.object(
        metadata.PowerMaxVolumeMetadata, 'update_volume_info_metadata',
        return_value={})
    def test_capture_retype_info(self, mock_uvim):
        self.volume_metadata.capture_retype_info(
            self.data.test_volume, self.data.device_id, self.data.array,
            self.data.srp, self.data.slo, self.data.workload,
            self.data.storagegroup_name_target, False, None, False, None)
        mock_uvim.assert_called_once()

    def test_update_volume_info_metadata(self):
        volume_metadata = self.volume_metadata.update_volume_info_metadata(
            self.data.data_dict, self.data.version_dict)
        self.assertEqual('2.7.12', volume_metadata['python_version'])
        self.assertEqual('VMAX250F', volume_metadata['storage_model'])
        self.assertEqual('DSS', volume_metadata['workload'])
        self.assertEqual('OS-fibre-PG', volume_metadata['port_group'])

    def test_fill_volume_trace_dict(self):
        datadict = {}
        volume_trace_dict = {}
        volume_key_value = {}
        result_dict = {'successful_operation': 'create',
                       'volume_id': self.data.test_volume.id}
        volume_metadata = self.volume_metadata._fill_volume_trace_dict(
            self.data.test_volume.id, 'create', False,
            target_name=None, datadict=datadict,
            volume_key_value=volume_key_value,
            volume_trace_dict=volume_trace_dict)
        self.assertEqual(result_dict, volume_metadata)

    def test_fill_volume_trace_dict_multi_attach(self):
        mv_list = ['mv1', 'mv2', 'mv3']
        sg_list = ['sg1', 'sg2', 'sg3']
        datadict = {}
        volume_trace_dict = {}
        volume_key_value = {}
        result_dict = {
            'masking_view_1': 'mv1', 'masking_view_2': 'mv2',
            'masking_view_3': 'mv3', 'successful_operation': 'attach',
            'storage_group_1': 'sg1', 'storage_group_2': 'sg2',
            'storage_group_3': 'sg3',
            'volume_id': self.data.test_volume.id}
        volume_metadata = self.volume_metadata._fill_volume_trace_dict(
            self.data.test_volume.id, 'attach', False, target_name=None,
            datadict=datadict, volume_trace_dict=volume_trace_dict,
            volume_key_value=volume_key_value, mv_list=mv_list,
            sg_list=sg_list)
        self.assertEqual(result_dict, volume_metadata)

    def test_fill_volume_trace_dict_array_tags(self):
        datadict = {}
        volume_trace_dict = {}
        volume_key_value = {}
        result_dict = {'successful_operation': 'create',
                       'volume_id': self.data.test_volume.id,
                       'array_tag_list': ['one', 'two']}
        volume_metadata = self.volume_metadata._fill_volume_trace_dict(
            self.data.test_volume.id, 'create', False, target_name=None,
            datadict=datadict, volume_key_value=volume_key_value,
            volume_trace_dict=volume_trace_dict,
            array_tag_list=['one', 'two'])
        self.assertEqual(result_dict, volume_metadata)

    @mock.patch.object(utils.PowerMaxUtils, 'merge_dicts', return_value={})
    def test_consolidate_volume_trace_list(self, mock_m2d):
        self.volume_metadata.volume_trace_list = [self.data.data_dict]
        volume_trace_dict = {'volume_updated_time': '2018-03-06 16:51:40',
                             'operation': 'delete',
                             'volume_id': self.data.volume_id}
        volume_key_value = {self.data.volume_id: volume_trace_dict}
        self.volume_metadata._consolidate_volume_trace_list(
            self.data.volume_id, volume_trace_dict, volume_key_value)
        mock_m2d.assert_called_once()

    def test_merge_dicts_multiple(self):
        d1 = {'a': 1, 'b': 2}
        d2 = {'c': 3, 'd': 4}
        d3 = {'e': 5, 'f': 6}
        res_d = {'a': 1, 'b': 2, 'c': 3, 'd': 4, 'e': 5, 'f': 6}
        result_dict = self.utils.merge_dicts(
            d1, d2, d3)
        self.assertEqual(res_d, result_dict)

    def test_merge_dicts_multiple_2(self):
        d1 = {'a': 1, 'b': 2}
        d2 = {'b': 3, 'd': 4}
        d3 = {'d': 5, 'e': 6}
        res_d = {'a': 1, 'b': 2, 'd': 4, 'e': 6}
        result_dict = self.utils.merge_dicts(
            d1, d2, d3)
        self.assertEqual(res_d, result_dict)

    def test_merge_dicts(self):
        self.volume_metadata.volume_trace_list = [self.data.data_dict]
        volume_trace_dict = {'volume_updated_time': '2018-03-06 16:51:40',
                             'operation': 'delete',
                             'volume_id': self.data.volume_id}
        result_dict = self.utils.merge_dicts(
            volume_trace_dict, self.data.volume_info_dict)
        self.assertEqual('delete', result_dict['operation'])
        self.assertEqual(
            '2018-03-06 16:51:40', result_dict['volume_updated_time'])
        self.assertEqual('OS-fibre-PG', result_dict['port_group'])

    @mock.patch.object(platform, 'platform',
                       return_value=tpd.PowerMaxData.platform)
    @mock.patch.object(platform, 'python_version',
                       return_value=tpd.PowerMaxData.python_version)
    @mock.patch.object(openstack_version.version_info, 'version_string',
                       return_value=tpd.PowerMaxData.openstack_version)
    @mock.patch.object(openstack_version.version_info, 'release_string',
                       return_value=tpd.PowerMaxData.openstack_release)
    @mock.patch.object(
        rest.PowerMaxRest, 'get_unisphere_version',
        return_value={'version': tpd.PowerMaxData.unisphere_version})
    @mock.patch.object(
        rest.PowerMaxRest, 'get_array_detail',
        return_value={'ucode': tpd.PowerMaxData.vmax_firmware_version,
                      'model':
tpd.PowerMaxData.vmax_model}) def test_gather_version_info( self, mock_vi, mock_ur, mock_or, mock_ov, mock_pv, mock_p): self.volume_metadata.gather_version_info(self.data.array) self.assertEqual( self.data.version_dict, self.volume_metadata.version_dict) def test_gather_replication_info_target_model(self): rep_extra_specs = {'rep_mode': 'Synchronous', 'target_array_model': 'PowerMax_2000'} rdf_group_no = '70' remote_array = '000197800124' rep_config = {'mode': 'Synchronous', 'rdf_group_label': '23_24_007', 'portgroup': 'OS-fibre-PG', 'allow_extend': True, 'array': '000197800124', 'srp': 'SRP_2'} rep_info_dict = self.volume_metadata.gather_replication_info( self.data.volume_id, 'replication', False, rdf_group_no=rdf_group_no, target_name='target_name', remote_array=remote_array, target_device_id=self.data.device_id2, replication_status=fields.ReplicationStatus.ENABLED, rep_mode=rep_extra_specs['rep_mode'], rdf_group_label=rep_config['rdf_group_label'], target_array_model=rep_extra_specs['target_array_model']) self.assertEqual( 'PowerMax_2000', rep_info_dict['target_array_model']) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/volume/drivers/dell_emc/powermax/test_powermax_migrate.py0000664000175000017500000006453200000000000032044 0ustar00zuulzuul00000000000000# Copyright (c) 2020 Dell Inc. or its subsidiaries. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
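# ---------------------------------------------------------------------------
# Illustrative note (not part of the driver): the tests below exercise
# migrate.PowerMaxMigrate.get_masking_view_component_dict(), which picks a
# legacy masking view name such as 'OS-myhost-SRP_1-Diamond-OLTP-I-RE-MV'
# apart into its components.  The helper below is only a hedged sketch of
# the general idea for the SRP-anchored ("shared") format, using the SRP
# name to split off the host portion so that hosts containing dashes are
# handled; the real driver covers many more legacy layouts (No_SLO, -CD,
# -RE, uuid suffixes) than this illustration does, and this helper name is
# an assumption introduced here for illustration only.
def _sketch_split_legacy_mv_name(mv_name, srp_name):
    """Split a legacy shared masking view name on the SRP token.

    Returns a dict with 'prefix', 'host', 'srp', 'slo', 'workload' and
    'postfix' keys, or None if the name does not contain the SRP token.
    Purely illustrative; assumes the
    '<prefix>-<host>-<srp>-<slo>-<workload>-...-MV' layout shown in the
    tests below.
    """
    anchor = '-%s-' % srp_name
    if anchor not in mv_name or not mv_name.endswith('-MV'):
        return None
    head, tail = mv_name.split(anchor, 1)
    prefix, _, host = head.partition('-')
    tail_parts = tail[:-len('-MV')].split('-')
    return {'prefix': prefix, 'host': host, 'srp': srp_name,
            'slo': tail_parts[0], 'workload': tail_parts[1],
            'postfix': 'MV'}


# Example (illustrative only):
#   _sketch_split_legacy_mv_name(
#       'OS-host-with-dashes-SRP_1-Diamond-NONE-I-MV', 'SRP_1')
#   -> {'prefix': 'OS', 'host': 'host-with-dashes', 'srp': 'SRP_1',
#       'slo': 'Diamond', 'workload': 'NONE', 'postfix': 'MV'}
# ---------------------------------------------------------------------------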
from copy import deepcopy from unittest import mock from cinder import exception from cinder.tests.unit import test from cinder.tests.unit.volume.drivers.dell_emc.powermax import ( powermax_data as tpd) from cinder.tests.unit.volume.drivers.dell_emc.powermax import ( powermax_fake_objects as tpfo) from cinder.volume.drivers.dell_emc.powermax import iscsi from cinder.volume.drivers.dell_emc.powermax import migrate from cinder.volume.drivers.dell_emc.powermax import provision from cinder.volume.drivers.dell_emc.powermax import rest from cinder.volume import volume_utils class PowerMaxMigrateTest(test.TestCase): def setUp(self): self.data = tpd.PowerMaxData() self.mock_object(volume_utils, 'get_max_over_subscription_ratio') super(PowerMaxMigrateTest, self).setUp() configuration = tpfo.FakeConfiguration( None, 'MaskingTests', 1, 1, san_ip='1.1.1.1', san_login='smc', powermax_array=self.data.array, powermax_srp='SRP_1', san_password='smc', san_api_port=8443, vmax_port_groups=[self.data.port_group_name_f]) self.mock_object(rest.PowerMaxRest, '_establish_rest_session', return_value=tpfo.FakeRequestsSession()) driver = iscsi.PowerMaxISCSIDriver(configuration=configuration) self.driver = driver self.common = self.driver.common self.migrate = self.common.migrate def test_get_masking_view_component_dict_shared_format_1(self): """Test for get_masking_view_component_dict, legacy case 1.""" component_dict = self.migrate.get_masking_view_component_dict( 'OS-myhost-No_SLO-8970da0c-MV', 'SRP_1') self.assertEqual('OS', component_dict['prefix']) self.assertEqual('myhost', component_dict['host']) self.assertEqual('No_SLO', component_dict['no_slo']) self.assertEqual('-8970da0c', component_dict['uuid']) self.assertEqual('MV', component_dict['postfix']) def test_get_masking_view_component_dict_shared_format_2(self): """Test for get_masking_view_component_dict, legacy case 2.""" component_dict = self.migrate.get_masking_view_component_dict( 'OS-myhost-No_SLO-F-8970da0c-MV', 'SRP_1') self.assertEqual('OS', component_dict['prefix']) self.assertEqual('myhost', component_dict['host']) self.assertEqual('-F', component_dict['protocol']) self.assertEqual('No_SLO', component_dict['no_slo']) self.assertEqual('-8970da0c', component_dict['uuid']) self.assertEqual('MV', component_dict['postfix']) def test_get_masking_view_component_dict_shared_format_3(self): """Test for get_masking_view_component_dict, legacy case 3.""" component_dict = self.migrate.get_masking_view_component_dict( 'OS-myhost-SRP_1-Silver-NONE-74346a64-MV', 'SRP_1') self.assertEqual('OS', component_dict['prefix']) self.assertEqual('myhost', component_dict['host']) self.assertEqual('SRP_1', component_dict['srp']) self.assertEqual('Silver', component_dict['slo']) self.assertEqual('NONE', component_dict['workload']) self.assertEqual('-74346a64', component_dict['uuid']) self.assertEqual('MV', component_dict['postfix']) def test_get_masking_view_component_dict_shared_format_4(self): """Test for get_masking_view_component_dict, legacy case 4.""" component_dict = self.migrate.get_masking_view_component_dict( 'OS-myhost-SRP_1-Bronze-DSS-I-1b454e9f-MV', 'SRP_1') self.assertEqual('OS', component_dict['prefix']) self.assertEqual('myhost', component_dict['host']) self.assertEqual('SRP_1', component_dict['srp']) self.assertEqual('Bronze', component_dict['slo']) self.assertEqual('DSS', component_dict['workload']) self.assertEqual('-I', component_dict['protocol']) self.assertEqual('-1b454e9f', component_dict['uuid']) self.assertEqual('MV', component_dict['postfix']) def 
test_get_masking_view_component_dict_non_shared_format_5(self): """Test for get_masking_view_component_dict, legacy case 5.""" component_dict = self.migrate.get_masking_view_component_dict( 'OS-myhost-No_SLO-MV', 'SRP_1') self.assertEqual('OS', component_dict['prefix']) self.assertEqual('myhost', component_dict['host']) self.assertEqual('No_SLO', component_dict['no_slo']) self.assertEqual('MV', component_dict['postfix']) def test_get_masking_view_component_dict_non_shared_format_6(self): """Test for get_masking_view_component_dict, legacy case 6.""" component_dict = self.migrate.get_masking_view_component_dict( 'OS-myhost-No_SLO-F-MV', 'SRP_1') self.assertEqual('OS', component_dict['prefix']) self.assertEqual('myhost', component_dict['host']) self.assertEqual('No_SLO', component_dict['no_slo']) self.assertEqual('-F', component_dict['protocol']) self.assertEqual('MV', component_dict['postfix']) def test_get_masking_view_component_dict_non_shared_format_7(self): """Test for get_masking_view_component_dict, legacy case 7.""" component_dict = self.migrate.get_masking_view_component_dict( 'OS-myhost-SRP_1-Diamond-OLTP-MV', 'SRP_1') self.assertEqual('OS', component_dict['prefix']) self.assertEqual('myhost', component_dict['host']) self.assertEqual('SRP_1', component_dict['srp']) self.assertEqual('Diamond', component_dict['slo']) self.assertEqual('OLTP', component_dict['workload']) self.assertEqual('MV', component_dict['postfix']) def test_get_masking_view_component_dict_non_shared_format_8(self): """Test for get_masking_view_component_dict, legacy case 8.""" component_dict = self.migrate.get_masking_view_component_dict( 'OS-myhost-SRP_1-Gold-NONE-F-MV', 'SRP_1') self.assertEqual('OS', component_dict['prefix']) self.assertEqual('myhost', component_dict['host']) self.assertEqual('SRP_1', component_dict['srp']) self.assertEqual('Gold', component_dict['slo']) self.assertEqual('NONE', component_dict['workload']) self.assertEqual('-F', component_dict['protocol']) self.assertEqual('MV', component_dict['postfix']) def test_get_masking_view_component_dict_host_with_dashes_no_slo( self): """Test for get_masking_view_component_dict, dashes in host.""" component_dict = self.migrate.get_masking_view_component_dict( 'OS-host-with-dashes-No_SLO-I-MV', 'SRP_1') self.assertEqual('OS', component_dict['prefix']) self.assertEqual('host-with-dashes', component_dict['host']) self.assertEqual('No_SLO', component_dict['no_slo']) self.assertEqual('-I', component_dict['protocol']) self.assertEqual('MV', component_dict['postfix']) def test_get_masking_view_component_dict_host_with_dashes_slo(self): """Test for get_masking_view_component_dict, dashes and slo.""" component_dict = self.migrate.get_masking_view_component_dict( 'OS-host-with-dashes-SRP_1-Diamond-NONE-I-MV', 'SRP_1') self.assertEqual('OS', component_dict['prefix']) self.assertEqual('host-with-dashes', component_dict['host']) self.assertEqual('SRP_1', component_dict['srp']) self.assertEqual('Diamond', component_dict['slo']) self.assertEqual('NONE', component_dict['workload']) self.assertEqual('-I', component_dict['protocol']) self.assertEqual('MV', component_dict['postfix']) def test_get_masking_view_component_dict_replication_enabled(self): """Test for get_masking_view_component_dict, replication enabled.""" component_dict = self.migrate.get_masking_view_component_dict( 'OS-myhost-SRP_1-Diamond-OLTP-I-RE-MV', 'SRP_1') self.assertEqual('OS', component_dict['prefix']) self.assertEqual('myhost', component_dict['host']) self.assertEqual('-I', 
component_dict['protocol']) self.assertEqual('Diamond', component_dict['slo']) self.assertEqual('OLTP', component_dict['workload']) self.assertEqual('-RE', component_dict['RE']) def test_get_masking_view_component_dict_compression_disabled(self): """Test for get_masking_view_component_dict, compression disabled.""" component_dict = self.migrate.get_masking_view_component_dict( 'OS-myhost-SRP_1-Bronze-DSS_REP-I-CD-MV', 'SRP_1') self.assertEqual('OS', component_dict['prefix']) self.assertEqual('myhost', component_dict['host']) self.assertEqual('-I', component_dict['protocol']) self.assertEqual('Bronze', component_dict['slo']) self.assertEqual('DSS_REP', component_dict['workload']) self.assertEqual('-CD', component_dict['CD']) def test_get_masking_view_component_dict_CD_RE(self): """Test for get_masking_view_component_dict, CD and RE.""" component_dict = self.migrate.get_masking_view_component_dict( 'OS-myhost-SRP_1-Platinum-OLTP_REP-I-CD-RE-MV', 'SRP_1') self.assertEqual('OS', component_dict['prefix']) self.assertEqual('myhost', component_dict['host']) self.assertEqual('-I', component_dict['protocol']) self.assertEqual('Platinum', component_dict['slo']) self.assertEqual('OLTP_REP', component_dict['workload']) self.assertEqual('-CD', component_dict['CD']) self.assertEqual('-RE', component_dict['RE']) @mock.patch.object(migrate.PowerMaxMigrate, '_perform_migration', return_value=True) @mock.patch.object(migrate.PowerMaxMigrate, '_get_mvs_and_sgs_from_volume', return_value=(tpd.PowerMaxData.legacy_mvs, [tpd.PowerMaxData.legacy_shared_sg])) @mock.patch.object(migrate.PowerMaxMigrate, 'get_volume_host_list', return_value=['myhostB']) def test_do_migrate_if_candidate( self, mock_mvs, mock_os_host, mock_migrate): self.assertTrue(self.migrate.do_migrate_if_candidate( self.data.array, self.data.srp, self.data.device_id, self.data.test_volume, self.data.connector)) @mock.patch.object(migrate.PowerMaxMigrate, '_get_mvs_and_sgs_from_volume', return_value=([tpd.PowerMaxData.legacy_not_shared_mv], [tpd.PowerMaxData.legacy_not_shared_sg])) def test_do_migrate_if_candidate_not_shared( self, mock_mvs): self.assertFalse(self.migrate.do_migrate_if_candidate( self.data.array, self.data.srp, self.data.device_id, self.data.test_volume, self.data.connector)) @mock.patch.object(migrate.PowerMaxMigrate, '_get_mvs_and_sgs_from_volume', return_value=(tpd.PowerMaxData.legacy_mvs, [tpd.PowerMaxData.legacy_shared_sg, 'non_fast_sg'])) def test_do_migrate_if_candidate_in_multiple_sgs( self, mock_mvs): self.assertFalse(self.migrate.do_migrate_if_candidate( self.data.array, self.data.srp, self.data.device_id, self.data.test_volume, self.data.connector)) @mock.patch.object(migrate.PowerMaxMigrate, '_perform_migration', return_value=True) @mock.patch.object(migrate.PowerMaxMigrate, '_get_mvs_and_sgs_from_volume', return_value=(tpd.PowerMaxData.legacy_mvs, [tpd.PowerMaxData.legacy_shared_sg])) @mock.patch.object(migrate.PowerMaxMigrate, 'get_volume_host_list', return_value=['myhostA', 'myhostB']) def test_dp_migrate_if_candidate_multiple_os_hosts( self, mock_mvs, mock_os_host, mock_migrate): self.assertFalse(self.migrate.do_migrate_if_candidate( self.data.array, self.data.srp, self.data.device_id, self.data.test_volume, self.data.connector)) @mock.patch.object(migrate.PowerMaxMigrate, '_delete_staging_masking_views') @mock.patch.object(migrate.PowerMaxMigrate, '_get_mvs_and_sgs_from_volume', side_effect=[(tpd.PowerMaxData.staging_mvs, [tpd.PowerMaxData.staging_sg]), ([tpd.PowerMaxData.staging_mv2], 
[tpd.PowerMaxData.staging_sg])]) @mock.patch.object(migrate.PowerMaxMigrate, '_create_stg_masking_views', return_value=tpd.PowerMaxData.staging_mvs) @mock.patch.object(migrate.PowerMaxMigrate, '_create_stg_storage_group_with_vol', return_value=tpd.PowerMaxData.staging_sg) def test_perform_migration(self, mock_sg, mock_mvs, mock_new, mock_del): """Test to perform migration""" source_sg_name = 'OS-myhost-SRP_1-Diamond-OLTP-F-SG' mv_details_list = list() mv_details_list.append(self.migrate.get_masking_view_component_dict( 'OS-myhostA-SRP_1-Diamond-OLTP-F-1b454e9f-MV', 'SRP_1')) mv_details_list.append(self.migrate.get_masking_view_component_dict( 'OS-myhostB-SRP_1-Diamond-OLTP-F-8970da0c-MV', 'SRP_1')) self.assertTrue(self.migrate._perform_migration( self.data.array, self.data.device_id, mv_details_list, source_sg_name, 'myhostB')) @mock.patch.object(migrate.PowerMaxMigrate, '_create_stg_storage_group_with_vol', return_value=None) def test_perform_migration_storage_group_fail(self, mock_sg): """Test to perform migration""" source_sg_name = 'OS-myhost-SRP_1-Diamond-OLTP-F-SG' mv_details_list = list() mv_details_list.append(self.migrate.get_masking_view_component_dict( 'OS-myhostA-SRP_1-Diamond-OLTP-F-1b454e9f-MV', 'SRP_1')) mv_details_list.append(self.migrate.get_masking_view_component_dict( 'OS-myhostB-SRP_1-Diamond-OLTP-F-8970da0c-MV', 'SRP_1')) self.assertRaises( exception.VolumeBackendAPIException, self.migrate._perform_migration, self.data.array, self.data.device_id, mv_details_list, source_sg_name, 'myhostB') with self.assertRaisesRegex( exception.VolumeBackendAPIException, 'MIGRATE - Unable to create staging storage group.'): self.migrate._perform_migration( self.data.array, self.data.device_id, mv_details_list, source_sg_name, 'myhostB') @mock.patch.object(migrate.PowerMaxMigrate, '_create_stg_masking_views', return_value=[]) @mock.patch.object(migrate.PowerMaxMigrate, '_create_stg_storage_group_with_vol', return_value=tpd.PowerMaxData.staging_sg) def test_perform_migration_masking_views_fail(self, mock_sg, mock_mvs): """Test to perform migration""" source_sg_name = 'OS-myhost-SRP_1-Diamond-OLTP-F-SG' mv_details_list = list() mv_details_list.append(self.migrate.get_masking_view_component_dict( 'OS-myhostA-SRP_1-Diamond-OLTP-F-1b454e9f-MV', 'SRP_1')) mv_details_list.append(self.migrate.get_masking_view_component_dict( 'OS-myhostB-SRP_1-Diamond-OLTP-F-8970da0c-MV', 'SRP_1')) with self.assertRaisesRegex( exception.VolumeBackendAPIException, 'MIGRATE - Unable to create staging masking views.'): self.migrate._perform_migration( self.data.array, self.data.device_id, mv_details_list, source_sg_name, 'myhostB') @mock.patch.object(migrate.PowerMaxMigrate, '_get_mvs_and_sgs_from_volume', return_value=(tpd.PowerMaxData.staging_mvs, [tpd.PowerMaxData.staging_sg, tpd.PowerMaxData.staging_sg])) @mock.patch.object(migrate.PowerMaxMigrate, '_create_stg_masking_views', return_value=tpd.PowerMaxData.staging_mvs) @mock.patch.object(migrate.PowerMaxMigrate, '_create_stg_storage_group_with_vol', return_value=tpd.PowerMaxData.staging_sg) def test_perform_migration_sg_list_len_fail( self, mock_sg, mock_mvs, mock_new): """Test to perform migration""" source_sg_name = 'OS-myhost-SRP_1-Diamond-OLTP-F-SG' mv_details_list = list() mv_details_list.append(self.migrate.get_masking_view_component_dict( 'OS-myhostA-SRP_1-Diamond-OLTP-F-1b454e9f-MV', 'SRP_1')) mv_details_list.append(self.migrate.get_masking_view_component_dict( 'OS-myhostB-SRP_1-Diamond-OLTP-F-8970da0c-MV', 'SRP_1')) exception_message = ( r"MIGRATE - 
The current storage group list has 2 " r"members. The list is " r"\[\'STG-myhostB-4732de9b-98a4-4b6d-ae4b-3cafb3d34220-SG\', " r"\'STG-myhostB-4732de9b-98a4-4b6d-ae4b-3cafb3d34220-SG\'\]. " r"Will not proceed with cleanup. Please contact customer " r"representative.") with self.assertRaisesRegex( exception.VolumeBackendAPIException, exception_message): self.migrate._perform_migration( self.data.array, self.data.device_id, mv_details_list, source_sg_name, 'myhostB') @mock.patch.object(migrate.PowerMaxMigrate, '_get_mvs_and_sgs_from_volume', return_value=(tpd.PowerMaxData.staging_mvs, ['not_staging_sg'])) @mock.patch.object(migrate.PowerMaxMigrate, '_create_stg_masking_views', return_value=tpd.PowerMaxData.staging_mvs) @mock.patch.object(migrate.PowerMaxMigrate, '_create_stg_storage_group_with_vol', return_value=tpd.PowerMaxData.staging_sg) def test_perform_migration_stg_sg_mismatch_fail( self, mock_sg, mock_mvs, mock_new): """Test to perform migration""" source_sg_name = 'OS-myhost-SRP_1-Diamond-OLTP-F-SG' mv_details_list = list() mv_details_list.append(self.migrate.get_masking_view_component_dict( 'OS-myhostA-SRP_1-Diamond-OLTP-F-1b454e9f-MV', 'SRP_1')) mv_details_list.append(self.migrate.get_masking_view_component_dict( 'OS-myhostB-SRP_1-Diamond-OLTP-F-8970da0c-MV', 'SRP_1')) with self.assertRaisesRegex( exception.VolumeBackendAPIException, 'MIGRATE - The current storage group not_staging_sg does not ' 'match STG-myhostB-4732de9b-98a4-4b6d-ae4b-3cafb3d34220-SG. ' 'Will not proceed with cleanup. Please contact customer ' 'representative.'): self.migrate._perform_migration( self.data.array, self.data.device_id, mv_details_list, source_sg_name, 'myhostB') @mock.patch.object(rest.PowerMaxRest, 'delete_masking_view') def test_delete_staging_masking_views(self, mock_del): self.assertTrue(self.migrate._delete_staging_masking_views( self.data.array, self.data.staging_mvs, 'myhostB')) mock_del.assert_called_once() @mock.patch.object(rest.PowerMaxRest, 'delete_masking_view') def test_delete_staging_masking_views_no_host_match(self, mock_del): self.assertFalse(self.migrate._delete_staging_masking_views( self.data.array, self.data.staging_mvs, 'myhostC')) mock_del.assert_not_called() @mock.patch.object(rest.PowerMaxRest, 'create_masking_view') @mock.patch.object(rest.PowerMaxRest, 'get_masking_view', return_value=tpd.PowerMaxData.maskingview[0]) def test_create_stg_masking_views(self, mock_get, mock_create): mv_detail_list = list() for masking_view in self.data.legacy_mvs: masking_view_dict = self.migrate.get_masking_view_component_dict( masking_view, 'SRP_1') if masking_view_dict: mv_detail_list.append(masking_view_dict) self.assertIsNotNone(self.migrate._create_stg_masking_views( self.data.array, mv_detail_list, self.data.staging_sg, self.data.extra_specs)) self.assertEqual(2, mock_create.call_count) @mock.patch.object(rest.PowerMaxRest, 'create_masking_view') @mock.patch.object(rest.PowerMaxRest, 'get_masking_view', side_effect=[tpd.PowerMaxData.maskingview[0], None]) def test_create_stg_masking_views_mv_not_created( self, mock_get, mock_create): mv_detail_list = list() for masking_view in self.data.legacy_mvs: masking_view_dict = self.migrate.get_masking_view_component_dict( masking_view, 'SRP_1') if masking_view_dict: mv_detail_list.append(masking_view_dict) self.assertIsNone(self.migrate._create_stg_masking_views( self.data.array, mv_detail_list, self.data.staging_sg, self.data.extra_specs)) @mock.patch.object(provision.PowerMaxProvision, 'create_volume_from_sg') 
@mock.patch.object(provision.PowerMaxProvision, 'create_storage_group', return_value=tpd.PowerMaxData.staging_mvs[0]) def test_create_stg_storage_group_with_vol(self, mock_mv, mock_create): self.migrate._create_stg_storage_group_with_vol( self.data.array, 'myhostB', self.data.extra_specs) mock_create.assert_called_once() @mock.patch.object(provision.PowerMaxProvision, 'create_volume_from_sg') @mock.patch.object(provision.PowerMaxProvision, 'create_storage_group', return_value=None) def test_create_stg_storage_group_with_vol_None( self, mock_mv, mock_create): self.assertIsNone(self.migrate._create_stg_storage_group_with_vol( self.data.array, 'myhostB', self.data.extra_specs)) @mock.patch.object(rest.PowerMaxRest, 'get_masking_views_from_storage_group', return_value=tpd.PowerMaxData.legacy_mvs) @mock.patch.object(rest.PowerMaxRest, 'get_storage_groups_from_volume', return_value=[tpd.PowerMaxData.legacy_shared_sg]) def test_get_mvs_and_sgs_from_volume(self, mock_sgs, mock_mvs): mv_list, sg_list = self.migrate._get_mvs_and_sgs_from_volume( self.data.array, self.data.device_id) mock_mvs.assert_called_once() self.assertEqual([self.data.legacy_shared_sg], sg_list) self.assertEqual(self.data.legacy_mvs, mv_list) @mock.patch.object(rest.PowerMaxRest, 'get_masking_views_from_storage_group') @mock.patch.object(rest.PowerMaxRest, 'get_storage_groups_from_volume', return_value=list()) def test_get_mvs_and_sgs_from_volume_empty_sg_list( self, mock_sgs, mock_mvs): mv_list, sg_list = self.migrate._get_mvs_and_sgs_from_volume( self.data.array, self.data.device_id) mock_mvs.assert_not_called() self.assertTrue(len(sg_list) == 0) self.assertTrue(len(mv_list) == 0) def test_get_volume_host_list(self): volume1 = deepcopy(self.data.test_volume) volume1.volume_attachment.objects = [self.data.test_volume_attachment] os_host_list = self.migrate.get_volume_host_list( volume1, self.data.connector) self.assertEqual('HostX', os_host_list[0]) def test_get_volume_host_list_no_attachments(self): _volume_attachment = deepcopy(self.data.test_volume_attachment) _volume_attachment.update({'connector': None}) volume1 = deepcopy(self.data.test_volume) volume1.volume_attachment.objects = [_volume_attachment] os_host_list = self.migrate.get_volume_host_list( volume1, self.data.connector) self.assertTrue(len(os_host_list) == 0) @mock.patch.object(rest.PowerMaxRest, 'delete_masking_view') @mock.patch.object(rest.PowerMaxRest, 'get_masking_views_from_storage_group', return_value=[tpd.PowerMaxData.staging_mv1]) @mock.patch.object(rest.PowerMaxRest, 'get_volumes_in_storage_group', return_value=[tpd.PowerMaxData.volume_id]) def test_cleanup_staging_objects(self, mock_vols, mock_mvs, mock_del_mv): self.migrate.cleanup_staging_objects( self.data.array, [self.data.staging_sg], self.data.extra_specs) mock_del_mv.assert_called_once_with( self.data.array, self.data.staging_mv1) @mock.patch.object(rest.PowerMaxRest, 'delete_masking_view') def test_cleanup_staging_objects_not_staging(self, mock_del_mv): self.migrate.cleanup_staging_objects( self.data.array, [self.data.storagegroup_name_f], self.data.extra_specs) mock_del_mv.assert_not_called() @mock.patch.object(rest.PowerMaxRest, 'get_masking_views_from_storage_group') @mock.patch.object(rest.PowerMaxRest, 'get_volumes_in_storage_group', return_value=[tpd.PowerMaxData.device_id, tpd.PowerMaxData.device_id2], ) def test_cleanup_staging_objects_multiple_vols(self, mock_vols, mock_mvs): self.migrate.cleanup_staging_objects( self.data.array, [self.data.storagegroup_name_f], self.data.extra_specs) 
mock_mvs.assert_not_called() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/volume/drivers/dell_emc/powermax/test_powermax_nvme_tcp.py0000664000175000017500000004552300000000000032226 0ustar00zuulzuul00000000000000# Copyright (c) 2020 Dell Inc. or its subsidiaries. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from copy import deepcopy from unittest import mock from cinder import exception from cinder.tests.unit import test from cinder.tests.unit.volume.drivers.dell_emc.powermax import ( powermax_data as tpd) from cinder.tests.unit.volume.drivers.dell_emc.powermax import ( powermax_fake_objects as tpfo) from cinder.volume.drivers.dell_emc.powermax import nvme_tcp from cinder.volume.drivers.dell_emc.powermax import rest from cinder.volume import volume_utils class PowerMaxNVMeTCPTest(test.TestCase): def setUp(self): self.data = tpd.PowerMaxData() super(PowerMaxNVMeTCPTest, self).setUp() self.mock_object(volume_utils, 'get_max_over_subscription_ratio') self.configuration = tpfo.FakeConfiguration( None, 'NVMeTests', 1, 1, san_ip='1.1.1.1', san_login='smc', powermax_array=self.data.array, powermax_srp='SRP_1', san_password='smc', san_api_port=8443, powermax_port_groups=[self.data.port_group_name_i]) self.mock_object(rest.PowerMaxRest, '_establish_rest_session', return_value=tpfo.FakeRequestsSession()) driver = (nvme_tcp. 
PowerMaxNVMETCPDriver(configuration=self.configuration)) self.driver = driver self.common = self.driver.common self.masking = self.common.masking self.utils = self.common.utils self.rest = self.common.rest self.mock_object( self.utils, 'get_volumetype_extra_specs', return_value=deepcopy(self.data.vol_type_extra_specs)) def test_create_volume(self): with mock.patch.object(self.common, 'create_volume') as mock_create: self.driver.create_volume(self.data.test_volume) mock_create.assert_called_once_with(self.data.test_volume) def test_create_volume_from_snapshot(self): volume = self.data.test_clone_volume snapshot = self.data.test_snapshot with mock.patch.object( self.common, 'create_volume_from_snapshot') as mock_create: self.driver.create_volume_from_snapshot(volume, snapshot) mock_create.assert_called_once_with(volume, snapshot) def test_create_cloned_volume(self): volume = self.data.test_clone_volume src_volume = self.data.test_volume with mock.patch.object( self.common, 'create_cloned_volume') as mock_create: self.driver.create_cloned_volume(volume, src_volume) mock_create.assert_called_once_with(volume, src_volume) def test_delete_volume(self): with mock.patch.object(self.common, 'delete_volume') as mock_delete: self.driver.delete_volume(self.data.test_volume) mock_delete.assert_called_once_with(self.data.test_volume) def test_create_snapshot(self): with mock.patch.object(self.common, 'create_snapshot') as mock_create: self.driver.create_snapshot(self.data.test_snapshot) mock_create.assert_called_once_with( self.data.test_snapshot, self.data.test_snapshot.volume) def test_delete_snapshot(self): with mock.patch.object(self.common, 'delete_snapshot') as mock_delete: self.driver.delete_snapshot(self.data.test_snapshot) mock_delete.assert_called_once_with( self.data.test_snapshot, self.data.test_snapshot.volume) def test_update_volume_stats(self): with mock.patch.object(self.common, 'update_volume_stats', return_value={}) as mock_update: self.driver._update_volume_stats() mock_update.assert_called_once_with() def test_check_for_setup_error_with_valid_versions(self): with mock.patch.object( self.common.rest, 'get_uni_version', return_value=('10.1.0.6', '101')) as mock_uni_version: with mock.patch.object( self.common.rest, 'get_vmax_model', return_value=('Powermax_2500')) as mock_pmax_version: self.driver.check_for_setup_error() mock_uni_version.assert_called_once() mock_pmax_version.assert_called_once() def test_check_for_setup_error_with_valid_powermax_850_version(self): with mock.patch.object( self.common.rest, 'get_uni_version', return_value=('10.1.0.6', '101')) as mock_uni_version: with mock.patch.object( self.common.rest, 'get_vmax_model', return_value=('Powermax_8500')) as mock_pmax_version: self.driver.check_for_setup_error() mock_uni_version.assert_called_once() mock_pmax_version.assert_called_once() def test_check_for_setup_error_exception(self): with mock.patch.object( self.common.rest, 'get_uni_version', return_value=('9.2.0.0', '92')) as mock_rest: with mock.patch.object( self.common.rest, 'get_vmax_model', return_value=('Powermax_2000')) as mock_pmax_version: mock_rest.assert_not_called() mock_pmax_version.assert_not_called() self.assertRaises( exception.InvalidConfigurationValue, self.driver.check_for_setup_error) def test_check_for_setup_error_exception_with_invalid_powermax_version( self): with mock.patch.object( self.common.rest, 'get_uni_version', return_value=('10.1.0.6', '101')) as mock_uni_version: with mock.patch.object( self.common.rest, 'get_vmax_model', 
return_value=('Powermax_2000')) as mock_pmax_version: mock_uni_version.assert_not_called() mock_pmax_version.assert_not_called() self.assertRaises( exception.InvalidConfigurationValue, self.driver.check_for_setup_error) def test_check_for_setup_error_exception_with_invalid_powermax_version_2( self): with mock.patch.object( self.common.rest, 'get_uni_version', return_value=('10.1.0.6', '101')) as mock_uni_version: with mock.patch.object( self.common.rest, 'get_vmax_model', return_value=('Powermax_8000')) as mock_pmax_version: mock_uni_version.assert_not_called() mock_pmax_version.assert_not_called() self.assertRaises( exception.InvalidConfigurationValue, self.driver.check_for_setup_error) def test_check_for_setup_error_exception_without_unisphere_version(self): with mock.patch.object( self.common.rest, 'get_uni_version', return_value=(None, None)) as mock_rest: with mock.patch.object( self.common.rest, 'get_vmax_model', return_value=('Powermax_8500')) as mock_pmax_version: mock_rest.assert_not_called() mock_pmax_version.assert_not_called() self.assertRaises( exception.InvalidConfigurationValue, self.driver.check_for_setup_error) def test_check_for_setup_error_exception_without_powermax_version(self): with mock.patch.object( self.common.rest, 'get_uni_version', return_value=('10.1.0.6', '101')) as mock_rest: with mock.patch.object( self.common.rest, 'get_vmax_model', return_value=None) as mock_pmax_version: mock_rest.assert_not_called() mock_pmax_version.assert_not_called() self.assertRaises( exception.InvalidConfigurationValue, self.driver.check_for_setup_error) def test_ensure_export(self): self.driver.ensure_export('context', 'volume') def test_create_export(self): self.driver.create_export('context', 'volume', 'connector') def test_remove_export(self): self.driver.remove_export('context', 'volume') def test_check_for_export(self): self.driver.check_for_export('context', 'volume_id') def test_extend_volume(self): with mock.patch.object(self.common, 'extend_volume') as mock_extend: self.driver.extend_volume(self.data.test_volume, '8') mock_extend.assert_called_once_with(self.data.test_volume, '8') def test_manage_existing(self): with mock.patch.object(self.common, 'manage_existing', return_value={}) as mock_manage: external_ref = {u'source-name': u'00002'} self.driver.manage_existing(self.data.test_volume, external_ref) mock_manage.assert_called_once_with( self.data.test_volume, external_ref) def test_manage_existing_get_size(self): with mock.patch.object(self.common, 'manage_existing_get_size', return_value='1') as mock_manage: external_ref = {u'source-name': u'00002'} self.driver.manage_existing_get_size( self.data.test_volume, external_ref) mock_manage.assert_called_once_with( self.data.test_volume, external_ref) def test_unmanage_volume(self): with mock.patch.object(self.common, 'unmanage', return_value={}) as mock_unmanage: self.driver.unmanage(self.data.test_volume) mock_unmanage.assert_called_once_with( self.data.test_volume) def test_retype(self): host = {'host': self.data.new_host} new_type = {'extra_specs': {}} with mock.patch.object(self.common, 'retype', return_value=True) as mck_retype: self.driver.retype({}, self.data.test_volume, new_type, '', host) mck_retype.assert_called_once_with( self.data.test_volume, new_type, host) def test_failover_host(self): with mock.patch.object( self.common, 'failover', return_value=(self.data.remote_array, [], [])) as mock_fo: self.driver.failover_host(self.data.ctx, [self.data.test_volume]) 
mock_fo.assert_called_once_with([self.data.test_volume], None, None) def test_enable_replication(self): with mock.patch.object( self.common, 'enable_replication') as mock_er: self.driver.enable_replication( self.data.ctx, self.data.test_group, [self.data.test_volume]) mock_er.assert_called_once() def test_disable_replication(self): with mock.patch.object( self.common, 'disable_replication') as mock_dr: self.driver.disable_replication( self.data.ctx, self.data.test_group, [self.data.test_volume]) mock_dr.assert_called_once() def test_failover_replication(self): with mock.patch.object( self.common, 'failover_replication') as mock_fo: self.driver.failover_replication( self.data.ctx, self.data.test_group, [self.data.test_volume]) mock_fo.assert_called_once() def test_initialize_connection(self): with (mock.patch.object( self.common, 'initialize_connection', return_value=self.data.nvme_tcp_device_info) as mock_initialize): with mock.patch.object( self.driver, '_populate_data') as mock_populate: self.driver.initialize_connection( self.data.test_volume, self.data.connector) mock_initialize.assert_called_once_with( self.data.test_volume, self.data.connector) mock_populate.assert_called_once_with( self.data.nvme_tcp_device_info) def test_terminate_connection(self): with mock.patch.object( self.common, 'terminate_connection') as mock_terminate: self.driver.terminate_connection( self.data.test_volume, self.data.connector) mock_terminate.assert_called_once_with( self.data.test_volume, self.data.connector) def test_manage_existing_snapshot(self): with mock.patch.object(self.common, 'manage_existing_snapshot', return_value={}) as mock_manage: external_ref = {u'source-name': u'00002'} self.driver.manage_existing_snapshot( self.data.test_snapshot_manage, external_ref) mock_manage.assert_called_once_with( self.data.test_snapshot_manage, external_ref) def test_manage_existing_snapshot_get_size(self): with mock.patch.object(self.common, 'manage_existing_snapshot_get_size', return_value='1') as mock_manage: external_ref = {u'source-name': u'00002'} self.driver.manage_existing_snapshot_get_size( self.data.test_snapshot_manage, external_ref) mock_manage.assert_called_once_with( self.data.test_snapshot_manage) def test_unmanage_snapshot(self): with mock.patch.object(self.common, 'unmanage_snapshot', return_value={}) as mock_unmanage: self.driver.unmanage_snapshot(self.data.test_snapshot_manage) mock_unmanage.assert_called_once_with( self.data.test_snapshot_manage) def test_get_manageable_volumes(self): cinder_volumes = marker = limit = offset = sort_keys = sort_dirs = None with mock.patch.object(self.common, 'get_manageable_volumes', return_value={}) as mock_manage: self.driver.get_manageable_volumes(cinder_volumes, marker, limit, offset, sort_keys, sort_dirs) mock_manage.assert_called_once_with( marker, limit, offset, sort_keys, sort_dirs) def test_get_manageable_snapshots(self): cinder_snapshots = marker = limit = offset = \ sort_keys = sort_dirs = None with mock.patch.object(self.common, 'get_manageable_snapshots', return_value={}) as mock_manage: self.driver.get_manageable_snapshots(cinder_snapshots, marker, limit, offset, sort_keys, sort_dirs) mock_manage.assert_called_once_with( marker, limit, offset, sort_keys, sort_dirs) def test_create_group(self): context = {'dummy_key': 'dummy_value'} group = 'dummy_group' with mock.patch.object(self.common, 'create_group', return_value={}) as mock_group: self.driver.create_group(context, group) mock_group.assert_called_once_with( context, group) def 
test_delete_group(self): context = {'dummy_key': 'dummy_value'} group = 'dummy_group' volumes = ['dummy_volume'] with mock.patch.object(self.common, 'delete_group', return_value={}) as mock_group: self.driver.delete_group(context, group, volumes) mock_group.assert_called_once_with( context, group, volumes) def test_create_group_snapshot(self): context = {'dummy_key': 'dummy_value'} group_snapshot = 'dummy_group_snapshot' snapshots = ['dummy_snapshot'] with mock.patch.object(self.common, 'create_group_snapshot', return_value={}) as mock_group: self.driver.create_group_snapshot(context, group_snapshot, snapshots) mock_group.assert_called_once_with( context, group_snapshot, snapshots) def test_delete_group_snapshot(self): context = {'dummy_key': 'dummy_value'} group_snapshot = 'dummy_group_snapshot' snapshots = ['dummy_snapshot'] with mock.patch.object(self.common, 'delete_group_snapshot', return_value={}) as mock_group: self.driver.delete_group_snapshot(context, group_snapshot, snapshots) mock_group.assert_called_once_with( context, group_snapshot, snapshots) def test_update_group(self): context = {'dummy_key': 'dummy_value'} group = 'dummy_group' add_volumes = ['dummy_add_volume'] remove_volumes = ['dummy_remove_volume'] with mock.patch.object(self.common, 'update_group', return_value={}) as mock_group: self.driver.update_group(context, group, add_volumes, remove_volumes) mock_group.assert_called_once_with( group, add_volumes, remove_volumes) def test_create_group_from_src(self): context = {'dummy_key': 'dummy_value'} group = 'dummy_group' group_snapshot = 'dummy_group_snapshot' snapshots = ['dummy_snapshot'] volumes = ['dummy_add_volume'] source_group = 'dummy_source_group' source_volumes = ['dummy_source_volume'] with mock.patch.object(self.common, 'create_group_from_src', return_value={}) as mock_group: self.driver.create_group_from_src(context, group, volumes, group_snapshot, snapshots, source_group, source_volumes) mock_group.assert_called_once_with( context, group, volumes, group_snapshot, snapshots, source_group, source_volumes) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/volume/drivers/dell_emc/powermax/test_powermax_performance.py0000664000175000017500000004315000000000000032706 0ustar00zuulzuul00000000000000# Copyright (c) 2020 Dell Inc. or its subsidiaries. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
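# ---------------------------------------------------------------------------
# Illustrative note (not part of the driver): the tests below cover the
# performance-based port and port-group load balancing.  _process_load
# averages a metric (e.g. 'PercentBusy') over the intervals returned by
# Unisphere, and process_port_group_load then picks the least (or most)
# loaded candidate.  The helpers below are a hedged sketch of that
# calculation only; the helper names are assumptions, and the payload shape
# mirrors the dummy_performance_data fake used in these tests rather than
# the driver's actual implementation.
def _sketch_average_metric(performance_data, metric):
    """Return (average, total, count) of a metric over all intervals."""
    intervals = performance_data['resultList']['result']
    total = sum(interval.get(metric, 0) for interval in intervals)
    return total / len(intervals), total, len(intervals)


def _sketch_pick_port_group(load_by_port_group, max_load=False):
    """Return (load, port_group) for the least (or most) loaded group."""
    selector = max if max_load else min
    port_group = selector(load_by_port_group, key=load_by_port_group.get)
    return load_by_port_group[port_group], port_group


# Example (illustrative only):
#   data = {'resultList': {'result': [{'PercentBusy': 10.0},
#                                     {'PercentBusy': 30.0}]}}
#   _sketch_average_metric(data, 'PercentBusy')           -> (20.0, 40.0, 2)
#   _sketch_pick_port_group({'PG-A': 20.0, 'PG-B': 5.0})  -> (5.0, 'PG-B')
# ---------------------------------------------------------------------------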
from copy import deepcopy from unittest import mock from cinder.tests.unit import test from cinder.tests.unit.volume.drivers.dell_emc.powermax import ( powermax_data as tpd) from cinder.tests.unit.volume.drivers.dell_emc.powermax import ( powermax_fake_objects as tpfo) from cinder.volume.drivers.dell_emc.powermax import iscsi from cinder.volume.drivers.dell_emc.powermax import performance from cinder.volume.drivers.dell_emc.powermax import rest from cinder.volume.drivers.dell_emc.powermax import utils from cinder.volume import volume_utils class PowerMaxPerformanceTest(test.TestCase): def setUp(self): self.data = tpd.PowerMaxData() self.reference_cinder_conf = tpfo.FakeConfiguration( None, 'ProvisionTests', 1, 1, san_ip='1.1.1.1', san_login='smc', powermax_array=self.data.array, powermax_srp='SRP_1', san_password='smc', san_api_port=8443, powermax_port_groups=[self.data.port_group_name_i], load_balance=True, load_balance_real_time=True, load_data_format='avg', load_look_back=60, load_look_back_real_time=10, port_group_load_metric='PercentBusy', port_load_metric='PercentBusy') self.reference_perf_conf = { 'load_balance': True, 'load_balance_rt': True, 'perf_registered': True, 'rt_registered': True, 'collection_interval': 5, 'data_format': 'Average', 'look_back': 60, 'look_back_rt': 10, 'port_group_metric': 'PercentBusy', 'port_metric': 'PercentBusy'} super(PowerMaxPerformanceTest, self).setUp() self.mock_object(volume_utils, 'get_max_over_subscription_ratio') self.mock_object(rest.PowerMaxRest, '_establish_rest_session', return_value=tpfo.FakeRequestsSession()) driver = iscsi.PowerMaxISCSIDriver( configuration=self.reference_cinder_conf) self.driver = driver self.common = self.driver.common self.performance = self.driver.performance self.rest = self.common.rest def test_set_performance_configuration(self): """Test set_performance_configuration diagnostic & real time.""" self.assertEqual(self.reference_perf_conf, self.performance.config) @mock.patch.object( performance.PowerMaxPerformance, 'get_array_registration_details', return_value=(True, False, 5)) def test_set_performance_configuration_no_rt_reg_rt_disabled( self, mck_reg): """Test set_performance_configuration real-time disabled. Test configurations settings when real-time is disabled in cinder.conf and real-time metrics are not registered in Unisphere. """ cinder_conf = deepcopy(self.reference_cinder_conf) cinder_conf.load_balance_real_time = False rest.PowerMaxRest._establish_rest_session = mock.Mock( return_value=tpfo.FakeRequestsSession()) temp_driver = iscsi.PowerMaxISCSIDriver(configuration=cinder_conf) perf_conf = deepcopy(self.reference_perf_conf) perf_conf['load_balance_rt'] = False perf_conf['rt_registered'] = False self.assertEqual(perf_conf, temp_driver.performance.config) def test_set_performance_configuration_rt_reg_rt_disabled(self): """Test set_performance_configuration real-time disabled v2. Test configurations settings when real-time is disabled in cinder.conf and real-time metrics are registered in Unisphere. 
""" cinder_conf = deepcopy(self.reference_cinder_conf) cinder_conf.load_balance_real_time = False rest.PowerMaxRest._establish_rest_session = mock.Mock( return_value=tpfo.FakeRequestsSession()) temp_driver = iscsi.PowerMaxISCSIDriver(configuration=cinder_conf) perf_conf = deepcopy(self.reference_perf_conf) perf_conf['load_balance_rt'] = False perf_conf['rt_registered'] = True self.assertEqual(perf_conf, temp_driver.performance.config) @mock.patch.object( performance.PowerMaxPerformance, 'get_array_registration_details', return_value=(False, False, 5)) def test_set_performance_configuration_not_perf_registered(self, mck_reg): """Test set_performance_configuration performance metrics not enabled. This tests config settings where user has enabled load balancing in cinder.conf but Unisphere is not registered for performance metrics. """ cinder_conf = deepcopy(self.reference_cinder_conf) rest.PowerMaxRest._establish_rest_session = mock.Mock( return_value=tpfo.FakeRequestsSession()) temp_driver = iscsi.PowerMaxISCSIDriver(configuration=cinder_conf) perf_conf = {'load_balance': False} self.assertEqual(perf_conf, temp_driver.performance.config) def test_set_performance_configuration_invalid_data_format(self): """Test set_performance_configuration invalid data format, avg set.""" cinder_conf = deepcopy(self.reference_cinder_conf) cinder_conf.load_data_format = 'InvalidFormat' rest.PowerMaxRest._establish_rest_session = mock.Mock( return_value=tpfo.FakeRequestsSession()) temp_driver = iscsi.PowerMaxISCSIDriver(configuration=cinder_conf) self.assertEqual(self.reference_perf_conf, temp_driver.performance.config) def test_set_performance_configuration_max_data_format(self): """Test set_performance_configuration max data format, max set.""" cinder_conf = deepcopy(self.reference_cinder_conf) cinder_conf.load_data_format = 'MAXIMUM' rest.PowerMaxRest._establish_rest_session = mock.Mock( return_value=tpfo.FakeRequestsSession()) temp_driver = iscsi.PowerMaxISCSIDriver(configuration=cinder_conf) perf_conf = deepcopy(self.reference_perf_conf) perf_conf['data_format'] = 'Maximum' self.assertEqual(perf_conf, temp_driver.performance.config) def test_set_performance_configuration_lookback_invalid(self): """Test set_performance_configuration invalid lookback windows.""" # Window set to negative value cinder_conf = deepcopy(self.reference_cinder_conf) cinder_conf.load_look_back = -1 rest.PowerMaxRest._establish_rest_session = mock.Mock( return_value=tpfo.FakeRequestsSession()) temp_driver = iscsi.PowerMaxISCSIDriver(configuration=cinder_conf) perf_conf = deepcopy(self.reference_perf_conf) perf_conf['look_back'] = 60 self.assertEqual(perf_conf, temp_driver.performance.config) # Window set to value larger than upper limit of 1440 cinder_conf.load_look_back = 9999 temp_driver = iscsi.PowerMaxISCSIDriver(configuration=cinder_conf) self.assertEqual(perf_conf, temp_driver.performance.config) def test_set_performance_configuration_rt_lookback_invalid(self): """Test set_performance_configuration invalid rt lookback windows.""" # Window set to negative value cinder_conf = deepcopy(self.reference_cinder_conf) cinder_conf.load_look_back_real_time = -1 rest.PowerMaxRest._establish_rest_session = mock.Mock( return_value=tpfo.FakeRequestsSession()) temp_driver = iscsi.PowerMaxISCSIDriver(configuration=cinder_conf) perf_conf = deepcopy(self.reference_perf_conf) perf_conf['look_back_rt'] = 1 self.assertEqual(perf_conf, temp_driver.performance.config) # Window set to value larger than upper limit of 1440 
cinder_conf.load_look_back_real_time = 100 temp_driver = iscsi.PowerMaxISCSIDriver(configuration=cinder_conf) self.assertEqual(perf_conf, temp_driver.performance.config) def test_set_performance_configuration_invalid_pg_metric(self): """Test set_performance_configuration invalid pg metric.""" cinder_conf = deepcopy(self.reference_cinder_conf) cinder_conf.port_group_load_metric = 'InvalidMetric' rest.PowerMaxRest._establish_rest_session = mock.Mock( return_value=tpfo.FakeRequestsSession()) temp_driver = iscsi.PowerMaxISCSIDriver(configuration=cinder_conf) self.assertEqual(self.reference_perf_conf, temp_driver.performance.config) def test_set_performance_configuration_invalid_port_metric(self): """Test set_performance_configuration invalid port metric.""" cinder_conf = deepcopy(self.reference_cinder_conf) cinder_conf.port_load_metric = 'InvalidMetric' rest.PowerMaxRest._establish_rest_session = mock.Mock( return_value=tpfo.FakeRequestsSession()) temp_driver = iscsi.PowerMaxISCSIDriver(configuration=cinder_conf) self.assertEqual(self.reference_perf_conf, temp_driver.performance.config) def test_get_array_registration_details(self): """Test get_array_registration_details.""" p_reg, rt_reg, c_int = self.performance.get_array_registration_details( self.data.array) self.assertEqual((True, True, 5), (p_reg, rt_reg, c_int)) def test_get_array_performance_keys(self): """Test get_array_performance_keys.""" f_date, l_date = self.performance.get_array_performance_keys( self.data.array) self.assertEqual(self.data.f_date_a, f_date) self.assertEqual(self.data.l_date, l_date) def test_get_look_back_window_interval_timestamp(self): """Test _get_look_back_window_interval_timestamp.""" self.assertEqual( self.data.l_date - (utils.ONE_MINUTE * 10), self.performance._get_look_back_window_interval_timestamp( self.data.l_date, 10)) def test_process_load(self): """Test _process_load to calculate average of all intervals.""" performance_data = self.data.dummy_performance_data perf_metrics = performance_data['resultList']['result'] metric = self.data.perf_pb_metric ref_total = 0 for interval in perf_metrics: ref_total += interval.get(metric) ref_avg = ref_total / len(perf_metrics) avg, total, count = self.performance._process_load( performance_data, metric) self.assertEqual(avg, ref_avg) self.assertEqual(total, ref_total) self.assertEqual(count, len(perf_metrics)) def test_get_port_group_performance_stats(self): """Test _get_port_group_performance_stats.""" array_id = self.data.array port_group_id = self.data.port_group_name_i f_date = self.data.f_date_a l_date = self.data.l_date metric = self.data.perf_pb_metric data_format = self.data.perf_df_avg avg, total, count = self.performance._get_port_group_performance_stats( array_id, port_group_id, f_date, l_date, metric, data_format) self.assertTrue(avg > 0) self.assertIsInstance(avg, float) self.assertTrue(total > 0) self.assertIsInstance(total, float) self.assertTrue(count > 0) self.assertIsInstance(count, int) def test_get_port_performance_stats_diagnostic(self): """Test _get_port_performance_stats diagnostic.""" array_id = self.data.array dir_id = self.data.iscsi_dir port_id = self.data.iscsi_port f_date = self.data.f_date_a l_date = self.data.l_date metric = self.data.perf_pb_metric data_format = self.data.perf_df_avg res_type = 'diagnostic' ref_target_uri = '/performance/FEPort/metrics' ref_resource = '%(res)s Port performance metrics' % {'res': res_type} ref_request_body = { utils.SYMM_ID: array_id, utils.DIR_ID: dir_id, utils.PORT_ID: port_id, utils.S_DATE: 
f_date, utils.E_DATE: l_date, utils.DATA_FORMAT: data_format, utils.METRICS: [metric]} with mock.patch.object( self.rest, 'post_request', side_effect=self.rest.post_request) as mck_post: avg, total, count = self.performance._get_port_performance_stats( array_id, dir_id, port_id, f_date, l_date, metric, data_format, real_time=False) mck_post.assert_called_once_with( ref_target_uri, ref_resource, ref_request_body) self.assertTrue(avg > 0) self.assertIsInstance(avg, float) self.assertTrue(total > 0) self.assertIsInstance(total, float) self.assertTrue(count > 0) self.assertIsInstance(count, int) def test_get_port_performance_stats_real_time(self): """Test _get_port_performance_stats real-time.""" array_id = self.data.array dir_id = self.data.iscsi_dir port_id = self.data.iscsi_port f_date = self.data.f_date_a l_date = self.data.l_date metric = self.data.perf_pb_metric res_type = 'real-time' ref_target_uri = '/performance/realtime/metrics' ref_resource = '%(res)s Port performance metrics' % {'res': res_type} ref_request_body = { utils.SYMM_ID: array_id, utils.INST_ID: self.data.iscsi_dir_port, utils.S_DATE: f_date, utils.E_DATE: l_date, utils.CAT: utils.FE_PORT_RT, utils.METRICS: [metric]} with mock.patch.object( self.rest, 'post_request', side_effect=self.rest.post_request) as mck_post: avg, total, count = self.performance._get_port_performance_stats( array_id, dir_id, port_id, f_date, l_date, metric, real_time=True) mck_post.assert_called_once_with( ref_target_uri, ref_resource, ref_request_body) self.assertTrue(avg > 0) self.assertIsInstance(avg, float) self.assertTrue(total > 0) self.assertIsInstance(total, float) self.assertTrue(count > 0) self.assertIsInstance(count, int) def test_process_port_group_load_min(self): """Test process_port_group_load min load.""" array_id = self.data.array port_groups = self.data.perf_port_groups avg, metric, port_group = self.performance.process_port_group_load( array_id, port_groups) self.assertTrue(avg > 0) self.assertIsInstance(avg, float) self.assertEqual(metric, self.performance.config.get('port_group_metric')) self.assertIn(port_group, port_groups) def test_process_port_group_load_max(self): """Test process_port_group_load max load.""" array_id = self.data.array port_groups = self.data.perf_port_groups avg, metric, port_group = self.performance.process_port_group_load( array_id, port_groups, max_load=True) self.assertTrue(abs(avg) > 0) self.assertIsInstance(avg, float) self.assertEqual(metric, self.performance.config.get('port_group_metric')) self.assertIn(port_group, port_groups) def test_process_port_load_real_time_min(self): """Test process_port_load min load real-time.""" array_id = self.data.array ports = self.data.perf_ports avg, metric, port = self.performance.process_port_group_load( array_id, ports) self.assertTrue(avg > 0) self.assertIsInstance(avg, float) self.assertEqual(metric, self.performance.config.get('port_group_metric')) self.assertIn(port, ports) def test_process_port_load_real_time_max(self): """Test process_port_load max load real-time.""" array_id = self.data.array ports = self.data.perf_ports avg, metric, port = self.performance.process_port_group_load( array_id, ports, max_load=True) self.assertTrue(abs(avg) > 0) self.assertIsInstance(avg, float) self.assertEqual(metric, self.performance.config.get('port_group_metric')) self.assertIn(port, ports) def test_process_port_load_diagnostic_min(self): """Test process_port_load min load real-time.""" array_id = self.data.array ports = self.data.perf_ports 
self.performance.config['load_balance_rt'] = False avg, metric, port = self.performance.process_port_group_load( array_id, ports) self.assertTrue(avg > 0) self.assertIsInstance(avg, float) self.assertEqual(metric, self.performance.config.get('port_group_metric')) self.assertIn(port, ports) def test_process_port_load_diagnostic_max(self): """Test process_port_load min load real-time.""" array_id = self.data.array ports = self.data.perf_ports self.performance.config['load_balance_rt'] = False avg, metric, port = self.performance.process_port_group_load( array_id, ports, max_load=True) self.assertTrue(abs(avg) > 0) self.assertIsInstance(avg, float) self.assertEqual(metric, self.performance.config.get('port_group_metric')) self.assertIn(port, ports) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/volume/drivers/dell_emc/powermax/test_powermax_provision.py0000664000175000017500000006627000000000000032445 0ustar00zuulzuul00000000000000# Copyright (c) 2020 Dell Inc. or its subsidiaries. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from unittest import mock from unittest.mock import call from cinder import exception from cinder.tests.unit import test from cinder.tests.unit import utils as test_utils from cinder.tests.unit.volume.drivers.dell_emc.powermax import ( powermax_data as tpd) from cinder.tests.unit.volume.drivers.dell_emc.powermax import ( powermax_fake_objects as tpfo) from cinder.volume.drivers.dell_emc.powermax import iscsi from cinder.volume.drivers.dell_emc.powermax import provision from cinder.volume.drivers.dell_emc.powermax import rest from cinder.volume import volume_utils class PowerMaxProvisionTest(test.TestCase): def setUp(self): self.data = tpd.PowerMaxData() super(PowerMaxProvisionTest, self).setUp() self.mock_object(volume_utils, 'get_max_over_subscription_ratio') configuration = tpfo.FakeConfiguration( None, 'ProvisionTests', 1, 1, san_ip='1.1.1.1', san_login='smc', powermax_array=self.data.array, powermax_srp='SRP_1', san_password='smc', san_api_port=8443, powermax_port_groups=[self.data.port_group_name_i]) self.mock_object(rest.PowerMaxRest, '_establish_rest_session', return_value=tpfo.FakeRequestsSession()) driver = iscsi.PowerMaxISCSIDriver(configuration=configuration) self.driver = driver self.common = self.driver.common self.provision = self.common.provision self.utils = self.common.utils self.rest = self.common.rest @mock.patch.object(rest.PowerMaxRest, 'create_storage_group', return_value=tpd.PowerMaxData.storagegroup_name_f) @mock.patch.object(rest.PowerMaxRest, 'get_storage_group', side_effect=[ tpd.PowerMaxData.storagegroup_name_f, None]) def test_create_storage_group(self, mock_get_sg, mock_create): array = self.data.array storagegroup_name = self.data.storagegroup_name_f srp = self.data.srp slo = self.data.slo workload = self.data.workload extra_specs = self.data.extra_specs for x in range(0, 2): storagegroup = self.provision.create_storage_group( array, 
storagegroup_name, srp, slo, workload, extra_specs) self.assertEqual(storagegroup_name, storagegroup) mock_create.assert_called_once() def test_create_volume_from_sg(self): array = self.data.array storagegroup_name = self.data.storagegroup_name_f volume_id = self.data.test_volume.id volume_name = self.utils.get_volume_element_name(volume_id) volume_size = self.data.test_volume.size extra_specs = self.data.extra_specs ref_dict = self.data.provider_location volume_dict = self.provision.create_volume_from_sg( array, volume_name, storagegroup_name, volume_size, extra_specs) self.assertEqual(ref_dict, volume_dict) @mock.patch.object(rest.PowerMaxRest, 'create_volume_from_sg') def test_create_volume_from_sg_with_rep_info(self, mck_create): array = self.data.array storagegroup_name = self.data.storagegroup_name_f volume_id = self.data.test_volume.id volume_name = self.utils.get_volume_element_name(volume_id) volume_size = self.data.test_volume.size extra_specs = self.data.extra_specs rep_info_dict = self.data.rep_info_dict self.provision.create_volume_from_sg( array, volume_name, storagegroup_name, volume_size, extra_specs, rep_info=rep_info_dict) mck_create.assert_called_once_with( array, volume_name, storagegroup_name, volume_size, extra_specs, rep_info_dict) def test_delete_volume_from_srp(self): array = self.data.array device_id = self.data.device_id volume_name = self.data.volume_details[0]['volume_identifier'] with mock.patch.object(self.provision.rest, 'delete_volume'): self.provision.delete_volume_from_srp( array, device_id, volume_name) self.provision.rest.delete_volume.assert_called_once_with( array, device_id) def test_create_volume_snap_vx(self): array = self.data.array source_device_id = self.data.device_id snap_name = self.data.snap_location['snap_name'] extra_specs = self.data.extra_specs ttl = 0 with mock.patch.object(self.provision.rest, 'create_volume_snap'): self.provision.create_volume_snapvx( array, source_device_id, snap_name, extra_specs) self.provision.rest.create_volume_snap.assert_called_once_with( array, snap_name, source_device_id, extra_specs, ttl) def test_create_volume_replica_create_snap_true(self): array = self.data.array source_device_id = self.data.device_id target_device_id = self.data.device_id2 snap_name = self.data.snap_location['snap_name'] extra_specs = self.data.extra_specs # TTL of 1 hours ttl = 1 with mock.patch.object( self.provision, 'create_volume_snapvx') as mock_create_snapvx: with mock.patch.object( self.provision.rest, 'modify_volume_snap') as mock_modify: self.provision.create_volume_replica( array, source_device_id, target_device_id, snap_name, extra_specs, create_snap=True) mock_modify.assert_called_once_with( array, source_device_id, target_device_id, snap_name, extra_specs, link=True, copy=False) mock_create_snapvx.assert_called_once_with( array, source_device_id, snap_name, extra_specs, ttl=ttl) def test_create_volume_replica_create_snap_false(self): array = self.data.array source_device_id = self.data.device_id target_device_id = self.data.device_id2 snap_name = self.data.snap_location['snap_name'] extra_specs = self.data.extra_specs with mock.patch.object( self.provision, 'create_volume_snapvx') as mock_create_snapvx: with mock.patch.object( self.provision.rest, 'modify_volume_snap') as mock_modify: self.provision.create_volume_replica( array, source_device_id, target_device_id, snap_name, extra_specs, create_snap=False, copy_mode=True) mock_modify.assert_called_once_with( array, source_device_id, target_device_id, snap_name, extra_specs, 
link=True, copy=True) mock_create_snapvx.assert_not_called() def test_unlink_snapvx_tgt_volume(self): array = self.data.array source_device_id = self.data.device_id target_device_id = self.data.device_id2 snap_name = self.data.snap_location['snap_name'] extra_specs = self.data.extra_specs with mock.patch.object( self.provision, '_unlink_volume') as mock_unlink: self.provision.unlink_snapvx_tgt_volume( array, target_device_id, source_device_id, snap_name, extra_specs, self.data.snap_id, loop=True) mock_unlink.assert_called_once_with( array, source_device_id, target_device_id, snap_name, extra_specs, snap_id=self.data.snap_id, list_volume_pairs=None, loop=True, symforce=False) @mock.patch('oslo_service.loopingcall.FixedIntervalLoopingCall', new=test_utils.ZeroIntervalLoopingCall) def test_unlink_volume(self): with mock.patch.object(self.rest, 'modify_volume_snap') as mock_mod: self.provision._unlink_volume( self.data.array, self.data.device_id, self.data.device_id2, self.data.snap_location['snap_name'], self.data.extra_specs, snap_id=self.data.snap_id) mock_mod.assert_called_once_with( self.data.array, self.data.device_id, self.data.device_id2, self.data.snap_location['snap_name'], self.data.extra_specs, snap_id=self.data.snap_id, list_volume_pairs=None, unlink=True, symforce=False) mock_mod.reset_mock() self.provision._unlink_volume( self.data.array, self.data.device_id, self.data.device_id2, self.data.snap_location['snap_name'], self.data.extra_specs, snap_id=self.data.snap_id, loop=False) mock_mod.assert_called_once_with( self.data.array, self.data.device_id, self.data.device_id2, self.data.snap_location['snap_name'], self.data.extra_specs, snap_id=self.data.snap_id, list_volume_pairs=None, unlink=True, symforce=False) @mock.patch('oslo_service.loopingcall.FixedIntervalLoopingCall', new=test_utils.ZeroIntervalLoopingCall) def test_unlink_volume_exception(self): with mock.patch.object( self.rest, 'modify_volume_snap', side_effect=[exception.VolumeBackendAPIException(data=''), ''] ) as mock_mod: self.provision._unlink_volume( self.data.array, self.data.device_id, self.data.device_id2, self.data.snap_location['snap_name'], self.data.extra_specs, self.data.snap_id) self.assertEqual(2, mock_mod.call_count) def test_delete_volume_snap(self): array = self.data.array source_device_id = self.data.device_id snap_name = self.data.snap_location['snap_name'] with mock.patch.object(self.provision.rest, 'delete_volume_snap'): self.provision.delete_volume_snap( array, snap_name, source_device_id, snap_id=self.data.snap_id) self.provision.rest.delete_volume_snap.assert_called_once_with( array, snap_name, source_device_id, snap_id=self.data.snap_id, restored=False) def test_delete_volume_snap_restore(self): array = self.data.array source_device_id = self.data.device_id snap_name = self.data.snap_location['snap_name'] restored = True with mock.patch.object(self.provision.rest, 'delete_volume_snap'): self.provision.delete_volume_snap( array, snap_name, source_device_id, snap_id=self.data.snap_id, restored=restored) self.provision.rest.delete_volume_snap.assert_called_once_with( array, snap_name, source_device_id, snap_id=self.data.snap_id, restored=True) @mock.patch('oslo_service.loopingcall.FixedIntervalLoopingCall', new=test_utils.ZeroIntervalLoopingCall) def test_restore_complete(self): array = self.data.array source_device_id = self.data.device_id snap_name = self.data.snap_location['snap_name'] snap_id = self.data.snap_id extra_specs = self.data.extra_specs with mock.patch.object( self.provision, 
'_is_restore_complete', return_value=True): isrestored = self.provision.is_restore_complete( array, source_device_id, snap_name, snap_id, extra_specs) self.assertTrue(isrestored) with mock.patch.object( self.provision, '_is_restore_complete', side_effect=exception.CinderException): self.assertRaises(exception.VolumeBackendAPIException, self.provision.is_restore_complete, array, source_device_id, snap_name, snap_id, extra_specs) def test_is_restore_complete(self): array = self.data.array source_device_id = self.data.device_id snap_name = self.data.snap_location['snap_name'] snap_id = self.data.snap_id snap_details = { 'linkedDevices': [{'targetDevice': source_device_id, 'state': 'Restored'}]} with mock.patch.object(self.provision.rest, 'get_volume_snap', return_value=snap_details): isrestored = self.provision._is_restore_complete( array, source_device_id, snap_name, snap_id) self.assertTrue(isrestored) snap_details['linkedDevices'][0]['state'] = 'Restoring' with mock.patch.object(self.provision.rest, 'get_volume_snap', return_value=snap_details): isrestored = self.provision._is_restore_complete( array, source_device_id, snap_name, snap_id) self.assertFalse(isrestored) def test_revert_volume_snapshot(self): array = self.data.array source_device_id = self.data.device_id snap_name = self.data.snap_location['snap_name'] extra_specs = self.data.extra_specs snap_id = self.data.snap_id with mock.patch.object( self.provision.rest, 'modify_volume_snap', return_value=None): self.provision.revert_volume_snapshot( array, source_device_id, snap_name, snap_id, extra_specs) self.provision.rest.modify_volume_snap.assert_called_once_with( array, source_device_id, "", snap_name, extra_specs, snap_id=snap_id, restore=True) def test_extend_volume(self): array = self.data.array device_id = self.data.device_id new_size = '3' extra_specs = self.data.extra_specs rdfg_num = self.data.rdf_group_no_1 with mock.patch.object(self.provision.rest, 'extend_volume' ) as mock_ex: self.provision.extend_volume(array, device_id, new_size, extra_specs) mock_ex.assert_called_once_with( array, device_id, new_size, extra_specs) mock_ex.reset_mock() # Pass in rdf group self.provision.extend_volume(array, device_id, new_size, extra_specs, rdfg_num) mock_ex.assert_called_once_with( array, device_id, new_size, extra_specs, rdfg_num) def test_get_srp_pool_stats(self): array = self.data.array array_info = self.common.pool_info['arrays_info'][0] srp_capacity = self.data.srp_details['srp_capacity'] ref_stats = ((srp_capacity['usable_total_tb'] * 1024), float((srp_capacity['usable_total_tb'] * 1024) - (srp_capacity['usable_used_tb'] * 1024)), (srp_capacity['subscribed_total_tb'] * 1024), self.data.srp_details['reserved_cap_percent']) stats = self.provision.get_srp_pool_stats(array, array_info) self.assertEqual(ref_stats, stats) def test_get_srp_pool_stats_errors(self): # cannot retrieve srp array = self.data.array array_info = {'srpName': self.data.failed_resource} ref_stats = (0, 0, 0, 0) stats = self.provision.get_srp_pool_stats(array, array_info) self.assertEqual(ref_stats, stats) # cannot report on all stats with mock.patch.object( self.provision.rest, 'get_srp_by_name', return_value={'srp_capacity': {'usable_total_tb': 33}}): ref_stats = (33 * 1024, 0, 0, 0) stats = self.provision.get_srp_pool_stats(array, array_info) self.assertEqual(ref_stats, stats) def test_verify_slo_workload_true(self): # with slo and workload array = self.data.array slo = self.data.slo workload = self.data.workload srp = self.data.srp valid_slo, valid_workload 
= self.provision.verify_slo_workload( array, slo, workload, srp) self.assertTrue(valid_slo) self.assertTrue(valid_workload) # slo and workload = none slo2 = None workload2 = None valid_slo2, valid_workload2 = self.provision.verify_slo_workload( array, slo2, workload2, srp) self.assertTrue(valid_slo2) self.assertTrue(valid_workload2) slo2 = None workload2 = 'None' valid_slo2, valid_workload2 = self.provision.verify_slo_workload( array, slo2, workload2, srp) self.assertTrue(valid_slo2) self.assertTrue(valid_workload2) def test_verify_slo_workload_false(self): # Both wrong array = self.data.array slo = 'Diamante' workload = 'DSSS' srp = self.data.srp valid_slo, valid_workload = self.provision.verify_slo_workload( array, slo, workload, srp) self.assertFalse(valid_slo) self.assertFalse(valid_workload) # Workload set, no slo set valid_slo, valid_workload = self.provision.verify_slo_workload( array, None, self.data.workload, srp) self.assertTrue(valid_slo) self.assertFalse(valid_workload) def test_get_slo_workload_settings_from_storage_group(self): ref_settings = 'Diamond+DSS' sg_slo_settings = ( self.provision.get_slo_workload_settings_from_storage_group( self.data.array, self.data.defaultstoragegroup_name)) self.assertEqual(ref_settings, sg_slo_settings) # No workload with mock.patch.object(self.provision.rest, 'get_storage_group', return_value={'slo': 'Silver'}): ref_settings2 = 'Silver+NONE' sg_slo_settings2 = ( self.provision.get_slo_workload_settings_from_storage_group( self.data.array, 'no_workload_sg')) self.assertEqual(ref_settings2, sg_slo_settings2) # NextGen Array with mock.patch.object(self.rest, 'is_next_gen_array', return_value=True): ref_settings3 = 'Diamond+NONE' sg_slo_settings3 = ( self.provision.get_slo_workload_settings_from_storage_group( self.data.array, self.data.defaultstoragegroup_name)) self.assertEqual(ref_settings3, sg_slo_settings3) @mock.patch.object(rest.PowerMaxRest, 'srdf_delete_device_pair') @mock.patch.object(rest.PowerMaxRest, 'srdf_suspend_replication') @mock.patch.object(rest.PowerMaxRest, 'wait_for_rdf_pair_sync') def test_break_rdf_relationship(self, mock_wait, mock_suspend, mock_del): array = self.data.array device_id = self.data.device_id sg_name = self.data.storagegroup_name_f rdf_group = self.data.rdf_group_no_1 extra_specs = self.data.rep_extra_specs # sync still in progress self.provision.break_rdf_relationship( array, device_id, sg_name, rdf_group, extra_specs, 'SyncInProg') mock_wait.assert_called_once_with(array, rdf_group, device_id, extra_specs) mock_del.assert_called_once_with(array, rdf_group, device_id) mock_wait.reset_mock() mock_suspend.reset_mock() mock_del.reset_mock() # State is Consistent, need to suspend self.provision.break_rdf_relationship( array, device_id, sg_name, rdf_group, extra_specs, 'Consistent') mock_suspend.assert_called_once_with(array, sg_name, rdf_group, extra_specs) mock_del.assert_called_once_with(array, rdf_group, device_id) mock_del.reset_mock() # State is synchronized self.provision.break_rdf_relationship( array, device_id, sg_name, rdf_group, extra_specs, 'Synchronized') mock_del.assert_called_once_with(array, rdf_group, device_id) @mock.patch.object(rest.PowerMaxRest, 'get_storage_group', return_value=None) def test_create_volume_group_success(self, mock_get_sg): array = self.data.array group_name = self.data.storagegroup_name_source extra_specs = self.data.extra_specs ref_value = self.data.storagegroup_name_source storagegroup = self.provision.create_volume_group( array, group_name, extra_specs) 
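        # create_volume_group() is expected to return the name of the
        # newly created group (asserted below).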
self.assertEqual(ref_value, storagegroup) def test_create_group_replica(self): array = self.data.array source_group = self.data.storagegroup_name_source snap_name = self.data.group_snapshot_name extra_specs = self.data.extra_specs with mock.patch.object( self.provision, 'create_group_replica') as mock_create_replica: self.provision.create_group_replica( array, source_group, snap_name, extra_specs) mock_create_replica.assert_called_once_with( array, source_group, snap_name, extra_specs) def test_delete_group_replica(self): array = self.data.array snap_name = self.data.group_snapshot_name source_group_name = self.data.storagegroup_name_source with mock.patch.object( self.provision, 'delete_group_replica') as mock_delete_replica: self.provision.delete_group_replica( array, snap_name, source_group_name) mock_delete_replica.assert_called_once_with( array, snap_name, source_group_name) @mock.patch.object(rest.PowerMaxRest, 'get_storage_group_snap_id_list', side_effect=[[tpd.PowerMaxData.snap_id, tpd.PowerMaxData.snap_id_2, tpd.PowerMaxData.snap_id, tpd.PowerMaxData.snap_id_2], [tpd.PowerMaxData.snap_id, tpd.PowerMaxData.snap_id_2], [tpd.PowerMaxData.snap_id], list()]) def test_delete_group_replica_side_effect(self, mock_list): array = self.data.array snap_name = self.data.group_snapshot_name source_group_name = self.data.storagegroup_name_source with mock.patch.object( self.rest, 'delete_storagegroup_snap') as mock_del: self.provision.delete_group_replica( array, snap_name, source_group_name) self.assertEqual(4, mock_del.call_count) mock_del.reset_mock() self.provision.delete_group_replica( array, snap_name, source_group_name) self.assertEqual(2, mock_del.call_count) mock_del.reset_mock() self.provision.delete_group_replica( array, snap_name, source_group_name) self.assertEqual(1, mock_del.call_count) mock_del.reset_mock() self.provision.delete_group_replica( array, snap_name, source_group_name) mock_del.assert_not_called() @mock.patch.object(rest.PowerMaxRest, 'get_storage_group_snap_id_list', return_value=[tpd.PowerMaxData.snap_id]) @mock.patch.object(rest.PowerMaxRest, 'delete_storagegroup_snap') def test_delete_group_replica_exception_call_args( self, mck_del_ss, mck_snap_list): array = self.data.array snap_name = self.data.group_snapshot_name source_group_name = self.data.storagegroup_name_source self.provision.delete_group_replica( array, snap_name, source_group_name) expected = [call('000197800123', 'Grp_source_sg', 'Grp_snapshot', 118749976833, force=True)] self.assertEqual(expected, mck_del_ss.call_args_list) def test_link_and_break_replica(self): array = self.data.array source_group_name = self.data.storagegroup_name_source target_group_name = self.data.target_group_name snap_name = self.data.group_snapshot_name extra_specs = self.data.extra_specs delete_snapshot = False with mock.patch.object( self.provision, 'link_and_break_replica') as mock_link_and_break_replica: self.provision.link_and_break_replica( array, source_group_name, target_group_name, snap_name, extra_specs, delete_snapshot) mock_link_and_break_replica.assert_called_once_with( array, source_group_name, target_group_name, snap_name, extra_specs, delete_snapshot) @mock.patch.object(rest.PowerMaxRest, 'get_storage_group', side_effect=[None, tpd.PowerMaxData.sg_details[1]]) @mock.patch.object(provision.PowerMaxProvision, 'create_volume_group') def test_get_or_create_volume_group(self, mock_create, mock_sg): for x in range(0, 2): self.provision.get_or_create_volume_group( self.data.array, self.data.test_group, 
                self.data.extra_specs)
        self.assertEqual(2, mock_sg.call_count)
        self.assertEqual(1, mock_create.call_count)

    @mock.patch.object(rest.PowerMaxRest, 'create_resource',
                       return_value=(202, tpd.PowerMaxData.job_list[0]))
    def test_replicate_group(self, mock_create):
        self.rest.replicate_group(
            self.data.array, self.data.test_rep_group,
            self.data.rdf_group_no_1, self.data.remote_array,
            self.data.extra_specs)
        mock_create.assert_called_once()

    @mock.patch.object(
        rest.PowerMaxRest, 'get_snap_linked_device_list',
        side_effect=[[{'targetDevice': tpd.PowerMaxData.device_id2,
                       'defined': False}],
                     [{'targetDevice': tpd.PowerMaxData.device_id2,
                       'defined': False},
                      {'targetDevice': tpd.PowerMaxData.device_id3,
                       'defined': False}]])
    @mock.patch.object(provision.PowerMaxProvision, '_unlink_volume')
    def test_delete_volume_snap_check_for_links(self, mock_unlink, mock_tgts):
        self.provision.delete_volume_snap_check_for_links(
            self.data.array, self.data.test_snapshot_snap_name,
            self.data.device_id, self.data.extra_specs, self.data.snap_id)
        mock_unlink.assert_called_once_with(
            self.data.array, "", "", self.data.test_snapshot_snap_name,
            self.data.extra_specs, snap_id=self.data.snap_id,
            list_volume_pairs=[
                (self.data.device_id, tpd.PowerMaxData.device_id2)],
            symforce=False)
        mock_unlink.reset_mock()
        self.provision.delete_volume_snap_check_for_links(
            self.data.array, self.data.test_snapshot_snap_name,
            self.data.device_id, self.data.extra_specs, self.data.snap_id)
        self.assertEqual(2, mock_unlink.call_count)

cinder-27.0.0/cinder/tests/unit/volume/drivers/dell_emc/powermax/test_powermax_replication.py

# Copyright (c) 2020 Dell Inc. or its subsidiaries.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
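"""Unit tests for the Dell EMC PowerMax driver replication (SRDF) support."""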
import ast from copy import deepcopy from unittest import mock from unittest.mock import call from cinder import exception from cinder.objects import fields from cinder.tests.unit import test from cinder.tests.unit.volume.drivers.dell_emc.powermax import ( powermax_data as tpd) from cinder.tests.unit.volume.drivers.dell_emc.powermax import ( powermax_fake_objects as tpfo) from cinder.volume.drivers.dell_emc.powermax import common from cinder.volume.drivers.dell_emc.powermax import fc from cinder.volume.drivers.dell_emc.powermax import iscsi from cinder.volume.drivers.dell_emc.powermax import masking from cinder.volume.drivers.dell_emc.powermax import provision from cinder.volume.drivers.dell_emc.powermax import rest from cinder.volume.drivers.dell_emc.powermax import utils from cinder.volume import volume_utils class PowerMaxReplicationTest(test.TestCase): def setUp(self): self.data = tpd.PowerMaxData() super(PowerMaxReplicationTest, self).setUp() self.replication_device = self.data.sync_rep_device self.mock_object(volume_utils, 'get_max_over_subscription_ratio') configuration = tpfo.FakeConfiguration( None, 'CommonReplicationTests', interval=1, retries=1, san_ip='1.1.1.1', san_login='smc', powermax_array=self.data.array, powermax_srp='SRP_1', san_password='smc', san_api_port=8443, powermax_port_groups=[self.data.port_group_name_f], replication_device=self.replication_device) self.mock_object(rest.PowerMaxRest, '_establish_rest_session', return_value=tpfo.FakeRequestsSession()) driver = fc.PowerMaxFCDriver(configuration=configuration) iscsi_config = tpfo.FakeConfiguration( None, 'CommonReplicationTests', interval=1, retries=1, san_ip='1.1.1.1', san_login='smc', powermax_array=self.data.array, powermax_srp='SRP_1', san_password='smc', san_api_port=8443, powermax_port_groups=[self.data.port_group_name_i], replication_device=self.replication_device) iscsi_driver = iscsi.PowerMaxISCSIDriver(configuration=iscsi_config) self.iscsi_common = iscsi_driver.common self.driver = driver self.common = self.driver.common self.masking = self.common.masking self.provision = self.common.provision self.rest = self.common.rest self.utils = self.common.utils self.mock_object( self.utils, 'get_volumetype_extra_specs', return_value=deepcopy(self.data.vol_type_extra_specs_rep_enabled)) self.extra_specs = deepcopy(self.data.extra_specs_rep_enabled) self.extra_specs['retries'] = 1 self.extra_specs['interval'] = 1 self.extra_specs['rep_mode'] = 'Synchronous' self.async_rep_device = self.data.async_rep_device async_configuration = tpfo.FakeConfiguration( None, 'CommonReplicationTests', interval=1, retries=1, san_ip='1.1.1.1', san_login='smc', powermax_array=self.data.array, powermax_srp='SRP_1', san_password='smc', san_api_port=8443, powermax_port_groups=[self.data.port_group_name_f], replication_device=self.async_rep_device) self.async_driver = fc.PowerMaxFCDriver( configuration=async_configuration) self.metro_rep_device = self.data.metro_rep_device metro_configuration = tpfo.FakeConfiguration( None, 'CommonReplicationTests', interval=1, retries=1, san_ip='1.1.1.1', san_login='smc', powermax_array=self.data.array, powermax_srp='SRP_1', san_password='smc', san_api_port=8443, powermax_port_groups=[self.data.port_group_name_f], replication_device=self.metro_rep_device) self.metro_driver = fc.PowerMaxFCDriver( configuration=metro_configuration) def test_get_replication_info(self): self.common._get_replication_info() self.assertTrue(self.common.replication_enabled) @mock.patch.object(common.PowerMaxCommon, 
'_remove_members') @mock.patch.object( common.PowerMaxCommon, '_get_replication_extra_specs', return_value=tpd.PowerMaxData.rep_extra_specs2) @mock.patch.object( utils.PowerMaxUtils, 'is_volume_failed_over', return_value=True) def test_unmap_lun_volume_failed_over(self, mock_fo, mock_es, mock_rm): extra_specs = deepcopy(self.extra_specs) extra_specs[utils.PORTGROUPNAME] = self.data.port_group_name_f extra_specs[utils.IS_RE] = True extra_specs[utils.FORCE_VOL_EDIT] = True extra_specs[utils.DISABLE_PROTECTED_SNAP] = False rep_config = self.data.rep_config_sync rep_config = deepcopy(self.data.rep_config_sync) rep_config[utils.RDF_CONS_EXEMPT] = False extra_specs[utils.REP_CONFIG] = rep_config self.common._unmap_lun(self.data.test_volume, self.data.connector) mock_es.assert_called_once_with(extra_specs, rep_config) @mock.patch.object(common.PowerMaxCommon, '_remove_members') @mock.patch.object( common.PowerMaxCommon, '_get_replication_extra_specs', return_value=tpd.PowerMaxData.rep_extra_specs) @mock.patch.object( utils.PowerMaxUtils, 'is_metro_device', return_value=True) def test_unmap_lun_metro(self, mock_md, mock_es, mock_rm): extra_specs = deepcopy(self.extra_specs) extra_specs[utils.PORTGROUPNAME] = self.data.port_group_name_f self.common._unmap_lun(self.data.test_volume, self.data.connector) self.assertEqual(2, mock_rm.call_count) @mock.patch.object( utils.PowerMaxUtils, 'is_volume_failed_over', return_value=True) def test_initialize_connection_vol_failed_over(self, mock_fo): rep_extra_specs = { 'pool_name': 'Diamond+NONE+SRP_1+000197800123', 'slo': 'Diamond', 'workload': 'NONE', 'srp': 'SRP_1', 'array': '000197800124', 'interval': 1, 'retries': 1, 'replication_enabled': True, 'rep_mode': 'Synchronous', 'sync_interval': 3, 'sync_retries': 200, 'rdf_group_label': '23_24_007', 'rdf_group_no': '70', 'storagetype:portgroupname': 'OS-fibre-PG'} rep_config = { 'backend_id': 'rep_backend_id_sync', 'array': '000197800124', 'portgroup': 'OS-fibre-PG', 'srp': 'SRP_1', 'rdf_group_label': '23_24_007', 'mode': 'Synchronous', 'allow_extend': True, 'sync_interval': 3, 'sync_retries': 200, 'exempt': False} extra_specs = { 'pool_name': 'Diamond+NONE+SRP_1+000197800123', 'slo': 'Diamond', 'workload': 'NONE', 'srp': 'SRP_1', 'array': '000197800123', 'interval': 1, 'retries': 1, 'replication_enabled': True, 'rep_mode': 'Synchronous', 'storagetype:portgroupname': 'OS-fibre-PG', 'rep_config': { 'backend_id': 'rep_backend_id_sync', 'array': '000197800124', 'portgroup': 'OS-fibre-PG', 'srp': 'SRP_1', 'rdf_group_label': '23_24_007', 'mode': 'Synchronous', 'allow_extend': True, 'sync_interval': 3, 'sync_retries': 200, 'exempt': False}} with mock.patch.object(self.common, '_get_replication_extra_specs', return_value=rep_extra_specs) as mock_es: with mock.patch.object(self.common, '_initial_setup', return_value=extra_specs): self.common.initialize_connection( self.data.test_volume, self.data.connector) mock_es.assert_called_once_with(extra_specs, rep_config) @mock.patch.object(utils.PowerMaxUtils, 'is_metro_device', return_value=True) @mock.patch.object(rest.PowerMaxRest, 'get_array_model_info', return_value=('VMAX250F', False)) def test_initialize_connection_vol_metro(self, mock_model, mock_md): metro_connector = deepcopy(self.data.connector) metro_connector['multipath'] = True info_dict = self.common.initialize_connection( self.data.test_volume, metro_connector) ref_dict = {'array': self.data.array, 'device_id': self.data.device_id, 'hostlunid': 3, 'maskingview': self.data.masking_view_name_f, 'metro_hostlunid': 
3} self.assertEqual(ref_dict, info_dict) @mock.patch.object(rest.PowerMaxRest, 'get_ip_interface_physical_port', return_value="FA-1D:1") @mock.patch.object(rest.PowerMaxRest, 'get_iscsi_ip_address_and_iqn', return_value=([tpd.PowerMaxData.ip], tpd.PowerMaxData.initiator)) @mock.patch.object(common.PowerMaxCommon, '_get_replication_extra_specs', return_value=tpd.PowerMaxData.rep_extra_specs) @mock.patch.object(utils.PowerMaxUtils, 'is_metro_device', return_value=True) def test_initialize_connection_vol_metro_iscsi(self, mock_md, mock_es, mock_ip, mock_dp): metro_connector = deepcopy(self.data.connector) metro_connector['multipath'] = True phys_port = '%(dir)s:%(port)s' % { 'dir': self.data.portgroup[0]['symmetrixPortKey'][0]['directorId'], 'port': '1'} info_dict = self.iscsi_common.initialize_connection( self.data.test_volume, metro_connector) ref_dict = {'array': self.data.array, 'device_id': self.data.device_id, 'hostlunid': 3, 'maskingview': self.data.masking_view_name_f, 'ip_and_iqn': [{'ip': self.data.ip, 'iqn': self.data.initiator, 'physical_port': phys_port}], 'metro_hostlunid': 3, 'is_multipath': True, 'metro_ip_and_iqn': [{'ip': self.data.ip, 'iqn': self.data.initiator, 'physical_port': phys_port}]} self.assertEqual(ref_dict, info_dict) @mock.patch.object(utils.PowerMaxUtils, 'is_metro_device', return_value=True) def test_initialize_connection_no_multipath_iscsi(self, mock_md): self.assertRaises(exception.VolumeBackendAPIException, self.iscsi_common.initialize_connection, self.data.test_volume, self.data.connector) @mock.patch.object(masking.PowerMaxMasking, '_validate_attach', return_value=True) @mock.patch.object( rest.PowerMaxRest, 'get_storage_groups_from_volume', side_effect=[ [], [], [], [], [tpd.PowerMaxData.storagegroup_name_f], [], [tpd.PowerMaxData.storagegroup_name_f], [tpd.PowerMaxData.storagegroup_name_f]], ) @mock.patch.object( masking.PowerMaxMasking, '_check_director_and_port_status') @mock.patch.object( masking.PowerMaxMasking, 'pre_multiattach', return_value=tpd.PowerMaxData.masking_view_dict_multiattach) def test_attach_metro_volume( self, mock_pre, mock_check, mock_sgs, mock_validate): rep_extra_specs = deepcopy(tpd.PowerMaxData.rep_extra_specs) rep_extra_specs[utils.PORTGROUPNAME] = self.data.port_group_name_f hostlunid, remote_port_group = self.common._attach_metro_volume( self.data.test_volume, self.data.connector, False, self.data.extra_specs, rep_extra_specs) self.assertEqual(self.data.port_group_name_f, remote_port_group) # Multiattach case self.common._attach_metro_volume( self.data.test_volume, self.data.connector, True, self.data.extra_specs, rep_extra_specs) mock_pre.assert_called_once() def test_set_config_file_get_extra_specs_rep_enabled(self): extra_specs, _ = self.common._set_config_file_and_get_extra_specs( self.data.test_volume) self.assertTrue(extra_specs['replication_enabled']) def test_populate_masking_dict_is_re(self): extra_specs = deepcopy(self.extra_specs) extra_specs[utils.PORTGROUPNAME] = self.data.port_group_name_f masking_dict = self.common._populate_masking_dict( self.data.test_volume, self.data.connector, extra_specs) self.assertTrue(masking_dict['replication_enabled']) self.assertEqual('OS-HostX-SRP_1-DiamondDSS-OS-fibre-PG-RE', masking_dict[utils.SG_NAME]) @mock.patch.object(common.PowerMaxCommon, '_remove_vol_and_cleanup_replication') @mock.patch.object(masking.PowerMaxMasking, 'remove_vol_from_storage_group') @mock.patch.object(common.PowerMaxCommon, '_delete_from_srp') @mock.patch.object(common.PowerMaxCommon, 
'_cleanup_device_snapvx') def test_cleanup_replication_source( self, mck_cleanup, mock_del, mock_rm, mock_clean): self.common._cleanup_replication_source( self.data.array, self.data.test_volume, 'vol1', {'device_id': self.data.device_id}, self.extra_specs) mock_del.assert_called_once_with( self.data.array, self.data.device_id, 'vol1', self.extra_specs) def test_get_rdf_details(self): rdf_group_no, remote_array = self.common.get_rdf_details( self.data.array, self.data.rep_config_sync) self.assertEqual(self.data.rdf_group_no_1, rdf_group_no) self.assertEqual(self.data.remote_array, remote_array) def test_get_rdf_details_exception(self): with mock.patch.object(self.rest, 'get_rdf_group_number', return_value=None): self.assertRaises(exception.VolumeBackendAPIException, self.common.get_rdf_details, self.data.array, self.data.rep_config_sync) @mock.patch.object( common.PowerMaxCommon, '_populate_volume_and_group_update_lists', return_value=('vol_list', 'group_list')) @mock.patch.object(utils.PowerMaxUtils, 'validate_failover_request', return_value=(True, 'val')) @mock.patch.object(rest.PowerMaxRest, 'get_arrays_list', return_value=['123']) def test_failover_host(self, mck_arrays, mck_validate, mck_populate): volumes = [self.data.test_volume, self.data.test_clone_volume] groups = [self.data.test_group] backend_id = self.data.rep_backend_id_sync rep_configs = self.common.rep_configs secondary_id, volume_update_list, group_update_list = ( self.common.failover(volumes, backend_id, groups)) self.common.failover_completed(backend_id) mck_validate.assert_called_once_with( False, backend_id, rep_configs, self.data.array, ['123'], False) mck_populate.assert_called_once_with(volumes, groups, None) self.assertEqual(backend_id, secondary_id) self.assertEqual('vol_list', volume_update_list) self.assertEqual('group_list', group_update_list) @mock.patch.object(utils.PowerMaxUtils, 'validate_failover_request', return_value=(False, 'val')) @mock.patch.object(rest.PowerMaxRest, 'get_arrays_list', return_value=['123']) def test_failover_host_invalid(self, mck_arrays, mck_validate): volumes = [self.data.test_volume, self.data.test_clone_volume] backend_id = self.data.rep_backend_id_sync rep_configs = self.common.rep_configs self.assertRaises(exception.InvalidReplicationTarget, self.common.failover, volumes, backend_id) mck_validate.assert_called_once_with( False, backend_id, rep_configs, self.data.array, ['123'], False) @mock.patch.object( common.PowerMaxCommon, '_populate_volume_and_group_update_lists') @mock.patch.object(utils.PowerMaxUtils, 'validate_failover_request', return_value=(True, 'val')) @mock.patch.object(rest.PowerMaxRest, 'get_arrays_list', return_value=['123']) def test_failover_host_start_promotion( self, mck_arrays, mck_validate, mck_populate): volumes = [self.data.test_volume, self.data.test_clone_volume] groups = [self.data.test_group] backend_id = utils.PMAX_FAILOVER_START_ARRAY_PROMOTION rep_configs = self.common.rep_configs secondary_id, volume_update_list, group_update_list = ( self.common.failover(volumes, backend_id, groups)) self.common.failover_completed(backend_id) self.assertEqual(0, mck_populate.call_count) self.assertEqual(backend_id, secondary_id) self.assertEqual(list(), volume_update_list) self.assertEqual(list(), group_update_list) self.assertEqual(self.common.promotion, True) self.common.promotion = False mck_validate.assert_called_once_with( False, backend_id, rep_configs, self.data.array, ['123'], False) @mock.patch.object( common.PowerMaxCommon, 
'_populate_volume_and_group_update_lists', return_value=(list(), list())) @mock.patch.object(utils.PowerMaxUtils, 'validate_failover_request', return_value=(True, 'val')) @mock.patch.object(rest.PowerMaxRest, 'get_arrays_list', return_value=['123']) def test_failover_host_complete_promotion( self, mck_arrays, mck_validate, mck_populate): volume = deepcopy(self.data.test_rep_volume) volume.replication_status = fields.ReplicationStatus.ERROR volumes = [volume] groups = [self.data.test_group] backend_id = 'default' rep_configs = self.common.rep_configs self.common.promotion = True secondary_id, volume_update_list, group_update_list = ( self.common.failover(volumes, backend_id, groups)) self.common.failover_completed(backend_id) mck_populate.assert_called_once_with(volumes, groups, None) mck_validate.assert_called_once_with( False, backend_id, rep_configs, self.data.array, ['123'], True) self.assertEqual(backend_id, secondary_id) self.assertEqual(list(), volume_update_list) self.assertEqual(list(), group_update_list) self.assertEqual(self.common.promotion, False) @mock.patch.object(common.PowerMaxCommon, '_update_volume_list_from_sync_vol_list', return_value={'vol_updates'}) @mock.patch.object( common.PowerMaxCommon, '_initial_setup', return_value=tpd.PowerMaxData.ex_specs_rep_config_sync.copy()) @mock.patch.object(common.PowerMaxCommon, 'failover_replication', return_value=('grp_updates', {'grp_vol_updates'})) def test_populate_volume_and_group_update_lists( self, mck_failover_rep, mck_setup, mck_from_sync): test_volume = deepcopy(self.data.test_volume) test_volume.group_id = self.data.test_rep_group.id volumes = [test_volume, self.data.test_rep_volume] groups = [self.data.test_rep_group] group_volumes = [test_volume] volume_updates, group_updates = ( self.common._populate_volume_and_group_update_lists( volumes, groups, None)) mck_failover_rep.assert_called_once_with( None, groups[0], group_volumes, None, host=True) mck_setup.assert_called_once_with(self.data.test_rep_volume) mck_from_sync.assert_called_once_with( [self.data.test_rep_volume], None) vol_updates_ref = ['grp_vol_updates', 'vol_updates'] self.assertEqual(vol_updates_ref, volume_updates) group_updates_ref = [{'group_id': test_volume.group_id, 'updates': 'grp_updates'}] self.assertEqual(group_updates_ref, group_updates) @mock.patch.object(common.PowerMaxCommon, '_initial_setup', return_value=tpd.PowerMaxData.extra_specs.copy()) def test_populate_volume_and_group_update_lists_promotion_non_rep( self, mck_setup): volumes = [self.data.test_volume] groups = [] ref_model_update = { 'volume_id': volumes[0].id, 'updates': { 'replication_status': fields.ReplicationStatus.DISABLED}} self.common.promotion = True volume_updates, group_updates = ( self.common._populate_volume_and_group_update_lists( volumes, groups, None)) self.common.promotion = False self.assertEqual(ref_model_update, volume_updates[0]) def test_failover_replication_empty_group(self): with mock.patch.object(volume_utils, 'is_group_a_type', return_value=True): model_update, __ = self.common.failover_replication( None, self.data.test_group, []) self.assertEqual({}, model_update) @mock.patch.object(rest.PowerMaxRest, 'srdf_failover_group', return_value=tpd.PowerMaxData.rdf_group_no_1) @mock.patch.object(common.PowerMaxCommon, 'get_rdf_details', return_value=tpd.PowerMaxData.rdf_group_no_1) @mock.patch.object(common.PowerMaxCommon, '_find_volume_group', return_value=tpd.PowerMaxData.test_group) def test_failover_replication_failover(self, mck_find_vol_grp, mck_get_rdf_grp, 
mck_failover): volumes = [self.data.test_volume_group_member] vol_group = self.data.test_group vol_grp_name = self.data.test_group.name model_update, __ = self.common._failover_replication( volumes, vol_group, vol_grp_name, host=True) self.assertEqual(fields.ReplicationStatus.FAILED_OVER, model_update['replication_status']) @mock.patch.object(rest.PowerMaxRest, 'srdf_failover_group', return_value=tpd.PowerMaxData.rdf_group_no_1) @mock.patch.object(common.PowerMaxCommon, 'get_rdf_details', return_value=tpd.PowerMaxData.rdf_group_no_1) @mock.patch.object(common.PowerMaxCommon, '_find_volume_group', return_value=tpd.PowerMaxData.test_group) def test_failover_replication_failback(self, mck_find_vol_grp, mck_get_rdf_grp, mck_failover): volumes = [self.data.test_volume_group_member] vol_group = self.data.test_group vol_grp_name = self.data.test_group.name model_update, __ = self.common._failover_replication( volumes, vol_group, vol_grp_name, host=True, secondary_backend_id='default') self.assertEqual(fields.ReplicationStatus.ENABLED, model_update['replication_status']) @mock.patch.object(common.PowerMaxCommon, 'get_rdf_details', return_value=None) @mock.patch.object(common.PowerMaxCommon, '_find_volume_group', return_value=tpd.PowerMaxData.test_group) def test_failover_replication_exception(self, mck_find_vol_grp, mck_get_rdf_grp): volumes = [self.data.test_volume_group_member] vol_group = self.data.test_group vol_grp_name = self.data.test_group.name model_update, __ = self.common._failover_replication( volumes, vol_group, vol_grp_name) self.assertEqual(fields.ReplicationStatus.ERROR, model_update['replication_status']) @mock.patch.object(common.PowerMaxCommon, '_rdf_vols_partitioned', return_value=True) @mock.patch.object(rest.PowerMaxRest, 'srdf_failover_group', return_value=tpd.PowerMaxData.rdf_group_no_1) @mock.patch.object(common.PowerMaxCommon, 'get_rdf_details', return_value=tpd.PowerMaxData.rdf_group_no_1) @mock.patch.object(common.PowerMaxCommon, '_find_volume_group', return_value=tpd.PowerMaxData.test_group) def test_failover_replication_failover_partitioned( self, mck_find_vol_grp, mck_get_rdf_grp, mck_failover, mck_part): volumes = [self.data.test_volume_group_member] vol_group = self.data.test_group vol_grp_name = self.data.test_group.name model_update, __ = self.common._failover_replication( volumes, vol_group, vol_grp_name, host=True) self.assertEqual(fields.ReplicationStatus.FAILED_OVER, model_update['replication_status']) self.assertEqual(0, mck_failover.call_count) @mock.patch.object(common.PowerMaxCommon, '_failover_replication', return_value=({}, {})) @mock.patch.object(common.PowerMaxCommon, '_cleanup_device_snapvx') @mock.patch.object(rest.PowerMaxRest, 'get_arrays_list', return_value=['123']) def test_failover_host_async(self, mck_arrays, mck_cleanup, mock_fg): volumes = [self.data.test_volume] extra_specs = deepcopy(self.extra_specs) extra_specs['rep_mode'] = utils.REP_ASYNC with mock.patch.object(common.PowerMaxCommon, '_initial_setup', return_value=extra_specs): self.async_driver.common.failover(volumes, None, []) self.common.failover_completed(None) mock_fg.assert_called_once() @mock.patch.object(rest.PowerMaxRest, 'get_array_model_info', return_value=('VMAX250F', False)) def test_get_replication_extra_specs(self, mock_model): rep_config = self.data.rep_config_sync # Path one - disable compression extra_specs1 = deepcopy(self.extra_specs) extra_specs1[utils.DISABLECOMPRESSION] = 'true' ref_specs1 = deepcopy(self.data.rep_extra_specs5) ref_specs1['rdf_group_label'] = 
self.data.rdf_group_name_1 ref_specs1['rdf_group_no'] = self.data.rdf_group_no_1 rep_extra_specs1 = self.common._get_replication_extra_specs( extra_specs1, rep_config) self.assertEqual(ref_specs1, rep_extra_specs1) # Path two - disable compression, not all flash ref_specs2 = deepcopy(self.data.rep_extra_specs5) ref_specs2['rdf_group_label'] = self.data.rdf_group_name_1 ref_specs2['rdf_group_no'] = self.data.rdf_group_no_1 with mock.patch.object(self.rest, 'is_compression_capable', return_value=False): rep_extra_specs2 = self.common._get_replication_extra_specs( extra_specs1, rep_config) self.assertEqual(ref_specs2, rep_extra_specs2) @mock.patch.object(common.PowerMaxCommon, 'get_rdf_details', return_value=(1, True)) @mock.patch.object(rest.PowerMaxRest, 'get_array_model_info', return_value=('VMAX250F', False)) def test_get_replication_extra_specs_get_rdf_group_promotion( self, mock_model, mck_rdf): self.common.promotion = True remote_array = self.data.remote_array rep_config = self.data.rep_config_sync extra_specs1 = deepcopy(self.extra_specs) self.common._get_replication_extra_specs(extra_specs1, rep_config) mck_rdf.assert_called_with(remote_array, rep_config) @mock.patch.object(rest.PowerMaxRest, 'get_array_model_info', return_value=('PowerMax 2000', True)) def test_get_replication_extra_specs_powermax(self, mock_model): rep_config = self.data.rep_config_sync rep_specs = deepcopy(self.data.rep_extra_specs5) extra_specs = deepcopy(self.extra_specs) # SLO not valid, both SLO and Workload set to NONE rep_specs['slo'] = None rep_specs['workload'] = None rep_specs['target_array_model'] = 'PowerMax 2000' rep_specs['rdf_group_label'] = self.data.rdf_group_name_1 rep_specs['rdf_group_no'] = self.data.rdf_group_no_1 with mock.patch.object(self.provision, 'verify_slo_workload', return_value=(False, False)): rep_extra_specs = self.common._get_replication_extra_specs( extra_specs, rep_config) self.assertEqual(rep_specs, rep_extra_specs) # SL valid, workload invalid, only workload set to NONE rep_specs['slo'] = 'Diamond' rep_specs['workload'] = None rep_specs['target_array_model'] = 'PowerMax 2000' with mock.patch.object(self.provision, 'verify_slo_workload', return_value=(True, False)): rep_extra_specs = self.common._get_replication_extra_specs( extra_specs, rep_config) self.assertEqual(rep_specs, rep_extra_specs) @mock.patch.object( rest.PowerMaxRest, 'get_rdf_pair_volume', return_value={utils.RDF_PAIR_STATE: utils.RDF_PARTITIONED_STATE}) def test_rdf_vols_partitioned_true_partitioned(self, mck_pair): array = self.data.array volumes = [self.data.test_rep_volume] rdfg = self.data.rdf_group_no_1 device_id = self.data.device_id2 is_partitioned = self.common._rdf_vols_partitioned( array, volumes, rdfg) self.assertTrue(is_partitioned) mck_pair.assert_called_once_with(array, rdfg, device_id) @mock.patch.object( rest.PowerMaxRest, 'get_rdf_pair_volume', return_value={utils.RDF_PAIR_STATE: utils.RDF_TRANSIDLE_STATE}) def test_rdf_vols_partitioned_true_transidle(self, mck_pair): array = self.data.array volumes = [self.data.test_rep_volume] rdfg = self.data.rdf_group_no_1 device_id = self.data.device_id2 is_partitioned = self.common._rdf_vols_partitioned( array, volumes, rdfg) self.assertTrue(is_partitioned) mck_pair.assert_called_once_with(array, rdfg, device_id) @mock.patch.object( rest.PowerMaxRest, 'get_rdf_pair_volume', return_value={utils.RDF_PAIR_STATE: utils.RDF_SUSPENDED_STATE}) def test_rdf_vols_partitioned_false(self, mck_pair): array = self.data.array volumes = [self.data.test_rep_volume] rdfg = 
self.data.rdf_group_no_1 device_id = self.data.device_id2 is_partitioned = self.common._rdf_vols_partitioned( array, volumes, rdfg) self.assertFalse(is_partitioned) mck_pair.assert_called_once_with(array, rdfg, device_id) @mock.patch.object( rest.PowerMaxRest, 'get_rdf_pair_volume', return_value={utils.RDF_PAIR_STATE: utils.RDF_PARTITIONED_STATE}) def test_rdf_vols_partitioned_true_promotion(self, mck_pair): self.common.promotion = True array = self.data.array volumes = [self.data.test_rep_volume] rdfg = self.data.rdf_group_no_1 device_id = self.data.device_id is_partitioned = self.common._rdf_vols_partitioned( array, volumes, rdfg) self.assertTrue(is_partitioned) self.common.promotion = False mck_pair.assert_called_once_with(array, rdfg, device_id) def test_get_secondary_stats(self): rep_config = self.data.rep_config_sync array_map = self.common.get_attributes_from_cinder_config() finalarrayinfolist = self.common._get_slo_workload_combinations( array_map) array_info = finalarrayinfolist[0] ref_info = deepcopy(array_info) ref_info['SerialNumber'] = str(rep_config['array']) ref_info['srpName'] = rep_config['srp'] secondary_info = self.common.get_secondary_stats_info( rep_config, array_info) self.assertEqual(ref_info, secondary_info) @mock.patch.object(common.PowerMaxCommon, 'get_volume_metadata', return_value=tpd.PowerMaxData.volume_metadata) def test_replicate_group(self, mck_meta): volume_model_update = { 'id': self.data.test_volume.id, 'provider_location': self.data.test_volume.provider_location} extra_specs = deepcopy(self.data.extra_specs_rep_enabled) extra_specs[utils.REP_CONFIG] = self.data.rep_config_sync vols_model_update = self.common._replicate_group( self.data.array, [volume_model_update], self.data.test_vol_grp_name, extra_specs) ref_rep_data = {'array': self.data.remote_array, 'device_id': self.data.device_id2} ref_vol_update = { 'id': self.data.test_volume.id, 'provider_location': self.data.test_volume.provider_location, 'replication_driver_data': ref_rep_data, 'replication_status': fields.ReplicationStatus.ENABLED, 'metadata': self.data.volume_metadata} # Decode string representations of dicts into dicts, because # the string representations are randomly ordered and therefore # hard to compare. 
vols_model_update[0]['replication_driver_data'] = ast.literal_eval( vols_model_update[0]['replication_driver_data']) self.assertEqual(ref_vol_update, vols_model_update[0]) @mock.patch.object(common.PowerMaxCommon, '_initial_setup', return_value=tpd.PowerMaxData.extra_specs.copy()) def test_populate_volume_and_group_update_lists_group_update_vol_list( self, mck_setup): volume = deepcopy(self.data.test_volume) volume.group_id = self.data.test_group.id volumes = [volume] groups = [self.data.test_group] volume_updates, group_updates = ( self.common._populate_volume_and_group_update_lists( volumes, groups, None)) self.assertEqual([volume], volumes) @mock.patch.object( utils.PowerMaxUtils, 'validate_non_replication_group_config') @mock.patch.object(volume_utils, 'is_group_a_cg_snapshot_type', return_value=True) @mock.patch.object(volume_utils, 'is_group_a_type', return_value=False) def test_create_group(self, mock_type, mock_cg_type, mck_validate): ref_model_update = { 'status': fields.GroupStatus.AVAILABLE} model_update = self.common.create_group(None, self.data.test_group_1) self.assertEqual(ref_model_update, model_update) extra_specs_list = [self.data.vol_type_extra_specs_rep_enabled] mck_validate.assert_called_once_with(extra_specs_list) @mock.patch.object( utils.PowerMaxUtils, 'validate_replication_group_config') @mock.patch.object(volume_utils, 'is_group_a_cg_snapshot_type', return_value=False) @mock.patch.object(volume_utils, 'is_group_a_type', return_value=True) def test_create_replicaton_group( self, mock_type, mock_cg_type, mck_validate): ref_model_update = { 'status': fields.GroupStatus.AVAILABLE, 'replication_status': fields.ReplicationStatus.ENABLED} model_update = self.common.create_group(None, self.data.test_group_1) self.assertEqual(ref_model_update, model_update) extra_specs_list = [self.data.vol_type_extra_specs_rep_enabled] mck_validate.assert_called_once_with( self.common.rep_configs, extra_specs_list) @mock.patch.object(common.PowerMaxCommon, '_find_volume_group', side_effect=[tpd.PowerMaxData.test_group, None]) def test_enable_replication(self, mock_vg): # Case 1: Group not replicated with mock.patch.object(volume_utils, 'is_group_a_type', return_value=False): self.assertRaises(NotImplementedError, self.common.enable_replication, None, self.data.test_group, [self.data.test_volume]) with mock.patch.object(volume_utils, 'is_group_a_type', return_value=True): # Case 2: Empty group model_update, __ = self.common.enable_replication( None, self.data.test_group, []) self.assertEqual({}, model_update) # Case 3: Successfully enabled model_update, __ = self.common.enable_replication( None, self.data.test_group, [self.data.test_volume]) self.assertEqual(fields.ReplicationStatus.ENABLED, model_update['replication_status']) # Case 4: Exception model_update, __ = self.common.enable_replication( None, self.data.test_group_failed, [self.data.test_volume]) self.assertEqual(fields.ReplicationStatus.ERROR, model_update['replication_status']) @mock.patch.object(common.PowerMaxCommon, '_find_volume_group', side_effect=[tpd.PowerMaxData.test_group, None]) def test_disable_replication(self, mock_vg): # Case 1: Group not replicated with mock.patch.object(volume_utils, 'is_group_a_type', return_value=False): self.assertRaises(NotImplementedError, self.common.disable_replication, None, self.data.test_group, [self.data.test_volume]) with mock.patch.object(volume_utils, 'is_group_a_type', return_value=True): # Case 2: Empty group model_update, __ = self.common.disable_replication( None, 
self.data.test_group, []) self.assertEqual({}, model_update) # Case 3: Successfully disabled model_update, __ = self.common.disable_replication( None, self.data.test_group, [self.data.test_volume]) self.assertEqual(fields.ReplicationStatus.DISABLED, model_update['replication_status']) # Case 4: Exception model_update, __ = self.common.disable_replication( None, self.data.test_group_failed, [self.data.test_volume]) self.assertEqual(fields.ReplicationStatus.ERROR, model_update['replication_status']) @mock.patch.object(utils.PowerMaxUtils, 'get_volumetype_extra_specs', return_value={utils.REPLICATION_DEVICE_BACKEND_ID: tpd.PowerMaxData.rep_backend_id_sync}) @mock.patch.object(utils.PowerMaxUtils, 'get_volume_group_utils', return_value=(tpd.PowerMaxData.array, {})) @mock.patch.object(common.PowerMaxCommon, '_cleanup_group_replication') @mock.patch.object(volume_utils, 'is_group_a_type', return_value=True) def test_delete_replication_group(self, mock_check, mock_cleanup, mock_utils, mock_get): group = self.data.test_rep_group group['volume_types'] = self.data.test_volume_type_list self.common._delete_group(group, []) mock_cleanup.assert_called_once() @mock.patch.object(rest.PowerMaxRest, 'is_volume_in_storagegroup', return_value=True) @mock.patch.object(masking.PowerMaxMasking, 'add_volumes_to_storage_group') @mock.patch.object(masking.PowerMaxMasking, 'remove_volumes_from_storage_group') @mock.patch.object(utils.PowerMaxUtils, 'check_rep_status_enabled') @mock.patch.object(common.PowerMaxCommon, '_remove_remote_vols_from_volume_group') @mock.patch.object(masking.PowerMaxMasking, 'add_remote_vols_to_volume_group') @mock.patch.object(volume_utils, 'is_group_a_type', return_value=True) @mock.patch.object(volume_utils, 'is_group_a_cg_snapshot_type', return_value=True) def test_update_replicated_group( self, mock_cg_type, mock_type_check, mock_add_remote, mock_remove_remote, mock_check, mock_remove_local, mock_add_local, mock_vol_in_sg): array = self.data.array add_vols = [self.data.test_volume] add_vols_id = [self.data.device_id] remove_vols = [self.data.test_clone_volume] remove_vols_id = [self.data.device_id2] group = self.data.test_group_1 group_sg = self.data.storagegroup_name_source extra_specs = { utils.INTERVAL: 1, utils.RETRIES: 1, utils.FORCE_VOL_EDIT: True} self.common.update_group(group, add_vols, remove_vols) mock_add_local.assert_called_once_with( array, add_vols_id, group_sg, extra_specs) mock_add_remote.assert_called_once_with(add_vols, group, extra_specs) mock_remove_local.assert_called_once_with( array, remove_vols_id, group_sg, extra_specs) mock_remove_remote.assert_called_once_with( array, remove_vols, group, extra_specs) @mock.patch.object(masking.PowerMaxMasking, 'remove_volumes_from_storage_group') def test_remove_remote_vols_from_volume_group(self, mock_rm): self.common._remove_remote_vols_from_volume_group( self.data.remote_array, [self.data.test_volume], self.data.test_rep_group, self.data.rep_extra_specs) mock_rm.assert_called_once() @mock.patch.object(rest.PowerMaxRest, 'get_storage_groups_from_volume', return_value=[]) @mock.patch.object(masking.PowerMaxMasking, 'remove_and_reset_members') @mock.patch.object(masking.PowerMaxMasking, 'remove_volumes_from_storage_group') def test_cleanup_group_replication(self, mock_rm, mock_rm_reset, mock_sgs): self.common._cleanup_group_replication( self.data.array, self.data.test_vol_grp_name, [self.data.device_id], self.extra_specs, self.data.rep_config_sync) mock_rm.assert_called_once() @mock.patch.object( common.PowerMaxCommon, 
'get_volume_metadata', return_value={}) @mock.patch.object( rest.PowerMaxRest, 'srdf_resume_replication') @mock.patch.object( common.PowerMaxCommon, '_protect_storage_group', return_value=(fields.ReplicationStatus.ENABLED, tpd.PowerMaxData.replication_update, tpd.PowerMaxData.rep_info_dict)) @mock.patch.object( masking.PowerMaxMasking, 'add_volume_to_default_storage_group') @mock.patch.object( common.PowerMaxCommon, 'configure_volume_replication', return_value=( 'first_vol_in_rdf_group', tpd.PowerMaxData.replication_update, tpd.PowerMaxData.rep_info_dict, tpd.PowerMaxData.rep_extra_specs_mgmt, True)) @mock.patch.object( rest.PowerMaxRest, 'rename_volume') @mock.patch.object( common.PowerMaxCommon, '_check_lun_valid_for_cinder_management', return_value=(tpd.PowerMaxData.test_volume.name, tpd.PowerMaxData.storagegroup_name_source)) @mock.patch.object( utils.PowerMaxUtils, 'get_array_and_device_id', return_value=(tpd.PowerMaxData.array, tpd.PowerMaxData.device_id)) @mock.patch.object( common.PowerMaxCommon, '_initial_setup', return_value=tpd.PowerMaxData.rep_extra_specs) def test_manage_existing_enable_replication( self, mck_setup, mck_get_array, mck_check_lun, mck_rename, mck_configure, mck_add, mck_post, mck_resume, mck_meta): external_ref = {u'source-name': u'00002'} volume = self.data.test_volume ref_model_update = { 'metadata': {'BackendID': 'None'}, 'provider_location': str({ 'device_id': self.data.device_id, 'array': self.data.array}), 'replication_driver_data': str({ 'device_id': self.data.device_id2, 'array': self.data.remote_array}), 'replication_status': fields.ReplicationStatus.ENABLED} model_update = self.common.manage_existing(volume, external_ref) mck_configure.assert_called_once() mck_add.assert_called_once() mck_post.assert_called_once() mck_resume.assert_called_once() self.assertEqual(ref_model_update, model_update) @mock.patch.object( masking.PowerMaxMasking, 'add_volume_to_default_storage_group', side_effect=exception.VolumeBackendAPIException) @mock.patch.object( common.PowerMaxCommon, 'configure_volume_replication', return_value=( 'first_vol_in_rdf_group', tpd.PowerMaxData.replication_update, tpd.PowerMaxData.rep_info_dict, tpd.PowerMaxData.rep_extra_specs, True)) @mock.patch.object( rest.PowerMaxRest, 'rename_volume') @mock.patch.object( common.PowerMaxCommon, '_check_lun_valid_for_cinder_management', return_value=(tpd.PowerMaxData.test_volume.name, tpd.PowerMaxData.storagegroup_name_source)) @mock.patch.object( utils.PowerMaxUtils, 'get_array_and_device_id', return_value=(tpd.PowerMaxData.array, tpd.PowerMaxData.device_id)) @mock.patch.object( common.PowerMaxCommon, '_initial_setup', return_value=tpd.PowerMaxData.rep_extra_specs) def test_manage_existing_enable_replication_exception( self, mck_setup, mck_get_array, mck_check_lun, mck_rename, mck_configure, mck_add): external_ref = {u'source-name': u'00002'} volume = self.data.test_volume self.assertRaises(exception.VolumeBackendAPIException, self.common.manage_existing, volume, external_ref) self.assertEqual(2, mck_rename.call_count) @mock.patch.object( common.PowerMaxCommon, 'get_volume_metadata', return_value={'device-meta-key-1': 'device-meta-value-1', 'device-meta-key-2': 'device-meta-value-2'}) @mock.patch.object( common.PowerMaxCommon, '_create_volume', return_value=( tpd.PowerMaxData.provider_location, {'replication_driver_data': tpd.PowerMaxData.provider_location2}, {})) @mock.patch.object( common.PowerMaxCommon, '_initial_setup', return_value=tpd.PowerMaxData.rep_extra_specs_rep_config) def 
test_create_rep_volume(self, mck_initial, mck_create, mck_meta): ref_model_update = ( {'provider_location': str(self.data.provider_location), 'replication_driver_data': ( tpd.PowerMaxData.provider_location2), 'metadata': {'BackendID': self.data.rep_backend_id_sync, 'device-meta-key-1': 'device-meta-value-1', 'device-meta-key-2': 'device-meta-value-2', 'user-meta-key-1': 'user-meta-value-1', 'user-meta-key-2': 'user-meta-value-2'}}) volume = deepcopy(self.data.test_volume) volume.metadata = {'user-meta-key-1': 'user-meta-value-1', 'user-meta-key-2': 'user-meta-value-2'} model_update = self.common.create_volume(volume) self.assertEqual(ref_model_update, model_update) @mock.patch.object(common.PowerMaxCommon, 'get_volume_metadata', return_value={}) @mock.patch.object( common.PowerMaxCommon, '_create_cloned_volume', return_value=( tpd.PowerMaxData.provider_location, tpd.PowerMaxData.replication_update, {})) def test_create_rep_volume_from_snapshot(self, mck_meta, mck_clone_chk): ref_model_update = ( {'provider_location': str(self.data.provider_location), 'metadata': {'BackendID': self.data.rep_backend_id_sync}}) ref_model_update.update(self.data.replication_update) model_update = self.common.create_volume_from_snapshot( self.data.test_clone_volume, self.data.test_snapshot) self.assertEqual(ref_model_update, model_update) @mock.patch.object( common.PowerMaxCommon, 'get_volume_metadata', return_value=tpd.PowerMaxData.volume_metadata) @mock.patch.object( common.PowerMaxCommon, '_create_cloned_volume', return_value=( tpd.PowerMaxData.provider_location_clone, tpd.PowerMaxData.replication_update, {})) @mock.patch.object(common.PowerMaxCommon, '_cleanup_device_snapvx') def test_cloned_rep_volume(self, mck_cleanup, mck_meta, mck_clone_chk): metadata = deepcopy(self.data.volume_metadata) metadata['BackendID'] = self.data.rep_backend_id_sync ref_model_update = { 'provider_location': str( self.data.provider_location_clone), 'metadata': metadata} ref_model_update.update(self.data.replication_update) model_update = self.common.create_cloned_volume( self.data.test_clone_volume, self.data.test_volume) self.assertEqual(ref_model_update, model_update) @mock.patch.object(common.PowerMaxCommon, '_validate_rdfg_status') @mock.patch.object( common.PowerMaxCommon, '_add_volume_to_rdf_management_group') @mock.patch.object( common.PowerMaxCommon, 'get_and_set_remote_device_uuid', return_value=tpd.PowerMaxData.device_id2) @mock.patch.object(common.PowerMaxCommon, 'srdf_protect_storage_group') @mock.patch.object( provision.PowerMaxProvision, 'create_volume_from_sg', return_value=tpd.PowerMaxData.provider_location) @mock.patch.object( masking.PowerMaxMasking, 'get_or_create_default_storage_group', return_value=tpd.PowerMaxData.default_sg_re_enabled) @mock.patch.object( common.PowerMaxCommon, 'prepare_replication_details', return_value=(True, tpd.PowerMaxData.rep_extra_specs_rep_config, {}, True)) @mock.patch.object( provision.PowerMaxProvision, 'verify_slo_workload', return_value=(True, True)) def test_create_volume_rep_enabled( self, mck_slo, mck_prep, mck_get, mck_create, mck_protect, mck_set, mck_add, mck_valid): volume = self.data.test_volume volume_name = self.data.volume_id volume_size = 1 extra_specs = deepcopy(self.data.rep_extra_specs) extra_specs['mode'] = utils.REP_ASYNC extra_specs[utils.REP_CONFIG] = self.data.rep_config_async volume_dict, rep_update, rep_info_dict = self.common._create_volume( volume, volume_name, volume_size, extra_specs) mck_valid.assert_not_called() 
self.assertEqual(self.data.provider_location, volume_dict) self.assertEqual(self.data.replication_update, rep_update) self.assertIsNone(rep_info_dict) @mock.patch.object(common.PowerMaxCommon, 'get_rdf_details', return_value=(tpd.PowerMaxData.rdf_group_no_1, None)) @mock.patch.object(utils.PowerMaxUtils, 'is_replication_enabled', side_effect=[False, True]) def test_remove_vol_and_cleanup_replication(self, mck_rep, mck_get): array = self.data.array rdf_group_no = self.data.rdf_group_no_1 device_id = self.data.device_id volume = self.data.test_volume volume_name = self.data.test_volume.name extra_specs = deepcopy(self.data.extra_specs) extra_specs[utils.REP_CONFIG] = self.data.rep_config_sync with mock.patch.object( self.masking, 'remove_and_reset_members') as mock_rm: self.common._remove_vol_and_cleanup_replication( array, device_id, volume_name, extra_specs, volume) mock_rm.assert_called_once_with( array, volume, device_id, volume_name, extra_specs, False) with mock.patch.object( self.common, 'cleanup_rdf_device_pair') as mock_clean: self.common._remove_vol_and_cleanup_replication( array, device_id, volume_name, extra_specs, volume) mock_clean.assert_called_once_with( array, rdf_group_no, device_id, extra_specs) @mock.patch.object(utils.PowerMaxUtils, 'is_replication_enabled', return_value=False) def test_remove_vol_and_cleanup_replication_host_assisted_migration( self, mck_rep): array = self.data.array device_id = self.data.device_id volume = deepcopy(self.data.test_volume) volume.migration_status = 'deleting' metadata = deepcopy(self.data.volume_metadata) metadata[utils.IS_RE_CAMEL] = 'False' volume.metadata = metadata volume_name = self.data.test_volume.name extra_specs = deepcopy(self.data.rep_extra_specs) extra_specs[utils.REP_CONFIG] = self.data.rep_config_sync ref_extra_specs = deepcopy(extra_specs) ref_extra_specs.pop(utils.IS_RE) with mock.patch.object( self.masking, 'remove_and_reset_members') as mock_rm: self.common._remove_vol_and_cleanup_replication( array, device_id, volume_name, extra_specs, volume) mock_rm.assert_called_once_with( array, volume, device_id, volume_name, ref_extra_specs, False) @mock.patch.object(common.PowerMaxCommon, '_validate_rdfg_status') @mock.patch.object( common.PowerMaxCommon, 'get_volume_metadata', return_value='') @mock.patch.object(rest.PowerMaxRest, 'srdf_resume_replication') @mock.patch.object( common.PowerMaxCommon, '_retype_volume', return_value=(True, tpd.PowerMaxData.defaultstoragegroup_name)) @mock.patch.object( common.PowerMaxCommon, 'break_rdf_device_pair_session', return_value=({'mgmt_sg_name': tpd.PowerMaxData.rdf_managed_async_grp, 'rdf_group_no': tpd.PowerMaxData.rdf_group_no_1}, True)) def test_migrate_volume_success_rep_to_no_rep( self, mck_break, mck_retype, mck_resume, mck_get, mck_valid): array_id = self.data.array volume = self.data.test_volume device_id = self.data.device_id srp = self.data.srp target_slo = self.data.slo_silver target_workload = self.data.workload volume_name = volume.name new_type = {'extra_specs': {}} extra_specs = deepcopy(self.data.rep_extra_specs) extra_specs[utils.REP_CONFIG] = self.data.rep_config_sync target_extra_specs = { utils.SRP: srp, utils.ARRAY: array_id, utils.SLO: target_slo, utils.WORKLOAD: target_workload, utils.INTERVAL: extra_specs[utils.INTERVAL], utils.RETRIES: extra_specs[utils.RETRIES], utils.DISABLECOMPRESSION: False} success, model_update = self.common._migrate_volume( array_id, volume, device_id, srp, target_slo, target_workload, volume_name, new_type, extra_specs) 
mck_break.assert_called_once_with( array_id, device_id, volume_name, extra_specs, volume) mck_retype.assert_called_once_with( array_id, srp, device_id, volume, volume_name, extra_specs, target_slo, target_workload, target_extra_specs) self.assertTrue(success) @mock.patch.object(common.PowerMaxCommon, '_validate_rdfg_status') @mock.patch.object(common.PowerMaxCommon, '_cleanup_device_snapvx') @mock.patch.object( common.PowerMaxCommon, 'get_volume_metadata', return_value='') @mock.patch.object( common.PowerMaxCommon, '_post_retype_srdf_protect_storage_group', return_value=('Enabled', tpd.PowerMaxData.rdf_group_vol_details, tpd.PowerMaxData.device_id2)) @mock.patch.object( common.PowerMaxCommon, '_retype_volume', return_value=(True, tpd.PowerMaxData.defaultstoragegroup_name)) @mock.patch.object( common.PowerMaxCommon, 'configure_volume_replication', return_value=('first_vol_in_rdf_group', {}, {'target_device_id': tpd.PowerMaxData.device_id2, 'remote_array': tpd.PowerMaxData.remote_array}, tpd.PowerMaxData.rep_extra_specs, False)) def test_migrate_volume_success_no_rep_to_rep( self, mck_configure, mck_retype, mck_protect, mck_get, mck_cleanup, mck_valid): self.common.rep_config = {'mode': utils.REP_SYNC, 'array': self.data.array} array_id = self.data.array volume = deepcopy(self.data.test_volume) volume.id = self.data.volume_id device_id = self.data.device_id srp = self.data.srp target_slo = self.data.slo_silver target_workload = self.data.workload volume_name = volume.name updated_volume_name = self.utils.get_volume_element_name(volume.id) target_storage_group = self.data.defaultstoragegroup_name extra_specs = deepcopy(self.data.extra_specs) rep_config_sync = deepcopy(self.data.rep_config_sync) rep_config_sync['exempt'] = False new_type = {'extra_specs': self.data.rep_extra_specs} target_extra_specs = deepcopy(new_type['extra_specs']) target_extra_specs.update({ utils.SRP: srp, utils.ARRAY: array_id, utils.SLO: target_slo, utils.WORKLOAD: target_workload, utils.INTERVAL: extra_specs[utils.INTERVAL], utils.RETRIES: extra_specs[utils.RETRIES], utils.DISABLECOMPRESSION: False, utils.REP_MODE: utils.REP_SYNC, utils.REP_CONFIG: rep_config_sync}) success, model_update = self.common._migrate_volume( array_id, volume, device_id, srp, target_slo, target_workload, volume_name, new_type, extra_specs) mck_configure.assert_called_once_with( array_id, volume, device_id, target_extra_specs) mck_retype.assert_called_once_with( array_id, srp, device_id, volume, volume_name, extra_specs, target_slo, target_workload, target_extra_specs) mck_protect.assert_called_once_with( array_id, target_storage_group, device_id, updated_volume_name, self.data.rep_extra_specs, volume) self.assertTrue(success) @mock.patch.object(rest.PowerMaxRest, 'get_volume_snapshot_list', return_value=list()) @mock.patch.object(utils.PowerMaxUtils, 'get_rep_config', return_value=tpd.PowerMaxData.rep_config_async) @mock.patch.object(common.PowerMaxCommon, 'get_volume_metadata', return_value='') @mock.patch.object(common.PowerMaxCommon, 'update_metadata', return_value=tpd.PowerMaxData.replication_model) @mock.patch.object(provision.PowerMaxProvision, 'verify_slo_workload', return_value=(True, True)) @mock.patch.object(rest.PowerMaxRest, 'srdf_resume_replication') @mock.patch.object(common.PowerMaxCommon, '_retype_volume', return_value=(True, 'storage_group')) @mock.patch.object(common.PowerMaxCommon, 'configure_volume_replication', return_value=('status', 'data', tpd.PowerMaxData.rep_info_dict, tpd.PowerMaxData.rep_extra_specs_mgmt, True)) 
@mock.patch.object(common.PowerMaxCommon, '_cleanup_device_snapvx') @mock.patch.object(common.PowerMaxCommon, 'break_rdf_device_pair_session', return_value=(tpd.PowerMaxData.rep_extra_specs_mgmt, True)) @mock.patch.object(common.PowerMaxCommon, '_validate_rdfg_status') def test_migrate_volume_success_rep_to_rep( self, mck_valid, mck_break, mck_cleanup, mck_rep, mck_retype, mck_resume, mck_slo, mck_upd_meta, mck_get_meta, mck_rep_conf, mck_get_snaps): array = self.data.array volume = self.data.test_volume device_id = self.data.device_id srp = self.data.srp target_slo = self.data.slo_silver target_workload = self.data.workload volume_name = volume.name extra_specs = deepcopy(self.data.rep_extra_specs) extra_specs[utils.REP_CONFIG] = self.data.rep_config_sync extra_specs[utils.REPLICATION_DEVICE_BACKEND_ID] = ( self.data.rep_config_sync[utils.BACKEND_ID]) target_extra_specs = deepcopy(self.data.rep_extra_specs) target_extra_specs['array'] = self.data.array target_extra_specs['slo'] = target_slo target_extra_specs['rep_mode'] = utils.REP_ASYNC target_extra_specs['rdf_group_no'] = self.data.rdf_group_name_2 target_extra_specs[utils.REP_CONFIG] = self.data.rep_config_async target_extra_specs[utils.REPLICATION_DEVICE_BACKEND_ID] = ( self.data.rep_config_async[utils.BACKEND_ID]) target_extra_specs['storagetype:disablecompression'] = False new_type = {'extra_specs': target_extra_specs} success, model_update = self.common._migrate_volume( array, volume, device_id, srp, target_slo, target_workload, volume_name, new_type, extra_specs) self.assertEqual(2, mck_valid.call_count) mck_valid.assert_any_call(array, extra_specs) mck_break.assert_called_once_with( array, device_id, volume_name, extra_specs, volume) mck_cleanup.assert_called_once_with(array, device_id, extra_specs) mck_rep.assert_called_once_with( array, volume, device_id, target_extra_specs) mck_retype.assert_called_once() self.assertEqual(2, mck_resume.call_count) mck_resume.assert_called_with( array, self.data.rep_extra_specs_mgmt['mgmt_sg_name'], extra_specs['rdf_group_no'], self.data.rep_extra_specs_mgmt) self.assertTrue(success) self.assertEqual(self.data.replication_model, model_update) @mock.patch.object( provision.PowerMaxProvision, 'verify_slo_workload', return_value=(True, True)) @mock.patch.object( common.PowerMaxCommon, 'break_rdf_device_pair_session_promotion') @mock.patch.object( common.PowerMaxCommon, 'get_volume_metadata', return_value='') @mock.patch.object( common.PowerMaxCommon, '_retype_volume', return_value=(True, tpd.PowerMaxData.defaultstoragegroup_name)) def test_migrate_volume_success_rep_promotion( self, mck_retype, mck_get, mck_break, mck_valid): array_id = self.data.remote_array volume = self.data.test_rep_volume device_id = self.data.device_id srp = 'SRP_2' target_slo = self.data.slo_silver target_workload = self.data.workload volume_name = volume.name new_type = {'extra_specs': {}} extra_specs = self.data.rep_extra_specs_rep_config updated_host = 'HostX@Backend#Diamond+DSS+SRP_2+000197800124' self.common.promotion = True target_extra_specs = { utils.SRP: srp, utils.ARRAY: array_id, utils.SLO: target_slo, utils.WORKLOAD: target_workload, utils.INTERVAL: extra_specs[utils.INTERVAL], utils.RETRIES: extra_specs[utils.RETRIES], utils.DISABLECOMPRESSION: False} success, model_update = self.common._migrate_volume( array_id, volume, device_id, srp, target_slo, target_workload, volume_name, new_type, extra_specs) self.assertEqual(model_update['host'], updated_host) mck_break.assert_called_once_with( array_id, device_id, 
volume_name, extra_specs) mck_retype.assert_called_once_with( array_id, srp, device_id, volume, volume_name, extra_specs, target_slo, target_workload, target_extra_specs) self.assertTrue(success) self.common.promotion = False @mock.patch.object( common.PowerMaxCommon, 'update_metadata', return_value={'metadata': { 'Configuration': 'RDF2+TDEV', 'ReplicationEnabled': 'True'}}) @mock.patch.object( common.PowerMaxCommon, '_rdf_vols_partitioned', return_value=True) @mock.patch.object( provision.PowerMaxProvision, 'verify_slo_workload', return_value=(True, True)) @mock.patch.object( common.PowerMaxCommon, 'break_rdf_device_pair_session_promotion') @mock.patch.object( common.PowerMaxCommon, 'get_volume_metadata', return_value='') @mock.patch.object( common.PowerMaxCommon, '_retype_volume', return_value=(True, tpd.PowerMaxData.defaultstoragegroup_name)) def test_migrate_volume_success_rep_partitioned( self, mck_retype, mck_get, mck_break, mck_valid, mck_partitioned, mck_update): array_id = self.data.array volume = self.data.test_rep_volume device_id = self.data.device_id srp = self.data.srp target_slo = self.data.slo_silver target_workload = self.data.workload volume_name = volume.name new_type = {'extra_specs': {}} extra_specs = self.data.rep_extra_specs_rep_config self.common.promotion = True target_extra_specs = { utils.SRP: srp, utils.ARRAY: array_id, utils.SLO: target_slo, utils.WORKLOAD: target_workload, utils.INTERVAL: extra_specs[utils.INTERVAL], utils.RETRIES: extra_specs[utils.RETRIES], utils.DISABLECOMPRESSION: False} success, model_update = self.common._migrate_volume( array_id, volume, device_id, srp, target_slo, target_workload, volume_name, new_type, extra_specs) self.assertEqual(0, mck_break.call_count) mck_retype.assert_called_once_with( array_id, srp, device_id, volume, volume_name, extra_specs, target_slo, target_workload, target_extra_specs) self.assertTrue(success) self.common.promotion = False config_metadata = model_update['metadata']['Configuration'] rep_metadata = model_update['metadata']['ReplicationEnabled'] self.assertEqual('TDEV', config_metadata) self.assertEqual('False', rep_metadata) @mock.patch.object(masking.PowerMaxMasking, 'add_volume_to_storage_group') @mock.patch.object(provision.PowerMaxProvision, 'get_or_create_group') @mock.patch.object(utils.PowerMaxUtils, 'get_rdf_management_group_name', return_value=tpd.PowerMaxData.rdf_managed_async_grp) def test_add_volume_to_rdf_management_group(self, mck_get_rdf, mck_get_grp, mck_add): array = self.data.array device_id = self.data.device_id volume_name = self.data.volume_id remote_array = self.data.remote_array target_device_id = self.data.device_id2 extra_specs = deepcopy(self.data.rep_extra_specs) extra_specs[utils.REP_CONFIG] = self.data.rep_config_sync group_name = self.data.rdf_managed_async_grp get_create_grp_calls = [ call(array, group_name, extra_specs), call(remote_array, group_name, extra_specs)] add_vol_calls = [ call(array, device_id, group_name, volume_name, extra_specs, force=True), call(remote_array, target_device_id, group_name, volume_name, extra_specs, force=True)] self.common._add_volume_to_rdf_management_group( array, device_id, volume_name, remote_array, target_device_id, extra_specs) mck_get_grp.assert_has_calls(get_create_grp_calls) mck_add.assert_has_calls(add_vol_calls) @mock.patch.object( common.PowerMaxCommon, '_delete_from_srp') @mock.patch.object( rest.PowerMaxRest, 'srdf_resume_replication') @mock.patch.object( rest.PowerMaxRest, 'get_rdf_group', 
return_value=tpd.PowerMaxData.rdf_group_details) @mock.patch.object( masking.PowerMaxMasking, 'remove_and_reset_members') @mock.patch.object( provision.PowerMaxProvision, 'break_rdf_relationship') @mock.patch.object( rest.PowerMaxRest, 'get_storage_groups_from_volume', return_value=tpd.PowerMaxData.default_sg_re_enabled) @mock.patch.object( utils.PowerMaxUtils, 'get_rdf_management_group_name', return_value=tpd.PowerMaxData.rdf_managed_async_grp) @mock.patch.object( rest.PowerMaxRest, 'are_vols_rdf_paired', return_value=(True, None, utils.RDF_SYNC_STATE)) def test_cleanup_remote_target_async_metro( self, mck_paired, mck_get_rdf, mck_get_sg, mck_break, mck_reset, mck_get_rdf_grp, mck_resume, mck_delete): array = self.data.array volume = self.data.test_volume remote_array = self.data.remote_array device_id = self.data.device_id target_device_id = self.data.device_id2 rdf_group_no = self.data.rdf_group_no_1 volume_name = self.data.volume_id rep_extra_specs = deepcopy(self.data.rep_extra_specs) rep_extra_specs[utils.REP_MODE] = utils.REP_METRO rep_extra_specs[utils.REP_CONFIG] = self.data.rep_config_metro sg_name = self.data.default_sg_re_enabled async_grp = self.data.rdf_managed_async_grp pair_state = utils.RDF_SYNC_STATE reset_calls = [ call(remote_array, volume, target_device_id, volume_name, rep_extra_specs, sg_name), call(remote_array, volume, target_device_id, volume_name, rep_extra_specs, async_grp)] self.common._cleanup_remote_target( array, volume, remote_array, device_id, target_device_id, rdf_group_no, volume_name, rep_extra_specs) mck_paired.assert_called_once_with( array, remote_array, device_id, target_device_id) mck_get_rdf.assert_called_once_with(self.data.rep_config_metro) mck_get_sg.assert_called_once_with(array, device_id) mck_break.assert_called_once_with( array, device_id, sg_name, rdf_group_no, rep_extra_specs, pair_state) mck_reset.assert_has_calls(reset_calls) mck_get_rdf_grp.assert_called_once_with(array, rdf_group_no) mck_resume.assert_called_once_with( array, sg_name, rdf_group_no, rep_extra_specs) mck_delete.assert_called_once_with( remote_array, target_device_id, volume_name, rep_extra_specs) @mock.patch.object( common.PowerMaxCommon, '_delete_from_srp') @mock.patch.object( rest.PowerMaxRest, 'srdf_resume_replication') @mock.patch.object( rest.PowerMaxRest, 'get_rdf_group', return_value=tpd.PowerMaxData.rdf_group_details) @mock.patch.object( masking.PowerMaxMasking, 'remove_and_reset_members') @mock.patch.object( provision.PowerMaxProvision, 'break_rdf_relationship') @mock.patch.object( rest.PowerMaxRest, 'get_storage_groups_from_volume', return_value=tpd.PowerMaxData.default_sg_re_enabled) @mock.patch.object( rest.PowerMaxRest, 'are_vols_rdf_paired', return_value=(True, None, utils.RDF_SYNC_STATE)) def test_cleanup_remote_target_sync( self, mck_paired, mck_get_sg, mck_break, mck_reset, mck_get_rdf_grp, mck_resume, mck_delete): array = self.data.array volume = self.data.test_volume remote_array = self.data.remote_array device_id = self.data.device_id target_device_id = self.data.device_id2 rdf_group_no = self.data.rdf_group_no_1 volume_name = self.data.volume_id rep_extra_specs = deepcopy(self.data.rep_extra_specs) rep_extra_specs[utils.REP_MODE] = utils.REP_SYNC sg_name = self.data.default_sg_re_enabled pair_state = utils.RDF_SYNC_STATE self.common._cleanup_remote_target( array, volume, remote_array, device_id, target_device_id, rdf_group_no, volume_name, rep_extra_specs) mck_paired.assert_called_once_with( array, remote_array, device_id, target_device_id) 
mck_get_sg.assert_called_once_with(array, device_id) mck_break.assert_called_once_with( array, device_id, sg_name, rdf_group_no, rep_extra_specs, pair_state) mck_reset.assert_called_once_with( remote_array, volume, target_device_id, volume_name, rep_extra_specs, sg_name) mck_get_rdf_grp.assert_called_once_with(array, rdf_group_no) mck_resume.assert_called_once_with( array, sg_name, rdf_group_no, rep_extra_specs) mck_delete.assert_called_once_with( remote_array, target_device_id, volume_name, rep_extra_specs) @mock.patch.object( rest.PowerMaxRest, 'get_storage_groups_from_volume', return_value=tpd.PowerMaxData.sg_list['storageGroupId']) @mock.patch.object( rest.PowerMaxRest, 'get_rdf_pair_volume', return_value=tpd.PowerMaxData.rdf_group_vol_details) @mock.patch.object( common.PowerMaxCommon, '_get_replication_extra_specs', return_value=tpd.PowerMaxData.rep_extra_specs) @mock.patch.object( common.PowerMaxCommon, 'get_rdf_details', return_value=(tpd.PowerMaxData.rdf_group_no_1, tpd.PowerMaxData.remote_array)) def test_cleanup_rdf_device_pair_vol_cnt_exception( self, mck_get_rdf, mck_get_rep, mck_get_rdf_pair, mck_get_sg_list): array = self.data.array rdf_group_no = self.data.rdf_group_no_1 device_id = self.data.device_id extra_specs = deepcopy(self.data.rep_extra_specs) extra_specs[utils.REP_MODE] = utils.REP_SYNC extra_specs[utils.REP_CONFIG] = self.data.rep_config_sync self.assertRaises( exception.VolumeBackendAPIException, self.common.cleanup_rdf_device_pair, array, rdf_group_no, device_id, extra_specs) @mock.patch.object(rest.PowerMaxRest, 'get_storage_group_rdf_group_state', return_value=['activebias']) @mock.patch.object( rest.PowerMaxRest, 'is_volume_in_storagegroup', return_value=True) @mock.patch.object( rest.PowerMaxRest, 'srdf_resume_replication') @mock.patch.object( common.PowerMaxCommon, '_cleanup_rdf_storage_groups_post_r2_delete') @mock.patch.object( rest.PowerMaxRest, 'delete_volume') @mock.patch.object( rest.PowerMaxRest, 'remove_vol_from_sg') @mock.patch.object( rest.PowerMaxRest, 'srdf_remove_device_pair_from_storage_group') @mock.patch.object( rest.PowerMaxRest, 'srdf_suspend_replication') @mock.patch.object( rest.PowerMaxRest, 'get_num_vols_in_sg', return_value=2) @mock.patch.object( utils.PowerMaxUtils, 'get_rdf_management_group_name', return_value=tpd.PowerMaxData.rdf_managed_async_grp) @mock.patch.object( rest.PowerMaxRest, 'wait_for_rdf_pair_sync') @mock.patch.object( rest.PowerMaxRest, 'get_storage_groups_from_volume', return_value=tpd.PowerMaxData.sg_list_rep) @mock.patch.object( rest.PowerMaxRest, 'get_rdf_pair_volume', return_value=tpd.PowerMaxData.rdf_group_vol_details) @mock.patch.object( common.PowerMaxCommon, '_get_replication_extra_specs', return_value=tpd.PowerMaxData.rep_extra_specs_mgmt) @mock.patch.object( common.PowerMaxCommon, 'get_rdf_details', return_value=(tpd.PowerMaxData.rdf_group_no_1, tpd.PowerMaxData.remote_array)) def test_cleanup_rdf_device_pair( self, mck_get_rdf, mck_get_rep, mck_get_rdf_pair, mck_get_sg_list, mck_wait, mck_get_mgmt_grp, mck_get_num_vols, mck_suspend, mck_srdf_remove, mck_remove, mck_delete, mck_cleanup, mck_resume, mock_is_vol, mock_states): array = self.data.array rdf_group_no = self.data.rdf_group_no_1 device_id = self.data.device_id target_device_id = self.data.device_id2 extra_specs = deepcopy(self.data.rep_extra_specs) extra_specs[utils.REP_MODE] = utils.REP_METRO extra_specs[utils.REP_CONFIG] = self.data.rep_config_metro rep_extra_specs = deepcopy(self.data.rep_extra_specs_mgmt) rdf_mgmt_grp = 
self.data.rdf_managed_async_grp self.common.cleanup_rdf_device_pair( array, rdf_group_no, device_id, extra_specs) remove_calls = [ call(array, rdf_mgmt_grp, device_id, extra_specs), call(self.data.remote_array, rdf_mgmt_grp, target_device_id, rep_extra_specs)] mck_suspend.assert_called_once_with( array, rdf_mgmt_grp, rdf_group_no, rep_extra_specs) mck_remove.assert_has_calls(remove_calls) mck_resume.assert_called_once_with( array, rdf_mgmt_grp, rdf_group_no, rep_extra_specs) @mock.patch.object(rest.PowerMaxRest, 'srdf_resume_replication') @mock.patch.object( rest.PowerMaxRest, 'srdf_remove_device_pair_from_storage_group', side_effect=exception.CinderException) @mock.patch.object( rest.PowerMaxRest, 'get_storage_groups_from_volume', return_value=[tpd.PowerMaxData.storagegroup_name_f]) @mock.patch.object( rest.PowerMaxRest, 'get_rdf_pair_volume', return_value=tpd.PowerMaxData.rdf_group_vol_details) @mock.patch.object( common.PowerMaxCommon, '_get_replication_extra_specs', return_value=tpd.PowerMaxData.rep_extra_specs) @mock.patch.object( common.PowerMaxCommon, 'get_rdf_details', return_value=(tpd.PowerMaxData.rdf_group_no_1, tpd.PowerMaxData.remote_array)) def test_cleanup_rdf_device_pair_attempt_resume_on_exception( self, mck_rdf, mck_rep, mck_pair, mck_sg, mck_rem, mck_resume): array = self.data.array rdf_group_no = self.data.rdf_group_no_1 device_id = self.data.device_id extra_specs = deepcopy(self.data.rep_extra_specs) extra_specs[utils.REP_MODE] = utils.REP_SYNC extra_specs[utils.REP_CONFIG] = self.data.rep_config_sync rep_extra_specs = self.common._get_replication_extra_specs( extra_specs, extra_specs[utils.REP_CONFIG]) self.assertRaises( exception.CinderException, self.common.cleanup_rdf_device_pair, array, rdf_group_no, device_id, extra_specs) mck_resume.assert_called_once_with( array, self.data.storagegroup_name_f, rdf_group_no, rep_extra_specs, False) @mock.patch.object( rest.PowerMaxRest, 'get_num_vols_in_sg', return_value=1) @mock.patch.object( common.PowerMaxCommon, '_get_replication_extra_specs', return_value=tpd.PowerMaxData.rep_extra_specs_mgmt) def test_prepare_replication_details(self, mck_get_rep, mck_get_vols): extra_specs = deepcopy(self.data.extra_specs_rep_enabled) extra_specs['workload'] = 'NONE' extra_specs['rep_mode'] = utils.REP_SYNC extra_specs[utils.REP_CONFIG] = self.data.rep_config_sync rep_extra_specs = self.data.rep_extra_specs_mgmt ref_info_dict = { 'initial_device_list': ['00001', '00002'], 'local_array': self.data.array, 'rdf_group_no': self.data.rdf_group_no_1, 'remote_array': self.data.remote_array, 'rep_mode': utils.REP_SYNC, 'service_level': self.data.slo_diamond, 'sg_name': self.data.default_sg_no_slo_re_enabled, 'sync_interval': 2, 'sync_retries': 200} rep_first_vol, resp_extra_specs, rep_info_dict, rdfg_empty = ( self.common.prepare_replication_details(extra_specs)) self.assertFalse(rep_first_vol) self.assertEqual(rep_extra_specs, resp_extra_specs) self.assertEqual(ref_info_dict, rep_info_dict) self.assertFalse(rdfg_empty) @mock.patch.object( rest.PowerMaxRest, 'srdf_protect_storage_group') def test_srdf_protect_storage_group(self, mck_protect): extra_specs = self.data.rep_extra_specs rep_extra_specs = self.data.rep_extra_specs_mgmt volume_dict = {'storage_group': self.data.rdf_managed_async_grp} self.common.srdf_protect_storage_group(extra_specs, rep_extra_specs, volume_dict) mck_protect.assert_called_once_with( extra_specs['array'], rep_extra_specs['array'], rep_extra_specs['rdf_group_no'], extra_specs['rep_mode'], volume_dict['storage_group'], 
rep_extra_specs['slo'], extra_specs) def test_gather_replication_updates(self): self.common.rep_config = { 'rdf_group_label': self.data.rdf_group_name_1} extra_specs = self.data.rep_extra_specs rep_extra_specs = deepcopy(self.data.rep_extra_specs_mgmt) rep_extra_specs[utils.REP_CONFIG] = self.data.rep_config_async volume_dict = {'storage_group': self.data.rdf_managed_async_grp, 'remote_device_id': self.data.device_id2, 'device_uuid': self.data.volume_id} ref_replication_update = ( {'replication_status': common.REPLICATION_ENABLED, 'replication_driver_data': str( {'array': self.data.remote_array, 'device_id': self.data.device_id2})}) replication_update, rep_info_dict = ( self.common.gather_replication_updates( extra_specs, rep_extra_specs, volume_dict)) self.assertEqual(ref_replication_update, replication_update) @mock.patch.object( common.PowerMaxCommon, '_delete_from_srp') @mock.patch.object( rest.PowerMaxRest, 'get_volumes_in_storage_group', return_value=0) @mock.patch.object( masking.PowerMaxMasking, 'remove_volume_from_sg') @mock.patch.object( rest.PowerMaxRest, 'srdf_delete_device_pair') @mock.patch.object( rest.PowerMaxRest, 'srdf_suspend_replication') @mock.patch.object( rest.PowerMaxRest, 'wait_for_rdf_group_sync') @mock.patch.object( rest.PowerMaxRest, 'get_storage_group_rdf_group_state', return_value=[utils.RDF_SYNCINPROG_STATE]) @mock.patch.object( rest.PowerMaxRest, 'get_storage_groups_from_volume', side_effect=[tpd.PowerMaxData.r1_sg_list, tpd.PowerMaxData.r2_sg_list]) @mock.patch.object( rest.PowerMaxRest, 'get_rdf_pair_volume', return_value=tpd.PowerMaxData.rdf_group_vol_details) @mock.patch.object( common.PowerMaxCommon, '_get_replication_extra_specs', return_value=tpd.PowerMaxData.ex_specs_rep_config[utils.REP_CONFIG]) def test_break_rdf_device_pair_session_metro_async( self, mck_get_rep, mck_get_rdf, mck_get_sg, mck_get_sg_state, mck_wait, mck_suspend, mck_delete_rdf_pair, mck_remove, mck_get_vols, mck_delete): array = self.data.array device_id = self.data.device_id volume_name = self.data.test_volume.name extra_specs = deepcopy(self.data.ex_specs_rep_config) volume = self.data.test_volume rep_extra_specs, resume_rdf = ( self.common.break_rdf_device_pair_session( array, device_id, volume_name, extra_specs, volume)) extra_specs[utils.REP_CONFIG][utils.FORCE_VOL_EDIT] = True self.assertEqual(extra_specs[utils.REP_CONFIG], rep_extra_specs) self.assertFalse(resume_rdf) @mock.patch.object( common.PowerMaxCommon, '_delete_from_srp') @mock.patch.object( rest.PowerMaxRest, 'get_volumes_in_storage_group', return_value=10) @mock.patch.object( masking.PowerMaxMasking, 'remove_volume_from_sg') @mock.patch.object( rest.PowerMaxRest, 'srdf_delete_device_pair') @mock.patch.object( rest.PowerMaxRest, 'srdf_suspend_replication') @mock.patch.object( rest.PowerMaxRest, 'wait_for_rdf_group_sync') @mock.patch.object( rest.PowerMaxRest, 'get_storage_group_rdf_group_state', return_value=[utils.RDF_SYNCINPROG_STATE]) @mock.patch.object( rest.PowerMaxRest, 'get_storage_groups_from_volume', side_effect=[tpd.PowerMaxData.r1_sg_list, tpd.PowerMaxData.r2_sg_list]) @mock.patch.object( rest.PowerMaxRest, 'get_rdf_pair_volume', return_value=tpd.PowerMaxData.rdf_group_vol_details) @mock.patch.object( common.PowerMaxCommon, '_get_replication_extra_specs', return_value=( tpd.PowerMaxData.ex_specs_rep_config_sync[utils.REP_CONFIG])) def test_break_rdf_device_pair_session_sync( self, mck_get_rep, mck_get_rdf, mck_get_sg, mck_get_sg_state, mck_wait, mck_suspend, mck_delete_rdf_pair, mck_remove, mck_get_vols, 
mck_delete): array = self.data.array device_id = self.data.device_id volume_name = self.data.test_volume.name extra_specs = deepcopy(self.data.ex_specs_rep_config) extra_specs[utils.REP_MODE] = utils.REP_SYNC extra_specs[utils.REP_CONFIG]['mode'] = utils.REP_SYNC volume = self.data.test_volume rep_extra_specs, resume_rdf = ( self.common.break_rdf_device_pair_session( array, device_id, volume_name, extra_specs, volume)) extra_specs[utils.REP_CONFIG][utils.FORCE_VOL_EDIT] = True extra_specs[utils.REP_CONFIG]['mgmt_sg_name'] = ( self.data.default_sg_no_slo_re_enabled) self.assertEqual(extra_specs[utils.REP_CONFIG], rep_extra_specs) self.assertTrue(resume_rdf) @mock.patch.object(masking.PowerMaxMasking, 'remove_volume_from_sg') @mock.patch.object(rest.PowerMaxRest, 'srdf_delete_device_pair') @mock.patch.object(utils.PowerMaxUtils, 'get_rdf_management_group_name', return_value=tpd.PowerMaxData.rdf_managed_async_grp) def test_break_rdf_device_pair_session_promotion_metro( self, mck_get, mck_del, mck_rem): array = self.data.array device_id = self.data.device_id volume_name = self.data.test_rep_volume.name extra_specs = self.data.ex_specs_rep_config rep_config = extra_specs[utils.REP_CONFIG] mgmt_group = self.data.rdf_managed_async_grp rdfg_no = extra_specs['rdf_group_no'] self.common.break_rdf_device_pair_session_promotion( array, device_id, volume_name, extra_specs) mck_get.assert_called_once_with(rep_config) mck_del.assert_called_once_with(array, rdfg_no, device_id) mck_rem.assert_called_once_with( array, device_id, volume_name, mgmt_group, extra_specs) @mock.patch.object(rest.PowerMaxRest, 'get_rdf_group', return_value=tpd.PowerMaxData.rdf_group_details) @mock.patch.object( provision.PowerMaxProvision, 'verify_slo_workload', return_value=(True, True)) @mock.patch.object(utils.PowerMaxUtils, 'get_rdf_management_group_name', return_value=tpd.PowerMaxData.rdf_managed_async_grp) @mock.patch.object(common.PowerMaxCommon, '_validate_management_group_volume_consistency', return_value=True) @mock.patch.object(common.PowerMaxCommon, '_validate_storage_group_rdf_states', side_effect=[True, True]) @mock.patch.object(common.PowerMaxCommon, '_validate_rdf_group_storage_group_exclusivity', side_effect=[True, True]) @mock.patch.object(common.PowerMaxCommon, '_validate_storage_group_is_replication_enabled', side_effect=[True, True]) @mock.patch.object(rest.PowerMaxRest, 'get_storage_group', return_value=tpd.PowerMaxData.sg_details[0]) def test_validate_rdfg_status_success( self, mck_get, mck_is_rep, mck_is_excl, mck_states, mck_cons, mck_mgrp_name, mck_slo, mck_rdf): array = self.data.array extra_specs = deepcopy(self.data.rep_extra_specs6) extra_specs[utils.REP_MODE] = utils.REP_ASYNC extra_specs[utils.REP_CONFIG] = self.data.rep_config_async management_sg_name = self.data.rdf_managed_async_grp rdfg = self.data.rdf_group_no_2 mode = utils.REP_ASYNC self.common._validate_rdfg_status(array, extra_specs) self.assertEqual(2, mck_get.call_count) self.assertEqual(2, mck_is_rep.call_count) self.assertEqual(2, mck_is_excl.call_count) self.assertEqual(2, mck_states.call_count) self.assertEqual(1, mck_cons.call_count) self.assertEqual(1, mck_mgrp_name.call_count) self.assertEqual(3, mck_rdf.call_count) mck_is_rep.assert_called_with(array, management_sg_name) mck_is_excl.assert_called_with(array, management_sg_name) mck_states.assert_called_with(array, management_sg_name, rdfg, mode) mck_cons.assert_called_with(array, management_sg_name, rdfg) @mock.patch.object( provision.PowerMaxProvision, 'verify_slo_workload', 
return_value=(True, True)) @mock.patch.object(common.PowerMaxCommon, '_validate_storage_group_rdf_states', return_value=False) @mock.patch.object(common.PowerMaxCommon, '_validate_rdf_group_storage_group_exclusivity', return_value=True) @mock.patch.object(common.PowerMaxCommon, '_validate_storage_group_is_replication_enabled', return_value=True) @mock.patch.object(rest.PowerMaxRest, 'get_storage_group', return_value=tpd.PowerMaxData.sg_details[0]) def test_validate_rdfg_status_failure_default_sg( self, mck_get, mck_is_rep, mck_is_excl, mck_states, mck_slo): array = self.data.array extra_specs = deepcopy(self.data.rep_extra_specs6) extra_specs[utils.REP_MODE] = utils.REP_ASYNC extra_specs[utils.REP_CONFIG] = self.data.rep_config_async rdfg = self.data.rdf_group_no_2 mode = utils.REP_ASYNC disable_compression = self.utils.is_compression_disabled(extra_specs) storage_group = self.utils.get_default_storage_group_name( extra_specs['srp'], extra_specs['slo'], extra_specs['workload'], disable_compression, True, extra_specs['rep_mode']) self.assertRaises(exception.VolumeBackendAPIException, self.common._validate_rdfg_status, array, extra_specs) self.assertEqual(1, mck_get.call_count) self.assertEqual(1, mck_is_rep.call_count) self.assertEqual(1, mck_is_excl.call_count) self.assertEqual(1, mck_states.call_count) mck_is_rep.assert_called_with(array, storage_group) mck_is_excl.assert_called_with(array, storage_group) mck_states.assert_called_with(array, storage_group, rdfg, mode) @mock.patch.object( provision.PowerMaxProvision, 'verify_slo_workload', return_value=(True, True)) @mock.patch.object(utils.PowerMaxUtils, 'get_rdf_management_group_name', return_value=tpd.PowerMaxData.rdf_managed_async_grp) @mock.patch.object(common.PowerMaxCommon, '_validate_management_group_volume_consistency', return_value=False) @mock.patch.object(common.PowerMaxCommon, '_validate_storage_group_rdf_states', side_effect=[True, True]) @mock.patch.object(common.PowerMaxCommon, '_validate_rdf_group_storage_group_exclusivity', side_effect=[True, True]) @mock.patch.object(common.PowerMaxCommon, '_validate_storage_group_is_replication_enabled', side_effect=[True, True]) @mock.patch.object(rest.PowerMaxRest, 'get_storage_group', return_value=tpd.PowerMaxData.sg_details[0]) def test_validate_rdfg_status_failure_management_sg( self, mck_get, mck_is_rep, mck_is_excl, mck_states, mck_cons, mck_mgrp_name, mck_slo): array = self.data.array extra_specs = deepcopy(self.data.rep_extra_specs6) extra_specs[utils.REP_MODE] = utils.REP_ASYNC extra_specs[utils.REP_CONFIG] = self.data.rep_config_async management_sg_name = self.data.rdf_managed_async_grp rdfg = self.data.rdf_group_no_2 mode = utils.REP_ASYNC self.assertRaises(exception.VolumeBackendAPIException, self.common._validate_rdfg_status, array, extra_specs) self.assertEqual(2, mck_get.call_count) self.assertEqual(2, mck_is_rep.call_count) self.assertEqual(2, mck_is_excl.call_count) self.assertEqual(2, mck_states.call_count) self.assertEqual(1, mck_cons.call_count) self.assertEqual(1, mck_mgrp_name.call_count) mck_is_rep.assert_called_with(array, management_sg_name) mck_is_excl.assert_called_with(array, management_sg_name) mck_states.assert_called_with(array, management_sg_name, rdfg, mode) mck_cons.assert_called_with(array, management_sg_name, rdfg) @mock.patch.object(rest.PowerMaxRest, 'get_rdf_group', side_effect=(tpd.PowerMaxData.rdf_group_details, tpd.PowerMaxData.rdf_group_details, {'numDevices': '1000'})) @mock.patch.object( provision.PowerMaxProvision, 'verify_slo_workload', 
return_value=(True, True)) @mock.patch.object(utils.PowerMaxUtils, 'get_rdf_management_group_name', return_value=tpd.PowerMaxData.rdf_managed_async_grp) @mock.patch.object(common.PowerMaxCommon, '_validate_management_group_volume_consistency', return_value=True) @mock.patch.object(common.PowerMaxCommon, '_validate_storage_group_rdf_states', side_effect=[True, True]) @mock.patch.object(common.PowerMaxCommon, '_validate_rdf_group_storage_group_exclusivity', side_effect=[True, True]) @mock.patch.object(common.PowerMaxCommon, '_validate_storage_group_is_replication_enabled', side_effect=[True, True]) @mock.patch.object(rest.PowerMaxRest, 'get_storage_group', return_value=tpd.PowerMaxData.sg_details[0]) def test_validate_rdfg_status_failure_device_counts( self, mck_get, mck_is_rep, mck_is_excl, mck_states, mck_cons, mck_mgrp_name, mck_slo, mck_rdf): array = self.data.array extra_specs = deepcopy(self.data.rep_extra_specs6) extra_specs[utils.REP_MODE] = utils.REP_ASYNC extra_specs[utils.REP_CONFIG] = self.data.rep_config_async management_sg_name = self.data.rdf_managed_async_grp rdfg = self.data.rdf_group_no_2 mode = utils.REP_ASYNC self.assertRaises(exception.VolumeDriverException, self.common._validate_rdfg_status, array, extra_specs) self.assertEqual(2, mck_get.call_count) self.assertEqual(2, mck_is_rep.call_count) self.assertEqual(2, mck_is_excl.call_count) self.assertEqual(2, mck_states.call_count) self.assertEqual(1, mck_cons.call_count) self.assertEqual(1, mck_mgrp_name.call_count) self.assertEqual(3, mck_rdf.call_count) mck_is_rep.assert_called_with(array, management_sg_name) mck_is_excl.assert_called_with(array, management_sg_name) mck_states.assert_called_with(array, management_sg_name, rdfg, mode) mck_cons.assert_called_with(array, management_sg_name, rdfg) @mock.patch.object(rest.PowerMaxRest, 'get_storage_group_rep', return_value={'rdf': True}) def test_validate_storage_group_is_replication_enabled_success( self, mck_get): array = self.data.array storage_group = self.data.storagegroup_name_f is_valid = self.common._validate_storage_group_is_replication_enabled( array, storage_group) self.assertTrue(is_valid) mck_get.assert_called_once_with(array, storage_group) @mock.patch.object(rest.PowerMaxRest, 'get_storage_group_rep', return_value={'rdf': False}) def test_validate_storage_group_is_replication_enabled_failure( self, mck_get): array = self.data.array storage_group = self.data.storagegroup_name_f is_valid = self.common._validate_storage_group_is_replication_enabled( array, storage_group) self.assertFalse(is_valid) mck_get.assert_called_once_with(array, storage_group) @mock.patch.object(rest.PowerMaxRest, 'get_storage_group_rdf_group_state', return_value=[utils.RDF_SYNC_STATE]) def test_validate_storage_group_rdf_states_success(self, mck_get): array = self.data.array storage_group = self.data.storagegroup_name_f rdf_group_no = self.data.rdf_group_no_1 rep_mode = utils.REP_SYNC is_valid = self.common._validate_storage_group_rdf_states( array, storage_group, rdf_group_no, rep_mode) self.assertTrue(is_valid) mck_get.assert_called_once_with(array, storage_group, rdf_group_no) @mock.patch.object(rest.PowerMaxRest, 'get_storage_group_rdf_group_state', return_value=[utils.RDF_SYNC_STATE, utils.RDF_ACTIVE]) def test_validate_storage_group_rdf_states_multi_async_state_failure( self, mck_get): array = self.data.array storage_group = self.data.storagegroup_name_f rdf_group_no = self.data.rdf_group_no_1 rep_mode = utils.REP_ASYNC is_valid = self.common._validate_storage_group_rdf_states( 
array, storage_group, rdf_group_no, rep_mode) self.assertFalse(is_valid) mck_get.assert_called_once_with(array, storage_group, rdf_group_no) @mock.patch.object(rest.PowerMaxRest, 'get_storage_group_rdf_group_state', return_value=['invalid_state']) def test_validate_storage_group_rdf_states_invalid_state_failure( self, mck_get): array = self.data.array storage_group = self.data.storagegroup_name_f rdf_group_no = self.data.rdf_group_no_1 rep_mode = utils.REP_ASYNC is_valid = self.common._validate_storage_group_rdf_states( array, storage_group, rdf_group_no, rep_mode) self.assertFalse(is_valid) mck_get.assert_called_once_with(array, storage_group, rdf_group_no) @mock.patch.object(rest.PowerMaxRest, 'get_storage_group_rdf_groups', return_value=[tpd.PowerMaxData.rdf_group_no_1]) def test_validate_rdf_group_storage_group_exclusivity_success( self, mck_get): array = self.data.array storage_group = self.data.storagegroup_name_f is_valid = self.common._validate_rdf_group_storage_group_exclusivity( array, storage_group) self.assertTrue(is_valid) mck_get.assert_called_once_with(array, storage_group) @mock.patch.object(rest.PowerMaxRest, 'get_storage_group_rdf_groups', return_value=[tpd.PowerMaxData.rdf_group_no_1, tpd.PowerMaxData.rdf_group_no_2]) def test_validate_rdf_group_storage_group_exclusivity_failure( self, mck_get): array = self.data.array storage_group = self.data.storagegroup_name_f is_valid = self.common._validate_rdf_group_storage_group_exclusivity( array, storage_group) self.assertFalse(is_valid) mck_get.assert_called_once_with(array, storage_group) @mock.patch.object(rest.PowerMaxRest, 'get_volumes_in_storage_group', return_value=[tpd.PowerMaxData.device_id]) @mock.patch.object(rest.PowerMaxRest, 'get_rdf_group_volume_list', return_value=[tpd.PowerMaxData.device_id]) def test_validate_management_group_volume_consistency_success( self, mck_rdf, mck_sg): array = self.data.array storage_group = self.data.rdf_managed_async_grp rdf_group = self.data.rdf_group_no_1 is_valid = self.common._validate_management_group_volume_consistency( array, storage_group, rdf_group) self.assertTrue(is_valid) mck_rdf.assert_called_once_with(array, rdf_group) mck_sg.assert_called_once_with(array, storage_group) @mock.patch.object(rest.PowerMaxRest, 'get_volumes_in_storage_group', return_value=[tpd.PowerMaxData.device_id]) @mock.patch.object(rest.PowerMaxRest, 'get_rdf_group_volume_list', return_value=[tpd.PowerMaxData.device_id, tpd.PowerMaxData.device_id2]) def test_validate_management_group_volume_consistency_failure( self, mck_rdf, mck_sg): array = self.data.array storage_group = self.data.rdf_managed_async_grp rdf_group = self.data.rdf_group_no_1 is_valid = self.common._validate_management_group_volume_consistency( array, storage_group, rdf_group) self.assertFalse(is_valid) mck_rdf.assert_called_once_with(array, rdf_group) mck_sg.assert_called_once_with(array, storage_group) @mock.patch.object(rest.PowerMaxRest, 'srdf_resume_replication') def test_cleanup_on_configure_volume_replication_failure_resume( self, mck_resume): resume_rdf = True rdf_pair_created = False remote_sg_get = False add_to_mgmt_sg = False r1_device_id = self.data.device_id r2_device_id = self.data.device_id2 mgmt_sg_name = self.data.rdf_managed_async_grp array = self.data.array remote_array = self.data.remote_array extra_specs = self.data.extra_specs_rep_enabled rep_extra_specs = self.data.rep_extra_specs_mgmt rdf_group_no = rep_extra_specs['rdf_group_no'] volume = self.data.test_volume tgt_sg_name = self.data.storagegroup_name_i 
self.common._cleanup_on_configure_volume_replication_failure( resume_rdf, rdf_pair_created, remote_sg_get, add_to_mgmt_sg, r1_device_id, r2_device_id, mgmt_sg_name, array, remote_array, rdf_group_no, extra_specs, rep_extra_specs, volume, tgt_sg_name) mck_resume.assert_called_once_with( array, mgmt_sg_name, rdf_group_no, rep_extra_specs) @mock.patch.object(rest.PowerMaxRest, 'delete_storage_group') @mock.patch.object(rest.PowerMaxRest, 'get_volumes_in_storage_group', return_value=[]) @mock.patch.object( masking.PowerMaxMasking, 'remove_vol_from_storage_group') @mock.patch.object(rest.PowerMaxRest, 'srdf_resume_replication') @mock.patch.object( common.PowerMaxCommon, 'break_rdf_device_pair_session', return_value=(tpd.PowerMaxData.rep_extra_specs_mgmt, True)) @mock.patch.object(utils.PowerMaxUtils, 'get_volume_element_name', return_value=tpd.PowerMaxData.volume_id) def test_cleanup_on_configure_volume_replication_failure_pair_created( self, mck_elem, mck_break, mck_resume, mck_remove, mck_get, mck_del): resume_rdf = True rdf_pair_created = True remote_sg_get = True add_to_mgmt_sg = True r1_device_id = self.data.device_id r2_device_id = self.data.device_id2 mgmt_sg_name = self.data.rdf_managed_async_grp array = self.data.array remote_array = self.data.remote_array extra_specs = self.data.extra_specs_rep_enabled rep_extra_specs = self.data.rep_extra_specs_mgmt rdf_group_no = self.data.rdf_group_no_1 volume = self.data.test_volume tgt_sg_name = self.data.storagegroup_name_i volume_name = self.data.volume_id self.common._cleanup_on_configure_volume_replication_failure( resume_rdf, rdf_pair_created, remote_sg_get, add_to_mgmt_sg, r1_device_id, r2_device_id, mgmt_sg_name, array, remote_array, rdf_group_no, extra_specs, rep_extra_specs, volume, tgt_sg_name) mck_elem.assert_called_once_with(volume.id) mck_break.assert_called_once_with( array, r1_device_id, volume_name, extra_specs, volume) mck_resume.assert_called_once_with( array, mgmt_sg_name, rdf_group_no, rep_extra_specs) mck_remove.assert_called_with( remote_array, r2_device_id, mgmt_sg_name, '', rep_extra_specs) self.assertEqual(2, mck_remove.call_count) mck_get.assert_called_once_with(remote_array, tgt_sg_name) mck_del.assert_called_once_with(remote_array, tgt_sg_name) @mock.patch.object(rest.PowerMaxRest, 'srdf_resume_replication') def test_cleanup_on_break_rdf_device_pair_session_failure_resume( self, mck_resume): rdfg_suspended = True pair_deleted = False r2_sg_remove = False array = self.data.array management_sg = self.data.rdf_managed_async_grp extra_specs = self.data.extra_specs_rep_enabled rep_extra_specs = self.data.rep_extra_specs rdf_group_no = rep_extra_specs['rdf_group_no'] r2_sg_names = [self.data.storagegroup_name_i] device_id = self.data.device_id remote_array = self.data.remote_array remote_device_id = self.data.device_id2 volume = self.data.test_volume volume_name = self.data.volume_id self.common._cleanup_on_break_rdf_device_pair_session_failure( rdfg_suspended, pair_deleted, r2_sg_remove, array, management_sg, rdf_group_no, extra_specs, r2_sg_names, device_id, remote_array, remote_device_id, volume, volume_name, rep_extra_specs) mck_resume.assert_called_once_with( array, management_sg, rdf_group_no, extra_specs) @mock.patch.object(rest.PowerMaxRest, 'srdf_resume_replication') @mock.patch.object(common.PowerMaxCommon, '_protect_storage_group') @mock.patch.object(utils.PowerMaxUtils, 'get_volume_element_name', return_value=tpd.PowerMaxData.volume_id) @mock.patch.object( common.PowerMaxCommon, 'configure_volume_replication', 
return_value=('first_vol_in_rdf_group', True, True, tpd.PowerMaxData.rep_extra_specs_mgmt, True)) @mock.patch.object(common.PowerMaxCommon, '_delete_from_srp') @mock.patch.object(masking.PowerMaxMasking, 'remove_volume_from_sg') def test_cleanup_on_break_rdf_device_pair_session_failure_pair_created( self, mck_remove, mck_delete, mck_configure, mck_elem, mck_protect, mck_resume): rdfg_suspended = True pair_deleted = True r2_sg_remove = False array = self.data.array management_sg = self.data.rdf_managed_async_grp extra_specs = self.data.extra_specs_rep_enabled rep_extra_specs = self.data.rep_extra_specs_mgmt rdf_group_no = rep_extra_specs['rdf_group_no'] r2_sg_names = [self.data.storagegroup_name_i] device_id = self.data.device_id remote_array = self.data.remote_array remote_device_id = self.data.device_id2 volume = self.data.test_volume volume_name = self.data.volume_id self.common._cleanup_on_break_rdf_device_pair_session_failure( rdfg_suspended, pair_deleted, r2_sg_remove, array, management_sg, rdf_group_no, extra_specs, r2_sg_names, device_id, remote_array, remote_device_id, volume, volume_name, rep_extra_specs) mck_remove.assert_called_once_with( remote_array, remote_device_id, volume_name, r2_sg_names[0], rep_extra_specs) mck_delete.assert_called_once_with( remote_array, remote_device_id, volume_name, extra_specs) mck_configure.assert_called_once_with( array, volume, device_id, extra_specs) mck_elem.assert_called_once_with(volume.id) mck_protect.assert_called_once_with( array, device_id, volume, volume_name, rep_extra_specs) mck_resume.assert_called_once_with( array, management_sg, rdf_group_no, rep_extra_specs) @mock.patch.object(common.PowerMaxCommon, '_add_replicated_volumes_to_default_storage_group') @mock.patch.object(common.PowerMaxCommon, '_replicate_group') @mock.patch.object(provision.PowerMaxProvision, 'link_and_break_replica') @mock.patch.object(rest.PowerMaxRest, 'get_storage_group_snap_id_list', return_value=[tpd.PowerMaxData.snap_id]) @mock.patch.object(common.PowerMaxCommon, 'get_volume_metadata') @mock.patch.object(common.PowerMaxCommon, '_create_non_replicated_volume') @mock.patch.object(utils.PowerMaxUtils, 'get_volume_group_utils', return_value=(None, {'interval': 1, 'retries': 1})) def test_create_group_from_src_replication( self, mock_grp_utils, mock_create, mock_metadata, mock_snap, mock_link, mock_rep, mock_add): context = None group_snapshot = self.data.test_group_snapshot_1 snapshots = [] source_vols = [self.data.test_volume] volumes = [self.data.test_volume] source_group = self.data.test_group_1 with mock.patch.object( volume_utils, 'is_group_a_cg_snapshot_type', return_value=True), mock.patch.object( volume_utils, 'is_group_a_type', return_value=True): self.common.create_group_from_src( context, self.data.test_rep_group2, volumes, group_snapshot, snapshots, source_group, source_vols) mock_create.assert_called_once() mock_link.assert_called_once() mock_metadata.assert_called_once() mock_rep.assert_called_once() mock_add.assert_called_once() @mock.patch.object(masking.PowerMaxMasking, 'add_volumes_to_storage_group') def test_add_replicated_volumes_to_default_storage_group(self, mock_add): volumes_model_update = [{'provider_location': str( self.data.provider_location), 'replication_driver_data': str( {'array': self.data.remote_array, 'device_id': self.data.device_id2})}] rep_extra_specs = deepcopy(self.data.rep_extra_specs) self.common._add_replicated_volumes_to_default_storage_group( self.data.array, volumes_model_update, rep_extra_specs) 
        mock_add.assert_has_calls(
            [call(self.data.array, [self.data.device_id],
                  'OS-SRP_1-Diamond-DSS-RE-SG', rep_extra_specs),
             call(self.data.remote_array, [self.data.device_id2],
                  'OS-SRP_1-Diamond-DSS-RE-SG', rep_extra_specs)])

cinder-27.0.0/cinder/tests/unit/volume/drivers/dell_emc/powermax/test_powermax_rest.py

# Copyright (c) 2020 Dell Inc. or its subsidiaries.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from copy import deepcopy
import time
from unittest import mock
from unittest.mock import call

import requests

from cinder import exception
from cinder.tests.unit import test
from cinder.tests.unit.volume.drivers.dell_emc.powermax import (
    powermax_data as tpd)
from cinder.tests.unit.volume.drivers.dell_emc.powermax import (
    powermax_fake_objects as tpfo)
from cinder.volume.drivers.dell_emc.powermax import fc
from cinder.volume.drivers.dell_emc.powermax import rest
from cinder.volume.drivers.dell_emc.powermax import utils
from cinder.volume import volume_utils


class PowerMaxRestTest(test.TestCase):
    def setUp(self):
        self.data = tpd.PowerMaxData()
        super(PowerMaxRestTest, self).setUp()
        self.mock_object(volume_utils, 'get_max_over_subscription_ratio')
        configuration = tpfo.FakeConfiguration(
            None, 'RestTests', 1, 1, san_ip='1.1.1.1', san_login='smc',
            powermax_array=self.data.array, powermax_srp='SRP_1',
            san_password='smc', san_api_port=8443,
            powermax_port_groups=[self.data.port_group_name_i])
        self.mock_object(rest.PowerMaxRest, '_establish_rest_session',
                         return_value=tpfo.FakeRequestsSession())
        driver = fc.PowerMaxFCDriver(configuration=configuration)
        self.driver = driver
        self.common = self.driver.common
        self.rest = self.common.rest
        self.rest.is_snap_id = True
        self.rest.u4p_version = rest.U4P_100_VERSION
        self.utils = self.common.utils

    def test_rest_request_no_response(self):
        with mock.patch.object(self.rest.session, 'request',
                               return_value=tpfo.FakeResponse(None, None)):
            sc, msg = self.rest.request('TIMEOUT', '/fake_url')
        self.assertIsNone(sc)
        self.assertIsNone(msg)

    def test_rest_request_timeout_exception(self):
        self.assertRaises(requests.exceptions.Timeout,
                          self.rest.request, '', 'TIMEOUT')

    def test_rest_request_read_timeout_exception(self):
        self.assertRaises(requests.exceptions.ReadTimeout,
                          self.rest.request, '', 'READTIMEOUT', (60, 60))

    def test_rest_request_connect_timeout_exception(self):
        self.assertRaises(requests.exceptions.ConnectTimeout,
                          self.rest.request, '', 'CONNECTTIMEOUT', (60, 60))

    def test_rest_request_connection_exception(self):
        self.assertRaises(requests.exceptions.ConnectionError,
                          self.rest.request, '', 'CONNECTION')

    def test_rest_request_http_exception(self):
        self.assertRaises(requests.exceptions.HTTPError,
                          self.rest.request, '', 'HTTP')

    def test_rest_request_ssl_exception(self):
        self.assertRaises(requests.exceptions.SSLError,
                          self.rest.request, '', 'SSL')

    def
test_rest_request_undefined_exception(self): self.assertRaises(exception.VolumeBackendAPIException, self.rest.request, '', 'EXCEPTION') def test_rest_request_handle_failover(self): response = tpfo.FakeResponse(200, 'Success') with mock.patch.object(self.rest, '_handle_u4p_failover')as mock_fail: with mock.patch.object(self.rest.session, 'request', side_effect=[requests.ConnectionError, response]): self.mock_object(self.rest, 'u4p_failover_enabled', True) self.rest.request('/fake_uri', 'GET') mock_fail.assert_called_once() @mock.patch.object(time, 'sleep') def test_rest_request_failover_escape(self, mck_sleep): self.mock_object(self.rest, 'u4p_failover_lock', True) response = tpfo.FakeResponse(200, 'Success') with mock.patch.object(self.rest, '_handle_u4p_failover')as mock_fail: with mock.patch.object(self.rest.session, 'request', side_effect=[requests.ConnectionError, response]): self.mock_object(self.rest, 'u4p_failover_enabled', True) self.rest.request('/fake_uri', 'GET') mock_fail.assert_called_once() def test_wait_for_job_complete(self): rc, job, status, task = self.rest.wait_for_job_complete( {'status': 'created', 'jobId': '12345'}, self.data.extra_specs) self.assertEqual(0, rc) def test_wait_for_job_complete_failed(self): with mock.patch.object(self.rest, '_is_job_finished', side_effect=exception.BadHTTPResponseStatus): self.assertRaises(exception.VolumeBackendAPIException, self.rest.wait_for_job_complete, self.data.job_list[0], self.data.extra_specs) def test_is_job_finished_false(self): job_id = '55555' complete, response, rc, status, task = self.rest._is_job_finished( job_id) self.assertFalse(complete) def test_is_job_finished_failed(self): job_id = '55555' complete, response, rc, status, task = self.rest._is_job_finished( job_id) self.assertFalse(complete) with mock.patch.object(self.rest, 'request', return_value=(200, {'status': 'FAILED'})): complete, response, rc, status, task = self.rest._is_job_finished( job_id) self.assertTrue(complete) self.assertEqual(-1, rc) def test_check_status_code_success(self): status_code = 200 self.rest.check_status_code_success('test success', status_code, "") def test_check_status_code_not_success(self): status_code = 500 self.assertRaises(exception.VolumeBackendAPIException, self.rest.check_status_code_success, 'test exception', status_code, "") def test_wait_for_job_success(self): operation = 'test' status_code = 202 job = self.data.job_list[0] extra_specs = self.data.extra_specs self.rest.wait_for_job(operation, status_code, job, extra_specs) def test_wait_for_job_failed(self): operation = 'test' status_code = 202 job = self.data.job_list[2] extra_specs = self.data.extra_specs with mock.patch.object(self.rest, 'wait_for_job_complete', return_value=(-1, '', '', '')): self.assertRaises(exception.VolumeBackendAPIException, self.rest.wait_for_job, operation, status_code, job, extra_specs) def test_get_resource_present(self): array = self.data.array category = 'sloprovisioning' resource_type = 'storagegroup' resource = self.rest.get_resource(array, category, resource_type) self.assertEqual(self.data.sg_list, resource) def test_get_resource_not_present(self): array = self.data.array category = 'sloprovisioning' resource_type = self.data.failed_resource resource = self.rest.get_resource(array, category, resource_type) self.assertIsNone(resource) def test_create_resource_success(self): array = self.data.array category = '' resource_type = '' payload = {'someKey': 'someValue'} status_code, message = self.rest.create_resource( array, category, 
resource_type, payload) self.assertEqual(self.data.job_list[0], message) def test_create_resource_failed(self): array = self.data.array category = '' resource_type = '' payload = {'someKey': self.data.failed_resource} self.assertRaises(exception.VolumeBackendAPIException, self.rest.create_resource, array, category, resource_type, payload) def test_modify_resource(self): array = self.data.array category = '' resource_type = '' payload = {'someKey': 'someValue'} status_code, message = self.rest.modify_resource( array, category, resource_type, payload) self.assertEqual(self.data.job_list[0], message) def test_modify_resource_failed(self): array = self.data.array category = '' resource_type = '' payload = {'someKey': self.data.failed_resource} self.assertRaises(exception.VolumeBackendAPIException, self.rest.modify_resource, array, category, resource_type, payload) def test_delete_resource(self): operation = 'delete res resource' status_code = 204 message = None array = self.data.array category = 'cat' resource_type = 'res' resource_name = 'name' with mock.patch.object(self.rest, 'check_status_code_success'): self.rest.delete_resource( array, category, resource_type, resource_name) self.rest.check_status_code_success.assert_called_with( operation, status_code, message) def test_delete_resource_failed(self): array = self.data.array category = self.data.failed_resource resource_type = self.data.failed_resource resource_name = self.data.failed_resource self.assertRaises(exception.VolumeBackendAPIException, self.rest.modify_resource, array, category, resource_type, resource_name) def test_get_arrays_list(self): ret_val = {'symmetrixId': tpd.PowerMaxData.array} with mock.patch.object(self.rest, 'get_request', return_value=ret_val): ref_details = self.data.array array_details = self.rest.get_arrays_list() self.assertEqual(ref_details, array_details) def test_get_arrays_list_failed(self): with mock.patch.object(self.rest, 'get_request', return_value=dict()): array_details = self.rest.get_arrays_list() self.assertEqual(list(), array_details) def test_get_array_detail(self): ref_details = self.data.symmetrix[0] array_details = self.rest.get_array_detail(self.data.array) self.assertEqual(ref_details, array_details) def test_get_array_detail_failed(self): array_details = self.rest.get_array_detail(self.data.failed_resource) self.assertIsNone(array_details) def test_get_uni_version_success(self): ret_val = (200, tpd.PowerMaxData.version_details) current_major_version = tpd.PowerMaxData.u4p_version with mock.patch.object(self.rest, 'request', return_value=ret_val): version, major_version = self.rest.get_uni_version() self.assertIsNotNone(version) self.assertIsNotNone(major_version) self.assertEqual(major_version, current_major_version) def test_get_uni_version_failed(self): ret_val = (500, '') with mock.patch.object(self.rest, 'request', return_value=ret_val): version, major_version = self.rest.get_uni_version() self.assertIsNone(version) self.assertIsNone(major_version) def test_get_srp_by_name(self): ref_details = self.data.srp_details srp_details = self.rest.get_srp_by_name( self.data.array, self.data.srp) self.assertEqual(ref_details, srp_details) def test_get_slo_list_powermax(self): ref_settings = self.data.powermax_slo_details['sloId'] slo_settings = self.rest.get_slo_list( self.data.array, True, 'PowerMax 2000') self.assertEqual(ref_settings, slo_settings) def test_get_slo_list_vmax(self): ref_settings = ['Diamond'] with mock.patch.object(self.rest, 'get_resource', 
return_value=self.data.vmax_slo_details): slo_settings = self.rest.get_slo_list( self.data.array, False, 'VMAX250F') self.assertEqual(ref_settings, slo_settings) def test_get_workload_settings(self): ref_settings = self.data.workloadtype['workloadId'] wl_settings = self.rest.get_workload_settings( self.data.array, False) self.assertEqual(ref_settings, wl_settings) def test_get_workload_settings_next_gen(self): wl_settings = self.rest.get_workload_settings( self.data.array_herc, True) self.assertEqual(['None'], wl_settings) def test_get_workload_settings_failed(self): wl_settings = self.rest.get_workload_settings( self.data.failed_resource, False) self.assertEqual([], wl_settings) def test_is_compression_capable_true(self): compr_capable = self.rest.is_compression_capable('000197800128') self.assertTrue(compr_capable) def test_is_compression_capable_false(self): compr_capable = self.rest.is_compression_capable(self.data.array) self.assertFalse(compr_capable) with mock.patch.object(self.rest, 'request', return_value=(200, {})): compr_capable = self.rest.is_compression_capable(self.data.array) self.assertFalse(compr_capable) def test_get_storage_group(self): ref_details = self.data.sg_details[0] sg_details = self.rest.get_storage_group( self.data.array, self.data.defaultstoragegroup_name) self.assertEqual(ref_details, sg_details) def test_get_storage_group_list(self): sg_list = self.rest.get_storage_group_list(self.data.array) self.assertEqual(self.data.sg_list, sg_list) def test_create_storage_group(self): with mock.patch.object(self.rest, 'create_resource') as mock_create: payload = {'someKey': 'someValue'} self.rest._create_storagegroup(self.data.array, payload) mock_create.assert_called_once_with( self.data.array, 'sloprovisioning', 'storagegroup', payload) def test_create_storage_group_success(self): sg_name = self.rest.create_storage_group( self.data.array, self.data.storagegroup_name_f, self.data.srp, self.data.slo, self.data.workload, self.data.extra_specs) self.assertEqual(self.data.storagegroup_name_f, sg_name) def test_create_storage_group_next_gen(self): with mock.patch.object(self.rest, 'is_next_gen_array', return_value=True): with mock.patch.object( self.rest, '_create_storagegroup', return_value=(200, self.data.job_list[0])) as mock_sg: self.rest.create_storage_group( self.data.array, self.data.storagegroup_name_f, self.data.srp, self.data.slo, self.data.workload, self.data.extra_specs) payload = {'srpId': self.data.srp, 'storageGroupId': self.data.storagegroup_name_f, 'emulation': 'FBA', 'sloBasedStorageGroupParam': [ {'sloId': self.data.slo, 'workloadSelection': 'NONE', 'volumeAttributes': [{ 'volume_size': '0', 'capacityUnit': 'GB', 'num_of_vols': 0}]}]} mock_sg.assert_called_once_with(self.data.array, payload) def test_create_storage_group_failed(self): self.assertRaises( exception.VolumeBackendAPIException, self.rest.create_storage_group, self.data.array, self.data.failed_resource, self.data.srp, self.data.slo, self.data.workload, self.data.extra_specs) def test_create_storage_group_no_slo(self): sg_name = self.rest.create_storage_group( self.data.array, self.data.default_sg_no_slo, self.data.srp, None, None, self.data.extra_specs) self.assertEqual(self.data.default_sg_no_slo, sg_name) def test_create_storage_group_compression_disabled(self): with mock.patch.object( self.rest, '_create_storagegroup', return_value=(200, self.data.job_list[0]))as mock_sg: self.rest.create_storage_group( self.data.array, self.data.default_sg_compr_disabled, self.data.srp, self.data.slo, 
self.data.workload, self.data.extra_specs, True) payload = {'srpId': self.data.srp, 'storageGroupId': self.data.default_sg_compr_disabled, 'emulation': 'FBA', 'sloBasedStorageGroupParam': [ {'sloId': self.data.slo, 'workloadSelection': self.data.workload, 'volumeAttributes': [{ 'volume_size': '0', 'capacityUnit': 'GB', 'num_of_vols': 0}], 'noCompression': 'true'}]} mock_sg.assert_called_once_with(self.data.array, payload) def test_modify_storage_group(self): array = self.data.array storagegroup = self.data.defaultstoragegroup_name return_message = self.data.add_volume_sg_info_dict payload = ( {"executionOption": "ASYNCHRONOUS", "editStorageGroupActionParam": { "expandStorageGroupParam": { "addVolumeParam": { "emulation": "FBA", "create_new_volumes": "False", "volumeAttributes": [ { "num_of_vols": 1, "volumeIdentifier": { "identifier_name": "os-123-456", "volumeIdentifierChoice": "identifier_name" }, "volume_size": 1, "capacityUnit": "GB"}]}}}}) endpoint_version = self.data.u4p_100_endpoint with mock.patch.object(self.rest, 'modify_resource', return_value=(200, return_message)) as mock_modify: status_code, message = self.rest.modify_storage_group( array, storagegroup, payload) mock_modify.assert_called_once_with( self.data.array, 'sloprovisioning', 'storagegroup', payload, endpoint_version, resource_name=storagegroup) self.assertEqual(1, mock_modify.call_count) self.assertEqual(200, status_code) self.assertEqual(return_message, message) def test_create_volume_from_sg_success(self): volume_name = self.data.volume_details[0]['volume_identifier'] ref_dict = self.data.provider_location volume_dict = self.rest.create_volume_from_sg( self.data.array, volume_name, self.data.defaultstoragegroup_name, self.data.test_volume.size, self.data.extra_specs) self.assertEqual(ref_dict, volume_dict) @mock.patch.object(rest.PowerMaxRest, 'get_volume') @mock.patch.object(rest.PowerMaxRest, 'wait_for_job', return_value=tpd.PowerMaxData.vol_create_task) def test_create_volume_from_sg_existing_volume_success( self, mock_task, mock_get): volume_name = self.data.volume_details[0]['volume_identifier'] self.rest.create_volume_from_sg( self.data.array, volume_name, self.data.defaultstoragegroup_name, self.data.test_volume.size, self.data.extra_specs) mock_get.assert_called_with(self.data.array, self.data.device_id) def test_create_volume_from_sg_failed(self): volume_name = self.data.volume_details[0]['volume_identifier'] self.assertRaises( exception.VolumeBackendAPIException, self.rest.create_volume_from_sg, self.data.array, volume_name, self.data.failed_resource, self.data.test_volume.size, self.data.extra_specs) def test_create_volume_from_sg_cannot_retrieve_device_id(self): with mock.patch.object(self.rest, 'find_volume_device_id', return_value=None): volume_name = self.data.volume_details[0]['volume_identifier'] self.assertRaises( exception.VolumeBackendAPIException, self.rest.create_volume_from_sg, self.data.array, volume_name, self.data.failed_resource, self.data.test_volume.size, self.data.extra_specs) @mock.patch.object(rest.PowerMaxRest, 'rename_volume') @mock.patch.object(rest.PowerMaxRest, 'get_volume_list', return_value=['00001', '00002', '00003', '00004']) @mock.patch.object(rest.PowerMaxRest, 'wait_for_job') @mock.patch.object(rest.PowerMaxRest, 'modify_storage_group', return_value=(200, 'job')) def test_create_volume_from_sg_rep_info( self, mck_modify, mck_wait, mck_get_vol, mck_rename): volume_name = self.data.volume_details[0]['volume_identifier'] sg_name = self.data.defaultstoragegroup_name rep_info 
= self.data.rep_info_dict rep_info['initial_device_list'] = ['00001', '00002', '00003'] ref_payload = self.data.create_vol_with_replication_payload ref_volume_dict = {utils.ARRAY: self.data.array, utils.DEVICE_ID: '00004'} volume_dict = self.rest.create_volume_from_sg( self.data.array, volume_name, sg_name, self.data.test_volume.size, self.data.extra_specs, rep_info) mck_modify.assert_called_once_with( self.data.array, self.data.defaultstoragegroup_name, ref_payload) self.assertEqual(ref_volume_dict, volume_dict) @mock.patch.object(rest.PowerMaxRest, 'get_volume_list', return_value=['00001', '00002', '00003', '00004']) @mock.patch.object(rest.PowerMaxRest, 'wait_for_job') @mock.patch.object(rest.PowerMaxRest, 'modify_storage_group', return_value=(200, 'job')) def test_create_volume_from_sg_rep_info_vol_cnt_exception( self, mck_modify, mck_wait, mck_get_vol): volume_name = self.data.volume_details[0]['volume_identifier'] sg_name = self.data.defaultstoragegroup_name rep_info = self.data.rep_info_dict rep_info['initial_device_list'] = ['00001', '00002'] self.assertRaises(exception.VolumeBackendAPIException, self.rest.create_volume_from_sg, self.data.array, volume_name, sg_name, self.data.test_volume.size, self.data.extra_specs, rep_info) def test_add_vol_to_sg_success(self): operation = 'Add volume to sg' status_code = 202 message = self.data.job_list[0] with mock.patch.object(self.rest, 'wait_for_job') as mock_wait: device_id = self.data.device_id self.rest.add_vol_to_sg( self.data.array, self.data.storagegroup_name_f, device_id, self.data.extra_specs) mock_wait.assert_called_with( operation, status_code, message, self.data.extra_specs) def test_add_vol_to_sg_failed(self): device_id = [self.data.device_id] self.assertRaises(exception.VolumeBackendAPIException, self.rest.add_vol_to_sg, self.data.array, self.data.failed_resource, device_id, self.data.extra_specs) def test_remove_vol_from_sg_success(self): operation = 'Remove vol from sg' status_code = 202 message = self.data.job_list[0] with mock.patch.object(self.rest, 'wait_for_job') as mock_wait: device_id = self.data.device_id self.rest.remove_vol_from_sg( self.data.array, self.data.storagegroup_name_f, device_id, self.data.extra_specs) mock_wait.assert_called_with( operation, status_code, message, self.data.extra_specs) @mock.patch.object(time, 'sleep') def test_remove_vol_from_sg_failed(self, mock_sleep): device_id = [self.data.volume_details[0]['volumeId']] self.assertRaises(exception.VolumeBackendAPIException, self.rest.remove_vol_from_sg, self.data.array, self.data.failed_resource, device_id, self.data.extra_specs) @mock.patch.object(rest.PowerMaxRest, 'wait_for_job') def test_remove_vol_from_sg_force_true(self, mck_wait): device_id = self.data.device_id extra_specs = deepcopy(self.data.extra_specs) extra_specs[utils.FORCE_VOL_EDIT] = True expected_payload = ( {"executionOption": "ASYNCHRONOUS", "editStorageGroupActionParam": { "removeVolumeParam": { "volumeId": [device_id], "remoteSymmSGInfoParam": { "force": "true"}}}}) with mock.patch.object( self.rest, 'modify_storage_group', return_value=( 200, tpd.PowerMaxData.job_list)) as mck_mod: self.rest.remove_vol_from_sg( self.data.array, self.data.storagegroup_name_f, device_id, extra_specs) mck_mod.assert_called_with( self.data.array, self.data.storagegroup_name_f, expected_payload) @mock.patch.object(rest.PowerMaxRest, 'wait_for_job') def test_remove_vol_from_sg_force_false(self, mck_wait): device_id = self.data.device_id extra_specs = deepcopy(self.data.extra_specs) 
extra_specs.pop(utils.FORCE_VOL_EDIT, None) expected_payload = ( {"executionOption": "ASYNCHRONOUS", "editStorageGroupActionParam": { "removeVolumeParam": { "volumeId": [device_id], "remoteSymmSGInfoParam": { "force": "false"}}}}) with mock.patch.object( self.rest, 'modify_storage_group', return_value=( 200, tpd.PowerMaxData.job_list)) as mck_mod: self.rest.remove_vol_from_sg( self.data.array, self.data.storagegroup_name_f, device_id, extra_specs) mck_mod.assert_called_with( self.data.array, self.data.storagegroup_name_f, expected_payload) def test_get_vmax_default_storage_group(self): ref_storage_group = self.data.sg_details[0] ref_sg_name = self.data.defaultstoragegroup_name storagegroup, storagegroup_name = ( self.rest.get_vmax_default_storage_group( self.data.array, self.data.srp, self.data.slo, self.data.workload)) self.assertEqual(ref_sg_name, storagegroup_name) self.assertEqual(ref_storage_group, storagegroup) def test_get_vmax_default_storage_group_next_gen(self): with mock.patch.object(self.rest, 'is_next_gen_array', return_value=True): __, storagegroup_name = self.rest.get_vmax_default_storage_group( self.data.array, self.data.srp, self.data.slo, self.data.workload) self.assertEqual('OS-SRP_1-Diamond-NONE-SG', storagegroup_name) def test_delete_storage_group(self): operation = 'delete storagegroup resource' status_code = 204 message = None with mock.patch.object( self.rest, 'check_status_code_success') as mock_check: self.rest.delete_storage_group( self.data.array, self.data.storagegroup_name_f) mock_check.assert_called_with(operation, status_code, message) def test_is_child_sg_in_parent_sg(self): is_child1 = self.rest.is_child_sg_in_parent_sg( self.data.array, self.data.storagegroup_name_f, self.data.parent_sg_f) is_child2 = self.rest.is_child_sg_in_parent_sg( self.data.array, self.data.defaultstoragegroup_name, self.data.parent_sg_f) self.assertTrue(is_child1) self.assertFalse(is_child2) def test_is_child_sg_in_parent_sg_case_not_matching(self): lower_case_host = 'OS-hostx-SRP_1-DiamondDSS-os-fibre-PG' is_child1 = self.rest.is_child_sg_in_parent_sg( self.data.array, lower_case_host, self.data.parent_sg_f) self.assertTrue(is_child1) def test_is_child_sg_in_parent_sg_spelling_mistake(self): lower_case_host = 'OS-hosty-SRP_1-DiamondDSS-os-fiber-PG' is_child1 = self.rest.is_child_sg_in_parent_sg( self.data.array, lower_case_host, self.data.parent_sg_f) self.assertFalse(is_child1) def test_add_child_sg_to_parent_sg(self): payload = {'editStorageGroupActionParam': { 'expandStorageGroupParam': { 'addExistingStorageGroupParam': { 'storageGroupId': [self.data.storagegroup_name_f]}}}} with mock.patch.object( self.rest, 'modify_storage_group', return_value=(202, self.data.job_list[0])) as mck_mod_sg: self.rest.add_child_sg_to_parent_sg( self.data.array, self.data.storagegroup_name_f, self.data.parent_sg_f, self.data.extra_specs) mck_mod_sg.assert_called_once_with( self.data.array, self.data.parent_sg_f, payload) def test_remove_child_sg_from_parent_sg(self): payload = {'editStorageGroupActionParam': { 'removeStorageGroupParam': { 'storageGroupId': [self.data.storagegroup_name_f], 'force': 'true'}}} with mock.patch.object( self.rest, 'modify_storage_group', return_value=(202, self.data.job_list[0])) as mock_modify: self.rest.remove_child_sg_from_parent_sg( self.data.array, self.data.storagegroup_name_f, self.data.parent_sg_f, self.data.extra_specs) mock_modify.assert_called_once_with( self.data.array, self.data.parent_sg_f, payload) def test_get_volume_list(self): ref_volumes = 
[self.data.device_id, self.data.device_id2] volumes = self.rest.get_volume_list(self.data.array, {}) self.assertEqual(ref_volumes, volumes) def test_get_volume(self): ref_volumes = self.data.volume_details[0] device_id = self.data.device_id volumes = self.rest.get_volume(self.data.array, device_id) self.assertEqual(ref_volumes, volumes) def test_get_private_volume(self): device_id = self.data.device_id ref_volume = self.data.private_vol_details['resultList']['result'][0] volume = self.rest._get_private_volume(self.data.array, device_id) self.assertEqual(ref_volume, volume) def test_get_private_volume_exception(self): device_id = self.data.device_id with mock.patch.object(self.rest, 'get_resource', return_value={}): self.assertRaises(exception.VolumeBackendAPIException, self.rest._get_private_volume, self.data.array, device_id) def test_modify_volume_success(self): array = self.data.array device_id = self.data.device_id payload = {'someKey': 'someValue'} with mock.patch.object(self.rest, 'modify_resource') as mock_modify: self.rest._modify_volume(array, device_id, payload) mock_modify.assert_called_once_with( self.data.array, 'sloprovisioning', 'volume', payload, resource_name=device_id) def test_modify_volume_failed(self): payload = {'someKey': self.data.failed_resource} device_id = self.data.device_id self.assertRaises( exception.VolumeBackendAPIException, self.rest._modify_volume, self.data.array, device_id, payload) @mock.patch.object(rest.PowerMaxRest, 'wait_for_job') def test_extend_volume(self, mck_wait): array = self.data.array device_id = self.data.device_id new_size = '3' extra_specs = self.data.extra_specs rdfg_num = self.data.rdf_group_no_1 extend_vol_payload = {'executionOption': 'ASYNCHRONOUS', 'editVolumeActionParam': { 'expandVolumeParam': { 'volumeAttribute': { 'volume_size': new_size, 'capacityUnit': 'GB'}, 'rdfGroupNumber': rdfg_num}}} with mock.patch.object( self.rest, '_modify_volume', return_value=(202, self.data.job_list[0])) as mck_modify: self.rest.extend_volume(array, device_id, new_size, extra_specs, rdfg_num) mck_modify.assert_called_once_with(array, device_id, extend_vol_payload) @mock.patch.object(rest.PowerMaxRest, 'get_storage_groups_from_volume', return_value=[]) def test_legacy_delete_volume(self, mock_sgs): device_id = self.data.device_id vb_except = exception.VolumeBackendAPIException with mock.patch.object(self.rest, 'delete_resource') as mock_delete, ( mock.patch.object( self.rest, '_modify_volume', side_effect=[None, None, None, vb_except])) as mock_modify: for _ in range(0, 2): self.rest.delete_volume(self.data.array, device_id) mod_call_count = mock_modify.call_count self.assertEqual(4, mod_call_count) mock_delete.assert_called_once_with( self.data.array, 'sloprovisioning', 'volume', device_id) @mock.patch.object(rest.PowerMaxRest, 'get_storage_groups_from_volume', return_value=[]) def test_delete_volume(self, mock_sgs): device_id = self.data.device_id self.mock_object(self.rest, 'ucode_major_level', utils.UCODE_5978) self.mock_object(self.rest, 'ucode_minor_level', utils.UCODE_5978_HICKORY) with mock.patch.object( self.rest, 'delete_resource') as mock_delete: self.rest.delete_volume(self.data.array, device_id) mock_delete.assert_called_once_with( self.data.array, 'sloprovisioning', 'volume', device_id) @mock.patch.object(rest.PowerMaxRest, 'get_storage_groups_from_volume', return_value=['OS-SG']) def test_delete_volume_in_sg(self, mock_sgs): device_id = self.data.device_id self.mock_object(self.rest, 'ucode_major_level', utils.UCODE_5978) 
self.mock_object(self.rest, 'ucode_minor_level', utils.UCODE_5978_HICKORY) self.assertRaises( exception.VolumeBackendAPIException, self.rest.delete_volume, self.data.array, device_id) def test_rename_volume(self): device_id = self.data.device_id payload = {'editVolumeActionParam': { 'modifyVolumeIdentifierParam': { 'volumeIdentifier': { 'identifier_name': 'new_name', 'volumeIdentifierChoice': 'identifier_name'}}}} payload2 = {'editVolumeActionParam': {'modifyVolumeIdentifierParam': { 'volumeIdentifier': {'volumeIdentifierChoice': 'none'}}}} with mock.patch.object(self.rest, '_modify_volume') as mock_mod: self.rest.rename_volume(self.data.array, device_id, 'new_name') mock_mod.assert_called_once_with( self.data.array, device_id, payload) mock_mod.reset_mock() self.rest.rename_volume(self.data.array, device_id, None) mock_mod.assert_called_once_with( self.data.array, device_id, payload2) def test_check_volume_device_id(self): element_name = self.utils.get_volume_element_name( self.data.test_volume.id) found_dev_id = self.rest.check_volume_device_id( self.data.array, self.data.device_id, element_name) self.assertEqual(self.data.device_id, found_dev_id) found_dev_id2 = self.rest.check_volume_device_id( self.data.array, self.data.device_id3, element_name) self.assertIsNone(found_dev_id2) def test_check_volume_device_id_host_migration_case(self): element_name = self.utils.get_volume_element_name( self.data.test_clone_volume.id) found_dev_id = self.rest.check_volume_device_id( self.data.array, self.data.device_id, element_name, name_id=self.data.test_clone_volume._name_id) self.assertEqual(self.data.device_id, found_dev_id) def test_check_volume_device_id_legacy_case(self): element_name = self.utils.get_volume_element_name( self.data.test_volume.id) with mock.patch.object(self.rest, 'get_volume', return_value=self.data.volume_details_legacy): found_dev_id = self.rest.check_volume_device_id( self.data.array, self.data.device_id, element_name) self.assertEqual(self.data.device_id, found_dev_id) def test_check_volume_device_id_legacy_case_no_match(self): element_name = self.utils.get_volume_element_name( self.data.test_volume.id) volume_details_no_match = deepcopy(self.data.volume_details_legacy) volume_details_no_match['volume_identifier'] = 'no_match' with mock.patch.object(self.rest, 'get_volume', return_value=volume_details_no_match): found_dev_id = self.rest.check_volume_device_id( self.data.array, self.data.device_id, element_name) self.assertIsNone(found_dev_id) def test_check_volume_device_id_volume_identifier_none(self): element_name = self.utils.get_volume_element_name( self.data.test_volume.id) vol_details_vol_identifier_none = deepcopy( self.data.volume_details_legacy) vol_details_vol_identifier_none['volume_identifier'] = None with mock.patch.object(self.rest, 'get_volume', return_value=vol_details_vol_identifier_none): found_dev_id = self.rest.check_volume_device_id( self.data.array, self.data.device_id, element_name) self.assertIsNone(found_dev_id) def test_find_mv_connections_for_vol(self): device_id = self.data.device_id ref_lun_id = int( (self.data.maskingview[0]['maskingViewConnection'][0][ 'host_lun_address']), 16) host_lun_id = self.rest.find_mv_connections_for_vol( self.data.array, self.data.masking_view_name_f, device_id) self.assertEqual(ref_lun_id, host_lun_id) def test_find_mv_connections_for_vol_missing_host_lun_address(self): with mock.patch.object(self.rest, 'get_resource', return_value=self.data.maskingview_no_lun): self.assertRaises(exception.VolumeBackendAPIException, 
self.rest.find_mv_connections_for_vol, self.data.array, self.data.masking_view_name_f, self.data.device_id) def test_find_mv_connections_for_vol_failed(self): # no masking view info retrieved device_id = self.data.volume_details[0]['volumeId'] host_lun_id = self.rest.find_mv_connections_for_vol( self.data.array, self.data.failed_resource, device_id) self.assertIsNone(host_lun_id) # no connection info received with mock.patch.object(self.rest, 'get_resource', return_value={'no_conn': 'no_info'}): self.assertRaises(exception.VolumeBackendAPIException, self.rest.find_mv_connections_for_vol, self.data.array, self.data.masking_view_name_f, self.data.device_id) def test_get_storage_groups_from_volume(self): array = self.data.array device_id = self.data.device_id ref_list = self.data.volume_details[0]['storageGroupId'] sg_list = self.rest.get_storage_groups_from_volume(array, device_id) self.assertEqual(ref_list, sg_list) def test_get_num_vols_in_sg(self): num_vol = self.rest.get_num_vols_in_sg( self.data.array, self.data.defaultstoragegroup_name) self.assertEqual(2, num_vol) def test_get_num_vols_in_sg_no_num(self): with mock.patch.object(self.rest, 'get_storage_group', return_value={}): num_vol = self.rest.get_num_vols_in_sg( self.data.array, self.data.defaultstoragegroup_name) self.assertEqual(0, num_vol) def test_is_volume_in_storagegroup(self): # True array = self.data.array device_id = self.data.device_id storagegroup = self.data.defaultstoragegroup_name is_vol1 = self.rest.is_volume_in_storagegroup( array, device_id, storagegroup) # False with mock.patch.object(self.rest, 'get_storage_groups_from_volume', return_value=[]): is_vol2 = self.rest.is_volume_in_storagegroup( array, device_id, storagegroup) self.assertTrue(is_vol1) self.assertFalse(is_vol2) def test_find_volume_device_number(self): array = self.data.array volume_name = self.data.volume_details[0]['volume_identifier'] ref_device = self.data.device_id device_number = self.rest.find_volume_device_id(array, volume_name) self.assertEqual(ref_device, device_number) def test_find_volume_device_number_failed(self): array = self.data.array with mock.patch.object(self.rest, 'get_volume_list', return_value=[]): device_number = self.rest.find_volume_device_id(array, 'name') self.assertIsNone(device_number) def test_get_volume_success(self): array = self.data.array device_id = self.data.device_id ref_volume = self.data.volume_details[0] volume = self.rest.get_volume(array, device_id) self.assertEqual(ref_volume, volume) def test_get_volume_failed(self): array = self.data.array device_id = self.data.failed_resource self.assertRaises(exception.VolumeBackendAPIException, self.rest.get_volume, array, device_id) def test_find_volume_identifier(self): array = self.data.array device_id = self.data.device_id ref_name = self.data.volume_details[0]['volume_identifier'] vol_name = self.rest.find_volume_identifier(array, device_id) self.assertEqual(ref_name, vol_name) def test_get_volume_size(self): array = self.data.array device_id = self.data.device_id ref_size = self.data.test_volume.size size = self.rest.get_size_of_device_on_array(array, device_id) self.assertEqual(ref_size, size) def test_get_volume_size_exception(self): array = self.data.array device_id = self.data.device_id with mock.patch.object(self.rest, 'get_volume', return_value=None): size = self.rest.get_size_of_device_on_array(array, device_id) self.assertIsNone(size) def test_get_portgroup(self): array = self.data.array pg_name = self.data.port_group_name_f ref_pg = 
self.data.portgroup[0] portgroup = self.rest.get_portgroup(array, pg_name) self.assertEqual(ref_pg, portgroup) def test_get_port_ids(self): array = self.data.array pg_name = self.data.port_group_name_f ref_ports = ['FA-1D:4'] port_ids = self.rest.get_port_ids(array, pg_name) self.assertEqual(ref_ports, port_ids) def test_get_port_ids_no_portgroup(self): array = self.data.array pg_name = self.data.port_group_name_f with mock.patch.object(self.rest, 'get_portgroup', return_value=None): with self.assertRaisesRegex( exception.VolumeBackendAPIException, 'Cannot find port group OS-fibre-PG.'): self.rest.get_port_ids(array, pg_name) def test_get_port(self): array = self.data.array port_id = 'FA-1D:4' ref_port = self.data.port_list[0] port = self.rest.get_port(array, port_id) self.assertEqual(ref_port, port) def test_get_iscsi_ip_address_and_iqn(self): array = self.data.array port_id = 'SE-4E:0' ref_ip = [self.data.ip] ref_iqn = self.data.initiator ip_addresses, iqn = self.rest.get_iscsi_ip_address_and_iqn( array, port_id) self.assertEqual(ref_ip, ip_addresses) self.assertEqual(ref_iqn, iqn) def test_get_iscsi_ip_address_and_iqn_no_port(self): array = self.data.array port_id = 'SE-4E:0' with mock.patch.object(self.rest, 'get_port', return_value=None): ip_addresses, iqn = self.rest.get_iscsi_ip_address_and_iqn( array, port_id) self.assertIsNone(ip_addresses) self.assertIsNone(iqn) def test_get_target_wwns(self): array = self.data.array pg_name = self.data.port_group_name_f ref_wwns = [self.data.wwpn1] target_wwns = self.rest.get_target_wwns(array, pg_name) self.assertEqual(ref_wwns, target_wwns) def test_get_target_wwns_failed(self): array = self.data.array pg_name = self.data.port_group_name_f with mock.patch.object(self.rest, 'get_port', return_value=None): target_wwns = self.rest.get_target_wwns(array, pg_name) self.assertEqual([], target_wwns) def test_get_initiator_group(self): array = self.data.array ig_name = self.data.initiatorgroup_name_f ref_ig = self.data.initiator_group_fc response_ig = self.rest.get_initiator_group(array, ig_name) self.assertEqual(ref_ig, response_ig) def test_get_initiator(self): array = self.data.array initiator_name = self.data.initiator ref_initiator = self.data.initiator_list[1] response_initiator = self.rest.get_initiator(array, initiator_name) self.assertEqual(ref_initiator, response_initiator) def test_get_initiator_list(self): array = self.data.array with mock.patch.object(self.rest, 'get_resource', return_value={'initiatorId': '1234'}): init_list = self.rest.get_initiator_list(array) self.assertIsNotNone(init_list) def test_get_initiator_list_empty(self): array = self.data.array with mock.patch.object(self.rest, 'get_resource', return_value={}): init_list = self.rest.get_initiator_list(array) self.assertEqual([], init_list) def test_get_initiator_list_none(self): array = self.data.array with mock.patch.object(self.rest, 'get_resource', return_value=None): init_list = self.rest.get_initiator_list(array) self.assertIsNotNone(init_list) def test_get_initiator_group_from_initiator(self): initiator = self.data.wwpn1 ref_group = self.data.initiatorgroup_name_f init_group = self.rest.get_initiator_group_from_initiator( self.data.array, initiator) self.assertEqual(ref_group, init_group) def test_get_initiator_group_from_initiator_failed(self): initiator = self.data.wwpn1 with mock.patch.object(self.rest, 'get_initiator', return_value=None): init_group = self.rest.get_initiator_group_from_initiator( self.data.array, initiator) self.assertIsNone(init_group) with 
mock.patch.object(self.rest, 'get_initiator', return_value={'name': 'no_host'}): init_group = self.rest.get_initiator_group_from_initiator( self.data.array, initiator) self.assertIsNone(init_group) def test_create_initiator_group(self): init_group_name = self.data.initiatorgroup_name_f init_list = [self.data.wwpn1] extra_specs = self.data.extra_specs with mock.patch.object( self.rest, 'create_resource', return_value=(202, self.data.job_list[0])) as mock_create: payload = ({'executionOption': 'ASYNCHRONOUS', 'hostId': init_group_name, 'initiatorId': init_list}) self.rest.create_initiator_group( self.data.array, init_group_name, init_list, extra_specs) mock_create.assert_called_once_with( self.data.array, 'sloprovisioning', 'host', payload) def test_delete_initiator_group(self): with mock.patch.object(self.rest, 'delete_resource') as mock_delete: self.rest.delete_initiator_group( self.data.array, self.data.initiatorgroup_name_f) mock_delete.assert_called_once_with( self.data.array, 'sloprovisioning', 'host', self.data.initiatorgroup_name_f) def test_get_masking_view(self): array = self.data.array masking_view_name = self.data.masking_view_name_f ref_mask_view = self.data.maskingview[0] masking_view = self.rest.get_masking_view(array, masking_view_name) self.assertEqual(ref_mask_view, masking_view) def test_get_masking_views_from_storage_group(self): array = self.data.array storagegroup_name = self.data.storagegroup_name_f ref_mask_view = [self.data.masking_view_name_f] masking_view = self.rest.get_masking_views_from_storage_group( array, storagegroup_name) self.assertEqual(ref_mask_view, masking_view) def test_get_masking_views_by_initiator_group(self): array = self.data.array initiatorgroup_name = self.data.initiatorgroup_name_f ref_mask_view = [self.data.masking_view_name_f] masking_view = self.rest.get_masking_views_by_initiator_group( array, initiatorgroup_name) self.assertEqual(ref_mask_view, masking_view) def test_get_masking_views_by_initiator_group_failed(self): array = self.data.array initiatorgroup_name = self.data.initiatorgroup_name_f with mock.patch.object(self.rest, 'get_initiator_group', return_value=None): masking_view = self.rest.get_masking_views_by_initiator_group( array, initiatorgroup_name) self.assertEqual([], masking_view) with mock.patch.object(self.rest, 'get_initiator_group', return_value={'name': 'no_mv'}): masking_view = self.rest.get_masking_views_by_initiator_group( array, initiatorgroup_name) self.assertEqual([], masking_view) def test_get_element_from_masking_view(self): array = self.data.array maskingview_name = self.data.masking_view_name_f # storage group ref_sg = self.data.storagegroup_name_f storagegroup = self.rest.get_element_from_masking_view( array, maskingview_name, storagegroup=True) self.assertEqual(ref_sg, storagegroup) # initiator group ref_ig = self.data.initiatorgroup_name_f initiatorgroup = self.rest.get_element_from_masking_view( array, maskingview_name, host=True) self.assertEqual(ref_ig, initiatorgroup) # portgroup ref_pg = self.data.port_group_name_f portgroup = self.rest.get_element_from_masking_view( array, maskingview_name, portgroup=True) self.assertEqual(ref_pg, portgroup) def test_get_element_from_masking_view_failed(self): array = self.data.array maskingview_name = self.data.masking_view_name_f # no element chosen element = self.rest.get_element_from_masking_view( array, maskingview_name) self.assertIsNone(element) # cannot retrieve maskingview with mock.patch.object(self.rest, 'get_masking_view', return_value=None): 
self.assertRaises(exception.VolumeBackendAPIException, self.rest.get_element_from_masking_view, array, maskingview_name) def test_get_common_masking_views(self): array = self.data.array initiatorgroup = self.data.initiatorgroup_name_f portgroup = self.data.port_group_name_f ref_maskingview = self.data.masking_view_name_f maskingview_list = self.rest.get_common_masking_views( array, portgroup, initiatorgroup) self.assertEqual(ref_maskingview, maskingview_list) def test_get_common_masking_views_none(self): array = self.data.array initiatorgroup = self.data.initiatorgroup_name_f portgroup = self.data.port_group_name_f with mock.patch.object(self.rest, 'get_masking_view_list', return_value=[]): maskingview_list = self.rest.get_common_masking_views( array, portgroup, initiatorgroup) self.assertEqual([], maskingview_list) def test_create_masking_view(self): maskingview_name = self.data.masking_view_name_f storagegroup_name = self.data.storagegroup_name_f port_group_name = self.data.port_group_name_f init_group_name = self.data.initiatorgroup_name_f extra_specs = self.data.extra_specs with mock.patch.object( self.rest, 'create_resource', return_value=(202, self.data.job_list[0])) as mock_create: payload = ({'executionOption': 'ASYNCHRONOUS', 'portGroupSelection': { 'useExistingPortGroupParam': { 'portGroupId': port_group_name}}, 'maskingViewId': maskingview_name, 'hostOrHostGroupSelection': { 'useExistingHostParam': { 'hostId': init_group_name}}, 'storageGroupSelection': { 'useExistingStorageGroupParam': { 'storageGroupId': storagegroup_name}}}) self.rest.create_masking_view( self.data.array, maskingview_name, storagegroup_name, port_group_name, init_group_name, extra_specs) mock_create.assert_called_once_with( self.data.array, 'sloprovisioning', 'maskingview', payload) def test_delete_masking_view(self): with mock.patch.object(self.rest, 'delete_resource') as mock_delete: self.rest.delete_masking_view( self.data.array, self.data.masking_view_name_f) mock_delete.assert_called_once_with( self.data.array, 'sloprovisioning', 'maskingview', self.data.masking_view_name_f) def test_get_replication_capabilities(self): ref_response = self.data.capabilities['symmetrixCapability'][1] capabilities = self.rest.get_replication_capabilities(self.data.array) self.assertEqual(ref_response, capabilities) def test_is_clone_licenced(self): licence = self.rest.is_snapvx_licensed(self.data.array) self.assertTrue(licence) false_response = {'rdfCapable': True, 'snapVxCapable': False, 'symmetrixId': '000197800123'} with mock.patch.object(self.rest, 'get_replication_capabilities', return_value=false_response): licence2 = self.rest.is_snapvx_licensed(self.data.array) self.assertFalse(licence2) def test_is_clone_licenced_error(self): with mock.patch.object(self.rest, 'get_replication_capabilities', return_value=None): licence3 = self.rest.is_snapvx_licensed(self.data.array) self.assertFalse(licence3) def test_create_volume_snap(self): snap_name = self.data.volume_snap_vx[ 'snapshotSrcs'][0]['snapshotName'] device_id = self.data.device_id extra_specs = self.data.extra_specs payload = {'deviceNameListSource': [{'name': device_id}], 'bothSides': 'false', 'star': 'false', 'force': 'false'} resource_type = 'snapshot/%(snap)s' % {'snap': snap_name} with mock.patch.object( self.rest, 'create_resource', return_value=(202, self.data.job_list[0])) as mock_create: self.rest.create_volume_snap( self.data.array, snap_name, device_id, extra_specs) mock_create.assert_called_once_with( self.data.array, 'replication', resource_type, 
payload, private='/private') ttl = 1 payload = {'deviceNameListSource': [{'name': device_id}], 'bothSides': 'false', 'star': 'false', 'force': 'false', 'timeToLive': ttl, 'timeInHours': 'true'} with mock.patch.object( self.rest, 'create_resource', return_value=(202, self.data.job_list[0])) as mock_create: self.rest.create_volume_snap( self.data.array, snap_name, device_id, extra_specs, ttl) mock_create.assert_called_once_with( self.data.array, 'replication', resource_type, payload, private='/private') def test_modify_volume_snap(self): array = self.data.array source_id = self.data.device_id target_id = self.data.volume_snap_vx[ 'snapshotSrcs'][0]['linkedDevices'][0]['targetDevice'] snap_name = self.data.volume_snap_vx['snapshotSrcs'][0]['snapshotName'] extra_specs = deepcopy(self.data.extra_specs) extra_specs.pop(utils.FORCE_VOL_EDIT, None) payload = {'deviceNameListSource': [{'name': source_id}], 'deviceNameListTarget': [ {'name': target_id}], 'copy': 'false', 'action': "", 'star': 'false', 'force': 'false', 'exact': 'false', 'remote': 'false', 'symforce': 'false', 'snap_id': self.data.snap_id} payload_restore = {'deviceNameListSource': [{'name': source_id}], 'deviceNameListTarget': [{'name': source_id}], 'action': 'Restore', 'star': 'false', 'force': 'false', 'snap_id': self.data.snap_id} with mock.patch.object( self.rest, 'modify_resource', return_value=(202, self.data.job_list[0])) as mock_modify: # link payload['action'] = 'Link' self.rest.modify_volume_snap( array, source_id, target_id, snap_name, extra_specs, self.data.snap_id, link=True) mock_modify.assert_called_once_with( array, 'replication', 'snapshot', payload, resource_name=snap_name, private='/private') # unlink mock_modify.reset_mock() payload['action'] = 'Unlink' self.rest.modify_volume_snap( array, source_id, target_id, snap_name, extra_specs, self.data.snap_id, unlink=True) mock_modify.assert_called_once_with( array, 'replication', 'snapshot', payload, resource_name=snap_name, private='/private') # restore mock_modify.reset_mock() payload['action'] = 'Restore' self.rest.modify_volume_snap( array, source_id, "", snap_name, extra_specs, self.data.snap_id, unlink=False, restore=True) mock_modify.assert_called_once_with( array, 'replication', 'snapshot', payload_restore, resource_name=snap_name, private='/private') # link or unlink, list of volumes mock_modify.reset_mock() payload['action'] = 'Link' self.rest.modify_volume_snap( array, "", "", snap_name, extra_specs, self.data.snap_id, unlink=False, link=True, list_volume_pairs=[(source_id, target_id)]) mock_modify.assert_called_once_with( array, 'replication', 'snapshot', payload, resource_name=snap_name, private='/private') # none selected mock_modify.reset_mock() self.rest.modify_volume_snap( array, source_id, target_id, snap_name, extra_specs, self.data.snap_id) mock_modify.assert_not_called() # copy mode is True payload['copy'] = 'true' self.rest.modify_volume_snap( array, source_id, target_id, snap_name, extra_specs, self.data.snap_id, link=True, copy=True) mock_modify.assert_called_once_with( array, 'replication', 'snapshot', payload, resource_name=snap_name, private='/private') def test_delete_volume_snap(self): array = self.data.array snap_name = self.data.volume_snap_vx['snapshotSrcs'][0]['snapshotName'] source_device_id = self.data.device_id payload = {'deviceNameListSource': [{'name': source_device_id}], 'snap_id': self.data.snap_id} with mock.patch.object(self.rest, 'delete_resource') as mock_delete: self.rest.delete_volume_snap( array, snap_name, 
source_device_id, self.data.snap_id) mock_delete.assert_called_once_with( array, 'replication', 'snapshot', snap_name, payload=payload, private='/private') def test_delete_volume_snap_restore(self): array = self.data.array snap_name = self.data.volume_snap_vx['snapshotSrcs'][0]['snapshotName'] source_device_id = self.data.device_id payload = {'deviceNameListSource': [{'name': source_device_id}], 'restore': True, 'snap_id': self.data.snap_id} with mock.patch.object(self.rest, 'delete_resource') as mock_delete: self.rest.delete_volume_snap( array, snap_name, source_device_id, self.data.snap_id, restored=True) mock_delete.assert_called_once_with( array, 'replication', 'snapshot', snap_name, payload=payload, private='/private') def test_get_volume_snap_info(self): array = self.data.array source_device_id = self.data.device_id ref_snap_info = self.data.volume_snap_vx snap_info = self.rest.get_volume_snap_info(array, source_device_id) self.assertEqual(ref_snap_info, snap_info) def test_get_volume_snap(self): array = self.data.array snap_id = self.data.snap_id snap_name = self.data.volume_snap_vx['snapshotSrcs'][0]['snapshotName'] device_id = self.data.device_id ref_snap = self.data.volume_snap_vx['snapshotSrcs'][0] snap = self.rest.get_volume_snap(array, device_id, snap_name, snap_id) self.assertEqual(ref_snap, snap) def test_get_volume_snap_none(self): array = self.data.array snap_id = self.data.snap_id snap_name = self.data.volume_snap_vx['snapshotSrcs'][0]['snapshotName'] device_id = self.data.device_id with mock.patch.object(self.rest, 'get_volume_snap_info', return_value=None): snap = self.rest.get_volume_snap( array, device_id, snap_name, snap_id) self.assertIsNone(snap) with mock.patch.object(self.rest, 'get_volume_snap_info', return_value={'snapshotSrcs': []}): snap = self.rest.get_volume_snap( array, device_id, snap_name, snap_id) self.assertIsNone(snap) def test_get_snap_linked_device_dict_list(self): array = self.data.array snap_name = 'temp-snapshot' device_id = self.data.device_id snap_list = [{'linked_vols': [ {'target_device': device_id, 'state': 'Copied'}], 'snap_name': snap_name, 'snapid': self.data.snap_id}] ref_snap_list = [{'snapid': self.data.snap_id, 'linked_vols': [ {'state': 'Copied', 'target_device': '00001'}]}] with mock.patch.object(self.rest, '_find_snap_vx_source_sessions', return_value=snap_list): snap_dict_list = self.rest._get_snap_linked_device_dict_list( array, device_id, snap_name) self.assertEqual(ref_snap_list, snap_dict_list) def test_get_sync_session(self): array = self.data.array source_id = self.data.device_id target_id = self.data.volume_snap_vx[ 'snapshotSrcs'][0]['linkedDevices'][0]['targetDevice'] snap_name = self.data.volume_snap_vx['snapshotSrcs'][0]['snapshotName'] ref_sync = self.data.volume_snap_vx[ 'snapshotSrcs'][0]['linkedDevices'][0] sync = self.rest.get_sync_session( array, source_id, snap_name, target_id, self.data.snap_id) self.assertEqual(ref_sync, sync) def test_find_snap_vx_sessions(self): array = self.data.array source_id = self.data.device_id ref_sessions = [{'snapid': self.data.snap_id, 'snap_name': 'temp-000AA-snapshot_for_clone', 'source_vol_id': self.data.device_id, 'target_vol_id': self.data.device_id2, 'expired': False, 'copy_mode': True, 'state': 'Copied'}, {'snapid': self.data.snap_id_2, 'snap_name': 'temp-000AA-snapshot_for_clone', 'source_vol_id': self.data.device_id, 'target_vol_id': self.data.device_id3, 'expired': False, 'copy_mode': True, 'state': 'Copied'}] with mock.patch.object(self.rest, 'get_volume_snap_info', 
return_value=self.data.snapshot_src_details): src_list, __ = self.rest.find_snap_vx_sessions(array, source_id) self.assertEqual(ref_sessions, src_list) self.assertIsInstance(src_list, list) @mock.patch.object(rest.PowerMaxRest, '_get_private_volume', return_value=tpd.PowerMaxData.snap_tgt_vol_details) @mock.patch.object(rest.PowerMaxRest, 'get_volume_snap_info', return_value=tpd.PowerMaxData.snapshot_tgt_details) def test_find_snap_vx_sessions_tgt_only(self, mck_snap, mck_vol): array = self.data.array source_id = self.data.device_id ref_session = {'snapid': self.data.snap_id, 'state': 'Linked', 'copy_mode': False, 'snap_name': 'temp-000AA-snapshot_for_clone', 'source_vol_id': self.data.device_id2, 'target_vol_id': source_id, 'expired': True} __, snap_tgt = self.rest.find_snap_vx_sessions( array, source_id, tgt_only=True) self.assertEqual(ref_session, snap_tgt) self.assertIsInstance(snap_tgt, dict) def test_update_storagegroup_qos(self): sg_qos = {'srp': self.data.srp, 'num_of_vols': 2, 'cap_gb': 2, 'storageGroupId': 'OS-QOS-SG', 'slo': self.data.slo, 'workload': self.data.workload, 'hostIOLimit': {'host_io_limit_io_sec': '4000', 'dynamicDistribution': 'Always', 'host_io_limit_mb_sec': '4000'}} self.data.sg_details.append(sg_qos) array = self.data.array extra_specs = deepcopy(self.data.extra_specs) extra_specs['qos'] = {'total_iops_sec': '4000', 'DistributionType': 'Always'} return_value = self.rest.update_storagegroup_qos( array, 'OS-QOS-SG', extra_specs) self.assertEqual(False, return_value) extra_specs['qos'] = {'DistributionType': 'onFailure', 'total_bytes_sec': '419430400'} return_value = self.rest.update_storagegroup_qos( array, 'OS-QOS-SG', extra_specs) self.assertTrue(return_value) def test_update_storagegroup_qos_exception(self): array = self.data.array storage_group = self.data.defaultstoragegroup_name extra_specs = deepcopy(self.data.extra_specs) extra_specs['qos'] = {'total_iops_sec': '4000', 'DistributionType': 'Wrong', 'total_bytes_sec': '4194304000'} with mock.patch.object(self.rest, 'check_status_code_success', side_effect=[None, None, None, Exception]): self.assertRaises(exception.VolumeBackendAPIException, self.rest.update_storagegroup_qos, array, storage_group, extra_specs) extra_specs['qos']['DistributionType'] = 'Always' return_value = self.rest.update_storagegroup_qos( array, 'OS-QOS-SG', extra_specs) self.assertFalse(return_value) @mock.patch.object(rest.PowerMaxRest, 'modify_storage_group', return_value=(202, tpd.PowerMaxData.job_list[0])) def test_set_storagegroup_srp(self, mock_mod): self.rest.set_storagegroup_srp( self.data.array, self.data.test_vol_grp_name, self.data.srp, self.data.extra_specs) mock_mod.assert_called_once() def test_get_rdf_group(self): with mock.patch.object(self.rest, 'get_resource') as mock_get: self.rest.get_rdf_group(self.data.array, self.data.rdf_group_no_1) mock_get.assert_called_once_with( self.data.array, 'replication', 'rdf_group', self.data.rdf_group_no_1) def test_get_rdf_group_list(self): rdf_list = self.rest.get_rdf_group_list(self.data.array) self.assertEqual(self.data.rdf_group_list, rdf_list) def test_get_rdf_group_volume(self): vol_details = self.data.private_vol_details['resultList']['result'][0] with mock.patch.object(self.rest, '_get_private_volume', return_value=vol_details) as mock_get: self.rest.get_rdf_group_volume( self.data.array, self.data.device_id) mock_get.assert_called_once_with( self.data.array, self.data.device_id) def test_are_vols_rdf_paired(self): are_vols1, local_state, pair_state = 
self.rest.are_vols_rdf_paired( self.data.array, self.data.remote_array, self.data.device_id, self.data.device_id2) self.assertTrue(are_vols1) are_vols2, local_state, pair_state = self.rest.are_vols_rdf_paired( self.data.array, '00012345', self.data.device_id, self.data.device_id2) self.assertFalse(are_vols2) with mock.patch.object(self.rest, 'get_rdf_group_volume', return_value=None): are_vols3, local, pair = self.rest.are_vols_rdf_paired( self.data.array, self.data.remote_array, self.data.device_id, self.data.device_id2) self.assertFalse(are_vols3) def test_get_rdf_group_number(self): rdfg_num = self.rest.get_rdf_group_number( self.data.array, self.data.rdf_group_name_1) self.assertEqual(self.data.rdf_group_no_1, rdfg_num) with mock.patch.object(self.rest, 'get_rdf_group_list', return_value=None): rdfg_num2 = self.rest.get_rdf_group_number( self.data.array, self.data.rdf_group_name_1) self.assertIsNone(rdfg_num2) with mock.patch.object(self.rest, 'get_rdf_group', return_value=None): rdfg_num3 = self.rest.get_rdf_group_number( self.data.array, self.data.rdf_group_name_1) self.assertIsNone(rdfg_num3) @mock.patch.object(rest.PowerMaxRest, 'get_rdf_group', side_effect=[{'numDevices': 0}, {'numDevices': 0}, {'numDevices': 1}, {'numDevices': 1}]) def test_get_metro_payload_info(self, mock_rdfg): payload_in = {'establish': 'true', 'rdfMode': 'Active', 'rdfType': 'RDF1'} # First volume out, Metro use bias not set act_payload_1 = self.rest.get_metro_payload_info( self.data.array, payload_in.copy(), self.data.rdf_group_no_1, {}, True) self.assertEqual(payload_in, act_payload_1) # First volume out, Metro use bias set act_payload_2 = self.rest.get_metro_payload_info( self.data.array, payload_in.copy(), self.data.rdf_group_no_1, {'metro_bias': True}, True) self.assertEqual('true', act_payload_2['metroBias']) # Not first vol in RDFG, consistency exempt not set act_payload_3 = self.rest.get_metro_payload_info( self.data.array, payload_in.copy(), self.data.rdf_group_no_1, {'exempt': False}, False) ref_payload_3 = {'rdfMode': 'Active', 'rdfType': 'RDF1'} self.assertEqual(ref_payload_3, act_payload_3) # Not first vol in RDFG, consistency exempt set act_payload_4 = self.rest.get_metro_payload_info( self.data.array, payload_in.copy(), self.data.rdf_group_no_1, {'exempt': True}, True) ref_payload_4 = {'rdfType': 'RDF1', 'exempt': 'true', 'rdfMode': 'Active'} self.assertEqual(ref_payload_4, act_payload_4) def test_get_storage_group_rep(self): array = self.data.array source_group_name = self.data.storagegroup_name_source ref_details = self.data.sg_details_rep[0] volume_group = self.rest.get_storage_group_rep(array, source_group_name) self.assertEqual(volume_group, ref_details) def test_get_volumes_in_storage_group(self): array = self.data.array storagegroup_name = self.data.storagegroup_name_source ref_volumes = [self.data.device_id, self.data.device_id2] volume_list = self.rest.get_volumes_in_storage_group( array, storagegroup_name) self.assertEqual(ref_volumes, volume_list) def test_create_storagegroup_snap(self): array = self.data.array extra_specs = self.data.extra_specs source_group = self.data.storagegroup_name_source snap_name = self.data.group_snapshot_name with mock.patch.object( self.rest, 'create_storagegroup_snap') as mock_create: self.rest.create_storagegroup_snap( array, source_group, snap_name, extra_specs) mock_create.assert_called_once_with( array, source_group, snap_name, extra_specs) def test_delete_storagegroup_snap(self): array = self.data.array source_group = 
self.data.storagegroup_name_source snap_name = self.data.group_snapshot_name with mock.patch.object( self.rest, 'delete_storagegroup_snap') as mock_delete: self.rest.delete_storagegroup_snap( array, source_group, snap_name, '0') mock_delete.assert_called_once_with( array, source_group, snap_name, '0') @mock.patch.object(rest.PowerMaxRest, 'get_resource', return_value={'snapids': [tpd.PowerMaxData.snap_id, tpd.PowerMaxData.snap_id_2]}) def test_get_storagegroup_snap_id_list(self, mock_list): array = self.data.array source_group = self.data.storagegroup_name_source snap_name = self.data.group_snapshot_name ret_list = self.rest.get_storage_group_snap_id_list( array, source_group, snap_name) self.assertEqual([self.data.snap_id, self.data.snap_id_2], ret_list) def test_get_storagegroup_rdf_details(self): details = self.rest.get_storagegroup_rdf_details( self.data.array, self.data.test_vol_grp_name, self.data.rdf_group_no_1) self.assertEqual(self.data.sg_rdf_details[0], details) def test_verify_rdf_state(self): verify1 = self.rest._verify_rdf_state( self.data.array, self.data.test_vol_grp_name, self.data.rdf_group_no_1, 'Failover') self.assertTrue(verify1) verify2 = self.rest._verify_rdf_state( self.data.array, self.data.test_fo_vol_group, self.data.rdf_group_no_1, 'Establish') self.assertTrue(verify2) def test_delete_storagegroup_rdf(self): with mock.patch.object( self.rest, 'delete_resource') as mock_del: self.rest.delete_storagegroup_rdf( self.data.array, self.data.test_vol_grp_name, self.data.rdf_group_no_1) mock_del.assert_called_once() def test_is_next_gen_array(self): is_next_gen = self.rest.is_next_gen_array(self.data.array) self.assertFalse(is_next_gen) is_next_gen2 = self.rest.is_next_gen_array(self.data.array_herc) self.assertTrue(is_next_gen2) def test_get_array_model_info(self): array_model_vmax, is_next_gen = self.rest.get_array_model_info( self.data.array) self.assertEqual('VMAX250F', array_model_vmax) self.assertFalse(is_next_gen) array_model_powermax, is_next_gen2 = self.rest.get_array_model_info( self.data.array_herc) self.assertTrue(is_next_gen2) self.assertEqual('PowerMax 2000', array_model_powermax) @mock.patch.object(rest.PowerMaxRest, 'modify_resource', return_value=('200', 'JobComplete')) def test_modify_volume_snap_rename(self, mock_modify): array = self.data.array source_id = self.data.device_id old_snap_backend_name = self.data.snapshot_id new_snap_backend_name = self.data.managed_snap_id self.rest.modify_volume_snap( array, source_id, source_id, old_snap_backend_name, self.data.extra_specs, self.data.snap_id, link=False, unlink=False, rename=True, new_snap_name=new_snap_backend_name) mock_modify.assert_called_once() def test_get_private_volume_list_pass(self): array_id = self.data.array response = {'count': 1, 'expirationTime': 1521650650793, 'id': 'f3aab01c-a5a8-4fb4-af2b-16ae1c46dc9e_0', 'maxPageSize': 1000, 'resultList': {'from': 1, 'result': [{'volumeHeader': { 'capGB': 1.0, 'capMB': 1026.0, 'volumeId': '00001', 'status': 'Ready', 'configuration': 'TDEV'}}], 'to': 1}} with mock.patch.object( self.rest, 'get_resource', return_value=self.data.p_vol_rest_response_single): volume = self.rest.get_private_volume_list(array_id) self.assertEqual(response, volume) def test_get_private_volume_list_none(self): array_id = self.data.array response = [] with mock.patch.object( self.rest, 'request', return_value=( 200, tpd.PowerMaxData.p_vol_rest_response_none)): vol_list = self.rest.get_private_volume_list(array_id) self.assertEqual(response, vol_list) @mock.patch.object( 
rest.PowerMaxRest, 'get_iterator_page_list', return_value=(tpd.PowerMaxData.p_vol_rest_response_iterator_2[ 'result'])) def test_get_private_volume_list_iterator(self, mock_iterator): array_id = self.data.array response = [ {'volumeHeader': { 'capGB': 1.0, 'capMB': 1026.0, 'volumeId': '00002', 'status': 'Ready', 'configuration': 'TDEV'}}, {'volumeHeader': { 'capGB': 1.0, 'capMB': 1026.0, 'volumeId': '00001', 'status': 'Ready', 'configuration': 'TDEV'}}] with mock.patch.object( self.rest, 'request', return_value=(200, deepcopy( self.data.p_vol_rest_response_iterator_1))): volume = self.rest.get_private_volume_list(array_id) self.assertEqual(response, volume) @mock.patch.object(rest.PowerMaxRest, 'get_resource') def test_get_private_volume_list_params_dict_input(self, mck_get): array_id = self.data.array input_param = {'unit-test': True} ref = {'unit-test': True, 'expiration_time_mins': rest.ITERATOR_EXPIRATION} self.rest.get_private_volume_list(array_id, input_param) mck_get.assert_called_once_with( self.data.array, rest.SLOPROVISIONING, 'volume', params=ref, private='/private') @mock.patch.object(rest.PowerMaxRest, 'get_resource') def test_get_private_volume_list_params_str_input(self, mck_get): array_id = self.data.array input_param = '&unit-test=True' ref = '&unit-test=True&expiration_time_mins=%(expire)s' % { 'expire': rest.ITERATOR_EXPIRATION} self.rest.get_private_volume_list(array_id, input_param) mck_get.assert_called_once_with( self.data.array, rest.SLOPROVISIONING, 'volume', params=ref, private='/private') @mock.patch.object(rest.PowerMaxRest, 'get_resource') def test_get_private_volume_list_params_no_input(self, mck_get): array_id = self.data.array ref = {'expiration_time_mins': rest.ITERATOR_EXPIRATION} self.rest.get_private_volume_list(array_id) mck_get.assert_called_once_with( self.data.array, rest.SLOPROVISIONING, 'volume', params=ref, private='/private') @mock.patch.object(rest.PowerMaxRest, '_delete_iterator') def test_get_iterator_list(self, mck_del): with mock.patch.object( self.rest, 'get_request', side_effect=[ self.data.rest_iterator_resonse_one, self.data.rest_iterator_resonse_two]): expected_response = [ {'volumeHeader': { 'capGB': 1.0, 'capMB': 1026.0, 'volumeId': '00001', 'status': 'Ready', 'configuration': 'TDEV'}}, {'volumeHeader': { 'capGB': 1.0, 'capMB': 1026.0, 'volumeId': '00002', 'status': 'Ready', 'configuration': 'TDEV'}}] iterator_id = 'test_iterator_id' result_count = 1500 start_position = 1 end_position = 1000 max_page_size = 1000 actual_response = self.rest.get_iterator_page_list( iterator_id, result_count, start_position, end_position, max_page_size) mck_del.assert_called_once_with(iterator_id) self.assertEqual(expected_response, actual_response) @mock.patch.object(rest.PowerMaxRest, 'request', return_value=(204, 'Deleted Iterator')) def test_delete_iterator(self, mck_del): iterator_id = 'test_iterator_id' self.rest._delete_iterator(iterator_id) mck_del.assert_called_once_with( '/common/Iterator/%(iter)s' % {'iter': iterator_id}, rest.DELETE) def test_set_rest_credentials(self): array_info = { 'RestServerIp': '10.10.10.10', 'RestServerPort': '8443', 'RestUserName': 'user_test', 'RestPassword': 'pass_test', 'SerialNumber': self.data.array, 'SSLVerify': True, } self.rest.set_rest_credentials(array_info) self.assertEqual('user_test', self.rest.user) self.assertEqual('pass_test', self.rest.passwd) self.assertTrue(self.rest.verify) self.assertEqual('https://10.10.10.10:8443/univmax/restapi', self.rest.base_uri) @mock.patch.object( rest.PowerMaxRest, 
'get_iterator_page_list', return_value=( tpd.PowerMaxData.p_vol_rest_response_iterator_2[ 'result'])) def test_list_pagination(self, mock_iter): result_list = self.rest.list_pagination( deepcopy(self.data.p_vol_rest_response_iterator_1)) # reflects sample data, 1 from first iterator page and 1 from # second iterator page self.assertTrue(2 == len(result_list)) def test_get_vmax_model(self): reference = 'PowerMax_2000' with mock.patch.object(self.rest, 'get_request', return_value=self.data.powermax_model_details): self.assertEqual(self.rest.get_vmax_model(self.data.array), reference) def test_set_u4p_failover_config(self): self.rest.set_u4p_failover_config(self.data.u4p_failover_config) self.assertTrue(self.rest.u4p_failover_enabled) self.assertEqual('3', self.rest.u4p_failover_retries) self.assertEqual('10', self.rest.u4p_failover_timeout) self.assertEqual('2', self.rest.u4p_failover_backoff_factor) self.assertEqual('10.10.10.10', self.rest.primary_u4p) self.assertEqual('10.10.10.11', self.rest.u4p_failover_targets[0]['san_ip']) self.assertEqual('10.10.10.12', self.rest.u4p_failover_targets[1]['san_ip']) def test_handle_u4p_failover_with_targets(self): self.mock_object(self.rest, 'u4p_failover_targets', self.data.u4p_failover_target) self.rest._handle_u4p_failover() self.assertTrue(self.rest.u4p_in_failover) self.assertEqual('test', self.rest.user) self.assertEqual('test', self.rest.passwd) self.assertEqual('/path/to/cert', self.rest.verify) self.assertEqual('https://10.10.10.11:8443/univmax/restapi', self.rest.base_uri) def test_handle_u4p_failover_no_targets_exception(self): self.mock_object(self.rest, 'u4p_failover_targets', []) self.assertRaises(exception.VolumeBackendAPIException, self.rest._handle_u4p_failover) @mock.patch.object(rest.PowerMaxRest, 'get_array_detail', return_value=tpd.PowerMaxData.powermax_model_details) def test_get_array_ucode(self, mck_ucode): array = self.data.array ucode = self.rest.get_array_ucode_version(array) self.assertEqual(self.data.powermax_model_details['ucode'], ucode) @mock.patch.object(rest.PowerMaxRest, 'get_array_detail', return_value=tpd.PowerMaxData.powermax_model_100) def test_get_array_microcode(self, mck_ucode): array = self.data.array microcode = self.rest.get_array_ucode_version(array) self.assertEqual(self.data.powermax_model_100.get( 'microcode'), microcode) def test_validate_unisphere_version_suceess(self): version = tpd.PowerMaxData.unisphere_version returned_version = {'version': version} with mock.patch.object(self.rest, "request", return_value=(200, returned_version)) as mock_req: valid_version = self.rest.validate_unisphere_version() self.assertTrue(valid_version) request_count = mock_req.call_count self.assertEqual(1, request_count) def test_validate_unisphere_version_fail(self): version = tpd.PowerMaxData.unisphere_version_90 returned_version = {'version': version} with mock.patch.object(self.rest, "request", return_value=(200, returned_version))as mock_req: valid_version = self.rest.validate_unisphere_version() self.assertFalse(valid_version) request_count = mock_req.call_count self.assertEqual(1, request_count) def test_validate_unisphere_version_no_connection(self): with mock.patch.object(self.rest, "request", return_value=(500, '')) as mock_req: valid_version = self.rest.validate_unisphere_version() self.assertFalse(valid_version) request_count = mock_req.call_count self.assertEqual(1, request_count) @mock.patch.object(rest.PowerMaxRest, 'get_resource', return_value=tpd.PowerMaxData.sg_rdf_group_details) def 
    def test_get_storage_group_rdf_group_state(self, mck_get):
        ref_get_resource = ('storagegroup/%(sg)s/rdf_group/%(rdfg)s' % {
            'sg': self.data.test_vol_grp_name,
            'rdfg': self.data.rdf_group_no_1})
        states = self.rest.get_storage_group_rdf_group_state(
            self.data.array, self.data.test_vol_grp_name,
            self.data.rdf_group_no_1)
        mck_get.assert_called_once_with(
            self.data.array, 'replication', ref_get_resource)
        self.assertEqual(states, [utils.RDF_SUSPENDED_STATE])

    @mock.patch.object(rest.PowerMaxRest, 'get_resource',
                       return_value={'rdfgs': [100, 200]})
    def test_get_storage_group_rdf_groups(self, mck_get):
        rdf_groups = self.rest.get_storage_group_rdf_groups(
            self.data.array, self.data.storagegroup_name_f)
        self.assertEqual([100, 200], rdf_groups)

    @mock.patch.object(rest.PowerMaxRest, 'get_resource',
                       return_value={"name": ["00038", "00039"]})
    def test_get_rdf_group_volume_list(self, mck_get):
        volumes_list = self.rest.get_rdf_group_volume_list(
            self.data.array, self.data.rdf_group_no_1)
        self.assertEqual(["00038", "00039"], volumes_list)

    @mock.patch.object(rest.PowerMaxRest, 'get_resource')
    def test_get_rdf_pair_volume(self, mck_get):
        rdf_grp_no = self.data.rdf_group_no_1
        device_id = self.data.device_id
        array = self.data.array
        ref_get_resource = ('rdf_group/%(rdf_group)s/volume/%(device)s' % {
            'rdf_group': rdf_grp_no, 'device': device_id})
        self.rest.get_rdf_pair_volume(array, rdf_grp_no, device_id)
        mck_get.assert_called_once_with(array, 'replication',
                                        ref_get_resource)

    @mock.patch.object(rest.PowerMaxRest, 'wait_for_job')
    @mock.patch.object(rest.PowerMaxRest, 'create_resource',
                       return_value=(200, 'job'))
    def test_srdf_protect_storage_group(self, mck_create, mck_wait):
        array_id = self.data.array
        remote_array_id = self.data.remote_array
        rdf_group_no = self.data.rdf_group_no_1
        replication_mode = utils.REP_METRO
        sg_name = self.data.default_sg_re_enabled
        service_level = 'Diamond'
        extra_specs = deepcopy(self.data.rep_extra_specs)
        extra_specs[utils.METROBIAS] = True
        remote_sg = self.data.rdf_managed_async_grp
        ref_payload = {
            'executionOption': 'ASYNCHRONOUS', 'metroBias': 'true',
            'replicationMode': 'Active', 'remoteSLO': service_level,
            'remoteSymmId': remote_array_id, 'rdfgNumber': rdf_group_no,
            'remoteStorageGroupName': remote_sg, 'establish': 'true'}
        ref_resource = ('storagegroup/%(sg_name)s/rdf_group' %
                        {'sg_name': sg_name})
        self.rest.srdf_protect_storage_group(
            array_id, remote_array_id, rdf_group_no, replication_mode,
            sg_name, service_level, extra_specs, target_sg=remote_sg)
        mck_create.assert_called_once_with(
            array_id, 'replication', ref_resource, ref_payload)

    @mock.patch.object(
        rest.PowerMaxRest, 'wait_for_job',
        side_effect=exception.VolumeBackendAPIException(''))
    @mock.patch.object(rest.PowerMaxRest, 'create_resource',
                       return_value=(200, 'job'))
    def test_srdf_protect_storage_group_retries(self, mck_create, mck_wait):
        array_id = self.data.array
        remote_array_id = self.data.remote_array
        rdf_group_no = self.data.rdf_group_no_1
        replication_mode = utils.REP_METRO
        sg_name = self.data.default_sg_re_enabled
        service_level = 'Diamond'
        extra_specs = deepcopy(self.data.rep_extra_specs)
        extra_specs[utils.METROBIAS] = True
        remote_sg = self.data.rdf_managed_async_grp
        self.assertRaises(exception.VolumeBackendAPIException,
                          self.rest.srdf_protect_storage_group,
                          array_id, remote_array_id, rdf_group_no,
                          replication_mode, sg_name, service_level,
                          extra_specs, target_sg=remote_sg)
        # 6 retries on a VolumeBackendAPIException
        self.assertEqual(6, mck_create.call_count)

    @mock.patch.object(rest.PowerMaxRest, 'wait_for_job')
@mock.patch.object(rest.PowerMaxRest, 'modify_resource', return_value=(200, 'job')) def test_srdf_modify_group(self, mck_modify, mck_wait): array_id = self.data.array rdf_group_no = self.data.rdf_group_no_1 sg_name = self.data.default_sg_re_enabled payload = {'executionOption': 'ASYNCHRONOUS', 'action': 'Suspend'} extra_specs = self.data.rep_extra_specs msg = 'test' resource = ('storagegroup/%(sg_name)s/rdf_group/%(rdf_group_no)s' % { 'sg_name': sg_name, 'rdf_group_no': rdf_group_no}) self.rest.srdf_modify_group( array_id, rdf_group_no, sg_name, payload, extra_specs, msg) mck_modify.assert_called_once_with( array_id, 'replication', resource, payload) mck_wait.assert_called_once_with(msg, 200, 'job', extra_specs) @mock.patch.object(rest.PowerMaxRest, 'wait_for_job') @mock.patch.object(rest.PowerMaxRest, 'modify_resource', return_value=(200, 'job')) def test_srdf_modify_group_async_call_false(self, mck_modify, mck_wait): array_id = self.data.array rdf_group_no = self.data.rdf_group_no_1 sg_name = self.data.default_sg_re_enabled payload = {'action': 'Suspend'} extra_specs = self.data.rep_extra_specs msg = 'test' resource = ('storagegroup/%(sg_name)s/rdf_group/%(rdf_group_no)s' % { 'sg_name': sg_name, 'rdf_group_no': rdf_group_no}) self.rest.srdf_modify_group( array_id, rdf_group_no, sg_name, payload, extra_specs, msg, False) mck_modify.assert_called_once_with( array_id, 'replication', resource, payload) mck_wait.assert_not_called() @mock.patch.object(rest.PowerMaxRest, 'srdf_modify_group') @mock.patch.object(rest.PowerMaxRest, 'get_storage_group_rdf_group_state', return_value=[utils.RDF_CONSISTENT_STATE]) def test_srdf_suspend_replication(self, mck_get, mck_modify): array_id = self.data.array rdf_group_no = self.data.rdf_group_no_1 sg_name = self.data.default_sg_re_enabled rep_extra_specs = self.data.rep_extra_specs self.rest.srdf_suspend_replication( array_id, sg_name, rdf_group_no, rep_extra_specs) # Replication mode in this test is synchronous, so the expectation # is that the consistency exempt flag is false. mck_modify.assert_called_once_with( array_id, rdf_group_no, sg_name, {'suspend': {'force': 'true', 'consExempt': 'false'}, 'action': 'Suspend'}, rep_extra_specs, 'Suspend SRDF Group Replication') @mock.patch.object(rest.PowerMaxRest, 'srdf_modify_group') @mock.patch.object(rest.PowerMaxRest, 'get_storage_group_rdf_group_state', return_value=[utils.RDF_SUSPENDED_STATE, utils.RDF_CONSISTENT_STATE]) def test_srdf_suspend_replication_dual_states(self, mck_get, mck_modify): array_id = self.data.array rdf_group_no = self.data.rdf_group_no_1 sg_name = self.data.default_sg_re_enabled rep_extra_specs = self.data.rep_extra_specs self.rest.srdf_suspend_replication( array_id, sg_name, rdf_group_no, rep_extra_specs) # Replication mode in this test is synchronous, so the expectation # is that the consistency exempt flag is false. 
mck_modify.assert_called_once_with( array_id, rdf_group_no, sg_name, {'suspend': {'force': 'true', 'consExempt': 'false'}, 'action': 'Suspend'}, rep_extra_specs, 'Suspend SRDF Group Replication') @mock.patch.object(rest.PowerMaxRest, 'srdf_modify_group') @mock.patch.object(rest.PowerMaxRest, 'get_storage_group_rdf_group_state', return_value=[utils.RDF_SUSPENDED_STATE, utils.RDF_CONSISTENT_STATE]) def test_srdf_suspend_metro_replication_dual_states(self, mck_get, mck_modify): array_id = self.data.array rdf_group_no = self.data.rdf_group_no_1 sg_name = self.data.default_sg_re_enabled rep_extra_specs = self.data.rep_extra_specs_async self.rest.srdf_suspend_replication( array_id, sg_name, rdf_group_no, rep_extra_specs) # Replication mode in this test is asynchronous, so the expectation # is that the consistency exempt flag is true. mck_modify.assert_called_once_with( array_id, rdf_group_no, sg_name, {'suspend': {'force': 'true', 'consExempt': 'true'}, 'action': 'Suspend'}, rep_extra_specs, 'Suspend SRDF Group Replication') @mock.patch.object(rest.PowerMaxRest, 'srdf_modify_group') @mock.patch.object(rest.PowerMaxRest, 'get_storage_group_rdf_group_state', return_value=[utils.RDF_SUSPENDED_STATE]) def test_srdf_suspend_replication_already_suspended(self, mck_get, mck_modify): array_id = self.data.array rdf_group_no = self.data.rdf_group_no_1 sg_name = self.data.default_sg_re_enabled rep_extra_specs = self.data.rep_extra_specs self.rest.srdf_suspend_replication( array_id, sg_name, rdf_group_no, rep_extra_specs) mck_modify.assert_not_called() @mock.patch.object(rest.PowerMaxRest, 'srdf_modify_group') @mock.patch.object(rest.PowerMaxRest, 'get_storage_group_rdf_group_state', return_value=[utils.RDF_SUSPENDED_STATE]) def test_srdf_resume_replication(self, mck_get, mck_modify): array_id = self.data.array rdf_group_no = self.data.rdf_group_no_1 sg_name = self.data.default_sg_re_enabled rep_extra_specs = deepcopy(self.data.rep_extra_specs) rep_extra_specs[utils.REP_CONFIG] = self.data.rep_config_async rep_extra_specs[utils.REP_MODE] = utils.REP_ASYNC self.rest.srdf_resume_replication( array_id, sg_name, rdf_group_no, rep_extra_specs) mck_modify.assert_called_once_with( array_id, rdf_group_no, sg_name, {'action': 'Resume'}, rep_extra_specs, 'Resume SRDF Group Replication', True) @mock.patch.object(rest.PowerMaxRest, 'srdf_modify_group') @mock.patch.object(rest.PowerMaxRest, 'get_storage_group_rdf_group_state', return_value=[utils.RDF_SUSPENDED_STATE]) def test_srdf_resume_replication_metro(self, mck_get, mck_modify): array_id = self.data.array rdf_group_no = self.data.rdf_group_no_1 sg_name = self.data.default_sg_re_enabled rep_extra_specs = deepcopy(self.data.rep_extra_specs_metro) rep_extra_specs[utils.REP_MODE] = utils.REP_METRO self.rest.srdf_resume_replication( array_id, sg_name, rdf_group_no, rep_extra_specs) mck_modify.assert_called_once_with( array_id, rdf_group_no, sg_name, {"action": "Establish", "establish": {"metroBias": "true"}}, rep_extra_specs, 'Resume SRDF Group Replication', True) @mock.patch.object(rest.PowerMaxRest, 'srdf_modify_group') @mock.patch.object(rest.PowerMaxRest, 'get_storage_group_rdf_group_state', return_value=[utils.RDF_CONSISTENT_STATE]) def test_srdf_resume_replication_already_resumed(self, mck_get, mck_modify): array_id = self.data.array rdf_group_no = self.data.rdf_group_no_1 sg_name = self.data.default_sg_re_enabled rep_extra_specs = self.data.rep_extra_specs self.rest.srdf_resume_replication( array_id, sg_name, rdf_group_no, rep_extra_specs) 
mck_modify.assert_not_called() @mock.patch.object(rest.PowerMaxRest, 'srdf_modify_group') @mock.patch.object(rest.PowerMaxRest, 'get_storage_group_rdf_group_state', return_value=[utils.RDF_CONSISTENT_STATE]) def test_srdf_establish_replication(self, mck_get, mck_modify): array_id = self.data.array rdf_group_no = self.data.rdf_group_no_1 sg_name = self.data.default_sg_re_enabled rep_extra_specs = self.data.rep_extra_specs first_call = call(array_id, rdf_group_no, sg_name, {'action': 'Suspend'}, rep_extra_specs, 'Suspend SRDF Group Replication') second_call = call(array_id, rdf_group_no, sg_name, {'action': 'Establish'}, rep_extra_specs, 'Incremental Establish SRDF Group Replication') self.rest.srdf_establish_replication( array_id, sg_name, rdf_group_no, rep_extra_specs) mck_modify.assert_has_calls([first_call, second_call], any_order=False) @mock.patch.object(rest.PowerMaxRest, 'srdf_modify_group') def test_srdf_failover_group(self, mck_modify): array_id = self.data.array rdf_group_no = self.data.rdf_group_no_1 sg_name = self.data.default_sg_re_enabled rep_extra_specs = self.data.rep_extra_specs self.rest.srdf_failover_group( array_id, sg_name, rdf_group_no, rep_extra_specs) mck_modify.assert_called_once_with( array_id, rdf_group_no, sg_name, {'action': 'Failover'}, rep_extra_specs, 'Failing over SRDF group replication') @mock.patch.object(rest.PowerMaxRest, 'srdf_modify_group') def test_srdf_failback_group(self, mck_modify): array_id = self.data.array rdf_group_no = self.data.rdf_group_no_1 sg_name = self.data.default_sg_re_enabled rep_extra_specs = self.data.rep_extra_specs self.rest.srdf_failback_group( array_id, sg_name, rdf_group_no, rep_extra_specs) mck_modify.assert_called_once_with( array_id, rdf_group_no, sg_name, {'action': 'Failback'}, rep_extra_specs, 'Failing back SRDF group replication') @mock.patch.object(rest.PowerMaxRest, 'wait_for_job') @mock.patch.object(rest.PowerMaxRest, 'modify_storage_group', return_value=(200, 'job')) def test_srdf_remove_device_pair_from_storage_group(self, mck_modify, mck_wait): array_id = self.data.array sg_name = self.data.default_sg_re_enabled remote_array_id = self.data.remote_array device_id = self.data.device_id rep_extra_specs = self.data.rep_extra_specs ref_payload = { 'editStorageGroupActionParam': { 'removeVolumeParam': { 'volumeId': [device_id], 'remoteSymmSGInfoParam': { 'remote_symmetrix_1_id': remote_array_id, 'remote_symmetrix_1_sgs': [sg_name]}}}} self.rest.srdf_remove_device_pair_from_storage_group( array_id, sg_name, remote_array_id, device_id, rep_extra_specs) mck_modify.assert_called_once_with( array_id, sg_name, ref_payload) @mock.patch.object(rest.PowerMaxRest, 'delete_resource') def test_srdf_delete_device_pair(self, mck_del): array_id = self.data.array rdf_group_no = self.data.rdf_group_no_1 device_id = self.data.device_id ref_resource = ('%(rdfg)s/volume/%(dev)s' % { 'rdfg': rdf_group_no, 'dev': device_id}) self.rest.srdf_delete_device_pair( array_id, rdf_group_no, device_id) mck_del.assert_called_once_with( array_id, 'replication', 'rdf_group', ref_resource) @mock.patch.object( rest.PowerMaxRest, 'get_rdf_pair_volume', return_value=tpd.PowerMaxData.rdf_group_vol_details) @mock.patch.object(rest.PowerMaxRest, 'wait_for_job') @mock.patch.object(rest.PowerMaxRest, 'create_resource', return_value=(200, 'job')) def test_srdf_create_device_pair_async( self, mck_create, mck_wait, mck_get): array_id = self.data.array remote_array = self.data.remote_array rdf_group_no = self.data.rdf_group_no_1 mode = utils.REP_ASYNC device_id = 
self.data.device_id tgt_device_id = self.data.device_id2 rep_extra_specs = deepcopy(self.data.rep_extra_specs) rep_extra_specs['array'] = remote_array ref_payload = { 'executionOption': 'ASYNCHRONOUS', 'rdfMode': mode, 'localDeviceListCriteriaParam': {'localDeviceList': [device_id]}, 'rdfType': 'RDF1', 'invalidateR2': 'true', 'exempt': 'true'} ref_resource = 'rdf_group/%(rdfg)s/volume' % {'rdfg': rdf_group_no} ref_response = { 'array': array_id, 'remote_array': remote_array, 'src_device': device_id, 'tgt_device': tgt_device_id, 'session_info': self.data.rdf_group_vol_details} create_response = self.rest.srdf_create_device_pair( array_id, rdf_group_no, mode, device_id, rep_extra_specs, True) mck_create.assert_called_once_with( array_id, 'replication', ref_resource, ref_payload) mck_get.assert_called_once_with( array_id, rdf_group_no, device_id) self.assertEqual(ref_response, create_response) @mock.patch.object( rest.PowerMaxRest, 'get_rdf_pair_volume', return_value=tpd.PowerMaxData.rdf_group_vol_details) @mock.patch.object(rest.PowerMaxRest, 'wait_for_job') @mock.patch.object(rest.PowerMaxRest, 'create_resource', return_value=(200, 'job')) def test_srdf_create_device_pair_sync( self, mck_create, mck_wait, mck_get): array_id = self.data.array remote_array = self.data.remote_array rdf_group_no = self.data.rdf_group_no_1 mode = utils.REP_SYNC device_id = self.data.device_id tgt_device_id = self.data.device_id2 rep_extra_specs = deepcopy(self.data.rep_extra_specs) rep_extra_specs[utils.ARRAY] = remote_array ref_payload = { 'executionOption': 'ASYNCHRONOUS', 'rdfMode': mode, 'localDeviceListCriteriaParam': {'localDeviceList': [device_id]}, 'rdfType': 'RDF1', 'establish': 'true'} ref_resource = 'rdf_group/%(rdfg)s/volume' % {'rdfg': rdf_group_no} ref_response = { 'array': array_id, 'remote_array': remote_array, 'src_device': device_id, 'tgt_device': tgt_device_id, 'session_info': self.data.rdf_group_vol_details} create_response = self.rest.srdf_create_device_pair( array_id, rdf_group_no, mode, device_id, rep_extra_specs, True) mck_create.assert_called_once_with( array_id, 'replication', ref_resource, ref_payload) mck_get.assert_called_once_with( array_id, rdf_group_no, device_id) self.assertEqual(ref_response, create_response) @mock.patch.object( rest.PowerMaxRest, 'get_rdf_pair_volume', return_value=tpd.PowerMaxData.rdf_group_vol_details) @mock.patch.object( rest.PowerMaxRest, 'wait_for_job', side_effect=exception.VolumeBackendAPIException('')) @mock.patch.object(rest.PowerMaxRest, 'create_resource', return_value=(200, 'job')) def test_srdf_create_device_pair_retry( self, mck_create, mck_wait, mck_get): array_id = self.data.array remote_array = self.data.remote_array rdf_group_no = self.data.rdf_group_no_1 mode = utils.REP_ASYNC device_id = self.data.device_id rep_extra_specs = deepcopy(self.data.rep_extra_specs) rep_extra_specs['array'] = remote_array self.assertRaises(exception.VolumeBackendAPIException, self.rest.srdf_create_device_pair, array_id, rdf_group_no, mode, device_id, rep_extra_specs, True) # 6 retries on a VolumeBackendAPIException self.assertEqual(6, mck_create.call_count) @mock.patch.object(rest.PowerMaxRest, 'get_storage_group_rdf_group_state', return_value=[utils.RDF_CONSISTENT_STATE]) def test_wait_for_rdf_group_sync(self, mck_get): array_id = self.data.array rdf_group_no = self.data.rdf_group_no_1 sg_name = self.data.default_sg_re_enabled rep_extra_specs = deepcopy(self.data.rep_extra_specs) rep_extra_specs['sync_retries'] = 2 rep_extra_specs['sync_interval'] = 1 
self.rest.wait_for_rdf_group_sync( array_id, sg_name, rdf_group_no, rep_extra_specs) mck_get.assert_called_once_with(array_id, sg_name, rdf_group_no) @mock.patch.object(rest.PowerMaxRest, 'get_storage_group_rdf_group_state', return_value=[utils.RDF_SYNCINPROG_STATE]) def test_wait_for_rdf_group_sync_fail(self, mck_get): array_id = self.data.array rdf_group_no = self.data.rdf_group_no_1 sg_name = self.data.default_sg_re_enabled rep_extra_specs = deepcopy(self.data.rep_extra_specs) rep_extra_specs['sync_retries'] = 1 rep_extra_specs['sync_interval'] = 1 self.assertRaises(exception.VolumeBackendAPIException, self.rest.wait_for_rdf_group_sync, array_id, sg_name, rdf_group_no, rep_extra_specs) @mock.patch.object(rest.PowerMaxRest, 'get_rdf_pair_volume', return_value=tpd.PowerMaxData.rdf_group_vol_details) def test_wait_for_rdf_pair_sync(self, mck_get): array_id = self.data.array rdf_group_no = self.data.rdf_group_no_1 sg_name = self.data.default_sg_re_enabled rep_extra_specs = deepcopy(self.data.rep_extra_specs) rep_extra_specs['sync_retries'] = 2 rep_extra_specs['sync_interval'] = 1 self.rest.wait_for_rdf_pair_sync( array_id, sg_name, rdf_group_no, rep_extra_specs) mck_get.assert_called_once_with(array_id, sg_name, rdf_group_no) @mock.patch.object( rest.PowerMaxRest, 'get_rdf_pair_volume', return_value=tpd.PowerMaxData.rdf_group_vol_details_not_synced) def test_wait_for_rdf_pair_sync_fail(self, mck_get): array_id = self.data.array rdf_group_no = self.data.rdf_group_no_1 sg_name = self.data.default_sg_re_enabled rep_extra_specs = deepcopy(self.data.rep_extra_specs) rep_extra_specs['sync_retries'] = 1 rep_extra_specs['sync_interval'] = 1 self.assertRaises(exception.VolumeBackendAPIException, self.rest.wait_for_rdf_pair_sync, array_id, sg_name, rdf_group_no, rep_extra_specs) def test_validate_unisphere_version_unofficial_success(self): version = 'x10.0.0.425' returned_version = {'version': version} with mock.patch.object(self.rest, "request", return_value=(200, returned_version)) as mock_req: valid_version = self.rest.validate_unisphere_version() self.assertTrue(valid_version) request_count = mock_req.call_count self.assertEqual(1, request_count) def test_validate_unisphere_version_unofficial_failure(self): version = 'T9.0.0.1054' self.rest.u4p_version = 'T9.0.0.1054' returned_version = {'version': version} with mock.patch.object(self.rest, "request", return_value=(200, returned_version)): valid_version = self.rest.validate_unisphere_version() self.assertFalse(valid_version) def test_validate_unisphere_version_unofficial_greater_than(self): version = 'x10.0.0.425' returned_version = {'version': version} with mock.patch.object(self.rest, "request", return_value=(200, returned_version)) as mock_req: valid_version = self.rest.validate_unisphere_version() self.assertTrue(valid_version) request_count = mock_req.call_count self.assertEqual(1, request_count) def test_validate_unisphere_version_101(self): version = 'T10.1.0.501' returned_version = {'version': version} with mock.patch.object(self.rest, "request", return_value=(200, returned_version)) as mock_req: valid_version = self.rest.validate_unisphere_version() self.assertTrue(valid_version) self.assertEqual(self.rest.u4p_version, rest.U4P_100_VERSION) request_count = mock_req.call_count self.assertEqual(1, request_count) def test_validate_unisphere_version_110(self): version = 'T11.1.0.501' returned_version = {'version': version} with mock.patch.object(self.rest, "request", return_value=(200, returned_version)): 
self.assertRaises(exception.InvalidConfigurationValue, self.rest.validate_unisphere_version) @mock.patch.object(rest.PowerMaxRest, '_build_uri_kwargs') @mock.patch.object(rest.PowerMaxRest, '_build_uri_legacy_args') def test_build_uri_legacy(self, mck_build_legacy, mck_build_kwargs): self.rest.build_uri('array', f_key='test') mck_build_legacy.assert_called_once() mck_build_kwargs.assert_not_called() @mock.patch.object(rest.PowerMaxRest, '_build_uri_kwargs') @mock.patch.object(rest.PowerMaxRest, '_build_uri_legacy_args') def test_build_uri_kwargs(self, mck_build_legacy, mck_build_kwargs): self.rest.build_uri(array='test', f_key='test') mck_build_legacy.assert_not_called() mck_build_kwargs.assert_called_once() def test_build_uri_legacy_args_private_no_version(self): target_uri = self.rest._build_uri_legacy_args( self.data.array, 'sloprovisioning', 'storagegroup', resource_name='test-sg', private=True, no_version=True) expected_uri = ( '/private/sloprovisioning/symmetrix/%(arr)s/storagegroup/test-sg' % {'arr': self.data.array}) self.assertEqual(target_uri, expected_uri) def test_build_uri_legacy_args_public_version(self): target_uri = self.rest._build_uri_legacy_args( self.data.array, 'sloprovisioning', 'storagegroup', resource_name='test-sg') expected_uri = ( '/%(ver)s/sloprovisioning/symmetrix/%(arr)s/storagegroup/test-sg' % {'ver': rest.U4P_100_VERSION, 'arr': self.data.array}) self.assertEqual(target_uri, expected_uri) def test_build_uri_kwargs_private_no_version(self): target_uri = self.rest._build_uri_kwargs( no_version=True, private=True, category='test') expected_uri = '/private/test' self.assertEqual(target_uri, expected_uri) def test_build_uri_kwargs_public_version(self): target_uri = self.rest._build_uri_kwargs(category='test') expected_uri = '/%(ver)s/test' % {'ver': rest.U4P_100_VERSION} self.assertEqual(target_uri, expected_uri) def test_build_uri_kwargs_full_uri(self): target_uri = self.rest._build_uri_kwargs( category='test-cat', resource_level='res-level', resource_level_id='id1', resource_type='res-type', resource_type_id='id2', resource='res', resource_id='id3', object_type='obj', object_type_id='id4') expected_uri = ( '/%(ver)s/test-cat/res-level/id1/res-type/id2/res/id3/obj/id4' % { 'ver': rest.U4P_100_VERSION}) self.assertEqual(target_uri, expected_uri) @mock.patch.object( rest.PowerMaxRest, 'request', return_value=(200, {'success': True})) def test_post_request(self, mck_request): test_uri = '/92/test/uri' test_op = 'performance metrics' test_filters = {'filters': False} response_obj = self.rest.post_request(test_uri, test_op, test_filters) mck_request.assert_called_once_with( test_uri, rest.POST, request_object=test_filters) self.assertEqual(response_obj, {'success': True}) def test_get_ip_interface_physical_port(self): array_id = self.data.array virtual_port = self.data.iscsi_dir_virtual_port ip_address = self.data.ip response_dir_port = self.rest.get_ip_interface_physical_port( array_id, virtual_port, ip_address) self.assertEqual(self.data.iscsi_dir_port, response_dir_port) @mock.patch.object( rest.PowerMaxRest, 'get_request', side_effect=[tpd.PowerMaxData.director_port_keys_empty, tpd.PowerMaxData.director_port_keys_multiple, {}]) def test_get_ip_interface_physical_port_exceptions(self, mck_get): array_id = self.data.array virtual_port = self.data.iscsi_dir_virtual_port ip_address = self.data.ip # No physical port keys returned with self.assertRaisesRegex( exception.VolumeBackendAPIException, 'are not associated a physical director:port.'): 
self.rest.get_ip_interface_physical_port( array_id, virtual_port, ip_address) # Multiple physical port keys returned with self.assertRaisesRegex( exception.VolumeBackendAPIException, 'associated with more than one physical director:port.'): self.rest.get_ip_interface_physical_port( array_id, virtual_port, ip_address) # Empty response with self.assertRaisesRegex( exception.VolumeBackendAPIException, 'Unable to get port IP interface from Virtual port'): self.rest.get_ip_interface_physical_port( array_id, virtual_port, ip_address) @mock.patch.object(rest.PowerMaxRest, 'get_volume_snaps', return_value=[{'snap_name': 'snap_name', 'snap_id': tpd.PowerMaxData.snap_id}]) def test_get_snap_id(self, mock_snaps): snap_id = self.rest.get_snap_id( self.data.array, self.data.device_id, self.data.test_snapshot_snap_name) self.assertEqual(self.data.snap_id, snap_id) @mock.patch.object(rest.PowerMaxRest, 'get_volume_snaps', side_effect=[[{'snap_name': 'generation_int', 'generation': 0}], [{'snap_name': 'generation_string', 'generation': '0'}]]) def test_get_snap_id_legacy_generation(self, mock_snaps): self.mock_object(self.rest, 'is_snap_id', False) for x in range(0, 2): snap_id = self.rest.get_snap_id( self.data.array, self.data.device_id, self.data.test_snapshot_snap_name) self.assertEqual('0', snap_id) def test_check_force(self): extra_specs = {'pool_name': 'Diamond+DSS+SRP_1+000197800123', 'slo': 'Diamond', 'srp': 'SRP_1', 'array': '000123456789', 'interval': 3, 'retries': 120} self.assertEqual( 'false', self.rest._check_force(extra_specs)) self.assertEqual( 'false', self.rest._check_force( extra_specs, force_flag=False)) self.assertEqual( 'true', self.rest._check_force( extra_specs, force_flag=True)) extra_specs[utils.FORCE_VOL_EDIT] = True self.assertEqual( 'true', self.rest._check_force(extra_specs)) self.assertEqual( 'true', self.rest._check_force( extra_specs, force_flag=False)) self.assertEqual( 'true', self.rest._check_force( extra_specs, force_flag=True)) def test_get_nvme_tcp_ip_address(self): array = self.data.array port_id = 'OR-1C:001' with mock.patch.object(self.rest, 'get_port', return_value= {'symmetrixPort': {'ip_addresses': ['10.10.10.1']}}): ip_addresses = self.rest.get_nvme_tcp_ip_address( array, port_id) self.assertIsNotNone(ip_addresses) self.assertEqual(ip_addresses[0], '10.10.10.1') def test_get_device_nguid(self): with mock.patch.object(self.rest, 'get_resource', return_value={'nguid': '1602533030324644' '0000976000012000'}): nguid = self.rest.get_device_nguid(self.data.array, 'fake_device_id') self.assertIsNotNone(nguid) self.assertEqual( nguid, '16025330303246440000976000012000') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/volume/drivers/dell_emc/powermax/test_powermax_utils.py0000664000175000017500000023012700000000000031547 0ustar00zuulzuul00000000000000# Copyright (c) 2020 Dell Inc. or its subsidiaries. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from copy import deepcopy import datetime from unittest import mock from ddt import data from ddt import ddt from cinder import exception from cinder.objects import fields from cinder.tests.unit import test from cinder.tests.unit.volume.drivers.dell_emc.powermax import ( powermax_data as tpd) from cinder.tests.unit.volume.drivers.dell_emc.powermax import ( powermax_fake_objects as tpfo) from cinder.volume.drivers.dell_emc.powermax import iscsi from cinder.volume.drivers.dell_emc.powermax import rest from cinder.volume.drivers.dell_emc.powermax import utils from cinder.volume import volume_types from cinder.volume import volume_utils @ddt class PowerMaxUtilsTest(test.TestCase): def setUp(self): self.data = tpd.PowerMaxData() self.mock_object(volume_utils, 'get_max_over_subscription_ratio') super(PowerMaxUtilsTest, self).setUp() self.replication_device = self.data.sync_rep_device configuration = tpfo.FakeConfiguration( None, 'UtilsTests', 1, 1, san_ip='1.1.1.1', san_login='smc', powermax_array=self.data.array, powermax_srp='SRP_1', san_password='smc', san_api_port=8443, powermax_port_groups=[self.data.port_group_name_i], replication_device=self.replication_device) self.mock_object(rest.PowerMaxRest, '_establish_rest_session', return_value=tpfo.FakeRequestsSession()) driver = iscsi.PowerMaxISCSIDriver(configuration=configuration) self.driver = driver self.common = self.driver.common self.utils = self.common.utils def test_get_volumetype_extra_specs(self): with mock.patch.object(volume_types, 'get_volume_type_extra_specs', return_value={'specs'}) as type_mock: # path 1: volume_type_id not passed in volume = deepcopy(self.data.test_volume) volume.volume_type_id = self.data.test_volume_type.id self.utils.get_volumetype_extra_specs(volume) type_mock.assert_called_once_with(self.data.test_volume_type.id) type_mock.reset_mock() # path 2: volume_type_id passed in self.utils.get_volumetype_extra_specs(volume, '123') type_mock.assert_called_once_with('123') type_mock.reset_mock() # path 3: no type_id volume = deepcopy(self.data.test_clone_volume) self.utils.get_volumetype_extra_specs(volume) type_mock.assert_not_called() def test_get_volumetype_extra_specs_exception(self): extra_specs = self.utils.get_volumetype_extra_specs( {'name': 'no_type_id'}) self.assertEqual({}, extra_specs) def test_get_host_short_name(self): host_under_16_chars = 'host_13_chars' host1 = self.utils.get_host_short_name( host_under_16_chars) self.assertEqual(host_under_16_chars, host1) host_over_16_chars = ( 'host_over_16_chars_host_over_16_chars_host_over_16_chars') # Check that the same md5 value is retrieved from multiple calls host2 = self.utils.get_host_short_name( host_over_16_chars) host3 = self.utils.get_host_short_name( host_over_16_chars) self.assertEqual(host2, host3) host_with_period = 'hostname.with.many.parts' ref_host_name = self.utils.generate_unique_trunc_host('hostname') host4 = self.utils.get_host_short_name(host_with_period) self.assertEqual(ref_host_name, host4) def test_get_volume_element_name(self): volume_id = 'ea95aa39-080b-4f11-9856-a03acf9112ad' volume_element_name = self.utils.get_volume_element_name(volume_id) expect_vol_element_name = ('OS-' + volume_id) self.assertEqual(expect_vol_element_name, volume_element_name) def test_truncate_string(self): # string is less than max number str_to_truncate = 'string' response = self.utils.truncate_string(str_to_truncate, 10) self.assertEqual(str_to_truncate, response) def test_get_default_oversubscription_ratio(self): default_ratio = 20.0 max_over_sub_ratio1 = 
30.0 returned_max = self.utils.get_default_oversubscription_ratio( max_over_sub_ratio1) self.assertEqual(max_over_sub_ratio1, returned_max) max_over_sub_ratio2 = 0.5 returned_max = self.utils.get_default_oversubscription_ratio( max_over_sub_ratio2) self.assertEqual(default_ratio, returned_max) def test_get_default_storage_group_name_slo_workload(self): srp_name = self.data.srp slo = self.data.slo workload = self.data.workload sg_name = self.utils.get_default_storage_group_name( srp_name, slo, workload) self.assertEqual(self.data.defaultstoragegroup_name, sg_name) def test_get_default_storage_group_name_no_slo(self): srp_name = self.data.srp slo = None workload = None sg_name = self.utils.get_default_storage_group_name( srp_name, slo, workload) self.assertEqual(self.data.default_sg_no_slo, sg_name) def test_get_default_storage_group_name_compr_disabled(self): srp_name = self.data.srp slo = self.data.slo workload = self.data.workload sg_name = self.utils.get_default_storage_group_name( srp_name, slo, workload, True) self.assertEqual(self.data.default_sg_compr_disabled, sg_name) def test_get_time_delta(self): start_time = 1487781721.09 end_time = 1487781758.16 delta = end_time - start_time ref_delta = str(datetime.timedelta(seconds=int(delta))) time_delta = self.utils.get_time_delta(start_time, end_time) self.assertEqual(ref_delta, time_delta) def test_get_short_protocol_type(self): # iscsi short_i_protocol = self.utils.get_short_protocol_type('iscsi') self.assertEqual('I', short_i_protocol) # fc short_f_protocol = self.utils.get_short_protocol_type('FC') self.assertEqual('F', short_f_protocol) # nvme-tcp short_n_protocol = self.utils.get_short_protocol_type('nvme-tcp') self.assertEqual('NT', short_n_protocol) # else other_protocol = self.utils.get_short_protocol_type('OTHER') self.assertEqual('OTHER', other_protocol) def test_get_temp_snap_name(self): source_device_id = self.data.device_id ref_name = self.data.temp_snapvx snap_name = self.utils.get_temp_snap_name(source_device_id) self.assertEqual(ref_name, snap_name) def test_get_array_and_device_id(self): volume = deepcopy(self.data.test_volume) external_ref = {u'source-name': u'00002'} array, device_id = self.utils.get_array_and_device_id( volume, external_ref) self.assertEqual(self.data.array, array) self.assertEqual('00002', device_id) # Test to check if device id returned is in upper case external_ref = {u'source-name': u'0028a'} __, device_id = self.utils.get_array_and_device_id( volume, external_ref) ref_device_id = u'0028A' self.assertEqual(ref_device_id, device_id) def test_get_array_and_device_id_exception(self): volume = deepcopy(self.data.test_volume) external_ref = {u'source-name': None} self.assertRaises(exception.VolumeBackendAPIException, self.utils.get_array_and_device_id, volume, external_ref) @data({u'source-name': u'000001'}, {u'source-name': u'00028A'}) def test_get_array_and_device_id_invalid_long_id(self, external_ref): volume = deepcopy(self.data.test_volume) # Test for device id more than 5 digits self.assertRaises(exception.VolumeBackendAPIException, self.utils.get_array_and_device_id, volume, external_ref) @data({u'source-name': u'01'}, {u'source-name': u'028A'}, {u'source-name': u'0001'}) def test_get_array_and_device_id_invalid_short_id(self, external_ref): volume = deepcopy(self.data.test_volume) # Test for device id less than 5 digits self.assertRaises(exception.VolumeBackendAPIException, self.utils.get_array_and_device_id, volume, external_ref) def test_get_pg_short_name(self): pg_under_12_chars = 
'pg_11_chars' pg1 = self.utils.get_pg_short_name(pg_under_12_chars) self.assertEqual(pg_under_12_chars, pg1) pg_over_12_chars = 'portgroup_over_12_characters' # Check that the same md5 value is retrieved from multiple calls pg2 = self.utils.get_pg_short_name(pg_over_12_chars) pg3 = self.utils.get_pg_short_name(pg_over_12_chars) self.assertEqual(pg2, pg3) def test_is_compression_disabled_true(self): # Compression disabled in extra specs extra_specs = self.data.extra_specs_disable_compression self.assertTrue(self.utils.is_compression_disabled(extra_specs)) # Compression disabled by no SL/WL combination extra_specs = deepcopy(self.data.vol_type_extra_specs_none_pool) self.assertTrue(self.utils.is_compression_disabled(extra_specs)) extra_specs3 = deepcopy(extra_specs) extra_specs3.update({utils.DISABLECOMPRESSION: ' True'}) self.assertTrue(self.utils.is_compression_disabled(extra_specs3)) extra_specs4 = deepcopy(extra_specs) extra_specs4.update({utils.DISABLECOMPRESSION: 'True'}) self.assertTrue(self.utils.is_compression_disabled(extra_specs4)) def test_is_compression_disabled_false(self): # Path 1: no compression extra spec set extra_specs = self.data.extra_specs self.assertFalse(self.utils.is_compression_disabled(extra_specs)) # Path 2: compression extra spec set to false extra_specs2 = deepcopy(extra_specs) extra_specs2.update({utils.DISABLECOMPRESSION: 'false'}) self.assertFalse(self.utils.is_compression_disabled(extra_specs2)) extra_specs3 = deepcopy(extra_specs) extra_specs3.update({utils.DISABLECOMPRESSION: ' False'}) self.assertFalse(self.utils.is_compression_disabled(extra_specs3)) extra_specs4 = deepcopy(extra_specs) extra_specs4.update({utils.DISABLECOMPRESSION: 'False'}) self.assertFalse(self.utils.is_compression_disabled(extra_specs4)) def test_change_compression_type_true(self): source_compr_disabled = True new_type_compr_disabled_1 = { 'extra_specs': {utils.DISABLECOMPRESSION: 'false'}} self.assertTrue(self.utils.change_compression_type( source_compr_disabled, new_type_compr_disabled_1)) new_type_compr_disabled_2 = { 'extra_specs': {utils.DISABLECOMPRESSION: ' False'}} self.assertTrue(self.utils.change_compression_type( source_compr_disabled, new_type_compr_disabled_2)) def test_change_compression_type_false(self): source_compr_disabled = True new_type_compr_disabled = { 'extra_specs': {utils.DISABLECOMPRESSION: 'true'}} self.assertFalse(self.utils.change_compression_type( source_compr_disabled, new_type_compr_disabled)) new_type_compr_disabled_2 = { 'extra_specs': {utils.DISABLECOMPRESSION: ' True'}} self.assertFalse(self.utils.change_compression_type( source_compr_disabled, new_type_compr_disabled_2)) def test_is_replication_enabled(self): is_re = self.utils.is_replication_enabled( self.data.vol_type_extra_specs_rep_enabled) self.assertTrue(is_re) is_re2 = self.utils.is_replication_enabled(self.data.extra_specs) self.assertFalse(is_re2) def test_get_replication_config(self): # Success, allow_extend false rep_device_list1 = [{'target_device_id': self.data.remote_array, 'remote_pool': self.data.srp, 'remote_port_group': self.data.port_group_name_f, 'rdf_group_label': self.data.rdf_group_name_1}] rep_config1 = self.utils.get_replication_config(rep_device_list1)[0] self.assertEqual(self.data.remote_array, rep_config1['array']) # Success, allow_extend true rep_device_list2 = rep_device_list1 rep_device_list2[0]['allow_extend'] = 'true' rep_config2 = self.utils.get_replication_config(rep_device_list2)[0] self.assertTrue(rep_config2['allow_extend']) # No rep_device_list 
rep_device_list3 = [] rep_config3 = self.utils.get_replication_config(rep_device_list3) self.assertIsNone(rep_config3) # Exception rep_device_list4 = [{'target_device_id': self.data.remote_array, 'remote_pool': self.data.srp}] self.assertRaises(exception.VolumeBackendAPIException, self.utils.get_replication_config, rep_device_list4) # Success, mode is async rep_device_list5 = rep_device_list2 rep_device_list5[0]['mode'] = 'async' rep_config5 = self.utils.get_replication_config(rep_device_list5)[0] self.assertEqual(utils.REP_ASYNC, rep_config5['mode']) # Success, mode is metro - no other options set rep_device_list6 = rep_device_list5 rep_device_list6[0]['mode'] = 'metro' rep_config6 = self.utils.get_replication_config(rep_device_list6)[0] self.assertFalse(rep_config6['metro_bias']) # Success, mode is metro - metro options true rep_device_list7 = rep_device_list6 rep_device_list7[0].update({'metro_use_bias': 'true'}) rep_config7 = self.utils.get_replication_config(rep_device_list7)[0] self.assertTrue(rep_config7['metro_bias']) # Success, no backend id self.assertIsNone(rep_config7.get(utils.BACKEND_ID)) # Success, backend id rep_device_list8 = rep_device_list6 rep_device_list8[0].update( {utils.BACKEND_ID: self.data.rep_backend_id_sync}) rep_config8 = self.utils.get_replication_config(rep_device_list8)[0] self.assertEqual( self.data.rep_backend_id_sync, rep_config8[utils.BACKEND_ID]) # Success, multi-rep multi_rep_device_list = self.data.multi_rep_device multi_rep_config = self.utils.get_replication_config( multi_rep_device_list) self.assertTrue(len(multi_rep_config) > 1) for rep_config in multi_rep_config: self.assertEqual(rep_config['array'], self.data.remote_array) def test_get_replication_config_sync_retries_intervals(self): # Default sync interval & retry values rep_device_list1 = [{'target_device_id': self.data.remote_array, 'remote_pool': self.data.srp, 'remote_port_group': self.data.port_group_name_f, 'rdf_group_label': self.data.rdf_group_name_1}] rep_config1 = self.utils.get_replication_config(rep_device_list1)[0] self.assertEqual(200, rep_config1['sync_retries']) self.assertEqual(3, rep_config1['sync_interval']) # User set interval & retry values rep_device_list2 = deepcopy(rep_device_list1) rep_device_list2[0].update({'sync_retries': 300, 'sync_interval': 1}) rep_config2 = self.utils.get_replication_config(rep_device_list2)[0] self.assertEqual(300, rep_config2['sync_retries']) self.assertEqual(1, rep_config2['sync_interval']) def test_is_volume_failed_over(self): vol = deepcopy(self.data.test_volume) vol.replication_status = fields.ReplicationStatus.FAILED_OVER is_fo1 = self.utils.is_volume_failed_over(vol) self.assertTrue(is_fo1) is_fo2 = self.utils.is_volume_failed_over(self.data.test_volume) self.assertFalse(is_fo2) is_fo3 = self.utils.is_volume_failed_over(None) self.assertFalse(is_fo3) def test_add_legacy_pools(self): pools = [{'pool_name': 'Diamond+None+SRP_1+000197800111'}, {'pool_name': 'Diamond+OLTP+SRP_1+000197800111'}] new_pools = self.utils.add_legacy_pools(pools) ref_pools = [{'pool_name': 'Diamond+None+SRP_1+000197800111'}, {'pool_name': 'Diamond+OLTP+SRP_1+000197800111'}, {'pool_name': 'Diamond+SRP_1+000197800111'}] self.assertEqual(ref_pools, new_pools) def test_add_promotion_pools(self): array = self.data.array pools = [{'pool_name': 'Diamond+None+SRP_1+000197800111', 'location_info': '000197800111#SRP_1#None#Diamond'}, {'pool_name': 'Gold+OLTP+SRP_1+000197800111', 'location_info': '000197800111#SRP_1#OLTP#Gold'}] new_pools = 
self.utils.add_promotion_pools(pools, array) ref_pools = [{'pool_name': 'Diamond+None+SRP_1+000197800111', 'location_info': '000197800111#SRP_1#None#Diamond'}, {'pool_name': 'Gold+OLTP+SRP_1+000197800111', 'location_info': '000197800111#SRP_1#OLTP#Gold'}, {'pool_name': 'Diamond+None+SRP_1+000197800123', 'location_info': '000197800123#SRP_1#None#Diamond'}, {'pool_name': 'Gold+OLTP+SRP_1+000197800123', 'location_info': '000197800123#SRP_1#OLTP#Gold'}] self.assertEqual(ref_pools, new_pools) def test_update_volume_group_name(self): group = self.data.test_group_1 ref_group_name = self.data.test_vol_grp_name vol_grp_name = self.utils.update_volume_group_name(group) self.assertEqual(ref_group_name, vol_grp_name) def test_update_volume_group_name_id_only(self): group = self.data.test_group_without_name ref_group_name = self.data.test_vol_grp_name_id_only vol_grp_name = self.utils.update_volume_group_name(group) self.assertEqual(ref_group_name, vol_grp_name) def test_get_volume_group_utils(self): array, intervals_retries = self.utils.get_volume_group_utils( self.data.test_group_1, interval=1, retries=1) ref_array = self.data.array self.assertEqual(ref_array, array) def test_update_volume_model_updates(self): volume_model_updates = [{'id': '1', 'status': 'available'}] volumes = [self.data.test_volume] ref_val = {'id': self.data.test_volume.id, 'status': 'error_deleting'} ret_val = self.utils.update_volume_model_updates( volume_model_updates, volumes, 'abc', status='error_deleting') self.assertEqual(ref_val, ret_val[1]) def test_update_volume_model_updates_empty_update_list(self): volume_model_updates = [] volumes = [self.data.test_volume] ref_val = [{'id': self.data.test_volume.id, 'status': 'available'}] ret_val = self.utils.update_volume_model_updates( volume_model_updates, volumes, 'abc') self.assertEqual(ref_val, ret_val) def test_update_volume_model_updates_empty_vol_list(self): volume_model_updates, volumes, ref_val = [], [], [] ret_val = self.utils.update_volume_model_updates( volume_model_updates, volumes, 'abc') self.assertEqual(ref_val, ret_val) def test_check_replication_matched(self): # Check 1: Volume is not part of a group self.utils.check_replication_matched( self.data.test_volume, self.data.extra_specs) group_volume = deepcopy(self.data.test_volume) group_volume.group = self.data.test_group with mock.patch.object(volume_utils, 'is_group_a_type', return_value=False): # Check 2: Both volume and group have the same rep status self.utils.check_replication_matched( group_volume, self.data.extra_specs) # Check 3: Volume and group have different rep status with mock.patch.object(self.utils, 'is_replication_enabled', return_value=True): self.assertRaises(exception.InvalidInput, self.utils.check_replication_matched, group_volume, self.data.extra_specs) def test_check_rep_status_enabled(self): # Check 1: not replication enabled with mock.patch.object(volume_utils, 'is_group_a_type', return_value=False): self.utils.check_rep_status_enabled(self.data.test_group) # Check 2: replication enabled, status enabled with mock.patch.object(volume_utils, 'is_group_a_type', return_value=True): self.utils.check_rep_status_enabled(self.data.test_rep_group) # Check 3: replication enabled, status disabled self.assertRaises(exception.InvalidInput, self.utils.check_rep_status_enabled, self.data.test_group) def test_get_replication_prefix(self): async_prefix = self.utils.get_replication_prefix(utils.REP_ASYNC) self.assertEqual('-RA', async_prefix) sync_prefix = self.utils.get_replication_prefix(utils.REP_SYNC) 
self.assertEqual('-RE', sync_prefix) metro_prefix = self.utils.get_replication_prefix(utils.REP_METRO) self.assertEqual('-RM', metro_prefix) def test_get_rdf_management_group_name(self): rep_config = {'rdf_group_label': self.data.rdf_group_name_1, 'mode': utils.REP_ASYNC} grp_name = self.utils.get_rdf_management_group_name(rep_config) self.assertEqual(self.data.rdf_managed_async_grp, grp_name) def test_is_metro_device(self): rep_config = {'mode': utils.REP_METRO} is_metro = self.utils.is_metro_device( rep_config, self.data.rep_extra_specs) self.assertTrue(is_metro) rep_config2 = {'mode': utils.REP_ASYNC} is_metro2 = self.utils.is_metro_device( rep_config2, self.data.rep_extra_specs) self.assertFalse(is_metro2) def test_does_vol_need_rdf_management_group(self): extra_specs = deepcopy(self.data.rep_extra_specs) extra_specs['rep_mode'] = utils.REP_SYNC self.assertFalse(self.utils.does_vol_need_rdf_management_group( extra_specs)) extra_specs[utils.REP_MODE] = utils.REP_ASYNC self.assertTrue(self.utils.does_vol_need_rdf_management_group( extra_specs)) def test_modify_snapshot_prefix_manage(self): snap_name = self.data.snapshot_id expected_snap_name = self.data.managed_snap_id updated_name = self.utils.modify_snapshot_prefix( snap_name, manage=True) self.assertEqual(expected_snap_name, updated_name) def test_modify_snapshot_prefix_unmanage(self): snap_name = self.data.managed_snap_id expected_snap_name = self.data.snapshot_id updated_name = self.utils.modify_snapshot_prefix( snap_name, unmanage=True) self.assertEqual(expected_snap_name, updated_name) def test_change_replication(self): non_rep_extra_specs = self.data.extra_specs rep_extra_specs = self.data.extra_specs_rep_enabled change_rep = self.utils.change_replication( non_rep_extra_specs, rep_extra_specs) self.assertTrue(change_rep) def test_change_replication_different_backend_id(self): rep_extra_specs_a = deepcopy(self.data.extra_specs_rep_enabled) rep_extra_specs_a[utils.REPLICATION_DEVICE_BACKEND_ID] = 'A' rep_extra_specs_b = deepcopy(self.data.extra_specs_rep_enabled) rep_extra_specs_b[utils.REPLICATION_DEVICE_BACKEND_ID] = 'B' change_rep = self.utils.change_replication( rep_extra_specs_a, rep_extra_specs_b) self.assertTrue(change_rep) def test_change_replication_no_change(self): non_rep_extra_specs_a = self.data.extra_specs non_rep_extra_specs_b = self.data.extra_specs change_rep = self.utils.change_replication( non_rep_extra_specs_a, non_rep_extra_specs_b) self.assertFalse(change_rep) def test_change_replication_no_change_same_backend_id(self): rep_extra_specs_a = deepcopy(self.data.extra_specs_rep_enabled) rep_extra_specs_a[utils.REPLICATION_DEVICE_BACKEND_ID] = 'A' rep_extra_specs_b = deepcopy(self.data.extra_specs_rep_enabled) rep_extra_specs_b[utils.REPLICATION_DEVICE_BACKEND_ID] = 'A' change_rep = self.utils.change_replication( rep_extra_specs_a, rep_extra_specs_b) self.assertFalse(change_rep) def test_get_child_sg_name(self): host_name = 'HostX' port_group_label = self.data.port_group_name_f # Slo and rep enabled extra_specs1 = { 'pool_name': u'Diamond+DSS+SRP_1+000197800123', 'slo': 'Diamond', 'workload': 'DSS', 'srp': 'SRP_1', 'array': self.data.array, 'interval': 3, 'retries': 120, 'replication_enabled': True, 'rep_mode': 'Synchronous', utils.PORTGROUPNAME: self.data.port_group_name_f} child_sg_name, do_disable_compression, rep_enabled = ( self.utils.get_child_sg_name( host_name, extra_specs1, port_group_label)) re_name = self.data.storagegroup_name_f + '-RE' self.assertEqual(re_name, child_sg_name) # Disable compression 
extra_specs2 = deepcopy(self.data.extra_specs_disable_compression) child_sg_name, do_disable_compression, rep_enabled = ( self.utils.get_child_sg_name( host_name, extra_specs2, port_group_label)) cd_name = self.data.storagegroup_name_f + '-CD' self.assertEqual(cd_name, child_sg_name) # No slo extra_specs3 = deepcopy(self.data.extra_specs) extra_specs3[utils.SLO] = None child_sg_name, do_disable_compression, rep_enabled = ( self.utils.get_child_sg_name( host_name, extra_specs3, port_group_label)) self.assertEqual(self.data.no_slo_sg_name, child_sg_name) def test_change_multiattach(self): extra_specs_ma_true = {'multiattach': ' True'} extra_specs_ma_false = {'multiattach': ' False'} self.assertTrue(self.utils.change_multiattach( extra_specs_ma_true, extra_specs_ma_false)) self.assertFalse(self.utils.change_multiattach( extra_specs_ma_true, extra_specs_ma_true)) self.assertFalse(self.utils.change_multiattach( extra_specs_ma_false, extra_specs_ma_false)) def test_is_volume_manageable(self): for volume in self.data.priv_vol_func_response_multi: self.assertTrue( self.utils.is_volume_manageable(volume)) for volume in self.data.priv_vol_func_response_multi_invalid: self.assertFalse( self.utils.is_volume_manageable(volume)) def test_is_volume_manageable_multi_sg(self): for volume in self.data.priv_vol_func_response_multi_sg: self.assertFalse( self.utils.is_volume_manageable(volume)) def test_is_snapshot_manageable(self): for volume in self.data.priv_vol_func_response_multi: self.assertTrue( self.utils.is_snapshot_manageable(volume)) for volume in self.data.priv_vol_func_response_multi_invalid: self.assertFalse( self.utils.is_snapshot_manageable(volume)) def test_get_volume_attached_hostname(self): attached_volume = deepcopy(self.data.test_volume) attached_volume.volume_attachment.objects = [ self.data.test_volume_attachment] # Success hostname = self.utils.get_volume_attached_hostname(attached_volume) self.assertEqual('HostX', hostname) def test_validate_qos_input_exception(self): qos_extra_spec = {'total_iops_sec': 90, 'DistributionType': 'Wrong', 'total_bytes_sec': 100} input_key = 'total_iops_sec' sg_value = 4000 self.assertRaises(exception.VolumeBackendAPIException, self.utils.validate_qos_input, input_key, sg_value, qos_extra_spec, {}) input_key = 'total_bytes_sec' sg_value = 4000 self.assertRaises(exception.VolumeBackendAPIException, self.utils.validate_qos_input, input_key, sg_value, qos_extra_spec, {}) def test_validate_qos_distribution_type(self): qos_extra_spec = {'total_iops_sec': 4000, 'DistributionType': 'Always', 'total_bytes_sec': 4194304000} input_prop_dict = {'total_iops_sec': 4000} sg_value = 'Always' ret_prop_dict = self.utils.validate_qos_distribution_type( sg_value, qos_extra_spec, input_prop_dict) self.assertEqual(input_prop_dict, ret_prop_dict) def test_validate_qos_cast_to_int(self): qos_extra_spec = {'total_iops_sec': '500', 'total_bytes_sec': '104857600', 'DistributionType': 'Always'} property_dict = {'host_io_limit_io_sec': 500} input_prop_dict = {'host_io_limit_io_sec': 500, 'host_io_limit_mb_sec': 100} input_key = 'total_bytes_sec' ret_prop_dict = self.utils.validate_qos_input( input_key, None, qos_extra_spec, property_dict) self.assertEqual(input_prop_dict, ret_prop_dict) def test_validate_qos_cast_to_int_drop_fraction(self): qos_extra_spec = {'total_iops_sec': '500', 'total_bytes_sec': '105000000', 'DistributionType': 'Always'} property_dict = {'host_io_limit_io_sec': 500} input_prop_dict = {'host_io_limit_io_sec': 500, 'host_io_limit_mb_sec': 100} input_key = 
'total_bytes_sec' ret_prop_dict = self.utils.validate_qos_input( input_key, None, qos_extra_spec, property_dict) self.assertEqual(input_prop_dict, ret_prop_dict) def test_compare_cylinders(self): source_cylinders = '12345' target_cylinders = '12345' self.utils.compare_cylinders(source_cylinders, target_cylinders) def test_compare_cylinders_target_larger(self): source_cylinders = '12345' target_cylinders = '12346' self.utils.compare_cylinders(source_cylinders, target_cylinders) def test_compare_cylinders_source_larger(self): source_cylinders = '12347' target_cylinders = '12346' self.assertRaises(exception.VolumeBackendAPIException, self.utils.compare_cylinders, source_cylinders, target_cylinders) def test_get_grp_volume_model_update(self): volume = self.data.test_volume volume_dict = self.data.provider_location group_id = self.data.gvg_group_id metadata = self.data.volume_metadata ref_model_update_meta = { 'id': volume.id, 'status': 'available', 'metadata': metadata, 'provider_location': str(volume_dict)} act_model_update_meta = self.utils.get_grp_volume_model_update( volume, volume_dict, group_id, metadata) self.assertEqual(ref_model_update_meta, act_model_update_meta) ref_model_update_no_meta = { 'id': volume.id, 'status': 'available', 'provider_location': str(volume_dict)} act_model_update_no_meta = self.utils.get_grp_volume_model_update( volume, volume_dict, group_id) self.assertEqual(ref_model_update_no_meta, act_model_update_no_meta) def test_get_service_level_workload(self): # Service Level set to None extra_specs = deepcopy(self.data.extra_specs) extra_specs[utils.SLO] = None sl_1, wl_1 = self.utils.get_service_level_workload(extra_specs) self.assertEqual('None', sl_1) self.assertEqual('None', wl_1) # Service Level set to None and Workload set extra_specs[utils.WORKLOAD] = 'DSS' sl_2, wl_2 = self.utils.get_service_level_workload(extra_specs) self.assertEqual('None', sl_2) self.assertEqual('None', wl_2) # Service Level and Workload both set extra_specs[utils.SLO] = 'Diamond' extra_specs[utils.WORKLOAD] = 'DSS' sl_3, wl_3 = self.utils.get_service_level_workload(extra_specs) self.assertEqual('Diamond', sl_3) self.assertEqual('DSS', wl_3) def test_get_new_tags_none(self): list_str1 = 'finance, production, test' list_str2 = 'production,test,finance' self.assertEqual( [], self.utils.get_new_tags(list_str1, list_str2)) def test_get_new_tags_one(self): list_str1 = 'finance, production, test' list_str2 = 'production,test' self.assertEqual( ['finance'], self.utils.get_new_tags(list_str1, list_str2)) def test_get_new_tags_two(self): list_str1 = 'finance, production, test, test2' list_str2 = 'production,test' self.assertEqual( ['finance', 'test2'], self.utils.get_new_tags( list_str1, list_str2)) def test_get_new_tags_case(self): list_str1 = 'Finance, Production, test, tEst2' list_str2 = 'production,test' self.assertEqual( ['Finance', 'tEst2'], self.utils.get_new_tags( list_str1, list_str2)) def test_get_new_tags_empty_string_first(self): list_str1 = '' list_str2 = 'production,test' self.assertEqual( [], self.utils.get_new_tags( list_str1, list_str2)) def test_get_new_tags_empty_string_second(self): list_str1 = 'production,test' list_str2 = ' ' self.assertEqual( ['production', 'test'], self.utils.get_new_tags( list_str1, list_str2)) def test_get_intersection(self): list_str1 = 'finance,production' list_str2 = 'production' common_list = self.utils._get_intersection( list_str1, list_str2) self.assertEqual(['production'], common_list) def test_get_intersection_unordered_list(self): list_str1 = 
'finance,production' list_str2 = 'production, finance' common_list = ( self.utils._get_intersection(list_str1, list_str2)) self.assertEqual(['finance', 'production'], common_list) def test_verify_tag_list_good(self): tag_list = ['no', 'InValid', 'characters', 'dash-allowed', '123', 'underscore_allowed', ' leading_space', 'trailing-space '] self.assertTrue(self.utils.verify_tag_list(tag_list)) def test_verify_tag_list_space(self): tag_list = ['bad space'] self.assertFalse(self.utils.verify_tag_list(tag_list)) def test_verify_tag_list_forward_slash(self): tag_list = ['\\forward\\slash'] self.assertFalse(self.utils.verify_tag_list(tag_list)) def test_verify_tag_list_square_bracket(self): tag_list = ['[squareBrackets]'] self.assertFalse(self.utils.verify_tag_list(tag_list)) def test_verify_tag_list_backward_slash(self): tag_list = ['/backward/slash'] self.assertFalse(self.utils.verify_tag_list(tag_list)) def test_verify_tag_list_curly_bracket(self): tag_list = ['{curlyBrackets}'] self.assertFalse(self.utils.verify_tag_list(tag_list)) def test_verify_tag_list_empty_list(self): tag_list = [] self.assertFalse(self.utils.verify_tag_list(tag_list)) def test_verify_tag_list_not_a_list(self): tag_list = '1,2,3,4' self.assertFalse(self.utils.verify_tag_list(tag_list)) def test_verify_tag_list_exceeds_8(self): tag_list = ['1', '2', '3', '4', '5', '6', '7', '8', '9'] self.assertFalse(self.utils.verify_tag_list(tag_list)) def test_convert_list_to_string(self): input_list = ['one', 'two', 'three'] output_string = self.utils.convert_list_to_string(input_list) self.assertEqual('one,two,three', output_string) def test_convert_list_to_string_input_string(self): input_list = 'one,two,three' output_string = self.utils.convert_list_to_string(input_list) self.assertEqual('one,two,three', output_string) def test_regex_check_case_2(self): test_template = 'shortHostName[:10]uuid[:5]' is_ok, case = self.utils.regex_check(test_template, True) self.assertTrue(is_ok) self.assertEqual('2', case) def test_regex_check_case_3(self): test_template = 'shortHostName[-10:]uuid[:5]' is_ok, case = self.utils.regex_check(test_template, True) self.assertTrue(is_ok) self.assertEqual('3', case) def test_regex_check_case_4(self): test_template = 'shortHostName[:7]finance' is_ok, case = self.utils.regex_check(test_template, True) self.assertTrue(is_ok) self.assertEqual('4', case) def test_regex_check_case_5(self): test_template = 'shortHostName[-6:]production' is_ok, case = self.utils.regex_check(test_template, True) self.assertTrue(is_ok) self.assertEqual('5', case) def test_regex_check_case_2_misspelt(self): test_template = 'shortHstName[:10]uuid[:5]' is_ok, case = self.utils.regex_check(test_template, True) self.assertFalse(is_ok) self.assertEqual('0', case) def test_regex_check_case_3_misspelt(self): test_template = 'shortHostName[-10:]uud[:5]' is_ok, case = self.utils.regex_check(test_template, True) self.assertFalse(is_ok) self.assertEqual('0', case) def test_regex_check_case_4_misspelt(self): test_template = 'shortHotName[:7]finance' is_ok, case = self.utils.regex_check(test_template, True) self.assertFalse(is_ok) self.assertEqual('0', case) def test_regex_check_case_5_misspelt(self): test_template = 'shortHstName[-6:]production' is_ok, case = self.utils.regex_check(test_template, True) self.assertFalse(is_ok) self.assertEqual('0', case) def test_regex_check_case_4_invalid_chars(self): test_template = 'shortHostName[:7]f*n&nce' is_ok, case = self.utils.regex_check(test_template, True) self.assertFalse(is_ok) 
self.assertEqual('0', case) def test_regex_check_case_5_invalid_chars(self): test_template = 'shortHostName[-6:]pr*ducti*n' is_ok, case = self.utils.regex_check(test_template, True) self.assertFalse(is_ok) self.assertEqual('0', case) def test_regex_check_case_2_missing_square_bracket(self): test_template = 'shortHostName[:10uuid[:5]' is_ok, case = self.utils.regex_check(test_template, True) self.assertFalse(is_ok) self.assertEqual('0', case) def test_regex_check_case_4_missing_square_bracket(self): test_template = 'shortHostName[:10finance' is_ok, case = self.utils.regex_check(test_template, True) self.assertFalse(is_ok) self.assertEqual('0', case) def test_prepare_string_entity_case_2(self): test_template = 'shortHostName[:10]uuid[:5]' altered_string = self.utils.prepare_string_entity( test_template, 'my_short_host_name', True) self.assertEqual( 'my_short_host_name[:10]uuid[:5]', altered_string) def test_prepare_string_entity_case_3(self): test_template = 'shortHostName[-10:]uuid[:5]' altered_string = self.utils.prepare_string_entity( test_template, 'my_short_host_name', True) self.assertEqual( 'my_short_host_name[-10:]uuid[:5]', altered_string) def test_prepare_string_entity_case_4(self): test_template = 'shortHostName[:7]finance' altered_string = self.utils.prepare_string_entity( test_template, 'my_short_host_name', True) self.assertEqual( 'my_short_host_name[:7]finance', altered_string) def test_prepare_string_entity_case_5(self): test_template = 'shortHostName[-6:]production' altered_string = self.utils.prepare_string_entity( test_template, 'my_short_host_name', True) self.assertEqual( 'my_short_host_name[-6:]production', altered_string) def test_prepare_string_with_uuid_case_2(self): test_template = 'shortHostName[:10]uuid[:5]' pass_two, uuid = self.utils.prepare_string_with_uuid( test_template, 'my_short_host_name', True) self.assertEqual( 'my_short_host_name[:10]944854dce45898b544a1cb9071d3cc35[:5]', pass_two) self.assertEqual('944854dce45898b544a1cb9071d3cc35', uuid) def test_prepare_string_with_uuid_case_3(self): test_template = 'shortHostName[-10:]uuid[:5]' pass_two, uuid = self.utils.prepare_string_with_uuid( test_template, 'my_short_host_name', True) self.assertEqual( 'my_short_host_name[-10:]944854dce45898b544a1cb9071d3cc35[:5]', pass_two) self.assertEqual('944854dce45898b544a1cb9071d3cc35', uuid) def test_check_upper_limit_short_host(self): self.assertRaises(exception.VolumeBackendAPIException, self.utils.check_upper_limit, 12, 12, True) def test_check_upper_limit_short_host_case_4(self): user_define_name = 'Little_too_long' self.assertRaises(exception.VolumeBackendAPIException, self.utils.check_upper_limit, 12, len(user_define_name), True) def test_validate_short_host_name_from_template_case_1(self): test_template = 'shortHostName' short_host_name = 'my_short_host' result_string = self.utils.validate_short_host_name_from_template( test_template, short_host_name) self.assertEqual('my_short_host', result_string) def test_validate_short_host_name_from_template_case_1_exceeds_16char( self): test_template = 'shortHostName' short_host_name = 'my_short_host_greater_than_16chars' result_string = self.utils.validate_short_host_name_from_template( test_template, short_host_name) self.assertEqual('6chars0bc43f914e', result_string) def test_validate_short_host_name_from_template_case_1_template_misspelt( self): test_template = 'shortHstName' short_host_name = 'my_short_host' self.assertRaises(exception.VolumeBackendAPIException, self.utils.validate_short_host_name_from_template, 
test_template, short_host_name) def test_validate_short_host_name_from_template_case_2(self): test_template = 'shortHostName[:10]uuid[:5]' short_host_name = 'my_short_host_name' result_string = self.utils.validate_short_host_name_from_template( test_template, short_host_name) self.assertEqual('my_short_h94485', result_string) def test_validate_short_host_name_from_template_case_2_shorter_than(self): test_template = 'shortHostName[:10]uuid[:5]' short_host_name = 'HostX' result_string = self.utils.validate_short_host_name_from_template( test_template, short_host_name) self.assertEqual('HostX699ea', result_string) def test_validate_short_host_name_from_template_case_3(self): test_template = 'shortHostName[-10:]uuid[:5]' short_host_name = 'my_short_host_name' result_string = self.utils.validate_short_host_name_from_template( test_template, short_host_name) self.assertEqual('_host_name94485', result_string) def test_validate_short_host_name_from_template_case_3_shorter_than(self): test_template = 'shortHostName[-10:]uuid[:5]' short_host_name = 'HostX' result_string = self.utils.validate_short_host_name_from_template( test_template, short_host_name) self.assertEqual('HostX699ea', result_string) def test_validate_short_host_name_from_template_case_4(self): test_template = 'shortHostName[:7]finance' short_host_name = 'my_short_host_name' result_string = self.utils.validate_short_host_name_from_template( test_template, short_host_name) self.assertEqual('my_shorfinance', result_string) def test_validate_short_host_name_from_template_case_5(self): test_template = 'shortHostName[-6:]production' short_host_name = 'my_short_host_name' result_string = self.utils.validate_short_host_name_from_template( test_template, short_host_name) self.assertEqual('t_nameproduction', result_string) def test_validate_short_host_name_exception_missing_minus(self): test_template = 'shortHostName[6:]production' short_host_name = 'my_short_host_name' self.assertRaises(exception.VolumeBackendAPIException, self.utils.validate_short_host_name_from_template, test_template, short_host_name) def test_validate_port_group_from_template_case_1(self): test_template = 'portGroupName' port_group_name = 'my_pg' result_string = self.utils.validate_port_group_name_from_template( test_template, port_group_name) self.assertEqual('my_pg', result_string) def test_validate_port_group_from_template_case_1_long(self): test_template = 'portGroupName' port_group_name = 'my_port_group_name' result_string = self.utils.validate_port_group_name_from_template( test_template, port_group_name) self.assertEqual('p_name5ba163', result_string) def test_validate_port_group_from_template_case_1_misspelt(self): test_template = 'portGr*upName' port_group_name = 'my_port_group_name' self.assertRaises(exception.VolumeBackendAPIException, self.utils.validate_port_group_name_from_template, test_template, port_group_name) def test_validate_port_group_from_template_case_2(self): test_template = 'portGroupName[:6]uuid[:5]' port_group_name = 'my_port_group_name' result_string = self.utils.validate_port_group_name_from_template( test_template, port_group_name) self.assertEqual('my_por3b02c', result_string) def test_validate_port_group_from_template_case_3(self): test_template = 'portGroupName[-6:]uuid[:5]' port_group_name = 'my_port_group_name' result_string = self.utils.validate_port_group_name_from_template( test_template, port_group_name) self.assertEqual('p_name3b02c', result_string) def test_validate_port_group_from_template_case_4(self): test_template = 
'portGroupName[:6]test' port_group_name = 'my_port_group_name' result_string = self.utils.validate_port_group_name_from_template( test_template, port_group_name) self.assertEqual('my_portest', result_string) def test_validate_port_group_from_template_case_5(self): test_template = 'portGroupName[-7:]test' port_group_name = 'my_port_group_name' result_string = self.utils.validate_port_group_name_from_template( test_template, port_group_name) self.assertEqual('up_nametest', result_string) def test_validate_port_group_name_exception_missing_minus(self): test_template = 'portGroupName[6:]test' port_group_name = 'my_port_group_name' self.assertRaises(exception.VolumeBackendAPIException, self.utils.validate_port_group_name_from_template, test_template, port_group_name) def test_validate_port_group_name_exception_chars_exceeded(self): test_template = 'portGroupName[:10]test' port_group_name = 'my_port_group_name' self.assertRaises(exception.VolumeBackendAPIException, self.utils.validate_port_group_name_from_template, test_template, port_group_name) def test_get_port_name_label_default(self): port_name_in = 'my_port_group_name' port_group_template = 'portGroupName' port_name_out = self.utils.get_port_name_label( port_name_in, port_group_template) self.assertEqual('p_name5ba163', port_name_out) def test_get_port_name_label_template(self): port_name_in = 'my_port_group_name' port_group_template = 'portGroupName[-6:]uuid[:5]' port_name_out = self.utils.get_port_name_label( port_name_in, port_group_template) self.assertEqual('p_name3b02c', port_name_out) def test_get_rdf_managed_storage_group(self): rdf_component_dict = ('OS-23_24_007-Asynchronous-rdf-sg', {'prefix': 'OS', 'rdf_label': '23_24_007', 'sync_mode': 'Asynchronous', 'after_mode': 'rdf-sg'}) async_rdf_details = ( self.utils.get_rdf_managed_storage_group( self.data.volume_details_attached_async)) self.assertEqual(rdf_component_dict, async_rdf_details) def test_get_storage_group_component_dict_no_slo(self): """Test for get_storage_group_component_dict. REST and no SLO. """ sg_no_slo = 'OS-myhost-No_SLO-os-iscsi-pg' component_dict = self.utils.get_storage_group_component_dict( sg_no_slo) self.assertEqual('myhost', component_dict['host']) self.assertEqual('OS', component_dict['prefix']) self.assertEqual('No_SLO', component_dict['no_slo']) self.assertEqual('os-iscsi-pg', component_dict['portgroup']) self.assertIsNone(component_dict['sloworkload']) self.assertIsNone(component_dict['srp']) def test_get_storage_group_component_dict_slo_workload_2(self): """Test for get_storage_group_component_dict. SLO, workload and test 2. """ sg_slo_workload = 'OS-myhost-SRP_1-DiamodOLTP-os-iscsi-pg-RE' component_dict = self.utils.get_storage_group_component_dict( sg_slo_workload) self.assertEqual('OS', component_dict['prefix']) self.assertEqual('myhost', component_dict['host']) self.assertEqual('SRP_1', component_dict['srp']) self.assertEqual('os-iscsi-pg', component_dict['portgroup']) self.assertEqual('DiamodOLTP', component_dict['sloworkload']) self.assertIsNone(component_dict['no_slo']) def test_get_storage_group_component_dict_compression_disabled(self): """Test for get_storage_group_component_dict. Compression disabled. 
""" sg_compression_disabled = 'OS-myhost-SRP_1-DiamodNONE-os-iscsi-pg-CD' component_dict = self.utils.get_storage_group_component_dict( sg_compression_disabled) self.assertEqual('OS', component_dict['prefix']) self.assertEqual('myhost', component_dict['host']) self.assertEqual('SRP_1', component_dict['srp']) self.assertEqual('os-iscsi-pg', component_dict['portgroup']) self.assertEqual('DiamodNONE', component_dict['sloworkload']) self.assertEqual('-CD', component_dict['after_pg']) self.assertIsNone(component_dict['no_slo']) def test_get_storage_group_component_dict_replication_enabled(self): """Test for get_storage_group_component_dict. Replication enabled. """ sg_slo_workload_rep = 'OS-myhost-SRP_1-DiamodOLTP-os-iscsi-pg-RE' component_dict = self.utils.get_storage_group_component_dict( sg_slo_workload_rep) self.assertEqual('OS', component_dict['prefix']) self.assertEqual('myhost', component_dict['host']) self.assertEqual('SRP_1', component_dict['srp']) self.assertEqual('os-iscsi-pg', component_dict['portgroup']) self.assertEqual('DiamodOLTP', component_dict['sloworkload']) self.assertEqual('-RE', component_dict['after_pg']) self.assertIsNone(component_dict['no_slo']) def test_get_storage_group_component_dict_slo_no_workload(self): """Test for get_storage_group_component_dict. SLO and no workload. """ sg_slo_no_workload = 'OS-myhost-SRP_1-DiamodNONE-os-iscsi-pg' component_dict = self.utils.get_storage_group_component_dict( sg_slo_no_workload) self.assertEqual('OS', component_dict['prefix']) self.assertEqual('myhost', component_dict['host']) self.assertEqual('SRP_1', component_dict['srp']) self.assertEqual('os-iscsi-pg', component_dict['portgroup']) self.assertEqual('DiamodNONE', component_dict['sloworkload']) self.assertIsNone(component_dict['no_slo']) def test_get_storage_group_component_dict_dashes(self): """Test for get_storage_group_component_dict, dashes.""" sg_host_with_dashes = ( 'OS-host-with-dashes-SRP_1-DiamodOLTP-myportgroup-RE') component_dict = self.utils.get_storage_group_component_dict( sg_host_with_dashes) self.assertEqual('host-with-dashes', component_dict['host']) self.assertEqual('OS', component_dict['prefix']) self.assertEqual('SRP_1', component_dict['srp']) self.assertEqual('DiamodOLTP', component_dict['sloworkload']) self.assertEqual('myportgroup', component_dict['portgroup']) self.assertEqual('-RE', component_dict['after_pg']) def test_delete_values_from_dict(self): """Test delete_values_from_dict""" delete_list = ['rdf_group_no', 'rep_mode', 'target_array_model', 'service_level', 'remote_array', 'target_device_id', 'replication_status', 'rdf_group_label'] data_dict = self.utils.delete_values_from_dict( self.data.retype_metadata_dict, delete_list) self.assertEqual({'device_id': self.data.device_id}, data_dict) def test_update_values_in_dict(self): """Test delete_values_from_dict""" update_list = [('default_sg_name', 'source_sg_name'), ('service_level', 'source_service_level')] update_dict = {'default_sg_name': 'default-sg', 'service_level': 'Diamond'} ret_dict = {'source_sg_name': 'default-sg', 'source_service_level': 'Diamond'} data_dict = self.utils.update_values_in_dict( update_dict, update_list) self.assertEqual(ret_dict, data_dict) def test_get_unique_device_ids_from_lists(self): list_a = ['00001', '00002', '00003'] list_b = ['00002', '00003', '00004'] unique_ids = self.utils.get_unique_device_ids_from_lists( list_a, list_b) self.assertEqual(['00004'], unique_ids) def test_update_payload_for_rdf_vol_create(self): payload = { 'array': self.data.array, 
'editStorageGroupActionParam': { 'expandStorageGroupParam': { 'addVolumeParam': {'create_new_volumes': 'False'}}}} updated_payload = self.utils.update_payload_for_rdf_vol_create( payload, self.data.remote_array, self.data.storagegroup_name_f) expected_payload = { 'array': self.data.array, 'editStorageGroupActionParam': { 'expandStorageGroupParam': { 'addVolumeParam': { 'create_new_volumes': 'True', 'remoteSymmSGInfoParam': { 'force': 'true', 'remote_symmetrix_1_id': self.data.remote_array, 'remote_symmetrix_1_sgs': [ self.data.storagegroup_name_f]}}}}} self.assertEqual(expected_payload, updated_payload) def test_is_retype_supported(self): # Volume source type not replicated, target type Metro replicated, # volume is detached, host-assisted retype supported volume = deepcopy(self.data.test_volume) volume.attach_status = 'detached' src_extra_specs = deepcopy(self.data.extra_specs) src_extra_specs['rep_mode'] = None tgt_extra_specs = deepcopy(self.data.rep_extra_specs) tgt_extra_specs['rep_mode'] = utils.REP_METRO rep_configs = self.data.multi_rep_config_list src_extra_specs[utils.REPLICATION_DEVICE_BACKEND_ID] = ( self.data.rep_backend_id_sync) tgt_extra_specs[utils.REPLICATION_DEVICE_BACKEND_ID] = ( self.data.rep_backend_id_metro) self.assertTrue(self.utils.is_retype_supported( volume, src_extra_specs, tgt_extra_specs, rep_configs)) # Volume source type not replicated, target type Metro replicated, # volume is attached, host-assisted retype not supported volume.attach_status = 'attached' self.assertFalse(self.utils.is_retype_supported( volume, src_extra_specs, tgt_extra_specs, rep_configs)) # Volume source type Async replicated, target type Metro replicated, # volume is attached, host-assisted retype not supported src_extra_specs['rep_mode'] = utils.REP_ASYNC self.assertFalse(self.utils.is_retype_supported( volume, src_extra_specs, tgt_extra_specs, rep_configs)) # Volume source type Metro replicated, target type Metro replicated, # volume is attached, host-assisted retype supported src_extra_specs['rep_mode'] = utils.REP_METRO self.assertTrue(self.utils.is_retype_supported( volume, src_extra_specs, tgt_extra_specs, rep_configs)) def test_validate_multiple_rep_device(self): self.utils.validate_multiple_rep_device(self.data.multi_rep_device) def test_validate_multiple_rep_device_non_unique_backend_id(self): rep_devices = deepcopy(self.data.multi_rep_device) rep_devices[0][utils.BACKEND_ID] = rep_devices[1][utils.BACKEND_ID] self.assertRaises( exception.InvalidConfigurationValue, self.utils.validate_multiple_rep_device, rep_devices) def test_validate_multiple_rep_device_promotion_start_backend_id(self): backend_id = utils.PMAX_FAILOVER_START_ARRAY_PROMOTION rep_devices = deepcopy(self.data.multi_rep_device) rep_devices[0][utils.BACKEND_ID] = backend_id self.assertRaises( exception.InvalidConfigurationValue, self.utils.validate_multiple_rep_device, rep_devices) def test_validate_multiple_rep_device_missing_backend_id(self): rep_devices = deepcopy(self.data.multi_rep_device) rep_devices[0].pop(utils.BACKEND_ID) self.assertRaises( exception.InvalidConfigurationValue, self.utils.validate_multiple_rep_device, rep_devices) def test_validate_multiple_rep_device_non_unique_rdf_label(self): rep_devices = deepcopy(self.data.multi_rep_device) rep_devices[0]['rdf_group_label'] = rep_devices[1]['rdf_group_label'] self.assertRaises( exception.InvalidConfigurationValue, self.utils.validate_multiple_rep_device, rep_devices) def test_validate_multiple_rep_device_non_unique_rdf_modes(self): rep_devices = 
[self.data.rep_dev_1, deepcopy(self.data.rep_dev_2)] rep_devices[1]['mode'] = rep_devices[0]['mode'] self.assertRaises( exception.InvalidConfigurationValue, self.utils.validate_multiple_rep_device, rep_devices) def test_validate_multiple_rep_device_defaulting_rdf_modes(self): rep_devices = [ deepcopy(self.data.rep_dev_1), deepcopy(self.data.rep_dev_2)] rep_devices[0]['mode'] = '' rep_devices[1]['mode'] = 'testing' self.assertRaises( exception.InvalidConfigurationValue, self.utils.validate_multiple_rep_device, rep_devices) def test_validate_multiple_rep_device_multiple_targets(self): rep_devices = [self.data.rep_dev_1, deepcopy(self.data.rep_dev_2)] rep_devices[1]['target_device_id'] = 1234 self.assertRaises( exception.InvalidConfigurationValue, self.utils.validate_multiple_rep_device, rep_devices) def test_validate_multiple_rep_device_length(self): rep_devices = [1, 2, 3, 4] self.assertRaises( exception.InvalidConfigurationValue, self.utils.validate_multiple_rep_device, rep_devices) def test_get_rep_config_single_rep(self): rep_configs = self.data.sync_rep_config_list rep_config = self.utils.get_rep_config('test', rep_configs) self.assertEqual(rep_config, rep_configs[0]) def test_get_rep_config_multi_rep(self): rep_configs = self.data.multi_rep_config_list backend_id = rep_configs[0][utils.BACKEND_ID] rep_device = self.utils.get_rep_config(backend_id, rep_configs) self.assertEqual(rep_configs[0], rep_device) def test_get_rep_config_fail_non_legacy_backend_id_message(self): rep_configs = self.data.multi_rep_config_list backend_id = 'invalid_backend_id' try: self.utils.get_rep_config(backend_id, rep_configs) except exception.InvalidInput as e: expected_str = 'Could not find replication_device. Legacy' excep_msg = str(e) self.assertNotIn(expected_str, excep_msg) def test_get_rep_config_fail_legacy_backend_id_message(self): rep_configs = self.data.multi_rep_config_list backend_id = utils.BACKEND_ID_LEGACY_REP try: self.utils.get_rep_config(backend_id, rep_configs) except exception.InvalidInput as e: expected_str = 'Could not find replication_device. 
Legacy' excep_msg = str(e) self.assertIn(expected_str, excep_msg) def test_get_rep_config_promotion_stats(self): rep_configs = self.data.multi_rep_config_list backend_id = 'testing' rep_device = self.utils.get_rep_config(backend_id, rep_configs, True) self.assertEqual(rep_configs[0], rep_device) def test_get_replication_targets(self): rep_targets_expected = [self.data.remote_array] rep_configs = self.data.multi_rep_config_list rep_targets_actual = self.utils.get_replication_targets(rep_configs) self.assertEqual(rep_targets_expected, rep_targets_actual) def test_validate_failover_request_success(self): is_failed_over = False is_promoted = False failover_backend_id = self.data.rep_backend_id_sync rep_configs = self.data.multi_rep_config_list primary_array = self.data.array array_list = [self.data.array] is_valid, msg = self.utils.validate_failover_request( is_failed_over, failover_backend_id, rep_configs, primary_array, array_list, is_promoted) self.assertTrue(is_valid) self.assertEqual("", msg) def test_validate_failover_request_already_failed_over(self): is_failed_over = True is_promoted = False failover_backend_id = self.data.rep_backend_id_sync rep_configs = self.data.multi_rep_config_list primary_array = self.data.array array_list = [self.data.array] is_valid, msg = self.utils.validate_failover_request( is_failed_over, failover_backend_id, rep_configs, primary_array, array_list, is_promoted) self.assertFalse(is_valid) expected_msg = ('Cannot failover, the backend is already in a failed ' 'over state, if you meant to failback, please add ' '--backend_id default to the command.') self.assertEqual(expected_msg, msg) def test_validate_failover_request_failback_missing_array(self): is_failed_over = True is_promoted = False failover_backend_id = 'default' rep_configs = self.data.multi_rep_config_list primary_array = self.data.array array_list = [self.data.remote_array] is_valid, msg = self.utils.validate_failover_request( is_failed_over, failover_backend_id, rep_configs, primary_array, array_list, is_promoted) self.assertFalse(is_valid) expected_msg = ('Cannot failback, the configured primary array is ' 'not currently available to perform failback to. ' 'Please ensure array %s is visible in ' 'Unisphere.') % primary_array self.assertEqual(expected_msg, msg) def test_validate_failover_request_promotion_finalize(self): is_failed_over = True is_promoted = True failover_backend_id = utils.PMAX_FAILOVER_START_ARRAY_PROMOTION rep_configs = self.data.multi_rep_config_list primary_array = self.data.array array_list = [self.data.array] is_valid, msg = self.utils.validate_failover_request( is_failed_over, failover_backend_id, rep_configs, primary_array, array_list, is_promoted) self.assertFalse(is_valid) expected_msg = ('Failover promotion currently in progress, please ' 'finish the promotion process and issue a failover ' 'using the "default" backend_id to complete this ' 'process.') self.assertEqual(expected_msg, msg) def test_validate_failover_request_invalid_failback(self): is_failed_over = False is_promoted = False failover_backend_id = 'default' rep_configs = self.data.multi_rep_config_list primary_array = self.data.array array_list = [self.data.array] is_valid, msg = self.utils.validate_failover_request( is_failed_over, failover_backend_id, rep_configs, primary_array, array_list, is_promoted) self.assertFalse(is_valid) expected_msg = ('Cannot failback, backend is not in a failed over ' 'state. 
If you meant to failover, please either omit ' 'the --backend_id parameter or use the --backend_id ' 'parameter with a valid backend id.') self.assertEqual(expected_msg, msg) def test_validate_replication_group_config_success(self): rep_configs = deepcopy(self.data.multi_rep_config_list) extra_specs = deepcopy( self.data.vol_type_extra_specs_rep_enabled_backend_id_sync) extra_specs[utils.REPLICATION_DEVICE_BACKEND_ID] = ( self.data.rep_backend_id_sync) self.utils.validate_replication_group_config( rep_configs, [extra_specs]) def test_validate_replication_group_config_no_rep_configured(self): rep_configs = None extra_specs_list = [ self.data.vol_type_extra_specs_rep_enabled_backend_id_sync] self.assertRaises(exception.InvalidInput, self.utils.validate_replication_group_config, rep_configs, extra_specs_list) try: self.utils.validate_replication_group_config( rep_configs, extra_specs_list) except exception.InvalidInput as e: expected_msg = ( 'Invalid input received: No replication devices are defined ' 'in cinder.conf, can not enable volume group replication.') self.assertEqual(expected_msg, e.msg) def test_validate_replication_group_config_vol_type_not_rep_enabled(self): rep_configs = self.data.multi_rep_config_list extra_specs_list = [self.data.vol_type_extra_specs] self.assertRaises(exception.InvalidInput, self.utils.validate_replication_group_config, rep_configs, extra_specs_list) try: self.utils.validate_replication_group_config( rep_configs, extra_specs_list) except exception.InvalidInput as e: expected_msg = ( 'Invalid input received: Replication is not enabled for a ' 'Volume Type, all Volume Types in a replication enabled ' 'Volume Group must have replication enabled.') self.assertEqual(expected_msg, e.msg) def test_validate_replication_group_config_cant_get_rep_config(self): rep_configs = self.data.multi_rep_config_list vt_extra_specs = ( self.data.vol_type_extra_specs_rep_enabled_backend_id_sync) vt_extra_specs[utils.REPLICATION_DEVICE_BACKEND_ID] = 'invalid' extra_specs_list = [vt_extra_specs] self.assertRaises(exception.InvalidInput, self.utils.validate_replication_group_config, rep_configs, extra_specs_list) try: self.utils.validate_replication_group_config( rep_configs, extra_specs_list) except exception.InvalidInput as e: expected_msg = ( 'Invalid input received: Unable to determine which ' 'rep_device to use from cinder.conf. Could not validate ' 'volume types being added to group.') self.assertEqual(expected_msg, e.msg) def test_validate_replication_group_config_non_sync_mode(self): rep_configs = self.data.multi_rep_config_list extra_specs_list = [ self.data.vol_type_extra_specs_rep_enabled_backend_id_async] self.assertRaises(exception.InvalidInput, self.utils.validate_replication_group_config, rep_configs, extra_specs_list) try: self.utils.validate_replication_group_config( rep_configs, extra_specs_list) except exception.InvalidInput as e: expected_msg = ( 'Invalid input received: Replication for Volume Type is not ' 'set to Synchronous. 
Only Synchronous can be used with ' 'replication groups') self.assertEqual(expected_msg, e.msg) @mock.patch.object(utils.PowerMaxUtils, 'get_rep_config') def test_validate_replication_group_config_multiple_rep_backend_ids( self, mck_get): side_effect_list = [ self.data.rep_config_sync, self.data.rep_config_sync_2] mck_get.side_effect = side_effect_list rep_configs = self.data.multi_rep_config_list ex_specs_1 = deepcopy( self.data.vol_type_extra_specs_rep_enabled_backend_id_sync) ex_specs_2 = deepcopy( self.data.vol_type_extra_specs_rep_enabled_backend_id_sync_2) extra_specs_list = [ex_specs_1, ex_specs_2] self.assertRaises(exception.InvalidInput, self.utils.validate_replication_group_config, rep_configs, extra_specs_list) mck_get.side_effect = side_effect_list try: self.utils.validate_replication_group_config( rep_configs, extra_specs_list) except exception.InvalidInput as e: expected_msg = ( 'Invalid input received: Multiple replication backend ids ' 'detected please ensure only a single replication device ' '(backend_id) is used for all Volume Types in a Volume ' 'Group.') self.assertEqual(expected_msg, e.msg) def test_validate_non_replication_group_config_success(self): extra_specs_list = [ self.data.vol_type_extra_specs] self.utils.validate_non_replication_group_config(extra_specs_list) def test_validate_non_replication_group_config_failure(self): extra_specs = {'pool_name': u'Diamond+DSS+SRP_1+000197800123', utils.IS_RE: ' True'} self.assertRaises(exception.InvalidInput, self.utils.validate_non_replication_group_config, [extra_specs]) try: self.utils.validate_non_replication_group_config([extra_specs]) except exception.InvalidInput as e: expected_msg = ( 'Invalid input received: Replication is enabled in one or ' 'more of the Volume Types being added to new Volume Group ' 'but the Volume Group is not replication enabled. 
Please ' 'enable replication in the Volume Group or select only ' 'non-replicated Volume Types.') self.assertEqual(expected_msg, e.msg) def test_get_migration_delete_extra_specs_replicated(self): volume = deepcopy(self.data.test_volume) metadata = deepcopy(self.data.volume_metadata) metadata[utils.IS_RE_CAMEL] = 'True' metadata['ReplicationMode'] = utils.REP_SYNC metadata['RDFG-Label'] = self.data.rdf_group_name_1 volume.metadata = metadata extra_specs = deepcopy(self.data.extra_specs) rep_configs = self.data.multi_rep_config_list updated_extra_specs = self.utils.get_migration_delete_extra_specs( volume, extra_specs, rep_configs) ref_extra_specs = deepcopy(self.data.extra_specs) ref_extra_specs[utils.IS_RE] = True ref_extra_specs[utils.REP_MODE] = utils.REP_SYNC ref_extra_specs[utils.REP_CONFIG] = self.data.rep_config_sync ref_extra_specs[utils.REPLICATION_DEVICE_BACKEND_ID] = ( self.data.rep_backend_id_sync) self.assertEqual(ref_extra_specs, updated_extra_specs) def test_get_migration_delete_extra_specs_non_replicated(self): volume = deepcopy(self.data.test_volume) volume.metadata = self.data.volume_metadata extra_specs = deepcopy(self.data.extra_specs) extra_specs[utils.IS_RE] = True updated_extra_specs = self.utils.get_migration_delete_extra_specs( volume, extra_specs, None) self.assertEqual(self.data.extra_specs, updated_extra_specs) def test_version_meet_req_true(self): version = '9.1.0.14' minimum_version = '9.1.0.5' self.assertTrue( self.utils.version_meet_req(version, minimum_version)) def test_version_meet_req_false(self): version = '9.1.0.3' minimum_version = '9.1.0.5' self.assertFalse( self.utils.version_meet_req(version, minimum_version)) def test_version_meet_req_major_true(self): version = '9.2.0.1' minimum_version = '9.1.0.5' self.assertTrue( self.utils.version_meet_req(version, minimum_version)) def test_parse_specs_from_pool_name_workload_included(self): pool_name = self.data.vol_type_extra_specs.get('pool_name') array_id, srp, service_level, workload = ( self.utils.parse_specs_from_pool_name(pool_name)) pool_details = pool_name.split('+') self.assertEqual(array_id, pool_details[3]) self.assertEqual(srp, pool_details[2]) self.assertEqual(workload, pool_details[1]) self.assertEqual(service_level, pool_details[0]) def test_parse_specs_from_pool_name_workload_not_included(self): pool_name = ( self.data.vol_type_extra_specs_next_gen_pool.get('pool_name')) array_id, srp, service_level, workload = ( self.utils.parse_specs_from_pool_name(pool_name)) pool_details = pool_name.split('+') self.assertEqual(array_id, pool_details[2]) self.assertEqual(srp, pool_details[1]) self.assertEqual(service_level, pool_details[0]) self.assertEqual(workload, str()) def test_parse_specs_from_pool_name_invalid_pool(self): pool_name = 'This+Is+An+Invalid+Pool' self.assertRaises(exception.VolumeBackendAPIException, self.utils.parse_specs_from_pool_name, pool_name) def test_parse_specs_from_pool_name_no_pool(self): self.assertRaises(exception.VolumeBackendAPIException, self.utils.parse_specs_from_pool_name, '') ././@PaxHeader0000000000000000000000000000003200000000000011450 xustar000000000000000026 mtime=1759315577.25912 cinder-27.0.0/cinder/tests/unit/volume/drivers/dell_emc/powerstore/0000775000175000017500000000000000000000000025416 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/volume/drivers/dell_emc/powerstore/__init__.py0000664000175000017500000000576200000000000027541 
0ustar00zuulzuul00000000000000# Copyright (c) 2020 Dell Inc. or its subsidiaries. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import json from unittest import mock import requests from cinder import context from cinder.tests.unit import test from cinder.volume import configuration from cinder.volume.drivers.dell_emc.powerstore import driver from cinder.volume.drivers.dell_emc.powerstore import options class MockResponse(requests.Response): def __init__(self, content=None, rc=200): super(MockResponse, self).__init__() if content is None: content = [] if isinstance(content, str): content = content.encode() self._content = content self.request = mock.MagicMock() self.status_code = rc def json(self, **kwargs): if isinstance(self._content, bytes): return super(MockResponse, self).json(**kwargs) return self._content @property def text(self): if not isinstance(self._content, bytes): return json.dumps(self._content) return super(MockResponse, self).text class TestPowerStoreDriver(test.TestCase): def setUp(self): super(TestPowerStoreDriver, self).setUp() self.context = context.RequestContext('fake', 'fake', auth_token=True) self.configuration = configuration.Configuration( options.POWERSTORE_OPTS, configuration.SHARED_CONF_GROUP ) self._set_overrides() self.driver = driver.PowerStoreDriver(configuration=self.configuration) self.driver.do_setup({}) self.iscsi_driver = self.driver self._override_shared_conf("storage_protocol", override="FC") self.fc_driver = driver.PowerStoreDriver( configuration=self.configuration ) self.fc_driver.do_setup({}) self._override_shared_conf("powerstore_nvme", override=True) self.nvme_driver = driver.PowerStoreDriver( configuration=self.configuration ) self.nvme_driver.do_setup({}) def _override_shared_conf(self, *args, **kwargs): return self.override_config(*args, **kwargs, group=configuration.SHARED_CONF_GROUP) def _set_overrides(self): # Override the defaults to fake values self._override_shared_conf("san_ip", override="127.0.0.1") self._override_shared_conf("san_login", override="test") self._override_shared_conf("san_password", override="test") ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/volume/drivers/dell_emc/powerstore/test_base.py0000664000175000017500000001276500000000000027754 0ustar00zuulzuul00000000000000# Copyright (c) 2020 Dell Inc. or its subsidiaries. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
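# --- Illustrative sketch (not part of the original test suite) ---------------
# The MockResponse helper defined in this package's __init__.py mimics just
# enough of a requests.Response (json(), .text, .status_code) to stand in for a
# REST reply once requests.request is patched.  A minimal, hedged usage sketch,
# kept in comments so the module body is unchanged; the fake body below is a
# made-up example, not real PowerStore API data:
#
#   from unittest import mock
#   from cinder.tests.unit.volume.drivers.dell_emc import powerstore
#
#   with mock.patch("requests.request") as mocked_request:
#       mocked_request.return_value = powerstore.MockResponse(
#           {"id": "fake"}, rc=200)
#       # any code path that calls requests.request(...) now receives the
#       # canned 200 reply with the JSON body above, as the tests below do.
# ------------------------------------------------------------------------------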
from unittest import mock from cinder import exception from cinder.tests.unit.volume.drivers.dell_emc import powerstore class TestBase(powerstore.TestPowerStoreDriver): @mock.patch("cinder.volume.drivers.dell_emc.powerstore.client." "PowerStoreClient.get_chap_config") def test_configuration(self, mock_chap): self.driver.check_for_setup_error() def test_configuration_rest_parameters_not_set(self): self.driver.adapter.client.rest_ip = None self.assertRaises(exception.InvalidInput, self.driver.check_for_setup_error) @mock.patch("cinder.volume.drivers.dell_emc.powerstore.client." "PowerStoreClient.get_array_version") def test_configuration_nvme_not_supported(self, mock_version): mock_version.return_value = "2.0.0.0" self.nvme_driver.do_setup({}) self.assertRaises(exception.InvalidInput, self.nvme_driver.check_for_setup_error) @mock.patch("cinder.volume.drivers.dell_emc.powerstore.client." "PowerStoreClient.get_chap_config") @mock.patch("cinder.volume.drivers.dell_emc.powerstore.client." "PowerStoreClient.get_metrics") def test_update_volume_stats(self, mock_metrics, mock_chap): mock_metrics.return_value = { "physical_total": 2147483648, "physical_used": 1073741824, } self.driver.check_for_setup_error() self.driver._update_volume_stats() @mock.patch("cinder.volume.drivers.dell_emc.powerstore.client." "PowerStoreClient.get_chap_config") @mock.patch("requests.request") def test_update_volume_stats_bad_status(self, mock_metrics, mock_chap): mock_metrics.return_value = powerstore.MockResponse(rc=400) self.driver.check_for_setup_error() error = self.assertRaises(exception.VolumeBackendAPIException, self.driver._update_volume_stats) self.assertIn("Failed to query PowerStore metrics", error.msg) @mock.patch("cinder.volume.drivers.dell_emc.powerstore.client." "PowerStoreClient.get_array_version") @mock.patch("cinder.volume.drivers.dell_emc.powerstore.client." "PowerStoreClient.get_chap_config") def test_configuration_with_replication(self, mock_chap, mock_version): replication_device = [ { "backend_id": "repl_1", "san_ip": "127.0.0.2", "san_login": "test_1", "san_password": "test_2" } ] mock_version.return_value = "3.0.0.0" self._override_shared_conf("replication_device", override=replication_device) self.driver.do_setup({}) self.driver.check_for_setup_error() self.assertEqual(2, len(self.driver.adapters)) @mock.patch("cinder.volume.drivers.dell_emc.powerstore.client." "PowerStoreClient.get_chap_config") def test_configuration_with_replication_2_rep_devices(self, mock_chap): device = { "backend_id": "repl_1", "san_ip": "127.0.0.2", "san_login": "test_1", "san_password": "test_2" } replication_device = [device] * 2 self._override_shared_conf("replication_device", override=replication_device) self.driver.do_setup({}) error = self.assertRaises(exception.InvalidInput, self.driver.check_for_setup_error) self.assertIn("PowerStore driver does not support more than one " "replication device.", error.msg) @mock.patch("cinder.volume.drivers.dell_emc.powerstore.client." "PowerStoreClient.get_array_version") @mock.patch("cinder.volume.drivers.dell_emc.powerstore.client." 
"PowerStoreClient.get_chap_config") def test_configuration_with_replication_failed_over(self, mock_chap, mock_version): replication_device = [ { "backend_id": "repl_1", "san_ip": "127.0.0.2", "san_login": "test_1", "san_password": "test_2" } ] mock_version.return_value = "3.0.0.0" self._override_shared_conf("replication_device", override=replication_device) self.driver.do_setup({}) self.driver.check_for_setup_error() self.driver.active_backend_id = "repl_1" self.assertFalse(self.driver.replication_enabled) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/volume/drivers/dell_emc/powerstore/test_client.py0000664000175000017500000002753700000000000030323 0ustar00zuulzuul00000000000000# Copyright (c) 2021 Dell Inc. or its subsidiaries. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from unittest import mock import uuid import ddt import requests.exceptions from cinder import exception from cinder.tests.unit import fake_constants from cinder.tests.unit import test from cinder.tests.unit.volume.drivers.dell_emc.powerstore import MockResponse from cinder.volume.drivers.dell_emc.powerstore import ( exception as powerstore_exception) from cinder.volume.drivers.dell_emc.powerstore import client CLIENT_OPTIONS = { "rest_ip": "127.0.0.1", "rest_username": "fake_user", "rest_password": "fake_password", "verify_certificate": False, "certificate_path": None, "rest_api_connect_timeout": 60, "rest_api_read_timeout": 60 } ISCSI_IP_POOL_RESP = [ { "address": "1.2.3.4", "ip_port": { "target_iqn": "iqn.2022-07.com.dell:dellemc-powerstore-fake-iqn-1" }, }, { "address": "5.6.7.8", "ip_port": { "target_iqn": "iqn.2022-07.com.dell:dellemc-powerstore-fake-iqn-1" }, }, ] NVME_IP_POOL_RESP = [ { "address": "11.22.33.44" }, { "address": "55.66.77.88" } ] QOS_IO_RULE_PARAMS = { "name": "io-rule-6b6e5489-4b5b-4468-a1f7-32cec2ffa3bf", "type": "Absolute", "max_iops": "200", "max_bw": "18000", "burst_percentage": "50" } QOS_POLICY_PARAMS = { "name": "qos-policy-6b6e5489-4b5b-4468-a1f7-32cec2ffa3bf", "io_limit_rule_id": "9beb10ff-a00c-4d88-a7d9-692be2b3073f" } QOS_UPDATE_IO_RULE_PARAMS = { "type": "Absolute", "max_iops": "500", "max_bw": "225000", "burst_percentage": "89" } @ddt.ddt class TestClient(test.TestCase): def setUp(self): super(TestClient, self).setUp() self.client = client.PowerStoreClient(**CLIENT_OPTIONS) self.fake_volume = str(uuid.uuid4()) @ddt.data(("iSCSI", ISCSI_IP_POOL_RESP), ("NVMe", NVME_IP_POOL_RESP)) @ddt.unpack @mock.patch("requests.request") def test_get_ip_pool_address(self, protocol, ip_pool, mock_request): mock_request.return_value = MockResponse(ip_pool, rc=200) response = self.client.get_ip_pool_address(protocol) mock_request.assert_called_once() self.assertEqual(response, ip_pool) @mock.patch("requests.request") def test_get_volume_nguid(self, mock_request): mock_request.return_value = MockResponse( content={ "nguid": "nguid.76e02b0999y439958ttf546800ea7fe8" }, rc=200 ) 
self.assertEqual(self.client.get_volume_nguid(self.fake_volume), "76e02b0999y439958ttf546800ea7fe8") @mock.patch("requests.request") def test_get_array_version(self, mock_request): mock_request.return_value = MockResponse( content=[ { "release_version": "3.0.0.0", } ], rc=200 ) self.assertEqual(self.client.get_array_version(), "3.0.0.0") @mock.patch("requests.request") def test_get_qos_policy_id_by_name(self, mock_request): mock_request.return_value = MockResponse( content=[ { "id": "d69f7131-4617-4bae-89f8-a540a6bda94b", } ], rc=200 ) self.assertEqual( self.client.get_qos_policy_id_by_name("qos-" "policy-6b6e5489" "-4b5b-4468-a1f7-" "32cec2ffa3bf"), "d69f7131-4617-4bae-89f8-a540a6bda94b") @mock.patch("requests.request") def test_get_qos_policy_id_by_name_exception(self, mock_request): mock_request.return_value = MockResponse(rc=400) self.assertRaises( exception.VolumeBackendAPIException, self.client.get_qos_policy_id_by_name, "qos-policy-6b6e5489-4b5b-4468-a1f7-32cec2ffa3bf") @mock.patch("requests.request") def test_create_qos_io_rule(self, mock_request): mock_request.return_value = MockResponse( content={ "id": "9beb10ff-a00c-4d88-a7d9-692be2b3073f" }, rc=200 ) self.assertEqual( self.client.create_qos_io_rule(QOS_IO_RULE_PARAMS), "9beb10ff-a00c-4d88-a7d9-692be2b3073f") @mock.patch("requests.request") def test_create_duplicate_qos_io_rule(self, mock_request): mock_request.return_value = MockResponse( content={ "messages": [ { "code": "0xE0A0E0010009", "severity": "Error", "message_l10n": "The rule name " "io-rule-9899a65f-70fe-46c9-8f6c-22625c7e19df " "is already used by another rule. " "It needs to be unique (case-insensitive). " "Please use a different name.", "arguments": [ "io-rule-6b6e5489-4b5b-4468-a1f7-32cec2ffa3bf" ] } ] }, rc=400 ) self.assertRaises( powerstore_exception.DellPowerStoreQoSIORuleExists, self.client.create_qos_io_rule, QOS_IO_RULE_PARAMS) @mock.patch("requests.request") def test_create_duplicate_qos_io_rule_with_unexpected_error( self, mock_request): mock_request.return_value = MockResponse( content={ "messages": [ { "code": "0xE0101001000C", "severity": "Error", "message_l10n": "The system encountered unexpected " "backend errors. " "Please contact support." } ] }, rc=400 ) self.assertRaises( powerstore_exception.DellPowerStoreQoSIORuleExists, self.client.create_qos_io_rule, QOS_IO_RULE_PARAMS) @mock.patch("requests.request") def test_create_qos_policy(self, mock_request): mock_request.return_value = MockResponse( content={ "id": "d69f7131-4617-4bae-89f8-a540a6bda94b", }, rc=200 ) self.assertEqual( self.client.create_qos_policy(QOS_POLICY_PARAMS), "d69f7131-4617-4bae-89f8-a540a6bda94b") @mock.patch("requests.request") def test_create_duplicate_qos_policy(self, mock_request): mock_request.return_value = MockResponse( content={ "messages": [ { "code": "0xE02020010004", "severity": "Error", "message_l10n": "The new policy name qos-policy-" "6b6e5489-4b5b-4468-a1f7-32cec2ffa3bf " "is in use. 
It must be unique " "regardless of character cases.", "arguments": [ "qos-policy-6b6e5489-4b5b-4468-a1f7-32cec2ffa3bf" ] } ] }, rc=400 ) self.assertRaises( powerstore_exception.DellPowerStoreQoSPolicyExists, self.client.create_qos_policy, QOS_POLICY_PARAMS) @mock.patch("requests.request") def test_update_volume_with_qos_policy(self, mock_request): mock_request.return_value = MockResponse(rc=200) self.client.update_volume_with_qos_policy( fake_constants.VOLUME_ID, "qos-policy-6b6e5489-4b5b-4468-a1f7-32cec2ffa3bf") mock_request.assert_called_once() @mock.patch("requests.request") def test_update_volume_with_qos_policy_exception(self, mock_request): mock_request.return_value = MockResponse(rc=400) self.assertRaises(exception.VolumeBackendAPIException, self.client.update_volume_with_qos_policy, fake_constants.VOLUME_ID, "qos-policy-6b6e5489-4b5b-4468-a1f7-32cec2ffa3bf") @mock.patch("requests.request") def test_update_qos_io_rule(self, mock_request): mock_request.return_value = MockResponse(rc=200) self.client.update_qos_io_rule( "io-rule-6b6e5489-4b5b-4468-a1f7-32cec2ffa3bf", QOS_UPDATE_IO_RULE_PARAMS) mock_request.assert_called_once() @mock.patch("requests.request") def test_update_qos_io_rule_exception(self, mock_request): mock_request.return_value = MockResponse(rc=400) self.assertRaises(exception.VolumeBackendAPIException, self.client.update_qos_io_rule, "io-rule-6b6e5489-4b5b-4468-a1f7-32cec2ffa3bf", QOS_UPDATE_IO_RULE_PARAMS) @mock.patch("requests.request") def test_get_request_timeout_exception(self, mock_request): mock_request.return_value = MockResponse( rc=501 ) error = self.assertRaises( exception.VolumeBackendAPIException, self.client.get_array_version) self.assertEqual('Bad or unexpected response from the ' 'storage volume backend API: Failed to ' 'query PowerStore array version.', error.msg) @mock.patch("requests.request") def test_send_get_request_connect_timeout_exception(self, mock_request): mock_request.side_effect = requests.exceptions.ConnectTimeout() r, resp = self.client._send_request("GET", "/api/version") self.assertEqual(500, r.status_code) @mock.patch("requests.request") def test_send_get_request_read_timeout_exception(self, mock_request): mock_request.side_effect = requests.exceptions.ReadTimeout() r, resp = self.client._send_request("GET", "/api/version") self.assertEqual(500, r.status_code) @mock.patch("requests.request") def test_send_post_request_connect_timeout_exception(self, mock_request): params = {} mock_request.side_effect = requests.exceptions.ConnectTimeout() r, resp = self.client._send_request("POST", "/metrics/generate", params) self.assertEqual(500, r.status_code) @mock.patch("requests.request") def test_send_post_request_read_timeout_exception(self, mock_request): params = {} mock_request.side_effect = requests.exceptions.ReadTimeout() r, resp = self.client._send_request("POST", "/metrics/generate", params) self.assertEqual(500, r.status_code) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/volume/drivers/dell_emc/powerstore/test_nfs.py0000664000175000017500000004202700000000000027622 0ustar00zuulzuul00000000000000# Copyright (c) 2021 Dell Inc. or its subsidiaries. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import errno import os from unittest import mock import ddt from oslo_concurrency import processutils as putils from oslo_utils import imageutils from oslo_utils import units from cinder import context from cinder import exception from cinder.image import image_utils from cinder.tests.unit import fake_constants as fake from cinder.tests.unit import fake_snapshot from cinder.tests.unit import fake_volume from cinder.tests.unit import test from cinder.volume import configuration as conf from cinder.volume.drivers.dell_emc.powerstore import nfs from cinder.volume import volume_utils NFS_CONFIG = {'max_over_subscription_ratio': 1.0, 'reserved_percentage': 0, 'nfs_sparsed_volumes': True, 'nfs_qcow2_volumes': False, 'nas_secure_file_permissions': 'false', 'nas_secure_file_operations': 'false'} QEMU_IMG_INFO_OUT1 = """{ "image": "%(volid)s", "format": "raw", "virtual-size": %(size_b)s, "actual-size": 177152 }""" QEMU_IMG_INFO_OUT2 = """{ "image": "%(volid)s", "format": "qcow2", "virtual-size": %(size_b)s, "actual-size": 177152 }""" QEMU_IMG_INFO_OUT3 = """{ "image": "volume-%(volid)s.%(snapid)s", "format": "qcow2", "virtual-size": %(size_b)s, "actual-size": 200704, "cluster-size": 65536, "backing-filename": "volume-%(volid)s", "backing-filename-format": "qcow2", "format-specific": { "compat": 1.1, "lazy-refcounts": false, "refcount-bits": 16, "corrupt": false } }""" @ddt.ddt class PowerStoreNFSDriverInitializeTestCase(test.TestCase): TEST_NFS_HOST = 'nfs-host1' def setUp(self): super(PowerStoreNFSDriverInitializeTestCase, self).setUp() self.context = mock.Mock() self.create_configuration() self.override_config('compute_api_class', 'unittest.mock.Mock') self.drv = nfs.PowerStoreNFSDriverInitialization( configuration=self.configuration) def create_configuration(self): config = conf.Configuration(None) config.append_config_values(nfs.nfs_opts) self.configuration = config def test_check_multiattach_support(self): drv = self.drv self.configuration.nfs_qcow2_volumes = False drv._check_multiattach_support() self.assertEqual(not self.configuration.nfs_qcow2_volumes, drv.multiattach_support) def test_check_multiattach_support_disable(self): drv = self.drv drv.configuration.nfs_qcow2_volumes = True drv._check_multiattach_support() self.assertEqual(not self.configuration.nfs_qcow2_volumes, drv.multiattach_support) def test_check_snapshot_support(self): drv = self.drv drv.configuration.nfs_snapshot_support = True drv.configuration.nas_secure_file_operations = 'false' drv._check_snapshot_support() self.assertTrue(drv.configuration.nfs_snapshot_support) def test_check_snapshot_support_disable(self): drv = self.drv drv.configuration.nfs_snapshot_support = False drv.configuration.nas_secure_file_operations = 'false' self.assertRaises(exception.VolumeDriverException, drv._check_snapshot_support) def test_check_snapshot_support_nas_true(self): drv = self.drv drv.configuration.nfs_snapshot_support = True drv.configuration.nas_secure_file_operations = 'true' self.assertRaises(exception.VolumeDriverException, drv._check_snapshot_support) @mock.patch("cinder.volume.drivers.nfs.NfsDriver.do_setup") def 
test_do_setup(self, mock_super_do_setup): drv = self.drv drv.configuration.nas_host = self.TEST_NFS_HOST mock_check_multiattach_support = self.mock_object( drv, '_check_multiattach_support' ) drv.do_setup(self.context) self.assertTrue(mock_check_multiattach_support.called) def test_check_package_is_installed(self): drv = self.drv package = 'dellfcopy' mock_execute = self.mock_object(drv, '_execute') drv._check_package_is_installed(package) mock_execute.assert_called_once_with(package, check_exit_code=False, run_as_root=False) def test_check_package_is_not_installed(self): drv = self.drv package = 'dellfcopy' drv._execute = mock.Mock( side_effect=OSError( errno.ENOENT, 'No such file or directory' ) ) self.assertRaises(exception.VolumeDriverException, drv._check_package_is_installed, package) drv._execute.assert_called_once_with(package, check_exit_code=False, run_as_root=False) def test_check_for_setup_error(self): drv = self.drv mock_check_package_is_installed = self.mock_object( drv, '_check_package_is_installed') drv.check_for_setup_error() mock_check_package_is_installed.assert_called_once_with('dellfcopy') def test_check_for_setup_error_not_passed(self): drv = self.drv drv._execute = mock.Mock( side_effect=OSError( errno.ENOENT, 'No such file or directory' ) ) self.assertRaises(exception.VolumeDriverException, drv.check_for_setup_error) drv._execute.assert_called_once_with('dellfcopy', check_exit_code=False, run_as_root=False) def test_update_volume_stats_has_multiattach(self): drv = self.drv self.mock_object(nfs.NfsDriver, '_update_volume_stats') drv.multiattach_support = True drv._stats = {} drv._update_volume_stats() self.assertIn('multiattach', drv._stats) self.assertTrue(drv._stats['multiattach']) @ddt.ddt class PowerStoreNFSDriverTestCase(test.TestCase): TEST_NFS_HOST = 'nfs-host1' TEST_NFS_SHARE_PATH = '/export' TEST_NFS_EXPORT = '%s:%s' % (TEST_NFS_HOST, TEST_NFS_SHARE_PATH) TEST_SIZE_IN_GB = 1 TEST_MNT_POINT = '/mnt/nfs' TEST_MNT_POINT_BASE_EXTRA_SLASH = '/opt/stack/data/cinder//mnt' TEST_MNT_POINT_BASE = '/mnt/test' TEST_LOCAL_PATH = '/mnt/nfs/volume-123' TEST_FILE_NAME = 'test.txt' VOLUME_UUID = 'abcdefab-cdef-abcd-efab-cdefabcdefab' def setUp(self): super(PowerStoreNFSDriverTestCase, self).setUp() self.configuration = mock.Mock(conf.Configuration) self.configuration.append_config_values(mock.ANY) self.configuration.nfs_sparsed_volumes = True self.configuration.nas_secure_file_permissions = 'false' self.configuration.nas_secure_file_operations = 'false' self.configuration.nfs_mount_point_base = self.TEST_MNT_POINT_BASE self.configuration.nfs_snapshot_support = True self.configuration.max_over_subscription_ratio = 1.0 self.configuration.reserved_percentage = 5 self.configuration.nfs_mount_options = None self.configuration.nfs_qcow2_volumes = True self.configuration.nas_host = '0.0.0.0' self.configuration.nas_share_path = None self.mock_object(volume_utils, 'get_max_over_subscription_ratio', return_value=1) self.context = context.get_admin_context() self._driver = nfs.PowerStoreNFSDriver( configuration=self.configuration) self._driver.shares = {} self.mock_object(self._driver, '_execute') def test_do_fast_clone_file(self): drv = self._driver volume_path = 'fake/path' new_volume_path = 'fake/new_path' drv._do_fast_clone_file(volume_path, new_volume_path) drv._execute.assert_called_once_with( 'dellfcopy', '-o', 'fastclone', '-s', volume_path, '-d', new_volume_path, '-v', '1', run_as_root=True ) def test_do_fast_clone_file_raise_error(self): drv = self._driver volume_path = 
'fake/path' new_volume_path = 'fake/new_path' drv._execute = mock.Mock( side_effect=putils.ProcessExecutionError() ) self.assertRaises(putils.ProcessExecutionError, drv._do_fast_clone_file, volume_path, new_volume_path) drv._execute.assert_called_once_with( 'dellfcopy', '-o', 'fastclone', '-s', volume_path, '-d', new_volume_path, '-v', '1', run_as_root=True ) def _simple_volume(self, **kwargs): updates = {'id': self.VOLUME_UUID, 'provider_location': self.TEST_NFS_EXPORT, 'display_name': f'volume-{self.VOLUME_UUID}', 'name': f'volume-{self.VOLUME_UUID}', 'size': 10, 'status': 'available'} updates.update(kwargs) if 'display_name' not in updates: updates['display_name'] = 'volume-%s' % updates['id'] return fake_volume.fake_volume_obj(self.context, **updates) def test_delete_volume_without_info(self): drv = self._driver volume = fake_volume.fake_volume_obj( self.context, display_name='volume', provider_location=self.TEST_NFS_EXPORT ) vol_path = '/path/to/vol' mock_ensure_share_mounted = self.mock_object( drv, '_ensure_share_mounted') mock_local_path_volume_info = self.mock_object( drv, '_local_path_volume_info' ) mock_local_path_volume_info.return_value = self.TEST_LOCAL_PATH mock_read_info_file = self.mock_object(drv, '_read_info_file') mock_read_info_file.return_value = {} mock_local_path_volume = self.mock_object(drv, '_local_path_volume') mock_local_path_volume.return_value = vol_path drv.delete_volume(volume) mock_ensure_share_mounted.assert_called_once_with( self.TEST_NFS_EXPORT) mock_local_path_volume.assert_called_once_with(volume) mock_read_info_file.assert_called_once_with( self.TEST_LOCAL_PATH, empty_if_missing=True) mock_local_path_volume.assert_called_once_with(volume) drv._execute.assert_called_once_with( 'rm', '-f', vol_path, run_as_root=True) def test_delete_volume_with_info(self): drv = self._driver volume = fake_volume.fake_volume_obj( self.context, display_name='volume', provider_location=self.TEST_NFS_EXPORT ) vol_path = '/path/to/vol' with mock.patch.object(drv, '_ensure_share_mounted'): mock_local_path_volume_info = self.mock_object( drv, '_local_path_volume_info' ) mock_local_path_volume_info.return_value = self.TEST_LOCAL_PATH mock_read_info_file = self.mock_object(drv, '_read_info_file') mock_read_info_file.return_value = {'active': '/path/to/active'} mock_local_path_volume = self.mock_object( drv, '_local_path_volume') mock_local_path_volume.return_value = vol_path drv.delete_volume(volume) self.assertEqual(drv._execute.call_count, 3) def test_delete_volume_without_provider_location(self): drv = self._driver volume = fake_volume.fake_volume_obj( self.context, display_name='volume', provider_location='' ) drv.delete_volume(volume) self.assertFalse(bool(drv._execute.call_count)) @ddt.data([None, QEMU_IMG_INFO_OUT1], ['raw', QEMU_IMG_INFO_OUT1], ['qcow2', QEMU_IMG_INFO_OUT2]) @ddt.unpack @mock.patch('cinder.objects.volume.Volume.get_by_id') def test_extend_volume(self, file_format, qemu_img_info, mock_get): drv = self._driver volume = fake_volume.fake_volume_obj( self.context, id='80ee16b6-75d2-4d54-9539-ffc1b4b0fb10', size=1, provider_location='nfs_share') if file_format: volume.admin_metadata = {'format': file_format} mock_get.return_value = volume path = 'path' new_size = volume['size'] + 1 mock_img_utils = self.mock_object(drv, '_qemu_img_info') img_out = qemu_img_info % {'volid': volume.id, 'size_b': volume.size * units.Gi} mock_img_utils.return_value = imageutils.QemuImgInfo( img_out, format='json') with mock.patch.object(image_utils, 'resize_image') as resize: 
with mock.patch.object(drv, 'local_path', return_value=path): with mock.patch.object(drv, '_is_share_eligible', return_value=True): drv.extend_volume(volume, new_size) resize.assert_called_once_with(path, new_size) def test_create_volume_from_snapshot(self): drv = self._driver src_volume = self._simple_volume(size=10) src_volume.id = fake.VOLUME_ID fake_snap = fake_snapshot.fake_snapshot_obj(self.context) fake_snap.volume = src_volume fake_snap.size = 10 fake_snap.status = 'available' new_volume = self._simple_volume(size=src_volume.size) drv._find_share = mock.Mock(return_value=self.TEST_NFS_EXPORT) drv._copy_volume_from_snapshot = mock.Mock() drv._create_volume_from_snapshot(new_volume, fake_snap) drv._find_share.assert_called_once_with(new_volume) drv._copy_volume_from_snapshot.assert_called_once_with( fake_snap, new_volume, new_volume.size ) @mock.patch('cinder.objects.volume.Volume.get_by_id') def test_create_cloned_volume(self, mock_get): drv = self._driver volume = self._simple_volume() mock_get.return_value = volume vol_dir = os.path.join(self.TEST_MNT_POINT_BASE, drv._get_hash_str(volume.provider_location)) vol_path = os.path.join(vol_dir, volume.name) new_volume = self._simple_volume() new_vol_dir = os.path.join(self.TEST_MNT_POINT_BASE, drv._get_hash_str( volume.provider_location)) new_vol_path = os.path.join(new_vol_dir, volume.name) drv._create_cloned_volume(new_volume, volume, self.context) command = ['dellfcopy', '-o', 'fastclone', '-s', vol_path, '-d', new_vol_path, '-v', '1'] calls = [mock.call(*command, run_as_root=True)] drv._execute.assert_has_calls(calls) @ddt.data([QEMU_IMG_INFO_OUT3]) @ddt.unpack @mock.patch('cinder.objects.volume.Volume.save') def test_copy_volume_from_snapshot(self, qemu_img_info, mock_save): drv = self._driver src_volume = self._simple_volume(size=10) src_volume.id = fake.VOLUME_ID fake_snap = fake_snapshot.fake_snapshot_obj(self.context) snap_file = src_volume.name + '.' + fake_snap.id fake_snap.volume = src_volume fake_snap.size = 10 fake_source_vol_path = os.path.join( drv._local_volume_dir(fake_snap.volume), src_volume.name ) new_volume = self._simple_volume(size=10) new_vol_dir = os.path.join(self.TEST_MNT_POINT_BASE, drv._get_hash_str( src_volume.provider_location)) new_vol_path = os.path.join(new_vol_dir, new_volume.name) mock_read_info_file = self.mock_object(drv, '_read_info_file') mock_read_info_file.return_value = {'active': snap_file, fake_snap.id: snap_file} mock_img_utils = self.mock_object(drv, '_qemu_img_info') img_out = qemu_img_info % {'volid': src_volume.id, 'snapid': fake_snap.id, 'size_b': src_volume.size * units.Gi} mock_img_utils.return_value = imageutils.QemuImgInfo( img_out, format='json') drv._copy_volume_from_snapshot(fake_snap, new_volume, new_volume.size) command = ['dellfcopy', '-o', 'fastclone', '-s', fake_source_vol_path, '-d', new_vol_path, '-v', '1'] calls = [mock.call(*command, run_as_root=True)] drv._execute.assert_has_calls(calls) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/volume/drivers/dell_emc/powerstore/test_replication.py0000664000175000017500000001711600000000000031346 0ustar00zuulzuul00000000000000# Copyright (c) 2021 Dell Inc. or its subsidiaries. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from unittest import mock from cinder import exception from cinder.objects import fields from cinder.tests.unit import fake_volume from cinder.tests.unit.volume.drivers.dell_emc import powerstore from cinder.volume.drivers.dell_emc.powerstore import client class TestReplication(powerstore.TestPowerStoreDriver): @mock.patch("cinder.volume.drivers.dell_emc.powerstore.client." "PowerStoreClient.get_array_version") @mock.patch("cinder.volume.drivers.dell_emc.powerstore.client." "PowerStoreClient.get_chap_config") def setUp(self, mock_chap, mock_version): super(TestReplication, self).setUp() self.replication_backend_id = "repl_1" replication_device = [ { "backend_id": self.replication_backend_id, "san_ip": "127.0.0.2", "san_login": "test_1", "san_password": "test_2" } ] self._override_shared_conf("replication_device", override=replication_device) mock_version.return_value = "3.0.0.0" self.driver.do_setup({}) self.driver.check_for_setup_error() self.volume = fake_volume.fake_volume_obj( self.context, host="host@backend", provider_id="fake_id", size=8, replication_status="enabled" ) def test_failover_host_no_volumes(self): self.driver.failover_host({}, [], self.replication_backend_id) self.assertEqual(self.replication_backend_id, self.driver.active_backend_id) def test_failover_host_invalid_secondary_id(self): error = self.assertRaises(exception.InvalidReplicationTarget, self.driver.failover_host, {}, [], "invalid_id") self.assertIn("is not a valid choice", error.msg) @mock.patch("cinder.volume.drivers.dell_emc.powerstore.adapter." "NVMEoFAdapter.failover_host") def test_failover_valid_secondary_id(self, mock_adapter_failover): volumes = [self.volume] mock_adapter_failover.return_value = (volumes, None) result = self.driver.failover({}, volumes, self.replication_backend_id) self.assertEqual(result, (self.replication_backend_id, volumes, None)) self.driver.adapter.failover_host.assert_called_once_with( volumes, None, False) def test_failover_invalid_secondary_id(self): volumes = [self.volume] secondary_id = "invalid_id" self.driver.active_backend_id = None self.assertRaises(exception.InvalidReplicationTarget, self.driver.failover, {}, volumes, secondary_id, None) @mock.patch("cinder.volume.drivers.dell_emc.powerstore.adapter." 
"NVMEoFAdapter.failover_host") def test_failover_is_failback(self, mock_adapter_failover): volumes = [self.volume] mock_adapter_failover.return_value = (volumes, None) secondary_id = "default" self.driver.active_backend_id = self.replication_backend_id result = self.driver.failover({}, volumes, secondary_id, None) self.assertEqual(result, (secondary_id, volumes, None)) self.driver.adapter.failover_host.assert_called_once_with( volumes, None, True) def test_failover_completed_failback(self): self.driver.failover_completed({}, None) self.assertEqual(self.driver.active_backend_id, "default") def test_failover_completed_failover(self): self.driver.replication_devices = [{"backend_id": self.replication_backend_id}] self.driver.failover_completed({}, "failed over") self.assertEqual(self.driver.active_backend_id, self.replication_backend_id) def test_failover_completed_invalid_target(self): self.assertRaises(exception.InvalidReplicationTarget, self.driver.failover_completed, {}, "invalid_target") @mock.patch("cinder.volume.drivers.dell_emc.powerstore.client." "PowerStoreClient.wait_for_failover_completion") @mock.patch("cinder.volume.drivers.dell_emc.powerstore.client." "PowerStoreClient.failover_volume_replication_session") @mock.patch("cinder.volume.drivers.dell_emc.powerstore.client." "PowerStoreClient.get_volume_replication_session_id") def test_failover_volume(self, mock_rep_session, mock_failover, mock_wait_failover): updates = self.driver.adapter.failover_volume(self.volume, is_failback=False) self.assertIsNone(updates) @mock.patch("requests.request") @mock.patch("cinder.volume.drivers.dell_emc.powerstore.client." "PowerStoreClient.failover_volume_replication_session") @mock.patch("cinder.volume.drivers.dell_emc.powerstore.client." "PowerStoreClient.get_volume_replication_session_id") def test_failover_volume_already_failed_over(self, mock_rep_session, mock_failover, mock_wait_failover): mock_wait_failover.return_value = powerstore.MockResponse( content={ "response_body": { "messages": [ { "code": client.SESSION_ALREADY_FAILED_OVER_ERROR, }, ], }, }, rc=200 ) updates = self.driver.adapter.failover_volume(self.volume, is_failback=False) self.assertIsNone(updates) @mock.patch("requests.request") @mock.patch("cinder.volume.drivers.dell_emc.powerstore.client." "PowerStoreClient.failover_volume_replication_session") @mock.patch("cinder.volume.drivers.dell_emc.powerstore.client." "PowerStoreClient.get_volume_replication_session_id") def test_failover_volume_failover_error(self, mock_rep_session, mock_failover, mock_wait_failover): mock_wait_failover.return_value = powerstore.MockResponse( content={ "state": "FAILED", "response_body": None, }, rc=200 ) updates = self.driver.adapter.failover_volume(self.volume, is_failback=False) self.assertEqual(self.volume.id, updates["volume_id"]) self.assertEqual(fields.ReplicationStatus.FAILOVER_ERROR, updates["updates"]["replication_status"]) ././@PaxHeader0000000000000000000000000000021000000000000011446 xustar0000000000000000114 path=cinder-27.0.0/cinder/tests/unit/volume/drivers/dell_emc/powerstore/test_snapshot_create_delete_revert.py 22 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/volume/drivers/dell_emc/powerstore/test_snapshot_create_delete_rever0000664000175000017500000000673400000000000034321 0ustar00zuulzuul00000000000000# Copyright (c) 2020 Dell Inc. or its subsidiaries. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from unittest import mock from cinder import exception from cinder.tests.unit import fake_snapshot from cinder.tests.unit import fake_volume from cinder.tests.unit.volume.drivers.dell_emc import powerstore class TestSnapshotCreateDelete(powerstore.TestPowerStoreDriver): @mock.patch("cinder.volume.drivers.dell_emc.powerstore.client." "PowerStoreClient.get_chap_config") def setUp(self, mock_chap): super(TestSnapshotCreateDelete, self).setUp() self.driver.check_for_setup_error() self.volume = fake_volume.fake_volume_obj( self.context, host="host@backend", provider_id="fake_id", size=8 ) self.snapshot = fake_snapshot.fake_snapshot_obj( self.context, volume=self.volume ) self.mock_object(self.driver.adapter.client, "get_snapshot_id_by_name", return_value="fake_id_1") @mock.patch("cinder.volume.drivers.dell_emc.powerstore.client." "PowerStoreClient.create_snapshot") def test_create_snapshot(self, mock_create): self.driver.create_snapshot(self.snapshot) @mock.patch("requests.request") def test_create_snapshot_bad_status(self, mock_create_request): mock_create_request.return_value = powerstore.MockResponse(rc=400) error = self.assertRaises( exception.VolumeBackendAPIException, self.driver.create_snapshot, self.snapshot ) self.assertIn("Failed to create snapshot", error.msg) @mock.patch("cinder.volume.drivers.dell_emc.powerstore.client." "PowerStoreClient.delete_volume_or_snapshot") def test_delete_snapshot(self, mock_delete): self.driver.delete_snapshot(self.snapshot) @mock.patch("requests.request") def test_delete_snapshot_bad_status(self, mock_delete): mock_delete.return_value = powerstore.MockResponse(rc=400) error = self.assertRaises( exception.VolumeBackendAPIException, self.driver.delete_snapshot, self.snapshot ) self.assertIn("Failed to delete PowerStore snapshot", error.msg) @mock.patch("cinder.volume.drivers.dell_emc.powerstore.client." "PowerStoreClient.restore_from_snapshot") def test_revert_to_snapshot(self, mock_revert): self.driver.revert_to_snapshot({}, self.volume, self.snapshot) @mock.patch("requests.request") def test_revert_to_snapshot_bad_status(self, mock_revert): mock_revert.return_value = powerstore.MockResponse(rc=400) error = self.assertRaises( exception.VolumeBackendAPIException, self.driver.revert_to_snapshot, {}, self.volume, self.snapshot ) self.assertIn("Failed to restore PowerStore volume", error.msg) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/volume/drivers/dell_emc/powerstore/test_volume_attach_detach.py0000664000175000017500000004516200000000000033202 0ustar00zuulzuul00000000000000# Copyright (c) 2020 Dell Inc. or its subsidiaries. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from unittest import mock from cinder import exception from cinder.objects import fields from cinder.objects import volume_attachment from cinder.tests.unit import fake_constants from cinder.tests.unit import fake_volume from cinder.tests.unit.volume.drivers.dell_emc import powerstore from cinder.volume.drivers.dell_emc.powerstore import utils FAKE_HOST = { "name": "fake_host", "id": "fake_id" } class TestVolumeAttachDetach(powerstore.TestPowerStoreDriver): QOS_SPECS = { 'qos_specs': { 'name': 'powerstore_qos', 'id': 'd8c88f5a-4c6f-4f89-97c5-da1ef059006e', 'created_at': 'fake_date', 'consumer': 'back-end', 'specs': { 'max_bw': '104857600', 'max_iops': '500', 'bandwidth_limit_type': 'Absolute', 'burst_percentage': '50' } } } @mock.patch("cinder.volume.drivers.dell_emc.powerstore.client." "PowerStoreClient.get_chap_config") def setUp(self, mock_chap): super(TestVolumeAttachDetach, self).setUp() mock_chap.return_value = {"mode": "Single"} self.iscsi_driver.check_for_setup_error() self.fc_driver.check_for_setup_error() with mock.patch.object(self.nvme_driver.adapter.client, "get_array_version", return_value=( "3.0.0.0" )): self.nvme_driver.check_for_setup_error() self.volume = fake_volume.fake_volume_obj( self.context, host="host@backend", provider_id="fake_id", size=8, volume_type_id=fake_constants.VOLUME_TYPE_ID ) self.volume.volume_attachment = ( volume_attachment.VolumeAttachmentList() ) self.volume.volume_attachment.objects = [ volume_attachment.VolumeAttachment( attach_status=fields.VolumeAttachStatus.ATTACHED, attached_host=self.volume.host ), volume_attachment.VolumeAttachment( attach_status=fields.VolumeAttachStatus.ATTACHED, attached_host=self.volume.host ) ] fake_iscsi_targets_response = [ { "address": "1.2.3.4", "ip_port": { "target_iqn": "iqn.2020-07.com.dell:dellemc-powerstore-test-iqn-1" }, }, { "address": "5.6.7.8", "ip_port": { "target_iqn": "iqn.2020-07.com.dell:dellemc-powerstore-test-iqn-1" }, }, ] fake_fc_wwns_response = [ { "wwn": "58:cc:f0:98:49:21:07:02" }, { "wwn": "58:cc:f0:98:49:23:07:02" }, ] fake_nvme_portals_response = [ { "address": "11.22.33.44" }, { "address": "55.66.77.88" } ] fake_nvme_nqn_response = [ { "nvm_subsystem_nqn": "nqn.2020-07.com.dell:powerstore:00:test-nqn" } ] self.fake_connector = { "host": self.volume.host, "wwpns": ["58:cc:f0:98:49:21:07:02", "58:cc:f0:98:49:23:07:02"], "initiator": "fake_initiator", } self.iscsi_targets_mock = self.mock_object( self.iscsi_driver.adapter.client, "get_ip_pool_address", return_value=fake_iscsi_targets_response ) self.fc_wwns_mock = self.mock_object( self.fc_driver.adapter.client, "get_fc_port", return_value=fake_fc_wwns_response ) self.nvme_portal_mock = self.mock_object( self.nvme_driver.adapter.client, "get_ip_pool_address", return_value=fake_nvme_portals_response ) self.nvme_nqn_mock = self.mock_object( self.nvme_driver.adapter.client, "get_subsystem_nqn", return_value=fake_nvme_nqn_response ) def test_initialize_connection_chap_enabled(self): self.iscsi_driver.adapter.use_chap_auth = True with mock.patch.object(self.iscsi_driver.adapter, "_create_host_and_attach", return_value=( utils.get_chap_credentials(), 
1 )): connection_properties = self.iscsi_driver.initialize_connection( self.volume, self.fake_connector ) self.assertIn("auth_username", connection_properties["data"]) self.assertIn("auth_password", connection_properties["data"]) def test_initialize_connection_chap_disabled(self): self.iscsi_driver.adapter.use_chap_auth = False with mock.patch.object(self.iscsi_driver.adapter, "_create_host_and_attach", return_value=( utils.get_chap_credentials(), 1 )): connection_properties = self.iscsi_driver.initialize_connection( self.volume, self.fake_connector ) self.assertNotIn("auth_username", connection_properties["data"]) self.assertNotIn("auth_password", connection_properties["data"]) def test_get_fc_targets(self): wwns = self.fc_driver.adapter._get_fc_targets() self.assertEqual(2, len(wwns)) def test_get_fc_targets_filtered(self): self.fc_driver.adapter.allowed_ports = ["58:cc:f0:98:49:23:07:02"] wwns = self.fc_driver.adapter._get_fc_targets() self.assertEqual(1, len(wwns)) self.assertNotIn( utils.fc_wwn_to_string("58:cc:f0:98:49:21:07:02"), wwns ) def test_get_fc_targets_filtered_no_matched_ports(self): self.fc_driver.adapter.allowed_ports = ["fc_wwn_1", "fc_wwn_2"] error = self.assertRaises(exception.VolumeBackendAPIException, self.fc_driver.adapter._get_fc_targets) self.assertIn("There are no accessible Fibre Channel targets on the " "system.", error.msg) def test_get_iscsi_targets(self): iqns, portals = self.iscsi_driver.adapter._get_iscsi_targets() self.assertTrue(len(iqns) == len(portals)) self.assertEqual(2, len(portals)) def test_get_iscsi_targets_filtered(self): self.iscsi_driver.adapter.allowed_ports = ["1.2.3.4"] iqns, portals = self.iscsi_driver.adapter._get_iscsi_targets() self.assertTrue(len(iqns) == len(portals)) self.assertEqual(1, len(portals)) self.assertNotIn( "iqn.2020-07.com.dell:dellemc-powerstore-test-iqn-2", iqns ) def test_get_iscsi_targets_filtered_no_matched_ports(self): self.iscsi_driver.adapter.allowed_ports = ["1.1.1.1", "2.2.2.2"] error = self.assertRaises(exception.VolumeBackendAPIException, self.iscsi_driver.adapter._get_iscsi_targets) self.assertIn("There are no accessible iSCSI targets on the system.", error.msg) def test_get_nvme_targets(self): portals, nqn = self.nvme_driver.adapter._get_nvme_targets() self.assertEqual(2, len(portals)) def test_get_connection_properties(self): volume_identifier = '123' portals, nqn = self.nvme_driver.adapter._get_nvme_targets() result = { 'driver_volume_type': 'nvmeof', 'data': { 'portals': [('11.22.33.44', 4420, 'tcp'), ('55.66.77.88', 4420, 'tcp')], 'target_nqn': [{ 'nvm_subsystem_nqn': 'nqn.2020-07.com.dell:powerstore:00:test-nqn' }], 'volume_nguid': '123', 'discard': True } } self.assertEqual(result, self.nvme_driver.adapter. _get_connection_properties(volume_identifier)) def test_get_connection_properties_no_volume_identifier(self): portals, nqn = self.nvme_driver.adapter._get_nvme_targets() result = { 'driver_volume_type': 'nvmeof', 'data': { 'portals': [('11.22.33.44', 4420, 'tcp'), ('55.66.77.88', 4420, 'tcp')], 'target_nqn': [{ 'nvm_subsystem_nqn': 'nqn.2020-07.com.dell:powerstore:00:test-nqn' }], 'volume_nguid': None, 'discard': True } } self.assertEqual(result, self.nvme_driver.adapter. 
_get_connection_properties(None)) def test_get_connection_properties_no_nqn(self): volume_identifier = '123' with mock.patch.object(self.nvme_driver.adapter, "_get_nvme_targets", return_value=(['11.22.33.44', '55.66.77.88'], [])): result = { 'driver_volume_type': 'nvmeof', 'data': { 'portals': [('11.22.33.44', 4420, 'tcp'), ('55.66.77.88', 4420, 'tcp')], 'target_nqn': [], 'volume_nguid': '123', 'discard': True } } self.assertEqual(result, self.nvme_driver.adapter. _get_connection_properties(volume_identifier)) def test_get_connection_properties_no_portals(self): volume_identifier = '123' with mock.patch.object(self.nvme_driver.adapter, "_get_nvme_targets", return_value=( [], [{ 'nvm_subsystem_nqn': 'nqn.2020-07.com.dell:powerstore:0' '0:test-nqn' }] )): result = { 'driver_volume_type': 'nvmeof', 'data': { 'portals': [], 'target_nqn': [{ 'nvm_subsystem_nqn': 'nqn.2020-07.com.dell:powerstore:00:test-nqn' }], 'volume_nguid': '123', 'discard': True } } self.assertEqual(result, self.nvme_driver.adapter. _get_connection_properties(volume_identifier)) @mock.patch("cinder.volume.drivers.dell_emc.powerstore.adapter." "CommonAdapter._detach_volume_from_hosts") @mock.patch("cinder.volume.drivers.dell_emc.powerstore.adapter." "CommonAdapter._filter_hosts_by_initiators") def test_detach_multiattached_volume(self, mock_filter_hosts, mock_detach): self.iscsi_driver.terminate_connection(self.volume, self.fake_connector) mock_filter_hosts.assert_not_called() mock_detach.assert_not_called() self.volume.volume_attachment.objects.pop() self.iscsi_driver.terminate_connection(self.volume, self.fake_connector) mock_filter_hosts.assert_called_once() mock_detach.assert_called_once() @mock.patch('cinder.volume.volume_types.' 'get_volume_type_qos_specs', return_value=QOS_SPECS) @mock.patch("cinder.volume.drivers.dell_emc.powerstore.client." "PowerStoreClient.attach_volume_to_host") @mock.patch("cinder.volume.drivers.dell_emc.powerstore.client." "PowerStoreClient.get_volume_lun") @mock.patch("cinder.volume.drivers.dell_emc.powerstore.client." "PowerStoreClient.get_array_version", return_value='4.0') @mock.patch("cinder.volume.drivers.dell_emc.powerstore.client." "PowerStoreClient.get_qos_policy_id_by_name", return_value=None) @mock.patch("cinder.volume.drivers.dell_emc.powerstore.client." "PowerStoreClient.create_qos_io_rule") @mock.patch("cinder.volume.drivers.dell_emc.powerstore.client." "PowerStoreClient.create_qos_policy") @mock.patch("cinder.volume.drivers.dell_emc.powerstore.client." "PowerStoreClient.update_volume_with_qos_policy") def test_volume_qos_policy_create(self, mock_volume_types, mock_attach_volume, mock_get_volume_lun, mock_get_array_version, mock_get_qos_policy, mock_qos_io_rule, mock_qos_policy, mock_volume_qos_update): self.iscsi_driver.adapter.use_chap_auth = False self.mock_object(self.iscsi_driver.adapter, "_create_host_if_not_exist", return_value=( FAKE_HOST, utils.get_chap_credentials(), )) self.iscsi_driver.initialize_connection( self.volume, self.fake_connector ) mock_get_volume_lun.return_value = "fake_volume_identifier" mock_qos_io_rule.return_value = "9beb10ff-a00c-4d88-a7d9-692be2b3073f" mock_qos_policy.return_value = "d69f7131-4617-4bae-89f8-a540a6bda94b" mock_volume_types.assert_called_once() mock_get_array_version.assert_called_once() mock_get_qos_policy.assert_called_once() mock_attach_volume.assert_called_once() mock_qos_policy.assert_called_once() mock_volume_qos_update.assert_called_once() @mock.patch('cinder.volume.volume_types.' 
'get_volume_type_qos_specs', return_value=QOS_SPECS) @mock.patch("cinder.volume.drivers.dell_emc.powerstore.client." "PowerStoreClient.attach_volume_to_host") @mock.patch("cinder.volume.drivers.dell_emc.powerstore.client." "PowerStoreClient.get_volume_lun") @mock.patch("cinder.volume.drivers.dell_emc.powerstore.client." "PowerStoreClient.get_array_version", return_value='4.0') @mock.patch("cinder.volume.drivers.dell_emc.powerstore.client." "PowerStoreClient.get_qos_policy_id_by_name") @mock.patch("cinder.volume.drivers.dell_emc.powerstore.client." "PowerStoreClient.update_qos_io_rule") @mock.patch("cinder.volume.drivers.dell_emc.powerstore.client." "PowerStoreClient.update_volume_with_qos_policy") def test_volume_qos_io_rule_update(self, mock_volume_types, mock_attach_volume, mock_get_volume_lun, mock_get_array_version, mock_get_qos_policy, mock_update_qos_io_rule, mock_volume_qos_update): self.iscsi_driver.adapter.use_chap_auth = False self.mock_object(self.iscsi_driver.adapter, "_create_host_if_not_exist", return_value=( FAKE_HOST, utils.get_chap_credentials(), )) self.iscsi_driver.initialize_connection( self.volume, self.fake_connector ) mock_get_volume_lun.return_value = "fake_volume_identifier" mock_get_qos_policy.return_value = ("d69f7131-" "4617-4bae-89f8-a540a6bda94b") mock_volume_types.assert_called_once() mock_attach_volume.assert_called_once() mock_get_array_version.assert_called_once() mock_update_qos_io_rule.assert_called_once() mock_volume_qos_update.assert_called_once() @mock.patch('cinder.volume.volume_types.' 'get_volume_type_qos_specs', return_value=QOS_SPECS) @mock.patch("cinder.volume.drivers.dell_emc.powerstore.utils." "is_multiattached_to_host", return_value=False) @mock.patch("cinder.volume.drivers.dell_emc.powerstore.client." "PowerStoreClient.detach_volume_from_host") @mock.patch("cinder.volume.drivers.dell_emc.powerstore.client." "PowerStoreClient.get_array_version", return_value='4.0') @mock.patch("cinder.volume.drivers.dell_emc.powerstore.client." "PowerStoreClient.update_volume_with_qos_policy") def test_volume_qos_policy_update(self, mock_volume_types, mock_multi_attached_host, mock_detach_volume, mock_get_array_version, mock_volume_qos_update): self.mock_object(self.iscsi_driver.adapter, "_filter_hosts_by_initiators", return_value=FAKE_HOST) self.iscsi_driver.terminate_connection(self.volume, self.fake_connector) mock_volume_types.assert_called_once() mock_multi_attached_host.assert_called_once() mock_detach_volume.assert_called_once() mock_get_array_version.assert_called_once() mock_volume_qos_update.assert_called_once() ././@PaxHeader0000000000000000000000000000020600000000000011453 xustar0000000000000000112 path=cinder-27.0.0/cinder/tests/unit/volume/drivers/dell_emc/powerstore/test_volume_create_delete_extend.py 22 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/volume/drivers/dell_emc/powerstore/test_volume_create_delete_extend.0000664000175000017500000002005600000000000034204 0ustar00zuulzuul00000000000000# Copyright (c) 2020 Dell Inc. or its subsidiaries. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. from unittest import mock from cinder import exception from cinder.tests.unit import fake_volume from cinder.tests.unit.volume.drivers.dell_emc import powerstore from cinder.volume.drivers.dell_emc.powerstore import client class TestVolumeCreateDeleteExtend(powerstore.TestPowerStoreDriver): @mock.patch("cinder.volume.drivers.dell_emc.powerstore.client." "PowerStoreClient.get_chap_config") def setUp(self, mock_chap): super(TestVolumeCreateDeleteExtend, self).setUp() self.driver.check_for_setup_error() self.volume = fake_volume.fake_volume_obj( self.context, host="host@backend", provider_id="fake_id", size=8 ) @mock.patch("cinder.volume.drivers.dell_emc.powerstore.client." "PowerStoreClient.create_volume") def test_create_volume(self, mock_create): mock_create.return_value = "fake_id" self.driver.create_volume(self.volume) @mock.patch("requests.request") def test_create_volume_bad_status(self, mock_create_request): mock_create_request.return_value = powerstore.MockResponse(rc=400) error = self.assertRaises(exception.VolumeBackendAPIException, self.driver.create_volume, self.volume) self.assertIn("Failed to create PowerStore volume", error.msg) @mock.patch("cinder.volume.drivers.dell_emc.powerstore.adapter." "CommonAdapter._detach_volume_from_hosts") @mock.patch("cinder.volume.drivers.dell_emc.powerstore.client." "PowerStoreClient.delete_volume_or_snapshot") def test_delete_volume(self, mock_delete, mock_detach): self.driver.delete_volume(self.volume) @mock.patch("cinder.volume.drivers.dell_emc.powerstore.adapter." "CommonAdapter._detach_volume_from_hosts") @mock.patch("cinder.volume.drivers.dell_emc.powerstore.client." "PowerStoreClient.delete_volume_or_snapshot") def test_delete_volume_no_provider_id(self, mock_delete, mock_detach): self.volume.provider_id = None self.driver.delete_volume(self.volume) mock_detach.assert_not_called() mock_delete.assert_not_called() @mock.patch("cinder.volume.drivers.dell_emc.powerstore.adapter." "CommonAdapter._detach_volume_from_hosts") @mock.patch("requests.request") def test_delete_volume_not_found(self, mock_delete_request, mock_detach): mock_delete_request.return_value = powerstore.MockResponse(rc=404) self.driver.delete_volume(self.volume) @mock.patch("requests.request") @mock.patch("cinder.volume.drivers.dell_emc.powerstore.client." "PowerStoreClient.get_volume_mapped_hosts") @mock.patch("cinder.volume.drivers.dell_emc.powerstore.client." "PowerStoreClient.delete_volume_or_snapshot") @mock.patch("cinder.volume.drivers.dell_emc.powerstore.adapter." "CommonAdapter._create_or_update_volume_qos_policy") def test_delete_volume_detach_not_found(self, mock_delete, mock_mapped_hosts, mock_detach_request, mock_qos_policy): mock_mapped_hosts.return_value = ["fake_host_id"] mock_detach_request.return_value = powerstore.MockResponse( content={}, rc=404 ) self.driver.delete_volume(self.volume) mock_qos_policy.assert_not_called() @mock.patch("requests.request") @mock.patch("cinder.volume.drivers.dell_emc.powerstore.client." "PowerStoreClient.get_volume_mapped_hosts") @mock.patch("cinder.volume.drivers.dell_emc.powerstore.client." "PowerStoreClient.delete_volume_or_snapshot") @mock.patch("cinder.volume.drivers.dell_emc.powerstore.adapter." 
"CommonAdapter._create_or_update_volume_qos_policy") def test_delete_volume_detach_not_mapped(self, mock_delete, mock_mapped_hosts, mock_detach_request, mock_qos_policy): mock_mapped_hosts.return_value = ["fake_host_id"] mock_detach_request.return_value = powerstore.MockResponse( content={ "messages": [ { "code": client.VOLUME_NOT_MAPPED_ERROR, }, ], }, rc=422 ) self.driver.delete_volume(self.volume) mock_qos_policy.assert_not_called() @mock.patch("cinder.volume.drivers.dell_emc.powerstore.adapter." "CommonAdapter._detach_volume_from_hosts") @mock.patch("requests.request") def test_delete_volume_bad_status(self, mock_delete, mock_detach): mock_delete.return_value = powerstore.MockResponse(rc=400) error = self.assertRaises(exception.VolumeBackendAPIException, self.driver.delete_volume, self.volume) self.assertIn("Failed to delete PowerStore volume", error.msg) @mock.patch("requests.request") @mock.patch("cinder.volume.drivers.dell_emc.powerstore.client." "PowerStoreClient.get_volume_mapped_hosts") @mock.patch("cinder.volume.drivers.dell_emc.powerstore.client." "PowerStoreClient.delete_volume_or_snapshot") def test_delete_volume_detach_bad_status(self, mock_delete, mock_mapped_hosts, mock_detach_request): mock_mapped_hosts.return_value = ["fake_host_id"] mock_detach_request.return_value = powerstore.MockResponse(rc=400) error = self.assertRaises(exception.VolumeBackendAPIException, self.driver.delete_volume, self.volume) self.assertIn("Failed to detach PowerStore volume", error.msg) @mock.patch("cinder.volume.drivers.dell_emc.powerstore.client." "PowerStoreClient.extend_volume") def test_extend_volume(self, mock_extend): self.driver.extend_volume(self.volume, 16) @mock.patch("requests.request") def test_extend_volume_bad_status(self, mock_extend_request): mock_extend_request.return_value = powerstore.MockResponse(rc=400) error = self.assertRaises(exception.VolumeBackendAPIException, self.driver.extend_volume, self.volume, 16) self.assertIn("Failed to extend PowerStore volume", error.msg) @mock.patch("requests.request") def test_post_request_timeout_exception(self, mock_request): mock_request.return_value = powerstore.MockResponse( rc=501 ) error = self.assertRaises(exception.VolumeBackendAPIException, self.driver.extend_volume, self.volume, 16) self.assertEqual('Bad or unexpected response from the ' 'storage volume backend API: Failed to ' 'extend PowerStore volume with id fake_id.', error.msg) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/volume/drivers/dell_emc/powerstore/test_volume_create_from_source.py0000664000175000017500000001345500000000000034274 0ustar00zuulzuul00000000000000# Copyright (c) 2020 Dell Inc. or its subsidiaries. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from unittest import mock from cinder import exception from cinder.tests.unit import fake_snapshot from cinder.tests.unit import fake_volume from cinder.tests.unit.volume.drivers.dell_emc import powerstore from cinder.volume.drivers.dell_emc.powerstore import client class TestVolumeCreateFromSource(powerstore.TestPowerStoreDriver): @mock.patch("cinder.volume.drivers.dell_emc.powerstore.client." "PowerStoreClient.get_chap_config") def setUp(self, mock_chap): super(TestVolumeCreateFromSource, self).setUp() self.driver.check_for_setup_error() self.volume = fake_volume.fake_volume_obj( self.context, host="host@backend", provider_id="fake_id", size=8 ) self.source_volume = fake_volume.fake_volume_obj( self.context, host="host@backend", provider_id="fake_id_1", size=8 ) self.source_snapshot = fake_snapshot.fake_snapshot_obj( self.context, volume=self.source_volume, volume_size=8 ) self.mock_object(self.driver.adapter.client, "get_snapshot_id_by_name", return_value="fake_id_1") @mock.patch("cinder.volume.drivers.dell_emc.powerstore.client." "PowerStoreClient.clone_volume_or_snapshot") def test_create_cloned_volume(self, mock_create_cloned): mock_create_cloned.return_value = self.volume.provider_id self.driver.create_cloned_volume(self.volume, self.source_volume) @mock.patch("cinder.volume.drivers.dell_emc.powerstore.client." "PowerStoreClient.extend_volume") @mock.patch("cinder.volume.drivers.dell_emc.powerstore.client." "PowerStoreClient.clone_volume_or_snapshot") def test_create_cloned_volume_extended(self, mock_create_cloned, mock_extend): mock_create_cloned.return_value = self.volume.provider_id self.volume.size = 16 self.driver.create_cloned_volume(self.volume, self.source_volume) mock_extend.assert_called_once() @mock.patch("cinder.volume.drivers.dell_emc.powerstore.client." "PowerStoreClient.clone_volume_or_snapshot") def test_create_volume_from_snapshot(self, mock_create_from_snap): mock_create_from_snap.return_value = self.volume.provider_id self.driver.create_volume_from_snapshot(self.volume, self.source_snapshot) @mock.patch("cinder.volume.drivers.dell_emc.powerstore.client." "PowerStoreClient.extend_volume") @mock.patch("cinder.volume.drivers.dell_emc.powerstore.client." "PowerStoreClient.clone_volume_or_snapshot") def test_create_volume_from_snapshot_extended(self, mock_create_from_snap, mock_extend): mock_create_from_snap.return_value = self.volume.provider_id self.volume.size = 16 self.driver.create_volume_from_snapshot(self.volume, self.source_snapshot) mock_extend.assert_called_once() @mock.patch("requests.request") def test_create_volume_from_source_bad_status(self, mock_create_request): mock_create_request.return_value = powerstore.MockResponse(rc=400) error = self.assertRaises( exception.VolumeBackendAPIException, self.driver.adapter.create_volume_from_source, self.volume, self.source_volume ) self.assertIn("Failed to create clone", error.msg) @mock.patch("requests.request") @mock.patch("cinder.volume.drivers.dell_emc.powerstore.client." 
"PowerStoreClient.clone_volume_or_snapshot") def test_create_volume_from_source_extended_bad_status( self, mock_create_from_source, mock_extend_request ): mock_extend_request.return_value = powerstore.MockResponse(rc=400) self.volume.size = 16 error = self.assertRaises( exception.VolumeBackendAPIException, self.driver.adapter.create_volume_from_source, self.volume, self.source_volume ) self.assertIn("Failed to extend PowerStore volume", error.msg) @mock.patch("requests.request") def test_create_snapshot_limit_reached(self, mock_create_snap_limit): snapshot_limit_reached_response = { 'errorCode': 0, 'message': 'messages', 'messages': [ { 'code': client.TOO_MANY_SNAPS_ERROR, } ] } mock_create_snap_limit.return_value = powerstore.MockResponse( snapshot_limit_reached_response, rc=400) error = self.assertRaises( exception.SnapshotLimitReached, self.driver.adapter.create_volume_from_source, self.volume, self.source_volume ) self.assertIn( "Exceeded the configured limit of 32 snapshots per volume.", error.msg) ././@PaxHeader0000000000000000000000000000021400000000000011452 xustar0000000000000000118 path=cinder-27.0.0/cinder/tests/unit/volume/drivers/dell_emc/powerstore/test_volume_group_create_delete_update.py 22 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/volume/drivers/dell_emc/powerstore/test_volume_group_create_delete_u0000664000175000017500000001515200000000000034320 0ustar00zuulzuul00000000000000# Copyright (c) 2021 Dell Inc. or its subsidiaries. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from unittest import mock from cinder import exception from cinder.tests.unit import fake_group from cinder.tests.unit import fake_volume from cinder.tests.unit.volume.drivers.dell_emc import powerstore class TestVolumeGroupCreateDeleteUpdate(powerstore.TestPowerStoreDriver): @mock.patch("cinder.volume.drivers.dell_emc.powerstore.client." "PowerStoreClient.get_chap_config") def setUp(self, mock_chap): super(TestVolumeGroupCreateDeleteUpdate, self).setUp() self.driver.check_for_setup_error() self.volume1 = fake_volume.fake_volume_obj( self.context, host="host@backend", provider_id="fake_id", size=8 ) self.volume2 = fake_volume.fake_volume_obj( self.context, host="host@backend", provider_id="fake_id", size=8 ) self.group = fake_group.fake_group_obj( self.context, ) @mock.patch("cinder.volume.drivers.dell_emc.powerstore.client." 
"PowerStoreClient.create_vg") @mock.patch("cinder.volume.volume_utils.is_group_a_cg_snapshot_type") def test_create_volume_group(self, mock_is_cg, mock_create): mock_create.return_value = "fake_id" mock_is_cg.return_value = True self.driver.create_group(self.context, self.group) @mock.patch("cinder.volume.volume_utils.is_group_a_cg_snapshot_type") def test_create_volume_group_fallback_to_generic(self, mock_is_cg): mock_is_cg.return_value = False self.assertRaises(NotImplementedError, self.driver.create_group, self.context, self.group) @mock.patch("requests.request") @mock.patch("cinder.volume.volume_utils.is_group_a_cg_snapshot_type") def test_create_volume_group_bad_status(self, mock_is_cg, mock_create_request): mock_create_request.return_value = powerstore.MockResponse(rc=400) error = self.assertRaises(exception.VolumeBackendAPIException, self.driver.create_group, self.context, self.group) self.assertIn("Failed to create PowerStore volume group", error.msg) @mock.patch("cinder.volume.drivers.dell_emc.powerstore.client." "PowerStoreClient.delete_volume_or_snapshot") @mock.patch("cinder.volume.drivers.dell_emc.powerstore.client." "PowerStoreClient.get_vg_id_by_name") @mock.patch("cinder.volume.volume_utils.is_group_a_cg_snapshot_type") def test_delete_volume_group(self, mock_is_cg, mock_get_id, mock_delete): self.driver.delete_group(self.context, self.group, []) @mock.patch("requests.request") @mock.patch("cinder.volume.drivers.dell_emc.powerstore.client." "PowerStoreClient.get_vg_id_by_name") @mock.patch("cinder.volume.volume_utils.is_group_a_cg_snapshot_type") def test_delete_volume_group_bad_status(self, mock_is_cg, mock_get_id, mock_delete): mock_delete.return_value = powerstore.MockResponse(rc=400) error = self.assertRaises( exception.VolumeBackendAPIException, self.driver.delete_group, self.context, self.group, [] ) self.assertIn("Failed to delete PowerStore volume group", error.msg) @mock.patch("cinder.objects.volume.Volume.is_replicated") @mock.patch("cinder.volume.volume_utils.is_group_a_cg_snapshot_type") def test_update_volume_group_add_replicated_volumes(self, mock_is_cg, mock_replicated): mock_replicated.return_value = True self.assertRaises(exception.InvalidVolume, self.driver.update_group, self.context, self.group, [self.volume1], []) @mock.patch("requests.request") @mock.patch("cinder.volume.drivers.dell_emc.powerstore.client." "PowerStoreClient.get_vg_id_by_name") @mock.patch("cinder.volume.volume_utils.is_group_a_cg_snapshot_type") def test_update_volume_group_add_volumes_bad_status(self, mock_is_cg, mock_get_vg_id, mock_add_volumes): mock_add_volumes.return_value = powerstore.MockResponse(rc=400) self.assertRaises(exception.VolumeBackendAPIException, self.driver.update_group, self.context, self.group, [self.volume1], []) @mock.patch("cinder.volume.drivers.dell_emc.powerstore.client." "PowerStoreClient.remove_volumes_from_vg") @mock.patch("cinder.volume.drivers.dell_emc.powerstore.client." "PowerStoreClient.add_volumes_to_vg") @mock.patch("cinder.volume.drivers.dell_emc.powerstore.client." 
"PowerStoreClient.get_vg_id_by_name") @mock.patch("cinder.volume.volume_utils.is_group_a_cg_snapshot_type") def test_update_volume_group_add_remove_volumes(self, mock_is_cg, mock_get_vg_id, mock_add_volumes, mock_remove_volumes): self.driver.update_group(self.context, self.group, add_volumes=[self.volume1], remove_volumes=[self.volume2]) ././@PaxHeader0000000000000000000000000000021200000000000011450 xustar0000000000000000116 path=cinder-27.0.0/cinder/tests/unit/volume/drivers/dell_emc/powerstore/test_volume_group_create_from_source.py 22 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/volume/drivers/dell_emc/powerstore/test_volume_group_create_from_sou0000664000175000017500000001156500000000000034367 0ustar00zuulzuul00000000000000# Copyright (c) 2021 Dell Inc. or its subsidiaries. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from unittest import mock from cinder.tests.unit import fake_group from cinder.tests.unit import fake_group_snapshot from cinder.tests.unit import fake_snapshot from cinder.tests.unit import fake_volume from cinder.tests.unit.volume.drivers.dell_emc import powerstore class TestVolumeGroupCreateFromSource(powerstore.TestPowerStoreDriver): @mock.patch("cinder.volume.drivers.dell_emc.powerstore.client." "PowerStoreClient.get_chap_config") def setUp(self, mock_chap): super(TestVolumeGroupCreateFromSource, self).setUp() self.driver.check_for_setup_error() self.volume = fake_volume.fake_volume_obj( self.context, host="host@backend", provider_id="fake_id", size=8 ) self.source_volume = fake_volume.fake_volume_obj( self.context, host="host@backend", provider_id="fake_id", size=8 ) self.source_volume_snap = fake_snapshot.fake_snapshot_obj( self.context, volume=self.source_volume, volume_size=8 ) self.group = fake_group.fake_group_obj( self.context, ) self.source_group = fake_group.fake_group_obj( self.context, ) self.source_group_snap = fake_group_snapshot.fake_group_snapshot_obj( self.context ) self.source_group_snap.group = self.source_group @mock.patch("cinder.volume.drivers.dell_emc.powerstore.client." "PowerStoreClient.rename_volume") @mock.patch("cinder.volume.drivers.dell_emc.powerstore.client." "PowerStoreClient.get_volume_id_by_name") @mock.patch("cinder.volume.drivers.dell_emc.powerstore.client." "PowerStoreClient.clone_vg_or_vg_snapshot") @mock.patch("cinder.volume.drivers.dell_emc.powerstore.client." "PowerStoreClient.get_vg_id_by_name") @mock.patch("cinder.volume.volume_utils.is_group_a_cg_snapshot_type") def test_create_volume_group_clone(self, mock_is_cg, mock_get_group_id, mock_clone, mock_get_volume_id, mock_rename): mock_get_volume_id.return_value = "fake_id" group_updates, volume_updates = self.driver.create_group_from_src( self.context, self.group, volumes=[self.volume], source_group=self.source_group, source_vols=[self.source_volume] ) self.assertEqual(1, len(volume_updates)) self.assertEqual("fake_id", volume_updates[0]["provider_id"]) @mock.patch("cinder.volume.drivers.dell_emc.powerstore.client." 
"PowerStoreClient.rename_volume") @mock.patch("cinder.volume.drivers.dell_emc.powerstore.client." "PowerStoreClient.get_volume_id_by_name") @mock.patch("cinder.volume.drivers.dell_emc.powerstore.client." "PowerStoreClient.clone_vg_or_vg_snapshot") @mock.patch("cinder.volume.drivers.dell_emc.powerstore.client." "PowerStoreClient.get_vg_snapshot_id_by_name") @mock.patch("cinder.volume.drivers.dell_emc.powerstore.client." "PowerStoreClient.get_vg_id_by_name") @mock.patch("cinder.volume.volume_utils.is_group_a_cg_snapshot_type") def test_create_volume_group_from_snapshot(self, mock_is_cg, mock_get_group_id, mock_get_snapshot_id, mock_clone, mock_get_volume_id, mock_rename): mock_get_volume_id.return_value = "fake_id" group_updates, volume_updates = self.driver.create_group_from_src( self.context, self.group, volumes=[self.volume], snapshots=[self.source_volume_snap], group_snapshot=self.source_group_snap ) self.assertEqual(1, len(volume_updates)) self.assertEqual("fake_id", volume_updates[0]["provider_id"]) ././@PaxHeader0000000000000000000000000000021600000000000011454 xustar0000000000000000120 path=cinder-27.0.0/cinder/tests/unit/volume/drivers/dell_emc/powerstore/test_volume_group_snapshot_create_delete.py 22 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/volume/drivers/dell_emc/powerstore/test_volume_group_snapshot_create0000664000175000017500000001172500000000000034373 0ustar00zuulzuul00000000000000# Copyright (c) 2021 Dell Inc. or its subsidiaries. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from unittest import mock from cinder import exception from cinder.tests.unit import fake_group from cinder.tests.unit import fake_group_snapshot from cinder.tests.unit.volume.drivers.dell_emc import powerstore class TestVolumeGroupSnapshotCreateDelete(powerstore.TestPowerStoreDriver): @mock.patch("cinder.volume.drivers.dell_emc.powerstore.client." "PowerStoreClient.get_chap_config") def setUp(self, mock_chap): super(TestVolumeGroupSnapshotCreateDelete, self).setUp() self.driver.check_for_setup_error() self.group = fake_group.fake_group_obj( self.context, ) self.group_snapshot = fake_group_snapshot.fake_group_snapshot_obj( self.context ) self.group_snapshot.group = self.group @mock.patch("cinder.volume.drivers.dell_emc.powerstore.client." "PowerStoreClient.create_vg_snapshot") @mock.patch("cinder.volume.drivers.dell_emc.powerstore.client." "PowerStoreClient.get_vg_id_by_name") @mock.patch("cinder.volume.volume_utils.is_group_a_cg_snapshot_type") def test_create_volume_group_snapshot(self, mock_is_cg, mock_get_id, mock_create): self.driver.create_group_snapshot(self.context, self.group_snapshot, []) @mock.patch("requests.request") @mock.patch("cinder.volume.drivers.dell_emc.powerstore.client." 
"PowerStoreClient.get_vg_id_by_name") @mock.patch("cinder.volume.volume_utils.is_group_a_cg_snapshot_type") def test_create_volume_group_snapshot_bad_status(self, mock_is_cg, mock_get_id, mock_create): mock_create.return_value = powerstore.MockResponse(rc=400) error = self.assertRaises(exception.VolumeBackendAPIException, self.driver.create_group_snapshot, self.context, self.group_snapshot, []) self.assertIn("Failed to create snapshot", error.msg) @mock.patch("cinder.volume.drivers.dell_emc.powerstore.client." "PowerStoreClient.delete_volume_or_snapshot") @mock.patch("cinder.volume.drivers.dell_emc.powerstore.client." "PowerStoreClient.get_vg_snapshot_id_by_name") @mock.patch("cinder.volume.drivers.dell_emc.powerstore.client." "PowerStoreClient.get_vg_id_by_name") @mock.patch("cinder.volume.volume_utils.is_group_a_cg_snapshot_type") def test_delete_volume_group_snapshot(self, mock_is_cg, mock_get_group_id, mock_get_snapshot_id, mock_delete): self.driver.delete_group_snapshot(self.context, self.group_snapshot, []) @mock.patch("requests.request") @mock.patch("cinder.volume.drivers.dell_emc.powerstore.client." "PowerStoreClient.get_vg_snapshot_id_by_name") @mock.patch("cinder.volume.drivers.dell_emc.powerstore.client." "PowerStoreClient.get_vg_id_by_name") @mock.patch("cinder.volume.volume_utils.is_group_a_cg_snapshot_type") def test_delete_volume_group_snapshot_bad_status(self, mock_is_cg, mock_get_group_id, mock_get_snapshot_id, mock_delete): mock_delete.return_value = powerstore.MockResponse(rc=400) error = self.assertRaises(exception.VolumeBackendAPIException, self.driver.delete_group_snapshot, self.context, self.group_snapshot, []) self.assertIn("Failed to delete PowerStore volume group snapshot", error.msg) ././@PaxHeader0000000000000000000000000000003200000000000011450 xustar000000000000000026 mtime=1759315577.26312 cinder-27.0.0/cinder/tests/unit/volume/drivers/dell_emc/sc/0000775000175000017500000000000000000000000023612 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/volume/drivers/dell_emc/sc/__init__.py0000664000175000017500000000000000000000000025711 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/volume/drivers/dell_emc/sc/test_fc.py0000664000175000017500000015055000000000000025621 0ustar00zuulzuul00000000000000# Copyright (c) 2014 Dell Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from unittest import mock from cinder import context from cinder import exception from cinder.tests.unit import fake_constants as fake from cinder.tests.unit import fake_volume from cinder.tests.unit import test from cinder.volume.drivers.dell_emc.sc import storagecenter_api from cinder.volume.drivers.dell_emc.sc import storagecenter_fc # We patch these here as they are used by every test to keep # from trying to contact a Dell Storage Center. 
@mock.patch.object(storagecenter_api.HttpClient, '__init__', return_value=None) @mock.patch.object(storagecenter_api.SCApi, 'open_connection') @mock.patch.object(storagecenter_api.SCApi, 'close_connection') class DellSCSanFCDriverTestCase(test.TestCase): VOLUME = {u'instanceId': u'64702.4829', u'scSerialNumber': 64702, u'replicationSource': False, u'liveVolume': False, u'vpdId': 4831, u'objectType': u'ScVolume', u'index': 4829, u'volumeFolderPath': u'dopnstktst/', u'hostCacheEnabled': False, u'usedByLegacyFluidFsNasVolume': False, u'inRecycleBin': False, u'volumeFolderIndex': 17, u'instanceName': u'5729f1db-4c45-416c-bc15-c8ea13a4465d', u'statusMessage': u'', u'status': u'Down', u'storageType': {u'instanceId': u'64702.1', u'instanceName': u'Assigned - Redundant - 2 MB', u'objectType': u'ScStorageType'}, u'cmmDestination': False, u'replicationDestination': False, u'volumeFolder': {u'instanceId': u'64702.17', u'instanceName': u'opnstktst', u'objectType': u'ScVolumeFolder'}, u'deviceId': u'6000d31000fcbe0000000000000012df', u'active': False, u'portableVolumeDestination': False, u'deleteAllowed': True, u'name': u'5729f1db-4c45-416c-bc15-c8ea13a4465d', u'scName': u'Storage Center 64702', u'secureDataUsed': False, u'serialNumber': u'0000fcbe-000012df', u'replayAllowed': False, u'flashOptimized': False, u'configuredSize': u'1.073741824E9 Bytes', u'mapped': False, u'cmmSource': False} SCSERVER = {u'scName': u'Storage Center 64702', u'volumeCount': 0, u'removeHbasAllowed': True, u'legacyFluidFs': False, u'serverFolderIndex': 4, u'alertOnConnectivity': True, u'objectType': u'ScPhysicalServer', u'instanceName': u'Server_21000024ff30441d', u'instanceId': u'64702.47', u'serverFolderPath': u'opnstktst/', u'portType': [u'FibreChannel'], u'type': u'Physical', u'statusMessage': u'Only 5 of 6 expected paths are up', u'status': u'Degraded', u'scSerialNumber': 64702, u'serverFolder': {u'instanceId': u'64702.4', u'instanceName': u'opnstktst', u'objectType': u'ScServerFolder'}, u'parentIndex': 0, u'connectivity': u'Partial', u'hostCacheIndex': 0, u'deleteAllowed': True, u'pathCount': 5, u'name': u'Server_21000024ff30441d', u'hbaPresent': True, u'hbaCount': 2, u'notes': u'Created by Dell EMC Cinder Driver', u'mapped': False, u'operatingSystem': {u'instanceId': u'64702.38', u'instanceName': u'Red Hat Linux 6.x', u'objectType': u'ScServerOperatingSystem'} } MAPPING = {u'instanceId': u'64702.2183', u'scName': u'Storage Center 64702', u'scSerialNumber': 64702, u'controller': {u'instanceId': u'64702.64702', u'instanceName': u'SN 64702', u'objectType': u'ScController'}, u'lunUsed': [1], u'server': {u'instanceId': u'64702.47', u'instanceName': u'Server_21000024ff30441d', u'objectType': u'ScPhysicalServer'}, u'volume': {u'instanceId': u'64702.4829', u'instanceName': u'5729f1db-4c45-416c-bc15-c8ea13a4465d', u'objectType': u'ScVolume'}, u'connectivity': u'Up', u'readOnly': False, u'objectType': u'ScMappingProfile', u'hostCache': False, u'mappedVia': u'Server', u'mapCount': 2, u'instanceName': u'4829-47', u'lunRequested': u'N/A' } def setUp(self): super(DellSCSanFCDriverTestCase, self).setUp() # configuration is a mock. A mock is pretty much a blank # slate. I believe mock's done in setup are not happy time # mocks. So we just do a few things like driver config here. 
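        # A plain mock.Mock() will happily hand back a child Mock for any
        # attribute it is asked for, so every configuration value the driver
        # actually reads (SAN address and credentials, the Storage Center
        # serial number, server/volume folders, API port and the domain IP
        # filters) is set explicitly below; anything left unset would reach
        # the driver as a meaningless Mock object rather than a usable value.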
self.configuration = mock.Mock() self.configuration.san_is_local = False self.configuration.san_ip = "192.168.0.1" self.configuration.san_login = "admin" self.configuration.san_password = "pwd" self.configuration.dell_sc_ssn = 64702 self.configuration.dell_sc_server_folder = 'opnstktst' self.configuration.dell_sc_volume_folder = 'opnstktst' self.configuration.dell_sc_api_port = 3033 self.configuration.excluded_domain_ip = None self.configuration.excluded_domain_ips = [] self.configuration.included_domain_ips = [] self._context = context.get_admin_context() self.driver = storagecenter_fc.SCFCDriver( configuration=self.configuration) self.driver.do_setup(None) self.driver._stats = {'QoS_support': False, 'volume_backend_name': 'dell-1', 'free_capacity_gb': 12123, 'driver_version': '1.0.1', 'total_capacity_gb': 12388, 'reserved_percentage': 0, 'vendor_name': 'Dell', 'storage_protocol': 'FC'} # Start with none. Add in the specific tests later. # Mock tests bozo this. self.driver.backends = None self.driver.replication_enabled = False self.volid = '5729f1db-4c45-416c-bc15-c8ea13a4465d' self.volume_name = "volume" + self.volid self.connector = {'ip': '192.168.0.77', 'host': 'cinderfc-vm', 'wwnns': ['20000024ff30441c', '20000024ff30441d'], 'initiator': 'iqn.1993-08.org.debian:01:e1b1312f9e1', 'wwpns': ['21000024ff30441c', '21000024ff30441d']} @mock.patch.object(storagecenter_api.SCApi, 'find_server', return_value=None) @mock.patch.object(storagecenter_api.SCApi, 'create_server', return_value=SCSERVER) @mock.patch.object(storagecenter_api.SCApi, 'find_volume', return_value=VOLUME) @mock.patch.object(storagecenter_api.SCApi, 'get_volume', return_value=VOLUME) @mock.patch.object(storagecenter_api.SCApi, 'map_volume', return_value=MAPPING) @mock.patch.object(storagecenter_api.SCApi, 'find_wwns', return_value=(1, [u'5000D31000FCBE3D', u'5000D31000FCBE35'], {u'21000024FF30441C': [u'5000D31000FCBE35'], u'21000024FF30441D': [u'5000D31000FCBE3D']})) def test_initialize_connection(self, mock_find_wwns, mock_map_volume, mock_get_volume, mock_find_volume, mock_create_server, mock_find_server, mock_close_connection, mock_open_connection, mock_init): volume = {'id': fake.VOLUME_ID} connector = self.connector res = self.driver.initialize_connection(volume, connector) expected = {'data': {'discard': True, 'initiator_target_map': {u'21000024FF30441C': [u'5000D31000FCBE35'], u'21000024FF30441D': [u'5000D31000FCBE3D']}, 'target_discovered': True, 'target_lun': 1, 'target_wwn': [u'5000D31000FCBE3D', u'5000D31000FCBE35']}, 'driver_volume_type': 'fibre_channel'} self.assertEqual(expected, res, 'Unexpected return data') # verify find_volume has been called and that is has been called twice mock_find_volume.assert_called_once_with(fake.VOLUME_ID, None, False) mock_get_volume.assert_called_once_with(self.VOLUME[u'instanceId']) @mock.patch.object(storagecenter_api.SCApi, 'find_server', return_value=SCSERVER) @mock.patch.object(storagecenter_api.SCApi, 'find_volume', return_value=VOLUME) @mock.patch.object(storagecenter_api.SCApi, 'get_volume', return_value=VOLUME) @mock.patch.object(storagecenter_api.SCApi, 'map_volume', return_value=MAPPING) @mock.patch.object(storagecenter_fc.SCFCDriver, '_is_live_vol') @mock.patch.object(storagecenter_api.SCApi, 'find_wwns') @mock.patch.object(storagecenter_fc.SCFCDriver, 'initialize_secondary') @mock.patch.object(storagecenter_api.SCApi, 'get_live_volume') def test_initialize_connection_live_vol(self, mock_get_live_volume, mock_initialize_secondary, mock_find_wwns, mock_is_live_volume, 
mock_map_volume, mock_get_volume, mock_find_volume, mock_find_server, mock_close_connection, mock_open_connection, mock_init): volume = {'id': fake.VOLUME_ID} connector = self.connector sclivevol = {'instanceId': '101.101', 'secondaryVolume': {'instanceId': '102.101', 'instanceName': fake.VOLUME_ID}, 'secondaryScSerialNumber': 102, 'secondaryRole': 'Secondary'} mock_is_live_volume.return_value = True mock_find_wwns.return_value = ( 1, [u'5000D31000FCBE3D', u'5000D31000FCBE35'], {u'21000024FF30441C': [u'5000D31000FCBE35'], u'21000024FF30441D': [u'5000D31000FCBE3D']}) mock_initialize_secondary.return_value = ( 1, [u'5000D31000FCBE3E', u'5000D31000FCBE36'], {u'21000024FF30441E': [u'5000D31000FCBE36'], u'21000024FF30441F': [u'5000D31000FCBE3E']}) mock_get_live_volume.return_value = sclivevol res = self.driver.initialize_connection(volume, connector) expected = {'data': {'discard': True, 'initiator_target_map': {u'21000024FF30441C': [u'5000D31000FCBE35'], u'21000024FF30441D': [u'5000D31000FCBE3D'], u'21000024FF30441E': [u'5000D31000FCBE36'], u'21000024FF30441F': [u'5000D31000FCBE3E']}, 'target_discovered': True, 'target_lun': 1, 'target_wwn': [u'5000D31000FCBE3D', u'5000D31000FCBE35', u'5000D31000FCBE3E', u'5000D31000FCBE36']}, 'driver_volume_type': 'fibre_channel'} self.assertEqual(expected, res, 'Unexpected return data') # verify find_volume has been called and that is has been called twice mock_find_volume.assert_called_once_with(fake.VOLUME_ID, None, True) mock_get_volume.assert_called_once_with(self.VOLUME[u'instanceId']) @mock.patch.object(storagecenter_api.SCApi, 'find_server', return_value=SCSERVER) @mock.patch.object(storagecenter_api.SCApi, 'find_volume') @mock.patch.object(storagecenter_api.SCApi, 'get_volume') @mock.patch.object(storagecenter_api.SCApi, 'map_volume', return_value=MAPPING) @mock.patch.object(storagecenter_fc.SCFCDriver, '_is_live_vol') @mock.patch.object(storagecenter_api.SCApi, 'find_wwns') @mock.patch.object(storagecenter_fc.SCFCDriver, 'initialize_secondary') @mock.patch.object(storagecenter_api.SCApi, 'get_live_volume') def test_initialize_connection_live_vol_afo(self, mock_get_live_volume, mock_initialize_secondary, mock_find_wwns, mock_is_live_volume, mock_map_volume, mock_get_volume, mock_find_volume, mock_find_server, mock_close_connection, mock_open_connection, mock_init): volume = {'id': fake.VOLUME_ID, 'provider_id': '101.101'} scvol = {'instanceId': '102.101'} mock_find_volume.return_value = scvol mock_get_volume.return_value = scvol connector = self.connector sclivevol = {'instanceId': '101.10001', 'primaryVolume': {'instanceId': '102.101', 'instanceName': fake.VOLUME_ID}, 'primaryScSerialNumber': 102, 'secondaryVolume': {'instanceId': '101.101', 'instanceName': fake.VOLUME_ID}, 'secondaryScSerialNumber': 101, 'secondaryRole': 'Activated'} mock_is_live_volume.return_value = True mock_find_wwns.return_value = ( 1, [u'5000D31000FCBE3D', u'5000D31000FCBE35'], {u'21000024FF30441C': [u'5000D31000FCBE35'], u'21000024FF30441D': [u'5000D31000FCBE3D']}) mock_get_live_volume.return_value = sclivevol res = self.driver.initialize_connection(volume, connector) expected = {'data': {'discard': True, 'initiator_target_map': {u'21000024FF30441C': [u'5000D31000FCBE35'], u'21000024FF30441D': [u'5000D31000FCBE3D']}, 'target_discovered': True, 'target_lun': 1, 'target_wwn': [u'5000D31000FCBE3D', u'5000D31000FCBE35']}, 'driver_volume_type': 'fibre_channel'} self.assertEqual(expected, res, 'Unexpected return data') # verify find_volume has been called and that is has been 
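# Illustrative sketch (assumed helper, not the driver's implementation): the
# expected payload in the live-volume test above is the primary and secondary
# find_wwns() results merged together -- the target_wwn lists are
# concatenated and the initiator_target_map dicts are unioned. Roughly:
def _merge_wwn_results(primary, secondary):
    """Merge two (lun, target_wwns, initiator_target_map) tuples."""
    lun, wwns, itmap = primary
    _, sec_wwns, sec_itmap = secondary
    merged_map = dict(itmap)
    merged_map.update(sec_itmap)
    return lun, wwns + sec_wwns, merged_map


_lun, _wwns, _itmap = _merge_wwn_results(
    (1, [u'5000D31000FCBE3D', u'5000D31000FCBE35'],
     {u'21000024FF30441C': [u'5000D31000FCBE35'],
      u'21000024FF30441D': [u'5000D31000FCBE3D']}),
    (1, [u'5000D31000FCBE3E', u'5000D31000FCBE36'],
     {u'21000024FF30441E': [u'5000D31000FCBE36'],
      u'21000024FF30441F': [u'5000D31000FCBE3E']}))
assert len(_wwns) == 4 and len(_itmap) == 4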
called twice self.assertFalse(mock_initialize_secondary.called) mock_find_volume.assert_called_once_with( fake.VOLUME_ID, '101.101', True) mock_get_volume.assert_called_once_with('102.101') @mock.patch.object(storagecenter_api.SCApi, 'find_server', return_value=SCSERVER) @mock.patch.object(storagecenter_api.SCApi, 'find_volume', return_value=VOLUME) @mock.patch.object(storagecenter_api.SCApi, 'get_volume', return_value=VOLUME) @mock.patch.object(storagecenter_api.SCApi, 'map_volume', return_value=MAPPING) @mock.patch.object(storagecenter_api.SCApi, 'find_wwns', return_value=(None, [], {})) def test_initialize_connection_no_wwns(self, mock_find_wwns, mock_map_volume, mock_get_volume, mock_find_volume, mock_find_server, mock_close_connection, mock_open_connection, mock_init): volume = {'id': fake.VOLUME_ID} connector = self.connector self.assertRaises(exception.VolumeBackendAPIException, self.driver.initialize_connection, volume, connector) @mock.patch.object(storagecenter_api.SCApi, 'find_server', return_value=None) @mock.patch.object(storagecenter_api.SCApi, 'create_server', return_value=None) @mock.patch.object(storagecenter_api.SCApi, 'find_volume', return_value=VOLUME) @mock.patch.object(storagecenter_api.SCApi, 'map_volume', return_value=MAPPING) @mock.patch.object(storagecenter_api.SCApi, 'find_wwns', return_value=(None, [], {})) def test_initialize_connection_no_server(self, mock_find_wwns, mock_map_volume, mock_find_volume, mock_create_server, mock_find_server, mock_close_connection, mock_open_connection, mock_init): volume = {'id': fake.VOLUME_ID} connector = self.connector self.assertRaises(exception.VolumeBackendAPIException, self.driver.initialize_connection, volume, connector) @mock.patch.object(storagecenter_api.SCApi, 'find_server', return_value=SCSERVER) @mock.patch.object(storagecenter_api.SCApi, 'find_volume', return_value=None) @mock.patch.object(storagecenter_api.SCApi, 'map_volume', return_value=MAPPING) @mock.patch.object(storagecenter_api.SCApi, 'find_wwns', return_value=(None, [], {})) def test_initialize_connection_vol_not_found(self, mock_find_wwns, mock_map_volume, mock_find_volume, mock_find_server, mock_close_connection, mock_open_connection, mock_init): volume = {'id': fake.VOLUME_ID} connector = self.connector self.assertRaises(exception.VolumeBackendAPIException, self.driver.initialize_connection, volume, connector) @mock.patch.object(storagecenter_api.SCApi, 'find_server', return_value=SCSERVER) @mock.patch.object(storagecenter_api.SCApi, 'find_volume', return_value=VOLUME) @mock.patch.object(storagecenter_api.SCApi, 'map_volume', return_value=None) @mock.patch.object(storagecenter_api.SCApi, 'find_wwns', return_value=(None, [], {})) def test_initialize_connection_map_vol_fail(self, mock_find_wwns, mock_map_volume, mock_find_volume, mock_find_server, mock_close_connection, mock_open_connection, mock_init): # Test case where map_volume returns None (no mappings) volume = {'id': fake.VOLUME_ID} connector = self.connector self.assertRaises(exception.VolumeBackendAPIException, self.driver.initialize_connection, volume, connector) def test_initialize_secondary(self, mock_close_connection, mock_open_connection, mock_init): sclivevol = {'instanceId': '101.101', 'secondaryVolume': {'instanceId': '102.101', 'instanceName': fake.VOLUME_ID}, 'secondaryScSerialNumber': 102} mock_api = mock.MagicMock() mock_api.find_server = mock.MagicMock(return_value=self.SCSERVER) mock_api.map_secondary_volume = mock.MagicMock( return_value=self.VOLUME) find_wwns_ret = (1, 
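# Illustrative sketch (toy function, assumed names): the initialize_secondary
# tests here never touch a real Storage Center -- they hand in a bare
# mock.MagicMock() whose methods are pre-programmed with return_value, then
# assert on the tuple that comes back. The same pattern in miniature:
from unittest import mock


def _attach_secondary(api, vol_id, wwns):
    """Toy stand-in for the secondary-mapping flow exercised here."""
    server = api.find_server(wwns) or api.create_server(wwns)
    if not server or not api.map_secondary_volume(vol_id, server):
        return (None, [], {})
    return api.find_wwns(vol_id, server)


_api = mock.MagicMock()
_api.find_server.return_value = None
_api.create_server.return_value = {'instanceId': '64702.47'}
_api.map_secondary_volume.return_value = {'instanceId': '102.101'}
_api.find_wwns.return_value = (1, [u'5000D31000FCBE3D'], {})
assert _attach_secondary(_api, '102.101', ['wwn1']) == \
    (1, [u'5000D31000FCBE3D'], {})
_api.create_server.assert_called_once_with(['wwn1'])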
[u'5000D31000FCBE3D', u'5000D31000FCBE35'], {u'21000024FF30441C': [u'5000D31000FCBE35'], u'21000024FF30441D': [u'5000D31000FCBE3D']}) mock_api.find_wwns = mock.MagicMock(return_value=find_wwns_ret) mock_api.get_volume = mock.MagicMock(return_value=self.VOLUME) ret = self.driver.initialize_secondary(mock_api, sclivevol, ['wwn1', 'wwn2']) self.assertEqual(find_wwns_ret, ret) def test_initialize_secondary_create_server(self, mock_close_connection, mock_open_connection, mock_init): sclivevol = {'instanceId': '101.101', 'secondaryVolume': {'instanceId': '102.101', 'instanceName': fake.VOLUME_ID}, 'secondaryScSerialNumber': 102} mock_api = mock.MagicMock() mock_api.find_server = mock.MagicMock(return_value=None) mock_api.create_server = mock.MagicMock(return_value=self.SCSERVER) mock_api.map_secondary_volume = mock.MagicMock( return_value=self.VOLUME) find_wwns_ret = (1, [u'5000D31000FCBE3D', u'5000D31000FCBE35'], {u'21000024FF30441C': [u'5000D31000FCBE35'], u'21000024FF30441D': [u'5000D31000FCBE3D']}) mock_api.find_wwns = mock.MagicMock(return_value=find_wwns_ret) mock_api.get_volume = mock.MagicMock(return_value=self.VOLUME) ret = self.driver.initialize_secondary(mock_api, sclivevol, ['wwn1', 'wwn2']) self.assertEqual(find_wwns_ret, ret) def test_initialize_secondary_no_server(self, mock_close_connection, mock_open_connection, mock_init): sclivevol = {'instanceId': '101.101', 'secondaryVolume': {'instanceId': '102.101', 'instanceName': fake.VOLUME_ID}, 'secondaryScSerialNumber': 102} mock_api = mock.MagicMock() mock_api.find_server = mock.MagicMock(return_value=None) mock_api.create_server = mock.MagicMock(return_value=None) ret = self.driver.initialize_secondary(mock_api, sclivevol, ['wwn1', 'wwn2']) expected = (None, [], {}) self.assertEqual(expected, ret) def test_initialize_secondary_map_fail(self, mock_close_connection, mock_open_connection, mock_init): sclivevol = {'instanceId': '101.101', 'secondaryVolume': {'instanceId': '102.101', 'instanceName': fake.VOLUME_ID}, 'secondaryScSerialNumber': 102} mock_api = mock.MagicMock() mock_api.find_server = mock.MagicMock(return_value=self.SCSERVER) mock_api.map_secondary_volume = mock.MagicMock(return_value=None) ret = self.driver.initialize_secondary(mock_api, sclivevol, ['wwn1', 'wwn2']) expected = (None, [], {}) self.assertEqual(expected, ret) def test_initialize_secondary_vol_not_found(self, mock_close_connection, mock_open_connection, mock_init): sclivevol = {'instanceId': '101.101', 'secondaryVolume': {'instanceId': '102.101', 'instanceName': fake.VOLUME_ID}, 'secondaryScSerialNumber': 102} mock_api = mock.MagicMock() mock_api.find_server = mock.MagicMock(return_value=self.SCSERVER) mock_api.map_secondary_volume = mock.MagicMock( return_value=self.VOLUME) mock_api.get_volume = mock.MagicMock(return_value=None) ret = self.driver.initialize_secondary(mock_api, sclivevol, ['wwn1', 'wwn2']) expected = (None, [], {}) self.assertEqual(expected, ret) @mock.patch.object(storagecenter_api.SCApi, 'find_volume') @mock.patch.object(storagecenter_api.SCApi, 'unmap_all') @mock.patch.object(storagecenter_fc.SCFCDriver, '_is_live_vol') def test_force_detach(self, mock_is_live_vol, mock_unmap_all, mock_find_volume, mock_close_connection, mock_open_connection, mock_init): mock_is_live_vol.return_value = False scvol = {'instandId': '12345.1'} mock_find_volume.return_value = scvol mock_unmap_all.return_value = True volume = {'id': fake.VOLUME_ID} res = self.driver.force_detach(volume) mock_unmap_all.assert_called_once_with(scvol) expected = 
{'driver_volume_type': 'fibre_channel', 'data': {}} self.assertEqual(expected, res) mock_unmap_all.assert_called_once_with(scvol) @mock.patch.object(storagecenter_api.SCApi, 'find_volume') @mock.patch.object(storagecenter_api.SCApi, 'unmap_all') @mock.patch.object(storagecenter_fc.SCFCDriver, '_is_live_vol') def test_force_detach_fail(self, mock_is_live_vol, mock_unmap_all, mock_find_volume, mock_close_connection, mock_open_connection, mock_init): mock_is_live_vol.return_value = False scvol = {'instandId': '12345.1'} mock_find_volume.return_value = scvol mock_unmap_all.return_value = False volume = {'id': fake.VOLUME_ID} self.assertRaises(exception.VolumeBackendAPIException, self.driver.force_detach, volume) mock_unmap_all.assert_called_once_with(scvol) @mock.patch.object(storagecenter_api.SCApi, 'find_volume') @mock.patch.object(storagecenter_api.SCApi, 'unmap_all') @mock.patch.object(storagecenter_fc.SCFCDriver, '_is_live_vol') @mock.patch.object(storagecenter_fc.SCFCDriver, 'terminate_secondary') @mock.patch.object(storagecenter_api.SCApi, 'get_live_volume') def test_force_detach_lv(self, mock_get_live_volume, mock_terminate_secondary, mock_is_live_vol, mock_unmap_all, mock_find_volume, mock_close_connection, mock_open_connection, mock_init): mock_is_live_vol.return_value = True scvol = {'instandId': '12345.1'} mock_find_volume.return_value = scvol sclivevol = {'instandId': '12345.1.0'} mock_get_live_volume.return_value = sclivevol mock_terminate_secondary.return_value = True volume = {'id': fake.VOLUME_ID} mock_unmap_all.return_value = True res = self.driver.force_detach(volume) mock_unmap_all.assert_called_once_with(scvol) expected = {'driver_volume_type': 'fibre_channel', 'data': {}} self.assertEqual(expected, res) self.assertEqual(1, mock_terminate_secondary.call_count) mock_unmap_all.assert_called_once_with(scvol) @mock.patch.object(storagecenter_api.SCApi, 'find_volume') @mock.patch.object(storagecenter_fc.SCFCDriver, '_is_live_vol') def test_force_detach_vol_not_found(self, mock_is_live_vol, mock_find_volume, mock_close_connection, mock_open_connection, mock_init): mock_is_live_vol.return_value = False mock_find_volume.return_value = None volume = {'id': fake.VOLUME_ID} res = self.driver.force_detach(volume) expected = {'driver_volume_type': 'fibre_channel', 'data': {}} self.assertEqual(expected, res) @mock.patch.object(storagecenter_api.SCApi, 'find_server', return_value=SCSERVER) @mock.patch.object(storagecenter_api.SCApi, 'find_volume', return_value=VOLUME) @mock.patch.object(storagecenter_api.SCApi, 'unmap_volume', return_value=True) @mock.patch.object(storagecenter_api.SCApi, 'find_wwns', return_value=(1, [u'5000D31000FCBE3D', u'5000D31000FCBE35'], {u'21000024FF30441C': [u'5000D31000FCBE35'], u'21000024FF30441D': [u'5000D31000FCBE3D']})) @mock.patch.object(storagecenter_api.SCApi, 'get_volume_count', return_value=1) def test_terminate_connection(self, mock_get_volume_count, mock_find_wwns, mock_unmap_volume, mock_find_volume, mock_find_server, mock_close_connection, mock_open_connection, mock_init): volume = {'id': fake.VOLUME_ID} connector = self.connector res = self.driver.terminate_connection(volume, connector) mock_unmap_volume.assert_called_once_with(self.VOLUME, self.SCSERVER) expected = {'driver_volume_type': 'fibre_channel', 'data': {}} self.assertEqual(expected, res, 'Unexpected return data') @mock.patch.object(storagecenter_fc.SCFCDriver, 'force_detach') def test_terminate_connection_none_connector(self, mock_force_detach, mock_close_connection, 
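# Illustrative sketch (generic exception, not driver code): the failure-path
# tests here all use the callable form of assertRaises -- the method under
# test plus its arguments are passed in, so the exception is caught by the
# test framework rather than inside the test body. Equivalent forms:
import unittest


class _AssertRaisesSketch(unittest.TestCase):
    @staticmethod
    def _detach(volume):
        raise ValueError('unmap failed for %s' % volume)

    def test_callable_form(self):
        # Same shape as assertRaises(exception.VolumeBackendAPIException,
        # self.driver.force_detach, volume) in the tests above.
        self.assertRaises(ValueError, self._detach, 'vol-1')

    def test_context_manager_form(self):
        with self.assertRaises(ValueError):
            self._detach('vol-1')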
mock_open_connection, mock_init): volume = {'id': fake.VOLUME_ID} self.driver.terminate_connection(volume, None) mock_force_detach.assert_called_once_with(volume) @mock.patch.object(storagecenter_api.SCApi, 'find_server', return_value=SCSERVER) @mock.patch.object(storagecenter_api.SCApi, 'find_volume', return_value=VOLUME) @mock.patch.object(storagecenter_api.SCApi, 'unmap_volume', return_value=True) @mock.patch.object(storagecenter_api.SCApi, 'find_wwns', return_value=(1, [u'5000D31000FCBE3D', u'5000D31000FCBE35'], {u'21000024FF30441C': [u'5000D31000FCBE35'], u'21000024FF30441D': [u'5000D31000FCBE3D']})) @mock.patch.object(storagecenter_api.SCApi, 'get_volume_count', return_value=1) @mock.patch.object(storagecenter_fc.SCFCDriver, '_is_live_vol') @mock.patch.object(storagecenter_fc.SCFCDriver, 'terminate_secondary') def test_terminate_connection_live_vol(self, mock_terminate_secondary, mock_is_live_vol, mock_get_volume_count, mock_find_wwns, mock_unmap_volume, mock_find_volume, mock_find_server, mock_close_connection, mock_open_connection, mock_init): volume = {'id': fake.VOLUME_ID} connector = self.connector mock_terminate_secondary.return_value = (None, [], {}) mock_is_live_vol.return_value = True res = self.driver.terminate_connection(volume, connector) mock_unmap_volume.assert_called_once_with(self.VOLUME, self.SCSERVER) expected = {'driver_volume_type': 'fibre_channel', 'data': {}} self.assertEqual(expected, res, 'Unexpected return data') @mock.patch.object(storagecenter_api.SCApi, 'find_server', return_value=None) @mock.patch.object(storagecenter_api.SCApi, 'find_volume', return_value=VOLUME) @mock.patch.object(storagecenter_api.SCApi, 'unmap_volume', return_value=True) @mock.patch.object(storagecenter_api.SCApi, 'find_wwns', return_value=(1, [u'5000D31000FCBE3D', u'5000D31000FCBE35'], {u'21000024FF30441C': [u'5000D31000FCBE35'], u'21000024FF30441D': [u'5000D31000FCBE3D']})) @mock.patch.object(storagecenter_api.SCApi, 'get_volume_count', return_value=1) def test_terminate_connection_no_server(self, mock_get_volume_count, mock_find_wwns, mock_unmap_volume, mock_find_volume, mock_find_server, mock_close_connection, mock_open_connection, mock_init): volume = {'id': fake.VOLUME_ID} connector = self.connector self.assertRaises(exception.VolumeBackendAPIException, self.driver.terminate_connection, volume, connector) @mock.patch.object(storagecenter_api.SCApi, 'find_server', return_value=SCSERVER) @mock.patch.object(storagecenter_api.SCApi, 'find_volume', return_value=None) @mock.patch.object(storagecenter_api.SCApi, 'unmap_volume', return_value=True) @mock.patch.object(storagecenter_api.SCApi, 'find_wwns', return_value=(1, [u'5000D31000FCBE3D', u'5000D31000FCBE35'], {u'21000024FF30441C': [u'5000D31000FCBE35'], u'21000024FF30441D': [u'5000D31000FCBE3D']})) @mock.patch.object(storagecenter_api.SCApi, 'get_volume_count', return_value=1) def test_terminate_connection_no_volume(self, mock_get_volume_count, mock_find_wwns, mock_unmap_volume, mock_find_volume, mock_find_server, mock_close_connection, mock_open_connection, mock_init): volume = {'id': fake.VOLUME_ID} connector = self.connector self.assertRaises(exception.VolumeBackendAPIException, self.driver.terminate_connection, volume, connector) @mock.patch.object(storagecenter_api.SCApi, 'find_server', return_value=SCSERVER) @mock.patch.object(storagecenter_api.SCApi, 'find_volume', return_value=VOLUME) @mock.patch.object(storagecenter_api.SCApi, 'unmap_volume', return_value=True) @mock.patch.object(storagecenter_api.SCApi, 'find_wwns', 
return_value=(None, [], {})) @mock.patch.object(storagecenter_api.SCApi, 'get_volume_count', return_value=1) def test_terminate_connection_no_wwns(self, mock_get_volume_count, mock_find_wwns, mock_unmap_volume, mock_find_volume, mock_find_server, mock_close_connection, mock_open_connection, mock_init): volume = {'id': fake.VOLUME_ID} connector = self.connector res = self.driver.terminate_connection(volume, connector) expected = {'driver_volume_type': 'fibre_channel', 'data': {}} self.assertEqual(expected, res, 'Unexpected return data') @mock.patch.object(storagecenter_api.SCApi, 'find_server', return_value=SCSERVER) @mock.patch.object(storagecenter_api.SCApi, 'find_volume', return_value=VOLUME) @mock.patch.object(storagecenter_api.SCApi, 'unmap_volume', return_value=False) @mock.patch.object(storagecenter_api.SCApi, 'find_wwns', return_value=(1, [u'5000D31000FCBE3D', u'5000D31000FCBE35'], {u'21000024FF30441C': [u'5000D31000FCBE35'], u'21000024FF30441D': [u'5000D31000FCBE3D']})) @mock.patch.object(storagecenter_api.SCApi, 'get_volume_count', return_value=1) def test_terminate_connection_failure(self, mock_get_volume_count, mock_find_wwns, mock_unmap_volume, mock_find_volume, mock_find_server, mock_close_connection, mock_open_connection, mock_init): volume = {'id': fake.VOLUME_ID} connector = self.connector self.assertRaises(exception.VolumeBackendAPIException, self.driver.terminate_connection, volume, connector) @mock.patch.object(storagecenter_api.SCApi, 'find_server', return_value=SCSERVER) @mock.patch.object(storagecenter_api.SCApi, 'find_volume', return_value=VOLUME) @mock.patch.object(storagecenter_api.SCApi, 'unmap_volume', return_value=True) @mock.patch.object(storagecenter_api.SCApi, 'find_wwns', return_value=(1, [u'5000D31000FCBE3D', u'5000D31000FCBE35'], {u'21000024FF30441C': [u'5000D31000FCBE35'], u'21000024FF30441D': [u'5000D31000FCBE3D']})) @mock.patch.object(storagecenter_api.SCApi, 'get_volume_count', return_value=0) def test_terminate_connection_vol_count_zero(self, mock_get_volume_count, mock_find_wwns, mock_unmap_volume, mock_find_volume, mock_find_server, mock_close_connection, mock_open_connection, mock_init): # Test case where get_volume_count is zero volume = {'id': fake.VOLUME_ID} connector = self.connector res = self.driver.terminate_connection(volume, connector) mock_unmap_volume.assert_called_once_with(self.VOLUME, self.SCSERVER) expected = {'data': {'initiator_target_map': {u'21000024FF30441C': [u'5000D31000FCBE35'], u'21000024FF30441D': [u'5000D31000FCBE3D']}, 'target_wwn': [u'5000D31000FCBE3D', u'5000D31000FCBE35']}, 'driver_volume_type': 'fibre_channel'} self.assertEqual(expected, res, 'Unexpected return data') @mock.patch.object(storagecenter_api.SCApi, 'find_server', return_value=SCSERVER) @mock.patch.object(storagecenter_api.SCApi, 'find_volume', return_value=VOLUME) @mock.patch.object(storagecenter_api.SCApi, 'unmap_volume', return_value=True) @mock.patch.object(storagecenter_api.SCApi, 'find_wwns', return_value=(1, [u'5000D31000FCBE3D', u'5000D31000FCBE35'], {u'21000024FF30441C': [u'5000D31000FCBE35'], u'21000024FF30441D': [u'5000D31000FCBE3D']})) @mock.patch.object(storagecenter_api.SCApi, 'get_volume_count', return_value=1) def test_terminate_connection_multiattached_host(self, mock_get_volume_count, mock_find_wwns, mock_unmap_volume, mock_find_volume, mock_find_server, mock_close_connection, mock_open_connection, mock_init): connector = self.connector attachment1 = fake_volume.volume_attachment_ovo(self._context) attachment1.connector = connector 
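# Illustrative sketch (assumed helper, not the driver's code): the two
# multiattach tests here encode one rule -- if the volume is still attached
# to the same host through another attachment, terminate_connection must not
# unmap it; if the other attachment belongs to a different host, the unmap
# proceeds. A minimal expression of that rule:
def _host_still_uses_volume(attachments, host):
    """True if more than one attached attachment belongs to this host."""
    same_host = [a for a in attachments
                 if a.get('attach_status') == 'attached'
                 and a.get('attached_host') == host]
    return len(same_host) > 1


_attached = [{'attach_status': 'attached', 'attached_host': 'cinderfc-vm'},
             {'attach_status': 'attached', 'attached_host': 'cinderfc-vm'}]
assert _host_still_uses_volume(_attached, 'cinderfc-vm')      # skip unmap
_attached[1]['attached_host'] = 'host2'
assert not _host_still_uses_volume(_attached, 'cinderfc-vm')  # unmap once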
attachment1.attached_host = connector['host'] attachment1.attach_status = 'attached' attachment2 = fake_volume.volume_attachment_ovo(self._context) attachment2.connector = connector attachment2.attached_host = connector['host'] attachment2.attach_status = 'attached' vol = fake_volume.fake_volume_obj(self._context) vol.multiattach = True vol.volume_attachment.objects.append(attachment1) vol.volume_attachment.objects.append(attachment2) self.driver.terminate_connection(vol, connector) mock_unmap_volume.assert_not_called() @mock.patch.object(storagecenter_api.SCApi, 'find_server', return_value=SCSERVER) @mock.patch.object(storagecenter_api.SCApi, 'find_volume', return_value=VOLUME) @mock.patch.object(storagecenter_api.SCApi, 'unmap_volume', return_value=True) @mock.patch.object(storagecenter_api.SCApi, 'find_wwns', return_value=(1, [u'5000D31000FCBE3D', u'5000D31000FCBE35'], {u'21000024FF30441C': [u'5000D31000FCBE35'], u'21000024FF30441D': [u'5000D31000FCBE3D']})) @mock.patch.object(storagecenter_api.SCApi, 'get_volume_count', return_value=1) def test_terminate_connection_multiattached_diffhost(self, mock_get_volume_count, mock_find_wwns, mock_unmap_volume, mock_find_volume, mock_find_server, mock_close_connection, mock_open_connection, mock_init): connector = self.connector attachment1 = fake_volume.volume_attachment_ovo(self._context) attachment1.connector = connector attachment1.attached_host = connector['host'] attachment1.attach_status = 'attached' attachment2 = fake_volume.volume_attachment_ovo(self._context) attachment2.connector = connector attachment2.attached_host = 'host2' attachment2.attach_status = 'attached' vol = fake_volume.fake_volume_obj(self._context) vol.multiattach = True vol.volume_attachment.objects.append(attachment1) vol.volume_attachment.objects.append(attachment2) self.driver.terminate_connection(vol, connector) mock_unmap_volume.assert_called_once_with(self.VOLUME, self.SCSERVER) def test_terminate_secondary(self, mock_close_connection, mock_open_connection, mock_init): mock_api = mock.MagicMock() mock_api.find_server = mock.MagicMock(return_value=self.SCSERVER) mock_api.get_volume = mock.MagicMock(return_value=self.VOLUME) mock_api.find_wwns = mock.MagicMock(return_value=(None, [], {})) mock_api.unmap_volume = mock.MagicMock(return_value=True) sclivevol = {'instanceId': '101.101', 'secondaryVolume': {'instanceId': '102.101', 'instanceName': fake.VOLUME_ID}, 'secondaryScSerialNumber': 102} ret = self.driver.terminate_secondary(mock_api, sclivevol, ['wwn1', 'wwn2']) expected = (None, [], {}) self.assertEqual(expected, ret) @mock.patch.object(storagecenter_api.SCApi, 'get_storage_usage', return_value={'availableSpace': 100, 'freeSpace': 50}) def test_update_volume_stats_with_refresh(self, mock_get_storage_usage, mock_close_connection, mock_open_connection, mock_init): stats = self.driver.get_volume_stats(True) self.assertEqual('FC', stats['storage_protocol']) mock_get_storage_usage.assert_called_once_with() @mock.patch.object(storagecenter_api.SCApi, 'get_storage_usage', return_value={'availableSpace': 100, 'freeSpace': 50}) def test_get_volume_stats_no_refresh(self, mock_get_storage_usage, mock_close_connection, mock_open_connection, mock_init): stats = self.driver.get_volume_stats(False) self.assertEqual('FC', stats['storage_protocol']) mock_get_storage_usage.assert_not_called() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 
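# Illustrative sketch (toy driver, assumed names): the two stats tests above
# pin down the refresh contract -- get_volume_stats(True) must hit the
# backend (get_storage_usage called exactly once) while get_volume_stats(False)
# must serve the cached stats without any backend call. In miniature:
from unittest import mock


class _ToyDriver(object):
    def __init__(self, api):
        self._api = api
        self._stats = {'storage_protocol': 'FC'}

    def get_volume_stats(self, refresh=False):
        if refresh:
            usage = self._api.get_storage_usage()
            self._stats['free_capacity_gb'] = usage['freeSpace']
        return self._stats


_api = mock.MagicMock()
_api.get_storage_usage.return_value = {'availableSpace': 100, 'freeSpace': 50}
_drv = _ToyDriver(_api)
assert _drv.get_volume_stats(False)['storage_protocol'] == 'FC'
_api.get_storage_usage.assert_not_called()
assert _drv.get_volume_stats(True)['free_capacity_gb'] == 50
_api.get_storage_usage.assert_called_once_with()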
cinder-27.0.0/cinder/tests/unit/volume/drivers/dell_emc/sc/test_sc.py0000664000175000017500000065524600000000000025652 0ustar00zuulzuul00000000000000# Copyright (c) 2014 Dell Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from unittest import mock import eventlet from cinder import context from cinder import exception from cinder.objects import fields from cinder.tests.unit import fake_constants as fake from cinder.tests.unit import fake_snapshot from cinder.tests.unit import fake_volume from cinder.tests.unit import test from cinder.volume.drivers.dell_emc.sc import storagecenter_api from cinder.volume.drivers.dell_emc.sc import storagecenter_iscsi from cinder.volume import volume_types # We patch these here as they are used by every test to keep # from trying to contact a Dell Storage Center. MOCKAPI = mock.MagicMock() @mock.patch.object(storagecenter_api.HttpClient, '__init__', return_value=None) @mock.patch.object(storagecenter_api.SCApi, 'open_connection', return_value=MOCKAPI) @mock.patch.object(storagecenter_api.SCApi, 'close_connection') class DellSCSanISCSIDriverTestCase(test.TestCase): VOLUME = {u'instanceId': u'64702.3494', u'scSerialNumber': 64702, u'replicationSource': False, u'liveVolume': False, u'vpdId': 3496, u'objectType': u'ScVolume', u'index': 3494, u'volumeFolderPath': u'devstackvol/fcvm/', u'hostCacheEnabled': False, u'usedByLegacyFluidFsNasVolume': False, u'inRecycleBin': False, u'volumeFolderIndex': 17, u'instanceName': u'volume-37883deb-85cd-426a-9a98-62eaad8671ea', u'statusMessage': u'', u'status': u'Up', u'storageType': {u'instanceId': u'64702.1', u'instanceName': u'Assigned - Redundant - 2 MB', u'objectType': u'ScStorageType'}, u'cmmDestination': False, u'replicationDestination': False, u'volumeFolder': {u'instanceId': u'64702.17', u'instanceName': u'fcvm', u'objectType': u'ScVolumeFolder'}, u'deviceId': u'6000d31000fcbe000000000000000da8', u'active': True, u'portableVolumeDestination': False, u'deleteAllowed': True, u'name': u'volume-37883deb-85cd-426a-9a98-62eaad8671ea', u'scName': u'Storage Center 64702', u'secureDataUsed': False, u'serialNumber': u'0000fcbe-00000da8', u'replayAllowed': True, u'flashOptimized': False, u'configuredSize': u'1.073741824E9 Bytes', u'mapped': False, u'cmmSource': False} SCSERVER = {u'scName': u'Storage Center 64702', u'volumeCount': 0, u'removeHbasAllowed': True, u'legacyFluidFs': False, u'serverFolderIndex': 4, u'alertOnConnectivity': True, u'objectType': u'ScPhysicalServer', u'instanceName': u'Server_21000024ff30441d', u'instanceId': u'64702.47', u'serverFolderPath': u'devstacksrv/', u'portType': [u'FibreChannel'], u'type': u'Physical', u'statusMessage': u'Only 5 of 6 expected paths are up', u'status': u'Degraded', u'scSerialNumber': 64702, u'serverFolder': {u'instanceId': u'64702.4', u'instanceName': u'devstacksrv', u'objectType': u'ScServerFolder'}, u'parentIndex': 0, u'connectivity': u'Partial', u'hostCacheIndex': 0, u'deleteAllowed': True, u'pathCount': 5, u'name': u'Server_21000024ff30441d', u'hbaPresent': True, u'hbaCount': 2, 
u'notes': u'Created by Dell EMC Cinder Driver', u'mapped': False, u'operatingSystem': {u'instanceId': u'64702.38', u'instanceName': u'Red Hat Linux 6.x', u'objectType': u'ScServerOperatingSystem'} } MAPPINGS = [{u'profile': {u'instanceId': u'64702.104', u'instanceName': u'92-30', u'objectType': u'ScMappingProfile'}, u'status': u'Down', u'statusMessage': u'', u'instanceId': u'64702.969.64702', u'scName': u'Storage Center 64702', u'scSerialNumber': 64702, u'controller': {u'instanceId': u'64702.64702', u'instanceName': u'SN 64702', u'objectType': u'ScController'}, u'server': {u'instanceId': u'64702.30', u'instanceName': u'Server_iqn.1993-08.org.debian:01:3776df826e4f', u'objectType': u'ScPhysicalServer'}, u'volume': {u'instanceId': u'64702.92', u'instanceName': u'volume-74a21934-60ad-4cf2-b89b-1f0dda309ddf', u'objectType': u'ScVolume'}, u'readOnly': False, u'lun': 1, u'lunUsed': [1], u'serverHba': {u'instanceId': u'64702.3454975614', u'instanceName': u'iqn.1993-08.org.debian:01:3776df826e4f', u'objectType': u'ScServerHba'}, u'path': {u'instanceId': u'64702.64702.64702.31.8', u'instanceName': u'iqn.1993-08.org.debian:' '01:3776df826e4f-5000D31000FCBE43', u'objectType': u'ScServerHbaPath'}, u'controllerPort': {u'instanceId': u'64702.5764839588723736131.91', u'instanceName': u'5000D31000FCBE43', u'objectType': u'ScControllerPort'}, u'instanceName': u'64702-969', u'transport': u'Iscsi', u'objectType': u'ScMapping'}] RPLAY = {u'scSerialNumber': 64702, u'globalIndex': u'64702-46-250', u'description': u'Cinder Clone Replay', u'parent': {u'instanceId': u'64702.46.249', u'instanceName': u'64702-46-249', u'objectType': u'ScReplay'}, u'instanceId': u'64702.46.250', u'scName': u'Storage Center 64702', u'consistent': False, u'expires': True, u'freezeTime': u'12/09/2014 03:52:08 PM', u'createVolume': {u'instanceId': u'64702.46', u'instanceName': u'volume-ff9589d3-2d41-48d5-9ef5-2713a875e85b', u'objectType': u'ScVolume'}, u'expireTime': u'12/09/2014 04:52:08 PM', u'source': u'Manual', u'spaceRecovery': False, u'writesHeldDuration': 7910, u'active': False, u'markedForExpiration': False, u'objectType': u'ScReplay', u'instanceName': u'12/09/2014 03:52:08 PM', u'size': u'0.0 Bytes' } SCRPLAYPROFILE = {u'ruleCount': 0, u'name': u'fc8f2fec-fab2-4e34-9148-c094c913b9a3', u'volumeCount': 0, u'scName': u'Storage Center 64702', u'notes': u'Created by Dell EMC Cinder Driver', u'scSerialNumber': 64702, u'userCreated': True, u'instanceName': u'fc8f2fec-fab2-4e34-9148-c094c913b9a3', u'instanceId': u'64702.11', u'enforceReplayCreationTimeout': False, u'replayCreationTimeout': 20, u'objectType': u'ScReplayProfile', u'type': u'Consistent', u'expireIncompleteReplaySets': True} IQN = 'iqn.2002-03.com.compellent:5000D31000000001' ISCSI_PROPERTIES = {'access_mode': 'rw', 'discard': True, 'target_discovered': False, 'target_iqn': u'iqn.2002-03.com.compellent:5000d31000fcbe43', 'target_iqns': [u'iqn.2002-03.com.compellent:5000d31000fcbe43', u'iqn.2002-03.com.compellent:5000d31000fcbe44'], 'target_lun': 1, 'target_luns': [1, 1], 'target_portal': u'192.168.0.21:3260', 'target_portals': [u'192.168.0.21:3260', u'192.168.0.22:3260']} def setUp(self): super(DellSCSanISCSIDriverTestCase, self).setUp() # configuration is a mock. A mock is pretty much a blank # slate. I believe mock's done in setup are not happy time # mocks. So we just do a few things like driver config here. 
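# Illustrative sketch (assumed names): because mock.Mock() returns a new
# child mock for any attribute, the setUp here only has to assign the handful
# of configuration values the driver actually reads; everything else still
# resolves without raising. Patching eventlet.sleep (as setUp does via
# self.mock_object) keeps retry loops from actually sleeping.
from unittest import mock
import eventlet


def _build_config(**overrides):
    conf = mock.Mock()
    conf.san_ip = '192.168.0.1'
    conf.san_login = 'admin'
    conf.dell_sc_api_port = 3033
    for key, value in overrides.items():
        setattr(conf, key, value)
    return conf


_conf = _build_config(dell_sc_ssn=12345)
assert _conf.dell_sc_ssn == 12345
assert _conf.some_option_never_set is not None   # auto-created child mock
with mock.patch.object(eventlet, 'sleep') as _sleep:
    eventlet.sleep(10)                            # returns immediately
    _sleep.assert_called_once_with(10)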
self.configuration = mock.Mock() self.configuration.san_is_local = False self.configuration.san_ip = "192.168.0.1" self.configuration.san_login = "admin" self.configuration.san_password = "mmm" self.configuration.dell_sc_ssn = 12345 self.configuration.dell_sc_server_folder = 'opnstktst' self.configuration.dell_sc_volume_folder = 'opnstktst' self.configuration.dell_sc_api_port = 3033 self.configuration.target_ip_address = '192.168.1.1' self.configuration.target_port = 3260 self.configuration.excluded_domain_ip = None self.configuration.excluded_domain_ips = [] self.configuration.included_domain_ips = [] self._context = context.get_admin_context() self.driver = storagecenter_iscsi.SCISCSIDriver( configuration=self.configuration) self.driver.do_setup(None) self.driver._stats = {'QoS_support': False, 'volume_backend_name': 'dell-1', 'free_capacity_gb': 12123, 'driver_version': '1.0.1', 'total_capacity_gb': 12388, 'reserved_percentage': 0, 'vendor_name': 'Dell EMC', 'storage_protocol': 'iSCSI'} # Start with none. Add in the specific tests later. # Mock tests bozo this. self.driver.backends = None self.driver.replication_enabled = False self.mock_sleep = self.mock_object(eventlet, 'sleep') self.volid = fake.VOLUME_ID self.volume_name = "volume" + self.volid self.connector = { 'ip': '10.0.0.2', 'initiator': 'iqn.1993-08.org.debian:01:2227dab76162', 'host': 'fakehost'} self.connector_multipath = { 'ip': '10.0.0.2', 'initiator': 'iqn.1993-08.org.debian:01:2227dab76162', 'host': 'fakehost', 'multipath': True} self.access_record_output = [ "ID Initiator Ipaddress AuthMethod UserName Apply-To", "--- --------------- ------------- ---------- ---------- --------", "1 iqn.1993-08.org.debian:01:222 *.*.*.* none both", " 7dab76162"] self.fake_iqn = 'iqn.2002-03.com.compellent:5000D31000000001' self.properties = { 'target_discovered': True, 'target_portal': '%s:3260' % self.driver.configuration.dell_sc_iscsi_ip, 'target_iqn': self.fake_iqn, 'volume_id': 1} self._model_update = { 'provider_location': "%s:3260,1 %s 0" % (self.driver.configuration.dell_sc_iscsi_ip, self.fake_iqn) } @mock.patch.object(storagecenter_api.SCApi, 'find_sc') def test_check_for_setup_error(self, mock_find_sc, mock_close_connection, mock_open_connection, mock_init): # Fail, Fail due to repl partner not found, success. mock_find_sc.side_effect = [exception.VolumeBackendAPIException(''), 10000, 12345, exception.VolumeBackendAPIException(''), 10000, 12345, 67890] # Find SC throws self.assertRaises(exception.VolumeBackendAPIException, self.driver.check_for_setup_error) # Replication enabled but one backend is down. self.driver.replication_enabled = True self.driver.backends = [{'target_device_id': '12345', 'managed_backend_name': 'host@dell1', 'qosnode': 'cinderqos'}, {'target_device_id': '67890', 'managed_backend_name': 'host@dell2', 'qosnode': 'otherqos'}] self.assertRaises(exception.InvalidHost, self.driver.check_for_setup_error) # Good run. Should run without exceptions. 
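# Illustrative sketch (generic exception): test_check_for_setup_error drives
# several scenarios through one mock by assigning a list to side_effect --
# each call consumes the next item, and an item that is an exception instance
# is raised instead of returned. For example:
from unittest import mock

_find_sc = mock.MagicMock()
_find_sc.side_effect = [RuntimeError('backend down'), 10000, 12345]
try:
    _find_sc()                      # first call raises
except RuntimeError:
    pass
assert _find_sc() == 10000          # second call returns the next item
assert _find_sc() == 12345          # third call returns the last item
assert _find_sc.call_count == 3     # the raising call still counts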
self.driver.check_for_setup_error() # failed over run mock_find_sc.side_effect = None mock_find_sc.reset_mock() mock_find_sc.return_value = 10000 self.driver.failed_over = True self.driver.check_for_setup_error() # find sc should be called exactly once mock_find_sc.assert_called_once_with() # No repl run mock_find_sc.reset_mock() mock_find_sc.return_value = 10000 self.driver.failed_over = False self.driver.replication_enabled = False self.driver.backends = None self.driver.check_for_setup_error() mock_find_sc.assert_called_once_with() @mock.patch.object(storagecenter_iscsi.SCISCSIDriver, '_get_volume_extra_specs') def test__create_replications(self, mock_get_volume_extra_specs, mock_close_connection, mock_open_connection, mock_init): backends = self.driver.backends mock_get_volume_extra_specs.return_value = { 'replication_enabled': ' True'} model_update = {'replication_status': 'enabled', 'replication_driver_data': '12345,67890'} vol = {'id': fake.VOLUME_ID, 'replication_driver_data': ''} scvol = {'name': fake.VOLUME_ID} self.driver.backends = [{'target_device_id': '12345', 'managed_backend_name': 'host@dell1', 'qosnode': 'cinderqos'}, {'target_device_id': '67890', 'managed_backend_name': 'host@dell2', 'qosnode': 'otherqos'}] mock_api = mock.MagicMock() mock_api.create_replication = mock.MagicMock( return_value={'instanceId': '1'}) # Create regular replication test. res = self.driver._create_replications(mock_api, vol, scvol) mock_api.create_replication.assert_any_call( scvol, '12345', 'cinderqos', False, None, False) mock_api.create_replication.assert_any_call( scvol, '67890', 'otherqos', False, None, False) self.assertEqual(model_update, res) # Create replication with activereplay set. mock_get_volume_extra_specs.return_value = { 'replication:activereplay': ' True', 'replication_enabled': ' True'} res = self.driver._create_replications(mock_api, vol, scvol) mock_api.create_replication.assert_any_call( scvol, '12345', 'cinderqos', False, None, True) mock_api.create_replication.assert_any_call( scvol, '67890', 'otherqos', False, None, True) self.assertEqual(model_update, res) # Create replication with sync set. mock_get_volume_extra_specs.return_value = { 'replication:activereplay': ' True', 'replication_enabled': ' True', 'replication_type': ' sync'} res = self.driver._create_replications(mock_api, vol, scvol) mock_api.create_replication.assert_any_call( scvol, '12345', 'cinderqos', True, None, True) mock_api.create_replication.assert_any_call( scvol, '67890', 'otherqos', True, None, True) self.assertEqual(model_update, res) # Create replication with disk folder set. self.driver.backends = [{'target_device_id': '12345', 'managed_backend_name': 'host@dell1', 'qosnode': 'cinderqos', 'diskfolder': 'ssd'}, {'target_device_id': '67890', 'managed_backend_name': 'host@dell2', 'qosnode': 'otherqos', 'diskfolder': 'ssd'}] mock_get_volume_extra_specs.return_value = { 'replication:activereplay': ' True', 'replication_enabled': ' True', 'replication_type': ' sync'} res = self.driver._create_replications(mock_api, vol, scvol) mock_api.create_replication.assert_any_call( scvol, '12345', 'cinderqos', True, 'ssd', True) mock_api.create_replication.assert_any_call( scvol, '67890', 'otherqos', True, 'ssd', True) self.assertEqual(model_update, res) # Failed to create replication test. 
mock_api.create_replication.return_value = None self.assertRaises(exception.VolumeBackendAPIException, self.driver._create_replications, mock_api, vol, scvol) # Replication not enabled test mock_get_volume_extra_specs.return_value = {} res = self.driver._create_replications(mock_api, vol, scvol) self.assertEqual({}, res) self.driver.backends = backends @mock.patch.object(storagecenter_iscsi.SCISCSIDriver, '_get_volume_extra_specs') def test__create_replications_live_volume(self, mock_get_volume_extra_specs, mock_close_connection, mock_open_connection, mock_init): backends = self.driver.backends model_update = {'replication_status': 'enabled', 'replication_driver_data': '12345'} vol = {'id': fake.VOLUME_ID, 'replication_driver_data': ''} scvol = {'name': fake.VOLUME_ID} mock_api = mock.MagicMock() mock_api.create_live_volume = mock.MagicMock( return_value={'instanceId': '1'}) # Live volume with two backends defined. self.driver.backends = [{'target_device_id': '12345', 'managed_backend_name': 'host@dell1', 'qosnode': 'cinderqos', 'remoteqos': 'remoteqos'}, {'target_device_id': '67890', 'managed_backend_name': 'host@dell2', 'qosnode': 'otherqos', 'remoteqos': 'remoteqos'}] mock_get_volume_extra_specs.return_value = { 'replication:activereplay': ' True', 'replication_enabled': ' True', 'replication:livevolume': ' True'} self.assertRaises(exception.ReplicationError, self.driver._create_replications, mock_api, vol, scvol) # Live volume self.driver.backends = [{'target_device_id': '12345', 'managed_backend_name': 'host@dell1', 'qosnode': 'cinderqos', 'diskfolder': 'ssd', 'remoteqos': 'remoteqos'}] res = self.driver._create_replications(mock_api, vol, scvol) mock_api.create_live_volume.assert_called_once_with( scvol, '12345', True, False, False, 'cinderqos', 'remoteqos') self.assertEqual(model_update, res) # Active replay False mock_get_volume_extra_specs.return_value = { 'replication_enabled': ' True', 'replication:livevolume': ' True'} res = self.driver._create_replications(mock_api, vol, scvol) mock_api.create_live_volume.assert_called_with( scvol, '12345', False, False, False, 'cinderqos', 'remoteqos') self.assertEqual(model_update, res) # Sync mock_get_volume_extra_specs.return_value = { 'replication_enabled': ' True', 'replication:livevolume': ' True', 'replication_type': ' sync'} res = self.driver._create_replications(mock_api, vol, scvol) mock_api.create_live_volume.assert_called_with( scvol, '12345', False, True, False, 'cinderqos', 'remoteqos') self.assertEqual(model_update, res) self.driver.backends = backends @mock.patch.object(storagecenter_iscsi.SCISCSIDriver, '_get_volume_extra_specs') def test__delete_replications(self, mock_get_volume_extra_specs, mock_close_connection, mock_open_connection, mock_init): backends = self.driver.backends vol = {'id': fake.VOLUME_ID} scvol = {'instanceId': '1'} mock_api = mock.MagicMock() mock_api.delete_replication = mock.MagicMock() mock_api.find_volume = mock.MagicMock(return_value=scvol) # Start replication disabled. Should fail immediately. mock_get_volume_extra_specs.return_value = {} self.driver._delete_replications(mock_api, vol) self.assertFalse(mock_api.delete_replication.called) # Replication enabled. No replications listed. mock_get_volume_extra_specs.return_value = { 'replication_enabled': ' True'} vol = {'id': fake.VOLUME_ID, 'replication_driver_data': ''} self.driver._delete_replications(mock_api, vol) self.assertFalse(mock_api.delete_replication.called) # Something to call. 
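# Illustrative sketch: the replication tests here verify one backend at a
# time with assert_any_call, which passes if *any* recorded call matches,
# whereas assert_called_once_with additionally requires exactly one call in
# total.
from unittest import mock

_delete_replication = mock.MagicMock()
_delete_replication({'instanceId': '1'}, 12345)
_delete_replication({'instanceId': '1'}, 67890)
_delete_replication.assert_any_call({'instanceId': '1'}, 12345)
_delete_replication.assert_any_call({'instanceId': '1'}, 67890)
assert _delete_replication.call_count == 2
# assert_called_once_with(...) would raise AssertionError here, because the
# mock was called twice.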
vol = {'id': fake.VOLUME_ID, 'replication_driver_data': '12345,67890'} self.driver._delete_replications(mock_api, vol) mock_api.delete_replication.assert_any_call(scvol, 12345) mock_api.delete_replication.assert_any_call(scvol, 67890) self.driver.backends = backends @mock.patch.object(storagecenter_iscsi.SCISCSIDriver, '_get_volume_extra_specs') def test__delete_live_volume(self, mock_get_volume_extra_specs, mock_close_connection, mock_open_connection, mock_init): backends = self.driver.backends vol = {'id': fake.VOLUME_ID, 'provider_id': '101.101'} mock_api = mock.MagicMock() sclivevol = {'instanceId': '101.102', 'secondaryVolume': {'instanceId': '102.101', 'instanceName': fake.VOLUME_ID}, 'secondaryScSerialNumber': 102, 'secondaryRole': 'Secondary'} mock_api.get_live_volume = mock.MagicMock(return_value=sclivevol) # No replication driver data. ret = self.driver._delete_live_volume(mock_api, vol) self.assertFalse(mock_api.get_live_volume.called) self.assertFalse(ret) # Bogus rdd vol = {'id': fake.VOLUME_ID, 'provider_id': '101.101', 'replication_driver_data': ''} ret = self.driver._delete_live_volume(mock_api, vol) self.assertFalse(mock_api.get_live_volume.called) self.assertFalse(ret) # Valid delete. mock_api.delete_live_volume = mock.MagicMock(return_value=True) vol = {'id': fake.VOLUME_ID, 'provider_id': '101.101', 'replication_driver_data': '102'} ret = self.driver._delete_live_volume(mock_api, vol) mock_api.get_live_volume.assert_called_with('101.101', fake.VOLUME_ID) self.assertTrue(ret) # Wrong ssn. vol = {'id': fake.VOLUME_ID, 'provider_id': '101.101', 'replication_driver_data': '103'} ret = self.driver._delete_live_volume(mock_api, vol) mock_api.get_live_volume.assert_called_with('101.101', fake.VOLUME_ID) self.assertFalse(ret) # No live volume found. 
mock_api.get_live_volume.return_value = None ret = self.driver._delete_live_volume(mock_api, vol) mock_api.get_live_volume.assert_called_with('101.101', fake.VOLUME_ID) self.assertFalse(ret) self.driver.backends = backends @mock.patch.object(storagecenter_api.SCApi, 'create_volume', return_value=VOLUME) def test_create_volume(self, mock_create_volume, mock_close_connection, mock_open_connection, mock_init): volume = {'id': fake.VOLUME_ID, 'size': 1} self.driver.create_volume(volume) mock_create_volume.assert_called_once_with( fake.VOLUME_ID, 1, None, None, None, None, None) @mock.patch.object(storagecenter_api.SCApi, 'find_replay_profile', return_value='fake') @mock.patch.object(storagecenter_api.SCApi, 'update_cg_volumes') @mock.patch.object(storagecenter_api.SCApi, 'create_volume', return_value=VOLUME) def test_create_volume_with_group(self, mock_create_volume, mock_update_cg_volumes, mock_find_replay_profile, mock_close_connection, mock_open_connection, mock_init): volume = {'id': fake.VOLUME_ID, 'size': 1, 'group_id': fake.GROUP_ID} self.driver.create_volume(volume) mock_create_volume.assert_called_once_with( fake.VOLUME_ID, 1, None, None, None, None, None) self.assertTrue(mock_find_replay_profile.called) self.assertTrue(mock_update_cg_volumes.called) @mock.patch.object(storagecenter_api.SCApi, 'create_volume', return_value=VOLUME) @mock.patch.object( volume_types, 'get_volume_type_extra_specs', return_value={'storagetype:volumeqos': 'volumeqos'}) def test_create_volume_volumeqos_profile(self, mock_extra, mock_create_volume, mock_close_connection, mock_open_connection, mock_init): volume = {'id': fake.VOLUME_ID, 'size': 1, 'volume_type_id': 'abc'} self.driver.create_volume(volume) mock_create_volume.assert_called_once_with( fake.VOLUME_ID, 1, None, None, 'volumeqos', None, None) @mock.patch.object(storagecenter_api.SCApi, 'create_volume', return_value=VOLUME) @mock.patch.object( volume_types, 'get_volume_type_extra_specs', return_value={'storagetype:groupqos': 'groupqos'}) def test_create_volume_groupqos_profile(self, mock_extra, mock_create_volume, mock_close_connection, mock_open_connection, mock_init): volume = {'id': fake.VOLUME_ID, 'size': 1, 'volume_type_id': 'abc'} self.driver.create_volume(volume) mock_create_volume.assert_called_once_with( fake.VOLUME_ID, 1, None, None, None, 'groupqos', None) @mock.patch.object(storagecenter_api.SCApi, 'create_volume', return_value=VOLUME) @mock.patch.object( volume_types, 'get_volume_type_extra_specs', return_value={'storagetype:datareductionprofile': 'drprofile'}) def test_create_volume_data_reduction_profile(self, mock_extra, mock_create_volume, mock_close_connection, mock_open_connection, mock_init): volume = {'id': fake.VOLUME_ID, 'size': 1, 'volume_type_id': 'abc'} self.driver.create_volume(volume) mock_create_volume.assert_called_once_with( fake.VOLUME_ID, 1, None, None, None, None, 'drprofile') @mock.patch.object(storagecenter_api.SCApi, 'create_volume', return_value=VOLUME) @mock.patch.object( volume_types, 'get_volume_type_extra_specs', return_value={'storagetype:storageprofile': 'HighPriority'}) def test_create_volume_storage_profile(self, mock_extra, mock_create_volume, mock_close_connection, mock_open_connection, mock_init): volume = {'id': fake.VOLUME_ID, 'size': 1, 'volume_type_id': 'abc'} self.driver.create_volume(volume) mock_create_volume.assert_called_once_with( fake.VOLUME_ID, 1, "HighPriority", None, None, None, None) @mock.patch.object(storagecenter_api.SCApi, 'create_volume', return_value=VOLUME) @mock.patch.object( 
volume_types, 'get_volume_type_extra_specs', return_value={'storagetype:replayprofiles': 'Daily'}) def test_create_volume_replay_profiles(self, mock_extra, mock_create_volume, mock_close_connection, mock_open_connection, mock_init): volume = {'id': fake.VOLUME_ID, 'size': 1, 'volume_type_id': 'abc'} self.driver.create_volume(volume) mock_create_volume.assert_called_once_with( fake.VOLUME_ID, 1, None, 'Daily', None, None, None) @mock.patch.object(storagecenter_api.SCApi, 'create_volume', return_value=VOLUME) @mock.patch.object(storagecenter_iscsi.SCISCSIDriver, '_create_replications', return_value={'replication_status': 'enabled', 'replication_driver_data': 'ssn'}) def test_create_volume_replication(self, mock_create_replications, mock_create_volume, mock_close_connection, mock_open_connection, mock_init): volume = {'id': fake.VOLUME_ID, 'size': 1} ret = self.driver.create_volume(volume) self.assertEqual({'replication_status': 'enabled', 'replication_driver_data': 'ssn', 'provider_id': self.VOLUME[u'instanceId']}, ret) @mock.patch.object(storagecenter_api.SCApi, 'create_volume', return_value=VOLUME) @mock.patch.object(storagecenter_api.SCApi, 'delete_volume') @mock.patch.object(storagecenter_iscsi.SCISCSIDriver, '_create_replications') def test_create_volume_replication_raises(self, mock_create_replications, mock_delete_volume, mock_create_volume, mock_close_connection, mock_open_connection, mock_init): volume = {'id': fake.VOLUME_ID, 'size': 1} mock_create_replications.side_effect = ( exception.VolumeBackendAPIException(data='abc')) self.assertRaises(exception.VolumeBackendAPIException, self.driver.create_volume, volume) self.assertTrue(mock_delete_volume.called) @mock.patch.object(storagecenter_api.SCApi, 'create_volume', return_value=None) @mock.patch.object(storagecenter_api.SCApi, 'delete_volume') def test_create_volume_failure(self, mock_delete_volume, mock_create_volume, mock_close_connection, mock_open_connection, mock_init): volume = {'id': fake.VOLUME_ID, 'size': 1} self.assertRaises(exception.VolumeBackendAPIException, self.driver.create_volume, volume) self.assertTrue(mock_delete_volume.called) @mock.patch.object(storagecenter_iscsi.SCISCSIDriver, '_delete_replications') @mock.patch.object(storagecenter_api.SCApi, 'delete_volume', return_value=True) @mock.patch.object(storagecenter_iscsi.SCISCSIDriver, '_get_replication_specs', return_value={'enabled': True, 'live': False}) def test_delete_volume(self, mock_get_replication_specs, mock_delete_volume, mock_delete_replications, mock_close_connection, mock_open_connection, mock_init): volume = {'id': fake.VOLUME_ID} self.driver.delete_volume(volume) mock_delete_volume.assert_called_once_with(fake.VOLUME_ID, None) self.assertTrue(mock_delete_replications.called) self.assertEqual(1, mock_delete_replications.call_count) volume = {'id': fake.VOLUME_ID, 'provider_id': '1.1'} self.driver.delete_volume(volume) mock_delete_volume.assert_called_with(fake.VOLUME_ID, '1.1') self.assertTrue(mock_delete_replications.called) self.assertEqual(2, mock_delete_replications.call_count) @mock.patch.object(storagecenter_iscsi.SCISCSIDriver, '_delete_replications') @mock.patch.object(storagecenter_api.SCApi, 'delete_volume', return_value=True) @mock.patch.object(storagecenter_iscsi.SCISCSIDriver, '_get_replication_specs', return_value={'enabled': True, 'live': False}) def test_delete_volume_migrating(self, mock_get_replication_specs, mock_delete_volume, mock_delete_replications, mock_close_connection, mock_open_connection, mock_init): volume = {'id': 
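# Illustrative sketch (toy function, assumed names): the failure tests above
# combine two mock features -- side_effect set to an exception instance makes
# the mocked step blow up, and the test then checks that the cleanup call was
# still made before the exception propagated. In miniature:
from unittest import mock


def _create_with_rollback(api, vol_id):
    """Toy create flow: delete the volume again if replication setup fails."""
    api.create_volume(vol_id)
    try:
        api.create_replication(vol_id)
    except Exception:
        api.delete_volume(vol_id)
        raise


_api = mock.MagicMock()
_api.create_replication.side_effect = RuntimeError('replication failed')
try:
    _create_with_rollback(_api, 'vol-1')
except RuntimeError:
    pass
_api.delete_volume.assert_called_once_with('vol-1')   # rollback happened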
fake.VOLUME_ID, '_name_id': fake.VOLUME2_ID, 'provider_id': '12345.100', 'migration_status': 'deleting'} self.driver.delete_volume(volume) mock_delete_volume.assert_called_once_with(fake.VOLUME2_ID, None) @mock.patch.object(storagecenter_iscsi.SCISCSIDriver, '_delete_live_volume') @mock.patch.object(storagecenter_api.SCApi, 'delete_volume', return_value=True) @mock.patch.object(storagecenter_iscsi.SCISCSIDriver, '_get_replication_specs', return_value={'enabled': True, 'live': True}) def test_delete_volume_live_volume(self, mock_get_replication_specs, mock_delete_volume, mock_delete_live_volume, mock_close_connection, mock_open_connection, mock_init): volume = {'id': fake.VOLUME_ID, 'provider_id': '1.1'} self.driver.delete_volume(volume) mock_delete_volume.assert_called_with(fake.VOLUME_ID, '1.1') self.assertTrue(mock_delete_live_volume.called) @mock.patch.object(storagecenter_iscsi.SCISCSIDriver, '_delete_replications') @mock.patch.object(storagecenter_api.SCApi, 'delete_volume', return_value=False) @mock.patch.object(storagecenter_iscsi.SCISCSIDriver, '_get_replication_specs', return_value={'enabled': True, 'live': False}) def test_delete_volume_failure(self, mock_get_replication_specs, mock_delete_volume, mock_delete_replications, mock_close_connection, mock_open_connection, mock_init): volume = {'id': fake.VOLUME_ID, 'size': 1} self.assertRaises(exception.VolumeIsBusy, self.driver.delete_volume, volume) self.assertTrue(mock_delete_replications.called) @mock.patch.object(storagecenter_api.SCApi, 'find_server', return_value=None) @mock.patch.object(storagecenter_api.SCApi, 'create_server', return_value=SCSERVER) @mock.patch.object(storagecenter_api.SCApi, 'find_volume', return_value=VOLUME) @mock.patch.object(storagecenter_api.SCApi, 'get_volume', return_value=VOLUME) @mock.patch.object(storagecenter_api.SCApi, 'map_volume', return_value=MAPPINGS[0]) @mock.patch.object(storagecenter_api.SCApi, 'find_iscsi_properties', return_value=ISCSI_PROPERTIES) def test_initialize_connection(self, mock_find_iscsi_props, mock_map_volume, mock_get_volume, mock_find_volume, mock_create_server, mock_find_server, mock_close_connection, mock_open_connection, mock_init): provider_id = self.VOLUME[u'instanceId'] volume = {'id': fake.VOLUME_ID, 'provider_id': provider_id} connector = self.connector data = self.driver.initialize_connection(volume, connector) self.assertEqual('iscsi', data['driver_volume_type']) # verify find_volume has been called and that is has been called twice mock_find_volume.assert_called_once_with( fake.VOLUME_ID, provider_id, False) mock_get_volume.assert_called_once_with(provider_id) expected = {'data': self.ISCSI_PROPERTIES, 'driver_volume_type': 'iscsi'} self.assertEqual(expected, data, 'Unexpected return value') @mock.patch.object(storagecenter_api.SCApi, 'find_server', return_value=None) @mock.patch.object(storagecenter_api.SCApi, 'create_server', return_value=SCSERVER) @mock.patch.object(storagecenter_api.SCApi, 'find_volume', return_value=VOLUME) @mock.patch.object(storagecenter_api.SCApi, 'get_volume', return_value=VOLUME) @mock.patch.object(storagecenter_api.SCApi, 'map_volume', return_value=MAPPINGS[0]) @mock.patch.object(storagecenter_api.SCApi, 'find_iscsi_properties', return_value=ISCSI_PROPERTIES) def test_initialize_connection_multi_path(self, mock_find_iscsi_props, mock_map_volume, mock_get_volume, mock_find_volume, mock_create_server, mock_find_server, mock_close_connection, mock_open_connection, mock_init): # Test case where connection is multipath provider_id = 
self.VOLUME[u'instanceId'] volume = {'id': fake.VOLUME_ID, 'provider_id': provider_id} connector = self.connector_multipath data = self.driver.initialize_connection(volume, connector) self.assertEqual('iscsi', data['driver_volume_type']) # verify find_volume has been called and that is has been called twice mock_find_volume.assert_called_once_with(fake.VOLUME_ID, provider_id, False) mock_get_volume.assert_called_once_with(provider_id) props = self.ISCSI_PROPERTIES.copy() expected = {'data': props, 'driver_volume_type': 'iscsi'} self.assertEqual(expected, data, 'Unexpected return value') @mock.patch.object(storagecenter_api.SCApi, 'find_server', return_value=SCSERVER) @mock.patch.object(storagecenter_api.SCApi, 'find_volume', return_value=VOLUME) @mock.patch.object(storagecenter_api.SCApi, 'map_volume', return_value=MAPPINGS) @mock.patch.object(storagecenter_api.SCApi, 'find_iscsi_properties', return_value=None) def test_initialize_connection_no_iqn(self, mock_find_iscsi_properties, mock_map_volume, mock_find_volume, mock_find_server, mock_close_connection, mock_open_connection, mock_init): volume = {'id': fake.VOLUME_ID} connector = {} mock_find_iscsi_properties.side_effect = Exception('abc') self.assertRaises(exception.VolumeBackendAPIException, self.driver.initialize_connection, volume, connector) @mock.patch.object(storagecenter_api.SCApi, 'find_server', return_value=None) @mock.patch.object(storagecenter_api.SCApi, 'create_server', return_value=None) @mock.patch.object(storagecenter_api.SCApi, 'find_volume', return_value=VOLUME) @mock.patch.object(storagecenter_api.SCApi, 'map_volume', return_value=MAPPINGS) @mock.patch.object(storagecenter_api.SCApi, 'find_iscsi_properties', return_value=None) def test_initialize_connection_no_server(self, mock_find_iscsi_properties, mock_map_volume, mock_find_volume, mock_create_server, mock_find_server, mock_close_connection, mock_open_connection, mock_init): volume = {'id': fake.VOLUME_ID} connector = {} self.assertRaises(exception.VolumeBackendAPIException, self.driver.initialize_connection, volume, connector) @mock.patch.object(storagecenter_api.SCApi, 'find_server', return_value=SCSERVER) @mock.patch.object(storagecenter_api.SCApi, 'find_volume', return_value=None) @mock.patch.object(storagecenter_api.SCApi, 'map_volume', return_value=MAPPINGS) @mock.patch.object(storagecenter_api.SCApi, 'find_iscsi_properties', return_value=None) def test_initialize_connection_vol_not_found(self, mock_find_iscsi_properties, mock_map_volume, mock_find_volume, mock_find_server, mock_close_connection, mock_open_connection, mock_init): volume = {'name': fake.VOLUME_ID} connector = {} self.assertRaises(exception.VolumeBackendAPIException, self.driver.initialize_connection, volume, connector) @mock.patch.object(storagecenter_api.SCApi, 'find_server', return_value=None) @mock.patch.object(storagecenter_api.SCApi, 'create_server', return_value=SCSERVER) @mock.patch.object(storagecenter_api.SCApi, 'find_volume', return_value=VOLUME) @mock.patch.object(storagecenter_api.SCApi, 'map_volume', return_value=None) @mock.patch.object(storagecenter_api.SCApi, 'find_iscsi_properties', return_value=ISCSI_PROPERTIES) def test_initialize_connection_map_vol_fail(self, mock_find_iscsi_props, mock_map_volume, mock_find_volume, mock_create_server, mock_find_server, mock_close_connection, mock_open_connection, mock_init): # Test case where map_volume returns None (no mappings) volume = {'id': fake.VOLUME_ID} connector = self.connector 
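# Illustrative sketch (assumed helper, not driver code): for a live volume
# the iSCSI test that follows simply expects the secondary paths appended to
# the primary connection properties -- target_iqns, target_luns and
# target_portals are concatenated while the single-path keys stay those of
# the primary. Roughly:
def _merge_iscsi_props(primary, secondary):
    merged = dict(primary)
    for key in ('target_iqns', 'target_luns', 'target_portals'):
        merged[key] = list(primary[key]) + list(secondary[key])
    return merged


_primary = {'target_iqn': u'iqn:p1', 'target_iqns': [u'iqn:p1', u'iqn:p2'],
            'target_lun': 1, 'target_luns': [1, 1],
            'target_portal': u'192.168.0.21:3260',
            'target_portals': [u'192.168.0.21:3260', u'192.168.0.22:3260']}
_secondary = {'target_iqns': [u'iqn:s1', u'iqn:s2'], 'target_luns': [1, 1],
              'target_portals': [u'192.168.1.21:3260', u'192.168.1.22:3260']}
_merged = _merge_iscsi_props(_primary, _secondary)
assert len(_merged['target_portals']) == 4
assert _merged['target_iqn'] == u'iqn:p1'   # primary stays the default path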
self.assertRaises(exception.VolumeBackendAPIException, self.driver.initialize_connection, volume, connector) @mock.patch.object(storagecenter_api.SCApi, 'find_server', return_value=None) @mock.patch.object(storagecenter_api.SCApi, 'create_server', return_value=SCSERVER) @mock.patch.object(storagecenter_api.SCApi, 'find_volume', return_value=VOLUME) @mock.patch.object(storagecenter_api.SCApi, 'get_volume', return_value=VOLUME) @mock.patch.object(storagecenter_api.SCApi, 'map_volume', return_value=MAPPINGS[0]) @mock.patch.object(storagecenter_api.SCApi, 'find_iscsi_properties', return_value=ISCSI_PROPERTIES) @mock.patch.object(storagecenter_iscsi.SCISCSIDriver, '_is_live_vol') @mock.patch.object(storagecenter_iscsi.SCISCSIDriver, 'initialize_secondary') def test_initialize_connection_live_volume(self, mock_initialize_secondary, mock_is_live_vol, mock_find_iscsi_props, mock_map_volume, mock_get_volume, mock_find_volume, mock_create_server, mock_find_server, mock_close_connection, mock_open_connection, mock_init): volume = {'id': fake.VOLUME_ID} connector = self.connector mock_is_live_vol.return_value = True lvol_properties = {'access_mode': 'rw', 'target_discovered': False, 'target_iqn': u'iqn:1', 'target_iqns': [ u'iqn:1', u'iqn:2'], 'target_lun': 1, 'target_luns': [1, 1], 'target_portal': u'192.168.1.21:3260', 'target_portals': [u'192.168.1.21:3260', u'192.168.1.22:3260']} mock_initialize_secondary.return_value = lvol_properties props = self.ISCSI_PROPERTIES.copy() props['target_iqns'] += lvol_properties['target_iqns'] props['target_luns'] += lvol_properties['target_luns'] props['target_portals'] += lvol_properties['target_portals'] ret = self.driver.initialize_connection(volume, connector) expected = {'data': props, 'driver_volume_type': 'iscsi'} self.assertEqual(expected, ret) @mock.patch.object(storagecenter_api.SCApi, 'find_server', return_value=None) @mock.patch.object(storagecenter_api.SCApi, 'create_server', return_value=SCSERVER) @mock.patch.object(storagecenter_api.SCApi, 'find_volume') @mock.patch.object(storagecenter_api.SCApi, 'get_volume') @mock.patch.object(storagecenter_api.SCApi, 'map_volume', return_value=MAPPINGS[0]) @mock.patch.object(storagecenter_api.SCApi, 'find_iscsi_properties') @mock.patch.object(storagecenter_api.SCApi, 'get_live_volume') @mock.patch.object(storagecenter_iscsi.SCISCSIDriver, '_is_live_vol') @mock.patch.object(storagecenter_iscsi.SCISCSIDriver, 'initialize_secondary') def test_initialize_connection_live_volume_afo(self, mock_initialize_secondary, mock_is_live_vol, mock_get_live_vol, mock_find_iscsi_props, mock_map_volume, mock_get_volume, mock_find_volume, mock_create_server, mock_find_server, mock_close_connection, mock_open_connection, mock_init): volume = {'id': fake.VOLUME_ID, 'provider_id': '101.101'} scvol = {'instanceId': '102.101'} mock_find_volume.return_value = scvol mock_get_volume.return_value = scvol connector = self.connector sclivevol = {'instanceId': '101.10001', 'primaryVolume': {'instanceId': '101.101', 'instanceName': fake.VOLUME_ID}, 'primaryScSerialNumber': 101, 'secondaryVolume': {'instanceId': '102.101', 'instanceName': fake.VOLUME_ID}, 'secondaryScSerialNumber': 102, 'secondaryRole': 'Activated'} mock_is_live_vol.return_value = True mock_get_live_vol.return_value = sclivevol props = { 'access_mode': 'rw', 'target_discovered': False, 'target_iqn': u'iqn:1', 'target_iqns': [u'iqn:1', u'iqn:2'], 'target_lun': 1, 'target_luns': [1, 1], 'target_portal': u'192.168.1.21:3260', 'target_portals': [u'192.168.1.21:3260', 
u'192.168.1.22:3260'] } mock_find_iscsi_props.return_value = props ret = self.driver.initialize_connection(volume, connector) expected = {'data': props, 'driver_volume_type': 'iscsi'} expected['data']['discard'] = True self.assertEqual(expected, ret) self.assertFalse(mock_initialize_secondary.called) @mock.patch.object(storagecenter_iscsi.SCISCSIDriver, '_get_replication_specs', return_value={'enabled': True, 'live': True}) def test_is_live_vol(self, mock_get_replication_specs, mock_close_connection, mock_open_connection, mock_init): volume = {'id': fake.VOLUME_ID, 'provider_id': '101.1'} ret = self.driver._is_live_vol(volume) self.assertTrue(ret) @mock.patch.object(storagecenter_iscsi.SCISCSIDriver, '_get_replication_specs', return_value={'enabled': True, 'live': False}) def test_is_live_vol_repl_not_live(self, mock_get_replication_specs, mock_close_connection, mock_open_connection, mock_init): volume = {'id': fake.VOLUME_ID} ret = self.driver._is_live_vol(volume) self.assertFalse(ret) @mock.patch.object(storagecenter_iscsi.SCISCSIDriver, '_get_replication_specs', return_value={'enabled': False, 'live': False}) def test_is_live_vol_no_repl(self, mock_get_replication_specs, mock_close_connection, mock_open_connection, mock_init): volume = {'id': fake.VOLUME_ID} ret = self.driver._is_live_vol(volume) self.assertFalse(ret) def test_initialize_secondary(self, mock_close_connection, mock_open_connection, mock_init): sclivevol = {'instanceId': '101.101', 'secondaryVolume': {'instanceId': '102.101', 'instanceName': fake.VOLUME_ID}, 'secondaryScSerialNumber': 102} mock_api = mock.MagicMock() mock_api.find_server = mock.MagicMock(return_value=self.SCSERVER) mock_api.map_secondary_volume = mock.MagicMock( return_value=self.VOLUME) mock_api.find_iscsi_properties = mock.MagicMock( return_value=self.ISCSI_PROPERTIES) mock_api.get_volume = mock.MagicMock(return_value=self.VOLUME) ret = self.driver.initialize_secondary(mock_api, sclivevol, 'iqn') self.assertEqual(self.ISCSI_PROPERTIES, ret) def test_initialize_secondary_create_server(self, mock_close_connection, mock_open_connection, mock_init): sclivevol = {'instanceId': '101.101', 'secondaryVolume': {'instanceId': '102.101', 'instanceName': fake.VOLUME_ID}, 'secondaryScSerialNumber': 102} mock_api = mock.MagicMock() mock_api.find_server = mock.MagicMock(return_value=None) mock_api.create_server = mock.MagicMock(return_value=self.SCSERVER) mock_api.map_secondary_volume = mock.MagicMock( return_value=self.VOLUME) mock_api.find_iscsi_properties = mock.MagicMock( return_value=self.ISCSI_PROPERTIES) mock_api.get_volume = mock.MagicMock(return_value=self.VOLUME) ret = self.driver.initialize_secondary(mock_api, sclivevol, 'iqn') self.assertEqual(self.ISCSI_PROPERTIES, ret) def test_initialize_secondary_no_server(self, mock_close_connection, mock_open_connection, mock_init): sclivevol = {'instanceId': '101.101', 'secondaryVolume': {'instanceId': '102.101', 'instanceName': fake.VOLUME_ID}, 'secondaryScSerialNumber': 102} mock_api = mock.MagicMock() mock_api.find_server = mock.MagicMock(return_value=None) mock_api.create_server = mock.MagicMock(return_value=None) expected = {'target_discovered': False, 'target_iqn': None, 'target_iqns': [], 'target_portal': None, 'target_portals': [], 'target_lun': None, 'target_luns': [], } ret = self.driver.initialize_secondary(mock_api, sclivevol, 'iqn') self.assertEqual(expected, ret) def test_initialize_secondary_map_fail(self, mock_close_connection, mock_open_connection, mock_init): sclivevol = {'instanceId': '101.101', 
'secondaryVolume': {'instanceId': '102.101', 'instanceName': fake.VOLUME_ID}, 'secondaryScSerialNumber': 102} mock_api = mock.MagicMock() mock_api.find_server = mock.MagicMock(return_value=self.SCSERVER) mock_api.map_secondary_volume = mock.MagicMock(return_value=None) expected = {'target_discovered': False, 'target_iqn': None, 'target_iqns': [], 'target_portal': None, 'target_portals': [], 'target_lun': None, 'target_luns': [], } ret = self.driver.initialize_secondary(mock_api, sclivevol, 'iqn') self.assertEqual(expected, ret) def test_initialize_secondary_vol_not_found(self, mock_close_connection, mock_open_connection, mock_init): sclivevol = {'instanceId': '101.101', 'secondaryVolume': {'instanceId': '102.101', 'instanceName': fake.VOLUME_ID}, 'secondaryScSerialNumber': 102} mock_api = mock.MagicMock() mock_api.find_server = mock.MagicMock(return_value=self.SCSERVER) mock_api.map_secondary_volume = mock.MagicMock( return_value=self.VOLUME) mock_api.get_volume = mock.MagicMock(return_value=None) expected = {'target_discovered': False, 'target_iqn': None, 'target_iqns': [], 'target_portal': None, 'target_portals': [], 'target_lun': None, 'target_luns': [], } ret = self.driver.initialize_secondary(mock_api, sclivevol, 'iqn') self.assertEqual(expected, ret) def test_terminate_secondary(self, mock_close_connection, mock_open_connection, mock_init): sclivevol = {'instanceId': '101.101', 'secondaryVolume': {'instanceId': '102.101', 'instanceName': fake.VOLUME_ID}, 'secondaryScSerialNumber': 102} mock_api = mock.MagicMock() mock_api.find_server = mock.MagicMock(return_value=self.SCSERVER) mock_api.get_volume = mock.MagicMock(return_value=self.VOLUME) mock_api.unmap_volume = mock.MagicMock() self.driver.terminate_secondary(mock_api, sclivevol, 'iqn') mock_api.find_server.assert_called_once_with('iqn', 102) mock_api.get_volume.assert_called_once_with('102.101') mock_api.unmap_volume.assert_called_once_with(self.VOLUME, self.SCSERVER) @mock.patch.object(storagecenter_api.SCApi, 'find_volume') @mock.patch.object(storagecenter_api.SCApi, 'unmap_all') @mock.patch.object(storagecenter_iscsi.SCISCSIDriver, '_is_live_vol') def test_force_detach(self, mock_is_live_vol, mock_unmap_all, mock_find_volume, mock_close_connection, mock_open_connection, mock_init): mock_is_live_vol.return_value = False scvol = {'instandId': '12345.1'} mock_find_volume.return_value = scvol mock_unmap_all.return_value = True volume = {'id': fake.VOLUME_ID} res = self.driver.force_detach(volume) mock_unmap_all.assert_called_once_with(scvol) self.assertTrue(res) mock_unmap_all.assert_called_once_with(scvol) @mock.patch.object(storagecenter_api.SCApi, 'find_volume') @mock.patch.object(storagecenter_api.SCApi, 'unmap_all') @mock.patch.object(storagecenter_iscsi.SCISCSIDriver, '_is_live_vol') def test_force_detach_fail(self, mock_is_live_vol, mock_unmap_all, mock_find_volume, mock_close_connection, mock_open_connection, mock_init): mock_is_live_vol.return_value = False scvol = {'instandId': '12345.1'} mock_find_volume.return_value = scvol mock_unmap_all.return_value = False volume = {'id': fake.VOLUME_ID} res = self.driver.force_detach(volume) mock_unmap_all.assert_called_once_with(scvol) self.assertFalse(res) mock_unmap_all.assert_called_once_with(scvol) @mock.patch.object(storagecenter_api.SCApi, 'find_volume') @mock.patch.object(storagecenter_api.SCApi, 'unmap_all') @mock.patch.object(storagecenter_iscsi.SCISCSIDriver, '_is_live_vol') @mock.patch.object(storagecenter_iscsi.SCISCSIDriver, 'terminate_secondary') 
@mock.patch.object(storagecenter_api.SCApi, 'get_live_volume') def test_force_detach_lv(self, mock_get_live_volume, mock_terminate_secondary, mock_is_live_vol, mock_unmap_all, mock_find_volume, mock_close_connection, mock_open_connection, mock_init): mock_is_live_vol.return_value = True scvol = {'instandId': '12345.1'} mock_find_volume.return_value = scvol sclivevol = {'instandId': '12345.1.0'} mock_get_live_volume.return_value = sclivevol mock_terminate_secondary.return_value = True volume = {'id': fake.VOLUME_ID} mock_unmap_all.return_value = True res = self.driver.force_detach(volume) mock_unmap_all.assert_called_once_with(scvol) self.assertTrue(res) self.assertEqual(1, mock_terminate_secondary.call_count) mock_unmap_all.assert_called_once_with(scvol) @mock.patch.object(storagecenter_api.SCApi, 'find_volume') @mock.patch.object(storagecenter_iscsi.SCISCSIDriver, '_is_live_vol') def test_force_detach_vol_not_found(self, mock_is_live_vol, mock_find_volume, mock_close_connection, mock_open_connection, mock_init): mock_is_live_vol.return_value = False mock_find_volume.return_value = None volume = {'id': fake.VOLUME_ID} res = self.driver.force_detach(volume) self.assertFalse(res) @mock.patch.object(storagecenter_api.SCApi, 'find_server', return_value=SCSERVER) @mock.patch.object(storagecenter_api.SCApi, 'find_volume', return_value=VOLUME) @mock.patch.object(storagecenter_api.SCApi, 'unmap_volume', return_value=True) def test_terminate_connection(self, mock_unmap_volume, mock_find_volume, mock_find_server, mock_close_connection, mock_open_connection, mock_init): volume = {'id': fake.VOLUME_ID} connector = self.connector res = self.driver.terminate_connection(volume, connector) mock_unmap_volume.assert_called_once_with(self.VOLUME, self.SCSERVER) self.assertIsNone(res, 'None expected') @mock.patch.object(storagecenter_iscsi.SCISCSIDriver, 'force_detach') def test_terminate_connection_no_connector(self, mock_force_detach, mock_close_connection, mock_open_connection, mock_init): volume = {'id': fake.VOLUME_ID} self.driver.terminate_connection(volume, None) mock_force_detach.assert_called_once_with(volume) @mock.patch.object(storagecenter_api.SCApi, 'find_server', return_value=SCSERVER) @mock.patch.object(storagecenter_api.SCApi, 'find_volume', return_value=VOLUME) @mock.patch.object(storagecenter_api.SCApi, 'unmap_volume', return_value=True) @mock.patch.object(storagecenter_iscsi.SCISCSIDriver, '_is_live_vol') @mock.patch.object(storagecenter_iscsi.SCISCSIDriver, 'terminate_secondary') @mock.patch.object(storagecenter_api.SCApi, 'get_live_volume') def test_terminate_connection_live_volume(self, mock_get_live_vol, mock_terminate_secondary, mock_is_live_vol, mock_unmap_volume, mock_find_volume, mock_find_server, mock_close_connection, mock_open_connection, mock_init): volume = {'id': fake.VOLUME_ID} sclivevol = {'instanceId': '101.101', 'secondaryVolume': {'instanceId': '102.101', 'instanceName': fake.VOLUME_ID}, 'secondaryScSerialNumber': 102, 'secondaryRole': 'Secondary'} mock_is_live_vol.return_value = True mock_get_live_vol.return_value = sclivevol connector = self.connector res = self.driver.terminate_connection(volume, connector) mock_unmap_volume.assert_called_once_with(self.VOLUME, self.SCSERVER) self.assertIsNone(res, 'None expected') self.assertTrue(mock_terminate_secondary.called) @mock.patch.object(storagecenter_api.SCApi, 'find_volume', return_value=VOLUME) @mock.patch.object(storagecenter_api.SCApi, 'unmap_all', return_value=True) def test_terminate_connection_no_server(self, 
mock_unmap_all, mock_find_volume, mock_close_connection, mock_open_connection, mock_init): volume = {'id': fake.VOLUME_ID, 'provider_id': '101.101'} connector = {'initiator': ''} res = self.driver.terminate_connection(volume, connector) mock_find_volume.assert_called_once_with(fake.VOLUME_ID, '101.101', False) mock_unmap_all.assert_called_once_with(self.VOLUME) self.assertIsNone(res) @mock.patch.object(storagecenter_api.SCApi, 'find_server', return_value=SCSERVER) @mock.patch.object(storagecenter_api.SCApi, 'find_volume', return_value=None) @mock.patch.object(storagecenter_api.SCApi, 'unmap_volume', return_value=True) def test_terminate_connection_no_volume(self, mock_unmap_volume, mock_find_volume, mock_find_server, mock_close_connection, mock_open_connection, mock_init): volume = {'id': fake.VOLUME_ID} connector = {'initiator': 'fake'} self.assertRaises(exception.VolumeBackendAPIException, self.driver.terminate_connection, volume, connector) @mock.patch.object(storagecenter_api.SCApi, 'find_server', return_value=SCSERVER) @mock.patch.object(storagecenter_api.SCApi, 'find_volume', return_value=VOLUME) @mock.patch.object(storagecenter_api.SCApi, 'unmap_volume', return_value=False) def test_terminate_connection_failure(self, mock_unmap_volume, mock_find_volume, mock_find_server, mock_close_connection, mock_open_connection, mock_init): volume = {'id': fake.VOLUME_ID} connector = {'initiator': 'fake'} self.assertRaises(exception.VolumeBackendAPIException, self.driver.terminate_connection, volume, connector) @mock.patch.object(storagecenter_api.SCApi, 'find_server', return_value=SCSERVER) @mock.patch.object(storagecenter_api.SCApi, 'find_volume', return_value=VOLUME) @mock.patch.object(storagecenter_api.SCApi, 'unmap_volume', return_value=True) def test_terminate_connection_multiattached_host(self, mock_unmap_volume, mock_find_volume, mock_find_server, mock_close_connection, mock_open_connection, mock_init): connector = self.connector attachment1 = fake_volume.volume_attachment_ovo(self._context) attachment1.connector = connector attachment1.attached_host = connector['host'] attachment1.attach_status = 'attached' attachment2 = fake_volume.volume_attachment_ovo(self._context) attachment2.connector = connector attachment2.attached_host = connector['host'] attachment2.attach_status = 'attached' vol = fake_volume.fake_volume_obj(self._context) vol.multiattach = True vol.volume_attachment.objects.append(attachment1) vol.volume_attachment.objects.append(attachment2) self.driver.terminate_connection(vol, connector) mock_unmap_volume.assert_not_called() @mock.patch.object(storagecenter_api.SCApi, 'find_server', return_value=SCSERVER) @mock.patch.object(storagecenter_api.SCApi, 'find_volume', return_value=VOLUME) @mock.patch.object(storagecenter_api.SCApi, 'unmap_volume', return_value=True) def test_terminate_connection_multiattached_diffhost(self, mock_unmap_volume, mock_find_volume, mock_find_server, mock_close_connection, mock_open_connection, mock_init): connector = self.connector attachment1 = fake_volume.volume_attachment_ovo(self._context) attachment1.connector = connector attachment1.attached_host = connector['host'] attachment1.attach_status = 'attached' attachment2 = fake_volume.volume_attachment_ovo(self._context) attachment2.connector = connector attachment2.attached_host = 'host2' attachment2.attach_status = 'attached' vol = fake_volume.fake_volume_obj(self._context) vol.multiattach = True vol.volume_attachment.objects.append(attachment1) vol.volume_attachment.objects.append(attachment2) 
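# attachment1 is on this connector's host while attachment2 is on 'host2',
# so from this host's point of view this is the last attachment and the
# driver is expected to unmap the volume exactly once (unlike the same-host
# multiattach case above, where unmapping must be skipped).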
self.driver.terminate_connection(vol, connector) mock_unmap_volume.assert_called_once_with(self.VOLUME, self.SCSERVER) def _simple_volume(self, **kwargs): updates = {'display_name': fake.VOLUME_NAME, 'id': fake.VOLUME_ID, 'provider_id': self.VOLUME[u'instanceId']} updates.update(kwargs) return fake_volume.fake_volume_obj(self._context, **updates) def _simple_snapshot(self, **kwargs): updates = {'id': fake.SNAPSHOT_ID, 'display_name': fake.SNAPSHOT_NAME, 'status': 'available', 'provider_location': None, 'volume_size': 1} updates.update(kwargs) snapshot = fake_snapshot.fake_snapshot_obj(self._context, **updates) volume = self._simple_volume() snapshot.volume = volume return snapshot @mock.patch.object(storagecenter_api.SCApi, 'find_volume', return_value=VOLUME) @mock.patch.object(storagecenter_api.SCApi, 'create_replay', return_value='fake') def test_create_snapshot(self, mock_create_replay, mock_find_volume, mock_close_connection, mock_open_connection, mock_init): provider_id = self.VOLUME[u'instanceId'] snapshot = self._simple_snapshot() expected = {'status': 'available', 'provider_id': provider_id} ret = self.driver.create_snapshot(snapshot) self.assertEqual(expected, ret) @mock.patch.object(storagecenter_api.SCApi, 'find_volume', return_value=None) @mock.patch.object(storagecenter_api.SCApi, 'create_replay', return_value=None) def test_create_snapshot_no_volume(self, mock_create_replay, mock_find_volume, mock_close_connection, mock_open_connection, mock_init): snapshot = self._simple_snapshot() self.assertRaises(exception.VolumeBackendAPIException, self.driver.create_snapshot, snapshot) @mock.patch.object(storagecenter_api.SCApi, 'find_volume', return_value=VOLUME) @mock.patch.object(storagecenter_api.SCApi, 'create_replay', return_value=None) def test_create_snapshot_failure(self, mock_create_replay, mock_find_volume, mock_close_connection, mock_open_connection, mock_init): snapshot = self._simple_snapshot() self.assertRaises(exception.VolumeBackendAPIException, self.driver.create_snapshot, snapshot) @mock.patch.object(storagecenter_iscsi.SCISCSIDriver, '_create_replications') @mock.patch.object(storagecenter_api.SCApi, 'find_replay_profile') @mock.patch.object(storagecenter_api.SCApi, 'find_volume', return_value=VOLUME) @mock.patch.object(storagecenter_api.SCApi, 'find_replay', return_value='fake') @mock.patch.object(storagecenter_api.SCApi, 'create_view_volume', return_value=VOLUME) def test_create_volume_from_snapshot(self, mock_create_view_volume, mock_find_replay, mock_find_volume, mock_find_replay_profile, mock_create_replications, mock_close_connection, mock_open_connection, mock_init): model_update = {'something': 'something'} mock_create_replications.return_value = model_update volume = {'id': fake.VOLUME_ID, 'size': 1} snapshot = {'id': fake.SNAPSHOT_ID, 'volume_id': fake.VOLUME_ID, 'volume_size': 1} res = self.driver.create_volume_from_snapshot(volume, snapshot) mock_create_view_volume.assert_called_once_with( fake.VOLUME_ID, 'fake', None, None, None, None) self.assertTrue(mock_find_replay.called) self.assertTrue(mock_find_volume.called) self.assertFalse(mock_find_replay_profile.called) # This just makes sure that we created self.assertTrue(mock_create_replications.called) self.assertEqual(model_update, res) @mock.patch.object(storagecenter_iscsi.SCISCSIDriver, '_create_replications') @mock.patch.object(storagecenter_api.SCApi, 'find_replay_profile') @mock.patch.object(storagecenter_api.SCApi, 'find_volume') @mock.patch.object(storagecenter_api.SCApi, 'find_replay') 
@mock.patch.object(storagecenter_api.SCApi, 'create_view_volume') @mock.patch.object(storagecenter_iscsi.SCISCSIDriver, '_get_volume_extra_specs') def test_create_volume_from_snapshot_with_profiles( self, mock_get_volume_extra_specs, mock_create_view_volume, mock_find_replay, mock_find_volume, mock_find_replay_profile, mock_create_replications, mock_close_connection, mock_open_connection, mock_init): mock_get_volume_extra_specs.return_value = { 'storagetype:replayprofiles': 'replayprofiles', 'storagetype:volumeqos': 'volumeqos', 'storagetype:groupqos': 'groupqos', 'storagetype:datareductionprofile': 'drprofile'} mock_create_view_volume.return_value = self.VOLUME mock_find_replay.return_value = 'fake' mock_find_volume.return_value = self.VOLUME model_update = {'something': 'something'} mock_create_replications.return_value = model_update volume = {'id': fake.VOLUME_ID, 'size': 1} snapshot = {'id': fake.SNAPSHOT_ID, 'volume_id': fake.VOLUME_ID, 'volume_size': 1} res = self.driver.create_volume_from_snapshot(volume, snapshot) mock_create_view_volume.assert_called_once_with( fake.VOLUME_ID, 'fake', 'replayprofiles', 'volumeqos', 'groupqos', 'drprofile') self.assertTrue(mock_find_replay.called) self.assertTrue(mock_find_volume.called) self.assertFalse(mock_find_replay_profile.called) # This just makes sure that we created self.assertTrue(mock_create_replications.called) self.assertEqual(model_update, res) @mock.patch.object(storagecenter_iscsi.SCISCSIDriver, '_create_replications') @mock.patch.object(storagecenter_api.SCApi, 'find_replay_profile') @mock.patch.object(storagecenter_api.SCApi, 'find_volume', return_value=VOLUME) @mock.patch.object(storagecenter_api.SCApi, 'find_replay', return_value='fake') @mock.patch.object(storagecenter_api.SCApi, 'create_view_volume', return_value=VOLUME) @mock.patch.object(storagecenter_api.SCApi, 'expand_volume', return_value=VOLUME) def test_create_volume_from_snapshot_expand(self, mock_expand_volume, mock_create_view_volume, mock_find_replay, mock_find_volume, mock_find_replay_profile, mock_create_replications, mock_close_connection, mock_open_connection, mock_init): model_update = {'something': 'something'} mock_create_replications.return_value = model_update volume = {'id': fake.VOLUME_ID, 'size': 2} snapshot = {'id': fake.SNAPSHOT_ID, 'volume_id': fake.VOLUME_ID, 'volume_size': 1} res = self.driver.create_volume_from_snapshot(volume, snapshot) mock_create_view_volume.assert_called_once_with( fake.VOLUME_ID, 'fake', None, None, None, None) self.assertTrue(mock_find_replay.called) self.assertTrue(mock_find_volume.called) self.assertFalse(mock_find_replay_profile.called) # This just makes sure that we created self.assertTrue(mock_create_replications.called) mock_expand_volume.assert_called_once_with(self.VOLUME, 2) self.assertEqual(model_update, res) @mock.patch.object(storagecenter_iscsi.SCISCSIDriver, '_create_replications') @mock.patch.object(storagecenter_api.SCApi, 'find_replay_profile', return_value='fake') @mock.patch.object(storagecenter_api.SCApi, 'update_cg_volumes') @mock.patch.object(storagecenter_api.SCApi, 'find_volume', return_value=VOLUME) @mock.patch.object(storagecenter_api.SCApi, 'find_replay', return_value='fake') @mock.patch.object(storagecenter_api.SCApi, 'create_view_volume', return_value=VOLUME) def test_create_volume_from_snapshot_cg(self, mock_create_view_volume, mock_find_replay, mock_find_volume, mock_update_cg_volumes, mock_find_replay_profile, mock_create_replications, mock_close_connection, mock_open_connection, mock_init): 
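# The volume carries a group_id, so in addition to creating the view volume
# from the replay, the driver is expected to look up the group's replay
# profile and add the new volume to it via update_cg_volumes.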
model_update = {'something': 'something'} mock_create_replications.return_value = model_update volume = {'id': fake.VOLUME_ID, 'group_id': fake.GROUP_ID, 'size': 1} snapshot = {'id': fake.SNAPSHOT_ID, 'volume_id': fake.VOLUME_ID, 'volume_size': 1} res = self.driver.create_volume_from_snapshot(volume, snapshot) mock_create_view_volume.assert_called_once_with( fake.VOLUME_ID, 'fake', None, None, None, None) self.assertTrue(mock_find_replay.called) self.assertTrue(mock_find_volume.called) self.assertTrue(mock_find_replay_profile.called) self.assertTrue(mock_update_cg_volumes.called) # This just makes sure that we created self.assertTrue(mock_create_replications.called) self.assertEqual(model_update, res) @mock.patch.object(storagecenter_api.SCApi, 'find_volume', return_value=VOLUME) @mock.patch.object(storagecenter_api.SCApi, 'find_replay', return_value='fake') @mock.patch.object(storagecenter_api.SCApi, 'find_replay_profile') @mock.patch.object(storagecenter_api.SCApi, 'create_view_volume', return_value=None) @mock.patch.object(storagecenter_api.SCApi, 'delete_volume') def test_create_volume_from_snapshot_failed(self, mock_delete_volume, mock_create_view_volume, mock_find_replay_profile, mock_find_replay, mock_find_volume, mock_close_connection, mock_open_connection, mock_init): volume = {'id': fake.VOLUME_ID} snapshot = {'id': fake.SNAPSHOT_ID, 'volume_id': fake.VOLUME_ID} self.assertRaises(exception.VolumeBackendAPIException, self.driver.create_volume_from_snapshot, volume, snapshot) self.assertTrue(mock_find_replay.called) self.assertTrue(mock_find_volume.called) self.assertFalse(mock_find_replay_profile.called) self.assertTrue(mock_delete_volume.called) @mock.patch.object(storagecenter_iscsi.SCISCSIDriver, '_create_replications') @mock.patch.object(storagecenter_api.SCApi, 'find_volume', return_value=VOLUME) @mock.patch.object(storagecenter_api.SCApi, 'find_replay', return_value='fake') @mock.patch.object(storagecenter_api.SCApi, 'create_view_volume', return_value=VOLUME) @mock.patch.object(storagecenter_api.SCApi, 'delete_volume') def test_create_volume_from_snapshot_failed_replication( self, mock_delete_volume, mock_create_view_volume, mock_find_replay, mock_find_volume, mock_create_replications, mock_close_connection, mock_open_connection, mock_init): mock_create_replications.side_effect = ( exception.VolumeBackendAPIException(data='abc')) volume = {'id': fake.VOLUME_ID, 'size': 1} snapshot = {'id': fake.SNAPSHOT_ID, 'volume_id': fake.VOLUME_ID, 'volume_size': 1} self.assertRaises(exception.VolumeBackendAPIException, self.driver.create_volume_from_snapshot, volume, snapshot) self.assertTrue(mock_delete_volume.called) @mock.patch.object(storagecenter_api.SCApi, 'find_volume', return_value=VOLUME) @mock.patch.object(storagecenter_api.SCApi, 'find_replay', return_value=None) @mock.patch.object(storagecenter_api.SCApi, 'create_view_volume', return_value=VOLUME) def test_create_volume_from_snapshot_no_replay(self, mock_create_view_volume, mock_find_replay, mock_find_volume, mock_close_connection, mock_open_connection, mock_init): volume = {'id': fake.VOLUME_ID} snapshot = {'id': fake.SNAPSHOT_ID, 'volume_id': fake.VOLUME2_ID} self.assertRaises(exception.VolumeBackendAPIException, self.driver.create_volume_from_snapshot, volume, snapshot) self.assertTrue(mock_find_volume.called) self.assertTrue(mock_find_replay.called) self.assertFalse(mock_create_view_volume.called) @mock.patch.object(storagecenter_iscsi.SCISCSIDriver, '_create_replications', return_value={}) 
@mock.patch.object(storagecenter_api.SCApi, 'find_volume', return_value=VOLUME) @mock.patch.object(storagecenter_api.SCApi, 'create_cloned_volume', return_value=VOLUME) def test_create_cloned_volume(self, mock_create_cloned_volume, mock_find_volume, mock_create_replications, mock_close_connection, mock_open_connection, mock_init): provider_id = self.VOLUME[u'instanceId'] volume = {'id': fake.VOLUME_ID, 'size': 1} src_vref = {'id': fake.VOLUME2_ID, 'size': 1} ret = self.driver.create_cloned_volume(volume, src_vref) mock_create_cloned_volume.assert_called_once_with( fake.VOLUME_ID, self.VOLUME, None, None, None, None, None) self.assertTrue(mock_find_volume.called) self.assertEqual({'provider_id': provider_id}, ret) @mock.patch.object(storagecenter_iscsi.SCISCSIDriver, '_create_replications') @mock.patch.object(storagecenter_api.SCApi, 'find_volume') @mock.patch.object(storagecenter_api.SCApi, 'create_cloned_volume') @mock.patch.object(storagecenter_iscsi.SCISCSIDriver, '_get_volume_extra_specs') def test_create_cloned_volume_with_profiles( self, mock_get_volume_extra_specs, mock_create_cloned_volume, mock_find_volume, mock_create_replications, mock_close_connection, mock_open_connection, mock_init): mock_get_volume_extra_specs.return_value = { 'storagetype:storageprofile': 'storageprofile', 'storagetype:replayprofiles': 'replayprofiles', 'storagetype:volumeqos': 'volumeqos', 'storagetype:groupqos': 'groupqos', 'storagetype:datareductionprofile': 'drprofile'} mock_find_volume.return_value = self.VOLUME mock_create_cloned_volume.return_value = self.VOLUME mock_create_replications.return_value = {} provider_id = self.VOLUME[u'instanceId'] volume = {'id': fake.VOLUME_ID, 'size': 1} src_vref = {'id': fake.VOLUME2_ID, 'size': 1} ret = self.driver.create_cloned_volume(volume, src_vref) mock_create_cloned_volume.assert_called_once_with( fake.VOLUME_ID, self.VOLUME, 'storageprofile', 'replayprofiles', 'volumeqos', 'groupqos', 'drprofile') self.assertTrue(mock_find_volume.called) self.assertEqual({'provider_id': provider_id}, ret) @mock.patch.object(storagecenter_iscsi.SCISCSIDriver, '_create_replications', return_value={}) @mock.patch.object(storagecenter_api.SCApi, 'find_volume', return_value=VOLUME) @mock.patch.object(storagecenter_api.SCApi, 'create_cloned_volume', return_value=VOLUME) @mock.patch.object(storagecenter_api.SCApi, 'expand_volume', return_value=VOLUME) def test_create_cloned_volume_expand(self, mock_expand_volume, mock_create_cloned_volume, mock_find_volume, mock_create_replications, mock_close_connection, mock_open_connection, mock_init): provider_id = self.VOLUME[u'instanceId'] volume = {'id': fake.VOLUME_ID, 'size': 2} src_vref = {'id': fake.VOLUME2_ID, 'size': 1} ret = self.driver.create_cloned_volume(volume, src_vref) mock_create_cloned_volume.assert_called_once_with( fake.VOLUME_ID, self.VOLUME, None, None, None, None, None) self.assertTrue(mock_find_volume.called) self.assertEqual({'provider_id': provider_id}, ret) self.assertTrue(mock_expand_volume.called) @mock.patch.object(storagecenter_iscsi.SCISCSIDriver, '_create_replications', return_value={}) @mock.patch.object(storagecenter_api.SCApi, 'find_volume', return_value=VOLUME) @mock.patch.object(storagecenter_api.SCApi, 'create_cloned_volume', return_value=None) @mock.patch.object(storagecenter_api.SCApi, 'delete_volume') def test_create_cloned_volume_failed(self, mock_delete_volume, mock_create_cloned_volume, mock_find_volume, mock_create_replications, mock_close_connection, mock_open_connection, mock_init): volume = {'id': 
fake.VOLUME_ID} src_vref = {'id': fake.VOLUME2_ID} self.assertRaises(exception.VolumeBackendAPIException, self.driver.create_cloned_volume, volume, src_vref) self.assertTrue(mock_delete_volume.called) @mock.patch.object(storagecenter_iscsi.SCISCSIDriver, '_create_replications', return_value={}) @mock.patch.object(storagecenter_api.SCApi, 'find_volume', return_value=VOLUME) @mock.patch.object(storagecenter_api.SCApi, 'create_cloned_volume', return_value=VOLUME) @mock.patch.object(storagecenter_api.SCApi, 'delete_volume') @mock.patch.object(storagecenter_api.SCApi, 'expand_volume') def test_create_cloned_volume_expand_failed(self, mock_expand_volume, mock_delete_volume, mock_create_cloned_volume, mock_find_volume, mock_create_replications, mock_close_connection, mock_open_connection, mock_init): volume = {'id': fake.VOLUME_ID, 'size': 2} src_vref = {'id': fake.VOLUME2_ID, 'size': 1} mock_create_replications.side_effect = ( exception.VolumeBackendAPIException(data='abc')) self.assertRaises(exception.VolumeBackendAPIException, self.driver.create_cloned_volume, volume, src_vref) self.assertTrue(mock_delete_volume.called) @mock.patch.object(storagecenter_api.SCApi, 'delete_volume') @mock.patch.object(storagecenter_iscsi.SCISCSIDriver, '_create_replications') @mock.patch.object(storagecenter_api.SCApi, 'find_volume', return_value=VOLUME) @mock.patch.object(storagecenter_api.SCApi, 'create_cloned_volume', return_value=VOLUME) def test_create_cloned_volume_replication_fail(self, mock_create_cloned_volume, mock_find_volume, mock_create_replications, mock_delete_volume, mock_close_connection, mock_open_connection, mock_init): mock_create_replications.side_effect = ( exception.VolumeBackendAPIException(data='abc')) volume = {'id': fake.VOLUME_ID, 'size': 1} src_vref = {'id': fake.VOLUME2_ID, 'size': 1} self.assertRaises(exception.VolumeBackendAPIException, self.driver.create_cloned_volume, volume, src_vref) self.assertTrue(mock_delete_volume.called) @mock.patch.object(storagecenter_api.SCApi, 'find_replay_profile', return_value='fake') @mock.patch.object(storagecenter_api.SCApi, 'update_cg_volumes') @mock.patch.object(storagecenter_api.SCApi, 'find_volume', return_value=VOLUME) @mock.patch.object(storagecenter_api.SCApi, 'create_cloned_volume', return_value=VOLUME) def test_create_cloned_volume_consistency_group(self, mock_create_cloned_volume, mock_find_volume, mock_update_cg_volumes, mock_find_replay_profile, mock_close_connection, mock_open_connection, mock_init): volume = {'id': fake.VOLUME_ID, 'group_id': fake.CONSISTENCY_GROUP_ID, 'size': 1} src_vref = {'id': fake.VOLUME2_ID, 'size': 1} self.driver.create_cloned_volume(volume, src_vref) mock_create_cloned_volume.assert_called_once_with( fake.VOLUME_ID, self.VOLUME, None, None, None, None, None) self.assertTrue(mock_find_volume.called) self.assertTrue(mock_find_replay_profile.called) self.assertTrue(mock_update_cg_volumes.called) @mock.patch.object(storagecenter_api.SCApi, 'find_volume', return_value=None) @mock.patch.object(storagecenter_api.SCApi, 'create_cloned_volume', return_value=VOLUME) def test_create_cloned_volume_no_volume(self, mock_create_cloned_volume, mock_find_volume, mock_close_connection, mock_open_connection, mock_init): volume = {'id': fake.VOLUME_ID} src_vref = {'id': fake.VOLUME2_ID} self.assertRaises(exception.VolumeBackendAPIException, self.driver.create_cloned_volume, volume, src_vref) self.assertTrue(mock_find_volume.called) self.assertFalse(mock_create_cloned_volume.called) @mock.patch.object(storagecenter_api.SCApi, 
'find_volume', return_value=VOLUME) @mock.patch.object(storagecenter_api.SCApi, 'delete_replay', return_value=True) def test_delete_snapshot(self, mock_delete_replay, mock_find_volume, mock_close_connection, mock_open_connection, mock_init): snapshot = {'volume_id': fake.VOLUME_ID, 'id': fake.SNAPSHOT_ID} self.driver.delete_snapshot(snapshot) mock_delete_replay.assert_called_once_with( self.VOLUME, fake.SNAPSHOT_ID) @mock.patch.object(storagecenter_api.SCApi, 'find_volume', return_value=None) @mock.patch.object(storagecenter_api.SCApi, 'delete_replay', return_value=True) def test_delete_snapshot_no_volume(self, mock_delete_replay, mock_find_volume, mock_close_connection, mock_open_connection, mock_init): snapshot = {'volume_id': fake.VOLUME_ID, 'id': fake.SNAPSHOT_ID} self.assertRaises(exception.VolumeBackendAPIException, self.driver.delete_snapshot, snapshot) @mock.patch.object(storagecenter_api.SCApi, 'find_volume', return_value=VOLUME) def test_ensure_export(self, mock_find_volume, mock_close_connection, mock_open_connection, mock_init): context = {} volume = {'id': fake.VOLUME_ID, 'provider_id': 'fake'} self.driver.ensure_export(context, volume) mock_find_volume.assert_called_once_with(fake.VOLUME_ID, 'fake', False) @mock.patch.object(storagecenter_api.SCApi, 'find_volume', return_value=None) def test_ensure_export_failed(self, mock_find_volume, mock_close_connection, mock_open_connection, mock_init): context = {} volume = {'id': fake.VOLUME_ID} self.assertRaises(exception.VolumeBackendAPIException, self.driver.ensure_export, context, volume) mock_find_volume.assert_called_once_with(fake.VOLUME_ID, None, False) @mock.patch.object(storagecenter_api.SCApi, 'find_volume', return_value=None) def test_ensure_export_no_volume(self, mock_find_volume, mock_close_connection, mock_open_connection, mock_init): context = {} volume = {'id': fake.VOLUME_ID, 'provider_id': 'fake'} self.assertRaises(exception.VolumeBackendAPIException, self.driver.ensure_export, context, volume) mock_find_volume.assert_called_once_with(fake.VOLUME_ID, 'fake', False) @mock.patch.object(storagecenter_api.SCApi, 'find_volume', return_value=VOLUME) @mock.patch.object(storagecenter_api.SCApi, 'expand_volume', return_value=VOLUME) def test_extend_volume(self, mock_expand_volume, mock_find_volume, mock_close_connection, mock_open_connection, mock_init): volume = {'id': fake.VOLUME_ID, 'size': 1} new_size = 2 self.driver.extend_volume(volume, new_size) mock_find_volume.assert_called_once_with(fake.VOLUME_ID, None) mock_expand_volume.assert_called_once_with(self.VOLUME, new_size) @mock.patch.object(storagecenter_api.SCApi, 'find_volume', return_value=None) @mock.patch.object(storagecenter_api.SCApi, 'expand_volume', return_value=None) def test_extend_volume_no_volume(self, mock_expand_volume, mock_find_volume, mock_close_connection, mock_open_connection, mock_init): volume = {'id': fake.VOLUME_ID, 'provider_id': 'fake', 'size': 1} new_size = 2 self.assertRaises(exception.VolumeBackendAPIException, self.driver.extend_volume, volume, new_size) mock_find_volume.assert_called_once_with(fake.VOLUME_ID, 'fake') @mock.patch.object(storagecenter_api.SCApi, 'find_volume', return_value=VOLUME) @mock.patch.object(storagecenter_api.SCApi, 'expand_volume', return_value=None) def test_extend_volume_fail(self, mock_expand_volume, mock_find_volume, mock_close_connection, mock_open_connection, mock_init): volume = {'id': fake.VOLUME_ID, 'size': 1} new_size = 2 self.assertRaises(exception.VolumeBackendAPIException, self.driver.extend_volume, 
volume, new_size) mock_find_volume.assert_called_once_with(fake.VOLUME_ID, None) mock_expand_volume.assert_called_once_with(self.VOLUME, new_size) @mock.patch.object(storagecenter_api.SCApi, 'get_storage_usage', return_value={'availableSpace': 100, 'freeSpace': 50}) def test_update_volume_stats_with_refresh(self, mock_get_storage_usage, mock_close_connection, mock_open_connection, mock_init): stats = self.driver.get_volume_stats(True) self.assertEqual('iSCSI', stats['storage_protocol']) self.assertTrue(mock_get_storage_usage.called) @mock.patch.object(storagecenter_api.SCApi, 'get_storage_usage', return_value={'availableSpace': 100, 'freeSpace': 50}) def test_update_volume_stats_with_refresh_and_repl( self, mock_get_storage_usage, mock_close_connection, mock_open_connection, mock_init): backends = self.driver.backends repliation_enabled = self.driver.replication_enabled self.driver.backends = [{'a': 'a'}, {'b': 'b'}, {'c': 'c'}] self.driver.replication_enabled = True stats = self.driver.get_volume_stats(True) self.assertEqual(3, stats['replication_count']) self.assertEqual(['async', 'sync'], stats['replication_type']) self.assertTrue(stats['replication_enabled']) self.assertTrue(mock_get_storage_usage.called) self.driver.backends = backends self.driver.replication_enabled = repliation_enabled @mock.patch.object(storagecenter_api.SCApi, 'get_storage_usage', return_value={'availableSpace': 100, 'freeSpace': 50}) def test_get_volume_stats_no_refresh(self, mock_get_storage_usage, mock_close_connection, mock_open_connection, mock_init): stats = self.driver.get_volume_stats(False) self.assertEqual('iSCSI', stats['storage_protocol']) self.assertFalse(mock_get_storage_usage.called) @mock.patch.object(storagecenter_api.SCApi, 'find_volume', return_value=VOLUME) @mock.patch.object(storagecenter_api.SCApi, 'rename_volume', return_value=True) def test_update_migrated_volume(self, mock_rename_volume, mock_find_volume, mock_close_connection, mock_open_connection, mock_init): volume = {'id': fake.VOLUME_ID} backend_volume = {'id': fake.VOLUME2_ID} model_update = {'_name_id': None, 'provider_id': self.VOLUME['instanceId']} rt = self.driver.update_migrated_volume(None, volume, backend_volume, 'available') mock_rename_volume.assert_called_once_with(self.VOLUME, fake.VOLUME_ID) self.assertEqual(model_update, rt) @mock.patch.object(storagecenter_api.SCApi, 'find_volume', return_value=VOLUME) @mock.patch.object(storagecenter_api.SCApi, 'rename_volume', return_value=False) def test_update_migrated_volume_rename_fail(self, mock_rename_volume, mock_find_volume, mock_close_connection, mock_open_connection, mock_init): volume = {'id': fake.VOLUME_ID} backend_volume = {'id': fake.VOLUME2_ID, '_name_id': fake.VOLUME2_NAME_ID} rt = self.driver.update_migrated_volume(None, volume, backend_volume, 'available') mock_rename_volume.assert_called_once_with(self.VOLUME, fake.VOLUME_ID) self.assertEqual({'_name_id': fake.VOLUME2_NAME_ID}, rt) def test_update_migrated_volume_no_volume_id(self, mock_close_connection, mock_open_connection, mock_init): volume = {'id': None} backend_volume = {'id': fake.VOLUME2_ID, '_name_id': fake.VOLUME2_NAME_ID} rt = self.driver.update_migrated_volume(None, volume, backend_volume, 'available') self.assertEqual({'_name_id': fake.VOLUME2_NAME_ID}, rt) @mock.patch.object(storagecenter_api.SCApi, 'find_volume', return_value=None) def test_update_migrated_volume_no_backend_id(self, mock_find_volume, mock_close_connection, mock_open_connection, mock_init): volume = {'id': fake.VOLUME_ID} 
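# The backend (destination) volume has neither an id nor a _name_id, so
# find_volume is queried with None and no rename can happen; the driver is
# expected to fall back to a model update that keeps _name_id as None.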
backend_volume = {'id': None, '_name_id': None} rt = self.driver.update_migrated_volume(None, volume, backend_volume, 'available') mock_find_volume.assert_called_once_with(None, None) self.assertEqual({'_name_id': None}, rt) @mock.patch.object(storagecenter_api.SCApi, 'create_replay_profile', return_value=SCRPLAYPROFILE) @mock.patch('cinder.volume.volume_utils.is_group_a_cg_snapshot_type', return_value=True) def test_create_group(self, mock_is_cg, mock_create_replay_profile, mock_close_connection, mock_open_connection, mock_init): context = {} group = {'id': fake.GROUP_ID} model_update = self.driver.create_group(context, group) mock_create_replay_profile.assert_called_once_with(fake.GROUP_ID) self.assertEqual({'status': 'available'}, model_update) @mock.patch('cinder.volume.volume_utils.is_group_a_cg_snapshot_type', return_value=False) def test_create_group_not_a_cg(self, mock_is_cg, mock_close_connection, mock_open_connection, mock_init): context = {} group = {'id': fake.GROUP_ID} self.assertRaises(NotImplementedError, self.driver.create_group, context, group) @mock.patch.object(storagecenter_api.SCApi, 'create_replay_profile', return_value=None) @mock.patch('cinder.volume.volume_utils.is_group_a_cg_snapshot_type', return_value=True) def test_create_group_fail(self, mock_is_cg, mock_create_replay_profile, mock_close_connection, mock_open_connection, mock_init): context = {} group = {'id': fake.GROUP_ID} model_update = self.driver.create_group(context, group) mock_create_replay_profile.assert_called_once_with(fake.GROUP_ID) self.assertEqual({'status': 'error'}, model_update) @mock.patch.object(storagecenter_api.SCApi, 'delete_replay_profile') @mock.patch.object(storagecenter_api.SCApi, 'find_replay_profile', return_value=SCRPLAYPROFILE) @mock.patch.object(storagecenter_iscsi.SCISCSIDriver, 'delete_volume') @mock.patch('cinder.volume.volume_utils.is_group_a_cg_snapshot_type', return_value=True) def test_delete_group(self, mock_is_cg, mock_delete_volume, mock_find_replay_profile, mock_delete_replay_profile, mock_close_connection, mock_open_connection, mock_init): volume = {'id': fake.VOLUME_ID} expected_volumes = [{'id': fake.VOLUME_ID, 'status': 'deleted'}] context = {} group = {'id': fake.GROUP_ID, 'status': fields.ConsistencyGroupStatus.DELETED} model_update, volumes = self.driver.delete_group( context, group, [volume]) mock_find_replay_profile.assert_called_once_with(fake.GROUP_ID) mock_delete_replay_profile.assert_called_once_with(self.SCRPLAYPROFILE) mock_delete_volume.assert_called_once_with(volume) self.assertEqual(group['status'], model_update['status']) self.assertEqual(expected_volumes, volumes) @mock.patch('cinder.volume.volume_utils.is_group_a_cg_snapshot_type', return_value=False) def test_delete_group_not_a_cg( self, mock_is_cg, mock_close_connection, mock_open_connection, mock_init): volume = {'id': fake.VOLUME_ID} context = {} group = {'id': fake.GROUP_ID, 'status': fields.ConsistencyGroupStatus.DELETED} self.assertRaises(NotImplementedError, self.driver.delete_group, context, group, [volume]) @mock.patch.object(storagecenter_api.SCApi, 'delete_replay_profile') @mock.patch.object(storagecenter_api.SCApi, 'find_replay_profile', return_value=None) @mock.patch.object(storagecenter_iscsi.SCISCSIDriver, 'delete_volume') @mock.patch('cinder.volume.volume_utils.is_group_a_cg_snapshot_type', return_value=True) def test_delete_group_not_found(self, mock_is_cg, mock_delete_volume, mock_find_replay_profile, mock_delete_replay_profile, mock_close_connection, mock_open_connection, 
mock_init): context = {} group = {'id': fake.GROUP_ID, 'status': fields.ConsistencyGroupStatus.DELETED} model_update, volumes = self.driver.delete_group(context, group, []) mock_find_replay_profile.assert_called_once_with(fake.GROUP_ID) self.assertFalse(mock_delete_replay_profile.called) self.assertFalse(mock_delete_volume.called) self.assertEqual(group['status'], model_update['status']) self.assertEqual([], volumes) @mock.patch.object(storagecenter_api.SCApi, 'update_cg_volumes', return_value=True) @mock.patch.object(storagecenter_api.SCApi, 'find_replay_profile', return_value=SCRPLAYPROFILE) @mock.patch('cinder.volume.volume_utils.is_group_a_cg_snapshot_type', return_value=True) def test_update_group(self, mock_is_cg, mock_find_replay_profile, mock_update_cg_volumes, mock_close_connection, mock_open_connection, mock_init): context = {} group = {'id': fake.GROUP_ID} add_volumes = [{'id': fake.VOLUME_ID}] remove_volumes = [{'id': fake.VOLUME2_ID}] rt1, rt2, rt3 = self.driver.update_group(context, group, add_volumes, remove_volumes) mock_update_cg_volumes.assert_called_once_with(self.SCRPLAYPROFILE, add_volumes, remove_volumes) mock_find_replay_profile.assert_called_once_with(fake.GROUP_ID) self.assertIsNone(rt1) self.assertIsNone(rt2) self.assertIsNone(rt3) @mock.patch('cinder.volume.volume_utils.is_group_a_cg_snapshot_type', return_value=False) def test_update_group_not_a_cg(self, mock_is_cg, mock_close_connection, mock_open_connection, mock_init): context = {} group = {'id': fake.GROUP_ID} add_volumes = [{'id': fake.VOLUME_ID}] remove_volumes = [{'id': fake.VOLUME2_ID}] self.assertRaises(NotImplementedError, self.driver.update_group, context, group, add_volumes, remove_volumes) @mock.patch.object(storagecenter_api.SCApi, 'find_replay_profile', return_value=None) @mock.patch('cinder.volume.volume_utils.is_group_a_cg_snapshot_type', return_value=True) def test_update_group_not_found(self, mock_is_cg, mock_find_replay_profile, mock_close_connection, mock_open_connection, mock_init): context = {} group = {'id': fake.GROUP_ID} add_volumes = [{'id': fake.VOLUME_ID}] remove_volumes = [{'id': fake.VOLUME2_ID}] self.assertRaises(exception.VolumeBackendAPIException, self.driver.update_group, context, group, add_volumes, remove_volumes) mock_find_replay_profile.assert_called_once_with(fake.GROUP_ID) @mock.patch.object(storagecenter_api.SCApi, 'update_cg_volumes', return_value=False) @mock.patch.object(storagecenter_api.SCApi, 'find_replay_profile', return_value=SCRPLAYPROFILE) @mock.patch('cinder.volume.volume_utils.is_group_a_cg_snapshot_type', return_value=True) def test_update_group_error(self, mock_is_cg, mock_find_replay_profile, mock_update_cg_volumes, mock_close_connection, mock_open_connection, mock_init): context = {} group = {'id': fake.GROUP_ID} add_volumes = [{'id': fake.VOLUME_ID}] remove_volumes = [{'id': fake.VOLUME2_ID}] self.assertRaises(exception.VolumeBackendAPIException, self.driver.update_group, context, group, add_volumes, remove_volumes) mock_find_replay_profile.assert_called_once_with(fake.GROUP_ID) mock_update_cg_volumes.assert_called_once_with(self.SCRPLAYPROFILE, add_volumes, remove_volumes) @mock.patch.object(storagecenter_iscsi.SCISCSIDriver, 'update_group') @mock.patch.object(storagecenter_iscsi.SCISCSIDriver, 'create_group') @mock.patch.object(storagecenter_iscsi.SCISCSIDriver, 'create_cloned_volume') @mock.patch('cinder.volume.volume_utils.is_group_a_cg_snapshot_type', return_value=True) def test_create_group_from_src( self, mock_is_cg, mock_create_cloned_volume, 
mock_create_group, mock_update_group, mock_close_connection, mock_open_connection, mock_init): context = {} group = {'id': fake.GROUP2_ID} volumes = [{'id': fake.VOLUME3_ID}, {'id': fake.VOLUME4_ID}] source_group = {'id': fake.GROUP_ID} source_volumes = [{'id': fake.VOLUME_ID}, {'id': fake.VOLUME2_ID}] # create_cloned_volume returns the sc specific provider_id. mock_create_cloned_volume.side_effect = [{'provider_id': '12345.1'}, {'provider_id': '12345.2'}] mock_create_group.return_value = {'status': 'available'} model_update, volumes_model_update = self.driver.create_group_from_src( context, group, volumes, group_snapshot=None, snapshots=None, source_group=source_group, source_vols=source_volumes) expected = [{'id': fake.VOLUME3_ID, 'provider_id': '12345.1', 'status': 'available'}, {'id': fake.VOLUME4_ID, 'provider_id': '12345.2', 'status': 'available'}] self.assertEqual({'status': 'available'}, model_update) self.assertEqual(expected, volumes_model_update) @mock.patch.object(storagecenter_iscsi.SCISCSIDriver, 'update_group') @mock.patch.object(storagecenter_iscsi.SCISCSIDriver, 'create_group') @mock.patch.object(storagecenter_iscsi.SCISCSIDriver, 'create_volume_from_snapshot') @mock.patch('cinder.volume.volume_utils.is_group_a_cg_snapshot_type', return_value=True) def test_create_group_from_src_from_snapshot( self, mock_is_cg, mock_create_volume_from_snapshot, mock_create_group, mock_update_group, mock_close_connection, mock_open_connection, mock_init): context = {} group = {'id': fake.GROUP_ID} volumes = [{'id': fake.VOLUME_ID}, {'id': fake.VOLUME2_ID}] group_snapshot = {'id': fake.GROUP_SNAPSHOT_ID} source_snapshots = [{'id': fake.SNAPSHOT_ID}, {'id': fake.SNAPSHOT2_ID}] # create_volume_from_snapshot returns the sc specific provider_id. mock_create_volume_from_snapshot.side_effect = [ {'provider_id': '12345.1'}, {'provider_id': '12345.2'}] mock_create_group.return_value = {'status': 'available'} model_update, volumes_model_update = self.driver.create_group_from_src( context, group, volumes, group_snapshot=group_snapshot, snapshots=source_snapshots, source_group=None, source_vols=None) expected = [{'id': fake.VOLUME_ID, 'provider_id': '12345.1', 'status': 'available'}, {'id': fake.VOLUME2_ID, 'provider_id': '12345.2', 'status': 'available'}] self.assertEqual({'status': 'available'}, model_update) self.assertEqual(expected, volumes_model_update) def test_create_group_from_src_bad_input( self, mock_close_connection, mock_open_connection, mock_init): context = {} group = {'id': fake.GROUP2_ID} volumes = [{'id': fake.VOLUME3_ID}, {'id': fake.VOLUME4_ID}] self.assertRaises(exception.InvalidInput, self.driver.create_group_from_src, context, group, volumes, None, None, None, None) @mock.patch('cinder.volume.volume_utils.is_group_a_cg_snapshot_type', return_value=False) def test_create_group_from_src_not_a_cg( self, mock_is_cg, mock_close_connection, mock_open_connection, mock_init): context = {} group = {'id': fake.GROUP2_ID} volumes = [{'id': fake.VOLUME3_ID}, {'id': fake.VOLUME4_ID}] source_group = {'id': fake.GROUP_ID} source_volumes = [{'id': fake.VOLUME_ID}, {'id': fake.VOLUME2_ID}] self.assertRaises(NotImplementedError, self.driver.create_group_from_src, context, group, volumes, None, None, source_group, source_volumes) @mock.patch.object(storagecenter_api.SCApi, 'snap_cg_replay', return_value={'instanceId': '100'}) @mock.patch.object(storagecenter_api.SCApi, 'find_replay_profile', return_value=SCRPLAYPROFILE) @mock.patch('cinder.volume.volume_utils.is_group_a_cg_snapshot_type', 
return_value=True) def test_create_group_snapshot(self, mock_is_cg, mock_find_replay_profile, mock_snap_cg_replay, mock_close_connection, mock_open_connection, mock_init): mock_snapshot = mock.MagicMock() mock_snapshot.id = fake.SNAPSHOT_ID expected_snapshots = [{'id': fake.SNAPSHOT_ID, 'status': 'available'}] context = {} cggrp = {'group_id': fake.GROUP_ID, 'id': fake.GROUP_SNAPSHOT_ID} model_update, snapshots = self.driver.create_group_snapshot( context, cggrp, [mock_snapshot]) mock_find_replay_profile.assert_called_once_with(fake.GROUP_ID) mock_snap_cg_replay.assert_called_once_with(self.SCRPLAYPROFILE, fake.GROUP_SNAPSHOT_ID, 0) self.assertEqual('available', model_update['status']) self.assertEqual(expected_snapshots, snapshots) @mock.patch.object(storagecenter_api.SCApi, 'find_replay_profile', return_value=None) @mock.patch('cinder.volume.volume_utils.is_group_a_cg_snapshot_type', return_value=True) def test_create_group_snapshot_profile_not_found(self, mock_is_cg, mock_find_replay_profile, mock_close_connection, mock_open_connection, mock_init): context = {} cggrp = {'group_id': fake.GROUP_ID, 'id': fake.GROUP_SNAPSHOT_ID} model_update, snapshot_updates = self.driver.create_group_snapshot( context, cggrp, []) self.assertEqual({'status': 'error'}, model_update) self.assertIsNone(snapshot_updates) mock_find_replay_profile.assert_called_once_with(fake.GROUP_ID) @mock.patch('cinder.volume.volume_utils.is_group_a_cg_snapshot_type', return_value=False) def test_create_group_snapshot_not_a_cg( self, mock_is_cg, mock_close_connection, mock_open_connection, mock_init): context = {} cggrp = {'group_id': fake.GROUP_ID, 'id': fake.GROUP_SNAPSHOT_ID} self.assertRaises(NotImplementedError, self.driver.create_group_snapshot, context, cggrp, []) @mock.patch.object(storagecenter_api.SCApi, 'snap_cg_replay', return_value=None) @mock.patch.object(storagecenter_api.SCApi, 'find_replay_profile', return_value=SCRPLAYPROFILE) @mock.patch('cinder.volume.volume_utils.is_group_a_cg_snapshot_type', return_value=True) def test_create_group_snapshot_fail(self, mock_is_cg, mock_find_replay_profile, mock_snap_cg_replay, mock_close_connection, mock_open_connection, mock_init): context = {} cggrp = {'group_id': fake.GROUP_ID, 'id': fake.GROUP_SNAPSHOT_ID} model_update, snapshot_updates = self.driver.create_group_snapshot( context, cggrp, []) mock_find_replay_profile.assert_called_once_with(fake.GROUP_ID) mock_snap_cg_replay.assert_called_once_with(self.SCRPLAYPROFILE, fake.GROUP_SNAPSHOT_ID, 0) self.assertEqual({'status': 'error'}, model_update) self.assertIsNone(snapshot_updates) @mock.patch.object(storagecenter_api.SCApi, 'delete_cg_replay', return_value=True) @mock.patch.object(storagecenter_api.SCApi, 'find_replay_profile', return_value=SCRPLAYPROFILE) @mock.patch('cinder.volume.volume_utils.is_group_a_cg_snapshot_type', return_value=True) def test_delete_group_snapshot(self, mock_is_cg, mock_find_replay_profile, mock_delete_cg_replay, mock_close_connection, mock_open_connection, mock_init): mock_snapshot = {'id': fake.SNAPSHOT_ID, 'status': 'available'} context = {} cgsnap = {'group_id': fake.GROUP_ID, 'id': fake.GROUP_SNAPSHOT_ID, 'status': 'deleted'} model_update, snapshots = self.driver.delete_group_snapshot( context, cgsnap, [mock_snapshot]) mock_find_replay_profile.assert_called_once_with(fake.GROUP_ID) mock_delete_cg_replay.assert_called_once_with(self.SCRPLAYPROFILE, fake.GROUP_SNAPSHOT_ID) self.assertEqual({'status': cgsnap['status']}, model_update) self.assertEqual([{'id': fake.SNAPSHOT_ID, 'status': 
'deleted'}], snapshots) @mock.patch.object(storagecenter_api.SCApi, 'delete_cg_replay') @mock.patch.object(storagecenter_api.SCApi, 'find_replay_profile', return_value=None) @mock.patch('cinder.volume.volume_utils.is_group_a_cg_snapshot_type', return_value=True) def test_delete_group_snapshot_profile_not_found(self, mock_is_cg, mock_find_replay_profile, mock_delete_cg_replay, mock_close_connection, mock_open_connection, mock_init): snapshot = {'id': fake.SNAPSHOT_ID, 'status': 'available'} context = {} cgsnap = {'group_id': fake.GROUP_ID, 'id': fake.GROUP_SNAPSHOT_ID, 'status': 'available'} model_update, snapshots = self.driver.delete_group_snapshot( context, cgsnap, [snapshot]) mock_find_replay_profile.assert_called_once_with(fake.GROUP_ID) self.assertFalse(mock_delete_cg_replay.called) self.assertEqual({'status': 'error'}, model_update) self.assertIsNone(snapshots) @mock.patch.object(storagecenter_api.SCApi, 'delete_cg_replay', return_value=False) @mock.patch.object(storagecenter_api.SCApi, 'find_replay_profile', return_value=SCRPLAYPROFILE) @mock.patch('cinder.volume.volume_utils.is_group_a_cg_snapshot_type', return_value=True) def test_delete_group_snapshot_profile_failed_delete( self, mock_is_cg, mock_find_replay_profile, mock_delete_cg_replay, mock_close_connection, mock_open_connection, mock_init): context = {} cgsnap = {'group_id': fake.GROUP_ID, 'id': fake.GROUP_SNAPSHOT_ID, 'status': 'available'} model_update, snapshot_updates = self.driver.delete_group_snapshot( context, cgsnap, []) self.assertEqual({'status': 'error_deleting'}, model_update) mock_find_replay_profile.assert_called_once_with(fake.GROUP_ID) mock_delete_cg_replay.assert_called_once_with(self.SCRPLAYPROFILE, fake.GROUP_SNAPSHOT_ID) @mock.patch('cinder.volume.volume_utils.is_group_a_cg_snapshot_type', return_value=False) def test_delete_group_snapshot_not_a_cg( self, mock_is_cg, mock_close_connection, mock_open_connection, mock_init): context = {} cgsnap = {'group_id': fake.GROUP_ID, 'id': fake.GROUP_SNAPSHOT_ID, 'status': 'available'} self.assertRaises(NotImplementedError, self.driver.delete_group_snapshot, context, cgsnap, []) @mock.patch.object(storagecenter_api.SCApi, 'find_volume', return_value={'id': 'guid'}) @mock.patch.object(storagecenter_iscsi.SCISCSIDriver, '_create_replications', return_value=None) @mock.patch.object(storagecenter_api.SCApi, 'manage_existing') def test_manage_existing(self, mock_manage_existing, mock_create_replications, mock_find_volume, mock_close_connection, mock_open_connection, mock_init): # Very little to do in this one. The call is sent # straight down. volume = {'id': fake.VOLUME_ID} existing_ref = {'source-name': 'imavolumename'} self.driver.manage_existing(volume, existing_ref) mock_manage_existing.assert_called_once_with(fake.VOLUME_ID, existing_ref) @mock.patch.object(storagecenter_api.SCApi, 'find_volume', return_value={'id': 'guid'}) @mock.patch.object(storagecenter_iscsi.SCISCSIDriver, '_create_replications', return_value=None) @mock.patch.object(storagecenter_api.SCApi, 'manage_existing') def test_manage_existing_id(self, mock_manage_existing, mock_create_replications, mock_find_volume, mock_close_connection, mock_open_connection, mock_init): # Very little to do in this one. The call is sent # straight down. 
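# A minimal sketch of the expected pass-through, assuming the driver simply
# forwards the Cinder volume id and the untouched reference dict to the
# SCApi layer (values here are purely illustrative):
#
#     existing_ref = {'source-id': 'imadeviceid'}
#     self.driver.manage_existing({'id': fake.VOLUME_ID}, existing_ref)
#     # expected effect: SCApi.manage_existing(fake.VOLUME_ID, existing_ref)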
volume = {'id': fake.VOLUME_ID} existing_ref = {'source-id': 'imadeviceid'} self.driver.manage_existing(volume, existing_ref) mock_manage_existing.assert_called_once_with(fake.VOLUME_ID, existing_ref) def test_manage_existing_bad_ref(self, mock_close_connection, mock_open_connection, mock_init): volume = {'id': fake.VOLUME_ID} existing_ref = {'banana-name': 'imavolumename'} self.assertRaises(exception.ManageExistingInvalidReference, self.driver.manage_existing, volume, existing_ref) @mock.patch.object(storagecenter_api.SCApi, 'get_unmanaged_volume_size', return_value=4) def test_manage_existing_get_size(self, mock_get_unmanaged_volume_size, mock_close_connection, mock_open_connection, mock_init): # Almost nothing to test here. Just that we call our function. volume = {'id': fake.VOLUME_ID} existing_ref = {'source-name': 'imavolumename'} res = self.driver.manage_existing_get_size(volume, existing_ref) mock_get_unmanaged_volume_size.assert_called_once_with(existing_ref) # The above is 4GB and change. self.assertEqual(4, res) @mock.patch.object(storagecenter_api.SCApi, 'get_unmanaged_volume_size', return_value=4) def test_manage_existing_get_size_id(self, mock_get_unmanaged_volume_size, mock_close_connection, mock_open_connection, mock_init): # Almost nothing to test here. Just that we call our function. volume = {'id': fake.VOLUME_ID} existing_ref = {'source-id': 'imadeviceid'} res = self.driver.manage_existing_get_size(volume, existing_ref) mock_get_unmanaged_volume_size.assert_called_once_with(existing_ref) # The above is 4GB and change. self.assertEqual(4, res) def test_manage_existing_get_size_bad_ref(self, mock_close_connection, mock_open_connection, mock_init): volume = {'id': fake.VOLUME_ID} existing_ref = {'banana-name': 'imavolumename'} self.assertRaises(exception.ManageExistingInvalidReference, self.driver.manage_existing_get_size, volume, existing_ref) @mock.patch.object(storagecenter_api.SCApi, 'find_volume', return_value=VOLUME) @mock.patch.object(storagecenter_api.SCApi, 'update_storage_profile') @mock.patch.object(storagecenter_api.SCApi, 'update_replay_profiles') @mock.patch.object(storagecenter_iscsi.SCISCSIDriver, '_create_replications') @mock.patch.object(storagecenter_api.SCApi, 'update_replicate_active_replay') def test_retype_not_our_extra_specs(self, mock_update_replicate_active_replay, mock_create_replications, mock_update_replay_profile, mock_update_storage_profile, mock_find_volume, mock_close_connection, mock_open_connection, mock_init): res = self.driver.retype( None, {'id': fake.VOLUME_ID}, None, {'extra_specs': None}, None) self.assertTrue(res) self.assertFalse(mock_update_replicate_active_replay.called) self.assertFalse(mock_create_replications.called) self.assertFalse(mock_update_replay_profile.called) self.assertFalse(mock_update_storage_profile.called) @mock.patch.object(storagecenter_api.SCApi, 'find_volume', return_value=VOLUME) @mock.patch.object(storagecenter_api.SCApi, 'update_replay_profiles') def test_retype_replay_profiles(self, mock_update_replay_profiles, mock_find_volume, mock_close_connection, mock_open_connection, mock_init): mock_update_replay_profiles.side_effect = [True, False] # Normal successful run. res = self.driver.retype( None, {'id': fake.VOLUME_ID}, None, {'extra_specs': {'storagetype:replayprofiles': ['A', 'B']}}, None) mock_update_replay_profiles.assert_called_once_with(self.VOLUME, 'B') self.assertTrue(res) # Run fails. Make sure this returns False. 
res = self.driver.retype( None, {'id': fake.VOLUME_ID}, None, {'extra_specs': {'storagetype:replayprofiles': ['B', 'A']}}, None) self.assertFalse(res) @mock.patch.object(storagecenter_api.SCApi, 'find_volume', return_value=VOLUME) @mock.patch.object(storagecenter_iscsi.SCISCSIDriver, '_create_replications', return_value={'replication_status': 'enabled', 'replication_driver_data': '54321'}) @mock.patch.object(storagecenter_iscsi.SCISCSIDriver, '_delete_replications') def test_retype_create_replications(self, mock_delete_replications, mock_create_replications, mock_find_volume, mock_close_connection, mock_open_connection, mock_init): res = self.driver.retype( None, {'id': fake.VOLUME_ID}, {'extra_specs': {'replication_enabled': [None, ' True']}}, {'extra_specs': {'replication_enabled': [None, ' True']}}, None) self.assertTrue(mock_create_replications.called) self.assertFalse(mock_delete_replications.called) self.assertEqual((True, {'replication_status': 'enabled', 'replication_driver_data': '54321'}), res) res = self.driver.retype( None, {'id': fake.VOLUME_ID}, None, {'extra_specs': {'replication_enabled': [' True', None]}}, None) self.assertTrue(mock_delete_replications.called) self.assertEqual((True, {'replication_status': 'disabled', 'replication_driver_data': ''}), res) @mock.patch.object(storagecenter_api.SCApi, 'update_replicate_active_replay') @mock.patch.object(storagecenter_api.SCApi, 'find_volume', return_value=VOLUME) def test_retype_active_replay(self, mock_find_volume, mock_update_replicate_active_replay, mock_close_connection, mock_open_connection, mock_init): # Success, Success, Not called and fail. mock_update_replicate_active_replay.side_effect = [True, True, False] res = self.driver.retype( None, {'id': fake.VOLUME_ID}, None, {'extra_specs': {'replication:activereplay': ['', ' True']}}, None) self.assertTrue(res) res = self.driver.retype( None, {'id': fake.VOLUME_ID}, None, {'extra_specs': {'replication:activereplay': [' True', '']}}, None) self.assertTrue(res) res = self.driver.retype( None, {'id': fake.VOLUME_ID}, None, {'extra_specs': {'replication:activereplay': ['', '']}}, None) self.assertTrue(res) res = self.driver.retype( None, {'id': fake.VOLUME_ID}, None, {'extra_specs': {'replication:activereplay': ['', ' True']}}, None) self.assertFalse(res) @mock.patch.object(storagecenter_api.SCApi, 'find_volume', return_value=VOLUME) def test_retype_same(self, mock_find_volume, mock_close_connection, mock_open_connection, mock_init): res = self.driver.retype( None, {'id': fake.VOLUME_ID}, None, {'extra_specs': {'storagetype:storageprofile': ['A', 'A']}}, None) self.assertTrue(res) @mock.patch.object(storagecenter_api.SCApi, 'find_volume', return_value=VOLUME) @mock.patch.object(storagecenter_api.SCApi, 'unmanage') def test_unmanage(self, mock_unmanage, mock_find_volume, mock_close_connection, mock_open_connection, mock_init): volume = {'id': fake.VOLUME_ID, 'provider_id': '11111.1'} self.driver.unmanage(volume) mock_find_volume.assert_called_once_with(fake.VOLUME_ID, '11111.1') mock_unmanage.assert_called_once_with(self.VOLUME) @mock.patch.object(storagecenter_api.SCApi, 'find_volume', return_value=None) @mock.patch.object(storagecenter_api.SCApi, 'unmanage') def test_unmanage_volume_not_found(self, mock_unmanage, mock_find_volume, mock_close_connection, mock_open_connection, mock_init): volume = {'id': fake.VOLUME_ID, 'provider_id': '11111.1'} self.driver.unmanage(volume) mock_find_volume.assert_called_once_with(fake.VOLUME_ID, '11111.1') 
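# Since find_volume returned None, unmanage should not have been called.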
self.assertFalse(mock_unmanage.called) @mock.patch.object(storagecenter_api.SCApi, 'update_storage_profile') @mock.patch.object(storagecenter_api.SCApi, 'find_volume', return_value=VOLUME) def test_retype(self, mock_find_volume, mock_update_storage_profile, mock_close_connection, mock_open_connection, mock_init): res = self.driver.retype( None, {'id': fake.VOLUME_ID}, None, {'extra_specs': {'storagetype:storageprofile': ['A', 'B']}}, None) mock_update_storage_profile.assert_called_once_with( self.VOLUME, 'B') self.assertTrue(res) def test__parse_secondary(self, mock_close_connection, mock_open_connection, mock_init): backends = self.driver.backends self.driver.backends = [{'target_device_id': '12345', 'qosnode': 'cinderqos'}, {'target_device_id': '67890', 'qosnode': 'cinderqos'}] mock_api = mock.MagicMock() # Good run. Secondary in replication_driver_data and backend. sc up. destssn = self.driver._parse_secondary(mock_api, '67890') self.assertEqual(67890, destssn) # Bad run. Secondary not in backend. destssn = self.driver._parse_secondary(mock_api, '99999') self.assertIsNone(destssn) # Good run. destssn = self.driver._parse_secondary(mock_api, '12345') self.assertEqual(12345, destssn) self.driver.backends = backends @mock.patch.object(storagecenter_api.SCApi, 'find_sc') def test__parse_secondary_sc_down(self, mock_find_sc, mock_close_connection, mock_open_connection, mock_init): backends = self.driver.backends self.driver.backends = [{'target_device_id': '12345', 'qosnode': 'cinderqos'}, {'target_device_id': '67890', 'qosnode': 'cinderqos'}] mock_api = mock.MagicMock() # Bad run. Good selection. SC down. mock_api.find_sc = mock.MagicMock( side_effect=exception.VolumeBackendAPIException(data='1234')) destssn = self.driver._parse_secondary(mock_api, '12345') self.assertIsNone(destssn) self.driver.backends = backends def test__failover_live_volume(self, mock_close_connection, mock_open_connection, mock_init): mock_api = mock.MagicMock() sclivevol = {'instanceId': '101.100', 'primaryVolume': {'instanceId': '101.101', 'instanceName': fake.VOLUME2_ID}, 'secondaryVolume': {'instanceId': '102.101', 'instanceName': fake.VOLUME_ID}, 'secondaryScSerialNumber': 102, 'secondaryRole': 'Secondary'} postfail = {'instanceId': '101.100', 'primaryVolume': {'instanceId': '102.101', 'instanceName': fake.VOLUME_ID}, 'secondaryVolume': {'instanceId': '101.101', 'instanceName': fake.VOLUME2_ID}, 'secondaryScSerialNumber': 102, 'secondaryRole': 'Secondary'} mock_api.get_live_volume = mock.MagicMock() mock_api.get_live_volume.side_effect = [sclivevol, postfail, sclivevol, sclivevol] # Good run. mock_api.is_swapped = mock.MagicMock(return_value=False) mock_api.swap_roles_live_volume = mock.MagicMock(return_value=True) model_update = {'provider_id': '102.101', 'replication_status': 'failed-over'} ret = self.driver._failover_live_volume(mock_api, fake.VOLUME_ID, '101.101') self.assertEqual(model_update, ret) # Swap fail mock_api.swap_roles_live_volume.return_value = False model_update = {'status': 'error'} ret = self.driver._failover_live_volume(mock_api, fake.VOLUME_ID, '101.101') self.assertEqual(model_update, ret) # Can't find live volume. 
mock_api.get_live_volume.return_value = None ret = self.driver._failover_live_volume(mock_api, fake.VOLUME_ID, '101.101') self.assertEqual(model_update, ret) def test__failover_replication(self, mock_close_connection, mock_open_connection, mock_init): rvol = {'instanceId': '102.101'} mock_api = mock.MagicMock() mock_api.break_replication = mock.MagicMock(return_value=rvol) # Good run. model_update = {'replication_status': 'failed-over', 'provider_id': '102.101'} ret = self.driver._failover_replication(mock_api, fake.VOLUME_ID, '101.100', 102) self.assertEqual(model_update, ret) # break fail mock_api.break_replication.return_value = None model_update = {'status': 'error'} ret = self.driver._failover_replication(mock_api, fake.VOLUME_ID, '101.100', 102) self.assertEqual(model_update, ret) @mock.patch.object(storagecenter_iscsi.SCISCSIDriver, '_failover_replication') @mock.patch.object(storagecenter_iscsi.SCISCSIDriver, '_parse_secondary') @mock.patch.object(storagecenter_api.SCApi, 'find_volume') @mock.patch.object(storagecenter_api.SCApi, 'remove_mappings') @mock.patch.object(storagecenter_iscsi.SCISCSIDriver, 'failback_volumes') @mock.patch.object(storagecenter_iscsi.SCISCSIDriver, '_get_replication_specs') def test_failover_host(self, mock_get_replication_specs, mock_failback_volumes, mock_remove_mappings, mock_find_volume, mock_parse_secondary, mock_failover_replication, mock_close_connection, mock_open_connection, mock_init): mock_get_replication_specs.return_value = {'enabled': False, 'live': False} self.driver.replication_enabled = False self.driver.failed_over = False volumes = [{'id': fake.VOLUME_ID, 'replication_driver_data': '12345', 'provider_id': '1.1'}, {'id': fake.VOLUME2_ID, 'replication_driver_data': '12345', 'provider_id': '1.2'}] # No run. Not doing repl. Should raise. self.assertRaises(exception.VolumeBackendAPIException, self.driver.failover_host, {}, volumes, '12345') # Good run self.driver.replication_enabled = True mock_get_replication_specs.return_value = {'enabled': True, 'live': False} mock_parse_secondary.return_value = 12345 expected_destssn = 12345 mock_failover_replication.side_effect = [ {'provider_id': '2.1', 'replication_status': 'failed-over'}, # 1 {'provider_id': '2.2', 'replication_status': 'failed-over'}, {'provider_id': '2.1', 'replication_status': 'failed-over'}, # 2 {'provider_id': '2.1', 'replication_status': 'failed-over'}] # 3 expected_volume_update = [{'volume_id': fake.VOLUME_ID, 'updates': {'replication_status': 'failed-over', 'provider_id': '2.1'}}, {'volume_id': fake.VOLUME2_ID, 'updates': {'replication_status': 'failed-over', 'provider_id': '2.2'}}] destssn, volume_update, __ = self.driver.failover_host( {}, volumes, '12345', []) self.assertEqual(expected_destssn, destssn) self.assertEqual(expected_volume_update, volume_update) # Good run. Not all volumes replicated. volumes = [{'id': fake.VOLUME_ID, 'replication_driver_data': '12345'}, {'id': fake.VOLUME2_ID, 'replication_driver_data': ''}] expected_volume_update = [{'volume_id': fake.VOLUME_ID, 'updates': {'replication_status': 'failed-over', 'provider_id': '2.1'}}, {'volume_id': fake.VOLUME2_ID, 'updates': {'status': 'error'}}] self.driver.failed_over = False self.driver.active_backend_id = None destssn, volume_update, __ = self.driver.failover_host( {}, volumes, '12345', []) self.assertEqual(expected_destssn, destssn) self.assertEqual(expected_volume_update, volume_update) # Good run. Not all volumes replicated. No replication_driver_data. 
volumes = [{'id': fake.VOLUME_ID, 'replication_driver_data': '12345'}, {'id': fake.VOLUME2_ID}] expected_volume_update = [{'volume_id': fake.VOLUME_ID, 'updates': {'replication_status': 'failed-over', 'provider_id': '2.1'}}, {'volume_id': fake.VOLUME2_ID, 'updates': {'status': 'error'}}] self.driver.failed_over = False self.driver.active_backend_id = None destssn, volume_update, __ = self.driver.failover_host( {}, volumes, '12345', []) self.assertEqual(expected_destssn, destssn) self.assertEqual(expected_volume_update, volume_update) # Good run. No volumes replicated. No replication_driver_data. volumes = [{'id': fake.VOLUME_ID}, {'id': fake.VOLUME2_ID}] expected_volume_update = [{'volume_id': fake.VOLUME_ID, 'updates': {'status': 'error'}}, {'volume_id': fake.VOLUME2_ID, 'updates': {'status': 'error'}}] self.driver.failed_over = False self.driver.active_backend_id = None destssn, volume_update, __ = self.driver.failover_host( {}, volumes, '12345', []) self.assertEqual(expected_destssn, destssn) self.assertEqual(expected_volume_update, volume_update) # Secondary not found. mock_parse_secondary.return_value = None self.driver.failed_over = False self.driver.active_backend_id = None self.assertRaises(exception.InvalidReplicationTarget, self.driver.failover_host, {}, volumes, '54321', []) # Already failed over. self.driver.failed_over = True self.driver.failover_host({}, volumes, 'default') mock_failback_volumes.assert_called_once_with(volumes) # Already failed over. self.assertRaises(exception.InvalidReplicationTarget, self.driver.failover_host, {}, volumes, '67890', []) self.driver.replication_enabled = False @mock.patch.object(storagecenter_iscsi.SCISCSIDriver, '_failover_live_volume') @mock.patch.object(storagecenter_iscsi.SCISCSIDriver, '_parse_secondary') @mock.patch.object(storagecenter_api.SCApi, 'find_volume') @mock.patch.object(storagecenter_api.SCApi, 'remove_mappings') @mock.patch.object(storagecenter_iscsi.SCISCSIDriver, 'failback_volumes') @mock.patch.object(storagecenter_iscsi.SCISCSIDriver, '_get_replication_specs') def test_failover_host_live_volume(self, mock_get_replication_specs, mock_failback_volumes, mock_remove_mappings, mock_find_volume, mock_parse_secondary, mock_failover_live_volume, mock_close_connection, mock_open_connection, mock_init): mock_get_replication_specs.return_value = {'enabled': False, 'live': False} self.driver.replication_enabled = False self.driver.failed_over = False volumes = [{'id': fake.VOLUME_ID, 'replication_driver_data': '12345', 'provider_id': '1.1'}, {'id': fake.VOLUME2_ID, 'replication_driver_data': '12345', 'provider_id': '1.2'}] # No run. Not doing repl. Should raise. 
self.assertRaises(exception.VolumeBackendAPIException, self.driver.failover_host, {}, volumes, '12345') # Good run self.driver.replication_enabled = True mock_get_replication_specs.return_value = {'enabled': True, 'live': True} mock_parse_secondary.return_value = 12345 expected_destssn = 12345 mock_failover_live_volume.side_effect = [ {'provider_id': '2.1', 'replication_status': 'failed-over'}, # 1 {'provider_id': '2.2', 'replication_status': 'failed-over'}, {'provider_id': '2.1', 'replication_status': 'failed-over'}, # 2 {'provider_id': '2.1', 'replication_status': 'failed-over'}] # 3 expected_volume_update = [{'volume_id': fake.VOLUME_ID, 'updates': {'replication_status': 'failed-over', 'provider_id': '2.1'}}, {'volume_id': fake.VOLUME2_ID, 'updates': {'replication_status': 'failed-over', 'provider_id': '2.2'}}] destssn, volume_update, __ = self.driver.failover_host( {}, volumes, '12345', []) self.assertEqual(expected_destssn, destssn) self.assertEqual(expected_volume_update, volume_update) # Good run. Not all volumes replicated. volumes = [{'id': fake.VOLUME_ID, 'replication_driver_data': '12345'}, {'id': fake.VOLUME2_ID, 'replication_driver_data': ''}] expected_volume_update = [{'volume_id': fake.VOLUME_ID, 'updates': {'replication_status': 'failed-over', 'provider_id': '2.1'}}, {'volume_id': fake.VOLUME2_ID, 'updates': {'status': 'error'}}] self.driver.failed_over = False self.driver.active_backend_id = None destssn, volume_update, __ = self.driver.failover_host( {}, volumes, '12345', []) self.assertEqual(expected_destssn, destssn) self.assertEqual(expected_volume_update, volume_update) # Good run. Not all volumes replicated. No replication_driver_data. volumes = [{'id': fake.VOLUME_ID, 'replication_driver_data': '12345'}, {'id': fake.VOLUME2_ID}] expected_volume_update = [{'volume_id': fake.VOLUME_ID, 'updates': {'replication_status': 'failed-over', 'provider_id': '2.1'}}, {'volume_id': fake.VOLUME2_ID, 'updates': {'status': 'error'}}] self.driver.failed_over = False self.driver.active_backend_id = None destssn, volume_update, __ = self.driver.failover_host( {}, volumes, '12345', []) self.assertEqual(expected_destssn, destssn) self.assertEqual(expected_volume_update, volume_update) # Good run. No volumes replicated. No replication_driver_data. volumes = [{'id': fake.VOLUME_ID}, {'id': fake.VOLUME2_ID}] expected_volume_update = [{'volume_id': fake.VOLUME_ID, 'updates': {'status': 'error'}}, {'volume_id': fake.VOLUME2_ID, 'updates': {'status': 'error'}}] self.driver.failed_over = False self.driver.active_backend_id = None destssn, volume_update, __ = self.driver.failover_host( {}, volumes, '12345', []) self.assertEqual(expected_destssn, destssn) self.assertEqual(expected_volume_update, volume_update) # Secondary not found. mock_parse_secondary.return_value = None self.driver.failed_over = False self.driver.active_backend_id = None self.assertRaises(exception.InvalidReplicationTarget, self.driver.failover_host, {}, volumes, '54321', []) # Already failed over. 
self.driver.failed_over = True self.driver.failover_host({}, volumes, 'default') mock_failback_volumes.assert_called_once_with(volumes) self.driver.replication_enabled = False def test__get_unmanaged_replay(self, mock_close_connection, mock_open_connection, mock_init): mock_api = mock.MagicMock() existing_ref = None self.assertRaises(exception.ManageExistingInvalidReference, self.driver._get_unmanaged_replay, mock_api, fake.VOLUME_ID, '11111.1', existing_ref) existing_ref = {'source-id': 'Not a source-name'} self.assertRaises(exception.ManageExistingInvalidReference, self.driver._get_unmanaged_replay, mock_api, fake.VOLUME_ID, '11111.1', existing_ref) existing_ref = {'source-name': 'name'} mock_api.find_volume = mock.MagicMock(return_value=None) self.assertRaises(exception.VolumeBackendAPIException, self.driver._get_unmanaged_replay, mock_api, fake.VOLUME_ID, '11111.1', existing_ref) mock_api.find_volume.return_value = {'instanceId': '11111.1'} mock_api.find_replay = mock.MagicMock(return_value=None) self.assertRaises(exception.ManageExistingInvalidReference, self.driver._get_unmanaged_replay, mock_api, fake.VOLUME_ID, '11111.1', existing_ref) mock_api.find_replay.return_value = {'instanceId': '11111.101'} ret = self.driver._get_unmanaged_replay(mock_api, fake.VOLUME_ID, '11111.1', existing_ref) self.assertEqual({'instanceId': '11111.101'}, ret) @mock.patch.object(storagecenter_iscsi.SCISCSIDriver, '_get_unmanaged_replay') @mock.patch.object(storagecenter_api.SCApi, 'manage_replay') def test_manage_existing_snapshot(self, mock_manage_replay, mock_get_unmanaged_replay, mock_close_connection, mock_open_connection, mock_init): snapshot = {'volume_id': fake.VOLUME_ID, 'id': fake.SNAPSHOT_ID} existing_ref = {'source-name': 'name'} screplay = {'description': 'name', 'createVolume': {'instanceId': '1'}} expected = {'provider_id': '1'} mock_get_unmanaged_replay.return_value = screplay mock_manage_replay.return_value = True ret = self.driver.manage_existing_snapshot(snapshot, existing_ref) self.assertEqual(expected, ret) self.assertEqual(1, mock_get_unmanaged_replay.call_count) mock_manage_replay.assert_called_once_with(screplay, fake.SNAPSHOT_ID) mock_manage_replay.return_value = False self.assertRaises(exception.VolumeBackendAPIException, self.driver.manage_existing_snapshot, snapshot, existing_ref) @mock.patch.object(storagecenter_iscsi.SCISCSIDriver, '_get_unmanaged_replay') def test_manage_existing_snapshot_get_size(self, mock_get_unmanaged_replay, mock_close_connection, mock_open_connection, mock_init): snapshot = {'volume_id': fake.VOLUME_ID, 'id': fake.SNAPSHOT_ID} existing_ref = {'source-name': 'name'} # Good size. mock_get_unmanaged_replay.return_value = {'size': '1.073741824E9 Bytes'} ret = self.driver.manage_existing_snapshot_get_size(snapshot, existing_ref) self.assertEqual(1, ret) # Not on 1GB boundaries. 
mock_get_unmanaged_replay.return_value = {'size': '2.073741824E9 Bytes'} self.assertRaises(exception.VolumeBackendAPIException, self.driver.manage_existing_snapshot_get_size, snapshot, existing_ref) @mock.patch.object(storagecenter_api.SCApi, 'find_volume') @mock.patch.object(storagecenter_api.SCApi, 'find_replay') @mock.patch.object(storagecenter_api.SCApi, 'unmanage_replay') def test_unmanage_snapshot(self, mock_unmanage_replay, mock_find_replay, mock_find_volume, mock_close_connection, mock_open_connection, mock_init): snapshot = {'volume_id': fake.VOLUME_ID, 'id': fake.SNAPSHOT_ID} mock_find_volume.return_value = None self.assertRaises(exception.VolumeBackendAPIException, self.driver.unmanage_snapshot, snapshot) mock_find_volume.return_value = {'name': fake.VOLUME_ID} mock_find_replay.return_value = None self.assertRaises(exception.VolumeBackendAPIException, self.driver.unmanage_snapshot, snapshot) screplay = {'description': fake.SNAPSHOT_ID} mock_find_replay.return_value = screplay self.driver.unmanage_snapshot(snapshot) mock_unmanage_replay.assert_called_once_with(screplay) @mock.patch.object(storagecenter_iscsi.SCISCSIDriver, '_get_qos', return_value='cinderqos') @mock.patch.object(storagecenter_iscsi.SCISCSIDriver, '_parse_extraspecs', return_value={'replay_profile_string': 'pro'}) @mock.patch.object(storagecenter_api.SCApi, 'find_volume') @mock.patch.object(storagecenter_api.SCApi, 'find_repl_volume') @mock.patch.object(storagecenter_api.SCApi, 'delete_replication') @mock.patch.object(storagecenter_api.SCApi, 'replicate_to_common') @mock.patch.object(storagecenter_api.SCApi, 'remove_mappings') @mock.patch.object(storagecenter_iscsi.SCISCSIDriver, '_wait_for_replication') @mock.patch.object(storagecenter_iscsi.SCISCSIDriver, '_reattach_remaining_replications') @mock.patch.object(storagecenter_iscsi.SCISCSIDriver, '_fixup_types') @mock.patch.object(storagecenter_iscsi.SCISCSIDriver, '_volume_updates', return_value=[]) @mock.patch.object(storagecenter_iscsi.SCISCSIDriver, '_update_backend') def test_failback_volumes(self, mock_update_backend, mock_volume_updates, mock_fixup_types, mock_reattach_remaining_replications, mock_wait_for_replication, mock_remove_mappings, mock_replicate_to_common, mock_delete_replication, mock_find_repl_volume, mock_find_volume, mock_parse_extraspecs, mock_get_qos, mock_close_connection, mock_open_connection, mock_init): self.driver.replication_enabled = True self.driver.failed_over = True self.driver.active_backend_id = 12345 self.driver.primaryssn = 11111 backends = self.driver.backends self.driver.backends = [{'target_device_id': '12345', 'qosnode': 'cinderqos'}, {'target_device_id': '67890', 'qosnode': 'cinderqos'}] volumes = [{'id': fake.VOLUME_ID, 'replication_driver_data': '12345', 'provider_id': '12345.1'}, {'id': fake.VOLUME2_ID, 'replication_driver_data': '12345', 'provider_id': '12345.2'}] mock_find_volume.side_effect = [{'instanceId': '12345.1'}, {'instanceId': '12345.2'}] mock_find_repl_volume.side_effect = [{'instanceId': '11111.1'}, {'instanceId': '11111.2'}] mock_replicate_to_common.side_effect = [{'instanceId': '12345.100', 'destinationVolume': {'instanceId': '11111.3'} }, {'instanceId': '12345.200', 'destinationVolume': {'instanceId': '11111.4'} }] # we don't care about the return. We just want to make sure that # _wait_for_replication is called with the proper replitems. 
self.driver.failback_volumes(volumes) expected = [{'volume': volumes[0], 'specs': {'replay_profile_string': 'pro'}, 'qosnode': 'cinderqos', 'screpl': '12345.100', 'cvol': '12345.1', 'ovol': '11111.1', 'nvol': '11111.3', 'rdd': '12345', 'status': 'inprogress'}, {'volume': volumes[1], 'specs': {'replay_profile_string': 'pro'}, 'qosnode': 'cinderqos', 'screpl': '12345.200', 'cvol': '12345.2', 'ovol': '11111.2', 'nvol': '11111.4', 'rdd': '12345', 'status': 'inprogress'} ] # We are stubbing everything out so we just want to be sure this hits # _volume_updates as expected. (Ordinarily this would be modified by # the time it hit this but since it isn't we use this to our advantage # and check that our replitems was set correctly coming out of the # main loop.) mock_volume_updates.assert_called_once_with(expected) self.driver.replication_enabled = False self.driver.failed_over = False self.driver.backends = backends @mock.patch.object(storagecenter_iscsi.SCISCSIDriver, '_get_qos', return_value='cinderqos') @mock.patch.object(storagecenter_api.SCApi, 'find_volume') @mock.patch.object(storagecenter_iscsi.SCISCSIDriver, '_update_backend') @mock.patch.object(storagecenter_api.SCApi, 'get_live_volume') @mock.patch.object(storagecenter_api.SCApi, 'swap_roles_live_volume') @mock.patch.object(storagecenter_iscsi.SCISCSIDriver, '_get_replication_specs') def test_failback_volumes_live_vol(self, mock_get_replication_specs, mock_swap_roles_live_volume, mock_get_live_volume, mock_update_backend, mock_find_volume, mock_get_qos, mock_close_connection, mock_open_connection, mock_init): self.driver.replication_enabled = True self.driver.failed_over = True self.driver.active_backend_id = 12345 self.driver.primaryssn = 11111 backends = self.driver.backends self.driver.backends = [{'target_device_id': '12345', 'qosnode': 'cinderqos', 'remoteqos': 'remoteqos'}] volumes = [{'id': fake.VOLUME_ID, 'replication_driver_data': '12345', 'provider_id': '12345.1'}, {'id': fake.VOLUME2_ID, 'replication_driver_data': '12345', 'provider_id': '12345.2'}] mock_get_live_volume.side_effect = [ {'instanceId': '11111.101', 'secondaryVolume': {'instanceId': '11111.1001', 'instanceName': fake.VOLUME_ID}, 'secondaryScSerialNumber': 11111}, {'instanceId': '11111.102', 'secondaryVolume': {'instanceId': '11111.1002', 'instanceName': fake.VOLUME2_ID}, 'secondaryScSerialNumber': 11111} ] mock_get_replication_specs.return_value = {'enabled': True, 'live': True} mock_swap_roles_live_volume.side_effect = [True, True] mock_find_volume.side_effect = [{'instanceId': '12345.1'}, {'instanceId': '12345.2'}] # we don't care about the return. We just want to make sure that # _wait_for_replication is called with the proper replitems. 
ret = self.driver.failback_volumes(volumes) expected = [{'updates': {'provider_id': '11111.1001', 'replication_status': 'enabled', 'status': 'available'}, 'volume_id': fake.VOLUME_ID}, {'updates': {'provider_id': '11111.1002', 'replication_status': 'enabled', 'status': 'available'}, 'volume_id': fake.VOLUME2_ID}] self.assertEqual(expected, ret) self.driver.replication_enabled = False self.driver.failed_over = False self.driver.backends = backends @mock.patch.object(storagecenter_iscsi.SCISCSIDriver, '_get_qos', return_value='cinderqos') @mock.patch.object(storagecenter_iscsi.SCISCSIDriver, '_parse_extraspecs', return_value={'replay_profile_string': 'pro'}) @mock.patch.object(storagecenter_api.SCApi, 'find_volume') @mock.patch.object(storagecenter_api.SCApi, 'find_repl_volume') @mock.patch.object(storagecenter_api.SCApi, 'delete_replication') @mock.patch.object(storagecenter_api.SCApi, 'replicate_to_common') @mock.patch.object(storagecenter_api.SCApi, 'remove_mappings') @mock.patch.object(storagecenter_iscsi.SCISCSIDriver, '_wait_for_replication') @mock.patch.object(storagecenter_iscsi.SCISCSIDriver, '_reattach_remaining_replications') @mock.patch.object(storagecenter_iscsi.SCISCSIDriver, '_fixup_types') @mock.patch.object(storagecenter_iscsi.SCISCSIDriver, '_volume_updates', return_value=[]) @mock.patch.object(storagecenter_iscsi.SCISCSIDriver, '_update_backend') def test_failback_volumes_with_some_not_replicated( self, mock_update_backend, mock_volume_updates, mock_fixup_types, mock_reattach_remaining_replications, mock_wait_for_replication, mock_remove_mappings, mock_replicate_to_common, mock_delete_replication, mock_find_repl_volume, mock_find_volume, mock_parse_extraspecs, mock_get_qos, mock_close_connection, mock_open_connection, mock_init): self.driver.replication_enabled = True self.driver.failed_over = True self.driver.active_backend_id = 12345 self.driver.primaryssn = 11111 backends = self.driver.backends self.driver.backends = [{'target_device_id': '12345', 'qosnode': 'cinderqos'}, {'target_device_id': '67890', 'qosnode': 'cinderqos'}] volumes = [{'id': fake.VOLUME_ID, 'replication_driver_data': '12345', 'provider_id': '12345.1'}, {'id': fake.VOLUME2_ID, 'replication_driver_data': '12345', 'provider_id': '12345.2'}, {'id': fake.VOLUME3_ID, 'provider_id': '11111.10'}] mock_find_volume.side_effect = [{'instanceId': '12345.1'}, {'instanceId': '12345.2'}] mock_find_repl_volume.side_effect = [{'instanceId': '11111.1'}, {'instanceId': '11111.2'}] mock_replicate_to_common.side_effect = [{'instanceId': '12345.100', 'destinationVolume': {'instanceId': '11111.3'} }, {'instanceId': '12345.200', 'destinationVolume': {'instanceId': '11111.4'} }] expected = [{'volume': volumes[0], 'specs': {'replay_profile_string': 'pro'}, 'qosnode': 'cinderqos', 'screpl': '12345.100', 'cvol': '12345.1', 'ovol': '11111.1', 'nvol': '11111.3', 'rdd': '12345', 'status': 'inprogress'}, {'volume': volumes[1], 'specs': {'replay_profile_string': 'pro'}, 'qosnode': 'cinderqos', 'screpl': '12345.200', 'cvol': '12345.2', 'ovol': '11111.2', 'nvol': '11111.4', 'rdd': '12345', 'status': 'inprogress'} ] ret = self.driver.failback_volumes(volumes) mock_volume_updates.assert_called_once_with(expected) # make sure ret is right. In this case just the unreplicated volume # as our volume updates elsewhere return nothing. 
expected_updates = [{'volume_id': fake.VOLUME3_ID, 'updates': {'status': 'available'}}] self.assertEqual(expected_updates, ret) self.driver.replication_enabled = False self.driver.failed_over = False self.driver.backends = backends @mock.patch.object(storagecenter_iscsi.SCISCSIDriver, '_get_qos', return_value='cinderqos') @mock.patch.object(storagecenter_iscsi.SCISCSIDriver, '_update_backend') def test_failback_volumes_with_none_replicated( self, mock_update_backend, mock_get_qos, mock_close_connection, mock_open_connection, mock_init): self.driver.replication_enabled = True self.driver.failed_over = True self.driver.active_backend_id = 12345 self.driver.primaryssn = 11111 backends = self.driver.backends self.driver.backends = [{'target_device_id': '12345', 'qosnode': 'cinderqos'}, {'target_device_id': '67890', 'qosnode': 'cinderqos'}] volumes = [{'id': fake.VOLUME_ID, 'provider_id': '11111.1'}, {'id': fake.VOLUME2_ID, 'provider_id': '11111.2'}, {'id': fake.VOLUME3_ID, 'provider_id': '11111.10'}] ret = self.driver.failback_volumes(volumes) # make sure ret is right. In this case just the unreplicated volume # as our volume updates elsewhere return nothing. expected_updates = [{'volume_id': fake.VOLUME_ID, 'updates': {'status': 'available'}}, {'volume_id': fake.VOLUME2_ID, 'updates': {'status': 'available'}}, {'volume_id': fake.VOLUME3_ID, 'updates': {'status': 'available'}}] self.assertEqual(expected_updates, ret) self.driver.replication_enabled = False self.driver.failed_over = False self.driver.backends = backends def test_volume_updates(self, mock_close_connection, mock_open_connection, mock_init): items = [{'volume': {'id': fake.VOLUME_ID}, 'specs': {'replay_profile_string': 'pro'}, 'qosnode': 'cinderqos', 'screpl': '12345.100', 'cvol': '12345.1', 'ovol': '11111.1', 'nvol': '11111.3', 'rdd': '12345,67890', 'status': 'available'}, {'volume': {'id': fake.VOLUME2_ID}, 'specs': {'replay_profile_string': 'pro'}, 'qosnode': 'cinderqos', 'screpl': '12345.200', 'cvol': '12345.2', 'ovol': '11111.2', 'nvol': '11111.4', 'rdd': '12345,67890', 'status': 'available'} ] ret = self.driver._volume_updates(items) expected = [{'volume_id': fake.VOLUME_ID, 'updates': {'status': 'available', 'replication_status': 'enabled', 'provider_id': '11111.3', 'replication_driver_data': '12345,67890'}}, {'volume_id': fake.VOLUME2_ID, 'updates': {'status': 'available', 'replication_status': 'enabled', 'provider_id': '11111.4', 'replication_driver_data': '12345,67890'}} ] self.assertEqual(expected, ret) items.append({'volume': {'id': fake.VOLUME3_ID}, 'specs': {'replay_profile_string': 'pro'}, 'qosnode': 'cinderqos', 'screpl': '12345.300', 'cvol': '12345.5', 'ovol': '11111.5', 'nvol': '11111.6', 'rdd': '12345', 'status': 'error'}) ret = self.driver._volume_updates(items) expected.append({'volume_id': fake.VOLUME3_ID, 'updates': {'status': 'error', 'replication_status': 'error', 'provider_id': '11111.6', 'replication_driver_data': '12345'}}) self.assertEqual(expected, ret) @mock.patch.object(storagecenter_api.SCApi, 'get_volume', return_value=VOLUME) def test_fixup_types(self, mock_get_volume, mock_close_connection, mock_open_connection, mock_init): items = [{'volume': {'id': fake.VOLUME_ID}, 'specs': {'replay_profile_string': 'pro'}, 'qosnode': 'cinderqos', 'screpl': '12345.100', 'cvol': '12345.1', 'ovol': '11111.1', 'nvol': '11111.3', 'rdd': '12345,67890', 'status': 'reattached'}, {'volume': {'id': fake.VOLUME2_ID}, 'specs': {'replay_profile_string': 'pro'}, 'qosnode': 'cinderqos', 'screpl': '12345.200', 'cvol': 
'12345.2', 'ovol': '11111.2', 'nvol': '11111.4', 'rdd': '12345,67890', 'status': 'reattached'} ] mock_api = mock.Mock() mock_api.update_replay_profiles.return_value = True self.driver._fixup_types(mock_api, items) expected = [{'volume': {'id': fake.VOLUME_ID}, 'specs': {'replay_profile_string': 'pro'}, 'qosnode': 'cinderqos', 'screpl': '12345.100', 'cvol': '12345.1', 'ovol': '11111.1', 'nvol': '11111.3', 'rdd': '12345,67890', 'status': 'available'}, {'volume': {'id': fake.VOLUME2_ID}, 'specs': {'replay_profile_string': 'pro'}, 'qosnode': 'cinderqos', 'screpl': '12345.200', 'cvol': '12345.2', 'ovol': '11111.2', 'nvol': '11111.4', 'rdd': '12345,67890', 'status': 'available'}] self.assertEqual(expected, items) @mock.patch.object(storagecenter_api.SCApi, 'get_volume', return_value=VOLUME) def test_fixup_types_with_error(self, mock_get_volume, mock_close_connection, mock_open_connection, mock_init): items = [{'volume': {'id': fake.VOLUME_ID}, 'specs': {'replay_profile_string': 'pro'}, 'qosnode': 'cinderqos', 'screpl': '12345.100', 'cvol': '12345.1', 'ovol': '11111.1', 'nvol': '11111.3', 'rdd': '12345,67890', 'status': 'reattached'}, {'volume': {'id': fake.VOLUME2_ID}, 'specs': {'replay_profile_string': 'pro'}, 'qosnode': 'cinderqos', 'screpl': '12345.200', 'cvol': '12345.2', 'ovol': '11111.2', 'nvol': '11111.4', 'rdd': '12345,67890', 'status': 'reattached'} ] # One good one fail. mock_api = mock.Mock() mock_api.update_replay_profiles.side_effect = [True, False] self.driver._fixup_types(mock_api, items) expected = [{'volume': {'id': fake.VOLUME_ID}, 'specs': {'replay_profile_string': 'pro'}, 'qosnode': 'cinderqos', 'screpl': '12345.100', 'cvol': '12345.1', 'ovol': '11111.1', 'nvol': '11111.3', 'rdd': '12345,67890', 'status': 'available'}, {'volume': {'id': fake.VOLUME2_ID}, 'specs': {'replay_profile_string': 'pro'}, 'qosnode': 'cinderqos', 'screpl': '12345.200', 'cvol': '12345.2', 'ovol': '11111.2', 'nvol': '11111.4', 'rdd': '12345,67890', 'status': 'error'}] self.assertEqual(expected, items) @mock.patch.object(storagecenter_api.SCApi, 'get_volume', return_value=VOLUME) def test_fixup_types_with_previous_error(self, mock_get_volume, mock_close_connection, mock_open_connection, mock_init): items = [{'volume': {'id': fake.VOLUME_ID}, 'specs': {'replay_profile_string': 'pro'}, 'qosnode': 'cinderqos', 'screpl': '12345.100', 'cvol': '12345.1', 'ovol': '11111.1', 'nvol': '11111.3', 'rdd': '12345,67890', 'status': 'reattached'}, {'volume': {'id': fake.VOLUME2_ID}, 'specs': {'replay_profile_string': 'pro'}, 'qosnode': 'cinderqos', 'screpl': '12345.200', 'cvol': '12345.2', 'ovol': '11111.2', 'nvol': '11111.4', 'rdd': '12345,67890', 'status': 'error'} ] mock_api = mock.Mock() mock_api.update_replay_profiles.return_value = True self.driver._fixup_types(mock_api, items) expected = [{'volume': {'id': fake.VOLUME_ID}, 'specs': {'replay_profile_string': 'pro'}, 'qosnode': 'cinderqos', 'screpl': '12345.100', 'cvol': '12345.1', 'ovol': '11111.1', 'nvol': '11111.3', 'rdd': '12345,67890', 'status': 'available'}, {'volume': {'id': fake.VOLUME2_ID}, 'specs': {'replay_profile_string': 'pro'}, 'qosnode': 'cinderqos', 'screpl': '12345.200', 'cvol': '12345.2', 'ovol': '11111.2', 'nvol': '11111.4', 'rdd': '12345,67890', 'status': 'error'}] self.assertEqual(expected, items) def test_reattach_remaining_replications(self, mock_close_connection, mock_open_connection, mock_init): self.driver.replication_enabled = True self.driver.failed_over = True self.driver.active_backend_id = 12345 self.driver.primaryssn = 11111 backends 
= self.driver.backends self.driver.backends = [{'target_device_id': '12345', 'qosnode': 'cinderqos'}, {'target_device_id': '67890', 'qosnode': 'cinderqos'}] items = [{'volume': {'id': fake.VOLUME_ID}, 'specs': {'replicationtype': 'Synchronous', 'activereplay': False}, 'qosnode': 'cinderqos', 'screpl': '12345.100', 'cvol': '12345.1', 'ovol': '11111.1', 'nvol': '11111.3', 'rdd': '12345', 'status': 'synced'}, {'volume': {'id': fake.VOLUME2_ID}, 'specs': {'replicationtype': 'Asynchronous', 'activereplay': True}, 'qosnode': 'cinderqos', 'screpl': '12345.200', 'cvol': '12345.2', 'ovol': '11111.2', 'nvol': '11111.4', 'rdd': '12345', 'status': 'synced'} ] mock_api = mock.Mock() mock_api.ssn = self.driver.active_backend_id mock_api.get_volume.return_value = self.VOLUME mock_api.find_repl_volume.return_value = self.VOLUME mock_api.start_replication.side_effect = [{'instanceId': '11111.1001'}, {'instanceId': '11111.1002'}, None, {'instanceId': '11111.1001'}] self.driver._reattach_remaining_replications(mock_api, items) expected = [{'volume': {'id': fake.VOLUME_ID}, 'specs': {'replicationtype': 'Synchronous', 'activereplay': False}, 'qosnode': 'cinderqos', 'screpl': '12345.100', 'cvol': '12345.1', 'ovol': '11111.1', 'nvol': '11111.3', 'rdd': '12345,67890', 'status': 'reattached'}, {'volume': {'id': fake.VOLUME2_ID}, 'specs': {'replicationtype': 'Asynchronous', 'activereplay': True}, 'qosnode': 'cinderqos', 'screpl': '12345.200', 'cvol': '12345.2', 'ovol': '11111.2', 'nvol': '11111.4', 'rdd': '12345,67890', 'status': 'reattached'}] self.assertEqual(expected, items) mock_api.start_replication.assert_any_call(self.VOLUME, self.VOLUME, 'Synchronous', 'cinderqos', False) mock_api.start_replication.assert_any_call(self.VOLUME, self.VOLUME, 'Asynchronous', 'cinderqos', True) items = [{'volume': {'id': fake.VOLUME_ID}, 'specs': {'replicationtype': 'Synchronous', 'activereplay': False}, 'qosnode': 'cinderqos', 'screpl': '12345.100', 'cvol': '12345.1', 'ovol': '11111.1', 'nvol': '11111.3', 'rdd': '12345', 'status': 'synced'}, {'volume': {'id': fake.VOLUME2_ID}, 'specs': {'replicationtype': 'Asynchronous', 'activereplay': True}, 'qosnode': 'cinderqos', 'screpl': '12345.200', 'cvol': '12345.2', 'ovol': '11111.2', 'nvol': '11111.4', 'rdd': '12345', 'status': 'synced'} ] self.driver._reattach_remaining_replications(mock_api, items) expected = [{'volume': {'id': fake.VOLUME_ID}, 'specs': {'replicationtype': 'Synchronous', 'activereplay': False}, 'qosnode': 'cinderqos', 'screpl': '12345.100', 'cvol': '12345.1', 'ovol': '11111.1', 'nvol': '11111.3', 'rdd': '12345', 'status': 'error'}, {'volume': {'id': fake.VOLUME2_ID}, 'specs': {'replicationtype': 'Asynchronous', 'activereplay': True}, 'qosnode': 'cinderqos', 'screpl': '12345.200', 'cvol': '12345.2', 'ovol': '11111.2', 'nvol': '11111.4', 'rdd': '12345,67890', 'status': 'reattached'}] self.assertEqual(expected, items) mock_api.start_replication.assert_any_call(self.VOLUME, self.VOLUME, 'Synchronous', 'cinderqos', False) mock_api.start_replication.assert_any_call(self.VOLUME, self.VOLUME, 'Asynchronous', 'cinderqos', True) self.driver.backends = backends def _setup_items(self): self.driver.replication_enabled = True self.driver.failed_over = True self.driver.active_backend_id = 12345 self.driver.primaryssn = 11111 backends = self.driver.backends self.driver.backends = [{'target_device_id': '12345', 'qosnode': 'cinderqos'}, {'target_device_id': '67890', 'qosnode': 'cinderqos'}] volumes = [{'id': fake.VOLUME_ID, 'replication_driver_data': '12345', 'provider_id': 
'12345.1'}, {'id': fake.VOLUME2_ID, 'replication_driver_data': '12345', 'provider_id': '12345.2'}] items = [{'volume': volumes[0], 'specs': {'replay_profile_string': 'pro', 'replicationtype': 'Asynchronous', 'activereplay': True}, 'qosnode': 'cinderqos', 'screpl': '12345.100', 'cvol': '12345.1', 'ovol': '11111.1', 'nvol': '11111.3', 'rdd': '12345', 'status': 'inprogress'}, {'volume': volumes[1], 'specs': {'replay_profile_string': 'pro', 'replicationtype': 'Asynchronous', 'activereplay': True}, 'qosnode': 'cinderqos', 'screpl': '12345.200', 'cvol': '12345.2', 'ovol': '11111.2', 'nvol': '11111.4', 'rdd': '12345', 'status': 'inprogress'} ] return items, backends def test_wait_for_replication(self, mock_close_connection, mock_open_connection, mock_init): items, backends = self._setup_items() expected = [] for item in items: expected.append(dict(item)) expected[0]['status'] = 'synced' expected[1]['status'] = 'synced' mock_api = mock.Mock() mock_api.flip_replication.return_value = True mock_api.get_volume.return_value = self.VOLUME mock_api.replication_progress.return_value = (True, 0) mock_api.rename_volume.return_value = True self.driver._wait_for_replication(mock_api, items) self.assertEqual(expected, items) self.backends = backends def test_wait_for_replication_flip_flops(self, mock_close_connection, mock_open_connection, mock_init): items, backends = self._setup_items() expected = [] for item in items: expected.append(dict(item)) expected[0]['status'] = 'synced' expected[1]['status'] = 'error' mock_api = mock.Mock() mock_api.flip_replication.side_effect = [True, False] mock_api.get_volume.return_value = self.VOLUME mock_api.replication_progress.return_value = (True, 0) mock_api.rename_volume.return_value = True self.driver._wait_for_replication(mock_api, items) self.assertEqual(expected, items) self.backends = backends def test_wait_for_replication_flip_no_vol(self, mock_close_connection, mock_open_connection, mock_init): items, backends = self._setup_items() expected = [] for item in items: expected.append(dict(item)) expected[0]['status'] = 'synced' expected[1]['status'] = 'error' mock_api = mock.Mock() mock_api.flip_replication.return_value = True mock_api.get_volume.side_effect = [self.VOLUME, self.VOLUME, self.VOLUME, self.VOLUME, None] mock_api.replication_progress.return_value = (True, 0) mock_api.rename_volume.return_value = True self.driver._wait_for_replication(mock_api, items) self.assertEqual(expected, items) self.backends = backends def test_wait_for_replication_cant_find_orig(self, mock_close_connection, mock_open_connection, mock_init): items, backends = self._setup_items() expected = [] for item in items: expected.append(dict(item)) expected[0]['status'] = 'synced' expected[1]['status'] = 'synced' mock_api = mock.Mock() mock_api.flip_replication.return_value = True mock_api.get_volume.side_effect = [self.VOLUME, self.VOLUME, None, self.VOLUME, self.VOLUME, None] mock_api.replication_progress.return_value = (True, 0) mock_api.rename_volume.return_value = True self.driver._wait_for_replication(mock_api, items) self.assertEqual(expected, items) self.backends = backends def test_wait_for_replication_rename_fail(self, mock_close_connection, mock_open_connection, mock_init): items, backends = self._setup_items() expected = [] for item in items: expected.append(dict(item)) expected[0]['status'] = 'synced' expected[1]['status'] = 'synced' mock_api = mock.Mock() mock_api.flip_replication.return_value = True mock_api.get_volume.return_value = self.VOLUME 
mock_api.replication_progress.return_value = (True, 0) mock_api.rename_volume.return_value = True self.driver._wait_for_replication(mock_api, items) self.assertEqual(expected, items) self.backends = backends def test_wait_for_replication_timeout(self, mock_close_connection, mock_open_connection, mock_init): items, backends = self._setup_items() expected = [] for item in items: expected.append(dict(item)) expected[0]['status'] = 'error' expected[1]['status'] = 'error' self.assertNotEqual(items, expected) mock_api = mock.Mock() mock_api.get_volume.side_effect = [self.VOLUME, self.VOLUME, self.VOLUME, self.VOLUME, None] mock_api.replication_progress.return_value = (False, 500) self.driver.failback_timeout = 1 self.driver._wait_for_replication(mock_api, items) self.assertEqual(expected, items) calls = [mock.call(1)] * 5 self.mock_sleep.assert_has_calls(calls) self.backends = backends @mock.patch.object(storagecenter_iscsi.SCISCSIDriver, '_get_volume_extra_specs') def test_parse_extraspecs(self, mock_get_volume_extra_specs, mock_close_connection, mock_open_connection, mock_init): volume = {'id': fake.VOLUME_ID} mock_get_volume_extra_specs.return_value = {} ret = self.driver._parse_extraspecs(volume) expected = {'replicationtype': 'Asynchronous', 'activereplay': False, 'storage_profile': None, 'replay_profile_string': None} self.assertEqual(expected, ret) def test_get_qos(self, mock_close_connection, mock_open_connection, mock_init): backends = self.driver.backends self.driver.backends = [{'target_device_id': '12345', 'qosnode': 'cinderqos1'}, {'target_device_id': '67890', 'qosnode': 'cinderqos2'}] ret = self.driver._get_qos(12345) self.assertEqual('cinderqos1', ret) ret = self.driver._get_qos(67890) self.assertEqual('cinderqos2', ret) ret = self.driver._get_qos(11111) self.assertIsNone(ret) self.driver.backends[0] = {'target_device_id': '12345'} ret = self.driver._get_qos(12345) self.assertEqual('cinderqos', ret) self.driver.backends = backends def test_thaw_backend(self, mock_close_connection, mock_open_connection, mock_init): self.driver.failed_over = False ret = self.driver.thaw_backend(self._context) self.assertTrue(ret) def test_thaw_backend_failed_over(self, mock_close_connection, mock_open_connection, mock_init): self.driver.failed_over = True self.assertRaises(exception.Invalid, self.driver.thaw_backend, self._context) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/volume/drivers/dell_emc/sc/test_scapi.py0000664000175000017500000155125700000000000026342 0ustar00zuulzuul00000000000000# Copyright (c) 2015 Dell Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import json from unittest import mock import uuid import ddt import eventlet import requests from requests import models from cinder import context from cinder import exception from cinder.tests.unit import fake_constants as fake from cinder.tests.unit import test from cinder.volume.drivers.dell_emc.sc import storagecenter_api # We patch these here as they are used by every test to keep # from trying to contact a Dell Storage Center. @ddt.ddt @mock.patch.object(storagecenter_api.SCApi, '__init__', return_value=None) @mock.patch.object(storagecenter_api.SCApi, 'open_connection') @mock.patch.object(storagecenter_api.SCApi, 'close_connection') class DellSCSanAPITestCase(test.TestCase): """DellSCSanAPITestCase Class to test the Storage Center API using Mock. """ SC = {u'IPv6ManagementIPPrefix': 128, u'connectionError': u'', u'instanceId': u'64702', u'scSerialNumber': 64702, u'dataProgressionRunning': False, u'hostOrIpAddress': u'192.168.0.80', u'userConnected': True, u'portsBalanced': True, u'managementIp': u'192.168.0.80', u'version': u'6.5.1.269', u'location': u'', u'objectType': u'StorageCenter', u'instanceName': u'Storage Center 64702', u'statusMessage': u'', u'status': u'Up', u'flashOptimizedConfigured': False, u'connected': True, u'operationMode': u'Normal', u'userName': u'Admin', u'nonFlashOptimizedConfigured': True, u'name': u'Storage Center 64702', u'scName': u'Storage Center 64702', u'notes': u'', u'serialNumber': 64702, u'raidRebalanceRunning': False, u'userPasswordExpired': False, u'contact': u'', u'IPv6ManagementIP': u'::'} VOLUME = {u'instanceId': u'64702.3494', u'scSerialNumber': 64702, u'replicationSource': False, u'liveVolume': False, u'vpdId': 3496, u'objectType': u'ScVolume', u'index': 3494, u'volumeFolderPath': u'devstackvol/fcvm/', u'hostCacheEnabled': False, u'usedByLegacyFluidFsNasVolume': False, u'inRecycleBin': False, u'volumeFolderIndex': 17, u'instanceName': u'volume-37883deb-85cd-426a-9a98-62eaad8671ea', u'statusMessage': u'', u'status': u'Up', u'storageType': {u'instanceId': u'64702.1', u'instanceName': u'Assigned - Redundant - 2 MB', u'objectType': u'ScStorageType'}, u'cmmDestination': False, u'replicationDestination': False, u'volumeFolder': {u'instanceId': u'64702.17', u'instanceName': u'fcvm', u'objectType': u'ScVolumeFolder'}, u'deviceId': u'6000d31000fcbe000000000000000da8', u'active': True, u'portableVolumeDestination': False, u'deleteAllowed': True, u'name': u'volume-37883deb-85cd-426a-9a98-62eaad8671ea', u'scName': u'Storage Center 64702', u'secureDataUsed': False, u'serialNumber': u'0000fcbe-00000da8', u'replayAllowed': True, u'flashOptimized': False, u'configuredSize': u'1.073741824E9 Bytes', u'mapped': False, u'cmmSource': False} VOLUME_LIST = [{u'instanceId': u'64702.3494', u'scSerialNumber': 64702, u'replicationSource': False, u'liveVolume': False, u'vpdId': 3496, u'objectType': u'ScVolume', u'index': 3494, u'volumeFolderPath': u'devstackvol/fcvm/', u'hostCacheEnabled': False, u'usedByLegacyFluidFsNasVolume': False, u'inRecycleBin': False, u'volumeFolderIndex': 17, u'instanceName': u'volume-37883deb-85cd-426a-9a98-62eaad8671ea', u'statusMessage': u'', u'status': u'Up', u'storageType': {u'instanceId': u'64702.1', u'instanceName': u'Assigned - Redundant - 2 MB', u'objectType': u'ScStorageType'}, u'cmmDestination': False, u'replicationDestination': False, u'volumeFolder': {u'instanceId': u'64702.17', u'instanceName': u'fcvm', u'objectType': u'ScVolumeFolder'}, u'deviceId': u'6000d31000fcbe000000000000000da8', u'active': True, 
u'portableVolumeDestination': False, u'deleteAllowed': True, u'name': u'volume-37883deb-85cd-426a-9a98-62eaad8671ea', u'scName': u'Storage Center 64702', u'secureDataUsed': False, u'serialNumber': u'0000fcbe-00000da8', u'replayAllowed': True, u'flashOptimized': False, u'configuredSize': u'1.073741824E9 Bytes', u'mapped': False, u'cmmSource': False}] # Volume list that contains multiple volumes VOLUME_LIST_MULTI_VOLS = [ {u'instanceId': u'64702.3494', u'scSerialNumber': 64702, u'replicationSource': False, u'liveVolume': False, u'vpdId': 3496, u'objectType': u'ScVolume', u'index': 3494, u'volumeFolderPath': u'devstackvol/fcvm/', u'hostCacheEnabled': False, u'usedByLegacyFluidFsNasVolume': False, u'inRecycleBin': False, u'volumeFolderIndex': 17, u'instanceName': u'volume-37883deb-85cd-426a-9a98-62eaad8671ea', u'statusMessage': u'', u'status': u'Up', u'storageType': {u'instanceId': u'64702.1', u'instanceName': u'Assigned - Redundant - 2 MB', u'objectType': u'ScStorageType'}, u'cmmDestination': False, u'replicationDestination': False, u'volumeFolder': {u'instanceId': u'64702.17', u'instanceName': u'fcvm', u'objectType': u'ScVolumeFolder'}, u'deviceId': u'6000d31000fcbe000000000000000da8', u'active': True, u'portableVolumeDestination': False, u'deleteAllowed': True, u'name': u'volume-37883deb-85cd-426a-9a98-62eaad8671ea', u'scName': u'Storage Center 64702', u'secureDataUsed': False, u'serialNumber': u'0000fcbe-00000da8', u'replayAllowed': True, u'flashOptimized': False, u'configuredSize': u'1.073741824E9 Bytes', u'mapped': False, u'cmmSource': False}, {u'instanceId': u'64702.3495', u'scSerialNumber': 64702, u'replicationSource': False, u'liveVolume': False, u'vpdId': 3496, u'objectType': u'ScVolume', u'index': 3495, u'volumeFolderPath': u'devstackvol/fcvm/', u'hostCacheEnabled': False, u'usedByLegacyFluidFsNasVolume': False, u'inRecycleBin': False, u'volumeFolderIndex': 17, u'instanceName': u'volume-37883deb-85cd-426a-9a98-62eaad8671ea', u'statusMessage': u'', u'status': u'Up', u'storageType': {u'instanceId': u'64702.1', u'instanceName': u'Assigned - Redundant - 2 MB', u'objectType': u'ScStorageType'}, u'cmmDestination': False, u'replicationDestination': False, u'volumeFolder': {u'instanceId': u'64702.17', u'instanceName': u'fcvm', u'objectType': u'ScVolumeFolder'}, u'deviceId': u'6000d31000fcbe000000000000000da9', u'active': True, u'portableVolumeDestination': False, u'deleteAllowed': True, u'name': u'volume-37883deb-85cd-426a-9a98-62eaad8671ea', u'scName': u'Storage Center 64702', u'secureDataUsed': False, u'serialNumber': u'0000fcbe-00000da8', u'replayAllowed': True, u'flashOptimized': False, u'configuredSize': u'1.073741824E9 Bytes', u'mapped': False, u'cmmSource': False}] VOLUME_CONFIG = \ {u'instanceId': u'64702.3494', u'scSerialNumber': 64702, u'maximumSiblingCount': 100, u'writeCacheStatus': u'Up', u'objectType': u'ScVolumeConfiguration', u'currentSiblingConfiguredSize': u'2.147483648E9 Bytes', u'compressionPaused': False, u'enforceConsumptionLimit': False, u'volumeSpaceConsumptionLimit': u'2.147483648E9 Bytes', u'readCacheEnabled': True, u'writeCacheEnabled': True, u'instanceName': u'volume-ff9589d3-2d41-48d5-9ef5-2713a875e85b', u'dateModified': u'04/03/2015 12:01:08 AM', u'modifyUser': u'Admin', u'replayExpirationPaused': False, u'currentSiblingCount': 1, u'replayCreationPaused': False, u'replayProfileList': [{u'instanceId': u'64702.2', u'instanceName': u'Daily', u'objectType': u'ScReplayProfile'}], u'dateCreated': u'04/04/2014 03:54:26 AM', u'volume': {u'instanceId': u'64702.3494', 
u'instanceName': u'volume-37883deb-85cd-426a-9a98-62eaad8671ea', u'objectType': u'ScVolume'}, u'controller': {u'instanceId': u'64702.64703', u'instanceName': u'SN 64703', u'objectType': u'ScController'}, u'coalesceIntoActive': False, u'createUser': u'Admin', u'importToLowestTier': False, u'readCacheStatus': u'Up', u'maximumSiblingConfiguredSpace': u'5.49755813888E14 Bytes', u'storageProfile': {u'instanceId': u'64702.1', u'instanceName': u'Recommended', u'objectType': u'ScStorageProfile'}, u'scName': u'Storage Center 64702', u'notes': u'', u'diskFolder': {u'instanceId': u'64702.3', u'instanceName': u'Assigned', u'objectType': u'ScDiskFolder'}, u'openVmsUniqueDiskId': 48, u'compressionEnabled': False} INACTIVE_VOLUME = \ {u'instanceId': u'64702.3494', u'scSerialNumber': 64702, u'replicationSource': False, u'liveVolume': False, u'vpdId': 3496, u'objectType': u'ScVolume', u'index': 3494, u'volumeFolderPath': u'devstackvol/fcvm/', u'hostCacheEnabled': False, u'usedByLegacyFluidFsNasVolume': False, u'inRecycleBin': False, u'volumeFolderIndex': 17, u'instanceName': u'volume-37883deb-85cd-426a-9a98-62eaad8671ea', u'statusMessage': u'', u'status': u'Up', u'storageType': {u'instanceId': u'64702.1', u'instanceName': u'Assigned - Redundant - 2 MB', u'objectType': u'ScStorageType'}, u'cmmDestination': False, u'replicationDestination': False, u'volumeFolder': {u'instanceId': u'64702.17', u'instanceName': u'fcvm', u'objectType': u'ScVolumeFolder'}, u'deviceId': u'6000d31000fcbe000000000000000da8', u'active': False, u'portableVolumeDestination': False, u'deleteAllowed': True, u'name': u'volume-37883deb-85cd-426a-9a98-62eaad8671ea', u'scName': u'Storage Center 64702', u'secureDataUsed': False, u'serialNumber': u'0000fcbe-00000da8', u'replayAllowed': True, u'flashOptimized': False, u'configuredSize': u'1.073741824E9 Bytes', u'mapped': False, u'cmmSource': False} SCSERVER = {u'scName': u'Storage Center 64702', u'volumeCount': 0, u'removeHbasAllowed': True, u'legacyFluidFs': False, u'serverFolderIndex': 4, u'alertOnConnectivity': True, u'objectType': u'ScPhysicalServer', u'instanceName': u'Server_21000024ff30441d', u'instanceId': u'64702.47', u'serverFolderPath': u'devstacksrv/', u'portType': [u'FibreChannel'], u'type': u'Physical', u'statusMessage': u'Only 5 of 6 expected paths are up', u'status': u'Degraded', u'scSerialNumber': 64702, u'serverFolder': {u'instanceId': u'64702.4', u'instanceName': u'devstacksrv', u'objectType': u'ScServerFolder'}, u'parentIndex': 0, u'connectivity': u'Partial', u'hostCacheIndex': 0, u'deleteAllowed': True, u'pathCount': 5, u'name': u'Server_21000024ff30441d', u'hbaPresent': True, u'hbaCount': 2, u'notes': u'Created by Dell EMC Cinder Driver', u'mapped': False, u'operatingSystem': {u'instanceId': u'64702.38', u'instanceName': u'Red Hat Linux 6.x', u'objectType': u'ScServerOperatingSystem'} } # ScServer where deletedAllowed=False (not allowed to be deleted) SCSERVER_NO_DEL = {u'scName': u'Storage Center 64702', u'volumeCount': 0, u'removeHbasAllowed': True, u'legacyFluidFs': False, u'serverFolderIndex': 4, u'alertOnConnectivity': True, u'objectType': u'ScPhysicalServer', u'instanceName': u'Server_21000024ff30441d', u'instanceId': u'64702.47', u'serverFolderPath': u'devstacksrv/', u'portType': [u'FibreChannel'], u'type': u'Physical', u'statusMessage': u'Only 5 of 6 expected paths are up', u'status': u'Degraded', u'scSerialNumber': 64702, u'serverFolder': {u'instanceId': u'64702.4', u'instanceName': u'devstacksrv', u'objectType': u'ScServerFolder'}, u'parentIndex': 0, 
u'connectivity': u'Partial', u'hostCacheIndex': 0, u'deleteAllowed': False, u'pathCount': 5, u'name': u'Server_21000024ff30441d', u'hbaPresent': True, u'hbaCount': 2, u'notes': u'Created by Dell EMC Cinder Driver', u'mapped': False, u'operatingSystem': {u'instanceId': u'64702.38', u'instanceName': u'Red Hat Linux 6.x', u'objectType': u'ScServerOperatingSystem'} } SCSERVERS = [{u'scName': u'Storage Center 64702', u'volumeCount': 5, u'removeHbasAllowed': True, u'legacyFluidFs': False, u'serverFolderIndex': 0, u'alertOnConnectivity': True, u'objectType': u'ScPhysicalServer', u'instanceName': u'openstack4', u'instanceId': u'64702.1', u'serverFolderPath': u'', u'portType': [u'Iscsi'], u'type': u'Physical', u'statusMessage': u'', u'status': u'Up', u'scSerialNumber': 64702, u'serverFolder': {u'instanceId': u'64702.0', u'instanceName': u'Servers', u'objectType': u'ScServerFolder'}, u'parentIndex': 0, u'connectivity': u'Up', u'hostCacheIndex': 0, u'deleteAllowed': True, u'pathCount': 0, u'name': u'openstack4', u'hbaPresent': True, u'hbaCount': 1, u'notes': u'', u'mapped': True, u'operatingSystem': {u'instanceId': u'64702.3', u'instanceName': u'Other Multipath', u'objectType': u'ScServerOperatingSystem'}}, {u'scName': u'Storage Center 64702', u'volumeCount': 1, u'removeHbasAllowed': True, u'legacyFluidFs': False, u'serverFolderIndex': 0, u'alertOnConnectivity': True, u'objectType': u'ScPhysicalServer', u'instanceName': u'openstack5', u'instanceId': u'64702.2', u'serverFolderPath': u'', u'portType': [u'Iscsi'], u'type': u'Physical', u'statusMessage': u'', u'status': u'Up', u'scSerialNumber': 64702, u'serverFolder': {u'instanceId': u'64702.0', u'instanceName': u'Servers', u'objectType': u'ScServerFolder'}, u'parentIndex': 0, u'connectivity': u'Up', u'hostCacheIndex': 0, u'deleteAllowed': True, u'pathCount': 0, u'name': u'openstack5', u'hbaPresent': True, u'hbaCount': 1, u'notes': u'', u'mapped': True, u'operatingSystem': {u'instanceId': u'64702.2', u'instanceName': u'Other Singlepath', u'objectType': u'ScServerOperatingSystem'}}] # ScServers list where status = Down SCSERVERS_DOWN = \ [{u'scName': u'Storage Center 64702', u'volumeCount': 5, u'removeHbasAllowed': True, u'legacyFluidFs': False, u'serverFolderIndex': 0, u'alertOnConnectivity': True, u'objectType': u'ScPhysicalServer', u'instanceName': u'openstack4', u'instanceId': u'64702.1', u'serverFolderPath': u'', u'portType': [u'Iscsi'], u'type': u'Physical', u'statusMessage': u'', u'status': u'Down', u'scSerialNumber': 64702, u'serverFolder': {u'instanceId': u'64702.0', u'instanceName': u'Servers', u'objectType': u'ScServerFolder'}, u'parentIndex': 0, u'connectivity': u'Up', u'hostCacheIndex': 0, u'deleteAllowed': True, u'pathCount': 0, u'name': u'openstack4', u'hbaPresent': True, u'hbaCount': 1, u'notes': u'', u'mapped': True, u'operatingSystem': {u'instanceId': u'64702.3', u'instanceName': u'Other Multipath', u'objectType': u'ScServerOperatingSystem'}}] MAP_PROFILE = {u'instanceId': u'64702.2941', u'scName': u'Storage Center 64702', u'scSerialNumber': 64702, u'controller': {u'instanceId': u'64702.64703', u'instanceName': u'SN 64703', u'objectType': u'ScController'}, u'lunUsed': [1], u'server': {u'instanceId': u'64702.47', u'instanceName': u'Server_21000024ff30441d', u'objectType': u'ScPhysicalServer'}, u'volume': {u'instanceId': u'64702.6025', u'instanceName': u'Server_21000024ff30441d Test Vol', u'objectType': u'ScVolume'}, u'connectivity': u'Up', u'readOnly': False, u'objectType': u'ScMappingProfile', u'hostCache': False, u'mappedVia': 
u'Server', u'mapCount': 3, u'instanceName': u'6025-47', u'lunRequested': u'N/A'} MAP_PROFILES = [MAP_PROFILE] MAPPINGS = [{u'profile': {u'instanceId': u'64702.104', u'instanceName': u'92-30', u'objectType': u'ScMappingProfile'}, u'status': u'Down', u'statusMessage': u'', u'instanceId': u'64702.969.64702', u'scName': u'Storage Center 64702', u'scSerialNumber': 64702, u'controller': {u'instanceId': u'64702.64702', u'instanceName': u'SN 64702', u'objectType': u'ScController'}, u'server': {u'instanceId': u'64702.30', u'instanceName': u'Server_iqn.1993-08.org.debian:01:3776df826e4f', u'objectType': u'ScPhysicalServer'}, u'volume': {u'instanceId': u'64702.92', u'instanceName': u'volume-74a21934-60ad-4cf2-b89b-1f0dda309ddf', u'objectType': u'ScVolume'}, u'readOnly': False, u'lun': 1, u'lunUsed': [1], u'serverHba': {u'instanceId': u'64702.3454975614', u'instanceName': u'iqn.1993-08.org.debian:01:3776df826e4f', u'objectType': u'ScServerHba'}, u'path': {u'instanceId': u'64702.64702.64702.31.8', u'instanceName': u'iqn.1993-08.org.debian:' '01:3776df826e4f-5000D31000FCBE43', u'objectType': u'ScServerHbaPath'}, u'controllerPort': {u'instanceId': u'64702.5764839588723736131.91', u'instanceName': u'5000D31000FCBE43', u'objectType': u'ScControllerPort'}, u'instanceName': u'64702-969', u'transport': u'Iscsi', u'objectType': u'ScMapping'}] # Multiple mappings to test find_iscsi_properties with multiple portals MAPPINGS_MULTI_PORTAL = \ [{u'profile': {u'instanceId': u'64702.104', u'instanceName': u'92-30', u'objectType': u'ScMappingProfile'}, u'status': u'Down', u'statusMessage': u'', u'instanceId': u'64702.969.64702', u'scName': u'Storage Center 64702', u'scSerialNumber': 64702, u'controller': {u'instanceId': u'64702.64702', u'instanceName': u'SN 64702', u'objectType': u'ScController'}, u'server': {u'instanceId': u'64702.30', u'instanceName': u'Server_iqn.1993-08.org.debian:01:3776df826e4f', u'objectType': u'ScPhysicalServer'}, u'volume': {u'instanceId': u'64702.92', u'instanceName': u'volume-74a21934-60ad-4cf2-b89b-1f0dda309ddf', u'objectType': u'ScVolume'}, u'readOnly': False, u'lun': 1, u'lunUsed': [1], u'serverHba': {u'instanceId': u'64702.3454975614', u'instanceName': u'iqn.1993-08.org.debian:01:3776df826e4f', u'objectType': u'ScServerHba'}, u'path': {u'instanceId': u'64702.64702.64702.31.8', u'instanceName': u'iqn.1993-08.org.debian:' '01:3776df826e4f-5000D31000FCBE43', u'objectType': u'ScServerHbaPath'}, u'controllerPort': {u'instanceId': u'64702.5764839588723736131.91', u'instanceName': u'5000D31000FCBE43', u'objectType': u'ScControllerPort'}, u'instanceName': u'64702-969', u'transport': u'Iscsi', u'objectType': u'ScMapping'}, {u'profile': {u'instanceId': u'64702.104', u'instanceName': u'92-30', u'objectType': u'ScMappingProfile'}, u'status': u'Down', u'statusMessage': u'', u'instanceId': u'64702.969.64702', u'scName': u'Storage Center 64702', u'scSerialNumber': 64702, u'controller': {u'instanceId': u'64702.64702', u'instanceName': u'SN 64702', u'objectType': u'ScController'}, u'server': {u'instanceId': u'64702.30', u'instanceName': u'Server_iqn.1993-08.org.debian:01:3776df826e4f', u'objectType': u'ScPhysicalServer'}, u'volume': {u'instanceId': u'64702.92', u'instanceName': u'volume-74a21934-60ad-4cf2-b89b-1f0dda309ddf', u'objectType': u'ScVolume'}, u'readOnly': False, u'lun': 1, u'lunUsed': [1], u'serverHba': {u'instanceId': u'64702.3454975614', u'instanceName': u'iqn.1993-08.org.debian:01:3776df826e4f', u'objectType': u'ScServerHba'}, u'path': {u'instanceId': u'64702.64702.64702.31.8', 
u'instanceName': u'iqn.1993-08.org.debian:' '01:3776df826e4f-5000D31000FCBE43', u'objectType': u'ScServerHbaPath'}, u'controllerPort': {u'instanceId': u'64702.5764839588723736131.91', u'instanceName': u'5000D31000FCBE43', u'objectType': u'ScControllerPort'}, u'instanceName': u'64702-969', u'transport': u'Iscsi', u'objectType': u'ScMapping'}] MAPPINGS_READ_ONLY = \ [{u'profile': {u'instanceId': u'64702.104', u'instanceName': u'92-30', u'objectType': u'ScMappingProfile'}, u'status': u'Down', u'statusMessage': u'', u'instanceId': u'64702.969.64702', u'scName': u'Storage Center 64702', u'scSerialNumber': 64702, u'controller': {u'instanceId': u'64702.64702', u'instanceName': u'SN 64702', u'objectType': u'ScController'}, u'server': {u'instanceId': u'64702.30', u'instanceName': u'Server_iqn.1993-08.org.debian:01:3776df826e4f', u'objectType': u'ScPhysicalServer'}, u'volume': {u'instanceId': u'64702.92', u'instanceName': u'volume-74a21934-60ad-4cf2-b89b-1f0dda309ddf', u'objectType': u'ScVolume'}, u'readOnly': True, u'lun': 1, u'lunUsed': [1], u'serverHba': {u'instanceId': u'64702.3454975614', u'instanceName': u'iqn.1993-08.org.debian:01:3776df826e4f', u'objectType': u'ScServerHba'}, u'path': {u'instanceId': u'64702.64702.64702.31.8', u'instanceName': u'iqn.1993-08.org.debian:' '01:3776df826e4f-5000D31000FCBE43', u'objectType': u'ScServerHbaPath'}, u'controllerPort': {u'instanceId': u'64702.5764839588723736131.91', u'instanceName': u'5000D31000FCBE43', u'objectType': u'ScControllerPort'}, u'instanceName': u'64702-969', u'transport': u'Iscsi', u'objectType': u'ScMapping'}] FC_MAPPINGS = [{u'profile': {u'instanceId': u'64702.2941', u'instanceName': u'6025-47', u'objectType': u'ScMappingProfile'}, u'status': u'Up', u'statusMessage': u'', u'instanceId': u'64702.7639.64702', u'scName': u'Storage Center 64702', u'scSerialNumber': 64702, u'controller': {u'instanceId': u'64702.64703', u'instanceName': u'SN 64703', u'objectType': u'ScController'}, u'server': {u'instanceId': u'64702.47', u'instanceName': u'Server_21000024ff30441d', u'objectType': u'ScPhysicalServer'}, u'volume': {u'instanceId': u'64702.6025', u'instanceName': u'Server_21000024ff30441d Test Vol', u'objectType': u'ScVolume'}, u'readOnly': False, u'lun': 1, u'serverHba': {u'instanceId': u'64702.3282218607', u'instanceName': u'21000024ff30441c', u'objectType': u'ScServerHba'}, u'path': {u'instanceId': u'64702.64702.64703.27.73', u'instanceName': u'21000024ff30441c-5000d31000fcbe36', u'objectType': u'ScServerHbaPath'}, u'controllerPort': {u'instanceId': u'64702.5764839588723736118.50', u'instanceName': u'5000d31000fcbe36', u'objectType': u'ScControllerPort'}, u'instanceName': u'64702-7639', u'transport': u'FibreChannel', u'objectType': u'ScMapping'}, {u'profile': {u'instanceId': u'64702.2941', u'instanceName': u'6025-47', u'objectType': u'ScMappingProfile'}, u'status': u'Up', u'statusMessage': u'', u'instanceId': u'64702.7640.64702', u'scName': u'Storage Center 64702', u'scSerialNumber': 64702, u'controller': {u'instanceId': u'64702.64703', u'instanceName': u'SN 64703', u'objectType': u'ScController'}, u'server': {u'instanceId': u'64702.47', u'instanceName': u'Server_21000024ff30441d', u'objectType': u'ScPhysicalServer'}, u'volume': {u'instanceId': u'64702.6025', u'instanceName': u'Server_21000024ff30441d Test Vol', u'objectType': u'ScVolume'}, u'readOnly': False, u'lun': 1, u'serverHba': {u'instanceId': u'64702.3282218606', u'instanceName': u'21000024ff30441d', u'objectType': u'ScServerHba'}, u'path': {u'instanceId': u'64702.64702.64703.27.78', 
u'instanceName': u'21000024ff30441d-5000d31000fcbe36', u'objectType': u'ScServerHbaPath'}, u'controllerPort': {u'instanceId': u'64702.5764839588723736118.50', u'instanceName': u'5000d31000fcbe36', u'objectType': u'ScControllerPort'}, u'instanceName': u'64702-7640', u'transport': u'FibreChannel', u'objectType': u'ScMapping'}, {u'profile': {u'instanceId': u'64702.2941', u'instanceName': u'6025-47', u'objectType': u'ScMappingProfile'}, u'status': u'Up', u'statusMessage': u'', u'instanceId': u'64702.7638.64702', u'scName': u'Storage Center 64702', u'scSerialNumber': 64702, u'controller': {u'instanceId': u'64702.64703', u'instanceName': u'SN 64703', u'objectType': u'ScController'}, u'server': {u'instanceId': u'64702.47', u'instanceName': u'Server_21000024ff30441d', u'objectType': u'ScPhysicalServer'}, u'volume': {u'instanceId': u'64702.6025', u'instanceName': u'Server_21000024ff30441d Test Vol', u'objectType': u'ScVolume'}, u'readOnly': False, u'lun': 1, u'serverHba': {u'instanceId': u'64702.3282218606', u'instanceName': u'21000024ff30441d', u'objectType': u'ScServerHba'}, u'path': {u'instanceId': u'64702.64702.64703.28.76', u'instanceName': u'21000024ff30441d-5000D31000FCBE3E', u'objectType': u'ScServerHbaPath'}, u'controllerPort': {u'instanceId': u'64702.5764839588723736126.60', u'instanceName': u'5000D31000FCBE3E', u'objectType': u'ScControllerPort'}, u'instanceName': u'64702-7638', u'transport': u'FibreChannel', u'objectType': u'ScMapping'}] FC_MAPPINGS_LUN_MISMATCH = \ [{u'profile': {u'instanceId': u'64702.2941', u'instanceName': u'6025-47', u'objectType': u'ScMappingProfile'}, u'status': u'Up', u'statusMessage': u'', u'instanceId': u'64702.7639.64702', u'scName': u'Storage Center 64702', u'scSerialNumber': 64702, u'controller': {u'instanceId': u'64702.64703', u'instanceName': u'SN 64703', u'objectType': u'ScController'}, u'server': {u'instanceId': u'64702.47', u'instanceName': u'Server_21000024ff30441d', u'objectType': u'ScPhysicalServer'}, u'volume': {u'instanceId': u'64702.6025', u'instanceName': u'Server_21000024ff30441d Test Vol', u'objectType': u'ScVolume'}, u'readOnly': False, u'lun': 1, u'serverHba': {u'instanceId': u'64702.3282218607', u'instanceName': u'21000024ff30441c', u'objectType': u'ScServerHba'}, u'path': {u'instanceId': u'64702.64702.64703.27.73', u'instanceName': u'21000024ff30441c-5000d31000fcbe36', u'objectType': u'ScServerHbaPath'}, u'controllerPort': {u'instanceId': u'64702.5764839588723736118.50', u'instanceName': u'5000d31000fcbe36', u'objectType': u'ScControllerPort'}, u'instanceName': u'64702-7639', u'transport': u'FibreChannel', u'objectType': u'ScMapping'}, {u'profile': {u'instanceId': u'64702.2941', u'instanceName': u'6025-47', u'objectType': u'ScMappingProfile'}, u'status': u'Up', u'statusMessage': u'', u'instanceId': u'64702.7640.64702', u'scName': u'Storage Center 64702', u'scSerialNumber': 64702, u'controller': {u'instanceId': u'64702.64703', u'instanceName': u'SN 64703', u'objectType': u'ScController'}, u'server': {u'instanceId': u'64702.47', u'instanceName': u'Server_21000024ff30441d', u'objectType': u'ScPhysicalServer'}, u'volume': {u'instanceId': u'64702.6025', u'instanceName': u'Server_21000024ff30441d Test Vol', u'objectType': u'ScVolume'}, u'readOnly': False, u'lun': 1, u'serverHba': {u'instanceId': u'64702.3282218606', u'instanceName': u'21000024ff30441d', u'objectType': u'ScServerHba'}, u'path': {u'instanceId': u'64702.64702.64703.27.78', u'instanceName': u'21000024ff30441d-5000d31000fcbe36', u'objectType': u'ScServerHbaPath'}, u'controllerPort': 
{u'instanceId': u'64702.5764839588723736118.50', u'instanceName': u'5000d31000fcbe36', u'objectType': u'ScControllerPort'}, u'instanceName': u'64702-7640', u'transport': u'FibreChannel', u'objectType': u'ScMapping'}, {u'profile': {u'instanceId': u'64702.2941', u'instanceName': u'6025-47', u'objectType': u'ScMappingProfile'}, u'status': u'Up', u'statusMessage': u'', u'instanceId': u'64702.7638.64702', u'scName': u'Storage Center 64702', u'scSerialNumber': 64702, u'controller': {u'instanceId': u'64702.64703', u'instanceName': u'SN 64703', u'objectType': u'ScController'}, u'server': {u'instanceId': u'64702.47', u'instanceName': u'Server_21000024ff30441d', u'objectType': u'ScPhysicalServer'}, u'volume': {u'instanceId': u'64702.6025', u'instanceName': u'Server_21000024ff30441d Test Vol', u'objectType': u'ScVolume'}, u'readOnly': False, u'lun': 2, u'serverHba': {u'instanceId': u'64702.3282218606', u'instanceName': u'21000024ff30441d', u'objectType': u'ScServerHba'}, u'path': {u'instanceId': u'64702.64702.64703.28.76', u'instanceName': u'21000024ff30441d-5000D31000FCBE3E', u'objectType': u'ScServerHbaPath'}, u'controllerPort': {u'instanceId': u'64702.5764839588723736126.60', u'instanceName': u'5000D31000FCBE3E', u'objectType': u'ScControllerPort'}, u'instanceName': u'64702-7638', u'transport': u'FibreChannel', u'objectType': u'ScMapping'}] RPLAY = {u'scSerialNumber': 64702, u'globalIndex': u'64702-46-250', u'description': u'Cinder Clone Replay', u'parent': {u'instanceId': u'64702.46.249', u'instanceName': u'64702-46-249', u'objectType': u'ScReplay'}, u'instanceId': u'64702.46.250', u'scName': u'Storage Center 64702', u'consistent': False, u'expires': True, u'freezeTime': u'12/09/2014 03:52:08 PM', u'createVolume': {u'instanceId': u'64702.46', u'instanceName': u'volume-ff9589d3-2d41-48d5-9ef5-2713a875e85b', u'objectType': u'ScVolume'}, u'expireTime': u'12/09/2014 04:52:08 PM', u'source': u'Manual', u'spaceRecovery': False, u'writesHeldDuration': 7910, u'active': False, u'markedForExpiration': False, u'objectType': u'ScReplay', u'instanceName': u'12/09/2014 03:52:08 PM', u'size': u'0.0 Bytes' } RPLAYS = [{u'scSerialNumber': 64702, u'globalIndex': u'64702-6025-5', u'description': u'Manually Created', u'parent': {u'instanceId': u'64702.6025.4', u'instanceName': u'64702-6025-4', u'objectType': u'ScReplay'}, u'instanceId': u'64702.6025.5', u'scName': u'Storage Center 64702', u'consistent': False, u'expires': True, u'freezeTime': u'02/02/2015 08:23:55 PM', u'createVolume': {u'instanceId': u'64702.6025', u'instanceName': u'Server_21000024ff30441d Test Vol', u'objectType': u'ScVolume'}, u'expireTime': u'02/02/2015 09:23:55 PM', u'source': u'Manual', u'spaceRecovery': False, u'writesHeldDuration': 7889, u'active': False, u'markedForExpiration': False, u'objectType': u'ScReplay', u'instanceName': u'02/02/2015 08:23:55 PM', u'size': u'0.0 Bytes'}, {u'scSerialNumber': 64702, u'globalIndex': u'64702-6025-4', u'description': u'Cinder Test Replay012345678910', u'parent': {u'instanceId': u'64702.6025.3', u'instanceName': u'64702-6025-3', u'objectType': u'ScReplay'}, u'instanceId': u'64702.6025.4', u'scName': u'Storage Center 64702', u'consistent': False, u'expires': True, u'freezeTime': u'02/02/2015 08:23:47 PM', u'createVolume': {u'instanceId': u'64702.6025', u'instanceName': u'Server_21000024ff30441d Test Vol', u'objectType': u'ScVolume'}, u'expireTime': u'02/02/2015 09:23:47 PM', u'source': u'Manual', u'spaceRecovery': False, u'writesHeldDuration': 7869, u'active': False, u'markedForExpiration': False, 
u'objectType': u'ScReplay', u'instanceName': u'02/02/2015 08:23:47 PM', u'size': u'0.0 Bytes'}] TST_RPLAY = {u'scSerialNumber': 64702, u'globalIndex': u'64702-6025-4', u'description': u'Cinder Test Replay012345678910', u'parent': {u'instanceId': u'64702.6025.3', u'instanceName': u'64702-6025-3', u'objectType': u'ScReplay'}, u'instanceId': u'64702.6025.4', u'scName': u'Storage Center 64702', u'consistent': False, u'expires': True, u'freezeTime': u'02/02/2015 08:23:47 PM', u'createVolume': {u'instanceId': u'64702.6025', u'instanceName': u'Server_21000024ff30441d Test Vol', u'objectType': u'ScVolume'}, u'expireTime': u'02/02/2015 09:23:47 PM', u'source': u'Manual', u'spaceRecovery': False, u'writesHeldDuration': 7869, u'active': False, u'markedForExpiration': False, u'objectType': u'ScReplay', u'instanceName': u'02/02/2015 08:23:47 PM', u'size': u'0.0 Bytes'} FLDR = {u'status': u'Up', u'instanceName': u'opnstktst', u'name': u'opnstktst', u'parent': {u'instanceId': u'64702.0', u'instanceName': u'Volumes', u'objectType': u'ScVolumeFolder'}, u'instanceId': u'64702.43', u'scName': u'Storage Center 64702', u'notes': u'Folder for OpenStack Cinder Driver', u'scSerialNumber': 64702, u'parentIndex': 0, u'okToDelete': True, u'folderPath': u'', u'root': False, u'statusMessage': u'', u'objectType': u'ScVolumeFolder'} SVR_FLDR = {u'status': u'Up', u'instanceName': u'devstacksrv', u'name': u'devstacksrv', u'parent': {u'instanceId': u'64702.0', u'instanceName': u'Servers', u'objectType': u'ScServerFolder'}, u'instanceId': u'64702.4', u'scName': u'Storage Center 64702', u'notes': u'Folder for OpenStack Cinder Driver', u'scSerialNumber': 64702, u'parentIndex': 0, u'okToDelete': False, u'folderPath': u'', u'root': False, u'statusMessage': u'', u'objectType': u'ScServerFolder'} ISCSI_HBA = {u'portWwnList': [], u'iscsiIpAddress': u'0.0.0.0', u'pathCount': 1, u'name': u'iqn.1993-08.org.debian:01:52332b70525', u'connectivity': u'Down', u'instanceId': u'64702.3786433166', u'scName': u'Storage Center 64702', u'notes': u'', u'scSerialNumber': 64702, u'server': {u'instanceId': u'64702.38', u'instanceName': u'Server_iqn.1993-08.org.debian:01:52332b70525', u'objectType': u'ScPhysicalServer'}, u'remoteStorageCenter': False, u'iscsiName': u'', u'portType': u'Iscsi', u'instanceName': u'iqn.1993-08.org.debian:01:52332b70525', u'objectType': u'ScServerHba'} FC_HBAS = [{u'portWwnList': [], u'iscsiIpAddress': u'0.0.0.0', u'pathCount': 2, u'name': u'21000024ff30441c', u'connectivity': u'Up', u'instanceId': u'64702.3282218607', u'scName': u'Storage Center 64702', u'notes': u'', u'scSerialNumber': 64702, u'server': {u'instanceId': u'64702.47', u'instanceName': u'Server_21000024ff30441d', u'objectType': u'ScPhysicalServer'}, u'remoteStorageCenter': False, u'iscsiName': u'', u'portType': u'FibreChannel', u'instanceName': u'21000024ff30441c', u'objectType': u'ScServerHba'}, {u'portWwnList': [], u'iscsiIpAddress': u'0.0.0.0', u'pathCount': 3, u'name': u'21000024ff30441d', u'connectivity': u'Partial', u'instanceId': u'64702.3282218606', u'scName': u'Storage Center 64702', u'notes': u'', u'scSerialNumber': 64702, u'server': {u'instanceId': u'64702.47', u'instanceName': u'Server_21000024ff30441d', u'objectType': u'ScPhysicalServer'}, u'remoteStorageCenter': False, u'iscsiName': u'', u'portType': u'FibreChannel', u'instanceName': u'21000024ff30441d', u'objectType': u'ScServerHba'}] FC_HBA = {u'portWwnList': [], u'iscsiIpAddress': u'0.0.0.0', u'pathCount': 3, u'name': u'21000024ff30441d', u'connectivity': u'Partial', u'instanceId': 
u'64702.3282218606', u'scName': u'Storage Center 64702', u'notes': u'', u'scSerialNumber': 64702, u'server': {u'instanceId': u'64702.47', u'instanceName': u'Server_21000024ff30441d', u'objectType': u'ScPhysicalServer'}, u'remoteStorageCenter': False, u'iscsiName': u'', u'portType': u'FibreChannel', u'instanceName': u'21000024ff30441d', u'objectType': u'ScServerHba'} SVR_OS_S = [{u'allowsLunGaps': True, u'product': u'Red Hat Linux', u'supportsActiveMappingDeletion': True, u'version': u'6.x', u'requiresLunZero': False, u'scName': u'Storage Center 64702', u'virtualMachineGuest': True, u'virtualMachineHost': False, u'allowsCrossTransportMapping': False, u'objectType': u'ScServerOperatingSystem', u'instanceId': u'64702.38', u'lunCanVaryAcrossPaths': False, u'scSerialNumber': 64702, u'maximumVolumeSize': u'0.0 Bytes', u'multipath': True, u'instanceName': u'Red Hat Linux 6.x', u'supportsActiveMappingCreation': True, u'name': u'Red Hat Linux 6.x'}] ISCSI_FLT_DOMAINS = [{u'headerDigestEnabled': False, u'classOfServicePriority': 0, u'wellKnownIpAddress': u'192.168.0.21', u'scSerialNumber': 64702, u'iscsiName': u'iqn.2002-03.com.compellent:5000d31000fcbe42', u'portNumber': 3260, u'subnetMask': u'255.255.255.0', u'gateway': u'192.168.0.1', u'objectType': u'ScIscsiFaultDomain', u'chapEnabled': False, u'instanceId': u'64702.6.5.3', u'childStatus': u'Up', u'defaultTimeToRetain': u'SECONDS_20', u'dataDigestEnabled': False, u'instanceName': u'iSCSI 10G 2', u'statusMessage': u'', u'status': u'Up', u'transportType': u'Iscsi', u'vlanId': 0, u'windowSize': u'131072.0 Bytes', u'defaultTimeToWait': u'SECONDS_2', u'scsiCommandTimeout': u'MINUTES_1', u'deleteAllowed': False, u'name': u'iSCSI 10G 2', u'immediateDataWriteEnabled': False, u'scName': u'Storage Center 64702', u'notes': u'', u'mtu': u'MTU_1500', u'bidirectionalChapSecret': u'', u'keepAliveTimeout': u'SECONDS_30'}] # For testing find_iscsi_properties where multiple portals are found ISCSI_FLT_DOMAINS_MULTI_PORTALS = \ [{u'headerDigestEnabled': False, u'classOfServicePriority': 0, u'wellKnownIpAddress': u'192.168.0.21', u'scSerialNumber': 64702, u'iscsiName': u'iqn.2002-03.com.compellent:5000d31000fcbe42', u'portNumber': 3260, u'subnetMask': u'255.255.255.0', u'gateway': u'192.168.0.1', u'objectType': u'ScIscsiFaultDomain', u'chapEnabled': False, u'instanceId': u'64702.6.5.3', u'childStatus': u'Up', u'defaultTimeToRetain': u'SECONDS_20', u'dataDigestEnabled': False, u'instanceName': u'iSCSI 10G 2', u'statusMessage': u'', u'status': u'Up', u'transportType': u'Iscsi', u'vlanId': 0, u'windowSize': u'131072.0 Bytes', u'defaultTimeToWait': u'SECONDS_2', u'scsiCommandTimeout': u'MINUTES_1', u'deleteAllowed': False, u'name': u'iSCSI 10G 2', u'immediateDataWriteEnabled': False, u'scName': u'Storage Center 64702', u'notes': u'', u'mtu': u'MTU_1500', u'bidirectionalChapSecret': u'', u'keepAliveTimeout': u'SECONDS_30'}, {u'headerDigestEnabled': False, u'classOfServicePriority': 0, u'wellKnownIpAddress': u'192.168.0.25', u'scSerialNumber': 64702, u'iscsiName': u'iqn.2002-03.com.compellent:5000d31000fcbe42', u'portNumber': 3260, u'subnetMask': u'255.255.255.0', u'gateway': u'192.168.0.1', u'objectType': u'ScIscsiFaultDomain', u'chapEnabled': False, u'instanceId': u'64702.6.5.3', u'childStatus': u'Up', u'defaultTimeToRetain': u'SECONDS_20', u'dataDigestEnabled': False, u'instanceName': u'iSCSI 10G 2', u'statusMessage': u'', u'status': u'Up', u'transportType': u'Iscsi', u'vlanId': 0, u'windowSize': u'131072.0 Bytes', u'defaultTimeToWait': u'SECONDS_2', 
u'scsiCommandTimeout': u'MINUTES_1', u'deleteAllowed': False, u'name': u'iSCSI 10G 2', u'immediateDataWriteEnabled': False, u'scName': u'Storage Center 64702', u'notes': u'', u'mtu': u'MTU_1500', u'bidirectionalChapSecret': u'', u'keepAliveTimeout': u'SECONDS_30'}] ISCSI_FLT_DOMAINS_MULTI_PORTALS_IPV6 = \ [{u'headerDigestEnabled': False, u'classOfServicePriority': 0, u'wellKnownIpAddress': u'0:0:0:0:0:ffff:c0a8:15', u'scSerialNumber': 64702, u'iscsiName': u'iqn.2002-03.com.compellent:5000d31000fcbe42', u'portNumber': 3260, u'subnetMask': u'255.255.255.0', u'gateway': u'192.168.0.1', u'objectType': u'ScIscsiFaultDomain', u'chapEnabled': False, u'instanceId': u'64702.6.5.3', u'childStatus': u'Up', u'defaultTimeToRetain': u'SECONDS_20', u'dataDigestEnabled': False, u'instanceName': u'iSCSI 10G 2', u'statusMessage': u'', u'status': u'Up', u'transportType': u'Iscsi', u'vlanId': 0, u'windowSize': u'131072.0 Bytes', u'defaultTimeToWait': u'SECONDS_2', u'scsiCommandTimeout': u'MINUTES_1', u'deleteAllowed': False, u'name': u'iSCSI 10G 2', u'immediateDataWriteEnabled': False, u'scName': u'Storage Center 64702', u'notes': u'', u'mtu': u'MTU_1500', u'bidirectionalChapSecret': u'', u'keepAliveTimeout': u'SECONDS_30'}, {u'headerDigestEnabled': False, u'classOfServicePriority': 0, u'wellKnownIpAddress': u'0:0:0:0:0:ffff:c0a8:19', u'scSerialNumber': 64702, u'iscsiName': u'iqn.2002-03.com.compellent:5000d31000fcbe42', u'portNumber': 3260, u'subnetMask': u'255.255.255.0', u'gateway': u'192.168.0.1', u'objectType': u'ScIscsiFaultDomain', u'chapEnabled': False, u'instanceId': u'64702.6.5.3', u'childStatus': u'Up', u'defaultTimeToRetain': u'SECONDS_20', u'dataDigestEnabled': False, u'instanceName': u'iSCSI 10G 2', u'statusMessage': u'', u'status': u'Up', u'transportType': u'Iscsi', u'vlanId': 0, u'windowSize': u'131072.0 Bytes', u'defaultTimeToWait': u'SECONDS_2', u'scsiCommandTimeout': u'MINUTES_1', u'deleteAllowed': False, u'name': u'iSCSI 10G 2', u'immediateDataWriteEnabled': False, u'scName': u'Storage Center 64702', u'notes': u'', u'mtu': u'MTU_1500', u'bidirectionalChapSecret': u'', u'keepAliveTimeout': u'SECONDS_30'}] ISCSI_FLT_DOMAIN = {u'headerDigestEnabled': False, u'classOfServicePriority': 0, u'wellKnownIpAddress': u'192.168.0.21', u'scSerialNumber': 64702, u'iscsiName': u'iqn.2002-03.com.compellent:5000d31000fcbe42', u'portNumber': 3260, u'subnetMask': u'255.255.255.0', u'gateway': u'192.168.0.1', u'objectType': u'ScIscsiFaultDomain', u'chapEnabled': False, u'instanceId': u'64702.6.5.3', u'childStatus': u'Up', u'defaultTimeToRetain': u'SECONDS_20', u'dataDigestEnabled': False, u'instanceName': u'iSCSI 10G 2', u'statusMessage': u'', u'status': u'Up', u'transportType': u'Iscsi', u'vlanId': 0, u'windowSize': u'131072.0 Bytes', u'defaultTimeToWait': u'SECONDS_2', u'scsiCommandTimeout': u'MINUTES_1', u'deleteAllowed': False, u'name': u'iSCSI 10G 2', u'immediateDataWriteEnabled': False, u'scName': u'Storage Center 64702', u'notes': u'', u'mtu': u'MTU_1500', u'bidirectionalChapSecret': u'', u'keepAliveTimeout': u'SECONDS_30'} CTRLR_PORT = {u'status': u'Up', u'iscsiIpAddress': u'0.0.0.0', u'WWN': u'5000D31000FCBE06', u'name': u'5000D31000FCBE06', u'iscsiGateway': u'0.0.0.0', u'instanceId': u'64702.5764839588723736070.51', u'scName': u'Storage Center 64702', u'scSerialNumber': 64702, u'transportType': u'FibreChannel', u'virtual': False, u'controller': {u'instanceId': u'64702.64702', u'instanceName': u'SN 64702', u'objectType': u'ScController'}, u'iscsiName': u'', u'purpose': u'FrontEnd', 
u'iscsiSubnetMask': u'0.0.0.0', u'faultDomain': {u'instanceId': u'64702.4.3', u'instanceName': u'Domain 1', u'objectType': u'ScControllerPortFaultDomain'}, u'instanceName': u'5000D31000FCBE06', u'statusMessage': u'', u'objectType': u'ScControllerPort'} ISCSI_CTRLR_PORT = {u'preferredParent': {u'instanceId': u'64702.5764839588723736074.69', u'instanceName': u'5000D31000FCBE0A', u'objectType': u'ScControllerPort'}, u'status': u'Up', u'iscsiIpAddress': u'10.23.8.235', u'WWN': u'5000D31000FCBE43', u'name': u'5000D31000FCBE43', u'parent': {u'instanceId': u'64702.5764839588723736074.69', u'instanceName': u'5000D31000FCBE0A', u'objectType': u'ScControllerPort'}, u'iscsiGateway': u'0.0.0.0', u'instanceId': u'64702.5764839588723736131.91', u'scName': u'Storage Center 64702', u'scSerialNumber': 64702, u'transportType': u'Iscsi', u'virtual': True, u'controller': {u'instanceId': u'64702.64702', u'instanceName': u'SN 64702', u'objectType': u'ScController'}, u'iscsiName': u'iqn.2002-03.com.compellent:5000d31000fcbe43', u'purpose': u'FrontEnd', u'iscsiSubnetMask': u'0.0.0.0', u'faultDomain': {u'instanceId': u'64702.6.5', u'instanceName': u'iSCSI 10G 2', u'objectType': u'ScControllerPortFaultDomain'}, u'instanceName': u'5000D31000FCBE43', u'childStatus': u'Up', u'statusMessage': u'', u'objectType': u'ScControllerPort'} FC_CTRLR_PORT = {u'preferredParent': {u'instanceId': u'64702.5764839588723736093.57', u'instanceName': u'5000D31000FCBE1D', u'objectType': u'ScControllerPort'}, u'status': u'Up', u'iscsiIpAddress': u'0.0.0.0', u'WWN': u'5000d31000fcbe36', u'name': u'5000d31000fcbe36', u'parent': {u'instanceId': u'64702.5764839588723736093.57', u'instanceName': u'5000D31000FCBE1D', u'objectType': u'ScControllerPort'}, u'iscsiGateway': u'0.0.0.0', u'instanceId': u'64702.5764839588723736118.50', u'scName': u'Storage Center 64702', u'scSerialNumber': 64702, u'transportType': u'FibreChannel', u'virtual': True, u'controller': {u'instanceId': u'64702.64703', u'instanceName': u'SN 64703', u'objectType': u'ScController'}, u'iscsiName': u'', u'purpose': u'FrontEnd', u'iscsiSubnetMask': u'0.0.0.0', u'faultDomain': {u'instanceId': u'64702.1.0', u'instanceName': u'Domain 0', u'objectType': u'ScControllerPortFaultDomain'}, u'instanceName': u'5000d31000fcbe36', u'childStatus': u'Up', u'statusMessage': u'', u'objectType': u'ScControllerPort'} FC_CTRLR_PORT_WWN_ERROR = \ {u'preferredParent': {u'instanceId': u'64702.5764839588723736093.57', u'instanceName': u'5000D31000FCBE1D', u'objectType': u'ScControllerPort'}, u'status': u'Up', u'iscsiIpAddress': u'0.0.0.0', u'wWN': u'5000d31000fcbe36', u'name': u'5000d31000fcbe36', u'parent': {u'instanceId': u'64702.5764839588723736093.57', u'instanceName': u'5000D31000FCBE1D', u'objectType': u'ScControllerPort'}, u'iscsiGateway': u'0.0.0.0', u'instanceId': u'64702.5764839588723736118.50', u'scName': u'Storage Center 64702', u'scSerialNumber': 64702, u'transportType': u'FibreChannel', u'virtual': True, u'controller': {u'instanceId': u'64702.64703', u'instanceName': u'SN 64703', u'objectType': u'ScController'}, u'iscsiName': u'', u'purpose': u'FrontEnd', u'iscsiSubnetMask': u'0.0.0.0', u'faultDomain': {u'instanceId': u'64702.1.0', u'instanceName': u'Domain 0', u'objectType': u'ScControllerPortFaultDomain'}, u'instanceName': u'5000d31000fcbe36', u'childStatus': u'Up', u'statusMessage': u'', u'objectType': u'ScControllerPort'} STRG_USAGE = {u'systemSpace': u'7.38197504E8 Bytes', u'freeSpace': u'1.297659461632E13 Bytes', u'oversubscribedSpace': u'0.0 Bytes', u'instanceId': u'64702', 
u'scName': u'Storage Center 64702', u'savingVsRaidTen': u'1.13737990144E11 Bytes', u'allocatedSpace': u'1.66791217152E12 Bytes', u'usedSpace': u'3.25716017152E11 Bytes', u'configuredSpace': u'9.155796533248E12 Bytes', u'alertThresholdSpace': u'1.197207956992E13 Bytes', u'availableSpace': u'1.3302310633472E13 Bytes', u'badSpace': u'0.0 Bytes', u'time': u'02/02/2015 02:23:39 PM', u'scSerialNumber': 64702, u'instanceName': u'Storage Center 64702', u'storageAlertThreshold': 10, u'objectType': u'StorageCenterStorageUsage'} RPLAY_PROFILE = {u'name': u'fc8f2fec-fab2-4e34-9148-c094c913b9a3', u'type': u'Consistent', u'notes': u'Created by Dell EMC Cinder Driver', u'volumeCount': 0, u'expireIncompleteReplaySets': True, u'replayCreationTimeout': 20, u'enforceReplayCreationTimeout': False, u'ruleCount': 0, u'userCreated': True, u'scSerialNumber': 64702, u'scName': u'Storage Center 64702', u'objectType': u'ScReplayProfile', u'instanceId': u'64702.11', u'instanceName': u'fc8f2fec-fab2-4e34-9148-c094c913b9a3'} STORAGE_PROFILE_LIST = [ {u'allowedForFlashOptimized': False, u'allowedForNonFlashOptimized': True, u'index': 1, u'instanceId': u'64158.1', u'instanceName': u'Recommended', u'name': u'Recommended', u'notes': u'', u'objectType': u'ScStorageProfile', u'raidTypeDescription': u'RAID 10 Active, RAID 5 or RAID 6 Replay', u'raidTypeUsed': u'Mixed', u'scName': u'Storage Center 64158', u'scSerialNumber': 64158, u'tiersUsedDescription': u'Tier 1, Tier 2, Tier 3', u'useTier1Storage': True, u'useTier2Storage': True, u'useTier3Storage': True, u'userCreated': False, u'volumeCount': 125}, {u'allowedForFlashOptimized': False, u'allowedForNonFlashOptimized': True, u'index': 2, u'instanceId': u'64158.2', u'instanceName': u'High Priority', u'name': u'High Priority', u'notes': u'', u'objectType': u'ScStorageProfile', u'raidTypeDescription': u'RAID 10 Active, RAID 5 or RAID 6 Replay', u'raidTypeUsed': u'Mixed', u'scName': u'Storage Center 64158', u'scSerialNumber': 64158, u'tiersUsedDescription': u'Tier 1', u'useTier1Storage': True, u'useTier2Storage': False, u'useTier3Storage': False, u'userCreated': False, u'volumeCount': 0}, {u'allowedForFlashOptimized': False, u'allowedForNonFlashOptimized': True, u'index': 3, u'instanceId': u'64158.3', u'instanceName': u'Medium Priority', u'name': u'Medium Priority', u'notes': u'', u'objectType': u'ScStorageProfile', u'raidTypeDescription': u'RAID 10 Active, RAID 5 or RAID 6 Replay', u'raidTypeUsed': u'Mixed', u'scName': u'Storage Center 64158', u'scSerialNumber': 64158, u'tiersUsedDescription': u'Tier 2', u'useTier1Storage': False, u'useTier2Storage': True, u'useTier3Storage': False, u'userCreated': False, u'volumeCount': 0}, {u'allowedForFlashOptimized': True, u'allowedForNonFlashOptimized': True, u'index': 4, u'instanceId': u'64158.4', u'instanceName': u'Low Priority', u'name': u'Low Priority', u'notes': u'', u'objectType': u'ScStorageProfile', u'raidTypeDescription': u'RAID 10 Active, RAID 5 or RAID 6 Replay', u'raidTypeUsed': u'Mixed', u'scName': u'Storage Center 64158', u'scSerialNumber': 64158, u'tiersUsedDescription': u'Tier 3', u'useTier1Storage': False, u'useTier2Storage': False, u'useTier3Storage': True, u'userCreated': False, u'volumeCount': 0}] CGS = [{u'profile': {u'instanceId': u'65690.4', u'instanceName': u'0869559e-6881-454e-ba18-15c6726d33c1', u'objectType': u'ScReplayProfile'}, u'scSerialNumber': 65690, u'globalIndex': u'65690-4-2', u'description': u'GUID1-0869559e-6881-454e-ba18-15c6726d33c1', u'instanceId': u'65690.65690.4.2', u'scName': u'Storage Center 
65690', u'expires': False, u'freezeTime': u'2015-09-28T14:00:59-05:00', u'expireTime': u'1969-12-31T18:00:00-06:00', u'expectedReplayCount': 2, u'writesHeldDuration': 19809, u'replayCount': 2, u'instanceName': u'Name1', u'objectType': u'ScReplayConsistencyGroup'}, {u'profile': {u'instanceId': u'65690.4', u'instanceName': u'0869559e-6881-454e-ba18-15c6726d33c1', u'objectType': u'ScReplayProfile'}, u'scSerialNumber': 65690, u'globalIndex': u'65690-4-3', u'description': u'GUID2-0869559e-6881-454e-ba18-15c6726d33c1', u'instanceId': u'65690.65690.4.3', u'scName': u'Storage Center 65690', u'expires': False, u'freezeTime': u'2015-09-28T14:00:59-05:00', u'expireTime': u'1969-12-31T18:00:00-06:00', u'expectedReplayCount': 2, u'writesHeldDuration': 19809, u'replayCount': 2, u'instanceName': u'Name2', u'objectType': u'ScReplayConsistencyGroup'} ] ISCSI_CONFIG = { u'initialReadyToTransfer': True, u'scSerialNumber': 64065, u'macAddress': u'00c0dd-1da173', u'instanceId': u'64065.5764839588723573038.6', u'vlanTagging': False, u'mapCount': 8, u'cardModel': u'Qle4062', u'portNumber': 3260, u'firstBurstSize': 256, u'deviceName': u'PCIDEV09', u'subnetMask': u'255.255.255.0', u'speed': u'1 Gbps', u'maximumVlanCount': 0, u'gatewayIpAddress': u'192.168.0.1', u'slot': 4, u'sfpData': u'', u'dataDigest': False, u'chapEnabled': False, u'firmwareVersion': u'03.00.01.77', u'preferredControllerIndex': 64066, u'defaultTimeToRetain': 20, u'objectType': u'ScControllerPortIscsiConfiguration', u'instanceName': u'5000d31000FCBE43', u'scName': u'sc64065', u'revision': u'0', u'controllerPortIndex': 5764839588723573038, u'maxBurstSize': 512, u'targetCount': 20, u'description': u'QLogic QLE4062 iSCSI Adapter Rev 0 Copper', u'vlanSupported': True, u'chapName': u'iqn.2002-03.com.compellent:5000d31000fcbe43', u'windowSize': 128, u'vlanId': 0, u'defaultTimeToWait': 2, u'headerDigest': False, u'slotPort': 2, u'immediateDataWrite': False, u'storageCenterTargetCount': 20, u'vlanCount': 0, u'scsiCommandTimeout': 60, u'slotType': u'PCI4', u'ipAddress': u'192.168.0.21', u'vlanUserPriority': 0, u'bothCount': 0, u'initiatorCount': 33, u'keepAliveTimeout': 30, u'homeControllerIndex': 64066, u'chapSecret': u'', u'maximumTransmissionUnit': 1500} SCQOS = {u'linkSpeed': u'1 Gbps', u'numberDevices': 1, u'bandwidthLimited': False, u'name': u'Cinder QoS', u'instanceId': u'64702.2', u'scName': u'Storage Center 64702', u'scSerialNumber': 64702, u'instanceName': u'Cinder QoS', u'advancedSettings': {u'globalMaxSectorPerIo': 512, u'destinationMaxSectorCount': 65536, u'queuePassMaxSectorCount': 65536, u'destinationMaxIoCount': 18, u'globalMaxIoCount': 32, u'queuePassMaxIoCount': 8}, u'objectType': u'ScReplicationQosNode'} SCREPL = [{u'destinationVolume': {u'instanceId': u'65495.167', u'instanceName': u'Cinder repl of abcd9' u'5b2-1284-4cf0-a397-9' u'70fa6c68092', u'objectType': u'ScVolume'}, u'instanceId': u'64702.9', u'scSerialNumber': 64702, u'syncStatus': u'NotApplicable', u'objectType': u'ScReplication', u'sourceStorageCenter': {u'instanceId': u'64702', u'instanceName': u'Storage Center ' '64702', u'objectType': u'StorageCenter'}, u'secondaryTransportTypes': [], u'dedup': False, u'state': u'Up', u'replicateActiveReplay': False, u'qosNode': {u'instanceId': u'64702.2', u'instanceName': u'Cinder QoS', u'objectType': u'ScReplicationQosNode'}, u'sourceVolume': {u'instanceId': u'64702.13108', u'instanceName': u'abcd95b2-1284-4cf0-a397-' u'970fa6c68092', u'objectType': u'ScVolume'}, u'type': u'Asynchronous', u'statusMessage': u'', u'status': u'Up', 
u'syncMode': u'None', u'stateMessage': u'', u'managedByLiveVolume': False, u'destinationScSerialNumber': 65495, u'pauseAllowed': True, u'instanceName': u"Replication of 'abcd95b2-1284-4cf0-" u"a397-970fa6c68092'", u'simulation': False, u'transportTypes': [u'FibreChannel'], u'replicateStorageToLowestTier': True, u'scName': u'Storage Center 64702', u'destinationStorageCenter': {u'instanceId': u'65495', u'instanceName': u'Storage Center' u' 65495', u'objectType': u'StorageCenter'}}] IQN = 'iqn.2002-03.com.compellent:5000D31000000001' WWN = u'21000024ff30441c' WWNS = [u'21000024ff30441c', u'21000024ff30441d'] # Used to test finding no match in find_wwns WWNS_NO_MATCH = [u'21000024FF30451C', u'21000024FF30451D'] FLDR_PATH = 'StorageCenter/ScVolumeFolder/' # Create a Response object that indicates OK response_ok = models.Response() response_ok.status_code = 200 response_ok.reason = u'ok' response_ok._content = '' response_ok._content_consumed = True RESPONSE_200 = response_ok # Create a Response object that indicates created response_created = models.Response() response_created.status_code = 201 response_created.reason = u'created' response_created._content = '' response_created._content_consumed = True RESPONSE_201 = response_created # Create a Response object used to indicate a failure, although a 204 # can also be a success with no content returned (it depends on the call). response_nc = models.Response() response_nc.status_code = 204 response_nc.reason = u'duplicate' response_nc._content = '' response_nc._content_consumed = True RESPONSE_204 = response_nc # Create a Response object that indicates a pure error (bad request). response_bad = models.Response() response_bad.status_code = 400 response_bad.reason = u'bad request' response_bad._content = '' response_bad._content_consumed = True RESPONSE_400 = response_bad # Create a Response object that indicates a pure error (not found). response_bad = models.Response() response_bad.status_code = 404 response_bad.reason = u'not found' response_bad._content = '' response_bad._content_consumed = True RESPONSE_404 = response_bad def setUp(self): super(DellSCSanAPITestCase, self).setUp() # Configuration is a mock, which is pretty much a blank slate, so we # just set the handful of driver config values the tests need here. self.configuration = mock.Mock() self.configuration.san_is_local = False self.configuration.san_ip = "192.168.0.1" self.configuration.san_login = "admin" self.configuration.san_password = "mmm" self.configuration.dell_sc_ssn = 12345 self.configuration.dell_sc_server_folder = 'opnstktst' self.configuration.dell_sc_volume_folder = 'opnstktst' # Note that we set this to True even though we do not test this # functionality. It is passed straight through to the requests calls # as the verify parameter, and since that third-party library is # deeply stubbed out it is not directly testable by this code. Also # note that if certificate verification fails the driver does not # even come up. 
self.configuration.dell_sc_verify_cert = True self.configuration.dell_sc_api_port = 3033 self.configuration.target_ip_address = '192.168.1.1' self.configuration.target_port = 3260 self._context = context.get_admin_context() self.apiversion = '2.0' self.asynctimeout = 15 self.synctimeout = 30 # Set up the SCApi self.scapi = storagecenter_api.SCApi( self.configuration.san_ip, self.configuration.dell_sc_api_port, self.configuration.san_login, self.configuration.san_password, self.configuration.dell_sc_verify_cert, self.asynctimeout, self.synctimeout, self.apiversion) # Set up the scapi configuration vars self.scapi.ssn = self.configuration.dell_sc_ssn self.scapi.sfname = self.configuration.dell_sc_server_folder self.scapi.vfname = self.configuration.dell_sc_volume_folder # Note that we set this to True (or not) on the replication tests. self.scapi.failed_over = False # Legacy folder names are still current so we default this to true. self.scapi.legacyfoldernames = True self.volid = str(uuid.uuid4()) self.volume_name = "volume" + self.volid self.repl_name = "Cinder repl of volume" + self.volid def test_path_to_array(self, mock_close_connection, mock_open_connection, mock_init): res = self.scapi._path_to_array(u'folder1/folder2/folder3') expected = [u'folder1', u'folder2', u'folder3'] self.assertEqual(expected, res, 'Unexpected folder path') @mock.patch.object(storagecenter_api.SCApi, '_get_result', return_value=SC) @mock.patch.object(storagecenter_api.HttpClient, 'get', return_value=RESPONSE_200) def test_find_sc(self, mock_get, mock_get_result, mock_close_connection, mock_open_connection, mock_init): res = self.scapi.find_sc() mock_get.assert_called_once_with('StorageCenter/StorageCenter') self.assertTrue(mock_get_result.called) self.assertEqual(u'64702', res, 'Unexpected SSN') @mock.patch.object(storagecenter_api.HttpClient, 'get', return_value=None) @mock.patch.object(storagecenter_api.SCApi, '_get_result', return_value=None) def test_find_sc_failure(self, mock_get_result, mock_get, mock_close_connection, mock_open_connection, mock_init): self.assertRaises(exception.VolumeBackendAPIException, self.scapi.find_sc) @mock.patch.object(storagecenter_api.SCApi, '_first_result', return_value=FLDR) @mock.patch.object(storagecenter_api.HttpClient, 'post', return_value=RESPONSE_201) def test_create_folder(self, mock_post, mock_first_result, mock_close_connection, mock_open_connection, mock_init): res = self.scapi._create_folder( 'StorageCenter/ScVolumeFolder', '', self.configuration.dell_sc_volume_folder) self.assertTrue(mock_post.called) self.assertTrue(mock_first_result.called) self.assertEqual(self.FLDR, res, 'Unexpected Folder') @mock.patch.object(storagecenter_api.SCApi, '_first_result', return_value=FLDR) @mock.patch.object(storagecenter_api.HttpClient, 'post', return_value=RESPONSE_201) def test_create_folder_with_parent(self, mock_post, mock_first_result, mock_close_connection, mock_open_connection, mock_init): # Test case where parent folder name is specified res = self.scapi._create_folder( 'StorageCenter/ScVolumeFolder', 'parentFolder', self.configuration.dell_sc_volume_folder) self.assertTrue(mock_post.called) self.assertTrue(mock_first_result.called) self.assertEqual(self.FLDR, res, 'Unexpected Folder') @mock.patch.object(storagecenter_api.HttpClient, 'post', return_value=RESPONSE_204) def test_create_folder_failure(self, mock_post, mock_close_connection, mock_open_connection, mock_init): res = self.scapi._create_folder( 'StorageCenter/ScVolumeFolder', '', 
self.configuration.dell_sc_volume_folder) self.assertIsNone(res, 'Test Create folder - None expected') @mock.patch.object(storagecenter_api.SCApi, '_find_folder', return_value=FLDR) @mock.patch.object(storagecenter_api.SCApi, '_path_to_array', return_value=['Cinder_Test_Folder']) def test_create_folder_path(self, mock_path_to_array, mock_find_folder, mock_close_connection, mock_open_connection, mock_init): res = self.scapi._create_folder_path( 'StorageCenter/ScVolumeFolder', self.configuration.dell_sc_volume_folder) mock_path_to_array.assert_called_once_with( self.configuration.dell_sc_volume_folder) self.assertTrue(mock_find_folder.called) self.assertEqual(self.FLDR, res, 'Unexpected ScFolder') @mock.patch.object(storagecenter_api.SCApi, '_create_folder', return_value=FLDR) @mock.patch.object(storagecenter_api.SCApi, '_find_folder', return_value=None) @mock.patch.object(storagecenter_api.SCApi, '_path_to_array', return_value=['Cinder_Test_Folder']) def test_create_folder_path_create_fldr(self, mock_path_to_array, mock_find_folder, mock_create_folder, mock_close_connection, mock_open_connection, mock_init): # Test case where folder is not found and must be created res = self.scapi._create_folder_path( 'StorageCenter/ScVolumeFolder', self.configuration.dell_sc_volume_folder) mock_path_to_array.assert_called_once_with( self.configuration.dell_sc_volume_folder) self.assertTrue(mock_find_folder.called) self.assertTrue(mock_create_folder.called) self.assertEqual(self.FLDR, res, 'Unexpected ScFolder') @mock.patch.object(storagecenter_api.SCApi, '_create_folder', return_value=None) @mock.patch.object(storagecenter_api.SCApi, '_find_folder', return_value=None) @mock.patch.object(storagecenter_api.SCApi, '_path_to_array', return_value=['Cinder_Test_Folder']) def test_create_folder_path_failure(self, mock_path_to_array, mock_find_folder, mock_create_folder, mock_close_connection, mock_open_connection, mock_init): # Test case where folder is not found, must be created # and creation fails res = self.scapi._create_folder_path( 'StorageCenter/ScVolumeFolder', self.configuration.dell_sc_volume_folder) mock_path_to_array.assert_called_once_with( self.configuration.dell_sc_volume_folder) self.assertTrue(mock_find_folder.called) self.assertTrue(mock_create_folder.called) self.assertIsNone(res, 'Expected None') @mock.patch.object(storagecenter_api.SCApi, '_get_result') @mock.patch.object(storagecenter_api.HttpClient, 'post', return_value=RESPONSE_200) def test_find_folder(self, mock_post, mock_get_result, mock_close_connection, mock_open_connection, mock_init): self.scapi._find_folder('StorageCenter/ScVolumeFolder/GetList', 'devstackvol/fcvm', 12345) expected_payload = {'filter': {'filterType': 'AND', 'filters': [ {'filterType': 'Equals', 'attributeName': 'scSerialNumber', 'attributeValue': 12345}, {'filterType': 'Equals', 'attributeName': 'Name', 'attributeValue': 'fcvm'}, {'filterType': 'Equals', 'attributeName': 'folderPath', 'attributeValue': 'devstackvol/'}]}} mock_post.assert_called_once_with( 'StorageCenter/ScVolumeFolder/GetList', expected_payload) self.assertTrue(mock_get_result.called) @mock.patch.object(storagecenter_api.SCApi, '_get_result') @mock.patch.object(storagecenter_api.HttpClient, 'post', return_value=RESPONSE_200) def test_find_folder_not_legacy(self, mock_post, mock_get_result, mock_close_connection, mock_open_connection, mock_init): self.scapi.legacyfoldernames = False self.scapi._find_folder('StorageCenter/ScVolumeFolder/GetList', 'devstackvol/fcvm', 12345) expected_payload = 
{'filter': {'filterType': 'AND', 'filters': [ {'filterType': 'Equals', 'attributeName': 'scSerialNumber', 'attributeValue': 12345}, {'filterType': 'Equals', 'attributeName': 'Name', 'attributeValue': 'fcvm'}, {'filterType': 'Equals', 'attributeName': 'folderPath', 'attributeValue': '/devstackvol/'}]}} mock_post.assert_called_once_with( 'StorageCenter/ScVolumeFolder/GetList', expected_payload) self.assertTrue(mock_get_result.called) self.scapi.legacyfoldernames = True @mock.patch.object(storagecenter_api.SCApi, '_get_result') @mock.patch.object(storagecenter_api.HttpClient, 'post', return_value=RESPONSE_200) def test_find_folder_legacy_root(self, mock_post, mock_get_result, mock_close_connection, mock_open_connection, mock_init): self.scapi._find_folder('StorageCenter/ScVolumeFolder/GetList', 'devstackvol', 12345) expected_payload = {'filter': {'filterType': 'AND', 'filters': [ {'filterType': 'Equals', 'attributeName': 'scSerialNumber', 'attributeValue': 12345}, {'filterType': 'Equals', 'attributeName': 'Name', 'attributeValue': 'devstackvol'}, {'filterType': 'Equals', 'attributeName': 'folderPath', 'attributeValue': ''}]}} mock_post.assert_called_once_with( 'StorageCenter/ScVolumeFolder/GetList', expected_payload) self.assertTrue(mock_get_result.called) @mock.patch.object(storagecenter_api.SCApi, '_get_result') @mock.patch.object(storagecenter_api.HttpClient, 'post', return_value=RESPONSE_200) def test_find_folder_non_legacy_root(self, mock_post, mock_get_result, mock_close_connection, mock_open_connection, mock_init): self.scapi.legacyfoldernames = False self.scapi._find_folder('StorageCenter/ScVolumeFolder/GetList', 'devstackvol', 12345) expected_payload = {'filter': {'filterType': 'AND', 'filters': [ {'filterType': 'Equals', 'attributeName': 'scSerialNumber', 'attributeValue': 12345}, {'filterType': 'Equals', 'attributeName': 'Name', 'attributeValue': 'devstackvol'}, {'filterType': 'Equals', 'attributeName': 'folderPath', 'attributeValue': '/'}]}} mock_post.assert_called_once_with( 'StorageCenter/ScVolumeFolder/GetList', expected_payload) self.assertTrue(mock_get_result.called) self.scapi.legacyfoldernames = True @mock.patch.object(storagecenter_api.SCApi, '_get_result', return_value=u'devstackvol/fcvm/') @mock.patch.object(storagecenter_api.HttpClient, 'post', return_value=RESPONSE_200) def test_find_folder_multi_fldr(self, mock_post, mock_get_result, mock_close_connection, mock_open_connection, mock_init): # Test case for folder path with multiple folders res = self.scapi._find_folder( 'StorageCenter/ScVolumeFolder', u'testParentFolder/opnstktst') self.assertTrue(mock_post.called) self.assertTrue(mock_get_result.called) self.assertEqual(u'devstackvol/fcvm/', res, 'Unexpected folder') @mock.patch.object(storagecenter_api.HttpClient, 'post', return_value=RESPONSE_204) def test_find_folder_failure(self, mock_post, mock_close_connection, mock_open_connection, mock_init): res = self.scapi._find_folder( 'StorageCenter/ScVolumeFolder', self.configuration.dell_sc_volume_folder) self.assertIsNone(res, 'Test find folder - None expected') @mock.patch.object(storagecenter_api.SCApi, '_find_folder', return_value=None) def test_find_volume_folder_fail(self, mock_find_folder, mock_close_connection, mock_open_connection, mock_init): # Test case where _find_volume_folder returns none res = self.scapi._find_volume_folder( False) mock_find_folder.assert_called_once_with( 'StorageCenter/ScVolumeFolder/GetList', self.configuration.dell_sc_volume_folder, -1) self.assertIsNone(res, 'Expected None') 
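# The _find_folder test cases above pin down the folderPath convention the
# driver sends to StorageCenter/ScVolumeFolder/GetList: a path such as
# 'devstackvol/fcvm' is split into a Name filter ('fcvm') and a folderPath
# filter, where legacy folder names omit the leading slash ('devstackvol/',
# or '' for a root-level folder) and non-legacy folder names include it
# ('/devstackvol/', or '/' for a root-level folder).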
@mock.patch.object(storagecenter_api.SCApi, '_find_folder', return_value=FLDR) def test_find_volume_folder(self, mock_find_folder, mock_close_connection, mock_open_connection, mock_init): res = self.scapi._find_volume_folder( False) mock_find_folder.assert_called_once_with( 'StorageCenter/ScVolumeFolder/GetList', self.configuration.dell_sc_volume_folder, -1) self.assertEqual(self.FLDR, res, 'Unexpected Folder') @mock.patch.object(storagecenter_api.SCApi, '_get_json', return_value=STORAGE_PROFILE_LIST) @mock.patch.object(storagecenter_api.HttpClient, 'post', return_value=RESPONSE_200) def test_find_storage_profile_fail(self, mock_json, mock_find_folder, mock_close_connection, mock_open_connection, mock_init): # Test case where the requested storage profile is not found res = self.scapi._find_storage_profile("Blah") self.assertIsNone(res) @mock.patch.object(storagecenter_api.SCApi, '_get_json', return_value=STORAGE_PROFILE_LIST) @mock.patch.object(storagecenter_api.HttpClient, 'post', return_value=RESPONSE_200) def test_find_storage_profile_none(self, mock_json, mock_find_folder, mock_close_connection, mock_open_connection, mock_init): # Test case where _find_storage_profile returns none res = self.scapi._find_storage_profile(None) self.assertIsNone(res) @mock.patch.object(storagecenter_api.SCApi, '_get_json', return_value=STORAGE_PROFILE_LIST) @mock.patch.object(storagecenter_api.HttpClient, 'post', return_value=RESPONSE_200) @ddt.data('HighPriority', 'highpriority', 'High Priority') def test_find_storage_profile(self, value, mock_json, mock_find_folder, mock_close_connection, mock_open_connection, mock_init): res = self.scapi._find_storage_profile(value) self.assertIsNotNone(res, 'Expected matching storage profile!') self.assertEqual(self.STORAGE_PROFILE_LIST[1]['instanceId'], res.get('instanceId')) @mock.patch.object(storagecenter_api.SCApi, '_create_folder_path', return_value=FLDR) @mock.patch.object(storagecenter_api.SCApi, '_find_folder', return_value=None) def test_find_volume_folder_create_folder(self, mock_find_folder, mock_create_folder_path, mock_close_connection, mock_open_connection, mock_init): # Test case where _find_volume_folder returns none and folder must be # created res = self.scapi._find_volume_folder( True) mock_find_folder.assert_called_once_with( 'StorageCenter/ScVolumeFolder/GetList', self.configuration.dell_sc_volume_folder, -1) self.assertTrue(mock_create_folder_path.called) self.assertEqual(self.FLDR, res, 'Unexpected Folder') @mock.patch.object(storagecenter_api.SCApi, 'get_volume', return_value=VOLUME) @mock.patch.object(storagecenter_api.SCApi, 'unmap_volume', return_value=True) @mock.patch.object(storagecenter_api.SCApi, 'map_volume', return_value=MAPPINGS) @mock.patch.object(storagecenter_api.SCApi, '_get_json', return_value=SCSERVERS) @mock.patch.object(storagecenter_api.HttpClient, 'post', return_value=RESPONSE_200) def test_init_volume(self, mock_post, mock_get_json, mock_map_volume, mock_unmap_volume, mock_get_volume, mock_close_connection, mock_open_connection, mock_init): self.scapi._init_volume(self.VOLUME) self.assertTrue(mock_map_volume.called) self.assertTrue(mock_unmap_volume.called) @mock.patch.object(storagecenter_api.SCApi, 'get_volume') @mock.patch.object(storagecenter_api.SCApi, 'unmap_volume') @mock.patch.object(storagecenter_api.SCApi, 'map_volume', return_value=MAPPINGS) @mock.patch.object(storagecenter_api.SCApi, '_get_json') @mock.patch.object(storagecenter_api.HttpClient, 'post', return_value=RESPONSE_200) def test_init_volume_retry(self, 
mock_post, mock_get_json, mock_map_volume, mock_unmap_volume, mock_get_volume, mock_close_connection, mock_open_connection, mock_init): mock_get_json.return_value = [{'name': 'srv1', 'status': 'up', 'type': 'physical'}, {'name': 'srv2', 'status': 'up', 'type': 'physical'}] mock_get_volume.side_effect = [{'name': 'guid', 'active': False, 'instanceId': '12345.1'}, {'name': 'guid', 'active': True, 'instanceId': '12345.1'}] self.scapi._init_volume(self.VOLUME) # First return wasn't active. So try second. self.assertEqual(2, mock_map_volume.call_count) self.assertEqual(2, mock_unmap_volume.call_count) @mock.patch.object(storagecenter_api.HttpClient, 'post', return_value=RESPONSE_400) def test_init_volume_failure(self, mock_post, mock_close_connection, mock_open_connection, mock_init): # Test case where ScServer list fails self.scapi._init_volume(self.VOLUME) self.assertTrue(mock_post.called) @mock.patch.object(storagecenter_api.SCApi, 'unmap_volume', return_value=True) @mock.patch.object(storagecenter_api.SCApi, 'map_volume', return_value=MAPPINGS) @mock.patch.object(storagecenter_api.SCApi, '_get_json', return_value=SCSERVERS_DOWN) @mock.patch.object(storagecenter_api.HttpClient, 'post', return_value=RESPONSE_200) def test_init_volume_servers_down(self, mock_post, mock_get_json, mock_map_volume, mock_unmap_volume, mock_close_connection, mock_open_connection, mock_init): # Test case where ScServer Status = Down self.scapi._init_volume(self.VOLUME) self.assertFalse(mock_map_volume.called) self.assertFalse(mock_unmap_volume.called) @mock.patch.object(storagecenter_api.SCApi, '_get_json', return_value=VOLUME) @mock.patch.object(storagecenter_api.SCApi, '_find_volume_folder', return_value=FLDR) @mock.patch.object(storagecenter_api.HttpClient, 'post', return_value=RESPONSE_201) def test_create_volume(self, mock_post, mock_find_volume_folder, mock_get_json, mock_close_connection, mock_open_connection, mock_init): res = self.scapi.create_volume( self.volume_name, 1) self.assertTrue(mock_post.called) self.assertTrue(mock_get_json.called) mock_find_volume_folder.assert_called_once_with(True) self.assertEqual(self.VOLUME, res, 'Unexpected ScVolume') @mock.patch.object(storagecenter_api.SCApi, '_get_json') @mock.patch.object(storagecenter_api.SCApi, '_find_volume_folder') @mock.patch.object(storagecenter_api.HttpClient, 'post') @mock.patch.object(storagecenter_api.SCApi, '_find_qos_profile') @mock.patch.object(storagecenter_api.SCApi, '_find_data_reduction_profile') @mock.patch.object(storagecenter_api.SCApi, '_find_storage_profile') @mock.patch.object(storagecenter_api.SCApi, '_find_replay_profiles') def test_create_volume_with_profiles(self, mock_find_replay_profiles, mock_find_storage_profile, mock_find_data_reduction_profile, mock_find_qos_profile, mock_post, mock_find_volume_folder, mock_get_json, mock_close_connection, mock_open_connection, mock_init): mock_find_replay_profiles.return_value = (['12345.4'], []) mock_get_json.return_value = self.VOLUME mock_find_volume_folder.return_value = {'instanceId': '12345.200'} mock_post.return_value = self.RESPONSE_201 mock_find_storage_profile.return_value = {'instanceId': '12345.0'} mock_find_data_reduction_profile.return_value = {'instanceId': '12345.1'} mock_find_qos_profile.side_effect = [{'instanceId': '12345.2'}, {'instanceId': '12345.3'}] res = self.scapi.create_volume(self.volume_name, 1, 'storage_profile', 'replay_profile_string', 'volume_qos', 'group_qos', 'datareductionprofile') expected_payload = {'Name': self.volume_name, 'Notes': 'Created by 
Dell EMC Cinder Driver', 'Size': '1 GB', 'StorageCenter': 12345, 'VolumeFolder': '12345.200', 'StorageProfile': '12345.0', 'VolumeQosProfile': '12345.2', 'GroupQosProfile': '12345.3', 'DataReductionProfile': '12345.1', 'ReplayProfileList': ['12345.4']} mock_find_volume_folder.assert_called_once_with(True) mock_post.assert_called_once_with('StorageCenter/ScVolume', expected_payload, True) self.assertEqual(self.VOLUME, res) @mock.patch.object(storagecenter_api.SCApi, '_find_volume_folder') @mock.patch.object(storagecenter_api.SCApi, '_find_qos_profile') @mock.patch.object(storagecenter_api.SCApi, '_find_storage_profile') @mock.patch.object(storagecenter_api.SCApi, '_find_replay_profiles') def test_create_volume_profile_not_found(self, mock_find_replay_profiles, mock_find_storage_profile, mock_find_qos_profile, mock_find_volume_folder, mock_close_connection, mock_open_connection, mock_init): mock_find_replay_profiles.return_value = (['12345.4'], []) mock_find_volume_folder.return_value = self.FLDR mock_find_storage_profile.return_value = [{'instanceId': '12345.0'}] # Failure is on the volumeqosprofile. mock_find_qos_profile.return_value = None self.assertRaises(exception.VolumeBackendAPIException, self.scapi.create_volume, self.volume_name, 1, 'storage_profile', 'replay_profile_string', 'volume_qos', 'group_qos', 'datareductionprofile') @mock.patch.object(storagecenter_api.SCApi, '_find_storage_profile', return_value=None) @mock.patch.object(storagecenter_api.SCApi, '_find_volume_folder', return_value=FLDR) def test_create_volume_storage_profile_missing(self, mock_find_volume_folder, mock_find_storage_profile, mock_close_connection, mock_open_connection, mock_init): self.assertRaises(exception.VolumeBackendAPIException, self.scapi.create_volume, self.volume_name, 1, 'Blah') @mock.patch.object(storagecenter_api.SCApi, '_get_json', return_value=VOLUME) @mock.patch.object(storagecenter_api.SCApi, '_find_storage_profile', return_value=STORAGE_PROFILE_LIST[0]) @mock.patch.object(storagecenter_api.SCApi, '_find_volume_folder', return_value=FLDR) @mock.patch.object(storagecenter_api.HttpClient, 'post', return_value=RESPONSE_201) def test_create_volume_storage_profile(self, mock_post, mock_find_volume_folder, mock_find_storage_profile, mock_get_json, mock_close_connection, mock_open_connection, mock_init): self.scapi.create_volume( self.volume_name, 1, 'Recommended') actual = mock_post.call_args[0][1]['StorageProfile'] expected = self.STORAGE_PROFILE_LIST[0]['instanceId'] self.assertEqual(expected, actual) @mock.patch.object(storagecenter_api.SCApi, '_search_for_volume', return_value=VOLUME) @mock.patch.object(storagecenter_api.SCApi, '_get_json', return_value=None) @mock.patch.object(storagecenter_api.SCApi, '_find_volume_folder', return_value=FLDR) @mock.patch.object(storagecenter_api.HttpClient, 'post', return_value=RESPONSE_201) def test_create_volume_retry_find(self, mock_post, mock_find_volume_folder, mock_get_json, mock_search_for_volume, mock_close_connection, mock_open_connection, mock_init): # Test case where find_volume is used to do a retry of finding the # created volume res = self.scapi.create_volume( self.volume_name, 1) self.assertTrue(mock_post.called) self.assertTrue(mock_get_json.called) mock_search_for_volume.assert_called_once_with(self.volume_name) mock_find_volume_folder.assert_called_once_with(True) self.assertEqual(self.VOLUME, res, 'Unexpected ScVolume') @mock.patch.object(storagecenter_api.SCApi, '_get_json', return_value=VOLUME) @mock.patch.object(storagecenter_api.SCApi, 
'_find_volume_folder', return_value=None) @mock.patch.object(storagecenter_api.HttpClient, 'post', return_value=RESPONSE_201) def test_create_vol_folder_fail(self, mock_post, mock_find_volume_folder, mock_get_json, mock_close_connection, mock_open_connection, mock_init): # Test calling create_volume where volume folder does not exist and # fails to be created res = self.scapi.create_volume( self.volume_name, 1) self.assertTrue(mock_post.called) self.assertTrue(mock_get_json.called) mock_find_volume_folder.assert_called_once_with(True) self.assertEqual(self.VOLUME, res, 'Unexpected ScVolume') @mock.patch.object(storagecenter_api.SCApi, '_get_json', return_value=None) @mock.patch.object(storagecenter_api.SCApi, '_find_volume_folder', return_value=FLDR) @mock.patch.object(storagecenter_api.HttpClient, 'post', return_value=RESPONSE_400) def test_create_volume_failure(self, mock_post, mock_find_volume_folder, mock_get_json, mock_close_connection, mock_open_connection, mock_init): res = self.scapi.create_volume( self.volume_name, 1) mock_find_volume_folder.assert_called_once_with(True) self.assertIsNone(res, 'None expected') @mock.patch.object(storagecenter_api.SCApi, '_get_json', return_value=VOLUME_LIST) @mock.patch.object(storagecenter_api.HttpClient, 'post', return_value=RESPONSE_200) def test__get_volume_list_enforce_vol_fldr(self, mock_post, mock_get_json, mock_close_connection, mock_open_connection, mock_init): # Test case to find volume in the configured volume folder res = self.scapi._get_volume_list(self.volume_name, None, True) self.assertTrue(mock_post.called) self.assertTrue(mock_get_json.called) self.assertEqual(self.VOLUME_LIST, res, 'Unexpected volume list') @mock.patch.object(storagecenter_api.SCApi, '_get_json', return_value=VOLUME_LIST) @mock.patch.object(storagecenter_api.HttpClient, 'post', return_value=RESPONSE_200) def test__get_volume_list_any_fldr(self, mock_post, mock_get_json, mock_close_connection, mock_open_connection, mock_init): # Test case to find volume anywhere in the configured SC res = self.scapi._get_volume_list(self.volume_name, None, False) self.assertTrue(mock_post.called) self.assertTrue(mock_get_json.called) self.assertEqual(self.VOLUME_LIST, res, 'Unexpected volume list') def test_get_volume_list_no_name_no_id(self, mock_close_connection, mock_open_connection, mock_init): # Test case specified volume name is None and device id is None. 
res = self.scapi._get_volume_list(None, None, True) self.assertIsNone(res, 'None expected') @mock.patch.object(storagecenter_api.HttpClient, 'post', return_value=RESPONSE_204) def test__get_volume_list_failure(self, mock_post, mock_close_connection, mock_open_connection, mock_init): # Test case to find volume in the configured volume folder res = self.scapi._get_volume_list(self.volume_name, None, True) self.assertTrue(mock_post.called) self.assertIsNone(res, 'None expected') @mock.patch.object(storagecenter_api.SCApi, '_search_for_volume', return_value=VOLUME) def test_find_volume(self, mock_search_for_volume, mock_close_connection, mock_open_connection, mock_init): # Test case to find volume by name res = self.scapi.find_volume(self.volume_name, None) mock_search_for_volume.assert_called_once_with(self.volume_name) self.assertEqual(self.VOLUME, res) @mock.patch.object(storagecenter_api.SCApi, '_search_for_volume', return_value=None) def test_find_volume_not_found(self, mock_search_for_volume, mock_close_connection, mock_open_connection, mock_init): # Test case to find volume by name res = self.scapi.find_volume(self.volume_name, None) mock_search_for_volume.assert_called_once_with(self.volume_name) self.assertIsNone(res) @mock.patch.object(storagecenter_api.SCApi, 'get_volume', return_value=VOLUME) def test_find_volume_with_provider_id(self, mock_get_volume, mock_close_connection, mock_open_connection, mock_init): provider_id = str(self.scapi.ssn) + '.1' res = self.scapi.find_volume(self.volume_name, provider_id) mock_get_volume.assert_called_once_with(provider_id) self.assertEqual(self.VOLUME, res) @mock.patch.object(storagecenter_api.SCApi, 'get_volume') @mock.patch.object(storagecenter_api.SCApi, '_search_for_volume', return_value=VOLUME) def test_find_volume_with_invalid_provider_id(self, mock_search_for_volume, mock_get_volume, mock_close_connection, mock_open_connection, mock_init): provider_id = 'WrongSSN.1' res = self.scapi.find_volume(self.volume_name, provider_id) mock_search_for_volume.assert_called_once_with(self.volume_name) self.assertFalse(mock_get_volume.called) self.assertEqual(self.VOLUME, res) @mock.patch.object(storagecenter_api.SCApi, 'get_volume', return_value=None) def test_find_volume_with_provider_id_not_found(self, mock_get_volume, mock_close_connection, mock_open_connection, mock_init): provider_id = str(self.scapi.ssn) + '.1' res = self.scapi.find_volume(self.volume_name, provider_id) mock_get_volume.assert_called_once_with(provider_id) self.assertIsNone(res) @mock.patch.object(storagecenter_api.SCApi, 'get_volume') @mock.patch.object(storagecenter_api.SCApi, '_import_one', return_value=VOLUME) def test_find_volume_with_provider_id_complete_replication( self, mock_import_one, mock_get_volume, mock_close_connection, mock_open_connection, mock_init): provider_id = str(self.scapi.ssn) + '.1' # Configure to middle of failover. 
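        # find_volume is expected to fetch the volume by provider_id and,
        # because the returned ScVolume still carries the replication name,
        # pass it to _import_one so it is renamed back to the original
        # volume name.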
self.scapi.failed_over = True mock_get_volume.return_value = {'name': self.repl_name} res = self.scapi.find_volume(self.volume_name, provider_id) self.scapi.failed_over = False mock_import_one.assert_called_once_with(mock_get_volume.return_value, self.volume_name) mock_get_volume.assert_called_once_with(provider_id) self.assertEqual(self.VOLUME, res, 'Unexpected volume') @mock.patch.object(storagecenter_api.SCApi, 'get_volume') @mock.patch.object(storagecenter_api.SCApi, '_import_one', return_value=None) def test_find_volume_with_provider_id_import_fail(self, mock_import_one, mock_get_volume, mock_close_connection, mock_open_connection, mock_init): provider_id = str(self.scapi.ssn) + '.1' # Configure to middle of failover. self.scapi.failed_over = True mock_get_volume.return_value = {'name': self.repl_name} self.assertRaises(exception.VolumeBackendAPIException, self.scapi.find_volume, self.volume_name, provider_id) self.scapi.failed_over = False mock_import_one.assert_called_once_with(mock_get_volume.return_value, self.volume_name) mock_get_volume.assert_called_once_with(provider_id) @mock.patch.object(storagecenter_api.SCApi, '_get_volume_list', return_value=None) def test_search_for_volume_no_name(self, mock_get_volume_list, mock_close_connection, mock_open_connection, mock_init): # Test calling find_volume with no name or instanceid res = self.scapi._search_for_volume(None) self.assertIsNone(res) @mock.patch.object(storagecenter_api.SCApi, '_get_volume_list') def test_search_for_volume_not_found(self, mock_get_volume_list, mock_close_connection, mock_open_connection, mock_init): # Test calling find_volume with result of no volume found mock_get_volume_list.side_effect = [[], []] res = self.scapi._search_for_volume(self.volume_name) self.assertIsNone(res) @mock.patch.object(storagecenter_api.SCApi, '_get_volume_list', return_value=VOLUME_LIST_MULTI_VOLS) def test_search_for_volume_multi_vols_found(self, mock_get_volume_list, mock_close_connection, mock_open_connection, mock_init): # Test case where multiple volumes are found self.assertRaises(exception.VolumeBackendAPIException, self.scapi._search_for_volume, self.volume_name) @mock.patch.object(storagecenter_api.HttpClient, 'get', return_value=RESPONSE_200) @mock.patch.object(storagecenter_api.SCApi, '_get_json', return_value=VOLUME) def test_get_volume(self, mock_get_json, mock_get, mock_close_connection, mock_open_connection, mock_init): provider_id = str(self.scapi.ssn) + '.1' res = self.scapi.get_volume(provider_id) mock_get.assert_called_once_with( 'StorageCenter/ScVolume/' + provider_id) self.assertEqual(self.VOLUME, res) @mock.patch.object(storagecenter_api.HttpClient, 'get', return_value=RESPONSE_400) def test_get_volume_error(self, mock_get, mock_close_connection, mock_open_connection, mock_init): provider_id = str(self.scapi.ssn) + '.1' res = self.scapi.get_volume(provider_id) mock_get.assert_called_once_with( 'StorageCenter/ScVolume/' + provider_id) self.assertIsNone(res) def test_get_volume_no_id(self, mock_close_connection, mock_open_connection, mock_init): provider_id = None res = self.scapi.get_volume(provider_id) self.assertIsNone(res) @mock.patch.object(storagecenter_api.SCApi, '_get_json', return_value=True) @mock.patch.object(storagecenter_api.HttpClient, 'delete', return_value=RESPONSE_200) @mock.patch.object(storagecenter_api.SCApi, 'find_volume', return_value=VOLUME) def test_delete_volume(self, mock_find_volume, mock_delete, mock_get_json, mock_close_connection, mock_open_connection, mock_init): res = 
self.scapi.delete_volume(self.volume_name) self.assertTrue(mock_delete.called) mock_find_volume.assert_called_once_with(self.volume_name, None) self.assertTrue(mock_get_json.called) self.assertTrue(res) @mock.patch.object(storagecenter_api.SCApi, '_get_json', return_value=True) @mock.patch.object(storagecenter_api.HttpClient, 'delete', return_value=RESPONSE_200) @mock.patch.object(storagecenter_api.SCApi, 'find_volume', return_value=VOLUME) def test_delete_volume_with_provider_id(self, mock_find_volume, mock_delete, mock_get_json, mock_close_connection, mock_open_connection, mock_init): provider_id = str(self.scapi.ssn) + '.1' res = self.scapi.delete_volume(self.volume_name, provider_id) mock_find_volume.assert_called_once_with(self.volume_name, provider_id) self.assertTrue(mock_delete.called) self.assertTrue(mock_get_json.called) self.assertTrue(res) @mock.patch.object(storagecenter_api.HttpClient, 'delete', return_value=RESPONSE_400) @mock.patch.object(storagecenter_api.SCApi, 'find_volume', return_value=VOLUME) def test_delete_volume_failure(self, mock_find_volume, mock_delete, mock_close_connection, mock_open_connection, mock_init): provider_id = str(self.scapi.ssn) + '.1' self.assertRaises(exception.VolumeBackendAPIException, self.scapi.delete_volume, self.volume_name, provider_id) mock_find_volume.assert_called_once_with(self.volume_name, provider_id) @mock.patch.object(storagecenter_api.SCApi, 'find_volume', return_value=None) def test_delete_volume_no_vol_found(self, mock_find_volume, mock_close_connection, mock_open_connection, mock_init): # Test case where volume to be deleted does not exist res = self.scapi.delete_volume(self.volume_name, None) mock_find_volume.assert_called_once_with(self.volume_name, None) self.assertTrue(res, 'Expected True') @mock.patch.object(storagecenter_api.SCApi, '_find_folder', return_value=SVR_FLDR) def test_find_server_folder(self, mock_find_folder, mock_close_connection, mock_open_connection, mock_init): res = self.scapi._find_server_folder(False) mock_find_folder.assert_called_once_with( 'StorageCenter/ScServerFolder/GetList', self.configuration.dell_sc_server_folder, 12345) self.assertEqual(self.SVR_FLDR, res, 'Unexpected server folder') @mock.patch.object(storagecenter_api.SCApi, '_create_folder_path', return_value=SVR_FLDR) @mock.patch.object(storagecenter_api.SCApi, '_find_folder', return_value=None) def test_find_server_folder_create_folder(self, mock_find_folder, mock_create_folder_path, mock_close_connection, mock_open_connection, mock_init): # Test case where specified server folder is not found and must be # created res = self.scapi._find_server_folder(True) mock_find_folder.assert_called_once_with( 'StorageCenter/ScServerFolder/GetList', self.configuration.dell_sc_server_folder, 12345) self.assertTrue(mock_create_folder_path.called) self.assertEqual(self.SVR_FLDR, res, 'Unexpected server folder') @mock.patch.object(storagecenter_api.SCApi, '_find_folder', return_value=None) def test_find_server_folder_fail(self, mock_find_folder, mock_close_connection, mock_open_connection, mock_init): # Test case where _find_server_folder returns none res = self.scapi._find_server_folder( False) mock_find_folder.assert_called_once_with( 'StorageCenter/ScServerFolder/GetList', self.configuration.dell_sc_volume_folder, 12345) self.assertIsNone(res, 'Expected None') @mock.patch.object(storagecenter_api.HttpClient, 'post', return_value=RESPONSE_200) def test_add_hba(self, mock_post, mock_close_connection, mock_open_connection, mock_init): res = 
self.scapi._add_hba(self.SCSERVER, self.IQN) self.assertTrue(mock_post.called) self.assertTrue(res) @mock.patch.object(storagecenter_api.HttpClient, 'post', return_value=RESPONSE_200) def test_add_hba_fc(self, mock_post, mock_close_connection, mock_open_connection, mock_init): saveproto = self.scapi.protocol self.scapi.protocol = 'FibreChannel' res = self.scapi._add_hba(self.SCSERVER, self.WWN) self.assertTrue(mock_post.called) self.assertTrue(res) self.scapi.protocol = saveproto @mock.patch.object(storagecenter_api.HttpClient, 'post', return_value=RESPONSE_400) def test_add_hba_failure(self, mock_post, mock_close_connection, mock_open_connection, mock_init): res = self.scapi._add_hba(self.SCSERVER, self.IQN) self.assertTrue(mock_post.called) self.assertFalse(res) @mock.patch.object(storagecenter_api.SCApi, '_get_json', return_value=SVR_OS_S) @mock.patch.object(storagecenter_api.HttpClient, 'post', return_value=RESPONSE_200) def test_find_serveros(self, mock_post, mock_get_json, mock_close_connection, mock_open_connection, mock_init): res = self.scapi._find_serveros('Red Hat Linux 6.x') self.assertTrue(mock_get_json.called) self.assertTrue(mock_post.called) self.assertEqual('64702.38', res, 'Wrong InstanceId') @mock.patch.object(storagecenter_api.SCApi, '_get_json', return_value=SVR_OS_S) @mock.patch.object(storagecenter_api.HttpClient, 'post', return_value=RESPONSE_200) def test_find_serveros_not_found(self, mock_post, mock_get_json, mock_close_connection, mock_open_connection, mock_init): # Test requesting a Server OS that will not be found res = self.scapi._find_serveros('Non existent OS') self.assertTrue(mock_get_json.called) self.assertTrue(mock_post.called) self.assertIsNone(res, 'None expected') @mock.patch.object(storagecenter_api.HttpClient, 'post', return_value=RESPONSE_400) def test_find_serveros_failed(self, mock_post, mock_close_connection, mock_open_connection, mock_init): res = self.scapi._find_serveros('Red Hat Linux 6.x') self.assertIsNone(res, 'None expected') @mock.patch.object(storagecenter_api.SCApi, '_find_server_folder', return_value=SVR_FLDR) @mock.patch.object(storagecenter_api.SCApi, '_add_hba', return_value=FC_HBA) @mock.patch.object(storagecenter_api.SCApi, '_create_server', return_value=SCSERVER) def test_create_server_multiple_hbas(self, mock_create_server, mock_add_hba, mock_find_server_folder, mock_close_connection, mock_open_connection, mock_init): res = self.scapi.create_server(self.WWNS, 'Red Hat Linux 6.x') self.assertTrue(mock_create_server.called) self.assertTrue(mock_add_hba.called) self.assertEqual(self.SCSERVER, res, 'Unexpected ScServer') @mock.patch.object(storagecenter_api.SCApi, '_add_hba', return_value=True) @mock.patch.object(storagecenter_api.SCApi, '_first_result', return_value=SCSERVER) @mock.patch.object(storagecenter_api.SCApi, '_find_server_folder', return_value=SVR_FLDR) @mock.patch.object(storagecenter_api.SCApi, '_find_serveros', return_value='64702.38') @mock.patch.object(storagecenter_api.HttpClient, 'post', return_value=RESPONSE_201) def test_create_server(self, mock_post, mock_find_serveros, mock_find_server_folder, mock_first_result, mock_add_hba, mock_close_connection, mock_open_connection, mock_init): res = self.scapi.create_server(self.IQN, 'Red Hat Linux 6.x') self.assertTrue(mock_find_serveros.called) self.assertTrue(mock_find_server_folder.called) self.assertTrue(mock_first_result.called) self.assertTrue(mock_add_hba.called) self.assertEqual(self.SCSERVER, res, 'Unexpected ScServer') 
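    # The remaining create_server cases cover the failure branches set up by
    # the mocks above: a missing server OS or server folder still yields a
    # usable ScServer, a failed POST or an empty _first_result returns None,
    # and a failed _add_hba tears the new server back down via
    # _delete_server.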
@mock.patch.object(storagecenter_api.SCApi, '_add_hba', return_value=True) @mock.patch.object(storagecenter_api.SCApi, '_first_result', return_value=SCSERVER) @mock.patch.object(storagecenter_api.SCApi, '_find_server_folder', return_value=SVR_FLDR) @mock.patch.object(storagecenter_api.SCApi, '_find_serveros', return_value=None) @mock.patch.object(storagecenter_api.HttpClient, 'post', return_value=RESPONSE_201) def test_create_server_os_not_found(self, mock_post, mock_find_serveros, mock_find_server_folder, mock_first_result, mock_add_hba, mock_close_connection, mock_open_connection, mock_init): res = self.scapi.create_server(self.IQN, 'Red Hat Binux 6.x') self.assertTrue(mock_find_serveros.called) self.assertEqual(self.SCSERVER, res, 'Unexpected ScServer') @mock.patch.object(storagecenter_api.SCApi, '_add_hba', return_value=True) @mock.patch.object(storagecenter_api.SCApi, '_first_result', return_value=SCSERVER) @mock.patch.object(storagecenter_api.SCApi, '_find_server_folder', return_value=None) @mock.patch.object(storagecenter_api.SCApi, '_find_serveros', return_value='64702.38') @mock.patch.object(storagecenter_api.HttpClient, 'post', return_value=RESPONSE_201) def test_create_server_fldr_not_found(self, mock_post, mock_find_serveros, mock_find_server_folder, mock_first_result, mock_add_hba, mock_close_connection, mock_open_connection, mock_init): res = self.scapi.create_server(self.IQN, 'Red Hat Linux 6.x') self.assertTrue(mock_find_server_folder.called) self.assertEqual(self.SCSERVER, res, 'Unexpected ScServer') @mock.patch.object(storagecenter_api.SCApi, '_add_hba', return_value=True) @mock.patch.object(storagecenter_api.SCApi, '_first_result', return_value=SCSERVER) @mock.patch.object(storagecenter_api.SCApi, '_find_server_folder', return_value=None) @mock.patch.object(storagecenter_api.SCApi, '_find_serveros', return_value='64702.38') @mock.patch.object(storagecenter_api.HttpClient, 'post', return_value=RESPONSE_400) def test_create_server_failure(self, mock_post, mock_find_serveros, mock_find_server_folder, mock_first_result, mock_add_hba, mock_close_connection, mock_open_connection, mock_init): res = self.scapi.create_server(self.IQN, 'Red Hat Linux 6.x') self.assertIsNone(res, 'None expected') @mock.patch.object(storagecenter_api.SCApi, '_add_hba', return_value=True) @mock.patch.object(storagecenter_api.SCApi, '_first_result', return_value=None) @mock.patch.object(storagecenter_api.SCApi, '_find_server_folder', return_value=None) @mock.patch.object(storagecenter_api.SCApi, '_find_serveros', return_value='64702.38') @mock.patch.object(storagecenter_api.HttpClient, 'post', return_value=RESPONSE_201) def test_create_server_not_found(self, mock_post, mock_find_serveros, mock_find_server_folder, mock_first_result, mock_add_hba, mock_close_connection, mock_open_connection, mock_init): # Test create server where _first_result is None res = self.scapi.create_server(self.IQN, 'Red Hat Linux 6.x') self.assertIsNone(res, 'None expected') @mock.patch.object(storagecenter_api.SCApi, '_delete_server', return_value=None) @mock.patch.object(storagecenter_api.SCApi, '_add_hba', return_value=False) @mock.patch.object(storagecenter_api.SCApi, '_first_result', return_value=SCSERVER) @mock.patch.object(storagecenter_api.SCApi, '_find_server_folder', return_value=SVR_FLDR) @mock.patch.object(storagecenter_api.SCApi, '_find_serveros', return_value='64702.38') @mock.patch.object(storagecenter_api.HttpClient, 'post', return_value=RESPONSE_201) def test_create_server_addhba_fail(self, mock_post, 
mock_find_serveros, mock_find_server_folder, mock_first_result, mock_add_hba, mock_delete_server, mock_close_connection, mock_open_connection, mock_init): # Tests create server where add hba fails res = self.scapi.create_server(self.IQN, 'Red Hat Linux 6.x') self.assertTrue(mock_delete_server.called) self.assertIsNone(res, 'None expected') @mock.patch.object(storagecenter_api.SCApi, '_first_result', return_value=SCSERVER) @mock.patch.object(storagecenter_api.SCApi, '_find_serverhba', return_value=ISCSI_HBA) @mock.patch.object(storagecenter_api.HttpClient, 'post', return_value=RESPONSE_200) def test_find_server(self, mock_post, mock_find_serverhba, mock_first_result, mock_close_connection, mock_open_connection, mock_init): res = self.scapi.find_server(self.IQN) self.assertTrue(mock_find_serverhba.called) self.assertTrue(mock_first_result.called) self.assertIsNotNone(res, 'Expected ScServer') @mock.patch.object(storagecenter_api.SCApi, '_find_serverhba', return_value=None) @mock.patch.object(storagecenter_api.HttpClient, 'post', return_value=RESPONSE_200) def test_find_server_no_hba(self, mock_post, mock_find_serverhba, mock_close_connection, mock_open_connection, mock_init): # Test case where a ScServer HBA does not exist with the specified IQN # or WWN res = self.scapi.find_server(self.IQN) self.assertTrue(mock_find_serverhba.called) self.assertIsNone(res, 'Expected None') @mock.patch.object(storagecenter_api.SCApi, '_find_serverhba', return_value=ISCSI_HBA) @mock.patch.object(storagecenter_api.HttpClient, 'post', return_value=RESPONSE_204) def test_find_server_failure(self, mock_post, mock_find_serverhba, mock_close_connection, mock_open_connection, mock_init): # Test case where a ScServer does not exist with the specified # ScServerHba res = self.scapi.find_server(self.IQN) self.assertTrue(mock_find_serverhba.called) self.assertIsNone(res, 'Expected None') @mock.patch.object(storagecenter_api.SCApi, '_first_result', return_value=ISCSI_HBA) @mock.patch.object(storagecenter_api.HttpClient, 'post', return_value=RESPONSE_200) def test_find_serverhba(self, mock_post, mock_first_result, mock_close_connection, mock_open_connection, mock_init): res = self.scapi.find_server(self.IQN) self.assertTrue(mock_post.called) self.assertTrue(mock_first_result.called) self.assertIsNotNone(res, 'Expected ScServerHba') @mock.patch.object(storagecenter_api.HttpClient, 'post', return_value=RESPONSE_204) def test_find_serverhba_failure(self, mock_post, mock_close_connection, mock_open_connection, mock_init): # Test case where a ScServer does not exist with the specified # ScServerHba res = self.scapi.find_server(self.IQN) self.assertIsNone(res, 'Expected None') @mock.patch.object(storagecenter_api.SCApi, '_get_json', return_value=ISCSI_FLT_DOMAINS) @mock.patch.object(storagecenter_api.HttpClient, 'get', return_value=RESPONSE_200) def test_find_domains(self, mock_get, mock_get_json, mock_close_connection, mock_open_connection, mock_init): res = self.scapi._find_domains(u'64702.5764839588723736074.69') self.assertTrue(mock_get.called) self.assertTrue(mock_get_json.called) self.assertEqual( self.ISCSI_FLT_DOMAINS, res, 'Unexpected ScIscsiFaultDomain') @mock.patch.object(storagecenter_api.HttpClient, 'get', return_value=RESPONSE_204) def test_find_domains_error(self, mock_get, mock_close_connection, mock_open_connection, mock_init): # Test case where get of ScControllerPort FaultDomainList fails res = self.scapi._find_domains(u'64702.5764839588723736074.69') self.assertIsNone(res, 'Expected None') 
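    # The lookup helpers exercised below (_find_initiators, get_volume_count,
    # _find_mappings, _find_mapping_profiles, _find_controller_port) follow
    # the same pattern: HttpClient.get is stubbed with a canned response and
    # each test checks both the parsed result and the fallback value (empty
    # list, -1 or None) returned when the REST call fails.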
@mock.patch.object(storagecenter_api.SCApi, '_get_json', return_value=FC_HBAS) @mock.patch.object(storagecenter_api.HttpClient, 'get', return_value=RESPONSE_200) def test_find_initiators(self, mock_get, mock_get_json, mock_close_connection, mock_open_connection, mock_init): res = self.scapi._find_initiators(self.SCSERVER) self.assertTrue(mock_get.called) self.assertTrue(mock_get_json.called) self.assertIsNotNone(res, 'Expected WWN list') @mock.patch.object(storagecenter_api.HttpClient, 'get', return_value=RESPONSE_400) def test_find_initiators_error(self, mock_get, mock_close_connection, mock_open_connection, mock_init): # Test case where get of ScServer HbaList fails res = self.scapi._find_initiators(self.SCSERVER) self.assertListEqual([], res, 'Expected empty list') @mock.patch.object(storagecenter_api.SCApi, '_get_json', return_value=MAPPINGS) @mock.patch.object(storagecenter_api.HttpClient, 'get', return_value=RESPONSE_200) def test_get_volume_count(self, mock_get, mock_get_json, mock_close_connection, mock_open_connection, mock_init): res = self.scapi.get_volume_count(self.SCSERVER) self.assertTrue(mock_get.called) self.assertTrue(mock_get_json.called) self.assertEqual(len(self.MAPPINGS), res, 'Mapping count mismatch') @mock.patch.object(storagecenter_api.HttpClient, 'get', return_value=RESPONSE_400) def test_get_volume_count_failure(self, mock_get, mock_close_connection, mock_open_connection, mock_init): # Test case of where get of ScServer MappingList fails res = self.scapi.get_volume_count(self.SCSERVER) self.assertTrue(mock_get.called) self.assertEqual(-1, res, 'Mapping count not -1') @mock.patch.object(storagecenter_api.SCApi, '_get_json', return_value=[]) @mock.patch.object(storagecenter_api.HttpClient, 'get', return_value=RESPONSE_200) def test_get_volume_count_no_volumes(self, mock_get, mock_get_json, mock_close_connection, mock_open_connection, mock_init): res = self.scapi.get_volume_count(self.SCSERVER) self.assertTrue(mock_get.called) self.assertTrue(mock_get_json.called) self.assertEqual(len([]), res, 'Mapping count mismatch') @mock.patch.object(storagecenter_api.SCApi, '_get_json', return_value=MAPPINGS) @mock.patch.object(storagecenter_api.HttpClient, 'get', return_value=RESPONSE_200) def test_find_mappings(self, mock_get, mock_get_json, mock_close_connection, mock_open_connection, mock_init): res = self.scapi._find_mappings(self.VOLUME) self.assertTrue(mock_get.called) self.assertTrue(mock_get_json.called) self.assertEqual(self.MAPPINGS, res, 'Mapping mismatch') @mock.patch.object(storagecenter_api.HttpClient, 'get', return_value=RESPONSE_200) def test_find_mappings_inactive_vol(self, mock_get, mock_close_connection, mock_open_connection, mock_init): # Test getting volume mappings on inactive volume res = self.scapi._find_mappings(self.INACTIVE_VOLUME) self.assertFalse(mock_get.called) self.assertEqual([], res, 'No mappings expected') @mock.patch.object(storagecenter_api.HttpClient, 'get', return_value=RESPONSE_400) def test_find_mappings_failure(self, mock_get, mock_close_connection, mock_open_connection, mock_init): # Test case of where get of ScVolume MappingList fails res = self.scapi._find_mappings(self.VOLUME) self.assertTrue(mock_get.called) self.assertEqual([], res, 'Mapping count not empty') @mock.patch.object(storagecenter_api.SCApi, '_get_json', return_value=[]) @mock.patch.object(storagecenter_api.HttpClient, 'get', return_value=RESPONSE_200) def test_find_mappings_no_mappings(self, mock_get, mock_get_json, mock_close_connection, mock_open_connection, 
                                       mock_init):
        # Test case where ScVolume has no mappings.
        res = self.scapi._find_mappings(self.VOLUME)
        self.assertTrue(mock_get.called)
        self.assertTrue(mock_get_json.called)
        self.assertEqual([], res, 'Mapping count mismatch')

    @mock.patch.object(storagecenter_api.SCApi, '_get_json',
                       return_value=MAP_PROFILES)
    @mock.patch.object(storagecenter_api.HttpClient, 'get',
                       return_value=RESPONSE_200)
    def test_find_mapping_profiles(self, mock_get, mock_get_json,
                                   mock_close_connection,
                                   mock_open_connection, mock_init):
        # Test finding the mapping profiles of a ScVolume.
        res = self.scapi._find_mapping_profiles(self.VOLUME)
        self.assertTrue(mock_get.called)
        self.assertTrue(mock_get_json.called)
        self.assertEqual(self.MAP_PROFILES, res)

    @mock.patch.object(storagecenter_api.HttpClient, 'get',
                       return_value=RESPONSE_400)
    def test_find_mapping_profiles_error(self, mock_get,
                                         mock_close_connection,
                                         mock_open_connection, mock_init):
        # Test case where the get of the mapping profiles fails.
        res = self.scapi._find_mapping_profiles(self.VOLUME)
        self.assertTrue(mock_get.called)
        self.assertEqual([], res)

    @mock.patch.object(storagecenter_api.SCApi, '_first_result',
                       return_value=CTRLR_PORT)
    @mock.patch.object(storagecenter_api.HttpClient, 'get',
                       return_value=RESPONSE_200)
    def test_find_controller_port(self, mock_get, mock_first_result,
                                  mock_close_connection,
                                  mock_open_connection, mock_init):
        res = self.scapi._find_controller_port(
            u'64702.5764839588723736070.51')
        self.assertTrue(mock_get.called)
        self.assertTrue(mock_first_result.called)
        self.assertEqual(self.CTRLR_PORT, res, 'ScControllerPort mismatch')

    @mock.patch.object(storagecenter_api.HttpClient, 'get',
                       return_value=RESPONSE_204)
    def test_find_controller_port_failure(self, mock_get,
                                          mock_close_connection,
                                          mock_open_connection, mock_init):
        # Test case where the get of the ScControllerPort fails.
        res = self.scapi._find_controller_port(self.VOLUME)
        self.assertTrue(mock_get.called)
        self.assertIsNone(res, 'None expected')

    @mock.patch.object(storagecenter_api.SCApi, '_find_controller_port',
                       return_value=FC_CTRLR_PORT)
    @mock.patch.object(storagecenter_api.SCApi, '_find_mappings',
                       return_value=FC_MAPPINGS)
    @mock.patch.object(storagecenter_api.SCApi, '_find_initiators',
                       return_value=WWNS)
    def test_find_wwns(self, mock_find_initiators, mock_find_mappings,
                       mock_find_controller_port, mock_close_connection,
                       mock_open_connection, mock_init):
        lun, wwns, itmap = self.scapi.find_wwns(self.VOLUME, self.SCSERVER)
        self.assertTrue(mock_find_initiators.called)
        self.assertTrue(mock_find_mappings.called)
        self.assertTrue(mock_find_controller_port.called)
        # The _find_controller_port is mocked, so all mapping pairs
        # will have the same WWN for the ScControllerPort.
        itmapCompare = {u'21000024ff30441c': [u'5000d31000fcbe36'],
                        u'21000024ff30441d': [u'5000d31000fcbe36',
                                              u'5000d31000fcbe36']}
        self.assertEqual(1, lun, 'Incorrect LUN')
        self.assertIsNotNone(wwns, 'WWNs is None')
        self.assertEqual(itmapCompare, itmap, 'WWN mapping incorrect')

    @mock.patch.object(storagecenter_api.SCApi, '_find_mappings',
                       return_value=[])
    @mock.patch.object(storagecenter_api.SCApi, '_find_initiators',
                       return_value=FC_HBAS)
    def test_find_wwns_no_mappings(self, mock_find_initiators,
                                   mock_find_mappings,
                                   mock_close_connection,
                                   mock_open_connection, mock_init):
        # Test case where there are no ScMapping(s).
        lun, wwns, itmap = self.scapi.find_wwns(self.VOLUME, self.SCSERVER)
        self.assertTrue(mock_find_initiators.called)
        self.assertTrue(mock_find_mappings.called)
        self.assertIsNone(lun, 'Incorrect LUN')
        self.assertEqual([], wwns, 'WWNs is not empty')
self.assertEqual({}, itmap, 'WWN mapping not empty') @mock.patch.object(storagecenter_api.SCApi, '_find_controller_port', return_value=None) @mock.patch.object(storagecenter_api.SCApi, '_find_mappings', return_value=FC_MAPPINGS) @mock.patch.object(storagecenter_api.SCApi, '_find_initiators', return_value=WWNS) def test_find_wwns_no_ctlr_port(self, mock_find_initiators, mock_find_mappings, mock_find_controller_port, mock_close_connection, mock_open_connection, mock_init): # Test case where ScControllerPort is none lun, wwns, itmap = self.scapi.find_wwns(self.VOLUME, self.SCSERVER) self.assertTrue(mock_find_initiators.called) self.assertTrue(mock_find_mappings.called) self.assertTrue(mock_find_controller_port.called) self.assertIsNone(lun, 'Incorrect LUN') self.assertEqual([], wwns, 'WWNs is not empty') self.assertEqual({}, itmap, 'WWN mapping not empty') @mock.patch.object(storagecenter_api.SCApi, '_find_controller_port', return_value=FC_CTRLR_PORT_WWN_ERROR) @mock.patch.object(storagecenter_api.SCApi, '_find_mappings', return_value=FC_MAPPINGS) @mock.patch.object(storagecenter_api.SCApi, '_find_initiators', return_value=WWNS) def test_find_wwns_wwn_resilient(self, mock_find_initiators, mock_find_mappings, mock_find_controller_port, mock_close_connection, mock_open_connection, mock_init): # Test case where ScControllerPort object has wWN instead of wwn (as # seen in some cases) for a property but we are still able to find it. lun, wwns, itmap = self.scapi.find_wwns(self.VOLUME, self.SCSERVER) self.assertTrue(mock_find_initiators.called) self.assertTrue(mock_find_mappings.called) self.assertTrue(mock_find_controller_port.called) self.assertEqual(1, lun, 'Incorrect LUN') expected_wwn = ['5000d31000fcbe36', '5000d31000fcbe36', '5000d31000fcbe36'] self.assertEqual(expected_wwn, wwns, 'WWNs incorrect') expected_itmap = {'21000024ff30441c': ['5000d31000fcbe36'], '21000024ff30441d': ['5000d31000fcbe36', '5000d31000fcbe36']} self.assertEqual(expected_itmap, itmap, 'WWN mapping incorrect') @mock.patch.object(storagecenter_api.SCApi, '_find_controller_port', return_value=FC_CTRLR_PORT) @mock.patch.object(storagecenter_api.SCApi, '_find_mappings', return_value=FC_MAPPINGS) @mock.patch.object(storagecenter_api.SCApi, '_find_initiators', return_value=WWNS_NO_MATCH) # Test case where HBA name is not found in list of initiators def test_find_wwns_hbaname_not_found(self, mock_find_initiators, mock_find_mappings, mock_find_controller_port, mock_close_connection, mock_open_connection, mock_init): lun, wwns, itmap = self.scapi.find_wwns(self.VOLUME, self.SCSERVER) self.assertTrue(mock_find_initiators.called) self.assertTrue(mock_find_mappings.called) self.assertTrue(mock_find_controller_port.called) self.assertIsNone(lun, 'Incorrect LUN') self.assertEqual([], wwns, 'WWNs is not empty') self.assertEqual({}, itmap, 'WWN mapping not empty') @mock.patch.object(storagecenter_api.SCApi, '_find_controller_port', return_value=FC_CTRLR_PORT) @mock.patch.object(storagecenter_api.SCApi, '_find_mappings', return_value=FC_MAPPINGS_LUN_MISMATCH) @mock.patch.object(storagecenter_api.SCApi, '_find_initiators', return_value=WWNS) # Test case where FC mappings contain a LUN mismatch def test_find_wwns_lun_mismatch(self, mock_find_initiators, mock_find_mappings, mock_find_controller_port, mock_close_connection, mock_open_connection, mock_init): lun, wwns, itmap = self.scapi.find_wwns(self.VOLUME, self.SCSERVER) self.assertTrue(mock_find_initiators.called) self.assertTrue(mock_find_mappings.called) 
self.assertTrue(mock_find_controller_port.called) # The _find_controller_port is Mocked, so all mapping pairs # will have the same WWN for the ScControllerPort itmapCompare = {u'21000024ff30441c': [u'5000d31000fcbe36'], u'21000024ff30441d': [u'5000d31000fcbe36', u'5000d31000fcbe36']} self.assertEqual(1, lun, 'Incorrect LUN') self.assertIsNotNone(wwns, 'WWNs is None') self.assertEqual(itmapCompare, itmap, 'WWN mapping incorrect') @mock.patch.object(storagecenter_api.SCApi, '_first_result', return_value=VOLUME_CONFIG) @mock.patch.object(storagecenter_api.HttpClient, 'get', return_value=RESPONSE_200) def test_find_active_controller(self, mock_get, mock_first_result, mock_close_connection, mock_open_connection, mock_init): res = self.scapi._find_active_controller(self.VOLUME) self.assertTrue(mock_get.called) self.assertTrue(mock_first_result.called) self.assertEqual('64702.64703', res, 'Unexpected Active Controller') @mock.patch.object(storagecenter_api.HttpClient, 'get', return_value=RESPONSE_400) def test_find_active_controller_failure(self, mock_get, mock_close_connection, mock_open_connection, mock_init): # Test case of where get of ScVolume MappingList fails res = self.scapi._find_active_controller(self.VOLUME) self.assertTrue(mock_get.called) self.assertIsNone(res, 'Expected None') @mock.patch.object(storagecenter_api.SCApi, '_find_active_controller', return_value='64702.5764839588723736131.91') @mock.patch.object(storagecenter_api.SCApi, '_find_controller_port', return_value=ISCSI_CTRLR_PORT) @mock.patch.object(storagecenter_api.SCApi, '_find_domains', return_value=ISCSI_FLT_DOMAINS) @mock.patch.object(storagecenter_api.SCApi, '_find_mappings', return_value=MAPPINGS) @mock.patch.object(storagecenter_api.SCApi, '_is_virtualport_mode', return_value=True) def test_find_iscsi_properties_mappings(self, mock_is_virtualport_mode, mock_find_mappings, mock_find_domains, mock_find_ctrl_port, mock_find_active_controller, mock_close_connection, mock_open_connection, mock_init): scserver = {'instanceId': '64702.30'} res = self.scapi.find_iscsi_properties(self.VOLUME, scserver) self.assertTrue(mock_is_virtualport_mode.called) self.assertTrue(mock_find_mappings.called) self.assertTrue(mock_find_domains.called) self.assertTrue(mock_find_ctrl_port.called) self.assertTrue(mock_find_active_controller.called) expected = {'target_discovered': False, 'target_iqn': u'iqn.2002-03.com.compellent:5000d31000fcbe43', 'target_iqns': [u'iqn.2002-03.com.compellent:5000d31000fcbe43'], 'target_lun': 1, 'target_luns': [1], 'target_portal': u'192.168.0.21:3260', 'target_portals': [u'192.168.0.21:3260']} self.assertEqual(expected, res, 'Wrong Target Info') @mock.patch.object(storagecenter_api.SCApi, '_find_active_controller', return_value='64702.5764839588723736131.91') @mock.patch.object(storagecenter_api.SCApi, '_find_controller_port', return_value=ISCSI_CTRLR_PORT) @mock.patch.object(storagecenter_api.SCApi, '_find_domains', return_value=ISCSI_FLT_DOMAINS) @mock.patch.object(storagecenter_api.SCApi, '_find_mappings', return_value=MAPPINGS) @mock.patch.object(storagecenter_api.SCApi, '_is_virtualport_mode', return_value=True) def test_find_iscsi_properties_multiple_servers_mapped( self, mock_is_virtualport_mode, mock_find_mappings, mock_find_domains, mock_find_ctrl_port, mock_find_active_controller, mock_close_connection, mock_open_connection, mock_init): mappings = [{'instanceId': '64702.970.64702', 'server': {'instanceId': '64702.47'}, 'volume': {'instanceId': '64702.92'}}] mappings.append(self.MAPPINGS[0].copy()) 
        scserver = {'instanceId': '64702.30'}
        res = self.scapi.find_iscsi_properties(self.VOLUME, scserver)
        self.assertTrue(mock_is_virtualport_mode.called)
        self.assertTrue(mock_find_mappings.called)
        self.assertTrue(mock_find_domains.called)
        self.assertTrue(mock_find_ctrl_port.called)
        self.assertTrue(mock_find_active_controller.called)
        expected = {'target_discovered': False,
                    'target_iqn':
                        u'iqn.2002-03.com.compellent:5000d31000fcbe43',
                    'target_iqns':
                        [u'iqn.2002-03.com.compellent:5000d31000fcbe43'],
                    'target_lun': 1,
                    'target_luns': [1],
                    'target_portal': u'192.168.0.21:3260',
                    'target_portals': [u'192.168.0.21:3260']}
        self.assertEqual(expected, res, 'Wrong Target Info')

    @mock.patch.object(storagecenter_api.SCApi, '_find_mappings',
                       return_value=[])
    def test_find_iscsi_properties_no_mapping(self, mock_find_mappings,
                                              mock_close_connection,
                                              mock_open_connection,
                                              mock_init):
        scserver = {'instanceId': '64702.30'}
        # Test case where there are no ScMapping(s).
        self.assertRaises(exception.VolumeBackendAPIException,
                          self.scapi.find_iscsi_properties,
                          self.VOLUME, scserver)
        self.assertTrue(mock_find_mappings.called)

    @mock.patch.object(storagecenter_api.SCApi, '_find_active_controller',
                       return_value='64702.64702')
    @mock.patch.object(storagecenter_api.SCApi, '_find_controller_port',
                       return_value=ISCSI_CTRLR_PORT)
    @mock.patch.object(storagecenter_api.SCApi, '_find_domains',
                       return_value=None)
    @mock.patch.object(storagecenter_api.SCApi, '_find_mappings',
                       return_value=MAPPINGS)
    @mock.patch.object(storagecenter_api.SCApi, '_is_virtualport_mode',
                       return_value=True)
    def test_find_iscsi_properties_no_domain(self, mock_is_virtualport_mode,
                                             mock_find_mappings,
                                             mock_find_domains,
                                             mock_find_ctrl_port,
                                             mock_find_active_controller,
                                             mock_close_connection,
                                             mock_open_connection,
                                             mock_init):
        scserver = {'instanceId': '64702.30'}
        # Test case where there are no ScFaultDomain(s).
        self.assertRaises(exception.VolumeBackendAPIException,
                          self.scapi.find_iscsi_properties,
                          self.VOLUME, scserver)
        self.assertTrue(mock_is_virtualport_mode.called)
        self.assertTrue(mock_find_mappings.called)
        self.assertTrue(mock_find_domains.called)
        self.assertTrue(mock_find_ctrl_port.called)
        self.assertTrue(mock_find_active_controller.called)

    @mock.patch.object(storagecenter_api.SCApi, '_find_active_controller',
                       return_value='64702.64702')
    @mock.patch.object(storagecenter_api.SCApi, '_find_controller_port',
                       return_value=None)
    @mock.patch.object(storagecenter_api.SCApi, '_find_mappings',
                       return_value=MAPPINGS)
    @mock.patch.object(storagecenter_api.SCApi, '_is_virtualport_mode',
                       return_value=True)
    def test_find_iscsi_properties_no_ctrl_port(self,
                                                mock_is_virtualport_mode,
                                                mock_find_mappings,
                                                mock_find_ctrl_port,
                                                mock_find_active_controller,
                                                mock_close_connection,
                                                mock_open_connection,
                                                mock_init):
        scserver = {'instanceId': '64702.30'}
        # Test case where there is no ScControllerPort.
        self.assertRaises(exception.VolumeBackendAPIException,
                          self.scapi.find_iscsi_properties,
                          self.VOLUME, scserver)
        self.assertTrue(mock_is_virtualport_mode.called)
        self.assertTrue(mock_find_mappings.called)
        self.assertTrue(mock_find_ctrl_port.called)
        self.assertTrue(mock_find_active_controller.called)

    @mock.patch.object(storagecenter_api.SCApi, '_find_active_controller',
                       return_value='64702.64702')
    @mock.patch.object(storagecenter_api.SCApi, '_find_controller_port',
                       return_value=ISCSI_CTRLR_PORT)
    @mock.patch.object(storagecenter_api.SCApi, '_find_domains',
                       return_value=ISCSI_FLT_DOMAINS)
    @mock.patch.object(storagecenter_api.SCApi, '_find_mappings',
                       return_value=MAPPINGS_READ_ONLY)
@mock.patch.object(storagecenter_api.SCApi, '_is_virtualport_mode', return_value=True) def test_find_iscsi_properties_ro(self, mock_is_virtualport_mode, mock_find_mappings, mock_find_domains, mock_find_ctrl_port, mock_find_active_controller, mock_close_connection, mock_open_connection, mock_init): scserver = {'instanceId': '64702.30'} # Test case where Read Only mappings are found res = self.scapi.find_iscsi_properties(self.VOLUME, scserver) self.assertTrue(mock_is_virtualport_mode.called) self.assertTrue(mock_find_mappings.called) self.assertTrue(mock_find_domains.called) self.assertTrue(mock_find_ctrl_port.called) self.assertTrue(mock_find_active_controller.called) expected = {'target_discovered': False, 'target_iqn': u'iqn.2002-03.com.compellent:5000d31000fcbe43', 'target_iqns': [u'iqn.2002-03.com.compellent:5000d31000fcbe43'], 'target_lun': 1, 'target_luns': [1], 'target_portal': u'192.168.0.21:3260', 'target_portals': [u'192.168.0.21:3260']} self.assertEqual(expected, res, 'Wrong Target Info') @mock.patch.object(storagecenter_api.SCApi, '_find_active_controller', return_value='64702.64702') @mock.patch.object(storagecenter_api.SCApi, '_find_controller_port') @mock.patch.object(storagecenter_api.SCApi, '_find_domains', return_value=ISCSI_FLT_DOMAINS_MULTI_PORTALS) @mock.patch.object(storagecenter_api.SCApi, '_find_mappings', return_value=MAPPINGS_MULTI_PORTAL) @mock.patch.object(storagecenter_api.SCApi, '_is_virtualport_mode', return_value=True) def test_find_iscsi_properties_multi_portals(self, mock_is_virtualport_mode, mock_find_mappings, mock_find_domains, mock_find_ctrl_port, mock_find_active_controller, mock_close_connection, mock_open_connection, mock_init): # Test case where there are multiple portals mock_find_ctrl_port.side_effect = [ {'iscsiName': 'iqn.2002-03.com.compellent:5000d31000fcbe43'}, {'iscsiName': 'iqn.2002-03.com.compellent:5000d31000fcbe44'}] scserver = {'instanceId': '64702.30'} res = self.scapi.find_iscsi_properties(self.VOLUME, scserver) self.assertTrue(mock_find_mappings.called) self.assertTrue(mock_find_domains.called) self.assertTrue(mock_find_ctrl_port.called) self.assertTrue(mock_find_active_controller.called) self.assertTrue(mock_is_virtualport_mode.called) expected = {'target_discovered': False, 'target_iqn': u'iqn.2002-03.com.compellent:5000d31000fcbe44', 'target_iqns': [u'iqn.2002-03.com.compellent:5000d31000fcbe44', u'iqn.2002-03.com.compellent:5000d31000fcbe43', u'iqn.2002-03.com.compellent:5000d31000fcbe43', u'iqn.2002-03.com.compellent:5000d31000fcbe44'], 'target_lun': 1, 'target_luns': [1, 1, 1, 1], 'target_portal': u'192.168.0.25:3260', 'target_portals': [u'192.168.0.25:3260', u'192.168.0.21:3260', u'192.168.0.25:3260', u'192.168.0.21:3260']} self.assertEqual(expected, res, 'Wrong Target Info') @mock.patch.object(storagecenter_api.SCApi, '_find_active_controller', return_value='64702.64702') @mock.patch.object(storagecenter_api.SCApi, '_find_controller_port') @mock.patch.object(storagecenter_api.SCApi, '_find_domains', return_value=ISCSI_FLT_DOMAINS_MULTI_PORTALS) @mock.patch.object(storagecenter_api.SCApi, '_find_mappings', return_value=MAPPINGS_MULTI_PORTAL) @mock.patch.object(storagecenter_api.SCApi, '_is_virtualport_mode', return_value=True) def test_find_iscsi_properties_multi_portals_duplicates( self, mock_is_virtualport_mode, mock_find_mappings, mock_find_domains, mock_find_ctrl_port, mock_find_active_controller, mock_close_connection, mock_open_connection, mock_init): # Test case where there are multiple portals and 
mock_find_ctrl_port.return_value = { 'iscsiName': 'iqn.2002-03.com.compellent:5000d31000fcbe43'} scserver = {'instanceId': '64702.30'} res = self.scapi.find_iscsi_properties(self.VOLUME, scserver) self.assertTrue(mock_find_mappings.called) self.assertTrue(mock_find_domains.called) self.assertTrue(mock_find_ctrl_port.called) self.assertTrue(mock_find_active_controller.called) self.assertTrue(mock_is_virtualport_mode.called) expected = {'target_discovered': False, 'target_iqn': u'iqn.2002-03.com.compellent:5000d31000fcbe43', 'target_iqns': [u'iqn.2002-03.com.compellent:5000d31000fcbe43', u'iqn.2002-03.com.compellent:5000d31000fcbe43'], 'target_lun': 1, 'target_luns': [1, 1], 'target_portal': u'192.168.0.25:3260', 'target_portals': [u'192.168.0.25:3260', u'192.168.0.21:3260']} self.assertEqual(expected, res, 'Wrong Target Info') @mock.patch.object(storagecenter_api.SCApi, '_find_active_controller', return_value='64702.64702') @mock.patch.object(storagecenter_api.SCApi, '_find_controller_port') @mock.patch.object(storagecenter_api.SCApi, '_find_domains', return_value=ISCSI_FLT_DOMAINS_MULTI_PORTALS) @mock.patch.object(storagecenter_api.SCApi, '_find_mappings', return_value=MAPPINGS_MULTI_PORTAL) @mock.patch.object(storagecenter_api.SCApi, '_is_virtualport_mode', return_value=True) def test_find_iscsi_properties_excluded(self, mock_is_virtualport_mode, mock_find_mappings, mock_find_domains, mock_find_ctrl_port, mock_find_active_controller, mock_close_connection, mock_open_connection, mock_init): # Test case where ips are blacklisted using excluded_domain_ips mock_find_ctrl_port.side_effect = [ {'iscsiName': 'iqn.2002-03.com.compellent:5000d31000fcbe43'}, {'iscsiName': 'iqn.2002-03.com.compellent:5000d31000fcbe44'}] scserver = {'instanceId': '64702.30'} self.scapi.excluded_domain_ips = ['192.168.0.21'] self.scapi.included_domain_ips = [] res = self.scapi.find_iscsi_properties(self.VOLUME, scserver) self.assertTrue(mock_find_mappings.called) self.assertTrue(mock_find_domains.called) self.assertTrue(mock_find_ctrl_port.called) self.assertTrue(mock_find_active_controller.called) self.assertTrue(mock_is_virtualport_mode.called) expected = {'target_discovered': False, 'target_iqn': u'iqn.2002-03.com.compellent:5000d31000fcbe44', 'target_iqns': [u'iqn.2002-03.com.compellent:5000d31000fcbe44', u'iqn.2002-03.com.compellent:5000d31000fcbe43'], 'target_lun': 1, 'target_luns': [1, 1], 'target_portal': u'192.168.0.25:3260', 'target_portals': [u'192.168.0.25:3260', u'192.168.0.25:3260']} self.assertEqual(expected, res, 'Wrong Target Info') @mock.patch.object(storagecenter_api.SCApi, '_find_active_controller', return_value='64702.64702') @mock.patch.object(storagecenter_api.SCApi, '_find_controller_port') @mock.patch.object(storagecenter_api.SCApi, '_find_domains', return_value=ISCSI_FLT_DOMAINS_MULTI_PORTALS) @mock.patch.object(storagecenter_api.SCApi, '_find_mappings', return_value=MAPPINGS_MULTI_PORTAL) @mock.patch.object(storagecenter_api.SCApi, '_is_virtualport_mode', return_value=True) def test_find_iscsi_properties_included(self, mock_is_virtualport_mode, mock_find_mappings, mock_find_domains, mock_find_ctrl_port, mock_find_active_controller, mock_close_connection, mock_open_connection, mock_init): # Test case where of included_domain_ips aka whitelisting mock_find_ctrl_port.side_effect = [ {'iscsiName': 'iqn.2002-03.com.compellent:5000d31000fcbe43'}, {'iscsiName': 'iqn.2002-03.com.compellent:5000d31000fcbe44'}] scserver = {'instanceId': '64702.30'} self.scapi.excluded_domain_ips = [] 
self.scapi.included_domain_ips = ['192.168.0.25'] res = self.scapi.find_iscsi_properties(self.VOLUME, scserver) self.assertTrue(mock_find_mappings.called) self.assertTrue(mock_find_domains.called) self.assertTrue(mock_find_ctrl_port.called) self.assertTrue(mock_find_active_controller.called) self.assertTrue(mock_is_virtualport_mode.called) expected = {'target_discovered': False, 'target_iqn': u'iqn.2002-03.com.compellent:5000d31000fcbe44', 'target_iqns': [u'iqn.2002-03.com.compellent:5000d31000fcbe44', u'iqn.2002-03.com.compellent:5000d31000fcbe43'], 'target_lun': 1, 'target_luns': [1, 1], 'target_portal': u'192.168.0.25:3260', 'target_portals': [u'192.168.0.25:3260', u'192.168.0.25:3260']} self.assertEqual(expected, res, 'Wrong Target Info') @mock.patch.object(storagecenter_api.SCApi, '_find_active_controller', return_value='64702.64702') @mock.patch.object(storagecenter_api.SCApi, '_find_controller_port') @mock.patch.object(storagecenter_api.SCApi, '_find_domains', return_value=ISCSI_FLT_DOMAINS_MULTI_PORTALS_IPV6) @mock.patch.object(storagecenter_api.SCApi, '_find_mappings', return_value=MAPPINGS_MULTI_PORTAL) @mock.patch.object(storagecenter_api.SCApi, '_is_virtualport_mode', return_value=True) def test_find_iscsi_properties_included1(self, mock_is_virtualport_mode, mock_find_mappings, mock_find_domains, mock_find_ctrl_port, mock_find_active_controller, mock_close_connection, mock_open_connection, mock_init): # Test case included_domain_ips aka whitelisting # For ipv6 addresses mock_find_ctrl_port.side_effect = [ {'iscsiName': 'iqn.2002-03.com.compellent:5000d31000fcbe43'}, {'iscsiName': 'iqn.2002-03.com.compellent:5000d31000fcbe44'}] scserver = {'instanceId': '64702.30'} self.scapi.excluded_domain_ips = [] self.scapi.included_domain_ips = ['0:0:0:0:0:ffff:c0a8:19'] res = self.scapi.find_iscsi_properties(self.VOLUME, scserver) self.assertTrue(mock_find_mappings.called) self.assertTrue(mock_find_domains.called) self.assertTrue(mock_find_ctrl_port.called) self.assertTrue(mock_find_active_controller.called) self.assertTrue(mock_is_virtualport_mode.called) expected = {'target_discovered': False, 'target_iqn': u'iqn.2002-03.com.compellent:5000d31000fcbe44', 'target_iqns': [u'iqn.2002-03.com.compellent:5000d31000fcbe44', u'iqn.2002-03.com.compellent:5000d31000fcbe43'], 'target_lun': 1, 'target_luns': [1, 1], 'target_portal': u'0:0:0:0:0:ffff:c0a8:19:3260', 'target_portals': [u'0:0:0:0:0:ffff:c0a8:19:3260', u'0:0:0:0:0:ffff:c0a8:19:3260']} self.assertEqual(expected, res, 'Wrong Target Info') @mock.patch.object(storagecenter_api.SCApi, '_find_active_controller', return_value='64702.64702') @mock.patch.object(storagecenter_api.SCApi, '_find_controller_port') @mock.patch.object(storagecenter_api.SCApi, '_find_domains', return_value=ISCSI_FLT_DOMAINS_MULTI_PORTALS) @mock.patch.object(storagecenter_api.SCApi, '_find_mappings', return_value=MAPPINGS_MULTI_PORTAL) @mock.patch.object(storagecenter_api.SCApi, '_is_virtualport_mode', return_value=True) def test_find_iscsi_properties_include2(self, mock_is_virtualport_mode, mock_find_mappings, mock_find_domains, mock_find_ctrl_port, mock_find_active_controller, mock_close_connection, mock_open_connection, mock_init): # Test case where included_domain_ips(whitelisting) takes precendence # over excluded_domain_ips ( blacklisting) mock_find_ctrl_port.side_effect = [ {'iscsiName': 'iqn.2002-03.com.compellent:5000d31000fcbe43'}, {'iscsiName': 'iqn.2002-03.com.compellent:5000d31000fcbe44'}] scserver = {'instanceId': '64702.30'} self.scapi.excluded_domain_ips 
= ['192.168.0.21'] self.scapi.included_domain_ips = ['192.168.0.25', '192.168.0.21'] res = self.scapi.find_iscsi_properties(self.VOLUME, scserver) self.assertTrue(mock_find_mappings.called) self.assertTrue(mock_find_domains.called) self.assertTrue(mock_find_ctrl_port.called) self.assertTrue(mock_find_active_controller.called) self.assertTrue(mock_is_virtualport_mode.called) expected = {'target_discovered': False, 'target_iqn': u'iqn.2002-03.com.compellent:5000d31000fcbe44', 'target_iqns': [u'iqn.2002-03.com.compellent:5000d31000fcbe44', u'iqn.2002-03.com.compellent:5000d31000fcbe43', u'iqn.2002-03.com.compellent:5000d31000fcbe43', u'iqn.2002-03.com.compellent:5000d31000fcbe44'], 'target_lun': 1, 'target_luns': [1, 1, 1, 1], 'target_portal': u'192.168.0.25:3260', 'target_portals': [u'192.168.0.25:3260', u'192.168.0.21:3260', u'192.168.0.25:3260', u'192.168.0.21:3260']} self.assertEqual(expected, res, 'Wrong Target Info') @mock.patch.object(storagecenter_api.SCApi, '_find_active_controller', return_value='64702.5764839588723736131.91') @mock.patch.object(storagecenter_api.SCApi, '_find_controller_port', return_value=ISCSI_CTRLR_PORT) @mock.patch.object(storagecenter_api.SCApi, '_find_mappings', return_value=MAPPINGS) @mock.patch.object(storagecenter_api.SCApi, '_is_virtualport_mode', return_value=False) @mock.patch.object(storagecenter_api.SCApi, '_find_controller_port_iscsi_config', return_value=ISCSI_CONFIG) def test_find_iscsi_properties_mappings_legacy( self, mock_find_controller_port_iscsi_config, mock_is_virtualport_mode, mock_find_mappings, mock_find_ctrl_port, mock_find_active_controller, mock_close_connection, mock_open_connection, mock_init): scserver = {'instanceId': '64702.30'} res = self.scapi.find_iscsi_properties(self.VOLUME, scserver) self.assertTrue(mock_is_virtualport_mode.called) self.assertTrue(mock_find_mappings.called) self.assertTrue(mock_find_ctrl_port.called) self.assertTrue(mock_find_controller_port_iscsi_config.called) self.assertTrue(mock_find_active_controller.called) expected = {'target_discovered': False, 'target_iqn': u'iqn.2002-03.com.compellent:5000d31000fcbe43', 'target_iqns': [u'iqn.2002-03.com.compellent:5000d31000fcbe43'], 'target_lun': 1, 'target_luns': [1], 'target_portal': u'192.168.0.21:3260', 'target_portals': [u'192.168.0.21:3260']} self.assertEqual(expected, res, 'Wrong Target Info') @mock.patch.object(storagecenter_api.SCApi, '_find_active_controller', return_value='64702.5764839588723736131.91') @mock.patch.object(storagecenter_api.SCApi, '_find_controller_port', return_value=ISCSI_CTRLR_PORT) @mock.patch.object(storagecenter_api.SCApi, '_find_mappings', return_value=MAPPINGS) @mock.patch.object(storagecenter_api.SCApi, '_is_virtualport_mode', return_value=False) @mock.patch.object(storagecenter_api.SCApi, '_find_controller_port_iscsi_config', return_value=None) def test_find_iscsi_properties_mappings_legacy_no_iscsi_config( self, mock_find_controller_port_iscsi_config, mock_is_virtualport_mode, mock_find_mappings, mock_find_ctrl_port, mock_find_active_controller, mock_close_connection, mock_open_connection, mock_init): scserver = {'instanceId': '64702.30'} self.assertRaises(exception.VolumeBackendAPIException, self.scapi.find_iscsi_properties, self.VOLUME, scserver) self.assertTrue(mock_is_virtualport_mode.called) self.assertTrue(mock_find_mappings.called) self.assertTrue(mock_find_ctrl_port.called) self.assertTrue(mock_find_controller_port_iscsi_config.called) self.assertTrue(mock_find_active_controller.called) 
@mock.patch.object(storagecenter_api.SCApi, '_find_active_controller', return_value='64702.64702') @mock.patch.object(storagecenter_api.SCApi, '_find_controller_port', return_value=ISCSI_CTRLR_PORT) @mock.patch.object(storagecenter_api.SCApi, '_find_mappings', return_value=MAPPINGS_READ_ONLY) @mock.patch.object(storagecenter_api.SCApi, '_is_virtualport_mode', return_value=False) @mock.patch.object(storagecenter_api.SCApi, '_find_controller_port_iscsi_config', return_value=ISCSI_CONFIG) def test_find_iscsi_properties_ro_legacy(self, mock_find_iscsi_config, mock_is_virtualport_mode, mock_find_mappings, mock_find_ctrl_port, mock_find_active_controller, mock_close_connection, mock_open_connection, mock_init): scserver = {'instanceId': '64702.30'} # Test case where Read Only mappings are found res = self.scapi.find_iscsi_properties(self.VOLUME, scserver) self.assertTrue(mock_is_virtualport_mode.called) self.assertTrue(mock_find_mappings.called) self.assertTrue(mock_find_ctrl_port.called) self.assertTrue(mock_find_active_controller.called) self.assertTrue(mock_find_iscsi_config.called) expected = {'target_discovered': False, 'target_iqn': u'iqn.2002-03.com.compellent:5000d31000fcbe43', 'target_iqns': [u'iqn.2002-03.com.compellent:5000d31000fcbe43'], 'target_lun': 1, 'target_luns': [1], 'target_portal': u'192.168.0.21:3260', 'target_portals': [u'192.168.0.21:3260']} self.assertEqual(expected, res, 'Wrong Target Info') @mock.patch.object(storagecenter_api.SCApi, '_find_active_controller', return_value='64702.64702') @mock.patch.object(storagecenter_api.SCApi, '_find_controller_port') @mock.patch.object(storagecenter_api.SCApi, '_find_mappings', return_value=MAPPINGS_MULTI_PORTAL) @mock.patch.object(storagecenter_api.SCApi, '_is_virtualport_mode', return_value=False) @mock.patch.object(storagecenter_api.SCApi, '_find_controller_port_iscsi_config', return_value=ISCSI_CONFIG) def test_find_iscsi_properties_multi_portals_legacy( self, mock_find_controller_port_iscsi_config, mock_is_virtualport_mode, mock_find_mappings, mock_find_ctrl_port, mock_find_active_controller, mock_close_connection, mock_open_connection, mock_init): mock_find_ctrl_port.side_effect = [ {'iscsiName': 'iqn.2002-03.com.compellent:5000d31000fcbe43'}, {'iscsiName': 'iqn.2002-03.com.compellent:5000d31000fcbe44'}] scserver = {'instanceId': '64702.30'} # Test case where there are multiple portals res = self.scapi.find_iscsi_properties(self.VOLUME, scserver) self.assertTrue(mock_find_mappings.called) self.assertTrue(mock_find_ctrl_port.called) self.assertTrue(mock_find_active_controller.called) self.assertTrue(mock_is_virtualport_mode.called) self.assertTrue(mock_find_controller_port_iscsi_config.called) # We're feeding the same info back multiple times the information # will be scrubbed to a single item. 
expected = {'target_discovered': False, 'target_iqn': u'iqn.2002-03.com.compellent:5000d31000fcbe44', 'target_iqns': [u'iqn.2002-03.com.compellent:5000d31000fcbe44', u'iqn.2002-03.com.compellent:5000d31000fcbe43'], 'target_lun': 1, 'target_luns': [1, 1], 'target_portal': u'192.168.0.21:3260', 'target_portals': [u'192.168.0.21:3260', u'192.168.0.21:3260']} self.assertEqual(expected, res, 'Wrong Target Info') @mock.patch.object(storagecenter_api.SCApi, '_first_result', return_value=MAP_PROFILE) @mock.patch.object(storagecenter_api.HttpClient, 'post', return_value=RESPONSE_200) @mock.patch.object(storagecenter_api.SCApi, '_find_mapping_profiles', return_value=[]) def test_map_volume(self, mock_find_mapping_profiles, mock_post, mock_first_result, mock_close_connection, mock_open_connection, mock_init): res = self.scapi.map_volume(self.VOLUME, self.SCSERVER) self.assertTrue(mock_find_mapping_profiles.called) self.assertTrue(mock_post.called) self.assertTrue(mock_first_result.called) self.assertEqual(self.MAP_PROFILE, res, 'Incorrect ScMappingProfile') @mock.patch.object(storagecenter_api.SCApi, '_first_result', return_value=MAP_PROFILE) @mock.patch.object(storagecenter_api.HttpClient, 'post', return_value=RESPONSE_200) @mock.patch.object(storagecenter_api.SCApi, '_find_mapping_profiles', return_value=MAP_PROFILES) def test_map_volume_existing_mapping(self, mock_find_mappings, mock_post, mock_first_result, mock_close_connection, mock_open_connection, mock_init): res = self.scapi.map_volume(self.VOLUME, self.SCSERVER) self.assertTrue(mock_find_mappings.called) self.assertFalse(mock_post.called) self.assertFalse(mock_first_result.called) self.assertEqual(self.MAP_PROFILE, res, 'Incorrect ScMappingProfile') @mock.patch.object(storagecenter_api.SCApi, '_first_result', return_value=MAP_PROFILE) @mock.patch.object(storagecenter_api.HttpClient, 'post', return_value=RESPONSE_200) @mock.patch.object(storagecenter_api.SCApi, '_find_mapping_profiles', return_value=[]) def test_map_volume_existing_mapping_not_us(self, mock_find_mappings, mock_post, mock_first_result, mock_close_connection, mock_open_connection, mock_init): server = {'instanceId': 64702.48, 'name': 'Server X'} res = self.scapi.map_volume(self.VOLUME, server) self.assertTrue(mock_find_mappings.called) self.assertTrue(mock_post.called) self.assertTrue(mock_first_result.called) self.assertEqual(self.MAP_PROFILE, res, 'Incorrect ScMappingProfile') @mock.patch.object(storagecenter_api.SCApi, '_get_id') @mock.patch.object(storagecenter_api.SCApi, '_first_result') @mock.patch.object(storagecenter_api.HttpClient, 'post') def test_map_volume_no_vol_id(self, mock_post, mock_first_result, mock_get_id, mock_close_connection, mock_open_connection, mock_init): # Test case where ScVolume instanceId is None mock_get_id.side_effect = [None, '64702.47'] res = self.scapi.map_volume(self.VOLUME, self.SCSERVER) self.assertFalse(mock_post.called) self.assertFalse(mock_first_result.called) self.assertIsNone(res, 'None expected') @mock.patch.object(storagecenter_api.SCApi, '_get_id') @mock.patch.object(storagecenter_api.SCApi, '_first_result') @mock.patch.object(storagecenter_api.HttpClient, 'post') def test_map_volume_no_server_id(self, mock_post, mock_first_result, mock_get_id, mock_close_connection, mock_open_connection, mock_init): # Test case where ScServer instanceId is None mock_get_id.side_effect = ['64702.3494', None] res = self.scapi.map_volume(self.VOLUME, self.SCSERVER) self.assertFalse(mock_post.called) self.assertFalse(mock_first_result.called)
self.assertIsNone(res, 'None expected') @mock.patch.object(storagecenter_api.HttpClient, 'post', return_value=RESPONSE_204) @mock.patch.object(storagecenter_api.SCApi, '_find_mapping_profiles', return_value=[]) def test_map_volume_failure(self, mock_find_mapping_profiles, mock_post, mock_close_connection, mock_open_connection, mock_init): # Test case where mapping volume to server fails res = self.scapi.map_volume(self.VOLUME, self.SCSERVER) self.assertTrue(mock_find_mapping_profiles.called) self.assertTrue(mock_post.called) self.assertIsNone(res, 'None expected') @mock.patch.object(storagecenter_api.HttpClient, 'delete', return_value=RESPONSE_200) @mock.patch.object(storagecenter_api.SCApi, '_find_mapping_profiles', return_value=MAP_PROFILES) @mock.patch.object(storagecenter_api.SCApi, '_get_json', return_value={'result': True}) def test_unmap_volume(self, mock_get_json, mock_find_mapping_profiles, mock_delete, mock_close_connection, mock_open_connection, mock_init): res = self.scapi.unmap_volume(self.VOLUME, self.SCSERVER) self.assertTrue(mock_find_mapping_profiles.called) self.assertTrue(mock_delete.called) self.assertTrue(res) @mock.patch.object(storagecenter_api.SCApi, '_find_mapping_profiles', return_value=MAP_PROFILES) @mock.patch.object(storagecenter_api.HttpClient, 'delete', return_value=RESPONSE_204) def test_unmap_volume_failure(self, mock_delete, mock_find_mapping_profiles, mock_close_connection, mock_open_connection, mock_init): res = self.scapi.unmap_volume(self.VOLUME, self.SCSERVER) self.assertTrue(mock_find_mapping_profiles.called) self.assertTrue(mock_delete.called) self.assertFalse(res) @mock.patch.object(storagecenter_api.SCApi, '_find_mapping_profiles', return_value=[]) def test_unmap_volume_no_map_profile(self, mock_find_mapping_profiles, mock_close_connection, mock_open_connection, mock_init): res = self.scapi.unmap_volume(self.VOLUME, self.SCSERVER) self.assertTrue(mock_find_mapping_profiles.called) self.assertTrue(res) @mock.patch.object(storagecenter_api.HttpClient, 'delete', return_value=RESPONSE_204) @mock.patch.object(storagecenter_api.SCApi, '_find_mapping_profiles', return_value=MAP_PROFILES) def test_unmap_volume_del_fail(self, mock_find_mapping_profiles, mock_delete, mock_close_connection, mock_open_connection, mock_init): res = self.scapi.unmap_volume(self.VOLUME, self.SCSERVER) self.assertTrue(mock_find_mapping_profiles.called) self.assertTrue(mock_delete.called) self.assertFalse(res) @mock.patch.object(storagecenter_api.SCApi, '_get_id') @mock.patch.object(storagecenter_api.HttpClient, 'delete', return_value=RESPONSE_200) @mock.patch.object(storagecenter_api.SCApi, '_find_mapping_profiles', return_value=MAP_PROFILES) def test_unmap_volume_no_vol_id(self, mock_find_mapping_profiles, mock_delete, mock_get_id, mock_close_connection, mock_open_connection, mock_init): # Test case where ScVolume instanceId = None mock_get_id.side_effect = [None, '64702.47'] res = self.scapi.unmap_volume(self.VOLUME, self.SCSERVER) self.assertFalse(mock_find_mapping_profiles.called) self.assertFalse(mock_delete.called) self.assertTrue(res) @mock.patch.object(storagecenter_api.SCApi, '_get_id') @mock.patch.object(storagecenter_api.HttpClient, 'delete', return_value=RESPONSE_200) @mock.patch.object(storagecenter_api.SCApi, '_find_mapping_profiles', return_value=MAP_PROFILES) def test_unmap_volume_no_server_id(self, mock_find_mapping_profiles, mock_delete, mock_get_id, mock_close_connection, mock_open_connection, mock_init): # Test case where ScServer instanceId = None
mock_get_id.side_effect = ['64702.3494', None] res = self.scapi.unmap_volume(self.VOLUME, self.SCSERVER) self.assertFalse(mock_find_mapping_profiles.called) self.assertFalse(mock_delete.called) self.assertTrue(res) @mock.patch.object(storagecenter_api.HttpClient, 'delete') @mock.patch.object(storagecenter_api.HttpClient, 'get') @mock.patch.object(storagecenter_api.SCApi, '_find_mapping_profiles') @mock.patch.object(storagecenter_api.SCApi, '_get_json') def test_unmap_all(self, mock_get_json, mock_find_mapping_profiles, mock_get, mock_delete, mock_close_connection, mock_open_connection, mock_init): mock_delete.return_value = self.RESPONSE_200 mock_get.return_value = self.RESPONSE_200 mock_find_mapping_profiles.return_value = [ {'instanceId': '12345.0.1', 'server': {'instanceId': '12345.100', 'instanceName': 'Srv1'}}, {'instanceId': '12345.0.2', 'server': {'instanceId': '12345.101', 'instanceName': 'Srv2'}}, {'instanceId': '12345.0.3', 'server': {'instanceId': '12345.102', 'instanceName': 'Srv3'}}, ] # server, result pairs mock_get_json.side_effect = [ {'instanceId': '12345.100', 'instanceName': 'Srv1', 'type': 'Physical'}, {'result': True}, {'instanceId': '12345.101', 'instanceName': 'Srv2', 'type': 'Physical'}, {'result': True}, {'instanceId': '12345.102', 'instanceName': 'Srv3', 'type': 'Physical'}, {'result': True} ] vol = {'instanceId': '12345.0', 'name': 'vol1'} res = self.scapi.unmap_all(vol) # Success and 3 delete calls self.assertTrue(res) self.assertEqual(3, mock_delete.call_count) @mock.patch.object(storagecenter_api.HttpClient, 'delete') @mock.patch.object(storagecenter_api.HttpClient, 'get') @mock.patch.object(storagecenter_api.SCApi, '_find_mapping_profiles') @mock.patch.object(storagecenter_api.SCApi, '_get_json') def test_unmap_all_with_remote(self, mock_get_json, mock_find_mapping_profiles, mock_get, mock_delete, mock_close_connection, mock_open_connection, mock_init): mock_delete.return_value = self.RESPONSE_200 mock_get.return_value = self.RESPONSE_200 mock_find_mapping_profiles.return_value = [ {'instanceId': '12345.0.1', 'server': {'instanceId': '12345.100', 'instanceName': 'Srv1'}}, {'instanceId': '12345.0.2', 'server': {'instanceId': '12345.101', 'instanceName': 'Srv2'}}, {'instanceId': '12345.0.3', 'server': {'instanceId': '12345.102', 'instanceName': 'Srv3'}}, ] # server, result pairs mock_get_json.side_effect = [ {'instanceId': '12345.100', 'instanceName': 'Srv1', 'type': 'Physical'}, {'result': True}, {'instanceId': '12345.101', 'instanceName': 'Srv2', 'type': 'RemoteStorageCenter'}, {'instanceId': '12345.102', 'instanceName': 'Srv3', 'type': 'Physical'}, {'result': True} ] vol = {'instanceId': '12345.0', 'name': 'vol1'} res = self.scapi.unmap_all(vol) # Should succeed but call delete only twice self.assertTrue(res) self.assertEqual(2, mock_delete.call_count) @mock.patch.object(storagecenter_api.HttpClient, 'delete') @mock.patch.object(storagecenter_api.HttpClient, 'get') @mock.patch.object(storagecenter_api.SCApi, '_find_mapping_profiles') @mock.patch.object(storagecenter_api.SCApi, '_get_json') def test_unmap_all_fail(self, mock_get_json, mock_find_mapping_profiles, mock_get, mock_delete, mock_close_connection, mock_open_connection, mock_init): mock_delete.return_value = self.RESPONSE_400 mock_get.return_value = self.RESPONSE_200 mock_find_mapping_profiles.return_value = [ {'instanceId': '12345.0.1', 'server': {'instanceId': '12345.100', 'instanceName': 'Srv1'}}, {'instanceId': '12345.0.2', 'server': {'instanceId': '12345.101', 'instanceName': 'Srv2'}}, 
{'instanceId': '12345.0.3', 'server': {'instanceId': '12345.102', 'instanceName': 'Srv3'}}, ] # server, result pairs mock_get_json.side_effect = [ {'instanceId': '12345.100', 'instanceName': 'Srv1', 'type': 'Physical'} ] vol = {'instanceId': '12345.0', 'name': 'vol1'} res = self.scapi.unmap_all(vol) self.assertFalse(res) @mock.patch.object(storagecenter_api.SCApi, '_find_mapping_profiles') def test_unmap_all_no_profiles(self, mock_find_mapping_profiles, mock_close_connection, mock_open_connection, mock_init): mock_find_mapping_profiles.return_value = [] vol = {'instanceId': '12345.0', 'name': 'vol1'} res = self.scapi.unmap_all(vol) # Should exit with success. self.assertTrue(res) @mock.patch.object(storagecenter_api.SCApi, '_get_json', return_value=[{'a': 1}, {'a': 2}]) @mock.patch.object(storagecenter_api.HttpClient, 'get', return_value=RESPONSE_200) def test_find_controller_port_iscsi_config(self, mock_get, mock_get_json, mock_close_connection, mock_open_connection, mock_init): # Not much to test here. Just make sure we call our stuff and # that we return the first item returned to us. res = self.scapi._find_controller_port_iscsi_config('guid') self.assertTrue(mock_get.called) self.assertTrue(mock_get_json.called) self.assertEqual({'a': 1}, res) @mock.patch.object(storagecenter_api.HttpClient, 'get', return_value=RESPONSE_400) def test_find_controller_port_iscsi_config_err(self, mock_get, mock_close_connection, mock_open_connection, mock_init): res = self.scapi._find_controller_port_iscsi_config('guid') self.assertTrue(mock_get.called) self.assertIsNone(res) @mock.patch.object(storagecenter_api.SCApi, '_get_json', return_value=STRG_USAGE) @mock.patch.object(storagecenter_api.HttpClient, 'get', return_value=RESPONSE_200) def test_get_storage_usage(self, mock_get, mock_get_json, mock_close_connection, mock_open_connection, mock_init): res = self.scapi.get_storage_usage() self.assertTrue(mock_get.called) self.assertTrue(mock_get_json.called) self.assertEqual(self.STRG_USAGE, res, 'Unexpected ScStorageUsage') @mock.patch.object(storagecenter_api.HttpClient, 'get', return_value=RESPONSE_204) def test_get_storage_usage_no_ssn(self, mock_get, mock_close_connection, mock_open_connection, mock_init): # Test case where SSN is none self.scapi.ssn = None res = self.scapi.get_storage_usage() self.scapi.ssn = 12345 self.assertFalse(mock_get.called) self.assertIsNone(res, 'None expected') @mock.patch.object(storagecenter_api.HttpClient, 'get', return_value=RESPONSE_204) # Test case where get of Storage Usage fails def test_get_storage_usage_failure(self, mock_get, mock_close_connection, mock_open_connection, mock_init): res = self.scapi.get_storage_usage() self.assertTrue(mock_get.called) self.assertIsNone(res, 'None expected') @mock.patch.object(storagecenter_api.SCApi, '_first_result', return_value=RPLAY) @mock.patch.object(storagecenter_api.HttpClient, 'post', return_value=RESPONSE_200) def test_create_replay(self, mock_post, mock_first_result, mock_close_connection, mock_open_connection, mock_init): res = self.scapi.create_replay(self.VOLUME, 'Test Replay', 60) self.assertTrue(mock_post.called) self.assertTrue(mock_first_result.called) self.assertEqual(self.RPLAY, res, 'Unexpected ScReplay') @mock.patch.object(storagecenter_api.SCApi, '_first_result', return_value=RPLAY) @mock.patch.object(storagecenter_api.SCApi, '_init_volume') @mock.patch.object(storagecenter_api.SCApi, 'get_volume') @mock.patch.object(storagecenter_api.HttpClient, 'post', return_value=RESPONSE_200) def 
test_create_replay_inact_vol(self, mock_post, mock_get_volume, mock_init_volume, mock_first_result, mock_close_connection, mock_open_connection, mock_init): # Test case where the specified volume is inactive mock_get_volume.return_value = self.VOLUME res = self.scapi.create_replay(self.INACTIVE_VOLUME, 'Test Replay', 60) self.assertTrue(mock_post.called) mock_init_volume.assert_called_once_with(self.INACTIVE_VOLUME) self.assertTrue(mock_first_result.called) self.assertEqual(self.RPLAY, res, 'Unexpected ScReplay') @mock.patch.object(storagecenter_api.SCApi, '_init_volume') @mock.patch.object(storagecenter_api.SCApi, 'get_volume') def test_create_replay_inact_vol_init_fail( self, mock_get_volume, mock_init_volume, mock_close_connection, mock_open_connection, mock_init): # Test case where the specified volume is inactive mock_get_volume.return_value = self.INACTIVE_VOLUME self.assertRaises(exception.VolumeBackendAPIException, self.scapi.create_replay, self.INACTIVE_VOLUME, 'Test Replay', 60) mock_init_volume.assert_called_once_with(self.INACTIVE_VOLUME) @mock.patch.object(storagecenter_api.SCApi, '_first_result', return_value=RPLAY) @mock.patch.object(storagecenter_api.HttpClient, 'post', return_value=RESPONSE_200) def test_create_replay_no_expire(self, mock_post, mock_first_result, mock_close_connection, mock_open_connection, mock_init): res = self.scapi.create_replay(self.VOLUME, 'Test Replay', 0) self.assertTrue(mock_post.called) self.assertTrue(mock_first_result.called) self.assertEqual(self.RPLAY, res, 'Unexpected ScReplay') @mock.patch.object(storagecenter_api.HttpClient, 'post', return_value=RESPONSE_200) def test_create_replay_no_volume(self, mock_post, mock_close_connection, mock_open_connection, mock_init): # Test case where no ScVolume is specified res = self.scapi.create_replay(None, 'Test Replay', 60) self.assertIsNone(res, 'Expected None') @mock.patch.object(storagecenter_api.HttpClient, 'post', return_value=RESPONSE_204) def test_create_replay_failure(self, mock_post, mock_close_connection, mock_open_connection, mock_init): # Test case where create ScReplay fails res = self.scapi.create_replay(self.VOLUME, 'Test Replay', 60) self.assertTrue(mock_post.called) self.assertIsNone(res, 'Expected None') @mock.patch.object(storagecenter_api.SCApi, '_get_json', return_value=RPLAYS) @mock.patch.object(storagecenter_api.HttpClient, 'get', return_value=RESPONSE_200) def test_find_replay(self, mock_post, mock_get_json, mock_close_connection, mock_open_connection, mock_init): res = self.scapi.find_replay(self.VOLUME, u'Cinder Test Replay012345678910') self.assertTrue(mock_post.called) self.assertTrue(mock_get_json.called) self.assertEqual(self.TST_RPLAY, res, 'Unexpected ScReplay') @mock.patch.object(storagecenter_api.SCApi, '_get_json', return_value=[]) @mock.patch.object(storagecenter_api.HttpClient, 'get', return_value=RESPONSE_200) def test_find_replay_no_replays(self, mock_post, mock_get_json, mock_close_connection, mock_open_connection, mock_init): # Test case where no replays are found res = self.scapi.find_replay(self.VOLUME, u'Cinder Test Replay012345678910') self.assertTrue(mock_post.called) self.assertTrue(mock_get_json.called) self.assertIsNone(res, 'Expected None') @mock.patch.object(storagecenter_api.SCApi, '_get_json', return_value=None) @mock.patch.object(storagecenter_api.HttpClient, 'get', return_value=RESPONSE_204) def test_find_replay_failure(self, mock_post, mock_get_json, mock_close_connection, mock_open_connection, mock_init): # Test case where None is returned for 
replays res = self.scapi.find_replay(self.VOLUME, u'Cinder Test Replay012345678910') self.assertTrue(mock_post.called) self.assertTrue(mock_get_json.called) self.assertIsNone(res, 'Expected None') @mock.patch.object(storagecenter_api.SCApi, 'find_replay', return_value=RPLAYS) @mock.patch.object(storagecenter_api.HttpClient, 'post', return_value=RESPONSE_204) def test_delete_replay(self, mock_post, mock_find_replay, mock_close_connection, mock_open_connection, mock_init): replayId = u'Cinder Test Replay012345678910' res = self.scapi.delete_replay(self.VOLUME, replayId) self.assertTrue(mock_post.called) mock_find_replay.assert_called_once_with(self.VOLUME, replayId) self.assertTrue(res, 'Expected True') @mock.patch.object(storagecenter_api.SCApi, 'find_replay', return_value=None) @mock.patch.object(storagecenter_api.HttpClient, 'post', return_value=RESPONSE_204) def test_delete_replay_no_replay(self, mock_post, mock_find_replay, mock_close_connection, mock_open_connection, mock_init): # Test case where specified ScReplay does not exist replayId = u'Cinder Test Replay012345678910' res = self.scapi.delete_replay(self.VOLUME, replayId) self.assertFalse(mock_post.called) mock_find_replay.assert_called_once_with(self.VOLUME, replayId) self.assertTrue(res, 'Expected True') @mock.patch.object(storagecenter_api.SCApi, 'find_replay', return_value=TST_RPLAY) @mock.patch.object(storagecenter_api.HttpClient, 'post', return_value=RESPONSE_400) def test_delete_replay_failure(self, mock_post, mock_find_replay, mock_close_connection, mock_open_connection, mock_init): # Test case where delete ScReplay results in an error replayId = u'Cinder Test Replay012345678910' res = self.scapi.delete_replay(self.VOLUME, replayId) self.assertTrue(mock_post.called) mock_find_replay.assert_called_once_with(self.VOLUME, replayId) self.assertFalse(res, 'Expected False') @mock.patch.object(storagecenter_api.SCApi, '_first_result', return_value=VOLUME) @mock.patch.object(storagecenter_api.SCApi, '_find_volume_folder', return_value=FLDR) @mock.patch.object(storagecenter_api.HttpClient, 'post', return_value=RESPONSE_200) def test_create_view_volume(self, mock_post, mock_find_volume_folder, mock_first_result, mock_close_connection, mock_open_connection, mock_init): vol_name = u'Test_create_vol' res = self.scapi.create_view_volume( vol_name, self.TST_RPLAY, None, None, None, None) self.assertTrue(mock_post.called) mock_find_volume_folder.assert_called_once_with(True) self.assertTrue(mock_first_result.called) self.assertEqual(self.VOLUME, res, 'Unexpected ScVolume') @mock.patch.object(storagecenter_api.SCApi, '_first_result', return_value=VOLUME) @mock.patch.object(storagecenter_api.SCApi, '_find_volume_folder', return_value=None) @mock.patch.object(storagecenter_api.HttpClient, 'post', return_value=RESPONSE_200) def test_create_view_volume_create_fldr(self, mock_post, mock_find_volume_folder, mock_first_result, mock_close_connection, mock_open_connection, mock_init): # Test case where volume folder does not exist and must be created vol_name = u'Test_create_vol' res = self.scapi.create_view_volume( vol_name, self.TST_RPLAY, None, None, None, None) self.assertTrue(mock_post.called) mock_find_volume_folder.assert_called_once_with(True) self.assertTrue(mock_first_result.called) self.assertEqual(self.VOLUME, res, 'Unexpected ScVolume') @mock.patch.object(storagecenter_api.SCApi, '_first_result', return_value=VOLUME) @mock.patch.object(storagecenter_api.SCApi, '_find_volume_folder', return_value=None) 
@mock.patch.object(storagecenter_api.HttpClient, 'post', return_value=RESPONSE_200) def test_create_view_volume_no_vol_fldr(self, mock_post, mock_find_volume_folder, mock_first_result, mock_close_connection, mock_open_connection, mock_init): # Test case where volume folder does not exist and cannot be created vol_name = u'Test_create_vol' res = self.scapi.create_view_volume( vol_name, self.TST_RPLAY, None, None, None, None) self.assertTrue(mock_post.called) mock_find_volume_folder.assert_called_once_with(True) self.assertTrue(mock_first_result.called) self.assertEqual(self.VOLUME, res, 'Unexpected ScVolume') @mock.patch.object(storagecenter_api.SCApi, '_find_volume_folder', return_value=FLDR) @mock.patch.object(storagecenter_api.HttpClient, 'post', return_value=RESPONSE_204) def test_create_view_volume_failure(self, mock_post, mock_find_volume_folder, mock_close_connection, mock_open_connection, mock_init): # Test case where view volume create fails vol_name = u'Test_create_vol' res = self.scapi.create_view_volume( vol_name, self.TST_RPLAY, None, None, None, None) self.assertTrue(mock_post.called) mock_find_volume_folder.assert_called_once_with(True) self.assertIsNone(res, 'Expected None') @mock.patch.object(storagecenter_api.SCApi, '_first_result') @mock.patch.object(storagecenter_api.SCApi, '_find_volume_folder') @mock.patch.object(storagecenter_api.HttpClient, 'post') @mock.patch.object(storagecenter_api.SCApi, '_find_qos_profile') @mock.patch.object(storagecenter_api.SCApi, '_find_replay_profiles') @mock.patch.object(storagecenter_api.SCApi, 'update_datareduction_profile') def test_create_view_volume_with_profiles( self, mock_update_datareduction_profile, mock_find_replay_profiles, mock_find_qos_profile, mock_post, mock_find_volume_folder, mock_first_result, mock_close_connection, mock_open_connection, mock_init): mock_find_replay_profiles.return_value = (['12345.4'], []) mock_first_result.return_value = {'name': 'name'} mock_post.return_value = self.RESPONSE_200 mock_find_volume_folder.return_value = {'instanceId': '12345.200'} mock_find_qos_profile.side_effect = [{'instanceId': '12345.2'}, {'instanceId': '12345.3'}] screplay = {'instanceId': '12345.100.1'} res = self.scapi.create_view_volume( 'name', screplay, 'replay_profile_string', 'volume_qos', 'group_qos', 'datareductionprofile') expected_payload = {'Name': 'name', 'Notes': 'Created by Dell EMC Cinder Driver', 'VolumeFolder': '12345.200', 'ReplayProfileList': ['12345.4'], 'VolumeQosProfile': '12345.2', 'GroupQosProfile': '12345.3'} mock_find_volume_folder.assert_called_once_with(True) mock_post.assert_called_once_with( 'StorageCenter/ScReplay/12345.100.1/CreateView', expected_payload, True) mock_update_datareduction_profile.assert_called_once_with( {'name': 'name'}, 'datareductionprofile') self.assertEqual({'name': 'name'}, res) @mock.patch.object(storagecenter_api.SCApi, '_first_result') @mock.patch.object(storagecenter_api.SCApi, '_find_volume_folder') @mock.patch.object(storagecenter_api.HttpClient, 'post') @mock.patch.object(storagecenter_api.SCApi, '_find_qos_profile') @mock.patch.object(storagecenter_api.SCApi, '_find_replay_profiles') @mock.patch.object(storagecenter_api.SCApi, 'update_datareduction_profile') def test_create_view_volume_with_profiles_no_dr( self, mock_update_datareduction_profile, mock_find_replay_profiles, mock_find_qos_profile, mock_post, mock_find_volume_folder, mock_first_result, mock_close_connection, mock_open_connection, mock_init): mock_find_replay_profiles.return_value = (['12345.4'], []) 
mock_first_result.return_value = {'name': 'name'} mock_post.return_value = self.RESPONSE_200 mock_find_volume_folder.return_value = {'instanceId': '12345.200'} mock_find_qos_profile.side_effect = [{'instanceId': '12345.2'}, {'instanceId': '12345.3'}] screplay = {'instanceId': '12345.100.1'} res = self.scapi.create_view_volume('name', screplay, 'replay_profile_string', 'volume_qos', 'group_qos', None) expected_payload = {'Name': 'name', 'Notes': 'Created by Dell EMC Cinder Driver', 'VolumeFolder': '12345.200', 'ReplayProfileList': ['12345.4'], 'VolumeQosProfile': '12345.2', 'GroupQosProfile': '12345.3'} mock_find_volume_folder.assert_called_once_with(True) mock_post.assert_called_once_with( 'StorageCenter/ScReplay/12345.100.1/CreateView', expected_payload, True) mock_update_datareduction_profile.assert_not_called() self.assertEqual({'name': 'name'}, res) @mock.patch.object(storagecenter_api.SCApi, '_first_result') @mock.patch.object(storagecenter_api.SCApi, '_find_volume_folder') @mock.patch.object(storagecenter_api.HttpClient, 'post') @mock.patch.object(storagecenter_api.SCApi, '_find_qos_profile') def test_create_view_volume_with_profiles_no_replayprofiles( self, mock_find_qos_profile, mock_post, mock_find_volume_folder, mock_first_result, mock_close_connection, mock_open_connection, mock_init): mock_first_result.return_value = {'name': 'name'} mock_post.return_value = self.RESPONSE_200 mock_find_volume_folder.return_value = {'instanceId': '12345.200'} mock_find_qos_profile.side_effect = [{'instanceId': '12345.2'}, {'instanceId': '12345.3'}] screplay = {'instanceId': '12345.100.1'} res = self.scapi.create_view_volume('name', screplay, None, 'volume_qos', 'group_qos', None) expected_payload = {'Name': 'name', 'Notes': 'Created by Dell EMC Cinder Driver', 'VolumeFolder': '12345.200', 'VolumeQosProfile': '12345.2', 'GroupQosProfile': '12345.3'} mock_find_volume_folder.assert_called_once_with(True) mock_post.assert_called_once_with( 'StorageCenter/ScReplay/12345.100.1/CreateView', expected_payload, True) self.assertEqual({'name': 'name'}, res) @mock.patch.object(storagecenter_api.SCApi, '_find_volume_folder') @mock.patch.object(storagecenter_api.SCApi, '_find_qos_profile') @mock.patch.object(storagecenter_api.SCApi, '_find_replay_profiles') def test_create_view_volume_with_profiles_not_found( self, mock_find_replay_profiles, mock_find_qos_profile, mock_find_volume_folder, mock_close_connection, mock_open_connection, mock_init): mock_find_replay_profiles.return_value = (['12345.4'], []) mock_find_volume_folder.return_value = {'instanceId': '12345.200'} # Our qos profile isn't found. 
mock_find_qos_profile.return_value = None screplay = {'instanceId': '12345.100.1'} self.assertRaises(exception.VolumeBackendAPIException, self.scapi.create_view_volume, 'name', screplay, 'replay_profile_string', 'volume_qos', 'group_qos', 'datareductionprofile') @mock.patch.object(storagecenter_api.HttpClient, 'post') @mock.patch.object(storagecenter_api.HttpClient, 'get') @mock.patch.object(storagecenter_api.SCApi, '_get_json') def test__expire_all_replays(self, mock_get_json, mock_get, mock_post, mock_close_connection, mock_open_connection, mock_init): scvolume = {'instanceId': '12345.1'} mock_get.return_value = self.RESPONSE_200 mock_get_json.return_value = [{'instanceId': '12345.100', 'active': False}, {'instanceId': '12345.101', 'active': True}] self.scapi._expire_all_replays(scvolume) mock_get.assert_called_once_with( 'StorageCenter/ScVolume/12345.1/ReplayList') mock_post.assert_called_once_with( 'StorageCenter/ScReplay/12345.100/Expire', {}, True) @mock.patch.object(storagecenter_api.HttpClient, 'post') @mock.patch.object(storagecenter_api.HttpClient, 'get') def test__expire_all_replays_error(self, mock_get, mock_post, mock_close_connection, mock_open_connection, mock_init): scvolume = {'instanceId': '12345.1'} mock_get.return_value = self.RESPONSE_400 self.scapi._expire_all_replays(scvolume) mock_get.assert_called_once_with( 'StorageCenter/ScVolume/12345.1/ReplayList') self.assertFalse(mock_post.called) @mock.patch.object(storagecenter_api.HttpClient, 'post') @mock.patch.object(storagecenter_api.HttpClient, 'get') @mock.patch.object(storagecenter_api.SCApi, '_get_json') def test__expire_all_replays_no_replays(self, mock_get_json, mock_get, mock_post, mock_close_connection, mock_open_connection, mock_init): scvolume = {'instanceId': '12345.1'} mock_get.return_value = self.RESPONSE_200 mock_get_json.return_value = None self.scapi._expire_all_replays(scvolume) mock_get.assert_called_once_with( 'StorageCenter/ScVolume/12345.1/ReplayList') self.assertFalse(mock_post.called) @mock.patch.object(storagecenter_api.HttpClient, 'get') @mock.patch.object(storagecenter_api.SCApi, '_get_json') def test__wait_for_cmm( self, mock_get_json, mock_get, mock_close_connection, mock_open_connection, mock_init): cmm = {'instanceId': '12345.300'} scvolume = {'name': fake.VOLUME2_ID, 'instanceId': '12345.1'} replayid = '12345.200' mock_get.return_value = self.RESPONSE_200 mock_get_json.return_value = {'instanceId': '12345.300', 'state': 'Finished'} ret = self.scapi._wait_for_cmm(cmm, scvolume, replayid) self.assertTrue(ret) mock_get_json.return_value['state'] = 'Erred' ret = self.scapi._wait_for_cmm(cmm, scvolume, replayid) self.assertFalse(ret) mock_get_json.return_value['state'] = 'Paused' ret = self.scapi._wait_for_cmm(cmm, scvolume, replayid) self.assertFalse(ret) @mock.patch.object(storagecenter_api.HttpClient, 'get') @mock.patch.object(storagecenter_api.SCApi, 'find_replay') def test__wait_for_cmm_404( self, mock_find_replay, mock_get, mock_close_connection, mock_open_connection, mock_init): cmm = {'instanceId': '12345.300'} scvolume = {'name': fake.VOLUME2_ID, 'instanceId': '12345.1'} replayid = '12345.200' mock_get.return_value = self.RESPONSE_404 mock_find_replay.return_value = {'instanceId': '12345.200'} ret = self.scapi._wait_for_cmm(cmm, scvolume, replayid) self.assertTrue(ret) @mock.patch.object(storagecenter_api.HttpClient, 'get') @mock.patch.object(storagecenter_api.SCApi, 'find_replay') @mock.patch.object(eventlet, 'sleep') def test__wait_for_cmm_timeout( self, mock_sleep, mock_find_replay, 
mock_get, mock_close_connection, mock_open_connection, mock_init): cmm = {'instanceId': '12345.300'} scvolume = {'name': fake.VOLUME2_ID, 'instanceId': '12345.1'} replayid = '12345.200' mock_get.return_value = self.RESPONSE_404 mock_find_replay.return_value = None ret = self.scapi._wait_for_cmm(cmm, scvolume, replayid) self.assertFalse(ret) self.assertEqual(21, mock_sleep.call_count) @mock.patch.object(storagecenter_api.SCApi, 'create_volume') @mock.patch.object(storagecenter_api.SCApi, 'create_replay') @mock.patch.object(uuid, 'uuid4') @mock.patch.object(storagecenter_api.HttpClient, 'post') @mock.patch.object(storagecenter_api.SCApi, '_get_json') @mock.patch.object(storagecenter_api.SCApi, '_wait_for_cmm') @mock.patch.object(storagecenter_api.SCApi, '_expire_all_replays') def test_create_cloned_volume( self, mock_expire_all_replays, mock_wait_for_cmm, mock_get_json, mock_post, mock_uuid4, mock_create_replay, mock_create_volume, mock_close_connection, mock_open_connection, mock_init): # our state. vol_name = fake.VOLUME_ID scvolume = {'name': fake.VOLUME2_ID, 'instanceId': '12345.1', 'configuredSize': '1073741824 Bytes'} newvol = {'instanceId': '12345.2', 'configuredSize': '1073741824 Bytes'} storage_profile = 'profile1' replay_profile_list = ['profile2'] volume_qos = 'vqos' group_qos = 'gqos' dr_profile = 'dqos' cmm = {'state': 'Running'} # our call returns replayuuid = uuid.uuid4() mock_uuid4.return_value = replayuuid mock_post.return_value = self.RESPONSE_200 mock_get_json.return_value = cmm mock_create_replay.return_value = {'instanceId': '12345.100'} mock_create_volume.return_value = newvol mock_wait_for_cmm.return_value = True # our call res = self.scapi.create_cloned_volume( vol_name, scvolume, storage_profile, replay_profile_list, volume_qos, group_qos, dr_profile) # assert expected mock_create_volume.assert_called_once_with( vol_name, 1, storage_profile, replay_profile_list, volume_qos, group_qos, dr_profile) mock_create_replay.assert_called_once_with( scvolume, str(replayuuid), 60) expected_payload = {} expected_payload['CopyReplays'] = True expected_payload['DestinationVolume'] = '12345.2' expected_payload['SourceVolume'] = '12345.1' expected_payload['StorageCenter'] = 12345 expected_payload['Priority'] = 'High' mock_post.assert_called_once_with( 'StorageCenter/ScCopyMirrorMigrate/Copy', expected_payload, True) mock_wait_for_cmm.assert_called_once_with(cmm, newvol, str(replayuuid)) mock_expire_all_replays.assert_called_once_with(newvol) self.assertEqual(newvol, res) @mock.patch.object(storagecenter_api.SCApi, 'create_volume') def test_create_cloned_volume_create_vol_fail( self, mock_create_volume, mock_close_connection, mock_open_connection, mock_init): # our state. 
vol_name = fake.VOLUME_ID scvolume = {'name': fake.VOLUME2_ID, 'instanceId': '12345.1', 'configuredSize': '1073741824 Bytes'} newvol = None storage_profile = 'profile1' replay_profile_list = ['profile2'] volume_qos = 'vqos' group_qos = 'gqos' dr_profile = 'dqos' # our call returns mock_create_volume.return_value = newvol # our call res = self.scapi.create_cloned_volume( vol_name, scvolume, storage_profile, replay_profile_list, volume_qos, group_qos, dr_profile) # assert expected mock_create_volume.assert_called_once_with( vol_name, 1, storage_profile, replay_profile_list, volume_qos, group_qos, dr_profile) self.assertIsNone(res) @mock.patch.object(storagecenter_api.SCApi, 'create_volume') @mock.patch.object(storagecenter_api.SCApi, 'create_replay') @mock.patch.object(uuid, 'uuid4') @mock.patch.object(storagecenter_api.SCApi, 'delete_volume') def test_create_cloned_volume_replay_fail( self, mock_delete_volume, mock_uuid4, mock_create_replay, mock_create_volume, mock_close_connection, mock_open_connection, mock_init): # our state. vol_name = fake.VOLUME_ID scvolume = {'name': fake.VOLUME2_ID, 'instanceId': '12345.1', 'configuredSize': '1073741824 Bytes'} newvol = {'instanceId': '12345.2', 'configuredSize': '1073741824 Bytes'} storage_profile = 'profile1' replay_profile_list = ['profile2'] volume_qos = 'vqos' group_qos = 'gqos' dr_profile = 'dqos' # our call returns replayuuid = uuid.uuid4() mock_uuid4.return_value = replayuuid mock_create_replay.return_value = None mock_create_volume.return_value = newvol # our call self.assertRaises(exception.VolumeBackendAPIException, self.scapi.create_cloned_volume, vol_name, scvolume, storage_profile, replay_profile_list, volume_qos, group_qos, dr_profile) # assert expected mock_create_volume.assert_called_once_with( vol_name, 1, storage_profile, replay_profile_list, volume_qos, group_qos, dr_profile) mock_create_replay.assert_called_once_with( scvolume, str(replayuuid), 60) mock_delete_volume.assert_called_once_with(vol_name, '12345.2') @mock.patch.object(storagecenter_api.SCApi, 'create_volume') @mock.patch.object(storagecenter_api.SCApi, 'create_replay') @mock.patch.object(uuid, 'uuid4') @mock.patch.object(storagecenter_api.HttpClient, 'post') @mock.patch.object(storagecenter_api.SCApi, 'delete_volume') def test_create_cloned_volume_copy_fail( self, mock_delete_volume, mock_post, mock_uuid4, mock_create_replay, mock_create_volume, mock_close_connection, mock_open_connection, mock_init): # our state. 
vol_name = fake.VOLUME_ID scvolume = {'name': fake.VOLUME2_ID, 'instanceId': '12345.1', 'configuredSize': '1073741824 Bytes'} newvol = {'instanceId': '12345.2', 'configuredSize': '1073741824 Bytes'} storage_profile = 'profile1' replay_profile_list = ['profile2'] volume_qos = 'vqos' group_qos = 'gqos' dr_profile = 'dqos' # our call returns replayuuid = uuid.uuid4() mock_uuid4.return_value = replayuuid mock_post.return_value = self.RESPONSE_400 mock_create_replay.return_value = {'instanceId': '12345.100'} mock_create_volume.return_value = newvol # our call self.assertRaises(exception.VolumeBackendAPIException, self.scapi.create_cloned_volume, vol_name, scvolume, storage_profile, replay_profile_list, volume_qos, group_qos, dr_profile) # assert expected mock_create_volume.assert_called_once_with( vol_name, 1, storage_profile, replay_profile_list, volume_qos, group_qos, dr_profile) mock_create_replay.assert_called_once_with( scvolume, str(replayuuid), 60) expected_payload = {} expected_payload['CopyReplays'] = True expected_payload['DestinationVolume'] = '12345.2' expected_payload['SourceVolume'] = '12345.1' expected_payload['StorageCenter'] = 12345 expected_payload['Priority'] = 'High' mock_post.assert_called_once_with( 'StorageCenter/ScCopyMirrorMigrate/Copy', expected_payload, True) mock_delete_volume.assert_called_once_with(vol_name, '12345.2') @mock.patch.object(storagecenter_api.SCApi, 'create_volume') @mock.patch.object(storagecenter_api.SCApi, 'create_replay') @mock.patch.object(uuid, 'uuid4') @mock.patch.object(storagecenter_api.HttpClient, 'post') @mock.patch.object(storagecenter_api.SCApi, '_get_json') @mock.patch.object(storagecenter_api.SCApi, 'delete_volume') def test_create_cloned_volume_cmm_erred( self, mock_delete_volume, mock_get_json, mock_post, mock_uuid4, mock_create_replay, mock_create_volume, mock_close_connection, mock_open_connection, mock_init): # our state. 
vol_name = fake.VOLUME_ID scvolume = {'name': fake.VOLUME2_ID, 'instanceId': '12345.1', 'configuredSize': '1073741824 Bytes'} newvol = {'instanceId': '12345.2', 'configuredSize': '1073741824 Bytes'} storage_profile = 'profile1' replay_profile_list = ['profile2'] volume_qos = 'vqos' group_qos = 'gqos' dr_profile = 'dqos' cmm = {'state': 'Erred'} # our call returns replayuuid = uuid.uuid4() mock_uuid4.return_value = replayuuid mock_post.return_value = self.RESPONSE_200 mock_get_json.return_value = cmm mock_create_replay.return_value = {'instanceId': '12345.100'} mock_create_volume.return_value = newvol # our call self.assertRaises(exception.VolumeBackendAPIException, self.scapi.create_cloned_volume, vol_name, scvolume, storage_profile, replay_profile_list, volume_qos, group_qos, dr_profile) # assert expected mock_create_volume.assert_called_once_with( vol_name, 1, storage_profile, replay_profile_list, volume_qos, group_qos, dr_profile) mock_create_replay.assert_called_once_with( scvolume, str(replayuuid), 60) expected_payload = {} expected_payload['CopyReplays'] = True expected_payload['DestinationVolume'] = '12345.2' expected_payload['SourceVolume'] = '12345.1' expected_payload['StorageCenter'] = 12345 expected_payload['Priority'] = 'High' mock_post.assert_called_once_with( 'StorageCenter/ScCopyMirrorMigrate/Copy', expected_payload, True) mock_delete_volume.assert_called_once_with(vol_name, '12345.2') @mock.patch.object(storagecenter_api.SCApi, 'create_volume') @mock.patch.object(storagecenter_api.SCApi, 'create_replay') @mock.patch.object(uuid, 'uuid4') @mock.patch.object(storagecenter_api.HttpClient, 'post') @mock.patch.object(storagecenter_api.SCApi, '_get_json') @mock.patch.object(storagecenter_api.SCApi, 'delete_volume') def test_create_cloned_volume_cmm_paused( self, mock_delete_volume, mock_get_json, mock_post, mock_uuid4, mock_create_replay, mock_create_volume, mock_close_connection, mock_open_connection, mock_init): # our state. 
vol_name = fake.VOLUME_ID scvolume = {'name': fake.VOLUME2_ID, 'instanceId': '12345.1', 'configuredSize': '1073741824 Bytes'} newvol = {'instanceId': '12345.2', 'configuredSize': '1073741824 Bytes'} storage_profile = 'profile1' replay_profile_list = ['profile2'] volume_qos = 'vqos' group_qos = 'gqos' dr_profile = 'dqos' cmm = {'state': 'Paused'} # our call returns replayuuid = uuid.uuid4() mock_uuid4.return_value = replayuuid mock_post.return_value = self.RESPONSE_200 mock_get_json.return_value = cmm mock_create_replay.return_value = {'instanceId': '12345.100'} mock_create_volume.return_value = newvol # our call self.assertRaises(exception.VolumeBackendAPIException, self.scapi.create_cloned_volume, vol_name, scvolume, storage_profile, replay_profile_list, volume_qos, group_qos, dr_profile) # assert expected mock_create_volume.assert_called_once_with( vol_name, 1, storage_profile, replay_profile_list, volume_qos, group_qos, dr_profile) mock_create_replay.assert_called_once_with( scvolume, str(replayuuid), 60) expected_payload = {} expected_payload['CopyReplays'] = True expected_payload['DestinationVolume'] = '12345.2' expected_payload['SourceVolume'] = '12345.1' expected_payload['StorageCenter'] = 12345 expected_payload['Priority'] = 'High' mock_post.assert_called_once_with( 'StorageCenter/ScCopyMirrorMigrate/Copy', expected_payload, True) mock_delete_volume.assert_called_once_with(vol_name, '12345.2') @mock.patch.object(storagecenter_api.SCApi, 'create_volume') @mock.patch.object(storagecenter_api.SCApi, 'create_replay') @mock.patch.object(uuid, 'uuid4') @mock.patch.object(storagecenter_api.HttpClient, 'post') @mock.patch.object(storagecenter_api.SCApi, '_get_json') @mock.patch.object(storagecenter_api.SCApi, '_wait_for_cmm') @mock.patch.object(storagecenter_api.SCApi, 'delete_volume') def test_create_cloned_volume_cmm_wait_for_cmm_fail( self, mock_delete_volume, mock_wait_for_cmm, mock_get_json, mock_post, mock_uuid4, mock_create_replay, mock_create_volume, mock_close_connection, mock_open_connection, mock_init): # our state. 
vol_name = fake.VOLUME_ID scvolume = {'name': fake.VOLUME2_ID, 'instanceId': '12345.1', 'configuredSize': '1073741824 Bytes'} newvol = {'instanceId': '12345.2', 'configuredSize': '1073741824 Bytes'} storage_profile = 'profile1' replay_profile_list = ['profile2'] volume_qos = 'vqos' group_qos = 'gqos' dr_profile = 'dqos' cmm = {'state': 'Running'} # our call returns replayuuid = uuid.uuid4() mock_uuid4.return_value = replayuuid mock_post.return_value = self.RESPONSE_200 mock_get_json.return_value = cmm mock_create_replay.return_value = {'instanceId': '12345.100'} mock_create_volume.return_value = newvol mock_wait_for_cmm.return_value = False # our call self.assertRaises(exception.VolumeBackendAPIException, self.scapi.create_cloned_volume, vol_name, scvolume, storage_profile, replay_profile_list, volume_qos, group_qos, dr_profile) # assert expected mock_create_volume.assert_called_once_with( vol_name, 1, storage_profile, replay_profile_list, volume_qos, group_qos, dr_profile) mock_create_replay.assert_called_once_with( scvolume, str(replayuuid), 60) expected_payload = {} expected_payload['CopyReplays'] = True expected_payload['DestinationVolume'] = '12345.2' expected_payload['SourceVolume'] = '12345.1' expected_payload['StorageCenter'] = 12345 expected_payload['Priority'] = 'High' mock_post.assert_called_once_with( 'StorageCenter/ScCopyMirrorMigrate/Copy', expected_payload, True) mock_wait_for_cmm.assert_called_once_with(cmm, newvol, str(replayuuid)) mock_delete_volume.assert_called_once_with(vol_name, '12345.2') @mock.patch.object(storagecenter_api.SCApi, '_get_json', return_value=VOLUME) @mock.patch.object(storagecenter_api.HttpClient, 'post', return_value=RESPONSE_200) def test_expand_volume(self, mock_post, mock_get_json, mock_close_connection, mock_open_connection, mock_init): res = self.scapi.expand_volume(self.VOLUME, 550) self.assertTrue(mock_post.called) self.assertTrue(mock_get_json.called) self.assertEqual(self.VOLUME, res, 'Unexpected ScVolume') @mock.patch.object(storagecenter_api.HttpClient, 'post', return_value=RESPONSE_204) def test_expand_volume_failure(self, mock_post, mock_close_connection, mock_open_connection, mock_init): res = self.scapi.expand_volume(self.VOLUME, 550) self.assertTrue(mock_post.called) self.assertIsNone(res, 'Expected None') @mock.patch.object(storagecenter_api.HttpClient, 'put', return_value=RESPONSE_200) def test_rename_volume(self, mock_put, mock_close_connection, mock_open_connection, mock_init): res = self.scapi.rename_volume(self.VOLUME, 'newname') self.assertTrue(mock_put.called) self.assertTrue(res) @mock.patch.object(storagecenter_api.HttpClient, 'put', return_value=RESPONSE_400) def test_rename_volume_failure(self, mock_put, mock_close_connection, mock_open_connection, mock_init): res = self.scapi.rename_volume(self.VOLUME, 'newname') self.assertTrue(mock_put.called) self.assertFalse(res) @mock.patch.object(storagecenter_api.HttpClient, 'delete', return_value=RESPONSE_200) def test_delete_server(self, mock_delete, mock_close_connection, mock_open_connection, mock_init): res = self.scapi._delete_server(self.SCSERVER) self.assertTrue(mock_delete.called) self.assertIsNone(res, 'Expected None') @mock.patch.object(storagecenter_api.HttpClient, 'delete', return_value=RESPONSE_200) def test_delete_server_del_not_allowed(self, mock_delete, mock_close_connection, mock_open_connection, mock_init): # Test case where delete of ScServer not allowed res = self.scapi._delete_server(self.SCSERVER_NO_DEL) self.assertFalse(mock_delete.called) 
self.assertIsNone(res, 'Expected None') @mock.patch.object(storagecenter_api.SCApi, '_get_json', return_value={'test': 'test'}) @mock.patch.object(storagecenter_api.HttpClient, 'get', return_value=RESPONSE_200) def test_get_user_preferences(self, mock_get, mock_get_json, mock_close_connection, mock_open_connection, mock_init): # Not really testing anything other than the ability to mock, but # including for completeness. res = self.scapi._get_user_preferences() self.assertEqual({'test': 'test'}, res) @mock.patch.object(storagecenter_api.HttpClient, 'get', return_value=RESPONSE_400) def test_get_user_preferences_failure(self, mock_get, mock_close_connection, mock_open_connection, mock_init): res = self.scapi._get_user_preferences() self.assertEqual({}, res) @mock.patch.object(storagecenter_api.SCApi, '_get_user_preferences', return_value=None) def test_update_storage_profile_noprefs(self, mock_prefs, mock_close_connection, mock_open_connection, mock_init): res = self.scapi.update_storage_profile(None, None) self.assertFalse(res) @mock.patch.object(storagecenter_api.SCApi, '_get_user_preferences', return_value={'allowStorageProfileSelection': False}) def test_update_storage_profile_not_allowed(self, mock_prefs, mock_close_connection, mock_open_connection, mock_init): LOG = self.mock_object(storagecenter_api, "LOG") res = self.scapi.update_storage_profile(None, None) self.assertFalse(res) self.assertEqual(1, LOG.error.call_count) @mock.patch.object(storagecenter_api.SCApi, '_find_storage_profile', return_value=None) @mock.patch.object(storagecenter_api.SCApi, '_get_user_preferences', return_value={'allowStorageProfileSelection': True}) def test_update_storage_profile_prefs_not_found(self, mock_profile, mock_prefs, mock_close_connection, mock_open_connection, mock_init): LOG = self.mock_object(storagecenter_api, "LOG") res = self.scapi.update_storage_profile(None, 'Fake') self.assertFalse(res) self.assertEqual(1, LOG.error.call_count) @mock.patch.object(storagecenter_api.SCApi, '_get_user_preferences', return_value={'allowStorageProfileSelection': True, 'storageProfile': None}) def test_update_storage_profile_default_not_found(self, mock_prefs, mock_close_connection, mock_open_connection, mock_init): LOG = self.mock_object(storagecenter_api, "LOG") res = self.scapi.update_storage_profile(None, None) self.assertFalse(res) self.assertEqual(1, LOG.error.call_count) @mock.patch.object( storagecenter_api.SCApi, '_get_user_preferences', return_value={'allowStorageProfileSelection': True, 'storageProfile': {'name': 'Fake', 'instanceId': 'fakeId'}}) @mock.patch.object(storagecenter_api.HttpClient, 'put', return_value=RESPONSE_200) def test_update_storage_profile(self, mock_put, mock_prefs, mock_close_connection, mock_open_connection, mock_init): LOG = self.mock_object(storagecenter_api, "LOG") fake_scvolume = {'name': 'name', 'instanceId': 'id'} res = self.scapi.update_storage_profile(fake_scvolume, None) self.assertTrue(res) self.assertIn('fakeId', repr(mock_put.call_args_list[0])) self.assertEqual(1, LOG.info.call_count) @mock.patch.object(storagecenter_api.SCApi, '_get_json', return_value=[RPLAY_PROFILE]) @mock.patch.object(storagecenter_api.HttpClient, 'post', return_value=RESPONSE_200) def test_find_replay_profile(self, mock_post, mock_get_json, mock_close_connection, mock_open_connection, mock_init): res = self.scapi.find_replay_profile('guid') self.assertTrue(mock_post.called) self.assertTrue(mock_get_json.called) self.assertEqual(self.RPLAY_PROFILE, res, 'Unexpected Profile') 
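# NOTE: The replay profile tests below rely on the shared RESPONSE_* fixtures
# used throughout this class: RESPONSE_200/RESPONSE_201 stand in for
# successful REST calls while RESPONSE_204 and RESPONSE_400 drive the failure
# paths. They assert that find_replay_profile() returns the single matching
# profile, returns None for an empty result or a failed request, and raises
# VolumeBackendAPIException when more than one profile matches the name.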
@mock.patch.object(storagecenter_api.SCApi, '_get_json', return_value=[RPLAY_PROFILE, RPLAY_PROFILE]) @mock.patch.object(storagecenter_api.HttpClient, 'post', return_value=RESPONSE_200) def test_find_replay_profile_more_than_one(self, mock_post, mock_get_json, mock_close_connection, mock_open_connection, mock_init): self.assertRaises(exception.VolumeBackendAPIException, self.scapi.find_replay_profile, 'guid') self.assertTrue(mock_post.called) self.assertTrue(mock_get_json.called) @mock.patch.object(storagecenter_api.SCApi, '_get_json', return_value=[]) @mock.patch.object(storagecenter_api.HttpClient, 'post', return_value=RESPONSE_200) def test_find_replay_profile_empty_list(self, mock_post, mock_get_json, mock_close_connection, mock_open_connection, mock_init): res = self.scapi.find_replay_profile('guid') self.assertTrue(mock_post.called) self.assertTrue(mock_get_json.called) self.assertIsNone(res, 'Unexpected return') @mock.patch.object(storagecenter_api.HttpClient, 'post', return_value=RESPONSE_400) def test_find_replay_profile_error(self, mock_post, mock_close_connection, mock_open_connection, mock_init): res = self.scapi.find_replay_profile('guid') self.assertTrue(mock_post.called) self.assertIsNone(res, 'Unexpected return') @mock.patch.object(storagecenter_api.SCApi, 'find_replay_profile', return_value=None) @mock.patch.object(storagecenter_api.SCApi, '_first_result', return_value=RPLAY_PROFILE) @mock.patch.object(storagecenter_api.HttpClient, 'post', return_value=RESPONSE_201) def test_create_replay_profile(self, mock_post, mock_first_result, mock_find_replay_profile, mock_close_connection, mock_open_connection, mock_init): res = self.scapi.create_replay_profile('guid') self.assertTrue(mock_find_replay_profile.called) self.assertTrue(mock_post.called) self.assertTrue(mock_first_result.called) self.assertEqual(self.RPLAY_PROFILE, res, 'Unexpected Profile') @mock.patch.object(storagecenter_api.SCApi, 'find_replay_profile', return_value=RPLAY_PROFILE) def test_create_replay_profile_exists(self, mock_find_replay_profile, mock_close_connection, mock_open_connection, mock_init): res = self.scapi.create_replay_profile('guid') self.assertTrue(mock_find_replay_profile.called) self.assertEqual(self.RPLAY_PROFILE, res, 'Unexpected Profile') @mock.patch.object(storagecenter_api.SCApi, 'find_replay_profile', return_value=None) @mock.patch.object(storagecenter_api.HttpClient, 'post', return_value=RESPONSE_400) def test_create_replay_profile_fail(self, mock_post, mock_find_replay_profile, mock_close_connection, mock_open_connection, mock_init): res = self.scapi.create_replay_profile('guid') self.assertTrue(mock_find_replay_profile.called) self.assertTrue(mock_post.called) self.assertIsNone(res, 'Unexpected return') @mock.patch.object(storagecenter_api.HttpClient, 'delete', return_value=RESPONSE_200) @mock.patch.object(storagecenter_api.SCApi, '_get_id') def test_delete_replay_profile(self, mock_get_id, mock_delete, mock_close_connection, mock_open_connection, mock_init): profile = {'name': 'guid'} self.scapi.delete_replay_profile(profile) self.assertTrue(mock_get_id.called) self.assertTrue(mock_delete.called) @mock.patch.object(storagecenter_api.HttpClient, 'delete', return_value=RESPONSE_400) @mock.patch.object(storagecenter_api.SCApi, '_get_id') def test_delete_replay_profile_fail(self, mock_get_id, mock_delete, mock_close_connection, mock_open_connection, mock_init): profile = {'name': 'guid'} self.assertRaises(exception.VolumeBackendAPIException, self.scapi.delete_replay_profile, profile) 
self.assertTrue(mock_get_id.called) self.assertTrue(mock_delete.called) @mock.patch.object(storagecenter_api.SCApi, '_first_result', return_value=VOLUME_CONFIG) @mock.patch.object(storagecenter_api.HttpClient, 'get', return_value=RESPONSE_200) @mock.patch.object(storagecenter_api.SCApi, '_get_id') def test_get_volume_configuration(self, mock_get_id, mock_get, mock_first_result, mock_close_connection, mock_open_connection, mock_init): res = self.scapi._get_volume_configuration({}) self.assertTrue(mock_get_id.called) self.assertTrue(mock_get.called) self.assertEqual(self.VOLUME_CONFIG, res, 'Unexpected config') @mock.patch.object(storagecenter_api.HttpClient, 'get', return_value=RESPONSE_400) @mock.patch.object(storagecenter_api.SCApi, '_get_id') def test_get_volume_configuration_bad_response(self, mock_get_id, mock_get, mock_close_connection, mock_open_connection, mock_init): res = self.scapi._get_volume_configuration({}) self.assertTrue(mock_get_id.called) self.assertTrue(mock_get.called) self.assertIsNone(res, 'Unexpected result') @mock.patch.object(storagecenter_api.SCApi, '_get_volume_configuration', return_value=VOLUME_CONFIG) @mock.patch.object(storagecenter_api.HttpClient, 'put', return_value=RESPONSE_200) @mock.patch.object(storagecenter_api.SCApi, '_get_id') def test_update_volume_profiles(self, mock_get_id, mock_put, mock_get_volume_configuration, mock_close_connection, mock_open_connection, mock_init): scvolume = {'instanceId': '1'} existingid = self.VOLUME_CONFIG[u'replayProfileList'][0][u'instanceId'] vcid = self.VOLUME_CONFIG[u'instanceId'] # First get_id is for our existing replay profile id and the second # is for the volume config and the last is for the volume id. And # then we do this again for the second call below. mock_get_id.side_effect = [existingid, vcid, scvolume['instanceId'], existingid, vcid, scvolume['instanceId']] newid = '64702.1' expected_payload = {'ReplayProfileList': [newid, existingid]} expected_url = 'StorageCenter/ScVolumeConfiguration/' + vcid res = self.scapi._update_volume_profiles(scvolume, newid, None) self.assertTrue(mock_get_id.called) self.assertTrue(mock_get_volume_configuration.called) mock_put.assert_called_once_with(expected_url, expected_payload, True) self.assertTrue(res) # Now do a remove. (Restarting with the original config so this will # end up as an empty list.) expected_payload['ReplayProfileList'] = [] res = self.scapi._update_volume_profiles(scvolume, None, existingid) self.assertTrue(mock_get_id.called) self.assertTrue(mock_get_volume_configuration.called) mock_put.assert_called_with(expected_url, expected_payload, True) self.assertTrue(res) @mock.patch.object(storagecenter_api.SCApi, '_get_volume_configuration', return_value=VOLUME_CONFIG) @mock.patch.object(storagecenter_api.HttpClient, 'put', return_value=RESPONSE_400) # We set this to 1 so we can check our payload @mock.patch.object(storagecenter_api.SCApi, '_get_id') def test_update_volume_profiles_bad_response(self, mock_get_id, mock_put, mock_get_volume_configuration, mock_close_connection, mock_open_connection, mock_init): scvolume = {'instanceId': '1'} existingid = self.VOLUME_CONFIG[u'replayProfileList'][0][u'instanceId'] vcid = self.VOLUME_CONFIG[u'instanceId'] # First get_id is for our existing replay profile id and the second # is for the volume config and the last is for the volume id. And # then we do this again for the second call below. 
mock_get_id.side_effect = [existingid, vcid, scvolume['instanceId'], existingid, vcid, scvolume['instanceId']] newid = '64702.1' expected_payload = {'ReplayProfileList': [newid, existingid]} expected_url = 'StorageCenter/ScVolumeConfiguration/' + vcid res = self.scapi._update_volume_profiles(scvolume, newid, None) self.assertTrue(mock_get_id.called) self.assertTrue(mock_get_volume_configuration.called) mock_put.assert_called_once_with(expected_url, expected_payload, True) self.assertFalse(res) # Now do a remove. (Restarting with the original config so this will # end up as an empty list.) expected_payload['ReplayProfileList'] = [] res = self.scapi._update_volume_profiles(scvolume, None, existingid) self.assertTrue(mock_get_id.called) self.assertTrue(mock_get_volume_configuration.called) mock_put.assert_called_with(expected_url, expected_payload, True) self.assertFalse(res) @mock.patch.object(storagecenter_api.SCApi, '_get_volume_configuration', return_value=None) def test_update_volume_profiles_no_config(self, mock_get_volume_configuration, mock_close_connection, mock_open_connection, mock_init): scvolume = {'instanceId': '1'} res = self.scapi._update_volume_profiles(scvolume, '64702.2', None) self.assertTrue(mock_get_volume_configuration.called) self.assertFalse(res) @mock.patch.object(storagecenter_api.SCApi, 'find_volume', return_value=999) @mock.patch.object(storagecenter_api.SCApi, '_update_volume_profiles', return_value=True) def test_add_cg_volumes(self, mock_update_volume_profiles, mock_find_volume, mock_close_connection, mock_open_connection, mock_init): profileid = '100' add_volumes = [{'id': '1', 'provider_id': '1'}] res = self.scapi._add_cg_volumes(profileid, add_volumes) self.assertTrue(mock_find_volume.called) mock_update_volume_profiles.assert_called_once_with(999, addid=profileid, removeid=None) self.assertTrue(res) @mock.patch.object(storagecenter_api.SCApi, 'find_volume', return_value=999) @mock.patch.object(storagecenter_api.SCApi, '_update_volume_profiles', return_value=False) def test_add_cg_volumes_fail(self, mock_update_volume_profiles, mock_find_volume, mock_close_connection, mock_open_connection, mock_init): profileid = '100' add_volumes = [{'id': '1', 'provider_id': '1'}] res = self.scapi._add_cg_volumes(profileid, add_volumes) self.assertTrue(mock_find_volume.called) mock_update_volume_profiles.assert_called_once_with(999, addid=profileid, removeid=None) self.assertFalse(res) @mock.patch.object(storagecenter_api.SCApi, 'find_volume', return_value=999) @mock.patch.object(storagecenter_api.SCApi, '_update_volume_profiles', return_value=True) def test_remove_cg_volumes(self, mock_update_volume_profiles, mock_find_volume, mock_close_connection, mock_open_connection, mock_init): profileid = '100' remove_volumes = [{'id': '1', 'provider_id': '1'}] res = self.scapi._remove_cg_volumes(profileid, remove_volumes) self.assertTrue(mock_find_volume.called) mock_update_volume_profiles.assert_called_once_with(999, addid=None, removeid=profileid) self.assertTrue(res) @mock.patch.object(storagecenter_api.SCApi, 'find_volume', return_value=999) @mock.patch.object(storagecenter_api.SCApi, '_update_volume_profiles', return_value=False) def test_remove_cg_volumes_false(self, mock_update_volume_profiles, mock_find_volume, mock_close_connection, mock_open_connection, mock_init): profileid = '100' remove_volumes = [{'id': '1', 'provider_id': '1'}] res = self.scapi._remove_cg_volumes(profileid, remove_volumes) self.assertTrue(mock_find_volume.called) 
mock_update_volume_profiles.assert_called_once_with(999, addid=None, removeid=profileid) self.assertFalse(res) @mock.patch.object(storagecenter_api.SCApi, '_remove_cg_volumes', return_value=True) @mock.patch.object(storagecenter_api.SCApi, '_add_cg_volumes', return_value=True) @mock.patch.object(storagecenter_api.SCApi, '_get_id', return_value='100') def test_update_cg_volumes(self, mock_get_id, mock_add_cg_volumes, mock_remove_cg_volumes, mock_close_connection, mock_open_connection, mock_init): profile = {'name': 'guid'} add_volumes = [{'id': '1'}] remove_volumes = [{'id': '2'}] res = self.scapi.update_cg_volumes(profile, add_volumes, remove_volumes) self.assertTrue(mock_get_id.called) mock_add_cg_volumes.assert_called_once_with('100', add_volumes) mock_remove_cg_volumes.assert_called_once_with('100', remove_volumes) self.assertTrue(res) @mock.patch.object(storagecenter_api.SCApi, '_remove_cg_volumes', return_value=True) @mock.patch.object(storagecenter_api.SCApi, '_add_cg_volumes', return_value=True) @mock.patch.object(storagecenter_api.SCApi, '_get_id', return_value='100') def test_update_cg_volumes_no_remove(self, mock_get_id, mock_add_cg_volumes, mock_remove_cg_volumes, mock_close_connection, mock_open_connection, mock_init): profile = {'name': 'guid'} add_volumes = [{'id': '1'}] remove_volumes = [] res = self.scapi.update_cg_volumes(profile, add_volumes, remove_volumes) self.assertTrue(mock_get_id.called) mock_add_cg_volumes.assert_called_once_with('100', add_volumes) self.assertFalse(mock_remove_cg_volumes.called) self.assertTrue(res) @mock.patch.object(storagecenter_api.SCApi, '_remove_cg_volumes', return_value=True) @mock.patch.object(storagecenter_api.SCApi, '_add_cg_volumes', return_value=True) @mock.patch.object(storagecenter_api.SCApi, '_get_id', return_value='100') def test_update_cg_volumes_no_add(self, mock_get_id, mock_add_cg_volumes, mock_remove_cg_volumes, mock_close_connection, mock_open_connection, mock_init): profile = {'name': 'guid'} add_volumes = [] remove_volumes = [{'id': '1'}] res = self.scapi.update_cg_volumes(profile, add_volumes, remove_volumes) self.assertTrue(mock_get_id.called) mock_remove_cg_volumes.assert_called_once_with('100', remove_volumes) self.assertFalse(mock_add_cg_volumes.called) self.assertTrue(res) @mock.patch.object(storagecenter_api.SCApi, '_remove_cg_volumes') @mock.patch.object(storagecenter_api.SCApi, '_add_cg_volumes', return_value=False) @mock.patch.object(storagecenter_api.SCApi, '_get_id', return_value='100') def test_update_cg_volumes_add_fail(self, mock_get_id, mock_add_cg_volumes, mock_remove_cg_volumes, mock_close_connection, mock_open_connection, mock_init): profile = {'name': 'guid'} add_volumes = [{'id': '1'}] remove_volumes = [{'id': '2'}] res = self.scapi.update_cg_volumes(profile, add_volumes, remove_volumes) self.assertTrue(mock_get_id.called) mock_add_cg_volumes.assert_called_once_with('100', add_volumes) self.assertTrue(not mock_remove_cg_volumes.called) self.assertFalse(res) @mock.patch.object(storagecenter_api.SCApi, '_remove_cg_volumes', return_value=False) @mock.patch.object(storagecenter_api.SCApi, '_add_cg_volumes', return_value=True) @mock.patch.object(storagecenter_api.SCApi, '_get_id', return_value='100') def test_update_cg_volumes_remove_fail(self, mock_get_id, mock_add_cg_volumes, mock_remove_cg_volumes, mock_close_connection, mock_open_connection, mock_init): profile = {'name': 'guid'} add_volumes = [{'id': '1'}] remove_volumes = [{'id': '2'}] res = self.scapi.update_cg_volumes(profile, add_volumes, 
remove_volumes) self.assertTrue(mock_get_id.called) mock_add_cg_volumes.assert_called_once_with('100', add_volumes) mock_remove_cg_volumes.assert_called_once_with('100', remove_volumes) self.assertFalse(res) @mock.patch.object(storagecenter_api.HttpClient, 'get', return_value=RESPONSE_200) @mock.patch.object(storagecenter_api.SCApi, '_get_json', return_value=[INACTIVE_VOLUME]) @mock.patch.object(storagecenter_api.SCApi, '_init_volume') def test_init_cg_volumes_inactive(self, mock_init_volume, mock_get_json, mock_get, mock_close_connection, mock_open_connection, mock_init): profileid = 100 self.scapi._init_cg_volumes(profileid) self.assertTrue(mock_get.called) self.assertTrue(mock_get_json.called) mock_init_volume.assert_called_once_with(self.INACTIVE_VOLUME) @mock.patch.object(storagecenter_api.HttpClient, 'get', return_value=RESPONSE_200) @mock.patch.object(storagecenter_api.SCApi, '_get_json', return_value=[VOLUME]) @mock.patch.object(storagecenter_api.SCApi, '_init_volume') def test_init_cg_volumes_active(self, mock_init_volume, mock_get_json, mock_get, mock_close_connection, mock_open_connection, mock_init): profileid = 100 self.scapi._init_cg_volumes(profileid) self.assertTrue(mock_get.called) self.assertTrue(mock_get_json.called) self.assertFalse(mock_init_volume.called) @mock.patch.object(storagecenter_api.HttpClient, 'post', return_value=RESPONSE_204) @mock.patch.object(storagecenter_api.SCApi, '_get_id', return_value='100') @mock.patch.object(storagecenter_api.SCApi, '_init_cg_volumes') def test_snap_cg_replay(self, mock_init_cg_volumes, mock_get_id, mock_post, mock_close_connection, mock_open_connection, mock_init): replayid = 'guid' expire = 0 profile = {'instanceId': '100'} # See the 100 from get_id above? expected_url = 'StorageCenter/ScReplayProfile/100/CreateReplay' expected_payload = {'description': replayid, 'expireTime': expire} res = self.scapi.snap_cg_replay(profile, replayid, expire) mock_post.assert_called_once_with(expected_url, expected_payload, True) self.assertTrue(mock_get_id.called) self.assertTrue(mock_init_cg_volumes.called) self.assertTrue(res) @mock.patch.object(storagecenter_api.HttpClient, 'post', return_value=RESPONSE_400) @mock.patch.object(storagecenter_api.SCApi, '_get_id', return_value='100') @mock.patch.object(storagecenter_api.SCApi, '_init_cg_volumes') def test_snap_cg_replay_bad_return(self, mock_init_cg_volumes, mock_get_id, mock_post, mock_close_connection, mock_open_connection, mock_init): replayid = 'guid' expire = 0 profile = {'instanceId': '100'} # See the 100 from get_id above? 
expected_url = 'StorageCenter/ScReplayProfile/100/CreateReplay' expected_payload = {'description': replayid, 'expireTime': expire} res = self.scapi.snap_cg_replay(profile, replayid, expire) mock_post.assert_called_once_with(expected_url, expected_payload, True) self.assertTrue(mock_get_id.called) self.assertTrue(mock_init_cg_volumes.called) self.assertFalse(res) @mock.patch.object(storagecenter_api.SCApi, '_get_json', return_value=CGS) @mock.patch.object(storagecenter_api.HttpClient, 'get', return_value=RESPONSE_200) def test_find_sc_cg(self, mock_get, mock_get_json, mock_close_connection, mock_open_connection, mock_init): res = self.scapi._find_sc_cg( {}, 'GUID1-0869559e-6881-454e-ba18-15c6726d33c1') self.assertEqual(self.CGS[0], res) @mock.patch.object(storagecenter_api.SCApi, '_get_json', return_value=CGS) @mock.patch.object(storagecenter_api.HttpClient, 'get', return_value=RESPONSE_200) def test_find_sc_cg_not_found(self, mock_get, mock_get_json, mock_close_connection, mock_open_connection, mock_init): res = self.scapi._find_sc_cg( {}, 'GUID3-0869559e-6881-454e-ba18-15c6726d33c1') self.assertIsNone(res) @mock.patch.object(storagecenter_api.HttpClient, 'get', return_value=RESPONSE_400) def test_find_sc_cg_fail(self, mock_get, mock_close_connection, mock_open_connection, mock_init): res = self.scapi._find_sc_cg( {}, 'GUID1-0869559e-6881-454e-ba18-15c6726d33c1') self.assertIsNone(res) @mock.patch.object(storagecenter_api.SCApi, '_find_sc_cg', return_value={'instanceId': 101}) @mock.patch.object(storagecenter_api.SCApi, '_get_json', return_value=RPLAYS) @mock.patch.object(storagecenter_api.HttpClient, 'get') def test_find_cg_replays(self, mock_get, mock_get_json, mock_find_sc_cg, mock_close_connection, mock_open_connection, mock_init): profile = {'instanceId': '100'} replayid = 'Cinder Test Replay012345678910' res = self.scapi._find_cg_replays(profile, replayid) expected_url = 'StorageCenter/ScReplayConsistencyGroup/101/ReplayList' mock_get.assert_called_once_with(expected_url) self.assertTrue(mock_find_sc_cg.called) self.assertTrue(mock_get_json.called) # We should find RPLAYS self.assertEqual(self.RPLAYS, res) @mock.patch.object(storagecenter_api.SCApi, '_find_sc_cg', return_value=None) def test_find_cg_replays_no_cg(self, mock_find_sc_cg, mock_close_connection, mock_open_connection, mock_init): profile = {'instanceId': '100'} replayid = 'Cinder Test Replay012345678910' res = self.scapi._find_cg_replays(profile, replayid) self.assertTrue(mock_find_sc_cg.called) # We should return an empty list.
self.assertEqual([], res) @mock.patch.object(storagecenter_api.SCApi, '_find_sc_cg', return_value={'instanceId': 101}) @mock.patch.object(storagecenter_api.SCApi, '_get_json', return_value=None) @mock.patch.object(storagecenter_api.HttpClient, 'get') def test_find_cg_replays_bad_json(self, mock_get, mock_get_json, mock_find_sc_cg, mock_close_connection, mock_open_connection, mock_init): profile = {'instanceId': '100'} replayid = 'Cinder Test Replay012345678910' res = self.scapi._find_cg_replays(profile, replayid) expected_url = 'StorageCenter/ScReplayConsistencyGroup/101/ReplayList' mock_get.assert_called_once_with(expected_url) self.assertTrue(mock_find_sc_cg.called) self.assertTrue(mock_get_json.called) self.assertIsNone(res) @mock.patch.object(storagecenter_api.SCApi, '_find_cg_replays', return_value=RPLAYS) @mock.patch.object(storagecenter_api.HttpClient, 'post', return_value=RESPONSE_204) def test_delete_cg_replay(self, mock_post, mock_find_cg_replays, mock_close_connection, mock_open_connection, mock_init): res = self.scapi.delete_cg_replay({}, '') expected_url = ('StorageCenter/ScReplay/' + self.RPLAYS[0]['instanceId'] + '/Expire') mock_post.assert_any_call(expected_url, {}, True) expected_url = ('StorageCenter/ScReplay/' + self.RPLAYS[1]['instanceId'] + '/Expire') mock_post.assert_any_call(expected_url, {}, True) self.assertTrue(mock_find_cg_replays.called) self.assertTrue(res) @mock.patch.object(storagecenter_api.SCApi, '_find_cg_replays', return_value=RPLAYS) @mock.patch.object(storagecenter_api.HttpClient, 'post', return_value=RESPONSE_400) def test_delete_cg_replay_error(self, mock_post, mock_find_cg_replays, mock_close_connection, mock_open_connection, mock_init): expected_url = ('StorageCenter/ScReplay/' + self.RPLAYS[0]['instanceId'] + '/Expire') res = self.scapi.delete_cg_replay({}, '') mock_post.assert_called_once_with(expected_url, {}, True) self.assertTrue(mock_find_cg_replays.called) self.assertFalse(res) @mock.patch.object(storagecenter_api.SCApi, '_find_cg_replays', return_value=[]) def test_delete_cg_replay_cant_find(self, mock_find_cg_replays, mock_close_connection, mock_open_connection, mock_init): res = self.scapi.delete_cg_replay({}, '') self.assertTrue(mock_find_cg_replays.called) self.assertTrue(res) def test_size_to_gb(self, mock_close_connection, mock_open_connection, mock_init): gb, rem = self.scapi.size_to_gb('1.073741824E9 Byte') self.assertEqual(1, gb) self.assertEqual(0, rem) self.assertRaises(exception.VolumeBackendAPIException, self.scapi.size_to_gb, 'banana') gb, rem = self.scapi.size_to_gb('1.073741924E9 Byte') self.assertEqual(1, gb) self.assertEqual(100, rem) @mock.patch.object(storagecenter_api.SCApi, '_find_volume_folder') @mock.patch.object(storagecenter_api.HttpClient, 'put', return_value=RESPONSE_200) @mock.patch.object(storagecenter_api.SCApi, '_get_json', return_value=VOLUME) def test_import_one(self, mock_get_json, mock_put, mock_find_volume_folder, mock_close_connection, mock_open_connection, mock_init): newname = 'guid' # First test is folder found. Second test is not found.
mock_find_volume_folder.side_effect = [{'instanceId': '1'}, None] expected_url = 'StorageCenter/ScVolume/100' expected_payload = {'Name': newname, 'VolumeFolder': '1'} self.scapi._import_one({'instanceId': '100'}, newname) mock_put.assert_called_once_with(expected_url, expected_payload, True) self.assertTrue(mock_find_volume_folder.called) expected_payload = {'Name': newname} self.scapi._import_one({'instanceId': '100'}, newname) @mock.patch.object(storagecenter_api.SCApi, '_get_volume_list', return_value=[{'configuredSize': '1.073741824E9 Bytes'}]) @mock.patch.object(storagecenter_api.SCApi, 'size_to_gb', return_value=(1, 0)) @mock.patch.object(storagecenter_api.SCApi, '_find_mappings', return_value=[]) @mock.patch.object(storagecenter_api.SCApi, '_import_one', return_value=VOLUME) def test_manage_existing(self, mock_import_one, mock_find_mappings, mock_size_to_gb, mock_get_volume_list, mock_close_connection, mock_open_connection, mock_init): newname = 'guid' existing = {'source-name': 'scvolname'} self.scapi.manage_existing(newname, existing) mock_get_volume_list.assert_called_once_with( existing.get('source-name'), None, False) self.assertTrue(mock_find_mappings.called) self.assertTrue(mock_size_to_gb.called) @mock.patch.object(storagecenter_api.SCApi, '_get_volume_list', return_value=[]) def test_manage_existing_vol_not_found(self, mock_get_volume_list, mock_close_connection, mock_open_connection, mock_init): # Same as above only we don't have a volume folder. newname = 'guid' existing = {'source-name': 'scvolname'} self.assertRaises(exception.ManageExistingInvalidReference, self.scapi.manage_existing, newname, existing) mock_get_volume_list.assert_called_once_with( existing.get('source-name'), existing.get('source-id'), False) @mock.patch.object(storagecenter_api.SCApi, '_get_volume_list', return_value=[{}, {}, {}]) def test_manage_existing_vol_multiple_found(self, mock_get_volume_list, mock_close_connection, mock_open_connection, mock_init): # Same as above only we don't have a volume folder. newname = 'guid' existing = {'source-name': 'scvolname'} self.assertRaises(exception.ManageExistingInvalidReference, self.scapi.manage_existing, newname, existing) mock_get_volume_list.assert_called_once_with( existing.get('source-name'), existing.get('source-id'), False) @mock.patch.object(storagecenter_api.SCApi, '_get_volume_list', return_value=[{'configuredSize': '1.073741924E9 Bytes'}]) @mock.patch.object(storagecenter_api.SCApi, 'size_to_gb', return_value=(1, 100)) def test_manage_existing_bad_size(self, mock_size_to_gb, mock_get_volume_list, mock_close_connection, mock_open_connection, mock_init): # Same as above only we don't have a volume folder. 
newname = 'guid' existing = {'source-name': 'scvolname'} self.assertRaises(exception.VolumeBackendAPIException, self.scapi.manage_existing, newname, existing) mock_get_volume_list.assert_called_once_with( existing.get('source-name'), existing.get('source-id'), False) self.assertTrue(mock_size_to_gb.called) @mock.patch.object(storagecenter_api.SCApi, '_get_volume_list', return_value=[{'configuredSize': '1.073741824E9 Bytes'}]) @mock.patch.object(storagecenter_api.SCApi, 'size_to_gb', return_value=(1, 0)) @mock.patch.object(storagecenter_api.SCApi, '_find_mappings', return_value=[{}, {}]) def test_manage_existing_already_mapped(self, mock_find_mappings, mock_size_to_gb, mock_get_volume_list, mock_close_connection, mock_open_connection, mock_init): newname = 'guid' existing = {'source-name': 'scvolname'} self.assertRaises(exception.VolumeBackendAPIException, self.scapi.manage_existing, newname, existing) mock_get_volume_list.assert_called_once_with( existing.get('source-name'), existing.get('source-id'), False) self.assertTrue(mock_find_mappings.called) self.assertTrue(mock_size_to_gb.called) @mock.patch.object(storagecenter_api.SCApi, '_get_volume_list', return_value=[{'configuredSize': '1.073741824E9 Bytes'}]) @mock.patch.object(storagecenter_api.SCApi, 'size_to_gb', return_value=(1, 0)) @mock.patch.object(storagecenter_api.SCApi, '_find_mappings', return_value=[]) @mock.patch.object(storagecenter_api.SCApi, '_import_one', return_value=None) def test_manage_existing_import_fail(self, mock_import_one, mock_find_mappings, mock_size_to_gb, mock_get_volume_list, mock_close_connection, mock_open_connection, mock_init): # We fail on the _find_volume_folder to make this easier. newname = 'guid' existing = {'source-name': 'scvolname'} self.assertRaises(exception.VolumeBackendAPIException, self.scapi.manage_existing, newname, existing) mock_get_volume_list.assert_called_once_with( existing.get('source-name'), existing.get('source-id'), False) self.assertTrue(mock_find_mappings.called) self.assertTrue(mock_size_to_gb.called) @mock.patch.object(storagecenter_api.SCApi, '_get_volume_list', return_value=[{'configuredSize': '1.073741824E9 Bytes'}]) @mock.patch.object(storagecenter_api.SCApi, 'size_to_gb', return_value=(1, 0)) def test_get_unmanaged_volume_size(self, mock_size_to_gb, mock_get_volume_list, mock_close_connection, mock_open_connection, mock_init): existing = {'source-name': 'scvolname'} res = self.scapi.get_unmanaged_volume_size(existing) mock_get_volume_list.assert_called_once_with( existing.get('source-name'), existing.get('source-id'), False) self.assertTrue(mock_size_to_gb.called) self.assertEqual(1, res) @mock.patch.object(storagecenter_api.SCApi, '_get_volume_list', return_value=[]) def test_get_unmanaged_volume_size_not_found(self, mock_get_volume_list, mock_close_connection, mock_open_connection, mock_init): existing = {'source-name': 'scvolname'} self.assertRaises(exception.ManageExistingInvalidReference, self.scapi.get_unmanaged_volume_size, existing) mock_get_volume_list.assert_called_once_with( existing.get('source-name'), existing.get('source-id'), False) @mock.patch.object(storagecenter_api.SCApi, '_get_volume_list', return_value=[{}, {}, {}]) def test_get_unmanaged_volume_size_many_found(self, mock_get_volume_list, mock_close_connection, mock_open_connection, mock_init): existing = {'source-name': 'scvolname'} self.assertRaises(exception.ManageExistingInvalidReference, self.scapi.get_unmanaged_volume_size, existing) mock_get_volume_list.assert_called_once_with( 
existing.get('source-name'), existing.get('source-id'), False) @mock.patch.object(storagecenter_api.SCApi, '_get_volume_list', return_value=[{'configuredSize': '1.073741924E9 Bytes'}]) @mock.patch.object(storagecenter_api.SCApi, 'size_to_gb', return_value=(1, 100)) def test_get_unmanaged_volume_size_bad_size(self, mock_size_to_gb, mock_get_volume_list, mock_close_connection, mock_open_connection, mock_init): existing = {'source-name': 'scvolname'} self.assertRaises(exception.VolumeBackendAPIException, self.scapi.get_unmanaged_volume_size, existing) self.assertTrue(mock_size_to_gb.called) mock_get_volume_list.assert_called_once_with( existing.get('source-name'), existing.get('source-id'), False) @mock.patch.object(storagecenter_api.HttpClient, 'put', return_value=RESPONSE_200) @mock.patch.object(storagecenter_api.SCApi, '_get_id', return_value='100') def test_unmanage(self, mock_get_id, mock_put, mock_close_connection, mock_open_connection, mock_init): # Same as above only we don't have a volume folder. scvolume = {'name': 'guid'} expected_url = 'StorageCenter/ScVolume/100' newname = 'Unmanaged_' + scvolume['name'] expected_payload = {'Name': newname} self.scapi.unmanage(scvolume) self.assertTrue(mock_get_id.called) mock_put.assert_called_once_with(expected_url, expected_payload, True) @mock.patch.object(storagecenter_api.HttpClient, 'put', return_value=RESPONSE_400) @mock.patch.object(storagecenter_api.SCApi, '_get_id', return_value='100') def test_unmanage_fail(self, mock_get_id, mock_put, mock_close_connection, mock_open_connection, mock_init): # Same as above only we don't have a volume folder. scvolume = {'name': 'guid'} expected_url = 'StorageCenter/ScVolume/100' newname = 'Unmanaged_' + scvolume['name'] expected_payload = {'Name': newname} self.assertRaises(exception.VolumeBackendAPIException, self.scapi.unmanage, scvolume) self.assertTrue(mock_get_id.called) mock_put.assert_called_once_with(expected_url, expected_payload, True) @mock.patch.object(storagecenter_api.HttpClient, 'post', return_value=RESPONSE_200) @mock.patch.object(storagecenter_api.SCApi, '_get_json', return_value=[SCQOS]) # def _find_qos(self, qosnode): def test__find_qos(self, mock_get_json, mock_post, mock_close_connection, mock_open_connection, mock_init): ret = self.scapi._find_qos('Cinder QoS') self.assertDictEqual(self.SCQOS, ret) @mock.patch.object(storagecenter_api.HttpClient, 'post', return_value=RESPONSE_200) @mock.patch.object(storagecenter_api.SCApi, '_get_json') # def _find_qos(self, qosnode): def test__find_qos_not_found(self, mock_get_json, mock_post, mock_close_connection, mock_open_connection, mock_init): # set side effect for posts. 
# first empty second returns qosnode mock_get_json.side_effect = [[], self.SCQOS] ret = self.scapi._find_qos('Cinder QoS') self.assertDictEqual(self.SCQOS, ret) @mock.patch.object(storagecenter_api.HttpClient, 'post', return_value=RESPONSE_400) # def _find_qos(self, qosnode): def test__find_qos_find_fail(self, mock_post, mock_close_connection, mock_open_connection, mock_init): self.assertRaises(exception.VolumeBackendAPIException, self.scapi._find_qos, 'Cinder QoS') @mock.patch.object(storagecenter_api.HttpClient, 'post') @mock.patch.object(storagecenter_api.SCApi, '_get_json', return_value=[]) # def _find_qos(self, qosnode): def test__find_qos_create_fail(self, mock_get_json, mock_post, mock_close_connection, mock_open_connection, mock_init): mock_post.side_effect = [self.RESPONSE_200, self.RESPONSE_400] self.assertRaises(exception.VolumeBackendAPIException, self.scapi._find_qos, 'Cinder QoS') @mock.patch.object(storagecenter_api.HttpClient, 'put', return_value=RESPONSE_400) @mock.patch.object(storagecenter_api.HttpClient, 'get', return_value=RESPONSE_200) @mock.patch.object(storagecenter_api.SCApi, '_get_json', return_value=SCREPL) def test_update_replicate_active_replay_fail(self, mock_get_json, mock_get, mock_put, mock_close_connection, mock_open_connection, mock_init): ret = self.scapi.update_replicate_active_replay({'instanceId': '1'}, True) self.assertFalse(ret) @mock.patch.object(storagecenter_api.HttpClient, 'get', return_value=RESPONSE_200) @mock.patch.object(storagecenter_api.SCApi, '_get_json', return_value=SCREPL) def test_update_replicate_active_replay_nothing_to_do( self, mock_get_json, mock_get, mock_close_connection, mock_open_connection, mock_init): ret = self.scapi.update_replicate_active_replay({'instanceId': '1'}, False) self.assertTrue(ret) @mock.patch.object(storagecenter_api.HttpClient, 'get', return_value=RESPONSE_200) @mock.patch.object(storagecenter_api.SCApi, '_get_json', return_value=[]) def test_update_replicate_active_replay_not_found(self, mock_get_json, mock_get, mock_close_connection, mock_open_connection, mock_init): ret = self.scapi.update_replicate_active_replay({'instanceId': '1'}, True) self.assertTrue(ret) @mock.patch.object(storagecenter_api.HttpClient, 'get', return_value=RESPONSE_400) @mock.patch.object(storagecenter_api.SCApi, '_get_json', return_value=[]) def test_update_replicate_active_replay_not_found2(self, mock_get_json, mock_get, mock_close_connection, mock_open_connection, mock_init): ret = self.scapi.update_replicate_active_replay({'instanceId': '1'}, True) self.assertTrue(ret) @mock.patch.object(storagecenter_api.HttpClient, 'post', return_value=RESPONSE_200) @mock.patch.object(storagecenter_api.SCApi, '_get_json', return_value=[{'instanceId': '12345.1'}]) def test_get_disk_folder(self, mock_get_json, mock_post, mock_close_connection, mock_open_connection, mock_init): ret = self.scapi._get_disk_folder(12345, 'name') expected_payload = {'filter': {'filterType': 'AND', 'filters': [ {'filterType': 'Equals', 'attributeName': 'scSerialNumber', 'attributeValue': 12345}, {'filterType': 'Equals', 'attributeName': 'name', 'attributeValue': 'name'}]}} mock_post.assert_called_once_with('StorageCenter/ScDiskFolder/GetList', expected_payload) self.assertEqual({'instanceId': '12345.1'}, ret) @mock.patch.object(storagecenter_api.HttpClient, 'post', return_value=RESPONSE_400) def test_get_disk_folder_fail(self, mock_post, mock_close_connection, mock_open_connection, mock_init): ret = self.scapi._get_disk_folder(12345, 'name') expected_payload = 
{'filter': {'filterType': 'AND', 'filters': [ {'filterType': 'Equals', 'attributeName': 'scSerialNumber', 'attributeValue': 12345}, {'filterType': 'Equals', 'attributeName': 'name', 'attributeValue': 'name'}]}} mock_post.assert_called_once_with('StorageCenter/ScDiskFolder/GetList', expected_payload) self.assertIsNone(ret) @mock.patch.object(storagecenter_api.HttpClient, 'post', return_value=RESPONSE_200) @mock.patch.object(storagecenter_api.SCApi, '_get_json') def test_get_disk_folder_fail_bad_json(self, mock_get_json, mock_post, mock_close_connection, mock_open_connection, mock_init): mock_get_json.side_effect = (exception.VolumeBackendAPIException('')) ret = self.scapi._get_disk_folder(12345, 'name') expected_payload = {'filter': {'filterType': 'AND', 'filters': [ {'filterType': 'Equals', 'attributeName': 'scSerialNumber', 'attributeValue': 12345}, {'filterType': 'Equals', 'attributeName': 'name', 'attributeValue': 'name'}]}} mock_post.assert_called_once_with('StorageCenter/ScDiskFolder/GetList', expected_payload) self.assertIsNone(ret) @mock.patch.object(storagecenter_api.HttpClient, 'get', return_value=RESPONSE_200) @mock.patch.object(storagecenter_api.SCApi, '_get_json', return_value=SCREPL) def test_get_screplication(self, mock_get_json, mock_get, mock_close_connection, mock_open_connection, mock_init): ret = self.scapi.get_screplication({'instanceId': '1'}, 65495) self.assertDictEqual(self.SCREPL[0], ret) @mock.patch.object(storagecenter_api.HttpClient, 'get', return_value=RESPONSE_200) @mock.patch.object(storagecenter_api.SCApi, '_get_json', return_value=[]) def test_get_screplication_not_found(self, mock_get_json, mock_get, mock_close_connection, mock_open_connection, mock_init): ret = self.scapi.get_screplication({'instanceId': '1'}, 65496) self.assertIsNone(ret) @mock.patch.object(storagecenter_api.HttpClient, 'get', return_value=RESPONSE_400) def test_get_screplication_error(self, mock_get, mock_close_connection, mock_open_connection, mock_init): ret = self.scapi.get_screplication({'instanceId': '1'}, 65495) self.assertIsNone(ret) @mock.patch.object(storagecenter_api.SCApi, 'get_screplication', return_value=SCREPL[0]) @mock.patch.object(storagecenter_api.HttpClient, 'delete', return_value=RESPONSE_200) def test_delete_replication(self, mock_delete, mock_get_screplication, mock_close_connection, mock_open_connection, mock_init): destssn = 65495 expected = 'StorageCenter/ScReplication/%s' % ( self.SCREPL[0]['instanceId']) expected_payload = {'DeleteDestinationVolume': True, 'RecycleDestinationVolume': True, 'DeleteRestorePoint': True} ret = self.scapi.delete_replication(self.VOLUME, destssn) mock_delete.assert_any_call(expected, payload=expected_payload, async_call=True) self.assertTrue(ret) @mock.patch.object(storagecenter_api.SCApi, 'get_screplication', return_value=None) def test_delete_replication_not_found(self, mock_get_screplication, mock_close_connection, mock_open_connection, mock_init): destssn = 65495 ret = self.scapi.delete_replication(self.VOLUME, destssn) self.assertFalse(ret) ret = self.scapi.delete_replication(self.VOLUME, destssn) self.assertFalse(ret) @mock.patch.object(storagecenter_api.SCApi, 'get_screplication', return_value=SCREPL[0]) @mock.patch.object(storagecenter_api.HttpClient, 'delete', return_value=RESPONSE_400) def test_delete_replication_error(self, mock_delete, mock_get_screplication, mock_close_connection, mock_open_connection, mock_init): destssn = 65495 expected = 'StorageCenter/ScReplication/%s' % ( self.SCREPL[0]['instanceId']) 
expected_payload = {'DeleteDestinationVolume': True, 'RecycleDestinationVolume': True, 'DeleteRestorePoint': True} ret = self.scapi.delete_replication(self.VOLUME, destssn) mock_delete.assert_any_call(expected, payload=expected_payload, async_call=True) self.assertFalse(ret) @mock.patch.object(storagecenter_api.SCApi, '_find_qos', return_value=SCQOS) @mock.patch.object(storagecenter_api.SCApi, 'find_sc') @mock.patch.object(storagecenter_api.HttpClient, 'post', return_value=RESPONSE_200) @mock.patch.object(storagecenter_api.SCApi, '_get_json', return_value=SCREPL[0]) def test_create_replication(self, mock_get_json, mock_post, mock_find_sc, mock_find_qos, mock_close_connection, mock_open_connection, mock_init): # We don't test diskfolder. If one is found we include it. If not # then we leave it out. Checking for disk folder is tested elsewhere. ssn = 64702 destssn = 65495 qosnode = 'Cinder QoS' notes = 'Created by Dell EMC Cinder Driver' repl_prefix = 'Cinder repl of ' mock_find_sc.side_effect = [destssn, ssn, destssn, ssn, destssn, ssn] payload = {'DestinationStorageCenter': destssn, 'QosNode': self.SCQOS['instanceId'], 'SourceVolume': self.VOLUME['instanceId'], 'StorageCenter': ssn, 'ReplicateActiveReplay': False, 'Type': 'Asynchronous', 'DestinationVolumeAttributes': {'CreateSourceVolumeFolderPath': True, 'Notes': notes, 'Name': repl_prefix + self.VOLUME['name']} } ret = self.scapi.create_replication(self.VOLUME, str(destssn), qosnode, False, None, False) mock_post.assert_any_call('StorageCenter/ScReplication', payload, True) self.assertDictEqual(self.SCREPL[0], ret) payload['Type'] = 'Synchronous' payload['ReplicateActiveReplay'] = True payload['SyncMode'] = 'HighAvailability' ret = self.scapi.create_replication(self.VOLUME, str(destssn), qosnode, True, None, False) mock_post.assert_any_call('StorageCenter/ScReplication', payload, True) self.assertDictEqual(self.SCREPL[0], ret) ret = self.scapi.create_replication(self.VOLUME, str(destssn), qosnode, True, None, True) mock_post.assert_any_call('StorageCenter/ScReplication', payload, True) self.assertDictEqual(self.SCREPL[0], ret) @mock.patch.object(storagecenter_api.SCApi, '_find_qos', return_value=SCQOS) @mock.patch.object(storagecenter_api.SCApi, 'find_sc') @mock.patch.object(storagecenter_api.HttpClient, 'post') @mock.patch.object(storagecenter_api.SCApi, '_get_json', return_value=SCREPL[0]) def test_create_replication_error(self, mock_get_json, mock_post, mock_find_sc, mock_find_qos, mock_close_connection, mock_open_connection, mock_init): ssn = 64702 destssn = 65495 qosnode = 'Cinder QoS' notes = 'Created by Dell EMC Cinder Driver' repl_prefix = 'Cinder repl of ' mock_find_sc.side_effect = [destssn, ssn, destssn, ssn] mock_post.side_effect = [self.RESPONSE_400, self.RESPONSE_400, self.RESPONSE_400, self.RESPONSE_400] payload = {'DestinationStorageCenter': destssn, 'QosNode': self.SCQOS['instanceId'], 'SourceVolume': self.VOLUME['instanceId'], 'StorageCenter': ssn, 'ReplicateActiveReplay': False, 'Type': 'Asynchronous', 'DestinationVolumeAttributes': {'CreateSourceVolumeFolderPath': True, 'Notes': notes, 'Name': repl_prefix + self.VOLUME['name']} } ret = self.scapi.create_replication(self.VOLUME, str(destssn), qosnode, False, None, False) mock_post.assert_any_call('StorageCenter/ScReplication', payload, True) self.assertIsNone(ret) payload['Type'] = 'Synchronous' payload['ReplicateActiveReplay'] = True payload['SyncMode'] = 'HighAvailability' ret = self.scapi.create_replication(self.VOLUME, str(destssn), qosnode, True, None, True) 
mock_post.assert_any_call('StorageCenter/ScReplication', payload, True) self.assertIsNone(ret) @mock.patch.object(storagecenter_api.HttpClient, 'post', return_value=RESPONSE_200) @mock.patch.object(storagecenter_api.SCApi, '_get_json', return_value=SCREPL) def test_find_repl_volume(self, mock_get_json, mock_post, mock_close_connection, mock_open_connection, mock_init): ret = self.scapi.find_repl_volume('guid', 65495) self.assertDictEqual(self.SCREPL[0], ret) @mock.patch.object(storagecenter_api.HttpClient, 'post', return_value=RESPONSE_200) @mock.patch.object(storagecenter_api.SCApi, '_get_json', return_value=[]) def test_find_repl_volume_empty_list(self, mock_get_json, mock_post, mock_close_connection, mock_open_connection, mock_init): ret = self.scapi.find_repl_volume('guid', 65495) self.assertIsNone(ret) @mock.patch.object(storagecenter_api.HttpClient, 'post', return_value=RESPONSE_200) @mock.patch.object(storagecenter_api.SCApi, '_get_json', return_value=[{'instanceId': '1'}, {'instanceId': '2'}]) def test_find_repl_volume_multiple_results(self, mock_get_json, mock_post, mock_close_connection, mock_open_connection, mock_init): ret = self.scapi.find_repl_volume('guid', 65495) self.assertIsNone(ret) @mock.patch.object(storagecenter_api.HttpClient, 'post', return_value=RESPONSE_400) def test_find_repl_volume_error(self, mock_post, mock_close_connection, mock_open_connection, mock_init): ret = self.scapi.find_repl_volume('guid', 65495) self.assertIsNone(ret) @mock.patch.object(storagecenter_api.SCApi, 'get_screplication') @mock.patch.object(storagecenter_api.SCApi, 'find_repl_volume') @mock.patch.object(storagecenter_api.SCApi, 'find_volume') @mock.patch.object(storagecenter_api.SCApi, 'remove_mappings') def test_break_replication(self, mock_remove_mappings, mock_find_volume, mock_find_repl_volume, mock_get_screplication, mock_close_connection, mock_open_connection, mock_init): # Find_volume doesn't actually matter. We do not gate on this. # Switch it up just to prove that. mock_find_volume.side_effect = [self.VOLUME, # 1 self.VOLUME, # 2 None, # 3 None] # 4 # Much like find volume we do not gate on this. mock_get_screplication.side_effect = [self.SCREPL[0], # 1 None] # 2 # This mock_find_repl_volume.side_effect = [self.VOLUME, # 1 self.VOLUME, # 2 self.VOLUME, # 3 self.VOLUME] # 4 mock_remove_mappings.side_effect = [True, # 1 True, True, # 2 False, True, # 3 True, False] # 4 # Good path. ret = self.scapi.break_replication('name', None, 65495) self.assertEqual(self.VOLUME, ret) # Source found, screpl not found. ret = self.scapi.break_replication('name', None, 65495) self.assertEqual(self.VOLUME, ret) # No source vol good path. 
ret = self.scapi.break_replication('name', None, 65495) self.assertEqual(self.VOLUME, ret) # fail remove mappings ret = self.scapi.break_replication('name', None, 65495) self.assertEqual(self.VOLUME, ret) @mock.patch.object(storagecenter_api.SCApi, '_get_user_preferences') def test__find_user_replay_profiles(self, mock_get_user_preferences, mock_close_connection, mock_open_connection, mock_init): mock_get_user_preferences.return_value = {} ret = self.scapi._find_user_replay_profiles() self.assertEqual([], ret) mock_get_user_preferences.return_value = {'test': 'test', 'replayProfileList': []} ret = self.scapi._find_user_replay_profiles() self.assertEqual([], ret) mock_get_user_preferences.return_value = { 'test': 'test', 'replayProfileList': [{'instanceId': 'a'}, {'instanceId': 'b'}]} ret = self.scapi._find_user_replay_profiles() self.assertEqual(['a', 'b'], ret) @mock.patch.object(storagecenter_api.HttpClient, 'post') @mock.patch.object(storagecenter_api.SCApi, '_get_json') def test__find_daily_replay_profile(self, mock_get_json, mock_post, mock_close_connection, mock_open_connection, mock_init): mock_post.return_value = self.RESPONSE_200 mock_get_json.return_value = [{'instanceId': 'a'}] ret = self.scapi._find_daily_replay_profile() self.assertEqual('a', ret) mock_get_json.return_value = [] ret = self.scapi._find_daily_replay_profile() self.assertIsNone(ret) mock_get_json.return_value = None ret = self.scapi._find_daily_replay_profile() self.assertIsNone(ret) mock_post.return_value = self.RESPONSE_400 ret = self.scapi._find_daily_replay_profile() self.assertIsNone(ret) @mock.patch.object(storagecenter_api.HttpClient, 'post') @mock.patch.object(storagecenter_api.SCApi, '_get_json') def test__find_replay_profiles(self, mock_get_json, mock_post, mock_close_connection, mock_open_connection, mock_init): # Good run. rps = 'a,b' mock_post.return_value = self.RESPONSE_200 mock_get_json.return_value = [{'name': 'a', 'instanceId': 'a'}, {'name': 'b', 'instanceId': 'b'}, {'name': 'c', 'instanceId': 'c'}] reta, retb = self.scapi._find_replay_profiles(rps) self.assertEqual(['a', 'b'], reta) self.assertEqual(['c'], retb) # Looking for profile that doesn't exist. rps = 'a,b,d' self.assertRaises(exception.VolumeBackendAPIException, self.scapi._find_replay_profiles, rps) # Looking for nothing. rps = '' reta, retb = self.scapi._find_replay_profiles(rps) self.assertEqual([], reta) self.assertEqual([], retb) # Still Looking for nothing. rps = None reta, retb = self.scapi._find_replay_profiles(rps) self.assertEqual([], reta) self.assertEqual([], retb) # Bad call. 
rps = 'a,b' mock_post.return_value = self.RESPONSE_400 self.assertRaises(exception.VolumeBackendAPIException, self.scapi._find_replay_profiles, rps) @mock.patch.object(storagecenter_api.SCApi, '_find_replay_profiles') @mock.patch.object(storagecenter_api.SCApi, '_find_user_replay_profiles') @mock.patch.object(storagecenter_api.SCApi, '_find_daily_replay_profile') @mock.patch.object(storagecenter_api.SCApi, '_update_volume_profiles') def test_update_replay_profiles(self, mock_update_volume_profiles, mock_find_daily_replay_profile, mock_find_user_replay_profiles, mock_find_replay_profiles, mock_close_connection, mock_open_connection, mock_init): scvol = {} mock_find_replay_profiles.return_value = (['a', 'b'], ['c']) mock_update_volume_profiles.side_effect = [ True, True, True, False, True, True, False, True, True, True, True, True, True, True, True, True, False] ret = self.scapi.update_replay_profiles(scvol, 'a,b') # Two adds and one remove. self.assertEqual(3, mock_update_volume_profiles.call_count) self.assertTrue(ret) # Now update fails. ret = self.scapi.update_replay_profiles(scvol, 'a,b') # 1 failed update plus 3 from before. self.assertEqual(4, mock_update_volume_profiles.call_count) self.assertFalse(ret) # Fail adding IDs. ret = self.scapi.update_replay_profiles(scvol, 'a,b') # 3 more plus the 4 from before. self.assertEqual(7, mock_update_volume_profiles.call_count) self.assertFalse(ret) # User clearing profiles. mock_find_replay_profiles.return_value = ([], ['a', 'b', 'c']) mock_find_user_replay_profiles.return_value = ['d', 'u'] ret = self.scapi.update_replay_profiles(scvol, '') # 3 removes and 2 adds plus 7 from before. self.assertEqual(12, mock_update_volume_profiles.call_count) self.assertTrue(ret) # User clearing profiles and no defaults. (Probably not possible.) mock_find_user_replay_profiles.return_value = [] mock_find_daily_replay_profile.return_value = 'd' ret = self.scapi.update_replay_profiles(scvol, '') # 3 removes and 1 add plus 12 from before. self.assertEqual(16, mock_update_volume_profiles.call_count) self.assertTrue(ret) # _find_replay_profiles blows up so we do too.
mock_find_replay_profiles.side_effect = ( exception.VolumeBackendAPIException('aaa')) self.assertRaises(exception.VolumeBackendAPIException, self.scapi.update_replay_profiles, scvol, 'a,b') @mock.patch.object(storagecenter_api.SCApi, '_sc_live_volumes') @mock.patch.object(storagecenter_api.SCApi, '_get_live_volumes') def test_get_live_volume(self, mock_get_live_volumes, mock_sc_live_volumes, mock_close_connection, mock_open_connection, mock_init): # Basic check retlv = self.scapi.get_live_volume(None) self.assertIsNone(retlv) lv1 = {'primaryVolume': {'instanceId': '12345.1'}, 'secondaryVolume': {'instanceId': '67890.1'}} lv2 = {'primaryVolume': {'instanceId': '12345.2'}} mock_sc_live_volumes.return_value = [lv1, lv2] # Good Run retlv = self.scapi.get_live_volume('12345.2') self.assertEqual(lv2, retlv) mock_sc_live_volumes.assert_called_once_with('12345') self.assertFalse(mock_get_live_volumes.called) @mock.patch.object(storagecenter_api.SCApi, '_sc_live_volumes') @mock.patch.object(storagecenter_api.SCApi, '_get_live_volumes') def test_get_live_volume_on_secondary(self, mock_get_live_volumes, mock_sc_live_volumes, mock_close_connection, mock_open_connection, mock_init): # Basic check retlv = self.scapi.get_live_volume(None) self.assertIsNone(retlv) lv1 = {'primaryVolume': {'instanceId': '12345.1'}, 'secondaryVolume': {'instanceId': '67890.1'}} lv2 = {'primaryVolume': {'instanceId': '12345.2'}} mock_sc_live_volumes.return_value = [] mock_get_live_volumes.return_value = [lv1, lv2] # Good Run retlv = self.scapi.get_live_volume('12345.2') self.assertEqual(lv2, retlv) mock_sc_live_volumes.assert_called_once_with('12345') mock_get_live_volumes.assert_called_once_with() @mock.patch.object(storagecenter_api.SCApi, '_sc_live_volumes') @mock.patch.object(storagecenter_api.SCApi, '_get_live_volumes') def test_get_live_volume_not_found(self, mock_get_live_volumes, mock_sc_live_volumes, mock_close_connection, mock_open_connection, mock_init): lv1 = {'primaryVolume': {'instanceId': '12345.1'}, 'secondaryVolume': {'instanceId': '67890.1'}} lv2 = {'primaryVolume': {'instanceId': '12345.2'}, 'secondaryVolume': {'instanceId': '67890.2'}} mock_get_live_volumes.return_value = [lv1, lv2] mock_sc_live_volumes.return_value = [] retlv = self.scapi.get_live_volume('12345.3') self.assertIsNone(retlv) mock_sc_live_volumes.assert_called_once_with('12345') mock_get_live_volumes.assert_called_once_with() @mock.patch.object(storagecenter_api.SCApi, '_sc_live_volumes') @mock.patch.object(storagecenter_api.SCApi, '_get_live_volumes') def test_get_live_volume_swapped(self, mock_get_live_volumes, mock_sc_live_volumes, mock_close_connection, mock_open_connection, mock_init): lv1 = {'primaryVolume': {'instanceId': '12345.1'}, 'secondaryVolume': {'instanceId': '67890.1'}} lv2 = {'primaryVolume': {'instanceId': '67890.2'}, 'secondaryVolume': {'instanceId': '12345.2'}} mock_get_live_volumes.return_value = [lv1, lv2] mock_sc_live_volumes.return_value = [] retlv = self.scapi.get_live_volume('12345.2') self.assertEqual(lv2, retlv) mock_sc_live_volumes.assert_called_once_with('12345') mock_get_live_volumes.assert_called_once_with() @mock.patch.object(storagecenter_api.SCApi, '_sc_live_volumes') @mock.patch.object(storagecenter_api.SCApi, '_get_live_volumes') def test_get_live_volume_error(self, mock_get_live_volumes, mock_sc_live_volumes, mock_close_connection, mock_open_connection, mock_init): mock_get_live_volumes.return_value = [] mock_sc_live_volumes.return_value = [] retlv = self.scapi.get_live_volume('12345.2') 
self.assertIsNone(retlv) @mock.patch.object(storagecenter_api.SCApi, '_sc_live_volumes') @mock.patch.object(storagecenter_api.SCApi, '_get_live_volumes') def test_get_live_volume_by_name(self, mock_get_live_volumes, mock_sc_live_volumes, mock_close_connection, mock_open_connection, mock_init): lv1 = {'primaryVolume': {'instanceId': '12345.1'}, 'secondaryVolume': {'instanceId': '67890.1', 'instanceName': fake.VOLUME2_ID}, 'instanceName': 'Live volume of ' + fake.VOLUME2_ID} lv2 = {'primaryVolume': {'instanceId': '67890.2'}, 'secondaryVolume': {'instanceId': '12345.2', 'instanceName': fake.VOLUME_ID}, 'instanceName': 'Live volume of ' + fake.VOLUME_ID} mock_get_live_volumes.return_value = [lv1, lv2] mock_sc_live_volumes.return_value = [] retlv = self.scapi.get_live_volume('12345.2', fake.VOLUME_ID) self.assertEqual(lv2, retlv) mock_sc_live_volumes.assert_called_once_with('12345') mock_get_live_volumes.assert_called_once_with() @mock.patch.object(storagecenter_api.SCApi, '_sc_live_volumes') @mock.patch.object(storagecenter_api.SCApi, '_get_live_volumes') def test_get_live_volume_by_name_unknown(self, mock_get_live_volumes, mock_sc_live_volumes, mock_close_connection, mock_open_connection, mock_init): lv1 = {'primaryVolume': {'instanceId': '12345.1'}, 'secondaryVolume': {'instanceId': '67890.1', 'instanceName': fake.VOLUME2_ID}, 'instanceName': 'Live volume of ' + fake.VOLUME2_ID} lv2 = {'secondaryVolume': {'instanceId': '12345.2', 'instanceName': fake.VOLUME_ID}, 'instanceName': 'unknown'} mock_get_live_volumes.return_value = [lv1, lv2] mock_sc_live_volumes.return_value = [] retlv = self.scapi.get_live_volume('12345.3', fake.VOLUME_ID) self.assertEqual(lv2, retlv) mock_sc_live_volumes.assert_called_once_with('12345') mock_get_live_volumes.assert_called_once_with() @mock.patch.object(storagecenter_api.HttpClient, 'post') @mock.patch.object(storagecenter_api.SCApi, '_get_json') def test_map_secondary_volume(self, mock_get_json, mock_post, mock_close_connection, mock_open_connection, mock_init): sclivevol = {'instanceId': '101.101', 'secondaryVolume': {'instanceId': '102.101'}, 'secondaryScSerialNumber': 102} scdestsrv = {'instanceId': '102.1000'} mock_post.return_value = self.RESPONSE_200 mock_get_json.return_value = {'instanceId': '102.101.1'} ret = self.scapi.map_secondary_volume(sclivevol, scdestsrv) expected_payload = {'Server': '102.1000', 'Advanced': {'MapToDownServerHbas': True}} mock_post.assert_called_once_with( 'StorageCenter/ScLiveVolume/101.101/MapSecondaryVolume', expected_payload, True ) self.assertEqual({'instanceId': '102.101.1'}, ret) @mock.patch.object(storagecenter_api.HttpClient, 'post') def test_map_secondary_volume_err(self, mock_post, mock_close_connection, mock_open_connection, mock_init): sclivevol = {'instanceId': '101.101', 'secondaryVolume': {'instanceId': '102.101'}, 'secondaryScSerialNumber': 102} scdestsrv = {'instanceId': '102.1000'} mock_post.return_value = self.RESPONSE_400 ret = self.scapi.map_secondary_volume(sclivevol, scdestsrv) expected_payload = {'Server': '102.1000', 'Advanced': {'MapToDownServerHbas': True}} mock_post.assert_called_once_with( 'StorageCenter/ScLiveVolume/101.101/MapSecondaryVolume', expected_payload, True ) self.assertIsNone(ret) @mock.patch.object(storagecenter_api.HttpClient, 'post') @mock.patch.object(storagecenter_api.SCApi, '_get_json') @mock.patch.object(storagecenter_api.SCApi, '_find_qos') @mock.patch.object(storagecenter_api.SCApi, 'find_sc') def test_create_live_volume(self, mock_find_sc, mock_find_qos, mock_get_json, 
mock_post, mock_close_connection, mock_open_connection, mock_init): scvol = {'instanceId': '101.1', 'name': 'name'} sclivevol = {'instanceId': '101.101', 'secondaryVolume': {'instanceId': '102.101'}, 'secondaryScSerialNumber': 102} remotessn = '102' active = True sync = False primaryqos = 'fast' secondaryqos = 'slow' mock_find_sc.return_value = 102 mock_find_qos.side_effect = [{'instanceId': '101.1001'}, {'instanceId': '102.1001'}] mock_post.return_value = self.RESPONSE_200 mock_get_json.return_value = sclivevol ret = self.scapi.create_live_volume(scvol, remotessn, active, sync, False, primaryqos, secondaryqos) mock_find_sc.assert_called_once_with(102) mock_find_qos.assert_any_call(primaryqos) mock_find_qos.assert_any_call(secondaryqos, 102) self.assertEqual(sclivevol, ret) @mock.patch.object(storagecenter_api.HttpClient, 'post') @mock.patch.object(storagecenter_api.SCApi, '_get_json') @mock.patch.object(storagecenter_api.SCApi, '_find_qos') @mock.patch.object(storagecenter_api.SCApi, 'find_sc') def test_create_live_volume_autofailover(self, mock_find_sc, mock_find_qos, mock_get_json, mock_post, mock_close_connection, mock_open_connection, mock_init): scvol = {'instanceId': '101.1', 'name': 'name'} sclivevol = {'instanceId': '101.101', 'secondaryVolume': {'instanceId': '102.101'}, 'secondaryScSerialNumber': 102} remotessn = '102' active = True sync = False primaryqos = 'fast' secondaryqos = 'slow' mock_find_sc.return_value = 102 mock_find_qos.side_effect = [{'instanceId': '101.1001'}, {'instanceId': '102.1001'}] mock_post.return_value = self.RESPONSE_200 mock_get_json.return_value = sclivevol ret = self.scapi.create_live_volume(scvol, remotessn, active, sync, True, primaryqos, secondaryqos) mock_find_sc.assert_called_once_with(102) mock_find_qos.assert_any_call(primaryqos) mock_find_qos.assert_any_call(secondaryqos, 102) self.assertEqual(sclivevol, ret) # Make sure sync flipped and that we set HighAvailability. 
expected = {'SyncMode': 'HighAvailability', 'SwapRolesAutomaticallyEnabled': False, 'SecondaryStorageCenter': 102, 'FailoverAutomaticallyEnabled': True, 'StorageCenter': 12345, 'RestoreAutomaticallyEnabled': True, 'SecondaryQosNode': '102.1001', 'ReplicateActiveReplay': True, 'PrimaryQosNode': '101.1001', 'Type': 'Synchronous', 'PrimaryVolume': '101.1', 'SecondaryVolumeAttributes': {'Notes': 'Created by Dell EMC Cinder Driver', 'CreateSourceVolumeFolderPath': True, 'Name': 'name'} } mock_post.assert_called_once_with('StorageCenter/ScLiveVolume', expected, True) @mock.patch.object(storagecenter_api.HttpClient, 'post') @mock.patch.object(storagecenter_api.SCApi, '_find_qos') @mock.patch.object(storagecenter_api.SCApi, 'find_sc') def test_create_live_volume_error(self, mock_find_sc, mock_find_qos, mock_post, mock_close_connection, mock_open_connection, mock_init): scvol = {'instanceId': '101.1', 'name': 'name'} remotessn = '102' active = True sync = False primaryqos = 'fast' secondaryqos = 'slow' mock_find_sc.return_value = 102 mock_find_qos.side_effect = [{'instanceId': '101.1001'}, {'instanceId': '102.1001'}] mock_post.return_value = self.RESPONSE_400 ret = self.scapi.create_live_volume(scvol, remotessn, active, sync, False, primaryqos, secondaryqos) mock_find_sc.assert_called_once_with(102) mock_find_qos.assert_any_call(primaryqos) mock_find_qos.assert_any_call(secondaryqos, 102) self.assertIsNone(ret) @mock.patch.object(storagecenter_api.SCApi, '_find_qos') @mock.patch.object(storagecenter_api.SCApi, 'find_sc') def test_create_live_volume_no_dest(self, mock_find_sc, mock_find_qos, mock_close_connection, mock_open_connection, mock_init): scvol = {'instanceId': '101.1', 'name': 'name'} remotessn = '102' active = True sync = False primaryqos = 'fast' secondaryqos = 'slow' mock_find_sc.return_value = 102 mock_find_qos.return_value = {} ret = self.scapi.create_live_volume(scvol, remotessn, active, sync, False, primaryqos, secondaryqos) mock_find_sc.assert_called_once_with(102) mock_find_qos.assert_any_call(primaryqos) mock_find_qos.assert_any_call(secondaryqos, 102) self.assertIsNone(ret) @mock.patch.object(storagecenter_api.SCApi, '_find_qos') @mock.patch.object(storagecenter_api.SCApi, 'find_sc') def test_create_live_volume_no_qos(self, mock_find_sc, mock_find_qos, mock_close_connection, mock_open_connection, mock_init): scvol = {'instanceId': '101.1', 'name': 'name'} remotessn = '102' active = True sync = False primaryqos = 'fast' secondaryqos = 'slow' mock_find_sc.return_value = 102 mock_find_qos.return_value = None ret = self.scapi.create_live_volume(scvol, remotessn, active, sync, False, primaryqos, secondaryqos) mock_find_sc.assert_called_once_with(102) mock_find_qos.assert_any_call(primaryqos) mock_find_qos.assert_any_call(secondaryqos, 102) self.assertIsNone(ret) @mock.patch.object(storagecenter_api.SCApi, '_find_qos') @mock.patch.object(storagecenter_api.SCApi, 'find_sc') def test_create_live_volume_no_secondary_qos(self, mock_find_sc, mock_find_qos, mock_close_connection, mock_open_connection, mock_init): scvol = {'instanceId': '101.1', 'name': 'name'} remotessn = '102' active = True sync = False primaryqos = 'fast' secondaryqos = 'slow' mock_find_sc.return_value = 102 mock_find_qos.side_effect = [{'instanceId': '101.1001'}, None] ret = self.scapi.create_live_volume(scvol, remotessn, active, sync, False, primaryqos, secondaryqos) mock_find_sc.assert_called_once_with(102) mock_find_qos.assert_any_call(primaryqos) mock_find_qos.assert_any_call(secondaryqos, 102) self.assertIsNone(ret) 
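# Replay manage/unmanage and replication failover helper tests follow (find_common_replay, start_replication, replicate_to_common, flip_replication, replication_progress).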
@mock.patch.object(storagecenter_api.HttpClient, 'put') def test_manage_replay(self, mock_put, mock_close_connection, mock_open_connection, mock_init): screplay = {'description': 'notguid', 'instanceId': 1} payload = {'description': 'guid', 'expireTime': 0} mock_put.return_value = self.RESPONSE_200 ret = self.scapi.manage_replay(screplay, 'guid') self.assertTrue(ret) mock_put.assert_called_once_with('StorageCenter/ScReplay/1', payload, True) mock_put.return_value = self.RESPONSE_400 ret = self.scapi.manage_replay(screplay, 'guid') self.assertFalse(ret) @mock.patch.object(storagecenter_api.HttpClient, 'put') def test_unmanage_replay(self, mock_put, mock_close_connection, mock_open_connection, mock_init): screplay = {'description': 'guid', 'instanceId': 1} payload = {'expireTime': 1440} mock_put.return_value = self.RESPONSE_200 ret = self.scapi.unmanage_replay(screplay) self.assertTrue(ret) mock_put.assert_called_once_with('StorageCenter/ScReplay/1', payload, True) mock_put.return_value = self.RESPONSE_400 ret = self.scapi.unmanage_replay(screplay) self.assertFalse(ret) @mock.patch.object(storagecenter_api.SCApi, '_get_replay_list') def test_find_common_replay(self, mock_get_replay_list, mock_close_connection, mock_open_connection, mock_init): dreplays = [{'globalIndex': '11111.113'}, {'globalIndex': '11111.112'}, {'globalIndex': '11111.111'}] sreplays = [{'globalIndex': '12345.112'}, {'globalIndex': '12345.111'}, {'globalIndex': '11111.112'}, {'globalIndex': '11111.111'}] xreplays = [{'globalIndex': '12345.112'}, {'globalIndex': '12345.111'}] mock_get_replay_list.side_effect = [dreplays, sreplays, dreplays, xreplays] ret = self.scapi.find_common_replay({'instanceId': '12345.1'}, {'instanceId': '11111.1'}) self.assertEqual({'globalIndex': '11111.112'}, ret) ret = self.scapi.find_common_replay(None, {'instanceId': '11111.1'}) self.assertIsNone(ret) ret = self.scapi.find_common_replay({'instanceId': '12345.1'}, None) self.assertIsNone(ret) ret = self.scapi.find_common_replay({'instanceId': '12345.1'}, {'instanceId': '11111.1'}) self.assertIsNone(ret) @mock.patch.object(storagecenter_api.SCApi, '_find_qos') @mock.patch.object(storagecenter_api.SCApi, '_get_json') @mock.patch.object(storagecenter_api.HttpClient, 'post') def test_start_replication(self, mock_post, mock_get_json, mock_find_qos, mock_close_connection, mock_open_connection, mock_init): svolume = {'name': 'guida', 'instanceId': '12345.101', 'scSerialNumber': 12345} dvolume = {'name': 'guidb', 'instanceId': '11111.101', 'scSerialNumber': 11111} mock_post.return_value = self.RESPONSE_200 mock_get_json.return_value = {'instanceId': '12345.201'} mock_find_qos.return_value = {'instanceId': '12345.1'} expected = {'QosNode': '12345.1', 'SourceVolume': '12345.101', 'StorageCenter': 12345, 'ReplicateActiveReplay': False, 'Type': 'Asynchronous', 'DestinationVolume': '11111.101', 'DestinationStorageCenter': 11111} ret = self.scapi.start_replication(svolume, dvolume, 'Asynchronous', 'cinderqos', False) self.assertEqual(mock_get_json.return_value, ret) mock_post.assert_called_once_with('StorageCenter/ScReplication', expected, True) mock_post.return_value = self.RESPONSE_400 ret = self.scapi.start_replication(svolume, dvolume, 'Asynchronous', 'cinderqos', False) self.assertIsNone(ret) mock_post.return_value = self.RESPONSE_200 mock_find_qos.return_value = None ret = self.scapi.start_replication(svolume, dvolume, 'Asynchronous', 'cinderqos', False) self.assertIsNone(ret) mock_find_qos.return_value = {'instanceId': '12345.1'} ret = 
self.scapi.start_replication(None, dvolume, 'Asynchronous', 'cinderqos', False) self.assertIsNone(ret) ret = self.scapi.start_replication(svolume, None, 'Asynchronous', 'cinderqos', False) self.assertIsNone(ret) @mock.patch.object(storagecenter_api.SCApi, 'find_common_replay') @mock.patch.object(storagecenter_api.SCApi, 'create_replay') @mock.patch.object(storagecenter_api.SCApi, 'start_replication') @mock.patch.object(storagecenter_api.SCApi, '_get_json') @mock.patch.object(storagecenter_api.HttpClient, 'post') def test_replicate_to_common(self, mock_post, mock_get_json, mock_start_replication, mock_create_replay, mock_find_common_replay, mock_close_connection, mock_open_connection, mock_init): creplay = {'instanceId': '11111.201'} svolume = {'name': 'guida'} dvolume = {'name': 'guidb', 'volumeFolder': {'instanceId': '11111.1'}} vvolume = {'name': 'guidc'} mock_find_common_replay.return_value = creplay mock_post.return_value = self.RESPONSE_200 mock_get_json.return_value = vvolume mock_create_replay.return_value = {'instanceId': '12345.202'} mock_start_replication.return_value = {'instanceId': '12345.203'} # Simple common test. ret = self.scapi.replicate_to_common(svolume, dvolume, 'cinderqos') self.assertEqual(mock_start_replication.return_value, ret) mock_post.assert_called_once_with( 'StorageCenter/ScReplay/11111.201/CreateView', {'Name': 'fback:guidb', 'Notes': 'Created by Dell EMC Cinder Driver', 'VolumeFolder': '11111.1'}, True) mock_create_replay.assert_called_once_with(svolume, 'failback', 600) mock_start_replication.assert_called_once_with(svolume, vvolume, 'Asynchronous', 'cinderqos', False) mock_create_replay.return_value = None # Unable to create a replay. ret = self.scapi.replicate_to_common(svolume, dvolume, 'cinderqos') self.assertIsNone(ret) mock_create_replay.return_value = {'instanceId': '12345.202'} mock_get_json.return_value = None # Create view volume fails. ret = self.scapi.replicate_to_common(svolume, dvolume, 'cinderqos') self.assertIsNone(ret) mock_get_json.return_value = vvolume mock_post.return_value = self.RESPONSE_400 # Post call returns an error. ret = self.scapi.replicate_to_common(svolume, dvolume, 'cinderqos') self.assertIsNone(ret) mock_post.return_value = self.RESPONSE_200 mock_find_common_replay.return_value = None # No common replay found. ret = self.scapi.replicate_to_common(svolume, dvolume, 'cinderqos') self.assertIsNone(ret) @mock.patch.object(storagecenter_api.SCApi, 'delete_replication') @mock.patch.object(storagecenter_api.SCApi, 'start_replication') @mock.patch.object(storagecenter_api.SCApi, 'rename_volume') def test_flip_replication(self, mock_rename_volume, mock_start_replication, mock_delete_replication, mock_close_connection, mock_open_connection, mock_init): svolume = {'scSerialNumber': '12345.1'} dvolume = {'scSerialNumber': '11111.1'} name = 'guid' replicationtype = 'Synchronous' qosnode = 'cinderqos' activereplay = True mock_delete_replication.return_value = True mock_start_replication.return_value = {'instanceId': '11111.101'} mock_rename_volume.return_value = True # Good run. 
        ret = self.scapi.flip_replication(svolume, dvolume, name,
                                          replicationtype, qosnode,
                                          activereplay)
        self.assertTrue(ret)
        mock_delete_replication.assert_called_once_with(svolume, '11111.1',
                                                        False)
        mock_start_replication.assert_called_once_with(dvolume, svolume,
                                                       replicationtype,
                                                       qosnode, activereplay)
        mock_rename_volume.assert_any_call(svolume, 'Cinder repl of guid')
        mock_rename_volume.assert_any_call(dvolume, 'guid')
        mock_rename_volume.return_value = False
        # Unable to rename volumes.
        ret = self.scapi.flip_replication(svolume, dvolume, name,
                                          replicationtype, qosnode,
                                          activereplay)
        self.assertFalse(ret)
        mock_rename_volume.return_value = True
        mock_start_replication.return_value = None
        # Start replication call fails.
        ret = self.scapi.flip_replication(svolume, dvolume, name,
                                          replicationtype, qosnode,
                                          activereplay)
        self.assertFalse(ret)
        mock_delete_replication.return_value = False
        mock_start_replication.return_value = {'instanceId': '11111.101'}
        # Delete old replication call fails.
        ret = self.scapi.flip_replication(svolume, dvolume, name,
                                          replicationtype, qosnode,
                                          activereplay)
        self.assertFalse(ret)

    @mock.patch.object(storagecenter_api.SCApi, '_get_json')
    @mock.patch.object(storagecenter_api.HttpClient, 'get')
    def test_replication_progress(self, mock_get, mock_get_json,
                                  mock_close_connection,
                                  mock_open_connection,
                                  mock_init):
        mock_get.return_value = self.RESPONSE_200
        mock_get_json.return_value = {'synced': True,
                                      'amountRemaining': '0 Bytes'}
        # Good run
        retbool, retnum = self.scapi.replication_progress('11111.101')
        self.assertTrue(retbool)
        self.assertEqual(0.0, retnum)
        # SC replication ID is None.
        retbool, retnum = self.scapi.replication_progress(None)
        self.assertIsNone(retbool)
        self.assertIsNone(retnum)
        mock_get.return_value = self.RESPONSE_400
        # Get progress call fails.
retbool, retnum = self.scapi.replication_progress('11111.101') self.assertIsNone(retbool) self.assertIsNone(retnum) @mock.patch.object(storagecenter_api.HttpClient, 'delete') def test_delete_live_volume(self, mock_delete, mock_close_connection, mock_open_connection, mock_init): mock_delete.return_value = self.RESPONSE_200 ret = self.scapi.delete_live_volume({'instanceId': '12345.101'}, True) self.assertTrue(ret) mock_delete.return_value = self.RESPONSE_400 ret = self.scapi.delete_live_volume({'instanceId': '12345.101'}, True) self.assertFalse(ret) @mock.patch.object(storagecenter_api.HttpClient, 'post') def test_swap_roles_live_volume(self, mock_post, mock_close_connection, mock_open_connection, mock_init): mock_post.return_value = self.RESPONSE_200 lv = {'instanceId': '12345.0'} ret = self.scapi.swap_roles_live_volume(lv) self.assertTrue(ret) @mock.patch.object(storagecenter_api.HttpClient, 'post') def test_swap_roles_live_volume_fail(self, mock_post, mock_close_connection, mock_open_connection, mock_init): mock_post.return_value = self.RESPONSE_400 lv = {'instanceId': '12345.0'} ret = self.scapi.swap_roles_live_volume(lv) self.assertFalse(ret) @mock.patch.object(storagecenter_api.SCApi, '_get_json') @mock.patch.object(storagecenter_api.HttpClient, 'post') def test__find_qos_profile(self, mock_post, mock_get_json, mock_close_connection, mock_open_connection, mock_init): mock_post.return_value = self.RESPONSE_200 mock_get_json.return_value = [{'instanceId': '12345.0'}] expected_payload = {'filter': {'filterType': 'AND', 'filters': [ {'filterType': 'Equals', 'attributeName': 'ScSerialNumber', 'attributeValue': 12345}, {'filterType': 'Equals', 'attributeName': 'Name', 'attributeValue': 'Default'}, {'filterType': 'Equals', 'attributeName': 'profileType', 'attributeValue': 'VolumeQosProfile'}]}} ret = self.scapi._find_qos_profile('Default', False) self.assertEqual({'instanceId': '12345.0'}, ret) mock_post.assert_called_once_with('StorageCenter/ScQosProfile/GetList', expected_payload) def test__find_qos_no_qosprofile(self, mock_close_connection, mock_open_connection, mock_init): ret = self.scapi._find_qos_profile('', False) self.assertIsNone(ret) @mock.patch.object(storagecenter_api.HttpClient, 'post') def test__find_qos_error(self, mock_post, mock_close_connection, mock_open_connection, mock_init): mock_post.return_value = self.RESPONSE_400 ret = self.scapi._find_qos_profile('Default', False) self.assertIsNone(ret) @mock.patch.object(storagecenter_api.SCApi, '_get_json') @mock.patch.object(storagecenter_api.HttpClient, 'post') def test__find_qos_profile_empty_list(self, mock_post, mock_get_json, mock_close_connection, mock_open_connection, mock_init): mock_post.return_value = self.RESPONSE_200 mock_get_json.return_value = [] ret = self.scapi._find_qos_profile('Default', False) self.assertIsNone(ret) @mock.patch.object(storagecenter_api.SCApi, '_get_json') @mock.patch.object(storagecenter_api.HttpClient, 'post') def test__find_qos_profile_group(self, mock_post, mock_get_json, mock_close_connection, mock_open_connection, mock_init): mock_post.return_value = self.RESPONSE_200 mock_get_json.return_value = [{'instanceId': '12345.0'}] expected_payload = {'filter': {'filterType': 'AND', 'filters': [ {'filterType': 'Equals', 'attributeName': 'ScSerialNumber', 'attributeValue': 12345}, {'filterType': 'Equals', 'attributeName': 'Name', 'attributeValue': 'Default'}, {'filterType': 'Equals', 'attributeName': 'profileType', 'attributeValue': 'GroupQosProfile'}]}} ret = 
self.scapi._find_qos_profile('Default', True) self.assertEqual({'instanceId': '12345.0'}, ret) mock_post.assert_called_once_with('StorageCenter/ScQosProfile/GetList', expected_payload) @mock.patch.object(storagecenter_api.SCApi, '_get_json') @mock.patch.object(storagecenter_api.HttpClient, 'post') def test__find_datareduction_profile(self, mock_post, mock_get_json, mock_close_connection, mock_open_connection, mock_init): mock_post.return_value = self.RESPONSE_200 mock_get_json.return_value = [{'instanceId': '12345.0'}] expected_payload = {'filter': {'filterType': 'AND', 'filters': [ {'filterType': 'Equals', 'attributeName': 'ScSerialNumber', 'attributeValue': 12345}, {'filterType': 'Equals', 'attributeName': 'type', 'attributeValue': 'Compression'}]}} ret = self.scapi._find_data_reduction_profile('Compression') self.assertEqual({'instanceId': '12345.0'}, ret) mock_post.assert_called_once_with( 'StorageCenter/ScDataReductionProfile/GetList', expected_payload) def test__find_datareduction_profile_no_drprofile(self, mock_close_connection, mock_open_connection, mock_init): ret = self.scapi._find_data_reduction_profile('') self.assertIsNone(ret) @mock.patch.object(storagecenter_api.HttpClient, 'post') def test__find_datareduction_profile_error(self, mock_post, mock_close_connection, mock_open_connection, mock_init): mock_post.return_value = self.RESPONSE_400 ret = self.scapi._find_data_reduction_profile('Compression') self.assertIsNone(ret) @mock.patch.object(storagecenter_api.SCApi, '_get_json') @mock.patch.object(storagecenter_api.HttpClient, 'post') def test__find_datareduction_profile_empty_list(self, mock_post, mock_get_json, mock_close_connection, mock_open_connection, mock_init): mock_post.return_value = self.RESPONSE_200 mock_get_json.return_value = [] ret = self.scapi._find_data_reduction_profile('Compression') self.assertIsNone(ret) def test__check_add_profile_payload(self, mock_close_connection, mock_open_connection, mock_init): payload = {} profile = {'instanceId': '12345.0'} self.scapi._check_add_profile_payload(payload, profile, 'Profile1', 'GroupQosProfile') self.assertEqual({'GroupQosProfile': '12345.0'}, payload) def test__check_add_profile_payload_no_name(self, mock_close_connection, mock_open_connection, mock_init): payload = {} profile = {'instanceId': '12345.0'} self.scapi._check_add_profile_payload(payload, profile, None, 'GroupQosProfile') self.assertEqual({}, payload) def test__check_add_profile_payload_no_profile(self, mock_close_connection, mock_open_connection, mock_init): payload = {} profile = None self.assertRaises(exception.VolumeBackendAPIException, self.scapi._check_add_profile_payload, payload, profile, 'Profile1', 'VolumeQosProfile') @mock.patch.object(storagecenter_api.SCApi, '_get_user_preferences') @mock.patch.object(storagecenter_api.HttpClient, 'put') @mock.patch.object(storagecenter_api.SCApi, '_find_data_reduction_profile') def test_update_datareduction_profile( self, mock_find_datareduction_profile, mock_put, mock_prefs, mock_close_connection, mock_open_connection, mock_init): # Test we get and set our default mock_find_datareduction_profile.return_value = {} mock_prefs.return_value = { 'allowDataReductionSelection': True, 'dataReductionProfile': {'name': 'Default', 'instanceId': '12345.0'}} scvolume = {'name': fake.VOLUME_ID, 'instanceId': '12345.101'} mock_put.return_value = self.RESPONSE_200 expected = {'dataReductionProfile': '12345.0'} res = self.scapi.update_datareduction_profile(scvolume, None) self.assertTrue(res) 
mock_put.assert_called_once_with( 'StorageCenter/ScVolumeConfiguration/12345.101', expected, True) @mock.patch.object(storagecenter_api.SCApi, '_get_user_preferences') @mock.patch.object(storagecenter_api.HttpClient, 'put') @mock.patch.object(storagecenter_api.SCApi, '_find_data_reduction_profile') def test_update_datareduction_profile_error( self, mock_find_datareduction_profile, mock_put, mock_prefs, mock_close_connection, mock_open_connection, mock_init): # Test we get and set our default mock_find_datareduction_profile.return_value = {} mock_prefs.return_value = { 'allowDataReductionSelection': True, 'dataReductionProfile': {'name': 'Default', 'instanceId': '12345.0'}} scvolume = {'name': fake.VOLUME_ID, 'instanceId': '12345.101'} mock_put.return_value = self.RESPONSE_400 expected = {'dataReductionProfile': '12345.0'} res = self.scapi.update_datareduction_profile(scvolume, None) self.assertFalse(res) mock_put.assert_called_once_with( 'StorageCenter/ScVolumeConfiguration/12345.101', expected, True) @mock.patch.object(storagecenter_api.SCApi, '_get_user_preferences') @mock.patch.object(storagecenter_api.SCApi, '_find_data_reduction_profile') def test_update_datareduction_profile_not_found( self, mock_find_datareduction_profile, mock_prefs, mock_close_connection, mock_open_connection, mock_init): mock_find_datareduction_profile.return_value = None mock_prefs.return_value = {'allowDataReductionSelection': True} scvolume = {'name': fake.VOLUME_ID, 'instanceId': '12345.101'} res = self.scapi.update_datareduction_profile(scvolume, 'Profile') self.assertFalse(res) @mock.patch.object(storagecenter_api.SCApi, '_get_user_preferences') @mock.patch.object(storagecenter_api.SCApi, '_find_data_reduction_profile') def test_update_datareduction_profile_not_allowed( self, mock_find_datareduction_profile, mock_prefs, mock_close_connection, mock_open_connection, mock_init): mock_find_datareduction_profile.return_value = None mock_prefs.return_value = {'allowDataReductionSelection': False} scvolume = {'name': fake.VOLUME_ID, 'instanceId': '12345.101'} res = self.scapi.update_datareduction_profile(scvolume, None) self.assertFalse(res) @mock.patch.object(storagecenter_api.SCApi, '_get_user_preferences') @mock.patch.object(storagecenter_api.SCApi, '_find_data_reduction_profile') def test_update_datareduction_profile_prefs_not_found( self, mock_find_datareduction_profile, mock_prefs, mock_close_connection, mock_open_connection, mock_init): mock_find_datareduction_profile.return_value = None mock_prefs.return_value = None scvolume = {'name': fake.VOLUME_ID, 'instanceId': '12345.101'} res = self.scapi.update_datareduction_profile(scvolume, None) self.assertFalse(res) @mock.patch.object(storagecenter_api.SCApi, '_get_user_preferences') @mock.patch.object(storagecenter_api.SCApi, '_find_data_reduction_profile') def test_update_datareduction_profile_default_not_found( self, mock_find_datareduction_profile, mock_prefs, mock_close_connection, mock_open_connection, mock_init): mock_find_datareduction_profile.return_value = None mock_prefs.return_value = {'allowDataReductionSelection': True} scvolume = {'name': fake.VOLUME_ID, 'instanceId': '12345.101'} res = self.scapi.update_datareduction_profile(scvolume, None) self.assertFalse(res) @mock.patch.object(storagecenter_api.SCApi, '_get_user_preferences') @mock.patch.object(storagecenter_api.HttpClient, 'put', return_value=RESPONSE_200) @mock.patch.object(storagecenter_api.SCApi, '_find_data_reduction_profile') def test_update_datareduction_profile_default( self, 
mock_find_datareduction_profile, mock_put, mock_prefs, mock_close_connection, mock_open_connection, mock_init): # Test we get and set our default mock_find_datareduction_profile.return_value = None mock_prefs.return_value = { 'allowDataReductionSelection': True, 'dataReductionProfile': {'name': 'Default', 'instanceId': '12345.0'}} scvolume = {'name': fake.VOLUME_ID, 'instanceId': '12345.101'} res = self.scapi.update_datareduction_profile(scvolume, None) self.assertTrue(res) @mock.patch.object(storagecenter_api.SCApi, '_get_user_preferences') @mock.patch.object(storagecenter_api.HttpClient, 'put') @mock.patch.object(storagecenter_api.SCApi, '_find_qos_profile') def test_update_qos_profile( self, mock_find_qos_profile, mock_put, mock_prefs, mock_close_connection, mock_open_connection, mock_init): # Test we get and set our default mock_find_qos_profile.return_value = {} mock_prefs.return_value = { 'allowQosProfileSelection': True, 'volumeQosProfile': {'name': 'Default', 'instanceId': '12345.0'}} scvolume = {'name': fake.VOLUME_ID, 'instanceId': '12345.101'} mock_put.return_value = self.RESPONSE_200 expected = {'volumeQosProfile': '12345.0'} res = self.scapi.update_qos_profile(scvolume, None) self.assertTrue(res) mock_put.assert_called_once_with( 'StorageCenter/ScVolumeConfiguration/12345.101', expected, True) @mock.patch.object(storagecenter_api.SCApi, '_get_user_preferences') @mock.patch.object(storagecenter_api.HttpClient, 'put') @mock.patch.object(storagecenter_api.SCApi, '_find_data_reduction_profile') def test_update_qos_profile_error( self, mock_find_qos_profile, mock_put, mock_prefs, mock_close_connection, mock_open_connection, mock_init): # Test we get and set our default mock_find_qos_profile.return_value = {} mock_prefs.return_value = { 'allowQosProfileSelection': True, 'volumeQosProfile': {'name': 'Default', 'instanceId': '12345.0'}} scvolume = {'name': fake.VOLUME_ID, 'instanceId': '12345.101'} mock_put.return_value = self.RESPONSE_400 expected = {'volumeQosProfile': '12345.0'} res = self.scapi.update_qos_profile(scvolume, None) self.assertFalse(res) mock_put.assert_called_once_with( 'StorageCenter/ScVolumeConfiguration/12345.101', expected, True) @mock.patch.object(storagecenter_api.SCApi, '_get_user_preferences') @mock.patch.object(storagecenter_api.SCApi, '_find_qos_profile') def test_update_qos_profile_not_found( self, mock_find_qos_profile, mock_prefs, mock_close_connection, mock_open_connection, mock_init): mock_find_qos_profile.return_value = None mock_prefs.return_value = {'allowQosProfileSelection': True} scvolume = {'name': fake.VOLUME_ID, 'instanceId': '12345.101'} res = self.scapi.update_qos_profile(scvolume, 'Profile') self.assertFalse(res) @mock.patch.object(storagecenter_api.SCApi, '_get_user_preferences') @mock.patch.object(storagecenter_api.SCApi, '_find_qos_profile') def test_update_qos_profile_not_allowed( self, mock_find_qos_profile, mock_prefs, mock_close_connection, mock_open_connection, mock_init): mock_find_qos_profile.return_value = None mock_prefs.return_value = {'allowQosProfileSelection': False} scvolume = {'name': fake.VOLUME_ID, 'instanceId': '12345.101'} res = self.scapi.update_qos_profile(scvolume, None) self.assertFalse(res) @mock.patch.object(storagecenter_api.SCApi, '_get_user_preferences') @mock.patch.object(storagecenter_api.SCApi, '_find_qos_profile') def test_update_qos_profile_prefs_not_found( self, mock_find_qos_profile, mock_prefs, mock_close_connection, mock_open_connection, mock_init): mock_find_qos_profile.return_value = None 
mock_prefs.return_value = None scvolume = {'name': fake.VOLUME_ID, 'instanceId': '12345.101'} res = self.scapi.update_qos_profile(scvolume, None) self.assertFalse(res) @mock.patch.object(storagecenter_api.SCApi, '_get_user_preferences') @mock.patch.object(storagecenter_api.SCApi, '_find_qos_profile') def test_update_qos_profile_default_not_found( self, mock_find_qos_profile, mock_prefs, mock_close_connection, mock_open_connection, mock_init): mock_find_qos_profile.return_value = None mock_prefs.return_value = {'allowQosProfileSelection': True} scvolume = {'name': fake.VOLUME_ID, 'instanceId': '12345.101'} res = self.scapi.update_qos_profile(scvolume, None) self.assertFalse(res) @mock.patch.object(storagecenter_api.SCApi, '_get_user_preferences') @mock.patch.object(storagecenter_api.HttpClient, 'put') @mock.patch.object(storagecenter_api.SCApi, '_find_qos_profile') def test_update_qos_profile_default( self, mock_find_qos_profile, mock_put, mock_prefs, mock_close_connection, mock_open_connection, mock_init): # Test we get and set our default mock_find_qos_profile.return_value = None mock_prefs.return_value = { 'allowQosProfileSelection': True, 'volumeQosProfile': {'name': 'Default', 'instanceId': '12345.0'}} mock_put.return_value = self.RESPONSE_200 scvolume = {'name': fake.VOLUME_ID, 'instanceId': '12345.101'} res = self.scapi.update_qos_profile(scvolume, None) self.assertTrue(res) class DellSCSanAPIConnectionTestCase(test.TestCase): """DellSCSanAPIConnectionTestCase Class to test the Storage Center API connection using Mock. """ # Create a Response object that indicates OK response_ok = models.Response() response_ok.status_code = 200 response_ok.reason = u'ok' RESPONSE_200 = response_ok # Create a Response object with no content response_nc = models.Response() response_nc.status_code = 204 response_nc.reason = u'duplicate' RESPONSE_204 = response_nc # Create a Response object is a pure error. response_bad = models.Response() response_bad.status_code = 400 response_bad._content = '' response_bad._content_consumed = True response_bad.reason = u'bad request' response_bad._content = '' response_bad._content_consumed = True RESPONSE_400 = response_bad APIDICT = {u'instanceId': u'0', u'hostName': u'192.168.0.200', u'userId': 434226, u'connectionKey': u'', u'minApiVersion': u'0.1', u'webServicesPort': 3033, u'locale': u'en_US', u'objectType': u'ApiConnection', u'secureString': u'', u'applicationVersion': u'2.0.1', u'source': u'REST', u'commandLine': False, u'application': u'Cinder REST Driver', u'sessionKey': 1436460614863, u'provider': u'EnterpriseManager', u'instanceName': u'ApiConnection', u'connected': True, u'userName': u'Admin', u'useHttps': False, u'providerVersion': u'15.3.1.186', u'apiVersion': u'2.2', u'apiBuild': 199} def setUp(self): super(DellSCSanAPIConnectionTestCase, self).setUp() # Configuration is a mock. A mock is pretty much a blank # slate. I believe mock's done in setup are not happy time # mocks. So we just do a few things like driver config here. self.configuration = mock.Mock() self.configuration.san_is_local = False self.configuration.san_ip = "192.168.0.1" self.configuration.san_login = "admin" self.configuration.san_password = "mmm" self.configuration.dell_sc_ssn = 12345 self.configuration.dell_sc_server_folder = 'openstack' self.configuration.dell_sc_volume_folder = 'openstack' # Note that we set this to True even though we do not # test this functionality. 
This is sent directly to # the requests calls as the verify parameter and as # that is a third party library deeply stubbed out is # not directly testable by this code. Note that in the # case that this fails the driver fails to even come # up. self.configuration.dell_sc_verify_cert = True self.configuration.dell_sc_api_port = 3033 self.configuration.target_ip_address = '192.168.1.1' self.configuration.target_port = 3260 self._context = context.get_admin_context() self.asynctimeout = 15 self.synctimeout = 30 self.apiversion = '2.0' # Set up the SCApi self.scapi = storagecenter_api.SCApi( self.configuration.san_ip, self.configuration.dell_sc_api_port, self.configuration.san_login, self.configuration.san_password, self.configuration.dell_sc_verify_cert, self.asynctimeout, self.synctimeout, self.apiversion) # Set up the scapi configuration vars self.scapi.ssn = self.configuration.dell_sc_ssn self.scapi.sfname = self.configuration.dell_sc_server_folder self.scapi.vfname = self.configuration.dell_sc_volume_folder @mock.patch.object(storagecenter_api.HttpClient, 'post', return_value=RESPONSE_200) @mock.patch.object(storagecenter_api.SCApi, '_get_json', return_value=APIDICT) def test_open_connection(self, mock_get_json, mock_post): self.scapi.open_connection() self.assertTrue(mock_post.called) @mock.patch.object(storagecenter_api.HttpClient, 'post', return_value=RESPONSE_400) @mock.patch.object(storagecenter_api.SCApi, '_check_version_fail', return_value=RESPONSE_400) def test_open_connection_failure(self, mock_check_version_fail, mock_post): self.assertRaises(exception.VolumeBackendAPIException, self.scapi.open_connection) self.assertTrue(mock_check_version_fail.called) @mock.patch.object(storagecenter_api.SCApi, '_check_version_fail', return_value=RESPONSE_200) @mock.patch.object(storagecenter_api.SCApi, '_get_json', return_value=APIDICT) @mock.patch.object(storagecenter_api.HttpClient, 'post', return_value=RESPONSE_400) def test_open_connection_sc(self, mock_post, mock_get_json, mock_check_version_fail): self.scapi.open_connection() self.assertTrue(mock_check_version_fail.called) @mock.patch.object(storagecenter_api.HttpClient, 'post', return_value=RESPONSE_204) def test_close_connection(self, mock_post): self.scapi.close_connection() self.assertTrue(mock_post.called) @mock.patch.object(storagecenter_api.HttpClient, 'post', return_value=RESPONSE_200) def test_close_connection_failure(self, mock_post): self.scapi.close_connection() self.assertTrue(mock_post.called) class DellHttpClientTestCase(test.TestCase): """DellSCSanAPIConnectionTestCase Class to test the Storage Center API connection using Mock. 
""" ASYNCTASK = {"state": "Running", "methodName": "GetScUserPreferencesDefaults", "error": "", "started": True, "userName": "", "localizedError": "", "returnValue": "https://localhost:3033/api/rest/" "ApiConnection/AsyncTask/1418394170395", "storageCenter": 0, "errorState": "None", "successful": False, "stepMessage": "Running Method [Object: ScUserPreferences] " "[Method: GetScUserPreferencesDefaults]", "localizedStepMessage": "", "warningList": [], "totalSteps": 2, "timeFinished": "1969-12-31T18:00:00-06:00", "timeStarted": "2015-01-07T14:07:10-06:00", "currentStep": 1, "objectTypeName": "ScUserPreferences", "objectType": "AsyncTask", "instanceName": "1418394170395", "instanceId": "1418394170395"} # Create a Response object that indicates OK response_ok = models.Response() response_ok.status_code = 200 response_ok.reason = u'ok' response_ok._content = '' response_ok._content_consumed = True RESPONSE_200 = response_ok # Create a Response object with no content response_nc = models.Response() response_nc.status_code = 204 response_nc.reason = u'duplicate' response_nc._content = '' response_nc._content_consumed = True RESPONSE_204 = response_nc # Create a Response object is a pure error. response_bad = models.Response() response_bad.status_code = 400 response_bad.reason = u'bad request' response_bad._content = '' response_bad._content_consumed = True RESPONSE_400 = response_bad def setUp(self): super(DellHttpClientTestCase, self).setUp() self.host = 'localhost' self.port = '3033' self.user = 'johnnyuser' self.password = 'password' self.verify = False self.asynctimeout = 15 self.synctimeout = 30 self.apiversion = '3.1' self.httpclient = storagecenter_api.HttpClient( self.host, self.port, self.user, self.password, self.verify, self.asynctimeout, self.synctimeout, self.apiversion) def test_get_async_url(self): url = self.httpclient._get_async_url(self.ASYNCTASK) self.assertEqual('api/rest/ApiConnection/AsyncTask/1418394170395', url) def test_get_async_url_no_id_on_url(self): badTask = self.ASYNCTASK.copy() badTask['returnValue'] = ('https://localhost:3033/api/rest/' 'ApiConnection/AsyncTask/') url = self.httpclient._get_async_url(badTask) self.assertEqual('api/rest/ApiConnection/AsyncTask/1418394170395', url) def test_get_async_url_none(self): self.assertRaises(AttributeError, self.httpclient._get_async_url, None) def test_get_async_url_no_id(self): badTask = self.ASYNCTASK.copy() badTask['returnValue'] = ('https://localhost:3033/api/rest/' 'ApiConnection/AsyncTask/') badTask['instanceId'] = '' self.assertRaises(exception.VolumeBackendAPIException, self.httpclient._get_async_url, badTask) def test_get_async_url_no_returnvalue(self): badTask = self.ASYNCTASK.copy() badTask['returnValue'] = None url = self.httpclient._get_async_url(badTask) self.assertEqual('api/rest/ApiConnection/AsyncTask/1418394170395', url) def test_get_async_url_no_blank_returnvalue(self): badTask = self.ASYNCTASK.copy() badTask['returnValue'] = '' url = self.httpclient._get_async_url(badTask) self.assertEqual('api/rest/ApiConnection/AsyncTask/1418394170395', url) def test_get_async_url_xml_returnvalue(self): badTask = self.ASYNCTASK.copy() badTask['returnValue'] = ('' '1' '' '1' '' 'ApiMethodReturn' '1' 'True' '' 'false') self.assertRaises(exception.VolumeBackendAPIException, self.httpclient._get_async_url, badTask) def test_rest_ret(self): rest_response = self.RESPONSE_200 response = self.httpclient._rest_ret(rest_response, False) self.assertEqual(self.RESPONSE_200, response) 
@mock.patch.object(storagecenter_api.HttpClient, '_wait_for_async_complete', return_value=RESPONSE_200) def test_rest_ret_async(self, mock_wait_for_async_complete): mock_rest_response = mock.MagicMock() mock_rest_response.status_code = 202 response = self.httpclient._rest_ret(mock_rest_response, True) self.assertEqual(self.RESPONSE_200, response) self.assertTrue(mock_wait_for_async_complete.called) def test_rest_ret_async_error(self): mock_rest_response = mock.MagicMock() mock_rest_response.status_code = 400 self.assertRaises(exception.VolumeBackendAPIException, self.httpclient._rest_ret, mock_rest_response, True) @mock.patch.object(storagecenter_api.HttpClient, 'get', return_value=RESPONSE_200) def test_wait_for_async_complete(self, mock_get): ret = self.httpclient._wait_for_async_complete(self.ASYNCTASK) self.assertEqual(self.RESPONSE_200, ret) @mock.patch.object(storagecenter_api.HttpClient, '_get_async_url', return_value=None) def test_wait_for_async_complete_bad_url(self, mock_get_async_url): ret = self.httpclient._wait_for_async_complete(self.ASYNCTASK) self.assertIsNone(ret) @mock.patch.object(storagecenter_api.HttpClient, 'get', return_value=RESPONSE_400) def test_wait_for_async_complete_bad_result(self, mock_get): ret = self.httpclient._wait_for_async_complete(self.ASYNCTASK) self.assertEqual(self.RESPONSE_400, ret) @mock.patch.object(storagecenter_api.HttpClient, 'get', return_value=RESPONSE_200) def test_wait_for_async_complete_loop(self, mock_get): mock_response = mock.MagicMock() mock_response.content = mock.MagicMock() mock_response.json = mock.MagicMock() mock_response.json.side_effect = [self.ASYNCTASK, {'objectType': 'ScVol'}] ret = self.httpclient._wait_for_async_complete(self.ASYNCTASK) self.assertEqual(self.RESPONSE_200, ret) @mock.patch.object(storagecenter_api.HttpClient, 'get') def test_wait_for_async_complete_get_raises(self, mock_get): mock_get.side_effect = ( storagecenter_api.DellDriverRetryableException()) self.assertRaises(exception.VolumeBackendAPIException, self.httpclient._wait_for_async_complete, self.ASYNCTASK) @mock.patch.object(requests.Session, 'get', return_value=RESPONSE_200) def test_get(self, mock_get): ret = self.httpclient.get('url') self.assertEqual(self.RESPONSE_200, ret) expected_headers = self.httpclient.header.copy() mock_get.assert_called_once_with('https://localhost:3033/api/rest/url', headers=expected_headers, timeout=30, verify=False) @mock.patch.object(requests.Session, 'post', return_value=RESPONSE_200) @mock.patch.object(storagecenter_api.HttpClient, '_rest_ret') def test_post(self, mock_rest_ret, mock_post): payload = {'payload': 'payload'} self.httpclient.post('url', payload, True) expected_headers = self.httpclient.header.copy() expected_headers['async'] = 'True' mock_post.assert_called_once_with( 'https://localhost:3033/api/rest/url', data=json.dumps(payload, ensure_ascii=False).encode('utf-8'), headers=expected_headers, timeout=15, verify=False) @mock.patch.object(requests.Session, 'post', return_value=RESPONSE_200) @mock.patch.object(storagecenter_api.HttpClient, '_rest_ret') def test_post_sync(self, mock_rest_ret, mock_post): payload = {'payload': 'payload'} self.httpclient.post('url', payload, False) expected_headers = self.httpclient.header.copy() mock_post.assert_called_once_with( 'https://localhost:3033/api/rest/url', data=json.dumps(payload, ensure_ascii=False).encode('utf-8'), headers=expected_headers, timeout=30, verify=False) class DellStorageCenterApiHelperTestCase(test.TestCase): """DellStorageCenterApiHelper test case 
Class to test the Storage Center API helper using Mock. """ @mock.patch.object(storagecenter_api.SCApi, 'open_connection') def test_setup_connection(self, mock_open_connection): config = mock.MagicMock() config.dell_sc_ssn = 12345 config.san_ip = '192.168.0.101' config.san_login = 'username' config.san_password = 'password' config.dell_sc_volume_folder = 'a' config.dell_sc_server_folder = 'a' config.dell_sc_verify_cert = False config.san_port = 3033 helper = storagecenter_api.SCApiHelper(config, None, 'FC') ret = helper._setup_connection() self.assertEqual(12345, ret.primaryssn) self.assertEqual(12345, ret.ssn) self.assertEqual('FibreChannel', ret.protocol) mock_open_connection.assert_called_once_with() @mock.patch.object(storagecenter_api.SCApi, 'open_connection') def test_setup_connection_excluded1(self, mock_open_connection): config = mock.MagicMock() config.dell_sc_ssn = 12345 config.san_ip = '192.168.0.101' config.san_login = 'username' config.san_password = 'password' config.dell_sc_volume_folder = 'a' config.dell_sc_server_folder = 'a' config.dell_sc_verify_cert = False config.san_port = 3033 config.excluded_domain_ip = ['192.168.0.1'] config.excluded_domain_ips = ['192.168.0.2', '192.168.0.3'] helper = storagecenter_api.SCApiHelper(config, None, 'FC') ret = helper._setup_connection() self.assertEqual(set(ret.excluded_domain_ips), set(['192.168.0.2', '192.168.0.3', '192.168.0.1'])) self.assertEqual(12345, ret.primaryssn) self.assertEqual(12345, ret.ssn) self.assertEqual('FibreChannel', ret.protocol) mock_open_connection.assert_called_once_with() @mock.patch.object(storagecenter_api.SCApi, 'open_connection') def test_setup_connection_excluded2(self, mock_open_connection): config = mock.MagicMock() config.dell_sc_ssn = 12345 config.san_ip = '192.168.0.101' config.san_login = 'username' config.san_password = 'password' config.dell_sc_volume_folder = 'a' config.dell_sc_server_folder = 'a' config.dell_sc_verify_cert = False config.san_port = 3033 config.excluded_domain_ip = None config.excluded_domain_ips = ['192.168.0.2', '192.168.0.3'] helper = storagecenter_api.SCApiHelper(config, None, 'FC') ret = helper._setup_connection() self.assertEqual(set(ret.excluded_domain_ips), set(['192.168.0.2', '192.168.0.3'])) @mock.patch.object(storagecenter_api.SCApi, 'open_connection') def test_setup_connection_excluded3(self, mock_open_connection): config = mock.MagicMock() config.dell_sc_ssn = 12345 config.san_ip = '192.168.0.101' config.san_login = 'username' config.san_password = 'password' config.dell_sc_volume_folder = 'a' config.dell_sc_server_folder = 'a' config.dell_sc_verify_cert = False config.san_port = 3033 config.excluded_domain_ip = ['192.168.0.1'] config.excluded_domain_ips = [] helper = storagecenter_api.SCApiHelper(config, None, 'FC') ret = helper._setup_connection() self.assertEqual(ret.excluded_domain_ips, ['192.168.0.1']) @mock.patch.object(storagecenter_api.SCApi, 'open_connection') def test_setup_connection_excluded4(self, mock_open_connection): config = mock.MagicMock() config.dell_sc_ssn = 12345 config.san_ip = '192.168.0.101' config.san_login = 'username' config.san_password = 'password' config.dell_sc_volume_folder = 'a' config.dell_sc_server_folder = 'a' config.dell_sc_verify_cert = False config.san_port = 3033 config.excluded_domain_ip = None config.excluded_domain_ips = [] helper = storagecenter_api.SCApiHelper(config, None, 'FC') ret = helper._setup_connection() self.assertEqual(ret.excluded_domain_ips, []) @mock.patch.object(storagecenter_api.SCApi, 'open_connection') 
def test_setup_connection_excluded5(self, mock_open_connection): config = mock.MagicMock() config.dell_sc_ssn = 12345 config.san_ip = '192.168.0.101' config.san_login = 'username' config.san_password = 'password' config.dell_sc_volume_folder = 'a' config.dell_sc_server_folder = 'a' config.dell_sc_verify_cert = False config.san_port = 3033 config.excluded_domain_ip = ['192.168.0.1'] config.excluded_domain_ips = ['192.168.0.1', '192.168.0.2'] helper = storagecenter_api.SCApiHelper(config, None, 'FC') ret = helper._setup_connection() self.assertEqual(set(ret.excluded_domain_ips), set(['192.168.0.2', '192.168.0.1'])) self.assertEqual(12345, ret.primaryssn) self.assertEqual(12345, ret.ssn) self.assertEqual('FibreChannel', ret.protocol) mock_open_connection.assert_called_once_with() @mock.patch.object(storagecenter_api.SCApi, 'open_connection') def test_setup_connection_iscsi(self, mock_open_connection): config = mock.MagicMock() config.dell_sc_ssn = 12345 config.san_ip = '192.168.0.101' config.san_login = 'username' config.san_password = 'password' config.dell_sc_volume_folder = 'a' config.dell_sc_server_folder = 'a' config.dell_sc_verify_cert = False config.san_port = 3033 helper = storagecenter_api.SCApiHelper(config, None, 'iSCSI') ret = helper._setup_connection() self.assertEqual(12345, ret.primaryssn) self.assertEqual(12345, ret.ssn) self.assertEqual('Iscsi', ret.protocol) mock_open_connection.assert_called_once_with() @mock.patch.object(storagecenter_api.SCApi, 'open_connection') def test_setup_connection_failover(self, mock_open_connection): config = mock.MagicMock() config.dell_sc_ssn = 12345 config.san_ip = '192.168.0.101' config.san_login = 'username' config.san_password = 'password' config.dell_sc_volume_folder = 'a' config.dell_sc_server_folder = 'a' config.dell_sc_verify_cert = False config.san_port = 3033 helper = storagecenter_api.SCApiHelper(config, '67890', 'iSCSI') ret = helper._setup_connection() self.assertEqual(12345, ret.primaryssn) self.assertEqual(67890, ret.ssn) self.assertEqual('Iscsi', ret.protocol) mock_open_connection.assert_called_once_with() @mock.patch.object(storagecenter_api.SCApiHelper, '_setup_connection') def test_open_connection(self, mock_setup_connection): config = mock.MagicMock() config.dell_sc_ssn = 12345 config.san_ip = '192.168.0.101' config.san_login = 'username' config.san_password = 'password' config.san_port = 3033 helper = storagecenter_api.SCApiHelper(config, None, 'FC') mock_connection = mock.MagicMock() mock_connection.apiversion = '3.1' mock_setup_connection.return_value = mock_connection ret = helper.open_connection() self.assertEqual('3.1', ret.apiversion) self.assertEqual('192.168.0.101', helper.san_ip) self.assertEqual('username', helper.san_login) self.assertEqual('password', helper.san_password) @mock.patch.object(storagecenter_api.SCApiHelper, '_setup_connection') def test_open_connection_fail_no_secondary(self, mock_setup_connection): config = mock.MagicMock() config.dell_sc_ssn = 12345 config.san_ip = '192.168.0.101' config.san_login = 'username' config.san_password = 'password' config.san_port = 3033 config.secondary_san_ip = '' helper = storagecenter_api.SCApiHelper(config, None, 'FC') mock_setup_connection.side_effect = ( exception.VolumeBackendAPIException('abc')) self.assertRaises(exception.VolumeBackendAPIException, helper.open_connection) mock_setup_connection.assert_called_once_with() self.assertEqual('192.168.0.101', helper.san_ip) self.assertEqual('username', helper.san_login) self.assertEqual('password', 
helper.san_password) @mock.patch.object(storagecenter_api.SCApiHelper, '_setup_connection') def test_open_connection_secondary(self, mock_setup_connection): config = mock.MagicMock() config.dell_sc_ssn = 12345 config.san_ip = '192.168.0.101' config.san_login = 'username' config.san_password = 'password' config.san_port = 3033 config.secondary_san_ip = '192.168.0.102' config.secondary_san_login = 'username2' config.secondary_san_password = 'password2' helper = storagecenter_api.SCApiHelper(config, None, 'FC') mock_connection = mock.MagicMock() mock_connection.apiversion = '3.1' mock_setup_connection.side_effect = [ (exception.VolumeBackendAPIException('abc')), mock_connection] ret = helper.open_connection() self.assertEqual('3.1', ret.apiversion) self.assertEqual(2, mock_setup_connection.call_count) self.assertEqual('192.168.0.102', helper.san_ip) self.assertEqual('username2', helper.san_login) self.assertEqual('password2', helper.san_password) @mock.patch.object(storagecenter_api.SCApiHelper, '_setup_connection') def test_open_connection_fail_partial_secondary_config( self, mock_setup_connection): config = mock.MagicMock() config.dell_sc_ssn = 12345 config.san_ip = '192.168.0.101' config.san_login = 'username' config.san_password = 'password' config.san_port = 3033 config.secondary_san_ip = '192.168.0.102' config.secondary_san_login = 'username2' config.secondary_san_password = '' helper = storagecenter_api.SCApiHelper(config, None, 'FC') mock_setup_connection.side_effect = ( exception.VolumeBackendAPIException('abc')) self.assertRaises(exception.VolumeBackendAPIException, helper.open_connection) mock_setup_connection.assert_called_once_with() self.assertEqual('192.168.0.101', helper.san_ip) self.assertEqual('username', helper.san_login) self.assertEqual('password', helper.san_password) @mock.patch.object(storagecenter_api.SCApiHelper, '_setup_connection') def test_open_connection_to_secondary_and_back(self, mock_setup_connection): config = mock.MagicMock() config.dell_sc_ssn = 12345 config.san_ip = '192.168.0.101' config.san_login = 'username' config.san_password = 'password' config.san_port = 3033 config.secondary_san_ip = '192.168.0.102' config.secondary_san_login = 'username2' config.secondary_san_password = 'password2' helper = storagecenter_api.SCApiHelper(config, None, 'FC') mock_connection = mock.MagicMock() mock_connection.apiversion = '3.1' mock_setup_connection.side_effect = [ (exception.VolumeBackendAPIException('abc')), mock_connection, (exception.VolumeBackendAPIException('abc')), mock_connection] helper.open_connection() self.assertEqual('192.168.0.102', helper.san_ip) self.assertEqual('username2', helper.san_login) self.assertEqual('password2', helper.san_password) self.assertEqual(2, mock_setup_connection.call_count) helper.open_connection() self.assertEqual('192.168.0.101', helper.san_ip) self.assertEqual('username', helper.san_login) self.assertEqual('password', helper.san_password) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/volume/drivers/dell_emc/test_xtremio.py0000664000175000017500000020666000000000000026317 0ustar00zuulzuul00000000000000# Copyright (c) 2018 Dell Inc. or its subsidiaries. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import copy import re import time from unittest import mock from oslo_utils import netutils from cinder import context from cinder import exception from cinder.objects import volume_attachment from cinder.tests.unit.consistencygroup import fake_consistencygroup as fake_cg from cinder.tests.unit import fake_constants as fake from cinder.tests.unit import fake_snapshot from cinder.tests.unit.fake_volume import fake_volume_obj from cinder.tests.unit.fake_volume import fake_volume_type_obj from cinder.tests.unit import test from cinder.volume.drivers.dell_emc import xtremio typ2id = {'volumes': 'vol-id', 'snapshots': 'vol-id', 'initiators': 'initiator-id', 'initiator-groups': 'ig-id', 'lun-maps': 'mapping-id', 'consistency-groups': 'cg-id', 'consistency-group-volumes': 'cg-vol-id', } xms_init = {'xms': {1: {'version': '4.2.0', 'sw-version': '4.2.0-30'}}, 'clusters': {1: {'name': 'brick1', 'sys-sw-version': "4.2.0-devel_ba23ee5381eeab73", 'ud-ssd-space': '8146708710', 'ud-ssd-space-in-use': '708710', 'vol-size': '29884416', 'chap-authentication-mode': 'disabled', 'chap-discovery-mode': 'disabled', "index": 1, }, }, 'target-groups': {'Default': {"index": 1, "name": "Default"}, }, 'iscsi-portals': {'10.205.68.5/16': {"port-address": "iqn.2008-05.com.xtremio:001e67939c34", "ip-port": 3260, "ip-addr": "10.205.68.5/16", "name": "10.205.68.5/16", "index": 1, }, '10.205.68.6/16': {"port-address": "iqn.2008-05.com.xtremio:002e67939c34", "ip-port": 3260, "ip-addr": "10.205.68.6/16", "name": "10.205.68.6/16", "index": 1, }, }, 'targets': {'X1-SC2-target1': {'index': 1, "name": "X1-SC2-fc1", "port-address": "21:00:00:24:ff:57:b2:36", 'port-type': 'fc', 'port-state': 'up', }, 'X1-SC2-target2': {'index': 2, "name": "X1-SC2-fc2", "port-address": "21:00:00:24:ff:57:b2:55", 'port-type': 'fc', 'port-state': 'up', } }, 'volumes': {}, 'initiator-groups': {}, 'initiators': {}, 'lun-maps': {}, 'consistency-groups': {}, 'consistency-group-volumes': {}, } xms_data = None xms_filters = { 'eq': lambda x, y: x == y, 'ne': lambda x, y: x != y, 'gt': lambda x, y: x > y, 'ge': lambda x, y: x >= y, 'lt': lambda x, y: x < y, 'le': lambda x, y: x <= y, } def get_xms_obj_by_name(typ, name): for item in xms_data[typ].values(): if 'name' in item and item['name'] == name: return item raise exception.NotFound() def clean_xms_data(): global xms_data xms_data = copy.deepcopy(xms_init) def fix_data(data, object_type): d = {} for key, value in data.items(): if 'name' in key: key = 'name' d[key] = value if object_type == 'lun-maps': d['lun'] = 1 vol_idx = get_xms_obj_by_name('volumes', data['vol-id'])['index'] ig_idx = get_xms_obj_by_name('initiator-groups', data['ig-id'])['index'] d['name'] = '_'.join([str(vol_idx), str(ig_idx), '1']) d[typ2id[object_type]] = ["a91e8c81c2d14ae4865187ce4f866f8a", d.get('name'), len(xms_data.get(object_type, [])) + 1] d['index'] = len(xms_data[object_type]) + 1 return d def get_xms_obj_key(data): for key in data.keys(): if 'name' in key: return key def get_obj(typ, name, idx): if name: return {"content": get_xms_obj_by_name(typ, name)} elif idx: if idx not in xms_data.get(typ, {}): raise 
exception.NotFound() return {"content": xms_data[typ][idx]} def xms_request(object_type='volumes', method='GET', data=None, name=None, idx=None, ver='v1'): if object_type == 'snapshots': object_type = 'volumes' try: res = xms_data[object_type] except KeyError: raise exception.VolumeDriverException if method == 'GET': if name or idx: return get_obj(object_type, name, idx) else: if data and data.get('full') == 1: filter_term = data.get('filter') if not filter_term: entities = list(res.values()) else: field, oper, value = filter_term.split(':', 2) comp = xms_filters[oper] entities = [o for o in res.values() if comp(o.get(field), value)] return {object_type: entities} else: return {object_type: [{"href": "/%s/%d" % (object_type, obj['index']), "name": obj.get('name')} for obj in res.values()]} elif method == 'POST': data = fix_data(data, object_type) name_key = get_xms_obj_key(data) try: if name_key and get_xms_obj_by_name(object_type, data[name_key]): raise (exception .VolumeBackendAPIException ('Volume by this name already exists')) except exception.NotFound: pass data['index'] = len(xms_data[object_type]) + 1 xms_data[object_type][data['index']] = data # find the name key if name_key: data['name'] = data[name_key] if object_type == 'lun-maps': data['ig-name'] = data['ig-id'] return {"links": [{"href": "/%s/%d" % (object_type, data[typ2id[object_type]][2])}]} elif method == 'DELETE': if object_type == 'consistency-group-volumes': data = [cgv for cgv in xms_data['consistency-group-volumes'].values() if cgv['vol-id'] == data['vol-id'] and cgv['cg-id'] == data['cg-id']][0] else: data = get_obj(object_type, name, idx)['content'] if data: del xms_data[object_type][data['index']] else: raise exception.NotFound() elif method == 'PUT': obj = get_obj(object_type, name, idx)['content'] data = fix_data(data, object_type) del data['index'] obj.update(data) def xms_bad_request(object_type='volumes', method='GET', data=None, name=None, idx=None, ver='v1'): if method == 'GET': raise exception.NotFound() elif method == 'POST': raise exception.VolumeBackendAPIException('Failed to create ig') def xms_failed_rename_snapshot_request(object_type='volumes', method='GET', data=None, name=None, idx=None, ver='v1'): if method == 'POST': xms_data['volumes'][27] = {} return { "links": [ { "href": "https://host/api/json/v2/types/snapshots/27", "rel": "self"}]} elif method == 'PUT': raise exception.VolumeBackendAPIException(data='Failed to delete') elif method == 'DELETE': del xms_data['volumes'][27] class D(dict): def update(self, *args, **kwargs): self.__dict__.update(*args, **kwargs) return dict.update(self, *args, **kwargs) class CommonData(object): context = context.RequestContext('admin', 'fake', True) connector = {'ip': '10.0.0.2', 'initiator': 'iqn.1993-08.org.debian:01:222', 'wwpns': ["123456789012345", "123456789054321"], 'wwnns': ["223456789012345", "223456789054321"], 'host': 'fakehost', } test_volume_type = fake_volume_type_obj( context=context ) test_volume = fake_volume_obj(context, volume_type = test_volume_type, name='vol1', volume_name='vol1', display_name='vol1', display_description='test volume', size=1, id='192eb39b-6c2f-420c-bae3-3cfd117f0001', provider_auth=None, project_id='project', volume_type_id=None, consistencygroup_id= '192eb39b-6c2f-420c-bae3-3cfd117f0345', ) test_snapshot = D() test_snapshot.update({'name': 'snapshot1', 'size': 1, 'volume_size': 1, 'id': '192eb39b-6c2f-420c-bae3-3cfd117f0002', 'volume_name': 'vol-vol1', 'volume_id': '192eb39b-6c2f-420c-bae3-3cfd117f0001', 'project_id': 
'project', 'consistencygroup_id': '192eb39b-6c2f-420c-bae3-3cfd117f0345', }) test_snapshot.__dict__.update(test_snapshot) test_volume2 = {'name': 'vol2', 'size': 1, 'volume_name': 'vol2', 'id': '192eb39b-6c2f-420c-bae3-3cfd117f0004', 'provider_auth': None, 'project_id': 'project', 'display_name': 'vol2', 'display_description': 'test volume 2', 'volume_type_id': None, 'consistencygroup_id': '192eb39b-6c2f-420c-bae3-3cfd117f0345', } test_clone = {'name': 'clone1', 'size': 1, 'volume_name': 'vol3', 'id': '192eb39b-6c2f-420c-bae3-3cfd117f0003', 'provider_auth': None, 'project_id': 'project', 'display_name': 'clone1', 'display_description': 'volume created from snapshot', 'volume_type_id': None, 'consistencygroup_id': '192eb39b-6c2f-420c-bae3-3cfd117f0345', } unmanaged1 = {'id': 'unmanaged1', 'name': 'unmanaged1', 'size': 3, } group = {'id': '192eb39b-6c2f-420c-bae3-3cfd117f0345', 'name': 'cg1', 'status': 'OK', } cgsnapshot = { 'id': '192eb39b-6c2f-420c-bae3-3cfd117f9876', 'consistencygroup_id': group['id'], 'group_id': None, } cgsnapshot_as_group_id = { 'id': '192eb39b-6c2f-420c-bae3-3cfd117f9876', 'consistencygroup_id': None, 'group_id': group['id'], } test_volume_attachment = volume_attachment.VolumeAttachment( id='2b06255d-f5f0-4520-a953-b029196add6b', volume_id=test_volume.id, connector=connector) class BaseXtremIODriverTestCase(test.TestCase): def __init__(self, *args, **kwargs): super(BaseXtremIODriverTestCase, self).__init__(*args, **kwargs) self.config = mock.Mock(san_login='', san_password='', san_ip='', xtremio_cluster_name='brick1', xtremio_provisioning_factor=20.0, max_over_subscription_ratio=20.0, xtremio_volumes_per_glance_cache=100, driver_ssl_cert_verify=True, driver_ssl_cert_path='/test/path/root_ca.crt', xtremio_array_busy_retry_count=5, xtremio_array_busy_retry_interval=5, xtremio_clean_unused_ig=False, xtremio_ports=[]) def safe_get(key): return getattr(self.config, key) self.config.safe_get = safe_get def setUp(self): super(BaseXtremIODriverTestCase, self).setUp() clean_xms_data() self.driver = xtremio.XtremIOISCSIDriver(configuration=self.config) self.driver.client = xtremio.XtremIOClient42(self.config, self.config .xtremio_cluster_name) self.data = CommonData() @mock.patch('cinder.volume.drivers.dell_emc.xtremio.XtremIOClient.req') class XtremIODriverISCSITestCase(BaseXtremIODriverTestCase): # ##### SetUp Check ##### def test_check_for_setup_error(self, req): req.side_effect = xms_request self.driver.check_for_setup_error() self.assertEqual(self.driver.client.__class__.__name__, 'XtremIOClient42') def test_fail_check_for_setup_error(self, req): req.side_effect = xms_request clusters = xms_data.pop('clusters') self.assertRaises(exception.VolumeDriverException, self.driver.check_for_setup_error) xms_data['clusters'] = clusters def test_check_for_setup_error_ver4(self, req): req.side_effect = xms_request xms_data['xms'][1]['sw-version'] = '4.0.10-34.hotfix1' self.driver.check_for_setup_error() self.assertEqual(self.driver.client.__class__.__name__, 'XtremIOClient4') def test_fail_check_for_array_version(self, req): req.side_effect = xms_request cluster = xms_data['clusters'][1] ver = cluster['sys-sw-version'] cluster['sys-sw-version'] = '2.0.0-test' self.assertRaises(exception.VolumeBackendAPIException, self.driver.check_for_setup_error) cluster['sys-sw-version'] = ver def test_client4_uses_v2(self, req): def base_req(*args, **kwargs): self.assertIn('v2', args) req.side_effect = base_req self.driver.client.req('volumes') def test_get_stats(self, req): req.side_effect = 
xms_request stats = self.driver.get_volume_stats(True) self.assertEqual(self.driver.backend_name, stats['volume_backend_name']) # ##### Volumes ##### def test_create_volume_with_cg(self, req): req.side_effect = xms_request self.driver.create_volume(self.data.test_volume) def test_extend_volume(self, req): req.side_effect = xms_request self.driver.create_volume(self.data.test_volume) self.driver.extend_volume(self.data.test_volume, 5) def test_fail_extend_volume(self, req): req.side_effect = xms_request self.assertRaises(exception.VolumeDriverException, self.driver.extend_volume, self.data.test_volume, 5) def test_delete_volume(self, req): req.side_effect = xms_request self.driver.create_volume(self.data.test_volume) self.driver.delete_volume(self.data.test_volume) def test_duplicate_volume(self, req): req.side_effect = xms_request self.driver.create_volume(self.data.test_volume) self.assertRaises(exception.VolumeBackendAPIException, self.driver.create_volume, self.data.test_volume) # ##### Snapshots ##### def test_create_snapshot(self, req): req.side_effect = xms_request self.driver.create_volume(self.data.test_volume) self.driver.create_snapshot(self.data.test_snapshot) self.assertEqual(self.data.test_snapshot['id'], xms_data['volumes'][2]['name']) def test_create_delete_snapshot(self, req): req.side_effect = xms_request self.driver.create_volume(self.data.test_volume) self.driver.create_snapshot(self.data.test_snapshot) self.assertEqual(self.data.test_snapshot['id'], xms_data['volumes'][2]['name']) self.driver.delete_snapshot(self.data.test_snapshot) def test_failed_rename_snapshot(self, req): req.side_effect = xms_failed_rename_snapshot_request self.assertRaises(exception.VolumeBackendAPIException, self.driver.create_snapshot, self.data.test_snapshot) self.assertEqual(0, len(xms_data['volumes'])) def test_volume_from_snapshot(self, req): req.side_effect = xms_request xms_data['volumes'] = {} self.driver.create_volume(self.data.test_volume) self.driver.create_snapshot(self.data.test_snapshot) self.driver.create_volume_from_snapshot(self.data.test_volume2, self.data.test_snapshot) def test_volume_from_snapshot_and_resize(self, req): req.side_effect = xms_request xms_data['volumes'] = {} self.driver.create_volume(self.data.test_volume) clone_volume = self.data.test_clone.copy() clone_volume['size'] = 2 self.driver.create_snapshot(self.data.test_snapshot) with mock.patch.object(self.driver, 'extend_volume') as extend: self.driver.create_volume_from_snapshot(clone_volume, self.data.test_snapshot) extend.assert_called_once_with(clone_volume, clone_volume['size']) def test_volume_from_snapshot_and_resize_fail(self, req): req.side_effect = xms_request self.driver.create_volume(self.data.test_volume) vol = xms_data['volumes'][1] def failed_extend(obj_type='volumes', method='GET', data=None, *args, **kwargs): if method == 'GET': return {'content': vol} elif method == 'POST': return {'links': [{'href': 'volume/2'}]} elif method == 'PUT': if 'name' in data: return raise exception.VolumeBackendAPIException('Failed Clone') self.driver.create_snapshot(self.data.test_snapshot) req.side_effect = failed_extend self.driver.db = mock.Mock() (self.driver.db. 
image_volume_cache_get_by_volume_id.return_value) = mock.MagicMock() clone = self.data.test_clone.copy() clone['size'] = 2 with mock.patch.object(self.driver, 'delete_volume') as delete: self.assertRaises(exception.VolumeBackendAPIException, self.driver.create_volume_from_snapshot, clone, self.data.test_snapshot) self.assertTrue(delete.called) # ##### Clone Volume ##### def test_clone_volume(self, req): req.side_effect = xms_request self.driver.db = mock.Mock() (self.driver.db. image_volume_cache_get_by_volume_id.return_value) = mock.MagicMock() self.driver.create_volume(self.data.test_volume) xms_data['volumes'][1]['num-of-dest-snaps'] = 50 self.driver.create_cloned_volume(self.data.test_clone, self.data.test_volume) def test_clone_volume_exceed_conf_limit(self, req): req.side_effect = xms_request self.driver.db = mock.Mock() (self.driver.db. image_volume_cache_get_by_volume_id.return_value) = mock.MagicMock() self.driver.create_volume(self.data.test_volume) xms_data['volumes'][1]['num-of-dest-snaps'] = 200 self.assertRaises(exception.CinderException, self.driver.create_cloned_volume, self.data.test_clone, self.data.test_volume) @mock.patch.object(xtremio.XtremIOClient4, 'create_snapshot') def test_clone_volume_exceed_array_limit(self, create_snap, req): create_snap.side_effect = xtremio.XtremIOSnapshotsLimitExceeded() req.side_effect = xms_request self.driver.db = mock.Mock() (self.driver.db. image_volume_cache_get_by_volume_id.return_value) = mock.MagicMock() self.driver.create_volume(self.data.test_volume) xms_data['volumes'][1]['num-of-dest-snaps'] = 50 self.assertRaises(exception.CinderException, self.driver.create_cloned_volume, self.data.test_clone, self.data.test_volume) def test_clone_volume_too_many_snaps(self, req): req.side_effect = xms_request response = mock.MagicMock() response.status_code = 400 response.json.return_value = { "message": "too_many_snapshots_per_vol", "error_code": 400 } self.assertRaises(xtremio.XtremIOSnapshotsLimitExceeded, self.driver.client.handle_errors, response, '', '') def test_clone_volume_too_many_objs(self, req): req.side_effect = xms_request response = mock.MagicMock() response.status_code = 400 response.json.return_value = { "message": "too_many_objs", "error_code": 400 } self.assertRaises(xtremio.XtremIOSnapshotsLimitExceeded, self.driver.client.handle_errors, response, '', '') def test_update_migrated_volume(self, req): original = self.data.test_volume new = self.data.test_volume2 update = (self.driver. update_migrated_volume({}, original, new, 'available')) req.assert_called_once_with('volumes', 'PUT', {'name': original['id']}, new['id'], None, 'v2') self.assertEqual({'_name_id': None, 'provider_location': None}, update) def test_update_migrated_volume_failed_rename(self, req): req.side_effect = exception.VolumeBackendAPIException( data='failed rename') original = self.data.test_volume new = copy.deepcopy(self.data.test_volume2) fake_provider = '__provider' new['provider_location'] = fake_provider new['_name_id'] = None update = (self.driver. update_migrated_volume({}, original, new, 'available')) self.assertEqual({'_name_id': new['id'], 'provider_location': fake_provider}, update) def test_clone_volume_and_resize(self, req): req.side_effect = xms_request self.driver.db = mock.Mock() (self.driver.db. 
image_volume_cache_get_by_volume_id.return_value) = mock.MagicMock() self.driver.create_volume(self.data.test_volume) vol = xms_data['volumes'][1] vol['num-of-dest-snaps'] = 0 clone = self.data.test_clone.copy() clone['size'] = 2 with mock.patch.object(self.driver, 'extend_volume') as extend: self.driver.create_cloned_volume(clone, self.data.test_volume) extend.assert_called_once_with(clone, clone['size']) def test_clone_volume_and_resize_fail(self, req): req.side_effect = xms_request self.driver.create_volume(self.data.test_volume) vol = xms_data['volumes'][1] def failed_extend(obj_type='volumes', method='GET', data=None, *args, **kwargs): if method == 'GET': return {'content': vol} elif method == 'POST': return {'links': [{'href': 'volume/2'}]} elif method == 'PUT': if 'name' in data: return raise exception.VolumeBackendAPIException('Failed Clone') req.side_effect = failed_extend self.driver.db = mock.Mock() (self.driver.db. image_volume_cache_get_by_volume_id.return_value) = mock.MagicMock() vol['num-of-dest-snaps'] = 0 clone = self.data.test_clone.copy() clone['size'] = 2 with mock.patch.object(self.driver, 'delete_volume') as delete: self.assertRaises(exception.VolumeBackendAPIException, self.driver.create_cloned_volume, clone, self.data.test_volume) self.assertTrue(delete.called) # ##### Connection ##### def test_no_portals_configured(self, req): req.side_effect = xms_request portals = xms_data['iscsi-portals'].copy() xms_data['iscsi-portals'].clear() lunmap = {'lun': 4} self.assertRaises(exception.VolumeBackendAPIException, self.driver._get_iscsi_properties, lunmap) xms_data['iscsi-portals'] = portals def test_no_allowed_portals(self, req): req.side_effect = xms_request lunmap = {'lun': 4} self.driver.allowed_ports = ['1.2.3.4'] self.assertRaises(exception.VolumeBackendAPIException, self.driver._get_iscsi_properties, lunmap) def test_filtered_portals(self, req): req.side_effect = xms_request lunmap = {'lun': 4} self.driver.allowed_ports = ['10.205.68.6'] connection_properties = self.driver._get_iscsi_properties(lunmap) self.assertEqual(1, len(connection_properties['target_portals'])) self.assertIn('10.205.68.6:3260', connection_properties['target_portals']) def test_initialize_connection(self, req): req.side_effect = xms_request self.driver.create_volume(self.data.test_volume) self.driver.create_volume(self.data.test_volume2) map_data = self.driver.initialize_connection(self.data.test_volume, self.data.connector) self.assertEqual(1, map_data['data']['target_lun']) def test_initialize_connection_existing_ig(self, req): req.side_effect = xms_request self.driver.create_volume(self.data.test_volume) self.driver.create_volume(self.data.test_volume2) self.driver.initialize_connection(self.data.test_volume, self.data.connector) i1 = xms_data['initiators'][1] i1['ig-id'] = ['', i1['ig-id'], 1] i1['chap-authentication-initiator-password'] = 'chap_password1' i1['chap-discovery-initiator-password'] = 'chap_password2' self.driver.initialize_connection(self.data.test_volume2, self.data.connector) def test_initialize_connection_escape_ipv6(self, req): req.side_effect = xms_request portals = xms_data['iscsi-portals'].copy() xms_data['iscsi-portals'] = { 'fd00:206:553::7/16': { "port-address": "iqn.2008-05.com.xtremio:003e67939c34", "ip-port": 3260, "ip-addr": "fd00:206:553::7/16", "name": "fd00:206:553::7/16", "index": 1, }, } lunmap = {'lun': 4} connection_properties = self.driver._get_iscsi_properties(lunmap) result_addr, _ = connection_properties['target_portal'].rsplit(':', 1) 
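# rsplit(':', 1) splits from the right because an IPv6 address itself
# contains colons, so only the trailing ':<port>' is stripped, e.g.
# '[fd00:206:553::7]:3260'.rsplit(':', 1) -> ['[fd00:206:553::7]', '3260']
# (the exact portal string is an assumption; the mocked portal advertises
# port 3260). The assertion below then checks that the driver
# bracket-escaped the address via netutils.escape_ipv6.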
self.assertEqual(netutils.escape_ipv6('fd00:206:553::7'), result_addr) xms_data['iscsi-portals'] = portals def test_terminate_connection(self, req): req.side_effect = xms_request self.driver.create_volume(self.data.test_volume) self.driver.initialize_connection(self.data.test_volume, self.data.connector) i1 = xms_data['initiators'][1] i1['ig-id'] = ['', i1['ig-id'], 1] self.driver.terminate_connection(self.data.test_volume, self.data.connector) self.assertEqual(1, len(xms_data['initiator-groups'])) def test_terminate_connection_clean_ig(self, req): self.driver.clean_ig = True req.side_effect = xms_request self.driver.create_volume(self.data.test_volume) self.driver.initialize_connection(self.data.test_volume, self.data.connector) i1 = xms_data['initiators'][1] i1['ig-id'] = ['', i1['ig-id'], 1] xms_data['initiator-groups'][1]['num-of-vols'] = 0 # lun mapping list is a list of triplets (IG OID, TG OID, lun number) self.driver.terminate_connection(self.data.test_volume, self.data.connector) self.assertEqual(0, len(xms_data['initiator-groups'])) def test_terminate_connection_fail_on_bad_volume(self, req): req.side_effect = xms_request self.assertRaises(exception.NotFound, self.driver.terminate_connection, self.data.test_volume, self.data.connector) def test_get_ig_indexes_from_initiators_called_once(self, req): req.side_effect = xms_request volume1 = copy.deepcopy(self.data.test_volume) volume1.volume_attachment.objects = [self.data.test_volume_attachment] self.driver.create_volume(volume1) map_data = self.driver.initialize_connection(self.data.test_volume, self.data.connector) i1 = xms_data['initiators'][1] i1['ig-id'] = ['', i1['ig-id'], 1] self.assertEqual(1, map_data['data']['target_lun']) with mock.patch.object(self.driver, '_get_ig_indexes_from_initiators') as get_idx: get_idx.return_value = [1] self.driver.terminate_connection(self.data.test_volume, self.data.connector) get_idx.assert_called_once_with(self.data.connector) def test_initialize_connection_after_enabling_chap(self, req): req.side_effect = xms_request self.driver.create_volume(self.data.test_volume) self.driver.create_volume(self.data.test_volume2) map_data = self.driver.initialize_connection(self.data.test_volume, self.data.connector) self.assertNotIn('access_mode', map_data['data']) c1 = xms_data['clusters'][1] c1['chap-authentication-mode'] = 'initiator' c1['chap-discovery-mode'] = 'initiator' i1 = xms_data['initiators'][1] i1['ig-id'] = ['', i1['ig-id'], 1] i1['chap-authentication-initiator-password'] = 'chap_password1' i1['chap-discovery-initiator-password'] = 'chap_password2' map_data = self.driver.initialize_connection(self.data.test_volume2, self.data.connector) self.assertEqual('chap_password1', map_data['data']['auth_password']) self.assertEqual('chap_password2', map_data['data']['discovery_auth_password']) def test_initialize_connection_after_disabling_chap(self, req): req.side_effect = xms_request self.driver.create_volume(self.data.test_volume) self.driver.create_volume(self.data.test_volume2) c1 = xms_data['clusters'][1] c1['chap-authentication-mode'] = 'initiator' c1['chap-discovery-mode'] = 'initiator' self.driver.initialize_connection(self.data.test_volume, self.data.connector) i1 = xms_data['initiators'][1] i1['ig-id'] = ['', i1['ig-id'], 1] i1['chap-authentication-initiator-password'] = 'chap_password1' i1['chap-discovery-initiator-password'] = 'chap_password2' i1['chap-authentication-initiator-password'] = None i1['chap-discovery-initiator-password'] = None 
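# Resetting the stored CHAP secrets to None simulates CHAP being disabled
# for this initiator again; the follow-up initialize_connection below is
# only expected to succeed without credentials present (no auth keys are
# asserted here, unlike the "after enabling" test above).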
self.driver.initialize_connection(self.data.test_volume2, self.data.connector) @mock.patch('oslo_utils.strutils.mask_dict_password') def test_initialize_connection_masks_password(self, mask_dict, req): req.side_effect = xms_request self.driver.create_volume(self.data.test_volume) self.driver.initialize_connection(self.data.test_volume, self.data.connector) self.assertTrue(mask_dict.called) def test_add_auth(self, req): req.side_effect = xms_request data = {} self.driver._add_auth(data, True, True) self.assertIn('initiator-discovery-user-name', data, 'Missing discovery user in data') self.assertIn('initiator-discovery-password', data, 'Missing discovery password in data') def test_initialize_connection_bad_ig(self, req): req.side_effect = xms_bad_request self.assertRaises(exception.VolumeBackendAPIException, self.driver.initialize_connection, self.data.test_volume, self.data.connector) self.driver.delete_volume(self.data.test_volume) # ##### Manage Volumes ##### def test_manage_volume(self, req): req.side_effect = xms_request xms_data['volumes'] = {1: {'name': 'unmanaged1', 'index': 1, 'vol-size': '3', }, } ref_vol = {"source-name": "unmanaged1"} self.driver.manage_existing(self.data.test_volume, ref_vol) def test_failed_manage_volume(self, req): req.side_effect = xms_request xms_data['volumes'] = {1: {'name': 'unmanaged1', 'index': 1, 'vol-size': '3', }, } invalid_ref = {"source-name": "invalid"} self.assertRaises(exception.ManageExistingInvalidReference, self.driver.manage_existing, self.data.test_volume, invalid_ref) def test_get_manage_volume_size(self, req): req.side_effect = xms_request xms_data['volumes'] = {1: {'name': 'unmanaged1', 'index': 1, 'vol-size': '1000000', }, } ref_vol = {"source-name": "unmanaged1"} size = self.driver.manage_existing_get_size(self.data.test_volume, ref_vol) self.assertEqual(1, size) def test_manage_volume_size_invalid_input(self, req): self.assertRaises(exception.ManageExistingInvalidReference, self.driver.manage_existing_get_size, self.data.test_volume, {}) def test_failed_manage_volume_size(self, req): req.side_effect = xms_request xms_data['volumes'] = {1: {'name': 'unmanaged1', 'index': 1, 'vol-size': '3', }, } invalid_ref = {"source-name": "invalid"} self.assertRaises(exception.ManageExistingInvalidReference, self.driver.manage_existing_get_size, self.data.test_volume, invalid_ref) def test_unmanage_volume(self, req): req.side_effect = xms_request self.driver.create_volume(self.data.test_volume) self.driver.unmanage(self.data.test_volume) def test_failed_unmanage_volume(self, req): req.side_effect = xms_request self.assertRaises(exception.VolumeNotFound, self.driver.unmanage, self.data.test_volume2) def test_manage_snapshot(self, req): req.side_effect = xms_request vol_uid = self.data.test_snapshot.volume_id xms_data['volumes'] = {1: {'name': vol_uid, 'index': 1, 'vol-size': '3', }, 2: {'name': 'unmanaged', 'index': 2, 'ancestor-vol-id': ['', vol_uid, 1], 'vol-size': '3'} } ref_vol = {"source-name": "unmanaged"} self.driver.manage_existing_snapshot(self.data.test_snapshot, ref_vol) def test_get_manage_snapshot_size(self, req): req.side_effect = xms_request vol_uid = self.data.test_snapshot.volume_id xms_data['volumes'] = {1: {'name': vol_uid, 'index': 1, 'vol-size': '3', }, 2: {'name': 'unmanaged', 'index': 2, 'ancestor-vol-id': ['', vol_uid, 1], 'vol-size': '3'} } ref_vol = {"source-name": "unmanaged"} self.driver.manage_existing_snapshot_get_size(self.data.test_snapshot, ref_vol) def test_manage_snapshot_invalid_snapshot(self, req): 
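# The fake volume seeded below has an empty 'ancestor-vol-id', i.e. it is
# not a snapshot of any volume, so manage_existing_snapshot must reject
# the reference with ManageExistingInvalidReference.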
req.side_effect = xms_request xms_data['volumes'] = {1: {'name': 'unmanaged1', 'index': 1, 'vol-size': '3', 'ancestor-vol-id': []} } ref_vol = {"source-name": "unmanaged1"} self.assertRaises(exception.ManageExistingInvalidReference, self.driver.manage_existing_snapshot, self.data.test_snapshot, ref_vol) def test_unmanage_snapshot(self, req): req.side_effect = xms_request vol_uid = self.data.test_snapshot.volume_id xms_data['volumes'] = {1: {'name': vol_uid, 'index': 1, 'vol-size': '3', }, 2: {'name': 'unmanaged', 'index': 2, 'ancestor-vol-id': ['', vol_uid, 1], 'vol-size': '3'} } ref_vol = {"source-name": "unmanaged"} self.driver.manage_existing_snapshot(self.data.test_snapshot, ref_vol) self.driver.unmanage_snapshot(self.data.test_snapshot) # ##### Consistancy Groups ##### @mock.patch('cinder.objects.snapshot.SnapshotList.get_all_for_cgsnapshot') def test_cg_create(self, get_all_for_cgsnapshot, req): req.side_effect = xms_request d = self.data snapshot_obj = fake_snapshot.fake_snapshot_obj(d.context) snapshot_obj.consistencygroup_id = d.group['id'] get_all_for_cgsnapshot.return_value = [snapshot_obj] self.driver.create_consistencygroup(d.context, d.group) self.assertEqual(1, len(xms_data['consistency-groups'])) @mock.patch('cinder.objects.snapshot.SnapshotList.get_all_for_cgsnapshot') def test_cg_update(self, get_all_for_cgsnapshot, req): req.side_effect = xms_request d = self.data snapshot_obj = fake_snapshot.fake_snapshot_obj(d.context) snapshot_obj.consistencygroup_id = d.group['id'] get_all_for_cgsnapshot.return_value = [snapshot_obj] self.driver.create_consistencygroup(d.context, d.group) self.driver.update_consistencygroup(d.context, d.group, add_volumes=[d.test_volume, d.test_volume2]) self.assertEqual(2, len(xms_data['consistency-group-volumes'])) self.driver.update_consistencygroup(d.context, d.group, remove_volumes=[d.test_volume2]) self.assertEqual(1, len(xms_data['consistency-group-volumes'])) @mock.patch('cinder.objects.snapshot.SnapshotList.get_all_for_cgsnapshot') def test_create_cg(self, get_all_for_cgsnapshot, req): req.side_effect = xms_request d = self.data snapshot_obj = fake_snapshot.fake_snapshot_obj(d.context) snapshot_obj.consistencygroup_id = d.group['id'] get_all_for_cgsnapshot.return_value = [snapshot_obj] self.driver.create_consistencygroup(d.context, d.group) self.driver.update_consistencygroup(d.context, d.group, add_volumes=[d.test_volume, d.test_volume2]) self.driver.db = mock.Mock() (self.driver.db. 
volume_get_all_by_group.return_value) = [mock.MagicMock()] res = self.driver.create_cgsnapshot(d.context, d.cgsnapshot, [snapshot_obj]) self.assertEqual((None, None), res) @mock.patch('cinder.objects.snapshot.SnapshotList.get_all_for_cgsnapshot') def test_cg_delete(self, get_all_for_cgsnapshot, req): req.side_effect = xms_request d = self.data snapshot_obj = fake_snapshot.fake_snapshot_obj(d.context) snapshot_obj.consistencygroup_id = d.group['id'] get_all_for_cgsnapshot.return_value = [snapshot_obj] self.driver.create_consistencygroup(d.context, d.group) self.driver.update_consistencygroup(d.context, d.group, add_volumes=[d.test_volume, d.test_volume2]) self.driver.db = mock.Mock() self.driver.create_cgsnapshot(d.context, d.cgsnapshot, [snapshot_obj]) self.driver.delete_consistencygroup(d.context, d.group, []) def test_cg_delete_with_volume(self, req): req.side_effect = xms_request d = self.data self.driver.create_consistencygroup(d.context, d.group) self.driver.create_volume(d.test_volume) self.driver.update_consistencygroup(d.context, d.group, add_volumes=[d.test_volume]) self.driver.db = mock.Mock() results, volumes = \ self.driver.delete_consistencygroup(d.context, d.group, [d.test_volume]) self.assertTrue(all(volume['status'] == 'deleted' for volume in volumes)) @mock.patch('cinder.objects.snapshot.SnapshotList.get_all_for_cgsnapshot') def test_cg_snapshot(self, get_all_for_cgsnapshot, req): req.side_effect = xms_request d = self.data snapshot_obj = fake_snapshot.fake_snapshot_obj(d.context) snapshot_obj.consistencygroup_id = d.group['id'] get_all_for_cgsnapshot.return_value = [snapshot_obj] self.driver.create_consistencygroup(d.context, d.group) self.driver.update_consistencygroup(d.context, d.group, add_volumes=[d.test_volume, d.test_volume2]) snapset_name = self.driver._get_cgsnap_name(d.cgsnapshot) self.assertEqual(snapset_name, '192eb39b6c2f420cbae33cfd117f0345192eb39b6c2f420cbae' '33cfd117f9876') snapset1 = {'ancestor-vol-id': ['', d.test_volume['id'], 2], 'consistencygroup_id': d.group['id'], 'name': snapset_name, 'index': 1} xms_data['snapshot-sets'] = {snapset_name: snapset1, 1: snapset1} res = self.driver.delete_cgsnapshot(d.context, d.cgsnapshot, [snapshot_obj]) self.assertEqual((None, None), res) def test_delete_cgsnapshot(self, req): d = self.data snapshot_obj = fake_snapshot.fake_snapshot_obj(d.context) snapshot_obj.consistencygroup_id = d.group['id'] self.driver.delete_cgsnapshot(d.context, d.cgsnapshot, [snapshot_obj]) req.assert_called_once_with('snapshot-sets', 'DELETE', None, '192eb39b6c2f420cbae33cfd117f0345192eb39' 'b6c2f420cbae33cfd117f9876', None, 'v2') @mock.patch('cinder.objects.snapshot.SnapshotList.get_all_for_cgsnapshot') def test_cg_from_src_snapshot(self, get_all_for_cgsnapshot, req): req.side_effect = xms_request d = self.data snapshot_obj = fake_snapshot.fake_snapshot_obj(d.context) snapshot_obj.consistencygroup_id = d.group['id'] snapshot_obj.volume_id = d.test_volume['id'] get_all_for_cgsnapshot.return_value = [snapshot_obj] self.driver.create_consistencygroup(d.context, d.group) self.driver.create_volume(d.test_volume) self.driver.create_cgsnapshot(d.context, d.cgsnapshot, []) xms_data['volumes'][2]['ancestor-vol-id'] = (xms_data['volumes'][1] ['vol-id']) snapset_name = self.driver._get_cgsnap_name(d.cgsnapshot) snapset1 = {'vol-list': [xms_data['volumes'][2]['vol-id']], 'name': snapset_name, 'index': 1} xms_data['snapshot-sets'] = {snapset_name: snapset1, 1: snapset1} cg_obj = fake_cg.fake_consistencyobject_obj(d.context) new_vol1 = 
fake_volume_obj(d.context) snapshot1 = (fake_snapshot .fake_snapshot_obj (d.context, volume_id=d.test_volume['id'])) res = self.driver.create_consistencygroup_from_src(d.context, cg_obj, [new_vol1], d.cgsnapshot, [snapshot1]) self.assertEqual((None, None), res) @mock.patch('cinder.objects.snapshot.SnapshotList.get_all_for_cgsnapshot') def test_cg_from_src_cg(self, get_all_for_cgsnapshot, req): req.side_effect = xms_request d = self.data snapshot_obj = fake_snapshot.fake_snapshot_obj(d.context) snapshot_obj.consistencygroup_id = d.group['id'] snapshot_obj.volume_id = d.test_volume['id'] get_all_for_cgsnapshot.return_value = [snapshot_obj] self.driver.create_consistencygroup(d.context, d.group) self.driver.create_volume(d.test_volume) self.driver.create_cgsnapshot(d.context, d.cgsnapshot, []) xms_data['volumes'][2]['ancestor-vol-id'] = (xms_data['volumes'][1] ['vol-id']) snapset_name = self.driver._get_cgsnap_name(d.cgsnapshot) snapset1 = {'vol-list': [xms_data['volumes'][2]['vol-id']], 'name': snapset_name, 'index': 1} xms_data['snapshot-sets'] = {snapset_name: snapset1, 1: snapset1} cg_obj = fake_cg.fake_consistencyobject_obj(d.context) new_vol1 = fake_volume_obj(d.context) new_cg_obj = fake_cg.fake_consistencyobject_obj( d.context, id=fake.CONSISTENCY_GROUP2_ID) snapset2_name = new_cg_obj.id new_vol1.id = '192eb39b-6c2f-420c-bae3-3cfd117f0001' new_vol2 = fake_volume_obj(d.context) snapset2 = {'vol-list': [xms_data['volumes'][2]['vol-id']], 'name': snapset2_name, 'index': 1} xms_data['snapshot-sets'].update({5: snapset2, snapset2_name: snapset2}) self.driver.create_consistencygroup_from_src(d.context, new_cg_obj, [new_vol2], None, None, cg_obj, [new_vol1]) @mock.patch('cinder.objects.snapshot.SnapshotList.get_all_for_cgsnapshot') def test_invalid_cg_from_src_input(self, get_all_for_cgsnapshot, req): req.side_effect = xms_request d = self.data self.assertRaises(exception.InvalidInput, self.driver.create_consistencygroup_from_src, d.context, d.group, [], None, None, None, None) # #### Groups #### def test_group_create(self, req): """Test group create.""" req.side_effect = xms_request d = self.data self.driver.create_group(d.context, d.group) self.assertEqual(1, len(xms_data['consistency-groups'])) def test_group_update(self, req): """Test group update.""" req.side_effect = xms_request d = self.data self.driver.create_consistencygroup(d.context, d.group) self.driver.update_consistencygroup(d.context, d.group, add_volumes=[d.test_volume, d.test_volume2]) self.assertEqual(2, len(xms_data['consistency-group-volumes'])) self.driver.update_group(d.context, d.group, remove_volumes=[d.test_volume2]) self.assertEqual(1, len(xms_data['consistency-group-volumes'])) def test_create_group_snapshot(self, req): """Test create group snapshot.""" req.side_effect = xms_request d = self.data snapshot_obj = fake_snapshot.fake_snapshot_obj(d.context) snapshot_obj.consistencygroup_id = d.group['id'] self.driver.create_group(d.context, d.group) self.driver.update_group(d.context, d.group, add_volumes=[d.test_volume, d.test_volume2]) res = self.driver.create_group_snapshot(d.context, d.cgsnapshot, [snapshot_obj]) self.assertEqual((None, None), res) def test_group_delete(self, req): """"Test delete group.""" req.side_effect = xms_request d = self.data snapshot_obj = fake_snapshot.fake_snapshot_obj(d.context) snapshot_obj.consistencygroup_id = d.group['id'] self.driver.create_group(d.context, d.group) self.driver.update_group(d.context, d.group, add_volumes=[d.test_volume, d.test_volume2]) self.driver.db = 
mock.Mock() (self.driver.db. volume_get_all_by_group.return_value) = [mock.MagicMock()] self.driver.create_group_snapshot(d.context, d.cgsnapshot, [snapshot_obj]) self.driver.delete_group(d.context, d.group, []) def test_group_delete_with_volume(self, req): req.side_effect = xms_request d = self.data self.driver.create_consistencygroup(d.context, d.group) self.driver.create_volume(d.test_volume) self.driver.update_consistencygroup(d.context, d.group, add_volumes=[d.test_volume]) self.driver.db = mock.Mock() results, volumes = \ self.driver.delete_group(d.context, d.group, [d.test_volume]) self.assertTrue(all(volume['status'] == 'deleted' for volume in volumes)) def test_group_snapshot(self, req): """test group snapshot.""" req.side_effect = xms_request d = self.data snapshot_obj = fake_snapshot.fake_snapshot_obj(d.context) snapshot_obj.consistencygroup_id = d.group['id'] self.driver.create_group(d.context, d.group) self.driver.update_group(d.context, d.group, add_volumes=[d.test_volume, d.test_volume2]) snapset_name = self.driver._get_cgsnap_name(d.cgsnapshot) self.assertEqual(snapset_name, '192eb39b6c2f420cbae33cfd117f0345192eb39b6c2f420cbae' '33cfd117f9876') snapset1 = {'ancestor-vol-id': ['', d.test_volume['id'], 2], 'consistencygroup_id': d.group['id'], 'name': snapset_name, 'index': 1} xms_data['snapshot-sets'] = {snapset_name: snapset1, 1: snapset1} res = self.driver.delete_group_snapshot(d.context, d.cgsnapshot, [snapshot_obj]) self.assertEqual((None, None), res) def test_group_snapshot_with_generic_group(self, req): """test group snapshot shot with generic group .""" req.side_effect = xms_request d = self.data snapshot_obj = fake_snapshot.fake_snapshot_obj(d.context) snapshot_obj.consistencygroup_id = d.group['id'] self.driver.create_group(d.context, d.group) self.driver.update_group(d.context, d.group, add_volumes=[d.test_volume, d.test_volume2]) snapset_name = self.driver._get_cgsnap_name(d.cgsnapshot_as_group_id) self.assertEqual(snapset_name, '192eb39b6c2f420cbae33cfd117f0345192eb39b6c2f420cbae' '33cfd117f9876') snapset1 = {'ancestor-vol-id': ['', d.test_volume['id'], 2], 'consistencygroup_id': d.group['id'], 'name': snapset_name, 'index': 1} xms_data['snapshot-sets'] = {snapset_name: snapset1, 1: snapset1} res = self.driver.delete_group_snapshot(d.context, d.cgsnapshot, [snapshot_obj]) self.assertEqual((None, None), res) def test_delete_group_snapshot(self, req): """test delete group snapshot.""" d = self.data snapshot_obj = fake_snapshot.fake_snapshot_obj(d.context) snapshot_obj.consistencygroup_id = d.group['id'] self.driver.delete_group_snapshot(d.context, d.cgsnapshot, [snapshot_obj]) req.assert_called_once_with('snapshot-sets', 'DELETE', None, '192eb39b6c2f420cbae33cfd117f0345192eb39' 'b6c2f420cbae33cfd117f9876', None, 'v2') def test_delete_group_snapshot_with_generic_group(self, req): """test delete group snapshot.""" d = self.data snapshot_obj = fake_snapshot.fake_snapshot_obj(d.context) snapshot_obj.consistencygroup_id = d.group['id'] self.driver.delete_group_snapshot(d.context, d.cgsnapshot_as_group_id, [snapshot_obj]) req.assert_called_once_with('snapshot-sets', 'DELETE', None, '192eb39b6c2f420cbae33cfd117f0345192eb39' 'b6c2f420cbae33cfd117f9876', None, 'v2') def test_group_from_src_snapshot(self, req): """test group from source snapshot.""" req.side_effect = xms_request d = self.data self.driver.create_group(d.context, d.group) self.driver.create_volume(d.test_volume) self.driver.create_group_snapshot(d.context, d.cgsnapshot, []) 
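# Seed the fake XMS state: volume 2 (produced by the group snapshot above)
# is marked as a descendant of volume 1, and a snapshot-set entry is
# registered under the name the driver derives for this cgsnapshot, so
# create_group_from_src can look the snapset up by that name.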
xms_data['volumes'][2]['ancestor-vol-id'] = (xms_data['volumes'][1] ['vol-id']) snapset_name = self.driver._get_cgsnap_name(d.cgsnapshot) snapset1 = {'vol-list': [xms_data['volumes'][2]['vol-id']], 'name': snapset_name, 'index': 1} xms_data['snapshot-sets'] = {snapset_name: snapset1, 1: snapset1} cg_obj = fake_cg.fake_consistencyobject_obj(d.context) new_vol1 = fake_volume_obj(d.context) snapshot1 = (fake_snapshot .fake_snapshot_obj (d.context, volume_id=d.test_volume['id'])) res = self.driver.create_group_from_src(d.context, cg_obj, [new_vol1], d.cgsnapshot, [snapshot1]) self.assertEqual((None, None), res) def test_group_from_src_group(self, req): """test group from source group.""" req.side_effect = xms_request d = self.data self.driver.create_group(d.context, d.group) self.driver.create_volume(d.test_volume) self.driver.create_group_snapshot(d.context, d.cgsnapshot, []) xms_data['volumes'][2]['ancestor-vol-id'] = (xms_data['volumes'][1] ['vol-id']) snapset_name = self.driver._get_cgsnap_name(d.cgsnapshot) snapset1 = {'vol-list': [xms_data['volumes'][2]['vol-id']], 'name': snapset_name, 'index': 1} xms_data['snapshot-sets'] = {snapset_name: snapset1, 1: snapset1} cg_obj = fake_cg.fake_consistencyobject_obj(d.context) new_vol1 = fake_volume_obj(d.context) new_cg_obj = fake_cg.fake_consistencyobject_obj( d.context, id=fake.CONSISTENCY_GROUP2_ID) snapset2_name = new_cg_obj.id new_vol1.id = '192eb39b-6c2f-420c-bae3-3cfd117f0001' new_vol2 = fake_volume_obj(d.context) snapset2 = {'vol-list': [xms_data['volumes'][2]['vol-id']], 'name': snapset2_name, 'index': 1} xms_data['snapshot-sets'].update({5: snapset2, snapset2_name: snapset2}) self.driver.create_group_from_src(d.context, new_cg_obj, [new_vol2], None, None, cg_obj, [new_vol1]) def test_invalid_group_from_src_input(self, req): """test invalid group from source.""" req.side_effect = xms_request d = self.data self.assertRaises(exception.InvalidInput, self.driver.create_group_from_src, d.context, d.group, [], None, None, None, None) def test_get_password(self, _req): p = self.driver._get_password() self.assertEqual(len(p), 12) self.assertIsNotNone(re.match(r'[A-Z0-9]{12}', p), p) @mock.patch('requests.request') class XtremIODriverTestCase(BaseXtremIODriverTestCase): # ##### XMS Client ##### @mock.patch.object(time, 'sleep', mock.Mock(return_value=0)) def test_retry_request(self, req): busy_response = mock.MagicMock() busy_response.status_code = 400 busy_response.json.return_value = { "message": "system_is_busy", "error_code": 400 } good_response = mock.MagicMock() good_response.status_code = 200 XtremIODriverTestCase.req_count = 0 def busy_request(*args, **kwargs): if XtremIODriverTestCase.req_count < 1: XtremIODriverTestCase.req_count += 1 return busy_response return good_response req.side_effect = busy_request self.driver.create_volume(self.data.test_volume) def test_verify_cert(self, req): good_response = mock.MagicMock() good_response.status_code = 200 def request_verify_cert(*args, **kwargs): self.assertEqual(kwargs['verify'], '/test/path/root_ca.crt') return good_response req.side_effect = request_verify_cert self.driver.client.req('volumes') @mock.patch('cinder.volume.drivers.dell_emc.xtremio.XtremIOClient.req') class XtremIODriverFCTestCase(BaseXtremIODriverTestCase): def setUp(self): super(XtremIODriverFCTestCase, self).setUp() self.driver = xtremio.XtremIOFCDriver( configuration=self.config) # ##### Connection FC##### def test_no_targets_configured(self, req): req.side_effect = xms_request targets = xms_data['targets'].copy() 
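# xms_data is shared fixture state, so keep a copy of the 'targets' entry
# and restore it at the end of the test; otherwise the cleared dict would
# leak into later tests.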
xms_data['targets'].clear() self.assertRaises(exception.VolumeBackendAPIException, self.driver.get_targets) xms_data['targets'] = targets def test_no_allowed_targets(self, req): req.side_effect = xms_request self.driver.allowed_ports = ['58:cc:f0:98:49:22:07:02'] self.assertRaises(exception.VolumeBackendAPIException, self.driver.get_targets) def test_filtered_targets(self, req): req.side_effect = xms_request self.driver.allowed_ports = ['21:00:00:24:ff:57:b2:36'] targets = self.driver.get_targets() self.assertEqual(1, len(targets)) self.assertIn('21000024ff57b236', targets) def test_initialize_connection(self, req): req.side_effect = xms_request self.driver.create_volume(self.data.test_volume) map_data = self.driver.initialize_connection(self.data.test_volume, self.data.connector) self.assertEqual(1, map_data['data']['target_lun']) def test_terminate_connection(self, req): req.side_effect = xms_request self.driver.create_volume(self.data.test_volume) self.driver.initialize_connection(self.data.test_volume, self.data.connector) for i1 in xms_data['initiators'].values(): i1['ig-id'] = ['', i1['ig-id'], 1] self.driver.terminate_connection(self.data.test_volume, self.data.connector) def test_force_terminate_connection(self, req): req.side_effect = xms_request self.driver.create_volume(self.data.test_volume) self.driver.initialize_connection(self.data.test_volume, self.data.connector) vol1 = xms_data['volumes'][1] # lun mapping list is a list of triplets (IG OID, TG OID, lun number) vol1['lun-mapping-list'] = [[['a91e8c81c2d14ae4865187ce4f866f8a', 'iqn.1993-08.org.debian:01:222', 1], ['', 'Default', 1], 1]] self.driver.terminate_connection(self.data.test_volume, None) def test_initialize_existing_ig_connection(self, req): req.side_effect = xms_request self.driver.create_volume(self.data.test_volume) pre_existing = 'pre_existing_host' self.driver._create_ig(pre_existing) wwpns = self.driver._get_initiator_names(self.data.connector) for wwpn in wwpns: data = {'initiator-name': wwpn, 'ig-id': pre_existing, 'port-address': wwpn} self.driver.client.req('initiators', 'POST', data) def get_fake_initiator(wwpn): return {'port-address': wwpn, 'ig-id': ['', pre_existing, 1]} with mock.patch.object(self.driver.client, 'get_initiator', side_effect=get_fake_initiator): map_data = self.driver.initialize_connection(self.data.test_volume, self.data.connector) self.assertEqual(1, map_data['data']['target_lun']) self.assertEqual(1, len(xms_data['initiator-groups'])) def test_get_initiator_igs_ver4(self, req): req.side_effect = xms_request wwpn1 = '11:22:33:44:55:66:77:88' wwpn2 = '11:22:33:44:55:66:77:89' port_addresses = [wwpn1, wwpn2] ig_id = ['', 'my_ig', 1] self.driver.client = xtremio.XtremIOClient4(self.config, self.config .xtremio_cluster_name) def get_fake_initiator(wwpn): return {'port-address': wwpn, 'ig-id': ig_id} with mock.patch.object(self.driver.client, 'get_initiator', side_effect=get_fake_initiator): self.driver.client.get_initiators_igs(port_addresses) def test_get_free_lun(self, req): def lm_response(*args, **kwargs): return {'lun-maps': [{'lun': 1}]} req.side_effect = lm_response ig_names = ['test1', 'test2'] self.driver._get_free_lun(ig_names) def test_race_on_terminate_connection(self, req): """Test for race conditions on num_of_mapped_volumes. 
This test confirms that num_of_mapped_volumes won't break even if we receive a NotFound exception when retrieving info on a specific mapping, as that specific mapping could have been deleted between the request to get the list of exiting mappings and the request to get the info on one of them. """ req.side_effect = xms_request self.driver.client = xtremio.XtremIOClient3( self.config, self.config.xtremio_cluster_name) # We'll wrap num_of_mapped_volumes, we'll store here original method original_method = self.driver.client.num_of_mapped_volumes def fake_num_of_mapped_volumes(*args, **kwargs): # Add a nonexistent mapping mappings = [{'href': 'volumes/1'}, {'href': 'volumes/12'}] # Side effects will be: 1st call returns the list, then we return # data for existing mappings, and on the nonexistent one we added # we return NotFound side_effect = [{'lun-maps': mappings}, {'content': xms_data['lun-maps'][1]}, exception.NotFound] with mock.patch.object(self.driver.client, 'req', side_effect=side_effect): return original_method(*args, **kwargs) self.driver.create_volume(self.data.test_volume) map_data = self.driver.initialize_connection(self.data.test_volume, self.data.connector) self.assertEqual(1, map_data['data']['target_lun']) with mock.patch.object(self.driver.client, 'num_of_mapped_volumes', side_effect=fake_num_of_mapped_volumes): self.driver.terminate_connection(self.data.test_volume, self.data.connector) self.driver.delete_volume(self.data.test_volume) ././@PaxHeader0000000000000000000000000000003200000000000011450 xustar000000000000000026 mtime=1759315577.26712 cinder-27.0.0/cinder/tests/unit/volume/drivers/dell_emc/unity/0000775000175000017500000000000000000000000024355 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/volume/drivers/dell_emc/unity/__init__.py0000664000175000017500000000000000000000000026454 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/volume/drivers/dell_emc/unity/fake_enum.py0000664000175000017500000000165600000000000026671 0ustar00zuulzuul00000000000000# Copyright (c) 2017-2019 Dell Inc. or its subsidiaries. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import enum class TieringPolicyEnum(enum.Enum): AUTOTIER_HIGH = (0, 'Start Highest and Auto-tier') AUTOTIER = (1, 'Auto-tier') HIGHEST = (2, 'Highest') LOWEST = (3, 'Lowest') NO_DATA_MOVEMENT = (4, 'No Data Movement') MIXED = (0xffff, 'Different Tier Policies') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/volume/drivers/dell_emc/unity/fake_exception.py0000664000175000017500000000367000000000000027721 0ustar00zuulzuul00000000000000# Copyright (c) 2016 Dell Inc. or its subsidiaries. # All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. class StoropsException(Exception): message = 'Storops Error.' class UnityException(StoropsException): pass class UnityLunNameInUseError(UnityException): pass class UnityResourceNotFoundError(UnityException): pass class UnitySnapNameInUseError(UnityException): pass class UnityDeleteAttachedSnapError(UnityException): pass class UnityResourceAlreadyAttachedError(UnityException): pass class UnityPolicyNameInUseError(UnityException): pass class UnityNothingToModifyError(UnityException): pass class UnityThinCloneLimitExceededError(UnityException): pass class ExtendLunError(Exception): pass class DetachIsCalled(Exception): pass class DetachAllIsCalled(Exception): pass class DetachFromIsCalled(Exception): pass class LunDeleteIsCalled(Exception): pass class SnapDeleteIsCalled(Exception): pass class UnexpectedLunDeletion(Exception): pass class AdapterSetupError(Exception): pass class ReplicationManagerSetupError(Exception): pass class HostDeleteIsCalled(Exception): pass class UnityThinCloneNotAllowedError(UnityException): pass class SystemAPINotSupported(UnityException): pass class UnityDeleteLunInReplicationError(UnityException): pass class UnityConsistencyGroupNameInUseError(StoropsException): pass ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/volume/drivers/dell_emc/unity/test_adapter.py0000664000175000017500000023543500000000000027422 0ustar00zuulzuul00000000000000# Copyright (c) 2016 - 2018 Dell Inc. or its subsidiaries. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import contextlib import functools from unittest import mock import ddt from oslo_utils import units from cinder import exception from cinder.tests.unit import test from cinder.tests.unit.volume.drivers.dell_emc.unity \ import fake_enum as enums from cinder.tests.unit.volume.drivers.dell_emc.unity \ import fake_exception as ex from cinder.tests.unit.volume.drivers.dell_emc.unity import test_client from cinder.volume.drivers.dell_emc.unity import adapter from cinder.volume.drivers.dell_emc.unity import client from cinder.volume.drivers.dell_emc.unity import replication ######################## # # Start of Mocks # ######################## class MockConfig(object): def __init__(self): self.config_group = 'test_backend' self.unity_storage_pool_names = ['pool1', 'pool2'] self.unity_io_ports = None self.reserved_percentage = 5 self.max_over_subscription_ratio = 300 self.volume_backend_name = 'backend' self.san_ip = '1.2.3.4' self.san_login = 'user' self.san_password = 'pass' self.driver_ssl_cert_verify = True self.driver_ssl_cert_path = None self.remove_empty_host = False self.use_multipath_for_image_xfer = False self.enforce_multipath_for_image_xfer = False def safe_get(self, name): return getattr(self, name) class MockConnector(object): @staticmethod def disconnect_volume(data, device): pass class MockDriver(object): def __init__(self): self.configuration = mock.Mock(volume_dd_blocksize='1M') self.replication_manager = MockReplicationManager() self.protocol = 'iSCSI' @staticmethod def _connect_device(conn): return {'connector': MockConnector(), 'device': {'path': 'dev'}, 'conn': {'data': {}}} def get_version(self): return '1.0.0' class MockReplicationManager(object): def __init__(self): self.is_replication_configured = False self.replication_devices = {} self.active_backend_id = None self.is_service_failed_over = None self.default_device = None self.active_adapter = None def failover_service(self, backend_id): if backend_id == 'default': self.is_service_failed_over = False elif backend_id == 'secondary_unity': self.is_service_failed_over = True else: raise exception.VolumeBackendAPIException() class MockClient(object): def __init__(self): self._system = test_client.MockSystem() self.host = '10.10.10.10' # fake unity IP @staticmethod def get_pools(): return test_client.MockResourceList(['pool0', 'pool1']) @staticmethod def create_lun(name, size, pool, description=None, io_limit_policy=None, is_thin=None, is_compressed=None, tiering_policy=None): lun_id = name if is_thin is not None and not is_thin: lun_id += '_thick' if tiering_policy: if tiering_policy is enums.TieringPolicyEnum.AUTOTIER: lun_id += '_auto' elif tiering_policy is enums.TieringPolicyEnum.LOWEST: lun_id += '_low' return test_client.MockResource(_id=lun_id, name=name) @staticmethod def lun_has_snapshot(lun): return lun.name == 'volume_has_snapshot' @staticmethod def get_lun(name=None, lun_id=None): if lun_id is None: lun_id = 'lun_4' if lun_id in ('lun_43',): # for thin clone cases return test_client.MockResource(_id=lun_id, name=name) if name == 'not_exists': ret = test_client.MockResource(name=lun_id) ret.existed = False else: if name is None: name = lun_id ret = test_client.MockResource(_id=lun_id, name=name) return ret @staticmethod def delete_lun(lun_id): if lun_id != 'lun_4': raise ex.UnexpectedLunDeletion() @staticmethod def get_serial(): return 'CLIENT_SERIAL' @staticmethod def create_snap(src_lun_id, name=None): if src_lun_id in ('lun_53', 'lun_55'): # for thin clone cases return test_client.MockResource( 
_id='snap_clone_{}'.format(src_lun_id)) return test_client.MockResource(name=name, _id=src_lun_id) @staticmethod def get_snap(name=None): if name in ('snap_50',): # for thin clone cases return name snap = test_client.MockResource(name=name, _id=name) if name is not None: ret = snap else: ret = [snap] return ret @staticmethod def delete_snap(snap): if snap.name in ('abc-def_snap',): raise ex.SnapDeleteIsCalled() @staticmethod def create_host(name): return test_client.MockResource(name=name) @staticmethod def create_host_wo_lock(name): return test_client.MockResource(name=name) @staticmethod def delete_host_wo_lock(host): if host.name == 'empty-host': raise ex.HostDeleteIsCalled() @staticmethod def attach(host, lun_or_snap): return 10 @staticmethod def detach(host, lun_or_snap): error_ids = ['lun_43', 'snap_0'] if host.name == 'host1' and lun_or_snap.get_id() in error_ids: raise ex.DetachIsCalled() @staticmethod def detach_all(lun): error_ids = ['lun_44'] if lun.get_id() in error_ids: raise ex.DetachAllIsCalled() @staticmethod def get_iscsi_target_info(allowed_ports=None): return [{'portal': '1.2.3.4:1234', 'iqn': 'iqn.1-1.com.e:c.a.a0'}, {'portal': '1.2.3.5:1234', 'iqn': 'iqn.1-1.com.e:c.a.a1'}] @staticmethod def get_fc_target_info(host=None, logged_in_only=False, allowed_ports=None): if host and host.name == 'no_target': ret = [] else: ret = ['8899AABBCCDDEEFF', '8899AABBCCDDFFEE'] return ret @staticmethod def create_lookup_service(): return {} @staticmethod def get_io_limit_policy(specs): mock_io_policy = (test_client.MockResource(name=specs.get('id')) if specs else None) return mock_io_policy @staticmethod def extend_lun(lun_id, size_gib): if size_gib <= 0: raise ex.ExtendLunError @staticmethod def get_fc_ports(): return test_client.MockResourceList(ids=['spa_iom_0_fc0', 'spa_iom_0_fc1']) @staticmethod def get_ethernet_ports(): return test_client.MockResourceList(ids=['spa_eth0', 'spb_eth0']) @staticmethod def thin_clone(obj, name, io_limit_policy, description, new_size_gb): if (obj.name, name) in ( ('snap_61', 'lun_60'), ('lun_63', 'lun_60')): return test_client.MockResource(_id=name) elif (obj.name, name) in (('snap_71', 'lun_70'), ('lun_72', 'lun_70')): raise ex.UnityThinCloneNotAllowedError() else: raise ex.UnityThinCloneLimitExceededError @staticmethod def update_host_initiators(host, wwns): return None @property def system(self): return self._system def restore_snapshot(self, snap_name): return test_client.MockResource(name="back_snap") def get_pool_id_by_name(self, name): pools = {'PoolA': 'pool_1', 'PoolB': 'pool_2', 'PoolC': 'pool_3'} return pools.get(name, None) def migrate_lun(self, lun_id, dest_pool_id, provision=None): if dest_pool_id == 'pool_2': return True if dest_pool_id == 'pool_3': return False def get_remote_system(self, name=None): if name == 'not-found-remote-system': return None return test_client.MockResource(_id='RS_1') def get_replication_session(self, name=None): if name == 'not-found-rep-session': raise client.ClientReplicationError() rep_session = test_client.MockResource(_id='rep_session_id_1') rep_session.name = name rep_session.src_resource_id = 'sv_1' rep_session.dst_resource_id = 'sv_99' return rep_session def create_replication(self, src_lun, max_time_out_of_sync, dst_pool_id, remote_system): if (src_lun.get_id() == 'sv_1' and max_time_out_of_sync == 60 and dst_pool_id == 'pool_1' and remote_system.get_id() == 'RS_1'): rep_session = test_client.MockResource(_id='rep_session_id_1') rep_session.name = 'rep_session_name_1' return rep_session return None 
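# Note: create_replication above only returns a fake session when called
# with exactly the values the replication fixtures wire up elsewhere
# (source LUN 'sv_1', 60s max_time_out_of_sync, destination pool 'pool_1',
# remote system 'RS_1'); any other combination yields None, so
# argument-plumbing mistakes in the adapter surface as test failures.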
def failover_replication(self, rep_session): if rep_session.name != 'rep_session_name_1': raise client.ClientReplicationError() def failback_replication(self, rep_session): if rep_session.name != 'rep_session_name_1': raise client.ClientReplicationError() def is_cg_replicated(self, cg_id): return cg_id and 'is_replicated' in cg_id def get_cg(self, name): return test_client.MockResource(_id=name) def create_cg_replication(self, group_id, pool_id, remote_system, max_time): if group_id and 'error' in group_id: raise Exception('has issue when creating cg replication session.') def delete_cg_rep_session(self, group_id): if group_id and 'error' in group_id: raise Exception('has issue when deleting cg replication session.') def failover_cg_rep_session(self, group_id, need_sync): if group_id and 'error' in group_id: raise Exception('has issue when failover cg replication session.') def failback_cg_rep_session(self, group_id): if group_id and 'error' in group_id: raise Exception('has issue when failback cg replication session.') class MockLookupService(object): @staticmethod def get_device_mapping_from_network(initiator_wwns, target_wwns): return { 'san_1': { 'initiator_port_wwn_list': ('200000051e55a100', '200000051e55a121'), 'target_port_wwn_list': ('100000051e55a100', '100000051e55a121') } } class MockOSResource(mock.Mock): def __init__(self, *args, **kwargs): super(MockOSResource, self).__init__(*args, **kwargs) if 'name' in kwargs: self.name = kwargs['name'] self.kwargs = kwargs def __getitem__(self, key): return self.kwargs[key] def mock_replication_device(device_conf=None, serial_number=None, max_time_out_of_sync=None, destination_pool_id=None): if device_conf is None: device_conf = { 'backend_id': 'secondary_unity', 'san_ip': '2.2.2.2' } if serial_number is None: serial_number = 'SECONDARY_UNITY_SN' if max_time_out_of_sync is None: max_time_out_of_sync = 60 if destination_pool_id is None: destination_pool_id = 'pool_1' rep_device = replication.ReplicationDevice(device_conf, MockDriver()) rep_device._adapter = mock_adapter(adapter.CommonAdapter) rep_device._adapter._serial_number = serial_number rep_device.max_time_out_of_sync = max_time_out_of_sync rep_device._dst_pool = test_client.MockResource(_id=destination_pool_id) return rep_device def mock_adapter(driver_clz): ret = driver_clz() ret._client = MockClient() with mock.patch('cinder.volume.drivers.dell_emc.unity.adapter.' 
'CommonAdapter.validate_ports'), patch_storops(): ret.do_setup(MockDriver(), MockConfig()) ret.lookup_service = MockLookupService() return ret def get_backend_qos_specs(volume): return None def get_connector_properties(use_multipath, enforce_multipath): return {'host': 'host1', 'wwpns': 'abcdefg'} def get_lun_pl(name): return 'id^%s|system^CLIENT_SERIAL|type^lun|version^None' % name def get_snap_lun_pl(name): return 'id^%s|system^CLIENT_SERIAL|type^snap_lun|version^None' % name def get_snap_pl(name): return 'id^%s|system^CLIENT_SERIAL|type^snapshot|version^None' % name def get_connector_uids(adapter, connector): return [] def get_connection_info(adapter, hlu, host, connector): return {} def get_volume_type_qos_specs(qos_id): if qos_id == 'qos': return {'qos_specs': {'id': u'qos_type_id_1', 'consumer': u'back-end', u'maxBWS': u'102400', u'maxIOPS': u'500'}} if qos_id == 'qos_2': return {'qos_specs': {'id': u'qos_type_id_2', 'consumer': u'back-end', u'maxBWS': u'102402', u'maxIOPS': u'502'}} return {'qos_specs': {}} def get_volume_type_extra_specs(type_id): if type_id == 'thick': return {'provisioning:type': 'thick', 'thick_provisioning_support': ' True'} if type_id == 'tier_auto': return {'storagetype:tiering': 'Auto', 'fast_support': ' True'} if type_id == 'tier_lowest': return {'storagetype:tiering': 'LowestAvailable', 'fast_support': ' True'} if type_id == 'compressed': return {'provisioning:type': 'compressed', 'compression_support': ' True'} return {} def get_group_type_specs(group_type_id): if group_type_id == '': return {'consistent_group_snapshot_enabled': ' True', 'group_type_id': group_type_id} return {} def group_is_cg(group): return group.id != 'not_cg' def patch_for_unity_adapter(func): @functools.wraps(func) @mock.patch('cinder.volume.volume_types.get_volume_type_extra_specs', new=get_volume_type_extra_specs) @mock.patch('cinder.volume.group_types.get_group_type_specs', new=get_group_type_specs) @mock.patch('cinder.volume.volume_types.get_volume_type_qos_specs', new=get_volume_type_qos_specs) @mock.patch('cinder.volume.drivers.dell_emc.unity.utils.' 'get_backend_qos_specs', new=get_backend_qos_specs) @mock.patch('cinder.volume.drivers.dell_emc.unity.utils.' 
'group_is_cg', new=group_is_cg) @mock.patch('cinder.volume.volume_utils.brick_get_connector_properties', new=get_connector_properties) def func_wrapper(*args, **kwargs): return func(*args, **kwargs) return func_wrapper def patch_for_concrete_adapter(clz_str): def inner_decorator(func): @functools.wraps(func) @mock.patch('%s.get_connector_uids' % clz_str, new=get_connector_uids) @mock.patch('%s.get_connection_info' % clz_str, new=get_connection_info) def func_wrapper(*args, **kwargs): return func(*args, **kwargs) return func_wrapper return inner_decorator patch_for_iscsi_adapter = patch_for_concrete_adapter( 'cinder.volume.drivers.dell_emc.unity.adapter.ISCSIAdapter') patch_for_fc_adapter = patch_for_concrete_adapter( 'cinder.volume.drivers.dell_emc.unity.adapter.FCAdapter') @contextlib.contextmanager def patch_thin_clone(cloned_lun): with mock.patch.object(adapter.CommonAdapter, '_thin_clone') as tc: tc.return_value = cloned_lun yield tc @contextlib.contextmanager def patch_dd_copy(copied_lun): with mock.patch.object(adapter.CommonAdapter, '_dd_copy') as dd: dd.return_value = copied_lun yield dd @contextlib.contextmanager def patch_copy_volume(): with mock.patch('cinder.volume.volume_utils.copy_volume') as mocked: yield mocked @contextlib.contextmanager def patch_storops(): with mock.patch.object(adapter, 'storops') as storops: storops.ThinCloneActionEnum = mock.Mock(DD_COPY='DD_COPY') yield storops class IdMatcher(object): def __init__(self, obj): self._obj = obj def __eq__(self, other): return self._obj._id == other._id ######################## # # Start of Tests # ######################## @ddt.ddt @mock.patch.object(adapter, 'storops_ex', new=ex) @mock.patch.object(adapter, 'enums', new=enums) @mock.patch.object(adapter.volume_utils, 'is_group_a_cg_snapshot_type', new=lambda x: True) class CommonAdapterTest(test.TestCase): def setUp(self): super(CommonAdapterTest, self).setUp() self.adapter = mock_adapter(adapter.CommonAdapter) def test_get_managed_pools(self): ret = self.adapter.get_managed_pools() self.assertIn('pool1', ret) self.assertNotIn('pool0', ret) self.assertNotIn('pool2', ret) @patch_for_unity_adapter def test_create_volume(self): volume = MockOSResource(name='lun_3', size=5, host='unity#pool1', group=None) ret = self.adapter.create_volume(volume) expected = get_lun_pl('lun_3') self.assertEqual(expected, ret['provider_location']) @patch_for_unity_adapter def test_create_volume_thick(self): volume = MockOSResource(name='lun_3', size=5, host='unity#pool1', group=None, volume_type_id='thick') ret = self.adapter.create_volume(volume) expected = get_lun_pl('lun_3_thick') self.assertEqual(expected, ret['provider_location']) @patch_for_unity_adapter def test_create_compressed_volume(self): volume_type = MockOSResource( extra_specs={'compression_support': ' True'}) volume = MockOSResource(name='lun_3', size=5, host='unity#pool1', group=None, volume_type=volume_type) ret = self.adapter.create_volume(volume) expected = get_lun_pl('lun_3') self.assertEqual(expected, ret['provider_location']) @patch_for_unity_adapter def test_create_auto_tiering_volume(self): volume = MockOSResource(name='lun_3', size=5, host='unity#pool1', group=None, volume_type_id='tier_auto') ret = self.adapter.create_volume(volume) expected = get_lun_pl('lun_3_auto') self.assertEqual(expected, ret['provider_location']) @patch_for_unity_adapter def test_create_lowest_tiering_volume(self): volume = MockOSResource(name='lun_3', size=5, host='unity#pool1', group=None, volume_type_id='tier_lowest') ret = 
self.adapter.create_volume(volume) expected = get_lun_pl('lun_3_low') self.assertEqual(expected, ret['provider_location']) def test_create_snapshot(self): volume = MockOSResource(provider_location='id^lun_43') snap = MockOSResource(volume=volume, name='abc-def_snap') result = self.adapter.create_snapshot(snap) self.assertEqual(get_snap_pl('lun_43'), result['provider_location']) self.assertEqual('lun_43', result['provider_id']) def test_delete_snap(self): def f(): snap = MockOSResource(name='abc-def_snap') self.adapter.delete_snapshot(snap) self.assertRaises(ex.SnapDeleteIsCalled, f) def test_get_lun_id_has_location(self): volume = MockOSResource(provider_location='id^lun_43') self.assertEqual('lun_43', self.adapter.get_lun_id(volume)) def test_get_lun_id_no_location(self): volume = MockOSResource(provider_location=None) self.assertEqual('lun_4', self.adapter.get_lun_id(volume)) def test_delete_volume(self): volume = MockOSResource(provider_location='id^lun_4') self.adapter.delete_volume(volume) @patch_for_unity_adapter def test_retype_volume_has_snapshot(self): volume = MockOSResource(name='volume_has_snapshot', size=5, host='HostA@BackendB#PoolB') ctxt = None diff = None new_type = {'name': u'type01', 'id': 'compressed'} host = {'host': 'HostA@BackendB#PoolB'} result = self.adapter.retype(ctxt, volume, new_type, diff, host) self.assertFalse(result) @patch_for_unity_adapter def test_retype_volume_thick_to_compressed(self): volume = MockOSResource(name='thick_volume', size=5, host='HostA@BackendB#PoolA', provider_location='id^lun_33') ctxt = None diff = None new_type = {'name': u'compressed_type', 'id': 'compressed'} host = {'host': 'HostA@BackendB#PoolB'} result = self.adapter.retype(ctxt, volume, new_type, diff, host) self.assertEqual((True, {}), result) @patch_for_unity_adapter def test_retype_volume_to_compressed(self): volume = MockOSResource(name='thin_volume', size=5, host='HostA@BackendB#PoolB') ctxt = None diff = None new_type = {'name': u'compressed_type', 'id': 'compressed'} host = {'host': 'HostA@BackendB#PoolB'} result = self.adapter.retype(ctxt, volume, new_type, diff, host) self.assertTrue(result) @patch_for_unity_adapter def test_retype_volume_to_qos(self): volume = MockOSResource(name='thin_volume', size=5, host='HostA@BackendB#PoolB') ctxt = None diff = None new_type = {'name': u'qos_type', 'id': 'qos'} host = {'host': 'HostA@BackendB#PoolB'} result = self.adapter.retype(ctxt, volume, new_type, diff, host) self.assertTrue(result) @patch_for_unity_adapter def test_retype_volume_revert_qos(self): volume = MockOSResource(name='qos_volume', size=5, host='HostA@BackendB#PoolB', volume_type_id='qos_2') ctxt = None diff = None new_type = {'name': u'no_qos_type', 'id': ''} host = {'host': 'HostA@BackendB#PoolB'} result = self.adapter.retype(ctxt, volume, new_type, diff, host) self.assertTrue(result) def test_get_pool_stats(self): stats_list = self.adapter.get_pools_stats() self.assertEqual(1, len(stats_list)) stats = stats_list[0] self.assertEqual('pool1', stats['pool_name']) self.assertEqual(5, stats['total_capacity_gb']) self.assertEqual('pool1|CLIENT_SERIAL', stats['location_info']) self.assertEqual(6, stats['provisioned_capacity_gb']) self.assertEqual(2, stats['free_capacity_gb']) self.assertEqual(300, stats['max_over_subscription_ratio']) self.assertEqual(5, stats['reserved_percentage']) self.assertTrue(stats['thick_provisioning_support']) self.assertTrue(stats['thin_provisioning_support']) self.assertTrue(stats['compression_support']) 
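# The 300 / 5 values asserted above come straight from MockConfig
# (max_over_subscription_ratio / reserved_percentage), i.e. the adapter is
# expected to pass the backend configuration through to the reported pool
# stats; the capacity figures presumably come from the mocked pool in
# test_client.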
self.assertTrue(stats['consistent_group_snapshot_enabled']) self.assertFalse(stats['replication_enabled']) self.assertEqual(0, len(stats['replication_targets'])) self.assertTrue(stats['fast_support']) def test_update_volume_stats(self): stats = self.adapter.update_volume_stats() self.assertEqual('backend', stats['volume_backend_name']) self.assertEqual('unknown', stats['storage_protocol']) self.assertTrue(stats['thin_provisioning_support']) self.assertTrue(stats['thick_provisioning_support']) self.assertTrue(stats['consistent_group_snapshot_enabled']) self.assertFalse(stats['replication_enabled']) self.assertEqual(0, len(stats['replication_targets'])) self.assertTrue(stats['fast_support']) self.assertEqual(1, len(stats['pools'])) def test_get_replication_stats(self): self.adapter.replication_manager.is_replication_configured = True self.adapter.replication_manager.replication_devices = { 'secondary_unity': None } stats = self.adapter.update_volume_stats() self.assertTrue(stats['replication_enabled']) self.assertEqual(['secondary_unity'], stats['replication_targets']) self.assertEqual(1, len(stats['pools'])) pool_stats = stats['pools'][0] self.assertTrue(pool_stats['replication_enabled']) self.assertEqual(['secondary_unity'], pool_stats['replication_targets']) def test_serial_number(self): self.assertEqual('CLIENT_SERIAL', self.adapter.serial_number) def test_do_setup(self): self.assertEqual('1.2.3.4', self.adapter.ip) self.assertEqual('user', self.adapter.username) self.assertEqual('pass', self.adapter.password) self.assertTrue(self.adapter.array_cert_verify) self.assertIsNone(self.adapter.array_ca_cert_path) def test_do_setup_version_before_4_1(self): def f(): with mock.patch('cinder.volume.drivers.dell_emc.unity.adapter.' 'CommonAdapter.validate_ports'): self.adapter._client.system.system_version = '4.0.0' self.adapter.do_setup(self.adapter.driver, MockConfig()) self.assertRaises(exception.VolumeBackendAPIException, f) def test_verify_cert_false_path_none(self): self.adapter.array_cert_verify = False self.adapter.array_ca_cert_path = None self.assertFalse(self.adapter.verify_cert) def test_verify_cert_false_path_not_none(self): self.adapter.array_cert_verify = False self.adapter.array_ca_cert_path = '/tmp/array_ca.crt' self.assertFalse(self.adapter.verify_cert) def test_verify_cert_true_path_none(self): self.adapter.array_cert_verify = True self.adapter.array_ca_cert_path = None self.assertTrue(self.adapter.verify_cert) def test_verify_cert_true_path_valide(self): self.adapter.array_cert_verify = True self.adapter.array_ca_cert_path = '/tmp/array_ca.crt' self.assertEqual(self.adapter.array_ca_cert_path, self.adapter.verify_cert) def test_terminate_connection_volume(self): def f(): volume = MockOSResource(provider_location='id^lun_43', id='id_43', volume_attachment=None) connector = {'host': 'host1'} self.adapter.terminate_connection(volume, connector) self.assertRaises(ex.DetachIsCalled, f) def test_terminate_connection_force_detach(self): def f(): volume = MockOSResource(provider_location='id^lun_44', id='id_44', volume_attachment=None) self.adapter.terminate_connection(volume, None) self.assertRaises(ex.DetachAllIsCalled, f) def test_terminate_connection_snapshot(self): def f(): connector = {'host': 'host1'} snap = MockOSResource(name='snap_0', id='snap_0', volume_attachment=None) self.adapter.terminate_connection_snapshot(snap, connector) self.assertRaises(ex.DetachIsCalled, f) def test_terminate_connection_remove_empty_host(self): self.adapter.remove_empty_host = True def f(): 
connector = {'host': 'empty-host'} vol = MockOSResource(provider_location='id^lun_45', id='id_45', volume_attachment=None) self.adapter.terminate_connection(vol, connector) self.assertRaises(ex.HostDeleteIsCalled, f) def test_terminate_connection_multiattached_volume(self): def f(): connector = {'host': 'host1'} attachments = [MockOSResource(id='id-1', attach_status='attached', attached_host='host1'), MockOSResource(id='id-2', attach_status='attached', attached_host='host1')] vol = MockOSResource(provider_location='id^lun_45', id='id_45', volume_attachment=attachments) self.adapter.terminate_connection(vol, connector) self.assertIsNone(f()) def test_manage_existing_by_name(self): ref = {'source-id': 12} volume = MockOSResource(name='lun1') ret = self.adapter.manage_existing(volume, ref) expected = get_lun_pl('12') self.assertEqual(expected, ret['provider_location']) def test_manage_existing_by_id(self): ref = {'source-name': 'lunx'} volume = MockOSResource(name='lun1') ret = self.adapter.manage_existing(volume, ref) expected = get_lun_pl('lun_4') self.assertEqual(expected, ret['provider_location']) def test_manage_existing_invalid_ref(self): def f(): ref = {} volume = MockOSResource(name='lun1') self.adapter.manage_existing(volume, ref) self.assertRaises(exception.ManageExistingInvalidReference, f) def test_manage_existing_lun_not_found(self): def f(): ref = {'source-name': 'not_exists'} volume = MockOSResource(name='lun1') self.adapter.manage_existing(volume, ref) self.assertRaises(exception.ManageExistingInvalidReference, f) @patch_for_unity_adapter def test_manage_existing_get_size_invalid_backend(self): def f(): volume = MockOSResource(volume_type_id='thin', host='host@backend#pool1') ref = {'source-id': 12} self.adapter.manage_existing_get_size(volume, ref) self.assertRaises(exception.ManageExistingInvalidReference, f) @patch_for_unity_adapter def test_manage_existing_get_size_success(self): volume = MockOSResource(volume_type_id='thin', host='host@backend#pool0') ref = {'source-id': 12} volume_size = self.adapter.manage_existing_get_size(volume, ref) self.assertEqual(5, volume_size) @patch_for_unity_adapter def test_create_volume_from_snapshot(self): lun_id = 'lun_50' volume = MockOSResource(name=lun_id, id=lun_id, host='unity#pool1') snap_id = 'snap_50' snap = MockOSResource(name=snap_id) with patch_thin_clone(test_client.MockResource(_id=lun_id)) as tc: ret = self.adapter.create_volume_from_snapshot(volume, snap) self.assertEqual(get_snap_lun_pl(lun_id), ret['provider_location']) tc.assert_called_with(adapter.VolumeParams(self.adapter, volume), snap_id) @patch_for_unity_adapter def test_create_cloned_volume_attached(self): lun_id = 'lun_51' src_lun_id = 'lun_53' volume = MockOSResource(name=lun_id, id=lun_id, host='unity#pool1') src_vref = MockOSResource(id=src_lun_id, name=src_lun_id, provider_location=get_lun_pl(src_lun_id), volume_attachment=['not_care']) with patch_dd_copy(test_client.MockResource(_id=lun_id)) as dd: ret = self.adapter.create_cloned_volume(volume, src_vref) dd.assert_called_with( adapter.VolumeParams(self.adapter, volume), IdMatcher(test_client.MockResource( _id='snap_clone_{}'.format(src_lun_id))), src_lun=IdMatcher(test_client.MockResource(_id=src_lun_id))) self.assertEqual(get_lun_pl(lun_id), ret['provider_location']) @patch_for_unity_adapter def test_create_cloned_volume_available(self): lun_id = 'lun_54' src_lun_id = 'lun_55' volume = MockOSResource(id=lun_id, host='unity#pool1', size=3, provider_location=get_lun_pl(lun_id)) src_vref = 
MockOSResource(id=src_lun_id, name=src_lun_id, provider_location=get_lun_pl(src_lun_id), volume_attachment=None) with patch_thin_clone(test_client.MockResource(_id=lun_id)) as tc: ret = self.adapter.create_cloned_volume(volume, src_vref) tc.assert_called_with( adapter.VolumeParams(self.adapter, volume), IdMatcher(test_client.MockResource( _id='snap_clone_{}'.format(src_lun_id))), src_lun=IdMatcher(test_client.MockResource(_id=src_lun_id))) self.assertEqual(get_snap_lun_pl(lun_id), ret['provider_location']) @patch_for_unity_adapter def test_dd_copy_with_src_lun(self): lun_id = 'lun_56' src_lun_id = 'lun_57' src_snap_id = 'snap_57' volume = MockOSResource(name=lun_id, id=lun_id, host='unity#pool1', provider_location=get_lun_pl(lun_id)) src_snap = test_client.MockResource(name=src_snap_id, _id=src_snap_id) src_lun = test_client.MockResource(name=src_lun_id, _id=src_lun_id) src_lun.size_total = 6 * units.Gi with patch_copy_volume() as copy_volume: ret = self.adapter._dd_copy( adapter.VolumeParams(self.adapter, volume), src_snap, src_lun=src_lun) copy_volume.assert_called_with('dev', 'dev', 6144, '1M', sparse=True) self.assertEqual(IdMatcher(test_client.MockResource(_id=lun_id)), ret) @patch_for_unity_adapter def test_dd_copy_wo_src_lun(self): lun_id = 'lun_58' src_lun_id = 'lun_59' src_snap_id = 'snap_59' volume = MockOSResource(name=lun_id, id=lun_id, host='unity#pool1', provider_location=get_lun_pl(lun_id)) src_snap = test_client.MockResource(name=src_snap_id, _id=src_snap_id) src_snap.size = 5 * units.Gi src_snap.storage_resource = test_client.MockResource(name=src_lun_id, _id=src_lun_id) with patch_copy_volume() as copy_volume: ret = self.adapter._dd_copy( adapter.VolumeParams(self.adapter, volume), src_snap) copy_volume.assert_called_with('dev', 'dev', 5120, '1M', sparse=True) self.assertEqual(IdMatcher(test_client.MockResource(_id=lun_id)), ret) @patch_for_unity_adapter def test_dd_copy_raise(self): lun_id = 'lun_58' src_snap_id = 'snap_59' volume = MockOSResource(name=lun_id, id=lun_id, host='unity#pool1', provider_location=get_lun_pl(lun_id)) src_snap = test_client.MockResource(name=src_snap_id, _id=src_snap_id) with patch_copy_volume() as copy_volume: copy_volume.side_effect = AttributeError self.assertRaises(AttributeError, self.adapter._dd_copy, volume, src_snap) @patch_for_unity_adapter def test_thin_clone(self): lun_id = 'lun_60' src_snap_id = 'snap_61' volume = MockOSResource(name=lun_id, id=lun_id, size=1, provider_location=get_snap_lun_pl(lun_id)) src_snap = test_client.MockResource(name=src_snap_id, _id=src_snap_id) ret = self.adapter._thin_clone(volume, src_snap) self.assertEqual(IdMatcher(test_client.MockResource(_id=lun_id)), ret) @patch_for_unity_adapter def test_thin_clone_downgraded_with_src_lun(self): lun_id = 'lun_60' src_snap_id = 'snap_62' src_lun_id = 'lun_62' volume = MockOSResource(name=lun_id, id=lun_id, size=1, provider_location=get_snap_lun_pl(lun_id)) src_snap = test_client.MockResource(name=src_snap_id, _id=src_snap_id) src_lun = test_client.MockResource(name=src_lun_id, _id=src_lun_id) new_dd_lun = test_client.MockResource(name='lun_63') with patch_storops() as mocked_storops, \ patch_dd_copy(new_dd_lun) as dd: ret = self.adapter._thin_clone( adapter.VolumeParams(self.adapter, volume), src_snap, src_lun=src_lun) vol_params = adapter.VolumeParams(self.adapter, volume) vol_params.name = 'hidden-{}'.format(volume.name) vol_params.description = 'hidden-{}'.format(volume.description) dd.assert_called_with(vol_params, src_snap, src_lun=src_lun) 
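        # the thin-clone helper must be notified that src_lun was dd-copied to the new LUN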
mocked_storops.TCHelper.notify.assert_called_with(src_lun, 'DD_COPY', new_dd_lun) self.assertEqual(IdMatcher(test_client.MockResource(_id=lun_id)), ret) @patch_for_unity_adapter def test_thin_clone_downgraded_wo_src_lun(self): lun_id = 'lun_60' src_snap_id = 'snap_62' volume = MockOSResource(name=lun_id, id=lun_id, size=1, provider_location=get_snap_lun_pl(lun_id)) src_snap = test_client.MockResource(name=src_snap_id, _id=src_snap_id) new_dd_lun = test_client.MockResource(name='lun_63') with patch_storops() as mocked_storops, \ patch_dd_copy(new_dd_lun) as dd: ret = self.adapter._thin_clone( adapter.VolumeParams(self.adapter, volume), src_snap) vol_params = adapter.VolumeParams(self.adapter, volume) vol_params.name = 'hidden-{}'.format(volume.name) vol_params.description = 'hidden-{}'.format(volume.description) dd.assert_called_with(vol_params, src_snap, src_lun=None) mocked_storops.TCHelper.notify.assert_called_with(src_snap, 'DD_COPY', new_dd_lun) self.assertEqual(IdMatcher(test_client.MockResource(_id=lun_id)), ret) @patch_for_unity_adapter def test_thin_clone_thick(self): lun_id = 'lun_70' src_snap_id = 'snap_71' volume = MockOSResource(name=lun_id, id=lun_id, size=1, provider_location=get_snap_lun_pl(lun_id)) src_snap = test_client.MockResource(name=src_snap_id, _id=src_snap_id) new_dd_lun = test_client.MockResource(name='lun_73') with patch_storops(), patch_dd_copy(new_dd_lun) as dd: vol_params = adapter.VolumeParams(self.adapter, volume) ret = self.adapter._thin_clone(vol_params, src_snap) dd.assert_called_with(vol_params, src_snap, src_lun=None) self.assertEqual(ret, new_dd_lun) def test_extend_volume_error(self): def f(): volume = MockOSResource(id='l56', provider_location=get_lun_pl('lun56')) self.adapter.extend_volume(volume, -1) self.assertRaises(ex.ExtendLunError, f) def test_extend_volume_no_id(self): def f(): volume = MockOSResource(provider_location='type^lun') self.adapter.extend_volume(volume, 5) self.assertRaises(exception.VolumeBackendAPIException, f) def test_normalize_config(self): config = MockConfig() config.unity_storage_pool_names = [' pool_1 ', '', ' '] config.unity_io_ports = [' spa_eth2 ', '', ' '] normalized = self.adapter.normalize_config(config) self.assertEqual(['pool_1'], normalized.unity_storage_pool_names) self.assertEqual(['spa_eth2'], normalized.unity_io_ports) def test_normalize_config_raise(self): with self.assertRaisesRegex(exception.InvalidConfigurationValue, 'unity_storage_pool_names'): config = MockConfig() config.unity_storage_pool_names = ['', ' '] self.adapter.normalize_config(config) with self.assertRaisesRegex(exception.InvalidConfigurationValue, 'unity_io_ports'): config = MockConfig() config.unity_io_ports = ['', ' '] self.adapter.normalize_config(config) def test_restore_snapshot(self): volume = MockOSResource(id='1', name='vol_1') snapshot = MockOSResource(id='2', name='snap_1') self.adapter.restore_snapshot(volume, snapshot) def test_get_pool_id_by_name(self): pool_name = 'PoolA' pool_id = self.adapter.get_pool_id_by_name(pool_name) self.assertEqual('pool_1', pool_id) def test_migrate_volume(self): provider_location = 'id^1|system^FNM001|type^lun|version^05.00' volume = MockOSResource(id='1', name='vol_1', host='HostA@BackendB#PoolA', provider_location=provider_location) host = {'host': 'HostA@BackendB#PoolB'} ret = self.adapter.migrate_volume(volume, host) self.assertEqual((True, {}), ret) def test_migrate_volume_failed(self): provider_location = 'id^1|system^FNM001|type^lun|version^05.00' volume = MockOSResource(id='1', name='vol_1', 
host='HostA@BackendB#PoolA', provider_location=provider_location) host = {'host': 'HostA@BackendB#PoolC'} ret = self.adapter.migrate_volume(volume, host) self.assertEqual((False, None), ret) def test_migrate_volume_cross_backends(self): provider_location = 'id^1|system^FNM001|type^lun|version^05.00' volume = MockOSResource(id='1', name='vol_1', host='HostA@BackendA#PoolA', provider_location=provider_location) host = {'host': 'HostA@BackendB#PoolB'} ret = self.adapter.migrate_volume(volume, host) self.assertEqual((False, None), ret) @ddt.unpack @ddt.data((('group-1', 'group-1_name', 'group-1_description'), ('group-1', 'group-1_description')), (('group-2', 'group-2_name', None), ('group-2', 'group-2_name')), (('group-3', 'group-3_name', ''), ('group-3', 'group-3_name'))) def test_create_group(self, inputs, expected): cg_id, cg_name, cg_description = inputs cg = MockOSResource(id=cg_id, name=cg_name, description=cg_description) with mock.patch.object(self.adapter.client, 'create_cg', create=True) as mocked: model_update = self.adapter.create_group(cg) self.assertEqual('available', model_update['status']) mocked.assert_called_once_with(expected[0], description=expected[1]) def test_delete_group(self): cg = MockOSResource(id='group-1') with mock.patch.object(self.adapter.client, 'delete_cg', create=True) as mocked: ret = self.adapter.delete_group(cg) self.assertIsNone(ret[0]) self.assertIsNone(ret[1]) mocked.assert_called_once_with('group-1') def test_update_group(self): cg = MockOSResource(id='group-1') add_volumes = [MockOSResource(id=vol_id, provider_location=get_lun_pl(lun_id)) for vol_id, lun_id in (('volume-1', 'sv_1'), ('volume-2', 'sv_2'))] remove_volumes = [MockOSResource( id='volume-3', provider_location=get_lun_pl('sv_3'))] with mock.patch.object(self.adapter.client, 'update_cg', create=True) as mocked: ret = self.adapter.update_group(cg, add_volumes, remove_volumes) self.assertEqual('available', ret[0]['status']) self.assertIsNone(ret[1]) self.assertIsNone(ret[2]) mocked.assert_called_once_with('group-1', {'sv_1', 'sv_2'}, {'sv_3'}) def test_update_group_add_volumes_none(self): cg = MockOSResource(id='group-1') remove_volumes = [MockOSResource( id='volume-3', provider_location=get_lun_pl('sv_3'))] with mock.patch.object(self.adapter.client, 'update_cg', create=True) as mocked: ret = self.adapter.update_group(cg, None, remove_volumes) self.assertEqual('available', ret[0]['status']) self.assertIsNone(ret[1]) self.assertIsNone(ret[2]) mocked.assert_called_once_with('group-1', set(), {'sv_3'}) def test_update_group_remove_volumes_none(self): cg = MockOSResource(id='group-1') add_volumes = [MockOSResource(id=vol_id, provider_location=get_lun_pl(lun_id)) for vol_id, lun_id in (('volume-1', 'sv_1'), ('volume-2', 'sv_2'))] with mock.patch.object(self.adapter.client, 'update_cg', create=True) as mocked: ret = self.adapter.update_group(cg, add_volumes, None) self.assertEqual('available', ret[0]['status']) self.assertIsNone(ret[1]) self.assertIsNone(ret[2]) mocked.assert_called_once_with('group-1', {'sv_1', 'sv_2'}, set()) def test_update_group_add_remove_volumes_none(self): cg = MockOSResource(id='group-1') with mock.patch.object(self.adapter.client, 'update_cg', create=True) as mocked: ret = self.adapter.update_group(cg, None, None) self.assertEqual('available', ret[0]['status']) self.assertIsNone(ret[1]) self.assertIsNone(ret[2]) mocked.assert_called_once_with('group-1', set(), set()) @patch_for_unity_adapter def test_copy_luns_in_group(self): cg = MockOSResource(id='group-1') volumes = 
[MockOSResource(id=vol_id, provider_location=get_lun_pl(lun_id)) for vol_id, lun_id in (('volume-3', 'sv_3'), ('volume-4', 'sv_4'))] src_cg_snap = test_client.MockResource(_id='id_src_cg_snap') src_volumes = [MockOSResource(id=vol_id, provider_location=get_lun_pl(lun_id)) for vol_id, lun_id in (('volume-1', 'sv_1'), ('volume-2', 'sv_2'))] copied_luns = [test_client.MockResource(_id=lun_id) for lun_id in ('sv_3', 'sv_4')] def _prepare_lun_snaps(lun_id): lun_snap = test_client.MockResource(_id='snap_{}'.format(lun_id)) lun_snap.lun = test_client.MockResource(_id=lun_id) return lun_snap lun_snaps = list(map(_prepare_lun_snaps, ('sv_1', 'sv_2'))) with mock.patch.object(self.adapter.client, 'filter_snaps_in_cg_snap', create=True) as mocked_filter, \ mock.patch.object(self.adapter.client, 'create_cg', create=True) as mocked_create_cg, \ patch_dd_copy(None) as mocked_dd: mocked_filter.return_value = lun_snaps mocked_dd.side_effect = copied_luns ret = self.adapter.copy_luns_in_group(cg, volumes, src_cg_snap, src_volumes) mocked_filter.assert_called_once_with('id_src_cg_snap') dd_args = zip([adapter.VolumeParams(self.adapter, vol) for vol in volumes], lun_snaps) mocked_dd.assert_has_calls([mock.call(*args) for args in dd_args]) mocked_create_cg.assert_called_once_with('group-1', lun_add=copied_luns) self.assertEqual('available', ret[0]['status']) self.assertEqual(2, len(ret[1])) for vol_id in ('volume-3', 'volume-4'): self.assertIn({'id': vol_id, 'status': 'available'}, ret[1]) def test_create_group_from_snap(self): cg = MockOSResource(id='group-2') volumes = [MockOSResource(id=vol_id, provider_location=get_lun_pl(lun_id)) for vol_id, lun_id in (('volume-3', 'sv_3'), ('volume-4', 'sv_4'))] cg_snap = MockOSResource(id='snap-group-1') vol_1 = MockOSResource(id='volume-1') vol_2 = MockOSResource(id='volume-2') vol_snaps = [MockOSResource(id='snap-volume-1', volume=vol_1), MockOSResource(id='snap-volume-2', volume=vol_2)] src_cg_snap = test_client.MockResource(_id='id_src_cg_snap') with mock.patch.object(self.adapter.client, 'get_snap', create=True, return_value=src_cg_snap), \ mock.patch.object(self.adapter, 'copy_luns_in_group', create=True) as mocked_copy: mocked_copy.return_value = ({'status': 'available'}, [{'id': 'volume-3', 'status': 'available'}, {'id': 'volume-4', 'status': 'available'}]) ret = self.adapter.create_group_from_snap(cg, volumes, cg_snap, vol_snaps) mocked_copy.assert_called_once_with(cg, volumes, src_cg_snap, [vol_1, vol_2]) self.assertEqual('available', ret[0]['status']) self.assertEqual(2, len(ret[1])) for vol_id in ('volume-3', 'volume-4'): self.assertIn({'id': vol_id, 'status': 'available'}, ret[1]) def test_create_group_from_snap_none_snapshots(self): cg = MockOSResource(id='group-2') volumes = [MockOSResource(id=vol_id, provider_location=get_lun_pl(lun_id)) for vol_id, lun_id in (('volume-3', 'sv_3'), ('volume-4', 'sv_4'))] cg_snap = MockOSResource(id='snap-group-1') src_cg_snap = test_client.MockResource(_id='id_src_cg_snap') with mock.patch.object(self.adapter.client, 'get_snap', create=True, return_value=src_cg_snap), \ mock.patch.object(self.adapter, 'copy_luns_in_group', create=True) as mocked_copy: mocked_copy.return_value = ({'status': 'available'}, [{'id': 'volume-3', 'status': 'available'}, {'id': 'volume-4', 'status': 'available'}]) ret = self.adapter.create_group_from_snap(cg, volumes, cg_snap, None) mocked_copy.assert_called_once_with(cg, volumes, src_cg_snap, []) self.assertEqual('available', ret[0]['status']) self.assertEqual(2, len(ret[1])) for vol_id in 
('volume-3', 'volume-4'): self.assertIn({'id': vol_id, 'status': 'available'}, ret[1]) def test_create_cloned_group(self): cg = MockOSResource(id='group-2') volumes = [MockOSResource(id=vol_id, provider_location=get_lun_pl(lun_id)) for vol_id, lun_id in (('volume-3', 'sv_3'), ('volume-4', 'sv_4'))] src_cg = MockOSResource(id='group-1') vol_1 = MockOSResource(id='volume-1') vol_2 = MockOSResource(id='volume-2') src_vols = [vol_1, vol_2] src_cg_snap = test_client.MockResource(_id='id_src_cg_snap') with mock.patch.object(self.adapter.client, 'create_cg_snap', create=True, return_value=src_cg_snap) as mocked_create, \ mock.patch.object(self.adapter, 'copy_luns_in_group', create=True) as mocked_copy: mocked_create.__name__ = 'create_cg_snap' mocked_copy.return_value = ({'status': 'available'}, [{'id': 'volume-3', 'status': 'available'}, {'id': 'volume-4', 'status': 'available'}]) ret = self.adapter.create_cloned_group(cg, volumes, src_cg, src_vols) mocked_create.assert_called_once_with('group-1', 'snap_clone_group_group-1') mocked_copy.assert_called_once_with(cg, volumes, src_cg_snap, [vol_1, vol_2]) self.assertEqual('available', ret[0]['status']) self.assertEqual(2, len(ret[1])) for vol_id in ('volume-3', 'volume-4'): self.assertIn({'id': vol_id, 'status': 'available'}, ret[1]) def test_create_cloned_group_none_source_vols(self): cg = MockOSResource(id='group-2') volumes = [MockOSResource(id=vol_id, provider_location=get_lun_pl(lun_id)) for vol_id, lun_id in (('volume-3', 'sv_3'), ('volume-4', 'sv_4'))] src_cg = MockOSResource(id='group-1') src_cg_snap = test_client.MockResource(_id='id_src_cg_snap') with mock.patch.object(self.adapter.client, 'create_cg_snap', create=True, return_value=src_cg_snap) as mocked_create, \ mock.patch.object(self.adapter, 'copy_luns_in_group', create=True) as mocked_copy: mocked_create.__name__ = 'create_cg_snap' mocked_copy.return_value = ({'status': 'available'}, [{'id': 'volume-3', 'status': 'available'}, {'id': 'volume-4', 'status': 'available'}]) ret = self.adapter.create_cloned_group(cg, volumes, src_cg, None) mocked_create.assert_called_once_with('group-1', 'snap_clone_group_group-1') mocked_copy.assert_called_once_with(cg, volumes, src_cg_snap, []) self.assertEqual('available', ret[0]['status']) self.assertEqual(2, len(ret[1])) for vol_id in ('volume-3', 'volume-4'): self.assertIn({'id': vol_id, 'status': 'available'}, ret[1]) def test_create_group_snapshot(self): cg_snap = MockOSResource(id='snap-group-1', group_id='group-1') vol_1 = MockOSResource(id='volume-1') vol_2 = MockOSResource(id='volume-2') vol_snaps = [MockOSResource(id='snap-volume-1', volume=vol_1), MockOSResource(id='snap-volume-2', volume=vol_2)] with mock.patch.object(self.adapter.client, 'create_cg_snap', create=True) as mocked_create: mocked_create.return_value = ({'status': 'available'}, [{'id': 'snap-volume-1', 'status': 'available'}, {'id': 'snap-volume-2', 'status': 'available'}]) ret = self.adapter.create_group_snapshot(cg_snap, vol_snaps) mocked_create.assert_called_once_with('group-1', snap_name='snap-group-1') self.assertEqual({'status': 'available'}, ret[0]) self.assertEqual(2, len(ret[1])) for snap_id in ('snap-volume-1', 'snap-volume-2'): self.assertIn({'id': snap_id, 'status': 'available'}, ret[1]) def test_delete_group_snapshot(self): group_snap = MockOSResource(id='snap-group-1') cg_snap = test_client.MockResource(_id='snap_cg_1') with mock.patch.object(self.adapter.client, 'get_snap', create=True, return_value=cg_snap) as mocked_get, \ mock.patch.object(self.adapter.client, 
'delete_snap', create=True) as mocked_delete: ret = self.adapter.delete_group_snapshot(group_snap) mocked_get.assert_called_once_with('snap-group-1') mocked_delete.assert_called_once_with(cg_snap) self.assertEqual((None, None), ret) def test_setup_replications(self): secondary_device = mock_replication_device() self.adapter.replication_manager.is_replication_configured = True self.adapter.replication_manager.replication_devices = { 'secondary_unity': secondary_device } model_update = self.adapter.setup_replications( test_client.MockResource(_id='sv_1'), {}) self.assertIn('replication_status', model_update) self.assertEqual('enabled', model_update['replication_status']) self.assertIn('replication_driver_data', model_update) self.assertEqual('{"secondary_unity": "rep_session_name_1"}', model_update['replication_driver_data']) def test_setup_replications_not_configured_replication(self): model_update = self.adapter.setup_replications( test_client.MockResource(_id='sv_1'), {}) self.assertEqual(0, len(model_update)) def test_setup_replications_raise(self): secondary_device = mock_replication_device( serial_number='not-found-remote-system') self.adapter.replication_manager.is_replication_configured = True self.adapter.replication_manager.replication_devices = { 'secondary_unity': secondary_device } self.assertRaises(exception.VolumeBackendAPIException, self.adapter.setup_replications, test_client.MockResource(_id='sv_1'), {}) @ddt.data({'failover_to': 'secondary_unity'}, {'failover_to': None}) @ddt.unpack def test_failover(self, failover_to): secondary_id = 'secondary_unity' secondary_device = mock_replication_device() self.adapter.replication_manager.is_replication_configured = True self.adapter.replication_manager.replication_devices = { secondary_id: secondary_device } volume = MockOSResource( id='volume-id-1', name='volume-name-1', replication_driver_data='{"secondary_unity":"rep_session_name_1"}') model_update = self.adapter.failover([volume], secondary_id=failover_to) self.assertEqual(3, len(model_update)) active_backend_id, volumes_update, groups_update = model_update self.assertEqual(secondary_id, active_backend_id) self.assertEqual([], groups_update) self.assertEqual(1, len(volumes_update)) model_update = volumes_update[0] self.assertIn('volume_id', model_update) self.assertEqual('volume-id-1', model_update['volume_id']) self.assertIn('updates', model_update) self.assertEqual( {'provider_id': 'sv_99', 'provider_location': 'id^sv_99|system^SECONDARY_UNITY_SN|type^lun|version^None'}, model_update['updates']) self.assertTrue( self.adapter.replication_manager.is_service_failed_over) def test_failover_raise(self): secondary_id = 'secondary_unity' secondary_device = mock_replication_device() self.adapter.replication_manager.is_replication_configured = True self.adapter.replication_manager.replication_devices = { secondary_id: secondary_device } vol1 = MockOSResource( id='volume-id-1', name='volume-name-1', replication_driver_data='{"secondary_unity":"rep_session_name_1"}') vol2 = MockOSResource( id='volume-id-2', name='volume-name-2', replication_driver_data='{"secondary_unity":"rep_session_name_2"}') model_update = self.adapter.failover([vol1, vol2], secondary_id=secondary_id) active_backend_id, volumes_update, groups_update = model_update self.assertEqual(secondary_id, active_backend_id) self.assertEqual([], groups_update) self.assertEqual(2, len(volumes_update)) m = volumes_update[0] self.assertIn('volume_id', m) self.assertEqual('volume-id-1', m['volume_id']) self.assertIn('updates', m) 
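        # volume-id-1 fails over to sv_99 on the secondary, while volume-id-2 ends up in 'failover-error'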
self.assertEqual( {'provider_id': 'sv_99', 'provider_location': 'id^sv_99|system^SECONDARY_UNITY_SN|type^lun|version^None'}, m['updates']) m = volumes_update[1] self.assertIn('volume_id', m) self.assertEqual('volume-id-2', m['volume_id']) self.assertIn('updates', m) self.assertEqual({'replication_status': 'failover-error'}, m['updates']) self.assertTrue( self.adapter.replication_manager.is_service_failed_over) def test_failover_failback(self): secondary_id = 'secondary_unity' secondary_device = mock_replication_device() self.adapter.replication_manager.is_replication_configured = True self.adapter.replication_manager.replication_devices = { secondary_id: secondary_device } default_device = mock_replication_device( device_conf={ 'backend_id': 'default', 'san_ip': '10.10.10.10' }, serial_number='PRIMARY_UNITY_SN' ) self.adapter.replication_manager.default_device = default_device self.adapter.replication_manager.active_adapter = ( self.adapter.replication_manager.replication_devices[ secondary_id].adapter) self.adapter.replication_manager.active_backend_id = secondary_id volume = MockOSResource( id='volume-id-1', name='volume-name-1', replication_driver_data='{"secondary_unity":"rep_session_name_1"}') model_update = self.adapter.failover([volume], secondary_id='default') active_backend_id, volumes_update, groups_update = model_update self.assertEqual('default', active_backend_id) self.assertEqual([], groups_update) self.assertEqual(1, len(volumes_update)) model_update = volumes_update[0] self.assertIn('volume_id', model_update) self.assertEqual('volume-id-1', model_update['volume_id']) self.assertIn('updates', model_update) self.assertEqual( {'provider_id': 'sv_1', 'provider_location': 'id^sv_1|system^PRIMARY_UNITY_SN|type^lun|version^None'}, model_update['updates']) self.assertFalse( self.adapter.replication_manager.is_service_failed_over) @patch_for_unity_adapter def test_failed_enable_replication(self): cg = MockOSResource(id='not_cg', name='cg_name', description='cg_description') volumes = [MockOSResource(id=vol_id, provider_location=get_lun_pl(lun_id)) for vol_id, lun_id in (('volume-3', 'sv_3'), ('volume-4', 'sv_4'))] self.assertRaises(exception.InvalidGroupType, self.adapter.enable_replication, None, cg, volumes) @patch_for_unity_adapter def test_enable_replication(self): cg = MockOSResource(id='test_cg_1', name='cg_name', description='cg_description') volumes = [MockOSResource(id=vol_id, provider_location=get_lun_pl(lun_id)) for vol_id, lun_id in (('volume-3', 'sv_3'), ('volume-4', 'sv_4'))] secondary_device = mock_replication_device() self.adapter.replication_manager.replication_devices = { 'secondary_unity': secondary_device } result = self.adapter.enable_replication(None, cg, volumes) self.assertEqual(({'replication_status': 'enabled'}, None), result) @patch_for_unity_adapter def test_cannot_disable_replication_on_generic_group(self): cg = MockOSResource(id='not_cg', name='cg_name', description='cg_description') volumes = [MockOSResource(id=vol_id, provider_location=get_lun_pl(lun_id)) for vol_id, lun_id in (('volume-3', 'sv_3'), ('volume-4', 'sv_4'))] self.assertRaises(exception.InvalidGroupType, self.adapter.disable_replication, None, cg, volumes) @patch_for_unity_adapter def test_disable_replication(self): cg = MockOSResource(id='cg_is_replicated', name='cg_name', description='cg_description') volumes = [MockOSResource(id=vol_id, provider_location=get_lun_pl(lun_id)) for vol_id, lun_id in (('volume-3', 'sv_3'), ('volume-4', 'sv_4'))] result = 
self.adapter.disable_replication(None, cg, volumes) self.assertEqual(({'replication_status': 'disabled'}, None), result) @patch_for_unity_adapter def test_failover_replication(self): cg = MockOSResource(id='cg_is_replicated', name='cg_name', description='cg_description') volumes = [MockOSResource(id=vol_id, provider_location=get_lun_pl(lun_id)) for vol_id, lun_id in (('volume-3', 'sv_3'), ('volume-4', 'sv_4'))] real_secondary_id = 'secondary_unity' secondary_device = mock_replication_device() self.adapter.replication_manager.replication_devices = { real_secondary_id: secondary_device } result = self.adapter.failover_replication(None, cg, volumes, real_secondary_id) self.assertEqual(({'replication_status': 'failed-over'}, [{'id': 'volume-3', 'replication_status': 'failed-over'}, {'id': 'volume-4', 'replication_status': 'failed-over'}]), result) @patch_for_unity_adapter def test_failback_replication(self): cg = MockOSResource(id='cg_is_replicated', name='cg_name', description='cg_description') volumes = [MockOSResource(id=vol_id, provider_location=get_lun_pl(lun_id)) for vol_id, lun_id in (('volume-3', 'sv_3'), ('volume-4', 'sv_4'))] input_secondary_id = 'default' real_secondary_id = 'secondary_unity' secondary_device = mock_replication_device() self.adapter.replication_manager.replication_devices = { real_secondary_id: secondary_device } result = self.adapter.failover_replication(None, cg, volumes, input_secondary_id) self.assertEqual(({'replication_status': 'enabled'}, [{'id': 'volume-3', 'replication_status': 'enabled'}, {'id': 'volume-4', 'replication_status': 'enabled'}]), result) failed_cg = MockOSResource(id='cg_is_replicated_but_has_error', name='cg_name', description='cg_description') failed_result = self.adapter.failover_replication( None, failed_cg, volumes, real_secondary_id) self.assertEqual(({'replication_status': 'error'}, [{'id': 'volume-3', 'replication_status': 'error'}, {'id': 'volume-4', 'replication_status': 'error'}]), failed_result) @patch_for_unity_adapter def test_failover_replication_error(self): cg = MockOSResource(id='cg_is_replicated_but_has_error', name='cg_name', description='cg_description') volumes = [MockOSResource(id=vol_id, provider_location=get_lun_pl(lun_id)) for vol_id, lun_id in (('volume-3', 'sv_3'), ('volume-4', 'sv_4'))] real_secondary_id = 'default' secondary_device = mock_replication_device() self.adapter.replication_manager.replication_devices = { real_secondary_id: secondary_device } result = self.adapter.failover_replication( None, cg, volumes, real_secondary_id) self.assertEqual(({'replication_status': 'error'}, [{'id': 'volume-3', 'replication_status': 'error'}, {'id': 'volume-4', 'replication_status': 'error'}]), result) class FCAdapterTest(test.TestCase): def setUp(self): super(FCAdapterTest, self).setUp() self.adapter = mock_adapter(adapter.FCAdapter) def test_setup(self): self.assertIsNotNone(self.adapter.lookup_service) def test_auto_zone_enabled(self): self.assertTrue(self.adapter.auto_zone_enabled) def test_fc_protocol(self): stats = mock_adapter(adapter.FCAdapter).update_volume_stats() self.assertEqual('FC', stats['storage_protocol']) def test_get_connector_uids(self): connector = {'host': 'fake_host', 'wwnns': ['1111111111111111', '2222222222222222'], 'wwpns': ['3333333333333333', '4444444444444444'] } expected = ['11:11:11:11:11:11:11:11:33:33:33:33:33:33:33:33', '22:22:22:22:22:22:22:22:44:44:44:44:44:44:44:44'] ret = self.adapter.get_connector_uids(connector) self.assertListEqual(expected, ret) def 
test_get_connection_info_no_targets(self): def f(): host = test_client.MockResource('no_target') self.adapter.get_connection_info(12, host, {}) self.assertRaises(exception.VolumeBackendAPIException, f) def test_get_connection_info_auto_zone_enabled(self): host = test_client.MockResource('host1') connector = {'wwpns': 'abcdefg'} ret = self.adapter.get_connection_info(10, host, connector) target_wwns = ['100000051e55a100', '100000051e55a121'] self.assertListEqual(target_wwns, ret['target_wwn']) init_target_map = { '200000051e55a100': ('100000051e55a100', '100000051e55a121'), '200000051e55a121': ('100000051e55a100', '100000051e55a121')} self.assertDictEqual(init_target_map, ret['initiator_target_map']) self.assertEqual(10, ret['target_lun']) def test_get_connection_info_auto_zone_disabled(self): self.adapter.lookup_service = None host = test_client.MockResource('host1') connector = {'wwpns': 'abcdefg'} ret = self.adapter.get_connection_info(10, host, connector) self.assertEqual(10, ret['target_lun']) wwns = ['8899AABBCCDDEEFF', '8899AABBCCDDFFEE'] self.assertListEqual(wwns, ret['target_wwn']) @patch_for_fc_adapter def test_initialize_connection_volume(self): volume = MockOSResource(provider_location='id^lun_43', id='id_43') connector = {'host': 'host1'} conn_info = self.adapter.initialize_connection(volume, connector) self.assertEqual('fibre_channel', conn_info['driver_volume_type']) self.assertTrue(conn_info['data']['target_discovered']) self.assertEqual('id_43', conn_info['data']['volume_id']) @patch_for_fc_adapter def test_initialize_connection_snapshot(self): snap = MockOSResource(id='snap_1', name='snap_1') connector = {'host': 'host1'} conn_info = self.adapter.initialize_connection_snapshot( snap, connector) self.assertEqual('fibre_channel', conn_info['driver_volume_type']) self.assertTrue(conn_info['data']['target_discovered']) self.assertEqual('snap_1', conn_info['data']['volume_id']) def test_terminate_connection_auto_zone_enabled(self): connector = {'host': 'host1', 'wwpns': 'abcdefg'} volume = MockOSResource(provider_location='id^lun_41', id='id_41', volume_attachment=None) ret = self.adapter.terminate_connection(volume, connector) self.assertEqual('fibre_channel', ret['driver_volume_type']) data = ret['data'] target_map = { '200000051e55a100': ('100000051e55a100', '100000051e55a121'), '200000051e55a121': ('100000051e55a100', '100000051e55a121')} self.assertDictEqual(target_map, data['initiator_target_map']) target_wwn = ['100000051e55a100', '100000051e55a121'] self.assertListEqual(target_wwn, data['target_wwn']) def test_terminate_connection_auto_zone_enabled_none_host_luns(self): connector = {'host': 'host-no-host_luns', 'wwpns': 'abcdefg'} volume = MockOSResource(provider_location='id^lun_41', id='id_41', volume_attachment=None) ret = self.adapter.terminate_connection(volume, connector) self.assertEqual('fibre_channel', ret['driver_volume_type']) data = ret['data'] target_map = { '200000051e55a100': ('100000051e55a100', '100000051e55a121'), '200000051e55a121': ('100000051e55a100', '100000051e55a121')} self.assertDictEqual(target_map, data['initiator_target_map']) target_wwn = ['100000051e55a100', '100000051e55a121'] self.assertListEqual(target_wwn, data['target_wwn']) def test_terminate_connection_remove_empty_host_return_data(self): self.adapter.remove_empty_host = True connector = {'host': 'empty-host-return-data', 'wwpns': 'abcdefg'} volume = MockOSResource(provider_location='id^lun_41', id='id_41', volume_attachment=None) ret = self.adapter.terminate_connection(volume, 
connector) self.assertEqual('fibre_channel', ret['driver_volume_type']) data = ret['data'] target_map = { '200000051e55a100': ('100000051e55a100', '100000051e55a121'), '200000051e55a121': ('100000051e55a100', '100000051e55a121')} self.assertDictEqual(target_map, data['initiator_target_map']) target_wwn = ['100000051e55a100', '100000051e55a121'] self.assertListEqual(target_wwn, data['target_wwn']) def test_validate_ports_whitelist_none(self): ports = self.adapter.validate_ports(None) self.assertEqual(set(('spa_iom_0_fc0', 'spa_iom_0_fc1')), set(ports)) def test_validate_ports(self): ports = self.adapter.validate_ports(['spa_iom_0_fc0']) self.assertEqual(set(('spa_iom_0_fc0',)), set(ports)) def test_validate_ports_asterisk(self): ports = self.adapter.validate_ports(['spa*']) self.assertEqual(set(('spa_iom_0_fc0', 'spa_iom_0_fc1')), set(ports)) def test_validate_ports_question_mark(self): ports = self.adapter.validate_ports(['spa_iom_0_fc?']) self.assertEqual(set(('spa_iom_0_fc0', 'spa_iom_0_fc1')), set(ports)) def test_validate_ports_no_matched(self): with self.assertRaisesRegex(exception.InvalidConfigurationValue, 'unity_io_ports'): self.adapter.validate_ports(['spc_invalid']) def test_validate_ports_unmatched_whitelist(self): with self.assertRaisesRegex(exception.InvalidConfigurationValue, 'unity_io_ports'): self.adapter.validate_ports(['spa_iom*', 'spc_invalid']) class ISCSIAdapterTest(test.TestCase): def setUp(self): super(ISCSIAdapterTest, self).setUp() self.adapter = mock_adapter(adapter.ISCSIAdapter) def test_iscsi_protocol(self): stats = self.adapter.update_volume_stats() self.assertEqual('iSCSI', stats['storage_protocol']) def test_get_connector_uids(self): connector = {'host': 'fake_host', 'initiator': 'fake_iqn'} ret = self.adapter.get_connector_uids(connector) self.assertListEqual(['fake_iqn'], ret) def test_get_connection_info(self): connector = {'host': 'fake_host', 'initiator': 'fake_iqn'} hlu = 10 info = self.adapter.get_connection_info(hlu, None, connector) target_iqns = ['iqn.1-1.com.e:c.a.a0', 'iqn.1-1.com.e:c.a.a1'] target_portals = ['1.2.3.4:1234', '1.2.3.5:1234'] self.assertListEqual(target_iqns, info['target_iqns']) self.assertListEqual([hlu, hlu], info['target_luns']) self.assertListEqual(target_portals, info['target_portals']) self.assertEqual(hlu, info['target_lun']) self.assertIn(info['target_portal'], target_portals) self.assertIn(info['target_iqn'], target_iqns) @patch_for_iscsi_adapter def test_initialize_connection_volume(self): volume = MockOSResource(provider_location='id^lun_43', id='id_43') connector = {'host': 'host1'} conn_info = self.adapter.initialize_connection(volume, connector) self.assertEqual('iscsi', conn_info['driver_volume_type']) self.assertTrue(conn_info['data']['target_discovered']) self.assertEqual('id_43', conn_info['data']['volume_id']) @patch_for_iscsi_adapter def test_initialize_connection_snapshot(self): snap = MockOSResource(id='snap_1', name='snap_1') connector = {'host': 'host1'} conn_info = self.adapter.initialize_connection_snapshot( snap, connector) self.assertEqual('iscsi', conn_info['driver_volume_type']) self.assertTrue(conn_info['data']['target_discovered']) self.assertEqual('snap_1', conn_info['data']['volume_id']) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/volume/drivers/dell_emc/unity/test_client.py0000664000175000017500000010620700000000000027252 0ustar00zuulzuul00000000000000# Copyright (c) 2016 Dell Inc. or its subsidiaries. 
# All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import unittest from unittest import mock import ddt from oslo_utils import units from cinder import coordination from cinder.tests.unit.volume.drivers.dell_emc.unity \ import fake_enum as enums from cinder.tests.unit.volume.drivers.dell_emc.unity \ import fake_exception as ex from cinder.volume.drivers.dell_emc.unity import client ######################## # # Start of Mocks # ######################## class MockResource(object): def __init__(self, name=None, _id=None): self.name = name self._id = _id self.existed = True self.size_total = 5 * units.Gi self.size_subscribed = 6 * units.Gi self.size_free = 2 * units.Gi self.is_auto_delete = None self.initiator_id = [] self.alu_hlu_map = {'already_attached': 99} self.ip_address = None self.is_logged_in = None self.wwn = None self.max_iops = None self.max_kbps = None self.pool_name = 'Pool0' self._storage_resource = None self.host_cache = [] self.is_thin = None self.is_all_flash = True self.description = None self.luns = None self.lun = None self.tiering_policy = None self.pool_fast_vp = None self.snap = True @property def id(self): return self._id def get_id(self): return self._id def delete(self, force_snap_delete=None): if self.get_id() in ['snap_2']: raise ex.SnapDeleteIsCalled() elif self.get_id() == 'not_found': raise ex.UnityResourceNotFoundError() elif self.get_id() == 'snap_in_use': raise ex.UnityDeleteAttachedSnapError() elif self.name == 'empty-host': raise ex.HostDeleteIsCalled() elif self.get_id() == 'lun_in_replication': if not force_snap_delete: raise ex.UnityDeleteLunInReplicationError() elif self.get_id() == 'lun_rep_session_1': raise ex.UnityResourceNotFoundError() @property def pool(self): return MockResource('pool0') @property def iscsi_host_initiators(self): iscsi_initiator = MockResource('iscsi_initiator') iscsi_initiator.initiator_id = ['iqn.1-1.com.e:c.host.0', 'iqn.1-1.com.e:c.host.1'] return iscsi_initiator @property def total_size_gb(self): return self.size_total / units.Gi @total_size_gb.setter def total_size_gb(self, value): if value == self.total_size_gb: raise ex.UnityNothingToModifyError() else: self.size_total = value * units.Gi def add_initiator(self, uid, force_create=None): self.initiator_id.append(uid) def attach(self, lun_or_snap, skip_hlu_0=True): if lun_or_snap.get_id() == 'already_attached': raise ex.UnityResourceAlreadyAttachedError() self.alu_hlu_map[lun_or_snap.get_id()] = len(self.alu_hlu_map) return self.get_hlu(lun_or_snap) @staticmethod def detach(lun_or_snap): if lun_or_snap.name == 'detach_failure': raise ex.DetachIsCalled() @staticmethod def detach_from(host): if host is None: raise ex.DetachFromIsCalled() def get_hlu(self, lun): return self.alu_hlu_map.get(lun.get_id(), None) @staticmethod def create_lun(lun_name, size_gb, description=None, io_limit_policy=None, is_thin=None, is_compression=None, tiering_policy=None): if lun_name == 'in_use': raise ex.UnityLunNameInUseError() ret = MockResource(lun_name, 'lun_2') if io_limit_policy is not 
None: ret.max_iops = io_limit_policy.max_iops ret.max_kbps = io_limit_policy.max_kbps if is_thin is not None: ret.is_thin = is_thin if tiering_policy is not None: ret.tiering_policy = tiering_policy return ret @staticmethod def create_snap(name, is_auto_delete=False): if name == 'in_use': raise ex.UnitySnapNameInUseError() ret = MockResource(name) ret.is_auto_delete = is_auto_delete return ret @staticmethod def update(data=None): pass @property def iscsi_node(self): name = 'iqn.1-1.com.e:c.%s.0' % self.name return MockResource(name) @property def fc_host_initiators(self): init0 = MockResource('fhi_0') init0.initiator_id = '00:11:22:33:44:55:66:77:88:99:AA:BB:CC:CD:EE:FF' init1 = MockResource('fhi_1') init1.initiator_id = '00:11:22:33:44:55:66:77:88:99:AA:BB:BC:CD:EE:FF' return MockResourceList.create(init0, init1) @property def paths(self): path0 = MockResource('%s_path_0' % self.name) path0.is_logged_in = True path1 = MockResource('%s_path_1' % self.name) path1.is_logged_in = False path2 = MockResource('%s_path_2' % self.name) path2.is_logged_in = True return MockResourceList.create(path0, path1) @property def fc_port(self): ret = MockResource(_id='spa_iom_0_fc0') ret.wwn = '00:11:22:33:44:55:66:77:88:99:AA:BB:CC:DD:EE:FF' return ret @property def host_luns(self): if self.name == 'host-no-host_luns': return None return [] @property def storage_resource(self): if self._storage_resource is None: self._storage_resource = MockResource(_id='sr_%s' % self._id, name='sr_%s' % self.name) return self._storage_resource @storage_resource.setter def storage_resource(self, value): self._storage_resource = value def modify(self, name=None, is_compression=None, io_limit_policy=None): self.name = name def remove_from_storage(self, lun): pass def thin_clone(self, name, io_limit_policy=None, description=None): if name == 'thin_clone_name_in_use': raise ex.UnityLunNameInUseError return MockResource(_id=name, name=name) def get_snap(self, name): return MockResource(_id=name, name=name) def restore(self, delete_backup): return MockResource(_id='snap_1', name="internal_snap") def migrate(self, dest_pool, **kwargs): if dest_pool.id == 'fail_migration_pool': return False return True def replicate_cg_with_dst_resource_provisioning(self, max_time_out_of_sync, source_luns, dst_pool_id, remote_system=None, dst_cg_name=None): return {'max_time_out_of_sync': max_time_out_of_sync, 'dst_pool_id': dst_pool_id, 'remote_system': remote_system, 'dst_cg_name': dst_cg_name} def replicate_with_dst_resource_provisioning(self, max_time_out_of_sync, dst_pool_id, remote_system=None, dst_lun_name=None): return {'max_time_out_of_sync': max_time_out_of_sync, 'dst_pool_id': dst_pool_id, 'remote_system': remote_system, 'dst_lun_name': dst_lun_name} def failover(self, sync=None): return {'sync': sync} def failback(self, force_full_copy=None): return {'force_full_copy': force_full_copy} def check_cg_is_replicated(self): if self.name == 'replicated_cg': return True return False class MockResourceList(object): def __init__(self, names=None, ids=None): if names is not None: self.resources = [MockResource(name=name) for name in names] elif ids is not None: self.resources = [MockResource(_id=_id) for _id in ids] @staticmethod def create(*rsc_list): ret = MockResourceList([]) ret.resources = rsc_list return ret @property def name(self): return map(lambda i: i.name, self.resources) @property def list(self): return self.resources @list.setter def list(self, value): self.resources = [] def __iter__(self): return self.resources.__iter__() def 
__len__(self): return len(self.resources) def __getattr__(self, item): return [getattr(i, item) for i in self.resources] def shadow_copy(self, **kwargs): if list(filter(None, kwargs.values())): return MockResourceList.create(self.resources[0]) else: return self class MockSystem(object): def __init__(self): self.serial_number = 'SYSTEM_SERIAL' self.system_version = '4.1.0' @property def info(self): mocked_info = mock.Mock() mocked_info.name = self.serial_number return mocked_info @staticmethod def get_lun(_id=None, name=None): if _id == 'not_found': raise ex.UnityResourceNotFoundError() if _id == 'tc_80': # for thin clone with extending size lun = MockResource(name=_id, _id=_id) lun.total_size_gb = 7 return lun return MockResource(name, _id) @staticmethod def get_pool(_id=None, name=None): if name == 'Pool 3': return MockResource(name, 'pool_3') if name or _id: return MockResource(name, _id) return MockResourceList(['Pool 1', 'Pool 2']) @staticmethod def get_snap(name): if name == 'not_found': raise ex.UnityResourceNotFoundError() return MockResource(name) @staticmethod def get_cg(name): if not name: raise ex.UnityResourceNotFoundError() return MockResource(name, _id=name) @staticmethod def create_host(name): return MockResource(name) @staticmethod def get_host(name): if name == 'not_found': raise ex.UnityResourceNotFoundError() if name == 'host1': ret = MockResource(name) ret.initiator_id = ['old-iqn'] return ret return MockResource(name) @staticmethod def get_iscsi_portal(): portal0 = MockResource('p0') portal0.ip_address = '1.1.1.1' portal1 = MockResource('p1') portal1.ip_address = '1.1.1.2' return MockResourceList.create(portal0, portal1) @staticmethod def get_fc_port(): port0 = MockResource('fcp0') port0.wwn = '00:11:22:33:44:55:66:77:88:99:AA:BB:CC:DD:EE:FF' port1 = MockResource('fcp1') port1.wwn = '00:11:22:33:44:55:66:77:88:99:AA:BB:CC:DD:FF:EE' return MockResourceList.create(port0, port1) @staticmethod def create_io_limit_policy(name, max_iops=None, max_kbps=None): if name == 'in_use': raise ex.UnityPolicyNameInUseError() ret = MockResource(name) ret.max_iops = max_iops ret.max_kbps = max_kbps return ret @staticmethod def get_io_limit_policy(name): return MockResource(name=name) def get_remote_system(self, name=None): if name == 'not-exist': raise ex.UnityResourceNotFoundError() else: return {'name': name} def get_replication_session(self, name=None, src_resource_id=None, dst_resource_id=None): if name == 'not-exist': raise ex.UnityResourceNotFoundError() elif src_resource_id == 'lun_in_replication': return [MockResource(name='rep_session')] elif src_resource_id == 'lun_not_in_replication': raise ex.UnityResourceNotFoundError() elif src_resource_id == 'lun_in_multiple_replications': return [MockResource(_id='lun_rep_session_1'), MockResource(_id='lun_rep_session_2')] elif src_resource_id and ('is_in_replication' in src_resource_id): return [MockResource(name='rep_session')] elif dst_resource_id and ('is_in_replication' in dst_resource_id): return [MockResource(name='rep_session')] else: return {'name': name, 'src_resource_id': src_resource_id, 'dst_resource_id': dst_resource_id} @mock.patch.object(client, 'storops', new='True') def get_client(): ret = client.UnityClient('1.2.3.4', 'user', 'pass') ret._system = MockSystem() return ret ######################## # # Start of Tests # ######################## @ddt.ddt @mock.patch.object(client, 'storops_ex', new=ex) class ClientTest(unittest.TestCase): def setUp(self): self.client = get_client() def test_get_serial(self): 
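        # the client should report the serial number of the mocked Unity system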
self.assertEqual('SYSTEM_SERIAL', self.client.get_serial()) def test_create_lun_success(self): name = 'LUN 3' pool = MockResource('Pool 0') lun = self.client.create_lun(name, 5, pool) self.assertEqual(name, lun.name) def test_create_lun_name_in_use(self): name = 'in_use' pool = MockResource('Pool 0') lun = self.client.create_lun(name, 6, pool) self.assertEqual('in_use', lun.name) def test_create_lun_with_io_limit(self): pool = MockResource('Pool 0') limit = MockResource('limit') limit.max_kbps = 100 lun = self.client.create_lun('LUN 4', 6, pool, io_limit_policy=limit) self.assertEqual(100, lun.max_kbps) def test_create_lun_thick(self): name = 'thick_lun' pool = MockResource('Pool 0') lun = self.client.create_lun(name, 6, pool, is_thin=False) self.assertIsNotNone(lun.is_thin) self.assertFalse(lun.is_thin) self.assertIsNone(lun.tiering_policy) def test_create_auto_tier_lun(self): name = 'auto_tier_lun' tiering_policy = enums.TieringPolicyEnum.AUTOTIER pool = MockResource('Pool 0') lun = self.client.create_lun(name, 6, pool, tiering_policy=tiering_policy) self.assertIsNotNone(lun.tiering_policy) self.assertEqual(enums.TieringPolicyEnum.AUTOTIER, lun.tiering_policy) def test_create_high_tier_lun(self): name = 'high_tier_lun' tiering_policy = enums.TieringPolicyEnum.HIGHEST pool = MockResource('Pool 0') lun = self.client.create_lun(name, 6, pool, tiering_policy=tiering_policy) self.assertIsNotNone(lun.tiering_policy) self.assertEqual(enums.TieringPolicyEnum.HIGHEST, lun.tiering_policy) def test_thin_clone_success(self): name = 'tc_77' src_lun = MockResource(_id='id_77') lun = self.client.thin_clone(src_lun, name) self.assertEqual(name, lun.name) def test_thin_clone_name_in_used(self): name = 'thin_clone_name_in_use' src_lun = MockResource(_id='id_79') lun = self.client.thin_clone(src_lun, name) self.assertEqual(name, lun.name) def test_thin_clone_extend_size(self): name = 'tc_80' src_lun = MockResource(_id='id_80') lun = self.client.thin_clone(src_lun, name, io_limit_policy=None, new_size_gb=7) self.assertEqual(name, lun.name) self.assertEqual(7, lun.total_size_gb) def test_delete_lun_normal(self): self.assertIsNone(self.client.delete_lun('lun3')) def test_delete_lun_not_found(self): try: self.client.delete_lun('not_found') except ex.StoropsException: self.fail('not found error should be dealt with silently.') def test_delete_lun_in_replication(self): self.client.delete_lun('lun_in_replication') @ddt.data({'lun_id': 'lun_not_in_replication'}, {'lun_id': 'lun_in_multiple_replications'}) @ddt.unpack def test_delete_lun_replications(self, lun_id): self.client.delete_lun_replications(lun_id) def test_get_lun_with_id(self): lun = self.client.get_lun('lun4') self.assertEqual('lun4', lun.get_id()) def test_get_lun_with_name(self): lun = self.client.get_lun(name='LUN 4') self.assertEqual('LUN 4', lun.name) def test_get_lun_not_found(self): ret = self.client.get_lun(lun_id='not_found') self.assertIsNone(ret) def test_get_pools(self): pools = self.client.get_pools() self.assertEqual(2, len(pools)) def test_create_snap_normal(self): snap = self.client.create_snap('lun_1', 'snap_1') self.assertEqual('snap_1', snap.name) def test_create_snap_in_use(self): snap = self.client.create_snap('lun_1', 'in_use') self.assertEqual('in_use', snap.name) def test_delete_snap_error(self): def f(): snap = MockResource(_id='snap_2') self.client.delete_snap(snap) self.assertRaises(ex.SnapDeleteIsCalled, f) def test_delete_snap_not_found(self): try: snap = MockResource(_id='not_found') self.client.delete_snap(snap) except 
ex.StoropsException: self.fail('snap not found should not raise exception.') def test_delete_snap_none(self): try: ret = self.client.delete_snap(None) self.assertIsNone(ret) except ex.StoropsException: self.fail('delete none should not raise exception.') def test_delete_snap_in_use(self): def f(): snap = MockResource(_id='snap_in_use') self.client.delete_snap(snap) self.assertRaises(ex.UnityDeleteAttachedSnapError, f) def test_get_snap_found(self): snap = self.client.get_snap('snap_2') self.assertEqual('snap_2', snap.name) def test_get_snap_not_found(self): ret = self.client.get_snap('not_found') self.assertIsNone(ret) @mock.patch.object(coordination.Coordinator, 'get_lock') def test_create_host_found(self, fake_coordination): host = self.client.create_host('host1') self.assertEqual('host1', host.name) self.assertLessEqual(['iqn.1-1.com.e:c.a.a0'], host.initiator_id) @mock.patch.object(coordination.Coordinator, 'get_lock') def test_create_host_not_found(self, fake): host = self.client.create_host('not_found') self.assertEqual('not_found', host.name) self.assertIn('not_found', self.client.host_cache) def test_attach_lun(self): lun = MockResource(_id='lun1', name='l1') host = MockResource('host1') self.assertEqual(1, self.client.attach(host, lun)) def test_attach_already_attached(self): lun = MockResource(_id='already_attached') host = MockResource('host1') hlu = self.client.attach(host, lun) self.assertEqual(99, hlu) def test_detach_lun(self): def f(): lun = MockResource('detach_failure') host = MockResource('host1') self.client.detach(host, lun) self.assertRaises(ex.DetachIsCalled, f) def test_detach_all(self): def f(): lun = MockResource('lun_44') self.client.detach_all(lun) self.assertRaises(ex.DetachFromIsCalled, f) @mock.patch.object(coordination.Coordinator, 'get_lock') def test_create_host(self, fake): self.assertEqual('host2', self.client.create_host('host2').name) @mock.patch.object(coordination.Coordinator, 'get_lock') def test_create_host_in_cache(self, fake): self.client.host_cache['already_in'] = MockResource(name='already_in') host = self.client.create_host('already_in') self.assertIn('already_in', self.client.host_cache) self.assertEqual('already_in', host.name) def test_update_host_initiators(self): host = MockResource(name='host_init') host = self.client.update_host_initiators(host, 'fake-iqn-1') def test_get_iscsi_target_info(self): ret = self.client.get_iscsi_target_info() expected = [{'iqn': 'iqn.1-1.com.e:c.p0.0', 'portal': '1.1.1.1:3260'}, {'iqn': 'iqn.1-1.com.e:c.p1.0', 'portal': '1.1.1.2:3260'}] self.assertListEqual(expected, ret) def test_get_iscsi_target_info_allowed_ports(self): ret = self.client.get_iscsi_target_info(allowed_ports=['spa_eth0']) expected = [{'iqn': 'iqn.1-1.com.e:c.p0.0', 'portal': '1.1.1.1:3260'}] self.assertListEqual(expected, ret) def test_get_fc_target_info_without_host(self): ret = self.client.get_fc_target_info() self.assertListEqual(['8899AABBCCDDEEFF', '8899AABBCCDDFFEE'], sorted(ret)) def test_get_fc_target_info_without_host_but_allowed_ports(self): ret = self.client.get_fc_target_info(allowed_ports=['spa_fc0']) self.assertListEqual(['8899AABBCCDDEEFF'], ret) def test_get_fc_target_info_with_host(self): host = MockResource('host0') ret = self.client.get_fc_target_info(host, True) self.assertListEqual(['8899AABBCCDDEEFF'], ret) def test_get_fc_target_info_with_host_and_allowed_ports(self): host = MockResource('host0') ret = self.client.get_fc_target_info(host, True, allowed_ports=['spb_iom_0_fc0']) self.assertListEqual([], ret) def 
test_get_io_limit_policy_none(self): ret = self.client.get_io_limit_policy(None) self.assertIsNone(ret) def test_get_io_limit_policy_create_new(self): specs = {'maxBWS': 2, 'id': 'max_2_mbps', 'maxIOPS': None} limit = self.client.get_io_limit_policy(specs) self.assertEqual('max_2_mbps', limit.name) self.assertEqual(2, limit.max_kbps) def test_create_io_limit_policy_success(self): limit = self.client.create_io_limit_policy('3kiops', max_iops=3000) self.assertEqual('3kiops', limit.name) self.assertEqual(3000, limit.max_iops) def test_create_io_limit_policy_in_use(self): limit = self.client.create_io_limit_policy('in_use', max_iops=100) self.assertEqual('in_use', limit.name) def test_expand_lun_success(self): lun = self.client.extend_lun('ev_3', 6) self.assertEqual(6, lun.total_size_gb) def test_expand_lun_nothing_to_modify(self): lun = self.client.extend_lun('ev_4', 5) self.assertEqual(5, lun.total_size_gb) def test_migrate_lun_success(self): ret = self.client.migrate_lun('lun_0', 'pool_1') self.assertTrue(ret) def test_migrate_lun_failed(self): ret = self.client.migrate_lun('lun_0', 'fail_migration_pool') self.assertFalse(ret) def test_migrate_lun_thick(self): ret = self.client.migrate_lun('lun_thick', 'pool_2', 'thick') self.assertTrue(ret) def test_migrate_lun_compressed(self): ret = self.client.migrate_lun('lun_compressed', 'pool_2', 'compressed') self.assertTrue(ret) def test_get_pool_id_by_name(self): self.assertEqual('pool_3', self.client.get_pool_id_by_name('Pool 3')) def test_get_pool_name(self): self.assertEqual('Pool0', self.client.get_pool_name('lun_0')) def test_restore_snapshot(self): back_snap = self.client.restore_snapshot('snap1') self.assertEqual("internal_snap", back_snap.name) def test_delete_host_wo_lock(self): host = MockResource(name='empty-host') self.client.host_cache['empty-host'] = host self.assertRaises(ex.HostDeleteIsCalled, self.client.delete_host_wo_lock, host) def test_delete_host_wo_lock_remove_from_cache(self): host = MockResource(name='empty-host-in-cache') self.client.host_cache['empty-host-in-cache'] = host self.client.delete_host_wo_lock(host) self.assertNotIn(host.name, self.client.host_cache) @ddt.data(('cg_1', 'cg_1_description', [MockResource(_id='sv_1')]), ('cg_2', None, None), ('cg_3', None, [MockResource(_id='sv_2')]), ('cg_4', 'cg_4_description', None)) @ddt.unpack def test_create_cg(self, cg_name, cg_description, lun_add): created_cg = MockResource(_id='cg_1') with mock.patch.object(self.client.system, 'create_cg', create=True, return_value=created_cg ) as mocked_create: ret = self.client.create_cg(cg_name, description=cg_description, lun_add=lun_add) mocked_create.assert_called_once_with(cg_name, description=cg_description, lun_add=lun_add) self.assertEqual(created_cg, ret) def test_create_cg_existing_name(self): existing_cg = MockResource(_id='cg_1') with mock.patch.object( self.client.system, 'create_cg', side_effect=ex.UnityConsistencyGroupNameInUseError, create=True) as mocked_create, \ mock.patch.object(self.client.system, 'get_cg', create=True, return_value=existing_cg) as mocked_get: ret = self.client.create_cg('existing_name') mocked_create.assert_called_once_with('existing_name', description=None, lun_add=None) mocked_get.assert_called_once_with(name='existing_name') self.assertEqual(existing_cg, ret) def test_get_cg(self): existing_cg = MockResource(_id='cg_1') with mock.patch.object(self.client.system, 'get_cg', create=True, return_value=existing_cg) as mocked_get: ret = self.client.get_cg('existing_name') 
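        # get_cg should look the group up by name on the mocked system and return it unchanged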
mocked_get.assert_called_once_with(name='existing_name') self.assertEqual(existing_cg, ret) def test_get_cg_not_found(self): with mock.patch.object(self.client.system, 'get_cg', create=True, side_effect=ex.UnityResourceNotFoundError ) as mocked_get: ret = self.client.get_cg('not_found_name') mocked_get.assert_called_once_with(name='not_found_name') self.assertIsNone(ret) def test_delete_cg(self): existing_cg = MockResource(_id='cg_1') with mock.patch.object(existing_cg, 'delete', create=True ) as mocked_delete, \ mock.patch.object(self.client, 'get_cg', create=True, return_value=existing_cg) as mocked_get: ret = self.client.delete_cg('cg_1_name') mocked_get.assert_called_once_with('cg_1_name') mocked_delete.assert_called_once() self.assertIsNone(ret) def test_update_cg(self): existing_cg = MockResource(_id='cg_1') lun_1 = MockResource(_id='sv_1') lun_2 = MockResource(_id='sv_2') lun_3 = MockResource(_id='sv_3') def _mocked_get_lun(lun_id): if lun_id == 'sv_1': return lun_1 if lun_id == 'sv_2': return lun_2 if lun_id == 'sv_3': return lun_3 with mock.patch.object(existing_cg, 'update_lun', create=True ) as mocked_update, \ mock.patch.object(self.client, 'get_cg', create=True, return_value=existing_cg) as mocked_get, \ mock.patch.object(self.client, 'get_lun', side_effect=_mocked_get_lun): ret = self.client.update_cg('cg_1_name', ['sv_1', 'sv_2'], ['sv_3']) mocked_get.assert_called_once_with('cg_1_name') mocked_update.assert_called_once_with(add_luns=[lun_1, lun_2], remove_luns=[lun_3]) self.assertIsNone(ret) def test_update_cg_empty_lun_ids(self): existing_cg = MockResource(_id='cg_1') with mock.patch.object(existing_cg, 'update_lun', create=True ) as mocked_update, \ mock.patch.object(self.client, 'get_cg', create=True, return_value=existing_cg) as mocked_get: ret = self.client.update_cg('cg_1_name', set(), set()) mocked_get.assert_called_once_with('cg_1_name') mocked_update.assert_called_once_with(add_luns=[], remove_luns=[]) self.assertIsNone(ret) def test_create_cg_group(self): existing_cg = MockResource(_id='cg_1') created_snap = MockResource(_id='snap_cg_1', name='snap_name_cg_1') with mock.patch.object(existing_cg, 'create_snap', create=True, return_value=created_snap) as mocked_create, \ mock.patch.object(self.client, 'get_cg', create=True, return_value=existing_cg) as mocked_get: ret = self.client.create_cg_snap('cg_1_name', snap_name='snap_name_cg_1') mocked_get.assert_called_once_with('cg_1_name') mocked_create.assert_called_once_with(name='snap_name_cg_1', is_auto_delete=False) self.assertEqual(created_snap, ret) def test_create_cg_group_none_name(self): existing_cg = MockResource(_id='cg_1') created_snap = MockResource(_id='snap_cg_1') with mock.patch.object(existing_cg, 'create_snap', create=True, return_value=created_snap) as mocked_create, \ mock.patch.object(self.client, 'get_cg', create=True, return_value=existing_cg) as mocked_get: ret = self.client.create_cg_snap('cg_1_name') mocked_get.assert_called_once_with('cg_1_name') mocked_create.assert_called_once_with(name=None, is_auto_delete=False) self.assertEqual(created_snap, ret) def test_filter_snaps_in_cg_snap(self): snaps = [MockResource(_id='snap_{}'.format(n)) for n in (1, 2)] snap_list = mock.MagicMock() snap_list.list = snaps with mock.patch.object(self.client.system, 'get_snap', create=True, return_value=snap_list) as mocked_get: ret = self.client.filter_snaps_in_cg_snap('snap_cg_1') mocked_get.assert_called_once_with(snap_group='snap_cg_1') self.assertEqual(snaps, ret) def test_create_replication(self): 
remote_system = MockResource(_id='RS_1') lun = MockResource(_id='sv_1') called = self.client.create_replication(lun, 60, 'pool_1', remote_system) self.assertEqual(called['max_time_out_of_sync'], 60) self.assertEqual(called['dst_pool_id'], 'pool_1') self.assertIs(called['remote_system'], remote_system) def test_get_remote_system(self): called = self.client.get_remote_system(name='remote-unity') self.assertEqual(called['name'], 'remote-unity') def test_get_remote_system_not_exist(self): called = self.client.get_remote_system(name='not-exist') self.assertIsNone(called) def test_get_replication_session(self): called = self.client.get_replication_session(name='rep-name') self.assertEqual(called['name'], 'rep-name') def test_get_replication_session_not_exist(self): self.assertRaises(client.ClientReplicationError, self.client.get_replication_session, name='not-exist') def test_failover_replication(self): rep_session = MockResource(_id='rep_id_1') called = self.client.failover_replication(rep_session) self.assertFalse(called['sync']) def test_failover_replication_raise(self): rep_session = MockResource(_id='rep_id_1') def mock_failover(sync=None): raise ex.UnityResourceNotFoundError() rep_session.failover = mock_failover self.assertRaises(client.ClientReplicationError, self.client.failover_replication, rep_session) def test_failback_replication(self): rep_session = MockResource(_id='rep_id_1') called = self.client.failback_replication(rep_session) self.assertTrue(called['force_full_copy']) def test_failback_replication_raise(self): rep_session = MockResource(_id='rep_id_1') def mock_failback(force_full_copy=None): raise ex.UnityResourceNotFoundError() rep_session.failback = mock_failback self.assertRaises(client.ClientReplicationError, self.client.failback_replication, rep_session) def test_create_cg_replication(self): remote_system = MockResource(_id='RS_2') cg_name = 'test_cg' called = self.client.create_cg_replication( cg_name, 'pool_1', remote_system, 60) self.assertEqual(60, called['max_time_out_of_sync']) self.assertEqual('pool_1', called['dst_pool_id']) self.assertEqual('test_cg', called['dst_cg_name']) self.assertIs(remote_system, called['remote_system']) def test_cg_in_replciation(self): existing_cg = MockResource(_id='replicated_cg') result = self.client.is_cg_replicated(existing_cg.id) self.assertTrue(result) def test_cg_not_in_replciation(self): existing_cg = MockResource(_id='test_cg') result = self.client.is_cg_replicated(existing_cg.id) self.assertFalse(result) def test_delete_cg_rep_session(self): src_cg = MockResource(_id='cg_is_in_replication') result = self.client.delete_cg_rep_session(src_cg.id) self.assertIsNone(result) def test_failover_cg_rep_session(self): src_cg = MockResource(_id='failover_cg_is_in_replication') result = self.client.failover_cg_rep_session(src_cg.id, True) self.assertIsNone(result) def test_failback_cg_rep_session(self): src_cg = MockResource(_id='failback_cg_is_in_replication') result = self.client.failback_cg_rep_session(src_cg.id) self.assertIsNone(result) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/volume/drivers/dell_emc/unity/test_driver.py0000664000175000017500000004523300000000000027270 0ustar00zuulzuul00000000000000# Copyright (c) 2016 Dell Inc. or its subsidiaries. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import functools import unittest from unittest import mock from cinder.objects import fields from cinder.tests.unit.volume.drivers.dell_emc.unity \ import fake_exception as ex from cinder.tests.unit.volume.drivers.dell_emc.unity import test_adapter from cinder.volume import configuration as conf from cinder.volume.drivers.dell_emc.unity import driver ######################## # # Start of Mocks # ######################## class MockAdapter(object): def __init__(self): self.is_setup = False def do_setup(self, driver_object, configuration): self.is_setup = True raise ex.AdapterSetupError() @staticmethod def create_volume(volume): return volume @staticmethod def create_volume_from_snapshot(volume, snapshot): return volume @staticmethod def create_cloned_volume(volume, src_vref): return volume @staticmethod def extend_volume(volume, new_size): volume.size = new_size @staticmethod def delete_volume(volume): volume.exists = False @staticmethod def create_snapshot(snapshot): snapshot.exists = True return snapshot @staticmethod def delete_snapshot(snapshot): snapshot.exists = False @staticmethod def initialize_connection(volume, connector): return {'volume': volume, 'connector': connector} @staticmethod def terminate_connection(volume, connector): return {'volume': volume, 'connector': connector} @staticmethod def update_volume_stats(): return {'stats': 123} @staticmethod def manage_existing(volume, existing_ref): volume.managed = True return volume @staticmethod def manage_existing_get_size(volume, existing_ref): volume.managed = True volume.size = 7 return volume @staticmethod def get_pool_name(volume): return 'pool_0' @staticmethod def initialize_connection_snapshot(snapshot, connector): return {'snapshot': snapshot, 'connector': connector} @staticmethod def terminate_connection_snapshot(snapshot, connector): return {'snapshot': snapshot, 'connector': connector} @staticmethod def restore_snapshot(volume, snapshot): return True @staticmethod def migrate_volume(volume, host): return True, {} @staticmethod def create_group(group): return group @staticmethod def delete_group(group): return group @staticmethod def update_group(group, add_volumes, remove_volumes): return group, add_volumes, remove_volumes @staticmethod def create_group_from_snap(group, volumes, group_snapshot, snapshots): return group, volumes, group_snapshot, snapshots @staticmethod def create_cloned_group(group, volumes, source_group, source_vols): return group, volumes, source_group, source_vols @staticmethod def create_group_snapshot(group_snapshot, snapshots): return group_snapshot, snapshots @staticmethod def delete_group_snapshot(group_snapshot): return group_snapshot def failover(self, volumes, secondary_id=None, groups=None): return {'volumes': volumes, 'secondary_id': secondary_id, 'groups': groups} @staticmethod def enable_replication(context, group, volumes): if volumes and group: return {'replication_status': fields.ReplicationStatus.ENABLED}, None return {}, None @staticmethod def disable_replication(context, group, volumes): if volumes and group: return {'replication_status': fields.ReplicationStatus.DISABLED}, None 
return {}, None @staticmethod def failover_replication(context, group, volumes, secondary_backend_id): group_update = {} volumes_update = [] if volumes and group and secondary_backend_id: group_update = {'replication_status': fields.ReplicationStatus.FAILED_OVER} for volume in volumes: volume_update = { 'id': volume.id, 'replication_status': fields.ReplicationStatus.FAILED_OVER} volumes_update.append(volume_update) return group_update, volumes_update return group_update, None def retype(self, ctxt, volume, new_type, diff, host): return True class MockReplicationManager(object): def __init__(self): self.active_adapter = MockAdapter() def do_setup(self, d): if isinstance(d, driver.UnityDriver): raise ex.ReplicationManagerSetupError() ######################## # # Start of Tests # ######################## patch_check_cg = mock.patch( 'cinder.volume.volume_utils.is_group_a_cg_snapshot_type', side_effect=lambda g: not g.id.endswith('_generic')) class UnityDriverTest(unittest.TestCase): @staticmethod def get_volume(): return test_adapter.MockOSResource(provider_location='id^lun_43', id='id_43') @staticmethod def get_volumes(): volumes = [] for number in ['50', '51', '52', '53']: volume = test_adapter.MockOSResource( provider_location='id^lun_' + number, id='id_' + number) volumes.append(volume) return volumes @staticmethod def get_generic_group(): return test_adapter.MockOSResource(name='group_name_generic', id='group_id_generic') @staticmethod def get_cg(): return test_adapter.MockOSResource(name='group_name_cg', id='group_id_cg') @classmethod def get_snapshot(cls): return test_adapter.MockOSResource(volume=cls.get_volume()) @classmethod def get_generic_group_snapshot(cls): return test_adapter.MockOSResource(group=cls.get_generic_group(), id='group_snapshot_id_generic') @classmethod def get_cg_group_snapshot(cls): return test_adapter.MockOSResource(group=cls.get_cg(), id='group_snapshot_id_cg') @staticmethod def get_context(): return None @staticmethod def get_connector(): return {'host': 'host1'} def setUp(self): self.config = conf.Configuration(None) self.driver = driver.UnityDriver(configuration=self.config) self.driver.replication_manager = MockReplicationManager() def test_default_initialize(self): config = conf.Configuration(None) iscsi_driver = driver.UnityDriver(configuration=config) self.assertListEqual([], config.unity_storage_pool_names) self.assertListEqual([], config.unity_io_ports) self.assertTrue(config.san_thin_provision) self.assertEqual('', config.san_ip) self.assertEqual('admin', config.san_login) self.assertEqual('', config.san_password) self.assertEqual('', config.san_private_key) self.assertEqual('', config.san_clustername) self.assertEqual(22, config.san_ssh_port) self.assertEqual(False, config.san_is_local) self.assertEqual(30, config.ssh_conn_timeout) self.assertEqual(1, config.ssh_min_pool_conn) self.assertEqual(5, config.ssh_max_pool_conn) self.assertEqual('iSCSI', iscsi_driver.protocol) self.assertIsNone(iscsi_driver.active_backend_id) def test_initialize_with_active_backend_id(self): config = conf.Configuration(None) iscsi_driver = driver.UnityDriver(configuration=config, active_backend_id='secondary_unity') self.assertEqual('secondary_unity', iscsi_driver.active_backend_id) def test_fc_initialize(self): config = conf.Configuration(None) config.storage_protocol = 'fc' fc_driver = driver.UnityDriver(configuration=config) self.assertEqual('FC', fc_driver.protocol) def test_do_setup(self): def f(): self.driver.do_setup(None) 
self.assertRaises(ex.ReplicationManagerSetupError, f) def test_create_volume(self): volume = self.get_volume() self.assertEqual(volume, self.driver.create_volume(volume)) def test_create_volume_from_snapshot(self): volume = self.get_volume() snap = self.get_snapshot() self.assertEqual( volume, self.driver.create_volume_from_snapshot(volume, snap)) def test_create_cloned_volume(self): volume = self.get_volume() self.assertEqual( volume, self.driver.create_cloned_volume(volume, None)) def test_extend_volume(self): volume = self.get_volume() self.driver.extend_volume(volume, 6) self.assertEqual(6, volume.size) def test_delete_volume(self): volume = self.get_volume() self.driver.delete_volume(volume) self.assertFalse(volume.exists) def test_migrate_volume(self): volume = self.get_volume() ret = self.driver.migrate_volume(self.get_context(), volume, 'HostA@BackendB#PoolC') self.assertEqual((True, {}), ret) def test_retype_volume(self): volume = self.get_volume() new_type = {'name': u'type01', 'qos_specs_id': 'test_qos_id', 'extra_specs': {}, 'id': u'd67c4480-a61b-44c0-a58b-24c0357cadeb'} diff = None ret = self.driver.retype(self.get_context(), volume, new_type, diff, 'HostA@BackendB#PoolC') self.assertTrue(ret) def test_create_snapshot(self): snapshot = self.get_snapshot() self.driver.create_snapshot(snapshot) self.assertTrue(snapshot.exists) def test_delete_snapshot(self): snapshot = self.get_snapshot() self.driver.delete_snapshot(snapshot) self.assertFalse(snapshot.exists) def test_ensure_export(self): self.assertIsNone(self.driver.ensure_export( self.get_context(), self.get_volume())) def test_create_export(self): self.assertIsNone(self.driver.create_export( self.get_context(), self.get_volume(), self.get_connector())) def test_remove_export(self): self.assertIsNone(self.driver.remove_export( self.get_context(), self.get_volume())) def test_check_for_export(self): self.assertIsNone(self.driver.check_for_export( self.get_context(), self.get_volume())) def test_initialize_connection(self): volume = self.get_volume() connector = self.get_connector() conn_info = self.driver.initialize_connection(volume, connector) self.assertEqual(volume, conn_info['volume']) self.assertEqual(connector, conn_info['connector']) def test_terminate_connection(self): volume = self.get_volume() connector = self.get_connector() conn_info = self.driver.terminate_connection(volume, connector) self.assertEqual(volume, conn_info['volume']) self.assertEqual(connector, conn_info['connector']) def test_update_volume_stats(self): stats = self.driver.get_volume_stats(True) self.assertEqual(123, stats['stats']) self.assertEqual(self.driver.VERSION, stats['driver_version']) self.assertEqual(self.driver.VENDOR, stats['vendor_name']) def test_manage_existing(self): volume = self.driver.manage_existing(self.get_volume(), None) self.assertTrue(volume.managed) def test_manage_existing_get_size(self): volume = self.driver.manage_existing_get_size(self.get_volume(), None) self.assertTrue(volume.managed) self.assertEqual(7, volume.size) def test_get_pool(self): self.assertEqual('pool_0', self.driver.get_pool(self.get_volume())) def test_unmanage(self): ret = self.driver.unmanage(None) self.assertIsNone(ret) def test_backup_use_temp_snapshot(self): self.assertTrue(self.driver.backup_use_temp_snapshot()) def test_initialize_connection_snapshot(self): snapshot = self.get_snapshot() conn_info = self.driver.initialize_connection_snapshot( snapshot, self.get_connector()) self.assertEqual(snapshot, conn_info['snapshot']) def 
test_terminate_connection_snapshot(self): snapshot = self.get_snapshot() conn_info = self.driver.terminate_connection_snapshot( snapshot, self.get_connector()) self.assertEqual(snapshot, conn_info['snapshot']) def test_restore_snapshot(self): snapshot = self.get_snapshot() volume = self.get_volume() r = self.driver.revert_to_snapshot(None, volume, snapshot) self.assertTrue(r) @patch_check_cg def test_operate_generic_group_not_implemented(self, _): group = self.get_generic_group() context = self.get_context() for func in (self.driver.create_group, self.driver.update_group): self.assertRaises(NotImplementedError, functools.partial(func, context, group)) volumes = [self.get_volume()] for func in (self.driver.delete_group, self.driver.create_group_from_src): self.assertRaises(NotImplementedError, functools.partial(func, context, group, volumes)) group_snap = self.get_generic_group_snapshot() volume_snaps = [self.get_snapshot()] for func in (self.driver.create_group_snapshot, self.driver.delete_group_snapshot): self.assertRaises(NotImplementedError, functools.partial(func, context, group_snap, volume_snaps)) @patch_check_cg def test_create_group_cg(self, _): cg = self.get_cg() ret = self.driver.create_group(self.get_context(), cg) self.assertEqual(ret, cg) @patch_check_cg def test_delete_group_cg(self, _): cg = self.get_cg() volumes = [self.get_volume()] ret = self.driver.delete_group(self.get_context(), cg, volumes) self.assertEqual(ret, cg) @patch_check_cg def test_update_group_cg(self, _): cg = self.get_cg() volumes = [self.get_volume()] ret = self.driver.update_group(self.get_context(), cg, add_volumes=volumes) self.assertEqual(ret[0], cg) self.assertListEqual(ret[1], volumes) self.assertIsNone(ret[2]) @patch_check_cg def test_create_group_from_src_group(self, _): cg = self.get_cg() volumes = [self.get_volume()] source_group = cg ret = self.driver.create_group_from_src(self.get_context(), cg, volumes, source_group=source_group) self.assertEqual(ret[0], cg) self.assertListEqual(ret[1], volumes) self.assertEqual(ret[2], source_group) self.assertIsNone(ret[3]) @patch_check_cg def test_create_group_from_src_group_snapshot(self, _): cg = self.get_cg() volumes = [self.get_volume()] cg_snap = self.get_cg_group_snapshot() ret = self.driver.create_group_from_src(self.get_context(), cg, volumes, group_snapshot=cg_snap) self.assertEqual(ret[0], cg) self.assertListEqual(ret[1], volumes) self.assertEqual(ret[2], cg_snap) self.assertIsNone(ret[3]) @patch_check_cg def test_create_group_snapshot_cg(self, _): cg_snap = self.get_cg_group_snapshot() ret = self.driver.create_group_snapshot(self.get_context(), cg_snap, None) self.assertEqual(ret[0], cg_snap) self.assertIsNone(ret[1]) @patch_check_cg def test_delete_group_snapshot_cg(self, _): cg_snap = self.get_cg_group_snapshot() ret = self.driver.delete_group_snapshot(self.get_context(), cg_snap, None) self.assertEqual(ret, cg_snap) def test_failover_host(self): volume = self.get_volume() called = self.driver.failover_host(None, [volume], secondary_id='secondary_unity', groups=None) self.assertListEqual(called['volumes'], [volume]) self.assertEqual('secondary_unity', called['secondary_id']) self.assertIsNone(called['groups']) def test_enable_replication(self): cg = self.get_cg() volumes = self.get_volumes() result = self.driver.enable_replication(None, cg, volumes) self.assertEqual(result, ({'replication_status': fields.ReplicationStatus.ENABLED}, None)) def test_disable_replication(self): cg = self.get_cg() volumes = self.get_volumes() result = 
self.driver.disable_replication(None, cg, volumes) self.assertEqual(result, ({'replication_status': fields.ReplicationStatus.DISABLED}, None)) def test_failover_replication(self): cg = self.get_cg() volumes = self.get_volumes() result = self.driver.failover_replication( None, cg, volumes, 'test_secondary_id') volumes = [{'id': 'id_50', 'replication_status': 'failed-over'}, {'id': 'id_51', 'replication_status': 'failed-over'}, {'id': 'id_52', 'replication_status': 'failed-over'}, {'id': 'id_53', 'replication_status': 'failed-over'}] self.assertEqual(result, ({'replication_status': fields.ReplicationStatus.FAILED_OVER}, volumes)) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/volume/drivers/dell_emc/unity/test_replication.py0000664000175000017500000003262000000000000030302 0ustar00zuulzuul00000000000000# Copyright (c) 2016 - 2019 Dell Inc. or its subsidiaries. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import unittest from unittest import mock import ddt from cinder import exception from cinder.volume import configuration as conf from cinder.volume.drivers.dell_emc.unity import adapter as unity_adapter from cinder.volume.drivers.dell_emc.unity import driver from cinder.volume.drivers.dell_emc.unity import replication from cinder.volume.drivers.san.san import san_opts @ddt.ddt class UnityReplicationTest(unittest.TestCase): @ddt.data({'version': '1.0.0', 'protocol': 'FC', 'expected': unity_adapter.FCAdapter}, {'version': '2.0.0', 'protocol': 'iSCSI', 'expected': unity_adapter.ISCSIAdapter}) @ddt.unpack def test_init_adapter(self, version, protocol, expected): a = replication.init_adapter(version, protocol) self.assertIsInstance(a, expected) self.assertEqual(version, a.version) @ddt.ddt class UnityReplicationDeviceTest(unittest.TestCase): def setUp(self): self.config = conf.Configuration(san_opts, config_group='unity-backend') self.config.san_ip = '1.1.1.1' self.config.san_login = 'user1' self.config.san_password = 'password1' self.driver = driver.UnityDriver(configuration=self.config) conf_dict = {'backend_id': 'secondary_unity', 'san_ip': '2.2.2.2'} self.mock_adapter = mock.MagicMock(is_setup=False) def mock_do_setup(*args): self.mock_adapter.is_setup = True self.mock_adapter.do_setup = mock.MagicMock(side_effect=mock_do_setup) with mock.patch('cinder.volume.drivers.dell_emc.unity.' 
'replication.init_adapter', return_value=self.mock_adapter): self.replication_device = replication.ReplicationDevice( conf_dict, self.driver) @ddt.data( { 'conf_dict': { 'backend_id': 'secondary_unity', 'san_ip': '2.2.2.2' }, 'expected': [ 'secondary_unity', '2.2.2.2', 'user1', 'password1', 60 ] }, { 'conf_dict': { 'backend_id': 'secondary_unity', 'san_ip': '2.2.2.2', 'san_login': 'user2', 'san_password': 'password2', 'max_time_out_of_sync': 180 }, 'expected': [ 'secondary_unity', '2.2.2.2', 'user2', 'password2', 180 ] }, ) @ddt.unpack def test_init(self, conf_dict, expected): self.driver.configuration.replication_device = conf_dict device = replication.ReplicationDevice(conf_dict, self.driver) self.assertListEqual( [device.backend_id, device.san_ip, device.san_login, device.san_password, device.max_time_out_of_sync], expected) self.assertIs(self.driver, device.driver) @ddt.data( { 'conf_dict': {'san_ip': '2.2.2.2'}, }, { 'conf_dict': {'backend_id': ' ', 'san_ip': '2.2.2.2'}, }, { 'conf_dict': {'backend_id': 'secondary_unity'}, }, { 'conf_dict': {'backend_id': 'secondary_unity', 'san_ip': ' '}, }, { 'conf_dict': { 'backend_id': 'secondary_unity', 'san_ip': '2.2.2.2', 'san_login': 'user2', 'san_password': 'password2', 'max_time_out_of_sync': 'NOT_A_NUMBER' }, }, ) @ddt.unpack def test_init_raise(self, conf_dict): self.driver.configuration.replication_device = conf_dict self.assertRaisesRegex(exception.InvalidConfigurationValue, 'Value .* is not valid for configuration ' 'option "unity-backend.replication_device"', replication.ReplicationDevice, conf_dict, self.driver) @ddt.data( { 'conf_dict': { 'backend_id': 'secondary_unity', 'san_ip': '2.2.2.2' }, 'expected': [ '2.2.2.2', 'user1', 'password1' ] }, { 'conf_dict': { 'backend_id': 'secondary_unity', 'san_ip': '2.2.2.2', 'san_login': 'user2', 'san_password': 'password2', 'max_time_out_of_sync': 180 }, 'expected': [ '2.2.2.2', 'user2', 'password2' ] }, ) @ddt.unpack def test_device_conf(self, conf_dict, expected): self.driver.configuration.replication_device = conf_dict device = replication.ReplicationDevice(conf_dict, self.driver) c = device.device_conf self.assertListEqual([c.san_ip, c.san_login, c.san_password], expected) def test_setup_adapter(self): self.replication_device.setup_adapter() # Not call adapter.do_setup after initial setup done. self.replication_device.setup_adapter() self.mock_adapter.do_setup.assert_called_once() def test_setup_adapter_fail(self): def f(*args): raise exception.VolumeBackendAPIException('adapter setup failed') self.mock_adapter.do_setup = mock.MagicMock(side_effect=f) with self.assertRaises(exception.VolumeBackendAPIException): self.replication_device.setup_adapter() def test_adapter(self): self.assertIs(self.mock_adapter, self.replication_device.adapter) self.mock_adapter.do_setup.assert_called_once() def test_destination_pool(self): self.mock_adapter.storage_pools_map = {'pool-1': 'pool-1'} self.assertEqual('pool-1', self.replication_device.destination_pool) @ddt.ddt class UnityReplicationManagerTest(unittest.TestCase): def setUp(self): self.config = conf.Configuration(san_opts, config_group='unity-backend') self.config.san_ip = '1.1.1.1' self.config.san_login = 'user1' self.config.san_password = 'password1' self.config.replication_device = [ {'backend_id': 'secondary_unity', 'san_ip': '2.2.2.2'} ] self.driver = driver.UnityDriver(configuration=self.config) self.replication_manager = replication.ReplicationManager() @mock.patch('cinder.volume.drivers.dell_emc.unity.' 
'replication.ReplicationDevice.setup_adapter') def test_do_setup(self, mock_setup_adapter): self.replication_manager.do_setup(self.driver) calls = [mock.call(), mock.call()] default_device = self.replication_manager.default_device self.assertEqual('1.1.1.1', default_device.san_ip) self.assertEqual('user1', default_device.san_login) self.assertEqual('password1', default_device.san_password) devices = self.replication_manager.replication_devices self.assertEqual(1, len(devices)) self.assertIn('secondary_unity', devices) rep_device = devices['secondary_unity'] self.assertEqual('2.2.2.2', rep_device.san_ip) self.assertEqual('user1', rep_device.san_login) self.assertEqual('password1', rep_device.san_password) self.assertTrue(self.replication_manager.is_replication_configured) self.assertTrue( self.replication_manager.active_backend_id is None or self.replication_manager.active_backend_id == 'default') self.assertFalse(self.replication_manager.is_service_failed_over) active_adapter = self.replication_manager.active_adapter calls.append(mock.call()) self.assertIs(default_device.adapter, active_adapter) calls.append(mock.call()) mock_setup_adapter.assert_has_calls(calls) @mock.patch('cinder.volume.drivers.dell_emc.unity.' 'replication.ReplicationDevice.setup_adapter') def test_do_setup_replication_not_configured(self, mock_setup_adapter): self.driver.configuration.replication_device = None self.replication_manager.do_setup(self.driver) calls = [mock.call()] default_device = self.replication_manager.default_device self.assertEqual('1.1.1.1', default_device.san_ip) self.assertEqual('user1', default_device.san_login) self.assertEqual('password1', default_device.san_password) devices = self.replication_manager.replication_devices self.assertEqual(0, len(devices)) self.assertFalse(self.replication_manager.is_replication_configured) self.assertTrue( self.replication_manager.active_backend_id is None or self.replication_manager.active_backend_id == 'default') self.assertFalse(self.replication_manager.is_service_failed_over) active_adapter = self.replication_manager.active_adapter calls.append(mock.call()) self.assertIs(default_device.adapter, active_adapter) calls.append(mock.call()) mock_setup_adapter.assert_has_calls(calls) @mock.patch('cinder.volume.drivers.dell_emc.unity.' 
'replication.ReplicationDevice.setup_adapter') def test_do_setup_failed_over(self, mock_setup_adapter): self.driver = driver.UnityDriver(configuration=self.config, active_backend_id='secondary_unity') self.replication_manager.do_setup(self.driver) calls = [mock.call()] default_device = self.replication_manager.default_device self.assertEqual('1.1.1.1', default_device.san_ip) self.assertEqual('user1', default_device.san_login) self.assertEqual('password1', default_device.san_password) devices = self.replication_manager.replication_devices self.assertEqual(1, len(devices)) self.assertIn('secondary_unity', devices) rep_device = devices['secondary_unity'] self.assertEqual('2.2.2.2', rep_device.san_ip) self.assertEqual('user1', rep_device.san_login) self.assertEqual('password1', rep_device.san_password) self.assertTrue(self.replication_manager.is_replication_configured) self.assertEqual('secondary_unity', self.replication_manager.active_backend_id) self.assertTrue(self.replication_manager.is_service_failed_over) active_adapter = self.replication_manager.active_adapter calls.append(mock.call()) self.assertIs(rep_device.adapter, active_adapter) calls.append(mock.call()) mock_setup_adapter.assert_has_calls(calls) @ddt.data( { 'rep_device': [{ 'backend_id': 'default', 'san_ip': '2.2.2.2' }] }, { 'rep_device': [{ 'backend_id': 'secondary_unity', 'san_ip': '2.2.2.2' }, { 'backend_id': 'default', 'san_ip': '3.3.3.3' }] }, { 'rep_device': [{ 'backend_id': 'secondary_unity', 'san_ip': '2.2.2.2' }, { 'backend_id': 'third_unity', 'san_ip': '3.3.3.3' }] }, ) @ddt.unpack @mock.patch('cinder.volume.drivers.dell_emc.unity.' 'replication.ReplicationDevice.setup_adapter') def test_do_setup_raise_invalid_rep_device(self, mock_setup_adapter, rep_device): self.driver.configuration.replication_device = rep_device self.assertRaises(exception.InvalidConfigurationValue, self.replication_manager.do_setup, self.driver) @mock.patch('cinder.volume.drivers.dell_emc.unity.' 'replication.ReplicationDevice.setup_adapter') def test_do_setup_raise_invalid_active_backend_id(self, mock_setup_adapter): self.driver = driver.UnityDriver(configuration=self.config, active_backend_id='third_unity') self.assertRaises(exception.InvalidConfigurationValue, self.replication_manager.do_setup, self.driver) @mock.patch('cinder.volume.drivers.dell_emc.unity.' 'replication.ReplicationDevice.setup_adapter') def test_failover_service(self, mock_setup_adapter): self.assertIsNone(self.replication_manager.active_backend_id) self.replication_manager.do_setup(self.driver) self.replication_manager.active_adapter self.assertEqual('default', self.replication_manager.active_backend_id) self.replication_manager.failover_service('secondary_unity') self.assertEqual('secondary_unity', self.replication_manager.active_backend_id) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/volume/drivers/dell_emc/unity/test_utils.py0000664000175000017500000003354100000000000027134 0ustar00zuulzuul00000000000000# Copyright (c) 2016 Dell Inc. or its subsidiaries. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import functools import unittest from unittest import mock from oslo_utils import units from cinder import exception from cinder.tests.unit.volume.drivers.dell_emc.unity import test_adapter from cinder.tests.unit.volume.drivers.dell_emc.unity import test_driver from cinder.volume.drivers.dell_emc.unity import utils def get_volume_type_extra_specs(volume_type): return {'provisioning:type': volume_type} def get_group_type_specs(group_type): return {'consistent_group_snapshot_enabled': ' True', 'group_type_id': group_type} def get_volume_type_qos_specs(type_id): if type_id == 'invalid_backend_qos_consumer': ret = {'qos_specs': {'consumer': 'invalid'}} elif type_id == 'both_none': ret = {'qos_specs': {'consumer': 'back-end', 'specs': {}}} elif type_id == 'max_1000_iops': ret = { 'qos_specs': { 'id': 'max_1000_iops', 'consumer': 'both', 'specs': { 'maxIOPS': 1000 } } } elif type_id == 'max_2_mbps': ret = { 'qos_specs': { 'id': 'max_2_mbps', 'consumer': 'back-end', 'specs': { 'maxBWS': 2 } } } else: ret = None return ret def patch_volume_types(func): @functools.wraps(func) @mock.patch(target=('cinder.volume.volume_types' '.get_volume_type_extra_specs'), new=get_volume_type_extra_specs) @mock.patch(target=('cinder.volume.volume_types' '.get_volume_type_qos_specs'), new=get_volume_type_qos_specs) def func_wrapper(*args, **kwargs): return func(*args, **kwargs) return func_wrapper def patch_group_types(func): @functools.wraps(func) @mock.patch(target=('cinder.volume.group_types' '.get_group_type_specs'), new=get_group_type_specs) def func_wrapper(*args, **kwargs): return func(*args, **kwargs) return func_wrapper class UnityUtilsTest(unittest.TestCase): def test_validate_pool_names_filter(self): all_pools = list('acd') pool_names = utils.validate_pool_names(list('abc'), all_pools) self.assertIn('a', pool_names) self.assertIn('c', pool_names) self.assertNotIn('b', pool_names) self.assertNotIn('d', pool_names) def test_validate_pool_names_non_exists(self): def f(): all_pools = list('abc') utils.validate_pool_names(list('efg'), all_pools) self.assertRaises(exception.VolumeBackendAPIException, f) def test_validate_pool_names_default(self): all_pools = list('ab') pool_names = utils.validate_pool_names([], all_pools) self.assertEqual(2, len(pool_names)) pool_names = utils.validate_pool_names(None, all_pools) self.assertEqual(2, len(pool_names)) def test_build_provider_location(self): location = utils.build_provider_location('unity', 'thin', 'ev_1', '3') expected = 'id^ev_1|system^unity|type^thin|version^3' self.assertEqual(expected, location) def test_extract_provider_location_version(self): location = 'id^ev_1|system^unity|type^thin|version^3' self.assertEqual('3', utils.extract_provider_location(location, 'version')) def test_extract_provider_location_type(self): location = 'id^ev_1|system^unity|type^thin|version^3' self.assertEqual('thin', utils.extract_provider_location(location, 'type')) def test_extract_provider_location_system(self): location = 'id^ev_1|system^unity|type^thin|version^3' self.assertEqual('unity', utils.extract_provider_location(location, 'system')) def 
test_extract_provider_location_id(self): location = 'id^ev_1|system^unity|type^thin|version^3' self.assertEqual('ev_1', utils.extract_provider_location(location, 'id')) def test_extract_provider_location_not_found(self): location = 'id^ev_1|system^unity|type^thin|version^3' self.assertIsNone(utils.extract_provider_location(location, 'na')) def test_extract_provider_location_none(self): self.assertIsNone(utils.extract_provider_location(None, 'abc')) def test_extract_iscsi_uids(self): connector = {'host': 'fake_host', 'initiator': 'fake_iqn'} self.assertEqual(['fake_iqn'], utils.extract_iscsi_uids(connector)) def test_extract_iscsi_uids_not_found(self): connector = {'host': 'fake_host'} self.assertRaises(exception.VolumeBackendAPIException, utils.extract_iscsi_uids, connector) def test_extract_fc_uids(self): connector = {'host': 'fake_host', 'wwnns': ['1111111111111111', '2222222222222222'], 'wwpns': ['3333333333333333', '4444444444444444'] } self.assertEqual(['11:11:11:11:11:11:11:11:33:33:33:33:33:33:33:33', '22:22:22:22:22:22:22:22:44:44:44:44:44:44:44:44', ], utils.extract_fc_uids(connector)) def test_extract_fc_uids_not_found(self): connector = {'host': 'fake_host'} self.assertRaises(exception.VolumeBackendAPIException, utils.extract_iscsi_uids, connector) def test_byte_to_gib(self): self.assertEqual(5, utils.byte_to_gib(5 * units.Gi)) def test_byte_to_mib(self): self.assertEqual(5, utils.byte_to_mib(5 * units.Mi)) def test_gib_to_mib(self): self.assertEqual(5 * units.Gi / units.Mi, utils.gib_to_mib(5)) def test_convert_ip_to_portal(self): self.assertEqual('1.2.3.4:3260', utils.convert_ip_to_portal('1.2.3.4')) self.assertEqual('[fd27:2e95:e174::100]:3260', utils.convert_ip_to_portal('fd27:2e95:e174::100')) self.assertEqual('[fd27:2e95:e174::100]:3260', utils.convert_ip_to_portal('[fd27:2e95:e174::100]')) def test_convert_to_itor_tgt_map(self): zone_mapping = { 'san_1': { 'initiator_port_wwn_list': ('200000051e55a100', '200000051e55a121'), 'target_port_wwn_list': ('100000051e55a100', '100000051e55a121') } } ret = utils.convert_to_itor_tgt_map(zone_mapping) self.assertEqual(['100000051e55a100', '100000051e55a121'], ret[0]) mapping = ret[1] targets = ('100000051e55a100', '100000051e55a121') self.assertEqual(targets, mapping['200000051e55a100']) self.assertEqual(targets, mapping['200000051e55a121']) def test_get_pool_name(self): volume = test_adapter.MockOSResource(host='host@backend#pool_name') self.assertEqual('pool_name', utils.get_pool_name(volume)) def test_get_pool_name_from_host(self): host = {'host': 'host@backend#pool_name'} ret = utils.get_pool_name_from_host(host) self.assertEqual('pool_name', ret) def get_backend_name_from_volume(self): volume = test_adapter.MockOSResource(host='host@backend#pool_name') ret = utils.get_backend_name_from_volume(volume) self.assertEqual('host@backend', ret) def get_backend_name_from_host(self): host = {'host': 'host@backend#pool_name'} ret = utils.get_backend_name_from_volume(host) self.assertEqual('host@backend', ret) def test_ignore_exception(self): class IgnoredException(Exception): pass def f(): raise IgnoredException('any exception') try: utils.ignore_exception(f) except IgnoredException: self.fail('should not raise any exception.') def test_assure_cleanup(self): data = [0] def _enter(): data[0] += 10 return data[0] def _exit(x): data[0] = x - 1 ctx = utils.assure_cleanup(_enter, _exit, True) with ctx as r: self.assertEqual(10, r) self.assertEqual(9, data[0]) def test_get_backend_qos_specs_type_none(self): volume = 
test_adapter.MockOSResource(volume_type_id=None) ret = utils.get_backend_qos_specs(volume) self.assertIsNone(ret) @patch_volume_types def test_get_backend_qos_specs_none(self): volume = test_adapter.MockOSResource(volume_type_id='no_qos') ret = utils.get_backend_qos_specs(volume) self.assertIsNone(ret) @patch_volume_types def test_get_backend_qos_invalid_consumer(self): volume = test_adapter.MockOSResource( volume_type_id='invalid_backend_qos_consumer') ret = utils.get_backend_qos_specs(volume) self.assertIsNone(ret) @patch_volume_types def test_get_backend_qos_both_none(self): volume = test_adapter.MockOSResource(volume_type_id='both_none') ret = utils.get_backend_qos_specs(volume) self.assertIsNone(ret) @patch_volume_types def test_get_backend_qos_iops(self): volume = test_adapter.MockOSResource(volume_type_id='max_1000_iops') ret = utils.get_backend_qos_specs(volume) expected = {'maxBWS': None, 'id': 'max_1000_iops', 'maxIOPS': 1000} self.assertEqual(expected, ret) @patch_volume_types def test_get_backend_qos_mbps(self): volume = test_adapter.MockOSResource(volume_type_id='max_2_mbps') ret = utils.get_backend_qos_specs(volume) expected = {'maxBWS': 2, 'id': 'max_2_mbps', 'maxIOPS': None} self.assertEqual(expected, ret) def test_remove_empty(self): option = mock.Mock() value_list = [' pool1', 'pool2 ', ' pool3 '] ret = utils.remove_empty(option, value_list) expected = ['pool1', 'pool2', 'pool3'] self.assertListEqual(expected, ret) def test_remove_empty_none(self): option = mock.Mock() value_list = None ret = utils.remove_empty(option, value_list) expected = None self.assertEqual(expected, ret) def test_remove_empty_empty_list(self): option = mock.Mock() value_list = [] ret = utils.remove_empty(option, value_list) expected = None self.assertEqual(expected, ret) @patch_group_types def test_group_is_cg(self): cg = test_driver.UnityDriverTest.get_cg() result = utils.group_is_cg(cg) self.assertTrue(result) @patch_group_types def test_get_group_specs_by_key(self): cg = test_driver.UnityDriverTest.get_cg() result = utils.get_group_specs(cg, 'consistent_group_snapshot_enabled') self.assertEqual(' True', result) @patch_group_types def test_no_group_specs_key(self): cg = test_driver.UnityDriverTest.get_cg() result = utils.get_group_specs(cg, 'test_key') self.assertIsNone(result) @patch_volume_types def test_retype_no_need_migration_when_same_host(self): volume = test_adapter.MockOSResource(volume_type_id='host_1', host='host_1') new_host = {'name': 'new_name', 'host': 'host_1'} ret = utils.retype_need_migration(volume, None, None, new_host) self.assertFalse(ret) @patch_volume_types def test_retype_need_migration_when_diff_host(self): volume = test_adapter.MockOSResource(volume_type_id='host_1', host='host_1') new_host = {'name': 'new_name', 'host': 'new_host'} ret = utils.retype_need_migration(volume, None, None, new_host) self.assertTrue(ret) @patch_volume_types def test_retype_no_need_migration_thin_to_compressed(self): volume = test_adapter.MockOSResource(volume_type_id='host_1', host='host_1') new_host = {'name': 'new_name', 'host': 'host_1'} old_provision = '' new_provision = 'compressed' ret = utils.retype_need_migration(volume, old_provision, new_provision, new_host) self.assertFalse(ret) @patch_volume_types def test_retype_no_need_migration_compressed_to_thin(self): volume = test_adapter.MockOSResource(volume_type_id='host_1', host='host_1') new_host = {'name': 'new_name', 'host': 'host_1'} old_provision = 'compressed' new_provision = '' ret = utils.retype_need_migration(volume, 
                                          old_provision, new_provision,
                                          new_host)
        self.assertFalse(ret)

    @patch_volume_types
    def test_retype_need_migration_thin_to_thick(self):
        volume = test_adapter.MockOSResource(volume_type_id='host_1',
                                             host='host_1')
        new_host = {'name': 'new_name', 'host': 'host_1'}
        old_provision = ''
        new_provision = 'thick'
        ret = utils.retype_need_migration(volume, old_provision,
                                          new_provision, new_host)
        self.assertTrue(ret)
././@PaxHeader0000000000000000000000000000003200000000000011450 xustar000000000000000026 mtime=1759315577.27112 cinder-27.0.0/cinder/tests/unit/volume/drivers/dell_emc/vnx/0000775000175000017500000000000000000000000024020 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/volume/drivers/dell_emc/vnx/__init__.py0000664000175000017500000000163100000000000026132 0ustar00zuulzuul00000000000000
# Copyright (c) 2016 EMC Corporation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sys
from unittest import mock

from cinder.tests.unit.volume.drivers.dell_emc.vnx import fake_exception
from cinder.tests.unit.volume.drivers.dell_emc.vnx import fake_storops

fake_vnx = mock.Mock()
fake_storops.exception = fake_exception
fake_storops.vnx = fake_vnx
sys.modules['storops'] = fake_storops
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/volume/drivers/dell_emc/vnx/fake_enum.py0000664000175000017500000000721100000000000026325 0ustar00zuulzuul00000000000000
# Copyright (c) 2016 EMC Corporation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import enum


class Enum(enum.Enum):
    @classmethod
    def verify(cls, value, allow_none=True):
        if value is None and not allow_none:
            raise ValueError(
                'None is not allowed here for %s.' % cls.__name__)
        elif value is not None and not isinstance(value, cls):
            raise ValueError('%(value)s is not an instance of %(name)s.' % {
                'value': value, 'name': cls.__name__})

    @classmethod
    def get_all(cls):
        return list(cls)

    @classmethod
    def get_opt(cls, value):
        option_map = cls.get_option_map()
        if option_map is None:
            raise NotImplementedError(
                'Option map is not defined for %s.' % cls.__name__)
        ret = option_map.get(value, None)
        if ret is None:
            raise ValueError('%(value)s is not a valid option for %(name)s.'
                             % {'value': value, 'name': cls.__name__})
        return ret

    @classmethod
    def parse(cls, value):
        if isinstance(value, str):
            ret = cls.from_str(value)
        elif isinstance(value, int):
            ret = cls.from_int(value)
        elif isinstance(value, cls):
            ret = value
        elif value is None:
            ret = None
        else:
            raise ValueError(
                'Not supported value type: %s.' % type(value))
        return ret

    def is_equal(self, value):
        if isinstance(value, str):
            ret = self.value.lower() == value.lower()
        else:
            ret = self.value == value
        return ret

    @classmethod
    def from_int(cls, value):
        ret = None
        int_index = cls.get_int_index()
        if int_index is not None:
            try:
                ret = int_index[value]
            except IndexError:
                pass
        else:
            try:
                ret = next(i for i in cls.get_all() if i.is_equal(value))
            except StopIteration:
                pass
        if ret is None:
            raise ValueError
        return ret

    @classmethod
    def from_str(cls, value):
        ret = None
        if value is not None:
            for item in cls.get_all():
                if item.is_equal(value):
                    ret = item
                    break
            else:
                cls._raise_invalid_value(value)
        return ret

    @classmethod
    def _raise_invalid_value(cls, value):
        msg = ('%(value)s is not a valid value for %(name)s.'
               ) % {'value': value, 'name': cls.__name__}
        raise ValueError(msg)

    @classmethod
    def get_option_map(cls):
        # No option map by default; subclasses may override.
        return None

    @classmethod
    def get_int_index(cls):
        return None

    @classmethod
    def values(cls):
        return [m.value for m in cls.__members__.values()]

    @classmethod
    def enum_name(cls):
        return cls.__name__


class VNXCtrlMethod(object):
    LIMIT_CTRL = 'limit'

    def __init__(self, method, metric, value):
        self.method = method
        self.metric = metric
        self.value = value
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/volume/drivers/dell_emc/vnx/fake_exception.py0000664000175000017500000001067600000000000027370 0ustar00zuulzuul00000000000000
# Copyright (c) 2016 EMC Corporation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.


class StoropsException(Exception):
    message = 'Storops Error.'


class VNXException(StoropsException):
    message = "VNX Error."
class VNXStorageGroupError(VNXException):
    pass


class VNXAttachAluError(VNXException):
    pass


class VNXAluAlreadyAttachedError(VNXAttachAluError):
    message = (
        'LUN already exists in the specified storage group',
        'Requested LUN has already been added to this Storage Group')


class VNXDetachAluError(VNXStorageGroupError):
    pass


class VNXDetachAluNotFoundError(VNXDetachAluError):
    message = 'No such Host LUN in this Storage Group'


class VNXCreateStorageGroupError(VNXStorageGroupError):
    pass


class VNXStorageGroupNameInUseError(VNXCreateStorageGroupError):
    message = 'Storage Group name already in use'


class VNXNoHluAvailableError(VNXStorageGroupError):
    pass


class VNXMigrationError(VNXException):
    pass


class VNXLunNotMigratingError(VNXException):
    pass


class VNXLunSyncCompletedError(VNXMigrationError):
    error_code = 0x714a8021


class VNXTargetNotReadyError(VNXMigrationError):
    message = 'The destination LUN is not available for migration'


class VNXSnapError(VNXException):
    pass


class VNXDeleteAttachedSnapError(VNXSnapError):
    error_code = 0x716d8003


class VNXCreateSnapError(VNXException):
    message = 'Cannot create the snapshot.'


class VNXAttachSnapError(VNXSnapError):
    message = 'Cannot attach the snapshot.'


class VNXDetachSnapError(VNXSnapError):
    message = 'Cannot detach the snapshot.'


class VNXSnapAlreadyMountedError(VNXSnapError):
    error_code = 0x716d8055


class VNXSnapNameInUseError(VNXSnapError):
    error_code = 0x716d8005


class VNXSnapNotExistsError(VNXSnapError):
    message = 'The specified snapshot does not exist.'


class VNXLunError(VNXException):
    pass


class VNXCreateLunError(VNXLunError):
    pass


class VNXLunNameInUseError(VNXCreateLunError):
    error_code = 0x712d8d04


class VNXLunExtendError(VNXLunError):
    pass


class VNXLunExpandSizeError(VNXLunExtendError):
    error_code = 0x712d8e04


class VNXLunPreparingError(VNXLunError):
    error_code = 0x712d8e0e


class VNXLunNotFoundError(VNXLunError):
    message = 'Could not retrieve the specified (pool lun).'


class VNXDeleteLunError(VNXLunError):
    pass


class VNXLunUsedByFeatureError(VNXLunError):
    pass


class VNXCompressionError(VNXLunError):
    pass


class VNXCompressionAlreadyEnabledError(VNXCompressionError):
    message = 'Compression on the specified LUN is already turned on.'


class VNXConsistencyGroupError(VNXException):
    pass


class VNXCreateConsistencyGroupError(VNXConsistencyGroupError):
    pass


class VNXConsistencyGroupNameInUseError(VNXCreateConsistencyGroupError):
    error_code = 0x716d8021


class VNXConsistencyGroupNotFoundError(VNXConsistencyGroupError):
    message = 'Cannot find the consistency group'


class VNXPingNodeError(VNXException):
    pass


class VNXMirrorException(VNXException):
    pass


class VNXMirrorNameInUseError(VNXMirrorException):
    message = 'Mirror name already in use'


class VNXMirrorPromotePrimaryError(VNXMirrorException):
    message = 'Cannot remove or promote a primary image.'
class VNXMirrorNotFoundError(VNXMirrorException):
    message = 'Mirror not found'


class VNXMirrorGroupNameInUseError(VNXMirrorException):
    message = 'Mirror Group name already in use'


class VNXMirrorGroupNotFoundError(VNXMirrorException):
    message = 'Unable to locate the specified group'


class VNXMirrorGroupAlreadyMemberError(VNXMirrorException):
    message = 'The mirror is already a member of a group'


class VNXMirrorGroupMirrorNotMemberError(VNXMirrorException):
    message = 'The specified mirror is not a member of the group'


class VNXMirrorGroupAlreadyPromotedError(VNXMirrorException):
    message = 'The Consistency Group has no secondary images to promote'
././@PaxHeader0000000000000000000000000000003200000000000011450 xustar000000000000000026 mtime=1759315577.27112 cinder-27.0.0/cinder/tests/unit/volume/drivers/dell_emc/vnx/fake_storops/0000775000175000017500000000000000000000000026517 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/volume/drivers/dell_emc/vnx/fake_storops/__init__.py0000664000175000017500000000350000000000000030626 0ustar00zuulzuul00000000000000
# Copyright (c) 2016 EMC Corporation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from cinder.tests.unit.volume.drivers.dell_emc.vnx import fake_enum


class VNXSystem(object):
    pass


class VNXEnum(fake_enum.Enum):
    pass


class VNXSPEnum(VNXEnum):
    SP_A = 'SP A'
    SP_B = 'SP B'
    CONTROL_STATION = 'Celerra'


class VNXProvisionEnum(VNXEnum):
    # value of spec "provisioning:type"
    THIN = 'thin'
    THICK = 'thick'
    COMPRESSED = 'compressed'
    DEDUPED = 'deduplicated'


class VNXMigrationRate(VNXEnum):
    LOW = 'low'
    MEDIUM = 'medium'
    HIGH = 'high'
    ASAP = 'asap'


class VNXTieringEnum(VNXEnum):
    NONE = 'none'
    HIGH_AUTO = 'starthighthenauto'
    AUTO = 'auto'
    HIGH = 'highestavailable'
    LOW = 'lowestavailable'
    NO_MOVE = 'nomovement'


class VNXMirrorViewRecoveryPolicy(VNXEnum):
    MANUAL = 'manual'
    AUTO = 'automatic'


class VNXMirrorViewSyncRate(VNXEnum):
    HIGH = 'high'
    MEDIUM = 'medium'
    LOW = 'low'


class VNXMirrorImageState(VNXEnum):
    SYNCHRONIZED = 'Synchronized'
    OUT_OF_SYNC = 'Out-of-Sync'
    SYNCHRONIZING = 'Synchronizing'
    CONSISTENT = 'Consistent'
    SCRAMBLED = 'Scrambled'
    INCOMPLETE = 'Incomplete'
    LOCAL_ONLY = 'Local Only'
    EMPTY = 'Empty'


VNXCtrlMethod = fake_enum.VNXCtrlMethod
././@PaxHeader0000000000000000000000000000003200000000000011450 xustar000000000000000026 mtime=1759315577.27112 cinder-27.0.0/cinder/tests/unit/volume/drivers/dell_emc/vnx/fake_storops/lib/0000775000175000017500000000000000000000000027265 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/volume/drivers/dell_emc/vnx/fake_storops/lib/__init__.py0000664000175000017500000000000000000000000031364 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/volume/drivers/dell_emc/vnx/fake_storops/lib/tasks.py0000664000175000017500000000155400000000000030771 0ustar00zuulzuul00000000000000
# Copyright (c) 2017 Dell Inc. or its subsidiaries.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
class PQueue(object): def __init__(self, path, interval=None): self.path = path self._interval = interval self.started = False def put(self, item): return item def start(self): self.started = True ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/volume/drivers/dell_emc/vnx/mocked_cinder.yaml0000664000175000017500000003147100000000000027500 0ustar00zuulzuul00000000000000########################################################### # Common ########################################################### volume: &volume_base _type: 'volume' _properties: &volume_base_properties status: 'creating' size: 1 id: _uuid: volume_id provider_auth: 'None' host: 'host@backendsec#unit_test_pool' project_id: _uuid: project_id provider_location: &provider_location _build_provider_location: &provider_location_dict id: 1 type: 'lun' system: 'fake_serial' base_lun_name: 'test' version: '07.00.00' display_name: 'volume-1' display_description: 'test volume' volume_type_id: consistencygroup_id: group_id: volume_attachment: _properties: {} volume_metadata: _properties: {} group: _type: 'group' _properties: {} host: &host_base _properties: host: 'host@backendsec#unit_test_pool' consistency_group: &cg_base _type: 'cg' _properties: &cg_base_properties id: _uuid: consistency_group_id status: 'creating' name: 'cg_name' host: 'host@backend#unit_test_pool' consistency_group_with_type: &cg_base_with_type _type: 'cg' _properties: <<: *cg_base_properties volume_type_id: 'type1' snapshot: &snapshot_base _type: 'snapshot' _properties: &snapshot_base_properties id: _uuid: snapshot_id status: available name: 'snapshot_name' volume: _type: 'volume' _properties: <<: *volume_base_properties name: 'attached_volume_name' volume_name: 'attached_volume_name' cg_snapshot: &cg_snapshot_base _type: 'cg_snapshot' _properties: &cg_snapshot_base_properties id: _uuid: cgsnapshot_id status: 'creating' group: &group_base _type: 'group' _properties: &group_base_properties id: _uuid: group_id name: 'test_group' status: 'creating' replication_status: 'enabled' connector: &connector_base _properties: host: host_1 initiator: ['iqn.2012-07.org.fake:01'] ip: 192.168.1.111 ########################################################### # TestCommonAdapter, TestISCSIAdapter, TestFCAdapter ########################################################### test_mock_driver_input_inner: volume: *volume_base test_create_volume: &test_create_volume volume: *volume_base test_create_volume_error: *test_create_volume test_create_thick_volume: *test_create_volume test_create_volume_with_qos: volume: _type: 'volume' _properties: <<: *volume_base_properties name: "volume_with_qos" volume_type_id: _uuid: volume_type_id test_migrate_volume: volume: *volume_base test_migrate_volume_host_assisted: volume: *volume_base test_delete_volume_not_force: &test_delete_volume_not_force volume: *volume_base test_delete_volume_force: *test_delete_volume_not_force test_delete_async_volume: volume: *volume_base test_delete_async_volume_migrating: volume: *volume_base test_delete_async_volume_not_from_snapshot: volume: *volume_base test_delete_async_volume_from_snapshot: volume: *volume_base test_retype_need_migration_when_host_changed: volume: *volume_base host: _properties: host: 'host@backendsec#another_pool' test_retype_need_migration_for_smp_volume: volume: _type: 'volume' _properties: <<: *volume_base_properties provider_location: _build_provider_location: <<: *provider_location_dict type: 'smp' host: 
*host_base test_retype_need_migration_when_provision_changed: volume: *volume_base host: *host_base test_retype_not_need_migration_when_provision_changed: volume: *volume_base host: *host_base test_retype_not_need_migration: volume: *volume_base host: *host_base test_retype_need_migration: volume: _type: 'volume' _properties: <<: *volume_base_properties volume_type_id: _uuid: volume_type_id host: *host_base test_retype_lun_has_snap: volume: *volume_base host: *host_base test_retype_turn_on_compression_change_tier: volume: *volume_base host: *host_base test_retype_change_tier: volume: *volume_base host: *host_base test_create_consistencygroup: cg: *cg_base test_delete_consistencygroup: cg: *cg_base test_delete_consistencygroup_with_volume: cg: *cg_base vol1: *volume_base vol2: *volume_base test_delete_consistencygroup_error: cg: *cg_base vol1: *volume_base vol2: *volume_base test_delete_consistencygroup_volume_error: cg: *cg_base vol1: *volume_base vol2: *volume_base test_extend_volume: volume: *volume_base test_create_snapshot_adapter: snapshot: *snapshot_base test_delete_snapshot_adapter: snapshot: *snapshot_base test_restore_snapshot_adapter: volume: *volume_base snapshot: *snapshot_base test_do_create_cgsnap: &cg_snap_and_snaps cg_snap: *cg_snapshot_base snap1: *snapshot_base snap2: *snapshot_base test_do_delete_cgsnap: *cg_snap_and_snaps test_manage_existing_lun_no_exist: volume: *volume_base test_manage_existing_invalid_pool: volume: *volume_base test_manage_existing_get_size: volume: *volume_base test_manage_existing_type_mismatch: volume: _type: 'volume' _properties: <<: *volume_base_properties volume_type_id: _uuid: volume_type_id test_manage_existing: volume: _type: 'volume' _properties: <<: *volume_base_properties volume_type_id: _uuid: volume_type_id test_manage_existing_smp: volume: *volume_base test_create_cloned_volume: volume: *volume_base src_vref: _type: volume _properties: <<: *volume_base_properties id: _uuid: volume2_id size: 2 test_create_cloned_volume_snapcopy: volume: _type: volume _properties: <<: *volume_base_properties src_vref: _type: volume _properties: <<: *volume_base_properties id: _uuid: volume2_id size: 2 test_create_volume_from_snapshot: volume: *volume_base snapshot: *snapshot_base test_create_volume_from_snapshot_snapcopy: volume: *volume_base snapshot: *snapshot_base test_get_base_lun_name: volume: *volume_base test_do_create_cg_from_cgsnap: vol1: _type: 'volume' _properties: <<: *volume_base_properties id: _uuid: volume_id vol2: _type: 'volume' _properties: <<: *volume_base_properties id: _uuid: volume2_id snap1: _type: 'snapshot' _properties: <<: *snapshot_base_properties id: _uuid: snapshot_id snap2: _type: 'snapshot' _properties: <<: *snapshot_base_properties id: _uuid: snapshot2_id test_do_clone_cg: vol1: _type: 'volume' _properties: <<: *volume_base_properties id: _uuid: consistency_group_id src_vol1: _type: 'volume' _properties: <<: *volume_base_properties id: _uuid: consistency_group2_id test_assure_host_access: volume: *volume_base test_assure_host_access_without_auto_register_new_sg: volume: *volume_base test_assure_host_access_without_auto_register: volume: *volume_base test_auto_register_initiator: volume: *volume_base test_auto_register_initiator_no_white_list: volume: *volume_base test_auto_register_initiator_no_port_to_reg: volume: *volume_base test_terminate_connection: volume: *volume_base connector: *connector_base test_terminate_connection_force_detach: volume: *volume_base test_remove_host_access: volume: *volume_base 
test_remove_host_access_sg_absent: volume: *volume_base test_remove_host_access_volume_not_in_sg: volume: *volume_base test_do_update_cg: cg: *cg_base volume_add: <<: *volume_base _properties: <<: *volume_base_properties provider_location: _build_provider_location: <<: *provider_location_dict id: 1 volume_remove: <<: *volume_base _properties: <<: *volume_base_properties provider_location: _build_provider_location: <<: *provider_location_dict id: 2 test_create_export_snapshot: snapshot: *snapshot_base test_remove_export_snapshot: snapshot: *snapshot_base test_initialize_connection_snapshot: snapshot: *snapshot_base test_terminate_connection_snapshot: snapshot: *snapshot_base test_setup_lun_replication: vol1: &volume_for_replication _type: 'volume' _properties: <<: *volume_base_properties id: _uuid: volume_id volume_type_id: _uuid: volume_type_id test_setup_lun_replication_in_group: group1: _type: 'group' _properties: <<: *group_base_properties group_type_id: _uuid: group_type_id vol1: *volume_for_replication test_cleanup_replication: vol1: _type: 'volume' _properties: <<: *volume_base_properties id: _uuid: volume2_id volume_type_id: _uuid: volume_type_id test_failover_host: vol1: _type: 'volume' _properties: <<: *volume_base_properties id: _uuid: volume3_id volume_type_id: _uuid: volume_type_id test_failover_host_invalid_backend_id: vol1: _type: 'volume' _properties: <<: *volume_base_properties id: _uuid: volume4_id volume_type_id: _uuid: volume_type_id test_failover_host_failback: vol1: _type: 'volume' _properties: <<: *volume_base_properties id: _uuid: volume5_id volume_type_id: _uuid: volume_type_id replication_status: enabled test_failover_host_groups: group1: _type: 'group' _properties: <<: *group_base_properties id: _uuid: group_id group_type_id: _uuid: group_type_id replication_status: failed-over volumes: [*volume_base, *volume_base] vol1: _type: 'volume' _properties: <<: *volume_base_properties id: _uuid: volume4_id volume_type_id: _uuid: volume_type_id replication_status: failed-over vol2: _type: 'volume' _properties: <<: *volume_base_properties id: _uuid: volume4_id volume_type_id: _uuid: volume_type_id replication_status: failed-over test_get_pool_name: volume: *volume_base test_update_migrated_volume: volume: *volume_base new_volume: *volume_base test_update_migrated_volume_smp: volume: *volume_base new_volume: <<: *volume_base _properties: <<: *volume_base_properties provider_location: _build_provider_location: <<: *provider_location_dict type: smp test_create_group_snap: test_create_cloned_cg: test_create_cloned_group: test_create_cg_from_cgsnapshot: test_create_group_from_group_snapshot: test_create_cgsnapshot: test_create_group_snapshot: test_delete_group_snapshot: test_delete_cgsnapshot: ########################################################### # TestReplicationAdaper ########################################################### test_enable_replication: volume1: *volume_base volume2: *volume_base group: *group_base test_disable_replication: volume1: *volume_base volume2: *volume_base group: *group_base test_failover_replication: volume1: *volume_base volume2: *volume_base group: *group_base ########################################################### # TestUtils ########################################################### test_validate_cg_type: cg: _properties: id: _uuid: GROUP_ID volume_type_ids: ['type1'] test_require_consistent_group_snapshot_enabled: group: _type: 'group' _properties: id: _uuid: group_id group_type_id: _uuid: group_type_id 
test_is_image_cache_volume_false: volume: *volume_base test_is_image_cache_volume_true: volume: *volume_base test_calc_migrate_and_provision_image_cache: volume: *volume_base test_calc_migrate_and_provision: volume: *volume_base test_calc_migrate_and_provision_default: volume: *volume_base test_get_backend_qos_specs: volume: _type: 'volume' _properties: <<: *volume_base_properties volume_type_id: _uuid: volume_type_id test_check_type_matched_invalid: volume: _type: 'volume' _properties: <<: *volume_base_properties volume_type_id: _uuid: volume_type_id group: _type: 'group' _properties: id: _uuid: group_id group_type_id: _uuid: group_type_id test_check_rep_status_matched_disabled: group: _type: 'group' _properties: id: _uuid: group_id group_type_id: _uuid: group_type_id replication_status: 'disabled' ########################################################### # TestClient ########################################################### test_get_lun_id: volume: *volume_base test_get_lun_id_without_provider_location: volume: <<: *volume_base _properties: <<: *volume_base_properties provider_location: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/volume/drivers/dell_emc/vnx/mocked_vnx.yaml0000664000175000017500000015127300000000000027052 0ustar00zuulzuul00000000000000########################################################### # Example: # vnx: # _properties: # properties # serial: serial_1 # name: lun_1 # state: # _side_effect: [Ready, Offline] # side effect for property # # _methods: # methods # get_pool: *pool_1 # return value of method # get_lun: # _raise: # GetLunError: Unkown Error # method raise exception # get_cg: # _side_effect: [cg_1, cg_2] # side effect for method # ########################################################### ########################################################### # Common ########################################################### lun_base: &lun_base _properties: &lun_base_prop name: lun_name lun_id: lun_id wwn: 'fake_wwn' poll: False operation: None state: Ready existed: true cg_base: _properties: &cg_base_prop fake_prop: fake_prop_value cg_snap_base: _properties: &cg_snap_base_prop id: 'cg_snap_id' pool_base: &pool_base _properties: &pool_base_prop name: pool_name pool_id: 0 state: Ready user_capacity_gbs: 1311 total_subscribed_capacity_gbs: 131 available_capacity_gbs: 132 percent_full_threshold: 70 fast_cache: True pool_feature_base: _properties: &pool_feature_base_prop max_pool_luns: 3000 total_pool_luns: 151 vnx_base: &vnx_base _properties: &vnx_base_prop serial: fake_serial snapshot_base: &snapshot_base _properties: &snapshot_base_prop status: existed: true name: snapshot_name state: sg: &sg_base _properties: &sg_base_prop existed: true name: sg_name spa: &spa _enum: VNXSPEnum: SP A spb: &spb _enum: VNXSPEnum: SP B iscsi_port_base: &iscsi_port_base _type: 'VNXPort' _properties: &iscsi_port_base_prop sp: *spa port_id: 0 vport_id: 0 all_iscsi_ports: &all_iscsi_ports - &iscsi_port_a-0-0 <<: *iscsi_port_base _properties: <<: *iscsi_port_base_prop port_id: 0 vport_id: 0 - &iscsi_port_a-0-1 <<: *iscsi_port_base _properties: <<: *iscsi_port_base_prop port_id: 0 vport_id: 1 - &iscsi_port_a-1-0 <<: *iscsi_port_base _properties: <<: *iscsi_port_base_prop port_id: 1 vport_id: 0 - &iscsi_port_b-0-1 <<: *iscsi_port_base _properties: <<: *iscsi_port_base_prop sp: *spb port_id: 0 vport_id: 1 fc_port_base: &fc_port_base _type: 'VNXPort' _properties: &fc_port_base_prop sp: *spa port_id: 
1 vport_id: None wwn: 'wwn' link_status: 'Up' port_status: 'Online' all_fc_ports: &all_fc_ports - &fc_port_a-1 <<: *fc_port_base _properties: <<: *fc_port_base_prop port_id: 1 wwn: '50:06:01:60:B6:E0:1C:F4:50:06:01:66:36:E0:1C:A1' - &fc_port_a-2 <<: *fc_port_base _properties: <<: *fc_port_base_prop port_id: 2 wwn: '50:06:01:60:B6:E0:1C:F4:50:06:01:66:36:E0:1C:A2' port_status: 'Offline' - &fc_port_b-2 <<: *fc_port_base _properties: <<: *fc_port_base_prop sp: *spb port_id: 2 wwn: '50:06:01:60:B6:E0:1C:F4:50:06:01:66:36:E0:1C:B2' mirror_base: &mirror_base _properties: &mirror_base_prop image_state: _type: VNXMirrorImageState value: 'SYNCHRONIZED' mirror_group_base: &mirror_group_base _properties: &mirror_group_base_prop condition: 'Active' existed: true name: 'base_group' role: 'Primary' state: 'Synchronized' ########################################################### # TestClient ########################################################### test_create_lun: &test_create_lun lun: &lun_test_create_lun _properties: <<: *lun_base_prop name: lun1 _methods: update: with_no_poll: _context pool: &pool_test_create_lun _properties: <<: *pool_base_prop name: pool1 _methods: create_lun: *lun_test_create_lun with_no_poll: _context vnx: _properties: <<: *vnx_base_prop _methods: get_pool: *pool_test_create_lun test_create_lun_error: &test_create_lun_error pool: &pool_test_create_lun_error _properties: <<: *pool_base_prop _methods: create_lun: _raise: VNXCreateLunError: Unkown Error with_no_poll: _context vnx: _properties: <<: *vnx_base_prop _methods: get_pool: *pool_test_create_lun_error test_is_lun_io_ready_false: lun: _properties: <<: *lun_base_prop state: Initializing _methods: update: with_no_poll: _context test_is_lun_io_ready_true: lun: _properties: <<: *lun_base_prop state: Ready operation: None _methods: update: with_no_poll: _context test_is_lun_io_ready_exception: lun: _properties: <<: *lun_base_prop state: Deleting _methods: update: with_no_poll: _context test_create_lun_in_cg: cg: &cg_test_create_lun_in_cg _properties: <<: *cg_base_prop _methods: add_member: vnx: _properties: <<: *vnx_base_prop _methods: get_lun: *lun_test_create_lun get_pool: *pool_test_create_lun get_cg: *cg_test_create_lun_in_cg test_create_lun_compression: lun: &lun_test_create_lun_compression _properties: <<: *lun_base_prop name: lun2 _methods: update: with_no_poll: _context pool: &pool_test_create_lun_compression _properties: <<: *pool_base_prop _methods: create_lun: *lun_test_create_lun_compression with_no_poll: _context vnx: _properties: <<: *vnx_base_prop _methods: get_pool: *pool_test_create_lun_compression test_create_lun_already_existed: lun: &lun_test_create_lun_already_existed _properties: <<: *lun_base_prop name: lun3 _methods: update: with_no_poll: _context pool: &pool_test_create_lun_already_existed _properties: <<: *pool_base_prop _methods: with_no_poll: _context create_lun: _raise: VNXLunNameInUseError: Lun already exists(0x712d8d04) vnx: _properties: <<: *vnx_base_prop _methods: get_lun: *lun_test_create_lun_already_existed get_pool: *pool_test_create_lun_already_existed test_migrate_lun: lun: &lun_migrate _properties: <<: *lun_base_prop _methods: migrate: vnx: _methods: get_lun: *lun_migrate test_migrate_lun_with_retry: lun: &lun_migrate_retry _properties: <<: *lun_base_prop _methods: migrate: _raise: VNXTargetNotReadyError: 'The destination LUN is not available for migration' vnx: _methods: get_lun: *lun_migrate_retry test_session_finished_faulted: session: &session_faulted _properties: existed: true 
current_state: 'FAULTED' vnx: _methods: get_lun: *lun_base get_migration_session: *session_faulted test_session_finished_migrating: session: &session_migrating _properties: existed: true current_state: 'MIGRATING' vnx: _methods: get_lun: *lun_base get_migration_session: *session_migrating test_session_finished_not_existed: session: &session_not_existed _properties: existed: false vnx: _methods: get_lun: *lun_base get_migration_session: *session_not_existed test_migrate_lun_error: lun1: &lun_migrate_error <<: *lun_base_prop _methods: migrate: _raise: VNXMigrationError: 'Unknown Error' vnx: _methods: get_lun: *lun_migrate_error test_verify_migration: lun1: &src_lun _properties: <<: *lun_base_prop lun2: &dst_lun _properties: poll: false wwn: 'fake_wwn' session: &session_verify _properties: existed: false vnx: _methods: get_lun: _side_effect: [*src_lun, *dst_lun] get_migration_session: *session_verify test_verify_migration_false: vnx: _methods: get_lun: _side_effect: [*src_lun, *dst_lun] get_migration_session: *session_verify test_cleanup_migration: session: &session_cancel _properties: existed: true dest_lu_id: 2 lun: &lun_cancel_migrate _methods: cancel_migrate: vnx: _methods: get_migration_session: *session_cancel get_lun: *lun_cancel_migrate test_cleanup_migration_not_migrating: lun: &lun_cancel_migrate_not_migrating _methods: cancel_migrate: _raise: VNXLunNotMigratingError: The LUN is not migrating vnx: _methods: get_migration_session: *session_cancel get_lun: *lun_cancel_migrate_not_migrating test_cleanup_migration_cancel_failed: lun: &lun_cancel_migrate_cancel_failed _methods: cancel_migrate: _raise: VNXLunSyncCompletedError: The LUN is not migrating _properties: wwn: test vnx: _methods: get_migration_session: _side_effect: [*session_cancel, *session_verify] get_lun: *lun_cancel_migrate_cancel_failed test_get_lun_by_name: lun: &lun_test_get_lun_by_name _properties: <<: *lun_base_prop lun_id: 888 name: lun_name_test_get_lun_by_name vnx: _properties: <<: *vnx_base_prop _methods: get_lun: *lun_test_get_lun_by_name test_delete_lun: &test_delete_lun lun: &lun_test_delete_lun _properties: <<: *lun_base_prop name: lun_test_delete_lun is_snap_mount_point: False _methods: delete: vnx: _properties: <<: *vnx_base_prop _methods: get_lun: *lun_test_delete_lun test_delete_smp: &test_delete_smp snapshot: &snapshot_test_delete_smp _properties: name: snapshot_test_delete_smp _methods: delete: lun: &lun_test_delete_smp _properties: <<: *lun_base_prop name: lun_test_delete_smp _methods: delete: vnx: _properties: <<: *vnx_base_prop _methods: get_lun: *lun_test_delete_smp get_snap: *snapshot_test_delete_smp test_delete_lun_not_exist: lun: &lun_test_delete_lun_not_exist _properties: <<: *lun_base_prop name: lun_test_delete_lun_not_exist is_snap_mount_point: False _methods: delete: _raise: VNXLunNotFoundError: Lun to delete doesn't exist. vnx: _properties: <<: *vnx_base_prop _methods: get_lun: *lun_test_delete_lun_not_exist test_delete_lun_exception: lun: &lun_test_delete_lun_exception _properties: <<: *lun_base_prop name: lun_test_delete_lun_exception is_snap_mount_point: False _methods: delete: _raise: VNXDeleteLunError: General lun delete error. 
vnx: _properties: <<: *vnx_base_prop _methods: get_lun: *lun_test_delete_lun_exception test_cleanup_async_lun: lun: &lun_test_cleanup_async_lun _properties: <<: *lun_base_prop name: lun_test_cleanup_async_lun is_snap_mount_point: True _methods: delete: cancel_migrate: snap: &snap_test_cleanup_async_lun _methods: delete: vnx: _properties: <<: *vnx_base_prop _methods: get_lun: *lun_test_cleanup_async_lun get_snap: *snap_test_cleanup_async_lun get_migration_session: *session_migrating test_create_cg: &test_create_cg cg: &cg_for_create _properties: existed: True _methods: update: with_no_poll: _context vnx: _methods: create_cg: *cg_for_create test_create_cg_already_existed: vnx: _methods: create_cg: _raise: VNXConsistencyGroupNameInUseError: Already in use get_cg: *cg_for_create test_delete_cg: cg: &cg_for_deletion _methods: delete: vnx: _methods: get_cg: *cg_for_deletion test_delete_cg_not_existed: cg: &cg_delete_no_existed _methods: delete: _raise: VNXConsistencyGroupNotFoundError: CG not found vnx: _methods: get_cg: *cg_delete_no_existed test_expand_lun: &test_expand_lun lun: &lun_test_expand_lun _properties: <<: *lun_base_prop name: lun_test_expand_lun total_capacity_gb: 10 _methods: expand: update: with_no_poll: _context vnx: _properties: *vnx_base_prop _methods: get_lun: *lun_test_expand_lun test_expand_lun_not_poll: *test_expand_lun test_expand_lun_already_expanded: lun: &lun_test_expand_lun_already_expanded _properties: <<: *lun_base_prop total_capacity_gb: 10 _methods: update: with_no_poll: _context expand: _raise: VNXLunExpandSizeError: LUN already expanded. vnx: _properties: *vnx_base_prop _methods: get_lun: *lun_test_expand_lun_already_expanded test_expand_lun_not_ops_ready: lun: &lun_test_expand_lun_not_ops_ready _properties: <<: *lun_base_prop total_capacity_gb: 10 operation: 'None' _methods: update: with_no_poll: _context expand: _raise: VNXLunPreparingError: LUN operation not ready. vnx: _properties: *vnx_base_prop _methods: get_lun: *lun_test_expand_lun_not_ops_ready test_create_snapshot: &test_create_snapshot lun: &lun_test_create_snapshot <<: *lun_base _methods: create_snap: vnx: <<: *vnx_base _methods: get_lun: *lun_test_create_snapshot test_create_snapshot_snap_name_exist_error: lun: &lun_test_create_snapshot_snap_name_exist_error <<: *lun_base _methods: create_snap: _raise: VNXSnapNameInUseError: Snapshot name is in use. vnx: <<: *vnx_base _methods: get_lun: *lun_test_create_snapshot_snap_name_exist_error test_delete_snapshot: &test_delete_snapshot snapshot: &snapshot_test_delete_snapshot <<: *snapshot_base _methods: delete: vnx: <<: *vnx_base _methods: get_snap: *snapshot_test_delete_snapshot test_delete_snapshot_delete_attached_error: snapshot: &snapshot_test_delete_snapshot_delete_attached_error <<: *snapshot_base _methods: delete: _raise: VNXDeleteAttachedSnapError: Snapshot is attached to a LUN. 
vnx: <<: *vnx_base _methods: get_snap: *snapshot_test_delete_snapshot_delete_attached_error test_copy_snapshot: snap: &snap_copy _methods: copy: vnx: _methods: get_snap: *snap_copy test_create_mount_point: lun: &lun_mount_point _methods: create_mount_point: vnx: _methods: get_lun: *lun_mount_point test_attach_mount_point: lun: &lun_attach_snap _methods: attach_snap: vnx: _methods: get_lun: *lun_attach_snap test_detach_mount_point: lun: &lun_detach _methods: detach_snap: vnx: _methods: get_lun: *lun_detach test_modify_snapshot: snap: &snap_modify _methods: modify: vnx: _methods: get_snap: *snap_modify test_restore_snapshot: &test_restore_snapshot lun: &lun_restore _methods: restore_snap: vnx: _methods: get_lun: *lun_restore test_create_cg_snapshot: &test_create_cg_snapshot cg_snap: &cg_snap_exist _properties: existed: True _methods: update: with_no_poll: _context cg: &cg_test_create_cg_snapshot _methods: create_snap: *cg_snap_exist vnx: _methods: get_cg: *cg_test_create_cg_snapshot test_create_cg_snapshot_already_existed: cg: &cg_create_cg_snapshot_in_use_error _methods: with_no_poll: _context create_snap: _raise: VNXSnapNameInUseError: 'Already in use' vnx: _methods: get_cg: *cg_create_cg_snapshot_in_use_error get_snap: *cg_snap_exist test_delete_cg_snapshot: *test_delete_snapshot test_create_sg: sg: &sg_test_create_sg <<: *sg_base vnx: <<: *vnx_base _methods: create_sg: *sg_test_create_sg test_create_sg_name_in_use: vnx: <<: *vnx_base _methods: create_sg: _raise: VNXStorageGroupNameInUseError: Storage group name is in use. get_sg: *sg_base test_get_storage_group: sg: &sg_test_get_storage_group <<: *sg_base vnx: <<: *vnx_base _methods: get_sg: *sg_test_get_storage_group test_register_initiator: sg: &sg_test_register_initiator <<: *sg_base _methods: connect_hba: update: with_poll: _context vnx: *vnx_base test_register_initiator_exception: sg: &sg_test_register_initiator_exception <<: *sg_base _methods: connect_hba: _raise: VNXStorageGroupError: set_path error. update: with_poll: _context vnx: *vnx_base test_ping_node: iscsi_port: &iscsi_port_test_ping_node <<: *iscsi_port_base _methods: ping_node: vnx: <<: *vnx_base _methods: get_iscsi_port: *iscsi_port_test_ping_node test_ping_node_fail: iscsi_port: &iscsi_port_test_ping_node_fail <<: *iscsi_port_base _methods: ping_node: _raise: VNXPingNodeError: Failed to ping node. vnx: <<: *vnx_base _methods: get_iscsi_port: *iscsi_port_test_ping_node_fail test_add_lun_to_sg: sg: &sg_test_add_lun_to_sg <<: *sg_base _methods: attach_alu: 1 vnx: *vnx_base test_add_lun_to_sg_alu_already_attached: sg: &sg_test_add_lun_to_sg_alu_already_attached <<: *sg_base _methods: attach_alu: _raise: VNXAluAlreadyAttachedError: ALU is already attached. get_hlu: 1 vnx: *vnx_base test_add_lun_to_sg_alu_in_use: lun: _properties: <<: *lun_base_prop lun_id: 1 sg: &sg_test_add_lun_to_sg_alu_in_use <<: *sg_base _methods: attach_alu: _raise: VNXNoHluAvailableError: No HLU available. 
get_hlu: 1 vnx: *vnx_base test_update_consistencygroup_no_lun_in_cg: cg: _properties: <<: *cg_base_prop lun_list: [] _methods: replace_member: lun_1: _properties: <<: *lun_base_prop lun_id: 1 lun_2: _properties: <<: *lun_base_prop lun_id: 2 vnx: *vnx_base test_update_consistencygroup_lun_in_cg: lun_1: &lun_1_test_update_consistencygroup_lun_in_cg _properties: <<: *lun_base_prop lun_id: 1 lun_2: _properties: <<: *lun_base_prop lun_id: 2 cg: _properties: <<: *cg_base_prop lun_list: - *lun_1_test_update_consistencygroup_lun_in_cg _methods: replace_member: vnx: *vnx_base test_update_consistencygroup_remove_all: lun_1: &lun_1_test_update_consistencygroup_remove_all _properties: <<: *lun_base_prop lun_id: 1 cg: _properties: <<: *cg_base_prop lun_list: - *lun_1_test_update_consistencygroup_remove_all _methods: delete_member: vnx: *vnx_base test_create_export_snapshot: test_remove_export_snapshot: test_initialize_connection_snapshot: lun: &lun_test_initialize_connection_snapshot _properties: <<: *lun_base_prop lun_id: 100 vnx: <<: *vnx_base _methods: get_lun: *lun_test_initialize_connection_snapshot test_terminate_connection_snapshot: lun: &lun_test_terminate_connection_snapshot _properties: <<: *lun_base_prop lun_id: 100 vnx: <<: *vnx_base _methods: get_lun: *lun_test_terminate_connection_snapshot test_get_available_ip: vnx: _properties: alive_sp_ip: '192.168.1.5' test_create_mirror: vnx: _methods: get_lun: *lun_base create_mirror_view: *mirror_base test_create_mirror_already_created: vnx: _methods: get_lun: *lun_base create_mirror_view: _raise: VNXMirrorNameInUseError: 'name in use' get_mirror_view: *mirror_base test_delete_mirror: mirror: &mirror_test_delete_mirror _methods: delete: vnx: _methods: get_mirror_view: *mirror_test_delete_mirror test_delete_mirror_already_deleted: mirror: &mirror_delete_error _methods: delete: _raise: VNXMirrorNotFoundError: 'not found' vnx: _methods: get_mirror_view: *mirror_delete_error test_add_image: mirror: &mirror_test_add_image _methods: add_image: update: with_no_poll: _context with_poll: _context _properties: secondary_image: _properties: state: _enum: VNXMirrorImageState: 'Synchronized' vnx: _methods: get_mirror_view: *mirror_test_add_image test_remove_image: mirror: &mirror_remove_image _methods: remove_image: vnx: _methods: get_mirror_view: *mirror_remove_image test_fracture_image: mirror: &mirror_fracture_image _methods: fracture_image: vnx: _methods: get_mirror_view: *mirror_fracture_image test_sync_image: mirror: &mirror_sync_image _properties: <<: *mirror_base_prop secondary_image: _properties: state: _enum: VNXMirrorImageState: 'SYNCHRONIZED' _methods: sync_image: with_no_poll: _context update: vnx: _methods: get_mirror_view: *mirror_sync_image test_promote_image: mirror: &mirror_promote_image _methods: promote_image: vnx: _methods: get_mirror_view: *mirror_promote_image # Mirror group tests start test_create_mirror_group: vnx: _methods: create_mirror_group: *mirror_group_base test_create_mirror_group_name_in_use: vnx: _methods: create_mirror_group: _raise: VNXMirrorGroupNameInUseError: Mirror Group name already in use get_mirror_group: *mirror_group_base test_delete_mirror_group: group: &group_to_delete _methods: delete: vnx: _methods: get_mirror_group: *group_to_delete test_delete_mirror_group_not_found: group: &group_to_delete_not_found _methods: delete: _raise: VNXMirrorGroupNotFoundError: Unable to locate vnx: _methods: get_mirror_group: *group_to_delete_not_found test_add_mirror: group: &group_to_add _methods: add_mirror: vnx: _methods: 
get_mirror_group: *group_to_add get_mirror_view: *mirror_base test_add_mirror_already_added: group: &group_to_add_added _methods: add_mirror: _raise: VNXMirrorGroupAlreadyMemberError: already a member of a group vnx: _methods: get_mirror_group: *group_to_add_added get_mirror_view: *mirror_base test_remove_mirror: group: &group_to_remove _methods: remove_mirror: vnx: _methods: get_mirror_group: *group_to_remove get_mirror_view: *mirror_base test_remove_mirror_not_member: group: &group_to_remove_not_member _methods: remove_mirror: _raise: VNXMirrorGroupMirrorNotMemberError: not a member of the group vnx: _methods: get_mirror_group: *group_to_remove_not_member get_mirror_view: *mirror_base test_promote_mirror_group: group: &group_to_promote _methods: promote_group: vnx: _methods: get_mirror_group: *group_to_promote test_promote_mirror_group_already_promoted: group: &group_to_promote_already_promoted _methods: promote_group: _raise: VNXMirrorGroupAlreadyPromotedError: no secondary images to promote vnx: _methods: get_mirror_group: *group_to_promote_already_promoted test_sync_mirror_group: group: &group_to_sync _methods: sync_group: vnx: _methods: get_mirror_group: *group_to_sync test_fracture_mirror_group: group: &group_to_fracture _methods: fracture_group: vnx: _methods: get_mirror_group: *group_to_fracture test_get_lun_id: test_get_lun_id_without_provider_location: lun: &test_get_lun_id_without_provider_location <<: *lun_base _properties: <<: *lun_base_prop lun_id: 1 vnx: _methods: get_lun: *test_get_lun_id_without_provider_location test_get_ioclass: ioclass_false: &ioclass_false _properties: existed: False ioclass_true: &ioclass_true _properties: existed: True _methods: add_lun: vnx: _methods: get_ioclass: *ioclass_false create_ioclass: *ioclass_true test_create_ioclass_iops: vnx: _methods: create_ioclass: *ioclass_true test_create_ioclass_bws: vnx: _methods: create_ioclass: *ioclass_true test_create_policy: policy: &policy _properties: state: "Running" existed: False _methods: add_class: run_policy: vnx: _methods: get_policy: *policy create_policy: *policy test_get_running_policy: vnx: _methods: get_policy: [*policy, *policy] test_add_lun_to_ioclass: vnx: _methods: get_ioclass: *ioclass_true test_set_max_luns_per_sg: vnx: *vnx_base ########################################################### # TestCommonAdapter ########################################################### test_create_volume: *test_create_lun test_create_volume_error: *test_create_lun_error test_create_thick_volume: *test_create_lun test_create_volume_with_qos: vnx: _properties: <<: *vnx_base_prop _methods: get_pool: *pool_test_create_lun get_ioclass: *ioclass_true get_policy: [*policy] test_migrate_volume: lun: &src_lun_1 _properties: <<: *lun_base_prop lun_id: 4 wwn: 'src_wwn' poll: false _methods: migrate: update: with_no_poll: _context lun2: &lun_migrate_1 _properties: lun_id: 5 wwn: 'dst_wwn' _methods: cancel_migrate: lun3: &lun_not_existed _properties: wwn: session: &session _properties: existed: false pool: &migrate_pool _methods: create_lun: *src_lun_1 with_no_poll: _context vnx: _methods: get_lun: _side_effect: [*lun_migrate_1, *src_lun_1, *src_lun_1, *lun_not_existed] get_migration_session: *session get_pool: *migrate_pool test_migrate_volume_host_assisted: vnx: _methods: test_create_cloned_volume: snap: &snap_for_clone _methods: delete: thick_base_lun: &test_create_cloned_volume_thick_base_lun _properties: is_thin_lun: fase smp: &smp_migrate _properties: <<: *lun_base_prop lun_id: 4 wwn: 'src_wwn' poll: false 
is_thin_lun: false total_capacity_gb: 10 primary_lun: *test_create_cloned_volume_thick_base_lun _methods: migrate: update: with_no_poll: _context lun2: &lun_migrate_2 _properties: lun_id: 5 wwn: 'dst_wwn' _methods: cancel_migrate: create_snap: create_mount_point: attach_snap: lun3: &lun_not_existed_2 _properties: wwn: session: &session_2 _properties: existed: false pool: &migrate_pool_2 _methods: create_lun: *smp_migrate with_no_poll: _context vnx: _properties: serial: fake_serial _methods: get_lun: _side_effect: [*lun_migrate_2, *lun_migrate_2, *smp_migrate, *lun_migrate_2, *smp_migrate, *lun_not_existed_2, *smp_migrate, *smp_migrate, *lun_not_existed_2] get_migration_session: *session_2 get_pool: *migrate_pool_2 get_snap: *snap_for_clone test_create_cloned_volume_snapcopy: lun: &lun_for_snapcopy _methods: create_mount_point: create_snap: smp: &smp_for_snapcopy _properties: lun_id: 11 _methods: attach_snap: vnx: _properties: serial: fake_serial _methods: get_lun: _side_effect: [*lun_for_snapcopy, *lun_for_snapcopy, *smp_for_snapcopy, *smp_for_snapcopy] get_pool: *pool_base test_create_volume_from_snapshot: lun: &lun_from_snapshot _properties: lun_id: 16 _methods: create_mount_point: smp: &smp_from_lun _properties: is_thin_lun: false total_capacity_gb: 10 primary_lun: *test_create_cloned_volume_thick_base_lun _methods: attach_snap: vnx: _properties: serial: fake_serial _methods: get_lun: _side_effect: [*lun_from_snapshot, *lun_from_snapshot, *smp_from_lun, *smp_from_lun, *dst_lun, *src_lun_1, *src_lun_1, *lun_not_existed] get_pool: *pool_test_create_lun get_migration_session: *session test_create_volume_from_snapshot_snapcopy: snap: &snap_for_snapcopy _methods: copy: modify: vnx: _properties: serial: fake_serial _methods: get_snap: _side_effect: [*snap_for_snapcopy, *snap_for_snapcopy] get_lun: _side_effect: [*lun_from_snapshot, *lun_from_snapshot, *smp_from_lun] test_parse_pools: &test_parse_pools pool1: &pool_test_parse_pools_1 _properties: <<: *pool_base_prop name: 'pool5' pool2: &pool_test_parse_pools_2 _properties: <<: *pool_base_prop name: 'pool6' vnx: _properties: <<: *vnx_base_prop _methods: get_pool: [*pool_test_parse_pools_1, *pool_test_parse_pools_2] test_parse_pools_one_invalid_pool: *test_parse_pools test_parse_pools_all_invalid_pools: *test_parse_pools test_get_enabler_stats: &test_get_enabler_stats vnx: &vnx_test_get_enabler_stats _properties: <<: *vnx_base_prop _methods: is_compression_enabled: True is_dedup_enabled: True is_fast_cache_enabled: True is_thin_enabled: True is_snap_enabled: True is_auto_tiering_enabled: True test_get_pool_stats: pool_feature: &pool_feature_test_get_pool_stats _properties: <<: *pool_feature_base_prop vnx: _properties: <<: *vnx_base_prop _methods: get_pool: [*pool_test_parse_pools_1, *pool_test_parse_pools_2] get_pool_feature: *pool_feature_test_get_pool_stats is_auto_tiering_enabled: True test_get_pool_stats_max_luns_reached: pool_feature: &pool_feature_test_get_pool_stats_max_luns_reached _properties: <<: *pool_feature_base_prop total_pool_luns: 3001 vnx: _properties: <<: *vnx_base_prop _methods: get_pool: [*pool_test_parse_pools_1, *pool_test_parse_pools_2] get_pool_feature: *pool_feature_test_get_pool_stats_max_luns_reached is_auto_tiering_enabled: True test_get_pool_stats_with_reserved: vnx: _properties: <<: *vnx_base_prop _methods: get_pool: [*pool_test_parse_pools_1, *pool_test_parse_pools_2] get_pool_feature: *pool_feature_test_get_pool_stats is_auto_tiering_enabled: True test_get_pool_stats_offline: pool1: 
&pool_test_get_pool_stats_offline_1 _properties: <<: *pool_base_prop name: 'pool7' state: 'Offline' pool2: &pool_test_get_pool_stats_offline_2 _properties: <<: *pool_base_prop name: 'pool8' state: 'Offline' vnx: _properties: <<: *vnx_base_prop _methods: get_pool: [*pool_test_get_pool_stats_offline_1, *pool_test_get_pool_stats_offline_2] get_pool_feature: *pool_feature_test_get_pool_stats is_compression_enabled: False is_dedup_enabled: True is_fast_cache_enabled: False is_thin_enabled: True is_snap_enabled: False is_auto_tiering_enabled: True test_update_volume_stats: *test_get_enabler_stats test_append_volume_stats: vnx: _properties: serial: fake_serial test_delete_volume_not_force: *test_delete_lun test_delete_volume_force: *test_delete_lun test_delete_async_volume: snap: &snap_test_delete_async_volume _methods: delete: vnx: _methods: get_lun: *lun_test_delete_lun get_snap: *snap_test_delete_async_volume test_delete_async_volume_migrating: lun: &lun_used_by_feature _properties: is_snap_mount_point: false _methods: cancel_migrate: delete: _raise: VNXLunUsedByFeatureError: vnx: _methods: get_lun: *lun_used_by_feature get_snap: *snap_test_delete_async_volume test_delete_async_volume_not_from_snapshot: vnx: _methods: get_lun: *lun_test_delete_lun test_delete_async_volume_from_snapshot: snap: &snap_test_delete_async_volume_from_snapshot _methods: delete: vnx: _methods: get_lun: *lun_test_delete_lun get_snap: *snap_test_delete_async_volume_from_snapshot test_enable_compression: lun: _properties: <<: *lun_base_prop _methods: enable_compression: test_enable_compression_on_compressed_lun: lun: _properties: <<: *lun_base_prop _methods: enable_compression: _raise: VNXCompressionAlreadyEnabledError: test_lun_has_snapshot_false: lun: _properties: <<: *lun_base_prop _methods: get_snap: [] test_lun_has_snapshot_true: lun: _properties: <<: *lun_base_prop _methods: get_snap: ['fake_snap'] test_get_vnx_enabler_status: vnx: _methods: is_dedup_enabled: True is_compression_enabled: False is_thin_enabled: True is_fast_cache_enabled: True is_auto_tiering_enabled: False is_snap_enabled: True test_retype_type_invalid: vnx: _methods: is_dedup_enabled: True is_compression_enabled: True is_thin_enabled: True is_fast_cache_enabled: True is_auto_tiering_enabled: True is_snap_enabled: True test_retype_need_migration: lun: &lun_retype_need_migration _properties: <<: *lun_base_prop _methods: get_snap: [] with_no_poll: _context update: vnx: _methods: get_lun: _side_effect: [*lun_retype_need_migration] test_retype_turn_on_compression_change_tier: lun: &lun_retype_turn_on_compression_change_tier _properties: <<: *lun_base_prop provision: _enum: VNXProvisionEnum: 'thin' tier: _enum: VNXTieringEnum: 'auto' _methods: enable_compression: get_snap: [] with_no_poll: _context update: vnx: _methods: get_lun: *lun_retype_turn_on_compression_change_tier test_retype_lun_has_snap: lun: &lun_retype_lun_has_snap _properties: <<: *lun_base_prop provision: _enum: VNXProvisionEnum: 'thick' tier: _enum: VNXTieringEnum: 'auto' _methods: get_snap: ['fake_snap'] with_no_poll: _context update: vnx: _methods: get_lun: *lun_retype_lun_has_snap test_retype_change_tier: lun: &lun_retype_change_tier _properties: <<: *lun_base_prop provision: _enum: VNXProvisionEnum: 'thick' tier: _enum: VNXTieringEnum: 'nomovement' _methods: with_no_poll: _context update: vnx: _methods: get_lun: *lun_retype_change_tier test_create_consistencygroup: *test_create_cg test_delete_consistencygroup: vnx: _methods: get_cg: *cg_for_deletion 
test_delete_consistencygroup_with_volume: vnx: _methods: get_cg: *cg_for_deletion get_lun: *lun_test_delete_lun test_delete_consistencygroup_error: cg: &cg_delete_error _methods: delete: _raise: VNXConsistencyGroupError: Unable to delete cg vnx: _methods: get_cg: *cg_delete_error test_delete_consistencygroup_volume_error: vnx: _methods: get_cg: *cg_for_deletion get_lun: *lun_test_delete_lun_exception test_extend_volume: *test_expand_lun test_create_snapshot_adapter: *test_create_snapshot test_delete_snapshot_adapter: *test_delete_snapshot test_restore_snapshot_adapter: *test_restore_snapshot test_do_create_cgsnap: *test_create_cg_snapshot test_do_delete_cgsnap: cg_snap: &cg_snap_delete _methods: delete: vnx: _methods: get_snap: *cg_snap_delete test_do_create_cg_from_cgsnap: snap: &copied_cg_snap _methods: copy: modify: smp: &smp_from_src_lun _properties: lun_id: 12 _methods: attach_snap: lun: &src_lun_in_cg _methods: create_mount_point: *smp_from_src_lun lun2: &new_lun _properties: poll: false wwn: 'new_wwn' vnx: _properties: _methods: get_snap: _side_effect: [*copied_cg_snap, *copied_cg_snap, *snapshot_test_delete_snapshot] get_lun: _side_effect: [*src_lun_in_cg, *smp_from_src_lun, *smp_from_src_lun, *new_lun, *lun_migrate, *src_lun, *new_lun] get_pool: *pool_test_create_lun get_migration_session: *session_verify create_cg: *cg_for_create test_do_clone_cg: vnx: _properties: _methods: get_cg: *cg_test_create_cg_snapshot get_snap: *snapshot_test_delete_snapshot get_lun: _side_effect: [*src_lun_in_cg, *smp_from_src_lun, *smp_from_src_lun, *new_lun, *lun_migrate, *src_lun, *new_lun] get_pool: *pool_test_create_lun get_migration_session: *session_verify create_cg: *cg_for_create test_validate_ports_iscsi: &test_validate_ports_iscsi iscsi_port_a-0-0: *iscsi_port_a-0-0 vnx: <<: *vnx_base _methods: get_iscsi_port: *all_iscsi_ports test_validate_ports_iscsi_invalid: *test_validate_ports_iscsi test_validate_ports_iscsi_not_exist: *test_validate_ports_iscsi test_validate_ports_fc: &test_validate_ports_fc fc_port_a-1: *fc_port_a-1 vnx: <<: *vnx_base _methods: get_fc_port: *all_fc_ports test_validate_ports_fc_invalid: *test_validate_ports_fc test_validate_ports_fc_not_exist: *test_validate_ports_fc test_manage_existing_lun_no_exist: lun: &lun_manage_lun_not_exist _properties: existed: False vnx: _methods: get_lun: *lun_manage_lun_not_exist test_manage_existing_invalid_ref: lun: *lun_manage_lun_not_exist test_manage_existing_invalid_pool: lun: &lun_manage_in_other_pool _properties: existed: True pool_name: 'unmanaged_pool' vnx: _methods: get_lun: *lun_manage_in_other_pool test_manage_existing_get_size: lun: &lun_manage_get_size _properties: existed: True pool_name: 'unit_test_pool' total_capacity_gb: 5 vnx: _methods: get_lun: *lun_manage_get_size test_manage_existing_type_mismatch: lun: &lun_manage_type_mismatch _properties: existed: True pool_name: 'unit_test_pool' provision: _enum: VNXProvisionEnum: 'thick' tier: _enum: VNXTieringEnum: 'highestavailable' total_capacity_gb: 5 vnx: _methods: get_lun: *lun_manage_type_mismatch test_manage_existing: lun: &lun_manage_existing _properties: &lun_manage_existing_props lun_id: 1 existed: True pool_name: 'unit_test_pool' provision: _enum: VNXProvisionEnum: 'deduplicated' tier: _enum: VNXTieringEnum: 'auto' total_capacity_gb: 5 primary_lun: 'N/A' is_snap_mount_point: False _methods: rename: test_manage_existing_smp: lun: &manage_existing_smp _properties: lun_id: 2 existed: True pool_name: 'unit_test_pool' primary_lun: 'src_lun' is_snap_mount_point: True 
_methods: rename: vnx: _methods: get_lun: *manage_existing_smp test_assure_storage_group: sg: &sg_test_assure_storage_group _properties: <<: *sg_base_prop existed: True _methods: update: with_poll: _context vnx: <<: *vnx_base _methods: get_sg: *sg_test_assure_storage_group test_assure_storage_group_create_new: sg: &sg_test_assure_storage_group_create_new _properties: <<: *sg_base_prop existed: False _methods: update: with_poll: _context vnx: <<: *vnx_base _methods: get_sg: *sg_test_assure_storage_group_create_new create_sg: *sg_test_assure_storage_group_create_new test_assure_host_access: sg: &sg_test_assure_host_access <<: *sg_base _methods: update: with_poll: _context lun: &lun_test_assure_host_access <<: *lun_base vnx: <<: *vnx_base _methods: get_lun: *lun_test_assure_host_access test_assure_host_access_without_auto_register_new_sg: &test_assure_host_access_without_auto_register_new_sg sg: &sg_test_assure_host_access_without_auto_register_new_sg <<: *sg_base _methods: update: connect_host: with_poll: _context lun: &lun_test_assure_host_access_without_auto_register_new_sg <<: *lun_base vnx: <<: *vnx_base _methods: get_lun: *lun_test_assure_host_access_without_auto_register_new_sg test_assure_host_access_without_auto_register: *test_assure_host_access_without_auto_register_new_sg test_auto_register_initiator: &test_auto_register_initiator allowed_ports: *all_iscsi_ports reg_ports: [*iscsi_port_a-0-0] sg: &sg_auto_register_initiator _properties: <<: *sg_base_prop initiator_uid_list: ['iqn-reg-1', 'iqn-reg-2'] _methods: get_ports: [*iscsi_port_a-0-0] vnx: <<: *vnx_base test_auto_register_initiator_no_white_list: *test_auto_register_initiator test_auto_register_initiator_no_port_to_reg: allowed_ports: [*iscsi_port_a-0-0] reg_ports: [*iscsi_port_a-0-0] sg: _properties: <<: *sg_base_prop initiator_uid_list: ['iqn-reg-1', 'iqn-reg-2'] _methods: get_ports: [*iscsi_port_a-0-0] vnx: <<: *vnx_base test_build_provider_location: vnx: _properties: serial: 'vnx-serial' test_terminate_connection: sg: &sg_terminate_connection _properties: existed: True vnx: _methods: get_sg: *sg_terminate_connection test_terminate_connection_force_detach: sg: &sg_terminate_connection_force_detach _properties: existed: True sgs: &sgs_terminate_connection_force_detach _methods: shadow_copy: [*sg_terminate_connection_force_detach] vnx: _methods: get_sg: *sgs_terminate_connection_force_detach test_remove_host_access: sg: &sg_remove_host_access _properties: existed: True _methods: detach_alu: vnx: _methods: get_sg: *sg_remove_host_access get_lun: *lun_base test_set_extra_spec_defaults: vnx: <<: *vnx_base_prop _methods: is_auto_tiering_enabled: True test_remove_host_access_sg_absent: sg: &sg_remove_host_access_sg_absent _properties: existed: False vnx: _methods: get_sg: *sg_remove_host_access_sg_absent get_lun: *lun_base test_setup_lun_replication: vnx: _properties: serial: 'vnx-serial' lun: _properties: lun_id: 222 wwn: fake_wwn test_setup_lun_replication_in_group: group: &group_for_enable _methods: add_mirror: vnx: _properties: serial: 'vnx-serial' _methods: get_mirror_view: *mirror_base get_mirror_group: *group_for_enable lun: _properties: lun_id: 222 wwn: fake_wwn test_cleanup_replication: vnx: _properties: serial: 'vnx-serial' _methods: is_mirror_view_sync_enabled: True test_build_mirror_view: vnx: _properties: serial: 'vnx-serial' _methods: is_mirror_view_sync_enabled: True test_build_mirror_view_no_device: vnx: _properties: serial: 'vnx-serial' test_build_mirror_view_2_device: vnx: _properties: serial: 'vnx-serial' 
_methods: is_mirror_view_sync_enabled: True test_build_mirror_view_no_enabler: vnx: _properties: serial: 'vnx-serial' _methods: is_mirror_view_sync_enabled: False test_build_mirror_view_failover_false: vnx: _properties: serial: 'vnx-serial' _methods: is_mirror_view_sync_enabled: True test_failover_host: lun1: _properties: lun_id: 11 test_failover_host_invalid_backend_id: test_failover_host_failback: lun1: _properties: lun_id: 22 test_failover_host_groups: lun1: _properties: lun_id: 22 test_get_pool_name: lun: &lun_test_get_pool_name _properties: <<: *lun_base_prop pool_name: pool_1 _methods: with_no_poll: _context update: vnx: _methods: get_lun: *lun_test_get_pool_name test_normalize_config_naviseccli_path: test_normalize_config_queue_path: test_normalize_config_naviseccli_path_none: test_normalize_config_pool_names: test_normalize_config_pool_names_none: test_normalize_config_pool_names_empty_list: test_normalize_config_io_port_list: test_normalize_config_io_port_list_none: test_normalize_config_io_port_list_empty_list: ########################################################### # TestISCSIAdapter ########################################################### test_parse_ports_iscsi: &test_parse_ports_iscsi connection_port: &port_test_parse_ports_iscsi _properties: existed: False vnx: _methods: get_sg: *sg_remove_host_access_sg_absent get_lun: *lun_base test_remove_host_access_volume_not_in_sg: sg: &remove_host_access_volume_not_in_sg _properties: *sg_base_prop _methods: detach_alu: _raises: VNXDetachAluNotFoundError vnx: _methods: get_sg: *remove_host_access_volume_not_in_sg get_lun: *lun_base test_terminate_connection_cleanup_remove_sg: sg: _properties: *sg_base_prop _methods: delete: disconnect_host: get_alu_hlu_map: {} update: with_poll: _context test_terminate_connection_cleanup_sg_absent: sg: _properties: existed: False test_terminate_connection_cleanup_deregister: sg: _properties: *sg_base_prop _methods: delete: disconnect_host: get_alu_hlu_map: {} update: with_poll: _context vnx: _methods: delete_hba: test_terminate_connection_cleanup_sg_is_not_empty: sg: _properties: *sg_base_prop _methods: get_alu_hlu_map: {'1': '1'} test_update_consistencygroup: test_do_update_cg: test_update_migrated_volume: test_update_migrated_volume_smp: test_normalize_config_iscsi_initiators: test_normalize_config_iscsi_initiators_none: test_normalize_config_iscsi_initiators_empty_str: test_normalize_config_iscsi_initiators_not_dict: test_create_group_snap: test_create_cgsnapshot: test_create_cloned_cg: test_create_cloned_group: test_create_cg_from_cgsnapshot: test_create_group_from_group_snapshot: test_create_group_snapshot: test_delete_group_snapshot: test_delete_cgsnapshot: ########################################################### # TestISCSIAdapter ########################################################### test_update_volume_stats_iscsi: vnx: _properties: <<: *vnx_base_prop _methods: get_iscsi_port: *all_iscsi_ports test_build_terminate_connection_return_data_iscsi: ########################################################### # TestFCAdapter ########################################################### test_build_terminate_connection_return_data_without_autozone: test_build_terminate_connection_return_data_sg_absent: sg: _properties: <<: *sg_base_prop existed: False test_build_terminate_connection_return_data_auto_zone: sg: _properties: <<: *sg_base_prop name: 'fake_host' fc_ports: - *fc_port_a-1 _methods: get_alu_hlu_map: {} vnx: _methods: get_fc_port: *all_fc_ports test_mock_vnx_objects_foo: 
*test_create_lun test_get_tgt_list_and_initiator_tgt_map_allow_port_only: sg: _properties: <<: *sg_base_prop fc_ports: - *fc_port_a-1 - <<: *fc_port_base _properties: <<: *fc_port_base_prop sp: *spb port_id: 1 wwn: '50:06:01:60:B6:E0:1C:F4:50:06:01:66:36:E0:1C:B1' - *fc_port_b-2 adapter: _properties: allowed_ports: - <<: *fc_port_base _properties: <<: *fc_port_base_prop sp: *spb port_id: 1 wwn: '50:06:01:60:B6:E0:1C:F4:50:06:01:66:36:E0:1C:B1' - *fc_port_b-2 vnx: _methods: get_fc_port: *all_fc_ports ########################################################## # TestReplicationAdapter ########################################################## test_enable_replication: vnx: _methods: create_mirror_group: *mirror_group_base get_mirror_view: *mirror_base get_mirror_group: *group_for_enable test_disable_replication: group: &group_for_disable _methods: remove_mirror: delete: vnx: _methods: get_mirror_view: *mirror_base get_mirror_group: *group_for_disable test_failover_replication: lun1: *lun_base ########################################################## # TestTaskflow ########################################################## test_copy_snapshot_task: vnx: _methods: get_snap: *snap_copy test_copy_snapshot_task_revert: snap: &snap_copy_error _methods: copy: _raise: VNXSnapError: Unable to copy snap delete: vnx: _methods: get_snap: *snap_copy_error test_create_smp_task: smp: &smp _properties: lun_id: 15 lun: &lun_create_smp _methods: create_mount_point: *smp vnx: _methods: get_lun: _side_effect: [*lun_create_smp, *smp] test_create_smp_task_revert: lun: &lun_create_smp_error _methods: create_mount_point: _raise: VNXCreateLunError: 'Unable to create mount point' delete: _properties: is_snap_mount_point: False vnx: _methods: get_lun: *lun_create_smp_error test_attach_snap_task: vnx: _methods: get_lun: *lun_attach_snap test_attach_snap_task_revert: lun: &lun_attach_snap_error _methods: attach_snap: _raise: VNXAttachSnapError: 'Unable to attach snapshot' detach_snap: vnx: _methods: get_lun: *lun_attach_snap_error test_create_snapshot_task: lun: &lun_snap _methods: create_snap: vnx: _methods: get_lun: *lun_snap test_create_snapshot_task_revert: snap: &snap_delete _methods: delete: lun: &lun_snap_error _methods: create_snap: _raise: VNXCreateSnapError: 'Unable to create snap' vnx: _methods: get_lun: *lun_snap_error get_snap: *snap_delete test_allow_read_write_task: vnx: _methods: get_snap: *snap_modify test_allow_read_write_task_revert: snap: &snap_modify_error _methods: modify: _raise: VNXSnapError: Unable to modify snap vnx: _methods: get_snap: *snap_modify_error test_wait_migrations_task: vnx: test_create_consistency_group_task: vnx: test_create_consistency_group_task_revert: vnx: test_create_cg_snapshot_task: *test_create_cg_snapshot test_create_cg_snapshot_task_revert: cg: &create_cg_snapshot_error _methods: create_snap: _raise: VNXCreateSnapError: 'Create failed' snap: &snap_create_cg_revert _methods: delete: vnx: _methods: get_cg: *create_cg_snapshot_error get_snap: *snap_create_cg_revert test_extend_smp_task: thin_base_lun: &test_extend_smp_task_thin_base_lun _properties: is_thin_lun: true lun: &lun_test_extend_smp_task _properties: <<: *lun_base_prop name: lun_test_extend_smp_task is_thin_lun: true total_capacity_gb: 10 primary_lun: *test_extend_smp_task_thin_base_lun new_lun: &new_lun_test_extend_smp_task _properties: <<: *lun_base_prop name: new_lun_test_extend_smp_task is_thin_lun: true total_capacity_gb: 100 _methods: expand: with_no_poll: _context update: vnx: _methods: get_lun: _side_effect: 
[*lun_test_extend_smp_task, *new_lun_test_extend_smp_task] test_extend_smp_task_skip_small_size: lun: &lun_test_extend_smp_task_skip_small_size _properties: <<: *lun_base_prop name: lun_test_extend_smp_task_skip_small_size is_thin_lun: true total_capacity_gb: 1 primary_lun: *test_extend_smp_task_thin_base_lun vnx: _methods: get_lun: *lun_test_extend_smp_task_skip_small_size test_extend_smp_task_skip_thick: &test_extend_smp_task_skip_thick thick_base_lun: &test_extend_smp_task_thick_base_lun _properties: is_thin_lun: false lun: &lun_test_extend_smp_task_skip_thick _properties: <<: *lun_base_prop name: lun_test_extend_smp_task_skip_thick is_thin_lun: false total_capacity_gb: 10 primary_lun: *test_extend_smp_task_thick_base_lun vnx: _methods: get_lun: *lun_test_extend_smp_task_skip_thick ########################################################### # TestExtraSpecs ########################################################### test_generate_extra_specs_from_lun: lun: _properties: provision: _enum: VNXProvisionEnum: 'compressed' tier: _enum: VNXTieringEnum: 'highestavailable' deduped_lun: _properties: provision: _enum: VNXProvisionEnum: 'deduplicated' tier: _enum: VNXTieringEnum: 'auto' test_extra_specs_match_with_lun: lun: _properties: provision: _enum: VNXProvisionEnum: 'thin' tier: _enum: VNXTieringEnum: 'nomovement' deduped_lun: _properties: provision: _enum: VNXProvisionEnum: 'deduplicated' tier: _enum: VNXTieringEnum: 'nomovement' test_extra_specs_not_match_with_lun: lun: _properties: provision: _enum: VNXProvisionEnum: 'thick' tier: _enum: VNXTieringEnum: 'lowestavailable' ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/volume/drivers/dell_emc/vnx/res_mock.py0000664000175000017500000003707000000000000026203 0ustar00zuulzuul00000000000000# Copyright (c) 2016 EMC Corporation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
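# Builds the mock resources for the VNX driver unit tests from the YAML
# fixtures in this directory (mocked_cinder.yaml and mocked_vnx.yaml).  The
# reserved keys defined below (_type, _properties, _methods, _side_effect,
# _raise, _context, _uuid, _enum) describe how each YAML entry is expanded
# into a fake cinder object (volume, snapshot, group, ...) or a mocked
# storops object.
#
# Illustrative shape of a driver-side entry, abridged from mocked_cinder.yaml:
#
#   test_create_volume:
#     volume:
#       _type: 'volume'
#       _properties:
#         status: 'creating'
#         size: 1
#         id:
#           _uuid: volume_id
#
# _uuid markers are resolved against fake_constants, and entries with a
# recognised _type are built through fake_func_mapping (see the
# _fake_*_wrapper helpers below).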
import functools
from unittest import mock

from cinder import context
from cinder.tests.unit.consistencygroup import fake_cgsnapshot
from cinder.tests.unit.consistencygroup import fake_consistencygroup
from cinder.tests.unit import fake_constants
from cinder.tests.unit import fake_group
from cinder.tests.unit import fake_snapshot
from cinder.tests.unit import fake_volume
from cinder.tests.unit.volume.drivers.dell_emc.vnx import fake_exception as \
    lib_ex
from cinder.tests.unit.volume.drivers.dell_emc.vnx import fake_storops as \
    storops
from cinder.tests.unit.volume.drivers.dell_emc.vnx import utils
from cinder.volume.drivers.dell_emc.vnx import adapter
from cinder.volume.drivers.dell_emc.vnx import client
from cinder.volume.drivers.dell_emc.vnx import common
from cinder.volume.drivers.dell_emc.vnx import driver
from cinder.volume.drivers.dell_emc.vnx import utils as vnx_utils

SYMBOL_TYPE = '_type'
SYMBOL_PROPERTIES = '_properties'
SYMBOL_METHODS = '_methods'
SYMBOL_SIDE_EFFECT = '_side_effect'
SYMBOL_RAISE = '_raise'
SYMBOL_CONTEXT = '_context'
UUID = '_uuid'
SYMBOL_ENUM = '_enum'


def _is_driver_object(obj_body):
    return isinstance(obj_body, dict) and SYMBOL_PROPERTIES in obj_body


class DriverResourceMock(dict):
    fake_func_mapping = {}

    def __init__(self, yaml_file):
        yaml_dict = utils.load_yaml(yaml_file)
        if not isinstance(yaml_dict, dict):
            return
        for case_name, case_res in yaml_dict.items():
            if not isinstance(case_res, dict):
                continue
            self[case_name] = {}
            for obj_name, obj_body in case_res.items():
                self[case_name][obj_name] = self._parse_driver_object(
                    obj_body)

    def _parse_driver_object(self, obj_body):
        if isinstance(obj_body, dict):
            obj_body = {k: self._parse_driver_object(v)
                        for k, v in obj_body.items()}
            if _is_driver_object(obj_body):
                return self._create_object(obj_body)
            else:
                return obj_body
        elif isinstance(obj_body, list):
            return map(self._parse_driver_object, obj_body)
        else:
            return obj_body

    def _create_object(self, obj_body):
        props = obj_body[SYMBOL_PROPERTIES]
        for prop_name, prop_value in props.items():
            if isinstance(prop_value, dict) and prop_value:
                # get the first key as the convert function
                func_name = list(prop_value.keys())[0]
                if func_name.startswith('_'):
                    func = getattr(self, func_name)
                    props[prop_name] = func(prop_value[func_name])

        if (SYMBOL_TYPE in obj_body and
                obj_body[SYMBOL_TYPE] in self.fake_func_mapping):
            return self.fake_func_mapping[obj_body[SYMBOL_TYPE]](**props)
        else:
            return props

    @staticmethod
    def _uuid(uuid_key):
        uuid_key = uuid_key.upper()
        return getattr(fake_constants, uuid_key)


def _fake_volume_wrapper(*args, **kwargs):
    expected_attrs_key = {'volume_attachment': 'volume_attachment',
                          'volume_metadata': 'metadata'}
    if 'group' in kwargs:
        expected_attrs_key['group'] = kwargs['group']

    return fake_volume.fake_volume_obj(
        context.get_admin_context(),
        expected_attrs=[
            v for (k, v) in expected_attrs_key.items() if k in kwargs],
        **kwargs)


def _fake_cg_wrapper(*args, **kwargs):
    return fake_consistencygroup.fake_consistencyobject_obj(
        'fake_context', **kwargs)


def _fake_snapshot_wrapper(*args, **kwargs):
    return fake_snapshot.fake_snapshot_obj(
        'fake_context',
        expected_attrs=(['volume'] if 'volume' in kwargs else None),
        **kwargs)


def _fake_cg_snapshot_wrapper(*args, **kwargs):
    return fake_cgsnapshot.fake_cgsnapshot_obj(None, **kwargs)


def _fake_group_wrapper(*args, **kwargs):
    return fake_group.fake_group_obj(None, **kwargs)


class EnumBuilder(object):
    def __init__(self, enum_dict):
        enum_dict = enum_dict[SYMBOL_ENUM]
        for k, v in enum_dict.items():
            self.klazz = k
            self.value = v
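    # Calling the builder resolves the enum class named under ``_enum``
    # (for example VNXProvisionEnum in the fixtures above) from the fake
    # storops module and parses the configured string into the matching
    # enum value.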
def __call__(self, *args, **kwargs): return getattr(storops, self.klazz).parse(self.value) class CinderResourceMock(DriverResourceMock): # fake_func in the mapping should be like func(*args, **kwargs) fake_func_mapping = {'volume': _fake_volume_wrapper, 'cg': _fake_cg_wrapper, 'snapshot': _fake_snapshot_wrapper, 'cg_snapshot': _fake_cg_snapshot_wrapper, 'group': _fake_group_wrapper} def __init__(self, yaml_file): super(CinderResourceMock, self).__init__(yaml_file) @staticmethod def _build_provider_location(props): return vnx_utils.build_provider_location( props.get('system'), props.get('type'), str(props.get('id')), str(props.get('base_lun_name')), props.get('version')) class ContextMock(object): """Mocks the return value of a context function.""" def __enter__(self): pass def __exit__(self, exc_type, exc_valu, exc_tb): pass class MockBase(object): """Base object of all the Mocks. This mock convert the dict to object when the '_type' is included in the dict """ def _is_mock_object(self, yaml_info): return (isinstance(yaml_info, dict) and (SYMBOL_PROPERTIES in yaml_info or SYMBOL_METHODS in yaml_info)) def _is_object_with_type(self, yaml_dict): return isinstance(yaml_dict, dict) and SYMBOL_TYPE in yaml_dict def _is_object_with_enum(self, yaml_dict): return isinstance(yaml_dict, dict) and SYMBOL_ENUM in yaml_dict def _build_mock_object(self, yaml_dict): if self._is_object_with_type(yaml_dict): return FakePort(yaml_dict) elif self._is_object_with_enum(yaml_dict): return EnumBuilder(yaml_dict)() elif self._is_mock_object(yaml_dict): return StorageObjectMock(yaml_dict) elif isinstance(yaml_dict, dict): return {k: self._build_mock_object(v) for k, v in yaml_dict.items()} elif isinstance(yaml_dict, list): return [self._build_mock_object(each) for each in yaml_dict] else: return yaml_dict class StorageObjectMock(object): PROPS = 'props' def __init__(self, yaml_dict): self.__dict__[StorageObjectMock.PROPS] = {} props = yaml_dict.get(SYMBOL_PROPERTIES, None) if props: for k, v in props.items(): setattr(self, k, StoragePropertyMock(k, v)()) methods = yaml_dict.get(SYMBOL_METHODS, None) if methods: for k, v in methods.items(): setattr(self, k, StorageMethodMock(k, v)) def __setattr__(self, key, value): self.__dict__[StorageObjectMock.PROPS][key] = value def __getattr__(self, item): try: super(StorageObjectMock, self).__getattr__(item) except AttributeError: return self.__dict__[StorageObjectMock.PROPS][item] except KeyError: raise KeyError('%(item)s not exist in mock object.' 
) % {'item': item} class FakePort(StorageObjectMock): def __eq__(self, other): o_sp = other.sp o_port_id = other.port_id o_vport_id = other.vport_id ret = True ret &= self.sp == o_sp ret &= self.port_id == o_port_id ret &= self.vport_id == o_vport_id return ret def __hash__(self): return hash((self.sp, self.port_id, self.vport_id)) class StoragePropertyMock(mock.PropertyMock, MockBase): def __init__(self, name, property_body): return_value = property_body side_effect = None # only support return_value and side_effect for property if (isinstance(property_body, dict) and SYMBOL_SIDE_EFFECT in property_body): side_effect = self._build_mock_object( property_body[SYMBOL_SIDE_EFFECT]) return_value = None if side_effect is not None: super(StoragePropertyMock, self).__init__( name=name, side_effect=side_effect) else: return_value = self._build_mock_object(return_value) super(StoragePropertyMock, self).__init__( name=name, return_value=return_value) class StorageMethodMock(mock.Mock, MockBase): def __init__(self, name, method_body): return_value = method_body exception = None side_effect = None # support return_value, side_effect and exception for method if isinstance(method_body, dict): if (SYMBOL_SIDE_EFFECT in method_body or SYMBOL_RAISE in method_body): exception = method_body.get(SYMBOL_RAISE, None) side_effect = method_body.get(SYMBOL_SIDE_EFFECT, None) return_value = None if exception is not None: ex = None if isinstance(exception, dict) and exception: ex_name = list(exception.keys())[0] ex_tmp = [getattr(ex_module, ex_name, None) for ex_module in [lib_ex, common]] try: ex = [each for each in ex_tmp if each is not None][0] super(StorageMethodMock, self).__init__( name=name, side_effect=ex(exception[ex_name])) except IndexError: raise KeyError('Exception %(ex_name)s not found.' 
% {'ex_name': ex_name}) else: raise KeyError('Invalid Exception body, should be a dict.') elif side_effect is not None: super(StorageMethodMock, self).__init__( name=name, side_effect=self._build_mock_object(side_effect)) elif return_value is not None: super(StorageMethodMock, self).__init__( name=name, return_value=(ContextMock() if return_value == SYMBOL_CONTEXT else self._build_mock_object(return_value))) else: super(StorageMethodMock, self).__init__( name=name, return_value=None) class StorageResourceMock(dict, MockBase): def __init__(self, yaml_file): yaml_dict = utils.load_yaml(yaml_file) if not isinstance(yaml_dict, dict): return for section, sec_body in yaml_dict.items(): if isinstance(sec_body, dict): self[section] = {obj_name: self._build_mock_object(obj_body) for obj_name, obj_body in sec_body.items()} else: self[section] = {} cinder_res = CinderResourceMock('mocked_cinder.yaml') DRIVER_RES_MAPPING = { 'TestResMock': cinder_res, 'TestCommonAdapter': cinder_res, 'TestReplicationAdapter': cinder_res, 'TestISCSIAdapter': cinder_res, 'TestFCAdapter': cinder_res, 'TestUtils': cinder_res, 'TestClient': cinder_res } def mock_driver_input(func): @functools.wraps(func) def decorated(cls, *args, **kwargs): return func(cls, DRIVER_RES_MAPPING[cls.__class__.__name__][func.__name__], *args, **kwargs) return decorated vnx_res = StorageResourceMock('mocked_vnx.yaml') STORAGE_RES_MAPPING = { 'TestResMock': StorageResourceMock('test_res_mock.yaml'), 'TestCondition': vnx_res, 'TestClient': vnx_res, 'TestCommonAdapter': vnx_res, 'TestReplicationAdapter': vnx_res, 'TestISCSIAdapter': vnx_res, 'TestFCAdapter': vnx_res, 'TestTaskflow': vnx_res, 'TestExtraSpecs': vnx_res, } DEFAULT_STORAGE_RES = 'vnx' def _build_client(): return client.Client(ip='192.168.1.2', username='sysadmin', password='sysadmin', scope='global', naviseccli=None, sec_file=None, queue_path='vnx-cinder') def patch_client(func): @functools.wraps(func) def decorated(cls, *args, **kwargs): storage_res = ( STORAGE_RES_MAPPING[cls.__class__.__name__][func.__name__]) with utils.patch_vnxsystem as patched_vnx: if DEFAULT_STORAGE_RES in storage_res: patched_vnx.return_value = storage_res[DEFAULT_STORAGE_RES] client = _build_client() return func(cls, client, storage_res, *args, **kwargs) return decorated PROTOCOL_COMMON = 'Common' PROTOCOL_MAPPING = { PROTOCOL_COMMON: adapter.CommonAdapter, common.PROTOCOL_ISCSI: adapter.ISCSIAdapter, common.PROTOCOL_FC: adapter.FCAdapter } def patch_adapter_init(protocol): def inner_patch_adapter(func): @functools.wraps(func) def decorated(cls, *args, **kwargs): storage_res = ( STORAGE_RES_MAPPING[cls.__class__.__name__][func.__name__]) with utils.patch_vnxsystem as patched_vnx: if DEFAULT_STORAGE_RES in storage_res: patched_vnx.return_value = storage_res[DEFAULT_STORAGE_RES] adapter = PROTOCOL_MAPPING[protocol](cls.configuration) return func(cls, adapter, storage_res, *args, **kwargs) return decorated return inner_patch_adapter def _patch_adapter_prop(adapter, client): try: adapter.serial_number = client.get_serial() except KeyError: adapter.serial_number = 'faked_serial_number' adapter.VERSION = driver.VNXDriver.VERSION def patch_adapter(protocol): def inner_patch_adapter(func): @functools.wraps(func) def decorated(cls, *args, **kwargs): storage_res = ( STORAGE_RES_MAPPING[cls.__class__.__name__][func.__name__]) with utils.patch_vnxsystem: client = _build_client() adapter = PROTOCOL_MAPPING[protocol](cls.configuration, None) if DEFAULT_STORAGE_RES in storage_res: client.vnx = 
storage_res[DEFAULT_STORAGE_RES] adapter.client = client _patch_adapter_prop(adapter, client) return func(cls, adapter, storage_res, *args, **kwargs) return decorated return inner_patch_adapter patch_common_adapter = patch_adapter(PROTOCOL_COMMON) patch_iscsi_adapter = patch_adapter(common.PROTOCOL_ISCSI) patch_fc_adapter = patch_adapter(common.PROTOCOL_FC) def mock_storage_resources(func): @functools.wraps(func) def decorated(cls, *args, **kwargs): storage_res = ( STORAGE_RES_MAPPING[cls.__class__.__name__][func.__name__]) return func(cls, storage_res, *args, **kwargs) return decorated ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/volume/drivers/dell_emc/vnx/test_adapter.py0000664000175000017500000022727500000000000027070 0ustar00zuulzuul00000000000000# Copyright (c) 2016 EMC Corporation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import os import re from unittest import mock from oslo_config import cfg from cinder import exception from cinder.objects import fields from cinder.tests.unit import fake_constants from cinder.tests.unit import utils as test_utils from cinder.tests.unit.volume.drivers.dell_emc.vnx import fake_exception \ as storops_ex from cinder.tests.unit.volume.drivers.dell_emc.vnx import fake_storops \ as storops from cinder.tests.unit.volume.drivers.dell_emc.vnx import res_mock from cinder.tests.unit.volume.drivers.dell_emc.vnx import test_base from cinder.tests.unit.volume.drivers.dell_emc.vnx import utils from cinder.volume.drivers.dell_emc.vnx import adapter from cinder.volume.drivers.dell_emc.vnx import client from cinder.volume.drivers.dell_emc.vnx import common from cinder.volume.drivers.dell_emc.vnx import utils as vnx_utils class TestCommonAdapter(test_base.TestCase): def setUp(self): super(TestCommonAdapter, self).setUp() vnx_utils.init_ops(self.configuration) @res_mock.mock_driver_input @res_mock.patch_common_adapter def test_create_volume(self, vnx_common, _ignore, mocked_input): volume = mocked_input['volume'] with mock.patch.object(vnx_utils, 'get_backend_qos_specs', return_value=None): model_update = vnx_common.create_volume(volume) self.assertEqual('False', model_update.get('metadata')['snapcopy']) @res_mock.mock_driver_input @res_mock.patch_common_adapter def test_create_volume_error(self, vnx_common, _ignore, mocked_input): def inner(): with mock.patch.object(vnx_utils, 'get_backend_qos_specs', return_value=None): vnx_common.create_volume(mocked_input['volume']) self.assertRaises(storops_ex.VNXCreateLunError, inner) @utils.patch_extra_specs({'provisioning:type': 'thick'}) @res_mock.mock_driver_input @res_mock.patch_common_adapter def test_create_thick_volume(self, vnx_common, _ignore, mocked_input): volume = mocked_input['volume'] expected_pool = volume.host.split('#')[1] with mock.patch.object(vnx_utils, 'get_backend_qos_specs', return_value=None): vnx_common.create_volume(volume) vnx_common.client.vnx.get_pool.assert_called_with( name=expected_pool) 
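    # NOTE: the res_mock decorators used throughout this class look up
    # fixtures by test method name: @res_mock.patch_common_adapter injects a
    # CommonAdapter whose ``client.vnx`` is the mocked VNX system from
    # mocked_vnx.yaml, and @res_mock.mock_driver_input passes the matching
    # Cinder objects from mocked_cinder.yaml as ``mocked_input``.  A new test
    # therefore needs entries under its own name in both YAML files; a
    # minimal, hypothetical sketch (names illustrative only):
    #
    #   @res_mock.mock_driver_input
    #   @res_mock.patch_common_adapter
    #   def test_example(self, vnx_common, mocked, mocked_input):
    #       volume = mocked_input['volume']
    #       vnx_common.create_volume(volume)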
@utils.patch_extra_specs({'provisioning:type': 'thin'}) @res_mock.mock_driver_input @res_mock.patch_common_adapter def test_create_volume_with_qos(self, vnx_common, _ignore, mocked_input): volume = mocked_input['volume'] with mock.patch.object(vnx_utils, 'get_backend_qos_specs', return_value={'id': 'test', 'maxBWS': 100, 'maxIOPS': 123}): model_update = vnx_common.create_volume(volume) self.assertEqual('False', model_update.get('metadata')['snapcopy']) @res_mock.mock_driver_input @res_mock.patch_common_adapter def test_migrate_volume(self, vnx_common, mocked, cinder_input): volume = cinder_input['volume'] host = {'capabilities': {'location_info': 'pool_name|fake_serial', 'storage_protocol': 'iscsi'}, 'host': 'hostname@backend_name#pool_name'} vnx_common.serial_number = 'fake_serial' migrated, update = vnx_common.migrate_volume(None, volume, host) self.assertTrue(migrated) self.assertEqual('False', update['metadata']['snapcopy']) @res_mock.mock_driver_input @res_mock.patch_common_adapter def test_migrate_volume_host_assisted(self, vnx_common, mocked, cinder_input): volume1 = cinder_input['volume'] host = { 'capabilities': { 'location_info': 'pool_name|fake_serial', 'storage_protocol': 'iscsi'}, 'host': 'hostname@backend_name#pool_name'} vnx_common.serial_number = 'new_serial' migrated, update = vnx_common.migrate_volume(None, volume1, host) self.assertFalse(migrated) self.assertIsNone(update) @res_mock.mock_driver_input @res_mock.patch_common_adapter def test_create_cloned_volume( self, vnx_common, mocked, cinder_input): volume = cinder_input['volume'] src_vref = cinder_input['src_vref'] model_update = vnx_common.create_cloned_volume(volume, src_vref) self.assertEqual('False', model_update['metadata']['snapcopy']) @res_mock.mock_driver_input @res_mock.patch_common_adapter def test_create_cloned_volume_snapcopy( self, vnx_common, mocked, cinder_input): volume = cinder_input['volume'] volume.metadata = {'snapcopy': 'True'} src_vref = cinder_input['src_vref'] model_update = vnx_common.create_cloned_volume(volume, src_vref) self.assertEqual('True', model_update['metadata']['snapcopy']) @res_mock.mock_driver_input @res_mock.patch_common_adapter def test_create_volume_from_snapshot( self, vnx_common, mocked, cinder_input): volume = cinder_input['volume'] volume['metadata'] = {'async_migrate': 'False'} snapshot = cinder_input['snapshot'] snapshot.volume = volume update = vnx_common.create_volume_from_snapshot(volume, snapshot) self.assertEqual('False', update['metadata']['snapcopy']) @res_mock.mock_driver_input @res_mock.patch_common_adapter def test_create_volume_from_snapshot_snapcopy( self, vnx_common, mocked, cinder_input): volume = cinder_input['volume'] volume.metadata = {'snapcopy': 'True'} snapshot = cinder_input['snapshot'] snapshot.volume = volume update = vnx_common.create_volume_from_snapshot(volume, snapshot) self.assertEqual('True', update['metadata']['snapcopy']) @res_mock.patch_common_adapter def test_create_cg_from_cgsnapshot(self, common, _): common.do_create_cg_from_cgsnap = mock.Mock( return_value='fake_return') new_cg = test_utils.create_consistencygroup( self.ctxt, id=fake_constants.CONSISTENCY_GROUP_ID, host='host@backend#unit_test_pool', group_type_id=fake_constants.VOLUME_TYPE_ID) cg_snapshot = test_utils.create_cgsnapshot( self.ctxt, fake_constants.CONSISTENCY_GROUP2_ID) vol = test_utils.create_volume(self.ctxt) snaps = [ test_utils.create_snapshot(self.ctxt, vol.id)] vol_new = test_utils.create_volume(self.ctxt) ret = common.create_cg_from_cgsnapshot( None, new_cg, 
[vol_new], cg_snapshot, snaps) self.assertEqual('fake_return', ret) common.do_create_cg_from_cgsnap.assert_called_once_with( new_cg.id, new_cg.host, [vol_new], cg_snapshot.id, snaps) @res_mock.patch_common_adapter def test_create_group_from_group_snapshot(self, common, _): common.do_create_cg_from_cgsnap = mock.Mock( return_value='fake_return') group = test_utils.create_group( self.ctxt, id=fake_constants.CONSISTENCY_GROUP_ID, host='host@backend#unit_test_pool', group_type_id=fake_constants.VOLUME_TYPE_ID) group_snapshot = test_utils.create_group_snapshot( self.ctxt, fake_constants.CGSNAPSHOT_ID, group_type_id=fake_constants.VOLUME_TYPE_ID) vol = test_utils.create_volume(self.ctxt) snaps = [ test_utils.create_snapshot(self.ctxt, vol.id)] vol_new = test_utils.create_volume(self.ctxt) ret = common.create_group_from_group_snapshot( None, group, [vol_new], group_snapshot, snaps) self.assertEqual('fake_return', ret) common.do_create_cg_from_cgsnap.assert_called_once_with( group.id, group.host, [vol_new], group_snapshot.id, snaps) @res_mock.mock_driver_input @res_mock.patch_common_adapter def test_do_create_cg_from_cgsnap( self, vnx_common, mocked, cinder_input): cg_id = fake_constants.CONSISTENCY_GROUP_ID cg_host = 'host@backend#unit_test_pool' volumes = [cinder_input['vol1']] cgsnap_id = fake_constants.CGSNAPSHOT_ID snaps = [cinder_input['snap1']] model_update, volume_updates = ( vnx_common.do_create_cg_from_cgsnap( cg_id, cg_host, volumes, cgsnap_id, snaps)) self.assertIsNone(model_update) provider_location = re.findall(r'id\^12', volume_updates[0]['provider_location']) self.assertEqual(1, len(provider_location)) @res_mock.patch_common_adapter def test_create_cloned_cg(self, common, _): common.do_clone_cg = mock.Mock( return_value='fake_return') group = test_utils.create_consistencygroup( self.ctxt, id=fake_constants.CONSISTENCY_GROUP_ID, host='host@backend#unit_test_pool', group_type_id=fake_constants.VOLUME_TYPE_ID) src_group = test_utils.create_consistencygroup( self.ctxt, id=fake_constants.CONSISTENCY_GROUP2_ID, host='host@backend#unit_test_pool2', group_type_id=fake_constants.VOLUME_TYPE_ID) vol = test_utils.create_volume(self.ctxt) src_vol = test_utils.create_volume(self.ctxt) ret = common.create_cloned_group( None, group, [vol], src_group, [src_vol]) self.assertEqual('fake_return', ret) common.do_clone_cg.assert_called_once_with( group.id, group.host, [vol], src_group.id, [src_vol]) @res_mock.patch_common_adapter def test_create_cloned_group(self, common, _): common.do_clone_cg = mock.Mock( return_value='fake_return') group = test_utils.create_group( self.ctxt, id=fake_constants.GROUP_ID, host='host@backend#unit_test_pool', group_type_id=fake_constants.VOLUME_TYPE_ID) src_group = test_utils.create_group( self.ctxt, id=fake_constants.GROUP2_ID, host='host@backend#unit_test_pool2', group_type_id=fake_constants.VOLUME_TYPE_ID) vol = test_utils.create_volume(self.ctxt) src_vol = test_utils.create_volume(self.ctxt) ret = common.create_cloned_group( None, group, [vol], src_group, [src_vol]) self.assertEqual('fake_return', ret) common.do_clone_cg.assert_called_once_with( group.id, group.host, [vol], src_group.id, [src_vol]) @res_mock.mock_driver_input @res_mock.patch_common_adapter def test_do_clone_cg(self, vnx_common, _, cinder_input): cg_id = fake_constants.CONSISTENCY_GROUP_ID cg_host = 'host@backend#unit_test_pool' volumes = [cinder_input['vol1']] src_cg_id = fake_constants.CONSISTENCY_GROUP2_ID src_volumes = [cinder_input['src_vol1']] model_update, volume_updates = 
vnx_common.do_clone_cg( cg_id, cg_host, volumes, src_cg_id, src_volumes) self.assertIsNone(model_update) provider_location = re.findall(r'id\^12', volume_updates[0]['provider_location']) self.assertEqual(1, len(provider_location)) @res_mock.patch_common_adapter def test_parse_pools(self, vnx_common, mocked): vnx_common.config.storage_vnx_pool_names = ['pool5', 'pool6'] parsed = vnx_common.parse_pools() self.assertEqual( len(vnx_common.config.storage_vnx_pool_names), len(parsed)) pools = vnx_common.client.get_pools() self.assertEqual(pools, parsed) @res_mock.patch_common_adapter def test_parse_pools_one_invalid_pool(self, vnx_common, mocked): vnx_common.config.storage_vnx_pool_names = ['pool5', 'pool7'] parsed = vnx_common.parse_pools() pools = vnx_common.client.get_pools() self.assertIn(parsed[0], pools) @res_mock.patch_common_adapter def test_parse_pools_all_invalid_pools(self, vnx_common, mocked): vnx_common.config.storage_vnx_pool_names = ['pool7', 'pool8'] self.assertRaises(exception.VolumeBackendAPIException, vnx_common.parse_pools) @res_mock.patch_common_adapter def test_get_enabler_stats(self, vnx_common, mocked): stats = vnx_common.get_enabler_stats() self.assertTrue(stats['compression_support']) self.assertTrue(stats['fast_support']) self.assertTrue(stats['deduplication_support']) self.assertTrue(stats['thin_provisioning_support']) self.assertTrue(stats['consistent_group_snapshot_enabled']) @res_mock.patch_common_adapter def test_get_pool_stats(self, vnx_common, mocked): pools = vnx_common.client.vnx.get_pool() vnx_common.config.storage_vnx_pool_names = [ pool.name for pool in pools] stats = { 'compression_support': True, 'fast_support': True, 'deduplication_support': True, 'thin_provisioning_support': True, 'consistent_group_snapshot_enabled': True, 'consistencygroup_support': True } pool_stats = vnx_common.get_pool_stats(stats) self.assertEqual(2, len(pool_stats)) for stat in pool_stats: self.assertTrue(stat['fast_cache_enabled']) self.assertTrue(stat['QoS_support']) self.assertIn(stat['pool_name'], [pools[0].name, pools[1].name]) self.assertFalse(stat['replication_enabled']) self.assertEqual([], stat['replication_targets']) @res_mock.patch_common_adapter def test_get_pool_stats_offline(self, vnx_common, mocked): vnx_common.config.storage_vnx_pool_names = [] pool_stats = vnx_common.get_pool_stats() for stat in pool_stats: self.assertTrue(stat['fast_cache_enabled']) self.assertEqual(0, stat['free_capacity_gb']) @res_mock.patch_common_adapter def test_get_pool_stats_max_luns_reached(self, vnx_common, mocked): pools = vnx_common.client.vnx.get_pool() vnx_common.config.storage_vnx_pool_names = [ pool.name for pool in pools] stats = { 'compression_support': True, 'fast_support': True, 'deduplication_support': True, 'thin_provisioning_support': True, 'consistent_group_snapshot_enabled': True, 'consistencygroup_support': True } pool_stats = vnx_common.get_pool_stats(stats) for stat in pool_stats: self.assertTrue(stat['fast_cache_enabled']) self.assertEqual(0, stat['free_capacity_gb']) @res_mock.patch_common_adapter def test_get_pool_stats_with_reserved(self, vnx_common, mocked): pools = vnx_common.client.vnx.get_pool() vnx_common.config.storage_vnx_pool_names = [ pool.name for pool in pools] stats = { 'compression_support': True, 'fast_support': True, 'deduplication_support': True, 'thin_provisioning_support': True, 'consistent_group_snapshot_enabled': True, 'consistencygroup_support': True } vnx_common.reserved_percentage = 15 pool_stats = vnx_common.get_pool_stats(stats) for stat in 
pool_stats: self.assertTrue(stat['fast_cache_enabled']) self.assertIsNot(0, stat['free_capacity_gb']) self.assertEqual(15, stat['reserved_percentage']) @res_mock.patch_common_adapter def test_update_volume_stats(self, vnx_common, mocked): with mock.patch.object(adapter.CommonAdapter, 'get_pool_stats'): stats = vnx_common.update_volume_stats() pools_stats = stats['pools'] for stat in pools_stats: self.assertFalse(stat['replication_enabled']) self.assertEqual([], stat['replication_targets']) @res_mock.patch_common_adapter def test_append_volume_stats(self, vnx_common, mocked): device = utils.get_replication_device() vnx_common.config.replication_device = [device] vnx_common.mirror_view = utils.build_fake_mirror_view() stats = {} vnx_common.append_replication_stats(stats) self.assertTrue(stats['replication_enabled']) self.assertEqual(1, stats['replication_count']) self.assertEqual(['sync'], stats['replication_type']) self.assertEqual([device['backend_id']], stats['replication_targets']) @res_mock.mock_driver_input @res_mock.patch_common_adapter def test_delete_volume_not_force(self, vnx_common, mocked, mocked_input): vnx_common.force_delete_lun_in_sg = False volume = mocked_input['volume'] volume['metadata'] = {'async_migrate': 'False'} vnx_common.delete_volume(volume) lun = vnx_common.client.vnx.get_lun() lun.delete.assert_called_with(force_detach=True, detach_from_sg=False) @res_mock.mock_driver_input @res_mock.patch_common_adapter def test_delete_volume_force(self, vnx_common, mocked, mocked_input): vnx_common.force_delete_lun_in_sg = True volume = mocked_input['volume'] volume['metadata'] = {'async_migrate': 'False'} vnx_common.delete_volume(volume) lun = vnx_common.client.vnx.get_lun() lun.delete.assert_called_with(force_detach=True, detach_from_sg=True) @res_mock.mock_driver_input @res_mock.patch_common_adapter def test_delete_async_volume(self, vnx_common, mocked, mocked_input): volume = mocked_input['volume'] volume.metadata = {'async_migrate': 'True'} vnx_common.force_delete_lun_in_sg = True vnx_common.delete_volume(volume) lun = vnx_common.client.vnx.get_lun() lun.delete.assert_called_with(force_detach=True, detach_from_sg=True) @res_mock.mock_driver_input @res_mock.patch_common_adapter def test_delete_async_volume_migrating(self, vnx_common, mocked, mocked_input): volume = mocked_input['volume'] volume.metadata = {'async_migrate': 'True'} vnx_common.force_delete_lun_in_sg = True vnx_common.client.cleanup_async_lun = mock.Mock() vnx_common.delete_volume(volume) lun = vnx_common.client.vnx.get_lun() lun.delete.assert_called_with(force_detach=True, detach_from_sg=True) @res_mock.mock_driver_input @res_mock.patch_common_adapter def test_delete_async_volume_not_from_snapshot(self, vnx_common, mocked, mocked_input): volume = mocked_input['volume'] volume.metadata = {'async_migrate': 'True'} vnx_common.force_delete_lun_in_sg = True vnx_common.delete_volume(volume) lun = vnx_common.client.vnx.get_lun() lun.delete.assert_called_with(force_detach=True, detach_from_sg=True) @res_mock.mock_driver_input @res_mock.patch_common_adapter def test_delete_async_volume_from_snapshot(self, vnx_common, mocked, mocked_input): volume = mocked_input['volume'] volume.metadata = {'async_migrate': 'True'} volume.snapshot_id = fake_constants.SNAPSHOT_ID vnx_common.force_delete_lun_in_sg = True vnx_common.delete_volume(volume) lun = vnx_common.client.vnx.get_lun() lun.delete.assert_called_with(force_detach=True, detach_from_sg=True) snap = vnx_common.client.vnx.get_snap() snap.delete.assert_called_with() 
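    # The delete_volume tests above pin down the expected LUN.delete()
    # arguments: ``detach_from_sg`` follows the ``force_delete_lun_in_sg``
    # setting, and a volume created from a snapshot with async migration
    # additionally deletes the source snapshot.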
@utils.patch_extra_specs_validate(side_effect=exception.InvalidVolumeType( reason='fake_reason')) @res_mock.patch_common_adapter def test_retype_type_invalid(self, vnx_common, mocked): self.assertRaises(exception.InvalidVolumeType, vnx_common.retype, None, None, {'extra_specs': 'fake_spec'}, None, None) @mock.patch.object(client.Client, 'get_vnx_enabler_status') @utils.patch_extra_specs_validate(return_value=True) @utils.patch_extra_specs({'storagetype:tiering': 'auto', 'provisioning:type': 'thin'}) @res_mock.mock_driver_input @res_mock.patch_common_adapter def test_retype_need_migration( self, vnx_common, mocked, driver_in, enabler_status): new_type = { 'extra_specs': {'provisioning:type': 'deduplicated', 'storagetype:tiering': 'starthighthenauto'}} volume = driver_in['volume'] host = driver_in['host'] fake_migrate_return = (True, ['fake_model_update']) vnx_common._migrate_volume = mock.Mock( return_value=fake_migrate_return) ret = vnx_common.retype(None, volume, new_type, None, host) self.assertEqual(fake_migrate_return, ret) vnx_common._migrate_volume.assert_called_once_with( volume, host, common.ExtraSpecs(new_type['extra_specs'])) @mock.patch.object(client.Client, 'get_vnx_enabler_status') @utils.patch_extra_specs_validate(return_value=True) @res_mock.mock_driver_input @res_mock.patch_common_adapter def test_retype_turn_on_compression_change_tier( self, vnx_common, mocked, driver_in, enabler_status): new_type = { 'extra_specs': {'provisioning:type': 'compressed', 'storagetype:tiering': 'starthighthenauto'}} volume = driver_in['volume'] host = driver_in['host'] lun = mocked['lun'] vnx_common.client.get_lun = mock.Mock(return_value=lun) ret = vnx_common.retype(None, volume, new_type, None, host) self.assertTrue(ret) lun.enable_compression.assert_called_once_with(ignore_thresholds=True) self.assertEqual(storops.VNXTieringEnum.HIGH_AUTO, lun.tier) @mock.patch.object(client.Client, 'get_vnx_enabler_status') @utils.patch_extra_specs_validate(return_value=True) @res_mock.mock_driver_input @res_mock.patch_common_adapter def test_retype_lun_has_snap( self, vnx_common, mocked, driver_in, enabler_status): new_type = { 'extra_specs': {'provisioning:type': 'thin', 'storagetype:tiering': 'auto'}} volume = driver_in['volume'] host = driver_in['host'] new_type = { 'extra_specs': {'provisioning:type': 'thin', 'storagetype:tiering': 'auto'}} ret = vnx_common.retype(None, volume, new_type, None, host) self.assertFalse(ret) new_type = { 'extra_specs': {'provisioning:type': 'compressed', 'storagetype:tiering': 'auto'}} ret = vnx_common.retype(None, volume, new_type, None, host) self.assertFalse(ret) @mock.patch.object(client.Client, 'get_vnx_enabler_status') @utils.patch_extra_specs_validate(return_value=True) @res_mock.mock_driver_input @res_mock.patch_common_adapter def test_retype_change_tier( self, vnx_common, mocked, driver_in, enabler_status): new_type = { 'extra_specs': {'storagetype:tiering': 'auto'}} volume = driver_in['volume'] host = driver_in['host'] lun = mocked['lun'] vnx_common.client.get_lun = mock.Mock(return_value=lun) ret = vnx_common.retype(None, volume, new_type, None, host) self.assertTrue(ret) self.assertEqual(storops.VNXTieringEnum.AUTO, lun.tier) @res_mock.mock_driver_input @res_mock.patch_common_adapter def test_create_consistencygroup(self, vnx_common, mocked, mocked_input): cg = mocked_input['cg'] model_update = vnx_common.create_consistencygroup(None, group=cg) self.assertEqual(fields.ConsistencyGroupStatus.AVAILABLE, model_update['status']) @res_mock.mock_driver_input 
@res_mock.patch_common_adapter def test_delete_consistencygroup(self, vnx_common, mocked, mocked_input): cg = mocked_input['cg'] model_update, vol_update_list = vnx_common.delete_consistencygroup( None, group=cg, volumes=[]) self.assertEqual(cg.status, model_update['status']) self.assertEqual([], vol_update_list) @res_mock.mock_driver_input @res_mock.patch_common_adapter def test_delete_consistencygroup_with_volume( self, vnx_common, mocked, mocked_input): cg = mocked_input['cg'] vol1 = mocked_input['vol1'] vol2 = mocked_input['vol2'] model_update, vol_update_list = vnx_common.delete_consistencygroup( None, group=cg, volumes=[vol1, vol2]) self.assertEqual(cg.status, model_update['status']) for update in vol_update_list: self.assertEqual(fields.ConsistencyGroupStatus.DELETED, update['status']) @res_mock.mock_driver_input @res_mock.patch_common_adapter def test_delete_consistencygroup_error(self, vnx_common, mocked, mocked_input): cg = mocked_input['cg'] self.assertRaises( storops_ex.VNXConsistencyGroupError, vnx_common.delete_consistencygroup, context=None, group=cg, volumes=[]) @res_mock.mock_driver_input @res_mock.patch_common_adapter def test_delete_consistencygroup_volume_error(self, vnx_common, mocked, mocked_input): cg = mocked_input['cg'] vol1 = mocked_input['vol1'] vol2 = mocked_input['vol2'] model_update, vol_update_list = vnx_common.delete_consistencygroup( None, group=cg, volumes=[vol1, vol2]) self.assertEqual(cg.status, model_update['status']) for update in vol_update_list: self.assertEqual(fields.ConsistencyGroupStatus.ERROR_DELETING, update['status']) @res_mock.mock_driver_input @res_mock.patch_common_adapter def test_extend_volume(self, common_adapter, _ignore, mocked_input): common_adapter.extend_volume(mocked_input['volume'], 10) lun = common_adapter.client.vnx.get_lun() lun.expand.assert_called_once_with(10, ignore_thresholds=True) @res_mock.mock_driver_input @res_mock.patch_common_adapter def test_create_snapshot_adapter(self, common_adapter, _ignore, mocked_input): common_adapter.create_snapshot(mocked_input['snapshot']) @res_mock.mock_driver_input @res_mock.patch_common_adapter def test_delete_snapshot_adapter(self, common_adapter, _ignore, mocked_input): common_adapter.delete_snapshot(mocked_input['snapshot']) @res_mock.mock_driver_input @res_mock.patch_common_adapter def test_restore_snapshot_adapter(self, common_adapter, _ignore, mocked_input): common_adapter.restore_snapshot(mocked_input['volume'], mocked_input['snapshot']) @res_mock.patch_common_adapter def test_create_cgsnapshot(self, common_adapter, _): common_adapter.do_create_cgsnap = mock.Mock( return_value='fake_return') cg_snapshot = test_utils.create_cgsnapshot( self.ctxt, fake_constants.CONSISTENCY_GROUP_ID) vol = test_utils.create_volume(self.ctxt) snaps = [ test_utils.create_snapshot(self.ctxt, vol.id)] ret = common_adapter.create_cgsnapshot( None, cg_snapshot, snaps) self.assertEqual('fake_return', ret) common_adapter.do_create_cgsnap.assert_called_once_with( cg_snapshot.consistencygroup_id, cg_snapshot.id, snaps) @res_mock.patch_common_adapter def test_create_group_snap(self, common_adapter, _): common_adapter.do_create_cgsnap = mock.Mock( return_value='fake_return') group_snapshot = test_utils.create_group_snapshot( self.ctxt, fake_constants.GROUP_ID, group_type_id=fake_constants.VOLUME_TYPE_ID) vol = test_utils.create_volume(self.ctxt) snaps = [ test_utils.create_snapshot(self.ctxt, vol.id)] ret = common_adapter.create_group_snapshot( None, group_snapshot, snaps) self.assertEqual('fake_return', ret) 
common_adapter.do_create_cgsnap.assert_called_once_with( group_snapshot.group_id, group_snapshot.id, snaps) @res_mock.mock_driver_input @res_mock.patch_common_adapter def test_do_create_cgsnap(self, common_adapter, _, mocked_input): group_name = fake_constants.CONSISTENCY_GROUP_ID snap_name = fake_constants.CGSNAPSHOT_ID snap1 = mocked_input['snap1'] snap2 = mocked_input['snap2'] model_update, snapshots_model_update = ( common_adapter.do_create_cgsnap(group_name, snap_name, [snap1, snap2])) self.assertEqual('available', model_update['status']) for update in snapshots_model_update: self.assertEqual(fields.SnapshotStatus.AVAILABLE, update['status']) @res_mock.patch_common_adapter def test_delete_group_snapshot(self, common_adapter, _): common_adapter.do_delete_cgsnap = mock.Mock( return_value='fake_return') group_snapshot = test_utils.create_group_snapshot( self.ctxt, fake_constants.GROUP_ID, group_type_id=fake_constants.VOLUME_TYPE_ID) vol = test_utils.create_volume(self.ctxt) snaps = [ test_utils.create_snapshot(self.ctxt, vol.id)] ret = common_adapter.delete_group_snapshot( None, group_snapshot, snaps) self.assertEqual('fake_return', ret) common_adapter.do_delete_cgsnap.assert_called_once_with( group_snapshot.group_id, group_snapshot.id, group_snapshot.status, snaps) @res_mock.patch_common_adapter def test_delete_cgsnapshot(self, common_adapter, _): common_adapter.do_delete_cgsnap = mock.Mock( return_value='fake_return') cg_snapshot = test_utils.create_cgsnapshot( self.ctxt, fake_constants.CONSISTENCY_GROUP_ID) vol = test_utils.create_volume(self.ctxt) snaps = [ test_utils.create_snapshot(self.ctxt, vol.id)] ret = common_adapter.delete_cgsnapshot(None, cg_snapshot, snaps) self.assertEqual('fake_return', ret) common_adapter.do_delete_cgsnap.assert_called_once_with( cg_snapshot.consistencygroup_id, cg_snapshot.id, cg_snapshot.status, snaps) @res_mock.mock_driver_input @res_mock.patch_common_adapter def test_do_delete_cgsnap(self, common_adapter, _, mocked_input): group_name = fake_constants.CGSNAPSHOT_ID snap_name = fake_constants.CGSNAPSHOT_ID model_update, snapshot_updates = ( common_adapter.do_delete_cgsnap( group_name, snap_name, 'available', [mocked_input['snap1'], mocked_input['snap2']])) self.assertEqual('deleted', model_update['status']) for snap in snapshot_updates: self.assertEqual(fields.SnapshotStatus.DELETED, snap['status']) @res_mock.mock_driver_input @res_mock.patch_common_adapter def test_manage_existing_lun_no_exist( self, common_adapter, _ignore, mocked_input): self.assertRaises( exception.ManageExistingInvalidReference, common_adapter.manage_existing_get_size, mocked_input['volume'], {'source-name': 'fake'}) common_adapter.client.vnx.get_lun.assert_called_once_with( name='fake', lun_id=None) @res_mock.patch_common_adapter def test_manage_existing_invalid_ref( self, common_adapter, _ignore): self.assertRaises( exception.ManageExistingInvalidReference, common_adapter.manage_existing_get_size, None, {'invalidkey': 'fake'}) @res_mock.mock_driver_input @res_mock.patch_common_adapter def test_manage_existing_invalid_pool( self, common_adapter, _ignore, mocked_input): self.assertRaises( exception.ManageExistingInvalidReference, common_adapter.manage_existing_get_size, mocked_input['volume'], {'source-id': '6'}) common_adapter.client.vnx.get_lun.assert_called_once_with( lun_id='6', name=None) @res_mock.mock_driver_input @res_mock.patch_common_adapter def test_manage_existing_get_size( self, common_adapter, mocked_res, mocked_input): size = common_adapter.manage_existing_get_size( 
mocked_input['volume'], {'source-name': 'test_lun'}) self.assertEqual(size, mocked_res['lun'].total_capacity_gb) @utils.patch_extra_specs({'provisioning:type': 'thin', 'storagetype:tiering': 'auto'}) @res_mock.mock_driver_input @res_mock.patch_common_adapter def test_manage_existing_type_mismatch( self, common_adapter, mocked_res, mocked_input): self.assertRaises(exception.ManageExistingVolumeTypeMismatch, common_adapter.manage_existing, mocked_input['volume'], {'source-name': 'test_lun'}) @utils.patch_extra_specs({'provisioning:type': 'deduplicated'}) @res_mock.mock_driver_input @res_mock.patch_common_adapter def test_manage_existing( self, common_adapter, mocked_res, mocked_input): test_lun = mocked_res['lun'] common_adapter.client.get_lun = mock.Mock(return_value=test_lun) lun_name = mocked_input['volume'].name common_adapter._build_provider_location = mock.Mock( return_value="fake_pl") pl = common_adapter.manage_existing( mocked_input['volume'], {'source-name': 'test_lun'}) common_adapter._build_provider_location.assert_called_with( lun_type='lun', lun_id=1, base_lun_name=lun_name) self.assertEqual('fake_pl', pl['provider_location']) test_lun.rename.assert_called_once_with( lun_name) @res_mock.mock_driver_input @res_mock.patch_common_adapter def test_manage_existing_smp( self, common_adapter, mocked_res, mocked_input): common_adapter._build_provider_location = mock.Mock( return_value="fake_pl") pl = common_adapter.manage_existing( mocked_input['volume'], {'source-name': 'test_lun'}) common_adapter._build_provider_location.assert_called_with( lun_id=2, lun_type='smp', base_lun_name='src_lun') self.assertEqual('fake_pl', pl['provider_location']) @res_mock.patch_common_adapter def test_assure_storage_group(self, common_adapter, mocked_res): host = common.Host('host', ['initiators']) common_adapter.assure_storage_group(host) @res_mock.patch_common_adapter def test_assure_storage_group_create_new(self, common_adapter, mocked_res): host = common.Host('host', ['initiators']) common_adapter.assure_storage_group(host) common_adapter.client.vnx.create_sg.assert_called_once_with(host.name) @res_mock.mock_driver_input @res_mock.patch_common_adapter def test_assure_host_access(self, common_adapter, mocked_res, mocked_input): common_adapter.config.initiator_auto_registration = True common_adapter.max_retries = 3 common_adapter.auto_register_initiator = mock.Mock() common_adapter.client.add_lun_to_sg = mock.Mock() sg = mocked_res['sg'] host = common.Host('host', ['initiators']) cinder_volume = mocked_input['volume'] volume = common.Volume(cinder_volume.name, cinder_volume.id, common_adapter.client.get_lun_id(cinder_volume)) lun = common_adapter.client.get_lun() common_adapter.assure_host_access(sg, host, volume, True) common_adapter.auto_register_initiator.assert_called_once_with( sg, host) common_adapter.client.add_lun_to_sg.assert_called_once_with( sg, lun, common_adapter.max_retries) @res_mock.mock_driver_input @res_mock.patch_common_adapter def test_assure_host_access_without_auto_register_new_sg( self, common_adapter, mocked_res, mocked_input): common_adapter.config.initiator_auto_registration = False common_adapter.max_retries = 3 common_adapter.client.add_lun_to_sg = mock.Mock() sg = mocked_res['sg'] host = common.Host('host', ['initiators']) cinder_volume = mocked_input['volume'] volume = common.Volume(cinder_volume.name, cinder_volume.id, common_adapter.client.get_lun_id(cinder_volume)) lun = common_adapter.client.get_lun() common_adapter.assure_host_access(sg, host, volume, True) 
sg.connect_host.assert_called_once_with(host.name) common_adapter.client.add_lun_to_sg.assert_called_once_with( sg, lun, common_adapter.max_retries) @res_mock.mock_driver_input @res_mock.patch_common_adapter def test_assure_host_access_without_auto_register( self, common_adapter, mocked_res, mocked_input): common_adapter.config.initiator_auto_registration = False common_adapter.max_retries = 3 common_adapter.client.add_lun_to_sg = mock.Mock() sg = mocked_res['sg'] host = common.Host('host', ['initiators']) cinder_volume = mocked_input['volume'] volume = common.Volume(cinder_volume.name, cinder_volume.id, common_adapter.client.get_lun_id(cinder_volume)) lun = common_adapter.client.get_lun() common_adapter.assure_host_access(sg, host, volume, False) sg.connect_host.assert_not_called() common_adapter.client.add_lun_to_sg.assert_called_once_with( sg, lun, common_adapter.max_retries) @res_mock.mock_driver_input @res_mock.patch_common_adapter def test_auto_register_initiator( self, common_adapter, mocked_res, mocked_input): common_adapter.client.register_initiator = mock.Mock() common_adapter.config.io_port_list = ['a-0-0', 'a-0-1', 'a-1-0', 'b-0-1'] allowed_ports = mocked_res['allowed_ports'] common_adapter.allowed_ports = allowed_ports reg_ports = mocked_res['reg_ports'] sg = mocked_res['sg'] host = common.Host('host', ['iqn-host-1', 'iqn-reg-2']) common_adapter.auto_register_initiator(sg, host) initiator_port_map = {'iqn-host-1': set(allowed_ports), 'iqn-reg-2': set(allowed_ports) - set(reg_ports)} common_adapter.client.register_initiator.assert_called_once_with( sg, host, initiator_port_map) @res_mock.mock_driver_input @res_mock.patch_common_adapter def test_auto_register_initiator_no_white_list( self, common_adapter, mocked_res, mocked_input): for io_port_list in (None, ): common_adapter.client.register_initiator = mock.Mock() common_adapter.config.io_port_list = io_port_list allowed_ports = mocked_res['allowed_ports'] common_adapter.allowed_ports = allowed_ports sg = mocked_res['sg'] host = common.Host('host', ['iqn-host-1', 'iqn-reg-2']) common_adapter.auto_register_initiator(sg, host) initiator_port_map = {'iqn-host-1': set(allowed_ports)} common_adapter.client.register_initiator.assert_called_once_with( sg, host, initiator_port_map) @res_mock.mock_driver_input @res_mock.patch_common_adapter def test_auto_register_initiator_no_port_to_reg( self, common_adapter, mocked_res, mocked_input): common_adapter.config.io_port_list = ['a-0-0'] allowed_ports = mocked_res['allowed_ports'] common_adapter.allowed_ports = allowed_ports sg = mocked_res['sg'] host = common.Host('host', ['iqn-reg-1', 'iqn-reg-2']) with mock.patch.object(common_adapter.client, 'register_initiator'): common_adapter.auto_register_initiator(sg, host) common_adapter.client.register_initiator.assert_called_once_with( sg, host, {}) @res_mock.patch_common_adapter def test_build_provider_location(self, common_adapter, mocked_res): common_adapter.serial_number = 'vnx-serial' pl = common_adapter._build_provider_location( lun_id='fake_id', lun_type='smp', base_lun_name='fake_name') expected_pl = vnx_utils.build_provider_location( system='vnx-serial', lun_type='smp', lun_id='fake_id', base_lun_name='fake_name', version=common_adapter.VERSION) self.assertEqual(expected_pl, pl) @res_mock.mock_driver_input @res_mock.patch_common_adapter def test_remove_host_access( self, common_adapter, mocked_res, mocked_input): host = common.Host('fake_host', ['fake_initiator']) cinder_volume = mocked_input['volume'] volume = 
common.Volume(cinder_volume.name, cinder_volume.id, common_adapter.client.get_lun_id(cinder_volume)) sg = mocked_res['sg'] common_adapter.remove_host_access(volume, host, sg) @res_mock.mock_driver_input @res_mock.patch_common_adapter def test_remove_host_access_sg_absent( self, common_adapter, mocked_res, mocked_input): host = common.Host('fake_host', ['fake_initiator']) cinder_volume = mocked_input['volume'] volume = common.Volume(cinder_volume.name, cinder_volume.id, common_adapter.client.get_lun_id(cinder_volume)) sg = mocked_res['sg'] common_adapter.remove_host_access(volume, host, sg) @res_mock.mock_driver_input @res_mock.patch_common_adapter def test_remove_host_access_volume_not_in_sg( self, common_adapter, mocked_res, mocked_input): host = common.Host('fake_host', ['fake_initiator']) cinder_volume = mocked_input['volume'] volume = common.Volume(cinder_volume.name, cinder_volume.id, common_adapter.client.get_lun_id(cinder_volume)) sg = mocked_res['sg'] common_adapter.remove_host_access(volume, host, sg) @res_mock.patch_common_adapter def test_terminate_connection_cleanup_sg_absent( self, common_adapter, mocked_res): common_adapter.destroy_empty_sg = True common_adapter.itor_auto_dereg = True host = common.Host('fake_host', ['fake_initiator']) sg = mocked_res['sg'] common_adapter.terminate_connection_cleanup(host, sg) @res_mock.patch_common_adapter def test_terminate_connection_cleanup_remove_sg( self, common_adapter, mocked_res): common_adapter.destroy_empty_sg = True common_adapter.itor_auto_dereg = False host = common.Host('fake_host', ['fake_initiator']) sg = mocked_res['sg'] common_adapter.terminate_connection_cleanup(host, sg) @res_mock.patch_common_adapter def test_terminate_connection_cleanup_deregister( self, common_adapter, mocked_res): common_adapter.destroy_empty_sg = True common_adapter.itor_auto_dereg = True host = common.Host('fake_host', ['fake_initiator1', 'fake_initiator2']) sg = mocked_res['sg'] common_adapter.terminate_connection_cleanup(host, sg) common_adapter.client.vnx.delete_hba.assert_any_call( 'fake_initiator1') common_adapter.client.vnx.delete_hba.assert_any_call( 'fake_initiator2') @res_mock.patch_common_adapter def test_terminate_connection_cleanup_sg_is_not_empty( self, common_adapter, mocked_res): common_adapter.destroy_empty_sg = True common_adapter.itor_auto_dereg = True host = common.Host('fake_host', ['fake_initiator']) sg = mocked_res['sg'] common_adapter.terminate_connection_cleanup(host, sg) @res_mock.patch_common_adapter def test_set_extra_spec_defaults(self, common_adapter, mocked_res): common_adapter.set_extra_spec_defaults() self.assertEqual(storops.VNXTieringEnum.HIGH_AUTO, common.ExtraSpecs.TIER_DEFAULT) @res_mock.mock_driver_input @res_mock.patch_common_adapter def test_do_update_cg(self, common_adapter, _, mocked_input): common_adapter.client.update_consistencygroup = mock.Mock() cg = mocked_input['cg'] common_adapter.client.get_cg = mock.Mock(return_value=cg) common_adapter.do_update_cg(cg.id, [mocked_input['volume_add']], [mocked_input['volume_remove']]) common_adapter.client.update_consistencygroup.assert_called_once_with( cg, [1], [2]) @res_mock.mock_driver_input @res_mock.patch_common_adapter def test_create_export_snapshot(self, common_adapter, mocked_res, mocked_input): common_adapter.client.create_mount_point = mock.Mock() snapshot = mocked_input['snapshot'] common_adapter.create_export_snapshot(None, snapshot, None) common_adapter.client.create_mount_point.assert_called_once_with( snapshot.volume_name, 'tmp-smp-' + snapshot.id) 
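    # The export-snapshot tests above and below rely on the temporary SMP
    # naming convention ``'tmp-smp-' + snapshot.id``: the same name is used
    # when the mount point is created, attached, detached and deleted.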
@res_mock.mock_driver_input @res_mock.patch_common_adapter def test_remove_export_snapshot(self, common_adapter, mocked_res, mocked_input): common_adapter.client.delete_lun = mock.Mock() snapshot = mocked_input['snapshot'] common_adapter.remove_export_snapshot(None, snapshot) common_adapter.client.delete_lun.assert_called_once_with( 'tmp-smp-' + snapshot.id) @res_mock.mock_driver_input @res_mock.patch_common_adapter def test_initialize_connection_snapshot(self, common_adapter, mocked_res, mocked_input): common_adapter.client.attach_snapshot = mock.Mock() common_adapter._initialize_connection = mock.Mock(return_value='fake') snapshot = mocked_input['snapshot'] smp_name = 'tmp-smp-' + snapshot.id conn = common_adapter.initialize_connection_snapshot(snapshot, None) common_adapter.client.attach_snapshot.assert_called_once_with( smp_name, snapshot.name) lun = mocked_res['lun'] called_volume = common_adapter._initialize_connection.call_args[0][0] self.assertEqual((smp_name, snapshot.id, lun.lun_id), (called_volume.name, called_volume.id, called_volume.vnx_lun_id)) self.assertIsNone( common_adapter._initialize_connection.call_args[0][1]) self.assertIs(common_adapter._initialize_connection(), conn) @res_mock.mock_driver_input @res_mock.patch_common_adapter def test_terminate_connection_snapshot(self, common_adapter, mocked_res, mocked_input): common_adapter.client.detach_snapshot = mock.Mock() common_adapter._terminate_connection = mock.Mock() snapshot = mocked_input['snapshot'] smp_name = 'tmp-smp-' + snapshot.id common_adapter.terminate_connection_snapshot(snapshot, None) lun = mocked_res['lun'] called_volume = common_adapter._terminate_connection.call_args[0][0] self.assertEqual((smp_name, snapshot.id, lun.lun_id), (called_volume.name, called_volume.id, called_volume.vnx_lun_id)) self.assertIsNone(common_adapter._terminate_connection.call_args[0][1]) common_adapter.client.detach_snapshot.assert_called_once_with( smp_name) @utils.patch_extra_specs({'replication_enabled': ' True'}) @res_mock.mock_driver_input @res_mock.patch_common_adapter def test_setup_lun_replication(self, common_adapter, mocked_res, mocked_input): vol1 = mocked_input['vol1'] fake_mirror = utils.build_fake_mirror_view() fake_mirror.secondary_client.create_lun.return_value = ( mocked_res['lun']) common_adapter.mirror_view = fake_mirror common_adapter.config.replication_device = ( [utils.get_replication_device()]) rep_update = common_adapter.setup_lun_replication( vol1, 111) fake_mirror.create_mirror.assert_called_once_with( 'mirror_' + vol1.id, 111) fake_mirror.add_image.assert_called_once_with( 'mirror_' + vol1.id, mocked_res['lun'].lun_id) self.assertEqual(fields.ReplicationStatus.ENABLED, rep_update['replication_status']) @utils.patch_extra_specs({'replication_enabled': ' True'}) @utils.patch_group_specs({'consistent_group_replication_enabled': ' True'}) @res_mock.mock_driver_input @res_mock.patch_common_adapter def test_setup_lun_replication_in_group( self, common_adapter, mocked_res, mocked_input): vol1 = mocked_input['vol1'] group1 = mocked_input['group1'] vol1.group = group1 fake_mirror = utils.build_fake_mirror_view() fake_mirror.secondary_client.create_lun.return_value = ( mocked_res['lun']) common_adapter.mirror_view = fake_mirror common_adapter.config.replication_device = ( [utils.get_replication_device()]) rep_update = common_adapter.setup_lun_replication( vol1, 111) fake_mirror.create_mirror.assert_called_once_with( 'mirror_' + vol1.id, 111) fake_mirror.add_image.assert_called_once_with( 'mirror_' + vol1.id, 
mocked_res['lun'].lun_id) self.assertEqual(fields.ReplicationStatus.ENABLED, rep_update['replication_status']) @utils.patch_extra_specs({'replication_enabled': ' True'}) @res_mock.mock_driver_input @res_mock.patch_common_adapter def test_cleanup_replication(self, common_adapter, mocked_res, mocked_input): fake_mirror = utils.build_fake_mirror_view() vol1 = mocked_input['vol1'] with mock.patch.object(common_adapter, 'build_mirror_view') as fake: fake.return_value = fake_mirror common_adapter.cleanup_lun_replication(vol1) fake_mirror.destroy_mirror.assert_called_once_with( 'mirror_' + vol1.id, vol1.name) @res_mock.patch_common_adapter def test_build_mirror_view(self, common_adapter, mocked_res): common_adapter.config.replication_device = [ utils.get_replication_device()] with utils.patch_vnxsystem: mirror = common_adapter.build_mirror_view( common_adapter.config) self.assertIsNotNone(mirror) @res_mock.patch_common_adapter def test_build_mirror_view_no_device( self, common_adapter, mocked_res): common_adapter.config.replication_device = [] mirror = common_adapter.build_mirror_view( common_adapter.config) self.assertIsNone(mirror) @res_mock.patch_common_adapter def test_build_mirror_view_2_device(self, common_adapter, mocked_res): device = utils.get_replication_device() device1 = device.copy() common_adapter.config.replication_device = [device, device1] self.assertRaises(exception.InvalidInput, common_adapter.build_mirror_view, common_adapter.config) @res_mock.patch_common_adapter def test_build_mirror_view_no_enabler(self, common_adapter, mocked_res): common_adapter.config.replication_device = [ utils.get_replication_device()] self.assertRaises(exception.InvalidInput, common_adapter.build_mirror_view, common_adapter.config) @res_mock.patch_common_adapter def test_build_mirror_view_failover_false(self, common_adapter, mocked_res): common_adapter.config.replication_device = [ utils.get_replication_device()] with utils.patch_vnxsystem: failover_mirror = common_adapter.build_mirror_view( common_adapter.config, failover=False) self.assertIsNotNone(failover_mirror) @utils.patch_extra_specs({'replication_enabled': ' True'}) @res_mock.mock_driver_input @res_mock.patch_common_adapter def test_failover_host(self, common_adapter, mocked_res, mocked_input): device = utils.get_replication_device() common_adapter.config.replication_device = [device] vol1 = mocked_input['vol1'] lun1 = mocked_res['lun1'] with mock.patch.object(common_adapter, 'build_mirror_view') as fake: fake_mirror = utils.build_fake_mirror_view() fake_mirror.secondary_client.get_lun.return_value = lun1 fake_mirror.secondary_client.get_serial.return_value = ( device['backend_id']) fake.return_value = fake_mirror backend_id, updates, __ = common_adapter.failover_host( None, [vol1], device['backend_id'], []) fake_mirror.promote_image.assert_called_once_with( 'mirror_' + vol1.id) fake_mirror.secondary_client.get_serial.assert_called_with() fake_mirror.secondary_client.get_lun.assert_called_with( name=vol1.name) self.assertEqual(fake_mirror.secondary_client, common_adapter.client) self.assertEqual(device['backend_id'], common_adapter.active_backend_id) self.assertEqual(device['backend_id'], backend_id) for update in updates: self.assertEqual(fields.ReplicationStatus.FAILED_OVER, update['updates']['replication_status']) @res_mock.mock_driver_input @res_mock.patch_common_adapter def test_failover_host_invalid_backend_id(self, common_adapter, mocked_res, mocked_input): common_adapter.config.replication_device = [ utils.get_replication_device()] 
vol1 = mocked_input['vol1'] self.assertRaises(exception.InvalidReplicationTarget, common_adapter.failover_host, None, [vol1], 'new_id', []) @utils.patch_extra_specs({'replication_enabled': ' True'}) @res_mock.mock_driver_input @res_mock.patch_common_adapter def test_failover_host_failback(self, common_adapter, mocked_res, mocked_input): device = utils.get_replication_device() common_adapter.config.replication_device = [device] common_adapter.active_backend_id = device['backend_id'] vol1 = mocked_input['vol1'] lun1 = mocked_res['lun1'] with mock.patch.object(common_adapter, 'build_mirror_view') as fake: fake_mirror = utils.build_fake_mirror_view() fake_mirror.secondary_client.get_lun.return_value = lun1 fake_mirror.secondary_client.get_serial.return_value = ( device['backend_id']) fake.return_value = fake_mirror backend_id, updates, __ = common_adapter.failover_host( None, [vol1], 'default', []) fake_mirror.promote_image.assert_called_once_with( 'mirror_' + vol1.id) fake_mirror.secondary_client.get_serial.assert_called_with() fake_mirror.secondary_client.get_lun.assert_called_with( name=vol1.name) self.assertEqual(fake_mirror.secondary_client, common_adapter.client) self.assertIsNone(common_adapter.active_backend_id) self.assertFalse(fake_mirror.primary_client == fake_mirror.secondary_client) self.assertEqual('default', backend_id) for update in updates: self.assertEqual(fields.ReplicationStatus.ENABLED, update['updates']['replication_status']) @utils.patch_group_specs({'consistent_group_replication_enabled': ' True'}) @res_mock.mock_driver_input @res_mock.patch_common_adapter def test_failover_host_groups(self, common_adapter, mocked_res, mocked_input): device = utils.get_replication_device() common_adapter.config.replication_device = [device] common_adapter.active_backend_id = device['backend_id'] mocked_group = mocked_input['group1'] group1 = mock.Mock() group1.id = mocked_group.id group1.replication_status = mocked_group.replication_status group1.volumes = [mocked_input['vol1'], mocked_input['vol2']] lun1 = mocked_res['lun1'] with mock.patch.object(common_adapter, 'build_mirror_view') as fake: fake_mirror = utils.build_fake_mirror_view() fake_mirror.secondary_client.get_lun.return_value = lun1 fake_mirror.secondary_client.get_serial.return_value = ( device['backend_id']) fake.return_value = fake_mirror backend_id, updates, group_update_list = ( common_adapter.failover_host(None, [], 'default', [group1])) fake_mirror.promote_mirror_group.assert_called_once_with( group1.id.replace('-', '')) fake_mirror.secondary_client.get_serial.assert_called_with() fake_mirror.secondary_client.get_lun.assert_called_with( name=mocked_input['vol1'].name) self.assertEqual(fake_mirror.secondary_client, common_adapter.client) self.assertEqual([{ 'group_id': group1.id, 'updates': {'replication_status': fields.ReplicationStatus.ENABLED}}], group_update_list) self.assertEqual(2, len(updates)) self.assertIsNone(common_adapter.active_backend_id) self.assertEqual('default', backend_id) for update in updates: self.assertEqual(fields.ReplicationStatus.ENABLED, update['updates']['replication_status']) @res_mock.mock_driver_input @res_mock.patch_common_adapter def test_get_pool_name(self, common_adapter, mocked_res, mocked_input): self.assertEqual(mocked_res['lun'].pool_name, common_adapter.get_pool_name(mocked_input['volume'])) @res_mock.mock_driver_input @res_mock.patch_common_adapter def test_update_migrated_volume(self, common_adapter, mocked_res, mocked_input): data = common_adapter.update_migrated_volume( None, 
mocked_input['volume'], mocked_input['new_volume']) self.assertEqual(mocked_input['new_volume'].provider_location, data['provider_location']) self.assertEqual('False', data['metadata']['snapcopy']) @res_mock.mock_driver_input @res_mock.patch_common_adapter def test_update_migrated_volume_smp(self, common_adapter, mocked_res, mocked_input): data = common_adapter.update_migrated_volume( None, mocked_input['volume'], mocked_input['new_volume']) self.assertEqual(mocked_input['new_volume'].provider_location, data['provider_location']) self.assertEqual('True', data['metadata']['snapcopy']) @res_mock.patch_common_adapter def test_normalize_config_queue_path(self, common_adapter, mocked_res): common_adapter._normalize_config() self.assertEqual(os.path.join(cfg.CONF.state_path, 'vnx', 'vnx_backend'), common_adapter.queue_path) @res_mock.patch_common_adapter def test_normalize_config_naviseccli_path(self, common_adapter, mocked_res): old_value = common_adapter.config.naviseccli_path common_adapter._normalize_config() self.assertEqual(old_value, common_adapter.config.naviseccli_path) @res_mock.patch_common_adapter def test_normalize_config_naviseccli_path_none(self, common_adapter, mocked_res): common_adapter.config.naviseccli_path = "" common_adapter._normalize_config() self.assertIsNone(common_adapter.config.naviseccli_path) common_adapter.config.naviseccli_path = " " common_adapter._normalize_config() self.assertIsNone(common_adapter.config.naviseccli_path) common_adapter.config.naviseccli_path = None common_adapter._normalize_config() self.assertIsNone(common_adapter.config.naviseccli_path) @res_mock.patch_common_adapter def test_normalize_config_pool_names(self, common_adapter, mocked_res): common_adapter.config.storage_vnx_pool_names = [ 'pool_1', ' pool_2 ', '', ' '] common_adapter._normalize_config() self.assertEqual(['pool_1', 'pool_2'], common_adapter.config.storage_vnx_pool_names) @res_mock.patch_common_adapter def test_normalize_config_pool_names_none(self, common_adapter, mocked_res): common_adapter.config.storage_vnx_pool_names = None common_adapter._normalize_config() self.assertIsNone(common_adapter.config.storage_vnx_pool_names) @res_mock.patch_common_adapter def test_normalize_config_pool_names_empty_list(self, common_adapter, mocked_res): common_adapter.config.storage_vnx_pool_names = [] self.assertRaises(exception.InvalidConfigurationValue, common_adapter._normalize_config) common_adapter.config.storage_vnx_pool_names = [' ', ''] self.assertRaises(exception.InvalidConfigurationValue, common_adapter._normalize_config) @res_mock.patch_common_adapter def test_normalize_config_io_port_list(self, common_adapter, mocked_res): common_adapter.config.io_port_list = [ 'a-0-1', ' b-1 ', '', ' '] common_adapter._normalize_config() self.assertEqual(['A-0-1', 'B-1'], common_adapter.config.io_port_list) @res_mock.patch_common_adapter def test_normalize_config_io_port_list_none(self, common_adapter, mocked_res): common_adapter.config.io_port_list = None common_adapter._normalize_config() self.assertIsNone(common_adapter.config.io_port_list) @res_mock.patch_common_adapter def test_normalize_config_io_port_list_empty_list(self, common_adapter, mocked_res): common_adapter.config.io_port_list = [] self.assertRaises(exception.InvalidConfigurationValue, common_adapter._normalize_config) common_adapter.config.io_port_list = [' ', ''] self.assertRaises(exception.InvalidConfigurationValue, common_adapter._normalize_config) class TestISCSIAdapter(test_base.TestCase): STORAGE_PROTOCOL = 
common.PROTOCOL_ISCSI def setUp(self): super(TestISCSIAdapter, self).setUp() vnx_utils.init_ops(self.configuration) self.configuration.storage_protocol = self.STORAGE_PROTOCOL @res_mock.patch_iscsi_adapter def test_validate_ports_iscsi(self, vnx_iscsi, mocked): all_iscsi_ports = vnx_iscsi.client.get_iscsi_targets() valid_ports = vnx_iscsi.validate_ports(all_iscsi_ports, ['A-0-0']) self.assertEqual([mocked['iscsi_port_a-0-0']], valid_ports) @res_mock.patch_iscsi_adapter def test_validate_ports_iscsi_invalid(self, vnx_iscsi, mocked): invalid_white_list = ['A-0-0', 'A-B-0'] all_iscsi_ports = vnx_iscsi.client.get_iscsi_targets() self.assertRaisesRegex( exception.VolumeBackendAPIException, 'Invalid iscsi ports %s specified for io_port_list.' % 'A-B-0', vnx_iscsi.validate_ports, all_iscsi_ports, invalid_white_list) @res_mock.patch_iscsi_adapter def test_validate_ports_iscsi_not_exist(self, vnx_iscsi, mocked): nonexistent_ports = ['A-0-0', 'A-6-1'] all_iscsi_ports = vnx_iscsi.client.get_iscsi_targets() self.assertRaisesRegex( exception.VolumeBackendAPIException, 'Invalid iscsi ports %s specified for io_port_list' % 'A-6-1', vnx_iscsi.validate_ports, all_iscsi_ports, nonexistent_ports) @res_mock.patch_iscsi_adapter def test_update_volume_stats_iscsi(self, vnx_iscsi, mocked): with mock.patch.object(adapter.CommonAdapter, 'update_volume_stats', return_value={'storage_protocol': self.STORAGE_PROTOCOL}): stats = vnx_iscsi.update_volume_stats() self.assertEqual(self.STORAGE_PROTOCOL, stats['storage_protocol']) self.assertEqual('VNXISCSIDriver', stats['volume_backend_name']) @res_mock.patch_iscsi_adapter def test_build_terminate_connection_return_data_iscsi( self, vnx_iscsi, mocked): re = vnx_iscsi.build_terminate_connection_return_data(None, None) self.assertIsNone(re) @res_mock.patch_iscsi_adapter def test_normalize_config_iscsi_initiators( self, vnx_iscsi, mocked): vnx_iscsi.config.iscsi_initiators = ( '{"host1":["10.0.0.1", "10.0.0.2"],"host2":["10.0.0.3"]}') vnx_iscsi._normalize_config() expected = {"host1": ["10.0.0.1", "10.0.0.2"], "host2": ["10.0.0.3"]} self.assertEqual(expected, vnx_iscsi.config.iscsi_initiators) vnx_iscsi.config.iscsi_initiators = '{}' vnx_iscsi._normalize_config() expected = {} self.assertEqual(expected, vnx_iscsi.config.iscsi_initiators) @res_mock.patch_iscsi_adapter def test_normalize_config_iscsi_initiators_none( self, vnx_iscsi, mocked): vnx_iscsi.config.iscsi_initiators = None vnx_iscsi._normalize_config() self.assertIsNone(vnx_iscsi.config.iscsi_initiators) @res_mock.patch_iscsi_adapter def test_normalize_config_iscsi_initiators_empty_str( self, vnx_iscsi, mocked): vnx_iscsi.config.iscsi_initiators = '' self.assertRaises(exception.InvalidConfigurationValue, vnx_iscsi._normalize_config) vnx_iscsi.config.iscsi_initiators = ' ' self.assertRaises(exception.InvalidConfigurationValue, vnx_iscsi._normalize_config) @res_mock.patch_iscsi_adapter def test_normalize_config_iscsi_initiators_not_dict( self, vnx_iscsi, mocked): vnx_iscsi.config.iscsi_initiators = '["a", "b"]' self.assertRaises(exception.InvalidConfigurationValue, vnx_iscsi._normalize_config) @res_mock.mock_driver_input @res_mock.patch_iscsi_adapter def test_terminate_connection(self, adapter, mocked_res, mocked_input): cinder_volume = mocked_input['volume'] connector = mocked_input['connector'] adapter.remove_host_access = mock.Mock() adapter.update_storage_group_if_required = mock.Mock() adapter.build_terminate_connection_return_data = mock.Mock() adapter.terminate_connection_cleanup = mock.Mock() 
        adapter.terminate_connection(cinder_volume, connector)
        adapter.remove_host_access.assert_called_once()
        adapter.update_storage_group_if_required.assert_called_once()
        adapter.build_terminate_connection_return_data \
            .assert_called_once()
        adapter.terminate_connection_cleanup.assert_called_once()

    @res_mock.mock_driver_input
    @res_mock.patch_iscsi_adapter
    def test_terminate_connection_force_detach(self, adapter, mocked_res,
                                               mocked_input):
        cinder_volume = mocked_input['volume']
        connector = None
        adapter.remove_host_access = mock.Mock()
        adapter.update_storage_group_if_required = mock.Mock()
        adapter.build_terminate_connection_return_data = mock.Mock()
        adapter.terminate_connection_cleanup = mock.Mock()
        adapter.terminate_connection(cinder_volume, connector)
        adapter.remove_host_access.assert_called()
        adapter.update_storage_group_if_required.assert_called()
        adapter.build_terminate_connection_return_data \
            .assert_not_called()
        adapter.terminate_connection_cleanup.assert_called()


class TestFCAdapter(test_base.TestCase):
    STORAGE_PROTOCOL = common.PROTOCOL_FC

    def setUp(self):
        super(TestFCAdapter, self).setUp()
        vnx_utils.init_ops(self.configuration)
        self.configuration.storage_protocol = self.STORAGE_PROTOCOL

    @res_mock.patch_fc_adapter
    def test_validate_ports_fc(self, vnx_fc, mocked):
        all_fc_ports = vnx_fc.client.get_fc_targets()
        valid_ports = vnx_fc.validate_ports(all_fc_ports, ['A-1'])
        self.assertEqual([mocked['fc_port_a-1']], valid_ports)

    @res_mock.patch_fc_adapter
    def test_validate_ports_fc_invalid(self, vnx_fc, mocked):
        invalid_white_list = ['A-1', 'A-B']
        all_fc_ports = vnx_fc.client.get_fc_targets()
        self.assertRaisesRegex(
            exception.VolumeBackendAPIException,
            'Invalid fc ports %s specified for io_port_list.' % 'A-B',
            vnx_fc.validate_ports,
            all_fc_ports,
            invalid_white_list)

    @res_mock.patch_fc_adapter
    def test_validate_ports_fc_not_exist(self, vnx_fc, mocked):
        nonexistent_ports = ['A-1', 'A-6']
        all_fc_ports = vnx_fc.client.get_fc_targets()
        self.assertRaisesRegex(
            exception.VolumeBackendAPIException,
            'Invalid fc ports %s specified for io_port_list' % 'A-6',
            vnx_fc.validate_ports,
            all_fc_ports,
            nonexistent_ports)

    @res_mock.patch_fc_adapter
    def test_update_volume_stats(self, vnx_fc, mocked):
        with mock.patch.object(adapter.CommonAdapter, 'get_pool_stats'):
            stats = vnx_fc.update_volume_stats()
        self.assertEqual(self.STORAGE_PROTOCOL, stats['storage_protocol'])
        self.assertEqual('VNXFCDriver', stats['volume_backend_name'])

    @mock.patch.object(vnx_utils, 'convert_to_tgt_list_and_itor_tgt_map')
    @res_mock.patch_fc_adapter
    def test_build_terminate_connection_return_data_auto_zone(
            self, vnx_fc, mocked, converter):
        vnx_fc.lookup_service = mock.Mock()
        get_mapping = vnx_fc.lookup_service.get_device_mapping_from_network
        itor_tgt_map = {
            'wwn1': ['wwnt1', 'wwnt2', 'wwnt3'],
            'wwn2': ['wwnt1', 'wwnt2']
        }
        converter.return_value = ([], itor_tgt_map)
        host = common.Host('fake_host', ['fake_hba1'],
                           wwpns=['wwn1', 'wwn2'])
        sg = mocked['sg']
        re = vnx_fc.build_terminate_connection_return_data(host, sg)
        get_mapping.assert_called_once_with(
            ['wwn1', 'wwn2'], ['5006016636E01CA1'])
        self.assertEqual(itor_tgt_map, re['data']['initiator_target_map'])

    @res_mock.patch_fc_adapter
    def test_build_terminate_connection_return_data_sg_absent(
            self, vnx_fc, mocked):
        sg = mocked['sg']
        re = vnx_fc.build_terminate_connection_return_data(None, sg)
        self.assertEqual('fibre_channel', re['driver_volume_type'])
        self.assertEqual({}, re['data'])

    @res_mock.patch_fc_adapter
    def test_build_terminate_connection_return_data_without_autozone(
            self, vnx_fc, mocked):
        self.lookup_service = None
        re = vnx_fc.build_terminate_connection_return_data(None, None)
        self.assertEqual('fibre_channel', re['driver_volume_type'])
        self.assertEqual({}, re['data'])

    @res_mock.patch_fc_adapter
    def test_get_tgt_list_and_initiator_tgt_map_allow_port_only(
            self, vnx_fc, mocked):
        sg = mocked['sg']
        host = common.Host('fake_host', ['fake_hba1'],
                           wwpns=['wwn1', 'wwn2'])
        mapping = {
            'san_1': {'initiator_port_wwn_list': ['wwn1'],
                      'target_port_wwn_list': ['5006016636E01CB2']}}
        vnx_fc.lookup_service = mock.Mock()
        vnx_fc.lookup_service.get_device_mapping_from_network = mock.Mock(
            return_value=mapping)
        get_mapping = vnx_fc.lookup_service.get_device_mapping_from_network
        vnx_fc.allowed_ports = mocked['adapter'].allowed_ports
        targets, tgt_map = vnx_fc._get_tgt_list_and_initiator_tgt_map(
            sg, host, True)
        self.assertEqual(['5006016636E01CB2'], targets)
        self.assertEqual({'wwn1': ['5006016636E01CB2']}, tgt_map)
        get_mapping.assert_called_once_with(
            ['wwn1', 'wwn2'], ['5006016636E01CB2'])

    @res_mock.mock_driver_input
    @res_mock.patch_iscsi_adapter
    def test_terminate_connection(self, adapter, mocked_res, mocked_input):
        cinder_volume = mocked_input['volume']
        connector = mocked_input['connector']
        adapter.remove_host_access = mock.Mock()
        adapter.update_storage_group_if_required = mock.Mock()
        adapter.build_terminate_connection_return_data = mock.Mock()
        adapter.terminate_connection_cleanup = mock.Mock()
        adapter.terminate_connection(cinder_volume, connector)
        adapter.remove_host_access.assert_called_once()
        adapter.update_storage_group_if_required.assert_called_once()
        adapter.build_terminate_connection_return_data \
            .assert_called_once()
        adapter.terminate_connection_cleanup.assert_called_once()

    @res_mock.mock_driver_input
    @res_mock.patch_iscsi_adapter
    def test_terminate_connection_force_detach(self, adapter, mocked_res,
                                               mocked_input):
        cinder_volume = mocked_input['volume']
        connector = None
        adapter.remove_host_access = mock.Mock()
        adapter.update_storage_group_if_required = mock.Mock()
        adapter.build_terminate_connection_return_data = mock.Mock()
        adapter.terminate_connection_cleanup = mock.Mock()
        adapter.terminate_connection(cinder_volume, connector)
        adapter.remove_host_access.assert_called()
        adapter.update_storage_group_if_required.assert_called()
        adapter.build_terminate_connection_return_data \
            .assert_not_called()
        adapter.terminate_connection_cleanup.assert_called()

cinder-27.0.0/cinder/tests/unit/volume/drivers/dell_emc/vnx/test_base.py

# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Common VNC test needs.""" from cinder import context from cinder.tests.unit import test from cinder.volume import configuration as conf from cinder.volume.drivers.dell_emc.vnx import common class TestCase(test.TestCase): def setUp(self): super(TestCase, self).setUp() self.configuration = conf.Configuration(None) self.configuration.san_ip = '192.168.1.1' self.configuration.storage_vnx_authentication_type = 'global' self.configuration.config_group = 'vnx_backend' self.ctxt = context.get_admin_context() common.DEFAULT_TIMEOUT = 0 common.INTERVAL_30_SEC = 0 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/volume/drivers/dell_emc/vnx/test_client.py0000664000175000017500000005164600000000000026723 0ustar00zuulzuul00000000000000# Copyright (c) 2016 EMC Corporation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import unittest from cinder import exception from cinder.tests.unit.volume.drivers.dell_emc.vnx import fake_exception \ as storops_ex from cinder.tests.unit.volume.drivers.dell_emc.vnx import fake_storops \ as storops from cinder.tests.unit.volume.drivers.dell_emc.vnx import res_mock from cinder.tests.unit.volume.drivers.dell_emc.vnx import test_base from cinder.tests.unit.volume.drivers.dell_emc.vnx import utils from cinder.volume.drivers.dell_emc.vnx import client as vnx_client from cinder.volume.drivers.dell_emc.vnx import common as vnx_common class TestCondition(test_base.TestCase): @res_mock.patch_client def test_is_lun_io_ready_false(self, client, mocked): r = vnx_client.Condition.is_lun_io_ready(mocked['lun']) self.assertFalse(r) @res_mock.patch_client def test_is_lun_io_ready_true(self, client, mocked): r = vnx_client.Condition.is_lun_io_ready(mocked['lun']) self.assertTrue(r) @res_mock.patch_client def test_is_lun_io_ready_exception(self, client, mocked): self.assertRaises(exception.VolumeBackendAPIException, vnx_client.Condition.is_lun_io_ready, mocked['lun']) class TestClient(test_base.TestCase): @res_mock.patch_client def test_create_lun(self, client, mocked): client.create_lun(pool='pool1', name='test', size=1, provision=None, tier=None, cg_id=None, ignore_thresholds=False) client.vnx.get_pool.assert_called_once_with(name='pool1') pool = client.vnx.get_pool(name='pool1') pool.create_lun.assert_called_with(lun_name='test', size_gb=1, provision=None, tier=None, ignore_thresholds=False) @res_mock.patch_client def test_create_lun_error(self, client, mocked): self.assertRaises(storops_ex.VNXCreateLunError, client.create_lun, pool='pool1', name='test', size=1, provision=None, tier=None, cg_id=None, ignore_thresholds=False) client.vnx.get_pool.assert_called_once_with(name='pool1') @res_mock.patch_client def test_create_lun_already_existed(self, client, mocked): client.create_lun(pool='pool1', name='lun3', size=1, provision=None, tier=None, cg_id=None, ignore_thresholds=False) client.vnx.get_lun.assert_called_once_with(name='lun3') @res_mock.patch_client def 
test_create_lun_in_cg(self, client, mocked): client.create_lun( pool='pool1', name='test', size=1, provision=None, tier=None, cg_id='cg1', ignore_thresholds=False) @res_mock.patch_client def test_create_lun_compression(self, client, mocked): client.create_lun(pool='pool1', name='lun2', size=1, provision=storops.VNXProvisionEnum.COMPRESSED, tier=None, cg_id=None, ignore_thresholds=False) @res_mock.patch_client def test_migrate_lun(self, client, mocked): client.migrate_lun(src_id=1, dst_id=2) lun = client.vnx.get_lun() lun.migrate.assert_called_with(2, storops.VNXMigrationRate.HIGH) @unittest.skip("Skip until bug #1578986 is fixed") @utils.patch_sleep @res_mock.patch_client def test_migrate_lun_with_retry(self, client, mocked, mock_sleep): lun = client.vnx.get_lun() self.assertRaises(storops_ex.VNXTargetNotReadyError, client.migrate_lun, src_id=4, dst_id=5) lun.migrate.assert_called_with(5, storops.VNXMigrationRate.HIGH) @res_mock.patch_client def test_session_finished_faulted(self, client, mocked): lun = client.vnx.get_lun() r = client.session_finished(lun) self.assertTrue(r) @res_mock.patch_client def test_session_finished_migrating(self, client, mocked): lun = client.vnx.get_lun() r = client.session_finished(lun) self.assertFalse(r) @res_mock.patch_client def test_session_finished_not_existed(self, client, mocked): lun = client.vnx.get_lun() r = client.session_finished(lun) self.assertTrue(r) @res_mock.patch_client def test_migrate_lun_error(self, client, mocked): lun = client.vnx.get_lun() self.assertRaises(storops_ex.VNXMigrationError, client.migrate_lun, src_id=4, dst_id=5) lun.migrate.assert_called_with(5, storops.VNXMigrationRate.HIGH) @res_mock.patch_client def test_verify_migration(self, client, mocked): r = client.verify_migration(1, 2, 'test_wwn') self.assertTrue(r) @res_mock.patch_client def test_verify_migration_false(self, client, mocked): r = client.verify_migration(1, 2, 'fake_wwn') self.assertFalse(r) @res_mock.patch_client def test_cleanup_migration(self, client, mocked): client.cleanup_migration(1, 2) @res_mock.patch_client def test_cleanup_migration_not_migrating(self, client, mocked): client.cleanup_migration(1, 2) @res_mock.patch_client def test_cleanup_migration_cancel_failed(self, client, mocked): client.cleanup_migration(1, 2) @res_mock.patch_client def test_get_lun_by_name(self, client, mocked): lun = client.get_lun(name='lun_name_test_get_lun_by_name') self.assertEqual(888, lun.lun_id) @res_mock.patch_client def test_delete_lun(self, client, mocked): client.delete_lun(mocked['lun'].name) @res_mock.patch_client def test_delete_smp(self, client, mocked): client.delete_lun(mocked['lun'].name, snap_copy='snap-as-vol') @res_mock.patch_client def test_delete_lun_not_exist(self, client, mocked): client.delete_lun(mocked['lun'].name) @res_mock.patch_client def test_delete_lun_exception(self, client, mocked): self.assertRaisesRegex(storops_ex.VNXDeleteLunError, 'General lun delete error.', client.delete_lun, mocked['lun'].name) @res_mock.patch_client def test_cleanup_async_lun(self, client, mocked): client.cleanup_async_lun( mocked['lun'].name, force=True) @res_mock.patch_client def test_enable_compression(self, client, mocked): lun_obj = mocked['lun'] client.enable_compression(lun_obj) lun_obj.enable_compression.assert_called_with(ignore_thresholds=True) @res_mock.patch_client def test_enable_compression_on_compressed_lun(self, client, mocked): lun_obj = mocked['lun'] client.enable_compression(lun_obj) @res_mock.patch_client def test_get_vnx_enabler_status(self, client, 
mocked): re = client.get_vnx_enabler_status() self.assertTrue(re.dedup_enabled) self.assertFalse(re.compression_enabled) self.assertTrue(re.thin_enabled) self.assertFalse(re.fast_enabled) self.assertTrue(re.snap_enabled) @res_mock.patch_client def test_lun_has_snapshot_true(self, client, mocked): re = client.lun_has_snapshot(mocked['lun']) self.assertTrue(re) @res_mock.patch_client def test_lun_has_snapshot_false(self, client, mocked): re = client.lun_has_snapshot(mocked['lun']) self.assertFalse(re) @res_mock.patch_client def test_create_cg(self, client, mocked): cg = client.create_consistency_group('cg_name') self.assertIsNotNone(cg) @res_mock.patch_client def test_create_cg_already_existed(self, client, mocked): cg = client.create_consistency_group('cg_name_already_existed') self.assertIsNotNone(cg) @res_mock.patch_client def test_delete_cg(self, client, mocked): client.delete_consistency_group('deleted_name') @res_mock.patch_client def test_delete_cg_not_existed(self, client, mocked): client.delete_consistency_group('not_existed') @res_mock.patch_client def test_expand_lun(self, client, _ignore): client.expand_lun('lun', 10, poll=True) @res_mock.patch_client def test_expand_lun_not_poll(self, client, _ignore): client.expand_lun('lun', 10, poll=False) @res_mock.patch_client def test_expand_lun_already_expanded(self, client, _ignore): client.expand_lun('lun', 10) @res_mock.patch_client def test_expand_lun_not_ops_ready(self, client, _ignore): self.assertRaises(storops_ex.VNXLunPreparingError, client.expand_lun, 'lun', 10) lun = client.vnx.get_lun() lun.expand.assert_called_once_with(10, ignore_thresholds=True) # Called twice lun.expand.assert_called_once_with(10, ignore_thresholds=True) @res_mock.patch_client def test_create_snapshot(self, client, _ignore): client.create_snapshot('lun_test_create_snapshot', 'snap_test_create_snapshot') lun = client.vnx.get_lun() lun.create_snap.assert_called_once_with('snap_test_create_snapshot', allow_rw=True, auto_delete=False, keep_for=None) @res_mock.patch_client def test_create_snapshot_snap_name_exist_error(self, client, _ignore): client.create_snapshot('lun_name', 'snapshot_name') @res_mock.patch_client def test_delete_snapshot(self, client, _ignore): client.delete_snapshot('snapshot_name') @res_mock.patch_client def test_delete_snapshot_delete_attached_error(self, client, _ignore): self.assertRaises(storops_ex.VNXDeleteAttachedSnapError, client.delete_snapshot, 'snapshot_name') @res_mock.patch_client def test_copy_snapshot(self, client, mocked): client.copy_snapshot('old_name', 'new_name') @res_mock.patch_client def test_create_mount_point(self, client, mocked): client.create_mount_point('lun_name', 'smp_name') @res_mock.patch_client def test_attach_mount_point(self, client, mocked): client.attach_snapshot('smp_name', 'snap_name') @res_mock.patch_client def test_detach_mount_point(self, client, mocked): client.detach_snapshot('smp_name') @res_mock.patch_client def test_modify_snapshot(self, client, mocked): client.modify_snapshot('snap_name', True, True) @res_mock.patch_client def test_restore_snapshot(self, client, mocked): client.restore_snapshot('lun-id', 'snap_name') @res_mock.patch_client def test_create_cg_snapshot(self, client, mocked): snap = client.create_cg_snapshot('cg_snap_name', 'cg_name') self.assertIsNotNone(snap) @res_mock.patch_client def test_create_cg_snapshot_already_existed(self, client, mocked): snap = client.create_cg_snapshot('cg_snap_name', 'cg_name') self.assertIsNotNone(snap) @res_mock.patch_client def 
test_delete_cg_snapshot(self, client, mocked): client.delete_cg_snapshot(cg_snap_name='test_snap') @res_mock.patch_client def test_create_sg(self, client, mocked): client.create_storage_group('sg_name') @res_mock.patch_client def test_create_sg_name_in_use(self, client, mocked): client.create_storage_group('sg_name') self.assertIsNotNone(client.sg_cache['sg_name']) self.assertTrue(client.sg_cache['sg_name'].existed) @res_mock.patch_client def test_get_storage_group(self, client, mocked): sg = client.get_storage_group('sg_name') self.assertEqual('sg_name', sg.name) @res_mock.patch_client def test_register_initiator(self, client, mocked): host = vnx_common.Host('host_name', ['host_initiator'], 'host_ip') client.register_initiator(mocked['sg'], host, {'host_initiator': 'port_1'}) @res_mock.patch_client def test_register_initiator_exception(self, client, mocked): host = vnx_common.Host('host_name', ['host_initiator'], 'host_ip') client.register_initiator(mocked['sg'], host, {'host_initiator': 'port_1'}) @res_mock.patch_client def test_ping_node(self, client, mocked): self.assertTrue(client.ping_node(mocked['iscsi_port'], 'ip')) @res_mock.patch_client def test_ping_node_fail(self, client, mocked): self.assertFalse(client.ping_node(mocked['iscsi_port'], 'ip')) @res_mock.patch_client def test_add_lun_to_sg(self, client, mocked): lun = 'not_care' self.assertEqual(1, client.add_lun_to_sg(mocked['sg'], lun, 3)) @res_mock.patch_client def test_add_lun_to_sg_alu_already_attached(self, client, mocked): lun = 'not_care' self.assertEqual(1, client.add_lun_to_sg(mocked['sg'], lun, 3)) @res_mock.patch_client def test_add_lun_to_sg_alu_in_use(self, client, mocked): self.assertRaisesRegex(storops_ex.VNXNoHluAvailableError, 'No HLU available.', client.add_lun_to_sg, mocked['sg'], mocked['lun'], 3) @res_mock.patch_client def test_update_consistencygroup_no_lun_in_cg(self, client, mocked): lun_1 = mocked['lun_1'] lun_2 = mocked['lun_2'] def _get_lun(lun_id): return [x for x in (lun_1, lun_2) if x.lun_id == lun_id][0] client.get_lun = _get_lun cg = mocked['cg'] client.update_consistencygroup(cg, [lun_1.lun_id, lun_2.lun_id], []) cg.replace_member.assert_called_once_with(lun_1, lun_2) @res_mock.patch_client def test_update_consistencygroup_lun_in_cg(self, client, mocked): lun_1 = mocked['lun_1'] lun_2 = mocked['lun_2'] def _get_lun(lun_id): return [x for x in (lun_1, lun_2) if x.lun_id == lun_id][0] client.get_lun = _get_lun cg = mocked['cg'] client.update_consistencygroup(cg, [lun_2.lun_id], [lun_1.lun_id]) cg.replace_member.assert_called_once_with(lun_2) @res_mock.patch_client def test_update_consistencygroup_remove_all(self, client, mocked): lun_1 = mocked['lun_1'] def _get_lun(lun_id): return [x for x in (lun_1,) if x.lun_id == lun_id][0] client.get_lun = _get_lun cg = mocked['cg'] client.update_consistencygroup(cg, [], [lun_1.lun_id]) cg.delete_member.assert_called_once_with(lun_1) @res_mock.patch_client def test_get_available_ip(self, client, mocked): ip = client.get_available_ip() self.assertEqual('192.168.1.5', ip) @res_mock.patch_client def test_create_mirror(self, client, mocked): mv = client.create_mirror('test_mirror_name', 11) self.assertIsNotNone(mv) @res_mock.patch_client def test_create_mirror_already_created(self, client, mocked): mv = client.create_mirror('error_mirror', 12) self.assertIsNotNone(mv) @res_mock.patch_client def test_delete_mirror(self, client, mocked): client.delete_mirror('mirror_name') @res_mock.patch_client def test_delete_mirror_already_deleted(self, client, mocked): 
client.delete_mirror('mirror_name_deleted') @res_mock.patch_client def test_add_image(self, client, mocked): client.add_image('mirror_namex', '192.168.1.11', 31) @res_mock.patch_client def test_remove_image(self, client, mocked): client.remove_image('mirror_remove') @res_mock.patch_client def test_fracture_image(self, client, mocked): client.fracture_image('mirror_fracture') @res_mock.patch_client def test_sync_image(self, client, mocked): client.sync_image('mirror_sync') @res_mock.patch_client def test_promote_image(self, client, mocked): client.promote_image('mirror_promote') @res_mock.patch_client def test_create_mirror_group(self, client, mocked): group_name = 'test_mg' mg = client.create_mirror_group(group_name) self.assertIsNotNone(mg) @res_mock.patch_client def test_create_mirror_group_name_in_use(self, client, mocked): group_name = 'test_mg_name_in_use' mg = client.create_mirror_group(group_name) self.assertIsNotNone(mg) @res_mock.patch_client def test_delete_mirror_group(self, client, mocked): group_name = 'delete_name' client.delete_mirror_group(group_name) @res_mock.patch_client def test_delete_mirror_group_not_found(self, client, mocked): group_name = 'group_not_found' client.delete_mirror_group(group_name) @res_mock.patch_client def test_add_mirror(self, client, mocked): group_name = 'group_add_mirror' mirror_name = 'mirror_name' client.add_mirror(group_name, mirror_name) @res_mock.patch_client def test_add_mirror_already_added(self, client, mocked): group_name = 'group_already_added' mirror_name = 'mirror_name' client.add_mirror(group_name, mirror_name) @res_mock.patch_client def test_remove_mirror(self, client, mocked): group_name = 'group_mirror' mirror_name = 'mirror_name' client.remove_mirror(group_name, mirror_name) @res_mock.patch_client def test_remove_mirror_not_member(self, client, mocked): group_name = 'group_mirror' mirror_name = 'mirror_name_not_member' client.remove_mirror(group_name, mirror_name) @res_mock.patch_client def test_promote_mirror_group(self, client, mocked): group_name = 'group_promote' client.promote_mirror_group(group_name) @res_mock.patch_client def test_promote_mirror_group_already_promoted(self, client, mocked): group_name = 'group_promote' client.promote_mirror_group(group_name) @res_mock.patch_client def test_sync_mirror_group(self, client, mocked): group_name = 'group_sync' client.sync_mirror_group(group_name) @res_mock.patch_client def test_fracture_mirror_group(self, client, mocked): group_name = 'group_fracture' client.fracture_mirror_group(group_name) @res_mock.mock_driver_input @res_mock.patch_client def test_get_lun_id(self, client, mocked, cinder_input): lun_id = client.get_lun_id(cinder_input['volume']) self.assertEqual(1, lun_id) @res_mock.mock_driver_input @res_mock.patch_client def test_get_lun_id_without_provider_location(self, client, mocked, cinder_input): lun_id = client.get_lun_id(cinder_input['volume']) self.assertIsInstance(lun_id, int) self.assertEqual(mocked['lun'].lun_id, lun_id) @res_mock.patch_client def test_get_ioclass(self, client, mocked): qos_specs = {'id': 'qos', vnx_common.QOS_MAX_IOPS: 10, vnx_common.QOS_MAX_BWS: 100} ioclasses = client.get_ioclass(qos_specs) self.assertEqual(2, len(ioclasses)) @res_mock.patch_client def test_create_ioclass_iops(self, client, mocked): ioclass = client.create_ioclass_iops('test', 1000) self.assertIsNotNone(ioclass) @res_mock.patch_client def test_create_ioclass_bws(self, client, mocked): ioclass = client.create_ioclass_bws('test', 100) self.assertIsNotNone(ioclass) 
@res_mock.patch_client def test_create_policy(self, client, mocked): policy = client.create_policy('policy_name') self.assertIsNotNone(policy) @res_mock.patch_client def test_get_running_policy(self, client, mocked): policy, is_new = client.get_running_policy() self.assertIn(policy.state, ['Running', 'Measuring']) self.assertFalse(is_new) @res_mock.patch_client def test_add_lun_to_ioclass(self, client, mocked): client.add_lun_to_ioclass('test_ioclass', 1) @res_mock.patch_client def test_set_max_luns_per_sg(self, client, mocked): with utils.patch_vnxstoragegroup as patch_sg: client.set_max_luns_per_sg(300) patch_sg.set_max_luns_per_sg.assert_called_with(300) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/volume/drivers/dell_emc/vnx/test_common.py0000664000175000017500000003375000000000000026731 0ustar00zuulzuul00000000000000# Copyright (c) 2016 EMC Corporation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from unittest import mock from cinder import exception from cinder.tests.unit.volume.drivers.dell_emc.vnx import fake_storops \ as storops from cinder.tests.unit.volume.drivers.dell_emc.vnx import res_mock from cinder.tests.unit.volume.drivers.dell_emc.vnx import test_base from cinder.volume.drivers.dell_emc.vnx import client from cinder.volume.drivers.dell_emc.vnx import common class TestExtraSpecs(test_base.TestCase): def test_valid_extra_spec(self): extra_spec = { 'provisioning:type': 'deduplicated', 'storagetype:tiering': 'nomovement', } spec_obj = common.ExtraSpecs(extra_spec) self.assertEqual(storops.VNXProvisionEnum.DEDUPED, spec_obj.provision) self.assertEqual(storops.VNXTieringEnum.NO_MOVE, spec_obj.tier) def test_extra_spec_case_insensitive(self): extra_spec = { 'provisioning:type': 'Thin', 'storagetype:tiering': 'StartHighThenAuto', } spec_obj = common.ExtraSpecs(extra_spec) self.assertEqual(storops.VNXProvisionEnum.THIN, spec_obj.provision) self.assertEqual(storops.VNXTieringEnum.HIGH_AUTO, spec_obj.tier) def test_empty_extra_spec(self): extra_spec = {} common.ExtraSpecs.set_defaults(storops.VNXProvisionEnum.THICK, storops.VNXTieringEnum.HIGH_AUTO) spec_obj = common.ExtraSpecs(extra_spec) self.assertEqual(storops.VNXProvisionEnum.THICK, spec_obj.provision) self.assertEqual(storops.VNXTieringEnum.HIGH_AUTO, spec_obj.tier) def test_invalid_provision(self): extra_spec = { 'provisioning:type': 'invalid', } self.assertRaises(exception.InvalidVolumeType, common.ExtraSpecs, extra_spec) def test_invalid_tiering(self): extra_spec = { 'storagetype:tiering': 'invalid', } self.assertRaises(exception.InvalidVolumeType, common.ExtraSpecs, extra_spec) def test_validate_extra_spec_dedup_and_tier_failed(self): spec_obj = common.ExtraSpecs({ 'storagetype:pool': 'fake_pool', 'provisioning:type': 'deduplicated', 'storagetype:tiering': 'auto', }) enabler_status = common.VNXEnablerStatus( dedup=True, fast=True, thin=True) self.assertRaises(exception.InvalidVolumeType, spec_obj.validate, 
enabler_status) def test_tier_is_not_set_to_default_for_dedup_provision(self): common.ExtraSpecs.set_defaults(storops.VNXProvisionEnum.THICK, storops.VNXTieringEnum.HIGH_AUTO) spec_obj = common.ExtraSpecs({'provisioning:type': 'deduplicated'}) self.assertEqual(storops.VNXProvisionEnum.DEDUPED, spec_obj.provision) self.assertIsNone(spec_obj.tier) def test_validate_extra_spec_is_valid(self): spec_obj = common.ExtraSpecs({ 'storagetype:pool': 'fake_pool', 'provisioning:type': 'thin', 'storagetype:tiering': 'auto', }) enabler_status = common.VNXEnablerStatus( dedup=True, fast=True, thin=True) re = spec_obj.validate(enabler_status) self.assertTrue(re) def test_validate_extra_spec_dedup_invalid(self): spec_obj = common.ExtraSpecs({ 'provisioning:type': 'deduplicated', }) enabler_status = common.VNXEnablerStatus(dedup=False) self.assertRaises(exception.InvalidVolumeType, spec_obj.validate, enabler_status) def test_validate_extra_spec_compress_invalid(self): spec_obj = common.ExtraSpecs({ 'provisioning:type': 'compressed', }) enabler_status = common.VNXEnablerStatus(compression=False) self.assertRaises(exception.InvalidVolumeType, spec_obj.validate, enabler_status) def test_validate_extra_spec_no_thin_invalid(self): spec_obj = common.ExtraSpecs({ 'provisioning:type': 'compressed', }) enabler_status = common.VNXEnablerStatus(compression=True, thin=False) self.assertRaises(exception.InvalidVolumeType, spec_obj.validate, enabler_status) def test_validate_extra_spec_tier_invalid(self): spec_obj = common.ExtraSpecs({ 'storagetype:tiering': 'auto', }) enabler_status = common.VNXEnablerStatus( dedup=True, fast=False, compression=True, snap=True, thin=True) self.assertRaises(exception.InvalidVolumeType, spec_obj.validate, enabler_status) def test_get_raw_data(self): spec_obj = common.ExtraSpecs({'key1': 'value1'}) self.assertIn('key1', spec_obj) self.assertNotIn('key2', spec_obj) self.assertEqual('value1', spec_obj['key1']) @res_mock.mock_storage_resources def test_generate_extra_specs_from_lun(self, mocked_res): lun = mocked_res['lun'] spec = common.ExtraSpecs.from_lun(lun) self.assertEqual(storops.VNXProvisionEnum.COMPRESSED, spec.provision) self.assertEqual(storops.VNXTieringEnum.HIGH, spec.tier) lun = mocked_res['deduped_lun'] spec = common.ExtraSpecs.from_lun(lun) self.assertEqual(storops.VNXProvisionEnum.DEDUPED, spec.provision) self.assertIsNone(spec.tier) @res_mock.mock_storage_resources def test_extra_specs_match_with_lun(self, mocked_res): lun = mocked_res['lun'] spec_obj = common.ExtraSpecs({ 'provisioning:type': 'thin', 'storagetype:tiering': 'nomovement', }) self.assertTrue(spec_obj.match_with_lun(lun)) lun = mocked_res['deduped_lun'] spec_obj = common.ExtraSpecs({ 'provisioning:type': 'deduplicated', }) self.assertTrue(spec_obj.match_with_lun(lun)) @res_mock.mock_storage_resources def test_extra_specs_not_match_with_lun(self, mocked_res): lun = mocked_res['lun'] spec_obj = common.ExtraSpecs({ 'provisioning:type': 'thick', 'storagetype:tiering': 'nomovement', }) self.assertFalse(spec_obj.match_with_lun(lun)) class FakeConfiguration(object): def __init__(self): self.replication_device = [] class TestReplicationDeviceList(test_base.TestCase): def setUp(self): super(TestReplicationDeviceList, self).setUp() self.configuration = FakeConfiguration() replication_devices = [] device = {'backend_id': 'array_id_1', 'san_ip': '192.168.1.1', 'san_login': 'admin', 'san_password': 'admin', 'storage_vnx_authentication_type': 'global', 'storage_vnx_security_file_dir': '/home/stack/'} 
replication_devices.append(device) self.configuration.replication_device = replication_devices def test_get_device(self): devices_list = common.ReplicationDeviceList(self.configuration) device = devices_list.get_device('array_id_1') self.assertIsNotNone(device) self.assertEqual('192.168.1.1', device.san_ip) self.assertEqual('admin', device.san_login) self.assertEqual('admin', device.san_password) self.assertEqual('global', device.storage_vnx_authentication_type) self.assertEqual('/home/stack/', device.storage_vnx_security_file_dir) def test_device_no_backend_id(self): device = {'san_ip': '192.168.1.2'} config = FakeConfiguration() config.replication_device = [device] self.assertRaises( exception.InvalidInput, common.ReplicationDeviceList, config) def test_device_no_secfile(self): device = {'backend_id': 'test_id', 'san_ip': '192.168.1.2'} config = FakeConfiguration() config.replication_device = [device] rep_list = common.ReplicationDeviceList(config) self.assertIsNone(rep_list[0].storage_vnx_security_file_dir) def test_get_device_not_found(self): devices_list = common.ReplicationDeviceList(self.configuration) device = devices_list.get_device('array_id_not_existed') self.assertIsNone(device) def test_devices(self): devices_list = common.ReplicationDeviceList(self.configuration) self.assertEqual(1, len(devices_list.devices)) self.assertEqual(1, len(devices_list)) self.assertIsNotNone(devices_list[0]) def test_get_backend_ids(self): backend_ids = common.ReplicationDeviceList.get_backend_ids( self.configuration) self.assertEqual(1, len(backend_ids)) self.assertIn('array_id_1', backend_ids) class TestVNXMirrorView(test_base.TestCase): def setUp(self): super(TestVNXMirrorView, self).setUp() self.primary_client = mock.create_autospec(client.Client) self.secondary_client = mock.create_autospec(client.Client) self.mirror_view = common.VNXMirrorView( self.primary_client, self.secondary_client) def test_create_mirror(self): self.mirror_view.create_mirror('mirror_test', 11) self.primary_client.create_mirror.assert_called_once_with( 'mirror_test', 11) def test_create_secondary_lun(self): self.mirror_view.create_secondary_lun('pool_name', 'lun_name', 10, 'thick', 'auto') self.secondary_client.create_lun.assert_called_once_with( 'pool_name', 'lun_name', 10, 'thick', 'auto') def test_delete_secondary_lun(self): self.mirror_view.delete_secondary_lun('lun_name') self.secondary_client.delete_lun.assert_called_once_with('lun_name') def test_delete_mirror(self): self.mirror_view.delete_mirror('mirror_name') self.primary_client.delete_mirror.assert_called_once_with( 'mirror_name') def test_add_image(self): self.secondary_client.get_available_ip.return_value = '192.168.1.2' self.mirror_view.add_image('mirror_name', 111) self.secondary_client.get_available_ip.assert_called_once_with() self.primary_client.add_image.assert_called_once_with( 'mirror_name', '192.168.1.2', 111) def test_remove_image(self): self.mirror_view.remove_image('mirror_remove') self.primary_client.remove_image.assert_called_once_with( 'mirror_remove') def test_fracture_image(self): self.mirror_view.fracture_image('mirror_fracture') self.primary_client.fracture_image.assert_called_once_with( 'mirror_fracture') def test_promote_image(self): self.mirror_view.promote_image('mirror_promote') self.secondary_client.promote_image.assert_called_once_with( 'mirror_promote') def test_destroy_mirror(self): mv = mock.Mock() mv.existed = True self.primary_client.get_mirror.return_value = mv self.mirror_view.destroy_mirror('mirror_name', 'sec_lun_name') 
self.primary_client.get_mirror.assert_called_once_with( 'mirror_name') self.primary_client.fracture_image.assert_called_once_with( 'mirror_name') self.primary_client.remove_image.assert_called_once_with( 'mirror_name') self.primary_client.delete_mirror.assert_called_once_with( 'mirror_name') self.secondary_client.delete_lun.assert_called_once_with( 'sec_lun_name') def test_destroy_mirror_not_existed(self): mv = mock.Mock() mv.existed = False self.primary_client.get_mirror.return_value = mv self.mirror_view.destroy_mirror('mirror_name', 'sec_lun_name') self.primary_client.get_mirror.assert_called_once_with( 'mirror_name') self.assertFalse(self.primary_client.fracture_image.called) def test_create_mirror_group(self): self.mirror_view.create_mirror_group('test_group') self.primary_client.create_mirror_group.assert_called_once_with( 'test_group') def test_delete_mirror_group(self): self.mirror_view.delete_mirror_group('test_group') self.primary_client.delete_mirror_group.assert_called_once_with( 'test_group') def test_add_mirror(self): self.mirror_view.add_mirror('test_group', 'test_mirror') self.primary_client.add_mirror.assert_called_once_with( 'test_group', 'test_mirror') def test_remove_mirror(self): self.mirror_view.remove_mirror('test_group', 'test_mirror') self.primary_client.remove_mirror('test_group', 'test_mirror') def test_sync_mirror_group(self): self.mirror_view.sync_mirror_group('test_group') self.primary_client.sync_mirror_group.assert_called_once_with( 'test_group') def test_promote_mirror_group(self): self.mirror_view.promote_mirror_group('test_group') self.secondary_client.promote_mirror_group.assert_called_once_with( 'test_group') def test_fracture_mirror_group(self): self.mirror_view.fracture_mirror_group('test_group') self.primary_client.fracture_mirror_group.assert_called_once_with( 'test_group') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/volume/drivers/dell_emc/vnx/test_driver.py0000664000175000017500000000732200000000000026730 0ustar00zuulzuul00000000000000# Copyright (c) 2016 EMC Corporation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from unittest import mock from cinder.tests.unit.volume.drivers.dell_emc.vnx import test_base from cinder.volume.drivers.dell_emc.vnx import driver class TestVNXDriver(test_base.TestCase): def setUp(self): super(TestVNXDriver, self).setUp() self.fc_adapter_patcher = mock.patch( 'cinder.volume.drivers.dell_emc.vnx.adapter.FCAdapter', autospec=True) self.fc_adapter_patcher.start() self.iscsi_adapter_patcher = mock.patch( 'cinder.volume.drivers.dell_emc.vnx.adapter.ISCSIAdapter', autospec=True) self.iscsi_adapter_patcher.start() self.driver = None self.addCleanup(self.fc_adapter_patcher.stop) self.addCleanup(self.iscsi_adapter_patcher.stop) def _get_driver(self, protocol): self.configuration.storage_protocol = protocol drv = driver.VNXDriver(configuration=self.configuration, active_backend_id=None) drv.do_setup(None) return drv def test_init_iscsi_driver(self): _driver = self._get_driver('iscsi') driver_name = str(_driver.adapter) self.assertIn('ISCSIAdapter', driver_name) self.assertEqual(driver.VNXDriver.VERSION, _driver.VERSION) def test_init_fc_driver(self): _driver = self._get_driver('FC') driver_name = str(_driver.adapter) self.assertIn('FCAdapter', driver_name) self.assertEqual(driver.VNXDriver.VERSION, _driver.VERSION) def test_create_volume(self): _driver = self._get_driver('iscsi') _driver.create_volume('fake_volume') _driver.adapter.create_volume.assert_called_once_with('fake_volume') def test_initialize_connection(self): _driver = self._get_driver('iscsi') _driver.initialize_connection('fake_volume', {'host': 'fake_host'}) _driver.adapter.initialize_connection.assert_called_once_with( 'fake_volume', {'host': 'fake_host'}) def test_terminate_connection(self): _driver = self._get_driver('iscsi') _driver.terminate_connection('fake_volume', {'host': 'fake_host'}) _driver.adapter.terminate_connection.assert_called_once_with( 'fake_volume', {'host': 'fake_host'}) def test_is_consistent_group_snapshot_enabled(self): _driver = self._get_driver('iscsi') _driver._stats = {'consistent_group_snapshot_enabled': True} self.assertTrue(_driver.is_consistent_group_snapshot_enabled()) _driver._stats = {'consistent_group_snapshot_enabled': False} self.assertFalse(_driver.is_consistent_group_snapshot_enabled()) self.assertFalse(_driver.is_consistent_group_snapshot_enabled()) def test_enable_replication(self): _driver = self._get_driver('iscsi') _driver.enable_replication(None, 'group', 'volumes') def test_disable_replication(self): _driver = self._get_driver('iscsi') _driver.disable_replication(None, 'group', 'volumes') def test_failover_replication(self): _driver = self._get_driver('iscsi') _driver.failover_replication(None, 'group', 'volumes', 'backend_id') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/volume/drivers/dell_emc/vnx/test_replication.py0000664000175000017500000000730400000000000027746 0ustar00zuulzuul00000000000000# Copyright (c) 2017 Dell Inc. or its subsidiaries. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. from unittest import mock from cinder.objects import fields from cinder.tests.unit.volume.drivers.dell_emc.vnx import res_mock from cinder.tests.unit.volume.drivers.dell_emc.vnx import test_base from cinder.tests.unit.volume.drivers.dell_emc.vnx import utils from cinder.volume.drivers.dell_emc.vnx import utils as vnx_utils class TestReplicationAdapter(test_base.TestCase): def setUp(self): super(TestReplicationAdapter, self).setUp() vnx_utils.init_ops(self.configuration) @utils.patch_group_specs({ 'consistent_group_replication_enabled': ' True'}) @res_mock.mock_driver_input @res_mock.patch_common_adapter def test_enable_replication(self, common_adapter, mocked_res, mocked_input): group = mocked_input['group'] volumes = [mocked_input['volume1'], mocked_input['volume2']] volumes[0].group = group volumes[1].group = group common_adapter.enable_replication(self.ctxt, group, volumes) @utils.patch_group_specs({ 'consistent_group_replication_enabled': ' True'}) @res_mock.mock_driver_input @res_mock.patch_common_adapter def test_disable_replication(self, common_adapter, mocked_res, mocked_input): group = mocked_input['group'] volumes = [mocked_input['volume1'], mocked_input['volume2']] volumes[0].group = group volumes[1].group = group common_adapter.disable_replication(self.ctxt, group, volumes) @utils.patch_group_specs({ 'consistent_group_replication_enabled': ' True'}) @res_mock.mock_driver_input @res_mock.patch_common_adapter def test_failover_replication(self, common_adapter, mocked_res, mocked_input): device = utils.get_replication_device() common_adapter.config.replication_device = [device] group = mocked_input['group'] volumes = [mocked_input['volume1'], mocked_input['volume2']] lun1 = mocked_res['lun1'] volumes[0].group = group volumes[1].group = group secondary_backend_id = 'fake_serial' with mock.patch.object(common_adapter, 'build_mirror_view') as fake: fake_mirror = utils.build_fake_mirror_view() fake_mirror.secondary_client.get_lun.return_value = lun1 fake_mirror.secondary_client.get_serial.return_value = ( device['backend_id']) fake.return_value = fake_mirror model_update, volume_updates = common_adapter.failover_replication( self.ctxt, group, volumes, secondary_backend_id) fake_mirror.promote_mirror_group.assert_called_with( group.id.replace('-', '')) self.assertEqual(fields.ReplicationStatus.FAILED_OVER, model_update['replication_status']) for update in volume_updates: self.assertEqual(fields.ReplicationStatus.FAILED_OVER, update['replication_status']) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/volume/drivers/dell_emc/vnx/test_res_mock.py0000664000175000017500000000652400000000000027242 0ustar00zuulzuul00000000000000# Copyright (c) 2016 EMC Corporation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from cinder.tests.unit import test from cinder.tests.unit.volume.drivers.dell_emc.vnx import res_mock from cinder.volume import configuration as conf from cinder.volume.drivers.dell_emc.vnx import utils class TestResMock(test.TestCase): def test_load_cinder_resource(self): cinder_res = res_mock.CinderResourceMock('mocked_cinder.yaml') volume = cinder_res['test_mock_driver_input_inner']['volume'] items = ['base_lun_name^test', 'version^07.00.00', 'type^lun', 'system^fake_serial', 'id^1'] self.assertEqual(sorted(items), sorted(volume.provider_location.split('|'))) def test_mock_driver_input(self): @res_mock.mock_driver_input def test_mock_driver_input_inner(self, mocked_input): items = ['base_lun_name^test', 'version^07.00.00', 'type^lun', 'system^fake_serial', 'id^1'] mocked_items = mocked_input['volume'].provider_location.split('|') self.assertEqual(sorted(items), sorted(mocked_items)) test_mock_driver_input_inner(self) def test_load_storage_resource(self): vnx_res = res_mock.StorageResourceMock('test_res_mock.yaml') lun = vnx_res['test_load_storage_resource']['lun'] pool = vnx_res['test_load_storage_resource']['pool'] created_lun = pool.create_lun() self.assertEqual(lun.lun_id, created_lun.lun_id) self.assertEqual(lun.poll, created_lun.poll) self.assertEqual(lun.state, created_lun.state) def test_patch_client(self): @res_mock.patch_client def test_patch_client_inner(self, patched_client, mocked): vnx = patched_client.vnx self.assertEqual('fake_serial', vnx.serial) pool = vnx.get_pool() self.assertEqual('pool_name', pool.name) test_patch_client_inner(self) def test_patch_client_mocked(self): @res_mock.patch_client def test_patch_client_mocked_inner(self, patched_client, mocked): lun = mocked['lun'] self.assertEqual('Offline', lun.state) test_patch_client_mocked_inner(self) def test_patch_adapter_common(self): self.configuration = conf.Configuration(None) utils.init_ops(self.configuration) self.configuration.san_ip = '192.168.1.1' self.configuration.storage_vnx_authentication_type = 'global' self.configuration.storage_vnx_pool_names = 'pool1,unit_test_pool' @res_mock.patch_common_adapter def test_patch_common_adapter_inner(self, patched_adapter, mocked): pool = patched_adapter.client.vnx.get_pool() self.assertEqual('pool_name', pool.name) test_patch_common_adapter_inner(self) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/volume/drivers/dell_emc/vnx/test_res_mock.yaml0000664000175000017500000000227400000000000027552 0ustar00zuulzuul00000000000000################################################# # Storage resource ################################################# # Common lun_base: _properties: &lun_base_prop lun_id: lun_id poll: False operation: None state: Ready pool_base: _properties: &pool_base_prop name: pool_name pool_id: 0 state: Ready user_capacity_gbs: 1311 total_subscribed_capacity_gbs: 131 available_capacity_gbs: 132 percent_full_threshold: 70 fast_cache: True vnx_base: _properties: &vnx_base_prop serial: fake_serial test_load_storage_resource: &test_load_storage_resource lun: &lun1 _properties: <<: *lun_base_prop state: Offline _methods: update: pool: &pool1 _properties: <<: *pool_base_prop _methods: create_lun: *lun1 vnx: _properties: <<: *vnx_base_prop _methods: get_pool: *pool1 test_patch_client_inner: *test_load_storage_resource test_patch_client_mocked_inner: *test_load_storage_resource test_patch_common_adapter_inner: *test_load_storage_resource test_property_side_effect_inner: lun: 
    _properties:
      <<: *lun_base_prop
      total_capacity_gb:
        _side_effect: [5, 10]
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/volume/drivers/dell_emc/vnx/test_taskflows.py0000664000175000017500000001777200000000000027464 0ustar00zuulzuul00000000000000
# Copyright (c) 2016 EMC Corporation, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import taskflow.engines
from taskflow.patterns import linear_flow
from taskflow.types import failure

from cinder.tests.unit.volume.drivers.dell_emc.vnx import fake_exception \
    as vnx_ex
from cinder.tests.unit.volume.drivers.dell_emc.vnx import res_mock
from cinder.tests.unit.volume.drivers.dell_emc.vnx import test_base
import cinder.volume.drivers.dell_emc.vnx.taskflows as vnx_taskflow


class TestTaskflow(test_base.TestCase):
    def setUp(self):
        super(TestTaskflow, self).setUp()
        self.work_flow = linear_flow.Flow('test_task')

    @res_mock.patch_client
    def test_copy_snapshot_task(self, client, mocked):
        store_spec = {'client': client,
                      'snap_name': 'original_name',
                      'new_snap_name': 'new_name'
                      }
        self.work_flow.add(vnx_taskflow.CopySnapshotTask())
        engine = taskflow.engines.load(self.work_flow,
                                       store=store_spec)
        engine.run()

    @res_mock.patch_client
    def test_copy_snapshot_task_revert(self, client, mocked):
        store_spec = {'client': client,
                      'snap_name': 'original_name',
                      'new_snap_name': 'new_name'
                      }
        self.work_flow.add(vnx_taskflow.CopySnapshotTask())
        engine = taskflow.engines.load(self.work_flow,
                                       store=store_spec)
        self.assertRaises(vnx_ex.VNXSnapError, engine.run)

    @res_mock.patch_client
    def test_create_smp_task(self, client, mocked):
        store_spec = {
            'client': client,
            'smp_name': 'mount_point_name',
            'base_lun_name': 'base_name'
        }
        self.work_flow.add(vnx_taskflow.CreateSMPTask())
        engine = taskflow.engines.load(self.work_flow, store=store_spec)
        engine.run()
        smp_id = engine.storage.fetch('smp_id')
        self.assertEqual(15, smp_id)

    @res_mock.patch_client
    def test_create_smp_task_revert(self, client, mocked):
        store_spec = {
            'client': client,
            'smp_name': 'mount_point_name',
            'base_lun_name': 'base_name'
        }
        self.work_flow.add(vnx_taskflow.CreateSMPTask())
        engine = taskflow.engines.load(self.work_flow, store=store_spec)
        self.assertRaises(vnx_ex.VNXCreateLunError, engine.run)
        smp_id = engine.storage.fetch('smp_id')
        self.assertIsInstance(smp_id, failure.Failure)

    @res_mock.patch_client
    def test_attach_snap_task(self, client, mocked):
        store_spec = {
            'client': client,
            'smp_name': 'mount_point_name',
            'snap_name': 'snap_name'
        }
        self.work_flow.add(vnx_taskflow.AttachSnapTask())
        engine = taskflow.engines.load(self.work_flow, store=store_spec)
        engine.run()

    @res_mock.patch_client
    def test_attach_snap_task_revert(self, client, mocked):
        store_spec = {
            'client': client,
            'smp_name': 'mount_point_name',
            'snap_name': 'snap_name'
        }
        self.work_flow.add(vnx_taskflow.AttachSnapTask())
        engine = taskflow.engines.load(self.work_flow, store=store_spec)
        self.assertRaises(vnx_ex.VNXAttachSnapError, engine.run)
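    # A minimal, self-contained sketch (not one of the upstream driver
    # tests) of the generic TaskFlow pattern the tests in this class rely
    # on: values in the ``store`` dict are injected into ``execute()`` by
    # argument name, the value named by ``default_provides`` is saved back,
    # and it can be fetched from ``engine.storage`` after the run.
    # ``_DoubleTask``, ``number`` and ``doubled`` are illustrative names
    # only and do not exist in the VNX driver taskflows.
    def test_taskflow_store_roundtrip_sketch(self):
        from taskflow import task

        class _DoubleTask(task.Task):
            default_provides = 'doubled'

            def execute(self, number):
                # ``number`` comes from the store passed to engines.load().
                return number * 2

        flow = linear_flow.Flow('sketch_flow')
        flow.add(_DoubleTask())
        engine = taskflow.engines.load(flow, store={'number': 21})
        engine.run()
        self.assertEqual(42, engine.storage.fetch('doubled'))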
@res_mock.patch_client def test_create_snapshot_task(self, client, mocked): store_spec = { 'client': client, 'lun_id': 12, 'snap_name': 'snap_name' } self.work_flow.add(vnx_taskflow.CreateSnapshotTask()) engine = taskflow.engines.load(self.work_flow, store=store_spec) engine.run() @res_mock.patch_client def test_create_snapshot_task_revert(self, client, mocked): store_spec = { 'client': client, 'lun_id': 13, 'snap_name': 'snap_name' } self.work_flow.add(vnx_taskflow.CreateSnapshotTask()) engine = taskflow.engines.load(self.work_flow, store=store_spec) self.assertRaises(vnx_ex.VNXCreateSnapError, engine.run) @res_mock.patch_client def test_allow_read_write_task(self, client, mocked): store_spec = { 'client': client, 'snap_name': 'snap_name' } self.work_flow.add(vnx_taskflow.ModifySnapshotTask()) engine = taskflow.engines.load(self.work_flow, store=store_spec) engine.run() @res_mock.patch_client def test_allow_read_write_task_revert(self, client, mocked): store_spec = { 'client': client, 'snap_name': 'snap_name' } self.work_flow.add(vnx_taskflow.ModifySnapshotTask()) engine = taskflow.engines.load(self.work_flow, store=store_spec) self.assertRaises(vnx_ex.VNXSnapError, engine.run) @res_mock.patch_client def test_create_cg_snapshot_task(self, client, mocked): store_spec = { 'client': client, 'cg_name': 'test_cg', 'cg_snap_name': 'my_snap_name' } self.work_flow.add(vnx_taskflow.CreateCGSnapshotTask()) engine = taskflow.engines.load(self.work_flow, store=store_spec) engine.run() snap_name = engine.storage.fetch('new_cg_snap_name') self.assertIsInstance(snap_name, res_mock.StorageObjectMock) @res_mock.patch_client def test_create_cg_snapshot_task_revert(self, client, mocked): store_spec = { 'client': client, 'cg_name': 'test_cg', 'cg_snap_name': 'my_snap_name' } self.work_flow.add(vnx_taskflow.CreateCGSnapshotTask()) engine = taskflow.engines.load(self.work_flow, store=store_spec) self.assertRaises(vnx_ex.VNXCreateSnapError, engine.run) @res_mock.patch_client def test_extend_smp_task(self, client, mocked): store_spec = { 'client': client, 'smp_name': 'lun_test_extend_smp_task', 'lun_size': 100 } self.work_flow.add(vnx_taskflow.ExtendSMPTask()) engine = taskflow.engines.load(self.work_flow, store=store_spec) engine.run() @res_mock.patch_client def test_extend_smp_task_skip_small_size(self, client, mocked): store_spec = { 'client': client, 'smp_name': 'lun_test_extend_smp_task', 'lun_size': 1 } self.work_flow.add(vnx_taskflow.ExtendSMPTask()) engine = taskflow.engines.load(self.work_flow, store=store_spec) engine.run() @res_mock.patch_client def test_extend_smp_task_skip_thick(self, client, mocked): store_spec = { 'client': client, 'smp_name': 'lun_test_extend_smp_task_skip_thick', 'lun_size': 100 } self.work_flow.add(vnx_taskflow.ExtendSMPTask()) engine = taskflow.engines.load(self.work_flow, store=store_spec) engine.run() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/volume/drivers/dell_emc/vnx/test_utils.py0000664000175000017500000002561300000000000026600 0ustar00zuulzuul00000000000000# Copyright (c) 2016 EMC Corporation, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from unittest import mock from cinder import exception from cinder.tests.unit.volume.drivers.dell_emc.vnx import fake_exception \ as storops_ex from cinder.tests.unit.volume.drivers.dell_emc.vnx import fake_storops \ as storops from cinder.tests.unit.volume.drivers.dell_emc.vnx import res_mock from cinder.tests.unit.volume.drivers.dell_emc.vnx import test_base from cinder.tests.unit.volume.drivers.dell_emc.vnx import utils as ut_utils from cinder.volume.drivers.dell_emc.vnx import common from cinder.volume.drivers.dell_emc.vnx import utils as vnx_utils from cinder.volume import volume_types class FakeDriver(object): @vnx_utils.require_consistent_group_snapshot_enabled def fake_group_method(self, context, group_or_snap): return True class TestUtils(test_base.TestCase): def test_wait_until(self): mock_testmethod = mock.Mock(return_value=True) vnx_utils.wait_until(mock_testmethod, interval=0) mock_testmethod.assert_has_calls([mock.call()]) def test_wait_until_with_exception(self): mock_testmethod = mock.Mock( side_effect=storops_ex.VNXAttachSnapError('Unknown error')) mock_testmethod.__name__ = 'test_method' self.assertRaises(storops_ex.VNXAttachSnapError, vnx_utils.wait_until, mock_testmethod, timeout=1, interval=0, reraise_arbiter=( lambda ex: not isinstance( ex, storops_ex.VNXCreateLunError))) mock_testmethod.assert_has_calls([mock.call()]) def test_wait_until_with_params(self): mock_testmethod = mock.Mock(return_value=True) vnx_utils.wait_until(mock_testmethod, param1=1, param2='test') mock_testmethod.assert_has_calls( [mock.call(param1=1, param2='test')]) mock_testmethod.assert_has_calls([mock.call(param1=1, param2='test')]) @res_mock.mock_driver_input def test_retype_need_migration_when_host_changed(self, driver_in): volume = driver_in['volume'] another_host = driver_in['host'] re = vnx_utils.retype_need_migration( volume, None, None, another_host) self.assertTrue(re) @res_mock.mock_driver_input def test_retype_need_migration_for_smp_volume(self, driver_in): volume = driver_in['volume'] host = driver_in['host'] re = vnx_utils.retype_need_migration( volume, None, None, host) self.assertTrue(re) @res_mock.mock_driver_input def test_retype_need_migration_when_provision_changed( self, driver_in): volume = driver_in['volume'] host = driver_in['host'] old_spec = common.ExtraSpecs({'provisioning:type': 'thin'}) new_spec = common.ExtraSpecs({'provisioning:type': 'deduplicated'}) re = vnx_utils.retype_need_migration( volume, old_spec.provision, new_spec.provision, host) self.assertTrue(re) @res_mock.mock_driver_input def test_retype_not_need_migration_when_provision_changed( self, driver_in): volume = driver_in['volume'] host = driver_in['host'] old_spec = common.ExtraSpecs({'provisioning:type': 'thick'}) new_spec = common.ExtraSpecs({'provisioning:type': 'compressed'}) re = vnx_utils.retype_need_migration( volume, old_spec.provision, new_spec.provision, host) self.assertFalse(re) @res_mock.mock_driver_input def test_retype_not_need_migration(self, driver_in): volume = driver_in['volume'] host = driver_in['host'] old_spec = common.ExtraSpecs({'storagetype:tiering': 'auto'}) new_spec = 
common.ExtraSpecs( {'storagetype:tiering': 'starthighthenauto'}) re = vnx_utils.retype_need_migration( volume, old_spec.provision, new_spec.provision, host) self.assertFalse(re) def test_retype_need_change_tier(self): re = vnx_utils.retype_need_change_tier( storops.VNXTieringEnum.AUTO, storops.VNXTieringEnum.HIGH_AUTO) self.assertTrue(re) def test_retype_need_turn_on_compression(self): re = vnx_utils.retype_need_turn_on_compression( storops.VNXProvisionEnum.THIN, storops.VNXProvisionEnum.COMPRESSED) self.assertTrue(re) re = vnx_utils.retype_need_turn_on_compression( storops.VNXProvisionEnum.THICK, storops.VNXProvisionEnum.COMPRESSED) self.assertTrue(re) def test_retype_not_need_turn_on_compression(self): re = vnx_utils.retype_need_turn_on_compression( storops.VNXProvisionEnum.DEDUPED, storops.VNXProvisionEnum.COMPRESSED) self.assertFalse(re) re = vnx_utils.retype_need_turn_on_compression( storops.VNXProvisionEnum.DEDUPED, storops.VNXProvisionEnum.COMPRESSED) self.assertFalse(re) @res_mock.mock_driver_input def test_get_base_lun_name(self, mocked): volume = mocked['volume'] self.assertEqual( 'test', vnx_utils.get_base_lun_name(volume)) def test_convert_to_tgt_list_and_itor_tgt_map(self): zone_mapping = { 'san_1': {'initiator_port_wwn_list': ['wwn1_1'], 'target_port_wwn_list': ['wwnt_1', 'wwnt_2']}, 'san_2': {'initiator_port_wwn_list': ['wwn2_1', 'wwn2_2'], 'target_port_wwn_list': ['wwnt_1', 'wwnt_3']}, } tgt_wwns, itor_tgt_map = ( vnx_utils.convert_to_tgt_list_and_itor_tgt_map(zone_mapping)) self.assertEqual({'wwnt_1', 'wwnt_2', 'wwnt_3'}, set(tgt_wwns)) self.assertEqual({'wwn1_1': ['wwnt_1', 'wwnt_2'], 'wwn2_1': ['wwnt_1', 'wwnt_3'], 'wwn2_2': ['wwnt_1', 'wwnt_3']}, itor_tgt_map) @ut_utils.patch_group_specs(' True') @res_mock.mock_driver_input def test_require_consistent_group_snapshot_enabled(self, input): driver = FakeDriver() is_called = driver.fake_group_method('context', input['group']) self.assertTrue(is_called) @res_mock.mock_driver_input def test_is_image_cache_volume_false(self, mocked): volume = mocked['volume'] volume.display_name = 'volume-ca86b9a0-d0d5-4267-8cd5-c62274056cc0' self.assertFalse(vnx_utils.is_image_cache_volume(volume)) volume.display_name = 'volume-ca86b9a0-d0d5-c62274056cc0' self.assertFalse(vnx_utils.is_image_cache_volume(volume)) @res_mock.mock_driver_input def test_is_image_cache_volume_true(self, mocked): volume = mocked['volume'] volume.display_name = 'image-ca86b9a0-d0d5-4267-8cd5-c62274056cc0' self.assertTrue(vnx_utils.is_image_cache_volume(volume)) @res_mock.mock_driver_input def test_calc_migrate_and_provision_image_cache(self, mocked): volume = mocked['volume'] volume.display_name = 'image-ca86b9a0-d0d5-4267-8cd5-c62274056cc0' self.assertTrue(vnx_utils.is_image_cache_volume(volume)) async_migrate, provision = vnx_utils.calc_migrate_and_provision(volume) self.assertFalse(async_migrate) self.assertEqual(provision.name, 'THIN') @res_mock.mock_driver_input def test_calc_migrate_and_provision(self, mocked): volume = mocked['volume'] volume.display_name = 'volume-ca86b9a0-d0d5-4267-8cd5-c62274056cc0' async_migrate, provision = vnx_utils.calc_migrate_and_provision(volume) self.assertEqual(vnx_utils.is_async_migrate_enabled(volume), async_migrate) self.assertEqual(provision.name, 'THICK') @res_mock.mock_driver_input def test_calc_migrate_and_provision_default(self, mocked): volume = mocked['volume'] volume.display_name = 'volume-ca86b9a0-d0d5-4267-8cd5-c62274056cc0' async_migrate, provision = vnx_utils.calc_migrate_and_provision( volume, 
default_async_migrate=False) self.assertFalse(async_migrate) self.assertEqual(provision.name, 'THICK') async_migrate, provision = vnx_utils.calc_migrate_and_provision( volume, default_async_migrate=True) self.assertTrue(async_migrate) @ut_utils.patch_extra_specs({}) @res_mock.mock_driver_input def test_get_backend_qos_specs(self, cinder_input): volume = cinder_input['volume'] with mock.patch.object(volume_types, 'get_volume_type_qos_specs', return_value={'qos_specs': None}): r = vnx_utils.get_backend_qos_specs(volume) self.assertIsNone(r) with mock.patch.object(volume_types, 'get_volume_type_qos_specs', return_value={ 'qos_specs': {'consumer': 'frontend'}}): r = vnx_utils.get_backend_qos_specs(volume) self.assertIsNone(r) with mock.patch.object(volume_types, 'get_volume_type_qos_specs', return_value={ 'qos_specs': {'id': 'test', 'consumer': 'back-end', 'specs': { common.QOS_MAX_BWS: 100, common.QOS_MAX_IOPS: 10}}}): r = vnx_utils.get_backend_qos_specs(volume) self.assertIsNotNone(r) self.assertEqual(100, r[common.QOS_MAX_BWS]) self.assertEqual(10, r[common.QOS_MAX_IOPS]) @ut_utils.patch_group_specs({ 'consistent_group_replication_enabled': ' True'}) @ut_utils.patch_extra_specs({ 'replication_enabled': ' False'}) @res_mock.mock_driver_input def test_check_type_matched_invalid(self, mocked): volume = mocked['volume'] volume.group = mocked['group'] self.assertRaises(exception.InvalidInput, vnx_utils.check_type_matched, volume) @ut_utils.patch_group_specs({ 'consistent_group_replication_enabled': ' True'}) @res_mock.mock_driver_input def test_check_rep_status_matched_disabled(self, mocked): group = mocked['group'] self.assertRaises(exception.InvalidInput, vnx_utils.check_rep_status_matched, group) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/volume/drivers/dell_emc/vnx/utils.py0000664000175000017500000000540600000000000025537 0ustar00zuulzuul00000000000000# Copyright (c) 2016 EMC Corporation, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import functools
from os import path
from unittest import mock

import yaml

from cinder.volume.drivers.dell_emc.vnx import client
from cinder.volume.drivers.dell_emc.vnx import common

patch_sleep = mock.patch('time.sleep')

patch_vnxsystem = mock.patch('storops.VNXSystem')

patch_vnxstoragegroup = mock.patch('storops.vnx.resource.sg.VNXStorageGroup')


def load_yaml(file_name):
    yaml_file = '{}/{}'.format(path.dirname(
        path.abspath(__file__)), file_name)
    with open(yaml_file) as f:
        res = yaml.safe_load(f)
        return res


def patch_extra_specs(specs):
    return _build_patch_decorator(
        'cinder.volume.volume_types.get_volume_type_extra_specs',
        return_value=specs)


def patch_group_specs(specs):
    return _build_patch_decorator(
        'cinder.volume.group_types.get_group_type_specs',
        return_value=specs)


def patch_extra_specs_validate(return_value=None, side_effect=None):
    return _build_patch_decorator(
        'cinder.volume.drivers.dell_emc.vnx.common.ExtraSpecs.validate',
        return_value=return_value,
        side_effect=side_effect)


def _build_patch_decorator(module_str, return_value=None, side_effect=None):
    def _inner_mock(func):
        @functools.wraps(func)
        def decorator(*args, **kwargs):
            with mock.patch(
                    module_str,
                    return_value=return_value,
                    side_effect=side_effect):
                return func(*args, **kwargs)
        return decorator
    return _inner_mock


def build_fake_mirror_view():
    primary_client = mock.create_autospec(spec=client.Client)
    secondary_client = mock.create_autospec(spec=client.Client)

    mirror_view = mock.create_autospec(spec=common.VNXMirrorView)
    mirror_view.primary_client = primary_client
    mirror_view.secondary_client = secondary_client

    return mirror_view


def get_replication_device():
    return {
        'backend_id': 'fake_serial',
        'san_ip': '192.168.1.12',
        'san_login': 'admin',
        'san_password': 'admin',
        'storage_vnx_authentication_type': 'global',
        'storage_vnx_security_file_dir': None,
        'pool_name': 'remote_pool',
    }
././@PaxHeader0000000000000000000000000000003200000000000011450 xustar000000000000000026 mtime=1759315577.27112 cinder-27.0.0/cinder/tests/unit/volume/drivers/fungible/0000775000175000017500000000000000000000000023234 5ustar00zuulzuul00000000000000
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/volume/drivers/fungible/__init__.py0000664000175000017500000000000000000000000025333 0ustar00zuulzuul00000000000000
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/volume/drivers/fungible/test_adapter.py0000664000175000017500000000167600000000000026273 0ustar00zuulzuul00000000000000
# (c) Copyright 2022 Fungible, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
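# The MockResource helper defined below is a mock.Mock that also supports
# dict-style item access: constructor kwargs are kept in ``self.kwargs`` and
# returned from ``__getitem__``, and an explicit ``name`` kwarg is pinned as
# an attribute (a plain Mock treats ``name`` specially).  The Fungible driver
# tests use it to fake swagger/HTTP response objects.  Illustrative values
# only, for example:
#
#     res = MockResource(name='vol-1', status=False, data='{"status": false}')
#     res.name          # -> 'vol-1'
#     res['status']     # -> False
#     res['data']       # -> '{"status": false}'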
from unittest import mock class MockResource(mock.Mock): def __init__(self, *args, **kwargs): super(MockResource, self).__init__(*args, **kwargs) if 'name' in kwargs: self.name = kwargs['name'] self.kwargs = kwargs def __getitem__(self, key): return self.kwargs[key] ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/volume/drivers/fungible/test_driver.py0000664000175000017500000013062300000000000026145 0ustar00zuulzuul00000000000000# (c) Copyright 2022 Fungible, Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import unittest from unittest import mock import uuid from cinder import context from cinder import exception from cinder.image import image_utils from cinder.objects import fields from cinder.tests.unit import fake_constants from cinder.tests.unit import fake_snapshot from cinder.tests.unit import fake_volume from cinder.tests.unit.image import fake as fake_image from cinder.tests.unit.volume.drivers.fungible import test_adapter from cinder import volume from cinder.volume import configuration from cinder.volume.drivers.fungible import constants from cinder.volume.drivers.fungible import driver from cinder.volume.drivers.fungible import rest_client from cinder.volume.drivers.fungible import \ swagger_api_client as swagger_client from cinder.volume import volume_types from cinder.volume import volume_utils common_success_res = swagger_client.CommonResponseFields( status=True, message='healthy') common_failure_res = swagger_client.CommonResponseFields( status=False, message='error') success_uuid = swagger_client.ResponseDataWithCreateUuid( status=True, data={'uuid': 'mock_id'}) success_response = swagger_client.SuccessResponseFields( status=True, message="mock_message") get_volume_details = swagger_client.ResponseDataWithCreateUuid( status=True, data={"dpu": "mock_dpu_uuid", "secy_dpu": "mock_dpu_uuid", "ports": {"mock_id": {"host_nqn": "mock_nqn", "host_uuid": "mock_host_id", "transport": "TCP"}}}) get_topology = swagger_client.ResponseDpuDriveHierarchy( status=True, data={"mock_device_uuid": { "available": True, "dpus": [{"dataplane_ip": "mock_dataplae_ip", "uuid": "mock_dpu_uuid"}]}}) get_host_id_list = swagger_client.ResponseDataWithListOfHostUuids( status=True, data={"total_hosts_with_fac": 0, "total_hosts_without_fac": 1, "host_uuids": ["mock_host_id"]}) get_host_info = swagger_client.ResponseDataWithHostInfo( status=True, data={"host_uuid": "mock_host_id", "host_nqn": "mock_nqn", "fac_enabled": False}) fetch_hosts_with_ids = swagger_client.ResponseDataWithListOfHosts( status=True, data=[ { "host_uuid": "mock_host_id", "host_nqn": "mock_nqn", "fac_enabled": False } ]) create_copy_task = swagger_client.ResponseCreateVolumeCopyTask( status=True, data={'task_uuid': 'mock_id'}) get_task_success = swagger_client.ResponseGetVolumeCopyTask( status=True, data={'task_state': 'SUCCESS'}) class FungibleDriverTest(unittest.TestCase): def setUp(self): super(FungibleDriverTest, self).setUp() 
self.configuration = mock.Mock(spec=configuration.Configuration) self.configuration.san_ip = '127.0.0.1' self.configuration.san_api_port = 443 self.configuration.san_login = 'admin' self.configuration.san_password = 'password' self.configuration.nvme_connect_port = 4420 self.configuration.api_enable_ssl = False self.driver = driver.FungibleDriver(configuration=self.configuration) self.driver.do_setup(context=None) self.context = context.get_admin_context() self.api_exception = swagger_client.ApiException( status=400, reason="Bad Request", http_resp=self.get_api_exception_response()) @staticmethod def get_volume(): volume = fake_volume.fake_volume_obj(mock.MagicMock()) volume.size = 1 volume.provider_id = fake_constants.UUID1 volume.migration_status = '' volume.id = str(uuid.uuid4()) volume.display_name = 'volume' volume.host = 'mock_host_name' volume.volume_type_id = fake_constants.VOLUME_TYPE_ID volume.metadata = {} return volume @staticmethod def get_snapshot(): snapshot = fake_snapshot.fake_snapshot_obj(mock.MagicMock()) snapshot.display_name = 'snapshot' snapshot.provider_id = fake_constants.UUID1 snapshot.id = str(uuid.uuid4()) snapshot.volume = FungibleDriverTest.get_volume() return snapshot @staticmethod def get_connector(): return {"nqn": "mock_nqn"} @staticmethod def get_specs(): return { constants.FSC_SPACE_ALLOCATION_POLICY: "write_optimized", constants.FSC_COMPRESSION: "true", constants.FSC_QOS_BAND: "gold", constants.FSC_SNAPSHOTS: "false", constants.FSC_BLK_SIZE: "4096" } @staticmethod def get_metadata(): return { constants.FSC_SPACE_ALLOCATION_POLICY: "write_optimized", constants.FSC_COMPRESSION: "false", constants.FSC_QOS_BAND: "bronze", constants.FSC_EC_SCHEME: constants.EC_4_2 } @staticmethod def get_api_exception_response(): return test_adapter.MockResource( status=False, data='{"error_message":"mock_error_message","status":false}') '''@staticmethod def get_volume_details(): return { "data": { "ports": { "mock_id": { "host_nqn": "mock_nqn", "ip": "127.0.0.1" } } } }''' def test_get_driver_options(self): self.assertIsNotNone(self.driver.get_driver_options()) def test_volume_stats(self): self.assertIsNotNone(self.driver.get_volume_stats()) @mock.patch.object(swagger_client.ApigatewayApi, 'get_fc_health') def test_check_for_setup_error_success(self, mock_success_response): mock_success_response.return_value = common_success_res result = self.driver.check_for_setup_error() self.assertIsNone(result) @mock.patch.object(swagger_client.ApigatewayApi, 'get_fc_health') def test_check_for_setup_error_fail(self, mock_staus): mock_staus.return_value = common_failure_res with self.assertRaises(exception.VolumeBackendAPIException): self.driver.check_for_setup_error() @mock.patch.object(rest_client.RestClient, 'check_for_setup_error') def test_check_for_setup_error_exception(self, mock_staus): mock_staus.side_effect = Exception("mock exception") with self.assertRaises(exception.VolumeBackendAPIException): self.driver.check_for_setup_error() @mock.patch.object(rest_client.RestClient, 'check_for_setup_error') def test_check_for_setup_error_api_exception(self, mock_exception): mock_exception.side_effect = self.api_exception with self.assertRaises(exception.VolumeBackendAPIException): self.driver.check_for_setup_error() @mock.patch.object(volume_types, 'get_volume_type') def test_get_volume_stats_without_volume_type(self, mock_get_volume_type): volume = self.get_volume() volume.volume_type_id = fake_constants.VOLUME_TYPE_ID mock_get_volume_type.return_value = {"extra_specs": 
self.get_specs()} with self.assertRaises(exception.VolumeBackendAPIException): self.driver._get_volume_type_extra_specs(self, volume=volume) @mock.patch.object(volume_types, 'get_volume_type') def test_get_volume_stats_with_volume_type(self, mock_get_volume_type): volume = {"volume_type_id": fake_constants.VOLUME_TYPE_ID} extra_specs = self.get_specs() extra_specs.update({constants.FSC_VOL_TYPE: constants.VOLUME_TYPE_RAW}) mock_get_volume_type.return_value = {"extra_specs": extra_specs} self.assertIsNotNone( self.driver._get_volume_type_extra_specs(self, volume=volume)) @mock.patch.object(swagger_client.StorageApi, 'create_volume') def test_create_volume(self, mock_create_volume): volume = self.get_volume() self.driver._get_volume_type_extra_specs = mock.Mock( return_value=[{}, constants.VOLUME_TYPE_EC]) mock_create_volume.return_value = success_uuid ret = self.driver.create_volume(volume) self.assertIsNotNone(ret) self.assertEqual(volume['size'], ret['size']) @mock.patch.object(swagger_client.StorageApi, 'create_volume') def test_create_ec_volume_8_2(self, mock_create_volume): volume = self.get_volume() self.driver._get_volume_type_extra_specs = mock.Mock( return_value=[{constants.FSC_EC_SCHEME: "8_2"}, constants.VOLUME_TYPE_EC]) mock_create_volume.return_value = success_uuid ret = self.driver.create_volume(volume) self.assertIsNotNone(ret) self.assertEqual(volume['size'], ret['size']) @mock.patch.object(swagger_client.StorageApi, 'create_volume') def test_create_replicated_volume(self, mock_create_volume): volume = self.get_volume() self.driver._get_volume_type_extra_specs = mock.Mock( return_value=[{}, constants.VOLUME_TYPE_REPLICA]) ret = self.driver.create_volume(volume) mock_create_volume.return_value = success_uuid self.assertIsNotNone(ret) self.assertEqual(volume['size'], ret['size']) @mock.patch.object(swagger_client.StorageApi, 'create_volume') def test_create_volume_with_specs(self, mock_create_volume): volume = self.get_volume() mock_ret = self.get_specs() self.driver._get_volume_type_extra_specs = mock.Mock( return_value=[mock_ret, constants.VOLUME_TYPE_EC]) mock_create_volume.return_value = success_uuid ret = self.driver.create_volume(volume) self.assertIsNotNone(ret) self.assertEqual(volume['size'], ret['size']) @mock.patch.object(swagger_client.StorageApi, 'create_volume') def test_create_volume_with_metadata(self, mock_create_volume): volume = self.get_volume() self.driver._get_volume_type_extra_specs = mock.Mock( return_value=[{}, constants.VOLUME_TYPE_EC]) mock_create_volume.return_value = success_uuid volume['metadata'].update(self.get_metadata()) ret = self.driver.create_volume(volume) self.assertIsNotNone(ret) self.assertEqual(volume['size'], ret['size']) @mock.patch.object(swagger_client.StorageApi, 'create_volume') def test_create_volume_with_fault_domains(self, mock_create_volume): volume = self.get_volume() self.driver._get_volume_type_extra_specs = mock.Mock( return_value=[{}, constants.VOLUME_TYPE_RAW]) volume['metadata'].update(self.get_metadata()) volume['metadata'].update({constants.FSC_FD_IDS: 'fake_id1, fake_id2'}) volume['metadata'].update( {constants.FSC_FD_OP: constants.FSC_FD_OPS[0]}) mock_create_volume.return_value = success_uuid ret = self.driver.create_volume(volume) self.assertIsNotNone(ret) self.assertEqual(volume['size'], ret['size']) @mock.patch.object(swagger_client.StorageApi, 'create_volume') def test_negative_create_volume_with_fault_domains( self, mock_create_volume): volume = self.get_volume() self.driver._get_volume_type_extra_specs = 
mock.Mock( return_value=[{}, constants.VOLUME_TYPE_RAW]) volume['metadata'].update(self.get_metadata()) volume['metadata'].update( {constants.FSC_FD_IDS: 'fake_id1,fake_id2,fake_id3'}) volume['metadata'].update({constants.FSC_FD_OP: 'mock_value'}) mock_create_volume.return_value = success_uuid with self.assertRaises(exception.VolumeBackendAPIException): self.driver.create_volume(volume) @mock.patch.object(swagger_client.StorageApi, 'create_volume') def test_negative_create_volume_without_fault_domains_op( self, mock_create_volume): volume = self.get_volume() self.driver._get_volume_type_extra_specs = mock.Mock( return_value=[{}, constants.VOLUME_TYPE_RAW]) volume['metadata'].update(self.get_metadata()) volume['metadata'].update({constants.FSC_FD_IDS: 'fake_id1,fake_id2'}) mock_create_volume.return_value = success_uuid ret = self.driver.create_volume(volume) self.assertIsNotNone(ret) self.assertEqual(volume['size'], ret['size']) def test_negative_create_volume_with_metadata(self): volume = self.get_volume() volume['metadata'].update(self.get_specs()) volume['metadata'].update({constants.FSC_QOS_BAND: 'wrong value'}) volume['metadata'].update( {constants.FSC_SPACE_ALLOCATION_POLICY: 'wrong value'}) volume['metadata'].update({constants.FSC_COMPRESSION: 'wrong value'}) volume['metadata'].update({constants.FSC_EC_SCHEME: 'wrong value'}) volume['metadata'].update({constants.FSC_SNAPSHOTS: 'wrong value'}) volume['metadata'].update({constants.FSC_BLK_SIZE: 'wrong value'}) self.driver._get_volume_type_extra_specs = mock.Mock( return_value=[{}, constants.VOLUME_TYPE_EC]) with self.assertRaises(exception.VolumeBackendAPIException): self.driver.create_volume(volume) def test_negative_encrypted_create_volume(self): volume = self.get_volume() self.driver._get_volume_type_extra_specs = mock.Mock( return_value=[{}, constants.VOLUME_TYPE_EC]) volume['metadata'].update(self.get_metadata()) volume['metadata'].update({constants.FSC_KMIP_SECRET_KEY: 'fake key'}) with self.assertRaises(exception.VolumeBackendAPIException): self.driver.create_volume(volume) def test_negative_create_volume_with_specs(self): volume = self.get_volume() mock_ret = self.get_specs() mock_ret.update({constants.FSC_QOS_BAND: 'wrong value'}) self.driver._get_volume_type_extra_specs = mock.Mock( return_value=[mock_ret, constants.VOLUME_TYPE_EC]) with self.assertRaises(exception.VolumeBackendAPIException): self.driver.create_volume(volume) @mock.patch.object(rest_client.RestClient, 'create_volume') def test_negative_create_volume_api_exception(self, mock_create_volume): volume = self.get_volume() self.driver._get_volume_type_extra_specs = mock.Mock( return_value=[{}, constants.VOLUME_TYPE_EC]) mock_create_volume.side_effect = self.api_exception with self.assertRaises(exception.VolumeBackendAPIException): self.driver.create_volume(volume) @mock.patch.object(swagger_client.StorageApi, 'delete_volume') def test_delete_volume(self, mock_delete_volume): volume = self.get_volume() mock_delete_volume.return_value = success_response self.assertIsNone(self.driver.delete_volume(volume)) @mock.patch.object(rest_client.RestClient, 'delete_volume') def test_negative_delete_volume_exception(self, mock_delete_volume): mock_volume = self.get_volume() mock_volume['provider_id'] = fake_constants.UUID1 mock_delete_volume.side_effect = Exception("mock exception") with self.assertRaises(exception.VolumeBackendAPIException): self.driver.delete_volume(mock_volume) def test_negative_delete_volume_without_provider_id(self): volume = self.get_volume() 
volume['provider_id'] = None self.assertIsNone(self.driver.delete_volume(volume)) def test_negative_delete_volume_without_provider_id_attr(self): volume = self.get_volume() del volume.provider_id with self.assertRaises(exception.VolumeBackendAPIException): self.driver.delete_volume(volume) @mock.patch.object(swagger_client.StorageApi, 'delete_volume') def test_negative_delete_volume_api_exception(self, mock_delete_volume): volume = self.get_volume() mock_delete_volume.side_effect = self.api_exception with self.assertRaises(exception.VolumeBackendAPIException): self.driver.delete_volume(volume) @mock.patch.object(swagger_client.StorageApi, 'get_volume') @mock.patch.object(swagger_client.StorageApi, 'attach_volume') @mock.patch.object(swagger_client.TopologyApi, 'get_host_id_list') @mock.patch.object(swagger_client.TopologyApi, 'get_host_info') @mock.patch.object(swagger_client.TopologyApi, 'get_hierarchical_topology') def test_initialize_connection( self, mock_get_topology, mock_get_host_info, mock_get_host_id_list, mock_attach_volume, mock_get_volume): mock_volume = self.get_volume() self.driver._get_volume_type_extra_specs = mock.Mock( return_value=[{}, constants.VOLUME_TYPE_EC]) connector = self.get_connector() mock_attach_volume.return_value = success_uuid mock_get_volume.return_value = get_volume_details mock_get_host_id_list.return_value = get_host_id_list mock_get_host_info.return_value = get_host_info mock_get_topology.return_value = get_topology conn_info = self.driver.initialize_connection(mock_volume, connector) self.assertIsNotNone(conn_info) self.assertEqual(conn_info.get("driver_volume_type"), "nvmeof") self.assertIsNotNone(conn_info.get("data")) self.assertEqual( conn_info.get("data").get("vol_uuid"), fake_constants.UUID1) self.assertEqual(conn_info.get("data").get("host_nqn"), self.get_connector().get("nqn")) '''Add more validation here''' def test_negative_initialize_connection_without_nqn(self): mock_volume = self.get_volume() connector = {} with self.assertRaises(exception.VolumeBackendAPIException): self.driver.initialize_connection(mock_volume, connector) def test_negative_initialize_connection_without_provider_id(self): mock_volume = {} self.driver._get_volume_type_extra_specs = mock.Mock( return_value=[{}, constants.VOLUME_TYPE_EC]) connector = self.get_connector() mock_volume["provider_id"] = None with self.assertRaises(exception.VolumeBackendAPIException): self.driver.initialize_connection(mock_volume, connector) @mock.patch.object(swagger_client.StorageApi, 'attach_volume') def test_negative_initialize_connection_api_exception( self, mock_attach_volume): mock_volume = self.get_volume() self.driver._get_volume_type_extra_specs = mock.Mock( return_value=[{}, constants.VOLUME_TYPE_EC]) connector = self.get_connector() mock_attach_volume.side_effect = self.api_exception with self.assertRaises(exception.VolumeBackendAPIException): self.driver.initialize_connection(mock_volume, connector) @mock.patch.object(swagger_client.StorageApi, 'attach_volume') def test_initialize_connection_exception(self, mock_attach_volume): mock_volume = self.get_volume() self.driver._get_volume_type_extra_specs = mock.Mock( return_value=[{}, constants.VOLUME_TYPE_EC]) connector = self.get_connector() mock_attach_volume.side_effect = Exception("mock exception") with self.assertRaises(exception.VolumeBackendAPIException): self.driver.initialize_connection(mock_volume, connector) @mock.patch.object(swagger_client.StorageApi, 'get_volume') @mock.patch.object(swagger_client.StorageApi, 
'attach_volume') @mock.patch.object(swagger_client.TopologyApi, 'get_host_id_list') @mock.patch.object(swagger_client.TopologyApi, 'get_host_info') @mock.patch.object(swagger_client.TopologyApi, 'get_hierarchical_topology') def test_initialize_connection_iops_connection( self, mock_get_topology, mock_get_host_info, mock_get_host_id_list, mock_attach_volume, mock_get_volume): mock_volume = self.get_volume() self.driver._get_volume_type_extra_specs = mock.Mock( return_value=[{}, constants.VOLUME_TYPE_EC]) connector = self.get_connector() mock_attach_volume.return_value = success_uuid mock_get_volume.return_value = get_volume_details mock_get_host_id_list.return_value = get_host_id_list mock_get_host_info.return_value = get_host_info mock_get_topology.return_value = get_topology connector[constants.FSC_IOPS_IMG_MIG] = True conn_info = self.driver.initialize_connection(mock_volume, connector) self.assertIsNotNone(conn_info) @mock.patch.object(swagger_client.StorageApi, 'get_volume') @mock.patch.object(swagger_client.StorageApi, 'attach_volume') @mock.patch.object(swagger_client.TopologyApi, 'get_host_id_list') @mock.patch.object(swagger_client.TopologyApi, 'get_host_info') @mock.patch.object(swagger_client.TopologyApi, 'get_hierarchical_topology') def test_initialize_connection_iops_migration( self, mock_get_topology, mock_get_host_info, mock_get_host_id_list, mock_attach_volume, mock_get_volume): mock_volume = self.get_volume() self.driver._get_volume_type_extra_specs = mock.Mock( return_value=[{}, constants.VOLUME_TYPE_EC]) connector = self.get_connector() mock_attach_volume.return_value = success_uuid mock_get_volume.return_value = get_volume_details mock_get_host_id_list.return_value = get_host_id_list mock_get_host_info.return_value = get_host_info mock_get_topology.return_value = get_topology mock_volume['migration_status'] = "migrating" conn_info = self.driver.initialize_connection(mock_volume, connector) self.assertIsNotNone(conn_info) @mock.patch.object(swagger_client.StorageApi, 'get_volume') @mock.patch.object(swagger_client.StorageApi, 'delete_port') @mock.patch.object(swagger_client.TopologyApi, 'get_host_id_list') @mock.patch.object(swagger_client.TopologyApi, 'fetch_hosts_with_ids') def test_terminate_connection( self, mock_fetch_hosts_with_ids, mock_get_host_id_list, mock_detach_volume, mock_get_volume): mock_volume = self.get_volume() self.driver._get_volume_type_extra_specs = mock.Mock( return_value=[{}, constants.VOLUME_TYPE_EC]) connector = self.get_connector() mock_get_volume.return_value = get_volume_details mock_get_host_id_list.return_value = get_host_id_list mock_fetch_hosts_with_ids.return_value = fetch_hosts_with_ids mock_detach_volume.return_value = success_uuid self.assertIsNone(self.driver.terminate_connection( mock_volume, connector)) def test_negative_terminate_connection_without_provider_id(self): mock_volume = {} self.driver._get_volume_type_extra_specs = mock.Mock( return_value=[{}, constants.VOLUME_TYPE_EC]) connector = self.get_connector() mock_volume["provider_id"] = None with self.assertRaises(exception.VolumeBackendAPIException): self.driver.terminate_connection(mock_volume, connector) @mock.patch.object(swagger_client.StorageApi, 'get_volume') @mock.patch.object(swagger_client.StorageApi, 'delete_port') @mock.patch.object(swagger_client.TopologyApi, 'get_host_id_list') @mock.patch.object(swagger_client.TopologyApi, 'fetch_hosts_with_ids') def test_terminate_connection_force_detach( self, mock_fetch_hosts_with_ids, mock_get_host_id_list, 
mock_detach_volume, mock_get_volume): mock_volume = self.get_volume() self.driver._get_volume_type_extra_specs = mock.Mock( return_value=[{}, constants.VOLUME_TYPE_EC]) mock_get_volume.return_value = get_volume_details mock_get_host_id_list.return_value = get_host_id_list mock_fetch_hosts_with_ids.return_value = fetch_hosts_with_ids mock_detach_volume.return_value = success_uuid connector = None self.assertIsNone(self.driver.terminate_connection( mock_volume, connector)) @mock.patch.object(swagger_client.StorageApi, 'get_volume') def test_negative_terminate_connection_without_nqn(self, mock_get_volume): mock_volume = self.get_volume() mock_get_volume.return_value = get_volume_details connector = {} with self.assertRaises(exception.VolumeBackendAPIException): self.driver.terminate_connection(mock_volume, connector) @mock.patch.object(rest_client.RestClient, 'get_volume_detail') def test_negative_terminate_connection_without_port(self, mock_output): mock_volume = self.get_volume() self.driver._get_volume_type_extra_specs = mock.Mock( return_value=[{}, constants.VOLUME_TYPE_EC]) connector = self.get_connector() mock_output.return_value = {'data': {'ports': None}} with self.assertRaises(exception.VolumeBackendAPIException): self.driver.terminate_connection(mock_volume, connector) @mock.patch.object(rest_client.RestClient, 'get_volume_detail') def test_negative_terminate_connection_with_invalid_port( self, mock_output): mock_volume = self.get_volume() self.driver._get_volume_type_extra_specs = mock.Mock( return_value=[{}, constants.VOLUME_TYPE_EC]) connector = self.get_connector() mock_output.return_value = get_volume_details connector['nqn'] = "dummy_nqn" with self.assertRaises(exception.VolumeBackendAPIException): self.driver.terminate_connection(mock_volume, connector) @mock.patch.object(swagger_client.StorageApi, 'get_volume') @mock.patch.object(swagger_client.StorageApi, 'delete_port') def test_negative_terminate_connection_api_exception( self, mock_detach_volume, mock_get_volume): mock_volume = self.get_volume() self.driver._get_volume_type_extra_specs = mock.Mock( return_value=[{}, constants.VOLUME_TYPE_EC]) connector = self.get_connector() mock_get_volume.return_value = get_volume_details mock_detach_volume.side_effect = self.api_exception with self.assertRaises(exception.VolumeBackendAPIException): self.driver.terminate_connection(mock_volume, connector) @mock.patch.object(swagger_client.StorageApi, 'create_volume') def test_create_volume_from_ec_snapshot(self, mock_create_volume): mock_snapshot = self.get_snapshot() self.driver._get_volume_type_extra_specs = mock.Mock( return_value=[{}, constants.VOLUME_TYPE_EC]) mock_snapshot.volume = self.get_volume() mock_snapshot.provider_id = fake_constants.UUID1 mock_volume2 = self.get_volume() mock_create_volume.return_value = success_uuid new_vol_ret = self.driver.create_volume_from_snapshot( mock_volume2, mock_snapshot) self.assertIsNotNone(new_vol_ret) self.assertEqual(mock_volume2['size'], new_vol_ret['size']) @mock.patch.object(rest_client.RestClient, 'create_volume') def test_create_volume_from_snapshot_exception( self, mock_get_volume_detail): mock_volume = self.get_volume() mock_snapshot = self.get_snapshot() self.driver._get_volume_type_extra_specs = mock.Mock( return_value=[{}, constants.VOLUME_TYPE_EC]) mock_get_volume_detail.side_effect = Exception("mock exception") with self.assertRaises(exception.VolumeBackendAPIException): self.driver.create_volume_from_snapshot(mock_volume, mock_snapshot) 
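    # Illustrative sketch (not one of the upstream driver tests): the
    # multi-decorator tests in this class depend on how stacked
    # mock.patch.object decorators map to arguments -- the innermost
    # decorator supplies the first mock parameter, so the parameter list
    # reads bottom-up.  ``_Backend`` and its methods are made-up names.
    def test_patch_decorator_ordering_sketch(self):
        class _Backend(object):
            def first(self):
                return 'real_first'

            def second(self):
                return 'real_second'

        @mock.patch.object(_Backend, 'second')   # outermost -> last argument
        @mock.patch.object(_Backend, 'first')    # innermost -> first argument
        def _check(mock_first, mock_second):
            mock_first.return_value = 'patched_first'
            mock_second.return_value = 'patched_second'
            backend = _Backend()
            self.assertEqual('patched_first', backend.first())
            self.assertEqual('patched_second', backend.second())

        _check()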
@mock.patch.object(rest_client.RestClient, 'create_volume') def test_create_volume_from_snapshot_APIException( self, mock_create_volume): mock_volume = self.get_volume() mock_snapshot = self.get_snapshot() self.driver._get_volume_type_extra_specs = mock.Mock( return_value=[{}, constants.VOLUME_TYPE_EC]) mock_create_volume.side_effect = self.api_exception with self.assertRaises(exception.VolumeBackendAPIException): self.driver.create_volume_from_snapshot(mock_volume, mock_snapshot) @mock.patch.object(swagger_client.StorageApi, 'delete_volume_copy_task') @mock.patch.object(swagger_client.StorageApi, 'delete_snapshot') @mock.patch.object(swagger_client.StorageApi, 'get_volume_copy_task') @mock.patch.object(swagger_client.StorageApi, 'create_volume_copy_task') @mock.patch.object(swagger_client.StorageApi, 'create_volume') @mock.patch.object(swagger_client.StorageApi, 'create_snapshot') def test_create_cloned_ec_volume( self, mock_create_snapshot, mock_create_volume, mock_create_volume_copy_task, mock_get_task, mock_delete_snapshot, mock_delete_task): target_mock_volume = self.get_volume() source_mock_volume = self.get_volume() self.driver._get_volume_type_extra_specs = mock.Mock( return_value=[{}, constants.VOLUME_TYPE_EC]) mock_create_snapshot.return_value = success_uuid mock_create_volume.return_value = success_uuid mock_create_volume_copy_task.return_value = create_copy_task mock_get_task.return_value = get_task_success mock_delete_snapshot.return_value = success_response mock_delete_task.return_value = success_response self.assertIsNotNone(self.driver.create_cloned_volume( target_mock_volume, source_mock_volume)) @mock.patch.object(swagger_client.StorageApi, 'delete_volume_copy_task') @mock.patch.object(swagger_client.StorageApi, 'delete_snapshot') @mock.patch.object(swagger_client.StorageApi, 'get_volume_copy_task') @mock.patch.object(swagger_client.StorageApi, 'create_volume_copy_task') @mock.patch.object(swagger_client.StorageApi, 'create_volume') @mock.patch.object(swagger_client.StorageApi, 'create_snapshot') def test_create_cloned_ec_volume_delete_task_exception( self, mock_create_snapshot, mock_create_volume, mock_create_volume_copy_task, mock_get_task, mock_delete_snapshot, mock_delete_task): target_mock_volume = self.get_volume() source_mock_volume = self.get_volume() self.driver._get_volume_type_extra_specs = mock.Mock( return_value=[{}, constants.VOLUME_TYPE_EC]) mock_create_snapshot.return_value = success_uuid mock_create_volume.return_value = success_uuid mock_create_volume_copy_task.return_value = create_copy_task mock_get_task.return_value = get_task_success mock_delete_snapshot.return_value = success_response mock_delete_task.side_effect = self.api_exception self.assertIsNotNone(self.driver.create_cloned_volume( target_mock_volume, source_mock_volume)) @mock.patch.object(swagger_client.StorageApi, 'get_volume_copy_task') @mock.patch.object(swagger_client.StorageApi, 'create_volume_copy_task') @mock.patch.object(swagger_client.StorageApi, 'create_volume') def test_create_cloned_ec_volume_get_task_api_exception( self, mock_create_volume, mock_create_volume_copy_task, mock_get_task): target_mock_volume = self.get_volume() source_mock_volume = self.get_volume() self.driver._get_volume_type_extra_specs = mock.Mock( return_value=[{}, constants.VOLUME_TYPE_EC]) mock_create_volume.return_value = success_uuid mock_create_volume_copy_task.return_value = create_copy_task mock_get_task.side_effect = self.api_exception with self.assertRaises(exception.VolumeBackendAPIException): 
self.driver.create_cloned_volume( target_mock_volume, source_mock_volume) @mock.patch.object(rest_client.RestClient, 'copy_volume') @mock.patch.object(swagger_client.StorageApi, 'create_volume') def test_create_cloned_ec_volume_copy_task_exception( self, mock_create_volume, mock_create_volume_copy_task): target_mock_volume = self.get_volume() source_mock_volume = self.get_volume() self.driver._get_volume_type_extra_specs = mock.Mock( return_value=[{}, constants.VOLUME_TYPE_EC]) mock_create_volume.return_value = success_uuid mock_create_volume_copy_task.side_effect = Exception("mock exception") with self.assertRaises(exception.VolumeBackendAPIException): self.driver.create_cloned_volume( target_mock_volume, source_mock_volume) def test_create_clone_in_use_volume(self): target_mock_volume = self.get_volume() source_mock_volume = self.get_volume() source_mock_volume['attach_status'] = "attached" self.driver._get_volume_type_extra_specs = mock.Mock( return_value=[{}, constants.VOLUME_TYPE_EC]) with self.assertRaises(exception.VolumeBackendAPIException): self.driver.create_cloned_volume( target_mock_volume, source_mock_volume) @mock.patch.object(swagger_client.StorageApi, 'create_snapshot') def test_create_snapshot(self, mock_create_snapshot): mock_volume = self.get_volume() snapshot = self.get_snapshot() snapshot['volume'] = mock_volume mock_create_snapshot.return_value = success_uuid ret = self.driver.create_snapshot(snapshot) self.assertIsNotNone(ret) def test_negative_create_snapshot_without_provider_id(self): mock_volume = self.get_volume() snapshot = self.get_snapshot() snapshot['volume'] = mock_volume snapshot['volume']['provider_id'] = None with self.assertRaises(exception.VolumeBackendAPIException): self.driver.create_snapshot(snapshot) def test_negative_create_snapshot_without_provider_id_attr(self): mock_volume = self.get_volume() snapshot = self.get_snapshot() snapshot.volume = mock_volume del snapshot.volume.provider_id with self.assertRaises(exception.VolumeBackendAPIException): self.driver.create_snapshot(snapshot) @mock.patch.object(rest_client.RestClient, 'create_snapshot') def test_negative_create_snapshot_exception(self, mock_create_snapshot): snapshot = self.get_snapshot() mock_volume = self.get_volume() mock_volume['provider_id'] = fake_constants.UUID1 snapshot['volume'] = mock_volume mock_create_snapshot.side_effect = Exception("mock exception") with self.assertRaises(exception.VolumeBackendAPIException): self.driver.create_snapshot(snapshot) @mock.patch.object(rest_client.RestClient, 'create_snapshot') def test_negative_create_snapshot_api_exception( self, mock_create_snapshot): mock_volume = self.get_volume() snapshot = self.get_snapshot() snapshot.volume = mock_volume mock_create_snapshot.side_effect = self.api_exception with self.assertRaises(exception.VolumeBackendAPIException): self.driver.create_snapshot(snapshot) @mock.patch.object(swagger_client.StorageApi, 'delete_snapshot') def test_delete_snapshot(self, mock_delete_snapshot): mock_volume = self.get_volume() snapshot = self.get_snapshot() snapshot['volume'] = mock_volume mock_delete_snapshot.return_value = success_response self.assertIsNone(self.driver.delete_snapshot(snapshot)) def test_negative_delete_snapshot_without_provider_id(self): snapshot = self.get_snapshot() snapshot['provider_id'] = None self.assertIsNone(self.driver.delete_snapshot(snapshot)) def test_negative_delete_snapshot_without_provider_id_attr(self): mock_volume = self.get_volume() snapshot = self.get_snapshot() snapshot.volume = mock_volume 
del snapshot.provider_id with self.assertRaises(exception.VolumeBackendAPIException): self.driver.delete_snapshot(snapshot) @mock.patch.object(rest_client.RestClient, 'delete_snapshot') def test_negative_delete_snapshot_exception(self, mock_delete_snapshot): snapshot = self.get_snapshot() mock_volume = self.get_volume() mock_volume['provider_id'] = fake_constants.UUID1 snapshot['volume'] = mock_volume mock_delete_snapshot.side_effect = Exception("mock exception") with self.assertRaises(exception.VolumeBackendAPIException): self.driver.delete_snapshot(snapshot) @mock.patch.object(rest_client.RestClient, 'delete_snapshot') def test_negative_delete_snapshot_api_exception( self, mock_delete_snapshot): mock_volume = self.get_volume() snapshot = self.get_snapshot() snapshot['volume'] = mock_volume mock_delete_snapshot.side_effect = self.api_exception with self.assertRaises(exception.VolumeBackendAPIException): self.driver.delete_snapshot(snapshot) @mock.patch.object(swagger_client.StorageApi, 'update_volume') def test_extend_ec_volume_success(self, mock_update_volume): mock_volume = self.get_volume() new_size = 100 mock_update_volume.return_value = success_response ret = self.driver.extend_volume(mock_volume, new_size) self.assertIsNone(ret) def test_negative_extend_volume_without_provider_id(self): mock_volume = self.get_volume() new_size = 100 mock_volume['provider_id'] = None self.assertIsNone(self.driver.extend_volume(mock_volume, new_size)) def test_negative_extend_volume__without_provider_id_attr(self): mock_volume = self.get_volume() new_size = 100 del mock_volume.provider_id with self.assertRaises(exception.VolumeBackendAPIException): self.driver.extend_volume(mock_volume, new_size) @mock.patch.object(swagger_client.StorageApi, 'update_volume') def test_extend_volume_exception(self, mock_update_volume): mock_volume = self.get_volume() new_size = 100 mock_update_volume.side_effect = Exception("mock exception") with self.assertRaises(exception.VolumeBackendAPIException): self.driver.extend_volume(mock_volume, new_size) @mock.patch.object(swagger_client.StorageApi, 'update_volume') def test_extend_volume_api_exception(self, mock_update_volume): mock_volume = self.get_volume() new_size = 100 mock_update_volume.side_effect = self.api_exception with self.assertRaises(exception.VolumeBackendAPIException): self.driver.extend_volume(mock_volume, new_size) @mock.patch.object(swagger_client.StorageApi, 'update_volume') def test_update_migrated_volume_success(self, mock_rename_volume): source_mock_volume = self.get_volume() destination_mock_volume = self.get_volume() source_mock_volume['host'] = "FSC1" destination_mock_volume['host'] = "FSC1" mock_rename_volume.return_value = success_response self.assertIsNotNone(self.driver.update_migrated_volume( self.context, source_mock_volume, destination_mock_volume, fields.VolumeStatus.AVAILABLE)) @mock.patch.object(swagger_client.StorageApi, 'update_volume') def test_update_migrated_volume_without_destination_provider_id( self, mock_rename_volume): source_mock_volume = self.get_volume() destination_mock_volume = self.get_volume() destination_mock_volume['provider_id'] = None mock_rename_volume.side_effect = success_response self.assertIsNotNone(self.driver.update_migrated_volume( self.context, source_mock_volume, destination_mock_volume, fields.VolumeStatus.AVAILABLE)) @mock.patch.object(swagger_client.StorageApi, 'update_volume') def test_update_migrated_volume_without_source_provider_id( self, mock_rename_volume): source_mock_volume = self.get_volume() 
destination_mock_volume = self.get_volume() source_mock_volume['provider_id'] = None mock_rename_volume.return_value = success_response self.assertIsNotNone(self.driver.update_migrated_volume( self.context, source_mock_volume, destination_mock_volume, fields.VolumeStatus.AVAILABLE)) @mock.patch.object(rest_client.RestClient, 'rename_volume') def test_update_migrated_volume_api_exception(self, mock_rename_volume): source_mock_volume = self.get_volume() destination_mock_volume = self.get_volume() mock_rename_volume[0].side_effect = self.api_exception mock_rename_volume[1].return_value = success_response self.assertIsNotNone(self.driver.update_migrated_volume( self.context, source_mock_volume, destination_mock_volume, fields.VolumeStatus.AVAILABLE)) @mock.patch.object(swagger_client.StorageApi, 'update_volume') def test_update_migrated_volume_backend_exception( self, mock_rename_volume): source_mock_volume = self.get_volume() destination_mock_volume = self.get_volume() source_mock_volume['provider_id'] = None mock_rename_volume.side_effect = self.api_exception with self.assertRaises(exception.VolumeBackendAPIException): self.driver.update_migrated_volume( self.context, source_mock_volume, destination_mock_volume, fields.VolumeStatus.AVAILABLE) @mock.patch.object(swagger_client.StorageApi, 'update_volume') def test_update_migrated_volume_exception(self, mock_rename_volume): source_mock_volume = self.get_volume() destination_mock_volume = self.get_volume() source_mock_volume['provider_id'] = None mock_rename_volume.side_effect = Exception("mock exception") with self.assertRaises(exception.VolumeBackendAPIException): self.driver.update_migrated_volume( self.context, source_mock_volume, destination_mock_volume, fields.VolumeStatus.AVAILABLE) @mock.patch.object(volume.driver.BaseVD, '_detach_volume') @mock.patch.object(image_utils, 'upload_volume') @mock.patch.object(volume.driver.BaseVD, '_attach_volume') @mock.patch.object(volume_utils, 'brick_get_connector_properties') def test_copy_volume_to_image( self, mock_get_connector, mock_attach_volume, mock_upload_volume, mock_detach): mock_volume = self.get_volume() image_service = fake_image.FakeImageService() self.configuration.use_multipath_for_image_xfer = False self.configuration.enforce_multipath_for_image_xfer = False local_path = 'dev/sda' mock_get_connector.return_value = {} attach_info = {'device': {'path': local_path}, 'conn': {'driver_volume_type': 'nvme', 'data': {}, }} mock_attach_volume.return_value = [attach_info, mock_volume] mock_upload_volume.return_value = None mock_detach.return_value = None self.driver.wait_for_device = mock.Mock( return_value=True) self.assertIsNone( self.driver.copy_volume_to_image( self.context, mock_volume, image_service, fake_constants.IMAGE_ID)) @mock.patch.object(volume.driver.BaseVD, '_detach_volume') @mock.patch.object(image_utils, 'fetch_to_raw') @mock.patch.object(volume.driver.BaseVD, '_attach_volume') @mock.patch.object(volume_utils, 'brick_get_connector_properties') def test_copy_image_to_volume( self, mock_get_connector, mock_attach_volume, mock_fetch_to_raw, mock_detach): mock_volume = self.get_volume() image_service = fake_image.FakeImageService() self.configuration.use_multipath_for_image_xfer = False self.configuration.enforce_multipath_for_image_xfer = False self.configuration.volume_dd_blocksize = 8 local_path = 'dev/sda' mock_get_connector.return_value = {} attach_info = {'device': {'path': local_path}, 'conn': {'driver_volume_type': 'nvme', 'data': {}, }} mock_attach_volume.return_value = 
[attach_info, mock_volume] mock_fetch_to_raw.return_value = None mock_detach.return_value = None self.driver.wait_for_device = mock.Mock( return_value=True) self.assertIsNone( self.driver.copy_image_to_volume( self.context, mock_volume, image_service, fake_constants.IMAGE_ID)) if __name__ == '__main__': unittest.main() ././@PaxHeader0000000000000000000000000000003200000000000011450 xustar000000000000000026 mtime=1759315577.27112 cinder-27.0.0/cinder/tests/unit/volume/drivers/fusionstorage/0000775000175000017500000000000000000000000024331 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/volume/drivers/fusionstorage/__init__.py0000664000175000017500000000000000000000000026430 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/volume/drivers/fusionstorage/test_dsware.py0000664000175000017500000005151400000000000027235 0ustar00zuulzuul00000000000000# Copyright (c) 2018 Huawei Technologies Co., Ltd. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import json from unittest import mock import uuid import ddt from cinder import exception from cinder import objects from cinder.tests.unit import test from cinder.volume import configuration as config from cinder.volume.drivers.fusionstorage import dsware from cinder.volume.drivers.fusionstorage import fs_client from cinder.volume.drivers.fusionstorage import fs_conf from cinder.volume import volume_utils class FakeDSWAREDriver(dsware.DSWAREDriver): def __init__(self): self.configuration = config.Configuration(None) self.conf = fs_conf.FusionStorageConf(self.configuration, "cinder@fs") self.client = None @ddt.ddt class TestDSWAREDriver(test.TestCase): def setUp(self): super(TestDSWAREDriver, self).setUp() self.fake_driver = FakeDSWAREDriver() self.client = fs_client.RestCommon(None, None, None) def tearDown(self): super(TestDSWAREDriver, self).tearDown() @mock.patch.object(fs_client.RestCommon, 'login') def test_do_setup(self, mock_login): self.fake_driver.client = fs_client.RestCommon( 'https://fake_rest_site', 'user', 'password') update_mocker = self.mock_object( self.fake_driver.conf, 'update_config_value') self.fake_driver.configuration.san_address = 'https://fake_rest_site' self.fake_driver.configuration.san_user = 'fake_san_user' self.fake_driver.configuration.san_password = 'fake_san_password' self.fake_driver.do_setup('context') update_mocker.assert_called_once_with() mock_login.assert_called_once_with() @mock.patch.object(fs_client.RestCommon, 'query_pool_info') def test_check_for_setup_error(self, mock_query_pool_info): self.fake_driver.configuration.pools_name = ['fake_pool_name'] self.fake_driver.client = fs_client.RestCommon( 'https://fake_rest_site', 'user', 'password') result1 = [{'poolName': 'fake_pool_name'}, {'poolName': 'fake_pool_name1'}] result2 = [{'poolName': 'fake_pool_name1'}, 
{'poolName': 'fake_pool_name2'}] mock_query_pool_info.return_value = result1 retval = self.fake_driver.check_for_setup_error() self.assertIsNone(retval) mock_query_pool_info.return_value = result2 try: self.fake_driver.check_for_setup_error() except Exception as e: self.assertEqual(exception.InvalidInput, type(e)) @mock.patch.object(fs_client.RestCommon, 'query_pool_info') def test__update_pool_stats(self, mock_query_pool_info): self.fake_driver.configuration.pools_name = ['fake_pool_name'] self.fake_driver.client = fs_client.RestCommon( 'https://fake_rest_site', 'user', 'password') result = [{'poolName': 'fake_pool_name', 'totalCapacity': 2048, 'usedCapacity': 1024}, {'poolName': 'fake_pool_name1', 'totalCapacity': 2048, 'usedCapacity': 1024}] mock_query_pool_info.return_value = result retval = self.fake_driver._update_pool_stats() self.assertDictEqual( {"volume_backend_name": 'FakeDSWAREDriver', "driver_version": "2.0.9", "QoS_support": False, "thin_provisioning_support": False, "vendor_name": "Huawei", "storage_protocol": "SCSI", "pools": [{"pool_name": 'fake_pool_name', "total_capacity_gb": 2.0, "free_capacity_gb": 1.0}]}, retval) mock_query_pool_info.assert_called_once_with() @mock.patch.object(fs_client.RestCommon, 'keep_alive') @mock.patch.object(dsware.DSWAREDriver, '_update_pool_stats') def test_get_volume_stats(self, mock__update_pool_stats, mock_keep_alive): self.fake_driver.client = fs_client.RestCommon( 'https://fake_rest_site', 'user', 'password') result = {"success"} mock__update_pool_stats.return_value = result retval = self.fake_driver.get_volume_stats() self.assertEqual(result, retval) mock_keep_alive.assert_called_once_with() @mock.patch.object(fs_client.RestCommon, 'query_volume_by_name') def test__check_volume_exist(self, mock_query_volume_by_name): self.fake_driver.client = fs_client.RestCommon( 'https://fake_rest_site', 'user', 'password') volume = objects.Volume(_name_id=uuid.uuid4()) result1 = {'volName': 'fake_name'} result2 = None mock_query_volume_by_name.return_value = result1 retval = self.fake_driver._check_volume_exist(volume) self.assertEqual(retval, result1) mock_query_volume_by_name.return_value = result2 retval = self.fake_driver._check_volume_exist(volume) self.assertIsNone(retval) @mock.patch.object(volume_utils, 'extract_host') @mock.patch.object(fs_client.RestCommon, 'query_pool_info') def test__get_pool_id(self, mock_query_pool_info, mock_extract_host): self.fake_driver.client = fs_client.RestCommon( 'https://fake_rest_site', 'user', 'password') volume = objects.Volume(host='host') pool_name1 = 'fake_pool_name1' pool_name2 = 'fake_pool_name2' pool_info = [{'poolName': 'fake_pool_name', 'poolId': 'fake_id'}, {'poolName': 'fake_pool_name1', 'poolId': 'fake_id1'}] mock_query_pool_info.return_value = pool_info mock_extract_host.return_value = pool_name1 retval = self.fake_driver._get_pool_id(volume) self.assertEqual('fake_id1', retval) mock_extract_host.return_value = pool_name2 try: self.fake_driver._get_pool_id(volume) except Exception as e: self.assertEqual(exception.InvalidInput, type(e)) def test__get_vol_name(self): volume1 = objects.Volume(_name_id=uuid.uuid4()) volume1.update( {"provider_location": json.dumps({"name": "fake_name"})}) volume2 = objects.Volume(_name_id=uuid.uuid4()) retval = self.fake_driver._get_vol_name(volume1) self.assertEqual("fake_name", retval) retval = self.fake_driver._get_vol_name(volume2) self.assertEqual(volume2.name, retval) @mock.patch.object(fs_client.RestCommon, 'create_volume') 
@mock.patch.object(dsware.DSWAREDriver, '_get_pool_id') def test_create_volume(self, mock__get_pool_id, mock_create_volume): self.fake_driver.client = fs_client.RestCommon( 'https://fake_rest_site', 'user', 'password') volume = objects.Volume(_name_id=uuid.uuid4(), size=1) mock__get_pool_id.return_value = 'fake_poolID' mock_create_volume.return_value = {'result': 0} retval = self.fake_driver.create_volume(volume) self.assertIsNone(retval) @mock.patch.object(dsware.DSWAREDriver, '_check_volume_exist') @mock.patch.object(fs_client.RestCommon, 'delete_volume') def test_delete_volume(self, mock_delete_volume, mock__check_volume_exist): result = True self.fake_driver.client = fs_client.RestCommon( 'https://fake_rest_site', 'user', 'password') volume = objects.Volume(_name_id=uuid.uuid4()) mock_delete_volume.return_value = {'result': 0} mock__check_volume_exist.return_value = result retval = self.fake_driver.delete_volume(volume) self.assertIsNone(retval) mock__check_volume_exist.return_value = False retval = self.fake_driver.delete_volume(volume) self.assertIsNone(retval) @mock.patch.object(dsware.DSWAREDriver, '_check_volume_exist') @mock.patch.object(fs_client.RestCommon, 'expand_volume') def test_extend_volume(self, mock_expand_volume, mock__check_volume_exist): result1 = True result2 = False self.fake_driver.client = fs_client.RestCommon( 'https://fake_rest_site', 'user', 'password') volume = objects.Volume(_name_id=uuid.uuid4(), size=2) mock_expand_volume.return_value = { 'volName': 'fake_name', 'size': 'new_size'} mock__check_volume_exist.return_value = result1 retval = self.fake_driver.extend_volume(volume=volume, new_size=3) self.assertIsNone(retval) mock__check_volume_exist.return_value = result2 try: self.fake_driver.extend_volume(volume=volume, new_size=3) except Exception as e: self.assertEqual(exception.VolumeBackendAPIException, type(e)) @mock.patch.object(dsware.DSWAREDriver, '_check_volume_exist') @mock.patch.object(dsware.DSWAREDriver, '_check_snapshot_exist') @mock.patch.object(fs_client.RestCommon, 'create_volume_from_snapshot') def test_create_volume_from_snapshot( self, mock_create_volume_from_snapshot, mock_check_snapshot_exist, mock_check_volume_exist): result1 = True result2 = False self.fake_driver.client = fs_client.RestCommon( 'https://fake_rest_site', 'user', 'password') volume = objects.Volume(_name_id=uuid.uuid4()) snapshot = objects.Snapshot( id=uuid.uuid4(), volume_size=2, volume=volume) volume1 = objects.Volume(_name_id=uuid.uuid4(), size=2) volume2 = objects.Volume(_name_id=uuid.uuid4(), size=1) mock_create_volume_from_snapshot.return_value = {'result': 0} mock_check_volume_exist.return_value = result2 mock_check_snapshot_exist.return_value = result1 retval = self.fake_driver.create_volume_from_snapshot( volume1, snapshot) self.assertIsNone(retval) mock_check_volume_exist.return_value = result1 try: self.fake_driver.create_volume_from_snapshot(volume1, snapshot) except Exception as e: self.assertEqual(exception.VolumeBackendAPIException, type(e)) mock_check_volume_exist.return_value = result2 mock_check_snapshot_exist.return_value = result2 try: self.fake_driver.create_volume_from_snapshot(volume1, snapshot) except Exception as e: self.assertEqual(exception.VolumeBackendAPIException, type(e)) mock_check_volume_exist.return_value = result2 mock_check_snapshot_exist.return_value = result1 try: self.fake_driver.create_volume_from_snapshot(volume2, snapshot) except Exception as e: self.assertEqual(exception.VolumeBackendAPIException, type(e)) 
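# test_cloned_volume below expects create_cloned_volume to request the backend volume size in MB (volume.size is in GB, hence the * 1024 in the assertion) and to surface a missing source volume as VolumeBackendAPIException.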
@mock.patch.object(dsware.DSWAREDriver, '_check_volume_exist') @mock.patch.object(fs_client.RestCommon, 'create_volume_from_volume') def test_cloned_volume( self, mock_create_volume_from_volume, mock__check_volume_exist): self.fake_driver.client = fs_client.RestCommon( 'https://fake_rest_site', 'user', 'password') volume = objects.Volume(_name_id=uuid.uuid4(), size=1) src_volume = objects.Volume(_name_id=uuid.uuid4()) result1 = True result2 = False mock__check_volume_exist.return_value = result1 retval = self.fake_driver.create_cloned_volume(volume, src_volume) self.assertIsNone(retval) mock_create_volume_from_volume.assert_called_once_with( vol_name=volume.name, vol_size=volume.size * 1024, src_vol_name=src_volume.name) mock__check_volume_exist.return_value = result2 try: self.fake_driver.create_cloned_volume(volume, src_volume) except Exception as e: self.assertEqual(exception.VolumeBackendAPIException, type(e)) def test__get_snapshot_name(self): snapshot1 = objects.Snapshot(id=uuid.uuid4()) snapshot1.update( {"provider_location": json.dumps({"name": "fake_name"})}) snapshot2 = objects.Snapshot(id=uuid.uuid4()) retval = self.fake_driver._get_snapshot_name(snapshot1) self.assertEqual("fake_name", retval) retval = self.fake_driver._get_snapshot_name(snapshot2) self.assertEqual(snapshot2.name, retval) @mock.patch.object(fs_client.RestCommon, 'query_snapshot_by_name') @mock.patch.object(dsware.DSWAREDriver, '_get_pool_id') def test__check_snapshot_exist( self, mock_get_pool_id, mock_query_snapshot_by_name): self.fake_driver.client = fs_client.RestCommon( 'https://fake_rest_site', 'user', 'password') volume = objects.Volume(_name_id=uuid.uuid4()) snapshot = objects.Snapshot(id=uuid.uuid4()) result1 = {'name': 'fake_name', 'totalNum': 1} result2 = {'name': 'fake_name', 'totalNum': 0} mock_get_pool_id.return_value = "fake_pool_id" mock_query_snapshot_by_name.return_value = result1 retval = self.fake_driver._check_snapshot_exist(volume, snapshot) self.assertEqual({'name': 'fake_name', 'totalNum': 1}, retval) mock_query_snapshot_by_name.return_value = result2 retval = self.fake_driver._check_snapshot_exist(volume, snapshot) self.assertIsNone(retval) @mock.patch.object(fs_client.RestCommon, 'create_snapshot') def test_create_snapshot(self, mock_create_snapshot): self.fake_driver.client = fs_client.RestCommon( 'https://fake_rest_site', 'user', 'password') volume = objects.Volume(_name_id=uuid.uuid4()) snapshot = objects.Snapshot(id=uuid.uuid4(), volume_id=uuid.uuid4(), volume=volume) retval = self.fake_driver.create_snapshot(snapshot) self.assertIsNone(retval) mock_create_snapshot.assert_called_once_with( snapshot_name=snapshot.name, vol_name=volume.name) @mock.patch.object(dsware.DSWAREDriver, '_check_snapshot_exist') @mock.patch.object(fs_client.RestCommon, 'delete_snapshot') def test_delete_snapshot(self, mock_delete_snapshot, mock_check_snapshot_exist): self.fake_driver.client = fs_client.RestCommon( 'https://fake_rest_site', 'user', 'password') volume = objects.Volume(id=uuid.uuid4()) snapshot = objects.Snapshot(id=uuid.uuid4(), volume=volume) result = True mock_delete_snapshot.return_value = {'result': 0} mock_check_snapshot_exist.return_value = result retval = self.fake_driver.delete_snapshot(snapshot) self.assertIsNone(retval) mock_check_snapshot_exist.return_value = False retval = self.fake_driver.delete_snapshot(snapshot) self.assertIsNone(retval) def test__get_manager_ip(self): context = {'host': 'host1'} host1 = {'host1': '1.1.1.1'} host2 = {'host2': '1.1.1.1'}
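# _get_manager_ip resolves the request host against the manager_ips mapping; host2 below has no entry for 'host1', so the lookup is expected to fail with VolumeBackendAPIException.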
self.fake_driver.configuration.manager_ips = host1 retval = self.fake_driver._get_manager_ip(context) self.assertEqual('1.1.1.1', retval) self.fake_driver.configuration.manager_ips = host2 try: self.fake_driver._get_manager_ip(context) except Exception as e: self.assertEqual(exception.VolumeBackendAPIException, type(e)) @mock.patch.object(dsware.DSWAREDriver, '_check_volume_exist') @mock.patch.object(dsware.DSWAREDriver, '_get_manager_ip') @mock.patch.object(fs_client.RestCommon, 'attach_volume') def test__attach_volume(self, mock_attach_volume, mock__get_manager_ip, mock__check_volume_exist): self.fake_driver.client = fs_client.RestCommon( 'https://fake_rest_site', 'user', 'password') volume = objects.Volume(_name_id=uuid.uuid4()) attach_result1 = {volume.name: [{'devName': 'fake_path'}]} attach_result2 = {volume.name: [{'devName': ''}]} result1 = True result2 = False mock__get_manager_ip.return_value = 'fake_ip' mock__check_volume_exist.return_value = result1 mock_attach_volume.return_value = attach_result1 retval, vol = self.fake_driver._attach_volume( "context", volume, "properties") self.assertEqual( ({'device': {'path': 'fake_path'}}, volume), (retval, vol)) mock__get_manager_ip.assert_called_once_with("properties") mock__check_volume_exist.assert_called_once_with(volume) mock_attach_volume.assert_called_once_with(volume.name, 'fake_ip') mock__check_volume_exist.return_value = result2 try: self.fake_driver._attach_volume("context", volume, "properties") except Exception as e: self.assertEqual(exception.VolumeBackendAPIException, type(e)) mock__check_volume_exist.return_value = result1 mock_attach_volume.return_value = attach_result2 try: self.fake_driver._attach_volume("context", volume, "properties") except Exception as e: self.assertEqual(exception.VolumeBackendAPIException, type(e)) @mock.patch.object(dsware.DSWAREDriver, '_check_volume_exist') @mock.patch.object(dsware.DSWAREDriver, '_get_manager_ip') @mock.patch.object(fs_client.RestCommon, 'detach_volume') def test__detach_volume(self, mock_detach_volume, mock__get_manager_ip, mock__check_volume_exist): self.fake_driver.client = fs_client.RestCommon( 'https://fake_rest_site', 'user', 'password') volume = objects.Volume(_name_id=uuid.uuid4()) result1 = True result2 = False mock__get_manager_ip.return_value = 'fake_ip' mock_detach_volume.return_value = {'result': 0} mock__check_volume_exist.return_value = result1 retval = self.fake_driver._detach_volume( 'context', 'attach_info', volume, 'properties') self.assertIsNone(retval) mock__check_volume_exist.return_value = result2 retval = self.fake_driver._detach_volume( 'context', 'attach_info', volume, 'properties') self.assertIsNone(retval) @mock.patch.object(dsware.DSWAREDriver, '_check_volume_exist') @mock.patch.object(dsware.DSWAREDriver, '_get_manager_ip') @mock.patch.object(fs_client.RestCommon, 'attach_volume') @mock.patch.object(fs_client.RestCommon, 'query_volume_by_name') def test_initialize_connection(self, mock_query_volume_by_name, mock_attach_volume, mock__get_manager_ip, mock__check_volume_exist): self.fake_driver.client = fs_client.RestCommon( 'https://fake_rest_site', 'user', 'password') volume = objects.Volume(_name_id=uuid.uuid4()) attach_result = {volume.name: [{'devName': 'fake_path'}]} result1 = True result2 = False mock__get_manager_ip.return_value = 'fake_ip' mock_query_volume_by_name.return_value = {'wwn': 'fake_wwn', 'volName': 'fake_name'} mock_attach_volume.return_value = attach_result mock__check_volume_exist.return_value = result1 retval = 
self.fake_driver.initialize_connection(volume, 'connector') self.assertDictEqual( {'driver_volume_type': 'local', 'data': {'device_path': '/dev/disk/by-id/wwn-0xfake_wwn'}}, retval) mock__check_volume_exist.return_value = result2 try: self.fake_driver.initialize_connection(volume, 'connector') except Exception as e: self.assertEqual(exception.VolumeBackendAPIException, type(e)) @mock.patch.object(dsware.DSWAREDriver, '_check_volume_exist') @mock.patch.object(dsware.DSWAREDriver, '_get_manager_ip') @mock.patch.object(fs_client.RestCommon, 'detach_volume') def test_terminate_connection(self, mock_detach_volume, mock__get_manager_ip, mock__check_volume_exist): self.fake_driver.client = fs_client.RestCommon( 'https://fake_rest_site', 'user', 'password') volume = objects.Volume(_name_id=uuid.uuid4()) result1 = True result2 = False mock__get_manager_ip.return_value = 'fake_ip' mock__check_volume_exist.return_value = result1 retval = self.fake_driver.terminate_connection(volume, 'connector') self.assertIsNone(retval) mock_detach_volume.assert_called_once_with(volume.name, 'fake_ip') mock__check_volume_exist.return_value = result2 retval = self.fake_driver.terminate_connection('volume', 'connector') self.assertIsNone(retval) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/volume/drivers/fusionstorage/test_fs_client.py0000664000175000017500000003121500000000000027712 0ustar00zuulzuul00000000000000# Copyright (c) 2018 Huawei Technologies Co., Ltd # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
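# The RestCommon tests below run against FakeSession (built on test_utils.FakeBaseSession): each REST call is matched by URL regex and answered with canned JSON, so every test can assert the exact URL, request body and timeout passed to requests.Session.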
import json from unittest import mock import requests from cinder.tests.unit import test from cinder.tests.unit.volume.drivers.fusionstorage import test_utils from cinder.volume.drivers.fusionstorage import fs_client class FakeSession(test_utils.FakeBaseSession): method_map = { 'get': { 'rest/version': {'currentVersion': 'fake_version'}, '/storagePool$': {'storagePools': [{'poolName': 'fake_pool_name', 'poolId': 'fake_pool_id'}]}, r'/storagePool\?poolId=0': {'storagePools': [{'poolName': 'fake_pool_name1', 'poolId': 0}]}, r'/volume/queryByName\?volName=fake_name': {'errorCode': 0, 'lunDetailInfo': [{'volume_id': 'fake_id', 'volume_name': 'fake_name'}]}, r'/volume/queryById\?volId=fake_id': {'errorCode': 0, 'lunDetailInfo': [{'volume_id': 'fake_id', 'volume_name': 'fake_name'}]}, r'/lun/wwn/list\?wwn=fake_wwn': {'errorCode': 0, 'lunDetailInfo': [{'volume_id': 'fake_id', 'volume_wwn': 'fake_wwn'}]}, }, 'post': { '/sec/login': {}, '/sec/logout': {'res': 'fake_logout'}, '/sec/keepAlive': {'res': 'fake_keepAlive'}, '/volume/list': {'errorCode': 0, 'volumeList': [ {'volName': 'fake_name1', 'volId': 'fake_id1'}, {'volName': 'fake_name2', 'volId': 'fake_id2'}]}, '/volume/create': {'ID': 'fake_volume_create_id'}, '/volume/delete': {'ID': 'fake_volume_delete_id'}, '/volume/attach': {'fake_name': [{'errorCode': '0', 'ip': 'fake_ip'}]}, '/volume/detach/': {'ID': 'fake_volume_detach_id'}, '/volume/expand': {'ID': 'fake_volume_expend_id'}, '/volume/snapshot/list': {"snapshotList": [{"snapshot": "fake_name", "size": "fake_size"}]}, '/snapshot/list': {'totalNum': 'fake_snapshot_num', 'snapshotList': [{'snapName': 'fake_snapName'}]}, '/snapshot/create/': {'ID': 'fake_snapshot_create_id'}, '/snapshot/delete/': {'ID': 'fake_snapshot_delete_id'}, '/snapshot/rollback': {'ID': 'fake_snapshot_delete_id'}, '/snapshot/volume/create/': {'ID': 'fake_vol_from_snap_id'}, } } class TestFsclient(test.TestCase): def setUp(self): super(TestFsclient, self).setUp() self.mock_object(requests, 'Session', FakeSession) self.client = fs_client.RestCommon('https://fake_rest_site', 'fake_user', 'fake_password') self.client.login() def tearDown(self): super(TestFsclient, self).tearDown() def test_login(self): self.assertEqual('fake_version', self.client.version) self.assertEqual('fake_token', self.client.session.headers['X-Auth-Token']) def test_keep_alive(self): retval = self.client.keep_alive() self.assertIsNone(retval) def test_logout(self): self.assertIsNone(self.client.logout()) def test_query_all_pool_info(self): with mock.patch.object(self.client.session, 'get', wraps=self.client.session.get) as mocker: retval = self.client.query_pool_info() mocker.assert_called_once_with( 'https://fake_rest_site/dsware/service/' 'fake_version/storagePool', timeout=50) self.assertListEqual( [{'poolName': 'fake_pool_name', 'poolId': 'fake_pool_id'}], retval) def test_query_pool_info(self): with mock.patch.object(self.client.session, 'get', wraps=self.client.session.get) as mocker: retval = self.client.query_pool_info(pool_id=0) mocker.assert_called_once_with( 'https://fake_rest_site/dsware/service/' 'fake_version/storagePool?poolId=0', timeout=50) self.assertListEqual( [{'poolName': 'fake_pool_name1', 'poolId': 0}], retval) def test_query_volume_by_name(self): with mock.patch.object(self.client.session, 'get', wraps=self.client.session.get) as mocker: retval = self.client.query_volume_by_name(vol_name='fake_name') mocker.assert_called_once_with( 'https://fake_rest_site/dsware/service/fake_version/' 'volume/queryByName?volName=fake_name', 
timeout=50) self.assertListEqual( [{'volume_id': 'fake_id', 'volume_name': 'fake_name'}], retval) def test_query_volume_by_id(self): with mock.patch.object(self.client.session, 'get', wraps=self.client.session.get) as mocker: retval = self.client.query_volume_by_id(vol_id='fake_id') mocker.assert_called_once_with( 'https://fake_rest_site/dsware/service/fake_version/' 'volume/queryById?volId=fake_id', timeout=50) self.assertListEqual( [{'volume_id': 'fake_id', 'volume_name': 'fake_name'}], retval) def test_create_volume(self): with mock.patch.object(self.client.session, 'post', wraps=self.client.session.post) as mocker: retval = self.client.create_volume( vol_name='fake_name', vol_size=1, pool_id='fake_id') except_data = json.dumps( {"volName": "fake_name", "volSize": 1, "poolId": "fake_id"}) mocker.assert_called_once_with( 'https://fake_rest_site/dsware/service/fake_version/' 'volume/create', data=except_data, timeout=50) self.assertIsNone(retval) def test_delete_volume(self): with mock.patch.object(self.client.session, 'post', wraps=self.client.session.post) as mocker: retval = self.client.delete_volume(vol_name='fake_name') except_data = json.dumps({"volNames": ['fake_name']}) mocker.assert_called_once_with( 'https://fake_rest_site/dsware/service/fake_version/' 'volume/delete', data=except_data, timeout=50) self.assertIsNone(retval) def test_attach_volume(self): with mock.patch.object(self.client.session, 'post', wraps=self.client.session.post) as mocker: retval = self.client.attach_volume( vol_name='fake_name', manage_ip='fake_ip') except_data = json.dumps( {"volName": ['fake_name'], "ipList": ['fake_ip']}) mocker.assert_called_once_with( 'https://fake_rest_site/dsware/service/fake_version/' 'volume/attach', data=except_data, timeout=50) self.assertDictEqual( {'result': 0, 'fake_name': [{'errorCode': '0', 'ip': 'fake_ip'}]}, retval) def test_detach_volume(self): with mock.patch.object(self.client.session, 'post', wraps=self.client.session.post) as mocker: retval = self.client.detach_volume( vol_name='fake_name', manage_ip='fake_ip') except_data = json.dumps( {"volName": ['fake_name'], "ipList": ['fake_ip']}) mocker.assert_called_once_with( 'https://fake_rest_site/dsware/service/fake_version/' 'volume/detach/', data=except_data, timeout=50) self.assertIsNone(retval) def test_expand_volume(self): with mock.patch.object(self.client.session, 'post', wraps=self.client.session.post) as mocker: retval = self.client.expand_volume( vol_name='fake_name', new_vol_size=2) except_data = json.dumps({"volName": 'fake_name', "newVolSize": 2}) mocker.assert_called_once_with( 'https://fake_rest_site/dsware/service/fake_version/' 'volume/expand', data=except_data, timeout=50) self.assertIsNone(retval) def test_query_snapshot_by_name(self): with mock.patch.object(self.client.session, 'post', wraps=self.client.session.post) as mocker: retval = self.client.query_snapshot_by_name( pool_id='fake_id', snapshot_name='fake_name') except_data = json.dumps( {"poolId": 'fake_id', "pageNum": 1, "pageSize": 1000, "filters": {"volumeName": 'fake_name'}}) mocker.assert_called_once_with( 'https://fake_rest_site/dsware/service/fake_version/' 'snapshot/list', data=except_data, timeout=50) self.assertDictEqual( {'result': 0, 'totalNum': 'fake_snapshot_num', 'snapshotList': [{'snapName': 'fake_snapName'}]}, retval) def test_create_snapshot(self): with mock.patch.object(self.client.session, 'post', wraps=self.client.session.post) as mocker: retval = self.client.create_snapshot( snapshot_name='fake_snap', vol_name='fake_name') 
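# except_data below is the JSON body the client is expected to post to the snapshot/create/ endpoint.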
except_data = json.dumps( {"volName": "fake_name", "snapshotName": "fake_snap"}) mocker.assert_called_once_with( 'https://fake_rest_site/dsware/service/fake_version/' 'snapshot/create/', data=except_data, timeout=50) self.assertIsNone(retval) def test_delete_snapshot(self): with mock.patch.object(self.client.session, 'post', wraps=self.client.session.post) as mocker: retval = self.client.delete_snapshot(snapshot_name='fake_snap') except_data = json.dumps({"snapshotName": "fake_snap"}) mocker.assert_called_once_with( 'https://fake_rest_site/dsware/service/fake_version/' 'snapshot/delete/', data=except_data, timeout=50) self.assertIsNone(retval) def test_create_volume_from_snapshot(self): with mock.patch.object(self.client.session, 'post', wraps=self.client.session.post) as mocker: retval = self.client.create_volume_from_snapshot( snapshot_name='fake_snap', vol_name='fake_name', vol_size=2) except_data = json.dumps({"src": 'fake_snap', "volName": 'fake_name', "volSize": 2}) mocker.assert_called_once_with( 'https://fake_rest_site/dsware/service/fake_version/' 'snapshot/volume/create/', data=except_data, timeout=50) self.assertIsNone(retval) @mock.patch.object(fs_client.RestCommon, 'create_snapshot') @mock.patch.object(fs_client.RestCommon, 'create_volume_from_snapshot') @mock.patch.object(fs_client.RestCommon, 'delete_snapshot') def test_create_volume_from_volume( self, mock_delete_snapshot, mock_volume_from_snapshot, mock_create_snapshot): vol_name = 'fake_name' vol_size = 3 src_vol_name = 'src_fake_name' temp_snapshot_name = "temp" + src_vol_name + "clone" + vol_name retval = self.client.create_volume_from_volume( vol_name, vol_size, src_vol_name) mock_create_snapshot.assert_called_once_with( vol_name=src_vol_name, snapshot_name=temp_snapshot_name) mock_volume_from_snapshot.assert_called_once_with( snapshot_name=temp_snapshot_name, vol_name=vol_name, vol_size=vol_size) mock_delete_snapshot.assert_called_once_with( snapshot_name=temp_snapshot_name) self.assertIsNone(retval) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/volume/drivers/fusionstorage/test_fs_conf.py0000664000175000017500000001420200000000000027356 0ustar00zuulzuul00000000000000# Copyright (c) 2018 Huawei Technologies Co., Ltd. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
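# The FusionStorageConf tests below write a temporary cinder.conf-style file with configparser and check that the san address, credentials, storage pools and manager_ips are read back from the backend configuration.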
import configparser import os import shutil import tempfile from unittest import mock import ddt from cinder.tests.unit import test from cinder.volume.drivers.fusionstorage import fs_conf @ddt.ddt class FusionStorageConfTestCase(test.TestCase): def setUp(self): super(FusionStorageConfTestCase, self).setUp() self.tmp_dir = tempfile.mkdtemp() self.conf = mock.Mock() self._create_fake_conf_file() self.fusionstorage_conf = fs_conf.FusionStorageConf( self.conf, "cinder@fs") def tearDown(self): shutil.rmtree(self.tmp_dir) super(FusionStorageConfTestCase, self).tearDown() def _create_fake_conf_file(self): self.conf.cinder_fusionstorage_conf_file = ( self.tmp_dir + '/cinder.conf') config = configparser.ConfigParser() config.add_section('backend_name') config.set('backend_name', 'dsware_rest_url', 'https://fake_rest_site') config.set('backend_name', 'san_login', 'fake_user') config.set('backend_name', 'san_password', 'fake_passwd') config.set('backend_name', 'dsware_storage_pools', 'fake_pool') config.add_section('manager_ip') config.set('manager_ip', 'fake_host', 'fake_ip') with open(self.conf.cinder_fusionstorage_conf_file, 'w') as conf_file: config.write(conf_file) @mock.patch.object(fs_conf.FusionStorageConf, '_encode_authentication') @mock.patch.object(fs_conf.FusionStorageConf, '_pools_name') @mock.patch.object(fs_conf.FusionStorageConf, '_san_address') @mock.patch.object(fs_conf.FusionStorageConf, '_san_user') @mock.patch.object(fs_conf.FusionStorageConf, '_san_password') def test_update_config_value(self, mock_san_password, mock_san_user, mock_san_address, mock_pools_name, mock_encode_authentication): self.fusionstorage_conf.update_config_value() mock_encode_authentication.assert_called_once_with() mock_pools_name.assert_called_once_with() mock_san_address.assert_called_once_with() mock_san_user.assert_called_once_with() mock_san_password.assert_called_once_with() @mock.patch.object(os.path, 'exists') def test__encode_authentication(self, mock_exists): config = configparser.ConfigParser() config.read(self.conf.cinder_fusionstorage_conf_file) mock_exists.return_value = False user_name = 'fake_user' self.mock_object( self.fusionstorage_conf.configuration, 'safe_get', return_value=user_name) self.fusionstorage_conf._encode_authentication() password = 'fake_passwd' self.mock_object( self.fusionstorage_conf.configuration, 'safe_get', return_value=password) self.fusionstorage_conf._encode_authentication() @mock.patch.object(os.path, 'exists') @mock.patch.object(configparser.ConfigParser, 'set') def test__rewrite_conf(self, mock_set, mock_exists): mock_exists.return_value = False mock_set.return_value = "success" self.fusionstorage_conf._rewrite_conf('fake_name', 'fake_pwd') def test__san_address(self): address = 'https://fake_rest_site' self.mock_object( self.fusionstorage_conf.configuration, 'safe_get', return_value=address) self.fusionstorage_conf._san_address() self.assertEqual('https://fake_rest_site', self.fusionstorage_conf.configuration.san_address) def test__san_user(self): user = '!&&&ZmFrZV91c2Vy' self.mock_object( self.fusionstorage_conf.configuration, 'safe_get', return_value=user) self.fusionstorage_conf._san_user() self.assertEqual( 'fake_user', self.fusionstorage_conf.configuration.san_user) user = 'fake_user_2' self.mock_object( self.fusionstorage_conf.configuration, 'safe_get', return_value=user) self.fusionstorage_conf._san_user() self.assertEqual( 'fake_user_2', self.fusionstorage_conf.configuration.san_user) def test__san_password(self): password = '!&&&ZmFrZV9wYXNzd2Q=' 
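# The '!&&&' prefix appears to mark a base64-encoded credential ('ZmFrZV9wYXNzd2Q=' decodes to 'fake_passwd'); _san_password should decode it, while the plain value later in the test is used unchanged.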
self.mock_object( self.fusionstorage_conf.configuration, 'safe_get', return_value=password) self.fusionstorage_conf._san_password() self.assertEqual( 'fake_passwd', self.fusionstorage_conf.configuration.san_password) password = 'fake_passwd_2' self.mock_object( self.fusionstorage_conf.configuration, 'safe_get', return_value=password) self.fusionstorage_conf._san_password() self.assertEqual('fake_passwd_2', self.fusionstorage_conf.configuration.san_password) def test__pools_name(self): pools_name = 'fake_pool' self.mock_object( self.fusionstorage_conf.configuration, 'safe_get', return_value=pools_name) self.fusionstorage_conf._pools_name() self.assertListEqual( ['fake_pool'], self.fusionstorage_conf.configuration.pools_name) def test__manager_ip(self): manager_ips = {'fake_host': 'fake_ip'} self.mock_object( self.fusionstorage_conf.configuration, 'safe_get', return_value=manager_ips) self.fusionstorage_conf._manager_ip() self.assertDictEqual({'fake_host': 'fake_ip'}, self.fusionstorage_conf.configuration.manager_ips) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/volume/drivers/fusionstorage/test_utils.py0000664000175000017500000000276500000000000027114 0ustar00zuulzuul00000000000000# Copyright (c) 2018 Huawei Technologies Co., Ltd. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import json import re import requests class FakeBaseSession(requests.Session): method_map = {} def _get_response(self, method, url): url_map = self.method_map.get(method, {}) tmp = None data = {} for k in url_map: if re.search(k, url): if not tmp or len(tmp) < len(k): data = url_map[k] tmp = k resp_content = {'result': 0} resp_content.update(data) resp = requests.Response() resp.headers['X-Auth-Token'] = 'fake_token' resp.status_code = 0 resp.encoding = 'utf-8' resp._content = json.dumps(resp_content).encode('utf-8') return resp def get(self, url, **kwargs): return self._get_response('get', url) def post(self, url, **kwargs): return self._get_response('post', url) ././@PaxHeader0000000000000000000000000000003200000000000011450 xustar000000000000000026 mtime=1759315577.27512 cinder-27.0.0/cinder/tests/unit/volume/drivers/hitachi/0000775000175000017500000000000000000000000023052 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/volume/drivers/hitachi/__init__.py0000664000175000017500000000000000000000000025151 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/volume/drivers/hitachi/test_hitachi_hbsd_mirror_fc.py0000664000175000017500000022555300000000000031152 0ustar00zuulzuul00000000000000# Copyright (C) 2022, 2024, Hitachi, Ltd. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # """Unit tests for Hitachi HBSD Driver.""" import json from unittest import mock from oslo_config import cfg import requests from cinder import context as cinder_context from cinder.db.sqlalchemy import api as sqlalchemy_api from cinder import exception from cinder.objects import group_snapshot as obj_group_snap from cinder.objects import snapshot as obj_snap from cinder.tests.unit import fake_group from cinder.tests.unit import fake_group_snapshot from cinder.tests.unit import fake_snapshot from cinder.tests.unit import fake_volume from cinder.tests.unit import test from cinder.volume import configuration as conf from cinder.volume import driver from cinder.volume.drivers.hitachi import hbsd_common from cinder.volume.drivers.hitachi import hbsd_fc from cinder.volume.drivers.hitachi import hbsd_rest from cinder.volume.drivers.hitachi import hbsd_rest_api from cinder.volume.drivers.hitachi import hbsd_utils from cinder.volume import volume_types from cinder.volume import volume_utils from cinder.zonemanager import utils as fczm_utils # Configuration parameter values CONFIG_MAP = { 'serial': '886000123456', 'my_ip': '127.0.0.1', 'rest_server_ip_addr': '172.16.18.108', 'rest_server_ip_port': '23451', 'port_id': 'CL1-A', 'host_grp_name': 'HBSD-0123456789abcdef', 'host_mode': 'LINUX/IRIX', 'host_wwn': '0123456789abcdef', 'target_wwn': '1111111123456789', 'user_id': 'user', 'user_pass': 'password', 'pool_name': 'test_pool', 'auth_user': 'auth_user', 'auth_password': 'auth_password', } REMOTE_CONFIG_MAP = { 'serial': '886000456789', 'my_ip': '127.0.0.1', 'rest_server_ip_addr': '172.16.18.107', 'rest_server_ip_port': '334', 'port_id': 'CL2-B', 'host_grp_name': 'HBSD-0123456789abcdef', 'host_mode': 'LINUX/IRIX', 'host_wwn': '0123456789abcdef', 'target_wwn': '2222222234567891', 'user_id': 'remote-user', 'user_pass': 'remote-password', 'pool_name': 'remote_pool', 'auth_user': 'remote_user', 'auth_password': 'remote_password', } # Dummy response for FC zoning device mapping DEVICE_MAP = { 'fabric_name': { 'initiator_port_wwn_list': [CONFIG_MAP['host_wwn']], 'target_port_wwn_list': [CONFIG_MAP['target_wwn']]}} REMOTE_DEVICE_MAP = { 'fabric_name': { 'initiator_port_wwn_list': [REMOTE_CONFIG_MAP['host_wwn']], 'target_port_wwn_list': [REMOTE_CONFIG_MAP['target_wwn']]}} DEFAULT_CONNECTOR = { 'host': 'host', 'ip': CONFIG_MAP['my_ip'], 'wwpns': [CONFIG_MAP['host_wwn']], 'multipath': False, } REMOTE_DEFAULT_CONNECTOR = { 'host': 'host', 'ip': REMOTE_CONFIG_MAP['my_ip'], 'wwpns': [REMOTE_CONFIG_MAP['host_wwn']], 'multipath': False, } CTXT = cinder_context.get_admin_context() TEST_VOLUME = [] for i in range(8): volume = {} volume['id'] = '00000000-0000-0000-0000-{0:012d}'.format(i) volume['name'] = 'test-volume{0:d}'.format(i) volume['volume_type_id'] = '00000000-0000-0000-0000-{0:012d}'.format(i) if i == 3 or i == 7: volume['provider_location'] = None elif i == 4: volume['provider_location'] = json.dumps( {'pldev': 4, 'sldev': 4, 'remote-copy': hbsd_utils.MIRROR_ATTR}) elif i == 5: volume['provider_location'] = json.dumps( {'pldev': 5, 'sldev': 5, 'remote-copy': hbsd_utils.MIRROR_ATTR}) elif i == 6: 
volume['provider_location'] = json.dumps( {'pldev': 6, 'sldev': 6, 'remote-copy': hbsd_utils.MIRROR_ATTR}) else: volume['provider_location'] = '{0:d}'.format(i) volume['size'] = 128 if i == 2 or i == 6: volume['status'] = 'in-use' elif i == 7: volume['status'] = None else: volume['status'] = 'available' volume = fake_volume.fake_volume_obj(CTXT, **volume) volume.volume_type = fake_volume.fake_volume_type_obj(CTXT) TEST_VOLUME.append(volume) def _volume_get(context, volume_id): """Return predefined volume info.""" return TEST_VOLUME[int(volume_id.replace("-", ""))] TEST_SNAPSHOT = [] for i in range(2): snapshot = {} snapshot['id'] = '10000000-0000-0000-0000-{0:012d}'.format(i) snapshot['name'] = 'TEST_SNAPSHOT{0:d}'.format(i) snapshot['provider_location'] = '{0:d}'.format(i + 1) snapshot['status'] = 'available' snapshot['volume_id'] = '00000000-0000-0000-0000-{0:012d}'.format(i) snapshot['volume'] = _volume_get(None, snapshot['volume_id']) snapshot['volume_name'] = 'test-volume{0:d}'.format(i) snapshot['volume_size'] = 128 snapshot = obj_snap.Snapshot._from_db_object( CTXT, obj_snap.Snapshot(), fake_snapshot.fake_db_snapshot(**snapshot)) TEST_SNAPSHOT.append(snapshot) TEST_GROUP = [] for i in range(2): group = {} group['id'] = '20000000-0000-0000-0000-{0:012d}'.format(i) group['status'] = 'available' group = fake_group.fake_group_obj(CTXT, **group) TEST_GROUP.append(group) TEST_GROUP_SNAP = [] group_snapshot = {} group_snapshot['id'] = '30000000-0000-0000-0000-{0:012d}'.format(0) group_snapshot['status'] = 'available' group_snapshot = obj_group_snap.GroupSnapshot._from_db_object( CTXT, obj_group_snap.GroupSnapshot(), fake_group_snapshot.fake_db_group_snapshot(**group_snapshot)) TEST_GROUP_SNAP.append(group_snapshot) # Dummy response for REST API POST_SESSIONS_RESULT = { "token": "b74777a3-f9f0-4ea8-bd8f-09847fac48d3", "sessionId": 0, } REMOTE_POST_SESSIONS_RESULT = { "token": "b74777a3-f9f0-4ea8-bd8f-09847fac48d4", "sessionId": 0, } GET_PORTS_RESULT = { "data": [ { "portId": CONFIG_MAP['port_id'], "portType": "FIBRE", "portAttributes": [ "TAR", "MCU", "RCU", "ELUN" ], "fabricMode": True, "portConnection": "PtoP", "lunSecuritySetting": True, "wwn": CONFIG_MAP['target_wwn'], }, ], } REMOTE_GET_PORTS_RESULT = { "data": [ { "portId": REMOTE_CONFIG_MAP['port_id'], "portType": "FIBRE", "portAttributes": [ "TAR", "MCU", "RCU", "ELUN" ], "fabricMode": True, "portConnection": "PtoP", "lunSecuritySetting": True, "wwn": REMOTE_CONFIG_MAP['target_wwn'], }, ], } GET_HOST_WWNS_RESULT = { "data": [ { "hostGroupNumber": 0, "hostWwn": CONFIG_MAP['host_wwn'], }, ], } REMOTE_GET_HOST_WWNS_RESULT = { "data": [ { "hostGroupNumber": 0, "hostWwn": REMOTE_CONFIG_MAP['host_wwn'], }, ], } COMPLETED_SUCCEEDED_RESULT = { "status": "Completed", "state": "Succeeded", "affectedResources": ('a/b/c/1',), } REMOTE_COMPLETED_SUCCEEDED_RESULT = { "status": "Completed", "state": "Succeeded", "affectedResources": ('a/b/c/2',), } COMPLETED_FAILED_RESULT_LU_DEFINED = { "status": "Completed", "state": "Failed", "error": { "errorCode": { "SSB1": "B958", "SSB2": "015A", }, }, } GET_LDEV_RESULT = { "emulationType": "OPEN-V-CVS", "blockCapacity": 2097152, "attributes": ["CVS", "HDP"], "status": "NML", "poolId": 30, "dataReductionStatus": "DISABLED", "dataReductionMode": "disabled", "label": "00000000000000000000000000000000", } GET_LDEV_RESULT_SPLIT = { "emulationType": "OPEN-V-CVS", "blockCapacity": 2097152, "attributes": ["CVS", "HDP"], "status": "NML", "poolId": 30, "dataReductionStatus": "DISABLED", "dataReductionMode": 
"disabled", "label": "00000000000000000000000000000004", } GET_LDEV_RESULT_LABEL = { "emulationType": "OPEN-V-CVS", "blockCapacity": 2097152, "attributes": ["CVS", "HDP"], "status": "NML", "poolId": 30, "dataReductionStatus": "DISABLED", "dataReductionMode": "disabled", "label": "00000000000000000000000000000001", } GET_LDEV_RESULT_MAPPED = { "emulationType": "OPEN-V-CVS", "blockCapacity": 2097152, "attributes": ["CVS", "HDP"], "status": "NML", "ports": [ { "portId": CONFIG_MAP['port_id'], "hostGroupNumber": 0, "hostGroupName": CONFIG_MAP['host_grp_name'], "lun": 1 }, ], } REMOTE_GET_LDEV_RESULT_MAPPED = { "emulationType": "OPEN-V-CVS", "blockCapacity": 2097152, "attributes": ["CVS", "HDP"], "status": "NML", "ports": [ { "portId": REMOTE_CONFIG_MAP['port_id'], "hostGroupNumber": 0, "hostGroupName": REMOTE_CONFIG_MAP['host_grp_name'], "lun": 1 }, ], } GET_LDEV_RESULT_PAIR = { "emulationType": "OPEN-V-CVS", "blockCapacity": 2097152, "attributes": ["CVS", "HDP", "HTI"], "status": "NML", "label": "10000000000000000000000000000000", } GET_LDEV_RESULT_REP = { "emulationType": "OPEN-V-CVS", "blockCapacity": 2097152, "attributes": ["CVS", "HDP", "GAD"], "status": "NML", "numOfPorts": 1, "label": "00000000000000000000000000000004", } GET_LDEV_RESULT_REP_LABEL = { "emulationType": "OPEN-V-CVS", "blockCapacity": 2097152, "attributes": ["CVS", "HDP", "GAD"], "status": "NML", "numOfPorts": 1, "label": "00000000000000000000000000000001", } GET_POOL_RESULT = { "availableVolumeCapacity": 480144, "totalPoolCapacity": 507780, "totalLocatedCapacity": 71453172, "virtualVolumeCapacityRate": -1, } GET_POOLS_RESULT = { "data": [ { "poolId": 30, "poolName": CONFIG_MAP['pool_name'], "availableVolumeCapacity": 480144, "totalPoolCapacity": 507780, "totalLocatedCapacity": 71453172, "virtualVolumeCapacityRate": -1, }, ], } GET_SNAPSHOTS_RESULT = { "data": [ { "primaryOrSecondary": "S-VOL", "status": "PSUS", "pvolLdevId": 0, "muNumber": 1, "svolLdevId": 1, }, ], } GET_SNAPSHOTS_RESULT_PAIR = { "data": [ { "primaryOrSecondary": "S-VOL", "status": "PAIR", "pvolLdevId": 0, "muNumber": 1, "svolLdevId": 1, }, ], } GET_SNAPSHOTS_RESULT_BUSY = { "data": [ { "primaryOrSecondary": "P-VOL", "status": "PSUP", "pvolLdevId": 0, "muNumber": 1, "svolLdevId": 1, }, ], } GET_SNAPSHOTS_RESULT_TEST = { "data": [ { "primaryOrSecondary": "S-VOL", "status": "PSUS", "pvolLdevId": 1, "muNumber": 1, "svolLdevId": 1, }, ], } GET_LUNS_RESULT = { "data": [ { "ldevId": 0, "lun": 1, }, ], } GET_HOST_GROUP_RESULT = { "hostGroupName": CONFIG_MAP['host_grp_name'], } GET_HOST_GROUPS_RESULT = { "data": [ { "hostGroupNumber": 0, "portId": CONFIG_MAP['port_id'], "hostGroupName": "HBSD-test", }, ], } GET_HOST_GROUPS_RESULT_PAIR = { "data": [ { "hostGroupNumber": 1, "portId": CONFIG_MAP['port_id'], "hostGroupName": "HBSD-pair00", }, ], } REMOTE_GET_HOST_GROUPS_RESULT_PAIR = { "data": [ { "hostGroupNumber": 1, "portId": REMOTE_CONFIG_MAP['port_id'], "hostGroupName": "HBSD-pair00", }, ], } GET_LDEVS_RESULT = { "data": [ { "ldevId": 0, "label": "15960cc738c94c5bb4f1365be5eeed44", }, { "ldevId": 1, "label": "15960cc738c94c5bb4f1365be5eeed45", }, ], } GET_REMOTE_MIRROR_COPYPAIR_RESULT = { 'pvolLdevId': 4, 'svolLdevId': 4, 'pvolStatus': 'PAIR', 'svolStatus': 'PAIR', 'replicationType': hbsd_utils.MIRROR_ATTR, } GET_REMOTE_MIRROR_COPYPAIR_RESULT_SPLIT = { 'pvolLdevId': 4, 'svolLdevId': 4, 'pvolStatus': 'PSUS', 'svolStatus': 'SSUS', 'replicationType': hbsd_utils.MIRROR_ATTR, } GET_REMOTE_MIRROR_COPYGROUP_RESULT = { 'copyGroupName': 'HBSD-127.0.0.100G00', 
'copyPairs': [GET_REMOTE_MIRROR_COPYPAIR_RESULT], } GET_REMOTE_MIRROR_COPYGROUP_RESULT_ERROR = { "errorSource": "", "message": "", "solution": "", "messageId": "aaa", "errorCode": { "SSB1": "", "SSB2": "", } } NOTFOUND_RESULT = { "data": [], } ERROR_RESULT = { "errorSource": "", "message": "", "solution": "", "messageId": "", "errorCode": { "SSB1": "", "SSB2": "", } } def _brick_get_connector_properties(multipath=False, enforce_multipath=False): """Return a predefined connector object.""" return DEFAULT_CONNECTOR class FakeLookupService(): """Dummy FC zoning mapping lookup service class.""" def get_device_mapping_from_network(self, initiator_wwns, target_wwns): """Return predefined FC zoning mapping.""" return DEVICE_MAP class FakeResponse(): def __init__(self, status_code, data=None, headers=None): self.status_code = status_code self.data = data self.text = data self.content = data self.headers = {'Content-Type': 'json'} if headers is None else headers def json(self): return self.data class HBSDMIRRORFCDriverTest(test.TestCase): """Unit test class for HBSD MIRROR interface fibre channel module.""" test_existing_ref = {'source-id': '1'} test_existing_ref_name = { 'source-name': '15960cc7-38c9-4c5b-b4f1-365be5eeed45'} def setUp(self): """Set up the test environment.""" def _set_required(opts, required): for opt in opts: opt.required = required # Initialize Cinder and avoid checking driver options. rest_required_opts = [ opt for opt in hbsd_rest.REST_VOLUME_OPTS if opt.required] common_required_opts = [ opt for opt in hbsd_common.COMMON_VOLUME_OPTS if opt.required] _set_required(rest_required_opts, False) _set_required(common_required_opts, False) super(HBSDMIRRORFCDriverTest, self).setUp() _set_required(rest_required_opts, True) _set_required(common_required_opts, True) self.configuration = conf.Configuration(None) self.ctxt = cinder_context.get_admin_context() self._setup_config() self._setup_driver() def _setup_config(self): """Set configuration parameter values.""" self.configuration.config_group = "REST" self.configuration.volume_backend_name = "RESTFC" self.configuration.volume_driver = ( "cinder.volume.drivers.hitachi.hbsd_fc.HBSDFCDriver") self.configuration.reserved_percentage = "0" self.configuration.use_multipath_for_image_xfer = False self.configuration.enforce_multipath_for_image_xfer = False self.configuration.max_over_subscription_ratio = 500.0 self.configuration.driver_ssl_cert_verify = False self.configuration.hitachi_storage_id = CONFIG_MAP['serial'] self.configuration.hitachi_pools = ["30"] self.configuration.hitachi_snap_pool = None self.configuration.hitachi_ldev_range = "0-1" self.configuration.hitachi_target_ports = [CONFIG_MAP['port_id']] self.configuration.hitachi_compute_target_ports\ = [CONFIG_MAP['port_id']] self.configuration.hitachi_group_create = True self.configuration.hitachi_group_delete = True self.configuration.hitachi_copy_speed = 3 self.configuration.hitachi_copy_check_interval = 3 self.configuration.hitachi_async_copy_check_interval = 10 self.configuration.san_login = CONFIG_MAP['user_id'] self.configuration.san_password = CONFIG_MAP['user_pass'] self.configuration.san_ip = CONFIG_MAP[ 'rest_server_ip_addr'] self.configuration.san_api_port = CONFIG_MAP[ 'rest_server_ip_port'] self.configuration.hitachi_rest_disable_io_wait = True self.configuration.hitachi_rest_tcp_keepalive = True self.configuration.hitachi_discard_zero_page = True self.configuration.hitachi_lun_timeout = hbsd_rest._LUN_TIMEOUT self.configuration.hitachi_lun_retry_interval = ( 
hbsd_rest._LUN_RETRY_INTERVAL) self.configuration.hitachi_restore_timeout = hbsd_rest._RESTORE_TIMEOUT self.configuration.hitachi_state_transition_timeout = ( hbsd_rest._STATE_TRANSITION_TIMEOUT) self.configuration.hitachi_lock_timeout = hbsd_rest_api._LOCK_TIMEOUT self.configuration.hitachi_rest_timeout = hbsd_rest_api._REST_TIMEOUT self.configuration.hitachi_extend_timeout = ( hbsd_rest_api._EXTEND_TIMEOUT) self.configuration.hitachi_exec_retry_interval = ( hbsd_rest_api._EXEC_RETRY_INTERVAL) self.configuration.hitachi_rest_connect_timeout = ( hbsd_rest_api._DEFAULT_CONNECT_TIMEOUT) self.configuration.hitachi_rest_job_api_response_timeout = ( hbsd_rest_api._JOB_API_RESPONSE_TIMEOUT) self.configuration.hitachi_rest_get_api_response_timeout = ( hbsd_rest_api._GET_API_RESPONSE_TIMEOUT) self.configuration.hitachi_rest_server_busy_timeout = ( hbsd_rest_api._REST_SERVER_BUSY_TIMEOUT) self.configuration.hitachi_rest_keep_session_loop_interval = ( hbsd_rest_api._KEEP_SESSION_LOOP_INTERVAL) self.configuration.hitachi_rest_another_ldev_mapped_retry_timeout = ( hbsd_rest_api._ANOTHER_LDEV_MAPPED_RETRY_TIMEOUT) self.configuration.hitachi_rest_tcp_keepidle = ( hbsd_rest_api._TCP_KEEPIDLE) self.configuration.hitachi_rest_tcp_keepintvl = ( hbsd_rest_api._TCP_KEEPINTVL) self.configuration.hitachi_rest_tcp_keepcnt = ( hbsd_rest_api._TCP_KEEPCNT) self.configuration.hitachi_host_mode_options = [] self.configuration.hitachi_zoning_request = False self.configuration.use_chap_auth = True self.configuration.chap_username = CONFIG_MAP['auth_user'] self.configuration.chap_password = CONFIG_MAP['auth_password'] self.configuration.san_thin_provision = True self.configuration.san_private_key = '' self.configuration.san_clustername = '' self.configuration.san_ssh_port = '22' self.configuration.san_is_local = False self.configuration.ssh_conn_timeout = '30' self.configuration.ssh_min_pool_conn = '1' self.configuration.ssh_max_pool_conn = '5' self.configuration.hitachi_replication_status_check_short_interval = 5 self.configuration.hitachi_replication_status_check_long_interval\ = 10 * 60 self.configuration.hitachi_replication_status_check_timeout\ = 24 * 60 * 60 self.configuration.hitachi_replication_number = 0 self.configuration.hitachi_pair_target_number = 0 self.configuration.hitachi_rest_pair_target_ports\ = [CONFIG_MAP['port_id']] self.configuration.hitachi_quorum_disk_id = 13 self.configuration.hitachi_mirror_copy_speed = 3 self.configuration.hitachi_mirror_storage_id\ = REMOTE_CONFIG_MAP['serial'] self.configuration.hitachi_mirror_pool = '40' self.configuration.hitachi_mirror_snap_pool = None self.configuration.hitachi_mirror_ldev_range = '2-3' self.configuration.hitachi_mirror_target_ports\ = [REMOTE_CONFIG_MAP['port_id']] self.configuration.hitachi_mirror_compute_target_ports\ = [REMOTE_CONFIG_MAP['port_id']] self.configuration.hitachi_mirror_pair_target_number = 0 self.configuration.hitachi_mirror_rest_pair_target_ports\ = [REMOTE_CONFIG_MAP['port_id']] self.configuration.hitachi_mirror_rest_user\ = REMOTE_CONFIG_MAP['user_id'] self.configuration.hitachi_mirror_rest_password\ = REMOTE_CONFIG_MAP['user_pass'] self.configuration.hitachi_mirror_rest_api_ip\ = REMOTE_CONFIG_MAP['rest_server_ip_addr'] self.configuration.hitachi_mirror_rest_api_port\ = REMOTE_CONFIG_MAP['rest_server_ip_port'] self.configuration.hitachi_set_mirror_reserve_attribute = True self.configuration.hitachi_path_group_id = 0 self.configuration.hitachi_mirror_use_chap_auth = True self.configuration.hitachi_mirror_chap_user = 
CONFIG_MAP['auth_user'] self.configuration.hitachi_mirror_chap_password\ = CONFIG_MAP['auth_password'] self.configuration.hitachi_mirror_ssl_cert_verify = False self.configuration.hitachi_mirror_ssl_cert_path = '/root/path' self.configuration.safe_get = self._fake_safe_get CONF = cfg.CONF CONF.my_ip = CONFIG_MAP['my_ip'] def _fake_safe_get(self, value): """Retrieve a configuration value avoiding throwing an exception.""" try: val = getattr(self.configuration, value) except AttributeError: val = None return val @mock.patch.object(requests.Session, "request") @mock.patch.object( volume_utils, 'brick_get_connector_properties', side_effect=_brick_get_connector_properties) def _setup_driver( self, brick_get_connector_properties=None, request=None): """Set up the driver environment.""" self.driver = hbsd_fc.HBSDFCDriver( configuration=self.configuration) def _request_side_effect( method, url, params, json, headers, auth, timeout, verify): if self.configuration.hitachi_storage_id in url: if method == 'POST': return FakeResponse(200, POST_SESSIONS_RESULT) elif '/ports' in url: return FakeResponse(200, GET_PORTS_RESULT) elif '/host-wwns' in url: return FakeResponse(200, GET_HOST_WWNS_RESULT) elif '/host-groups' in url: return FakeResponse(200, GET_HOST_GROUPS_RESULT_PAIR) else: if method == 'POST': return FakeResponse(200, REMOTE_POST_SESSIONS_RESULT) elif '/ports' in url: return FakeResponse(200, REMOTE_GET_PORTS_RESULT) elif '/host-wwns' in url: return FakeResponse(200, REMOTE_GET_HOST_WWNS_RESULT) elif '/host-groups' in url: return FakeResponse( 200, REMOTE_GET_HOST_GROUPS_RESULT_PAIR) return FakeResponse( 500, ERROR_RESULT, headers={'Content-Type': 'json'}) request.side_effect = _request_side_effect self.driver.do_setup(None) self.driver.check_for_setup_error() self.driver.local_path(None) self.driver.create_export(None, None, None) self.driver.ensure_export(None, None) self.driver.remove_export(None, None) self.driver.create_export_snapshot(None, None, None) self.driver.remove_export_snapshot(None, None) # stop the Loopingcall within the do_setup treatment self.driver.common.rep_primary.client.keep_session_loop.stop() self.driver.common.rep_secondary.client.keep_session_loop.stop() def tearDown(self): self.client = None super(HBSDMIRRORFCDriverTest, self).tearDown() # API test cases @mock.patch.object(requests.Session, "request") @mock.patch.object( volume_utils, 'brick_get_connector_properties', side_effect=_brick_get_connector_properties) def test_do_setup(self, brick_get_connector_properties, request): drv = hbsd_fc.HBSDFCDriver( configuration=self.configuration) self._setup_config() self.configuration.hitachi_pair_target_number = 10 self.configuration.hitachi_mirror_pair_target_number = 20 def _request_side_effect( method, url, params, json, headers, auth, timeout, verify): if self.configuration.hitachi_storage_id in url: if method == 'POST': return FakeResponse(200, POST_SESSIONS_RESULT) elif '/ports' in url: return FakeResponse(200, GET_PORTS_RESULT) elif '/host-wwns' in url: return FakeResponse(200, GET_HOST_WWNS_RESULT) elif '/host-groups' in url: return FakeResponse(200, GET_HOST_GROUPS_RESULT_PAIR) else: if method == 'POST': return FakeResponse(200, REMOTE_POST_SESSIONS_RESULT) elif '/ports' in url: return FakeResponse(200, REMOTE_GET_PORTS_RESULT) elif '/host-wwns' in url: return FakeResponse(200, REMOTE_GET_HOST_WWNS_RESULT) elif '/host-groups' in url: return FakeResponse( 200, REMOTE_GET_HOST_GROUPS_RESULT_PAIR) return FakeResponse( 500, ERROR_RESULT, headers={'Content-Type': 
'json'}) request.side_effect = _request_side_effect drv.do_setup(None) self.assertEqual( {CONFIG_MAP['port_id']: CONFIG_MAP['target_wwn']}, drv.common.rep_primary.storage_info['wwns']) self.assertEqual( {REMOTE_CONFIG_MAP['port_id']: REMOTE_CONFIG_MAP['target_wwn']}, drv.common.rep_secondary.storage_info['wwns']) self.assertEqual(2, brick_get_connector_properties.call_count) self.assertEqual(10, request.call_count) self.assertEqual( "HBSD-pair%2d" % self.configuration.hitachi_pair_target_number, drv.common.rep_primary._PAIR_TARGET_NAME) self.assertEqual( ("HBSD-pair%2d" % self.configuration.hitachi_mirror_pair_target_number), drv.common.rep_secondary._PAIR_TARGET_NAME) # stop the Loopingcall within the do_setup treatment drv.common.rep_primary.client.keep_session_loop.stop() drv.common.rep_secondary.client.keep_session_loop.stop() self._setup_config() @mock.patch.object(requests.Session, "request") @mock.patch.object(volume_types, 'get_volume_type_extra_specs') @mock.patch.object(volume_types, 'get_volume_type_qos_specs') def test_create_volume(self, get_volume_type_qos_specs, get_volume_type_extra_specs, request): extra_specs = {"test1": "aaa"} get_volume_type_extra_specs.return_value = extra_specs get_volume_type_qos_specs.return_value = {'qos_specs': None} request.return_value = FakeResponse(202, COMPLETED_SUCCEEDED_RESULT) self.driver.common.rep_primary._stats = {} self.driver.common.rep_primary._stats['pools'] = [ {'location_info': {'pool_id': 30}}] self.driver.common.rep_secondary._stats = {} self.driver.common.rep_secondary._stats['pools'] = [ {'location_info': {'pool_id': 40}}] ret = self.driver.create_volume(TEST_VOLUME[7]) actual = {'provider_location': '1'} self.assertEqual(actual, ret) self.assertEqual(2, request.call_count) @mock.patch.object(requests.Session, "request") @mock.patch.object(volume_types, 'get_volume_type_extra_specs') @mock.patch.object(volume_types, 'get_volume_type_qos_specs') def test_create_volume_replication(self, get_volume_type_qos_specs, get_volume_type_extra_specs, request): extra_specs = {"test1": "aaa", "hbsd:topology": "active_active_mirror_volume"} get_volume_type_extra_specs.return_value = extra_specs get_volume_type_qos_specs.return_value = {'qos_specs': None} def _request_side_effect( method, url, params, json, headers, auth, timeout, verify): if self.configuration.hitachi_storage_id in url: if method in ('POST', 'PUT'): return FakeResponse(202, COMPLETED_SUCCEEDED_RESULT) elif method == 'GET': if '/remote-mirror-copygroups' in url: return FakeResponse(200, NOTFOUND_RESULT) elif '/remote-mirror-copypairs/' in url: return FakeResponse( 200, GET_REMOTE_MIRROR_COPYPAIR_RESULT) else: if method in ('POST', 'PUT'): return FakeResponse(202, REMOTE_COMPLETED_SUCCEEDED_RESULT) elif method == 'GET': if '/remote-mirror-copygroups' in url: return FakeResponse(200, NOTFOUND_RESULT) return FakeResponse( 500, ERROR_RESULT, headers={'Content-Type': 'json'}) request.side_effect = _request_side_effect self.driver.common.rep_primary._stats = {} self.driver.common.rep_primary._stats['pools'] = [ {'location_info': {'pool_id': 30}}] self.driver.common.rep_secondary._stats = {} self.driver.common.rep_secondary._stats['pools'] = [ {'location_info': {'pool_id': 40}}] ret = self.driver.create_volume(TEST_VOLUME[3]) actual = { 'provider_location': json.dumps( {'pldev': 1, 'sldev': 2, 'remote-copy': hbsd_utils.MIRROR_ATTR})} self.assertEqual(actual, ret) self.assertEqual(14, request.call_count) for args, kwargs in request.call_args_list: if args[0] == 'POST' and 
'remote-mirror-copypairs' in args[1]: self.assertEqual('G', kwargs['json']['copyGroupName'][-3]) break else: self.fail('no create pair api') @mock.patch.object(requests.Session, "request") @mock.patch.object(volume_types, 'get_volume_type_extra_specs') @mock.patch.object(volume_types, 'get_volume_type_qos_specs') def test_create_volume_replication_qos( self, get_volume_type_qos_specs, get_volume_type_extra_specs, request): input_qos_specs = { 'qos_specs': { 'consumer': 'back-end', 'specs': {'upperIops': '1000'}}} get_volume_type_qos_specs.return_value = input_qos_specs extra_specs = {"test1": "aaa", "hbsd:topology": "active_active_mirror_volume"} get_volume_type_extra_specs.return_value = extra_specs def _request_side_effect( method, url, params, json, headers, auth, timeout, verify): if self.configuration.hitachi_storage_id in url: if method in ('POST', 'PUT'): return FakeResponse(202, COMPLETED_SUCCEEDED_RESULT) elif method == 'GET': if '/remote-mirror-copygroups' in url: return FakeResponse(200, NOTFOUND_RESULT) elif '/remote-mirror-copypairs/' in url: return FakeResponse( 200, GET_REMOTE_MIRROR_COPYPAIR_RESULT) else: if method in ('POST', 'PUT'): return FakeResponse(202, REMOTE_COMPLETED_SUCCEEDED_RESULT) elif method == 'GET': if '/remote-mirror-copygroups' in url: return FakeResponse(200, NOTFOUND_RESULT) return FakeResponse( 500, ERROR_RESULT, headers={'Content-Type': 'json'}) request.side_effect = _request_side_effect self.driver.common.rep_primary._stats = {} self.driver.common.rep_primary._stats['pools'] = [ {'location_info': {'pool_id': 30}}] self.driver.common.rep_secondary._stats = {} self.driver.common.rep_secondary._stats['pools'] = [ {'location_info': {'pool_id': 40}}] ret = self.driver.create_volume(TEST_VOLUME[3]) actual = { 'provider_location': json.dumps( {'pldev': 1, 'sldev': 2, 'remote-copy': hbsd_utils.MIRROR_ATTR})} self.assertEqual(actual, ret) self.assertEqual(1, get_volume_type_extra_specs.call_count) self.assertEqual(1, get_volume_type_qos_specs.call_count) self.assertEqual(16, request.call_count) @mock.patch.object(requests.Session, "request") def test_delete_volume(self, request): request.side_effect = [FakeResponse(200, GET_LDEV_RESULT), FakeResponse(200, GET_LDEV_RESULT), FakeResponse(200, GET_LDEV_RESULT), FakeResponse(200, GET_LDEV_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)] self.driver.delete_volume(TEST_VOLUME[0]) self.assertEqual(5, request.call_count) @mock.patch.object(requests.Session, "request") def test_delete_volume_replication(self, request): self.copygroup_count = 0 self.ldev_count = 0 def _request_side_effect( method, url, params, json, headers, auth, timeout, verify): if self.configuration.hitachi_storage_id in url: if method in ('POST', 'PUT', 'DELETE'): return FakeResponse(202, COMPLETED_SUCCEEDED_RESULT) elif method == 'GET': if '/remote-mirror-copygroups/' in url: if self.copygroup_count < 2: self.copygroup_count = self.copygroup_count + 1 return FakeResponse( 200, GET_REMOTE_MIRROR_COPYGROUP_RESULT) else: return FakeResponse( 500, GET_REMOTE_MIRROR_COPYGROUP_RESULT_ERROR, headers={'Content-Type': 'json'}) elif '/remote-mirror-copypairs/' in url: return FakeResponse( 200, GET_REMOTE_MIRROR_COPYPAIR_RESULT_SPLIT) elif '/ldevs/' in url: if self.ldev_count == 0: self.ldev_count = self.ldev_count + 1 return FakeResponse(200, GET_LDEV_RESULT_REP) else: return FakeResponse(200, GET_LDEV_RESULT_SPLIT) else: if method in ('POST', 'PUT', 'DELETE'): return FakeResponse(202, REMOTE_COMPLETED_SUCCEEDED_RESULT) elif method == 'GET': if 
'/ldevs/' in url: return FakeResponse(200, GET_LDEV_RESULT_SPLIT) return FakeResponse( 500, ERROR_RESULT, headers={'Content-Type': 'json'}) request.side_effect = _request_side_effect self.driver.delete_volume(TEST_VOLUME[4]) self.assertEqual(17, request.call_count) @mock.patch.object(requests.Session, "request") def test_delete_volume_primary_is_invalid_ldev(self, request): request.return_value = FakeResponse(200, GET_LDEV_RESULT_LABEL) self.driver.delete_volume(TEST_VOLUME[0]) self.assertEqual(1, request.call_count) @mock.patch.object(requests.Session, "request") def test_delete_volume_primary_secondary_is_invalid_ldev(self, request): request.return_value = FakeResponse(200, GET_LDEV_RESULT_REP_LABEL) self.driver.delete_volume(TEST_VOLUME[4]) self.assertEqual(2, request.call_count) @mock.patch.object(requests.Session, "request") def test_delete_volume_secondary_is_invalid_ldev(self, request): request.side_effect = [FakeResponse(200, GET_LDEV_RESULT_REP_LABEL), FakeResponse(200, GET_LDEV_RESULT_REP), FakeResponse(200, GET_LDEV_RESULT_REP), FakeResponse(200, GET_LDEV_RESULT_REP), FakeResponse(200, GET_LDEV_RESULT_REP), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)] self.driver.delete_volume(TEST_VOLUME[4]) self.assertEqual(6, request.call_count) @mock.patch.object(requests.Session, "request") def test_extend_volume(self, request): request.side_effect = [FakeResponse(200, GET_LDEV_RESULT), FakeResponse(200, GET_LDEV_RESULT), FakeResponse(200, GET_LDEV_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)] self.driver.extend_volume(TEST_VOLUME[0], 256) self.assertEqual(4, request.call_count) @mock.patch.object(requests.Session, "request") def test_extend_volume_replication(self, request): self.ldev_count = 0 self.copypair_count = 0 def _request_side_effect( method, url, params, json, headers, auth, timeout, verify): if self.configuration.hitachi_storage_id in url: if method in ('POST', 'PUT', 'DELETE'): return FakeResponse(202, COMPLETED_SUCCEEDED_RESULT) elif method == 'GET': if '/remote-mirror-copygroups/' in url: return FakeResponse( 200, GET_REMOTE_MIRROR_COPYGROUP_RESULT) elif '/remote-mirror-copygroups' in url: return FakeResponse(200, NOTFOUND_RESULT) elif '/remote-mirror-copypairs/' in url: if self.copypair_count == 0: self.copypair_count = self.copypair_count + 1 return FakeResponse( 200, GET_REMOTE_MIRROR_COPYPAIR_RESULT_SPLIT) else: return FakeResponse( 200, GET_REMOTE_MIRROR_COPYPAIR_RESULT) elif '/ldevs/' in url: if self.ldev_count < 2: self.ldev_count = self.ldev_count + 1 return FakeResponse(200, GET_LDEV_RESULT_REP) else: return FakeResponse(200, GET_LDEV_RESULT) else: if method in ('POST', 'PUT', 'DELETE'): return FakeResponse(202, REMOTE_COMPLETED_SUCCEEDED_RESULT) elif method == 'GET': if '/ldevs/' in url: return FakeResponse(200, GET_LDEV_RESULT) return FakeResponse( 500, ERROR_RESULT, headers={'Content-Type': 'json'}) request.side_effect = _request_side_effect self.driver.extend_volume(TEST_VOLUME[4], 256) self.assertEqual(23, request.call_count) @mock.patch.object(driver.FibreChannelDriver, "get_goodness_function") @mock.patch.object(driver.FibreChannelDriver, "get_filter_function") @mock.patch.object(requests.Session, "request") def test_get_volume_stats( self, request, get_filter_function, get_goodness_function): request.return_value = FakeResponse(200, GET_POOLS_RESULT) get_filter_function.return_value = None get_goodness_function.return_value = None stats = self.driver.get_volume_stats(True) self.assertEqual('Hitachi', stats['vendor_name']) 
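# The assertions that follow check the rest of the mocked stats flow: the single
# GET /pools response (GET_POOLS_RESULT) yields one pool reported with
# multiattach enabled, and the filter/goodness functions are each consulted
# exactly once.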
self.assertTrue(stats["pools"][0]['multiattach']) self.assertEqual(1, request.call_count) self.assertEqual(1, get_filter_function.call_count) self.assertEqual(1, get_goodness_function.call_count) @mock.patch.object(requests.Session, "request") @mock.patch.object(volume_types, 'get_volume_type_extra_specs') @mock.patch.object(sqlalchemy_api, 'volume_get', side_effect=_volume_get) @mock.patch.object(volume_types, 'get_volume_type_qos_specs') def test_create_snapshot( self, get_volume_type_qos_specs, volume_get, get_volume_type_extra_specs, request): extra_specs = {"test1": "aaa", "hbsd:topology": "active_active_mirror_volume"} get_volume_type_extra_specs.return_value = extra_specs get_volume_type_qos_specs.return_value = {'qos_specs': None} request.side_effect = [FakeResponse(200, GET_LDEV_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(200, GET_SNAPSHOTS_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)] self.driver.common.rep_primary._stats = {} self.driver.common.rep_primary._stats['pools'] = [ {'location_info': {'pool_id': 30}}] self.driver.common.rep_secondary._stats = {} self.driver.common.rep_secondary._stats['pools'] = [ {'location_info': {'pool_id': 40}}] ret = self.driver.create_snapshot(TEST_SNAPSHOT[0]) actual = {'provider_location': '1'} self.assertEqual(actual, ret) self.assertEqual(5, request.call_count) @mock.patch.object(requests.Session, "request") def test_delete_snapshot(self, request): request.side_effect = [FakeResponse(200, GET_LDEV_RESULT_PAIR), FakeResponse(200, NOTFOUND_RESULT), FakeResponse(200, GET_SNAPSHOTS_RESULT), FakeResponse(200, GET_SNAPSHOTS_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(200, GET_LDEV_RESULT), FakeResponse(200, GET_LDEV_RESULT), FakeResponse(200, GET_LDEV_RESULT), FakeResponse(200, GET_LDEV_RESULT), FakeResponse(200, GET_LDEV_RESULT), FakeResponse(200, GET_LDEV_RESULT), FakeResponse(200, GET_LDEV_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)] self.driver.delete_snapshot(TEST_SNAPSHOT[0]) self.assertEqual(14, request.call_count) @mock.patch.object(requests.Session, "request") def test_delete_snapshot_pldev_in_loc(self, request): self.assertRaises(exception.VolumeDriverException, self.driver.delete_snapshot, TEST_SNAPSHOT[1]) self.assertEqual(1, request.call_count) @mock.patch.object(requests.Session, "request") def test_delete_snapshot_snapshot_is_busy(self, request): request.side_effect = [FakeResponse(200, GET_LDEV_RESULT_PAIR), FakeResponse(200, NOTFOUND_RESULT), FakeResponse(200, GET_SNAPSHOTS_RESULT_TEST)] self.assertRaises(exception.SnapshotIsBusy, self.driver.delete_snapshot, TEST_SNAPSHOT[0]) self.assertEqual(3, request.call_count) @mock.patch.object(requests.Session, "request") @mock.patch.object(volume_types, 'get_volume_type_extra_specs') @mock.patch.object(volume_types, 'get_volume_type_qos_specs') def test_create_cloned_volume( self, get_volume_type_qos_specs, get_volume_type_extra_specs, request): extra_specs = {"test1": "aaa"} get_volume_type_extra_specs.return_value = extra_specs get_volume_type_qos_specs.return_value = {'qos_specs': None} request.side_effect = [FakeResponse(200, GET_LDEV_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(200, GET_SNAPSHOTS_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)] self.driver.common.rep_primary._stats = {} self.driver.common.rep_primary._stats['pools'] = [ 
{'location_info': {'pool_id': 30}}] self.driver.common.rep_secondary._stats = {} self.driver.common.rep_secondary._stats['pools'] = [ {'location_info': {'pool_id': 40}}] ret = self.driver.create_cloned_volume(TEST_VOLUME[0], TEST_VOLUME[1]) actual = {'provider_location': '1'} self.assertEqual(actual, ret) self.assertEqual(5, request.call_count) @mock.patch.object(requests.Session, "request") @mock.patch.object(volume_types, 'get_volume_type_extra_specs') @mock.patch.object(volume_types, 'get_volume_type_qos_specs') def test_create_cloned_volume_replication( self, get_volume_type_qos_specs, get_volume_type_extra_specs, request): extra_specs = {"hbsd:topology": "active_active_mirror_volume"} get_volume_type_extra_specs.return_value = extra_specs get_volume_type_qos_specs.return_value = {'qos_specs': None} self.snapshot_count = 0 def _request_side_effect( method, url, params, json, headers, auth, timeout, verify): if self.configuration.hitachi_storage_id in url: if method in ('POST', 'PUT'): return FakeResponse(202, COMPLETED_SUCCEEDED_RESULT) elif method == 'GET': if '/remote-mirror-copygroups' in url: return FakeResponse(200, NOTFOUND_RESULT) elif '/remote-mirror-copypairs/' in url: return FakeResponse( 200, GET_REMOTE_MIRROR_COPYPAIR_RESULT) elif '/ldevs/' in url: return FakeResponse(200, GET_LDEV_RESULT_REP) elif '/snapshots' in url: if self.snapshot_count < 1: self.snapshot_count = self.snapshot_count + 1 return FakeResponse(200, GET_SNAPSHOTS_RESULT) else: return FakeResponse(200, NOTFOUND_RESULT) else: if method in ('POST', 'PUT'): return FakeResponse(202, REMOTE_COMPLETED_SUCCEEDED_RESULT) elif method == 'GET': if '/remote-mirror-copygroups' in url: return FakeResponse(200, NOTFOUND_RESULT) return FakeResponse( 500, ERROR_RESULT, headers={'Content-Type': 'json'}) request.side_effect = _request_side_effect self.driver.common.rep_primary._stats = {} self.driver.common.rep_primary._stats['pools'] = [ {'location_info': {'pool_id': 30}}] self.driver.common.rep_secondary._stats = {} self.driver.common.rep_secondary._stats['pools'] = [ {'location_info': {'pool_id': 40}}] ret = self.driver.create_cloned_volume(TEST_VOLUME[4], TEST_VOLUME[5]) actual = { 'provider_location': json.dumps( {'pldev': 1, 'sldev': 2, 'remote-copy': hbsd_utils.MIRROR_ATTR})} self.assertEqual(actual, ret) self.assertEqual(23, request.call_count) @mock.patch.object(requests.Session, "request") @mock.patch.object(volume_types, 'get_volume_type_extra_specs') @mock.patch.object(volume_types, 'get_volume_type_qos_specs') def test_create_volume_from_snapshot( self, get_volume_type_qos_specs, get_volume_type_extra_specs, request): extra_specs = {"test1": "aaa"} get_volume_type_extra_specs.return_value = extra_specs get_volume_type_qos_specs.return_value = {'qos_specs': None} request.side_effect = [FakeResponse(200, GET_LDEV_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(200, GET_SNAPSHOTS_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)] self.driver.common.rep_primary._stats = {} self.driver.common.rep_primary._stats['pools'] = [ {'location_info': {'pool_id': 30}}] self.driver.common.rep_secondary._stats = {} self.driver.common.rep_secondary._stats['pools'] = [ {'location_info': {'pool_id': 40}}] ret = self.driver.create_volume_from_snapshot( TEST_VOLUME[0], TEST_SNAPSHOT[0]) actual = {'provider_location': '1'} self.assertEqual(actual, ret) self.assertEqual(5, request.call_count) @mock.patch.object(fczm_utils, "add_fc_zone") 
@mock.patch.object(requests.Session, "request") @mock.patch.object(volume_types, 'get_volume_type_extra_specs') def test_initialize_connection( self, get_volume_type_extra_specs, request, add_fc_zone): self.driver.common.conf.hitachi_zoning_request = True self.driver.common.rep_primary.lookup_service = FakeLookupService() self.driver.common.rep_secondary.lookup_service = FakeLookupService() extra_specs = {"test1": "aaa"} get_volume_type_extra_specs.return_value = extra_specs def _request_side_effect( method, url, params, json, headers, auth, timeout, verify): if self.configuration.hitachi_storage_id in url: if method in ('POST', 'PUT'): return FakeResponse(202, COMPLETED_SUCCEEDED_RESULT) elif method == 'GET': return FakeResponse(200, GET_HOST_WWNS_RESULT) else: if method in ('POST', 'PUT'): return FakeResponse(202, REMOTE_COMPLETED_SUCCEEDED_RESULT) elif method == 'GET': return FakeResponse(200, REMOTE_GET_HOST_WWNS_RESULT) return FakeResponse( 500, ERROR_RESULT, headers={'Content-Type': 'json'}) request.side_effect = _request_side_effect ret = self.driver.initialize_connection( TEST_VOLUME[4], DEFAULT_CONNECTOR) self.assertEqual('fibre_channel', ret['driver_volume_type']) self.assertEqual( [CONFIG_MAP['target_wwn'], REMOTE_CONFIG_MAP['target_wwn']], ret['data']['target_wwn']) self.assertEqual(1, ret['data']['target_lun']) self.assertEqual(4, request.call_count) self.assertEqual(1, add_fc_zone.call_count) @mock.patch.object(fczm_utils, "remove_fc_zone") @mock.patch.object(requests.Session, "request") def test_terminate_connection(self, request, remove_fc_zone): self.driver.common.conf.hitachi_zoning_request = True self.driver.common.rep_primary.lookup_service = FakeLookupService() self.driver.common.rep_secondary.lookup_service = FakeLookupService() def _request_side_effect( method, url, params, json, headers, auth, timeout, verify): if self.configuration.hitachi_storage_id in url: if method in ('POST', 'PUT', 'DELETE'): return FakeResponse(202, COMPLETED_SUCCEEDED_RESULT) elif method == 'GET': if '/ldevs/' in url: return FakeResponse(200, GET_LDEV_RESULT_MAPPED) elif '/host-wwns' in url: return FakeResponse(200, GET_HOST_WWNS_RESULT) else: return FakeResponse(200, NOTFOUND_RESULT) else: if method in ('POST', 'PUT', 'DELETE'): return FakeResponse(202, REMOTE_COMPLETED_SUCCEEDED_RESULT) elif method == 'GET': if '/ldevs/' in url: return FakeResponse(200, REMOTE_GET_LDEV_RESULT_MAPPED) elif '/host-wwns' in url: return FakeResponse(200, REMOTE_GET_HOST_WWNS_RESULT) else: return FakeResponse(200, NOTFOUND_RESULT) return FakeResponse( 500, ERROR_RESULT, headers={'Content-Type': 'json'}) request.side_effect = _request_side_effect self.driver.terminate_connection(TEST_VOLUME[6], DEFAULT_CONNECTOR) self.assertEqual(10, request.call_count) self.assertEqual(1, remove_fc_zone.call_count) @mock.patch.object(fczm_utils, "add_fc_zone") @mock.patch.object(requests.Session, "request") def test_initialize_connection_snapshot(self, request, add_fc_zone): self.driver.common.rep_primary.conf.hitachi_zoning_request = True self.driver.common.lookup_service = FakeLookupService() request.side_effect = [FakeResponse(200, GET_HOST_WWNS_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)] ret = self.driver.initialize_connection_snapshot( TEST_SNAPSHOT[0], DEFAULT_CONNECTOR) self.assertEqual('fibre_channel', ret['driver_volume_type']) self.assertEqual([CONFIG_MAP['target_wwn']], ret['data']['target_wwn']) self.assertEqual(1, ret['data']['target_lun']) self.assertEqual(2, request.call_count) self.assertEqual(1, 
add_fc_zone.call_count) @mock.patch.object(fczm_utils, "remove_fc_zone") @mock.patch.object(requests.Session, "request") def test_terminate_connection_snapshot(self, request, remove_fc_zone): self.driver.common.rep_primary.conf.hitachi_zoning_request = True self.driver.common.lookup_service = FakeLookupService() request.side_effect = [FakeResponse(200, GET_HOST_WWNS_RESULT), FakeResponse(200, GET_LDEV_RESULT_MAPPED), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(200, NOTFOUND_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)] self.driver.terminate_connection_snapshot( TEST_SNAPSHOT[0], DEFAULT_CONNECTOR) self.assertEqual(5, request.call_count) self.assertEqual(1, remove_fc_zone.call_count) @mock.patch.object(requests.Session, "request") @mock.patch.object(volume_types, 'get_volume_type_qos_specs') def test_manage_existing(self, get_volume_type_qos_specs, request): get_volume_type_qos_specs.return_value = {'qos_specs': None} request.side_effect = [FakeResponse(200, GET_LDEV_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(200, GET_LDEVS_RESULT)] ret = self.driver.manage_existing( TEST_VOLUME[0], self.test_existing_ref) actual = {'provider_location': '1'} self.assertEqual(actual, ret) self.assertEqual(3, request.call_count) @mock.patch.object(requests.Session, "request") @mock.patch.object(volume_types, 'get_volume_type_extra_specs') def test_manage_existing_get_size( self, get_volume_type_extra_specs, request): extra_specs = {"test1": "aaa"} get_volume_type_extra_specs.return_value = extra_specs request.return_value = FakeResponse(200, GET_LDEV_RESULT) self.driver.manage_existing_get_size( TEST_VOLUME[0], self.test_existing_ref) self.assertEqual(1, request.call_count) @mock.patch.object(requests.Session, "request") def test_unmanage(self, request): request.side_effect = [FakeResponse(200, GET_LDEV_RESULT), FakeResponse(200, GET_LDEV_RESULT), FakeResponse(200, GET_LDEV_RESULT)] self.driver.unmanage(TEST_VOLUME[0]) self.assertEqual(3, request.call_count) @mock.patch.object(requests.Session, "request") def test_unmanage_has_rep_pair_true(self, request): request.return_value = FakeResponse(200, GET_LDEV_RESULT_REP) self.assertRaises(exception.VolumeDriverException, self.driver.unmanage, TEST_VOLUME[4]) self.assertEqual(1, request.call_count) @mock.patch.object(requests.Session, "request") def test_copy_image_to_volume(self, request): image_service = 'fake_image_service' image_id = 'fake_image_id' request.side_effect = [FakeResponse(200, GET_LDEV_RESULT), FakeResponse(200, COMPLETED_SUCCEEDED_RESULT)] with mock.patch.object(driver.VolumeDriver, 'copy_image_to_volume') \ as mock_copy_image: self.driver.copy_image_to_volume( self.ctxt, TEST_VOLUME[0], image_service, image_id) mock_copy_image.assert_called_with( self.ctxt, TEST_VOLUME[0], image_service, image_id, disable_sparse=False) self.assertEqual(2, request.call_count) @mock.patch.object(requests.Session, "request") def test_update_migrated_volume(self, request): request.side_effect = [FakeResponse(200, GET_LDEV_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)] ret = self.driver.update_migrated_volume( self.ctxt, TEST_VOLUME[0], TEST_VOLUME[1], "available") self.assertEqual(2, request.call_count) actual = ({'_name_id': TEST_VOLUME[1]['id'], 'provider_location': TEST_VOLUME[1]['provider_location']}) self.assertEqual(actual, ret) @mock.patch.object(requests.Session, "request") def test_update_migrated_volume_replication(self, request): request.side_effect = [FakeResponse(200, GET_LDEV_RESULT_REP), 
FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)] ret = self.driver.update_migrated_volume( self.ctxt, TEST_VOLUME[0], TEST_VOLUME[4], "available") self.assertEqual(3, request.call_count) actual = ({'_name_id': TEST_VOLUME[4]['id'], 'provider_location': TEST_VOLUME[4]['provider_location']}) self.assertEqual(actual, ret) def test_unmanage_snapshot(self): """The driver don't support unmange_snapshot.""" self.assertRaises( NotImplementedError, self.driver.unmanage_snapshot, TEST_SNAPSHOT[0]) @mock.patch.object(requests.Session, "request") @mock.patch.object(volume_types, 'get_volume_type_extra_specs') @mock.patch.object(obj_snap.SnapshotList, 'get_all_for_volume') def test_retype(self, get_all_for_volume, get_volume_type_extra_specs, request): extra_specs = {'test1': 'aaa', 'hbsd:target_ports': 'CL2-A'} get_volume_type_extra_specs.return_value = extra_specs get_all_for_volume.return_value = True request.side_effect = [FakeResponse(200, GET_LDEV_RESULT), FakeResponse(200, GET_LDEV_RESULT)] old_specs = {'hbsd:target_ports': 'CL1-A'} new_specs = {'hbsd:target_ports': 'CL2-A'} old_type_ref = volume_types.create(self.ctxt, 'old', old_specs) new_type_ref = volume_types.create(self.ctxt, 'new', new_specs) new_type = volume_types.get_volume_type(self.ctxt, new_type_ref['id']) diff = volume_types.volume_types_diff(self.ctxt, old_type_ref['id'], new_type_ref['id'])[0] host = { 'capabilities': { 'location_info': { 'pool_id': 30, }, }, } ret = self.driver.retype( self.ctxt, TEST_VOLUME[0], new_type, diff, host) self.assertEqual(2, request.call_count) self.assertFalse(ret) @mock.patch.object(requests.Session, "request") @mock.patch.object(volume_types, 'get_volume_type_extra_specs') def test_retype_replication(self, get_volume_type_extra_specs, request): extra_specs = {'test1': 'aaa', 'hbsd:topology': 'active_active_mirror_volume'} get_volume_type_extra_specs.return_value = extra_specs request.return_value = FakeResponse(200, GET_LDEV_RESULT) new_type_ref = volume_types.create(self.ctxt, 'new', extra_specs) new_type = volume_types.get_volume_type(self.ctxt, new_type_ref['id']) diff = {} host = { 'capabilities': { 'location_info': { 'pool_id': 30, }, }, } ret = self.driver.retype( self.ctxt, TEST_VOLUME[0], new_type, diff, host) self.assertEqual(1, request.call_count) self.assertFalse(ret) @mock.patch.object(requests.Session, "request") def test_migrate_volume( self, request): request.side_effect = [FakeResponse(200, GET_LDEV_RESULT), FakeResponse(200, GET_LDEV_RESULT), FakeResponse(200, GET_LDEV_RESULT)] host = { 'capabilities': { 'location_info': { 'storage_id': CONFIG_MAP['serial'], 'pool_id': 30, }, }, } ret = self.driver.migrate_volume(self.ctxt, TEST_VOLUME[0], host) self.assertEqual(3, request.call_count) actual = (True, None) self.assertTupleEqual(actual, ret) @mock.patch.object(requests.Session, "request") def test_revert_to_snapshot(self, request): request.side_effect = [FakeResponse(200, GET_LDEV_RESULT_PAIR), FakeResponse(200, GET_LDEV_RESULT_PAIR), FakeResponse(200, GET_LDEV_RESULT_PAIR), FakeResponse(200, GET_LDEV_RESULT_PAIR), FakeResponse(200, GET_SNAPSHOTS_RESULT), FakeResponse(200, GET_SNAPSHOTS_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(200, GET_SNAPSHOTS_RESULT)] self.driver.revert_to_snapshot( self.ctxt, TEST_VOLUME[0], TEST_SNAPSHOT[0]) self.assertEqual(8, request.call_count) def test_create_group(self): ret = self.driver.create_group(self.ctxt, TEST_GROUP[0]) self.assertIsNone(ret) 
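# The group tests below follow the fixed-sequence mocking pattern used
# throughout this class: one FakeResponse is queued per expected REST call on
# request.side_effect (mock consumes the list in order) and the exact call
# count is asserted afterwards. A minimal sketch, assuming a hypothetical
# test method added purely for illustration:
#
#     @mock.patch.object(requests.Session, "request")
#     def test_example(self, request):
#         request.side_effect = [FakeResponse(200, GET_LDEV_RESULT),
#                                FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)]
#         ...  # drive a driver API that issues exactly two REST requests
#         self.assertEqual(2, request.call_count)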
@mock.patch.object(requests.Session, "request") def test_delete_group(self, request): request.side_effect = [FakeResponse(200, GET_LDEV_RESULT), FakeResponse(200, GET_LDEV_RESULT), FakeResponse(200, GET_LDEV_RESULT), FakeResponse(200, GET_LDEV_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)] ret = self.driver.delete_group( self.ctxt, TEST_GROUP[0], [TEST_VOLUME[0]]) self.assertEqual(5, request.call_count) actual = ( {'status': TEST_GROUP[0]['status']}, [{'id': TEST_VOLUME[0]['id'], 'status': 'deleted'}] ) self.assertTupleEqual(actual, ret) @mock.patch.object(requests.Session, "request") @mock.patch.object(volume_types, 'get_volume_type_extra_specs') @mock.patch.object(volume_types, 'get_volume_type_qos_specs') def test_create_group_from_src_volume( self, get_volume_type_qos_specs, get_volume_type_extra_specs, request): extra_specs = {"test1": "aaa"} get_volume_type_extra_specs.return_value = extra_specs get_volume_type_qos_specs.return_value = {'qos_specs': None} request.side_effect = [FakeResponse(200, GET_LDEV_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(200, GET_SNAPSHOTS_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)] self.driver.common.rep_primary._stats = {} self.driver.common.rep_primary._stats['pools'] = [ {'location_info': {'pool_id': 30}}] self.driver.common.rep_secondary._stats = {} self.driver.common.rep_secondary._stats['pools'] = [ {'location_info': {'pool_id': 40}}] ret = self.driver.create_group_from_src( self.ctxt, TEST_GROUP[1], [TEST_VOLUME[1]], source_group=TEST_GROUP[0], source_vols=[TEST_VOLUME[0]] ) self.assertEqual(5, request.call_count) actual = ( None, [{'id': TEST_VOLUME[1]['id'], 'provider_location': '1'}]) self.assertTupleEqual(actual, ret) @mock.patch.object(requests.Session, "request") @mock.patch.object(volume_types, 'get_volume_type_extra_specs') @mock.patch.object(volume_types, 'get_volume_type_qos_specs') def test_create_group_from_src_Exception( self, get_volume_type_qos_specs, get_volume_type_extra_specs, request): extra_specs = {"test1": "aaa"} get_volume_type_extra_specs.return_value = extra_specs get_volume_type_qos_specs.return_value = {'qos_specs': None} request.side_effect = [FakeResponse(200, GET_LDEV_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(200, GET_SNAPSHOTS_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(200, GET_LDEV_RESULT), FakeResponse(200, GET_LDEV_RESULT), FakeResponse(200, GET_LDEV_RESULT), FakeResponse(200, GET_LDEV_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)] self.driver.common.rep_primary._stats = {} self.driver.common.rep_primary._stats['pools'] = [ {'location_info': {'pool_id': 30}}] self.driver.common.rep_secondary._stats = {} self.driver.common.rep_secondary._stats['pools'] = [ {'location_info': {'pool_id': 40}}] self.assertRaises(exception.VolumeDriverException, self.driver.create_group_from_src, self.ctxt, TEST_GROUP[1], [TEST_VOLUME[1], TEST_VOLUME[1]], source_group=TEST_GROUP[0], source_vols=[TEST_VOLUME[0], TEST_VOLUME[3]] ) self.assertEqual(10, request.call_count) @mock.patch.object(requests.Session, "request") @mock.patch.object(volume_types, 'get_volume_type_extra_specs') @mock.patch.object(volume_types, 'get_volume_type_qos_specs') def test_create_group_from_src_snapshot( self, get_volume_type_qos_specs, get_volume_type_extra_specs, request): extra_specs = {"test1": "aaa"} get_volume_type_extra_specs.return_value = extra_specs 
get_volume_type_qos_specs.return_value = {'qos_specs': None} request.side_effect = [FakeResponse(200, GET_LDEV_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(200, GET_SNAPSHOTS_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)] self.driver.common.rep_primary._stats = {} self.driver.common.rep_primary._stats['pools'] = [ {'location_info': {'pool_id': 30}}] self.driver.common.rep_secondary._stats = {} self.driver.common.rep_secondary._stats['pools'] = [ {'location_info': {'pool_id': 40}}] ret = self.driver.create_group_from_src( self.ctxt, TEST_GROUP[0], [TEST_VOLUME[0]], group_snapshot=TEST_GROUP_SNAP[0], snapshots=[TEST_SNAPSHOT[0]] ) self.assertEqual(5, request.call_count) actual = ( None, [{'id': TEST_VOLUME[0]['id'], 'provider_location': '1'}]) self.assertTupleEqual(actual, ret) @mock.patch.object(volume_utils, 'is_group_a_cg_snapshot_type') def test_update_group(self, is_group_a_cg_snapshot_type): is_group_a_cg_snapshot_type.return_value = False ret = self.driver.update_group( self.ctxt, TEST_GROUP[0], add_volumes=[TEST_VOLUME[0]]) self.assertTupleEqual((None, None, None), ret) @mock.patch.object(requests.Session, "request") @mock.patch.object(volume_types, 'get_volume_type_extra_specs') @mock.patch.object(sqlalchemy_api, 'volume_get', side_effect=_volume_get) @mock.patch.object(volume_utils, 'is_group_a_cg_snapshot_type') @mock.patch.object(volume_types, 'get_volume_type_qos_specs') def test_create_group_snapshot_non_cg( self, get_volume_type_qos_specs, is_group_a_cg_snapshot_type, volume_get, get_volume_type_extra_specs, request): is_group_a_cg_snapshot_type.return_value = False extra_specs = {"test1": "aaa"} get_volume_type_extra_specs.return_value = extra_specs get_volume_type_qos_specs.return_value = {'qos_specs': None} request.side_effect = [FakeResponse(200, GET_LDEV_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(200, GET_SNAPSHOTS_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)] self.driver.common.rep_primary._stats = {} self.driver.common.rep_primary._stats['pools'] = [ {'location_info': {'pool_id': 30}}] self.driver.common.rep_secondary._stats = {} self.driver.common.rep_secondary._stats['pools'] = [ {'location_info': {'pool_id': 40}}] ret = self.driver.create_group_snapshot( self.ctxt, TEST_GROUP_SNAP[0], [TEST_SNAPSHOT[0]] ) self.assertEqual(5, request.call_count) actual = ( {'status': 'available'}, [{'id': TEST_SNAPSHOT[0]['id'], 'provider_location': '1', 'status': 'available'}] ) self.assertTupleEqual(actual, ret) @mock.patch.object(requests.Session, "request") def test_delete_group_snapshot(self, request): request.side_effect = [FakeResponse(200, GET_LDEV_RESULT_PAIR), FakeResponse(200, NOTFOUND_RESULT), FakeResponse(200, GET_SNAPSHOTS_RESULT), FakeResponse(200, GET_SNAPSHOTS_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(200, GET_LDEV_RESULT), FakeResponse(200, GET_LDEV_RESULT), FakeResponse(200, GET_LDEV_RESULT), FakeResponse(200, GET_LDEV_RESULT), FakeResponse(200, GET_LDEV_RESULT), FakeResponse(200, GET_LDEV_RESULT), FakeResponse(200, GET_LDEV_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)] ret = self.driver.delete_group_snapshot( self.ctxt, TEST_GROUP_SNAP[0], [TEST_SNAPSHOT[0]]) self.assertEqual(14, request.call_count) actual = ( {'status': TEST_GROUP_SNAP[0]['status']}, [{'id': TEST_SNAPSHOT[0]['id'], 'status': 'deleted'}] ) 
self.assertTupleEqual(actual, ret) @mock.patch.object(requests.Session, "request") @mock.patch.object(volume_types, 'get_volume_type_extra_specs') @mock.patch.object(volume_types, 'get_volume_type_qos_specs') def test_create_rep_ldev_and_pair_deduplication_compression( self, get_volume_type_qos_specs, get_volume_type_extra_specs, request): get_volume_type_extra_specs.return_value = { 'hbsd:topology': 'active_active_mirror_volume', 'hbsd:capacity_saving': 'deduplication_compression'} get_volume_type_qos_specs.return_value = {'qos_specs': None} self.snapshot_count = 0 def _request_side_effect( method, url, params, json, headers, auth, timeout, verify): if self.configuration.hitachi_storage_id in url: if method in ('POST', 'PUT'): return FakeResponse(202, COMPLETED_SUCCEEDED_RESULT) elif method == 'GET': if ('/remote-mirror-copygroups' in url or '/journals' in url): return FakeResponse(200, NOTFOUND_RESULT) elif '/remote-mirror-copypairs/' in url: return FakeResponse( 200, GET_REMOTE_MIRROR_COPYPAIR_RESULT) elif '/ldevs/' in url: return FakeResponse(200, GET_LDEV_RESULT_REP) elif '/snapshots' in url: if self.snapshot_count < 1: self.snapshot_count = self.snapshot_count + 1 return FakeResponse(200, GET_SNAPSHOTS_RESULT) else: return FakeResponse(200, NOTFOUND_RESULT) else: if method in ('POST', 'PUT'): return FakeResponse(400, REMOTE_COMPLETED_SUCCEEDED_RESULT) elif method == 'GET': if '/remote-mirror-copygroups' in url: return FakeResponse(200, NOTFOUND_RESULT) elif '/ldevs/' in url: return FakeResponse(200, GET_LDEV_RESULT_REP) if '/ldevs/' in url: return FakeResponse(200, GET_LDEV_RESULT_REP) else: return FakeResponse( 200, COMPLETED_SUCCEEDED_RESULT) self.driver.common.rep_primary._stats = {} self.driver.common.rep_primary._stats['pools'] = [ {'location_info': {'pool_id': 30}}] self.driver.common.rep_secondary._stats = {} self.driver.common.rep_secondary._stats['pools'] = [ {'location_info': {'pool_id': 40}}] request.side_effect = _request_side_effect self.assertRaises(exception.VolumeDriverException, self.driver.create_cloned_volume, TEST_VOLUME[4], TEST_VOLUME[5]) self.assertEqual(2, get_volume_type_extra_specs.call_count) self.assertEqual(1, get_volume_type_qos_specs.call_count) self.assertEqual(14, request.call_count) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/volume/drivers/hitachi/test_hitachi_hbsd_rest_fc.py0000664000175000017500000025063500000000000030614 0ustar00zuulzuul00000000000000# Copyright (C) 2020, 2024, Hitachi, Ltd. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
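# NOTE: the module below (test_hitachi_hbsd_rest_fc.py) covers the single-array
# REST FC driver (hbsd_fc.HBSDFCDriver without replication). It reuses the same
# approach as the mirror tests above: requests.Session.request is patched and
# fed FakeResponse objects built from the canned REST payloads defined below
# (POST_SESSIONS_RESULT, GET_PORTS_RESULT, GET_LDEV_RESULT, ...).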
# """Unit tests for Hitachi HBSD Driver.""" import functools from unittest import mock from oslo_config import cfg from oslo_utils import units import requests from requests import models from cinder import context as cinder_context from cinder.db.sqlalchemy import api as sqlalchemy_api from cinder import exception from cinder.objects import group_snapshot as obj_group_snap from cinder.objects import snapshot as obj_snap from cinder.tests.unit import fake_group from cinder.tests.unit import fake_group_snapshot from cinder.tests.unit import fake_snapshot from cinder.tests.unit import fake_volume from cinder.tests.unit import test from cinder.tests.unit import utils as test_utils from cinder.volume import configuration as conf from cinder.volume import driver from cinder.volume.drivers.hitachi import hbsd_common from cinder.volume.drivers.hitachi import hbsd_fc from cinder.volume.drivers.hitachi import hbsd_replication from cinder.volume.drivers.hitachi import hbsd_rest from cinder.volume.drivers.hitachi import hbsd_rest_api from cinder.volume.drivers.hitachi import hbsd_rest_fc from cinder.volume import volume_types from cinder.volume import volume_utils from cinder.zonemanager import utils as fczm_utils # Configuration parameter values CONFIG_MAP = { 'serial': '886000123456', 'my_ip': '127.0.0.1', 'rest_server_ip_addr': '172.16.18.108', 'rest_server_ip_port': '23451', 'port_id': 'CL1-A', 'host_grp_name': 'HBSD-0123456789abcdef', 'host_mode': 'LINUX/IRIX', 'host_wwn': ['0123456789abcdef', '0123456789abcdeg'], 'target_wwn': '1111111123456789', 'user_id': 'user', 'user_pass': 'password', 'pool_name': 'test_pool', 'auth_user': 'auth_user', 'auth_password': 'auth_password', } # Dummy response for FC zoning device mapping DEVICE_MAP = { 'fabric_name': { 'initiator_port_wwn_list': [CONFIG_MAP['host_wwn'][0]], 'target_port_wwn_list': [CONFIG_MAP['target_wwn']]}} DEFAULT_CONNECTOR = { 'host': 'host', 'ip': CONFIG_MAP['my_ip'], 'wwpns': [CONFIG_MAP['host_wwn'][0]], 'multipath': False, } DEVICE_MAP_MULTI_WWN = { 'fabric_name': { 'initiator_port_wwn_list': [ CONFIG_MAP['host_wwn'][0], CONFIG_MAP['host_wwn'][1] ], 'target_port_wwn_list': [CONFIG_MAP['target_wwn']]}} DEFAULT_CONNECTOR_MULTI_WWN = { 'host': 'host', 'ip': CONFIG_MAP['my_ip'], 'wwpns': [CONFIG_MAP['host_wwn'][0], CONFIG_MAP['host_wwn'][1]], 'multipath': False, } CTXT = cinder_context.get_admin_context() TEST_VOLUME = [] for i in range(5): volume = {} volume['id'] = '00000000-0000-0000-0000-{0:012d}'.format(i) volume['name'] = 'test-volume{0:d}'.format(i) volume['volume_type_id'] = '00000000-0000-0000-0000-{0:012d}'.format(i) if i == 3 or i == 4: volume['provider_location'] = None else: volume['provider_location'] = '{0:d}'.format(i) volume['size'] = 128 if i == 2: volume['status'] = 'in-use' elif i == 4: volume['status'] = None else: volume['status'] = 'available' volume = fake_volume.fake_volume_obj(CTXT, **volume) volume.volume_type = fake_volume.fake_volume_type_obj(CTXT) TEST_VOLUME.append(volume) def _volume_get(context, volume_id): """Return predefined volume info.""" return TEST_VOLUME[int(volume_id.replace("-", ""))] TEST_SNAPSHOT = [] snapshot = {} snapshot['id'] = '10000000-0000-0000-0000-{0:012d}'.format(0) snapshot['name'] = 'TEST_SNAPSHOT{0:d}'.format(0) snapshot['provider_location'] = '{0:d}'.format(1) snapshot['status'] = 'available' snapshot['volume_id'] = '00000000-0000-0000-0000-{0:012d}'.format(0) snapshot['volume'] = _volume_get(None, snapshot['volume_id']) snapshot['volume_name'] = 'test-volume{0:d}'.format(0) 
snapshot['volume_size'] = 128 snapshot = obj_snap.Snapshot._from_db_object( CTXT, obj_snap.Snapshot(), fake_snapshot.fake_db_snapshot(**snapshot)) TEST_SNAPSHOT.append(snapshot) TEST_GROUP = [] for i in range(2): group = {} group['id'] = '20000000-0000-0000-0000-{0:012d}'.format(i) group['status'] = 'available' group = fake_group.fake_group_obj(CTXT, **group) TEST_GROUP.append(group) TEST_GROUP_SNAP = [] group_snapshot = {} group_snapshot['id'] = '30000000-0000-0000-0000-{0:012d}'.format(0) group_snapshot['status'] = 'available' group_snapshot = obj_group_snap.GroupSnapshot._from_db_object( CTXT, obj_group_snap.GroupSnapshot(), fake_group_snapshot.fake_db_group_snapshot(**group_snapshot)) TEST_GROUP_SNAP.append(group_snapshot) # Dummy response for REST API POST_SESSIONS_RESULT = { "token": "b74777a3-f9f0-4ea8-bd8f-09847fac48d3", "sessionId": 0, } GET_PORTS_RESULT = { "data": [ { "portId": CONFIG_MAP['port_id'], "portType": "FIBRE", "portAttributes": [ "TAR", "MCU", "RCU", "ELUN" ], "fabricMode": True, "portConnection": "PtoP", "lunSecuritySetting": True, "wwn": CONFIG_MAP['target_wwn'], }, ], } GET_HOST_WWNS_RESULT = { "data": [ { "hostGroupNumber": 0, "hostWwn": CONFIG_MAP['host_wwn'][0], }, ], } GET_HOST_GROUPS_RESULT_TEST = { "data": [ { "hostGroupNumber": 0, "portId": CONFIG_MAP['port_id'], "hostGroupName": CONFIG_MAP['host_grp_name'], }, ], } COMPLETED_SUCCEEDED_RESULT = { "status": "Completed", "state": "Succeeded", "affectedResources": ('a/b/c/1',), } COMPLETED_FAILED_RESULT_LU_DEFINED = { "status": "Completed", "state": "Failed", "error": { "errorCode": { "SSB1": "B958", "SSB2": "015A", }, }, } GET_LDEV_RESULT = { "emulationType": "OPEN-V-CVS", "blockCapacity": 2097152, "attributes": ["CVS", "HDP"], "status": "NML", "poolId": 30, "dataReductionStatus": "DISABLED", "dataReductionMode": "disabled", "label": "00000000000000000000000000000000", } GET_LDEV_RESULT_LABEL = { "emulationType": "OPEN-V-CVS", "blockCapacity": 2097152, "attributes": ["CVS", "HDP"], "status": "NML", "poolId": 30, "dataReductionStatus": "DISABLED", "dataReductionMode": "disabled", "label": "00000000000000000000000000000001", } GET_LDEV_RESULT_SNAP = { "emulationType": "OPEN-V-CVS", "blockCapacity": 2097152, "attributes": ["CVS", "HDP"], "status": "NML", "poolId": 30, "dataReductionStatus": "DISABLED", "dataReductionMode": "disabled", "label": "10000000000000000000000000000000", } GET_LDEV_RESULT_MAPPED = { "emulationType": "OPEN-V-CVS", "blockCapacity": 2097152, "attributes": ["CVS", "HDP"], "status": "NML", "ports": [ { "portId": CONFIG_MAP['port_id'], "hostGroupNumber": 0, "hostGroupName": CONFIG_MAP['host_grp_name'], "lun": 1 }, ], } GET_LDEV_RESULT_PAIR = { "emulationType": "OPEN-V-CVS", "blockCapacity": 2097152, "attributes": ["CVS", "HDP", "HTI"], "status": "NML", "label": "00000000000000000000000000000000", } GET_LDEV_RESULT_PAIR_SNAP = { "emulationType": "OPEN-V-CVS", "blockCapacity": 2097152, "attributes": ["CVS", "HDP", "HTI"], "status": "NML", "label": "10000000000000000000000000000000", } GET_LDEV_RESULT_PAIR_TEST = { "emulationType": "OPEN-V-CVS", "blockCapacity": 2097152, "attributes": ["CVS", "HDP", "HTI", "111"], "status": "NML", "snapshotPoolId": 0 } GET_LDEV_RESULT_PAIR_STATUS_TEST = { "emulationType": "OPEN-V-CVS", "blockCapacity": 2097152, "attributes": ["CVS", "HDP", "HTI"], "status": "TEST", "poolId": 30, "dataReductionStatus": "REHYDRATING", "dataReductionMode": "disabled" } GET_POOL_RESULT = { "availableVolumeCapacity": 480144, "totalPoolCapacity": 507780, "totalLocatedCapacity": 
71453172, } GET_SNAPSHOTS_RESULT = { "data": [ { "primaryOrSecondary": "S-VOL", "status": "PSUS", "pvolLdevId": 0, "muNumber": 1, "svolLdevId": 1, }, ], } GET_SNAPSHOTS_RESULT_PAIR = { "data": [ { "primaryOrSecondary": "S-VOL", "status": "PAIR", "pvolLdevId": 0, "muNumber": 1, "svolLdevId": 1, }, ], } GET_SNAPSHOTS_RESULT_BUSY = { "data": [ { "primaryOrSecondary": "P-VOL", "status": "PSUP", "pvolLdevId": 0, "muNumber": 1, "svolLdevId": 1, }, ], } GET_POOLS_RESULT = { "data": [ { "poolId": 30, "poolName": CONFIG_MAP['pool_name'], "availableVolumeCapacity": 480144, "totalPoolCapacity": 507780, "totalLocatedCapacity": 71453172, "virtualVolumeCapacityRate": -1, }, ], } GET_LUNS_RESULT = { "data": [ { "ldevId": 0, "lun": 1, }, ], } GET_HOST_GROUP_RESULT = { "hostGroupName": CONFIG_MAP['host_grp_name'], } GET_HOST_GROUPS_RESULT = { "data": [ { "hostGroupNumber": 0, "portId": CONFIG_MAP['port_id'], "hostGroupName": "HBSD-test", }, ], } GET_HOST_GROUPS_RESULT_PAIR = { "data": [ { "hostGroupNumber": 1, "portId": CONFIG_MAP['port_id'], "hostGroupName": "HBSD-pair00", }, ], } GET_LDEVS_RESULT = { "data": [ { "ldevId": 0, "label": "15960cc738c94c5bb4f1365be5eeed44", }, { "ldevId": 1, "label": "15960cc738c94c5bb4f1365be5eeed45", }, ], } GET_LDEVS_RESULT_QOS = { "data": [ { "ldevId": 0, "label": "15960cc738c94c5bb4f1365be5eeed44", "qos": {"upperIops": 1000}, }, ], } NOTFOUND_RESULT = { "data": [], } ERROR_RESULT = { "errorSource": "", "message": "", "solution": "", "messageId": "", "errorCode": { "SSB1": "", "SSB2": "", } } def _brick_get_connector_properties(multipath=False, enforce_multipath=False): """Return a predefined connector object.""" return DEFAULT_CONNECTOR def _brick_get_connector_properties_multi_wwn( multipath=False, enforce_multipath=False): """Return a predefined connector object.""" return DEFAULT_CONNECTOR_MULTI_WWN def reduce_retrying_time(func): @functools.wraps(func) def wrapper(*args, **kwargs): backup_lock_waittime = hbsd_rest_api._LOCK_TIMEOUT backup_exec_max_waittime = hbsd_rest_api._REST_TIMEOUT backup_job_api_response_timeout = ( hbsd_rest_api._JOB_API_RESPONSE_TIMEOUT) backup_get_api_response_timeout = ( hbsd_rest_api._GET_API_RESPONSE_TIMEOUT) backup_extend_waittime = hbsd_rest_api._EXTEND_TIMEOUT backup_exec_retry_interval = hbsd_rest_api._EXEC_RETRY_INTERVAL backup_rest_server_restart_timeout = ( hbsd_rest_api._REST_SERVER_RESTART_TIMEOUT) backup_state_transition_timeout = ( hbsd_rest._STATE_TRANSITION_TIMEOUT) hbsd_rest_api._LOCK_TIMEOUT = 0.01 hbsd_rest_api._REST_TIMEOUT = 0.01 hbsd_rest_api._JOB_API_RESPONSE_TIMEOUT = 0.01 hbsd_rest_api._GET_API_RESPONSE_TIMEOUT = 0.01 hbsd_rest_api._EXTEND_TIMEOUT = 0.01 hbsd_rest_api._EXEC_RETRY_INTERVAL = 0.004 hbsd_rest_api._REST_SERVER_RESTART_TIMEOUT = 0.02 hbsd_rest._STATE_TRANSITION_TIMEOUT = 0.01 func(*args, **kwargs) hbsd_rest_api._LOCK_TIMEOUT = backup_lock_waittime hbsd_rest_api._REST_TIMEOUT = backup_exec_max_waittime hbsd_rest_api._JOB_API_RESPONSE_TIMEOUT = ( backup_job_api_response_timeout) hbsd_rest_api._GET_API_RESPONSE_TIMEOUT = ( backup_get_api_response_timeout) hbsd_rest_api._EXTEND_TIMEOUT = backup_extend_waittime hbsd_rest_api._EXEC_RETRY_INTERVAL = backup_exec_retry_interval hbsd_rest_api._REST_SERVER_RESTART_TIMEOUT = ( backup_rest_server_restart_timeout) hbsd_rest._STATE_TRANSITION_TIMEOUT = ( backup_state_transition_timeout) return wrapper class FakeLookupService(): """Dummy FC zoning mapping lookup service class.""" def get_device_mapping_from_network(self, initiator_wwns, target_wwns): """Return predefined 
FC zoning mapping.""" return DEVICE_MAP class FakeLookupServiceMultiWwn(): """Dummy FC zoning mapping lookup service class.""" def get_device_mapping_from_network(self, initiator_wwns, target_wwns): """Return predefined FC zoning mapping.""" return DEVICE_MAP_MULTI_WWN class FakeResponse(): def __init__(self, status_code, data=None, headers=None): self.status_code = status_code self.data = data self.text = data self.content = data self.headers = {'Content-Type': 'json'} if headers is None else headers def json(self): return self.data class HBSDRESTFCDriverTest(test.TestCase): """Unit test class for HBSD REST interface fibre channel module.""" test_existing_ref = {'source-id': '1'} test_existing_ref_name = { 'source-name': '15960cc7-38c9-4c5b-b4f1-365be5eeed45'} def setUp(self): """Set up the test environment.""" def _set_required(opts, required): for opt in opts: opt.required = required # Initialize Cinder and avoid checking driver options. rest_required_opts = [ opt for opt in hbsd_rest.REST_VOLUME_OPTS if opt.required] common_required_opts = [ opt for opt in hbsd_common.COMMON_VOLUME_OPTS if opt.required] _set_required(rest_required_opts, False) _set_required(common_required_opts, False) super(HBSDRESTFCDriverTest, self).setUp() _set_required(rest_required_opts, True) _set_required(common_required_opts, True) self.configuration = mock.Mock(conf.Configuration) self.ctxt = cinder_context.get_admin_context() self._setup_config() self._setup_driver() def _setup_config(self): """Set configuration parameter values.""" self.configuration.config_group = "REST" self.configuration.volume_backend_name = "RESTFC" self.configuration.volume_driver = ( "cinder.volume.drivers.hitachi.hbsd_fc.HBSDFCDriver") self.configuration.reserved_percentage = "0" self.configuration.use_multipath_for_image_xfer = False self.configuration.enforce_multipath_for_image_xfer = False self.configuration.max_over_subscription_ratio = 500.0 self.configuration.driver_ssl_cert_verify = False self.configuration.hitachi_storage_id = CONFIG_MAP['serial'] self.configuration.hitachi_pools = ["30"] self.configuration.hitachi_snap_pool = None self.configuration.hitachi_ldev_range = "0-1" self.configuration.hitachi_target_ports = [CONFIG_MAP['port_id']] self.configuration.hitachi_compute_target_ports = [ CONFIG_MAP['port_id']] self.configuration.hitachi_group_create = True self.configuration.hitachi_group_delete = True self.configuration.hitachi_copy_speed = 3 self.configuration.hitachi_copy_check_interval = 3 self.configuration.hitachi_async_copy_check_interval = 10 self.configuration.hitachi_port_scheduler = False self.configuration.hitachi_group_name_format = None self.configuration.san_login = CONFIG_MAP['user_id'] self.configuration.san_password = CONFIG_MAP['user_pass'] self.configuration.san_ip = CONFIG_MAP[ 'rest_server_ip_addr'] self.configuration.san_api_port = CONFIG_MAP[ 'rest_server_ip_port'] self.configuration.hitachi_rest_disable_io_wait = True self.configuration.hitachi_rest_tcp_keepalive = True self.configuration.hitachi_discard_zero_page = True self.configuration.hitachi_lun_timeout = hbsd_rest._LUN_TIMEOUT self.configuration.hitachi_lun_retry_interval = ( hbsd_rest._LUN_RETRY_INTERVAL) self.configuration.hitachi_restore_timeout = hbsd_rest._RESTORE_TIMEOUT self.configuration.hitachi_state_transition_timeout = ( hbsd_rest._STATE_TRANSITION_TIMEOUT) self.configuration.hitachi_lock_timeout = hbsd_rest_api._LOCK_TIMEOUT self.configuration.hitachi_rest_timeout = hbsd_rest_api._REST_TIMEOUT 
self.configuration.hitachi_extend_timeout = ( hbsd_rest_api._EXTEND_TIMEOUT) self.configuration.hitachi_exec_retry_interval = ( hbsd_rest_api._EXEC_RETRY_INTERVAL) self.configuration.hitachi_rest_connect_timeout = ( hbsd_rest_api._DEFAULT_CONNECT_TIMEOUT) self.configuration.hitachi_rest_job_api_response_timeout = ( hbsd_rest_api._JOB_API_RESPONSE_TIMEOUT) self.configuration.hitachi_rest_get_api_response_timeout = ( hbsd_rest_api._GET_API_RESPONSE_TIMEOUT) self.configuration.hitachi_rest_server_busy_timeout = ( hbsd_rest_api._REST_SERVER_BUSY_TIMEOUT) self.configuration.hitachi_rest_keep_session_loop_interval = ( hbsd_rest_api._KEEP_SESSION_LOOP_INTERVAL) self.configuration.hitachi_rest_another_ldev_mapped_retry_timeout = ( hbsd_rest_api._ANOTHER_LDEV_MAPPED_RETRY_TIMEOUT) self.configuration.hitachi_rest_tcp_keepidle = ( hbsd_rest_api._TCP_KEEPIDLE) self.configuration.hitachi_rest_tcp_keepintvl = ( hbsd_rest_api._TCP_KEEPINTVL) self.configuration.hitachi_rest_tcp_keepcnt = ( hbsd_rest_api._TCP_KEEPCNT) self.configuration.hitachi_host_mode_options = [] self.configuration.hitachi_zoning_request = False self.configuration.san_thin_provision = True self.configuration.san_private_key = '' self.configuration.san_clustername = '' self.configuration.san_ssh_port = '22' self.configuration.san_is_local = False self.configuration.ssh_conn_timeout = '30' self.configuration.ssh_min_pool_conn = '1' self.configuration.ssh_max_pool_conn = '5' self.configuration.use_chap_auth = True self.configuration.chap_username = CONFIG_MAP['auth_user'] self.configuration.chap_password = CONFIG_MAP['auth_password'] self.configuration.hitachi_replication_number = 0 self.configuration.hitachi_pair_target_number = 0 self.configuration.hitachi_rest_pair_target_ports = [] self.configuration.hitachi_quorum_disk_id = '' self.configuration.hitachi_mirror_copy_speed = '' self.configuration.hitachi_mirror_storage_id = '' self.configuration.hitachi_mirror_pool = '' self.configuration.hitachi_mirror_ldev_range = '' self.configuration.hitachi_mirror_target_ports = '' self.configuration.hitachi_mirror_rest_user = '' self.configuration.hitachi_mirror_rest_password = '' self.configuration.hitachi_mirror_rest_api_ip = '' self.configuration.hitachi_set_mirror_reserve_attribute = '' self.configuration.hitachi_path_group_id = '' self.configuration.safe_get = self._fake_safe_get CONF = cfg.CONF CONF.my_ip = CONFIG_MAP['my_ip'] def _fake_safe_get(self, value): """Retrieve a configuration value avoiding throwing an exception.""" try: val = getattr(self.configuration, value) except AttributeError: val = None return val @mock.patch.object(requests.Session, "request") @mock.patch.object( volume_utils, 'brick_get_connector_properties', side_effect=_brick_get_connector_properties) def _setup_driver( self, brick_get_connector_properties=None, request=None): """Set up the driver environment.""" self.driver = hbsd_fc.HBSDFCDriver( configuration=self.configuration) request.side_effect = [FakeResponse(200, POST_SESSIONS_RESULT), FakeResponse(200, GET_PORTS_RESULT), FakeResponse(200, GET_HOST_WWNS_RESULT), FakeResponse(200, GET_HOST_GROUPS_RESULT_PAIR)] self.driver.do_setup(None) self.driver.check_for_setup_error() self.driver.local_path(None) self.driver.create_export(None, None, None) self.driver.ensure_export(None, None) self.driver.remove_export(None, None) self.driver.create_export_snapshot(None, None, None) self.driver.remove_export_snapshot(None, None) # stop the Loopingcall within the do_setup treatment 
self.driver.common.client.keep_session_loop.stop() def tearDown(self): self.client = None super(HBSDRESTFCDriverTest, self).tearDown() # API test cases @mock.patch.object(requests.Session, "request") @mock.patch.object( volume_utils, 'brick_get_connector_properties', side_effect=_brick_get_connector_properties) def test_do_setup(self, brick_get_connector_properties, request): drv = hbsd_fc.HBSDFCDriver( configuration=self.configuration) self._setup_config() request.side_effect = [FakeResponse(200, POST_SESSIONS_RESULT), FakeResponse(200, GET_PORTS_RESULT), FakeResponse(200, GET_HOST_WWNS_RESULT), FakeResponse(200, GET_HOST_GROUPS_RESULT_PAIR)] drv.do_setup(None) self.assertEqual( {CONFIG_MAP['port_id']: CONFIG_MAP['target_wwn']}, drv.common.storage_info['wwns']) self.assertEqual(1, brick_get_connector_properties.call_count) self.assertEqual(4, request.call_count) # stop the Loopingcall within the do_setup treatment drv.common.client.keep_session_loop.stop() @mock.patch.object(requests.Session, "request") @mock.patch.object( volume_utils, 'brick_get_connector_properties', side_effect=_brick_get_connector_properties) def test_do_setup_create_hg(self, brick_get_connector_properties, request): """Normal case: The host group not exists.""" drv = hbsd_fc.HBSDFCDriver( configuration=self.configuration) self._setup_config() request.side_effect = [FakeResponse(200, POST_SESSIONS_RESULT), FakeResponse(200, GET_PORTS_RESULT), FakeResponse(200, NOTFOUND_RESULT), FakeResponse(200, NOTFOUND_RESULT), FakeResponse(200, NOTFOUND_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(200, GET_HOST_GROUPS_RESULT_PAIR)] drv.do_setup(None) self.assertEqual( {CONFIG_MAP['port_id']: CONFIG_MAP['target_wwn']}, drv.common.storage_info['wwns']) self.assertEqual(1, brick_get_connector_properties.call_count) self.assertEqual(9, request.call_count) # stop the Loopingcall within the do_setup treatment drv.common.client.keep_session_loop.stop() @mock.patch.object(requests.Session, "request") @mock.patch.object( volume_utils, 'brick_get_connector_properties', side_effect=_brick_get_connector_properties) def test_do_setup_create_hg_format( self, brick_get_connector_properties, request): drv = hbsd_fc.HBSDFCDriver(configuration=self.configuration) self._setup_config() self.configuration.hitachi_group_name_format = ( 'HBSD-{wwn}-{host}-_:.@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@') request.side_effect = [FakeResponse(200, POST_SESSIONS_RESULT), FakeResponse(200, GET_PORTS_RESULT), FakeResponse(200, NOTFOUND_RESULT), FakeResponse(200, NOTFOUND_RESULT), FakeResponse(200, NOTFOUND_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(200, GET_HOST_GROUPS_RESULT_PAIR)] drv.do_setup(None) self.assertEqual( {CONFIG_MAP['port_id']: CONFIG_MAP['target_wwn']}, drv.common.storage_info['wwns']) self.assertEqual(1, brick_get_connector_properties.call_count) self.assertEqual(9, request.call_count) # stop the Loopingcall within the do_setup treatment drv.common.client.keep_session_loop.stop() @mock.patch.object(requests.Session, "request") @mock.patch.object( volume_utils, 'brick_get_connector_properties', side_effect=_brick_get_connector_properties) def test_do_setup_create_hg_format_error( self, brick_get_connector_properties, request): drv = hbsd_fc.HBSDFCDriver(configuration=self.configuration) self._setup_config() 
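# The group-name format set below is expected to be rejected: unlike the
# 'HBSD-{wwn}-{host}-...' format accepted in test_do_setup_create_hg_format
# above, this one makes do_setup raise VolumeDriverException, which is what the
# following assertRaises verifies.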
self.configuration.hitachi_group_name_format = '{host}-{wwn}' request.side_effect = [FakeResponse(200, POST_SESSIONS_RESULT), FakeResponse(200, GET_PORTS_RESULT), FakeResponse(200, NOTFOUND_RESULT), FakeResponse(200, NOTFOUND_RESULT), FakeResponse(200, NOTFOUND_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)] self.assertRaises(exception.VolumeDriverException, drv.do_setup, None) @mock.patch.object(requests.Session, "request") @mock.patch.object( volume_utils, 'brick_get_connector_properties', side_effect=_brick_get_connector_properties_multi_wwn) def test_do_setup_create_hg_port_scheduler( self, brick_get_connector_properties, request): """Normal case: The host group not exists with port scheduler.""" drv = hbsd_fc.HBSDFCDriver( configuration=self.configuration) self._setup_config() self.configuration.hitachi_port_scheduler = True self.configuration.hitachi_zoning_request = True drv.common._lookup_service = FakeLookupServiceMultiWwn() request.side_effect = [FakeResponse(200, POST_SESSIONS_RESULT), FakeResponse(200, GET_PORTS_RESULT), FakeResponse(200, NOTFOUND_RESULT), FakeResponse(200, NOTFOUND_RESULT), FakeResponse(200, NOTFOUND_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(200, GET_HOST_GROUPS_RESULT_PAIR)] drv.do_setup(None) self.assertEqual( {CONFIG_MAP['port_id']: CONFIG_MAP['target_wwn']}, drv.common.storage_info['wwns']) self.assertEqual(1, brick_get_connector_properties.call_count) self.assertEqual(10, request.call_count) # stop the Loopingcall within the do_setup treatment drv.common.client.keep_session_loop.stop() @mock.patch.object(requests.Session, "request") @mock.patch.object( volume_utils, 'brick_get_connector_properties', side_effect=_brick_get_connector_properties) def test_do_setup_pool_name(self, brick_get_connector_properties, request): """Normal case: Specify a pool name instead of pool id""" drv = hbsd_fc.HBSDFCDriver( configuration=self.configuration) self._setup_config() tmp_pools = self.configuration.hitachi_pools self.configuration.hitachi_pools = [CONFIG_MAP['pool_name']] request.side_effect = [FakeResponse(200, POST_SESSIONS_RESULT), FakeResponse(200, GET_POOLS_RESULT), FakeResponse(200, GET_PORTS_RESULT), FakeResponse(200, GET_HOST_WWNS_RESULT), FakeResponse(200, GET_HOST_GROUPS_RESULT_PAIR)] drv.do_setup(None) self.assertEqual( {CONFIG_MAP['port_id']: CONFIG_MAP['target_wwn']}, drv.common.storage_info['wwns']) self.assertEqual(1, brick_get_connector_properties.call_count) self.assertEqual(5, request.call_count) self.configuration.hitachi_pools = tmp_pools # stop the Loopingcall within the do_setup treatment drv.common.client.keep_session_loop.stop() @mock.patch.object(requests.Session, "request") @mock.patch.object(volume_types, 'get_volume_type_extra_specs') @mock.patch.object(volume_types, 'get_volume_type_qos_specs') def test_create_volume(self, get_volume_type_qos_specs, get_volume_type_extra_specs, request): get_volume_type_extra_specs.return_value = {} get_volume_type_qos_specs.return_value = {'qos_specs': None} request.return_value = FakeResponse(202, COMPLETED_SUCCEEDED_RESULT) self.driver.common._stats = {} self.driver.common._stats['pools'] = [ {'location_info': {'pool_id': 30}}] ret = self.driver.create_volume(TEST_VOLUME[4]) self.assertEqual('1', ret['provider_location']) 
self.assertEqual(2, request.call_count) @mock.patch.object(requests.Session, "request") @mock.patch.object(volume_types, 'get_volume_type_extra_specs') @mock.patch.object(volume_types, 'get_volume_type_qos_specs') def test_create_volume_deduplication_compression( self, get_volume_type_qos_specs, get_volume_type_extra_specs, request): extra_specs = {'hbsd:capacity_saving': 'deduplication_compression'} get_volume_type_extra_specs.return_value = extra_specs get_volume_type_qos_specs.return_value = {'qos_specs': None} request.return_value = FakeResponse(202, COMPLETED_SUCCEEDED_RESULT) self.driver.common._stats = {} self.driver.common._stats['pools'] = [ {'location_info': {'pool_id': 30}}] ret = self.driver.create_volume(TEST_VOLUME[3]) args, kwargs = request.call_args_list[0] body = kwargs['json'] self.assertEqual(body.get('dataReductionMode'), 'compression_deduplication') self.assertEqual('1', ret['provider_location']) self.assertEqual(1, get_volume_type_extra_specs.call_count) self.assertEqual(1, get_volume_type_qos_specs.call_count) self.assertEqual(2, request.call_count) @reduce_retrying_time @mock.patch.object(requests.Session, "request") @mock.patch.object(volume_types, 'get_volume_type_extra_specs') @mock.patch.object(volume_types, 'get_volume_type_qos_specs') def test_create_volume_timeout(self, get_volume_type_qos_specs, get_volume_type_extra_specs, request): self.driver.common.conf.hitachi_rest_timeout = 0 self.driver.common.conf.hitachi_exec_retry_interval = 0 get_volume_type_extra_specs.return_value = {} get_volume_type_qos_specs.return_value = {'qos_specs': None} request.return_value = FakeResponse( 500, ERROR_RESULT, headers={'Content-Type': 'json'}) self.driver.common._stats = {} self.driver.common._stats['pools'] = [ {'location_info': {'pool_id': 30}}] self.assertRaises(exception.VolumeDriverException, self.driver.create_volume, TEST_VOLUME[4]) self.assertGreater(request.call_count, 1) @mock.patch.object(requests.Session, "request") @mock.patch.object(volume_types, 'get_volume_type_extra_specs') @mock.patch.object(volume_types, 'get_volume_type_qos_specs') def test_create_volume_qos(self, get_volume_type_qos_specs, get_volume_type_extra_specs, request): specs = {} get_volume_type_extra_specs.return_value = {} input_qos_specs = { 'qos_specs': { 'consumer': 'back-end', 'specs': {'upperIops': '1000', 'upperTransferRate': '2000', 'lowerIops': '3000', 'lowerTransferRate': '4000', 'responsePriority': '3'}}} get_volume_type_qos_specs.return_value = input_qos_specs request.return_value = FakeResponse(202, COMPLETED_SUCCEEDED_RESULT) self.driver.common._stats = {} self.driver.common._stats['pools'] = [ {'location_info': {'pool_id': 30}}] ret = self.driver.create_volume(TEST_VOLUME[0]) for i in range(1, 6): args, kwargs = request.call_args_list[i] body = kwargs['json'] for key, value in body['parameters'].items(): specs[key] = value self.assertEqual(specs['upperIops'], 1000) self.assertEqual(specs['upperTransferRate'], 2000) self.assertEqual(specs['lowerIops'], 3000) self.assertEqual(specs['lowerTransferRate'], 4000) self.assertEqual(specs['responsePriority'], 3) self.assertEqual('1', ret['provider_location']) self.assertEqual(1, get_volume_type_extra_specs.call_count) self.assertEqual(1, get_volume_type_qos_specs.call_count) self.assertEqual(7, request.call_count) @mock.patch.object(requests.Session, "request") def test_delete_volume(self, request): request.side_effect = [FakeResponse(200, GET_LDEV_RESULT), FakeResponse(200, GET_LDEV_RESULT), FakeResponse(200, GET_LDEV_RESULT), 
FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)] self.driver.delete_volume(TEST_VOLUME[0]) self.assertEqual(4, request.call_count) @mock.patch.object(requests.Session, "request") def test_delete_volume_wait_copy_pair_deleting(self, request): request.side_effect = [FakeResponse(200, GET_LDEV_RESULT_PAIR), FakeResponse(200, GET_SNAPSHOTS_RESULT_BUSY), FakeResponse(200, GET_LDEV_RESULT), FakeResponse(200, GET_LDEV_RESULT), FakeResponse(200, GET_LDEV_RESULT), FakeResponse(200, GET_LDEV_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)] self.driver.delete_volume(TEST_VOLUME[0]) self.assertEqual(7, request.call_count) @reduce_retrying_time @mock.patch('oslo_service.loopingcall.FixedIntervalLoopingCall', new=test_utils.ZeroIntervalLoopingCall) @mock.patch.object(requests.Session, "request") def test_delete_volume_request_failed(self, request): request.side_effect = [FakeResponse(200, GET_LDEV_RESULT_PAIR), FakeResponse(200, GET_SNAPSHOTS_RESULT_BUSY), FakeResponse(200, GET_LDEV_RESULT_PAIR), FakeResponse(200, GET_LDEV_RESULT_PAIR), FakeResponse(200, GET_LDEV_RESULT_PAIR)] self.assertRaises(exception.VolumeDriverException, self.driver.delete_volume, TEST_VOLUME[0]) self.assertGreater(request.call_count, 2) @mock.patch.object(requests.Session, "request") def test_delete_volume_volume_is_busy(self, request): request.side_effect = [FakeResponse(200, GET_LDEV_RESULT_PAIR), FakeResponse(200, GET_SNAPSHOTS_RESULT_PAIR)] self.assertRaises(exception.VolumeIsBusy, self.driver.delete_volume, TEST_VOLUME[0]) self.assertEqual(2, request.call_count) @mock.patch.object(requests.Session, "request") def test_delete_volume_is_invalid_ldev(self, request): request.return_value = FakeResponse(200, GET_LDEV_RESULT_LABEL) self.driver.delete_volume(TEST_VOLUME[0]) self.assertEqual(1, request.call_count) @mock.patch.object(requests.Session, "request") def test_extend_volume(self, request): request.side_effect = [FakeResponse(200, GET_LDEV_RESULT), FakeResponse(200, GET_LDEV_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)] self.driver.extend_volume(TEST_VOLUME[0], 256) self.assertEqual(3, request.call_count) @mock.patch.object(driver.FibreChannelDriver, "get_goodness_function") @mock.patch.object(driver.FibreChannelDriver, "get_filter_function") @mock.patch.object(requests.Session, "request") def test_get_volume_stats( self, request, get_filter_function, get_goodness_function): request.return_value = FakeResponse(200, GET_POOLS_RESULT) get_filter_function.return_value = None get_goodness_function.return_value = None stats = self.driver.get_volume_stats(True) self.assertEqual('Hitachi', stats['vendor_name']) self.assertEqual(self.configuration.volume_backend_name, stats["pools"][0]['pool_name']) self.assertEqual(self.configuration.reserved_percentage, stats["pools"][0]['reserved_percentage']) self.assertTrue(stats["pools"][0]['QoS_support']) self.assertTrue(stats["pools"][0]['thin_provisioning_support']) self.assertFalse(stats["pools"][0]['thick_provisioning_support']) self.assertTrue(stats["pools"][0]['multiattach']) self.assertTrue(stats["pools"][0]['consistencygroup_support']) self.assertTrue(stats["pools"][0]['consistent_group_snapshot_enabled']) self.assertEqual(self.configuration.max_over_subscription_ratio, stats["pools"][0]['max_over_subscription_ratio']) self.assertEqual( GET_POOL_RESULT['totalPoolCapacity'] // units.Ki, stats["pools"][0]['total_capacity_gb']) self.assertEqual( GET_POOL_RESULT['availableVolumeCapacity'] // units.Ki, stats["pools"][0]['free_capacity_gb']) self.assertEqual( 
GET_POOL_RESULT['totalLocatedCapacity'] // units.Ki, stats["pools"][0]['provisioned_capacity_gb']) self.assertEqual('up', stats["pools"][0]['backend_state']) self.assertEqual(1, request.call_count) self.assertEqual(1, get_filter_function.call_count) self.assertEqual(1, get_goodness_function.call_count) @mock.patch.object(driver.FibreChannelDriver, "get_goodness_function") @mock.patch.object(driver.FibreChannelDriver, "get_filter_function") @mock.patch.object(hbsd_rest.HBSDREST, "get_pool_info") @mock.patch.object(requests.Session, 'request', new=mock.MagicMock()) def test_get_volume_stats_error( self, get_pool_info, get_filter_function, get_goodness_function): get_pool_info.side_effect = exception.VolumeDriverException(data='') get_filter_function.return_value = None get_goodness_function.return_value = None stats = self.driver.get_volume_stats(True) self.assertEqual('Hitachi', stats['vendor_name']) self.assertEqual(self.configuration.volume_backend_name, stats["pools"][0]['pool_name']) self.assertEqual(self.configuration.reserved_percentage, stats["pools"][0]['reserved_percentage']) self.assertTrue(stats["pools"][0]['thin_provisioning_support']) self.assertFalse(stats["pools"][0]['thick_provisioning_support']) self.assertTrue(stats["pools"][0]['multiattach']) self.assertTrue(stats["pools"][0]['consistencygroup_support']) self.assertTrue(stats["pools"][0]['consistent_group_snapshot_enabled']) self.assertEqual(self.configuration.max_over_subscription_ratio, stats["pools"][0]['max_over_subscription_ratio']) self.assertEqual(0, stats["pools"][0]['total_capacity_gb']) self.assertEqual(0, stats["pools"][0]['free_capacity_gb']) self.assertEqual(0, stats["pools"][0]['provisioned_capacity_gb']) self.assertEqual('down', stats["pools"][0]['backend_state']) self.assertEqual(1, get_filter_function.call_count) self.assertEqual(1, get_goodness_function.call_count) @mock.patch.object(requests.Session, "request") @mock.patch.object(volume_types, 'get_volume_type_extra_specs') @mock.patch.object(sqlalchemy_api, 'volume_get', side_effect=_volume_get) @mock.patch.object(volume_types, 'get_volume_type_qos_specs') def test_create_snapshot( self, get_volume_type_qos_specs, volume_get, get_volume_type_extra_specs, request): request.side_effect = [FakeResponse(200, GET_LDEV_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(200, GET_SNAPSHOTS_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)] get_volume_type_extra_specs.return_value = {} get_volume_type_qos_specs.return_value = {'qos_specs': None} self.driver.common._stats = {} self.driver.common._stats['pools'] = [ {'location_info': {'pool_id': 30}}] ret = self.driver.create_snapshot(TEST_SNAPSHOT[0]) self.assertEqual('1', ret['provider_location']) self.assertEqual(1, get_volume_type_extra_specs.call_count) self.assertEqual(5, request.call_count) @mock.patch.object(requests.Session, "request") @mock.patch.object(volume_types, 'get_volume_type_extra_specs') @mock.patch.object(sqlalchemy_api, 'volume_get', side_effect=_volume_get) @mock.patch.object(volume_types, 'get_volume_type_qos_specs') def test_create_snapshot_dedup_false( self, get_volume_type_qos_specs, volume_get, get_volume_type_extra_specs, request): request.side_effect = [FakeResponse(200, GET_LDEV_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(200, GET_SNAPSHOTS_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)] get_volume_type_extra_specs.return_value = 
{'hbsd:capacity_saving': 'disable'} get_volume_type_qos_specs.return_value = {'qos_specs': None} self.driver.common._stats = {} self.driver.common._stats['pools'] = [ {'location_info': {'pool_id': 30}}] ret = self.driver.create_snapshot(TEST_SNAPSHOT[0]) self.assertEqual('1', ret['provider_location']) self.assertEqual(1, get_volume_type_extra_specs.call_count) self.assertEqual(5, request.call_count) @mock.patch.object(requests.Session, "request") def test_delete_snapshot(self, request): request.side_effect = [FakeResponse(200, GET_LDEV_RESULT_PAIR_SNAP), FakeResponse(200, NOTFOUND_RESULT), FakeResponse(200, GET_SNAPSHOTS_RESULT), FakeResponse(200, GET_SNAPSHOTS_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(200, GET_LDEV_RESULT), FakeResponse(200, GET_LDEV_RESULT), FakeResponse(200, GET_LDEV_RESULT), FakeResponse(200, GET_LDEV_RESULT), FakeResponse(200, GET_LDEV_RESULT), FakeResponse(200, GET_LDEV_RESULT), FakeResponse(200, GET_LDEV_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)] self.driver.delete_snapshot(TEST_SNAPSHOT[0]) self.assertEqual(14, request.call_count) @mock.patch.object(requests.Session, "request") def test_delete_snapshot_no_pair(self, request): """Normal case: Delete a snapshot without pair.""" request.side_effect = [FakeResponse(200, GET_LDEV_RESULT_SNAP), FakeResponse(200, GET_LDEV_RESULT), FakeResponse(200, GET_LDEV_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)] self.driver.delete_snapshot(TEST_SNAPSHOT[0]) self.assertEqual(4, request.call_count) @mock.patch.object(requests.Session, "request") @mock.patch.object(volume_types, 'get_volume_type_extra_specs') @mock.patch.object(volume_types, 'get_volume_type_qos_specs') def test_create_cloned_volume(self, get_volume_type_qos_specs, get_volume_type_extra_specs, request): request.side_effect = [FakeResponse(200, GET_LDEV_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(200, GET_SNAPSHOTS_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)] get_volume_type_extra_specs.return_value = {} get_volume_type_qos_specs.return_value = {'qos_specs': None} self.driver.common._stats = {} self.driver.common._stats['pools'] = [ {'location_info': {'pool_id': 30}}] vol = self.driver.create_cloned_volume(TEST_VOLUME[0], TEST_VOLUME[1]) self.assertEqual('1', vol['provider_location']) self.assertEqual(1, get_volume_type_extra_specs.call_count) self.assertEqual(1, get_volume_type_qos_specs.call_count) self.assertEqual(5, request.call_count) @mock.patch.object(requests.Session, "request") @mock.patch.object(volume_types, 'get_volume_type_extra_specs') @mock.patch.object(volume_types, 'get_volume_type_qos_specs') def test_create_volume_from_snapshot( self, get_volume_type_qos_specs, get_volume_type_extra_specs, request): request.side_effect = [FakeResponse(200, GET_LDEV_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(200, GET_SNAPSHOTS_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)] self.driver.common._stats = {} get_volume_type_extra_specs.return_value = {} get_volume_type_qos_specs.return_value = {'qos_specs': None} self.driver.common._stats['pools'] = [ {'location_info': {'pool_id': 30}}] vol = self.driver.create_volume_from_snapshot( TEST_VOLUME[0], TEST_SNAPSHOT[0]) self.assertEqual('1', vol['provider_location']) self.assertEqual(1, get_volume_type_extra_specs.call_count) self.assertEqual(1, 
                         get_volume_type_qos_specs.call_count)
        self.assertEqual(5, request.call_count)

    @mock.patch.object(fczm_utils, "add_fc_zone")
    @mock.patch.object(requests.Session, "request")
    @mock.patch.object(volume_types, 'get_volume_type_extra_specs')
    def test_initialize_connection(
            self, get_volume_type_extra_specs, request, add_fc_zone):
        self.driver.common.conf.hitachi_zoning_request = True
        self.driver.common._lookup_service = FakeLookupService()
        extra_specs = {"hbsd:target_ports": "CL1-A"}
        get_volume_type_extra_specs.return_value = extra_specs
        request.side_effect = [FakeResponse(200, GET_HOST_WWNS_RESULT),
                               FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)]
        ret = self.driver.initialize_connection(
            TEST_VOLUME[0], DEFAULT_CONNECTOR)
        self.assertEqual('fibre_channel', ret['driver_volume_type'])
        self.assertEqual([CONFIG_MAP['target_wwn']],
                         ret['data']['target_wwn'])
        self.assertEqual(1, ret['data']['target_lun'])
        self.assertEqual(1, get_volume_type_extra_specs.call_count)
        self.assertEqual(2, request.call_count)
        self.assertEqual(1, add_fc_zone.call_count)

    @mock.patch.object(fczm_utils, "add_fc_zone")
    @mock.patch.object(requests.Session, "request")
    @mock.patch.object(volume_types, 'get_volume_type_extra_specs')
    def test_initialize_connection_already_mapped(
            self, get_volume_type_extra_specs, request, add_fc_zone):
        """Normal case: The LDEV has already been mapped."""
        self.driver.common.conf.hitachi_zoning_request = True
        self.driver.common._lookup_service = FakeLookupService()
        extra_specs = {"hbsd:target_ports": "CL1-A"}
        get_volume_type_extra_specs.return_value = extra_specs
        request.side_effect = [
            FakeResponse(200, GET_HOST_WWNS_RESULT),
            FakeResponse(202, COMPLETED_FAILED_RESULT_LU_DEFINED),
            FakeResponse(200, GET_LUNS_RESULT),
        ]
        ret = self.driver.initialize_connection(
            TEST_VOLUME[0], DEFAULT_CONNECTOR)
        self.assertEqual('fibre_channel', ret['driver_volume_type'])
        self.assertEqual([CONFIG_MAP['target_wwn']],
                         ret['data']['target_wwn'])
        self.assertEqual(1, ret['data']['target_lun'])
        self.assertEqual(1, get_volume_type_extra_specs.call_count)
        self.assertEqual(3, request.call_count)
        self.assertEqual(1, add_fc_zone.call_count)

    @mock.patch.object(fczm_utils, "add_fc_zone")
    @mock.patch.object(requests.Session, "request")
    @mock.patch.object(volume_types, 'get_volume_type_extra_specs')
    def test_initialize_connection_shared_target(
            self, get_volume_type_extra_specs, request, add_fc_zone):
        """Normal case: A target shared with other systems."""
        self.driver.common.conf.hitachi_zoning_request = True
        self.driver.common._lookup_service = FakeLookupService()
        extra_specs = {"hbsd:target_ports": "CL1-A"}
        get_volume_type_extra_specs.return_value = extra_specs
        request.side_effect = [FakeResponse(200, NOTFOUND_RESULT),
                               FakeResponse(200, NOTFOUND_RESULT),
                               FakeResponse(200, GET_HOST_GROUPS_RESULT),
                               FakeResponse(200, GET_HOST_WWNS_RESULT),
                               FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)]
        ret = self.driver.initialize_connection(
            TEST_VOLUME[0], DEFAULT_CONNECTOR)
        self.assertEqual('fibre_channel', ret['driver_volume_type'])
        self.assertEqual([CONFIG_MAP['target_wwn']],
                         ret['data']['target_wwn'])
        self.assertEqual(1, ret['data']['target_lun'])
        self.assertEqual(1, get_volume_type_extra_specs.call_count)
        self.assertEqual(5, request.call_count)
        self.assertEqual(1, add_fc_zone.call_count)

    @mock.patch.object(fczm_utils, "add_fc_zone")
    @mock.patch.object(requests.Session, "request")
    @mock.patch.object(volume_types, 'get_volume_type_extra_specs')
    def test_create_target_to_storage_return(
            self, get_volume_type_extra_specs, request, add_fc_zone):
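        # Error case (per the stubbed responses and assertions that follow):
        # creating the host group (target) on the storage fails with a 400
        # response, so initialize_connection is expected to raise
        # VolumeDriverException and add_fc_zone must not be called.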
self.configuration.hitachi_zoning_request = True self.driver.common._lookup_service = FakeLookupService() extra_specs = {"hbsd:target_ports": "CL1-A"} get_volume_type_extra_specs.return_value = extra_specs request.side_effect = [ FakeResponse(200, NOTFOUND_RESULT), FakeResponse(200, NOTFOUND_RESULT), FakeResponse(200, GET_HOST_GROUPS_RESULT), FakeResponse(200, NOTFOUND_RESULT), FakeResponse(400, GET_HOST_GROUPS_RESULT), FakeResponse(200, GET_HOST_GROUPS_RESULT_TEST), FakeResponse(200, GET_HOST_GROUPS_RESULT_TEST), ] self.assertRaises(exception.VolumeDriverException, self.driver.initialize_connection, TEST_VOLUME[1], DEFAULT_CONNECTOR) self.assertEqual(1, get_volume_type_extra_specs.call_count) self.assertEqual(10, request.call_count) self.assertEqual(0, add_fc_zone.call_count) @mock.patch.object(fczm_utils, "remove_fc_zone") @mock.patch.object(requests.Session, "request") def test_terminate_connection(self, request, remove_fc_zone): self.driver.common.conf.hitachi_zoning_request = True self.driver.common._lookup_service = FakeLookupService() request.side_effect = [FakeResponse(200, GET_HOST_WWNS_RESULT), FakeResponse(200, GET_LDEV_RESULT_MAPPED), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(200, NOTFOUND_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)] self.driver.terminate_connection(TEST_VOLUME[2], DEFAULT_CONNECTOR) self.assertEqual(5, request.call_count) self.assertEqual(1, remove_fc_zone.call_count) @mock.patch.object(fczm_utils, "remove_fc_zone") @mock.patch.object(requests.Session, "request") def test_terminate_connection_not_connector(self, request, remove_fc_zone): """Normal case: Connector is None.""" self.driver.common.conf.hitachi_zoning_request = True self.driver.common._lookup_service = FakeLookupService() request.side_effect = [FakeResponse(200, GET_LDEV_RESULT_MAPPED), FakeResponse(200, GET_HOST_GROUP_RESULT), FakeResponse(200, GET_HOST_WWNS_RESULT), FakeResponse(200, GET_HOST_WWNS_RESULT), FakeResponse(200, GET_LDEV_RESULT_MAPPED), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(200, NOTFOUND_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)] self.driver.terminate_connection(TEST_VOLUME[2], None) self.assertEqual(8, request.call_count) self.assertEqual(1, remove_fc_zone.call_count) @mock.patch.object(fczm_utils, "remove_fc_zone") @mock.patch.object(requests.Session, "request") def test_terminate_connection_not_lun(self, request, remove_fc_zone): """Normal case: Lun already not exist.""" self.driver.common.conf.hitachi_zoning_request = True self.driver.common._lookup_service = FakeLookupService() request.side_effect = [FakeResponse(200, GET_HOST_WWNS_RESULT), FakeResponse(200, GET_LDEV_RESULT)] self.driver.terminate_connection(TEST_VOLUME[2], DEFAULT_CONNECTOR) self.assertEqual(2, request.call_count) self.assertEqual(1, remove_fc_zone.call_count) @mock.patch.object(fczm_utils, "add_fc_zone") @mock.patch.object(requests.Session, "request") def test_initialize_connection_snapshot(self, request, add_fc_zone): self.driver.common.conf.hitachi_zoning_request = True self.driver.common._lookup_service = FakeLookupService() request.side_effect = [FakeResponse(200, GET_HOST_WWNS_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)] ret = self.driver.initialize_connection_snapshot( TEST_SNAPSHOT[0], DEFAULT_CONNECTOR) self.assertEqual('fibre_channel', ret['driver_volume_type']) self.assertEqual([CONFIG_MAP['target_wwn']], ret['data']['target_wwn']) self.assertEqual(1, ret['data']['target_lun']) self.assertEqual(2, request.call_count) 
self.assertEqual(1, add_fc_zone.call_count) @mock.patch.object(fczm_utils, "remove_fc_zone") @mock.patch.object(requests.Session, "request") def test_terminate_connection_snapshot(self, request, remove_fc_zone): self.driver.common.conf.hitachi_zoning_request = True self.driver.common._lookup_service = FakeLookupService() request.side_effect = [FakeResponse(200, GET_HOST_WWNS_RESULT), FakeResponse(200, GET_LDEV_RESULT_MAPPED), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(200, NOTFOUND_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)] self.driver.terminate_connection_snapshot( TEST_SNAPSHOT[0], DEFAULT_CONNECTOR) self.assertEqual(5, request.call_count) self.assertEqual(1, remove_fc_zone.call_count) @mock.patch.object(requests.Session, "request") @mock.patch.object(volume_types, 'get_volume_type_qos_specs') def test_manage_existing(self, get_volume_type_qos_specs, request): get_volume_type_qos_specs.return_value = {'qos_specs': None} request.side_effect = [FakeResponse(200, GET_LDEV_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(200, GET_LDEVS_RESULT)] ret = self.driver.manage_existing( TEST_VOLUME[0], self.test_existing_ref) self.assertEqual('1', ret['provider_location']) self.assertEqual(1, get_volume_type_qos_specs.call_count) self.assertEqual(3, request.call_count) @mock.patch.object(requests.Session, "request") @mock.patch.object(volume_types, 'get_volume_type_qos_specs') def test_manage_existing_qos(self, get_volume_type_qos_specs, request): input_qos_specs = { 'qos_specs': { 'consumer': 'back-end', 'specs': {'upperIops': '1000'}}} get_volume_type_qos_specs.return_value = input_qos_specs request.side_effect = [FakeResponse(200, GET_LDEV_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(200, GET_LDEVS_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)] ret = self.driver.manage_existing( TEST_VOLUME[0], self.test_existing_ref) self.assertEqual('1', ret['provider_location']) self.assertEqual(1, get_volume_type_qos_specs.call_count) self.assertEqual(4, request.call_count) @mock.patch.object(requests.Session, "request") @mock.patch.object(volume_types, 'get_volume_type_qos_specs') def test_manage_existing_name(self, get_volume_type_qos_specs, request): get_volume_type_qos_specs.return_value = {'qos_specs': None} request.side_effect = [FakeResponse(200, GET_LDEVS_RESULT), FakeResponse(200, GET_LDEV_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(200, GET_LDEVS_RESULT)] ret = self.driver.manage_existing( TEST_VOLUME[0], self.test_existing_ref_name) self.assertEqual('1', ret['provider_location']) self.assertEqual(1, get_volume_type_qos_specs.call_count) self.assertEqual(4, request.call_count) @mock.patch.object(requests.Session, "request") def test_manage_existing_get_size(self, request): request.return_value = FakeResponse(200, GET_LDEV_RESULT) self.driver.manage_existing_get_size( TEST_VOLUME[0], self.test_existing_ref) self.assertEqual(1, request.call_count) @mock.patch.object(requests.Session, "request") def test_manage_existing_get_size_name(self, request): request.side_effect = [FakeResponse(200, GET_LDEVS_RESULT), FakeResponse(200, GET_LDEV_RESULT)] self.driver.manage_existing_get_size( TEST_VOLUME[0], self.test_existing_ref_name) self.assertEqual(2, request.call_count) @mock.patch.object(requests.Session, "request") def test_unmanage(self, request): request.side_effect = [FakeResponse(200, GET_LDEV_RESULT), FakeResponse(200, GET_LDEV_RESULT)] self.driver.unmanage(TEST_VOLUME[0]) self.assertEqual(2, 
request.call_count) @mock.patch.object(requests.Session, "request") def test_unmanage_volume_is_busy(self, request): request.side_effect = [ FakeResponse(200, GET_LDEV_RESULT_PAIR), FakeResponse(200, GET_LDEV_RESULT_PAIR), FakeResponse(200, NOTFOUND_RESULT), FakeResponse(200, GET_SNAPSHOTS_RESULT_PAIR), ] self.assertRaises(exception.VolumeIsBusy, self.driver.unmanage, TEST_VOLUME[1]) self.assertEqual(4, request.call_count) @mock.patch.object(requests.Session, "request") def test_unmanage_volume_is_busy_raise_ex(self, request): request.side_effect = [ FakeResponse(200, GET_LDEV_RESULT_PAIR), FakeResponse(200, GET_LDEV_RESULT_PAIR), FakeResponse(400, GET_SNAPSHOTS_RESULT_BUSY) ] self.assertRaises(exception.VolumeDriverException, self.driver.unmanage, TEST_VOLUME[0]) self.assertEqual(3, request.call_count) @mock.patch.object(requests.Session, "request") def test_copy_image_to_volume(self, request): image_service = 'fake_image_service' image_id = 'fake_image_id' request.return_value = FakeResponse(202, COMPLETED_SUCCEEDED_RESULT) with mock.patch.object(driver.VolumeDriver, 'copy_image_to_volume') \ as mock_copy_image: self.driver.copy_image_to_volume( self.ctxt, TEST_VOLUME[0], image_service, image_id) mock_copy_image.assert_called_with( self.ctxt, TEST_VOLUME[0], image_service, image_id, disable_sparse=False) self.assertEqual(1, request.call_count) @mock.patch.object(requests.Session, "request") def test_update_migrated_volume(self, request): request.return_value = FakeResponse(202, COMPLETED_SUCCEEDED_RESULT) ret = self.driver.update_migrated_volume( self.ctxt, TEST_VOLUME[0], TEST_VOLUME[1], "available") self.assertEqual(1, request.call_count) actual = ({'_name_id': TEST_VOLUME[1]['id'], 'provider_location': TEST_VOLUME[1]['provider_location']}) self.assertEqual(actual, ret) def test_unmanage_snapshot(self): """The driver don't support unmange_snapshot.""" self.assertRaises( NotImplementedError, self.driver.unmanage_snapshot, TEST_SNAPSHOT[0]) @mock.patch.object(requests.Session, "request") @mock.patch.object(volume_types, 'get_volume_type_qos_specs') def test_retype(self, get_volume_type_qos_specs, request): request.side_effect = [FakeResponse(200, GET_LDEV_RESULT), FakeResponse(200, GET_LDEV_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(200, GET_LDEVS_RESULT)] get_volume_type_qos_specs.return_value = {'qos_specs': None} host = { 'capabilities': { 'location_info': { 'pool_id': 30, }, }, } extra_specs = {'hbsd:capacity_saving': 'deduplication_compression'} new_type = fake_volume.fake_volume_type_obj( CTXT, id='00000000-0000-0000-0000-{0:012d}'.format(0), extra_specs=extra_specs) old_specs = {'hbsd:capacity_saving': 'disable'} new_specs = {'hbsd:capacity_saving': 'deduplication_compression'} old_type_ref = volume_types.create(self.ctxt, 'old', old_specs) new_type_ref = volume_types.create(self.ctxt, 'new', new_specs) diff = volume_types.volume_types_diff(self.ctxt, old_type_ref['id'], new_type_ref['id'])[0] ret = self.driver.retype( self.ctxt, TEST_VOLUME[0], new_type, diff, host) self.assertEqual(4, request.call_count) self.assertTrue(ret) @mock.patch.object(requests.Session, "request") @mock.patch.object(volume_types, 'get_volume_type_qos_specs') def test_retype_qos(self, get_volume_type_qos_specs, request): input_qos_specs = {'qos_specs': { 'consumer': 'back-end', 'specs': {'upperIops': '2000'}}} get_volume_type_qos_specs.return_value = input_qos_specs request.side_effect = [FakeResponse(200, GET_LDEV_RESULT), FakeResponse(200, GET_LDEVS_RESULT_QOS), 
FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)] host = { 'capabilities': { 'location_info': { 'pool_id': 30, }, }, } qos_spec_id = '00000000-0000-0000-0000-000000000001' new_type = fake_volume.fake_volume_type_obj( CTXT, id='00000000-0000-0000-0000-{0:012d}'.format(0), qos_spec_id=qos_spec_id) diff = {'qos_specs': {'upperIops': ('1000', '2000')}} ret = self.driver.retype( self.ctxt, TEST_VOLUME[0], new_type, diff, host) self.assertEqual(1, get_volume_type_qos_specs.call_count) self.assertEqual(4, request.call_count) self.assertTrue(ret) @mock.patch.object(requests.Session, "request") @mock.patch.object(volume_types, 'get_volume_type_extra_specs') @mock.patch.object(volume_types, 'get_volume_type_qos_specs') def test_retype_migrate_qos( self, get_volume_type_qos_specs, get_volume_type_extra_specs, request): qos_spec_id = '00000000-0000-0000-0000-000000000001' input_qos_specs = {'qos_specs': { 'consumer': 'back-end', 'id': qos_spec_id, 'specs': {'upperIops': '2000'}}} get_volume_type_qos_specs.return_value = input_qos_specs get_volume_type_extra_specs.return_value = {} request.side_effect = [FakeResponse(200, GET_LDEV_RESULT), FakeResponse(200, GET_LDEV_RESULT), FakeResponse(200, GET_LDEV_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(200, GET_SNAPSHOTS_RESULT), FakeResponse(200, NOTFOUND_RESULT), FakeResponse(200, NOTFOUND_RESULT), FakeResponse(200, NOTFOUND_RESULT), FakeResponse(200, NOTFOUND_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(200, GET_LDEV_RESULT), FakeResponse(200, GET_LDEV_RESULT), FakeResponse(200, GET_LDEV_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)] host = { 'capabilities': { 'location_info': { 'storage_id': CONFIG_MAP['serial'], 'pool_id': 30, }, }, } extra_specs = {'hbsd:target_ports': 'CL1-A'} new_type = fake_volume.fake_volume_type_obj( CTXT, id='00000000-0000-0000-0000-{0:012d}'.format(0), extra_specs=extra_specs, qos_specs_id=qos_spec_id) diff = {'extra_specs': {'hbsd:target_ports': 'CL1-A'}, 'qos_specs': {'upperIops': ('1000', '2000')}, 'encryption': {}} ret = self.driver.retype( self.ctxt, TEST_VOLUME[0], new_type, diff, host) self.assertEqual(1, get_volume_type_qos_specs.call_count) self.assertEqual(16, request.call_count) actual = (True, {'provider_location': '1'}) self.assertTupleEqual(actual, ret) @mock.patch.object(requests.Session, "request") def test_migrate_volume(self, request): request.return_value = FakeResponse(200, GET_LDEV_RESULT) host = { 'capabilities': { 'location_info': { 'storage_id': CONFIG_MAP['serial'], 'pool_id': 30, }, }, } ret = self.driver.migrate_volume(self.ctxt, TEST_VOLUME[0], host) self.assertEqual(2, request.call_count) actual = (True, None) self.assertTupleEqual(actual, ret) @mock.patch.object(requests.Session, "request") @mock.patch.object(volume_types, 'get_volume_type_extra_specs') @mock.patch.object(volume_types, 'get_volume_type_qos_specs') def test_migrate_volume_diff_pool(self, get_volume_type_qos_specs, get_volume_type_extra_specs, request): get_volume_type_extra_specs.return_value = {} get_volume_type_qos_specs.return_value = {'qos_specs': None} request.side_effect = [FakeResponse(200, GET_LDEV_RESULT), FakeResponse(200, GET_LDEV_RESULT), FakeResponse(200, GET_LDEV_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(200, GET_SNAPSHOTS_RESULT), FakeResponse(200, 
NOTFOUND_RESULT), FakeResponse(200, NOTFOUND_RESULT), FakeResponse(200, NOTFOUND_RESULT), FakeResponse(200, NOTFOUND_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(200, GET_LDEV_RESULT), FakeResponse(200, GET_LDEV_RESULT), FakeResponse(200, GET_LDEV_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)] host = { 'capabilities': { 'location_info': { 'storage_id': CONFIG_MAP['serial'], 'pool_id': 40, }, }, } ret = self.driver.migrate_volume(self.ctxt, TEST_VOLUME[0], host) self.assertEqual(1, get_volume_type_extra_specs.call_count) self.assertEqual(1, get_volume_type_qos_specs.call_count) self.assertEqual(15, request.call_count) actual = (True, {'provider_location': '1'}) self.assertTupleEqual(actual, ret) def test_backup_use_temp_snapshot(self): self.assertTrue(self.driver.backup_use_temp_snapshot()) @mock.patch.object(requests.Session, "request") def test_revert_to_snapshot(self, request): request.side_effect = [FakeResponse(200, GET_LDEV_RESULT_PAIR), FakeResponse(200, GET_SNAPSHOTS_RESULT), FakeResponse(200, GET_SNAPSHOTS_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(200, GET_SNAPSHOTS_RESULT)] self.driver.revert_to_snapshot( self.ctxt, TEST_VOLUME[0], TEST_SNAPSHOT[0]) self.assertEqual(5, request.call_count) def test_session___call__(self): session = self.driver.common.client.Session('id', 'token') req = models.Response() ret = session.__call__(req) self.assertEqual('Session token', ret.headers['Authorization']) def test_create_group(self): ret = self.driver.create_group(self.ctxt, TEST_GROUP[0]) self.assertIsNone(ret) @mock.patch.object(requests.Session, "request") def test_delete_group(self, request): request.side_effect = [FakeResponse(200, GET_LDEV_RESULT), FakeResponse(200, GET_LDEV_RESULT), FakeResponse(200, GET_LDEV_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)] ret = self.driver.delete_group( self.ctxt, TEST_GROUP[0], [TEST_VOLUME[0]]) self.assertEqual(4, request.call_count) actual = ( {'status': TEST_GROUP[0]['status']}, [{'id': TEST_VOLUME[0]['id'], 'status': 'deleted'}] ) self.assertTupleEqual(actual, ret) @mock.patch.object(requests.Session, "request") @mock.patch.object(volume_types, 'get_volume_type_extra_specs') @mock.patch.object(volume_types, 'get_volume_type_qos_specs') def test_create_group_from_src_volume( self, get_volume_type_qos_specs, get_volume_type_extra_specs, request): get_volume_type_extra_specs.return_value = {} get_volume_type_qos_specs.return_value = {'qos_specs': None} request.side_effect = [FakeResponse(200, GET_LDEV_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(200, GET_SNAPSHOTS_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)] self.driver.common._stats = {} self.driver.common._stats['pools'] = [ {'location_info': {'pool_id': 30}}] ret = self.driver.create_group_from_src( self.ctxt, TEST_GROUP[1], [TEST_VOLUME[1]], source_group=TEST_GROUP[0], source_vols=[TEST_VOLUME[0]] ) self.assertEqual(1, get_volume_type_extra_specs.call_count) self.assertEqual(1, get_volume_type_qos_specs.call_count) self.assertEqual(5, request.call_count) actual = ( None, [{'id': TEST_VOLUME[1]['id'], 'provider_location': '1'}]) self.assertTupleEqual(actual, ret) @mock.patch.object(requests.Session, "request") @mock.patch.object(volume_types, 'get_volume_type_extra_specs') @mock.patch.object(volume_types, 'get_volume_type_qos_specs') def test_create_group_from_src_snapshot( self, get_volume_type_qos_specs, get_volume_type_extra_specs, request): 
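        # Normal case: the group is built from an existing group snapshot
        # (group_snapshot/snapshots arguments) rather than from source volumes,
        # mirroring test_create_group_from_src_volume above.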
get_volume_type_extra_specs.return_value = {} get_volume_type_qos_specs.return_value = {'qos_specs': None} request.side_effect = [FakeResponse(200, GET_LDEV_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(200, GET_SNAPSHOTS_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)] self.driver.common._stats = {} self.driver.common._stats['pools'] = [ {'location_info': {'pool_id': 30}}] ret = self.driver.create_group_from_src( self.ctxt, TEST_GROUP[0], [TEST_VOLUME[0]], group_snapshot=TEST_GROUP_SNAP[0], snapshots=[TEST_SNAPSHOT[0]] ) self.assertEqual(1, get_volume_type_extra_specs.call_count) self.assertEqual(1, get_volume_type_qos_specs.call_count) self.assertEqual(5, request.call_count) actual = ( None, [{'id': TEST_VOLUME[0]['id'], 'provider_location': '1'}]) self.assertTupleEqual(actual, ret) @mock.patch.object(volume_types, 'get_volume_type_qos_specs') def test_create_group_from_src_volume_error( self, get_volume_type_qos_specs): get_volume_type_qos_specs.return_value = {'qos_specs': None} self.assertRaises( exception.VolumeDriverException, self.driver.create_group_from_src, self.ctxt, TEST_GROUP[1], [TEST_VOLUME[1]], source_group=TEST_GROUP[0], source_vols=[TEST_VOLUME[3]] ) @mock.patch.object(volume_utils, 'is_group_a_cg_snapshot_type') def test_update_group(self, is_group_a_cg_snapshot_type): is_group_a_cg_snapshot_type.return_value = False ret = self.driver.update_group( self.ctxt, TEST_GROUP[0], add_volumes=[TEST_VOLUME[0]]) self.assertTupleEqual((None, None, None), ret) @mock.patch.object(volume_utils, 'is_group_a_cg_snapshot_type') def test_update_group_error(self, is_group_a_cg_snapshot_type): is_group_a_cg_snapshot_type.return_value = True self.assertRaises( exception.VolumeDriverException, self.driver.update_group, self.ctxt, TEST_GROUP[0], add_volumes=[TEST_VOLUME[3]], remove_volumes=[TEST_VOLUME[0]] ) @mock.patch.object(requests.Session, "request") @mock.patch.object(sqlalchemy_api, 'volume_get', side_effect=_volume_get) @mock.patch.object(volume_types, 'get_volume_type_extra_specs') @mock.patch.object(volume_utils, 'is_group_a_cg_snapshot_type') @mock.patch.object(volume_types, 'get_volume_type_qos_specs') def test_create_group_snapshot_non_cg( self, get_volume_type_qos_specs, is_group_a_cg_snapshot_type, get_volume_type_extra_specs, volume_get, request): is_group_a_cg_snapshot_type.return_value = False get_volume_type_extra_specs.return_value = {} get_volume_type_qos_specs.return_value = {'qos_specs': None} request.side_effect = [FakeResponse(200, GET_LDEV_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(200, GET_SNAPSHOTS_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)] self.driver.common._stats = {} self.driver.common._stats['pools'] = [ {'location_info': {'pool_id': 30}}] ret = self.driver.create_group_snapshot( self.ctxt, TEST_GROUP_SNAP[0], [TEST_SNAPSHOT[0]] ) self.assertEqual(1, get_volume_type_extra_specs.call_count) self.assertEqual(5, request.call_count) actual = ( {'status': 'available'}, [{'id': TEST_SNAPSHOT[0]['id'], 'provider_location': '1', 'status': 'available'}] ) self.assertTupleEqual(actual, ret) @mock.patch.object(requests.Session, "request") @mock.patch.object(sqlalchemy_api, 'volume_get', side_effect=_volume_get) @mock.patch.object(volume_types, 'get_volume_type_extra_specs') @mock.patch.object(volume_utils, 'is_group_a_cg_snapshot_type') @mock.patch.object(volume_types, 'get_volume_type_qos_specs') def 
test_create_group_snapshot_cg( self, get_volume_type_qos_specs, is_group_a_cg_snapshot_type, get_volume_type_extra_specs, volume_get, request): is_group_a_cg_snapshot_type.return_value = True get_volume_type_extra_specs.return_value = {} get_volume_type_qos_specs.return_value = {'qos_specs': None} request.side_effect = [FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(200, GET_SNAPSHOTS_RESULT_PAIR), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(200, GET_SNAPSHOTS_RESULT)] self.driver.common._stats = {} self.driver.common._stats['pools'] = [ {'location_info': {'pool_id': 30}}] ret = self.driver.create_group_snapshot( self.ctxt, TEST_GROUP_SNAP[0], [TEST_SNAPSHOT[0]] ) self.assertEqual(1, get_volume_type_extra_specs.call_count) self.assertEqual(1, get_volume_type_qos_specs.call_count) self.assertEqual(6, request.call_count) actual = ( None, [{'id': TEST_SNAPSHOT[0]['id'], 'provider_location': '1', 'status': 'available'}] ) self.assertTupleEqual(actual, ret) @mock.patch.object(requests.Session, "request") def test_delete_group_snapshot(self, request): request.side_effect = [FakeResponse(200, GET_LDEV_RESULT_PAIR_SNAP), FakeResponse(200, NOTFOUND_RESULT), FakeResponse(200, GET_SNAPSHOTS_RESULT), FakeResponse(200, GET_SNAPSHOTS_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(200, GET_LDEV_RESULT), FakeResponse(200, GET_LDEV_RESULT), FakeResponse(200, GET_LDEV_RESULT), FakeResponse(200, GET_LDEV_RESULT), FakeResponse(200, GET_LDEV_RESULT), FakeResponse(200, GET_LDEV_RESULT), FakeResponse(200, GET_LDEV_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)] ret = self.driver.delete_group_snapshot( self.ctxt, TEST_GROUP_SNAP[0], [TEST_SNAPSHOT[0]]) self.assertEqual(14, request.call_count) actual = ( {'status': TEST_GROUP_SNAP[0]['status']}, [{'id': TEST_SNAPSHOT[0]['id'], 'status': 'deleted'}] ) self.assertTupleEqual(actual, ret) @mock.patch.object(hbsd_fc.HBSDFCDriver, "_get_oslo_driver_opts") def test_get_driver_options(self, _get_oslo_driver_opts): _get_oslo_driver_opts.return_value = [] ret = self.driver.get_driver_options() actual = (hbsd_common.COMMON_VOLUME_OPTS + hbsd_common.COMMON_PORT_OPTS + hbsd_common.COMMON_PAIR_OPTS + hbsd_common.COMMON_NAME_OPTS + hbsd_rest.REST_VOLUME_OPTS + hbsd_rest.REST_PAIR_OPTS + hbsd_rest_fc.FC_VOLUME_OPTS + hbsd_replication._REP_OPTS + hbsd_replication.COMMON_MIRROR_OPTS + hbsd_replication.ISCSI_MIRROR_OPTS + hbsd_replication.REST_MIRROR_OPTS + hbsd_replication.REST_MIRROR_API_OPTS + hbsd_replication.REST_MIRROR_SSL_OPTS) self.assertEqual(actual, ret) @mock.patch.object(requests.Session, "request") @mock.patch.object(volume_types, 'get_volume_type_qos_specs') def test_is_modifiable_dr_value_new_dr_mode_disabled( self, get_volume_type_qos_specs, request): request.side_effect = [ FakeResponse(200, GET_LDEV_RESULT_PAIR_STATUS_TEST), FakeResponse(200, GET_LDEV_RESULT_PAIR_STATUS_TEST), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(200, GET_LDEVS_RESULT), ] get_volume_type_qos_specs.return_value = {'qos_specs': None} host = { 'capabilities': { 'location_info': { 'pool_id': 30, }, }, } extra_specs = {'hbsd:capacity_saving': 'disable'} new_type = fake_volume.fake_volume_type_obj( CTXT, id='00000000-0000-0000-0000-{0:012d}'.format(0), extra_specs=extra_specs) old_specs = {'hbsd:capacity_saving': 'deduplication_compression'} new_specs = {'hbsd:capacity_saving': 
'disable'} old_type_ref = volume_types.create(self.ctxt, 'old', old_specs) new_type_ref = volume_types.create(self.ctxt, 'new', new_specs) diff = volume_types.volume_types_diff(self.ctxt, old_type_ref['id'], new_type_ref['id'])[0] ret = self.driver.retype( self.ctxt, TEST_VOLUME[0], new_type, diff, host) self.assertEqual(4, request.call_count) self.assertTrue(ret) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/volume/drivers/hitachi/test_hitachi_hbsd_rest_iscsi.py0000664000175000017500000016174100000000000031335 0ustar00zuulzuul00000000000000# Copyright (C) 2020, 2024, Hitachi, Ltd. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # """Unit tests for Hitachi HBSD Driver.""" from unittest import mock from oslo_config import cfg import requests from cinder import context as cinder_context from cinder.db.sqlalchemy import api as sqlalchemy_api from cinder import exception from cinder.objects import group_snapshot as obj_group_snap from cinder.objects import snapshot as obj_snap from cinder.tests.unit import fake_group from cinder.tests.unit import fake_group_snapshot from cinder.tests.unit import fake_snapshot from cinder.tests.unit import fake_volume from cinder.tests.unit import test from cinder.volume import configuration as conf from cinder.volume import driver from cinder.volume.drivers.hitachi import hbsd_common from cinder.volume.drivers.hitachi import hbsd_iscsi from cinder.volume.drivers.hitachi import hbsd_replication from cinder.volume.drivers.hitachi import hbsd_rest from cinder.volume.drivers.hitachi import hbsd_rest_api from cinder.volume import volume_types from cinder.volume import volume_utils # Configuration parameter values CONFIG_MAP = { 'serial': '886000123456', 'my_ip': '127.0.0.1', 'rest_server_ip_addr': '172.16.18.108', 'rest_server_ip_port': '23451', 'port_id': 'CL1-A', 'host_grp_name': 'HBSD-127.0.0.1', 'host_mode': 'LINUX/IRIX', 'host_iscsi_name': 'iqn.hbsd-test-host', 'target_iscsi_name': 'iqn.hbsd-test-target', 'user_id': 'user', 'user_pass': 'password', 'pool_name': 'test_pool', 'ipv4Address': '111.22.333.44', 'tcpPort': '5555', 'auth_user': 'auth_user', 'auth_password': 'auth_password', } DEFAULT_CONNECTOR = { 'host': 'host', 'ip': CONFIG_MAP['my_ip'], 'initiator': CONFIG_MAP['host_iscsi_name'], 'multipath': False, } CTXT = cinder_context.get_admin_context() TEST_VOLUME = [] for i in range(5): volume = {} volume['id'] = '00000000-0000-0000-0000-{0:012d}'.format(i) volume['name'] = 'test-volume{0:d}'.format(i) volume['volume_type_id'] = '00000000-0000-0000-0000-{0:012d}'.format(i) if i == 3 or i == 4: volume['provider_location'] = None else: volume['provider_location'] = '{0:d}'.format(i) volume['size'] = 128 if i == 2: volume['status'] = 'in-use' elif i == 4: volume['status'] = None else: volume['status'] = 'available' volume = fake_volume.fake_volume_obj(CTXT, **volume) volume.volume_type = fake_volume.fake_volume_type_obj(CTXT) TEST_VOLUME.append(volume) def _volume_get(context, 
volume_id): """Return predefined volume info.""" return TEST_VOLUME[int(volume_id.replace("-", ""))] TEST_SNAPSHOT = [] for i in range(2): snapshot = {} snapshot['id'] = '10000000-0000-0000-0000-{0:012d}'.format(i) snapshot['name'] = 'TEST_SNAPSHOT{0:d}'.format(i) snapshot['provider_location'] = '{0:d}'.format(i + 1) snapshot['status'] = 'available' snapshot['volume_id'] = '00000000-0000-0000-0000-{0:012d}'.format(i) snapshot['volume'] = _volume_get(None, snapshot['volume_id']) snapshot['volume_name'] = 'test-volume{0:d}'.format(i) snapshot['volume_size'] = 128 if i == 1: snapshot['volume_type_id'] =\ '00000000-0000-0000-0000-{0:012d}'.format(i) snapshot = obj_snap.Snapshot._from_db_object( CTXT, obj_snap.Snapshot(), fake_snapshot.fake_db_snapshot(**snapshot)) TEST_SNAPSHOT.append(snapshot) TEST_GROUP = [] for i in range(2): group = {} group['id'] = '20000000-0000-0000-0000-{0:012d}'.format(i) group['status'] = 'available' group = fake_group.fake_group_obj(CTXT, **group) TEST_GROUP.append(group) TEST_GROUP_SNAP = [] group_snapshot = {} group_snapshot['id'] = '30000000-0000-0000-0000-{0:012d}'.format(0) group_snapshot['status'] = 'available' group_snapshot = obj_group_snap.GroupSnapshot._from_db_object( CTXT, obj_group_snap.GroupSnapshot(), fake_group_snapshot.fake_db_group_snapshot(**group_snapshot)) TEST_GROUP_SNAP.append(group_snapshot) # Dummy response for REST API POST_SESSIONS_RESULT = { "token": "b74777a3-f9f0-4ea8-bd8f-09847fac48d3", "sessionId": 0, } GET_PORTS_RESULT = { "data": [ { "portId": CONFIG_MAP['port_id'], "portType": "ISCSI", "portAttributes": [ "TAR", "MCU", "RCU", "ELUN" ], "portSpeed": "AUT", "loopId": "00", "fabricMode": False, "lunSecuritySetting": True, }, ], } GET_PORT_RESULT = { "ipv4Address": CONFIG_MAP['ipv4Address'], "tcpPort": CONFIG_MAP['tcpPort'], } GET_HOST_ISCSIS_RESULT = { "data": [ { "hostGroupNumber": 0, "iscsiName": CONFIG_MAP['host_iscsi_name'], }, ], } GET_HOST_GROUP_RESULT = { "hostGroupName": CONFIG_MAP['host_grp_name'], "iscsiName": CONFIG_MAP['target_iscsi_name'], } GET_HOST_GROUPS_RESULT = { "data": [ { "hostGroupNumber": 0, "portId": CONFIG_MAP['port_id'], "hostGroupName": "HBSD-test", "iscsiName": CONFIG_MAP['target_iscsi_name'], }, ], } COMPLETED_SUCCEEDED_RESULT = { "status": "Completed", "state": "Succeeded", "affectedResources": ('a/b/c/1',), } GET_LDEV_RESULT = { "emulationType": "OPEN-V-CVS", "blockCapacity": 2097152, "attributes": ["CVS", "HDP"], "status": "NML", "poolId": 30, "dataReductionStatus": "DISABLED", "dataReductionMode": "disabled", "label": "00000000000000000000000000000000", } GET_LDEV_RESULT_SNAP = { "emulationType": "OPEN-V-CVS", "blockCapacity": 2097152, "attributes": ["CVS", "HDP"], "status": "NML", "poolId": 30, "dataReductionStatus": "DISABLED", "dataReductionMode": "disabled", "label": "10000000000000000000000000000000", } GET_LDEV_RESULT_MAPPED = { "emulationType": "OPEN-V-CVS", "blockCapacity": 2097152, "attributes": ["CVS", "HDP"], "status": "NML", "ports": [ { "portId": CONFIG_MAP['port_id'], "hostGroupNumber": 0, "hostGroupName": CONFIG_MAP['host_grp_name'], "lun": 1 }, ], } GET_LDEV_RESULT_PAIR = { "emulationType": "OPEN-V-CVS", "blockCapacity": 2097152, "attributes": ["CVS", "HDP", "HTI"], "status": "NML", "label": "10000000000000000000000000000000", } GET_POOLS_RESULT = { "data": [ { "poolId": 30, "poolName": CONFIG_MAP['pool_name'], "availableVolumeCapacity": 480144, "totalPoolCapacity": 507780, "totalLocatedCapacity": 71453172, "virtualVolumeCapacityRate": -1, }, ], } GET_SNAPSHOTS_RESULT = { "data": [ { 
"primaryOrSecondary": "S-VOL", "status": "PSUS", "pvolLdevId": 0, "muNumber": 1, "svolLdevId": 1, }, ], } GET_SNAPSHOTS_RESULT_PAIR = { "data": [ { "primaryOrSecondary": "S-VOL", "status": "PAIR", "pvolLdevId": 0, "muNumber": 1, "svolLdevId": 1, }, ], } GET_HOST_GROUPS_RESULT_PAIR = { "data": [ { "hostGroupNumber": 1, "portId": CONFIG_MAP['port_id'], "hostGroupName": "HBSD-pair00", }, ], } GET_LDEVS_RESULT = { "data": [ { "ldevId": 0, "label": "15960cc738c94c5bb4f1365be5eeed44", }, { "ldevId": 1, "label": "15960cc738c94c5bb4f1365be5eeed45", }, ], } NOTFOUND_RESULT = { "data": [], } def _brick_get_connector_properties(multipath=False, enforce_multipath=False): """Return a predefined connector object.""" return DEFAULT_CONNECTOR class FakeResponse(): def __init__(self, status_code, data=None, headers=None): self.status_code = status_code self.data = data self.text = data self.content = data self.headers = {'Content-Type': 'json'} if headers is None else headers def json(self): return self.data class HBSDRESTISCSIDriverTest(test.TestCase): """Unit test class for HBSD REST interface iSCSI module.""" test_existing_ref = {'source-id': '1'} test_existing_ref_name = { 'source-name': '15960cc7-38c9-4c5b-b4f1-365be5eeed45'} def setUp(self): """Set up the test environment.""" def _set_required(opts, required): for opt in opts: opt.required = required # Initialize Cinder and avoid checking driver options. rest_required_opts = [ opt for opt in hbsd_rest.REST_VOLUME_OPTS if opt.required] common_required_opts = [ opt for opt in hbsd_common.COMMON_VOLUME_OPTS if opt.required] _set_required(rest_required_opts, False) _set_required(common_required_opts, False) super(HBSDRESTISCSIDriverTest, self).setUp() _set_required(rest_required_opts, True) _set_required(common_required_opts, True) self.configuration = mock.Mock(conf.Configuration) self.ctxt = cinder_context.get_admin_context() self._setup_config() self._setup_driver() def _setup_config(self): """Set configuration parameter values.""" self.configuration.config_group = "REST" self.configuration.volume_backend_name = "RESTISCSI" self.configuration.volume_driver = ( "cinder.volume.drivers.hitachi.hbsd_iscsi.HBSDISCSIDriver") self.configuration.reserved_percentage = "0" self.configuration.use_multipath_for_image_xfer = False self.configuration.enforce_multipath_for_image_xfer = False self.configuration.max_over_subscription_ratio = 500.0 self.configuration.driver_ssl_cert_verify = False self.configuration.hitachi_storage_id = CONFIG_MAP['serial'] self.configuration.hitachi_pools = ['30'] self.configuration.hitachi_snap_pool = None self.configuration.hitachi_ldev_range = "0-1" self.configuration.hitachi_target_ports = [CONFIG_MAP['port_id']] self.configuration.hitachi_compute_target_ports = [ CONFIG_MAP['port_id']] self.configuration.hitachi_group_create = True self.configuration.hitachi_group_delete = True self.configuration.hitachi_copy_speed = 3 self.configuration.hitachi_copy_check_interval = 3 self.configuration.hitachi_async_copy_check_interval = 10 self.configuration.hitachi_port_scheduler = False self.configuration.hitachi_group_name_format = None self.configuration.san_login = CONFIG_MAP['user_id'] self.configuration.san_password = CONFIG_MAP['user_pass'] self.configuration.san_ip = CONFIG_MAP[ 'rest_server_ip_addr'] self.configuration.san_api_port = CONFIG_MAP[ 'rest_server_ip_port'] self.configuration.hitachi_rest_disable_io_wait = True self.configuration.hitachi_rest_tcp_keepalive = True self.configuration.hitachi_discard_zero_page = True 
self.configuration.hitachi_lun_timeout = hbsd_rest._LUN_TIMEOUT self.configuration.hitachi_lun_retry_interval = ( hbsd_rest._LUN_RETRY_INTERVAL) self.configuration.hitachi_restore_timeout = hbsd_rest._RESTORE_TIMEOUT self.configuration.hitachi_state_transition_timeout = ( hbsd_rest._STATE_TRANSITION_TIMEOUT) self.configuration.hitachi_lock_timeout = hbsd_rest_api._LOCK_TIMEOUT self.configuration.hitachi_rest_timeout = hbsd_rest_api._REST_TIMEOUT self.configuration.hitachi_extend_timeout = ( hbsd_rest_api._EXTEND_TIMEOUT) self.configuration.hitachi_exec_retry_interval = ( hbsd_rest_api._EXEC_RETRY_INTERVAL) self.configuration.hitachi_rest_connect_timeout = ( hbsd_rest_api._DEFAULT_CONNECT_TIMEOUT) self.configuration.hitachi_rest_job_api_response_timeout = ( hbsd_rest_api._JOB_API_RESPONSE_TIMEOUT) self.configuration.hitachi_rest_get_api_response_timeout = ( hbsd_rest_api._GET_API_RESPONSE_TIMEOUT) self.configuration.hitachi_rest_server_busy_timeout = ( hbsd_rest_api._REST_SERVER_BUSY_TIMEOUT) self.configuration.hitachi_rest_keep_session_loop_interval = ( hbsd_rest_api._KEEP_SESSION_LOOP_INTERVAL) self.configuration.hitachi_rest_another_ldev_mapped_retry_timeout = ( hbsd_rest_api._ANOTHER_LDEV_MAPPED_RETRY_TIMEOUT) self.configuration.hitachi_rest_tcp_keepidle = ( hbsd_rest_api._TCP_KEEPIDLE) self.configuration.hitachi_rest_tcp_keepintvl = ( hbsd_rest_api._TCP_KEEPINTVL) self.configuration.hitachi_rest_tcp_keepcnt = ( hbsd_rest_api._TCP_KEEPCNT) self.configuration.hitachi_host_mode_options = [] self.configuration.use_chap_auth = True self.configuration.chap_username = CONFIG_MAP['auth_user'] self.configuration.chap_password = CONFIG_MAP['auth_password'] self.configuration.san_thin_provision = True self.configuration.san_private_key = '' self.configuration.san_clustername = '' self.configuration.san_ssh_port = '22' self.configuration.san_is_local = False self.configuration.ssh_conn_timeout = '30' self.configuration.ssh_min_pool_conn = '1' self.configuration.ssh_max_pool_conn = '5' self.configuration.hitachi_replication_number = 0 self.configuration.hitachi_pair_target_number = 0 self.configuration.hitachi_rest_pair_target_ports = [] self.configuration.hitachi_quorum_disk_id = '' self.configuration.hitachi_mirror_copy_speed = '' self.configuration.hitachi_mirror_storage_id = '' self.configuration.hitachi_mirror_pool = '' self.configuration.hitachi_mirror_ldev_range = '' self.configuration.hitachi_mirror_target_ports = '' self.configuration.hitachi_mirror_rest_user = '' self.configuration.hitachi_mirror_rest_password = '' self.configuration.hitachi_mirror_rest_api_ip = '' self.configuration.hitachi_set_mirror_reserve_attribute = '' self.configuration.hitachi_path_group_id = '' self.configuration.safe_get = self._fake_safe_get CONF = cfg.CONF CONF.my_ip = CONFIG_MAP['my_ip'] def _fake_safe_get(self, value): """Retrieve a configuration value avoiding throwing an exception.""" try: val = getattr(self.configuration, value) except AttributeError: val = None return val @mock.patch.object(requests.Session, "request") @mock.patch.object( volume_utils, 'brick_get_connector_properties', side_effect=_brick_get_connector_properties) def _setup_driver( self, brick_get_connector_properties=None, request=None): """Set up the driver environment.""" self.driver = hbsd_iscsi.HBSDISCSIDriver( configuration=self.configuration) request.side_effect = [FakeResponse(200, POST_SESSIONS_RESULT), FakeResponse(200, GET_PORTS_RESULT), FakeResponse(200, GET_PORT_RESULT), FakeResponse(200, GET_HOST_ISCSIS_RESULT), 
                               FakeResponse(200, GET_HOST_GROUP_RESULT),
                               FakeResponse(200, GET_HOST_GROUPS_RESULT_PAIR)]
        self.driver.do_setup(None)
        self.driver.check_for_setup_error()
        self.driver.local_path(None)
        self.driver.create_export(None, None, None)
        self.driver.ensure_export(None, None)
        self.driver.remove_export(None, None)
        self.driver.create_export_snapshot(None, None, None)
        self.driver.remove_export_snapshot(None, None)
        # stop the Loopingcall within the do_setup treatment
        self.driver.common.client.keep_session_loop.stop()

    def tearDown(self):
        self.client = None
        super(HBSDRESTISCSIDriverTest, self).tearDown()

    # API test cases
    @mock.patch.object(requests.Session, "request")
    @mock.patch.object(
        volume_utils, 'brick_get_connector_properties',
        side_effect=_brick_get_connector_properties)
    def test_do_setup(self, brick_get_connector_properties, request):
        drv = hbsd_iscsi.HBSDISCSIDriver(
            configuration=self.configuration)
        self._setup_config()
        request.side_effect = [FakeResponse(200, POST_SESSIONS_RESULT),
                               FakeResponse(200, GET_PORTS_RESULT),
                               FakeResponse(200, GET_PORT_RESULT),
                               FakeResponse(200, GET_HOST_ISCSIS_RESULT),
                               FakeResponse(200, GET_HOST_GROUP_RESULT),
                               FakeResponse(200, GET_HOST_GROUPS_RESULT_PAIR)]
        drv.do_setup(None)
        self.assertEqual(
            {CONFIG_MAP['port_id']: '%(ip)s:%(port)s' % {
                'ip': CONFIG_MAP['ipv4Address'],
                'port': CONFIG_MAP['tcpPort']}},
            drv.common.storage_info['portals'])
        self.assertEqual(1, brick_get_connector_properties.call_count)
        self.assertEqual(6, request.call_count)
        # stop the Loopingcall within the do_setup treatment
        drv.common.client.keep_session_loop.stop()

    @mock.patch.object(requests.Session, "request")
    @mock.patch.object(
        volume_utils, 'brick_get_connector_properties',
        side_effect=_brick_get_connector_properties)
    def test_do_setup_create_hg(self, brick_get_connector_properties,
                                request):
        """Normal case: The host group does not exist."""
        drv = hbsd_iscsi.HBSDISCSIDriver(
            configuration=self.configuration)
        self._setup_config()
        request.side_effect = [FakeResponse(200, POST_SESSIONS_RESULT),
                               FakeResponse(200, GET_PORTS_RESULT),
                               FakeResponse(200, GET_PORT_RESULT),
                               FakeResponse(200, NOTFOUND_RESULT),
                               FakeResponse(200, NOTFOUND_RESULT),
                               FakeResponse(202, COMPLETED_SUCCEEDED_RESULT),
                               FakeResponse(202, COMPLETED_SUCCEEDED_RESULT),
                               FakeResponse(202, COMPLETED_SUCCEEDED_RESULT),
                               FakeResponse(200, GET_HOST_GROUPS_RESULT_PAIR)]
        drv.do_setup(None)
        self.assertEqual(
            {CONFIG_MAP['port_id']: '%(ip)s:%(port)s' % {
                'ip': CONFIG_MAP['ipv4Address'],
                'port': CONFIG_MAP['tcpPort']}},
            drv.common.storage_info['portals'])
        self.assertEqual(1, brick_get_connector_properties.call_count)
        self.assertEqual(9, request.call_count)
        # stop the Loopingcall within the do_setup treatment
        drv.common.client.keep_session_loop.stop()

    @mock.patch.object(requests.Session, "request")
    @mock.patch.object(
        volume_utils, 'brick_get_connector_properties',
        side_effect=_brick_get_connector_properties)
    def test_do_setup_create_hg_format(
            self, brick_get_connector_properties, request):
        drv = hbsd_iscsi.HBSDISCSIDriver(configuration=self.configuration)
        self._setup_config()
        self.configuration.hitachi_group_name_format = 'HBSD-{ip}@{host}-_:.'
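        # The mocked host group lookups below return NOTFOUND, so do_setup is
        # expected to create a new host group using the custom name format
        # configured above.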
request.side_effect = [FakeResponse(200, POST_SESSIONS_RESULT), FakeResponse(200, GET_PORTS_RESULT), FakeResponse(200, GET_PORT_RESULT), FakeResponse(200, NOTFOUND_RESULT), FakeResponse(200, NOTFOUND_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(200, GET_HOST_GROUPS_RESULT_PAIR)] drv.do_setup(None) self.assertEqual( {CONFIG_MAP['port_id']: '%(ip)s:%(port)s' % { 'ip': CONFIG_MAP['ipv4Address'], 'port': CONFIG_MAP['tcpPort']}}, drv.common.storage_info['portals']) self.assertEqual(1, brick_get_connector_properties.call_count) self.assertEqual(9, request.call_count) # stop the Loopingcall within the do_setup treatment drv.common.client.keep_session_loop.stop() @mock.patch.object(requests.Session, "request") @mock.patch.object( volume_utils, 'brick_get_connector_properties', side_effect=_brick_get_connector_properties) def test_do_setup_create_hg_format_error( self, brick_get_connector_properties, request): drv = hbsd_iscsi.HBSDISCSIDriver(configuration=self.configuration) self._setup_config() self.configuration.hitachi_group_name_format = ( 'HBSD-{ip}@{host}ZZZZZZZZZZZ') request.side_effect = [FakeResponse(200, POST_SESSIONS_RESULT), FakeResponse(200, GET_PORTS_RESULT), FakeResponse(200, GET_PORT_RESULT), FakeResponse(200, NOTFOUND_RESULT), FakeResponse(200, NOTFOUND_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)] self.assertRaises(exception.VolumeDriverException, drv.do_setup, None) @mock.patch.object(requests.Session, "request") def test_extend_volume(self, request): request.side_effect = [FakeResponse(200, GET_LDEV_RESULT), FakeResponse(200, GET_LDEV_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)] self.driver.extend_volume(TEST_VOLUME[0], 256) self.assertEqual(3, request.call_count) @mock.patch.object(driver.ISCSIDriver, "get_goodness_function") @mock.patch.object(driver.ISCSIDriver, "get_filter_function") @mock.patch.object(requests.Session, "request") def test__update_volume_stats( self, request, get_filter_function, get_goodness_function): request.return_value = FakeResponse(200, GET_POOLS_RESULT) get_filter_function.return_value = None get_goodness_function.return_value = None self.driver._update_volume_stats() self.assertEqual('Hitachi', self.driver._stats['vendor_name']) self.assertTrue(self.driver._stats["pools"][0]['multiattach']) self.assertEqual(1, request.call_count) self.assertEqual(1, get_filter_function.call_count) self.assertEqual(1, get_goodness_function.call_count) @mock.patch.object(requests.Session, "request") @mock.patch.object(volume_types, 'get_volume_type_extra_specs') @mock.patch.object(volume_types, 'get_volume_type_qos_specs') def test_create_volume(self, get_volume_type_qos_specs, get_volume_type_extra_specs, request): request.return_value = FakeResponse(202, COMPLETED_SUCCEEDED_RESULT) get_volume_type_extra_specs.return_value = {} get_volume_type_qos_specs.return_value = {'qos_specs': None} self.driver.common._stats = {} self.driver.common._stats['pools'] = [ {'location_info': {'pool_id': 30}}] ret = self.driver.create_volume(TEST_VOLUME[4]) self.assertEqual('1', ret['provider_location']) self.assertEqual(2, request.call_count) @mock.patch.object(requests.Session, "request") def test_delete_volume(self, request): request.side_effect = [FakeResponse(200, GET_LDEV_RESULT), FakeResponse(200, GET_LDEV_RESULT), FakeResponse(200, 
GET_LDEV_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)] self.driver.delete_volume(TEST_VOLUME[0]) self.assertEqual(4, request.call_count) @mock.patch.object(requests.Session, "request") @mock.patch.object(volume_types, 'get_volume_type_extra_specs') @mock.patch.object(sqlalchemy_api, 'volume_get', side_effect=_volume_get) @mock.patch.object(volume_types, 'get_volume_type_qos_specs') def test_create_snapshot( self, get_volume_type_qos_specs, volume_get, get_volume_type_extra_specs, request): get_volume_type_extra_specs.return_value = {} get_volume_type_qos_specs.return_value = {'qos_specs': None} request.side_effect = [FakeResponse(200, GET_LDEV_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(200, GET_SNAPSHOTS_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)] self.driver.common._stats = {} self.driver.common._stats['pools'] = [ {'location_info': {'pool_id': 30}}] ret = self.driver.create_snapshot(TEST_SNAPSHOT[0]) self.assertEqual('1', ret['provider_location']) self.assertEqual(1, get_volume_type_extra_specs.call_count) self.assertEqual(1, get_volume_type_qos_specs.call_count) self.assertEqual(5, request.call_count) @mock.patch.object(requests.Session, "request") @mock.patch.object(volume_types, 'get_volume_type_extra_specs') @mock.patch.object(sqlalchemy_api, 'volume_get', side_effect=_volume_get) @mock.patch.object(volume_types, 'get_volume_type_qos_specs') def test_create_snapshot_qos( self, get_volume_type_qos_specs, volume_get, get_volume_type_extra_specs, request): input_qos_specs = { 'qos_specs': { 'consumer': 'back-end', 'specs': {'upperIops': '1000'}}} get_volume_type_qos_specs.return_value = input_qos_specs get_volume_type_extra_specs.return_value = {} request.side_effect = [FakeResponse(200, GET_LDEV_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(200, GET_SNAPSHOTS_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)] self.driver.common._stats = {} self.driver.common._stats['pools'] = [ {'location_info': {'pool_id': 30}}] ret = self.driver.create_snapshot(TEST_SNAPSHOT[1]) self.assertEqual('1', ret['provider_location']) self.assertEqual(1, get_volume_type_extra_specs.call_count) self.assertEqual(1, get_volume_type_qos_specs.call_count) self.assertEqual(6, request.call_count) @mock.patch.object(requests.Session, "request") def test_delete_snapshot(self, request): request.side_effect = [FakeResponse(200, GET_LDEV_RESULT_SNAP), FakeResponse(200, GET_LDEV_RESULT), FakeResponse(200, GET_LDEV_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)] self.driver.delete_snapshot(TEST_SNAPSHOT[0]) self.assertEqual(4, request.call_count) @mock.patch.object(requests.Session, "request") def test_delete_snapshot_is_invalid_ldev(self, request): request.return_value = FakeResponse(200, GET_LDEV_RESULT) self.driver.delete_snapshot(TEST_SNAPSHOT[0]) self.assertEqual(1, request.call_count) @mock.patch.object(requests.Session, "request") @mock.patch.object(volume_types, 'get_volume_type_extra_specs') @mock.patch.object(volume_types, 'get_volume_type_qos_specs') def test_create_cloned_volume(self, get_volume_type_qos_specs, get_volume_type_extra_specs, request): request.side_effect = [FakeResponse(200, GET_LDEV_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(200, GET_SNAPSHOTS_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)] 
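        # The mocked responses above mirror the REST exchange the driver is
        # expected to make while cloning: an LDEV lookup, two asynchronous
        # jobs, a snapshot status query and a final job completion.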
get_volume_type_extra_specs.return_value = {} get_volume_type_qos_specs.return_value = {'qos_specs': None} self.driver.common._stats = {} self.driver.common._stats['pools'] = [ {'location_info': {'pool_id': 30}}] vol = self.driver.create_cloned_volume(TEST_VOLUME[0], TEST_VOLUME[1]) self.assertEqual('1', vol['provider_location']) self.assertEqual(1, get_volume_type_extra_specs.call_count) self.assertEqual(1, get_volume_type_qos_specs.call_count) self.assertEqual(5, request.call_count) @mock.patch.object(requests.Session, "request") @mock.patch.object(volume_types, 'get_volume_type_extra_specs') @mock.patch.object(volume_types, 'get_volume_type_qos_specs') def test_create_volume_from_snapshot( self, get_volume_type_qos_specs, get_volume_type_extra_specs, request): request.side_effect = [FakeResponse(200, GET_LDEV_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(200, GET_SNAPSHOTS_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)] get_volume_type_extra_specs.return_value = {} get_volume_type_qos_specs.return_value = {'qos_specs': None} self.driver.common._stats = {} self.driver.common._stats['pools'] = [ {'location_info': {'pool_id': 30}}] vol = self.driver.create_volume_from_snapshot( TEST_VOLUME[0], TEST_SNAPSHOT[0]) self.assertEqual('1', vol['provider_location']) self.assertEqual(1, get_volume_type_extra_specs.call_count) self.assertEqual(1, get_volume_type_qos_specs.call_count) self.assertEqual(5, request.call_count) @mock.patch.object(requests.Session, "request") @mock.patch.object(volume_types, 'get_volume_type_extra_specs') @mock.patch.object(volume_types, 'get_volume_type_qos_specs') def test_create_volume_from_snapshot_qos( self, get_volume_type_qos_specs, get_volume_type_extra_specs, request): request.side_effect = [FakeResponse(200, GET_LDEV_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(200, GET_SNAPSHOTS_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)] input_qos_specs = { 'qos_specs': { 'consumer': 'back-end', 'specs': {'upperIops': '1000'}}} get_volume_type_qos_specs.return_value = input_qos_specs get_volume_type_extra_specs.return_value = {} self.driver.common._stats = {} self.driver.common._stats['pools'] = [ {'location_info': {'pool_id': 30}}] vol = self.driver.create_volume_from_snapshot( TEST_VOLUME[0], TEST_SNAPSHOT[0]) self.assertEqual('1', vol['provider_location']) self.assertEqual(1, get_volume_type_extra_specs.call_count) self.assertEqual(1, get_volume_type_qos_specs.call_count) self.assertEqual(6, request.call_count) @mock.patch.object(requests.Session, "request") @mock.patch.object(volume_types, 'get_volume_type_extra_specs') def test_initialize_connection( self, get_volume_type_extra_specs, request): extra_specs = {"hbsd:target_ports": "CL1-A"} get_volume_type_extra_specs.return_value = extra_specs request.side_effect = [FakeResponse(200, GET_HOST_ISCSIS_RESULT), FakeResponse(200, GET_HOST_GROUP_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)] ret = self.driver.initialize_connection( TEST_VOLUME[0], DEFAULT_CONNECTOR) self.assertEqual('iscsi', ret['driver_volume_type']) self.assertEqual( '%(ip)s:%(port)s' % { 'ip': CONFIG_MAP['ipv4Address'], 'port': CONFIG_MAP['tcpPort'], }, ret['data']['target_portal']) self.assertEqual(CONFIG_MAP['target_iscsi_name'], ret['data']['target_iqn']) self.assertEqual('CHAP', ret['data']['auth_method']) 
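        # The CHAP credentials returned in the connection info should match
        # the chap_username / chap_password set in _setup_config().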
self.assertEqual(CONFIG_MAP['auth_user'], ret['data']['auth_username']) self.assertEqual( CONFIG_MAP['auth_password'], ret['data']['auth_password']) self.assertEqual(1, ret['data']['target_lun']) self.assertEqual(1, get_volume_type_extra_specs.call_count) self.assertEqual(3, request.call_count) @mock.patch.object(requests.Session, "request") @mock.patch.object(volume_types, 'get_volume_type_extra_specs') def test_initialize_connection_shared_target( self, get_volume_type_extra_specs, request): """Normal case: A target shared with other systems.""" extra_specs = {"hbsd:target_ports": "CL1-A"} get_volume_type_extra_specs.return_value = extra_specs request.side_effect = [FakeResponse(200, NOTFOUND_RESULT), FakeResponse(200, GET_HOST_GROUPS_RESULT), FakeResponse(200, GET_HOST_ISCSIS_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)] ret = self.driver.initialize_connection( TEST_VOLUME[0], DEFAULT_CONNECTOR) self.assertEqual('iscsi', ret['driver_volume_type']) self.assertEqual( '%(ip)s:%(port)s' % { 'ip': CONFIG_MAP['ipv4Address'], 'port': CONFIG_MAP['tcpPort'], }, ret['data']['target_portal']) self.assertEqual(CONFIG_MAP['target_iscsi_name'], ret['data']['target_iqn']) self.assertEqual('CHAP', ret['data']['auth_method']) self.assertEqual(CONFIG_MAP['auth_user'], ret['data']['auth_username']) self.assertEqual( CONFIG_MAP['auth_password'], ret['data']['auth_password']) self.assertEqual(1, ret['data']['target_lun']) self.assertEqual(1, get_volume_type_extra_specs.call_count) self.assertEqual(4, request.call_count) @mock.patch.object(requests.Session, "request") def test_terminate_connection(self, request): request.side_effect = [FakeResponse(200, GET_HOST_ISCSIS_RESULT), FakeResponse(200, GET_HOST_GROUP_RESULT), FakeResponse(200, GET_LDEV_RESULT_MAPPED), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(200, NOTFOUND_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)] self.driver.terminate_connection(TEST_VOLUME[2], DEFAULT_CONNECTOR) self.assertEqual(6, request.call_count) @mock.patch.object(requests.Session, "request") def test_terminate_connection_not_connector(self, request): """Normal case: Connector is None.""" request.side_effect = [FakeResponse(200, GET_LDEV_RESULT_MAPPED), FakeResponse(200, GET_HOST_GROUP_RESULT), FakeResponse(200, GET_HOST_ISCSIS_RESULT), FakeResponse(200, GET_HOST_GROUPS_RESULT), FakeResponse(200, GET_HOST_ISCSIS_RESULT), FakeResponse(200, GET_LDEV_RESULT_MAPPED), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(200, NOTFOUND_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)] self.driver.terminate_connection(TEST_VOLUME[2], None) self.assertEqual(9, request.call_count) @mock.patch.object(requests.Session, "request") def test_initialize_connection_snapshot(self, request): request.side_effect = [FakeResponse(200, GET_HOST_ISCSIS_RESULT), FakeResponse(200, GET_HOST_GROUP_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)] ret = self.driver.initialize_connection_snapshot( TEST_SNAPSHOT[0], DEFAULT_CONNECTOR) self.assertEqual('iscsi', ret['driver_volume_type']) self.assertEqual( '%(ip)s:%(port)s' % { 'ip': CONFIG_MAP['ipv4Address'], 'port': CONFIG_MAP['tcpPort'], }, ret['data']['target_portal']) self.assertEqual(CONFIG_MAP['target_iscsi_name'], ret['data']['target_iqn']) self.assertEqual('CHAP', ret['data']['auth_method']) self.assertEqual(CONFIG_MAP['auth_user'], ret['data']['auth_username']) self.assertEqual( CONFIG_MAP['auth_password'], ret['data']['auth_password']) self.assertEqual(1, ret['data']['target_lun']) self.assertEqual(3, 
request.call_count) @mock.patch.object(requests.Session, "request") def test_terminate_connection_snapshot(self, request): request.side_effect = [FakeResponse(200, GET_HOST_ISCSIS_RESULT), FakeResponse(200, GET_HOST_GROUP_RESULT), FakeResponse(200, GET_LDEV_RESULT_MAPPED), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(200, NOTFOUND_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)] self.driver.terminate_connection_snapshot( TEST_SNAPSHOT[0], DEFAULT_CONNECTOR) self.assertEqual(6, request.call_count) @mock.patch.object(requests.Session, "request") @mock.patch.object(volume_types, 'get_volume_type_qos_specs') def test_manage_existing(self, get_volume_type_qos_specs, request): get_volume_type_qos_specs.return_value = {'qos_specs': None} request.side_effect = [FakeResponse(200, GET_LDEV_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(200, GET_LDEVS_RESULT)] ret = self.driver.manage_existing( TEST_VOLUME[0], self.test_existing_ref) self.assertEqual('1', ret['provider_location']) self.assertEqual(1, get_volume_type_qos_specs.call_count) self.assertEqual(3, request.call_count) @mock.patch.object(requests.Session, "request") @mock.patch.object(volume_types, 'get_volume_type_qos_specs') def test_manage_existing_name(self, get_volume_type_qos_specs, request): get_volume_type_qos_specs.return_value = {'qos_specs': None} request.side_effect = [FakeResponse(200, GET_LDEVS_RESULT), FakeResponse(200, GET_LDEV_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(200, GET_LDEVS_RESULT)] ret = self.driver.manage_existing( TEST_VOLUME[0], self.test_existing_ref_name) self.assertEqual('1', ret['provider_location']) self.assertEqual(1, get_volume_type_qos_specs.call_count) self.assertEqual(4, request.call_count) @mock.patch.object(requests.Session, "request") def test_manage_existing_get_size(self, request): request.return_value = FakeResponse(200, GET_LDEV_RESULT) self.driver.manage_existing_get_size( TEST_VOLUME[0], self.test_existing_ref) self.assertEqual(1, request.call_count) @mock.patch.object(requests.Session, "request") def test_manage_existing_get_size_name(self, request): request.side_effect = [FakeResponse(200, GET_LDEVS_RESULT), FakeResponse(200, GET_LDEV_RESULT)] self.driver.manage_existing_get_size( TEST_VOLUME[0], self.test_existing_ref_name) self.assertEqual(2, request.call_count) @mock.patch.object(requests.Session, "request") def test_unmanage(self, request): request.side_effect = [FakeResponse(200, GET_LDEV_RESULT), FakeResponse(200, GET_LDEV_RESULT)] self.driver.unmanage(TEST_VOLUME[0]) self.assertEqual(2, request.call_count) @mock.patch.object(requests.Session, "request") def test_copy_image_to_volume(self, request): image_service = 'fake_image_service' image_id = 'fake_image_id' request.return_value = FakeResponse(202, COMPLETED_SUCCEEDED_RESULT) with mock.patch.object(driver.VolumeDriver, 'copy_image_to_volume') \ as mock_copy_image: self.driver.copy_image_to_volume( self.ctxt, TEST_VOLUME[0], image_service, image_id) mock_copy_image.assert_called_with( self.ctxt, TEST_VOLUME[0], image_service, image_id, disable_sparse=False) self.assertEqual(1, request.call_count) @mock.patch.object(requests.Session, "request") def test_update_migrated_volume(self, request): request.return_value = FakeResponse(202, COMPLETED_SUCCEEDED_RESULT) ret = self.driver.update_migrated_volume( self.ctxt, TEST_VOLUME[0], TEST_VOLUME[1], "available") self.assertEqual(1, request.call_count) actual = ({'_name_id': TEST_VOLUME[1]['id'], 'provider_location': 
                   TEST_VOLUME[1]['provider_location']})
        self.assertEqual(actual, ret)

    def test_unmanage_snapshot(self):
        """The driver does not support unmanage_snapshot."""
        self.assertRaises(
            NotImplementedError,
            self.driver.unmanage_snapshot,
            TEST_SNAPSHOT[0])

    @mock.patch.object(requests.Session, "request")
    @mock.patch.object(volume_types, 'get_volume_type_qos_specs')
    def test_retype(self, get_volume_type_qos_specs, request):
        request.side_effect = [FakeResponse(200, GET_LDEV_RESULT),
                               FakeResponse(200, GET_LDEV_RESULT),
                               FakeResponse(202, COMPLETED_SUCCEEDED_RESULT),
                               FakeResponse(200, GET_LDEVS_RESULT)]
        host = {
            'capabilities': {
                'location_info': {
                    'pool_id': 30,
                },
            },
        }
        extra_specs = {'hbsd:capacity_saving': 'deduplication_compression'}
        new_type = fake_volume.fake_volume_type_obj(
            CTXT,
            id='00000000-0000-0000-0000-{0:012d}'.format(0),
            extra_specs=extra_specs)
        old_specs = {'hbsd:capacity_saving': 'disable'}
        new_specs = {'hbsd:capacity_saving': 'deduplication_compression'}
        old_type_ref = volume_types.create(self.ctxt, 'old', old_specs)
        new_type_ref = volume_types.create(self.ctxt, 'new', new_specs)
        diff = volume_types.volume_types_diff(self.ctxt, old_type_ref['id'],
                                              new_type_ref['id'])[0]
        ret = self.driver.retype(
            self.ctxt, TEST_VOLUME[0], new_type, diff, host)
        self.assertEqual(4, request.call_count)
        self.assertTrue(ret)

    @mock.patch.object(requests.Session, "request")
    def test_migrate_volume(self, request):
        request.return_value = FakeResponse(200, GET_LDEV_RESULT)
        host = {
            'capabilities': {
                'location_info': {
                    'storage_id': CONFIG_MAP['serial'],
                    'pool_id': 30,
                },
            },
        }
        ret = self.driver.migrate_volume(self.ctxt, TEST_VOLUME[0], host)
        self.assertEqual(2, request.call_count)
        actual = (True, None)
        self.assertTupleEqual(actual, ret)

    def test_backup_use_temp_snapshot(self):
        self.assertTrue(self.driver.backup_use_temp_snapshot())

    @mock.patch.object(requests.Session, "request")
    def test_revert_to_snapshot(self, request):
        request.side_effect = [FakeResponse(200, GET_LDEV_RESULT_PAIR),
                               FakeResponse(200, GET_SNAPSHOTS_RESULT),
                               FakeResponse(200, GET_SNAPSHOTS_RESULT),
                               FakeResponse(202, COMPLETED_SUCCEEDED_RESULT),
                               FakeResponse(200, GET_SNAPSHOTS_RESULT)]
        self.driver.revert_to_snapshot(
            self.ctxt, TEST_VOLUME[0], TEST_SNAPSHOT[0])
        self.assertEqual(5, request.call_count)

    def test_create_group(self):
        ret = self.driver.create_group(self.ctxt, TEST_GROUP[0])
        self.assertIsNone(ret)

    @mock.patch.object(requests.Session, "request")
    def test_delete_group(self, request):
        request.side_effect = [FakeResponse(200, GET_LDEV_RESULT),
                               FakeResponse(200, GET_LDEV_RESULT),
                               FakeResponse(200, GET_LDEV_RESULT),
                               FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)]
        ret = self.driver.delete_group(
            self.ctxt, TEST_GROUP[0], [TEST_VOLUME[0]])
        self.assertEqual(4, request.call_count)
        actual = (
            {'status': TEST_GROUP[0]['status']},
            [{'id': TEST_VOLUME[0]['id'], 'status': 'deleted'}]
        )
        self.assertTupleEqual(actual, ret)

    @mock.patch.object(requests.Session, "request")
    @mock.patch.object(volume_types, 'get_volume_type_extra_specs')
    @mock.patch.object(volume_types, 'get_volume_type_qos_specs')
    def test_create_group_from_src_volume(
            self, get_volume_type_qos_specs, get_volume_type_extra_specs,
            request):
        get_volume_type_extra_specs.return_value = {}
        get_volume_type_qos_specs.return_value = {'qos_specs': None}
        request.side_effect = [FakeResponse(200, GET_LDEV_RESULT),
                               FakeResponse(202, COMPLETED_SUCCEEDED_RESULT),
                               FakeResponse(202, COMPLETED_SUCCEEDED_RESULT),
                               FakeResponse(200, GET_SNAPSHOTS_RESULT),
                               FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)]
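        # Pre-populate the pool statistics so the driver can resolve the
        # target pool (pool_id 30) without a stats refresh.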
self.driver.common._stats = {} self.driver.common._stats['pools'] = [ {'location_info': {'pool_id': 30}}] ret = self.driver.create_group_from_src( self.ctxt, TEST_GROUP[1], [TEST_VOLUME[1]], source_group=TEST_GROUP[0], source_vols=[TEST_VOLUME[0]] ) self.assertEqual(1, get_volume_type_extra_specs.call_count) self.assertEqual(1, get_volume_type_qos_specs.call_count) self.assertEqual(5, request.call_count) actual = ( None, [{'id': TEST_VOLUME[1]['id'], 'provider_location': '1'}]) self.assertTupleEqual(actual, ret) @mock.patch.object(requests.Session, "request") @mock.patch.object(volume_types, 'get_volume_type_extra_specs') @mock.patch.object(volume_types, 'get_volume_type_qos_specs') def test_create_group_from_src_snapshot( self, get_volume_type_qos_specs, get_volume_type_extra_specs, request): get_volume_type_extra_specs.return_value = {} get_volume_type_qos_specs.return_value = {'qos_specs': None} request.side_effect = [FakeResponse(200, GET_LDEV_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(200, GET_SNAPSHOTS_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)] self.driver.common._stats = {} self.driver.common._stats['pools'] = [ {'location_info': {'pool_id': 30}}] ret = self.driver.create_group_from_src( self.ctxt, TEST_GROUP[0], [TEST_VOLUME[0]], group_snapshot=TEST_GROUP_SNAP[0], snapshots=[TEST_SNAPSHOT[0]] ) self.assertEqual(1, get_volume_type_extra_specs.call_count) self.assertEqual(1, get_volume_type_qos_specs.call_count) self.assertEqual(5, request.call_count) actual = ( None, [{'id': TEST_VOLUME[0]['id'], 'provider_location': '1'}]) self.assertTupleEqual(actual, ret) @mock.patch.object(requests.Session, "request") @mock.patch.object(volume_types, 'get_volume_type_extra_specs') @mock.patch.object(volume_types, 'get_volume_type_qos_specs') def test_create_group_from_src_snapshot_qos( self, get_volume_type_qos_specs, get_volume_type_extra_specs, request): input_qos_specs = { 'qos_specs': { 'consumer': 'back-end', 'specs': {'upperIops': '1000'}}} get_volume_type_qos_specs.return_value = input_qos_specs get_volume_type_extra_specs.return_value = {} request.side_effect = [FakeResponse(200, GET_LDEV_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(200, GET_SNAPSHOTS_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)] self.driver.common._stats = {} self.driver.common._stats['pools'] = [ {'location_info': {'pool_id': 30}}] ret = self.driver.create_group_from_src( self.ctxt, TEST_GROUP[0], [TEST_VOLUME[0]], group_snapshot=TEST_GROUP_SNAP[0], snapshots=[TEST_SNAPSHOT[1]] ) self.assertEqual(1, get_volume_type_extra_specs.call_count) self.assertEqual(1, get_volume_type_qos_specs.call_count) self.assertEqual(6, request.call_count) actual = ( None, [{'id': TEST_VOLUME[0]['id'], 'provider_location': '1'}]) self.assertTupleEqual(actual, ret) def test_create_group_from_src_volume_error(self): self.assertRaises( exception.VolumeDriverException, self.driver.create_group_from_src, self.ctxt, TEST_GROUP[1], [TEST_VOLUME[1]], source_group=TEST_GROUP[0], source_vols=[TEST_VOLUME[3]] ) @mock.patch.object(volume_utils, 'is_group_a_cg_snapshot_type') def test_update_group(self, is_group_a_cg_snapshot_type): is_group_a_cg_snapshot_type.return_value = False ret = self.driver.update_group( self.ctxt, TEST_GROUP[0], add_volumes=[TEST_VOLUME[0]]) self.assertTupleEqual((None, None, None), ret) 
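    # A group flagged as a consistency-group snapshot type cannot have its
    # membership updated; the next case expects VolumeDriverException.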
@mock.patch.object(volume_utils, 'is_group_a_cg_snapshot_type') def test_update_group_error(self, is_group_a_cg_snapshot_type): is_group_a_cg_snapshot_type.return_value = True self.assertRaises( exception.VolumeDriverException, self.driver.update_group, self.ctxt, TEST_GROUP[0], add_volumes=[TEST_VOLUME[3]], remove_volumes=[TEST_VOLUME[0]] ) @mock.patch.object(requests.Session, "request") @mock.patch.object(sqlalchemy_api, 'volume_get', side_effect=_volume_get) @mock.patch.object(volume_types, 'get_volume_type_extra_specs') @mock.patch.object(volume_utils, 'is_group_a_cg_snapshot_type') @mock.patch.object(volume_types, 'get_volume_type_qos_specs') def test_create_group_snapshot_non_cg( self, get_volume_type_qos_specs, is_group_a_cg_snapshot_type, get_volume_type_extra_specs, volume_get, request): is_group_a_cg_snapshot_type.return_value = False get_volume_type_extra_specs.return_value = {} get_volume_type_qos_specs.return_value = {'qos_specs': None} request.side_effect = [FakeResponse(200, GET_LDEV_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(200, GET_SNAPSHOTS_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)] self.driver.common._stats = {} self.driver.common._stats['pools'] = [ {'location_info': {'pool_id': 30}}] ret = self.driver.create_group_snapshot( self.ctxt, TEST_GROUP_SNAP[0], [TEST_SNAPSHOT[0]] ) self.assertEqual(1, get_volume_type_extra_specs.call_count) self.assertEqual(5, request.call_count) actual = ( {'status': 'available'}, [{'id': TEST_SNAPSHOT[0]['id'], 'provider_location': '1', 'status': 'available'}] ) self.assertTupleEqual(actual, ret) @mock.patch.object(requests.Session, "request") @mock.patch.object(sqlalchemy_api, 'volume_get', side_effect=_volume_get) @mock.patch.object(volume_types, 'get_volume_type_extra_specs') @mock.patch.object(volume_utils, 'is_group_a_cg_snapshot_type') @mock.patch.object(volume_types, 'get_volume_type_qos_specs') def test_create_group_snapshot_cg( self, get_volume_type_qos_specs, is_group_a_cg_snapshot_type, get_volume_type_extra_specs, volume_get, request): is_group_a_cg_snapshot_type.return_value = True get_volume_type_extra_specs.return_value = {} get_volume_type_qos_specs.return_value = {'qos_specs': None} request.side_effect = [FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(200, GET_SNAPSHOTS_RESULT_PAIR), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(200, GET_SNAPSHOTS_RESULT)] self.driver.common._stats = {} self.driver.common._stats['pools'] = [ {'location_info': {'pool_id': 30}}] ret = self.driver.create_group_snapshot( self.ctxt, TEST_GROUP_SNAP[0], [TEST_SNAPSHOT[0]] ) self.assertEqual(1, get_volume_type_extra_specs.call_count) self.assertEqual(6, request.call_count) actual = ( None, [{'id': TEST_SNAPSHOT[0]['id'], 'provider_location': '1', 'status': 'available'}] ) self.assertTupleEqual(actual, ret) @mock.patch.object(requests.Session, "request") def test_delete_group_snapshot(self, request): request.side_effect = [FakeResponse(200, GET_LDEV_RESULT_PAIR), FakeResponse(200, NOTFOUND_RESULT), FakeResponse(200, GET_SNAPSHOTS_RESULT), FakeResponse(200, GET_SNAPSHOTS_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(200, GET_LDEV_RESULT), FakeResponse(200, GET_LDEV_RESULT), FakeResponse(200, GET_LDEV_RESULT), FakeResponse(200, GET_LDEV_RESULT), FakeResponse(200, 
GET_LDEV_RESULT), FakeResponse(200, GET_LDEV_RESULT), FakeResponse(200, GET_LDEV_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)] ret = self.driver.delete_group_snapshot( self.ctxt, TEST_GROUP_SNAP[0], [TEST_SNAPSHOT[0]]) self.assertEqual(14, request.call_count) actual = ( {'status': TEST_GROUP_SNAP[0]['status']}, [{'id': TEST_SNAPSHOT[0]['id'], 'status': 'deleted'}] ) self.assertTupleEqual(actual, ret) @mock.patch.object(hbsd_iscsi.HBSDISCSIDriver, "_get_oslo_driver_opts") def test_get_driver_options(self, _get_oslo_driver_opts): _get_oslo_driver_opts.return_value = [] ret = self.driver.get_driver_options() actual = (hbsd_common.COMMON_VOLUME_OPTS + hbsd_common.COMMON_PAIR_OPTS + hbsd_common.COMMON_NAME_OPTS + hbsd_rest.REST_VOLUME_OPTS + hbsd_rest.REST_PAIR_OPTS + hbsd_replication._REP_OPTS + hbsd_replication.COMMON_MIRROR_OPTS + hbsd_replication.ISCSI_MIRROR_OPTS + hbsd_replication.REST_MIRROR_OPTS + hbsd_replication.REST_MIRROR_API_OPTS + hbsd_replication.REST_MIRROR_SSL_OPTS) self.assertEqual(actual, ret) ././@PaxHeader0000000000000000000000000000003200000000000011450 xustar000000000000000026 mtime=1759315577.27512 cinder-27.0.0/cinder/tests/unit/volume/drivers/hpe/0000775000175000017500000000000000000000000022215 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/volume/drivers/hpe/__init__.py0000664000175000017500000000000000000000000024314 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/volume/drivers/hpe/fake_hpe_3par_client.py0000664000175000017500000000175300000000000026622 0ustar00zuulzuul00000000000000# (c) Copyright 2014-2015 Hewlett Packard Enterprise Development LP # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # """Fake HPE client for testing 3PAR without installing the client.""" import sys from unittest import mock from cinder.tests.unit.volume.drivers.hpe \ import fake_hpe_client_exceptions as hpeexceptions hpe3par = mock.Mock() hpe3par.version = "4.2.10" hpe3par.exceptions = hpeexceptions sys.modules['hpe3parclient'] = hpe3par ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/volume/drivers/hpe/fake_hpe_client_exceptions.py0000664000175000017500000000631000000000000030130 0ustar00zuulzuul00000000000000# (c) Copyright 2014-2015 Hewlett Packard Enterprise Development LP # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. # """Fake HPE client exceptions to use when mocking HPE clients.""" class UnsupportedVersion(Exception): """Unsupported version of the client.""" pass class ClientException(Exception): """The base exception class for these fake exceptions.""" _error_code = None _error_desc = None _error_ref = None _debug1 = None _debug2 = None def __init__(self, error=None): if error: if 'code' in error: self._error_code = error['code'] if 'desc' in error: self._error_desc = error['desc'] if 'ref' in error: self._error_ref = error['ref'] if 'debug1' in error: self._debug1 = error['debug1'] if 'debug2' in error: self._debug2 = error['debug2'] def get_code(self): return self._error_code def get_description(self): return self._error_desc def get_ref(self): return self._error_ref def __str__(self): formatted_string = self.message if self.http_status: formatted_string += " (HTTP %s)" % self.http_status if self._error_code: formatted_string += " %s" % self._error_code if self._error_desc: formatted_string += " - %s" % self._error_desc if self._error_ref: formatted_string += " - %s" % self._error_ref if self._debug1: formatted_string += " (1: '%s')" % self._debug1 if self._debug2: formatted_string += " (2: '%s')" % self._debug2 return formatted_string class HTTPConflict(ClientException): http_status = 409 message = "Conflict" def __init__(self, error=None): if error: super(HTTPConflict, self).__init__(error) if 'message' in error: self._error_desc = error['message'] def get_description(self): return self._error_desc class HTTPNotFound(ClientException): http_status = 404 message = "Not found" class HTTPForbidden(ClientException): http_status = 403 message = "Forbidden" class HTTPBadRequest(ClientException): http_status = 400 message = "Bad request" class HTTPUnauthorized(ClientException): http_status = 401 message = "Unauthorized" class HTTPServerError(ClientException): http_status = 500 message = "Error" def __init__(self, error=None): if error and 'message' in error: self._error_desc = error['message'] def get_description(self): return self._error_desc ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/volume/drivers/hpe/test_hpe3par.py0000664000175000017500000170531600000000000025205 0ustar00zuulzuul00000000000000# (c) Copyright 2013-2015 Hewlett Packard Enterprise Development LP # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""Unit tests for OpenStack Cinder volume drivers.""" import ast import copy from unittest import mock import ddt from oslo_config import cfg from oslo_utils import units from oslo_utils import uuidutils from cinder import context from cinder import exception from cinder.objects import fields from cinder.tests.unit import fake_snapshot from cinder.tests.unit import fake_volume from cinder.tests.unit import test from cinder.tests.unit.volume.drivers.hpe \ import fake_hpe_3par_client as hpe3parclient from cinder.volume.drivers.hpe import hpe_3par_base as hpedriverbase from cinder.volume.drivers.hpe import hpe_3par_common as hpecommon from cinder.volume.drivers.hpe import hpe_3par_fc as hpefcdriver from cinder.volume.drivers.hpe import hpe_3par_iscsi as hpedriver from cinder.volume import qos_specs from cinder.volume import volume_types from cinder.volume import volume_utils hpeexceptions = hpe3parclient.hpeexceptions CONF = cfg.CONF HPE3PAR_CPG = 'OpenStackCPG' HPE3PAR_CPG2 = 'fakepool' HPE3PAR_CPG_QOS = 'qospool' HPE3PAR_CPG_SNAP = 'OpenStackCPGSnap' HPE3PAR_USER_NAME = 'testUser' HPE3PAR_USER_PASS = 'testPassword' HPE3PAR_SAN_IP = '2.2.2.2' HPE3PAR_SAN_SSH_PORT = 999 HPE3PAR_SAN_SSH_CON_TIMEOUT = 44 HPE3PAR_SAN_SSH_PRIVATE = 'foobar' GOODNESS_FUNCTION = \ "stats.capacity_utilization < 0.6? 100:25" FILTER_FUNCTION = \ "stats.total_volumes < 400 && stats.capacity_utilization < 0.8" CHAP_USER_KEY = "HPQ-cinder-CHAP-name" CHAP_PASS_KEY = "HPQ-cinder-CHAP-secret" FLASH_CACHE_ENABLED = 1 FLASH_CACHE_DISABLED = 2 # Input/output (total read/write) operations per second. THROUGHPUT = 'throughput' # Data processed (total read/write) per unit time: kilobytes per second. BANDWIDTH = 'bandwidth' # Response time (total read/write): microseconds. LATENCY = 'latency' # IO size (total read/write): kilobytes. 
IO_SIZE = 'io_size' # Queue length for processing IO requests QUEUE_LENGTH = 'queue_length' # Average busy percentage AVG_BUSY_PERC = 'avg_busy_perc' # replication constants HPE3PAR_CPG_REMOTE = 'DestOpenStackCPG' HPE3PAR_CPG2_REMOTE = 'destfakepool' HPE3PAR_CPG_MAP = 'OpenStackCPG:DestOpenStackCPG fakepool:destfakepool' SYNC_MODE = 1 PERIODIC_MODE = 2 SYNC_PERIOD = 900 # EXISTENT_PATH error code returned from hpe3parclient EXISTENT_PATH = 73 class Comment(object): def __init__(self, expected): self.expected = expected def __eq__(self, actual): return (dict(ast.literal_eval(actual)) == self.expected) def __ne__(self, other): return not self.__eq__(other) class HPE3PARBaseDriver(test.TestCase): VOLUME_ID = 'd03338a9-9115-48a3-8dfc-35cdfcdc15a7' SRC_CG_VOLUME_ID = 'bd21d11b-c765-4c68-896c-6b07f63cfcb6' CLONE_ID = 'd03338a9-9115-48a3-8dfc-000000000000' VOLUME_TYPE_ID_REPLICATED = 'be9181f1-4040-46f2-8298-e7532f2bf9db' VOLUME_TYPE_ID_DEDUP = 'd03338a9-9115-48a3-8dfc-11111111111' VOLUME_TYPE_ID_TIRAMISU = 'd03338a9-9115-48a3-8dfc-44444444444' VOL_TYPE_ID_DEDUP_COMPRESS = 'd03338a9-9115-48a3-8dfc-33333333333' VOLUME_TYPE_ID_FLASH_CACHE = 'd03338a9-9115-48a3-8dfc-22222222222' VOLUME_NAME = 'volume-' + VOLUME_ID SRC_CG_VOLUME_NAME = 'volume-' + SRC_CG_VOLUME_ID VOLUME_NAME_3PAR = 'osv-0DM4qZEVSKON-DXN-NwVpw' SNAPSHOT_ID = '2f823bdc-e36e-4dc8-bd15-de1c7a28ff31' SNAPSHOT_NAME = 'snapshot-2f823bdc-e36e-4dc8-bd15-de1c7a28ff31' VOLUME_3PAR_NAME = 'osv-0DM4qZEVSKON-DXN-NwVpw' VOLUME_NAME_ID_3PAR_NAME = 'osv-L4I73ONuTci9Fd4ceij-MQ' SNAPSHOT_3PAR_NAME = 'oss-L4I73ONuTci9Fd4ceij-MQ' RCG_3PAR_NAME = 'rcg-0DM4qZEVSKON-DXN-N' RCG_3PAR_GROUP_NAME = 'rcg-YET.38iJR1KQDyA50k' GROUP_ID = '6044fedf-c889-4752-900f-2039d247a5df' CONSIS_GROUP_NAME = 'vvs-YET.38iJR1KQDyA50kel3w' SRC_CONSIS_GROUP_ID = '7d7dfa02-ac6e-48cb-96af-8a0cd3008d47' SRC_CONSIS_GROUP_NAME = 'vvs-fX36AqxuSMuWr4oM0wCNRw' CGSNAPSHOT_ID = 'e91c5ed5-daee-4e84-8724-1c9e31e7a1f2' CGSNAPSHOT_BASE_NAME = 'oss-6Rxe1druToSHJByeMeeh8g' CLIENT_ID = "12345" REPLICATION_CLIENT_ID = "54321" REPLICATION_BACKEND_ID = 'target' # fake host on the 3par FAKE_HOST = 'fakehost' FAKE_CINDER_HOST = 'fakehost@foo#' + HPE3PAR_CPG USER_ID = '2689d9a913974c008b1d859013f23607' PROJECT_ID = 'fac88235b9d64685a3530f73e490348f' VOLUME_ID_SNAP = '761fc5e5-5191-4ec7-aeba-33e36de44156' FAKE_DESC = 'test description name' FAKE_FC_PORTS = [{'portPos': {'node': 7, 'slot': 1, 'cardPort': 1}, 'type': 1, 'portWWN': '0987654321234', 'protocol': 1, 'mode': 2, 'linkState': 4}, {'portPos': {'node': 6, 'slot': 1, 'cardPort': 1}, 'type': 1, 'portWWN': '123456789000987', 'protocol': 1, 'mode': 2, 'linkState': 4}] QOS = {'qos:maxIOPS': '1000', 'qos:maxBWS': '50', 'qos:minIOPS': '100', 'qos:minBWS': '25', 'qos:latency': '25', 'qos:priority': 'low'} QOS_SPECS = {'maxIOPS': '1000', 'maxBWS': '50', 'minIOPS': '100', 'minBWS': '25', 'latency': '25', 'priority': 'low'} VVS_NAME = "myvvs" FAKE_ISCSI_PORT = {'portPos': {'node': 8, 'slot': 1, 'cardPort': 1}, 'protocol': 2, 'mode': 2, 'IPAddr': '1.1.1.2', 'iSCSIName': ('iqn.2000-05.com.3pardata:' '21810002ac00383d'), 'linkState': 4} FAKE_ISCSI_PORT_V6 = {'portPos': {'node': 8, 'slot': 1, 'cardPort': 2}, 'protocol': 2, 'mode': 2, 'IPAddr': '2001:db8:abcd:12:ffff:ffff:ffff:ff02', 'iSCSIName': ('iqn.2000-05.com.3pardata:' '21810002ac00383e'), 'linkState': 4} volume_snapshot = {'name': VOLUME_NAME, 'id': VOLUME_ID, 'display_name': 'Foo Volume', 'size': 2, 'host': FAKE_CINDER_HOST, 'volume_type': None, 'volume_type_id': None} volume = fake_volume.fake_volume_obj( 
context.get_admin_context(), name=VOLUME_NAME, id=VOLUME_ID, display_name='Foo Volume', size=2, host=FAKE_CINDER_HOST, volume_type=None, volume_type_id=None, multiattach=False) volume_name_id = fake_volume.fake_volume_obj( context.get_admin_context(), id=VOLUME_ID, _name_id='2f823bdc-e36e-4dc8-bd15-de1c7a28ff31', size=2, host=FAKE_CINDER_HOST, volume_type=None, volume_type_id=None) volume_src_cg = {'name': SRC_CG_VOLUME_NAME, 'id': SRC_CG_VOLUME_ID, 'display_name': 'Foo Volume', 'size': 2, 'host': FAKE_CINDER_HOST, 'volume_type': None, 'volume_type_id': None} volume_replicated = fake_volume.fake_volume_obj( context.get_admin_context(), name=VOLUME_NAME, id=VOLUME_ID, display_name='Foo Volume', replication_status='disabled', provider_location = CLIENT_ID, size=2, host=FAKE_CINDER_HOST, volume_type = 'replicated', volume_type_id = VOLUME_TYPE_ID_REPLICATED, migration_status = None) volume_tiramisu = {'name': VOLUME_NAME, 'id': VOLUME_ID, 'display_name': 'Foo Volume', 'replication_status': 'disabled', 'provider_location': CLIENT_ID, 'size': 2, 'host': FAKE_CINDER_HOST, 'volume_type': 'group_replication', 'volume_type_id': VOLUME_TYPE_ID_TIRAMISU} replication_targets = [{'backend_id': REPLICATION_BACKEND_ID, 'cpg_map': HPE3PAR_CPG_MAP, 'hpe3par_api_url': 'https://1.1.1.1/api/v1', 'hpe3par_username': HPE3PAR_USER_NAME, 'hpe3par_password': HPE3PAR_USER_PASS, 'san_ip': HPE3PAR_SAN_IP, 'san_login': HPE3PAR_USER_NAME, 'san_password': HPE3PAR_USER_PASS, 'san_ssh_port': HPE3PAR_SAN_SSH_PORT, 'ssh_conn_timeout': HPE3PAR_SAN_SSH_CON_TIMEOUT, 'san_private_key': HPE3PAR_SAN_SSH_PRIVATE}] list_rep_targets = [{'backend_id': 'target'}] volume_encrypted = fake_volume.fake_volume_obj( context.get_admin_context(), name=VOLUME_NAME, id=VOLUME_ID, display_name='Foo Volume', size=2, host=FAKE_CINDER_HOST, volume_type=None, volume_type_id=None, encryption_key_id=uuidutils.generate_uuid()) volume_dedup_compression = {'name': VOLUME_NAME, 'id': VOLUME_ID, 'display_name': 'Foo Volume', 'size': 16, 'host': FAKE_CINDER_HOST, 'volume_type': 'dedup_compression', 'volume_type_id': VOL_TYPE_ID_DEDUP_COMPRESS} volume_dedup = {'name': VOLUME_NAME, 'id': VOLUME_ID, 'display_name': 'Foo Volume', 'size': 2, 'host': FAKE_CINDER_HOST, 'volume_type': 'dedup', 'volume_type_id': VOLUME_TYPE_ID_DEDUP} volume_pool = {'name': VOLUME_NAME, 'id': VOLUME_ID, 'display_name': 'Foo Volume', 'size': 2, 'host': volume_utils.append_host(FAKE_HOST, HPE3PAR_CPG2), 'volume_type': None, 'volume_type_id': None} volume_qos = {'name': VOLUME_NAME, 'id': VOLUME_ID, 'display_name': 'Foo Volume', 'size': 2, 'host': FAKE_CINDER_HOST, 'volume_type': None, 'volume_type_id': 'gold'} volume_flash_cache = {'name': VOLUME_NAME, 'id': VOLUME_ID, 'display_name': 'Foo Volume', 'size': 2, 'host': FAKE_CINDER_HOST, 'volume_type': None, 'volume_type_id': VOLUME_TYPE_ID_FLASH_CACHE} volume_hos = {'name': VOLUME_NAME, 'id': VOLUME_ID, 'display_name': 'Foo Volume', 'size': 2, 'host': FAKE_CINDER_HOST, 'volume_type': None, 'volume_type_id': 'hos'} snapshot_volume = {'name': VOLUME_NAME, 'id': VOLUME_ID_SNAP, 'display_name': 'Foo Volume', 'size': 2, 'host': FAKE_CINDER_HOST, 'volume_type': None, 'volume_type_id': None} snapshot = {'name': SNAPSHOT_NAME, 'id': SNAPSHOT_ID, 'user_id': USER_ID, 'project_id': PROJECT_ID, 'volume_id': VOLUME_ID_SNAP, 'volume_name': VOLUME_NAME, 'status': fields.SnapshotStatus.CREATING, 'progress': '0%', 'volume_size': 2, 'display_name': 'fakesnap', 'display_description': FAKE_DESC, 'volume': snapshot_volume} snapshot_name_id = {'id': 
SNAPSHOT_ID, 'volume_id': volume_name_id.id, 'volume_size': 2, 'volume': volume_name_id, 'display_name': 'display-name', 'display_description': 'description', 'volume_name': 'name'} snapshot_obj = fake_snapshot.fake_snapshot_obj( context.get_admin_context(), name=SNAPSHOT_NAME, id=SNAPSHOT_ID, display_name='Foo Snapshot', volume_size=2, volume_id=VOLUME_ID_SNAP) wwn = ["123456789012345", "123456789054321"] connector = {'ip': '10.0.0.2', 'initiator': 'iqn.1993-08.org.debian:01:222', 'wwpns': [wwn[0], wwn[1]], 'wwnns': ["223456789012345", "223456789054321"], 'host': FAKE_HOST, 'multipath': False} connector_multipath_enabled = {'ip': '10.0.0.2', 'initiator': ('iqn.1993-08.org' '.debian:01:222'), 'wwpns': [wwn[0], wwn[1]], 'wwnns': ["223456789012345", "223456789054321"], 'host': FAKE_HOST, 'multipath': True} volume_type = {'name': 'gold', 'deleted': False, 'updated_at': None, 'extra_specs': {'cpg': HPE3PAR_CPG2, 'qos:maxIOPS': '1000', 'qos:maxBWS': '50', 'qos:minIOPS': '100', 'qos:minBWS': '25', 'qos:latency': '25', 'qos:priority': 'low'}, 'deleted_at': None, 'id': 'gold'} volume_type_replicated = {'name': 'replicated', 'deleted': False, 'updated_at': None, 'extra_specs': {'replication_enabled': ' True'}, 'deleted_at': None, 'id': VOLUME_TYPE_ID_REPLICATED} volume_type_dedup_compression = {'name': 'dedup', 'deleted': False, 'updated_at': None, 'extra_specs': {'cpg': HPE3PAR_CPG2, 'provisioning': 'dedup', 'compression': 'true'}, 'deleted_at': None, 'id': VOL_TYPE_ID_DEDUP_COMPRESS} volume_type_dedup = {'name': 'dedup', 'deleted': False, 'updated_at': None, 'extra_specs': {'cpg': HPE3PAR_CPG2, 'provisioning': 'dedup'}, 'deleted_at': None, 'id': VOLUME_TYPE_ID_DEDUP} volume_type_tiramisu = {'name': 'dedup', 'deleted': False, 'updated_at': None, 'extra_specs': {'cpg': HPE3PAR_CPG2, 'hpe3par:group_replication': ' True', 'replication_enabled': ' True', 'replication:mode': 'sync'}, 'deleted_at': None, 'id': VOLUME_TYPE_ID_TIRAMISU} volume_type_flash_cache = {'name': 'flash-cache-on', 'deleted': False, 'updated_at': None, 'extra_specs': {'cpg': HPE3PAR_CPG2, 'hpe3par:flash_cache': 'true'}, 'deleted_at': None, 'id': VOLUME_TYPE_ID_FLASH_CACHE} volume_type_hos = {'name': 'hos', 'deleted': False, 'updated_at': None, 'extra_specs': {'convert_to_base': False}, 'deleted_at': None, 'id': 'hos'} flash_cache_3par_keys = {'flash_cache': 'true'} cpgs = [ {'SAGrowth': {'LDLayout': {'diskPatterns': [{'diskType': 2}]}, 'incrementMiB': 8192}, 'SAUsage': {'rawTotalMiB': 24576, 'rawUsedMiB': 768, 'totalMiB': 8192, 'usedMiB': 256}, 'SDGrowth': {'LDLayout': {'RAIDType': 4, 'diskPatterns': [{'diskType': 2}]}, 'incrementMiB': 32768}, 'SDUsage': {'rawTotalMiB': 49152, 'rawUsedMiB': 1023, 'totalMiB': 36864, 'usedMiB': 1024 * 1}, 'UsrUsage': {'rawTotalMiB': 57344, 'rawUsedMiB': 43349, 'totalMiB': 43008, 'usedMiB': 1024 * 20}, 'additionalStates': [], 'degradedStates': [], 'failedStates': [], 'id': 5, 'name': HPE3PAR_CPG, 'numFPVVs': 2, 'numTPVVs': 0, 'numTDVVs': 1, 'state': 1, 'uuid': '29c214aa-62b9-41c8-b198-543f6cf24edf'}] TASK_DONE = 1 TASK_ACTIVE = 2 STATUS_DONE = {'status': 1} STATUS_ACTIVE = {'status': 2} mock_client_conf = { 'PORT_MODE_TARGET': 2, 'PORT_STATE_READY': 4, 'PORT_PROTO_ISCSI': 2, 'PORT_PROTO_FC': 1, 'PORT_TYPE_HOST': 1, 'TASK_DONE': TASK_DONE, 'TASK_ACTIVE': TASK_ACTIVE, 'HOST_EDIT_ADD': 1, 'HOST_EDIT_REMOVE': 2, 'CHAP_INITIATOR': 1, 'CHAP_TARGET': 2, 'getPorts.return_value': { 'members': FAKE_FC_PORTS + [FAKE_ISCSI_PORT] + [FAKE_ISCSI_PORT_V6] } } RETYPE_VVS_NAME = "yourvvs" RETYPE_HOST = { u'host': 
u'mark-stack1@3parfc', u'capabilities': { 'QoS_support': True, u'location_info': u'HPE3PARDriver:1234567:MARK_TEST_CPG', u'timestamp': u'2014-06-04T19:03:32.485540', u'allocated_capacity_gb': 0, u'volume_backend_name': u'3parfc', u'free_capacity_gb': u'infinite', u'driver_version': u'3.0.0', u'total_capacity_gb': u'infinite', u'reserved_percentage': 0, u'vendor_name': u'Hewlett Packard Enterprise', u'storage_protocol': u'FC' } } RETYPE_HOST_NOT3PAR = { u'host': u'mark-stack1@3parfc', u'capabilities': { u'location_info': u'XXXDriverXXX:1610771:MARK_TEST_CPG', } } RETYPE_QOS_SPECS = {'maxIOPS': '1000', 'maxBWS': '50', 'minIOPS': '100', 'minBWS': '25', 'latency': '25', 'priority': 'high'} RETYPE_VOLUME_TYPE_ID = "FakeVolId" RETYPE_VOLUME_TYPE_0 = { 'name': 'red', 'id': RETYPE_VOLUME_TYPE_ID, 'extra_specs': { 'cpg': HPE3PAR_CPG, 'snap_cpg': HPE3PAR_CPG_SNAP, 'vvs': RETYPE_VVS_NAME, 'qos': RETYPE_QOS_SPECS, 'tpvv': True, 'tdvv': False, 'volume_type': volume_type } } RETYPE_VOLUME_TYPE_1 = { 'name': 'white', 'id': RETYPE_VOLUME_TYPE_ID, 'extra_specs': { 'cpg': HPE3PAR_CPG, 'snap_cpg': HPE3PAR_CPG_SNAP, 'vvs': VVS_NAME, 'qos': QOS, 'tpvv': True, 'tdvv': False, 'volume_type': volume_type } } RETYPE_VOLUME_TYPE_2 = { 'name': 'blue', 'id': RETYPE_VOLUME_TYPE_ID, 'extra_specs': { 'cpg': HPE3PAR_CPG_QOS, 'snap_cpg': HPE3PAR_CPG_SNAP, 'vvs': RETYPE_VVS_NAME, 'qos': RETYPE_QOS_SPECS, 'tpvv': True, 'tdvv': False, 'volume_type': volume_type } } RETYPE_VOLUME_TYPE_3 = { 'name': 'purple', 'id': RETYPE_VOLUME_TYPE_ID, 'extra_specs': { 'cpg': HPE3PAR_CPG_QOS, 'snap_cpg': HPE3PAR_CPG_SNAP, 'vvs': RETYPE_VVS_NAME, 'qos': RETYPE_QOS_SPECS, 'tpvv': False, 'tdvv': True, 'volume_type': volume_type } } RETYPE_VOLUME_TYPE_BAD_PERSONA = { 'name': 'bad_persona', 'id': 'any_id', 'extra_specs': { 'hpe3par:persona': '99 - invalid' } } RETYPE_VOLUME_TYPE_BAD_CPG = { 'name': 'bad_cpg', 'id': 'any_id', 'extra_specs': { 'cpg': 'bogus', 'snap_cpg': 'bogus', 'hpe3par:persona': '2 - Generic-ALUA' } } MANAGE_VOLUME_INFO = { 'userCPG': 'testUserCpg0', 'snapCPG': 'testSnapCpg0', 'provisioningType': 1, 'comment': "{'display_name': 'Foo Volume'}" } MV_INFO_WITH_NO_SNAPCPG = { 'userCPG': 'testUserCpg0', 'provisioningType': 1, 'comment': "{'display_name': 'Foo Volume'}" } RETYPE_TEST_COMMENT = "{'retype_test': 'test comment'}" RETYPE_VOLUME_INFO_0 = { 'name': VOLUME_NAME, 'id': VOLUME_ID, 'display_name': 'Retype Vol0', 'size': 1, 'host': RETYPE_HOST, 'userCPG': 'testUserCpg0', 'snapCPG': 'testSnapCpg0', 'provisioningType': 1, 'comment': RETYPE_TEST_COMMENT } RETYPE_TEST_COMMENT_1 = "{'retype_test': 'test comment 1'}" RETYPE_VOLUME_INFO_1 = { 'name': VOLUME_NAME, 'id': VOLUME_ID, 'display_name': 'Retype Vol1', 'size': 1, 'host': RETYPE_HOST, 'userCPG': HPE3PAR_CPG, 'snapCPG': HPE3PAR_CPG_SNAP, 'provisioningType': 1, 'comment': RETYPE_TEST_COMMENT } RETYPE_TEST_COMMENT_2 = "{'retype_test': 'test comment 2'}" RETYPE_VOLUME_INFO_2 = { 'name': VOLUME_NAME, 'id': VOLUME_ID, 'display_name': 'Retype Vol2', 'size': 1, 'host': RETYPE_HOST, 'userCPG': HPE3PAR_CPG, 'snapCPG': HPE3PAR_CPG_SNAP, 'provisioningType': 3, 'comment': RETYPE_TEST_COMMENT } # Test for when we don't get a snapCPG. 
    RETYPE_VOLUME_INFO_NO_SNAP = {
        'name': VOLUME_NAME,
        'id': VOLUME_ID,
        'display_name': 'Retype Vol2',
        'size': 1,
        'host': RETYPE_HOST,
        'userCPG': 'testUserCpg2',
        'provisioningType': 1,
        'comment': '{}'
    }

    RETYPE_CONF = {
        'TASK_ACTIVE': TASK_ACTIVE,
        'TASK_DONE': TASK_DONE,
        'getTask.return_value': STATUS_DONE,
        'getStorageSystemInfo.return_value': {'id': CLIENT_ID,
                                              'serialNumber': '1234567'},
        'getVolume.return_value': RETYPE_VOLUME_INFO_0,
        'modifyVolume.return_value': ("anyResponse", {'taskid': 1})
    }

    # 3PAR retype currently doesn't use the diff.  Existing code and fresh
    # info from the array work better for the most part.  Some use of the
    # diff was intentionally removed to make _retype more usable for other
    # use cases.
    RETYPE_DIFF = None

    wsapi_version_312 = {'major': 1, 'build': 30102422,
                         'minor': 3, 'revision': 1}

    wsapi_version_for_compression = {'major': 1, 'build': 30301215,
                                     'minor': 6, 'revision': 0}

    wsapi_version_for_dedup = {'major': 1, 'build': 30201120,
                               'minor': 4, 'revision': 1}

    wsapi_version_for_flash_cache = {'major': 1, 'build': 30201200,
                                     'minor': 4, 'revision': 2}

    wsapi_version_for_remote_copy = {'major': 1, 'build': 30202290,
                                     'minor': 5, 'revision': 0}

    wsapi_version_2023 = {'major': 1, 'build': 100000050,
                          'minor': 10, 'revision': 0}

    wsapi_version_2025 = {'major': 1, 'build': 100500031,
                          'minor': 15, 'revision': 0}

    wsapi_version_clone = {'major': 1, 'build': 40600052,
                           'minor': 10, 'revision': 0}

    # Use this to point to latest version of wsapi
    wsapi_version_latest = wsapi_version_for_compression

    standard_login = [
        mock.call.login(HPE3PAR_USER_NAME, HPE3PAR_USER_PASS)]

    get_id_login = [
        mock.call.login(HPE3PAR_USER_NAME, HPE3PAR_USER_PASS),
        mock.call.getWsApiVersion(),
        mock.call.logout(),
        mock.call.login(HPE3PAR_USER_NAME, HPE3PAR_USER_PASS),
        mock.call.getStorageSystemInfo()]

    standard_logout = [
        mock.call.logout()]

    @staticmethod
    def fake_volume_object(vol_id='d03338a9-9115-48a3-8dfc-35cdfcdc15a7',
                           **kwargs):
        values = dict(id=vol_id,
                      name='volume-%s' % vol_id,
                      display_name='Foo Volume',
                      size=2,
                      host='fakehost@foo#OpenStackCPG',
                      volume_type=None,
                      volume_type_id=None)
        values.update(kwargs)
        return fake_volume.fake_volume_obj(context.get_admin_context(),
                                           **values)

    class fake_group_object(object):
        def __init__(self, grp_id='6044fedf-c889-4752-900f-2039d247a5df'):
            self.id = grp_id
            self.volume_type_ids = ['d03338a9-9115-48a3-8dfc-33333333333']
            self.volume_types = ['d03338a9-9115-48a3-8dfc-33333333333']
            self.name = 'cg_name'
            self.group_snapshot_id = None
            self.host = 'fakehost@foo#OpenStackCPG'
            self.is_replicated = False
            self.description = 'consistency group'

    class fake_group_snapshot_object(object):
        def __init__(self, cgsnap_id='e91c5ed5-daee-4e84-8724-1c9e31e7a1f2'):
            self.id = cgsnap_id
            self.group_id = '6044fedf-c889-4752-900f-2039d247a5df'
            self.description = 'group_snapshot'
            self.readOnly = False

    def setup_configuration(self):
        configuration = mock.MagicMock()
        configuration.hpe3par_debug = False
        configuration.hpe3par_username = HPE3PAR_USER_NAME
        configuration.hpe3par_password = HPE3PAR_USER_PASS
        configuration.hpe3par_api_url = 'https://1.1.1.1/api/v1'
        configuration.hpe3par_cpg = [HPE3PAR_CPG, HPE3PAR_CPG2]
        configuration.hpe3par_cpg_snap = HPE3PAR_CPG_SNAP
        configuration.target_ip_address = '1.1.1.2'
        configuration.target_port = '1234'
        configuration.san_ip = HPE3PAR_SAN_IP
        configuration.san_login = HPE3PAR_USER_NAME
        configuration.san_password = HPE3PAR_USER_PASS
        configuration.san_ssh_port = HPE3PAR_SAN_SSH_PORT
        configuration.ssh_conn_timeout = HPE3PAR_SAN_SSH_CON_TIMEOUT
        configuration.san_private_key = HPE3PAR_SAN_SSH_PRIVATE
        configuration.hpe3par_snapshot_expiration = ""
        configuration.hpe3par_snapshot_retention = ""
        configuration.hpe3par_iscsi_ips = []
        configuration.hpe3par_iscsi_chap_enabled = False
        configuration.goodness_function = GOODNESS_FUNCTION
        configuration.filter_function = FILTER_FUNCTION
        configuration.image_volume_cache_enabled = False
        configuration.replication_device = None
        configuration.hpe3par_target_nsp = None
        configuration.unique_fqdn_network = True
        return configuration

    @mock.patch('hpe3parclient.client.HPE3ParClient')
    def setup_mock_client(self, _m_client, driver, conf=None, m_conf=None,
                          is_primera=False,
                          wsapi_version=wsapi_version_latest):
        _m_client = _m_client.return_value

        # Configure the base constants, defaults etc...
        _m_client.configure_mock(**self.mock_client_conf)

        _m_client.getWsApiVersion.return_value = wsapi_version
        _m_client.is_primera_array.return_value = is_primera

        # If m_conf, drop those over the top of the base_conf.
        if m_conf is not None:
            _m_client.configure_mock(**m_conf)

        if conf is None:
            conf = self.setup_configuration()
        self.driver = driver(configuration=conf)
        self.driver.do_setup(None)
        return _m_client

    @mock.patch.object(volume_types, 'get_volume_type')
    def migrate_volume_attached(self, _mock_volume_types):
        _mock_volume_types.return_value = self.RETYPE_VOLUME_TYPE_1
        mock_client = self.setup_driver(mock_conf=self.RETYPE_CONF)

        volume = {'name': HPE3PARBaseDriver.VOLUME_NAME,
                  'volume_type_id': None,
                  'id': HPE3PARBaseDriver.CLONE_ID,
                  'display_name': 'Foo Volume',
                  'size': 2,
                  'status': 'available',
                  'host': HPE3PARBaseDriver.FAKE_HOST,
                  'source_volid': HPE3PARBaseDriver.VOLUME_ID}
        mock_client.tuneVolume.return_value = ({'taskid': 1})

        with mock.patch.object(hpecommon.HPE3PARCommon,
                               '_create_client') as mock_create_client:
            mock_create_client.return_value = mock_client
            common = self.driver._login()

            volume_name_3par = common._encode_name(volume['id'])
            osv_matcher = 'osv-' + volume_name_3par

            loc_info = 'HPE3PARDriver:1234567:CPG-FC1'

            protocol = "FC"
            if self.properties['driver_volume_type'] == "iscsi":
                protocol = "iSCSI"

            host = {'host': 'stack@3parfc1',
                    'capabilities': {'location_info': loc_info,
                                     'storage_protocol': protocol}}

            result = self.driver.migrate_volume(context.get_admin_context(),
                                                volume, host)

            new_comment = Comment({
                "qos": {},
                "retype_test": "test comment",
            })
            expected = [
                mock.call.modifyVolume(
                    osv_matcher,
                    {'comment': new_comment,
                     'snapCPG': 'OpenStackCPGSnap'}),
                mock.call.tuneVolume(osv_matcher, 1,
                                     {'action': 6,
                                      'userCPG': 'OpenStackCPG',
                                      'conversionOperation': 1,
                                      'compression': False}),
                mock.call.getTask(1)
            ]
            mock_client.assert_has_calls(expected)
            self.assertIsNotNone(result)
            self.assertEqual((True, {'host': 'stack@3parfc1#OpenStackCPG'}),
                             result)


@ddt.ddt
class TestHPE3PARDriverBase(HPE3PARBaseDriver):

    def setup_driver(self, config=None, mock_conf=None, wsapi_version=None):
        self.ctxt = context.get_admin_context()

        mock_client = self.setup_mock_client(
            conf=config,
            m_conf=mock_conf,
            driver=hpedriverbase.HPE3PARDriverBase)

        if wsapi_version:
            mock_client.getWsApiVersion.return_value = (
                wsapi_version)
        else:
            mock_client.getWsApiVersion.return_value = (
                self.wsapi_version_latest)

        expected = [
            mock.call.getCPG(HPE3PAR_CPG),
            mock.call.getCPG(HPE3PAR_CPG2)]
        mock_client.assert_has_calls(
            self.standard_login +
            expected +
            self.standard_logout)
        mock_client.reset_mock()
        return mock_client

    @mock.patch('hpe3parclient.version', "3.0.9")
    def test_unsupported_client_version(self):
        self.assertRaises(exception.InvalidInput,
                          self.setup_driver)
    def test_task_waiter(self):

        task_statuses = [self.STATUS_ACTIVE, self.STATUS_ACTIVE]

        def side_effect(*args):
            return task_statuses and task_statuses.pop(0) or self.STATUS_DONE

        conf = {'getTask.side_effect': side_effect}
        mock_client = self.setup_driver(mock_conf=conf)
        task_id = 1234
        interval = .001

        with mock.patch.object(hpecommon.HPE3PARCommon,
                               '_create_client') as mock_create_client:
            mock_create_client.return_value = mock_client
            common = self.driver._login()
            waiter = common.TaskWaiter(mock_client, task_id, interval)
            status = waiter.wait_for_task()

            expected = [
                mock.call.getTask(task_id),
                mock.call.getTask(task_id),
                mock.call.getTask(task_id)
            ]
            mock_client.assert_has_calls(expected)
            self.assertEqual(self.STATUS_DONE, status)

    # (i) wsapi version is old/default
    # (ii) wsapi version is 2023, then snapCPG isn't required
    @ddt.data({'wsapi_version': None},
              {'wsapi_version': HPE3PARBaseDriver.wsapi_version_2023})
    @ddt.unpack
    def test_create_volume(self, wsapi_version):
        # setup_mock_client drive with default configuration
        # and return the mock HTTP 3PAR client
        mock_client = self.setup_driver(wsapi_version=wsapi_version)
        with mock.patch.object(hpecommon.HPE3PARCommon,
                               '_create_client') as mock_create_client:
            mock_create_client.return_value = mock_client
            if not wsapi_version:
                # (i) old/default
                self.driver.create_volume(self.volume)
            else:
                # (ii) wsapi 2023
                common = self.driver._login()
                common.create_volume(self.volume)
            comment = Comment({
                "display_name": "Foo Volume",
                "type": "OpenStack",
                "name": "volume-d03338a9-9115-48a3-8dfc-35cdfcdc15a7",
                "volume_id": "d03338a9-9115-48a3-8dfc-35cdfcdc15a7"})

            optional = {'comment': comment,
                        'tpvv': True,
                        'tdvv': False}
            if not wsapi_version:
                optional['snapCPG'] = HPE3PAR_CPG_SNAP

            expected = [
                mock.call.createVolume(
                    self.VOLUME_3PAR_NAME,
                    HPE3PAR_CPG,
                    2048, optional)]
            mock_client.assert_has_calls(expected)

    def test_create_volume_in_generic_group(self):
        # setup_mock_client drive with default configuration
        # and return the mock HTTP 3PAR client
        mock_client = self.setup_driver()
        volume = {'name': self.VOLUME_NAME,
                  'id': self.VOLUME_ID,
                  'display_name': 'Foo Volume',
                  'size': 2,
                  'group_id': '87101633-13e0-41ee-813b-deabc372267b',
                  'host': self.FAKE_CINDER_HOST,
                  'volume_type': None,
                  'volume_type_id': None}
        with mock.patch.object(hpecommon.HPE3PARCommon,
                               '_create_client') as mock_create_client:
            mock_create_client.return_value = mock_client
            self.driver.create_volume(volume)
            comment = Comment({
                "display_name": "Foo Volume",
                "type": "OpenStack",
                "name": "volume-d03338a9-9115-48a3-8dfc-35cdfcdc15a7",
                "volume_id": "d03338a9-9115-48a3-8dfc-35cdfcdc15a7"})

            expected = [
                mock.call.createVolume(
                    self.VOLUME_3PAR_NAME,
                    HPE3PAR_CPG,
                    2048, {
                        'comment': comment,
                        'tpvv': True,
                        'tdvv': False,
                        'snapCPG': HPE3PAR_CPG_SNAP})]
            mock_client.assert_has_calls(expected)

    def test_create_volume_in_pool(self):
        # setup_mock_client drive with default configuration
        # and return the mock HTTP 3PAR client
        mock_client = self.setup_driver()
        with mock.patch.object(hpecommon.HPE3PARCommon,
                               '_create_client') as mock_create_client:
            mock_create_client.return_value = mock_client
            return_model = self.driver.create_volume(self.volume_pool)
            comment = Comment({
                "display_name": "Foo Volume",
                "type": "OpenStack",
                "name": "volume-d03338a9-9115-48a3-8dfc-35cdfcdc15a7",
                "volume_id": "d03338a9-9115-48a3-8dfc-35cdfcdc15a7"})

            expected = [
                mock.call.createVolume(
                    self.VOLUME_3PAR_NAME,
                    HPE3PAR_CPG2,
                    2048, {
                        'comment': comment,
                        'tpvv': True,
                        'tdvv': False,
                        'snapCPG': HPE3PAR_CPG_SNAP})]
            mock_client.assert_has_calls(expected)
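            # Note: create_volume is not expected to return a model update
            # for a plain pool-backed volume, which the assertion below
            # verifies.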
self.assertIsNone(return_model) @mock.patch.object(volume_types, 'get_volume_type') def test_unsupported_dedup_volume_type(self, _mock_volume_types): mock_client = self.setup_driver(wsapi_version=self.wsapi_version_312) _mock_volume_types.return_value = { 'name': 'dedup', 'extra_specs': { 'cpg': HPE3PAR_CPG_QOS, 'snap_cpg': HPE3PAR_CPG_SNAP, 'vvs_name': self.VVS_NAME, 'qos': self.QOS, 'provisioning': 'dedup', 'volume_type': self.volume_type_dedup}} with mock.patch.object(hpecommon.HPE3PARCommon, '_create_client') as mock_create_client: mock_create_client.return_value = mock_client common = self.driver._login() self.assertRaises(exception.InvalidInput, common.get_volume_settings_from_type_id, self.VOLUME_TYPE_ID_DEDUP, "mock") @mock.patch.object(volume_types, 'get_volume_type') def test_get_snap_cpg_from_volume_type(self, _mock_volume_types): mock_client = self.setup_driver() expected_type_snap_cpg = "type_snap_cpg" _mock_volume_types.return_value = { 'name': 'gold', 'extra_specs': { 'cpg': HPE3PAR_CPG, 'snap_cpg': expected_type_snap_cpg, 'volume_type': self.volume_type}} with mock.patch.object(hpecommon.HPE3PARCommon, '_create_client') as mock_create_client: mock_create_client.return_value = mock_client common = self.driver._login() result = common.get_volume_settings_from_type_id( "mock", self.driver.configuration.hpe3par_cpg) self.assertEqual(expected_type_snap_cpg, result['snap_cpg']) @mock.patch.object(volume_types, 'get_volume_type') def test_get_snap_cpg_from_volume_type_cpg(self, _mock_volume_types): mock_client = self.setup_driver() expected_cpg = 'use_extra_specs_cpg' _mock_volume_types.return_value = { 'name': 'gold', 'extra_specs': { 'cpg': expected_cpg, 'volume_type': self.volume_type}} with mock.patch.object(hpecommon.HPE3PARCommon, '_create_client') as mock_create_client: mock_create_client.return_value = mock_client common = self.driver._login() result = common.get_volume_settings_from_type_id( "mock", self.driver.configuration.hpe3par_cpg) self.assertEqual(self.driver.configuration.hpe3par_cpg_snap, result['snap_cpg']) @mock.patch.object(volume_types, 'get_volume_type') def test_get_snap_cpg_from_volume_type_conf_snap_cpg( self, _mock_volume_types): _mock_volume_types.return_value = { 'name': 'gold', 'extra_specs': { 'volume_type': self.volume_type}} conf = self.setup_configuration() expected_snap_cpg = conf.hpe3par_cpg_snap mock_client = self.setup_driver(config=conf) with mock.patch.object(hpecommon.HPE3PARCommon, '_create_client') as mock_create_client: mock_create_client.return_value = mock_client common = self.driver._login() result = common.get_volume_settings_from_type_id( "mock", self.driver.configuration.hpe3par_cpg) self.assertEqual(expected_snap_cpg, result['snap_cpg']) @mock.patch.object(volume_types, 'get_volume_type') def test_get_snap_cpg_from_volume_type_conf_cpg( self, _mock_volume_types): _mock_volume_types.return_value = { 'name': 'gold', 'extra_specs': { 'volume_type': self.volume_type}} conf = self.setup_configuration() conf.hpe3par_cpg_snap = None expected_cpg = conf.hpe3par_cpg mock_client = self.setup_driver(config=conf) with mock.patch.object(hpecommon.HPE3PARCommon, '_create_client') as mock_create_client: mock_create_client.return_value = mock_client common = self.driver._login() result = common.get_volume_settings_from_type_id( "mock", self.driver.configuration.hpe3par_cpg) self.assertEqual(expected_cpg, result['snap_cpg']) # (i) normal value; eg. 7, 10, etc # (ii) small value less than 1; eg. 
0.1, 0.02, etc @ddt.data({'latency': 25}, {'latency': 0.2}) @ddt.unpack @mock.patch.object(volume_types, 'get_volume_type') def test_create_volume_qos(self, _mock_volume_types, latency): # setup_mock_client drive with default configuration # and return the mock HTTP 3PAR client mock_client = self.setup_driver() QOS = self.QOS.copy() QOS['qos:latency'] = latency _mock_volume_types.return_value = { 'name': 'gold', 'extra_specs': { 'cpg': HPE3PAR_CPG_QOS, 'snap_cpg': HPE3PAR_CPG_SNAP, 'vvs_name': self.VVS_NAME, 'qos': QOS, 'tpvv': True, 'tdvv': False, 'volume_type': self.volume_type}} with mock.patch.object(hpecommon.HPE3PARCommon, '_create_client') as mock_create_client: mock_create_client.return_value = mock_client return_model = self.driver.create_volume(self.volume_qos) comment = Comment({ "volume_type_name": "gold", "display_name": "Foo Volume", "name": "volume-d03338a9-9115-48a3-8dfc-35cdfcdc15a7", "volume_type_id": "gold", "volume_id": "d03338a9-9115-48a3-8dfc-35cdfcdc15a7", "qos": {}, "type": "OpenStack"}) expected = [ mock.call.getCPG(HPE3PAR_CPG), mock.call.createVolume( self.VOLUME_3PAR_NAME, HPE3PAR_CPG, 2048, { 'comment': comment, 'tpvv': True, 'tdvv': False, 'snapCPG': HPE3PAR_CPG_SNAP})] mock_client.assert_has_calls(expected) self.assertIsNone(return_model) @mock.patch.object(volume_types, 'get_volume_type') def test_create_volume_replicated_periodic(self, _mock_volume_types): # setup_mock_client drive with default configuration # and return the mock HTTP 3PAR client conf = self.setup_configuration() self.replication_targets[0]['replication_mode'] = 'periodic' conf.replication_device = self.replication_targets mock_client = self.setup_driver(config=conf) mock_client.getStorageSystemInfo.return_value = ( {'id': self.CLIENT_ID}) mock_client.getRemoteCopyGroup.side_effect = ( hpeexceptions.HTTPNotFound) mock_client.getCPG.return_value = {'domain': None} _mock_volume_types.return_value = { 'name': 'replicated', 'extra_specs': { 'replication_enabled': ' True', 'replication:mode': 'periodic', 'replication:sync_period': '900', 'volume_type': self.volume_type_replicated}} with mock.patch.object( hpecommon.HPE3PARCommon, '_create_client') as mock_create_client: mock_create_client.return_value = mock_client return_model = self.driver.create_volume(self.volume_replicated) comment = Comment({ "volume_type_name": "replicated", "display_name": "Foo Volume", "name": "volume-d03338a9-9115-48a3-8dfc-35cdfcdc15a7", "volume_type_id": "be9181f1-4040-46f2-8298-e7532f2bf9db", "volume_id": "d03338a9-9115-48a3-8dfc-35cdfcdc15a7", "qos": {}, "type": "OpenStack"}) backend_id = self.replication_targets[0]['backend_id'] expected = [ mock.call.createVolume( self.VOLUME_3PAR_NAME, HPE3PAR_CPG, 2048, { 'comment': comment, 'tpvv': True, 'tdvv': False, 'snapCPG': HPE3PAR_CPG_SNAP}), mock.call.getRemoteCopyGroup(self.RCG_3PAR_NAME), mock.call.getCPG(HPE3PAR_CPG), mock.call.createRemoteCopyGroup( self.RCG_3PAR_NAME, [{'userCPG': HPE3PAR_CPG_REMOTE, 'targetName': backend_id, 'mode': PERIODIC_MODE, 'snapCPG': HPE3PAR_CPG_REMOTE}], {'localUserCPG': HPE3PAR_CPG, 'localSnapCPG': HPE3PAR_CPG_SNAP}), mock.call.addVolumeToRemoteCopyGroup( self.RCG_3PAR_NAME, self.VOLUME_3PAR_NAME, [{'secVolumeName': self.VOLUME_3PAR_NAME, 'targetName': backend_id}], optional={'volumeAutoCreation': True}), mock.call.modifyRemoteCopyGroup( self.RCG_3PAR_NAME, {'targets': [{'syncPeriod': SYNC_PERIOD, 'targetName': backend_id}]}), mock.call.startRemoteCopy(self.RCG_3PAR_NAME)] mock_client.assert_has_calls(expected) 
self.assertEqual(return_model['replication_status'], 'enabled') @mock.patch.object(volume_types, 'get_volume_type') def test_delete_volume_replicated_failedover(self, _mock_volume_types): # setup_mock_client drive with default configuration # and return the mock HTTP 3PAR client conf = self.setup_configuration() self.replication_targets[0]['replication_mode'] = 'periodic' conf.replication_device = self.replication_targets mock_client = self.setup_driver(config=conf) mock_client.getStorageSystemInfo.return_value = ( {'id': self.CLIENT_ID}) mock_client.getRemoteCopyGroup.return_value = ( {'targets': [{'targetName': 'tgt'}]}) mock_client.getCPG.return_value = {'domain': None} _mock_volume_types.return_value = { 'name': 'replicated', 'extra_specs': { 'replication_enabled': ' True', 'replication:mode': 'periodic', 'replication:sync_period': '900', 'volume_type': self.volume_type_replicated}} with mock.patch.object( hpecommon.HPE3PARCommon, '_create_client') as mock_create_client: mock_create_client.return_value = mock_client volume = copy.deepcopy(self.volume_replicated) volume['status'] = 'available' volume['replication_status'] = 'failed-over' self.driver.delete_volume(volume) rcg_name = self.RCG_3PAR_NAME + ".r" + self.CLIENT_ID expected = [ mock.call.getRemoteCopyGroup(rcg_name), mock.call.toggleRemoteCopyConfigMirror( 'tgt', mirror_config=False), mock.call.stopRemoteCopy(rcg_name), mock.call.removeVolumeFromRemoteCopyGroup( rcg_name, self.VOLUME_3PAR_NAME, removeFromTarget=True), mock.call.removeRemoteCopyGroup(rcg_name), mock.call.deleteVolume(self.VOLUME_3PAR_NAME), mock.call.toggleRemoteCopyConfigMirror( 'tgt', mirror_config=True)] mock_client.assert_has_calls(expected) @mock.patch.object(volume_types, 'get_volume_type') def test_create_volume_replicated_periodic_2023(self, _mock_volume_types): # setup_mock_client drive with default configuration # and return the mock HTTP 3PAR client conf = self.setup_configuration() self.replication_targets[0]['replication_mode'] = 'periodic' conf.replication_device = self.replication_targets mock_client = self.setup_driver(conf, None, self.wsapi_version_2023) mock_client.getStorageSystemInfo.return_value = ( {'id': self.CLIENT_ID}) mock_client.getRemoteCopyGroup.side_effect = ( hpeexceptions.HTTPNotFound) mock_client.getCPG.return_value = {'domain': None} mock_replicated_client = self.setup_driver(conf, None, self.wsapi_version_2023) mock_replicated_client.getStorageSystemInfo.return_value = ( {'id': self.REPLICATION_CLIENT_ID}) _mock_volume_types.return_value = { 'name': 'replicated', 'extra_specs': { 'replication_enabled': ' True', 'replication:mode': 'periodic', 'replication:sync_period': '900', 'volume_type': self.volume_type_replicated}} with mock.patch.object( hpecommon.HPE3PARCommon, '_create_client') as mock_create_client, \ mock.patch.object( hpecommon.HPE3PARCommon, '_create_replication_client') as mock_replication_client: mock_create_client.return_value = mock_client mock_replication_client.return_value = mock_replicated_client common = self.driver._login() return_model = common.create_volume(self.volume_replicated) comment = Comment({ "volume_type_name": "replicated", "display_name": "Foo Volume", "name": "volume-d03338a9-9115-48a3-8dfc-35cdfcdc15a7", "volume_type_id": "be9181f1-4040-46f2-8298-e7532f2bf9db", "volume_id": "d03338a9-9115-48a3-8dfc-35cdfcdc15a7", "qos": {}, "type": "OpenStack"}) backend_id = self.replication_targets[0]['backend_id'] expected = [ mock.call.createVolume( self.VOLUME_3PAR_NAME, HPE3PAR_CPG, 2048, { 'comment': 
comment, 'tpvv': True, 'tdvv': False}), mock.call.getRemoteCopyGroup(self.RCG_3PAR_NAME), mock.call.getCPG(HPE3PAR_CPG), mock.call.createRemoteCopyGroup( self.RCG_3PAR_NAME, [{'userCPG': HPE3PAR_CPG_REMOTE, 'targetName': backend_id, 'mode': PERIODIC_MODE}], {'localUserCPG': HPE3PAR_CPG}), mock.call.addVolumeToRemoteCopyGroup( self.RCG_3PAR_NAME, self.VOLUME_3PAR_NAME, [{'secVolumeName': self.VOLUME_3PAR_NAME, 'targetName': backend_id}], optional={'volumeAutoCreation': True}), mock.call.modifyRemoteCopyGroup( self.RCG_3PAR_NAME, {'targets': [{'syncPeriod': SYNC_PERIOD, 'targetName': backend_id}]}), mock.call.startRemoteCopy(self.RCG_3PAR_NAME)] mock_client.assert_has_calls( self.get_id_login + self.standard_logout + self.standard_login + expected) self.assertEqual({'replication_status': 'enabled', 'provider_location': self.CLIENT_ID}, return_model) @mock.patch.object(volume_types, 'get_volume_type') def test_create_volume_replicated_sync(self, _mock_volume_types): # setup_mock_client drive with default configuration # and return the mock HTTP 3PAR client conf = self.setup_configuration() self.replication_targets[0]['replication_mode'] = 'sync' self.replication_targets[0]['quorum_witness_ip'] = None conf.replication_device = self.replication_targets mock_client = self.setup_driver(config=conf) mock_client.getStorageSystemInfo.return_value = ( {'id': self.CLIENT_ID}) mock_client.getRemoteCopyGroup.side_effect = ( hpeexceptions.HTTPNotFound) mock_client.getCPG.return_value = {'domain': None} _mock_volume_types.return_value = { 'name': 'replicated', 'extra_specs': { 'replication_enabled': ' True', 'replication:mode': 'sync', 'volume_type': self.volume_type_replicated}} with mock.patch.object( hpecommon.HPE3PARCommon, '_create_client') as mock_create_client: mock_create_client.return_value = mock_client return_model = self.driver.create_volume(self.volume_replicated) comment = Comment({ "volume_type_name": "replicated", "display_name": "Foo Volume", "name": "volume-d03338a9-9115-48a3-8dfc-35cdfcdc15a7", "volume_type_id": "be9181f1-4040-46f2-8298-e7532f2bf9db", "volume_id": "d03338a9-9115-48a3-8dfc-35cdfcdc15a7", "qos": {}, "type": "OpenStack"}) backend_id = self.replication_targets[0]['backend_id'] expected = [ mock.call.createVolume( self.VOLUME_3PAR_NAME, HPE3PAR_CPG, 2048, { 'comment': comment, 'tpvv': True, 'tdvv': False, 'snapCPG': HPE3PAR_CPG_SNAP}), mock.call.getRemoteCopyGroup(self.RCG_3PAR_NAME), mock.call.getCPG(HPE3PAR_CPG), mock.call.createRemoteCopyGroup( self.RCG_3PAR_NAME, [{'userCPG': HPE3PAR_CPG_REMOTE, 'targetName': backend_id, 'mode': SYNC_MODE, 'snapCPG': HPE3PAR_CPG_REMOTE}], {'localUserCPG': HPE3PAR_CPG, 'localSnapCPG': HPE3PAR_CPG_SNAP}), mock.call.addVolumeToRemoteCopyGroup( self.RCG_3PAR_NAME, self.VOLUME_3PAR_NAME, [{'secVolumeName': self.VOLUME_3PAR_NAME, 'targetName': backend_id}], optional={'volumeAutoCreation': True}), mock.call.startRemoteCopy(self.RCG_3PAR_NAME)] mock_client.assert_has_calls(expected) self.assertEqual(return_model['replication_status'], 'enabled') @mock.patch.object(volume_types, 'get_volume_type') def test_create_volume_replicated_peer_persistence( self, _mock_volume_types): # setup_mock_client drive with default configuration # and return the mock HTTP 3PAR client conf = self.setup_configuration() self.replication_targets[0]['replication_mode'] = 'sync' self.replication_targets[0]['quorum_witness_ip'] = '10.50.3.192' conf.replication_device = self.replication_targets mock_client = self.setup_driver(config=conf) 
mock_client.getStorageSystemInfo.return_value = ( {'id': self.CLIENT_ID}) mock_client.getRemoteCopyGroup.side_effect = ( hpeexceptions.HTTPNotFound) mock_client.getCPG.return_value = {'domain': None} _mock_volume_types.return_value = { 'name': 'replicated', 'extra_specs': { 'replication_enabled': ' True', 'replication:mode': 'sync', 'volume_type': self.volume_type_replicated}} with mock.patch.object( hpecommon.HPE3PARCommon, '_create_client') as mock_create_client: mock_create_client.return_value = mock_client return_model = self.driver.create_volume(self.volume_replicated) comment = Comment({ "volume_type_name": "replicated", "display_name": "Foo Volume", "name": "volume-d03338a9-9115-48a3-8dfc-35cdfcdc15a7", "volume_type_id": "be9181f1-4040-46f2-8298-e7532f2bf9db", "volume_id": "d03338a9-9115-48a3-8dfc-35cdfcdc15a7", "qos": {}, "type": "OpenStack"}) backend_id = self.replication_targets[0]['backend_id'] expected = [ mock.call.createVolume( self.VOLUME_3PAR_NAME, HPE3PAR_CPG, 2048, { 'comment': comment, 'tpvv': True, 'tdvv': False, 'snapCPG': HPE3PAR_CPG_SNAP}), mock.call.getRemoteCopyGroup(self.RCG_3PAR_NAME), mock.call.getCPG(HPE3PAR_CPG), mock.call.createRemoteCopyGroup( self.RCG_3PAR_NAME, [{'userCPG': HPE3PAR_CPG_REMOTE, 'targetName': backend_id, 'mode': SYNC_MODE, 'snapCPG': HPE3PAR_CPG_REMOTE}], {'localUserCPG': HPE3PAR_CPG, 'localSnapCPG': HPE3PAR_CPG_SNAP}), mock.call.addVolumeToRemoteCopyGroup( self.RCG_3PAR_NAME, self.VOLUME_3PAR_NAME, [{'secVolumeName': self.VOLUME_3PAR_NAME, 'targetName': backend_id}], optional={'volumeAutoCreation': True}), mock.call.modifyRemoteCopyGroup( self.RCG_3PAR_NAME, {'targets': [ {'policies': {'autoFailover': True, 'pathManagement': True, 'autoRecover': True}}]}), mock.call.startRemoteCopy(self.RCG_3PAR_NAME)] mock_client.assert_has_calls(expected) self.assertEqual(return_model['replication_status'], 'enabled') # (i) wsapi version is old/default # (ii) wsapi version is 2025, then all licenses are enabled @ddt.data({'wsapi_version': None}, {'wsapi_version': HPE3PARBaseDriver.wsapi_version_2025}) @ddt.unpack @mock.patch.object(volume_types, 'get_volume_type') def test_create_volume_dedup_compression(self, _mock_volume_types, wsapi_version): # setup_mock_client drive with default configuration # and return the mock HTTP 3PAR client mock_client = self.setup_driver(wsapi_version=wsapi_version) _mock_volume_types.return_value = { 'name': 'dedup_compression', 'extra_specs': { 'cpg': HPE3PAR_CPG_QOS, 'snap_cpg': HPE3PAR_CPG_SNAP, 'vvs_name': self.VVS_NAME, 'qos': self.QOS, 'hpe3par:provisioning': 'dedup', 'hpe3par:compression': 'True', 'volume_type': self.volume_type_dedup_compression}} if not wsapi_version: mock_client.getStorageSystemInfo.return_value = { 'id': self.CLIENT_ID, 'serialNumber': '1234', 'licenseInfo': { 'licenses': [{'name': 'Compression'}, {'name': 'Thin Provisioning (102400G)'}, {'name': 'System Reporter'}] } } else: mock_client.getStorageSystemInfo.return_value = { 'id': self.CLIENT_ID, 'serialNumber': '1234', 'licenseInfo': { # all licenses are enabled 'licenses': [{'name': 'FIPS Encryption'}, {'name': 'Owned'}, {'name': 'Software and Support SaaS'}] } } with mock.patch.object(hpecommon.HPE3PARCommon, '_create_client') as mock_create_client: mock_create_client.return_value = mock_client if not wsapi_version: # (i) old/default return_model = self.driver.create_volume( self.volume_dedup_compression) else: # (ii) wsapi version 2025 common = self.driver._login() return_model = common.create_volume( self.volume_dedup_compression) comment = 
Comment({ "volume_type_name": "dedup_compression", "display_name": "Foo Volume", "name": "volume-d03338a9-9115-48a3-8dfc-35cdfcdc15a7", "volume_type_id": "d03338a9-9115-48a3-8dfc-33333333333", "volume_id": "d03338a9-9115-48a3-8dfc-35cdfcdc15a7", "qos": {}, "type": "OpenStack"}) optional = {'comment': comment, 'tpvv': False, 'tdvv': True, 'compression': True} if not wsapi_version: optional['snapCPG'] = HPE3PAR_CPG_SNAP expected = [ mock.call.getCPG(HPE3PAR_CPG), mock.call.getStorageSystemInfo(), mock.call.createVolume( self.VOLUME_3PAR_NAME, HPE3PAR_CPG, 16384, optional)] if wsapi_version == HPE3PARBaseDriver.wsapi_version_2025: extras = (self.get_id_login + self.standard_logout + self.standard_login) expected = extras + expected mock_client.assert_has_calls(expected) self.assertIsNone(return_model) @mock.patch.object(volume_types, 'get_volume_type') def test_create_volume_dedup(self, _mock_volume_types): # setup_mock_client drive with default configuration # and return the mock HTTP 3PAR client mock_client = self.setup_driver() _mock_volume_types.return_value = { 'name': 'dedup', 'extra_specs': { 'cpg': HPE3PAR_CPG_QOS, 'snap_cpg': HPE3PAR_CPG_SNAP, 'vvs_name': self.VVS_NAME, 'qos': self.QOS, 'provisioning': 'dedup', 'volume_type': self.volume_type_dedup}} with mock.patch.object(hpecommon.HPE3PARCommon, '_create_client') as mock_create_client: mock_create_client.return_value = mock_client return_model = self.driver.create_volume(self.volume_dedup) comment = Comment({ "volume_type_name": "dedup", "display_name": "Foo Volume", "name": "volume-d03338a9-9115-48a3-8dfc-35cdfcdc15a7", "volume_type_id": "d03338a9-9115-48a3-8dfc-11111111111", "volume_id": "d03338a9-9115-48a3-8dfc-35cdfcdc15a7", "qos": {}, "type": "OpenStack"}) expected = [ mock.call.getCPG(HPE3PAR_CPG), mock.call.createVolume( self.VOLUME_3PAR_NAME, HPE3PAR_CPG, 2048, { 'comment': comment, 'tpvv': False, 'tdvv': True, 'snapCPG': HPE3PAR_CPG_SNAP})] mock_client.assert_has_calls(expected) self.assertIsNone(return_model) @mock.patch.object(volume_types, 'get_volume_type') def test_create_volume_flash_cache(self, _mock_volume_types): # Setup_mock_client drive with default configuration # and return the mock HTTP 3PAR client mock_client = self.setup_driver() mock_client.getStorageSystemInfo.return_value = {'id': self.CLIENT_ID} _mock_volume_types.return_value = { 'name': 'flash-cache-on', 'extra_specs': { 'cpg': HPE3PAR_CPG2, 'snap_cpg': HPE3PAR_CPG_SNAP, 'vvs_name': self.VVS_NAME, 'qos': self.QOS, 'tpvv': True, 'tdvv': False, 'hpe3par:flash_cache': 'true', 'volume_type': self.volume_type_flash_cache}} with mock.patch.object(hpecommon.HPE3PARCommon, '_create_client') as mock_create_client: mock_create_client.return_value = mock_client mock_client.getCPG.return_value = {'domain': None} mock_client.FLASH_CACHE_ENABLED = FLASH_CACHE_ENABLED mock_client.FLASH_CACHE_DISABLED = FLASH_CACHE_DISABLED return_model = self.driver.create_volume(self.volume_flash_cache) comment = Comment({ "volume_type_name": "flash-cache-on", "display_name": "Foo Volume", "name": "volume-d03338a9-9115-48a3-8dfc-35cdfcdc15a7", "volume_type_id": "d03338a9-9115-48a3-8dfc-22222222222", "volume_id": "d03338a9-9115-48a3-8dfc-35cdfcdc15a7", "qos": {}, "type": "OpenStack"}) expected = [ mock.call.getCPG(HPE3PAR_CPG), mock.call.createVolume( self.VOLUME_3PAR_NAME, HPE3PAR_CPG, 2048, { 'comment': comment, 'tpvv': True, 'tdvv': False, 'snapCPG': HPE3PAR_CPG_SNAP}), mock.call.getCPG(HPE3PAR_CPG), mock.call.createVolumeSet('vvs-0DM4qZEVSKON-DXN-NwVpw', None), 
mock.call.createQoSRules( 'vvs-0DM4qZEVSKON-DXN-NwVpw', {'priority': 2} ), mock.call.modifyVolumeSet( 'vvs-0DM4qZEVSKON-DXN-NwVpw', flashCachePolicy=1), mock.call.addVolumeToVolumeSet( 'vvs-0DM4qZEVSKON-DXN-NwVpw', 'osv-0DM4qZEVSKON-DXN-NwVpw')] mock_client.assert_has_calls(expected) self.assertIsNone(return_model) @mock.patch.object(volume_types, 'get_volume_type') def test_unsupported_flash_cache_volume(self, _mock_volume_types): mock_client = self.setup_driver(wsapi_version=self.wsapi_version_312) _mock_volume_types.return_value = { 'name': 'flash-cache-on', 'extra_specs': { 'cpg': HPE3PAR_CPG2, 'snap_cpg': HPE3PAR_CPG_SNAP, 'vvs_name': self.VVS_NAME, 'qos': self.QOS, 'tpvv': True, 'tdvv': False, 'hpe3par:flash_cache': 'true', 'volume_type': self.volume_type_flash_cache}} with mock.patch.object(hpecommon.HPE3PARCommon, '_create_client') as mock_create_client: mock_create_client.return_value = mock_client common = self.driver._login() self.assertRaises(exception.InvalidInput, common.get_flash_cache_policy, self.flash_cache_3par_keys) @mock.patch.object(volume_types, 'get_volume_type') def test_retype_not_3par(self, _mock_volume_types): _mock_volume_types.return_value = self.RETYPE_VOLUME_TYPE_1 mock_client = self.setup_driver(mock_conf=self.RETYPE_CONF) mock_client.getVolumeSnapshots.return_value = [] with mock.patch.object(hpecommon.HPE3PARCommon, '_create_client') as mock_create_client: mock_create_client.return_value = mock_client self.assertRaises(exception.InvalidHost, self.driver.retype, self.ctxt, self.RETYPE_VOLUME_INFO_0, self.RETYPE_VOLUME_TYPE_1, self.RETYPE_DIFF, self.RETYPE_HOST_NOT3PAR) expected = [mock.call.getVolumeSnapshots(self.VOLUME_3PAR_NAME), mock.call.getVolume(self.VOLUME_3PAR_NAME)] mock_client.assert_has_calls( self.standard_login + expected + self.standard_logout) @mock.patch.object(volume_types, 'get_volume_type') def test_retype_volume_not_found(self, _mock_volume_types): _mock_volume_types.return_value = self.RETYPE_VOLUME_TYPE_1 mock_client = self.setup_driver(mock_conf=self.RETYPE_CONF) mock_client.getVolumeSnapshots.return_value = [] mock_client.getVolume.side_effect = hpeexceptions.HTTPNotFound with mock.patch.object(hpecommon.HPE3PARCommon, '_create_client') as mock_create_client: mock_create_client.return_value = mock_client self.assertRaises(hpeexceptions.HTTPNotFound, self.driver.retype, self.ctxt, self.RETYPE_VOLUME_INFO_0, self.RETYPE_VOLUME_TYPE_1, self.RETYPE_DIFF, self.RETYPE_HOST) expected = [mock.call.getVolumeSnapshots(self.VOLUME_3PAR_NAME), mock.call.getVolume(self.VOLUME_3PAR_NAME)] mock_client.assert_has_calls( self.standard_login + expected + self.standard_logout) @mock.patch.object(volume_types, 'get_volume_type') def test_retype_specs_error_reverts_snap_cpg(self, _mock_volume_types): _mock_volume_types.side_effect = [ self.RETYPE_VOLUME_TYPE_1, self.RETYPE_VOLUME_TYPE_0] mock_client = self.setup_driver(mock_conf=self.RETYPE_CONF) mock_client.getVolume.return_value = self.RETYPE_VOLUME_INFO_0 # Fail the QOS setting to test the revert of the snap CPG rename. 
mock_client.addVolumeToVolumeSet.side_effect = \ hpeexceptions.HTTPForbidden with mock.patch.object(hpecommon.HPE3PARCommon, '_create_client') as mock_create_client: mock_create_client.return_value = mock_client self.assertRaises(hpeexceptions.HTTPForbidden, self.driver.retype, self.ctxt, {'id': self.VOLUME_ID}, self.RETYPE_VOLUME_TYPE_0, self.RETYPE_DIFF, self.RETYPE_HOST) old_settings = { 'snapCPG': self.RETYPE_VOLUME_INFO_0['snapCPG'], 'comment': self.RETYPE_VOLUME_INFO_0['comment']} new_settings = { 'snapCPG': ( self.RETYPE_VOLUME_TYPE_1['extra_specs']['snap_cpg']), 'comment': mock.ANY} expected = [ mock.call.modifyVolume(self.VOLUME_3PAR_NAME, new_settings) ] mock_client.assert_has_calls(expected) expected = [ mock.call.modifyVolume(self.VOLUME_3PAR_NAME, old_settings) ] mock_client.assert_has_calls(expected + self.standard_logout) @mock.patch.object(volume_types, 'get_volume_type') def test_retype_revert_comment(self, _mock_volume_types): _mock_volume_types.side_effect = [ self.RETYPE_VOLUME_TYPE_2, self.RETYPE_VOLUME_TYPE_1] mock_client = self.setup_driver(mock_conf=self.RETYPE_CONF) mock_client.getVolume.return_value = self.RETYPE_VOLUME_INFO_1 # Fail the QOS setting to test the revert of the snap CPG rename. mock_client.deleteVolumeSet.side_effect = hpeexceptions.HTTPForbidden with mock.patch.object(hpecommon.HPE3PARCommon, '_create_client') as mock_create_client: mock_create_client.return_value = mock_client self.assertRaises(hpeexceptions.HTTPForbidden, self.driver.retype, self.ctxt, {'id': self.VOLUME_ID}, self.RETYPE_VOLUME_TYPE_2, self.RETYPE_DIFF, self.RETYPE_HOST) original = { 'snapCPG': self.RETYPE_VOLUME_INFO_1['snapCPG'], 'comment': self.RETYPE_VOLUME_INFO_1['comment']} expected = [ mock.call.modifyVolume('osv-0DM4qZEVSKON-DXN-NwVpw', original)] mock_client.assert_has_calls(expected + self.standard_logout) @mock.patch.object(volume_types, 'get_volume_type') def test_retype_different_array(self, _mock_volume_types): _mock_volume_types.return_value = self.RETYPE_VOLUME_TYPE_1 mock_client = self.setup_driver(mock_conf=self.RETYPE_CONF) mock_client.getVolumeSnapshots.return_value = [] mock_client.getStorageSystemInfo.return_value = { 'id': self.CLIENT_ID, 'serialNumber': 'XXXXXXX'} with mock.patch.object(hpecommon.HPE3PARCommon, '_create_client') as mock_create_client: mock_create_client.return_value = mock_client self.assertRaises(exception.InvalidHost, self.driver.retype, self.ctxt, self.RETYPE_VOLUME_INFO_0, self.RETYPE_VOLUME_TYPE_1, self.RETYPE_DIFF, self.RETYPE_HOST) expected = [ mock.call.getVolumeSnapshots(self.VOLUME_3PAR_NAME), mock.call.getVolume(self.VOLUME_3PAR_NAME), mock.call.getStorageSystemInfo()] mock_client.assert_has_calls( self.standard_login + expected + self.standard_logout) @mock.patch.object(volume_types, 'get_volume_type') def test_retype_across_cpg_domains(self, _mock_volume_types): _mock_volume_types.return_value = self.RETYPE_VOLUME_TYPE_1 mock_client = self.setup_driver(mock_conf=self.RETYPE_CONF) mock_client.getVolumeSnapshots.return_value = [] mock_client.getCPG.side_effect = [ {'domain': 'domain1'}, {'domain': 'domain2'}, ] with mock.patch.object(hpecommon.HPE3PARCommon, '_create_client') as mock_create_client: mock_create_client.return_value = mock_client self.assertRaises(hpecommon.Invalid3PARDomain, self.driver.retype, self.ctxt, self.RETYPE_VOLUME_INFO_0, self.RETYPE_VOLUME_TYPE_1, self.RETYPE_DIFF, self.RETYPE_HOST) expected = [ mock.call.getVolumeSnapshots(self.VOLUME_3PAR_NAME), mock.call.getVolume(self.VOLUME_3PAR_NAME), 
mock.call.getStorageSystemInfo(), mock.call.getCPG(self.RETYPE_VOLUME_INFO_0['userCPG']), mock.call.getCPG( self.RETYPE_VOLUME_TYPE_1['extra_specs']['cpg']) ] mock_client.assert_has_calls( self.standard_login + expected + self.standard_logout) @mock.patch.object(volume_types, 'get_volume_type') def test_retype_across_snap_cpg_domains(self, _mock_volume_types): _mock_volume_types.return_value = self.RETYPE_VOLUME_TYPE_1 mock_client = self.setup_driver(mock_conf=self.RETYPE_CONF) mock_client.getVolumeSnapshots.return_value = [] mock_client.getCPG.side_effect = [ {'domain': 'cpg_domain'}, {'domain': 'cpg_domain'}, {'domain': 'snap_cpg_domain_1'}, ] with mock.patch.object(hpecommon.HPE3PARCommon, '_create_client') as mock_create_client: mock_create_client.return_value = mock_client self.assertRaises(hpecommon.Invalid3PARDomain, self.driver.retype, self.ctxt, self.RETYPE_VOLUME_INFO_0, self.RETYPE_VOLUME_TYPE_1, self.RETYPE_DIFF, self.RETYPE_HOST) expected = [ mock.call.getVolumeSnapshots(self.VOLUME_3PAR_NAME), mock.call.getVolume(self.VOLUME_3PAR_NAME), mock.call.getStorageSystemInfo(), mock.call.getCPG(self.RETYPE_VOLUME_INFO_0['userCPG']), mock.call.getCPG( self.RETYPE_VOLUME_TYPE_1['extra_specs']['cpg']), mock.call.getCPG( self.RETYPE_VOLUME_TYPE_1['extra_specs']['snap_cpg']) ] mock_client.assert_has_calls( self.standard_login + expected + self.standard_logout) @mock.patch.object(volume_types, 'get_volume_type') def test_retype_to_bad_persona(self, _mock_volume_types): _mock_volume_types.return_value = self.RETYPE_VOLUME_TYPE_BAD_PERSONA mock_client = self.setup_driver(mock_conf=self.RETYPE_CONF) mock_client.getVolumeSnapshots.return_value = [] with mock.patch.object(hpecommon.HPE3PARCommon, '_create_client') as mock_create_client: mock_create_client.return_value = mock_client self.assertRaises(exception.InvalidInput, self.driver.retype, self.ctxt, self.RETYPE_VOLUME_INFO_0, self.RETYPE_VOLUME_TYPE_BAD_PERSONA, self.RETYPE_DIFF, self.RETYPE_HOST) expected = [mock.call.getVolumeSnapshots(self.VOLUME_3PAR_NAME), mock.call.getVolume(self.VOLUME_3PAR_NAME)] mock_client.assert_has_calls( self.standard_login + expected + self.standard_logout) @mock.patch.object(volume_types, 'get_volume_type') def test_retype_tune(self, _mock_volume_types): _mock_volume_types.return_value = self.RETYPE_VOLUME_TYPE_1 mock_client = self.setup_driver(mock_conf=self.RETYPE_CONF) qos_ref = qos_specs.create(self.ctxt, 'qos-specs-1', self.QOS) type_ref = volume_types.create(self.ctxt, "type1", {"qos:maxIOPS": "100", "qos:maxBWS": "50", "qos:minIOPS": "10", "qos:minBWS": "20", "qos:latency": "5", "qos:priority": "high"}) qos_specs.associate_qos_with_type(self.ctxt, qos_ref['id'], type_ref['id']) type_ref = volume_types.get_volume_type(self.ctxt, type_ref['id']) volume = {'id': HPE3PARBaseDriver.CLONE_ID} mock_client.tuneVolume.return_value = ({'taskid': 1}) with mock.patch.object(hpecommon.HPE3PARCommon, '_create_client') as mock_create_client: mock_create_client.return_value = mock_client retyped = self.driver.retype( self.ctxt, volume, type_ref, None, self.RETYPE_HOST) self.assertTrue(retyped[0]) expected = [ mock.call.modifyVolume('osv-0DM4qZEVSKON-AAAAAAAAA', {'comment': mock.ANY, 'snapCPG': 'OpenStackCPGSnap'}), mock.call.deleteVolumeSet('vvs-0DM4qZEVSKON-AAAAAAAAA'), mock.call.addVolumeToVolumeSet('myvvs', 'osv-0DM4qZEVSKON-AAAAAAAAA'), mock.call.tuneVolume('osv-0DM4qZEVSKON-AAAAAAAAA', 1, {'action': 6, 'userCPG': 'OpenStackCPG', 'conversionOperation': 1, 'compression': False}), mock.call.getTask(1), ] 
mock_client.assert_has_calls(expected) @mock.patch.object(volume_types, 'get_volume_type') def test_retype_volume_without_comment(self, _mock_volume_types): _mock_volume_types.return_value = self.RETYPE_VOLUME_TYPE_1 mock_client = self.setup_driver(mock_conf=self.RETYPE_CONF) volume = {'id': HPE3PARBaseDriver.CLONE_ID} VOLUME_INFO_0 = self.RETYPE_VOLUME_INFO_0.copy() # volume without comment del (VOLUME_INFO_0['comment']) mock_client.getVolume.return_value = VOLUME_INFO_0 mock_client.tuneVolume.return_value = ({'taskid': 1}) with mock.patch.object(hpecommon.HPE3PARCommon, '_create_client') as mock_create_client: mock_create_client.return_value = mock_client retyped = self.driver.retype( self.ctxt, volume, self.RETYPE_VOLUME_TYPE_1, None, self.RETYPE_HOST) self.assertTrue(retyped[0]) expected = [ mock.call.modifyVolume('osv-0DM4qZEVSKON-AAAAAAAAA', {'comment': mock.ANY, 'snapCPG': 'OpenStackCPGSnap'}), mock.call.deleteVolumeSet('vvs-0DM4qZEVSKON-AAAAAAAAA'), mock.call.addVolumeToVolumeSet('myvvs', 'osv-0DM4qZEVSKON-AAAAAAAAA'), mock.call.tuneVolume('osv-0DM4qZEVSKON-AAAAAAAAA', 1, {'action': 6, 'userCPG': 'OpenStackCPG', 'conversionOperation': 1, 'compression': False}), mock.call.getTask(1), ] mock_client.assert_has_calls(expected + self.standard_logout) @mock.patch.object(volume_types, 'get_volume_type') def test_retype_non_rep_type_to_rep_type(self, _mock_volume_types): conf = self.setup_configuration() self.replication_targets[0]['replication_mode'] = 'periodic' conf.replication_device = self.replication_targets mock_client = self.setup_driver(config=conf) mock_client.getStorageSystemInfo.return_value = ( {'id': self.CLIENT_ID}) mock_client.getRemoteCopyGroup.side_effect = ( hpeexceptions.HTTPNotFound) mock_client.getCPG.return_value = {'domain': None} mock_replicated_client = self.setup_driver(config=conf) mock_client.getStorageSystemInfo.return_value = { 'id': self.REPLICATION_CLIENT_ID, 'serialNumber': '1234567' } mock_client.modifyVolume.return_value = ("anyResponse", {'taskid': 1}) mock_client.getTask.return_value = self.STATUS_DONE _mock_volume_types.return_value = { 'name': 'replicated', 'extra_specs': { 'replication_enabled': ' True', 'replication:mode': 'periodic', 'replication:sync_period': '900', 'volume_type': self.volume_type_replicated}} mock_client.getVolume.return_value = { 'name': mock.ANY, 'comment': "{'display_name': 'Foo Volume'}", 'provisioningType': mock.ANY, 'userCPG': 'OpenStackCPG', 'snapCPG': 'OpenStackCPGSnap'} with mock.patch.object( hpecommon.HPE3PARCommon, '_create_client') as mock_create_client, \ mock.patch.object( hpecommon.HPE3PARCommon, '_create_replication_client') as mock_replication_client: mock_create_client.return_value = mock_client mock_replication_client.return_value = mock_replicated_client retyped = self.driver.retype( self.ctxt, self.volume, self.volume_type_replicated, None, self.RETYPE_HOST) self.assertTrue(retyped[0]) backend_id = self.replication_targets[0]['backend_id'] expected = [ mock.call.createRemoteCopyGroup( self.RCG_3PAR_NAME, [{'userCPG': HPE3PAR_CPG_REMOTE, 'targetName': backend_id, 'mode': PERIODIC_MODE, 'snapCPG': HPE3PAR_CPG_REMOTE}], {'localUserCPG': HPE3PAR_CPG, 'localSnapCPG': HPE3PAR_CPG_SNAP}), mock.call.addVolumeToRemoteCopyGroup( self.RCG_3PAR_NAME, self.VOLUME_3PAR_NAME, [{'secVolumeName': self.VOLUME_3PAR_NAME, 'targetName': backend_id}], optional={'volumeAutoCreation': True}), mock.call.modifyRemoteCopyGroup( self.RCG_3PAR_NAME, {'targets': [{'syncPeriod': SYNC_PERIOD, 'targetName': backend_id}]}), 
mock.call.startRemoteCopy(self.RCG_3PAR_NAME)] mock_client.assert_has_calls(expected + self.standard_logout) # volume's status and migration_status # (i) default scenario # (ii) deleting temporary volume @ddt.data({'status': 'available', 'migration_status': 'migrating'}, {'status': 'deleting', 'migration_status': 'deleting'}) @ddt.unpack @mock.patch.object(volume_types, 'get_volume_type') def test_retype_rep_type_to_non_rep_type(self, _mock_volume_types, status, migration_status): conf = self.setup_configuration() self.replication_targets[0]['replication_mode'] = 'periodic' conf.replication_device = self.replication_targets mock_client = self.setup_driver(config=conf) mock_client.getStorageSystemInfo.return_value = ( {'id': self.CLIENT_ID}) mock_client.getRemoteCopyGroup.side_effect = ( hpeexceptions.HTTPNotFound) mock_client.getCPG.return_value = {'domain': None} mock_replicated_client = self.setup_driver(config=conf) mock_client.getStorageSystemInfo.return_value = { 'id': self.REPLICATION_CLIENT_ID, 'serialNumber': '1234567' } mock_client.modifyVolume.return_value = ("anyResponse", {'taskid': 1}) mock_client.getTask.return_value = self.STATUS_DONE volume_1 = {'name': self.VOLUME_NAME, 'id': self.VOLUME_ID, 'display_name': 'Foo Volume', 'replication_status': 'disabled', 'provider_location': self.CLIENT_ID, 'size': 2, 'host': self.FAKE_CINDER_HOST, 'volume_type': 'replicated', 'volume_type_id': 'gold', 'status': status, 'migration_status': migration_status} volume_type = {'name': 'replicated', 'deleted': False, 'updated_at': None, 'deleted_at': None, 'extra_specs': {'replication_enabled': 'False'}, 'id': 'silver'} def get_side_effect(*args): data = {'value': None} if args[1] == 'gold': data['value'] = { 'name': 'replicated', 'id': 'gold', 'extra_specs': { 'replication_enabled': ' True', 'replication:mode': 'periodic', 'replication:sync_period': '900', 'volume_type': self.volume_type_replicated}} elif args[1] == 'silver': data['value'] = {'name': 'silver', 'deleted': False, 'updated_at': None, 'extra_specs': { 'replication_enabled': 'False'}, 'deleted_at': None, 'id': 'silver'} return data['value'] _mock_volume_types.side_effect = get_side_effect mock_client.getVolume.return_value = { 'name': mock.ANY, 'comment': "{'display_name': 'Foo Volume'}", 'provisioningType': mock.ANY, 'userCPG': 'OpenStackCPG', 'snapCPG': 'OpenStackCPGSnap'} with mock.patch.object( hpecommon.HPE3PARCommon, '_create_client') as mock_create_client, \ mock.patch.object( hpecommon.HPE3PARCommon, '_create_replication_client') as mock_replication_client: mock_create_client.return_value = mock_client mock_replication_client.return_value = mock_replicated_client retyped = self.driver.retype( self.ctxt, volume_1, volume_type, None, self.RETYPE_HOST) self.assertTrue(retyped[0]) expected = [ mock.call.stopRemoteCopy(self.RCG_3PAR_NAME), mock.call.removeVolumeFromRemoteCopyGroup( self.RCG_3PAR_NAME, self.VOLUME_3PAR_NAME, removeFromTarget=True), mock.call.removeRemoteCopyGroup(self.RCG_3PAR_NAME)] mock_client.assert_has_calls( self.get_id_login + self.standard_logout + self.standard_login + expected + self.standard_logout, any_order=True) @mock.patch.object(volume_types, 'get_volume_type') def test_retype_rep_type_to_rep_type(self, _mock_volume_types): conf = self.setup_configuration() self.replication_targets[0]['replication_mode'] = 'periodic' conf.replication_device = self.replication_targets mock_client = self.setup_driver(config=conf) mock_client.getStorageSystemInfo.return_value = ( {'id': self.CLIENT_ID}) 
mock_client.getRemoteCopyGroup.side_effect = ( hpeexceptions.HTTPNotFound) mock_client.getCPG.return_value = {'domain': None} mock_replicated_client = self.setup_driver(config=conf) mock_client.getStorageSystemInfo.return_value = { 'id': self.REPLICATION_CLIENT_ID, 'serialNumber': '1234567' } mock_client.modifyVolume.return_value = ("anyResponse", {'taskid': 1}) mock_client.getTask.return_value = self.STATUS_DONE volume_1 = {'name': self.VOLUME_NAME, 'id': self.VOLUME_ID, 'display_name': 'Foo Volume', 'replication_status': 'disabled', 'provider_location': self.CLIENT_ID, 'size': 2, 'host': self.FAKE_CINDER_HOST, 'volume_type': 'replicated', 'volume_type_id': 'gold', 'status': 'available', 'migration_status': 'migrating'} volume_type = {'name': 'replicated', 'deleted': False, 'updated_at': None, 'deleted_at': None, 'extra_specs': {'replication_enabled': ' True'}, 'id': 'silver'} def get_side_effect(*args): data = {'value': None} if args[1] == 'gold': data['value'] = { 'name': 'replicated', 'id': 'gold', 'extra_specs': { 'replication_enabled': ' True', 'replication:mode': 'periodic', 'replication:sync_period': '900', 'volume_type': self.volume_type_replicated}} elif args[1] == 'silver': data['value'] = { 'name': 'silver', 'deleted': False, 'updated_at': None, 'extra_specs': { 'replication_enabled': ' True', 'replication:mode': 'periodic', 'replication:sync_period': '1500', 'volume_type': self.volume_type_replicated}, 'deleted_at': None, 'id': 'silver'} return data['value'] _mock_volume_types.side_effect = get_side_effect mock_client.getVolume.return_value = { 'name': mock.ANY, 'comment': "{'display_name': 'Foo Volume'}", 'provisioningType': mock.ANY, 'userCPG': 'OpenStackCPG', 'snapCPG': 'OpenStackCPGSnap'} with mock.patch.object( hpecommon.HPE3PARCommon, '_create_client') as mock_create_client, \ mock.patch.object( hpecommon.HPE3PARCommon, '_create_replication_client') as mock_replication_client: mock_create_client.return_value = mock_client mock_replication_client.return_value = mock_replicated_client backend_id = self.replication_targets[0]['backend_id'] retyped = self.driver.retype( self.ctxt, volume_1, volume_type, None, self.RETYPE_HOST) self.assertTrue(retyped[0]) expected = [ mock.call.stopRemoteCopy(self.RCG_3PAR_NAME), mock.call.removeVolumeFromRemoteCopyGroup( self.RCG_3PAR_NAME, self.VOLUME_3PAR_NAME, removeFromTarget=True), mock.call.removeRemoteCopyGroup(self.RCG_3PAR_NAME), mock.call.createRemoteCopyGroup( self.RCG_3PAR_NAME, [{'userCPG': HPE3PAR_CPG_REMOTE, 'targetName': backend_id, 'mode': PERIODIC_MODE, 'snapCPG': HPE3PAR_CPG_REMOTE}], {'localUserCPG': HPE3PAR_CPG, 'localSnapCPG': HPE3PAR_CPG_SNAP}), mock.call.addVolumeToRemoteCopyGroup( self.RCG_3PAR_NAME, self.VOLUME_3PAR_NAME, [{'secVolumeName': self.VOLUME_3PAR_NAME, 'targetName': backend_id}], optional={'volumeAutoCreation': True}), mock.call.startRemoteCopy(self.RCG_3PAR_NAME)] mock_client.assert_has_calls( self.get_id_login + self.standard_logout + self.standard_login + expected + self.standard_logout, any_order=True) @mock.patch.object(volume_types, 'get_volume_type') def test_retype_qos_spec(self, _mock_volume_types): _mock_volume_types.return_value = self.RETYPE_VOLUME_TYPE_1 mock_client = self.setup_driver(mock_conf=self.RETYPE_CONF) cpg = "any_cpg" snap_cpg = "any_cpg" with mock.patch.object(hpecommon.HPE3PARCommon, '_create_client') as mock_create_client: mock_create_client.return_value = mock_client common = self.driver._login() common._retype(self.volume, HPE3PARBaseDriver.VOLUME_3PAR_NAME, "old_type", 
"old_type_id", HPE3PARBaseDriver.RETYPE_HOST, None, cpg, cpg, snap_cpg, snap_cpg, True, False, False, True, None, None, self.QOS_SPECS, self.RETYPE_QOS_SPECS, None, None, "{}", None) expected = [ mock.call.createVolumeSet('vvs-0DM4qZEVSKON-DXN-NwVpw', None), mock.call.createQoSRules( 'vvs-0DM4qZEVSKON-DXN-NwVpw', {'ioMinGoal': 100, 'ioMaxLimit': 1000, 'bwMinGoalKB': 25600, 'bwMaxLimitKB': 51200, 'priority': 3, 'latencyGoal': 25} ), mock.call.addVolumeToVolumeSet( 'vvs-0DM4qZEVSKON-DXN-NwVpw', 'osv-0DM4qZEVSKON-DXN-NwVpw')] mock_client.assert_has_calls(expected) @mock.patch.object(volume_types, 'get_volume_type') def test_retype_dedup(self, _mock_volume_types): _mock_volume_types.return_value = self.RETYPE_VOLUME_TYPE_3 mock_client = self.setup_driver(mock_conf=self.RETYPE_CONF) cpg = "any_cpg" snap_cpg = "any_cpg" mock_client.tuneVolume.return_value = ({'taskid': 1}) with mock.patch.object(hpecommon.HPE3PARCommon, '_create_client') as mock_create_client: mock_create_client.return_value = mock_client common = self.driver._login() common._retype(self.volume, HPE3PARBaseDriver.VOLUME_3PAR_NAME, "old_type", "old_type_id", HPE3PARBaseDriver.RETYPE_HOST, None, cpg, cpg, snap_cpg, snap_cpg, True, False, False, True, None, None, self.QOS_SPECS, self.RETYPE_QOS_SPECS, None, None, "{}", None) expected = [ mock.call.addVolumeToVolumeSet(u'vvs-0DM4qZEVSKON-DXN-NwVpw', 'osv-0DM4qZEVSKON-DXN-NwVpw'), mock.call.tuneVolume('osv-0DM4qZEVSKON-DXN-NwVpw', 1, {'action': 6, 'userCPG': 'any_cpg', 'conversionOperation': 3, 'compression': False}), mock.call.getTask(1)] mock_client.assert_has_calls(expected) @ddt.data('volume', 'volume_name_id') def test_delete_volume(self, volume_attr): # setup_mock_client drive with default configuration # and return the mock HTTP 3PAR client mock_client = self.setup_driver() with mock.patch.object(hpecommon.HPE3PARCommon, '_create_client') as mock_create_client: mock_create_client.return_value = mock_client self.driver.delete_volume(getattr(self, volume_attr)) name_3par = getattr(self, volume_attr.upper() + '_3PAR_NAME') expected = [mock.call.deleteVolume(name_3par)] mock_client.assert_has_calls(expected) def test_delete_volume_online_clone_active(self): # setup_mock_client drive with default configuration # and return the mock HTTP 3PAR client mock_client = self.setup_driver() with mock.patch.object(hpecommon.HPE3PARCommon, '_create_client') as mock_create_client: mock_create_client.return_value = mock_client ex = hpeexceptions.HTTPConflict("Online clone is active.") ex._error_code = 151 mock_client.deleteVolume = mock.Mock(side_effect=ex) mock_client.isOnlinePhysicalCopy.return_value = True self.driver.delete_volume(self.volume) expected = [ mock.call.deleteVolume(self.VOLUME_3PAR_NAME), mock.call.isOnlinePhysicalCopy(self.VOLUME_3PAR_NAME), mock.call.stopOnlinePhysicalCopy(self.VOLUME_3PAR_NAME)] mock_client.assert_has_calls(expected) def test_delete_volume_online_active_done(self): # setup_mock_client drive with default configuration # and return the mock HTTP 3PAR client mock_client = self.setup_driver() ex = hpeexceptions.HTTPConflict("Online clone is active.") ex._error_code = 151 mock_client.deleteVolume = mock.Mock(side_effect=[ex, 200]) mock_client.isOnlinePhysicalCopy.return_value = False with mock.patch.object(hpecommon.HPE3PARCommon, '_create_client') as mock_create_client: mock_create_client.return_value = mock_client self.driver.delete_volume(self.volume) expected = [ mock.call.deleteVolume(self.VOLUME_3PAR_NAME), 
mock.call.isOnlinePhysicalCopy(self.VOLUME_3PAR_NAME), mock.call.deleteVolume(self.VOLUME_3PAR_NAME)] mock_client.assert_has_calls(expected) # Default scenario: vvset name is similar to volume name @mock.patch.object(volume_types, 'get_volume_type') def test_delete_volume_replicated(self, _mock_volume_types): # setup_mock_client drive with default configuration # and return the mock HTTP 3PAR client mock_client = self.setup_driver() mock_client.getStorageSystemInfo.return_value = {'id': self.CLIENT_ID} ex = hpeexceptions.HTTPConflict("In use") ex._error_code = 34 mock_client.deleteVolume = mock.Mock(side_effect=[ex, 200]) mock_client.getVolumeSet.return_value = 'vvs-0DM4qZEVSKON-DXN-NwVpw' _mock_volume_types.return_value = { 'name': 'replicated', 'extra_specs': { 'cpg': HPE3PAR_CPG_QOS, 'snap_cpg': HPE3PAR_CPG_SNAP, 'vvs_name': self.VVS_NAME, 'qos': self.QOS, 'replication_enabled': ' True', 'replication:mode': 'periodic', 'replication:sync_period': '900', 'volume_type': self.volume_type_replicated}} VVS_NAME = self.VOLUME_3PAR_NAME.replace('osv-', 'vvs-') with mock.patch.object(hpecommon.HPE3PARCommon, '_create_client') as mock_create_client: mock_create_client.return_value = mock_client volume = copy.deepcopy(self.volume_replicated) volume['status'] = 'available' self.driver.delete_volume(volume) expected = [ mock.call.stopRemoteCopy(self.RCG_3PAR_NAME), mock.call.removeVolumeFromRemoteCopyGroup( self.RCG_3PAR_NAME, self.VOLUME_3PAR_NAME, removeFromTarget=True), mock.call.removeRemoteCopyGroup(self.RCG_3PAR_NAME), mock.call.deleteVolume(self.VOLUME_3PAR_NAME), mock.call.getVolumeSet(VVS_NAME), mock.call.deleteVolumeSet(VVS_NAME), mock.call.deleteVolume(self.VOLUME_3PAR_NAME)] mock_client.assert_has_calls(expected) # Second scenario: vvset name is altogether different from volume name @mock.patch.object(volume_types, 'get_volume_type') def test_delete_volume_repl_different_vvset(self, _mock_volume_types): # setup_mock_client drive with default configuration # and return the mock HTTP 3PAR client mock_client = self.setup_driver() mock_client.getStorageSystemInfo.return_value = {'id': self.CLIENT_ID} ex = hpeexceptions.HTTPConflict("In use") ex._error_code = 34 mock_client.deleteVolume = mock.Mock(side_effect=[ex, 200]) ex_not_found = hpeexceptions.HTTPNotFound("Set does not exist") ex_not_found._error_code = 102 mock_client.getVolumeSet = mock.Mock(side_effect=[ex_not_found, 404]) mock_client.findVolumeSet.return_value = self.VVS_NAME _mock_volume_types.return_value = { 'name': 'replicated', 'extra_specs': { 'cpg': HPE3PAR_CPG_QOS, 'snap_cpg': HPE3PAR_CPG_SNAP, 'vvs_name': self.VVS_NAME, 'qos': self.QOS, 'replication_enabled': ' True', 'replication:mode': 'periodic', 'replication:sync_period': '900', 'volume_type': self.volume_type_replicated}} vvs_name_similar = self.VOLUME_3PAR_NAME.replace('osv-', 'vvs-') with mock.patch.object(hpecommon.HPE3PARCommon, '_create_client') as mock_create_client: mock_create_client.return_value = mock_client volume = copy.deepcopy(self.volume_replicated) volume['status'] = 'available' self.driver.delete_volume(volume) expected = [ mock.call.stopRemoteCopy(self.RCG_3PAR_NAME), mock.call.removeVolumeFromRemoteCopyGroup( self.RCG_3PAR_NAME, self.VOLUME_3PAR_NAME, removeFromTarget=True), mock.call.removeRemoteCopyGroup(self.RCG_3PAR_NAME), mock.call.deleteVolume(self.VOLUME_3PAR_NAME), mock.call.getVolumeSet(vvs_name_similar), mock.call.findVolumeSet(self.VOLUME_3PAR_NAME), mock.call.removeVolumeFromVolumeSet(self.VVS_NAME, self.VOLUME_3PAR_NAME), 
mock.call.deleteVolume(self.VOLUME_3PAR_NAME)] mock_client.assert_has_calls(expected) @mock.patch.object(volume_types, 'get_volume_type') def test_delete_volume_replicated_migrated(self, _mock_volume_types): # setup_mock_client drive with default configuration # and return the mock HTTP 3PAR client mock_client = self.setup_driver() mock_client.getStorageSystemInfo.return_value = {'id': self.CLIENT_ID} mock_client.getVolume.return_value = {'rcopyGroup': 'rcg-CArwlBBhRqq3K-eLUh'} _mock_volume_types.return_value = { 'name': 'replicated', 'extra_specs': { 'cpg': HPE3PAR_CPG, 'snap_cpg': HPE3PAR_CPG_SNAP, 'replication_enabled': ' True', 'replication:mode': 'periodic', 'replication:sync_period': '900', 'volume_type': self.volume_type_replicated}} with mock.patch.object(hpecommon.HPE3PARCommon, '_create_client') as mock_create_client: mock_create_client.return_value = mock_client volume = copy.deepcopy(self.volume_replicated) volume['status'] = 'available' volume['migration_status'] = 'success' self.driver.delete_volume(volume) rcg_name_updated = 'rcg-CArwlBBhRqq3K-eLUh' expected = [ mock.call.getVolume(self.VOLUME_3PAR_NAME), mock.call.stopRemoteCopy(rcg_name_updated), mock.call.removeVolumeFromRemoteCopyGroup( rcg_name_updated, self.VOLUME_3PAR_NAME, removeFromTarget=True), mock.call.removeRemoteCopyGroup(rcg_name_updated), mock.call.deleteVolume(self.VOLUME_3PAR_NAME)] mock_client.assert_has_calls(expected) def test_get_cpg_with_volume_return_usercpg(self): # setup_mock_client drive with default configuration # and return the mock HTTP 3PAR client mock_client = self.setup_driver() mock_client.getVolume.return_value = {'name': mock.ANY, 'userCPG': HPE3PAR_CPG2} with mock.patch.object(hpecommon.HPE3PARCommon, '_create_client') as mock_create_client: mock_create_client.return_value = mock_client volume = {'id': HPE3PARBaseDriver.VOLUME_ID, 'name': HPE3PARBaseDriver.VOLUME_NAME, 'display_name': 'Foo Volume', 'size': 2, 'host': volume_utils.append_host(self.FAKE_HOST, HPE3PAR_CPG2)} common = self.driver._login() user_cpg = common.get_cpg(volume) common = hpecommon.HPE3PARCommon(None) vol_name = common._get_3par_vol_name(volume['id']) self.assertEqual(HPE3PAR_CPG2, user_cpg) expected = [mock.call.getVolume(vol_name)] mock_client.assert_has_calls( self.standard_login + expected) def test_get_cpg_with_volume_return_snapcpg(self): # setup_mock_client drive with default configuration # and return the mock HTTP 3PAR client mock_client = self.setup_driver() mock_client.getVolume.return_value = {'name': mock.ANY, 'snapCPG': HPE3PAR_CPG2} with mock.patch.object(hpecommon.HPE3PARCommon, '_create_client') as mock_create_client: mock_create_client.return_value = mock_client volume = {'id': HPE3PARBaseDriver.VOLUME_ID, 'name': HPE3PARBaseDriver.VOLUME_NAME, 'display_name': 'Foo Volume', 'size': 2, 'host': volume_utils.append_host(self.FAKE_HOST, HPE3PAR_CPG2)} common = self.driver._login() snap_cpg = common.get_cpg(volume, allowSnap=True) common = hpecommon.HPE3PARCommon(None) vol_name = common._get_3par_vol_name(volume['id']) self.assertEqual(HPE3PAR_CPG2, snap_cpg) expected = [mock.call.getVolume(vol_name)] mock_client.assert_has_calls( self.standard_login + expected) def test_get_cpg_with_volume_return_no_cpg(self): # setup_mock_client drive with default configuration # and return the mock HTTP 3PAR client mock_client = self.setup_driver() mock_client.getVolume.return_value = {'name': mock.ANY} with mock.patch.object(hpecommon.HPE3PARCommon, '_create_client') as mock_create_client: 
mock_create_client.return_value = mock_client volume = {'id': HPE3PARBaseDriver.VOLUME_ID, 'name': HPE3PARBaseDriver.VOLUME_NAME, 'display_name': 'Foo Volume', 'size': 2, 'host': volume_utils.append_host(self.FAKE_HOST, HPE3PAR_CPG2)} common = self.driver._login() cpg_name = common.get_cpg(volume) common = hpecommon.HPE3PARCommon(None) vol_name = common._get_3par_vol_name(volume['id']) self.assertEqual(HPE3PAR_CPG2, cpg_name) expected = [mock.call.getVolume(vol_name)] mock_client.assert_has_calls( self.standard_login + expected) @ddt.data({'volume_attr': 'volume', 'wsapi_version': None}, {'volume_attr': 'volume_name_id', 'wsapi_version': None}, {'volume_attr': 'volume', 'wsapi_version': HPE3PARBaseDriver.wsapi_version_clone}, {'volume_attr': 'volume_name_id', 'wsapi_version': HPE3PARBaseDriver.wsapi_version_clone}) @ddt.unpack def test_create_cloned_volume(self, volume_attr, wsapi_version): src_vref = getattr(self, volume_attr) vol_name = getattr(self, volume_attr.upper() + '_3PAR_NAME') # setup_mock_client drive with default configuration # and return the mock HTTP 3PAR client mock_client = self.setup_driver(wsapi_version=wsapi_version) mock_client.getVolume.return_value = {'name': mock.ANY} mock_client.copyVolume.return_value = {'taskid': 1} mock_client.getStorageSystemInfo.return_value = { 'id': self.CLIENT_ID, 'serialNumber': 'XXXXXXX'} with mock.patch.object(hpecommon.HPE3PARCommon, '_create_client') as mock_create_client: mock_create_client.return_value = mock_client volume = {'name': HPE3PARBaseDriver.VOLUME_NAME, 'id': HPE3PARBaseDriver.CLONE_ID, 'display_name': 'Foo Volume', 'size': 2, 'host': volume_utils.append_host(self.FAKE_HOST, HPE3PAR_CPG2), 'source_volid': src_vref.id} if not wsapi_version: # (i) old/default model_update = self.driver.create_cloned_volume(volume, src_vref) else: # (ii) wsapi having support for comment in cloned volume common = self.driver._login() model_update = common.create_cloned_volume(volume, src_vref) self.assertIsNone(model_update) # snapshot name is random snap_name = mock.ANY optional = mock.ANY optional_fields = {'snapCPG': 'OpenStackCPGSnap', 'tpvv': True, 'tdvv': False, 'online': True} if wsapi_version: optional_fields['comment'] = mock.ANY expected = [ mock.call.createSnapshot(snap_name, vol_name, optional), mock.call.getVolume(snap_name), mock.call.copyVolume( snap_name, 'osv-0DM4qZEVSKON-AAAAAAAAA', HPE3PAR_CPG2, optional_fields)] mock_client.assert_has_calls(expected) @mock.patch.object(volume_types, 'get_volume_type') def test_clone_volume_with_vvs(self, _mock_volume_types): # Setup_mock_client drive with default configuration # and return the mock HTTP 3PAR client conf = self.setup_configuration() mock_client = self.setup_driver(config=conf) _mock_volume_types.return_value = { 'name': 'gold', 'id': 'gold-id', 'extra_specs': {'vvs': self.VVS_NAME}} mock_client.getVolume.return_value = {'name': mock.ANY} mock_client.copyVolume.return_value = {'taskid': 1} with mock.patch.object(hpecommon.HPE3PARCommon, '_create_client') as mock_create_client: mock_create_client.return_value = mock_client common = self.driver._login() volume_vvs = {'id': self.CLONE_ID, 'name': self.VOLUME_NAME, 'display_name': 'Foo Volume', 'size': 2, 'host': self.FAKE_CINDER_HOST, 'volume_type': 'gold', 'volume_type_id': 'gold-id'} src_vref = {'id': self.VOLUME_ID, 'name': self.VOLUME_NAME, 'size': 2, 'status': 'available', 'volume_type': 'gold', 'host': self.FAKE_CINDER_HOST, 'volume_type_id': 'gold-id'} # creation of the temp snapshot common = 
hpecommon.HPE3PARCommon(conf) snap_name = mock.ANY vol_name = common._get_3par_vol_name(src_vref['id']) optional = mock.ANY model_update = self.driver.create_cloned_volume(volume_vvs, src_vref) self.assertIsNone(model_update) clone_vol_vvs = common.get_volume_settings_from_type(volume_vvs) source_vol_vvs = common.get_volume_settings_from_type(src_vref) self.assertEqual(clone_vol_vvs, source_vol_vvs) expected = [ mock.call.createSnapshot(snap_name, vol_name, optional), mock.call.getVolume(snap_name), mock.call.copyVolume( snap_name, 'osv-0DM4qZEVSKON-AAAAAAAAA', 'OpenStackCPG', {'snapCPG': 'OpenStackCPGSnap', 'tpvv': True, 'tdvv': False, 'online': True}), mock.call.addVolumeToVolumeSet( self.VVS_NAME, 'osv-0DM4qZEVSKON-AAAAAAAAA')] mock_client.assert_has_calls(expected) def test_backup_iscsi_volume_with_chap_disabled(self): # setup_mock_client drive with default configuration # and return the mock HTTP 3PAR client mock_client = self.setup_driver() mock_client.getVolume.return_value = {'name': mock.ANY} mock_client.copyVolume.return_value = {'taskid': 1} mock_client.getVolumeMetaData.side_effect = hpeexceptions.HTTPNotFound with mock.patch.object(hpecommon.HPE3PARCommon, '_create_client') as mock_create_client: mock_create_client.return_value = mock_client volume = {'name': HPE3PARBaseDriver.VOLUME_NAME, 'id': HPE3PARBaseDriver.CLONE_ID, 'display_name': 'Foo Volume', 'size': 2, 'host': volume_utils.append_host(self.FAKE_HOST, HPE3PAR_CPG2)} src_vref = {'id': HPE3PARBaseDriver.VOLUME_ID, 'name': HPE3PARBaseDriver.VOLUME_NAME, 'size': 2, 'status': 'backing-up'} model_update = self.driver.create_cloned_volume(volume, src_vref) self.assertIsNone(model_update) # creation of the temp snapshot common = hpecommon.HPE3PARCommon(None) snap_name = mock.ANY vol_name = common._get_3par_vol_name(src_vref['id']) optional = mock.ANY expected = [ mock.call.createSnapshot(snap_name, vol_name, optional), mock.call.getVolume(snap_name), mock.call.copyVolume( snap_name, 'osv-0DM4qZEVSKON-AAAAAAAAA', HPE3PAR_CPG2, {'snapCPG': 'OpenStackCPGSnap', 'tpvv': True, 'tdvv': False, 'online': True})] mock_client.assert_has_calls(expected) def test_create_clone_iscsi_volume_with_chap_disabled(self): # setup_mock_client drive with default configuration # and return the mock HTTP 3PAR client config = self.setup_configuration() config.hpe3par_iscsi_chap_enabled = True mock_client = self.setup_driver(config=config) mock_client.getVolume.return_value = {'name': mock.ANY} mock_client.copyVolume.return_value = {'taskid': 1} mock_client.getVolumeMetaData.side_effect = hpeexceptions.HTTPNotFound with mock.patch.object(hpecommon.HPE3PARCommon, '_create_client') as mock_create_client: mock_create_client.return_value = mock_client volume = {'name': HPE3PARBaseDriver.VOLUME_NAME, 'id': HPE3PARBaseDriver.CLONE_ID, 'display_name': 'Foo Volume', 'size': 2, 'host': volume_utils.append_host(self.FAKE_HOST, HPE3PAR_CPG2)} src_vref = {'id': HPE3PARBaseDriver.VOLUME_ID, 'name': HPE3PARBaseDriver.VOLUME_NAME, 'size': 2, 'status': 'available'} model_update = self.driver.create_cloned_volume(volume, src_vref) self.assertIsNone(model_update) common = hpecommon.HPE3PARCommon(None) snap_name = mock.ANY vol_name = common._get_3par_vol_name(src_vref['id']) optional = mock.ANY expected = [ mock.call.getVolumeMetaData(vol_name, 'HPQ-cinder-CHAP-name'), mock.call.createSnapshot(snap_name, vol_name, optional), mock.call.getVolume(snap_name), mock.call.copyVolume( snap_name, 'osv-0DM4qZEVSKON-AAAAAAAAA', HPE3PAR_CPG2, {'snapCPG': 'OpenStackCPGSnap', 
'tpvv': True, 'tdvv': False, 'online': True})] mock_client.assert_has_calls(expected) def test_backup_iscsi_volume_with_chap_enabled(self): # setup_mock_client drive with default configuration # and return the mock HTTP 3PAR client config = self.setup_configuration() config.hpe3par_iscsi_chap_enabled = True mock_client = self.setup_driver(config=config) mock_client.getVolume.return_value = {'name': mock.ANY} task_id = 1 mock_client.copyVolume.return_value = {'taskid': task_id} mock_client.getVolumeMetaData.return_value = { 'value': 'random-key'} mock_client.getTask.return_value = {'status': 1} with mock.patch.object(hpecommon.HPE3PARCommon, '_create_client') as mock_create_client: mock_create_client.return_value = mock_client volume = {'name': HPE3PARBaseDriver.VOLUME_NAME, 'id': HPE3PARBaseDriver.CLONE_ID, 'display_name': 'Foo Volume', 'size': 5, 'host': volume_utils.append_host(self.FAKE_HOST, HPE3PAR_CPG2), 'source_volid': HPE3PARBaseDriver.VOLUME_ID} src_vref = {'id': HPE3PARBaseDriver.VOLUME_ID, 'name': HPE3PARBaseDriver.VOLUME_NAME, 'size': 5, 'status': 'backing-up'} model_update = self.driver.create_cloned_volume(volume, src_vref) self.assertIsNone(model_update) common = hpecommon.HPE3PARCommon(None) vol_name = common._get_3par_vol_name(volume['id']) src_vol_name = common._get_3par_vol_name(src_vref['id']) optional = {'priority': 1} comment = mock.ANY expected = [ mock.call.getVolumeMetaData(src_vol_name, 'HPQ-cinder-CHAP-name'), mock.call.createVolume(vol_name, 'fakepool', 5120, comment), mock.call.copyVolume( src_vol_name, vol_name, None, optional=optional), mock.call.getTask(task_id), ] mock_client.assert_has_calls(expected) def test_create_cloned_volume_offline_copy(self): # setup_mock_client drive with default configuration # and return the mock HTTP 3PAR client mock_client = self.setup_driver() mock_client.getVolume.return_value = {'name': mock.ANY} task_id = 1 mock_client.copyVolume.return_value = {'taskid': task_id} mock_client.getTask.return_value = {'status': 1} with mock.patch.object(hpecommon.HPE3PARCommon, '_create_client') as mock_create_client: mock_create_client.return_value = mock_client volume = {'name': HPE3PARBaseDriver.VOLUME_NAME, 'id': HPE3PARBaseDriver.CLONE_ID, 'display_name': 'Foo Volume', 'size': 5, 'host': volume_utils.append_host(self.FAKE_HOST, HPE3PAR_CPG2), 'source_volid': HPE3PARBaseDriver.VOLUME_ID} src_vref = {'id': HPE3PARBaseDriver.VOLUME_ID, 'name': HPE3PARBaseDriver.VOLUME_NAME, 'size': 2, 'status': 'available'} model_update = self.driver.create_cloned_volume(volume, src_vref) self.assertIsNone(model_update) common = hpecommon.HPE3PARCommon(None) vol_name = common._get_3par_vol_name(volume['id']) src_vol_name = common._get_3par_vol_name(src_vref['id']) optional = {'priority': 1} comment = mock.ANY expected = [ mock.call.createVolume(vol_name, 'fakepool', 5120, comment), mock.call.copyVolume( src_vol_name, vol_name, None, optional=optional), mock.call.getTask(task_id), ] mock_client.assert_has_calls(expected) @mock.patch.object(volume_types, 'get_volume_type') def test_create_cloned_qos_volume(self, _mock_volume_types): _mock_volume_types.return_value = self.RETYPE_VOLUME_TYPE_2 mock_client = self.setup_driver() mock_client.getVolume.return_value = {'name': mock.ANY} mock_client.copyVolume.return_value = {'taskid': 1} with mock.patch.object(hpecommon.HPE3PARCommon, '_create_client') as mock_create_client: mock_create_client.return_value = mock_client src_vref = {'id': HPE3PARBaseDriver.CLONE_ID, 'name': HPE3PARBaseDriver.VOLUME_NAME, 'size': 2, 
'status': 'available'} volume = self.volume_qos.copy() host = "TEST_HOST" pool = "TEST_POOL" volume_host = volume_utils.append_host(host, pool) expected_cpg = pool volume['id'] = HPE3PARBaseDriver.VOLUME_ID volume['host'] = volume_host volume['source_volid'] = HPE3PARBaseDriver.CLONE_ID model_update = self.driver.create_cloned_volume(volume, src_vref) self.assertIsNone(model_update) # creation of the temp snapshot common = hpecommon.HPE3PARCommon(None) snap_name = mock.ANY vol_name = common._get_3par_vol_name(src_vref['id']) optional = mock.ANY expected = [ mock.call.getCPG(expected_cpg), mock.call.createSnapshot(snap_name, vol_name, optional), mock.call.getVolume(snap_name), mock.call.copyVolume( snap_name, self.VOLUME_3PAR_NAME, expected_cpg, {'snapCPG': 'OpenStackCPGSnap', 'tpvv': True, 'tdvv': False, 'online': True}), mock.call.addVolumeToVolumeSet( 'yourvvs', 'osv-0DM4qZEVSKON-DXN-NwVpw')] mock_client.assert_has_calls(expected) @mock.patch.object(volume_types, 'get_volume_type') def test_create_cloned_replicated_volume(self, _mock_volume_types): # setup_mock_client drive with default configuration # and return the mock HTTP 3PAR client conf = self.setup_configuration() self.replication_targets[0]['replication_mode'] = 'sync' conf.replication_device = self.replication_targets mock_client = self.setup_driver(config=conf) mock_client.getStorageSystemInfo.return_value = ( {'id': self.CLIENT_ID}) mock_client.getRemoteCopyGroup.side_effect = ( hpeexceptions.HTTPNotFound) mock_client.getCPG.return_value = {'domain': None} _mock_volume_types.return_value = { 'name': 'replicated', 'extra_specs': { 'replication_enabled': ' True', 'replication:mode': 'sync', 'volume_type': self.volume_type_replicated}} mock_client = self.setup_driver() mock_client.getVolume.return_value = {'name': mock.ANY} task_id = 1 mock_client.copyVolume.return_value = {'taskid': task_id} mock_client.getTask.return_value = {'status': 1} with mock.patch.object(hpecommon.HPE3PARCommon, '_create_client') as mock_create_client: mock_create_client.return_value = mock_client type_id_replicated = HPE3PARBaseDriver.VOLUME_TYPE_ID_REPLICATED volume = copy.deepcopy(self.volume_replicated) src_vref = {'id': HPE3PARBaseDriver.VOLUME_ID, 'name': HPE3PARBaseDriver.VOLUME_NAME, 'size': 2, 'status': 'available', 'volume_type': 'replicated', 'volume_type_id': type_id_replicated} model_update = self.driver.create_cloned_volume(volume, src_vref) self.assertEqual(model_update['replication_status'], fields.ReplicationStatus.ENABLED) common = hpecommon.HPE3PARCommon(None) vol_name = common._get_3par_vol_name(volume['id']) src_vol_name = common._get_3par_vol_name(src_vref['id']) optional = {'priority': 1} comment = mock.ANY expected = [ mock.call.createVolume(vol_name, 'OpenStackCPG', 2048, comment), mock.call.copyVolume( src_vol_name, vol_name, None, optional=optional), mock.call.getTask(task_id), mock.call.getRemoteCopyGroup('rcg-0DM4qZEVSKON-DXN-N'), mock.call.startRemoteCopy('rcg-0DM4qZEVSKON-DXN-N') ] mock_client.assert_has_calls(expected) def test_migrate_volume(self): conf = { 'getStorageSystemInfo.return_value': { 'id': self.CLIENT_ID, 'serialNumber': '1234'}, 'getTask.return_value': { 'status': 1}, 'getCPG.return_value': {}, 'copyVolume.return_value': {'taskid': 1}, 'getVolume.return_value': self.RETYPE_VOLUME_INFO_1 } mock_client = self.setup_driver(mock_conf=conf) mock_client.getVolume.return_value = self.MANAGE_VOLUME_INFO mock_client.tuneVolume.return_value = ({'taskid': 1}) mock_client.getTask.return_value = self.STATUS_DONE volume = 
{'name': HPE3PARBaseDriver.VOLUME_NAME, 'id': HPE3PARBaseDriver.CLONE_ID, 'display_name': 'Foo Volume', 'volume_type_id': None, 'size': 2, 'status': 'available', 'host': HPE3PARBaseDriver.FAKE_HOST, 'source_volid': HPE3PARBaseDriver.VOLUME_ID} with mock.patch.object(hpecommon.HPE3PARCommon, '_create_client') as mock_create_client: mock_create_client.return_value = mock_client common = self.driver._login() volume_name_3par = common._encode_name(volume['id']) loc_info = 'HPE3PARDriver:1234:CPG-FC1' host = {'host': 'stack@3parfc1#CPG-FC1', 'capabilities': {'location_info': loc_info}} result = self.driver.migrate_volume(context.get_admin_context(), volume, host) self.assertIsNotNone(result) self.assertEqual((True, None), result) osv_matcher = 'osv-' + volume_name_3par comment = Comment({ "display_name": "Foo Volume", "qos": {}, }) expected = [ mock.call.modifyVolume( osv_matcher, {'comment': comment, 'snapCPG': HPE3PAR_CPG_SNAP}), mock.call.tuneVolume(osv_matcher, 1, {'action': 6, 'userCPG': 'CPG-FC1', 'conversionOperation': 1, 'compression': False}), mock.call.getTask(mock.ANY) ] mock_client.assert_has_calls(expected) @mock.patch.object(volume_types, 'get_volume_type') def test_migrate_volume_with_type(self, _mock_volume_types): _mock_volume_types.return_value = self.RETYPE_VOLUME_TYPE_2 conf = { 'getStorageSystemInfo.return_value': { 'id': self.CLIENT_ID, 'serialNumber': '1234'}, 'getTask.return_value': { 'status': 1}, 'getCPG.return_value': {}, 'copyVolume.return_value': {'taskid': 1}, 'getVolume.return_value': self.RETYPE_VOLUME_INFO_1 } mock_client = self.setup_driver(mock_conf=conf) mock_client.getVolume.return_value = self.MANAGE_VOLUME_INFO mock_client.tuneVolume.return_value = ({'taskid': 1}) mock_client.getTask.return_value = self.STATUS_DONE display_name = 'Foo Volume' volume = {'name': HPE3PARBaseDriver.VOLUME_NAME, 'id': HPE3PARBaseDriver.CLONE_ID, 'display_name': display_name, "volume_type_id": self.RETYPE_VOLUME_TYPE_2['id'], 'size': 2, 'status': 'available', 'host': HPE3PARBaseDriver.FAKE_HOST, 'source_volid': HPE3PARBaseDriver.VOLUME_ID} with mock.patch.object(hpecommon.HPE3PARCommon, '_create_client') as mock_create_client: mock_create_client.return_value = mock_client common = self.driver._login() volume_name_3par = common._encode_name(volume['id']) loc_info = 'HPE3PARDriver:1234:CPG-FC1' instance_host = 'stack@3parfc1#CPG-FC1' host = {'host': instance_host, 'capabilities': {'location_info': loc_info}} result = self.driver.migrate_volume(context.get_admin_context(), volume, host) self.assertIsNotNone(result) # when the host and pool are the same we'll get None self.assertEqual((True, None), result) osv_matcher = 'osv-' + volume_name_3par expected_comment = Comment({ "display_name": display_name, "volume_type_id": self.RETYPE_VOLUME_TYPE_2['id'], "volume_type_name": self.RETYPE_VOLUME_TYPE_2['name'], "vvs": self.RETYPE_VOLUME_TYPE_2['extra_specs']['vvs'] }) expected = [ mock.call.modifyVolume( osv_matcher, {'comment': expected_comment, 'snapCPG': self.RETYPE_VOLUME_TYPE_2 ['extra_specs']['snap_cpg']}), mock.call.tuneVolume( osv_matcher, 1, {'action': 6, 'userCPG': 'CPG-FC1', 'conversionOperation': 1, 'compression': False}), mock.call.getTask(mock.ANY) ] mock_client.assert_has_calls(expected) def test_migrate_volume_diff_host(self): conf = { 'getStorageSystemInfo.return_value': { 'id': self.CLIENT_ID, 'serialNumber': 'different'}, } mock_client = self.setup_driver(mock_conf=conf) volume = {'name': HPE3PARBaseDriver.VOLUME_NAME, 'id': HPE3PARBaseDriver.CLONE_ID, 'display_name': 
'Foo Volume', 'volume_type_id': None, 'size': 2, 'status': 'available', 'host': HPE3PARBaseDriver.FAKE_HOST, 'source_volid': HPE3PARBaseDriver.VOLUME_ID} loc_info = 'HPE3PARDriver:1234:CPG-FC1' host = {'host': 'stack@3parfc1', 'capabilities': {'location_info': loc_info}} with mock.patch.object(hpecommon.HPE3PARCommon, '_create_client') as mock_create_client: mock_create_client.return_value = mock_client result = self.driver.migrate_volume(context.get_admin_context(), volume, host) self.assertIsNotNone(result) self.assertEqual((False, None), result) @mock.patch.object(volume_types, 'get_volume_type') def test_migrate_volume_diff_domain(self, _mock_volume_types): _mock_volume_types.return_value = self.volume_type conf = { 'getStorageSystemInfo.return_value': { 'id': self.CLIENT_ID, 'serialNumber': '1234'}, 'getTask.return_value': { 'status': 1}, 'getCPG.return_value': {}, 'copyVolume.return_value': {'taskid': 1}, 'getVolume.return_value': self.RETYPE_VOLUME_INFO_1 } mock_client = self.setup_driver(mock_conf=conf) mock_client.getVolume.return_value = self.MANAGE_VOLUME_INFO mock_client.tuneVolume.return_value = ({'taskid': 1}) mock_client.getTask.return_value = self.STATUS_DONE volume = {'name': HPE3PARBaseDriver.VOLUME_NAME, 'id': HPE3PARBaseDriver.CLONE_ID, 'display_name': 'Foo Volume', 'volume_type_id': None, 'size': 2, 'status': 'available', 'host': HPE3PARBaseDriver.FAKE_HOST, 'source_volid': HPE3PARBaseDriver.VOLUME_ID} with mock.patch.object(hpecommon.HPE3PARCommon, '_create_client') as mock_create_client: mock_create_client.return_value = mock_client common = self.driver._login() volume_name_3par = common._encode_name(volume['id']) loc_info = 'HPE3PARDriver:1234:CPG-FC1' host = {'host': 'stack@3parfc1#CPG-FC1', 'capabilities': {'location_info': loc_info}} result = self.driver.migrate_volume(context.get_admin_context(), volume, host) self.assertIsNotNone(result) self.assertEqual((True, None), result) osv_matcher = 'osv-' + volume_name_3par comment = Comment({"qos": {}, "display_name": "Foo Volume"}) expected = [ mock.call.modifyVolume( osv_matcher, {'comment': comment, 'snapCPG': HPE3PAR_CPG_SNAP}), mock.call.tuneVolume(osv_matcher, 1, {'action': 6, 'userCPG': 'CPG-FC1', 'conversionOperation': 1, 'compression': False}), mock.call.getTask(mock.ANY), ] mock_client.assert_has_calls(expected) @mock.patch.object(volume_types, 'get_volume_type') def test_migrate_volume_attached_diff_protocol(self, _mock_volume_types): _mock_volume_types.return_value = self.RETYPE_VOLUME_TYPE_1 mock_client = self.setup_driver(mock_conf=self.RETYPE_CONF) protocol = "OTHER" volume = {'name': HPE3PARBaseDriver.VOLUME_NAME, 'volume_type_id': None, 'id': HPE3PARBaseDriver.CLONE_ID, 'display_name': 'Foo Volume', 'size': 2, 'status': 'in-use', 'host': HPE3PARBaseDriver.FAKE_HOST, 'source_volid': HPE3PARBaseDriver.VOLUME_ID} loc_info = 'HPE3PARDriver:1234567:CPG-FC1' host = {'host': 'stack@3parfc1', 'capabilities': {'location_info': loc_info, 'storage_protocol': protocol}} result = self.driver.migrate_volume(context.get_admin_context(), volume, host) self.assertIsNotNone(result) self.assertEqual((False, None), result) expected = [] mock_client.assert_has_calls(expected) @ddt.data({'temp_rename_side_effect': None, 'rename_side_effect': None}, {'temp_rename_side_effect': hpeexceptions.HTTPNotFound, 'rename_side_effect': None}) @ddt.unpack def test_update_migrated_volume(self, temp_rename_side_effect, rename_side_effect): mock_client = self.setup_driver() mock_client.modifyVolume.side_effect = [temp_rename_side_effect, 
rename_side_effect, None] fake_old_volume = self.fake_volume_object(self.VOLUME_ID) provider_location = 'foo' fake_new_volume = self.fake_volume_object( self.CLONE_ID, _name_id=self.CLONE_ID, provider_location=provider_location) original_volume_status = 'available' _3common = hpecommon.HPE3PARCommon self.mock_object(_3common, '_create_client', return_value=mock_client) mock_get_comment = self.mock_object(_3common, '_get_updated_comment', side_effect=[mock.sentinel.comm1, mock.sentinel.comm2]) actual_update = self.driver.update_migrated_volume( context.get_admin_context(), fake_old_volume, fake_new_volume, original_volume_status) if rename_side_effect is None: expected_update = {'_name_id': None, 'provider_location': None} else: expected_update = {'_name_id': fake_new_volume['_name_id'], 'provider_location': provider_location} self.assertEqual(expected_update, actual_update) # Initial temp rename always takes place expected = [ mock.call.modifyVolume( 'osv-0DM4qZEVSKON-DXN-NwVpw', {'newName': u'tsv-0DM4qZEVSKON-DXN-NwVpw'}) ] comment_expected = [] # Primary rename will occur unless the temp rename fails if temp_rename_side_effect != hpeexceptions.HTTPConflict: expected += [ mock.call.modifyVolume( 'osv-0DM4qZEVSKON-AAAAAAAAA', {'newName': u'osv-0DM4qZEVSKON-DXN-NwVpw', 'comment': mock.sentinel.comm1}) ] comment_expected.append(mock.call('osv-0DM4qZEVSKON-AAAAAAAAA', volume_id=self.VOLUME_ID, _name_id=None)) # Final temp rename will occur if both of the previous renames # succeed. if (temp_rename_side_effect is None and rename_side_effect is None): expected += [ mock.call.modifyVolume( 'tsv-0DM4qZEVSKON-DXN-NwVpw', {'newName': u'osv-0DM4qZEVSKON-AAAAAAAAA', 'comment': mock.sentinel.comm2}) ] comment_expected.append(mock.call('osv-0DM4qZEVSKON-DXN-NwVpw', volume_id=self.CLONE_ID, _name_id=None)) mock_client.assert_has_calls(expected) mock_get_comment.assert_has_calls(comment_expected) def test_update_migrated_volume_with_name_id(self): """We don't use temp rename mechanism when source uses _name_id.""" mock_client = self.setup_driver() fake_old_volume = self.fake_volume_object( self.VOLUME_ID, _name_id=self.SRC_CG_VOLUME_ID) fake_new_volume = self.fake_volume_object(self.CLONE_ID) _3common = hpecommon.HPE3PARCommon self.mock_object(_3common, '_create_client', return_value=mock_client) mock_get_comment = self.mock_object(_3common, '_get_updated_comment', side_effect=[mock.sentinel.comm]) actual_update = self.driver.update_migrated_volume( context.get_admin_context(), fake_old_volume, fake_new_volume, 'available') expected_update = {'_name_id': None, 'provider_location': None} self.assertEqual(expected_update, actual_update) # # After successfully swapping names we have updated the comments mock_get_comment.assert_called_once_with('osv-0DM4qZEVSKON-AAAAAAAAA', volume_id=self.VOLUME_ID, _name_id=None), expected = [ mock.call.modifyVolume('osv-0DM4qZEVSKON-AAAAAAAAA', {'newName': u'osv-0DM4qZEVSKON-DXN-NwVpw', 'comment': mock.sentinel.comm}), ] mock_client.assert_has_calls(expected) @ddt.data({'temp_rename_side_effect': hpeexceptions.HTTPConflict, 'rename_side_effect': None}, {'temp_rename_side_effect': None, 'rename_side_effect': hpeexceptions.HTTPConflict}, {'temp_rename_side_effect': hpeexceptions.HTTPNotFound, 'rename_side_effect': hpeexceptions.HTTPConflict}) @ddt.unpack def test_update_migrated_volume_failed(self, temp_rename_side_effect, rename_side_effect): mock_client = self.setup_driver() fake_old_volume = {'id': self.VOLUME_ID} provider_location = 'foo' fake_new_volume = {'id': 
self.CLONE_ID, '_name_id': self.CLONE_ID, 'provider_location': provider_location} original_volume_status = 'available' _3common = hpecommon.HPE3PARCommon self.mock_object(_3common, '_create_client', return_value=mock_client) mock_get_comment = self.mock_object(_3common, '_get_updated_comment', side_effect=[mock.sentinel.comm]) mock_update_comment = self.mock_object(_3common, '_update_comment') mock_client.modifyVolume.side_effect = [ temp_rename_side_effect, rename_side_effect, None ] actual_update = self.driver.update_migrated_volume( context.get_admin_context(), fake_old_volume, fake_new_volume, original_volume_status) expected_update = {'_name_id': self.CLONE_ID, 'provider_location': provider_location} self.assertEqual(expected_update, actual_update) # Initial temp rename always takes place expected = [ mock.call.modifyVolume( 'osv-0DM4qZEVSKON-DXN-NwVpw', {'newName': u'tsv-0DM4qZEVSKON-DXN-NwVpw'}), ] # Primary rename will occur unless the temp rename fails if temp_rename_side_effect != hpeexceptions.HTTPConflict: expected += [ mock.call.modifyVolume( 'osv-0DM4qZEVSKON-AAAAAAAAA', {'newName': u'osv-0DM4qZEVSKON-DXN-NwVpw', 'comment': mock.sentinel.comm}), ] mock_get_comment.assert_called_once_with( 'osv-0DM4qZEVSKON-AAAAAAAAA', volume_id=self.VOLUME_ID, _name_id=None) else: mock_get_comment.assert_not_called() mock_update_comment.assert_called_once_with( 'osv-0DM4qZEVSKON-AAAAAAAAA', volume_id=self.VOLUME_ID, _name_id=self.CLONE_ID) mock_client.assert_has_calls(expected) def test_update_migrated_volume_attached(self): mock_client = self.setup_driver() mock_client.getVolume.return_value = { 'comment': '{"volume_id": %s, "_name_id": ""}' % self.CLONE_ID} # Simulate old volume had already been live migrated fake_old_volume = {'id': self.VOLUME_ID} provider_location = 'foo' fake_new_volume = {'id': self.CLONE_ID, '_name_id': '', 'provider_location': provider_location} original_volume_status = 'in-use' _3common = hpecommon.HPE3PARCommon self.mock_object(_3common, '_create_client', return_value=mock_client) mock_update = self.mock_object(_3common, '_update_comment') actual_update = self.driver.update_migrated_volume( context.get_admin_context(), fake_old_volume, fake_new_volume, original_volume_status) expected_update = {'_name_id': fake_new_volume['id'], 'provider_location': provider_location} self.assertEqual(expected_update, actual_update) vol_name = _3common._get_3par_vol_name(fake_new_volume) mock_update.assert_called_once_with(vol_name, volume_id=fake_old_volume['id'], _name_id=fake_new_volume['id']) @ddt.data(('snapshot', 'osv-dh-F5VGRTseuujPjbeRBVg'), ('snapshot_name_id', HPE3PARBaseDriver.VOLUME_NAME_ID_3PAR_NAME)) @ddt.unpack def test_create_snapshot(self, snapshot_attr, vol_name): # setup_mock_client drive with default configuration # and return the mock HTTP 3PAR client mock_client = self.setup_driver() snapshot = getattr(self, snapshot_attr) with mock.patch.object(hpecommon.HPE3PARCommon, '_create_client') as mock_create_client: mock_create_client.return_value = mock_client self.driver.create_snapshot(snapshot) comment = { "volume_id": snapshot['volume_id'], "display_name": snapshot['display_name'], "description": snapshot['display_description'], "volume_name": snapshot['volume_name'], } if snapshot['volume'].get('_name_id'): comment["_name_id"] = snapshot['volume']['_name_id'] expected = [ mock.call.createSnapshot( 'oss-L4I73ONuTci9Fd4ceij-MQ', vol_name, { 'comment': Comment(comment), 'readOnly': True})] mock_client.assert_has_calls(expected) @ddt.data(('snapshot', 
'osv-dh-F5VGRTseuujPjbeRBVg'), ('snapshot_name_id', HPE3PARBaseDriver.VOLUME_NAME_ID_3PAR_NAME)) @ddt.unpack def test_revert_to_snapshot(self, snapshot_attr, vol_name): snapshot = getattr(self, snapshot_attr) snapshot['volume']['migration_status'] = None # setup_mock_client drive with default configuration # and return the mock HTTP 3PAR client mock_client = self.setup_driver() mock_client.isOnlinePhysicalCopy.return_value = False mock_client.promoteVirtualCopy.return_value = {'taskid': 1} mock_client.getTask.return_value = {'status': 1} with mock.patch.object(hpecommon.HPE3PARCommon, '_create_client') as mock_create_client: mock_create_client.return_value = mock_client self.driver.revert_to_snapshot(self.ctxt, snapshot['volume'], snapshot) expected = [ mock.call.isOnlinePhysicalCopy(vol_name), mock.call.promoteVirtualCopy('oss-L4I73ONuTci9Fd4ceij-MQ', optional={}), mock.call.getTask(1) ] mock_client.assert_has_calls(expected) @mock.patch.object(volume_types, 'get_volume_type') def test_revert_to_snapshot_replicated_volume(self, _mock_volume_types): _mock_volume_types.return_value = { 'name': 'replicated', 'extra_specs': { 'replication_enabled': ' True', 'volume_type': self.volume_type_replicated}} mock_client = self.setup_driver() mock_client.isOnlinePhysicalCopy.return_value = True mock_client.getStorageSystemInfo.return_value = mock.ANY mock_client.promoteVirtualCopy.return_value = {'taskid': 1} mock_client.getTask.return_value = {'status': 1} with mock.patch.object(hpecommon.HPE3PARCommon, '_create_client') as mock_create_client: mock_create_client.return_value = mock_client self.driver.revert_to_snapshot( self.ctxt, self.volume_replicated, self.snapshot) expected = [ mock.call.stopRemoteCopy('rcg-0DM4qZEVSKON-DXN-N'), mock.call.isOnlinePhysicalCopy('osv-0DM4qZEVSKON-DXN-NwVpw'), mock.call.promoteVirtualCopy( 'oss-L4I73ONuTci9Fd4ceij-MQ', optional={'online': True, 'allowRemoteCopyParent': True}), mock.call.getTask(1), mock.call.startRemoteCopy('rcg-0DM4qZEVSKON-DXN-N') ] mock_client.assert_has_calls(expected) def test_delete_snapshot(self): # setup_mock_client drive with default configuration # and return the mock HTTP 3PAR client mock_client = self.setup_driver() with mock.patch.object(hpecommon.HPE3PARCommon, '_create_client') as mock_create_client: mock_create_client.return_value = mock_client self.driver.delete_snapshot(self.snapshot) expected = [ mock.call.deleteVolume('oss-L4I73ONuTci9Fd4ceij-MQ')] mock_client.assert_has_calls(expected) def test_delete_snapshot_in_use(self): # setup_mock_client drive with default configuration # and return the mock HTTP 3PAR client mock_client = self.setup_driver() with mock.patch.object(hpecommon.HPE3PARCommon, '_create_client') as mock_create_client: mock_create_client.return_value = mock_client self.driver._login() volume = self.volume_snapshot.copy() model_update = self.driver.create_volume_from_snapshot( volume, self.snapshot) self.assertEqual(model_update, {}) comment = Comment({ "snapshot_id": "2f823bdc-e36e-4dc8-bd15-de1c7a28ff31", "display_name": "Foo Volume", "volume_id": "d03338a9-9115-48a3-8dfc-35cdfcdc15a7", }) expected = [ mock.call.createSnapshot( self.VOLUME_3PAR_NAME, 'oss-L4I73ONuTci9Fd4ceij-MQ', { 'comment': comment, 'readOnly': False})] mock_client.assert_has_calls(expected) ex = hpeexceptions.HTTPConflict("In use") ex._error_code = 32 mock_client.deleteVolume = mock.Mock(side_effect=ex) # Deleting the snapshot that a volume is dependent on should fail self.assertRaises(exception.SnapshotIsBusy, self.driver.delete_snapshot, 
self.snapshot) def test_delete_snapshot_not_found(self): # setup_mock_client drive with default configuration # and return the mock HTTP 3PAR client mock_client = self.setup_driver() with mock.patch.object(hpecommon.HPE3PARCommon, '_create_client') as mock_create_client: mock_create_client.return_value = mock_client self.driver.create_snapshot(self.snapshot) try: ex = hpeexceptions.HTTPNotFound("not found") mock_client.deleteVolume = mock.Mock(side_effect=ex) self.driver.delete_snapshot(self.snapshot) except Exception: self.fail("Deleting a snapshot that is missing should act " "as if it worked.") def test_create_volume_from_snapshot(self): # setup_mock_client drive with default configuration # and return the mock HTTP 3PAR client mock_client = self.setup_driver() with mock.patch.object(hpecommon.HPE3PARCommon, '_create_client') as mock_create_client: mock_create_client.return_value = mock_client self.driver._login() volume = self.volume_snapshot.copy() model_update = self.driver.create_volume_from_snapshot( volume, self.snapshot) self.assertEqual(model_update, {}) comment = Comment({ "snapshot_id": "2f823bdc-e36e-4dc8-bd15-de1c7a28ff31", "display_name": "Foo Volume", "volume_id": "d03338a9-9115-48a3-8dfc-35cdfcdc15a7", }) expected = [ mock.call.createSnapshot( self.VOLUME_3PAR_NAME, 'oss-L4I73ONuTci9Fd4ceij-MQ', { 'comment': comment, 'readOnly': False})] mock_client.assert_has_calls(expected) def test_create_volume_from_snapshot_and_extend(self): # setup_mock_client drive with default configuration # and return the mock HTTP 3PAR client conf = { 'getTask.return_value': { 'status': 1}, 'copyVolume.return_value': {'taskid': 1}, 'getVolume.return_value': {} } mock_client = self.setup_driver(mock_conf=conf) mock_client.getVolumeSnapshots.return_value = [] with mock.patch.object(hpecommon.HPE3PARCommon, '_create_client') as mock_create_client: mock_create_client.return_value = mock_client common = self.driver._login() volume = self.volume_snapshot.copy() volume['size'] = self.volume['size'] + 10 model_update = self.driver.create_volume_from_snapshot( volume, self.snapshot) self.assertIsNone(model_update) comment = Comment({ "snapshot_id": "2f823bdc-e36e-4dc8-bd15-de1c7a28ff31", "display_name": "Foo Volume", "volume_id": "d03338a9-9115-48a3-8dfc-35cdfcdc15a7", }) volume_name_3par = common._encode_name(volume['id']) osv_matcher = 'osv-' + volume_name_3par omv_matcher = 'omv-' + volume_name_3par expected = [ mock.call.createSnapshot( self.VOLUME_3PAR_NAME, 'oss-L4I73ONuTci9Fd4ceij-MQ', { 'comment': comment, 'readOnly': False}), mock.call.getVolumeSnapshots(self.VOLUME_3PAR_NAME), mock.call.copyVolume( osv_matcher, omv_matcher, HPE3PAR_CPG, mock.ANY), mock.call.getTask(mock.ANY), mock.call.getVolume(osv_matcher), mock.call.deleteVolume(osv_matcher), mock.call.modifyVolume(omv_matcher, {'newName': osv_matcher}), mock.call.growVolume(osv_matcher, 10 * 1024)] mock_client.assert_has_calls(expected) @mock.patch.object(volume_types, 'get_volume_type') def test_create_volume_from_snapshot_and_extend_with_qos( self, _mock_volume_types): # setup_mock_client drive with default configuration # and return the mock HTTP 3PAR client conf = { 'getTask.return_value': { 'status': 1}, 'copyVolume.return_value': {'taskid': 1}, 'getVolume.return_value': {} } mock_client = self.setup_driver(mock_conf=conf) mock_client.getVolumeSnapshots.return_value = [] _mock_volume_types.return_value = { 'name': 'gold', 'extra_specs': { 'cpg': HPE3PAR_CPG_QOS, 'snap_cpg': HPE3PAR_CPG_SNAP, 'vvs_name': self.VVS_NAME, 'qos': self.QOS, 
'tpvv': True, 'tdvv': False, 'volume_type': self.volume_type}} with mock.patch.object(hpecommon.HPE3PARCommon, '_create_client') as mock_create_client: mock_create_client.return_value = mock_client common = self.driver._login() volume = self.volume_qos.copy() volume['size'] = self.volume['size'] + 10 model_update = self.driver.create_volume_from_snapshot( volume, self.snapshot) self.assertIsNone(model_update) comment = Comment({ "snapshot_id": "2f823bdc-e36e-4dc8-bd15-de1c7a28ff31", "display_name": "Foo Volume", "volume_id": "d03338a9-9115-48a3-8dfc-35cdfcdc15a7", }) volume_name_3par = common._encode_name(volume['id']) osv_matcher = 'osv-' + volume_name_3par omv_matcher = 'omv-' + volume_name_3par expected = [ mock.call.createSnapshot( self.VOLUME_3PAR_NAME, 'oss-L4I73ONuTci9Fd4ceij-MQ', { 'comment': comment, 'readOnly': False}), mock.call.getCPG(HPE3PAR_CPG), mock.call.getVolumeSnapshots(self.VOLUME_3PAR_NAME), mock.call.copyVolume( osv_matcher, omv_matcher, HPE3PAR_CPG, mock.ANY), mock.call.getTask(mock.ANY), mock.call.getVolume(osv_matcher), mock.call.deleteVolume(osv_matcher), mock.call.modifyVolume(omv_matcher, {'newName': osv_matcher}), mock.call.growVolume(osv_matcher, 10 * 1024)] mock_client.assert_has_calls(expected) def test_create_volume_from_snapshot_and_extend_copy_fail(self): # setup_mock_client drive with default configuration # and return the mock HTTP 3PAR client conf = { 'getTask.return_value': { 'status': 4, 'failure message': 'out of disk space'}, 'copyVolume.return_value': {'taskid': 1}, 'getVolume.return_value': {} } mock_client = self.setup_driver(mock_conf=conf) with mock.patch.object(hpecommon.HPE3PARCommon, '_create_client') as mock_create_client: mock_create_client.return_value = mock_client volume = self.volume_snapshot.copy() volume['size'] = self.volume['size'] + 10 self.assertRaises(exception.CinderException, self.driver.create_volume_from_snapshot, volume, self.snapshot) @mock.patch.object(volume_types, 'get_volume_type') def test_create_volume_from_snapshot_qos(self, _mock_volume_types): # setup_mock_client drive with default configuration # and return the mock HTTP 3PAR client mock_client = self.setup_driver() _mock_volume_types.return_value = { 'name': 'gold', 'extra_specs': { 'cpg': HPE3PAR_CPG, 'snap_cpg': HPE3PAR_CPG_SNAP, 'vvs_name': self.VVS_NAME, 'qos': self.QOS, 'tpvv': True, 'tdvv': False, 'volume_type': self.volume_type}} with mock.patch.object(hpecommon.HPE3PARCommon, '_create_client') as mock_create_client: mock_create_client.return_value = mock_client self.driver._login() volume = self.volume_qos.copy() model_update = self.driver.create_volume_from_snapshot( volume, self.snapshot) self.assertEqual(model_update, {}) comment = Comment({ "snapshot_id": "2f823bdc-e36e-4dc8-bd15-de1c7a28ff31", "display_name": "Foo Volume", "volume_id": "d03338a9-9115-48a3-8dfc-35cdfcdc15a7", }) expected = [ mock.call.createSnapshot( self.VOLUME_3PAR_NAME, 'oss-L4I73ONuTci9Fd4ceij-MQ', { 'comment': comment, 'readOnly': False})] mock_client.assert_has_calls(expected) @mock.patch.object(volume_types, 'get_volume_type') def test_create_volume_from_snapshot_as_child(self, _mock_volume_types): # setup_mock_client drive with default configuration # and return the mock HTTP 3PAR client mock_client = self.setup_driver() _mock_volume_types.return_value = self.volume_type_hos with mock.patch.object(hpecommon.HPE3PARCommon, '_create_client') as mock_create_client: mock_create_client.return_value = mock_client self.driver._login() volume = self.volume_hos.copy() model_update = 
self.driver.create_volume_from_snapshot( volume, self.snapshot) self.assertEqual(model_update, {}) comment = Comment({ "snapshot_id": "2f823bdc-e36e-4dc8-bd15-de1c7a28ff31", "display_name": "Foo Volume", "volume_id": "d03338a9-9115-48a3-8dfc-35cdfcdc15a7", }) expected = [ mock.call.createSnapshot( self.VOLUME_3PAR_NAME, 'oss-L4I73ONuTci9Fd4ceij-MQ', { 'comment': comment, 'readOnly': False})] mock_client.assert_has_calls(expected) @mock.patch.object(volume_types, 'get_volume_type') def test_create_volume_from_snapshot_as_base(self, _mock_volume_types): # setup_mock_client drive with default configuration # and return the mock HTTP 3PAR client conf = { 'getTask.return_value': { 'status': 1}, 'copyVolume.return_value': {'taskid': 1}, 'getVolume.return_value': {} } mock_client = self.setup_driver(mock_conf=conf) mock_client.getVolumeSnapshots.return_value = [] volume_type_hos = copy.deepcopy(self.volume_type_hos) volume_type_hos['extra_specs']['convert_to_base'] = True _mock_volume_types.return_value = volume_type_hos with mock.patch.object(hpecommon.HPE3PARCommon, '_create_client') as mock_create_client: mock_create_client.return_value = mock_client common = self.driver._login() volume = self.volume_hos.copy() model_update = self.driver.create_volume_from_snapshot( volume, self.snapshot) self.assertIsNone(model_update) comment = Comment({ "snapshot_id": "2f823bdc-e36e-4dc8-bd15-de1c7a28ff31", "display_name": "Foo Volume", "volume_id": "d03338a9-9115-48a3-8dfc-35cdfcdc15a7", }) volume_name_3par = common._encode_name(volume['id']) osv_matcher = 'osv-' + volume_name_3par omv_matcher = 'omv-' + volume_name_3par expected = [ mock.call.createSnapshot( self.VOLUME_3PAR_NAME, 'oss-L4I73ONuTci9Fd4ceij-MQ', { 'comment': comment, 'readOnly': False}), mock.call.getVolumeSnapshots(self.VOLUME_3PAR_NAME), mock.call.copyVolume( osv_matcher, omv_matcher, HPE3PAR_CPG, mock.ANY), mock.call.getTask(mock.ANY), mock.call.getVolume(osv_matcher), mock.call.deleteVolume(osv_matcher), mock.call.modifyVolume(omv_matcher, {'newName': osv_matcher})] mock_client.assert_has_calls(expected) @mock.patch.object(volume_types, 'get_volume_type') def test_create_volume_from_snapshot_as_child_and_extend( self, _mock_volume_types): # setup_mock_client drive with default configuration # and return the mock HTTP 3PAR client conf = { 'getTask.return_value': { 'status': 1}, 'copyVolume.return_value': {'taskid': 1}, 'getVolume.return_value': {} } mock_client = self.setup_driver(mock_conf=conf) mock_client.getVolumeSnapshots.return_value = [] _mock_volume_types.return_value = self.volume_type_hos with mock.patch.object(hpecommon.HPE3PARCommon, '_create_client') as mock_create_client: mock_create_client.return_value = mock_client common = self.driver._login() volume = self.volume_hos.copy() volume['size'] = self.volume['size'] + 10 model_update = self.driver.create_volume_from_snapshot( volume, self.snapshot) self.assertIsNone(model_update) comment = Comment({ "snapshot_id": "2f823bdc-e36e-4dc8-bd15-de1c7a28ff31", "display_name": "Foo Volume", "volume_id": "d03338a9-9115-48a3-8dfc-35cdfcdc15a7", }) volume_name_3par = common._encode_name(volume['id']) osv_matcher = 'osv-' + volume_name_3par omv_matcher = 'omv-' + volume_name_3par expected = [ mock.call.createSnapshot( self.VOLUME_3PAR_NAME, 'oss-L4I73ONuTci9Fd4ceij-MQ', { 'comment': comment, 'readOnly': False}), mock.call.getVolumeSnapshots(self.VOLUME_3PAR_NAME), mock.call.copyVolume( osv_matcher, omv_matcher, HPE3PAR_CPG, mock.ANY), mock.call.getTask(mock.ANY), 
mock.call.getVolume(osv_matcher), mock.call.deleteVolume(osv_matcher), mock.call.modifyVolume(omv_matcher, {'newName': osv_matcher}), mock.call.growVolume(osv_matcher, 10 * 1024)] mock_client.assert_has_calls(expected) @mock.patch.object(volume_types, 'get_volume_type') def test_create_volume_from_snapshot_as_base_and_extend( self, _mock_volume_types): # setup_mock_client drive with default configuration # and return the mock HTTP 3PAR client conf = { 'getTask.return_value': { 'status': 1}, 'copyVolume.return_value': {'taskid': 1}, 'getVolume.return_value': {} } mock_client = self.setup_driver(mock_conf=conf) mock_client.getVolumeSnapshots.return_value = [] volume_type_hos = copy.deepcopy(self.volume_type_hos) volume_type_hos['extra_specs']['convert_to_base'] = True _mock_volume_types.return_value = volume_type_hos with mock.patch.object(hpecommon.HPE3PARCommon, '_create_client') as mock_create_client: mock_create_client.return_value = mock_client common = self.driver._login() volume = self.volume_hos.copy() volume['size'] = self.volume['size'] + 10 model_update = self.driver.create_volume_from_snapshot( volume, self.snapshot) self.assertIsNone(model_update) comment = Comment({ "snapshot_id": "2f823bdc-e36e-4dc8-bd15-de1c7a28ff31", "display_name": "Foo Volume", "volume_id": "d03338a9-9115-48a3-8dfc-35cdfcdc15a7", }) volume_name_3par = common._encode_name(volume['id']) osv_matcher = 'osv-' + volume_name_3par omv_matcher = 'omv-' + volume_name_3par expected = [ mock.call.createSnapshot( self.VOLUME_3PAR_NAME, 'oss-L4I73ONuTci9Fd4ceij-MQ', { 'comment': comment, 'readOnly': False}), mock.call.getVolumeSnapshots(self.VOLUME_3PAR_NAME), mock.call.copyVolume( osv_matcher, omv_matcher, HPE3PAR_CPG, mock.ANY), mock.call.getTask(mock.ANY), mock.call.getVolume(osv_matcher), mock.call.deleteVolume(osv_matcher), mock.call.modifyVolume(omv_matcher, {'newName': osv_matcher}), mock.call.growVolume(osv_matcher, 10 * 1024)] mock_client.assert_has_calls(expected) def test_terminate_connection_from_primary_when_failed_over(self): # setup_mock_client drive with default configuration # and return the mock HTTP 3PAR client mock_client = self.setup_driver() mock_client.getHostVLUNs.side_effect = hpeexceptions.HTTPNotFound( error={'desc': 'The host does not exist.'}) with mock.patch.object(hpecommon.HPE3PARCommon, '_create_client') as mock_create_client: mock_create_client.return_value = mock_client self.driver._active_backend_id = 'some_id' self.driver.terminate_connection( self.volume, self.connector, force=True) # When the volume is still attached to the primary array after a # fail-over, there should be no call to delete the VLUN(s) or the # host. We can assert these methods were not called to make sure # the proper exceptions are being raised. 
self.assertEqual(0, mock_client.deleteVLUN.call_count) def test_terminate_connection_from_primary_when_group_failed_over(self): mock_conf = { 'getStorageSystemInfo.return_value': { 'id': self.REPLICATION_CLIENT_ID, 'name': 'CSIM-EOS12_1611702'}} conf = self.setup_configuration() conf.replication_device = self.replication_targets mock_client = self.setup_driver(config=conf, mock_conf=mock_conf) mock_client.getHostVLUNs.side_effect = hpeexceptions.HTTPNotFound( error={'desc': 'The host does not exist.'}) with mock.patch.object(hpecommon.HPE3PARCommon, '_create_client') as mock_create_client: mock_create_client.return_value = mock_client volume = self.volume_tiramisu.copy() volume['replication_status'] = 'failed-over' volume['replication_driver_data'] = self.REPLICATION_CLIENT_ID self.driver._active_backend_id = "CSIM-EOS12_1611702" self.driver.terminate_connection( self.volume, self.connector, force=True) # When the volume is still attached to the primary array after a # fail-over, there should be no call to delete the VLUN(s) or the # host. We can assert these methods were not called to make sure # the proper exceptions are being raised. self.assertEqual(0, mock_client.deleteVLUN.call_count) def test_extend_volume(self): # setup_mock_client drive with default configuration # and return the mock HTTP 3PAR client mock_client = self.setup_driver() with mock.patch.object(hpecommon.HPE3PARCommon, '_create_client') as mock_create_client: mock_create_client.return_value = mock_client grow_size = 3 old_size = self.volume['size'] new_size = old_size + grow_size self.driver.extend_volume(self.volume, str(new_size)) growth_size_mib = grow_size * units.Ki expected = [ mock.call.growVolume(self.VOLUME_3PAR_NAME, growth_size_mib)] mock_client.assert_has_calls(expected) def test_extend_volume_non_base(self): extend_ex = hpeexceptions.HTTPForbidden(error={'code': 150}) conf = { 'getTask.return_value': { 'status': 1}, 'getCPG.return_value': {}, 'copyVolume.return_value': {'taskid': 1}, 'getVolume.return_value': {}, # Throw an exception first time only 'growVolume.side_effect': [extend_ex, None], } mock_client = self.setup_driver(mock_conf=conf) mock_client.getVolumeSnapshots.return_value = [] with mock.patch.object(hpecommon.HPE3PARCommon, '_create_client') as mock_create_client: mock_create_client.return_value = mock_client grow_size = 3 old_size = self.volume['size'] new_size = old_size + grow_size self.driver.extend_volume(self.volume, str(new_size)) self.assertEqual(2, mock_client.growVolume.call_count) def test_extend_volume_non_base_failure(self): extend_ex = hpeexceptions.HTTPForbidden(error={'code': 150}) conf = { 'getTask.return_value': { 'status': 1}, 'getCPG.return_value': {}, 'copyVolume.return_value': {'taskid': 1}, 'getVolume.return_value': {}, # Always fail 'growVolume.side_effect': extend_ex } mock_client = self.setup_driver(mock_conf=conf) mock_client.getVolumeSnapshots.return_value = [] with mock.patch.object(hpecommon.HPE3PARCommon, '_create_client') as mock_create_client: mock_create_client.return_value = mock_client grow_size = 3 old_size = self.volume['size'] new_size = old_size + grow_size self.assertRaises(hpeexceptions.HTTPForbidden, self.driver.extend_volume, self.volume, str(new_size)) def test__convert_to_base_volume_failure(self): mock_client = self.setup_driver() mock_client.getVolumeSnapshots.return_value = ( ['oss-nwJVbXaEQMi0w.xPutFRQw']) with mock.patch.object(hpecommon.HPE3PARCommon, '_create_client') as mock_create_client: mock_create_client.return_value = mock_client 
common = self.driver._login() self.assertRaises(exception.VolumeIsBusy, common._convert_to_base_volume, self.volume) @mock.patch.object(volume_types, 'get_volume_type') def test_extend_volume_replicated(self, _mock_volume_types): # Managed vs. unmanaged and periodic vs. sync are not relevant when # extending a replicated volume type. # We will use managed and periodic as the default. conf = self.setup_configuration() self.replication_targets[0]['replication_mode'] = 'periodic' conf.replication_device = self.replication_targets mock_client = self.setup_driver(config=conf) mock_client.getStorageSystemInfo.return_value = ( {'id': self.CLIENT_ID}) _mock_volume_types.return_value = { 'name': 'replicated', 'extra_specs': { 'cpg': HPE3PAR_CPG, 'snap_cpg': HPE3PAR_CPG_SNAP, 'replication_enabled': ' True', 'replication:mode': 'periodic', 'replication:sync_period': '900', 'volume_type': self.volume_type_replicated}} with mock.patch.object( hpecommon.HPE3PARCommon, '_create_client') as mock_create_client: mock_create_client.return_value = mock_client grow_size = 3 old_size = self.volume_replicated['size'] new_size = old_size + grow_size # Test a successful extend. self.driver.extend_volume( self.volume_replicated, new_size) expected = [ mock.call.stopRemoteCopy(self.RCG_3PAR_NAME), mock.call.growVolume(self.VOLUME_3PAR_NAME, grow_size * 1024), mock.call.startRemoteCopy(self.RCG_3PAR_NAME)] mock_client.assert_has_calls(expected) # Test an unsuccessful extend. growVolume will fail but remote # copy should still be started again. mock_client.growVolume.side_effect = ( hpeexceptions.HTTPForbidden("Error: The volume cannot be " "extended.")) self.assertRaises( hpeexceptions.HTTPForbidden, self.driver.extend_volume, self.volume_replicated, new_size) expected = [ mock.call.stopRemoteCopy(self.RCG_3PAR_NAME), mock.call.growVolume(self.VOLUME_3PAR_NAME, grow_size * 1024), mock.call.startRemoteCopy(self.RCG_3PAR_NAME)] mock_client.assert_has_calls(expected) def test_get_ports(self): # setup_mock_client drive with default configuration # and return the mock HTTP 3PAR client mock_client = self.setup_driver() mock_client.getPorts.return_value = { 'members': [ {'portPos': {'node': 0, 'slot': 8, 'cardPort': 2}, 'protocol': 2, 'IPAddr': '10.10.120.252', 'linkState': 4, 'device': [], 'iSCSIName': 'iqn.2000-05.com.3pardata:21810002ac00383d', 'mode': 2, 'HWAddr': '2C27D75375D2', 'type': 8}, {'portPos': {'node': 1, 'slot': 8, 'cardPort': 1}, 'protocol': 2, 'IPAddr': '10.10.220.253', 'linkState': 4, 'device': [], 'iSCSIName': 'iqn.2000-05.com.3pardata:21810002ac00383d', 'mode': 2, 'HWAddr': '2C27D75375D6', 'type': 8}, {'portWWN': '20210002AC00383D', 'protocol': 1, 'linkState': 4, 'mode': 2, 'device': ['cage2'], 'nodeWWN': '20210002AC00383D', 'type': 2, 'portPos': {'node': 0, 'slot': 6, 'cardPort': 3}}]} with mock.patch.object(hpecommon.HPE3PARCommon, '_create_client') as mock_create_client: mock_create_client.return_value = mock_client common = self.driver._login() ports = common.get_ports()['members'] self.assertEqual(3, len(ports)) def test_get_by_qos_spec_with_scoping(self): mock_client = self.setup_driver() with mock.patch.object(hpecommon.HPE3PARCommon, '_create_client') as mock_create_client: mock_create_client.return_value = mock_client common = self.driver._login() qos_ref = qos_specs.create(self.ctxt, 'qos-specs-1', self.QOS) type_ref = volume_types.create(self.ctxt, "type1", {"qos:maxIOPS": "100", "qos:maxBWS": "50", "qos:minIOPS": "10", "qos:minBWS": "20", "qos:latency": "5", "qos:priority": "high"}) 
qos_specs.associate_qos_with_type(self.ctxt, qos_ref['id'], type_ref['id']) type_ref = volume_types.get_volume_type(self.ctxt, type_ref['id']) qos = common._get_qos_by_volume_type(type_ref) self.assertEqual({'maxIOPS': '1000', 'maxBWS': '50', 'minIOPS': '100', 'minBWS': '25', 'latency': '25', 'priority': 'low'}, qos) def test_get_by_qos_spec(self): mock_client = self.setup_driver() with mock.patch.object(hpecommon.HPE3PARCommon, '_create_client') as mock_create_client: mock_create_client.return_value = mock_client common = self.driver._login() qos_ref = qos_specs.create( self.ctxt, 'qos-specs-1', self.QOS_SPECS) type_ref = volume_types.create(self.ctxt, "type1", {"qos:maxIOPS": "100", "qos:maxBWS": "50", "qos:minIOPS": "10", "qos:minBWS": "20", "qos:latency": "5", "qos:priority": "high"}) qos_specs.associate_qos_with_type(self.ctxt, qos_ref['id'], type_ref['id']) type_ref = volume_types.get_volume_type(self.ctxt, type_ref['id']) qos = common._get_qos_by_volume_type(type_ref) self.assertEqual({'maxIOPS': '1000', 'maxBWS': '50', 'minIOPS': '100', 'minBWS': '25', 'latency': '25', 'priority': 'low'}, qos) def test_get_by_qos_by_type_only(self): mock_client = self.setup_driver() with mock.patch.object(hpecommon.HPE3PARCommon, '_create_client') as mock_create_client: mock_create_client.return_value = mock_client common = self.driver._login() type_ref = volume_types.create(self.ctxt, "type1", {"qos:maxIOPS": "100", "qos:maxBWS": "50", "qos:minIOPS": "10", "qos:minBWS": "20", "qos:latency": "5", "qos:priority": "high"}) type_ref = volume_types.get_volume_type(self.ctxt, type_ref['id']) qos = common._get_qos_by_volume_type(type_ref) self.assertEqual({'maxIOPS': '100', 'maxBWS': '50', 'minIOPS': '10', 'minBWS': '20', 'latency': '5', 'priority': 'high'}, qos) def test_create_vlun(self): host = 'fake-host' lun_id = 11 nsp = '1:2:3' mock_client = self.setup_driver() with mock.patch.object(hpecommon.HPE3PARCommon, '_create_client') as mock_create_client: mock_create_client.return_value = mock_client location = ("%(name)s,%(lunid)s,%(host)s,%(nsp)s" % {'name': self.VOLUME_NAME, 'lunid': lun_id, 'host': host, 'nsp': nsp}) mock_client.createVLUN.return_value = location expected_info = {'volume_name': self.VOLUME_NAME, 'lun_id': lun_id, 'host_name': host, 'nsp': nsp} common = self.driver._login() vlun_info = common._create_3par_vlun( self.VOLUME_NAME, host, nsp) self.assertEqual(expected_info, vlun_info) location = ("%(name)s,%(lunid)s,%(host)s" % {'name': self.VOLUME_NAME, 'lunid': lun_id, 'host': host}) mock_client.createVLUN.return_value = location expected_info = {'volume_name': self.VOLUME_NAME, 'lun_id': lun_id, 'host_name': host} vlun_info = common._create_3par_vlun( self.VOLUME_NAME, host, None) self.assertEqual(expected_info, vlun_info) def test_create_vlun_vlunid_zero(self): # This will test "auto" for deactive when Lun ID is 0 host = 'fake-host' lun_id = 0 nsp = '0:1:1' port = {'node': 0, 'slot': 1, 'cardPort': 1} mock_client = self.setup_driver() with mock.patch.object(hpecommon.HPE3PARCommon, '_create_client') as mock_create_client: mock_create_client.return_value = mock_client # _create_3par_vlun with nsp location = ("%(name)s,%(lunid)s,%(host)s,%(nsp)s" % {'name': self.VOLUME_NAME, 'lunid': lun_id, 'host': host, 'nsp': nsp}) mock_client.createVLUN.return_value = location expected_info = {'volume_name': self.VOLUME_NAME, 'lun_id': lun_id, 'host_name': host, 'nsp': nsp} common = self.driver._login() vlun_info = common._create_3par_vlun( self.VOLUME_NAME, host, nsp, lun_id=lun_id) 
self.assertEqual(expected_info, vlun_info) mock_client.createVLUN.assert_called_once_with(self.VOLUME_NAME, hostname=host, auto=False, portPos=port, lun=lun_id) # _create_3par_vlun without nsp mock_client.reset_mock() location = ("%(name)s,%(lunid)s,%(host)s" % {'name': self.VOLUME_NAME, 'lunid': lun_id, 'host': host}) mock_client.createVLUN.return_value = location expected_info = {'volume_name': self.VOLUME_NAME, 'lun_id': lun_id, 'host_name': host} vlun_info = common._create_3par_vlun( self.VOLUME_NAME, host, None, lun_id=lun_id) self.assertEqual(expected_info, vlun_info) mock_client.createVLUN.assert_called_once_with(self.VOLUME_NAME, hostname=host, auto=False, lun=lun_id) def test__get_existing_volume_ref_name(self): mock_client = self.setup_driver() with mock.patch.object(hpecommon.HPE3PARCommon, '_create_client') as mock_create_client: mock_create_client.return_value = mock_client common = self.driver._login() unm_matcher = common._get_3par_unm_name(self.volume['id']) ums_matcher = common._get_3par_ums_name(self.volume['id']) existing_ref = {'source-name': unm_matcher} result = common._get_existing_volume_ref_name(existing_ref) self.assertEqual(unm_matcher, result) existing_ref = {'source-id': self.volume['id']} result = common._get_existing_volume_ref_name(existing_ref) self.assertEqual(unm_matcher, result) existing_ref = {'source-id': self.volume['id']} result = common._get_existing_volume_ref_name(existing_ref, True) self.assertEqual(ums_matcher, result) existing_ref = {'bad-key': 'foo'} self.assertRaises( exception.ManageExistingInvalidReference, common._get_existing_volume_ref_name, existing_ref) @mock.patch.object(volume_types, 'get_volume_type') def test_manage_existing(self, _mock_volume_types): _mock_volume_types.return_value = self.volume_type mock_client = self.setup_driver() new_comment = Comment({ "display_name": "Foo Volume", "name": "volume-007dbfce-7579-40bc-8f90-a20b3902283e", "volume_id": "007dbfce-7579-40bc-8f90-a20b3902283e", "type": "OpenStack", }) volume = {'display_name': None, 'host': self.FAKE_CINDER_HOST, 'volume_type': 'gold', 'volume_type_id': 'acfa9fa4-54a0-4340-a3d8-bfcf19aea65e', 'id': '007dbfce-7579-40bc-8f90-a20b3902283e'} mock_client.getVolume.return_value = self.MANAGE_VOLUME_INFO mock_client.tuneVolume.return_value = ({'taskid': 1}) mock_client.getTask.return_value = self.STATUS_DONE with mock.patch.object(hpecommon.HPE3PARCommon, '_create_client') as mock_create_client: mock_create_client.return_value = mock_client common = self.driver._login() unm_matcher = common._get_3par_unm_name(self.volume['id']) osv_matcher = common._get_3par_vol_name(volume['id']) vvs_matcher = common._get_3par_vvs_name(volume['id']) existing_ref = {'source-name': unm_matcher} expected_obj = {'display_name': 'Foo Volume'} obj = self.driver.manage_existing(volume, existing_ref) expected_manage = [ mock.call.getVolume(existing_ref['source-name']), mock.call.modifyVolume(existing_ref['source-name'], {'newName': osv_matcher, 'comment': new_comment}), ] retype_comment_qos = Comment({ "display_name": "Foo Volume", "volume_type_name": self.volume_type['name'], "volume_type_id": self.volume_type['id'], "qos": { 'maxIOPS': '1000', 'maxBWS': '50', 'minIOPS': '100', 'minBWS': '25', 'latency': '25', 'priority': 'low' } }) expected_snap_cpg = HPE3PAR_CPG_SNAP expected_retype_modify = [ mock.call.modifyVolume(osv_matcher, {'comment': retype_comment_qos, 'snapCPG': expected_snap_cpg}), mock.call.deleteVolumeSet(vvs_matcher), ] expected_retype_specs = [ 
mock.call.createVolumeSet(vvs_matcher, None), mock.call.createQoSRules( vvs_matcher, {'ioMinGoal': 100, 'ioMaxLimit': 1000, 'bwMinGoalKB': 25600, 'priority': 1, 'latencyGoal': 25, 'bwMaxLimitKB': 51200}), mock.call.addVolumeToVolumeSet(vvs_matcher, osv_matcher), mock.call.tuneVolume( osv_matcher, 1, {'action': 6, 'userCPG': HPE3PAR_CPG, 'conversionOperation': 1, 'compression': False}), mock.call.getTask(1) ] mock_client.assert_has_calls(expected_manage) mock_client.assert_has_calls(expected_retype_modify) mock_client.assert_has_calls( expected_retype_specs) self.assertEqual(expected_obj, obj) # (i) wsapi version is old/default # (ii) wsapi version is 2023, then snapCPG isn't required @ddt.data({'wsapi_version': None}, {'wsapi_version': HPE3PARBaseDriver.wsapi_version_2023}) @ddt.unpack @mock.patch.object(volume_types, 'get_volume_type') def test_manage_existing_with_no_snap_cpg(self, _mock_volume_types, wsapi_version): _mock_volume_types.return_value = self.volume_type mock_client = self.setup_driver(wsapi_version=wsapi_version) new_comment = Comment({ "display_name": "Foo Volume", "name": "volume-007dbfce-7579-40bc-8f90-a20b3902283e", "volume_id": "007dbfce-7579-40bc-8f90-a20b3902283e", "type": "OpenStack", }) volume = {'display_name': None, 'host': 'my-stack1@3parxxx#CPGNOTUSED', 'volume_type': 'gold', 'volume_type_id': 'acfa9fa4-54a0-4340-a3d8-bfcf19aea65e', 'id': '007dbfce-7579-40bc-8f90-a20b3902283e'} mock_client.getVolume.return_value = self.MV_INFO_WITH_NO_SNAPCPG mock_client.modifyVolume.return_value = ("anyResponse", {'taskid': 1}) mock_client.getTask.return_value = self.STATUS_DONE with mock.patch.object(hpecommon.HPE3PARCommon, '_create_client') as mock_create_client: mock_create_client.return_value = mock_client common = self.driver._login() unm_matcher = common._get_3par_unm_name(self.volume['id']) osv_matcher = common._get_3par_vol_name(volume['id']) existing_ref = {'source-name': unm_matcher} expected_obj = {'display_name': 'Foo Volume'} obj = self.driver.manage_existing(volume, existing_ref) optional = {'newName': osv_matcher, 'comment': new_comment} if not wsapi_version: # (i) old/default # manage_existing() should be setting # blank snapCPG to the userCPG optional['snapCPG'] = 'testUserCpg0' expected_manage = [ mock.call.getVolume(existing_ref['source-name']), mock.call.modifyVolume( existing_ref['source-name'], optional) ] mock_client.assert_has_calls(self.standard_login + expected_manage) self.assertEqual(expected_obj, obj) @mock.patch.object(volume_types, 'get_volume_type') def test_manage_existing_vvs(self, _mock_volume_types): test_volume_type = self.RETYPE_VOLUME_TYPE_2 vvs = test_volume_type['extra_specs']['vvs'] _mock_volume_types.return_value = test_volume_type mock_client = self.setup_driver() mock_client.getVolume.return_value = self.MANAGE_VOLUME_INFO mock_client.tuneVolume.return_value = ({'taskid': 1}) mock_client.getTask.return_value = self.STATUS_DONE id = '007abcde-7579-40bc-8f90-a20b3902283e' new_comment = Comment({ "display_name": "Test Volume", "name": ("volume-%s" % id), "volume_id": id, "type": "OpenStack", }) volume = {'display_name': 'Test Volume', 'host': 'my-stack1@3parxxx#CPGNOTUSED', 'volume_type': 'gold', 'volume_type_id': 'acfa9fa4-54a0-4340-a3d8-bfcf19aea65e', 'id': id} with mock.patch.object(hpecommon.HPE3PARCommon, '_create_client') as mock_create_client: mock_create_client.return_value = mock_client common = self.driver._login() unm_matcher = common._get_3par_unm_name(self.volume['id']) osv_matcher = 
common._get_3par_vol_name(volume['id']) vvs_matcher = common._get_3par_vvs_name(volume['id']) existing_ref = {'source-name': unm_matcher} obj = self.driver.manage_existing(volume, existing_ref) expected_obj = {'display_name': 'Test Volume'} expected_manage = [ mock.call.getVolume(existing_ref['source-name']), mock.call.modifyVolume(existing_ref['source-name'], {'newName': osv_matcher, 'comment': new_comment}) ] retype_comment_vvs = Comment({ "display_name": "Foo Volume", "volume_type_name": test_volume_type['name'], "volume_type_id": test_volume_type['id'], "vvs": vvs }) expected_retype = [ mock.call.modifyVolume(osv_matcher, {'comment': retype_comment_vvs, 'snapCPG': 'OpenStackCPGSnap'}), mock.call.deleteVolumeSet(vvs_matcher), mock.call.addVolumeToVolumeSet(vvs, osv_matcher), mock.call.tuneVolume(osv_matcher, 1, {'action': 6, 'userCPG': 'CPGNOTUSED', 'conversionOperation': 1, 'compression': False}), mock.call.getTask(1) ] mock_client.assert_has_calls(expected_manage) mock_client.assert_has_calls( expected_retype) self.assertEqual(expected_obj, obj) def test_manage_existing_no_volume_type(self): mock_client = self.setup_driver() comment = repr({"display_name": "Foo Volume"}) new_comment = Comment({ "type": "OpenStack", "display_name": "Foo Volume", "name": "volume-007dbfce-7579-40bc-8f90-a20b3902283e", "volume_id": "007dbfce-7579-40bc-8f90-a20b3902283e", }) volume = {'display_name': None, 'volume_type': None, 'volume_type_id': None, 'id': '007dbfce-7579-40bc-8f90-a20b3902283e'} mock_client.getVolume.return_value = {'comment': comment, 'userCPG': 'testUserCpg0'} with mock.patch.object(hpecommon.HPE3PARCommon, '_create_client') as mock_create_client: mock_create_client.return_value = mock_client common = self.driver._login() unm_matcher = common._get_3par_unm_name(self.volume['id']) osv_matcher = common._get_3par_vol_name(volume['id']) existing_ref = {'source-name': unm_matcher} obj = self.driver.manage_existing(volume, existing_ref) expected_obj = {'display_name': 'Foo Volume'} expected = [ mock.call.getVolume(existing_ref['source-name']), mock.call.modifyVolume(existing_ref['source-name'], {'newName': osv_matcher, 'comment': new_comment, # manage_existing() should be setting # blank snapCPG to the userCPG 'snapCPG': 'testUserCpg0'}) ] mock_client.assert_has_calls(expected) self.assertEqual(expected_obj, obj) volume['display_name'] = 'Test Volume' obj = self.driver.manage_existing(volume, existing_ref) expected_obj = {'display_name': 'Test Volume'} expected = [ mock.call.getVolume(existing_ref['source-name']), mock.call.modifyVolume(existing_ref['source-name'], {'newName': osv_matcher, 'comment': new_comment, # manage_existing() should be setting # blank snapCPG to the userCPG 'snapCPG': 'testUserCpg0'}) ] mock_client.assert_has_calls(expected) self.assertEqual(expected_obj, obj) mock_client.getVolume.return_value = {'userCPG': 'testUserCpg0'} volume['display_name'] = None common = self.driver._login() obj = self.driver.manage_existing(volume, existing_ref) expected_obj = {'display_name': None} expected = [ mock.call.getVolume(existing_ref['source-name']), mock.call.modifyVolume(existing_ref['source-name'], {'newName': osv_matcher, 'comment': new_comment, # manage_existing() should be setting # blank snapCPG to the userCPG 'snapCPG': 'testUserCpg0'}) ] mock_client.assert_has_calls(expected) self.assertEqual(expected_obj, obj) def test_manage_existing_invalid_input(self): mock_client = self.setup_driver() volume = {'display_name': None, 'volume_type': None, 'id': 
'007dbfce-7579-40bc-8f90-a20b3902283e'} mock_client.getVolume.side_effect = hpeexceptions.HTTPNotFound('fake') with mock.patch.object(hpecommon.HPE3PARCommon, '_create_client') as mock_create_client: mock_create_client.return_value = mock_client common = self.driver._login() unm_matcher = common._get_3par_unm_name(self.volume['id']) existing_ref = {'source-name': unm_matcher} self.assertRaises(exception.InvalidInput, self.driver.manage_existing, volume=volume, existing_ref=existing_ref) expected = [mock.call.getVolume(existing_ref['source-name'])] mock_client.assert_has_calls(expected) def test_manage_existing_volume_type_exception(self): mock_client = self.setup_driver() comment = repr({"display_name": "Foo Volume"}) volume = {'display_name': None, 'volume_type': 'gold', 'volume_type_id': 'bcfa9fa4-54a0-4340-a3d8-bfcf19aea65e', 'id': '007dbfce-7579-40bc-8f90-a20b3902283e'} mock_client.getVolume.return_value = {'comment': comment} with mock.patch.object(hpecommon.HPE3PARCommon, '_create_client') as mock_create_client: mock_create_client.return_value = mock_client common = self.driver._login() unm_matcher = common._get_3par_unm_name(self.volume['id']) existing_ref = {'source-name': unm_matcher} self.assertRaises(exception.ManageExistingVolumeTypeMismatch, self.driver.manage_existing, volume=volume, existing_ref=existing_ref) expected = [mock.call.getVolume(existing_ref['source-name'])] mock_client.assert_has_calls(expected) @mock.patch.object(volume_types, 'get_volume_type') def test_manage_existing_retype_exception(self, _mock_volume_types): mock_client = self.setup_driver() _mock_volume_types.return_value = { 'name': 'gold', 'id': 'gold-id', 'extra_specs': { 'cpg': HPE3PAR_CPG, 'snap_cpg': HPE3PAR_CPG_SNAP, 'vvs_name': self.VVS_NAME, 'qos': self.QOS, 'tpvv': True, 'tdvv': False, 'volume_type': self.volume_type}} volume = {'display_name': None, 'host': 'stack1@3pariscsi#POOL1', 'volume_type': 'gold', 'volume_type_id': 'bcfa9fa4-54a0-4340-a3d8-bfcf19aea65e', 'id': '007dbfce-7579-40bc-8f90-a20b3902283e'} mock_client.getVolume.return_value = self.MANAGE_VOLUME_INFO mock_client.modifyVolume.return_value = ("anyResponse", {'taskid': 1}) mock_client.getTask.return_value = self.STATUS_DONE mock_client.getCPG.side_effect = [ {'domain': 'domain1'}, {'domain': 'domain2'}, {'domain': 'domain3'}, ] with mock.patch.object(hpecommon.HPE3PARCommon, '_create_client') as mock_create_client: mock_create_client.return_value = mock_client common = self.driver._login() unm_matcher = common._get_3par_unm_name(self.volume['id']) osv_matcher = common._get_3par_vol_name(volume['id']) existing_ref = {'source-name': unm_matcher} self.assertRaises(hpecommon.Invalid3PARDomain, self.driver.manage_existing, volume=volume, existing_ref=existing_ref) expected = [ mock.call.getVolume(unm_matcher), mock.call.modifyVolume( unm_matcher, { 'newName': osv_matcher, 'comment': mock.ANY}), mock.call.getCPG('POOL1'), mock.call.getVolume(osv_matcher), mock.call.getCPG('testUserCpg0'), mock.call.getCPG('POOL1'), mock.call.modifyVolume( osv_matcher, {'newName': unm_matcher, 'comment': self.MANAGE_VOLUME_INFO ['comment']}) ] mock_client.assert_has_calls(expected) def test_manage_existing_snapshot(self): mock_client = self.setup_driver() new_comment = Comment({ "display_name": "snap", "volume_name": self.VOLUME_NAME, "volume_id": self.VOLUME_ID, "description": "", }) volume = {'id': self.VOLUME_ID} snapshot = { 'display_name': None, 'id': self.SNAPSHOT_ID, 'volume': volume, } mock_client.getVolume.return_value = { "comment": 
"{'display_name': 'snap'}", 'copyOf': self.VOLUME_NAME_3PAR, } with mock.patch.object(hpecommon.HPE3PARCommon, '_create_client') as mock_create_client: mock_create_client.return_value = mock_client common = self.driver._login() oss_matcher = common._get_3par_snap_name(snapshot['id']) ums_matcher = common._get_3par_ums_name(snapshot['id']) existing_ref = {'source-name': ums_matcher} expected_obj = {'display_name': 'snap'} obj = self.driver.manage_existing_snapshot(snapshot, existing_ref) expected = [ mock.call.getVolume(existing_ref['source-name']), mock.call.modifyVolume(existing_ref['source-name'], {'newName': oss_matcher, 'comment': new_comment}), ] mock_client.assert_has_calls(expected) self.assertEqual(expected_obj, obj) def test_manage_existing_snapshot_invalid_parent(self): mock_client = self.setup_driver() volume = {'id': self.VOLUME_ID} snapshot = { 'display_name': None, 'id': '007dbfce-7579-40bc-8f90-a20b3902283e', 'volume': volume, } mock_client.getVolume.return_value = { "comment": "{'display_name': 'snap'}", 'copyOf': 'fake-invalid', } with mock.patch.object(hpecommon.HPE3PARCommon, '_create_client') as mock_create_client: mock_create_client.return_value = mock_client common = self.driver._login() ums_matcher = common._get_3par_ums_name(snapshot['id']) existing_ref = {'source-name': ums_matcher} self.assertRaises(exception.InvalidInput, self.driver.manage_existing_snapshot, snapshot=snapshot, existing_ref=existing_ref) expected = [ mock.call.getVolume(existing_ref['source-name']), ] mock_client.assert_has_calls(expected) def test_manage_existing_snapshot_failed_over_volume(self): mock_client = self.setup_driver() volume = { 'id': self.VOLUME_ID, 'replication_status': 'failed-over', } snapshot = { 'display_name': None, 'id': '007dbfce-7579-40bc-8f90-a20b3902283e', 'volume': volume, } mock_client.getVolume.return_value = { "comment": "{'display_name': 'snap'}", 'copyOf': self.VOLUME_NAME_3PAR, } with mock.patch.object(hpecommon.HPE3PARCommon, '_create_client') as mock_create_client: mock_create_client.return_value = mock_client common = self.driver._login() ums_matcher = common._get_3par_ums_name(snapshot['id']) existing_ref = {'source-name': ums_matcher} self.assertRaises(exception.InvalidInput, self.driver.manage_existing_snapshot, snapshot=snapshot, existing_ref=existing_ref) def test_manage_existing_get_size(self): mock_client = self.setup_driver() mock_client.getVolume.return_value = {'sizeMiB': 2048} with mock.patch.object(hpecommon.HPE3PARCommon, '_create_client') as mock_create_client: mock_create_client.return_value = mock_client common = self.driver._login() unm_matcher = common._get_3par_unm_name(self.volume['id']) volume = {} existing_ref = {'source-name': unm_matcher} size = self.driver.manage_existing_get_size(volume, existing_ref) expected_size = 2 expected = [mock.call.getVolume(existing_ref['source-name'])] mock_client.assert_has_calls(expected) self.assertEqual(expected_size, size) def test_manage_existing_get_size_invalid_reference(self): mock_client = self.setup_driver() with mock.patch.object(hpecommon.HPE3PARCommon, '_create_client') as mock_create_client: mock_create_client.return_value = mock_client volume = {} existing_ref = {'source-name': self.VOLUME_3PAR_NAME} self.assertRaises(exception.ManageExistingInvalidReference, self.driver.manage_existing_get_size, volume=volume, existing_ref=existing_ref) existing_ref = {} self.assertRaises(exception.ManageExistingInvalidReference, self.driver.manage_existing_get_size, volume=volume, existing_ref=existing_ref) 
def test_manage_existing_get_size_invalid_input(self): mock_client = self.setup_driver() mock_client.getVolume.side_effect = hpeexceptions.HTTPNotFound('fake') with mock.patch.object(hpecommon.HPE3PARCommon, '_create_client') as mock_create_client: mock_create_client.return_value = mock_client common = self.driver._login() unm_matcher = common._get_3par_unm_name(self.volume['id']) volume = {} existing_ref = {'source-name': unm_matcher} self.assertRaises(exception.InvalidInput, self.driver.manage_existing_get_size, volume=volume, existing_ref=existing_ref) expected = [mock.call.getVolume(existing_ref['source-name'])] mock_client.assert_has_calls(expected) def test_manage_existing_snapshot_get_size(self): mock_client = self.setup_driver() mock_client.getVolume.return_value = {'sizeMiB': 2048} with mock.patch.object(hpecommon.HPE3PARCommon, '_create_client') as mock_create_client: mock_create_client.return_value = mock_client common = self.driver._login() ums_matcher = common._get_3par_ums_name(self.snapshot['id']) snapshot = {} existing_ref = {'source-name': ums_matcher} size = self.driver.manage_existing_snapshot_get_size(snapshot, existing_ref) expected_size = 2 expected = [mock.call.getVolume(existing_ref['source-name'])] mock_client.assert_has_calls(expected) self.assertEqual(expected_size, size) def test_manage_existing_snapshot_get_size_invalid_reference(self): mock_client = self.setup_driver() with mock.patch.object(hpecommon.HPE3PARCommon, '_create_client') as mock_create_client: mock_create_client.return_value = mock_client snapshot = {} existing_ref = {'source-name': self.SNAPSHOT_3PAR_NAME} self.assertRaises(exception.ManageExistingInvalidReference, self.driver.manage_existing_snapshot_get_size, snapshot=snapshot, existing_ref=existing_ref) existing_ref = {} self.assertRaises(exception.ManageExistingInvalidReference, self.driver.manage_existing_snapshot_get_size, snapshot=snapshot, existing_ref=existing_ref) def test_manage_existing_snapshot_get_size_invalid_input(self): mock_client = self.setup_driver() mock_client.getVolume.side_effect = hpeexceptions.HTTPNotFound('fake') with mock.patch.object(hpecommon.HPE3PARCommon, '_create_client') as mock_create_client: mock_create_client.return_value = mock_client common = self.driver._login() ums_matcher = common._get_3par_ums_name(self.snapshot['id']) snapshot = {} existing_ref = {'source-name': ums_matcher} self.assertRaises(exception.InvalidInput, self.driver.manage_existing_snapshot_get_size, snapshot=snapshot, existing_ref=existing_ref) expected = [mock.call.getVolume(existing_ref['source-name'])] mock_client.assert_has_calls(expected) def test_unmanage(self): mock_client = self.setup_driver() with mock.patch.object(hpecommon.HPE3PARCommon, '_create_client') as mock_create_client: mock_create_client.return_value = mock_client common = self.driver._login() self.driver.unmanage(self.volume) osv_matcher = common._get_3par_vol_name(self.volume['id']) unm_matcher = common._get_3par_unm_name(self.volume['id']) expected = [ mock.call.modifyVolume(osv_matcher, {'newName': unm_matcher}) ] mock_client.assert_has_calls(expected) def test_unmanage_snapshot(self): mock_client = self.setup_driver() with mock.patch.object(hpecommon.HPE3PARCommon, '_create_client') as mock_create_client: mock_create_client.return_value = mock_client common = self.driver._login() self.driver.unmanage_snapshot(self.snapshot) oss_matcher = common._get_3par_snap_name(self.snapshot['id']) ums_matcher = common._get_3par_ums_name(self.snapshot['id']) expected = [ 
mock.call.modifyVolume(oss_matcher, {'newName': ums_matcher}) ] mock_client.assert_has_calls(expected) def test_unmanage_snapshot_failed_over_volume(self): mock_client = self.setup_driver() volume = {'replication_status': 'failed-over', } snapshot = {'id': self.SNAPSHOT_ID, 'display_name': 'fake_snap', 'volume': volume, } with mock.patch.object(hpecommon.HPE3PARCommon, '_create_client') as mock_create_client: mock_create_client.return_value = mock_client self.assertRaises(exception.SnapshotIsBusy, self.driver.unmanage_snapshot, snapshot=snapshot) def _test_get_manageable(self, cinder_list, expected_output, vol_name, attached=False, snap_name=None): # common test function for: # [a] get_manageable_volumes # [b] get_manageable_snapshots mock_client = self.setup_driver() mock_client.getVolumes.return_value = { 'members': [ {'name': vol_name, 'sizeMiB': 2048, 'userCPG': 'OpenStackCPG'}]} if attached: mock_client.getVLUN.return_value = { 'hostname': 'cssosbe02-b04', } else: mock_client.getVLUN.side_effect = hpeexceptions.HTTPNotFound if snap_name: mock_client.getSnapshotsOfVolume.return_value = [snap_name] with mock.patch.object(hpecommon.HPE3PARCommon, '_create_client') as mock_create_client: mock_create_client.return_value = mock_client common = self.driver._login() if snap_name: actual_output = common.get_manageable_snapshots( cinder_list, None, 1000, 0, ['size'], ['asc']) else: actual_output = self.driver.get_manageable_volumes( cinder_list, None, 1000, 0, ['size'], ['asc']) expected_calls = [] expected_calls.append(mock.call.getVolumes()) if attached: expected_calls.append(mock.call.getVLUN(vol_name)) if snap_name: expected_calls.append( mock.call.getSnapshotsOfVolume('OpenStackCPG', vol_name)) mock_client.assert_has_calls(expected_calls) self.assertEqual(expected_output, actual_output) # (i) volume already managed # (ii) volume currently not managed; but attached to some other host # (iii) volume currently not managed @ddt.data({'cinder_vol': [HPE3PARBaseDriver.volume], 'vol_name': 'osv-0DM4qZEVSKON-DXN-NwVpw', 'safe': False, 'reason': 'Volume already managed', 'cinder_id': 'd03338a9-9115-48a3-8dfc-35cdfcdc15a7'}, {'cinder_vol': [], 'vol_name': 'volume_2', 'safe': False, 'reason': 'Volume attached to host cssosbe02-b04', 'cinder_id': None, 'attached': True}, {'cinder_vol': [], 'vol_name': 'volume_2', 'safe': True, 'reason': None, 'cinder_id': None}) @ddt.unpack def test_get_manageable_volumes(self, cinder_vol, vol_name, safe, reason, cinder_id, attached=False): expected_output = [ {'reference': {'name': vol_name}, 'size': 2, 'safe_to_manage': safe, 'reason_not_safe': reason, 'cinder_id': cinder_id} ] self._test_get_manageable(cinder_vol, expected_output, vol_name, attached) # (i) snapshot already managed # (ii) snapshot currently not managed @ddt.data({'cinder_snapshot': [HPE3PARBaseDriver.snapshot_obj], 'snap_name': 'oss-L4I73ONuTci9Fd4ceij-MQ', 'vol_name': 'osv-CX7Ilh.dQ2.XdNpmqW408A', 'safe': False, 'reason': 'Snapshot already managed', 'cinder_id': '2f823bdc-e36e-4dc8-bd15-de1c7a28ff31'}, {'cinder_snapshot': [], 'snap_name': 'snap_2', 'vol_name': 'volume_2', 'safe': True, 'reason': None, 'cinder_id': None}) @ddt.unpack def test_get_manageable_snapshots(self, cinder_snapshot, snap_name, vol_name, safe, reason, cinder_id): expected_output = [ {'reference': {'name': snap_name}, 'size': 2, 'safe_to_manage': safe, 'reason_not_safe': reason, 'cinder_id': cinder_id, 'source_reference': {'name': vol_name}} ] self._test_get_manageable(cinder_snapshot, expected_output, vol_name, False, 
snap_name) @ddt.data(True, False) def test__safe_hostname(self, in_shared): config = self._set_unique_fqdn_override(True, in_shared) my_connector = self.connector.copy() my_connector['host'] = "abc123abc123abc123abc123abc123abc123" fixed_hostname = "abc123abc123abc123abc123abc123a" mock_client = self.setup_driver() with mock.patch.object(hpecommon.HPE3PARCommon, '_create_client') as mock_create_client: mock_create_client.return_value = mock_client common = self.driver._login() safe_host = common._safe_hostname(my_connector, config) self.assertEqual(fixed_hostname, safe_host) @ddt.data(True, False) def test__safe_hostname_unique(self, in_shared): mock_client = self.setup_driver() with mock.patch.object(hpecommon.HPE3PARCommon, '_create_client') as mock_create_client: mock_create_client.return_value = mock_client common = self.driver._login() config = self._set_unique_fqdn_override(False, in_shared) my_connector = self.connector.copy() my_connector['host'] = "abc123abc123abc123abc123abc123abc123" my_connector['initiator'] = 'iqn.1993-08.org.debian:01:222:12345' ret_name = '54321-222-10-naibed.gro.80-3991' safe_host = common._safe_hostname(my_connector, config) self.assertEqual(ret_name, safe_host) @ddt.data(True, False) def test__safe_hostname_unique_without_initiator(self, in_shared): fixed_hostname = "abc123abc123abc123abc123abc123a" mock_client = self.setup_driver() with mock.patch.object(hpecommon.HPE3PARCommon, '_create_client') as mock_create_client: mock_create_client.return_value = mock_client common = self.driver._login() conf = self._set_unique_fqdn_override(False, in_shared) my_connector = self.connector.copy() del my_connector['initiator'] my_connector['host'] = "abc123abc123abc123abc123abc123abc123" safe_host = common._safe_hostname(my_connector, conf) self.assertEqual(fixed_hostname, safe_host) @mock.patch('cinder.volume.drivers.hpe.hpe_3par_common.HPE3PARCommon.' 'is_volume_group_snap_type') @mock.patch('cinder.volume.volume_utils.is_group_a_cg_snapshot_type') def test_create_group(self, cg_ss_enable, vol_ss_enable): cg_ss_enable.return_value = True vol_ss_enable.return_value = True mock_client = self.setup_driver() mock_client.getStorageSystemInfo.return_value = {'id': self.CLIENT_ID} comment = Comment({ 'group_id': self.GROUP_ID }) with mock.patch.object(hpecommon.HPE3PARCommon, '_create_client') as mock_create_client: mock_create_client.return_value = mock_client mock_client.getCPG.return_value = {'domain': None} # create a group group = self.fake_group_object() self.driver.create_group(context.get_admin_context(), group) expected = [ mock.call.getCPG(HPE3PAR_CPG), mock.call.createVolumeSet( self.CONSIS_GROUP_NAME, domain=None, comment=comment)] mock_client.assert_has_calls(expected) @mock.patch('cinder.volume.drivers.hpe.hpe_3par_common.HPE3PARCommon.' 
'is_volume_group_snap_type') @mock.patch('cinder.volume.volume_utils.is_group_a_cg_snapshot_type') def test_create_group_with_replication_enabled(self, cg_ss_enable, vol_ss_enable): cg_ss_enable.return_value = True vol_ss_enable.return_value = True conf = self.setup_configuration() self.replication_targets[0]['replication_mode'] = 'sync' conf.replication_device = self.replication_targets mock_client = self.setup_driver(config=conf) mock_client.getStorageSystemInfo.return_value = ( {'id': self.CLIENT_ID}) comment = Comment({ 'group_id': self.GROUP_ID }) mock_client.getRemoteCopyGroup.return_value = ( {'volumes': []}) with mock.patch.object( hpecommon.HPE3PARCommon, '_create_client') as mock_create_client: mock_create_client.return_value = mock_client mock_client.getCPG.return_value = {'domain': None} # create a group group = self.fake_group_object() group.is_replicated = True group.volume_types = [self.volume_type_tiramisu] backend_id = self.replication_targets[0]['backend_id'] exp_model_update = {'status': fields.GroupStatus.AVAILABLE, 'replication_status': fields.ReplicationStatus.ENABLED} model_update = \ self.driver.create_group(context.get_admin_context(), group) expected = [ mock.call.getCPG(HPE3PAR_CPG), mock.call.getCPG(HPE3PAR_CPG), mock.call.createRemoteCopyGroup( self.RCG_3PAR_GROUP_NAME, [{'targetName': backend_id, 'mode': SYNC_MODE}], {}), mock.call.getRemoteCopyGroup(self.RCG_3PAR_GROUP_NAME), mock.call.createVolumeSet( self.CONSIS_GROUP_NAME, domain=None, comment=comment)] mock_client.assert_has_calls(expected) self.assertEqual(exp_model_update, model_update) @mock.patch('cinder.volume.drivers.hpe.hpe_3par_common.HPE3PARCommon.' 'is_volume_group_snap_type') @mock.patch('cinder.volume.volume_utils.is_group_a_cg_snapshot_type') def test_delete_empty_group_with_replication_enabled(self, cg_ss_enable, vol_ss_enable): cg_ss_enable.return_value = True vol_ss_enable.return_value = True conf = self.setup_configuration() self.replication_targets[0]['replication_mode'] = 'sync' conf.replication_device = self.replication_targets mock_client = self.setup_driver(config=conf) mock_client.getStorageSystemInfo.return_value = ( {'id': self.CLIENT_ID}) with mock.patch.object( hpecommon.HPE3PARCommon, '_create_client') as mock_create_client: mock_create_client.return_value = mock_client mock_client.getCPG.return_value = {'domain': None} mock_client.getRemoteCopyGroup.return_value = ( {'volumes': []}) # create a consistency group group = self.fake_group_object() group.is_replicated = True group.volume_types = [self.volume_type_tiramisu] group.status = fields.GroupStatus.DELETING self.driver.delete_group(context.get_admin_context(), group, []) expected = [ mock.call.getRemoteCopyGroup(self.RCG_3PAR_GROUP_NAME), mock.call.stopRemoteCopy(self.RCG_3PAR_GROUP_NAME), mock.call.removeRemoteCopyGroup(self.RCG_3PAR_GROUP_NAME), mock.call.deleteVolumeSet( self.CONSIS_GROUP_NAME)] mock_client.assert_has_calls(expected) @mock.patch('cinder.volume.drivers.hpe.hpe_3par_common.HPE3PARCommon.' 
'is_volume_group_snap_type') @mock.patch('cinder.volume.volume_utils.is_group_a_cg_snapshot_type') def test_delete_group_with_replication_enabled(self, cg_ss_enable, vol_ss_enable): cg_ss_enable.return_value = True vol_ss_enable.return_value = True conf = self.setup_configuration() self.replication_targets[0]['replication_mode'] = 'sync' conf.replication_device = self.replication_targets mock_client = self.setup_driver(config=conf) mock_client.getStorageSystemInfo.return_value = ( {'id': self.CLIENT_ID}) exp_volume_model_updates = [{'id': self.volume['id'], 'status': 'deleted'}] with mock.patch.object( hpecommon.HPE3PARCommon, '_create_client') as mock_create_client: mock_create_client.return_value = mock_client mock_client.getCPG.return_value = {'domain': None} mock_client.getRemoteCopyGroup.return_value = ( {'volumes': [{'name': self.VOLUME_3PAR_NAME}]}) # create a consistency group group = self.fake_group_object() group.is_replicated = True group.volume_types = [self.volume_type_tiramisu] group.status = fields.GroupStatus.DELETING model_update, volume_model_updates = ( self.driver.delete_group(context.get_admin_context(), group, [self.volume])) expected = [ mock.call.getRemoteCopyGroup(self.RCG_3PAR_GROUP_NAME), mock.call.stopRemoteCopy(self.RCG_3PAR_GROUP_NAME), mock.call.removeVolumeFromRemoteCopyGroup( self.RCG_3PAR_GROUP_NAME, self.VOLUME_3PAR_NAME, removeFromTarget=True), mock.call.removeRemoteCopyGroup(self.RCG_3PAR_GROUP_NAME), mock.call.deleteVolumeSet( self.CONSIS_GROUP_NAME), mock.call.deleteVolume(self.VOLUME_3PAR_NAME)] mock_client.assert_has_calls(expected) self.assertEqual(exp_volume_model_updates, volume_model_updates) @mock.patch('cinder.volume.drivers.hpe.hpe_3par_common.HPE3PARCommon.' 'is_volume_group_snap_type') @mock.patch('cinder.volume.volume_utils.is_group_a_cg_snapshot_type') def test_enable_group_replication(self, cg_ss_enable, vol_ss_enable): cg_ss_enable.return_value = True vol_ss_enable.return_value = True conf = self.setup_configuration() self.replication_targets[0]['replication_mode'] = 'sync' conf.replication_device = self.replication_targets mock_client = self.setup_driver(config=conf) mock_client.getStorageSystemInfo.return_value = ( {'id': self.CLIENT_ID}) mock_client.getVolumeSet.return_value = ( {'name': self.CONSIS_GROUP_NAME}) with mock.patch.object( hpecommon.HPE3PARCommon, '_create_client') as mock_create_client: mock_create_client.return_value = mock_client group = self.fake_group_object() group.is_replicated = True group.volume_types = [self.volume_type_tiramisu] self.driver.enable_replication(context.get_admin_context(), group, [self.volume]) expected = [ mock.call.getVolumeSet(self.CONSIS_GROUP_NAME), mock.call.startRemoteCopy(self.RCG_3PAR_GROUP_NAME)] mock_client.assert_has_calls(expected) @mock.patch('cinder.volume.drivers.hpe.hpe_3par_common.HPE3PARCommon.' 
'is_volume_group_snap_type') @mock.patch('cinder.volume.volume_utils.is_group_a_cg_snapshot_type') def test_disable_group_replication(self, cg_ss_enable, vol_ss_enable): cg_ss_enable.return_value = True vol_ss_enable.return_value = True conf = self.setup_configuration() self.replication_targets[0]['replication_mode'] = 'sync' conf.replication_device = self.replication_targets mock_client = self.setup_driver(config=conf) mock_client.getStorageSystemInfo.return_value = ( {'id': self.CLIENT_ID}) mock_client.getVolumeSet.return_value = ( {'name': self.CONSIS_GROUP_NAME}) with mock.patch.object( hpecommon.HPE3PARCommon, '_create_client') as mock_create_client: mock_create_client.return_value = mock_client group = self.fake_group_object() group.is_replicated = True group.volume_types = [self.volume_type_tiramisu] self.driver.disable_replication(context.get_admin_context(), group, [self.volume]) expected = [ mock.call.getVolumeSet(self.CONSIS_GROUP_NAME), mock.call.stopRemoteCopy(self.RCG_3PAR_GROUP_NAME)] mock_client.assert_has_calls(expected) @mock.patch.object(volume_types, 'get_volume_type') def test_failover_replication_on_group(self, _mock_volume_types): # periodic vs. sync is not relevant when conducting a failover. We # will just use periodic. conf = self.setup_configuration() self.replication_targets[0]['replication_mode'] = 'periodic' conf.replication_device = self.replication_targets mock_client = self.setup_driver(config=conf) mock_client.getStorageSystemInfo.return_value = ( {'id': self.CLIENT_ID}) mock_replicated_client = self.setup_driver(config=conf) mock_replicated_client.getStorageSystemInfo.return_value = ( {'id': self.REPLICATION_CLIENT_ID}) _mock_volume_types.return_value = { 'name': 'replicated', 'extra_specs': { 'replication_enabled': ' True', 'replication:mode': 'periodic', 'replication:sync_period': '900', 'hpe3par:group_replication': ' True', 'volume_type': self.volume_type_tiramisu}} with mock.patch.object( hpecommon.HPE3PARCommon, '_create_client') as mock_create_client, \ mock.patch.object( hpecommon.HPE3PARCommon, '_create_replication_client') as mock_replication_client: mock_create_client.return_value = mock_client mock_replication_client.return_value = mock_replicated_client valid_backend_id = ( self.replication_targets[0]['backend_id']) # create a group group = self.fake_group_object() group.is_replicated = True group.volume_types = [self.volume_type_tiramisu] volumes = [self.volume_tiramisu] expected_model = [{'id': self.VOLUME_ID, 'replication_status': fields.ReplicationStatus.FAILED_OVER, 'provider_location': self.CLIENT_ID, 'replication_driver_data': self.REPLICATION_CLIENT_ID}] exp_model_update = { 'replication_status': fields.ReplicationStatus.FAILED_OVER} model_update, return_model = self.driver.failover_replication( context.get_admin_context(), group, volumes, valid_backend_id) expected = [ mock.call.stopRemoteCopy(self.RCG_3PAR_GROUP_NAME)] mock_client.assert_has_calls( self.get_id_login + self.standard_logout + self.standard_login + expected + self.standard_logout) self.assertEqual(expected_model, return_model) self.assertEqual(exp_model_update, model_update) @mock.patch.object(volume_types, 'get_volume_type') def test_failback_replication_on_group(self, _mock_volume_types): # Managed vs. unmanaged and periodic vs. sync are not relevant when # failing back a volume. # We will use managed and periodic as the default. 
conf = self.setup_configuration() self.replication_targets[0]['replication_mode'] = 'periodic' conf.replication_device = self.replication_targets mock_client = self.setup_driver(config=conf) mock_client.getStorageSystemInfo.return_value = ( {'id': self.CLIENT_ID}) mock_replicated_client = self.setup_driver(config=conf) mock_replicated_client.getStorageSystemInfo.return_value = ( {'id': self.REPLICATION_CLIENT_ID}) _mock_volume_types.return_value = { 'name': 'replicated', 'extra_specs': { 'replication_enabled': ' True', 'replication:mode': 'periodic', 'replication:sync_period': '900', 'hpe3par:group_replication': ' True', 'volume_type': self.volume_type_tiramisu}} with mock.patch.object( hpecommon.HPE3PARCommon, '_create_client') as mock_create_client, \ mock.patch.object( hpecommon.HPE3PARCommon, '_create_replication_client') as mock_replication_client: mock_create_client.return_value = mock_client mock_replication_client.return_value = mock_replicated_client volume = self.volume_tiramisu.copy() volume['replication_status'] = 'failed-over' volume['replication_driver_data'] = self.REPLICATION_CLIENT_ID # create a group group = self.fake_group_object() group.is_replicated = True group.volume_types = [self.volume_type_tiramisu] expected_model = [{'id': self.VOLUME_ID, 'replication_status': fields.ReplicationStatus.ENABLED, 'provider_location': self.CLIENT_ID, 'replication_driver_data': self.REPLICATION_CLIENT_ID}] exp_model_update = { 'replication_status': fields.ReplicationStatus.ENABLED} model_update, return_model = self.driver.failover_replication( context.get_admin_context(), group, [volume], 'default') self.assertEqual(expected_model, return_model) self.assertEqual(exp_model_update, model_update) @mock.patch('cinder.volume.drivers.hpe.hpe_3par_common.HPE3PARCommon.' 
'is_volume_group_snap_type') @mock.patch('cinder.volume.volume_utils.is_group_a_cg_snapshot_type') @mock.patch.object(volume_types, 'get_volume_type') def test_update_replication_enabled_group_add_vol(self, _mock_volume_types, cg_ss_enable, vol_ss_enable): cg_ss_enable.return_value = True vol_ss_enable.return_value = True conf = self.setup_configuration() self.replication_targets[0]['replication_mode'] = 'sync' conf.replication_device = self.replication_targets mock_client = self.setup_driver(config=conf) mock_client.getStorageSystemInfo.return_value = ( {'id': self.CLIENT_ID}) mock_client.getCPG.return_value = {'domain': None} _mock_volume_types.return_value = { 'name': 'replicated', 'extra_specs': { 'replication_enabled': ' True', 'replication:mode': 'sync', 'hpe3par:group_replication': ' True', 'volume_type': self.volume_type_tiramisu}} mock_client.getCPG.return_value = {'domain': None} mock_client.getRemoteCopyGroup.return_value = ( {'volumes': [{'name': self.VOLUME_3PAR_NAME}]}) with mock.patch.object( hpecommon.HPE3PARCommon, '_create_client') as mock_create_client: mock_create_client.return_value = mock_client # create a group group = self.fake_group_object() group.is_replicated = True group.replication_status = fields.ReplicationStatus.ENABLED group.volume_types = [self.volume_type_tiramisu] exp_add_volume = [{'id': self.volume_tiramisu['id'], 'replication_status': fields.ReplicationStatus.ENABLED}] # add a volume to the consistency group model_update, add_volume, remove_volume = \ self.driver.update_group(context.get_admin_context(), group, add_volumes=[self.volume_tiramisu], remove_volumes=[]) expected = [ mock.call.stopRemoteCopy(self.RCG_3PAR_GROUP_NAME), mock.call.getRemoteCopyGroup(self.RCG_3PAR_GROUP_NAME), mock.call.addVolumeToRemoteCopyGroup( self.RCG_3PAR_GROUP_NAME, self.VOLUME_3PAR_NAME, [{'secVolumeName': self.VOLUME_3PAR_NAME, 'targetName': 'target'}], optional={'volumeAutoCreation': True}), mock.call.addVolumeToVolumeSet( self.CONSIS_GROUP_NAME, self.VOLUME_3PAR_NAME), mock.call.getRemoteCopyGroup(self.RCG_3PAR_GROUP_NAME), mock.call.startRemoteCopy(self.RCG_3PAR_GROUP_NAME)] mock_client.assert_has_calls(expected) self.assertEqual(exp_add_volume, add_volume) @mock.patch('cinder.volume.drivers.hpe.hpe_3par_common.HPE3PARCommon.' 
'is_volume_group_snap_type') @mock.patch('cinder.volume.volume_utils.is_group_a_cg_snapshot_type') @mock.patch.object(volume_types, 'get_volume_type') def test_update_repl_group_add_periodic_vol(self, _mock_volume_types, cg_ss_enable, vol_ss_enable): cg_ss_enable.return_value = True vol_ss_enable.return_value = True conf = self.setup_configuration() self.replication_targets[0]['replication_mode'] = 'periodic' conf.replication_device = self.replication_targets mock_client = self.setup_driver(config=conf) mock_client.getStorageSystemInfo.return_value = ( {'id': self.CLIENT_ID}) mock_client.getCPG.return_value = {'domain': None} _mock_volume_types.return_value = { 'name': 'replicated', 'extra_specs': { 'replication_enabled': ' True', 'replication:mode': 'periodic', 'replication:sync_period': 300, 'hpe3par:group_replication': ' True', 'volume_type': self.volume_type_tiramisu}} mock_client.getCPG.return_value = {'domain': None} mock_client.getRemoteCopyGroup.return_value = ( {'volumes': [{'name': self.VOLUME_3PAR_NAME}], 'targets': [{'syncPeriod': 0}]}) with mock.patch.object( hpecommon.HPE3PARCommon, '_create_client') as mock_create_client: mock_create_client.return_value = mock_client # create a group group = self.fake_group_object() group.is_replicated = True group.replication_status = fields.ReplicationStatus.ENABLED group.volume_types = [self.volume_type_tiramisu] exp_add_volume = [{'id': self.volume_tiramisu['id'], 'replication_status': fields.ReplicationStatus.ENABLED}] # add a volume to the consistency group model_update, add_volume, remove_volume = \ self.driver.update_group(context.get_admin_context(), group, add_volumes=[self.volume_tiramisu], remove_volumes=[]) expected = [ mock.call.stopRemoteCopy(self.RCG_3PAR_GROUP_NAME), mock.call.getRemoteCopyGroup(self.RCG_3PAR_GROUP_NAME), mock.call.addVolumeToRemoteCopyGroup( self.RCG_3PAR_GROUP_NAME, self.VOLUME_3PAR_NAME, [{'secVolumeName': self.VOLUME_3PAR_NAME, 'targetName': 'target'}], optional={'volumeAutoCreation': True}), mock.call.getRemoteCopyGroup(self.RCG_3PAR_GROUP_NAME), mock.call.modifyRemoteCopyGroup( self.RCG_3PAR_GROUP_NAME, {'targets': [ {'syncPeriod': 300, 'targetName': 'target'}]}), mock.call.addVolumeToVolumeSet( self.CONSIS_GROUP_NAME, self.VOLUME_3PAR_NAME), mock.call.getRemoteCopyGroup(self.RCG_3PAR_GROUP_NAME), mock.call.startRemoteCopy(self.RCG_3PAR_GROUP_NAME)] mock_client.assert_has_calls(expected) self.assertEqual(exp_add_volume, add_volume) @mock.patch('cinder.volume.drivers.hpe.hpe_3par_common.HPE3PARCommon.' 
'is_volume_group_snap_type') @mock.patch('cinder.volume.volume_utils.is_group_a_cg_snapshot_type') @mock.patch.object(volume_types, 'get_volume_type') def test_update_replication_enabled_group_remove_vol( self, _mock_volume_types, cg_ss_enable, vol_ss_enable): cg_ss_enable.return_value = True vol_ss_enable.return_value = True conf = self.setup_configuration() self.replication_targets[0]['replication_mode'] = 'sync' conf.replication_device = self.replication_targets mock_client = self.setup_driver(config=conf) mock_client.getStorageSystemInfo.return_value = ( {'id': self.CLIENT_ID}) mock_client.getCPG.return_value = {'domain': None} _mock_volume_types.return_value = { 'name': 'replicated', 'extra_specs': { 'replication_enabled': ' True', 'replication:mode': 'sync', 'hpe3par:group_replication': ' True', 'volume_type': self.volume_type_tiramisu}} mock_client.getCPG.return_value = {'domain': None} mock_client.getRemoteCopyGroup.return_value = ( {'volumes': [{'name': self.VOLUME_3PAR_NAME}]}) with mock.patch.object( hpecommon.HPE3PARCommon, '_create_client') as mock_create_client: mock_create_client.return_value = mock_client # create a group group = self.fake_group_object() group.is_replicated = True group.replication_status = fields.ReplicationStatus.ENABLED group.volume_types = [self.volume_type_tiramisu] exp_remove_volume = [{'id': self.volume_tiramisu['id'], 'replication_status': None}] # add a volume to the consistency group model_update, add_volume, remove_volume = \ self.driver.update_group(context.get_admin_context(), group, add_volumes=[], remove_volumes=[self.volume_tiramisu]) expected = [ mock.call.stopRemoteCopy(self.RCG_3PAR_GROUP_NAME), mock.call.removeVolumeFromRemoteCopyGroup( self.RCG_3PAR_GROUP_NAME, self.VOLUME_3PAR_NAME, removeFromTarget=True), mock.call.removeVolumeFromVolumeSet( self.CONSIS_GROUP_NAME, self.VOLUME_3PAR_NAME), mock.call.getRemoteCopyGroup(self.RCG_3PAR_GROUP_NAME), mock.call.startRemoteCopy(self.RCG_3PAR_GROUP_NAME)] mock_client.assert_has_calls(expected) self.assertEqual(exp_remove_volume, remove_volume) @mock.patch('cinder.volume.drivers.hpe.hpe_3par_common.HPE3PARCommon.' 'is_volume_group_snap_type') @mock.patch.object(volume_types, 'get_volume_type') def test_create_volume_with_tiramisu_volume_type(self, _mock_volume_types, vol_ss_enable): # setup_mock_client drive with default configuration # and return the mock HTTP 3PAR client vol_ss_enable.return_value = True conf = self.setup_configuration() self.replication_targets[0]['replication_mode'] = 'periodic' conf.replication_device = self.replication_targets mock_client = self.setup_driver(config=conf) mock_client.getStorageSystemInfo.return_value = ( {'id': self.CLIENT_ID}) mock_client.getCPG.return_value = {'domain': None} _mock_volume_types.return_value = { 'name': 'replicated', 'extra_specs': { 'replication_enabled': ' True', 'replication:mode': 'periodic', 'hpe3par:group_replication': ' True', 'replication:sync_period': '900', 'volume_type': self.volume_type_tiramisu}} with mock.patch.object( hpecommon.HPE3PARCommon, '_create_client') as mock_create_client: mock_create_client.return_value = mock_client self.driver.create_volume(self.volume_replicated) expected = [ mock.call.createVolume( self.VOLUME_3PAR_NAME, HPE3PAR_CPG, 2048, { 'comment': mock.ANY, 'tpvv': True, 'tdvv': False, 'snapCPG': HPE3PAR_CPG_SNAP})] mock_client.assert_has_calls(expected) @mock.patch('cinder.volume.drivers.hpe.hpe_3par_common.HPE3PARCommon.' 
'is_volume_group_snap_type') @mock.patch.object(volume_types, 'get_volume_type') def test_create_volume_with_tiramisu_volume_type_and_added_in_group( self, _mock_volume_types, vol_ss_enable): # setup_mock_client drive with default configuration # and return the mock HTTP 3PAR client vol_ss_enable.return_value = True conf = self.setup_configuration() self.replication_targets[0]['replication_mode'] = 'sync' conf.replication_device = self.replication_targets mock_client = self.setup_driver(config=conf) mock_client.getStorageSystemInfo.return_value = ( {'id': self.CLIENT_ID}) mock_client.getCPG.return_value = {'domain': None} mock_client.getRemoteCopyGroup.return_value = ( {'volumes': [{'name': self.VOLUME_3PAR_NAME}]}) _mock_volume_types.return_value = { 'name': 'replicated', 'extra_specs': { 'replication_enabled': ' True', 'replication:mode': 'sync', 'hpe3par:group_replication': ' True', 'volume_type': self.volume_type_tiramisu}} with mock.patch.object( hpecommon.HPE3PARCommon, '_create_client') as mock_create_client: mock_create_client.return_value = mock_client mock_create_client.return_value = mock_client group = self.fake_group_object() group.is_replicated = True group.replication_status = fields.ReplicationStatus.ENABLED group.volume_types = [self.volume_type_tiramisu] volume = self.volume_tiramisu.copy() volume['group'] = group volume['group_id'] = group.id return_model = self.driver.create_volume(volume) expected = [ mock.call.createVolume( self.VOLUME_3PAR_NAME, HPE3PAR_CPG, 2048, {'comment': mock.ANY, 'tpvv': True, 'tdvv': False, 'snapCPG': HPE3PAR_CPG_SNAP}), mock.call.stopRemoteCopy(self.RCG_3PAR_GROUP_NAME), mock.call.getRemoteCopyGroup(self.RCG_3PAR_GROUP_NAME), mock.call.addVolumeToRemoteCopyGroup( self.RCG_3PAR_GROUP_NAME, self.VOLUME_3PAR_NAME, [{'secVolumeName': self.VOLUME_3PAR_NAME, 'targetName': 'target'}], optional={'volumeAutoCreation': True}), mock.call.getRemoteCopyGroup(self.RCG_3PAR_GROUP_NAME), mock.call.startRemoteCopy(self.RCG_3PAR_GROUP_NAME), mock.call.addVolumeToVolumeSet( self.CONSIS_GROUP_NAME, self.VOLUME_3PAR_NAME)] mock_client.assert_has_calls(expected) self.assertEqual(return_model['replication_status'], 'enabled') @mock.patch.object(volume_types, 'get_volume_type') def test_revert_to_snapshot_of_volume_in_group(self, _mock_volume_types): _mock_volume_types.return_value = { 'name': 'replicated', 'extra_specs': { 'replication_enabled': ' True', 'hpe3par:group_replication': ' True', 'volume_type': self.volume_type_replicated}} mock_client = self.setup_driver() mock_client.isOnlinePhysicalCopy.return_value = False mock_client.getStorageSystemInfo.return_value = mock.ANY mock_client.promoteVirtualCopy.return_value = {'taskid': 1} mock_client.getTask.return_value = {'status': 1} with mock.patch.object(hpecommon.HPE3PARCommon, '_create_client') as mock_create_client: mock_create_client.return_value = mock_client volume = self.volume_tiramisu.copy() group = self.fake_group_object() group.is_replicated = True group.replication_status = fields.ReplicationStatus.ENABLED group.volume_types = [self.volume_type_tiramisu] volume['group'] = group volume['migration_status'] = None self.driver.revert_to_snapshot( self.ctxt, volume, self.snapshot) expected = [ mock.call.stopRemoteCopy(self.RCG_3PAR_GROUP_NAME), mock.call.isOnlinePhysicalCopy(self.VOLUME_3PAR_NAME), mock.call.promoteVirtualCopy( 'oss-L4I73ONuTci9Fd4ceij-MQ', optional={'allowRemoteCopyParent': True}), mock.call.getTask(1), mock.call.startRemoteCopy(self.RCG_3PAR_GROUP_NAME) ] 
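            # For a replicated volume that belongs to a group, reverting to a
            # snapshot is expected to pause Remote Copy first, promote the
            # virtual copy with allowRemoteCopyParent=True, wait for the
            # promote task to finish, and only then restart Remote Copy;
            # assert_has_calls() below verifies that ordering.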
mock_client.assert_has_calls(expected) @mock.patch.object(volume_types, 'get_volume_type') def test_failover_host_with_group_exist(self, _mock_volume_types): # periodic vs. sync is not relevant when conducting a failover. We # will just use periodic. conf = self.setup_configuration() self.replication_targets[0]['replication_mode'] = 'periodic' conf.replication_device = self.replication_targets mock_client = self.setup_driver(config=conf) mock_client.getStorageSystemInfo.return_value = ( {'id': self.CLIENT_ID}) mock_replicated_client = self.setup_driver(config=conf) mock_replicated_client.getStorageSystemInfo.return_value = ( {'id': self.REPLICATION_CLIENT_ID}) _mock_volume_types.return_value = { 'name': 'replicated', 'extra_specs': { 'replication_enabled': ' True', 'replication:mode': 'periodic', 'replication:sync_period': '900', 'hpe3par:group_replication': ' True', 'volume_type': self.volume_type_tiramisu}} with mock.patch.object( hpecommon.HPE3PARCommon, '_create_client') as mock_create_client, \ mock.patch.object( hpecommon.HPE3PARCommon, '_create_replication_client') as mock_replication_client: mock_create_client.return_value = mock_client mock_replication_client.return_value = mock_replicated_client valid_backend_id = ( self.replication_targets[0]['backend_id']) # create a group group = self.fake_group_object() group.is_replicated = True group.volume_types = [self.volume_type_tiramisu] volume = self.volume_tiramisu.copy() volume['group'] = group volume['group_id'] = group.id group_model = { 'group_id': group.id, 'updates': {'replication_status': fields.ReplicationStatus.FAILED_OVER}} expected_model = (self.REPLICATION_BACKEND_ID, [{'updates': { 'id': self.VOLUME_ID, 'replication_status': fields.ReplicationStatus.FAILED_OVER, 'provider_location': self.CLIENT_ID, 'replication_driver_data': self.REPLICATION_CLIENT_ID}, 'volume_id': self.VOLUME_ID}], [group_model]) return_model = self.driver.failover_host( context.get_admin_context(), [volume], valid_backend_id, [group]) expected = [ mock.call.stopRemoteCopy(self.RCG_3PAR_GROUP_NAME)] mock_client.assert_has_calls( self.get_id_login + self.standard_logout + self.standard_login + expected + self.standard_logout) self.assertEqual(expected_model, return_model) @mock.patch.object(volume_types, 'get_volume_type') def test_replication_failback_with_group_exist(self, _mock_volume_types): conf = self.setup_configuration() self.replication_targets[0]['replication_mode'] = 'periodic' conf.replication_device = self.replication_targets mock_client = self.setup_driver(config=conf) mock_client.getStorageSystemInfo.return_value = ( {'id': self.CLIENT_ID}) mock_replicated_client = self.setup_driver(config=conf) mock_replicated_client.getStorageSystemInfo.return_value = ( {'id': self.REPLICATION_CLIENT_ID}) _mock_volume_types.return_value = { 'name': 'replicated', 'extra_specs': { 'replication_enabled': ' True', 'replication:mode': 'periodic', 'replication:sync_period': '900', 'hpe3par:group_replication': ' True', 'volume_type': self.volume_type_tiramisu}} with mock.patch.object( hpecommon.HPE3PARCommon, '_create_client') as mock_create_client, \ mock.patch.object( hpecommon.HPE3PARCommon, '_create_replication_client') as mock_replication_client: mock_create_client.return_value = mock_client mock_replication_client.return_value = mock_replicated_client # create a group group = self.fake_group_object() group.is_replicated = True group.volume_types = [self.volume_type_tiramisu] volume = self.volume_tiramisu.copy() volume['group'] = group volume['group_id'] 
= group.id volume['replication_status'] = 'failed-over' volume['replication_driver_data'] = self.REPLICATION_CLIENT_ID group_model = { 'group_id': group.id, 'updates': {'replication_status': fields.ReplicationStatus.ENABLED}} expected_model = (None, [{'updates': {'id': self.VOLUME_ID, 'replication_status': fields.ReplicationStatus.ENABLED, 'provider_location': self.CLIENT_ID, 'replication_driver_data': self.REPLICATION_CLIENT_ID}, 'volume_id': self.VOLUME_ID}], [group_model]) return_model = self.driver.failover_host( context.get_admin_context(), [volume], 'default', [group]) self.assertEqual(expected_model, return_model) @mock.patch('cinder.volume.drivers.hpe.hpe_3par_common.HPE3PARCommon.' 'get_volume_settings_from_type') @mock.patch('cinder.volume.drivers.hpe.hpe_3par_common.HPE3PARCommon.' 'is_volume_group_snap_type') @mock.patch('cinder.volume.volume_utils.is_group_a_cg_snapshot_type') @mock.patch('cinder.volume.drivers.hpe.hpe_3par_common.HPE3PARCommon.' 'create_group') @mock.patch.object(volume_types, 'get_volume_type') def test_create_group_with_replication_from_src_group( self, _mock_type, _src_group, cg_ss_enable, vol_ss_enable, typ_info): cg_ss_enable.return_value = True vol_ss_enable.return_value = True conf = self.setup_configuration() self.replication_targets[0]['replication_mode'] = 'sync' conf.replication_device = self.replication_targets mock_client = self.setup_driver(config=conf) mock_client.getStorageSystemInfo.return_value = ( {'id': self.CLIENT_ID}) mock_client.getRemoteCopyGroup.return_value = ( {'volumes': [{'name': self.VOLUME_3PAR_NAME}]}) mock_client.getCPG.return_value = {'domain': None} task_id = 1 mock_client.copyVolume.return_value = {'taskid': task_id} mock_client.getTask.return_value = {'status': 1} type_info = {'cpg': 'OpenStackCPG', 'tpvv': True, 'tdvv': False, 'snap_cpg': 'OpenStackCPG', 'hpe3par_keys': {'group_replication': ' True'}} _mock_type.return_value = { 'name': 'replicated', 'extra_specs': { 'replication_enabled': ' True', 'replication:mode': 'sync', 'hpe3par:group_replication': ' True', 'volume_type': self.volume_type_tiramisu}} typ_info.return_value = type_info source_volume = self.volume_src_cg volume = self.volume_tiramisu.copy() volume['source_volid'] = source_volume['id'] common = hpecommon.HPE3PARCommon(None) vol_name = common._get_3par_vol_name(volume.get('id')) mock_client.getVolume.return_value = {'copyOf': vol_name} group_snap_optional = ( {'expirationHours': 1}) with mock.patch.object( hpecommon.HPE3PARCommon, '_create_client') as mock_create_client: mock_create_client.return_value = mock_client group = self.fake_group_object() _src_group.return_value = group group.is_replicated = True source_grp = self.fake_group_object( grp_id=self.SRC_CONSIS_GROUP_ID) source_grp.is_replicated = True expected = [ mock.call.createSnapshotOfVolumeSet( mock.ANY, self.SRC_CONSIS_GROUP_NAME, optional=group_snap_optional), mock.call.stopRemoteCopy(self.RCG_3PAR_GROUP_NAME), mock.call.getVolume(mock.ANY), mock.call.copyVolume( mock.ANY, self.VOLUME_NAME_3PAR, HPE3PAR_CPG, {'snapCPG': HPE3PAR_CPG, 'online': True, 'tpvv': mock.ANY, 'tdvv': mock.ANY}), mock.call.getTask(task_id), mock.call.getRemoteCopyGroup(self.RCG_3PAR_GROUP_NAME), mock.call.addVolumeToRemoteCopyGroup( self.RCG_3PAR_GROUP_NAME, self.VOLUME_3PAR_NAME, [{'secVolumeName': self.VOLUME_3PAR_NAME, 'targetName': 'target'}], optional={'volumeAutoCreation': True}), mock.call.addVolumeToVolumeSet( self.CONSIS_GROUP_NAME, self.VOLUME_NAME_3PAR), mock.call.getRemoteCopyGroup(self.RCG_3PAR_GROUP_NAME), 
mock.call.startRemoteCopy(self.RCG_3PAR_GROUP_NAME)] # Create a consistency group from a source consistency group. self.driver.create_group_from_src( context.get_admin_context(), group, [volume], source_group=source_grp, source_vols=[source_volume]) mock_client.assert_has_calls(expected) @mock.patch('cinder.volume.drivers.hpe.hpe_3par_common.HPE3PARCommon.' 'get_volume_settings_from_type') @mock.patch('cinder.volume.drivers.hpe.hpe_3par_common.HPE3PARCommon.' 'is_volume_group_snap_type') @mock.patch('cinder.volume.volume_utils.is_group_a_cg_snapshot_type') def test_create_group_from_src(self, cg_ss_enable, vol_ss_enable, typ_info): cg_ss_enable.return_value = True vol_ss_enable.return_value = True mock_client = self.setup_driver() mock_client.getStorageSystemInfo.return_value = {'id': self.CLIENT_ID} type_info = {'cpg': 'OpenStackCPG', 'tpvv': True, 'tdvv': False, 'snap_cpg': 'OpenStackCPG', 'hpe3par_keys': {}} typ_info.return_value = type_info source_volume = self.volume_src_cg volume = self.fake_volume_object(source_volid=source_volume['id']) common = hpecommon.HPE3PARCommon(None) vol_name = common._get_3par_vol_name(volume.id) mock_client.getVolume.return_value = {'copyOf': vol_name} group_snap_comment = Comment({ "group_id": "6044fedf-c889-4752-900f-2039d247a5df", "description": "group_snapshot", "group_snapshot_id": "e91c5ed5-daee-4e84-8724-1c9e31e7a1f2", }) group_snap_optional = ( {'comment': group_snap_comment, 'readOnly': False}) group_comment = Comment({ 'group_id': self.GROUP_ID }) with mock.patch.object(hpecommon.HPE3PARCommon, '_create_client') as mock_create_client: mock_create_client.return_value = mock_client mock_client.getCPG.return_value = {'domain': None} # create a consistency group group = self.fake_group_object() self.driver.create_group(context.get_admin_context(), group) expected = [ mock.call.getCPG(HPE3PAR_CPG), mock.call.createVolumeSet( self.CONSIS_GROUP_NAME, domain=None, comment=group_comment)] mock_client.assert_has_calls(expected) mock_client.reset_mock() # add a volume to the consistency group self.driver.update_group(context.get_admin_context(), group, add_volumes=[volume], remove_volumes=[]) expected = [ mock.call.addVolumeToVolumeSet( self.CONSIS_GROUP_NAME, self.VOLUME_NAME_3PAR)] mock_client.assert_has_calls(expected) mock_client.reset_mock() # create a snapshot of the consistency group grp_snapshot = self.fake_group_snapshot_object() self.driver.create_group_snapshot(context.get_admin_context(), grp_snapshot, []) expected = [ mock.call.createSnapshotOfVolumeSet( self.CGSNAPSHOT_BASE_NAME + "-@count@", self.CONSIS_GROUP_NAME, optional=group_snap_optional)] # create a consistency group from the cgsnapshot self.driver.create_group_from_src( context.get_admin_context(), group, [volume], group_snapshot=grp_snapshot, snapshots=[self.snapshot]) mock_client.assert_has_calls(expected) # (i) wsapi version is old/default # (ii) wsapi version is 2023, then snapCPG isn't required @ddt.data({'wsapi_version': None}, {'wsapi_version': HPE3PARBaseDriver.wsapi_version_2023}) @ddt.unpack @mock.patch('cinder.volume.drivers.hpe.hpe_3par_common.HPE3PARCommon.' 'get_volume_settings_from_type') @mock.patch('cinder.volume.drivers.hpe.hpe_3par_common.HPE3PARCommon.' 
'is_volume_group_snap_type') @mock.patch('cinder.volume.volume_utils.is_group_a_cg_snapshot_type') def test_create_group_from_src_group(self, cg_ss_enable, vol_ss_enable, typ_info, wsapi_version): cg_ss_enable.return_value = True vol_ss_enable.return_value = True mock_client = self.setup_driver(wsapi_version=wsapi_version) task_id = 1 mock_client.copyVolume.return_value = {'taskid': task_id} mock_client.getStorageSystemInfo.return_value = {'id': self.CLIENT_ID} type_info = {'cpg': 'OpenStackCPG', 'tpvv': True, 'tdvv': False, 'snap_cpg': 'OpenStackCPG', 'hpe3par_keys': {}} typ_info.return_value = type_info source_volume = self.volume_src_cg volume = self.fake_volume_object(source_volid=source_volume['id']) common = hpecommon.HPE3PARCommon(None) vol_name = common._get_3par_vol_name(volume.id) mock_client.getVolume.return_value = {'copyOf': vol_name} group_snap_optional = ( {'expirationHours': 1}) group_comment = Comment({ 'group_id': self.GROUP_ID }) with mock.patch.object(hpecommon.HPE3PARCommon, '_create_client') as mock_create_client: mock_create_client.return_value = mock_client mock_client.getCPG.return_value = {'domain': None} group = self.fake_group_object() source_grp = self.fake_group_object( grp_id=self.SRC_CONSIS_GROUP_ID) optional = {'online': True, 'tpvv': mock.ANY, 'tdvv': mock.ANY} if not wsapi_version: optional['snapCPG'] = HPE3PAR_CPG expected = [ mock.call.getCPG(HPE3PAR_CPG), mock.call.createVolumeSet( self.CONSIS_GROUP_NAME, domain=None, comment=group_comment), mock.call.createSnapshotOfVolumeSet( mock.ANY, self.SRC_CONSIS_GROUP_NAME, optional=group_snap_optional), mock.call.getVolume(mock.ANY), mock.call.copyVolume( mock.ANY, self.VOLUME_NAME_3PAR, HPE3PAR_CPG, optional), mock.call.addVolumeToVolumeSet( self.CONSIS_GROUP_NAME, self.VOLUME_NAME_3PAR)] # Create a consistency group from a source consistency group. if not wsapi_version: # (i) old/default self.driver.create_group_from_src( context.get_admin_context(), group, [volume], source_group=source_grp, source_vols=[source_volume]) else: # (ii) wsapi 2023 common = self.driver._login() common.create_group_from_src( context.get_admin_context(), group, [volume], source_group=source_grp, source_vols=[source_volume]) mock_client.assert_has_calls(expected) @mock.patch('cinder.volume.drivers.hpe.hpe_3par_common.HPE3PARCommon.' 'is_volume_group_snap_type') @mock.patch('cinder.volume.volume_utils.is_group_a_cg_snapshot_type') def test_delete_group(self, cg_ss_enable, vol_ss_enable): cg_ss_enable.return_value = True vol_ss_enable.return_value = True mock_client = self.setup_driver() mock_client.getStorageSystemInfo.return_value = {'id': self.CLIENT_ID} comment = Comment({ 'group_id': self.GROUP_ID }) with mock.patch.object(hpecommon.HPE3PARCommon, '_create_client') as mock_create_client: mock_create_client.return_value = mock_client mock_client.getCPG.return_value = {'domain': None} # create a consistency group group = self.fake_group_object() self.driver.create_group(context.get_admin_context(), group) expected = [ mock.call.getCPG(HPE3PAR_CPG), mock.call.createVolumeSet( self.CONSIS_GROUP_NAME, domain=None, comment=comment)] mock_client.assert_has_calls(expected) mock_client.reset_mock() # remove the consistency group group.status = fields.GroupStatus.DELETING self.driver.delete_group(context.get_admin_context(), group, []) expected = [ mock.call.deleteVolumeSet( self.CONSIS_GROUP_NAME)] mock_client.assert_has_calls(expected) @mock.patch('cinder.volume.drivers.hpe.hpe_3par_common.HPE3PARCommon.' 
'is_volume_group_snap_type') @mock.patch('cinder.volume.volume_utils.is_group_a_cg_snapshot_type') def test_delete_group_exceptions(self, cg_ss_enable, vol_ss_enable): cg_ss_enable.return_value = True vol_ss_enable.return_value = True mock_client = self.setup_driver() mock_client.getStorageSystemInfo.return_value = {'id': self.CLIENT_ID} with mock.patch.object(hpecommon.HPE3PARCommon, '_create_client') as mock_create_client: mock_create_client.return_value = mock_client mock_client.getCPG.return_value = {'domain': None} # create a consistency group group = self.fake_group_object() volume = fake_volume.fake_volume_obj(context.get_admin_context()) self.driver.create_group(context.get_admin_context(), group) # remove the consistency group group.status = fields.GroupStatus.DELETING # mock HTTPConflict in delete volume set mock_client.deleteVolumeSet.side_effect = ( hpeexceptions.HTTPConflict()) # no exception should escape method self.driver.delete_group(context.get_admin_context(), group, []) # mock HTTPNotFound in delete volume set mock_client.deleteVolumeSet.side_effect = ( hpeexceptions.HTTPNotFound()) # no exception should escape method self.driver.delete_group(context.get_admin_context(), group, []) # mock HTTPConflict in delete volume mock_client.deleteVolume.side_effect = ( hpeexceptions.HTTPConflict()) # no exception should escape method self.driver.delete_group(context.get_admin_context(), group, [volume]) @mock.patch('cinder.volume.drivers.hpe.hpe_3par_common.HPE3PARCommon.' 'is_volume_group_snap_type') @mock.patch('cinder.volume.volume_utils.is_group_a_cg_snapshot_type') def test_update_group_add_vol(self, cg_ss_enable, vol_ss_enable): cg_ss_enable.return_value = True vol_ss_enable.return_value = True mock_client = self.setup_driver() mock_client.getStorageSystemInfo.return_value = {'id': self.CLIENT_ID} volume = self.fake_volume_object() comment = Comment({ 'group_id': self.GROUP_ID }) with mock.patch.object(hpecommon.HPE3PARCommon, '_create_client') as mock_create_client: mock_create_client.return_value = mock_client mock_client.getCPG.return_value = {'domain': None} # create a consistency group group = self.fake_group_object() self.driver.create_group(context.get_admin_context(), group) expected = [ mock.call.getCPG(HPE3PAR_CPG), mock.call.createVolumeSet( self.CONSIS_GROUP_NAME, domain=None, comment=comment)] mock_client.assert_has_calls(expected) mock_client.reset_mock() # add a volume to the consistency group self.driver.update_group(context.get_admin_context(), group, add_volumes=[volume], remove_volumes=[]) expected = [ mock.call.addVolumeToVolumeSet( self.CONSIS_GROUP_NAME, self.VOLUME_NAME_3PAR)] mock_client.assert_has_calls(expected) @mock.patch('cinder.volume.drivers.hpe.hpe_3par_common.HPE3PARCommon.' 
'is_volume_group_snap_type') @mock.patch('cinder.volume.volume_utils.is_group_a_cg_snapshot_type') def test_update_group_remove_vol(self, cg_ss_enable, vol_ss_enable): cg_ss_enable.return_value = True vol_ss_enable.return_value = True mock_client = self.setup_driver() mock_client.getStorageSystemInfo.return_value = {'id': self.CLIENT_ID} volume = self.fake_volume_object() comment = Comment({ 'group_id': self.GROUP_ID }) with mock.patch.object(hpecommon.HPE3PARCommon, '_create_client') as mock_create_client: mock_create_client.return_value = mock_client mock_client.getCPG.return_value = {'domain': None} # create a consistency group group = self.fake_group_object() self.driver.create_group(context.get_admin_context(), group) expected = [ mock.call.getCPG(HPE3PAR_CPG), mock.call.createVolumeSet( self.CONSIS_GROUP_NAME, domain=None, comment=comment)] mock_client.assert_has_calls(expected) mock_client.reset_mock() # add a volume to the consistency group self.driver.update_group(context.get_admin_context(), group, add_volumes=[volume], remove_volumes=[]) expected = [ mock.call.addVolumeToVolumeSet( self.CONSIS_GROUP_NAME, self.VOLUME_NAME_3PAR)] mock_client.assert_has_calls(expected) mock_client.reset_mock() # remove the volume from the consistency group self.driver.update_group(context.get_admin_context(), group, add_volumes=[], remove_volumes=[volume]) expected = [ mock.call.removeVolumeFromVolumeSet( self.CONSIS_GROUP_NAME, self.VOLUME_NAME_3PAR)] mock_client.assert_has_calls(expected) @mock.patch('cinder.volume.drivers.hpe.hpe_3par_common.HPE3PARCommon.' 'is_volume_group_snap_type') @mock.patch('cinder.volume.volume_utils.is_group_a_cg_snapshot_type') def test_create_group_snapshot(self, cg_ss_enable, vol_ss_enable): cg_ss_enable.return_value = True vol_ss_enable.return_value = True mock_client = self.setup_driver() mock_client.getStorageSystemInfo.return_value = {'id': self.CLIENT_ID} volume = self.fake_volume_object() cg_comment = Comment({ 'group_id': self.GROUP_ID }) group_snap_comment = Comment({ "group_id": "6044fedf-c889-4752-900f-2039d247a5df", "description": "group_snapshot", "group_snapshot_id": "e91c5ed5-daee-4e84-8724-1c9e31e7a1f2"}) cgsnap_optional = ( {'comment': group_snap_comment, 'readOnly': False}) with mock.patch.object(hpecommon.HPE3PARCommon, '_create_client') as mock_create_client: mock_create_client.return_value = mock_client mock_client.getCPG.return_value = {'domain': None} # create a consistency group group = self.fake_group_object() self.driver.create_group(context.get_admin_context(), group) expected = [ mock.call.getCPG(HPE3PAR_CPG), mock.call.createVolumeSet( self.CONSIS_GROUP_NAME, domain=None, comment=cg_comment)] mock_client.assert_has_calls(expected) mock_client.reset_mock() # add a volume to the consistency group self.driver.update_group(context.get_admin_context(), group, add_volumes=[volume], remove_volumes=[]) expected = [ mock.call.addVolumeToVolumeSet( self.CONSIS_GROUP_NAME, self.VOLUME_NAME_3PAR)] mock_client.assert_has_calls(expected) mock_client.reset_mock() # create a snapshot of the consistency group group_snapshot = self.fake_group_snapshot_object() self.driver.create_group_snapshot(context.get_admin_context(), group_snapshot, []) expected = [ mock.call.createSnapshotOfVolumeSet( self.CGSNAPSHOT_BASE_NAME + "-@count@", self.CONSIS_GROUP_NAME, optional=cgsnap_optional)] mock_client.assert_has_calls(expected) @mock.patch('cinder.volume.drivers.hpe.hpe_3par_common.HPE3PARCommon.' 
'is_volume_group_snap_type') @mock.patch('cinder.volume.volume_utils.is_group_a_cg_snapshot_type') def test_delete_group_snapshot(self, cg_ss_enable, vol_ss_enable): cg_ss_enable.return_value = True vol_ss_enable.return_value = True mock_client = self.setup_driver() mock_client.getStorageSystemInfo.return_value = {'id': self.CLIENT_ID} volume = self.fake_volume_object() group_snapshot = self.fake_group_snapshot_object() cg_comment = Comment({ 'group_id': self.GROUP_ID }) group_snap_comment = Comment({ "group_id": "6044fedf-c889-4752-900f-2039d247a5df", "description": "group_snapshot", "group_snapshot_id": "e91c5ed5-daee-4e84-8724-1c9e31e7a1f2"}) group_snap_optional = {'comment': group_snap_comment, 'readOnly': False} with mock.patch.object(hpecommon.HPE3PARCommon, '_create_client') as mock_create_client: mock_create_client.return_value = mock_client mock_client.getCPG.return_value = {'domain': None} # create a consistency group group = self.fake_group_object() self.driver.create_group(context.get_admin_context(), group) expected = [ mock.call.getCPG(HPE3PAR_CPG), mock.call.createVolumeSet( self.CONSIS_GROUP_NAME, domain=None, comment=cg_comment)] mock_client.assert_has_calls(expected) mock_client.reset_mock() # add a volume to the consistency group self.driver.update_group(context.get_admin_context(), group, add_volumes=[volume], remove_volumes=[]) expected = [ mock.call.addVolumeToVolumeSet( self.CONSIS_GROUP_NAME, self.VOLUME_NAME_3PAR)] mock_client.assert_has_calls(expected) mock_client.reset_mock() # create a snapshot of the consistency group self.driver.create_group_snapshot(context.get_admin_context(), group_snapshot, []) expected = [ mock.call.createSnapshotOfVolumeSet( self.CGSNAPSHOT_BASE_NAME + "-@count@", self.CONSIS_GROUP_NAME, optional=group_snap_optional)] # delete the snapshot of the consistency group group_snapshot.status = fields.GroupSnapshotStatus.DELETING self.driver.delete_group_snapshot(context.get_admin_context(), group_snapshot, []) mock_client.assert_has_calls(expected) @mock.patch.object(volume_types, 'get_volume_type') def test_failover_host(self, _mock_volume_types): # periodic vs. sync is not relevant when conducting a failover. We # will just use periodic. conf = self.setup_configuration() self.replication_targets[0]['replication_mode'] = 'periodic' conf.replication_device = self.replication_targets mock_client = self.setup_driver(config=conf) mock_client.getStorageSystemInfo.return_value = ( {'id': self.CLIENT_ID}) mock_replicated_client = self.setup_driver(config=conf) mock_replicated_client.getStorageSystemInfo.return_value = ( {'id': self.REPLICATION_CLIENT_ID}) _mock_volume_types.return_value = { 'name': 'replicated', 'extra_specs': { 'replication_enabled': ' True', 'replication:mode': 'periodic', 'replication:sync_period': '900', 'volume_type': self.volume_type_replicated}} with mock.patch.object( hpecommon.HPE3PARCommon, '_create_client') as mock_create_client, \ mock.patch.object( hpecommon.HPE3PARCommon, '_create_replication_client') as mock_replication_client: mock_create_client.return_value = mock_client mock_replication_client.return_value = mock_replicated_client valid_backend_id = ( self.replication_targets[0]['backend_id']) invalid_backend_id = 'INVALID' volumes = [self.volume_replicated] # Test invalid secondary target. self.assertRaises( exception.InvalidReplicationTarget, self.driver.failover_host, context.get_admin_context(), volumes, invalid_backend_id) # Test no secondary target. 
self.assertRaises( exception.InvalidReplicationTarget, self.driver.failover_host, context.get_admin_context(), volumes, None) # Test a successful failover. expected_model = (self.REPLICATION_BACKEND_ID, [{'updates': {'replication_status': 'failed-over', 'replication_driver_data': self.REPLICATION_CLIENT_ID}, 'volume_id': self.VOLUME_ID}], []) return_model = self.driver.failover_host( context.get_admin_context(), volumes, valid_backend_id) expected = [ mock.call.stopRemoteCopy(self.RCG_3PAR_NAME)] mock_client.assert_has_calls( self.get_id_login + self.standard_logout + self.standard_login + expected + self.standard_logout) self.assertEqual(expected_model, return_model) @mock.patch.object(volume_types, 'get_volume_type') def test_replication_failback_ready(self, _mock_volume_types): # Managed vs. unmanaged and periodic vs. sync are not relevant when # failing back a volume. # We will use managed and periodic as the default. conf = self.setup_configuration() self.replication_targets[0]['replication_mode'] = 'periodic' conf.replication_device = self.replication_targets mock_client = self.setup_driver(config=conf) mock_client.getStorageSystemInfo.return_value = ( {'id': self.CLIENT_ID}) mock_replicated_client = self.setup_driver(config=conf) mock_replicated_client.getStorageSystemInfo.return_value = ( {'id': self.REPLICATION_CLIENT_ID}) _mock_volume_types.return_value = { 'name': 'replicated', 'extra_specs': { 'replication_enabled': ' True', 'replication:mode': 'periodic', 'replication:sync_period': '900', 'volume_type': self.volume_type_replicated}} with mock.patch.object( hpecommon.HPE3PARCommon, '_create_client') as mock_create_client, \ mock.patch.object( hpecommon.HPE3PARCommon, '_create_replication_client') as mock_replication_client: mock_create_client.return_value = mock_client mock_replication_client.return_value = mock_replicated_client # Test a successful fail-back. volume = copy.deepcopy(self.volume_replicated) volume['replication_status'] = 'failed-over' return_model = self.driver.failover_host( context.get_admin_context(), [volume], 'default') expected_model = (None, [{'updates': {'replication_status': 'available', 'replication_driver_data': self.CLIENT_ID}, 'volume_id': self.VOLUME_ID}], []) self.assertEqual(expected_model, return_model) @mock.patch.object(volume_types, 'get_volume_type') def test_replication_failback_not_ready(self, _mock_volume_types): # Managed vs. unmanaged and periodic vs. sync are not relevant when # failing back a volume. # We will use managed and periodic as the default. 
conf = self.setup_configuration() self.replication_targets[0]['replication_mode'] = 'periodic' conf.replication_device = self.replication_targets mock_client = self.setup_driver(config=conf) mock_client.getStorageSystemInfo.return_value = ( {'id': self.CLIENT_ID}) mock_replicated_client = self.setup_driver(config=conf) mock_replicated_client.getStorageSystemInfo.return_value = ( {'id': self.REPLICATION_CLIENT_ID}) _mock_volume_types.return_value = { 'name': 'replicated', 'extra_specs': { 'replication_enabled': ' True', 'replication:mode': 'periodic', 'replication:sync_period': '900', 'volume_type': self.volume_type_replicated}} with mock.patch.object( hpecommon.HPE3PARCommon, '_create_client') as mock_create_client, \ mock.patch.object( hpecommon.HPE3PARCommon, '_create_replication_client') as mock_replication_client: mock_create_client.return_value = mock_client mock_client.getRemoteCopyGroup.side_effect = ( exception.VolumeBackendAPIException( "Error: Remote Copy Group not Ready.")) mock_replication_client.return_value = mock_replicated_client # Test an unsuccessful fail-back. volume = copy.deepcopy(self.volume_replicated) volume['replication_status'] = 'failed-over' self.assertRaises( exception.InvalidReplicationTarget, self.driver.failover_host, context.get_admin_context(), [volume], 'default') def test_get_pool_with_existing_volume(self): mock_client = self.setup_driver() mock_client.getVolume.return_value = {'userCPG': HPE3PAR_CPG} with mock.patch.object(hpecommon.HPE3PARCommon, '_create_client') as mock_create_client: mock_create_client.return_value = mock_client actual_cpg = self.driver.get_pool(self.volume) expected_cpg = HPE3PAR_CPG expected = [ mock.call.getVolume(self.VOLUME_3PAR_NAME) ] mock_client.assert_has_calls(expected) self.assertEqual(expected_cpg, actual_cpg) def test_get_pool_with_non_existing_volume(self): mock_client = self.setup_driver() mock_client.getVolume.side_effect = hpeexceptions.HTTPNotFound with mock.patch.object(hpecommon.HPE3PARCommon, '_create_client') as mock_create_client: mock_create_client.return_value = mock_client expected = [ mock.call.getVolume(self.VOLUME_3PAR_NAME) ] try: self.assertRaises( hpeexceptions.HTTPNotFound, self.driver.get_pool, self.volume) except exception.InvalidVolume: mock_client.assert_has_calls(expected) def test_driver_login_with_wrong_credential_and_replication_disabled(self): mock_client = self.setup_driver() mock_client.login.side_effect = hpeexceptions.HTTPUnauthorized with mock.patch.object(hpecommon.HPE3PARCommon, '_create_client') as mock_create_client: mock_create_client.return_value = mock_client expected = [ mock.call.login(HPE3PAR_USER_NAME, HPE3PAR_USER_PASS) ] self.assertRaises( exception.InvalidInput, self.driver._login) mock_client.assert_has_calls(expected) def test_driver_login_with_wrong_credential_and_replication_enabled(self): conf = self.setup_configuration() self.replication_targets[0]['replication_mode'] = 'periodic' conf.replication_device = self.replication_targets mock_client = self.setup_driver(config=conf) mock_replicated_client = self.setup_driver(config=conf) mock_client.login.side_effect = hpeexceptions.HTTPUnauthorized with mock.patch.object( hpecommon.HPE3PARCommon, '_create_client') as mock_create_client, \ mock.patch.object( hpecommon.HPE3PARCommon, '_create_replication_client') as mock_replication_client: mock_create_client.return_value = mock_client mock_replication_client.return_value = mock_replicated_client expected = [ mock.call.login(HPE3PAR_USER_NAME, HPE3PAR_USER_PASS) ] common = 
self.driver._login() mock_client.assert_has_calls(expected) self.assertTrue(common._replication_enabled) def test_init_vendor_properties(self): conf = self.setup_configuration() mock_client = self.setup_driver(config=conf) with mock.patch.object( hpecommon.HPE3PARCommon, '_create_client') as mock_create_client: mock_create_client.return_value = mock_client with mock.patch.object(self.driver, 'get_volume_stats') as stats: stats.return_value = {} # calling vendor properties from driver. self.driver.init_capabilities() # calling vendor properties from specific 3par driver. properties, vendor_name = self.driver._init_vendor_properties() for key in self.driver.capabilities['properties']: new_key = key.replace('_', ':') if 'HP:3PAR' in new_key: self.assertIn(new_key, properties) @mock.patch.object(volume_types, 'get_volume_type') def test_rename_migrated_vvset(self, _mock_volume_types): # Setup_mock_client drive with default configuration # and return the mock HTTP 3PAR client mock_client = self.setup_driver() mock_client.getStorageSystemInfo.return_value = {'id': self.CLIENT_ID} with mock.patch.object(hpecommon.HPE3PARCommon, '_create_client') as mock_create_client: mock_create_client.return_value = mock_client dest_volume = {'name': self.VOLUME_NAME, 'id': self.VOLUME_ID, 'display_name': 'Foo Volume', 'size': 2, 'host': self.FAKE_CINDER_HOST, 'volume_type': None, 'volume_type_id': None} src_volume = {'name': self.SRC_CG_VOLUME_NAME, 'id': self.SRC_CG_VOLUME_ID, 'display_name': 'Foo Volume', 'size': 2, 'host': self.FAKE_CINDER_HOST, 'volume_type': None, 'volume_type_id': None} vvs_name_dest = 'vvs-0DM4qZEVSKON-DXN-NwVpw' vvs_name_src = 'vvs-vSHRG8dlTGiJbGsH9jz8tg' vvs_name_temp = 'tos-vSHRG8dlTGiJbGsH9jz8tg' common = self.driver._login() return_model = common._rename_migrated_vvset(src_volume, dest_volume) expected = [ mock.call.modifyVolumeSet( vvs_name_dest, newName=vvs_name_temp), mock.call.modifyVolumeSet( vvs_name_src, newName=vvs_name_dest), mock.call.modifyVolumeSet( vvs_name_temp, newName=vvs_name_src)] mock_client.assert_has_calls(expected) self.assertIsNone(return_model) @ddt.ddt class TestHPE3PARFCDriver(HPE3PARBaseDriver): properties = { 'driver_volume_type': 'fibre_channel', 'data': { 'encrypted': False, 'target_lun': 90, 'target_wwn': ['0987654321234', '123456789000987'], 'target_discovered': True, 'initiator_target_map': {'123456789012345': ['0987654321234', '123456789000987'], '123456789054321': ['0987654321234', '123456789000987'], }}} def setup_driver(self, config=None, mock_conf=None, wsapi_version=None): self.ctxt = context.get_admin_context() mock_client = self.setup_mock_client( conf=config, m_conf=mock_conf, driver=hpefcdriver.HPE3PARFCDriver) if wsapi_version: mock_client.getWsApiVersion.return_value = ( wsapi_version) else: mock_client.getWsApiVersion.return_value = ( self.wsapi_version_latest) expected = [ mock.call.getCPG(HPE3PAR_CPG), mock.call.getCPG(HPE3PAR_CPG2)] mock_client.assert_has_calls( self.standard_login + expected + self.standard_logout) mock_client.reset_mock() return mock_client @ddt.data('volume', 'volume_name_id') def test_initialize_connection(self, volume_attr): volume = getattr(self, volume_attr) vol_name = getattr(self, volume_attr.upper() + '_3PAR_NAME') # setup_mock_client drive with default configuration # and return the mock HTTP 3PAR client mock_client = self.setup_driver() mock_client.getVolume.return_value = {'userCPG': HPE3PAR_CPG} mock_client.getCPG.return_value = {} mock_client.getHost.side_effect = [ hpeexceptions.HTTPNotFound('fake'), 
{'name': self.FAKE_HOST, 'FCPaths': [{'driverVersion': None, 'firmwareVersion': None, 'hostSpeed': 0, 'model': None, 'portPos': {'cardPort': 1, 'node': 7, 'slot': 1}, 'vendor': None, 'wwn': self.wwn[0]}, {'driverVersion': None, 'firmwareVersion': None, 'hostSpeed': 0, 'model': None, 'portPos': {'cardPort': 1, 'node': 6, 'slot': 1}, 'vendor': None, 'wwn': self.wwn[1]}]}] mock_client.queryHost.return_value = { 'members': [{ 'name': self.FAKE_HOST }] } mock_client.getHostVLUNs.side_effect = [ hpeexceptions.HTTPNotFound('fake'), [{'active': True, 'volumeName': vol_name, 'remoteName': self.wwn[1], 'lun': 90, 'type': 0}], [{'active': True, 'volumeName': vol_name, 'remoteName': self.wwn[0], 'lun': 90, 'type': 0}]] location = ("%(volume_name)s,%(lun_id)s,%(host)s,%(nsp)s" % {'volume_name': vol_name, 'lun_id': 90, 'host': self.FAKE_HOST, 'nsp': 'something'}) mock_client.createVLUN.return_value = location expected_properties = { 'driver_volume_type': 'fibre_channel', 'data': { 'encrypted': False, 'target_lun': 90, 'target_wwn': ['0987654321234', '123456789000987'], 'target_discovered': True, 'initiator_target_map': {'123456789012345': ['0987654321234', '123456789000987'], '123456789054321': ['0987654321234', '123456789000987']}}} with mock.patch.object(hpecommon.HPE3PARCommon, '_create_client') as mock_create_client: mock_create_client.return_value = mock_client result = self.driver.initialize_connection( volume, self.connector_multipath_enabled) expected = [ mock.call.getVolume(vol_name), mock.call.getCPG(HPE3PAR_CPG), mock.call.getHost(self.FAKE_HOST), mock.call.queryHost(wwns=['123456789012345', '123456789054321']), mock.call.getHost(self.FAKE_HOST), mock.call.getPorts(), mock.call.getHostVLUNs(self.FAKE_HOST), mock.call.createVLUN( vol_name, auto=True, hostname=self.FAKE_HOST, lun=None), mock.call.getHostVLUNs(self.FAKE_HOST)] mock_client.assert_has_calls( self.standard_login + expected + self.standard_logout) self.assertDictEqual(expected_properties, result) def test_initialize_connection_single_path(self): # setup_mock_client drive with default configuration # and return the mock HTTP 3PAR client mock_client = self.setup_driver() mock_client.getStorageSystemInfo.return_value = ( {'id': self.CLIENT_ID}) mock_client.getVolume.return_value = {'userCPG': HPE3PAR_CPG} mock_client.getCPG.return_value = {} mock_client.getHost.side_effect = [ hpeexceptions.HTTPNotFound('fake'), {'name': self.FAKE_HOST, 'FCPaths': [{'driverVersion': None, 'firmwareVersion': None, 'hostSpeed': 0, 'model': None, 'vendor': None, 'wwn': self.wwn[0]}]}] mock_client.queryHost.return_value = { 'members': [{ 'name': self.FAKE_HOST }] } mock_client.getHostVLUNs.side_effect = [ hpeexceptions.HTTPNotFound('fake'), [{'active': True, 'volumeName': self.VOLUME_3PAR_NAME, 'portPos': {'node': 7, 'slot': 1, 'cardPort': 1}, 'remoteName': self.wwn[0], 'lun': 90, 'type': 0}]] location = ("%(volume_name)s,%(lun_id)s,%(host)s,%(nsp)s" % {'volume_name': self.VOLUME_3PAR_NAME, 'lun_id': 90, 'host': self.FAKE_HOST, 'nsp': 'something'}) mock_client.createVLUN.return_value = location expected_properties = { 'driver_volume_type': 'fibre_channel', 'data': { 'encrypted': False, 'target_lun': 90, 'target_wwn': ['0987654321234'], 'target_discovered': True, 'initiator_target_map': {'123456789012345': ['0987654321234']}}} with mock.patch.object(hpecommon.HPE3PARCommon, '_create_client') as mock_create_client: mock_create_client.return_value = mock_client result = self.driver.initialize_connection( self.volume, self.connector.copy()) expected = [ 
mock.call.getVolume(self.VOLUME_3PAR_NAME), mock.call.getCPG(HPE3PAR_CPG), mock.call.getHost(self.FAKE_HOST), mock.call.queryHost(wwns=['123456789012345']), mock.call.getHost(self.FAKE_HOST), mock.call.getPorts(), mock.call.getHostVLUNs(self.FAKE_HOST), mock.call.createVLUN( self.VOLUME_3PAR_NAME, auto=True, hostname=self.FAKE_HOST, lun=None), mock.call.getHostVLUNs(self.FAKE_HOST)] mock_client.assert_has_calls( self.get_id_login + self.standard_logout + self.standard_login + expected + self.standard_logout) self.assertDictEqual(expected_properties, result) @mock.patch('cinder.zonemanager.utils.create_lookup_service') def test_initialize_connection_with_lookup_single_nsp(self, mock_lookup): # setup_mock_client drive with default configuration # and return the mock HTTP 3PAR client class fake_lookup_object(object): def get_device_mapping_from_network(self, connector, target_wwns): fake_map = { 'FAB_1': { 'target_port_wwn_list': ['0987654321234'], 'initiator_port_wwn_list': ['123456789012345'] } } return fake_map mock_lookup.return_value = fake_lookup_object() mock_client = self.setup_driver() mock_client.getStorageSystemInfo.return_value = ( {'id': self.CLIENT_ID}) mock_client.getVolume.return_value = {'userCPG': HPE3PAR_CPG} mock_client.getCPG.return_value = {} mock_client.getHost.side_effect = [ hpeexceptions.HTTPNotFound('fake'), {'name': self.FAKE_HOST, 'FCPaths': [{'driverVersion': None, 'firmwareVersion': None, 'hostSpeed': 0, 'model': None, 'portPos': {'cardPort': 1, 'node': 1, 'slot': 2}, 'vendor': None, 'wwn': self.wwn[0]}]}] mock_client.queryHost.return_value = { 'members': [{ 'name': self.FAKE_HOST }] } mock_client.getHostVLUNs.side_effect = [ hpeexceptions.HTTPNotFound('fake'), [{'active': True, 'volumeName': self.VOLUME_3PAR_NAME, 'lun': 90, 'type': 0, 'portPos': {'cardPort': 1, 'node': 7, 'slot': 1}}]] location = ("%(volume_name)s,%(lun_id)s,%(host)s,%(nsp)s" % {'volume_name': self.VOLUME_3PAR_NAME, 'lun_id': 90, 'host': self.FAKE_HOST, 'nsp': 'something'}) mock_client.createVLUN.return_value = location connector = {'ip': '10.0.0.2', 'initiator': 'iqn.1993-08.org.debian:01:222', 'wwpns': [self.wwn[0]], 'wwnns': ["223456789012345"], 'host': self.FAKE_HOST} expected_properties = { 'driver_volume_type': 'fibre_channel', 'data': { 'encrypted': False, 'target_lun': 90, 'target_wwn': ['0987654321234'], 'target_discovered': True, 'initiator_target_map': {'123456789012345': ['0987654321234'] }}} with mock.patch.object(hpecommon.HPE3PARCommon, '_create_client') as mock_create_client: mock_create_client.return_value = mock_client result = self.driver.initialize_connection(self.volume, connector) expected = [ mock.call.getVolume(self.VOLUME_3PAR_NAME), mock.call.getCPG(HPE3PAR_CPG), mock.call.getHost(self.FAKE_HOST), mock.ANY, mock.call.getHost(self.FAKE_HOST), mock.call.getPorts(), mock.call.getHostVLUNs(self.FAKE_HOST), mock.call.getPorts(), mock.call.createVLUN( self.VOLUME_3PAR_NAME, auto=True, hostname=self.FAKE_HOST, portPos={'node': 7, 'slot': 1, 'cardPort': 1}, lun=None), mock.call.getHostVLUNs(self.FAKE_HOST)] mock_client.assert_has_calls( self.get_id_login + self.standard_logout + self.standard_login + expected + self.standard_logout) self.assertDictEqual(expected_properties, result) def test_initialize_connection_single_path_target_nsp(self): # setup_mock_client drive with default configuration # and return the mock HTTP 3PAR client mock_client = self.setup_driver() self.driver.configuration.hpe3par_target_nsp = '2:1:2' mock_client.getStorageSystemInfo.return_value = ( {'id': 
self.CLIENT_ID}) mock_client.getVolume.return_value = {'userCPG': HPE3PAR_CPG} mock_client.getCPG.return_value = {} mock_client.getHost.side_effect = [ hpeexceptions.HTTPNotFound('fake'), {'name': self.FAKE_HOST, 'FCPaths': [{'driverVersion': None, 'firmwareVersion': None, 'hostSpeed': 0, 'model': None, 'vendor': None, 'wwn': self.wwn[0]}]}] mock_client.queryHost.return_value = { 'members': [{ 'name': self.FAKE_HOST }] } mock_client.getHostVLUNs.side_effect = [ hpeexceptions.HTTPNotFound('fake'), [{'active': True, 'volumeName': self.VOLUME_3PAR_NAME, 'portPos': {'node': 7, 'slot': 1, 'cardPort': 1}, 'remoteName': self.wwn[0], 'lun': 90, 'type': 0}]] location = ("%(volume_name)s,%(lun_id)s,%(host)s,%(nsp)s" % {'volume_name': self.VOLUME_3PAR_NAME, 'lun_id': 90, 'host': self.FAKE_HOST, 'nsp': 'something'}) mock_client.createVLUN.return_value = location user_target_wwn = '0987654321234' expected_properties = { 'driver_volume_type': 'fibre_channel', 'data': { 'encrypted': False, 'target_lun': 90, 'target_wwn': [user_target_wwn], 'target_discovered': True, 'initiator_target_map': {'123456789012345': [user_target_wwn]}}} with mock.patch.object(hpecommon.HPE3PARCommon, '_create_client') as mock_create_client: mock_create_client.return_value = mock_client result = self.driver.initialize_connection( self.volume, self.connector.copy()) expected = [ mock.call.getVolume(self.VOLUME_3PAR_NAME), mock.call.getCPG(HPE3PAR_CPG), mock.call.getHost(self.FAKE_HOST), mock.call.queryHost(wwns=['123456789012345']), mock.call.getHost(self.FAKE_HOST), mock.call.getPorts(), mock.call.getPorts(), mock.call.getHostVLUNs(self.FAKE_HOST), mock.call.createVLUN( self.VOLUME_3PAR_NAME, auto=True, hostname=self.FAKE_HOST, lun=None), mock.call.getHostVLUNs(self.FAKE_HOST)] mock_client.assert_has_calls( self.get_id_login + self.standard_logout + self.standard_login + expected + self.standard_logout) self.assertDictEqual(expected_properties, result) def test_initialize_connection_encrypted(self): # setup_mock_client drive with default configuration # and return the mock HTTP 3PAR client mock_client = self.setup_driver() mock_client.getVolume.return_value = {'userCPG': HPE3PAR_CPG} mock_client.getCPG.return_value = {} mock_client.getHost.side_effect = [ hpeexceptions.HTTPNotFound('fake'), {'name': self.FAKE_HOST, 'FCPaths': [{'driverVersion': None, 'firmwareVersion': None, 'hostSpeed': 0, 'model': None, 'vendor': None, 'wwn': self.wwn[0]}, {'driverVersion': None, 'firmwareVersion': None, 'hostSpeed': 0, 'model': None, 'vendor': None, 'wwn': self.wwn[1]}]}] mock_client.queryHost.return_value = { 'members': [{ 'name': self.FAKE_HOST }] } mock_client.getHostVLUNs.side_effect = [ hpeexceptions.HTTPNotFound('fake'), [{'active': True, 'volumeName': self.VOLUME_3PAR_NAME, 'lun': 90, 'type': 0, 'remoteName': self.wwn[1]}], [{'active': True, 'volumeName': self.VOLUME_3PAR_NAME, 'remoteName': self.wwn[0], 'lun': 90, 'type': 0}]] location = ("%(volume_name)s,%(lun_id)s,%(host)s,%(nsp)s" % {'volume_name': self.VOLUME_3PAR_NAME, 'lun_id': 90, 'host': self.FAKE_HOST, 'nsp': 'something'}) mock_client.createVLUN.return_value = location expected_properties = { 'driver_volume_type': 'fibre_channel', 'data': { 'encrypted': True, 'target_lun': 90, 'target_wwn': ['0987654321234', '123456789000987'], 'target_discovered': True, 'initiator_target_map': {'123456789012345': ['0987654321234', '123456789000987'], '123456789054321': ['0987654321234', '123456789000987']}}} with mock.patch.object(hpecommon.HPE3PARCommon, '_create_client') as 
mock_create_client: mock_create_client.return_value = mock_client result = self.driver.initialize_connection( self.volume_encrypted, self.connector_multipath_enabled) expected = [ mock.call.getVolume(self.VOLUME_3PAR_NAME), mock.call.getCPG(HPE3PAR_CPG), mock.call.getHost(self.FAKE_HOST), mock.call.queryHost(wwns=['123456789012345', '123456789054321']), mock.call.getHost(self.FAKE_HOST), mock.call.getPorts(), mock.call.getHostVLUNs(self.FAKE_HOST), mock.call.createVLUN( self.VOLUME_3PAR_NAME, auto=True, hostname=self.FAKE_HOST, lun=None), mock.call.getHostVLUNs(self.FAKE_HOST)] mock_client.assert_has_calls( self.standard_login + expected + self.standard_logout) self.assertDictEqual(expected_properties, result) @mock.patch.object(volume_types, 'get_volume_type') def test_initialize_connection_peer_persistence(self, _mock_volume_types): # setup_mock_client drive with default configuration # and return the mock HTTP 3PAR client conf = self.setup_configuration() self.replication_targets[0]['replication_mode'] = 'sync' self.replication_targets[0]['quorum_witness_ip'] = '10.50.3.192' conf.replication_device = self.replication_targets mock_client = self.setup_driver(config=conf) mock_client.getStorageSystemInfo.return_value = ( {'id': self.CLIENT_ID}) mock_replicated_client = self.setup_driver(config=conf) mock_replicated_client.getStorageSystemInfo.return_value = ( {'id': self.REPLICATION_CLIENT_ID}) _mock_volume_types.return_value = { 'name': 'replicated', 'extra_specs': { 'replication_enabled': ' True', 'replication:mode': 'sync', 'volume_type': self.volume_type_replicated}} mock_client.getVolume.return_value = {'userCPG': HPE3PAR_CPG} mock_client.getCPG.return_value = {} mock_client.getHost.side_effect = [ hpeexceptions.HTTPNotFound('fake'), {'name': self.FAKE_HOST, 'FCPaths': [{'driverVersion': None, 'firmwareVersion': None, 'hostSpeed': 0, 'model': None, 'portPos': {'cardPort': 1, 'node': 7, 'slot': 1}, 'vendor': None, 'wwn': self.wwn[0]}, {'driverVersion': None, 'firmwareVersion': None, 'hostSpeed': 0, 'model': None, 'portPos': {'cardPort': 1, 'node': 6, 'slot': 1}, 'vendor': None, 'wwn': self.wwn[1]}]}] mock_client.queryHost.return_value = { 'members': [{ 'name': self.FAKE_HOST }] } mock_client.getHostVLUNs.side_effect = [ hpeexceptions.HTTPNotFound('fake'), [{'active': True, 'volumeName': self.VOLUME_3PAR_NAME, 'remoteName': self.wwn[1], 'lun': 90, 'type': 0}], [{'active': True, 'volumeName': self.VOLUME_3PAR_NAME, 'remoteName': self.wwn[0], 'lun': 90, 'type': 0}]] mock_replicated_client.getHostVLUNs.side_effect = [ hpeexceptions.HTTPNotFound('fake'), [{'active': True, 'volumeName': self.VOLUME_3PAR_NAME, 'remoteName': self.wwn[1], 'lun': 80, 'type': 0}], [{'active': True, 'volumeName': self.VOLUME_3PAR_NAME, 'remoteName': self.wwn[0], 'lun': 80, 'type': 0}]] location = ("%(volume_name)s,%(lun_id)s,%(host)s,%(nsp)s" % {'volume_name': self.VOLUME_3PAR_NAME, 'lun_id': 90, 'host': self.FAKE_HOST, 'nsp': 'something'}) mock_client.createVLUN.return_value = location location_peer = ("%(volume_name)s,%(lun_id)s,%(host)s,%(nsp)s" % {'volume_name': self.VOLUME_3PAR_NAME, 'lun_id': 80, 'host': self.FAKE_HOST, 'nsp': 'something'}) mock_replicated_client.createVLUN.return_value = location_peer primary_luns = [90, 90] peer_luns = [80, 80] expected_properties = { 'driver_volume_type': 'fibre_channel', 'data': { 'encrypted': False, 'target_wwn': ['0987654321234', '123456789000987', '0987654321234', '123456789000987'], 'target_luns': primary_luns + peer_luns, 'target_discovered': True, 
'initiator_target_map': {'123456789012345': ['0987654321234', '123456789000987', '0987654321234', '123456789000987'], '123456789054321': ['0987654321234', '123456789000987', '0987654321234', '123456789000987']}}} with mock.patch.object( hpecommon.HPE3PARCommon, '_create_client') as mock_create_client, \ mock.patch.object( hpecommon.HPE3PARCommon, '_create_replication_client') as mock_replication_client: mock_create_client.return_value = mock_client mock_replication_client.return_value = mock_replicated_client volume = copy.deepcopy(self.volume) volume.replication_status = 'enabled' result = self.driver.initialize_connection( volume, self.connector_multipath_enabled) expected = [ mock.call.getVolume(self.VOLUME_3PAR_NAME), mock.call.getCPG(HPE3PAR_CPG), mock.call.getHost(self.FAKE_HOST), mock.call.queryHost(wwns=['123456789012345', '123456789054321']), mock.call.getHost(self.FAKE_HOST), mock.call.getPorts(), mock.call.getHostVLUNs(self.FAKE_HOST), mock.call.createVLUN( self.VOLUME_3PAR_NAME, auto=True, hostname=self.FAKE_HOST, lun=None), mock.call.getHostVLUNs(self.FAKE_HOST)] mock_client.assert_has_calls( self.get_id_login + self.standard_logout + self.standard_login + expected + self.standard_logout) self.assertDictEqual(expected_properties, result) @ddt.data('volume', 'volume_name_id') def test_terminate_connection(self, volume_attr): volume = getattr(self, volume_attr) vol_name = getattr(self, volume_attr.upper() + '_3PAR_NAME') # setup_mock_client drive with default configuration # and return the mock HTTP 3PAR client mock_client = self.setup_driver() effects = [ [{'active': False, 'volumeName': vol_name, 'lun': None, 'type': 0}], hpeexceptions.HTTPNotFound, hpeexceptions.HTTPNotFound] mock_client.getHostVLUNs.side_effect = effects mock_client.queryHost.return_value = { 'members': [{ 'name': self.FAKE_HOST }] } expected = [ mock.call.queryHost(wwns=['123456789012345', '123456789054321']), mock.call.getHostVLUNs(self.FAKE_HOST), mock.call.deleteVLUN( vol_name, None, hostname=self.FAKE_HOST), mock.call.getHostVLUNs(self.FAKE_HOST), mock.call.deleteHost(self.FAKE_HOST), mock.call.getHostVLUNs(self.FAKE_HOST), mock.call.getPorts()] with mock.patch.object(hpecommon.HPE3PARCommon, '_create_client') as mock_create_client: mock_create_client.return_value = mock_client conn_info = self.driver.terminate_connection(volume, self.connector) mock_client.assert_has_calls( self.standard_login + expected + self.standard_logout) self.assertIn('data', conn_info) self.assertIn('initiator_target_map', conn_info['data']) mock_client.reset_mock() mock_client.getHostVLUNs.side_effect = effects # mock some deleteHost exceptions that are handled delete_with_vlun = hpeexceptions.HTTPConflict( error={'message': "has exported VLUN"}) delete_with_hostset = hpeexceptions.HTTPConflict( error={'message': "host is a member of a set"}) mock_client.deleteHost = mock.Mock( side_effect=[delete_with_vlun, delete_with_hostset]) conn_info = self.driver.terminate_connection(volume, self.connector) mock_client.assert_has_calls( self.standard_login + expected + self.standard_logout) mock_client.reset_mock() mock_client.getHostVLUNs.side_effect = effects conn_info = self.driver.terminate_connection(volume, self.connector) mock_client.assert_has_calls( self.standard_login + expected + self.standard_logout) def test_force_detach_volume(self): # setup_mock_client drive with default configuration # and return the mock HTTP 3PAR client mock_client = self.setup_driver() mock_client.getVLUNs.return_value = { 'members': [{ 'active': 
False, 'volumeName': self.VOLUME_3PAR_NAME, 'hostname': self.FAKE_HOST, 'lun': None, 'type': 0}]} mock_client.queryHost.return_value = { 'members': [{ 'name': self.FAKE_HOST }] } mock_client.getHostVLUNs.side_effect = hpeexceptions.HTTPNotFound expected = [ mock.call.getVLUNs(), mock.call.deleteVLUN( self.VOLUME_3PAR_NAME, None, hostname=self.FAKE_HOST), mock.call.getHostVLUNs(self.FAKE_HOST), mock.call.deleteHost(self.FAKE_HOST)] with mock.patch.object(hpecommon.HPE3PARCommon, '_create_client') as mock_create_client: mock_create_client.return_value = mock_client self.driver.terminate_connection(self.volume, None) mock_client.assert_has_calls( self.standard_login + expected + self.standard_logout) @mock.patch('cinder.zonemanager.utils.create_lookup_service') def test_terminate_connection_with_lookup(self, mock_lookup): # setup_mock_client drive with default configuration # and return the mock HTTP 3PAR client class fake_lookup_object(object): def get_device_mapping_from_network(self, connector, target_wwns): fake_map = { 'FAB_1': { 'target_port_wwn_list': ['0987654321234'], 'initiator_port_wwn_list': ['123456789012345'] } } return fake_map mock_lookup.return_value = fake_lookup_object() mock_client = self.setup_driver() effects = [ [{'active': False, 'volumeName': self.VOLUME_3PAR_NAME, 'lun': None, 'type': 0}], hpeexceptions.HTTPNotFound, hpeexceptions.HTTPNotFound] mock_client.queryHost.return_value = { 'members': [{ 'name': self.FAKE_HOST }] } mock_client.getHostVLUNs.side_effect = effects expected = [ mock.call.queryHost(wwns=['123456789012345', '123456789054321']), mock.call.getHostVLUNs(self.FAKE_HOST), mock.call.deleteVLUN( self.VOLUME_3PAR_NAME, None, hostname=self.FAKE_HOST), mock.call.getHostVLUNs(self.FAKE_HOST), mock.call.deleteHost(self.FAKE_HOST), mock.call.getHostVLUNs(self.FAKE_HOST), mock.call.getPorts()] with mock.patch.object(hpecommon.HPE3PARCommon, '_create_client') as mock_create_client: mock_create_client.return_value = mock_client conn_info = self.driver.terminate_connection(self.volume, self.connector) mock_client.assert_has_calls( self.standard_login + expected + self.standard_logout) self.assertIn('data', conn_info) self.assertIn('initiator_target_map', conn_info['data']) mock_client.reset_mock() mock_client.getHostVLUNs.side_effect = effects # mock some deleteHost exceptions that are handled delete_with_vlun = hpeexceptions.HTTPConflict( error={'message': "has exported VLUN"}) delete_with_hostset = hpeexceptions.HTTPConflict( error={'message': "host is a member of a set"}) mock_client.deleteHost = mock.Mock( side_effect=[delete_with_vlun, delete_with_hostset]) conn_info = self.driver.terminate_connection(self.volume, self.connector) mock_client.assert_has_calls( self.standard_login + expected + self.standard_logout) mock_client.reset_mock() mock_client.getHostVLUNs.side_effect = effects conn_info = self.driver.terminate_connection(self.volume, self.connector) mock_client.assert_has_calls( self.standard_login + expected + self.standard_logout) def test_terminate_connection_more_vols(self): mock_client = self.setup_driver() mock_client.getStorageSystemInfo.return_value = ( {'id': self.CLIENT_ID}) # mock more than one vlun on the host (don't even try to remove host) mock_client.getHostVLUNs.return_value = \ [ {'active': False, 'volumeName': self.VOLUME_3PAR_NAME, 'lun': None, 'type': 0}, {'active': True, 'volumeName': 'there-is-another-volume', 'remoteName': '123456789012ABC', 'lun': None, 'type': 0}, {'active': True, 'volumeName': 'there-is-another-volume', 
'remoteName': '123456789012ABC', 'lun': None, 'type': 0}, ] mock_client.queryHost.return_value = { 'members': [{ 'name': self.FAKE_HOST }] } expect_less = [ mock.call.queryHost(wwns=['123456789012345', '123456789054321']), mock.call.getHostVLUNs(self.FAKE_HOST), mock.call.deleteVLUN( self.VOLUME_3PAR_NAME, None, hostname=self.FAKE_HOST), mock.call.getHostVLUNs(self.FAKE_HOST), mock.call.modifyHost( 'fakehost', {'FCWWNs': ['123456789012345', '123456789054321'], 'pathOperation': self.mock_client_conf['HOST_EDIT_REMOVE']}), mock.call.getHostVLUNs(self.FAKE_HOST), mock.call.getPorts()] expect_conn = { 'driver_volume_type': 'fibre_channel', 'data': {'initiator_target_map': {'123456789012345': ['0987654321234', '123456789000987'], '123456789054321': ['0987654321234', '123456789000987']}, 'target_wwn': ['0987654321234', '123456789000987']}} with mock.patch.object(hpecommon.HPE3PARCommon, '_create_client') as mock_create_client: mock_create_client.return_value = mock_client conn_info = self.driver.terminate_connection(self.volume, self.connector) mock_client.assert_has_calls( self.get_id_login + self.standard_logout + self.standard_login + expect_less + self.standard_logout) self.assertEqual(expect_conn, conn_info) @mock.patch.object(volume_types, 'get_volume_type') def test_terminate_connection_peer_persistence(self, _mock_volume_types): # setup_mock_client drive with default configuration # and return the mock HTTP 3PAR client conf = self.setup_configuration() self.replication_targets[0]['replication_mode'] = 'sync' self.replication_targets[0]['quorum_witness_ip'] = '10.50.3.192' conf.replication_device = self.replication_targets mock_client = self.setup_driver(config=conf) mock_client.getStorageSystemInfo.return_value = ( {'id': self.CLIENT_ID}) mock_replicated_client = self.setup_driver(config=conf) mock_replicated_client.getStorageSystemInfo.return_value = ( {'id': self.REPLICATION_CLIENT_ID}) _mock_volume_types.return_value = { 'name': 'replicated', 'extra_specs': { 'replication_enabled': ' True', 'replication:mode': 'sync', 'volume_type': self.volume_type_replicated}} effects = [ [{'active': False, 'volumeName': self.VOLUME_3PAR_NAME, 'lun': None, 'type': 0}], hpeexceptions.HTTPNotFound, hpeexceptions.HTTPNotFound] mock_client.getHostVLUNs.side_effect = effects mock_replicated_client.getHostVLUNs.side_effect = effects getHost_side_effect = [ hpeexceptions.HTTPNotFound('fake'), {'name': self.FAKE_HOST, 'FCPaths': [{'driverVersion': None, 'firmwareVersion': None, 'hostSpeed': 0, 'model': None, 'portPos': {'cardPort': 1, 'node': 7, 'slot': 1}, 'vendor': None, 'wwn': self.wwn[0]}, {'driverVersion': None, 'firmwareVersion': None, 'hostSpeed': 0, 'model': None, 'portPos': {'cardPort': 1, 'node': 6, 'slot': 1}, 'vendor': None, 'wwn': self.wwn[1]}]}] queryHost_return_value = { 'members': [{ 'name': self.FAKE_HOST }] } mock_client.getHost.side_effect = getHost_side_effect mock_client.queryHost.return_value = queryHost_return_value mock_replicated_client.getHost.side_effect = getHost_side_effect mock_replicated_client.queryHost.return_value = queryHost_return_value expected = [ mock.call.queryHost(wwns=['123456789012345', '123456789054321']), mock.call.getHostVLUNs(self.FAKE_HOST), mock.call.deleteVLUN( self.VOLUME_3PAR_NAME, None, hostname=self.FAKE_HOST), mock.call.getHostVLUNs(self.FAKE_HOST), mock.call.deleteHost(self.FAKE_HOST), mock.call.getHostVLUNs(self.FAKE_HOST), mock.call.getPorts()] volume = copy.deepcopy(self.volume) volume.replication_status = 'enabled' with mock.patch.object( 
hpecommon.HPE3PARCommon, '_create_client') as mock_create_client, \ mock.patch.object( hpecommon.HPE3PARCommon, '_create_replication_client') as mock_replication_client: mock_create_client.return_value = mock_client mock_replication_client.return_value = mock_replicated_client conn_info = self.driver.terminate_connection( volume, self.connector_multipath_enabled) mock_client.assert_has_calls( self.standard_login + expected + self.standard_logout) self.assertIn('data', conn_info) self.assertIn('initiator_target_map', conn_info['data']) mock_client.reset_mock() mock_replicated_client.reset_mock() mock_client.getHostVLUNs.side_effect = effects mock_replicated_client.getHostVLUNs.side_effect = effects # mock some deleteHost exceptions that are handled delete_with_vlun = hpeexceptions.HTTPConflict( error={'message': "has exported VLUN"}) delete_with_hostset = hpeexceptions.HTTPConflict( error={'message': "host is a member of a set"}) mock_client.deleteHost = mock.Mock( side_effect=[delete_with_vlun, delete_with_hostset]) conn_info = self.driver.terminate_connection( volume, self.connector_multipath_enabled) mock_client.assert_has_calls( self.standard_login + expected + self.standard_logout) mock_client.reset_mock() mock_replicated_client.reset_mock() mock_client.getHostVLUNs.side_effect = effects mock_replicated_client.getHostVLUNs.side_effect = effects conn_info = self.driver.terminate_connection( volume, self.connector_multipath_enabled) mock_client.assert_has_calls( self.standard_login + expected + self.standard_logout) def test_get_3par_host_from_wwn_iqn(self): mock_client = self.setup_driver() mock_client.getHosts.return_value = { 'name': self.FAKE_HOST, 'FCPaths': [{'driverVersion': None, 'firmwareVersion': None, 'hostSpeed': 0, 'model': None, 'portPos': {'cardPort': 1, 'node': 1, 'slot': 2}, 'vendor': None, 'wwn': '123ab6789012345'}]} with mock.patch.object(hpecommon.HPE3PARCommon, '_create_client') as mock_create_client: mock_create_client.return_value = mock_client hostname = mock_client._get_3par_hostname_from_wwn_iqn( wwns=['123AB6789012345', '123CD6789054321'], iqns=None) self.assertIsNotNone(hostname) # (i) wsapi version is old/default # (ii) wsapi version is 2025, then all licenses are enabled @ddt.data({'wsapi_version': None}, {'wsapi_version': HPE3PARBaseDriver.wsapi_version_2025}) @ddt.unpack def test_get_volume_stats1(self, wsapi_version): # setup_mock_client drive with the configuration # and return the mock HTTP 3PAR client config = self.setup_configuration() config.filter_function = FILTER_FUNCTION config.goodness_function = GOODNESS_FUNCTION mock_client = self.setup_driver(config=config, wsapi_version=wsapi_version) mock_client.getCPG.return_value = self.cpgs[0] if not wsapi_version: # Purposely left out the Priority Optimization license in # getStorageSystemInfo to test that QoS_support returns False. 
mock_client.getStorageSystemInfo.return_value = { 'id': self.CLIENT_ID, 'serialNumber': '1234', 'licenseInfo': { 'licenses': [{'name': 'Remote Copy'}, {'name': 'Thin Provisioning (102400G)'}, {'name': 'System Reporter'}] } } else: mock_client.getStorageSystemInfo.return_value = { 'id': self.CLIENT_ID, 'serialNumber': '1234', 'licenseInfo': { # all licenses are enabled 'licenses': [{'name': 'FIPS Encryption'}, {'name': 'Owned'}, {'name': 'Software and Support SaaS'}] } } # cpg has no limit mock_client.getCPGAvailableSpace.return_value = { "capacityEfficiency": {u'compaction': 594.4}, "rawFreeMiB": 1024.0 * 6, "usableFreeMiB": 1024.0 * 79 } stat_capabilities = { THROUGHPUT: 0, BANDWIDTH: 0, LATENCY: 0, IO_SIZE: 0, QUEUE_LENGTH: 0, AVG_BUSY_PERC: 0 } mock_client.getCPGStatData.return_value = stat_capabilities with mock.patch.object(hpecommon.HPE3PARCommon, '_create_client') as mock_create_client: mock_create_client.return_value = mock_client common = self.driver._login() stats = self.driver.get_volume_stats(True) const = 0.0009765625 self.assertEqual('FC', stats['storage_protocol']) self.assertEqual('12345', stats['array_id']) self.assertTrue(stats['pools'][0]['thin_provisioning_support']) self.assertTrue(stats['pools'][0]['thick_provisioning_support']) if not wsapi_version: # (i) old/default self.assertFalse(stats['pools'][0]['QoS_support']) else: # (ii) wsapi version 2025 self.assertTrue(stats['pools'][0]['QoS_support']) self.assertEqual(86.0, stats['pools'][0]['provisioned_capacity_gb']) self.assertEqual(100.0, stats['pools'][0]['total_capacity_gb']) self.assertEqual(14.0, stats['pools'][0]['free_capacity_gb']) self.assertEqual(86.0, stats['pools'][0]['capacity_utilization']) self.assertEqual(3, stats['pools'][0]['total_volumes']) self.assertEqual('up', stats['pools'][0]['backend_state']) self.assertEqual(GOODNESS_FUNCTION, stats['pools'][0]['goodness_function']) self.assertEqual(FILTER_FUNCTION, stats['pools'][0]['filter_function']) self.assertEqual(stat_capabilities[THROUGHPUT], stats['pools'][0][THROUGHPUT]) self.assertEqual(stat_capabilities[BANDWIDTH], stats['pools'][0][BANDWIDTH]) self.assertEqual(stat_capabilities[LATENCY], stats['pools'][0][LATENCY]) self.assertEqual(stat_capabilities[IO_SIZE], stats['pools'][0][IO_SIZE]) self.assertEqual(stat_capabilities[QUEUE_LENGTH], stats['pools'][0][QUEUE_LENGTH]) self.assertEqual(stat_capabilities[AVG_BUSY_PERC], stats['pools'][0][AVG_BUSY_PERC]) expected = [ mock.call.getStorageSystemInfo(), mock.call.getCPG(HPE3PAR_CPG), mock.call.getCPGStatData(HPE3PAR_CPG, 'daily', '7d'), mock.call.getCPGAvailableSpace(HPE3PAR_CPG), mock.call.getCPG(HPE3PAR_CPG2), mock.call.getCPGStatData(HPE3PAR_CPG2, 'daily', '7d'), mock.call.getCPGAvailableSpace(HPE3PAR_CPG2)] mock_client.assert_has_calls(expected) stats = self.driver.get_volume_stats(True) self.assertEqual('FC', stats['storage_protocol']) self.assertEqual('12345', stats['array_id']) self.assertTrue(stats['pools'][0]['thin_provisioning_support']) self.assertTrue(stats['pools'][0]['thick_provisioning_support']) if not wsapi_version: # (i) old/default self.assertFalse(stats['pools'][0]['QoS_support']) else: # (ii) wsapi version 2025 self.assertTrue(stats['pools'][0]['QoS_support']) self.assertEqual(86.0, stats['pools'][0]['provisioned_capacity_gb']) self.assertEqual(100.0, stats['pools'][0]['total_capacity_gb']) self.assertEqual(14.0, stats['pools'][0]['free_capacity_gb']) self.assertEqual(86.0, stats['pools'][0]['capacity_utilization']) self.assertEqual(3, stats['pools'][0]['total_volumes']) 
self.assertEqual(GOODNESS_FUNCTION, stats['pools'][0]['goodness_function']) self.assertEqual(FILTER_FUNCTION, stats['pools'][0]['filter_function']) self.assertEqual(stat_capabilities[THROUGHPUT], stats['pools'][0][THROUGHPUT]) self.assertEqual(stat_capabilities[BANDWIDTH], stats['pools'][0][BANDWIDTH]) self.assertEqual(stat_capabilities[LATENCY], stats['pools'][0][LATENCY]) self.assertEqual(stat_capabilities[IO_SIZE], stats['pools'][0][IO_SIZE]) self.assertEqual(stat_capabilities[QUEUE_LENGTH], stats['pools'][0][QUEUE_LENGTH]) self.assertEqual(stat_capabilities[AVG_BUSY_PERC], stats['pools'][0][AVG_BUSY_PERC]) cpg2 = self.cpgs[0].copy() cpg2.update({'SDGrowth': {'limitMiB': 204800}}) mock_client.getCPG.return_value = cpg2 stats = self.driver.get_volume_stats(True) self.assertEqual('FC', stats['storage_protocol']) self.assertEqual('12345', stats['array_id']) self.assertTrue(stats['pools'][0]['thin_provisioning_support']) self.assertTrue(stats['pools'][0]['thick_provisioning_support']) if not wsapi_version: # (i) old/default self.assertFalse(stats['pools'][0]['QoS_support']) else: # (ii) wsapi version 2025 self.assertTrue(stats['pools'][0]['QoS_support']) total_capacity_gb = 200 * 1024 * const self.assertEqual(total_capacity_gb, stats['pools'][0]['total_capacity_gb']) free_capacity_gb = 114 self.assertEqual(free_capacity_gb, stats['pools'][0]['free_capacity_gb']) provisioned_capacity_gb = 86 self.assertEqual(provisioned_capacity_gb, stats['pools'][0]['provisioned_capacity_gb']) cap_util = 43.0 self.assertEqual(cap_util, stats['pools'][0]['capacity_utilization']) self.assertEqual(3, stats['pools'][0]['total_volumes']) self.assertEqual(GOODNESS_FUNCTION, stats['pools'][0]['goodness_function']) self.assertEqual(FILTER_FUNCTION, stats['pools'][0]['filter_function']) self.assertEqual(stat_capabilities[THROUGHPUT], stats['pools'][0][THROUGHPUT]) self.assertEqual(stat_capabilities[BANDWIDTH], stats['pools'][0][BANDWIDTH]) self.assertEqual(stat_capabilities[LATENCY], stats['pools'][0][LATENCY]) self.assertEqual(stat_capabilities[IO_SIZE], stats['pools'][0][IO_SIZE]) self.assertEqual(stat_capabilities[QUEUE_LENGTH], stats['pools'][0][QUEUE_LENGTH]) self.assertEqual(stat_capabilities[AVG_BUSY_PERC], stats['pools'][0][AVG_BUSY_PERC]) common.client.deleteCPG(HPE3PAR_CPG) common.client.createCPG(HPE3PAR_CPG, {}) def test_get_volume_stats2(self): # Testing when the API_VERSION is incompatible with getCPGStatData srstatld_api_version = 30201200 pre_srstatld_api_version = srstatld_api_version - 1 wsapi = {'build': pre_srstatld_api_version} config = self.setup_configuration() config.filter_function = FILTER_FUNCTION config.goodness_function = GOODNESS_FUNCTION mock_client = self.setup_driver(config=config, wsapi_version=wsapi) mock_client.getCPG.return_value = self.cpgs[0] # Purposely left out the Thin Provisioning license in # getStorageSystemInfo to test that thin_provisioning_support returns # False. 
mock_client.getStorageSystemInfo.return_value = { 'id': self.CLIENT_ID, 'serialNumber': '1234', 'licenseInfo': { 'licenses': [{'name': 'Remote Copy'}, {'name': 'Priority Optimization'}] } } # cpg has no limit mock_client.getCPGAvailableSpace.return_value = { "capacityEfficiency": {u'compaction': 594.4}, "rawFreeMiB": 1024.0 * 6, "usableFreeMiB": 1024.0 * 79 } with mock.patch.object(hpecommon.HPE3PARCommon, '_create_client') as mock_create_client: mock_create_client.return_value = mock_client self.driver._login() stats = self.driver.get_volume_stats(True) self.assertEqual('FC', stats['storage_protocol']) self.assertEqual('12345', stats['array_id']) self.assertFalse(stats['pools'][0]['thin_provisioning_support']) self.assertTrue(stats['pools'][0]['QoS_support']) self.assertEqual(100.0, stats['pools'][0]['total_capacity_gb']) self.assertEqual(14.0, stats['pools'][0]['free_capacity_gb']) self.assertEqual(86.0, stats['pools'][0]['capacity_utilization']) self.assertEqual(3, stats['pools'][0]['total_volumes']) self.assertEqual(GOODNESS_FUNCTION, stats['pools'][0]['goodness_function']) self.assertEqual(FILTER_FUNCTION, stats['pools'][0]['filter_function']) self.assertIsNone(stats['pools'][0][THROUGHPUT]) self.assertIsNone(stats['pools'][0][BANDWIDTH]) self.assertIsNone(stats['pools'][0][LATENCY]) self.assertIsNone(stats['pools'][0][IO_SIZE]) self.assertIsNone(stats['pools'][0][QUEUE_LENGTH]) self.assertIsNone(stats['pools'][0][AVG_BUSY_PERC]) expected = [ mock.call.getStorageSystemInfo(), mock.call.getCPG(HPE3PAR_CPG), mock.call.getCPGAvailableSpace(HPE3PAR_CPG), mock.call.getCPG(HPE3PAR_CPG2), mock.call.getCPGAvailableSpace(HPE3PAR_CPG2)] mock_client.assert_has_calls(expected) def test_get_volume_stats3(self): # Testing when the client version is incompatible with getCPGStatData # setup_mock_client drive with the configuration # and return the mock HTTP 3PAR client config = self.setup_configuration() config.filter_function = FILTER_FUNCTION config.goodness_function = GOODNESS_FUNCTION mock_client = self.setup_driver(config=config, wsapi_version=self.wsapi_version_312) mock_client.getCPG.return_value = self.cpgs[0] mock_client.getStorageSystemInfo.return_value = { 'id': self.CLIENT_ID, 'serialNumber': '1234' } # cpg has no limit mock_client.getCPGAvailableSpace.return_value = { "capacityEfficiency": {u'compaction': 594.4}, "rawFreeMiB": 1024.0 * 6, "usableFreeMiB": 1024.0 * 79 } with mock.patch.object(hpecommon.HPE3PARCommon, '_create_client') as mock_create_client: mock_create_client.return_value = mock_client self.driver._login() stats = self.driver.get_volume_stats(True) self.assertEqual('FC', stats['storage_protocol']) self.assertEqual('12345', stats['array_id']) self.assertEqual(100.0, stats['pools'][0]['total_capacity_gb']) self.assertEqual(14.0, stats['pools'][0]['free_capacity_gb']) self.assertEqual(86.0, stats['pools'][0]['capacity_utilization']) self.assertEqual(3, stats['pools'][0]['total_volumes']) self.assertEqual(GOODNESS_FUNCTION, stats['pools'][0]['goodness_function']) self.assertEqual(FILTER_FUNCTION, stats['pools'][0]['filter_function']) self.assertIsNone(stats['pools'][0][THROUGHPUT]) self.assertIsNone(stats['pools'][0][BANDWIDTH]) self.assertIsNone(stats['pools'][0][LATENCY]) self.assertIsNone(stats['pools'][0][IO_SIZE]) self.assertIsNone(stats['pools'][0][QUEUE_LENGTH]) self.assertIsNone(stats['pools'][0][AVG_BUSY_PERC]) expected = [ mock.call.getStorageSystemInfo(), mock.call.getCPG(HPE3PAR_CPG), mock.call.getCPGAvailableSpace(HPE3PAR_CPG), mock.call.getCPG(HPE3PAR_CPG2), 
mock.call.getCPGAvailableSpace(HPE3PAR_CPG2)] mock_client.assert_has_calls(expected) def test_get_volume_stats4(self): # Testing get_volume_stats() when System Reporter license is not active # setup_mock_client drive with the configuration # and return the mock HTTP 3PAR client config = self.setup_configuration() config.filter_function = FILTER_FUNCTION config.goodness_function = GOODNESS_FUNCTION mock_client = self.setup_driver(config=config) mock_client.getCPG.return_value = self.cpgs[0] # Purposely left out the System Reporter license in # getStorageSystemInfo to test sr_support mock_client.getStorageSystemInfo.return_value = { 'id': self.CLIENT_ID, 'serialNumber': '1234', 'licenseInfo': { 'licenses': [{'name': 'Remote Copy'}, {'name': 'Priority Optimization'}, {'name': 'Thin Provisioning'}] } } # cpg has no limit mock_client.getCPGAvailableSpace.return_value = { "capacityEfficiency": {u'compaction': 594.4}, "rawFreeMiB": 1024.0 * 6, "usableFreeMiB": 1024.0 * 79 } with mock.patch.object(hpecommon.HPE3PARCommon, '_create_client') as mock_create_client: mock_create_client.return_value = mock_client stats = self.driver.get_volume_stats(True) self.assertEqual('FC', stats['storage_protocol']) self.assertEqual('12345', stats['array_id']) self.assertTrue(stats['pools'][0]['thin_provisioning_support']) self.assertTrue(stats['pools'][0]['QoS_support']) self.assertEqual(100.0, stats['pools'][0]['total_capacity_gb']) self.assertEqual(14.0, stats['pools'][0]['free_capacity_gb']) self.assertEqual(86.0, stats['pools'][0]['capacity_utilization']) self.assertEqual(3, stats['pools'][0]['total_volumes']) self.assertEqual(GOODNESS_FUNCTION, stats['pools'][0]['goodness_function']) self.assertEqual(FILTER_FUNCTION, stats['pools'][0]['filter_function']) self.assertIsNone(stats['pools'][0][THROUGHPUT]) self.assertIsNone(stats['pools'][0][BANDWIDTH]) self.assertIsNone(stats['pools'][0][LATENCY]) self.assertIsNone(stats['pools'][0][IO_SIZE]) self.assertIsNone(stats['pools'][0][QUEUE_LENGTH]) self.assertIsNone(stats['pools'][0][AVG_BUSY_PERC]) expected = [ mock.call.getStorageSystemInfo(), mock.call.getCPG(HPE3PAR_CPG), mock.call.getCPGAvailableSpace(HPE3PAR_CPG), mock.call.getCPG(HPE3PAR_CPG2), mock.call.getCPGAvailableSpace(HPE3PAR_CPG2)] mock_client.assert_has_calls(expected) def test_get_volume_stats5(self): # Testing get_volume_stats(refresh=False) for cached values config = self.setup_configuration() mock_client = self.setup_driver(config=config) with mock.patch.object(hpecommon.HPE3PARCommon, '_create_client') as mock_create_client: mock_create_client.return_value = mock_client stats = self.driver.get_volume_stats(True) cached_stats = self.driver.get_volume_stats(False) self.assertEqual(stats, cached_stats) def test_create_host_with_unmanage_fc_and_manage_iscsi_hosts(self): # setup_mock_client drive with default configuration # and return the mock HTTP 3PAR client mock_client = self.setup_driver() mock_client.getVolume.return_value = {'userCPG': HPE3PAR_CPG} mock_client.getCPG.return_value = {} def get_side_effect(*args): host = {'name': None} if args[0] == 'fake': host['name'] = 'fake' elif args[0] == self.FAKE_HOST: host['name'] = self.FAKE_HOST return host mock_client.getHost.side_effect = get_side_effect mock_client.queryHost.return_value = { 'members': [{ 'name': 'fake' }] } mock_client.getVLUN.return_value = {'lun': 186} with mock.patch.object(hpecommon.HPE3PARCommon, '_create_client') as mock_create_client: mock_create_client.return_value = mock_client common = self.driver._login() host, cpg = 
self.driver._create_host( common, self.volume, self.connector_multipath_enabled) expected = [ mock.call.getVolume('osv-0DM4qZEVSKON-DXN-NwVpw'), mock.call.getCPG(HPE3PAR_CPG), mock.call.getHost(self.FAKE_HOST), mock.call.queryHost(wwns=['123456789012345', '123456789054321']), mock.call.getHost('fake')] mock_client.assert_has_calls(expected) self.assertEqual('fake', host['name']) self.assertEqual(HPE3PAR_CPG, cpg) def test_create_host(self): # setup_mock_client drive with default configuration # and return the mock HTTP 3PAR client mock_client = self.setup_driver() mock_client.getVolume.return_value = {'userCPG': HPE3PAR_CPG} mock_client.getCPG.return_value = {} mock_client.getHost.side_effect = [ hpeexceptions.HTTPNotFound('fake'), {'name': self.FAKE_HOST, 'FCPaths': [{'driverVersion': None, 'firmwareVersion': None, 'hostSpeed': 0, 'model': None, 'portPos': {'cardPort': 1, 'node': 1, 'slot': 2}, 'vendor': None, 'wwn': self.wwn[0]}, {'driverVersion': None, 'firmwareVersion': None, 'hostSpeed': 0, 'model': None, 'portPos': {'cardPort': 1, 'node': 0, 'slot': 2}, 'vendor': None, 'wwn': self.wwn[1]}]}] mock_client.queryHost.return_value = None mock_client.getVLUN.return_value = {'lun': 186} with mock.patch.object(hpecommon.HPE3PARCommon, '_create_client') as mock_create_client: mock_create_client.return_value = mock_client common = self.driver._login() host, cpg = self.driver._create_host( common, self.volume, self.connector_multipath_enabled) expected = [ mock.call.getVolume('osv-0DM4qZEVSKON-DXN-NwVpw'), mock.call.getCPG(HPE3PAR_CPG), mock.call.getHost(self.FAKE_HOST), mock.call.queryHost(wwns=['123456789012345', '123456789054321']), mock.call.createHost( self.FAKE_HOST, FCWwns=['123456789012345', '123456789054321'], optional={'domain': None, 'persona': 2}), mock.call.getHost(self.FAKE_HOST)] mock_client.assert_has_calls(expected) self.assertEqual(self.FAKE_HOST, host['name']) def test_create_invalid_host(self): # setup_mock_client drive with default configuration # and return the mock HTTP 3PAR client mock_client = self.setup_driver() mock_client.getVolume.return_value = {'userCPG': HPE3PAR_CPG} mock_client.getCPG.return_value = {} mock_client.getHost.side_effect = [ hpeexceptions.HTTPNotFound('Host not found.'), { 'name': 'fakehost.foo', 'FCPaths': [{'wwn': '123456789012345'}, { 'wwn': '123456789054321'}]}] mock_client.queryHost.return_value = { 'members': [{ 'name': 'fakehost.foo' }] } with mock.patch.object(hpecommon.HPE3PARCommon, '_create_client') as mock_create_client: mock_create_client.return_value = mock_client common = self.driver._login() host, cpg = self.driver._create_host( common, self.volume, self.connector_multipath_enabled) expected = [ mock.call.getVolume('osv-0DM4qZEVSKON-DXN-NwVpw'), mock.call.getCPG(HPE3PAR_CPG), mock.call.getHost('fakehost'), mock.call.queryHost(wwns=['123456789012345', '123456789054321']), mock.call.getHost('fakehost.foo')] mock_client.assert_has_calls(expected) self.assertEqual('fakehost.foo', host['name']) def test_create_host_concurrent(self): # tests concurrent requests of create host # setup_mock_client driver with default configuration # and return the mock HTTP 3PAR client mock_client = self.setup_driver() mock_client.getVolume.return_value = {'userCPG': HPE3PAR_CPG} mock_client.getCPG.return_value = {} mock_client.queryHost.side_effect = [ None, {'members': [{'name': self.FAKE_HOST}] }] mock_client.createHost.side_effect = [ hpeexceptions.HTTPConflict( {'code': EXISTENT_PATH, 'desc': 'host WWN/iSCSI name already used by another host'})] 
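# Clarifying note on the simulated race (grounded in the mocks above):
# queryHost first returns None, so the driver attempts createHost; that call
# raises HTTPConflict with EXISTENT_PATH because a concurrent request has
# already registered the WWNs, and the driver is expected to recover by
# querying the host again and reusing the existing entry.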
mock_client.getHost.side_effect = [ hpeexceptions.HTTPNotFound('fake'), {'name': self.FAKE_HOST}] with mock.patch.object(hpecommon.HPE3PARCommon, '_create_client') as mock_create_client: mock_create_client.return_value = mock_client common = self.driver._login() host, cpg = self.driver._create_host( common, self.volume, self.connector_multipath_enabled) expected = [ mock.call.getVolume('osv-0DM4qZEVSKON-DXN-NwVpw'), mock.call.getCPG(HPE3PAR_CPG), mock.call.getHost(self.FAKE_HOST), mock.call.queryHost(wwns=['123456789012345', '123456789054321']), mock.call.createHost( self.FAKE_HOST, FCWwns=['123456789012345', '123456789054321'], optional={'domain': None, 'persona': 2}), mock.call.queryHost(wwns=['123456789012345', '123456789054321']), mock.call.getHost(self.FAKE_HOST)] mock_client.assert_has_calls(expected) self.assertEqual(self.FAKE_HOST, host['name']) def test_create_modify_host(self): # setup_mock_client drive with default configuration # and return the mock HTTP 3PAR client mock_client = self.setup_driver() mock_client.getVolume.return_value = {'userCPG': HPE3PAR_CPG} mock_client.getCPG.return_value = {} mock_client.getHost.side_effect = [{ 'name': self.FAKE_HOST, 'FCPaths': []}, {'name': self.FAKE_HOST, 'FCPaths': [{'wwn': '123456789012345'}, { 'wwn': '123456789054321'}]}] mock_client.queryHost.return_value = None with mock.patch.object(hpecommon.HPE3PARCommon, '_create_client') as mock_create_client: mock_create_client.return_value = mock_client common = self.driver._login() host, cpg = self.driver._create_host( common, self.volume, self.connector_multipath_enabled) fcwwns = ['123456789054321', '123456789012345'] expected = [ mock.call.getVolume('osv-0DM4qZEVSKON-DXN-NwVpw'), mock.call.getCPG(HPE3PAR_CPG), mock.call.getHost('fakehost'), mock.call.queryHost(wwns=['123456789012345', '123456789054321']), mock.call.modifyHost('fakehost', {'FCWWNs': mock.ANY, 'pathOperation': 1}), mock.call.getHost('fakehost')] # We don't know what order fcwwns is supplied in. Since # there are only two members, test it both ways. 
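# Added note: as the original comment says, the order of the two WWNs in the
# modifyHost payload is not deterministic.  An equivalent order-insensitive
# check would be, for example:
#     self.assertEqual(
#         sorted(fcwwns),
#         sorted(mock_client.modifyHost.call_args[0][1]['FCWWNs']))
# The explicit call1/call2 comparison below keeps the original style.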
call1 = mock.call('fakehost', {'FCWWNs': fcwwns, 'pathOperation': 1}) fcwwns_rev = list(fcwwns) fcwwns_rev.reverse() call2 = mock.call('fakehost', {'FCWWNs': fcwwns_rev, 'pathOperation': 1}) self.assertTrue(call1 in mock_client.modifyHost.call_args_list or call2 in mock_client.modifyHost.call_args_list) mock_client.assert_has_calls(expected) self.assertEqual(self.FAKE_HOST, host['name']) self.assertEqual(2, len(host['FCPaths'])) def test_modify_host_with_new_wwn(self): # setup_mock_client drive with default configuration # and return the mock HTTP 3PAR client mock_client = self.setup_driver() mock_client.getVolume.return_value = {'userCPG': HPE3PAR_CPG} mock_client.getCPG.return_value = {} getHost_ret1 = { 'name': self.FAKE_HOST, 'FCPaths': [{'wwn': '123456789054321'}]} getHost_ret2 = { 'name': self.FAKE_HOST, 'FCPaths': [{'wwn': '123456789012345'}, {'wwn': '123456789054321'}]} mock_client.getHost.side_effect = [getHost_ret1, getHost_ret2] mock_client.queryHost.return_value = None with mock.patch.object(hpecommon.HPE3PARCommon, '_create_client') as mock_create_client: mock_create_client.return_value = mock_client common = self.driver._login() host, cpg = self.driver._create_host( common, self.volume, self.connector_multipath_enabled) expected = [ mock.call.getVolume('osv-0DM4qZEVSKON-DXN-NwVpw'), mock.call.getCPG(HPE3PAR_CPG), mock.call.getHost('fakehost'), mock.call.queryHost(wwns=['123456789012345', '123456789054321']), mock.call.modifyHost( 'fakehost', { 'FCWWNs': ['123456789012345'], 'pathOperation': 1}), mock.call.getHost('fakehost')] mock_client.assert_has_calls(expected) self.assertEqual(self.FAKE_HOST, host['name']) self.assertEqual(2, len(host['FCPaths'])) def test_modify_host_with_unknown_wwn_and_new_wwn(self): # setup_mock_client drive with default configuration # and return the mock HTTP 3PAR client mock_client = self.setup_driver() mock_client.getVolume.return_value = {'userCPG': HPE3PAR_CPG} mock_client.getCPG.return_value = {} getHost_ret1 = { 'name': self.FAKE_HOST, 'FCPaths': [{'wwn': '123456789054321'}, {'wwn': 'xxxxxxxxxxxxxxx'}]} getHost_ret2 = { 'name': self.FAKE_HOST, 'FCPaths': [{'wwn': '123456789012345'}, {'wwn': '123456789054321'}, {'wwn': 'xxxxxxxxxxxxxxx'}]} mock_client.getHost.side_effect = [getHost_ret1, getHost_ret2] mock_client.queryHost.return_value = None with mock.patch.object(hpecommon.HPE3PARCommon, '_create_client') as mock_create_client: mock_create_client.return_value = mock_client common = self.driver._login() host, cpg = self.driver._create_host( common, self.volume, self.connector_multipath_enabled) expected = [ mock.call.getVolume('osv-0DM4qZEVSKON-DXN-NwVpw'), mock.call.getCPG(HPE3PAR_CPG), mock.call.getHost('fakehost'), mock.call.queryHost(wwns=['123456789012345', '123456789054321']), mock.call.modifyHost( 'fakehost', { 'FCWWNs': ['123456789012345'], 'pathOperation': 1}), mock.call.getHost('fakehost')] mock_client.assert_has_calls(expected) self.assertEqual(self.FAKE_HOST, host['name']) self.assertEqual(3, len(host['FCPaths'])) @mock.patch.object(volume_types, 'get_volume_type') def test_migrate_fc_volume_attached_to_iscsi_protocol(self, _mock_volume_types): _mock_volume_types.return_value = self.RETYPE_VOLUME_TYPE_1 mock_client = self.setup_driver(mock_conf=self.RETYPE_CONF) protocol = "iSCSI" volume = {'name': HPE3PARBaseDriver.VOLUME_NAME, 'volume_type_id': None, 'id': HPE3PARBaseDriver.CLONE_ID, 'display_name': 'Foo Volume', 'size': 2, 'status': 'in-use', 'host': HPE3PARBaseDriver.FAKE_HOST, 'source_volid': HPE3PARBaseDriver.VOLUME_ID} 
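# Clarifying note (based on the assertions below): the destination host
# advertises the iSCSI protocol, so the FC driver is expected to decline the
# array-assisted migration of this attached volume and return (False, None),
# letting Cinder fall back to generic host-based migration; the empty
# expected call list confirms no backend calls are made.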
loc_info = 'HPE3PARDriver:1234567:CPG-FC1' host = {'host': 'stack@3parfc1', 'capabilities': {'location_info': loc_info, 'storage_protocol': protocol}} result = self.driver.migrate_volume(context.get_admin_context(), volume, host) self.assertIsNotNone(result) self.assertEqual((False, None), result) expected = [] mock_client.assert_has_calls(expected) def test_migrate_volume_attached(self): self.migrate_volume_attached() @ddt.ddt class TestHPE3PARISCSIDriver(HPE3PARBaseDriver): TARGET_IQN = 'iqn.2000-05.com.3pardata:21810002ac00383d' TARGET_LUN = 186 properties = { 'driver_volume_type': 'iscsi', 'data': {'encrypted': False, 'target_discovered': True, 'target_iqn': TARGET_IQN, 'target_lun': TARGET_LUN, 'target_portal': '1.1.1.2:1234'}} multipath_properties = { 'driver_volume_type': 'iscsi', 'data': {'encrypted': False, 'target_discovered': True, 'target_iqns': [TARGET_IQN], 'target_luns': [TARGET_LUN], 'target_portals': ['1.1.1.2:1234']}} multipath_properties_v6 = { 'driver_volume_type': 'iscsi', 'data': {'encrypted': False, 'target_discovered': True, 'target_iqns': [TARGET_IQN], 'target_luns': [TARGET_LUN], 'target_portals': ['[2001:db8:abcd:12:ffff:ffff:ffff:ff02]:3260']}} def setup_driver(self, config=None, mock_conf=None, wsapi_version=None): self.ctxt = context.get_admin_context() mock_client = self.setup_mock_client( conf=config, m_conf=mock_conf, driver=hpedriver.HPE3PARISCSIDriver) if wsapi_version: mock_client.getWsApiVersion.return_value = ( wsapi_version) else: mock_client.getWsApiVersion.return_value = ( self.wsapi_version_latest) expected_get_cpgs = [ mock.call.getCPG(HPE3PAR_CPG), mock.call.getCPG(HPE3PAR_CPG2)] expected_get_ports = [mock.call.getPorts()] expected_primera_check = [mock.call.is_primera_array()] mock_client.assert_has_calls( self.standard_login + expected_get_cpgs + self.standard_logout + expected_primera_check + self.standard_login + expected_get_ports + self.standard_logout) mock_client.reset_mock() return mock_client def test_iscsi_primera_old(self): # primera 4.0.xx.yyy wsapi_version_primera_old = {'major': 1, 'build': 40000128, 'minor': 8, 'revision': 1} self.assertRaises(NotImplementedError, self.setup_mock_client, driver=hpedriver.HPE3PARISCSIDriver, is_primera=True, wsapi_version=wsapi_version_primera_old) def test_iscsi_primera_new(self, config=None, mock_conf=None): # primera 4.2.xx.yyy wsapi_version_primera_new = {'major': 1, 'build': 40202010, 'minor': 8, 'revision': 1} self.ctxt = context.get_admin_context() mock_client = self.setup_mock_client( conf=config, m_conf=mock_conf, driver=hpedriver.HPE3PARISCSIDriver, is_primera=True, wsapi_version=wsapi_version_primera_new) expected_get_cpgs = [ mock.call.getCPG(HPE3PAR_CPG), mock.call.getCPG(HPE3PAR_CPG2)] expected_get_ports = [mock.call.getPorts()] expected_primera = [ mock.call.is_primera_array(), mock.call.login(HPE3PAR_USER_NAME, HPE3PAR_USER_PASS), mock.call.getWsApiVersion()] mock_client.assert_has_calls( self.standard_login + expected_get_cpgs + self.standard_logout + expected_primera + self.standard_login + expected_get_ports + self.standard_logout) @ddt.data('volume', 'volume_name_id') def test_initialize_connection(self, volume_attr): volume = getattr(self, volume_attr) vol_name = getattr(self, volume_attr.upper() + '_3PAR_NAME') # setup_mock_client drive with default configuration # and return the mock HTTP 3PAR client mock_client = self.setup_driver() mock_client.getVolume.return_value = {'userCPG': HPE3PAR_CPG} mock_client.getCPG.return_value = {} mock_client.getHost.side_effect = [ 
hpeexceptions.HTTPNotFound('fake'), {'name': self.FAKE_HOST}] mock_client.queryHost.return_value = { 'members': [{ 'name': self.FAKE_HOST }] } mock_client.getHostVLUNs.side_effect = [ [{'hostname': self.FAKE_HOST, 'volumeName': vol_name, 'lun': self.TARGET_LUN, 'portPos': {'node': 8, 'slot': 1, 'cardPort': 1}}], [{'active': True, 'volumeName': vol_name, 'lun': self.TARGET_LUN, 'type': 0}]] location = ("%(volume_name)s,%(lun_id)s,%(host)s,%(nsp)s" % {'volume_name': vol_name, 'lun_id': self.TARGET_LUN, 'host': self.FAKE_HOST, 'nsp': 'something'}) mock_client.createVLUN.return_value = location with mock.patch.object(hpecommon.HPE3PARCommon, '_create_client') as mock_create_client: mock_create_client.return_value = mock_client result = self.driver.initialize_connection( volume, self.connector) expected = [ mock.call.getVolume(vol_name), mock.call.getCPG(HPE3PAR_CPG), mock.call.getHost(self.FAKE_HOST), mock.call.queryHost(iqns=['iqn.1993-08.org.debian:01:222']), mock.call.getHost(self.FAKE_HOST), mock.call.getHostVLUNs(self.FAKE_HOST)] mock_client.assert_has_calls( self.standard_login + expected + self.standard_logout) self.assertDictEqual(self.properties, result) def test_initialize_connection_multipath(self): # setup_mock_client drive with default configuration # and return the mock HTTP 3PAR client mock_client = self.setup_driver() mock_client.getVolume.return_value = {'userCPG': HPE3PAR_CPG} mock_client.getCPG.return_value = {} mock_client.getHost.side_effect = [ hpeexceptions.HTTPNotFound('fake'), {'name': self.FAKE_HOST}] mock_client.queryHost.return_value = { 'members': [{ 'name': self.FAKE_HOST }] } mock_client.getHostVLUNs.side_effect = [ hpeexceptions.HTTPNotFound('fake'), [{'active': True, 'volumeName': self.VOLUME_3PAR_NAME, 'lun': self.TARGET_LUN, 'type': 0, 'portPos': {'node': 8, 'slot': 1, 'cardPort': 1}}]] location = ("%(volume_name)s,%(lun_id)s,%(host)s,%(nsp)s" % {'volume_name': self.VOLUME_3PAR_NAME, 'lun_id': self.TARGET_LUN, 'host': self.FAKE_HOST, 'nsp': 'something'}) mock_client.createVLUN.return_value = location mock_client.getiSCSIPorts.return_value = [{ 'IPAddr': '1.1.1.2', 'iSCSIName': self.TARGET_IQN, }] with mock.patch.object(hpecommon.HPE3PARCommon, '_create_client') as mock_create_client: mock_create_client.return_value = mock_client volume = copy.deepcopy(self.volume) volume.replication_status = 'disabled' result = self.driver.initialize_connection( volume, self.connector_multipath_enabled) expected = [ mock.call.getVolume(self.VOLUME_3PAR_NAME), mock.call.getCPG(HPE3PAR_CPG), mock.call.getHost(self.FAKE_HOST), mock.call.queryHost(iqns=['iqn.1993-08.org.debian:01:222']), mock.call.getHost(self.FAKE_HOST), mock.call.getiSCSIPorts( state=self.mock_client_conf['PORT_STATE_READY']), mock.call.getHostVLUNs(self.FAKE_HOST), mock.call.createVLUN( self.VOLUME_3PAR_NAME, auto=True, hostname=self.FAKE_HOST, portPos=self.FAKE_ISCSI_PORT['portPos'], lun=None), mock.call.getHostVLUNs(self.FAKE_HOST)] mock_client.assert_has_calls( self.standard_login + expected + self.standard_logout) self.assertDictEqual(self.multipath_properties, result) def test_initialize_connection_multipath_existing_nsp(self): # setup_mock_client drive with default configuration # and return the mock HTTP 3PAR client mock_client = self.setup_driver() mock_client.getVolume.return_value = {'userCPG': HPE3PAR_CPG} mock_client.getCPG.return_value = {} mock_client.getHost.side_effect = [ hpeexceptions.HTTPNotFound('fake'), {'name': self.FAKE_HOST}] mock_client.queryHost.return_value = { 'members': [{ 'name': 
self.FAKE_HOST }] } mock_client.getHostVLUNs.side_effect = [ [{'hostname': self.FAKE_HOST, 'volumeName': self.VOLUME_3PAR_NAME, 'lun': self.TARGET_LUN, 'portPos': {'node': 8, 'slot': 1, 'cardPort': 1}}], [{'active': True, 'volumeName': self.VOLUME_3PAR_NAME, 'lun': self.TARGET_LUN, 'type': 0}]] mock_client.getiSCSIPorts.return_value = [{ 'IPAddr': '1.1.1.2', 'iSCSIName': self.TARGET_IQN, }] with mock.patch.object(hpecommon.HPE3PARCommon, '_create_client') as mock_create_client: mock_create_client.return_value = mock_client volume = copy.deepcopy(self.volume) volume.replication_status = 'disabled' result = self.driver.initialize_connection( volume, self.connector_multipath_enabled) expected = [ mock.call.getVolume(self.VOLUME_3PAR_NAME), mock.call.getCPG(HPE3PAR_CPG), mock.call.getHost(self.FAKE_HOST), mock.call.queryHost(iqns=['iqn.1993-08.org.debian:01:222']), mock.call.getHost(self.FAKE_HOST), mock.call.getiSCSIPorts( state=self.mock_client_conf['PORT_STATE_READY']), mock.call.getHostVLUNs(self.FAKE_HOST)] mock_client.assert_has_calls( self.standard_login + expected + self.standard_logout) self.assertDictEqual(self.multipath_properties, result) def test_initialize_connection_encrypted(self): # setup_mock_client drive with default configuration # and return the mock HTTP 3PAR client mock_client = self.setup_driver() mock_client.getVolume.return_value = {'userCPG': HPE3PAR_CPG} mock_client.getCPG.return_value = {} mock_client.getHost.side_effect = [ hpeexceptions.HTTPNotFound('fake'), {'name': self.FAKE_HOST}] mock_client.queryHost.return_value = { 'members': [{ 'name': self.FAKE_HOST }] } mock_client.getHostVLUNs.side_effect = [ [{'hostname': self.FAKE_HOST, 'volumeName': self.VOLUME_3PAR_NAME, 'lun': self.TARGET_LUN, 'portPos': {'node': 8, 'slot': 1, 'cardPort': 1}}], [{'active': True, 'volumeName': self.VOLUME_3PAR_NAME, 'lun': self.TARGET_LUN, 'type': 0}]] location = ("%(volume_name)s,%(lun_id)s,%(host)s,%(nsp)s" % {'volume_name': self.VOLUME_3PAR_NAME, 'lun_id': self.TARGET_LUN, 'host': self.FAKE_HOST, 'nsp': 'something'}) mock_client.createVLUN.return_value = location with mock.patch.object(hpecommon.HPE3PARCommon, '_create_client') as mock_create_client: mock_create_client.return_value = mock_client result = self.driver.initialize_connection( self.volume_encrypted, self.connector) expected = [ mock.call.getVolume(self.VOLUME_3PAR_NAME), mock.call.getCPG(HPE3PAR_CPG), mock.call.getHost(self.FAKE_HOST), mock.call.queryHost(iqns=['iqn.1993-08.org.debian:01:222']), mock.call.getHost(self.FAKE_HOST), mock.call.getHostVLUNs(self.FAKE_HOST)] mock_client.assert_has_calls( self.standard_login + expected + self.standard_logout) expected_properties = self.properties.copy() expected_properties['data'] = self.properties['data'].copy() expected_properties['data']['encrypted'] = True self.assertDictEqual(expected_properties, result) @mock.patch.object(volume_types, 'get_volume_type') def test_initialize_connection_peer_persistence(self, _mock_volume_types): # setup_mock_client drive with default configuration # and return the mock HTTP 3PAR client conf = self.setup_configuration() self.replication_targets[0]['replication_mode'] = 'sync' self.replication_targets[0]['quorum_witness_ip'] = '10.50.3.192' self.replication_targets[0]['hpe3par_iscsi_ips'] = '1.1.1.2' conf.replication_device = self.replication_targets mock_client = self.setup_driver(config=conf) mock_client.getStorageSystemInfo.return_value = ( {'id': self.CLIENT_ID}) mock_replicated_client = self.setup_driver(config=conf) 
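# Descriptive note on the peer persistence setup above: the replication
# target is configured with replication_mode 'sync' and a quorum_witness_ip,
# and a second mocked client stands in for the remote array.  The test below
# checks that initialize_connection on the primary array still creates the
# VLUN and returns the usual multipath properties.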
mock_replicated_client.getStorageSystemInfo.return_value = ( {'id': self.REPLICATION_CLIENT_ID}) _mock_volume_types.return_value = { 'name': 'replicated', 'extra_specs': { 'replication_enabled': ' True', 'replication:mode': 'sync', 'volume_type': self.volume_type_replicated}} mock_client.getVolume.return_value = {'userCPG': HPE3PAR_CPG} mock_client.getCPG.return_value = {} mock_client.getHost.side_effect = [ hpeexceptions.HTTPNotFound('fake'), {'name': self.FAKE_HOST}] mock_client.queryHost.return_value = { 'members': [{ 'name': self.FAKE_HOST }] } mock_client.getHostVLUNs.side_effect = [ hpeexceptions.HTTPNotFound('fake'), [{'active': True, 'volumeName': self.VOLUME_3PAR_NAME, 'lun': self.TARGET_LUN, 'type': 0, 'portPos': {'node': 8, 'slot': 1, 'cardPort': 1}}]] mock_replicated_client.getHostVLUNs.side_effect = [ hpeexceptions.HTTPNotFound('fake'), [{'active': True, 'volumeName': self.VOLUME_3PAR_NAME, 'lun': self.TARGET_LUN, 'type': 0, 'portPos': {'node': 8, 'slot': 1, 'cardPort': 1}}]] location = ("%(volume_name)s,%(lun_id)s,%(host)s,%(nsp)s" % {'volume_name': self.VOLUME_3PAR_NAME, 'lun_id': self.TARGET_LUN, 'host': self.FAKE_HOST, 'nsp': 'something'}) mock_client.createVLUN.return_value = location mock_replicated_client.createVLUN.return_value = location mock_client.getiSCSIPorts.return_value = [{ 'IPAddr': '1.1.1.2', 'iSCSIName': self.TARGET_IQN, }] with mock.patch.object( hpecommon.HPE3PARCommon, '_create_client') as mock_create_client, \ mock.patch.object( hpecommon.HPE3PARCommon, '_create_replication_client') as mock_replication_client: mock_create_client.return_value = mock_client mock_replication_client.return_value = mock_replicated_client volume = copy.deepcopy(self.volume) volume.replication_status = 'enabled' result = self.driver.initialize_connection( volume, self.connector_multipath_enabled) expected = [ mock.call.getVolume(self.VOLUME_3PAR_NAME), mock.call.getCPG(HPE3PAR_CPG), mock.call.getHost(self.FAKE_HOST), mock.call.queryHost(iqns=['iqn.1993-08.org.debian:01:222']), mock.call.getHost(self.FAKE_HOST), mock.call.getiSCSIPorts(state=4), mock.call.getHostVLUNs(self.FAKE_HOST), mock.call.createVLUN( self.VOLUME_3PAR_NAME, auto=True, hostname=self.FAKE_HOST, portPos=self.FAKE_ISCSI_PORT['portPos'], lun=None), mock.call.getHostVLUNs(self.FAKE_HOST)] mock_client.assert_has_calls( self.get_id_login + self.standard_logout + self.standard_login + expected + self.standard_logout) self.assertDictEqual(self.multipath_properties, result) # iscsi_ip is 1.1.1.2 # two cases: # (i) vlan_ip is different from iscsi_ip # (ii) vlan_ip is same as iscsi_ip @ddt.data({'vlan_ip': '192.168.100.1'}, {'vlan_ip': '1.1.1.2'}) @ddt.unpack def test_initialize_connection_multipath_vlan_ip(self, vlan_ip): # setup_mock_client drive with default configuration # and return the mock HTTP 3PAR client mock_client = self.setup_driver() mock_client.getVolume.return_value = {'userCPG': HPE3PAR_CPG} mock_client.getCPG.return_value = {} mock_client.getHost.side_effect = [ hpeexceptions.HTTPNotFound('fake'), {'name': self.FAKE_HOST}] mock_client.queryHost.return_value = { 'members': [{ 'name': self.FAKE_HOST }] } mock_client.getHostVLUNs.side_effect = [ hpeexceptions.HTTPNotFound('fake'), [{'active': True, 'volumeName': self.VOLUME_3PAR_NAME, 'lun': self.TARGET_LUN, 'type': 0, 'portPos': {'node': 8, 'slot': 1, 'cardPort': 1}}]] location = ("%(volume_name)s,%(lun_id)s,%(host)s,%(nsp)s" % {'volume_name': self.VOLUME_3PAR_NAME, 'lun_id': self.TARGET_LUN, 'host': self.FAKE_HOST, 'nsp': 'something'}) 
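# Added note for the two ddt cases above: whether the VLAN entry reports
# 192.168.100.1 or the configured iSCSI IP 1.1.1.2, the connection
# properties asserted below are expected to be identical, with
# target_portals still listing the configured 1.1.1.2:1234.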
mock_client.createVLUN.return_value = location mock_client.getiSCSIPorts.return_value = [{ 'IPAddr': '1.1.1.2', 'iSCSIName': self.TARGET_IQN, 'iSCSIVlans': [{'IPAddr': vlan_ip, 'iSCSIName': self.TARGET_IQN}] }] with mock.patch.object(hpecommon.HPE3PARCommon, '_create_client') as mock_create_client: mock_create_client.return_value = mock_client volume = copy.deepcopy(self.volume) volume.replication_status = 'disabled' result = self.driver.initialize_connection( volume, self.connector_multipath_enabled) expected = [ mock.call.getVolume(self.VOLUME_3PAR_NAME), mock.call.getCPG(HPE3PAR_CPG), mock.call.getHost(self.FAKE_HOST), mock.call.queryHost(iqns=['iqn.1993-08.org.debian:01:222']), mock.call.getHost(self.FAKE_HOST), mock.call.getiSCSIPorts( state=self.mock_client_conf['PORT_STATE_READY']), mock.call.getHostVLUNs(self.FAKE_HOST), mock.call.createVLUN( self.VOLUME_3PAR_NAME, auto=True, hostname=self.FAKE_HOST, portPos=self.FAKE_ISCSI_PORT['portPos'], lun=None), mock.call.getHostVLUNs(self.FAKE_HOST)] mock_client.assert_has_calls( self.standard_login + expected + self.standard_logout) self.assertDictEqual(self.multipath_properties, result) def test_initialize_connection_v6(self): # setup_mock_client drive with default configuration # and return the mock HTTP 3PAR client conf = self.setup_configuration() conf.hpe3par_iscsi_ips = ["2001:db8:abcd:12:ffff:ffff:ffff:ff02"] mock_client = self.setup_driver(config=conf) mock_client.getVolume.return_value = {'userCPG': HPE3PAR_CPG} mock_client.getCPG.return_value = {} mock_client.getHost.side_effect = [ hpeexceptions.HTTPNotFound('fake'), {'name': self.FAKE_HOST}] mock_client.queryHost.return_value = { 'members': [{ 'name': self.FAKE_HOST }] } mock_client.getHostVLUNs.side_effect = [ hpeexceptions.HTTPNotFound('fake'), [{'active': True, 'volumeName': self.VOLUME_3PAR_NAME, 'lun': self.TARGET_LUN, 'type': 0, 'portPos': {'node': 8, 'slot': 1, 'cardPort': 2}}]] location = ("%(volume_name)s,%(lun_id)s,%(host)s,%(nsp)s" % {'volume_name': self.VOLUME_3PAR_NAME, 'lun_id': self.TARGET_LUN, 'host': self.FAKE_HOST, 'nsp': 'something'}) mock_client.createVLUN.return_value = location mock_client.getiSCSIPorts.return_value = [{ 'IPAddr': '2001:db8:abcd:12:ffff:ffff:ffff:ff02', 'iSCSIName': self.TARGET_IQN, }] with mock.patch.object(hpecommon.HPE3PARCommon, '_create_client') as mock_create_client: mock_create_client.return_value = mock_client volume = copy.deepcopy(self.volume) volume.replication_status = 'disabled' result = self.driver.initialize_connection( volume, self.connector_multipath_enabled) expected = [ mock.call.getVolume(self.VOLUME_3PAR_NAME), mock.call.getCPG(HPE3PAR_CPG), mock.call.getHost(self.FAKE_HOST), mock.call.queryHost(iqns=['iqn.1993-08.org.debian:01:222']), mock.call.getHost(self.FAKE_HOST), mock.call.getiSCSIPorts( state=self.mock_client_conf['PORT_STATE_READY']), mock.call.getHostVLUNs(self.FAKE_HOST), mock.call.createVLUN( self.VOLUME_3PAR_NAME, auto=True, hostname=self.FAKE_HOST, portPos=self.FAKE_ISCSI_PORT_V6['portPos'], lun=None), mock.call.getHostVLUNs(self.FAKE_HOST)] mock_client.assert_has_calls( self.standard_login + expected + self.standard_logout) self.assertDictEqual(self.multipath_properties_v6, result) def test_terminate_connection_for_clear_chap_creds_not_found(self): # setup_mock_client drive with default configuration # and return the mock HTTP 3PAR client mock_client = self.setup_driver() mock_client.getHostVLUNs.return_value = [ {'active': False, 'volumeName': self.VOLUME_3PAR_NAME, 'lun': None, 'type': 0}] 
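# Descriptive note for the terminate_connection tests that follow: with
# force=True the driver also tries to clear the CHAP user and password
# metadata keys on the volume; this first case makes both
# removeVolumeMetaData calls raise HTTPNotFound and expects the disconnect
# to complete anyway.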
mock_client.queryHost.return_value = { 'members': [{ 'name': self.FAKE_HOST }] } mock_client.getStorageSystemInfo.return_value = { 'id': self.CLIENT_ID } # Test for clear CHAP creds fails with HTTPNotFound mock_client.removeVolumeMetaData.side_effect = [ hpeexceptions.HTTPNotFound, hpeexceptions.HTTPNotFound] with mock.patch.object(hpecommon.HPE3PARCommon, '_create_client') as mock_create_client: mock_create_client.return_value = mock_client self.driver.terminate_connection( self.volume, self.connector, force=True) expected = [ mock.call.queryHost(iqns=[self.connector['initiator']]), mock.call.getHostVLUNs(self.FAKE_HOST), mock.call.deleteVLUN( self.VOLUME_3PAR_NAME, None, hostname=self.FAKE_HOST), mock.call.getHostVLUNs(self.FAKE_HOST), mock.call.modifyHost( 'fakehost', {'pathOperation': 2, 'iSCSINames': ['iqn.1993-08.org.debian:01:222']}), mock.call.removeVolumeMetaData( self.VOLUME_3PAR_NAME, CHAP_USER_KEY), mock.call.removeVolumeMetaData( self.VOLUME_3PAR_NAME, CHAP_PASS_KEY)] mock_client.assert_has_calls( self.get_id_login + self.standard_logout + self.standard_login + expected + self.standard_logout) def test_terminate_connection_for_clear_chap_user_key_bad_request(self): # setup_mock_client drive with default configuration # and return the mock HTTP 3PAR client mock_client = self.setup_driver() mock_client.getHostVLUNs.return_value = [ {'active': False, 'volumeName': self.VOLUME_3PAR_NAME, 'lun': None, 'type': 0}] mock_client.queryHost.return_value = { 'members': [{ 'name': self.FAKE_HOST }] } mock_client.getStorageSystemInfo.return_value = { 'id': self.CLIENT_ID } # Test for CHAP USER KEY fails with HTTPBadRequest mock_client.removeVolumeMetaData.side_effect = [ hpeexceptions.HTTPBadRequest] with mock.patch.object(hpecommon.HPE3PARCommon, '_create_client') as mock_create_client: mock_create_client.return_value = mock_client self.assertRaises(hpeexceptions.HTTPBadRequest, self.driver.terminate_connection, self.volume, self.connector, force=True) expected = [ mock.call.queryHost(iqns=[self.connector['initiator']]), mock.call.getHostVLUNs(self.FAKE_HOST), mock.call.deleteVLUN( self.VOLUME_3PAR_NAME, None, hostname=self.FAKE_HOST), mock.call.getHostVLUNs(self.FAKE_HOST), mock.call.modifyHost( 'fakehost', {'pathOperation': 2, 'iSCSINames': ['iqn.1993-08.org.debian:01:222']}), mock.call.removeVolumeMetaData( self.VOLUME_3PAR_NAME, CHAP_USER_KEY)] mock_client.assert_has_calls( self.get_id_login + self.standard_logout + self.standard_login + expected + self.standard_logout) def test_terminate_connection_for_clear_chap_pass_key_bad_request(self): # setup_mock_client drive with default configuration # and return the mock HTTP 3PAR client mock_client = self.setup_driver() mock_client.getHostVLUNs.return_value = [ {'active': False, 'volumeName': self.VOLUME_3PAR_NAME, 'lun': None, 'type': 0}] mock_client.queryHost.return_value = { 'members': [{ 'name': self.FAKE_HOST }] } mock_client.getStorageSystemInfo.return_value = { 'id': self.CLIENT_ID, } # Test for CHAP PASS KEY fails with HTTPBadRequest mock_client.removeVolumeMetaData.side_effect = [ None, hpeexceptions.HTTPBadRequest] with mock.patch.object(hpecommon.HPE3PARCommon, '_create_client') as mock_create_client: mock_create_client.return_value = mock_client self.assertRaises(hpeexceptions.HTTPBadRequest, self.driver.terminate_connection, self.volume, self.connector, force=True) expected = [ mock.call.queryHost(iqns=[self.connector['initiator']]), mock.call.getHostVLUNs(self.FAKE_HOST), mock.call.deleteVLUN( self.VOLUME_3PAR_NAME, None, 
hostname=self.FAKE_HOST), mock.call.getHostVLUNs(self.FAKE_HOST), mock.call.modifyHost( 'fakehost', {'pathOperation': 2, 'iSCSINames': ['iqn.1993-08.org.debian:01:222']}), mock.call.removeVolumeMetaData( self.VOLUME_3PAR_NAME, CHAP_USER_KEY), mock.call.removeVolumeMetaData( self.VOLUME_3PAR_NAME, CHAP_PASS_KEY)] mock_client.assert_has_calls( self.get_id_login + self.standard_logout + self.standard_login + expected + self.standard_logout) def test_get_volume_stats(self): # setup_mock_client drive with the configuration # and return the mock HTTP 3PAR client config = self.setup_configuration() config.filter_function = FILTER_FUNCTION config.goodness_function = GOODNESS_FUNCTION mock_client = self.setup_driver(config=config) mock_client.getCPG.return_value = self.cpgs[0] mock_client.getStorageSystemInfo.return_value = { 'id': self.CLIENT_ID, 'serialNumber': '1234' } # cpg has no limit mock_client.getCPGAvailableSpace.return_value = { "capacityEfficiency": {u'compaction': 594.4}, "rawFreeMiB": 1024.0 * 6, "usableFreeMiB": 1024.0 * 79 } stat_capabilities = { THROUGHPUT: 0, BANDWIDTH: 0, LATENCY: 0, IO_SIZE: 0, QUEUE_LENGTH: 0, AVG_BUSY_PERC: 0 } mock_client.getCPGStatData.return_value = stat_capabilities with mock.patch.object(hpecommon.HPE3PARCommon, '_create_client') as mock_create_client: mock_create_client.return_value = mock_client stats = self.driver.get_volume_stats(True) const = 0.0009765625 self.assertEqual('iSCSI', stats['storage_protocol']) self.assertEqual('12345', stats['array_id']) self.assertTrue(stats['pools'][0]['thin_provisioning_support']) self.assertTrue(stats['pools'][0]['thick_provisioning_support']) self.assertEqual(100.0, stats['pools'][0]['total_capacity_gb']) self.assertEqual(14.0, stats['pools'][0]['free_capacity_gb']) self.assertEqual(86.0, stats['pools'][0]['provisioned_capacity_gb']) self.assertEqual(86.0, stats['pools'][0]['capacity_utilization']) self.assertEqual(3, stats['pools'][0]['total_volumes']) self.assertEqual(GOODNESS_FUNCTION, stats['pools'][0]['goodness_function']) self.assertEqual(FILTER_FUNCTION, stats['pools'][0]['filter_function']) self.assertEqual(stat_capabilities[THROUGHPUT], stats['pools'][0][THROUGHPUT]) self.assertEqual(stat_capabilities[BANDWIDTH], stats['pools'][0][BANDWIDTH]) self.assertEqual(stat_capabilities[LATENCY], stats['pools'][0][LATENCY]) self.assertEqual(stat_capabilities[IO_SIZE], stats['pools'][0][IO_SIZE]) self.assertEqual(stat_capabilities[QUEUE_LENGTH], stats['pools'][0][QUEUE_LENGTH]) self.assertEqual(stat_capabilities[AVG_BUSY_PERC], stats['pools'][0][AVG_BUSY_PERC]) expected = [ mock.call.getStorageSystemInfo(), mock.call.getCPG(HPE3PAR_CPG), mock.call.getCPGStatData(HPE3PAR_CPG, 'daily', '7d'), mock.call.getCPGAvailableSpace(HPE3PAR_CPG), mock.call.getCPG(HPE3PAR_CPG2), mock.call.getCPGStatData(HPE3PAR_CPG2, 'daily', '7d'), mock.call.getCPGAvailableSpace(HPE3PAR_CPG2)] mock_client.assert_has_calls(expected) cpg2 = self.cpgs[0].copy() cpg2.update({'SDGrowth': {'limitMiB': 204800}}) mock_client.getCPG.return_value = cpg2 stats = self.driver.get_volume_stats(True) self.assertEqual('iSCSI', stats['storage_protocol']) self.assertEqual('12345', stats['array_id']) self.assertTrue(stats['pools'][0]['thin_provisioning_support']) self.assertTrue(stats['pools'][0]['thick_provisioning_support']) total_capacity_gb = 200 * 1024 * const self.assertEqual(total_capacity_gb, stats['pools'][0]['total_capacity_gb']) free_capacity_gb = 114 self.assertEqual(free_capacity_gb, stats['pools'][0]['free_capacity_gb']) cap_util = 43.0 
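# Worked numbers for the limited CPG above (illustrative): the SDGrowth
# limit of 204800 MiB times const (1/1024) gives 200 GiB of total capacity;
# with 86 GiB provisioned, free capacity is 200 - 86 = 114 GiB and
# utilization is 86 / 200 * 100 = 43.0, matching the assertions below.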
self.assertEqual(cap_util, stats['pools'][0]['capacity_utilization']) provisioned_capacity_gb = 86 self.assertEqual(provisioned_capacity_gb, stats['pools'][0]['provisioned_capacity_gb']) self.assertEqual(3, stats['pools'][0]['total_volumes']) self.assertEqual(GOODNESS_FUNCTION, stats['pools'][0]['goodness_function']) self.assertEqual(FILTER_FUNCTION, stats['pools'][0]['filter_function']) self.assertEqual(stat_capabilities[THROUGHPUT], stats['pools'][0][THROUGHPUT]) self.assertEqual(stat_capabilities[BANDWIDTH], stats['pools'][0][BANDWIDTH]) self.assertEqual(stat_capabilities[LATENCY], stats['pools'][0][LATENCY]) self.assertEqual(stat_capabilities[IO_SIZE], stats['pools'][0][IO_SIZE]) self.assertEqual(stat_capabilities[QUEUE_LENGTH], stats['pools'][0][QUEUE_LENGTH]) self.assertEqual(stat_capabilities[AVG_BUSY_PERC], stats['pools'][0][AVG_BUSY_PERC]) def test_get_volume_stats2(self): # Testing when the API_VERSION is incompatible with getCPGStatData srstatld_api_version = 30201200 pre_srstatld_api_version = srstatld_api_version - 1 wsapi = {'build': pre_srstatld_api_version} config = self.setup_configuration() config.filter_function = FILTER_FUNCTION config.goodness_function = GOODNESS_FUNCTION mock_client = self.setup_driver(config=config, wsapi_version=wsapi) mock_client.getCPG.return_value = self.cpgs[0] mock_client.getStorageSystemInfo.return_value = { 'id': self.CLIENT_ID, 'serialNumber': '1234' } # cpg has no limit mock_client.getCPGAvailableSpace.return_value = { "capacityEfficiency": {u'compaction': 594.4}, "rawFreeMiB": 1024.0 * 6, "usableFreeMiB": 1024.0 * 79 } with mock.patch.object(hpecommon.HPE3PARCommon, '_create_client') as mock_create_client: mock_create_client.return_value = mock_client self.driver._login() stats = self.driver.get_volume_stats(True) self.assertEqual('iSCSI', stats['storage_protocol']) self.assertEqual('12345', stats['array_id']) self.assertEqual(100.0, stats['pools'][0]['total_capacity_gb']) self.assertEqual(14.0, stats['pools'][0]['free_capacity_gb']) self.assertEqual(86.0, stats['pools'][0]['capacity_utilization']) self.assertEqual(3, stats['pools'][0]['total_volumes']) self.assertEqual(GOODNESS_FUNCTION, stats['pools'][0]['goodness_function']) self.assertEqual(FILTER_FUNCTION, stats['pools'][0]['filter_function']) self.assertIsNone(stats['pools'][0][THROUGHPUT]) self.assertIsNone(stats['pools'][0][BANDWIDTH]) self.assertIsNone(stats['pools'][0][LATENCY]) self.assertIsNone(stats['pools'][0][IO_SIZE]) self.assertIsNone(stats['pools'][0][QUEUE_LENGTH]) self.assertIsNone(stats['pools'][0][AVG_BUSY_PERC]) expected = [ mock.call.getStorageSystemInfo(), mock.call.getCPG(HPE3PAR_CPG), mock.call.getCPGAvailableSpace(HPE3PAR_CPG), mock.call.getCPG(HPE3PAR_CPG2), mock.call.getCPGAvailableSpace(HPE3PAR_CPG2)] mock_client.assert_has_calls(expected) def test_get_volume_stats3(self): # Testing when the client version is incompatible with getCPGStatData # setup_mock_client drive with the configuration # and return the mock HTTP 3PAR client config = self.setup_configuration() config.filter_function = FILTER_FUNCTION config.goodness_function = GOODNESS_FUNCTION mock_client = self.setup_driver(config=config, wsapi_version=self.wsapi_version_312) mock_client.getCPG.return_value = self.cpgs[0] mock_client.getStorageSystemInfo.return_value = { 'id': self.CLIENT_ID, 'serialNumber': '1234' } # cpg has no limit mock_client.getCPGAvailableSpace.return_value = { "capacityEfficiency": {u'compaction': 594.4}, "rawFreeMiB": 1024.0 * 6, "usableFreeMiB": 1024.0 * 79 } with 
mock.patch.object(hpecommon.HPE3PARCommon, '_create_client') as mock_create_client: mock_create_client.return_value = mock_client self.driver._login() stats = self.driver.get_volume_stats(True) self.assertEqual('iSCSI', stats['storage_protocol']) self.assertEqual('12345', stats['array_id']) self.assertEqual(100.0, stats['pools'][0]['total_capacity_gb']) self.assertEqual(14.0, stats['pools'][0]['free_capacity_gb']) self.assertEqual(86.0, stats['pools'][0]['capacity_utilization']) self.assertEqual(3, stats['pools'][0]['total_volumes']) self.assertEqual(GOODNESS_FUNCTION, stats['pools'][0]['goodness_function']) self.assertEqual(FILTER_FUNCTION, stats['pools'][0]['filter_function']) self.assertIsNone(stats['pools'][0][THROUGHPUT]) self.assertIsNone(stats['pools'][0][BANDWIDTH]) self.assertIsNone(stats['pools'][0][LATENCY]) self.assertIsNone(stats['pools'][0][IO_SIZE]) self.assertIsNone(stats['pools'][0][QUEUE_LENGTH]) self.assertIsNone(stats['pools'][0][AVG_BUSY_PERC]) expected = [ mock.call.getStorageSystemInfo(), mock.call.getCPG(HPE3PAR_CPG), mock.call.getCPGAvailableSpace(HPE3PAR_CPG), mock.call.getCPG(HPE3PAR_CPG2), mock.call.getCPGAvailableSpace(HPE3PAR_CPG2)] mock_client.assert_has_calls(expected) def test_get_volume_stats4(self): # Testing get_volume_stats() when System Reporter license is not active # setup_mock_client drive with the configuration # and return the mock HTTP 3PAR client config = self.setup_configuration() config.filter_function = FILTER_FUNCTION config.goodness_function = GOODNESS_FUNCTION mock_client = self.setup_driver(config=config) mock_client.getCPG.return_value = self.cpgs[0] # Purposely left out the System Reporter license in # getStorageSystemInfo to test sr_support mock_client.getStorageSystemInfo.return_value = { 'id': self.CLIENT_ID, 'serialNumber': '1234', 'licenseInfo': { 'licenses': [{'name': 'Remote Copy'}, {'name': 'Priority Optimization'}, {'name': 'Thin Provisioning'}] } } # cpg has no limit mock_client.getCPGAvailableSpace.return_value = { "capacityEfficiency": {u'compaction': 594.4}, "rawFreeMiB": 1024.0 * 6, "usableFreeMiB": 1024.0 * 79 } with mock.patch.object(hpecommon.HPE3PARCommon, '_create_client') as mock_create_client: mock_create_client.return_value = mock_client stats = self.driver.get_volume_stats(True) self.assertEqual('iSCSI', stats['storage_protocol']) self.assertEqual('12345', stats['array_id']) self.assertTrue(stats['pools'][0]['thin_provisioning_support']) self.assertTrue(stats['pools'][0]['QoS_support']) self.assertEqual(100.0, stats['pools'][0]['total_capacity_gb']) self.assertEqual(14.0, stats['pools'][0]['free_capacity_gb']) self.assertEqual(86.0, stats['pools'][0]['capacity_utilization']) self.assertEqual(3, stats['pools'][0]['total_volumes']) self.assertEqual(GOODNESS_FUNCTION, stats['pools'][0]['goodness_function']) self.assertEqual(FILTER_FUNCTION, stats['pools'][0]['filter_function']) self.assertIsNone(stats['pools'][0][THROUGHPUT]) self.assertIsNone(stats['pools'][0][BANDWIDTH]) self.assertIsNone(stats['pools'][0][LATENCY]) self.assertIsNone(stats['pools'][0][IO_SIZE]) self.assertIsNone(stats['pools'][0][QUEUE_LENGTH]) self.assertIsNone(stats['pools'][0][AVG_BUSY_PERC]) expected = [ mock.call.getStorageSystemInfo(), mock.call.getCPG(HPE3PAR_CPG), mock.call.getCPGAvailableSpace(HPE3PAR_CPG), mock.call.getCPG(HPE3PAR_CPG2), mock.call.getCPGAvailableSpace(HPE3PAR_CPG2)] mock_client.assert_has_calls(expected) def test_create_host_with_unmanage_iscsi_and_manage_fc_hosts(self): # setup_mock_client drive with default 
configuration # and return the mock HTTP 3PAR client mock_client = self.setup_driver() mock_client.getVolume.return_value = {'userCPG': HPE3PAR_CPG} mock_client.getCPG.return_value = {} def get_side_effect(*args): host = {'name': None} if args[0] == 'fake': host['name'] = 'fake' elif args[0] == self.FAKE_HOST: host['name'] = self.FAKE_HOST host['iSCSIPaths'] = [{ "name": "iqn.1993-08.org.debian:01:222"}] return host mock_client.getHost.side_effect = get_side_effect mock_client.queryHost.return_value = { 'members': [{ 'name': 'fake' }] } mock_client.getVLUN.return_value = {'lun': 186} with mock.patch.object(hpecommon.HPE3PARCommon, '_create_client') as mock_create_client: mock_create_client.return_value = mock_client common = self.driver._login() host, auth_username, auth_password, cpg = self.driver._create_host( common, self.volume, self.connector) expected = [ mock.call.getVolume('osv-0DM4qZEVSKON-DXN-NwVpw'), mock.call.getCPG(HPE3PAR_CPG), mock.call.getHost(self.FAKE_HOST), mock.call.queryHost(iqns=[self.connector['initiator']]), mock.call.getHost('fake')] mock_client.assert_has_calls(expected) self.assertEqual('fake', host['name']) def test_create_host(self): # setup_mock_client drive with default configuration # and return the mock HTTP 3PAR client mock_client = self.setup_driver() mock_client.getVolume.return_value = {'userCPG': HPE3PAR_CPG} mock_client.getCPG.return_value = {} mock_client.getHost.side_effect = [ hpeexceptions.HTTPNotFound('fake'), {'name': self.FAKE_HOST}] mock_client.queryHost.return_value = None mock_client.getVLUN.return_value = {'lun': self.TARGET_LUN} with mock.patch.object(hpecommon.HPE3PARCommon, '_create_client') as mock_create_client: mock_create_client.return_value = mock_client common = self.driver._login() host, auth_username, auth_password, cpg = self.driver._create_host( common, self.volume, self.connector) expected = [ mock.call.getVolume('osv-0DM4qZEVSKON-DXN-NwVpw'), mock.call.getCPG(HPE3PAR_CPG), mock.call.getHost(self.FAKE_HOST), mock.call.queryHost(iqns=['iqn.1993-08.org.debian:01:222']), mock.call.createHost( self.FAKE_HOST, optional={'domain': None, 'persona': 2}, iscsiNames=['iqn.1993-08.org.debian:01:222']), mock.call.getHost(self.FAKE_HOST)] mock_client.assert_has_calls(expected) self.assertEqual(self.FAKE_HOST, host['name']) self.assertIsNone(auth_username) self.assertIsNone(auth_password) self.assertEqual(HPE3PAR_CPG, cpg) def test_create_host_chap_enabled(self): # setup_mock_client drive with CHAP enabled configuration # and return the mock HTTP 3PAR client config = self.setup_configuration() config.hpe3par_iscsi_chap_enabled = True mock_client = self.setup_driver(config=config) mock_client.getVolume.return_value = {'userCPG': HPE3PAR_CPG} mock_client.getCPG.return_value = {} mock_client.getHost.side_effect = [ hpeexceptions.HTTPNotFound('fake'), {'name': self.FAKE_HOST}] mock_client.queryHost.return_value = None mock_client.getVLUN.return_value = {'lun': self.TARGET_LUN} expected_mod_request = { 'chapOperation': mock_client.HOST_EDIT_ADD, 'chapOperationMode': mock_client.CHAP_INITIATOR, 'chapName': 'test-user', 'chapSecret': 'test-pass' } def get_side_effect(*args): data = {'value': None} if args[1] == CHAP_USER_KEY: data['value'] = 'test-user' elif args[1] == CHAP_PASS_KEY: data['value'] = 'test-pass' return data mock_client.getVolumeMetaData.side_effect = get_side_effect with mock.patch.object(hpecommon.HPE3PARCommon, '_create_client') as mock_create_client: mock_create_client.return_value = mock_client common = self.driver._login() 
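# Descriptive note on the CHAP-enabled flow exercised below: because
# hpe3par_iscsi_chap_enabled is True, _create_host reads the CHAP username
# and secret from the volume metadata keys (CHAP_USER_KEY / CHAP_PASS_KEY,
# mocked via get_side_effect above) and pushes them to the array with
# modifyHost before returning them to the caller.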
host, auth_username, auth_password, cpg = self.driver._create_host( common, self.volume, self.connector) expected = [ mock.call.getVolume('osv-0DM4qZEVSKON-DXN-NwVpw'), mock.call.getCPG(HPE3PAR_CPG), mock.call.getVolumeMetaData( 'osv-0DM4qZEVSKON-DXN-NwVpw', CHAP_USER_KEY), mock.call.getVolumeMetaData( 'osv-0DM4qZEVSKON-DXN-NwVpw', CHAP_PASS_KEY), mock.call.getHost(self.FAKE_HOST), mock.call.queryHost(iqns=['iqn.1993-08.org.debian:01:222']), mock.call.createHost( self.FAKE_HOST, optional={'domain': None, 'persona': 2}, iscsiNames=['iqn.1993-08.org.debian:01:222']), mock.call.modifyHost( 'fakehost', expected_mod_request), mock.call.getHost(self.FAKE_HOST) ] mock_client.assert_has_calls(expected) self.assertEqual(self.FAKE_HOST, host['name']) self.assertEqual('test-user', auth_username) self.assertEqual('test-pass', auth_password) def test_create_host_chap_enabled_and_host_with_chap_cred(self): # setup_mock_client drive with CHAP enabled configuration # and return the mock HTTP 3PAR client config = self.setup_configuration() config.hpe3par_iscsi_chap_enabled = True mock_client = self.setup_driver(config=config) mock_client.getVolume.return_value = {'userCPG': HPE3PAR_CPG} mock_client.getCPG.return_value = {} mock_client.getHost.return_value = { 'name': self.FAKE_HOST, 'initiatorChapEnabled': True, 'iSCSIPaths': [{ "name": "iqn.1993-08.org.debian:01:222" }] } mock_client.queryHost.return_value = None mock_client.getVLUN.return_value = {'lun': self.TARGET_LUN} expected_mod_request = { 'chapOperation': mock_client.HOST_EDIT_ADD, 'chapOperationMode': mock_client.CHAP_INITIATOR, 'chapName': 'test-user', 'chapSecret': 'test-pass' } def get_side_effect(*args): data = {'value': None} if args[1] == CHAP_USER_KEY: data['value'] = 'test-user' elif args[1] == CHAP_PASS_KEY: data['value'] = 'test-pass' return data mock_client.getVolumeMetaData.side_effect = get_side_effect with mock.patch.object(hpecommon.HPE3PARCommon, '_create_client') as mock_create_client: mock_create_client.return_value = mock_client common = self.driver._login() host, auth_username, auth_password, cpg = self.driver._create_host( common, self.volume, self.connector) expected = [ mock.call.getVolume('osv-0DM4qZEVSKON-DXN-NwVpw'), mock.call.getCPG(HPE3PAR_CPG), mock.call.getVolumeMetaData( 'osv-0DM4qZEVSKON-DXN-NwVpw', CHAP_USER_KEY), mock.call.getVolumeMetaData( 'osv-0DM4qZEVSKON-DXN-NwVpw', CHAP_PASS_KEY), mock.call.getHost(self.FAKE_HOST), mock.call.queryHost(iqns=['iqn.1993-08.org.debian:01:222']), mock.call.modifyHost( 'fakehost', expected_mod_request), mock.call.getHost(self.FAKE_HOST) ] mock_client.assert_has_calls(expected) self.assertEqual(self.FAKE_HOST, host['name']) self.assertEqual('test-user', auth_username) self.assertEqual('test-pass', auth_password) def test_create_host_chap_enabled_and_host_without_chap_cred(self): # setup_mock_client driver # and return the mock HTTP 3PAR client config = self.setup_configuration() config.hpe3par_iscsi_chap_enabled = True mock_client = self.setup_driver(config=config) mock_client.getVolume.return_value = {'userCPG': HPE3PAR_CPG} mock_client.getCPG.return_value = {} mock_client.queryHost.return_value = None expected_mod_request = { 'chapOperation': mock_client.HOST_EDIT_ADD, 'chapOperationMode': mock_client.CHAP_INITIATOR, 'chapName': 'test-user', 'chapSecret': 'test-pass' } def get_side_effect(*args): data = {'value': None} if args[1] == CHAP_USER_KEY: data['value'] = 'test-user' elif args[1] == CHAP_PASS_KEY: data['value'] = 'test-pass' return data 
mock_client.getVolumeMetaData.side_effect = get_side_effect mock_client.getHost.return_value = { 'name': self.FAKE_HOST, 'initiatorChapEnabled': False, 'iSCSIPaths': [{ "name": "iqn.1993-08.org.debian:01:222" }] } with mock.patch.object(hpecommon.HPE3PARCommon, '_create_client') as mock_create_client: mock_create_client.return_value = mock_client common = self.driver._login() host, auth_username, auth_password, cpg = self.driver._create_host( common, self.volume, self.connector) expected = [ mock.call.getVolume('osv-0DM4qZEVSKON-DXN-NwVpw'), mock.call.getCPG(HPE3PAR_CPG), mock.call.getVolumeMetaData( 'osv-0DM4qZEVSKON-DXN-NwVpw', CHAP_USER_KEY), mock.call.getVolumeMetaData( 'osv-0DM4qZEVSKON-DXN-NwVpw', CHAP_PASS_KEY), mock.call.getHost(self.FAKE_HOST), mock.call.queryHost(iqns=['iqn.1993-08.org.debian:01:222']), mock.call.modifyHost(self.FAKE_HOST, expected_mod_request)] mock_client.assert_has_calls(expected) def test_create_invalid_host(self): # setup_mock_client drive with default configuration # and return the mock HTTP 3PAR client mock_client = self.setup_driver() mock_client.getVolume.return_value = {'userCPG': HPE3PAR_CPG} mock_client.getCPG.return_value = {} mock_client.getHost.side_effect = [ hpeexceptions.HTTPNotFound('Host not found.'), {'name': 'fakehost.foo'}] mock_client.queryHost.return_value = { 'members': [{ 'name': 'fakehost.foo' }] } with mock.patch.object(hpecommon.HPE3PARCommon, '_create_client') as mock_create_client: mock_create_client.return_value = mock_client common = self.driver._login() host, auth_username, auth_password, cpg = self.driver._create_host( common, self.volume, self.connector) expected = [ mock.call.getVolume('osv-0DM4qZEVSKON-DXN-NwVpw'), mock.call.getCPG(HPE3PAR_CPG), mock.call.getHost(self.FAKE_HOST), mock.call.queryHost(iqns=['iqn.1993-08.org.debian:01:222']), mock.call.getHost('fakehost.foo')] mock_client.assert_has_calls(expected) self.assertEqual('fakehost.foo', host['name']) self.assertIsNone(auth_username) self.assertIsNone(auth_password) def test_create_invalid_host_chap_enabled(self): # setup_mock_client drive with CHAP enabled configuration # and return the mock HTTP 3PAR client config = self.setup_configuration() config.hpe3par_iscsi_chap_enabled = True mock_client = self.setup_driver(config=config) mock_client.getVolume.return_value = {'userCPG': HPE3PAR_CPG} mock_client.getCPG.return_value = {} mock_client.getHost.side_effect = [ hpeexceptions.HTTPNotFound('Host not found.'), {'name': 'fakehost.foo'}] mock_client.queryHost.return_value = { 'members': [{ 'name': 'fakehost.foo' }] } def get_side_effect(*args): data = {'value': None} if args[1] == CHAP_USER_KEY: data['value'] = 'test-user' elif args[1] == CHAP_PASS_KEY: data['value'] = 'test-pass' return data mock_client.getVolumeMetaData.side_effect = get_side_effect expected_mod_request = { 'chapOperation': mock_client.HOST_EDIT_ADD, 'chapOperationMode': mock_client.CHAP_INITIATOR, 'chapName': 'test-user', 'chapSecret': 'test-pass' } with mock.patch.object(hpecommon.HPE3PARCommon, '_create_client') as mock_create_client: mock_create_client.return_value = mock_client common = self.driver._login() host, auth_username, auth_password, cpg = self.driver._create_host( common, self.volume, self.connector) expected = [ mock.call.getVolume('osv-0DM4qZEVSKON-DXN-NwVpw'), mock.call.getCPG(HPE3PAR_CPG), mock.call.getVolumeMetaData( 'osv-0DM4qZEVSKON-DXN-NwVpw', CHAP_USER_KEY), mock.call.getVolumeMetaData( 'osv-0DM4qZEVSKON-DXN-NwVpw', CHAP_PASS_KEY), mock.call.getHost(self.FAKE_HOST), 
mock.call.queryHost(iqns=['iqn.1993-08.org.debian:01:222']), mock.call.modifyHost( 'fakehost.foo', expected_mod_request), mock.call.getHost('fakehost.foo') ] mock_client.assert_has_calls(expected) self.assertEqual('fakehost.foo', host['name']) self.assertEqual('test-user', auth_username) self.assertEqual('test-pass', auth_password) def test_create_host_concurrent(self): # tests concurrent requests of create host # setup_mock_client driver with default configuration # and return the mock HTTP 3PAR client mock_client = self.setup_driver() mock_client.getVolume.return_value = {'userCPG': HPE3PAR_CPG} mock_client.getCPG.return_value = {} mock_client.queryHost.side_effect = [ None, {'members': [{'name': self.FAKE_HOST}]}] mock_client.createHost.side_effect = [ hpeexceptions.HTTPConflict( {'code': EXISTENT_PATH, 'desc': 'host WWN/iSCSI name already used by another host'})] mock_client.getHost.side_effect = [ hpeexceptions.HTTPNotFound('fake'), {'name': self.FAKE_HOST}] with mock.patch.object(hpecommon.HPE3PARCommon, '_create_client') as mock_create_client: mock_create_client.return_value = mock_client common = self.driver._login() host, user, pwd, cpg = self.driver._create_host( common, self.volume, self.connector) expected = [ mock.call.getVolume('osv-0DM4qZEVSKON-DXN-NwVpw'), mock.call.getCPG(HPE3PAR_CPG), mock.call.getHost(self.FAKE_HOST), mock.call.queryHost(iqns=['iqn.1993-08.org.debian:01:222']), mock.call.createHost( self.FAKE_HOST, optional={'domain': None, 'persona': 2}, iscsiNames=['iqn.1993-08.org.debian:01:222']), mock.call.queryHost(iqns=['iqn.1993-08.org.debian:01:222']), mock.call.getHost(self.FAKE_HOST)] mock_client.assert_has_calls(expected) self.assertEqual(self.FAKE_HOST, host['name']) def test_create_modify_host(self): # setup_mock_client drive with default configuration # and return the mock HTTP 3PAR client mock_client = self.setup_driver() mock_client.getVolume.return_value = {'userCPG': HPE3PAR_CPG} mock_client.getCPG.return_value = {} mock_client.getHost.side_effect = [ {'name': self.FAKE_HOST, 'FCPaths': []}, {'name': self.FAKE_HOST, 'FCPaths': [{'wwn': '123456789012345'}, {'wwn': '123456789054321'}]}] mock_client.queryHost.return_value = None with mock.patch.object(hpecommon.HPE3PARCommon, '_create_client') as mock_create_client: mock_create_client.return_value = mock_client common = self.driver._login() host, auth_username, auth_password, cpg = self.driver._create_host( common, self.volume, self.connector) expected = [ mock.call.getVolume('osv-0DM4qZEVSKON-DXN-NwVpw'), mock.call.getCPG(HPE3PAR_CPG), mock.call.getHost(self.FAKE_HOST), mock.call.queryHost(iqns=['iqn.1993-08.org.debian:01:222']), mock.call.modifyHost( self.FAKE_HOST, {'pathOperation': 1, 'iSCSINames': ['iqn.1993-08.org.debian:01:222']}), mock.call.getHost(self.FAKE_HOST)] mock_client.assert_has_calls(expected) self.assertEqual(self.FAKE_HOST, host['name']) self.assertIsNone(auth_username) self.assertIsNone(auth_password) self.assertEqual(2, len(host['FCPaths'])) def test_create_modify_host_chap_enabled(self): # setup_mock_client drive with CHAP enabled configuration # and return the mock HTTP 3PAR client config = self.setup_configuration() config.hpe3par_iscsi_chap_enabled = True mock_client = self.setup_driver(config=config) mock_client.getVolume.return_value = {'userCPG': HPE3PAR_CPG} mock_client.getCPG.return_value = {} mock_client.getHost.side_effect = [ {'name': self.FAKE_HOST, 'FCPaths': []}, {'name': self.FAKE_HOST, 'FCPaths': [{'wwn': '123456789012345'}, {'wwn': '123456789054321'}]}] 
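        # Note: the two queued getHost() responses above play back the host
        # record before and after the driver's modifyHost() edits; the second
        # read reports two FC paths, which is what the later
        # assertEqual(2, len(host['FCPaths'])) checks against.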
mock_client.queryHost.return_value = None def get_side_effect(*args): data = {'value': None} if args[1] == CHAP_USER_KEY: data['value'] = 'test-user' elif args[1] == CHAP_PASS_KEY: data['value'] = 'test-pass' return data mock_client.getVolumeMetaData.side_effect = get_side_effect expected_mod_request = { 'chapOperation': mock_client.HOST_EDIT_ADD, 'chapOperationMode': mock_client.CHAP_INITIATOR, 'chapName': 'test-user', 'chapSecret': 'test-pass' } with mock.patch.object(hpecommon.HPE3PARCommon, '_create_client') as mock_create_client: mock_create_client.return_value = mock_client common = self.driver._login() host, auth_username, auth_password, cpg = self.driver._create_host( common, self.volume, self.connector) expected = [ mock.call.getVolume('osv-0DM4qZEVSKON-DXN-NwVpw'), mock.call.getCPG(HPE3PAR_CPG), mock.call.getVolumeMetaData( 'osv-0DM4qZEVSKON-DXN-NwVpw', CHAP_USER_KEY), mock.call.getVolumeMetaData( 'osv-0DM4qZEVSKON-DXN-NwVpw', CHAP_PASS_KEY), mock.call.getHost(self.FAKE_HOST), mock.call.queryHost(iqns=['iqn.1993-08.org.debian:01:222']), mock.call.modifyHost( self.FAKE_HOST, {'pathOperation': 1, 'iSCSINames': ['iqn.1993-08.org.debian:01:222']}), mock.call.modifyHost( self.FAKE_HOST, expected_mod_request ), mock.call.getHost(self.FAKE_HOST)] mock_client.assert_has_calls(expected) self.assertEqual(self.FAKE_HOST, host['name']) self.assertEqual('test-user', auth_username) self.assertEqual('test-pass', auth_password) self.assertEqual(2, len(host['FCPaths'])) def test_get_least_used_nsp_for_host_single(self): # setup_mock_client drive with default configuration # and return the mock HTTP 3PAR client # Setup two ISCSI IPs conf = self.setup_configuration() conf.hpe3par_iscsi_ips = ["10.10.220.253"] mock_client = self.setup_driver(config=conf) mock_client.getPorts.return_value = PORTS_RET mock_client.getVLUNs.return_value = VLUNS1_RET with mock.patch.object(hpecommon.HPE3PARCommon, '_create_client') as mock_create_client: mock_create_client.return_value = mock_client common = self.driver._login() self.driver.initialize_iscsi_ports(common) nsp = self.driver._get_least_used_nsp_for_host(common, 'newhost') self.assertEqual("1:8:1", nsp) def test_get_least_used_nsp_for_host_new(self): # setup_mock_client drive with default configuration # and return the mock HTTP 3PAR client # Setup two ISCSI IPs conf = self.setup_configuration() conf.hpe3par_iscsi_ips = ["10.10.220.252", "10.10.220.253"] mock_client = self.setup_driver(config=conf) mock_client.getPorts.return_value = PORTS_RET mock_client.getVLUNs.return_value = VLUNS1_RET with mock.patch.object(hpecommon.HPE3PARCommon, '_create_client') as mock_create_client: mock_create_client.return_value = mock_client common = self.driver._login() self.driver.initialize_iscsi_ports(common) # Host 'newhost' does not yet have any iscsi paths, # so the 'least used' is returned nsp = self.driver._get_least_used_nsp_for_host(common, 'newhost') self.assertEqual("1:8:2", nsp) def test_get_least_used_nsp_for_host_reuse(self): # setup_mock_client drive with default configuration # and return the mock HTTP 3PAR client # Setup two ISCSI IPs conf = self.setup_configuration() conf.hpe3par_iscsi_ips = ["10.10.220.252", "10.10.220.253"] mock_client = self.setup_driver(config=conf) mock_client.getPorts.return_value = PORTS_RET mock_client.getVLUNs.return_value = VLUNS1_RET with mock.patch.object(hpecommon.HPE3PARCommon, '_create_client') as mock_create_client: mock_create_client.return_value = mock_client common = self.driver._login() 
self.driver.initialize_iscsi_ports(common) # hosts 'foo' and 'bar' already have active iscsi paths # the same one should be used nsp = self.driver._get_least_used_nsp_for_host(common, 'foo') self.assertEqual("1:8:2", nsp) nsp = self.driver._get_least_used_nsp_for_host(common, 'bar') self.assertEqual("1:8:1", nsp) def test_get_least_used_nps_for_host_fc(self): # setup_mock_client drive with default configuration # and return the mock HTTP 3PAR client mock_client = self.setup_driver() mock_client.getPorts.return_value = PORTS1_RET mock_client.getVLUNs.return_value = VLUNS5_RET # Setup two ISCSI IPs iscsi_ips = ["10.10.220.252", "10.10.220.253"] self.driver.configuration.hpe3par_iscsi_ips = iscsi_ips with mock.patch.object(hpecommon.HPE3PARCommon, '_create_client') as mock_create_client: mock_create_client.return_value = mock_client common = self.driver._login() self.driver.initialize_iscsi_ports(common) nsp = self.driver._get_least_used_nsp_for_host(common, 'newhost') self.assertNotEqual("0:6:3", nsp) self.assertEqual("1:8:1", nsp) def test_invalid_iscsi_ip(self): config = self.setup_configuration() config.hpe3par_iscsi_ips = ['10.10.220.250', '10.10.220.251'] config.target_ip_address = '10.10.10.10' mock_conf = { 'getPorts.return_value': { 'members': [ {'portPos': {'node': 1, 'slot': 8, 'cardPort': 2}, 'protocol': 2, 'IPAddr': '10.10.220.252', 'linkState': 4, 'device': [], 'iSCSIName': self.TARGET_IQN, 'mode': 2, 'HWAddr': '2C27D75375D2', 'type': 8}, {'portPos': {'node': 1, 'slot': 8, 'cardPort': 1}, 'protocol': 2, 'IPAddr': '10.10.220.253', 'linkState': 4, 'device': [], 'iSCSIName': self.TARGET_IQN, 'mode': 2, 'HWAddr': '2C27D75375D6', 'type': 8}]}} # no valid ip addr should be configured. self.assertRaises(exception.InvalidInput, self.setup_driver, config=config, mock_conf=mock_conf) def test_get_least_used_nsp(self): # setup_mock_client drive with default configuration # and return the mock HTTP 3PAR client mock_client = self.setup_driver() ports = [ {'portPos': {'node': 1, 'slot': 8, 'cardPort': 2}, 'active': True}, {'portPos': {'node': 1, 'slot': 8, 'cardPort': 1}, 'active': True}, {'portPos': {'node': 1, 'slot': 8, 'cardPort': 2}, 'active': True}, {'portPos': {'node': 0, 'slot': 2, 'cardPort': 2}, 'active': True}, {'portPos': {'node': 0, 'slot': 2, 'cardPort': 1}, 'active': True}, {'portPos': {'node': 0, 'slot': 2, 'cardPort': 1}, 'active': True}, {'portPos': {'node': 0, 'slot': 2, 'cardPort': 1}, 'active': True}, {'portPos': {'node': 0, 'slot': 2, 'cardPort': 1}, 'active': True}] mock_client.getVLUNs.return_value = {'members': ports} with mock.patch.object(hpecommon.HPE3PARCommon, '_create_client') as mock_create_client: mock_create_client.return_value = mock_client common = self.driver._login() # in use count vluns = common.client.getVLUNs() nsp = self.driver._get_least_used_nsp(common, vluns['members'], ['0:2:1', '1:8:1']) self.assertEqual('1:8:1', nsp) ports = [ {'portPos': {'node': 1, 'slot': 2, 'cardPort': 1}, 'active': True}, {'portPos': {'node': 1, 'slot': 2, 'cardPort': 1}, 'active': True}, {'portPos': {'node': 1, 'slot': 2, 'cardPort': 1}, 'active': True}, {'portPos': {'node': 1, 'slot': 2, 'cardPort': 1}, 'active': True}, {'portPos': {'node': 0, 'slot': 2, 'cardPort': 1}, 'active': True}, {'portPos': {'node': 0, 'slot': 2, 'cardPort': 1}, 'active': True}, {'portPos': {'node': 0, 'slot': 2, 'cardPort': 1}, 'active': True}, {'portPos': {'node': 0, 'slot': 2, 'cardPort': 1}, 'active': True}, {'portPos': {'node': 0, 'slot': 2, 'cardPort': 1}, 'active': True}] 
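        # Illustrative sketch (not the driver's actual code): an NSP is the
        # "node:slot:cardPort" triple of a 3PAR port, and "least used" means the
        # candidate NSP with the fewest active VLUNs, roughly:
        #   usage = collections.Counter(
        #       '%(node)s:%(slot)s:%(cardPort)s' % v['portPos']
        #       for v in ports if v.get('active'))
        #   least_used = min(['0:2:1', '1:2:1'], key=lambda nsp: usage[nsp])
        # With the list above, '0:2:1' carries five active entries and '1:2:1'
        # four, so the assertion below expects '1:2:1'.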
mock_client.getVLUNs.return_value = {'members': ports} # in use count common = self.driver._login() vluns = common.client.getVLUNs() nsp = self.driver._get_least_used_nsp(common, vluns['members'], ['0:2:1', '1:2:1']) self.assertEqual('1:2:1', nsp) ports = [ {'portPos': {'node': 1, 'slot': 2, 'cardPort': 1}, 'active': True}, {'portPos': {'node': 1, 'slot': 2, 'cardPort': 1}, 'active': True}, {'portPos': {'node': 1, 'slot': 2, 'cardPort': 1}, 'active': True}, {'portPos': {'node': 1, 'slot': 2, 'cardPort': 1}, 'active': True}, {'portPos': {'node': 0, 'slot': 2, 'cardPort': 1}, 'active': True}, {'portPos': {'node': 0, 'slot': 2, 'cardPort': 1}, 'active': True}, {'portPos': {'node': 0, 'slot': 2, 'cardPort': 1}, 'active': True}, {'portPos': {'node': 0, 'slot': 2, 'cardPort': 1}, 'active': True}, {'portPos': {'node': 0, 'slot': 2, 'cardPort': 1}, 'active': True}] mock_client.getVLUNs.return_value = {'members': ports} # in use count common = self.driver._login() vluns = common.client.getVLUNs() nsp = self.driver._get_least_used_nsp(common, vluns['members'], ['1:1:1', '1:2:1']) self.assertEqual('1:1:1', nsp) def test_set_3par_chaps(self): # setup_mock_client drive with default configuration # and return the mock HTTP 3PAR client mock_client = self.setup_driver() with mock.patch.object(hpecommon.HPE3PARCommon, '_create_client') as mock_create_client: mock_create_client.return_value = mock_client common = self.driver._login() expected = [] self.driver._set_3par_chaps( common, 'test-host', 'test-vol', 'test-host', 'pass') mock_client.assert_has_calls(expected) # setup_mock_client drive with CHAP enabled configuration # and return the mock HTTP 3PAR client config = self.setup_configuration() config.hpe3par_iscsi_chap_enabled = True mock_client = self.setup_driver(config=config) with mock.patch.object(hpecommon.HPE3PARCommon, '_create_client') as mock_create_client: mock_create_client.return_value = mock_client common = self.driver._login() expected_mod_request = { 'chapOperation': mock_client.HOST_EDIT_ADD, 'chapOperationMode': mock_client.CHAP_INITIATOR, 'chapName': 'test-host', 'chapSecret': 'fake' } expected = [ mock.call.modifyHost('test-host', expected_mod_request) ] self.driver._set_3par_chaps( common, 'test-host', 'test-vol', 'test-host', 'fake') mock_client.assert_has_calls(expected) @mock.patch('cinder.volume.volume_utils.generate_password') def test_do_export(self, mock_utils): # setup_mock_client drive with default configuration # and return the mock HTTP 3PAR client mock_client = self.setup_driver() volume = {'host': 'test-host@3pariscsi', 'id': 'd03338a9-9115-48a3-8dfc-35cdfcdc15a7'} connector = {'host': 'test-host'} mock_utils.return_value = 'random-pass' mock_client.getHostVLUNs.return_value = [ {'active': True, 'volumeName': self.VOLUME_3PAR_NAME, 'lun': None, 'type': 0, 'remoteName': 'iqn.1993-08.org.debian:01:222'} ] mock_client.getHost.return_value = { 'name': 'osv-0DM4qZEVSKON-DXN-NwVpw', 'initiatorChapEnabled': True } mock_client.getVolumeMetaData.return_value = { 'value': 'random-pass' } expected = [] expected_model = {'provider_auth': None} with mock.patch.object(hpecommon.HPE3PARCommon, '_create_client') as mock_create_client: mock_create_client.return_value = mock_client common = self.driver._login() model = self.driver._do_export(common, volume, connector) mock_client.assert_has_calls(expected) self.assertEqual(expected_model, model) mock_client.reset_mock() # setup_mock_client drive with CHAP enabled configuration # and return the mock HTTP 3PAR client config = 
self.setup_configuration() config.hpe3par_iscsi_chap_enabled = True mock_client = self.setup_driver(config=config) volume = {'host': 'test-host@3pariscsi', 'id': 'd03338a9-9115-48a3-8dfc-35cdfcdc15a7'} mock_utils.return_value = 'random-pass' mock_client.getHostVLUNs.return_value = [ {'active': True, 'volumeName': self.VOLUME_3PAR_NAME, 'lun': None, 'type': 0, 'remoteName': 'iqn.1993-08.org.debian:01:222'} ] mock_client.getHost.return_value = { 'name': 'osv-0DM4qZEVSKON-DXN-NwVpw', 'initiatorChapEnabled': True } mock_client.getVolumeMetaData.return_value = { 'value': 'random-pass' } expected = [ mock.call.getHostVLUNs('test-host'), mock.call.getHost('test-host'), mock.call.getVolumeMetaData( 'osv-0DM4qZEVSKON-DXN-NwVpw', CHAP_PASS_KEY), mock.call.setVolumeMetaData( 'osv-0DM4qZEVSKON-DXN-NwVpw', CHAP_USER_KEY, 'test-host'), mock.call.setVolumeMetaData( 'osv-0DM4qZEVSKON-DXN-NwVpw', CHAP_PASS_KEY, 'random-pass') ] expected_model = {'provider_auth': 'CHAP test-host random-pass'} with mock.patch.object(hpecommon.HPE3PARCommon, '_create_client') as mock_create_client: mock_create_client.return_value = mock_client common = self.driver._login() model = self.driver._do_export(common, volume, connector) mock_client.assert_has_calls(expected) self.assertEqual(expected_model, model) @mock.patch('cinder.volume.volume_utils.generate_password') def test_do_export_host_not_found(self, mock_utils): # setup_mock_client drive with CHAP enabled configuration # and return the mock HTTP 3PAR client config = self.setup_configuration() config.hpe3par_iscsi_chap_enabled = True mock_client = self.setup_driver(config=config) volume = {'host': 'test-host@3pariscsi', 'id': 'd03338a9-9115-48a3-8dfc-35cdfcdc15a7'} connector = {'host': 'test-host'} mock_utils.return_value = "random-pass" mock_client.getHostVLUNs.side_effect = hpeexceptions.HTTPNotFound( 'fake') mock_client.getVolumeMetaData.return_value = { 'value': 'random-pass' } expected = [ mock.call.getHostVLUNs('test-host'), mock.call.setVolumeMetaData( 'osv-0DM4qZEVSKON-DXN-NwVpw', CHAP_USER_KEY, 'test-host'), mock.call.setVolumeMetaData( 'osv-0DM4qZEVSKON-DXN-NwVpw', CHAP_PASS_KEY, 'random-pass') ] expected_model = {'provider_auth': 'CHAP test-host random-pass'} with mock.patch.object(hpecommon.HPE3PARCommon, '_create_client') as mock_create_client: mock_create_client.return_value = mock_client common = self.driver._login() model = self.driver._do_export(common, volume, connector) mock_client.assert_has_calls(expected) self.assertEqual(expected_model, model) @mock.patch('cinder.volume.volume_utils.generate_password') def test_do_export_host_chap_disabled(self, mock_utils): # setup_mock_client drive with CHAP enabled configuration # and return the mock HTTP 3PAR client config = self.setup_configuration() config.hpe3par_iscsi_chap_enabled = True mock_client = self.setup_driver(config=config) volume = {'host': 'test-host@3pariscsi', 'id': 'd03338a9-9115-48a3-8dfc-35cdfcdc15a7'} connector = {'host': 'test-host'} mock_utils.return_value = 'random-pass' mock_client.getHostVLUNs.return_value = [ {'active': True, 'volumeName': self.VOLUME_3PAR_NAME, 'lun': None, 'type': 0, 'remoteName': 'iqn.1993-08.org.debian:01:222'} ] mock_client.getHost.return_value = { 'name': 'fake-host', 'initiatorChapEnabled': False } mock_client.getVolumeMetaData.return_value = { 'value': 'random-pass' } expected = [ mock.call.getHostVLUNs('test-host'), mock.call.getHost('test-host'), mock.call.getVolumeMetaData( 'osv-0DM4qZEVSKON-DXN-NwVpw', CHAP_PASS_KEY), mock.call.setVolumeMetaData( 
'osv-0DM4qZEVSKON-DXN-NwVpw', CHAP_USER_KEY, 'test-host'), mock.call.setVolumeMetaData( 'osv-0DM4qZEVSKON-DXN-NwVpw', CHAP_PASS_KEY, 'random-pass') ] expected_model = {'provider_auth': 'CHAP test-host random-pass'} with mock.patch.object(hpecommon.HPE3PARCommon, '_create_client') as mock_create_client: mock_create_client.return_value = mock_client common = self.driver._login() model = self.driver._do_export(common, volume, connector) mock_client.assert_has_calls(expected) self.assertEqual(expected_model, model) @mock.patch('cinder.volume.volume_utils.generate_password') def test_do_export_no_active_vluns(self, mock_utils): # setup_mock_client drive with CHAP enabled configuration # and return the mock HTTP 3PAR client config = self.setup_configuration() config.hpe3par_iscsi_chap_enabled = True mock_client = self.setup_driver(config=config) volume = {'host': 'test-host@3pariscsi', 'id': 'd03338a9-9115-48a3-8dfc-35cdfcdc15a7'} connector = {'host': 'test-host'} mock_utils.return_value = "random-pass" mock_client.getHostVLUNs.return_value = [ {'active': False, 'volumeName': self.VOLUME_3PAR_NAME, 'lun': None, 'type': 0, 'remoteName': 'iqn.1993-08.org.debian:01:222'} ] mock_client.getHost.return_value = { 'name': 'fake-host', 'initiatorChapEnabled': True } mock_client.getVolumeMetaData.return_value = { 'value': 'random-pass' } expected = [ mock.call.getHostVLUNs('test-host'), mock.call.getHost('test-host'), mock.call.setVolumeMetaData( 'osv-0DM4qZEVSKON-DXN-NwVpw', CHAP_USER_KEY, 'test-host'), mock.call.setVolumeMetaData( 'osv-0DM4qZEVSKON-DXN-NwVpw', CHAP_PASS_KEY, 'random-pass') ] expected_model = {'provider_auth': 'CHAP test-host random-pass'} with mock.patch.object(hpecommon.HPE3PARCommon, '_create_client') as mock_create_client: mock_create_client.return_value = mock_client common = self.driver._login() model = self.driver._do_export(common, volume, connector) mock_client.assert_has_calls(expected) self.assertEqual(expected_model, model) @mock.patch('cinder.volume.volume_utils.generate_password') def test_do_export_vlun_missing_chap_credentials(self, mock_utils): # setup_mock_client drive with CHAP enabled configuration # and return the mock HTTP 3PAR client config = self.setup_configuration() config.hpe3par_iscsi_chap_enabled = True mock_client = self.setup_driver(config=config) volume = {'host': 'test-host@3pariscsi', 'id': self.VOLUME_ID} connector = {'host': 'test-host'} mock_utils.return_value = 'random-pass' mock_client.getHost.return_value = { 'name': 'osv-0DM4qZEVSKON-DXN-NwVpw', 'initiatorChapEnabled': True} mock_client.getVolumeMetaData.side_effect = hpeexceptions.HTTPNotFound expected = [ mock.call.getHostVLUNs('test-host'), mock.call.getHost('test-host'), mock.call.getVolumeMetaData( 'osv-0DM4qZEVSKON-DXN-NwVpw', CHAP_PASS_KEY), mock.call.setVolumeMetaData( 'osv-0DM4qZEVSKON-DXN-NwVpw', CHAP_USER_KEY, 'test-host'), mock.call.setVolumeMetaData( 'osv-0DM4qZEVSKON-DXN-NwVpw', CHAP_PASS_KEY, 'random-pass')] expected_model = {'provider_auth': 'CHAP test-host random-pass'} with mock.patch.object(hpecommon.HPE3PARCommon, '_create_client') as mock_create_client: mock_create_client.return_value = mock_client common = self.driver._login() # vlun has remoteName mock_client.getHostVLUNs.return_value = [ {'active': True, 'volumeName': self.VOLUME_3PAR_NAME, 'lun': 1, 'type': 3, 'remoteName': 'iqn.1993-08.org.debian:01:222'}] model_with_remote_name = self.driver._do_export( common, volume, connector) mock_client.assert_has_calls(expected) self.assertDictEqual(expected_model, 
model_with_remote_name) # vlun does not has remoteName mock_client.getHostVLUNs.return_value = [ {'active': False, 'volumeName': self.VOLUME_3PAR_NAME, 'lun': None, 'type': 1}] model_without_remote_name = self.driver._do_export( common, volume, connector) mock_client.assert_has_calls(expected) self.assertDictEqual(expected_model, model_without_remote_name) @mock.patch('cinder.volume.volume_utils.generate_password') def test_create_export(self, mock_utils): config = self.setup_configuration() config.hpe3par_iscsi_chap_enabled = True mock_client = self.setup_driver(config=config) mock_utils.return_value = 'random-pass' volume = {'host': 'test-host@3pariscsi', 'id': self.VOLUME_ID} connector = {'host': 'test-host'} mock_client.getHostVLUNs.return_value = [ {'active': True, 'volumeName': self.VOLUME_3PAR_NAME, 'lun': None, 'type': 3, 'remoteName': 'iqn.1993-08.org.debian:01:222'}] mock_client.getHost.return_value = { 'name': 'osv-0DM4qZEVSKON-DXN-NwVpw', 'initiatorChapEnabled': True} mock_client.getVolumeMetaData.return_value = { 'value': 'random-pass'} expected = [ mock.call.getHostVLUNs('test-host'), mock.call.getHost('test-host'), mock.call.getVolumeMetaData( 'osv-0DM4qZEVSKON-DXN-NwVpw', CHAP_PASS_KEY), mock.call.setVolumeMetaData( 'osv-0DM4qZEVSKON-DXN-NwVpw', CHAP_USER_KEY, 'test-host'), mock.call.setVolumeMetaData( 'osv-0DM4qZEVSKON-DXN-NwVpw', CHAP_PASS_KEY, 'random-pass')] expected_model = {'provider_auth': 'CHAP test-host random-pass'} mock_create_client = self.mock_object(hpecommon.HPE3PARCommon, '_create_client', return_value=mock_client) mock_create_client.return_value = mock_client model = self.driver.create_export(None, volume, connector) mock_client.assert_has_calls(expected) self.assertDictEqual(expected_model, model) # (i) ip_addr is default i.e v4 # (ii) ip_addr is v6 @ddt.data({'ip_addr': '10.10.220.252:1234'}, {'ip_addr': '[2001:db8:abcd:12:ffff:ffff:ffff:ff02]:5678'}) @ddt.unpack def test_initialize_iscsi_ports_with_iscsi_ip_and_port(self, ip_addr): # setup_mock_client drive with default configuration # and return the mock HTTP 3PAR client conf = self.setup_configuration() conf.hpe3par_iscsi_ips = [ip_addr] mock_client = self.setup_driver(config=conf) mock_client.getPorts.return_value = PORTS_RET expected = [mock.call.getPorts()] with mock.patch.object(hpecommon.HPE3PARCommon, '_create_client') as mock_create_client: mock_create_client.return_value = mock_client common = self.driver._login() self.driver.initialize_iscsi_ports(common) mock_client.assert_has_calls(expected) def test_initialize_iscsi_ports_with_wrong_ip_format_configured(self): # setup_mock_client drive with default configuration # and return the mock HTTP 3PAR client conf = self.setup_configuration() conf.hpe3par_iscsi_ips = ["10.10.220.252:1234:4567"] mock_client = self.setup_driver(config=conf) mock_client.getPorts.return_value = PORTS_RET with mock.patch.object(hpecommon.HPE3PARCommon, '_create_client') as mock_create_client: mock_create_client.return_value = mock_client common = self.driver._login() self.assertRaises(exception.InvalidInput, self.driver.initialize_iscsi_ports, common) def test_initialize_iscsi_ports_with_vlan_ip(self): # setup_mock_client drive with default configuration # and return the mock HTTP 3PAR client conf = self.setup_configuration() conf.hpe3par_iscsi_ips = ["192.168.100.1:1234"] mock_client = self.setup_driver(config=conf) mock_client.getPorts.return_value = PORTS_VLAN_RET expected = [mock.call.getPorts()] with mock.patch.object(hpecommon.HPE3PARCommon, '_create_client') as 
mock_create_client: mock_create_client.return_value = mock_client common = self.driver._login() self.driver.initialize_iscsi_ports(common) mock_client.assert_has_calls(expected) def test_ensure_export(self): # setup_mock_client drive with default configuration # and return the mock HTTP 3PAR client mock_client = self.setup_driver() volume = {'host': 'test-host@3pariscsi', 'id': 'd03338a9-9115-48a3-8dfc-35cdfcdc15a7'} mock_client.getAllVolumeMetaData.return_value = { 'total': 0, 'members': [] } with mock.patch.object(hpecommon.HPE3PARCommon, '_create_client') as mock_create_client: mock_create_client.return_value = mock_client model = self.driver.ensure_export(None, volume) expected = [ mock.call.getVolume('osv-0DM4qZEVSKON-DXN-NwVpw'), mock.call.getAllVolumeMetaData('osv-0DM4qZEVSKON-DXN-NwVpw') ] expected_model = {'provider_auth': None} mock_client.assert_has_calls( self.standard_login + expected + self.standard_logout) self.assertEqual(expected_model, model) mock_client.getAllVolumeMetaData.return_value = { 'total': 2, 'members': [ { 'creationTimeSec': 1406074222, 'value': 'fake-host', 'key': CHAP_USER_KEY, 'creationTime8601': '2014-07-22T17:10:22-07:00' }, { 'creationTimeSec': 1406074222, 'value': 'random-pass', 'key': CHAP_PASS_KEY, 'creationTime8601': '2014-07-22T17:10:22-07:00' } ] } model = self.driver.ensure_export(None, volume) expected = [ mock.call.getVolume('osv-0DM4qZEVSKON-DXN-NwVpw'), mock.call.getAllVolumeMetaData('osv-0DM4qZEVSKON-DXN-NwVpw') ] expected_model = {'provider_auth': "CHAP fake-host random-pass"} mock_client.assert_has_calls( self.standard_login + expected + self.standard_logout) self.assertEqual(expected_model, model) def test_ensure_export_missing_volume(self): # setup_mock_client drive with default configuration # and return the mock HTTP 3PAR client mock_client = self.setup_driver() volume = {'host': 'test-host@3pariscsi', 'id': 'd03338a9-9115-48a3-8dfc-35cdfcdc15a7'} mock_client.getVolume.side_effect = hpeexceptions.HTTPNotFound( 'fake') with mock.patch.object(hpecommon.HPE3PARCommon, '_create_client') as mock_create_client: mock_create_client.return_value = mock_client model = self.driver.ensure_export(None, volume) expected = [mock.call.getVolume('osv-0DM4qZEVSKON-DXN-NwVpw')] expected_model = None mock_client.assert_has_calls( self.standard_login + expected + self.standard_logout) self.assertEqual(expected_model, model) @mock.patch.object(volume_types, 'get_volume_type') def test_get_volume_settings_default_pool(self, _mock_volume_types): _mock_volume_types.return_value = { 'name': 'gold', 'id': 'gold-id', 'extra_specs': {}} mock_client = self.setup_driver() with mock.patch.object(hpecommon.HPE3PARCommon, '_create_client') as mock_create_client: mock_create_client.return_value = mock_client common = self.driver._login() volume = {'host': 'test-host@3pariscsi#pool_foo', 'id': 'd03338a9-9115-48a3-8dfc-35cdfcdc15a7'} pool = volume_utils.extract_host(volume['host'], 'pool') model = common.get_volume_settings_from_type_id('gold-id', pool) self.assertEqual('pool_foo', model['cpg']) def test_get_model_update(self): mock_client = self.setup_driver() with mock.patch.object(hpecommon.HPE3PARCommon, '_create_client') as mock_create_client: mock_create_client.return_value = mock_client common = self.driver._login() model_update = common._get_model_update('xxx@yyy#zzz', 'CPG') self.assertEqual({'host': 'xxx@yyy#CPG'}, model_update) def test_migrate_volume_attached(self): self.migrate_volume_attached() def test_terminate_connection_multiattach_same_host(self): ctx = 
context.get_admin_context() mock_client = self.setup_driver() att_1 = fake_volume.volume_attachment_ovo( ctx, id=uuidutils.generate_uuid(), attached_host='same_host') att_2 = fake_volume.volume_attachment_ovo( ctx, id=uuidutils.generate_uuid(), attached_host='same_host') volume = fake_volume.fake_volume_obj( ctx, multiattach=True, host=self.FAKE_CINDER_HOST) volume.volume_attachment.objects = [att_1, att_2] with mock.patch.object(hpecommon.HPE3PARCommon, '_create_client' ) as mock_create_client: mock_create_client.return_value = mock_client self.driver.terminate_connection(volume, self.connector) # When volume is attached to mulitple instances on same host, there # should be no call to delete the VLUN(s) or the host. We # can assert these methods were not called to make sure the # proper code execution is followed. self.assertEqual(0, mock_client.deleteVLUN.call_count) self.assertEqual(0, mock_client.deleteHost.call_count) def test_terminate_connection_multiattach_different_host(self): ctx = context.get_admin_context() att_1 = fake_volume.volume_attachment_ovo( ctx, id=uuidutils.generate_uuid(), attached_host='host_one') att_2 = fake_volume.volume_attachment_ovo( ctx, id=uuidutils.generate_uuid(), attached_host='host_two') volume = fake_volume.fake_volume_obj( ctx, multiattach=True, host=self.FAKE_CINDER_HOST) volume.volume_attachment.objects = [att_1, att_2] vol_name = 'osv-HlF355XlSg.xcORfS0afag' # When volume is attached to instances on different hosts, # VLUN(s) of that host should be deleted. We can assert # appropriate methods were called. mock_client = self.setup_driver() mock_client.getStorageSystemInfo.return_value = ( {'id': self.CLIENT_ID}) mock_client.getHostVLUNs.return_value = [ {'active': False, 'volumeName': vol_name, 'lun': None, 'type': 0}] mock_client.queryHost.return_value = { 'members': [{ 'name': self.FAKE_HOST }] } with mock.patch.object(hpecommon.HPE3PARCommon, '_create_client') as mock_create_client: mock_create_client.return_value = mock_client self.driver.terminate_connection( volume, self.connector, force=True) expected = [ mock.call.queryHost(iqns=[self.connector['initiator']]), mock.call.getHostVLUNs(self.FAKE_HOST), mock.call.deleteVLUN( vol_name, None, hostname=self.FAKE_HOST), mock.call.getHostVLUNs(self.FAKE_HOST), mock.call.modifyHost( 'fakehost', {'pathOperation': 2, 'iSCSINames': ['iqn.1993-08.org.debian:01:222']}), mock.call.removeVolumeMetaData(vol_name, CHAP_USER_KEY), mock.call.removeVolumeMetaData(vol_name, CHAP_PASS_KEY)] mock_client.assert_has_calls( self.get_id_login + self.standard_logout + self.standard_login + expected + self.standard_logout) @ddt.data('volume', 'volume_name_id') def test_terminate_connection(self, volume_attr): volume = getattr(self, volume_attr) vol_name = getattr(self, volume_attr.upper() + '_3PAR_NAME') # setup_mock_client drive with default configuration # and return the mock HTTP 3PAR client mock_client = self.setup_driver() mock_client.getHostVLUNs.return_value = [ {'active': False, 'volumeName': vol_name, 'lun': None, 'type': 0}] mock_client.queryHost.return_value = { 'members': [{ 'name': self.FAKE_HOST }] } with mock.patch.object(hpecommon.HPE3PARCommon, '_create_client') as mock_create_client: mock_create_client.return_value = mock_client self.driver.terminate_connection( volume, self.connector, force=True) expected = [ mock.call.queryHost(iqns=[self.connector['initiator']]), mock.call.getHostVLUNs(self.FAKE_HOST), mock.call.deleteVLUN( vol_name, None, hostname=self.FAKE_HOST), 
mock.call.getHostVLUNs(self.FAKE_HOST), mock.call.modifyHost( 'fakehost', {'pathOperation': 2, 'iSCSINames': ['iqn.1993-08.org.debian:01:222']}), mock.call.removeVolumeMetaData(vol_name, CHAP_USER_KEY), mock.call.removeVolumeMetaData(vol_name, CHAP_PASS_KEY)] mock_client.assert_has_calls( self.standard_login + expected + self.standard_logout) @mock.patch.object(volume_types, 'get_volume_type') def test_terminate_connection_peer_persistence(self, _mock_volume_types): # setup_mock_client drive with default configuration # and return the mock HTTP 3PAR client conf = self.setup_configuration() self.replication_targets[0]['replication_mode'] = 'sync' self.replication_targets[0]['quorum_witness_ip'] = '10.50.3.192' conf.replication_device = self.replication_targets mock_client = self.setup_driver(config=conf) mock_client.getStorageSystemInfo.return_value = ( {'id': self.CLIENT_ID}) mock_replicated_client = self.setup_driver(config=conf) mock_replicated_client.getStorageSystemInfo.return_value = ( {'id': self.REPLICATION_CLIENT_ID}) _mock_volume_types.return_value = { 'name': 'replicated', 'extra_specs': { 'replication_enabled': ' True', 'replication:mode': 'sync', 'volume_type': self.volume_type_replicated}} mock_client.getHostVLUNs.return_value = [ {'active': False, 'volumeName': self.VOLUME_3PAR_NAME, 'lun': None, 'type': 0}] mock_client.queryHost.return_value = { 'members': [{ 'name': self.FAKE_HOST }] } volume = copy.deepcopy(self.volume) volume.replication_status = 'enabled' with mock.patch.object( hpecommon.HPE3PARCommon, '_create_client') as mock_create_client, \ mock.patch.object( hpecommon.HPE3PARCommon, '_create_replication_client') as mock_replication_client: mock_create_client.return_value = mock_client mock_replication_client.return_value = mock_replicated_client self.driver.terminate_connection( volume, self.connector_multipath_enabled) expected = [ mock.call.queryHost(iqns=[self.connector['initiator']]), mock.call.getHostVLUNs(self.FAKE_HOST), mock.call.deleteVLUN( self.VOLUME_3PAR_NAME, None, hostname=self.FAKE_HOST), mock.call.getHostVLUNs(self.FAKE_HOST), mock.call.modifyHost( 'fakehost', {'pathOperation': 2, 'iSCSINames': ['iqn.1993-08.org.debian:01:222']}), mock.call.removeVolumeMetaData( self.VOLUME_3PAR_NAME, CHAP_USER_KEY), mock.call.removeVolumeMetaData( self.VOLUME_3PAR_NAME, CHAP_PASS_KEY)] mock_client.assert_has_calls( self.standard_login + expected + self.standard_logout) VLUNS5_RET = ({'members': [{'portPos': {'node': 0, 'slot': 8, 'cardPort': 2}, 'active': True}, {'portPos': {'node': 1, 'slot': 8, 'cardPort': 1}, 'active': True}]}) PORTS_RET = ({'members': [{'portPos': {'node': 1, 'slot': 8, 'cardPort': 2}, 'protocol': 2, 'IPAddr': '10.10.220.252', 'linkState': 4, 'device': [], 'iSCSIName': 'iqn.2000-05.com.3pardata:21820002ac00383d', 'mode': 2, 'HWAddr': '2C27D75375D2', 'type': 8}, {'portPos': {'node': 1, 'slot': 8, 'cardPort': 1}, 'protocol': 2, 'IPAddr': '10.10.220.253', 'linkState': 4, 'device': [], 'iSCSIName': 'iqn.2000-05.com.3pardata:21810002ac00383d', 'mode': 2, 'HWAddr': '2C27D75375D6', 'type': 8}, {'portPos': {'node': 1, 'slot': 8, 'cardPort': 3}, 'protocol': 2, 'IPAddr': '2001:db8:abcd:12:ffff:ffff:ffff:ff02', # v6 address 'linkState': 4, 'device': [], 'iSCSIName': 'iqn.2000-05.com.3pardata:21810002ac00383d', 'mode': 2, 'HWAddr': '2C27D75375D8', 'type': 3}]}) PORTS_VLAN_RET = ({'members': [{'portPos': {'node': 1, 'slot': 8, 'cardPort': 2}, 'protocol': 2, 'IPAddr': '10.10.220.252', 'linkState': 4, 'device': [], 'iSCSIName': 
'iqn.2000-05.com.3pardata:21820002ac00383d', 'mode': 2, 'HWAddr': '2C27D75375D2', 'type': 8, 'iSCSIVlans': [{'IPAddr': '192.168.100.1'}], }]}) VLUNS1_RET = ({'members': [{'portPos': {'node': 1, 'slot': 8, 'cardPort': 2}, 'hostname': 'foo', 'active': True}, {'portPos': {'node': 1, 'slot': 8, 'cardPort': 1}, 'hostname': 'bar', 'active': True}, {'portPos': {'node': 1, 'slot': 8, 'cardPort': 1}, 'hostname': 'bar', 'active': True}, {'portPos': {'node': 1, 'slot': 8, 'cardPort': 1}, 'hostname': 'bar', 'active': True}]}) PORTS1_RET = ({'members': [{'portPos': {'node': 0, 'slot': 8, 'cardPort': 2}, 'protocol': 2, 'IPAddr': '10.10.120.252', 'linkState': 4, 'device': [], 'iSCSIName': 'iqn.2000-05.com.3pardata:21820002ac00383d', 'mode': 2, 'HWAddr': '2C27D75375D2', 'type': 8}, {'portPos': {'node': 1, 'slot': 8, 'cardPort': 1}, 'protocol': 2, 'IPAddr': '10.10.220.253', 'linkState': 4, 'device': [], 'iSCSIName': 'iqn.2000-05.com.3pardata:21810002ac00383d', 'mode': 2, 'HWAddr': '2C27D75375D6', 'type': 8}, {'portWWN': '20210002AC00383D', 'protocol': 1, 'linkState': 4, 'mode': 2, 'device': ['cage2'], 'nodeWWN': '20210002AC00383D', 'type': 2, 'portPos': {'node': 0, 'slot': 6, 'cardPort': 3}}]})
cinder-27.0.0/cinder/tests/unit/volume/drivers/hpe/test_nimble.py
# Nimble Storage, Inc. (c) 2013-2014 # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License.
import http.client as http_client import sys from unittest import mock from oslo_utils import uuidutils from cinder import context from cinder import exception from cinder.objects import fields from cinder.objects import volume as obj_volume from cinder.objects import volume_type from cinder.tests.unit import fake_constants as fake from cinder.tests.unit import fake_group from cinder.tests.unit import fake_snapshot from cinder.tests.unit import fake_volume from cinder.tests.unit import test from cinder.volume.drivers.hpe import nimble from cinder.volume import volume_types from cinder.volume import volume_utils NIMBLE_CLIENT = 'cinder.volume.drivers.hpe.nimble.NimbleRestAPIExecutor' NIMBLE_URLLIB2 = 'cinder.volume.drivers.hpe.nimble.requests' NIMBLE_RANDOM = 'cinder.volume.drivers.hpe.nimble.random' NIMBLE_ISCSI_DRIVER = 'cinder.volume.drivers.hpe.nimble.NimbleISCSIDriver' NIMBLE_FC_DRIVER = 'cinder.volume.drivers.hpe.nimble.NimbleFCDriver' DRIVER_VERSION = '4.3.0' nimble.DEFAULT_SLEEP = 0 FAKE_POSITIVE_LOGIN_RESPONSE_1 = '2c20aad78a220ed1dae21dcd6f9446f5' FAKE_POSITIVE_LOGIN_RESPONSE_2 = '2c20aad78a220ed1dae21dcd6f9446ff' FAKE_POSITIVE_HEADERS = {'X-Auth-Token': FAKE_POSITIVE_LOGIN_RESPONSE_1} FAKE_POSITIVE_NETCONFIG_RESPONSE = { 'role': 'active', 'subnet_list': [{'network': '172.18.212.0', 'discovery_ip': '172.18.108.21', 'type': 'data', 'allow_iscsi': True, 'label': 'data1', 'allow_group': True, 'vlan_id': 0}], 'array_list': [{'nic_list': [{'subnet_label': 'data1', 'tagged': False, 'data_ip': '172.18.212.82', 'name': 'eth3'}]}], 'name': 'test-array'} FAKE_NEGATIVE_NETCONFIG_RESPONSE = exception.VolumeDriverException( "Session expired") FAKE_CREATE_VOLUME_POSITIVE_RESPONSE = { 'clone': False, 'name': "testvolume"} FAKE_CREATE_VOLUME_POSITIVE_RESPONSE_ENCRYPTION = { 'clone': False, 'name': "testvolume-encryption"} FAKE_CREATE_VOLUME_POSITIVE_RESPONSE_PERF_POLICY = { 'clone': False, 'name': "testvolume-perf-policy"} FAKE_CREATE_VOLUME_POSITIVE_RESPONSE_MULTI_INITIATOR = { 'clone': False, 'name': "testvolume-multi-initiator"} FAKE_CREATE_VOLUME_POSITIVE_RESPONSE_DEDUPE = { 'clone': False, 'name': "testvolume-dedupe"} FAKE_CREATE_VOLUME_POSITIVE_RESPONSE_QOS = { 'clone': False, 'name': "testvolume-qos"} FAKE_EXTRA_SPECS = {'multiattach': ' True', 'nimble:iops-limit': '1024'} FAKE_GET_VOL_INFO_RESPONSE = {'name': 'testvolume', 'clone': False, 'target_name': 'iqn.test', 'online': True, 'agent_type': 'openstack'} FAKE_GET_VOL_INFO_RESPONSE_MANAGE = {'name': 'testvolume', 'agent_type': 'none', 'online': False, 'target_name': 'iqn.test'} FAKE_GET_VOL_INFO_ONLINE = {'name': 'testvolume', 'size': 2048, 'online': True, 'agent_type': 'none'} FAKE_GET_VOL_INFO_RETYPE = {'name': 'testvolume', 'size': 2048, 'online': True, 'agent_type': 'none', 'pool_id': 'none', 'pool_name': 'none'} FAKE_GET_VOL_INFO_BACKUP_RESPONSE = {'name': 'testvolume', 'clone': True, 'target_name': 'iqn.test', 'online': False, 'agent_type': 'openstack', 'parent_vol_id': 'volume-' + fake.VOLUME2_ID, 'base_snap_id': 'test-backup-snap'} FAKE_GET_SNAP_INFO_BACKUP_RESPONSE = { 'description': "backup-vol-" + fake.VOLUME2_ID, 'name': 'test-backup-snap', 'id': fake.SNAPSHOT_ID, 'vol_id': fake.VOLUME_ID, 'volume_name': 'volume-' + fake.VOLUME_ID} FAKE_POSITIVE_GROUP_CONFIG_RESPONSE = { 'name': 'group-test', 'version_current': '0.0.0.0', 'access_protocol_list': ['iscsi']} FAKE_LOGIN_POST_RESPONSE = { 'data': {'session_token': FAKE_POSITIVE_LOGIN_RESPONSE_1}} FAKE_EXTEND_VOLUME_PARAMS = {'data': {'size': 5120, 'reserve': 0, 'warn_level': 80, 
'limit': 100, 'snap_limit': sys.maxsize}} FAKE_IGROUP_LIST_RESPONSE = [ {'iscsi_initiators': [{'iqn': 'test-initiator1'}], 'name': 'test-igrp1'}, {'iscsi_initiators': [{'iqn': 'test-initiator2'}], 'name': 'test-igrp2'}] FAKE_IGROUP_LIST_RESPONSE_FC = [ {'fc_initiators': [{'wwpn': '10:00:00:00:00:00:00:00'}], 'name': 'test-igrp1'}, {'fc_initiators': [{'wwpn': '10:00:00:00:00:00:00:00'}, {'wwpn': '10:00:00:00:00:00:00:01'}], 'name': 'test-igrp2'}] FAKE_GET_VOL_INFO_REVERT = {'name': 'testvolume', 'id': fake.VOLUME_ID, 'clone': False, 'target_name': 'iqn.test', 'online': True, 'agent_type': 'openstack', 'last_snap': {'snap_id': fake.SNAPSHOT_ID}} FAKE_SNAP_INFO_REVERT = {'name': 'testsnap', 'id': fake.SNAPSHOT2_ID} FAKE_CREATE_VOLUME_NEGATIVE_RESPONSE = exception.VolumeBackendAPIException( "Volume testvolume not found") FAKE_VOLUME_INFO_NEGATIVE_RESPONSE = exception.VolumeBackendAPIException( "Volume testvolume not found") FAKE_CREATE_VOLUME_NEGATIVE_ENCRYPTION = exception.VolumeBackendAPIException( "Volume testvolume-encryption not found") FAKE_CREATE_VOLUME_NEGATIVE_PERFPOLICY = exception.VolumeBackendAPIException( "Volume testvolume-perfpolicy not found") FAKE_CREATE_VOLUME_NEGATIVE_DEDUPE = exception.VolumeBackendAPIException( "The specified pool is not capable of hosting deduplicated volumes") FAKE_CREATE_VOLUME_NEGATIVE_QOS = exception.VolumeBackendAPIException( "Please set valid IOPS limitin the range [256, 4294967294]") FAKE_VOLUME_RESTORE_NEGATIVE_RESPONSE = exception.VolumeBackendAPIException( "No recent Snapshot found") FAKE_POSITIVE_GROUP_INFO_RESPONSE = { 'version_current': '3.0.0.0', 'group_target_enabled': False, 'name': 'group-nimble', 'usage_valid': True, 'usable_capacity_bytes': 8016883089408, 'free_space': 101111111901} FAKE_GET_VOL_INFO_RESPONSE = {'name': 'testvolume-cg', 'clone': False, 'target_name': 'iqn.test', 'online': True, 'agent_type': 'openstack'} FAKE_EXTRA_SPECS_CG = {'consistent_group_snapshot_enabled': " False"} FAKE_VOLUME_TYPE = {'extra_specs': FAKE_EXTRA_SPECS_CG} SRC_CG_VOLUME_ID = 'bd21d11b-c765-4c68-896c-6b07f63cfcb6' SRC_CG_VOLUME_NAME = 'volume-' + SRC_CG_VOLUME_ID volume_src_cg = {'name': SRC_CG_VOLUME_NAME, 'id': SRC_CG_VOLUME_ID, 'display_name': 'Foo Volume', 'size': 2, 'host': 'FAKE_CINDER_HOST', 'volume_type': None, 'volume_type_id': None} VOLUME_TYPE_ID_CG = 'd03338a9-9115-48a3-8dfc-44444444444' VOLUME_ID = 'd03338a9-9115-48a3-8dfc-35cdfcdc15a7' admin_context = context.get_admin_context() VOLUME_NAME = 'volume-' + VOLUME_ID FAKE_GROUP = fake_group.fake_group_obj( admin_context, id=fake.GROUP_ID, status='available') volume_cg = {'name': VOLUME_NAME, 'id': VOLUME_ID, 'display_name': 'Foo Volume', 'provider_location': 12, 'size': 2, 'host': 'FAKE_CINDER_HOST', 'volume_type': 'cg_type', 'volume_type_id': VOLUME_TYPE_ID_CG} FAKE_CREATE_VOLUME_POSITIVE_RESPONSE_CG = { 'clone': False, 'name': "testvolume-cg"} FAKE_GET_VOLID_INFO_RESPONSE = {'vol_id': fake.VOLUME_ID} FAKE_GET_VOLCOLL_INFO_RESPONSE = {'volcoll_id': fake.VOLUME2_ID} FAKE_ASSOCIATE_VOLCOLL_INFO_RESPONSE = {'vol_id': fake.VOLUME_ID, 'volcoll_id': fake.VOLUME2_ID} FAKE_GENERIC_POSITIVE_RESPONSE = "" FAKE_VOLUME_DELETE_HAS_CLONE_RESPONSE = "Object has a clone" FAKE_TYPE_ID = fake.VOLUME_TYPE_ID FAKE_TYPE_ID_NEW = fake.VOLUME_TYPE2_ID FAKE_POOL_ID = fake.GROUP_ID FAKE_PERFORMANCE_POLICY_ID = fake.OBJECT_ID NIMBLE_MANAGEMENT_IP = "10.18.108.55" NIMBLE_SAN_LOGIN = "nimble" NIMBLE_SAN_PASS = "nimble_pass" SRC_CONSIS_GROUP_ID = '7d7dfa02-ac6e-48cb-96af-8a0cd3008d47' FAKE_SRC_GROUP = 
fake_group.fake_group_obj( admin_context, id = SRC_CONSIS_GROUP_ID, status = 'available') REPL_DEVICES = [{ 'san_login': 'nimble', 'san_password': 'nimble_pass', 'san_ip': '10.18.108.66', 'schedule_name': 'every-minute', 'downstream_partner': 'nimblevsagroup2', 'period': 1, 'period_unit': 'minutes'}] def create_configuration(username, password, ip_address, pool_name=None, subnet_label=None, thin_provision=True, devices=None, max_over_subscription_ratio=20.0): configuration = mock.Mock() configuration.san_login = username configuration.san_password = password configuration.san_ip = ip_address configuration.san_thin_provision = thin_provision configuration.nimble_pool_name = pool_name configuration.nimble_subnet_label = subnet_label configuration.safe_get.return_value = 'NIMBLE' configuration.replication_device = devices configuration.max_over_subscription_ratio = max_over_subscription_ratio return configuration class NimbleDriverBaseTestCase(test.TestCase): """Base Class for the NimbleDriver Tests.""" def setUp(self): super(NimbleDriverBaseTestCase, self).setUp() self.mock_client_service = None self.mock_client_class = None self.driver = None @staticmethod def client_mock_decorator(configuration): def client_mock_wrapper(func): def inner_client_mock( self, mock_client_class, mock_urllib2, *args, **kwargs): self.mock_client_class = mock_client_class self.mock_client_service = mock.MagicMock(name='Client') self.mock_client_class.return_value = self.mock_client_service self.driver = nimble.NimbleISCSIDriver( configuration=configuration) mock_login_response = mock_urllib2.post.return_value mock_login_response = mock.MagicMock() mock_login_response.status_code.return_value = http_client.OK mock_login_response.json.return_value = ( FAKE_LOGIN_POST_RESPONSE) self.driver.do_setup(context.get_admin_context()) self.driver.APIExecutor.login() func(self, *args, **kwargs) return inner_client_mock return client_mock_wrapper @staticmethod def client_mock_decorator_fc(configuration): def client_mock_wrapper(func): def inner_client_mock( self, mock_client_class, mock_urllib2, *args, **kwargs): self.mock_client_class = mock_client_class self.mock_client_service = mock.MagicMock(name='Client') self.mock_client_class.return_value = ( self.mock_client_service) self.driver = nimble.NimbleFCDriver( configuration=configuration) mock_login_response = mock_urllib2.post.return_value mock_login_response = mock.MagicMock() mock_login_response.status_code.return_value = http_client.OK mock_login_response.json.return_value = ( FAKE_LOGIN_POST_RESPONSE) self.driver.do_setup(context.get_admin_context()) self.driver.APIExecutor.login() func(self, *args, **kwargs) return inner_client_mock return client_mock_wrapper @staticmethod def client_mock_decorator_nimble_api(username, password, ip, verify): def client_mock_wrapper(func): def inner_client_mock( self, mock_client_class, mock_urllib2, *args, **kwargs): self.mock_client_class = mock_client_class self.mock_client_service = mock.MagicMock(name='Client') self.mock_client_class.return_value = ( self.mock_client_service) self.driver = nimble.NimbleRestAPIExecutor( username=username, password=password, ip=ip, verify=verify) mock_login_response = mock_urllib2.post.return_value mock_login_response = mock.MagicMock() mock_login_response.status_code.return_value = http_client.OK mock_login_response.json.return_value = ( FAKE_LOGIN_POST_RESPONSE) func(self, *args, **kwargs) return inner_client_mock return client_mock_wrapper class NimbleDriverLoginTestCase(NimbleDriverBaseTestCase): 
"""Tests do_setup api.""" @mock.patch(NIMBLE_URLLIB2) @mock.patch(NIMBLE_CLIENT) @mock.patch.object(obj_volume.VolumeList, 'get_all_by_host', mock.Mock(return_value=[])) @NimbleDriverBaseTestCase.client_mock_decorator(create_configuration( "nimble", "nimble_pass", "10.18.108.55", 'default', '*')) def test_do_setup_positive(self): expected_call_list = [mock.call.login()] self.mock_client_service.assert_has_calls(expected_call_list) @mock.patch(NIMBLE_URLLIB2) @mock.patch(NIMBLE_CLIENT) @mock.patch.object(obj_volume.VolumeList, 'get_all_by_host', mock.Mock(return_value=[])) @NimbleDriverBaseTestCase.client_mock_decorator(create_configuration( 'nimble', 'nimble_pass', '10.18.108.55', 'default', '*')) def test_expire_session_id(self): expected_call_list = [mock.call.login()] self.mock_client_service.assert_has_calls(expected_call_list) self.driver.APIExecutor.get("groups") expected_call_list = [mock.call.get_group_info(), mock.call.login(), mock.call.get("groups")] self.assertEqual( self.mock_client_service.method_calls, expected_call_list) class NimbleDriverVolumeTestCase(NimbleDriverBaseTestCase): """Tests volume related api's.""" @mock.patch(NIMBLE_URLLIB2) @mock.patch(NIMBLE_CLIENT) @mock.patch.object(obj_volume.VolumeList, 'get_all_by_host', mock.Mock(return_value=[])) @mock.patch.object(volume_types, 'get_volume_type_extra_specs', mock.Mock(type_id=FAKE_TYPE_ID, return_value={ 'nimble:perfpol-name': 'default', 'nimble:encryption': 'yes'})) @NimbleDriverBaseTestCase.client_mock_decorator(create_configuration( NIMBLE_SAN_LOGIN, NIMBLE_SAN_PASS, NIMBLE_MANAGEMENT_IP, 'default', '*')) def test_create_volume_positive(self): self.mock_client_service.get_vol_info.return_value = ( FAKE_GET_VOL_INFO_RESPONSE) self.mock_client_service.get_netconfig.return_value = ( FAKE_POSITIVE_NETCONFIG_RESPONSE) self.assertEqual({ 'provider_location': '172.18.108.21:3260 iqn.test', 'provider_auth': None}, self.driver.create_volume({'name': 'testvolume', 'size': 1, 'volume_type_id': None, 'display_name': '', 'display_description': ''})) self.mock_client_service.create_vol.assert_called_once_with( {'name': 'testvolume', 'size': 1, 'volume_type_id': None, 'display_name': '', 'display_description': ''}, 'default', False, 'iSCSI', False) @mock.patch(NIMBLE_URLLIB2) @mock.patch(NIMBLE_CLIENT) @mock.patch.object(obj_volume.VolumeList, 'get_all_by_host', mock.Mock(return_value=[])) @mock.patch.object(volume_types, 'get_volume_type_extra_specs', mock.Mock(type_id=FAKE_TYPE_ID, return_value={ 'nimble:perfpol-name': 'default', 'nimble:encryption': 'yes'})) @NimbleDriverBaseTestCase.client_mock_decorator(create_configuration( NIMBLE_SAN_LOGIN, NIMBLE_SAN_PASS, NIMBLE_MANAGEMENT_IP, 'default', '*')) def test_create_volume_with_unicode(self): self.mock_client_service.get_vol_info.return_value = ( FAKE_GET_VOL_INFO_RESPONSE) self.mock_client_service.get_netconfig.return_value = ( FAKE_POSITIVE_NETCONFIG_RESPONSE) self.assertEqual({ 'provider_location': '172.18.108.21:3260 iqn.test', 'provider_auth': None}, self.driver.create_volume({'name': 'testvolume', 'size': 1, 'volume_type_id': None, 'display_name': u'unicode_name', 'display_description': ''})) self.mock_client_service.create_vol.assert_called_once_with( {'name': 'testvolume', 'size': 1, 'volume_type_id': None, 'display_name': u'unicode_name', 'display_description': ''}, 'default', False, 'iSCSI', False) @mock.patch(NIMBLE_URLLIB2) @mock.patch(NIMBLE_CLIENT) @mock.patch.object(obj_volume.VolumeList, 'get_all_by_host', mock.Mock(return_value=[])) 
@mock.patch.object(volume_types, 'get_volume_type_extra_specs', mock.Mock(type_id=FAKE_TYPE_ID, return_value={ 'nimble:perfpol-name': 'default', 'nimble:encryption': 'yes', 'multiattach': 'false'})) @NimbleDriverBaseTestCase.client_mock_decorator(create_configuration( 'nimble', 'nimble_pass', '10.18.108.55', 'default', '*')) def test_create_volume_encryption_positive(self): self.mock_client_service._execute_create_vol.return_value = ( FAKE_CREATE_VOLUME_POSITIVE_RESPONSE_ENCRYPTION) self.mock_client_service.get_vol_info.return_value = ( FAKE_GET_VOL_INFO_RESPONSE) self.mock_client_service.get_netconfig.return_value = ( FAKE_POSITIVE_NETCONFIG_RESPONSE) volume = {'name': 'testvolume-encryption', 'size': 1, 'volume_type_id': FAKE_TYPE_ID, 'display_name': '', 'display_description': ''} self.assertEqual({ 'provider_location': '172.18.108.21:3260 iqn.test', 'provider_auth': None}, self.driver.create_volume(volume)) self.mock_client_service.create_vol.assert_called_once_with( {'name': 'testvolume-encryption', 'size': 1, 'volume_type_id': FAKE_TYPE_ID, 'display_name': '', 'display_description': '', }, 'default', False, 'iSCSI', False) @mock.patch(NIMBLE_URLLIB2) @mock.patch(NIMBLE_CLIENT) @mock.patch.object(obj_volume.VolumeList, 'get_all_by_host', mock.Mock(return_value=[])) @mock.patch.object(volume_types, 'get_volume_type_extra_specs', mock.Mock(type_id=FAKE_TYPE_ID, return_value={ 'nimble:perfpol-name': 'VMware ESX', 'nimble:encryption': 'no', 'multiattach': 'false'})) @NimbleDriverBaseTestCase.client_mock_decorator(create_configuration( 'nimble', 'nimble_pass', '10.18.108.55', 'default', '*')) def test_create_volume_perfpolicy_positive(self): self.mock_client_service._execute_create_vol.return_value = ( FAKE_CREATE_VOLUME_POSITIVE_RESPONSE_PERF_POLICY) self.mock_client_service.get_vol_info.return_value = ( FAKE_GET_VOL_INFO_RESPONSE) self.mock_client_service.get_netconfig.return_value = ( FAKE_POSITIVE_NETCONFIG_RESPONSE) self.assertEqual( {'provider_location': '172.18.108.21:3260 iqn.test', 'provider_auth': None}, self.driver.create_volume({'name': 'testvolume-perfpolicy', 'size': 1, 'volume_type_id': FAKE_TYPE_ID, 'display_name': '', 'display_description': ''})) self.mock_client_service.create_vol.assert_called_once_with( {'name': 'testvolume-perfpolicy', 'size': 1, 'volume_type_id': FAKE_TYPE_ID, 'display_name': '', 'display_description': '', }, 'default', False, 'iSCSI', False) @mock.patch(NIMBLE_URLLIB2) @mock.patch(NIMBLE_CLIENT) @mock.patch.object(obj_volume.VolumeList, 'get_all_by_host', mock.Mock(return_value=[])) @mock.patch.object(volume_types, 'get_volume_type_extra_specs', mock.Mock(type_id=FAKE_TYPE_ID, return_value={ 'nimble:perfpol-name': 'default', 'nimble:encryption': 'no', 'multiattach': 'true'})) @NimbleDriverBaseTestCase.client_mock_decorator(create_configuration( 'nimble', 'nimble_pass', '10.18.108.55', 'default', '*')) def test_create_volume_multi_initiator_positive(self): self.mock_client_service._execute_create_vol.return_value = ( FAKE_CREATE_VOLUME_POSITIVE_RESPONSE_MULTI_INITIATOR) self.mock_client_service.get_vol_info.return_value = ( FAKE_GET_VOL_INFO_RESPONSE) self.mock_client_service.get_netconfig.return_value = ( FAKE_POSITIVE_NETCONFIG_RESPONSE) self.assertEqual( {'provider_location': '172.18.108.21:3260 iqn.test', 'provider_auth': None}, self.driver.create_volume({'name': 'testvolume-multi-initiator', 'size': 1, 'volume_type_id': FAKE_TYPE_ID, 'display_name': '', 'display_description': ''})) self.mock_client_service.create_vol.assert_called_once_with( 
{'name': 'testvolume-multi-initiator', 'size': 1, 'volume_type_id': FAKE_TYPE_ID, 'display_name': '', 'display_description': '', }, 'default', False, 'iSCSI', False) @mock.patch(NIMBLE_URLLIB2) @mock.patch(NIMBLE_CLIENT) @mock.patch.object(obj_volume.VolumeList, 'get_all_by_host', mock.Mock(return_value=[])) @mock.patch.object(volume_types, 'get_volume_type_extra_specs', mock.Mock(type_id=FAKE_TYPE_ID, return_value={ 'nimble:perfpol-name': 'default', 'nimble:encryption': 'no', 'nimble:dedupe': 'true'})) @NimbleDriverBaseTestCase.client_mock_decorator(create_configuration( 'nimble', 'nimble_pass', '10.18.108.55', 'default', '*')) def test_create_volume_dedupe_positive(self): self.mock_client_service._execute_create_vol.return_value = ( FAKE_CREATE_VOLUME_POSITIVE_RESPONSE_DEDUPE) self.mock_client_service.get_vol_info.return_value = ( FAKE_GET_VOL_INFO_RESPONSE) self.mock_client_service.get_netconfig.return_value = ( FAKE_POSITIVE_NETCONFIG_RESPONSE) self.assertEqual( {'provider_location': '172.18.108.21:3260 iqn.test', 'provider_auth': None}, self.driver.create_volume({'name': 'testvolume-dedupe', 'size': 1, 'volume_type_id': FAKE_TYPE_ID, 'display_name': '', 'display_description': ''})) self.mock_client_service.create_vol.assert_called_once_with( {'name': 'testvolume-dedupe', 'size': 1, 'volume_type_id': FAKE_TYPE_ID, 'display_name': '', 'display_description': '', }, 'default', False, 'iSCSI', False) @mock.patch(NIMBLE_URLLIB2) @mock.patch(NIMBLE_CLIENT) @mock.patch.object(obj_volume.VolumeList, 'get_all_by_host', mock.Mock(return_value=[])) @mock.patch.object(volume_types, 'get_volume_type_extra_specs', mock.Mock(type_id=FAKE_TYPE_ID, return_value={ 'nimble:perfpol-name': 'default', 'nimble:iops-limit': '1024'})) @NimbleDriverBaseTestCase.client_mock_decorator(create_configuration( 'nimble', 'nimble_pass', '10.18.108.55', 'default', '*')) def test_create_volume_qos_positive(self): self.mock_client_service._execute_create_vol.return_value = ( FAKE_CREATE_VOLUME_POSITIVE_RESPONSE_QOS) self.mock_client_service.get_vol_info.return_value = ( FAKE_GET_VOL_INFO_RESPONSE) self.mock_client_service.get_netconfig.return_value = ( FAKE_POSITIVE_NETCONFIG_RESPONSE) self.assertEqual( {'provider_location': '172.18.108.21:3260 iqn.test', 'provider_auth': None}, self.driver.create_volume({'name': 'testvolume-qos', 'size': 1, 'volume_type_id': FAKE_TYPE_ID, 'display_name': '', 'display_description': ''})) self.mock_client_service.create_vol.assert_called_once_with( {'name': 'testvolume-qos', 'size': 1, 'volume_type_id': FAKE_TYPE_ID, 'display_name': '', 'display_description': '', }, 'default', False, 'iSCSI', False) @mock.patch(NIMBLE_URLLIB2) @mock.patch(NIMBLE_CLIENT) @mock.patch.object(obj_volume.VolumeList, 'get_all_by_host', mock.Mock(return_value=[])) @NimbleDriverBaseTestCase.client_mock_decorator(create_configuration( 'nimble', 'nimble_pass', '10.18.108.55', 'default', '*')) @mock.patch.object(volume_types, 'get_volume_type_extra_specs', mock.Mock(type_id=FAKE_TYPE_ID, return_value={ 'nimble:perfpol-name': 'default', 'nimble:encryption': 'no', 'multiattach': 'false'})) def test_create_volume_negative(self): self.mock_client_service.get_vol_info.side_effect = ( FAKE_CREATE_VOLUME_NEGATIVE_RESPONSE) self.assertRaises( exception.VolumeBackendAPIException, self.driver.create_volume, {'name': 'testvolume', 'size': 1, 'volume_type_id': FAKE_TYPE_ID, 'display_name': '', 'display_description': ''}) @mock.patch(NIMBLE_URLLIB2) @mock.patch(NIMBLE_CLIENT) @mock.patch.object(obj_volume.VolumeList, 
'get_all_by_host', mock.Mock(return_value=[])) @NimbleDriverBaseTestCase.client_mock_decorator(create_configuration( 'nimble', 'nimble_pass', '10.18.108.55', 'default', '*')) def test_create_volume_encryption_negative(self): self.mock_client_service.get_vol_info.side_effect = ( FAKE_CREATE_VOLUME_NEGATIVE_ENCRYPTION) self.assertRaises( exception.VolumeBackendAPIException, self.driver.create_volume, {'name': 'testvolume-encryption', 'size': 1, 'volume_type_id': None, 'display_name': '', 'display_description': ''}) @mock.patch(NIMBLE_URLLIB2) @mock.patch(NIMBLE_CLIENT) @mock.patch.object(obj_volume.VolumeList, 'get_all_by_host', mock.Mock(return_value=[])) @NimbleDriverBaseTestCase.client_mock_decorator(create_configuration( 'nimble', 'nimble_pass', '10.18.108.55', 'default', '*')) def test_create_volume_perfpolicy_negative(self): self.mock_client_service.get_vol_info.side_effect = ( FAKE_CREATE_VOLUME_NEGATIVE_PERFPOLICY) self.assertRaises( exception.VolumeBackendAPIException, self.driver.create_volume, {'name': 'testvolume-perfpolicy', 'size': 1, 'volume_type_id': None, 'display_name': '', 'display_description': ''}) @mock.patch(NIMBLE_URLLIB2) @mock.patch(NIMBLE_CLIENT) @mock.patch.object(obj_volume.VolumeList, 'get_all_by_host', mock.Mock(return_value=[])) @NimbleDriverBaseTestCase.client_mock_decorator(create_configuration( 'nimble', 'nimble_pass', '10.18.108.55', 'default', '*')) def test_create_volume_dedupe_negative(self): self.mock_client_service.get_vol_info.side_effect = ( FAKE_CREATE_VOLUME_NEGATIVE_DEDUPE) self.assertRaises( exception.VolumeBackendAPIException, self.driver.create_volume, {'name': 'testvolume-dedupe', 'size': 1, 'volume_type_id': None, 'display_name': '', 'display_description': ''}) @mock.patch(NIMBLE_URLLIB2) @mock.patch(NIMBLE_CLIENT) @mock.patch.object(obj_volume.VolumeList, 'get_all_by_host', mock.Mock(return_value=[])) @NimbleDriverBaseTestCase.client_mock_decorator(create_configuration( 'nimble', 'nimble_pass', '10.18.108.55', 'default', '*')) @mock.patch.object(volume_types, 'get_volume_type_extra_specs', mock.Mock(type_id=FAKE_TYPE_ID, return_value={ 'nimble:perfpol-name': 'default', 'nimble:iops-limit': '200'})) def test_create_volume_qos_negative(self): self.mock_client_service.get_vol_info.side_effect = ( FAKE_CREATE_VOLUME_NEGATIVE_QOS) self.assertRaises( exception.VolumeBackendAPIException, self.driver.create_volume, {'name': 'testvolume-qos', 'size': 1, 'volume_type_id': None, 'display_name': '', 'display_description': ''}) @mock.patch(NIMBLE_URLLIB2) @mock.patch(NIMBLE_CLIENT) @mock.patch.object(obj_volume.VolumeList, 'get_all_by_host', mock.Mock(return_value=[])) @mock.patch.object(volume_types, 'get_volume_type_extra_specs', mock.Mock(type_id=FAKE_TYPE_ID, return_value={ 'nimble:perfpol-name': 'default', 'nimble:encryption': 'yes'})) @NimbleDriverBaseTestCase.client_mock_decorator(create_configuration( NIMBLE_SAN_LOGIN, NIMBLE_SAN_PASS, NIMBLE_MANAGEMENT_IP, 'default', '*', devices=REPL_DEVICES)) def test_create_volume_replicated(self): self.mock_client_service.get_vol_info.return_value = ( FAKE_GET_VOL_INFO_RESPONSE) self.mock_client_service.get_netconfig.return_value = ( FAKE_POSITIVE_NETCONFIG_RESPONSE) self.assertEqual({ 'provider_location': '172.18.108.21:3260 iqn.test', 'provider_auth': None, 'replication_status': 'enabled'}, self.driver.create_volume({'name': 'testvolume', 'size': 1, 'volume_type_id': None, 'display_name': '', 'display_description': ''})) self.mock_client_service.create_vol.assert_called_once_with( {'name': 'testvolume', 
'size': 1, 'volume_type_id': None, 'display_name': '', 'display_description': ''}, 'default', False, 'iSCSI', False) @mock.patch(NIMBLE_URLLIB2) @mock.patch(NIMBLE_CLIENT) @mock.patch.object(obj_volume.VolumeList, 'get_all_by_host', mock.Mock(return_value=[])) @NimbleDriverBaseTestCase.client_mock_decorator(create_configuration( 'nimble', 'nimble_pass', '10.18.108.55', 'default', '*')) @mock.patch(NIMBLE_ISCSI_DRIVER + ".is_volume_backup_clone", mock.Mock( return_value=['', ''])) def test_delete_volume(self): self.mock_client_service.online_vol.return_value = ( FAKE_GENERIC_POSITIVE_RESPONSE) self.mock_client_service.delete_vol.return_value = ( FAKE_GENERIC_POSITIVE_RESPONSE) self.driver.delete_volume({'name': 'testvolume'}) expected_calls = [mock.call.online_vol( 'testvolume', False), mock.call.delete_vol('testvolume')] self.mock_client_service.assert_has_calls(expected_calls) @mock.patch(NIMBLE_URLLIB2) @mock.patch(NIMBLE_CLIENT) @mock.patch.object(obj_volume.VolumeList, 'get_all_by_host', mock.Mock(return_value=[])) @NimbleDriverBaseTestCase.client_mock_decorator(create_configuration( 'nimble', 'nimble_pass', '10.18.108.55', 'default', '*')) @mock.patch(NIMBLE_ISCSI_DRIVER + ".is_volume_backup_clone", mock.Mock( return_value=['', ''])) def test_delete_volume_with_clone(self): self.mock_client_service.delete_vol.side_effect = \ nimble.NimbleAPIException(FAKE_VOLUME_DELETE_HAS_CLONE_RESPONSE) self.assertRaises( exception.VolumeIsBusy, self.driver.delete_volume, {'name': 'testvolume'}) expected_calls = [ mock.call.login(), mock.call.online_vol('testvolume', False), mock.call.delete_vol('testvolume'), mock.call.delete_vol('testvolume'), mock.call.delete_vol('testvolume'), mock.call.online_vol('testvolume', True)] self.mock_client_service.assert_has_calls(expected_calls) @mock.patch(NIMBLE_URLLIB2) @mock.patch(NIMBLE_CLIENT) @NimbleDriverBaseTestCase.client_mock_decorator(create_configuration( 'nimble', 'nimble_pass', '10.18.108.55', 'default', '*')) @mock.patch(NIMBLE_ISCSI_DRIVER + ".is_volume_backup_clone", mock.Mock( return_value=['test-backup-snap', 'volume-' + fake.VOLUME_ID])) @mock.patch.object(obj_volume.VolumeList, 'get_all_by_host') def test_delete_volume_with_backup(self, mock_volume_list): mock_volume_list.return_value = [] self.mock_client_service.online_vol.return_value = ( FAKE_GENERIC_POSITIVE_RESPONSE) self.mock_client_service.delete_vol.return_value = ( FAKE_GENERIC_POSITIVE_RESPONSE) self.mock_client_service.online_snap.return_value = ( FAKE_GENERIC_POSITIVE_RESPONSE) self.mock_client_service.delete_snap.return_value = ( FAKE_GENERIC_POSITIVE_RESPONSE) self.driver.delete_volume({'name': 'testvolume'}) expected_calls = [mock.call.online_vol( 'testvolume', False), mock.call.delete_vol('testvolume'), mock.call.online_snap('volume-' + fake.VOLUME_ID, False, 'test-backup-snap'), mock.call.delete_snap('volume-' + fake.VOLUME_ID, 'test-backup-snap')] self.mock_client_service.assert_has_calls(expected_calls) @mock.patch(NIMBLE_URLLIB2) @mock.patch(NIMBLE_CLIENT) @mock.patch.object(obj_volume.VolumeList, 'get_all_by_host', mock.Mock(return_value=[])) @NimbleDriverBaseTestCase.client_mock_decorator(create_configuration( NIMBLE_SAN_LOGIN, NIMBLE_SAN_PASS, NIMBLE_MANAGEMENT_IP, 'default', '*', devices=REPL_DEVICES)) @mock.patch(NIMBLE_ISCSI_DRIVER + ".is_volume_backup_clone", mock.Mock( return_value=['', ''])) def test_delete_volume_replicated(self): self.mock_client_service.online_vol.return_value = ( FAKE_GENERIC_POSITIVE_RESPONSE) self.mock_client_service.delete_vol.return_value = 
( FAKE_GENERIC_POSITIVE_RESPONSE) self.driver.delete_volume({'name': 'testvolume'}) expected_calls = [mock.call.online_vol( 'testvolume', False), mock.call.delete_vol('testvolume')] self.mock_client_service.assert_has_calls(expected_calls) @mock.patch(NIMBLE_URLLIB2) @mock.patch(NIMBLE_CLIENT) @mock.patch.object(obj_volume.VolumeList, 'get_all_by_host', mock.Mock(return_value=[])) @NimbleDriverBaseTestCase.client_mock_decorator(create_configuration( 'nimble', 'nimble_pass', '10.18.108.55', 'default', '*')) def test_extend_volume(self): self.mock_client_service.edit_vol.return_value = ( FAKE_CREATE_VOLUME_POSITIVE_RESPONSE) self.driver.extend_volume({'name': 'testvolume'}, 5) self.mock_client_service.edit_vol.assert_called_once_with( 'testvolume', FAKE_EXTEND_VOLUME_PARAMS) @mock.patch(NIMBLE_URLLIB2) @mock.patch(NIMBLE_CLIENT) @mock.patch.object(volume_types, 'get_volume_type_extra_specs', mock.Mock(type_id=FAKE_TYPE_ID, return_value={ 'nimble:perfpol-name': 'default', 'nimble:encryption': 'yes', 'multiattach': False, 'nimble:iops-limit': '1024'})) @NimbleDriverBaseTestCase.client_mock_decorator(create_configuration( 'nimble', 'nimble_pass', '10.18.108.55', 'default', '*', False)) @mock.patch.object(obj_volume.VolumeList, 'get_all_by_host') @mock.patch(NIMBLE_RANDOM) def test_create_cloned_volume(self, mock_random, mock_volume_list): mock_random.sample.return_value = fake.VOLUME_ID mock_volume_list.return_value = [] self.mock_client_service.snap_vol.return_value = ( FAKE_GENERIC_POSITIVE_RESPONSE) self.mock_client_service.clone_vol.return_value = ( FAKE_GENERIC_POSITIVE_RESPONSE) self.mock_client_service.get_vol_info.return_value = ( FAKE_GET_VOL_INFO_RESPONSE) self.mock_client_service.get_netconfig.return_value = ( FAKE_POSITIVE_NETCONFIG_RESPONSE) volume = obj_volume.Volume(context.get_admin_context(), id=fake.VOLUME_ID, size=5.0, _name_id=None, display_name='', volume_type_id=FAKE_TYPE_ID ) src_volume = obj_volume.Volume(context.get_admin_context(), id=fake.VOLUME2_ID, _name_id=None, size=5.0) self.assertEqual({ 'provider_location': '172.18.108.21:3260 iqn.test', 'provider_auth': None}, self.driver.create_cloned_volume(volume, src_volume)) expected_calls = [mock.call.snap_vol( {'volume_name': "volume-" + fake.VOLUME2_ID, 'name': 'openstack-clone-volume-' + fake.VOLUME_ID + "-" + fake.VOLUME_ID, 'volume_size': src_volume['size'], 'display_name': volume['display_name'], 'display_description': ''}), mock.call.clone_vol(volume, {'volume_name': "volume-" + fake.VOLUME2_ID, 'name': 'openstack-clone-volume-' + fake.VOLUME_ID + "-" + fake.VOLUME_ID, 'volume_size': src_volume['size'], 'display_name': volume['display_name'], 'display_description': ''}, True, False, 'iSCSI', 'default')] self.mock_client_service.assert_has_calls(expected_calls) @mock.patch(NIMBLE_URLLIB2) @mock.patch(NIMBLE_CLIENT) @mock.patch.object(obj_volume.VolumeList, 'get_all_by_host', mock.Mock(return_value=[])) @NimbleDriverBaseTestCase.client_mock_decorator(create_configuration( 'nimble', 'nimble_pass', '10.18.108.55', 'default', '*')) def test_manage_volume_positive(self): self.mock_client_service.get_netconfig.return_value = ( FAKE_POSITIVE_NETCONFIG_RESPONSE) self.mock_client_service.get_vol_info.return_value = ( FAKE_GET_VOL_INFO_RESPONSE_MANAGE) self.mock_client_service.online_vol.return_value = ( FAKE_GENERIC_POSITIVE_RESPONSE) self.mock_client_service.edit_vol.return_value = ( FAKE_CREATE_VOLUME_POSITIVE_RESPONSE) self.assertEqual({ 'provider_location': '172.18.108.21:3260 iqn.test', 'provider_auth': None}, 
self.driver.manage_existing({'name': 'volume-abcdef', 'id': fake.VOLUME_ID, 'agent_type': None}, {'source-name': 'test-vol'})) expected_calls = [mock.call.edit_vol( 'test-vol', {'data': {'agent_type': 'openstack', 'name': 'volume-abcdef'}}), mock.call.online_vol('volume-abcdef', True)] self.mock_client_service.assert_has_calls(expected_calls) @mock.patch(NIMBLE_URLLIB2) @mock.patch(NIMBLE_CLIENT) @mock.patch.object(obj_volume.VolumeList, 'get_all_by_host', mock.Mock(return_value=[])) @NimbleDriverBaseTestCase.client_mock_decorator(create_configuration( 'nimble', 'nimble_pass', '10.18.108.55', 'default', '*')) def test_manage_volume_which_is_online(self): self.mock_client_service.get_netconfig.return_value = ( FAKE_POSITIVE_NETCONFIG_RESPONSE) self.mock_client_service.get_vol_info.return_value = ( FAKE_GET_VOL_INFO_ONLINE) self.assertRaises( exception.InvalidVolume, self.driver.manage_existing, {'name': 'volume-abcdef'}, {'source-name': 'test-vol'}) @mock.patch(NIMBLE_URLLIB2) @mock.patch(NIMBLE_CLIENT) @mock.patch.object(obj_volume.VolumeList, 'get_all_by_host', mock.Mock(return_value=[])) @NimbleDriverBaseTestCase.client_mock_decorator(create_configuration( 'nimble', 'nimble_pass', '10.18.108.55', 'default', '*')) def test_manage_volume_get_size(self): self.mock_client_service.get_netconfig.return_value = ( FAKE_POSITIVE_NETCONFIG_RESPONSE) self.mock_client_service.get_vol_info.return_value = ( FAKE_GET_VOL_INFO_ONLINE) size = self.driver.manage_existing_get_size( {'name': 'volume-abcdef'}, {'source-name': 'test-vol'}) self.assertEqual(2, size) @mock.patch(NIMBLE_URLLIB2) @mock.patch(NIMBLE_CLIENT) @mock.patch.object(obj_volume.VolumeList, 'get_all', mock.Mock(return_value=[])) @NimbleDriverBaseTestCase.client_mock_decorator(create_configuration( 'nimble', 'nimble_pass', '10.18.108.55', 'default', '*')) def test_manage_volume_with_improper_ref(self): self.assertRaises( exception.ManageExistingInvalidReference, self.driver.manage_existing, {'name': 'volume-abcdef'}, {'source-id': 'test-vol'}) @mock.patch(NIMBLE_URLLIB2) @mock.patch(NIMBLE_CLIENT) @mock.patch.object(obj_volume.VolumeList, 'get_all_by_host', mock.Mock(return_value=[])) @NimbleDriverBaseTestCase.client_mock_decorator(create_configuration( 'nimble', 'nimble_pass', '10.18.108.55', 'default', '*')) def test_manage_volume_with_nonexistant_volume(self): self.mock_client_service.get_vol_info.side_effect = ( FAKE_VOLUME_INFO_NEGATIVE_RESPONSE) self.assertRaises( exception.VolumeBackendAPIException, self.driver.manage_existing, {'name': 'volume-abcdef'}, {'source-name': 'test-vol'}) @mock.patch(NIMBLE_URLLIB2) @mock.patch(NIMBLE_CLIENT) @mock.patch.object(obj_volume.VolumeList, 'get_all_by_host', mock.Mock(return_value=[])) @NimbleDriverBaseTestCase.client_mock_decorator(create_configuration( 'nimble', 'nimble_pass', '10.18.108.55', 'default', '*')) def test_manage_volume_with_wrong_agent_type(self): self.mock_client_service.get_vol_info.return_value = ( FAKE_GET_VOL_INFO_RESPONSE) self.assertRaises( exception.ManageExistingAlreadyManaged, self.driver.manage_existing, {'id': 'abcdef', 'name': 'volume-abcdef'}, {'source-name': 'test-vol'}) @mock.patch(NIMBLE_URLLIB2) @mock.patch(NIMBLE_CLIENT) @mock.patch.object(obj_volume.VolumeList, 'get_all_by_host', mock.Mock(return_value=[])) @NimbleDriverBaseTestCase.client_mock_decorator(create_configuration( 'nimble', 'nimble_pass', '10.18.108.55', 'default', '*')) def test_unmanage_volume_positive(self): self.mock_client_service.get_vol_info.return_value = ( FAKE_GET_VOL_INFO_RESPONSE) 
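# Unmanaging a volume is expected to reset its agent_type to 'none' via
# edit_vol and then take the volume offline; the expected_calls asserted
# below mirror exactly that sequence.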
self.mock_client_service.edit_vol.return_value = ( FAKE_CREATE_VOLUME_POSITIVE_RESPONSE) self.driver.unmanage({'name': 'volume-abcdef'}) expected_calls = [ mock.call.edit_vol( 'volume-abcdef', {'data': {'agent_type': 'none'}}), mock.call.online_vol('volume-abcdef', False)] self.mock_client_service.assert_has_calls(expected_calls) @mock.patch(NIMBLE_URLLIB2) @mock.patch(NIMBLE_CLIENT) @mock.patch.object(obj_volume.VolumeList, 'get_all_by_host', mock.Mock(return_value=[])) @NimbleDriverBaseTestCase.client_mock_decorator(create_configuration( 'nimble', 'nimble_pass', '10.18.108.55', 'default', '*')) def test_unmanage_with_invalid_volume(self): self.mock_client_service.get_vol_info.side_effect = ( FAKE_VOLUME_INFO_NEGATIVE_RESPONSE) self.assertRaises( exception.VolumeBackendAPIException, self.driver.unmanage, {'name': 'volume-abcdef'} ) @mock.patch(NIMBLE_URLLIB2) @mock.patch(NIMBLE_CLIENT) @mock.patch.object(obj_volume.VolumeList, 'get_all_by_host', mock.Mock(return_value=[])) @NimbleDriverBaseTestCase.client_mock_decorator(create_configuration( 'nimble', 'nimble_pass', '10.18.108.55', 'default', '*')) def test_unmanage_with_invalid_agent_type(self): self.mock_client_service.get_vol_info.return_value = ( FAKE_GET_VOL_INFO_ONLINE) self.assertRaises( exception.InvalidVolume, self.driver.unmanage, {'name': 'volume-abcdef'} ) @mock.patch(NIMBLE_URLLIB2) @mock.patch(NIMBLE_CLIENT) @mock.patch.object(obj_volume.VolumeList, 'get_all_by_host', mock.Mock(return_value=[])) @mock.patch.object(volume_types, 'get_volume_type', mock.Mock(type_id=FAKE_TYPE_ID_NEW, return_value={ 'id': FAKE_TYPE_ID_NEW, 'extra_specs': {'nimble:perfpol-name': 'default', 'nimble:encryption': 'yes', 'multiattach': False, 'nimble:iops-limit': '1024'}})) @NimbleDriverBaseTestCase.client_mock_decorator(create_configuration( 'nimble', 'nimble_pass', '10.18.108.55', 'default', '*')) def test_retype(self): self.mock_client_service.get_vol_info.return_value = ( FAKE_GET_VOL_INFO_ONLINE) retype, update = self.driver.retype(None, FAKE_GET_VOL_INFO_ONLINE, volume_types.get_volume_type( None, FAKE_TYPE_ID_NEW), None, None) self.assertTrue(retype) self.assertIsNone(update) @mock.patch(NIMBLE_URLLIB2) @mock.patch(NIMBLE_ISCSI_DRIVER) @mock.patch.object(nimble.NimbleRestAPIExecutor, 'login') @mock.patch.object(nimble.NimbleRestAPIExecutor, 'get_performance_policy_id') @mock.patch.object(nimble.NimbleRestAPIExecutor, 'get_pool_info') @mock.patch.object(nimble.NimbleRestAPIExecutor, 'get_folder_id') @NimbleDriverBaseTestCase.client_mock_decorator_nimble_api( 'nimble', 'nimble_pass', '10.18.108.55', 'False') def test_nimble_extraspecs_retype(self, mock_folder, mock_pool, mock_perf_id, mock_login): mock_folder.return_value = None mock_pool.return_value = None mock_perf_id.return_value = None mock_login.return_value = None data = self.driver.get_valid_nimble_extraspecs( FAKE_EXTRA_SPECS, FAKE_GET_VOL_INFO_RETYPE) self.assertTrue(data['multi_initiator']) @mock.patch(NIMBLE_URLLIB2) @mock.patch(NIMBLE_CLIENT) @mock.patch.object(obj_volume.VolumeList, 'get_all_by_host', mock.Mock(return_value=[])) @NimbleDriverBaseTestCase.client_mock_decorator(create_configuration( 'nimble', 'nimble_pass', '10.18.108.55', 'default', '*', True, None, 15.0)) def test_get_volume_stats(self): self.mock_client_service.get_group_info.return_value = ( FAKE_POSITIVE_GROUP_INFO_RESPONSE) expected_res = {'driver_version': DRIVER_VERSION, 'vendor_name': 'Nimble', 'volume_backend_name': 'NIMBLE', 'storage_protocol': 'iSCSI', 'pools': [{'pool_name': 'NIMBLE', 
'total_capacity_gb': 7466.30419921875, 'free_capacity_gb': 94.16706105787307, 'reserved_percentage': 0, 'QoS_support': False, 'multiattach': True, 'thin_provisioning_support': True, 'consistent_group_snapshot_enabled': True, 'replication_enabled': False, 'consistent_group_replication_enabled': False, 'max_over_subscription_ratio': 15.0}]} self.assertEqual( expected_res, self.driver.get_volume_stats(refresh=True)) @mock.patch(NIMBLE_URLLIB2) @mock.patch(NIMBLE_CLIENT) @mock.patch.object(obj_volume.VolumeList, 'get_all_by_host', mock.Mock(return_value=[])) @NimbleDriverBaseTestCase.client_mock_decorator(create_configuration( 'nimble', 'nimble_pass', '10.18.108.55', 'default', '*')) def test_is_volume_backup_clone(self): self.mock_client_service.get_vol_info.return_value = ( FAKE_GET_VOL_INFO_BACKUP_RESPONSE) self.mock_client_service.get_snap_info_by_id.return_value = ( FAKE_GET_SNAP_INFO_BACKUP_RESPONSE) self.mock_client_service.get_snap_info_detail.return_value = ( FAKE_GET_SNAP_INFO_BACKUP_RESPONSE) self.mock_client_service.get_volume_name.return_value = ( 'volume-' + fake.VOLUME2_ID) volume = obj_volume.Volume(context.get_admin_context(), id=fake.VOLUME_ID, _name_id=None) self.assertEqual(("test-backup-snap", "volume-" + fake.VOLUME2_ID), self.driver.is_volume_backup_clone(volume)) expected_calls = [ mock.call.get_vol_info('volume-' + fake.VOLUME_ID), mock.call.get_snap_info_by_id('test-backup-snap', 'volume-' + fake.VOLUME2_ID) ] self.mock_client_service.assert_has_calls(expected_calls) @mock.patch(NIMBLE_URLLIB2) @mock.patch(NIMBLE_CLIENT) @NimbleDriverBaseTestCase.client_mock_decorator(create_configuration( NIMBLE_SAN_LOGIN, NIMBLE_SAN_PASS, NIMBLE_MANAGEMENT_IP, 'default', '*', devices=REPL_DEVICES)) def test_enable_replication(self): ctx = context.get_admin_context() group = mock.MagicMock() volumes = [fake_volume.fake_volume_obj(None)] return_values = self.driver.enable_replication(ctx, group, volumes) self.mock_client_service.set_schedule_for_volcoll.assert_called_once() model_update = return_values[0] self.assertEqual(model_update['replication_status'], 'enabled') @mock.patch(NIMBLE_URLLIB2) @mock.patch(NIMBLE_CLIENT) @NimbleDriverBaseTestCase.client_mock_decorator(create_configuration( NIMBLE_SAN_LOGIN, NIMBLE_SAN_PASS, NIMBLE_MANAGEMENT_IP, 'default', '*', devices=REPL_DEVICES)) def test_disable_replication(self): ctx = context.get_admin_context() group = mock.MagicMock() volumes = [fake_volume.fake_volume_obj(None)] return_values = self.driver.disable_replication(ctx, group, volumes) self.mock_client_service.delete_schedule.assert_called_once() model_update = return_values[0] self.assertEqual(model_update['replication_status'], 'disabled') @mock.patch(NIMBLE_URLLIB2) @mock.patch(NIMBLE_CLIENT) @NimbleDriverBaseTestCase.client_mock_decorator(create_configuration( NIMBLE_SAN_LOGIN, NIMBLE_SAN_PASS, NIMBLE_MANAGEMENT_IP, 'default', '*', devices=REPL_DEVICES)) def test_time_to_secs(self): time_secs = [('01:05', 3900), ('01:02:15am', 3735), ('03:07:20pm', 54440)] for time, seconds in time_secs: ret_secs = self.driver._time_to_secs(time) self.assertEqual(ret_secs, seconds) @mock.patch(NIMBLE_URLLIB2) @mock.patch(NIMBLE_CLIENT) @NimbleDriverBaseTestCase.client_mock_decorator(create_configuration( NIMBLE_SAN_LOGIN, NIMBLE_SAN_PASS, NIMBLE_MANAGEMENT_IP, 'default', '*', devices=REPL_DEVICES)) def test_failover_replication(self): ctx = context.get_admin_context() group = mock.MagicMock() volumes = [fake_volume.fake_volume_obj(None)] return_values = self.driver.failover_replication( ctx, 
group, volumes, 'secondary') self.mock_client_service.handover.assert_called() group_update = return_values[0] self.assertEqual(group_update['replication_status'], 'failed-over') return_values = self.driver.failover_replication( ctx, group, volumes, 'default') self.mock_client_service.handover.assert_called() group_update = return_values[0] self.assertEqual(group_update['replication_status'], 'enabled') class NimbleDriverSnapshotTestCase(NimbleDriverBaseTestCase): """Tests snapshot related api's.""" @mock.patch(NIMBLE_URLLIB2) @mock.patch(NIMBLE_CLIENT) @mock.patch.object(obj_volume.VolumeList, 'get_all_by_host', mock.Mock(return_value=[])) @NimbleDriverBaseTestCase.client_mock_decorator(create_configuration( 'nimble', 'nimble_pass', '10.18.108.55', 'default', '*')) def test_create_snapshot(self): self.mock_client_service.snap_vol.return_value = ( FAKE_GENERIC_POSITIVE_RESPONSE) self.driver.create_snapshot( {'volume_name': 'testvolume', 'name': 'testvolume-snap1', 'display_name': ''}) self.mock_client_service.snap_vol.assert_called_once_with( {'volume_name': 'testvolume', 'name': 'testvolume-snap1', 'display_name': ''}) @mock.patch(NIMBLE_URLLIB2) @mock.patch(NIMBLE_CLIENT) @mock.patch.object(obj_volume.VolumeList, 'get_all_by_host', mock.Mock(return_value=[])) @NimbleDriverBaseTestCase.client_mock_decorator(create_configuration( 'nimble', 'nimble_pass', '10.18.108.55', 'default', '*')) def test_delete_snapshot(self): self.mock_client_service.online_snap.return_value = ( FAKE_GENERIC_POSITIVE_RESPONSE) self.mock_client_service.delete_snap.return_value = ( FAKE_GENERIC_POSITIVE_RESPONSE) self.driver.delete_snapshot( {'volume_name': 'testvolume', 'name': 'testvolume-snap1'}) expected_calls = [mock.call.online_snap( 'testvolume', False, 'testvolume-snap1'), mock.call.delete_snap('testvolume', 'testvolume-snap1')] self.mock_client_service.assert_has_calls(expected_calls) @mock.patch(NIMBLE_URLLIB2) @mock.patch(NIMBLE_CLIENT) @mock.patch.object(obj_volume.VolumeList, 'get_all_by_host', mock.Mock(return_value=[])) @mock.patch.object(volume_types, 'get_volume_type_extra_specs', mock.Mock(type_id=FAKE_TYPE_ID, return_value={ 'nimble:perfpol-name': 'default', 'nimble:encryption': 'yes', 'multiattach': False})) @NimbleDriverBaseTestCase.client_mock_decorator(create_configuration( 'nimble', 'nimble_pass', '10.18.108.55', 'default', '*')) def test_create_volume_from_snapshot(self): self.mock_client_service.clone_vol.return_value = ( FAKE_GENERIC_POSITIVE_RESPONSE) self.mock_client_service.get_vol_info.return_value = ( FAKE_GET_VOL_INFO_RESPONSE) self.mock_client_service.get_netconfig.return_value = ( FAKE_POSITIVE_NETCONFIG_RESPONSE) self.assertEqual({ 'provider_location': '172.18.108.21:3260 iqn.test', 'provider_auth': None}, self.driver.create_volume_from_snapshot( {'name': 'clone-testvolume', 'size': 2, 'volume_type_id': FAKE_TYPE_ID}, {'volume_name': 'testvolume', 'name': 'testvolume-snap1', 'volume_size': 1})) expected_calls = [ mock.call.clone_vol( {'name': 'clone-testvolume', 'volume_type_id': FAKE_TYPE_ID, 'size': 2}, {'volume_name': 'testvolume', 'name': 'testvolume-snap1', 'volume_size': 1}, False, False, 'iSCSI', 'default'), mock.call.edit_vol('clone-testvolume', {'data': {'size': 2048, 'snap_limit': sys.maxsize, 'warn_level': 80, 'reserve': 0, 'limit': 100}})] self.mock_client_service.assert_has_calls(expected_calls) @mock.patch(NIMBLE_URLLIB2) @mock.patch(NIMBLE_CLIENT) @NimbleDriverBaseTestCase.client_mock_decorator(create_configuration( 'nimble', 'nimble_pass', '10.18.108.55', 
'default', '*')) def test_revert_to_snapshot(self): self.mock_client_service.online_vol.return_value = ( FAKE_GENERIC_POSITIVE_RESPONSE) self.mock_client_service.volume_restore.return_value = ( FAKE_GENERIC_POSITIVE_RESPONSE) self.mock_client_service.get_vol_info.return_value = ( FAKE_GET_VOL_INFO_REVERT) self.mock_client_service.get_netconfig.return_value = ( FAKE_POSITIVE_NETCONFIG_RESPONSE) self.mock_client_service.get_snap_info.return_value = ( FAKE_SNAP_INFO_REVERT) ctx = context.get_admin_context() self.driver.revert_to_snapshot(ctx, {'id': fake.VOLUME_ID, 'size': 1, 'name': 'testvolume'}, {'id': fake.SNAPSHOT2_ID, 'name': 'testsnap', 'volume_id': fake.VOLUME_ID}) expected_calls = [mock.call.online_vol('testvolume', False), mock.call.volume_restore('testvolume', {'data': {'id': fake.VOLUME_ID, 'base_snap_id': fake.SNAPSHOT2_ID}}), mock.call.online_vol('testvolume', True)] self.mock_client_service.assert_has_calls(expected_calls) @mock.patch(NIMBLE_URLLIB2) @mock.patch(NIMBLE_CLIENT) @NimbleDriverBaseTestCase.client_mock_decorator(create_configuration( 'nimble', 'nimble_pass', '10.18.108.55', 'default', '*')) def test_revert_to_snapshot_negative(self): self.mock_client_service.online_vol.return_value = ( FAKE_GENERIC_POSITIVE_RESPONSE) self.mock_client_service.volume_restore.side_effect = ( FAKE_VOLUME_RESTORE_NEGATIVE_RESPONSE) self.mock_client_service.get_vol_info.return_value = ( FAKE_GET_VOL_INFO_REVERT) self.mock_client_service.get_netconfig.return_value = ( FAKE_POSITIVE_NETCONFIG_RESPONSE) self.mock_client_service.get_snap_info.return_value = ( FAKE_SNAP_INFO_REVERT) ctx = context.get_admin_context() self.assertRaises(exception.VolumeBackendAPIException, self.driver.revert_to_snapshot, ctx, {'id': fake.VOLUME_ID, 'size': 1, 'name': 'testvolume'}, {'id': fake.SNAPSHOT_ID, 'name': 'testsnap', 'volume_id': fake.VOLUME_ID}) class NimbleDriverConnectionTestCase(NimbleDriverBaseTestCase): """Tests Connection related api's.""" @mock.patch(NIMBLE_URLLIB2) @mock.patch(NIMBLE_CLIENT) @mock.patch.object(obj_volume.VolumeList, 'get_all_by_host', mock.Mock(return_value=[])) @NimbleDriverBaseTestCase.client_mock_decorator(create_configuration( 'nimble', 'nimble_pass', '10.18.108.55', 'default', '*')) def test_initialize_connection_igroup_exist(self): self.mock_client_service.get_initiator_grp_list.return_value = ( FAKE_IGROUP_LIST_RESPONSE) expected_res = { 'driver_volume_type': 'iscsi', 'data': { 'target_discovered': False, 'discard': True, 'volume_id': 12, 'target_iqn': '13', 'target_lun': 0, 'target_portal': '12'}} self.assertEqual( expected_res, self.driver.initialize_connection( {'name': 'test-volume', 'provider_location': '12 13', 'id': 12}, {'initiator': 'test-initiator1'})) @mock.patch(NIMBLE_URLLIB2) @mock.patch(NIMBLE_CLIENT) @mock.patch.object(obj_volume.VolumeList, 'get_all_by_host', mock.Mock(return_value=[])) @NimbleDriverBaseTestCase.client_mock_decorator(create_configuration( 'nimble', 'nimble_pass', '10.18.108.55', 'default', '*')) @mock.patch(NIMBLE_ISCSI_DRIVER + '._get_data_ips') @mock.patch(NIMBLE_ISCSI_DRIVER + ".get_lun_number") @mock.patch(NIMBLE_ISCSI_DRIVER + '._get_gst_for_group') def test_initialize_connection_group_scoped_target(self, mock_gst_name, mock_lun_number, mock_data_ips): mock_data_ips.return_value = ['12', '13'] mock_lun_number.return_value = 0 mock_gst_name.return_value = "group_target_name" self.mock_client_service.get_initiator_grp_list.return_value = ( FAKE_IGROUP_LIST_RESPONSE) expected_res = { 'driver_volume_type': 'iscsi', 'data': { 
'target_discovered': False, 'discard': True, 'volume_id': fake.VOLUME_ID, 'target_iqns': ['group_target_name', 'group_target_name'], 'target_luns': [0, 0], 'target_portals': ['12', '13']}} self.assertEqual( expected_res, self.driver.initialize_connection( {'name': 'test-volume', 'provider_location': '12 group_target_name', 'id': fake.VOLUME_ID}, {'initiator': 'test-initiator1'})) @mock.patch(NIMBLE_URLLIB2) @mock.patch(NIMBLE_CLIENT) @mock.patch.object(obj_volume.VolumeList, 'get_all_by_host', mock.Mock(return_value=[])) @NimbleDriverBaseTestCase.client_mock_decorator(create_configuration( 'nimble', 'nimble_pass', '10.18.108.55', 'default', '*')) def test_initialize_connection_live_migration(self): self.mock_client_service.get_initiator_grp_list.return_value = ( FAKE_IGROUP_LIST_RESPONSE) expected_res = { 'driver_volume_type': 'iscsi', 'data': { 'target_discovered': False, 'discard': True, 'volume_id': fake.VOLUME_ID, 'target_iqn': '13', 'target_lun': 0, 'target_portal': '12'}} self.assertEqual( expected_res, self.driver.initialize_connection( {'name': 'test-volume', 'provider_location': '12 13', 'id': fake.VOLUME_ID}, {'initiator': 'test-initiator1'})) self.driver.initialize_connection( {'name': 'test-volume', 'provider_location': '12 13', 'id': fake.VOLUME_ID}, {'initiator': 'test-initiator1'}) # 2 or more calls to initialize connection and add_acl for live # migration to work expected_calls = [ mock.call.get_initiator_grp_list(), mock.call.add_acl({'name': 'test-volume', 'provider_location': '12 13', 'id': fake.VOLUME_ID}, 'test-igrp1'), mock.call.get_initiator_grp_list(), mock.call.add_acl({'name': 'test-volume', 'provider_location': '12 13', 'id': fake.VOLUME_ID}, 'test-igrp1')] self.mock_client_service.assert_has_calls(expected_calls) @mock.patch(NIMBLE_URLLIB2) @mock.patch(NIMBLE_CLIENT) @mock.patch.object(obj_volume.VolumeList, 'get_all_by_host', mock.Mock(return_value=[])) @NimbleDriverBaseTestCase.client_mock_decorator_fc(create_configuration( 'nimble', 'nimble_pass', '10.18.108.55', 'default', '*')) @mock.patch(NIMBLE_FC_DRIVER + ".get_lun_number") @mock.patch(NIMBLE_FC_DRIVER + ".get_wwpns_from_array") def test_initialize_connection_fc_igroup_exist(self, mock_wwpns, mock_lun_number): mock_lun_number.return_value = 13 mock_wwpns.return_value = ["1111111111111101"] self.mock_client_service.get_initiator_grp_list.return_value = ( FAKE_IGROUP_LIST_RESPONSE_FC) expected_res = { 'driver_volume_type': 'fibre_channel', 'data': { 'target_lun': 13, 'target_discovered': True, 'discard': True, 'target_wwn': ["1111111111111101"], 'initiator_target_map': {'1000000000000000': ['1111111111111101']}}} self.assertEqual( expected_res, self.driver.initialize_connection( {'name': 'test-volume', 'provider_location': 'array1', 'id': fake.VOLUME_ID}, {'initiator': 'test-initiator1', 'wwpns': ['1000000000000000']})) @mock.patch(NIMBLE_URLLIB2) @mock.patch(NIMBLE_CLIENT) @mock.patch.object(obj_volume.VolumeList, 'get_all_by_host', mock.Mock(return_value=[])) @NimbleDriverBaseTestCase.client_mock_decorator(create_configuration( 'nimble', 'nimble_pass', '10.18.108.55', 'default', '*')) @mock.patch(NIMBLE_RANDOM) def test_initialize_connection_igroup_not_exist(self, mock_random): mock_random.sample.return_value = 'abcdefghijkl' self.mock_client_service.get_initiator_grp_list.return_value = ( FAKE_IGROUP_LIST_RESPONSE) expected_res = { 'driver_volume_type': 'iscsi', 'data': { 'target_discovered': False, 'discard': True, 'target_lun': 0, 'volume_id': fake.VOLUME_ID, 'target_iqn': '13', 'target_portal': 
'12'}} self.assertEqual( expected_res, self.driver.initialize_connection( {'name': 'test-volume', 'provider_location': '12 13', 'id': fake.VOLUME_ID}, {'initiator': 'test-initiator3'})) @mock.patch(NIMBLE_URLLIB2) @mock.patch(NIMBLE_CLIENT) @mock.patch.object(obj_volume.VolumeList, 'get_all_by_host', mock.Mock(return_value=[])) @NimbleDriverBaseTestCase.client_mock_decorator_fc(create_configuration( 'nimble', 'nimble_pass', '10.18.108.55', 'default', '*')) @mock.patch(NIMBLE_FC_DRIVER + ".get_wwpns_from_array") @mock.patch(NIMBLE_FC_DRIVER + ".get_lun_number") @mock.patch(NIMBLE_RANDOM) def test_initialize_connection_fc_igroup_not_exist(self, mock_random, mock_lun_number, mock_wwpns): mock_random.sample.return_value = 'abcdefghijkl' mock_lun_number.return_value = 13 mock_wwpns.return_value = ["1111111111111101"] self.mock_client_service.get_initiator_grp_list.return_value = ( FAKE_IGROUP_LIST_RESPONSE_FC) expected_res = { 'driver_volume_type': 'fibre_channel', 'data': { 'target_lun': 13, 'target_discovered': True, 'discard': True, 'target_wwn': ["1111111111111101"], 'initiator_target_map': {'1000000000000000': ['1111111111111101']}}} self.driver._create_igroup_for_initiator("test-initiator3", [1111111111111101]) self.assertEqual( expected_res, self.driver.initialize_connection( {'name': 'test-volume', 'provider_location': 'array1', 'id': fake.VOLUME_ID}, {'initiator': 'test-initiator3', 'wwpns': ['1000000000000000']})) expected_calls = [mock.call.create_initiator_group_fc( 'openstack-abcdefghijkl'), mock.call.add_initiator_to_igroup_fc('openstack-abcdefghijkl', 1111111111111101)] self.mock_client_service.assert_has_calls(expected_calls) @mock.patch(NIMBLE_URLLIB2) @mock.patch(NIMBLE_CLIENT) @mock.patch.object(obj_volume.VolumeList, 'get_all_by_host', mock.Mock(return_value=[])) @NimbleDriverBaseTestCase.client_mock_decorator(create_configuration( 'nimble', 'nimble_pass', '10.18.108.55', 'default', '*')) def test_terminate_connection_positive(self): self.mock_client_service.get_initiator_grp_list.return_value = ( FAKE_IGROUP_LIST_RESPONSE) ctx = context.get_admin_context() volume = fake_volume.fake_volume_obj( ctx, name='test-volume', host='fakehost@nimble#Openstack', provider_location='12 13', id=fake.VOLUME_ID, multiattach=False) self.driver.terminate_connection( volume, {'initiator': 'test-initiator1'}) expected_calls = [mock.call._get_igroupname_for_initiator( 'test-initiator1'), mock.call.remove_acl({'name': 'test-volume'}, 'test-igrp1')] self.mock_client_service.assert_has_calls( self.mock_client_service.method_calls, expected_calls) @mock.patch(NIMBLE_URLLIB2) @mock.patch(NIMBLE_CLIENT) @mock.patch.object(obj_volume.VolumeList, 'get_all_by_host', mock.Mock(return_value=[])) @NimbleDriverBaseTestCase.client_mock_decorator(create_configuration( 'nimble', 'nimble_pass', '10.18.108.55', 'default', '*')) def test_terminate_connection_without_connector(self): self.mock_client_service.get_initiator_grp_list.return_value = ( FAKE_IGROUP_LIST_RESPONSE) self.driver.terminate_connection( {'name': 'test-volume', 'provider_location': '12 13', 'id': fake.VOLUME_ID}, None) expected_calls = [mock.call._get_igroupname_for_initiator( 'test-initiator1'), mock.call.remove_all_acls({'name': 'test-volume'})] self.mock_client_service.assert_has_calls( self.mock_client_service.method_calls, expected_calls) @mock.patch(NIMBLE_URLLIB2) @mock.patch(NIMBLE_CLIENT) @mock.patch.object(obj_volume.VolumeList, 'get_all_by_host', mock.Mock(return_value=[])) 
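# FC variant of terminate_connection: the connector WWPN
# '1000000000000000' should be looked up in colon-separated form
# ('10:00:00:00:00:00:00:00') before the ACL is removed.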
@NimbleDriverBaseTestCase.client_mock_decorator_fc(create_configuration( 'nimble', 'nimble_pass', '10.18.108.55', 'default', '*')) @mock.patch(NIMBLE_FC_DRIVER + ".get_wwpns_from_array") def test_terminate_connection_positive_fc(self, mock_wwpns): mock_wwpns.return_value = ["1111111111111101"] self.mock_client_service.get_initiator_grp_list.return_value = ( FAKE_IGROUP_LIST_RESPONSE_FC) ctx = context.get_admin_context() volume = fake_volume.fake_volume_obj( ctx, name='test-volume', host='fakehost@nimble#Openstack', provider_location='12 13', id=fake.VOLUME_ID, multiattach=False) self.driver.terminate_connection( volume, {'initiator': 'test-initiator1', 'wwpns': ['1000000000000000']}) expected_calls = [ mock.call.get_igroupname_for_initiator_fc( "10:00:00:00:00:00:00:00"), mock.call.remove_acl({'name': 'test-volume'}, 'test-igrp1')] self.mock_client_service.assert_has_calls( self.mock_client_service.method_calls, expected_calls) @mock.patch(NIMBLE_URLLIB2) @mock.patch(NIMBLE_CLIENT) @mock.patch.object(obj_volume.VolumeList, 'get_all_by_host', mock.Mock(return_value=[])) @NimbleDriverBaseTestCase.client_mock_decorator(create_configuration( 'nimble', 'nimble_pass', '10.18.108.55', 'default', '*')) def test_terminate_connection_negative(self): self.mock_client_service.get_initiator_grp_list.return_value = ( FAKE_IGROUP_LIST_RESPONSE) ctx = context.get_admin_context() volume = fake_volume.fake_volume_obj( ctx, name='test-volume', host='fakehost@nimble#Openstack', provider_location='12 13', id=fake.VOLUME_ID, multiattach=False) self.assertRaises( exception.VolumeDriverException, self.driver.terminate_connection, volume, {'initiator': 'test-initiator3'}) @mock.patch(NIMBLE_URLLIB2) @mock.patch(NIMBLE_CLIENT) @mock.patch.object(obj_volume.VolumeList, 'get_all_by_host', mock.Mock(return_value=[])) @NimbleDriverBaseTestCase.client_mock_decorator_fc(create_configuration( 'nimble', 'nimble_pass', '10.18.108.55', 'default', '*')) @mock.patch(NIMBLE_FC_DRIVER + ".get_wwpns_from_array") def test_terminate_connection_negative_fc(self, mock_wwpns): mock_wwpns.return_value = ["1111111111111101"] self.mock_client_service.get_initiator_grp_list.return_value = ( FAKE_IGROUP_LIST_RESPONSE_FC) ctx = context.get_admin_context() volume = fake_volume.fake_volume_obj( ctx, name='test-volume', host='fakehost@nimble#Openstack', provider_location='12 13', id=fake.VOLUME_ID, multiattach=False) self.assertRaises( exception.VolumeDriverException, self.driver.terminate_connection, volume, {'initiator': 'test-initiator3', 'wwpns': ['1000000000000010']}) @mock.patch(NIMBLE_URLLIB2) @mock.patch(NIMBLE_CLIENT) @mock.patch.object(obj_volume.VolumeList, 'get_all_by_host', mock.Mock(return_value=[])) @NimbleDriverBaseTestCase.client_mock_decorator(create_configuration( 'nimble', 'nimble_pass', '10.18.108.55', 'default', '*')) def test_terminate_connection_multiattach(self): self.mock_client_service.get_initiator_grp_list.return_value = ( FAKE_IGROUP_LIST_RESPONSE) ctx = context.get_admin_context() att_1 = fake_volume.volume_attachment_ovo( ctx, id=uuidutils.generate_uuid()) att_2 = fake_volume.volume_attachment_ovo( ctx, id=uuidutils.generate_uuid()) volume = fake_volume.fake_volume_obj( ctx, name='test-volume', host='fakehost@nimble#Openstack', provider_location='12 13', id=fake.VOLUME_ID, multiattach=True) volume.volume_attachment.objects = [att_1, att_2] self.driver.terminate_connection( volume, {'initiator': 'test-initiator1'}) self.mock_client_service.remove_acl.assert_not_called() @mock.patch(NIMBLE_URLLIB2) 
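# With multiple attachments still present (previous test) the ACL must be
# left in place; once only a single attachment remains, terminate_connection
# is expected to remove it, as the following test verifies.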
@mock.patch(NIMBLE_CLIENT) @mock.patch.object(obj_volume.VolumeList, 'get_all_by_host', mock.Mock(return_value=[])) @NimbleDriverBaseTestCase.client_mock_decorator(create_configuration( 'nimble', 'nimble_pass', '10.18.108.55', 'default', '*')) def test_terminate_connection_multiattach_complete(self): self.mock_client_service.get_initiator_grp_list.return_value = ( FAKE_IGROUP_LIST_RESPONSE) ctx = context.get_admin_context() att_1 = fake_volume.volume_attachment_ovo( ctx, id=uuidutils.generate_uuid()) volume = fake_volume.fake_volume_obj( ctx, name='test-volume', host='fakehost@nimble#Openstack', provider_location='12 13', id=fake.VOLUME_ID, multiattach=True) volume.volume_attachment.objects = [att_1] self.driver.terminate_connection( volume, {'initiator': 'test-initiator1'}) expected_calls = [mock.call._get_igroupname_for_initiator( 'test-initiator1'), mock.call.remove_acl({'name': 'test-volume'}, 'test-igrp1')] self.mock_client_service.assert_has_calls( self.mock_client_service.method_calls, expected_calls) @mock.patch(NIMBLE_URLLIB2) @mock.patch(NIMBLE_CLIENT) @mock.patch.object(obj_volume.VolumeList, 'get_all_by_host', mock.Mock(return_value=[])) @NimbleDriverBaseTestCase.client_mock_decorator_fc(create_configuration( 'nimble', 'nimble_pass', '10.18.108.55', 'default', '*')) @mock.patch(NIMBLE_FC_DRIVER + ".get_wwpns_from_array") def test_terminate_connection_multiattach_fc(self, mock_wwpns): mock_wwpns.return_value = ["1111111111111101"] self.mock_client_service.get_initiator_grp_list.return_value = ( FAKE_IGROUP_LIST_RESPONSE_FC) ctx = context.get_admin_context() att_1 = fake_volume.volume_attachment_ovo( ctx, id=uuidutils.generate_uuid()) att_2 = fake_volume.volume_attachment_ovo( ctx, id=uuidutils.generate_uuid()) volume = fake_volume.fake_volume_obj( ctx, name='test-volume', host='fakehost@nimble#Openstack', provider_location='12 13', id=fake.VOLUME_ID, multiattach=True) volume.volume_attachment.objects = [att_1, att_2] self.driver.terminate_connection( volume, {'initiator': 'test-initiator1', 'wwpns': ['1000000000000000']}) self.mock_client_service.remove_acl.assert_not_called() @mock.patch(NIMBLE_URLLIB2) @mock.patch(NIMBLE_CLIENT) @mock.patch.object(obj_volume.VolumeList, 'get_all_by_host', mock.Mock(return_value=[])) @NimbleDriverBaseTestCase.client_mock_decorator_fc(create_configuration( 'nimble', 'nimble_pass', '10.18.108.55', 'default', '*')) @mock.patch(NIMBLE_FC_DRIVER + ".get_wwpns_from_array") def test_terminate_connection_multiattach_complete_fc(self, mock_wwpns): mock_wwpns.return_value = ["1111111111111101"] self.mock_client_service.get_initiator_grp_list.return_value = ( FAKE_IGROUP_LIST_RESPONSE_FC) ctx = context.get_admin_context() att_1 = fake_volume.volume_attachment_ovo( ctx, id=uuidutils.generate_uuid()) volume = fake_volume.fake_volume_obj( ctx, name='test-volume', host='fakehost@nimble#Openstack', provider_location='12 13', id=fake.VOLUME_ID, multiattach=True) volume.volume_attachment.objects = [att_1] self.driver.terminate_connection( volume, {'initiator': 'test-initiator1', 'wwpns': ['1000000000000000']}) expected_calls = [ mock.call.get_igroupname_for_initiator_fc( "10:00:00:00:00:00:00:00"), mock.call.remove_acl({'name': 'test-volume'}, 'test-igrp1')] self.mock_client_service.assert_has_calls( self.mock_client_service.method_calls, expected_calls) @mock.patch(NIMBLE_URLLIB2) @mock.patch(NIMBLE_CLIENT) @NimbleDriverBaseTestCase.client_mock_decorator(create_configuration( NIMBLE_SAN_LOGIN, NIMBLE_SAN_PASS, NIMBLE_MANAGEMENT_IP, 'default', '*')) 
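# Consistency-group tests: generic (non-CG) groups must raise
# NotImplementedError, while CG-snapshot-enabled groups are serviced through
# the backend volume-collection APIs (delete_volcoll, associate_volcoll,
# snapcoll_create).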
@mock.patch.object(volume_utils, 'is_group_a_cg_snapshot_type') def test_create_group_positive(self, mock_is_cg): mock_is_cg.return_value = True ctx = context.get_admin_context() self.group = fake_group.fake_group_obj( ctx, id = fake.GROUP_ID) model_update = self.driver.create_group(ctx, self.group) self.assertEqual(fields.GroupStatus.AVAILABLE, model_update['status']) @mock.patch(NIMBLE_URLLIB2) @mock.patch(NIMBLE_CLIENT) @NimbleDriverBaseTestCase.client_mock_decorator(create_configuration( NIMBLE_SAN_LOGIN, NIMBLE_SAN_PASS, NIMBLE_MANAGEMENT_IP, 'default', '*')) @mock.patch.object(volume_utils, 'is_group_a_cg_snapshot_type') def test_create_generic_group(self, mock_is_cg): mock_is_cg.return_value = False ctx = context.get_admin_context() self.group = fake_group.fake_group_obj( ctx, id=fake.GROUP_ID, status='available') self.assertRaises( NotImplementedError, self.driver.create_group, ctx, self.group ) mock_is_cg.assert_called_once_with(self.group) @mock.patch(NIMBLE_URLLIB2) @mock.patch(NIMBLE_CLIENT) @NimbleDriverBaseTestCase.client_mock_decorator(create_configuration( NIMBLE_SAN_LOGIN, NIMBLE_SAN_PASS, NIMBLE_MANAGEMENT_IP, 'default', '*')) @mock.patch.object(volume_utils, 'is_group_a_cg_snapshot_type') def test_delete_generic_group(self, mock_is_cg): mock_is_cg.return_value = False ctx = context.get_admin_context() group = mock.MagicMock() volumes = [fake_volume.fake_volume_obj(None)] self.assertRaises( NotImplementedError, self.driver.delete_group, ctx, group, volumes ) mock_is_cg.assert_called_once() @mock.patch(NIMBLE_URLLIB2) @mock.patch(NIMBLE_CLIENT) @NimbleDriverBaseTestCase.client_mock_decorator(create_configuration( NIMBLE_SAN_LOGIN, NIMBLE_SAN_PASS, NIMBLE_MANAGEMENT_IP, 'default', '*')) @mock.patch.object(volume_utils, 'is_group_a_cg_snapshot_type') @mock.patch('cinder.volume.group_types.get_group_type_specs') def test_delete_group_positive(self, mock_get_specs, mock_is_cg): mock_get_specs.return_value = ' True' mock_is_cg.return_value = True ctx = context.get_admin_context() group = mock.MagicMock() volumes = [fake_volume.fake_volume_obj(None)] self.driver.delete_group(ctx, group, volumes) self.mock_client_service.delete_volcoll.assert_called_once() @mock.patch(NIMBLE_URLLIB2) @mock.patch(NIMBLE_CLIENT) @NimbleDriverBaseTestCase.client_mock_decorator(create_configuration( NIMBLE_SAN_LOGIN, NIMBLE_SAN_PASS, NIMBLE_MANAGEMENT_IP, 'default', '*')) @mock.patch.object(volume_utils, 'is_group_a_cg_snapshot_type') def test_update_group(self, mock_is_cg): mock_is_cg.return_value = False group = mock.MagicMock() ctx = context.get_admin_context() self.assertRaises( NotImplementedError, self.driver.update_group, ctx, group ) mock_is_cg.assert_called_once_with(group) @mock.patch(NIMBLE_URLLIB2) @mock.patch(NIMBLE_CLIENT) @NimbleDriverBaseTestCase.client_mock_decorator(create_configuration( NIMBLE_SAN_LOGIN, NIMBLE_SAN_PASS, NIMBLE_MANAGEMENT_IP, 'default', '*')) @mock.patch.object(volume_utils, 'is_group_a_cg_snapshot_type') @mock.patch('cinder.volume.group_types.get_group_type_specs') @mock.patch(NIMBLE_ISCSI_DRIVER + '.is_volume_group_snap_type') def test_update_group_positive(self, vol_gs_enable, mock_get_specs, mock_is_cg): mock_get_specs.return_value = ' True' mock_is_cg.return_value = True self.mock_client_service.get_volume_id_by_name.return_value = ( FAKE_GET_VOLID_INFO_RESPONSE) self.mock_client_service.get_volcoll_id_by_name.return_value = ( FAKE_GET_VOLCOLL_INFO_RESPONSE) self.mock_client_service.associate_volcoll.return_value = ( FAKE_GET_SNAP_INFO_BACKUP_RESPONSE) ctx = 
context.get_admin_context() group = mock.MagicMock() volume1 = fake_volume.fake_volume_obj( ctx, name='testvolume-cg1', host='fakehost@nimble#Openstack', provider_location='12 13', id=fake.VOLUME_ID, consistency_group_snapshot_enabled=True) addvollist = [volume1] remvollist = [volume1] model_update = self.driver.update_group( ctx, group, addvollist, remvollist ) self.assertEqual(fields.GroupStatus.AVAILABLE, model_update[0]['status']) @mock.patch(NIMBLE_URLLIB2) @mock.patch(NIMBLE_CLIENT) @NimbleDriverBaseTestCase.client_mock_decorator(create_configuration( NIMBLE_SAN_LOGIN, NIMBLE_SAN_PASS, NIMBLE_MANAGEMENT_IP, 'default', '*')) @mock.patch.object(volume_utils, 'is_group_a_cg_snapshot_type') def test_create_group_from_src(self, mock_is_cg): mock_is_cg.return_value = False group = mock.MagicMock() ctx = context.get_admin_context() volumes = [fake_volume.fake_volume_obj(None)] self.assertRaises( NotImplementedError, self.driver.create_group_from_src, ctx, group, volumes ) mock_is_cg.assert_called_once_with(group) @mock.patch(NIMBLE_URLLIB2) @mock.patch(NIMBLE_CLIENT) @NimbleDriverBaseTestCase.client_mock_decorator(create_configuration( NIMBLE_SAN_LOGIN, NIMBLE_SAN_PASS, NIMBLE_MANAGEMENT_IP, 'default', '*')) @mock.patch.object(volume_utils, 'is_group_a_cg_snapshot_type') @mock.patch('cinder.volume.group_types.get_group_type_specs') @mock.patch(NIMBLE_ISCSI_DRIVER + ".create_cloned_volume") def test_create_group_from_src_positive(self, mock_clone, mock_get_specs, mock_is_cg): source_volume = volume_src_cg volume = volume_cg volume['source_volid'] = source_volume['id'] volume['display_name'] = "cg-volume" source_volume['display_name'] = "source-volume" mock_get_specs.return_value = ' True' mock_clone.return_value = volume['name'] mock_is_cg.return_value = True self.driver.create_group_from_src( context.get_admin_context(), FAKE_GROUP, [volume], source_group=FAKE_SRC_GROUP, source_vols=[source_volume]) self.mock_client_service.associate_volcoll.assert_called_once() @mock.patch(NIMBLE_URLLIB2) @mock.patch(NIMBLE_CLIENT) @NimbleDriverBaseTestCase.client_mock_decorator(create_configuration( NIMBLE_SAN_LOGIN, NIMBLE_SAN_PASS, NIMBLE_MANAGEMENT_IP, 'default', '*')) @mock.patch.object(volume_utils, 'is_group_a_cg_snapshot_type') @mock.patch('cinder.volume.group_types.get_group_type_specs') def test_create_group_snapshot_positive(self, mock_get_specs, mock_is_cg): mock_get_specs.return_value = ' True' mock_is_cg.return_value = True ctx = context.get_admin_context() group_snapshot = mock.MagicMock() snapshots = [fake_snapshot.fake_snapshot_obj(None)] self.driver.create_group_snapshot( ctx, group_snapshot, snapshots ) self.mock_client_service.snapcoll_create.assert_called_once() @mock.patch(NIMBLE_URLLIB2) @mock.patch(NIMBLE_CLIENT) @NimbleDriverBaseTestCase.client_mock_decorator(create_configuration( NIMBLE_SAN_LOGIN, NIMBLE_SAN_PASS, NIMBLE_MANAGEMENT_IP, 'default', '*')) @mock.patch.object(volume_utils, 'is_group_a_cg_snapshot_type') def test_delete_generic_group_snapshot(self, mock_is_cg): mock_is_cg.return_value = False group_snapshot = mock.MagicMock() snapshots = [fake_snapshot.fake_snapshot_obj(None)] ctx = context.get_admin_context() self.assertRaises( NotImplementedError, self.driver.delete_group_snapshot, ctx, group_snapshot, snapshots ) mock_is_cg.assert_called_once_with(group_snapshot) @mock.patch(NIMBLE_URLLIB2) @mock.patch(NIMBLE_CLIENT) @NimbleDriverBaseTestCase.client_mock_decorator(create_configuration( NIMBLE_SAN_LOGIN, NIMBLE_SAN_PASS, NIMBLE_MANAGEMENT_IP, 'default', '*')) 
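# Deleting a CG snapshot should translate into a single snapcoll_delete
# call on the array client.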
@mock.patch.object(volume_utils, 'is_group_a_cg_snapshot_type') @mock.patch('cinder.volume.group_types.get_group_type_specs') def test_delete_group_snapshot_positive(self, mock_get_specs, mock_is_cg): mock_get_specs.return_value = ' True' mock_is_cg.return_value = True ctx = context.get_admin_context() group_snapshot = mock.MagicMock() snapshots = [mock.Mock()] self.driver.delete_group_snapshot( ctx, group_snapshot, snapshots ) self.mock_client_service.snapcoll_delete.assert_called_once() @mock.patch(NIMBLE_URLLIB2) @mock.patch(NIMBLE_CLIENT) @NimbleDriverBaseTestCase.client_mock_decorator(create_configuration( NIMBLE_SAN_LOGIN, NIMBLE_SAN_PASS, NIMBLE_MANAGEMENT_IP, 'default', '*')) @mock.patch.object(volume_utils, 'is_group_a_cg_snapshot_type') def test_create_group_negative(self, mock_is_cg): mock_is_cg.return_value = True ctx = context.get_admin_context() self.vol_type = volume_type.VolumeType( name='volume_type', extra_specs= {'consistent_group_snapshot_enabled': ' False'}) FAKE_GROUP.volume_types = volume_type.VolumeTypeList( objects=[self.vol_type]) self.assertRaises(exception.InvalidInput, self.driver.create_group, ctx, FAKE_GROUP) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315577.2791202 cinder-27.0.0/cinder/tests/unit/volume/drivers/hpe/xp/0000775000175000017500000000000000000000000022644 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/volume/drivers/hpe/xp/__init__.py0000664000175000017500000000000000000000000024743 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/volume/drivers/hpe/xp/test_hpe_xp_rest_fc.py0000664000175000017500000015410000000000000027246 0ustar00zuulzuul00000000000000# Copyright (C) 2022, 2024, Hewlett Packard Enterprise, Ltd. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
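# These tests exercise the HPE XP FC driver through the shared Hitachi
# hbsd_rest/hbsd_common layers; CONFIG_MAP below supplies the fake array,
# port and credential values reused by the canned REST API responses.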
# """Unit tests for Hewlett Packard Enterprise Driver.""" import functools from unittest import mock from oslo_config import cfg import requests from requests import models from cinder import context as cinder_context from cinder.db.sqlalchemy import api as sqlalchemy_api from cinder import exception from cinder.objects import group_snapshot as obj_group_snap from cinder.objects import snapshot as obj_snap from cinder.tests.unit import fake_group from cinder.tests.unit import fake_group_snapshot from cinder.tests.unit import fake_snapshot from cinder.tests.unit import fake_volume from cinder.tests.unit import test from cinder.volume import configuration as conf from cinder.volume import driver from cinder.volume.drivers.hitachi import hbsd_common from cinder.volume.drivers.hitachi import hbsd_rest from cinder.volume.drivers.hitachi import hbsd_rest_api from cinder.volume.drivers.hpe.xp import hpe_xp_fc from cinder.volume.drivers.hpe.xp import hpe_xp_rest from cinder.volume import volume_types from cinder.volume import volume_utils from cinder.zonemanager import utils as fczm_utils # Configuration parameter values CONFIG_MAP = { 'serial': '886000123456', 'my_ip': '127.0.0.1', 'rest_server_ip_addr': '172.16.18.108', 'rest_server_ip_port': '23451', 'port_id': 'CL1-A', 'host_grp_name': 'HPEXP-0123456789abcdef', 'host_mode': 'LINUX/IRIX', 'host_wwn': '0123456789abcdef', 'target_wwn': '1111111123456789', 'user_id': 'user', 'user_pass': 'password', 'pool_name': 'test_pool', 'auth_user': 'auth_user', 'auth_password': 'auth_password', } # Dummy response for FC zoning device mapping DEVICE_MAP = { 'fabric_name': { 'initiator_port_wwn_list': [CONFIG_MAP['host_wwn']], 'target_port_wwn_list': [CONFIG_MAP['target_wwn']]}} DEFAULT_CONNECTOR = { 'host': 'host', 'ip': CONFIG_MAP['my_ip'], 'wwpns': [CONFIG_MAP['host_wwn']], 'multipath': False, } CTXT = cinder_context.get_admin_context() TEST_VOLUME = [] for i in range(5): volume = {} volume['id'] = '00000000-0000-0000-0000-{0:012d}'.format(i) volume['name'] = 'test-volume{0:d}'.format(i) volume['volume_type_id'] = '00000000-0000-0000-0000-{0:012d}'.format(i) if i == 3 or i == 4: volume['provider_location'] = None else: volume['provider_location'] = '{0:d}'.format(i) volume['size'] = 128 if i == 2: volume['status'] = 'in-use' elif i == 4: volume['status'] = None else: volume['status'] = 'available' volume = fake_volume.fake_volume_obj(CTXT, **volume) volume.volume_type = fake_volume.fake_volume_type_obj(CTXT) TEST_VOLUME.append(volume) def _volume_get(context, volume_id): """Return predefined volume info.""" return TEST_VOLUME[int(volume_id.replace("-", ""))] TEST_SNAPSHOT = [] snapshot = {} snapshot['id'] = '10000000-0000-0000-0000-{0:012d}'.format(0) snapshot['name'] = 'TEST_SNAPSHOT{0:d}'.format(0) snapshot['provider_location'] = '{0:d}'.format(1) snapshot['status'] = 'available' snapshot['volume_id'] = '00000000-0000-0000-0000-{0:012d}'.format(0) snapshot['volume'] = _volume_get(None, snapshot['volume_id']) snapshot['volume_name'] = 'test-volume{0:d}'.format(0) snapshot['volume_size'] = 128 snapshot = obj_snap.Snapshot._from_db_object( CTXT, obj_snap.Snapshot(), fake_snapshot.fake_db_snapshot(**snapshot)) TEST_SNAPSHOT.append(snapshot) TEST_GROUP = [] for i in range(2): group = {} group['id'] = '20000000-0000-0000-0000-{0:012d}'.format(i) group['status'] = 'available' group = fake_group.fake_group_obj(CTXT, **group) TEST_GROUP.append(group) TEST_GROUP_SNAP = [] group_snapshot = {} group_snapshot['id'] = '30000000-0000-0000-0000-{0:012d}'.format(0) 
group_snapshot['status'] = 'available' group_snapshot = obj_group_snap.GroupSnapshot._from_db_object( CTXT, obj_group_snap.GroupSnapshot(), fake_group_snapshot.fake_db_group_snapshot(**group_snapshot)) TEST_GROUP_SNAP.append(group_snapshot) # Dummy response for REST API POST_SESSIONS_RESULT = { "token": "b74777a3-f9f0-4ea8-bd8f-09847fac48d3", "sessionId": 0, } GET_PORTS_RESULT = { "data": [ { "portId": CONFIG_MAP['port_id'], "portType": "FIBRE", "portAttributes": [ "TAR", "MCU", "RCU", "ELUN" ], "fabricMode": True, "portConnection": "PtoP", "lunSecuritySetting": True, "wwn": CONFIG_MAP['target_wwn'], }, ], } GET_HOST_WWNS_RESULT = { "data": [ { "hostGroupNumber": 0, "hostWwn": CONFIG_MAP['host_wwn'], }, ], } COMPLETED_SUCCEEDED_RESULT = { "status": "Completed", "state": "Succeeded", "affectedResources": ('a/b/c/1',), } COMPLETED_FAILED_RESULT_LU_DEFINED = { "status": "Completed", "state": "Failed", "error": { "errorCode": { "SSB1": "B958", "SSB2": "015A", }, }, } GET_LDEV_RESULT = { "emulationType": "OPEN-V-CVS", "blockCapacity": 2097152, "attributes": ["CVS", "THP"], "status": "NML", "poolId": 30, "label": "00000000000000000000000000000000", } GET_LDEV_RESULT_MAPPED = { "emulationType": "OPEN-V-CVS", "blockCapacity": 2097152, "attributes": ["CVS", "THP"], "status": "NML", "ports": [ { "portId": CONFIG_MAP['port_id'], "hostGroupNumber": 0, "hostGroupName": CONFIG_MAP['host_grp_name'], "lun": 1 }, ], } GET_LDEV_RESULT_SNAP = { "emulationType": "OPEN-V-CVS", "blockCapacity": 2097152, "attributes": ["CVS", "THP"], "status": "NML", "poolId": 30, "label": "10000000000000000000000000000000", } GET_LDEV_RESULT_PAIR = { "emulationType": "OPEN-V-CVS", "blockCapacity": 2097152, "attributes": ["CVS", "THP", "FS"], "status": "NML", "label": "00000000000000000000000000000000", } GET_LDEV_RESULT_PAIR_SNAP = { "emulationType": "OPEN-V-CVS", "blockCapacity": 2097152, "attributes": ["CVS", "THP", "FS"], "status": "NML", "label": "10000000000000000000000000000000", } GET_POOL_RESULT = { "availableVolumeCapacity": 480144, "totalPoolCapacity": 507780, "totalLocatedCapacity": 71453172, } GET_SNAPSHOTS_RESULT = { "data": [ { "primaryOrSecondary": "S-VOL", "status": "PSUS", "pvolLdevId": 0, "muNumber": 1, "svolLdevId": 1, }, ], } GET_SNAPSHOTS_RESULT_PAIR = { "data": [ { "primaryOrSecondary": "S-VOL", "status": "PAIR", "pvolLdevId": 0, "muNumber": 1, "svolLdevId": 1, }, ], } GET_SNAPSHOTS_RESULT_BUSY = { "data": [ { "primaryOrSecondary": "P-VOL", "status": "PSUP", "pvolLdevId": 0, "muNumber": 1, "svolLdevId": 1, }, ], } GET_POOLS_RESULT = { "data": [ { "poolId": 30, "poolName": CONFIG_MAP['pool_name'], "availableVolumeCapacity": 480144, "totalPoolCapacity": 507780, "totalLocatedCapacity": 71453172, "virtualVolumeCapacityRate": -1, }, ], } GET_LUNS_RESULT = { "data": [ { "ldevId": 0, "lun": 1, }, ], } GET_HOST_GROUP_RESULT = { "hostGroupName": CONFIG_MAP['host_grp_name'], } GET_HOST_GROUPS_RESULT = { "data": [ { "hostGroupNumber": 0, "portId": CONFIG_MAP['port_id'], "hostGroupName": "HPEXP-test", }, ], } GET_LDEVS_RESULT = { "data": [ { "ldevId": 0, "label": "15960cc738c94c5bb4f1365be5eeed44", }, { "ldevId": 1, "label": "15960cc738c94c5bb4f1365be5eeed45", }, ], } NOTFOUND_RESULT = { "data": [], } ERROR_RESULT = { "errorSource": "", "message": "", "solution": "", "messageId": "", "errorCode": { "SSB1": "", "SSB2": "", } } def _brick_get_connector_properties(multipath=False, enforce_multipath=False): """Return a predefined connector object.""" return DEFAULT_CONNECTOR def reduce_retrying_time(func): 
@functools.wraps(func) def wrapper(*args, **kwargs): backup_lock_waittime = hbsd_rest_api._LOCK_TIMEOUT backup_exec_max_waittime = hbsd_rest_api._REST_TIMEOUT backup_job_api_response_timeout = ( hbsd_rest_api._JOB_API_RESPONSE_TIMEOUT) backup_get_api_response_timeout = ( hbsd_rest_api._GET_API_RESPONSE_TIMEOUT) backup_extend_waittime = hbsd_rest_api._EXTEND_TIMEOUT backup_exec_retry_interval = hbsd_rest_api._EXEC_RETRY_INTERVAL backup_rest_server_restart_timeout = ( hbsd_rest_api._REST_SERVER_RESTART_TIMEOUT) backup_state_transition_timeout = ( hbsd_rest._STATE_TRANSITION_TIMEOUT) hbsd_rest_api._LOCK_TIMEOUT = 0.01 hbsd_rest_api._REST_TIMEOUT = 0.01 hbsd_rest_api._JOB_API_RESPONSE_TIMEOUT = 0.01 hbsd_rest_api._GET_API_RESPONSE_TIMEOUT = 0.01 hbsd_rest_api._EXTEND_TIMEOUT = 0.01 hbsd_rest_api._EXEC_RETRY_INTERVAL = 0.004 hbsd_rest_api._REST_SERVER_RESTART_TIMEOUT = 0.02 hbsd_rest._STATE_TRANSITION_TIMEOUT = 0.01 func(*args, **kwargs) hbsd_rest_api._LOCK_TIMEOUT = backup_lock_waittime hbsd_rest_api._REST_TIMEOUT = backup_exec_max_waittime hbsd_rest_api._JOB_API_RESPONSE_TIMEOUT = ( backup_job_api_response_timeout) hbsd_rest_api._GET_API_RESPONSE_TIMEOUT = ( backup_get_api_response_timeout) hbsd_rest_api._EXTEND_TIMEOUT = backup_extend_waittime hbsd_rest_api._EXEC_RETRY_INTERVAL = backup_exec_retry_interval hbsd_rest_api._REST_SERVER_RESTART_TIMEOUT = ( backup_rest_server_restart_timeout) hbsd_rest._STATE_TRANSITION_TIMEOUT = ( backup_state_transition_timeout) return wrapper class FakeLookupService(): """Dummy FC zoning mapping lookup service class.""" def get_device_mapping_from_network(self, initiator_wwns, target_wwns): """Return predefined FC zoning mapping.""" return DEVICE_MAP class FakeResponse(): def __init__(self, status_code, data=None, headers=None): self.status_code = status_code self.data = data self.text = data self.content = data self.headers = {'Content-Type': 'json'} if headers is None else headers def json(self): return self.data class HPEXPRESTFCDriverTest(test.TestCase): """Unit test class for HPEXP REST interface fibre channel module.""" test_existing_ref = {'source-id': '1'} test_existing_ref_name = { 'source-name': '15960cc7-38c9-4c5b-b4f1-365be5eeed45'} def setUp(self): """Set up the test environment.""" def _set_required(opts, required): for opt in opts: opt.required = required # Initialize Cinder and avoid checking driver options. 
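        # Required flags on the hbsd_rest / hbsd_common options are cleared
        # before super().setUp() runs and restored right afterwards, so the
        # base setup does not fail on driver options left unset in these
        # tests.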
rest_required_opts = [ opt for opt in hbsd_rest.REST_VOLUME_OPTS if opt.required] common_required_opts = [ opt for opt in hbsd_common.COMMON_VOLUME_OPTS if opt.required] _set_required(rest_required_opts, False) _set_required(common_required_opts, False) super(HPEXPRESTFCDriverTest, self).setUp() _set_required(rest_required_opts, True) _set_required(common_required_opts, True) self.configuration = mock.Mock(conf.Configuration) self.ctxt = cinder_context.get_admin_context() self._setup_config() self._setup_driver() def _setup_config(self): """Set configuration parameter values.""" self.configuration.config_group = "REST" self.configuration.volume_backend_name = "RESTFC" self.configuration.volume_driver = ( "cinder.volume.drivers.hpe.xp.hpe_xp_fc.HPEXPFCDriver") self.configuration.reserved_percentage = "0" self.configuration.use_multipath_for_image_xfer = False self.configuration.enforce_multipath_for_image_xfer = False self.configuration.max_over_subscription_ratio = 500.0 self.configuration.driver_ssl_cert_verify = False self.configuration.hpexp_storage_id = CONFIG_MAP['serial'] self.configuration.hpexp_pools = ["30"] self.configuration.hpexp_snap_pool = None self.configuration.hpexp_ldev_range = "0-1" self.configuration.hpexp_target_ports = [CONFIG_MAP['port_id']] self.configuration.hpexp_compute_target_ports = [ CONFIG_MAP['port_id']] self.configuration.hpexp_group_create = True self.configuration.hpexp_group_delete = True self.configuration.hpexp_copy_speed = 3 self.configuration.hpexp_copy_check_interval = 3 self.configuration.hpexp_async_copy_check_interval = 10 self.configuration.san_login = CONFIG_MAP['user_id'] self.configuration.san_password = CONFIG_MAP['user_pass'] self.configuration.san_ip = CONFIG_MAP[ 'rest_server_ip_addr'] self.configuration.san_api_port = CONFIG_MAP[ 'rest_server_ip_port'] self.configuration.hpexp_rest_disable_io_wait = True self.configuration.hpexp_rest_tcp_keepalive = True self.configuration.hpexp_discard_zero_page = True self.configuration.hpexp_rest_number = "0" self.configuration.hpexp_lun_timeout = hbsd_rest._LUN_TIMEOUT self.configuration.hpexp_lun_retry_interval = ( hbsd_rest._LUN_RETRY_INTERVAL) self.configuration.hpexp_restore_timeout = hbsd_rest._RESTORE_TIMEOUT self.configuration.hpexp_state_transition_timeout = ( hbsd_rest._STATE_TRANSITION_TIMEOUT) self.configuration.hpexp_lock_timeout = hbsd_rest_api._LOCK_TIMEOUT self.configuration.hpexp_rest_timeout = hbsd_rest_api._REST_TIMEOUT self.configuration.hpexp_extend_timeout = ( hbsd_rest_api._EXTEND_TIMEOUT) self.configuration.hpexp_exec_retry_interval = ( hbsd_rest_api._EXEC_RETRY_INTERVAL) self.configuration.hpexp_rest_connect_timeout = ( hbsd_rest_api._DEFAULT_CONNECT_TIMEOUT) self.configuration.hpexp_rest_job_api_response_timeout = ( hbsd_rest_api._JOB_API_RESPONSE_TIMEOUT) self.configuration.hpexp_rest_get_api_response_timeout = ( hbsd_rest_api._GET_API_RESPONSE_TIMEOUT) self.configuration.hpexp_rest_server_busy_timeout = ( hbsd_rest_api._REST_SERVER_BUSY_TIMEOUT) self.configuration.hpexp_rest_keep_session_loop_interval = ( hbsd_rest_api._KEEP_SESSION_LOOP_INTERVAL) self.configuration.hpexp_rest_another_ldev_mapped_retry_timeout = ( hbsd_rest_api._ANOTHER_LDEV_MAPPED_RETRY_TIMEOUT) self.configuration.hpexp_rest_tcp_keepidle = ( hbsd_rest_api._TCP_KEEPIDLE) self.configuration.hpexp_rest_tcp_keepintvl = ( hbsd_rest_api._TCP_KEEPINTVL) self.configuration.hpexp_rest_tcp_keepcnt = ( hbsd_rest_api._TCP_KEEPCNT) self.configuration.hpexp_host_mode_options = [] self.configuration.hpexp_zoning_request 
= False self.configuration.san_thin_provision = True self.configuration.san_private_key = '' self.configuration.san_clustername = '' self.configuration.san_ssh_port = '22' self.configuration.san_is_local = False self.configuration.ssh_conn_timeout = '30' self.configuration.ssh_min_pool_conn = '1' self.configuration.ssh_max_pool_conn = '5' self.configuration.use_chap_auth = True self.configuration.chap_username = CONFIG_MAP['auth_user'] self.configuration.chap_password = CONFIG_MAP['auth_password'] self.configuration.safe_get = self._fake_safe_get CONF = cfg.CONF CONF.my_ip = CONFIG_MAP['my_ip'] def _fake_safe_get(self, value): """Retrieve a configuration value avoiding throwing an exception.""" try: val = getattr(self.configuration, value) except AttributeError: val = None return val @mock.patch.object(requests.Session, "request") @mock.patch.object( volume_utils, 'brick_get_connector_properties', side_effect=_brick_get_connector_properties) def _setup_driver( self, brick_get_connector_properties=None, request=None): """Set up the driver environment.""" self.driver = hpe_xp_fc.HPEXPFCDriver( configuration=self.configuration) request.side_effect = [FakeResponse(200, POST_SESSIONS_RESULT), FakeResponse(200, GET_PORTS_RESULT), FakeResponse(200, GET_HOST_WWNS_RESULT)] self.driver.do_setup(None) self.driver.check_for_setup_error() self.driver.local_path(None) self.driver.create_export(None, None, None) self.driver.ensure_export(None, None) self.driver.remove_export(None, None) self.driver.create_export_snapshot(None, None, None) self.driver.remove_export_snapshot(None, None) # stop the Loopingcall within the do_setup treatment self.driver.common.client.keep_session_loop.stop() def tearDown(self): self.client = None super(HPEXPRESTFCDriverTest, self).tearDown() # API test cases @mock.patch.object(requests.Session, "request") @mock.patch.object( volume_utils, 'brick_get_connector_properties', side_effect=_brick_get_connector_properties) def test_do_setup(self, brick_get_connector_properties, request): drv = hpe_xp_fc.HPEXPFCDriver( configuration=self.configuration) self._setup_config() request.side_effect = [FakeResponse(200, POST_SESSIONS_RESULT), FakeResponse(200, GET_PORTS_RESULT), FakeResponse(200, GET_HOST_WWNS_RESULT)] drv.do_setup(None) self.assertEqual( {CONFIG_MAP['port_id']: CONFIG_MAP['target_wwn']}, drv.common.storage_info['wwns']) self.assertEqual(1, brick_get_connector_properties.call_count) self.assertEqual(3, request.call_count) # stop the Loopingcall within the do_setup treatment self.driver.common.client.keep_session_loop.stop() self.driver.common.client.keep_session_loop.wait() @mock.patch.object(requests.Session, "request") @mock.patch.object( volume_utils, 'brick_get_connector_properties', side_effect=_brick_get_connector_properties) def test_do_setup_create_hg(self, brick_get_connector_properties, request): """Normal case: The host group not exists.""" drv = hpe_xp_fc.HPEXPFCDriver( configuration=self.configuration) self._setup_config() request.side_effect = [FakeResponse(200, POST_SESSIONS_RESULT), FakeResponse(200, GET_PORTS_RESULT), FakeResponse(200, NOTFOUND_RESULT), FakeResponse(200, NOTFOUND_RESULT), FakeResponse(200, NOTFOUND_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)] drv.do_setup(None) self.assertEqual( {CONFIG_MAP['port_id']: CONFIG_MAP['target_wwn']}, drv.common.storage_info['wwns']) self.assertEqual(CONFIG_MAP['host_grp_name'], 
drv.common.format_info['group_name_format'].format( wwn=min(DEFAULT_CONNECTOR['wwpns']))) self.assertEqual(1, brick_get_connector_properties.call_count) self.assertEqual(8, request.call_count) # stop the Loopingcall within the do_setup treatment self.driver.common.client.keep_session_loop.stop() self.driver.common.client.keep_session_loop.wait() @mock.patch.object(requests.Session, "request") @mock.patch.object( volume_utils, 'brick_get_connector_properties', side_effect=_brick_get_connector_properties) def test_do_setup_pool_name(self, brick_get_connector_properties, request): """Normal case: Specify a pool name instead of pool id""" drv = hpe_xp_fc.HPEXPFCDriver( configuration=self.configuration) self._setup_config() tmp_pools = self.configuration.hitachi_pools self.configuration.hitachi_pools = [CONFIG_MAP['pool_name']] request.side_effect = [FakeResponse(200, POST_SESSIONS_RESULT), FakeResponse(200, GET_POOLS_RESULT), FakeResponse(200, GET_PORTS_RESULT), FakeResponse(200, GET_HOST_WWNS_RESULT)] drv.do_setup(None) self.assertEqual( {CONFIG_MAP['port_id']: CONFIG_MAP['target_wwn']}, drv.common.storage_info['wwns']) self.assertEqual(1, brick_get_connector_properties.call_count) self.assertEqual(4, request.call_count) self.configuration.hitachi_pools = tmp_pools # stop the Loopingcall within the do_setup treatment self.driver.common.client.keep_session_loop.stop() self.driver.common.client.keep_session_loop.wait() @mock.patch.object(requests.Session, "request") @mock.patch.object(volume_types, 'get_volume_type_extra_specs') @mock.patch.object(volume_types, 'get_volume_type_qos_specs') def test_create_volume( self, get_volume_type_qos_specs, get_volume_type_extra_specs, request): request.return_value = FakeResponse(202, COMPLETED_SUCCEEDED_RESULT) get_volume_type_extra_specs.return_value = {} get_volume_type_qos_specs.return_value = {'qos_specs': None} self.driver.common._stats = {} self.driver.common._stats['pools'] = [ {'location_info': {'pool_id': 30}}] ret = self.driver.create_volume(TEST_VOLUME[4]) self.assertEqual('1', ret['provider_location']) self.assertEqual(2, request.call_count) @reduce_retrying_time @mock.patch.object(requests.Session, "request") @mock.patch.object(volume_types, 'get_volume_type_extra_specs') @mock.patch.object(volume_types, 'get_volume_type_qos_specs') def test_create_volume_timeout( self, get_volume_type_qos_specs, get_volume_type_extra_specs, request): get_volume_type_extra_specs.return_value = {} get_volume_type_qos_specs.return_value = {'qos_specs': None} request.return_value = FakeResponse( 500, ERROR_RESULT, headers={'Content-Type': 'json'}) self.driver.common._stats = {} self.driver.common._stats['pools'] = [ {'location_info': {'pool_id': 30}}] self.assertRaises(exception.VolumeDriverException, self.driver.create_volume, TEST_VOLUME[4]) self.assertGreater(request.call_count, 1) @mock.patch.object(requests.Session, "request") def test_delete_volume(self, request): request.side_effect = [FakeResponse(200, GET_LDEV_RESULT), FakeResponse(200, GET_LDEV_RESULT), FakeResponse(200, GET_LDEV_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)] self.driver.delete_volume(TEST_VOLUME[0]) self.assertEqual(4, request.call_count) @mock.patch.object(requests.Session, "request") def test_delete_volume_temporary_busy(self, request): request.side_effect = [FakeResponse(200, GET_LDEV_RESULT_PAIR), FakeResponse(200, GET_SNAPSHOTS_RESULT_BUSY), FakeResponse(200, GET_LDEV_RESULT), FakeResponse(200, GET_LDEV_RESULT), FakeResponse(200, GET_LDEV_RESULT), FakeResponse(200, 
GET_LDEV_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)] self.driver.delete_volume(TEST_VOLUME[0]) self.assertEqual(7, request.call_count) @reduce_retrying_time @mock.patch.object(requests.Session, "request") def test_delete_volume_busy_timeout(self, request): request.side_effect = [FakeResponse(200, GET_LDEV_RESULT_PAIR), FakeResponse(200, GET_SNAPSHOTS_RESULT_BUSY), FakeResponse(200, GET_LDEV_RESULT_PAIR), FakeResponse(200, GET_LDEV_RESULT_PAIR), FakeResponse(200, GET_LDEV_RESULT_PAIR)] self.assertRaises(exception.VolumeDriverException, self.driver.delete_volume, TEST_VOLUME[0]) self.assertGreater(request.call_count, 2) @mock.patch.object(requests.Session, "request") def test_extend_volume(self, request): request.side_effect = [FakeResponse(200, GET_LDEV_RESULT), FakeResponse(200, GET_LDEV_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)] self.driver.extend_volume(TEST_VOLUME[0], 256) self.assertEqual(3, request.call_count) @mock.patch.object(driver.FibreChannelDriver, "get_goodness_function") @mock.patch.object(driver.FibreChannelDriver, "get_filter_function") @mock.patch.object(requests.Session, "request") def test__update_volume_stats( self, request, get_filter_function, get_goodness_function): request.return_value = FakeResponse(200, GET_POOLS_RESULT) get_filter_function.return_value = None get_goodness_function.return_value = None self.driver._update_volume_stats() self.assertEqual( 'Hewlett Packard Enterprise', self.driver._stats['vendor_name']) self.assertTrue(self.driver._stats["pools"][0]['multiattach']) self.assertEqual(1, request.call_count) self.assertEqual(1, get_filter_function.call_count) self.assertEqual(1, get_goodness_function.call_count) @mock.patch.object(requests.Session, "request") @mock.patch.object(sqlalchemy_api, 'volume_get', side_effect=_volume_get) @mock.patch.object(volume_types, 'get_volume_type_extra_specs') @mock.patch.object(volume_types, 'get_volume_type_qos_specs') def test_create_snapshot( self, get_volume_type_qos_specs, get_volume_type_extra_specs, volume_get, request): request.side_effect = [FakeResponse(200, GET_LDEV_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(200, GET_SNAPSHOTS_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)] get_volume_type_extra_specs.return_value = {} get_volume_type_qos_specs.return_value = {'qos_specs': None} self.driver.common._stats = {} self.driver.common._stats['pools'] = [ {'location_info': {'pool_id': 30}}] ret = self.driver.create_snapshot(TEST_SNAPSHOT[0]) self.assertEqual('1', ret['provider_location']) self.assertEqual(5, request.call_count) @mock.patch.object(requests.Session, "request") def test_delete_snapshot(self, request): request.side_effect = [FakeResponse(200, GET_LDEV_RESULT_PAIR_SNAP), FakeResponse(200, NOTFOUND_RESULT), FakeResponse(200, GET_SNAPSHOTS_RESULT), FakeResponse(200, GET_SNAPSHOTS_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(200, GET_LDEV_RESULT), FakeResponse(200, GET_LDEV_RESULT), FakeResponse(200, GET_LDEV_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)] self.driver.delete_snapshot(TEST_SNAPSHOT[0]) self.assertEqual(10, request.call_count) @mock.patch.object(requests.Session, "request") def test_delete_snapshot_no_pair(self, request): """Normal case: Delete a snapshot without pair.""" request.side_effect = [FakeResponse(200, GET_LDEV_RESULT_SNAP), FakeResponse(200, GET_LDEV_RESULT), FakeResponse(200, GET_LDEV_RESULT), 
FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)] self.driver.delete_snapshot(TEST_SNAPSHOT[0]) self.assertEqual(4, request.call_count) @mock.patch.object(requests.Session, "request") @mock.patch.object(volume_types, 'get_volume_type_extra_specs') @mock.patch.object(volume_types, 'get_volume_type_qos_specs') def test_create_cloned_volume( self, get_volume_type_qos_specs, get_volume_type_extra_specs, request): request.side_effect = [FakeResponse(200, GET_LDEV_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(200, GET_SNAPSHOTS_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)] get_volume_type_extra_specs.return_value = {} get_volume_type_qos_specs.return_value = {'qos_specs': None} self.driver.common._stats = {} self.driver.common._stats['pools'] = [ {'location_info': {'pool_id': 30}}] vol = self.driver.create_cloned_volume(TEST_VOLUME[0], TEST_VOLUME[1]) self.assertEqual('1', vol['provider_location']) self.assertEqual(5, request.call_count) @mock.patch.object(requests.Session, "request") @mock.patch.object(volume_types, 'get_volume_type_extra_specs') @mock.patch.object(volume_types, 'get_volume_type_qos_specs') def test_create_volume_from_snapshot( self, get_volume_type_qos_specs, get_volume_type_extra_specs, request): request.side_effect = [FakeResponse(200, GET_LDEV_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(200, GET_SNAPSHOTS_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)] get_volume_type_extra_specs.return_value = {} get_volume_type_qos_specs.return_value = {'qos_specs': None} self.driver.common._stats = {} self.driver.common._stats['pools'] = [ {'location_info': {'pool_id': 30}}] vol = self.driver.create_volume_from_snapshot( TEST_VOLUME[0], TEST_SNAPSHOT[0]) self.assertEqual('1', vol['provider_location']) self.assertEqual(5, request.call_count) @mock.patch.object(fczm_utils, "add_fc_zone") @mock.patch.object(requests.Session, "request") @mock.patch.object(volume_types, 'get_volume_type_extra_specs') def test_initialize_connection( self, get_volume_type_extra_specs, request, add_fc_zone): get_volume_type_extra_specs.return_value = {} self.driver.common.conf.hitachi_zoning_request = True self.driver.common._lookup_service = FakeLookupService() request.side_effect = [FakeResponse(200, GET_HOST_WWNS_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)] ret = self.driver.initialize_connection( TEST_VOLUME[0], DEFAULT_CONNECTOR) self.assertEqual('fibre_channel', ret['driver_volume_type']) self.assertEqual([CONFIG_MAP['target_wwn']], ret['data']['target_wwn']) self.assertEqual(1, ret['data']['target_lun']) self.assertEqual(2, request.call_count) self.assertEqual(1, add_fc_zone.call_count) @mock.patch.object(fczm_utils, "add_fc_zone") @mock.patch.object(requests.Session, "request") @mock.patch.object(volume_types, 'get_volume_type_extra_specs') def test_initialize_connection_already_mapped( self, get_volume_type_extra_specs, request, add_fc_zone): """Normal case: ldev have already mapped.""" get_volume_type_extra_specs.return_value = {} self.driver.common.conf.hitachi_zoning_request = True self.driver.common._lookup_service = FakeLookupService() request.side_effect = [ FakeResponse(200, GET_HOST_WWNS_RESULT), FakeResponse(202, COMPLETED_FAILED_RESULT_LU_DEFINED), FakeResponse(200, GET_LUNS_RESULT), ] ret = self.driver.initialize_connection( TEST_VOLUME[0], DEFAULT_CONNECTOR) self.assertEqual('fibre_channel', ret['driver_volume_type']) 
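        # COMPLETED_FAILED_RESULT_LU_DEFINED carries SSB codes ("B958"/"015A")
        # that the driver apparently interprets as "LU already defined", so the
        # mocked GET_LUNS_RESULT lets it reuse the existing LUN 1; the
        # remaining assertions below check exactly that.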
self.assertEqual([CONFIG_MAP['target_wwn']], ret['data']['target_wwn']) self.assertEqual(1, ret['data']['target_lun']) self.assertEqual(3, request.call_count) self.assertEqual(1, add_fc_zone.call_count) @mock.patch.object(fczm_utils, "add_fc_zone") @mock.patch.object(requests.Session, "request") @mock.patch.object(volume_types, 'get_volume_type_extra_specs') def test_initialize_connection_shared_target( self, get_volume_type_extra_specs, request, add_fc_zone): """Normal case: A target shared with other systems.""" self.driver.common.conf.hitachi_zoning_request = True get_volume_type_extra_specs.return_value = {} self.driver.common._lookup_service = FakeLookupService() request.side_effect = [FakeResponse(200, NOTFOUND_RESULT), FakeResponse(200, NOTFOUND_RESULT), FakeResponse(200, GET_HOST_GROUPS_RESULT), FakeResponse(200, GET_HOST_WWNS_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)] ret = self.driver.initialize_connection( TEST_VOLUME[0], DEFAULT_CONNECTOR) self.assertEqual('fibre_channel', ret['driver_volume_type']) self.assertEqual([CONFIG_MAP['target_wwn']], ret['data']['target_wwn']) self.assertEqual(1, ret['data']['target_lun']) self.assertEqual(5, request.call_count) self.assertEqual(1, add_fc_zone.call_count) @mock.patch.object(fczm_utils, "remove_fc_zone") @mock.patch.object(requests.Session, "request") def test_terminate_connection(self, request, remove_fc_zone): self.driver.common.conf.hitachi_zoning_request = True self.driver.common._lookup_service = FakeLookupService() request.side_effect = [FakeResponse(200, GET_HOST_WWNS_RESULT), FakeResponse(200, GET_LDEV_RESULT_MAPPED), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(200, NOTFOUND_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)] self.driver.terminate_connection(TEST_VOLUME[2], DEFAULT_CONNECTOR) self.assertEqual(5, request.call_count) self.assertEqual(1, remove_fc_zone.call_count) @mock.patch.object(fczm_utils, "remove_fc_zone") @mock.patch.object(requests.Session, "request") def test_terminate_connection_not_connector(self, request, remove_fc_zone): """Normal case: Connector is None.""" self.driver.common.conf.hitachi_zoning_request = True self.driver.common._lookup_service = FakeLookupService() request.side_effect = [FakeResponse(200, GET_LDEV_RESULT_MAPPED), FakeResponse(200, GET_HOST_GROUP_RESULT), FakeResponse(200, GET_HOST_WWNS_RESULT), FakeResponse(200, GET_HOST_WWNS_RESULT), FakeResponse(200, GET_LDEV_RESULT_MAPPED), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(200, NOTFOUND_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)] self.driver.terminate_connection(TEST_VOLUME[2], None) self.assertEqual(8, request.call_count) self.assertEqual(1, remove_fc_zone.call_count) @mock.patch.object(fczm_utils, "remove_fc_zone") @mock.patch.object(requests.Session, "request") def test_terminate_connection_not_lun(self, request, remove_fc_zone): """Normal case: Lun already not exist.""" self.driver.common.conf.hitachi_zoning_request = True self.driver.common._lookup_service = FakeLookupService() request.side_effect = [FakeResponse(200, GET_HOST_WWNS_RESULT), FakeResponse(200, GET_LDEV_RESULT)] self.driver.terminate_connection(TEST_VOLUME[2], DEFAULT_CONNECTOR) self.assertEqual(2, request.call_count) self.assertEqual(1, remove_fc_zone.call_count) @mock.patch.object(fczm_utils, "add_fc_zone") @mock.patch.object(requests.Session, "request") def test_initialize_connection_snapshot(self, request, add_fc_zone): self.driver.common.conf.hitachi_zoning_request = True 
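        # As in the other connection tests here, the zoning flag is set via its
        # hitachi_* name; the HPE XP driver is built on the shared Hitachi
        # hbsd_* modules imported at the top of this file, which presumably
        # read the hitachi_* option names internally.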
self.driver.common._lookup_service = FakeLookupService() request.side_effect = [FakeResponse(200, GET_HOST_WWNS_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)] ret = self.driver.initialize_connection_snapshot( TEST_SNAPSHOT[0], DEFAULT_CONNECTOR) self.assertEqual('fibre_channel', ret['driver_volume_type']) self.assertEqual([CONFIG_MAP['target_wwn']], ret['data']['target_wwn']) self.assertEqual(1, ret['data']['target_lun']) self.assertEqual(2, request.call_count) self.assertEqual(1, add_fc_zone.call_count) @mock.patch.object(fczm_utils, "remove_fc_zone") @mock.patch.object(requests.Session, "request") def test_terminate_connection_snapshot(self, request, remove_fc_zone): self.driver.common.conf.hitachi_zoning_request = True self.driver.common._lookup_service = FakeLookupService() request.side_effect = [FakeResponse(200, GET_HOST_WWNS_RESULT), FakeResponse(200, GET_LDEV_RESULT_MAPPED), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(200, NOTFOUND_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)] self.driver.terminate_connection_snapshot( TEST_SNAPSHOT[0], DEFAULT_CONNECTOR) self.assertEqual(5, request.call_count) self.assertEqual(1, remove_fc_zone.call_count) @mock.patch.object(requests.Session, "request") @mock.patch.object(volume_types, 'get_volume_type_qos_specs') def test_manage_existing(self, get_volume_type_qos_specs, request): request.side_effect = [FakeResponse(200, GET_LDEV_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(200, GET_LDEVS_RESULT)] get_volume_type_qos_specs.return_value = {'qos_specs': None} ret = self.driver.manage_existing( TEST_VOLUME[0], self.test_existing_ref) self.assertEqual('1', ret['provider_location']) self.assertEqual(3, request.call_count) @mock.patch.object(requests.Session, "request") @mock.patch.object(volume_types, 'get_volume_type_qos_specs') def test_manage_existing_name(self, get_volume_type_qos_specs, request): request.side_effect = [FakeResponse(200, GET_LDEVS_RESULT), FakeResponse(200, GET_LDEV_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(200, GET_LDEVS_RESULT)] get_volume_type_qos_specs.return_value = {'qos_specs': None} ret = self.driver.manage_existing( TEST_VOLUME[0], self.test_existing_ref_name) self.assertEqual('1', ret['provider_location']) self.assertEqual(4, request.call_count) @mock.patch.object(requests.Session, "request") def test_manage_existing_get_size(self, request): request.return_value = FakeResponse(200, GET_LDEV_RESULT) self.driver.manage_existing_get_size( TEST_VOLUME[0], self.test_existing_ref) self.assertEqual(1, request.call_count) @mock.patch.object(requests.Session, "request") def test_manage_existing_get_size_name(self, request): request.side_effect = [FakeResponse(200, GET_LDEVS_RESULT), FakeResponse(200, GET_LDEV_RESULT)] self.driver.manage_existing_get_size( TEST_VOLUME[0], self.test_existing_ref_name) self.assertEqual(2, request.call_count) @mock.patch.object(requests.Session, "request") def test_unmanage(self, request): request.side_effect = [FakeResponse(200, GET_LDEV_RESULT), FakeResponse(200, GET_LDEV_RESULT)] self.driver.unmanage(TEST_VOLUME[0]) self.assertEqual(2, request.call_count) @mock.patch.object(requests.Session, "request") def test_copy_image_to_volume(self, request): image_service = 'fake_image_service' image_id = 'fake_image_id' request.return_value = FakeResponse(202, COMPLETED_SUCCEEDED_RESULT) with mock.patch.object(driver.VolumeDriver, 'copy_image_to_volume') \ as mock_copy_image: self.driver.copy_image_to_volume( self.ctxt, 
TEST_VOLUME[0], image_service, image_id) mock_copy_image.assert_called_with( self.ctxt, TEST_VOLUME[0], image_service, image_id, disable_sparse=False) self.assertEqual(1, request.call_count) @mock.patch.object(requests.Session, "request") def test_update_migrated_volume(self, request): request.return_value = FakeResponse(202, COMPLETED_SUCCEEDED_RESULT) ret = self.driver.update_migrated_volume( self.ctxt, TEST_VOLUME[0], TEST_VOLUME[1], "available") self.assertEqual(1, request.call_count) actual = ({'_name_id': TEST_VOLUME[1]['id'], 'provider_location': TEST_VOLUME[1]['provider_location']}) self.assertEqual(actual, ret) def test_unmanage_snapshot(self): """The driver don't support unmange_snapshot.""" self.assertRaises( NotImplementedError, self.driver.unmanage_snapshot, TEST_SNAPSHOT[0]) @mock.patch.object(requests.Session, "request") def test_retype(self, request): request.return_value = FakeResponse(200, GET_LDEV_RESULT) new_specs = {'hpe_xp:test': 'test'} new_type_ref = volume_types.create(self.ctxt, 'new', new_specs) new_type = volume_types.get_volume_type(self.ctxt, new_type_ref['id']) diff = {} host = { 'capabilities': { 'location_info': { 'pool_id': 30, }, }, } ret = self.driver.retype( self.ctxt, TEST_VOLUME[0], new_type, diff, host) self.assertEqual(1, request.call_count) self.assertTrue(ret) def test_backup_use_temp_snapshot(self): self.assertTrue(self.driver.backup_use_temp_snapshot()) @mock.patch.object(requests.Session, "request") def test_revert_to_snapshot(self, request): request.side_effect = [FakeResponse(200, GET_LDEV_RESULT_PAIR), FakeResponse(200, GET_SNAPSHOTS_RESULT), FakeResponse(200, GET_SNAPSHOTS_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(200, GET_SNAPSHOTS_RESULT)] self.driver.revert_to_snapshot( self.ctxt, TEST_VOLUME[0], TEST_SNAPSHOT[0]) self.assertEqual(5, request.call_count) def test_session___call__(self): session = self.driver.common.client.Session('id', 'token') req = models.Response() ret = session.__call__(req) self.assertEqual('Session token', ret.headers['Authorization']) def test_create_group(self): ret = self.driver.create_group(self.ctxt, TEST_GROUP[0]) self.assertIsNone(ret) @mock.patch.object(requests.Session, "request") def test_delete_group(self, request): request.side_effect = [FakeResponse(200, GET_LDEV_RESULT), FakeResponse(200, GET_LDEV_RESULT), FakeResponse(200, GET_LDEV_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)] ret = self.driver.delete_group( self.ctxt, TEST_GROUP[0], [TEST_VOLUME[0]]) self.assertEqual(4, request.call_count) actual = ( {'status': TEST_GROUP[0]['status']}, [{'id': TEST_VOLUME[0]['id'], 'status': 'deleted'}] ) self.assertTupleEqual(actual, ret) @mock.patch.object(requests.Session, "request") @mock.patch.object(volume_types, 'get_volume_type_extra_specs') @mock.patch.object(volume_types, 'get_volume_type_qos_specs') def test_create_group_from_src_volume( self, get_volume_type_qos_specs, get_volume_type_extra_specs, request): request.side_effect = [FakeResponse(200, GET_LDEV_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(200, GET_SNAPSHOTS_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)] get_volume_type_extra_specs.return_value = {} get_volume_type_qos_specs.return_value = {'qos_specs': None} self.driver.common._stats = {} self.driver.common._stats['pools'] = [ {'location_info': {'pool_id': 30}}] ret = self.driver.create_group_from_src( self.ctxt, TEST_GROUP[1], [TEST_VOLUME[1]], source_group=TEST_GROUP[0], 
source_vols=[TEST_VOLUME[0]] ) self.assertEqual(5, request.call_count) actual = ( None, [{'id': TEST_VOLUME[1]['id'], 'provider_location': '1'}]) self.assertTupleEqual(actual, ret) @mock.patch.object(requests.Session, "request") @mock.patch.object(volume_types, 'get_volume_type_extra_specs') @mock.patch.object(volume_types, 'get_volume_type_qos_specs') def test_create_group_from_src_snapshot( self, get_volume_type_qos_specs, get_volume_type_extra_specs, request): request.side_effect = [FakeResponse(200, GET_LDEV_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(200, GET_SNAPSHOTS_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)] get_volume_type_extra_specs.return_value = {} get_volume_type_qos_specs.return_value = {'qos_specs': None} self.driver.common._stats = {} self.driver.common._stats['pools'] = [ {'location_info': {'pool_id': 30}}] ret = self.driver.create_group_from_src( self.ctxt, TEST_GROUP[0], [TEST_VOLUME[0]], group_snapshot=TEST_GROUP_SNAP[0], snapshots=[TEST_SNAPSHOT[0]] ) self.assertEqual(5, request.call_count) actual = ( None, [{'id': TEST_VOLUME[0]['id'], 'provider_location': '1'}]) self.assertTupleEqual(actual, ret) @mock.patch.object(volume_types, 'get_volume_type_qos_specs') def test_create_group_from_src_volume_error( self, get_volume_type_qos_specs): get_volume_type_qos_specs.return_value = {'qos_specs': None} self.assertRaises( exception.VolumeDriverException, self.driver.create_group_from_src, self.ctxt, TEST_GROUP[1], [TEST_VOLUME[1]], source_group=TEST_GROUP[0], source_vols=[TEST_VOLUME[3]] ) @mock.patch.object(volume_utils, 'is_group_a_cg_snapshot_type') def test_update_group(self, is_group_a_cg_snapshot_type): is_group_a_cg_snapshot_type.return_value = False ret = self.driver.update_group( self.ctxt, TEST_GROUP[0], add_volumes=[TEST_VOLUME[0]]) self.assertTupleEqual((None, None, None), ret) @mock.patch.object(volume_utils, 'is_group_a_cg_snapshot_type') def test_update_group_error(self, is_group_a_cg_snapshot_type): is_group_a_cg_snapshot_type.return_value = True self.assertRaises( exception.VolumeDriverException, self.driver.update_group, self.ctxt, TEST_GROUP[0], add_volumes=[TEST_VOLUME[3]], remove_volumes=[TEST_VOLUME[0]] ) @mock.patch.object(requests.Session, "request") @mock.patch.object(sqlalchemy_api, 'volume_get', side_effect=_volume_get) @mock.patch.object(volume_utils, 'is_group_a_cg_snapshot_type') @mock.patch.object(volume_types, 'get_volume_type_extra_specs') @mock.patch.object(volume_types, 'get_volume_type_qos_specs') def test_create_group_snapshot_non_cg( self, get_volume_type_qos_specs, get_volume_type_extra_specs, is_group_a_cg_snapshot_type, volume_get, request): is_group_a_cg_snapshot_type.return_value = False get_volume_type_extra_specs.return_value = {} get_volume_type_qos_specs.return_value = {'qos_specs': None} request.side_effect = [FakeResponse(200, GET_LDEV_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(200, GET_SNAPSHOTS_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)] self.driver.common._stats = {} self.driver.common._stats['pools'] = [ {'location_info': {'pool_id': 30}}] ret = self.driver.create_group_snapshot( self.ctxt, TEST_GROUP_SNAP[0], [TEST_SNAPSHOT[0]] ) self.assertEqual(5, request.call_count) actual = ( {'status': 'available'}, [{'id': TEST_SNAPSHOT[0]['id'], 'provider_location': '1', 'status': 'available'}] ) self.assertTupleEqual(actual, ret) 
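    # The non-CG case above snapshots each member with the same five-request
    # sequence as a plain create_snapshot. Judging from the mocked responses,
    # the CG case below creates the pair, polls until GET_SNAPSHOTS_RESULT_PAIR
    # reports "PAIR", then splits it, which accounts for the six expected
    # requests.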
@mock.patch.object(requests.Session, "request") @mock.patch.object(sqlalchemy_api, 'volume_get', side_effect=_volume_get) @mock.patch.object(volume_utils, 'is_group_a_cg_snapshot_type') @mock.patch.object(volume_types, 'get_volume_type_extra_specs') @mock.patch.object(volume_types, 'get_volume_type_qos_specs') def test_create_group_snapshot_cg( self, get_volume_type_qos_specs, get_volume_type_extra_specs, is_group_a_cg_snapshot_type, volume_get, request): is_group_a_cg_snapshot_type.return_value = True get_volume_type_extra_specs.return_value = {} get_volume_type_qos_specs.return_value = {'qos_specs': None} request.side_effect = [FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(200, GET_SNAPSHOTS_RESULT_PAIR), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(200, GET_SNAPSHOTS_RESULT)] self.driver.common._stats = {} self.driver.common._stats['pools'] = [ {'location_info': {'pool_id': 30}}] ret = self.driver.create_group_snapshot( self.ctxt, TEST_GROUP_SNAP[0], [TEST_SNAPSHOT[0]] ) self.assertEqual(6, request.call_count) actual = ( None, [{'id': TEST_SNAPSHOT[0]['id'], 'provider_location': '1', 'status': 'available'}] ) self.assertTupleEqual(actual, ret) @mock.patch.object(requests.Session, "request") def test_delete_group_snapshot(self, request): request.side_effect = [FakeResponse(200, GET_LDEV_RESULT_PAIR_SNAP), FakeResponse(200, NOTFOUND_RESULT), FakeResponse(200, GET_SNAPSHOTS_RESULT), FakeResponse(200, GET_SNAPSHOTS_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(200, GET_LDEV_RESULT), FakeResponse(200, GET_LDEV_RESULT), FakeResponse(200, GET_LDEV_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)] ret = self.driver.delete_group_snapshot( self.ctxt, TEST_GROUP_SNAP[0], [TEST_SNAPSHOT[0]]) self.assertEqual(10, request.call_count) actual = ( {'status': TEST_GROUP_SNAP[0]['status']}, [{'id': TEST_SNAPSHOT[0]['id'], 'status': 'deleted'}] ) self.assertTupleEqual(actual, ret) @mock.patch.object(hpe_xp_fc.HPEXPFCDriver, "_get_oslo_driver_opts") def test_get_driver_options(self, _get_oslo_driver_opts): _get_oslo_driver_opts.return_value = [] ret = self.driver.get_driver_options() actual = (hpe_xp_rest.COMMON_VOLUME_OPTS + hpe_xp_rest.REST_VOLUME_OPTS + hpe_xp_rest.FC_VOLUME_OPTS) self.assertEqual(actual, ret) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/volume/drivers/hpe/xp/test_hpe_xp_rest_iscsi.py0000664000175000017500000013155100000000000027775 0ustar00zuulzuul00000000000000# Copyright (C) 2022, 2024, Hewlett Packard Enterprise, Ltd. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
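# The iSCSI test module below mirrors test_hpe_xp_rest_fc.py above: the same
# dummy volumes, snapshots and REST fixtures, but with an iSCSI initiator IQN
# in the connector instead of wwpns, a GET_PORT_RESULT fixture supplying the
# IPv4 portal, and no FC zone-manager mocking.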
# """Unit tests for Hewlett Packard Enterprise Driver.""" from unittest import mock from oslo_config import cfg import requests from cinder import context as cinder_context from cinder.db.sqlalchemy import api as sqlalchemy_api from cinder import exception from cinder.objects import group_snapshot as obj_group_snap from cinder.objects import snapshot as obj_snap from cinder.tests.unit import fake_group from cinder.tests.unit import fake_group_snapshot from cinder.tests.unit import fake_snapshot from cinder.tests.unit import fake_volume from cinder.tests.unit import test from cinder.volume import configuration as conf from cinder.volume import driver from cinder.volume.drivers.hitachi import hbsd_common from cinder.volume.drivers.hitachi import hbsd_rest from cinder.volume.drivers.hitachi import hbsd_rest_api from cinder.volume.drivers.hpe.xp import hpe_xp_iscsi from cinder.volume.drivers.hpe.xp import hpe_xp_rest from cinder.volume import volume_types from cinder.volume import volume_utils # Configuration parameter values CONFIG_MAP = { 'serial': '886000123456', 'my_ip': '127.0.0.1', 'rest_server_ip_addr': '172.16.18.108', 'rest_server_ip_port': '23451', 'port_id': 'CL1-A', 'host_grp_name': 'HPEXP-127.0.0.1', 'host_mode': 'LINUX/IRIX', 'host_iscsi_name': 'iqn.hpexp-test-host', 'target_iscsi_name': 'iqn.hpexp-test-target', 'user_id': 'user', 'user_pass': 'password', 'pool_name': 'test_pool', 'ipv4Address': '111.22.333.44', 'tcpPort': '5555', 'auth_user': 'auth_user', 'auth_password': 'auth_password', } DEFAULT_CONNECTOR = { 'host': 'host', 'ip': CONFIG_MAP['my_ip'], 'initiator': CONFIG_MAP['host_iscsi_name'], 'multipath': False, } CTXT = cinder_context.get_admin_context() TEST_VOLUME = [] for i in range(5): volume = {} volume['id'] = '00000000-0000-0000-0000-{0:012d}'.format(i) volume['name'] = 'test-volume{0:d}'.format(i) volume['volume_type_id'] = '00000000-0000-0000-0000-{0:012d}'.format(i) if i == 3 or i == 4: volume['provider_location'] = None else: volume['provider_location'] = '{0:d}'.format(i) volume['size'] = 128 if i == 2: volume['status'] = 'in-use' elif i == 4: volume['status'] = None else: volume['status'] = 'available' volume = fake_volume.fake_volume_obj(CTXT, **volume) volume.volume_type = fake_volume.fake_volume_type_obj(CTXT) TEST_VOLUME.append(volume) def _volume_get(context, volume_id): """Return predefined volume info.""" return TEST_VOLUME[int(volume_id.replace("-", ""))] TEST_SNAPSHOT = [] snapshot = {} snapshot['id'] = '10000000-0000-0000-0000-{0:012d}'.format(0) snapshot['name'] = 'TEST_SNAPSHOT{0:d}'.format(0) snapshot['provider_location'] = '{0:d}'.format(1) snapshot['status'] = 'available' snapshot['volume_id'] = '00000000-0000-0000-0000-{0:012d}'.format(0) snapshot['volume'] = _volume_get(None, snapshot['volume_id']) snapshot['volume_name'] = 'test-volume{0:d}'.format(0) snapshot['volume_size'] = 128 snapshot = obj_snap.Snapshot._from_db_object( CTXT, obj_snap.Snapshot(), fake_snapshot.fake_db_snapshot(**snapshot)) TEST_SNAPSHOT.append(snapshot) TEST_GROUP = [] for i in range(2): group = {} group['id'] = '20000000-0000-0000-0000-{0:012d}'.format(i) group['status'] = 'available' group = fake_group.fake_group_obj(CTXT, **group) TEST_GROUP.append(group) TEST_GROUP_SNAP = [] group_snapshot = {} group_snapshot['id'] = '30000000-0000-0000-0000-{0:012d}'.format(0) group_snapshot['status'] = 'available' group_snapshot = obj_group_snap.GroupSnapshot._from_db_object( CTXT, obj_group_snap.GroupSnapshot(), fake_group_snapshot.fake_db_group_snapshot(**group_snapshot)) 
TEST_GROUP_SNAP.append(group_snapshot) # Dummy response for REST API POST_SESSIONS_RESULT = { "token": "b74777a3-f9f0-4ea8-bd8f-09847fac48d3", "sessionId": 0, } GET_PORTS_RESULT = { "data": [ { "portId": CONFIG_MAP['port_id'], "portType": "ISCSI", "portAttributes": [ "TAR", "MCU", "RCU", "ELUN" ], "portSpeed": "AUT", "loopId": "00", "fabricMode": False, "lunSecuritySetting": True, }, ], } GET_PORT_RESULT = { "ipv4Address": CONFIG_MAP['ipv4Address'], "tcpPort": CONFIG_MAP['tcpPort'], } GET_HOST_ISCSIS_RESULT = { "data": [ { "hostGroupNumber": 0, "iscsiName": CONFIG_MAP['host_iscsi_name'], }, ], } GET_HOST_GROUP_RESULT = { "hostGroupName": CONFIG_MAP['host_grp_name'], "iscsiName": CONFIG_MAP['target_iscsi_name'], } GET_HOST_GROUPS_RESULT = { "data": [ { "hostGroupNumber": 0, "portId": CONFIG_MAP['port_id'], "hostGroupName": "HPEXP-test", "iscsiName": CONFIG_MAP['target_iscsi_name'], }, ], } COMPLETED_SUCCEEDED_RESULT = { "status": "Completed", "state": "Succeeded", "affectedResources": ('a/b/c/1',), } GET_LDEV_RESULT = { "emulationType": "OPEN-V-CVS", "blockCapacity": 2097152, "attributes": ["CVS", "THP"], "status": "NML", "poolId": 30, "label": "00000000000000000000000000000000", } GET_LDEV_RESULT_MAPPED = { "emulationType": "OPEN-V-CVS", "blockCapacity": 2097152, "attributes": ["CVS", "THP"], "status": "NML", "ports": [ { "portId": CONFIG_MAP['port_id'], "hostGroupNumber": 0, "hostGroupName": CONFIG_MAP['host_grp_name'], "lun": 1 }, ], } GET_LDEV_RESULT_SNAP = { "emulationType": "OPEN-V-CVS", "blockCapacity": 2097152, "attributes": ["CVS", "THP"], "status": "NML", "poolId": 30, "label": "10000000000000000000000000000000", } GET_LDEV_RESULT_PAIR = { "emulationType": "OPEN-V-CVS", "blockCapacity": 2097152, "attributes": ["CVS", "THP", "FS"], "status": "NML", "label": "10000000000000000000000000000000", } GET_POOLS_RESULT = { "data": [ { "poolId": 30, "poolName": CONFIG_MAP['pool_name'], "availableVolumeCapacity": 480144, "totalPoolCapacity": 507780, "totalLocatedCapacity": 71453172, "virtualVolumeCapacityRate": -1, }, ], } GET_SNAPSHOTS_RESULT = { "data": [ { "primaryOrSecondary": "S-VOL", "status": "PSUS", "pvolLdevId": 0, "muNumber": 1, "svolLdevId": 1, }, ], } GET_SNAPSHOTS_RESULT_PAIR = { "data": [ { "primaryOrSecondary": "S-VOL", "status": "PAIR", "pvolLdevId": 0, "muNumber": 1, "svolLdevId": 1, }, ], } GET_LDEVS_RESULT = { "data": [ { "ldevId": 0, "label": "15960cc738c94c5bb4f1365be5eeed44", }, { "ldevId": 1, "label": "15960cc738c94c5bb4f1365be5eeed45", }, ], } NOTFOUND_RESULT = { "data": [], } def _brick_get_connector_properties(multipath=False, enforce_multipath=False): """Return a predefined connector object.""" return DEFAULT_CONNECTOR class FakeResponse(): def __init__(self, status_code, data=None, headers=None): self.status_code = status_code self.data = data self.text = data self.content = data self.headers = {'Content-Type': 'json'} if headers is None else headers def json(self): return self.data class HPEXPRESTISCSIDriverTest(test.TestCase): """Unit test class for HPEXP REST interface iSCSI module.""" test_existing_ref = {'source-id': '1'} test_existing_ref_name = { 'source-name': '15960cc7-38c9-4c5b-b4f1-365be5eeed45'} def setUp(self): """Set up the test environment.""" def _set_required(opts, required): for opt in opts: opt.required = required # Initialize Cinder and avoid checking driver options. 
rest_required_opts = [ opt for opt in hbsd_rest.REST_VOLUME_OPTS if opt.required] common_required_opts = [ opt for opt in hbsd_common.COMMON_VOLUME_OPTS if opt.required] _set_required(rest_required_opts, False) _set_required(common_required_opts, False) super(HPEXPRESTISCSIDriverTest, self).setUp() _set_required(rest_required_opts, True) _set_required(common_required_opts, True) self.configuration = mock.Mock(conf.Configuration) self.ctxt = cinder_context.get_admin_context() self._setup_config() self._setup_driver() def _setup_config(self): """Set configuration parameter values.""" self.configuration.config_group = "REST" self.configuration.volume_backend_name = "RESTISCSI" self.configuration.volume_driver = ( "cinder.volume.drivers.hpe.xp.hpe_xp_iscsi.HPEXPISCSIDriver") self.configuration.reserved_percentage = "0" self.configuration.use_multipath_for_image_xfer = False self.configuration.enforce_multipath_for_image_xfer = False self.configuration.max_over_subscription_ratio = 500.0 self.configuration.driver_ssl_cert_verify = False self.configuration.hpexp_storage_id = CONFIG_MAP['serial'] self.configuration.hpexp_pools = ["30"] self.configuration.hpexp_snap_pool = None self.configuration.hpexp_ldev_range = "0-1" self.configuration.hpexp_target_ports = [CONFIG_MAP['port_id']] self.configuration.hpexp_compute_target_ports = [ CONFIG_MAP['port_id']] self.configuration.hpexp_group_create = True self.configuration.hpexp_group_delete = True self.configuration.hpexp_copy_speed = 3 self.configuration.hpexp_copy_check_interval = 3 self.configuration.hpexp_async_copy_check_interval = 10 self.configuration.san_login = CONFIG_MAP['user_id'] self.configuration.san_password = CONFIG_MAP['user_pass'] self.configuration.san_ip = CONFIG_MAP[ 'rest_server_ip_addr'] self.configuration.san_api_port = CONFIG_MAP[ 'rest_server_ip_port'] self.configuration.hpexp_rest_disable_io_wait = True self.configuration.hpexp_rest_tcp_keepalive = True self.configuration.hpexp_discard_zero_page = True self.configuration.hpexp_rest_number = "0" self.configuration.hpexp_lun_timeout = hbsd_rest._LUN_TIMEOUT self.configuration.hpexp_lun_retry_interval = ( hbsd_rest._LUN_RETRY_INTERVAL) self.configuration.hpexp_restore_timeout = hbsd_rest._RESTORE_TIMEOUT self.configuration.hpexp_state_transition_timeout = ( hbsd_rest._STATE_TRANSITION_TIMEOUT) self.configuration.hpexp_lock_timeout = hbsd_rest_api._LOCK_TIMEOUT self.configuration.hpexp_rest_timeout = hbsd_rest_api._REST_TIMEOUT self.configuration.hpexp_extend_timeout = ( hbsd_rest_api._EXTEND_TIMEOUT) self.configuration.hpexp_exec_retry_interval = ( hbsd_rest_api._EXEC_RETRY_INTERVAL) self.configuration.hpexp_rest_connect_timeout = ( hbsd_rest_api._DEFAULT_CONNECT_TIMEOUT) self.configuration.hpexp_rest_job_api_response_timeout = ( hbsd_rest_api._JOB_API_RESPONSE_TIMEOUT) self.configuration.hpexp_rest_get_api_response_timeout = ( hbsd_rest_api._GET_API_RESPONSE_TIMEOUT) self.configuration.hpexp_rest_server_busy_timeout = ( hbsd_rest_api._REST_SERVER_BUSY_TIMEOUT) self.configuration.hpexp_rest_keep_session_loop_interval = ( hbsd_rest_api._KEEP_SESSION_LOOP_INTERVAL) self.configuration.hpexp_rest_another_ldev_mapped_retry_timeout = ( hbsd_rest_api._ANOTHER_LDEV_MAPPED_RETRY_TIMEOUT) self.configuration.hpexp_rest_tcp_keepidle = ( hbsd_rest_api._TCP_KEEPIDLE) self.configuration.hpexp_rest_tcp_keepintvl = ( hbsd_rest_api._TCP_KEEPINTVL) self.configuration.hpexp_rest_tcp_keepcnt = ( hbsd_rest_api._TCP_KEEPCNT) self.configuration.hpexp_host_mode_options = [] 
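        # The CHAP credentials below are what the iSCSI initialize_connection
        # tests further down expect for auth_method / auth_username /
        # auth_password.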
self.configuration.use_chap_auth = True self.configuration.chap_username = CONFIG_MAP['auth_user'] self.configuration.chap_password = CONFIG_MAP['auth_password'] self.configuration.san_thin_provision = True self.configuration.san_private_key = '' self.configuration.san_clustername = '' self.configuration.san_ssh_port = '22' self.configuration.san_is_local = False self.configuration.ssh_conn_timeout = '30' self.configuration.ssh_min_pool_conn = '1' self.configuration.ssh_max_pool_conn = '5' self.configuration.safe_get = self._fake_safe_get CONF = cfg.CONF CONF.my_ip = CONFIG_MAP['my_ip'] def _fake_safe_get(self, value): """Retrieve a configuration value avoiding throwing an exception.""" try: val = getattr(self.configuration, value) except AttributeError: val = None return val @mock.patch.object(requests.Session, "request") @mock.patch.object( volume_utils, 'brick_get_connector_properties', side_effect=_brick_get_connector_properties) def _setup_driver( self, brick_get_connector_properties=None, request=None): """Set up the driver environment.""" self.driver = hpe_xp_iscsi.HPEXPISCSIDriver( configuration=self.configuration) request.side_effect = [FakeResponse(200, POST_SESSIONS_RESULT), FakeResponse(200, GET_PORTS_RESULT), FakeResponse(200, GET_PORT_RESULT), FakeResponse(200, GET_HOST_ISCSIS_RESULT), FakeResponse(200, GET_HOST_GROUP_RESULT)] self.driver.do_setup(None) self.driver.check_for_setup_error() self.driver.local_path(None) self.driver.create_export(None, None, None) self.driver.ensure_export(None, None) self.driver.remove_export(None, None) self.driver.create_export_snapshot(None, None, None) self.driver.remove_export_snapshot(None, None) # stop the Loopingcall within the do_setup treatment self.driver.common.client.keep_session_loop.stop() def tearDown(self): self.client = None super(HPEXPRESTISCSIDriverTest, self).tearDown() # API test cases @mock.patch.object(requests.Session, "request") @mock.patch.object( volume_utils, 'brick_get_connector_properties', side_effect=_brick_get_connector_properties) def test_do_setup(self, brick_get_connector_properties, request): drv = hpe_xp_iscsi.HPEXPISCSIDriver( configuration=self.configuration) self._setup_config() request.side_effect = [FakeResponse(200, POST_SESSIONS_RESULT), FakeResponse(200, GET_PORTS_RESULT), FakeResponse(200, GET_PORT_RESULT), FakeResponse(200, GET_HOST_ISCSIS_RESULT), FakeResponse(200, GET_HOST_GROUP_RESULT)] drv.do_setup(None) self.assertEqual( {CONFIG_MAP['port_id']: '%(ip)s:%(port)s' % { 'ip': CONFIG_MAP['ipv4Address'], 'port': CONFIG_MAP['tcpPort']}}, drv.common.storage_info['portals']) self.assertEqual(1, brick_get_connector_properties.call_count) self.assertEqual(5, request.call_count) # stop the Loopingcall within the do_setup treatment self.driver.common.client.keep_session_loop.stop() self.driver.common.client.keep_session_loop.wait() @mock.patch.object(requests.Session, "request") @mock.patch.object( volume_utils, 'brick_get_connector_properties', side_effect=_brick_get_connector_properties) def test_do_setup_create_hg(self, brick_get_connector_properties, request): """Normal case: The host group not exists.""" drv = hpe_xp_iscsi.HPEXPISCSIDriver( configuration=self.configuration) self._setup_config() request.side_effect = [FakeResponse(200, POST_SESSIONS_RESULT), FakeResponse(200, GET_PORTS_RESULT), FakeResponse(200, GET_PORT_RESULT), FakeResponse(200, NOTFOUND_RESULT), FakeResponse(200, NOTFOUND_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), 
FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)] drv.do_setup(None) self.assertEqual( {CONFIG_MAP['port_id']: '%(ip)s:%(port)s' % { 'ip': CONFIG_MAP['ipv4Address'], 'port': CONFIG_MAP['tcpPort']}}, drv.common.storage_info['portals']) self.assertEqual(CONFIG_MAP['host_grp_name'], drv.common.format_info['group_name_format'].format( ip=DEFAULT_CONNECTOR['ip'])) self.assertEqual(1, brick_get_connector_properties.call_count) self.assertEqual(8, request.call_count) # stop the Loopingcall within the do_setup treatment self.driver.common.client.keep_session_loop.stop() self.driver.common.client.keep_session_loop.wait() @mock.patch.object(requests.Session, "request") def test_extend_volume(self, request): request.side_effect = [FakeResponse(200, GET_LDEV_RESULT), FakeResponse(200, GET_LDEV_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)] self.driver.extend_volume(TEST_VOLUME[0], 256) self.assertEqual(3, request.call_count) @mock.patch.object(driver.ISCSIDriver, "get_goodness_function") @mock.patch.object(driver.ISCSIDriver, "get_filter_function") @mock.patch.object(requests.Session, "request") def test__update_volume_stats( self, request, get_filter_function, get_goodness_function): request.return_value = FakeResponse(200, GET_POOLS_RESULT) get_filter_function.return_value = None get_goodness_function.return_value = None self.driver._update_volume_stats() self.assertEqual( 'Hewlett Packard Enterprise', self.driver._stats['vendor_name']) self.assertTrue(self.driver._stats["pools"][0]['multiattach']) self.assertEqual(1, request.call_count) self.assertEqual(1, get_filter_function.call_count) self.assertEqual(1, get_goodness_function.call_count) @mock.patch.object(requests.Session, "request") @mock.patch.object(volume_types, 'get_volume_type_extra_specs') @mock.patch.object(volume_types, 'get_volume_type_qos_specs') def test_create_volume( self, get_volume_type_qos_specs, get_volume_type_extra_specs, request): request.return_value = FakeResponse(202, COMPLETED_SUCCEEDED_RESULT) get_volume_type_extra_specs.return_value = {} get_volume_type_qos_specs.return_value = {'qos_specs': None} self.driver.common._stats = {} self.driver.common._stats['pools'] = [ {'location_info': {'pool_id': 30}}] ret = self.driver.create_volume(TEST_VOLUME[4]) self.assertEqual('1', ret['provider_location']) self.assertEqual(2, request.call_count) @mock.patch.object(requests.Session, "request") def test_delete_volume(self, request): request.side_effect = [FakeResponse(200, GET_LDEV_RESULT), FakeResponse(200, GET_LDEV_RESULT), FakeResponse(200, GET_LDEV_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)] self.driver.delete_volume(TEST_VOLUME[0]) self.assertEqual(4, request.call_count) @mock.patch.object(requests.Session, "request") @mock.patch.object(sqlalchemy_api, 'volume_get', side_effect=_volume_get) @mock.patch.object(volume_types, 'get_volume_type_extra_specs') @mock.patch.object(volume_types, 'get_volume_type_qos_specs') def test_create_snapshot( self, get_volume_type_qos_specs, get_volume_type_extra_specs, volume_get, request): request.side_effect = [FakeResponse(200, GET_LDEV_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(200, GET_SNAPSHOTS_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)] get_volume_type_extra_specs.return_value = {} get_volume_type_qos_specs.return_value = {'qos_specs': None} self.driver.common._stats = {} self.driver.common._stats['pools'] = [ {'location_info': {'pool_id': 30}}] ret = 
self.driver.create_snapshot(TEST_SNAPSHOT[0]) self.assertEqual('1', ret['provider_location']) self.assertEqual(5, request.call_count) @mock.patch.object(requests.Session, "request") def test_delete_snapshot(self, request): request.side_effect = [FakeResponse(200, GET_LDEV_RESULT_SNAP), FakeResponse(200, GET_LDEV_RESULT), FakeResponse(200, GET_LDEV_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)] self.driver.delete_snapshot(TEST_SNAPSHOT[0]) self.assertEqual(4, request.call_count) @mock.patch.object(requests.Session, "request") @mock.patch.object(volume_types, 'get_volume_type_extra_specs') @mock.patch.object(volume_types, 'get_volume_type_qos_specs') def test_create_cloned_volume( self, get_volume_type_qos_specs, get_volume_type_extra_specs, request): request.side_effect = [FakeResponse(200, GET_LDEV_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(200, GET_SNAPSHOTS_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)] get_volume_type_extra_specs.return_value = {} get_volume_type_qos_specs.return_value = {'qos_specs': None} self.driver.common._stats = {} self.driver.common._stats['pools'] = [ {'location_info': {'pool_id': 30}}] vol = self.driver.create_cloned_volume(TEST_VOLUME[0], TEST_VOLUME[1]) self.assertEqual('1', vol['provider_location']) self.assertEqual(5, request.call_count) @mock.patch.object(requests.Session, "request") @mock.patch.object(volume_types, 'get_volume_type_extra_specs') @mock.patch.object(volume_types, 'get_volume_type_qos_specs') def test_create_volume_from_snapshot( self, get_volume_type_qos_specs, get_volume_type_extra_specs, request): request.side_effect = [FakeResponse(200, GET_LDEV_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(200, GET_SNAPSHOTS_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)] get_volume_type_extra_specs.return_value = {} get_volume_type_qos_specs.return_value = {'qos_specs': None} self.driver.common._stats = {} self.driver.common._stats['pools'] = [ {'location_info': {'pool_id': 30}}] vol = self.driver.create_volume_from_snapshot( TEST_VOLUME[0], TEST_SNAPSHOT[0]) self.assertEqual('1', vol['provider_location']) self.assertEqual(5, request.call_count) @mock.patch.object(requests.Session, "request") @mock.patch.object(volume_types, 'get_volume_type_extra_specs') def test_initialize_connection( self, get_volume_type_extra_specs, request): get_volume_type_extra_specs.return_value = {} request.side_effect = [FakeResponse(200, GET_HOST_ISCSIS_RESULT), FakeResponse(200, GET_HOST_GROUP_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)] ret = self.driver.initialize_connection( TEST_VOLUME[0], DEFAULT_CONNECTOR) self.assertEqual('iscsi', ret['driver_volume_type']) self.assertEqual( '%(ip)s:%(port)s' % { 'ip': CONFIG_MAP['ipv4Address'], 'port': CONFIG_MAP['tcpPort'], }, ret['data']['target_portal']) self.assertEqual(CONFIG_MAP['target_iscsi_name'], ret['data']['target_iqn']) self.assertEqual('CHAP', ret['data']['auth_method']) self.assertEqual(CONFIG_MAP['auth_user'], ret['data']['auth_username']) self.assertEqual( CONFIG_MAP['auth_password'], ret['data']['auth_password']) self.assertEqual(1, ret['data']['target_lun']) self.assertEqual(3, request.call_count) @mock.patch.object(requests.Session, "request") @mock.patch.object(volume_types, 'get_volume_type_extra_specs') def test_initialize_connection_shared_target( self, get_volume_type_extra_specs, request): """Normal case: A target shared with 
other systems.""" get_volume_type_extra_specs.return_value = {} request.side_effect = [FakeResponse(200, NOTFOUND_RESULT), FakeResponse(200, GET_HOST_GROUPS_RESULT), FakeResponse(200, GET_HOST_ISCSIS_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)] ret = self.driver.initialize_connection( TEST_VOLUME[0], DEFAULT_CONNECTOR) self.assertEqual('iscsi', ret['driver_volume_type']) self.assertEqual( '%(ip)s:%(port)s' % { 'ip': CONFIG_MAP['ipv4Address'], 'port': CONFIG_MAP['tcpPort'], }, ret['data']['target_portal']) self.assertEqual(CONFIG_MAP['target_iscsi_name'], ret['data']['target_iqn']) self.assertEqual('CHAP', ret['data']['auth_method']) self.assertEqual(CONFIG_MAP['auth_user'], ret['data']['auth_username']) self.assertEqual( CONFIG_MAP['auth_password'], ret['data']['auth_password']) self.assertEqual(1, ret['data']['target_lun']) self.assertEqual(4, request.call_count) @mock.patch.object(requests.Session, "request") def test_terminate_connection(self, request): request.side_effect = [FakeResponse(200, GET_HOST_ISCSIS_RESULT), FakeResponse(200, GET_HOST_GROUP_RESULT), FakeResponse(200, GET_LDEV_RESULT_MAPPED), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(200, NOTFOUND_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)] self.driver.terminate_connection(TEST_VOLUME[2], DEFAULT_CONNECTOR) self.assertEqual(6, request.call_count) @mock.patch.object(requests.Session, "request") def test_terminate_connection_not_connector(self, request): """Normal case: Connector is None.""" request.side_effect = [FakeResponse(200, GET_LDEV_RESULT_MAPPED), FakeResponse(200, GET_HOST_GROUP_RESULT), FakeResponse(200, GET_HOST_ISCSIS_RESULT), FakeResponse(200, GET_HOST_GROUPS_RESULT), FakeResponse(200, GET_HOST_ISCSIS_RESULT), FakeResponse(200, GET_LDEV_RESULT_MAPPED), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(200, NOTFOUND_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)] self.driver.terminate_connection(TEST_VOLUME[2], None) self.assertEqual(9, request.call_count) @mock.patch.object(requests.Session, "request") def test_initialize_connection_snapshot(self, request): request.side_effect = [FakeResponse(200, GET_HOST_ISCSIS_RESULT), FakeResponse(200, GET_HOST_GROUP_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)] ret = self.driver.initialize_connection_snapshot( TEST_SNAPSHOT[0], DEFAULT_CONNECTOR) self.assertEqual('iscsi', ret['driver_volume_type']) self.assertEqual( '%(ip)s:%(port)s' % { 'ip': CONFIG_MAP['ipv4Address'], 'port': CONFIG_MAP['tcpPort'], }, ret['data']['target_portal']) self.assertEqual(CONFIG_MAP['target_iscsi_name'], ret['data']['target_iqn']) self.assertEqual('CHAP', ret['data']['auth_method']) self.assertEqual(CONFIG_MAP['auth_user'], ret['data']['auth_username']) self.assertEqual( CONFIG_MAP['auth_password'], ret['data']['auth_password']) self.assertEqual(1, ret['data']['target_lun']) self.assertEqual(3, request.call_count) @mock.patch.object(requests.Session, "request") def test_terminate_connection_snapshot(self, request): request.side_effect = [FakeResponse(200, GET_HOST_ISCSIS_RESULT), FakeResponse(200, GET_HOST_GROUP_RESULT), FakeResponse(200, GET_LDEV_RESULT_MAPPED), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(200, NOTFOUND_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)] self.driver.terminate_connection_snapshot( TEST_SNAPSHOT[0], DEFAULT_CONNECTOR) self.assertEqual(6, request.call_count) @mock.patch.object(requests.Session, "request") @mock.patch.object(volume_types, 'get_volume_type_qos_specs') def 
test_manage_existing(self, get_volume_type_qos_specs, request): get_volume_type_qos_specs.return_value = {'qos_specs': None} request.side_effect = [FakeResponse(200, GET_LDEV_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(200, GET_LDEVS_RESULT)] ret = self.driver.manage_existing( TEST_VOLUME[0], self.test_existing_ref) self.assertEqual('1', ret['provider_location']) self.assertEqual(3, request.call_count) @mock.patch.object(requests.Session, "request") @mock.patch.object(volume_types, 'get_volume_type_qos_specs') def test_manage_existing_name(self, get_volume_type_qos_specs, request): get_volume_type_qos_specs.return_value = {'qos_specs': None} request.side_effect = [FakeResponse(200, GET_LDEVS_RESULT), FakeResponse(200, GET_LDEV_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(200, GET_LDEVS_RESULT)] ret = self.driver.manage_existing( TEST_VOLUME[0], self.test_existing_ref_name) self.assertEqual('1', ret['provider_location']) self.assertEqual(4, request.call_count) @mock.patch.object(requests.Session, "request") def test_manage_existing_get_size(self, request): request.return_value = FakeResponse(200, GET_LDEV_RESULT) self.driver.manage_existing_get_size( TEST_VOLUME[0], self.test_existing_ref) self.assertEqual(1, request.call_count) @mock.patch.object(requests.Session, "request") def test_manage_existing_get_size_name(self, request): request.side_effect = [FakeResponse(200, GET_LDEVS_RESULT), FakeResponse(200, GET_LDEV_RESULT)] self.driver.manage_existing_get_size( TEST_VOLUME[0], self.test_existing_ref_name) self.assertEqual(2, request.call_count) @mock.patch.object(requests.Session, "request") def test_unmanage(self, request): request.side_effect = [FakeResponse(200, GET_LDEV_RESULT), FakeResponse(200, GET_LDEV_RESULT)] self.driver.unmanage(TEST_VOLUME[0]) self.assertEqual(2, request.call_count) @mock.patch.object(requests.Session, "request") def test_copy_image_to_volume(self, request): image_service = 'fake_image_service' image_id = 'fake_image_id' request.return_value = FakeResponse(202, COMPLETED_SUCCEEDED_RESULT) with mock.patch.object(driver.VolumeDriver, 'copy_image_to_volume') \ as mock_copy_image: self.driver.copy_image_to_volume( self.ctxt, TEST_VOLUME[0], image_service, image_id) mock_copy_image.assert_called_with( self.ctxt, TEST_VOLUME[0], image_service, image_id, disable_sparse=False) self.assertEqual(1, request.call_count) @mock.patch.object(requests.Session, "request") def test_update_migrated_volume(self, request): request.return_value = FakeResponse(202, COMPLETED_SUCCEEDED_RESULT) ret = self.driver.update_migrated_volume( self.ctxt, TEST_VOLUME[0], TEST_VOLUME[1], "available") self.assertEqual(1, request.call_count) actual = ({'_name_id': TEST_VOLUME[1]['id'], 'provider_location': TEST_VOLUME[1]['provider_location']}) self.assertEqual(actual, ret) def test_unmanage_snapshot(self): """The driver doesn't support unmanage_snapshot.""" self.assertRaises( NotImplementedError, self.driver.unmanage_snapshot, TEST_SNAPSHOT[0]) @mock.patch.object(requests.Session, "request") def test_retype(self, request): request.return_value = FakeResponse(200, GET_LDEV_RESULT) new_specs = {'hpe_xp:test': 'test'} new_type_ref = volume_types.create(self.ctxt, 'new', new_specs) new_type = volume_types.get_volume_type(self.ctxt, new_type_ref['id']) diff = {} host = { 'capabilities': { 'location_info': { 'pool_id': 30, }, }, } ret = self.driver.retype( self.ctxt, TEST_VOLUME[0], new_type, diff, host) self.assertEqual(1, request.call_count)
self.assertTrue(ret) def test_backup_use_temp_snapshot(self): self.assertTrue(self.driver.backup_use_temp_snapshot()) @mock.patch.object(requests.Session, "request") def test_revert_to_snapshot(self, request): request.side_effect = [FakeResponse(200, GET_LDEV_RESULT_PAIR), FakeResponse(200, GET_SNAPSHOTS_RESULT), FakeResponse(200, GET_SNAPSHOTS_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(200, GET_SNAPSHOTS_RESULT)] self.driver.revert_to_snapshot( self.ctxt, TEST_VOLUME[0], TEST_SNAPSHOT[0]) self.assertEqual(5, request.call_count) def test_create_group(self): ret = self.driver.create_group(self.ctxt, TEST_GROUP[0]) self.assertIsNone(ret) @mock.patch.object(requests.Session, "request") def test_delete_group(self, request): request.side_effect = [FakeResponse(200, GET_LDEV_RESULT), FakeResponse(200, GET_LDEV_RESULT), FakeResponse(200, GET_LDEV_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)] ret = self.driver.delete_group( self.ctxt, TEST_GROUP[0], [TEST_VOLUME[0]]) self.assertEqual(4, request.call_count) actual = ( {'status': TEST_GROUP[0]['status']}, [{'id': TEST_VOLUME[0]['id'], 'status': 'deleted'}] ) self.assertTupleEqual(actual, ret) @mock.patch.object(requests.Session, "request") @mock.patch.object(volume_types, 'get_volume_type_extra_specs') @mock.patch.object(volume_types, 'get_volume_type_qos_specs') def test_create_group_from_src_volume( self, get_volume_type_qos_specs, get_volume_type_extra_specs, request): request.side_effect = [FakeResponse(200, GET_LDEV_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(200, GET_SNAPSHOTS_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)] get_volume_type_extra_specs.return_value = {} get_volume_type_qos_specs.return_value = {'qos_specs': None} self.driver.common._stats = {} self.driver.common._stats['pools'] = [ {'location_info': {'pool_id': 30}}] ret = self.driver.create_group_from_src( self.ctxt, TEST_GROUP[1], [TEST_VOLUME[1]], source_group=TEST_GROUP[0], source_vols=[TEST_VOLUME[0]] ) self.assertEqual(5, request.call_count) actual = ( None, [{'id': TEST_VOLUME[1]['id'], 'provider_location': '1'}]) self.assertTupleEqual(actual, ret) @mock.patch.object(requests.Session, "request") @mock.patch.object(volume_types, 'get_volume_type_extra_specs') @mock.patch.object(volume_types, 'get_volume_type_qos_specs') def test_create_group_from_src_snapshot( self, get_volume_type_qos_specs, get_volume_type_extra_specs, request): request.side_effect = [FakeResponse(200, GET_LDEV_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(200, GET_SNAPSHOTS_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)] get_volume_type_extra_specs.return_value = {} get_volume_type_qos_specs.return_value = {'qos_specs': None} self.driver.common._stats = {} self.driver.common._stats['pools'] = [ {'location_info': {'pool_id': 30}}] ret = self.driver.create_group_from_src( self.ctxt, TEST_GROUP[0], [TEST_VOLUME[0]], group_snapshot=TEST_GROUP_SNAP[0], snapshots=[TEST_SNAPSHOT[0]] ) self.assertEqual(5, request.call_count) actual = ( None, [{'id': TEST_VOLUME[0]['id'], 'provider_location': '1'}]) self.assertTupleEqual(actual, ret) def test_create_group_from_src_volume_error(self): self.assertRaises( exception.VolumeDriverException, self.driver.create_group_from_src, self.ctxt, TEST_GROUP[1], [TEST_VOLUME[1]], source_group=TEST_GROUP[0], source_vols=[TEST_VOLUME[3]] ) @mock.patch.object(volume_utils, 
'is_group_a_cg_snapshot_type') def test_update_group(self, is_group_a_cg_snapshot_type): is_group_a_cg_snapshot_type.return_value = False ret = self.driver.update_group( self.ctxt, TEST_GROUP[0], add_volumes=[TEST_VOLUME[0]]) self.assertTupleEqual((None, None, None), ret) @mock.patch.object(volume_utils, 'is_group_a_cg_snapshot_type') def test_update_group_error(self, is_group_a_cg_snapshot_type): is_group_a_cg_snapshot_type.return_value = True self.assertRaises( exception.VolumeDriverException, self.driver.update_group, self.ctxt, TEST_GROUP[0], add_volumes=[TEST_VOLUME[3]], remove_volumes=[TEST_VOLUME[0]] ) @mock.patch.object(requests.Session, "request") @mock.patch.object(sqlalchemy_api, 'volume_get', side_effect=_volume_get) @mock.patch.object(volume_utils, 'is_group_a_cg_snapshot_type') @mock.patch.object(volume_types, 'get_volume_type_extra_specs') @mock.patch.object(volume_types, 'get_volume_type_qos_specs') def test_create_group_snapshot_non_cg( self, get_volume_type_qos_specs, get_volume_type_extra_specs, is_group_a_cg_snapshot_type, volume_get, request): is_group_a_cg_snapshot_type.return_value = False get_volume_type_extra_specs.return_value = {} get_volume_type_qos_specs.return_value = {'qos_specs': None} request.side_effect = [FakeResponse(200, GET_LDEV_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(200, GET_SNAPSHOTS_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)] self.driver.common._stats = {} self.driver.common._stats['pools'] = [ {'location_info': {'pool_id': 30}}] ret = self.driver.create_group_snapshot( self.ctxt, TEST_GROUP_SNAP[0], [TEST_SNAPSHOT[0]] ) self.assertEqual(5, request.call_count) actual = ( {'status': 'available'}, [{'id': TEST_SNAPSHOT[0]['id'], 'provider_location': '1', 'status': 'available'}] ) self.assertTupleEqual(actual, ret) @mock.patch.object(requests.Session, "request") @mock.patch.object(sqlalchemy_api, 'volume_get', side_effect=_volume_get) @mock.patch.object(volume_utils, 'is_group_a_cg_snapshot_type') @mock.patch.object(volume_types, 'get_volume_type_extra_specs') @mock.patch.object(volume_types, 'get_volume_type_qos_specs') def test_create_group_snapshot_cg( self, get_volume_type_qos_specs, get_volume_type_extra_specs, is_group_a_cg_snapshot_type, volume_get, request): is_group_a_cg_snapshot_type.return_value = True get_volume_type_extra_specs.return_value = {} get_volume_type_qos_specs.return_value = {'qos_specs': None} request.side_effect = [FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(200, GET_SNAPSHOTS_RESULT_PAIR), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(200, GET_SNAPSHOTS_RESULT)] self.driver.common._stats = {} self.driver.common._stats['pools'] = [ {'location_info': {'pool_id': 30}}] ret = self.driver.create_group_snapshot( self.ctxt, TEST_GROUP_SNAP[0], [TEST_SNAPSHOT[0]] ) self.assertEqual(6, request.call_count) actual = ( None, [{'id': TEST_SNAPSHOT[0]['id'], 'provider_location': '1', 'status': 'available'}] ) self.assertTupleEqual(actual, ret) @mock.patch.object(requests.Session, "request") def test_delete_group_snapshot(self, request): request.side_effect = [FakeResponse(200, GET_LDEV_RESULT_PAIR), FakeResponse(200, NOTFOUND_RESULT), FakeResponse(200, GET_SNAPSHOTS_RESULT), FakeResponse(200, GET_SNAPSHOTS_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(200, 
GET_LDEV_RESULT), FakeResponse(200, GET_LDEV_RESULT), FakeResponse(200, GET_LDEV_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)] ret = self.driver.delete_group_snapshot( self.ctxt, TEST_GROUP_SNAP[0], [TEST_SNAPSHOT[0]]) self.assertEqual(10, request.call_count) actual = ( {'status': TEST_GROUP_SNAP[0]['status']}, [{'id': TEST_SNAPSHOT[0]['id'], 'status': 'deleted'}] ) self.assertTupleEqual(actual, ret) @mock.patch.object(hpe_xp_iscsi.HPEXPISCSIDriver, "_get_oslo_driver_opts") def test_get_driver_options(self, _get_oslo_driver_opts): _get_oslo_driver_opts.return_value = [] ret = self.driver.get_driver_options() actual = (hpe_xp_rest.COMMON_VOLUME_OPTS + hpe_xp_rest.REST_VOLUME_OPTS) self.assertEqual(actual, ret) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315577.2791202 cinder-27.0.0/cinder/tests/unit/volume/drivers/huawei/0000775000175000017500000000000000000000000022723 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/volume/drivers/huawei/__init__.py0000664000175000017500000000000000000000000025022 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/volume/drivers/huawei/test_huawei_drivers.py0000664000175000017500000066110600000000000027366 0ustar00zuulzuul00000000000000# Copyright (c) 2016 Huawei Technologies Co., Ltd. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""Tests for huawei drivers.""" import collections import copy import json import re import tempfile from unittest import mock from xml.dom import minidom from xml.etree import ElementTree import ddt import requests from cinder import context from cinder import exception from cinder import objects from cinder.objects import fields from cinder.tests.unit import fake_group from cinder.tests.unit import fake_group_snapshot from cinder.tests.unit import fake_snapshot from cinder.tests.unit import fake_volume from cinder.tests.unit import test from cinder.tests.unit import utils from cinder.volume import configuration as conf from cinder.volume.drivers.huawei import common from cinder.volume.drivers.huawei import constants from cinder.volume.drivers.huawei import fc_zone_helper from cinder.volume.drivers.huawei import huawei_conf from cinder.volume.drivers.huawei import huawei_driver from cinder.volume.drivers.huawei import huawei_utils from cinder.volume.drivers.huawei import hypermetro from cinder.volume.drivers.huawei import replication from cinder.volume.drivers.huawei import rest_client from cinder.volume.drivers.huawei import smartx from cinder.volume import qos_specs from cinder.volume import volume_types from cinder.volume import volume_utils admin_contex = context.get_admin_context() vol_attrs = ('id', 'lun_type', 'provider_location', 'metadata') Volume = collections.namedtuple('Volume', vol_attrs) PROVIDER_LOCATION = ('{"huawei_lun_id": "11", ' '"huawei_lun_wwn": "6643e8c1004c5f6723e9f454003"}') PROVIDER_LOCATION_WITH_HYPERMETRO = ( '{"huawei_lun_id": "11", ' '"huawei_lun_wwn": "6643e8c1004c5f6723e9f454003", ' '"hypermetro_id": "11", ' '"remote_lun_id": "1"}') SNAP_PROVIDER_LOCATION = '{"huawei_snapshot_id": "11"}' HOST = 'ubuntu001@backend001#OpenStack_Pool' ENCODE_HOST_NAME = huawei_utils.encode_host_name(HOST) HOST2 = 'ubuntu002@backend002#OpenStack_Pool' OLD_ENCODE_HOST_NAME = huawei_utils.old_encode_host_name(HOST2) ID = '21ec7341-9256-497b-97d9-ef48edcf0635' ENCODE_NAME = huawei_utils.encode_name(ID) ID2 = 'ee00eb7c-40dc-4256-bfea-6c3a16ab850d' OLD_ENCODE_NAME = huawei_utils.old_encode_name(ID2) METADATA = [{'key': 'huawei_lun_id', 'value': '11'}, {'key': 'huawei_lun_wwn', 'value': '6643e8c1004c5f6723e9f454003'}] TEST_PAIR_ID = "3400a30d844d0004" VOL_METADATA = [{'key': 'hypermetro_id', 'value': '11'}, {'key': 'remote_lun_id', 'value': '1'}] ADMIN_METADATA = [{'key': 'huawei_lun_wwn', 'value': 'FAKE_WWN'}] REPLICA_DRIVER_DATA = ('{"pair_id": "%s", "rmt_lun_id": "1", ' '"rmt_lun_wwn": "FAKE_RMT_LUN_WWN"}') % TEST_PAIR_ID hypermetro_devices = """{ "remote_device": { "RestURL": "http://192.0.2.69:8082/deviceManager/rest", "UserName": "admin", "UserPassword": "Admin@storage1", "StoragePool": "OpenStack_Pool", "domain_name": "hypermetro-domain", "remote_target_ip": "192.0.2.241" } } """ fake_smartx_value = {'smarttier': 'true', 'smartcache': 'true', 'smartpartition': 'true', 'thin_provisioning_support': 'true', 'thick_provisioning_support': False, 'policy': '2', 'cachename': 'cache-test', 'partitionname': 'partition-test', } fake_hypermetro_opts = {'hypermetro': 'true', 'smarttier': False, 'smartcache': False, 'smartpartition': False, 'thin_provisioning_support': False, 'thick_provisioning_support': False, } sync_replica_specs = {'capabilities:replication_enabled': ' True', 'replication_type': ' sync'} async_replica_specs = {'capabilities:replication_enabled': ' True', 'replication_type': ' async'} replica_hypermetro_specs = {'capabilities:hypermetro': ' True', 
'capabilities:replication_enabled': ' True'} test_host = {'host': 'ubuntu001@backend001#OpenStack_Pool', 'capabilities': {'smartcache': True, 'location_info': '210235G7J20000000000', 'QoS_support': True, 'pool_name': 'OpenStack_Pool', 'timestamp': '2015-07-13T11:41:00.513549', 'smartpartition': True, 'allocated_capacity_gb': 0, 'volume_backend_name': 'HuaweiFCDriver', 'free_capacity_gb': 20.0, 'driver_version': '1.1.0', 'total_capacity_gb': 20.0, 'smarttier': True, 'hypermetro': True, 'reserved_percentage': 0, 'vendor_name': None, 'thick_provisioning_support': False, 'thin_provisioning_support': True, 'storage_protocol': 'FC', } } test_new_type = { 'name': u'new_type', 'qos_specs_id': None, 'deleted': False, 'created_at': None, 'updated_at': None, 'extra_specs': { 'capabilities:smarttier': ' true', 'capabilities:smartcache': ' true', 'capabilities:smartpartition': ' true', 'capabilities:thin_provisioning_support': ' true', 'capabilities:thick_provisioning_support': ' False', 'policy': '2', 'smartcache:cachename': 'cache-test', 'smartpartition:partitionname': 'partition-test', }, 'is_public': True, 'deleted_at': None, 'id': u'530a56e1-a1a4-49f3-ab6c-779a6e5d999f', 'description': None, } test_new_replication_type = { 'name': u'new_type', 'qos_specs_id': None, 'deleted': False, 'created_at': None, 'updated_at': None, 'extra_specs': { 'capabilities:replication_enabled': ' True', 'replication_type': ' sync', }, 'is_public': True, 'deleted_at': None, 'id': u'530a56e1-a1a4-49f3-ab6c-779a6e5d999f', 'description': None, } test_hypermetro_type = { 'name': u'new_type', 'qos_specs_id': None, 'deleted': False, 'created_at': None, 'updated_at': None, 'extra_specs': { 'capabilities:hypermetro': ' True' }, 'is_public': True, 'deleted_at': None, 'id': u'550c089b-bfdd-4f7f-86e1-3ba88125555c', 'description': None, } hypermetro_devices = """ { "remote_device": { "RestURL": "http://192.0.2.69:8082/deviceManager/rest", "UserName":"admin", "UserPassword":"Admin@storage2", "StoragePool":"OpenStack_Pool", "domain_name":"hypermetro_test"} } """ FAKE_FIND_POOL_RESPONSE = {'CAPACITY': '985661440', 'ID': '0', 'TOTALCAPACITY': '985661440'} FAKE_CREATE_VOLUME_RESPONSE = {"ID": "1", "NAME": "5mFHcBv4RkCcD+JyrWc0SA", "WWN": '6643e8c1004c5f6723e9f454003'} FakeConnector = {'initiator': 'iqn.1993-08.debian:01:ec2bff7ac3a3', 'multipath': False, 'wwpns': ['10000090fa0d6754'], 'wwnns': ['10000090fa0d6755'], 'host': 'ubuntuc', } smarttier_opts = {'smarttier': 'true', 'smartpartition': False, 'smartcache': False, 'thin_provisioning_support': True, 'thick_provisioning_support': False, 'policy': '3', 'readcachepolicy': '1', 'writecachepolicy': None, } fake_fabric_mapping = { 'swd1': { 'target_port_wwn_list': ['2000643e8c4c5f66'], 'initiator_port_wwn_list': ['10000090fa0d6754'] } } fake_fabric_mapping_no_ports = { 'swd1': { 'target_port_wwn_list': [], 'initiator_port_wwn_list': ['10000090fa0d6754'] } } fake_fabric_mapping_no_wwn = { 'swd1': { 'target_port_wwn_list': ['2000643e8c4c5f66'], 'initiator_port_wwn_list': [] } } CHANGE_OPTS = {'policy': ('1', '2'), 'partitionid': (['1', 'partition001'], ['2', 'partition002']), 'cacheid': (['1', 'cache001'], ['2', 'cache002']), 'qos': (['11', {'MAXIOPS': '100', 'IOType': '1'}], {'MAXIOPS': '100', 'IOType': '2', 'MIN': 1, 'LATENCY': 1}), 'host': ('ubuntu@huawei#OpenStack_Pool', 'ubuntu@huawei#OpenStack_Pool'), 'LUNType': ('0', '1'), } # A fake response of create a host FAKE_CREATE_HOST_RESPONSE = """ { "error": { "code": 0 }, "data":{"NAME": "ubuntuc001", "ID": "1"} } """ 
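# NOTE: every FAKE_*_RESPONSE constant in this module mimics the JSON
# envelope of the faked storage REST API: an "error" object whose "code" of 0
# means success (any other code is a storage-side failure, see
# FAKE_COMMON_FAIL_RESPONSE below) plus a "data" payload. A minimal sketch of
# how such a payload is consumed -- the local names here are illustrative
# only, not part of the driver code:
#
#     reply = json.loads(FAKE_CREATE_HOST_RESPONSE)
#     assert reply['error']['code'] == 0
#     host_id = reply['data']['ID']    # -> "1"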
FAKE_GET_HOST_RESPONSE = """ { "error": { "code": 0 }, "data":{"NAME": "ubuntuc001", "ID": "1", "ISADD2HOSTGROUP": "true"} } """ FAKE_PATCH_GET_HOST_RESPONSE = """ { "error": { "code": 0 }, "data":[{"NAME": "ubuntuc001", "ID": "1"}] } """ # A fake response of success response storage FAKE_COMMON_SUCCESS_RESPONSE = """ { "error": { "code": 0, "description": "None" }, "data":{} } """ # A fake response of fail response storage FAKE_COMMON_FAIL_RESPONSE = """ { "error": { "code": 50331651, "description": "An error occurs to the parameter." }, "data":{} } """ # A fake response of login huawei storage FAKE_GET_LOGIN_STORAGE_RESPONSE = """ { "error": { "code": 0 }, "data": { "username": "admin", "iBaseToken": "2001031430", "deviceid": "210235G7J20000000000", "accountstate": 2 } } """ # A fake response of login out huawei storage FAKE_LOGIN_OUT_STORAGE_RESPONSE = """ { "error": { "code": 0 }, "data": { "ID": 11 } } """ # A fake response of mock storage pool info FAKE_STORAGE_POOL_RESPONSE = """ { "error": { "code": 0 }, "data": [{ "USERFREECAPACITY": "985661440", "ID": "0", "NAME": "OpenStack_Pool", "USERTOTALCAPACITY": "985661440", "TIER0CAPACITY": "100", "TIER1CAPACITY": "0", "TIER2CAPACITY": "0" }] } """ # A fake response of lun or lungroup response FAKE_LUN_INFO_RESPONSE = """ { "error": { "code": 0 }, "data": { "ID": "1", "NAME": "5mFHcBv4RkCcD+JyrWc0SA", "WWN": "6643e8c1004c5f6723e9f454003", "DESCRIPTION": "21ec7341-9256-497b-97d9-ef48edcf0635", "HEALTHSTATUS": "1", "RUNNINGSTATUS": "27", "ALLOCTYPE": "1", "CAPACITY": "2097152" } } """ # A fake report of mock storage pool info FAKE_POOLS_UNSUPPORT_REPORT = { 'pool_name': 'StoragePool', 'location_info': '2102350BVB10F2000020', 'QoS_support': False, 'smartcache': False, 'thick_provisioning_support': False, 'splitmirror': False, 'allocated_capacity_gb': 7, 'thin_provisioning_support': True, 'free_capacity_gb': 400.0, 'smartpartition': False, 'total_capacity_gb': 400.0, 'reserved_percentage': 0, 'max_over_subscription_ratio': 20.0, 'luncopy': False } FAKE_POOLS_SUPPORT_REPORT = { 'pool_name': 'StoragePool', 'location_info': '2102350BVB10F2000020', 'QoS_support': True, 'smartcache': True, 'thick_provisioning_support': True, 'splitmirror': True, 'allocated_capacity_gb': 7, 'thin_provisioning_support': True, 'free_capacity_gb': 400.0, 'smartpartition': True, 'total_capacity_gb': 400.0, 'reserved_percentage': 0, 'max_over_subscription_ratio': 20.0, 'luncopy': True, 'hypermetro': True, 'consistent_group_snapshot_enabled': True } FAKE_LUN_GET_SUCCESS_RESPONSE = """ { "error": { "code": 0 }, "data": { "ID": "11", "IOCLASSID": "11", "NAME": "5mFHcBv4RkCcD+JyrWc0SA", "DESCRIPTION": "21ec7341-9256-497b-97d9-ef48edcf0635", "RUNNINGSTATUS": "10", "HEALTHSTATUS": "1", "RUNNINGSTATUS": "27", "LUNLIST": "[]", "ALLOCTYPE": "1", "CAPACITY": "2097152", "WRITEPOLICY": "1", "MIRRORPOLICY": "0", "PREFETCHPOLICY": "1", "PREFETCHVALUE": "20", "DATATRANSFERPOLICY": "1", "READCACHEPOLICY": "2", "WRITECACHEPOLICY": "5", "OWNINGCONTROLLER": "0B", "SMARTCACHEPARTITIONID": "", "CACHEPARTITIONID": "", "WWN": "6643e8c1004c5f6723e9f454003", "PARENTNAME": "OpenStack_Pool" } } """ FAKE_QUERY_ALL_LUN_RESPONSE = { "error": { "code": 0 }, "data": [{ "ID": "11", "NAME": ENCODE_NAME, "WWN": "6643e8c1004c5f6723e9f454003", "DESCRIPTION": "21ec7341-9256-497b-97d9-ef48edcf0635", "HEALTHSTATUS": "1", "RUNNINGSTATUS": "27", "LUNLIST": "[]", "ALLOCTYPE": "1", "CAPACITY": "2097152", "WRITEPOLICY": "1", "MIRRORPOLICY": "0", "PREFETCHPOLICY": "1", "PREFETCHVALUE": "20", "DATATRANSFERPOLICY": 
"1", "READCACHEPOLICY": "2", "WRITECACHEPOLICY": "5", "OWNINGCONTROLLER": "0B", "SMARTCACHEPARTITIONID": "", "CACHEPARTITIONID": "", }] } FAKE_LUN_ASSOCIATE_RESPONSE = """ { "error":{ "code":0 }, "data":[{ "ID":"11" }] } """ FAKE_QUERY_LUN_GROUP_INFO_RESPONSE = """ { "error": { "code":0 }, "data":[{ "NAME":"OpenStack_LunGroup_1", "DESCRIPTION":"5mFHcBv4RkCcD+JyrWc0SA", "ID":"11", "TYPE":256 }] } """ FAKE_QUERY_LUN_GROUP_RESPONSE = """ { "error": { "code":0 }, "data":{ "NAME":"5mFHcBv4RkCcD+JyrWc0SA", "DESCRIPTION":"5mFHcBv4RkCcD+JyrWc0SA", "ID":"11", "TYPE":256 } } """ FAKE_QUERY_LUN_GROUP_ASSOCIAT_RESPONSE = """ { "error":{ "code":0 }, "data":{ "NAME":"5mFHcBv4RkCcD+JyrWc0SA", "DESCRIPTION":"5mFHcBv4RkCcD+JyrWc0SA", "ID":"11", "TYPE":256 } } """ FAKE_LUN_COUNT_RESPONSE = """ { "data":{ "COUNT":"0" }, "error":{ "code":0, "description":"0" } } """ # A fake response of snapshot list response FAKE_SNAPSHOT_LIST_INFO_RESPONSE = { "error": { "code": 0, "description": "0" }, "data": [{ "ID": "11", "NAME": ENCODE_NAME }, ] } # A fake response of create snapshot response FAKE_CREATE_SNAPSHOT_INFO_RESPONSE = """ { "error": { "code": 0 }, "data": { "ID": "11", "NAME": "YheUoRwbSX2BxN7", "WWN": "fake-wwn" } } """ # A fake response of get snapshot response FAKE_GET_SNAPSHOT_INFO_RESPONSE = """ { "error": { "code": 0, "description": "0" }, "data": { "ID": "11", "NAME": "YheUoRwbSX2BxN7", "WWN": "fake-wwn" } } """ FAKE_SNAPSHOT_COUNT_RESPONSE = """ { "data":{ "COUNT":"2" }, "error":{ "code":0, "description":"0" } } """ # A fake response of get iscsi response FAKE_GET_ISCSI_INFO_RESPONSE = """ { "data": [{ "ETHPORTID": "139267", "ID": "0+iqn.oceanstor:21004846fb8ca15f::22004:192.0.2.1,t,0x2005", "TPGT": "8197", "TYPE": 249 }, { "ETHPORTID": "139268", "ID": "1+iqn.oceanstor:21004846fb8ca15f::22003:192.0.2.2,t,0x2004", "TPGT": "8196", "TYPE": 249 } ], "error": { "code": 0, "description": "0" } } """ # A fake response of get eth info response FAKE_GET_ETH_INFO_RESPONSE = """ { "error": { "code": 0 }, "data": [{ "PARENTTYPE": 209, "MACADDRESS": "00:22:a1:0a:79:57", "ETHNEGOTIATE": "-1", "ERRORPACKETS": "0", "IPV4ADDR": "192.0.2.2", "IPV6GATEWAY": "", "IPV6MASK": "0", "OVERFLOWEDPACKETS": "0", "ISCSINAME": "P0", "HEALTHSTATUS": "1", "ETHDUPLEX": "2", "ID": "16909568", "LOSTPACKETS": "0", "TYPE": 213, "NAME": "P0", "INIORTGT": "4", "RUNNINGSTATUS": "10", "IPV4GATEWAY": "", "BONDNAME": "", "STARTTIME": "1371684218", "SPEED": "1000", "ISCSITCPPORT": "0", "IPV4MASK": "255.255.0.0", "IPV6ADDR": "", "LOGICTYPE": "0", "LOCATION": "ENG0.A5.P0", "MTU": "1500", "PARENTID": "1.5" }, { "PARENTTYPE": 209, "MACADDRESS": "00:22:a1:0a:79:57", "ETHNEGOTIATE": "-1", "ERRORPACKETS": "0", "IPV4ADDR": "192.0.2.1", "IPV6GATEWAY": "", "IPV6MASK": "0", "OVERFLOWEDPACKETS": "0", "ISCSINAME": "P0", "HEALTHSTATUS": "1", "ETHDUPLEX": "2", "ID": "16909568", "LOSTPACKETS": "0", "TYPE": 213, "NAME": "P0", "INIORTGT": "4", "RUNNINGSTATUS": "10", "IPV4GATEWAY": "", "BONDNAME": "", "STARTTIME": "1371684218", "SPEED": "1000", "ISCSITCPPORT": "0", "IPV4MASK": "255.255.0.0", "IPV6ADDR": "", "LOGICTYPE": "0", "LOCATION": "ENG0.A5.P3", "MTU": "1500", "PARENTID": "1.5" }] } """ FAKE_GET_ETH_ASSOCIATE_RESPONSE = """ { "error":{ "code":0 }, "data":[{ "IPV4ADDR": "192.0.2.1", "HEALTHSTATUS": "1", "RUNNINGSTATUS": "10" }, { "IPV4ADDR": "192.0.2.2", "HEALTHSTATUS": "1", "RUNNINGSTATUS": "10" } ] } """ # A fake response of get iscsi device info response FAKE_GET_ISCSI_DEVICE_RESPONSE = """ { "error": { "code": 0 }, "data": [{ "CMO_ISCSI_DEVICE_NAME": 
"iqn.2006-08.com.huawei:oceanstor:21000022a:" }] } """ # A fake response of get iscsi device info response FAKE_GET_ALL_HOST_INFO_RESPONSE = """ { "error": { "code": 0 }, "data": [{ "PARENTTYPE": 245, "NAME": "ubuntuc", "DESCRIPTION": "", "RUNNINGSTATUS": "1", "IP": "", "PARENTNAME": "", "OPERATIONSYSTEM": "0", "LOCATION": "", "HEALTHSTATUS": "1", "MODEL": "", "ID": "1", "PARENTID": "", "NETWORKNAME": "", "TYPE": 21 }, { "PARENTTYPE": 245, "NAME": "ubuntu", "DESCRIPTION": "", "RUNNINGSTATUS": "1", "IP": "", "PARENTNAME": "", "OPERATIONSYSTEM": "0", "LOCATION": "", "HEALTHSTATUS": "1", "MODEL": "", "ID": "2", "PARENTID": "", "NETWORKNAME": "", "TYPE": 21 }] } """ # A fake response of get host or hostgroup info response FAKE_GET_ALL_HOST_GROUP_INFO_RESPONSE = """ { "error": { "code": 0 }, "data": [{ "NAME":"ubuntuc", "DESCRIPTION":"", "ID":"0", "TYPE":14 }, {"NAME":"OpenStack_HostGroup_1", "DESCRIPTION":"", "ID":"0", "TYPE":14 } ] } """ FAKE_GET_HOST_GROUP_INFO_RESPONSE = """ { "error": { "code": 0 }, "data":{ "NAME":"ubuntuc", "DESCRIPTION":"", "ID":"0", "TYPE":14 } } """ # A fake response of lun copy info response FAKE_GET_LUN_COPY_INFO_RESPONSE = """ { "error": { "code": 0 }, "data": { "COPYSTOPTIME": "-1", "HEALTHSTATUS": "1", "NAME": "w1PSNvu6RumcZMmSh4/l+Q==", "RUNNINGSTATUS": "36", "DESCRIPTION": "w1PSNvu6RumcZMmSh4/l+Q==", "ID": "0", "LUNCOPYTYPE": "1", "COPYPROGRESS": "0", "COPYSPEED": "2", "TYPE": 219, "COPYSTARTTIME": "-1" } } """ # A fake response of lun copy list info response FAKE_GET_LUN_COPY_LIST_INFO_RESPONSE = """ { "error": { "code": 0 }, "data": [{ "COPYSTOPTIME": "1372209335", "HEALTHSTATUS": "1", "NAME": "w1PSNvu6RumcZMmSh4/l+Q==", "RUNNINGSTATUS": "40", "DESCRIPTION": "w1PSNvu6RumcZMmSh4/l+Q==", "ID": "0", "LUNCOPYTYPE": "1", "COPYPROGRESS": "100", "COPYSPEED": "2", "TYPE": 219, "COPYSTARTTIME": "1372209329" }] } """ # A fake response of mappingview info response FAKE_GET_MAPPING_VIEW_INFO_RESPONSE = """ { "error":{ "code":0 }, "data":[{ "WORKMODE":"255", "HEALTHSTATUS":"1", "NAME":"OpenStack_Mapping_View_1", "RUNNINGSTATUS":"27", "DESCRIPTION":"", "ENABLEINBANDCOMMAND":"true", "ID":"1", "INBANDLUNWWN":"", "TYPE":245 }, { "WORKMODE":"255", "HEALTHSTATUS":"1", "NAME":"YheUoRwbSX2BxN767nvLSw", "RUNNINGSTATUS":"27", "DESCRIPTION":"", "ENABLEINBANDCOMMAND":"true", "ID":"2", "INBANDLUNWWN": "", "TYPE": 245 }] } """ FAKE_GET_MAPPING_VIEW_RESPONSE = """ { "error":{ "code":0 }, "data":[{ "WORKMODE":"255", "HEALTHSTATUS":"1", "NAME":"mOWtSXnaQKi3hpB3tdFRIQ", "RUNNINGSTATUS":"27", "DESCRIPTION":"", "ENABLEINBANDCOMMAND":"true", "ID":"11", "INBANDLUNWWN":"", "TYPE": 245, "AVAILABLEHOSTLUNIDLIST": "" }] } """ FAKE_GET_SPEC_MAPPING_VIEW_RESPONSE = """ { "error":{ "code":0 }, "data":{ "WORKMODE":"255", "HEALTHSTATUS":"1", "NAME":"mOWtSXnaQKi3hpB3tdFRIQ", "RUNNINGSTATUS":"27", "DESCRIPTION":"", "ENABLEINBANDCOMMAND":"true", "ID":"1", "INBANDLUNWWN":"", "TYPE":245, "AVAILABLEHOSTLUNIDLIST": "[1]" } } """ FAKE_FC_INFO_RESPONSE = """ { "error":{ "code":0 }, "data":[{ "HEALTHSTATUS":"1", "NAME":"", "MULTIPATHTYPE":"1", "ISFREE":"true", "RUNNINGSTATUS":"27", "ID":"10000090fa0d6754", "OPERATIONSYSTEM":"255", "TYPE":223 }, { "HEALTHSTATUS":"1", "NAME":"", "MULTIPATHTYPE":"1", "ISFREE":"true", "RUNNINGSTATUS":"27", "ID":"10000090fa0d6755", "OPERATIONSYSTEM":"255", "TYPE":223 }] } """ FAKE_ISCSI_INITIATOR_RESPONSE = """ { "error":{ "code":0 }, "data":[{ "CHAPNAME":"mm-user", "HEALTHSTATUS":"1", "ID":"iqn.1993-08.org.debian:01:9073aba6c6f", "ISFREE":"true", "MULTIPATHTYPE":"1", "NAME":"", 
"OPERATIONSYSTEM":"255", "RUNNINGSTATUS":"28", "TYPE":222, "USECHAP":"true" }, { "ISFREE":"true", "ID":"ini-1" }, { "ISFREE":"false", "ID":"ini-2", "PARENTNAME":"Host2", "PARENTID":"2" }] } """ FAKE_HOST_LINK_RESPONSE = """ { "error":{ "code":0 }, "data":[{ "PARENTTYPE":21, "TARGET_ID":"0000000000000000", "INITIATOR_NODE_WWN":"20000090fa0d6754", "INITIATOR_TYPE":"223", "RUNNINGSTATUS":"27", "PARENTNAME":"ubuntuc", "INITIATOR_ID":"10000090fa0d6754", "TARGET_PORT_WWN":"24000022a10a2a39", "HEALTHSTATUS":"1", "INITIATOR_PORT_WWN":"10000090fa0d6754", "ID":"010000090fa0d675-0000000000110400", "TARGET_NODE_WWN":"21000022a10a2a39", "PARENTID":"1", "CTRL_ID":"0", "TYPE":255, "TARGET_TYPE":"212" }] } """ FAKE_PORT_GROUP_RESPONSE = """ { "error":{ "code":0 }, "data":[{ "ID":11, "NAME": "portgroup-test" }] } """ FAKE_ERROR_INFO_RESPONSE = """ { "error":{ "code":31755596 } } """ FAKE_ERROR_CONNECT_RESPONSE = """ { "error":{ "code":-403 } } """ FAKE_ERROR_LUN_INFO_RESPONSE = """ { "error":{ "code":0 }, "data":{ "ID":"11", "IOCLASSID":"11", "NAME":"5mFHcBv4RkCcD+JyrWc0SA", "ALLOCTYPE": "0", "DATATRANSFERPOLICY": "0", "SMARTCACHEPARTITIONID": "0", "CACHEPARTITIONID": "0" } } """ FAKE_GET_FC_INI_RESPONSE = """ { "error":{ "code":0 }, "data":[{ "ID":"10000090fa0d6754", "ISFREE":"true" }] } """ FAKE_SYSTEM_VERSION_RESPONSE = """ { "error":{ "code": 0 }, "data":{ "PRODUCTVERSION": "V100R001C10", "wwn": "21003400a30d844d" } } """ FAKE_GET_LUN_MIGRATION_RESPONSE = """ { "data":[{"ENDTIME":"1436816174", "ID":"9", "PARENTID":"11", "PARENTNAME":"xmRBHMlVRruql5vwthpPXQ", "PROCESS":"-1", "RUNNINGSTATUS":"76", "SPEED":"2", "STARTTIME":"1436816111", "TARGETLUNID":"1", "TARGETLUNNAME":"4924891454902893639", "TYPE":253, "WORKMODE":"0" }], "error":{"code":0, "description":"0"} } """ FAKE_HYPERMETRODOMAIN_RESPONSE = """ { "error":{ "code": 0 }, "data":[{ "PRODUCTVERSION": "V100R001C10", "ID": "11", "NAME": "hypermetro_test", "RUNNINGSTATUS": "1", "HEALTHSTATUS": "0" }] } """ FAKE_HYPERMETRO_RESPONSE = """ { "error":{ "code": 0 }, "data":{ "PRODUCTVERSION": "V100R001C10", "ID": "11", "NAME": "hypermetro_test", "RUNNINGSTATUS": "1", "HEALTHSTATUS": "1" } } """ FAKE_QOS_INFO_RESPONSE = """ { "error":{ "code": 0 }, "data":{ "ID": "11" } } """ FAKE_GET_FC_PORT_RESPONSE = """ { "error":{ "code":0 }, "data":[{ "RUNNINGSTATUS":"10", "WWN":"2000643e8c4c5f66", "PARENTID":"0A.1", "ID": "1114368", "RUNSPEED": "16000" }, { "RUNNINGSTATUS":"10", "WWN":"2000643e8c4c5f67", "PARENTID":"0A.1", "ID": "1114369", "RUNSPEED": "16000" }] } """ FAKE_SMARTCACHEPARTITION_RESPONSE = """ { "error":{ "code":0 }, "data":{ "ID":"11", "NAME":"cache-name" } } """ FAKE_CONNECT_FC_RESPONSE = { "driver_volume_type": 'fibre_channel', "data": { "target_wwn": ["10000090fa0d6754"], "target_lun": "1", "volume_id": ID } } FAKE_METRO_INFO_RESPONSE = { "PRODUCTVERSION": "V100R001C10", "ID": "11", "NAME": "hypermetro_test", "RUNNINGSTATUS": "42", "HEALTHSTATUS": "0" } FAKE_METRO_INFO_NEW_RESPONSE = """{ "error": { "code": 0 }, "data": { "PRODUCTVERSION": "V100R001C10", "ID": "11", "NAME": "hypermetro_test", "RUNNINGSTATUS": "1", "HEALTHSTATUS": "1" } } """ FAKE_CREATE_METROROUP_RESPONSE = """ { "data": { "DESCRIPTION": "", "DOMAINID": "643e8c4c5f670100", "DOMAINNAME": "hypermetro-domain", "HEALTHSTATUS": "1", "ID": "3400a30d844d8002", "ISEMPTY": "true", "NAME": "6F7kdHZcQJ2zbzxHmBl4FQ", "PRIORITYSTATIONTYPE": "0", "RECOVERYPOLICY": "1", "RESOURCETYPE": "11", "RUNNINGSTATUS": "41", "SPEED": "2", "SYNCDIRECTION": "1", "TYPE": 15364 }, "error": { "code": 0, 
"description": "0" } } """ FAKE_GET_METROROUP_RESPONSE = { "data": [{ "DESCRIPTION": "", "DOMAINID": "643e8c4c5f670100", "DOMAINNAME": "hypermetro-domain", "HEALTHSTATUS": "1", "ID": "11", "ISEMPTY": "true", "NAME": huawei_utils.encode_name(ID), "PRIORITYSTATIONTYPE": "0", "RECOVERYPOLICY": "1", "RESOURCETYPE": "11", "RUNNINGSTATUS": "41", "SPEED": "2", "SYNCDIRECTION": "1", "TYPE": 15364 }], "error": { "code": 0, "description": "0" }, } FAKE_GET_METROROUP_ID_RESPONSE = """ { "data": { "DESCRIPTION": "", "DOMAINID": "643e8c4c5f670100", "DOMAINNAME": "hypermetro-domain", "HEALTHSTATUS": "1", "ID": "11", "ISEMPTY": "false", "NAME": "IexzQZJWSXuX2e9I7c8GNQ", "PRIORITYSTATIONTYPE": "0", "RECOVERYPOLICY": "1", "RESOURCETYPE": "11", "RUNNINGSTATUS": "1", "SPEED": "2", "SYNCDIRECTION": "1", "TYPE": 15364 }, "error": { "code": 0, "description": "0" } } """ # mock login info map MAP_COMMAND_TO_FAKE_RESPONSE = {} MAP_COMMAND_TO_FAKE_RESPONSE['/xx/sessions/POST'] = ( FAKE_GET_LOGIN_STORAGE_RESPONSE) MAP_COMMAND_TO_FAKE_RESPONSE['/sessions/DELETE'] = ( FAKE_LOGIN_OUT_STORAGE_RESPONSE) MAP_COMMAND_TO_FAKE_RESPONSE['/LUN_MIGRATION/POST'] = ( FAKE_COMMON_SUCCESS_RESPONSE) MAP_COMMAND_TO_FAKE_RESPONSE['/LUN_MIGRATION?range=[0-256]/GET'] = ( FAKE_GET_LUN_MIGRATION_RESPONSE) MAP_COMMAND_TO_FAKE_RESPONSE['/LUN_MIGRATION/11/DELETE'] = ( FAKE_COMMON_SUCCESS_RESPONSE) # mock storage info map MAP_COMMAND_TO_FAKE_RESPONSE['/storagepool/GET'] = ( FAKE_STORAGE_POOL_RESPONSE) # mock lun info map MAP_COMMAND_TO_FAKE_RESPONSE['/lun/POST'] = ( FAKE_LUN_INFO_RESPONSE) MAP_COMMAND_TO_FAKE_RESPONSE['/lun/11/GET'] = ( FAKE_LUN_GET_SUCCESS_RESPONSE) MAP_COMMAND_TO_FAKE_RESPONSE['/lun/1/GET'] = ( FAKE_LUN_GET_SUCCESS_RESPONSE) MAP_COMMAND_TO_FAKE_RESPONSE['/lun/11/DELETE'] = ( FAKE_COMMON_SUCCESS_RESPONSE) MAP_COMMAND_TO_FAKE_RESPONSE['/lun/1/DELETE'] = ( FAKE_COMMON_SUCCESS_RESPONSE) MAP_COMMAND_TO_FAKE_RESPONSE['/lun/1/PUT'] = ( FAKE_COMMON_SUCCESS_RESPONSE) MAP_COMMAND_TO_FAKE_RESPONSE['/lun/11/PUT'] = ( FAKE_COMMON_SUCCESS_RESPONSE) MAP_COMMAND_TO_FAKE_RESPONSE['/lun?filter=NAME::%s/GET' % ENCODE_NAME] = ( json.dumps(FAKE_QUERY_ALL_LUN_RESPONSE)) MAP_COMMAND_TO_FAKE_RESPONSE['/lun?filter=NAME::%s/GET' % OLD_ENCODE_NAME] = ( json.dumps(FAKE_QUERY_ALL_LUN_RESPONSE)) MAP_COMMAND_TO_FAKE_RESPONSE['/lun/associate?TYPE=11&ASSOCIATEOBJTYPE=256' '&ASSOCIATEOBJID=11/GET'] = ( FAKE_LUN_ASSOCIATE_RESPONSE) MAP_COMMAND_TO_FAKE_RESPONSE['/lun/associate?TYPE=11&ASSOCIATEOBJTYPE=256' '&ASSOCIATEOBJID=12/GET'] = ( FAKE_LUN_ASSOCIATE_RESPONSE) MAP_COMMAND_TO_FAKE_RESPONSE['/lun/associate?ID=1&TYPE=11&ASSOCIATEOBJTYPE=21' '&ASSOCIATEOBJID=0/GET'] = ( FAKE_LUN_ASSOCIATE_RESPONSE) MAP_COMMAND_TO_FAKE_RESPONSE['/lun/associate?TYPE=11&ASSOCIATEOBJTYPE=21' '&ASSOCIATEOBJID=1/GET'] = ( FAKE_COMMON_SUCCESS_RESPONSE) MAP_COMMAND_TO_FAKE_RESPONSE['/lun/associate/cachepartition?ID=1' '&ASSOCIATEOBJTYPE=11&ASSOCIATEOBJID=11' '/DELETE'] = ( FAKE_COMMON_SUCCESS_RESPONSE) MAP_COMMAND_TO_FAKE_RESPONSE['/snapshot/associate?TYPE=27&ASSOCIATEOBJTYPE=21' '&ASSOCIATEOBJID=1/GET'] = ( FAKE_COMMON_SUCCESS_RESPONSE) MAP_COMMAND_TO_FAKE_RESPONSE['/snapshot/associate?TYPE=27&ASSOCIATEOBJTYPE=256' '&ASSOCIATEOBJID=11/GET'] = ( FAKE_COMMON_SUCCESS_RESPONSE) MAP_COMMAND_TO_FAKE_RESPONSE['/lungroup?range=[0-8191]/GET'] = ( FAKE_QUERY_LUN_GROUP_INFO_RESPONSE) MAP_COMMAND_TO_FAKE_RESPONSE['/lungroup/POST'] = ( FAKE_QUERY_LUN_GROUP_RESPONSE) MAP_COMMAND_TO_FAKE_RESPONSE['/lungroup/associate/POST'] = ( FAKE_QUERY_LUN_GROUP_ASSOCIAT_RESPONSE) 
MAP_COMMAND_TO_FAKE_RESPONSE['/LUNGroup/11/DELETE'] = ( FAKE_COMMON_SUCCESS_RESPONSE) MAP_COMMAND_TO_FAKE_RESPONSE['/lungroup/associate?ID=11&ASSOCIATEOBJTYPE=11' '&ASSOCIATEOBJID=1/DELETE'] = ( FAKE_COMMON_SUCCESS_RESPONSE) MAP_COMMAND_TO_FAKE_RESPONSE['/lungroup/associate?TYPE=256&ASSOCIATEOBJTYPE=11' '&ASSOCIATEOBJID=11/GET'] = ( FAKE_LUN_ASSOCIATE_RESPONSE) MAP_COMMAND_TO_FAKE_RESPONSE['/lungroup/associate?TYPE=256&ASSOCIATEOBJTYPE=11' '&ASSOCIATEOBJID=1/GET'] = ( FAKE_LUN_ASSOCIATE_RESPONSE) MAP_COMMAND_TO_FAKE_RESPONSE['/lungroup/associate?ID=11&ASSOCIATEOBJTYPE=11' '&ASSOCIATEOBJID=11/DELETE'] = ( FAKE_COMMON_SUCCESS_RESPONSE) MAP_COMMAND_TO_FAKE_RESPONSE['/lungroup/associate?ID=11&ASSOCIATEOBJTYPE=27' '&ASSOCIATEOBJID=11/DELETE'] = ( FAKE_COMMON_SUCCESS_RESPONSE) MAP_COMMAND_TO_FAKE_RESPONSE['/lun/count?TYPE=11&ASSOCIATEOBJTYPE=256' '&ASSOCIATEOBJID=11/GET'] = ( FAKE_LUN_COUNT_RESPONSE) MAP_COMMAND_TO_FAKE_RESPONSE['/snapshot/count?TYPE=27&ASSOCIATEOBJTYPE=256' '&ASSOCIATEOBJID=1/GET'] = ( FAKE_SNAPSHOT_COUNT_RESPONSE) MAP_COMMAND_TO_FAKE_RESPONSE['/snapshot/count?TYPE=27&ASSOCIATEOBJTYPE=256' '&ASSOCIATEOBJID=11/GET'] = ( FAKE_SNAPSHOT_COUNT_RESPONSE) MAP_COMMAND_TO_FAKE_RESPONSE['/lungroup/associate?TYPE=256&ASSOCIATEOBJTYPE=27' '&ASSOCIATEOBJID=11/GET'] = ( FAKE_LUN_ASSOCIATE_RESPONSE) MAP_COMMAND_TO_FAKE_RESPONSE['/lun/expand/PUT'] = ( FAKE_LUN_INFO_RESPONSE) MAP_COMMAND_TO_FAKE_RESPONSE['/lungroup/associate?ID=12&ASSOCIATEOBJTYPE=11' '&ASSOCIATEOBJID=12/DELETE'] = ( FAKE_COMMON_SUCCESS_RESPONSE) # mock snapshot info map MAP_COMMAND_TO_FAKE_RESPONSE['/snapshot/POST'] = ( FAKE_CREATE_SNAPSHOT_INFO_RESPONSE) # mock snapshot info map MAP_COMMAND_TO_FAKE_RESPONSE['/snapshot/11/GET'] = ( FAKE_GET_SNAPSHOT_INFO_RESPONSE) MAP_COMMAND_TO_FAKE_RESPONSE['/snapshot/activate/POST'] = ( FAKE_COMMON_SUCCESS_RESPONSE) MAP_COMMAND_TO_FAKE_RESPONSE['/snapshot/stop/PUT'] = ( FAKE_COMMON_SUCCESS_RESPONSE) MAP_COMMAND_TO_FAKE_RESPONSE['/snapshot/11/DELETE'] = ( FAKE_COMMON_SUCCESS_RESPONSE) MAP_COMMAND_TO_FAKE_RESPONSE['/snapshot?filter=NAME::%s/GET' % ENCODE_NAME] = ( json.dumps(FAKE_SNAPSHOT_LIST_INFO_RESPONSE)) MAP_COMMAND_TO_FAKE_RESPONSE['/snapshot?filter=NAME::%s/GET' % OLD_ENCODE_NAME] = ( json.dumps(FAKE_SNAPSHOT_LIST_INFO_RESPONSE)) # mock QoS info map MAP_COMMAND_TO_FAKE_RESPONSE['/ioclass/11/GET'] = ( FAKE_LUN_GET_SUCCESS_RESPONSE) MAP_COMMAND_TO_FAKE_RESPONSE['/ioclass/11/DELETE'] = ( FAKE_COMMON_SUCCESS_RESPONSE) MAP_COMMAND_TO_FAKE_RESPONSE['/ioclass/11/PUT'] = ( FAKE_COMMON_SUCCESS_RESPONSE) MAP_COMMAND_TO_FAKE_RESPONSE['/ioclass/active/11/PUT'] = ( FAKE_COMMON_SUCCESS_RESPONSE) MAP_COMMAND_TO_FAKE_RESPONSE['/ioclass/POST'] = ( FAKE_QOS_INFO_RESPONSE) MAP_COMMAND_TO_FAKE_RESPONSE['/ioclass/count/GET'] = ( FAKE_COMMON_FAIL_RESPONSE) # mock iscsi info map MAP_COMMAND_TO_FAKE_RESPONSE['/iscsi_tgt_port/GET'] = ( FAKE_GET_ISCSI_INFO_RESPONSE) MAP_COMMAND_TO_FAKE_RESPONSE['/eth_port/GET'] = ( FAKE_GET_ETH_INFO_RESPONSE) MAP_COMMAND_TO_FAKE_RESPONSE['/eth_port/associate?TYPE=213&ASSOCIATEOBJTYPE' '=257&ASSOCIATEOBJID=11/GET'] = ( FAKE_GET_ETH_ASSOCIATE_RESPONSE) MAP_COMMAND_TO_FAKE_RESPONSE['/iscsidevicename/GET'] = ( FAKE_GET_ISCSI_DEVICE_RESPONSE) MAP_COMMAND_TO_FAKE_RESPONSE['/iscsi_initiator?range=[0-256]/GET'] = ( FAKE_ISCSI_INITIATOR_RESPONSE) MAP_COMMAND_TO_FAKE_RESPONSE['/iscsi_initiator/GET'] = ( FAKE_ISCSI_INITIATOR_RESPONSE) MAP_COMMAND_TO_FAKE_RESPONSE['/iscsi_initiator/POST'] = ( FAKE_ISCSI_INITIATOR_RESPONSE) MAP_COMMAND_TO_FAKE_RESPONSE['/iscsi_initiator/PUT'] = ( 
FAKE_ISCSI_INITIATOR_RESPONSE) MAP_COMMAND_TO_FAKE_RESPONSE['/iscsi_initiator?PARENTTYPE=21&PARENTID' '=1/GET'] = ( FAKE_ISCSI_INITIATOR_RESPONSE) MAP_COMMAND_TO_FAKE_RESPONSE['/iscsi_initiator/remove_iscsi_from_host/PUT'] = ( FAKE_COMMON_SUCCESS_RESPONSE) MAP_COMMAND_TO_FAKE_RESPONSE['/iscsi_initiator/' 'iqn.1993-08.debian:01:ec2bff7ac3a3/PUT'] = ( FAKE_ISCSI_INITIATOR_RESPONSE) MAP_COMMAND_TO_FAKE_RESPONSE['/host?filter=NAME::%s/GET' % ENCODE_HOST_NAME ] = FAKE_PATCH_GET_HOST_RESPONSE MAP_COMMAND_TO_FAKE_RESPONSE['/host?filter=NAME::%s/GET' % OLD_ENCODE_HOST_NAME ] = FAKE_PATCH_GET_HOST_RESPONSE MAP_COMMAND_TO_FAKE_RESPONSE['/host/1/DELETE'] = ( FAKE_COMMON_SUCCESS_RESPONSE) MAP_COMMAND_TO_FAKE_RESPONSE['/host/1/GET'] = ( FAKE_GET_HOST_RESPONSE) MAP_COMMAND_TO_FAKE_RESPONSE['/host/POST'] = ( FAKE_CREATE_HOST_RESPONSE) MAP_COMMAND_TO_FAKE_RESPONSE['/hostgroup?range=[0-8191]/GET'] = ( FAKE_GET_ALL_HOST_GROUP_INFO_RESPONSE) MAP_COMMAND_TO_FAKE_RESPONSE['/hostgroup/GET'] = ( FAKE_GET_HOST_GROUP_INFO_RESPONSE) MAP_COMMAND_TO_FAKE_RESPONSE['/host/associate?TYPE=14&ID=0' '&ASSOCIATEOBJTYPE=21&ASSOCIATEOBJID=1' '/DELETE'] = ( FAKE_COMMON_SUCCESS_RESPONSE) MAP_COMMAND_TO_FAKE_RESPONSE['/host/associate?TYPE=14&ID=0' '&ASSOCIATEOBJID=0/GET'] = ( FAKE_COMMON_SUCCESS_RESPONSE) MAP_COMMAND_TO_FAKE_RESPONSE['/host/associate?TYPE=21&' 'ASSOCIATEOBJTYPE=14&ASSOCIATEOBJID=0/GET'] = ( FAKE_COMMON_SUCCESS_RESPONSE) MAP_COMMAND_TO_FAKE_RESPONSE['/hostgroup/0/DELETE'] = ( FAKE_COMMON_SUCCESS_RESPONSE) MAP_COMMAND_TO_FAKE_RESPONSE['/host/associate?TYPE=21&' 'ASSOCIATEOBJTYPE=14&ASSOCIATEOBJID=0/GET'] = ( FAKE_COMMON_SUCCESS_RESPONSE) MAP_COMMAND_TO_FAKE_RESPONSE['/hostgroup/associate/POST'] = ( FAKE_COMMON_SUCCESS_RESPONSE) # mock copy info map MAP_COMMAND_TO_FAKE_RESPONSE['/luncopy/POST'] = ( FAKE_GET_LUN_COPY_INFO_RESPONSE) MAP_COMMAND_TO_FAKE_RESPONSE['/LUNCOPY?range=[0-1023]/GET'] = ( FAKE_GET_LUN_COPY_LIST_INFO_RESPONSE) MAP_COMMAND_TO_FAKE_RESPONSE['/LUNCOPY/start/PUT'] = ( FAKE_COMMON_SUCCESS_RESPONSE) MAP_COMMAND_TO_FAKE_RESPONSE['/LUNCOPY/0/DELETE'] = ( FAKE_COMMON_SUCCESS_RESPONSE) # mock mapping view info map MAP_COMMAND_TO_FAKE_RESPONSE['/mappingview?range=[0-8191]/GET'] = ( FAKE_GET_MAPPING_VIEW_INFO_RESPONSE) MAP_COMMAND_TO_FAKE_RESPONSE['/mappingview/POST'] = ( FAKE_GET_MAPPING_VIEW_RESPONSE) MAP_COMMAND_TO_FAKE_RESPONSE['/mappingview/PUT'] = ( FAKE_GET_MAPPING_VIEW_RESPONSE) MAP_COMMAND_TO_FAKE_RESPONSE['/MAPPINGVIEW/1/GET'] = ( FAKE_GET_SPEC_MAPPING_VIEW_RESPONSE) MAP_COMMAND_TO_FAKE_RESPONSE['/mappingview/1/DELETE'] = ( FAKE_COMMON_SUCCESS_RESPONSE) MAP_COMMAND_TO_FAKE_RESPONSE['/mappingview/REMOVE_ASSOCIATE/PUT'] = ( FAKE_COMMON_SUCCESS_RESPONSE) MAP_COMMAND_TO_FAKE_RESPONSE['/mappingview/associate/lungroup?TYPE=256&' 'ASSOCIATEOBJTYPE=245&ASSOCIATEOBJID=1/GET'] = ( FAKE_GET_MAPPING_VIEW_RESPONSE) MAP_COMMAND_TO_FAKE_RESPONSE['/mappingview/associate?TYPE=245&' 'ASSOCIATEOBJTYPE=14&ASSOCIATEOBJID=0/GET'] = ( FAKE_GET_MAPPING_VIEW_RESPONSE) MAP_COMMAND_TO_FAKE_RESPONSE['/mappingview/associate?TYPE=245&' 'ASSOCIATEOBJTYPE=256&ASSOCIATEOBJID=11/GET'] = ( FAKE_GET_MAPPING_VIEW_RESPONSE) MAP_COMMAND_TO_FAKE_RESPONSE['/mappingview/associate?TYPE=245&' 'ASSOCIATEOBJTYPE=257&ASSOCIATEOBJID=0/GET'] = ( FAKE_GET_MAPPING_VIEW_RESPONSE) MAP_COMMAND_TO_FAKE_RESPONSE['/mappingview/associate?TYPE=245&' 'ASSOCIATEOBJTYPE=257&ASSOCIATEOBJID=11/GET'] = ( FAKE_GET_MAPPING_VIEW_RESPONSE) FAKE_GET_ENGINES_RESPONSE = """ { "error":{ "code": 0 }, "data":[{ "NODELIST": "[]", "ID": "0" }] } """ 
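# NOTE: endpoints that have no entry in MAP_COMMAND_TO_FAKE_RESPONSE are not
# an error -- FakeClient.do_call starts from FAKE_COMMON_SUCCESS_RESPONSE and
# only overrides it when an exact key matches, so write-style calls without a
# dedicated fake still report success. Setting test_fail = True on the fake
# client makes the next call return FAKE_ERROR_INFO_RESPONSE instead (the
# flag resets itself after one call).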
MAP_COMMAND_TO_FAKE_RESPONSE['/storageengine/GET'] = ( FAKE_GET_ENGINES_RESPONSE) MAP_COMMAND_TO_FAKE_RESPONSE['/portgroup/associate?ASSOCIATEOBJTYPE=245&' 'ASSOCIATEOBJID=1&range=[0-8191]/GET'] = ( FAKE_GET_MAPPING_VIEW_RESPONSE) MAP_COMMAND_TO_FAKE_RESPONSE['/MAPPINGVIEW/CREATE_ASSOCIATE/PUT'] = ( FAKE_COMMON_SUCCESS_RESPONSE) # mock FC info map MAP_COMMAND_TO_FAKE_RESPONSE['/fc_initiator?ISFREE=true&' 'range=[0-8191]/GET'] = ( FAKE_FC_INFO_RESPONSE) MAP_COMMAND_TO_FAKE_RESPONSE['/MAPPINGVIEW/CREATE_ASSOCIATE/PUT'] = ( FAKE_COMMON_SUCCESS_RESPONSE) # mock FC info map MAP_COMMAND_TO_FAKE_RESPONSE['/fc_initiator?ISFREE=true&' 'range=[0-8191]/GET'] = ( FAKE_FC_INFO_RESPONSE) MAP_COMMAND_TO_FAKE_RESPONSE['/fc_initiator/10000090fa0d6754/GET'] = ( FAKE_FC_INFO_RESPONSE) MAP_COMMAND_TO_FAKE_RESPONSE['/fc_initiator/10000090fa0d6754/PUT'] = ( FAKE_COMMON_SUCCESS_RESPONSE) MAP_COMMAND_TO_FAKE_RESPONSE['/host_link?INITIATOR_TYPE=223' '&INITIATOR_PORT_WWN=10000090fa0d6754/GET'] = ( FAKE_HOST_LINK_RESPONSE) MAP_COMMAND_TO_FAKE_RESPONSE['/portgroup?range=[0-8191]&TYPE=257/GET'] = ( FAKE_PORT_GROUP_RESPONSE) # mock system info map MAP_COMMAND_TO_FAKE_RESPONSE['/system//GET'] = ( FAKE_SYSTEM_VERSION_RESPONSE) MAP_COMMAND_TO_FAKE_RESPONSE['/fc_initiator?range=[0-256]/GET'] = ( FAKE_GET_FC_INI_RESPONSE) MAP_COMMAND_TO_FAKE_RESPONSE['/fc_port/GET'] = ( FAKE_GET_FC_PORT_RESPONSE) MAP_COMMAND_TO_FAKE_RESPONSE['/fc_initiator/GET'] = ( FAKE_GET_FC_PORT_RESPONSE) MAP_COMMAND_TO_FAKE_RESPONSE['fc_initiator?range=[0-256]/GET'] = ( FAKE_GET_FC_PORT_RESPONSE) MAP_COMMAND_TO_FAKE_RESPONSE['/fc_initiator?PARENTTYPE=21&PARENTID=1/GET'] = ( FAKE_GET_FC_PORT_RESPONSE) MAP_COMMAND_TO_FAKE_RESPONSE['/lun/associate/cachepartition/POST'] = ( FAKE_SYSTEM_VERSION_RESPONSE) MAP_COMMAND_TO_FAKE_RESPONSE['/fc_initiator?range=[0-256]&PARENTID=1/GET'] = ( FAKE_GET_FC_PORT_RESPONSE) MAP_COMMAND_TO_FAKE_RESPONSE['/fc_initiator?PARENTTYPE=21&PARENTID=1/GET'] = ( FAKE_GET_FC_PORT_RESPONSE) MAP_COMMAND_TO_FAKE_RESPONSE['/fc_initiator/count/GET'] = ''' {"data":{"COUNT":"2"},"error":{"code":0}}''' MAP_COMMAND_TO_FAKE_RESPONSE['/fc_initiator?range=[0-100]/GET'] = ( FAKE_FC_INFO_RESPONSE) MAP_COMMAND_TO_FAKE_RESPONSE['/SMARTCACHEPARTITION/0/GET'] = ( FAKE_SMARTCACHEPARTITION_RESPONSE) MAP_COMMAND_TO_FAKE_RESPONSE['/SMARTCACHEPARTITION/REMOVE_ASSOCIATE/PUT'] = ( FAKE_COMMON_SUCCESS_RESPONSE) MAP_COMMAND_TO_FAKE_RESPONSE['/SMARTCACHEPARTITION/count/GET'] = ( FAKE_COMMON_FAIL_RESPONSE) MAP_COMMAND_TO_FAKE_RESPONSE['/cachepartition/0/GET'] = ( FAKE_SMARTCACHEPARTITION_RESPONSE) MAP_COMMAND_TO_FAKE_RESPONSE['/HyperMetroDomain?range=[0-32]/GET'] = ( FAKE_HYPERMETRODOMAIN_RESPONSE) MAP_COMMAND_TO_FAKE_RESPONSE['/HyperMetroPair/POST'] = ( FAKE_HYPERMETRO_RESPONSE) MAP_COMMAND_TO_FAKE_RESPONSE['/HyperMetroPair/3400a30d844d0007/GET'] = ( FAKE_METRO_INFO_NEW_RESPONSE) MAP_COMMAND_TO_FAKE_RESPONSE['/HyperMetroPair/disable_hcpair/PUT'] = ( FAKE_COMMON_SUCCESS_RESPONSE) MAP_COMMAND_TO_FAKE_RESPONSE['/hyperMetro/associate/pair/POST'] = ( FAKE_COMMON_SUCCESS_RESPONSE) MAP_COMMAND_TO_FAKE_RESPONSE['/hyperMetro/associate/pair/DELETE'] = ( FAKE_COMMON_SUCCESS_RESPONSE) MAP_COMMAND_TO_FAKE_RESPONSE['/HyperMetroPair/11/DELETE'] = ( FAKE_COMMON_SUCCESS_RESPONSE) MAP_COMMAND_TO_FAKE_RESPONSE['/HyperMetroPair/11/GET'] = ( FAKE_HYPERMETRO_RESPONSE) MAP_COMMAND_TO_FAKE_RESPONSE['/HyperMetroPair?range=[0-4095]/GET'] = ( FAKE_COMMON_SUCCESS_RESPONSE) MAP_COMMAND_TO_FAKE_RESPONSE['/HyperMetroPair/synchronize_hcpair/PUT'] = ( FAKE_COMMON_SUCCESS_RESPONSE) 
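# NOTE: a few assignments in this table are repeated verbatim (for example
# the '/MAPPINGVIEW/CREATE_ASSOCIATE/PUT' and free fc_initiator range entries
# above, and FAKE_PORTS_IN_PG_RESPONSE further down). Because this is a plain
# dict keyed by string, the later assignment simply overwrites the identical
# earlier one, so the duplication is harmless.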
MAP_COMMAND_TO_FAKE_RESPONSE['/splitmirror?range=[0-8191]/GET'] = ( FAKE_COMMON_SUCCESS_RESPONSE) MAP_COMMAND_TO_FAKE_RESPONSE['/splitmirror/count/GET'] = ( FAKE_COMMON_FAIL_RESPONSE) MAP_COMMAND_TO_FAKE_RESPONSE['/smartcachepool/count/GET'] = ( FAKE_COMMON_FAIL_RESPONSE) FAKE_GET_PORTG_BY_VIEW = """ { "data": [{ "DESCRIPTION": "Please do NOT modify this. Engine ID: 0", "ID": "0", "NAME": "OpenStack_PortGroup_1", "TYPE": 257 }], "error": { "code": 0 } } """ MAP_COMMAND_TO_FAKE_RESPONSE['/portgroup/associate/mappingview?TYPE=257&AS' 'SOCIATEOBJTYPE=245&ASSOCIATEOBJID=1/GET'] = ( FAKE_GET_PORTG_BY_VIEW) FAKE_GET_PORT_BY_PORTG = """ { "data":[{ "CONFSPEED":"0","FCCONFMODE":"3", "FCRUNMODE":"0","HEALTHSTATUS":"1","ID":"2000643e8c4c5f66", "MAXSUPPORTSPEED":"16000","NAME":"P0","PARENTID":"0B.1", "PARENTTYPE":209,"RUNNINGSTATUS":"10","RUNSPEED":"8000", "WWN":"2000643e8c4c5f66" }], "error":{ "code":0,"description":"0" } } """ MAP_COMMAND_TO_FAKE_RESPONSE['/fc_port/associate/portgroup?TYPE=212&ASSOCI' 'ATEOBJTYPE=257&ASSOCIATEOBJID=0/GET'] = ( FAKE_GET_PORT_BY_PORTG) FAKE_GET_PORTG = """ { "data": { "TYPE": 257, "NAME": "OpenStack_PortGroup_1", "DESCRIPTION": "Please DO NOT change thefollowing message: 0", "ID": "0" }, "error": { "code": 0, "description": "0" } } """ MAP_COMMAND_TO_FAKE_RESPONSE['/portgroup/0/GET'] = FAKE_GET_PORTG MAP_COMMAND_TO_FAKE_RESPONSE['/portgroup/0/PUT'] = FAKE_GET_PORTG MAP_COMMAND_TO_FAKE_RESPONSE['/port/associate/portgroup/POST'] = ( FAKE_GET_PORT_BY_PORTG) MAP_COMMAND_TO_FAKE_RESPONSE['/port/associate/portgroup?ID=0&TYPE=257&ASSOCIA' 'TEOBJTYPE=212&ASSOCIATEOBJID=2000643e8c4c5f66/DE' 'LETE'] = ( FAKE_COMMON_SUCCESS_RESPONSE) FAKE_CREATE_PORTG = """ { "data": { "DESCRIPTION": "Please DO NOT change the following message: 0", "ID": "0", "NAME": "OpenStack_PortGroup_1", "TYPE": 257 }, "error": { "code": 0, "description": "0" } } """ MAP_COMMAND_TO_FAKE_RESPONSE['/PortGroup/POST'] = FAKE_CREATE_PORTG MAP_COMMAND_TO_FAKE_RESPONSE['/PortGroup/1/DELETE'] = ( FAKE_COMMON_SUCCESS_RESPONSE) FAKE_GET_PORTG_FROM_PORT = """ { "data": [{ "TYPE": 257, "NAME": "OpenStack_PortGroup_1", "DESCRIPTION": "PleaseDONOTchangethefollowingmessage: 0", "ID": "0" }], "error": { "code": 0, "description": "0" } } """ MAP_COMMAND_TO_FAKE_RESPONSE['/portgroup/associate/fc_port?TYPE=257&ASSOCIA' 'TEOBJTYPE=212&ASSOCIATEOBJID=1114368/GET'] = ( FAKE_GET_PORTG_FROM_PORT) FAKE_GET_VIEW_BY_PORTG = """ { "data": [{ "ASSOCIATEOBJID": "0", "COUNT": "0", "ASSOCIATEOBJTYPE": "0", "INBANDLUNWWN": "", "FORFILESYSTEM": "false", "ID": "2", "ENABLEINBANDCOMMAND": "false", "NAME": "OpenStack_Mapping_View_1", "WORKMODE": "0", "TYPE": 245, "HOSTLUNID": "0", "DESCRIPTION": "" }], "error": { "code": 0, "description": "0" } } """ MAP_COMMAND_TO_FAKE_RESPONSE['/mappingview/associate/portgroup?TYPE=245&ASS' 'OCIATEOBJTYPE=257&ASSOCIATEOBJID=0/GET'] = ( FAKE_GET_VIEW_BY_PORTG) FAKE_GET_LUNG_BY_VIEW = """ { "data": [{ "TYPE": 256, "NAME": "OpenStack_LunGroup_1", "DESCRIPTION": "OpenStack_LunGroup_1", "ID": "1" }], "error": { "code": 0, "description": "0" } } """ MAP_COMMAND_TO_FAKE_RESPONSE['/lungroup/associate/mappingview?TYPE=256&ASSO' 'CIATEOBJTYPE=245&ASSOCIATEOBJID=2/GET'] = ( FAKE_GET_LUNG_BY_VIEW) FAKE_LUN_COUNT_RESPONSE_1 = """ { "data":{ "COUNT":"2" }, "error":{ "code":0, "description":"0" } } """ MAP_COMMAND_TO_FAKE_RESPONSE['/lun/count?TYPE=11&ASSOCIATEOB' 'JTYPE=256&ASSOCIATEOBJID=1/GET'] = ( FAKE_LUN_COUNT_RESPONSE_1) FAKE_PORTS_IN_PG_RESPONSE = """ { "data": [{ "ID": "1114114", "WWN": "2002643e8c4c5f66" }, { 
"ID": "1114113", "WWN": "2001643e8c4c5f66" }], "error": { "code": 0, "description": "0" } } """ MAP_COMMAND_TO_FAKE_RESPONSE['/fc_port/associate?TYPE=213&ASSOCIATEOBJTYPE=' '257&ASSOCIATEOBJID=0/GET'] = ( FAKE_PORTS_IN_PG_RESPONSE) MAP_COMMAND_TO_FAKE_RESPONSE['/HyperMetro_ConsistentGroup/POST'] = ( FAKE_CREATE_METROROUP_RESPONSE) MAP_COMMAND_TO_FAKE_RESPONSE["/HyperMetro_ConsistentGroup?type" "='15364'/GET"] = ( json.dumps(FAKE_GET_METROROUP_RESPONSE)) MAP_COMMAND_TO_FAKE_RESPONSE["/HyperMetro_ConsistentGroup/11/GET"] = ( FAKE_GET_METROROUP_ID_RESPONSE) MAP_COMMAND_TO_FAKE_RESPONSE["/HyperMetro_ConsistentGroup/11/DELETE"] = ( FAKE_COMMON_SUCCESS_RESPONSE) MAP_COMMAND_TO_FAKE_RESPONSE["/HyperMetro_ConsistentGroup/stop/PUT"] = ( FAKE_COMMON_SUCCESS_RESPONSE) MAP_COMMAND_TO_FAKE_RESPONSE["/HyperMetro_ConsistentGroup/sync/PUT"] = ( FAKE_COMMON_SUCCESS_RESPONSE) FAKE_GET_REMOTEDEV_RESPONSE = """ { "data":[{ "ARRAYTYPE":"1", "HEALTHSTATUS":"1", "ID":"0", "NAME":"Huawei.Storage", "RUNNINGSTATUS":"1", "WWN":"21003400a30d844d" }], "error":{ "code":0, "description":"0" } } """ MAP_COMMAND_TO_FAKE_RESPONSE['/remote_device/GET'] = ( FAKE_GET_REMOTEDEV_RESPONSE) FAKE_CREATE_PAIR_RESPONSE = """ { "data":{ "ID":"%s" }, "error":{ "code":0, "description":"0" } } """ % TEST_PAIR_ID MAP_COMMAND_TO_FAKE_RESPONSE['/REPLICATIONPAIR/POST'] = ( FAKE_CREATE_PAIR_RESPONSE) FAKE_DELETE_PAIR_RESPONSE = """ { "data":{}, "error":{ "code":0, "description":"0" } } """ MAP_COMMAND_TO_FAKE_RESPONSE['/REPLICATIONPAIR/%s/DELETE' % TEST_PAIR_ID] = ( FAKE_DELETE_PAIR_RESPONSE) FAKE_SET_PAIR_ACCESS_RESPONSE = """ { "data":{}, "error":{ "code":0, "description":"0" } } """ MAP_COMMAND_TO_FAKE_RESPONSE['/REPLICATIONPAIR/%s/PUT' % TEST_PAIR_ID] = ( FAKE_SET_PAIR_ACCESS_RESPONSE) FAKE_GET_PAIR_NORMAL_RESPONSE = """ { "data":{ "REPLICATIONMODEL": "1", "RUNNINGSTATUS": "1", "SECRESACCESS": "2", "HEALTHSTATUS": "1", "ISPRIMARY": "true" }, "error":{ "code":0, "description":"0" } } """ FAKE_GET_PAIR_SPLIT_RESPONSE = """ { "data":{ "REPLICATIONMODEL": "1", "RUNNINGSTATUS": "26", "SECRESACCESS": "2", "ISPRIMARY": "true" }, "error":{ "code":0, "description":"0" } } """ FAKE_GET_PAIR_SYNC_RESPONSE = """ { "data":{ "REPLICATIONMODEL": "1", "RUNNINGSTATUS": "23", "SECRESACCESS": "2" }, "error":{ "code":0, "description":"0" } } """ MAP_COMMAND_TO_FAKE_RESPONSE['/REPLICATIONPAIR/%s/GET' % TEST_PAIR_ID] = ( FAKE_GET_PAIR_NORMAL_RESPONSE) FAKE_SYNC_PAIR_RESPONSE = """ { "data":{}, "error":{ "code":0, "description":"0" } } """ MAP_COMMAND_TO_FAKE_RESPONSE['/REPLICATIONPAIR/sync/PUT'] = ( FAKE_SYNC_PAIR_RESPONSE) FAKE_SPLIT_PAIR_RESPONSE = """ { "data":{}, "error":{ "code":0, "description":"0" } } """ MAP_COMMAND_TO_FAKE_RESPONSE['/REPLICATIONPAIR/split/PUT'] = ( FAKE_SPLIT_PAIR_RESPONSE) FAKE_SWITCH_PAIR_RESPONSE = """ { "data":{}, "error":{ "code":0, "description":"0" } } """ MAP_COMMAND_TO_FAKE_RESPONSE['/REPLICATIONPAIR/switch/PUT'] = ( FAKE_SWITCH_PAIR_RESPONSE) FAKE_PORTS_IN_PG_RESPONSE = """ { "data": [{ "ID": "1114114", "WWN": "2002643e8c4c5f66" }, { "ID": "1114113", "WWN": "2001643e8c4c5f66" }], "error": { "code": 0, "description": "0" } } """ MAP_COMMAND_TO_FAKE_RESPONSE['/fc_port/associate?TYPE=213&ASSOCIATEOBJTYPE=' '257&ASSOCIATEOBJID=0/GET'] = ( FAKE_PORTS_IN_PG_RESPONSE) MAP_COMMAND_TO_FAKE_RESPONSE['/portgroup/associate/fc_port?TYPE=257&ASSOCIA' 'TEOBJTYPE=212&ASSOCIATEOBJID=1114369/GET'] = ( FAKE_PORTS_IN_PG_RESPONSE) MAP_COMMAND_TO_FAKE_RESPONSE['/mappingview/associate/portgroup?TYPE=245&ASSOC' 
'IATEOBJTYPE=257&ASSOCIATEOBJID=1114114/GET'] = ( FAKE_SWITCH_PAIR_RESPONSE) MAP_COMMAND_TO_FAKE_RESPONSE['/mappingview/associate/portgroup?TYPE=245&ASSOC' 'IATEOBJTYPE=257&ASSOCIATEOBJID=1114113/GET'] = ( FAKE_COMMON_SUCCESS_RESPONSE) FAKE_CLONEPAIR_PAIRID_GET_RESPONSE = """ { "data":{ "name": "fake_clonepair_name", "copyStatus": "0", "syncStatus": "2" }, "error": { "code": 0, "description": "0" } } """ MAP_COMMAND_TO_FAKE_RESPONSE['/clonepair/fake_clonepair_id/GET'] = ( FAKE_CLONEPAIR_PAIRID_GET_RESPONSE) FAKE_CLONEPAIR_PAIRID_DELETE_RESPONSE = """ { "data":{ "name": "fake_clonepair_name" }, "error": { "code": 0, "description": "0" } } """ MAP_COMMAND_TO_FAKE_RESPONSE['/clonepair/fake_clonepair_id/DELETE'] = ( FAKE_CLONEPAIR_PAIRID_DELETE_RESPONSE) FAKE_CLONEPAIR_RELATION_RESPONSE = """ { "data":{ "ID": "fake_clonepair_id" }, "error": { "code": 0, "description": "" } } """ MAP_COMMAND_TO_FAKE_RESPONSE['/clonepair/relation/POST'] = ( FAKE_CLONEPAIR_RELATION_RESPONSE) FAKE_CLONEPAIR_SYNCHRONIZE_RESPONSE = """ { "data":{}, "error": { "code": 0, "description": "" } } """ MAP_COMMAND_TO_FAKE_RESPONSE['/clonepair/synchronize/PUT'] = ( FAKE_CLONEPAIR_SYNCHRONIZE_RESPONSE) REPLICA_BACKEND_ID = 'huawei-replica-1' class FakeHuaweiConf(huawei_conf.HuaweiConf): def __init__(self, conf, protocol): self.conf = conf self.protocol = protocol def safe_get(self, key): try: return getattr(self.conf, key) except Exception: return def update_config_value(self): setattr(self.conf, 'volume_backend_name', 'huawei_storage') setattr(self.conf, 'san_address', ['http://192.0.2.69:8082/deviceManager/rest/']) setattr(self.conf, 'san_user', 'admin') setattr(self.conf, 'san_password', 'Admin@storage') setattr(self.conf, 'san_product', 'V3') setattr(self.conf, 'san_protocol', self.protocol) setattr(self.conf, 'lun_type', constants.THICK_LUNTYPE) setattr(self.conf, 'lun_ready_wait_interval', 2) setattr(self.conf, 'lun_copy_wait_interval', 2) setattr(self.conf, 'lun_timeout', 43200) setattr(self.conf, 'lun_write_type', '1') setattr(self.conf, 'lun_mirror_switch', '1') setattr(self.conf, 'lun_prefetch_type', '1') setattr(self.conf, 'lun_prefetch_value', '0') setattr(self.conf, 'lun_policy', '0') setattr(self.conf, 'lun_read_cache_policy', '2') setattr(self.conf, 'lun_write_cache_policy', '5') setattr(self.conf, 'storage_pools', ['OpenStack_Pool']) setattr(self.conf, 'iscsi_default_target_ip', ['192.0.2.68']) setattr(self.conf, 'metro_san_address', ['https://192.0.2.240:8088/deviceManager/rest/']) setattr(self.conf, 'metro_storage_pools', 'OpenStack_Pool') setattr(self.conf, 'metro_san_user', 'admin') setattr(self.conf, 'metro_san_password', 'Admin@storage1') setattr(self.conf, 'metro_domain_name', 'hypermetro_test') setattr(self.conf, 'min_fc_ini_online', 0) iscsi_info = { 'default_target_ips': '192.0.2.2', 'initiators': { 'iqn.1993-08.debian:01:ec2bff7ac3a3': { 'Name': 'iqn.1993-08.debian:01:ec2bff7ac3a3', 'CHAPinfo': 'mm-user;mm-user@storage', 'ALUA': '1', 'TargetPortGroup': 'portgroup-test', } } } setattr(self.conf, 'iscsi_info', iscsi_info) rmt_iscsi_info = {'Name': 'iqn.1993-08.debian:01:ec2bff7acxxx', 'TargetIP': '1.1.1.1', 'CHAPinfo': 'mm-user;mm-user@storage', 'ALUA': '1', 'TargetPortGroup': 'portgroup-test'} target = {'backend_id': REPLICA_BACKEND_ID, 'storage_pool': 'OpenStack_Pool', 'san_address': 'https://192.0.2.69:8088/deviceManager/rest/', 'san_user': 'admin', 'san_password': 'Admin@storage1', 'iscsi_info': rmt_iscsi_info} setattr(self.conf, 'replication', target) setattr(self.conf, 'safe_get', 
self.safe_get) class FakeClient(rest_client.RestClient): def __init__(self, configuration): san_address = configuration.san_address san_user = configuration.san_user san_password = configuration.san_password rest_client.RestClient.__init__(self, configuration, san_address, san_user, san_password) self.test_fail = False self.test_multi_url_flag = False self.cache_not_exist = False self.partition_not_exist = False def _get_snapshotid_by_name(self, snapshot_name): return "11" def _check_snapshot_exist(self, snapshot_id): return True def get_partition_id_by_name(self, name): if self.partition_not_exist: return None return "11" def get_cache_id_by_name(self, name): if self.cache_not_exist: return None return "11" def add_lun_to_cache(self, lunid, cache_id): pass def do_call(self, url, data, method, calltimeout=4, log_filter_flag=False): url = url.replace('http://192.0.2.69:8082/deviceManager/rest', '') command = url.replace('/210235G7J20000000000/', '') data = FAKE_COMMON_SUCCESS_RESPONSE if method: command = command + "/" + method for item in MAP_COMMAND_TO_FAKE_RESPONSE: if command == item: data = MAP_COMMAND_TO_FAKE_RESPONSE[item] if self.test_fail: data = FAKE_ERROR_INFO_RESPONSE if command == 'lun/11/GET': data = FAKE_ERROR_LUN_INFO_RESPONSE self.test_fail = False if self.test_multi_url_flag: data = FAKE_ERROR_CONNECT_RESPONSE self.test_multi_url_flag = False return json.loads(data) class FakeReplicaPairManager(replication.ReplicaPairManager): def _init_rmt_client(self): self.rmt_client = FakeClient(self.conf) class FakeISCSIStorage(huawei_driver.HuaweiISCSIDriver): """Fake Huawei Storage, Rewrite some methods of HuaweiISCSIDriver.""" def __init__(self, configuration): self.configuration = configuration self.huawei_conf = FakeHuaweiConf(self.configuration, 'iSCSI') self.active_backend_id = None self.replica = None self.support_func = None self.is_dorado_v6 = False def do_setup(self): self.metro_flag = True self.huawei_conf.update_config_value() self.get_local_and_remote_dev_conf() self.client = FakeClient(configuration=self.configuration) self.rmt_client = FakeClient(configuration=self.configuration) self.replica_client = FakeClient(configuration=self.configuration) self.metro = hypermetro.HuaweiHyperMetro(self.client, self.rmt_client, self.configuration) self.replica = FakeReplicaPairManager(self.client, self.replica_client, self.configuration) class FakeFCStorage(huawei_driver.HuaweiFCDriver): """Fake Huawei Storage, Rewrite some methods of HuaweiFCDriver.""" def __init__(self, configuration): self.configuration = configuration self.fcsan = None self.huawei_conf = FakeHuaweiConf(self.configuration, 'iSCSI') self.active_backend_id = None self.replica = None self.support_func = None self.is_dorado_v6 = False def do_setup(self): self.metro_flag = True self.huawei_conf.update_config_value() self.get_local_and_remote_dev_conf() self.client = FakeClient(configuration=self.configuration) self.rmt_client = FakeClient(configuration=self.configuration) self.replica_client = FakeClient(configuration=self.configuration) self.metro = hypermetro.HuaweiHyperMetro(self.client, self.rmt_client, self.configuration) self.replica = FakeReplicaPairManager(self.client, self.replica_client, self.configuration) @ddt.ddt class HuaweiTestBase(test.TestCase): """Base class for Huawei test cases. Implement common setup operations or test cases in this class.
""" def setUp(self): super(HuaweiTestBase, self).setUp() self.configuration = mock.Mock(spec=conf.Configuration) self.driver = FakeISCSIStorage(configuration=self.configuration) self.driver.do_setup() self.volume = fake_volume.fake_volume_obj( admin_contex, host=HOST, provider_location=PROVIDER_LOCATION, metadata=METADATA, id=ID) self.snapshot = fake_snapshot.fake_snapshot_obj( admin_contex, provider_location=SNAP_PROVIDER_LOCATION, id=ID) self.snapshot.volume = self.volume self.replica_volume = fake_volume.fake_volume_obj( admin_contex, host=HOST, provider_location=PROVIDER_LOCATION, volume_metadata=METADATA, replication_status='disabled', replication_driver_data=REPLICA_DRIVER_DATA, id=ID) self.hyper_volume = fake_volume.fake_volume_obj( admin_contex, host=HOST, provider_location=PROVIDER_LOCATION_WITH_HYPERMETRO, id=ID) self.original_volume = fake_volume.fake_volume_obj(admin_contex, id=ID) self.current_volume = fake_volume.fake_volume_obj( admin_contex, id=ID, provider_location=PROVIDER_LOCATION, name_id=ID) self.group_snapshot = fake_group_snapshot.fake_group_snapshot_obj( admin_contex, id=ID, group_id=ID, status='available') self.group = fake_group.fake_group_obj( admin_contex, id=ID, status='available') constants.DEFAULT_REPLICA_WAIT_INTERVAL = .1 constants.DEFAULT_REPLICA_WAIT_TIMEOUT = .5 constants.DEFAULT_WAIT_INTERVAL = .1 constants.DEFAULT_WAIT_TIMEOUT = .5 constants.MIGRATION_WAIT_INTERVAL = .1 constants.QOS_SPEC_KEYS = ( 'maxIOPS', 'minIOPS', 'minBandWidth', 'maxBandWidth', 'latency', 'IOType') constants.QOS_IOTYPES = ('0', '1', '2') constants.SUPPORT_LUN_TYPES = ('Thick', 'Thin') constants.DEFAULT_LUN_TYPE = 'Thick' def test_encode_name(self): lun_name = huawei_utils.encode_name(self.volume.id) self.assertEqual('21ec7341-ca82ece92e1ac480c963f1', lun_name) @ddt.data({'name': '9548e5e7-ca1c-46bf-b132', 'expected': '9548e5e7-ca1c-46bf-b132'}, {'name': '9548e5e7ca1c46bfb132891b425a81f', 'expected': '9548e5e7ca1c46bfb132891b425a81f'}, {'name': '9548e5e7-ca1c-46bf-b132-891b425a81f5', 'expected': '45d6964d772b2efcaad0e3c59538ecc'}) @ddt.unpack def test_encode_host_name(self, name, expected): self.assertEqual(expected, huawei_utils.encode_host_name(name)) @mock.patch.object(rest_client, 'RestClient') def test_create_snapshot_success(self, mock_client): lun_info = self.driver.create_snapshot(self.snapshot) self.assertDictEqual( {"huawei_snapshot_id": "11", "huawei_snapshot_wwn": "fake-wwn"}, json.loads(lun_info['provider_location'])) self.snapshot.volume_id = ID self.snapshot.volume = self.volume lun_info = self.driver.create_snapshot(self.snapshot) self.assertDictEqual( {"huawei_snapshot_id": "11", "huawei_snapshot_wwn": "fake-wwn"}, json.loads(lun_info['provider_location'])) @ddt.data('1', '', '0') def test_copy_volume(self, input_speed): self.driver.configuration.lun_copy_wait_interval = 1 self.volume.metadata = {'copyspeed': input_speed} mocker = self.mock_object( self.driver.client, 'create_luncopy', mock.Mock(wraps=self.driver.client.create_luncopy)) self.driver._copy_volume(self.volume, 'fake_copy_name', 'fake_src_lun', 'fake_tgt_lun') mocker.assert_called_once_with('fake_copy_name', 'fake_src_lun', 'fake_tgt_lun', input_speed) @ddt.data({'input_speed': '1', 'actual_speed': '1'}, {'input_speed': '', 'actual_speed': '2'}, {'input_speed': None, 'actual_speed': '2'}, {'input_speed': '5', 'actual_speed': '2'}) @ddt.unpack def test_client_create_luncopy(self, input_speed, actual_speed): mocker = self.mock_object( self.driver.client, 'call', mock.Mock(wraps=self.driver.client.call)) 
self.driver.client.create_luncopy('fake_copy_name', 'fake_src_lun', 'fake_tgt_lun', input_speed) mocker.assert_called_once_with( mock.ANY, {"TYPE": 219, "NAME": 'fake_copy_name', "DESCRIPTION": 'fake_copy_name', "COPYSPEED": actual_speed, "LUNCOPYTYPE": "1", "SOURCELUN": "INVALID;fake_src_lun;INVALID;INVALID;INVALID", "TARGETLUN": "INVALID;fake_tgt_lun;INVALID;INVALID;INVALID"}, 'POST' ) @ddt.data( { 'volume': fake_volume.fake_volume_obj( admin_contex, provider_location=PROVIDER_LOCATION), 'expect': {'huawei_lun_id': '11', 'huawei_lun_wwn': '6643e8c1004c5f6723e9f454003'} }, { 'volume': fake_volume.fake_volume_obj( admin_contex, provider_location=None), 'expect': {} }, { 'volume': fake_volume.fake_volume_obj( admin_contex, provider_location=''), 'expect': {} }, { 'volume': fake_volume.fake_volume_obj( admin_contex, provider_location='11', volume_admin_metadata=ADMIN_METADATA, volume_metadata=VOL_METADATA ), 'expect': {'huawei_lun_id': '11', 'huawei_lun_wwn': 'FAKE_WWN', 'huawei_sn': None, 'hypermetro_id': '11', 'remote_lun_id': '1'} } ) @ddt.unpack def test_get_volume_private_data(self, volume, expect): metadata = huawei_utils.get_volume_private_data(volume) self.assertEqual(expect, metadata) @ddt.data( { 'snapshot': fake_snapshot.fake_snapshot_obj( admin_contex, provider_location=SNAP_PROVIDER_LOCATION), 'expect': {'huawei_snapshot_id': '11'} }, { 'snapshot': fake_snapshot.fake_snapshot_obj( admin_contex, provider_location=None), 'expect': {} }, { 'snapshot': fake_snapshot.fake_snapshot_obj( admin_contex, provider_location=''), 'expect': {} }, { 'snapshot': fake_snapshot.fake_snapshot_obj( admin_contex, provider_location='11', snapshot_metadata=[{'key': 'huawei_snapshot_wwn', 'value': 'fake_wwn'}, ], expected_attrs=['metadata'], ), 'expect': {'huawei_snapshot_id': '11', 'huawei_snapshot_wwn': 'fake_wwn', } } ) @ddt.unpack def test_get_snapshot_private_data(self, snapshot, expect): metadata = huawei_utils.get_snapshot_private_data(snapshot) self.assertDictEqual(expect, metadata) @ddt.data( { 'provider_location': PROVIDER_LOCATION, 'mock_func': None, }, { 'provider_location': '', 'mock_func': None, }, { 'provider_location': PROVIDER_LOCATION, 'mock_func': 'get_lun_info_by_name', }, { 'provider_location': '{"huawei_lun_wwn": "fake_wwn"}', 'mock_func': None, }, ) @ddt.unpack def test_get_lun_info(self, provider_location, mock_func): volume = fake_volume.fake_volume_obj( admin_contex, id=ID, provider_location=provider_location) if mock_func: self.mock_object(self.driver.client, mock_func, return_value=None) lun_info = huawei_utils.get_lun_info(self.driver.client, volume) if provider_location in (PROVIDER_LOCATION, ''): self.assertEqual('6643e8c1004c5f6723e9f454003', lun_info['WWN']) else: self.assertIsNone(lun_info) @ddt.data( { 'snapshot': fake_snapshot.fake_snapshot_obj( admin_contex, id=ID), 'expect': '11', }, { 'snapshot': fake_snapshot.fake_snapshot_obj( admin_contex, id=ID2), 'expect': '11', }, { 'snapshot': fake_snapshot.fake_snapshot_obj( admin_contex, id='e9c9ca0f-01e8-4780-9585-369c15026001'), 'expect': None } ) @ddt.unpack def test_get_snapshot_info(self, snapshot, expect): snapshot_info = huawei_utils.get_snapshot_info( self.driver.client, snapshot) if expect: self.assertEqual(expect, snapshot_info['ID']) else: self.assertIsNone(snapshot_info) @ddt.data( {'host_name': HOST, 'expect': '1'}, {'host_name': HOST2, 'expect': '1'}, {'host_name': 'fake_host_name', 'expect': None}, ) @ddt.unpack def test_get_host_id(self, host_name, expect): host_id = 
huawei_utils.get_host_id(self.driver.client, host_name) self.assertEqual(expect, host_id) @ddt.ddt class HuaweiISCSIDriverTestCase(HuaweiTestBase): def setUp(self): super(HuaweiISCSIDriverTestCase, self).setUp() self.configuration = mock.Mock(spec=conf.Configuration) self.configuration.hypermetro_devices = hypermetro_devices self.flags(transport_url='fake:/') self.driver = FakeISCSIStorage(configuration=self.configuration) self.driver.do_setup() self.portgroup = 'portgroup-test' self.iscsi_iqns = ['iqn.2006-08.com.huawei:oceanstor:21000022a:' ':20503:192.0.2.1', 'iqn.2006-08.com.huawei:oceanstor:21000022a:' ':20500:192.0.2.2'] self.target_ips = ['192.0.2.1', '192.0.2.2'] self.portgroup_id = 11 self.driver.client.login() def test_parse_rmt_iscsi_info(self): rmt_dev = self.driver.configuration.replication expected_iscsi_info = {'Name': 'iqn.1993-08.debian:01:ec2bff7acxxx', 'TargetIP': '1.1.1.1', 'CHAPinfo': 'mm-user;mm-user@storage', 'ALUA': '1', 'TargetPortGroup': 'portgroup-test'} self.assertDictEqual(expected_iscsi_info, rmt_dev['iscsi_info']) def test_login_success(self): device_id = self.driver.client.login() self.assertEqual('210235G7J20000000000', device_id) @ddt.data(constants.PWD_EXPIRED, constants.PWD_RESET) def test_login_password_expires_and_reset_fail(self, state): with mock.patch.object(self.driver.client, 'logout') as mock_logout: self.mock_object(FakeClient, 'do_call', return_value={"error": {"code": 0}, "data": { "username": "admin", "iBaseToken": "2001031430", "deviceid": "210235G7J20000000000", "accountstate": state}}) self.assertRaises(exception.VolumeBackendAPIException, self.driver.client.login) mock_logout.assert_called_once_with() def test_login_logout_fail(self): login_info = {"error": {"code": 0}, "data": {"username": "admin", "iBaseToken": "2001031430", "deviceid": "210235G7J20000000000", "accountstate": 3}} logout_info = {"error": {"code": 1}, "data": {}} self.mock_object(FakeClient, 'do_call', side_effect=[login_info, logout_info]) self.assertRaises(exception.VolumeBackendAPIException, self.driver.client.login) def test_check_volume_exist_on_array(self): self.mock_object(rest_client.RestClient, 'get_lun_id_by_name', return_value=None) self.driver._check_volume_exist_on_array( self.volume, constants.VOLUME_NOT_EXISTS_WARN) def test_create_volume_success(self): # Have pool info in the volume. self.volume.host = 'ubuntu001@backend001#OpenStack_Pool' lun_info = self.driver.create_volume(self.volume) expect_value = {"huawei_lun_id": "1", "huawei_lun_wwn": "6643e8c1004c5f6723e9f454003"} self.assertDictEqual(expect_value, json.loads(lun_info['provider_location'])) # No pool info in the volume. 
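# Both halves of this test expect the same provider_location, i.e. creation is expected to succeed whether or not volume.host carries an explicit '#pool' suffix (descriptive note added for clarity; behaviour inferred from the identical expected values in this test).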
self.volume.host = 'ubuntu001@backend001' lun_info = self.driver.create_volume(self.volume) expect_value = {"huawei_lun_id": "1", "huawei_lun_wwn": "6643e8c1004c5f6723e9f454003"} self.assertDictEqual(expect_value, json.loads(lun_info['provider_location'])) @ddt.data(FAKE_POOLS_UNSUPPORT_REPORT, FAKE_POOLS_SUPPORT_REPORT) def test_delete_replication_fail(self, pool_data): self.driver.support_func = pool_data self.mock_object(replication.ReplicaCommonDriver, 'split') self.mock_object( common.HuaweiBaseDriver, '_get_volume_type', return_value={'extra_specs': sync_replica_specs}) self.mock_object(rest_client.RestClient, 'delete_lun', side_effect=exception.VolumeBackendAPIException( data='err')) self.assertRaises(exception.VolumeBackendAPIException, self.driver.delete_volume, self.replica_volume) @ddt.data(FAKE_POOLS_UNSUPPORT_REPORT, FAKE_POOLS_SUPPORT_REPORT) @mock.patch('oslo_service.loopingcall.FixedIntervalLoopingCall', new=utils.ZeroIntervalLoopingCall) def test_migrate_volume_success_no_data(self, pool_data): self.driver.support_func = pool_data task_info = {"data": [{"ENDTIME": "1436816174", "ID": "9", "PARENTID": "11", "PARENTNAME": "xmRBHMlVRruql5vwthpPXQ", "PROCESS": "-1", "RUNNINGSTATUS": "76", "SPEED": "2", "STARTTIME": "1436816111", "TARGETLUNID": "1", "TARGETLUNNAME": "4924891454902893639", "TYPE": 253, "WORKMODE": "0" }], "error": {"code": 0, "description": "0"} } moved = False empty_dict = {} self.mock_object(rest_client.RestClient, 'get_lun_migration_task', side_effect=[{}, task_info]) moved, model_update = self.driver.migrate_volume(None, self.volume, test_host) self.assertTrue(moved) self.assertEqual(empty_dict, model_update) @ddt.data(FAKE_POOLS_UNSUPPORT_REPORT, FAKE_POOLS_SUPPORT_REPORT) def test_migrate_volume_success_with_replication(self, pool_data): self.driver.support_func = pool_data task_info = {"data": [{"ENDTIME": "1436816174", "ID": "9", "PARENTID": "11", "PARENTNAME": "xmRBHMlVRruql5vwthpPXQ", "PROCESS": "-1", "RUNNINGSTATUS": "76", "SPEED": "2", "STARTTIME": "1436816111", "TARGETLUNID": "1", "TARGETLUNNAME": "4924891454902893639", "TYPE": 253, "WORKMODE": "0" }], "error": {"code": 0, "description": "0"} } moved = False empty_dict = {} self.mock_object(rest_client.RestClient, 'get_lun_migration_task', return_value=task_info) moved, model_update = self.driver.migrate_volume(None, self.replica_volume, test_host) self.assertTrue(moved) self.assertEqual(empty_dict, model_update) @ddt.data(FAKE_POOLS_UNSUPPORT_REPORT, FAKE_POOLS_SUPPORT_REPORT) def test_migrate_volume_fail_migration_fault(self, pool_data): self.driver.support_func = pool_data task_info = {"data": [{"ENDTIME": "1436816174", "ID": "9", "PARENTID": "11", "PARENTNAME": "xmRBHMlVRruql5vwthpPXQ", "PROCESS": "-1", "RUNNINGSTATUS": "74", "SPEED": "2", "STARTTIME": "1436816111", "TARGETLUNID": "1", "TARGETLUNNAME": "4924891454902893639", "TYPE": 253, "WORKMODE": "0" }], "error": {"code": 0, "description": "0"} } self.mock_object(rest_client.RestClient, 'get_lun_migration_task', return_value=task_info) self.assertRaises(exception.VolumeBackendAPIException, self.driver.migrate_volume, None, self.volume, test_host) @ddt.data(FAKE_POOLS_UNSUPPORT_REPORT, FAKE_POOLS_SUPPORT_REPORT) def test_migrate_volume_fail_no_migrate_task(self, pool_data): self.driver.support_func = pool_data task_info = {"data": [{"ENDTIME": "1436816174", "ID": "9", "PARENTID": "12", "PARENTNAME": "xmRBHMlVRruql5vwthpPXQ", "PROCESS": "-1", "RUNNINGSTATUS": "76", "SPEED": "2", "STARTTIME": "1436816111", "TARGETLUNID": "1", "TARGETLUNNAME": 
"4924891454902893639", "TYPE": 253, "WORKMODE": "0" }], "error": {"code": 0, "description": "0"} } self.mock_object(rest_client.RestClient, 'get_lun_migration_task', return_value=task_info) self.assertRaises(exception.VolumeBackendAPIException, self.driver.migrate_volume, None, self.volume, test_host) @ddt.data(FAKE_POOLS_UNSUPPORT_REPORT, FAKE_POOLS_SUPPORT_REPORT) def test_migrate_volume_with_type_id(self, pool_data): self.driver.support_func = pool_data self.volume.volume_type = None self.volume.volume_type_id = '550c089b-bfdd-4f7f-86e1-3ba88125555c' task_info = {"data": [{"ENDTIME": "1436816174", "ID": "9", "PARENTID": "11", "PARENTNAME": "xmRBHMlVRruql5vwthpPXQ", "PROCESS": "-1", "RUNNINGSTATUS": "76", "SPEED": "2", "STARTTIME": "1436816111", "TARGETLUNID": "1", "TARGETLUNNAME": "4924891454902893639", "TYPE": 253, "WORKMODE": "0" }], "error": {"code": 0, "description": "0"} } empty_dict = {} self.mock_object(volume_types, 'get_volume_type', return_value=test_new_type) self.mock_object(rest_client.RestClient, 'get_lun_migration_task', return_value=task_info) moved, model_update = self.driver.migrate_volume(None, self.volume, test_host) self.assertTrue(moved) self.assertEqual(empty_dict, model_update) @ddt.data(FAKE_POOLS_UNSUPPORT_REPORT, FAKE_POOLS_SUPPORT_REPORT) def test_manage_existing_fail(self, pool_data): self.driver.support_func = pool_data self.mock_object(rest_client.RestClient, 'get_lun_info', return_value={'CAPACITY': 2097152, 'ALLOCTYPE': 1}) self.mock_object(rest_client.RestClient, 'get_lun_id_by_name', return_value='ID1') self.mock_object(rest_client.RestClient, 'rename_lun') self.mock_object(common.HuaweiBaseDriver, '_get_lun_info_by_ref', return_value={ 'PARENTNAME': 'OpenStack_Pool', 'SNAPSHOTIDS': [], 'ID': 'ID1', 'HEALTHSTATUS': constants.STATUS_HEALTH, 'WWN': '6643e8c1004c5f6723e9f454003'}) self.mock_object(volume_types, 'get_volume_type', return_value={'extra_specs': test_new_type}) self.mock_object(common.HuaweiBaseDriver, '_check_needed_changes', return_value={}) external_ref = {'source-name': 'test1', 'source-id': 'ID1'} self.driver.manage_existing(self.volume, external_ref) @ddt.data(FAKE_POOLS_UNSUPPORT_REPORT, FAKE_POOLS_SUPPORT_REPORT) def test_delete_volume_success(self, pool_data): self.driver.support_func = pool_data self.driver.delete_volume(self.volume) def test_delete_snapshot_success(self): self.driver.delete_snapshot(self.snapshot) @ddt.data(True, False) def test_create_volume_from_snapsuccess(self, is_dorado_v6): self.mock_object( huawei_utils, 'get_volume_params', return_value={'replication_enabled': True}) self.mock_object(replication.ReplicaCommonDriver, 'sync') self.mock_object(self.driver.client, 'get_snapshot_info_by_name', return_value= {'ID': ID, 'RUNNINGSTATUS': constants.STATUS_ACTIVE}) self.configuration.lun_copy_speed = 2 self.driver.is_dorado_v6 = is_dorado_v6 model_update = self.driver.create_volume_from_snapshot(self.volume, self.snapshot) expect_value = {"huawei_lun_id": "1", "huawei_lun_wwn": "6643e8c1004c5f6723e9f454003"} self.assertDictEqual(expect_value, json.loads(model_update['provider_location'])) driver_data = {'pair_id': TEST_PAIR_ID, 'rmt_lun_id': '1', 'rmt_lun_wwn': '6643e8c1004c5f6723e9f454003'} self.assertDictEqual( driver_data, json.loads(model_update['replication_driver_data'])) self.assertEqual('available', model_update['replication_status']) @mock.patch.object(huawei_driver.HuaweiISCSIDriver, 'initialize_connection', return_value={"data": {'target_lun': 1}}) def test_initialize_connection_snapshot_success(self, 
mock_iscsi_init): iscsi_properties = self.driver.initialize_connection_snapshot( self.snapshot, FakeConnector) volume = Volume(id=self.snapshot.id, provider_location=self.snapshot.provider_location, lun_type='27', metadata=None) self.assertEqual(1, iscsi_properties['data']['target_lun']) mock_iscsi_init.assert_called_with(volume, FakeConnector) def test_initialize_connection_success_multipath_portgroup(self): temp_connector = copy.deepcopy(FakeConnector) temp_connector['multipath'] = True self.mock_object(rest_client.RestClient, 'get_tgt_port_group', return_value='11') iscsi_properties = self.driver.initialize_connection(self.volume, temp_connector) self.assertEqual([1, 1], iscsi_properties['data']['target_luns']) def test_initialize_connection_fail_multipath_portgroup(self): temp_connector = copy.deepcopy(FakeConnector) temp_connector['multipath'] = True self.mock_object(rest_client.RestClient, 'get_tgt_port_group', return_value='12') self.mock_object(rest_client.RestClient, '_get_tgt_ip_from_portgroup', return_value=[]) self.assertRaises(exception.VolumeBackendAPIException, self.driver.initialize_connection, self.volume, temp_connector) def test_initialize_connection_success_multipath_targetip(self): iscsi_info = { 'initiators': { 'iqn.1993-08.debian:01:ec2bff7ac3a3': { 'Name': 'iqn.1993-08.debian:01:ec2bff7ac3a3', 'TargetIP': '192.0.2.2', 'CHAPinfo': 'mm-user;mm-user@storage', 'ALUA': '1', } } } configuration = mock.Mock(spec=conf.Configuration) configuration.hypermetro_devices = hypermetro_devices driver = FakeISCSIStorage(configuration=self.configuration) driver.do_setup() driver.configuration.iscsi_info = iscsi_info driver.client.iscsi_info = iscsi_info temp_connector = copy.deepcopy(FakeConnector) temp_connector['multipath'] = True iscsi_properties = driver.initialize_connection(self.volume, temp_connector) self.assertEqual([1], iscsi_properties['data']['target_luns']) def test_initialize_connection_fail_multipath_targetip(self): iscsi_info = { 'initiators': { 'iqn.1993-08.debian:01:ec2bff7ac3a3': { 'Name': 'iqn.1993-08.debian:01:ec2bff7ac3a3', 'TargetIP': '192.0.2.6', 'CHAPinfo': 'mm-user;mm-user@storage', 'ALUA': '1', } } } configuration = mock.Mock(spec=conf.Configuration) configuration.hypermetro_devices = hypermetro_devices driver = FakeISCSIStorage(configuration=self.configuration) driver.do_setup() driver.configuration.iscsi_info = iscsi_info driver.client.iscsi_info = iscsi_info temp_connector = copy.deepcopy(FakeConnector) temp_connector['multipath'] = True self.assertRaises(exception.VolumeBackendAPIException, driver.initialize_connection, self.volume, temp_connector) def test_initialize_connection_success_multipath_defaultip(self): iscsi_info = { 'default_target_ips': ['192.0.2.2'], 'initiators': { 'iqn.1993-08.debian:01:ec2bff7ac3a3': { 'Name': 'iqn.1993-08.debian:01:ec2bff7ac3a3', 'CHAPinfo': 'mm-user;mm-user@storage', 'ALUA': '1', } } } configuration = mock.Mock(spec=conf.Configuration) configuration.hypermetro_devices = hypermetro_devices driver = FakeISCSIStorage(configuration=self.configuration) driver.do_setup() driver.configuration.iscsi_info = iscsi_info driver.client.iscsi_info = iscsi_info temp_connector = copy.deepcopy(FakeConnector) temp_connector['multipath'] = True iscsi_properties = driver.initialize_connection(self.volume, temp_connector) self.assertEqual([1], iscsi_properties['data']['target_luns']) def test_initialize_connection_fail_multipath_defaultip(self): iscsi_info = { 'default_target_ips': ['192.0.2.6'], 'initiators': { 
'iqn.1993-08.debian:01:ec2bff7ac3a3': { 'Name': 'iqn.1993-08.debian:01:ec2bff7ac3a3', 'CHAPinfo': 'mm-user;mm-user@storage', 'ALUA': '1', } }, } configuration = mock.Mock(spec=conf.Configuration) configuration.hypermetro_devices = hypermetro_devices driver = FakeISCSIStorage(configuration=self.configuration) driver.do_setup() driver.configuration.iscsi_info = iscsi_info driver.client.iscsi_info = iscsi_info temp_connector = copy.deepcopy(FakeConnector) temp_connector['multipath'] = True self.assertRaises(exception.VolumeBackendAPIException, driver.initialize_connection, self.volume, temp_connector) def test_initialize_connection_fail_no_port_in_portgroup(self): temp_connector = copy.deepcopy(FakeConnector) temp_connector['multipath'] = True self.mock_object(rest_client.RestClient, 'get_tgt_port_group', return_value='11') self.mock_object(rest_client.RestClient, '_get_tgt_ip_from_portgroup', return_value=[]) self.assertRaises(exception.VolumeBackendAPIException, self.driver.initialize_connection, self.volume, temp_connector) def test_initialize_connection_fail_multipath_no_ip(self): iscsi_info = { 'default_target_ips': [], 'initiators': { 'iqn.1993-08.debian:01:ec2bff7ac3a3': { 'Name': 'iqn.1993-08.debian:01:ec2bff7ac3a3', 'CHAPinfo': 'mm-user;mm-user@storage', 'ALUA': '1', } } } configuration = mock.Mock(spec=conf.Configuration) configuration.hypermetro_devices = hypermetro_devices driver = FakeISCSIStorage(configuration=self.configuration) driver.do_setup() driver.configuration.iscsi_info = iscsi_info driver.client.iscsi_info = iscsi_info temp_connector = copy.deepcopy(FakeConnector) temp_connector['multipath'] = True self.assertRaises(exception.VolumeBackendAPIException, driver.initialize_connection, self.volume, temp_connector) @mock.patch.object(huawei_driver.HuaweiISCSIDriver, 'terminate_connection') def test_terminate_connection_snapshot_success(self, mock_iscsi_term): self.driver.terminate_connection_snapshot(self.snapshot, FakeConnector) volume = Volume(id=self.snapshot.id, provider_location=self.snapshot.provider_location, lun_type='27', metadata=None) mock_iscsi_term.assert_called_with(volume, FakeConnector) def test_terminate_connection_success(self): self.driver.terminate_connection(self.volume, FakeConnector) def test_get_volume_status(self): data = self.driver.get_volume_stats() self.assertEqual(self.driver.VERSION, data['driver_version']) @mock.patch.object(rest_client.RestClient, 'get_lun_info', return_value={"CAPACITY": 6291456}) @mock.patch.object(rest_client.RestClient, 'extend_lun') def test_extend_volume_size_equal(self, mock_extend, mock_lun_info): self.driver.extend_volume(self.volume, 3) self.assertEqual(0, mock_extend.call_count) @mock.patch.object(rest_client.RestClient, 'get_lun_info', return_value={"CAPACITY": 5291456}) @mock.patch.object(rest_client.RestClient, 'extend_lun') def test_extend_volume_success(self, mock_extend, mock_lun_info): self.driver.extend_volume(self.volume, 3) self.assertEqual(1, mock_extend.call_count) @mock.patch.object(rest_client.RestClient, 'get_lun_info', return_value={"CAPACITY": 7291456}) def test_extend_volume_fail(self, mock_lun_info): self.assertRaises(exception.VolumeBackendAPIException, self.driver.extend_volume, self.volume, 3) def test_extend_nonexistent_volume(self): self.volume = fake_volume.fake_volume_obj(admin_contex) self.mock_object(rest_client.RestClient, 'get_lun_id_by_name', return_value=None) self.assertRaises(exception.VolumeBackendAPIException, self.driver.extend_volume, self.volume, 3) def 
test_get_volume_metadata(self): metadata = [{'key': 'huawei_lun_wwn', 'value': '1'}] tmp_volume = fake_volume.fake_volume_obj( admin_contex, volume_metadata=metadata) expected_value = {'huawei_lun_wwn': '1'} metadata = huawei_utils.get_volume_metadata(tmp_volume) self.assertEqual(expected_value, metadata) expected_value = {'huawei_lun_wwn': '1'} tmp_volume = fake_volume.fake_volume_obj(admin_contex) tmp_volume.metadata = expected_value metadata = huawei_utils.get_volume_metadata(tmp_volume) self.assertEqual(expected_value, metadata) def test_login_fail(self): self.driver.client.test_fail = True self.assertRaises(exception.VolumeBackendAPIException, self.driver.client.login) def test_create_snapshot_fail(self): self.driver.client.test_fail = True self.assertRaises(exception.VolumeBackendAPIException, self.driver.create_snapshot, self.snapshot) def test_create_volume_fail(self): self.driver.client.test_fail = True self.assertRaises(exception.VolumeBackendAPIException, self.driver.create_volume, self.volume) def test_delete_volume_fail(self): self.mock_object( self.driver.client, 'delete_lun', side_effect=exception.VolumeBackendAPIException(data='')) self.driver.support_func = {} self.assertRaises(exception.VolumeBackendAPIException, self.driver.delete_volume, self.volume) def test_delete_snapshot_fail(self): self.driver.client.test_fail = True self.assertRaises(exception.VolumeBackendAPIException, self.driver.delete_snapshot, self.snapshot) def test_delete_snapshot_with_snapshot_nonexistent(self): self.snapshot.provider_location = None self.driver.delete_snapshot(self.snapshot) def test_initialize_connection_fail(self): self.driver.client.test_fail = True self.assertRaises(exception.VolumeBackendAPIException, self.driver.initialize_connection, self.volume, FakeConnector) def test_lun_is_associated_to_lungroup(self): self.driver.client.associate_lun_to_lungroup('11', '11') result = self.driver.client._is_lun_associated_to_lungroup('11', '11') self.assertTrue(result) def test_lun_is_not_associated_to_lun_group(self): self.driver.client.associate_lun_to_lungroup('12', '12') self.driver.client.remove_lun_from_lungroup('12', '12') result = self.driver.client._is_lun_associated_to_lungroup('12', '12') self.assertFalse(result) def test_get_tgtip(self): portg_id = self.driver.client.get_tgt_port_group(self.portgroup) target_ip = self.driver.client._get_tgt_ip_from_portgroup(portg_id) self.assertEqual(self.target_ips, target_ip) def test_find_chap_info(self): iscsi_info = { 'initiators': { 'fake.iqn': { 'Name': 'fake.iqn', 'CHAPinfo': 'mm-user;mm-user@storage', } } } chapinfo = self.driver.client.find_chap_info(iscsi_info, 'fake.iqn') chap_username, chap_password = chapinfo.split(';') self.assertEqual('mm-user', chap_username) self.assertEqual('mm-user@storage', chap_password) def test_find_alua_info(self): iscsi_info = { 'initiators': { 'fake.iqn': { 'Name': 'fake.iqn', 'ALUA': '1', } } } type = self.driver.client._find_alua_info(iscsi_info, 'fake.iqn') self.assertEqual('1', type) def test_get_pool_info(self): pools = [{"NAME": "test001", "ID": "0", "USERFREECAPACITY": "36", "USERTOTALCAPACITY": "48", "USAGETYPE": constants.BLOCK_STORAGE_POOL_TYPE, "TIER0CAPACITY": "48", "TIER1CAPACITY": "0", "TIER2CAPACITY": "0"}, {"NAME": "test002", "ID": "1", "USERFREECAPACITY": "37", "USERTOTALCAPACITY": "49", "USAGETYPE": constants.FILE_SYSTEM_POOL_TYPE, "TIER0CAPACITY": "0", "TIER1CAPACITY": "49", "TIER2CAPACITY": "0"}, {"NAME": "test003", "ID": "0", "USERFREECAPACITY": "36", "DATASPACE": "35", 
"USERTOTALCAPACITY": "48", "USAGETYPE": constants.BLOCK_STORAGE_POOL_TYPE, "TIER0CAPACITY": "0", "TIER1CAPACITY": "0", "TIER2CAPACITY": "48"}, {"NAME": "test004", "ID": "0", "USERFREECAPACITY": "36", "DATASPACE": "35", "USERTOTALCAPACITY": "48", "USAGETYPE": constants.BLOCK_STORAGE_POOL_TYPE, "TIER0CAPACITY": "40"}] pool_name = 'test001' test_info = {'CAPACITY': '36', 'ID': '0', 'TOTALCAPACITY': '48', 'TIER0CAPACITY': '48', 'TIER1CAPACITY': '0', 'TIER2CAPACITY': '0'} pool_info = self.driver.client.get_pool_info(pool_name, pools) self.assertEqual(test_info, pool_info) pool_name = 'test002' test_info = {} pool_info = self.driver.client.get_pool_info(pool_name, pools) self.assertEqual(test_info, pool_info) pool_name = 'test000' test_info = {} pool_info = self.driver.client.get_pool_info(pool_name, pools) self.assertEqual(test_info, pool_info) pool_name = 'test003' test_info = {'CAPACITY': '35', 'ID': '0', 'TOTALCAPACITY': '48', 'TIER0CAPACITY': '0', 'TIER1CAPACITY': '0', 'TIER2CAPACITY': '48'} pool_info = self.driver.client.get_pool_info(pool_name, pools) self.assertEqual(test_info, pool_info) pool_name = 'test004' test_info = {'CAPACITY': '35', 'ID': '0', 'TOTALCAPACITY': '48', 'TIER0CAPACITY': '40', 'TIER1CAPACITY': '0', 'TIER2CAPACITY': '0'} pool_info = self.driver.client.get_pool_info(pool_name, pools) self.assertEqual(test_info, pool_info) @ddt.data(FAKE_POOLS_UNSUPPORT_REPORT, FAKE_POOLS_SUPPORT_REPORT) @mock.patch.object(huawei_utils, 'get_volume_params', return_value={'qos': {'MAXIOPS': '100', 'IOType': '2'} }) def test_create_smartqos(self, mock_qos_value, pool_data): self.driver.support_func = pool_data lun_info = self.driver.create_volume(self.volume) expect_value = {"huawei_lun_id": "1", "huawei_lun_wwn": "6643e8c1004c5f6723e9f454003"} self.assertDictEqual(expect_value, json.loads(lun_info['provider_location'])) @ddt.data('front-end', 'back-end') @mock.patch.object(huawei_utils, 'get_volume_params', return_value={'smarttier': 'true', 'smartcache': 'true', 'smartpartition': 'true', 'thin_provisioning_support': 'true', 'thick_provisioning_support': 'false', 'policy': '2', 'cachename': 'cache-test', 'partitionname': 'partition-test'}) @mock.patch.object(common.HuaweiBaseDriver, '_get_volume_type', return_value={'qos_specs_id': u'025ce295-15e9-41a7'}) def test_create_smartqos_success(self, mock_consumer, mock_qos_specs, mock_value_type): self.mock_object(qos_specs, 'get_qos_specs', return_value={'specs': {'maxBandWidth': '100', 'IOType': '0'}, 'consumer': mock_consumer}) self.driver.support_func = FAKE_POOLS_SUPPORT_REPORT lun_info = self.driver.create_volume(self.volume) expect_value = {"huawei_lun_id": "1", "huawei_lun_wwn": "6643e8c1004c5f6723e9f454003"} self.assertDictEqual(expect_value, json.loads(lun_info['provider_location'])) @ddt.data([{'specs': {'maxBandWidth': '100', 'IOType': '3'}}, FAKE_POOLS_UNSUPPORT_REPORT], [{'specs': {'maxBandWidth': '100', 'IOType': '3'}}, FAKE_POOLS_SUPPORT_REPORT], [{'specs': {'minBandWidth': '0', 'IOType': '2'}}, FAKE_POOLS_UNSUPPORT_REPORT], [{'specs': {'minBandWidth': '0', 'IOType': '2'}}, FAKE_POOLS_SUPPORT_REPORT]) @ddt.unpack def test_create_smartqos_failed(self, qos_specs_value, pool_data): self.driver.support_func = pool_data self.mock_object(qos_specs, 'get_qos_specs', return_value=qos_specs_value) self.volume.volume_type = objects.VolumeType( extra_specs={}, qos_specs_id=ID) self.assertRaises(exception.InvalidInput, self.driver.create_volume, self.volume) @ddt.data(FAKE_POOLS_UNSUPPORT_REPORT, FAKE_POOLS_SUPPORT_REPORT) def 
test_create_smartqos_without_huawei_type(self, pool_data): self.driver.support_func = pool_data self.mock_object(qos_specs, 'get_qos_specs', return_value={'specs': {'fake_qos_type': '100', 'IOType': '2'}} ) self.volume.volume_type = objects.VolumeType( extra_specs={}, qos_specs_id=ID) self.assertRaises(exception.InvalidInput, self.driver.create_volume, self.volume) @mock.patch.object(huawei_utils, 'get_volume_params', return_value={'qos': {'MAXIOPS': '100', 'IOType': '2'} }) @mock.patch.object(rest_client.RestClient, 'find_array_version', return_value='V300R003C00') @mock.patch.object(rest_client.RestClient, 'find_available_qos', return_value=(None, [])) def test_create_smartqos_on_v3r3_with_no_qos(self, mock_find_available_qos, mock_qos_value, mock_array_version): self.driver.support_func = FAKE_POOLS_SUPPORT_REPORT lun_info = self.driver.create_volume(self.volume) expect_value = {"huawei_lun_id": "1", "huawei_lun_wwn": "6643e8c1004c5f6723e9f454003"} self.assertDictEqual(expect_value, json.loads(lun_info['provider_location'])) @mock.patch.object(huawei_utils, 'get_volume_params', return_value={'qos': {'MAXIOPS': '100', 'IOType': '2'} }) @mock.patch.object(rest_client.RestClient, 'find_array_version', return_value='V300R003C00') @mock.patch.object(rest_client.RestClient, 'find_available_qos', return_value=('11', u'["0", "2", "3"]')) def test_create_smartqos_on_v3r3_with_qos(self, mock_find_available_qos, mock_qos_value, mock_array_version): self.driver.support_func = FAKE_POOLS_SUPPORT_REPORT lun_info = self.driver.create_volume(self.volume) expect_value = {"huawei_lun_id": "1", "huawei_lun_wwn": "6643e8c1004c5f6723e9f454003"} self.assertDictEqual(expect_value, json.loads(lun_info['provider_location'])) @ddt.data(FAKE_POOLS_UNSUPPORT_REPORT, FAKE_POOLS_SUPPORT_REPORT) def test_create_smartqos_on_v3r3_active_failed(self, pool_data): self.driver.support_func = pool_data self.mock_object(huawei_utils, 'get_volume_params', return_value={'qos': {'MAXIOPS': '100', 'IOType': '2'} } ) self.mock_object(self.driver.client, 'activate_deactivate_qos', side_effect=exception.VolumeBackendAPIException( data='Activate or deactivate QoS error.') ) self.mock_object(smartx.SmartQos, 'remove') self.assertRaises(exception.VolumeBackendAPIException, self.driver.create_volume, self.volume) @ddt.data(FAKE_POOLS_UNSUPPORT_REPORT, FAKE_POOLS_SUPPORT_REPORT) @mock.patch.object(huawei_utils, 'get_volume_params', return_value={'qos': {'MAXIOPS': '100', 'IOType': '2'} }) @mock.patch.object(rest_client.RestClient, 'find_array_version', return_value='V300R003C00') @mock.patch.object(rest_client.RestClient, 'find_available_qos', return_value=(None, [])) @mock.patch.object(rest_client.RestClient, 'create_qos') def test_create_smartqos_on_v3r3_qos_failed(self, pool_data, mock_create_qos, mock_find_available_qos, mock_qos_value, mock_array_version): self.driver.support_func = pool_data mock_create_qos.side_effect = ( exception.VolumeBackendAPIException(data='Create QoS policy ' 'error.')) self.assertRaises(exception.VolumeBackendAPIException, self.driver.create_volume, self.volume) @ddt.data(FAKE_POOLS_UNSUPPORT_REPORT, FAKE_POOLS_SUPPORT_REPORT) @mock.patch.object(rest_client.RestClient, 'get_qos_info', return_value={"LUNLIST": u'["1", "2", "3"]', "RUNNINGSTATUS": "2"}) def test_delete_smartqos_with_lun_left(self, mock_qos_info, pool_data): self.driver.support_func = pool_data self.driver.delete_volume(self.volume) @ddt.data(FAKE_POOLS_UNSUPPORT_REPORT, FAKE_POOLS_SUPPORT_REPORT) @mock.patch.object(rest_client.RestClient, 
'get_qos_info', return_value={"LUNLIST": u'["1"]', "RUNNINGSTATUS": "2"}) def test_delete_smartqos_with_no_lun_left(self, mock_qos_info, pool_data): self.driver.support_func = pool_data self.driver.delete_volume(self.volume) @mock.patch.object(rest_client.RestClient, 'add_lun_to_partition') @mock.patch.object(huawei_utils, 'get_volume_params', return_value={'smarttier': 'true', 'smartcache': 'true', 'smartpartition': 'true', 'thin_provisioning_support': 'true', 'thick_provisioning_support': 'false', 'policy': '2', 'cachename': 'cache-test', 'partitionname': 'partition-test'}) def test_create_smartx(self, mock_volume_types, mock_add_lun_to_partition): lun_info = self.driver.create_volume(self.volume) expect_value = {"huawei_lun_id": "1", "huawei_lun_wwn": "6643e8c1004c5f6723e9f454003"} self.assertDictEqual(expect_value, json.loads(lun_info['provider_location'])) @ddt.data({'capabilities:smartcache': 'true', 'cachename': 'fake_name'}, {'capabilities:smartcache': ' true', 'cachename': None}, {'capabilities:smartcache': ' true', 'cachename': ''}, ) def test_create_smartCache_failed(self, extra_specs): self.volume.volume_type = objects.VolumeType(extra_specs=extra_specs) self.assertRaises(exception.InvalidInput, self.driver.create_volume, self.volume) @ddt.data(FAKE_POOLS_UNSUPPORT_REPORT, FAKE_POOLS_SUPPORT_REPORT) @mock.patch.object(huawei_utils, 'get_volume_params', return_value={'smarttier': 'true', 'smartcache': 'true', 'smartpartition': 'true', 'thin_provisioning_support': 'true', 'thick_provisioning_support': 'false', 'policy': '2', 'cachename': 'cache-test', 'partitionname': 'partition-test'}) def test_create_smartCache_failed_with_no_cacheid(self, mock_volume_type, pool_data): self.driver.client.cache_not_exist = True self.driver.support_func = pool_data self.assertRaises(exception.VolumeBackendAPIException, self.driver.create_volume, self.volume) @ddt.data(FAKE_POOLS_UNSUPPORT_REPORT, FAKE_POOLS_SUPPORT_REPORT) @mock.patch.object(huawei_utils, 'get_volume_params', return_value={'smarttier': 'true', 'smartcache': 'true', 'smartpartition': 'true', 'thin_provisioning_support': 'true', 'thick_provisioning_support': 'false', 'policy': '2', 'cachename': 'cache-test', 'partitionname': 'partition-test'}) def test_create_smartPartition_failed_with_no_partid(self, mock_volume_type, pool_data): self.driver.client.partition_not_exist = True self.driver.support_func = pool_data self.assertRaises(exception.VolumeBackendAPIException, self.driver.create_volume, self.volume) def test_find_available_qos(self): qos = {'MAXIOPS': '100', 'IOType': '2'} fake_qos_info_response_equal = { "error": { "code": 0 }, "data": [{ "ID": "11", "MAXIOPS": "100", "LATENCY": "0", "IOType": "2", "FSLIST": u'[""]', 'RUNNINGSTATUS': "2", "NAME": "OpenStack_57_20151225102851", "LUNLIST": u'["1", "2", "3", "4", "5", "6", "7", "8", "9",\ "10", ,"11", "12", "13", "14", "15", "16", "17", "18", "19",\ "20", ,"21", "22", "23", "24", "25", "26", "27", "28", "29",\ "30", ,"31", "32", "33", "34", "35", "36", "37", "38", "39",\ "40", ,"41", "42", "43", "44", "45", "46", "47", "48", "49",\ "50", ,"51", "52", "53", "54", "55", "56", "57", "58", "59",\ "60", ,"61", "62", "63", "64"]' }] } # Number of LUNs in QoS is equal to 64 with mock.patch.object(rest_client.RestClient, 'get_qos', return_value=fake_qos_info_response_equal): (qos_id, lun_list) = self.driver.client.find_available_qos(qos) self.assertEqual((None, []), (qos_id, lun_list)) # Number of LUNs in QoS is less than 64 fake_qos_info_response_less = { "error": { "code": 0 }, 
"data": [{ "ID": "11", "MAXIOPS": "100", "LATENCY": "0", "IOType": "2", "FSLIST": u'[""]', 'RUNNINGSTATUS': "2", "NAME": "OpenStack_57_20151225102851", "LUNLIST": u'["0", "1", "2"]' }] } with mock.patch.object(rest_client.RestClient, 'get_qos', return_value=fake_qos_info_response_less): (qos_id, lun_list) = self.driver.client.find_available_qos(qos) self.assertEqual(("11", u'["0", "1", "2"]'), (qos_id, lun_list)) @mock.patch.object(huawei_utils, 'get_volume_params', return_value=fake_hypermetro_opts) @mock.patch.object(rest_client.RestClient, 'get_all_pools', return_value=FAKE_STORAGE_POOL_RESPONSE) @mock.patch.object(rest_client.RestClient, 'get_pool_info', return_value=FAKE_FIND_POOL_RESPONSE) @mock.patch.object(rest_client.RestClient, 'get_hyper_domain_id', return_value='11') @mock.patch.object(hypermetro.HuaweiHyperMetro, '_wait_volume_ready', return_value=True) def test_create_hypermetro_success(self, mock_volume_ready, mock_hyper_domain, mock_pool_info, mock_all_pool_info, mock_login_return): location = {"huawei_lun_id": "1", "hypermetro_id": "11", "remote_lun_id": "1", "huawei_lun_wwn": "6643e8c1004c5f6723e9f454003"} lun_info = self.driver.create_volume(self.hyper_volume) self.assertDictEqual(location, json.loads(lun_info['provider_location'])) @ddt.data(FAKE_POOLS_UNSUPPORT_REPORT, FAKE_POOLS_SUPPORT_REPORT) @mock.patch.object(huawei_utils, 'get_volume_params', return_value=fake_hypermetro_opts) @mock.patch.object(rest_client.RestClient, 'get_all_pools', return_value=FAKE_STORAGE_POOL_RESPONSE) @mock.patch.object(rest_client.RestClient, 'get_pool_info', return_value=FAKE_FIND_POOL_RESPONSE) @mock.patch.object(rest_client.RestClient, 'get_hyper_domain_id', return_value='11') @mock.patch.object(hypermetro.HuaweiHyperMetro, '_wait_volume_ready', return_value=True) @mock.patch.object(hypermetro.HuaweiHyperMetro, '_create_hypermetro_pair') @mock.patch.object(rest_client.RestClient, 'delete_lun') def test_create_hypermetro_fail(self, pool_data, mock_delete_lun, mock_hyper_pair_info, mock_volume_ready, mock_hyper_domain, mock_pool_info, mock_all_pool_info, mock_hypermetro_opts ): self.driver.client.login() self.driver.support_func = pool_data mock_hyper_pair_info.side_effect = exception.VolumeBackendAPIException( data='Create hypermetro error.') self.assertRaises(exception.VolumeBackendAPIException, self.driver.create_volume, self.hyper_volume) mock_delete_lun.assert_called_with('1') @mock.patch.object(rest_client.RestClient, 'get_all_pools', return_value=FAKE_STORAGE_POOL_RESPONSE) @mock.patch.object(rest_client.RestClient, 'get_pool_info', return_value={}) def test_create_hypermetro_remote_pool_none_fail(self, mock_pool_info, mock_all_pool_info): param = {'TYPE': '11', 'PARENTID': ''} self.driver.client.login() self.assertRaises(exception.VolumeBackendAPIException, self.driver.metro.create_hypermetro, '2', param) @mock.patch.object(rest_client.RestClient, 'get_all_pools', return_value=FAKE_STORAGE_POOL_RESPONSE) @mock.patch.object(rest_client.RestClient, 'get_pool_info', return_value=FAKE_FIND_POOL_RESPONSE) @mock.patch.object(rest_client.RestClient, 'create_lun', return_value={'CAPACITY': '2097152', 'DESCRIPTION': '2f0635', 'HEALTHSTATUS': '1', 'ALLOCTYPE': '1', 'WWN': '6643e8c1004c5f6723e9f454003', 'ID': '1', 'RUNNINGSTATUS': '27', 'NAME': '5mFHcBv4RkCcD'}) @mock.patch.object(rest_client.RestClient, 'get_hyper_domain_id', return_value='11') @mock.patch.object(hypermetro.HuaweiHyperMetro, '_wait_volume_ready', return_value=True) def test_create_hypermetro_remote_pool_parentid(self, 
mock_volume_ready, mock_hyper_domain, mock_create_lun, mock_pool_info, mock_all_pool_info): param = {'TYPE': '11', 'PARENTID': ''} self.driver.metro.create_hypermetro('2', param) lun_PARENTID = mock_create_lun.call_args[0][0]['PARENTID'] self.assertEqual(FAKE_FIND_POOL_RESPONSE['ID'], lun_PARENTID) @ddt.data(FAKE_POOLS_UNSUPPORT_REPORT, FAKE_POOLS_SUPPORT_REPORT) @mock.patch.object(rest_client.RestClient, 'check_lun_exist', return_value=True) @mock.patch.object(rest_client.RestClient, 'check_hypermetro_exist', return_value=True) @mock.patch.object(rest_client.RestClient, 'delete_hypermetro', return_value=FAKE_COMMON_SUCCESS_RESPONSE) @mock.patch.object(rest_client.RestClient, 'delete_lun', return_value=None) def test_delete_hypermetro_success(self, mock_delete_lun, mock_delete_hypermetro, mock_check_hyermetro, mock_lun_exit, pool_data): self.driver.support_func = pool_data self.driver.delete_volume(self.hyper_volume) @ddt.data(FAKE_POOLS_UNSUPPORT_REPORT, FAKE_POOLS_SUPPORT_REPORT) @mock.patch.object(rest_client.RestClient, 'check_lun_exist', return_value=True) @mock.patch.object(rest_client.RestClient, 'check_hypermetro_exist', return_value=True) @mock.patch.object(rest_client.RestClient, 'get_hypermetro_by_id', return_value=FAKE_METRO_INFO_RESPONSE) @mock.patch.object(rest_client.RestClient, 'delete_hypermetro') @mock.patch.object(rest_client.RestClient, 'delete_lun', return_value=None) def test_delete_hypermetro_fail(self, pool_data, mock_delete_lun, mock_delete_hypermetro, mock_metro_info, mock_check_hyermetro, mock_lun_exit): self.driver.support_func = pool_data mock_delete_hypermetro.side_effect = ( exception.VolumeBackendAPIException(data='Delete hypermetro ' 'error.')) self.assertRaises(exception.VolumeBackendAPIException, self.driver.delete_volume, self.hyper_volume) mock_delete_lun.assert_called_with('11') def test_manage_existing_get_size_invalid_reference(self): # Can't find LUN by source-name. external_ref = {'source-name': 'LUN1'} with mock.patch.object(rest_client.RestClient, 'get_lun_id_by_name', return_value=None): ex = self.assertRaises(exception.ManageExistingInvalidReference, self.driver.manage_existing_get_size, self.volume, external_ref) self.assertIsNotNone(re.search('please check the source-name ' 'or source-id', ex.msg)) # Can't find LUN by source-id. external_ref = {'source-id': 'ID1'} with mock.patch.object(rest_client.RestClient, 'get_lun_info') as m_gt: m_gt.side_effect = exception.VolumeBackendAPIException( data='Error') self.assertRaises(exception.VolumeBackendAPIException, self.driver.manage_existing_get_size, self.volume, external_ref) self.assertIsNotNone(re.search('please check the source-name ' 'or source-id', ex.msg)) @ddt.data({'source-id': 'ID1'}, {'source-name': 'LUN1'}, {'source-name': 'LUN1', 'source-id': 'ID1'}) @mock.patch.object(rest_client.RestClient, 'get_lun_info', return_value={'CAPACITY': 3097152}) @mock.patch.object(rest_client.RestClient, 'get_lun_id_by_name', return_value='ID1') def test_manage_existing_get_size_success(self, mock_get_lun_id_by_name, mock_get_lun_info, external_ref): size = self.driver.manage_existing_get_size(self.volume, external_ref) self.assertEqual(2, size) @mock.patch.object(rest_client.RestClient, 'get_lun_info', return_value={'CAPACITY': 2097152, 'ID': 'ID1', 'PARENTNAME': 'OpenStack_Pool'}) @mock.patch.object(rest_client.RestClient, 'get_lun_id_by_name', return_value='ID1') def test_manage_existing_pool_mismatch(self, mock_get_by_name, mock_get_info): # LUN does not belong to the specified pool. 
with mock.patch.object(common.HuaweiBaseDriver, '_get_lun_info_by_ref', return_value={'PARENTNAME': 'StoragePool'}): external_ref = {'source-name': 'LUN1'} ex = self.assertRaises(exception.ManageExistingInvalidReference, self.driver.manage_existing, self.volume, external_ref) self.assertIsNotNone(re.search('The specified LUN does not belong' ' to the given pool', ex.msg)) @mock.patch.object(rest_client.RestClient, 'get_lun_info', return_value={'CAPACITY': 2097152, 'ID': 'ID1', 'PARENTNAME': 'OpenStack_Pool'}) @mock.patch.object(rest_client.RestClient, 'get_lun_id_by_name', return_value='ID1') def test_manage_existing_lun_abnormal(self, mock_get_by_name, mock_get_info): # Status is not normal. ret = {'PARENTNAME': "OpenStack_Pool", 'HEALTHSTATUS': '2'} with mock.patch.object(common.HuaweiBaseDriver, '_get_lun_info_by_ref', return_value=ret): external_ref = {'source-name': 'LUN1'} ex = self.assertRaises(exception.ManageExistingInvalidReference, self.driver.manage_existing, self.volume, external_ref) self.assertIsNotNone(re.search('LUN status is not normal', ex.msg)) @ddt.data(FAKE_POOLS_UNSUPPORT_REPORT, FAKE_POOLS_SUPPORT_REPORT) @mock.patch.object(rest_client.RestClient, 'get_hypermetro_pairs', return_value=[{'LOCALOBJID': 'ID1'}]) @mock.patch.object(rest_client.RestClient, 'get_lun_info', return_value={'CAPACITY': 2097152, 'ID': 'ID1', 'PARENTNAME': 'OpenStack_Pool', 'HEALTHSTATUS': constants.STATUS_HEALTH}) @mock.patch.object(rest_client.RestClient, 'get_lun_id_by_name', return_value='ID1') def test_manage_existing_with_hypermetro(self, mock_get_by_name, mock_get_info, mock_get_hyper_pairs, pool_data): self.driver.support_func = pool_data # Exists in a HyperMetroPair. with mock.patch.object(rest_client.RestClient, 'get_hypermetro_pairs', return_value=[{'LOCALOBJID': 'ID1'}]): external_ref = {'source-name': 'LUN1'} ex = self.assertRaises(exception.ManageExistingInvalidReference, self.driver.manage_existing, self.volume, external_ref) self.assertIsNotNone(re.search('HyperMetroPair', ex.msg)) @ddt.data(FAKE_POOLS_UNSUPPORT_REPORT, FAKE_POOLS_SUPPORT_REPORT) @mock.patch.object(rest_client.RestClient, 'get_hypermetro_pairs') @mock.patch.object(rest_client.RestClient, 'rename_lun') @mock.patch.object(rest_client.RestClient, 'get_lun_info', return_value={'CAPACITY': 2097152, 'ID': 'ID1', 'PARENTNAME': 'OpenStack_Pool', 'HEALTHSTATUS': constants.STATUS_HEALTH, 'WWN': '6643e8c1004c5f6723e9f454003'}) @mock.patch.object(rest_client.RestClient, 'get_lun_id_by_name', return_value='ID1') def test_manage_existing_with_lower_version(self, pool_data, mock_get_by_name, mock_get_info, mock_rename, mock_get_hyper_pairs): self.driver.support_func = pool_data mock_get_hyper_pairs.side_effect = ( exception.VolumeBackendAPIException(data='err')) external_ref = {'source-name': 'LUN1'} model_update = self.driver.manage_existing(self.volume, external_ref) location = {"huawei_lun_wwn": "6643e8c1004c5f6723e9f454003", "huawei_lun_id": "ID1"} self.assertDictEqual(location, json.loads(model_update['provider_location'])) @ddt.data([[{'PRILUNID': 'ID1'}], []], [[{'PRILUNID': 'ID2'}], ['ID1', 'ID2']]) @mock.patch.object(rest_client.RestClient, 'get_lun_info', return_value={'CAPACITY': 2097152, 'ID': 'ID1', 'PARENTNAME': 'OpenStack_Pool', 'HEALTHSTATUS': constants.STATUS_HEALTH}) @mock.patch.object(rest_client.RestClient, 'get_lun_id_by_name', return_value='ID1') def test_manage_existing_with_splitmirror(self, ddt_data, mock_get_by_name, mock_get_info): self.driver.support_func = FAKE_POOLS_SUPPORT_REPORT # Exists in a 
SplitMirror. with mock.patch.object(rest_client.RestClient, 'get_split_mirrors', return_value=ddt_data[0]), \ mock.patch.object(rest_client.RestClient, 'get_target_luns', return_value=ddt_data[1]): external_ref = {'source-name': 'LUN1'} ex = self.assertRaises(exception.ManageExistingInvalidReference, self.driver.manage_existing, self.volume, external_ref) self.assertIsNotNone(re.search('SplitMirror', ex.msg)) @ddt.data([[{'PARENTID': 'ID1'}], FAKE_POOLS_UNSUPPORT_REPORT], [[{'TARGETLUNID': 'ID1'}], FAKE_POOLS_UNSUPPORT_REPORT], [[{'PARENTID': 'ID1'}], FAKE_POOLS_SUPPORT_REPORT], [[{'TARGETLUNID': 'ID1'}], FAKE_POOLS_SUPPORT_REPORT]) @mock.patch.object(rest_client.RestClient, 'get_lun_info', return_value={'CAPACITY': 2097152, 'ID': 'ID1', 'PARENTNAME': 'OpenStack_Pool', 'HEALTHSTATUS': constants.STATUS_HEALTH}) @mock.patch.object(rest_client.RestClient, 'get_lun_id_by_name', return_value='ID1') @ddt.unpack def test_manage_existing_under_migration(self, ddt_data, pool_data, mock_get_by_name, mock_get_info): self.driver.support_func = pool_data # Exists in a migration task. with mock.patch.object(rest_client.RestClient, 'get_migration_task', return_value=ddt_data): external_ref = {'source-name': 'LUN1'} ex = self.assertRaises(exception.ManageExistingInvalidReference, self.driver.manage_existing, self.volume, external_ref) self.assertIsNotNone(re.search('migration', ex.msg)) @mock.patch.object(rest_client.RestClient, 'get_lun_info', return_value={'CAPACITY': 2097152, 'ID': 'ID1', 'PARENTNAME': 'OpenStack_Pool', 'SNAPSHOTIDS': [], 'ISADD2LUNGROUP': 'true', 'HEALTHSTATUS': constants.STATUS_HEALTH}) @mock.patch.object(rest_client.RestClient, 'get_lun_id_by_name', return_value='ID1') def test_manage_existing_with_lungroup(self, mock_get_by_name, mock_get_info): # Already in LUN group. 
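# Descriptive note: the mocked get_lun_info above reports ISADD2LUNGROUP='true', so manage_existing is expected to refuse the LUN (see the 'Already exists in a LUN group' assertion below).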
external_ref = {'source-name': 'LUN1'} ex = self.assertRaises(exception.ManageExistingInvalidReference, self.driver.manage_existing, self.volume, external_ref) self.assertIsNotNone(re.search('Already exists in a LUN group', ex.msg)) @ddt.data([{'source-name': 'LUN1'}, FAKE_POOLS_UNSUPPORT_REPORT], [{'source-name': 'LUN1'}, FAKE_POOLS_SUPPORT_REPORT], [{'source-id': 'ID1'}, FAKE_POOLS_UNSUPPORT_REPORT], [{'source-id': 'ID1'}, FAKE_POOLS_SUPPORT_REPORT]) @mock.patch.object(rest_client.RestClient, 'rename_lun') @mock.patch.object(common.HuaweiBaseDriver, '_get_lun_info_by_ref', return_value={'PARENTNAME': 'OpenStack_Pool', 'SNAPSHOTIDS': [], 'ID': 'ID1', 'HEALTHSTATUS': constants.STATUS_HEALTH, 'WWN': '6643e8c1004c5f6723e9f454003'}) @mock.patch.object(rest_client.RestClient, 'get_lun_info', return_value={'CAPACITY': 2097152, 'ALLOCTYPE': 1}) @mock.patch.object(rest_client.RestClient, 'get_lun_id_by_name', return_value='ID1') @ddt.unpack def test_manage_existing_success(self, mock_get_by_name, mock_get_info, mock_check_lun, mock_rename, external_ref, pool_data): self.driver.support_func = pool_data model_update = self.driver.manage_existing(self.volume, external_ref) expected = {"huawei_lun_wwn": "6643e8c1004c5f6723e9f454003", "huawei_lun_id": "ID1"} self.assertDictEqual(expected, json.loads(model_update['provider_location'])) def test_unmanage(self): self.driver.unmanage(self.volume) def test_manage_existing_snapshot_abnormal(self): with mock.patch.object(common.HuaweiBaseDriver, '_get_snapshot_info_by_ref', return_value={'HEALTHSTATUS': '2', 'PARENTID': '11'}): external_ref = {'source-name': 'test1'} ex = self.assertRaises(exception.ManageExistingInvalidReference, self.driver.manage_existing_snapshot, self.snapshot, external_ref) self.assertIsNotNone(re.search('Snapshot status is not normal', ex.msg)) @mock.patch.object(rest_client.RestClient, 'get_snapshot_info', return_value={'ID': 'ID1', 'EXPOSEDTOINITIATOR': 'true', 'NAME': 'test1', 'PARENTID': '11', 'USERCAPACITY': 2097152, 'HEALTHSTATUS': constants.STATUS_HEALTH}) @mock.patch.object(rest_client.RestClient, 'get_snapshot_id_by_name', return_value='ID1') def test_manage_existing_snapshot_with_lungroup(self, mock_get_by_name, mock_get_info): # Already in LUN group. 
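# The mocked get_snapshot_info reports EXPOSEDTOINITIATOR 'true', so
# manage_existing_snapshot is expected to reject the snapshot as one that
# is already exposed to an initiator.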
external_ref = {'source-name': 'test1'} ex = self.assertRaises(exception.ManageExistingInvalidReference, self.driver.manage_existing_snapshot, self.snapshot, external_ref) self.assertIsNotNone(re.search('Snapshot is exposed to initiator', ex.msg)) @mock.patch.object(rest_client.RestClient, 'rename_snapshot') @mock.patch.object(common.HuaweiBaseDriver, '_get_snapshot_info_by_ref', return_value={'ID': 'ID1', 'EXPOSEDTOINITIATOR': 'false', 'NAME': 'test1', 'PARENTID': '11', 'USERCAPACITY': 2097152, 'HEALTHSTATUS': constants.STATUS_HEALTH}) def test_manage_existing_snapshot_success(self, mock_get_info, mock_rename): external_ref = {'source-name': 'test1'} model_update = self.driver.manage_existing_snapshot(self.snapshot, external_ref) expect_value = {'provider_location': '{"huawei_snapshot_id": "ID1"}'} self.assertEqual(expect_value, model_update) external_ref = {'source-id': 'ID1'} model_update = self.driver.manage_existing_snapshot(self.snapshot, external_ref) expect_value = {'provider_location': '{"huawei_snapshot_id": "ID1"}'} self.assertEqual(expect_value, model_update) @mock.patch.object(rest_client.RestClient, 'get_snapshot_info', return_value={'ID': 'ID1', 'EXPOSEDTOINITIATOR': 'false', 'NAME': 'test1', 'USERCAPACITY': 2097152, 'PARENTID': '12', 'HEALTHSTATUS': constants.STATUS_HEALTH}) @mock.patch.object(rest_client.RestClient, 'get_snapshot_id_by_name', return_value='ID1') def test_manage_existing_snapshot_mismatch_lun(self, mock_get_by_name, mock_get_info): external_ref = {'source-name': 'test1'} ex = self.assertRaises(exception.ManageExistingInvalidReference, self.driver.manage_existing_snapshot, self.snapshot, external_ref) self.assertIsNotNone(re.search("Snapshot doesn't belong to volume", ex.msg)) @mock.patch.object(rest_client.RestClient, 'get_snapshot_info', return_value={'USERCAPACITY': 3097152}) @mock.patch.object(rest_client.RestClient, 'get_snapshot_id_by_name', return_value='ID1') def test_manage_existing_snapshot_get_size_success(self, mock_get_id_by_name, mock_get_info): external_ref = {'source-name': 'test1', 'source-id': 'ID1'} size = self.driver.manage_existing_snapshot_get_size(self.snapshot, external_ref) self.assertEqual(2, size) external_ref = {'source-name': 'test1'} size = self.driver.manage_existing_snapshot_get_size(self.snapshot, external_ref) self.assertEqual(2, size) external_ref = {'source-id': 'ID1'} size = self.driver.manage_existing_snapshot_get_size(self.snapshot, external_ref) self.assertEqual(2, size) def test_unmanage_snapshot(self): self.driver.unmanage_snapshot(self.snapshot) @ddt.data(sync_replica_specs, async_replica_specs) def test_create_replication_success(self, mock_type): self.mock_object(replication.ReplicaCommonDriver, 'sync') self.replica_volume.volume_type = objects.VolumeType( extra_specs=mock_type, qos_specs_id=None) model_update = self.driver.create_volume(self.replica_volume) driver_data = {'pair_id': TEST_PAIR_ID, 'rmt_lun_id': '1', 'rmt_lun_wwn': '6643e8c1004c5f6723e9f454003'} self.assertDictEqual( driver_data, json.loads(model_update['replication_driver_data'])) self.assertEqual('available', model_update['replication_status']) @ddt.data( [ rest_client.RestClient, 'get_array_info', mock.Mock( side_effect=exception.VolumeBackendAPIException(data='err')), FAKE_POOLS_UNSUPPORT_REPORT ], [ rest_client.RestClient, 'get_remote_devices', mock.Mock( side_effect=exception.VolumeBackendAPIException(data='err')), FAKE_POOLS_UNSUPPORT_REPORT ], [ rest_client.RestClient, 'get_remote_devices', mock.Mock(return_value={}), 
FAKE_POOLS_UNSUPPORT_REPORT ], [ replication.ReplicaPairManager, 'wait_volume_online', mock.Mock(side_effect=[ None, exception.VolumeBackendAPIException(data='err')]), FAKE_POOLS_UNSUPPORT_REPORT ], [ rest_client.RestClient, 'create_pair', mock.Mock( side_effect=exception.VolumeBackendAPIException(data='err')), FAKE_POOLS_UNSUPPORT_REPORT ], [ replication.ReplicaCommonDriver, 'sync', mock.Mock( side_effect=exception.VolumeBackendAPIException(data='err')), FAKE_POOLS_UNSUPPORT_REPORT ], [ rest_client.RestClient, 'get_array_info', mock.Mock( side_effect=exception.VolumeBackendAPIException(data='err')), FAKE_POOLS_SUPPORT_REPORT ], [ rest_client.RestClient, 'get_remote_devices', mock.Mock( side_effect=exception.VolumeBackendAPIException(data='err')), FAKE_POOLS_SUPPORT_REPORT ], [ rest_client.RestClient, 'get_remote_devices', mock.Mock(return_value={}), FAKE_POOLS_SUPPORT_REPORT ], [ replication.ReplicaPairManager, 'wait_volume_online', mock.Mock(side_effect=[ None, exception.VolumeBackendAPIException(data='err')]), FAKE_POOLS_SUPPORT_REPORT ], [ rest_client.RestClient, 'create_pair', mock.Mock( side_effect=exception.VolumeBackendAPIException(data='err')), FAKE_POOLS_SUPPORT_REPORT ], [ replication.ReplicaCommonDriver, 'sync', mock.Mock( side_effect=exception.VolumeBackendAPIException(data='err')), FAKE_POOLS_SUPPORT_REPORT ], ) @ddt.unpack def test_create_replication_fail(self, mock_module, mock_func, mock_value, pool_data): self.driver.support_func = pool_data self.replica_volume.volume_type = objects.VolumeType( extra_specs=sync_replica_specs, qos_specs_id=None) self.mock_object(replication.ReplicaPairManager, '_delete_pair') self.mock_object(mock_module, mock_func, mock_value) self.assertRaises( exception.VolumeBackendAPIException, self.driver.create_volume, self.replica_volume) @ddt.data(FAKE_POOLS_UNSUPPORT_REPORT, FAKE_POOLS_SUPPORT_REPORT) def test_delete_replication_success(self, pool_data): self.driver.support_func = pool_data self.mock_object(replication.ReplicaCommonDriver, 'split') self.mock_object( common.HuaweiBaseDriver, '_get_volume_type', return_value={'extra_specs': sync_replica_specs}) self.driver.delete_volume(self.replica_volume) self.mock_object(rest_client.RestClient, 'check_lun_exist', return_value=False) self.driver.delete_volume(self.replica_volume) def test_wait_volume_online(self): replica = FakeReplicaPairManager(self.driver.client, self.driver.replica_client, self.configuration) lun_info = {'ID': '11'} replica.wait_volume_online(self.driver.client, lun_info) offline_status = {'RUNNINGSTATUS': '28'} replica.wait_volume_online(self.driver.client, lun_info) with mock.patch.object(rest_client.RestClient, 'get_lun_info', return_value=offline_status): self.assertRaises(exception.VolumeDriverException, replica.wait_volume_online, self.driver.client, lun_info) def test_wait_second_access(self): pair_id = '1' access_ro = constants.REPLICA_SECOND_RO access_rw = constants.REPLICA_SECOND_RW op = replication.PairOp(self.driver.client) common_driver = replication.ReplicaCommonDriver(self.configuration, op) self.mock_object(replication.PairOp, 'get_replica_info', return_value={'SECRESACCESS': access_ro}) common_driver.wait_second_access(pair_id, access_ro) self.assertRaises(exception.VolumeDriverException, common_driver.wait_second_access, pair_id, access_rw) def test_wait_replica_ready(self): normal_status = { 'RUNNINGSTATUS': constants.REPLICA_RUNNING_STATUS_NORMAL, 'HEALTHSTATUS': constants.REPLICA_HEALTH_STATUS_NORMAL } split_status = { 'RUNNINGSTATUS': 
constants.REPLICA_RUNNING_STATUS_SPLIT, 'HEALTHSTATUS': constants.REPLICA_HEALTH_STATUS_NORMAL } sync_status = { 'RUNNINGSTATUS': constants.REPLICA_RUNNING_STATUS_SYNC, 'HEALTHSTATUS': constants.REPLICA_HEALTH_STATUS_NORMAL } pair_id = '1' op = replication.PairOp(self.driver.client) common_driver = replication.ReplicaCommonDriver(self.configuration, op) with mock.patch.object(replication.PairOp, 'get_replica_info', return_value=normal_status): common_driver.wait_replica_ready(pair_id) with mock.patch.object( replication.PairOp, 'get_replica_info', side_effect=[sync_status, normal_status]): common_driver.wait_replica_ready(pair_id) with mock.patch.object(replication.PairOp, 'get_replica_info', return_value=split_status): self.assertRaises(exception.VolumeBackendAPIException, common_driver.wait_replica_ready, pair_id) def test_failover_to_current(self): driver = FakeISCSIStorage(configuration=self.configuration) driver.do_setup() old_client = driver.client old_replica_client = driver.replica_client old_replica = driver.replica secondary_id, volumes_update, __ = driver.failover_host( None, [self.volume], 'default', []) self.assertIn(driver.active_backend_id, ('', None)) self.assertEqual(old_client, driver.client) self.assertEqual(old_replica_client, driver.replica_client) self.assertEqual(old_replica, driver.replica) self.assertEqual('default', secondary_id) self.assertEqual(0, len(volumes_update)) def test_failover_normal_volumes(self): driver = FakeISCSIStorage(configuration=self.configuration) driver.do_setup() old_client = driver.client old_replica_client = driver.replica_client old_replica = driver.replica secondary_id, volumes_update, __ = driver.failover_host( None, [self.volume], REPLICA_BACKEND_ID, []) self.assertEqual(REPLICA_BACKEND_ID, driver.active_backend_id) self.assertEqual(old_client, driver.replica_client) self.assertEqual(old_replica_client, driver.client) self.assertNotEqual(old_replica, driver.replica) self.assertEqual(REPLICA_BACKEND_ID, secondary_id) self.assertEqual(1, len(volumes_update)) v_id = volumes_update[0]['volume_id'] v_update = volumes_update[0]['updates'] self.assertEqual(self.volume.id, v_id) self.assertEqual('error', v_update['status']) self.assertEqual(self.volume['status'], v_update['metadata']['old_status']) def test_failback_to_current(self): driver = FakeISCSIStorage(configuration=self.configuration) driver.active_backend_id = REPLICA_BACKEND_ID driver.do_setup() old_client = driver.client old_replica_client = driver.replica_client old_replica = driver.replica secondary_id, volumes_update, __ = driver.failover_host( None, [self.volume], REPLICA_BACKEND_ID, []) self.assertEqual(REPLICA_BACKEND_ID, driver.active_backend_id) self.assertEqual(old_client, driver.client) self.assertEqual(old_replica_client, driver.replica_client) self.assertEqual(old_replica, driver.replica) self.assertEqual(REPLICA_BACKEND_ID, secondary_id) self.assertEqual(0, len(volumes_update)) def test_failback_normal_volumes(self): self.volume.status = 'error' self.volume.metadata = {'old_status': 'available'} driver = FakeISCSIStorage(configuration=self.configuration) driver.active_backend_id = REPLICA_BACKEND_ID driver.do_setup() old_client = driver.client old_replica_client = driver.replica_client old_replica = driver.replica secondary_id, volumes_update, __ = driver.failover_host( None, [self.volume], 'default', []) self.assertIn(driver.active_backend_id, ('', None)) self.assertEqual(old_client, driver.replica_client) self.assertEqual(old_replica_client, driver.client) 
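# The two assertions above capture the failback swap: the client that was
# active while failed over goes back to being the replica client, and the
# former replica client becomes the active client again.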
self.assertNotEqual(old_replica, driver.replica) self.assertEqual('default', secondary_id) self.assertEqual(1, len(volumes_update)) v_id = volumes_update[0]['volume_id'] v_update = volumes_update[0]['updates'] self.assertEqual(self.volume.id, v_id) self.assertEqual('available', v_update['status']) self.assertNotIn('old_status', v_update['metadata']) def test_failover_replica_volumes(self): driver = FakeISCSIStorage(configuration=self.configuration) driver.do_setup() old_client = driver.client old_replica_client = driver.replica_client old_replica = driver.replica self.mock_object(replication.ReplicaCommonDriver, 'failover') self.mock_object(huawei_utils, 'get_volume_params', return_value={'replication_enabled': True}) secondary_id, volumes_update, __ = driver.failover_host( None, [self.replica_volume], REPLICA_BACKEND_ID, []) self.assertEqual(REPLICA_BACKEND_ID, driver.active_backend_id) self.assertEqual(old_client, driver.replica_client) self.assertEqual(old_replica_client, driver.client) self.assertNotEqual(old_replica, driver.replica) self.assertEqual(REPLICA_BACKEND_ID, secondary_id) self.assertEqual(1, len(volumes_update)) v_id = volumes_update[0]['volume_id'] v_update = volumes_update[0]['updates'] self.assertEqual(self.replica_volume.id, v_id) expect_location = {"huawei_lun_wwn": "FAKE_RMT_LUN_WWN", "huawei_lun_id": "1"} self.assertDictEqual( expect_location, json.loads(v_update['provider_location'])) self.assertEqual('failed-over', v_update['replication_status']) metadata = huawei_utils.get_volume_metadata(self.replica_volume) new_drv_data = {'pair_id': TEST_PAIR_ID, 'rmt_lun_id': metadata['huawei_lun_id'], 'rmt_lun_wwn': metadata['huawei_lun_wwn']} self.assertDictEqual( new_drv_data, json.loads(v_update['replication_driver_data'])) @ddt.data({}, {'pair_id': TEST_PAIR_ID}) def test_failover_replica_volumes_invalid_drv_data(self, mock_drv_data): volume = self.replica_volume volume['replication_driver_data'] = replication.to_string( mock_drv_data) driver = FakeISCSIStorage(configuration=self.configuration) driver.do_setup() old_client = driver.client old_replica_client = driver.replica_client old_replica = driver.replica self.mock_object(huawei_utils, 'get_volume_params', return_value={'replication_enabled': True}) secondary_id, volumes_update, __ = driver.failover_host( None, [volume], REPLICA_BACKEND_ID, []) self.assertEqual(driver.active_backend_id, REPLICA_BACKEND_ID) self.assertEqual(old_client, driver.replica_client) self.assertEqual(old_replica_client, driver.client) self.assertNotEqual(old_replica, driver.replica) self.assertEqual(REPLICA_BACKEND_ID, secondary_id) self.assertEqual(1, len(volumes_update)) v_id = volumes_update[0]['volume_id'] v_update = volumes_update[0]['updates'] self.assertEqual(volume.id, v_id) self.assertEqual('error', v_update['replication_status']) def test_failback_replica_volumes(self): self.mock_object(replication.ReplicaCommonDriver, 'enable') self.mock_object(replication.ReplicaCommonDriver, 'wait_replica_ready') self.mock_object(replication.ReplicaCommonDriver, 'failover') self.mock_object(huawei_utils, 'get_volume_params', return_value={'replication_enabled': True}) volume = self.replica_volume driver = FakeISCSIStorage(configuration=self.configuration) driver.active_backend_id = REPLICA_BACKEND_ID driver.do_setup() old_client = driver.client old_replica_client = driver.replica_client old_replica = driver.replica secondary_id, volumes_update, __ = driver.failover_host( None, [volume], 'default', []) self.assertIn(driver.active_backend_id, ('', 
None)) self.assertEqual(old_client, driver.replica_client) self.assertEqual(old_replica_client, driver.client) self.assertNotEqual(old_replica, driver.replica) self.assertEqual('default', secondary_id) self.assertEqual(1, len(volumes_update)) v_id = volumes_update[0]['volume_id'] v_update = volumes_update[0]['updates'] self.assertEqual(self.replica_volume.id, v_id) expect_location = {"huawei_lun_wwn": "FAKE_RMT_LUN_WWN", "huawei_lun_id": "1"} self.assertDictEqual( expect_location, json.loads(v_update['provider_location'])) self.assertEqual('available', v_update['replication_status']) metadata = huawei_utils.get_volume_metadata(self.replica_volume) new_drv_data = {'pair_id': TEST_PAIR_ID, 'rmt_lun_id': metadata['huawei_lun_id'], 'rmt_lun_wwn': metadata['huawei_lun_wwn']} self.assertDictEqual( new_drv_data, json.loads(v_update['replication_driver_data'])) @ddt.data({}, {'pair_id': TEST_PAIR_ID}) def test_failback_replica_volumes_invalid_drv_data(self, mock_drv_data): self.mock_object(huawei_utils, 'get_volume_params', return_value={'replication_enabled': True}) volume = self.replica_volume volume['replication_driver_data'] = replication.to_string( mock_drv_data) driver = FakeISCSIStorage(configuration=self.configuration) driver.active_backend_id = REPLICA_BACKEND_ID driver.do_setup() old_client = driver.client old_replica_client = driver.replica_client old_replica = driver.replica secondary_id, volumes_update, __ = driver.failover_host( None, [volume], 'default', []) self.assertIn(driver.active_backend_id, ('', None)) self.assertEqual(old_client, driver.replica_client) self.assertEqual(old_replica_client, driver.client) self.assertNotEqual(old_replica, driver.replica) self.assertEqual('default', secondary_id) self.assertEqual(1, len(volumes_update)) v_id = volumes_update[0]['volume_id'] v_update = volumes_update[0]['updates'] self.assertEqual(self.replica_volume.id, v_id) self.assertEqual('error', v_update['replication_status']) @mock.patch('oslo_service.loopingcall.FixedIntervalLoopingCall', new=utils.ZeroIntervalLoopingCall) @mock.patch.object(replication.PairOp, 'is_primary', side_effect=[False, True]) @mock.patch.object(replication.ReplicaCommonDriver, 'split') @mock.patch.object(replication.ReplicaCommonDriver, 'unprotect_second') def test_replication_driver_enable_success(self, mock_unprotect, mock_split, mock_is_primary): replica_id = TEST_PAIR_ID op = replication.PairOp(self.driver.client) common_driver = replication.ReplicaCommonDriver(self.configuration, op) common_driver.enable(replica_id) self.assertTrue(mock_unprotect.called) self.assertTrue(mock_split.called) self.assertTrue(mock_is_primary.called) @mock.patch.object(replication.PairOp, 'is_primary', return_value=False) @mock.patch.object(replication.ReplicaCommonDriver, 'split') def test_replication_driver_failover_success(self, mock_split, mock_is_primary): replica_id = TEST_PAIR_ID op = replication.PairOp(self.driver.client) common_driver = replication.ReplicaCommonDriver(self.configuration, op) common_driver.failover(replica_id) self.assertTrue(mock_split.called) self.assertTrue(mock_is_primary.called) @mock.patch.object(replication.PairOp, 'is_primary', return_value=True) def test_replication_driver_failover_fail(self, mock_is_primary): replica_id = TEST_PAIR_ID op = replication.PairOp(self.driver.client) common_driver = replication.ReplicaCommonDriver(self.configuration, op) self.assertRaises( exception.VolumeBackendAPIException, common_driver.failover, replica_id) @ddt.data(constants.REPLICA_SECOND_RW, 
constants.REPLICA_SECOND_RO) def test_replication_driver_protect_second(self, mock_access): replica_id = TEST_PAIR_ID op = replication.PairOp(self.driver.client) common_driver = replication.ReplicaCommonDriver(self.configuration, op) self.mock_object(replication.ReplicaCommonDriver, 'wait_second_access') self.mock_object( replication.PairOp, 'get_replica_info', return_value={'SECRESACCESS': mock_access}) common_driver.protect_second(replica_id) common_driver.unprotect_second(replica_id) def test_replication_driver_sync(self): replica_id = TEST_PAIR_ID op = replication.PairOp(self.driver.client) common_driver = replication.ReplicaCommonDriver(self.configuration, op) async_normal_status = { 'REPLICATIONMODEL': constants.REPLICA_ASYNC_MODEL, 'RUNNINGSTATUS': constants.REPLICA_RUNNING_STATUS_NORMAL, 'HEALTHSTATUS': constants.REPLICA_HEALTH_STATUS_NORMAL } self.mock_object(replication.ReplicaCommonDriver, 'protect_second') self.mock_object(replication.PairOp, 'get_replica_info', return_value=async_normal_status) common_driver.sync(replica_id, True) common_driver.sync(replica_id, False) def test_replication_driver_split(self): replica_id = TEST_PAIR_ID op = replication.PairOp(self.driver.client) common_driver = replication.ReplicaCommonDriver(self.configuration, op) self.mock_object(replication.ReplicaCommonDriver, 'wait_expect_state') self.mock_object( replication.PairOp, 'split', side_effect=exception.VolumeBackendAPIException(data='err')) common_driver.split(replica_id) @mock.patch.object(replication.PairOp, 'split') @ddt.data(constants.REPLICA_RUNNING_STATUS_SPLIT, constants.REPLICA_RUNNING_STATUS_INVALID, constants.REPLICA_RUNNING_STATUS_ERRUPTED) def test_replication_driver_split_already_disabled(self, mock_status, mock_op_split): replica_id = TEST_PAIR_ID op = replication.PairOp(self.driver.client) common_driver = replication.ReplicaCommonDriver(self.configuration, op) pair_info = json.loads(FAKE_GET_PAIR_NORMAL_RESPONSE)['data'] pair_info['RUNNINGSTATUS'] = mock_status self.mock_object(rest_client.RestClient, 'get_pair_by_id', return_value=pair_info) common_driver.split(replica_id) self.assertFalse(mock_op_split.called) def test_replication_base_op(self): replica_id = '1' op = replication.AbsReplicaOp(None) op.create() op.delete(replica_id) op.protect_second(replica_id) op.unprotect_second(replica_id) op.sync(replica_id) op.split(replica_id) op.switch(replica_id) op.is_primary({}) op.get_replica_info(replica_id) op._is_status(None, {'key': 'volue'}, None) @mock.patch.object(rest_client.RestClient, 'call', return_value={"error": {"code": 0}}) def test_get_tgt_port_group_no_portg_exist(self, mock_call): portg = self.driver.client.get_tgt_port_group('test_portg') self.assertIsNone(portg) def test_get_tgt_iqn_from_rest_match(self): match_res = { 'data': [{ 'TYPE': 249, 'ID': '0+iqn.2006-08.com: 210048cee9d: 111.111.111.19,t,0x01' }, { 'TYPE': 249, 'ID': '0+iqn.2006-08.com: 210048cee9d: 111.111.111.191,t,0x01' }], 'error': { 'code': 0 } } ip = '111.111.111.19' expected_iqn = 'iqn.2006-08.com: 210048cee9d: 111.111.111.19' self.mock_object(rest_client.RestClient, 'call', return_value=match_res) iqn = self.driver.client._get_tgt_iqn_from_rest(ip) self.assertEqual(expected_iqn, iqn) def test_get_tgt_iqn_from_rest_mismatch(self): match_res = { 'data': [{ 'TYPE': 249, 'ID': '0+iqn.2006-08.com: 210048cee9d: 192.0.2.191,t,0x01' }, { 'TYPE': 249, 'ID': '0+iqn.2006-08.com: 210048cee9d: 192.0.2.192,t,0x01' }], 'error': { 'code': 0 } } ip = '192.0.2.19' self.mock_object(rest_client.RestClient, 'call', 
return_value=match_res) iqn = self.driver.client._get_tgt_iqn_from_rest(ip) self.assertIsNone(iqn) def test_create_group_snapshot(self): test_snapshots = [self.snapshot] ctxt = context.get_admin_context() self.mock_object(volume_utils, 'is_group_a_cg_snapshot_type', return_value=True) model, snapshots = self.driver.create_group_snapshot( ctxt, self.group_snapshot, test_snapshots) self.assertEqual('21ec7341-9256-497b-97d9-ef48edcf0635', snapshots[0]['id']) self.assertEqual('available', snapshots[0]['status']) self.assertDictEqual( {'huawei_snapshot_id': '11', 'huawei_snapshot_wwn': 'fake-wwn'}, json.loads(snapshots[0]['provider_location'])) self.assertEqual(fields.GroupSnapshotStatus.AVAILABLE, model['status']) def test_create_group_snapshot_with_create_snapshot_fail(self): test_snapshots = [self.snapshot] ctxt = context.get_admin_context() self.mock_object(volume_utils, 'is_group_a_cg_snapshot_type', return_value=True) self.mock_object( rest_client.RestClient, 'create_snapshot', side_effect=exception.VolumeBackendAPIException(data='err')) self.assertRaises(exception.VolumeBackendAPIException, self.driver.create_group_snapshot, ctxt, self.group_snapshot, test_snapshots) def test_create_group_snapshot_with_active_snapshot_fail(self): test_snapshots = [self.snapshot] ctxt = context.get_admin_context() self.mock_object(volume_utils, 'is_group_a_cg_snapshot_type', return_value=True) self.mock_object( rest_client.RestClient, 'activate_snapshot', side_effect=exception.VolumeBackendAPIException(data='err')) self.assertRaises(exception.VolumeBackendAPIException, self.driver.create_group_snapshot, ctxt, self.group_snapshot, test_snapshots) def test_delete_group_snapshot(self): test_snapshots = [self.snapshot] ctxt = context.get_admin_context() self.mock_object(volume_utils, 'is_group_a_cg_snapshot_type', return_value=True) self.driver.delete_group_snapshot( ctxt, self.group_snapshot, test_snapshots) class FCSanLookupService(object): def get_device_mapping_from_network(self, initiator_list, target_list): return fake_fabric_mapping @ddt.ddt class HuaweiFCDriverTestCase(HuaweiTestBase): def setUp(self): super(HuaweiFCDriverTestCase, self).setUp() self.configuration = mock.Mock(spec=conf.Configuration) self.flags(transport_url='fake:/') self.huawei_conf = FakeHuaweiConf(self.configuration, 'FC') self.configuration.hypermetro_devices = hypermetro_devices driver = FakeFCStorage(configuration=self.configuration) self.driver = driver self.driver.do_setup() self.driver.client.login() def test_login_success(self): device_id = self.driver.client.login() self.assertEqual('210235G7J20000000000', device_id) def test_create_volume_success(self): lun_info = self.driver.create_volume(self.volume) expect_value = {"huawei_lun_id": "1", "huawei_lun_wwn": "6643e8c1004c5f6723e9f454003"} self.assertDictEqual(expect_value, json.loads(lun_info['provider_location'])) @ddt.data(FAKE_POOLS_UNSUPPORT_REPORT, FAKE_POOLS_SUPPORT_REPORT) def test_delete_volume_success(self, pool_data): self.driver.support_func = pool_data self.driver.delete_volume(self.volume) def test_delete_snapshot_success(self): self.driver.delete_snapshot(self.snapshot) @ddt.data(True, False) def test_create_volume_from_snapsuccess(self, is_dorado_v6): self.configuration.lun_copy_speed = 2 self.driver.is_dorado_v6 = is_dorado_v6 self.mock_object(self.driver.client, 'get_snapshot_info_by_name', return_value= {'ID': ID, 'RUNNINGSTATUS': constants.STATUS_ACTIVE}) lun_info = self.driver.create_volume_from_snapshot(self.volume, self.snapshot) expect_value = 
{"huawei_lun_id": "1", "huawei_lun_wwn": "6643e8c1004c5f6723e9f454003"} self.assertDictEqual(expect_value, json.loads(lun_info['provider_location'])) @mock.patch.object(huawei_driver.HuaweiFCDriver, 'initialize_connection', return_value={"data": {'target_lun': 1}}) def test_initialize_connection_snapshot_success(self, mock_fc_init): iscsi_properties = self.driver.initialize_connection_snapshot( self.snapshot, FakeConnector) volume = Volume(id=self.snapshot.id, provider_location=self.snapshot.provider_location, lun_type='27', metadata=None) self.assertEqual(1, iscsi_properties['data']['target_lun']) mock_fc_init.assert_called_with(volume, FakeConnector) def test_initialize_connection_success(self): do_mapping_mocker = self.mock_object( self.driver.client, 'do_mapping', wraps=self.driver.client.do_mapping) iscsi_properties = self.driver.initialize_connection(self.volume, FakeConnector) self.assertEqual(1, iscsi_properties['data']['target_lun']) do_mapping_mocker.assert_called_once_with( '11', '0', '1', None, '11', False) def test_initialize_connection_fail_no_online_wwns_in_host(self): self.mock_object(rest_client.RestClient, 'get_online_free_wwns', return_value=[]) self.assertRaises(exception.VolumeBackendAPIException, self.driver.initialize_connection, self.volume, FakeConnector) def test_initialize_connection_no_local_ini_tgt_map(self): self.mock_object(rest_client.RestClient, 'get_init_targ_map', return_value=('', '')) self.mock_object(huawei_driver.HuaweiFCDriver, '_get_same_hostid', return_value='') self.mock_object(rest_client.RestClient, 'change_hostlun_id', return_value=None) self.mock_object(rest_client.RestClient, 'do_mapping', return_value={'lun_id': '1', 'view_id': '1', 'aval_luns': '[1]'}) self.driver.initialize_connection(self.hyper_volume, FakeConnector) def test_hypermetro_connection_success(self): self.mock_object(rest_client.RestClient, 'find_array_version', return_value='V300R003C00') fc_properties = self.driver.initialize_connection(self.hyper_volume, FakeConnector) self.assertEqual(1, fc_properties['data']['target_lun']) @mock.patch.object(huawei_driver.HuaweiFCDriver, 'terminate_connection') def test_terminate_connection_snapshot_success(self, mock_fc_term): self.driver.terminate_connection_snapshot(self.snapshot, FakeConnector) volume = Volume(id=self.snapshot.id, provider_location=self.snapshot.provider_location, lun_type='27', metadata=None) mock_fc_term.assert_called_with(volume, FakeConnector) def test_terminate_connection_success(self): self.driver.client.terminateFlag = True self.driver.terminate_connection(self.volume, FakeConnector) self.assertTrue(self.driver.client.terminateFlag) def test_terminate_connection_portgroup_associated(self): self.mock_object(rest_client.RestClient, 'is_portgroup_associated_to_view', return_value=True) self.mock_object(huawei_driver.HuaweiFCDriver, '_delete_zone_and_remove_fc_initiators', return_value=({}, 1)) self.driver.terminate_connection(self.volume, FakeConnector) def test_terminate_connection_fc_initiators_exist_in_host(self): self.mock_object(rest_client.RestClient, 'check_fc_initiators_exist_in_host', return_value=True) self.driver.terminate_connection(self.volume, FakeConnector) def test_terminate_connection_hypermetro_in_metadata(self): self.driver.terminate_connection(self.hyper_volume, FakeConnector) def test_get_volume_status(self): remote_device_info = {"ARRAYTYPE": "1", "HEALTHSTATUS": "1", "RUNNINGSTATUS": "10"} self.mock_object( replication.ReplicaPairManager, 'get_remote_device_by_wwn', 
return_value=remote_device_info) data = self.driver.get_volume_stats() self.assertEqual(self.driver.VERSION, data['driver_version']) self.assertTrue(data['pools'][0]['replication_enabled']) self.assertListEqual(['sync', 'async'], data['pools'][0]['replication_type']) self.mock_object( replication.ReplicaPairManager, 'get_remote_device_by_wwn', return_value={}) data = self.driver.get_volume_stats() self.assertNotIn('replication_enabled', data['pools'][0]) self.mock_object( replication.ReplicaPairManager, 'try_get_remote_wwn', return_value={}) data = self.driver.get_volume_stats() self.assertEqual(self.driver.VERSION, data['driver_version']) self.assertNotIn('replication_enabled', data['pools'][0]) @ddt.data({'TIER0CAPACITY': '100', 'TIER1CAPACITY': '0', 'TIER2CAPACITY': '0', 'disktype': 'ssd'}, {'TIER0CAPACITY': '0', 'TIER1CAPACITY': '100', 'TIER2CAPACITY': '0', 'disktype': 'sas'}, {'TIER0CAPACITY': '0', 'TIER1CAPACITY': '0', 'TIER2CAPACITY': '100', 'disktype': 'nl_sas'}, {'TIER0CAPACITY': '100', 'TIER1CAPACITY': '100', 'TIER2CAPACITY': '100', 'disktype': 'mix'}, {'TIER0CAPACITY': '0', 'TIER1CAPACITY': '0', 'TIER2CAPACITY': '0', 'disktype': ''}) def test_get_volume_disk_type(self, disk_type_value): response_dict = json.loads(FAKE_STORAGE_POOL_RESPONSE) storage_pool_sas = copy.deepcopy(response_dict) storage_pool_sas['data'][0]['TIER0CAPACITY'] = ( disk_type_value['TIER0CAPACITY']) storage_pool_sas['data'][0]['TIER1CAPACITY'] = ( disk_type_value['TIER1CAPACITY']) storage_pool_sas['data'][0]['TIER2CAPACITY'] = ( disk_type_value['TIER2CAPACITY']) driver = FakeISCSIStorage(configuration=self.configuration) driver.do_setup() driver.replica = None self.mock_object(rest_client.RestClient, 'get_all_pools', return_value=storage_pool_sas['data']) data = driver.get_volume_stats() if disk_type_value['disktype']: self.assertEqual(disk_type_value['disktype'], data['pools'][0]['disk_type']) else: self.assertNotIn('disk_type', data['pools'][0]) def test_get_disk_type_pool_info_none(self): driver = FakeISCSIStorage(configuration=self.configuration) driver.do_setup() driver.replica = None self.mock_object(rest_client.RestClient, 'get_pool_info', return_value=None) data = driver.get_volume_stats() self.assertNotIn('disk_type', data['pools'][0]) def test_extend_volume(self): self.driver.extend_volume(self.volume, 3) def test_login_fail(self): self.driver.client.test_fail = True self.assertRaises(exception.VolumeBackendAPIException, self.driver.client.login) def test_create_snapshot_fail(self): self.driver.client.test_fail = True self.assertRaises(exception.VolumeBackendAPIException, self.driver.create_snapshot, self.snapshot) def test_create_volume_fail(self): self.driver.client.test_fail = True self.assertRaises(exception.VolumeBackendAPIException, self.driver.create_volume, self.volume) def test_initialize_connection_fail(self): self.driver.client.test_fail = True self.assertRaises(exception.VolumeBackendAPIException, self.driver.initialize_connection, self.volume, FakeConnector) def test_lun_is_associated_to_lungroup(self): self.driver.client.associate_lun_to_lungroup('11', '11') result = self.driver.client._is_lun_associated_to_lungroup('11', '11') self.assertTrue(result) def test_lun_is_not_associated_to_lun_group(self): self.driver.client.associate_lun_to_lungroup('12', '12') self.driver.client.remove_lun_from_lungroup('12', '12') result = self.driver.client._is_lun_associated_to_lungroup('12', '12') self.assertFalse(result) @ddt.data(FAKE_POOLS_UNSUPPORT_REPORT, FAKE_POOLS_SUPPORT_REPORT) 
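# A note on the parameterization used throughout this class: @ddt.data runs
# the decorated test once per value, so the migration path below is exercised
# for both fake pool capability reports. A minimal, self-contained sketch of
# the same pattern (hypothetical names, not part of this test suite), kept as
# comments so it does not interfere with the decorators that follow:
#
#     import ddt
#     import unittest
#
#     @ddt.ddt
#     class ExampleTestCase(unittest.TestCase):
#         @ddt.data('report_a', 'report_b')
#         def test_example(self, report):
#             # ddt generates a separate test case for each datum.
#             self.assertIn(report, ('report_a', 'report_b'))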
@mock.patch.object(rest_client, 'RestClient') def test_migrate_volume_success(self, mock_add_lun_to_partition, pool_data): # Migrate volume without new type. empty_dict = {} self.driver.support_func = pool_data moved, model_update = self.driver.migrate_volume(None, self.volume, test_host) self.assertTrue(moved) self.assertEqual(empty_dict, model_update) def test_migrate_volume_fail(self): self.driver.client.test_fail = True # Migrate volume without new type. self.assertRaises(exception.VolumeBackendAPIException, self.driver.migrate_volume, None, self.volume, test_host) def test_check_migration_valid(self): is_valid = self.driver._check_migration_valid(test_host, self.volume) self.assertTrue(is_valid) # No pool_name in capabilities. invalid_host1 = {'host': 'ubuntu001@backend002#OpenStack_Pool', 'capabilities': {'location_info': '210235G7J20000000000', 'allocated_capacity_gb': 0, 'volume_backend_name': 'HuaweiFCDriver', 'storage_protocol': 'FC'}} is_valid = self.driver._check_migration_valid(invalid_host1, self.volume) self.assertFalse(is_valid) # location_info in capabilities is not matched. invalid_host2 = {'host': 'ubuntu001@backend002#OpenStack_Pool', 'capabilities': {'location_info': '210235G7J20000000001', 'allocated_capacity_gb': 0, 'pool_name': 'OpenStack_Pool', 'volume_backend_name': 'HuaweiFCDriver', 'storage_protocol': 'FC'}} is_valid = self.driver._check_migration_valid(invalid_host2, self.volume) self.assertFalse(is_valid) # storage_protocol is not match current protocol and volume status is # 'in-use'. location = ('{"huawei_lun_wwn": "6643e8c1004c5f6723e9f454003", ' '"huawei_lun_id": "11"}') volume_in_use = {'name': 'volume-21ec7341-9256-497b-97d9-ef48edcf0635', 'size': 2, 'volume_name': 'vol1', 'id': ID, 'volume_id': '21ec7341-9256-497b-97d9-ef48edcf0635', 'volume_attachment': 'in-use', 'provider_location': location} invalid_host2 = {'host': 'ubuntu001@backend002#OpenStack_Pool', 'capabilities': {'location_info': '210235G7J20000000001', 'allocated_capacity_gb': 0, 'pool_name': 'OpenStack_Pool', 'volume_backend_name': 'HuaweiFCDriver', 'storage_protocol': 'iSCSI'}} is_valid = self.driver._check_migration_valid(invalid_host2, volume_in_use) self.assertFalse(is_valid) # pool_name is empty. 
invalid_host3 = {'host': 'ubuntu001@backend002#OpenStack_Pool', 'capabilities': {'location_info': '210235G7J20000000001', 'allocated_capacity_gb': 0, 'pool_name': '', 'volume_backend_name': 'HuaweiFCDriver', 'storage_protocol': 'iSCSI'}} is_valid = self.driver._check_migration_valid(invalid_host3, self.volume) self.assertFalse(is_valid) @mock.patch.object(rest_client.RestClient, 'rename_lun') def test_update_migrated_volume_success(self, mock_rename_lun): model_update = self.driver.update_migrated_volume( None, self.original_volume, self.current_volume, 'available') self.assertIsNone(model_update['_name_id']) self.assertDictEqual(json.loads(PROVIDER_LOCATION), json.loads(model_update['provider_location'])) @mock.patch.object(rest_client.RestClient, 'rename_lun') def test_update_migrated_volume_fail(self, mock_rename_lun): mock_rename_lun.side_effect = exception.VolumeBackendAPIException( data='Error occurred.') model_update = self.driver.update_migrated_volume(None, self.original_volume, self.current_volume, 'available') self.assertEqual(self.current_volume.name_id, model_update['_name_id']) self.assertDictEqual(json.loads(PROVIDER_LOCATION), json.loads(model_update['provider_location'])) @mock.patch.object(rest_client.RestClient, 'add_lun_to_partition') def test_retype_volume_success(self, mock_add_lun_to_partition): self.driver.support_func = FAKE_POOLS_SUPPORT_REPORT retype = self.driver.retype(None, self.volume, test_new_type, None, test_host) self.assertTrue(retype[0]) @ddt.data(FAKE_POOLS_UNSUPPORT_REPORT, FAKE_POOLS_SUPPORT_REPORT) @mock.patch.object(rest_client, 'RestClient') @mock.patch.object( common.HuaweiBaseDriver, '_get_volume_type', return_value={'extra_specs': sync_replica_specs}) def test_retype_replication_volume_success(self, mock_get_type, mock_add_lun_to_partition, pool_data): self.driver.support_func = pool_data retype = self.driver.retype(None, self.volume, test_new_replication_type, None, test_host) self.assertTrue(retype[0]) @ddt.data( [ replication.ReplicaPairManager, 'create_replica', exception.VolumeBackendAPIException( data='Can\'t support smarttier on the array.'), FAKE_POOLS_UNSUPPORT_REPORT ], [ replication.ReplicaPairManager, 'create_replica', exception.VolumeBackendAPIException( data='Can\'t support smarttier on the array.'), FAKE_POOLS_SUPPORT_REPORT ], [ replication.ReplicaPairManager, 'delete_replica', exception.VolumeBackendAPIException( data='Can\'t support smarttier on the array.'), FAKE_POOLS_SUPPORT_REPORT ], [ replication.ReplicaPairManager, 'delete_replica', exception.VolumeBackendAPIException( data='Can\'t support smarttier on the array.'), FAKE_POOLS_UNSUPPORT_REPORT ], ) @ddt.unpack def test_retype_replication_volume_fail(self, mock_module, mock_func, side_effect, pool_data): self.driver.support_func = pool_data self.mock_object(mock_module, mock_func, side_effect=side_effect) self.mock_object(rest_client.RestClient, 'add_lun_to_partition') if mock_func == 'create_replica': extra_specs = {} new_type = test_new_replication_type else: extra_specs = {'capabilities:replication_enabled': ' True'} new_type = {'extra_specs': {}, } self.volume.volume_type = objects.VolumeType( extra_specs=extra_specs, qos_specs_id=None) retype = self.driver.retype(None, self.volume, new_type, None, test_host) self.assertFalse(retype) @ddt.data(FAKE_POOLS_UNSUPPORT_REPORT, FAKE_POOLS_SUPPORT_REPORT) def test_retype_volume_cache_fail(self, pool_data): self.driver.client.cache_not_exist = True self.driver.support_func = pool_data 
self.assertRaises(exception.VolumeBackendAPIException, self.driver.retype, None, self.volume, test_new_type, None, test_host) @ddt.data(FAKE_POOLS_UNSUPPORT_REPORT, FAKE_POOLS_SUPPORT_REPORT) def test_retype_volume_partition_fail(self, pool_data): self.driver.support_func = pool_data self.driver.client.partition_not_exist = True self.assertRaises(exception.VolumeBackendAPIException, self.driver.retype, None, self.volume, test_new_type, None, test_host) @mock.patch.object(rest_client.RestClient, 'add_lun_to_partition') def test_retype_volume_fail(self, mock_add_lun_to_partition): self.driver.support_func = FAKE_POOLS_SUPPORT_REPORT mock_add_lun_to_partition.side_effect = ( exception.VolumeBackendAPIException(data='Error occurred.')) retype = self.driver.retype(None, self.volume, test_new_type, None, test_host) self.assertFalse(retype) @mock.patch.object(rest_client.RestClient, 'get_all_engines', return_value=[{'NODELIST': '["0A","0B"]', 'ID': '0'}]) def test_build_ini_targ_map_engie_recorded(self, mock_engines): fake_lookup_service = FCSanLookupService() zone_helper = fc_zone_helper.FCZoneHelper( fake_lookup_service, self.driver.client) (tgt_wwns, portg_id, init_targ_map) = zone_helper.build_ini_targ_map( ['10000090fa0d6754'], '1', '11') target_port_wwns = ['2000643e8c4c5f66'] self.assertEqual(target_port_wwns, tgt_wwns) self.assertEqual({}, init_targ_map) @ddt.data(fake_fabric_mapping_no_ports, fake_fabric_mapping_no_wwn) def test_filter_by_fabric_fail(self, ddt_map): self.mock_object( FCSanLookupService, 'get_device_mapping_from_network', return_value=ddt_map) fake_lookup_service = FCSanLookupService() zone_helper = fc_zone_helper.FCZoneHelper( fake_lookup_service, self.driver.client) self.assertRaises(exception.VolumeBackendAPIException, zone_helper._filter_by_fabric, ['10000090fa0d6754'], None) @mock.patch.object(rest_client.RestClient, 'get_all_engines', return_value=[{'NODELIST': '["0A"]', 'ID': '0'}, {'NODELIST': '["0B"]', 'ID': '1'}]) @mock.patch.object(fc_zone_helper.FCZoneHelper, '_build_contr_port_map', return_value={'0B': ['2000643e8c4c5f67']}) def test_build_ini_targ_map_engie_not_recorded(self, mock_engines, map): fake_lookup_service = FCSanLookupService() zone_helper = fc_zone_helper.FCZoneHelper( fake_lookup_service, self.driver.client) (tgt_wwns, portg_id, init_targ_map) = zone_helper.build_ini_targ_map( ['10000090fa0d6754'], '1', '11') expected_wwns = ['2000643e8c4c5f67', '2000643e8c4c5f66'] expected_map = {'10000090fa0d6754': expected_wwns} self.assertEqual(expected_wwns, tgt_wwns) self.assertEqual(expected_map, init_targ_map) @mock.patch.object(rest_client.RestClient, 'get_all_engines', return_value=[{'NODELIST': '["0A", "0B"]', 'ID': '0'}]) def test_build_ini_targ_map_no_map(self, mock_engines): fake_lookup_service = FCSanLookupService() zone_helper = fc_zone_helper.FCZoneHelper( fake_lookup_service, self.driver.client) # Host with id '5' has no map on the array. 
(tgt_wwns, portg_id, init_targ_map) = zone_helper.build_ini_targ_map( ['10000090fa0d6754'], '5', '11') expected_wwns = ['2000643e8c4c5f66'] expected_map = {'10000090fa0d6754': ['2000643e8c4c5f66']} self.assertEqual(expected_wwns, tgt_wwns) self.assertEqual(expected_map, init_targ_map) @mock.patch.object(rest_client.RestClient, 'get_all_engines', return_value=[{'NODELIST': '["0A", "0B"]', 'ID': '0'}]) @mock.patch.object(rest_client.RestClient, 'get_tgt_port_group', return_value='0') @mock.patch.object(rest_client.RestClient, 'delete_portgroup') def test_build_ini_targ_map_exist_portg(self, delete, engines, portg): fake_lookup_service = FCSanLookupService() zone_helper = fc_zone_helper.FCZoneHelper( fake_lookup_service, self.driver.client) # Host with id '5' has no map on the array. (tgt_wwns, portg_id, init_targ_map) = zone_helper.build_ini_targ_map( ['10000090fa0d6754'], '5', '11') expected_wwns = ['2000643e8c4c5f66'] expected_map = {'10000090fa0d6754': ['2000643e8c4c5f66']} self.assertEqual(expected_wwns, tgt_wwns) self.assertEqual(expected_map, init_targ_map) self.assertEqual(1, delete.call_count) def test_get_init_targ_map(self): fake_lookup_service = FCSanLookupService() zone_helper = fc_zone_helper.FCZoneHelper( fake_lookup_service, self.driver.client) (tgt_wwns, portg_id, init_targ_map) = zone_helper.get_init_targ_map( ['10000090fa0d6754'], '1') expected_wwns = ['2000643e8c4c5f66'] expected_map = {'10000090fa0d6754': ['2000643e8c4c5f66']} self.assertEqual(expected_wwns, tgt_wwns) self.assertEqual(expected_map, init_targ_map) def test_get_init_targ_map_no_host(self): fake_lookup_service = FCSanLookupService() zone_helper = fc_zone_helper.FCZoneHelper( fake_lookup_service, self.driver.client) ret = zone_helper.get_init_targ_map( ['10000090fa0d6754'], None) expected_ret = ([], None, {}) self.assertEqual(expected_ret, ret) def test_multi_resturls_success(self): self.driver.client.test_multi_url_flag = True lun_info = self.driver.create_volume(self.volume) expect_value = {"huawei_lun_id": "1", "huawei_lun_wwn": "6643e8c1004c5f6723e9f454003"} self.assertDictEqual(expect_value, json.loads(lun_info['provider_location'])) def test_get_id_from_result(self): result = {} name = 'test_name' key = 'NAME' re = self.driver.client._get_id_from_result(result, name, key) self.assertIsNone(re) result = {'data': {}} re = self.driver.client._get_id_from_result(result, name, key) self.assertIsNone(re) result = {'data': [{'COUNT': 1, 'ID': '1'}, {'COUNT': 2, 'ID': '2'}]} re = self.driver.client._get_id_from_result(result, name, key) self.assertIsNone(re) result = {'data': [{'NAME': 'test_name1', 'ID': '1'}, {'NAME': 'test_name2', 'ID': '2'}]} re = self.driver.client._get_id_from_result(result, name, key) self.assertIsNone(re) result = {'data': [{'NAME': 'test_name', 'ID': '1'}, {'NAME': 'test_name2', 'ID': '2'}]} re = self.driver.client._get_id_from_result(result, name, key) self.assertEqual('1', re) @mock.patch.object(rest_client.RestClient, 'get_pool_info', return_value={'ID': 1, 'CAPACITY': 110362624, 'TOTALCAPACITY': 209715200}) def test_get_capacity(self, mock_get_pool_info): expected_pool_capacity = {'total_capacity': 100.0, 'free_capacity': 52.625} pool_capacity = self.driver.client._get_capacity(None, None) self.assertEqual(expected_pool_capacity, pool_capacity) @mock.patch.object(huawei_utils, 'get_volume_params', return_value=fake_hypermetro_opts) @mock.patch.object(rest_client.RestClient, 'get_all_pools', return_value=FAKE_STORAGE_POOL_RESPONSE) @mock.patch.object(rest_client.RestClient, 
'get_pool_info', return_value=FAKE_FIND_POOL_RESPONSE) @mock.patch.object(rest_client.RestClient, 'get_hyper_domain_id', return_value='11') @mock.patch.object(hypermetro.HuaweiHyperMetro, '_wait_volume_ready', return_value=True) @mock.patch.object(hypermetro.HuaweiHyperMetro, '_create_hypermetro_pair', return_value={"ID": '11', "NAME": 'hypermetro-pair'}) @mock.patch.object(rest_client.RestClient, 'logout', return_value=None) def test_create_hypermetro_success(self, mock_hypermetro_opts, mock_login_return, mock_all_pool_info, mock_pool_info, mock_hyper_domain, mock_volume_ready, mock_logout): location = {"huawei_lun_id": "1", "hypermetro_id": "11", "remote_lun_id": "1", "huawei_lun_wwn": "6643e8c1004c5f6723e9f454003"} lun_info = self.driver.create_volume(self.hyper_volume) self.assertDictEqual(location, json.loads(lun_info['provider_location'])) @ddt.data(FAKE_POOLS_UNSUPPORT_REPORT, FAKE_POOLS_SUPPORT_REPORT) @mock.patch.object(huawei_utils, 'get_volume_params', return_value=fake_hypermetro_opts) @mock.patch.object(rest_client.RestClient, 'get_all_pools', return_value=FAKE_STORAGE_POOL_RESPONSE) @mock.patch.object(rest_client.RestClient, 'get_pool_info', return_value=FAKE_FIND_POOL_RESPONSE) @mock.patch.object(rest_client.RestClient, 'get_hyper_domain_id', return_value='11') @mock.patch.object(hypermetro.HuaweiHyperMetro, '_wait_volume_ready', return_value=True) @mock.patch.object(rest_client.RestClient, 'create_hypermetro') def test_create_hypermetro_fail(self, pool_data, mock_pair_info, mock_hypermetro_opts, mock_all_pool_info, mock_pool_info, mock_hyper_domain, mock_volume_ready ): self.driver.support_func = pool_data mock_pair_info.side_effect = ( exception.VolumeBackendAPIException(data='Error occurred.')) self.assertRaises(exception.VolumeBackendAPIException, self.driver.metro.create_hypermetro, "11", {}) @mock.patch.object(huawei_utils, 'get_volume_private_data', return_value={'hypermetro_id': '3400a30d844d0007', 'remote_lun_id': '1'}) @mock.patch.object(rest_client.RestClient, 'do_mapping', return_value={'lun_id': '1', 'view_id': '1', 'aval_luns': '[1]'}) def test_hypermetro_connection_success_2(self, mock_map, mock_metadata): fc_properties = self.driver.metro.connect_volume_fc(self.volume, FakeConnector) self.assertEqual(1, fc_properties['data']['target_lun']) mock_map.assert_called_once_with('1', '0', '1', hypermetro_lun=True) @mock.patch.object(huawei_driver.huawei_utils, 'get_volume_metadata', return_value={'hypermetro_id': '3400a30d844d0007', 'remote_lun_id': '1'}) def test_terminate_hypermetro_connection_success(self, mock_metradata): self.driver.metro.disconnect_volume_fc(self.volume, FakeConnector) @mock.patch.object(huawei_utils, 'get_volume_private_data', return_value={'hypermetro_id': '3400a30d844d0007', 'remote_lun_id': None}) @mock.patch.object(rest_client.RestClient, 'get_lun_id_by_name', return_value=None) def test_hypermetroid_none_fail(self, mock_metadata, moke_metro_name): self.assertRaises(exception.VolumeBackendAPIException, self.driver.metro.connect_volume_fc, self.hyper_volume, FakeConnector) def test_wait_volume_ready_success(self): flag = self.driver.metro._wait_volume_ready("11") self.assertIsNone(flag) @mock.patch.object(huawei_driver.huawei_utils, 'get_volume_metadata', return_value={'hypermetro_id': '3400a30d844d0007', 'remote_lun_id': '1'}) @mock.patch.object(rest_client.RestClient, 'get_online_free_wwns', return_value=[]) @mock.patch.object(rest_client.RestClient, 'get_host_iscsi_initiators', return_value=[]) def test_hypermetro_connection_fail(self, 
mock_metadata, mock_fc_initiator, mock_host_initiators): self.assertRaises(exception.VolumeBackendAPIException, self.driver.metro.connect_volume_fc, self.hyper_volume, FakeConnector) def test_create_snapshot_fail_hypermetro(self): self.volume.volume_type = objects.VolumeType( extra_specs=replica_hypermetro_specs, qos_specs_id=None) self.assertRaises(exception.VolumeBackendAPIException, self.driver.create_volume_from_snapshot, self.volume, self.snapshot) def test_create_snapshot_fail_no_snapshot_id(self): self.snapshot.provider_location = None self.mock_object(self.driver.client, 'get_snapshot_info_by_name', return_value=None) self.assertRaises(exception.VolumeBackendAPIException, self.driver.create_volume_from_snapshot, self.volume, self.snapshot) @mock.patch.object(rest_client.RestClient, 'call', return_value={"data": [{"RUNNINGSTATUS": "27", "ID": '1'}, {"RUNNINGSTATUS": "26", "ID": '2'}], "error": {"code": 0}}) def test_get_online_free_wwns(self, mock_call): wwns = self.driver.client.get_online_free_wwns() self.assertEqual(['1'], wwns) def test_get_fc_initiator_on_array(self): wwns = self.driver.client.get_fc_initiator_on_array() self.assertListEqual(["10000090fa0d6754", "10000090fa0d6755"], wwns) @mock.patch.object(rest_client.RestClient, 'call', return_value={"data": {"ID": 1}, "error": {"code": 0}}) def test_rename_lun(self, mock_call): des = 'This LUN is renamed.' new_name = 'test_name' self.driver.client.rename_lun('1', new_name, des) self.assertEqual(1, mock_call.call_count) url = "/lun/1" data = {"NAME": new_name, "DESCRIPTION": des} mock_call.assert_called_once_with(url, data, "PUT") @mock.patch.object(rest_client.RestClient, 'call', return_value={"data": {}}) def test_is_host_associated_to_hostgroup_no_data(self, mock_call): res = self.driver.client.is_host_associated_to_hostgroup('1') self.assertFalse(res) @mock.patch.object(rest_client.RestClient, 'call', return_value={"data": {'ISADD2HOSTGROUP': 'true'}}) def test_is_host_associated_to_hostgroup_true(self, mock_call): res = self.driver.client.is_host_associated_to_hostgroup('1') self.assertTrue(res) @mock.patch.object(rest_client.RestClient, 'call', return_value={"data": {'ISADD2HOSTGROUP': 'false'}}) def test_is_host_associated_to_hostgroup_false(self, mock_call): res = self.driver.client.is_host_associated_to_hostgroup('1') self.assertFalse(res) @ddt.data([{"hypermetro": "true"}], []) def test_create_group_success(self, cg_type): self.mock_object(common.HuaweiBaseDriver, '_get_group_type', return_value=cg_type) self.mock_object(volume_utils, 'is_group_a_cg_snapshot_type', return_value=True) model_update = self.driver.create_group(None, self.group) self.assertEqual(fields.GroupStatus.AVAILABLE, model_update['status']) @ddt.data( ([fake_snapshot.fake_snapshot_obj( None, provider_location=SNAP_PROVIDER_LOCATION, id=ID, volume_size=1)], [], False), ([], [fake_volume.fake_volume_obj( None, provider_location=PROVIDER_LOCATION, id=ID, size=1)], True), ) @ddt.unpack def test_create_group_from_src(self, snapshots, source_vols, tmp_snap): self.mock_object(common.HuaweiBaseDriver, '_get_group_type', return_value=[]) self.mock_object(volume_utils, 'is_group_a_cg_snapshot_type', return_value=True) create_snap_mock = self.mock_object( self.driver, '_create_group_snapshot', wraps=self.driver._create_group_snapshot) delete_snap_mock = self.mock_object( self.driver, '_delete_group_snapshot', wraps=self.driver._delete_group_snapshot) self.mock_object(self.driver.client, 'get_snapshot_info_by_name', return_value= {'ID': ID, 'RUNNINGSTATUS': 
                             constants.STATUS_ACTIVE})
        model_update, volumes_model_update = (
            self.driver.create_group_from_src(
                None, self.group, [self.volume], snapshots=snapshots,
                source_vols=source_vols))

        if tmp_snap:
            create_snap_mock.assert_called_once()
            delete_snap_mock.assert_called_once()
        else:
            create_snap_mock.assert_not_called()
            delete_snap_mock.assert_not_called()

        self.assertDictEqual({'status': fields.GroupStatus.AVAILABLE},
                             model_update)
        self.assertEqual(1, len(volumes_model_update))
        self.assertEqual(ID, volumes_model_update[0]['id'])

    @ddt.data([{"hypermetro": "true"}], [])
    def test_delete_group_success(self, cg_type):
        test_volumes = [self.volume]
        ctxt = context.get_admin_context()
        self.mock_object(common.HuaweiBaseDriver, '_get_group_type',
                         return_value=cg_type)
        self.mock_object(volume_utils, 'is_group_a_cg_snapshot_type',
                         return_value=True)
        model, volumes = self.driver.delete_group(
            ctxt, self.group, test_volumes)
        self.assertEqual(fields.GroupStatus.DELETED, model['status'])

    @mock.patch.object(common.HuaweiBaseDriver, '_get_group_type',
                       return_value=[{"hypermetro": "true"}])
    @mock.patch.object(huawei_driver.huawei_utils, 'get_volume_metadata',
                       return_value={'hypermetro_id': '3400a30d844d0007',
                                     'remote_lun_id': '59'})
    def test_update_group_success(self, mock_grouptype, mock_metadata):
        ctxt = context.get_admin_context()
        add_volumes = [self.hyper_volume]
        remove_volumes = []
        self.mock_object(volume_utils, 'is_group_a_cg_snapshot_type',
                         return_value=True)
        model_update = self.driver.update_group(
            ctxt, self.group, add_volumes, remove_volumes)
        self.assertEqual(fields.GroupStatus.AVAILABLE,
                         model_update[0]['status'])

    def test_is_initiator_associated_to_host_raise(self):
        self.assertRaises(exception.VolumeBackendAPIException,
                          self.driver.client.is_initiator_associated_to_host,
                          'ini-2', '1')

    def test_is_initiator_associated_to_host_true(self):
        ret = self.driver.client.is_initiator_associated_to_host('ini-1', '1')
        self.assertFalse(ret)
        ret = self.driver.client.is_initiator_associated_to_host('ini-2', '2')
        self.assertTrue(ret)


@ddt.ddt
class HuaweiConfTestCase(test.TestCase):
    def setUp(self):
        super(HuaweiConfTestCase, self).setUp()

        self.tmp_dir = tempfile.mkdtemp()
        self.fake_xml_file = self.tmp_dir + '/cinder_huawei_conf.xml'

        self.conf = mock.Mock()
        self.conf.cinder_huawei_conf_file = self.fake_xml_file
        self.huawei_conf = huawei_conf.HuaweiConf(self.conf)

    def _create_fake_conf_file(self, configs):
        """Create a fake config file.

        The Huawei driver is configured through a vendor-specific XML file
        that carries the storage array's custom parameters, so the unit
        tests need to simulate such a configuration file.
""" doc = minidom.Document() config = doc.createElement('config') doc.appendChild(config) storage = doc.createElement('Storage') config.appendChild(storage) url = doc.createElement('RestURL') url_text = doc.createTextNode('http://192.0.2.69:8082/' 'deviceManager/rest/') url.appendChild(url_text) storage.appendChild(url) username = doc.createElement('UserName') username_text = doc.createTextNode('admin') username.appendChild(username_text) storage.appendChild(username) password = doc.createElement('UserPassword') password_text = doc.createTextNode('Admin@storage') password.appendChild(password_text) storage.appendChild(password) product = doc.createElement('Product') product_text = doc.createTextNode(configs.get('Product', 'V3')) product.appendChild(product_text) storage.appendChild(product) protocol = doc.createElement('Protocol') protocol_text = doc.createTextNode('iSCSI') protocol.appendChild(protocol_text) storage.appendChild(protocol) lun = doc.createElement('LUN') config.appendChild(lun) if 'LUNType' in configs: luntype = doc.createElement('LUNType') luntype_text = doc.createTextNode(configs['LUNType']) luntype.appendChild(luntype_text) lun.appendChild(luntype) lun_ready_wait_interval = doc.createElement('LUNReadyWaitInterval') lun_ready_wait_interval_text = doc.createTextNode('2') lun_ready_wait_interval.appendChild(lun_ready_wait_interval_text) lun.appendChild(lun_ready_wait_interval) lun_copy_wait_interval = doc.createElement('LUNcopyWaitInterval') lun_copy_wait_interval_text = doc.createTextNode('2') lun_copy_wait_interval.appendChild(lun_copy_wait_interval_text) lun.appendChild(lun_copy_wait_interval) timeout = doc.createElement('Timeout') timeout_text = doc.createTextNode('43200') timeout.appendChild(timeout_text) lun.appendChild(timeout) write_type = doc.createElement('WriteType') write_type_text = doc.createTextNode('1') write_type.appendChild(write_type_text) lun.appendChild(write_type) mirror_switch = doc.createElement('MirrorSwitch') mirror_switch_text = doc.createTextNode('1') mirror_switch.appendChild(mirror_switch_text) lun.appendChild(mirror_switch) prefetch = doc.createElement('Prefetch') prefetch.setAttribute('Type', '1') prefetch.setAttribute('Value', '0') lun.appendChild(prefetch) pool = doc.createElement('StoragePool') pool_text = doc.createTextNode('OpenStack_Pool') pool.appendChild(pool_text) lun.appendChild(pool) iscsi = doc.createElement('iSCSI') config.appendChild(iscsi) defaulttargetip = doc.createElement('DefaultTargetIP') defaulttargetip_text = doc.createTextNode('192.0.2.68') defaulttargetip.appendChild(defaulttargetip_text) iscsi.appendChild(defaulttargetip) initiator = doc.createElement('Initiator') initiator.setAttribute('Name', 'iqn.1993-08.debian:01:ec2bff7ac3a3') initiator.setAttribute('TargetIP', '192.0.2.2') initiator.setAttribute('CHAPinfo', 'mm-user;mm-user@storage') initiator.setAttribute('ALUA', '1') initiator.setAttribute('TargetPortGroup', 'PortGroup001') iscsi.appendChild(initiator) fakefile = open(self.conf.cinder_huawei_conf_file, 'w') fakefile.write(doc.toprettyxml(indent='')) fakefile.close() @ddt.data( ( { 'Product': 'Dorado', 'LUNType': 'Thin', }, 1, ), ( { 'Product': 'Dorado', }, 1, ), ( { 'Product': 'Dorado', 'LUNType': 'Thick', }, exception.InvalidInput, ), ( { 'Product': 'V3', 'LUNType': 'Thick', }, 0, ), ( { 'Product': 'V3', 'LUNType': 'invalid', }, exception.InvalidInput, ), ) @ddt.unpack def test_luntype_config(self, custom_configs, expect_result): self._create_fake_conf_file(custom_configs) tree = 
ElementTree.parse(self.conf.cinder_huawei_conf_file) xml_root = tree.getroot() self.huawei_conf._san_product(xml_root) if isinstance(expect_result, int): self.huawei_conf._lun_type(xml_root) self.assertEqual(expect_result, self.conf.lun_type) else: self.assertRaises(expect_result, self.huawei_conf._lun_type, xml_root) @ddt.data( [{ 'backend_id': "default", 'san_address': 'https://192.0.2.69:8088/deviceManager/rest/', 'san_user': 'admin', 'san_password': '123456', 'storage_pool': 'OpenStack_Pool', 'iscsi_info': '{Name:iqn;CHAPinfo:user#pwd;ALUA:1}' }] ) def test_get_replication_devices(self, config): self.mock_object(self.conf, 'safe_get', mock.Mock(return_value=config) ) self.huawei_conf._replication_devices(None) expected = { 'backend_id': 'default', 'san_address': ['https://192.0.2.69:8088/deviceManager/rest/'], 'san_password': '123456', 'san_user': 'admin', 'storage_pools': ['OpenStack_Pool'], 'vstore_name': None, 'iscsi_info': { 'initiators': { 'iqn': {'ALUA': '1', 'CHAPinfo': 'user;pwd', 'Name': 'iqn'} }, 'default_target_ips': [], }, 'fc_info': { 'initiators': {}, 'default_target_ips': [], }, } self.assertDictEqual(expected, self.conf.replication) @ddt.ddt class HuaweiRestClientTestCase(test.TestCase): def setUp(self): super(HuaweiRestClientTestCase, self).setUp() config = mock.Mock(spec=conf.Configuration) huawei_conf = FakeHuaweiConf(config, 'iSCSI') huawei_conf.update_config_value() self.client = rest_client.RestClient( config, config.san_address, config.san_user, config.san_password) def test_init_http_head(self): self.client.init_http_head() self.assertIsNone(self.client.url) self.assertEqual("keep-alive", self.client.session.headers["Connection"]) self.assertEqual("application/json", self.client.session.headers["Content-Type"]) self.assertEqual(False, self.client.session.verify) @ddt.data('POST', 'PUT', 'GET', 'DELETE') def test_do_call_method(self, method): self.client.init_http_head() if method: mock_func = self.mock_object(self.client.session, method.lower()) else: mock_func = self.mock_object(self.client.session, 'post') self.client.do_call("http://fake-rest-url", None, method) mock_func.assert_called_once_with("http://fake-rest-url", timeout=constants.SOCKET_TIMEOUT) def test_do_call_method_invalid(self): self.assertRaises(exception.VolumeBackendAPIException, self.client.do_call, "http://fake-rest-url", None, 'fake-method') def test_do_call_http_error(self): self.client.init_http_head() fake_res = requests.Response() fake_res.reason = 'something wrong' fake_res.status_code = 500 fake_res.url = "http://fake-rest-url" self.mock_object(self.client.session, 'post', return_value=fake_res) res = self.client.do_call("http://fake-rest-url", None, 'POST') expected = {"error": {"code": 500, "description": '500 Server Error: something wrong for ' 'url: http://fake-rest-url'}} self.assertEqual(expected, res) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315577.2831202 cinder-27.0.0/cinder/tests/unit/volume/drivers/ibm/0000775000175000017500000000000000000000000022210 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/volume/drivers/ibm/__init__.py0000664000175000017500000000000000000000000024307 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 
cinder-27.0.0/cinder/tests/unit/volume/drivers/ibm/fake_pyxcli.py0000664000175000017500000000244500000000000025065 0ustar00zuulzuul00000000000000# Copyright (c) 2016 IBM Corporation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # """ Fake pyxcli-client for testing the driver without installing pyxcli""" import sys from unittest import mock from cinder.tests.unit.volume.drivers.ibm import fake_pyxcli_exceptions pyxcli_client = mock.Mock() pyxcli_client.errors = fake_pyxcli_exceptions pyxcli_client.events = mock.Mock() pyxcli_client.mirroring = mock.Mock() pyxcli_client.transports = fake_pyxcli_exceptions pyxcli_client.mirroring.cg_recovery_manager = mock.Mock() pyxcli_client.version = '1.1.6' pyxcli_client.mirroring.mirrored_entities = mock.Mock() sys.modules['pyxcli'] = pyxcli_client sys.modules['pyxcli.events'] = pyxcli_client.events sys.modules['pyxcli.mirroring'] = pyxcli_client.mirroring ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/volume/drivers/ibm/fake_pyxcli_exceptions.py0000664000175000017500000000314500000000000027324 0ustar00zuulzuul00000000000000# Copyright (c) 2016 IBM Corporation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # """ Fake pyxcli exceptions for testing the driver without installing pyxcli""" class XCLIError(Exception): pass class VolumeBadNameError(XCLIError): pass class CredentialsError(XCLIError): pass class ConnectionError(XCLIError): pass class CgHasMirrorError(XCLIError): pass class CgDoesNotExistError(XCLIError): pass class CgEmptyError(XCLIError): pass class PoolSnapshotLimitReachedError(XCLIError): pass class CommandFailedRuntimeError(XCLIError): pass class PoolOutOfSpaceError(XCLIError): pass class CgLimitReachedError(XCLIError): pass class HostBadNameError(XCLIError): pass class CgNotEmptyError(XCLIError): pass class SystemOutOfSpaceError(XCLIError): pass class CgNameExistsError(XCLIError): pass class CgBadNameError(XCLIError): pass class SnapshotGroupDoesNotExistError(XCLIError): pass class ClosedTransportError(XCLIError): pass class VolumeNotInConsGroup(XCLIError): pass ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/volume/drivers/ibm/test_ds8k_proxy.py0000664000175000017500000061511100000000000025740 0ustar00zuulzuul00000000000000# Copyright (c) 2016 IBM Corporation # All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # """Tests for the IBM DS8K family driver.""" import ast import copy import json from unittest import mock import ddt import eventlet from cinder import context from cinder import exception from cinder.objects import fields from cinder.tests.unit import test from cinder.tests.unit import utils as testutils from cinder.volume import configuration as conf import cinder.volume.drivers.ibm.ibm_storage as storage from cinder.volume.drivers.ibm.ibm_storage import proxy from cinder.volume import group_types from cinder.volume import volume_types # mock decorator logger for all unit test cases. mock_logger = mock.patch.object(proxy, 'logger', lambda x: x) mock_logger.start() from cinder.volume.drivers.ibm.ibm_storage import ( ds8k_replication as replication) from cinder.volume.drivers.ibm.ibm_storage import ds8k_helper as helper from cinder.volume.drivers.ibm.ibm_storage import ds8k_proxy as ds8kproxy from cinder.volume.drivers.ibm.ibm_storage import ds8k_restclient as restclient mock_logger.stop() TEST_VOLUME_ID = '0001' TEST_VOLUME_ID_2 = '0002' TEST_HOST_ID = 'H1' TEST_VOLUME_BACKEND_NAME = 'ds8k_backend' TEST_GROUP_HOST = 'test_host@' + TEST_VOLUME_BACKEND_NAME + '#fakepool' TEST_HOST_1 = 'test_host@' + TEST_VOLUME_BACKEND_NAME TEST_HOST_2 = TEST_GROUP_HOST TEST_LUN_ID = '00' TEST_POOLS_STR = 'P0,P1' TEST_POOL_ID_1 = 'P0' TEST_POOL_ID_2 = 'P1' TEST_POOL_NAME_1 = 'OPENSTACK_DEV_0' TEST_POOL_NAME_2 = 'OPENSTACK_DEV_1' TEST_SOURCE_DS8K_IP = '1.1.1.1' TEST_TARGET_DS8K_IP = '2.2.2.2' TEST_SOURCE_WWNN = '5000000000FFC111' TEST_TARGET_WWNN = '5000000000FFD222' TEST_SOURCE_WWPN_1 = '10000090fa3418bc' TEST_SOURCE_WWPN_2 = '10000090FA3418BD' TEST_SOURCE_IOPORT = 'I0001' TEST_TARGET_IOPORT = 'I0002' TEST_LSS_ID_1 = '00' TEST_LSS_ID_2 = '01' TEST_LSS_ID_3 = '02' TEST_PPRC_PATH_ID_1 = (TEST_SOURCE_WWNN + "_" + TEST_LSS_ID_1 + ":" + TEST_TARGET_WWNN + "_" + TEST_LSS_ID_1) TEST_PPRC_PATH_ID_2 = (TEST_TARGET_WWNN + "_" + TEST_LSS_ID_1 + ":" + TEST_SOURCE_WWNN + "_" + TEST_LSS_ID_1) TEST_ECKD_VOLUME_ID = '1001' TEST_ECKD_POOL_ID = 'P10' TEST_ECKD_POOL_NAME = 'OPENSTACK_DEV_10' TEST_LCU_ID = '10' TEST_ECKD_PPRC_PATH_ID = (TEST_SOURCE_WWNN + "_" + TEST_LCU_ID + ":" + TEST_TARGET_WWNN + "_" + TEST_LCU_ID) TEST_SOURCE_SYSTEM_UNIT = u'2107-1111111' TEST_TARGET_SYSTEM_UNIT = u'2107-2222222' TEST_SOURCE_VOLUME_ID = TEST_VOLUME_ID TEST_TARGET_VOLUME_ID = TEST_VOLUME_ID TEST_PPRC_PAIR_ID = (TEST_SOURCE_SYSTEM_UNIT + '_' + TEST_SOURCE_VOLUME_ID + ':' + TEST_TARGET_SYSTEM_UNIT + '_' + TEST_TARGET_VOLUME_ID) TEST_FLASHCOPY = { 'sourcevolume': {'id': 'fake_volume_id_1'}, 'targetvolume': {'id': 'fake_volume_id_2'}, 'persistent': 'enabled', 'recording': 'enabled', 'backgroundcopy': 'disabled', 'state': 'valid' } TEST_CONNECTOR = { 'ip': '192.168.1.2', 'initiator': 'iqn.1993-08.org.debian:01:fdf9fdfd', 'wwpns': [TEST_SOURCE_WWPN_1, TEST_SOURCE_WWPN_2], 'platform': 'x86_64', 'os_type': 'linux2', 'host': 'fakehost' } TEST_REPLICATION_DEVICE = { 'san_ip': TEST_TARGET_DS8K_IP, 'san_login': 'fake', 
'san_clustername': TEST_POOL_ID_1, 'san_password': 'fake', 'backend_id': TEST_TARGET_DS8K_IP, 'connection_type': storage.XIV_CONNECTION_TYPE_FC, 'ds8k_logical_control_unit_range': '' } FAKE_GET_LSS_RESPONSE = { "server": { "status": "ok", "code": "", "message": "Operation done successfully." }, "data": { "lss": [ { "id": TEST_LSS_ID_1, "group": "0", "addrgrp": "0", "type": "fb", "configvols": "10" }, { "id": TEST_LSS_ID_2, "group": "1", "addrgrp": "0", "type": "fb", "configvols": "20" }, { "id": TEST_LSS_ID_3, "group": "0", "addrgrp": "0", "type": "fb", "configvols": "30" }, { "id": "10", "group": "0", "addrgrp": "1", "type": "ckd", "configvols": "12" } ] } } FAKE_GET_FB_LSS_RESPONSE_1 = { "server": { "status": "ok", "code": "", "message": "Operation done successfully." }, "data": { "lss": [ { "id": TEST_LSS_ID_1, "group": "0", "addrgrp": "0", "type": "fb", "configvols": "10", } ] } } FAKE_GET_FB_LSS_RESPONSE_2 = { "server": { "status": "ok", "code": "", "message": "Operation done successfully." }, "data": { "lss": [ { "id": TEST_LSS_ID_2, "group": "1", "addrgrp": "0", "type": "fb", "configvols": "20", } ] } } FAKE_GET_FB_LSS_RESPONSE_3 = { "server": { "status": "ok", "code": "", "message": "Operation done successfully." }, "data": { "lss": [ { "id": TEST_LSS_ID_3, "group": "0", "addrgrp": "0", "type": "fb", "configvols": "30", } ] } } FAKE_GET_CKD_LSS_RESPONSE = { "server": { "status": "ok", "code": "", "message": "Operation done successfully." }, "data": { "lss": [ { "id": "10", "group": "0", "addrgrp": "1", "type": "ckd", "configvols": "10", } ] } } FAKE_CREATE_VOLUME_RESPONSE = { "server": { "status": "ok", "code": "", "message": "Operation done successfully." }, "data": { "volumes": [ { "id": TEST_VOLUME_ID, "name": "fake_volume" } ] }, "link": { "rel": "self", "href": "https://1.1.1.1:8452/api/v1/volumes/" + TEST_VOLUME_ID } } FAKE_GET_PPRC_PATH_RESPONSE = { "server": { "status": "ok", "code": "", "message": "Operation done successfully." }, "data": { "paths": [ { "id": TEST_PPRC_PATH_ID_1, "source_lss_id": TEST_LSS_ID_1, "target_lss_id": TEST_LSS_ID_1, "target_system_wwnn": TEST_TARGET_WWNN, "port_pairs": [ { "source_port_id": TEST_SOURCE_IOPORT, "target_port_id": TEST_TARGET_IOPORT, "state": "success" } ] }, { "id": TEST_ECKD_PPRC_PATH_ID, "source_lss_id": TEST_LCU_ID, "target_lss_id": TEST_LCU_ID, "target_system_wwnn": TEST_TARGET_WWNN, "port_pairs": [ { "source_port_id": TEST_SOURCE_IOPORT, "target_port_id": TEST_TARGET_IOPORT, "state": "success" } ] } ] } } FAKE_GET_PPRC_PATH_RESPONSE_1 = { "server": { "status": "ok", "code": "", "message": "Operation done successfully." }, "data": { "paths": [ { "id": TEST_PPRC_PATH_ID_1, "source_lss_id": TEST_LSS_ID_1, "target_lss_id": TEST_LSS_ID_1, "target_system_wwnn": TEST_TARGET_WWNN, "port_pairs": [ { "source_port_id": TEST_SOURCE_IOPORT, "target_port_id": TEST_TARGET_IOPORT, "state": "success" } ] } ] } } FAKE_GET_PPRC_PATH_RESPONSE_2 = { "server": { "status": "ok", "code": "", "message": "Operation done successfully." }, "data": { "paths": [ { "id": TEST_PPRC_PATH_ID_2, "source_lss_id": TEST_LSS_ID_1, "target_lss_id": TEST_LSS_ID_1, "target_system_wwnn": TEST_SOURCE_WWNN, "port_pairs": [ { "source_port_id": TEST_TARGET_IOPORT, "target_port_id": TEST_SOURCE_IOPORT, "state": "success" } ] } ] } } FAKE_GET_ECKD_PPRC_PATH_RESPONSE = { "server": { "status": "ok", "code": "", "message": "Operation done successfully." 
}, "data": { "paths": [ { "id": TEST_ECKD_PPRC_PATH_ID, "source_lss_id": TEST_LCU_ID, "target_lss_id": TEST_LCU_ID, "target_system_wwnn": TEST_TARGET_WWNN, "port_pairs": [ { "source_port_id": TEST_SOURCE_IOPORT, "target_port_id": TEST_TARGET_IOPORT, "state": "success" } ] } ] } } FAKE_GET_PPRCS_RESPONSE = { "server": { "status": "ok", "code": "", "message": "Operation done successfully." }, "data": { "pprcs": [ { "id": TEST_PPRC_PAIR_ID, "source_volume": { "name": TEST_SOURCE_VOLUME_ID, }, "source_system": { "id": TEST_SOURCE_SYSTEM_UNIT, }, "target_volume": { "name": TEST_TARGET_VOLUME_ID, }, "target_system": { "id": TEST_TARGET_SYSTEM_UNIT, }, "type": "metro_mirror", "state": "full_duplex" } ] } } FAKE_GET_POOL_RESPONSE_1 = { "server": { "status": "ok", "code": "", "message": "Operation done successfully." }, "data": { "pools": [ { "id": TEST_POOL_ID_1, "name": TEST_POOL_NAME_1, "node": "0", "stgtype": "fb", "cap": "10737418240", "capavail": "10737418240" } ] } } FAKE_GET_POOL_RESPONSE_2 = { "server": { "status": "ok", "code": "", "message": "Operation done successfully." }, "data": { "pools": [ { "id": TEST_POOL_ID_2, "name": TEST_POOL_NAME_2, "node": "1", "stgtype": "fb", "cap": "10737418240", "capavail": "10737418240" } ] } } FAKE_GET_ECKD_POOL_RESPONSE = { "server": { "status": "ok", "code": "", "message": "Operation done successfully." }, "data": { "pools": [ { "id": TEST_ECKD_POOL_ID, "name": TEST_ECKD_POOL_NAME, "node": "0", "stgtype": "ckd", "cap": "10737418240", "capavail": "10737418240" } ] } } FAKE_GET_TOKEN_RESPONSE = { "server": { "status": "ok", "code": "", "message": "Operation done successfully." }, "token": { "token": "8cf01a2771a04035bcffb7f4a62e9df8", "expired_time": "2016-08-06T06:36:54-0700", "max_idle_interval": "1800000" } } FAKE_GET_PHYSICAL_LINKS_RESPONSE = { "server": { "status": "ok", "code": "", "message": "Operation done successfully." }, "data": { "physical_links": [ { "source_port_id": TEST_SOURCE_IOPORT, "target_port_id": TEST_TARGET_IOPORT } ] } } FAKE_GET_SYSTEM_RESPONSE_1 = { "server": { "status": "ok", "code": "", "message": "Operation done successfully." }, "data": { "systems": [ { "id": TEST_SOURCE_SYSTEM_UNIT, "name": "", "state": "online", "release": "7.5.1", "bundle": "87.51.63.0", "MTM": "2421-961", "sn": "1300741", "wwnn": TEST_SOURCE_WWNN, "cap": "28019290210304", "capalloc": "6933150957568", "capavail": "21086139252736", "capraw": "40265318400000" } ] } } FAKE_GET_SYSTEM_RESPONSE_2 = { "server": { "status": "ok", "code": "", "message": "Operation done successfully." }, "data": { "systems": [ { "id": TEST_TARGET_SYSTEM_UNIT, "name": "", "state": "online", "release": "7.5.1", "bundle": "87.51.63.0", "MTM": "2421-962", "sn": "1300742", "wwnn": TEST_TARGET_WWNN, "cap": "20019290210304", "capalloc": "4833150957560", "capavail": "31086139252736", "capraw": "20265318400000" } ] } } FAKE_GET_REST_VERSION_RESPONSE = { "server": { "status": "ok", "code": "", "message": "Operation done successfully." }, "data": { "api_info": [ { "bundle_version": "5.7.51.1068" } ] } } FAKE_GET_HOST_PORTS_RESPONSE = { "server": { "status": "ok", "code": "", "message": "Operation done successfully." 
}, "data": { "host_ports": [ { "wwpn": TEST_SOURCE_WWPN_1, "link": {}, "state": "logged in", "hosttype": "LinuxRHEL", "addrdiscovery": "lunpolling", "lbs": "512", "wwnn": "", "login_type": "", "logical_path_established": "", "login_ports": [], "host_id": TEST_HOST_ID, "host": { "name": "OShost:fakehost", "link": {} } } ] } } FAKE_MAP_VOLUME_RESPONSE = { "server": { "status": "ok", "code": "", "message": "Operation done successfully." }, "data": { "mappings": [ { "lunid": TEST_LUN_ID, } ] }, "link": { "rel": "self", "href": ("https://1.1.1.1:8452/api/v1/hosts[id=" + TEST_HOST_ID + "]/mappings/" + TEST_LUN_ID) } } FAKE_GET_IOPORT_RESPONSE = { "server": { "status": "ok", "code": "", "message": "Operation done successfully." }, "data": { "ioports": [ { "id": "I0001", "link": { "rel": "self", "href": "https://1.1.1.1:8452/api/v1/ioports/I0001" }, "state": "online", "protocol": "SCSI-FCP", "wwpn": TEST_SOURCE_WWPN_1, "type": "Fibre Channel-SW", "speed": "8 Gb/s", "loc": "U1400.1B3.RJ03177-P1-C1-T0", "io_enclosure": { "id": "2", "link": {} } } ] } } FAKE_CREATE_HOST_RESPONSE = { "server": { "status": "ok", "code": "", "message": "Operation done successfully." }, "data": { "hosts": [ { "id": TEST_HOST_ID } ] }, "link": { "rel": "self", "href": "https://1.1.1.1:8452/api/v1/hosts/testHost_1" } } FAKE_GET_MAPPINGS_RESPONSE = { "server": { "status": "ok", "code": "", "message": "Operation done successfully." }, "data": { "mappings": [ { "lunid": TEST_LUN_ID, "link": {}, "volume": { "id": TEST_VOLUME_ID, "link": {} } }, { "lunid": "01", "link": {}, "volume": { "id": "0002", "link": {} } } ] } } FAKE_GET_VOLUME_RESPONSE = { "server": { "status": "ok", "code": "", "message": "Operation done successfully." }, "data": { "volumes": [ { "id": TEST_VOLUME_ID, "link": {}, "name": "OSvol:vol_1001", "pool": { "id": TEST_POOL_ID_1, "link": {} } } ] } } FAKE_GENERIC_RESPONSE = { "server": { "status": "ok", "code": "", "message": "Operation done successfully." }, "responses": [ { "server": { "status": "ok", "code": "", "message": "Operation done successfully." 
} } ] } FAKE_DELETE_VOLUME_RESPONSE = FAKE_GENERIC_RESPONSE FAKE_DELETE_PPRC_PAIR_RESPONSE = FAKE_GENERIC_RESPONSE FAKE_FAILBACK_RESPONSE = FAKE_GENERIC_RESPONSE FAKE_FAILOVER_RESPONSE = FAKE_GENERIC_RESPONSE FAKE_CHANGE_VOLUME_RESPONSE = FAKE_GENERIC_RESPONSE FAKE_POST_FLASHCOPIES_RESPONSE = FAKE_GENERIC_RESPONSE FAKE_DELETE_FLASHCOPIES_RESPONSE = FAKE_GENERIC_RESPONSE FAKE_POST_UNFREEZE_FLASHCOPIES_RESPONSE = FAKE_GENERIC_RESPONSE FAKE_CREATE_LCU_RESPONSE = FAKE_GENERIC_RESPONSE FAKE_ASSIGN_HOST_PORT_RESPONSE = FAKE_GENERIC_RESPONSE FAKE_DELETE_MAPPINGS_RESPONSE = FAKE_GENERIC_RESPONSE FAKE_DELETE_HOST_PORTS_RESPONSE = FAKE_GENERIC_RESPONSE FAKE_DELETE_HOSTS_RESPONSE = FAKE_GENERIC_RESPONSE FAKE_PAUSE_RESPONSE = FAKE_GENERIC_RESPONSE FAKE_REST_API_RESPONSES = { TEST_SOURCE_DS8K_IP + '/get': FAKE_GET_REST_VERSION_RESPONSE, TEST_TARGET_DS8K_IP + '/get': FAKE_GET_REST_VERSION_RESPONSE, TEST_SOURCE_DS8K_IP + '/systems/get': FAKE_GET_SYSTEM_RESPONSE_1, TEST_TARGET_DS8K_IP + '/systems/get': FAKE_GET_SYSTEM_RESPONSE_2, TEST_SOURCE_DS8K_IP + '/volumes/post': FAKE_CREATE_VOLUME_RESPONSE, TEST_TARGET_DS8K_IP + '/volumes/post': FAKE_CREATE_VOLUME_RESPONSE, TEST_SOURCE_DS8K_IP + '/volumes/' + TEST_VOLUME_ID + '/get': FAKE_GET_VOLUME_RESPONSE, TEST_TARGET_DS8K_IP + '/volumes/' + TEST_VOLUME_ID + '/get': FAKE_GET_VOLUME_RESPONSE, TEST_SOURCE_DS8K_IP + '/volumes/' + TEST_VOLUME_ID + '/put': FAKE_CHANGE_VOLUME_RESPONSE, TEST_TARGET_DS8K_IP + '/volumes/' + TEST_VOLUME_ID + '/put': FAKE_CHANGE_VOLUME_RESPONSE, TEST_SOURCE_DS8K_IP + '/volumes/' + TEST_VOLUME_ID_2 + '/get': FAKE_GET_VOLUME_RESPONSE, TEST_SOURCE_DS8K_IP + '/volumes/delete': FAKE_DELETE_VOLUME_RESPONSE, TEST_SOURCE_DS8K_IP + '/volumes/' + TEST_VOLUME_ID + '/delete': FAKE_DELETE_VOLUME_RESPONSE, TEST_TARGET_DS8K_IP + '/volumes/' + TEST_VOLUME_ID + '/delete': FAKE_DELETE_VOLUME_RESPONSE, TEST_SOURCE_DS8K_IP + '/volumes/' + TEST_VOLUME_ID_2 + '/delete': FAKE_DELETE_VOLUME_RESPONSE, TEST_SOURCE_DS8K_IP + '/lss/get': FAKE_GET_LSS_RESPONSE, TEST_TARGET_DS8K_IP + '/lss/get': FAKE_GET_LSS_RESPONSE, TEST_SOURCE_DS8K_IP + '/lss/' + TEST_LSS_ID_1 + '/get': FAKE_GET_FB_LSS_RESPONSE_1, TEST_TARGET_DS8K_IP + '/lss/' + TEST_LSS_ID_1 + '/get': FAKE_GET_FB_LSS_RESPONSE_1, TEST_SOURCE_DS8K_IP + '/lss/' + TEST_LSS_ID_2 + '/get': FAKE_GET_FB_LSS_RESPONSE_2, TEST_TARGET_DS8K_IP + '/lss/' + TEST_LSS_ID_2 + '/get': FAKE_GET_FB_LSS_RESPONSE_2, TEST_SOURCE_DS8K_IP + '/lss/' + TEST_LSS_ID_3 + '/get': FAKE_GET_FB_LSS_RESPONSE_3, TEST_TARGET_DS8K_IP + '/lss/' + TEST_LSS_ID_3 + '/get': FAKE_GET_FB_LSS_RESPONSE_3, TEST_SOURCE_DS8K_IP + '/lss/' + TEST_LCU_ID + '/get': FAKE_GET_CKD_LSS_RESPONSE, TEST_TARGET_DS8K_IP + '/lss/' + TEST_LCU_ID + '/get': FAKE_GET_CKD_LSS_RESPONSE, TEST_SOURCE_DS8K_IP + '/lss/fb/get': FAKE_GET_FB_LSS_RESPONSE_1, TEST_SOURCE_DS8K_IP + '/lss/ckd/get': FAKE_GET_CKD_LSS_RESPONSE, TEST_SOURCE_DS8K_IP + '/lss/post': FAKE_CREATE_LCU_RESPONSE, TEST_SOURCE_DS8K_IP + '/pools/' + TEST_POOL_ID_1 + '/get': FAKE_GET_POOL_RESPONSE_1, TEST_TARGET_DS8K_IP + '/pools/' + TEST_POOL_ID_1 + '/get': FAKE_GET_POOL_RESPONSE_1, TEST_SOURCE_DS8K_IP + '/pools/' + TEST_POOL_ID_2 + '/get': FAKE_GET_POOL_RESPONSE_2, TEST_TARGET_DS8K_IP + '/pools/' + TEST_POOL_ID_2 + '/get': FAKE_GET_POOL_RESPONSE_2, TEST_SOURCE_DS8K_IP + '/pools/' + TEST_ECKD_POOL_ID + '/get': FAKE_GET_ECKD_POOL_RESPONSE, TEST_TARGET_DS8K_IP + '/pools/' + TEST_ECKD_POOL_ID + '/get': FAKE_GET_ECKD_POOL_RESPONSE, TEST_SOURCE_DS8K_IP + '/tokens/post': FAKE_GET_TOKEN_RESPONSE, TEST_TARGET_DS8K_IP + '/tokens/post': 
FAKE_GET_TOKEN_RESPONSE, TEST_SOURCE_DS8K_IP + '/cs/pprcs/paths/' + TEST_PPRC_PATH_ID_1 + '/get': FAKE_GET_PPRC_PATH_RESPONSE_1, TEST_TARGET_DS8K_IP + '/cs/pprcs/paths/' + TEST_PPRC_PATH_ID_2 + '/get': FAKE_GET_PPRC_PATH_RESPONSE_2, TEST_SOURCE_DS8K_IP + '/cs/pprcs/paths/' + TEST_ECKD_PPRC_PATH_ID + '/get': FAKE_GET_ECKD_PPRC_PATH_RESPONSE, TEST_SOURCE_DS8K_IP + '/cs/pprcs/paths/get': FAKE_GET_PPRC_PATH_RESPONSE, TEST_SOURCE_DS8K_IP + '/cs/pprcs/get': FAKE_GET_PPRCS_RESPONSE, TEST_TARGET_DS8K_IP + '/cs/pprcs/get': FAKE_GET_PPRCS_RESPONSE, TEST_SOURCE_DS8K_IP + '/cs/pprcs/post': FAKE_FAILOVER_RESPONSE, TEST_TARGET_DS8K_IP + '/cs/pprcs/post': FAKE_FAILOVER_RESPONSE, TEST_SOURCE_DS8K_IP + '/cs/pprcs/delete/post': FAKE_DELETE_PPRC_PAIR_RESPONSE, TEST_TARGET_DS8K_IP + '/cs/pprcs/delete/post': FAKE_FAILBACK_RESPONSE, TEST_SOURCE_DS8K_IP + '/cs/pprcs/resume/post': FAKE_FAILBACK_RESPONSE, TEST_TARGET_DS8K_IP + '/cs/pprcs/resume/post': FAKE_FAILBACK_RESPONSE, TEST_SOURCE_DS8K_IP + '/cs/pprcs/pause/post': FAKE_PAUSE_RESPONSE, TEST_TARGET_DS8K_IP + '/cs/pprcs/pause/post': FAKE_PAUSE_RESPONSE, TEST_SOURCE_DS8K_IP + '/cs/flashcopies/post': FAKE_POST_FLASHCOPIES_RESPONSE, TEST_SOURCE_DS8K_IP + '/cs/flashcopies/' + TEST_VOLUME_ID + ":" + TEST_VOLUME_ID_2 + '/delete': FAKE_DELETE_FLASHCOPIES_RESPONSE, TEST_SOURCE_DS8K_IP + '/cs/flashcopies/unfreeze/post': FAKE_POST_UNFREEZE_FLASHCOPIES_RESPONSE, TEST_SOURCE_DS8K_IP + '/cs/pprcs/physical_links/get': FAKE_GET_PHYSICAL_LINKS_RESPONSE, TEST_SOURCE_DS8K_IP + '/host_ports/get': FAKE_GET_HOST_PORTS_RESPONSE, TEST_SOURCE_DS8K_IP + '/hosts%5Bid=' + TEST_HOST_ID + '%5D/mappings/post': FAKE_MAP_VOLUME_RESPONSE, TEST_SOURCE_DS8K_IP + '/ioports/get': FAKE_GET_IOPORT_RESPONSE, TEST_TARGET_DS8K_IP + '/ioports/get': FAKE_GET_IOPORT_RESPONSE, TEST_SOURCE_DS8K_IP + '/hosts/post': FAKE_CREATE_HOST_RESPONSE, TEST_SOURCE_DS8K_IP + '/host_ports/assign/post': FAKE_ASSIGN_HOST_PORT_RESPONSE, TEST_SOURCE_DS8K_IP + '/hosts%5Bid=' + TEST_HOST_ID + '%5D/mappings/get': FAKE_GET_MAPPINGS_RESPONSE, TEST_SOURCE_DS8K_IP + '/hosts%5Bid=' + TEST_HOST_ID + '%5D/mappings/' + TEST_LUN_ID + '/delete': FAKE_DELETE_MAPPINGS_RESPONSE, TEST_TARGET_DS8K_IP + '/hosts%5Bid=' + TEST_HOST_ID + '%5D/mappings/' + TEST_LUN_ID + '/delete': FAKE_DELETE_MAPPINGS_RESPONSE, TEST_SOURCE_DS8K_IP + '/host_ports/' + TEST_SOURCE_WWPN_2 + '/delete': FAKE_DELETE_HOST_PORTS_RESPONSE, TEST_TARGET_DS8K_IP + '/host_ports/' + TEST_SOURCE_WWPN_2 + '/delete': FAKE_DELETE_HOST_PORTS_RESPONSE, TEST_SOURCE_DS8K_IP + '/hosts%5Bid=' + TEST_HOST_ID + '%5D/delete': FAKE_DELETE_HOSTS_RESPONSE, TEST_TARGET_DS8K_IP + '/hosts%5Bid=' + TEST_HOST_ID + '%5D/delete': FAKE_DELETE_HOSTS_RESPONSE } class FakeDefaultRESTConnector(restclient.DefaultRESTConnector): """Fake the Default Connector.""" def connect(self): pass def send(self, method='', url='', headers=None, payload='', timeout=900): host = url.split('https://')[1].split(':8452')[0] endpoint = url.split('v1')[1].split('?')[0] start = url.index('type') if 'type=' in url else None if start: type_str = url[start:].split('&')[0].split('=')[1] else: type_str = '' request = host + endpoint + '/' + type_str + method.lower() return 200, json.dumps(FAKE_REST_API_RESPONSES[request]) class FakeRESTScheduler(restclient.RESTScheduler): """Fake REST Scheduler.""" def __init__(self, host, user, passw, connector_obj, verify=False): self.token = '' self.host = host self.port = '8452' self.user = user self.passw = passw self.connector = connector_obj or FakeDefaultRESTConnector(verify) self.connect() 
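# FakeDefaultRESTConnector.send() above answers every REST call from the
# FAKE_REST_API_RESPONSES table: it reduces the request URL to a lookup key
# and returns the canned payload as JSON.  A minimal sketch of one lookup:
#
#     connector = FakeDefaultRESTConnector(False)
#     status, body = connector.send(
#         method='GET', url='https://1.1.1.1:8452/api/v1/systems')
#     # status == 200 and body == json.dumps(FAKE_GET_SYSTEM_RESPONSE_1),
#     # because the URL is reduced to the key '1.1.1.1/systems/get'.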
class FakeDS8KCommonHelper(helper.DS8KCommonHelper): """Fake IBM DS8K Helper.""" def __init__(self, conf, HTTPConnectorObject=None): self.conf = conf self._connector_obj = HTTPConnectorObject self._connection_type = self._get_value('connection_type') self._storage_pools = None self.backend = {} self.setup() self._existing_pool_ids = [TEST_POOL_ID_1, TEST_POOL_ID_2, TEST_ECKD_POOL_ID] def _get_value(self, key): value = getattr(self.conf, key, None) if not value and key not in self.OPTIONAL_PARAMS: value = self.conf.get(key) return value def _create_client(self): self._client = FakeRESTScheduler(self._get_value('san_ip'), self._get_value('san_login'), self._get_value('san_password'), None, True) self.backend['rest_version'] = self._get_version()['bundle'] class FakeDS8KECKDHelper(FakeDS8KCommonHelper, helper.DS8KECKDHelper): """Fake IBM DS8K ECKD Helper.""" pass class FakeDS8KReplSourceHelper(FakeDS8KCommonHelper, helper.DS8KReplicationSourceHelper): """Fake IBM DS8K Replication Target Helper.""" pass class FakeDS8KReplTargetHelper(FakeDS8KReplSourceHelper, helper.DS8KReplicationTargetHelper): """Fake IBM DS8K Replication Target Helper.""" pass class FakeDS8KReplTargetECKDHelper(FakeDS8KECKDHelper, helper.DS8KReplicationTargetECKDHelper): """Fake IBM DS8K Replication Target ECKD Helper.""" pass class FakeReplication(replication.Replication): """Fake Replication class.""" def __init__(self, source_helper, device): self._source_helper = source_helper if device.get('connection_type') == storage.XIV_CONNECTION_TYPE_FC: self._target_helper = FakeDS8KReplTargetHelper(device) else: self._target_helper = FakeDS8KReplTargetECKDHelper(device) self._mm_manager = replication.MetroMirrorManager(self._source_helper, self._target_helper) class FakeDS8KProxy(ds8kproxy.DS8KProxy): """Fake IBM DS8K Proxy Driver.""" def __init__(self, storage_info, logger, exception, driver=None, active_backend_id=None, HTTPConnectorObject=None, host=TEST_HOST_1): with mock.patch.object(proxy.IBMStorageProxy, '_get_safely_from_configuration') as get_conf: get_conf.side_effect = [{}, False] proxy.IBMStorageProxy.__init__(self, storage_info, logger, exception, driver, active_backend_id) self._helper = None self._replication = None self._connector_obj = HTTPConnectorObject self._replication_enabled = False self._active_backend_id = active_backend_id self.configuration = driver.configuration self.consisgroup_cache = {} self._host = host self.setup(None) def setup(self, context): connection_type = self.configuration.connection_type repl_devices = getattr(self.configuration, 'replication_device', None) if connection_type == storage.XIV_CONNECTION_TYPE_FC: if not repl_devices: self._helper = FakeDS8KCommonHelper(self.configuration, self._connector_obj) else: self._helper = FakeDS8KReplSourceHelper( self.configuration, self._connector_obj) else: self._helper = FakeDS8KECKDHelper(self.configuration, self._connector_obj) # set up replication target if repl_devices: self._do_replication_setup(repl_devices, self._helper) self._check_async_cloned_volumes() def _do_replication_setup(self, devices, src_helper): self._replication = FakeReplication(src_helper, devices[0]) if self._active_backend_id: self._replication.switch_source_and_target_client() else: self._replication.check_physical_links() self._replication_enabled = True @ddt.ddt class DS8KProxyTest(test.TestCase): """Test proxy for DS8K volume driver.""" VERSION = "2.0.0" def setUp(self): """Initialize IBM DS8K Driver.""" super(DS8KProxyTest, self).setUp() self.ctxt = 
context.get_admin_context() self.configuration = mock.Mock(conf.Configuration) self.configuration.connection_type = storage.XIV_CONNECTION_TYPE_FC self.configuration.chap = 'disabled' self.configuration.san_ip = TEST_SOURCE_DS8K_IP self.configuration.management_ips = '' self.configuration.san_login = 'fake' self.configuration.san_clustername = TEST_POOL_ID_1 self.configuration.san_password = 'fake' self.configuration.volume_backend_name = TEST_VOLUME_BACKEND_NAME self.configuration.ds8k_host_type = 'auto' self.configuration.reserved_percentage = 0 self.storage_info = mock.MagicMock() self.logger = mock.MagicMock() self.exception = mock.MagicMock() self.patch('eventlet.sleep') def _create_volume(self, **kwargs): properties = { 'host': TEST_HOST_2, 'size': 1, 'volume_type_id': self.vt['id'] } for p in properties.keys(): if p not in kwargs: kwargs[p] = properties[p] return testutils.create_volume(self.ctxt, **kwargs) def _create_snapshot(self, **kwargs): return testutils.create_snapshot(self.ctxt, **kwargs) def _create_group(self, **kwargs): return testutils.create_group(self.ctxt, **kwargs) def _create_group_snapshot(self, group_id, **kwargs): return testutils.create_group_snapshot(self.ctxt, group_id=group_id, **kwargs) def test_check_host_type(self): """host type should be a valid one.""" self.configuration.ds8k_host_type = 'fake_OS' self.assertRaises(exception.InvalidParameterValue, FakeDS8KCommonHelper, self.configuration, None) @ddt.data('25- 27-', '-25- 27', '25-27 122', '25, 26', '25-#28') def test_get_lss_ids_for_cg_1(self, lss_range_for_cg): """lss_range_for_cg should have the right format.""" self.configuration.lss_range_for_cg = lss_range_for_cg self.assertRaises(exception.InvalidParameterValue, FakeDS8KCommonHelper, self.configuration, None) def test_get_lss_ids_for_cg_2(self): """get value from lss_range_for_cg""" self.configuration.lss_range_for_cg = '25- 27 30 32 85-88 EF' cmn_helper = FakeDS8KCommonHelper(self.configuration, None) lss_ids = cmn_helper._get_lss_ids_for_cg() test_lss_ids = set(['25', '26', '27', '30', '32', '85', '86', '87', '88', 'EF']) self.assertEqual(test_lss_ids, lss_ids) @mock.patch.object(helper.DS8KCommonHelper, 'get_systems') def test_verify_version_of_8_0_1(self, mock_get_systems): """8.0.1 should not use this driver.""" mock_get_systems.return_value = { "id": TEST_SOURCE_SYSTEM_UNIT, "release": "8.0.1", "wwnn": TEST_SOURCE_WWNN, } self.assertRaises(exception.VolumeDriverException, FakeDS8KCommonHelper, self.configuration, None) @mock.patch.object(helper.DS8KCommonHelper, '_get_version') def test_verify_rest_version_for_5_7_fb(self, mock_get_version): """test the min version of REST for fb volume in 7.x.""" mock_get_version.return_value = { "bundle": "87.50.38.0" } self.assertRaises(exception.VolumeDriverException, FakeDS8KCommonHelper, self.configuration, None) @mock.patch.object(helper.DS8KCommonHelper, '_get_version') def test_verify_rest_version_for_5_8_fb(self, mock_get_version): """test the min version of REST for fb volume in 8.1.""" mock_get_version.return_value = { "bundle": "88.10.112.0" } FakeDS8KCommonHelper(self.configuration, None) @mock.patch.object(helper.DS8KECKDHelper, '_get_version') def test_verify_rest_version_for_5_7_eckd(self, mock_get_version): """test the min version of REST for eckd volume in 7.x.""" self.configuration.connection_type = ( storage.XIV_CONNECTION_TYPE_FC_ECKD) self.configuration.ds8k_devadd_unitadd_mapping = 'C4-10' self.configuration.ds8k_ssid_prefix = 'FF' self.configuration.san_clustername = 
TEST_ECKD_POOL_ID mock_get_version.return_value = { "bundle": "87.50.22.0" } self.assertRaises(exception.VolumeDriverException, FakeDS8KECKDHelper, self.configuration, None) @mock.patch.object(helper.DS8KECKDHelper, '_get_version') def test_verify_rest_version_for_5_8_eckd_1(self, mock_get_version): """test the min version of REST for eckd volume in 8.1.""" self.configuration.connection_type = ( storage.XIV_CONNECTION_TYPE_FC_ECKD) self.configuration.ds8k_devadd_unitadd_mapping = 'C4-10' self.configuration.ds8k_ssid_prefix = 'FF' self.configuration.san_clustername = TEST_ECKD_POOL_ID mock_get_version.return_value = { "bundle": "88.10.112.0" } self.assertRaises(exception.VolumeDriverException, FakeDS8KECKDHelper, self.configuration, None) @mock.patch.object(helper.DS8KECKDHelper, '_get_version') def test_verify_rest_version_for_5_8_eckd_2(self, mock_get_version): """test the min version of REST for eckd volume in 8.2.""" self.configuration.connection_type = ( storage.XIV_CONNECTION_TYPE_FC_ECKD) self.configuration.ds8k_devadd_unitadd_mapping = 'C4-10' self.configuration.ds8k_ssid_prefix = 'FF' self.configuration.san_clustername = TEST_ECKD_POOL_ID mock_get_version.return_value = { "bundle": "88.20.40.0" } self.assertRaises(exception.VolumeDriverException, FakeDS8KECKDHelper, self.configuration, None) def test_verify_pools_with_wrong_type(self): """pool should be set according to the connection type.""" self.configuration.san_clustername = TEST_POOLS_STR self.configuration.connection_type = ( storage.XIV_CONNECTION_TYPE_FC_ECKD) self.assertRaises(exception.InvalidParameterValue, FakeDS8KCommonHelper, self.configuration, None) def test_verify_pools_with_wrong_type_2(self): """set wrong connection type should raise exception.""" self.configuration.connection_type = 'fake_type' self.assertRaises(exception.InvalidParameterValue, FakeDS8KCommonHelper, self.configuration, None) def test_get_storage_information(self): """should get id, wwnn and release fields from system.""" cmn_helper = FakeDS8KCommonHelper(self.configuration, None) self.assertIn('storage_unit', cmn_helper.backend.keys()) self.assertIn('storage_wwnn', cmn_helper.backend.keys()) self.assertIn('storage_version', cmn_helper.backend.keys()) def test_update_stats(self): """verify the fields returned by _update_stats.""" self.driver = FakeDS8KProxy(self.storage_info, self.logger, self.exception, self) self.driver.setup(self.ctxt) expected_result = { "volume_backend_name": TEST_VOLUME_BACKEND_NAME, "serial_number": TEST_SOURCE_SYSTEM_UNIT, "extent_pools": TEST_POOL_ID_1, "vendor_name": 'IBM', "driver_version": 'IBM Storage (v2.0.0)', "storage_protocol": storage.XIV_CONNECTION_TYPE_FC, "total_capacity_gb": 10, "free_capacity_gb": 10, "reserved_percentage": 0, "consistent_group_snapshot_enabled": True, "group_replication_enabled": True, "consistent_group_replication_enabled": True, "multiattach": True, "backend_state": 'up' } self.driver._update_stats() self.assertDictEqual(expected_result, self.driver.meta['stat']) def test_update_stats_when_driver_initialize_failed(self): """update stats raises exception if driver initialized failed.""" with mock.patch(__name__ + '.FakeDS8KCommonHelper') as mock_helper: mock_helper.return_value = None self.driver = FakeDS8KProxy(self.storage_info, self.logger, self.exception, self) self.driver.setup(self.ctxt) self.assertRaises(exception.CinderException, self.driver._update_stats) def test_update_stats_when_can_not_get_pools(self): """update stats raises exception if get pools failed.""" self.driver = 
FakeDS8KProxy(self.storage_info, self.logger, self.exception, self) self.driver.setup(self.ctxt) with mock.patch.object(helper.DS8KCommonHelper, 'get_pools') as mock_get_pools: mock_get_pools.return_value = [] stats = self.driver.get_volume_stats() self.assertEqual('down', stats['backend_state']) self.assertEqual('None', stats['extent_pools']) self.assertEqual(0, stats['total_capacity_gb']) self.assertEqual(0, stats['free_capacity_gb']) @mock.patch.object(helper.DS8KCommonHelper, 'get_pools') def test_get_volume_status(self, mock_get_pools): self.configuration.san_clustername = 'P0, P1' self.driver = FakeDS8KProxy(self.storage_info, self.logger, self.exception, self) from collections import OrderedDict mock_get_pools.side_effect = [OrderedDict([('P0', {'node': 0, 'cap': 21474836480, 'capavail': 21474836480, 'name': 'pool1', 'stgtype': 'fb'}), ('P1', {'node': 1, 'cap': 21474836480, 'capavail': 21474836480, 'name': 'pool1', 'stgtype': 'fb'})]), OrderedDict([('P1', {'node': 1, 'cap': 21474836480, 'capavail': 21474836480, 'name': 'pool1', 'stgtype': 'fb'})])] self.driver.setup(self.ctxt) expected_result = { "volume_backend_name": TEST_VOLUME_BACKEND_NAME, "serial_number": TEST_SOURCE_SYSTEM_UNIT, "extent_pools": 'P1', "vendor_name": 'IBM', "driver_version": 'IBM Storage (v2.0.0)', "storage_protocol": storage.XIV_CONNECTION_TYPE_FC, "total_capacity_gb": 20, "free_capacity_gb": 20, "reserved_percentage": 0, "consistent_group_snapshot_enabled": True, "group_replication_enabled": True, "consistent_group_replication_enabled": True, "multiattach": True, "backend_state": 'up' } stats = self.driver.get_volume_stats() self.assertDictEqual(expected_result, stats) def test_find_pool_should_choose_biggest_pool(self): """create volume should choose biggest pool.""" self.configuration.san_clustername = TEST_POOLS_STR cmn_helper = FakeDS8KCommonHelper(self.configuration, None) pool_id, lss_id = cmn_helper.find_pool_lss_pair(None, False, set()) self.assertEqual(TEST_POOL_ID_1, pool_id) @mock.patch.object(helper.DS8KCommonHelper, 'get_all_lss') def test_find_lss_when_lss_in_pprc_path(self, mock_get_all_lss): """find LSS when existing LSSs are in PPRC path.""" mock_get_all_lss.return_value = [{ "id": TEST_LSS_ID_1, "group": "0", "addrgrp": "0", "type": "fb", "configvols": "0" }] cmn_helper = FakeDS8KCommonHelper(self.configuration, None) pool_id, lss_id = cmn_helper.find_pool_lss_pair(None, False, set()) self.assertNotEqual(TEST_LSS_ID_1, lss_id) @mock.patch.object(helper.DS8KCommonHelper, 'get_all_lss') def test_find_lss_when_existing_lss_available(self, mock_get_all_lss): """find LSS when existing LSSs are available.""" mock_get_all_lss.return_value = [{ "id": TEST_LSS_ID_2, "group": "0", "addrgrp": "0", "type": "fb", "configvols": "0" }] cmn_helper = FakeDS8KCommonHelper(self.configuration, None) pool_id, lss_id = cmn_helper.find_pool_lss_pair(None, False, set()) self.assertEqual(TEST_LSS_ID_2, lss_id) @mock.patch.object(helper.DS8KCommonHelper, 'get_all_lss') def test_find_lss_should_choose_emptiest_one(self, mock_get_all_lss): """find LSS should choose the emptiest one.""" mock_get_all_lss.return_value = [ { "id": TEST_LSS_ID_1, "group": "0", "addrgrp": "0", "type": "fb", "configvols": "200" }, { "id": TEST_LSS_ID_2, "group": "0", "addrgrp": "0", "type": "fb", "configvols": "100" }, { "id": TEST_LSS_ID_3, "group": "0", "addrgrp": "0", "type": "fb", "configvols": "150" } ] cmn_helper = FakeDS8KCommonHelper(self.configuration, None) pool_id, lss_id = cmn_helper.find_pool_lss_pair(None, False, set()) 
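        # TEST_LSS_ID_2 carries the fewest configured volumes (100, against
        # 200 and 150 in the mocked data above), so it is the LSS the helper
        # is expected to pick.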
self.assertEqual(TEST_LSS_ID_2, lss_id) @mock.patch.object(helper.DS8KCommonHelper, 'get_all_lss') @mock.patch.object(helper.DS8KCommonHelper, '_find_from_nonexistent_lss') def test_find_lss_when_no_existing_lss_available(self, mock_find_lss, mock_get_all_lss): """find LSS when no existing LSSs are available.""" mock_get_all_lss.return_value = [{ "id": TEST_LSS_ID_2, "group": "0", "addrgrp": "0", "type": "fb", "configvols": "256" }] cmn_helper = FakeDS8KCommonHelper(self.configuration, None) pool_id, lss_id = cmn_helper.find_pool_lss_pair(None, False, set()) self.assertTrue(mock_find_lss.called) @mock.patch.object(helper.DS8KCommonHelper, '_find_lss') def test_find_lss_when_all_lss_exhausted(self, mock_find_lss): """when all LSSs are exhausted should raise exception.""" cmn_helper = FakeDS8KCommonHelper(self.configuration, None) mock_find_lss.return_value = None self.assertRaises(restclient.LssIDExhaustError, cmn_helper.find_pool_lss_pair, None, False, None) def test_find_lss_for_volume_which_belongs_to_cg(self): """find lss for volume, which is in empty CG.""" self.configuration.lss_range_for_cg = '20-23' self.driver = FakeDS8KProxy(self.storage_info, self.logger, self.exception, self) self.driver.setup(self.ctxt) group_type = group_types.create( self.ctxt, 'group', {'consistent_group_snapshot_enabled': ' True'} ) group = self._create_group(host=TEST_GROUP_HOST, group_type_id=group_type.id) volume = self._create_volume(group_id=group.id) lun = ds8kproxy.Lun(volume) self.driver._create_lun_helper(lun) pid, lss = lun.pool_lss_pair['source'] self.assertIn(lss, ('20', '21', '22', '23')) def test_find_lss_for_volume_which_belongs_to_cg2(self): """find lss for volume, which is in CG having volumes.""" self.configuration.lss_range_for_cg = '20-23' self.driver = FakeDS8KProxy(self.storage_info, self.logger, self.exception, self) self.driver.setup(self.ctxt) group_type = group_types.create( self.ctxt, 'group', {'consistent_group_snapshot_enabled': ' True'} ) group = self._create_group(host=TEST_GROUP_HOST, group_type_id=group_type.id) location = str({'vol_hex_id': '2000'}) self._create_volume(group_id=group.id, provider_location=location) volume = self._create_volume(group_id=group.id) lun = ds8kproxy.Lun(volume) self.driver._create_lun_helper(lun) pid, lss = lun.pool_lss_pair['source'] self.assertEqual(lss, '20') def test_find_lss_for_volume_which_belongs_to_cg3(self): """find lss for volume, and other CGs have volumes.""" self.configuration.lss_range_for_cg = '20-23' self.driver = FakeDS8KProxy(self.storage_info, self.logger, self.exception, self) self.driver.setup(self.ctxt) group_type = group_types.create( self.ctxt, 'group', {'consistent_group_snapshot_enabled': ' True'} ) group = self._create_group(host=TEST_GROUP_HOST, group_type_id=group_type.id) volume = self._create_volume(group_id=group.id) group_type2 = group_types.create( self.ctxt, 'group2', {'consistent_group_snapshot_enabled': ' True'} ) group2 = self._create_group(host=TEST_GROUP_HOST, group_type_id=group_type2.id) location = str({'vol_hex_id': '2000'}) self._create_volume(group_id=group2.id, provider_location=location) lun = ds8kproxy.Lun(volume) self.driver._create_lun_helper(lun) pid, lss = lun.pool_lss_pair['source'] self.assertNotEqual(lss, '20') def test_find_lss_for_volume_which_belongs_to_cg4(self): """find lss for volume, and other CGs are in error state.""" self.configuration.lss_range_for_cg = '20' self.driver = FakeDS8KProxy(self.storage_info, self.logger, self.exception, self) self.driver.setup(self.ctxt) group_type = 
group_types.create( self.ctxt, 'group', {'consistent_group_snapshot_enabled': ' True'} ) group = self._create_group(host=TEST_GROUP_HOST, group_type_id=group_type.id) volume = self._create_volume(group_id=group.id) group_type2 = group_types.create( self.ctxt, 'group2', {'consistent_group_snapshot_enabled': ' True'} ) group2 = self._create_group(status='error', host=TEST_GROUP_HOST, group_type_id=group_type2.id) location = str({'vol_hex_id': '2000'}) self._create_volume(group_id=group2.id, provider_location=location) lun = ds8kproxy.Lun(volume) self.driver._create_lun_helper(lun) pid, lss = lun.pool_lss_pair['source'] # error group will be ignored, so LSS 20 can be used. self.assertEqual(lss, '20') @mock.patch.object(helper.DS8KCommonHelper, '_create_lun') def test_create_volume_but_lss_full_afterwards(self, mock_create_lun): """create volume in a LSS which is full afterwards.""" self.driver = FakeDS8KProxy(self.storage_info, self.logger, self.exception, self) self.driver.setup(self.ctxt) vol_type = volume_types.create(self.ctxt, 'VOL_TYPE', {}) volume = self._create_volume(volume_type_id=vol_type.id) mock_create_lun.side_effect = [ restclient.LssFullException('LSS is full.'), TEST_VOLUME_ID] vol = self.driver.create_volume(volume) self.assertEqual( TEST_VOLUME_ID, ast.literal_eval(vol['provider_location'])['vol_hex_id']) @mock.patch.object(helper.DS8KCommonHelper, '_create_lun') def test_create_volume_for_cg_but_lss_full(self, mock_create_lun): """Just reserve one LSS for CG.""" self.configuration.lss_range_for_cg = '22' self.driver = FakeDS8KProxy(self.storage_info, self.logger, self.exception, self) self.driver.setup(self.ctxt) group_type = group_types.create( self.ctxt, 'group', {'consistent_group_snapshot_enabled': ' True'} ) group = self._create_group(host=TEST_GROUP_HOST, group_type_id=group_type.id) vol_type = volume_types.create(self.ctxt, 'VOL_TYPE', {}) volume = self._create_volume(volume_type_id=vol_type.id, group_id=group.id) mock_create_lun.side_effect = [ restclient.LssFullException('LSS is full.'), TEST_VOLUME_ID] self.assertRaises(exception.VolumeDriverException, self.driver.create_volume, volume) def test_create_volume_of_FB512(self): """create volume which type is FB 512.""" self.driver = FakeDS8KProxy(self.storage_info, self.logger, self.exception, self) self.driver.setup(self.ctxt) vol_type = volume_types.create(self.ctxt, 'VOL_TYPE', {}) volume = self._create_volume(volume_type_id=vol_type.id) vol = self.driver.create_volume(volume) self.assertEqual('FB 512', vol['metadata']['data_type']) self.assertEqual(TEST_VOLUME_ID, vol['metadata']['vol_hex_id']) self.assertEqual( TEST_VOLUME_ID, ast.literal_eval(vol['provider_location'])['vol_hex_id']) def test_create_volume_of_OS400_050(self): """create volume which type is OS400 050.""" self.driver = FakeDS8KProxy(self.storage_info, self.logger, self.exception, self) self.driver.setup(self.ctxt) extra_spec = {'drivers:os400': '050'} vol_type = volume_types.create(self.ctxt, 'VOL_TYPE', extra_spec) volume = self._create_volume(volume_type_id=vol_type.id) vol = self.driver.create_volume(volume) self.assertEqual( TEST_VOLUME_ID, ast.literal_eval(vol['provider_location'])['vol_hex_id']) self.assertEqual('050 FB 520UV', vol['metadata']['data_type']) def test_create_volume_when_specify_area(self): """create volume and put it in specific pool and lss.""" self.driver = FakeDS8KProxy(self.storage_info, self.logger, self.exception, self) self.driver.setup(self.ctxt) vol_type = volume_types.create(self.ctxt, 'VOL_TYPE', { 
'drivers:storage_pool_ids': TEST_POOL_ID_1, 'drivers:storage_lss_ids': TEST_LSS_ID_1 }) volume = self._create_volume(volume_type_id=vol_type.id) lun = ds8kproxy.Lun(volume) pool, lss = self.driver._find_pool_lss_pair_from_spec(lun, set()) self.assertEqual(TEST_POOL_ID_1, pool) self.assertEqual(TEST_LSS_ID_1, lss) def test_create_volume_only_specify_lss(self): """create volume and put it in specific lss.""" self.driver = FakeDS8KProxy(self.storage_info, self.logger, self.exception, self) self.driver.setup(self.ctxt) vol_type = volume_types.create(self.ctxt, 'VOL_TYPE', { 'drivers:storage_lss_ids': TEST_LSS_ID_1 }) volume = self._create_volume(volume_type_id=vol_type.id) lun = ds8kproxy.Lun(volume) pool, lss = self.driver._find_pool_lss_pair_from_spec(lun, set()) # if not specify pool, choose pools set in configuration file. self.assertIn(pool, self.configuration.san_clustername.split(',')) self.assertEqual(TEST_LSS_ID_1, lss) def test_create_volume_only_specify_pool(self): """create volume and put it in specific pool.""" self.driver = FakeDS8KProxy(self.storage_info, self.logger, self.exception, self) self.driver.setup(self.ctxt) vol_type = volume_types.create(self.ctxt, 'VOL_TYPE', { 'drivers:storage_pool_ids': TEST_POOL_ID_1 }) volume = self._create_volume(volume_type_id=vol_type.id) lun = ds8kproxy.Lun(volume) pool, lss = self.driver._find_pool_lss_pair_from_spec(lun, set()) self.assertEqual(TEST_POOL_ID_1, pool) def test_create_volume_but_specify_wrong_lss_id(self): """create volume, but specify a wrong lss id.""" self.driver = FakeDS8KProxy(self.storage_info, self.logger, self.exception, self) self.driver.setup(self.ctxt) vol_type = volume_types.create(self.ctxt, 'VOL_TYPE', { 'drivers:storage_pool_ids': TEST_POOL_ID_1, 'drivers:storage_lss_ids': '100' }) volume = self._create_volume(volume_type_id=vol_type.id) lun = ds8kproxy.Lun(volume) self.assertRaises(exception.InvalidParameterValue, self.driver._find_pool_lss_pair_from_spec, lun, set()) @mock.patch.object(helper.DS8KCommonHelper, '_create_lun') def test_create_eckd_volume(self, mock_create_lun): """create volume which type is ECKD.""" self.configuration.connection_type = ( storage.XIV_CONNECTION_TYPE_FC_ECKD) self.configuration.ds8k_devadd_unitadd_mapping = 'C4-10' self.configuration.ds8k_ssid_prefix = 'FF' self.configuration.san_clustername = TEST_ECKD_POOL_ID self.driver = FakeDS8KProxy(self.storage_info, self.logger, self.exception, self) self.driver.setup(self.ctxt) extra_spec = {'drivers:thin_provision': 'False'} vol_type = volume_types.create(self.ctxt, 'VOL_TYPE', extra_spec) volume = self._create_volume(volume_type_id=vol_type.id) mock_create_lun.return_value = TEST_ECKD_VOLUME_ID vol = self.driver.create_volume(volume) location = ast.literal_eval(vol['provider_location']) self.assertEqual('3390', vol['metadata']['data_type']) self.assertEqual(TEST_ECKD_VOLUME_ID, vol['metadata']['vol_hex_id']) self.assertEqual(TEST_ECKD_VOLUME_ID, location['vol_hex_id']) @mock.patch.object(helper.DS8KCommonHelper, 'get_physical_links') def test_check_physical_links(self, mock_get_physical_links): """check physical links when user do not connect DS8K.""" src_helper = FakeDS8KCommonHelper(self.configuration, None) repl = FakeReplication(src_helper, TEST_REPLICATION_DEVICE) mock_get_physical_links.return_value = None self.assertRaises(exception.CinderException, repl.check_physical_links) @mock.patch.object(helper.DS8KCommonHelper, 'get_physical_links') def test_check_physical_links2(self, mock_get_physical_links): """check physical links if 
more than eight physical links.""" src_helper = FakeDS8KCommonHelper(self.configuration, None) repl = FakeReplication(src_helper, TEST_REPLICATION_DEVICE) mock_get_physical_links.return_value = [ {"source_port_id": 'I0001', "target_port_id": 'I0001'}, {"source_port_id": 'I0002', "target_port_id": 'I0002'}, {"source_port_id": 'I0003', "target_port_id": 'I0003'}, {"source_port_id": 'I0004', "target_port_id": 'I0004'}, {"source_port_id": 'I0005', "target_port_id": 'I0005'}, {"source_port_id": 'I0006', "target_port_id": 'I0006'}, {"source_port_id": 'I0007', "target_port_id": 'I0007'}, {"source_port_id": 'I0008', "target_port_id": 'I0008'}, {"source_port_id": 'I0009', "target_port_id": 'I0009'} ] repl.check_physical_links() port_pairs = repl._target_helper.backend['port_pairs'] self.assertEqual(8, len(port_pairs)) def test_check_physical_links3(self): """check physical links when user set them in configure file.""" src_helper = FakeDS8KCommonHelper(self.configuration, None) device = TEST_REPLICATION_DEVICE.copy() device['port_pairs'] = TEST_SOURCE_IOPORT + '-' + TEST_TARGET_IOPORT repl = FakeReplication(src_helper, device) expected_port_pairs = [ {'source_port_id': TEST_SOURCE_IOPORT, 'target_port_id': TEST_TARGET_IOPORT} ] repl.check_physical_links() self.assertEqual(expected_port_pairs, repl._target_helper.backend['port_pairs']) @mock.patch.object(proxy.IBMStorageProxy, '__init__') def test_do_replication_setup(self, mock_init): """driver supports only one replication target.""" replication_device = ['fake_device_1', 'fake_device_2'] ds8k_proxy = ds8kproxy.DS8KProxy(self.storage_info, self.logger, self.exception, self) self.assertRaises(exception.InvalidParameterValue, ds8k_proxy._do_replication_setup, replication_device, None) @mock.patch.object(proxy.IBMStorageProxy, '__init__') @mock.patch.object(replication, 'Replication') @mock.patch.object(replication.Replication, 'switch_source_and_target_client') def test_switch_backend_connection(self, mock_switch_connection, mock_replication, mock_proxy_init): """driver should switch connection if it has been failed over.""" ds8k_proxy = ds8kproxy.DS8KProxy(self.storage_info, self.logger, self.exception, self, TEST_TARGET_DS8K_IP) src_helper = FakeDS8KCommonHelper(self.configuration, None) mock_replication.return_value = FakeReplication( src_helper, TEST_REPLICATION_DEVICE) ds8k_proxy._do_replication_setup( [TEST_REPLICATION_DEVICE], src_helper) self.assertTrue(mock_switch_connection.called) def test_find_lcu_for_eckd_replicated_volume(self): """find LCU for eckd replicated volume when pprc path is available.""" self.configuration.connection_type = ( storage.XIV_CONNECTION_TYPE_FC_ECKD) self.configuration.ds8k_devadd_unitadd_mapping = 'C4-10' self.configuration.ds8k_ssid_prefix = 'FF' self.configuration.san_clustername = TEST_ECKD_POOL_ID src_helper = FakeDS8KECKDHelper(self.configuration, None) device = TEST_REPLICATION_DEVICE.copy() device['connection_type'] = storage.XIV_CONNECTION_TYPE_FC_ECKD device['ds8k_devadd_unitadd_mapping'] = 'A4-10' device['ds8k_ssid_prefix'] = 'FF' device['san_clustername'] = TEST_ECKD_POOL_ID repl = FakeReplication(src_helper, device) repl.check_physical_links() pool_lss_pair = repl.find_pool_lss_pair(None) expected_pair = {'source': (TEST_ECKD_POOL_ID, TEST_LCU_ID), 'target': (TEST_ECKD_POOL_ID, TEST_LCU_ID)} self.assertDictEqual(expected_pair, pool_lss_pair) @mock.patch.object(eventlet, 'sleep') def test_create_fb_replicated_volume(self, mock_sleep): """create FB volume when enable replication.""" 
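        # The test wires in one replication target, creates a volume whose
        # type carries the 'replication_enabled' extra spec, and then checks
        # that the primary vol_hex_id (in provider_location) and the replica
        # vol_hex_id (in metadata['replication'], keyed by the target backend
        # IP) both point at the fake volume created on each side.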
self.configuration.replication_device = [TEST_REPLICATION_DEVICE] self.driver = FakeDS8KProxy(self.storage_info, self.logger, self.exception, self) self.driver.setup(self.ctxt) extra_spec = {'replication_enabled': ' True'} vol_type = volume_types.create(self.ctxt, 'VOL_TYPE', extra_spec) volume = self._create_volume(volume_type_id=vol_type.id) vol = self.driver.create_volume(volume) self.assertEqual( TEST_VOLUME_ID, ast.literal_eval(vol['provider_location'])['vol_hex_id']) repl = eval(vol['metadata']['replication']) self.assertEqual(TEST_VOLUME_ID, repl[TEST_TARGET_DS8K_IP]['vol_hex_id']) @mock.patch.object(helper.DS8KCommonHelper, 'get_pprc_paths') @mock.patch.object(replication.MetroMirrorManager, 'create_pprc_path') @mock.patch.object(eventlet, 'sleep') def test_create_fb_replicated_vol_but_no_path_available(self, mock_sleep, create_pprc_path, get_pprc_paths): """create replicated volume but no pprc paths are available.""" self.configuration.replication_device = [TEST_REPLICATION_DEVICE] self.driver = FakeDS8KProxy(self.storage_info, self.logger, self.exception, self) self.driver.setup(self.ctxt) extra_spec = {'replication_enabled': ' True'} vol_type = volume_types.create(self.ctxt, 'VOL_TYPE', extra_spec) volume = self._create_volume(volume_type_id=vol_type.id) get_pprc_paths.return_value = [ { 'source_lss_id': TEST_LSS_ID_1, 'target_lss_id': TEST_LSS_ID_1, 'port_pairs': [ { 'source_port_id': TEST_SOURCE_IOPORT, 'target_port_id': TEST_TARGET_IOPORT, 'state': 'failed' } ], 'target_system_wwnn': TEST_TARGET_WWNN } ] self.driver.create_volume(volume) self.assertTrue(create_pprc_path.called) @mock.patch.object(helper.DS8KCommonHelper, 'get_pprc_paths') @mock.patch.object(eventlet, 'sleep') def test_create_fb_replicated_vol_and_verify_lss_in_path( self, mock_sleep, get_pprc_paths): """create replicated volume should verify the LSS in pprc paths.""" self.configuration.replication_device = [TEST_REPLICATION_DEVICE] self.driver = FakeDS8KProxy(self.storage_info, self.logger, self.exception, self) self.driver.setup(self.ctxt) extra_spec = {'replication_enabled': ' True'} vol_type = volume_types.create(self.ctxt, 'VOL_TYPE', extra_spec) volume = self._create_volume(volume_type_id=vol_type.id) get_pprc_paths.return_value = [ { 'source_lss_id': TEST_LSS_ID_1, 'target_lss_id': TEST_LSS_ID_1, 'port_pairs': [ { 'source_port_id': TEST_SOURCE_IOPORT, 'target_port_id': TEST_TARGET_IOPORT, 'state': 'success' } ], 'target_system_wwnn': TEST_TARGET_WWNN }, { 'source_lss_id': TEST_LSS_ID_2, 'target_lss_id': TEST_LSS_ID_2, 'port_pairs': [ { 'source_port_id': TEST_SOURCE_IOPORT, 'target_port_id': TEST_TARGET_IOPORT, 'state': 'success' } ], 'target_system_wwnn': TEST_TARGET_WWNN } ] vol = self.driver.create_volume(volume) # locate the volume in pprc path which LSS matches the pool. 
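        # The first two hex digits of vol_hex_id encode the LSS, so both the
        # source volume and its replica (keyed by TEST_TARGET_DS8K_IP in the
        # replication metadata) are expected to land in TEST_LSS_ID_1.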
self.assertEqual( TEST_LSS_ID_1, ast.literal_eval(vol['provider_location'])['vol_hex_id'][:2]) repl = eval(vol['metadata']['replication']) self.assertEqual(TEST_LSS_ID_1, repl[TEST_TARGET_DS8K_IP]['vol_hex_id'][:2]) @mock.patch.object(helper.DS8KCommonHelper, 'get_pprc_paths') @mock.patch.object(eventlet, 'sleep') def test_create_fb_replicated_vol_when_paths_available( self, mock_sleep, get_pprc_paths): """create replicated volume when multiple pprc paths are available.""" self.configuration.replication_device = [TEST_REPLICATION_DEVICE] self.driver = FakeDS8KProxy(self.storage_info, self.logger, self.exception, self) self.driver.setup(self.ctxt) extra_spec = {'replication_enabled': ' True'} vol_type = volume_types.create(self.ctxt, 'VOL_TYPE', extra_spec) volume = self._create_volume(volume_type_id=vol_type.id) get_pprc_paths.return_value = [ { 'source_lss_id': TEST_LSS_ID_1, 'target_lss_id': TEST_LSS_ID_1, 'port_pairs': [ { 'source_port_id': TEST_SOURCE_IOPORT, 'target_port_id': TEST_TARGET_IOPORT, 'state': 'success' } ], 'target_system_wwnn': TEST_TARGET_WWNN }, { 'source_lss_id': TEST_LSS_ID_3, 'target_lss_id': TEST_LSS_ID_3, 'port_pairs': [ { 'source_port_id': TEST_SOURCE_IOPORT, 'target_port_id': TEST_TARGET_IOPORT, 'state': 'success' } ], 'target_system_wwnn': TEST_TARGET_WWNN } ] vol = self.driver.create_volume(volume) # locate the volume in pprc path which has emptest LSS. self.assertEqual( TEST_LSS_ID_1, ast.literal_eval(vol['provider_location'])['vol_hex_id'][:2]) repl = eval(vol['metadata']['replication']) self.assertEqual(TEST_LSS_ID_1, repl[TEST_TARGET_DS8K_IP]['vol_hex_id'][:2]) @mock.patch.object(helper.DS8KCommonHelper, '_create_lun') @mock.patch.object(eventlet, 'sleep') def test_create_replicated_vol_but_lss_full_afterwards( self, mock_sleep, create_lun): """create replicated volume but lss is full afterwards.""" self.configuration.replication_device = [TEST_REPLICATION_DEVICE] self.driver = FakeDS8KProxy(self.storage_info, self.logger, self.exception, self) self.driver.setup(self.ctxt) create_lun.side_effect = [ restclient.LssFullException('LSS is full.'), TEST_VOLUME_ID, TEST_VOLUME_ID ] extra_spec = {'replication_enabled': ' True'} vol_type = volume_types.create(self.ctxt, 'VOL_TYPE', extra_spec) volume = self._create_volume(volume_type_id=vol_type.id) with mock.patch.object(replication.MetroMirrorManager, '_is_pprc_paths_healthy') as check_pprc_path: check_pprc_path.return_value = replication.PPRC_PATH_HEALTHY vol = self.driver.create_volume(volume) self.assertEqual( TEST_VOLUME_ID, ast.literal_eval(vol['provider_location'])['vol_hex_id']) repl = eval(vol['metadata']['replication']) self.assertEqual(TEST_VOLUME_ID, repl[TEST_TARGET_DS8K_IP]['vol_hex_id']) @mock.patch.object(helper.DS8KCommonHelper, '_delete_lun') def test_delete_volume(self, mock_delete_lun): """delete volume successfully.""" self.driver = FakeDS8KProxy(self.storage_info, self.logger, self.exception, self) self.driver.setup(self.ctxt) vol_type = volume_types.create(self.ctxt, 'VOL_TYPE', {}) location = str({'vol_hex_id': TEST_VOLUME_ID}) volume = self._create_volume(volume_type_id=vol_type.id, provider_location=location) self.driver.delete_volume(volume) self.assertTrue(mock_delete_lun.called) @mock.patch.object(helper.DS8KCommonHelper, '_delete_lun') def test_delete_volume_return_if_no_volume_id(self, mock_delete_lun): """should not try to delete volume if the volume id is None.""" self.driver = FakeDS8KProxy(self.storage_info, self.logger, self.exception, self) self.driver.setup(self.ctxt) volume = 
self._create_volume() self.driver.delete_volume(volume) self.assertFalse(mock_delete_lun.called) @mock.patch.object(helper.DS8KCommonHelper, 'lun_exists') @mock.patch.object(helper.DS8KCommonHelper, '_delete_lun') def test_delete_volume_return_if_volume_not_exist(self, mock_delete_lun, mock_lun_exists): """should not delete volume if the volume doesn't exist.""" self.driver = FakeDS8KProxy(self.storage_info, self.logger, self.exception, self) self.driver.setup(self.ctxt) vol_type = volume_types.create(self.ctxt, 'VOL_TYPE', {}) location = str({'vol_hex_id': TEST_VOLUME_ID}) volume = self._create_volume(volume_type_id=vol_type.id, provider_location=location) mock_lun_exists.return_value = False self.driver.delete_volume(volume) self.assertFalse(mock_delete_lun.called) @mock.patch.object(helper.DS8KCommonHelper, 'delete_lun_by_id') @mock.patch.object(helper.DS8KCommonHelper, 'delete_lun') def test_delete_fb_replicated_volume(self, mock_delete_lun, mock_delete_lun_by_id): """Delete volume when enable replication.""" self.configuration.replication_device = [TEST_REPLICATION_DEVICE] self.driver = FakeDS8KProxy(self.storage_info, self.logger, self.exception, self) self.driver.setup(self.ctxt) extra_spec = {'replication_enabled': ' True'} vol_type = volume_types.create(self.ctxt, 'VOL_TYPE', extra_spec) location = str({'vol_hex_id': TEST_VOLUME_ID}) data = json.dumps( {TEST_TARGET_DS8K_IP: {'vol_hex_id': TEST_VOLUME_ID}}) volume = self._create_volume(volume_type_id=vol_type.id, provider_location=location, replication_driver_data=data) self.driver.delete_volume(volume) self.assertTrue(mock_delete_lun_by_id.called) self.assertTrue(mock_delete_lun.called) @mock.patch.object(helper.DS8KCommonHelper, 'get_flashcopy') def test_async_clone_volume(self, mock_get_flashcopy): """clone the volume asynchronously.""" self.driver = FakeDS8KProxy(self.storage_info, self.logger, self.exception, self) vol_type = volume_types.create(self.ctxt, 'VOL_TYPE', {}) location = str({'vol_hex_id': TEST_VOLUME_ID}) src_vol = self._create_volume(volume_type_id=vol_type.id, provider_location=location) location = str({'vol_hex_id': None}) metadata = [{'key': 'async_clone', 'value': True}] tgt_vol = self._create_volume(volume_type_id=vol_type.id, provider_location=location, volume_metadata=metadata) self.mock_object(eventlet, 'spawn') mock_get_flashcopy.return_value = [TEST_FLASHCOPY] volume_update = self.driver.create_cloned_volume(tgt_vol, src_vol) self.assertEqual( TEST_VOLUME_ID, ast.literal_eval(volume_update['provider_location'])['vol_hex_id']) self.assertEqual('started', volume_update['metadata']['flashcopy']) eventlet.spawn.assert_called() def test_check_async_cloned_volumes_when_initialize_driver(self): """initialize driver should check volumes cloned asynchronously.""" vol_type = volume_types.create(self.ctxt, 'VOL_TYPE', {}) location = str({'vol_hex_id': TEST_VOLUME_ID}) src_vol = self._create_volume(volume_type_id=vol_type.id, provider_location=location) location = str({'vol_hex_id': TEST_VOLUME_ID_2}) metadata = [{'key': 'flashcopy', 'value': 'started'}] self._create_volume(volume_type_id=vol_type.id, source_volid=src_vol.id, provider_location=location, volume_metadata=metadata) self.mock_object(eventlet, 'spawn') self.driver = FakeDS8KProxy(self.storage_info, self.logger, self.exception, self) eventlet.spawn.assert_called() @mock.patch.object(eventlet, 'sleep') @mock.patch.object(helper.DS8KCommonHelper, 'get_flashcopy') def test_wait_flashcopy_when_async_clone_volume( self, mock_get_flashcopy, mock_sleep): 
"""clone volume asynchronously when flashcopy failed.""" self.driver = FakeDS8KProxy(self.storage_info, self.logger, self.exception, self) vol_type = volume_types.create(self.ctxt, 'VOL_TYPE', {}) location = str({'vol_hex_id': TEST_VOLUME_ID}) src_vol = self._create_volume(volume_type_id=vol_type.id, provider_location=location) location = str({'vol_hex_id': TEST_VOLUME_ID_2}) metadata = [{'key': 'async_clone', 'value': True}] tgt_vol = self._create_volume(volume_type_id=vol_type.id, provider_location=location, volume_metadata=metadata) src_lun = ds8kproxy.Lun(src_vol) tgt_lun = ds8kproxy.Lun(tgt_vol) mock_get_flashcopy.side_effect = ( restclient.APIException('flashcopy fails.')) self.driver._wait_flashcopy([src_lun], [tgt_lun]) self.assertEqual('error', tgt_lun.status) self.assertEqual('error', tgt_vol.metadata['flashcopy']) self.assertEqual('error', tgt_vol.status) self.assertIsNotNone(tgt_vol.metadata.get('error_msg')) @mock.patch.object(eventlet, 'sleep') @mock.patch.object(helper.DS8KCommonHelper, 'get_flashcopy') def test_wait_flashcopy_when_async_clone_volume_2( self, mock_get_flashcopy, mock_sleep): """clone volume asynchronously when flashcopy successed.""" self.driver = FakeDS8KProxy(self.storage_info, self.logger, self.exception, self) vol_type = volume_types.create(self.ctxt, 'VOL_TYPE', {}) location = str({'vol_hex_id': TEST_VOLUME_ID}) src_vol = self._create_volume(volume_type_id=vol_type.id, provider_location=location) location = str({'vol_hex_id': TEST_VOLUME_ID_2}) metadata = [{'key': 'async_clone', 'value': True}] tgt_vol = self._create_volume(volume_type_id=vol_type.id, provider_location=location, volume_metadata=metadata) src_lun = ds8kproxy.Lun(src_vol) tgt_lun = ds8kproxy.Lun(tgt_vol) mock_get_flashcopy.return_value = {} self.driver._wait_flashcopy([src_lun], [tgt_lun]) self.assertEqual('available', tgt_lun.status) self.assertEqual('success', tgt_vol.metadata['flashcopy']) @mock.patch.object(eventlet, 'sleep') @mock.patch.object(helper.DS8KCommonHelper, 'get_flashcopy') def test_create_cloned_volume(self, mock_get_flashcopy, mock_sleep): """clone the volume successfully.""" self.driver = FakeDS8KProxy(self.storage_info, self.logger, self.exception, self) self.driver.setup(self.ctxt) vol_type = volume_types.create(self.ctxt, 'VOL_TYPE', {}) location = str({'vol_hex_id': TEST_VOLUME_ID}) src_vol = self._create_volume(volume_type_id=vol_type.id, provider_location=location) location = str({'vol_hex_id': None}) tgt_vol = self._create_volume(volume_type_id=vol_type.id, provider_location=location) mock_get_flashcopy.side_effect = [[TEST_FLASHCOPY], {}] volume_update = self.driver.create_cloned_volume(tgt_vol, src_vol) self.assertEqual( TEST_VOLUME_ID, ast.literal_eval(volume_update['provider_location'])['vol_hex_id']) @mock.patch.object(eventlet, 'sleep') @mock.patch.object(helper.DS8KCommonHelper, 'get_flashcopy') @mock.patch.object(helper.DS8KCommonHelper, 'change_lun') def test_create_cloned_volume2(self, mock_change_lun, mock_get_flashcopy, mock_sleep): """clone from source volume to a bigger target volume.""" self.driver = FakeDS8KProxy(self.storage_info, self.logger, self.exception, self) self.driver.setup(self.ctxt) vol_type = volume_types.create(self.ctxt, 'VOL_TYPE', {}) location = str({'vol_hex_id': TEST_VOLUME_ID}) src_vol = self._create_volume(volume_type_id=vol_type.id, provider_location=location) location = str({'vol_hex_id': None}) tgt_vol = self._create_volume(volume_type_id=vol_type.id, provider_location=location, size=2) mock_get_flashcopy.side_effect = 
[[TEST_FLASHCOPY], {}] self.driver.create_cloned_volume(tgt_vol, src_vol) self.assertTrue(mock_change_lun.called) def test_create_cloned_volume3(self): """clone source volume which should be smaller than target volume.""" self.driver = FakeDS8KProxy(self.storage_info, self.logger, self.exception, self) self.driver.setup(self.ctxt) vol_type = volume_types.create(self.ctxt, 'VOL_TYPE', {}) location = str({'vol_hex_id': TEST_VOLUME_ID}) src_vol = self._create_volume(volume_type_id=vol_type.id, provider_location=location, size=2) location = str({'vol_hex_id': None}) tgt_vol = self._create_volume(volume_type_id=vol_type.id, provider_location=location) self.assertRaises(exception.VolumeDriverException, self.driver.create_cloned_volume, tgt_vol, src_vol) @mock.patch.object(helper.DS8KCommonHelper, 'get_flashcopy') def test_create_cloned_volume4(self, mock_get_flashcopy): """clone a volume which should not be a target in flashcopy.""" self.driver = FakeDS8KProxy(self.storage_info, self.logger, self.exception, self) self.driver.setup(self.ctxt) vol_type = volume_types.create(self.ctxt, 'VOL_TYPE', {}) location = str({'vol_hex_id': TEST_VOLUME_ID}) src_vol = self._create_volume(volume_type_id=vol_type.id, provider_location=location) location = str({'vol_hex_id': None}) tgt_vol = self._create_volume(volume_type_id=vol_type.id, provider_location=location) flashcopy_relationship = copy.deepcopy(TEST_FLASHCOPY) flashcopy_relationship['targetvolume']['id'] = TEST_VOLUME_ID mock_get_flashcopy.return_value = [flashcopy_relationship] self.assertRaises(restclient.APIException, self.driver.create_cloned_volume, tgt_vol, src_vol) @mock.patch.object(eventlet, 'sleep') @mock.patch.object(helper.DS8KCommonHelper, 'get_flashcopy') @mock.patch.object(helper.DS8KCommonHelper, 'lun_exists') @mock.patch.object(helper.DS8KCommonHelper, 'create_lun') def test_create_cloned_volume5(self, mock_create_lun, mock_lun_exists, mock_get_flashcopy, mock_sleep): """clone a volume when target has volume ID but it is nonexistent.""" self.driver = FakeDS8KProxy(self.storage_info, self.logger, self.exception, self) self.driver.setup(self.ctxt) vol_type = volume_types.create(self.ctxt, 'VOL_TYPE', {}) location = str({'vol_hex_id': TEST_VOLUME_ID}) src_vol = self._create_volume(volume_type_id=vol_type.id, provider_location=location) location = str({'vol_hex_id': '0003'}) metadata = [{'key': 'data_type', 'value': 'FB 512'}] tgt_vol = self._create_volume(volume_type_id=vol_type.id, provider_location=location, volume_metadata=metadata) mock_get_flashcopy.side_effect = [[TEST_FLASHCOPY], {}] mock_lun_exists.return_value = False self.driver.create_cloned_volume(tgt_vol, src_vol) self.assertTrue(mock_create_lun.called) @mock.patch.object(eventlet, 'sleep') @mock.patch.object(helper.DS8KCommonHelper, 'get_flashcopy') def test_create_volume_from_snapshot(self, mock_get_flashcopy, mock_sleep): """create volume from snapshot.""" self.driver = FakeDS8KProxy(self.storage_info, self.logger, self.exception, self) self.driver.setup(self.ctxt) vol_type = volume_types.create(self.ctxt, 'VOL_TYPE', {}) volume = self._create_volume(volume_type_id=vol_type.id) location = str({'vol_hex_id': '0002'}) snap = self._create_snapshot(volume_id=volume.id, volume_type_id=vol_type.id, provider_location=location) vol = self._create_volume(volume_type_id=vol_type.id) mock_get_flashcopy.side_effect = [[TEST_FLASHCOPY], {}] volume_update = self.driver.create_volume_from_snapshot(vol, snap) self.assertEqual( TEST_VOLUME_ID, 
ast.literal_eval(volume_update['provider_location'])['vol_hex_id']) def test_extend_volume(self): """extend unreplicated volume.""" self.driver = FakeDS8KProxy(self.storage_info, self.logger, self.exception, self) self.driver.setup(self.ctxt) vol_type = volume_types.create(self.ctxt, 'VOL_TYPE', {}) location = str({'vol_hex_id': TEST_VOLUME_ID}) volume = self._create_volume(volume_type_id=vol_type.id, provider_location=location) self.driver.extend_volume(volume, 2) @mock.patch.object(eventlet, 'sleep') def test_extend_replicated_volume(self, mock_sleep): """extend replicated volume.""" self.configuration.replication_device = [TEST_REPLICATION_DEVICE] self.driver = FakeDS8KProxy(self.storage_info, self.logger, self.exception, self) self.driver.setup(self.ctxt) vol_type = volume_types.create(self.ctxt, 'VOL_TYPE', {'replication_enabled': ' True'}) location = str({'vol_hex_id': TEST_VOLUME_ID}) data = json.dumps( {TEST_TARGET_DS8K_IP: {'vol_hex_id': TEST_VOLUME_ID}}) volume = self._create_volume(volume_type_id=vol_type.id, provider_location=location, replication_driver_data=data) self.driver.extend_volume(volume, 2) def test_extend_replicated_volume_that_has_been_failed_over(self): """extend replicated volume which has been failed over should fail.""" self.configuration.replication_device = [TEST_REPLICATION_DEVICE] self.driver = FakeDS8KProxy(self.storage_info, self.logger, self.exception, self, TEST_TARGET_DS8K_IP) self.driver.setup(self.ctxt) vol_type = volume_types.create(self.ctxt, 'VOL_TYPE', {'replication_enabled': ' True'}) location = str({'vol_hex_id': TEST_VOLUME_ID}) data = json.dumps( {TEST_TARGET_DS8K_IP: {'vol_hex_id': TEST_VOLUME_ID}}) volume = self._create_volume(volume_type_id=vol_type.id, provider_location=location, replication_driver_data=data) self.assertRaises(exception.CinderException, self.driver.extend_volume, volume, 2) @mock.patch.object(eventlet, 'sleep') @mock.patch.object(helper.DS8KCommonHelper, 'get_flashcopy') def test_create_snapshot(self, mock_get_flashcopy, mock_sleep): """test a successful creation of snapshot.""" self.driver = FakeDS8KProxy(self.storage_info, self.logger, self.exception, self) self.driver.setup(self.ctxt) vol_type = volume_types.create(self.ctxt, 'VOL_TYPE', {}) location = str({'vol_hex_id': '0002'}) volume = self._create_volume(volume_type_id=vol_type.id, provider_location=location) snapshot = self._create_snapshot(volume_id=volume.id) mock_get_flashcopy.side_effect = [[TEST_FLASHCOPY], {}] snapshot_update = self.driver.create_snapshot(snapshot) location = ast.literal_eval(snapshot_update['provider_location']) self.assertEqual(TEST_VOLUME_ID, location['vol_hex_id']) @mock.patch.object(eventlet, 'sleep') @mock.patch.object(helper.DS8KCommonHelper, 'get_flashcopy') def test_retype_from_thick_to_thin(self, mock_get_flashcopy, mock_sleep): """retype from thick-provision to thin-provision.""" self.driver = FakeDS8KProxy(self.storage_info, self.logger, self.exception, self) self.driver.setup(self.ctxt) new_type = {} diff = { 'encryption': {}, 'qos_specs': {}, 'extra_specs': {'drivers:thin_provision': ('False', 'True')} } host = None vol_type = volume_types.create(self.ctxt, 'VOL_TYPE', {'drivers:thin_provision': 'False'}) location = str({'vol_hex_id': TEST_VOLUME_ID}) volume = self._create_volume(volume_type_id=vol_type.id, provider_location=location) mock_get_flashcopy.side_effect = [[TEST_FLASHCOPY], {}] retyped, retype_model_update = self.driver.retype( self.ctxt, volume, new_type, diff, host) self.assertTrue(retyped) 
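# --- Illustrative note (not part of the upstream test suite) ---
# A minimal sketch of the provider_location convention the surrounding retype
# and create tests rely on: the driver stores the DS8K volume hex ID as a
# stringified dict, and the tests read it back with ast.literal_eval. The
# first two hex characters are the LSS, which is why several assertions
# slice vol_hex_id[:2]. The value '0401' below is a made-up example, not a
# constant from this module.
#
#     import ast
#     location = str({'vol_hex_id': '0401'})
#     vol_hex_id = ast.literal_eval(location)['vol_hex_id']
#     assert vol_hex_id == '0401'
#     assert vol_hex_id[:2] == '04'   # LSS portion of the volume ID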
@mock.patch.object(eventlet, 'sleep') @mock.patch.object(helper.DS8KCommonHelper, 'get_flashcopy') def test_retype_from_thin_to_thick(self, mock_get_flashcopy, mock_sleep): """retype from thin-provision to thick-provision.""" self.driver = FakeDS8KProxy(self.storage_info, self.logger, self.exception, self) self.driver.setup(self.ctxt) new_type = {} diff = { 'encryption': {}, 'qos_specs': {}, 'extra_specs': {'drivers:thin_provision': ('True', 'False')} } host = None vol_type = volume_types.create(self.ctxt, 'VOL_TYPE', {'drivers:thin_provision': 'True'}) location = str({'vol_hex_id': TEST_VOLUME_ID}) volume = self._create_volume(volume_type_id=vol_type.id, provider_location=location) mock_get_flashcopy.side_effect = [[TEST_FLASHCOPY], {}] retyped, retype_model_update = self.driver.retype( self.ctxt, volume, new_type, diff, host) self.assertTrue(retyped) @mock.patch.object(eventlet, 'sleep') def test_retype_from_unreplicated_to_replicated(self, mock_sleep): """retype from unreplicated to replicated.""" self.configuration.replication_device = [TEST_REPLICATION_DEVICE] self.driver = FakeDS8KProxy(self.storage_info, self.logger, self.exception, self) self.driver.setup(self.ctxt) new_type = {} diff = { 'encryption': {}, 'qos_specs': {}, 'extra_specs': { 'replication_enabled': (' False', ' True') } } host = None vol_type = volume_types.create(self.ctxt, 'VOL_TYPE', {'replication_enabled': ' False'}) location = str({'vol_hex_id': TEST_VOLUME_ID}) data = json.dumps( {TEST_TARGET_DS8K_IP: {'vol_hex_id': TEST_VOLUME_ID}}) metadata = [{'key': 'data_type', 'value': 'FB 512'}] volume = self._create_volume(volume_type_id=vol_type.id, provider_location=location, replication_driver_data=data, volume_metadata=metadata) retyped, retype_model_update = self.driver.retype( self.ctxt, volume, new_type, diff, host) self.assertTrue(retyped) @mock.patch.object(eventlet, 'sleep') def test_retype_from_replicated_to_unreplicated(self, mock_sleep): """retype from replicated to unreplicated.""" self.configuration.replication_device = [TEST_REPLICATION_DEVICE] self.driver = FakeDS8KProxy(self.storage_info, self.logger, self.exception, self) self.driver.setup(self.ctxt) new_type = {} diff = { 'encryption': {}, 'qos_specs': {}, 'extra_specs': { 'replication_enabled': (' True', ' False') } } host = None vol_type = volume_types.create(self.ctxt, 'VOL_TYPE', {'replication_enabled': ' True'}) location = str({'vol_hex_id': TEST_VOLUME_ID}) data = json.dumps( {TEST_TARGET_DS8K_IP: {'vol_hex_id': TEST_VOLUME_ID}}) metadata = [{'key': 'data_type', 'value': 'FB 512'}] volume = self._create_volume(volume_type_id=vol_type.id, provider_location=location, replication_driver_data=data, volume_metadata=metadata) retyped, retype_model_update = self.driver.retype( self.ctxt, volume, new_type, diff, host) self.assertTrue(retyped) @mock.patch.object(eventlet, 'sleep') @mock.patch.object(helper.DS8KCommonHelper, 'get_flashcopy') def test_retype_from_thin_to_thick_and_replicated(self, mock_get_flashcopy, mock_sleep): """retype from thin-provision to thick-provision and replicated.""" self.configuration.replication_device = [TEST_REPLICATION_DEVICE] self.driver = FakeDS8KProxy(self.storage_info, self.logger, self.exception, self) self.driver.setup(self.ctxt) new_type = {} diff = { 'encryption': {}, 'qos_specs': {}, 'extra_specs': { 'drivers:thin_provision': ('True', 'False'), 'replication_enabled': (' False', ' True') } } host = None vol_type = volume_types.create(self.ctxt, 'VOL_TYPE', {}) location = str({'vol_hex_id': TEST_VOLUME_ID}) volume 
= self._create_volume(volume_type_id=vol_type.id, provider_location=location) mock_get_flashcopy.side_effect = [[TEST_FLASHCOPY], {}] retyped, retype_model_update = self.driver.retype( self.ctxt, volume, new_type, diff, host) self.assertTrue(retyped) @mock.patch.object(eventlet, 'sleep') @mock.patch.object(helper.DS8KCommonHelper, 'get_flashcopy') def test_retype_thin_replicated_vol_to_thick_vol(self, mock_get_flashcopy, mock_sleep): """retype from thin-provision and replicated to thick-provision.""" self.configuration.replication_device = [TEST_REPLICATION_DEVICE] self.driver = FakeDS8KProxy(self.storage_info, self.logger, self.exception, self) self.driver.setup(self.ctxt) new_type = {} diff = { 'encryption': {}, 'qos_specs': {}, 'extra_specs': { 'drivers:thin_provision': ('True', 'False'), 'replication_enabled': (' True', ' False') } } host = None vol_type = volume_types.create(self.ctxt, 'VOL_TYPE', {'replication_enabled': ' True'}) location = str({'vol_hex_id': TEST_VOLUME_ID}) data = json.dumps( {TEST_TARGET_DS8K_IP: {'vol_hex_id': TEST_VOLUME_ID}}) volume = self._create_volume(volume_type_id=vol_type.id, provider_location=location, replication_driver_data=data) mock_get_flashcopy.side_effect = [[TEST_FLASHCOPY], {}] retyped, retype_model_update = self.driver.retype( self.ctxt, volume, new_type, diff, host) self.assertTrue(retyped) @mock.patch.object(helper.DS8KCommonHelper, 'get_flashcopy') @mock.patch.object(eventlet, 'sleep') def test_retype_replicated_volume_from_thin_to_thick(self, mock_sleep, mock_get_flashcopy): """retype replicated volume from thin-provision to thick-provision.""" self.configuration.replication_device = [TEST_REPLICATION_DEVICE] self.driver = FakeDS8KProxy(self.storage_info, self.logger, self.exception, self) self.driver.setup(self.ctxt) new_type = {} diff = { 'encryption': {}, 'qos_specs': {}, 'extra_specs': { 'drivers:thin_provision': ('True', 'False'), 'replication_enabled': (' True', ' True') } } host = None vol_type = volume_types.create(self.ctxt, 'VOL_TYPE', {'replication_enabled': ' True'}) location = str({'vol_hex_id': TEST_VOLUME_ID}) data = json.dumps( {TEST_TARGET_DS8K_IP: {'vol_hex_id': TEST_VOLUME_ID}}) volume = self._create_volume(volume_type_id=vol_type.id, provider_location=location, replication_driver_data=data) mock_get_flashcopy.side_effect = [[TEST_FLASHCOPY], {}] retyped, retype_model_update = self.driver.retype( self.ctxt, volume, new_type, diff, host) self.assertTrue(retyped) @mock.patch.object(helper.DS8KCommonHelper, 'get_flashcopy') @mock.patch.object(helper.DS8KCommonHelper, 'get_lun_pool') @mock.patch.object(eventlet, 'sleep') def test_retype_thin_vol_to_thick_vol_in_specific_area( self, mock_sleep, mock_get_lun_pool, mock_get_flashcopy): """retype thin volume to thick volume located in specific area.""" self.driver = FakeDS8KProxy(self.storage_info, self.logger, self.exception, self) self.driver.setup(self.ctxt) new_type = {} diff = { 'encryption': {}, 'qos_specs': {}, 'extra_specs': { 'drivers:thin_provision': ('True', 'False'), 'drivers:storage_pool_ids': (None, TEST_POOL_ID_1), 'drivers:storage_lss_ids': (None, TEST_LSS_ID_1) } } host = None vol_type = volume_types.create(self.ctxt, 'VOL_TYPE', {'drivers:thin_provision': 'False'}) location = str({'vol_hex_id': '0400'}) volume = self._create_volume(volume_type_id=vol_type.id, provider_location=location) mock_get_flashcopy.side_effect = [[TEST_FLASHCOPY], {}] mock_get_lun_pool.return_value = {'id': TEST_POOL_ID_1} retyped, retype_model_update = self.driver.retype( self.ctxt, 
volume, new_type, diff, host) location = ast.literal_eval(retype_model_update['provider_location']) self.assertEqual(TEST_LSS_ID_1, location['vol_hex_id'][:2]) self.assertTrue(retyped) @mock.patch.object(helper.DS8KCommonHelper, 'get_flashcopy') @mock.patch.object(helper.DS8KCommonHelper, 'get_lun_pool') @mock.patch.object(eventlet, 'sleep') def test_retype_replicated_vol_to_vol_in_specific_area( self, mock_sleep, mock_get_lun_pool, mock_get_flashcopy): """retype replicated volume to a specific area.""" self.configuration.replication_device = [TEST_REPLICATION_DEVICE] self.driver = FakeDS8KProxy(self.storage_info, self.logger, self.exception, self) self.driver.setup(self.ctxt) new_type = {} diff = { 'encryption': {}, 'qos_specs': {}, 'extra_specs': { 'replication_enabled': (' True', ' True'), 'drivers:storage_pool_ids': (None, TEST_POOL_ID_1), 'drivers:storage_lss_ids': (None, TEST_LSS_ID_1) } } host = None vol_type = volume_types.create(self.ctxt, 'VOL_TYPE', {'replication_enabled': ' True'}) location = str({'vol_hex_id': '0400'}) volume = self._create_volume(volume_type_id=vol_type.id, provider_location=location) mock_get_flashcopy.side_effect = [[TEST_FLASHCOPY], {}] mock_get_lun_pool.return_value = {'id': TEST_POOL_ID_1} retyped, retype_model_update = self.driver.retype( self.ctxt, volume, new_type, diff, host) location = ast.literal_eval(retype_model_update['provider_location']) self.assertEqual(TEST_LSS_ID_1, location['vol_hex_id'][:2]) self.assertTrue(retyped) def test_retype_vol_in_specific_area_to_another_area(self): """retype volume from a specific area to another area.""" self.driver = FakeDS8KProxy(self.storage_info, self.logger, self.exception, self) self.driver.setup(self.ctxt) new_type = {} diff = { 'encryption': {}, 'qos_specs': {}, 'extra_specs': { 'drivers:storage_pool_ids': (TEST_POOL_ID_1, TEST_POOL_ID_2), 'drivers:storage_lss_ids': (TEST_LSS_ID_1, TEST_LSS_ID_2) } } host = None vol_type = volume_types.create(self.ctxt, 'VOL_TYPE', { 'drivers:storage_pool_ids': TEST_POOL_ID_1, 'drivers:storage_lss_ids': TEST_LSS_ID_1}) location = str({'vol_hex_id': TEST_VOLUME_ID}) volume = self._create_volume(volume_type_id=vol_type.id, provider_location=location) self.assertRaises(exception.VolumeDriverException, self.driver.retype, self.ctxt, volume, new_type, diff, host) def test_retype_vol_from_non_multiattch_to_multiattch(self): """retype volume from a non multiattach to multiattach.""" self.driver = FakeDS8KProxy(self.storage_info, self.logger, self.exception, self) self.driver.setup(self.ctxt) new_type = {} diff = { 'encryption': {}, 'qos_specs': {}, 'extra_specs': { 'multiattach': (' False', ' True') } } host = None vol_type = volume_types.create(self.ctxt, 'VOL_TYPE', {'multiattach': ' False'}) location = str({'vol_hex_id': TEST_VOLUME_ID}) metadata = [{'key': 'data_type', 'value': 'FB 512'}] volume = self._create_volume(volume_type_id=vol_type.id, provider_location=location, volume_metadata=metadata) retyped, retype_model_update = self.driver.retype(self.ctxt, volume, new_type, diff, host) self.assertTrue(retype_model_update['multiattach']) self.assertTrue(retyped) def test_retype_vol_from_multiattch_to_non_multiattch(self): """retype volume from a multiattach to non multiattach.""" self.driver = FakeDS8KProxy(self.storage_info, self.logger, self.exception, self) self.driver.setup(self.ctxt) new_type = {} diff = { 'encryption': {}, 'qos_specs': {}, 'extra_specs': { 'multiattach': (' True', ' False') } } host = None vol_type = volume_types.create(self.ctxt, 'VOL_TYPE', 
{'multiattach': ' True'}) location = str({'vol_hex_id': TEST_VOLUME_ID}) metadata = [{'key': 'data_type', 'value': 'FB 512'}] volume = self._create_volume(volume_type_id=vol_type.id, provider_location=location, volume_metadata=metadata) retyped, retype_model_update = self.driver.retype(self.ctxt, volume, new_type, diff, host) self.assertFalse(retype_model_update['multiattach']) self.assertTrue(retyped) @mock.patch.object(helper.DS8KCommonHelper, 'get_flashcopy') def test_retype_vol_from_non_multiattach_to_multiattach_and_replicated( self, mock_get_flashcopy): """retype from non multiattach to multiattach and replicated.""" self.configuration.replication_device = [TEST_REPLICATION_DEVICE] self.driver = FakeDS8KProxy(self.storage_info, self.logger, self.exception, self) self.driver.setup(self.ctxt) new_type = {} diff = { 'encryption': {}, 'qos_specs': {}, 'extra_specs': { 'multiattach': (' False', ' True'), 'replication_enabled': (' False', ' True') } } host = None vol_type = volume_types.create(self.ctxt, 'VOL_TYPE', {}) location = str({'vol_hex_id': TEST_VOLUME_ID}) volume = self._create_volume(volume_type_id=vol_type.id, provider_location=location) mock_get_flashcopy.side_effect = [[TEST_FLASHCOPY], {}] retyped, retype_model_update = self.driver.retype( self.ctxt, volume, new_type, diff, host) self.assertTrue(retype_model_update['multiattach']) self.assertTrue(retyped) @mock.patch.object(helper.DS8KCommonHelper, 'get_flashcopy') @mock.patch.object(helper.DS8KCommonHelper, 'get_lun_pool') def test_retype_non_multiattach_vol_to_multiattach_vol_in_specific_area( self, mock_get_lun_pool, mock_get_flashcopy): self.driver = FakeDS8KProxy(self.storage_info, self.logger, self.exception, self) self.driver.setup(self.ctxt) new_type = {} diff = { 'encryption': {}, 'qos_specs': {}, 'extra_specs': { 'multiattach': (' False', ' True'), 'drivers:storage_pool_ids': (None, TEST_POOL_ID_1), 'drivers:storage_lss_ids': (None, TEST_LSS_ID_1) } } host = None vol_type = volume_types.create(self.ctxt, 'VOL_TYPE', {'multiattach': ' False'}) location = str({'vol_hex_id': '0400'}) volume = self._create_volume(volume_type_id=vol_type.id, provider_location=location) mock_get_flashcopy.side_effect = [[TEST_FLASHCOPY], {}] mock_get_lun_pool.return_value = {'id': TEST_POOL_ID_1} retyped, retype_model_update = self.driver.retype( self.ctxt, volume, new_type, diff, host) location = ast.literal_eval(retype_model_update['provider_location']) self.assertEqual(TEST_LSS_ID_1, location['vol_hex_id'][:2]) self.assertTrue(retype_model_update['multiattach']) self.assertTrue(retyped) def test_migrate_replicated_volume(self): """migrate replicated volume should be failed.""" self.driver = FakeDS8KProxy(self.storage_info, self.logger, self.exception, self) self.driver.setup(self.ctxt) self.driver._update_stats() vol_type = volume_types.create(self.ctxt, 'VOL_TYPE', {'replication_enabled': ' True'}) location = str({'vol_hex_id': TEST_VOLUME_ID}) data = json.dumps( {TEST_TARGET_DS8K_IP: {'vol_hex_id': TEST_VOLUME_ID}}) volume = self._create_volume(volume_type_id=vol_type.id, provider_location=location, replication_driver_data=data) backend = { 'host': 'host@backend#pool_id', 'capabilities': { 'extent_pools': TEST_POOL_ID_1, 'serial_number': TEST_SOURCE_SYSTEM_UNIT, 'vendor_name': 'IBM', 'storage_protocol': 'fibre_channel' } } self.assertRaises(exception.VolumeDriverException, self.driver.migrate_volume, self.ctxt, volume, backend) def test_migrate_and_try_pools_in_same_rank(self): """migrate volume and try pool in same rank.""" 
self.driver = FakeDS8KProxy(self.storage_info, self.logger, self.exception, self) self.driver.setup(self.ctxt) self.driver._update_stats() vol_type = volume_types.create(self.ctxt, 'VOL_TYPE', {}) location = str({'vol_hex_id': TEST_VOLUME_ID}) volume = self._create_volume(volume_type_id=vol_type.id, provider_location=location) backend = { 'host': 'host@backend#pool_id', 'capabilities': { 'extent_pools': TEST_POOL_ID_1, 'serial_number': TEST_SOURCE_SYSTEM_UNIT, 'vendor_name': 'IBM', 'storage_protocol': 'fibre_channel' } } moved, model_update = self.driver.migrate_volume( self.ctxt, volume, backend) self.assertTrue(moved) @mock.patch.object(helper.DS8KCommonHelper, 'get_flashcopy') @mock.patch.object(eventlet, 'sleep') def test_migrate_and_try_pools_in_opposite_rank(self, mock_sleep, mock_get_flashcopy): """migrate volume and try pool in opposite rank.""" self.driver = FakeDS8KProxy(self.storage_info, self.logger, self.exception, self) self.driver.setup(self.ctxt) self.driver._update_stats() vol_type = volume_types.create(self.ctxt, 'VOL_TYPE', {}) location = str({'vol_hex_id': TEST_VOLUME_ID}) volume = self._create_volume(volume_type_id=vol_type.id, provider_location=location) backend = { 'host': 'host@backend#pool_id', 'capabilities': { 'extent_pools': TEST_POOL_ID_2, 'serial_number': TEST_SOURCE_SYSTEM_UNIT, 'vendor_name': 'IBM', 'storage_protocol': 'fibre_channel' } } mock_get_flashcopy.side_effect = [[TEST_FLASHCOPY], {}] with mock.patch.object(helper.DS8KCommonHelper, '_get_pool') as get_pool: get_pool.return_value = FAKE_GET_POOL_RESPONSE_2['data'][ 'pools'][0] moved, model_update = self.driver.migrate_volume( self.ctxt, volume, backend) self.assertTrue(moved) def test_initialize_connection_of_fb_volume(self): """attach a FB volume to host.""" self.driver = FakeDS8KProxy(self.storage_info, self.logger, self.exception, self) self.driver.setup(self.ctxt) vol_type = volume_types.create(self.ctxt, 'VOL_TYPE', {}) location = str({'vol_hex_id': TEST_VOLUME_ID}) volume = self._create_volume(volume_type_id=vol_type.id, provider_location=location) map_data = self.driver.initialize_connection(volume, TEST_CONNECTOR) self.assertEqual(int(TEST_LUN_ID), map_data['data']['target_lun']) self.assertEqual(sorted(list( map_data['data']['initiator_target_map'].keys()), key=str.lower), [TEST_SOURCE_WWPN_1, TEST_SOURCE_WWPN_2]) def test_initialize_connection_of_eckd_volume(self): """attach a ECKD volume to host.""" self.configuration.connection_type = ( storage.XIV_CONNECTION_TYPE_FC_ECKD) self.configuration.ds8k_devadd_unitadd_mapping = 'C4-10' self.configuration.ds8k_ssid_prefix = 'FF' self.configuration.san_clustername = TEST_ECKD_POOL_ID self.driver = FakeDS8KProxy(self.storage_info, self.logger, self.exception, self) self.driver.setup(self.ctxt) vol_type = volume_types.create(self.ctxt, 'VOL_TYPE', {}) location = str({'vol_hex_id': TEST_ECKD_VOLUME_ID}) volume = self._create_volume(volume_type_id=vol_type.id, provider_location=location) map_data = self.driver.initialize_connection(volume, {}) self.assertEqual(int('C4', 16), map_data['data']['cula']) self.assertEqual(int(TEST_ECKD_VOLUME_ID[2:4], 16), map_data['data']['unit_address']) @mock.patch.object(helper.DS8KCommonHelper, '_get_host_ports') def test_initialize_connection_when_no_existing_host(self, mock_get_host_ports): """attach volume to host which has not been created.""" self.driver = FakeDS8KProxy(self.storage_info, self.logger, self.exception, self) self.driver.setup(self.ctxt) vol_type = volume_types.create(self.ctxt, 'VOL_TYPE', {}) 
location = str({'vol_hex_id': TEST_VOLUME_ID}) volume = self._create_volume(volume_type_id=vol_type.id, provider_location=location) host_ports = [ { "wwpn": TEST_SOURCE_WWPN_1, "state": "unconfigured", "hosttype": "LinuxRHEL", "addrdiscovery": "lunpolling", "host_id": '' } ] mock_get_host_ports.side_effect = [host_ports] map_data = self.driver.initialize_connection(volume, TEST_CONNECTOR) self.assertEqual(int(TEST_LUN_ID), map_data['data']['target_lun']) @mock.patch.object(helper.DS8KCommonHelper, '_get_host_ports') def test_initialize_connection_with_multiple_hosts(self, mock_get_host_ports): """attach volume to multiple hosts.""" self.driver = FakeDS8KProxy(self.storage_info, self.logger, self.exception, self) self.driver.setup(self.ctxt) vol_type = volume_types.create(self.ctxt, 'VOL_TYPE', {}) location = str({'vol_hex_id': TEST_VOLUME_ID}) volume = self._create_volume(volume_type_id=vol_type.id, provider_location=location) host_ports = [ { "wwpn": TEST_SOURCE_WWPN_1, "state": "logged in", "hosttype": "LinuxRHEL", "addrdiscovery": "lunpolling", "host_id": 'H1' }, { "wwpn": TEST_SOURCE_WWPN_1, "state": "logged in", "hosttype": "LinuxRHEL", "addrdiscovery": "lunpolling", "host_id": 'H2' } ] mock_get_host_ports.side_effect = [host_ports] self.assertRaises(restclient.APIException, self.driver.initialize_connection, volume, TEST_CONNECTOR) def test_terminate_connection_of_fb_volume(self): """detach a FB volume from host.""" self.driver = FakeDS8KProxy(self.storage_info, self.logger, self.exception, self) self.driver.setup(self.ctxt) vol_type = volume_types.create(self.ctxt, 'VOL_TYPE', {}) location = str({'vol_hex_id': TEST_VOLUME_ID}) volume = self._create_volume(volume_type_id=vol_type.id, provider_location=location) self.driver.terminate_connection(volume, TEST_CONNECTOR) def test_terminate_connection_of_eckd_volume(self): """attach a ECKD volume to host.""" self.configuration.connection_type = ( storage.XIV_CONNECTION_TYPE_FC_ECKD) self.configuration.ds8k_devadd_unitadd_mapping = 'C4-10' self.configuration.ds8k_ssid_prefix = 'FF' self.configuration.san_clustername = TEST_ECKD_POOL_ID self.driver = FakeDS8KProxy(self.storage_info, self.logger, self.exception, self) self.driver.setup(self.ctxt) vol_type = volume_types.create(self.ctxt, 'VOL_TYPE', {}) location = str({'vol_hex_id': TEST_ECKD_VOLUME_ID}) volume = self._create_volume(volume_type_id=vol_type.id, provider_location=location) unmap_data = self.driver.terminate_connection(volume, {}) self.assertIsNone(unmap_data) @mock.patch.object(helper.DS8KCommonHelper, '_get_host_ports') def test_terminate_connection_with_multiple_hosts(self, mock_get_host_ports): """detach volume from multiple hosts.""" self.driver = FakeDS8KProxy(self.storage_info, self.logger, self.exception, self) self.driver.setup(self.ctxt) vol_type = volume_types.create(self.ctxt, 'VOL_TYPE', {}) location = str({'vol_hex_id': TEST_VOLUME_ID}) volume = self._create_volume(volume_type_id=vol_type.id, provider_location=location) host_ports = [ { "wwpn": TEST_SOURCE_WWPN_1, "state": "logged in", "hosttype": "LinuxRHEL", "addrdiscovery": "lunpolling", "host_id": 'H1' }, { "wwpn": TEST_SOURCE_WWPN_1, "state": "logged in", "hosttype": "LinuxRHEL", "addrdiscovery": "lunpolling", "host_id": 'H2' } ] mock_get_host_ports.side_effect = [host_ports] self.assertRaises(restclient.APIException, self.driver.terminate_connection, volume, TEST_CONNECTOR) @mock.patch.object(helper.DS8KCommonHelper, '_get_host_ports') def test_terminate_connection_but_can_not_find_host(self, 
mock_get_host_ports): """detach volume but can not find host.""" self.driver = FakeDS8KProxy(self.storage_info, self.logger, self.exception, self) self.driver.setup(self.ctxt) vol_type = volume_types.create(self.ctxt, 'VOL_TYPE', {}) location = str({'vol_hex_id': TEST_VOLUME_ID}) volume = self._create_volume(volume_type_id=vol_type.id, provider_location=location) host_ports = [ { "wwpn": TEST_SOURCE_WWPN_1, "state": "unconfigured", "hosttype": "LinuxRHEL", "addrdiscovery": "lunpolling", "host_id": '' } ] mock_get_host_ports.side_effect = [host_ports] self.driver.terminate_connection(volume, TEST_CONNECTOR) @mock.patch.object(helper.DS8KCommonHelper, '_get_host_ports') @mock.patch.object(helper.DS8KCommonHelper, '_get_mappings') def test_terminate_connection_and_remove_host(self, mock_get_mappings, mock_get_host_ports): """detach volume and remove host in DS8K.""" self.driver = FakeDS8KProxy(self.storage_info, self.logger, self.exception, self) self.driver.setup(self.ctxt) vol_type = volume_types.create(self.ctxt, 'VOL_TYPE', {}) location = str({'vol_hex_id': TEST_VOLUME_ID}) volume = self._create_volume(volume_type_id=vol_type.id, provider_location=location) host_ports = [ { "wwpn": TEST_SOURCE_WWPN_1, "state": "logged in", "hosttype": "LinuxRHEL", "addrdiscovery": "lunpolling", "host_id": TEST_HOST_ID }, { "wwpn": TEST_SOURCE_WWPN_2, "state": "unconfigured", "hosttype": "LinuxRHEL", "addrdiscovery": "lunpolling", "host_id": '' } ] mappings = [ { "lunid": TEST_LUN_ID, "link": {}, "volume": {"id": TEST_VOLUME_ID, "link": {}} } ] mock_get_host_ports.side_effect = [host_ports] mock_get_mappings.side_effect = [mappings] ret_info = self.driver.terminate_connection(volume, TEST_CONNECTOR) self.assertEqual(sorted(list( ret_info['data']['initiator_target_map'].keys()), key=str.lower), [TEST_SOURCE_WWPN_1, TEST_SOURCE_WWPN_2]) @mock.patch.object(helper.DS8KCommonHelper, '_get_host_ports') @mock.patch.object(helper.DS8KCommonHelper, '_get_mappings') def test_detach_with_host_has_failed_over(self, mock_get_mappings, mock_get_host_ports): self.configuration.replication_device = [TEST_REPLICATION_DEVICE] self.driver = FakeDS8KProxy(self.storage_info, self.logger, self.exception, self, TEST_TARGET_DS8K_IP) self.driver.setup(self.ctxt) vol_type = volume_types.create(self.ctxt, 'VOL_TYPE', {'replication_enabled': ' True'}) location = str({'vol_hex_id': TEST_VOLUME_ID}) data = json.dumps( {'default': {'vol_hex_id': TEST_VOLUME_ID}}) volume = self._create_volume(volume_type_id=vol_type.id, provider_location=location, replication_driver_data=data) host_ports_1 = [ { "wwpn": TEST_SOURCE_WWPN_1, "state": "logged in", "hosttype": "LinuxRHEL", "addrdiscovery": "lunpolling", "host_id": TEST_HOST_ID }, { "wwpn": TEST_SOURCE_WWPN_2, "state": "unconfigured", "hosttype": "LinuxRHEL", "addrdiscovery": "lunpolling", "host_id": '' } ] host_ports_2 = [ { "wwpn": TEST_SOURCE_WWPN_1, "state": "logged in", "hosttype": "LinuxRHEL", "addrdiscovery": "lunpolling", "host_id": TEST_HOST_ID }, { "wwpn": TEST_SOURCE_WWPN_2, "state": "unconfigured", "hosttype": "LinuxRHEL", "addrdiscovery": "lunpolling", "host_id": '' } ] mappings_1 = [ { "lunid": TEST_LUN_ID, "link": {}, "volume": {"id": TEST_VOLUME_ID_2, "link": {}} } ] mappings_2 = [ { "lunid": TEST_LUN_ID, "link": {}, "volume": {"id": TEST_VOLUME_ID, "link": {}} } ] mock_get_host_ports.side_effect = [host_ports_1, host_ports_2] mock_get_mappings.side_effect = [mappings_1, mappings_2] self.driver.terminate_connection(volume, TEST_CONNECTOR) 
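# --- Illustrative note (not part of the upstream test suite) ---
# A minimal sketch of the mock.patch.object decorator ordering that the
# detach/terminate tests above depend on: stacked patch decorators are
# applied bottom-up, so the decorator closest to the method supplies the
# first mock argument after self. FakeThing below is a made-up placeholder
# class, not a real Cinder or DS8K helper.
#
#     from unittest import mock
#
#     class FakeThing(object):
#         def get_host_ports(self):
#             pass
#         def get_mappings(self):
#             pass
#
#     @mock.patch.object(FakeThing, 'get_host_ports')   # -> second mock arg
#     @mock.patch.object(FakeThing, 'get_mappings')     # -> first mock arg
#     def test_example(self, mock_get_mappings, mock_get_host_ports):
#         mock_get_mappings.side_effect = [[{'lunid': '00'}]]
#         mock_get_host_ports.side_effect = [[{'wwpn': 'ABC'}]]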
@mock.patch.object(helper.DS8KCommonHelper, '_get_host_ports') @mock.patch.object(helper.DS8KCommonHelper, '_get_mappings') def test_detach_with_group_has_failed_over(self, mock_get_mappings, mock_get_host_ports): self.configuration.replication_device = [TEST_REPLICATION_DEVICE] self.driver = FakeDS8KProxy(self.storage_info, self.logger, self.exception, self) self.driver.setup(self.ctxt) group_type = group_types.create( self.ctxt, 'group', {'consistent_group_snapshot_enabled': ' True'} ) group = self._create_group(host=TEST_GROUP_HOST, group_type_id=group_type.id, replication_status='failed-over') vol_type = volume_types.create(self.ctxt, 'VOL_TYPE', {'replication_enabled': ' True'}) location = str({'vol_hex_id': TEST_VOLUME_ID}) data = json.dumps( {'default': {'vol_hex_id': TEST_VOLUME_ID}}) volume = self._create_volume(volume_type_id=vol_type.id, provider_location=location, replication_driver_data=data, group_id=group.id, replication_status='failed-over') host_ports = [ { "wwpn": TEST_SOURCE_WWPN_1, "state": "logged in", "hosttype": "LinuxRHEL", "addrdiscovery": "lunpolling", "host_id": TEST_HOST_ID }, { "wwpn": TEST_SOURCE_WWPN_2, "state": "unconfigured", "hosttype": "LinuxRHEL", "addrdiscovery": "lunpolling", "host_id": '' } ] mappings = [ { "lunid": TEST_LUN_ID, "link": {}, "volume": {"id": TEST_VOLUME_ID, "link": {}} } ] mock_get_host_ports.side_effect = [host_ports] mock_get_mappings.side_effect = [mappings] self.driver.terminate_connection(volume, TEST_CONNECTOR) def test_create_consistency_group(self): """user should reserve LSS for consistency group.""" self.driver = FakeDS8KProxy(self.storage_info, self.logger, self.exception, self) self.driver.setup(self.ctxt) group_type = group_types.create( self.ctxt, 'group', {'consistent_group_snapshot_enabled': ' True'} ) group = self._create_group(host=TEST_GROUP_HOST, group_type_id=group_type.id) self.assertRaises(exception.VolumeDriverException, self.driver.create_group, self.ctxt, group) def test_create_generic_group_not_implemented(self): """create generic group is not implemented.""" self.driver = FakeDS8KProxy(self.storage_info, self.logger, self.exception, self) self.driver.setup(self.ctxt) group_type = group_types.create( self.ctxt, 'group' ) group = self._create_group(host=TEST_GROUP_HOST, group_type_id=group_type.id) self.assertRaises(NotImplementedError, self.driver.create_group, self.ctxt, group) def test_create_replication_cg_should_verify_volume_types(self): """Cannot put non-replication volume type into replication cg.""" self.configuration.lss_range_for_cg = '20-23' self.driver = FakeDS8KProxy(self.storage_info, self.logger, self.exception, self) self.driver.setup(self.ctxt) vol_type = volume_types.create(self.ctxt, 'VOL_TYPE', {}) group_type = group_types.create( self.ctxt, 'group', {'consistent_group_replication_enabled': ' True'} ) group = self._create_group(host=TEST_GROUP_HOST, group_type_id=group_type.id, volume_type_ids=[vol_type.id]) self.assertRaises(exception.VolumeDriverException, self.driver.create_group, self.ctxt, group) @ddt.data({'bundle': "87.51.60.0"}, {'bundle': "88.20.47.0"}) @mock.patch.object(helper.DS8KCommonHelper, '_get_version') def test_create_replication_consisgroup_should_verify_rest_version( self, rest_version, mock_get_version): """Driver should verify whether does REST support pprc cg or not.""" self.configuration.lss_range_for_cg = '20-23' mock_get_version.return_value = rest_version self.driver = FakeDS8KProxy(self.storage_info, self.logger, self.exception, self) 
self.driver.setup(self.ctxt) vol_type = volume_types.create( self.ctxt, 'VOL_TYPE', {'replication_enabled': ' True'}) group_type = group_types.create( self.ctxt, 'group', {'consistent_group_replication_enabled': ' True'} ) group = self._create_group(host=TEST_GROUP_HOST, group_type_id=group_type.id, volume_type_ids=[vol_type.id]) self.assertRaises(exception.VolumeDriverException, self.driver.create_group, self.ctxt, group) def test_create_consistency_group_without_reserve_lss(self): """user should reserve LSS for group if it enables cg.""" self.driver = FakeDS8KProxy(self.storage_info, self.logger, self.exception, self) self.driver.setup(self.ctxt) group_type = group_types.create( self.ctxt, 'group', {'consistent_group_snapshot_enabled': ' True'} ) group = self._create_group(host=TEST_GROUP_HOST, group_type_id=group_type.id) self.assertRaises(exception.VolumeDriverException, self.driver.create_group, self.ctxt, group) @ddt.data('group_replication_enabled', 'consistent_group_replication_enabled') def test_create_replication_group_update_replication_status(self, key): """create replication group should update replication_status.""" self.configuration.lss_range_for_cg = '20-23' self.configuration.replication_device = [TEST_REPLICATION_DEVICE] self.driver = FakeDS8KProxy(self.storage_info, self.logger, self.exception, self) self.driver.setup(self.ctxt) group_type = group_types.create(self.ctxt, 'group', {key: ' True'}) group = self._create_group(host=TEST_GROUP_HOST, group_type_id=group_type.id) model_update = self.driver.create_group(self.ctxt, group) self.assertEqual(fields.GroupStatus.AVAILABLE, model_update['status']) self.assertEqual(fields.ReplicationStatus.ENABLED, model_update['replication_status']) def test_delete_consistency_group_sucessfully(self): """test a successful consistency group deletion.""" self.driver = FakeDS8KProxy(self.storage_info, self.logger, self.exception, self) self.driver.setup(self.ctxt) group_type = group_types.create( self.ctxt, 'group', {'consistent_group_snapshot_enabled': ' True'} ) group = self._create_group(host=TEST_GROUP_HOST, group_type_id=group_type.id) location = str({'vol_hex_id': TEST_VOLUME_ID}) volume = self._create_volume(provider_location=location, group_id=group.id) model_update, volumes_model_update = ( self.driver.delete_group(self.ctxt, group, [volume])) self.assertEqual('deleted', volumes_model_update[0]['status']) self.assertEqual(fields.GroupStatus.DELETED, model_update['status']) @mock.patch.object(helper.DS8KCommonHelper, 'delete_lun') def test_delete_consistency_group_failed(self, mock_delete_lun): """test a failed consistency group deletion.""" self.driver = FakeDS8KProxy(self.storage_info, self.logger, self.exception, self) self.driver.setup(self.ctxt) group_type = group_types.create( self.ctxt, 'group', {'consistent_group_snapshot_enabled': ' True'} ) group = self._create_group(host=TEST_GROUP_HOST, group_type_id=group_type.id) location = str({'vol_hex_id': TEST_VOLUME_ID}) volume = self._create_volume(provider_location=location, group_id=group.id) mock_delete_lun.side_effect = ( restclient.APIException('delete volume failed.')) model_update, volumes_model_update = ( self.driver.delete_group(self.ctxt, group, [volume])) self.assertEqual('error_deleting', volumes_model_update[0]['status']) self.assertEqual(fields.GroupStatus.ERROR_DELETING, model_update['status']) def test_delete_replication_group_is_not_implemented(self): """delete replication group is not implemented.""" self.configuration.replication_device = 
[TEST_REPLICATION_DEVICE] self.driver = FakeDS8KProxy(self.storage_info, self.logger, self.exception, self) self.driver.setup(self.ctxt) group_type = group_types.create( self.ctxt, 'group', {'group_replication_enabled': ' True'} ) group = self._create_group(host=TEST_GROUP_HOST, group_type_id=group_type.id) vol_type = volume_types.create(self.ctxt, 'VOL_TYPE', {'replication_enabled': ' True'}) location = str({'vol_hex_id': TEST_VOLUME_ID}) data = json.dumps( {TEST_TARGET_DS8K_IP: {'vol_hex_id': TEST_VOLUME_ID}}) volume = self._create_volume(volume_type_id=vol_type.id, provider_location=location, replication_driver_data=data, group_id=group.id) self.assertRaises(NotImplementedError, self.driver.delete_group, self.ctxt, group, [volume]) def test_add_in_use_vol_into_group_by_using_update_group(self): self.driver = FakeDS8KProxy(self.storage_info, self.logger, self.exception, self) self.driver.setup(self.ctxt) group_type = group_types.create( self.ctxt, 'group', {'consistent_group_snapshot_enabled': ' True'} ) group = self._create_group(host=TEST_GROUP_HOST, group_type_id=group_type.id) location = str({'vol_hex_id': TEST_VOLUME_ID}) volume = self._create_volume(provider_location=location, status='in-use') self.assertRaises(exception.VolumeDriverException, self.driver.update_group, self.ctxt, group, [volume], []) def test_remove_in_use_vol_from_group_by_using_update_group(self): self.driver = FakeDS8KProxy(self.storage_info, self.logger, self.exception, self) self.driver.setup(self.ctxt) group_type = group_types.create( self.ctxt, 'group', {'consistent_group_snapshot_enabled': ' True'} ) group = self._create_group(host=TEST_GROUP_HOST, group_type_id=group_type.id) location = str({'vol_hex_id': TEST_VOLUME_ID}) volume = self._create_volume(provider_location=location, status='in-use', group_id=group.id) self.assertRaises(exception.VolumeDriverException, self.driver.update_group, self.ctxt, group, [], [volume]) def test_update_replication_group_is_not_implemented(self): """update replication group is not implemented.""" self.configuration.replication_device = [TEST_REPLICATION_DEVICE] self.driver = FakeDS8KProxy(self.storage_info, self.logger, self.exception, self) self.driver.setup(self.ctxt) group_type = group_types.create( self.ctxt, 'group', {'group_replication_enabled': ' True'} ) group = self._create_group(host=TEST_GROUP_HOST, group_type_id=group_type.id) vol_type = volume_types.create(self.ctxt, 'VOL_TYPE', {'replication_enabled': ' True'}) location = str({'vol_hex_id': TEST_VOLUME_ID}) data = json.dumps( {TEST_TARGET_DS8K_IP: {'vol_hex_id': TEST_VOLUME_ID}}) volume = self._create_volume(volume_type_id=vol_type.id, provider_location=location, replication_driver_data=data, group_id=group.id) self.assertRaises(NotImplementedError, self.driver.update_group, self.ctxt, group, [volume], []) def test_update_generic_group_is_not_implemented(self): """update group which not enable cg is not implemented.""" self.driver = FakeDS8KProxy(self.storage_info, self.logger, self.exception, self) self.driver.setup(self.ctxt) group_type = group_types.create(self.ctxt, 'group', {}) group = self._create_group(host=TEST_GROUP_HOST, group_type_id=group_type.id) location = str({'vol_hex_id': TEST_VOLUME_ID}) volume = self._create_volume(provider_location=location) self.assertRaises(NotImplementedError, self.driver.update_group, self.ctxt, group, [volume], []) @mock.patch.object(eventlet, 'sleep') @mock.patch.object(helper.DS8KCommonHelper, 'get_flashcopy') @mock.patch.object(helper.DS8KCommonHelper, 
'_create_lun') @mock.patch.object(helper.DS8KCommonHelper, 'lun_exists') def test_update_generic_group_when_enable_cg(self, mock_lun_exists, mock_create_lun, mock_get_flashcopy, mock_sleep): """update group, but volume is not in LSS which belongs to group.""" self.configuration.lss_range_for_cg = '20-23' self.driver = FakeDS8KProxy(self.storage_info, self.logger, self.exception, self) self.driver.setup(self.ctxt) group_type = group_types.create( self.ctxt, 'group', {'consistent_group_snapshot_enabled': ' True'} ) group = self._create_group(host=TEST_GROUP_HOST, group_type_id=group_type.id) location = str({'vol_hex_id': TEST_VOLUME_ID}) metadata = [{'key': 'data_type', 'value': 'FB 512'}] volume = self._create_volume(provider_location=location, volume_metadata=metadata) mock_get_flashcopy.side_effect = [[TEST_FLASHCOPY], {}] mock_create_lun.return_value = '2200' mock_lun_exists.return_value = True model_update, add_volumes_update, remove_volumes_update = ( self.driver.update_group(self.ctxt, group, [volume], [])) location = ast.literal_eval(add_volumes_update[0]['provider_location']) self.assertEqual('2200', location['vol_hex_id']) @mock.patch.object(eventlet, 'sleep') @mock.patch.object(helper.DS8KCommonHelper, 'get_flashcopy') @mock.patch.object(helper.DS8KCommonHelper, '_create_lun') @mock.patch.object(helper.DS8KCommonHelper, 'lun_exists') def test_update_generic_group_when_enable_cg2(self, mock_lun_exists, mock_create_lun, mock_get_flashcopy, mock_sleep): """add replicated volume into group.""" self.configuration.replication_device = [TEST_REPLICATION_DEVICE] self.configuration.lss_range_for_cg = '20-23' self.driver = FakeDS8KProxy(self.storage_info, self.logger, self.exception, self) self.driver.setup(self.ctxt) group_type = group_types.create( self.ctxt, 'group', {'consistent_group_snapshot_enabled': ' True'} ) group = self._create_group(host=TEST_GROUP_HOST, group_type_id=group_type.id) vol_type = volume_types.create( self.ctxt, 'VOL_TYPE', {'replication_enabled': ' True'}) location = str({'vol_hex_id': TEST_VOLUME_ID}) data = json.dumps( {TEST_TARGET_DS8K_IP: {'vol_hex_id': TEST_VOLUME_ID}}) metadata = [{'key': 'data_type', 'value': 'FB 512'}] volume = self._create_volume(volume_type_id=vol_type.id, provider_location=location, replication_driver_data=data, volume_metadata=metadata) mock_get_flashcopy.side_effect = [[TEST_FLASHCOPY], {}] mock_create_lun.return_value = '2200' mock_lun_exists.return_value = True model_update, add_volumes_update, remove_volumes_update = ( self.driver.update_group(self.ctxt, group, [volume], [])) location = ast.literal_eval(add_volumes_update[0]['provider_location']) self.assertEqual('2200', location['vol_hex_id']) def test_delete_generic_group_not_implemented(self): """delete generic group but it is not implemented.""" self.driver = FakeDS8KProxy(self.storage_info, self.logger, self.exception, self) self.driver.setup(self.ctxt) group_type = group_types.create(self.ctxt, 'group', {}) group = self._create_group(group_type_id=group_type.id) location = str({'vol_hex_id': TEST_VOLUME_ID}) volume = self._create_volume(group_type_id=group_type.id, provider_location=location, group_id=group.id) self.assertRaises(NotImplementedError, self.driver.delete_group, self.ctxt, group, [volume]) @mock.patch.object(eventlet, 'sleep') @mock.patch.object(helper.DS8KCommonHelper, 'get_flashcopy') @mock.patch.object(helper.DS8KCommonHelper, '_create_lun') def test_create_consistency_group_snapshot_sucessfully( self, mock_create_lun, mock_get_flashcopy, mock_sleep): """test a 
successful consistency group snapshot creation.""" self.configuration.lss_range_for_cg = '20-23' self.driver = FakeDS8KProxy(self.storage_info, self.logger, self.exception, self) self.driver.setup(self.ctxt) group_type = group_types.create( self.ctxt, 'group', {'consistent_group_snapshot_enabled': ' True'} ) group = self._create_group(group_type_id=group_type.id) location = str({'vol_hex_id': '2000'}) volume = self._create_volume(provider_location=location, group_id=group.id) group_snapshot = ( self._create_group_snapshot(group_id=group.id, group_type_id=group_type.id)) snapshot = self._create_snapshot(volume_id=volume.id, group_snapshot_id=group_snapshot.id) mock_get_flashcopy.side_effect = [[TEST_FLASHCOPY], {}] mock_create_lun.return_value = '2200' model_update, snapshots_model_update = ( self.driver.create_group_snapshot( self.ctxt, group_snapshot, [snapshot])) location = ast.literal_eval( snapshots_model_update[0]['provider_location']) self.assertEqual('2200', location['vol_hex_id']) self.assertEqual('available', snapshots_model_update[0]['status']) self.assertEqual(fields.GroupStatus.AVAILABLE, model_update['status']) @mock.patch.object(eventlet, 'sleep') @mock.patch.object(helper.DS8KCommonHelper, 'get_flashcopy') def test_create_consistency_group_snapshot_not_in_lss_range_for_cg( self, mock_get_flashcopy, mock_sleep): """test a successful consistency group snapshot creation.""" self.configuration.lss_range_for_cg = '20-23' self.driver = FakeDS8KProxy(self.storage_info, self.logger, self.exception, self) self.driver.setup(self.ctxt) group_type = group_types.create( self.ctxt, 'group', {'consistent_group_snapshot_enabled': ' True'} ) group = self._create_group(group_type_id=group_type.id) location = str({'vol_hex_id': '2000'}) volume = self._create_volume(provider_location=location, group_id=group.id) group_snapshot = ( self._create_group_snapshot(group_id=group.id, group_type_id=group_type.id)) snapshot = self._create_snapshot(volume_id=volume.id, group_snapshot_id=group_snapshot.id) mock_get_flashcopy.side_effect = [[TEST_FLASHCOPY], {}] model_update, snapshots_model_update = ( self.driver.create_group_snapshot( self.ctxt, group_snapshot, [snapshot])) location = ast.literal_eval( snapshots_model_update[0]['provider_location']) self.assertNotIn(location['vol_hex_id'][:2], (20, 21, 22, 23)) self.assertEqual('available', snapshots_model_update[0]['status']) self.assertEqual(fields.GroupStatus.AVAILABLE, model_update['status']) def test_delete_consistency_group_snapshot_sucessfully(self): """test a successful consistency group snapshot deletion.""" self.driver = FakeDS8KProxy(self.storage_info, self.logger, self.exception, self) self.driver.setup(self.ctxt) group_type = group_types.create( self.ctxt, 'group', {'consistent_group_snapshot_enabled': ' True'} ) group = self._create_group(group_type_id=group_type.id) location = str({'vol_hex_id': '2000'}) volume = self._create_volume(provider_location=location, group_id=group.id) group_snapshot = ( self._create_group_snapshot(group_id=group.id, group_type_id=group_type.id)) snapshot = self._create_snapshot(volume_id=volume.id, group_snapshot_id=group_snapshot.id) model_update, snapshots_model_update = ( self.driver.delete_group_snapshot( self.ctxt, group_snapshot, [snapshot])) self.assertEqual('deleted', snapshots_model_update[0]['status']) self.assertEqual(fields.GroupSnapshotStatus.DELETED, model_update['status']) @mock.patch.object(helper.DS8KCommonHelper, 'delete_lun') def test_delete_consistency_group_snapshot_failed(self, 
mock_delete_lun): """test a failed consistency group snapshot deletion.""" self.driver = FakeDS8KProxy(self.storage_info, self.logger, self.exception, self) self.driver.setup(self.ctxt) group_type = group_types.create( self.ctxt, 'group', {'consistent_group_snapshot_enabled': ' True'} ) group = self._create_group(group_type_id=group_type.id) location = str({'vol_hex_id': '2000'}) volume = self._create_volume(provider_location=location, group_id=group.id) group_snapshot = ( self._create_group_snapshot(group_id=group.id, group_type_id=group_type.id)) snapshot = self._create_snapshot(volume_id=volume.id, group_snapshot_id=group_snapshot.id) mock_delete_lun.side_effect = ( restclient.APIException('delete snapshot failed.')) model_update, snapshots_model_update = ( self.driver.delete_group_snapshot( self.ctxt, group_snapshot, [snapshot])) self.assertEqual('error_deleting', snapshots_model_update[0]['status']) self.assertEqual(fields.GroupSnapshotStatus.ERROR_DELETING, model_update['status']) @mock.patch.object(eventlet, 'sleep') @mock.patch.object(helper.DS8KCommonHelper, '_create_lun') @mock.patch.object(helper.DS8KCommonHelper, 'get_flashcopy') def test_create_consisgroup_from_consisgroup(self, mock_get_flashcopy, mock_create_lun, mock_sleep): """test creation of consistency group from consistency group.""" self.configuration.lss_range_for_cg = '20-23' self.driver = FakeDS8KProxy(self.storage_info, self.logger, self.exception, self) self.driver.setup(self.ctxt) group_type = group_types.create( self.ctxt, 'group', {'consistent_group_snapshot_enabled': ' True'} ) src_group = self._create_group(host=TEST_GROUP_HOST, group_type_id=group_type.id) location = str({'vol_hex_id': '2000'}) src_vol = self._create_volume(provider_location=location, group_id=src_group.id) group = self._create_group(host=TEST_GROUP_HOST, group_type_id=group_type.id) volume = self._create_volume(group_id=group.id) mock_get_flashcopy.side_effect = [[TEST_FLASHCOPY], {}] mock_create_lun.return_value = '2200' model_update, volumes_model_update = ( self.driver.create_group_from_src( self.ctxt, group, [volume], None, None, src_group, [src_vol])) self.assertEqual('2200', volumes_model_update[0]['metadata']['vol_hex_id']) self.assertEqual(fields.GroupStatus.AVAILABLE, model_update['status']) @mock.patch.object(eventlet, 'sleep') @mock.patch.object(helper.DS8KCommonHelper, '_create_lun') @mock.patch.object(helper.DS8KCommonHelper, 'get_flashcopy') def test_create_consisgroup_from_cgsnapshot(self, mock_get_flashcopy, mock_create_lun, mock_sleep): """test creation of consistency group from cgsnapshot.""" self.configuration.lss_range_for_cg = '20-23' self.driver = FakeDS8KProxy(self.storage_info, self.logger, self.exception, self) self.driver.setup(self.ctxt) group_type = group_types.create( self.ctxt, 'group', {'consistent_group_snapshot_enabled': ' True'} ) src_group = self._create_group(host=TEST_GROUP_HOST, group_type_id=group_type.id) src_vol = self._create_volume(group_id=src_group.id) group_snapshot = ( self._create_group_snapshot(group_id=src_group.id, group_type_id=group_type.id)) location = str({'vol_hex_id': '2000'}) snapshot = self._create_snapshot(volume_id=src_vol.id, provider_location=location, group_snapshot_id=group_snapshot.id) group = self._create_group(host=TEST_GROUP_HOST, group_type_id=group_type.id) volume = self._create_volume(group_id=group.id) mock_get_flashcopy.side_effect = [[TEST_FLASHCOPY], {}] mock_create_lun.return_value = '2200' model_update, volumes_model_update = ( self.driver.create_group_from_src( 
self.ctxt, group, [volume], group_snapshot, [snapshot], None, None)) self.assertEqual( '2200', volumes_model_update[0]['metadata']['vol_hex_id']) self.assertEqual(fields.GroupStatus.AVAILABLE, model_update['status']) @mock.patch.object(eventlet, 'sleep') @mock.patch.object(helper.DS8KCommonHelper, 'get_flashcopy') def test_create_group_from_replication_group(self, mock_get_flashcopy, mock_sleep): """create group from replication group.""" self.configuration.replication_device = [TEST_REPLICATION_DEVICE] self.driver = FakeDS8KProxy(self.storage_info, self.logger, self.exception, self) self.driver.setup(self.ctxt) group_type = group_types.create( self.ctxt, 'group', {'group_replication_enabled': ' True'} ) src_group = self._create_group(host=TEST_GROUP_HOST, group_type_id=group_type.id) vol_type = volume_types.create(self.ctxt, 'VOL_TYPE', {'replication_enabled': ' True'}) location = str({'vol_hex_id': TEST_VOLUME_ID}) data = json.dumps( {TEST_TARGET_DS8K_IP: {'vol_hex_id': TEST_VOLUME_ID}}) src_volume = self._create_volume(volume_type_id=vol_type.id, provider_location=location, replication_driver_data=data, group_id=src_group.id) group = self._create_group(host=TEST_GROUP_HOST, group_type_id=group_type.id) volume = self._create_volume(group_id=group.id) mock_get_flashcopy.side_effect = [[TEST_FLASHCOPY], {}] model_update, volumes_model_update = self.driver.create_group_from_src( self.ctxt, group, [volume], None, None, src_group, [src_volume]) self.assertEqual(fields.GroupStatus.AVAILABLE, model_update['status']) self.assertEqual(fields.ReplicationStatus.ENABLED, model_update['replication_status']) @mock.patch.object(eventlet, 'sleep') @mock.patch.object(helper.DS8KCommonHelper, 'get_pprc_pairs') def test_failover_host_successfully(self, mock_get_pprc_pairs, mock_sleep): """Failover host to valid secondary successfully.""" self.configuration.replication_device = [TEST_REPLICATION_DEVICE] self.driver = FakeDS8KProxy(self.storage_info, self.logger, self.exception, self) self.driver.setup(self.ctxt) vol_type = volume_types.create(self.ctxt, 'VOL_TYPE', {'replication_enabled': ' True'}) location = str({'vol_hex_id': TEST_VOLUME_ID}) data = json.dumps( {TEST_TARGET_DS8K_IP: {'vol_hex_id': TEST_VOLUME_ID}}) metadata = [{'key': 'data_type', 'value': 'FB 512'}] volume = self._create_volume(volume_type_id=vol_type.id, provider_location=location, replication_driver_data=data, volume_metadata=metadata) pprc_pairs = copy.deepcopy(FAKE_GET_PPRCS_RESPONSE['data']['pprcs']) pprc_pairs[0]['state'] = 'suspended' mock_get_pprc_pairs.side_effect = [pprc_pairs] secondary_id, volume_update_list, __ = self.driver.failover_host( self.ctxt, [volume], TEST_TARGET_DS8K_IP, []) self.assertEqual(TEST_TARGET_DS8K_IP, secondary_id) @mock.patch.object(eventlet, 'sleep') @mock.patch.object(helper.DS8KCommonHelper, 'get_pprc_pairs') def test_failover_host_with_group(self, mock_get_pprc_pairs, mock_sleep): """Failover host with group.""" self.configuration.replication_device = [TEST_REPLICATION_DEVICE] self.driver = FakeDS8KProxy(self.storage_info, self.logger, self.exception, self) self.driver.setup(self.ctxt) group_type = group_types.create( self.ctxt, 'group', {'group_replication_enabled': ' True'} ) group = self._create_group(host=TEST_GROUP_HOST, group_type_id=group_type.id, replication_status='enabled') vol_type = volume_types.create(self.ctxt, 'VOL_TYPE', {'replication_enabled': ' True'}) location = str({'vol_hex_id': TEST_VOLUME_ID}) data = json.dumps( {TEST_TARGET_DS8K_IP: {'vol_hex_id': TEST_VOLUME_ID}}) metadata = 
[{'key': 'data_type', 'value': 'FB 512'}] volume = self._create_volume(volume_type_id=vol_type.id, provider_location=location, replication_driver_data=data, volume_metadata=metadata, group_id=group.id) pprc_pairs = copy.deepcopy(FAKE_GET_PPRCS_RESPONSE['data']['pprcs']) pprc_pairs[0]['state'] = 'suspended' mock_get_pprc_pairs.side_effect = [pprc_pairs] secondary_id, volume_update_list, group_update_list = ( self.driver.failover_host(self.ctxt, [volume], TEST_TARGET_DS8K_IP, [group])) self.assertEqual(TEST_TARGET_DS8K_IP, secondary_id) volume_update = volume_update_list[0] self.assertEqual(volume_update['volume_id'], volume.id) self.assertEqual(fields.ReplicationStatus.FAILED_OVER, volume_update['updates']['replication_status']) group_update = group_update_list[0] self.assertEqual(group_update['group_id'], group.id) self.assertEqual(fields.ReplicationStatus.FAILED_OVER, group_update['updates']['replication_status']) @mock.patch.object(eventlet, 'sleep') @mock.patch.object(helper.DS8KCommonHelper, 'get_pprc_pairs') def test_failover_host_with_group_failed_over(self, mock_get_pprc_pairs, mock_sleep): """Failover host with group that has been failed over.""" self.configuration.replication_device = [TEST_REPLICATION_DEVICE] self.driver = FakeDS8KProxy(self.storage_info, self.logger, self.exception, self) self.driver.setup(self.ctxt) group_type = group_types.create( self.ctxt, 'group', {'group_replication_enabled': ' True'} ) group = self._create_group(host=TEST_GROUP_HOST, group_type_id=group_type.id, replication_status='failed-over') vol_type = volume_types.create(self.ctxt, 'VOL_TYPE', {'replication_enabled': ' True'}) location = str({'vol_hex_id': TEST_VOLUME_ID}) data = json.dumps( {'default': {'vol_hex_id': TEST_VOLUME_ID}}) metadata = [{'key': 'data_type', 'value': 'FB 512'}] volume = self._create_volume(volume_type_id=vol_type.id, provider_location=location, replication_driver_data=data, volume_metadata=metadata, group_id=group.id) pprc_pairs = copy.deepcopy(FAKE_GET_PPRCS_RESPONSE['data']['pprcs']) pprc_pairs[0]['state'] = 'suspended' mock_get_pprc_pairs.side_effect = [pprc_pairs] secondary_id, volume_update_list, group_update_list = ( self.driver.failover_host(self.ctxt, [volume], TEST_TARGET_DS8K_IP, [group])) self.assertEqual(TEST_TARGET_DS8K_IP, secondary_id) self.assertEqual(volume_update_list, []) self.assertEqual(group_update_list, []) @mock.patch.object(replication.Replication, 'start_host_pprc_failover') def test_failover_host_failed(self, mock_host_pprc_failover): """Failover host should raise exception when failed.""" self.configuration.replication_device = [TEST_REPLICATION_DEVICE] self.driver = FakeDS8KProxy(self.storage_info, self.logger, self.exception, self) self.driver.setup(self.ctxt) vol_type = volume_types.create(self.ctxt, 'VOL_TYPE', {'replication_enabled': ' True'}) location = str({'vol_hex_id': TEST_VOLUME_ID}) data = json.dumps( {TEST_TARGET_DS8K_IP: {'vol_hex_id': TEST_VOLUME_ID}}) metadata = [{'key': 'data_type', 'value': 'FB 512'}] volume = self._create_volume(volume_type_id=vol_type.id, provider_location=location, replication_driver_data=data, volume_metadata=metadata) mock_host_pprc_failover.side_effect = ( restclient.APIException('failed to do failover.')) self.assertRaises(exception.UnableToFailOver, self.driver.failover_host, self.ctxt, [volume], TEST_TARGET_DS8K_IP, []) def test_failover_host_to_invalid_target(self): """Failover host to invalid secondary should fail.""" self.configuration.replication_device = [TEST_REPLICATION_DEVICE] self.driver = 
FakeDS8KProxy(self.storage_info, self.logger, self.exception, self) self.driver.setup(self.ctxt) vol_type = volume_types.create(self.ctxt, 'VOL_TYPE', {'replication_enabled': ' True'}) location = str({'vol_hex_id': TEST_VOLUME_ID}) data = json.dumps( {TEST_TARGET_DS8K_IP: {'vol_hex_id': TEST_VOLUME_ID}}) volume = self._create_volume(volume_type_id=vol_type.id, provider_location=location, replication_driver_data=data) self.assertRaises(exception.InvalidReplicationTarget, self.driver.failover_host, self.ctxt, [volume], 'fake_target', []) def test_failover_host_that_has_been_failed_over(self): """Failover host that has been failed over should just return.""" self.configuration.replication_device = [TEST_REPLICATION_DEVICE] self.driver = FakeDS8KProxy(self.storage_info, self.logger, self.exception, self, TEST_TARGET_DS8K_IP) self.driver.setup(self.ctxt) vol_type = volume_types.create(self.ctxt, 'VOL_TYPE', {'replication_enabled': ' True'}) location = str({'vol_hex_id': TEST_VOLUME_ID}) data = json.dumps( {'default': {'vol_hex_id': TEST_VOLUME_ID}}) volume = self._create_volume(volume_type_id=vol_type.id, provider_location=location, replication_driver_data=data) secondary_id, volume_update_list, __ = self.driver.failover_host( self.ctxt, [volume], TEST_TARGET_DS8K_IP, []) self.assertEqual(TEST_TARGET_DS8K_IP, secondary_id) self.assertEqual([], volume_update_list) def test_failback_host_that_has_been_failed_back(self): """Failback host that has been failed back should just return.""" self.configuration.replication_device = [TEST_REPLICATION_DEVICE] self.driver = FakeDS8KProxy(self.storage_info, self.logger, self.exception, self) self.driver.setup(self.ctxt) vol_type = volume_types.create(self.ctxt, 'VOL_TYPE', {'replication_enabled': ' True'}) location = str({'vol_hex_id': TEST_VOLUME_ID}) data = json.dumps( {'default': {'vol_hex_id': TEST_VOLUME_ID}}) volume = self._create_volume(volume_type_id=vol_type.id, provider_location=location, replication_driver_data=data) secondary_id, volume_update_list, __ = self.driver.failover_host( self.ctxt, [volume], 'default', []) self.assertIsNone(secondary_id) self.assertEqual([], volume_update_list) @mock.patch.object(eventlet, 'sleep') @mock.patch.object(helper.DS8KCommonHelper, 'get_pprc_pairs') def test_failback_host_successfully(self, mock_get_pprc_pairs, mock_sleep): """Failback host to primary successfully.""" self.configuration.replication_device = [TEST_REPLICATION_DEVICE] self.driver = FakeDS8KProxy(self.storage_info, self.logger, self.exception, self, TEST_TARGET_DS8K_IP) self.driver.setup(self.ctxt) vol_type = volume_types.create(self.ctxt, 'VOL_TYPE', {'replication_enabled': ' True'}) location = str({'vol_hex_id': TEST_VOLUME_ID}) data = json.dumps( {'default': {'vol_hex_id': TEST_VOLUME_ID}}) metadata = [{'key': 'data_type', 'value': 'FB 512'}] volume = self._create_volume(volume_type_id=vol_type.id, provider_location=location, replication_driver_data=data, volume_metadata=metadata) pprc_pairs_full_duplex = FAKE_GET_PPRCS_RESPONSE['data']['pprcs'] pprc_pairs_suspended = copy.deepcopy(pprc_pairs_full_duplex) pprc_pairs_suspended[0]['state'] = 'suspended' mock_get_pprc_pairs.side_effect = [pprc_pairs_full_duplex, pprc_pairs_suspended, pprc_pairs_full_duplex] secondary_id, volume_update_list, __ = self.driver.failover_host( self.ctxt, [volume], 'default', []) self.assertEqual('default', secondary_id) @mock.patch.object(replication.Replication, 'start_host_pprc_failback') def test_failback_host_failed(self, mock_start_host_pprc_failback): 
"""Failback host should raise exception when failed.""" self.configuration.replication_device = [TEST_REPLICATION_DEVICE] self.driver = FakeDS8KProxy(self.storage_info, self.logger, self.exception, self, TEST_TARGET_DS8K_IP) self.driver.setup(self.ctxt) vol_type = volume_types.create(self.ctxt, 'VOL_TYPE', {'replication_enabled': ' True'}) location = str({'vol_hex_id': TEST_VOLUME_ID}) data = json.dumps( {'default': {'vol_hex_id': TEST_VOLUME_ID}}) volume = self._create_volume(volume_type_id=vol_type.id, provider_location=location, replication_driver_data=data) mock_start_host_pprc_failback.side_effect = ( restclient.APIException('failed to do failback.')) self.assertRaises(exception.UnableToFailOver, self.driver.failover_host, self.ctxt, [volume], 'default', []) @mock.patch.object(helper.DS8KCommonHelper, 'get_pprc_pairs') def test_enable_replication_successfully(self, mock_get_pprc_pairs): """Enable replication for the group successfully.""" self.configuration.replication_device = [TEST_REPLICATION_DEVICE] self.driver = FakeDS8KProxy(self.storage_info, self.logger, self.exception, self) self.driver.setup(self.ctxt) group_type = group_types.create( self.ctxt, 'group', {'consistent_group_snapshot_enabled': ' True'} ) group = self._create_group(host=TEST_GROUP_HOST, group_type_id=group_type.id, replication_status='disabled') vol_type = volume_types.create(self.ctxt, 'VOL_TYPE', {'replication_enabled': ' True'}) location = str({'vol_hex_id': TEST_VOLUME_ID}) data = json.dumps( {TEST_TARGET_DS8K_IP: {'vol_hex_id': TEST_VOLUME_ID}}) metadata = [{'key': 'data_type', 'value': 'FB 512'}] volume = self._create_volume(volume_type_id=vol_type.id, provider_location=location, replication_driver_data=data, volume_metadata=metadata, group_id=group.id, replication_status='disabled') pprc_pairs = copy.deepcopy(FAKE_GET_PPRCS_RESPONSE['data']['pprcs']) pprc_pairs[0]['state'] = 'suspended' mock_get_pprc_pairs.side_effect = [pprc_pairs] model_update, volumes_update_list = self.driver.enable_replication( self.ctxt, group, [volume]) self.assertEqual(fields.ReplicationStatus.ENABLED, model_update.get('replication_status')) for volume_update in volumes_update_list: self.assertEqual(fields.ReplicationStatus.ENABLED, volume_update.get('replication_status')) @mock.patch.object(helper.DS8KCommonHelper, 'get_pprc_pairs') def test_enable_replication_if_pprc_in_invalid_state( self, mock_get_pprc_pairs): """Enable replication but pprc relationship is in invalid state.""" self.configuration.replication_device = [TEST_REPLICATION_DEVICE] self.driver = FakeDS8KProxy(self.storage_info, self.logger, self.exception, self) self.driver.setup(self.ctxt) group_type = group_types.create( self.ctxt, 'group', {'consistent_group_snapshot_enabled': ' True'} ) group = self._create_group(host=TEST_GROUP_HOST, group_type_id=group_type.id, replication_status='disabled') vol_type = volume_types.create(self.ctxt, 'VOL_TYPE', {'replication_enabled': ' True'}) location = str({'vol_hex_id': TEST_VOLUME_ID}) data = json.dumps( {TEST_TARGET_DS8K_IP: {'vol_hex_id': TEST_VOLUME_ID}}) metadata = [{'key': 'data_type', 'value': 'FB 512'}] volume = self._create_volume(volume_type_id=vol_type.id, provider_location=location, replication_driver_data=data, volume_metadata=metadata, group_id=group.id, replication_status='disabled') pprc_pairs = copy.deepcopy(FAKE_GET_PPRCS_RESPONSE['data']['pprcs']) pprc_pairs[0]['state'] = 'invalid' mock_get_pprc_pairs.side_effect = [pprc_pairs] self.assertRaises(exception.VolumeDriverException, 
self.driver.enable_replication, self.ctxt, group, [volume]) @mock.patch.object(helper.DS8KCommonHelper, 'resume_pprc_pairs') @mock.patch.object(helper.DS8KCommonHelper, 'get_pprc_pairs') def test_enable_replication_but_resume_fails(self, mock_get_pprc_pairs, mock_resume_pprc_pairs): """Enable replication but resume fails.""" self.configuration.replication_device = [TEST_REPLICATION_DEVICE] self.driver = FakeDS8KProxy(self.storage_info, self.logger, self.exception, self) self.driver.setup(self.ctxt) group_type = group_types.create( self.ctxt, 'group', {'consistent_group_snapshot_enabled': ' True'} ) group = self._create_group(host=TEST_GROUP_HOST, group_type_id=group_type.id, replication_status='disabled') vol_type = volume_types.create(self.ctxt, 'VOL_TYPE', {'replication_enabled': ' True'}) location = str({'vol_hex_id': TEST_VOLUME_ID}) data = json.dumps( {TEST_TARGET_DS8K_IP: {'vol_hex_id': TEST_VOLUME_ID}}) metadata = [{'key': 'data_type', 'value': 'FB 512'}] volume = self._create_volume(volume_type_id=vol_type.id, provider_location=location, replication_driver_data=data, volume_metadata=metadata, group_id=group.id, replication_status='disabled') pprc_pairs = copy.deepcopy(FAKE_GET_PPRCS_RESPONSE['data']['pprcs']) pprc_pairs[0]['state'] = 'suspended' mock_get_pprc_pairs.side_effect = [pprc_pairs] mock_resume_pprc_pairs.side_effect = ( restclient.APIException('failed to resume replication.')) self.assertRaises(exception.VolumeDriverException, self.driver.enable_replication, self.ctxt, group, [volume]) @mock.patch.object(helper.DS8KCommonHelper, 'get_pprc_pairs') def test_disable_replication_successfully(self, mock_get_pprc_pairs): """Disable replication for the group successfully.""" self.configuration.replication_device = [TEST_REPLICATION_DEVICE] self.driver = FakeDS8KProxy(self.storage_info, self.logger, self.exception, self) self.driver.setup(self.ctxt) group_type = group_types.create( self.ctxt, 'group', {'consistent_group_snapshot_enabled': ' True'} ) group = self._create_group(host=TEST_GROUP_HOST, group_type_id=group_type.id, replication_status='enabled') vol_type = volume_types.create(self.ctxt, 'VOL_TYPE', {'replication_enabled': ' True'}) location = str({'vol_hex_id': TEST_VOLUME_ID}) data = json.dumps( {TEST_TARGET_DS8K_IP: {'vol_hex_id': TEST_VOLUME_ID}}) metadata = [{'key': 'data_type', 'value': 'FB 512'}] volume = self._create_volume(volume_type_id=vol_type.id, provider_location=location, replication_driver_data=data, volume_metadata=metadata, group_id=group.id, replication_status='enabled') pprc_pairs = copy.deepcopy(FAKE_GET_PPRCS_RESPONSE['data']['pprcs']) pprc_pairs[0]['state'] = 'full_duplex' mock_get_pprc_pairs.side_effect = [pprc_pairs] model_update, volumes_update_list = self.driver.disable_replication( self.ctxt, group, [volume]) self.assertEqual(fields.ReplicationStatus.DISABLED, model_update.get('replication_status')) for volume_update in volumes_update_list: self.assertEqual(fields.ReplicationStatus.DISABLED, volume_update.get('replication_status')) @mock.patch.object(helper.DS8KCommonHelper, 'get_pprc_pairs') def test_disable_replication_if_pprc_in_invalid_state( self, mock_get_pprc_pairs): """Disable replication but pprc relationship is in invalid state.""" self.configuration.replication_device = [TEST_REPLICATION_DEVICE] self.driver = FakeDS8KProxy(self.storage_info, self.logger, self.exception, self) self.driver.setup(self.ctxt) group_type = group_types.create( self.ctxt, 'group', {'consistent_group_snapshot_enabled': ' True'} ) group = 
self._create_group(host=TEST_GROUP_HOST, group_type_id=group_type.id, replication_status='enabled') vol_type = volume_types.create(self.ctxt, 'VOL_TYPE', {'replication_enabled': ' True'}) location = str({'vol_hex_id': TEST_VOLUME_ID}) data = json.dumps( {TEST_TARGET_DS8K_IP: {'vol_hex_id': TEST_VOLUME_ID}}) metadata = [{'key': 'data_type', 'value': 'FB 512'}] volume = self._create_volume(volume_type_id=vol_type.id, provider_location=location, replication_driver_data=data, volume_metadata=metadata, group_id=group.id, replication_status='enabled') pprc_pairs = copy.deepcopy(FAKE_GET_PPRCS_RESPONSE['data']['pprcs']) pprc_pairs[0]['state'] = 'invalid' mock_get_pprc_pairs.side_effect = [pprc_pairs] self.assertRaises(exception.VolumeDriverException, self.driver.disable_replication, self.ctxt, group, [volume]) @mock.patch.object(helper.DS8KCommonHelper, 'pause_pprc_pairs') @mock.patch.object(helper.DS8KCommonHelper, 'get_pprc_pairs') def test_disable_replication_but_pause_fails(self, mock_get_pprc_pairs, mock_pause_pprc_pairs): """Disable replication but pause fails.""" self.configuration.replication_device = [TEST_REPLICATION_DEVICE] self.driver = FakeDS8KProxy(self.storage_info, self.logger, self.exception, self) self.driver.setup(self.ctxt) group_type = group_types.create( self.ctxt, 'group', {'consistent_group_snapshot_enabled': ' True'} ) group = self._create_group(host=TEST_GROUP_HOST, group_type_id=group_type.id, replication_status='enabled') vol_type = volume_types.create(self.ctxt, 'VOL_TYPE', {'replication_enabled': ' True'}) location = str({'vol_hex_id': TEST_VOLUME_ID}) data = json.dumps( {TEST_TARGET_DS8K_IP: {'vol_hex_id': TEST_VOLUME_ID}}) metadata = [{'key': 'data_type', 'value': 'FB 512'}] volume = self._create_volume(volume_type_id=vol_type.id, provider_location=location, replication_driver_data=data, volume_metadata=metadata, group_id=group.id, replication_status='enabled') pprc_pairs = copy.deepcopy(FAKE_GET_PPRCS_RESPONSE['data']['pprcs']) pprc_pairs[0]['state'] = 'full_duplex' mock_get_pprc_pairs.side_effect = [pprc_pairs] mock_pause_pprc_pairs.side_effect = ( restclient.APIException('failed to pause replication.')) self.assertRaises(exception.VolumeDriverException, self.driver.disable_replication, self.ctxt, group, [volume]) @mock.patch.object(eventlet, 'sleep') @mock.patch.object(helper.DS8KCommonHelper, 'get_pprc_pairs') def test_failover_group_successfully(self, mock_get_pprc_pairs, mock_sleep): """Failover group to valid secondary successfully.""" self.configuration.replication_device = [TEST_REPLICATION_DEVICE] self.driver = FakeDS8KProxy(self.storage_info, self.logger, self.exception, self) self.driver.setup(self.ctxt) group_type = group_types.create( self.ctxt, 'group', {'consistent_group_snapshot_enabled': ' True'} ) group = self._create_group(host=TEST_GROUP_HOST, group_type_id=group_type.id) vol_type = volume_types.create(self.ctxt, 'VOL_TYPE', {'replication_enabled': ' True'}) location = str({'vol_hex_id': TEST_VOLUME_ID}) data = json.dumps( {TEST_TARGET_DS8K_IP: {'vol_hex_id': TEST_VOLUME_ID}}) metadata = [{'key': 'data_type', 'value': 'FB 512'}] volume = self._create_volume(volume_type_id=vol_type.id, provider_location=location, replication_driver_data=data, volume_metadata=metadata, group_id=group.id) pprc_pairs_1 = copy.deepcopy(FAKE_GET_PPRCS_RESPONSE['data']['pprcs']) pprc_pairs_1[0]['state'] = 'suspended' pprc_pairs_2 = copy.deepcopy(FAKE_GET_PPRCS_RESPONSE['data']['pprcs']) pprc_pairs_2[0]['state'] = 'full_duplex' mock_get_pprc_pairs.side_effect = 
[pprc_pairs_1, pprc_pairs_2] model_update, volumes_update_list = self.driver.failover_replication( self.ctxt, group, [volume], TEST_TARGET_DS8K_IP) self.assertEqual(fields.ReplicationStatus.FAILED_OVER, model_update.get('replication_status')) for volume_update in volumes_update_list: self.assertEqual(fields.ReplicationStatus.FAILED_OVER, volume_update.get('replication_status')) @mock.patch.object(replication.Replication, 'start_group_pprc_failover') def test_failover_group_failed(self, mock_group_pprc_failover): """Failover group should raise exception when failed.""" self.configuration.replication_device = [TEST_REPLICATION_DEVICE] self.driver = FakeDS8KProxy(self.storage_info, self.logger, self.exception, self) self.driver.setup(self.ctxt) group_type = group_types.create( self.ctxt, 'group', {'consistent_group_snapshot_enabled': ' True'} ) group = self._create_group(host=TEST_GROUP_HOST, group_type_id=group_type.id) vol_type = volume_types.create(self.ctxt, 'VOL_TYPE', {'replication_enabled': ' True'}) location = str({'vol_hex_id': TEST_VOLUME_ID}) data = json.dumps( {TEST_TARGET_DS8K_IP: {'vol_hex_id': TEST_VOLUME_ID}}) metadata = [{'key': 'data_type', 'value': 'FB 512'}] volume = self._create_volume(volume_type_id=vol_type.id, provider_location=location, replication_driver_data=data, volume_metadata=metadata, group_id=group.id) mock_group_pprc_failover.side_effect = ( restclient.APIException('failed to failover group.')) self.assertRaises(exception.VolumeDriverException, self.driver.failover_replication, self.ctxt, group, [volume], TEST_TARGET_DS8K_IP) def test_failover_group_to_invalid_target(self): """Failover group to invalid secondary should fail.""" self.configuration.replication_device = [TEST_REPLICATION_DEVICE] self.driver = FakeDS8KProxy(self.storage_info, self.logger, self.exception, self) self.driver.setup(self.ctxt) group_type = group_types.create( self.ctxt, 'group', {'consistent_group_snapshot_enabled': ' True'} ) group = self._create_group(host=TEST_GROUP_HOST, group_type_id=group_type.id) vol_type = volume_types.create(self.ctxt, 'VOL_TYPE', {'replication_enabled': ' True'}) location = str({'vol_hex_id': TEST_VOLUME_ID}) data = json.dumps( {TEST_TARGET_DS8K_IP: {'vol_hex_id': TEST_VOLUME_ID}}) volume = self._create_volume(volume_type_id=vol_type.id, provider_location=location, replication_driver_data=data, group_id=group.id) self.assertRaises(exception.InvalidReplicationTarget, self.driver.failover_replication, self.ctxt, group, [volume], 'fake_target') def test_failover_group_that_has_been_failed_over(self): """Failover group that has been failed over should just return.""" self.configuration.replication_device = [TEST_REPLICATION_DEVICE] self.driver = FakeDS8KProxy(self.storage_info, self.logger, self.exception, self) self.driver.setup(self.ctxt) group_type = group_types.create( self.ctxt, 'group', {'consistent_group_snapshot_enabled': ' True'} ) group = self._create_group(host=TEST_GROUP_HOST, group_type_id=group_type.id, replication_status='failed-over') vol_type = volume_types.create(self.ctxt, 'VOL_TYPE', {'replication_enabled': ' True'}) location = str({'vol_hex_id': TEST_VOLUME_ID}) data = json.dumps( {'default': {'vol_hex_id': TEST_VOLUME_ID}}) volume = self._create_volume(volume_type_id=vol_type.id, provider_location=location, replication_driver_data=data, group_id=group.id, replication_status='failed-over') model_update, volumes_update_list = self.driver.failover_replication( self.ctxt, group, [volume], TEST_TARGET_DS8K_IP) self.assertEqual({}, model_update) 
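# Editor's note: an illustrative, self-contained sketch (not part of the test
# suite) of the mocking pattern the replication tests above rely on.  Assigning
# an iterable to ``side_effect`` makes each successive call to the mock return
# the next element, which is how these tests feed ``get_pprc_pairs`` a sequence
# of PPRC pair states (e.g. 'suspended' first, then 'full_duplex').  The
# ``poll_pprc_state`` helper below is hypothetical and exists only to show the
# pattern.
from unittest import mock


def poll_pprc_state(helper):
    # Hypothetical poller: query the helper twice and report both states.
    first = helper.get_pprc_pairs()[0]['state']
    second = helper.get_pprc_pairs()[0]['state']
    return first, second


if __name__ == '__main__':
    fake_helper = mock.Mock()
    fake_helper.get_pprc_pairs.side_effect = [
        [{'state': 'suspended'}],    # returned by the first call
        [{'state': 'full_duplex'}],  # returned by the second call
    ]
    assert poll_pprc_state(fake_helper) == ('suspended', 'full_duplex')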
self.assertEqual([], volumes_update_list) def test_failback_group_that_has_been_failed_back(self): """Failback group that has been failed back should just return.""" self.configuration.replication_device = [TEST_REPLICATION_DEVICE] self.driver = FakeDS8KProxy(self.storage_info, self.logger, self.exception, self, TEST_TARGET_DS8K_IP) self.driver.setup(self.ctxt) group_type = group_types.create( self.ctxt, 'group', {'consistent_group_snapshot_enabled': ' True'} ) group = self._create_group(host=TEST_GROUP_HOST, group_type_id=group_type.id, replication_status='enabled') vol_type = volume_types.create(self.ctxt, 'VOL_TYPE', {'replication_enabled': ' True'}) location = str({'vol_hex_id': TEST_VOLUME_ID}) data = json.dumps( {TEST_TARGET_DS8K_IP: {'vol_hex_id': TEST_VOLUME_ID}}) volume = self._create_volume(volume_type_id=vol_type.id, provider_location=location, replication_driver_data=data, group_id=group.id, replication_status='available') model_update, volume_update_list = self.driver.failover_replication( self.ctxt, group, [volume], 'default') self.assertEqual({}, model_update) self.assertEqual([], volume_update_list) @mock.patch.object(eventlet, 'sleep') @mock.patch.object(helper.DS8KCommonHelper, 'get_pprc_pairs') @mock.patch.object(replication.MetroMirrorManager, 'do_pprc_failback') def test_start_group_pprc_failover(self, mock_do_pprc_failback, mock_get_pprc_pairs, mock_sleep): """group failover should not invoke do_pprc_failback.""" self.configuration.replication_device = [TEST_REPLICATION_DEVICE] self.driver = FakeDS8KProxy(self.storage_info, self.logger, self.exception, self) self.driver.setup(self.ctxt) group_type = group_types.create( self.ctxt, 'group', {'consistent_group_snapshot_enabled': ' True'} ) group = self._create_group(host=TEST_GROUP_HOST, group_type_id=group_type.id) vol_type = volume_types.create(self.ctxt, 'VOL_TYPE', {'replication_enabled': ' True'}) location = str({'vol_hex_id': TEST_VOLUME_ID}) data = json.dumps( {TEST_TARGET_DS8K_IP: {'vol_hex_id': TEST_VOLUME_ID}}) metadata = [{'key': 'data_type', 'value': 'FB 512'}] volume = self._create_volume(volume_type_id=vol_type.id, provider_location=location, replication_driver_data=data, volume_metadata=metadata, group_id=group.id) pprc_pairs_1 = copy.deepcopy(FAKE_GET_PPRCS_RESPONSE['data']['pprcs']) pprc_pairs_1[0]['state'] = 'suspended' pprc_pairs_2 = copy.deepcopy(FAKE_GET_PPRCS_RESPONSE['data']['pprcs']) pprc_pairs_2[0]['state'] = 'full_duplex' mock_get_pprc_pairs.side_effect = [pprc_pairs_1] self.driver.failover_replication(self.ctxt, group, [volume], TEST_TARGET_DS8K_IP) self.assertFalse(mock_do_pprc_failback.called) @mock.patch.object(eventlet, 'sleep') @mock.patch.object(helper.DS8KCommonHelper, 'get_pprc_pairs') def test_start_group_pprc_failback(self, mock_get_pprc_pairs, mock_sleep): """Failback group should invoke pprc failback.""" self.configuration.replication_device = [TEST_REPLICATION_DEVICE] self.driver = FakeDS8KProxy(self.storage_info, self.logger, self.exception, self) self.driver.setup(self.ctxt) group_type = group_types.create( self.ctxt, 'group', {'consistent_group_snapshot_enabled': ' True'} ) group = self._create_group(host=TEST_GROUP_HOST, group_type_id=group_type.id) vol_type = volume_types.create(self.ctxt, 'VOL_TYPE', {'replication_enabled': ' True'}) location = str({'vol_hex_id': TEST_VOLUME_ID}) metadata = [{'key': 'data_type', 'value': 'FB 512'}] data = json.dumps( {'default': {'vol_hex_id': TEST_VOLUME_ID_2}}) volume = self._create_volume(volume_type_id=vol_type.id, 
provider_location=location, replication_driver_data=data, volume_metadata=metadata, group_id=group.id) pprc_pairs_1 = copy.deepcopy(FAKE_GET_PPRCS_RESPONSE['data']['pprcs']) pprc_pairs_1[0]['state'] = 'suspended' pprc_pairs_1[0]['source_volume']['name'] = TEST_VOLUME_ID_2 pprc_pairs_2 = copy.deepcopy(FAKE_GET_PPRCS_RESPONSE['data']['pprcs']) pprc_pairs_2[0]['state'] = 'full_duplex' pprc_pairs_3 = copy.deepcopy(FAKE_GET_PPRCS_RESPONSE['data']['pprcs']) pprc_pairs_3[0]['state'] = 'full_duplex' mock_get_pprc_pairs.side_effect = [pprc_pairs_1, pprc_pairs_2, pprc_pairs_3] self.driver.failover_replication(self.ctxt, group, [volume], 'default') self.assertTrue(mock_get_pprc_pairs.called) @mock.patch('cinder.volume.volume_utils.CONF') def test_create_volume_with_template(self, mock_conf): self.driver = FakeDS8KProxy(self.storage_info, self.logger, self.exception, self) self.driver.setup(self.ctxt) mock_conf.volume_name_template = 'volume-%s' vol_id = 'd403b4d9-473a-42d0-94c5-be45a1268928' vol_name = mock_conf.volume_name_template % vol_id volume = self._create_volume(id=vol_id) lun = ds8kproxy.Lun(volume) exp_vol_name = helper.filter_alnum(vol_name)[:16] self.assertEqual(lun.ds_name, exp_vol_name) @mock.patch.object(eventlet, 'sleep') @mock.patch.object(helper.DS8KCommonHelper, 'get_flashcopy') def test_create_snapshot_with_tmpt(self, mock_get_flashcopy, mock_sleep): """test a successful creation of snapshot.""" self.driver = FakeDS8KProxy(self.storage_info, self.logger, self.exception, self) self.driver.setup(self.ctxt) vol_type = volume_types.create(self.ctxt, 'VOL_TYPE', {}) location = str({'vol_hex_id': '0002'}) volume = self._create_volume(volume_type_id=vol_type.id, provider_location=location) snapshot = self._create_snapshot(volume_id=volume.id) mock_get_flashcopy.side_effect = [[TEST_FLASHCOPY], {}] snapshot_update = self.driver.create_snapshot(snapshot) location = ast.literal_eval(snapshot_update['provider_location']) self.assertEqual(TEST_VOLUME_ID, location['vol_hex_id']) lun = ds8kproxy.Lun(snapshot, is_snapshot=True) exp_snap_name = helper.filter_alnum(snapshot.name)[:16] self.assertIn(lun.ds_name, exp_snap_name) @mock.patch.object(eventlet, 'sleep') def test_create_fb_replicated_volume_with_tmpt(self, mock_sleep): """create FB volume when enable replication.""" self.configuration.replication_device = [TEST_REPLICATION_DEVICE] self.driver = FakeDS8KProxy(self.storage_info, self.logger, self.exception, self) self.driver.setup(self.ctxt) extra_spec = {'replication_enabled': ' True'} vol_type = volume_types.create(self.ctxt, 'VOL_TYPE', extra_spec) volume = self._create_volume(volume_type_id=vol_type.id) lun = ds8kproxy.Lun(volume) exp_repl_name = helper.filter_alnum(volume.name)[:16] self.assertEqual(lun.replica_ds_name, exp_repl_name) @mock.patch.object(eventlet, 'sleep') @mock.patch.object(helper.DS8KCommonHelper, 'get_flashcopy') def test_revert_to_snapshot_normal_vol(self, mock_get_flashcopy, mock_sleep): self.driver = FakeDS8KProxy(self.storage_info, self.logger, self.exception, self) self.driver.setup(self.ctxt) vol_type = testutils.create_volume_type(self.ctxt, name='VOL_TYPE') location = str({'vol_hex_id': '0002'}) volume = self._create_volume(volume_type_id=vol_type.id, provider_location=location) snapshot = self._create_snapshot(volume_id=volume.id) mock_get_flashcopy.side_effect = [[TEST_FLASHCOPY], {}] self.driver.revert_to_snapshot(self.ctxt, volume, snapshot) @mock.patch.object(eventlet, 'sleep') @mock.patch.object(helper.DS8KCommonHelper, 'get_flashcopy') def 
test_revert_to_snapshot_replication_vol(self, mock_get_flashcopy, mock_sleep): """test a successful creation of snapshot.""" self.configuration.replication_device = [TEST_REPLICATION_DEVICE] self.driver = FakeDS8KProxy(self.storage_info, self.logger, self.exception, self) self.driver.setup(self.ctxt) extra_specs = {'replication_enabled': ' True'} vol_type = testutils.create_volume_type(self.ctxt, name='VOL_TYPE', extra_specs=extra_specs) location = str({'vol_hex_id': '0002'}) volume = self._create_volume(volume_type_id=vol_type.id, provider_location=location) snapshot = self._create_snapshot(volume_id=volume.id) mock_get_flashcopy.side_effect = [[TEST_FLASHCOPY], {}] self.assertRaises(exception.VolumeDriverException, self.driver.revert_to_snapshot, self.ctxt, volume, snapshot) @mock.patch.object(eventlet, 'sleep') @mock.patch.object(helper.DS8KCommonHelper, 'get_flashcopy') def test_revert_to_snapshot_tar_vol_is_in_fc(self, mock_get_flashcopy, mock_sleep): self.driver = FakeDS8KProxy(self.storage_info, self.logger, self.exception, self) self.driver.setup(self.ctxt) vol_type = testutils.create_volume_type(self.ctxt, name='VOL_TYPE') location = str({'vol_hex_id': 'fake_volume_id_2'}) volume = self._create_volume(volume_type_id=vol_type.id, provider_location=location) snapshot = self._create_snapshot(volume_id=volume.id) mock_get_flashcopy.side_effect = [[TEST_FLASHCOPY], {}] self.assertRaises(exception.VolumeBackendAPIException, self.driver.revert_to_snapshot, self.ctxt, volume, snapshot) @mock.patch.object(eventlet, 'sleep') @mock.patch.object(helper.DS8KCommonHelper, 'get_flashcopy') def test_revert_to_snapshot_with_diff_size(self, mock_get_flashcopy, mock_sleep): self.driver = FakeDS8KProxy(self.storage_info, self.logger, self.exception, self) self.driver.setup(self.ctxt) vol_type = testutils.create_volume_type(self.ctxt, name='VOL_TYPE') location = str({'vol_hex_id': 'fake_volume_id_2'}) volume = self._create_volume(volume_type_id=vol_type.id, provider_location=location) snapshot = self._create_snapshot(volume_id=volume.id) volume1 = self._create_volume(volume_type_id=vol_type.id, provider_location=location, size=2) mock_get_flashcopy.side_effect = [[TEST_FLASHCOPY], {}] self.assertRaises(exception.InvalidInput, self.driver.revert_to_snapshot, self.ctxt, volume1, snapshot) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/volume/drivers/ibm/test_ibm_flashsystem.py0000664000175000017500000015235400000000000027024 0ustar00zuulzuul00000000000000# Copyright 2014 IBM Corp. # Copyright 2012 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
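# Editor's note: a minimal, self-contained sketch (not part of either test
# module) of the provider_location round trip the DS8K tests above rely on.
# The tests persist the backend volume id as the ``str()`` of a small dict and
# read it back with ``ast.literal_eval`` before asserting on it; the value
# '2200' below is just an example hex id mirroring the ones used in the tests.
import ast

if __name__ == '__main__':
    provider_location = str({'vol_hex_id': '2200'})   # what the driver stores
    parsed = ast.literal_eval(provider_location)      # what the tests parse
    assert parsed['vol_hex_id'] == '2200'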
# """Tests for the IBM FlashSystem volume driver.""" import random import re from unittest import mock from oslo_concurrency import processutils from oslo_utils import units from cinder import context from cinder import exception from cinder.tests.unit import test from cinder import utils from cinder.volume import configuration as conf from cinder.volume.drivers.ibm import flashsystem_fc from cinder.volume import volume_types from cinder.volume import volume_utils class FlashSystemManagementSimulator(object): def __init__(self): # Default protocol is FC self._protocol = 'FC' self._volumes_list = {} self._hosts_list = {} self._mappings_list = {} self._next_cmd_error = { 'lsnode': '', 'lssystem': '', 'lsmdiskgrp': '' } self._errors = { # CMMVC50000 is a fake error which indicates that command has not # got expected results. This error represents kinds of CLI errors. 'CMMVC50000': ('', 'CMMVC50000 The command can not be executed ' 'successfully.'), 'CMMVC6045E': ('', 'CMMVC6045E The action failed, as the ' '-force flag was not entered.'), 'CMMVC6071E': ('', 'CMMVC6071E The VDisk-to-host mapping ' 'was not created because the VDisk is ' 'already mapped to a host.') } self._multi_host_map_error = None self._multi_host_map_errors = ['CMMVC6045E', 'CMMVC6071E'] @staticmethod def _find_unused_id(d): ids = [] for v in d.values(): ids.append(int(v['id'])) ids.sort() for index, n in enumerate(ids): if n > index: return str(index) return str(len(ids)) @staticmethod def _is_invalid_name(name): if re.match(r'^[a-zA-Z_][\w ._-]*$', name): return False return True @staticmethod def _cmd_to_dict(arg_list): no_param_args = [ 'bytes', 'force' ] one_param_args = [ 'delim', 'hbawwpn', 'host', 'iogrp', 'iscsiname', 'mdiskgrp', 'name', 'scsi', 'size', 'unit' ] # All commands should begin with svcinfo or svctask if arg_list[0] not in ('svcinfo', 'svctask') or len(arg_list) < 2: raise exception.InvalidInput(reason=str(arg_list)) ret = {'cmd': arg_list[1]} skip = False for i in range(2, len(arg_list)): if skip: skip = False continue if arg_list[i][0] == '-': param = arg_list[i][1:] if param in no_param_args: ret[param] = True elif param in one_param_args: ret[param] = arg_list[i + 1] skip = True else: raise exception.InvalidInput( reason=('unrecognized argument %s') % arg_list[i]) else: ret['obj'] = arg_list[i] return ret @staticmethod def _print_cmd_info(rows, delim=' ', nohdr=False, **kwargs): """Generic function for printing information.""" if nohdr: del rows[0] for index in range(len(rows)): rows[index] = delim.join(rows[index]) return ('%s' % '\n'.join(rows), '') @staticmethod def _convert_units_bytes(num, unit): unit_array = ['B', 'KB', 'MB', 'GB', 'TB', 'PB'] unit_index = 0 while unit.lower() != unit_array[unit_index].lower(): num = num * 1024 unit_index += 1 return str(num) def _cmd_lshost(self, **kwargs): """lshost command. svcinfo lshost -delim ! svcinfo lshost -delim ! 
""" if 'obj' not in kwargs: rows = [] rows.append(['id', 'name', 'port_count', 'iogrp_count', 'status']) for host in self._hosts_list.values(): rows.append([host['id'], host['host_name'], '1', '1', 'degraded']) if len(rows) > 1: return self._print_cmd_info(rows=rows, **kwargs) else: return ('', '') else: host_name = kwargs['obj'].strip('\'\"') if host_name not in self._hosts_list: return self._errors['CMMVC50000'] host = self._hosts_list[host_name] rows = [] rows.append(['id', host['id']]) rows.append(['name', host['host_name']]) rows.append(['port_count', '1']) rows.append(['type', 'generic']) rows.append(['mask', '1111']) rows.append(['iogrp_count', '1']) rows.append(['status', 'degraded']) for port in host['iscsi_names']: rows.append(['iscsi_name', port]) rows.append(['node_logged_in_count', '0']) rows.append(['state', 'offline']) for port in host['wwpns']: rows.append(['WWPN', port]) rows.append(['node_logged_in_count', '0']) rows.append(['state', 'active']) if 'delim' in kwargs: for index in range(len(rows)): rows[index] = kwargs['delim'].join(rows[index]) return ('%s' % '\n'.join(rows), '') def _cmd_lshostvdiskmap(self, **kwargs): """svcinfo lshostvdiskmap -delim ! """ if 'obj' not in kwargs: return self._errors['CMMVC50000'] host_name = kwargs['obj'].strip('\'\"') if host_name not in self._hosts_list: return self._errors['CMMVC50000'] rows = [] rows.append(['id', 'name', 'SCSI_id', 'vdisk_id', 'vdisk_name', 'vdisk_UID']) for mapping in self._mappings_list.values(): if (host_name == '') or (mapping['host'] == host_name): volume = self._volumes_list[mapping['vol']] rows.append([mapping['id'], mapping['host'], mapping['lun'], volume['id'], volume['name'], volume['vdisk_UID']]) return self._print_cmd_info(rows=rows, **kwargs) def _cmd_lsmdiskgrp(self, **kwargs): """svcinfo lsmdiskgrp -gui -bytes -delim ! """ status = 'online' if self._next_cmd_error['lsmdiskgrp'] == 'error': self._next_cmd_error['lsmdiskgrp'] = '' return self._errors['CMMVC50000'] if self._next_cmd_error['lsmdiskgrp'] == 'status=offline': self._next_cmd_error['lsmdiskgrp'] = '' status = 'offline' rows = [None] * 2 rows[0] = ['id', 'status', 'mdisk_count', 'vdisk_count', 'capacity', 'free_capacity', 'virtual_capacity', 'used_capacity', 'real_capacity', 'encrypted', 'type', 'encrypt'] rows[1] = ['0', status, '1', '0', '3573412790272', '3529432325160', '1693247906775', '277841182', '38203734097', 'no', 'parent', 'no'] if kwargs['obj'] == 'mdiskgrp0': row = rows[1] else: return self._errors['CMMVC50000'] objrows = [] for idx, val in enumerate(rows[0]): objrows.append([val, row[idx]]) if 'delim' in kwargs: for index in range(len(objrows)): objrows[index] = kwargs['delim'].join(objrows[index]) return ('%s' % '\n'.join(objrows), '') def _cmd_lsnode(self, **kwargs): """lsnode command. svcinfo lsnode -delim ! svcinfo lsnode -delim ! 
""" if self._protocol == 'FC' or self._protocol == 'both': port_status = 'active' else: port_status = 'unconfigured' rows1 = [None] * 7 rows1[0] = ['name', 'node1'] rows1[1] = ['port_id', '000000000000001'] rows1[2] = ['port_status', port_status] rows1[3] = ['port_speed', '8Gb'] rows1[4] = ['port_id', '000000000000001'] rows1[5] = ['port_status', port_status] rows1[6] = ['port_speed', '8Gb'] rows2 = [None] * 7 rows2[0] = ['name', 'node2'] rows2[1] = ['port_id', '000000000000002'] rows2[2] = ['port_status', port_status] rows2[3] = ['port_speed', '8Gb'] rows2[4] = ['port_id', '000000000000002'] rows2[5] = ['port_status', port_status] rows2[6] = ['port_speed', 'N/A'] rows3 = [None] * 3 rows3[0] = ['id', 'name', 'UPS_serial_number', 'WWNN', 'status', 'IO_group_id', 'IO_group_name', 'config_node', 'UPS_unique_id', 'hardware', 'iscsi_name', 'iscsi_alias', 'panel_name', 'enclosure_id', 'canister_id', 'enclosure_serial_number'] rows3[1] = ['1', 'node1', '', '0123456789ABCDEF', 'online', '0', 'io_grp0', 'yes', '', 'TR1', 'naa.0123456789ABCDEF', '', '01-1', '1', '1', 'H441028'] rows3[2] = ['2', 'node2', '', '0123456789ABCDEF', 'online', '0', 'io_grp0', 'no', '', 'TR1', 'naa.0123456789ABCDEF', '', '01-2', '1', '2', 'H441028'] if self._next_cmd_error['lsnode'] == 'error': self._next_cmd_error['lsnode'] = '' return self._errors['CMMVC50000'] rows = None if 'obj' not in kwargs: rows = rows3 elif kwargs['obj'] == '1': rows = rows1 elif kwargs['obj'] == '2': rows = rows2 else: return self._errors['CMMVC50000'] if self._next_cmd_error['lsnode'] == 'header_mismatch': rows[0].pop(2) self._next_cmd_error['lsnode'] = '' return self._print_cmd_info(rows=rows, delim=kwargs.get('delim', None)) def _cmd_lssystem(self, **kwargs): """svcinfo lssystem -delim !""" open_access_enabled = 'off' if self._next_cmd_error['lssystem'] == 'error': self._next_cmd_error['lssystem'] = '' return self._errors['CMMVC50000'] if self._next_cmd_error['lssystem'] == 'open_access_enabled=on': self._next_cmd_error['lssystem'] = '' open_access_enabled = 'on' rows = [None] * 3 rows[0] = ['id', '0123456789ABCDEF'] rows[1] = ['name', 'flashsystem_1.2.3.4'] rows[2] = ['open_access_enabled', open_access_enabled] return self._print_cmd_info(rows=rows, **kwargs) def _cmd_lsportfc(self, **kwargs): """svcinfo lsportfc""" if self._protocol == 'FC' or self._protocol == 'both': status = 'active' else: status = 'unconfigured' rows = [None] * 3 rows[0] = ['id', 'canister_id', 'adapter_id', 'port_id', 'type', 'port_speed', 'node_id', 'node_name', 'WWPN', 'nportid', 'status', 'attachment', 'topology'] rows[1] = ['0', '1', '1', '1', 'fc', '8Gb', '1', 'node_1', 'AABBCCDDEEFF0011', '000000', status, 'host', 'al'] rows[2] = ['1', '1', '1', '1', 'fc', '8Gb', '1', 'node_1', 'AABBCCDDEEFF0010', '000000', status, 'host', 'al'] return self._print_cmd_info(rows=rows, **kwargs) def _cmd_lsportip(self, **kwargs): """svcinfo lsportip""" if self._protocol == 'iSCSI' or self._protocol == 'both': IP_address1 = '192.168.1.10' IP_address2 = '192.168.1.11' state = 'online' speed = '8G' else: IP_address1 = '' IP_address2 = '' state = '' speed = '' rows = [None] * 3 rows[0] = ['id', 'node_id', 'node_name', 'canister_id', 'adapter_id', 'port_id', 'IP_address', 'mask', 'gateway', 'IP_address_6', 'prefix_6', 'gateway_6', 'MAC', 'duplex', 'state', 'speed', 'failover', 'link_state', 'host', 'host_6', 'vlan', 'vlan_6', 'adapter_location', 'adapter_port_id'] rows[1] = ['1', '1', 'node1', '0', '0', '0', IP_address1, '', '', '', '0', '', '11:22:33:44:55:AA', '', state, speed, 'no', 
'active', '', '', '', '', '0', '0'] rows[2] = ['2', '2', 'node2', '0', '0', '0', IP_address2, '', '', '', '0', '', '11:22:33:44:55:BB', '', state, speed, 'no', 'active', '', '', '', '', '0', '0'] return self._print_cmd_info(rows=rows, **kwargs) def _cmd_lsvdisk(self, **kwargs): """cmd: svcinfo lsvdisk -gui -bytes -delim ! """ if 'obj' not in kwargs or ( 'delim' not in kwargs) or ( 'bytes' not in kwargs): return self._errors['CMMVC50000'] if kwargs['obj'] not in self._volumes_list: return self._errors['CMMVC50000'] vol = self._volumes_list[kwargs['obj']] rows = [] rows.append(['id', vol['id']]) rows.append(['name', vol['name']]) rows.append(['status', vol['status']]) rows.append(['capacity', vol['capacity']]) rows.append(['vdisk_UID', vol['vdisk_UID']]) rows.append(['udid', '']) rows.append(['open_access_scsi_id', '1']) rows.append(['parent_mdisk_grp_id', '0']) rows.append(['parent_mdisk_grp_name', 'mdiskgrp0']) for index in range(len(rows)): rows[index] = kwargs['delim'].join(rows[index]) return ('%s' % '\n'.join(rows), '') def _cmd_lsvdiskhostmap(self, **kwargs): """svcinfo lsvdiskhostmap -delim ! """ if 'obj' not in kwargs or ( 'delim' not in kwargs): return self._errors['CMMVC50000'] vdisk_name = kwargs['obj'] if vdisk_name not in self._volumes_list: return self._errors['CMMVC50000'] rows = [] rows.append(['id', 'name', 'SCSI_id', 'host_id', 'host_name', 'vdisk_UID', 'IO_group_id', 'IO_group_name']) mappings_found = 0 for mapping in self._mappings_list.values(): if (mapping['vol'] == vdisk_name): mappings_found += 1 volume = self._volumes_list[mapping['vol']] host = self._hosts_list[mapping['host']] rows.append([volume['id'], volume['name'], '1', host['id'], host['host_name'], volume['vdisk_UID'], '0', 'mdiskgrp0']) if mappings_found: return self._print_cmd_info(rows=rows, **kwargs) else: return ('', '') def _cmd_expandvdisksize(self, **kwargs): """svctask expandvdisksize -size -unit gb """ if 'obj' not in kwargs: return self._errors['CMMVC50000'] vol_name = kwargs['obj'].strip('\'\"') if 'size' not in kwargs: return self._errors['CMMVC50000'] size = int(kwargs['size']) if vol_name not in self._volumes_list: return self._errors['CMMVC50000'] curr_size = int(self._volumes_list[vol_name]['capacity']) addition = size * units.Gi self._volumes_list[vol_name]['capacity'] = str( curr_size + addition) return ('', '') def _cmd_mkvdisk(self, **kwargs): """mkvdisk command. 
svctask mkvdisk -name -mdiskgrp -iogrp -size -unit """ if 'name' not in kwargs or ( 'size' not in kwargs) or ( 'unit' not in kwargs): return self._errors['CMMVC50000'] vdisk_info = {} vdisk_info['id'] = self._find_unused_id(self._volumes_list) vdisk_info['name'] = kwargs['name'].strip('\'\"') vdisk_info['status'] = 'online' vdisk_info['capacity'] = self._convert_units_bytes( int(kwargs['size']), kwargs['unit']) vdisk_info['vdisk_UID'] = ('60050760') + ('0' * 14) + vdisk_info['id'] if vdisk_info['name'] in self._volumes_list: return self._errors['CMMVC50000'] else: self._volumes_list[vdisk_info['name']] = vdisk_info return ('Virtual Disk, id [%s], successfully created' % (vdisk_info['id']), '') def _cmd_chvdisk(self, **kwargs): """chvdisk command svcask chvdisk -name -udid -open_access_scsi_id """ if 'obj' not in kwargs: return self._errors['CMMVC50000'] source_name = kwargs['obj'].strip('\'\"') dest_name = kwargs['name'].strip('\'\"') vol = self._volumes_list[source_name] vol['name'] = dest_name del self._volumes_list[source_name] self._volumes_list[dest_name] = vol return ('', '') def _cmd_rmvdisk(self, **kwargs): """svctask rmvdisk -force """ if 'obj' not in kwargs: return self._errors['CMMVC50000'] vdisk_name = kwargs['obj'].strip('\'\"') if vdisk_name not in self._volumes_list: return self._errors['CMMVC50000'] del self._volumes_list[vdisk_name] return ('', '') def _add_port_to_host(self, host_info, **kwargs): if 'iscsiname' in kwargs: added_key = 'iscsi_names' added_val = kwargs['iscsiname'].strip('\'\"') elif 'hbawwpn' in kwargs: added_key = 'wwpns' added_val = kwargs['hbawwpn'].strip('\'\"') else: return self._errors['CMMVC50000'] host_info[added_key].append(added_val) for v in self._hosts_list.values(): if v['id'] == host_info['id']: continue for port in v[added_key]: if port == added_val: return self._errors['CMMVC50000'] return ('', '') def _cmd_mkhost(self, **kwargs): """mkhost command. svctask mkhost -force -hbawwpn -name svctask mkhost -force -iscsiname -name """ if 'name' not in kwargs: return self._errors['CMMVC50000'] host_name = kwargs['name'].strip('\'\"') if self._is_invalid_name(host_name): return self._errors['CMMVC50000'] if host_name in self._hosts_list: return self._errors['CMMVC50000'] host_info = {} host_info['id'] = self._find_unused_id(self._hosts_list) host_info['host_name'] = host_name host_info['iscsi_names'] = [] host_info['wwpns'] = [] out, err = self._add_port_to_host(host_info, **kwargs) if not len(err): self._hosts_list[host_name] = host_info return ('Host, id [%s], successfully created' % (host_info['id']), '') else: return (out, err) def _cmd_addhostport(self, **kwargs): """addhostport command. 
svctask addhostport -force -hbawwpn svctask addhostport -force -iscsiname """ if 'obj' not in kwargs: return self._errors['CMMVC50000'] host_name = kwargs['obj'].strip('\'\"') if host_name not in self._hosts_list: return self._errors['CMMVC50000'] host_info = self._hosts_list[host_name] return self._add_port_to_host(host_info, **kwargs) def _cmd_rmhost(self, **kwargs): """svctask rmhost """ if 'obj' not in kwargs: return self._errors['CMMVC50000'] host_name = kwargs['obj'].strip('\'\"') if host_name not in self._hosts_list: return self._errors['CMMVC50000'] for v in self._mappings_list.values(): if (v['host'] == host_name): return self._errors['CMMVC50000'] del self._hosts_list[host_name] return ('', '') def _cmd_mkvdiskhostmap(self, **kwargs): """svctask mkvdiskhostmap -host -scsi """ mapping_info = {} mapping_info['id'] = self._find_unused_id(self._mappings_list) if 'host' not in kwargs or ( 'scsi' not in kwargs) or ( 'obj' not in kwargs): return self._errors['CMMVC50000'] mapping_info['host'] = kwargs['host'].strip('\'\"') mapping_info['lun'] = kwargs['scsi'].strip('\'\"') mapping_info['vol'] = kwargs['obj'].strip('\'\"') if mapping_info['vol'] not in self._volumes_list: return self._errors['CMMVC50000'] if mapping_info['host'] not in self._hosts_list: return self._errors['CMMVC50000'] for v in self._mappings_list.values(): if (v['vol'] == mapping_info['vol']) and ('force' not in kwargs): return self._errors[self._multi_host_map_error or 'CMMVC50000'] if ((v['host'] == mapping_info['host']) and (v['lun'] == mapping_info['lun'])): return self._errors['CMMVC50000'] if (v['lun'] == mapping_info['lun']) and ('force' not in kwargs): return self._errors['CMMVC50000'] self._mappings_list[mapping_info['id']] = mapping_info return ('Virtual Disk to Host map, id [%s], successfully created' % (mapping_info['id']), '') def _cmd_rmvdiskhostmap(self, **kwargs): """svctask rmvdiskhostmap -host """ if 'host' not in kwargs or 'obj' not in kwargs: return self._errors['CMMVC50000'] host = kwargs['host'].strip('\'\"') vdisk = kwargs['obj'].strip('\'\"') mapping_ids = [] for v in self._mappings_list.values(): if v['vol'] == vdisk: mapping_ids.append(v['id']) if not mapping_ids: return self._errors['CMMVC50000'] this_mapping = None for mapping_id in mapping_ids: if self._mappings_list[mapping_id]['host'] == host: this_mapping = mapping_id if this_mapping is None: return self._errors['CMMVC50000'] del self._mappings_list[this_mapping] return ('', '') def set_protocol(self, protocol): self._protocol = protocol def execute_command(self, cmd, check_exit_code=True): try: kwargs = self._cmd_to_dict(cmd) except exception.InvalidInput: return self._errors['CMMVC50000'] command = kwargs.pop('cmd') func = getattr(self, '_cmd_' + command) out, err = func(**kwargs) if (check_exit_code) and (len(err) != 0): raise processutils.ProcessExecutionError(exit_code=1, stdout=out, stderr=err, cmd=command) return (out, err) def error_injection(self, cmd, error): self._next_cmd_error[cmd] = error class FlashSystemFakeDriver(flashsystem_fc.FlashSystemFCDriver): def __init__(self, *args, **kwargs): super(FlashSystemFakeDriver, self).__init__(*args, **kwargs) def set_fake_storage(self, fake): self.fake_storage = fake def _ssh(self, cmd, check_exit_code=True): utils.check_ssh_injection(cmd) ret = self.fake_storage.execute_command(cmd, check_exit_code) return ret class FlashSystemDriverTestCase(test.TestCase): def _set_flag(self, flag, value): group = self.driver.configuration.config_group self.driver.configuration.set_override(flag, 
value, group) def _reset_flags(self): self.driver.configuration.local_conf.reset() for k, v in self._def_flags.items(): self._set_flag(k, v) def _generate_vol_info(self, vol_name, vol_size=10, vol_status='available'): rand_id = str(random.randint(10000, 99999)) if not vol_name: vol_name = 'test_volume%s' % rand_id return {'name': vol_name, 'size': vol_size, 'id': '%s' % rand_id, 'volume_type_id': None, 'status': vol_status, 'mdisk_grp_name': 'mdiskgrp0'} def _generate_snap_info(self, vol_name, vol_id, vol_size, vol_status, snap_status='available'): rand_id = str(random.randint(10000, 99999)) return {'name': 'test_snap_%s' % rand_id, 'id': rand_id, 'volume': {'name': vol_name, 'id': vol_id, 'size': vol_size, 'status': vol_status}, 'volume_size': vol_size, 'status': snap_status, 'mdisk_grp_name': 'mdiskgrp0'} def setUp(self): super(FlashSystemDriverTestCase, self).setUp() self._def_flags = {'san_ip': 'hostname', 'san_login': 'username', 'san_password': 'password', 'flashsystem_connection_protocol': 'FC', 'flashsystem_multihostmap_enabled': True} self.connector = { 'host': 'flashsystem', 'wwnns': ['0123456789abcdef', '0123456789abcdeg'], 'wwpns': ['abcd000000000001', 'abcd000000000002'], 'initiator': 'iqn.123456'} self.alt_connector = { 'host': 'other', 'wwnns': ['0123456789fedcba', '0123456789badcfe'], 'wwpns': ['dcba000000000001', 'dcba000000000002'], 'initiator': 'iqn.654321' } self.sim = FlashSystemManagementSimulator() self.driver = FlashSystemFakeDriver( configuration=conf.Configuration(None)) self.driver.set_fake_storage(self.sim) self._reset_flags() self.ctxt = context.get_admin_context() self.driver.do_setup(None) self.driver.check_for_setup_error() self.sleeppatch = mock.patch('eventlet.greenthread.sleep') self.sleeppatch.start() def tearDown(self): self.sleeppatch.stop() super(FlashSystemDriverTestCase, self).tearDown() def test_flashsystem_do_setup(self): # case 1: cmd lssystem encounters error self.sim.error_injection('lssystem', 'error') self.assertRaises(exception.VolumeBackendAPIException, self.driver.do_setup, None) # case 2: open_access_enabled is not off self.sim.error_injection('lssystem', 'open_access_enabled=on') self.assertRaises(exception.VolumeBackendAPIException, self.driver.do_setup, None) # case 3: cmd lsmdiskgrp encounters error self.sim.error_injection('lsmdiskgrp', 'error') self.assertRaises(exception.InvalidInput, self.driver.do_setup, None) # case 4: status is not online self.sim.error_injection('lsmdiskgrp', 'status=offline') self.assertRaises(exception.InvalidInput, self.driver.do_setup, None) # case 5: cmd lsnode encounters error self.sim.error_injection('lsnode', 'error') self.assertRaises(processutils.ProcessExecutionError, self.driver.do_setup, None) # case 6: cmd lsnode header does not match self.sim.error_injection('lsnode', 'header_mismatch') self.assertRaises(exception.VolumeBackendAPIException, self.driver.do_setup, None) # case 7: set as FC self.sim.set_protocol('FC') self.driver.do_setup(None) self.assertEqual('FC', self.driver._protocol) # case 8: no configured nodes available self.sim.set_protocol('unknown') self.assertRaises(exception.VolumeBackendAPIException, self.driver.do_setup, None) # clear environment self.sim.set_protocol('FC') self.driver.do_setup(None) def test_flashsystem_check_for_setup_error(self): self._set_flag('san_ip', '') self.assertRaises(exception.InvalidInput, self.driver.check_for_setup_error) self._reset_flags() self._set_flag('san_ssh_port', '') self.assertRaises(exception.InvalidInput, self.driver.check_for_setup_error) 
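# Editor's note: a self-contained sketch (class and method names here are
# hypothetical, not the FlashSystem classes above) of the error-injection
# pattern these tests use: the simulator remembers a one-shot error for a
# given CLI command, the next invocation of that command consumes it, and the
# test asserts that the driver call fails; a later invocation succeeds again.
class TinySimulator(object):
    def __init__(self):
        self._next_cmd_error = {'lssystem': ''}

    def error_injection(self, cmd, error):
        self._next_cmd_error[cmd] = error

    def run(self, cmd):
        if self._next_cmd_error.get(cmd):
            self._next_cmd_error[cmd] = ''          # one-shot: clear after use
            raise RuntimeError('%s failed' % cmd)
        return 'ok'


if __name__ == '__main__':
    sim = TinySimulator()
    sim.error_injection('lssystem', 'error')
    try:
        sim.run('lssystem')
        raise AssertionError('expected the injected error')
    except RuntimeError:
        pass
    assert sim.run('lssystem') == 'ok'              # the error was one-shot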
self._reset_flags() self._set_flag('san_login', '') self.assertRaises(exception.InvalidInput, self.driver.check_for_setup_error) self._reset_flags() self._set_flag('san_password', None) self._set_flag('san_private_key', None) self.assertRaises(exception.InvalidInput, self.driver.check_for_setup_error) self._reset_flags() self._set_flag('flashsystem_connection_protocol', 'foo') self.assertRaises(exception.InvalidInput, self.driver.check_for_setup_error) self._reset_flags() # clear environment self.driver.do_setup(None) def test_flashsystem_validate_connector(self): conn_neither = {'host': 'host'} conn_iscsi = {'host': 'host', 'initiator': 'foo'} conn_fc = {'host': 'host', 'wwpns': 'bar'} conn_both = {'host': 'host', 'initiator': 'foo', 'wwpns': 'bar'} protocol = self.driver._protocol # case 1: when protocol is FC self.driver._protocol = 'FC' self.driver.validate_connector(conn_fc) self.driver.validate_connector(conn_both) self.assertRaises(exception.InvalidConnectorException, self.driver.validate_connector, conn_iscsi) self.assertRaises(exception.InvalidConnectorException, self.driver.validate_connector, conn_neither) # clear environment self.driver._protocol = protocol def test_flashsystem_volumes(self): # case 1: create volume vol = self._generate_vol_info(None) self.driver.create_volume(vol) # Check whether volume is created successfully attributes = self.driver._get_vdisk_attributes(vol['name']) attr_size = float(attributes['capacity']) / units.Gi self.assertEqual(float(vol['size']), attr_size) # case 2: create volume with empty returning value with mock.patch.object(FlashSystemFakeDriver, '_ssh') as mock_ssh: mock_ssh.return_value = ("", "") vol1 = self._generate_vol_info(None) self.assertRaises(exception.VolumeBackendAPIException, self.driver.create_volume, vol1) # case 3: create volume with error returning value with mock.patch.object(FlashSystemFakeDriver, '_ssh') as mock_ssh: mock_ssh.return_value = ("CMMVC6070E", "An invalid or duplicated " "parameter has been detected.") vol2 = self._generate_vol_info(None) self.assertRaises(exception.VolumeBackendAPIException, self.driver.create_volume, vol2) # case 4: delete volume self.driver.delete_volume(vol) # case 5: delete volume that doesn't exist (expected not fail) vol_no_exist = self._generate_vol_info(None) self.driver.delete_volume(vol_no_exist) def test_flashsystem_extend_volume(self): vol = self._generate_vol_info(None) self.driver.create_volume(vol) self.driver.extend_volume(vol, '200') attrs = self.driver._get_vdisk_attributes(vol['name']) vol_size = int(attrs['capacity']) / units.Gi self.assertAlmostEqual(vol_size, 200) # clear environment self.driver.delete_volume(vol) def test_flashsystem_connection(self): # case 1: initialize_connection/terminate_connection for good path vol1 = self._generate_vol_info(None) self.driver.create_volume(vol1) self.driver.initialize_connection(vol1, self.connector) self.driver.terminate_connection(vol1, self.connector) # case 2: when volume is not existed vol2 = self._generate_vol_info(None) self.assertRaises(exception.VolumeBackendAPIException, self.driver.initialize_connection, vol2, self.connector) # case 3: _get_vdisk_map_properties raises exception with mock.patch.object(flashsystem_fc.FlashSystemFCDriver, '_get_vdisk_map_properties') as get_properties: get_properties.side_effect = ( exception.VolumeBackendAPIException(data='')) self.assertRaises(exception.VolumeBackendAPIException, self.driver.initialize_connection, vol1, self.connector) # case 4: terminate_connection with no host with 
mock.patch.object(flashsystem_fc.FlashSystemFCDriver, '_get_hostvdisk_mappings') as mock_host: mock_host.return_value = {} vol3 = self._generate_vol_info(None) self.driver.create_volume(vol3) self.driver.initialize_connection(vol3, self.connector) return_value = self.driver.terminate_connection(vol3, self.connector) self.assertNotEqual({}, return_value['data']) # case 5: terminate_connection with host vol4 = self._generate_vol_info(None) self.driver.create_volume(vol4) self.driver.initialize_connection(vol4, self.connector) vol5 = self._generate_vol_info(None) self.driver.create_volume(vol5) self.driver.initialize_connection(vol5, self.connector) return_value = self.driver.terminate_connection(vol4, self.connector) self.assertEqual({}, return_value['data']) # clear environment self.driver.delete_volume(vol1) self.driver.delete_volume(vol2) self.driver.delete_volume(vol3) self.driver.delete_volume(vol4) self.driver.delete_volume(vol5) @mock.patch.object(flashsystem_fc.FlashSystemFCDriver, '_create_and_copy_vdisk_data') def test_flashsystem_create_snapshot(self, _create_and_copy_vdisk_data): # case 1: good path vol1 = self._generate_vol_info(None) snap1 = self._generate_snap_info(vol1['name'], vol1['id'], vol1['size'], vol1['status']) self.driver.create_snapshot(snap1) # case 2: when volume status is error vol2 = self._generate_vol_info(None, vol_status='error') snap2 = self._generate_snap_info(vol2['name'], vol2['id'], vol2['size'], vol2['status']) self.assertRaises(exception.InvalidVolume, self.driver.create_snapshot, snap2) @mock.patch.object(flashsystem_fc.FlashSystemFCDriver, '_delete_vdisk') def test_flashsystem_delete_snapshot(self, _delete_vdisk): vol1 = self._generate_vol_info(None) snap1 = self._generate_snap_info(vol1['name'], vol1['id'], vol1['size'], vol1['status']) self.driver.delete_snapshot(snap1) @mock.patch.object(flashsystem_fc.FlashSystemFCDriver, '_create_and_copy_vdisk_data') def test_flashsystem_create_volume_from_snapshot( self, _create_and_copy_vdisk_data): # case 1: good path vol = self._generate_vol_info(None) snap = self._generate_snap_info(vol['name'], vol['id'], vol['size'], vol['status']) self.driver.create_volume_from_snapshot(vol, snap) # case 2: when size does not match vol = self._generate_vol_info(None, vol_size=100) snap = self._generate_snap_info(vol['name'], vol['id'], 200, vol['status']) self.assertRaises(exception.VolumeDriverException, self.driver.create_volume_from_snapshot, vol, snap) # case 3: when snapshot status is not available vol = self._generate_vol_info(None) snap = self._generate_snap_info(vol['name'], vol['id'], vol['size'], vol['status'], snap_status='error') self.assertRaises(exception.InvalidSnapshot, self.driver.create_volume_from_snapshot, vol, snap) @mock.patch.object(flashsystem_fc.FlashSystemFCDriver, '_create_and_copy_vdisk_data') def test_flashsystem_create_cloned_volume( self, _create_and_copy_vdisk_data): # case 1: good path vol1 = self._generate_vol_info(None) vol2 = self._generate_vol_info(None) self.driver.create_cloned_volume(vol2, vol1) # case 2: destination larger than source vol1 = self._generate_vol_info(None, vol_size=10) vol2 = self._generate_vol_info(None, vol_size=20) self.driver.create_cloned_volume(vol2, vol1) # case 3: destination smaller than source self.assertRaises(exception.VolumeDriverException, self.driver.create_cloned_volume, vol1, vol2) def test_flashsystem_get_volume_stats(self): # case 1: good path self._set_flag('reserved_percentage', 25) self._set_flag('flashsystem_multihostmap_enabled', False) pool 
= 'mdiskgrp0' backend_name = 'flashsystem_1.2.3.4' + '_' + pool stats = self.driver.get_volume_stats() self.assertEqual(25, stats['reserved_percentage']) self.assertEqual('IBM', stats['vendor_name']) self.assertEqual('FC', stats['storage_protocol']) self.assertEqual(backend_name, stats['volume_backend_name']) self.assertEqual(False, stats['multiattach']) self._reset_flags() # case 2: when lsmdiskgrp returns error self.sim.error_injection('lsmdiskgrp', 'error') self.assertRaises(exception.VolumeBackendAPIException, self.driver.get_volume_stats, refresh=True) @mock.patch.object(flashsystem_fc.FlashSystemFCDriver, '_copy_vdisk_data') def test_flashsystem_create_and_copy_vdisk_data(self, _copy_vdisk_data): # case 1: when volume does not exist vol1 = self._generate_vol_info(None) vol2 = self._generate_vol_info(None) self.assertRaises(exception.VolumeBackendAPIException, self.driver._create_and_copy_vdisk_data, vol1['name'], vol1['id'], vol2['name'], vol2['id']) # case 2: good path self.driver.create_volume(vol1) self.driver._create_and_copy_vdisk_data( vol1['name'], vol1['id'], vol2['name'], vol2['id']) self.driver.delete_volume(vol1) self.driver.delete_volume(vol2) # case 3: _copy_vdisk_data raises exception self.driver.create_volume(vol1) _copy_vdisk_data.side_effect = ( exception.VolumeBackendAPIException(data='')) self.assertRaises( exception.VolumeBackendAPIException, self.driver._create_and_copy_vdisk_data, vol1['name'], vol1['id'], vol2['name'], vol2['id']) self.assertEqual(set(), self.driver._vdisk_copy_in_progress) # clear environment self.driver.delete_volume(vol1) self.driver.delete_volume(vol2) @mock.patch.object(volume_utils, 'copy_volume') @mock.patch.object(flashsystem_fc.FlashSystemFCDriver, '_scan_device') @mock.patch.object(flashsystem_fc.FlashSystemFCDriver, '_remove_device') @mock.patch.object(volume_utils, 'brick_get_connector_properties') def test_flashsystem_copy_vdisk_data(self, _connector, _remove_device, _scan_device, copy_volume): connector = _connector.return_value = self.connector vol1 = self._generate_vol_info(None) vol2 = self._generate_vol_info(None) self.driver.create_volume(vol1) self.driver.create_volume(vol2) # case 1: no mapped before copy self.driver._copy_vdisk_data( vol1['name'], vol1['id'], vol2['name'], vol2['id']) (v1_mapped, lun) = self.driver._is_vdisk_map(vol1['name'], connector) (v2_mapped, lun) = self.driver._is_vdisk_map(vol2['name'], connector) self.assertFalse(v1_mapped) self.assertFalse(v2_mapped) # case 2: mapped before copy self.driver.initialize_connection(vol1, connector) self.driver.initialize_connection(vol2, connector) self.driver._copy_vdisk_data( vol1['name'], vol1['id'], vol2['name'], vol2['id']) (v1_mapped, lun) = self.driver._is_vdisk_map(vol1['name'], connector) (v2_mapped, lun) = self.driver._is_vdisk_map(vol2['name'], connector) self.assertTrue(v1_mapped) self.assertTrue(v2_mapped) self.driver.terminate_connection(vol1, connector) self.driver.terminate_connection(vol2, connector) # case 3: no mapped before copy, raise exception when scan _scan_device.side_effect = exception.VolumeBackendAPIException(data='') self.assertRaises( exception.VolumeBackendAPIException, self.driver._copy_vdisk_data, vol1['name'], vol1['id'], vol2['name'], vol2['id']) (v1_mapped, lun) = self.driver._is_vdisk_map(vol1['name'], connector) (v2_mapped, lun) = self.driver._is_vdisk_map(vol2['name'], connector) self.assertFalse(v1_mapped) self.assertFalse(v2_mapped) # case 4: no mapped before copy, raise exception when copy copy_volume.side_effect = 
exception.VolumeBackendAPIException(data='') self.assertRaises( exception.VolumeBackendAPIException, self.driver._copy_vdisk_data, vol1['name'], vol1['id'], vol2['name'], vol2['id']) (v1_mapped, lun) = self.driver._is_vdisk_map(vol1['name'], connector) (v2_mapped, lun) = self.driver._is_vdisk_map(vol2['name'], connector) self.assertFalse(v1_mapped) self.assertFalse(v2_mapped) # clear environment self.driver.delete_volume(vol1) self.driver.delete_volume(vol2) def test_flashsystem_connector_to_hostname_prefix(self): # Invalid characters will be translated to '-' # case 1: host name is unicode with invalid characters conn = {'host': u'unicode.test}.abc{.abc'} self.assertEqual(u'unicode.test-.abc-.abc', self.driver._connector_to_hostname_prefix(conn)) # case 2: host name is string with invalid characters conn = {'host': 'string.test}.abc{.abc'} self.assertEqual('string.test-.abc-.abc', self.driver._connector_to_hostname_prefix(conn)) # case 3: host name is neither unicode nor string conn = {'host': 12345} self.assertRaises(exception.NoValidBackend, self.driver._connector_to_hostname_prefix, conn) # case 4: host name started with number will be translated conn = {'host': '192.168.1.1'} self.assertEqual('_192.168.1.1', self.driver._connector_to_hostname_prefix(conn)) def test_flashsystem_create_host(self): # case 1: create host conn = { 'host': 'flashsystem', 'wwnns': ['0123456789abcdef', '0123456789abcdeg'], 'wwpns': ['abcd000000000001', 'abcd000000000002'], 'initiator': 'iqn.123456'} host = self.driver._create_host(conn) # case 2: create host that already exists self.assertRaises(processutils.ProcessExecutionError, self.driver._create_host, conn) # case 3: delete host self.driver._delete_host(host) # case 4: create host with empty ports conn = {'host': 'flashsystem', 'wwpns': []} self.assertRaises(exception.VolumeBackendAPIException, self.driver._create_host, conn) def test_flashsystem_find_host_exhaustive(self): # case 1: create host and find it conn1 = { 'host': 'flashsystem-01', 'wwnns': ['1111111111abcdef', '1111111111abcdeg'], 'wwpns': ['1111111111000001', '1111111111000002'], 'initiator': 'iqn.111111'} conn2 = { 'host': 'flashsystem-02', 'wwnns': ['2222222222abcdef', '2222222222abcdeg'], 'wwpns': ['2222222222000001', '2222222222000002'], 'initiator': 'iqn.222222'} conn3 = { 'host': 'flashsystem-03', 'wwnns': ['3333333333abcdef', '3333333333abcdeg'], 'wwpns': ['3333333333000001', '3333333333000002'], 'initiator': 'iqn.333333'} host1 = self.driver._create_host(conn1) host2 = self.driver._create_host(conn2) self.assertEqual( host2, self.driver._find_host_exhaustive(conn2, [host1, host2])) self.assertIsNone(self.driver._find_host_exhaustive(conn3, [host1, host2])) # case 2: hosts contains non-existent host info with mock.patch.object(FlashSystemFakeDriver, '_ssh') as mock_ssh: mock_ssh.return_value = ("pass", "") self.driver._find_host_exhaustive(conn1, [host2]) self.assertFalse(mock_ssh.called) # clear environment self.driver._delete_host(host1) self.driver._delete_host(host2) def test_flashsystem_get_vdisk_params(self): # case 1: use default params self.driver._get_vdisk_params(None) # case 2: use extra params from type opts1 = {'storage_protocol': 'FC'} opts2 = {'capabilities:storage_protocol': 'FC'} opts3 = {'storage_protocol': 'iSCSI'} type1 = volume_types.create(self.ctxt, 'opts1', opts1) type2 = volume_types.create(self.ctxt, 'opts2', opts2) type3 = volume_types.create(self.ctxt, 'opts3', opts3) self.assertEqual( 'FC', self.driver._get_vdisk_params(type1['id'])['protocol']) 
self.assertEqual( 'FC', self.driver._get_vdisk_params(type2['id'])['protocol']) self.assertRaises(exception.InvalidInput, self.driver._get_vdisk_params, type3['id']) # clear environment volume_types.destroy(self.ctxt, type1['id']) volume_types.destroy(self.ctxt, type2['id']) def test_flashsystem_map_vdisk_to_host(self): # case 1: no host found vol1 = self._generate_vol_info(None) self.driver.create_volume(vol1) self.assertEqual( # lun id shoud begin with 1 1, self.driver._map_vdisk_to_host(vol1['name'], self.connector)) # case 2: host already exists vol2 = self._generate_vol_info(None) self.driver.create_volume(vol2) self.assertEqual( # lun id shoud be sequential 2, self.driver._map_vdisk_to_host(vol2['name'], self.connector)) # case 3: test if already mapped self.assertEqual( 1, self.driver._map_vdisk_to_host(vol1['name'], self.connector)) # case 4: multi-host mapping, good path for error in self.sim._multi_host_map_errors: self.sim._multi_host_map_error = error self.assertEqual( 1, self.driver._map_vdisk_to_host( vol1['name'], self.alt_connector ) ) self.driver._unmap_vdisk_from_host( vol1['name'], self.alt_connector ) self.sim._multi_host_map_error = None # case 5: multi-host mapping, bad path self.assertRaises( exception.VolumeBackendAPIException, self.driver._map_vdisk_to_host, vol1['name'], self.alt_connector) # clean environment self.driver._unmap_vdisk_from_host(vol1['name'], self.connector) self.driver._unmap_vdisk_from_host(vol2['name'], self.connector) self.driver.delete_volume(vol1) self.driver.delete_volume(vol2) # case 4: If there is no vdisk mapped to host, host should be removed self.assertIsNone(self.driver._get_host_from_connector(self.connector)) def test_flashsystem_manage_existing(self): # case 1: manage a vdisk good path kwargs = {'name': u'unmanage-vol-01', 'size': u'1', 'unit': 'gb'} self.sim._cmd_mkvdisk(**kwargs) vol1 = self._generate_vol_info(None) existing_ref = {'source-name': u'unmanage-vol-01'} self.driver.manage_existing(vol1, existing_ref) self.driver.delete_volume(vol1) # case 2: manage a vdisk not exist vol1 = self._generate_vol_info(None) existing_ref = {'source-name': u'unmanage-vol-01'} self.assertRaises(exception.ManageExistingInvalidReference, self.driver.manage_existing, vol1, existing_ref) # case 3: manage a vdisk without name and uid kwargs = {'name': u'unmanage-vol-01', 'size': u'1', 'unit': 'gb'} self.sim._cmd_mkvdisk(**kwargs) vol1 = self._generate_vol_info(None) existing_ref = {} self.assertRaises(exception.ManageExistingInvalidReference, self.driver.manage_existing, vol1, existing_ref) vdisk1 = {'obj': u'unmanage-vol-01'} self.sim._cmd_rmvdisk(**vdisk1) @mock.patch.object(flashsystem_fc.FlashSystemFCDriver, '_get_vdiskhost_mappings') def test_flashsystem_manage_existing_get_size_mapped( self, _get_vdiskhost_mappings_mock): # manage a vdisk with mappings _get_vdiskhost_mappings_mock.return_value = {'mapped': u'yes'} kwargs = {'name': u'unmanage-vol-01', 'size': u'1', 'unit': 'gb'} self.sim._cmd_mkvdisk(**kwargs) vol1 = self._generate_vol_info(None) existing_ref = {'source-name': u'unmanage-vol-01'} self.assertRaises(exception.ManageExistingInvalidReference, self.driver.manage_existing_get_size, vol1, existing_ref) # clean environment vdisk1 = {'obj': u'unmanage-vol-01'} self.sim._cmd_rmvdisk(**vdisk1) def test_flashsystem_manage_existing_get_size_bad_ref(self): # bad existing_ref vol1 = self._generate_vol_info(None, None) existing_ref = {} self.assertRaises(exception.ManageExistingInvalidReference, self.driver.manage_existing_get_size, vol1, 
existing_ref) def test_flashsystem_manage_existing_get_size_vdisk_not_exist(self): # vdisk not exist vol1 = self._generate_vol_info(None) existing_ref = {'source-name': u'unmanage-vol-01'} self.assertRaises(exception.ManageExistingInvalidReference, self.driver.manage_existing_get_size, vol1, existing_ref) def test_flashsystem_manage_existing_get_size(self): # good path kwargs = {'name': u'unmanage-vol-01', 'size': u'10001', 'unit': 'gb'} self.sim._cmd_mkvdisk(**kwargs) vol1 = self._generate_vol_info(None) existing_ref = {'source-name': u'unmanage-vol-01'} vdisk_size = self.driver.manage_existing_get_size(vol1, existing_ref) self.assertEqual(10001, vdisk_size) # clean environment vdisk1 = {'obj': u'unmanage-vol-01'} self.sim._cmd_rmvdisk(**vdisk1) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/volume/drivers/ibm/test_ibm_flashsystem_iscsi.py0000664000175000017500000004041100000000000030204 0ustar00zuulzuul00000000000000# Copyright 2015 IBM Corp. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # """Tests for the IBM FlashSystem iSCSI volume driver.""" import random from unittest import mock from cinder import context from cinder import exception from cinder.tests.unit import test from cinder.tests.unit.volume.drivers.ibm \ import test_ibm_flashsystem as fscommon from cinder import utils from cinder.volume import configuration as conf from cinder.volume.drivers.ibm import flashsystem_iscsi from cinder.volume import volume_types class FlashSystemManagementSimulator(fscommon.FlashSystemManagementSimulator): def __init__(self): # Default protocol is iSCSI self._protocol = 'iSCSI' self._volumes_list = {} self._hosts_list = {} self._mappings_list = {} self._next_cmd_error = { 'lsnode': '', 'lssystem': '', 'lsmdiskgrp': '' } self._errors = { # CMMVC50000 is a fake error which indicates that command has not # got expected results. This error represents kinds of CLI errors. 
'CMMVC50000': ('', 'CMMVC50000 The command can not be executed ' 'successfully.') } class FlashSystemFakeISCSIDriver(flashsystem_iscsi.FlashSystemISCSIDriver): def __init__(self, *args, **kwargs): super(FlashSystemFakeISCSIDriver, self).__init__(*args, **kwargs) def set_fake_storage(self, fake): self.fake_storage = fake def _ssh(self, cmd, check_exit_code=True): utils.check_ssh_injection(cmd) ret = self.fake_storage.execute_command(cmd, check_exit_code) return ret class FlashSystemISCSIDriverTestCase(test.TestCase): def _set_flag(self, flag, value): group = self.driver.configuration.config_group self.driver.configuration.set_override(flag, value, group) def _reset_flags(self): self.driver.configuration.local_conf.reset() for k, v in self._def_flags.items(): self._set_flag(k, v) def _generate_vol_info(self, vol_name, vol_size=10, vol_status='available'): rand_id = str(random.randint(10000, 99999)) if not vol_name: vol_name = 'test_volume%s' % rand_id return {'name': vol_name, 'size': vol_size, 'id': '%s' % rand_id, 'volume_type_id': None, 'status': vol_status, 'mdisk_grp_name': 'mdiskgrp0'} def _generate_snap_info(self, vol_name, vol_id, vol_size, vol_status, snap_status='available'): rand_id = str(random.randint(10000, 99999)) return {'name': 'test_snap_%s' % rand_id, 'id': rand_id, 'volume': {'name': vol_name, 'id': vol_id, 'size': vol_size, 'status': vol_status}, 'volume_size': vol_size, 'status': snap_status, 'mdisk_grp_name': 'mdiskgrp0'} def setUp(self): super(FlashSystemISCSIDriverTestCase, self).setUp() self._def_flags = {'san_ip': 'hostname', 'san_login': 'username', 'san_password': 'password', 'flashsystem_connection_protocol': 'iSCSI', 'flashsystem_multihostmap_enabled': True, 'target_ip_address': '192.168.1.10', 'flashsystem_iscsi_portid': 1} self.connector = { 'host': 'flashsystem', 'wwnns': ['0123456789abcdef', '0123456789abcdeg'], 'wwpns': ['abcd000000000001', 'abcd000000000002'], 'initiator': 'iqn.123456'} self.sim = FlashSystemManagementSimulator() self.driver = FlashSystemFakeISCSIDriver( configuration=conf.Configuration(None)) self.driver.set_fake_storage(self.sim) self._reset_flags() self.ctxt = context.get_admin_context() self.driver.do_setup(None) self.driver.check_for_setup_error() self.sleeppatch = mock.patch('eventlet.greenthread.sleep') self.sleeppatch.start() def tearDown(self): self.sleeppatch.stop() super(FlashSystemISCSIDriverTestCase, self).tearDown() def test_flashsystem_do_setup(self): # case 1: set as iSCSI self.sim.set_protocol('iSCSI') self._set_flag('flashsystem_connection_protocol', 'iSCSI') self.driver.do_setup(None) self.assertEqual('iSCSI', self.driver._protocol) # clear environment self.sim.set_protocol('iSCSI') self._reset_flags() def test_flashsystem_validate_connector(self): conn_neither = {'host': 'host'} conn_iscsi = {'host': 'host', 'initiator': 'foo'} conn_both = {'host': 'host', 'initiator': 'foo', 'wwpns': 'bar'} protocol = self.driver._protocol # case 1: when protocol is iSCSI self.driver._protocol = 'iSCSI' self.driver.validate_connector(conn_iscsi) self.driver.validate_connector(conn_both) self.assertRaises(exception.InvalidConnectorException, self.driver.validate_connector, conn_neither) # clear environment self.driver._protocol = protocol def test_flashsystem_connection(self): # case 1: initialize_connection/terminate_connection with iSCSI self.sim.set_protocol('iSCSI') self._set_flag('flashsystem_connection_protocol', 'iSCSI') self.driver.do_setup(None) vol1 = self._generate_vol_info(None) self.driver.create_volume(vol1) 
self.driver.initialize_connection(vol1, self.connector) self.driver.terminate_connection(vol1, self.connector) # clear environment self.driver.delete_volume(vol1) self.sim.set_protocol('iSCSI') self._reset_flags() def test_flashsystem_create_host(self): # case 1: create host with iqn self.sim.set_protocol('iSCSI') self._set_flag('flashsystem_connection_protocol', 'iSCSI') self.driver.do_setup(None) conn = { 'host': 'flashsystem', 'wwnns': ['0123456789abcdef', '0123456789abcdeg'], 'wwpns': ['abcd000000000001', 'abcd000000000002'], 'initiator': 'iqn.123456'} host = self.driver._create_host(conn) # case 2: delete host self.driver._delete_host(host) # clear environment self.sim.set_protocol('iSCSI') self._reset_flags() def test_flashsystem_get_vdisk_params(self): # case 1: use default params self.driver._get_vdisk_params(None) # case 2: use extra params from type opts1 = {'storage_protocol': 'iSCSI'} opts2 = {'capabilities:storage_protocol': 'iSCSI'} opts3 = {'storage_protocol': 'FC'} type1 = volume_types.create(self.ctxt, 'opts1', opts1) type2 = volume_types.create(self.ctxt, 'opts2', opts2) type3 = volume_types.create(self.ctxt, 'opts3', opts3) self.assertEqual( 'iSCSI', self.driver._get_vdisk_params(type1['id'])['protocol']) self.assertEqual( 'iSCSI', self.driver._get_vdisk_params(type2['id'])['protocol']) self.assertRaises(exception.InvalidInput, self.driver._get_vdisk_params, type3['id']) # clear environment volume_types.destroy(self.ctxt, type1['id']) volume_types.destroy(self.ctxt, type2['id']) volume_types.destroy(self.ctxt, type3['id']) def test_flashsystem_map_vdisk_to_host(self): # case 1: no host found vol1 = self._generate_vol_info(None) self.driver.create_volume(vol1) self.assertEqual( # lun id shoud begin with 1 1, self.driver._map_vdisk_to_host(vol1['name'], self.connector)) # case 2: host already exists vol2 = self._generate_vol_info(None) self.driver.create_volume(vol2) self.assertEqual( # lun id shoud be sequential 2, self.driver._map_vdisk_to_host(vol2['name'], self.connector)) # case 3: test if already mapped self.assertEqual( 1, self.driver._map_vdisk_to_host(vol1['name'], self.connector)) # clean environment self.driver._unmap_vdisk_from_host(vol1['name'], self.connector) self.driver._unmap_vdisk_from_host(vol2['name'], self.connector) self.driver.delete_volume(vol1) self.driver.delete_volume(vol2) # case 4: If there is no vdisk mapped to host, host should be removed self.assertIsNone(self.driver._get_host_from_connector(self.connector)) def test_terminate_connection_with_normal_path(self): connector = {'host': 'flashsystem-host', 'wwnns': ['10000090fa17311e', '10000090fa17311f'], 'wwpns': ['20000090fa17311e', '20000090fa17311f'], 'initiator': 'iqn.1993-08.org.debian:01:89ad29bbdc43'} # create test volume volume_iscsi = self._generate_vol_info(None) self.driver.create_volume(volume_iscsi) # normal connection test self.driver.initialize_connection(volume_iscsi, connector) host = self.driver._get_host_from_connector(connector) self.assertIsNotNone(host) self.driver.terminate_connection(volume_iscsi, connector) host = self.driver._get_host_from_connector(connector) self.assertIsNone(host) # clean environment self.driver.delete_volume(volume_iscsi) def test_terminate_connection_with_resource_leak_check(self): connector = {'host': 'flashsystem-host', 'wwnns': ['10000090fa17311e', '10000090fa17311f'], 'wwpns': ['20000090fa17311e', '20000090fa17311f'], 'initiator': 'iqn.1993-08.org.debian:01:89ad29bbdc43'} # create test volume volume_iscsi = self._generate_vol_info(None) 
self.driver.create_volume(volume_iscsi) # volume mapping removed before terminate connection self.driver.initialize_connection(volume_iscsi, connector) host = self.driver._get_host_from_connector(connector) self.assertIsNotNone(host) rmmap_cmd = {'host': host, 'obj': volume_iscsi['name']} self.sim._cmd_rmvdiskhostmap(**rmmap_cmd) self.driver.terminate_connection(volume_iscsi, connector) host = self.driver._get_host_from_connector(connector) self.assertIsNone(host) # clean environment self.driver.delete_volume(volume_iscsi) def test_flashsystem_find_host_exhaustive(self): # case 1: create host and find it self.sim.set_protocol('iSCSI') self._set_flag('flashsystem_connection_protocol', 'iSCSI') conn1 = { 'host': 'flashsystem-01', 'wwnns': ['1111111111abcdef', '1111111111abcdeg'], 'wwpns': ['1111111111000001', '1111111111000002'], 'initiator': 'iqn.111111'} conn2 = { 'host': 'flashsystem-02', 'wwnns': ['2222222222abcdef', '2222222222abcdeg'], 'wwpns': ['2222222222000001', '2222222222000002'], 'initiator': 'iqn.222222'} conn3 = { 'host': 'flashsystem-03', 'wwnns': ['3333333333abcdef', '3333333333abcdeg'], 'wwpns': ['3333333333000001', '3333333333000002'], 'initiator': 'iqn.333333'} host1 = self.driver._create_host(conn1) host2 = self.driver._create_host(conn2) self.assertEqual( host2, self.driver._find_host_exhaustive(conn2, [host1, host2])) self.assertIsNone(self.driver._find_host_exhaustive(conn3, [host1, host2])) # case 2: hosts contains non-existent host info with mock.patch.object(FlashSystemFakeISCSIDriver, '_ssh') as mock_ssh: mock_ssh.return_value = ("pass", "") self.driver._find_host_exhaustive(conn1, [host2]) self.assertFalse(mock_ssh.called) # clear environment self.driver._delete_host(host1) self.driver._delete_host(host2) self.sim.set_protocol('iSCSI') self._reset_flags() def test_flashsystem_manage_existing(self): # case 1: manage a vdisk good path kwargs = {'name': u'unmanage-vol-01', 'size': u'1', 'unit': 'gb'} self.sim._cmd_mkvdisk(**kwargs) vol1 = self._generate_vol_info(None) existing_ref = {'source-name': u'unmanage-vol-01'} self.driver.manage_existing(vol1, existing_ref) self.driver.delete_volume(vol1) # case 2: manage a vdisk not exist vol1 = self._generate_vol_info(None) existing_ref = {'source-name': u'unmanage-vol-01'} self.assertRaises(exception.ManageExistingInvalidReference, self.driver.manage_existing, vol1, existing_ref) # case 3: manage a vdisk without name and uid kwargs = {'name': u'unmanage-vol-01', 'size': u'1', 'unit': 'gb'} self.sim._cmd_mkvdisk(**kwargs) vol1 = self._generate_vol_info(None) existing_ref = {} self.assertRaises(exception.ManageExistingInvalidReference, self.driver.manage_existing, vol1, existing_ref) vdisk1 = {'obj': u'unmanage-vol-01'} self.sim._cmd_rmvdisk(**vdisk1) @mock.patch.object(flashsystem_iscsi.FlashSystemISCSIDriver, '_get_vdiskhost_mappings') def test_flashsystem_manage_existing_get_size_mapped( self, _get_vdiskhost_mappings_mock): # case 2: manage a vdisk with mappings _get_vdiskhost_mappings_mock.return_value = {'mapped': u'yes'} kwargs = {'name': u'unmanage-vol-01', 'size': u'1', 'unit': 'gb'} self.sim._cmd_mkvdisk(**kwargs) vol1 = self._generate_vol_info(None) existing_ref = {'source-name': u'unmanage-vol-01'} self.assertRaises(exception.ManageExistingInvalidReference, self.driver.manage_existing_get_size, vol1, existing_ref) # clean environment vdisk1 = {'obj': u'unmanage-vol-01'} self.sim._cmd_rmvdisk(**vdisk1) def test_flashsystem_manage_existing_get_size_bad_ref(self): # bad existing_ref vol1 = 
self._generate_vol_info(None, None) existing_ref = {} self.assertRaises(exception.ManageExistingInvalidReference, self.driver.manage_existing_get_size, vol1, existing_ref) def test_flashsystem_manage_existing_get_size_vdisk_not_exist(self): # vdisk not exist vol1 = self._generate_vol_info(None) existing_ref = {'source-name': u'unmanage-vol-01'} self.assertRaises(exception.ManageExistingInvalidReference, self.driver.manage_existing_get_size, vol1, existing_ref) def test_flashsystem_manage_existing_get_size(self): # good path kwargs = {'name': u'unmanage-vol-01', 'size': u'10001', 'unit': 'gb'} self.sim._cmd_mkvdisk(**kwargs) vol1 = self._generate_vol_info(None) existing_ref = {'source-name': u'unmanage-vol-01'} vdisk_size = self.driver.manage_existing_get_size(vol1, existing_ref) self.assertEqual(10001, vdisk_size) # clean environment vdisk1 = {'obj': u'unmanage-vol-01'} self.sim._cmd_rmvdisk(**vdisk1) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/volume/drivers/ibm/test_ibm_storage.py0000664000175000017500000010574100000000000026124 0ustar00zuulzuul00000000000000# Copyright 2013 IBM Corp. # Copyright (c) 2013 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
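# Note (added): the tests below exercise ibm_storage.IBMStorageDriver through the
# in-module fake proxy (IBMStorageFakeProxyDriver, wired in via the FAKE_PROXY
# constant and the 'proxy' configuration option set in setUp), so no real IBM XIV,
# Spectrum Accelerate, FlashSystem A9000/A9000R or DS8000 backend is required.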
import copy from unittest import mock from cinder import context from cinder import exception from cinder.i18n import _ from cinder.objects import fields from cinder.tests.unit import test from cinder.volume import configuration as conf from cinder.volume.drivers.ibm.ibm_storage import ibm_storage from cinder.volume import volume_types FAKE = "fake" FAKE2 = "fake2" CANNOT_DELETE = "Can not delete" TOO_BIG_VOLUME_SIZE = 12000 POOL_SIZE = 100 GROUP_ID = 1 VOLUME = {'size': 16, 'name': FAKE, 'id': 1, 'status': 'available'} VOLUME2 = {'size': 32, 'name': FAKE2, 'id': 2, 'status': 'available'} GROUP_VOLUME = {'size': 16, 'name': FAKE, 'id': 3, 'group_id': GROUP_ID, 'status': 'available'} MANAGED_FAKE = "managed_fake" MANAGED_VOLUME = {'size': 16, 'name': MANAGED_FAKE, 'id': 2} REPLICA_FAKE = "repicated_fake" REPLICATED_VOLUME = {'size': 64, 'name': REPLICA_FAKE, 'id': '2', 'replication_status': fields.ReplicationStatus.ENABLED} REPLICATED_VOLUME_DISABLED = REPLICATED_VOLUME.copy() REPLICATED_VOLUME_DISABLED['replication_status'] = ( fields.ReplicationStatus.DISABLED) REPLICATION_TARGETS = [{'target_device_id': 'fakedevice'}] SECONDARY = 'fakedevice' FAKE_FAILOVER_HOST = 'fakehost@fakebackend#fakepool' FAKE_PROVIDER_LOCATION = 'fake_provider_location' FAKE_DRIVER_DATA = 'fake_driver_data' CONTEXT = {} FAKESNAPSHOT = 'fakesnapshot' SNAPSHOT = {'name': 'fakesnapshot', 'id': 3} GROUP = {'id': GROUP_ID, } GROUP_SNAPSHOT_ID = 1 GROUP_SNAPSHOT = {'id': GROUP_SNAPSHOT_ID, 'group_id': GROUP_ID} CONNECTOR = {'initiator': "iqn.2012-07.org.fake:01:948f189c4695", } FAKE_PROXY = 'cinder.tests.unit.volume.drivers.ibm.test_ibm_storage' \ '.IBMStorageFakeProxyDriver' class IBMStorageFakeProxyDriver(object): """Fake IBM Storage driver Fake IBM Storage driver for IBM XIV, Spectrum Accelerate, FlashSystem A9000, FlashSystem A9000R and DS8000 storage systems. 
""" def __init__(self, ibm_storage_info, logger, expt, driver=None, active_backend_id=None, host=None): """Initialize Proxy.""" self.ibm_storage_info = ibm_storage_info self.logger = logger self.exception = expt self.storage_portal = \ self.storage_iqn = FAKE self.volumes = {} self.snapshots = {} self.driver = driver def setup(self, context): if self.ibm_storage_info['user'] != self.driver\ .configuration.san_login: raise self.exception.NotAuthorized() if self.ibm_storage_info['address'] != self.driver\ .configuration.san_ip: raise self.exception.HostNotFound(host='fake') def create_volume(self, volume): if volume['size'] > POOL_SIZE: raise self.exception.VolumeBackendAPIException(data='blah') self.volumes[volume['name']] = volume def volume_exists(self, volume): return self.volumes.get(volume['name'], None) is not None def delete_volume(self, volume): if self.volumes.get(volume['name'], None) is not None: del self.volumes[volume['name']] def manage_volume_get_size(self, volume, existing_ref): if self.volumes.get(existing_ref['source-name'], None) is None: raise self.exception.VolumeNotFound(volume_id=volume['id']) return self.volumes[existing_ref['source-name']]['size'] def manage_volume(self, volume, existing_ref): if self.volumes.get(existing_ref['source-name'], None) is None: raise self.exception.VolumeNotFound(volume_id=volume['id']) volume['size'] = MANAGED_VOLUME['size'] return {} def unmanage_volume(self, volume): pass def initialize_connection(self, volume, connector): if not self.volume_exists(volume): raise self.exception.VolumeNotFound(volume_id=volume['id']) lun_id = volume['id'] self.volumes[volume['name']]['attached'] = connector return {'driver_volume_type': 'iscsi', 'data': {'target_discovered': True, 'target_portal': self.storage_portal, 'target_iqn': self.storage_iqn, 'target_lun': lun_id, 'volume_id': volume['id'], 'multipath': True, 'provider_location': "%s,1 %s %s" % ( self.storage_portal, self.storage_iqn, lun_id), }, } def terminate_connection(self, volume, connector): if not self.volume_exists(volume): raise self.exception.VolumeNotFound(volume_id=volume['id']) if not self.is_volume_attached(volume, connector): raise self.exception.NotFound(_('Volume not found for ' 'instance %(instance_id)s.') % {'instance_id': 'fake'}) del self.volumes[volume['name']]['attached'] def is_volume_attached(self, volume, connector): if not self.volume_exists(volume): raise self.exception.VolumeNotFound(volume_id=volume['id']) return (self.volumes[volume['name']].get('attached', None) == connector) def get_replication_status(self, context, volume): if volume['replication_status'] == 'invalid_status_val': raise exception.CinderException() return {'replication_status': 'active'} def retype(self, ctxt, volume, new_type, diff, host): volume['easytier'] = new_type['extra_specs']['easytier'] return True, volume def create_group(self, ctxt, group): volumes = [volume for k, volume in self.volumes.items() if volume['group_id'] == group['id']] if volumes: raise exception.CinderException( message='The group id of volume may be wrong.') return {'status': fields.GroupStatus.AVAILABLE} def delete_group(self, ctxt, group, volumes): for volume in self.volumes.values(): if group.get('id') == volume.get('group_id'): if volume['name'] == CANNOT_DELETE: raise exception.VolumeBackendAPIException( message='Volume can not be deleted') else: volume['status'] = 'deleted' volumes.append(volume) # Delete snapshots in group self.snapshots = {k: snap for k, snap in self.snapshots.items() if not 
(snap.get('group_id') == group.get('id'))} # Delete volume in group self.volumes = {k: vol for k, vol in self.volumes.items() if not (vol.get('group_id') == group.get('id'))} return {'status': fields.GroupStatus.DELETED}, volumes def update_group(self, context, group, add_volumes, remove_volumes): model_update = {'status': fields.GroupStatus.AVAILABLE} return model_update, None, None def create_group_from_src(self, context, group, volumes, group_snapshot, snapshots, source_group=None, source_vols=None): return None, None def create_group_snapshot(self, ctxt, group_snapshot, snapshots): for volume in self.volumes.values(): if group_snapshot.get('group_id') == volume.get('group_id'): if volume['size'] > POOL_SIZE / 2: raise self.exception.VolumeBackendAPIException(data='blah') snapshot = copy.deepcopy(volume) snapshot['name'] = ( CANNOT_DELETE if snapshot['name'] == CANNOT_DELETE else snapshot['name'] + 'Snapshot') snapshot['status'] = 'available' snapshot['group_snapshot_id'] = group_snapshot.get('id') snapshot['group_id'] = group_snapshot.get('group_id') self.snapshots[snapshot['name']] = snapshot snapshots.append(snapshot) return {'status': fields.GroupSnapshotStatus.AVAILABLE}, snapshots def delete_group_snapshot(self, ctxt, group_snapshot, snapshots): updated_snapshots = [] for snapshot in snapshots: if snapshot['name'] == CANNOT_DELETE: raise exception.VolumeBackendAPIException( message='Snapshot can not be deleted') else: snapshot['status'] = 'deleted' updated_snapshots.append(snapshot) # Delete snapshots in group self.snapshots = {k: snap for k, snap in self.snapshots.items() if not (snap.get('group_id') == group_snapshot.get('group_snapshot_id'))} return {'status': 'deleted'}, updated_snapshots def freeze_backend(self, context): return True def thaw_backend(self, context): return True def failover_host(self, context, volumes, secondary_id, groups=None): target_id = 'BLA' volume_update_list = [] for volume in volumes: status = 'failed-over' if volume['replication_status'] == 'invalid_status_val': status = 'error' volume_update_list.append( {'volume_id': volume['id'], 'updates': {'replication_status': status}}) return target_id, volume_update_list, [] def enable_replication(self, context, group, volumes): vol_status = [] for vol in volumes: vol_status.append( {'id': vol['id'], 'replication_status': fields.ReplicationStatus.ENABLED}) return ( {'replication_status': fields.ReplicationStatus.ENABLED}, vol_status) def disable_replication(self, context, group, volumes): volume_update_list = [] for volume in volumes: volume_update_list.append( {'id': volume['id'], 'replication_status': fields.ReplicationStatus.DISABLED}) return ( {'replication_status': fields.ReplicationStatus.DISABLED}, volume_update_list) def failover_replication(self, context, group, volumes, secondary_id): volume_update_list = [] for volume in volumes: volume_update_list.append( {'id': volume['id'], 'replication_status': fields.ReplicationStatus.FAILED_OVER}) return ({'replication_status': fields.ReplicationStatus.FAILED_OVER}, volume_update_list) def get_replication_error_status(self, context, groups): return ( [{'group_id': groups[0]['id'], 'replication_status': fields.ReplicationStatus.ERROR}], [{'volume_id': VOLUME['id'], 'replication_status': fields.ReplicationStatus.ERROR}]) class IBMStorageVolumeDriverTest(test.TestCase): """Test IBM Storage driver Test IBM Storage driver for IBM XIV, Spectrum Accelerate, FlashSystem A9000, FlashSystem A9000R and DS8000 storage Systems. 
""" def setUp(self): """Initialize IBM Storage Driver.""" super(IBMStorageVolumeDriverTest, self).setUp() configuration = mock.Mock(conf.Configuration) configuration.san_is_local = False configuration.proxy = FAKE_PROXY configuration.connection_type = 'iscsi' configuration.chap = 'disabled' configuration.san_ip = FAKE configuration.management_ips = FAKE configuration.san_login = FAKE configuration.san_clustername = FAKE configuration.san_password = FAKE configuration.append_config_values(mock.ANY) self.driver = ibm_storage.IBMStorageDriver( configuration=configuration) def test_initialized_should_set_ibm_storage_info(self): """Test that the san flags are passed to the IBM proxy.""" self.assertEqual( self.driver.proxy.ibm_storage_info['user'], self.driver.configuration.san_login) self.assertEqual( self.driver.proxy.ibm_storage_info['password'], self.driver.configuration.san_password) self.assertEqual( self.driver.proxy.ibm_storage_info['address'], self.driver.configuration.san_ip) self.assertEqual( self.driver.proxy.ibm_storage_info['vol_pool'], self.driver.configuration.san_clustername) def test_setup_should_fail_if_credentials_are_invalid(self): """Test that the proxy validates credentials.""" self.driver.proxy.ibm_storage_info['user'] = 'invalid' self.assertRaises(exception.NotAuthorized, self.driver.do_setup, None) def test_setup_should_fail_if_connection_is_invalid(self): """Test that the proxy validates connection.""" self.driver.proxy.ibm_storage_info['address'] = \ 'invalid' self.assertRaises(exception.HostNotFound, self.driver.do_setup, None) def test_create_volume(self): """Test creating a volume.""" self.driver.do_setup(None) self.driver.create_volume(VOLUME) has_volume = self.driver.proxy.volume_exists(VOLUME) self.assertTrue(has_volume) self.driver.delete_volume(VOLUME) def test_volume_exists(self): """Test the volume exist method with a volume that doesn't exist.""" self.driver.do_setup(None) self.assertFalse( self.driver.proxy.volume_exists({'name': FAKE}) ) def test_delete_volume(self): """Verify that a volume is deleted.""" self.driver.do_setup(None) self.driver.create_volume(VOLUME) self.driver.delete_volume(VOLUME) has_volume = self.driver.proxy.volume_exists(VOLUME) self.assertFalse(has_volume) def test_delete_volume_should_fail_for_not_existing_volume(self): """Verify that deleting a non-existing volume is OK.""" self.driver.do_setup(None) self.driver.delete_volume(VOLUME) def test_create_volume_should_fail_if_no_pool_space_left(self): """Verify that the proxy validates volume pool space.""" self.driver.do_setup(None) self.assertRaises(exception.VolumeBackendAPIException, self.driver.create_volume, {'name': FAKE, 'id': 1, 'size': TOO_BIG_VOLUME_SIZE}) def test_initialize_connection(self): """Test that inititialize connection attaches volume to host.""" self.driver.do_setup(None) self.driver.create_volume(VOLUME) self.driver.initialize_connection(VOLUME, CONNECTOR) self.assertTrue( self.driver.proxy.is_volume_attached(VOLUME, CONNECTOR)) self.driver.terminate_connection(VOLUME, CONNECTOR) self.driver.delete_volume(VOLUME) def test_initialize_connection_should_fail_for_non_existing_volume(self): """Verify that initialize won't work for non-existing volume.""" self.driver.do_setup(None) self.assertRaises(exception.VolumeNotFound, self.driver.initialize_connection, VOLUME, CONNECTOR) def test_terminate_connection(self): """Test terminating a connection.""" self.driver.do_setup(None) self.driver.create_volume(VOLUME) self.driver.initialize_connection(VOLUME, CONNECTOR) 
self.driver.terminate_connection(VOLUME, CONNECTOR) self.assertFalse(self.driver.proxy.is_volume_attached( VOLUME, CONNECTOR)) self.driver.delete_volume(VOLUME) def test_terminate_connection_should_fail_on_non_existing_volume(self): """Test that terminate won't work for non-existing volumes.""" self.driver.do_setup(None) self.assertRaises(exception.VolumeNotFound, self.driver.terminate_connection, VOLUME, CONNECTOR) def test_manage_existing_get_size(self): """Test that manage_existing_get_size returns the expected size. """ self.driver.do_setup(None) self.driver.create_volume(MANAGED_VOLUME) existing_ref = {'source-name': MANAGED_VOLUME['name']} return_size = self.driver.manage_existing_get_size( VOLUME, existing_ref) self.assertEqual(MANAGED_VOLUME['size'], return_size) # cover both case, whether driver renames the volume or not self.driver.delete_volume(VOLUME) self.driver.delete_volume(MANAGED_VOLUME) def test_manage_existing_get_size_should_fail_on_non_existing_volume(self): """Test that manage_existing_get_size fails on non existing volume. """ self.driver.do_setup(None) # on purpose - do NOT create managed volume existing_ref = {'source-name': MANAGED_VOLUME['name']} self.assertRaises(exception.VolumeNotFound, self.driver.manage_existing_get_size, VOLUME, existing_ref) def test_manage_existing(self): """Test that manage_existing returns successfully. """ self.driver.do_setup(None) self.driver.create_volume(MANAGED_VOLUME) existing_ref = {'source-name': MANAGED_VOLUME['name']} self.driver.manage_existing(VOLUME, existing_ref) self.assertEqual(MANAGED_VOLUME['size'], VOLUME['size']) # cover both case, whether driver renames the volume or not self.driver.delete_volume(VOLUME) self.driver.delete_volume(MANAGED_VOLUME) def test_manage_existing_should_fail_on_non_existing_volume(self): """Test that manage_existing fails on non existing volume. """ self.driver.do_setup(None) # on purpose - do NOT create managed volume existing_ref = {'source-name': MANAGED_VOLUME['name']} self.assertRaises(exception.VolumeNotFound, self.driver.manage_existing, VOLUME, existing_ref) def test_get_replication_status(self): """Test that get_replication_status return successfully. """ self.driver.do_setup(None) # assume the replicated volume is inactive replicated_volume = copy.deepcopy(REPLICATED_VOLUME) replicated_volume['replication_status'] = 'inactive' model_update = self.driver.get_replication_status( CONTEXT, replicated_volume ) self.assertEqual('active', model_update['replication_status']) def test_get_replication_status_fail_on_exception(self): """Test that get_replication_status fails on exception""" self.driver.do_setup(None) replicated_volume = copy.deepcopy(REPLICATED_VOLUME) # on purpose - set invalid value to replication_status # expect an exception. 
replicated_volume['replication_status'] = 'invalid_status_val' self.assertRaises( exception.CinderException, self.driver.get_replication_status, CONTEXT, replicated_volume ) def test_retype(self): """Test that retype returns successfully.""" self.driver.do_setup(None) # prepare parameters ctxt = context.get_admin_context() host = { 'host': 'foo', 'capabilities': { 'location_info': 'ibm_storage_fake_1', 'extent_size': '1024' } } key_specs_old = {'easytier': False, 'warning': 2, 'autoexpand': True} key_specs_new = {'easytier': True, 'warning': 5, 'autoexpand': False} old_type_ref = volume_types.create(ctxt, 'old', key_specs_old) new_type_ref = volume_types.create(ctxt, 'new', key_specs_new) diff, equal = volume_types.volume_types_diff( ctxt, old_type_ref['id'], new_type_ref['id'], ) volume = copy.deepcopy(VOLUME) old_type = volume_types.get_volume_type(ctxt, old_type_ref['id']) volume['volume_type'] = old_type volume['host'] = host new_type = volume_types.get_volume_type(ctxt, new_type_ref['id']) self.driver.create_volume(volume) ret = self.driver.retype(ctxt, volume, new_type, diff, host) self.assertTrue(bool(ret)) self.assertEqual('1', volume['easytier']) def test_retype_fail_on_exception(self): """Test that retype fails on exception.""" self.driver.do_setup(None) # prepare parameters ctxt = context.get_admin_context() host = { 'host': 'foo', 'capabilities': { 'location_info': 'ibm_storage_fake_1', 'extent_size': '1024' } } key_specs_old = {'easytier': False, 'warning': 2, 'autoexpand': True} old_type_ref = volume_types.create(ctxt, 'old', key_specs_old) new_type_ref = volume_types.create(ctxt, 'new') diff, equal = volume_types.volume_types_diff( ctxt, old_type_ref['id'], new_type_ref['id'], ) volume = copy.deepcopy(VOLUME) old_type = volume_types.get_volume_type(ctxt, old_type_ref['id']) volume['volume_type'] = old_type volume['host'] = host new_type = volume_types.get_volume_type(ctxt, new_type_ref['id']) self.driver.create_volume(volume) self.assertRaises( KeyError, self.driver.retype, ctxt, volume, new_type, diff, host ) def test_create_group(self): """Test that create_group return successfully.""" self.driver.do_setup(None) ctxt = context.get_admin_context() # Create group model_update = self.driver.create_group(ctxt, GROUP) self.assertEqual(fields.GroupStatus.AVAILABLE, model_update['status'], "Group created failed") def test_create_group_fail_on_group_not_empty(self): """Test create_group with empty group.""" self.driver.do_setup(None) ctxt = context.get_admin_context() # Create volumes # And add the volumes into the group before creating group self.driver.create_volume(GROUP_VOLUME) self.assertRaises(exception.CinderException, self.driver.create_group, ctxt, GROUP) def test_delete_group(self): """Test that delete_group return successfully.""" self.driver.do_setup(None) ctxt = context.get_admin_context() # Create group self.driver.create_group(ctxt, GROUP) # Create volumes and add them to group self.driver.create_volume(GROUP_VOLUME) # Delete group model_update, volumes = self.driver.delete_group(ctxt, GROUP, [GROUP_VOLUME]) # Verify the result self.assertEqual(fields.GroupStatus.DELETED, model_update['status'], 'Group deleted failed') for volume in volumes: self.assertEqual('deleted', volume['status'], 'Group deleted failed') def test_delete_group_fail_on_volume_not_delete(self): """Test delete_group with volume delete failure.""" self.driver.do_setup(None) ctxt = context.get_admin_context() # Create group self.driver.create_group(ctxt, GROUP) # Set the volume not to be deleted 
volume = copy.deepcopy(GROUP_VOLUME) volume['name'] = CANNOT_DELETE # Create volumes and add them to group self.driver.create_volume(volume) self.assertRaises(exception.VolumeBackendAPIException, self.driver.delete_group, ctxt, GROUP, [volume]) def test_create_group_snapshot(self): """Test that create_group_snapshot return successfully.""" self.driver.do_setup(None) ctxt = context.get_admin_context() # Create group self.driver.create_group(ctxt, GROUP) # Create volumes and add them to group self.driver.create_volume(VOLUME) # Create group snapshot model_update, snapshots = self.driver.create_group_snapshot( ctxt, GROUP_SNAPSHOT, [VOLUME]) # Verify the result self.assertEqual(fields.GroupSnapshotStatus.AVAILABLE, model_update['status'], 'Group Snapshot created failed') for snap in snapshots: self.assertEqual('available', snap['status']) # Clean the environment self.driver.delete_group_snapshot(ctxt, GROUP_SNAPSHOT, [VOLUME]) self.driver.delete_group(ctxt, GROUP, [VOLUME]) def test_create_group_snapshot_fail_on_no_pool_space_left(self): """Test create_group_snapshot when no pool space left.""" self.driver.do_setup(None) ctxt = context.get_admin_context() # Create group self.driver.create_group(ctxt, GROUP) # Set the volume size volume = copy.deepcopy(GROUP_VOLUME) volume['size'] = POOL_SIZE / 2 + 1 # Create volumes and add them to group self.driver.create_volume(volume) self.assertRaises(exception.VolumeBackendAPIException, self.driver.create_group_snapshot, ctxt, GROUP_SNAPSHOT, [volume]) # Clean the environment self.driver.volumes = None self.driver.delete_group(ctxt, GROUP, [volume]) def test_delete_group_snapshot(self): """Test that delete_group_snapshot return successfully.""" self.driver.do_setup(None) ctxt = context.get_admin_context() # Create group self.driver.create_group(ctxt, GROUP) # Create volumes and add them to group self.driver.create_volume(GROUP_VOLUME) # Create group snapshot self.driver.create_group_snapshot(ctxt, GROUP_SNAPSHOT, [GROUP_VOLUME]) # Delete group snapshot model_update, snapshots = self.driver.delete_group_snapshot( ctxt, GROUP_SNAPSHOT, [GROUP_VOLUME]) # Verify the result self.assertEqual(fields.GroupSnapshotStatus.DELETED, model_update['status'], 'Group Snapshot deleted failed') for snap in snapshots: self.assertEqual('deleted', snap['status']) # Clean the environment self.driver.delete_group(ctxt, GROUP, [GROUP_VOLUME]) def test_delete_group_snapshot_fail_on_snapshot_not_delete(self): """Test delete_group_snapshot when the snapshot cannot be deleted.""" self.driver.do_setup(None) ctxt = context.get_admin_context() # Create group self.driver.create_group(ctxt, GROUP) # Set the snapshot not to be deleted volume = copy.deepcopy(GROUP_VOLUME) volume['name'] = CANNOT_DELETE # Create volumes and add them to group self.driver.create_volume(volume) # Create group snapshot self.driver.create_group_snapshot(ctxt, GROUP_SNAPSHOT, [volume]) self.assertRaises(exception.VolumeBackendAPIException, self.driver.delete_group_snapshot, ctxt, GROUP_SNAPSHOT, [volume]) def test_update_group_without_volumes(self): """Test update_group when there are no volumes specified.""" self.driver.do_setup(None) ctxt = context.get_admin_context() # Update group model_update, added, removed = self.driver.update_group( ctxt, GROUP, [], []) self.assertEqual(fields.GroupStatus.AVAILABLE, model_update['status'], "Group update failed") self.assertIsNone(added, "added volumes list is not empty") self.assertIsNone(removed, "removed volumes list is not empty") def 
test_update_group_with_volumes(self): """Test update_group when there are volumes specified.""" self.driver.do_setup(None) ctxt = context.get_admin_context() # Update group model_update, added, removed = self.driver.update_group( ctxt, GROUP, [VOLUME], [VOLUME2]) self.assertEqual(fields.GroupStatus.AVAILABLE, model_update['status'], "Group update failed") self.assertIsNone(added, "added volumes list is not empty") self.assertIsNone(removed, "removed volumes list is not empty") def test_create_group_from_src_without_volumes(self): """Test create_group_from_src with no volumes specified.""" self.driver.do_setup(None) ctxt = context.get_admin_context() # Create group from source model_update, volumes_model_update = ( self.driver.create_group_from_src( ctxt, GROUP, [], GROUP_SNAPSHOT, [])) # model_update can be None or return available in status if model_update: self.assertEqual(fields.GroupStatus.AVAILABLE, model_update['status'], "Group create from source failed") # volumes_model_update can be None or return available in status if volumes_model_update: self.assertFalse(volumes_model_update, "volumes list is not empty") def test_create_group_from_src_with_volumes(self): """Test create_group_from_src with volumes specified.""" self.driver.do_setup(None) ctxt = context.get_admin_context() # Create group from source model_update, volumes_model_update = ( self.driver.create_group_from_src( ctxt, GROUP, [VOLUME], GROUP_SNAPSHOT, [SNAPSHOT])) # model_update can be None or return available in status if model_update: self.assertEqual(fields.GroupStatus.AVAILABLE, model_update['status'], "Group create from source failed") # volumes_model_update can be None or return available in status if volumes_model_update: self.assertEqual(fields.GroupStatus.AVAILABLE, volumes_model_update['status'], "volumes list status failed") def test_freeze_backend(self): """Test that freeze_backend returns successful""" self.driver.do_setup(None) # not much we can test here... self.assertTrue(self.driver.freeze_backend(CONTEXT)) def test_thaw_backend(self): """Test that thaw_backend returns successful""" self.driver.do_setup(None) # not much we can test here... 
        self.assertTrue(self.driver.thaw_backend(CONTEXT))

    def test_failover_host(self):
        """Test that failover_host returns expected values"""
        self.driver.do_setup(None)
        replicated_volume = copy.deepcopy(REPLICATED_VOLUME)
        # assume the replication_status is active
        replicated_volume['replication_status'] = 'active'
        expected_target_id = 'BLA'
        expected_volume_update_list = [
            {'volume_id': REPLICATED_VOLUME['id'],
             'updates': {'replication_status': 'failed-over'}}]
        target_id, volume_update_list, __ = self.driver.failover_host(
            CONTEXT, [replicated_volume], SECONDARY, [])
        self.assertEqual(expected_target_id, target_id)
        self.assertEqual(expected_volume_update_list, volume_update_list)

    def test_failover_host_bad_state(self):
        """Test that failover_host returns with error"""
        self.driver.do_setup(None)
        replicated_volume = copy.deepcopy(REPLICATED_VOLUME)
        # assume the replication_status is active
        replicated_volume['replication_status'] = 'invalid_status_val'
        expected_target_id = 'BLA'
        expected_volume_update_list = [
            {'volume_id': REPLICATED_VOLUME['id'],
             'updates': {'replication_status': 'error'}}]
        target_id, volume_update_list, __ = self.driver.failover_host(
            CONTEXT, [replicated_volume], SECONDARY, [])
        self.assertEqual(expected_target_id, target_id)
        self.assertEqual(expected_volume_update_list, volume_update_list)

    def test_enable_replication(self):
        self.driver.do_setup(None)
        model_update, volumes_model_update = self.driver.enable_replication(
            CONTEXT, GROUP, [REPLICATED_VOLUME])
        self.assertEqual(fields.ReplicationStatus.ENABLED,
                         model_update['replication_status'])
        for vol in volumes_model_update:
            self.assertEqual(fields.ReplicationStatus.ENABLED,
                             vol['replication_status'])

    def test_disable_replication(self):
        self.driver.do_setup(None)
        model_update, volumes_model_update = self.driver.disable_replication(
            CONTEXT, GROUP, [REPLICATED_VOLUME_DISABLED])
        self.assertEqual(fields.ReplicationStatus.DISABLED,
                         model_update['replication_status'])
        for vol in volumes_model_update:
            self.assertEqual(fields.ReplicationStatus.DISABLED,
                             volumes_model_update[0]['replication_status'])

    def test_failover_replication(self):
        self.driver.do_setup(None)
        model_update, volumes_model_update = self.driver.failover_replication(
            CONTEXT, GROUP, [VOLUME], SECONDARY)
        self.assertEqual(fields.ReplicationStatus.FAILED_OVER,
                         model_update['replication_status'])

    def test_get_replication_error_status(self):
        self.driver.do_setup(None)
        group_model_updates, volume_model_updates = (
            self.driver.get_replication_error_status(CONTEXT, [GROUP]))
        self.assertEqual(fields.ReplicationStatus.ERROR,
                         group_model_updates[0]['replication_status'])
        self.assertEqual(fields.ReplicationStatus.ERROR,
                         volume_model_updates[0]['replication_status'])


cinder-27.0.0/cinder/tests/unit/volume/drivers/ibm/test_storwize_svc.py

# Copyright 2015 IBM Corp.
# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.  You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Tests for the IBM Storwize family and SVC volume driver."""

import json
import random
import re
import time
from unittest import mock

import ddt
from oslo_concurrency import processutils
from oslo_config import cfg
from oslo_service import loopingcall
from oslo_utils import units
import paramiko

from cinder import context
import cinder.db
from cinder.db.sqlalchemy import models
from cinder import exception
from cinder.i18n import _
from cinder import objects
from cinder.objects import fields
from cinder import ssh_utils
from cinder.tests.unit import fake_constants as fake
from cinder.tests.unit import test
from cinder.tests.unit import utils as testutils
from cinder import utils
from cinder.volume import configuration as conf
from cinder.volume.drivers.ibm.storwize_svc import (
    replication as storwize_rep)
from cinder.volume.drivers.ibm.storwize_svc import storwize_const
from cinder.volume.drivers.ibm.storwize_svc import storwize_svc_common
from cinder.volume.drivers.ibm.storwize_svc import storwize_svc_fc
from cinder.volume.drivers.ibm.storwize_svc import storwize_svc_iscsi
from cinder.volume import group_types
from cinder.volume import qos_specs
from cinder.volume import volume_types
from cinder.volume import volume_utils

SVC_POOLS = ['openstack', 'openstack1']

SVC_SOURCE_CHILD_POOL = 'openstack2'
SVC_TARGET_CHILD_POOL = 'openstack3'

CONF = cfg.CONF


def _get_test_pool(get_all=False):
    if get_all:
        return SVC_POOLS
    else:
        return SVC_POOLS[0]


class StorwizeSVCManagementSimulator(object):
    def __init__(self, pool_name):
        self._flags = {'storwize_svc_volpool_name': pool_name}
        self._volumes_list = {}
        self._hosts_list = {}
        self._mappings_list = {}
        self._fcmappings_list = {}
        self._fcconsistgrp_list = {}
        self._rcrelationship_list = {}
        self._partnership_list = {}
        self._partnershipcandidate_list = {}
        self._rcconsistgrp_list = {}
        self._volumegroup_list = {}
        self._volumegroup_snapshot_list = {}
        self._system_list = {'storwize-svc-sim': {'id': '0123456789ABCDEF',
                                                  'name': 'storwize-svc-sim'},
                             'aux-svc-sim': {'id': 'ABCDEF0123456789',
                                             'name': 'aux-svc-sim'}}
        self._other_pools = {'openstack2': {}, 'openstack3': {}}
        self._next_cmd_error = {
            'lsportip': '',
            'lsip': '',
            'lsfabric': '',
            'lsfcportsetmember': '',
            'lsiscsiauth': '',
            'lsnodecanister': '',
            'mkvdisk': '',
            'lsvdisk': '',
            'lsfcmap': '',
            'prestartfcmap': '',
            'startfcmap': '',
            'rmfcmap': '',
            'lslicense': '',
            'lsguicapabilities': '',
            'lshost': '',
            'lsrcrelationship': '',
            'expandvdisksize': ''
        }
        self._errors = {
            'CMMVC5701E': ('', 'CMMVC5701E No object ID was specified.'),
            'CMMVC6035E': ('', 'CMMVC6035E The action failed as the '
                               'object already exists.'),
            'CMMVC5753E': ('', 'CMMVC5753E The specified object does not '
                               'exist or is not a suitable candidate.'),
            'CMMVC5707E': ('', 'CMMVC5707E Required parameters are missing.'),
            'CMMVC6581E': ('', 'CMMVC6581E The command has failed because '
                               'the maximum number of allowed iSCSI '
                               'qualified names (IQNs) has been reached, '
                               'or the IQN is already assigned or is not '
                               'valid.'),
            'CMMVC5754E': ('', 'CMMVC5754E The specified object does not '
                               'exist, or the name supplied does not meet '
                               'the naming rules.'),
            'CMMVC6071E': ('', 'CMMVC6071E The VDisk-to-host mapping was '
                               'not created because the VDisk is already '
                               'mapped to a host.'),
            'CMMVC5879E': ('', 'CMMVC5879E The VDisk-to-host mapping was '
                               'not created because a VDisk is already '
                               'mapped to this host with this SCSI LUN.'),
            'CMMVC5840E': ('',
'CMMVC5840E The virtual disk (VDisk) was ' 'not deleted because it is mapped to a ' 'host or because it is part of a FlashCopy ' 'or Remote Copy mapping, or is involved in ' 'an image mode migrate.'), 'CMMVC6527E': ('', 'CMMVC6527E The name that you have entered ' 'is not valid. The name can contain letters, ' 'numbers, spaces, periods, dashes, and ' 'underscores. The name must begin with a ' 'letter or an underscore. The name must not ' 'begin or end with a space.'), 'CMMVC5871E': ('', 'CMMVC5871E The action failed because one or ' 'more of the configured port names is in a ' 'mapping.'), 'CMMVC5924E': ('', 'CMMVC5924E The FlashCopy mapping was not ' 'created because the source and target ' 'virtual disks (VDisks) are different sizes.'), 'CMMVC6303E': ('', 'CMMVC6303E The create failed because the ' 'source and target VDisks are the same.'), 'CMMVC7050E': ('', 'CMMVC7050E The command failed because at ' 'least one node in the I/O group does not ' 'support compressed VDisks.'), 'CMMVC6430E': ('', 'CMMVC6430E The command failed because the ' 'target and source managed disk groups must ' 'be different.'), 'CMMVC6353E': ('', 'CMMVC6353E The command failed because the ' 'copy specified does not exist.'), 'CMMVC6446E': ('', 'The command failed because the managed disk ' 'groups have different extent sizes.'), # Catch-all for invalid state transitions: 'CMMVC5903E': ('', 'CMMVC5903E The FlashCopy mapping was not ' 'changed because the mapping or consistency ' 'group is another state.'), 'CMMVC5709E': ('', 'CMMVC5709E [-%(VALUE)s] is not a supported ' 'parameter.'), 'CMMVC5982E': ('', 'CMMVC5982E The operation was not performed ' 'because it is not valid given the current ' 'relationship state.'), 'CMMVC5963E': ('', 'CMMVC5963E No direction has been defined.'), 'CMMVC5713E': ('', 'CMMVC5713E Some parameters are mutually ' 'exclusive.'), 'CMMVC5804E': ('', 'CMMVC5804E The action failed because an ' 'object that was specified in the command ' 'does not exist.'), 'CMMVC6065E': ('', 'CMMVC6065E The action failed as the object ' 'is not in a group.'), 'CMMVC9012E': ('', 'CMMVC9012E The copy type differs from other ' 'copies already in the consistency group.'), 'CMMVC5951E': ('', 'CMMVC5951E The operation cannot be performed ' 'because the relationship is not a stand-alone ' 'relationship.'), 'CMMVC9201E': ('', 'CMMVC9201E Task failed because volume has a ' 'copy that is fully allocated and is part of a ' 'Metro Mirror or Global Mirror relationship.'), 'CMMVC8587E': ('', 'CMMVC8587E The command failed because the ' 'volume is fast formatting.'), 'CMMVC8783E': ('', 'CMMVC8783E The volume copy was not deleted ' 'because the volume is part of a consistency ' 'group.'), 'CMMVC6578E': ('', 'CMMVC6578E The command has failed because ' 'the iSCSI name is already assigned or is ' 'not valid.'), } self._fc_transitions = {'begin': {'make': 'idle_or_copied'}, 'idle_or_copied': {'prepare': 'preparing', 'delete': 'end', 'delete_force': 'end'}, 'preparing': {'flush_failed': 'stopped', 'wait': 'prepared'}, 'end': None, 'stopped': {'prepare': 'preparing', 'delete_force': 'end'}, 'prepared': {'stop': 'stopped', 'start': 'copying'}, 'copying': {'wait': 'idle_or_copied', 'stop': 'stopping'}, # Assume the worst case where stopping->stopped # rather than stopping idle_or_copied 'stopping': {'wait': 'stopped'}, } self._fc_cg_transitions = {'begin': {'make': 'empty'}, 'empty': {'add': 'idle_or_copied'}, 'idle_or_copied': {'prepare': 'preparing', 'delete': 'end', 'delete_force': 'end'}, 'preparing': {'flush_failed': 
'stopped', 'wait': 'prepared'}, 'end': None, 'stopped': {'prepare': 'preparing', 'delete_force': 'end'}, 'prepared': {'stop': 'stopped', 'start': 'copying', 'delete_force': 'end', 'delete': 'end'}, 'copying': {'wait': 'idle_or_copied', 'stop': 'stopping', 'delete_force': 'end', 'delete': 'end'}, # Assume the case where stopping->stopped # rather than stopping idle_or_copied 'stopping': {'wait': 'stopped'}, } self._rc_transitions = {'inconsistent_stopped': {'start': 'inconsistent_copying', 'stop': 'inconsistent_stopped', 'delete': 'end', 'delete_force': 'end'}, 'inconsistent_copying': { 'wait': 'consistent_synchronized', 'start': 'inconsistent_copying', 'stop': 'inconsistent_stopped', 'delete': 'end', 'delete_force': 'end'}, 'consistent_synchronized': { 'start': 'consistent_synchronized', 'stop': 'consistent_stopped', 'stop_access': 'idling', 'delete': 'end', 'delete_force': 'end'}, 'consistent_copying': { 'start': 'consistent_copying', 'stop': 'consistent_stopped', 'stop_access': 'idling', 'delete': 'end', 'delete_force': 'end'}, 'consistent_stopped': {'start': 'consistent_synchronized', 'stop': 'consistent_stopped', 'delete': 'end', 'delete_force': 'end'}, 'end': None, 'idling': { 'start': 'inconsistent_copying', 'stop': 'inconsistent_stopped', 'stop_access': 'idling', 'delete': 'end', 'delete_force': 'end'}, } self._rccg_transitions = {'empty': {'add': 'inconsistent_stopped', 'delete': 'end', 'delete_force': 'end'}, 'inconsistent_stopped': {'start': 'inconsistent_copying', 'stop': 'inconsistent_stopped', 'delete': 'end', 'delete_force': 'end'}, 'inconsistent_copying': { 'wait': 'consistent_synchronized', 'start': 'inconsistent_copying', 'stop': 'inconsistent_stopped', 'delete': 'end', 'delete_force': 'end'}, 'consistent_synchronized': { 'start': 'consistent_synchronized', 'stop': 'consistent_stopped', 'stop_access': 'idling', 'delete': 'end', 'delete_force': 'end'}, 'consistent_stopped': {'start': 'consistent_synchronized', 'stop': 'consistent_stopped', 'delete': 'end', 'delete_force': 'end'}, 'consistent_copying': { 'start': 'consistent_copying', 'stop': 'consistent_stopped', 'stop_access': 'idling', 'delete': 'end', 'delete_force': 'end'}, 'end': None, 'idling': { 'start': 'inconsistent_copying', 'stop': 'inconsistent_stopped', 'stop_access': 'idling', 'delete': 'end', 'delete_force': 'end'}, } def _state_transition(self, function, fcmap): if (function == 'wait' and 'wait' not in self._fc_transitions[fcmap['status']]): return ('', '') if fcmap['status'] == 'copying' and function == 'wait': if fcmap['copyrate'] != '0': if fcmap['progress'] == '0': fcmap['progress'] = '50' else: fcmap['progress'] = '100' fcmap['status'] = 'idle_or_copied' return ('', '') else: try: curr_state = fcmap['status'] fcmap['status'] = self._fc_transitions[curr_state][function] return ('', '') except Exception: return self._errors['CMMVC5903E'] def _fc_cg_state_transition(self, function, fc_consistgrp): if (function == 'wait' and 'wait' not in self._fc_transitions[fc_consistgrp['status']]): return ('', '') try: curr_state = fc_consistgrp['status'] fc_consistgrp['status'] \ = self._fc_cg_transitions[curr_state][function] return ('', '') except Exception: return self._errors['CMMVC5903E'] # Find an unused ID @staticmethod def _find_unused_id(d): ids = [] for v in d.values(): ids.append(int(v['id'])) ids.sort() for index, n in enumerate(ids): if n > index: return str(index) return str(len(ids)) # Check if name is valid @staticmethod def _is_invalid_name(name): if re.match(r'^[a-zA-Z_][\w._-]*$', name): return False 
return True # Convert argument string to dictionary @staticmethod def _cmd_to_dict(arg_list): no_param_args = [ 'autodelete', 'bytes', 'compressed', 'force', 'nohdr', 'nofmtdisk', 'noconsistgrp', 'global', 'access', 'start', 'thin', 'removehostmappings', 'removefcmaps', 'removercrelationships', 'novolumegroup', 'ignorelegacy' ] one_param_args = [ 'chapsecret', 'cleanrate', 'copy', 'copyrate', 'delim', 'easytier', 'filtervalue', 'grainsize', 'hbawwpn', 'host', 'iogrp', 'iscsiname', 'mdiskgrp', 'name', 'rsize', 'scsi', 'size', 'source', 'target', 'unit', 'vdisk', 'warning', 'wwpn', 'primary', 'consistgrp', 'master', 'aux', 'cluster', 'linkbandwidthmbits', 'backgroundcopyrate', 'copies', 'cyclingmode', 'cycleperiodseconds', 'masterchange', 'auxchange', 'pool', 'site', 'buffersize', 'volumegroup', 'snapshot' ] no_or_one_param_args = [ 'autoexpand', ] # Handle the special case of lsnode which is a two-word command # Use the one word version of the command internally if arg_list[0] in ('svcinfo', 'svctask'): if arg_list[1] == 'lsnode': if len(arg_list) > 4: # e.g. svcinfo lsnode -delim ! ret = {'cmd': 'lsnode', 'node_id': arg_list[-1]} else: ret = {'cmd': 'lsnodecanister'} else: ret = {'cmd': arg_list[1]} arg_list.pop(0) else: ret = {'cmd': arg_list[0]} skip = False for i in range(1, len(arg_list)): if skip: skip = False continue # Check for a quoted command argument for volumes and strip # quotes so that the simulater can match it later. Just # match against test naming convensions for now. if arg_list[i][0] == '"' and ('volume' in arg_list[i] or 'snapshot' in arg_list[i]): arg_list[i] = arg_list[i][1:-1] if arg_list[i][0] == '-': if arg_list[i][1:] in no_param_args: ret[arg_list[i][1:]] = True elif arg_list[i][1:] in one_param_args: ret[arg_list[i][1:]] = arg_list[i + 1] skip = True elif arg_list[i][1:] in no_or_one_param_args: if i == (len(arg_list) - 1) or arg_list[i + 1][0] == '-': ret[arg_list[i][1:]] = True else: ret[arg_list[i][1:]] = arg_list[i + 1] skip = True else: raise exception.InvalidInput( reason=_('unrecognized argument %s') % arg_list[i]) else: ret['obj'] = arg_list[i] return ret @staticmethod def _print_info_cmd(rows, delim=' ', nohdr=False, **kwargs): """Generic function for printing information.""" if nohdr: del rows[0] for index in range(len(rows)): rows[index] = delim.join(rows[index]) return ('%s' % '\n'.join(rows), '') @staticmethod def _print_info_obj_cmd(header, row, delim=' ', nohdr=False): """Generic function for printing information for a specific object.""" objrows = [] for idx, val in enumerate(header): objrows.append([val, row[idx]]) if nohdr: for index in range(len(objrows)): objrows[index] = ' '.join(objrows[index][1:]) for index in range(len(objrows)): objrows[index] = delim.join(objrows[index]) return ('%s' % '\n'.join(objrows), '') @staticmethod def _convert_bytes_units(bytestr): num = int(bytestr) unit_array = ['B', 'KB', 'MB', 'GB', 'TB', 'PB'] unit_index = 0 while num > 1024: num = num / 1024 unit_index += 1 return '%d%s' % (num, unit_array[unit_index]) @staticmethod def _convert_units_bytes(num, unit): unit_array = ['B', 'KB', 'MB', 'GB', 'TB', 'PB'] unit_index = 0 while unit.lower() != unit_array[unit_index].lower(): num = num * 1024 unit_index += 1 return str(num) def _cmd_lslicense(self, **kwargs): rows = [None] * 3 rows[0] = ['used_compression_capacity', '0.08'] rows[1] = ['license_compression_capacity', '0'] if self._next_cmd_error['lslicense'] == 'no_compression': self._next_cmd_error['lslicense'] = '' rows[2] = ['license_compression_enclosures', 
'0'] else: rows[2] = ['license_compression_enclosures', '1'] return self._print_info_cmd(rows=rows, **kwargs) def _cmd_lsguicapabilities(self, **kwargs): rows = [None] * 2 if self._next_cmd_error['lsguicapabilities'] == 'no_compression': self._next_cmd_error['lsguicapabilities'] = '' rows[0] = ['license_scheme', '0'] else: rows[0] = ['license_scheme', 'flex'] rows[1] = ['product_key', storwize_const.DEV_MODEL_SVC] return self._print_info_cmd(rows=rows, **kwargs) # Print mostly made-up stuff in the correct syntax def _cmd_lssystem(self, **kwargs): rows = [None] * 4 rows[0] = ['id', '0123456789ABCDEF'] rows[1] = ['name', 'storwize-svc-sim'] rows[2] = ['code_level', '7.2.0.0 (build 87.0.1311291000)'] rows[3] = ['topology', ''] return self._print_info_cmd(rows=rows, **kwargs) def _cmd_lssystem_aux(self, **kwargs): rows = [None] * 4 rows[0] = ['id', 'ABCDEF0123456789'] rows[1] = ['name', 'aux-svc-sim'] rows[2] = ['code_level', '7.2.0.0 (build 87.0.1311291000)'] rows[3] = ['topology', ''] return self._print_info_cmd(rows=rows, **kwargs) # Print mostly made-up stuff in the correct syntax, assume -bytes passed def _cmd_lsmdiskgrp(self, **kwargs): pool_num = len(self._flags['storwize_svc_volpool_name']) rows = [] rows.append(['id', 'name', 'status', 'mdisk_count', 'vdisk_count', 'capacity', 'extent_size', 'free_capacity', 'virtual_capacity', 'used_capacity', 'real_capacity', 'overallocation', 'warning', 'easy_tier', 'easy_tier_status', 'site_id', 'site_name', 'data_reduction']) for i in range(pool_num): row_data = [str(i + 1), self._flags['storwize_svc_volpool_name'][i], 'online', '1', str(len(self._volumes_list)), '3573412790272', '256', '3529926246400', '1693247906775', '26843545600', '38203734097', '47', '80', 'auto', 'inactive', '', '', 'no'] rows.append(row_data) rows.append([str(pool_num + 1), 'openstack2', 'online', '1', '0', '3573412790272', '256', '3529432325160', '1693247906775', '26843545600', '38203734097', '47', '80', 'auto', 'inactive', '', '', 'no']) rows.append([str(pool_num + 2), 'openstack3', 'offline', '1', '0', '3573412790272', '128', '3529432325160', '1693247906775', '26843545600', '38203734097', '47', '80', 'auto', 'inactive', '', '', 'yes']) rows.append([str(pool_num + 3), 'hyperswap1', 'online', '1', '0', '3573412790272', '256', '3529432325160', '1693247906775', '26843545600', '38203734097', '47', '80', 'auto', 'inactive', '1', 'site1', 'no']) rows.append([str(pool_num + 4), 'hyperswap2', 'online', '1', '0', '3573412790272', '128', '3529432325160', '1693247906775', '26843545600', '38203734097', '47', '80', 'auto', 'inactive', '2', 'site2', 'no']) rows.append([str(pool_num + 5), 'dr_pool1', 'online', '1', '0', '3573412790272', '128', '3529432325160', '1693247906775', '26843545600', '38203734097', '47', '80', 'auto', 'inactive', '1', 'site1', 'yes']) rows.append([str(pool_num + 6), 'dr_pool2', 'online', '1', '0', '3573412790272', '128', '3529432325160', '1693247906775', '26843545600', '38203734097', '47', '80', 'auto', 'inactive', '2', 'site2', 'yes']) if 'obj' not in kwargs: return self._print_info_cmd(rows=rows, **kwargs) else: pool_name = kwargs['obj'].strip('\'\"') if pool_name == kwargs['obj']: raise exception.InvalidInput( reason=_('obj missing quotes %s') % kwargs['obj']) elif pool_name in self._flags['storwize_svc_volpool_name']: for each_row in rows: if pool_name in each_row: row = each_row break elif pool_name == 'openstack2': row = rows[-6] elif pool_name == 'openstack3': row = rows[-5] elif pool_name == 'hyperswap1': row = rows[-4] elif pool_name == 'hyperswap2': 
row = rows[-3] elif pool_name == 'dr_pool1': row = rows[-2] elif pool_name == 'dr_pool2': row = rows[-1] else: return self._errors['CMMVC5754E'] objrows = [] for idx, val in enumerate(rows[0]): objrows.append([val, row[idx]]) if 'nohdr' in kwargs: for index in range(len(objrows)): objrows[index] = ' '.join(objrows[index][1:]) if 'delim' in kwargs: for index in range(len(objrows)): objrows[index] = kwargs['delim'].join(objrows[index]) return ('%s' % '\n'.join(objrows), '') def _get_mdiskgrp_id(self, mdiskgrp): grp_num = len(self._flags['storwize_svc_volpool_name']) if mdiskgrp in self._flags['storwize_svc_volpool_name']: for i in range(grp_num): if mdiskgrp == self._flags['storwize_svc_volpool_name'][i]: return i + 1 elif mdiskgrp == 'openstack2': return grp_num + 1 elif mdiskgrp == 'openstack3': return grp_num + 2 else: return None # Print mostly made-up stuff in the correct syntax def _cmd_lsnodecanister(self, **kwargs): rows = [None] * 3 rows[0] = ['id', 'name', 'UPS_serial_number', 'WWNN', 'status', 'IO_group_id', 'IO_group_name', 'config_node', 'UPS_unique_id', 'hardware', 'iscsi_name', 'iscsi_alias', 'panel_name', 'enclosure_id', 'canister_id', 'enclosure_serial_number', 'site_id'] rows[1] = ['1', 'node1', '', '123456789ABCDEF0', 'online', '0', 'io_grp0', 'yes', '123456789ABCDEF0', '100', 'iqn.1982-01.com.ibm:1234.sim.node1', '', '01-1', '1', '1', '0123ABC', '1'] rows[2] = ['2', 'node2', '', '123456789ABCDEF1', 'online', '1', 'io_grp0', 'no', '123456789ABCDEF1', '100', 'iqn.1982-01.com.ibm:1234.sim.node2', '', '01-2', '1', '2', '0123ABC', '2'] if self._next_cmd_error['lsnodecanister'] == 'header_mismatch': rows[0].pop(2) self._next_cmd_error['lsnodecanister'] = '' if self._next_cmd_error['lsnodecanister'] == 'remove_field': for row in rows: row.pop(0) self._next_cmd_error['lsnodecanister'] = '' return self._print_info_cmd(rows=rows, **kwargs) # Print information of every single node of SVC def _cmd_lsnode(self, **kwargs): node_infos = dict() node_infos['1'] = r'''id!1 name!node1 port_id!500507680210C744 port_status!active port_speed!8Gb port_id!500507680220C744 port_status!active port_speed!8Gb ''' node_infos['2'] = r'''id!2 name!node2 port_id!500507680220C745 port_status!active port_speed!8Gb port_id!500507680230C745 port_status!inactive port_speed!N/A ''' node_id = kwargs.get('node_id', None) stdout = node_infos.get(node_id, '') return stdout, '' # Print made up stuff for the ports def _cmd_lsportfc(self, **kwargs): node_1 = [None] * 7 node_1[0] = ['id', 'fc_io_port_id', 'port_id', 'type', 'port_speed', 'node_id', 'node_name', 'WWPN', 'nportid', 'status', 'attachment'] node_1[1] = ['0', '1', '1', 'fc', '8Gb', '1', 'node1', '5005076802132ADE', '012E00', 'active', 'switch'] node_1[2] = ['1', '2', '2', 'fc', '8Gb', '1', 'node1', '5005076802232ADE', '012E00', 'active', 'switch'] node_1[3] = ['2', '3', '3', 'fc', '8Gb', '1', 'node1', '5005076802332ADE', '9B0600', 'active', 'switch'] node_1[4] = ['3', '4', '4', 'fc', '8Gb', '1', 'node1', '5005076802432ADE', '012A00', 'active', 'switch'] node_1[5] = ['4', '5', '5', 'fc', '8Gb', '1', 'node1', '5005076802532ADE', '014A00', 'active', 'switch'] node_1[6] = ['5', '6', '4', 'ethernet', 'N/A', '1', 'node1', '5005076802632ADE', '000000', 'inactive_unconfigured', 'none'] node_2 = [None] * 7 node_2[0] = ['id', 'fc_io_port_id', 'port_id', 'type', 'port_speed', 'node_id', 'node_name', 'WWPN', 'nportid', 'status', 'attachment'] node_2[1] = ['6', '7', '7', 'fc', '8Gb', '2', 'node2', '5005086802132ADE', '012E00', 'active', 'switch'] node_2[2] = ['7', 
'8', '8', 'fc', '8Gb', '2', 'node2', '5005086802232ADE', '012E00', 'active', 'switch'] node_2[3] = ['8', '9', '9', 'fc', '8Gb', '2', 'node2', '5005086802332ADE', '9B0600', 'active', 'switch'] node_2[4] = ['9', '10', '10', 'fc', '8Gb', '2', 'node2', '5005086802432ADE', '012A00', 'active', 'switch'] node_2[5] = ['10', '11', '11', 'fc', '8Gb', '2', 'node2', '5005086802532ADE', '014A00', 'active', 'switch'] node_2[6] = ['11', '12', '12', 'ethernet', 'N/A', '2', 'node2', '5005086802632ADE', '000000', 'inactive_unconfigured', 'none'] node_infos = [node_1, node_2] node_id = int(kwargs['filtervalue'].split('=')[1]) - 1 return self._print_info_cmd(rows=node_infos[node_id], **kwargs) def _cmd_lstargetportfc(self, **kwargs): ports = [None] * 17 ports[0] = ['id', 'WWPN', 'WWNN', 'port_id', 'owning_node_id', 'current_node_id', 'nportid', 'host_io_permitted', 'virtualized', 'fc_io_port_id'] ports[1] = ['0', '5005076801106CFE', '5005076801106CFE', '1', '1', '1', '042200', 'no', 'no', ''] ports[2] = ['0', '5005076801996CFE', '5005076801106CFE', '1', '1', '1', '042200', 'yes', 'yes', ''] ports[3] = ['0', '5005076801206CFE', '5005076801106CFE', '2', '1', '1', '042200', 'no', 'no', ''] ports[4] = ['0', '5005076801A96CFE', '5005076801106CFE', '2', '1', '1', '042200', 'yes', 'yes', ''] ports[5] = ['0', '5005076801306CFE', '5005076801106CFE', '3', '1', '', '042200', 'no', 'no', ''] ports[6] = ['0', '5005076801B96CFE', '5005076801106CFE', '3', '1', '', '042200', 'yes', 'yes', ''] ports[7] = ['0', '5005076801406CFE', '5005076801106CFE', '4', '1', '', '042200', 'no', 'no', ''] ports[8] = ['0', '5005076801C96CFE', '5005076801106CFE', '4', '1', '', '042200', 'yes', 'yes', ''] ports[9] = ['0', '5005076801101806', '5005076801101806', '1', '2', '2', '042200', 'no', 'no', ''] ports[10] = ['0', '5005076801991806', '5005076801101806', '1', '2', '2', '042200', 'yes', 'yes', ''] ports[11] = ['0', '5005076801201806', '5005076801101806', '2', '2', '2', '042200', 'no', 'no', ''] ports[12] = ['0', '5005076801A91806', '5005076801101806', '2', '2', '2', '042200', 'yes', 'yes', ''] ports[13] = ['0', '5005076801301806', '5005076801101806', '3', '2', '', '042200', 'no', 'no', ''] ports[14] = ['0', '5005076801B91806', '5005076801101806', '3', '2', '', '042200', 'yes', 'yes', ''] ports[15] = ['0', '5005076801401806', '5005076801101806', '4', '2', '', '042200', 'no', 'no', ''] ports[16] = ['0', '5005076801C91806', '5005076801101806', '4', '2', '', '042200', 'yes', 'yes', ''] if 'filtervalue' in kwargs: rows = [] rows.append(['id', 'WWPN', 'WWNN', 'port_id', 'owning_node_id', 'current_node_id', 'nportid', 'host_io_permitted', 'virtualized', 'fc_io_port_id']) if ':' in kwargs['filtervalue']: filter1 = kwargs['filtervalue'].split(':')[0] filter2 = kwargs['filtervalue'].split(':')[1] value1 = filter1.split('=')[1] value2 = filter2.split('=')[1] for v in ports: if str(v[5]) == value1 and str(v[7]) == value2: rows.append(v) else: value = kwargs['filtervalue'].split('=')[1] for v in ports: if str(v[5]) == value: rows.append(v) else: rows = ports return self._print_info_cmd(rows=rows, **kwargs) # Print mostly made-up stuff in the correct syntax def _cmd_lsfcportsetmember(self, **kwargs): rows = [None] * 7 rows[0] = ['id', 'fc_io_port_id', 'portset_id', 'portset_name', 'owner_id', 'owner_name'] rows[1] = ['0', '5', '6', 'portset6', '', ''] rows[2] = ['1', '5', '64', 'portset64', '', ''] rows[3] = ['2', '6', '6', 'portset6', '', ''] rows[4] = ['3', '6', '64', 'portset64', '', ''] rows[5] = ['4', '7', '64', 'portset64', '', ''] rows[6] = ['5', 
'8', '64', 'portset64', '', ''] if self._next_cmd_error['lsfcportsetmember'] == 'header_mismatch': rows[0].pop(2) self._next_cmd_error['lsfcportsetmember'] = '' if self._next_cmd_error['lsfcportsetmember'] == 'remove_field': for row in rows: row.pop(1) self._next_cmd_error['lsfcportsetmember'] = '' return self._print_info_cmd(rows=rows, **kwargs) # Print mostly made-up stuff in the correct syntax def _cmd_lsip(self, **kwargs): ports = [None] * 9 ports[0] = ['id', 'node_id', 'node_name', 'port_id', 'portset_id', 'portset_name', 'IP_address', 'prefix', 'vlan', 'gateway', 'owner_id', 'owner_name'] ports[1] = ['0', '1', 'node1', '5', '0', 'portset0', '1.234.50.11', '24', '1001', '', '', ''] ports[2] = ['1', '1', 'node1', '6', '4', 'portset4', '1.234.51.11', '24', '1002', '', '', ''] ports[3] = ['2', '1', 'node1', '7', '5', 'portset5', '1.234.52.11', '24', '1003', '', '', ''] ports[4] = ['3', '1', 'node1', '8', '6', 'portset6', '1.234.53.11', '24', '1004', '', '', ''] ports[5] = ['4', '2', 'node2', '5', '0', 'portset0', '1.234.54.11', '24', '1005', '', '', ''] ports[6] = ['5', '2', 'node2', '6', '4', 'portset4', '1.234.55.11', '24', '1006', '', '', ''] ports[7] = ['6', '2', 'node2', '7', '5', 'portset5', '1.234.56.11', '24', '1007', '', '', ''] ports[8] = ['7', '2', 'node2', '8', '6', 'portset6', '1.234.57.11', '24', '1008', '', '', ''] if 'filtervalue' in kwargs: rows = [] rows.append(['id', 'node_id', 'node_name', 'port_id', 'portset_id', 'portset_name', 'IP_address', 'prefix', 'vlan', 'gateway', 'owner_id', 'owner_name']) value = kwargs['filtervalue'].split('=')[1] for v in ports: if str(v[5]) == value: rows.append(v) else: rows = ports if self._next_cmd_error['lsip'] == 'header_mismatch': rows[0].pop(2) self._next_cmd_error['lsip'] = '' if self._next_cmd_error['lsip'] == 'remove_field': for row in rows: row.pop(1) self._next_cmd_error['lsip'] = '' return self._print_info_cmd(rows=rows, **kwargs) # Print mostly made-up stuff in the correct syntax def _cmd_lsportip(self, **kwargs): if self._next_cmd_error['lsportip'] == 'ip_no_config': self._next_cmd_error['lsportip'] = '' ip_addr1 = '' ip_addr2 = '' gw = '' else: ip_addr1 = '1.234.56.78' ip_addr2 = '1.234.56.79' ip_addr3 = '1.234.56.80' ip_addr4 = '1.234.56.81' gw = '1.234.56.1' rows = [None] * 17 rows[0] = ['id', 'node_id', 'node_name', 'IP_address', 'mask', 'gateway', 'IP_address_6', 'prefix_6', 'gateway_6', 'MAC', 'duplex', 'state', 'speed', 'failover', 'link_state'] rows[1] = ['1', '1', 'node1', ip_addr1, '255.255.255.0', gw, '', '', '', '01:23:45:67:89:00', 'Full', 'online', '1Gb/s', 'no', 'active'] rows[2] = ['1', '1', 'node1', '', '', '', '', '', '', '01:23:45:67:89:00', 'Full', 'online', '1Gb/s', 'yes', ''] rows[3] = ['2', '1', 'node1', ip_addr3, '255.255.255.0', gw, '', '', '', '01:23:45:67:89:01', 'Full', 'configured', '1Gb/s', 'no', 'active'] rows[4] = ['2', '1', 'node1', '', '', '', '', '', '', '01:23:45:67:89:01', 'Full', 'unconfigured', '1Gb/s', 'yes', 'inactive'] rows[5] = ['3', '1', 'node1', '', '', '', '', '', '', '', '', 'unconfigured', '', 'no', ''] rows[6] = ['3', '1', 'node1', '', '', '', '', '', '', '', '', 'unconfigured', '', 'yes', ''] rows[7] = ['4', '1', 'node1', '', '', '', '', '', '', '', '', 'unconfigured', '', 'no', ''] rows[8] = ['4', '1', 'node1', '', '', '', '', '', '', '', '', 'unconfigured', '', 'yes', ''] rows[9] = ['1', '2', 'node2', ip_addr2, '255.255.255.0', gw, '', '', '', '01:23:45:67:89:02', 'Full', 'online', '1Gb/s', 'no', ''] rows[10] = ['1', '2', 'node2', '', '', '', '', '', '', 
'01:23:45:67:89:02', 'Full', 'online', '1Gb/s', 'yes', ''] rows[11] = ['2', '2', 'node2', ip_addr4, '255.255.255.0', gw, '', '', '', '01:23:45:67:89:03', 'Full', 'configured', '1Gb/s', 'no', 'inactive'] rows[12] = ['2', '2', 'node2', '', '', '', '', '', '', '01:23:45:67:89:03', 'Full', 'unconfigured', '1Gb/s', 'yes', ''] rows[13] = ['3', '2', 'node2', '', '', '', '', '', '', '', '', 'unconfigured', '', 'no', ''] rows[14] = ['3', '2', 'node2', '', '', '', '', '', '', '', '', 'unconfigured', '', 'yes', ''] rows[15] = ['4', '2', 'node2', '', '', '', '', '', '', '', '', 'unconfigured', '', 'no', ''] rows[16] = ['4', '2', 'node2', '', '', '', '', '', '', '', '', 'unconfigured', '', 'yes', ''] if self._next_cmd_error['lsportip'] == 'header_mismatch': rows[0].pop(2) self._next_cmd_error['lsportip'] = '' if self._next_cmd_error['lsportip'] == 'remove_field': for row in rows: row.pop(1) self._next_cmd_error['lsportip'] = '' return self._print_info_cmd(rows=rows, **kwargs) def _cmd_lsfabric(self, **kwargs): if self._next_cmd_error['lsfabric'] == 'no_hosts': return ('', '') host_name = kwargs['host'].strip('\'\"') if 'host' in kwargs else None target_wwpn = kwargs['wwpn'] if 'wwpn' in kwargs else None host_infos = [] for hv in self._hosts_list.values(): if (not host_name) or (hv['host_name'] == host_name): if not target_wwpn or target_wwpn in hv['wwpns']: host_infos.append(hv) break if not len(host_infos): return ('', '') rows = [] rows.append(['remote_wwpn', 'remote_nportid', 'id', 'node_name', 'local_wwpn', 'local_port', 'local_nportid', 'state', 'name', 'cluster_name', 'type']) for host_info in host_infos: for wwpn in host_info['wwpns']: rows.append([wwpn, '123456', host_info['id'], 'nodeN', 'AABBCCDDEEFF0011', '1', '0123ABC', 'active', host_info['host_name'], '', 'host']) if self._next_cmd_error['lsfabric'] == 'header_mismatch': rows[0].pop(0) self._next_cmd_error['lsfabric'] = '' if self._next_cmd_error['lsfabric'] == 'remove_field': for row in rows: row.pop(0) self._next_cmd_error['lsfabric'] = '' if self._next_cmd_error['lsfabric'] == 'remove_rows': rows = [] return self._print_info_cmd(rows=rows, **kwargs) # Create a vdisk def _cmd_mkvdisk(self, **kwargs): # We only save the id/uid, name, and size - all else will be made up volume_info = {} volume_info['id'] = self._find_unused_id(self._volumes_list) volume_info['uid'] = ('ABCDEF' * 3) + ('0' * 14) + volume_info['id'] mdiskgrp = kwargs['mdiskgrp'].strip('\'\"') sec_pool = None is_mirror_vol = False if 'copies' in kwargs: # it is a mirror volume pool_split = mdiskgrp.split(':') if len(pool_split) != 2: raise exception.InvalidInput( reason=_('mdiskgrp %s is invalid for mirror ' 'volume') % kwargs['mdiskgrp']) else: is_mirror_vol = True mdiskgrp = pool_split[0] sec_pool = pool_split[1] if mdiskgrp == kwargs['mdiskgrp']: raise exception.InvalidInput( reason=_('mdiskgrp missing quotes %s') % kwargs['mdiskgrp']) mdiskgrp_id = self._get_mdiskgrp_id(mdiskgrp) sec_pool_id = self._get_mdiskgrp_id(sec_pool) volume_info['mdisk_grp_name'] = mdiskgrp volume_info['mdisk_grp_id'] = str(mdiskgrp_id) if 'name' in kwargs: volume_info['name'] = kwargs['name'].strip('\'\"') else: volume_info['name'] = 'vdisk' + volume_info['id'] # Assume size and unit are given, store it in bytes capacity = int(kwargs['size']) unit = kwargs['unit'] volume_info['capacity'] = self._convert_units_bytes(capacity, unit) volume_info['IO_group_id'] = kwargs['iogrp'] volume_info['IO_group_name'] = 'io_grp%s' % kwargs['iogrp'] volume_info['RC_name'] = '' volume_info['RC_id'] = '' if 
'easytier' in kwargs: if kwargs['easytier'] == 'on': volume_info['easy_tier'] = 'on' else: volume_info['easy_tier'] = 'off' if 'rsize' in kwargs: volume_info['formatted'] = 'no' # Fake numbers volume_info['used_capacity'] = '786432' volume_info['real_capacity'] = '21474816' volume_info['free_capacity'] = '38219264' if 'warning' in kwargs: volume_info['warning'] = kwargs['warning'].rstrip('%') else: volume_info['warning'] = '80' if 'autoexpand' in kwargs: volume_info['autoexpand'] = 'on' else: volume_info['autoexpand'] = 'off' if 'grainsize' in kwargs: volume_info['grainsize'] = kwargs['grainsize'] else: volume_info['grainsize'] = '32' if 'compressed' in kwargs: volume_info['compressed_copy'] = 'yes' else: volume_info['compressed_copy'] = 'no' else: volume_info['used_capacity'] = volume_info['capacity'] volume_info['real_capacity'] = volume_info['capacity'] volume_info['free_capacity'] = '0' volume_info['warning'] = '' volume_info['autoexpand'] = '' volume_info['grainsize'] = '' volume_info['compressed_copy'] = 'no' volume_info['formatted'] = 'yes' if 'nofmtdisk' in kwargs: if kwargs['nofmtdisk']: volume_info['formatted'] = 'no' vol_cp = {'id': '0', 'status': 'online', 'sync': 'yes', 'primary': 'yes', 'mdisk_grp_id': str(mdiskgrp_id), 'mdisk_grp_name': mdiskgrp, 'easy_tier': (volume_info[ 'easy_tier'] if 'easy_tier' in volume_info else 'on'), 'compressed_copy': volume_info['compressed_copy']} volume_info['copies'] = {'0': vol_cp} if is_mirror_vol: vol_cp1 = {'id': '1', 'status': 'online', 'sync': 'yes', 'primary': 'no', 'mdisk_grp_id': str(sec_pool_id), 'mdisk_grp_name': sec_pool, 'easy_tier': (volume_info['easy_tier'] if 'easy_tier' in volume_info else 'on'), 'compressed_copy': volume_info['compressed_copy']} volume_info['copies']['1'] = vol_cp1 if volume_info['name'] in self._volumes_list: return self._errors['CMMVC6035E'] else: self._volumes_list[volume_info['name']] = volume_info return ('Virtual Disk, id [%s], successfully created' % (volume_info['id']), '') # Delete a vdisk def _cmd_rmvdisk(self, **kwargs): force = True if 'force' in kwargs else False if 'force' not in kwargs and 'force_unmap' in kwargs: force_unmap = True else: force_unmap = False if 'obj' not in kwargs: return self._errors['CMMVC5701E'] vol_name = kwargs['obj'].strip('\'\"') if vol_name not in self._volumes_list: return self._errors['CMMVC5753E'] if not force and not force_unmap: for mapping in self._mappings_list.values(): if mapping['vol'] == vol_name: return self._errors['CMMVC5840E'] for fcmap in self._fcmappings_list.values(): if ((fcmap['source'] == vol_name) or (fcmap['target'] == vol_name)): return self._errors['CMMVC5840E'] del self._volumes_list[vol_name] return ('', '') def _cmd_expandvdisksize(self, **kwargs): if 'obj' not in kwargs: return self._errors['CMMVC5701E'] vol_name = kwargs['obj'].strip('\'\"') # Assume unit is gb if 'size' not in kwargs: return self._errors['CMMVC5707E'] size = int(kwargs['size']) if vol_name not in self._volumes_list: return self._errors['CMMVC5753E'] vol = self._volumes_list[kwargs['obj']] if self._next_cmd_error['expandvdisksize'] == 'fast_formatting': if vol['RC_name']: rcrel = self._rcrelationship_list[vol['RC_name']] if rcrel.get('copy_type', None): return self._errors['CMMVC9201E'] return self._errors['CMMVC8587E'] curr_size = int(self._volumes_list[vol_name]['capacity']) addition = size * units.Gi self._volumes_list[vol_name]['capacity'] = ( str(curr_size + addition)) return ('', '') def _get_fcmap_info(self, vol_name): ret_vals = { 'fc_id': '', 'fc_name': '', 
'fc_map_count': '0', } for fcmap in self._fcmappings_list.values(): if ((fcmap['source'] == vol_name) or (fcmap['target'] == vol_name)): ret_vals['fc_id'] = fcmap['id'] ret_vals['fc_name'] = fcmap['name'] ret_vals['fc_map_count'] = '1' return ret_vals # List information about vdisks def _cmd_lsvdisk(self, **kwargs): rows = [] rows.append(['id', 'name', 'IO_group_id', 'IO_group_name', 'status', 'mdisk_grp_id', 'mdisk_grp_name', 'capacity', 'type', 'FC_id', 'FC_name', 'RC_id', 'RC_name', 'vdisk_UID', 'fc_map_count', 'copy_count', 'fast_write_state', 'se_copy_count', 'RC_change']) for vol in self._volumes_list.values(): if (('filtervalue' not in kwargs) or (kwargs['filtervalue'] == 'name=' + vol['name']) or (kwargs['filtervalue'] == 'vdisk_UID=' + vol['uid'])): fcmap_info = self._get_fcmap_info(vol['name']) if 'bytes' in kwargs: cap = self._convert_bytes_units(vol['capacity']) else: cap = vol['capacity'] rows.append([str(vol['id']), vol['name'], vol['IO_group_id'], vol['IO_group_name'], 'online', '0', _get_test_pool(), cap, 'striped', fcmap_info['fc_id'], fcmap_info['fc_name'], '', '', vol['uid'], fcmap_info['fc_map_count'], '1', 'empty', '1', 'no']) if 'obj' not in kwargs: return self._print_info_cmd(rows=rows, **kwargs) else: if kwargs['obj'] not in self._volumes_list: return self._errors['CMMVC5754E'] vol = self._volumes_list[kwargs['obj']] fcmap_info = self._get_fcmap_info(vol['name']) cap = vol['capacity'] cap_u = vol['used_capacity'] cap_r = vol['real_capacity'] cap_f = vol['free_capacity'] if 'bytes' not in kwargs: for item in [cap, cap_u, cap_r, cap_f]: item = self._convert_bytes_units(item) rows = [] rows.append(['id', str(vol['id'])]) rows.append(['name', vol['name']]) rows.append(['IO_group_id', vol['IO_group_id']]) rows.append(['IO_group_name', vol['IO_group_name']]) rows.append(['status', 'online']) rows.append(['capacity', cap]) rows.append(['formatted', vol['formatted']]) rows.append(['mdisk_id', '']) rows.append(['mdisk_name', '']) rows.append(['FC_id', fcmap_info['fc_id']]) rows.append(['FC_name', fcmap_info['fc_name']]) rows.append(['RC_id', vol['RC_id']]) rows.append(['RC_name', vol['RC_name']]) rows.append(['vdisk_UID', vol['uid']]) rows.append(['throttling', '0']) if self._next_cmd_error['lsvdisk'] == 'blank_pref_node': rows.append(['preferred_node_id', '']) self._next_cmd_error['lsvdisk'] = '' elif self._next_cmd_error['lsvdisk'] == 'no_pref_node': self._next_cmd_error['lsvdisk'] = '' else: rows.append(['preferred_node_id', '1']) rows.append(['fast_write_state', 'empty']) rows.append(['cache', 'readwrite']) rows.append(['udid', '']) rows.append(['fc_map_count', fcmap_info['fc_map_count']]) rows.append(['sync_rate', '50']) rows.append(['copy_count', '1']) rows.append(['se_copy_count', '0']) rows.append(['mirror_write_priority', 'latency']) rows.append(['RC_change', 'no']) for copy in vol['copies'].values(): rows.append(['copy_id', copy['id']]) rows.append(['status', copy['status']]) rows.append(['primary', copy['primary']]) rows.append(['mdisk_grp_id', copy['mdisk_grp_id']]) rows.append(['mdisk_grp_name', copy['mdisk_grp_name']]) rows.append(['type', 'striped']) rows.append(['used_capacity', cap_u]) rows.append(['real_capacity', cap_r]) rows.append(['free_capacity', cap_f]) rows.append(['easy_tier', copy['easy_tier']]) rows.append(['compressed_copy', copy['compressed_copy']]) rows.append(['autoexpand', vol['autoexpand']]) rows.append(['warning', vol['warning']]) rows.append(['grainsize', vol['grainsize']]) if 'nohdr' in kwargs: for index in range(len(rows)): rows[index] = 
' '.join(rows[index][1:]) if 'delim' in kwargs: for index in range(len(rows)): rows[index] = kwargs['delim'].join(rows[index]) return ('%s' % '\n'.join(rows), '') def _cmd_lsiogrp(self, **kwargs): rows = [None] * 6 rows[0] = ['id', 'name', 'node_count', 'vdisk_count', 'host_count'] rows[1] = ['0', 'io_grp0', '2', '0', '4'] rows[2] = ['1', 'io_grp1', '2', '0', '4'] rows[3] = ['2', 'io_grp2', '0', '0', '4'] rows[4] = ['3', 'io_grp3', '0', '0', '4'] rows[5] = ['4', 'recovery_io_grp', '0', '0', '0'] return self._print_info_cmd(rows=rows, **kwargs) def _add_port_to_host(self, host_info, **kwargs): if 'iscsiname' in kwargs: added_key = 'iscsi_names' added_val = kwargs['iscsiname'].strip('\'\"') elif 'hbawwpn' in kwargs: added_key = 'wwpns' added_val = kwargs['hbawwpn'].strip('\'\"') else: return self._errors['CMMVC5707E'] host_info[added_key].append(added_val) for v in self._hosts_list.values(): if v['id'] == host_info['id']: continue for port in v[added_key]: if port == added_val: error = 'CMMVC6035E' if 'iscsiname' in kwargs: error = 'CMMVC6578E' return self._errors[error] return ('', '') # Make a host def _cmd_mkhost(self, **kwargs): host_info = {} host_info['id'] = self._find_unused_id(self._hosts_list) if 'name' in kwargs: host_name = kwargs['name'].strip('\'\"') else: host_name = 'host' + str(host_info['id']) if self._is_invalid_name(host_name): return self._errors['CMMVC6527E'] if host_name in self._hosts_list: return self._errors['CMMVC6035E'] host_info['host_name'] = host_name host_info['iscsi_names'] = [] host_info['wwpns'] = [] if 'site' in kwargs: host_info['site_name'] = kwargs['site'].strip('\'\"') else: host_info['site_name'] = '' if 'portset' in kwargs: host_info['portset_name'] = kwargs['portset'].strip('\'\"') else: host_info['portset_name'] = '' out, err = self._add_port_to_host(host_info, **kwargs) if not len(err): self._hosts_list[host_name] = host_info return ('Host, id [%s], successfully created' % (host_info['id']), '') else: return (out, err) # Add ports to an existing host def _cmd_addhostport(self, **kwargs): if 'obj' not in kwargs: return self._errors['CMMVC5701E'] host_name = kwargs['obj'].strip('\'\"') if host_name not in self._hosts_list: return self._errors['CMMVC5753E'] host_info = self._hosts_list[host_name] return self._add_port_to_host(host_info, **kwargs) # Change host properties def _cmd_chhost(self, **kwargs): if 'obj' not in kwargs: return self._errors['CMMVC5701E'] host_name = kwargs['obj'].strip('\'\"') if host_name not in self._hosts_list: return self._errors['CMMVC5753E'] if 'chapsecret' in kwargs: secret = kwargs['chapsecret'].strip('\'\"') self._hosts_list[host_name]['chapsecret'] = secret if 'site' in kwargs: site_name = kwargs['site'].strip('\'\"') self._hosts_list[host_name]['site_name'] = site_name if 'chapsecret' not in kwargs and 'site' not in kwargs: return self._errors['CMMVC5707E'] return ('', '') # Remove a host def _cmd_rmhost(self, **kwargs): if 'obj' not in kwargs: return self._errors['CMMVC5701E'] host_name = kwargs['obj'].strip('\'\"') if host_name not in self._hosts_list: return self._errors['CMMVC5753E'] for v in self._mappings_list.values(): if (v['host'] == host_name): return self._errors['CMMVC5871E'] del self._hosts_list[host_name] return ('', '') # List information about hosts def _cmd_lshost(self, **kwargs): if 'obj' not in kwargs: rows = [] rows.append(['id', 'name', 'port_count', 'iogrp_count', 'status', 'site_name']) found = False # Sort hosts by names to give predictable order for tests # depend on it. 
for host_name in sorted(self._hosts_list.keys()): host = self._hosts_list[host_name] filterstr = 'name=' + host['host_name'] if (('filtervalue' not in kwargs) or (kwargs['filtervalue'] == filterstr)): rows.append([host['id'], host['host_name'], '1', '4', 'offline', host['site_name']]) found = True if found: return self._print_info_cmd(rows=rows, **kwargs) else: return ('', '') else: if self._next_cmd_error['lshost'] == 'missing_host': self._next_cmd_error['lshost'] = '' return self._errors['CMMVC5754E'] elif self._next_cmd_error['lshost'] == 'bigger_troubles': return self._errors['CMMVC6527E'] host_name = kwargs['obj'].strip('\'\"') if host_name not in self._hosts_list: return self._errors['CMMVC5754E'] if (self._next_cmd_error['lshost'] == 'fail_fastpath' and host_name == 'DifferentHost'): return self._errors['CMMVC5701E'] host = self._hosts_list[host_name] rows = [] rows.append(['id', host['id']]) rows.append(['name', host['host_name']]) rows.append(['port_count', '1']) rows.append(['type', 'generic']) rows.append(['mask', '1111']) rows.append(['iogrp_count', '4']) rows.append(['status', 'online']) rows.append(['site_name', host['site_name']]) for port in host['iscsi_names']: rows.append(['iscsi_name', port]) rows.append(['node_logged_in_count', '0']) rows.append(['state', 'offline']) for port in host['wwpns']: rows.append(['WWPN', port]) rows.append(['node_logged_in_count', '0']) rows.append(['state', 'active']) if 'nohdr' in kwargs: for index in range(len(rows)): rows[index] = ' '.join(rows[index][1:]) if 'delim' in kwargs: for index in range(len(rows)): rows[index] = kwargs['delim'].join(rows[index]) return ('%s' % '\n'.join(rows), '') # List iSCSI authorization information about hosts def _cmd_lsiscsiauth(self, **kwargs): if self._next_cmd_error['lsiscsiauth'] == 'no_info': self._next_cmd_error['lsiscsiauth'] = '' return ('', '') rows = [] rows.append(['type', 'id', 'name', 'iscsi_auth_method', 'iscsi_chap_secret']) for host in self._hosts_list.values(): method = 'none' secret = '' if 'chapsecret' in host: method = 'chap' secret = host['chapsecret'] rows.append(['host', host['id'], host['host_name'], method, secret]) return self._print_info_cmd(rows=rows, **kwargs) # Create a vdisk-host mapping def _cmd_mkvdiskhostmap(self, **kwargs): mapping_info = {} mapping_info['id'] = self._find_unused_id(self._mappings_list) if 'host' not in kwargs: return self._errors['CMMVC5707E'] mapping_info['host'] = kwargs['host'].strip('\'\"') if 'scsi' in kwargs: mapping_info['lun'] = kwargs['scsi'].strip('\'\"') else: mapping_info['lun'] = mapping_info['id'] if 'obj' not in kwargs: return self._errors['CMMVC5707E'] mapping_info['vol'] = kwargs['obj'].strip('\'\"') if mapping_info['vol'] not in self._volumes_list: return self._errors['CMMVC5753E'] if mapping_info['host'] not in self._hosts_list: return self._errors['CMMVC5754E'] if mapping_info['vol'] in self._mappings_list: return self._errors['CMMVC6071E'] for v in self._mappings_list.values(): if ((v['host'] == mapping_info['host']) and (v['lun'] == mapping_info['lun'])): return self._errors['CMMVC5879E'] for v in self._mappings_list.values(): if (v['vol'] == mapping_info['vol']) and ('force' not in kwargs): return self._errors['CMMVC6071E'] self._mappings_list[mapping_info['id']] = mapping_info return ('Virtual Disk to Host map, id [%s], successfully created' % (mapping_info['id']), '') # Delete a vdisk-host mapping def _cmd_rmvdiskhostmap(self, **kwargs): if 'host' not in kwargs: return self._errors['CMMVC5707E'] host = kwargs['host'].strip('\'\"') 
if 'obj' not in kwargs: return self._errors['CMMVC5701E'] vol = kwargs['obj'].strip('\'\"') mapping_ids = [] for v in self._mappings_list.values(): if v['vol'] == vol: mapping_ids.append(v['id']) if not mapping_ids: return self._errors['CMMVC5753E'] this_mapping = None for mapping_id in mapping_ids: if self._mappings_list[mapping_id]['host'] == host: this_mapping = mapping_id if this_mapping is None: return self._errors['CMMVC5753E'] del self._mappings_list[this_mapping] return ('', '') # List information about host->vdisk mappings def _cmd_lshostvdiskmap(self, **kwargs): host_name = kwargs['obj'].strip('\'\"') if host_name not in self._hosts_list: return self._errors['CMMVC5754E'] rows = [] rows.append(['id', 'name', 'SCSI_id', 'vdisk_id', 'vdisk_name', 'vdisk_UID']) for mapping in self._mappings_list.values(): if (host_name == '') or (mapping['host'] == host_name): volume = self._volumes_list[mapping['vol']] rows.append([mapping['id'], mapping['host'], mapping['lun'], volume['id'], volume['name'], volume['uid']]) return self._print_info_cmd(rows=rows, **kwargs) # List information about vdisk->host mappings def _cmd_lsvdiskhostmap(self, **kwargs): mappings_found = 0 vdisk_name = kwargs['obj'].strip('\'\"') if vdisk_name not in self._volumes_list: return self._errors['CMMVC5753E'] rows = [] rows.append(['id name', 'SCSI_id', 'host_id', 'host_name', 'vdisk_UID', 'IO_group_id', 'IO_group_name']) for mapping in self._mappings_list.values(): if (mapping['vol'] == vdisk_name): mappings_found += 1 volume = self._volumes_list[mapping['vol']] host = self._hosts_list[mapping['host']] rows.append([volume['id'], mapping['lun'], host['id'], host['host_name'], volume['uid'], volume['IO_group_id'], volume['IO_group_name']]) if mappings_found: return self._print_info_cmd(rows=rows, **kwargs) else: return ('', '') # Create a FlashCopy mapping def _cmd_mkfcmap(self, **kwargs): source = '' target = '' copyrate = kwargs['copyrate'] if 'copyrate' in kwargs else '50' cleanrate = kwargs['cleanrate'] if 'clean_rate' in kwargs else '50' if 'source' not in kwargs: return self._errors['CMMVC5707E'] source = kwargs['source'].strip('\'\"') if source not in self._volumes_list: return self._errors['CMMVC5754E'] if 'target' not in kwargs: return self._errors['CMMVC5707E'] target = kwargs['target'].strip('\'\"') if target not in self._volumes_list: return self._errors['CMMVC5754E'] if source == target: return self._errors['CMMVC6303E'] if (self._volumes_list[source]['capacity'] != self._volumes_list[target]['capacity']): return self._errors['CMMVC5754E'] fcmap_info = {} fcmap_info['source'] = source fcmap_info['target'] = target fcmap_info['id'] = self._find_unused_id(self._fcmappings_list) fcmap_info['name'] = 'fcmap' + fcmap_info['id'] fcmap_info['copyrate'] = copyrate fcmap_info['cleanrate'] = cleanrate fcmap_info['progress'] = '0' fcmap_info['autodelete'] = True if 'autodelete' in kwargs else False fcmap_info['status'] = 'idle_or_copied' fcmap_info['rc_controlled'] = 'no' # Add fcmap to consistency group if 'consistgrp' in kwargs: consistgrp = kwargs['consistgrp'] # if is digit, assume is cg id, else is cg name cg_id = 0 if not consistgrp.isdigit(): for consistgrp_key in self._fcconsistgrp_list.keys(): if (self._fcconsistgrp_list[consistgrp_key]['name'] == consistgrp): cg_id = consistgrp_key fcmap_info['consistgrp'] = consistgrp_key break else: if int(consistgrp) in self._fcconsistgrp_list.keys(): cg_id = int(consistgrp) # If can't find exist consistgrp id, return not exist error if not cg_id: return 
self._errors['CMMVC5754E'] fcmap_info['consistgrp'] = cg_id # Add fcmap to consistgrp self._fcconsistgrp_list[cg_id]['fcmaps'][fcmap_info['id']] = ( fcmap_info['name']) self._fc_cg_state_transition('add', self._fcconsistgrp_list[cg_id]) self._fcmappings_list[fcmap_info['id']] = fcmap_info return ('FlashCopy Mapping, id [' + fcmap_info['id'] + '], successfully created', '') def _cmd_prestartfcmap(self, **kwargs): if 'obj' not in kwargs: return self._errors['CMMVC5701E'] id_num = kwargs['obj'] if self._next_cmd_error['prestartfcmap'] == 'bad_id': id_num = -1 self._next_cmd_error['prestartfcmap'] = '' try: fcmap = self._fcmappings_list[id_num] except KeyError: return self._errors['CMMVC5753E'] return self._state_transition('prepare', fcmap) def _cmd_startfcmap(self, **kwargs): if 'obj' not in kwargs: return self._errors['CMMVC5701E'] id_num = kwargs['obj'] if self._next_cmd_error['startfcmap'] == 'bad_id': id_num = -1 self._next_cmd_error['startfcmap'] = '' try: fcmap = self._fcmappings_list[id_num] except KeyError: return self._errors['CMMVC5753E'] return self._state_transition('start', fcmap) def _cmd_stopfcmap(self, **kwargs): if 'obj' not in kwargs: return self._errors['CMMVC5701E'] id_num = kwargs['obj'] try: fcmap = self._fcmappings_list[id_num] except KeyError: return self._errors['CMMVC5753E'] return self._state_transition('stop', fcmap) def _cmd_rmfcmap(self, **kwargs): if 'obj' not in kwargs: return self._errors['CMMVC5701E'] id_num = kwargs['obj'] force = True if 'force' in kwargs else False if self._next_cmd_error['rmfcmap'] == 'bad_id': id_num = -1 self._next_cmd_error['rmfcmap'] = '' try: fcmap = self._fcmappings_list[id_num] except KeyError: return self._errors['CMMVC5753E'] function = 'delete_force' if force else 'delete' ret = self._state_transition(function, fcmap) if fcmap['status'] == 'end': del self._fcmappings_list[id_num] return ret def _cmd_lsvdiskfcmappings(self, **kwargs): if 'obj' not in kwargs: return self._errors['CMMVC5707E'] vdisk = kwargs['obj'] rows = [] rows.append(['id', 'name']) for v in self._fcmappings_list.values(): if v['source'] == vdisk or v['target'] == vdisk: rows.append([v['id'], v['name']]) return self._print_info_cmd(rows=rows, **kwargs) def _cmd_chfcmap(self, **kwargs): if 'obj' not in kwargs: return self._errors['CMMVC5707E'] id_num = kwargs['obj'] try: fcmap = self._fcmappings_list[id_num] except KeyError: return self._errors['CMMVC5753E'] for key in ['name', 'copyrate', 'cleanrate', 'autodelete']: if key in kwargs: fcmap[key] = kwargs[key] return ('', '') def _cmd_lsfcmap(self, **kwargs): rows = [] rows.append(['id', 'name', 'source_vdisk_id', 'source_vdisk_name', 'target_vdisk_id', 'target_vdisk_name', 'group_id', 'group_name', 'status', 'progress', 'copy_rate', 'clean_progress', 'incremental', 'partner_FC_id', 'partner_FC_name', 'restoring', 'start_time', 'rc_controlled']) # Assume we always get a filtervalue argument filter_key = kwargs['filtervalue'].split('=')[0] filter_value = kwargs['filtervalue'].split('=')[1] to_delete = [] for k, v in self._fcmappings_list.items(): if str(v[filter_key]) == filter_value: source = self._volumes_list[v['source']] target = self._volumes_list[v['target']] self._state_transition('wait', v) if self._next_cmd_error['lsfcmap'] == 'speed_up': self._next_cmd_error['lsfcmap'] = '' curr_state = v['status'] while self._state_transition('wait', v) == ("", ""): if curr_state == v['status']: break curr_state = v['status'] if ((v['status'] == 'idle_or_copied' and v['autodelete'] and v['progress'] == '100') or 
(v['status'] == 'end')): to_delete.append(k) else: rows.append([v['id'], v['name'], source['id'], source['name'], target['id'], target['name'], '', '', v['status'], v['progress'], v['copyrate'], '100', 'off', '', '', 'no', '', v['rc_controlled']]) for d in to_delete: del self._fcmappings_list[d] return self._print_info_cmd(rows=rows, **kwargs) # Create a FlashCopy mapping def _cmd_mkfcconsistgrp(self, **kwargs): fcconsistgrp_info = {} fcconsistgrp_info['id'] = self._find_unused_id(self._fcconsistgrp_list) if 'name' in kwargs: fcconsistgrp_info['name'] = kwargs['name'].strip('\'\"') else: fcconsistgrp_info['name'] = 'fccstgrp' + fcconsistgrp_info['id'] if 'autodelete' in kwargs: fcconsistgrp_info['autodelete'] = True else: fcconsistgrp_info['autodelete'] = False fcconsistgrp_info['status'] = 'empty' fcconsistgrp_info['start_time'] = None fcconsistgrp_info['fcmaps'] = {} self._fcconsistgrp_list[fcconsistgrp_info['id']] = fcconsistgrp_info return ('FlashCopy Consistency Group, id [' + fcconsistgrp_info['id'] + '], successfully created', '') def _cmd_prestartfcconsistgrp(self, **kwargs): if 'obj' not in kwargs: return self._errors['CMMVC5701E'] cg_name = kwargs['obj'] cg_id = 0 for cg_id in self._fcconsistgrp_list.keys(): if cg_name == self._fcconsistgrp_list[cg_id]['name']: break return self._fc_cg_state_transition('prepare', self._fcconsistgrp_list[cg_id]) def _cmd_startfcconsistgrp(self, **kwargs): if 'obj' not in kwargs: return self._errors['CMMVC5701E'] cg_name = kwargs['obj'] cg_id = 0 for cg_id in self._fcconsistgrp_list.keys(): if cg_name == self._fcconsistgrp_list[cg_id]['name']: break return self._fc_cg_state_transition('start', self._fcconsistgrp_list[cg_id]) def _cmd_stopfcconsistgrp(self, **kwargs): if 'obj' not in kwargs: return self._errors['CMMVC5701E'] id_num = kwargs['obj'] try: fcconsistgrps = self._fcconsistgrp_list[id_num] except KeyError: return self._errors['CMMVC5753E'] return self._fc_cg_state_transition('stop', fcconsistgrps) def _cmd_rmfcconsistgrp(self, **kwargs): if 'obj' not in kwargs: return self._errors['CMMVC5701E'] cg_name = kwargs['obj'] force = True if 'force' in kwargs else False cg_id = 0 for cg_id in self._fcconsistgrp_list.keys(): if cg_name == self._fcconsistgrp_list[cg_id]['name']: break if not cg_id: return self._errors['CMMVC5753E'] fcconsistgrps = self._fcconsistgrp_list[cg_id] function = 'delete_force' if force else 'delete' ret = self._fc_cg_state_transition(function, fcconsistgrps) if fcconsistgrps['status'] == 'end': del self._fcconsistgrp_list[cg_id] return ret def _cmd_lsfcconsistgrp(self, **kwargs): rows = [] if 'obj' not in kwargs: rows.append(['id', 'name', 'status' 'start_time']) for fcconsistgrp in self._fcconsistgrp_list.values(): rows.append([fcconsistgrp['id'], fcconsistgrp['name'], fcconsistgrp['status'], fcconsistgrp['start_time']]) return self._print_info_cmd(rows=rows, **kwargs) else: fcconsistgrp = None cg_id = 0 for cg_id in self._fcconsistgrp_list.keys(): if self._fcconsistgrp_list[cg_id]['name'] == kwargs['obj']: fcconsistgrp = self._fcconsistgrp_list[cg_id] break rows = [] rows.append(['id', str(cg_id)]) rows.append(['name', fcconsistgrp['name']]) rows.append(['status', fcconsistgrp['status']]) rows.append(['autodelete', str(fcconsistgrp['autodelete'])]) rows.append(['start_time', str(fcconsistgrp['start_time'])]) for fcmap_id in fcconsistgrp['fcmaps'].keys(): rows.append(['FC_mapping_id', str(fcmap_id)]) rows.append(['FC_mapping_name', fcconsistgrp['fcmaps'][fcmap_id]]) if 'delim' in kwargs: for index in range(len(rows)): 
rows[index] = kwargs['delim'].join(rows[index]) self._fc_cg_state_transition('wait', fcconsistgrp) return ('%s' % '\n'.join(rows), '') def _cmd_migratevdisk(self, **kwargs): if 'mdiskgrp' not in kwargs or 'vdisk' not in kwargs: return self._errors['CMMVC5707E'] mdiskgrp = kwargs['mdiskgrp'].strip('\'\"') vdisk = kwargs['vdisk'].strip('\'\"') copy_id = kwargs['copy'] if vdisk not in self._volumes_list: return self._errors['CMMVC5753E'] mdiskgrp_id = str(self._get_mdiskgrp_id(mdiskgrp)) self._volumes_list[vdisk]['mdisk_grp_name'] = mdiskgrp self._volumes_list[vdisk]['mdisk_grp_id'] = mdiskgrp_id vol = self._volumes_list[vdisk] vol['copies'][copy_id]['mdisk_grp_name'] = mdiskgrp vol['copies'][copy_id]['mdisk_grp_id'] = mdiskgrp_id return ('', '') def _cmd_addvdiskcopy(self, **kwargs): if 'obj' not in kwargs: return self._errors['CMMVC5701E'] vol_name = kwargs['obj'].strip('\'\"') if vol_name not in self._volumes_list: return self._errors['CMMVC5753E'] vol = self._volumes_list[vol_name] if 'mdiskgrp' not in kwargs: return self._errors['CMMVC5707E'] mdiskgrp = kwargs['mdiskgrp'].strip('\'\"') if mdiskgrp == kwargs['mdiskgrp']: raise exception.InvalidInput( reason=_('mdiskgrp missing quotes %s') % kwargs['mdiskgrp']) auto_del = True if 'autodelete' in kwargs else False copy_info = {} copy_info['id'] = self._find_unused_id(vol['copies']) copy_info['status'] = 'online' copy_info['sync'] = 'no' copy_info['primary'] = 'no' copy_info['mdisk_grp_name'] = mdiskgrp copy_info['mdisk_grp_id'] = str(self._get_mdiskgrp_id(mdiskgrp)) if 'easytier' in kwargs: if kwargs['easytier'] == 'on': copy_info['easy_tier'] = 'on' else: copy_info['easy_tier'] = 'off' if 'rsize' in kwargs: if 'compressed' in kwargs: copy_info['compressed_copy'] = 'yes' else: copy_info['compressed_copy'] = 'no' vol['copies'][copy_info['id']] = copy_info if auto_del: del_copy_id = None for v in vol['copies'].values(): if v['id'] != copy_info['id']: del_copy_id = v['id'] break if del_copy_id: del vol['copies'][del_copy_id] return ('Vdisk [%(vid)s] copy [%(cid)s] successfully created' % {'vid': vol['id'], 'cid': copy_info['id']}, '') def _cmd_lsvdiskcopy(self, **kwargs): if 'obj' not in kwargs: return self._errors['CMMVC5804E'] name = kwargs['obj'] vol = self._volumes_list[name] rows = [] rows.append(['vdisk_id', 'vdisk_name', 'copy_id', 'status', 'sync', 'primary', 'mdisk_grp_id', 'mdisk_grp_name', 'capacity', 'type', 'se_copy', 'easy_tier', 'easy_tier_status', 'compressed_copy']) for copy in vol['copies'].values(): if 'compressed_copy' not in copy: copy['compressed_copy'] = 'False' rows.append([vol['id'], vol['name'], copy['id'], copy['status'], copy['sync'], copy['primary'], copy['mdisk_grp_id'], copy['mdisk_grp_name'], vol['capacity'], 'striped', 'yes', copy['easy_tier'], 'inactive', copy['compressed_copy']]) if 'copy' not in kwargs: return self._print_info_cmd(rows=rows, **kwargs) else: copy_id = kwargs['copy'].strip('\'\"') if copy_id not in vol['copies']: return self._errors['CMMVC6353E'] copy = vol['copies'][copy_id] rows = [] rows.append(['vdisk_id', vol['id']]) rows.append(['vdisk_name', vol['name']]) rows.append(['capacity', vol['capacity']]) rows.append(['copy_id', copy['id']]) rows.append(['status', copy['status']]) rows.append(['sync', copy['sync']]) copy['sync'] = 'yes' rows.append(['primary', copy['primary']]) rows.append(['mdisk_grp_id', copy['mdisk_grp_id']]) rows.append(['mdisk_grp_name', copy['mdisk_grp_name']]) rows.append(['easy_tier', copy['easy_tier']]) rows.append(['easy_tier_status', 'inactive']) 
rows.append(['compressed_copy', copy['compressed_copy']]) rows.append(['autoexpand', vol['autoexpand']]) if 'delim' in kwargs: for index in range(len(rows)): rows[index] = kwargs['delim'].join(rows[index]) return ('%s' % '\n'.join(rows), '') def _cmd_rmvdiskcopy(self, **kwargs): if 'obj' not in kwargs: return self._errors['CMMVC5701E'] vol_name = kwargs['obj'].strip('\'\"') if 'copy' not in kwargs: return self._errors['CMMVC5707E'] copy_id = kwargs['copy'].strip('\'\"') if vol_name not in self._volumes_list: return self._errors['CMMVC5753E'] vol = self._volumes_list[vol_name] if copy_id not in vol['copies']: return self._errors['CMMVC6353E'] del vol['copies'][copy_id] return ('', '') def _cmd_lsvdisks_from_filter(self, filter_name, value): volumes = [] if filter_name == 'mdisk_grp_name': for vol in self._volumes_list: vol_info = self._volumes_list[vol] if vol_info['mdisk_grp_name'] == value: volumes.append(vol) return volumes def _cmd_chvdisk(self, **kwargs): if 'obj' not in kwargs: return self._errors['CMMVC5701E'] vol_name = kwargs['obj'].strip('\'\"') vol = self._volumes_list[vol_name] kwargs.pop('obj') params = ['name', 'warning', 'udid', 'autoexpand', 'easytier', 'primary', 'volumegroup', 'novolumegroup'] for key, value in kwargs.items(): if key == 'easytier': vol['easy_tier'] = value for copy in vol['copies'].values(): vol['copies'][copy['id']]['easy_tier'] = value continue if key == 'warning': vol['warning'] = value.rstrip('%') continue if key == 'name': vol['name'] = value del self._volumes_list[vol_name] self._volumes_list[value] = vol if key == 'primary': if value == '0': self._volumes_list[vol_name]['copies']['0']['primary']\ = 'yes' self._volumes_list[vol_name]['copies']['1']['primary']\ = 'no' elif value == '1': self._volumes_list[vol_name]['copies']['0']['primary']\ = 'no' self._volumes_list[vol_name]['copies']['1']['primary']\ = 'yes' else: err = self._errors['CMMVC6353E'][1] % {'VALUE': key} return ('', err) if key == 'volumegroup': self._volumes_list[vol_name]['volume_group_id'] = value if key in params: vol[key] = value if key == 'autoexpand': for copy in vol['copies'].values(): vol['copies'][copy['id']]['autoexpand'] = value else: err = self._errors['CMMVC5709E'][1] % {'VALUE': key} return ('', err) return ('', '') def _cmd_movevdisk(self, **kwargs): if 'obj' not in kwargs: return self._errors['CMMVC5701E'] vol_name = kwargs['obj'].strip('\'\"') vol = self._volumes_list[vol_name] if 'iogrp' not in kwargs: return self._errors['CMMVC5707E'] iogrp = kwargs['iogrp'] if iogrp.isdigit(): vol['IO_group_id'] = iogrp vol['IO_group_name'] = 'io_grp%s' % iogrp else: vol['IO_group_id'] = iogrp[6:] vol['IO_group_name'] = iogrp return ('', '') def _cmd_addvdiskaccess(self, **kwargs): if 'obj' not in kwargs: return self._errors['CMMVC5701E'] return ('', '') def _cmd_rmvdiskaccess(self, **kwargs): if 'obj' not in kwargs: return self._errors['CMMVC5701E'] return ('', '') # list vdisk sync process def _cmd_lsvdisksyncprogress(self, **kwargs): if 'obj' not in kwargs: return self._errors['CMMVC5804E'] name = kwargs['obj'] copy_id = kwargs.get('copy', None) vol = self._volumes_list[name] rows = [] rows.append(['vdisk_id', 'vdisk_name', 'copy_id', 'progress', 'estimated_completion_time']) copy_found = False for copy in vol['copies'].values(): if not copy_id or copy_id == copy['id']: copy_found = True row = [vol['id'], name, copy['id']] if copy['sync'] == 'yes': row.extend(['100', '']) else: row.extend(['50', '140210115226']) copy['sync'] = 'yes' rows.append(row) if not copy_found: return 
self._errors['CMMVC5804E'] return self._print_info_cmd(rows=rows, **kwargs) def _add_host_to_list(self, connector): host_info = {} host_info['id'] = self._find_unused_id(self._hosts_list) host_info['host_name'] = connector['host'] host_info['iscsi_names'] = [] host_info['site_name'] = '' host_info['wwpns'] = [] if 'initiator' in connector: host_info['iscsi_names'].append(connector['initiator']) if 'wwpns' in connector: host_info['wwpns'] = host_info['wwpns'] + connector['wwpns'] self._hosts_list[connector['host']] = host_info def _host_in_list(self, host_name): for k in self._hosts_list: if k.startswith(host_name): return k return None # Replication related command # Create a remote copy def _cmd_mkrcrelationship(self, **kwargs): master_vol = '' aux_vol = '' aux_cluster = '' master_sys = self._system_list['storwize-svc-sim'] aux_sys = self._system_list['aux-svc-sim'] if 'master' not in kwargs: return self._errors['CMMVC5707E'] master_vol = kwargs['master'].strip('\'\"') if master_vol not in self._volumes_list: return self._errors['CMMVC5754E'] if 'aux' not in kwargs: return self._errors['CMMVC5707E'] aux_vol = kwargs['aux'].strip('\'\"') if aux_vol not in self._volumes_list: return self._errors['CMMVC5754E'] if 'cluster' not in kwargs: return self._errors['CMMVC5707E'] aux_cluster = kwargs['cluster'].strip('\'\"') if aux_cluster != aux_sys['id']: return self._errors['CMMVC5754E'] if (self._volumes_list[master_vol]['capacity'] != self._volumes_list[aux_vol]['capacity']): return self._errors['CMMVC5754E'] cyclingmode = None if 'cyclingmode' in kwargs: cyclingmode = kwargs['cyclingmode'].strip('\'\"') rcrel_info = {} rcrel_info['id'] = self._find_unused_id(self._rcrelationship_list) rcrel_info['name'] = 'rcrel' + rcrel_info['id'] rcrel_info['master_cluster_id'] = master_sys['id'] rcrel_info['master_cluster_name'] = master_sys['name'] rcrel_info['master_vdisk_id'] = self._volumes_list[master_vol]['id'] rcrel_info['master_vdisk_name'] = master_vol rcrel_info['aux_cluster_id'] = aux_sys['id'] rcrel_info['aux_cluster_name'] = aux_sys['name'] rcrel_info['aux_vdisk_id'] = self._volumes_list[aux_vol]['id'] rcrel_info['aux_vdisk_name'] = aux_vol rcrel_info['primary'] = 'master' rcrel_info['consistency_group_id'] = '' rcrel_info['consistency_group_name'] = '' rcrel_info['state'] = 'inconsistent_stopped' rcrel_info['bg_copy_priority'] = '50' rcrel_info['progress'] = '0' rcrel_info['freeze_time'] = '' rcrel_info['status'] = 'online' rcrel_info['sync'] = '' rcrel_info['copy_type'] = 'global' if 'global' in kwargs else 'metro' rcrel_info['cycling_mode'] = cyclingmode if cyclingmode else 'none' rcrel_info['cycle_period_seconds'] = '300' rcrel_info['master_change_vdisk_id'] = '' rcrel_info['master_change_vdisk_name'] = '' rcrel_info['aux_change_vdisk_id'] = '' rcrel_info['aux_change_vdisk_name'] = '' self._rcrelationship_list[rcrel_info['name']] = rcrel_info self._volumes_list[master_vol]['RC_name'] = rcrel_info['name'] self._volumes_list[master_vol]['RC_id'] = rcrel_info['id'] self._volumes_list[aux_vol]['RC_name'] = rcrel_info['name'] self._volumes_list[aux_vol]['RC_id'] = rcrel_info['id'] return ('RC Relationship, id [' + rcrel_info['id'] + '], successfully created', '') def _cmd_lsrcrelationship(self, **kwargs): rows = [] if 'obj' in kwargs: name = kwargs['obj'] for k, v in self._rcrelationship_list.items(): if ((str(v['name']) == name) or (str(v['id']) == name)): self._rc_state_transition('wait', v) if self._next_cmd_error['lsrcrelationship'] == 'speed_up': self._next_cmd_error['lsrcrelationship'] = '' 
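                    # The 'speed_up' error injection (armed via error_injection())
                    # fast-forwards the relationship through its state machine in
                    # a single listing instead of advancing one step per call.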
curr_state = v['status'] while self._rc_state_transition('wait', v) == ("", ""): if curr_state == v['status']: break curr_state = v['status'] rows.append(['id', v['id']]) rows.append(['name', v['name']]) rows.append(['master_cluster_id', v['master_cluster_id']]) rows.append(['master_cluster_name', v['master_cluster_name']]) rows.append(['master_vdisk_id', v['master_vdisk_id']]) rows.append(['master_vdisk_name', v['master_vdisk_name']]) rows.append(['aux_cluster_id', v['aux_cluster_id']]) rows.append(['aux_cluster_name', v['aux_cluster_name']]) rows.append(['aux_vdisk_id', v['aux_vdisk_id']]) rows.append(['aux_vdisk_name', v['aux_vdisk_name']]) rows.append(['consistency_group_id', v['consistency_group_id']]) rows.append(['primary', v['primary']]) rows.append(['consistency_group_name', v['consistency_group_name']]) rows.append(['state', v['state']]) rows.append(['bg_copy_priority', v['bg_copy_priority']]) rows.append(['progress', v['progress']]) rows.append(['freeze_time', v['freeze_time']]) rows.append(['status', v['status']]) rows.append(['sync', v['sync']]) rows.append(['copy_type', v['copy_type']]) rows.append(['cycling_mode', v['cycling_mode']]) rows.append(['cycle_period_seconds', v['cycle_period_seconds']]) rows.append(['master_change_vdisk_id', v['master_change_vdisk_id']]) rows.append(['master_change_vdisk_name', v['master_change_vdisk_name']]) rows.append(['aux_change_vdisk_id', v['aux_change_vdisk_id']]) rows.append(['aux_change_vdisk_name', v['aux_change_vdisk_name']]) if 'nohdr' in kwargs: for index in range(len(rows)): rows[index] = ' '.join(rows[index][1:]) if 'delim' in kwargs: for index in range(len(rows)): rows[index] = kwargs['delim'].join(rows[index]) return ('%s' % '\n'.join(rows), '') def _cmd_startrcrelationship(self, **kwargs): if 'obj' not in kwargs: return self._errors['CMMVC5701E'] id_num = kwargs['obj'] primary_vol = None if 'primary' in kwargs: primary_vol = kwargs['primary'].strip('\'\"') try: rcrel = self._rcrelationship_list[id_num] except KeyError: return self._errors['CMMVC5753E'] if rcrel['state'] == 'idling' and not primary_vol: return self._errors['CMMVC5963E'] self._rc_state_transition('start', rcrel) if primary_vol: self._rcrelationship_list[id_num]['primary'] = primary_vol return ('', '') def _cmd_stoprcrelationship(self, **kwargs): if 'obj' not in kwargs: return self._errors['CMMVC5701E'] id_num = kwargs['obj'] force_access = True if 'access' in kwargs else False try: rcrel = self._rcrelationship_list[id_num] except KeyError: return self._errors['CMMVC5753E'] if rcrel.get('consistency_group_name', None): return self._errors['CMMVC5951E'] function = 'stop_access' if force_access else 'stop' self._rc_state_transition(function, rcrel) if force_access: self._rcrelationship_list[id_num]['primary'] = '' return ('', '') def _cmd_switchrcrelationship(self, **kwargs): if 'obj' not in kwargs: return self._errors['CMMVC5707E'] id_num = kwargs['obj'] try: rcrel = self._rcrelationship_list[id_num] except KeyError: return self._errors['CMMVC5753E'] if rcrel['state'] == storwize_const.REP_CONSIS_SYNC: rcrel['primary'] = kwargs['primary'] return ('', '') else: return self._errors['CMMVC5753E'] def _cmd_chrcrelationship(self, **kwargs): if 'obj' not in kwargs: return self._errors['CMMVC5701E'] id_num = kwargs['obj'] try: rcrel = self._rcrelationship_list[id_num] except KeyError: return self._errors['CMMVC5753E'] remove_from_rccg = True if 'noconsistgrp' in kwargs else False add_to_rccg = True if 'consistgrp' in kwargs else False if remove_from_rccg: if 
rcrel['consistency_group_name']: rccg_name = rcrel['consistency_group_name'] else: return self._errors['CMMVC6065E'] elif add_to_rccg: rccg_name = (kwargs['consistgrp'].strip('\'\"') if 'consistgrp' in kwargs else None) else: return self._chrcrelationship_attr(**kwargs) try: rccg = self._rcconsistgrp_list[rccg_name] except KeyError: return self._errors['CMMVC5753E'] if remove_from_rccg: rcrel['consistency_group_name'] = '' rcrel['consistency_group_id'] = '' if int(rccg['relationship_count']) > 0: rccg['relationship_count'] = str( int(rccg['relationship_count']) - 1) if rccg['relationship_count'] == '0': rccg['state'] = 'empty' rccg['copy_type'] = 'empty_group' else: if rccg['copy_type'] == 'empty_group': rccg['copy_type'] = rcrel['copy_type'] elif rccg['copy_type'] != rcrel['copy_type']: return self._errors['CMMVC9012E'] rcrel['consistency_group_name'] = rccg['name'] rcrel['consistency_group_id'] = rccg['id'] rccg['relationship_count'] = str( int(rccg['relationship_count']) + 1) if rccg['state'] == 'empty': rccg['state'] = rcrel['state'] rccg['primary'] = rcrel['primary'] rccg['cycling_mode'] = rcrel['cycling_mode'] rccg['cycle_period_seconds'] = rcrel['cycle_period_seconds'] return '', '' def _cmd_chrcconsistgrp(self, **kwargs): if 'obj' not in kwargs: return self._errors['CMMVC5701E'] return self._chrcconsistgrp_attr(**kwargs) def _cmd_rmrcrelationship(self, **kwargs): if 'obj' not in kwargs: return self._errors['CMMVC5701E'] id_num = kwargs['obj'] force = True if 'force' in kwargs else False try: rcrel = self._rcrelationship_list[id_num] except KeyError: return self._errors['CMMVC5753E'] function = 'delete_force' if force else 'delete' self._rc_state_transition(function, rcrel) if rcrel['state'] == 'end': self._volumes_list[rcrel['master_vdisk_name']]['RC_name'] = '' self._volumes_list[rcrel['master_vdisk_name']]['RC_id'] = '' self._volumes_list[rcrel['aux_vdisk_name']]['RC_name'] = '' self._volumes_list[rcrel['aux_vdisk_name']]['RC_id'] = '' del self._rcrelationship_list[id_num] return ('', '') def _chrcrelationship_attr(self, **kwargs): if 'obj' not in kwargs: return self._errors['CMMVC5707E'] id_num = kwargs['obj'] try: rcrel = self._rcrelationship_list[id_num] except KeyError: return self._errors['CMMVC5753E'] nonull_num = 0 masterchange = None if 'masterchange' in kwargs: masterchange = kwargs['masterchange'].strip('\'\"') nonull_num += 1 auxchange = None if 'auxchange' in kwargs: auxchange = kwargs['auxchange'].strip('\'\"') nonull_num += 1 cycleperiodseconds = None if 'cycleperiodseconds' in kwargs: cycleperiodseconds = kwargs['cycleperiodseconds'].strip('\'\"') nonull_num += 1 cyclingmode = None if 'cyclingmode' in kwargs: cyclingmode = kwargs['cyclingmode'].strip('\'\"') nonull_num += 1 if nonull_num > 1: return self._errors['CMMVC5713E'] elif masterchange: rcrel['master_change_vdisk_name'] = masterchange return ('', '') elif auxchange: rcrel['aux_change_vdisk_name'] = auxchange return ('', '') elif cycleperiodseconds: rcrel['cycle_period_seconds'] = cycleperiodseconds elif cyclingmode: rcrel['cycling_mode'] = cyclingmode return ('', '') def _chrcconsistgrp_attr(self, **kwargs): if 'obj' not in kwargs: return self._errors['CMMVC5707E'] id_num = kwargs['obj'] try: rccg = self._rcconsistgrp_list[id_num] except KeyError: return self._errors['CMMVC5753E'] if 'cyclingmode' in kwargs: cyclingmode = kwargs['cyclingmode'].strip('\'\"') rccg['cycling_mode'] = cyclingmode return ('', '') def _rc_state_transition(self, function, rcrel): if (function == 'wait' and 'wait' not in 
self._rc_transitions[rcrel['state']]): return ('', '') if rcrel['state'] == 'inconsistent_copying' and function == 'wait': if rcrel['progress'] == '0': rcrel['progress'] = '50' elif (storwize_const.GMCV_MULTI == rcrel['cycling_mode'] and storwize_const.GLOBAL == rcrel['copy_type']): rcrel['progress'] = '100' rcrel['state'] = 'consistent_copying' else: rcrel['progress'] = '100' rcrel['state'] = 'consistent_synchronized' return ('', '') else: try: curr_state = rcrel['state'] rcrel['state'] = self._rc_transitions[curr_state][function] return ('', '') except Exception: return self._errors['CMMVC5982E'] def _rccg_state_transition(self, function, rccg): if (function == 'wait' and 'wait' not in self._rccg_transitions[rccg['state']]): return ('', '') if rccg['state'] == 'inconsistent_copying' and function == 'wait': if rccg['cycling_mode'] == storwize_const.GMCV_MULTI: rccg['state'] = storwize_const.REP_CONSIS_COPYING else: rccg['state'] = storwize_const.REP_CONSIS_SYNC for rcrel_info in self._rcrelationship_list.values(): if rcrel_info['consistency_group_name'] == rccg['name']: rcrel_info['progress'] = '100' rcrel_info['state'] = rccg['state'] return ('', '') else: try: curr_state = rccg['state'] rccg['state'] = self._rccg_transitions[curr_state][function] return ('', '') except Exception: return self._errors['CMMVC5982E'] def _cmd_mkvolumegroup(self, **kwargs): # Create a Volume group volumegroup_info = {} volumegroup_info['id'] = self._find_unused_id(self._volumegroup_list) if 'name' in kwargs: volumegroup_info['name'] = kwargs["name"].strip('\'\"') else: volumegroup_info['name'] = self.driver._get_volumegroup_name( None, volumegroup_info['id']) volumegroup_info['volume_count'] = '0' volumegroup_info['backup_status'] = 'empty' volumegroup_info['last_backup_time'] = '' volumegroup_info['owner_id'] = '' volumegroup_info['owner_name'] = '' volumegroup_info['safeguarded_policy_id'] = '' volumegroup_info['safeguarded_policy_name'] = '' volumegroup_info['safeguarded_policy_start_time'] = '' volumegroup_info['volume_group_type'] = '' volumegroup_info['uid'] = (('ABCDEF' * 3) + ('0' * 14) + volumegroup_info['id']) volumegroup_info['source_volume_group_id'] = '' volumegroup_info['source_volume_group_name'] = '' volumegroup_info['parent_uid'] = '' volumegroup_info['source_snapshot_id'] = '' volumegroup_info['source_snapshot'] = '' volumegroup_info['snapshot_count'] = '0' volumegroup_info['protection_provisioned_capacity'] = '0.00MB' volumegroup_info['protection_written_capacity'] = '0.00MB' volumegroup_info['snapshot_policy_id'] = '' volumegroup_info['snapshot_policy_name'] = '' self._volumegroup_list[volumegroup_info['name']] = volumegroup_info return ('Volume Group, id [' + volumegroup_info['id'] + '], successfully created', '') def _cmd_lsvolumegroup(self, **kwargs): # List the volume group if 'obj' not in kwargs: rows = [] rows.append(['id', 'name', 'volume_count', 'backup_status', 'last_backup_time', 'owner_id', 'owner_name', 'safeguarded_policy_id', 'safeguarded_policy_name', 'safeguarded_policy_start_time', 'volume_group_type', 'uid', 'source_volume_group_id', 'source_volume_group_name', 'parent_uid', 'source_snapshot_id', 'source_snapshot', 'snapshot_count', 'protection_provisioned_capacity', 'protection_written_capacity', 'snapshot_policy_id', 'snapshot_policy_name']) found = False for volumegroup_name in sorted(self._volumegroup_list.keys()): volumegroup = self._volumegroup_list[volumegroup_name] filterstr = 'name=' + volumegroup['name'] if (('filtervalue' not in kwargs) or 
(kwargs['filtervalue'] == filterstr)): rows.append(['0', 'empty', '', '', '', '', '', '', '', '((\'ABCDEF\' * 3) + (\'0\' * 14) +\ vg_info[\'id\'])', '', '', '', '', '', '0', '0.00MB', '0.00MB', '', '']) found = True if found: return self._print_info_cmd(rows=rows, **kwargs) else: return ('', '') else: volumegroup_info = kwargs['obj'].strip('\'\"') if volumegroup_info not in self._volumegroup_list: return self._errors['CMMVC5804E'] volumegroup_info = self._volumegroup_list[volumegroup_info] rows = [] rows.append(['id', volumegroup_info['id']]) rows.append(['name', volumegroup_info['name']]) rows.append(['volume_count', '1']) rows.append(['backup_status', 'off']) rows.append(['last_backup_time', volumegroup_info['last_backup_time']]) rows.append(['owner_id', volumegroup_info['owner_id']]) rows.append(['owner_name', volumegroup_info['owner_name']]) rows.append(['safeguarded_policy_id', volumegroup_info['safeguarded_policy_id']]) rows.append(['safeguarded_policy_name', volumegroup_info['safeguarded_policy_name']]) rows.append(['safeguarded_policy_start_time', volumegroup_info['safeguarded_policy_start_time']]) rows.append(['volume_group_type', volumegroup_info['volume_group_type']]) rows.append(['source_volume_group_id', volumegroup_info['source_volume_group_id']]) rows.append(['source_volume_group_name', volumegroup_info['source_volume_group_name']]) rows.append(['parent_uid', volumegroup_info['parent_uid']]) rows.append(['source_snapshot_id', volumegroup_info['source_snapshot_id']]) rows.append(['source_snapshot', volumegroup_info['source_snapshot']]) rows.append(['snapshot_count', volumegroup_info['snapshot_count']]) rows.append(['protection_provisioned_capacity', '1.00GB']) rows.append(['protection_written_capacity', '0.75MB']) rows.append(['snapshot_policy_id', volumegroup_info['snapshot_policy_id']]) rows.append(['snapshot_policy_name', volumegroup_info['snapshot_policy_name']]) if 'delim' in kwargs: for index in range(len(rows)): rows[index] = kwargs['delim'].join(rows[index]) return ('%s' % '\n'.join(rows), '') def _cmd_rmvolumegroup(self, **kwargs): # Delete a Volume Group if 'obj' not in kwargs: return self._errors['CMMVC5701E'] volumegroup_name = kwargs['obj'].strip('\'\"') del self._volumegroup_list[volumegroup_name] return ('', '') def _cmd_addsnapshot(self, **kwargs): # Create a Volumegroup snapshot volumegroup_snapshot_info = {} volumegroup_snapshot_info['id'] = self._find_unused_id( self._volumegroup_snapshot_list) volumegroup_name = kwargs['volumegroup'] if 'name' in kwargs: volumegroup_snapshot_info['name'] = kwargs["name"].strip('\'\"') else: volumegroup_snapshot_info['name'] = ( 'vg_snap-' + volumegroup_snapshot_info['id']) volumegroup_snapshot_info['volume_group_id'] = ( self._volumegroup_list[volumegroup_name]['id']) volumegroup_snapshot_info['volume_group_name'] = volumegroup_name volumegroup_snapshot_info['time'] = '' volumegroup_snapshot_info['state'] = 'active' volumegroup_snapshot_info['matches_group'] = 'yes' volumegroup_snapshot_info['parent_uid'] = ( self._volumegroup_list[volumegroup_name]['uid']) volumegroup_snapshot_info['expiration_time'] = '' volumegroup_snapshot_info['protection_provisioned_capacity'] = '1.00GB' volumegroup_snapshot_info['protection_written_capacity'] = '0.75MB' volumegroup_snapshot_info['operation_start_time'] = '' volumegroup_snapshot_info['operation_completion_estimate'] = '' volumegroup_snapshot_info['owner_id'] = '' volumegroup_snapshot_info['owner_name'] = '' volumegroup_snapshot_info['auto_snapshot'] = 'no' 
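        # The snapshot record assembled above is stored by name so that
        # _cmd_lsvolumegroupsnapshot and _cmd_rmsnapshot can look it up later.
        # Purely illustrative sketch (the names are made up); tests normally
        # drive the simulator through execute_command() or the fake drivers,
        # but the same path can be reached directly:
        #   sim = StorwizeSVCManagementSimulator(['openstack'])
        #   sim._cmd_mkvolumegroup(name='"vg1"')
        #   out, err = sim._cmd_addsnapshot(volumegroup='vg1', name='"snap1"')
        #   assert 'successfully created' in out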
self._volumegroup_snapshot_list[volumegroup_snapshot_info['name']] = ( volumegroup_snapshot_info) return ('Snapshot, id [' + volumegroup_snapshot_info['id'] + '], successfully created or triggered', '') def _cmd_lsvolumegroupsnapshot(self, **kwargs): # List the volume group snapshot rows = [] rows.append(['id', 'name', 'volume_group_id', 'volume_group_name', 'time', 'state', 'matches_group', 'parent_uid', 'expiration_time', 'protection_provisioned_capacity', 'protection_written_capacity', 'operation_start_time', 'operation_completion_estimate', 'owner_id', 'owner_name', 'auto_snapshot']) if 'snapshot' and 'volumegroup' not in kwargs: found = False for volumegroup_snapshot in sorted( self._volumegroup_snapshot_list.keys()): volumegroup_snapshot_info = self._volumegroup_snapshot_list[ volumegroup_snapshot] if 'filtervalue' not in kwargs: rows.append( [volumegroup_snapshot_info['id'], volumegroup_snapshot_info['name'], volumegroup_snapshot_info['volume_group_id'], volumegroup_snapshot_info['volume_group_name'], '', 'active', 'yes', volumegroup_snapshot_info['parent_uid'], '', '1.00GB', '0.75MB', '', '', '', '', 'no']) found = True if found: return self._print_info_cmd(rows=rows, **kwargs) else: return ('', '') else: volumegroup_snapshot_info = kwargs['snapshot'].strip('\'\"') if volumegroup_snapshot_info not in ( self._volumegroup_snapshot_list): return self._errors['CMMVC5804E'] volumegroup_snapshot_info = self._volumegroup_snapshot_list[ volumegroup_snapshot_info] rows.append( [volumegroup_snapshot_info['id'], volumegroup_snapshot_info['name'], volumegroup_snapshot_info['volume_group_id'], volumegroup_snapshot_info['volume_group_name'], volumegroup_snapshot_info['time'], volumegroup_snapshot_info['state'], volumegroup_snapshot_info['matches_group'], volumegroup_snapshot_info['parent_uid'], volumegroup_snapshot_info['expiration_time'], volumegroup_snapshot_info['protection_provisioned_capacity'], volumegroup_snapshot_info['protection_written_capacity'], volumegroup_snapshot_info['operation_start_time'], volumegroup_snapshot_info['operation_completion_estimate'], volumegroup_snapshot_info['owner_id'], volumegroup_snapshot_info['owner_name'], volumegroup_snapshot_info['auto_snapshot']]) if 'delim' in kwargs: for index in range(len(rows)): rows[index] = kwargs['delim'].join(rows[index]) return ('%s' % '\n'.join(rows), '') def _cmd_rmsnapshot(self, **kwargs): # Delete a Volume Group snapshot if 'snapshot' and 'volumegroup' not in kwargs: return self._errors['CMMVC5701E'] volumegroup_snapshot_name = kwargs['snapshot'].strip('\'\"') if volumegroup_snapshot_name not in self._volumegroup_snapshot_list: return self._errors['CMMVC9755E'] del self._volumegroup_snapshot_list[volumegroup_snapshot_name] return ('', '') def _cmd_mkrcconsistgrp(self, **kwargs): master_sys = self._system_list['storwize-svc-sim'] aux_sys = self._system_list['aux-svc-sim'] if 'cluster' not in kwargs: return self._errors['CMMVC5707E'] aux_cluster = kwargs['cluster'].strip('\'\"') if (aux_cluster != aux_sys['id'] and aux_cluster != master_sys['id']): return self._errors['CMMVC5754E'] rccg_info = {} rccg_info['id'] = self._find_unused_id(self._rcconsistgrp_list) if 'name' in kwargs: rccg_info['name'] = kwargs['name'].strip('\'\"') else: rccg_info['name'] = self.driver._get_rccg_name(None, rccg_info['id']) rccg_info['master_cluster_id'] = master_sys['id'] rccg_info['master_cluster_name'] = master_sys['name'] rccg_info['aux_cluster_id'] = aux_sys['id'] rccg_info['aux_cluster_name'] = aux_sys['name'] rccg_info['primary'] = '' 
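        # A freshly created RC consistency group starts out empty; the state,
        # relationship_count and copy_type defaults below are adjusted later by
        # _cmd_chrcrelationship as relationships are added to or removed from
        # the group.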
rccg_info['state'] = 'empty' rccg_info['relationship_count'] = '0' rccg_info['freeze_time'] = '' rccg_info['status'] = '' rccg_info['sync'] = '' rccg_info['copy_type'] = 'empty_group' rccg_info['cycling_mode'] = '' rccg_info['cycle_period_seconds'] = '300' self._rcconsistgrp_list[rccg_info['name']] = rccg_info return ('RC Consistency Group, id [' + rccg_info['id'] + '], successfully created', '') def _cmd_lsrcconsistgrp(self, **kwargs): rows = [] if 'obj' not in kwargs: rows.append(['id', 'name', 'master_cluster_id', 'master_cluster_name', 'aux_cluster_id', 'aux_cluster_name', 'primary', 'state', 'relationship_count', 'copy_type', 'cycling_mode', 'freeze_time']) for rccg_info in self._rcconsistgrp_list.values(): rows.append([rccg_info['id'], rccg_info['name'], rccg_info['master_cluster_id'], rccg_info['master_cluster_name'], rccg_info['aux_cluster_id'], rccg_info['aux_cluster_name'], rccg_info['primary'], rccg_info['state'], rccg_info['relationship_count'], rccg_info['copy_type'], rccg_info['cycling_mode'], rccg_info['freeze_time']]) return self._print_info_cmd(rows=rows, **kwargs) else: try: rccg_info = self._rcconsistgrp_list[kwargs['obj']] except KeyError: return self._errors['CMMVC5804E'] rows = [] rows.append(['id', rccg_info['id']]) rows.append(['name', rccg_info['name']]) rows.append(['master_cluster_id', rccg_info['master_cluster_id']]) rows.append(['master_cluster_name', rccg_info['master_cluster_name']]) rows.append(['aux_cluster_id', rccg_info['aux_cluster_id']]) rows.append(['aux_cluster_name', rccg_info['aux_cluster_name']]) rows.append(['primary', rccg_info['primary']]) rows.append(['state', rccg_info['state']]) rows.append(['relationship_count', rccg_info['relationship_count']]) rows.append(['freeze_time', rccg_info['freeze_time']]) rows.append(['status', rccg_info['status']]) rows.append(['sync', rccg_info['sync']]) rows.append(['copy_type', rccg_info['copy_type']]) rows.append(['cycling_mode', rccg_info['cycling_mode']]) rows.append(['cycle_period_seconds', rccg_info['cycle_period_seconds']]) if 'delim' in kwargs: for index in range(len(rows)): rows[index] = kwargs['delim'].join(rows[index]) return ('%s' % '\n'.join(rows), '') def _cmd_startrcconsistgrp(self, **kwargs): if 'obj' not in kwargs: return self._errors['CMMVC5701E'] id_num = kwargs['obj'] primary = (kwargs['primary'].strip('\'\"') if 'primary' in kwargs else None) try: rccg = self._rcconsistgrp_list[id_num] except KeyError: return self._errors['CMMVC5753E'] if rccg['state'] == 'idling' and not primary: return self._errors['CMMVC5963E'] self._rccg_state_transition('start', rccg) for rcrel_info in self._rcrelationship_list.values(): if rcrel_info['consistency_group_name'] == rccg: self._rc_state_transition('start', rcrel_info) if primary: self._rcconsistgrp_list[id_num]['primary'] = primary for rcrel_info in self._rcrelationship_list.values(): if rcrel_info['consistency_group_name'] == rccg['name']: rcrel_info['primary'] = primary return ('', '') def _cmd_stoprcconsistgrp(self, **kwargs): if 'obj' not in kwargs: return self._errors['CMMVC5701E'] id_num = kwargs['obj'] force_access = True if 'access' in kwargs else False try: rccg = self._rcconsistgrp_list[id_num] except KeyError: return self._errors['CMMVC5753E'] function = 'stop' self._rccg_state_transition(function, rccg) for rcrel_info in self._rcrelationship_list.values(): if rcrel_info['consistency_group_name'] == rccg['name']: self._rc_state_transition(function, rcrel_info) if force_access: self._rcconsistgrp_list[id_num]['primary'] = '' for rcrel_info in 
self._rcrelationship_list.values(): if rcrel_info['consistency_group_name'] == rccg['name']: rcrel_info['primary'] = '' return ('', '') def _cmd_switchrcconsistgrp(self, **kwargs): if 'obj' not in kwargs: return self._errors['CMMVC5707E'] id_num = kwargs['obj'] try: rccg = self._rcconsistgrp_list[id_num] except KeyError: return self._errors['CMMVC5753E'] if (rccg['state'] == storwize_const.REP_CONSIS_SYNC or (rccg['cycling_mode'] == storwize_const.GMCV_MULTI and rccg['state'] == storwize_const.REP_CONSIS_COPYING)): rccg['primary'] = kwargs['primary'] for rcrel_info in self._rcrelationship_list.values(): if rcrel_info['consistency_group_name'] == rccg['name']: rcrel_info['primary'] = kwargs['primary'] return ('', '') else: return self._errors['CMMVC5753E'] def _cmd_rmrcconsistgrp(self, **kwargs): if 'obj' not in kwargs: return self._errors['CMMVC5701E'] rccg_name = kwargs['obj'].strip('\'\"') force = True if 'force' in kwargs else False try: rccg = self._rcconsistgrp_list[rccg_name] except KeyError: return self._errors['CMMVC5804E'] function = 'delete_force' if force else 'delete' self._rccg_state_transition(function, rccg) if rccg['state'] == 'end': for rcrel_info in self._rcrelationship_list.values(): if rcrel_info['consistency_group_name'] == rccg['name']: rcrel_info['consistency_group_name'] = '' rcrel_info['consistency_group_id'] = '' del self._rcconsistgrp_list[rccg_name] return ('', '') def _cmd_lspartnershipcandidate(self, **kwargs): rows = [None] * 4 master_sys = self._system_list['storwize-svc-sim'] aux_sys = self._system_list['aux-svc-sim'] rows[0] = ['id', 'configured', 'name'] rows[1] = [master_sys['id'], 'no', master_sys['name']] rows[2] = [aux_sys['id'], 'no', aux_sys['name']] rows[3] = ['0123456789001234', 'no', 'fake_svc'] return self._print_info_cmd(rows=rows, **kwargs) def _cmd_lspartnership(self, **kwargs): rows = [] rows.append(['id', 'name', 'location', 'partnership', 'type', 'cluster_ip', 'event_log_sequence']) master_sys = self._system_list['storwize-svc-sim'] if master_sys['name'] not in self._partnership_list: local_info = {} local_info['id'] = master_sys['id'] local_info['name'] = master_sys['name'] local_info['location'] = 'local' local_info['type'] = '' local_info['cluster_ip'] = '' local_info['event_log_sequence'] = '' local_info['chap_secret'] = '' local_info['linkbandwidthmbits'] = '' local_info['backgroundcopyrate'] = '' local_info['partnership'] = '' self._partnership_list[master_sys['id']] = local_info # Assume we always get a filtervalue argument filter_key = kwargs['filtervalue'].split('=')[0] filter_value = kwargs['filtervalue'].split('=')[1] for k, v in self._partnership_list.items(): if str(v[filter_key]) == filter_value: rows.append([v['id'], v['name'], v['location'], v['partnership'], v['type'], v['cluster_ip'], v['event_log_sequence']]) return self._print_info_cmd(rows=rows, **kwargs) def _cmd_mkippartnership(self, **kwargs): if 'clusterip' not in kwargs: return self._errors['CMMVC5707E'] clusterip = kwargs['master'].strip('\'\"') if 'linkbandwidthmbits' not in kwargs: return self._errors['CMMVC5707E'] bandwidth = kwargs['linkbandwidthmbits'].strip('\'\"') if 'backgroundcopyrate' not in kwargs: return self._errors['CMMVC5707E'] copyrate = kwargs['backgroundcopyrate'].strip('\'\"') if clusterip == '192.168.10.21': partner_info_id = self._system_list['storwize-svc-sim']['id'] partner_info_name = self._system_list['storwize-svc-sim']['name'] else: partner_info_id = self._system_list['aux-svc-sim']['id'] partner_info_name = 
self._system_list['aux-svc-sim']['name'] partner_info = {} partner_info['id'] = partner_info_id partner_info['name'] = partner_info_name partner_info['location'] = 'remote' partner_info['type'] = 'ipv4' partner_info['cluster_ip'] = clusterip partner_info['event_log_sequence'] = '' partner_info['chap_secret'] = '' partner_info['linkbandwidthmbits'] = bandwidth partner_info['backgroundcopyrate'] = copyrate partner_info['partnership'] = 'fully_configured' self._partnership_list[partner_info['id']] = partner_info return ('', '') def _cmd_mkfcpartnership(self, **kwargs): if 'obj' not in kwargs: return self._errors['CMMVC5701E'] peer_sys = kwargs['obj'] if 'linkbandwidthmbits' not in kwargs: return self._errors['CMMVC5707E'] bandwidth = kwargs['linkbandwidthmbits'].strip('\'\"') if 'backgroundcopyrate' not in kwargs: return self._errors['CMMVC5707E'] copyrate = kwargs['backgroundcopyrate'].strip('\'\"') partner_info = {} partner_info['id'] = self._system_list[peer_sys]['id'] partner_info['name'] = peer_sys partner_info['location'] = 'remote' partner_info['type'] = 'fc' partner_info['cluster_ip'] = '' partner_info['event_log_sequence'] = '' partner_info['chap_secret'] = '' partner_info['linkbandwidthmbits'] = bandwidth partner_info['backgroundcopyrate'] = copyrate partner_info['partnership'] = 'fully_configured' self._partnership_list[partner_info['id']] = partner_info return ('', '') def _cmd_chpartnership(self, **kwargs): if 'obj' not in kwargs: return self._errors['CMMVC5701E'] peer_sys = kwargs['obj'] if peer_sys not in self._partnership_list: return self._errors['CMMVC5753E'] partner_state = ('fully_configured' if 'start' in kwargs else 'fully_configured_stopped') self._partnership_list[peer_sys]['partnership'] = partner_state return ('', '') # The main function to run commands on the management simulator def execute_command(self, cmd, check_exit_code=True): try: kwargs = self._cmd_to_dict(cmd) except IndexError: return self._errors['CMMVC5707E'] command = kwargs.pop('cmd') func = getattr(self, '_cmd_' + command) out, err = func(**kwargs) if (check_exit_code) and (len(err) != 0): raise processutils.ProcessExecutionError(exit_code=1, stdout=out, stderr=err, cmd=' '.join(cmd)) return (out, err) # After calling this function, the next call to the specified command will # result in in the error specified def error_injection(self, cmd, error): self._next_cmd_error[cmd] = error def change_vdiskcopy_attr(self, vol_name, key, value, copy="primary"): if copy == 'primary': self._volumes_list[vol_name]['copies']['0'][key] = value elif copy == 'secondary': self._volumes_list[vol_name]['copies']['1'][key] = value else: msg = _("The copy should be primary or secondary") raise exception.InvalidInput(reason=msg) def create_site_volume_and_fcmapping(self, kwargs, name, sitepool, fcmapping=False, source=None): sitepool_id = self._get_mdiskgrp_id(sitepool) site_volume_info = {} site_volume_info['id'] = self._find_unused_id(self._volumes_list) site_volume_info['uid'] = ('ABCDEF' * 3) + ( '0' * 14) + site_volume_info['id'] site_volume_info['mdisk_grp_name'] = sitepool site_volume_info['mdisk_grp_id'] = str(sitepool_id) if 'name' in kwargs or 'obj' in kwargs: site_volume_info['name'] = name else: site_volume_info['name'] = name + site_volume_info['id'] # Assume size and unit are given, store it in bytes if "size" in kwargs: capacity = int(kwargs['size']) unit = kwargs['unit'] site_volume_info['capacity'] = self._convert_units_bytes( capacity, unit) else: site_volume_info['capacity'] = source['capacity'] 
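        # The remaining attributes mimic what a vdisk listing would report for
        # a new volume: thin or compressed copies get fake capacity and
        # grainsize figures, while fully allocated volumes report used
        # capacity equal to the virtual capacity.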
site_volume_info['IO_group_id'] = '0' site_volume_info['IO_group_name'] = 'io_grp0' site_volume_info['RC_name'] = '' site_volume_info['RC_id'] = '' if 'thin' in kwargs or 'compressed' in kwargs: site_volume_info['formatted'] = 'no' # Fake numbers site_volume_info['used_capacity'] = '786432' site_volume_info['real_capacity'] = '21474816' site_volume_info['free_capacity'] = '38219264' if 'warning' in kwargs: site_volume_info['warning'] = kwargs['warning'].rstrip('%') else: site_volume_info['warning'] = '80' if 'noautoexpand' in kwargs: site_volume_info['autoexpand'] = 'off' else: site_volume_info['autoexpand'] = 'on' if 'compressed' in kwargs: site_volume_info['compressed_copy'] = 'yes' else: site_volume_info['compressed_copy'] = 'no' if 'thin' in kwargs: site_volume_info['formatted'] = 'no' # Fake numbers site_volume_info['used_capacity'] = '786432' site_volume_info['real_capacity'] = '21474816' site_volume_info['free_capacity'] = '38219264' if 'grainsize' in kwargs: site_volume_info['grainsize'] = kwargs['grainsize'] else: site_volume_info['grainsize'] = '32' else: site_volume_info['used_capacity'] = site_volume_info['capacity'] site_volume_info['real_capacity'] = site_volume_info['capacity'] site_volume_info['free_capacity'] = '0' site_volume_info['warning'] = '' site_volume_info['autoexpand'] = '' site_volume_info['grainsize'] = '' site_volume_info['compressed_copy'] = 'no' site_volume_info['formatted'] = 'yes' vol_cp = {'id': '0', 'status': 'online', 'sync': 'yes', 'primary': 'yes', 'mdisk_grp_id': str(sitepool_id), 'mdisk_grp_name': sitepool, 'easy_tier': 'on', 'compressed_copy': site_volume_info['compressed_copy']} site_volume_info['copies'] = {'0': vol_cp} if site_volume_info['name'] in self._volumes_list: return self._errors['CMMVC6035E'] else: self._volumes_list[site_volume_info['name']] = site_volume_info # create a flashcopy mapping for site volume and site flashcopy volume if fcmapping: site_fcmap_info = {} site_fcmap_info['source'] = source['name'] site_fcmap_info['target'] = site_volume_info['name'] site_fcmap_info['id'] = self._find_unused_id(self._fcmappings_list) site_fcmap_info['name'] = 'fcmap' + site_fcmap_info['id'] site_fcmap_info['copyrate'] = '50' site_fcmap_info['progress'] = '0' site_fcmap_info['autodelete'] = (True if 'autodelete' in kwargs else False) site_fcmap_info['status'] = 'idle_or_copied' site_fcmap_info['rc_controlled'] = 'yes' self._fcmappings_list[site_fcmap_info['id']] = site_fcmap_info return site_volume_info def _cmd_mkvolume(self, **kwargs): pool = kwargs['pool'].strip('\'\"') pool_split = pool.split(':') if len(pool_split) != 2: raise exception.InvalidInput( reason=_('pool %s is invalid for hyperswap ' 'volume') % kwargs['pool']) else: site1pool = pool_split[0] site2pool = pool_split[1] if pool == kwargs['pool']: raise exception.InvalidInput( reason=_('pool missing quotes %s') % kwargs['pool']) if 'name' in kwargs: site1name = kwargs['name'].strip('\'\"') site1fcname = 'fcsite1' + kwargs['name'].strip('\'\"') site2name = 'site2' + kwargs['name'].strip('\'\"') site2fcname = 'fcsite2' + kwargs['name'].strip('\'\"') else: site1name = 'vdisk' site1fcname = 'fcsite1vdisk' site2name = 'site2vdisk' site2fcname = 'fcsite2vdisk' # create hyperswap volume on site1 site1_volume_info = self.create_site_volume_and_fcmapping( kwargs, site1name, site1pool, False, None) # create flashcopy volume on site1 self.create_site_volume_and_fcmapping(kwargs, site1fcname, site1pool, True, site1_volume_info) # create hyperswap volume on site2 site2_volume_info = 
self.create_site_volume_and_fcmapping( kwargs, site2name, site2pool, False, site1_volume_info) # create flashcopy volume on site2 self.create_site_volume_and_fcmapping(kwargs, site2fcname, site2pool, True, site2_volume_info) # Create remote copy for site1volume and site2volume master_sys = self._system_list['storwize-svc-sim'] aux_sys = self._system_list['storwize-svc-sim'] rcrel_info = {} rcrel_info['id'] = self._find_unused_id(self._rcrelationship_list) rcrel_info['name'] = 'rcrel' + rcrel_info['id'] rcrel_info['master_cluster_id'] = master_sys['id'] rcrel_info['master_cluster_name'] = master_sys['name'] rcrel_info['master_vdisk_id'] = site1_volume_info['id'] rcrel_info['master_vdisk_name'] = site1_volume_info['name'] rcrel_info['aux_cluster_id'] = aux_sys['id'] rcrel_info['aux_cluster_name'] = aux_sys['name'] rcrel_info['aux_vdisk_id'] = site2_volume_info['id'] rcrel_info['aux_vdisk_name'] = site2_volume_info['name'] rcrel_info['primary'] = 'master' rcrel_info['consistency_group_id'] = '' rcrel_info['consistency_group_name'] = '' rcrel_info['state'] = 'inconsistent_stopped' rcrel_info['bg_copy_priority'] = '50' rcrel_info['progress'] = '0' rcrel_info['freeze_time'] = '' rcrel_info['status'] = 'online' rcrel_info['sync'] = '' rcrel_info['copy_type'] = 'activeactive' rcrel_info['cycling_mode'] = '' rcrel_info['cycle_period_seconds'] = '300' rcrel_info['master_change_vdisk_id'] = '' rcrel_info['master_change_vdisk_name'] = '' rcrel_info['aux_change_vdisk_id'] = '' rcrel_info['aux_change_vdisk_name'] = '' self._rcrelationship_list[rcrel_info['name']] = rcrel_info site1_volume_info['RC_name'] = rcrel_info['name'] site1_volume_info['RC_id'] = rcrel_info['id'] site2_volume_info['RC_name'] = rcrel_info['name'] site2_volume_info['RC_id'] = rcrel_info['id'] return ('Hyperswap volume, id [%s], successfully created' % (site1_volume_info['id']), '') def _cmd_addvolumecopy(self, **kwargs): if 'obj' not in kwargs: return self._errors['CMMVC5701E'] vol_name = kwargs['obj'].strip('\'\"') site1_volume_info = self._volumes_list[vol_name] site1pool = site1_volume_info['mdisk_grp_name'] site2pool = kwargs['pool'].strip('\'\"') site1fcname = 'fcsite1' + vol_name site2name = 'site2' + vol_name site2fcname = 'fcsite2' + vol_name # create flashcopy volume on site1 self.create_site_volume_and_fcmapping(kwargs, site1fcname, site1pool, True, site1_volume_info) # create hyperswap volume on site2 site2_volume_info = self.create_site_volume_and_fcmapping( kwargs, site2name, site1pool, False, site1_volume_info) # create flashcopy volume on site2 self.create_site_volume_and_fcmapping(kwargs, site2fcname, site2pool, True, site2_volume_info) # create remote copy for site1volume and site2volume master_sys = self._system_list['storwize-svc-sim'] aux_sys = self._system_list['storwize-svc-sim'] rcrel_info = {} rcrel_info['id'] = self._find_unused_id(self._rcrelationship_list) rcrel_info['name'] = 'rcrel' + rcrel_info['id'] rcrel_info['master_cluster_id'] = master_sys['id'] rcrel_info['master_cluster_name'] = master_sys['name'] rcrel_info['master_vdisk_id'] = site1_volume_info['id'] rcrel_info['master_vdisk_name'] = site1_volume_info['name'] rcrel_info['aux_cluster_id'] = aux_sys['id'] rcrel_info['aux_cluster_name'] = aux_sys['name'] rcrel_info['aux_vdisk_id'] = site2_volume_info['id'] rcrel_info['aux_vdisk_name'] = site2_volume_info['name'] rcrel_info['primary'] = 'master' rcrel_info['consistency_group_id'] = '' rcrel_info['consistency_group_name'] = '' rcrel_info['state'] = 'inconsistent_stopped' 
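        # As in _cmd_mkvolume above, this active-active relationship ties the
        # site1 and site2 copies of the hyperswap volume together; once the
        # remaining defaults are filled in, both volumes are tagged with the
        # relationship name and id.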
rcrel_info['bg_copy_priority'] = '50' rcrel_info['progress'] = '0' rcrel_info['freeze_time'] = '' rcrel_info['status'] = 'online' rcrel_info['sync'] = '' rcrel_info['copy_type'] = 'activeactive' rcrel_info['cycling_mode'] = '' rcrel_info['cycle_period_seconds'] = '300' rcrel_info['master_change_vdisk_id'] = '' rcrel_info['master_change_vdisk_name'] = '' rcrel_info['aux_change_vdisk_id'] = '' rcrel_info['aux_change_vdisk_name'] = '' self._rcrelationship_list[rcrel_info['name']] = rcrel_info site1_volume_info['RC_name'] = rcrel_info['name'] site1_volume_info['RC_id'] = rcrel_info['id'] site2_volume_info['RC_name'] = rcrel_info['name'] site2_volume_info['RC_id'] = rcrel_info['id'] return ('', '') def _cmd_rmvolumecopy(self, **kwargs): if 'obj' not in kwargs: return self._errors['CMMVC5701E'] vol_name = kwargs['obj'].strip('\'\"') site1_volume_info = self._volumes_list[vol_name] if site1_volume_info['RC_name']: rcrel = self._rcrelationship_list[site1_volume_info['RC_name']] if rcrel.get('consistency_group_name', None): return self._errors['CMMVC8783E'] site2_volume_info = self._volumes_list['site2' + vol_name] del self._rcrelationship_list[self._volumes_list[vol_name]['RC_name']] site1fcmap = None site2fcmap = None for fcmap in self._fcmappings_list.values(): if ((fcmap['source'] == vol_name) and (fcmap['target'] == 'fcsite1' + vol_name)): site1fcmap = fcmap continue elif ((fcmap['source'] == 'site2' + vol_name) and (fcmap['target'] == 'fcsite2' + vol_name)): site2fcmap = fcmap continue if site1fcmap: del self._fcmappings_list[site1fcmap['id']] if site2fcmap: del self._fcmappings_list[site2fcmap['id']] del site2_volume_info del self._volumes_list['site2' + vol_name] del self._volumes_list['fcsite1' + vol_name] del self._volumes_list['fcsite2' + vol_name] site1_volume_info['RC_name'] = '' site1_volume_info['RC_id'] = '' return ('', '') def _cmd_rmvolume(self, **kwargs): removehostmappings = True if 'removehostmappings' in kwargs else False if 'obj' not in kwargs: return self._errors['CMMVC5701E'] vol_name = kwargs['obj'].strip('\'\"') if vol_name not in self._volumes_list: return self._errors['CMMVC5753E'] site1fcmap = None site2fcmap = None for fcmap in self._fcmappings_list.values(): if ((fcmap['source'] == vol_name) and (fcmap['target'] == 'fcsite1' + vol_name)): site1fcmap = fcmap continue elif ((fcmap['source'] == 'site2' + vol_name) and (fcmap['target'] == 'fcsite2' + vol_name)): site2fcmap = fcmap continue if site1fcmap: del self._fcmappings_list[site1fcmap['id']] if site2fcmap: del self._fcmappings_list[site2fcmap['id']] if not removehostmappings: for mapping in self._mappings_list.values(): if mapping['vol'] == vol_name: return self._errors['CMMVC5840E'] del self._rcrelationship_list[self._volumes_list[vol_name]['RC_name']] del self._volumes_list[vol_name] del self._volumes_list['fcsite1' + vol_name] del self._volumes_list['site2' + vol_name] del self._volumes_list['fcsite2' + vol_name] return ('', '') class StorwizeSVCISCSIFakeDriver(storwize_svc_iscsi.StorwizeSVCISCSIDriver): def __init__(self, *args, **kwargs): super(StorwizeSVCISCSIFakeDriver, self).__init__(*args, **kwargs) def set_fake_storage(self, fake): self.fake_storage = fake def _run_ssh(self, cmd, check_exit_code=True, attempts=1): utils.check_ssh_injection(cmd) ret = self.fake_storage.execute_command(cmd, check_exit_code) return ret class StorwizeSVCFcFakeDriver(storwize_svc_fc.StorwizeSVCFCDriver): def __init__(self, *args, **kwargs): super(StorwizeSVCFcFakeDriver, self).__init__(*args, **kwargs) def 
set_fake_storage(self, fake): self.fake_storage = fake def _run_ssh(self, cmd, check_exit_code=True, attempts=1): utils.check_ssh_injection(cmd) ret = self.fake_storage.execute_command(cmd, check_exit_code) return ret @ddt.ddt class StorwizeSVCISCSIDriverTestCase(test.TestCase): @mock.patch.object(time, 'sleep') def setUp(self, mock_sleep): super(StorwizeSVCISCSIDriverTestCase, self).setUp() self.USESIM = True if self.USESIM: self.iscsi_driver = StorwizeSVCISCSIFakeDriver( configuration=conf.Configuration([], conf.SHARED_CONF_GROUP)) self.host_site = {'site1': 'iqn.1993-08.org.debian:01:eac5ccc1aaa'} self._def_flags = {'san_ip': 'hostname', 'san_login': 'user', 'san_password': 'pass', 'storwize_svc_volpool_name': ['openstack'], 'storwize_svc_flashcopy_timeout': 20, 'storwize_svc_flashcopy_rate': 49, 'storwize_svc_multipath_enabled': False, 'storwize_svc_allow_tenant_qos': True, 'storwize_preferred_host_site': self.host_site} wwpns = [ str(random.randint(0, 9999999999999999)).zfill(16), str(random.randint(0, 9999999999999999)).zfill(16)] initiator = 'test.initiator.%s' % str( random.randint(10000, 99999)) self._connector = {'ip': '1.234.56.78', 'host': 'storwize-svc-test', 'wwpns': wwpns, 'initiator': initiator} self.sim = StorwizeSVCManagementSimulator(['openstack']) self.iscsi_driver.set_fake_storage(self.sim) self.ctxt = context.get_admin_context() self._reset_flags() self.ctxt = context.get_admin_context() self.db = cinder.db self.iscsi_driver.db = self.db self.iscsi_driver.do_setup(None) self.iscsi_driver.check_for_setup_error() self.iscsi_driver._helpers.check_fcmapping_interval = 0 def _set_flag(self, flag, value): group = self.iscsi_driver.configuration.config_group self.override_config(flag, value, group) def _reset_flags(self): CONF.reset() for k, v in self._def_flags.items(): self._set_flag(k, v) def _create_volume(self, **kwargs): pool = _get_test_pool() prop = {'host': 'openstack@svc#%s' % pool, 'size': 1, 'volume_type_id': self.vt['id']} for p in prop.keys(): if p not in kwargs: kwargs[p] = prop[p] vol = testutils.create_volume(self.ctxt, **kwargs) self.iscsi_driver.create_volume(vol) return vol def _delete_volume(self, volume): self.iscsi_driver.delete_volume(volume) self.db.volume_destroy(self.ctxt, volume['id']) def _generate_vol_info(self, vol_name, vol_id): pool = _get_test_pool() prop = {'mdisk_grp_name': pool} if vol_name: prop.update(volume_name=vol_name, volume_id=vol_id, volume_size=10) else: prop.update(size=10, volume_type_id=None, mdisk_grp_name=pool, host='openstack@svc#%s' % pool) vol = testutils.create_volume(self.ctxt, **prop) return vol def _generate_snap_info(self, vol_id, size=10): prop = {'volume_id': vol_id, 'volume_size': size} snap = testutils.create_snapshot(self.ctxt, **prop) return snap def _assert_vol_exists(self, name, exists): is_vol_defined = self.iscsi_driver._helpers.is_vdisk_defined(name) self.assertEqual(exists, is_vol_defined) def test_storwize_svc_iscsi_validate_connector(self): conn_neither = {'host': 'host'} conn_iscsi = {'host': 'host', 'initiator': 'foo'} conn_fc = {'host': 'host', 'wwpns': 'bar'} conn_both = {'host': 'host', 'initiator': 'foo', 'wwpns': 'bar'} self.iscsi_driver._state['enabled_protocols'] = set(['iSCSI']) self.iscsi_driver.validate_connector(conn_iscsi) self.iscsi_driver.validate_connector(conn_both) self.assertRaises(exception.InvalidConnectorException, self.iscsi_driver.validate_connector, conn_fc) self.assertRaises(exception.InvalidConnectorException, self.iscsi_driver.validate_connector, conn_neither) 
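        # With both iSCSI and FC enabled below, the iSCSI-only and combined
        # connectors still validate; only a connector carrying neither
        # transport is rejected.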
self.iscsi_driver._state['enabled_protocols'] = set(['iSCSI', 'FC']) self.iscsi_driver.validate_connector(conn_iscsi) self.iscsi_driver.validate_connector(conn_both) self.assertRaises(exception.InvalidConnectorException, self.iscsi_driver.validate_connector, conn_neither) def test_storwize_terminate_iscsi_connection(self): # create a iSCSI volume volume_iSCSI = self._create_volume() extra_spec = {'capabilities:storage_protocol': ' iSCSI'} vol_type_iSCSI = volume_types.create(self.ctxt, 'iSCSI', extra_spec) volume_iSCSI['volume_type_id'] = vol_type_iSCSI['id'] connector = {'host': 'storwize-svc-host', 'wwnns': ['20000090fa17311e', '20000090fa17311f'], 'wwpns': ['ff00000000000000', 'ff00000000000001'], 'initiator': 'iqn.1993-08.org.debian:01:eac5ccc1aaa'} self.iscsi_driver.initialize_connection(volume_iSCSI, connector) self.iscsi_driver.terminate_connection(volume_iSCSI, connector) @ddt.data(({'is_multi_attach': True}, 1), ({'is_multi_attach': True}, 2), ({'is_multi_attach': False}, 1)) @mock.patch.object(storwize_svc_common.StorwizeHelpers, 'initialize_host_info') @mock.patch.object(storwize_svc_common.StorwizeSSH, 'lsvdiskhostmap') @mock.patch.object(storwize_svc_iscsi.StorwizeSVCISCSIDriver, '_do_terminate_connection') @mock.patch.object(cinder.db, 'volume_attachment_get_all_by_volume_id') @ddt.unpack def test_storwize_terminate_iscsi_multi_attach(self, vol_spec, no_of_fake_attachments, get_db_vol_attach, do_term_conn, lsvdishosmap, init_host_info): # create a iSCSI volume volume_iscsi = self._create_volume() extra_spec = {'capabilities:storage_protocol': ' iSCSI'} vol_type_iscsi = volume_types.create(self.ctxt, 'iSCSI', extra_spec) volume_iscsi['volume_type_id'] = vol_type_iscsi['id'] volume_iscsi['multiattach'] = vol_spec['is_multi_attach'] connector1 = {'host': 'storwize-svc-host', 'wwnns': ['20000090fa17311e', '20000090fa17311f'], 'wwpns': ['ff00000000000000', 'ff00000000000001'], 'initiator': 'iqn.1993-08.org.debian:01:eac5ccc1aaa'} connector2 = {'host': 'storwize-svc-host', 'wwnns': ['20000090fa17311e', '20000090fa17311f'], 'wwpns': ['ff00000000000000', 'ff00000000000001'], 'initiator': 'iqn.1993-08.org.debian:01:eac5ccc1aaa'} self.iscsi_driver.initialize_connection(volume_iscsi, connector1) self.iscsi_driver.initialize_connection(volume_iscsi, connector2) init_host_info.assert_called() for conn in [connector1, connector2]: host = self.iscsi_driver._helpers.get_host_from_connector( conn, iscsi=True) self.assertIsNotNone(host) vol_updates = {'id': volume_iscsi['id'], 'size': 1} volume_model = models.Volume(**vol_updates) attachment_updates1 = { 'volume': volume_model, 'volume_id': volume_iscsi['id'], 'id': '271eb937-5a5a-45bc-86a1-014afa8e4c37', 'attach_status': 'attached', 'attached_host': 'storwize-svc-host' } db_attachment1 = models.VolumeAttachment(**attachment_updates1) attachment_updates2 = { 'volume': volume_model, 'volume_id': volume_iscsi['id'], 'id': '9a3b9fc4-2524-4367-8092-5382a43e5125', 'attach_status': 'attached', 'attached_host': 'storwize-svc-host' } db_attachment2 = models.VolumeAttachment(**attachment_updates2) if no_of_fake_attachments == 1: get_db_vol_attach.return_value = [db_attachment1] else: get_db_vol_attach.return_value = [db_attachment1, db_attachment2] attachments = objects.VolumeAttachmentList.get_all_by_volume_id( self.ctxt, volume_iscsi['id']) volume_iscsi['volume_attachment'] = attachments attachment_list = volume_iscsi['volume_attachment'] attachment_count = 0 if volume_iscsi['multiattach']: self.iscsi_driver.terminate_connection(volume_iscsi, 
connector1) try: for attachment in attachment_list: if (attachment.attach_status == "attached" and attachment.attached_host == "storwize-svc-host"): attachment_count += 1 except AttributeError: pass if attachment_count > 1: self.assertEqual(0, do_term_conn.call_count) def test_storwize_get_host_from_connector_with_both_fc_iscsi_host(self): volume_iSCSI = self._create_volume() extra_spec = {'capabilities:storage_protocol': ' iSCSI'} vol_type_iSCSI = volume_types.create(self.ctxt, 'iSCSI', extra_spec) volume_iSCSI['volume_type_id'] = vol_type_iSCSI['id'] connector = {'host': 'storwize-svc-host', 'initiator': 'iqn.1993-08.org.debian:01:eac5ccc1aaa'} if self.USESIM: self.sim._cmd_mkhost(name='storwize-svc-host-99999999', hbawwpn='123') self.iscsi_driver.initialize_connection(volume_iSCSI, connector) self.iscsi_driver.terminate_connection(volume_iSCSI, connector) def test_storwize_iscsi_connection_snapshot(self): # create a iSCSI volume volume_iSCSI = self._create_volume() snapshot = self._generate_snap_info(volume_iSCSI.id) self.iscsi_driver.create_snapshot(snapshot) connector = {'host': 'storwize-svc-host', 'wwnns': ['20000090fa17311e', '20000090fa17311f'], 'wwpns': ['ff00000000000000', 'ff00000000000001'], 'initiator': 'iqn.1993-08.org.debian:01:eac5ccc1aaa'} self.iscsi_driver.initialize_connection_snapshot(snapshot, connector) self.iscsi_driver.terminate_connection_snapshot(snapshot, connector) def test_storwize_replication_failover_iscsi_connection_snapshot(self): volume_iSCSI = self._create_volume() snapshot = self._generate_snap_info(volume_iSCSI.id) self.iscsi_driver.create_snapshot(snapshot) connector = {'host': 'storwize-svc-host', 'wwnns': ['20000090fa17311e', '20000090fa17311f'], 'wwpns': ['ff00000000000000', 'ff00000000000001'], 'initiator': 'iqn.1993-08.org.debian:01:eac5ccc1aaa'} # a snapshot of a replication failover volume. attach will be failed with mock.patch.object(storwize_svc_common.StorwizeSVCCommonDriver, '_get_volume_replicated_type') as rep_type: rep_type.return_value = True with mock.patch.object(storwize_svc_common.StorwizeSVCCommonDriver, '_get_vol_sys_info') as sys_info: sys_info.return_value = {'volume_name': 'voliscsi', 'backend_helper': 'self._aux_backend_helpers', 'node_state': 'self._state'} self.assertRaises(exception.VolumeDriverException, self.iscsi_driver. 
initialize_connection_snapshot, snapshot, connector) @mock.patch.object(storwize_svc_common.StorwizeHelpers, 'initialize_host_info') def test_storwize_initialize_iscsi_connection_with_host_site(self, init_host): connector = {'host': 'storwize-svc-host', 'wwnns': ['20000090fa17311e', '20000090fa17311f'], 'wwpns': ['ff00000000000000', 'ff00000000000001'], 'initiator': 'iqn.1993-08.org.debian:01:eac5ccc1aaa'} volume_iSCSI_1 = self._create_volume() volume_iSCSI = self._create_volume() extra_spec = {'drivers:volume_topology': 'hyperswap', 'peer_pool': 'openstack1'} vol_type_iSCSI = volume_types.create(self.ctxt, 'iSCSI', extra_spec) volume_iSCSI['volume_type_id'] = vol_type_iSCSI['id'] volume_iSCSI_2 = self._create_volume() volume_iSCSI_2['volume_type_id'] = vol_type_iSCSI['id'] self.iscsi_driver.initialize_connection(volume_iSCSI, connector) init_host.assert_called() self.assertEqual(1, init_host.call_count) host_name = self.iscsi_driver._helpers.get_host_from_connector( connector, iscsi=True) host_info = self.iscsi_driver._helpers.ssh.lshost(host=host_name) self.assertEqual('site1', host_info[0]['site_name']) self.iscsi_driver.terminate_connection(volume_iSCSI, connector) self.iscsi_driver.initialize_connection(volume_iSCSI_1, connector) self.iscsi_driver.initialize_connection(volume_iSCSI, connector) host_site = {'site1': 'iqn.1993-08.org.debian:01:eac5ccc1aaa', 'site2': 'iqn.1993-08.org.debian:01:eac5ccc1aaa'} self._set_flag('storwize_preferred_host_site', host_site) self.assertRaises(exception.InvalidConfigurationValue, self.iscsi_driver.initialize_connection, volume_iSCSI_2, connector) @mock.patch.object(storwize_svc_iscsi.StorwizeSVCISCSIDriver, '_do_terminate_connection') @mock.patch.object(storwize_svc_iscsi.StorwizeSVCISCSIDriver, '_do_initialize_connection') def test_storwize_do_terminate_iscsi_connection(self, init_conn, term_conn): # create an iSCSI volume volume_iSCSI = self._create_volume() extra_spec = {'capabilities:storage_protocol': ' iSCSI'} vol_type_iSCSI = volume_types.create(self.ctxt, 'iSCSI', extra_spec) volume_iSCSI['volume_type_id'] = vol_type_iSCSI['id'] connector = {'host': 'storwize-svc-host', 'wwnns': ['20000090fa17311e', '20000090fa17311f'], 'wwpns': ['ff00000000000000', 'ff00000000000001'], 'initiator': 'iqn.1993-08.org.debian:01:eac5ccc1aaa'} self.iscsi_driver.initialize_connection(volume_iSCSI, connector) self.iscsi_driver.terminate_connection(volume_iSCSI, connector) init_conn.assert_called_once_with(volume_iSCSI, connector) term_conn.assert_called_once_with(volume_iSCSI, connector) @mock.patch.object(storwize_svc_iscsi.StorwizeSVCISCSIDriver, '_do_terminate_connection') def test_storwize_initialize_iscsi_connection_failure(self, term_conn): # create an iSCSI volume volume_iSCSI = self._create_volume() extra_spec = {'capabilities:storage_protocol': ' iSCSI'} vol_type_iSCSI = volume_types.create(self.ctxt, 'iSCSI', extra_spec) volume_iSCSI['volume_type_id'] = vol_type_iSCSI['id'] connector = {'host': 'storwize-svc-host', 'wwnns': ['20000090fa17311e', '20000090fa17311f'], 'wwpns': ['ff00000000000000', 'ff00000000000001'], 'initiator': 'iqn.1993-08.org.debian:01:eac5ccc1aaa'} self.iscsi_driver._state['storage_nodes'] = {} self.assertRaises(exception.VolumeBackendAPIException, self.iscsi_driver.initialize_connection, volume_iSCSI, connector) term_conn.assert_called_once_with(volume_iSCSI, connector) @mock.patch.object(storwize_svc_common.StorwizeHelpers, 'initialize_host_info') def test_storwize_terminate_iscsi_connection_multi_attach(self, init_host_info): # 
create an iSCSI volume volume_iSCSI = self._create_volume() extra_spec = {'capabilities:storage_protocol': ' iSCSI'} vol_type_iSCSI = volume_types.create(self.ctxt, 'iSCSI', extra_spec) volume_iSCSI['volume_type_id'] = vol_type_iSCSI['id'] connector = {'host': 'storwize-svc-host', 'wwnns': ['20000090fa17311e', '20000090fa17311f'], 'wwpns': ['ff00000000000000', 'ff00000000000001'], 'initiator': 'iqn.1993-08.org.debian:01:eac5ccc1aaa'} connector2 = {'host': 'STORWIZE-SVC-HOST', 'wwnns': ['30000090fa17311e', '30000090fa17311f'], 'wwpns': ['ffff000000000000', 'ffff000000000001'], 'initiator': 'iqn.1993-08.org.debian:01:eac5ccc1bbb'} # map and unmap the volume to two hosts normal case self.iscsi_driver.initialize_connection(volume_iSCSI, connector) init_host_info.assert_called() self.iscsi_driver.initialize_connection(volume_iSCSI, connector2) for conn in [connector, connector2]: host = self.iscsi_driver._helpers.get_host_from_connector( conn, iscsi=True) self.assertIsNotNone(host) self.iscsi_driver.terminate_connection(volume_iSCSI, connector) self.iscsi_driver.terminate_connection(volume_iSCSI, connector2) # validate that the host entries are deleted for conn in [connector, connector2]: host = self.iscsi_driver._helpers.get_host_from_connector(conn) self.assertIsNone(host) # map and unmap the volume to two hosts with the mapping removed self.iscsi_driver.initialize_connection(volume_iSCSI, connector) self.iscsi_driver.initialize_connection(volume_iSCSI, connector2) # Test multiple attachments case host_name = self.iscsi_driver._helpers.get_host_from_connector( connector2, iscsi=True) self.iscsi_driver._helpers.unmap_vol_from_host( volume_iSCSI['name'], host_name) host_name = self.iscsi_driver._helpers.get_host_from_connector( connector2, iscsi=True) self.assertIsNotNone(host_name) with mock.patch.object(storwize_svc_common.StorwizeSSH, 'rmvdiskhostmap') as rmmap: rmmap.side_effect = Exception('boom') self.iscsi_driver.terminate_connection(volume_iSCSI, connector2) init_host_info.assert_called() host_name = self.iscsi_driver._helpers.get_host_from_connector( connector2, iscsi=True) self.assertIsNone(host_name) # Test single attachment case self.iscsi_driver._helpers.unmap_vol_from_host( volume_iSCSI['name'], host_name) with mock.patch.object(storwize_svc_common.StorwizeSSH, 'rmvdiskhostmap') as rmmap: rmmap.side_effect = Exception('boom') self.iscsi_driver.terminate_connection(volume_iSCSI, connector) # validate that the host entries are deleted for conn in [connector, connector2]: host = self.iscsi_driver._helpers.get_host_from_connector( conn, iscsi=True) self.assertIsNone(host) def test_storwize_initialize_iscsi_connection_single_path(self): # Test the return value for _get_iscsi_properties connector = {'host': 'storwize-svc-host', 'wwnns': ['20000090fa17311e', '20000090fa17311f'], 'wwpns': ['ff00000000000000', 'ff00000000000001'], 'initiator': 'iqn.1993-08.org.debian:01:eac5ccc1aaa'} # Expected single path host-volume map return value exp_s_path = {'driver_volume_type': 'iscsi', 'data': {'target_discovered': False, 'target_iqn': 'iqn.1982-01.com.ibm:1234.sim.node1', 'target_portal': '1.234.56.78:3260', 'target_lun': 0, 'auth_method': 'CHAP', 'discovery_auth_method': 'CHAP'}} volume_iSCSI = self._create_volume() extra_spec = {'capabilities:storage_protocol': ' iSCSI'} vol_type_iSCSI = volume_types.create(self.ctxt, 'iSCSI', extra_spec) volume_iSCSI['volume_type_id'] = vol_type_iSCSI['id'] # Make sure that the volumes have been created self._assert_vol_exists(volume_iSCSI['name'], True) # 
Check case where no hosts exist ret = self.iscsi_driver._helpers.get_host_from_connector( connector, iscsi=True) self.assertIsNone(ret) # Initialize connection to map volume to a host ret = self.iscsi_driver.initialize_connection( volume_iSCSI, connector) self.assertEqual(exp_s_path['driver_volume_type'], ret['driver_volume_type']) # Check the single path host-volume map return value for k, v in exp_s_path['data'].items(): self.assertEqual(v, ret['data'][k]) ret = self.iscsi_driver._helpers.get_host_from_connector( connector, iscsi=True) self.assertIsNotNone(ret) def test_storwize_initialize_iscsi_connection_multipath(self): # Test the return value for _get_iscsi_properties connector = {'host': 'storwize-svc-host', 'wwnns': ['20000090fa17311e', '20000090fa17311f'], 'wwpns': ['ff00000000000000', 'ff00000000000001'], 'initiator': 'iqn.1993-08.org.debian:01:eac5ccc1aaa', 'multipath': True} # Expected multipath host-volume map return value exp_m_path = {'driver_volume_type': 'iscsi', 'data': {'target_discovered': False, 'target_iqn': 'iqn.1982-01.com.ibm:1234.sim.node1', 'target_portal': '1.234.56.78:3260', 'target_lun': 0, 'target_iqns': [ 'iqn.1982-01.com.ibm:1234.sim.node1', 'iqn.1982-01.com.ibm:1234.sim.node1', 'iqn.1982-01.com.ibm:1234.sim.node2'], 'target_portals': ['1.234.56.78:3260', '1.234.56.80:3260', '1.234.56.79:3260'], 'target_luns': [0, 0, 0], 'auth_method': 'CHAP', 'discovery_auth_method': 'CHAP'}} volume_iSCSI = self._create_volume() extra_spec = {'capabilities:storage_protocol': ' iSCSI'} vol_type_iSCSI = volume_types.create(self.ctxt, 'iSCSI', extra_spec) volume_iSCSI['volume_type_id'] = vol_type_iSCSI['id'] # Check case where no hosts exist ret = self.iscsi_driver._helpers.get_host_from_connector( connector, iscsi=True) self.assertIsNone(ret) # Initialize connection to map volume to a host ret = self.iscsi_driver.initialize_connection( volume_iSCSI, connector) self.assertEqual(exp_m_path['driver_volume_type'], ret['driver_volume_type']) # Check the multipath host-volume map return value # target_iqns and target_portals have no guaranteed order self.assertCountEqual(exp_m_path['data']['target_iqns'], ret['data']['target_iqns']) del exp_m_path['data']['target_iqns'] self.assertCountEqual(exp_m_path['data']['target_portals'], ret['data']['target_portals']) del exp_m_path['data']['target_portals'] for k, v in exp_m_path['data'].items(): self.assertEqual(v, ret['data'][k]) ret = self.iscsi_driver._helpers.get_host_from_connector( connector, iscsi=True) self.assertIsNotNone(ret) def test_storwize_svc_iscsi_host_maps(self): # Create two volumes to be used in mappings ctxt = context.get_admin_context() volume1 = self._generate_vol_info(None, None) self.iscsi_driver.create_volume(volume1) volume2 = self._generate_vol_info(None, None) self.iscsi_driver.create_volume(volume2) # Create volume types that we created types = {} for protocol in ['iSCSI']: opts = {'storage_protocol': ' ' + protocol} types[protocol] = volume_types.create(ctxt, protocol, opts) expected = {'iSCSI': {'driver_volume_type': 'iscsi', 'data': {'target_discovered': False, 'target_iqn': 'iqn.1982-01.com.ibm:1234.sim.node1', 'target_portal': '1.234.56.78:3260', 'target_lun': 0, 'auth_method': 'CHAP', 'discovery_auth_method': 'CHAP'}}} volume1['volume_type_id'] = types[protocol]['id'] volume2['volume_type_id'] = types[protocol]['id'] # Check case where no hosts exist if self.USESIM: ret = self.iscsi_driver._helpers.get_host_from_connector( self._connector) self.assertIsNone(ret) # Make sure that the volumes have been 
created self._assert_vol_exists(volume1['name'], True) self._assert_vol_exists(volume2['name'], True) # Initialize connection from the first volume to a host ret = self.iscsi_driver.initialize_connection( volume1, self._connector) self.assertEqual(expected[protocol]['driver_volume_type'], ret['driver_volume_type']) for k, v in expected[protocol]['data'].items(): self.assertEqual(v, ret['data'][k]) # Initialize again, should notice it and do nothing ret = self.iscsi_driver.initialize_connection( volume1, self._connector) self.assertEqual(expected[protocol]['driver_volume_type'], ret['driver_volume_type']) for k, v in expected[protocol]['data'].items(): self.assertEqual(v, ret['data'][k]) # Try to delete the 1st volume (should fail because it is mapped) self.assertRaises(exception.VolumeBackendAPIException, self.iscsi_driver.delete_volume, volume1) ret = self.iscsi_driver.terminate_connection(volume1, self._connector) if self.USESIM: ret = self.iscsi_driver._helpers.get_host_from_connector( self._connector) self.assertIsNone(ret) # Check cases with no auth set for host if self.USESIM: for auth_enabled in [True, False]: for host_exists in ['yes-auth', 'yes-noauth', 'no']: self._set_flag('storwize_svc_iscsi_chap_enabled', auth_enabled) case = 'en' + str( auth_enabled) + 'ex' + str(host_exists) conn_na = {'initiator': 'test:init:%s' % random.randint(10000, 99999), 'ip': '11.11.11.11', 'host': 'host-%s' % case} if host_exists.startswith('yes'): self.sim._add_host_to_list(conn_na) if host_exists == 'yes-auth': kwargs = {'chapsecret': 'foo', 'obj': conn_na['host']} self.sim._cmd_chhost(**kwargs) volume1['volume_type_id'] = types['iSCSI']['id'] init_ret = self.iscsi_driver.initialize_connection(volume1, conn_na) host_name = self.sim._host_in_list(conn_na['host']) chap_ret = ( self.iscsi_driver._helpers.get_chap_secret_for_host( host_name)) if auth_enabled or host_exists == 'yes-auth': self.assertIn('auth_password', init_ret['data']) self.assertIsNotNone(chap_ret) else: self.assertNotIn('auth_password', init_ret['data']) self.assertIsNone(chap_ret) self.iscsi_driver.terminate_connection(volume1, conn_na) self._set_flag('storwize_svc_iscsi_chap_enabled', True) # Test no preferred node if self.USESIM: self.sim.error_injection('lsvdisk', 'no_pref_node') self.assertRaises(exception.VolumeBackendAPIException, self.iscsi_driver.initialize_connection, volume1, self._connector) # Initialize connection from the second volume to the host with no # preferred node set if in simulation mode, otherwise, just # another initialize connection. 
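# Note: unlike the 'no_pref_node' injection above, which made the attach
# raise VolumeBackendAPIException, a blank preferred node is expected to
# be tolerated, so the attach below should still succeed.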
if self.USESIM: self.sim.error_injection('lsvdisk', 'blank_pref_node') self.iscsi_driver.initialize_connection(volume2, self._connector) # Try to remove connection from host that doesn't exist (should fail) conn_no_exist = self._connector.copy() conn_no_exist['initiator'] = 'i_dont_exist' conn_no_exist['wwpns'] = ['0000000000000000'] self.assertRaises(exception.VolumeDriverException, self.iscsi_driver.terminate_connection, volume1, conn_no_exist) # Try to remove connection from volume that isn't mapped (should print # message but NOT fail) unmapped_vol = self._generate_vol_info(None, None) self.iscsi_driver.create_volume(unmapped_vol) self.iscsi_driver.terminate_connection(unmapped_vol, self._connector) self.iscsi_driver.delete_volume(unmapped_vol) # Remove the mapping from the 1st volume and delete it self.iscsi_driver.terminate_connection(volume1, self._connector) self.iscsi_driver.delete_volume(volume1) self._assert_vol_exists(volume1['name'], False) # Make sure our host still exists host_name = self.iscsi_driver._helpers.get_host_from_connector( self._connector, iscsi=True) self.assertIsNotNone(host_name) # Remove the mapping from the 2nd volume. The host should # be automatically removed because there are no more mappings. self.iscsi_driver.terminate_connection(volume2, self._connector) # Check if we successfully terminate connections when the host is not # specified (see bug #1244257) fake_conn = {'ip': '127.0.0.1', 'initiator': 'iqn.fake'} self.iscsi_driver.initialize_connection(volume2, self._connector) host_name = self.iscsi_driver._helpers.get_host_from_connector( self._connector, iscsi=True) self.assertIsNotNone(host_name) self.iscsi_driver.terminate_connection(volume2, fake_conn) host_name = self.iscsi_driver._helpers.get_host_from_connector( self._connector, iscsi=True) self.assertIsNone(host_name) self.iscsi_driver.delete_volume(volume2) self._assert_vol_exists(volume2['name'], False) # Delete volume types that we created for protocol in ['iSCSI']: volume_types.destroy(ctxt, types[protocol]['id']) # Check if our host still exists (it should not) if self.USESIM: ret = ( self.iscsi_driver._helpers.get_host_from_connector( self._connector, iscsi=True)) self.assertIsNone(ret) def test_storwize_svc_iscsi_multi_host_maps(self): # We can't test connecting to multiple hosts from a single host when # using real storage if not self.USESIM: return # Create a volume to be used in mappings ctxt = context.get_admin_context() volume = self._generate_vol_info(None, None) self.iscsi_driver.create_volume(volume) # Create volume types for protocols types = {} for protocol in ['iSCSI']: opts = {'storage_protocol': ' ' + protocol} types[protocol] = volume_types.create(ctxt, protocol, opts) # Create a connector for the second 'host' wwpns = [str(random.randint(0, 9999999999999999)).zfill(16), str(random.randint(0, 9999999999999999)).zfill(16)] initiator = 'test.initiator.%s' % str(random.randint(10000, 99999)) conn2 = {'ip': '1.234.56.79', 'host': 'storwize-svc-test2', 'wwpns': wwpns, 'initiator': initiator} # Check protocols for iSCSI volume['volume_type_id'] = types[protocol]['id'] # Make sure that the volume has been created self._assert_vol_exists(volume['name'], True) self.iscsi_driver.initialize_connection(volume, self._connector) self._set_flag('storwize_svc_multihostmap_enabled', False) self.assertRaises( exception.CinderException, self.iscsi_driver.initialize_connection, volume, conn2) self._set_flag('storwize_svc_multihostmap_enabled', True) 
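# With multi-host mapping enabled again, attaching the same volume to the
# second host should succeed; both connections are then cleaned up below.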
self.iscsi_driver.initialize_connection(volume, conn2) self.iscsi_driver.terminate_connection(volume, conn2) self.iscsi_driver.terminate_connection(volume, self._connector) def test_add_vdisk_copy_iscsi(self): # Ensure only iSCSI is available self.iscsi_driver._state['enabled_protocols'] = set(['iSCSI']) volume = self._generate_vol_info(None, None) self.iscsi_driver.create_volume(volume) self.iscsi_driver.add_vdisk_copy(volume['name'], 'fake-pool', None) @ddt.ddt class StorwizeSVCFcDriverTestCase(test.TestCase): @mock.patch.object(time, 'sleep') def setUp(self, mock_sleep): super(StorwizeSVCFcDriverTestCase, self).setUp() self.USESIM = True if self.USESIM: self.fc_driver = StorwizeSVCFcFakeDriver( configuration=conf.Configuration(None)) self._def_flags = {'san_ip': 'hostname', 'san_login': 'user', 'san_password': 'pass', 'storwize_svc_volpool_name': SVC_POOLS, 'storwize_svc_flashcopy_timeout': 20, 'storwize_svc_flashcopy_rate': 49, 'storwize_svc_multipath_enabled': False, 'storwize_svc_allow_tenant_qos': True} wwpns = [ str(random.randint(0, 9999999999999999)).zfill(16), str(random.randint(0, 9999999999999999)).zfill(16)] initiator = 'test.initiator.%s' % str( random.randint(10000, 99999)) self._connector = {'ip': '1.234.56.78', 'host': 'storwize-svc-test', 'wwpns': wwpns, 'initiator': initiator} self.sim = StorwizeSVCManagementSimulator(SVC_POOLS) self.fc_driver.set_fake_storage(self.sim) self.ctxt = context.get_admin_context() self._reset_flags() self.ctxt = context.get_admin_context() self.db = cinder.db self.fc_driver.db = self.db self.fc_driver.do_setup(None) self.fc_driver.check_for_setup_error() self.fc_driver._helpers.check_fcmapping_interval = 0 def _set_flag(self, flag, value): group = self.fc_driver.configuration.config_group self.fc_driver.configuration.set_override(flag, value, group) def _reset_flags(self): self.fc_driver.configuration.local_conf.reset() for k, v in self._def_flags.items(): self._set_flag(k, v) def _create_volume(self, **kwargs): pool = _get_test_pool() prop = {'host': 'openstack@svc#%s' % pool, 'size': 1, 'volume_type_id': self.vt['id']} for p in prop.keys(): if p not in kwargs: kwargs[p] = prop[p] vol = testutils.create_volume(self.ctxt, **kwargs) self.fc_driver.create_volume(vol) return vol def _delete_volume(self, volume): self.fc_driver.delete_volume(volume) self.db.volume_destroy(self.ctxt, volume['id']) def _generate_vol_info(self, vol_name, vol_id): pool = _get_test_pool() prop = {'mdisk_grp_name': pool} if vol_name: prop.update(volume_name=vol_name, volume_id=vol_id, volume_size=10) else: prop.update(size=10, volume_type_id=None, mdisk_grp_name=pool, host='openstack@svc#%s' % pool) vol = testutils.create_volume(self.ctxt, **prop) return vol def _generate_snap_info(self, vol_id, size=10): prop = {'volume_id': vol_id, 'volume_size': size} snap = testutils.create_snapshot(self.ctxt, **prop) return snap def _assert_vol_exists(self, name, exists): is_vol_defined = self.fc_driver._helpers.is_vdisk_defined(name) self.assertEqual(exists, is_vol_defined) def test_storwize_get_host_with_fc_connection(self): # Create a FC host del self._connector['initiator'] helper = self.fc_driver._helpers host_name = helper.create_host(self._connector) # Remove the first wwpn from connector, and then try get host wwpns = self._connector['wwpns'] wwpns.remove(wwpns[0]) host_name = helper.get_host_from_connector(self._connector) self.assertIsNotNone(host_name) def test_storwize_fc_connection_snapshot(self): # create a fc volume snapshot volume_fc = self._create_volume() 
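# The snapshot attach path mirrors the volume attach path: take a snapshot
# of the volume, then drive initialize_connection_snapshot and
# terminate_connection_snapshot against it with an FC connector.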
snapshot = self._generate_snap_info(volume_fc.id) self.fc_driver.create_snapshot(snapshot) connector = {'host': 'storwize-svc-host', 'wwnns': ['20000090fa17311e', '20000090fa17311f'], 'wwpns': ['ff00000000000000', 'ff00000000000001'], 'initiator': 'iqn.1993-08.org.debian:01:eac5ccc1aaa'} self.fc_driver.initialize_connection_snapshot(snapshot, connector) self.fc_driver.terminate_connection_snapshot(snapshot, connector) def test_storwize_replication_failover_fc_connection_snapshot(self): volume_fc = self._create_volume() volume_fc['replication_status'] = fields.ReplicationStatus.FAILED_OVER snapshot = self._generate_snap_info(volume_fc.id) self.fc_driver.create_snapshot(snapshot) connector = {'host': 'storwize-svc-host', 'wwnns': ['20000090fa17311e', '20000090fa17311f'], 'wwpns': ['ff00000000000000', 'ff00000000000001'], 'initiator': 'iqn.1993-08.org.debian:01:eac5ccc1aaa'} # a snapshot of a replication failover volume. attach will be failed with mock.patch.object(storwize_svc_common.StorwizeSVCCommonDriver, '_get_volume_replicated_type') as rep_type: rep_type.return_value = True with mock.patch.object(storwize_svc_common.StorwizeSVCCommonDriver, '_get_vol_sys_info') as sys_info: sys_info.return_value = {'volume_name': 'volfc', 'backend_helper': 'self._aux_backend_helpers', 'node_state': 'self._state'} self.assertRaises(exception.VolumeDriverException, self.fc_driver. initialize_connection_snapshot, snapshot, connector) def test_storwize_get_host_with_fc_connection_with_volume(self): # create a FC volume volume_fc = self._generate_vol_info(None, None) self.fc_driver.create_volume(volume_fc) extra_spec = {'capabilities:storage_protocol': ' FC'} vol_type_fc = volume_types.create(self.ctxt, 'FC', extra_spec) volume_fc['volume_type_id'] = vol_type_fc['id'] connector = {'host': 'storwize-svc-host', 'wwnns': ['20000090fa17311e', '20000090fa17311f'], 'wwpns': ['ff00000000000000', 'ff00000000000001'], 'initiator': 'iqn.1993-08.org.debian:01:eac5ccc1aaa'} self.fc_driver.initialize_connection(volume_fc, connector) # Create a FC host helper = self.fc_driver._helpers host_name = helper.get_host_from_connector( connector, volume_fc['name']) self.assertIsNotNone(host_name) def test_storwize_get_host_from_connector_with_lshost_failure(self): self._connector.pop('initiator') helper = self.fc_driver._helpers # Create two hosts. The first is not related to the connector and # we use the simulator for that. The second is for the connector. # We will force the missing_host error for the first host, but # then tolerate and find the second host on the slow path normally. if self.USESIM: self.sim._cmd_mkhost(name='storwize-svc-test-9', hbawwpn='123456') helper.create_host(self._connector) # tell lshost to fail while calling get_host_from_connector if self.USESIM: # tell lshost to fail while called from get_host_from_connector self.sim.error_injection('lshost', 'missing_host') # tell lsfabric to skip rows so that we skip past fast path self.sim.error_injection('lsfabric', 'remove_rows') # Run test host_name = helper.get_host_from_connector(self._connector) self.assertIsNotNone(host_name) def test_storwize_get_host_from_connector_with_lshost_failure2(self): self._connector.pop('initiator') self._connector['wwpns'] = [] # Clearing will skip over fast-path helper = self.fc_driver._helpers if self.USESIM: # Add a host to the simulator. We don't need it to match the # connector since we will force a bad failure for lshost. 
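# The 'bigger_troubles' injection below makes lshost fail hard, so
# get_host_from_connector is expected to raise VolumeBackendAPIException
# rather than falling back to the slow path.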
self.sim._cmd_mkhost(name='DifferentHost', hbawwpn='123456') # tell lshost to fail badly while called from # get_host_from_connector self.sim.error_injection('lshost', 'bigger_troubles') self.assertRaises(exception.VolumeBackendAPIException, helper.get_host_from_connector, self._connector) def test_storwize_get_host_from_connector_not_found(self): self._connector.pop('initiator') helper = self.fc_driver._helpers # Create several hosts via the simulator, none of which matches the # connector. With the fast path skipped, get_host_from_connector # should report that no host was found. if self.USESIM: self.sim._cmd_mkhost(name='storwize-svc-test-3', hbawwpn='1234567') self.sim._cmd_mkhost(name='storwize-svc-test-2', hbawwpn='2345678') self.sim._cmd_mkhost(name='storwize-svc-test-1', hbawwpn='3456789') self.sim._cmd_mkhost(name='A-Different-host', hbawwpn='9345678') self.sim._cmd_mkhost(name='B-Different-host', hbawwpn='8345678') self.sim._cmd_mkhost(name='C-Different-host', hbawwpn='7345678') if self.USESIM: # tell lsfabric to skip rows so that we skip past fast path self.sim.error_injection('lsfabric', 'remove_rows') # Run test host_name = helper.get_host_from_connector(self._connector) self.assertIsNone(host_name) def test_storwize_get_host_from_connector_fast_path(self): self._connector.pop('initiator') helper = self.fc_driver._helpers # Create two hosts. Our lshost will return the hosts in sorted # order. The extra host will be returned before the target # host. If we get detailed lshost info on our host without # getting detailed info on the other host, we used the fast path. if self.USESIM: self.sim._cmd_mkhost(name='A-DifferentHost', hbawwpn='123456') helper.create_host(self._connector) # tell lshost to fail while calling get_host_from_connector if self.USESIM: # tell lshost to fail while called from get_host_from_connector self.sim.error_injection('lshost', 'fail_fastpath') # tell lsfabric to skip rows so that we skip past fast path self.sim.error_injection('lsfabric', 'remove_rows') # Run test host_name = helper.get_host_from_connector(self._connector) self.assertIsNotNone(host_name) # Need to assert that lshost was actually called. The way # we do that is check that the next simulator error for lshost # has not been reset. self.assertEqual(self.sim._next_cmd_error['lshost'], 'fail_fastpath', "lshost was not called in the simulator. The " "queued error still remains.") def test_storwize_initiator_multiple_wwpns_connected(self): # Generate us a test volume volume = self._create_volume() # Fibre Channel volume type extra_spec = {'capabilities:storage_protocol': ' FC'} vol_type = volume_types.create(self.ctxt, 'FC', extra_spec) volume['volume_type_id'] = vol_type['id'] # Make sure that the volumes have been created self._assert_vol_exists(volume['name'], True) # Set up one WWPN that won't match and one that will.
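# ('AABBCCDDEEFF0010' appears in the mocked mapped_wwpns below, while
# '123456789ABCDEF0' does not.)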
self.fc_driver._state['storage_nodes']['1']['WWPN'] = [ '123456789ABCDEF0', 'AABBCCDDEEFF0010'] wwpns = ['ff00000000000000', 'ff00000000000001'] connector = {'host': 'storwize-svc-test', 'wwpns': wwpns} with mock.patch.object(storwize_svc_common.StorwizeHelpers, 'get_conn_fc_wwpns') as get_mappings: mapped_wwpns = ['AABBCCDDEEFF0001', 'AABBCCDDEEFF0002', 'AABBCCDDEEFF0010', 'AABBCCDDEEFF0012'] get_mappings.return_value = mapped_wwpns # Initialize the connection init_ret = self.fc_driver.initialize_connection(volume, connector) # Make sure we return all wwpns which where mapped as part of the # connection self.assertEqual(mapped_wwpns, init_ret['data']['target_wwn']) def test_storwize_svc_fc_validate_connector(self): conn_neither = {'host': 'host'} conn_iscsi = {'host': 'host', 'initiator': 'foo'} conn_fc = {'host': 'host', 'wwpns': 'bar'} conn_both = {'host': 'host', 'initiator': 'foo', 'wwpns': 'bar'} self.fc_driver._state['enabled_protocols'] = set(['FC']) self.fc_driver.validate_connector(conn_fc) self.fc_driver.validate_connector(conn_both) self.assertRaises(exception.InvalidConnectorException, self.fc_driver.validate_connector, conn_iscsi) self.assertRaises(exception.InvalidConnectorException, self.fc_driver.validate_connector, conn_neither) self.fc_driver._state['enabled_protocols'] = set(['iSCSI', 'FC']) self.fc_driver.validate_connector(conn_fc) self.fc_driver.validate_connector(conn_both) self.assertRaises(exception.InvalidConnectorException, self.fc_driver.validate_connector, conn_neither) def test_storwize_terminate_fc_connection(self): # create a FC volume volume_fc = self._create_volume() extra_spec = {'capabilities:storage_protocol': ' FC'} vol_type_fc = volume_types.create(self.ctxt, 'FC', extra_spec) volume_fc['volume_type_id'] = vol_type_fc['id'] connector = {'host': 'storwize-svc-host', 'wwnns': ['20000090fa17311e', '20000090fa17311f'], 'wwpns': ['ff00000000000000', 'ff00000000000001'], 'initiator': 'iqn.1993-08.org.debian:01:eac5ccc1aaa'} self.fc_driver.initialize_connection(volume_fc, connector) self.fc_driver.initialize_connection(volume_fc, connector) self.fc_driver.terminate_connection(volume_fc, connector) with mock.patch.object( storwize_svc_common.StorwizeSSH, 'mkvdiskhostmap') as mkvdiskhostmap: ex = exception.VolumeBackendAPIException(data='CMMVC5879E') mkvdiskhostmap.side_effect = [ex, ex, mock.MagicMock()] self.fc_driver.initialize_connection(volume_fc, connector) self.fc_driver.terminate_connection(volume_fc, connector) mkvdiskhostmap.side_effect = ex self.assertRaises(exception.VolumeBackendAPIException, self.fc_driver.initialize_connection, volume_fc, connector) ex1 = exception.VolumeBackendAPIException(data='CMMVC6071E') mkvdiskhostmap.side_effect = ex1 self._set_flag('storwize_svc_multihostmap_enabled', False) self.assertRaises(exception.VolumeDriverException, self.fc_driver.initialize_connection, volume_fc, connector) ex2 = exception.VolumeBackendAPIException(data='CMMVC5707E') mkvdiskhostmap.side_effect = ex2 self.assertRaises(exception.VolumeBackendAPIException, self.fc_driver.initialize_connection, volume_fc, connector) def test_storwize_initialize_fc_connection_with_host_site(self): connector = {'host': 'storwize-svc-host', 'wwnns': ['20000090fa17311e', '20000090fa17311f'], 'wwpns': ['ffff000000000000', 'ffff000000000001'], 'initiator': 'iqn.1993-08.org.debian:01:eac5ccc1aaa'} # attach hyperswap volume without host_site volume_fc = self._create_volume() extra_spec = {'drivers:volume_topology': 'hyperswap', 'peer_pool': 'openstack1'} vol_type_fc = 
volume_types.create(self.ctxt, 'FC', extra_spec) volume_fc['volume_type_id'] = vol_type_fc['id'] volume_fc_2 = self._create_volume() volume_fc_2['volume_type_id'] = vol_type_fc['id'] self.assertRaises(exception.VolumeDriverException, self.fc_driver.initialize_connection, volume_fc, connector) # the wwpns of 1 host config to 2 different sites host_site = {'site1': 'ffff000000000000', 'site2': 'ffff000000000001'} self.fc_driver.configuration.set_override( 'storwize_preferred_host_site', host_site) self.assertRaises(exception.InvalidConfigurationValue, self.fc_driver.initialize_connection, volume_fc, connector) # All the wwpns of this host are not configured. host_site_2 = {'site1': 'ff00000000000000', 'site2': 'ff00000000000001'} self.fc_driver.configuration.set_override( 'storwize_preferred_host_site', host_site_2) self.assertRaises(exception.VolumeDriverException, self.fc_driver.initialize_connection, volume_fc, connector) # All the wwpns of this host are configured host_site_3 = {'site1': 'ffff000000000000&ffff000000000001'} self.fc_driver.configuration.set_override( 'storwize_preferred_host_site', host_site_3) self.fc_driver.initialize_connection(volume_fc, connector) host_name = self.fc_driver._helpers.get_host_from_connector( connector, iscsi=True) host_info = self.fc_driver._helpers.ssh.lshost(host=host_name) self.assertEqual('site1', host_info[0]['site_name']) # Partial wwpns of this host are configured host_site_4 = {'site1': 'ff00000000000000', 'site2': 'ffff000000000001'} self.fc_driver.configuration.set_override( 'storwize_preferred_host_site', host_site_4) self.assertRaises(exception.InvalidConfigurationValue, self.fc_driver.initialize_connection, volume_fc_2, connector) @mock.patch.object(storwize_svc_fc.StorwizeSVCFCDriver, '_do_terminate_connection') @mock.patch.object(storwize_svc_fc.StorwizeSVCFCDriver, '_do_initialize_connection') def test_storwize_do_terminate_fc_connection(self, init_conn, term_conn): # create a FC volume volume_fc = self._create_volume() extra_spec = {'capabilities:storage_protocol': ' FC'} vol_type_fc = volume_types.create(self.ctxt, 'FC', extra_spec) volume_fc['volume_type_id'] = vol_type_fc['id'] connector = {'host': 'storwize-svc-host', 'wwnns': ['20000090fa17311e', '20000090fa17311f'], 'wwpns': ['ff00000000000000', 'ff00000000000001'], 'initiator': 'iqn.1993-08.org.debian:01:eac5ccc1aaa'} self.fc_driver.initialize_connection(volume_fc, connector) self.fc_driver.terminate_connection(volume_fc, connector) init_conn.assert_called_once_with(volume_fc, connector) term_conn.assert_called_once_with(volume_fc, connector) @mock.patch.object(storwize_svc_fc.StorwizeSVCFCDriver, '_do_terminate_connection') def test_storwize_initialize_fc_connection_failure(self, term_conn): # create a FC volume volume_fc = self._create_volume() extra_spec = {'capabilities:storage_protocol': ' FC'} vol_type_fc = volume_types.create(self.ctxt, 'FC', extra_spec) volume_fc['volume_type_id'] = vol_type_fc['id'] connector = {'host': 'storwize-svc-host', 'wwnns': ['20000090fa17311e', '20000090fa17311f'], 'wwpns': ['ff00000000000000', 'ff00000000000001'], 'initiator': 'iqn.1993-08.org.debian:01:eac5ccc1aaa'} self.fc_driver._state['storage_nodes'] = {} self.assertRaises(exception.VolumeBackendAPIException, self.fc_driver.initialize_connection, volume_fc, connector) term_conn.assert_called_once_with(volume_fc, connector) @ddt.data(({'is_multi_attach': True}, 1), ({'is_multi_attach': True}, 2), ({'is_multi_attach': False}, 1)) @mock.patch.object(storwize_svc_common.StorwizeHelpers, 
'initialize_host_info') @mock.patch.object(storwize_svc_common.StorwizeSSH, 'lsvdiskhostmap') @mock.patch.object(storwize_svc_fc.StorwizeSVCFCDriver, '_do_terminate_connection') @mock.patch.object(cinder.db, 'volume_attachment_get_all_by_volume_id') @ddt.unpack def test_storwize_terminate_conn_fc_multi_attach(self, vol_spec, no_of_fake_attachments, get_db_vol_attach, do_term_conn, lsvdishosmap, init_host_info): # create a FC volume volume_fc = self._create_volume() extra_spec = {'capabilities:storage_protocol': ' FC'} vol_type_fc = volume_types.create(self.ctxt, 'FC', extra_spec) volume_fc['volume_type_id'] = vol_type_fc['id'] volume_fc['multiattach'] = vol_spec['is_multi_attach'] connector1 = {'host': 'storwize-svc-host', 'wwnns': ['20000090fa17311e', '20000090fa17311f'], 'wwpns': ['ff00000000000000', 'ff00000000000001'], 'initiator': 'iqn.1993-08.org.debian:01:eac5ccc1aaa'} connector2 = {'host': 'storwize-svc-host', 'wwnns': ['20000090fa17311e', '20000090fa17311f'], 'wwpns': ['ff00000000000000', 'ff00000000000001'], 'initiator': 'iqn.1993-08.org.debian:01:eac5ccc1aaa'} self.fc_driver.initialize_connection(volume_fc, connector1) self.fc_driver.initialize_connection(volume_fc, connector2) init_host_info.assert_called() for conn in [connector1, connector2]: host = self.fc_driver._helpers.get_host_from_connector(conn) self.assertIsNotNone(host) vol_updates = {'id': volume_fc['id'], 'size': 1} volume_model = models.Volume(**vol_updates) attachment_updates = { 'volume': volume_model, 'volume_id': volume_fc['id'], 'id': '271eb937-5a5a-45bc-86a1-014afa8e4c37', 'attach_status': 'attached', 'attached_host': 'storwize-svc-host' } db_attachment1 = models.VolumeAttachment(**attachment_updates) attachment_updates2 = { 'volume': volume_model, 'volume_id': volume_fc['id'], 'id': '9a3b9fc4-2524-4367-8092-5382a43e5125', 'attach_status': 'attached', 'attached_host': 'storwize-svc-host' } db_attachment2 = models.VolumeAttachment(**attachment_updates2) if no_of_fake_attachments == 1: get_db_vol_attach.return_value = [db_attachment1] else: get_db_vol_attach.return_value = [db_attachment1, db_attachment2] attachments = objects.VolumeAttachmentList.get_all_by_volume_id( self.ctxt, volume_fc['id']) volume_fc['volume_attachment'] = attachments attachment_list = volume_fc['volume_attachment'] attachment_count = 0 if volume_fc['multiattach']: self.fc_driver.terminate_connection(volume_fc, connector1) try: for attachment in attachment_list: if (attachment.attach_status == "attached" and attachment.attached_host == "storwize-svc-host"): attachment_count += 1 except AttributeError: pass if attachment_count > 1: self.assertEqual(0, do_term_conn.call_count) def test_storwize_terminate_fc_connection_multi_attach(self): # create a FC volume volume_fc = self._create_volume() extra_spec = {'capabilities:storage_protocol': ' FC'} vol_type_fc = volume_types.create(self.ctxt, 'FC', extra_spec) volume_fc['volume_type_id'] = vol_type_fc['id'] connector = {'host': 'storwize-svc-host', 'wwnns': ['20000090fa17311e', '20000090fa17311f'], 'wwpns': ['ff00000000000000', 'ff00000000000001'], 'initiator': 'iqn.1993-08.org.debian:01:eac5ccc1aaa'} connector2 = {'host': 'STORWIZE-SVC-HOST', 'wwnns': ['30000090fa17311e', '30000090fa17311f'], 'wwpns': ['ffff000000000000', 'ffff000000000001'], 'initiator': 'iqn.1993-08.org.debian:01:eac5ccc1bbb'} # map and unmap the volume to two hosts normal case self.fc_driver.initialize_connection(volume_fc, connector) self.fc_driver.initialize_connection(volume_fc, connector2) # validate that the host 
entries are created for conn in [connector, connector2]: host = self.fc_driver._helpers.get_host_from_connector(conn) self.assertIsNotNone(host) self.fc_driver.terminate_connection(volume_fc, connector) self.fc_driver.terminate_connection(volume_fc, connector2) # validate that the host entries are deleted for conn in [connector, connector2]: host = self.fc_driver._helpers.get_host_from_connector(conn) self.assertIsNone(host) # map and unmap the volume to two hosts with the mapping gone self.fc_driver.initialize_connection(volume_fc, connector) self.fc_driver.initialize_connection(volume_fc, connector2) # Test multiple attachments case host_name = self.fc_driver._helpers.get_host_from_connector(connector2) self.fc_driver._helpers.unmap_vol_from_host( volume_fc['name'], host_name) host_name = self.fc_driver._helpers.get_host_from_connector(connector2) self.assertIsNotNone(host_name) with mock.patch.object(storwize_svc_common.StorwizeSSH, 'rmvdiskhostmap') as rmmap: rmmap.side_effect = Exception('boom') self.fc_driver.terminate_connection(volume_fc, connector2) host_name = self.fc_driver._helpers.get_host_from_connector(connector2) self.assertIsNone(host_name) # Test single attachment case self.fc_driver._helpers.unmap_vol_from_host( volume_fc['name'], host_name) with mock.patch.object(storwize_svc_common.StorwizeSSH, 'rmvdiskhostmap') as rmmap: rmmap.side_effect = Exception('boom') self.fc_driver.terminate_connection(volume_fc, connector) # validate that the host entries are deleted for conn in [connector, connector2]: host = self.fc_driver._helpers.get_host_from_connector(conn) self.assertIsNone(host) def test_storwize_initiator_target_map(self): # Generate us a test volume volume = self._create_volume() # FIbre Channel volume type extra_spec = {'capabilities:storage_protocol': ' FC'} vol_type = volume_types.create(self.ctxt, 'FC', extra_spec) volume['volume_type_id'] = vol_type['id'] # Make sure that the volumes have been created self._assert_vol_exists(volume['name'], True) wwpns = ['ff00000000000000', 'ff00000000000001'] connector = {'host': 'storwize-svc-test', 'wwpns': wwpns} # Initialise the connection init_ret = self.fc_driver.initialize_connection(volume, connector) # Check that the initiator_target_map is as expected init_data = {'driver_volume_type': 'fibre_channel', 'data': {'initiator_target_map': {'ff00000000000000': ['AABBCCDDEEFF0011'], 'ff00000000000001': ['AABBCCDDEEFF0011']}, 'target_discovered': False, 'target_lun': 0, 'target_wwn': ['AABBCCDDEEFF0011'], 'volume_id': volume['id'] } } self.assertEqual(init_data, init_ret) # Terminate connection term_ret = self.fc_driver.terminate_connection(volume, connector) # Check that the initiator_target_map is as expected term_data = {'driver_volume_type': 'fibre_channel', 'data': {'initiator_target_map': {'ff00000000000000': ['5005076802432ADE', '5005076802332ADE', '5005076802532ADE', '5005076802232ADE', '5005076802132ADE', '5005086802132ADE', '5005086802332ADE', '5005086802532ADE', '5005086802232ADE', '5005086802432ADE'], 'ff00000000000001': ['5005076802432ADE', '5005076802332ADE', '5005076802532ADE', '5005076802232ADE', '5005076802132ADE', '5005086802132ADE', '5005086802332ADE', '5005086802532ADE', '5005086802232ADE', '5005086802432ADE']} } } self.assertCountEqual(term_data, term_ret) @mock.patch.object(storwize_svc_common.StorwizeHelpers, 'get_conn_fc_wwpns') def test_storwize_npiv_initiator_target_map(self, get_fc_wwpns): # create a FC volume get_fc_wwpns.side_effect = [[]] with 
mock.patch.object(storwize_svc_common.StorwizeHelpers, 'get_system_info') as get_system_info: fake_system_info = {'code_level': (7, 7, 0, 0), 'topology': 'standard', 'system_name': 'storwize-svc-sim', 'system_id': '0123456789ABCDEF'} get_system_info.return_value = fake_system_info self.fc_driver.do_setup(None) volume_fc = self._create_volume() extra_spec = {'capabilities:storage_protocol': ' FC'} vol_type_fc = volume_types.create(self.ctxt, 'FC', extra_spec) volume_fc['volume_type_id'] = vol_type_fc['id'] connector = {'host': 'storwize-svc-host', 'wwnns': ['20000090fa17311e', '20000090fa17311f'], 'wwpns': ['ff00000000000000', 'ff00000000000001'], 'initiator': 'iqn.1993-08.org.debian:01:eac5ccc1aaa'} conn_info = self.fc_driver.initialize_connection(volume_fc, connector) expected_target_wwn = ['5005076801A91806', '5005076801A96CFE', '5005076801996CFE', '5005076801991806'] self.assertCountEqual(expected_target_wwn, conn_info[ 'data']['target_wwn']) # Terminate connection term_ret = self.fc_driver.terminate_connection(volume_fc, connector) target_wwn1 = term_ret['data']['initiator_target_map'][ 'ff00000000000000'] target_wwn2 = term_ret['data']['initiator_target_map'][ 'ff00000000000001'] # Check that the initiator_target_map is as expected expected_term_data = ['5005076801A96CFE', '5005076801A91806', '5005076801201806', '5005076801991806', '5005076801101806', '5005076801996CFE', '5005076801206CFE', '5005076801106CFE'] self.assertCountEqual(expected_term_data, target_wwn1) self.assertCountEqual(expected_term_data, target_wwn2) def test_storwize_svc_fc_host_maps(self): # Create two volumes to be used in mappings ctxt = context.get_admin_context() volume1 = self._generate_vol_info(None, None) self.fc_driver.create_volume(volume1) volume2 = self._generate_vol_info(None, None) self.fc_driver.create_volume(volume2) # Create volume types that we created types = {} for protocol in ['FC']: opts = {'storage_protocol': ' ' + protocol} types[protocol] = volume_types.create(ctxt, protocol, opts) expected = {'FC': {'driver_volume_type': 'fibre_channel', 'data': {'target_lun': 0, 'target_wwn': ['AABBCCDDEEFF0011'], 'target_discovered': False}}} volume1['volume_type_id'] = types[protocol]['id'] volume2['volume_type_id'] = types[protocol]['id'] # Check case where no hosts exist if self.USESIM: ret = self.fc_driver._helpers.get_host_from_connector( self._connector) self.assertIsNone(ret) # Make sure that the volumes have been created self._assert_vol_exists(volume1['name'], True) self._assert_vol_exists(volume2['name'], True) # Initialize connection from the first volume to a host ret = self.fc_driver.initialize_connection( volume1, self._connector) self.assertEqual(expected[protocol]['driver_volume_type'], ret['driver_volume_type']) for k, v in expected[protocol]['data'].items(): self.assertEqual(v, ret['data'][k]) # Initialize again, should notice it and do nothing ret = self.fc_driver.initialize_connection( volume1, self._connector) self.assertEqual(expected[protocol]['driver_volume_type'], ret['driver_volume_type']) for k, v in expected[protocol]['data'].items(): self.assertEqual(v, ret['data'][k]) # Try to delete the 1st volume (should fail because it is mapped) self.assertRaises(exception.VolumeBackendAPIException, self.fc_driver.delete_volume, volume1) # Check bad output from lsfabric for the 2nd volume if protocol == 'FC' and self.USESIM: with mock.patch.object(storwize_svc_common.StorwizeHelpers, 'get_conn_fc_wwpns') as conn_fc_wwpns: conn_fc_wwpns.return_value = [] ret = 
self.fc_driver.initialize_connection(volume2, self._connector) ret = self.fc_driver.terminate_connection(volume1, self._connector) if protocol == 'FC' and self.USESIM: # For the first volume detach, ret['data'] should be empty # only ret['driver_volume_type'] returned self.assertEqual({}, ret['data']) self.assertEqual('fibre_channel', ret['driver_volume_type']) ret = self.fc_driver.terminate_connection(volume2, self._connector) self.assertEqual('fibre_channel', ret['driver_volume_type']) # wwpn is randomly created self.assertNotEqual({}, ret['data']) if self.USESIM: ret = self.fc_driver._helpers.get_host_from_connector( self._connector) self.assertIsNone(ret) # Test no preferred node if self.USESIM: self.sim.error_injection('lsvdisk', 'no_pref_node') self.assertRaises(exception.VolumeBackendAPIException, self.fc_driver.initialize_connection, volume1, self._connector) # Initialize connection from the second volume to the host with no # preferred node set if in simulation mode, otherwise, just # another initialize connection. if self.USESIM: self.sim.error_injection('lsvdisk', 'blank_pref_node') self.fc_driver.initialize_connection(volume2, self._connector) # Try to remove connection from host that doesn't exist (should fail) conn_no_exist = self._connector.copy() conn_no_exist['initiator'] = 'i_dont_exist' conn_no_exist['wwpns'] = ['0000000000000000'] self.assertRaises(exception.VolumeDriverException, self.fc_driver.terminate_connection, volume1, conn_no_exist) # Try to remove connection from volume that isn't mapped (should print # message but NOT fail) unmapped_vol = self._generate_vol_info(None, None) self.fc_driver.create_volume(unmapped_vol) self.fc_driver.terminate_connection(unmapped_vol, self._connector) self.fc_driver.delete_volume(unmapped_vol) # Remove the mapping from the 1st volume and delete it self.fc_driver.terminate_connection(volume1, self._connector) self.fc_driver.delete_volume(volume1) self._assert_vol_exists(volume1['name'], False) # Make sure our host still exists host_name = self.fc_driver._helpers.get_host_from_connector( self._connector) self.assertIsNotNone(host_name) # Remove the mapping from the 2nd volume. The host should # be automatically removed because there are no more mappings. 
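# volume2 is re-attached below and then terminated with a connector that
# has no 'host' key, to exercise the bug #1244257 code path.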
self.fc_driver.terminate_connection(volume2, self._connector) # Check if we successfully terminate connections when the host is not # specified (see bug #1244257) fake_conn = {'ip': '127.0.0.1', 'initiator': 'iqn.fake'} self.fc_driver.initialize_connection(volume2, self._connector) host_name = self.fc_driver._helpers.get_host_from_connector( self._connector) self.assertIsNotNone(host_name) self.fc_driver.terminate_connection(volume2, fake_conn) host_name = self.fc_driver._helpers.get_host_from_connector( self._connector) self.assertIsNone(host_name) self.fc_driver.delete_volume(volume2) self._assert_vol_exists(volume2['name'], False) # Delete volume types that we created for protocol in ['FC']: volume_types.destroy(ctxt, types[protocol]['id']) # Check if our host still exists (it should not) if self.USESIM: ret = (self.fc_driver._helpers.get_host_from_connector( self._connector)) self.assertIsNone(ret) def test_storwize_svc_fc_multi_host_maps(self): # We can't test connecting to multiple hosts from a single host when # using real storage if not self.USESIM: return # Create a volume to be used in mappings ctxt = context.get_admin_context() volume = self._generate_vol_info(None, None) self.fc_driver.create_volume(volume) # Create volume types for protocols types = {} for protocol in ['FC']: opts = {'storage_protocol': ' ' + protocol} types[protocol] = volume_types.create(ctxt, protocol, opts) # Create a connector for the second 'host' wwpns = [str(random.randint(0, 9999999999999999)).zfill(16), str(random.randint(0, 9999999999999999)).zfill(16)] initiator = 'test.initiator.%s' % str(random.randint(10000, 99999)) conn2 = {'ip': '1.234.56.79', 'host': 'storwize-svc-test2', 'wwpns': wwpns, 'initiator': initiator} # Check protocols for FC volume['volume_type_id'] = types[protocol]['id'] # Make sure that the volume has been created self._assert_vol_exists(volume['name'], True) self.fc_driver.initialize_connection(volume, self._connector) self._set_flag('storwize_svc_multihostmap_enabled', False) self.assertRaises( exception.CinderException, self.fc_driver.initialize_connection, volume, conn2) self._set_flag('storwize_svc_multihostmap_enabled', True) self.fc_driver.initialize_connection(volume, conn2) self.fc_driver.terminate_connection(volume, conn2) self.fc_driver.terminate_connection(volume, self._connector) def test_add_vdisk_copy_fc(self): # Ensure only FC is available self.fc_driver._state['enabled_protocols'] = set(['FC']) volume = self._generate_vol_info(None, None) self.fc_driver.create_volume(volume) self.fc_driver.add_vdisk_copy(volume['name'], 'fake-pool', None) @ddt.ddt class StorwizeSVCCommonDriverTestCase(test.TestCase): # Volumegroup and temporary volumegroup functionality # minimum code level constants VOLUMEGROUP_CODE_LEVEL = (8, 5, 1, 0) TEMP_VOLUMEGROUP_CODE_LEVEL = (8, 6, 2, 0) @mock.patch.object(time, 'sleep') def setUp(self, mock_sleep): super(StorwizeSVCCommonDriverTestCase, self).setUp() self.USESIM = True if self.USESIM: self._def_flags = {'san_ip': 'hostname', 'storwize_san_secondary_ip': 'secondaryname', 'san_login': 'user', 'san_password': 'pass', 'storwize_svc_volpool_name': SVC_POOLS, 'storwize_svc_flashcopy_timeout': 20, 'storwize_svc_flashcopy_rate': 49, 'storwize_svc_clean_rate': 50, 'storwize_svc_allow_tenant_qos': True} config = conf.Configuration(storwize_svc_common.storwize_svc_opts, conf.SHARED_CONF_GROUP) # Override any configs that may get set in __init__ self._reset_flags(config) self.driver = StorwizeSVCISCSIFakeDriver( configuration=config) self._driver 
= storwize_svc_iscsi.StorwizeSVCISCSIDriver( configuration=config) self.fcdriver = StorwizeSVCFcFakeDriver( configuration=config) wwpns = [ str(random.randint(0, 9999999999999999)).zfill(16), str(random.randint(0, 9999999999999999)).zfill(16)] initiator = 'test.initiator.%s' % str( random.randint(10000, 99999)) self._connector = {'ip': '1.234.56.78', 'host': 'storwize-svc-test', 'wwpns': wwpns, 'initiator': initiator} self.sim = StorwizeSVCManagementSimulator(SVC_POOLS) self.driver.set_fake_storage(self.sim) self.fcdriver.set_fake_storage(self.sim) self.ctxt = context.get_admin_context() else: self._reset_flags() self.ctxt = context.get_admin_context() self.db = cinder.db self.driver.db = self.db self.driver.do_setup(None) self.driver.check_for_setup_error() self.driver._helpers.check_fcmapping_interval = 0 self.mock_object(storwize_svc_iscsi.StorwizeSVCISCSIDriver, 'DEFAULT_GR_SLEEP', 0) self._create_test_volume_types() def _set_flag(self, flag, value, configuration=None): if not configuration: configuration = self.driver.configuration group = configuration.config_group self.override_config(flag, value, group) def _reset_flags(self, configuration=None): if not configuration: configuration = self.driver.configuration CONF.reset() for k, v in self._def_flags.items(): self._set_flag(k, v, configuration) def _assert_vol_exists(self, name, exists): is_vol_defined = self.driver._helpers.is_vdisk_defined(name) self.assertEqual(exists, is_vol_defined) def _create_test_volume_types(self): spec = {'mirror_pool': 'openstack1'} self.mirror_vol_type = self._create_volume_type(spec, 'mirror_type') self.default_vol_type = self._create_volume_type(None, 'default_type') def test_storwize_svc_connectivity(self): # Make sure we detect if the pool doesn't exist no_exist_pool = 'i-dont-exist-%s' % random.randint(10000, 99999) self._set_flag('storwize_svc_volpool_name', no_exist_pool) self.assertRaises(exception.InvalidInput, self.driver.do_setup, None) self._reset_flags() # Check the case where the user didn't configure IP addresses # as well as receiving unexpected results from the storage if self.USESIM: self.sim.error_injection('lsnodecanister', 'header_mismatch') self.assertRaises(exception.VolumeBackendAPIException, self.driver.do_setup, None) self.sim.error_injection('lsnodecanister', 'remove_field') self.assertRaises(exception.VolumeBackendAPIException, self.driver.do_setup, None) self.sim.error_injection('lsportip', 'header_mismatch') self.assertRaises(exception.VolumeBackendAPIException, self.driver.do_setup, None) self.sim.error_injection('lsportip', 'remove_field') self.assertRaises(exception.VolumeBackendAPIException, self.driver.do_setup, None) self.sim.error_injection('lsfcportsetmember', 'invalid_input') self.driver.do_setup(None) with mock.patch.object(storwize_svc_common.StorwizeHelpers, 'get_system_info') as get_system_info: fake_system_info = {'code_level': (8, 5, 0, 0), 'topology': 'standard', 'system_name': 'storwize-svc-sim', 'system_id': '0123456789ABCDEF'} get_system_info.return_value = fake_system_info if self.USESIM: self.sim.error_injection('lsip', 'invalid_portset') self.driver.do_setup(None) self.sim.error_injection('lsip', 'header_mismatch') self.assertRaises(exception.VolumeBackendAPIException, self.driver.do_setup, None) self.sim.error_injection('lsip', 'remove_field') self.assertRaises(exception.VolumeBackendAPIException, self.driver.do_setup, None) # Check with bad parameters self._set_flag('san_ip', '') self.assertRaises(exception.InvalidInput, 
self.driver.check_for_setup_error) self._reset_flags() self._set_flag('san_password', None) self._set_flag('san_private_key', None) self.assertRaises(exception.InvalidInput, self.driver.check_for_setup_error) self._reset_flags() self._set_flag('storwize_svc_vol_grainsize', 42) self.assertRaises(exception.InvalidInput, self.driver.check_for_setup_error) self._reset_flags() self._set_flag('storwize_svc_vol_compression', True) self._set_flag('storwize_svc_vol_rsize', -1) self.assertRaises(exception.InvalidInput, self.driver.check_for_setup_error) self._reset_flags() self._set_flag('storwize_svc_vol_rsize', 2) self._set_flag('storwize_svc_vol_nofmtdisk', True) self.assertRaises(exception.InvalidInput, self.driver.check_for_setup_error) self._reset_flags() self._set_flag('storwize_svc_vol_iogrp', 5) self.assertRaises(exception.InvalidInput, self.driver.check_for_setup_error) self._reset_flags() if self.USESIM: self.sim.error_injection('lslicense', 'no_compression') self.sim.error_injection('lsguicapabilities', 'no_compression') self._set_flag('storwize_svc_vol_compression', True) self.driver.do_setup(None) self.assertRaises(exception.InvalidInput, self.driver.check_for_setup_error) self._reset_flags() # Finally, check with good parameters self.driver.do_setup(None) @mock.patch.object(storwize_svc_common.StorwizeSSH, 'lsip') @mock.patch.object(storwize_svc_common.StorwizeHelpers, 'get_node_info') @mock.patch.object(storwize_svc_common.StorwizeHelpers, 'get_system_info') def test_storwize_add_iscsi_ip_address(self, get_system_info, get_node_info, lsip): helper = self.driver._master_backend_helpers helper.state = {'storage_nodes': {}, 'enabled_protocols': set(), 'compression_enabled': False, 'available_iogrps': [], 'system_name': None, 'system_id': None, 'code_level': None} get_system_info.return_value = {'code_level': (8, 5, 0, 0), 'topology': 'standard', 'system_name': 'storwize-svc-sim', 'system_id': '0123456789ABCDEF'} get_node_info.return_value = {'1': {'id': '1', 'name': 'node1', 'IO_group': 0, 'iscsi_name': 'test_iscsi1', 'site_id': '1', 'site_name': 'site1', 'ipv4': [], 'ipv6': [], 'IP_address': [], 'WWPN': [], 'enabled_protocols': [], 'status': 'online'}, '2': {'id': '2', 'name': 'node2', 'IO_group': 1, 'iscsi_name': 'test_iscsi2', 'site_id': '1', 'site_name': 'site1', 'ipv4': [], 'ipv6': [], 'IP_address': [], 'WWPN': [], 'enabled_protocols': [], 'status': 'online'}} lsip.return_value = [{'node_id': '1', 'IP_address': '1.1.1.1'}, {'node_id': '2', 'IP_address': '2.2.2.2'}] # Initially the storage_nodes will be empty self.assertEqual(helper.state["storage_nodes"], {}) # _update_storwize_state will update the code_level # and node info. After that it will call add_iscsi_ip_addrs to # update the IP_address for the corresponding node_id in storage_nodes self.driver._update_storwize_state(helper.state, helper) # Now, IPs of both the node_id in storage_nodes is updated correctly # Which means add_iscsi_ip_addrs was successful. 
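# Roughly, the updated state is expected to look like:
#   helper.state['storage_nodes']['1']['IP_address'] == ['1.1.1.1']
#   helper.state['storage_nodes']['2']['IP_address'] == ['2.2.2.2']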
self.assertNotEqual(helper.state["storage_nodes"], {}) self.assertEqual(helper.state["storage_nodes"]['1']['IP_address'], ['1.1.1.1']) self.assertEqual(helper.state["storage_nodes"]['2']['IP_address'], ['2.2.2.2']) @mock.patch.object(storwize_svc_common.StorwizeSSH, 'mkhost') def test_storwize_create_host_with_portset(self, mkhost): self.driver.do_setup(self.ctxt) connector = {'host': 'storwize-svc-host', 'initiator': 'iqn.1993-08.org.debian:01:eac5ccc1aaa', 'ip': '127.0.0.1'} # Using portset other than default portset0 portset = "portset1" self.driver._helpers.create_host(connector, iscsi=True, portset=portset) host_name = self.driver._helpers.get_host_from_connector( connector, iscsi=True) self.assertIsNotNone(host_name) @mock.patch.object(storwize_svc_common.StorwizeSSH, 'mkhost') def test_storwize_create_host_with_portset_from_config(self, mkhost): self.driver.do_setup(self.ctxt) connector = {'host': 'storwize-svc-host', 'initiator': 'iqn.1993-08.org.debian:01:eac5ccc1aaa', 'ip': '127.0.0.1'} # Using portset other than default portset0 self._set_flag('storwize_portset', "portset1") self.driver._helpers.create_host( connector, iscsi=True, portset=self.driver.configuration.storwize_portset) host_name = self.driver._helpers.get_host_from_connector( connector, iscsi=True) self.assertIsNotNone(host_name) @mock.patch.object(ssh_utils, 'SSHPool') @mock.patch.object(processutils, 'ssh_execute') def test_run_ssh_set_up_with_san_ip(self, mock_ssh_execute, mock_ssh_pool): ssh_cmd = ['svcinfo'] self._driver._run_ssh(ssh_cmd) mock_ssh_pool.assert_called_once_with( self._driver.configuration.san_ip, self._driver.configuration.san_ssh_port, self._driver.configuration.ssh_conn_timeout, self._driver.configuration.san_login, password=self._driver.configuration.san_password, privatekey=self._driver.configuration.san_private_key, min_size=self._driver.configuration.ssh_min_pool_conn, max_size=self._driver.configuration.ssh_max_pool_conn) @mock.patch.object(ssh_utils, 'SSHPool') @mock.patch.object(processutils, 'ssh_execute') def test_run_ssh_set_up_with_secondary_ip(self, mock_ssh_execute, mock_ssh_pool): mock_ssh_pool.side_effect = [paramiko.SSHException, mock.MagicMock()] ssh_cmd = ['svcinfo'] self._driver._run_ssh(ssh_cmd) mock_ssh_pool.assert_called_with( self._driver.configuration.storwize_san_secondary_ip, self._driver.configuration.san_ssh_port, self._driver.configuration.ssh_conn_timeout, self._driver.configuration.san_login, password=self._driver.configuration.san_password, privatekey=self._driver.configuration.san_private_key, min_size=self._driver.configuration.ssh_min_pool_conn, max_size=self._driver.configuration.ssh_max_pool_conn) @mock.patch.object(random, 'randint', mock.Mock(return_value=0)) @mock.patch.object(ssh_utils, 'SSHPool') @mock.patch.object(processutils, 'ssh_execute') def test_run_ssh_fail_to_secondary_ip(self, mock_ssh_execute, mock_ssh_pool): mock_ssh_execute.side_effect = [processutils.ProcessExecutionError, mock.MagicMock()] ssh_cmd = ['svcinfo'] self._driver._run_ssh(ssh_cmd) mock_ssh_pool.assert_called_with( self._driver.configuration.storwize_san_secondary_ip, self._driver.configuration.san_ssh_port, self._driver.configuration.ssh_conn_timeout, self._driver.configuration.san_login, password=self._driver.configuration.san_password, privatekey=self._driver.configuration.san_private_key, min_size=self._driver.configuration.ssh_min_pool_conn, max_size=self._driver.configuration.ssh_max_pool_conn) @mock.patch.object(ssh_utils, 'SSHPool') @mock.patch.object(processutils, 
'ssh_execute') def test_run_secondary_ip_ssh_fail_to_san_ip(self, mock_ssh_execute, mock_ssh_pool): mock_ssh_pool.side_effect = [ paramiko.SSHException, mock.MagicMock( ip=self._driver.configuration.storwize_san_secondary_ip), mock.MagicMock()] mock_ssh_execute.side_effect = [processutils.ProcessExecutionError, mock.MagicMock()] ssh_cmd = ['svcinfo'] self._driver._run_ssh(ssh_cmd) mock_ssh_pool.assert_called_with( self._driver.configuration.san_ip, self._driver.configuration.san_ssh_port, self._driver.configuration.ssh_conn_timeout, self._driver.configuration.san_login, password=self._driver.configuration.san_password, privatekey=self._driver.configuration.san_private_key, min_size=self._driver.configuration.ssh_min_pool_conn, max_size=self._driver.configuration.ssh_max_pool_conn) @mock.patch.object(ssh_utils, 'SSHPool') @mock.patch.object(processutils, 'ssh_execute') def test_run_ssh_both_ip_set_failure(self, mock_ssh_execute, mock_ssh_pool): mock_ssh_pool.side_effect = [ paramiko.SSHException, mock.MagicMock(), mock.MagicMock()] mock_ssh_execute.side_effect = [processutils.ProcessExecutionError, processutils.ProcessExecutionError] ssh_cmd = ['svcinfo'] self.assertRaises(processutils.ProcessExecutionError, self._driver._run_ssh, ssh_cmd) @mock.patch.object(ssh_utils, 'SSHPool') @mock.patch.object(processutils, 'ssh_execute') def test_run_ssh_second_ip_not_set_failure(self, mock_ssh_execute, mock_ssh_pool): mock_ssh_execute.side_effect = [processutils.ProcessExecutionError, mock.MagicMock()] self._set_flag('storwize_san_secondary_ip', None) ssh_cmd = ['svcinfo'] self.assertRaises(processutils.ProcessExecutionError, self._driver._run_ssh, ssh_cmd) @mock.patch.object(random, 'randint', mock.Mock(return_value=0)) @mock.patch.object(ssh_utils, 'SSHPool') @mock.patch.object(processutils, 'ssh_execute') def test_run_ssh_consistent_active_ip(self, mock_ssh_execute, mock_ssh_pool): ssh_cmd = ['svcinfo'] self._driver._run_ssh(ssh_cmd) self._driver._run_ssh(ssh_cmd) self._driver._run_ssh(ssh_cmd) self.assertEqual(self._driver.configuration.san_ip, self._driver.active_ip) mock_ssh_execute.side_effect = [paramiko.SSHException, mock.MagicMock(), mock.MagicMock()] self._driver._run_ssh(ssh_cmd) self._driver._run_ssh(ssh_cmd) self.assertEqual(self._driver.configuration.storwize_san_secondary_ip, self._driver.active_ip) @mock.patch.object(ssh_utils, 'SSHPool') @mock.patch.object(processutils, 'ssh_execute') def test_run_ssh_response_no_ascii(self, mock_ssh_execute, mock_ssh_pool): mock_ssh_execute.side_effect = processutils.ProcessExecutionError( u'', 'CMMVC6035E \xe6\x93\x8d\xe4\xbd\x9c\xe5\xa4\xb1\xe8\xb4\xa5\n', 1, u'svctask lsmdiskgrp "openstack"', None) self.assertRaises(exception.InvalidInput, self._driver._validate_pools_exist) @ddt.data({'node1': 'online', 'node2': 'online', 'node3': 'online', 'node4': 'online', 'state': 'enabled', 'reason': None}, {'node1': 'online', 'node2': 'online', 'node3': 'offline', 'node4': 'offline', 'state': 'disabled', 'reason': 'site2 is down'}, {'node1': 'offline', 'node2': 'offline', 'node3': 'online', 'node4': 'online', 'state': 'disabled', 'reason': 'site1 is down'}, {'node1': 'offline', 'node2': 'offline', 'node3': 'offline', 'node4': 'offline', 'state': 'disabled', 'reason': 'site1 is down'}) @mock.patch.object(storwize_svc_common.StorwizeHelpers, 'get_node_info') def test_get_hyperswap_storage_state(self, node_data, get_node_info): get_node_info.return_value = {'7': {'id': '7', 'name': 'node1', 'status': node_data['node1'], 'site_id': '1', 'site_name': 'site1'}, 
'8': {'id': '8', 'name': 'node2', 'status': node_data['node2'], 'site_id': '1', 'site_name': 'site1'}, '9': {'id': '9', 'name': 'node3', 'status': node_data['node3'], 'site_id': '2', 'site_name': 'site2'}, '10': {'id': '10', 'name': 'node4', 'status': node_data['node4'], 'site_id': '2', 'site_name': 'site2'}} state, reason = self.driver.get_hyperswap_storage_state() self.assertEqual(state, node_data['state']) self.assertEqual(reason, node_data['reason']) @ddt.data((True, 'online'), (True, 'offline'), (False, 'online'), (False, 'offline')) @mock.patch.object(storwize_svc_common.StorwizeSSH, 'lsnode') @ddt.unpack def test_get_node_info(self, online_node, node_status, lsnode): empty_nodes_info = {} fake_lsnode_info = [{ 'id': 1, 'name': 'test', 'IO_group_id': 'test_io_group', 'iscsi_name': 'test_iscsi', 'WWNN': '123456', 'status': node_status, 'WWPN': '8999', 'ipv4': '192.9.123.1', 'ipv6': '1.2.3.4', 'enabled_protocols': 'ipv6', 'site_id': '1783', 'site_name': 'test-sitename' }] lsnode.return_value = fake_lsnode_info nodes = self.driver._helpers.get_node_info(online_node) if not online_node or online_node and node_status == 'online': self.assertIsNotNone(nodes) elif online_node and node_status == 'offline': self.assertEqual(nodes, empty_nodes_info) @mock.patch.object(storwize_svc_common.StorwizeSVCCommonDriver, '_build_pool_stats') def test_update_volume_stats_non_replication(self, _build_pool_stats): self.driver._update_volume_stats() self.assertFalse(self.driver._replica_enabled) self.assertEqual(SVC_POOLS, self.driver._get_backend_pools()) self.assertEqual(len(SVC_POOLS), _build_pool_stats.call_count) self.assertIsNotNone(self.driver._master_backend_helpers.stats) self.assertIsNone(self.driver._aux_backend_helpers) @ddt.data((False, 'enabled', ''), (False, 'disabled', 'site 2 down'), (True, '', '')) @mock.patch.object(storwize_svc_common.StorwizeSVCCommonDriver, 'get_hyperswap_storage_state') @ddt.unpack def test_update_volume_stats(self, is_replica_enabled, replication_status, reason, get_hs_storage_state): self._replica_enabled = is_replica_enabled self.driver._update_volume_stats() if not self._replica_enabled: with mock.patch.object( storwize_svc_common.StorwizeHelpers, 'is_system_topology_hyperswap') as is_hyperswap: with mock.patch.object( storwize_svc_common.StorwizeHelpers, 'get_node_info') as get_node_info: is_hyperswap.return_value = is_hyperswap if is_hyperswap: get_node_info.return_value = None get_hs_storage_state.side_effect =\ exception.VolumeBackendAPIException(data='') self.assertRaises(exception.VolumeBackendAPIException, get_hs_storage_state) get_hs_storage_state.return_value = ( replication_status, reason) if replication_status != 'enabled': self.assertNotEqual( fields.ReplicationStatus.ENABLED, replication_status) self.assertIsNotNone(reason) else: self.assertEqual(fields.ReplicationStatus.ENABLED, replication_status) self.assertEqual(reason, '') else: self.assertFalse(get_hs_storage_state.called) def _get_pool_volumes(self, pool): vdisks = self.sim._cmd_lsvdisks_from_filter('mdisk_grp_name', pool) return vdisks def test_get_all_volumes(self): _volumes_list = [] pools = _get_test_pool(get_all=True) for pool in pools: host = 'openstack@svc#%s' % pool vol1 = testutils.create_volume( self.ctxt, host=host, volume_type_id=self.vt['id']) self.driver.create_volume(vol1) vol2 = testutils.create_volume( self.ctxt, host=host, volume_type_id=self.vt['id']) self.driver.create_volume(vol2) for pool in pools: pool_vols = self._get_pool_volumes(pool) for pool_vol in pool_vols: 
_volumes_list.append(pool_vol) for vol in _volumes_list: self.assertIn(vol, self.sim._volumes_list) def _create_volume_type(self, opts, type_name): type_ref = volume_types.create(self.ctxt, type_name, opts) vol_type = objects.VolumeType.get_by_id(self.ctxt, type_ref['id']) return vol_type def _create_hyperswap_type(self, type_name): spec = {'drivers:volume_topology': 'hyperswap', 'peer_pool': 'hyperswap2'} hyper_type = self._create_volume_type(spec, type_name) return hyper_type def _create_hyperswap_volume(self, hyper_type, **kwargs): pool = 'hyperswap1' prop = {'host': 'openstack@svc#%s' % pool, 'size': 1} prop['volume_type_id'] = hyper_type.id for p in prop.keys(): if p not in kwargs: kwargs[p] = prop[p] vol = testutils.create_volume(self.ctxt, **kwargs) self.driver.create_volume(vol) return vol def _generate_hyperswap_vol_info(self, hyper_type, size=10): pool = 'hyperswap1' prop = {'host': 'openstack@svc#%s' % pool, 'size': size, 'volume_type_id': hyper_type.id} vol = testutils.create_volume(self.ctxt, **prop) return vol def _generate_vol_info(self, vol_type=None, size=10): pool = _get_test_pool() prop = {'size': size, 'host': 'openstack@svc#%s' % pool, 'volume_type_id': self.vt['id']} if vol_type: prop['volume_type_id'] = vol_type.id vol = testutils.create_volume(self.ctxt, **prop) return vol def _generate_vol_info_on_dr_pool(self, vol_type=None, size=10): pool = 'dr_pool1' prop = {'size': size, 'host': 'openstack@svc#%s' % pool} if vol_type: prop['volume_type_id'] = vol_type.id vol = testutils.create_volume(self.ctxt, **prop) return vol def _generate_snap_info(self, vol_id, size=10): prop = {'volume_id': vol_id, 'volume_size': size} snap = testutils.create_snapshot(self.ctxt, **prop) return snap def _create_volume(self, **kwargs): pool = _get_test_pool() prop = {'host': 'openstack@svc#%s' % pool, 'size': 1, 'volume_type_id': self.vt['id']} for p in prop.keys(): if p not in kwargs: kwargs[p] = prop[p] vol = testutils.create_volume(self.ctxt, **kwargs) self.driver.create_volume(vol) return vol def _delete_volume(self, volume): self.driver.delete_volume(volume) self.db.volume_destroy(self.ctxt, volume['id']) def _create_group_in_db(self, **kwargs): cg = testutils.create_group(self.ctxt, **kwargs) return cg def _create_group(self, **kwargs): grp = self._create_group_in_db(**kwargs) model_update = self.driver.create_group(self.ctxt, grp) self.assertEqual(fields.GroupStatus.AVAILABLE, model_update['status'], "CG created failed") return grp def _create_volumegroup_type_and_volumegroup(self, vol_type_ref, is_pool=None, is_io_grp=None): # Create volumegroup type volumegroup_spec = {'volume_group_enabled': ' True'} if is_pool: volumegroup_spec.update({'volume_group_pool': is_pool}) if is_io_grp: volumegroup_spec.update({'volume_group_iogrp': is_io_grp}) volumegroup_type_ref = group_types.create(self.ctxt, 'volumegroup_type', volumegroup_spec) volumegroup_type = objects.GroupType.get_by_id( self.ctxt, volumegroup_type_ref['id']) # Create volumegroup volumegroup = testutils.create_group( self.ctxt, group_type_id=volumegroup_type.id, volume_type_ids=[vol_type_ref['id']]) model_update = self.driver.create_group(self.ctxt, volumegroup) return (volumegroup_type_ref, volumegroup_type, volumegroup, model_update) def _create_group_snapshot_in_db(self, group_id, **kwargs): group_snapshot = testutils.create_group_snapshot(self.ctxt, group_id=group_id, **kwargs) snapshots = [] volumes = self.db.volume_get_all_by_generic_group( self.ctxt.elevated(), group_id) if not volumes: msg = _("Group is empty. 
No cgsnapshot will be created.") raise exception.InvalidGroup(reason=msg) for volume in volumes: snapshots.append(testutils.create_snapshot( self.ctxt, volume['id'], group_snapshot.id, group_snapshot.name, group_snapshot.id, fields.SnapshotStatus.CREATING)) return group_snapshot, snapshots def _create_group_snapshot(self, cg_id, **kwargs): group_snapshot, snapshots = self._create_group_snapshot_in_db( cg_id, **kwargs) model_update, snapshots_model = ( self.driver.create_group_snapshot(self.ctxt, group_snapshot, snapshots)) self.assertEqual(fields.GroupSnapshotStatus.AVAILABLE, model_update['status'], "CGSnapshot created failed") for snapshot in snapshots_model: self.assertEqual(fields.SnapshotStatus.AVAILABLE, snapshot['status']) return group_snapshot, snapshots def _create_test_vol(self, opts): ctxt = testutils.get_test_admin_context() type_ref = volume_types.create(ctxt, 'testtype', opts) volume = self._generate_vol_info() volume.volume_type_id = type_ref['id'] volume.volume_typ = objects.VolumeType.get_by_id(ctxt, type_ref['id']) self.driver.create_volume(volume) attrs = self.driver._helpers.get_vdisk_attributes(volume['name']) self.driver.delete_volume(volume) volume_types.destroy(ctxt, type_ref['id']) return attrs def _get_default_opts(self): opt = {'rsize': 2, 'warning': 0, 'autoexpand': True, 'grainsize': 256, 'compression': False, 'easytier': True, 'iogrp': '0', 'qos': None, 'replication': False, 'stretched_cluster': None, 'nofmtdisk': False, 'flashcopy_rate': 49, 'clean_rate': 50, 'mirror_pool': None, 'aux_mirror_pool': None, 'volume_topology': None, 'peer_pool': None, 'storwize_portset': None, 'storwize_svc_src_child_pool': None, 'storwize_svc_target_child_pool': None, 'cycle_period_seconds': 300 } return opt @ddt.data(('5000', 'iops', True), ('500', 'iops_per_gb', False), ('3000', 'mbps', False)) @mock.patch.object(storwize_svc_common.StorwizeHelpers, 'add_vdisk_qos') @mock.patch.object(storwize_svc_common.StorwizeSVCCommonDriver, '_get_vdisk_params') @ddt.unpack def test_storwize_svc_create_volume_with_qos(self, fake_iothrottling_value, fake_iothrottling_unit, empty_qos, get_vdisk_params, add_vdisk_qos): fake_opts = self._get_default_opts() # If the qos is empty, chvdisk should not be called # for create_volume. get_vdisk_params.return_value = fake_opts vol = self._create_volume() if empty_qos: self._assert_vol_exists(vol['name'], True) self.assertFalse(add_vdisk_qos.called) self.driver.delete_volume(vol) # If the qos is not empty, chvdisk should be called # for create_volume. 
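# For example (values taken from the ddt.data tuples above, shown here
# only as an illustration): with the ('5000', 'iops') case the options
# handed to the driver would carry
#   fake_opts['qos'] = {'IOThrottling': '5000', 'IOThrottling_unit': 'iops'}
# and add_vdisk_qos is then expected to receive exactly that dict along
# with the volume name and size.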
fake_opts['qos'] = {'IOThrottling': fake_iothrottling_value, 'IOThrottling_unit': fake_iothrottling_unit} get_vdisk_params.return_value = fake_opts self.driver.create_volume(vol) self._assert_vol_exists(vol['name'], True) add_vdisk_qos.assert_called_once_with(vol['name'], fake_opts['qos'], vol['size']) self.driver.delete_volume(vol) self._assert_vol_exists(vol['name'], False) def test_storwize_svc_snapshots(self): vol1 = self._create_volume() snap1 = self._generate_snap_info(vol1.id) # Test timeout and volume cleanup self._set_flag('storwize_svc_flashcopy_timeout', 1) self.assertRaises(exception.VolumeDriverException, self.driver.create_snapshot, snap1) self._assert_vol_exists(snap1['name'], False) self._reset_flags() # Test flashcopy_rate > 100 on 7.2.0.0 self._set_flag('storwize_svc_flashcopy_rate', 149) self.assertRaises(exception.VolumeDriverException, self.driver.create_snapshot, snap1) self._assert_vol_exists(snap1['name'], False) self._reset_flags() # Test clean_rate < 150 on 7.2.0.0 self._set_flag('storwize_svc_clean_rate', 100) vol2 = self._create_volume() snap2 = self._generate_snap_info(vol2.id) self.driver.create_snapshot(snap2) self._assert_vol_exists(snap2['name'], True) self._reset_flags() # Test flashcopy_rate out of range spec = {'flashcopy_rate': 151} type_ref = volume_types.create(self.ctxt, "fccopy_rate", spec) vol3 = self._generate_vol_info(type_ref) self.driver.create_volume(vol3) snap3 = self._generate_snap_info(vol3.id) self.assertRaises(exception.InvalidInput, self.driver.create_snapshot, snap3) self._assert_vol_exists(snap3['name'], False) # Test prestartfcmap failing with mock.patch.object( storwize_svc_common.StorwizeSSH, 'prestartfcmap') as prestart: prestart.side_effect = exception.VolumeBackendAPIException(data='') self.assertRaises(exception.VolumeBackendAPIException, self.driver.create_snapshot, snap1) if self.USESIM: self.sim.error_injection('lsfcmap', 'speed_up') self.sim.error_injection('startfcmap', 'bad_id') self.assertRaises(exception.VolumeBackendAPIException, self.driver.create_snapshot, snap1) self._assert_vol_exists(snap1['name'], False) self.sim.error_injection('prestartfcmap', 'bad_id') self.assertRaises(exception.VolumeBackendAPIException, self.driver.create_snapshot, snap1) self._assert_vol_exists(snap1['name'], False) # Test successful snapshot self.driver.create_snapshot(snap1) self._assert_vol_exists(snap1['name'], True) # Try to create a snapshot from an non-existing volume - should fail vol2 = self._generate_vol_info() snap_novol = self._generate_snap_info(vol2.id) self.assertRaises(exception.VolumeDriverException, self.driver.create_snapshot, snap_novol) # We support deleting a volume that has snapshots, so delete the volume # first self.driver.delete_volume(vol1) self.driver.delete_snapshot(snap1) def test_storwize_svc_create_cloned_volume(self): vol1 = self._create_volume() vol2 = testutils.create_volume( self.ctxt, volume_type_id=self.vt['id']) vol3 = testutils.create_volume( self.ctxt, volume_type_id=self.vt['id']) vol4 = testutils.create_volume( self.ctxt, volume_type_id=self.vt['id']) # Try to clone where source size = target size vol1['size'] = vol2['size'] if self.USESIM: self.sim.error_injection('lsfcmap', 'speed_up') self.driver.create_cloned_volume(vol2, vol1) if self.USESIM: # validate copyrate was set on the flash copy for i, fcmap in self.sim._fcmappings_list.items(): if fcmap['target'] == vol2['name']: self.assertEqual('49', fcmap['copyrate']) self._assert_vol_exists(vol2['name'], True) # Try to clone where source size < 
target size vol3['size'] = vol1['size'] + 1 if self.USESIM: self.sim.error_injection('lsfcmap', 'speed_up') self.driver.create_cloned_volume(vol3, vol1) if self.USESIM: # Validate copyrate was set on the flash copy for i, fcmap in self.sim._fcmappings_list.items(): if fcmap['target'] == vol3['name']: self.assertEqual('49', fcmap['copyrate']) self._assert_vol_exists(vol3['name'], True) # Try to clone and check if clean_rate is set to default if self.USESIM: self.sim.error_injection('lsfcmap', 'speed_up') self.driver.create_cloned_volume(vol4, vol1) if self.USESIM: # Validate copyrate was set on the flash copy for i, fcmap in self.sim._fcmappings_list.items(): if fcmap['target'] == vol4['name']: self.assertEqual('50', fcmap['cleanrate']) self._assert_vol_exists(vol4['name'], True) # Delete in the 'opposite' order to make sure it works self.driver.delete_volume(vol4) self._assert_vol_exists(vol4['name'], False) self.driver.delete_volume(vol3) self._assert_vol_exists(vol3['name'], False) self.driver.delete_volume(vol2) self._assert_vol_exists(vol2['name'], False) self.driver.delete_volume(vol1) self._assert_vol_exists(vol1['name'], False) # retype the flashcopy_rate ctxt = context.get_admin_context() key_specs_old = {'flashcopy_rate': 49} key_specs_new = {'flashcopy_rate': 149} old_type_ref = volume_types.create(ctxt, 'old', key_specs_old) new_type_ref = volume_types.create(ctxt, 'new', key_specs_new) host = {'host': 'openstack@svc#openstack'} diff, _equal = volume_types.volume_types_diff(ctxt, old_type_ref['id'], new_type_ref['id']) old_type = objects.VolumeType.get_by_id(ctxt, old_type_ref['id']) volume = self._generate_vol_info(old_type) volume['host'] = host['host'] new_type = objects.VolumeType.get_by_id(ctxt, new_type_ref['id']) self.driver.create_volume(volume) volume2 = testutils.create_volume( self.ctxt, volume_type_id=self.vt['id']) self.driver.create_cloned_volume(volume2, volume) if self.USESIM: # Validate copyrate was set on the flash copy for i, fcmap in self.sim._fcmappings_list.items(): if fcmap['target'] == volume2['name']: self.assertEqual('49', fcmap['copyrate']) self.driver.retype(ctxt, volume, new_type, diff, host) if self.USESIM: # Validate copyrate was set on the flash copy for i, fcmap in self.sim._fcmappings_list.items(): if fcmap['source'] == volume['name']: self.assertEqual('149', fcmap['copyrate']) # create cloned volume with new type diffrent iogrp key_specs_old = {'iogrp': '0'} key_specs_new = {'iogrp': '1'} old_type_ref = volume_types.create(ctxt, 'oldio', key_specs_old) new_type_ref = volume_types.create(ctxt, 'newio', key_specs_new) old_io_type = objects.VolumeType.get_by_id(ctxt, old_type_ref['id']) new_io_type = objects.VolumeType.get_by_id(ctxt, new_type_ref['id']) volume3 = self._generate_vol_info(old_io_type) self.driver.create_volume(volume3) volume4 = self._generate_vol_info(new_io_type) self.driver.create_cloned_volume(volume4, volume) attributes = self.driver._helpers.get_vdisk_attributes(volume4['name']) self.assertEqual('1', attributes['IO_group_id']) def test_storwize_svc_retype_only_change_clean_rate(self): self.driver.do_setup(None) loc = ('StorwizeSVCDriver:' + self.driver._state['system_id'] + ':openstack') cap = {'location_info': loc, 'extent_size': '128'} self.driver._stats = {'location_info': loc} host = {'host': 'openstack@svc#openstack', 'capabilities': cap} ctxt = context.get_admin_context() key_specs_old = {'clean_rate': 50} key_specs_new = {'clean_rate': 100} old_type_ref = volume_types.create(ctxt, 'old', key_specs_old) new_type_ref = 
volume_types.create(ctxt, 'new', key_specs_new) host = {'host': 'openstack@svc#openstack'} diff, _equal = volume_types.volume_types_diff(ctxt, old_type_ref['id'], new_type_ref['id']) old_type = objects.VolumeType.get_by_id(ctxt, old_type_ref['id']) volume = self._generate_vol_info(old_type) volume['host'] = host['host'] new_type = objects.VolumeType.get_by_id(ctxt, new_type_ref['id']) self.driver.create_volume(volume) volume2 = testutils.create_volume( self.ctxt, volume_type_id=self.vt['id']) # Create the snapshot of the source volume snap = self._generate_snap_info(volume.id) self.driver.create_snapshot(snap) if self.USESIM: # Validate cleanrate was set on the flash copy for i, fcmap in self.sim._fcmappings_list.items(): if fcmap['source'] == volume['name']: self.assertEqual('50', fcmap['cleanrate']) # Try to retype the source volume self.driver.retype(ctxt, volume, new_type, diff, host) if self.USESIM: # Validate cleanrate was set on the flash copy for i, fcmap in self.sim._fcmappings_list.items(): if fcmap['source'] == volume['name']: self.assertEqual('100', fcmap['cleanrate']) # Delete the volumes self.driver.delete_volume(volume2) self._assert_vol_exists(volume2['name'], False) self.driver.delete_volume(volume) self._assert_vol_exists(volume['name'], False) def test_storwize_svc_create_volume_from_snapshot(self): vol1 = self._create_volume() snap1 = self._generate_snap_info(vol1.id) self.driver.create_snapshot(snap1) vol2 = self._generate_vol_info() vol3 = self._generate_vol_info() # Try to create a volume from a non-existing snapshot vol_novol = self._generate_vol_info() snap_novol = self._generate_snap_info(vol_novol.id) self.assertRaises(exception.VolumeDriverException, self.driver.create_volume_from_snapshot, vol_novol, snap_novol) # Fail the snapshot with mock.patch.object( storwize_svc_common.StorwizeSSH, 'prestartfcmap') as prestart: prestart.side_effect = exception.VolumeBackendAPIException( data='') self.assertRaises(exception.VolumeBackendAPIException, self.driver.create_volume_from_snapshot, vol2, snap1) self._assert_vol_exists(vol2['name'], False) # Try to create where volume size > snapshot size vol2['size'] += 1 if self.USESIM: self.sim.error_injection('lsfcmap', 'speed_up') self.driver.create_volume_from_snapshot(vol2, snap1) self._assert_vol_exists(vol2['name'], True) vol2['size'] -= 1 # Try to create where volume size = snapshot size if self.USESIM: self.sim.error_injection('lsfcmap', 'speed_up') self.driver.create_volume_from_snapshot(vol3, snap1) self._assert_vol_exists(vol3['name'], True) # Delete in the 'opposite' order to make sure it works self.driver.delete_volume(vol3) self._assert_vol_exists(vol3['name'], False) self.driver.delete_volume(vol2) self._assert_vol_exists(vol2['name'], False) self.driver.delete_snapshot(snap1) self._assert_vol_exists(snap1['name'], False) self.driver.delete_volume(vol1) self._assert_vol_exists(vol1['name'], False) @ddt.data(('5000', 'iops', True), ('500', 'iops_per_gb', False), ('3000', 'mbps', False)) @mock.patch.object(storwize_svc_common.StorwizeHelpers, 'add_vdisk_qos') @ddt.unpack def test_storwize_svc_create_volfromsnap_clone_with_qos( self, fake_iothrottling_value, fake_iothrottling_unit, empty_qos, add_vdisk_qos): vol1 = self._create_volume() snap1 = self._generate_snap_info(vol1.id) self.driver.create_snapshot(snap1) vol2 = self._generate_vol_info() vol3 = self._generate_vol_info() fake_opts = self._get_default_opts() # Succeed if self.USESIM: self.sim.error_injection('lsfcmap', 'speed_up') # If the qos is empty, chvdisk 
should not be called # for create_volume_from_snapshot. with mock.patch.object(storwize_svc_iscsi.StorwizeSVCISCSIDriver, '_get_vdisk_params') as get_vdisk_params: if empty_qos: get_vdisk_params.return_value = fake_opts self.driver.create_volume_from_snapshot(vol2, snap1) self._assert_vol_exists(vol2['name'], True) self.assertFalse(add_vdisk_qos.called) self.driver.delete_volume(vol2) # If the qos is not empty, chvdisk should be called # for create_volume_from_snapshot. fake_opts['qos'] = {'IOThrottling': fake_iothrottling_value, 'IOThrottling_unit': fake_iothrottling_unit} get_vdisk_params.return_value = fake_opts self.driver.create_volume_from_snapshot(vol2, snap1) self._assert_vol_exists(vol2['name'], True) add_vdisk_qos.assert_called_once_with(vol2['name'], fake_opts['qos'], vol2['size']) if self.USESIM: self.sim.error_injection('lsfcmap', 'speed_up') # If the qos is empty, chvdisk should not be called # for create_cloned_volume. add_vdisk_qos.reset_mock() if empty_qos: fake_opts['qos'] = None get_vdisk_params.return_value = fake_opts self.driver.create_cloned_volume(vol3, vol2) self._assert_vol_exists(vol3['name'], True) self.assertFalse(add_vdisk_qos.called) self.driver.delete_volume(vol3) # If the qos is not empty, chvdisk should be called # for create_cloned_volume. fake_opts['qos'] = {'IOThrottling': fake_iothrottling_value, 'IOThrottling_unit': fake_iothrottling_unit} get_vdisk_params.return_value = fake_opts self.driver.create_cloned_volume(vol3, vol2) self._assert_vol_exists(vol3['name'], True) add_vdisk_qos.assert_called_once_with(vol3['name'], fake_opts['qos'], vol3['size']) # Delete in the 'opposite' order to make sure it works self.driver.delete_volume(vol3) self._assert_vol_exists(vol3['name'], False) self.driver.delete_volume(vol2) self._assert_vol_exists(vol2['name'], False) self.driver.delete_snapshot(snap1) self._assert_vol_exists(snap1['name'], False) self.driver.delete_volume(vol1) self._assert_vol_exists(vol1['name'], False) def test_storwize_svc_delete_vol_with_fcmap(self): vol1 = self._create_volume() # create two snapshots snap1 = self._generate_snap_info(vol1.id) snap2 = self._generate_snap_info(vol1.id) self.driver.create_snapshot(snap1) self.driver.create_snapshot(snap2) vol2 = self._generate_vol_info() vol3 = self._generate_vol_info() # Create vol from the second snapshot if self.USESIM: self.sim.error_injection('lsfcmap', 'speed_up') self.driver.create_volume_from_snapshot(vol2, snap2) if self.USESIM: # validate copyrate was set on the flash copy for i, fcmap in self.sim._fcmappings_list.items(): if fcmap['target'] == vol2['name']: self.assertEqual('copying', fcmap['status']) self._assert_vol_exists(vol2['name'], True) if self.USESIM: self.sim.error_injection('lsfcmap', 'speed_up') self.driver.create_cloned_volume(vol3, vol2) if self.USESIM: # validate copyrate was set on the flash copy for i, fcmap in self.sim._fcmappings_list.items(): if fcmap['target'] == vol3['name']: self.assertEqual('copying', fcmap['status']) self._assert_vol_exists(vol3['name'], True) # Delete in the 'opposite' order to make sure it works self.driver.delete_volume(vol3) self._assert_vol_exists(vol3['name'], False) self.driver.delete_volume(vol2) self._assert_vol_exists(vol2['name'], False) self.driver.delete_snapshot(snap2) self._assert_vol_exists(snap2['name'], False) self.driver.delete_snapshot(snap1) self._assert_vol_exists(snap1['name'], False) self.driver.delete_volume(vol1) self._assert_vol_exists(vol1['name'], False) def test_storwize_svc_volumes(self): # Create a first volume 
volume = self._generate_vol_info() self.driver.create_volume(volume) self.driver.ensure_export(None, volume) # Do nothing self.driver.create_export(None, volume, {}) self.driver.remove_export(None, volume) # Make sure volume attributes are as they should be attributes = self.driver._helpers.get_vdisk_attributes(volume['name']) attr_size = float(attributes['capacity']) / units.Gi # bytes to GB self.assertEqual(attr_size, float(volume['size'])) pool = _get_test_pool() self.assertEqual(attributes['mdisk_grp_name'], pool) # Try to create the volume again (should fail) self.assertRaises(exception.VolumeBackendAPIException, self.driver.create_volume, volume) # Try to delete a volume that doesn't exist (should not fail) vol_no_exist = self._generate_vol_info() self.driver.delete_volume(vol_no_exist) # Ensure export for volume that doesn't exist (should not fail) self.driver.ensure_export(None, vol_no_exist) # Delete the volume self.driver.delete_volume(volume) def test_storwize_svc_volume_name(self): volume = self._generate_vol_info() self.driver.create_volume(volume) self.driver.ensure_export(None, volume) # Ensure lsvdisk can find the volume by name attributes = self.driver._helpers.get_vdisk_attributes(volume['name']) self.assertIn('name', attributes) self.assertEqual(volume['name'], attributes['name']) self.driver.delete_volume(volume) def test_storwize_svc_volume_params(self): # Option test matrix # Option Value Covered by test # # rsize -1 1 # rsize 2 2,3 # warning 0 2 # warning 80 3 # autoexpand True 2 # autoexpand False 3 # grainsize 32 2 # grainsize 256 3 # compression True 4 # compression False 2,3 # easytier True 1,3 # easytier False 2 # iogrp 0 1 # iogrp 1 2 # nofmtdisk False 1 # nofmtdisk True 1 opts_list = [] chck_list = [] opts_list.append({'rsize': -1, 'easytier': True, 'iogrp': '0'}) chck_list.append({'free_capacity': '0', 'easy_tier': 'on', 'IO_group_id': '0'}) opts_list.append({'rsize': -1, 'nofmtdisk': False}) chck_list.append({'formatted': 'yes'}) opts_list.append({'rsize': -1, 'nofmtdisk': True}) chck_list.append({'formatted': 'no'}) test_iogrp = '1' if self.USESIM else '0' opts_list.append({'rsize': 2, 'compression': False, 'warning': 0, 'autoexpand': True, 'grainsize': 32, 'easytier': False, 'iogrp': test_iogrp}) chck_list.append({'-free_capacity': '0', 'compressed_copy': 'no', 'warning': '0', 'autoexpand': 'on', 'grainsize': '32', 'easy_tier': 'off', 'IO_group_id': (test_iogrp)}) opts_list.append({'rsize': 2, 'compression': False, 'warning': 80, 'autoexpand': False, 'grainsize': 256, 'easytier': True}) chck_list.append({'-free_capacity': '0', 'compressed_copy': 'no', 'warning': '80', 'autoexpand': 'off', 'grainsize': '256', 'easy_tier': 'on'}) opts_list.append({'rsize': 2, 'compression': True}) chck_list.append({'-free_capacity': '0', 'compressed_copy': 'yes'}) for idx in range(len(opts_list)): attrs = self._create_test_vol(opts_list[idx]) for k, v in chck_list[idx].items(): try: if k[0] == '-': k = k[1:] self.assertNotEqual(v, attrs[k]) else: self.assertEqual(v, attrs[k]) except processutils.ProcessExecutionError as e: if 'CMMVC7050E' not in e.stderr: raise @ddt.data(('yes'), ('Yes'), ('no'), ('NO'), ('')) @mock.patch.object(storwize_svc_common.StorwizeHelpers, 'get_pool_attrs') def test_build_pool_stats_drp(self, is_drp, get_pool_attrs): get_pool_attrs.return_value = {'id': 1, 'name': 'openstack', 'data_reduction': is_drp, 'easy_tier': 'on', 'capacity': '20', 'free_capacity': '40', 'used_capacity': '0', 'real_capacity': '0', 'virtual_capacity': '0', 'status': 'online', 
'site_id': '1', 'site_name': 'site1'} pool = 'openstack' pool_stats = self.driver._build_pool_stats(pool) if is_drp in ['yes', 'Yes']: self.assertTrue(pool_stats['data_reduction']) else: self.assertFalse(pool_stats['data_reduction']) @mock.patch.object(storwize_svc_common.StorwizeHelpers, 'get_pool_attrs') def test_build_pool_stats_drp_none(self, get_pool_attrs): get_pool_attrs.return_value = {'id': 1, 'name': 'openstack1', 'easy_tier': 'on', 'capacity': '20', 'free_capacity': '40', 'used_capacity': '0', 'real_capacity': '0', 'virtual_capacity': '0', 'status': 'online', 'site_id': '1', 'site_name': 'site1'} pool = 'openstack1' pool_stats = self.driver._build_pool_stats(pool) self.assertFalse(pool_stats['data_reduction']) @ddt.data(('IOPs_limit', "50"), ('bandwidth_limit_MB', "100")) @mock.patch.object(storwize_svc_common.StorwizeHelpers, 'get_pool_volumes') @mock.patch.object(storwize_svc_common.StorwizeSSH, 'lsthrottle') @ddt.unpack def test_storwize_svc_max_pool_throttle_rate(self, fake_throttle_name, fake_throttle_value, io_throttles, get_pool_volumes): pools = _get_test_pool(get_all=True) for pool in pools: vol_throttles = [] expected_throttle_value = 0 if 'openstack' == pool: # Create volumes in the pool 'openstack' host = 'openstack@svc#%s' % pool vol1 = testutils.create_volume( self.ctxt, host=host, volume_type_id=self.vt['id']) self.driver.create_volume(vol1) self._assert_vol_exists(vol1['name'], True) vol2 = testutils.create_volume( self.ctxt, host=host, volume_type_id=self.vt['id']) self.driver.create_volume(vol2) self._assert_vol_exists(vol1['name'], True) # Set io_throttle values to volumes vol_throttles = [ {'object_name': vol1.name, 'IOPs_limit': '20', 'bandwidth_limit_MB': '40'}, {'object_name': vol2.name, 'IOPs_limit': '30', 'bandwidth_limit_MB': '60'}] expected_throttle_value = int(fake_throttle_value) get_pool_volumes.return_value = [vol1, vol2] io_throttles.return_value = vol_throttles iothrottle_value = ( self.driver._helpers.get_pool_max_throttle_rate_vdisk( pool, fake_throttle_name)) # Check the sum of throttle values set to volumes from a pool self.assertEqual(expected_throttle_value, iothrottle_value) self.assertTrue(get_pool_volumes.called) self.assertTrue(io_throttles.called) def test_storwize_svc_unicode_host_and_volume_names(self): # We'll check with iSCSI only - nothing protocol-dependent here self.driver.do_setup(None) rand_id = random.randint(10000, 99999) volume1 = self._generate_vol_info() self.driver.create_volume(volume1) self._assert_vol_exists(volume1['name'], True) self.assertRaises(exception.VolumeDriverException, self.driver._helpers.create_host, {'host': 12345}) # Add a host first to make life interesting (this host and # conn['host'] should be translated to the same prefix, and the # initiator should differentiate tmpconn1 = {'initiator': u'unicode:initiator1.%s' % rand_id, 'ip': '10.10.10.10', 'host': u'unicode.foo}.bar{.baz-%s' % rand_id} self.driver._helpers.create_host(tmpconn1, iscsi=True) # Add a host with a different prefix tmpconn2 = {'initiator': u'unicode:initiator2.%s' % rand_id, 'ip': '10.10.10.11', 'host': u'unicode.hello.world-%s' % rand_id} self.driver._helpers.create_host(tmpconn2, iscsi=True) conn = {'initiator': u'unicode:initiator3.%s' % rand_id, 'ip': '10.10.10.12', 'host': u'unicode.foo}.bar}.baz-%s' % rand_id} self.driver.initialize_connection(volume1, conn) host_name = self.driver._helpers.get_host_from_connector( conn, iscsi=True) self.assertIsNotNone(host_name) self.driver.terminate_connection(volume1, conn) host_name = 
self.driver._helpers.get_host_from_connector(conn) self.assertIsNone(host_name) self.driver.delete_volume(volume1) # Clean up temporary hosts for tmpconn in [tmpconn1, tmpconn2]: host_name = self.driver._helpers.get_host_from_connector( tmpconn, iscsi=True) self.assertIsNotNone(host_name) self.driver._helpers.delete_host(host_name) def test_storwize_svc_delete_volume_snapshots(self): # Create a volume with two snapshots master = self._create_volume() # Fail creating a snapshot - will force delete the snapshot if self.USESIM and False: snap = self._generate_snap_info(master.id) self.sim.error_injection('startfcmap', 'bad_id') self.assertRaises(exception.VolumeBackendAPIException, self.driver.create_snapshot, snap) self._assert_vol_exists(snap['name'], False) # Delete a snapshot snap = self._generate_snap_info(master.id) self.driver.create_snapshot(snap) self._assert_vol_exists(snap['name'], True) self.driver.delete_snapshot(snap) self._assert_vol_exists(snap['name'], False) # Delete a volume with snapshots (regular) snap = self._generate_snap_info(master.id) self.driver.create_snapshot(snap) self._assert_vol_exists(snap['name'], True) self.driver.delete_volume(master) self._assert_vol_exists(master['name'], False) # Fail create volume from snapshot - will force delete the volume if self.USESIM: volfs = self._generate_vol_info() self.sim.error_injection('startfcmap', 'bad_id') self.sim.error_injection('lsfcmap', 'speed_up') self.assertRaises(exception.VolumeBackendAPIException, self.driver.create_volume_from_snapshot, volfs, snap) self._assert_vol_exists(volfs['name'], False) # Create volume from snapshot and delete it volfs = self._generate_vol_info() if self.USESIM: self.sim.error_injection('lsfcmap', 'speed_up') self.driver.create_volume_from_snapshot(volfs, snap) self._assert_vol_exists(volfs['name'], True) self.driver.delete_volume(volfs) self._assert_vol_exists(volfs['name'], False) # Create volume from snapshot and delete the snapshot volfs = self._generate_vol_info() if self.USESIM: self.sim.error_injection('lsfcmap', 'speed_up') self.driver.create_volume_from_snapshot(volfs, snap) self.driver.delete_snapshot(snap) self._assert_vol_exists(snap['name'], False) # Fail create clone - will force delete the target volume if self.USESIM: clone = self._generate_vol_info() self.sim.error_injection('startfcmap', 'bad_id') self.sim.error_injection('lsfcmap', 'speed_up') self.assertRaises(exception.VolumeBackendAPIException, self.driver.create_cloned_volume, clone, volfs) self._assert_vol_exists(clone['name'], False) # Create the clone, delete the source and target clone = self._generate_vol_info() if self.USESIM: self.sim.error_injection('lsfcmap', 'speed_up') self.driver.create_cloned_volume(clone, volfs) self._assert_vol_exists(clone['name'], True) self.driver.delete_volume(volfs) self._assert_vol_exists(volfs['name'], False) self.driver.delete_volume(clone) self._assert_vol_exists(clone['name'], False) @ddt.data((True, None), (True, 5), (False, -1), (False, 100)) @ddt.unpack def test_storwize_svc_get_volume_stats( self, is_thin_provisioning_enabled, rsize): self._set_flag('reserved_percentage', 25) self._set_flag('storwize_svc_multihostmap_enabled', True) self._set_flag('storwize_svc_vol_rsize', rsize) stats = self.driver.get_volume_stats(True) for each_pool in stats['pools']: self.assertIn(each_pool['pool_name'], self._def_flags['storwize_svc_volpool_name']) self.assertTrue(each_pool['multiattach']) self.assertLessEqual(each_pool['free_capacity_gb'], each_pool['total_capacity_gb']) 
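# Annotation added for clarity (not part of the original test): the
# reserved_percentage checked next mirrors the 'reserved_percentage'
# flag set to 25 at the top of this test, and the thin/thick
# provisioning expectations come from the ddt-supplied rsize value
# (per the ddt data, rsize of -1 or 100 reports
# thin_provisioning_support as False).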
self.assertEqual(25, each_pool['reserved_percentage']) self.assertEqual(is_thin_provisioning_enabled, each_pool['thin_provisioning_support']) self.assertEqual(not is_thin_provisioning_enabled, each_pool['thick_provisioning_support']) self.assertTrue(each_pool['consistent_group_snapshot_enabled']) if self.USESIM: expected = 'storwize-svc-sim' self.assertEqual(expected, stats['volume_backend_name']) for each_pool in stats['pools']: self.assertIn(each_pool['pool_name'], self._def_flags['storwize_svc_volpool_name']) self.assertAlmostEqual(3328.0, each_pool['total_capacity_gb']) self.assertAlmostEqual(3287.5, each_pool['free_capacity_gb']) if is_thin_provisioning_enabled: self.assertAlmostEqual( 1576.96, each_pool['provisioned_capacity_gb']) def test_storwize_svc_get_volume_stats_backend_state(self): self._set_flag('storwize_svc_volpool_name', ['openstack', 'openstack1', 'openstack2']) stats = self.driver.get_volume_stats() for each_pool in stats['pools']: self.assertEqual('up', each_pool['backend_state']) self._reset_flags() self._set_flag('storwize_svc_volpool_name', ['openstack3', 'openstack4', 'openstack5']) stats = self.driver.get_volume_stats(True) for each_pool in stats['pools']: self.assertEqual('down', each_pool['backend_state']) def test_get_pool(self): ctxt = testutils.get_test_admin_context() type_ref = volume_types.create(ctxt, 'testtype', None) volume = self._generate_vol_info() volume.volume_type_id = type_ref['id'] volume.volume_type = objects.VolumeType.get_by_id(ctxt, type_ref['id']) self.driver.create_volume(volume) vol = self.driver._helpers.get_vdisk_attributes(volume.name) self.assertEqual(vol['mdisk_grp_name'], self.driver.get_pool(volume)) self.driver.delete_volume(volume) volume_types.destroy(ctxt, type_ref['id']) @ddt.data(('100', 'iops', '100', True), ('100', 'iops_per_gb', '1500', False), ('200', 'mbps', '200', False)) @mock.patch.object(storwize_svc_common.StorwizeHelpers, 'update_vdisk_qos') @ddt.unpack def test_storwize_svc_extend_volume(self, old_iothrottling_value, iothrottling_unit, new_iothrottling_value, empty_qos, update_vdisk_qos): volume = self._create_volume() if empty_qos: self.driver.extend_volume(volume, '13') attrs = self.driver._helpers.get_vdisk_attributes(volume['name']) vol_size = int(attrs['capacity']) / units.Gi self.assertAlmostEqual(vol_size, 13) self.assertFalse(update_vdisk_qos.called) snap = self._generate_snap_info(volume.id) self.driver.create_snapshot(snap) self._assert_vol_exists(snap['name'], True) self.assertRaises(exception.VolumeDriverException, self.driver.extend_volume, volume, '16') self.driver.delete_snapshot(snap) with mock.patch.object(storwize_svc_iscsi.StorwizeSVCISCSIDriver, '_get_vdisk_params') as get_vdisk_params: # If qos is specified for source volume with 'iops_per_gb' as # IOThrottling_unit, update_vdisk_qos will be called for # extend_volume. 
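# Worked example (derived from the ddt.data tuples above, illustrative
# only): a per-GB limit must be recomputed when the size changes, so
# 100 iops_per_gb on the volume extended to 15 GB corresponds to the
# 1500 value carried in that ddt tuple, and update_vdisk_qos is
# expected to be re-invoked with the new size; for the size-independent
# 'iops' and 'mbps' units the test asserts no such call is made.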
fake_opts_qos = self._get_default_opts() fake_opts_qos['qos'] = {'IOThrottling': new_iothrottling_value, 'IOThrottling_unit': iothrottling_unit} get_vdisk_params.return_value = fake_opts_qos self.driver.extend_volume(volume, 15) attrs = self.driver._helpers.get_vdisk_attributes(volume['name']) vol_size = int(attrs['capacity']) / units.Gi if fake_opts_qos['qos']['IOThrottling_unit'] == 'iops_per_gb': update_vdisk_qos.assert_called_with(volume['name'], fake_opts_qos['qos'], vol_size) else: self.assertFalse(update_vdisk_qos.called) self.driver.delete_volume(volume) @mock.patch.object(storwize_rep.StorwizeSVCReplicationGlobalMirror, 'create_relationship') @mock.patch.object(storwize_rep.StorwizeSVCReplicationGlobalMirror, 'extend_target_volume') @mock.patch.object(storwize_svc_common.StorwizeHelpers, 'delete_relationship') @mock.patch.object(storwize_svc_common.StorwizeHelpers, 'get_relationship_info') def _storwize_svc_extend_volume_replication(self, get_relationship, delete_relationship, extend_target_volume, create_relationship): fake_target = mock.Mock() rep_type = 'global' self.driver.replications[rep_type] = ( self.driver.replication_factory(rep_type, fake_target)) volume = self._create_volume() volume['replication_status'] = fields.ReplicationStatus.ENABLED fake_target_vol = 'vol-target-id' get_relationship.return_value = {'aux_vdisk_name': fake_target_vol} with mock.patch.object( self.driver, '_get_volume_replicated_type_mirror') as mirror_type: mirror_type.return_value = 'global' self.driver.extend_volume(volume, '13') attrs = self.driver._helpers.get_vdisk_attributes(volume['name']) vol_size = int(attrs['capacity']) / units.Gi self.assertAlmostEqual(vol_size, 13) delete_relationship.assert_called_once_with(volume['name']) extend_target_volume.assert_called_once_with(fake_target_vol, 12) create_relationship.assert_called_once_with(volume, fake_target_vol) self.driver.delete_volume(volume) def _storwize_svc_extend_volume_replication_failover(self): volume = self._create_volume() volume['replication_status'] = fields.ReplicationStatus.FAILED_OVER with mock.patch.object( self.driver, '_get_volume_replicated_type_mirror') as mirror_type: mirror_type.return_value = 'global' self.driver.extend_volume(volume, '13') attrs = self.driver._helpers.get_vdisk_attributes(volume['name']) vol_size = int(attrs['capacity']) / units.Gi self.assertAlmostEqual(vol_size, 13) self.driver.delete_volume(volume) def _check_loc_info(self, capabilities, expected): host = {'host': 'foo', 'capabilities': capabilities} vol = {'name': 'test', 'id': 1, 'size': 1} ctxt = context.get_admin_context() moved, model_update = self.driver.migrate_volume(ctxt, vol, host) self.assertEqual(expected['moved'], moved) self.assertEqual(expected['model_update'], model_update) def test_storwize_svc_migrate_bad_loc_info(self): self._check_loc_info({}, {'moved': False, 'model_update': None}) cap = {'location_info': 'foo'} self._check_loc_info(cap, {'moved': False, 'model_update': None}) cap = {'location_info': 'FooDriver:foo:bar'} self._check_loc_info(cap, {'moved': False, 'model_update': None}) cap = {'location_info': 'StorwizeSVCDriver:foo:bar'} self._check_loc_info(cap, {'moved': False, 'model_update': None}) def test_storwize_svc_volume_migrate(self): # Make sure we don't call migrate_volume_vdiskcopy self.driver.do_setup(None) loc = ('StorwizeSVCDriver:' + self.driver._state['system_id'] + ':openstack2') cap = {'location_info': loc, 'extent_size': '256'} host = {'host': 'openstack@svc#openstack2', 'capabilities': cap} ctxt = 
context.get_admin_context() volume = self._create_volume() volume['volume_type_id'] = None self.driver.migrate_volume(ctxt, volume, host) self._delete_volume(volume) @ddt.data(('5000', 'iops', 5000, 'iops', True), ('500', 'iops_per_gb', 500, 'iops_per_gb', False), ('2000', 'mbps', 2000, 'mbps', False)) @ddt.unpack def test_storwize_svc_get_vdisk_params(self, fake_iothrottling_value, fake_iothrottling_unit, expected_iothrottling_value, expected_iothrottling_unit, empty_qos): self.driver.do_setup(None) fake_qos = {'qos:IOThrottling': fake_iothrottling_value, 'qos:IOThrottling_unit': fake_iothrottling_unit} expected_qos = {'IOThrottling': float(expected_iothrottling_value), 'IOThrottling_unit': expected_iothrottling_unit} fake_opts = self._get_default_opts() # The parameters retured should be the same to the default options, # if the QoS is empty. if empty_qos: vol_type_empty_qos = self._create_volume_type_qos(True, None) type_id = vol_type_empty_qos['id'] params = \ self.driver._get_vdisk_params(type_id, volume_type=vol_type_empty_qos, volume_metadata=None) self.assertEqual(fake_opts, params) volume_types.destroy(self.ctxt, type_id) # If the QoS is set via the qos association with the volume type, # qos value should be set in the retured parameters. vol_type_qos = self._create_volume_type_qos(False, fake_qos) type_id = vol_type_qos['id'] # If type_id is not none and volume_type is none, it should work fine. params = self.driver._get_vdisk_params(type_id, volume_type=None, volume_metadata=None) self.assertEqual(expected_qos, params['qos']) # If type_id is not none and volume_type is not none, it should # work fine. params = self.driver._get_vdisk_params(type_id, volume_type=vol_type_qos, volume_metadata=None) self.assertEqual(expected_qos, params['qos']) # If type_id is none and volume_type is not none, it should work fine. params = self.driver._get_vdisk_params(None, volume_type=vol_type_qos, volume_metadata=None) self.assertEqual(expected_qos, params['qos']) # If both type_id and volume_type are none, no qos will be returned # in the parameter. params = self.driver._get_vdisk_params(None, volume_type=None, volume_metadata=None) self.assertIsNone(params['qos']) qos_spec = volume_types.get_volume_type_qos_specs(type_id) volume_types.destroy(self.ctxt, type_id) qos_specs.delete(self.ctxt, qos_spec['qos_specs']['id']) # If the QoS is set via the extra specs in the volume type, # qos value should be set in the retured parameters. vol_type_qos = self._create_volume_type_qos(True, fake_qos) type_id = vol_type_qos['id'] # If type_id is not none and volume_type is none, it should work fine. params = self.driver._get_vdisk_params(type_id, volume_type=None, volume_metadata=None) self.assertEqual(expected_qos, params['qos']) # If type_id is not none and volume_type is not none, # it should work fine. params = self.driver._get_vdisk_params(type_id, volume_type=vol_type_qos, volume_metadata=None) self.assertEqual(expected_qos, params['qos']) # If type_id is none and volume_type is not none, # it should work fine. params = self.driver._get_vdisk_params(None, volume_type=vol_type_qos, volume_metadata=None) self.assertEqual(expected_qos, params['qos']) # If both type_id and volume_type are none, no qos will be returned # in the parameter. params = self.driver._get_vdisk_params(None, volume_type=None, volume_metadata=None) self.assertIsNone(params['qos']) volume_types.destroy(self.ctxt, type_id) # If the QoS is set in the volume metadata, # qos value should be set in the retured parameters. 
metadata = [{'key': 'qos:IOThrottling', 'value': '4000'}, {'key': 'qos:IOThrottling_unit', 'value': fake_iothrottling_unit}] expected_qos_metadata = { 'IOThrottling': 4000.0, 'IOThrottling_unit': expected_iothrottling_unit} params = self.driver._get_vdisk_params(None, volume_type=None, volume_metadata=metadata) self.assertEqual(expected_qos_metadata, params['qos']) # If the QoS is set both in the metadata and the volume type, the one # in the volume type will take effect. vol_type_qos = self._create_volume_type_qos(True, fake_qos) type_id = vol_type_qos['id'] params = self.driver._get_vdisk_params(type_id, volume_type=None, volume_metadata=metadata) self.assertEqual(expected_qos, params['qos']) volume_types.destroy(self.ctxt, type_id) # If the QoS is set both via the qos association and the # extra specs, the one from the qos association will take effect. fake_qos_associate = {'qos:IOThrottling': '6000', 'qos:IOThrottling_unit': fake_iothrottling_unit} expected_qos_associate = { 'IOThrottling': 6000.0, 'IOThrottling_unit': expected_iothrottling_unit} vol_type_qos = self._create_volume_type_qos_both(fake_qos, fake_qos_associate) type_id = vol_type_qos['id'] params = self.driver._get_vdisk_params(type_id, volume_type=None, volume_metadata=None) self.assertEqual(expected_qos_associate, params['qos']) qos_spec = volume_types.get_volume_type_qos_specs(type_id) volume_types.destroy(self.ctxt, type_id) qos_specs.delete(self.ctxt, qos_spec['qos_specs']['id']) @ddt.data(('5000', 'iops', 500, 'iops_per_gb', True), ('5000', 'iops', '2000', 'mbps', False), ('500', 'iops_per_gb', '5000', 'iops', False), ('500', 'iops_per_gb', '2000', 'mbps', False), ('2000', 'mbps', '5000', 'iops', False), ('2000', 'mbps', '500', 'iops_per_gb', False)) @mock.patch.object(storwize_svc_common.StorwizeHelpers, 'disable_vdisk_qos') @mock.patch.object(storwize_svc_common.StorwizeHelpers, 'update_vdisk_qos') @ddt.unpack def test_storwize_svc_retype_no_copy(self, old_iothrottling_value, old_iothrottling_unit, new_iothrottling_value, new_iothrottling_unit, empty_qos, update_vdisk_qos, disable_vdisk_qos): self.driver.do_setup(None) loc = ('StorwizeSVCDriver:' + self.driver._state['system_id'] + ':openstack') cap = {'location_info': loc, 'extent_size': '128'} self.driver._stats = {'location_info': loc} host = {'host': 'openstack@svc#openstack', 'capabilities': cap} ctxt = context.get_admin_context() key_specs_old = {'easytier': False, 'warning': 2, 'autoexpand': True} key_specs_new = {'easytier': True, 'warning': 5, 'autoexpand': False} old_type_ref = volume_types.create(ctxt, 'old', key_specs_old) new_type_ref = volume_types.create(ctxt, 'new', key_specs_new) diff, _equal = volume_types.volume_types_diff(ctxt, old_type_ref['id'], new_type_ref['id']) old_type = objects.VolumeType.get_by_id(ctxt, old_type_ref['id']) volume = self._generate_vol_info(old_type) volume['host'] = host['host'] new_type = objects.VolumeType.get_by_id(ctxt, new_type_ref['id']) self.driver.create_volume(volume) self.driver.retype(ctxt, volume, new_type, diff, host) attrs = self.driver._helpers.get_vdisk_attributes(volume['name']) self.assertEqual('on', attrs['easy_tier'], 'Volume retype failed') self.assertEqual('5', attrs['warning'], 'Volume retype failed') self.assertEqual('off', attrs['autoexpand'], 'Volume retype failed') self.driver.delete_volume(volume) fake_opts = self._get_default_opts() fake_opts_old = self._get_default_opts() fake_opts_old['qos'] = {'IOThrottling': old_iothrottling_value, 'IOThrottling_unit': old_iothrottling_unit} fake_opts_qos 
= self._get_default_opts() fake_opts_qos['qos'] = {'IOThrottling': new_iothrottling_value, 'IOThrottling_unit': new_iothrottling_unit} self.driver.create_volume(volume) with mock.patch.object(storwize_svc_iscsi.StorwizeSVCISCSIDriver, '_get_vdisk_params') as get_vdisk_params: # If qos is empty for both the source and target volumes, # update_vdisk_qos and disable_vdisk_qos will not be called for # retype. if empty_qos: get_vdisk_params.side_effect = [fake_opts, fake_opts, fake_opts] self.driver.retype(ctxt, volume, new_type, diff, host) self.assertFalse(update_vdisk_qos.called) self.assertFalse(disable_vdisk_qos.called) self.driver.delete_volume(volume) self.driver.create_volume(volume) update_vdisk_qos.reset_mock() with mock.patch.object(storwize_svc_iscsi.StorwizeSVCISCSIDriver, '_get_vdisk_params') as get_vdisk_params: # If qos is specified for both source and target volumes, # update_vdisk_qos will be called for retype, and disable_vdisk_qos # will not be called. get_vdisk_params.side_effect = [fake_opts_old, fake_opts_qos, fake_opts_old] self.driver.retype(ctxt, volume, new_type, diff, host) update_vdisk_qos.assert_called_with(volume['name'], fake_opts_qos['qos'], volume['size']) self.assertFalse(disable_vdisk_qos.called) self.driver.delete_volume(volume) self.driver.create_volume(volume) update_vdisk_qos.reset_mock() with mock.patch.object(storwize_svc_iscsi.StorwizeSVCISCSIDriver, '_get_vdisk_params') as get_vdisk_params: # If qos is empty for source and speficied for target volume, # update_vdisk_qos will be called for retype, and disable_vdisk_qos # will not be called. get_vdisk_params.side_effect = [fake_opts, fake_opts_qos, fake_opts] self.driver.retype(ctxt, volume, new_type, diff, host) update_vdisk_qos.assert_called_with(volume['name'], fake_opts_qos['qos'], volume['size']) self.assertFalse(disable_vdisk_qos.called) self.driver.delete_volume(volume) self.driver.create_volume(volume) update_vdisk_qos.reset_mock() with mock.patch.object(storwize_svc_iscsi.StorwizeSVCISCSIDriver, '_get_vdisk_params') as get_vdisk_params: # If qos is empty for target volume and specified for source # volume, update_vdisk_qos will not be called for retype, and # disable_vdisk_qos will be called. 
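# Reading note (assumption about call order, inferred from the queued
# side_effect values below rather than from the driver source): the
# first entry in the side_effect list is consumed as the source
# volume's options, so fake_opts_qos here represents the qos being
# removed, which is why disable_vdisk_qos is asserted to receive
# fake_opts_qos['qos'] while update_vdisk_qos stays uncalled.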
get_vdisk_params.side_effect = [fake_opts_qos, fake_opts, fake_opts] self.driver.retype(ctxt, volume, new_type, diff, host) self.assertFalse(update_vdisk_qos.called) disable_vdisk_qos.assert_called_with(volume['name'], fake_opts_qos['qos']) self.driver.delete_volume(volume) def test_storwize_svc_retype_only_change_iogrp(self): self.driver.do_setup(None) loc = ('StorwizeSVCDriver:' + self.driver._state['system_id'] + ':openstack') cap = {'location_info': loc, 'extent_size': '128'} self.driver._stats = {'location_info': loc} host = {'host': 'openstack@svc#openstack', 'capabilities': cap} ctxt = context.get_admin_context() key_specs_old = {'iogrp': 0} key_specs_new = {'iogrp': 1} old_type_ref = volume_types.create(ctxt, 'old', key_specs_old) new_type_ref = volume_types.create(ctxt, 'new', key_specs_new) diff, _equal = volume_types.volume_types_diff(ctxt, old_type_ref['id'], new_type_ref['id']) old_type = objects.VolumeType.get_by_id(ctxt, old_type_ref['id']) volume = self._generate_vol_info(old_type) volume['host'] = host['host'] new_type = objects.VolumeType.get_by_id(ctxt, new_type_ref['id']) self.driver.create_volume(volume) self.driver.retype(ctxt, volume, new_type, diff, host) attrs = self.driver._helpers.get_vdisk_attributes(volume['name']) self.assertEqual('1', attrs['IO_group_id'], 'Volume retype ' 'failed') self.driver.delete_volume(volume) # retype a volume in dr_pool loc = ('StorwizeSVCDriver:' + self.driver._state['system_id'] + ':openstack3') cap = {'location_info': loc, 'extent_size': '128'} self.driver._stats = {'location_info': loc} host = {'host': 'openstack@svc#openstack3', 'capabilities': cap} volume = testutils.create_volume( self.ctxt, volume_type_id=old_type.id, host='openstack@svc#hyperswap3') volume['host'] = host['host'] new_type = objects.VolumeType.get_by_id(ctxt, new_type_ref['id']) self.driver.create_volume(volume) self.assertRaises(exception.VolumeDriverException, self.driver.retype, ctxt, volume, new_type, diff, host) @ddt.data(('5000', 'iops', 500, 'iops_per_gb', True), ('5000', 'iops', '2000', 'mbps', False), ('500', 'iops_per_gb', '5000', 'iops', False), ('500', 'iops_per_gb', '2000', 'mbps', False), ('2000', 'mbps', '5000', 'iops', False), ('2000', 'mbps', '500', 'iops_per_gb', False)) @mock.patch.object(storwize_svc_common.StorwizeHelpers, 'disable_vdisk_qos') @mock.patch.object(storwize_svc_common.StorwizeHelpers, 'update_vdisk_qos') @ddt.unpack def test_storwize_svc_retype_need_copy(self, old_iothrottling_value, old_iothrottling_unit, new_iothrottling_value, new_iothrottling_unit, empty_qos, update_vdisk_qos, disable_vdisk_qos): with mock.patch.object(storwize_svc_common.StorwizeHelpers, 'get_system_info') as get_system_info: fake_system_info = {'code_level': (7, 7, 0, 0), 'topology': 'standard', 'system_name': 'storwize-svc-sim', 'system_id': '0123456789ABCDEF'} get_system_info.return_value = fake_system_info self.driver.do_setup(None) loc = ('StorwizeSVCDriver:' + self.driver._state['system_id'] + ':openstack') cap = {'location_info': loc, 'extent_size': '128'} self.driver._stats = {'location_info': loc} host = {'host': 'openstack@svc#openstack', 'capabilities': cap} ctxt = context.get_admin_context() key_specs_old = {'compression': True, 'iogrp': 0} key_specs_new = {'compression': False, 'iogrp': 1} old_type_ref = volume_types.create(ctxt, 'old', key_specs_old) new_type_ref = volume_types.create(ctxt, 'new', key_specs_new) diff, _equal = volume_types.volume_types_diff(ctxt, old_type_ref['id'], new_type_ref['id']) old_type = 
objects.VolumeType.get_by_id(ctxt, old_type_ref['id']) volume = self._generate_vol_info(old_type) volume['host'] = host['host'] new_type = objects.VolumeType.get_by_id(ctxt, new_type_ref['id']) self.driver.create_volume(volume) self.driver.retype(ctxt, volume, new_type, diff, host) attrs = self.driver._helpers.get_vdisk_attributes(volume['name']) self.assertEqual('no', attrs['compressed_copy']) self.assertEqual('1', attrs['IO_group_id'], 'Volume retype ' 'failed') self.driver.delete_volume(volume) fake_opts = self._get_default_opts() fake_opts_old = self._get_default_opts() fake_opts_old['qos'] = {'IOThrottling': old_iothrottling_value, 'IOThrottling_unit': old_iothrottling_unit} fake_opts_qos = self._get_default_opts() fake_opts_qos['qos'] = {'IOThrottling': new_iothrottling_value, 'IOThrottling_unit': new_iothrottling_unit} self.driver.create_volume(volume) with mock.patch.object(storwize_svc_iscsi.StorwizeSVCISCSIDriver, '_get_vdisk_params') as get_vdisk_params: # If qos is empty for both the source and target volumes, # update_vdisk_qos and disable_vdisk_qos will not be called for # retype. if empty_qos: get_vdisk_params.side_effect = [fake_opts, fake_opts, fake_opts] self.driver.retype(ctxt, volume, new_type, diff, host) self.assertFalse(update_vdisk_qos.called) self.assertFalse(disable_vdisk_qos.called) self.driver.delete_volume(volume) self.driver.create_volume(volume) update_vdisk_qos.reset_mock() with mock.patch.object(storwize_svc_iscsi.StorwizeSVCISCSIDriver, '_get_vdisk_params') as get_vdisk_params: # If qos is specified for both source and target volumes, # update_vdisk_qos will be called for retype, and disable_vdisk_qos # will not be called. get_vdisk_params.side_effect = [fake_opts_old, fake_opts_qos, fake_opts_qos] self.driver.retype(ctxt, volume, new_type, diff, host) update_vdisk_qos.assert_called_with(volume['name'], fake_opts_qos['qos'], volume['size']) self.assertFalse(disable_vdisk_qos.called) self.driver.delete_volume(volume) self.driver.create_volume(volume) update_vdisk_qos.reset_mock() with mock.patch.object(storwize_svc_iscsi.StorwizeSVCISCSIDriver, '_get_vdisk_params') as get_vdisk_params: # If qos is empty for source and speficied for target volume, # update_vdisk_qos will be called for retype, and disable_vdisk_qos # will not be called. get_vdisk_params.side_effect = [fake_opts, fake_opts_qos, fake_opts] self.driver.retype(ctxt, volume, new_type, diff, host) update_vdisk_qos.assert_called_with(volume['name'], fake_opts_qos['qos'], volume['size']) self.assertFalse(disable_vdisk_qos.called) self.driver.delete_volume(volume) self.driver.create_volume(volume) update_vdisk_qos.reset_mock() with mock.patch.object(storwize_svc_iscsi.StorwizeSVCISCSIDriver, '_get_vdisk_params') as get_vdisk_params: # If qos is empty for target volume and specified for source # volume, update_vdisk_qos will not be called for retype, and # disable_vdisk_qos will be called. 
get_vdisk_params.side_effect = [fake_opts_qos, fake_opts, fake_opts] self.driver.retype(ctxt, volume, new_type, diff, host) self.assertFalse(update_vdisk_qos.called) disable_vdisk_qos.assert_called_with(volume['name'], fake_opts_qos['qos']) self.driver.delete_volume(volume) def test_set_storage_code_level_success(self): res = self.driver._helpers.get_system_info() if self.USESIM: self.assertEqual((7, 2, 0, 0), res['code_level'], 'Get code level error') @mock.patch.object(storwize_svc_common.StorwizeHelpers, 'rename_vdisk') def test_storwize_update_migrated_volume(self, rename_vdisk): ctxt = testutils.get_test_admin_context() backend_volume = self._create_volume() volume = self._create_volume() model_update = self.driver.update_migrated_volume(ctxt, volume, backend_volume, 'available') rename_vdisk.assert_called_once_with(backend_volume.name, volume.name) self.assertEqual({'_name_id': None}, model_update) rename_vdisk.reset_mock() rename_vdisk.side_effect = exception.VolumeBackendAPIException(data='') model_update = self.driver.update_migrated_volume(ctxt, volume, backend_volume, 'available') self.assertEqual({'_name_id': backend_volume.id}, model_update) rename_vdisk.reset_mock() rename_vdisk.side_effect = exception.VolumeBackendAPIException(data='') model_update = self.driver.update_migrated_volume(ctxt, volume, backend_volume, 'attached') self.assertEqual({'_name_id': backend_volume.id}, model_update) rename_vdisk.assert_called_once_with(backend_volume.name, volume.name) # Now back to first 'available' test, but with volume names that don't # match the driver's name template. Need to use mock vols to set name. rename_vdisk.reset_mock() rename_vdisk.side_effect = None class MockVol(dict): def __getattr__(self, attr): return self.get(attr, None) target_vol = MockVol(id='1', name='new-vol-name', volume_type_id=None) orig_vol = MockVol(id='2', name='orig-vol-name', volume_type_id=None) model_update = self.driver.update_migrated_volume(ctxt, orig_vol, target_vol, 'available') rename_vdisk.assert_called_once_with('new-vol-name', 'orig-vol-name') self.assertEqual({'_name_id': None}, model_update) def test_storwize_vdisk_copy_ops(self): ctxt = testutils.get_test_admin_context() volume = self._create_volume() driver = self.driver dest_pool = volume_utils.extract_host(volume['host'], 'pool') new_ops = driver._helpers.add_vdisk_copy(volume['name'], dest_pool, None, self.driver._state, self.driver.configuration) self.driver._add_vdisk_copy_op(ctxt, volume, new_ops) admin_metadata = self.db.volume_admin_metadata_get(ctxt, volume['id']) self.assertEqual(":".join(x for x in new_ops), admin_metadata['vdiskcopyops'], 'Storwize driver add vdisk copy error.') self.driver._check_volume_copy_ops() self.driver._rm_vdisk_copy_op(ctxt, volume, new_ops[0], new_ops[1]) admin_metadata = self.db.volume_admin_metadata_get(ctxt, volume['id']) self.assertNotIn('vdiskcopyops', admin_metadata, 'Storwize driver delete vdisk copy error') self._delete_volume(volume) def test_storwize_delete_with_vdisk_copy_ops(self): volume = self._create_volume() self.driver._vdiskcopyops = {volume['id']: [('0', '1')]} with mock.patch.object(self.driver, '_vdiskcopyops_loop'): self.assertIn(volume['id'], self.driver._vdiskcopyops) self.driver.delete_volume(volume) self.assertNotIn(volume['id'], self.driver._vdiskcopyops) # Test groups operation #### @ddt.data(({'group_replication_enabled': ' True'}, {}), ({'group_replication_enabled': ' True', 'consistent_group_snapshot_enabled': ' True'}, {}), ({'group_snapshot_enabled': ' True'}, {}), 
({'consistent_group_snapshot_enabled': ' True'}, {'replication_enabled': ' True', 'replication_type': ' metro'})) @ddt.unpack def test_storwize_group_create_with_replication(self, grp_sepc, vol_spec): """Test group create.""" gr_type_ref = group_types.create(self.ctxt, 'gr_type', grp_sepc) gr_type = objects.GroupType.get_by_id(self.ctxt, gr_type_ref['id']) vol_type_ref = volume_types.create(self.ctxt, 'vol_type', vol_spec) group = testutils.create_group(self.ctxt, group_type_id=gr_type.id, volume_type_ids=[vol_type_ref['id']]) if 'group_snapshot_enabled' in grp_sepc: self.assertRaises(NotImplementedError, self.driver.create_group, self.ctxt, group) else: model_update = self.driver.create_group(self.ctxt, group) self.assertEqual(fields.GroupStatus.ERROR, model_update['status']) @mock.patch.object(storwize_svc_common.StorwizeHelpers, 'create_volumegroup') @mock.patch.object(storwize_svc_common.StorwizeHelpers, 'delete_volumegroup') def test_storwize_create_and_delete_volumegroup(self, delete_volumegroup, create_volumegroup): """Test volume group creation and deletion""" with mock.patch.object(storwize_svc_common.StorwizeHelpers, 'get_system_info') as get_system_info: fake_system_info = {'code_level': self.VOLUMEGROUP_CODE_LEVEL, 'system_name': 'storwize-svc-sim', 'system_id': '0123456789ABCDEF'} get_system_info.return_value = fake_system_info self.driver.do_setup(None) volumegroup_spec = {'volume_group_enabled': ' True'} volumegroup_type_ref = group_types.create(self.ctxt, 'volumegroup_type', volumegroup_spec) volumegroup_type = objects.GroupType.get_by_id( self.ctxt, volumegroup_type_ref['id']) vol_type_ref = volume_types.create(self.ctxt, 'non_rep_type', {}) volumegroup = testutils.create_group( self.ctxt, group_type_id=volumegroup_type.id, volume_type_ids=[vol_type_ref['id']]) # Create Volume Group model_update = self.driver.create_group(self.ctxt, volumegroup) self.assertTrue(create_volumegroup.called) self.assertEqual(fields.GroupStatus.AVAILABLE, model_update['status']) # Delete Volume Group model_update = self.driver.delete_group(self.ctxt, volumegroup, []) self.assertTrue(delete_volumegroup.called) self.assertEqual(fields.GroupStatus.DELETED, model_update[0]['status']) @mock.patch.object(storwize_svc_common.StorwizeHelpers, 'create_volumegroup') @mock.patch.object(storwize_svc_common.StorwizeHelpers, 'delete_volumegroup') def test_storwize_create_and_delete_temp_volumegroup( self, delete_volumegroup, create_volumegroup): """Test temporary volume group creation""" with mock.patch.object(storwize_svc_common.StorwizeHelpers, 'get_system_info') as get_system_info: fake_system_info = {'code_level': self.TEMP_VOLUMEGROUP_CODE_LEVEL, 'system_name': 'storwize-svc-sim', 'system_id': '0123456789ABCDEF'} get_system_info.return_value = fake_system_info self.driver.do_setup(None) volumegroup_spec = {'temporary_volume_group_enabled': ' True'} volumegroup_type_ref = group_types.create(self.ctxt, 'volumegroup_type', volumegroup_spec) volumegroup_type = objects.GroupType.get_by_id( self.ctxt, volumegroup_type_ref['id']) vol_type_ref = volume_types.create(self.ctxt, 'non_rep_type', {}) volumegroup = testutils.create_group( self.ctxt, group_type_id=volumegroup_type.id, volume_type_ids=[vol_type_ref['id']]) # Create Volume Group model_update = self.driver.create_group(self.ctxt, volumegroup) self.assertFalse(create_volumegroup.called) self.assertEqual(fields.GroupStatus.AVAILABLE, model_update['status']) # Delete Volume Group model_update = self.driver.delete_group(self.ctxt, volumegroup, []) 
self.assertFalse(delete_volumegroup.called) self.assertEqual(fields.GroupStatus.DELETED, model_update[0]['status']) def test_check_codelevel_for_temp_volumegroup_fail(self): code_level = (8, 5, 2, 0) self.assertRaises(exception.VolumeDriverException, self.driver._helpers .check_codelevel_for_temp_volumegroup, code_level) def test_check_code_level_within_limit(self): test_cases = [ ((8, 5, 2, 0), True), # test case for True result ((8, 5, 4, 0), False) # test case for False result ] for code_level, expected in test_cases: result = self.driver._helpers.check_code_level_within_limit( (8, 5, 1, 0), (8, 5, 3, 0), code_level) if expected: self.assertTrue(result) else: self.assertFalse(result) @mock.patch.object(storwize_svc_common.StorwizeHelpers, 'create_volumegroup') @mock.patch.object(storwize_svc_common.StorwizeHelpers, 'delete_volumegroup') def test_storwize_update_volumegroup(self, delete_volumegroup, create_volumegroup): """Test volume group updation""" with mock.patch.object(storwize_svc_common.StorwizeHelpers, 'get_system_info') as get_system_info: fake_system_info = {'code_level': self.VOLUMEGROUP_CODE_LEVEL, 'system_name': 'storwize-svc-sim', 'system_id': '0123456789ABCDEF'} get_system_info.return_value = fake_system_info self.driver.do_setup(None) # Create volumegroup type volumegroup_spec = {'volume_group_enabled': ' True'} volumegroup_type_ref = group_types.create(self.ctxt, 'volumegroup_type', volumegroup_spec) volumegroup_type = objects.GroupType.get_by_id( self.ctxt, volumegroup_type_ref['id']) # Create volume vol_type_ref = volume_types.create(self.ctxt, 'non_rep_type', {}) vol_type = objects.VolumeType.get_by_id(self.ctxt, vol_type_ref['id']) volume = self._generate_vol_info(vol_type) self.driver.create_volume(volume) # Create volumegroup volumegroup = testutils.create_group( self.ctxt, group_type_id=volumegroup_type.id, volume_type_ids=[vol_type_ref['id']]) model_update = self.driver.create_group(self.ctxt, volumegroup) self.assertTrue(create_volumegroup.called) self.assertEqual(fields.GroupStatus.AVAILABLE, model_update['status']) add_vols = [volume] remove_vols = [volume] with mock.patch.object( storwize_svc_common.StorwizeSVCCommonDriver, '_update_volumegroup') as _update_volumegroup: model_update = {'status': 'available'} fake_update_volumegroup_info = [model_update, add_vols, None] _update_volumegroup.return_value = fake_update_volumegroup_info (model_update, add_volumes_update, remove_volumes_update) = self.driver.update_group( self.ctxt, volumegroup, add_vols, []) self.assertTrue(_update_volumegroup.called) self.assertEqual(fields.GroupStatus.AVAILABLE, model_update['status']) model_update = {'status': 'available'} fake_update_volumegroup_info = [model_update, None, remove_vols] _update_volumegroup.return_value = ( fake_update_volumegroup_info) (model_update, add_volumes_update, remove_volumes_update) = self.driver.update_group( self.ctxt, volumegroup, [], remove_vols) self.assertTrue(_update_volumegroup.called) self.assertEqual(fields.GroupStatus.AVAILABLE, model_update['status']) # Delete Volume Group model_update = self.driver.delete_group(self.ctxt, volumegroup, []) self.assertTrue(delete_volumegroup.called) self.assertEqual(fields.GroupStatus.DELETED, model_update[0]['status']) def test_storwize_update_temp_volumegroup(self): """Test temporary volume group updation""" with mock.patch.object(storwize_svc_common.StorwizeHelpers, 'get_system_info') as get_system_info: fake_system_info = {'code_level': self.TEMP_VOLUMEGROUP_CODE_LEVEL, 'system_name': 
'storwize-svc-sim', 'system_id': '0123456789ABCDEF'} get_system_info.return_value = fake_system_info self.driver.do_setup(None) # Create temporary volumegroup type volumegroup_spec = {'temporary_volume_group_enabled': ' True'} volumegroup_type_ref = group_types.create(self.ctxt, 'volumegroup_type', volumegroup_spec) volumegroup_type = objects.GroupType.get_by_id( self.ctxt, volumegroup_type_ref['id']) # Create volume vol_type_ref = volume_types.create(self.ctxt, 'non_rep_type', {}) vol_type = objects.VolumeType.get_by_id(self.ctxt, vol_type_ref['id']) volume = self._generate_vol_info(vol_type) self.driver.create_volume(volume) # Create volumegroup volumegroup = testutils.create_group( self.ctxt, group_type_id=volumegroup_type.id, volume_type_ids=[vol_type_ref['id']]) model_update = self.driver.create_group(self.ctxt, volumegroup) self.assertEqual(fields.GroupStatus.AVAILABLE, model_update['status']) add_vols = [volume] remove_vols = [volume] with mock.patch.object( storwize_svc_common.StorwizeSVCCommonDriver, '_update_temporary_volumegroup') as _update_temp_vg: model_update = {'status': 'available'} fake_update_volumegroup_info = [model_update, add_vols, None] _update_temp_vg.return_value = fake_update_volumegroup_info (model_update, add_volumes_update, remove_volumes_update) = self.driver.update_group( self.ctxt, volumegroup, add_vols, []) self.assertTrue(_update_temp_vg.called) self.assertEqual(fields.GroupStatus.AVAILABLE, model_update['status']) model_update = {'status': 'available'} fake_update_volumegroup_info = [model_update, None, remove_vols] _update_temp_vg.return_value = ( fake_update_volumegroup_info) (model_update, add_volumes_update, remove_volumes_update) = self.driver.update_group( self.ctxt, volumegroup, [], remove_vols) self.assertTrue(_update_temp_vg.called) self.assertEqual(fields.GroupStatus.AVAILABLE, model_update['status']) @mock.patch.object(storwize_svc_common.StorwizeHelpers, 'get_system_info') def test_storwize_create_and_delete_volumegroup_snapshot( self, get_system_info): """Test creation and deletion of volumegroup snapshot""" fake_system_info = {'code_level': self.VOLUMEGROUP_CODE_LEVEL, 'system_name': 'storwize-svc-sim', 'system_id': '0123456789ABCDEF'} get_system_info.return_value = fake_system_info self.driver.do_setup(None) # Create volume vol_type_ref = volume_types.create(self.ctxt, 'non_rep_type', {}) vol_type = objects.VolumeType.get_by_id(self.ctxt, vol_type_ref['id']) volume = self._generate_vol_info(vol_type) self.driver.create_volume(volume) # Create volumegroup type and volumegroup (volumegroup_type_ref, volumegroup_type, volumegroup, model_update) = self._create_volumegroup_type_and_volumegroup( vol_type_ref) self.assertEqual(fields.GroupStatus.AVAILABLE, model_update['status']) # Add volumes to volumegroup add_vols = [volume] remove_vols = [volume] (model_update, add_volumes_update, remove_volumes_update) = ( self.driver.update_group(self.ctxt, volumegroup, add_vols, [])) self.assertEqual(fields.GroupStatus.AVAILABLE, model_update['status']) # Create group-snapshot group_snapshot, snapshots = self._create_group_snapshot_in_db( volumegroup.id, group_type_id=volumegroup_type_ref.id) model_update, snapshots_model = ( self.driver.create_group_snapshot(self.ctxt, group_snapshot, snapshots)) self.assertEqual(fields.GroupSnapshotStatus.AVAILABLE, model_update['status']) for snapshot in snapshots_model: self.assertEqual(fields.SnapshotStatus.AVAILABLE, snapshot['status']) # Validating the snapshot_name property value # from metadata of the snapshot 
groupsnapshot_name = self.driver._get_volumegroup_snapshot_name( group_snapshot) for snapshot in snapshots: self.assertEqual(groupsnapshot_name, snapshot.metadata['snapshot_name']) # Delete group-snapshot model_update, snapshots_model = self.driver.delete_group_snapshot( self.ctxt, group_snapshot, snapshots) self.assertEqual(fields.GroupSnapshotStatus.DELETED, model_update['status']) for snapshot in snapshots_model: self.assertEqual(fields.SnapshotStatus.DELETED, snapshot['status']) # Remove the volumes from volumegroup (model_update, add_volumes_update, remove_volumes_update) = self.driver.update_group( self.ctxt, volumegroup, [], remove_vols) self.assertEqual(fields.GroupStatus.AVAILABLE, model_update['status']) # Delete Volume Group model_update = self.driver.delete_group(self.ctxt, volumegroup, []) self.assertEqual(fields.GroupStatus.DELETED, model_update[0]['status']) @mock.patch.object(storwize_svc_common.StorwizeHelpers, 'get_system_info') def test_storwize_delete_volumegroup_with_delete_volumes(self, get_system_info): """Test volume group creation and deletion""" fake_system_info = {'code_level': self.VOLUMEGROUP_CODE_LEVEL, 'system_name': 'storwize-svc-sim', 'system_id': '0123456789ABCDEF'} get_system_info.return_value = fake_system_info self.driver.do_setup(None) # Create volumegroup type volumegroup_spec = {'volume_group_enabled': ' True'} volumegroup_type_ref = group_types.create(self.ctxt, 'volumegroup_type', volumegroup_spec) volumegroup_type = objects.GroupType.get_by_id( self.ctxt, volumegroup_type_ref['id']) # Create source volume vol_type_ref = volume_types.create(self.ctxt, 'non_rep_type', {}) vol_type = objects.VolumeType.get_by_id(self.ctxt, vol_type_ref['id']) source_vol = self._generate_vol_info(vol_type) self.driver.create_volume(source_vol) # Create source volumegroup source_volumegroup = testutils.create_group( self.ctxt, group_type_id=volumegroup_type.id, volume_type_ids=[vol_type_ref['id']]) model_update = self.driver.create_group(self.ctxt, source_volumegroup) self.assertEqual(fields.GroupStatus.AVAILABLE, model_update['status']) # Add source volumes to source volumegroup (model_update, add_volumes_update, remove_volumes_update) = ( self.driver.update_group(self.ctxt, source_volumegroup, [source_vol], [])) self.assertEqual(fields.GroupStatus.AVAILABLE, model_update['status']) source_volumegroup_name = self.driver._get_volumegroup_name( source_volumegroup) self.assertEqual(source_volumegroup_name, source_vol.metadata['Volume Group Name']) # Delete Volume Group model_update = self.driver.delete_group(self.ctxt, source_volumegroup, [source_vol]) self.assertEqual(fields.GroupStatus.DELETED, model_update[0]['status']) for volume in model_update[1]: self.assertEqual('deleted', volume['status']) @mock.patch.object(storwize_svc_common.StorwizeHelpers, 'get_system_info') @mock.patch.object(cinder.volume.volume_utils, 'is_group_a_type') @mock.patch('cinder.volume.volume_utils.is_group_a_cg_snapshot_type') @mock.patch.object(storwize_svc_common.StorwizeHelpers, 'create_volumegroup_snapshot') @mock.patch.object(storwize_svc_common.StorwizeHelpers, 'delete_volumegroup_snapshot') def test_storwize_create_and_delete_volumegroup_snapshot_calls( self, delete_volumegroup_snapshot, create_volumegroup_snapshot, is_grp_a_cg_snapshot_type, vg_type, get_system_info): """Test creation and deletion of volumegroup snapshot""" fake_system_info = {'code_level': self.VOLUMEGROUP_CODE_LEVEL, 'system_name': 'storwize-svc-sim', 'system_id': '0123456789ABCDEF'} get_system_info.return_value = 
fake_system_info self.driver.do_setup(None) # Mocking volume-group-enabled spec as true is_grp_a_cg_snapshot_type.side_effect = [False, False, False] vg_type.side_effect = [False, False, True, False, True, False, False, True, True, False, False, True] # Create volume group type_ref = volume_types.create(self.ctxt, 'testtype', None) group = testutils.create_group(self.ctxt, group_type_id=fake.GROUP_TYPE_ID, volume_type_ids=[type_ref['id']]) # Create volume group snapshot group_snapshot, snapshots = self._create_group_snapshot_in_db( group.id) model_update, snapshots_model = ( self.driver.create_group_snapshot(self.ctxt, group_snapshot, snapshots)) self.assertEqual(fields.GroupSnapshotStatus.AVAILABLE, model_update['status']) self.assertTrue(create_volumegroup_snapshot.called) # Delete volume group snapshot model_update, snapshots_model = ( self.driver.delete_group_snapshot(self.ctxt, group_snapshot, snapshots)) self.assertEqual(fields.GroupSnapshotStatus.DELETED, model_update['status']) self.assertTrue(delete_volumegroup_snapshot.called) # Delete volume group self.driver.delete_group(self.ctxt, group, []) @mock.patch.object(storwize_svc_common.StorwizeHelpers, 'create_rccg') def test_storwize_group_create(self, create_rccg): """Test group create.""" rccg_spec = {'consistent_group_snapshot_enabled': ' True'} rccg_type_ref = group_types.create(self.ctxt, 'cg_type', rccg_spec) rccg_type = objects.GroupType.get_by_id(self.ctxt, rccg_type_ref['id']) rep_type_ref = volume_types.create(self.ctxt, 'rep_type', {}) rep_group = testutils.create_group( self.ctxt, group_type_id=rccg_type.id, volume_type_ids=[rep_type_ref['id']]) model_update = self.driver.create_group(self.ctxt, rep_group) self.assertFalse(create_rccg.called) self.assertEqual(fields.GroupStatus.AVAILABLE, model_update['status']) spec = {'drivers:volume_topology': 'hyperswap', 'peer_pool': 'openstack1'} vol_type_ref = volume_types.create(self.ctxt, 'hypertype', spec) group = testutils.create_group( self.ctxt, name='cggroup', group_type_id=rccg_type.id, volume_type_ids=[vol_type_ref['id']]) model_update = self.driver.create_group(self.ctxt, group) self.assertEqual(fields.GroupStatus.ERROR, model_update['status']) @mock.patch('oslo_service.loopingcall.FixedIntervalLoopingCall', new=testutils.ZeroIntervalLoopingCall) @mock.patch('cinder.volume.volume_utils.is_group_a_cg_snapshot_type') @mock.patch('cinder.volume.volume_utils.is_group_a_type') @mock.patch.object(storwize_svc_common.StorwizeSVCCommonDriver, '_delete_replication_grp') def test_storwize_delete_group(self, _del_rep_grp, is_grp_a_cg_rep_type, is_grp_a_cg_snapshot_type): is_grp_a_cg_snapshot_type.side_effect = [True, True, False, True] is_grp_a_cg_rep_type.side_effect = [False, False, False, False, False, False, False, False] type_ref = volume_types.create(self.ctxt, 'testtype', None) group = testutils.create_group(self.ctxt, group_type_id=fake.GROUP_TYPE_ID, volume_type_ids=[type_ref['id']]) self._create_volume(volume_type_id=type_ref['id'], group_id=group.id) self._create_volume(volume_type_id=type_ref['id'], group_id=group.id) volumes = self.db.volume_get_all_by_generic_group( self.ctxt.elevated(), group.id) self.assertRaises(NotImplementedError, self.driver.delete_group, self.ctxt, group, volumes) model_update = self.driver.delete_group(self.ctxt, group, volumes) self.assertFalse(_del_rep_grp.called) self.assertEqual(fields.GroupStatus.DELETED, model_update[0]['status']) for volume in model_update[1]: self.assertEqual('deleted', volume['status']) 
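    # A rough note on the mocking pattern used by the surrounding group
    # tests: cinder.volume.volume_utils.is_group_a_cg_snapshot_type and
    # is_group_a_type are patched with side_effect lists, so each internal
    # check the driver makes consumes the next boolean in the list.  The
    # list lengths used here mirror how many times the driver consults
    # those helpers for the calls under test (an observation from these
    # tests, not a documented contract).  Illustrative stand-alone sketch
    # with hypothetical names:
    #
    #     with mock.patch('cinder.volume.volume_utils.'
    #                     'is_group_a_cg_snapshot_type') as is_cg:
    #         is_cg.side_effect = [True, False]  # 1st call True, 2nd False
    #         driver.delete_group(ctxt, group, volumes)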
@mock.patch('cinder.volume.volume_utils.is_group_a_cg_snapshot_type') @mock.patch('cinder.volume.volume_utils.is_group_a_type') @mock.patch.object(storwize_svc_common.StorwizeSVCCommonDriver, '_update_replication_grp') def test_storwize_group_update(self, _update_rep_grp, is_grp_a_cg_rep_type, is_grp_a_cg_snapshot_type): """Test group update.""" is_grp_a_cg_snapshot_type.side_effect = [False, True, True, False] is_grp_a_cg_rep_type.side_effect = [False, False, False, False, False, False, True, True] group = mock.MagicMock() self.assertRaises(NotImplementedError, self.driver.update_group, self.ctxt, group, None, None) (model_update, add_volumes_update, remove_volumes_update) = self.driver.update_group(self.ctxt, group) self.assertFalse(_update_rep_grp.called) self.assertIsNone(model_update) self.assertIsNone(add_volumes_update) self.assertIsNone(remove_volumes_update) self.driver.update_group(self.ctxt, group) self.assertTrue(_update_rep_grp.called) @mock.patch('oslo_service.loopingcall.FixedIntervalLoopingCall', new=testutils.ZeroIntervalLoopingCall) @mock.patch('cinder.volume.volume_utils.is_group_a_cg_snapshot_type') def test_storwize_create_group_snapshot(self, is_grp_a_cg_snapshot_type): is_grp_a_cg_snapshot_type.side_effect = [True, True, False, True] type_ref = volume_types.create(self.ctxt, 'testtype', None) group = testutils.create_group(self.ctxt, group_type_id=fake.GROUP_TYPE_ID, volume_type_ids=[type_ref['id']]) self._create_volume(volume_type_id=type_ref['id'], group_id=group.id) self._create_volume(volume_type_id=type_ref['id'], group_id=group.id) group_snapshot, snapshots = self._create_group_snapshot_in_db( group.id) self.assertRaises(NotImplementedError, self.driver.create_group_snapshot, self.ctxt, group_snapshot, snapshots) (model_update, snapshots_model_update) = self.driver.create_group_snapshot( self.ctxt, group_snapshot, snapshots) self.assertEqual(fields.GroupSnapshotStatus.AVAILABLE, model_update['status'], "CGSnapshot created failed") for snapshot in snapshots_model_update: self.assertEqual(fields.SnapshotStatus.AVAILABLE, snapshot['status']) @mock.patch('oslo_service.loopingcall.FixedIntervalLoopingCall', new=testutils.ZeroIntervalLoopingCall) @mock.patch('cinder.volume.volume_utils.is_group_a_cg_snapshot_type') def test_storwize_delete_group_snapshot(self, is_grp_a_cg_snapshot_type): is_grp_a_cg_snapshot_type.side_effect = [True, True, True, False, True] type_ref = volume_types.create(self.ctxt, 'testtype', None) group = testutils.create_group(self.ctxt, group_type_id=fake.GROUP_TYPE_ID, volume_type_ids=[type_ref['id']]) self._create_volume(volume_type_id=type_ref['id'], group_id=group.id) self._create_volume(volume_type_id=type_ref['id'], group_id=group.id) group_snapshot, snapshots = self._create_group_snapshot(group.id) self.assertRaises(NotImplementedError, self.driver.delete_group_snapshot, self.ctxt, group_snapshot, snapshots) model_update = self.driver.delete_group_snapshot(self.ctxt, group_snapshot, snapshots) self.assertEqual(fields.GroupSnapshotStatus.DELETED, model_update[0]['status']) for volume in model_update[1]: self.assertEqual(fields.SnapshotStatus.DELETED, volume['status']) @mock.patch('oslo_service.loopingcall.FixedIntervalLoopingCall', new=testutils.ZeroIntervalLoopingCall) @mock.patch.object(storwize_svc_common.StorwizeHelpers, 'delete_vdisk') @mock.patch.object(storwize_svc_common.StorwizeHelpers, 'delete_fc_consistgrp') @mock.patch('cinder.volume.volume_utils.is_group_a_cg_snapshot_type') def test_storwize_delete_consistgroup_snapshot(self, 
is_grp_a_cg_snapshot_type, delete_fc_consistgrp, delete_vdisk): is_grp_a_cg_snapshot_type.side_effect = [True, True, True, False, True] type_ref = volume_types.create(self.ctxt, 'testtype', None) group = testutils.create_group(self.ctxt, group_type_id=fake.GROUP_TYPE_ID, volume_type_ids=[type_ref['id']]) self._create_volume(volume_type_id=type_ref['id'], group_id=group.id) self._create_volume(volume_type_id=type_ref['id'], group_id=group.id) group_snapshot, snapshots = self._create_group_snapshot(group.id) cgsnapshot_id = group_snapshot.id cg_name = 'cg_snap-' + cgsnapshot_id self.driver._helpers.delete_consistgrp_snapshots(cg_name, snapshots) delete_fc_consistgrp.assert_has_calls([mock.call(cg_name)]) self.assertEqual(2, delete_fc_consistgrp.call_count) calls = [mock.call(snapshots[0]['name'], force_delete=True, force_unmap=False), mock.call(snapshots[1]['name'], force_delete=True, force_unmap=False)] delete_vdisk.assert_has_calls(calls, any_order=True) @mock.patch('oslo_service.loopingcall.FixedIntervalLoopingCall', new=testutils.ZeroIntervalLoopingCall) @mock.patch.object(storwize_svc_common.StorwizeHelpers, 'delete_vdisk') @mock.patch.object(storwize_svc_common.StorwizeHelpers, 'delete_fc_consistgrp') @mock.patch('cinder.volume.volume_utils.is_group_a_cg_snapshot_type') def test_storwize_delete_consistgroup_snapshot_1(self, is_grp_a_cg_snapshot_type, delete_fc_consistgrp, delete_vdisk): is_grp_a_cg_snapshot_type.side_effect = [True, True, True, False, True] type_ref = volume_types.create(self.ctxt, 'testtype', None) group = testutils.create_group(self.ctxt, group_type_id=fake.GROUP_TYPE_ID, volume_type_ids=[type_ref['id']]) self._create_volume(volume_type_id=type_ref['id'], group_id=group.id) self._create_volume(volume_type_id=type_ref['id'], group_id=group.id) group_snapshot, snapshots = self._create_group_snapshot(group.id) cgsnapshot_id = group_snapshot.id cg_name = 'cg_snap-' + cgsnapshot_id delete_vdisk.side_effect = exception.VolumeBackendAPIException(data='') (model_update, snap_model_update) = self.driver._helpers.delete_consistgrp_snapshots( cg_name, snapshots) self.assertEqual(fields.GroupSnapshotStatus.ERROR_DELETING, model_update['status']) for snapshot in snap_model_update: self.assertEqual(fields.SnapshotStatus.ERROR_DELETING, snapshot['status']) @mock.patch('oslo_service.loopingcall.FixedIntervalLoopingCall', new=testutils.ZeroIntervalLoopingCall) def test_storwize_create_group_from_src_invalid(self): # Invalid input case for create group from src type_ref = volume_types.create(self.ctxt, 'testtype', None) cg_spec = {'consistent_group_snapshot_enabled': ' True'} rccg_spec = {'consistent_group_replication_enabled': ' True'} cg_type_ref = group_types.create(self.ctxt, 'cg_type', cg_spec) rccg_type_ref = group_types.create(self.ctxt, 'rccg_type', rccg_spec) vg_type_ref = group_types.create(self.ctxt, 'vg_type', None) # create group in db group = self._create_group_in_db(volume_type_ids=[type_ref.id], group_type_id=vg_type_ref.id) self.assertRaises(NotImplementedError, self.driver.create_group_from_src, self.ctxt, group, None, None, None, None, None) group = self._create_group_in_db(volume_type_ids=[type_ref.id], group_type_id=rccg_type_ref.id) vol1 = testutils.create_volume(self.ctxt, volume_type_id=type_ref.id, group_id=group.id) self.assertRaises(exception.InvalidInput, self.driver.create_group_from_src, self.ctxt, group, [vol1]) hyper_specs = {'hyperswap_group_enabled': ' False'} hyper_type_ref = group_types.create(self.ctxt, 'hypergroup', hyper_specs) group = 
self._create_group_in_db(volume_type_ids=[type_ref.id], group_type_id=hyper_type_ref.id) vol1 = testutils.create_volume(self.ctxt, volume_type_id=type_ref.id, group_id=group.id) self.assertRaises(NotImplementedError, self.driver.create_group_from_src, self.ctxt, group, [vol1]) group = self._create_group_in_db(volume_type_id=type_ref.id, group_type_id=cg_type_ref.id) # create volumes in db vol1 = testutils.create_volume(self.ctxt, volume_type_id=type_ref.id, group_id=group.id) vol2 = testutils.create_volume(self.ctxt, volume_type_id=type_ref.id, group_id=group.id) volumes = [vol1, vol2] source_cg = self._create_group_in_db(volume_type_ids=[type_ref.id], group_type_id=cg_type_ref.id) # Add volumes to source CG src_vol1 = self._create_volume(volume_type_id=type_ref.id, group_id=source_cg['id']) src_vol2 = self._create_volume(volume_type_id=type_ref.id, group_id=source_cg['id']) source_vols = [src_vol1, src_vol2] group_snapshot, snapshots = self._create_group_snapshot( source_cg['id'], group_type_id=cg_type_ref.id) # Create group from src with null input self.assertRaises(exception.InvalidInput, self.driver.create_group_from_src, self.ctxt, group, volumes, None, None, None, None) # Create cg from src with source_cg and empty source_vols self.assertRaises(exception.InvalidInput, self.driver.create_group_from_src, self.ctxt, group, volumes, None, None, source_cg, None) # Create cg from src with source_vols and empty source_cg self.assertRaises(exception.InvalidInput, self.driver.create_group_from_src, self.ctxt, group, volumes, None, None, None, source_vols) # Create cg from src with cgsnapshot and empty snapshots self.assertRaises(exception.InvalidInput, self.driver.create_group_from_src, self.ctxt, group, volumes, group_snapshot, None, None, None) # Create cg from src with snapshots and empty cgsnapshot self.assertRaises(exception.InvalidInput, self.driver.create_group_from_src, self.ctxt, group, volumes, None, snapshots, None, None) model_update = self.driver.delete_group(self.ctxt, group, volumes) self.assertEqual(fields.GroupStatus.DELETED, model_update[0]['status']) for volume in model_update[1]: self.assertEqual('deleted', volume['status']) model_update = self.driver.delete_group(self.ctxt, source_cg, source_vols) self.assertEqual(fields.GroupStatus.DELETED, model_update[0]['status']) for volume in model_update[1]: self.assertEqual('deleted', volume['status']) model_update = self.driver.delete_group(self.ctxt, group_snapshot, snapshots) self.assertEqual(fields.GroupStatus.DELETED, model_update[0]['status']) for volume in model_update[1]: self.assertEqual('deleted', volume['status']) @ddt.data(('5000', 'iops', True), ('500', 'iops_per_gb', False), ('3000', 'mbps', False)) @mock.patch('oslo_service.loopingcall.FixedIntervalLoopingCall', new=testutils.ZeroIntervalLoopingCall) @mock.patch.object(storwize_svc_common.StorwizeHelpers, 'get_vdisk_attributes') @mock.patch.object(storwize_svc_common.StorwizeHelpers, 'create_vdisk') @mock.patch.object(storwize_svc_common.StorwizeSSH, 'mkfcmap') @mock.patch.object(storwize_svc_common.StorwizeHelpers, '_get_pool') @mock.patch.object(storwize_svc_common.StorwizeHelpers, 'add_vdisk_qos') @ddt.unpack def test_storwize_create_flashcopy_to_consistgrp(self, fake_iothrottling_value, fake_iothrottling_unit, empty_qos, add_vdisk_qos, _get_pool, mkfcmap, create_vdisk, get_vdisk_attributes): source = "volume-36cd5a6f-a13c-456c-8129-c3e8874fb15c" target = "volume-55eb6c7e-a13c-456c-8129-c3e8874kl34f" consistgrp = "cg_snap-9021b016-ce1e-4145-a1f0-0bd4007a3a78" 
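        # QoS shape exercised below (a summary of what these tests pass, not
        # an exhaustive driver contract): opts['qos'] is either None or a
        # dict such as {'IOThrottling': '5000', 'IOThrottling_unit': 'iops'},
        # with 'iops_per_gb' and 'mbps' being the other units used in this
        # module.  When opts['qos'] is None, the assertions below expect
        # add_vdisk_qos not to be called.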
fake_target_size = 0 config = self.driver.configuration pool = "openstack2" if empty_qos: opts = {'rsize': 2, 'iogrp': 0, 'qos': None, 'flashcopy_rate': 50, 'clean_rate': 50} self.driver._helpers.create_flashcopy_to_consistgrp( source, target, consistgrp, config, opts, full_copy=False, pool=pool) _get_pool.assert_not_called() add_vdisk_qos.assert_not_called() qos = {'IOThrottling': fake_iothrottling_value, 'IOThrottling_unit': fake_iothrottling_unit} opts = {'rsize': 2, 'iogrp': 0, 'qos': qos, 'flashcopy_rate': 50, 'clean_rate': 50} self.driver._helpers.create_flashcopy_to_consistgrp(source, target, consistgrp, config, opts, full_copy=False, pool=pool) add_vdisk_qos.assert_called_with(target, opts['qos'], fake_target_size) pool = None self.driver._helpers.create_flashcopy_to_consistgrp(source, target, consistgrp, config, opts, full_copy=False, pool=pool) _get_pool.assert_called_with(get_vdisk_attributes()) @mock.patch.object(storwize_svc_common.StorwizeHelpers, 'get_vdisk_copies') def test_storwize_get_pool(self, get_vdisk_copies): vol_attrs = {'mdisk_grp_name': 'openstack', 'IO_group_id': 0, 'capacity': 1, 'name': 'vol1'} self.driver._helpers._get_pool(vol_attrs) get_vdisk_copies.assert_not_called() vol_attrs['mdisk_grp_name'] = 'many' self.driver._helpers._get_pool(vol_attrs) get_vdisk_copies.assert_called_once() @mock.patch.object(storwize_svc_common.StorwizeHelpers, 'create_flashcopy_to_consistgrp') @mock.patch.object(storwize_svc_common.StorwizeHelpers, 'get_vdisk_params') @mock.patch.object(storwize_svc_common.StorwizeHelpers, 'start_fc_consistgrp') @mock.patch.object(storwize_svc_common.StorwizeHelpers, 'prepare_fc_consistgrp') @mock.patch.object(storwize_svc_common.StorwizeHelpers, 'delete_fc_consistgrp') @mock.patch('cinder.volume.volume_utils.is_group_a_cg_snapshot_type') @mock.patch.object(storwize_svc_common.StorwizeHelpers, '_get_pool') @mock.patch.object(storwize_svc_common.StorwizeHelpers, 'get_vdisk_attributes') def test_run_consistgrp_snapshots_forhost( self, get_vdisk_attributes, _get_pool, is_grp_a_cg_snapshot_type, delete_fc_consistgrp, prepare_fc_consistgrp, start_fc_consistgrp, get_vdisk_params, create_flashcopy_to_consistgrp): fake_opts = self._get_default_opts() get_vdisk_params.return_value = fake_opts is_grp_a_cg_snapshot_type.side_effect = [True, True, True, False, True] type_ref = volume_types.create(self.ctxt, 'testtype', None) group = testutils.create_group(self.ctxt, group_type_id=fake.GROUP_TYPE_ID, volume_type_ids=[type_ref['id']]) self._create_volume(volume_type_id=type_ref['id'], group_id=group.id) self._create_volume(volume_type_id=type_ref['id'], group_id=group.id) group_snapshot, snapshots = self._create_group_snapshot(group.id) cgsnapshot_id = group_snapshot.id fc_consistgrp = 'cg_snap-' + cgsnapshot_id config = None state = self.driver._state timeout = 20 self.driver._helpers.run_consistgrp_snapshots(fc_consistgrp, snapshots, state, config, timeout) start_fc_consistgrp.assert_called_with(fc_consistgrp) _get_pool.assert_not_called() get_vdisk_attributes.assert_not_called() @mock.patch.object(storwize_svc_common.StorwizeHelpers, 'create_flashcopy_to_consistgrp') @mock.patch.object(storwize_svc_common.StorwizeHelpers, 'get_vdisk_params') @mock.patch.object(storwize_svc_common.StorwizeHelpers, 'start_fc_consistgrp') @mock.patch.object(storwize_svc_common.StorwizeHelpers, 'prepare_fc_consistgrp') @mock.patch.object(storwize_svc_common.StorwizeHelpers, 'delete_fc_consistgrp') @mock.patch('cinder.volume.volume_utils.extract_host') def 
test_create_cg_from_source_forhost( self, extract_host, delete_fc_consistgrp, prepare_fc_consistgrp, start_fc_consistgrp, get_vdisk_params, create_flashcopy_to_consistgrp): fake_opts = self._get_default_opts() get_vdisk_params.return_value = fake_opts extract_host.return_value = 'openstack' # Valid case for create cg from src type_ref = volume_types.create(self.ctxt, 'testtype', None) spec = {'consistent_group_snapshot_enabled': ' True'} cg_type_ref = group_types.create(self.ctxt, 'cg_type', spec) pool = _get_test_pool() # Create cg in db tgt_group = self._create_group_in_db(volume_type_ids=[type_ref.id], group_type_id=cg_type_ref.id) # Create volumes in db without hash testutils.create_volume(self.ctxt, volume_type_id=type_ref.id, group_id=tgt_group.id, host='openstack@svc%s' % pool) testutils.create_volume(self.ctxt, volume_type_id=type_ref.id, consistencygroup_id=tgt_group.id, host='openstack@svc%s' % pool) tgt_volumes = self.db.volume_get_all_by_generic_group( self.ctxt.elevated(), tgt_group.id) # Create source CG source_cg = self._create_group_in_db(volume_type_ids=[type_ref.id], group_type_id=cg_type_ref.id) # Add volumes to source CG self._create_volume(volume_type_id=type_ref.id, group_id=source_cg['id']) self._create_volume(volume_type_id=type_ref.id, group_id=source_cg['id']) source_vols = self.db.volume_get_all_by_generic_group( self.ctxt.elevated(), source_cg['id']) fc_consistgrp = 'cg_snap-' + source_cg.id config = None state = self.driver._state timeout = 20 # test create_cg_from_source from volume group self.driver._helpers.create_cg_from_source(tgt_group, fc_consistgrp, source_vols, tgt_volumes, state, config, timeout) start_fc_consistgrp.assert_called_with(fc_consistgrp) self.assertEqual(2, extract_host.call_count) @mock.patch('oslo_service.loopingcall.FixedIntervalLoopingCall', new=testutils.ZeroIntervalLoopingCall) def test_storwize_group_from_src(self): # Valid case for create cg from src type_ref = volume_types.create(self.ctxt, 'testtype', None) spec = {'consistent_group_snapshot_enabled': ' True'} cg_type_ref = group_types.create(self.ctxt, 'cg_type', spec) pool = _get_test_pool() # Create cg in db group = self._create_group_in_db(volume_type_ids=[type_ref.id], group_type_id=cg_type_ref.id) # Create volumes in db testutils.create_volume(self.ctxt, volume_type_id=type_ref.id, group_id=group.id, host='openstack@svc#%s' % pool) testutils.create_volume(self.ctxt, volume_type_id=type_ref.id, consistencygroup_id=group.id, host='openstack@svc#%s' % pool) volumes = self.db.volume_get_all_by_generic_group( self.ctxt.elevated(), group.id) # Create source CG source_cg = self._create_group_in_db(volume_type_ids=[type_ref.id], group_type_id=cg_type_ref.id) # Add volumes to source CG self._create_volume(volume_type_id=type_ref.id, group_id=source_cg['id']) self._create_volume(volume_type_id=type_ref.id, group_id=source_cg['id']) source_vols = self.db.volume_get_all_by_generic_group( self.ctxt.elevated(), source_cg['id']) # Create cgsnapshot group_snapshot, snapshots = self._create_group_snapshot( source_cg['id'], group_type_id=cg_type_ref.id) # Create cg from source cg model_update, volumes_model_update = ( self.driver.create_group_from_src(self.ctxt, group, volumes, None, None, source_cg, source_vols)) self.assertEqual(fields.GroupStatus.AVAILABLE, model_update['status'], "CG create from src created failed") for each_vol in volumes_model_update: self.assertEqual('available', each_vol['status']) # Delete the Group model_update = self.driver.delete_group(self.ctxt, group, volumes) 
self.assertEqual(fields.GroupStatus.DELETED, model_update[0]['status']) for each_vol in model_update[1]: self.assertEqual('deleted', each_vol['status']) with (mock.patch.object(storwize_svc_common.StorwizeHelpers, 'create_rccg')) as create_rccg: # Create cg from source cg model_update, volumes_model_update = ( self.driver.create_group_from_src(self.ctxt, group, volumes, None, None, source_cg, source_vols)) create_rccg.assert_not_called() # Delete the Group model_update = self.driver.delete_group(self.ctxt, group, volumes) self.assertEqual(fields.GroupStatus.DELETED, model_update[0]['status']) for each_vol in model_update[1]: self.assertEqual('deleted', each_vol['status']) # Create cg from cg snapshot model_update, volumes_model_update = ( self.driver.create_group_from_src(self.ctxt, group, volumes, group_snapshot, snapshots, None, None)) self.assertEqual(fields.GroupStatus.AVAILABLE, model_update['status'], "CG create from src created failed") for each_vol in volumes_model_update: self.assertEqual('available', each_vol['status']) model_update = self.driver.delete_group(self.ctxt, group, volumes) self.assertEqual(fields.GroupStatus.DELETED, model_update[0]['status']) for each_vol in model_update[1]: self.assertEqual('deleted', each_vol['status']) with (mock.patch.object(storwize_svc_common.StorwizeHelpers, 'create_rccg')) as create_rccg: # Create cg from cg snapshot model_update, volumes_model_update = ( self.driver.create_group_from_src(self.ctxt, group, volumes, group_snapshot, snapshots, None, None)) create_rccg.assert_not_called() model_update = self.driver.delete_group_snapshot(self.ctxt, group_snapshot, snapshots) self.assertEqual(fields.GroupStatus.DELETED, model_update[0]['status']) for volume in model_update[1]: self.assertEqual('deleted', volume['status']) # mirror/strtch cluster volume test cases def test_storwize_svc_create_mirror_volume(self): # create mirror volume in invalid pool spec = {'mirror_pool': 'invalid_pool'} mirror_vol_type = self._create_volume_type(spec, 'invalid_mirror_type') vol = self._generate_vol_info(mirror_vol_type) self.assertRaises(exception.InvalidInput, self.driver.create_volume, vol) spec = {'mirror_pool': 'openstack1'} mirror_vol_type = self._create_volume_type(spec, 'test_mirror_type') vol = self._generate_vol_info(mirror_vol_type) self.driver.create_volume(vol) self._assert_vol_exists(vol.name, True) copies = self.driver._helpers.get_vdisk_copies(vol.name) self.assertEqual(copies['primary']['mdisk_grp_name'], 'openstack') self.assertEqual(copies['secondary']['mdisk_grp_name'], 'openstack1') self.driver.delete_volume(vol) self._assert_vol_exists(vol['name'], False) def test_storwize_svc_snapshots_mirror_volume(self): vol1 = self._generate_vol_info(self.mirror_vol_type) self.driver.create_volume(vol1) snap1 = self._generate_snap_info(vol1.id) self._assert_vol_exists(snap1.name, False) self.driver.create_snapshot(snap1) if self.USESIM: self.sim.error_injection('lsfcmap', 'speed_up') self._assert_vol_exists(snap1.name, True) copies = self.driver._helpers.get_vdisk_copies(snap1.name) self.assertEqual(copies['primary']['mdisk_grp_name'], 'openstack') self.assertEqual(copies['secondary']['mdisk_grp_name'], 'openstack1') self.driver.delete_snapshot(snap1) self.driver.delete_volume(vol1) def test_storwize_svc_create_cloned_mirror_volume(self): vol1 = self._generate_vol_info(self.mirror_vol_type) self.driver.create_volume(vol1) vol2 = self._generate_vol_info(self.mirror_vol_type) if self.USESIM: self.sim.error_injection('lsfcmap', 'speed_up') 
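        # The 'speed_up' injection on 'lsfcmap' is used throughout this
        # module when running against the simulator; it appears to make the
        # simulated FlashCopy mapping report completion immediately, so the
        # clone below can be treated as ready without polling delays (an
        # inference from how the simulator is driven in these tests, not a
        # documented guarantee).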
self.driver.create_cloned_volume(vol2, vol1) self._assert_vol_exists(vol2.name, True) copies = self.driver._helpers.get_vdisk_copies(vol2.name) self.assertEqual(copies['primary']['mdisk_grp_name'], 'openstack') self.assertEqual(copies['secondary']['mdisk_grp_name'], 'openstack1') self.driver.delete_volume(vol2) self._assert_vol_exists(vol2.name, False) self.driver.delete_volume(vol1) self._assert_vol_exists(vol1.name, False) def test_storwize_svc_create_mirror_volume_from_snapshot(self): vol1 = self._generate_vol_info(self.mirror_vol_type) self.driver.create_volume(vol1) snap1 = self._generate_snap_info(vol1.id) self.driver.create_snapshot(snap1) if self.USESIM: self.sim.error_injection('lsfcmap', 'speed_up') vol2 = self._generate_vol_info(self.mirror_vol_type) self.driver.create_volume_from_snapshot(vol2, snap1) self._assert_vol_exists(vol2.name, True) copies = self.driver._helpers.get_vdisk_copies(vol2.name) self.assertEqual(copies['primary']['mdisk_grp_name'], 'openstack') self.assertEqual(copies['secondary']['mdisk_grp_name'], 'openstack1') self.driver.delete_volume(vol2) self._assert_vol_exists(vol2['name'], False) self.driver.delete_snapshot(snap1) self._assert_vol_exists(snap1['name'], False) self.driver.delete_volume(vol1) self._assert_vol_exists(vol1['name'], False) @mock.patch.object(storwize_svc_common.StorwizeHelpers, 'add_vdisk_copy') def test_storwize_svc_mirror_volume_migrate(self, add_vdisk_copy): # use migratevdisk for mirror volume migration, rather than # addvdiskcopy self.driver.do_setup(None) loc = ('StorwizeSVCDriver:' + self.driver._state['system_id'] + ':openstack2') host = {'host': 'openstack@svc#openstack2', 'capabilities': {'location_info': loc}} ctxt = context.get_admin_context() vol1 = self._generate_vol_info(self.mirror_vol_type) self.driver.create_volume(vol1) copies = self.driver._helpers.get_vdisk_copies(vol1.name) self.assertEqual(copies['primary']['mdisk_grp_name'], 'openstack') self.assertEqual(copies['secondary']['mdisk_grp_name'], 'openstack1') self.driver.migrate_volume(ctxt, vol1, host) copies = self.driver._helpers.get_vdisk_copies(vol1.name) self.assertEqual(copies['primary']['mdisk_grp_name'], 'openstack2') self.assertEqual(copies['secondary']['mdisk_grp_name'], 'openstack1') self.assertFalse(add_vdisk_copy.called) self._delete_volume(vol1) @ddt.data(({'mirror_pool': 'openstack1'}, {'mirror_pool': 'openstack1', 'compression': True}), ({'compression': False}, {'mirror_pool': 'openstack1', 'compression': True}), ({}, {'mirror_pool': 'invalidpool'})) @ddt.unpack def test_storwize_svc_retype_mirror_volume_invalid(self, old_opts, new_opts): self.driver.do_setup(self.ctxt) host = {'host': 'openstack@svc#openstack'} ctxt = context.get_admin_context() vol_type1 = self._create_volume_type(old_opts, 'old') vol_type2 = self._create_volume_type(new_opts, 'new') diff, _equal = volume_types.volume_types_diff(ctxt, vol_type1.id, vol_type2.id) vol1 = self._generate_vol_info(vol_type1) self.driver.create_volume(vol1) self.assertRaises(exception.VolumeDriverException, self.driver.retype, self.ctxt, vol1, vol_type2, diff, host) self.driver.delete_volume(vol1) @ddt.data(({'mirror_pool': 'openstack1'}, {}), ({'mirror_pool': 'openstack1'}, {'mirror_pool': ''})) @ddt.unpack def test_storwize_retype_from_mirror_to_none_mirror(self, old_opts, new_opts): self.driver.do_setup(self.ctxt) host = {'host': 'openstack@svc#openstack'} ctxt = context.get_admin_context() vol_type1 = self._create_volume_type(old_opts, 'old') vol_type2 = self._create_volume_type(new_opts, 'new') 
diff, _equal = volume_types.volume_types_diff(ctxt, vol_type1.id, vol_type2.id) vol1 = self._generate_vol_info(vol_type1) self.driver.create_volume(vol1) self._assert_vol_exists(vol1.name, True) copies = self.driver._helpers.lsvdiskcopy(vol1.name) self.assertEqual(len(copies), 2) self.driver.retype(self.ctxt, vol1, vol_type2, diff, host) copies = self.driver._helpers.lsvdiskcopy(vol1.name) self.assertEqual(len(copies), 1) copies = self.driver._helpers.get_vdisk_copies(vol1.name) self.assertEqual(copies['primary']['mdisk_grp_name'], 'openstack') self.driver.delete_volume(vol1) @ddt.data(({}, {'mirror_pool': 'openstack1'}), ({'mirror_pool': ''}, {'mirror_pool': 'openstack1'})) @ddt.unpack def test_storwize_retype_from_none_to_mirror_volume(self, old_opts, new_opts): self.driver.do_setup(self.ctxt) host = {'host': 'openstack@svc#openstack'} ctxt = context.get_admin_context() old_opts = {} new_opts = {'mirror_pool': 'openstack1'} vol_type1 = self._create_volume_type(old_opts, 'old') vol_type2 = self._create_volume_type(new_opts, 'new') diff, _equal = volume_types.volume_types_diff(ctxt, vol_type1.id, vol_type2.id) vol1 = self._generate_vol_info(vol_type1) self.driver.create_volume(vol1) self._assert_vol_exists(vol1.name, True) copies = self.driver._helpers.lsvdiskcopy(vol1.name) self.assertEqual(len(copies), 1) self.driver.retype(self.ctxt, vol1, vol_type2, diff, host) copies = self.driver._helpers.lsvdiskcopy(vol1.name) self.assertEqual(len(copies), 2) copies = self.driver._helpers.get_vdisk_copies(vol1.name) self.assertEqual(copies['primary']['mdisk_grp_name'], 'openstack') self.assertEqual(copies['secondary']['mdisk_grp_name'], 'openstack1') self.driver.delete_volume(vol1) @ddt.data(({'mirror_pool': 'openstack1'}, {'mirror_pool': 'openstack2'})) @ddt.unpack def test_storwize_retype_from_mirror_to_different_mirror(self, old_opts, new_opts): self.driver.do_setup(self.ctxt) host = {'host': 'openstack@svc#openstack'} ctxt = context.get_admin_context() vol_type1 = self._create_volume_type(old_opts, 'old') vol_type2 = self._create_volume_type(new_opts, 'new') diff, _equal = volume_types.volume_types_diff(ctxt, vol_type1.id, vol_type2.id) vol1 = self._generate_vol_info(vol_type1) self.driver.create_volume(vol1) self._assert_vol_exists(vol1.name, True) copies = self.driver._helpers.lsvdiskcopy(vol1.name) self.assertEqual(len(copies), 2) copies = self.driver._helpers.get_vdisk_copies(vol1.name) self.assertEqual(copies['primary']['mdisk_grp_name'], 'openstack') self.assertEqual(copies['secondary']['mdisk_grp_name'], 'openstack1') self.driver.retype(self.ctxt, vol1, vol_type2, diff, host) copies = self.driver._helpers.lsvdiskcopy(vol1.name) self.assertEqual(len(copies), 2) copies = self.driver._helpers.get_vdisk_copies(vol1.name) self.assertEqual(copies['primary']['mdisk_grp_name'], 'openstack') self.assertEqual(copies['secondary']['mdisk_grp_name'], 'openstack2') self.driver.delete_volume(vol1) @ddt.data(({}, {'mirror_pool': 'openstack1'}), ({'mirror_pool': ''}, {'mirror_pool': 'openstack1'}), ({'mirror_pool': 'openstack1'}, {}), ({'mirror_pool': 'openstack1'}, {'mirror_pool': ''}), ({'mirror_pool': 'openstack1'}, {'mirror_pool': 'invalidpool'})) @ddt.unpack def test_storwize_manage_existing_mismatch_with_mirror_volume( self, opts1, opts2): self.driver.do_setup(self.ctxt) vol_type1 = self._create_volume_type(opts1, 'vol_type1') vol_type2 = self._create_volume_type(opts2, 'vol_type2') vol1 = self._generate_vol_info(vol_type1) self.driver.create_volume(vol1) vol2 = self._generate_vol_info(vol_type2) 
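        # manage_existing reference forms exercised in this and the
        # following tests: {'source-name': <vdisk name>} or
        # {'source-id': <vdisk UID>}, optionally with
        # 'manage_if_in_use': True to allow taking over a vdisk that is
        # already mapped to a host.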
ref = {'source-name': vol1.name} self.assertRaises(exception.ManageExistingVolumeTypeMismatch, self.driver.manage_existing, vol2, ref) self.driver.delete_volume(vol1) def test_storwize_manage_existing_with_mirror_volume(self): self.driver.do_setup(self.ctxt) vol1 = self._generate_vol_info(self.mirror_vol_type) self.driver.create_volume(vol1) uid_of_vol1 = self._get_vdisk_uid(vol1.name) opts1 = {'mirror_pool': 'openstack1'} new_volume_type = self._create_volume_type(opts1, 'new_mirror_type') new_volume = self._generate_vol_info(new_volume_type) ref = {'source-name': vol1.name} self.driver.manage_existing(new_volume, ref) # Check the uid of the volume which has been renamed. uid_of_new_vol = self._get_vdisk_uid(new_volume.name) self.assertEqual(uid_of_vol1, uid_of_new_vol) self.driver.delete_volume(new_volume) def _create_volume_type_qos(self, extra_specs, fake_qos): # Generate a QoS volume type for volume. if extra_specs: spec = fake_qos type_ref = volume_types.create(self.ctxt, "qos_extra_specs", spec) else: type_ref = volume_types.create(self.ctxt, "qos_associate", None) if fake_qos: qos_ref = qos_specs.create(self.ctxt, 'qos-specs', fake_qos) qos_specs.associate_qos_with_type(self.ctxt, qos_ref['id'], type_ref['id']) qos_type = volume_types.get_volume_type(self.ctxt, type_ref['id']) return qos_type def _create_volume_type_qos_both(self, fake_qos, fake_qos_associate): type_ref = volume_types.create(self.ctxt, "qos_extra_specs", fake_qos) qos_ref = qos_specs.create(self.ctxt, 'qos-specs', fake_qos_associate) qos_specs.associate_qos_with_type(self.ctxt, qos_ref['id'], type_ref['id']) qos_type = volume_types.get_volume_type(self.ctxt, type_ref['id']) return qos_type def _create_replication_volume_type(self, enable): # Generate a volume type for volume repliation. if enable: spec = {'capabilities:replication': ' True'} type_ref = volume_types.create(self.ctxt, "replication_1", spec) else: spec = {'capabilities:replication': ' False'} type_ref = volume_types.create(self.ctxt, "replication_2", spec) replication_type = objects.VolumeType.get_by_id(self.ctxt, type_ref['id']) return replication_type def _create_consistency_group_volume_type(self): # Generate a volume type for volume consistencygroup. spec = {'capabilities:consistencygroup_support': ' True'} type_ref = volume_types.create(self.ctxt, "cg", spec) cg_type = volume_types.get_volume_type(self.ctxt, type_ref['id']) return cg_type def _get_vdisk_uid(self, vdisk_name): """Return vdisk_UID for given vdisk. Given a vdisk by name, performs an lvdisk command that extracts the vdisk_UID parameter and returns it. Returns None if the specified vdisk does not exist. """ vdisk_properties, _err = self.sim._cmd_lsvdisk(obj=vdisk_name, delim='!') # Iterate through each row until we find the vdisk_UID entry for row in vdisk_properties.split('\n'): words = row.split('!') if words[0] == 'vdisk_UID': return words[1] return None def _create_volume_and_return_uid(self, volume_name): """Creates a volume and returns its UID. Creates a volume with the specified name, and returns the UID that the Storwize controller allocated for it. We do this by executing a create_volume and then calling into the simulator to perform an lsvdisk directly. """ volume = self._generate_vol_info() self.driver.create_volume(volume) return (volume, self._get_vdisk_uid(volume['name'])) def test_manage_existing_get_size_bad_ref(self): """Error on manage with bad reference. 
This test case attempts to manage an existing volume but passes in a bad reference that the Storwize driver doesn't understand. We expect an exception to be raised. """ volume = self._generate_vol_info() ref = {} self.assertRaises(exception.ManageExistingInvalidReference, self.driver.manage_existing_get_size, volume, ref) def test_manage_existing_get_size_bad_uid(self): """Error when the specified UUID does not exist.""" volume = self._generate_vol_info() ref = {'source-id': 'bad_uid'} self.assertRaises(exception.ManageExistingInvalidReference, self.driver.manage_existing_get_size, volume, ref) pass def test_manage_existing_get_size_bad_name(self): """Error when the specified name does not exist.""" volume = self._generate_vol_info() ref = {'source-name': 'bad_name'} self.assertRaises(exception.ManageExistingInvalidReference, self.driver.manage_existing_get_size, volume, ref) def test_manage_existing_bad_ref(self): """Error on manage with bad reference. This test case attempts to manage an existing volume but passes in a bad reference that the Storwize driver doesn't understand. We expect an exception to be raised. """ # Error when neither UUID nor name are specified. volume = self._generate_vol_info() ref = {} self.assertRaises(exception.ManageExistingInvalidReference, self.driver.manage_existing, volume, ref) # Error when the specified UUID does not exist. volume = self._generate_vol_info() ref = {'source-id': 'bad_uid'} self.assertRaises(exception.ManageExistingInvalidReference, self.driver.manage_existing, volume, ref) # Error when the specified name does not exist. volume = self._generate_vol_info() ref = {'source-name': 'bad_name'} self.assertRaises(exception.ManageExistingInvalidReference, self.driver.manage_existing, volume, ref) @mock.patch.object(storwize_svc_common.StorwizeHelpers, 'get_vdisk_copy_attrs') def test_manage_existing_mismatch(self, get_vdisk_copy_attrs): ctxt = testutils.get_test_admin_context() _volume, uid = self._create_volume_and_return_uid('manage_test') opts = {'rsize': -1} type_thick_ref = volume_types.create(ctxt, 'testtype1', opts) opts = {'rsize': 2} type_thin_ref = volume_types.create(ctxt, 'testtype2', opts) opts = {'rsize': 2, 'compression': True} type_comp_ref = volume_types.create(ctxt, 'testtype3', opts) opts = {'rsize': -1, 'iogrp': 1} type_iogrp_ref = volume_types.create(ctxt, 'testtype4', opts) new_volume = self._generate_vol_info() ref = {'source-name': _volume['name']} fake_copy_thin = self._get_default_opts() fake_copy_thin['autoexpand'] = 'on' fake_copy_comp = self._get_default_opts() fake_copy_comp['autoexpand'] = 'on' fake_copy_comp['compressed_copy'] = 'yes' fake_copy_thick = self._get_default_opts() fake_copy_thick['autoexpand'] = '' fake_copy_thick['compressed_copy'] = 'no' fake_copy_no_comp = self._get_default_opts() fake_copy_no_comp['compressed_copy'] = 'no' valid_iogrp = self.driver._state['available_iogrps'] self.driver._state['available_iogrps'] = [9999] self.assertRaises(exception.ManageExistingVolumeTypeMismatch, self.driver.manage_existing, new_volume, ref) self.driver._state['available_iogrps'] = valid_iogrp get_vdisk_copy_attrs.side_effect = [fake_copy_thin, fake_copy_thick, fake_copy_no_comp, fake_copy_comp, fake_copy_thick, fake_copy_thick ] new_volume['volume_type_id'] = type_thick_ref['id'] self.assertRaises(exception.ManageExistingVolumeTypeMismatch, self.driver.manage_existing, new_volume, ref) new_volume['volume_type_id'] = type_thin_ref['id'] self.assertRaises(exception.ManageExistingVolumeTypeMismatch, 
self.driver.manage_existing, new_volume, ref) new_volume['volume_type_id'] = type_comp_ref['id'] self.assertRaises(exception.ManageExistingVolumeTypeMismatch, self.driver.manage_existing, new_volume, ref) new_volume['volume_type_id'] = type_thin_ref['id'] self.assertRaises(exception.ManageExistingVolumeTypeMismatch, self.driver.manage_existing, new_volume, ref) new_volume['volume_type_id'] = type_iogrp_ref['id'] self.assertRaises(exception.ManageExistingVolumeTypeMismatch, self.driver.manage_existing, new_volume, ref) new_volume['volume_type_id'] = type_thick_ref['id'] no_exist_pool = 'i-dont-exist-%s' % random.randint(10000, 99999) new_volume['host'] = 'openstack@svc#%s' % no_exist_pool self.assertRaises(exception.ManageExistingVolumeTypeMismatch, self.driver.manage_existing, new_volume, ref) self._reset_flags() volume_types.destroy(ctxt, type_thick_ref['id']) volume_types.destroy(ctxt, type_comp_ref['id']) volume_types.destroy(ctxt, type_iogrp_ref['id']) def test_manage_existing_good_uid_not_mapped(self): """Tests managing a volume with no mappings. This test case attempts to manage an existing volume by UID, and we expect it to succeed. We verify that the backend volume was renamed to have the name of the Cinder volume that we asked for it to be associated with. """ # Create a volume as a way of getting a vdisk created, and find out the # UID of that vdisk. _volume, uid = self._create_volume_and_return_uid('manage_test') # Descriptor of the Cinder volume that we want to own the vdisk # referenced by uid. new_volume = self._generate_vol_info() # Submit the request to manage it. ref = {'source-id': uid} size = self.driver.manage_existing_get_size(new_volume, ref) self.assertEqual(10, size) self.driver.manage_existing(new_volume, ref) # Assert that there is a disk named after the new volume that has the # ID that we passed in, indicating that the disk has been renamed. uid_of_new_volume = self._get_vdisk_uid(new_volume['name']) self.assertEqual(uid, uid_of_new_volume) def test_manage_existing_good_name_not_mapped(self): """Tests managing a volume with no mappings. This test case attempts to manage an existing volume by name, and we expect it to succeed. We verify that the backend volume was renamed to have the name of the Cinder volume that we asked for it to be associated with. """ # Create a volume as a way of getting a vdisk created, and find out the # UID of that vdisk. _volume, uid = self._create_volume_and_return_uid('manage_test') # Descriptor of the Cinder volume that we want to own the vdisk # referenced by uid. new_volume = self._generate_vol_info() # Submit the request to manage it. ref = {'source-name': _volume['name']} size = self.driver.manage_existing_get_size(new_volume, ref) self.assertEqual(10, size) self.driver.manage_existing(new_volume, ref) # Assert that there is a disk named after the new volume that has the # ID that we passed in, indicating that the disk has been renamed. uid_of_new_volume = self._get_vdisk_uid(new_volume['name']) self.assertEqual(uid, uid_of_new_volume) def test_manage_existing_mapped(self): """Tests managing a mapped volume with no override. This test case attempts to manage an existing volume by UID, but the volume is mapped to a host, so we expect to see an exception raised. """ # Create a volume as a way of getting a vdisk created, and find out the # UUID of that vdisk. # Set replication target. 
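# Added note (not part of the original test): the connection set up just below
# is only there to get the vdisk mapped to a host, i.e. "in use" from the
# backend's point of view. Without an override in the reference dict,
# manage_existing_get_size() is expected to refuse such a disk; the
# *_mapped_with_override tests further down show the opt-in form used by this
# driver, e.g. (illustrative values):
#
#     ref = {'source-id': uid, 'manage_if_in_use': True}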
volume, uid = self._create_volume_and_return_uid('manage_test') # Map a host to the disk conn = {'initiator': u'unicode:initiator3', 'ip': '10.10.10.12', 'host': u'unicode.foo}.bar}.baz'} self.driver.initialize_connection(volume, conn) # Descriptor of the Cinder volume that we want to own the vdisk # referenced by uid. volume = self._generate_vol_info() ref = {'source-id': uid} # Attempt to manage this disk, and expect an exception because the # volume is already mapped. self.assertRaises(exception.ManageExistingInvalidReference, self.driver.manage_existing_get_size, volume, ref) ref = {'source-name': volume['name']} self.assertRaises(exception.ManageExistingInvalidReference, self.driver.manage_existing_get_size, volume, ref) def test_manage_existing_good_uid_mapped_with_override(self): """Tests managing a mapped volume with override. This test case attempts to manage an existing volume by UID, when it is already mapped to a host, but the ref specifies that this is OK. We verify that the backend volume was renamed to have the name of the Cinder volume that we asked for it to be associated with. """ # Create a volume as a way of getting a vdisk created, and find out the # UUID of that vdisk. volume, uid = self._create_volume_and_return_uid('manage_test') # Map a host to the disk conn = {'initiator': u'unicode:initiator3', 'ip': '10.10.10.12', 'host': u'unicode.foo}.bar}.baz'} self.driver.initialize_connection(volume, conn) # Descriptor of the Cinder volume that we want to own the vdisk # referenced by uid. new_volume = self._generate_vol_info() # Submit the request to manage it, specifying that it is OK to # manage a volume that is already attached. ref = {'source-id': uid, 'manage_if_in_use': True} size = self.driver.manage_existing_get_size(new_volume, ref) self.assertEqual(10, size) self.driver.manage_existing(new_volume, ref) # Assert that there is a disk named after the new volume that has the # ID that we passed in, indicating that the disk has been renamed. uid_of_new_volume = self._get_vdisk_uid(new_volume['name']) self.assertEqual(uid, uid_of_new_volume) def test_manage_existing_good_name_mapped_with_override(self): """Tests managing a mapped volume with override. This test case attempts to manage an existing volume by name, when it is already mapped to a host, but the ref specifies that this is OK. We verify that the backend volume was renamed to have the name of the Cinder volume that we asked for it to be associated with. """ # Create a volume as a way of getting a vdisk created, and find out the # UUID of that vdisk. volume, uid = self._create_volume_and_return_uid('manage_test') # Map a host to the disk conn = {'initiator': u'unicode:initiator3', 'ip': '10.10.10.12', 'host': u'unicode.foo}.bar}.baz'} self.driver.initialize_connection(volume, conn) # Descriptor of the Cinder volume that we want to own the vdisk # referenced by uid. new_volume = self._generate_vol_info() # Submit the request to manage it, specifying that it is OK to # manage a volume that is already attached. ref = {'source-name': volume['name'], 'manage_if_in_use': True} size = self.driver.manage_existing_get_size(new_volume, ref) self.assertEqual(10, size) self.driver.manage_existing(new_volume, ref) # Assert that there is a disk named after the new volume that has the # ID that we passed in, indicating that the disk has been renamed. 
uid_of_new_volume = self._get_vdisk_uid(new_volume['name']) self.assertEqual(uid, uid_of_new_volume) @mock.patch.object(storwize_svc_common.StorwizeSSH, 'mkfcmap') @mock.patch.object(storwize_svc_common.StorwizeHelpers, '_prepare_fc_map') @mock.patch.object(storwize_svc_common.StorwizeSSH, 'startfcmap') @mock.patch.object(storwize_svc_common.StorwizeHelpers, 'stop_relationship') @mock.patch.object(storwize_svc_common.StorwizeHelpers, 'start_relationship') def test_revert_to_snapshot(self, start_relationship, stop_relationship, startfcmap, prepare_fc_map, mkfcmap): mkfcmap.side_effect = ['1'] vol1 = self._generate_vol_info() snap1 = self._generate_snap_info(vol1.id) vol1.size = '11' self.assertRaises(exception.InvalidInput, self.driver.revert_to_snapshot, self.ctxt, vol1, snap1) vol2 = self._generate_vol_info() snap2 = self._generate_snap_info(vol2.id) with mock.patch.object(storwize_svc_common.StorwizeSVCCommonDriver, '_get_volume_replicated_type') as vol_rep_type: vol_rep_type.side_effect = [False] self.driver.revert_to_snapshot(self.ctxt, vol2, snap2) mkfcmap.assert_called_once_with(snap2.name, vol2.name, True, self.driver.configuration. storwize_svc_flashcopy_rate, self.driver.configuration. storwize_svc_clean_rate) prepare_fc_map.assert_called_once_with( '1', self.driver.configuration.storwize_svc_flashcopy_timeout, True) startfcmap.assert_called_once_with('1', True) @mock.patch.object(storwize_svc_common.StorwizeSSH, 'mkfcmap') @mock.patch.object(storwize_svc_common.StorwizeHelpers, '_prepare_fc_map') @mock.patch.object(storwize_svc_common.StorwizeSSH, 'startfcmap') @mock.patch.object(storwize_svc_common.StorwizeHelpers, 'start_relationship') def test_revert_to_snapshot_replication_type(self, start_relationship, startfcmap, prepare_fc_map, mkfcmap): vol1 = self._generate_vol_info() snap1 = self._generate_snap_info(vol1.id) vol1.size = '11' self.assertRaises(exception.InvalidInput, self.driver.revert_to_snapshot, self.ctxt, vol1, snap1) vol2 = self._generate_vol_info() snap2 = self._generate_snap_info(vol2.id) with mock.patch.object(storwize_svc_common.StorwizeSVCCommonDriver, '_get_volume_replicated_type') as vol_rep_type: vol_rep_type.side_effect = [True] self.assertRaises(exception.VolumeBackendAPIException, self.driver.revert_to_snapshot, self.ctxt, vol2, snap2) mkfcmap.assert_not_called() prepare_fc_map.assert_not_called() startfcmap.assert_not_called() with mock.patch.object( storwize_svc_common.StorwizeHelpers, 'get_rccg_name_by_volume_name') as rccg_name: vol_rep_type.side_effect = [True] rccg_name.side_effect = ["fake_rccg-1"] self.assertRaises(exception.VolumeBackendAPIException, self.driver.revert_to_snapshot, self.ctxt, vol2, snap2) mkfcmap.assert_not_called() prepare_fc_map.assert_not_called() startfcmap.assert_not_called() def test_storwize_create_volume_with_group_id(self): """Tests creating volume with gorup_id.""" type_ref = volume_types.create(self.ctxt, 'testtype', None) cg_spec = {'consistent_group_snapshot_enabled': ' True'} rccg_spec = {'consistent_group_replication_enabled': ' True'} cg_type_ref = group_types.create(self.ctxt, 'cg_type_1', cg_spec) rccg_type_ref = group_types.create(self.ctxt, 'rccg_type_2', rccg_spec) group1 = self._create_group_in_db(volume_type_ids=[type_ref.id], group_type_id=rccg_type_ref.id) group2 = self._create_group_in_db(volume_type_ids=[type_ref.id], group_type_id=cg_type_ref.id) # Create volume with replication group id will be success vol1 = testutils.create_volume(self.ctxt, volume_type_id=type_ref.id, 
host='openstack@svc#openstack', group_id=group1.id) self.driver.create_volume(vol1) # Create volume with cg_snapshot group id will success. vol2 = testutils.create_volume(self.ctxt, volume_type_id=type_ref.id, host='openstack@svc#openstack', group_id=group2.id) self.driver.create_volume(vol2) # Create cloned volume with replication group id will be success vol3 = testutils.create_volume(self.ctxt, volume_type_id=type_ref.id, host='openstack@svc#openstack', group_id=group1.id, source_volid=vol2.id) self.driver.create_cloned_volume(vol3, vol2) # Create cloned volume with cg_snapshot group id will be success. vol4 = testutils.create_volume(self.ctxt, volume_type_id=type_ref.id, group_id=group2.id, host='openstack@svc#openstack', source_volid=vol2.id) self.driver.create_cloned_volume(vol4, vol2) snapshot = self._generate_snap_info(vol2.id) self.driver.create_snapshot(snapshot) # Create volume from snapshot with replication group id will be success vol5 = testutils.create_volume(self.ctxt, volume_type_id=type_ref.id, host='openstack@svc#openstack', group_id=group1.id, snapshot_id=snapshot.id) self.driver.create_volume_from_snapshot(vol5, snapshot) # Create volume from snapshot with cg_snapshot group id will success. vol6 = testutils.create_volume(self.ctxt, volume_type_id=type_ref.id, group_id=group2.id, host='openstack@svc#openstack', snapshot_id=snapshot.id) self.driver.create_volume_from_snapshot(vol6, snapshot) @ddt.data({'pool': 'openstack2', 'peer_pool': 'openstack3'}, {'pool': 'openstack', 'peer_pool': None}, {'pool': None, 'peer_pool': 'openstack1'}) @mock.patch.object(storwize_svc_common.StorwizeSSH, 'lsmdiskgrp') def test_storwize_svc_get_hyperswap_pool_io_grp(self, pools, lsmdiskgrp): lsmdiskgrp.side_effect = [{'site_id': '1'}, {'site_id': '2'}] if pools['pool'] and pools['peer_pool']: iogrp_list = self.driver._helpers.get_hyperswap_pool_io_grp( self.driver._state, pools['pool'], pools['peer_pool']) lsmdiskgrp.assert_called() self.assertEqual(2, lsmdiskgrp.call_count) self.assertEqual(['0', '1'], iogrp_list) else: self.assertRaises(exception.InvalidInput, self.driver._helpers.get_hyperswap_pool_io_grp, self.driver._state, pools['pool'], pools['peer_pool']) lsmdiskgrp.assert_not_called() self.assertEqual(0, lsmdiskgrp.call_count) @mock.patch.object(storwize_svc_common.StorwizeSSH, 'lsmdiskgrp') def test_storwize_svc_select_iogrp_with_pool_site(self, lsmdiskgrp): opts = {} state = self.driver._state lsmdiskgrp.side_effect = [{'site_id': ''}, {'site_id': '1'}, {'site_id': '2'}, {'site_id': '2'}] state['storage_nodes']['1']['site_id'] = '1' state['storage_nodes']['1']['IO_group'] = '0' state['storage_nodes']['2']['site_id'] = '1' state['storage_nodes']['2']['IO_group'] = '1' pool = 'openstack2' opts['iogrp'] = '0,1' opts['volume_topology'] = 'hyperswap' state['available_iogrps'] = [0, 1, 2, 3] iog = self.driver._helpers.select_io_group(state, opts, pool) self.assertEqual(0, iog) pool = 'openstack2' opts['iogrp'] = '0,1' state['available_iogrps'] = [0, 1, 2, 3] iog = self.driver._helpers.select_io_group(state, opts, pool) self.assertEqual(0, iog) pool = 'openstack3' opts['iogrp'] = '0,1' state['available_iogrps'] = [0, 1, 2, 3] iog = self.driver._helpers.select_io_group(state, opts, pool) self.assertEqual(0, iog) state['storage_nodes']['2']['site_id'] = '2' pool = 'openstack2' opts['iogrp'] = '0,1' state['available_iogrps'] = [0, 1, 2, 3] iog = self.driver._helpers.select_io_group(state, opts, pool) self.assertEqual(1, iog) # test hyperswap volume def test_create_hyperswap_volume(self): # 
create hyperswap volume on code_level less than 7.7.0.0 spec = {'drivers:volume_topology': 'hyperswap', 'peer_pool': 'openstack1'} invalid_release_type = self._create_volume_type( spec, 'invalid_release_type') vol = self._generate_vol_info(invalid_release_type) self.assertRaises(exception.InvalidInput, self.driver.create_volume, vol) # create hyperswap on svc topology not 'hyperswap' with mock.patch.object(storwize_svc_common.StorwizeHelpers, 'get_system_info') as get_system_info: fake_system_info = {'code_level': (7, 7, 0, 0), 'topology': 'standard', 'system_name': 'storwize-svc-sim', 'system_id': '0123456789ABCDEF'} get_system_info.return_value = fake_system_info self.driver.do_setup(None) spec = {'drivers:volume_topology': 'hyperswap', 'peer_pool': 'openstack1'} invalid_topo_type = self._create_volume_type( spec, 'invalid_topo_type') vol = self._generate_vol_info(invalid_topo_type) self.assertRaises(exception.InvalidInput, self.driver.create_volume, vol) with mock.patch.object(storwize_svc_common.StorwizeHelpers, 'get_system_info') as get_system_info: fake_system_info = {'code_level': (7, 7, 0, 0), 'topology': 'hyperswap', 'system_name': 'storwize-svc-sim', 'system_id': '0123456789ABCDEF'} get_system_info.return_value = fake_system_info self.driver.do_setup(None) # create hyperswap volume with invalid pool spec = {'drivers:volume_topology': 'hyperswap', 'peer_pool': 'invalid_pool'} invalid_pool_type = self._create_volume_type(spec, 'invalid_pool_type') vol = self._generate_vol_info(invalid_pool_type) self.assertRaises(exception.InvalidInput, self.driver.create_volume, vol) # create hyperswap volume with easytier off spec = {'drivers:volume_topology': 'hyperswap', 'drivers:easytier': False} easytier_type = self._create_volume_type(spec, 'easytier_type') vol = self._generate_vol_info(easytier_type) self.assertRaises(exception.VolumeDriverException, self.driver.create_volume, vol) # create hyperswap volume without peer_pool spec = {'drivers:volume_topology': 'hyperswap'} no_peerpool_type = self._create_volume_type(spec, 'no_peerpool_type') vol = self._generate_vol_info(no_peerpool_type) self.assertRaises(exception.InvalidInput, self.driver.create_volume, vol) # Create hyperswap volume, there is no site_id on peer_pool spec = {'drivers:volume_topology': 'hyperswap', 'peer_pool': 'openstack'} same_pool_type = self._create_volume_type(spec, 'same_pool_type') vol = self._generate_vol_info(same_pool_type) self.assertRaises(exception.InvalidInput, self.driver.create_volume, vol) # Create hyperswap volume, pool and peer pool are on the same site spec = {'drivers:volume_topology': 'hyperswap', 'peer_pool': 'hyperswap1'} same_site_type = self._create_volume_type(spec, 'same_site_type') vol = testutils.create_volume(self.ctxt, host='openstack@svc#hyperswap1', volume_type_id=same_site_type.id) self.assertRaises(exception.InvalidInput, self.driver.create_volume, vol) # create hyperswap volume with stretched cluster spec = {'drivers:volume_topology': 'hyperswap', 'peer_pool': 'openstack1', 'mirror_pool': 'openstack1'} invalid_vol_type = self._create_volume_type(spec, 'invalid_hyperswap_type') vol = self._generate_vol_info(invalid_vol_type) self.assertRaises(exception.InvalidInput, self.driver.create_volume, vol) # create hyperswap volume with replication spec = {'drivers:volume_topology': 'hyperswap', 'peer_pool': 'openstack1', 'replication_enabled': ' True', 'replication_type': ' metro'} invalid_vol_type = self._create_volume_type(spec, 'invalid_hyperswap_type_2') vol = 
self._generate_vol_info(invalid_vol_type) self.assertRaises(exception.InvalidInput, self.driver.create_volume, vol) hyper_type = self._create_hyperswap_type('test_hyperswap_type') vol = self._create_hyperswap_volume(hyper_type) self._assert_vol_exists(vol.name, True) self._assert_vol_exists('site2' + vol.name, True) self._assert_vol_exists('fcsite1' + vol.name, True) self._assert_vol_exists('fcsite2' + vol.name, True) self.driver.delete_volume(vol) self._assert_vol_exists(vol.name, False) self._assert_vol_exists('site2' + vol.name, False) self._assert_vol_exists('fcsite1' + vol.name, False) self._assert_vol_exists('fcsite2' + vol.name, False) # Validate that _update_replication_properties is being called on # Hyperswap volume creation with mock.patch.object( storwize_svc_common.StorwizeSVCCommonDriver, '_update_replication_properties') as update_rep_properties: vol = self._create_hyperswap_volume(hyper_type) self.assertEqual(fields.VolumeStatus.AVAILABLE, vol['status']) self.assertTrue(update_rep_properties.called) self.driver.delete_volume(vol) # Validate that _update_replication_properties is handling # the exception from get_relationship_info call on Hyperswap volume # creation by raising an exception with mock.patch.object( storwize_svc_common.StorwizeHelpers, 'get_relationship_info') as get_relationship_info: get_relationship_info.side_effect = [ exception.VolumeBackendAPIException] volume = self._generate_hyperswap_vol_info(hyper_type) self.assertRaises(exception.VolumeBackendAPIException, self.driver.create_volume, volume) self.assertTrue(get_relationship_info.called) self.assertEqual(3, get_relationship_info.call_count) self.driver.delete_volume(volume) def test_create_snapshot_to_hyperswap_volume(self): with mock.patch.object(storwize_svc_common.StorwizeHelpers, 'get_system_info') as get_system_info: fake_system_info = {'code_level': (7, 7, 0, 0), 'topology': 'hyperswap', 'system_name': 'storwize-svc-sim', 'system_id': '0123456789ABCDEF'} get_system_info.return_value = fake_system_info self.driver.do_setup(None) hyper_type = self._create_hyperswap_type('test_hyperswap_type') vol = self._create_hyperswap_volume(hyper_type) self._assert_vol_exists(vol.name, True) snap = testutils.create_snapshot(self.ctxt, vol.id) if self.USESIM: self.sim.error_injection('lsfcmap', 'speed_up') self.sim.error_injection('startfcmap', 'bad_id') self.assertRaises(exception.VolumeBackendAPIException, self.driver.create_snapshot, snap) self._assert_vol_exists(snap['name'], False) self.sim.error_injection('prestartfcmap', 'bad_id') self.assertRaises(exception.VolumeBackendAPIException, self.driver.create_snapshot, snap) self._assert_vol_exists(snap['name'], False) self.driver.create_snapshot(snap) self._assert_vol_exists(snap['name'], True) self.driver.delete_volume(vol) self._assert_vol_exists(vol.name, False) self.driver.delete_snapshot(snap) self._assert_vol_exists(snap['name'], False) def test_create_hyperswap_volume_from_snapshot(self): with mock.patch.object(storwize_svc_common.StorwizeHelpers, 'get_system_info') as get_system_info: fake_system_info = {'code_level': (7, 7, 0, 0), 'topology': 'hyperswap', 'system_name': 'storwize-svc-sim', 'system_id': '0123456789ABCDEF'} get_system_info.return_value = fake_system_info self.driver.do_setup(None) hyper_type = self._create_hyperswap_type('test_hyperswap_type') vol = self._create_hyperswap_volume(hyper_type) self._assert_vol_exists(vol.name, True) snap = testutils.create_snapshot(self.ctxt, vol.id) self.driver.create_snapshot(snap) 
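# Added note (not part of the original test): the assertions around hyperswap
# volumes in this file rely on the simulator's naming convention, visible in
# the checks above and below: the primary vdisk keeps the Cinder volume name,
# the auxiliary copy is prefixed with 'site2', and what appear to be the
# per-site FlashCopy/change disks are prefixed with 'fcsite1' and 'fcsite2'.
# For a volume named 'volume-1234' the tests therefore expect
# 'site2volume-1234', 'fcsite1volume-1234' and 'fcsite2volume-1234' to exist
# after a hyperswap create and to be gone after delete_volume().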
self._assert_vol_exists(snap['name'], True) vol1 = testutils.create_volume(self.ctxt, host='openstack@svc#hyperswap1', volume_type_id=hyper_type.id) self.driver.create_volume_from_snapshot(vol1, snap) self._assert_vol_exists(vol1.name, True) self._assert_vol_exists('site2' + vol1.name, True) self._assert_vol_exists('fcsite1' + vol1.name, True) self._assert_vol_exists('fcsite2' + vol1.name, True) vol2 = testutils.create_volume(self.ctxt, host='openstack@svc#hyperswap1', volume_type_id=hyper_type.id) with (mock.patch.object(storwize_svc_common.StorwizeHelpers, 'convert_volume_to_hyperswap')) as convert_volume_to_hyperswap, \ (mock.patch.object(storwize_svc_common.StorwizeHelpers, 'ensure_vdisk_no_fc_mappings')) as ensure_vdisk_no_fc_mappings: self.driver.create_volume_from_snapshot(vol2, snap) ensure_vdisk_no_fc_mappings.assert_called() convert_volume_to_hyperswap.assert_called() self.assertEqual(1, convert_volume_to_hyperswap.call_count) self.assertEqual(1, ensure_vdisk_no_fc_mappings.call_count) self._assert_vol_exists(vol2.name, True) self.driver.delete_volume(vol) self._assert_vol_exists(vol.name, False) self._assert_vol_exists('site2' + vol.name, False) self._assert_vol_exists('fcsite1' + vol.name, False) self._assert_vol_exists('fcsite2' + vol.name, False) self.driver.delete_snapshot(snap) self._assert_vol_exists(snap['name'], False) self.driver.delete_volume(vol1) self._assert_vol_exists(vol1.name, False) def test_create_cloned_hyperswap_volume(self): with mock.patch.object(storwize_svc_common.StorwizeHelpers, 'get_system_info') as get_system_info: fake_system_info = {'code_level': (7, 7, 0, 0), 'topology': 'hyperswap', 'system_name': 'storwize-svc-sim', 'system_id': '0123456789ABCDEF'} get_system_info.return_value = fake_system_info self.driver.do_setup(None) hyper_type = self._create_hyperswap_type('test_hyperswap_type') vol = self._create_hyperswap_volume(hyper_type) self._assert_vol_exists(vol.name, True) vol2 = testutils.create_volume(self.ctxt, host='openstack@svc#hyperswap1', volume_type_id=vol.volume_type_id) with mock.patch.object(storwize_svc_common.StorwizeHelpers, 'get_vdisk_attributes') as vdisk_attr: vdisk_attr.return_value = None self.assertRaises(exception.VolumeDriverException, self.driver.create_cloned_volume, vol2, vol) self.driver.create_cloned_volume(vol2, vol) self._assert_vol_exists(vol2.name, True) self._assert_vol_exists('site2' + vol2.name, True) self._assert_vol_exists('fcsite1' + vol2.name, True) self._assert_vol_exists('fcsite2' + vol2.name, True) self.driver.delete_volume(vol) self._assert_vol_exists(vol.name, False) self.driver.delete_volume(vol2) self._assert_vol_exists(vol2.name, False) def test_extend_hyperswap_volume(self): with mock.patch.object(storwize_svc_common.StorwizeHelpers, 'get_system_info') as get_system_info: fake_system_info = {'code_level': (7, 7, 0, 0), 'topology': 'hyperswap', 'system_name': 'storwize-svc-sim', 'system_id': '0123456789ABCDEF'} get_system_info.return_value = fake_system_info self.driver.do_setup(None) vol_type_ref = self._create_hyperswap_type('test_hyperswap_type') vol = self._create_hyperswap_volume(vol_type_ref) self._assert_vol_exists(vol.name, True) self.driver.extend_volume(vol, '13') attrs = self.driver._helpers.get_vdisk_attributes(vol['name']) vol_size = int(attrs['capacity']) / units.Gi self.assertAlmostEqual(vol_size, 13) self.driver.delete_volume(vol) # Extend hyperswap volume that added to group. 
group_specs = {'hyperswap_group_enabled': ' True'} group_type_ref = group_types.create(self.ctxt, 'testgroup', group_specs) hyper_group = testutils.create_group( self.ctxt, name='hypergroup', group_type_id=group_type_ref['id'], volume_type_ids=[vol_type_ref['id']]) model_update = self.driver.create_group(self.ctxt, hyper_group) self.assertEqual(fields.GroupStatus.AVAILABLE, model_update['status']) vol = self._create_hyperswap_volume(vol_type_ref) self.db.volume_update(context.get_admin_context(), vol['id'], {'group_id': hyper_group.id}) add_volumes = [vol] del_volumes = [] (model_update, add_volumes_update, remove_volumes_update) = self.driver.update_group(self.ctxt, hyper_group, add_volumes, del_volumes) self.assertEqual(fields.GroupStatus.AVAILABLE, model_update['status']) self.assertEqual([{'id': vol.id, 'group_id': hyper_group.id}], add_volumes_update) self.assertEqual([], remove_volumes_update) self.driver.extend_volume(vol, '15') attrs = self.driver._helpers.get_vdisk_attributes(vol['name']) vol_size = int(attrs['capacity']) / units.Gi self.assertAlmostEqual(vol_size, 15) self.driver.delete_volume(vol) # Extend hyperswap volume with thick_provisioning_support. spec = {'drivers:volume_topology': 'hyperswap', 'peer_pool': 'hyperswap2', 'drivers:rsize': -1} hs_thick_type = volume_types.create( self.ctxt, 'test_hyperswap_thick_type', spec) hs_vol = self._create_hyperswap_volume(hs_thick_type) self._assert_vol_exists(hs_vol.name, True) if self.USESIM: # tell expandvdisksize to fail while called extend_volume # because volume is fast formatting self.sim.error_injection('expandvdisksize', 'fast_formatting') self.assertRaises(exception.VolumeDriverException, self.driver.extend_volume, hs_vol, 15) attrs = self.driver._helpers.get_vdisk_attributes(hs_vol['name']) vol_size = int(attrs['capacity']) / units.Gi self.assertAlmostEqual(vol_size, 1) self.driver.delete_volume(hs_vol) def test_migrate_hyperswap_volume(self): with mock.patch.object(storwize_svc_common.StorwizeHelpers, 'get_system_info') as get_system_info: fake_system_info = {'code_level': (7, 7, 0, 0), 'topology': 'hyperswap', 'system_name': 'storwize-svc-sim', 'system_id': '0123456789ABCDEF'} get_system_info.return_value = fake_system_info self.driver.do_setup(None) hyper_type = self._create_hyperswap_type('test_hyperswap_type') vol = self._create_hyperswap_volume(hyper_type) self._assert_vol_exists(vol.name, True) loc = ('StorwizeSVCDriver:' + self.driver._state['system_id'] + ':openstack2') cap = {'location_info': loc, 'extent_size': '256'} host = {'host': 'openstack@svc#openstack2', 'capabilities': cap} ctxt = context.get_admin_context() self.assertRaises(exception.InvalidInput, self.driver.migrate_volume, ctxt, vol, host) self._delete_volume(vol) def test_manage_existing_hyperswap_volume(self): with mock.patch.object(storwize_svc_common.StorwizeHelpers, 'get_system_info') as get_system_info: fake_system_info = {'code_level': (7, 7, 0, 0), 'topology': 'hyperswap', 'system_name': 'storwize-svc-sim', 'system_id': '0123456789ABCDEF'} get_system_info.return_value = fake_system_info self.driver.do_setup(None) hyperswap_vol_type = self._create_hyperswap_type('test_hyperswap_type') hyper_volume = self._create_hyperswap_volume(hyperswap_vol_type) self._assert_vol_exists(hyper_volume.name, True) spec1 = {} non_hyper_type = self._create_volume_type(spec1, 'non_hyper_type') non_hyper_volume = self._create_volume() # test volume is hyperswap volume but volume type is non-hyper type new_volume = self._generate_vol_info() ref = {'source-name': 
hyper_volume['name']} new_volume['volume_type_id'] = non_hyper_type['id'] new_volume['volume_type'] = non_hyper_type self.assertRaises(exception.ManageExistingVolumeTypeMismatch, self.driver.manage_existing, new_volume, ref) # test volume is non hyperswap volume but volum type is hyper type ref = {'source-name': non_hyper_volume['name']} new_volume['volume_type_id'] = hyperswap_vol_type['id'] new_volume['volume_type'] = hyperswap_vol_type self.assertRaises(exception.ManageExistingVolumeTypeMismatch, self.driver.manage_existing, new_volume, ref) # Test hyperswap volume peer_pool and backend peer_pool does not match new_volume = testutils.create_volume(self.ctxt, host='openstack@svc#hyperswap1') spec = {'drivers:volume_topology': 'hyperswap', 'peer_pool': 'hyperswap1'} hyper_type_2 = self._create_volume_type(spec, 'hyper_type_2') ref = {'source-name': hyper_volume['name']} new_volume['volume_type_id'] = hyper_type_2['id'] new_volume['volume_type'] = hyper_type_2 self.assertRaises(exception.ManageExistingVolumeTypeMismatch, self.driver.manage_existing, new_volume, ref) # test volume type match uid_of_master = self._get_vdisk_uid(hyper_volume.name) new_volume = testutils.create_volume(self.ctxt, host='openstack@svc#hyperswap1') ref = {'source-name': hyper_volume['name']} new_volume['volume_type_id'] = hyperswap_vol_type['id'] new_volume['volume_type'] = hyperswap_vol_type self.driver.manage_existing(new_volume, ref) # Check the uid of the volume which has been renamed. uid_of_master_volume = self._get_vdisk_uid(new_volume['name']) self.assertEqual(uid_of_master, uid_of_master_volume) self.driver.delete_volume(hyper_volume) def test_retype_hyperswap_volume(self): with mock.patch.object(storwize_svc_common.StorwizeHelpers, 'get_system_info') as get_system_info: fake_system_info = {'code_level': (7, 7, 0, 0), 'topology': 'hyperswap', 'system_name': 'storwize-svc-sim', 'system_id': '0123456789ABCDEF'} get_system_info.return_value = fake_system_info self.driver.do_setup(None) hyperswap_vol_type = self._create_hyperswap_type('test_hyperswap_type') spec1 = {'drivers:iogrp': '0,1'} non_hyper_type = self._create_volume_type(spec1, 'non_hyper_type') volume = testutils.create_volume(self.ctxt, volume_type_id=non_hyper_type.id, host='openstack@svc#hyperswap1') self.driver.create_volume(volume) host = {'host': 'openstack@svc#hyperswap1'} # Retype from non hyperswap volume type to # hyperswap volume type without peer_pool spec = {'drivers:volume_topology': 'hyperswap'} hyper_type_no_peer = self._create_volume_type(spec, 'hypertypenopeer') diff, _equal = volume_types.volume_types_diff( self.ctxt, non_hyper_type['id'], hyper_type_no_peer['id']) self.assertRaises(exception.InvalidInput, self.driver.retype, self.ctxt, volume, hyper_type_no_peer, diff, host) spec = {'drivers:volume_topology': 'hyperswap', 'drivers:easytier': False} easytier_type = self._create_volume_type(spec, 'easytier_type') diff, _equal = volume_types.volume_types_diff( self.ctxt, non_hyper_type['id'], easytier_type['id']) self.assertRaises(exception.InvalidInput, self.driver.retype, self.ctxt, volume, easytier_type, diff, host) # retype from normal volume with snapshot to hyperswap volume snap = testutils.create_snapshot(self.ctxt, volume.id) self.driver.create_snapshot(snap) diff, _equal = volume_types.volume_types_diff( self.ctxt, non_hyper_type['id'], hyperswap_vol_type['id']) self.assertRaises(exception.InvalidInput, self.driver.retype, self.ctxt, volume, hyperswap_vol_type, diff, host) self.driver.delete_snapshot(snap) # Retype from 
non-hyperswap volume to hyperswap volume diff, _equal = volume_types.volume_types_diff( self.ctxt, non_hyper_type['id'], hyperswap_vol_type['id']) self.driver.retype( self.ctxt, volume, hyperswap_vol_type, diff, host) volume['volume_type_id'] = hyperswap_vol_type['id'] volume['volume_type'] = hyperswap_vol_type self._assert_vol_exists(volume.name, True) self._assert_vol_exists('site2' + volume.name, True) self._assert_vol_exists('fcsite1' + volume.name, True) self._assert_vol_exists('fcsite2' + volume.name, True) # Retype from hyperswap volume to non hyperswap volume---move site2 diff, _equal = volume_types.volume_types_diff( self.ctxt, hyperswap_vol_type['id'], non_hyper_type['id']) self.driver.retype( self.ctxt, volume, non_hyper_type, diff, host) volume['volume_type_id'] = non_hyper_type['id'] volume['volume_type'] = non_hyper_type self.driver.delete_volume(volume) # Retype from hyperswap volume to non hyperswap volume---move site1 host2 = {'host': 'openstack@svc#hyperswap2'} volume = self._create_hyperswap_volume(hyperswap_vol_type) diff, _equal = volume_types.volume_types_diff( self.ctxt, hyperswap_vol_type['id'], non_hyper_type['id']) self.driver.retype( self.ctxt, volume, non_hyper_type, diff, host2) volume['volume_type_id'] = non_hyper_type['id'] volume['volume_type'] = non_hyper_type self.driver.delete_volume(volume) # Retype a hyperswap volume to hyperswap volume with keys change spec = {'drivers:volume_topology': 'hyperswap', 'peer_pool': 'hyperswap2', 'drivers:warning': '50'} warning_type = self._create_volume_type(spec, 'warning_type') volume = self._create_hyperswap_volume(hyperswap_vol_type) diff, _equal = volume_types.volume_types_diff( self.ctxt, hyperswap_vol_type['id'], warning_type['id']) self.driver.retype(self.ctxt, volume, warning_type, diff, host) def test_storwize_update_replication_properties_on_retype_hyperswap_volume( self): with mock.patch.object(storwize_svc_common.StorwizeHelpers, 'get_system_info') as get_system_info: fake_system_info = {'code_level': (7, 7, 0, 0), 'topology': 'hyperswap', 'system_name': 'storwize-svc-sim', 'system_id': '0123456789ABCDEF'} get_system_info.return_value = fake_system_info self.driver.do_setup(None) hyperswap_vol_type = self._create_hyperswap_type('test_hyperswap_type') spec1 = {'drivers:iogrp': '0,1'} non_hyper_type = self._create_volume_type(spec1, 'non_hyper_type') volume1 = testutils.create_volume(self.ctxt, volume_type_id=non_hyper_type.id, host='openstack@svc#hyperswap1') self.driver.create_volume(volume1) volume2 = testutils.create_volume(self.ctxt, volume_type_id=non_hyper_type.id, host='openstack@svc#hyperswap1') self.driver.create_volume(volume2) volume3 = self._create_hyperswap_volume(hyperswap_vol_type) host = {'host': 'openstack@svc#hyperswap1'} # Validate that _update_replication_properties is handling # the exception from get_relationship_info call while retyping a # normal volume to Hyperswap volume by raising an exception with mock.patch.object( storwize_svc_common.StorwizeHelpers, 'get_relationship_info') as get_relationship_info: get_relationship_info.side_effect = [ exception.VolumeBackendAPIException] diff, _equal = volume_types.volume_types_diff( self.ctxt, non_hyper_type['id'], hyperswap_vol_type['id']) self.assertRaises(exception.VolumeBackendAPIException, self.driver.retype, self.ctxt, volume1, hyperswap_vol_type, diff, host) self.assertTrue(get_relationship_info.called) self.assertEqual(3, get_relationship_info.call_count) volume1['volume_type_id'] = hyperswap_vol_type['id'] volume1['volume_type'] = 
hyperswap_vol_type self.driver.delete_volume(volume1) # Validate that _update_replication_properties is being called while # retyping a normal volume to Hyperswap volume with mock.patch.object( storwize_svc_common.StorwizeSVCCommonDriver, '_update_replication_properties') as update_rep_properties: diff, _equal = volume_types.volume_types_diff( self.ctxt, non_hyper_type['id'], hyperswap_vol_type['id']) self.driver.retype( self.ctxt, volume2, hyperswap_vol_type, diff, host) volume2['volume_type_id'] = hyperswap_vol_type['id'] volume2['volume_type'] = hyperswap_vol_type self._assert_vol_exists(volume2.name, True) self._assert_vol_exists('site2' + volume2.name, True) self._assert_vol_exists('fcsite1' + volume2.name, True) self._assert_vol_exists('fcsite2' + volume2.name, True) self.assertTrue(update_rep_properties.called) # Validate that _update_replication_properties is being called while # retyping a Hyperswap volume to normal volume with mock.patch.object( storwize_svc_common.StorwizeSVCCommonDriver, '_update_replication_properties') as update_rep_properties: diff, _equal = volume_types.volume_types_diff( self.ctxt, hyperswap_vol_type['id'], non_hyper_type['id']) self.driver.retype( self.ctxt, volume2, non_hyper_type, diff, host) volume2['volume_type_id'] = non_hyper_type['id'] volume2['volume_type'] = non_hyper_type self.assertTrue(update_rep_properties.called) self.driver.delete_volume(volume2) # Validate that _update_replication_properties is handling # the exception from get_relationship_info call while retyping a # Hyperswap volume to normal volume by raising an exception with mock.patch.object( storwize_svc_common.StorwizeHelpers, 'get_relationship_info') as get_relationship_info: get_relationship_info.side_effect = [ exception.VolumeBackendAPIException] diff, _equal = volume_types.volume_types_diff( self.ctxt, hyperswap_vol_type['id'], non_hyper_type['id']) self.assertRaises(exception.VolumeBackendAPIException, self.driver.retype, self.ctxt, volume3, non_hyper_type, diff, host) self.assertTrue(get_relationship_info.called) self.assertEqual(3, get_relationship_info.call_count) volume3['volume_type_id'] = non_hyper_type['id'] volume3['volume_type'] = non_hyper_type self.driver.delete_volume(volume3) @mock.patch.object(storwize_svc_common.StorwizeSVCCommonDriver, '_update_replication_properties') @mock.patch.object(storwize_svc_common.StorwizeHelpers, 'get_hyperswap_pool_io_grp') @mock.patch.object(storwize_svc_common.StorwizeHelpers, 'get_system_info') def test_retype_hyperswap_inuse_volume_fc(self, get_system_info, get_hyperswap_pool_io_grp, update_rep_prop): get_system_info.return_value = {'code_level': (7, 7, 0, 0), 'topology': 'hyperswap', 'system_name': 'storwize-svc-sim', 'system_id': '0123456789ABCDEF'} self.fcdriver.do_setup(None) spec1 = {'drivers:iogrp': '0,1'} non_hyper_type = self._create_volume_type(spec1, 'non_hyper_type') volume = testutils.create_volume(self.ctxt, volume_type_id=non_hyper_type.id, host='openstack@svc#hyperswap1') self.fcdriver.create_volume(volume) volume.previous_status = 'in-use' host = {'host': 'openstack@svc#hyperswap1'} spec = {'drivers:volume_topology': 'hyperswap', 'peer_pool': 'hyperswap2'} hyper_type = self._create_volume_type(spec, 'hypertype') diff, _equal = volume_types.volume_types_diff(self.ctxt, non_hyper_type['id'], hyper_type['id']) self.fcdriver.retype(self.ctxt, volume, hyper_type, diff, host) self._assert_vol_exists(volume.name, True) self._assert_vol_exists('site2' + volume.name, True) self._assert_vol_exists('fcsite1' + 
volume.name, True) self._assert_vol_exists('fcsite2' + volume.name, True) get_hyperswap_pool_io_grp.assert_called() @mock.patch.object(storwize_svc_common.StorwizeHelpers, 'get_hyperswap_pool_io_grp') @mock.patch.object(storwize_svc_common.StorwizeHelpers, 'get_system_info') def test_retype_hyperswap_inuse_volume_iscsi(self, get_system_info, get_hyperswap_pool_io_grp): get_system_info.return_value = {'code_level': (7, 7, 0, 0), 'topology': 'hyperswap', 'system_name': 'storwize-svc-sim', 'system_id': '0123456789ABCDEF'} self.driver.do_setup(None) spec1 = {'drivers:iogrp': '0,1'} non_hyper_type = self._create_volume_type(spec1, 'non_hyper_type') volume = testutils.create_volume(self.ctxt, volume_type_id=non_hyper_type.id, host='openstack@svc#hyperswap1') self.driver.create_volume(volume) volume.previous_status = 'in-use' host = {'host': 'openstack@svc#hyperswap1'} spec = {'drivers:volume_topology': 'hyperswap', 'peer_pool': 'hyperswap2'} hyper_type = self._create_volume_type(spec, 'hypertype') diff, _equal = volume_types.volume_types_diff(self.ctxt, non_hyper_type['id'], hyper_type['id']) self.driver.retype(self.ctxt, volume, hyper_type, diff, host) self._assert_vol_exists(volume.name, True) self._assert_vol_exists('site2' + volume.name, True) self._assert_vol_exists('fcsite1' + volume.name, True) self._assert_vol_exists('fcsite2' + volume.name, True) get_hyperswap_pool_io_grp.assert_not_called() def test_retype_hyperswap_volume_failure_case(self): with mock.patch.object(storwize_svc_common.StorwizeHelpers, 'get_system_info') as get_system_info: fake_system_info = {'code_level': (7, 7, 0, 0), 'topology': 'hyperswap', 'system_name': 'storwize-svc-sim', 'system_id': '0123456789ABCDEF'} get_system_info.return_value = fake_system_info self.driver.do_setup(None) hyperswap_vol_type = self._create_hyperswap_type('test_hyperswap_type') host = {'host': 'openstack@svc#hyperswap1'} # Retype a hyperswap volume to hyperswap volume with peer_pool changes spec = {'drivers:volume_topology': 'hyperswap'} peer_type = self._create_volume_type(spec, 'peer_type') volume = self._create_hyperswap_volume(hyperswap_vol_type) self._assert_vol_exists(volume.name, True) diff, _equal = volume_types.volume_types_diff( self.ctxt, hyperswap_vol_type['id'], peer_type['id']) self.assertRaises(exception.InvalidInput, self.driver.retype, self.ctxt, volume, peer_type, diff, host) # Retype a hyperswap volume to hyperswap volume with iogrp changes spec = {'drivers:volume_topology': 'hyperswap', 'drivers:iogrp': '1'} hyperswap_vol_type_2 = self._create_volume_type(spec, 'hyperswap_type_2') with mock.patch.object(storwize_svc_common.StorwizeHelpers, 'select_io_group') as select_io_group: select_io_group.return_value = {1} diff, _equal = volume_types.volume_types_diff( self.ctxt, hyperswap_vol_type['id'], hyperswap_vol_type_2['id']) self.assertRaises(exception.InvalidInput, self.driver.retype, self.ctxt, volume, hyperswap_vol_type_2, diff, host) host2 = {'host': 'openstack@svc#hyperswap2'} # Retype a hyperswap volume to hyperswap volume with pool change spec = {'drivers:volume_topology': 'hyperswap', 'drivers:iogrp': '0,1'} hyperswap_type_3 = self._create_volume_type(spec, 'hyperswap_type_3') diff, _equal = volume_types.volume_types_diff( self.ctxt, hyperswap_vol_type['id'], hyperswap_type_3['id']) self.assertRaises(exception.InvalidInput, self.driver.retype, self.ctxt, volume, hyperswap_type_3, diff, host2) # Retype a hyperswap volume in-use inuse_type = self._create_hyperswap_type('in-use_type') volume.previous_status = 'in-use' 
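# Added note (not part of the original test): previous_status = 'in-use' makes
# the driver take its attached-volume retype path, the same path exercised by
# the *_inuse_volume_fc/_iscsi tests above. The retype just below targets
# another type built by _create_hyperswap_type(), so its extra specs should
# match the current type and the call is expected to succeed rather than raise.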
diff, _equal = volume_types.volume_types_diff( self.ctxt, hyperswap_vol_type['id'], inuse_type['id']) self.driver.retype(self.ctxt, volume, inuse_type, diff, host) # retype from hyperswap volume to replication volume spec3 = {'replication_enabled': ' True', 'replication_type': ' metro'} self.driver._replica_target['pool_name'] = 'openstack2' replication_type = self._create_volume_type(spec3, 'test_replication_type') diff, _equal = volume_types.volume_types_diff( self.ctxt, hyperswap_vol_type['id'], replication_type['id']) self.assertRaises(exception.InvalidInput, self.driver.retype, self.ctxt, volume, replication_type, diff, host) # retype from hyperswap volume to streched cluster volume spec4 = {'mirror_pool': 'openstack1'} mirror_type = self._create_volume_type(spec4, 'test_mirror_type') diff, _equal = volume_types.volume_types_diff( self.ctxt, hyperswap_vol_type['id'], mirror_type['id']) self.assertRaises(exception.InvalidInput, self.driver.retype, self.ctxt, volume, mirror_type, diff, host) # retype from streched cluster volume to hyperswap volume host3 = {'host': 'openstack@svc#openstack'} mirror_volume = self._create_volume(volume_type_id=mirror_type.id) diff, _equal = volume_types.volume_types_diff( self.ctxt, mirror_type['id'], hyperswap_vol_type['id']) self.assertRaises(exception.InvalidInput, self.driver.retype, self.ctxt, mirror_volume, hyperswap_vol_type, diff, host3) @mock.patch.object(storwize_svc_common.StorwizeSVCCommonDriver, '_get_rccg_name') @mock.patch.object(storwize_svc_common.StorwizeHelpers, 'create_rccg') @mock.patch('cinder.volume.volume_utils.is_group_a_cg_snapshot_type') def test_storwize_hyperswap_group_create(self, is_grp_a_cg_snapshot_type, create_rccg, get_rccg_name): """Test group create.""" is_grp_a_cg_snapshot_type.side_effect = [False, False, False, False] with mock.patch.object(storwize_svc_common.StorwizeHelpers, 'get_system_info') as get_system_info: fake_system_info = {'code_level': (7, 7, 0, 0), 'topology': 'hyperswap', 'system_name': 'storwize-svc-sim', 'system_id': '0123456789ABCDEF'} get_system_info.return_value = fake_system_info self.driver.do_setup(None) vol_type_ref = volume_types.create(self.ctxt, 'nonhypertype', None) group_specs = {'hyperswap_group_enabled': ' True'} group_type_ref = group_types.create(self.ctxt, 'testgroup', group_specs) group = testutils.create_group(self.ctxt, group_type_id=group_type_ref['id'], volume_type_ids=[vol_type_ref['id']]) # create hyperswap group with nonhyper volume type model_update = self.driver.create_group(self.ctxt, group) self.assertEqual(fields.GroupStatus.ERROR, model_update['status']) create_rccg.assert_not_called() get_rccg_name.assert_not_called() # create hyperswap group with hyper volume type. 
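# Illustrative sketch (drawn from the surrounding tests, not an addition to the
# driver API): a hyperswap group pairs a group type whose specs enable
# 'hyperswap_group_enabled' with a volume type carrying the hyperswap topology
# extra specs, e.g.
#
#     group_specs = {'hyperswap_group_enabled': ' True'}
#     spec = {'drivers:volume_topology': 'hyperswap',
#             'peer_pool': 'openstack1'}
#
# which is what this test builds (the group specs a few lines above, the
# volume-type spec just below) before calling create_group().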
spec = {'drivers:volume_topology': 'hyperswap', 'peer_pool': 'openstack1'} vol_type_ref = volume_types.create(self.ctxt, 'hypertype', spec) hyper_group = testutils.create_group( self.ctxt, name='hypergroup', group_type_id=group_type_ref['id'], volume_type_ids=[vol_type_ref['id']]) model_update = self.driver.create_group(self.ctxt, hyper_group) self.assertEqual(fields.GroupStatus.AVAILABLE, model_update['status']) create_rccg.assert_not_called() get_rccg_name.assert_not_called() @mock.patch.object(storwize_svc_common.StorwizeSVCCommonDriver, '_get_rccg_name') @mock.patch.object(storwize_svc_common.StorwizeHelpers, 'delete_rccg') @mock.patch('cinder.volume.volume_utils.is_group_a_cg_snapshot_type') def test_storwize_hyperswap_group_delete(self, is_grp_a_cg_snapshot_type, delete_rccg, get_rccg_name): """Test group create.""" is_grp_a_cg_snapshot_type.side_effect = [False, False, False] with mock.patch.object(storwize_svc_common.StorwizeHelpers, 'get_system_info') as get_system_info: fake_system_info = {'code_level': (7, 7, 0, 0), 'topology': 'hyperswap', 'system_name': 'storwize-svc-sim', 'system_id': '0123456789ABCDEF'} get_system_info.return_value = fake_system_info self.driver.do_setup(None) group_specs = {'hyperswap_group_enabled': ' True'} group_type_ref = group_types.create(self.ctxt, 'testgroup', group_specs) # create hyperswap group with hyper volume type. vol_type_ref = self._create_hyperswap_type( 'hyper_type') hyper_group = testutils.create_group( self.ctxt, name='hypergroup', group_type_id=group_type_ref['id'], volume_type_ids=[vol_type_ref['id']]) model_update = self.driver.create_group(self.ctxt, hyper_group) self.assertEqual(fields.GroupStatus.AVAILABLE, model_update['status']) vol1 = self._create_hyperswap_volume(vol_type_ref) vol2 = self._create_hyperswap_volume(vol_type_ref) ctxt = context.get_admin_context() self.db.volume_update(ctxt, vol1['id'], {'group_id': hyper_group.id}) self.db.volume_update(ctxt, vol2['id'], {'group_id': hyper_group.id}) volumes = self.db.volume_get_all_by_generic_group( self.ctxt.elevated(), hyper_group.id) model_update = self.driver.delete_group(self.ctxt, hyper_group, volumes) self.assertEqual(fields.GroupStatus.DELETED, model_update[0]['status']) for volume in model_update[1]: self.assertEqual('deleted', volume['status']) delete_rccg.assert_not_called() get_rccg_name.assert_not_called() @mock.patch.object(storwize_svc_common.StorwizeSVCCommonDriver, '_get_rccg_name') @mock.patch.object(storwize_svc_common.StorwizeHelpers, 'get_relationship_info') @mock.patch.object(storwize_svc_common.StorwizeHelpers, 'chrcrelationship') @mock.patch('cinder.volume.volume_utils.is_group_a_cg_snapshot_type') def test_storwize_hyperswap_group_update(self, is_grp_a_cg_snapshot_type, chrcrelationship, get_relationship_info, get_rccg_name): """Test group create.""" is_grp_a_cg_snapshot_type.side_effect = [False, False, False, False, False] with mock.patch.object(storwize_svc_common.StorwizeHelpers, 'get_system_info') as get_system_info: fake_system_info = {'code_level': (7, 7, 0, 0), 'topology': 'hyperswap', 'system_name': 'storwize-svc-sim', 'system_id': '0123456789ABCDEF'} get_system_info.return_value = fake_system_info self.driver.do_setup(None) group_specs = {'hyperswap_group_enabled': ' True'} group_type_ref = group_types.create(self.ctxt, 'testgroup', group_specs) # create hyperswap group with hyper volume type. 
volume_type_ref = self._create_hyperswap_type( 'hyper_type') hyper_group = testutils.create_group( self.ctxt, name='hypergroup', group_type_id=group_type_ref['id'], volume_type_ids=[volume_type_ref['id']]) model_update = self.driver.create_group(self.ctxt, hyper_group) self.assertEqual(fields.GroupStatus.AVAILABLE, model_update['status']) vol1 = self._create_hyperswap_volume(volume_type_ref) vol2 = self._create_hyperswap_volume(volume_type_ref) ctxt = context.get_admin_context() self.db.volume_update(ctxt, vol1['id'], {'group_id': hyper_group.id}) self.db.volume_update(ctxt, vol2['id'], {'group_id': hyper_group.id}) add_volumes = [vol1, vol2] del_volumes = [] get_relationship_info.assert_called() self.assertEqual(2, get_relationship_info.call_count) # add hyperswap volume (model_update, add_volumes_update, remove_volumes_update) = self.driver.update_group(self.ctxt, hyper_group, add_volumes, del_volumes) self.assertEqual(fields.GroupStatus.AVAILABLE, model_update['status']) self.assertEqual([{'id': vol1.id, 'group_id': hyper_group.id}, {'id': vol2.id, 'group_id': hyper_group.id}], add_volumes_update, ) self.assertEqual([], remove_volumes_update) chrcrelationship.assert_not_called() get_relationship_info.assert_called() self.assertEqual(2, get_relationship_info.call_count) get_rccg_name.assert_not_called() # del hyperswap volume from volume group add_volumes = [] del_volumes = [vol1, vol2] (model_update, add_volumes_update, remove_volumes_update) = self.driver.update_group(self.ctxt, hyper_group, add_volumes, del_volumes) self.assertEqual(fields.GroupStatus.AVAILABLE, model_update['status']) self.assertEqual([], add_volumes_update) self.assertEqual([{'id': vol1.id, 'group_id': None}, {'id': vol2.id, 'group_id': None}], remove_volumes_update) chrcrelationship.assert_not_called() get_relationship_info.assert_called() self.assertEqual(2, get_relationship_info.call_count) get_rccg_name.assert_not_called() # add non-hyper volume non_type_ref = volume_types.create(self.ctxt, 'nonhypertype', None) add_vol3 = self._create_volume(volume_type_id=non_type_ref['id']) (model_update, add_volumes_update, remove_volumes_update) = self.driver.update_group( self.ctxt, hyper_group, [add_vol3, vol1], []) self.assertEqual(fields.GroupStatus.ERROR, model_update['status']) self.assertEqual([{'id': vol1.id, 'group_id': hyper_group.id}], add_volumes_update) self.assertEqual([], remove_volumes_update) # del non-hyper volume vol4 = self._create_volume(volume_type_id=non_type_ref['id']) (model_update, add_volumes_update, remove_volumes_update) = self.driver.update_group( self.ctxt, hyper_group, [], [vol4, vol1]) self.assertEqual(fields.GroupStatus.ERROR, model_update['status']) self.assertEqual([{'id': vol1.id, 'group_id': None}], remove_volumes_update) self.assertEqual([], add_volumes_update) @mock.patch('oslo_service.loopingcall.FixedIntervalLoopingCall', new=testutils.ZeroIntervalLoopingCall) @mock.patch('cinder.volume.volume_utils.is_group_a_cg_snapshot_type') def test_hyperswap_create_group_from_grp(self, is_group_a_cg_snap_type): # Valid case for create hyperswap group from src is_group_a_cg_snap_type.return_value = False with mock.patch.object(storwize_svc_common.StorwizeHelpers, 'get_system_info') as get_system_info: fake_system_info = {'code_level': (7, 7, 0, 0), 'topology': 'hyperswap', 'system_name': 'storwize-svc-sim', 'system_id': '0123456789ABCDEF'} get_system_info.return_value = fake_system_info self.driver.do_setup(None) group_specs = {'hyperswap_group_enabled': ' True'} group_type_ref = 
group_types.create(self.ctxt, 'testgroup', group_specs) # create hyperswap group with hyper volume type. volume_type_ref = self._create_hyperswap_type( 'hyper_type') source_hyper_group = testutils.create_group( self.ctxt, name='src_hypergroup', group_type_id=group_type_ref['id'], volume_type_ids=[volume_type_ref['id']]) model_update = self.driver.create_group(self.ctxt, source_hyper_group) self.assertEqual(fields.GroupStatus.AVAILABLE, model_update['status']) src_vol1 = self._create_hyperswap_volume(volume_type_ref) src_vol2 = self._create_hyperswap_volume(volume_type_ref) ctxt = context.get_admin_context() self.db.volume_update(ctxt, src_vol1['id'], {'group_id': source_hyper_group.id}) self.db.volume_update(ctxt, src_vol2['id'], {'group_id': source_hyper_group.id}) add_volumes = [src_vol1, src_vol2] del_volumes = [] # add hyperswap volume (model_update, add_volumes_update, remove_volumes_update) = self.driver.update_group(self.ctxt, source_hyper_group, add_volumes, del_volumes) self.assertEqual(fields.GroupStatus.AVAILABLE, model_update['status']) self.assertEqual([{'id': src_vol1.id, 'group_id': source_hyper_group.id}, {'id': src_vol2.id, 'group_id': source_hyper_group.id}], add_volumes_update, ) source_vols = self.db.volume_get_all_by_generic_group( self.ctxt.elevated(), source_hyper_group['id']) # clone hyper group Clone_hyper_group = testutils.create_group( self.ctxt, name='clon_hypergroup', group_type_id=group_type_ref['id'], volume_type_ids=[volume_type_ref['id']]) clone_vol1 = testutils.create_volume( self.ctxt, host='openstack@svc#hyperswap1', volume_type_id=volume_type_ref.id, group_id=Clone_hyper_group.id) clone_vol2 = testutils.create_volume( self.ctxt, host='openstack@svc#hyperswap1', volume_type_id=volume_type_ref.id, group_id=Clone_hyper_group.id) clone_volumes = self.db.volume_get_all_by_generic_group( self.ctxt.elevated(), Clone_hyper_group['id']) # Create hyperswap group from source hyperswap group model_update, volumes_model_update = ( self.driver.create_group_from_src(self.ctxt, Clone_hyper_group, clone_volumes, None, None, source_hyper_group, source_vols)) self.assertEqual(fields.GroupStatus.AVAILABLE, model_update['status'], "CG create from src created failed") for each_vol in volumes_model_update: self.assertEqual('available', each_vol['status']) for vol in clone_volumes: self._assert_vol_exists(vol.name, True) self._assert_vol_exists('site2' + vol.name, True) self._assert_vol_exists('fcsite1' + vol.name, True) self._assert_vol_exists('fcsite2' + vol.name, True) self.driver.delete_group(self.ctxt, Clone_hyper_group, [clone_vol1, clone_vol2]) with (mock.patch.object(storwize_svc_common.StorwizeHelpers, 'convert_volume_to_hyperswap')) as convert_volume_to_hyperswap, \ (mock.patch.object(storwize_svc_common.StorwizeHelpers, 'ensure_vdisk_no_fc_mappings')) as ensure_vdisk_no_fc_mappings: # Create cg from source cg model_update, volumes_model_update = ( self.driver.create_group_from_src(self.ctxt, Clone_hyper_group, clone_volumes, None, None, source_hyper_group, source_vols)) ensure_vdisk_no_fc_mappings.assert_called() self.assertEqual(2, ensure_vdisk_no_fc_mappings.call_count) convert_volume_to_hyperswap.assert_called() self.assertEqual(2, convert_volume_to_hyperswap.call_count) self.assertEqual(fields.GroupStatus.AVAILABLE, model_update['status'], "CG create from src created failed") for each_vol in volumes_model_update: self.assertEqual('available', each_vol['status']) for vol in clone_volumes: self._assert_vol_exists(vol.name, True) self._assert_vol_exists('site2' + 
vol.name, False) self._assert_vol_exists('fcsite1' + vol.name, False) self._assert_vol_exists('fcsite2' + vol.name, False) @mock.patch('oslo_service.loopingcall.FixedIntervalLoopingCall', new=testutils.ZeroIntervalLoopingCall) @mock.patch('cinder.volume.volume_utils.is_group_a_cg_snapshot_type') def test_hyperswap_create_group_from_snapshot(self, is_group_a_cg_snap_type): # Valid case for create hyperswap group from src is_group_a_cg_snap_type.return_value = False with mock.patch.object(storwize_svc_common.StorwizeHelpers, 'get_system_info') as get_system_info: fake_system_info = {'code_level': (7, 7, 0, 0), 'topology': 'hyperswap', 'system_name': 'storwize-svc-sim', 'system_id': '0123456789ABCDEF'} get_system_info.return_value = fake_system_info self.driver.do_setup(None) group_specs = {'hyperswap_group_enabled': ' True'} group_type_ref = group_types.create(self.ctxt, 'testgroup', group_specs) # create hyperswap group with hyper volume type. volume_type_ref = self._create_hyperswap_type( 'hyper_type') source_hyper_group = testutils.create_group( self.ctxt, name='src_hypergroup', group_type_id=group_type_ref['id'], volume_type_ids=[volume_type_ref['id']]) model_update = self.driver.create_group(self.ctxt, source_hyper_group) self.assertEqual(fields.GroupStatus.AVAILABLE, model_update['status']) src_vol1 = self._create_hyperswap_volume(volume_type_ref) src_vol2 = self._create_hyperswap_volume(volume_type_ref) ctxt = context.get_admin_context() self.db.volume_update(ctxt, src_vol1['id'], {'group_id': source_hyper_group.id}) self.db.volume_update(ctxt, src_vol2['id'], {'group_id': source_hyper_group.id}) add_volumes = [src_vol1, src_vol2] del_volumes = [] # add hyperswap volume (model_update, add_volumes_update, remove_volumes_update) = self.driver.update_group(self.ctxt, source_hyper_group, add_volumes, del_volumes) self.assertEqual(fields.GroupStatus.AVAILABLE, model_update['status']) self.assertEqual([{'id': src_vol1.id, 'group_id': source_hyper_group.id}, {'id': src_vol2.id, 'group_id': source_hyper_group.id}], add_volumes_update, ) # clone hyper group Clone_hyper_group = testutils.create_group( self.ctxt, name='clon_hypergroup', group_type_id=group_type_ref['id'], volume_type_ids=[volume_type_ref['id']]) clone_vol1 = testutils.create_volume( self.ctxt, host='openstack@svc#hyperswap1', volume_type_id=volume_type_ref.id, group_id=Clone_hyper_group.id) clone_vol2 = testutils.create_volume( self.ctxt, host='openstack@svc#hyperswap1', volume_type_id=volume_type_ref.id, group_id=Clone_hyper_group.id) clone_volumes = self.db.volume_get_all_by_generic_group( self.ctxt.elevated(), Clone_hyper_group['id']) # Create hyperswap group snapshot group_snapshot, snapshots = self._create_group_snapshot( source_hyper_group['id'], group_type_id=group_type_ref.id) # Create hyperswap group from hyperswap group snapshot model_update, volumes_model_update = ( self.driver.create_group_from_src(self.ctxt, Clone_hyper_group, clone_volumes, group_snapshot, snapshots, None, None)) self.assertEqual(fields.GroupStatus.AVAILABLE, model_update['status'], "CG create from src created failed") for each_vol in volumes_model_update: self.assertEqual('available', each_vol['status']) for vol in clone_volumes: self._assert_vol_exists(vol.name, True) self._assert_vol_exists('site2' + vol.name, True) self._assert_vol_exists('fcsite1' + vol.name, True) self._assert_vol_exists('fcsite2' + vol.name, True) self.driver.delete_group(self.ctxt, Clone_hyper_group, [clone_vol1, clone_vol2]) with 
(mock.patch.object(storwize_svc_common.StorwizeHelpers, 'convert_volume_to_hyperswap')) as convert_volume_to_hyperswap, \ (mock.patch.object(storwize_svc_common.StorwizeHelpers, 'ensure_vdisk_no_fc_mappings')) as ensure_vdisk_no_fc_mappings: # Create cg from source cg model_update, volumes_model_update = ( self.driver.create_group_from_src(self.ctxt, Clone_hyper_group, clone_volumes, group_snapshot, snapshots, None, None)) ensure_vdisk_no_fc_mappings.assert_called() self.assertEqual(2, ensure_vdisk_no_fc_mappings.call_count) convert_volume_to_hyperswap.assert_called() self.assertEqual(2, convert_volume_to_hyperswap.call_count) self.assertEqual(fields.GroupStatus.AVAILABLE, model_update['status'], "CG create from src created failed") for each_vol in volumes_model_update: self.assertEqual('available', each_vol['status']) for vol in clone_volumes: self._assert_vol_exists(vol.name, True) self._assert_vol_exists('site2' + vol.name, False) self._assert_vol_exists('fcsite1' + vol.name, False) self._assert_vol_exists('fcsite2' + vol.name, False) @ddt.data({'spec': {'rsize': -1}}, {'spec': {'mirror_pool': 'dr_pool2'}}, {'spec': {'drivers:volume_topology': 'hyperswap', 'peer_pool': 'dr_pool2'}}) @ddt.unpack def test_storwize_volumes_on_dr_pool_success_case(self, spec): with mock.patch.object(storwize_svc_common.StorwizeHelpers, 'get_system_info') as get_system_info: fake_system_info = {'code_level': (7, 7, 0, 0), 'topology': 'hyperswap', 'system_name': 'storwize-svc-sim', 'system_id': '0123456789ABCDEF'} get_system_info.return_value = fake_system_info self.driver.do_setup(None) dr_type = self._create_volume_type(spec, 'type_dr') vol = testutils.create_volume(self.ctxt, volume_type_id=dr_type.id, host='openstack@svc#hyperswap1') self.driver.create_volume(vol) vol2 = testutils.create_volume(self.ctxt, volume_type_id=dr_type.id, host='openstack@svc#hyperswap1') ref = {'source-name': vol.name} self.driver.manage_existing(vol2, ref) @ddt.data({'spec': {'warning': 30}}, {'spec': {'rsize': 5}}, {'spec': {'easytier': False}}, {'spec': {'autoexpand': False}}, {'spec': {'grainsize': 128}}) @ddt.unpack def test_storwize_create_thin_volume_on_dr_pool_failure_case(self, spec): # create basic thin volume on dr_pool with mock.patch.object(storwize_svc_common.StorwizeHelpers, 'get_system_info') as get_system_info: fake_system_info = {'code_level': (7, 7, 0, 0), 'topology': 'hyperswap', 'system_name': 'storwize-svc-sim', 'system_id': '0123456789ABCDEF'} get_system_info.return_value = fake_system_info self.driver.do_setup(None) thin_dr_type = self._create_volume_type(spec, 'type_thin') vol = self._generate_vol_info_on_dr_pool(thin_dr_type) self.assertRaises(exception.VolumeDriverException, self.driver.create_volume, vol) # create mirror volume on dr_pool self._set_flag('storwize_svc_mirror_pool', 'dr_pool1') mirror_dr_type = self._create_volume_type(spec, 'type_mirror') vol = self._generate_vol_info(mirror_dr_type) self.assertRaises(exception.VolumeDriverException, self.driver.create_volume, vol) self._reset_flags() # create hyperswap volume on dr_pool spec.update({'drivers:volume_topology': 'hyperswap', 'peer_pool': 'dr_pool2'}) hyper_dr_type = self._create_volume_type(spec, 'hyper_dr_type') self.assertRaises(exception.VolumeDriverException, self._create_hyperswap_volume, hyper_dr_type) @ddt.data({'spec': {'warning': 30}}, {'spec': {'rsize': 5}}, {'spec': {'easytier': False}}, {'spec': {'autoexpand': False}}, {'spec': {'grainsize': 128}}) @ddt.unpack def test_storwize_manage_volume_on_dr_pool_failure_case(self, 
spec): with mock.patch.object(storwize_svc_common.StorwizeHelpers, 'get_system_info') as get_system_info: fake_system_info = {'code_level': (7, 7, 0, 0), 'topology': 'hyperswap', 'system_name': 'storwize-svc-sim', 'system_id': '0123456789ABCDEF'} get_system_info.return_value = fake_system_info self.driver.do_setup(None) extra_spec = {} thin_type = self._create_volume_type(extra_spec, 'thin_type') vol_type1 = self._create_volume_type(spec, 'vol_type1') thin_volume = self._generate_vol_info_on_dr_pool(thin_type) self.driver.create_volume(thin_volume) vol1 = self._generate_vol_info_on_dr_pool(vol_type1) ref1 = {'source-name': thin_volume.name} self.assertRaises(exception.ManageExistingVolumeTypeMismatch, self.driver.manage_existing, vol1, ref1) extra_spec = {'mirror_pool': 'dr_pool1'} mirror_type = self._create_volume_type(extra_spec, 'type_mirror') mirror_volume = self._generate_vol_info(mirror_type) self.driver.create_volume(mirror_volume) spec.update({'mirror_pool': 'dr_pool1'}) vol_type2 = self._create_volume_type(spec, 'vol_type2') vol2 = self._generate_vol_info(vol_type2) ref2 = {'source-name': mirror_volume.name} self.assertRaises(exception.ManageExistingVolumeTypeMismatch, self.driver.manage_existing, vol2, ref2) spec.pop('mirror_pool') extra_spec = {'drivers:volume_topology': 'hyperswap', 'peer_pool': 'dr_pool2'} hyper_type = self._create_volume_type(extra_spec, 'type_hyper') hyper_volume = testutils.create_volume( self.ctxt, volume_type_id=hyper_type.id, host='openstack@svc#hyperswap1') self.driver.create_volume(hyper_volume) spec.update(extra_spec) vol_type3 = self._create_volume_type(spec, 'vol_type3') vol3 = testutils.create_volume( self.ctxt, volume_type_id=vol_type3.id, host='openstack@svc#hyperswap1') ref3 = {'source-name': hyper_volume.name} self.assertRaises(exception.ManageExistingVolumeTypeMismatch, self.driver.manage_existing, vol3, ref3) def test_storwize_migrate_volume_between_regular_dr_pool(self): spec = {'mirror_pool': 'openstack1'} mirror_vol_type = self._create_volume_type(spec, 'test_mirror_type') vol = self._generate_vol_info(mirror_vol_type) self.driver.create_volume(vol) loc = ('StorwizeSVCDriver:' + self.driver._state['system_id'] + ':dr_pool2') cap = {'location_info': loc, 'extent_size': '256'} host = {'host': 'openstack@svc#dr_pool2', 'capabilities': cap} ctxt = context.get_admin_context() self.assertRaises(exception.VolumeDriverException, self.driver.migrate_volume, ctxt, vol, host) vol2 = self._generate_vol_info_on_dr_pool(mirror_vol_type) self.driver.create_volume(vol2) self.assertRaises(exception.VolumeDriverException, self.driver.migrate_volume, ctxt, vol2, host) spec = {'mirror_pool': 'dr_pool1'} mirror_vol_type1 = self._create_volume_type(spec, 'test_mirror_type1') vol3 = self._generate_vol_info(mirror_vol_type1) self.driver.create_volume(vol3) self.assertRaises(exception.VolumeDriverException, self.driver.migrate_volume, ctxt, vol3, host) spec.update({'rsize': -1}) thick_vol_type = self._create_volume_type(spec, 'thick_mirror_type') vol3 = self._generate_vol_info_on_dr_pool(thick_vol_type) self.driver.create_volume(vol3) self.driver.migrate_volume(ctxt, vol3, host) vol4 = self._create_volume() self.driver.migrate_volume(ctxt, vol4, host) spec = {'rsize': '10'} rsize_type = self._create_volume_type(spec, 'rsize_type') vol5 = self._generate_vol_info(rsize_type) self.driver.create_volume(vol5) self.assertRaises(exception.VolumeDriverException, self.driver.migrate_volume, ctxt, vol5, host) @ddt.data(({}, {'easytier': True, 'warning': 5, 'autoexpand': 
False}), ({}, {'grainsize': 128}), ({'mirror_pool': 'dr_pool2'}, {'mirror_pool': 'hyperswap1'})) @ddt.unpack def test_storwize_svc_retype_old_type_dr_pool(self, key_specs_old, key_specs_new): self.driver.do_setup(None) loc = ('StorwizeSVCDriver:' + self.driver._state['system_id'] + ':dr_pool1') cap = {'location_info': loc, 'extent_size': '128'} self.driver._stats = {'location_info': loc} host = {'host': 'openstack@svc#dr_pool1', 'capabilities': cap} ctxt = context.get_admin_context() old_type_ref = volume_types.create(ctxt, 'old', key_specs_old) new_type_ref = volume_types.create(ctxt, 'new', key_specs_new) diff, _equal = volume_types.volume_types_diff(ctxt, old_type_ref['id'], new_type_ref['id']) old_type = objects.VolumeType.get_by_id(ctxt, old_type_ref['id']) volume = self._generate_vol_info_on_dr_pool(old_type) volume['host'] = host['host'] new_type = objects.VolumeType.get_by_id(ctxt, new_type_ref['id']) self.driver.create_volume(volume) self.assertRaises(exception.VolumeDriverException, self.driver.retype, ctxt, volume, new_type, diff, host) @ddt.data(({}, {'mirror_pool': 'dr_pool2', 'warning': 5}), ({'mirror_pool': 'openstack2'}, {'mirror_pool': 'dr_pool2'}), ({'mirror_pool': 'dr_pool2'}, {'mirror_pool': 'hyperswap1'}), ({'autoexpand': False}, {'drivers:volume_topology': 'hyperswap', 'peer_pool': 'dr_pool2', 'autoexpand': False})) @ddt.unpack def test_storwize_svc_retype_new_type_dr_pool(self, key_specs_old, key_specs_new): with mock.patch.object(storwize_svc_common.StorwizeHelpers, 'get_system_info') as get_system_info: fake_system_info = {'code_level': (7, 7, 0, 0), 'topology': 'hyperswap', 'system_name': 'storwize-svc-sim', 'system_id': '0123456789ABCDEF'} get_system_info.return_value = fake_system_info self.driver.do_setup(None) loc = ('StorwizeSVCDriver:' + self.driver._state['system_id'] + ':openstack') cap = {'location_info': loc, 'extent_size': '128'} self.driver._stats = {'location_info': loc} host = {'host': 'openstack@svc#openstack', 'capabilities': cap} ctxt = context.get_admin_context() old_type_ref = volume_types.create(ctxt, 'old', key_specs_old) new_type_ref = volume_types.create(ctxt, 'new', key_specs_new) diff, _equal = volume_types.volume_types_diff(ctxt, old_type_ref['id'], new_type_ref['id']) old_type = objects.VolumeType.get_by_id(ctxt, old_type_ref['id']) volume = self._generate_vol_info(old_type) volume['host'] = host['host'] new_type = objects.VolumeType.get_by_id(ctxt, new_type_ref['id']) self.driver.create_volume(volume) self.assertRaises(exception.VolumeDriverException, self.driver.retype, ctxt, volume, new_type, diff, host) @mock.patch.object(storwize_svc_common.StorwizeHelpers, '_get_flashcopy_mapping_attributes') @mock.patch.object(storwize_svc_common.StorwizeHelpers, '_get_vdisk_fc_mappings') def test_revert_to_snapshot_with_uncompleted_clone( self, _get_vdisk_fc_mappings, _get_flashcopy_mapping_attributes): vol1 = self._generate_vol_info() snap1 = self._generate_snap_info(vol1.id) self.driver._helpers._get_vdisk_fc_mappings.return_value = ['4'] self.driver._helpers._get_flashcopy_mapping_attributes.return_value = { 'copy_rate': '50', 'progress': '3', 'status': 'copying', 'target_vdisk_name': 'testvol'} self.assertRaises(exception.VolumeBackendAPIException, self.driver.revert_to_snapshot, self.ctxt, vol1, snap1) @mock.patch.object(storwize_svc_common.StorwizeHelpers, 'delete_vdisk') def test_storwize_svc_delete_volume_with_lower_code(self, delete_vdisk): with mock.patch.object(storwize_svc_common.StorwizeHelpers, 'get_system_info') as get_system_info: 
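            # Descriptive note (added): the simulated back end below reports
            # code level (7, 6, 0, 0) with standard topology, i.e. an older
            # release, which is what the "lower code" delete test exercises.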
            fake_system_info = {'code_level': (7, 6, 0, 0),
                                'topology': 'standard',
                                'system_name': 'storwize-svc-sim',
                                'system_id': '0123456789ABCDEF'}
            get_system_info.return_value = fake_system_info
            self.driver.do_setup(None)
            volume = self._generate_vol_info()
            snap = self._generate_snap_info(volume.id)
            self.driver.create_volume(volume)
            self.driver.create_snapshot(snap)
            self.driver.delete_snapshot(snap)
            snap_call = [mock.call(snap.name, force_delete=False,
                                   force_unmap=False)]
            delete_vdisk.assert_has_calls(snap_call)
            self.driver.delete_volume(volume)
            vol_call = [mock.call(volume.name, force_delete=False,
                                  force_unmap=False)]
            delete_vdisk.assert_has_calls(vol_call)

    @mock.patch.object(storwize_svc_common.StorwizeHelpers, 'delete_vdisk')
    @mock.patch('cinder.volume.volume_utils.is_group_a_cg_snapshot_type')
    @mock.patch('cinder.volume.volume_utils.is_group_a_type')
    @mock.patch.object(storwize_svc_common.StorwizeSVCCommonDriver,
                       '_delete_replication_grp')
    def test_storwize_delete_group_with_lower_code(
            self, _del_rep_grp, is_grp_a_cg_rep_type,
            is_grp_a_cg_snapshot_type, delete_vdisk):
        is_grp_a_cg_snapshot_type.return_value = True
        is_grp_a_cg_rep_type.return_value = False
        type_ref = volume_types.create(self.ctxt, 'testtype', None)
        group = testutils.create_group(self.ctxt,
                                       group_type_id=fake.GROUP_TYPE_ID,
                                       volume_type_ids=[type_ref['id']])
        vol1 = self._create_volume(volume_type_id=type_ref['id'],
                                   group_id=group.id)
        vol2 = self._create_volume(volume_type_id=type_ref['id'],
                                   group_id=group.id)
        volumes = self.db.volume_get_all_by_generic_group(
            self.ctxt.elevated(), group.id)
        self.driver.delete_group(self.ctxt, group, volumes)
        calls = [mock.call(vol1.name, force_unmap=False, force_delete=True),
                 mock.call(vol2.name, force_unmap=False, force_delete=True)]
        delete_vdisk.assert_has_calls(calls, any_order=True)

    def test_storwize_svc_retype_between_iogrps(self):
        self.driver.do_setup(None)
        ctxt = context.get_admin_context()
        key_specs_old = {'iogrp': 0}
        key_specs_new = {'iogrp': 1}
        old_type_ref = volume_types.create(ctxt, 'old', key_specs_old)
        new_type_ref = volume_types.create(ctxt, 'new', key_specs_new)
        diff, _equal = volume_types.volume_types_diff(ctxt,
                                                      old_type_ref['id'],
                                                      new_type_ref['id'])
        old_type = objects.VolumeType.get_by_id(ctxt,
                                                old_type_ref['id'])
        volume = self._generate_vol_info(old_type)
        new_type = objects.VolumeType.get_by_id(ctxt,
                                                new_type_ref['id'])
        self.driver.create_volume(volume)
        conn = {'initiator': u'iqn.1993-08.org.debian:01:eac5ccc1aaa',
                'ip': '10.10.10.12',
                'host': u'openstack@svc#openstack'}
        self.driver.initialize_connection(volume, conn)
        loc = ('StorwizeSVCDriver:' + self.driver._state['system_id'] +
               ':openstack2')
        cap = {'location_info': loc, 'extent_size': '128'}
        host_name = self.driver._helpers.get_host_from_connector(
            conn, iscsi=True)
        self.assertIsNotNone(host_name)
        host = {'host': host_name, 'capabilities': cap}
        volume['host'] = host['host']
        self.driver.retype(ctxt, volume, new_type, diff, host)
        attrs = self.driver._helpers.get_vdisk_attributes(volume['name'])
        self.assertEqual('1', attrs['IO_group_id'], 'Volume retype '
                         'failed')

    @mock.patch.object(storwize_svc_common.StorwizeSSH, 'addvdiskaccess')
    @mock.patch.object(storwize_svc_common.StorwizeSSH, 'rmvdiskaccess')
    @mock.patch.object(storwize_svc_common.StorwizeSSH, 'movevdisk')
    def test_storwize_svc_retype_between_iogrps_invalid(
            self, movevdisk, rmvdiskaccess, addvdiskaccess):
        self.driver.do_setup(None)
        ctxt = context.get_admin_context()
        key_specs_old = {'iogrp': 0}
        key_specs_new = {'iogrp': 1}
        old_type_ref = volume_types.create(ctxt,
                                           'old', key_specs_old)
        new_type_ref = volume_types.create(ctxt, 'new', key_specs_new)
        diff, _equal = volume_types.volume_types_diff(ctxt,
                                                      old_type_ref['id'],
                                                      new_type_ref['id'])
        old_type = objects.VolumeType.get_by_id(ctxt,
                                                old_type_ref['id'])
        volume = self._generate_vol_info(old_type)
        new_type = objects.VolumeType.get_by_id(ctxt,
                                                new_type_ref['id'])
        self.driver.create_volume(volume)
        conn = {'initiator': u'iqn.1993-08.org.debian:01:eac5ccc1aaa',
                'ip': '10.10.10.12',
                'host': u'openstack@svc#openstack'}
        self.driver.initialize_connection(volume, conn)
        loc = ('StorwizeSVCDriver:' + self.driver._state['system_id'] +
               ':openstack2')
        cap = {'location_info': loc, 'extent_size': '128'}
        host_name = self.driver._helpers.get_host_from_connector(
            conn, iscsi=True)
        self.assertIsNotNone(host_name)
        host = {'host': host_name, 'capabilities': cap}
        volume['host'] = host['host']
        ex = exception.VolumeBackendAPIException(data='CMMVC5879E')
        movevdisk.side_effect = ex
        self.assertRaises(exception.VolumeBackendAPIException,
                          self.driver.retype, ctxt, volume, new_type,
                          diff, host)
        attrs = self.driver._helpers.get_vdisk_attributes(volume['name'])
        self.assertEqual(int(key_specs_old['iogrp']),
                         int(attrs['IO_group_id']), 'Volume retype failed')
        addvdiskaccess.assert_called()
        movevdisk.assert_called()
        rmvdiskaccess.assert_called_with(
            volume['name'], str(key_specs_new['iogrp']))


class CLIResponseTestCase(test.TestCase):
    def test_empty(self):
        self.assertEqual(0, len(
            storwize_svc_common.CLIResponse('')))
        self.assertEqual(0, len(
            storwize_svc_common.CLIResponse(('', 'stderr'))))

    def test_header(self):
        raw = r'''id!name
1!node1
2!node2
'''
        resp = storwize_svc_common.CLIResponse(raw, with_header=True)
        self.assertEqual(2, len(resp))
        self.assertEqual('1', resp[0]['id'])
        self.assertEqual('2', resp[1]['id'])

    def test_select(self):
        raw = r'''id!123
name!Bill
name!Bill2
age!30
home address!s1
home address!s2

id!7
name!John
name!John2
age!40
home address!s3
home address!s4
'''
        resp = storwize_svc_common.CLIResponse(raw, with_header=False)
        self.assertEqual([('s1', 'Bill', 's1'), ('s2', 'Bill2', 's2'),
                          ('s3', 'John', 's3'), ('s4', 'John2', 's4')],
                         list(resp.select('home address', 'name',
                                          'home address')))

    def test_lsnode_all(self):
        raw = r'''id!name!UPS_serial_number!WWNN!status
1!node1!!500507680200C744!online
2!node2!!500507680200C745!online
'''
        resp = storwize_svc_common.CLIResponse(raw)
        self.assertEqual(2, len(resp))
        self.assertEqual('1', resp[0]['id'])
        self.assertEqual('500507680200C744', resp[0]['WWNN'])
        self.assertEqual('2', resp[1]['id'])
        self.assertEqual('500507680200C745', resp[1]['WWNN'])

    def test_lsnode_single(self):
        raw = r'''id!1
port_id!500507680210C744
port_status!active
port_speed!8Gb
port_id!500507680240C744
port_status!inactive
port_speed!8Gb
'''
        resp = storwize_svc_common.CLIResponse(raw, with_header=False)
        self.assertEqual(1, len(resp))
        self.assertEqual('1', resp[0]['id'])
        self.assertEqual([('500507680210C744', 'active'),
                          ('500507680240C744', 'inactive')],
                         list(resp.select('port_id', 'port_status')))


@ddt.ddt
class StorwizeHelpersTestCase(test.TestCase):
    def setUp(self):
        super(StorwizeHelpersTestCase, self).setUp()
        self.storwize_svc_common = storwize_svc_common.StorwizeHelpers(None)
        self.mock_wait_time = mock.patch.object(
            storwize_svc_common.StorwizeHelpers, "WAIT_TIME", 0)

    @mock.patch.object(storwize_svc_common.StorwizeSSH, 'lslicense')
    @mock.patch.object(storwize_svc_common.StorwizeSSH, 'lsguicapabilities')
    def test_compression_enabled(self, lsguicapabilities, lslicense):
        fake_license_without_keys = {}
        fake_license = {
            'license_compression_enclosures': '1',
            'license_compression_capacity': '1'
        }
        fake_license_scheme = {
            'license_scheme': '9846'
        }
        fake_9100_license_scheme = {
            'license_scheme': 'flex'
        }
        fake_license_invalid_scheme = {
            'license_scheme': '0000'
        }
        lslicense.side_effect = [fake_license_without_keys,
                                 fake_license_without_keys,
                                 fake_license,
                                 fake_license_without_keys]
        lsguicapabilities.side_effect = [fake_license_without_keys,
                                         fake_license_invalid_scheme,
                                         fake_license_scheme,
                                         fake_9100_license_scheme]
        self.assertFalse(self.storwize_svc_common.compression_enabled())
        self.assertFalse(self.storwize_svc_common.compression_enabled())
        self.assertTrue(self.storwize_svc_common.compression_enabled())
        self.assertTrue(self.storwize_svc_common.compression_enabled())

    @mock.patch.object(storwize_svc_common.StorwizeSSH, 'lsguicapabilities')
    def test_replication_licensed(self, lsguicapabilities):
        lsguicapabilities.side_effect = [
            {'product_key': '0000'},
            {'product_key': storwize_const.DEV_MODEL_STORWIZE_V3500},
            {'product_key': storwize_const.DEV_MODEL_STORWIZE_V5000E},
            {'product_key': storwize_const.DEV_MODEL_SVC},
            {'product_key': storwize_const.DEV_MODEL_STORWIZE},
            {'product_key': storwize_const.DEV_MODEL_STORWIZE_V7000},
            {'product_key': storwize_const.DEV_MODEL_STORWIZE_V5000},
            {'product_key': storwize_const.DEV_MODEL_STORWIZE_V5000_1YR},
            {'product_key': storwize_const.DEV_MODEL_FLASH_V9000},
            {'product_key': storwize_const.DEV_MODEL_FLEX}]
        for i in range(2):
            self.assertFalse(self.storwize_svc_common.replication_licensed())
        for i in range(8):
            self.assertTrue(self.storwize_svc_common.replication_licensed())

    @mock.patch.object(storwize_svc_common.StorwizeSSH, 'lsmdiskgrp')
    @mock.patch.object(storwize_svc_common.StorwizeHelpers,
                       'get_vdisk_count_by_io_group')
    def test_select_io_group(self, get_vdisk_count_by_io_group, lsmdiskgrp):
        # given io groups
        opts = {}
        # system io groups
        state =
{} lsmdiskgrp.return_value = {} fake_iog_vdc1 = {0: 10, 1: 50, 2: 50, 3: 300} fake_iog_vdc2 = {0: 2, 1: 1, 2: 200} fake_iog_vdc3 = {0: 2, 2: 200} fake_iog_vdc4 = {0: 100, 1: 100, 2: 100, 3: 100} fake_iog_vdc5 = {0: 10, 1: 1, 2: 200, 3: 300} get_vdisk_count_by_io_group.side_effect = [fake_iog_vdc1, fake_iog_vdc2, fake_iog_vdc3, fake_iog_vdc4, fake_iog_vdc5] pool = _get_test_pool(False) opts['volume_topology'] = None opts['iogrp'] = '0,2' state['available_iogrps'] = [0, 1, 2, 3] iog = self.storwize_svc_common.select_io_group(state, opts, pool) self.assertIn(iog, state['available_iogrps']) self.assertEqual(0, iog) opts['iogrp'] = '0' state['available_iogrps'] = [0, 1, 2] iog = self.storwize_svc_common.select_io_group(state, opts, pool) self.assertIn(iog, state['available_iogrps']) self.assertEqual(0, iog) opts['iogrp'] = '1,2' state['available_iogrps'] = [0, 2] iog = self.storwize_svc_common.select_io_group(state, opts, pool) self.assertIn(iog, state['available_iogrps']) self.assertEqual(2, iog) opts['iogrp'] = ' 0, 1, 2 ' state['available_iogrps'] = [0, 1, 2, 3] iog = self.storwize_svc_common.select_io_group(state, opts, pool) self.assertIn(iog, state['available_iogrps']) # since vdisk count in all iogroups is same, it will pick the first self.assertEqual(0, iog) opts['iogrp'] = '0,1,2, 3' state['available_iogrps'] = [0, 1, 2, 3] iog = self.storwize_svc_common.select_io_group(state, opts, pool) self.assertIn(iog, state['available_iogrps']) self.assertEqual(1, iog) @mock.patch.object(storwize_svc_common.StorwizeSSH, 'lsmdiskgrp') @mock.patch.object(storwize_svc_common.StorwizeHelpers, 'get_vdisk_count_by_io_group') def test_select_io_group_hyperswap(self, get_vdisk_count_by_io_group, lsmdiskgrp): # given io groups opts = {} # system io groups state = {} lsmdiskgrp.return_value = {} fake_iog_vdc1 = {0: 100, 1: 50, 2: 50, 3: 300} fake_iog_vdc2 = {0: 2, 1: 1, 2: 200} fake_iog_vdc3 = {0: 2, 2: 200} fake_iog_vdc4 = {0: 100, 1: 100, 2: 100, 3: 100} fake_iog_vdc5 = {0: 10, 1: 1, 2: 200, 3: 300} get_vdisk_count_by_io_group.side_effect = [fake_iog_vdc1, fake_iog_vdc2, fake_iog_vdc3, fake_iog_vdc4, fake_iog_vdc5] pool = _get_test_pool(False) opts['iogrp'] = '0,2' opts['volume_topology'] = 'hyperswap' state['available_iogrps'] = [0, 1, 2, 3] iog = self.storwize_svc_common.select_io_group(state, opts, pool) self.assertIn(iog, state['available_iogrps']) self.assertEqual(2, iog) opts['iogrp'] = '0' state['available_iogrps'] = [0, 1, 2] iog = self.storwize_svc_common.select_io_group(state, opts, pool) self.assertIn(iog, state['available_iogrps']) self.assertEqual(0, iog) opts['iogrp'] = '1,2' state['available_iogrps'] = [0, 2] iog = self.storwize_svc_common.select_io_group(state, opts, pool) self.assertIn(iog, state['available_iogrps']) self.assertEqual(2, iog) opts['iogrp'] = ' 0, 1, 2 ' state['available_iogrps'] = [0, 1, 2, 3] iog = self.storwize_svc_common.select_io_group(state, opts, pool) self.assertIn(iog, state['available_iogrps']) # since vdisk count in all iogroups is same, it will pick the first self.assertEqual(0, iog) opts['iogrp'] = '0,1,2, 3' state['available_iogrps'] = [0, 1, 2, 3] iog = self.storwize_svc_common.select_io_group(state, opts, pool) self.assertIn(iog, state['available_iogrps']) self.assertEqual(1, iog) @mock.patch.object(storwize_svc_common.StorwizeHelpers, '_get_flashcopy_mapping_attributes') @mock.patch.object(storwize_svc_common.StorwizeHelpers, '_get_vdisk_fc_mappings') def test_pretreatment_before_revert_uncompleted_clone( self, _get_vdisk_fc_mappings, 
_get_flashcopy_mapping_attributes): vol = 'testvol' _get_vdisk_fc_mappings.return_value = ['4'] _get_flashcopy_mapping_attributes.return_value = { 'copy_rate': '50', 'progress': '3', 'status': 'copying', 'target_vdisk_name': 'testvol'} self.assertRaises(exception.VolumeDriverException, self.storwize_svc_common.pretreatment_before_revert, vol) @mock.patch.object(storwize_svc_common.StorwizeSSH, 'stopfcmap') @mock.patch.object(storwize_svc_common.StorwizeHelpers, '_get_flashcopy_mapping_attributes') @mock.patch.object(storwize_svc_common.StorwizeHelpers, '_get_vdisk_fc_mappings') def test_pretreatment_before_revert_completed_clone( self, _get_vdisk_fc_mappings, _get_flashcopy_mapping_attributes, stopfcmap): vol = 'testvol' _get_vdisk_fc_mappings.return_value = ['4'] _get_flashcopy_mapping_attributes.return_value = { 'copy_rate': '50', 'progress': '100', 'status': 'copying', 'target_vdisk_name': 'testvol'} self.storwize_svc_common.pretreatment_before_revert(vol) stopfcmap.assert_called_once_with('4') @ddt.data({'copy_rate': '50', 'progress': '3', 'status': 'copying'}, {'copy_rate': '50', 'progress': '100', 'status': 'copying'}, {'copy_rate': '0', 'progress': '0', 'status': 'copying'}, {'copy_rate': '50', 'progress': '0', 'status': 'copying'}, {'copy_rate': '0', 'progress': '0', 'status': 'idle_or_copied'}) @mock.patch.object(storwize_svc_common.StorwizeSSH, 'chfcmap') @mock.patch.object(storwize_svc_common.StorwizeSSH, 'stopfcmap') @mock.patch.object(storwize_svc_common.StorwizeSSH, 'rmfcmap') @mock.patch.object(storwize_svc_common.StorwizeHelpers, '_get_flashcopy_mapping_attributes') @mock.patch.object(storwize_svc_common.StorwizeHelpers, '_get_vdisk_fc_mappings') @mock.patch.object(storwize_svc_common.StorwizeHelpers, 'get_relationship_info') def test_check_vdisk_fc_mappings(self, fc_data, get_relationship_info, get_vdisk_fc_mappings, get_fc_mapping_attributes, rmfcmap, stopfcmap, chfcmap): vol = 'testvol' get_vdisk_fc_mappings.return_value = ['4'] get_fc_mapping_attributes.return_value = { 'copy_rate': fc_data['copy_rate'], 'progress': fc_data['progress'], 'status': fc_data['status'], 'target_vdisk_name': 'tar-testvol', 'rc_controlled': 'no', 'source_vdisk_name': 'testvol'} get_relationship_info.return_value = None if (fc_data['copy_rate'] != '0' and fc_data['progress'] == '100' and fc_data['status'] == 'copying'): (self.assertRaises(loopingcall.LoopingCallDone, self.storwize_svc_common._check_vdisk_fc_mappings, vol, True, False)) stopfcmap.assert_called_with('4', False) self.assertEqual(1, stopfcmap.call_count) else: self.storwize_svc_common._check_vdisk_fc_mappings(vol, True, False) stopfcmap.assert_not_called() self.assertEqual(0, stopfcmap.call_count) get_vdisk_fc_mappings.assert_called() get_fc_mapping_attributes.assert_called_with('4') rmfcmap.assert_not_called() self.assertEqual(1, get_fc_mapping_attributes.call_count) self.assertEqual(0, rmfcmap.call_count) if (fc_data['copy_rate'] == '0' and fc_data['progress'] == '0' and fc_data['status'] in ['copying', 'idle_or_copied']): chfcmap.assert_called_with('4', copyrate='50', autodel='on') self.assertEqual(1, chfcmap.call_count) else: chfcmap.assert_not_called() self.assertEqual(0, chfcmap.call_count) @ddt.data({'copy_rate': '50', 'progress': '3', 'status': 'copying'}, {'copy_rate': '50', 'progress': '100', 'status': 'copying'}, {'copy_rate': '0', 'progress': '0', 'status': 'copying'}, {'copy_rate': '50', 'progress': '0', 'status': 'copying'}, {'copy_rate': '0', 'progress': '0', 'status': 'idle_or_copied'}) 
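    # Descriptive note (added): each data set above is one FlashCopy mapping
    # state (copy_rate, progress, status) used to drive
    # _check_vdisk_fc_mappings through its stop/modify/skip branches.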
@mock.patch.object(storwize_svc_common.StorwizeSSH, 'chfcmap') @mock.patch.object(storwize_svc_common.StorwizeSSH, 'stopfcmap') @mock.patch.object(storwize_svc_common.StorwizeSSH, 'rmfcmap') @mock.patch.object(storwize_svc_common.StorwizeHelpers, '_get_flashcopy_mapping_attributes') @mock.patch.object(storwize_svc_common.StorwizeHelpers, '_get_vdisk_fc_mappings') @mock.patch.object(storwize_svc_common.StorwizeHelpers, 'get_relationship_info') def test_check_vdisk_fc_mappings_with_rcrel(self, fc_data, get_relationship_info, get_vdisk_fc_mappings, get_fc_mapping_attributes, rmfcmap, stopfcmap, chfcmap): vol = 'testvol' get_vdisk_fc_mappings.return_value = ['4'] get_fc_mapping_attributes.return_value = { 'copy_rate': fc_data['copy_rate'], 'progress': fc_data['progress'], 'status': fc_data['status'], 'target_vdisk_name': 'tar-testvol', 'rc_controlled': 'no', 'source_vdisk_name': 'testvol'} rel_info = {'name': 'rcrel232'} get_relationship_info.return_value = rel_info if (fc_data['copy_rate'] != '0' and fc_data['progress'] == '100' and fc_data['status'] == 'copying'): (self.assertRaises(loopingcall.LoopingCallDone, self.storwize_svc_common._check_vdisk_fc_mappings, vol, True, False, rel_info)) stopfcmap.assert_called_with('4', True) self.assertEqual(1, stopfcmap.call_count) else: self.storwize_svc_common._check_vdisk_fc_mappings(vol, True, False) stopfcmap.assert_not_called() self.assertEqual(0, stopfcmap.call_count) get_vdisk_fc_mappings.assert_called() get_fc_mapping_attributes.assert_called_with('4') rmfcmap.assert_not_called() self.assertEqual(1, get_fc_mapping_attributes.call_count) self.assertEqual(0, rmfcmap.call_count) if (fc_data['copy_rate'] == '0' and fc_data['progress'] == '0' and fc_data['status'] in ['copying', 'idle_or_copied']): chfcmap.assert_called_with('4', copyrate='50', autodel='on') self.assertEqual(1, chfcmap.call_count) else: chfcmap.assert_not_called() self.assertEqual(0, chfcmap.call_count) @mock.patch.object(storwize_svc_common.StorwizeSSH, 'chfcmap') @mock.patch.object(storwize_svc_common.StorwizeSSH, 'stopfcmap') @mock.patch.object(storwize_svc_common.StorwizeSSH, 'rmfcmap') @mock.patch.object(storwize_svc_common.StorwizeHelpers, '_get_flashcopy_mapping_attributes') @mock.patch.object(storwize_svc_common.StorwizeHelpers, '_get_vdisk_fc_mappings') def test_check_vdisk_fc_mappings_tarisvol(self, get_vdisk_fc_mappings, get_fc_mapping_attributes, rmfcmap, stopfcmap, chfcmap): vol = 'tar-testvol' get_vdisk_fc_mappings.return_value = ['4'] get_fc_mapping_attributes.return_value = { 'copy_rate': '0', 'progress': '0', 'status': 'idle_or_copied', 'target_vdisk_name': 'tar-testvol', 'rc_controlled': 'no', 'source_vdisk_name': 'testvol'} self.assertRaises(loopingcall.LoopingCallDone, self.storwize_svc_common._check_vdisk_fc_mappings, vol, True, False) get_vdisk_fc_mappings.assert_called() get_fc_mapping_attributes.assert_called_with('4') stopfcmap.assert_not_called() rmfcmap.assert_called_with('4') chfcmap.assert_not_called() self.assertEqual(1, get_fc_mapping_attributes.call_count) self.assertEqual(0, stopfcmap.call_count) self.assertEqual(1, rmfcmap.call_count) self.assertEqual(0, chfcmap.call_count) @ddt.data(([{'cp_rate': '0', 'prgs': '0', 'status': 'idle_or_copied', 'trg_vdisk': 'testvol', 'src_vdisk': 'tar_testvol'}, {'cp_rate': '50', 'prgs': '100', 'status': 'copying', 'trg_vdisk': 'tar_testvol', 'src_vdisk': 'testvol'}, {'cp_rate': '50', 'prgs': '3', 'status': 'copying', 'trg_vdisk': 'tar_testvol', 'src_vdisk': 'testvol'}], 1), ([{'cp_rate': '50', 'prgs': '100', 
'status': 'idle_or_copied', 'trg_vdisk': 'testvol', 'src_vdisk': 'tar_testvol'}, {'cp_rate': '50', 'prgs': '100', 'status': 'copying', 'trg_vdisk': 'tar_testvol', 'src_vdisk': 'testvol'}, {'cp_rate': '50', 'prgs': '100', 'status': 'copying', 'trg_vdisk': 'testvol', 'src_vdisk': 'tar_testvol'}], 1), ([{'cp_rate': '50', 'prgs': '100', 'status': 'idle_or_copied', 'trg_vdisk': 'testvol', 'src_vdisk': 'tar_testvol'}, {'cp_rate': '50', 'prgs': '100', 'status': 'copying', 'trg_vdisk': 'tar_testvol', 'src_vdisk': 'testvol'}, {'cp_rate': '50', 'prgs': '100', 'status': 'copying', 'trg_vdisk': 'tar_testvol_1', 'src_vdisk': 'testvol'}], 2), ([{'cp_rate': '0', 'prgs': '0', 'status': 'copying', 'trg_vdisk': 'testvol', 'src_vdisk': 'snap_testvol'}, {'cp_rate': '50', 'prgs': '0', 'status': 'copying', 'trg_vdisk': 'tar_testvol', 'src_vdisk': 'testvol'}, {'cp_rate': '50', 'prgs': '0', 'status': 'copying', 'trg_vdisk': 'tar_testvol_1', 'src_vdisk': 'testvol'}], 0)) @mock.patch.object(storwize_svc_common.StorwizeSSH, 'chfcmap') @mock.patch.object(storwize_svc_common.StorwizeSSH, 'stopfcmap') @mock.patch.object(storwize_svc_common.StorwizeSSH, 'rmfcmap') @mock.patch.object(storwize_svc_common.StorwizeHelpers, '_get_flashcopy_mapping_attributes') @mock.patch.object(storwize_svc_common.StorwizeHelpers, '_get_vdisk_fc_mappings') @ddt.unpack def test_check_vdisk_fc_mappings_mul_fcs(self, fc_data, stopfc_count, get_vdisk_fc_mappings, get_fc_mapping_attributes, rmfcmap, stopfcmap, chfcmap): vol = 'testvol' get_vdisk_fc_mappings.return_value = ['4', '5', '7'] get_fc_mapping_attributes.side_effect = [ { 'copy_rate': fc_data[0]['cp_rate'], 'progress': fc_data[0]['prgs'], 'status': fc_data[0]['status'], 'target_vdisk_name': fc_data[0]['trg_vdisk'], 'rc_controlled': 'no', 'source_vdisk_name': fc_data[0]['src_vdisk']}, { 'copy_rate': fc_data[1]['cp_rate'], 'progress': fc_data[1]['prgs'], 'status': fc_data[1]['status'], 'target_vdisk_name': fc_data[1]['trg_vdisk'], 'rc_controlled': 'no', 'source_vdisk_name': fc_data[1]['src_vdisk']}, { 'copy_rate': fc_data[2]['cp_rate'], 'progress': fc_data[2]['prgs'], 'status': fc_data[2]['status'], 'target_vdisk_name': fc_data[2]['trg_vdisk'], 'rc_controlled': 'no', 'source_vdisk_name': fc_data[2]['src_vdisk']}] self.storwize_svc_common._check_vdisk_fc_mappings(vol, True, True) get_vdisk_fc_mappings.assert_called() get_fc_mapping_attributes.assert_called() rmfcmap.assert_not_called() chfcmap.assert_not_called() self.assertEqual(3, get_fc_mapping_attributes.call_count) self.assertEqual(stopfc_count, stopfcmap.call_count) self.assertEqual(0, rmfcmap.call_count) self.assertEqual(0, chfcmap.call_count) @ddt.data(([{'cp_rate': '0', 'prgs': '0', 'status': 'idle_or_copied', 'trg_vdisk': 'vdisk', 'src_vdisk': 'Hyp_vol'}, {'cp_rate': '50', 'prgs': '0', 'status': 'idle_or_copied', 'trg_vdisk': 'Hyp_vol', 'src_vdisk': 'vdisk'}, {'cp_rate': '50', 'prgs': '3', 'status': 'copying', 'trg_vdisk': 'Snap_vol', 'src_vdisk': 'Hyp_vol'}, {'cp_rate': '50', 'prgs': '0', 'status': 'copying', 'trg_vdisk': 'Snap_vol_1', 'src_vdisk': 'Hyp_vol'}], 0), ([{'cp_rate': '0', 'prgs': '0', 'status': 'idle_or_copied', 'trg_vdisk': 'vdisk', 'src_vdisk': 'Hyp_vol'}, {'cp_rate': '50', 'prgs': '0', 'status': 'idle_or_copied', 'trg_vdisk': 'Hyp_vol', 'src_vdisk': 'vdisk'}, {'cp_rate': '50', 'prgs': '100', 'status': 'copying', 'trg_vdisk': 'Snap_vol', 'src_vdisk': 'Hyp_vol'}, {'cp_rate': '50', 'prgs': '0', 'status': 'copying', 'trg_vdisk': 'Snap_vol_1', 'src_vdisk': 'Hyp_vol'}], 1)) 
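    # Descriptive note (added): the second element of each data set above is
    # the expected stopfcmap call count once the rc_controlled mappings are
    # skipped.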
@mock.patch.object(storwize_svc_common.StorwizeSSH, 'chfcmap') @mock.patch.object(storwize_svc_common.StorwizeSSH, 'stopfcmap') @mock.patch.object(storwize_svc_common.StorwizeSSH, 'rmfcmap') @mock.patch.object(storwize_svc_common.StorwizeHelpers, '_get_flashcopy_mapping_attributes') @mock.patch.object(storwize_svc_common.StorwizeHelpers, '_get_vdisk_fc_mappings') @ddt.unpack def test_check_vdisk_fc_mappings_rc_cont(self, fc_data, stopfc_count, get_vdisk_fc_mappings, get_fc_mapping_attributes, rmfcmap, stopfcmap, chfcmap): vol = 'Hyp_vol' get_vdisk_fc_mappings.return_value = ['4', '5', '7', '9'] get_fc_mapping_attributes.side_effect = [ { 'copy_rate': fc_data[0]['cp_rate'], 'progress': fc_data[0]['prgs'], 'status': fc_data[0]['status'], 'target_vdisk_name': fc_data[0]['trg_vdisk'], 'rc_controlled': 'yes', 'source_vdisk_name': fc_data[0]['src_vdisk']}, { 'copy_rate': fc_data[1]['cp_rate'], 'progress': fc_data[1]['prgs'], 'status': fc_data[1]['status'], 'target_vdisk_name': fc_data[1]['trg_vdisk'], 'rc_controlled': 'yes', 'source_vdisk_name': fc_data[1]['src_vdisk']}, { 'copy_rate': fc_data[2]['cp_rate'], 'progress': fc_data[2]['prgs'], 'status': fc_data[2]['status'], 'target_vdisk_name': fc_data[2]['trg_vdisk'], 'rc_controlled': 'no', 'source_vdisk_name': fc_data[2]['src_vdisk']}, { 'copy_rate': fc_data[3]['cp_rate'], 'progress': fc_data[3]['prgs'], 'status': fc_data[3]['status'], 'target_vdisk_name': fc_data[3]['trg_vdisk'], 'rc_controlled': 'no', 'source_vdisk_name': fc_data[3]['src_vdisk']}] self.storwize_svc_common._check_vdisk_fc_mappings(vol, True, True) get_vdisk_fc_mappings.assert_called() get_fc_mapping_attributes.assert_called() rmfcmap.assert_not_called() chfcmap.assert_not_called() self.assertEqual(4, get_fc_mapping_attributes.call_count) self.assertEqual(stopfc_count, stopfcmap.call_count) self.assertEqual(0, rmfcmap.call_count) self.assertEqual(0, chfcmap.call_count) def test_storwize_check_flashcopy_rate_invalid1(self): with mock.patch.object(storwize_svc_common.StorwizeHelpers, 'get_system_info') as get_system_info: fake_system_info = {'code_level': (7, 6, 0, 0), 'topology': 'standard', 'system_name': 'storwize-svc-sim', 'system_id': '0123456789ABCDEF'} get_system_info.return_value = fake_system_info flashcopy_rate = 120 self.assertRaises(exception.VolumeDriverException, self.storwize_svc_common.check_flashcopy_rate, flashcopy_rate) def test_storwize_check_flashcopy_rate_invalid2(self): with mock.patch.object(storwize_svc_common.StorwizeHelpers, 'get_system_info') as get_system_info: fake_system_info = {'code_level': (7, 8, 1, 2), 'topology': 'standard', 'system_name': 'storwize-svc-sim', 'system_id': '0123456789ABCDEF'} get_system_info.return_value = fake_system_info flashcopy_rate = 200 self.assertRaises(exception.InvalidInput, self.storwize_svc_common.check_flashcopy_rate, flashcopy_rate) @mock.patch.object(storwize_svc_common.StorwizeSSH, 'chfcmap') @mock.patch.object(storwize_svc_common.StorwizeHelpers, '_get_vdisk_fc_mappings') def test_storwize_update_clean_rate(self, chfcmap, get_vdisk_fc_mappings): get_vdisk_fc_mappings.return_value = ['4'] vol = 'test_vol' new_clean_rate = 50 self.storwize_svc_common.update_clean_rate(vol, new_clean_rate) chfcmap.assert_called() @ddt.data(({'mirror_pool': 'openstack2', 'volume_topology': None, 'peer_pool': None}, True, 1), ({'mirror_pool': 'openstack2', 'volume_topology': None, 'peer_pool': None}, False, 2), ({'mirror_pool': None, 'volume_topology': 'hyperswap', 'peer_pool': 'openstack1'}, True, 1), ({'mirror_pool': None, 
'volume_topology': 'hyperswap', 'peer_pool': 'openstack1'}, False, 2)) @mock.patch.object(storwize_svc_common.StorwizeHelpers, 'is_data_reduction_pool') @ddt.unpack def test_is_volume_type_dr_pools_dr_pool(self, opts, is_drp, call_count, is_data_reduction_pool): is_data_reduction_pool.return_value = is_drp pool = 'openstack' rep_type = None rep_target_pool = None isdrpool = (self.storwize_svc_common. is_volume_type_dr_pools(pool, opts, rep_type, rep_target_pool)) self.assertEqual(is_drp, isdrpool) is_data_reduction_pool.assert_called() self.assertEqual(call_count, is_data_reduction_pool.call_count) @ddt.data(({'RC_name': None, 'name': 'volume-12d-5'}, True), ({'RC_name': 'fake_rcrel', 'name': 'rep_volume-12d-6'}, False)) @mock.patch.object(storwize_svc_common.StorwizeSSH, 'startrcrelationship') @mock.patch.object(storwize_svc_common.StorwizeSSH, 'stoprcrelationship') @mock.patch.object(storwize_svc_common.StorwizeHelpers, 'get_vdisk_attributes') @ddt.unpack def test_stop_and_start_rc_relationship(self, opts, access, get_vdisk_attributes, stoprcrelationship, startrcrelationship): get_vdisk_attributes.side_effect = [{'RC_name': opts['RC_name']}, {'RC_name': opts['RC_name']}] self.storwize_svc_common.stop_relationship(opts['name']) self.storwize_svc_common.start_relationship(opts['name']) get_vdisk_attributes.assert_called_with(opts['name']) if not opts['RC_name']: stoprcrelationship.assert_called() startrcrelationship.assert_called() else: stoprcrelationship.assert_called_once_with(opts['RC_name'], access=access) startrcrelationship.assert_called_once_with(opts['RC_name'], None) @ddt.data(({'RC_name': 'fake_rcrel', 'consistency_group_name': 'fake_rccg-1', 'name': 'rep_volume-12d-6'}), ({'RC_name': 'fake_rcrel', 'consistency_group_name': None, 'name': 'rep_volume-12d-6'})) @mock.patch.object(storwize_svc_common.StorwizeHelpers, 'get_vdisk_attributes') @mock.patch.object(storwize_svc_common.StorwizeSSH, 'lsrcrelationship') def test_get_rccg_name_by_volume_name(self, opts, lsrcrelationship, get_vdisk_attributes): get_vdisk_attributes.side_effect = [{'RC_name': opts['RC_name']}] lsrcrelationship.side_effect = \ [[{'consistency_group_name': opts['consistency_group_name']}]] rccg_name = self.storwize_svc_common.get_rccg_name_by_volume_name( opts['name']) get_vdisk_attributes.assert_called_with(opts['name']) lsrcrelationship.assert_called_with(opts['RC_name']) self.assertEqual(rccg_name, opts['consistency_group_name']) @ddt.data(({'name': 'rep_volume-12d-6'})) @mock.patch.object(storwize_svc_common.StorwizeHelpers, 'get_vdisk_attributes') @mock.patch.object(storwize_svc_common.StorwizeSSH, 'lsrcrelationship') def test_get_rccg_name_by_volume_name_with_None_volume_attributes( self, opts, lsrcrelationship, get_vdisk_attributes): get_vdisk_attributes.side_effect = [None] rccg_name = self.storwize_svc_common.get_rccg_name_by_volume_name( opts['name']) get_vdisk_attributes.assert_called_with(opts['name']) lsrcrelationship.assert_not_called() self.assertIsNone(rccg_name) @ddt.data(({'RC_name': None, 'name': 'volume-12d-7'}, 'multi'), ({'RC_name': 'fake_rcrel', 'name': 'rep_volume-12d-8'}, 'multi'), ({'RC_name': 'fake_rcrel-2', 'name': 'rep_volume-12d-9'}, 'none'), ({'RC_name': 'fake_rcrel-3', 'name': 'rep_volume-12d-10'}, None)) @mock.patch.object(storwize_svc_common.StorwizeSSH, 'ch_rcrelationship_cyclingmode') @mock.patch.object(storwize_svc_common.StorwizeHelpers, 'get_vdisk_attributes') @ddt.unpack def test_change_relationship_cyclingmode(self, opts, cycling_mode, get_vdisk_attributes, 
ch_rcrelationship_cyclingmode): get_vdisk_attributes.side_effect = [{'RC_name': opts['RC_name']}] self.storwize_svc_common.change_relationship_cyclingmode( opts['name'], cycling_mode) get_vdisk_attributes.assert_called_with(opts['name']) if not opts['RC_name'] or not cycling_mode: ch_rcrelationship_cyclingmode.assert_not_called() else: ch_rcrelationship_cyclingmode.assert_called_once_with( opts['RC_name'], cycling_mode) @mock.patch.object(storwize_svc_common.StorwizeHelpers, 'get_host_from_host_info') @mock.patch.object(storwize_svc_common.StorwizeSSH, 'lshost') @mock.patch.object(storwize_svc_common.StorwizeSSH, 'lsvdiskhostmap') def test_get_host_from_connector_with_vol(self, lsvdishosmap, lshost, get_host_from_host_info): vol = 'testvol' connector = {"wwpns": ["10000090fa3870d7", "C050760825191B00"]} get_host_from_host_info.return_value = "test_host", [] raw = "id!name!host_id!host_name!vdisk_UID\n2594!testvol!315!"\ "test_host!60050768028110A4700000000001168E" ssh_cmd = ['svcinfo', 'lsvdiskhostmap', '-delim', '!', '"%s"' % vol] lsvdishosmap.return_value = storwize_svc_common.CLIResponse(raw, ssh_cmd, '!', True) host = self.storwize_svc_common.get_host_from_connector(connector, vol) self.assertEqual(host, "test_host") get_host_from_host_info.assert_called_with(connector, False) self.assertEqual(1, get_host_from_host_info.call_count) lsvdishosmap.assert_called_with(vol) self.assertEqual(1, lsvdishosmap.call_count) lshost.assert_not_called() @mock.patch.object(storwize_svc_common.StorwizeHelpers, 'get_host_from_host_info') @mock.patch.object(storwize_svc_common.StorwizeSSH, 'lshost') @mock.patch.object(storwize_svc_common.StorwizeSSH, 'lsvdiskhostmap') def test_get_host_from_connector_wo_vol(self, lsvdishosmap, lshost, get_host_from_host_info): vol = 'testvol' connector = {"wwpns": ["10000090fa3870d7", "C050760825191B00"]} get_host_from_host_info.return_value = "test_host", [] raw = "id!name!host_id!host_name!vdisk_UID\n2594!testvol!315!"\ "test_host!60050768028110A4700000000001168E" ssh_cmd = ['svcinfo', 'lsvdiskhostmap', '-delim', '!', '"%s"' % vol] lsvdishosmap.return_value = storwize_svc_common.CLIResponse(raw, ssh_cmd, '!', True) host = self.storwize_svc_common.get_host_from_connector(connector) self.assertEqual(host, "test_host") get_host_from_host_info.assert_called_with(connector, False) self.assertEqual(1, get_host_from_host_info.call_count) lsvdishosmap.assert_not_called() self.assertEqual(0, lsvdishosmap.call_count) lshost.assert_not_called() @ddt.ddt class StorwizeSSHTestCase(test.TestCase): def setUp(self): super(StorwizeSSHTestCase, self).setUp() self.fake_driver = StorwizeSVCISCSIFakeDriver( configuration=conf.Configuration(None)) sim = StorwizeSVCManagementSimulator(['openstack']) self.fake_driver.set_fake_storage(sim) self.storwize_ssh = storwize_svc_common.StorwizeSSH( self.fake_driver._run_ssh) def test_ch_rcrelationship_cyclingmode(self): with mock.patch.object( storwize_svc_common.StorwizeSSH, 'run_ssh_assert_no_output') as run_ssh_assert_no_output: run_ssh_assert_no_output.return_value = None ret = self.storwize_ssh.ch_rcrelationship_cyclingmode('rcrel0', 'multi') self.assertIsNone(ret) ret = self.storwize_ssh.ch_rcrelationship_cyclingmode('rcrel1', 'none') self.assertIsNone(ret) def test_ch_rcconsistgrp_cyclingmode(self): with mock.patch.object( storwize_svc_common.StorwizeSSH, 'run_ssh_assert_no_output') as run_ssh_assert_no_output: run_ssh_assert_no_output.return_value = None ret = self.storwize_ssh.ch_rcconsistgrp_cyclingmode('fake_rccg-1', 'multi') 
self.assertIsNone(ret) ret = self.storwize_ssh.ch_rcconsistgrp_cyclingmode('fake_rccg-1') self.assertIsNone(ret) def test_mkvdiskhostmap(self): # mkvdiskhostmap should not be returning anything with mock.patch.object( storwize_svc_common.StorwizeSSH, 'run_ssh_check_created') as run_ssh_check_created: run_ssh_check_created.return_value = None ret = self.storwize_ssh.mkvdiskhostmap('HOST1', 9999, 511, False) self.assertIsNone(ret) ret = self.storwize_ssh.mkvdiskhostmap('HOST2', 9999, 511, True) self.assertIsNone(ret) ex = exception.VolumeBackendAPIException(data='CMMVC6071E') run_ssh_check_created.side_effect = ex self.assertRaises(exception.VolumeBackendAPIException, self.storwize_ssh.mkvdiskhostmap, 'HOST3', 9999, 511, True) @ddt.data((exception.VolumeBackendAPIException(data='CMMVC6372W'), None), (exception.VolumeBackendAPIException(data='CMMVC6372W'), {'name': 'fakevol', 'id': '0', 'uid': '0', 'IO_group_id': '0', 'IO_group_name': 'fakepool'}), (exception.VolumeBackendAPIException(data='error'), None)) @ddt.unpack def test_mkvdisk_with_warning(self, run_ssh_check, lsvol): opt = {'iogrp': 0} with mock.patch.object(storwize_svc_common.StorwizeSSH, 'run_ssh_check_created', side_effect=run_ssh_check), \ mock.patch.object(storwize_svc_common.StorwizeSSH, 'lsvdisk', return_value=lsvol): if lsvol: ret = self.storwize_ssh.mkvdisk('fakevol', '1', 'gb', 'fakepool', opt, []) self.assertEqual('0', ret) else: self.assertRaises(exception.VolumeBackendAPIException, self.storwize_ssh.mkvdisk, 'fakevol', '1', 'gb', 'fakepool', opt, []) @ddt.ddt class StorwizeSVCReplicationTestCase(test.TestCase): @mock.patch.object(time, 'sleep') def setUp(self, mock_sleep): super(StorwizeSVCReplicationTestCase, self).setUp() def _run_ssh_aux(cmd, check_exit_code=True, attempts=1): utils.check_ssh_injection(cmd) if len(cmd) > 2 and cmd[1] == 'lssystem': cmd[1] = 'lssystem_aux' ret = self.sim.execute_command(cmd, check_exit_code) return ret aux_connect_patcher = mock.patch( 'cinder.volume.drivers.ibm.storwize_svc.' 
'replication.StorwizeSVCReplicationManager._run_ssh') self.aux_ssh_mock = aux_connect_patcher.start() self.addCleanup(aux_connect_patcher.stop) self.aux_ssh_mock.side_effect = _run_ssh_aux self.USESIM = True if self.USESIM: self.driver = StorwizeSVCFcFakeDriver( configuration=conf.Configuration(None)) self.rep_target = {"backend_id": "svc_aux_target_1", "san_ip": "192.168.10.22", "san_login": "admin", "san_password": "admin", "pool_name": _get_test_pool()} self.fake_target = {"backend_id": "svc_id_target", "san_ip": "192.168.10.23", "san_login": "admin", "san_password": "admin", "pool_name": _get_test_pool()} self._def_flags = {'san_ip': '192.168.10.21', 'san_login': 'user', 'san_password': 'pass', 'storwize_svc_volpool_name': SVC_POOLS, 'replication_device': [self.rep_target]} wwpns = [ str(random.randint(0, 9999999999999999)).zfill(16), str(random.randint(0, 9999999999999999)).zfill(16)] initiator = 'test.initiator.%s' % str( random.randint(10000, 99999)) self._connector = {'ip': '1.234.56.78', 'host': 'storwize-svc-test', 'wwpns': wwpns, 'initiator': initiator} self.sim = StorwizeSVCManagementSimulator(SVC_POOLS) self.driver.set_fake_storage(self.sim) self.ctxt = context.get_admin_context() self._reset_flags() self.ctxt = context.get_admin_context() self.db = cinder.db self.driver.db = self.db self.driver.do_setup(None) self.driver.check_for_setup_error() self._create_test_volume_types() self.rccg_type = self._create_consistent_rep_grp_type() def _create_group_snapshot_in_db(self, group_id, **kwargs): group_snapshot = testutils.create_group_snapshot(self.ctxt, group_id=group_id, **kwargs) snapshots = [] volumes = self.db.volume_get_all_by_generic_group( self.ctxt.elevated(), group_id) if not volumes: msg = _("Group is empty. No cgsnapshot will be created.") raise exception.InvalidGroup(reason=msg) for volume in volumes: snapshots.append(testutils.create_snapshot( self.ctxt, volume['id'], group_snapshot.id, group_snapshot.name, group_snapshot.id, fields.SnapshotStatus.CREATING)) return group_snapshot, snapshots def _create_group_snapshot(self, cg_id, **kwargs): group_snapshot, snapshots = self._create_group_snapshot_in_db( cg_id, **kwargs) model_update, snapshots_model = ( self.driver.create_group_snapshot(self.ctxt, group_snapshot, snapshots)) self.assertEqual(fields.GroupSnapshotStatus.AVAILABLE, model_update['status'], "CGSnapshot created failed") for snapshot in snapshots_model: self.assertEqual(fields.SnapshotStatus.AVAILABLE, snapshot['status']) return group_snapshot, snapshots def _set_flag(self, flag, value): group = self.driver.configuration.config_group self.driver.configuration.set_override(flag, value, group) def _reset_flags(self): for k, v in self._def_flags.items(): self._set_flag(k, v) self.driver.configuration.set_override('replication_device', [self.rep_target]) def _assert_vol_exists(self, name, exists): is_vol_defined = self.driver._helpers.is_vdisk_defined(name) self.assertEqual(exists, is_vol_defined) def _generate_vol_info(self, vol_type=None, **kwargs): pool = _get_test_pool() volume_type = vol_type if vol_type else self.non_replica_type prop = {'size': 1, 'volume_type_id': volume_type.id, 'host': 'openstack@svc#%s' % pool } for p in prop.keys(): if p not in kwargs: kwargs[p] = prop[p] vol = testutils.create_volume(self.ctxt, **kwargs) return vol def _generate_snap_info(self, vol_id): prop = {'volume_id': vol_id} snap = testutils.create_snapshot(self.ctxt, **prop) return snap def _create_replica_volume_type(self, enable, rep_type=storwize_const.METRO, opts=None, 
vol_type_name=None, cycle_period_seconds=None): # Generate a volume type for volume repliation. if enable: if rep_type == storwize_const.METRO: spec = {'replication_enabled': ' True', 'replication_type': ' metro'} type_name = 'rep_metro' elif rep_type == storwize_const.GMCV: if cycle_period_seconds: spec = {'replication_enabled': ' True', 'replication_type': ' gmcv', 'drivers:cycle_period_seconds': cycle_period_seconds} type_name = 'rep_gmcv_with_cps' + cycle_period_seconds else: spec = {'replication_enabled': ' True', 'replication_type': ' gmcv'} type_name = 'rep_gmcv_default' else: spec = {'replication_enabled': ' True', 'replication_type': ' global'} type_name = 'rep_global' elif opts: spec = opts type_name = vol_type_name else: spec = {'replication_enabled': ' False'} type_name = "non_rep" type_ref = volume_types.create(self.ctxt, type_name, spec) replication_type = objects.VolumeType.get_by_id(self.ctxt, type_ref['id']) return replication_type def _create_test_volume_types(self): self.mm_type = self._create_replica_volume_type( True, rep_type=storwize_const.METRO) self.gm_type = self._create_replica_volume_type( True, rep_type=storwize_const.GLOBAL) self.gmcv_default_type = self._create_replica_volume_type( True, rep_type=storwize_const.GMCV) self.gmcv_with_cps600_type = self._create_replica_volume_type( True, rep_type=storwize_const.GMCV, cycle_period_seconds="600") self.gmcv_with_cps900_type = self._create_replica_volume_type( True, rep_type=storwize_const.GMCV, cycle_period_seconds="900") self.gmcv_with_cps86401_type = self._create_replica_volume_type( True, rep_type=storwize_const.GMCV, cycle_period_seconds="86401") self.non_replica_type = self._create_replica_volume_type(False) def _create_test_volume(self, rep_type, **kwargs): volume = self._generate_vol_info(rep_type, **kwargs) model_update = self.driver.create_volume(volume) return volume, model_update def _create_consistent_rep_grp_type(self): rccg_spec = {'consistent_group_replication_enabled': ' True'} rccg_type_ref = group_types.create(self.ctxt, 'cg_type', rccg_spec) rccg_type = objects.GroupType.get_by_id(self.ctxt, rccg_type_ref['id']) return rccg_type def _create_test_rccg(self, rccg_type, vol_type_ids): # create group in db group = testutils.create_group(self.ctxt, volume_type_ids=vol_type_ids, group_type_id=rccg_type.id) if self.rccg_type == rccg_type: group.replication_status = fields.ReplicationStatus.ENABLED self.driver.create_group(self.ctxt, group) return group def _get_vdisk_uid(self, vdisk_name): vdisk_properties, _err = self.sim._cmd_lsvdisk(obj=vdisk_name, delim='!') for row in vdisk_properties.split('\n'): words = row.split('!') if words[0] == 'vdisk_UID': return words[1] return None def _get_pool_volumes(self, pool): vdisks = self.sim._cmd_lsvdisks_from_filter('mdisk_grp_name', pool) return vdisks @mock.patch.object(storwize_svc_common.StorwizeSVCCommonDriver, '_build_pool_stats') def test_update_volume_stats_replication(self, _build_pool_stats): self.driver.configuration.set_override('replication_device', [self.rep_target]) self.driver._update_volume_stats() self.assertTrue(self.driver._replica_enabled) target_pools = [self.driver._replica_target.get('pool_name')] # Expected call count = Number of primary Pools and Secondary Pools expected_call_count = len(SVC_POOLS) + len(target_pools) self.assertEqual(expected_call_count, _build_pool_stats.call_count) self.assertIsNotNone(self.driver._master_backend_helpers.stats) self.assertIsNotNone(self.driver._aux_backend_helpers.stats) @ddt.data((False, False), 
(True, True)) @mock.patch.object(storwize_svc_common.StorwizeHelpers, 'get_pool_attrs') @ddt.unpack def test_build_pool_stats_calls(self, replication_enabled, target, get_pool_attrs): pool = "openstack" master_helper = self.driver._master_backend_helpers target_helper = self.driver._aux_backend_helpers if replication_enabled: self.driver.configuration.set_override('replication_device', [self.rep_target]) self.driver._build_pool_stats(pool, target) if target: target_helper.get_pool_attrs.assert_called_once_with(pool) else: master_helper.get_pool_attrs.assert_called_once_with(pool) def test_storwize_do_replication_setup_error(self): fake_targets = [self.rep_target, self.rep_target] self.driver.configuration.set_override('replication_device', [{"backend_id": "svc_id_target"}]) self.assertRaises(exception.InvalidInput, self.driver._do_replication_setup) self.driver.configuration.set_override('replication_device', fake_targets) self.assertRaises(exception.InvalidInput, self.driver._do_replication_setup) self.driver._active_backend_id = 'fake_id' self.driver.configuration.set_override('replication_device', [self.rep_target]) self.assertRaises(exception.InvalidInput, self.driver._do_replication_setup) self.driver._active_backend_id = None self.driver._do_replication_setup() self.assertEqual(self.driver._replica_target, self.rep_target) @mock.patch.object(storwize_svc_common.StorwizeHelpers, 'replication_licensed') def test_storwize_setup_replication(self, replication_licensed): self.driver.configuration.set_override('replication_device', [self.rep_target]) self.driver._active_backend_id = None replication_licensed.side_effect = [False, True, True, True, True] self.driver._get_storwize_config() self.assertEqual(self.driver._helpers, self.driver._master_backend_helpers) self.assertFalse(self.driver._replica_enabled) self.driver._get_storwize_config() self.assertEqual(self.driver._replica_target, self.rep_target) self.assertTrue(self.driver._replica_enabled) self.driver._active_backend_id = self.rep_target['backend_id'] self.driver._get_storwize_config() self.assertEqual(self.driver._helpers, self.driver._aux_backend_helpers) self.assertTrue(self.driver._replica_enabled) self.driver._active_backend_id = None self.driver._get_storwize_config() with mock.patch.object(storwize_svc_common.StorwizeSVCCommonDriver, '_update_storwize_state') as update_state: update_state.side_effect = [ exception.VolumeBackendAPIException(data='CMMVC6372W'), exception.VolumeBackendAPIException(data='CMMVC6372W'), None] self.driver._active_backend_id = None self.assertRaises(exception.VolumeBackendAPIException, self.driver._get_storwize_config) self.driver._active_backend_id = self.rep_target['backend_id'] self.driver._get_storwize_config() self.assertEqual(self.driver._helpers, self.driver._aux_backend_helpers) self.assertTrue(self.driver._replica_enabled) def test_storwize_create_volume_with_mirror_replication(self): # Set replication target. self.driver.configuration.set_override('replication_device', [self.rep_target]) self.driver.do_setup(self.ctxt) # Create metro mirror replication. volume, model_update = self._create_test_volume(self.mm_type) self.assertEqual(fields.ReplicationStatus.ENABLED, model_update['replication_status']) self._validate_replic_vol_creation(volume) self.driver.delete_volume(volume) self._validate_replic_vol_deletion(volume) # Create global mirror replication. 
volume, model_update = self._create_test_volume(self.gm_type) self.assertEqual(fields.ReplicationStatus.ENABLED, model_update['replication_status']) self._validate_replic_vol_creation(volume) self.driver.delete_volume(volume) self._validate_replic_vol_deletion(volume) # Create global mirror with change volumes replication. volume, model_update = self._create_test_volume( self.gmcv_default_type) self.assertEqual(fields.ReplicationStatus.ENABLED, model_update['replication_status']) self._validate_replic_vol_creation(volume, True) self.driver.delete_volume(volume) self._validate_replic_vol_deletion(volume, True) # gmcv with specified cycle_period_seconds volume, model_update = self._create_test_volume( self.gmcv_with_cps600_type) self.assertEqual(fields.ReplicationStatus.ENABLED, model_update['replication_status']) self._validate_replic_vol_creation(volume, True) self.driver.delete_volume(volume) self._validate_replic_vol_deletion(volume, True) # gmcv with invalid cycle_period_seconds self.assertRaises(exception.InvalidInput, self._create_test_volume, self.gmcv_with_cps86401_type) def test_storwize_create_replication_volume_with_mirror_pool(self): """Create a replication volume with mirror_pool option""" # Set replication target self.driver.configuration.set_override('replication_device', [self.rep_target]) self.driver.do_setup(self.ctxt) # Create MM volume with mirror_pool spec = {'replication_enabled': ' True', 'replication_type': ' metro', 'drivers:mirror_pool': 'openstack1'} mm_mirror_type = self._create_replica_volume_type( False, opts=spec, vol_type_name='mm_mirror_type') mm_volume, model_update = self._create_test_volume( mm_mirror_type) # Check the parameters and their values self.assertEqual(fields.ReplicationStatus.ENABLED, model_update['replication_status']) self.assertEqual('inconsistent_copying', model_update['metadata']['Mirroring State']) # Delete the MM volume self.driver.delete_volume(mm_volume) # Create GM volume with mirror_pool spec = {'replication_enabled': ' True', 'replication_type': ' global', 'drivers:mirror_pool': 'openstack1'} gm_mirror_type = self._create_replica_volume_type( False, opts=spec, vol_type_name='gm_mirror_type') gm_volume, model_update = self._create_test_volume( gm_mirror_type) # Check the parameters and their values self.assertEqual(fields.ReplicationStatus.ENABLED, model_update['replication_status']) self.assertEqual('inconsistent_copying', model_update['metadata']['Mirroring State']) # Delete the GM volume self.driver.delete_volume(gm_volume) def test_storwize_retype_mm_replication_volume_with_mirror_pool(self): """Create a MM replication volume with mirror_pool option and retype it to MM replication volume type without mirror pool option """ # Set replication target self.driver.configuration.set_override('replication_device', [self.rep_target]) self.driver.do_setup(self.ctxt) host = {'host': 'openstack@svc#openstack'} # Create MM volume mm_volume, model_update = self._create_test_volume(self.mm_type) self.assertEqual(fields.ReplicationStatus.ENABLED, model_update['replication_status']) # Create MM volume-type with mirror_pool option spec = {'replication_enabled': ' True', 'replication_type': ' metro', 'drivers:mirror_pool': 'openstack1'} mm_mirror_type = self._create_replica_volume_type( False, opts=spec, vol_type_name='mm_mirror_type') # Retype the MM-volume to volume-type with mirror_pool option diff, _equal = volume_types.volume_types_diff( self.ctxt, mm_mirror_type['id'], self.mm_type['id']) retyped, model_update = self.driver.retype( 
self.ctxt, mm_volume, mm_mirror_type, diff, host) self.driver.delete_volume(mm_volume) def test_storwize_retype_gm_replication_volume_with_mirror_pool(self): """Create a GM replication volume with mirror_pool option and retype it to GM replication volume type without mirror pool option """ # Set replication target self.driver.configuration.set_override('replication_device', [self.rep_target]) self.driver.do_setup(self.ctxt) host = {'host': 'openstack@svc#openstack'} # Create GM volume gm_volume, model_update = self._create_test_volume(self.gm_type) self.assertEqual(fields.ReplicationStatus.ENABLED, model_update['replication_status']) # Create GM volume-type with mirror_pool option spec = {'replication_enabled': ' True', 'replication_type': ' global', 'drivers:mirror_pool': 'openstack1'} gm_mirror_type = self._create_replica_volume_type( False, opts=spec, vol_type_name='gm_mirror_type') # Retype the GM-volume to volume-type with mirror_pool option diff, _equal = volume_types.volume_types_diff( self.ctxt, gm_mirror_type['id'], self.gm_type['id']) retyped, model_update = self.driver.retype( self.ctxt, gm_volume, gm_mirror_type, diff, host) self.driver.delete_volume(gm_volume) @ddt.data((None, None), (None, SVC_TARGET_CHILD_POOL), (SVC_SOURCE_CHILD_POOL, None), (SVC_SOURCE_CHILD_POOL, SVC_TARGET_CHILD_POOL)) @ddt.unpack def test_storwize_create_gmcv_volume_with_childpool( self, svc_src_childpool, svc_tgt_childpool): # Set replication target. self.driver.configuration.set_override('replication_device', [self.rep_target]) self.driver.do_setup(self.ctxt) # Create gmcv volume with change volumes on child pools spec = {'replication_enabled': ' True', 'replication_type': ' gmcv', 'drivers:storwize_svc_src_child_pool': svc_src_childpool, 'drivers:storwize_svc_target_child_pool': svc_tgt_childpool} gmcv_childpool_type = self._create_replica_volume_type( False, opts=spec, vol_type_name='test_gmcv_childpool_type') volume, model_update = self._create_test_volume( gmcv_childpool_type) self.assertEqual(fields.ReplicationStatus.ENABLED, model_update['replication_status']) self._validate_replic_vol_creation(volume, True) src_chg_vol_storage_pool = ( svc_src_childpool if svc_src_childpool else _get_test_pool()) tgt_chg_vol_storage_pool = ( svc_tgt_childpool if svc_tgt_childpool else _get_test_pool()) src_change_vol_name = (storwize_const.REPLICA_CHG_VOL_PREFIX + volume.name) src_childpool_vols = self._get_pool_volumes(src_chg_vol_storage_pool) self.assertIn(src_change_vol_name, src_childpool_vols) tgt_change_vol_name = (storwize_const.REPLICA_CHG_VOL_PREFIX + storwize_const.REPLICA_AUX_VOL_PREFIX + volume.name) tgt_childpool_vols = self._get_pool_volumes(tgt_chg_vol_storage_pool) self.assertIn(tgt_change_vol_name, tgt_childpool_vols) self.driver.delete_volume(volume) self._validate_replic_vol_deletion(volume, True) @ddt.data(({"backend_id": "svc_aux_target_1", "san_ip": "192.168.10.22", "san_login": "admin", "san_password": "admin", "pool_name": "openstack"}, 'openstack@svc#dr_pool1'), ({"backend_id": "svc_aux_target_1", "san_ip": "192.168.10.22", "san_login": "admin", "san_password": "admin", "pool_name": "dr_pool1"}, 'openstack@svc#openstack')) @ddt.unpack def test_storwize_replication_volume_with_dr_pools(self, target, vol_host): # Set replication target self.driver.configuration.set_override('replication_device', [target]) self.driver.do_setup(self.ctxt) # Create metro mirror replication volume on dr_pool. 
volume = testutils.create_volume( self.ctxt, volume_type_id=self.mm_type.id, host=vol_host) model_update = self.driver.create_volume(volume) self.assertEqual(fields.ReplicationStatus.ENABLED, model_update['replication_status']) volume1 = testutils.create_volume( self.ctxt, volume_type_id=self.mm_type.id, host=vol_host) ref = {'source-name': volume.name} self.driver.manage_existing(volume1, ref) spec = {'replication_enabled': ' True', 'replication_type': ' metro', 'easytier': 'False'} type_ref = volume_types.create(self.ctxt, 'type_dr', spec) dr_type = objects.VolumeType.get_by_id(self.ctxt, type_ref['id']) volume2 = testutils.create_volume( self.ctxt, volume_type_id=dr_type.id, host=vol_host) self.assertRaises(exception.VolumeDriverException, self.driver.create_volume, volume2) volume3 = testutils.create_volume( self.ctxt, volume_type_id=self.mm_type.id, host=vol_host) model_update = self.driver.create_volume(volume3) ref2 = {'source-name': volume3.name} self.assertRaises(exception.ManageExistingVolumeTypeMismatch, self.driver.manage_existing, volume2, ref2) volume4 = testutils.create_volume( self.ctxt, volume_type_id=self.non_replica_type.id, host=vol_host) self.driver.create_volume(volume4) # Retype to mm replica host = {'host': vol_host} diff, _equal = volume_types.volume_types_diff( self.ctxt, self.non_replica_type['id'], self.mm_type['id']) retyped, model_update = self.driver.retype( self.ctxt, volume4, self.mm_type, diff, host) volume4['volume_type_id'] = self.mm_type['id'] volume4['volume_type'] = self.mm_type self.assertEqual(fields.ReplicationStatus.ENABLED, model_update['replication_status']) self._validate_replic_vol_creation(volume4) volume5 = testutils.create_volume( self.ctxt, volume_type_id=self.non_replica_type.id, host=vol_host) self.driver.create_volume(volume5) # retype with check dr_pool params failure diff, _equal = volume_types.volume_types_diff( self.ctxt, self.non_replica_type['id'], dr_type['id']) self.assertRaises(exception.VolumeDriverException, self.driver.retype, self.ctxt, volume5, dr_type, diff, host) def _validate_replic_vol_creation(self, volume, isGMCV=False): self._assert_vol_exists(volume['name'], True) self._assert_vol_exists( storwize_const.REPLICA_AUX_VOL_PREFIX + volume['name'], True) if isGMCV: self._assert_vol_exists( storwize_const.REPLICA_CHG_VOL_PREFIX + volume['name'], True) self._assert_vol_exists( storwize_const.REPLICA_CHG_VOL_PREFIX + storwize_const.REPLICA_AUX_VOL_PREFIX + volume['name'], True) rel_info = self.driver._helpers.get_relationship_info(volume['name']) self.assertIsNotNone(rel_info) if isGMCV: vol_rep_type = rel_info['copy_type'] cycling_mode = rel_info['cycling_mode'] cycle_period_seconds = rel_info['cycle_period_seconds'] rep_type = self.driver._get_volume_replicated_type( self.ctxt, volume) src_opts = self.driver._get_vdisk_params(volume['volume_type_id']) opt_cycle_period_seconds = str( src_opts.get('cycle_period_seconds')) self.assertEqual(opt_cycle_period_seconds, cycle_period_seconds) self.assertEqual(storwize_const.GMCV_MULTI, cycling_mode) self.assertEqual(storwize_const.GLOBAL, vol_rep_type) self.assertEqual(storwize_const.GMCV, rep_type) self.assertEqual('master', rel_info['primary']) self.assertEqual(volume['name'], rel_info['master_vdisk_name']) self.assertEqual( storwize_const.REPLICA_AUX_VOL_PREFIX + volume['name'], rel_info['aux_vdisk_name']) self.assertIn(rel_info['state'], ['consistent_copying']) self.assertEqual( storwize_const.REPLICA_CHG_VOL_PREFIX + volume['name'], rel_info['master_change_vdisk_name']) 
self.assertEqual( storwize_const.REPLICA_CHG_VOL_PREFIX + storwize_const.REPLICA_AUX_VOL_PREFIX + volume['name'], rel_info['aux_change_vdisk_name']) self.assertIn(rel_info['state'], ['consistent_copying']) self.sim._rc_state_transition('wait', rel_info) self.assertEqual('consistent_copying', rel_info['state']) else: vol_rep_type = rel_info['copy_type'] rep_type = self.driver._get_volume_replicated_type( self.ctxt, volume) self.assertEqual(rep_type, vol_rep_type) self.assertEqual('master', rel_info['primary']) self.assertEqual(volume['name'], rel_info['master_vdisk_name']) self.assertEqual( storwize_const.REPLICA_AUX_VOL_PREFIX + volume['name'], rel_info['aux_vdisk_name']) self.assertIn(rel_info['state'], ['consistent_synchronized', 'inconsistent_copying']) self.sim._rc_state_transition('wait', rel_info) self.assertEqual('consistent_synchronized', rel_info['state']) def _validate_gmcv_vol_retype(self, volume): self._assert_vol_exists(volume['name'], True) self._assert_vol_exists( storwize_const.REPLICA_AUX_VOL_PREFIX + volume['name'], True) self._assert_vol_exists(storwize_const.REPLICA_CHG_VOL_PREFIX + volume['name'], True) self._assert_vol_exists( storwize_const.REPLICA_CHG_VOL_PREFIX + storwize_const.REPLICA_AUX_VOL_PREFIX + volume['name'], True) rel_info = self.driver._helpers.get_relationship_info(volume['name']) self.assertIsNotNone(rel_info) src_opts = self.driver._get_vdisk_params(volume['volume_type_id']) opt_cycle_period_seconds = str( src_opts.get('cycle_period_seconds')) self.assertEqual(opt_cycle_period_seconds, rel_info['cycle_period_seconds']) self.assertEqual(storwize_const.GMCV_MULTI, rel_info['cycling_mode']) self.assertEqual(storwize_const.GLOBAL, rel_info['copy_type']) self.assertEqual(storwize_const.GMCV, self.driver._get_volume_replicated_type( self.ctxt, volume)) self.assertEqual('master', rel_info['primary']) self.assertEqual(volume['name'], rel_info['master_vdisk_name']) self.assertEqual((storwize_const.REPLICA_CHG_VOL_PREFIX + volume['name']), rel_info['master_change_vdisk_name']) aux_vdisk_name = (storwize_const.REPLICA_AUX_VOL_PREFIX + volume['name']) self.assertEqual(aux_vdisk_name, rel_info['aux_vdisk_name']) self.assertEqual((storwize_const.REPLICA_CHG_VOL_PREFIX + aux_vdisk_name), rel_info['aux_change_vdisk_name']) def _validate_replic_vol_deletion(self, volume, isGMCV=False): self._assert_vol_exists(volume['name'], False) self._assert_vol_exists( storwize_const.REPLICA_AUX_VOL_PREFIX + volume['name'], False) if isGMCV: # All change volumes should be deleted self._assert_vol_exists( storwize_const.REPLICA_CHG_VOL_PREFIX + volume['name'], False) self._assert_vol_exists( storwize_const.REPLICA_CHG_VOL_PREFIX + storwize_const.REPLICA_AUX_VOL_PREFIX + volume['name'], False) rel_info = self.driver._helpers.get_relationship_info(volume['name']) self.assertIsNone(rel_info) @ddt.data(({'mirror_type': 'mm_type'}), ({'mirror_type': 'gm_type'}), ({'mirror_type': 'gmcv_default_type'}), ({'mirror_type': 'gmcv_with_cps900_type'})) def test_storwize_create_snapshot_volume_with_mirror_replica(self, vol_spec): # Set replication target self.driver.configuration.set_override('replication_device', [self.rep_target]) self.driver.do_setup(self.ctxt) rep_type = getattr(self, vol_spec['mirror_type']) # Create mirror replication volume. 
is_gmcv = True if "gmcv" in vol_spec['mirror_type'] else False vol1, model_update = self._create_test_volume(rep_type) self.assertEqual(fields.ReplicationStatus.ENABLED, model_update['replication_status']) self._validate_replic_vol_creation(vol1, isGMCV=is_gmcv) snap = testutils.create_snapshot(self.ctxt, vol1.id) self.driver.create_snapshot(snap) self._assert_vol_exists(snap['name'], True) vol2 = self._generate_vol_info(rep_type) model_update = self.driver.create_volume_from_snapshot(vol2, snap) self.assertEqual(fields.ReplicationStatus.ENABLED, model_update['replication_status']) self._validate_replic_vol_creation(vol2, isGMCV=is_gmcv) if self.USESIM: self.sim.error_injection('lsfcmap', 'speed_up') self.driver.delete_volume(vol2) self.driver.delete_snapshot(snap) self.driver.delete_volume(vol1) @ddt.data((None, None), (None, SVC_TARGET_CHILD_POOL), (SVC_SOURCE_CHILD_POOL, None), (SVC_SOURCE_CHILD_POOL, SVC_TARGET_CHILD_POOL)) @ddt.unpack def test_storwize_create_snapshot_gmcv_volume_with_childpool( self, svc_src_childpool, svc_tgt_childpool): # Set replication target self.driver.configuration.set_override('replication_device', [self.rep_target]) self.driver.do_setup(self.ctxt) # Create gmcv replication volume with change volumes on child pools spec = {'replication_enabled': ' True', 'replication_type': ' gmcv', 'drivers:storwize_svc_src_child_pool': svc_src_childpool, 'drivers:storwize_svc_target_child_pool': svc_tgt_childpool} gmcv_childpool_type = self._create_replica_volume_type( False, opts=spec, vol_type_name='test_gmcv_childpool_type') vol1, model_update = self._create_test_volume(gmcv_childpool_type) self.assertEqual(fields.ReplicationStatus.ENABLED, model_update['replication_status']) self._validate_replic_vol_creation(vol1, True) snap = testutils.create_snapshot(self.ctxt, vol1.id) self.driver.create_snapshot(snap) vol2 = self._generate_vol_info(gmcv_childpool_type) model_update = self.driver.create_volume_from_snapshot(vol2, snap) self.assertEqual(fields.ReplicationStatus.ENABLED, model_update['replication_status']) self._validate_replic_vol_creation(vol2, True) src_chg_vol_storage_pool = ( svc_src_childpool if svc_src_childpool else _get_test_pool()) tgt_chg_vol_storage_pool = ( svc_tgt_childpool if svc_tgt_childpool else _get_test_pool()) src_change_vol_name = (storwize_const.REPLICA_CHG_VOL_PREFIX + vol2.name) src_childpool_vols = self._get_pool_volumes(src_chg_vol_storage_pool) self.assertIn(src_change_vol_name, src_childpool_vols) tgt_change_vol_name = (storwize_const.REPLICA_CHG_VOL_PREFIX + storwize_const.REPLICA_AUX_VOL_PREFIX + vol2.name) tgt_childpool_vols = self._get_pool_volumes(tgt_chg_vol_storage_pool) self.assertIn(tgt_change_vol_name, tgt_childpool_vols) if self.USESIM: self.sim.error_injection('lsfcmap', 'speed_up') self.driver.delete_volume(vol2) self._validate_replic_vol_deletion(vol2, True) self.driver.delete_snapshot(snap) self.driver.delete_volume(vol1) self._validate_replic_vol_deletion(vol1, True) def test_storwize_create_cloned_volume_with_mirror_replica(self): # Set replication target self.driver.configuration.set_override('replication_device', [self.rep_target]) self.driver.do_setup(self.ctxt) # Create a source metro mirror replication volume. src_volume, model_update = self._create_test_volume(self.mm_type) self.assertEqual(fields.ReplicationStatus.ENABLED, model_update['replication_status']) volume = self._generate_vol_info(self.mm_type) # Create a cloned volume from source volume. 
model_update = self.driver.create_cloned_volume(volume, src_volume) self.assertEqual(fields.ReplicationStatus.ENABLED, model_update['replication_status']) self._validate_replic_vol_creation(volume) if self.USESIM: self.sim.error_injection('lsfcmap', 'speed_up') self.driver.delete_volume(src_volume) self.driver.delete_volume(volume) # Create a source gmcv replication volume. src_volume, model_update = self._create_test_volume( self.gmcv_default_type) self.assertEqual(fields.ReplicationStatus.ENABLED, model_update['replication_status']) volume = self._generate_vol_info(self.gmcv_default_type) # Create a cloned volume from source volume. model_update = self.driver.create_cloned_volume(volume, src_volume) self.assertEqual(fields.ReplicationStatus.ENABLED, model_update['replication_status']) self._validate_replic_vol_creation(volume, True) if self.USESIM: self.sim.error_injection('lsfcmap', 'speed_up') self.driver.delete_volume(src_volume) self.driver.delete_volume(volume) # Create a source gmcv volume with specified cycle_period_seconds src_volume, model_update = self._create_test_volume( self.gmcv_with_cps600_type) self.assertEqual(fields.ReplicationStatus.ENABLED, model_update['replication_status']) volume = self._generate_vol_info(self.gmcv_with_cps600_type) # Create a cloned volume from source volume. model_update = self.driver.create_cloned_volume(volume, src_volume) self.assertEqual(fields.ReplicationStatus.ENABLED, model_update['replication_status']) self._validate_replic_vol_creation(volume, True) if self.USESIM: self.sim.error_injection('lsfcmap', 'speed_up') self.driver.delete_volume(src_volume) self.driver.delete_volume(volume) @ddt.data((None, None), (None, SVC_TARGET_CHILD_POOL), (SVC_SOURCE_CHILD_POOL, None), (SVC_SOURCE_CHILD_POOL, SVC_TARGET_CHILD_POOL)) @ddt.unpack def test_storwize_create_cloned_volume_from_gmcv_with_childpool( self, svc_src_childpool, svc_tgt_childpool): # Set replication target self.driver.configuration.set_override('replication_device', [self.rep_target]) self.driver.do_setup(self.ctxt) # Create gmcv volume with change volumes on child pools spec = {'replication_enabled': ' True', 'replication_type': ' gmcv', 'drivers:storwize_svc_src_child_pool': svc_src_childpool, 'drivers:storwize_svc_target_child_pool': svc_tgt_childpool} gmcv_childpool_type = self._create_replica_volume_type( False, opts=spec, vol_type_name='test_gmcv_childpool_type') src_volume, model_update = self._create_test_volume( gmcv_childpool_type) self.assertEqual(fields.ReplicationStatus.ENABLED, model_update['replication_status']) self._validate_replic_vol_creation(src_volume, True) # Create a cloned volume from source volume. 
volume = self._generate_vol_info(gmcv_childpool_type) model_update = self.driver.create_cloned_volume(volume, src_volume) self.assertEqual(fields.ReplicationStatus.ENABLED, model_update['replication_status']) self._validate_replic_vol_creation(volume, True) src_chg_vol_storage_pool = ( svc_src_childpool if svc_src_childpool else _get_test_pool()) tgt_chg_vol_storage_pool = ( svc_tgt_childpool if svc_tgt_childpool else _get_test_pool()) src_change_vol_name = (storwize_const.REPLICA_CHG_VOL_PREFIX + volume.name) src_childpool_vols = self._get_pool_volumes(src_chg_vol_storage_pool) self.assertIn(src_change_vol_name, src_childpool_vols) tgt_change_vol_name = (storwize_const.REPLICA_CHG_VOL_PREFIX + storwize_const.REPLICA_AUX_VOL_PREFIX + volume.name) tgt_childpool_vols = self._get_pool_volumes(tgt_chg_vol_storage_pool) self.assertIn(tgt_change_vol_name, tgt_childpool_vols) if self.USESIM: self.sim.error_injection('lsfcmap', 'speed_up') self.driver.delete_volume(src_volume) self._validate_replic_vol_deletion(src_volume, True) self.driver.delete_volume(volume) self._validate_replic_vol_deletion(src_volume, True) @ddt.data(({'replication_enabled': ' True', 'replication_type': ' global'}, {'replication_enabled': ' True', 'replication_type': ' metro'}), ({'replication_enabled': ' True', 'replication_type': ' metro'}, {'replication_enabled': ' True', 'replication_type': ' global'})) @ddt.unpack def test_storwize_retype_invalid_replication(self, old_opts, new_opts): # Set replication target self.driver.configuration.set_override('replication_device', [self.rep_target]) self.driver.do_setup(self.ctxt) host = {'host': 'openstack@svc#openstack'} old_type = self._create_replica_volume_type( False, opts=old_opts, vol_type_name='test_old_type') volume, model_update = self._create_test_volume(old_type) new_type = self._create_replica_volume_type( False, opts=new_opts, vol_type_name='test_new_type') diff, _equal = volume_types.volume_types_diff( self.ctxt, new_type['id'], old_type['id']) self.assertRaises(exception.VolumeDriverException, self.driver.retype, self.ctxt, volume, new_type, diff, host) @ddt.data(({'replication_enabled': ' True', 'replication_type': ' metro'}, {'mirror_pool': 'openstack1'}), ({'mirror_pool': 'openstack1'}, {'mirror_pool': 'openstack1', 'replication_enabled': ' True', 'replication_type': ' metro'}), ({'replication_enabled': ' False'}, {'mirror_pool': 'openstack1', 'replication_enabled': ' True', 'replication_type': ' metro'})) @ddt.unpack def test_storwize_retype_valid_replication(self, old_opts, new_opts): # Set replication target self.driver.configuration.set_override('replication_device', [self.rep_target]) self.driver.do_setup(self.ctxt) host = {'host': 'openstack@svc#openstack'} old_type = self._create_replica_volume_type( False, opts=old_opts, vol_type_name='test_old_type') volume, model_update = self._create_test_volume(old_type) new_type = self._create_replica_volume_type( False, opts=new_opts, vol_type_name='test_new_type') diff, _equal = volume_types.volume_types_diff( self.ctxt, new_type['id'], old_type['id']) self.driver.retype(self.ctxt, volume, new_type, diff, host) @mock.patch.object(storwize_svc_common.StorwizeHelpers, 'get_volume_io_group') def test_storwize_svc_retype_global_mirror_volume_to_thin(self, get_vol_io_grp): self.driver.do_setup(self.ctxt) loc = ('StorwizeSVCDriver:' + self.driver._state['system_id'] + ':openstack') cap = {'location_info': loc, 'extent_size': '128'} self.driver._stats = {'location_info': loc} host = {'host': 'openstack@svc#openstack', 
'capabilities': cap} ctxt = context.get_admin_context() get_vol_io_grp.return_value = 0 type_name = 'rep_global_none' spec = {'replication_enabled': ' True', 'replication_type': ' global', 'drivers:rsize': '-1', 'compression': 'False'} type_ref = volume_types.create(self.ctxt, type_name, spec) vol_type1 = objects.VolumeType.get_by_id(self.ctxt, type_ref['id']) type_name = 'rep_global_thin' spec = {'replication_enabled': ' True', 'replication_type': ' global', 'drivers:rsize': '2', 'compression': 'False'} type_ref = volume_types.create(self.ctxt, type_name, spec) vol_type2 = objects.VolumeType.get_by_id(self.ctxt, type_ref['id']) diff, _equal = volume_types.volume_types_diff(ctxt, vol_type1.id, vol_type2.id) # Create test volume with volume type with rsize as -1 vol1, model_update = self._create_test_volume(vol_type1) self.assertEqual(fields.ReplicationStatus.ENABLED, model_update['replication_status']) vol1['status'] = 'available' copies = self.driver._helpers.lsvdiskcopy(vol1.name) self.assertEqual(1, len(copies)) self.driver.retype(self.ctxt, vol1, vol_type2, diff, host) copies = self.driver._helpers.lsvdiskcopy(vol1.name) self.assertEqual(2, len(copies)) get_vol_io_grp.assert_called_once_with(vol1.name) self.driver.delete_volume(vol1) def test_storwize_svc_retype_global_mirror_volume_to_none(self): self.driver.do_setup(self.ctxt) loc = ('StorwizeSVCDriver:' + self.driver._state['system_id'] + ':openstack') cap = {'location_info': loc, 'extent_size': '128'} self.driver._stats = {'location_info': loc} host = {'host': 'openstack@svc#openstack', 'capabilities': cap} ctxt = context.get_admin_context() type_name = 'rep_global_thin' spec = {'replication_enabled': ' True', 'replication_type': ' global', 'drivers:rsize': '2', 'compression': 'False'} type_ref = volume_types.create(self.ctxt, type_name, spec) vol_type1 = objects.VolumeType.get_by_id(self.ctxt, type_ref['id']) type_name = 'rep_global_none' spec = {'replication_enabled': ' True', 'replication_type': ' global', 'drivers:rsize': '-1', 'compression': 'False'} type_ref = volume_types.create(self.ctxt, type_name, spec) vol_type2 = objects.VolumeType.get_by_id(self.ctxt, type_ref['id']) diff, _equal = volume_types.volume_types_diff(ctxt, vol_type1.id, vol_type2.id) # Create test volume with volume type with rsize as 2 vol1, model_update = self._create_test_volume(vol_type1) self.assertEqual(fields.ReplicationStatus.ENABLED, model_update['replication_status']) vol1['status'] = 'available' copies = self.driver._helpers.lsvdiskcopy(vol1.name) self.assertEqual(1, len(copies)) self.driver.retype(self.ctxt, vol1, vol_type2, diff, host) copies = self.driver._helpers.lsvdiskcopy(vol1.name) self.assertEqual(2, len(copies)) self.driver.delete_volume(vol1) def test_storwize_retype_from_mirror_to_none_replication(self): # Set replication target self.driver.configuration.set_override('replication_device', [self.rep_target]) self.driver.do_setup(self.ctxt) host = {'host': 'openstack@svc#openstack'} volume, model_update = self._create_test_volume(self.mm_type) self.assertEqual(fields.ReplicationStatus.ENABLED, model_update['replication_status']) diff, _equal = volume_types.volume_types_diff( self.ctxt, self.mm_type['id'], self.gm_type['id']) # Change the mirror type from mm to gm self.assertRaises(exception.VolumeDriverException, self.driver.retype, self.ctxt, volume, self.gm_type, diff, host) # Retype from mm to gmcv diff, _equal = volume_types.volume_types_diff( self.ctxt, self.mm_type['id'], self.gmcv_with_cps600_type['id']) 
self.assertRaises(exception.VolumeDriverException, self.driver.retype, self.ctxt, volume, self.gmcv_with_cps600_type, diff, host) diff, _equal = volume_types.volume_types_diff( self.ctxt, self.non_replica_type['id'], self.mm_type['id']) # Retype from mm to non-replica retyped, model_update = self.driver.retype( self.ctxt, volume, self.non_replica_type, diff, host) self.assertEqual(fields.ReplicationStatus.DISABLED, model_update['replication_status']) self._assert_vol_exists( storwize_const.REPLICA_AUX_VOL_PREFIX + volume['name'], False) self.driver.delete_volume(volume) self._assert_vol_exists(volume['name'], False) rel_info = self.driver._helpers.get_relationship_info(volume['name']) self.assertIsNone(rel_info) # Create gmcv volume volume, model_update = self._create_test_volume( self.gmcv_with_cps900_type) self.assertEqual(fields.ReplicationStatus.ENABLED, model_update['replication_status']) # Retype from gmcv to gm diff, _equal = volume_types.volume_types_diff( self.ctxt, self.gmcv_with_cps900_type['id'], self.gm_type['id']) self.assertRaises(exception.VolumeDriverException, self.driver.retype, self.ctxt, volume, self.gm_type, diff, host) # Retype from gmcv to non-replica diff, _equal = volume_types.volume_types_diff( self.ctxt, self.gmcv_with_cps900_type['id'], self.non_replica_type['id']) retyped, model_update = self.driver.retype( self.ctxt, volume, self.non_replica_type, diff, host) self.assertEqual(fields.ReplicationStatus.DISABLED, model_update['replication_status']) # All change volumes should be deleted self._assert_vol_exists( storwize_const.REPLICA_AUX_VOL_PREFIX + volume['name'], False) self._assert_vol_exists( storwize_const.REPLICA_CHG_VOL_PREFIX + volume['name'], False) self._assert_vol_exists( storwize_const.REPLICA_CHG_VOL_PREFIX + storwize_const.REPLICA_AUX_VOL_PREFIX + volume['name'], False) self.driver.delete_volume(volume) self._assert_vol_exists(volume['name'], False) rel_info = self.driver._helpers.get_relationship_info(volume['name']) self.assertIsNone(rel_info) def test_storwize_retype_from_none_to_mirror_replication(self): # Set replication target self.driver.configuration.set_override('replication_device', [self.rep_target]) self.driver.do_setup(self.ctxt) volume, model_update = self._create_test_volume(self.non_replica_type) self.assertEqual(fields.ReplicationStatus.NOT_CAPABLE, model_update['replication_status']) # Retype to mm replica host = {'host': 'openstack@svc#openstack'} diff, _equal = volume_types.volume_types_diff( self.ctxt, self.non_replica_type['id'], self.mm_type['id']) retyped, model_update = self.driver.retype( self.ctxt, volume, self.mm_type, diff, host) volume['volume_type_id'] = self.mm_type['id'] volume['volume_type'] = self.mm_type self.assertEqual(fields.ReplicationStatus.ENABLED, model_update['replication_status']) self._validate_replic_vol_creation(volume) self.driver.delete_volume(volume) # Create non-replica volume volume, model_update = self._create_test_volume(self.non_replica_type) self.assertEqual(fields.ReplicationStatus.NOT_CAPABLE, model_update['replication_status']) # Retype to gmcv replica host = {'host': 'openstack@svc#openstack'} diff, _equal = volume_types.volume_types_diff( self.ctxt, self.non_replica_type['id'], self.gmcv_with_cps900_type['id']) retyped, model_update = self.driver.retype( self.ctxt, volume, self.gmcv_with_cps900_type, diff, host) volume['volume_type_id'] = self.gmcv_with_cps900_type['id'] volume['volume_type'] = self.gmcv_with_cps900_type self.assertEqual(fields.ReplicationStatus.ENABLED, 
model_update['replication_status']) self._validate_replic_vol_creation(volume, True) self.driver.delete_volume(volume) self._validate_replic_vol_deletion(volume, True) @ddt.data((None, None), (None, SVC_TARGET_CHILD_POOL), (SVC_SOURCE_CHILD_POOL, None), (SVC_SOURCE_CHILD_POOL, SVC_TARGET_CHILD_POOL)) @ddt.unpack def test_storwize_retype_from_none_to_gmcv_with_childpool( self, svc_src_childpool, svc_tgt_childpool): # Set replication target self.driver.configuration.set_override('replication_device', [self.rep_target]) self.driver.do_setup(self.ctxt) # Create non-replica volume volume, model_update = self._create_test_volume(self.non_replica_type) self.assertEqual(fields.ReplicationStatus.NOT_CAPABLE, model_update['replication_status']) # Retype to gmcv with childpool spec = {'replication_enabled': ' True', 'replication_type': ' gmcv', 'drivers:storwize_svc_src_child_pool': svc_src_childpool, 'drivers:storwize_svc_target_child_pool': svc_tgt_childpool} gmcv_childpool_type = self._create_replica_volume_type( False, opts=spec, vol_type_name='test_gmcv_childpool_type') host = {'host': 'openstack@svc#openstack'} diff, _equal = volume_types.volume_types_diff( self.ctxt, self.non_replica_type['id'], gmcv_childpool_type['id']) retyped, model_update = self.driver.retype( self.ctxt, volume, gmcv_childpool_type, diff, host) volume['volume_type_id'] = gmcv_childpool_type['id'] volume['volume_type'] = gmcv_childpool_type self.assertEqual(fields.ReplicationStatus.ENABLED, model_update['replication_status']) self._validate_replic_vol_creation(volume, True) src_chg_vol_storage_pool = ( svc_src_childpool if svc_src_childpool else _get_test_pool()) tgt_chg_vol_storage_pool = ( svc_tgt_childpool if svc_tgt_childpool else _get_test_pool()) src_change_vol_name = (storwize_const.REPLICA_CHG_VOL_PREFIX + volume.name) src_childpool_vols = self._get_pool_volumes(src_chg_vol_storage_pool) self.assertIn(src_change_vol_name, src_childpool_vols) tgt_change_vol_name = (storwize_const.REPLICA_CHG_VOL_PREFIX + storwize_const.REPLICA_AUX_VOL_PREFIX + volume.name) tgt_childpool_vols = self._get_pool_volumes(tgt_chg_vol_storage_pool) self.assertIn(tgt_change_vol_name, tgt_childpool_vols) self.driver.delete_volume(volume) self._validate_replic_vol_deletion(volume, True) def test_storwize_retype_from_gmcv_to_gmcv_replication(self): # Set replication target self.driver.configuration.set_override('replication_device', [self.rep_target]) self.driver.do_setup(self.ctxt) # Create gmcv default volume volume, model_update = self._create_test_volume(self.gmcv_default_type) self.assertEqual(fields.ReplicationStatus.ENABLED, model_update['replication_status']) self._validate_replic_vol_creation(volume, True) # Retype to gmcv with cycle_period_seconds 600 replica host = {'host': 'openstack@svc#openstack'} diff, _equal = volume_types.volume_types_diff( self.ctxt, self.gmcv_default_type['id'], self.gmcv_with_cps600_type['id']) self.driver.retype(self.ctxt, volume, self.gmcv_with_cps600_type, diff, host) volume['volume_type_id'] = self.gmcv_with_cps600_type['id'] volume['volume_type'] = self.gmcv_with_cps600_type self._validate_gmcv_vol_retype(volume) # Retype to gmcv with cycle_period_seconds 900 replica diff, _equal = volume_types.volume_types_diff( self.ctxt, self.gmcv_with_cps600_type['id'], self.gmcv_with_cps900_type['id']) self.driver.retype(self.ctxt, volume, self.gmcv_with_cps900_type, diff, host) volume['volume_type_id'] = self.gmcv_with_cps900_type['id'] volume['volume_type'] = self.gmcv_with_cps900_type 
self._validate_gmcv_vol_retype(volume) # Retype to gmcv with invalid cycle_period_seconds diff, _equal = volume_types.volume_types_diff( self.ctxt, self.gmcv_with_cps600_type['id'], self.gmcv_with_cps86401_type['id']) self.assertRaises(exception.InvalidInput, self.driver.retype, self.ctxt, volume, self.gmcv_with_cps86401_type, diff, host) # Retype to gmcv default volume diff, _equal = volume_types.volume_types_diff( self.ctxt, self.gmcv_with_cps900_type['id'], self.gmcv_default_type['id']) self.driver.retype(self.ctxt, volume, self.gmcv_default_type, diff, host) volume['volume_type_id'] = self.gmcv_default_type['id'] volume['volume_type'] = self.gmcv_default_type self._validate_gmcv_vol_retype(volume) self.driver.delete_volume(volume) self._validate_replic_vol_deletion(volume, True) @mock.patch.object(storwize_svc_common.StorwizeSSH, 'mkfcmap') @mock.patch.object(storwize_svc_common.StorwizeHelpers, '_prepare_fc_map') @mock.patch.object(storwize_svc_common.StorwizeSSH, 'startfcmap') @mock.patch.object(storwize_svc_common.StorwizeHelpers, 'stop_relationship') @mock.patch.object(storwize_svc_common.StorwizeHelpers, 'start_relationship') def test_revert_to_snapshot_mirror_vol(self, start_relationship, stop_relationship, startfcmap, prepare_fc_map, mkfcmap): mkfcmap.side_effect = ['1'] vol1 = self._generate_vol_info(self.gm_type, replication_status='enabled') snap1 = self._generate_snap_info(vol1.id) with mock.patch.object(storwize_svc_common.StorwizeSVCCommonDriver, '_get_volume_replicated_type') as vol_rep_type: vol_rep_type.side_effect = [True, False] self.driver.revert_to_snapshot(self.ctxt, vol1, snap1) mkfcmap.assert_called_once_with( snap1.name, vol1.name, True, self.driver.configuration.storwize_svc_flashcopy_rate, self.driver.configuration.storwize_svc_clean_rate) prepare_fc_map.assert_called_once_with( '1', self.driver.configuration.storwize_svc_flashcopy_timeout, True) startfcmap.assert_called_once_with('1', True) self.assertEqual(fields.ReplicationStatus.ENABLED, vol1.replication_status) stop_relationship.assert_called_once_with("volume-" + vol1.id, access=False) start_relationship.assert_called_once_with("volume-" + vol1.id, primary=None) @mock.patch.object(storwize_svc_common.StorwizeSSH, 'mkfcmap') @mock.patch.object(storwize_svc_common.StorwizeHelpers, '_prepare_fc_map') @mock.patch.object(storwize_svc_common.StorwizeSSH, 'startfcmap') @mock.patch.object(storwize_svc_common.StorwizeHelpers, 'get_rccg_name_by_volume_name') @mock.patch.object(storwize_svc_common.StorwizeHelpers, 'stop_rccg') @mock.patch.object(storwize_svc_common.StorwizeHelpers, 'start_rccg') def test_revert_to_snapshot_mirror_vol_in_group( self, start_rccg, stop_rccg, get_rccg_name_by_volume_name, startfcmap, prepare_fc_map, mkfcmap): mkfcmap.side_effect = ['1'] vol1 = self._generate_vol_info(self.gm_type, replication_status='enabled') snap1 = self._generate_snap_info(vol1.id) fake_rccg_name = "fake_rccg-1" with mock.patch.object(storwize_svc_common.StorwizeSVCCommonDriver, '_get_volume_replicated_type') as vol_rep_type: vol_rep_type.side_effect = [True] get_rccg_name_by_volume_name.side_effect = [fake_rccg_name] self.driver.revert_to_snapshot(self.ctxt, vol1, snap1) mkfcmap.assert_called_once_with( snap1.name, vol1.name, True, self.driver.configuration.storwize_svc_flashcopy_rate, self.driver.configuration.storwize_svc_clean_rate) prepare_fc_map.assert_called_once_with( '1', self.driver.configuration.storwize_svc_flashcopy_timeout, True) startfcmap.assert_called_once_with('1', True) 
            self.assertEqual(fields.ReplicationStatus.ENABLED,
                             vol1.replication_status)
            stop_rccg.assert_called_once_with(fake_rccg_name, access=False)
            start_rccg.assert_called_once_with(fake_rccg_name, primary=None)

    def test_storwize_extend_volume_replication(self):
        # Set replication target.
        self.driver.configuration.set_override('replication_device',
                                               [self.rep_target])
        self.driver.do_setup(self.ctxt)

        # Create metro mirror replication volume.
        volume, model_update = self._create_test_volume(self.mm_type)
        self.assertEqual(fields.ReplicationStatus.ENABLED,
                         model_update['replication_status'])
        self.driver.extend_volume(volume, '13')
        attrs = self.driver._helpers.get_vdisk_attributes(volume['name'])
        vol_size = int(attrs['capacity']) / units.Gi
        self.assertAlmostEqual(vol_size, 13)
        attrs = self.driver._aux_backend_helpers.get_vdisk_attributes(
            storwize_const.REPLICA_AUX_VOL_PREFIX + volume['name'])
        vol_size = int(attrs['capacity']) / units.Gi
        self.assertAlmostEqual(vol_size, 13)
        self.driver.delete_volume(volume)
        self._validate_replic_vol_deletion(volume)

        # Create gmcv replication volume.
        volume, model_update = self._create_test_volume(
            self.gmcv_with_cps900_type)
        self.assertEqual(fields.ReplicationStatus.ENABLED,
                         model_update['replication_status'])
        self.driver.extend_volume(volume, 15)
        attrs = self.driver._helpers.get_vdisk_attributes(volume['name'])
        vol_size = int(attrs['capacity']) / units.Gi
        self.assertAlmostEqual(vol_size, 15)
        attrs = self.driver._aux_backend_helpers.get_vdisk_attributes(
            storwize_const.REPLICA_AUX_VOL_PREFIX + volume['name'])
        vol_size = int(attrs['capacity']) / units.Gi
        self.assertAlmostEqual(vol_size, 15)
        attrs = self.driver._aux_backend_helpers.get_vdisk_attributes(
            storwize_const.REPLICA_CHG_VOL_PREFIX +
            storwize_const.REPLICA_AUX_VOL_PREFIX + volume['name'])
        vol_size = int(attrs['capacity']) / units.Gi
        self.assertAlmostEqual(vol_size, 15)
        attrs = self.driver._helpers.get_vdisk_attributes(
            storwize_const.REPLICA_CHG_VOL_PREFIX + volume['name'])
        vol_size = int(attrs['capacity']) / units.Gi
        self.assertAlmostEqual(vol_size, 15)
        self.driver.delete_volume(volume)
        self._validate_replic_vol_deletion(volume)

    def test_storwize_extend_gmcv_volume_invalid(self):
        # Set replication target.
        self.driver.configuration.set_override('replication_device',
                                               [self.rep_target])
        self.driver.do_setup(self.ctxt)

        # Extend gmcv volume with thick_provisioning_support.
        spec = {'replication_enabled': '<is> True',
                'replication_type': '<in> gmcv',
                'drivers:rsize': -1}
        gmcv_thick_type = self._create_replica_volume_type(
            False, opts=spec, vol_type_name='test_gmcv_thick_type')
        gmcv_volume, model_update = self._create_test_volume(gmcv_thick_type)
        self.assertEqual(fields.ReplicationStatus.ENABLED,
                         model_update['replication_status'])
        if self.USESIM:
            # Tell expandvdisksize to fail when extend_volume is called,
            # because the volume is still fast formatting.
            self.sim.error_injection('expandvdisksize', 'fast_formatting')
            self.assertRaises(exception.VolumeDriverException,
                              self.driver.extend_volume, gmcv_volume, 15)
            attrs = (
                self.driver._helpers.get_vdisk_attributes(
                    gmcv_volume['name']))
            vol_size = int(attrs['capacity']) / units.Gi
            self.assertAlmostEqual(vol_size, 1)
        self.driver.delete_volume(gmcv_volume)
        self._validate_replic_vol_deletion(gmcv_volume)

    def test_storwize_extend_gmcv_volume_part_of_group(self):
        """Extend a gmcv volume that has been added to a replication group."""
        # Create group with replication.
group = self._create_test_rccg(self.rccg_type, [self.gmcv_default_type.id]) rccg_name = self.driver._get_rccg_name(group) # Create gmcv volume volume, model_update = self._create_test_volume( self.gmcv_default_type) self._validate_replic_vol_creation(volume, True) rcrel = self.driver._helpers.get_relationship_info(volume.name) self.sim._rc_state_transition('wait', rcrel) # Add gmcv volume to group. add_vols = [volume] (model_update, add_volumes_update, remove_volumes_update) = self.driver.update_group( self.ctxt, group, add_vols, []) self.assertEqual( rccg_name, self.driver._helpers.get_rccg_name_by_volume_name(volume.name)) self.assertEqual(fields.GroupStatus.AVAILABLE, model_update['status']) self.assertEqual([{'id': volume.id, 'group_id': group.id}], add_volumes_update) self.assertEqual([], remove_volumes_update) # Extend gmcv volume which is a part of group self.driver.extend_volume(volume, 2) attrs = self.driver._helpers.get_vdisk_attributes(volume['name']) vol_size = int(attrs['capacity']) / units.Gi self.assertAlmostEqual(vol_size, 2) with mock.patch.object(storwize_svc_common.StorwizeHelpers, 'extend_vdisk') as extend_vdisk: self.driver.extend_volume(volume, 3) extend_vdisk.assert_called_with(volume.name, 2) self.driver.delete_volume(volume) self._validate_replic_vol_deletion(volume) def test_convert_global_mirror_volume_to_gmcv(self): """Test volume conversion from global to gmcv.""" self.driver.configuration.set_override('replication_device', [self.rep_target]) self.driver.do_setup(self.ctxt) # Create global mirror replication. gm_vol, model_update = self._create_test_volume(self.gm_type) self.assertEqual(fields.ReplicationStatus.ENABLED, model_update['replication_status']) self._validate_replic_vol_creation(gm_vol) rcrel = self.driver._helpers.get_relationship_info(gm_vol.name) self.assertEqual('none', rcrel['cycling_mode']) self.assertEqual('', rcrel['master_change_vdisk_name']) self.assertEqual('', rcrel['aux_change_vdisk_name']) # Validating volume conversion from global to gmcv by checking a few # property values of RC relationship target_vol = storwize_const.REPLICA_AUX_VOL_PREFIX + gm_vol.name master_change_vol_name = ( storwize_const.REPLICA_CHG_VOL_PREFIX + gm_vol.name) aux_change_vol_name = ( storwize_const.REPLICA_CHG_VOL_PREFIX + target_vol) size = 1 self.driver._convert_global_mirror_volume_to_gmcv(gm_vol, target_vol, size, rcrel) rcrel = self.driver._helpers.get_relationship_info(gm_vol.name) self.assertEqual('multi', rcrel['cycling_mode']) self.assertEqual(master_change_vol_name, rcrel['master_change_vdisk_name']) self.assertEqual(aux_change_vol_name, rcrel['aux_change_vdisk_name']) self.driver.delete_volume(gm_vol) self._validate_replic_vol_deletion(gm_vol) gm_vol, model_update = self._create_test_volume(self.gm_type) self._validate_replic_vol_creation(gm_vol) rcrel = self.driver._helpers.get_relationship_info(gm_vol.name) with (mock.patch.object(storwize_svc_common.StorwizeHelpers, 'create_vdisk')) as create_vdisk: with (mock.patch.object(storwize_svc_common.StorwizeHelpers, 'change_relationship_cyclingmode') ) as ch_relationship_cyclingmode: target_vol = ( storwize_const.REPLICA_AUX_VOL_PREFIX + gm_vol.name) size = 1 self.driver._convert_global_mirror_volume_to_gmcv( gm_vol, target_vol, size, rcrel) create_vdisk.assert_called() self.assertEqual(2, create_vdisk.call_count) ch_relationship_cyclingmode.assert_called() self.driver.delete_volume(gm_vol) self._validate_replic_vol_deletion(gm_vol) def test_convert_global_mirror_volume_to_gmcv_part_of_group(self): 
"""Test volume conversion from global to gmcv part of group.""" group = self._create_test_rccg(self.rccg_type, [self.gm_type.id]) rccg_name = self.driver._get_rccg_name(group) gm_vol, model_update = self._create_test_volume(self.gm_type) self.assertEqual(fields.ReplicationStatus.ENABLED, model_update['replication_status']) self._validate_replic_vol_creation(gm_vol) add_vols = [gm_vol] (model_update, add_volumes_update, remove_volumes_update) = self.driver.update_group( self.ctxt, group, add_vols, []) self.assertEqual( rccg_name, self.driver._helpers.get_rccg_name_by_volume_name(gm_vol.name)) self.assertEqual(fields.GroupStatus.AVAILABLE, model_update['status']) self.assertEqual([{'id': gm_vol.id, 'group_id': group.id}], add_volumes_update) self.assertEqual([], remove_volumes_update) rccg_info = self.driver._helpers.get_rccg_info(gm_vol.name) self.assertEqual('none', rccg_info['cycling_mode']) rcrel = self.driver._helpers.get_relationship_info(gm_vol.name) self.assertEqual('', rcrel['master_change_vdisk_name']) self.assertEqual('', rcrel['aux_change_vdisk_name']) # Validating volume conversion from global to gmcv by checking a few # property values of rccg and RC relationship target_vol = storwize_const.REPLICA_AUX_VOL_PREFIX + gm_vol.name master_change_vol_name = ( storwize_const.REPLICA_CHG_VOL_PREFIX + gm_vol.name) aux_change_vol_name = ( storwize_const.REPLICA_CHG_VOL_PREFIX + target_vol) size = 1 self.driver._convert_global_mirror_volume_to_gmcv( gm_vol, target_vol, size, rcrel, rccg_name=rccg_name) rccg_info = self.driver._helpers.get_rccg_info(gm_vol.name) self.assertEqual('multi', rccg_info['cycling_mode']) rcrel = self.driver._helpers.get_relationship_info(gm_vol.name) self.assertEqual(master_change_vol_name, rcrel['master_change_vdisk_name']) self.assertEqual(aux_change_vol_name, rcrel['aux_change_vdisk_name']) self.driver.delete_volume(gm_vol) self._validate_replic_vol_deletion(gm_vol) @mock.patch.object(storwize_svc_common.StorwizeHelpers, 'start_rccg') @mock.patch.object(storwize_svc_common.StorwizeHelpers, 'change_consistgrp_cyclingmode') @mock.patch.object(storwize_svc_common.StorwizeHelpers, 'stop_rccg') def test_calls_in_convert_global_mirror_volume_to_gmcv_part_of_group( self, start_rccg, change_consistgrp_cyclingmode, stop_rccg): # Create global mirror replication. gm_vol, model_update = self._create_test_volume(self.gm_type) self.assertEqual(fields.ReplicationStatus.ENABLED, model_update['replication_status']) self._validate_replic_vol_creation(gm_vol) rcrel = self.driver._helpers.get_relationship_info(gm_vol.name) rccg_name = "fake_rccg_1" with (mock.patch.object(storwize_svc_common.StorwizeHelpers, 'create_vdisk')) as create_vdisk: target_vol = ( storwize_const.REPLICA_AUX_VOL_PREFIX + gm_vol.name) size = 1 self.driver._convert_global_mirror_volume_to_gmcv( gm_vol, target_vol, size, rcrel, rccg_name=rccg_name) create_vdisk.assert_called() self.assertEqual(2, create_vdisk.call_count) stop_rccg.assert_called_once_with(rccg_name) change_consistgrp_cyclingmode.assert_called_once_with(rccg_name, 'multi') start_rccg.assert_called_once_with(rccg_name) self.driver.delete_volume(gm_vol) self._validate_replic_vol_deletion(gm_vol) def test_storwize_manage_existing_mismatch_with_volume_replication(self): # Set replication target. self.driver.configuration.set_override('replication_device', [self.rep_target]) self.driver.do_setup(self.ctxt) # Create mm replication volume. 
rep_volume, model_update = self._create_test_volume(self.mm_type) self.assertEqual(fields.ReplicationStatus.ENABLED, model_update['replication_status']) # Create non-replication volume. non_rep_volume, model_update = self._create_test_volume( self.non_replica_type) new_volume = self._generate_vol_info() ref = {'source-name': rep_volume['name']} new_volume['volume_type_id'] = self.non_replica_type['id'] new_volume['volume_type'] = self.non_replica_type self.assertRaises(exception.ManageExistingVolumeTypeMismatch, self.driver.manage_existing, new_volume, ref) ref = {'source-name': non_rep_volume['name']} new_volume['volume_type_id'] = self.mm_type['id'] new_volume['volume_type'] = self.mm_type self.assertRaises(exception.ManageExistingVolumeTypeMismatch, self.driver.manage_existing, new_volume, ref) ref = {'source-name': rep_volume['name']} new_volume['volume_type_id'] = self.gm_type['id'] new_volume['volume_type'] = self.gm_type self.assertRaises(exception.ManageExistingVolumeTypeMismatch, self.driver.manage_existing, new_volume, ref) ref = {'source-name': rep_volume['name']} new_volume['volume_type_id'] = self.gmcv_with_cps900_type['id'] new_volume['volume_type'] = self.gmcv_with_cps900_type self.assertRaises(exception.ManageExistingVolumeTypeMismatch, self.driver.manage_existing, new_volume, ref) self.driver.delete_volume(rep_volume) self.driver.delete_volume(new_volume) # Create gmcv default replication volume rep_volume, model_update = self._create_test_volume( self.gmcv_default_type) self.assertEqual(fields.ReplicationStatus.ENABLED, model_update['replication_status']) new_volume = self._generate_vol_info() ref = {'source-name': rep_volume['name']} new_volume['volume_type_id'] = self.gmcv_with_cps900_type['id'] new_volume['volume_type'] = self.gmcv_with_cps900_type # manage existing gmcv volume with different cycle period seconds self.assertRaises( exception.ManageExistingVolumeTypeMismatch, self.driver.manage_existing, new_volume, ref) self.driver.delete_volume(rep_volume) self.driver.delete_volume(new_volume) def test_storwize_manage_existing_with_volume_replication(self): # Set replication target. self.driver.configuration.set_override('replication_device', [self.rep_target]) self.driver.do_setup(self.ctxt) # Create mm replication volume. rep_volume, model_update = self._create_test_volume(self.mm_type) self.assertEqual(fields.ReplicationStatus.ENABLED, model_update['replication_status']) uid_of_master = self._get_vdisk_uid(rep_volume['name']) uid_of_aux = self._get_vdisk_uid( storwize_const.REPLICA_AUX_VOL_PREFIX + rep_volume['name']) new_volume = self._generate_vol_info() ref = {'source-name': rep_volume['name']} new_volume['volume_type_id'] = self.mm_type['id'] new_volume['volume_type'] = self.mm_type self.driver.manage_existing(new_volume, ref) # Check the uid of the volume which has been renamed. uid_of_master_volume = self._get_vdisk_uid(new_volume['name']) uid_of_aux_volume = self._get_vdisk_uid( storwize_const.REPLICA_AUX_VOL_PREFIX + new_volume['name']) self.assertEqual(uid_of_master, uid_of_master_volume) self.assertEqual(uid_of_aux, uid_of_aux_volume) self.driver.delete_volume(rep_volume) # Create gmcv replication volume. 
rep_volume, model_update = self._create_test_volume( self.gmcv_with_cps900_type) self.assertEqual(fields.ReplicationStatus.ENABLED, model_update['replication_status']) uid_of_master = self._get_vdisk_uid(rep_volume['name']) uid_of_master_change = self._get_vdisk_uid( storwize_const.REPLICA_CHG_VOL_PREFIX + rep_volume['name']) uid_of_aux = self._get_vdisk_uid( storwize_const.REPLICA_AUX_VOL_PREFIX + rep_volume['name']) uid_of_aux_change = self._get_vdisk_uid( storwize_const.REPLICA_CHG_VOL_PREFIX + storwize_const.REPLICA_AUX_VOL_PREFIX + rep_volume['name']) new_volume = self._generate_vol_info() ref = {'source-name': rep_volume['name']} new_volume['volume_type_id'] = self.gmcv_with_cps900_type['id'] new_volume['volume_type'] = self.gmcv_with_cps900_type self.driver.manage_existing(new_volume, ref) # Check the uid of the volume which has been renamed. uid_of_new_master = self._get_vdisk_uid(new_volume['name']) uid_of_new_master_change = self._get_vdisk_uid( storwize_const.REPLICA_CHG_VOL_PREFIX + new_volume['name']) uid_of_new_aux = self._get_vdisk_uid( storwize_const.REPLICA_AUX_VOL_PREFIX + new_volume['name']) uid_of_new_aux_change = self._get_vdisk_uid( storwize_const.REPLICA_CHG_VOL_PREFIX + storwize_const.REPLICA_AUX_VOL_PREFIX + new_volume['name']) self.assertEqual(uid_of_master, uid_of_new_master) self.assertEqual(uid_of_aux, uid_of_new_aux) self.assertEqual(uid_of_master_change, uid_of_new_master_change) self.assertEqual(uid_of_aux_change, uid_of_new_aux_change) self.driver.delete_volume(rep_volume) @mock.patch.object(storwize_svc_common.StorwizeHelpers, 'rename_vdisk') @mock.patch.object(storwize_svc_common.StorwizeHelpers, 'get_relationship_info') def test_storwize_update_migrated_replication_volume( self, get_rp_info, rename_vdisk): self.driver.configuration.set_override('replication_device', [self.rep_target]) self.driver.do_setup(self.ctxt) # Create replication volume. backend_volume, model_update = self._create_test_volume(self.mm_type) volume, model_update = self._create_test_volume(self.mm_type) get_rp_info.side_effect = [{'aux_vdisk_name': 'aux_test'}] model_update = self.driver.update_migrated_volume(self.ctxt, volume, backend_volume, 'available') aux_vol = (storwize_const.REPLICA_AUX_VOL_PREFIX + volume.name) rename_vdisk.assert_has_calls([mock.call( backend_volume.name, volume.name), mock.call('aux_test', aux_vol)]) self.assertEqual({'_name_id': None}, model_update) rename_vdisk.reset_mock() rename_vdisk.side_effect = exception.VolumeBackendAPIException( data='foo') model_update = self.driver.update_migrated_volume(self.ctxt, volume, backend_volume, 'available') self.assertEqual({'_name_id': backend_volume.id}, model_update) def test_storwize_delete_volume_with_mirror_replication(self): # Set replication target. self.driver.configuration.set_override('replication_device', [self.rep_target]) self.driver.do_setup(self.ctxt) # Create metro mirror replication. volume, model_update = self._create_test_volume(self.mm_type) self.assertEqual(fields.ReplicationStatus.ENABLED, model_update['replication_status']) self._validate_replic_vol_creation(volume) # Delete volume in non-failover state self.driver.delete_volume(volume) self._validate_replic_vol_deletion(volume) # Create gmcv replication. 
gmcv_volume, model_update = self._create_test_volume( self.gmcv_with_cps600_type) self.assertEqual(fields.ReplicationStatus.ENABLED, model_update['replication_status']) self._validate_replic_vol_creation(gmcv_volume, True) # Delete gmcv volume in non-failover state self.driver.delete_volume(gmcv_volume) self._validate_replic_vol_deletion(gmcv_volume, True) volume, model_update = self._create_test_volume(self.mm_type) self.assertEqual(fields.ReplicationStatus.ENABLED, model_update['replication_status']) self._validate_replic_vol_creation(volume) non_replica_vol, model_update = self._create_test_volume( self.non_replica_type) self.assertEqual(fields.ReplicationStatus.NOT_CAPABLE, model_update['replication_status']) gmcv_volume, model_update = self._create_test_volume( self.gmcv_with_cps600_type) self.assertEqual(fields.ReplicationStatus.ENABLED, model_update['replication_status']) self._validate_replic_vol_creation(gmcv_volume, True) volumes = [volume, non_replica_vol, gmcv_volume] # Delete volume in failover state self.driver.failover_host( self.ctxt, volumes, self.rep_target['backend_id'], []) # Delete non-replicate volume in a failover state self.assertRaises(exception.VolumeDriverException, self.driver.delete_volume, non_replica_vol) # Delete replicate volume in failover state self.driver.delete_volume(volume) self._validate_replic_vol_deletion(volume) self.driver.delete_volume(gmcv_volume) self._validate_replic_vol_deletion(gmcv_volume, True) self.driver.failover_host( self.ctxt, volumes, 'default', []) self.driver.delete_volume(non_replica_vol) self._assert_vol_exists(non_replica_vol['name'], False) @mock.patch.object(storwize_svc_common.StorwizeHelpers, 'delete_vdisk') @mock.patch.object(storwize_svc_common.StorwizeHelpers, 'delete_relationship') def test_delete_source_volume(self, delete_relationship, delete_vdisk): # Set replication target. self.driver.configuration.set_override('replication_device', [self.rep_target]) self.driver.do_setup(self.ctxt) fake_name = 'volume-%s' % fake.VOLUME_ID self.driver._helpers.delete_rc_volume(fake_name) self.assertFalse(delete_relationship.called) master_change_fake_name = ( storwize_const.REPLICA_CHG_VOL_PREFIX + fake_name) calls = [mock.call(master_change_fake_name, force_delete=False, force_unmap=True), mock.call(fake_name, force_delete=False, force_unmap=True)] delete_vdisk.assert_has_calls(calls, any_order=True) self.assertEqual(2, delete_vdisk.call_count) rel_info = {'name': 'fake_rcrel', 'aux_vdisk_name': fake_name, 'master_vdisk_name': 'volume-%s' % fake.VOLUME_ID} self.driver._helpers.delete_rc_volume(fake_name, rel_info) delete_relationship.assert_called_once_with(fake_name, rcrel=rel_info['name']) @mock.patch.object(storwize_svc_common.StorwizeHelpers, 'delete_vdisk') @mock.patch.object(storwize_svc_common.StorwizeHelpers, 'delete_relationship') def test_delete_target_volume(self, delete_relationship, delete_vdisk): # Set replication target. 
self.driver.configuration.set_override('replication_device', [self.rep_target]) self.driver.do_setup(self.ctxt) fake_name = 'aux_volume-%s' % fake.VOLUME_ID rel_info = {'name': 'fake_rcrel', 'aux_vdisk_name': fake_name, 'master_vdisk_name': 'volume-%s' % fake.VOLUME_ID} self.driver._aux_backend_helpers.delete_rc_volume(fake_name, rel_info, target_vol=True) delete_relationship.assert_called_with(fake_name, rcrel=rel_info['name']) target_change_fake_name = ( storwize_const.REPLICA_CHG_VOL_PREFIX + fake_name) calls = [mock.call(target_change_fake_name, force_delete=False, force_unmap=True), mock.call(fake_name, force_delete=False, force_unmap=True)] delete_vdisk.assert_has_calls(calls, any_order=True) self.assertEqual(2, delete_vdisk.call_count) @mock.patch.object(storwize_svc_common.StorwizeHelpers, 'delete_vdisk') @mock.patch.object(storwize_svc_common.StorwizeHelpers, 'delete_relationship') def test_delete_target_volume_no_relationship(self, delete_relationship, delete_vdisk): # Set replication target. self.driver.configuration.set_override('replication_device', [self.rep_target]) self.driver.do_setup(self.ctxt) fake_name = 'aux_volume-%s' % fake.VOLUME_ID self.driver._aux_backend_helpers.delete_rc_volume( fake_name, target_vol=True, rel_info=None) self.assertFalse(delete_relationship.called) self.assertTrue(delete_vdisk.called) @mock.patch.object(storwize_svc_common.StorwizeHelpers, 'delete_vdisk') @mock.patch.object(storwize_svc_common.StorwizeHelpers, 'delete_relationship') def test_delete_target_volume_fail(self, delete_relationship, delete_vdisk): fake_id = fake.VOLUME_ID fake_name = 'aux_volume-%s' % fake_id rel_info = {'name': 'fake_rcrel', 'aux_vdisk_name': fake_name, 'master_vdisk_name': 'volume-%s' % fake_id} delete_vdisk.side_effect = Exception self.assertRaises(exception.VolumeDriverException, self.driver._aux_backend_helpers.delete_rc_volume, fake_name, rel_info, target_vol=True) delete_relationship.assert_called_once_with(fake_name, rcrel=rel_info['name']) @mock.patch.object(storwize_svc_common.StorwizeHelpers, 'get_relationship_info') def test_get_target_volume_information(self, get_relationship_info): volume, model_update = self._create_test_volume(self.mm_type) self.assertEqual(fields.ReplicationStatus.ENABLED, model_update['replication_status']) fake_aux_volume_name = storwize_const.REPLICA_AUX_VOL_PREFIX + \ volume.name rel_info = {'aux_vdisk_name': fake_aux_volume_name, 'master_vdisk_name': volume.name} get_relationship_info.return_value = rel_info target_volume, rel_info = ( self.driver._helpers.get_target_volume_information(volume)) self.assertEqual(target_volume, fake_aux_volume_name) self.assertIsNotNone(rel_info) get_relationship_info.assert_called_with(volume.name) get_relationship_info.return_value = None target_volume, rel_info = ( self.driver._helpers.get_target_volume_information(volume)) self.assertEqual(target_volume, fake_aux_volume_name) self.assertIsNone(rel_info) get_relationship_info.assert_called_with(volume.name) def test_is_replicated_volume_primary(self): volume, model_update = self._create_test_volume(self.gm_type) rel_info = self.driver._helpers.get_relationship_info(volume.name) flag = self.driver._helpers.is_replicated_volume_primary(volume, rel_info) self.assertEqual(flag, True) # GM volume with auxiliary as Primary rel_info["primary"] = "aux" flag = self.driver._helpers.is_replicated_volume_primary(volume, rel_info) self.assertEqual(flag, False) @ddt.data((True, True, 1), (False, True, 2), (True, False, 2), (False, False, 2))
@mock.patch.object(storwize_svc_common.StorwizeHelpers, 'delete_vdisk') @mock.patch.object(storwize_svc_common.StorwizeHelpers, 'delete_relationship') @ddt.unpack def test_retain_target_volume(self, target_vol, retain_aux_vol, call_count, delete_relationship, delete_vdisk): # Set replication target. self.driver.configuration.set_override('replication_device', [self.rep_target]) self.driver.do_setup(self.ctxt) fake_name = 'aux_volume-%s' % fake.VOLUME_ID rel_info = {'name': 'fakercrel', 'aux_vdisk_name': fake_name, 'master_vdisk_name': 'volume-%s' % fake.VOLUME_ID} self.driver._aux_backend_helpers.delete_rc_volume( fake_name, rel_info, target_vol=target_vol, retain_aux_volume=retain_aux_vol) vol_name = fake_name change_vol_name = (storwize_const.REPLICA_CHG_VOL_PREFIX + vol_name) if rel_info: delete_relationship.assert_called_once_with(vol_name, rcrel=rel_info['name']) calls = [mock.call(change_vol_name, force_delete=False, force_unmap=True)] if (target_vol and not retain_aux_vol) or not target_vol: calls.extend([mock.call(vol_name, force_delete=False, force_unmap=True)]) delete_vdisk.assert_has_calls(calls, any_order=True) self.assertEqual(call_count, delete_vdisk.call_count) def test_storwize_failover_host_backend_error(self): self.driver.configuration.set_override('replication_device', [self.rep_target]) self.driver.do_setup(self.ctxt) # Create metro mirror replication. mm_vol, model_update = self._create_test_volume(self.mm_type) self.assertEqual(fields.ReplicationStatus.ENABLED, model_update['replication_status']) # Create gmcv replication. gmcv_vol, model_update = self._create_test_volume( self.gmcv_with_cps900_type) self.assertEqual(fields.ReplicationStatus.ENABLED, model_update['replication_status']) volumes = [mm_vol, gmcv_vol] self.driver._replica_enabled = False self.assertRaises(exception.UnableToFailOver, self.driver.failover_host, self.ctxt, volumes, self.rep_target['backend_id'], []) self.driver._replica_enabled = True self.assertRaises(exception.InvalidReplicationTarget, self.driver.failover_host, self.ctxt, volumes, self.fake_target['backend_id'], []) with mock.patch.object(storwize_svc_common.StorwizeHelpers, 'get_system_info') as get_sys_info: get_sys_info.side_effect = [ exception.VolumeBackendAPIException(data='CMMVC6071E'), exception.VolumeBackendAPIException(data='CMMVC6071E')] self.assertRaises(exception.UnableToFailOver, self.driver.failover_host, self.ctxt, volumes, self.rep_target['backend_id'], []) self.driver._active_backend_id = self.rep_target['backend_id'] self.assertRaises(exception.UnableToFailOver, self.driver.failover_host, self.ctxt, volumes, 'default', []) self.driver.delete_volume(mm_vol) self.driver.delete_volume(gmcv_vol) @mock.patch.object(storwize_svc_common.StorwizeHelpers, 'get_relationship_info') def test_failover_volume_relationship_error(self, get_relationship_info): # Create global mirror replication. gm_vol, model_update = self._create_test_volume(self.gm_type) self.assertEqual(fields.ReplicationStatus.ENABLED, model_update['replication_status']) # Create gmcv replication. 
gmcv_vol, model_update = self._create_test_volume( self.gmcv_default_type) self.assertEqual(fields.ReplicationStatus.ENABLED, model_update['replication_status']) get_relationship_info.side_effect = [None, exception.VolumeDriverException, None, exception.VolumeDriverException] expected_list = [{'updates': {'replication_status': fields.ReplicationStatus.FAILOVER_ERROR, 'status': 'error'}, 'volume_id': gm_vol['id']}, {'updates': {'replication_status': fields.ReplicationStatus.FAILOVER_ERROR, 'status': 'error'}, 'volume_id': gmcv_vol['id']} ] volumes_update = self.driver._failover_replica_volumes( self.ctxt, [gm_vol, gmcv_vol]) self.assertEqual(expected_list, volumes_update) volumes_update = self.driver._failover_replica_volumes( self.ctxt, [gm_vol, gmcv_vol]) self.assertEqual(expected_list, volumes_update) @mock.patch.object(storwize_svc_common.StorwizeSVCCommonDriver, '_update_volume_stats') def test_storwize_failover_host_replica_volumes(self, update_volume_stats): self.driver.configuration.set_override('replication_device', [self.rep_target]) self.driver.do_setup(self.ctxt) # Create metro mirror replication. mm_vol, model_update = self._create_test_volume(self.mm_type) self.assertEqual(fields.ReplicationStatus.ENABLED, model_update['replication_status']) # Create global replication volume. gm_vol, model_update = self._create_test_volume(self.gm_type) self.assertEqual(fields.ReplicationStatus.ENABLED, model_update['replication_status']) gm_vol['status'] = 'in-use' # Create global replication volume. gm_vol1, model_update = self._create_test_volume(self.gm_type) gm_vol1['status'] = 'in-use' gm_vol1['previous_status'] = 'in-use' gm_vol2, model_update = self._create_test_volume(self.gm_type) gm_vol2['status'] = 'in-use' gm_vol2['previous_status'] = 'available' # Create gmcv volume. 
gmcv_vol, model_update = self._create_test_volume( self.gmcv_with_cps600_type) self.assertEqual(fields.ReplicationStatus.ENABLED, model_update['replication_status']) gmcv_vol['status'] = 'available' gmcv_vol['previous_status'] = 'in-use' volumes = [mm_vol, gm_vol, gm_vol1, gm_vol2, gmcv_vol] expected_list = [{'updates': {'replication_status': fields.ReplicationStatus.FAILED_OVER}, 'volume_id': mm_vol['id']}, {'updates': {'replication_status': fields.ReplicationStatus.FAILED_OVER}, 'volume_id': gm_vol['id']}, {'updates': {'replication_status': fields.ReplicationStatus.FAILED_OVER}, 'volume_id': gm_vol1['id']}, {'updates': {'replication_status': fields.ReplicationStatus.FAILED_OVER}, 'volume_id': gm_vol2['id']}, {'updates': {'replication_status': fields.ReplicationStatus.FAILED_OVER}, 'volume_id': gmcv_vol['id']} ] group1 = self._create_test_rccg(self.rccg_type, [self.mm_type.id]) group2 = self._create_test_rccg(self.rccg_type, [self.gm_type.id]) mm_vol1, model_update = self._create_test_volume( self.mm_type, status='available') mm_vol2, model_update = self._create_test_volume( self.mm_type, status='in-use') gm_vol3, model_update = self._create_test_volume( self.gm_type, status='available', previous_status='in-use') ctxt = context.get_admin_context() self.db.volume_update(ctxt, mm_vol1['id'], {'group_id': group1.id}) self.db.volume_update(ctxt, mm_vol2['id'], {'group_id': group1.id}) self.db.volume_update(ctxt, gm_vol3['id'], {'group_id': group2.id}) vols1 = [mm_vol1, mm_vol2] self.driver.update_group(self.ctxt, group1, vols1, []) mm_vol1.group = group1 mm_vol2.group = group1 group1.volumes = objects.VolumeList.get_all_by_generic_group(self.ctxt, group1.id) vols2 = [gm_vol3] self.driver.update_group(self.ctxt, group2, vols2, []) gm_vol3.group = group2 group2.volumes = objects.VolumeList.get_all_by_generic_group(self.ctxt, group2.id) rccg_name = self.driver._get_rccg_name(group1) self.sim._rccg_state_transition('wait', self.sim._rcconsistgrp_list[rccg_name]) rccg_name = self.driver._get_rccg_name(group2) self.sim._rccg_state_transition('wait', self.sim._rcconsistgrp_list[rccg_name]) volumes.extend(vols1) volumes.extend(vols2) expected_list1 = [{'updates': {'replication_status': fields.ReplicationStatus.FAILED_OVER, 'status': 'available'}, 'volume_id': mm_vol1['id']}, {'updates': {'replication_status': fields.ReplicationStatus.FAILED_OVER, 'status': 'in-use'}, 'volume_id': mm_vol2['id']}, {'updates': {'replication_status': fields.ReplicationStatus.FAILED_OVER, 'status': 'available'}, 'volume_id': gm_vol3['id']}] expected_list.extend(expected_list1) grp_expected = [{'group_id': group1.id, 'updates': {'replication_status': fields.ReplicationStatus.FAILED_OVER, 'status': fields.GroupStatus.AVAILABLE}}, {'group_id': group2.id, 'updates': {'replication_status': fields.ReplicationStatus.FAILED_OVER, 'status': fields.GroupStatus.AVAILABLE}} ] target_id, volume_list, groups_update = self.driver.failover_host( self.ctxt, volumes, self.rep_target['backend_id'], [group1, group2]) self.assertEqual(self.rep_target['backend_id'], target_id) self.assertEqual(expected_list, volume_list) self.assertEqual(grp_expected, groups_update) self.assertEqual(self.driver._active_backend_id, target_id) self.assertEqual(self.driver._aux_backend_helpers, self.driver._helpers) self.assertEqual([self.driver._replica_target['pool_name']], self.driver._get_backend_pools()) self.assertEqual(self.driver._state, self.driver._aux_state) self.assertTrue(update_volume_stats.called) self.driver.delete_volume(gmcv_vol) target_id, 
volume_list, groups_update = self.driver.failover_host( self.ctxt, volumes, None, []) self.assertEqual(self.rep_target['backend_id'], target_id) self.assertEqual([], volume_list) self.assertEqual([], groups_update) self.driver.delete_volume(mm_vol) self.driver.delete_volume(gm_vol) self.driver.delete_volume(gm_vol1) self.driver.delete_volume(gm_vol2) self.driver.delete_volume(gmcv_vol) self.driver.delete_group(self.ctxt, group1, vols1) self.driver.delete_group(self.ctxt, group2, vols2) @mock.patch.object(storwize_svc_common.StorwizeSVCCommonDriver, '_update_volume_stats') def test_storwize_failover_host_normal_volumes(self, update_volume_stats): self.driver.configuration.set_override('replication_device', [self.rep_target]) self.driver.do_setup(self.ctxt) # Create metro mirror replication. mm_vol, model_update = self._create_test_volume(self.mm_type) self.assertEqual(fields.ReplicationStatus.ENABLED, model_update['replication_status']) mm_vol['status'] = 'error' # Create gmcv replication. gmcv_vol, model_update = self._create_test_volume( self.gmcv_with_cps600_type) self.assertEqual(fields.ReplicationStatus.ENABLED, model_update['replication_status']) gmcv_vol['status'] = 'error' # Create non-replication volume. non_replica_vol, model_update = self._create_test_volume( self.non_replica_type) self.assertEqual(fields.ReplicationStatus.NOT_CAPABLE, model_update['replication_status']) non_replica_vol['status'] = 'error' volumes = [mm_vol, gmcv_vol, non_replica_vol] rep_data1 = json.dumps({'previous_status': mm_vol['status']}) rep_data2 = json.dumps({'previous_status': gmcv_vol['status']}) rep_data3 = json.dumps({'previous_status': non_replica_vol['status']}) expected_list = [{'updates': {'status': 'error', 'replication_driver_data': rep_data1}, 'volume_id': mm_vol['id']}, {'updates': {'status': 'error', 'replication_driver_data': rep_data2}, 'volume_id': gmcv_vol['id']}, {'updates': {'status': 'error', 'replication_driver_data': rep_data3}, 'volume_id': non_replica_vol['id']}, ] target_id, volume_list, groups_update = self.driver.failover_host( self.ctxt, volumes, self.rep_target['backend_id'], []) self.assertEqual(self.rep_target['backend_id'], target_id) self.assertEqual(expected_list, volume_list) self.assertEqual([], groups_update) self.assertEqual(self.driver._active_backend_id, target_id) self.assertEqual(self.driver._aux_backend_helpers, self.driver._helpers) self.assertEqual([self.driver._replica_target['pool_name']], self.driver._get_backend_pools()) self.assertEqual(self.driver._state, self.driver._aux_state) self.assertTrue(update_volume_stats.called) target_id, volume_list, groups_update = self.driver.failover_host( self.ctxt, volumes, None, []) self.assertEqual(self.rep_target['backend_id'], target_id) self.assertEqual([], volume_list) self.assertEqual([], groups_update) # Delete non-replicate volume in a failover state self.assertRaises(exception.VolumeDriverException, self.driver.delete_volume, non_replica_vol) self.driver.failover_host(self.ctxt, volumes, 'default', []) self.driver.delete_volume(mm_vol) self.driver.delete_volume(gmcv_vol) self.driver.delete_volume(non_replica_vol) @mock.patch.object(storwize_svc_common.StorwizeHelpers, 'start_relationship') @mock.patch.object(storwize_svc_common.StorwizeHelpers, 'stop_relationship') def test_failover_host_by_force_access(self, stop_relationship, start_relationship): replica_obj = self.driver._get_replica_obj(storwize_const.METRO) mm_vol, model_update = self._create_test_volume(self.mm_type) mm_vol_attrs = 
self.driver._helpers.get_vdisk_attributes(mm_vol.name) target_vol = storwize_const.REPLICA_AUX_VOL_PREFIX + mm_vol.name context = mock.Mock replica_obj.failover_volume_host(context, mm_vol) stop_relationship.assert_called_once_with(target_vol, access=True) calls = [mock.call(mm_vol.name, rcrel=mm_vol_attrs['RC_name']), mock.call(target_vol, 'aux')] start_relationship.assert_has_calls(calls, any_order=True) self.assertEqual(2, start_relationship.call_count) @mock.patch.object(storwize_svc_common.StorwizeSVCCommonDriver, '_update_volume_stats') def test_storwize_failback_replica_volumes(self, update_volume_stats): self.driver.configuration.set_override('replication_device', [self.rep_target]) self.driver.do_setup(self.ctxt) # Create metro mirror replication. mm_vol, model_update = self._create_test_volume(self.mm_type) self.assertEqual(fields.ReplicationStatus.ENABLED, model_update['replication_status']) # Create global mirror replication. gm_vol, model_update = self._create_test_volume(self.gm_type) self.assertEqual(fields.ReplicationStatus.ENABLED, model_update['replication_status']) gm_vol['status'] = 'in-use' gm_vol['previous_status'] = '' gm_vol1, model_update = self._create_test_volume(self.gm_type) gm_vol1['status'] = 'in-use' gm_vol1['previous_status'] = 'in-use' gm_vol2, model_update = self._create_test_volume(self.gm_type) gm_vol2['status'] = 'in-use' gm_vol2['previous_status'] = 'available' # Create gmcv replication. gmcv_vol, model_update = self._create_test_volume( self.gmcv_with_cps900_type) self.assertEqual(fields.ReplicationStatus.ENABLED, model_update['replication_status']) volumes = [mm_vol, gm_vol, gm_vol1, gm_vol2, gmcv_vol] failover_expect = [{'updates': {'replication_status': fields.ReplicationStatus.FAILED_OVER}, 'volume_id': mm_vol['id']}, {'updates': {'replication_status': fields.ReplicationStatus.FAILED_OVER}, 'volume_id': gm_vol['id']}, {'updates': {'replication_status': fields.ReplicationStatus.FAILED_OVER}, 'volume_id': gm_vol1['id']}, {'updates': {'replication_status': fields.ReplicationStatus.FAILED_OVER}, 'volume_id': gm_vol2['id']}, {'updates': {'replication_status': fields.ReplicationStatus.FAILED_OVER}, 'volume_id': gmcv_vol['id']} ] group1 = self._create_test_rccg(self.rccg_type, [self.mm_type.id]) group2 = self._create_test_rccg(self.rccg_type, [self.gm_type.id]) mm_vol1, model_update = self._create_test_volume( self.mm_type, status='available') mm_vol2, model_update = self._create_test_volume( self.mm_type, status='in-use') gm_vol3, model_update = self._create_test_volume( self.gm_type, status='available', previous_status='in-use') ctxt = context.get_admin_context() self.db.volume_update(ctxt, mm_vol1['id'], {'group_id': group1.id}) self.db.volume_update(ctxt, mm_vol2['id'], {'group_id': group1.id}) self.db.volume_update(ctxt, gm_vol3['id'], {'group_id': group2.id}) vols1 = [mm_vol1, mm_vol2] self.driver.update_group(self.ctxt, group1, vols1, []) mm_vol1.group = group1 mm_vol2.group = group1 group1.volumes = objects.VolumeList.get_all_by_generic_group(self.ctxt, group1.id) vols2 = [gm_vol3] self.driver.update_group(self.ctxt, group2, vols2, []) gm_vol3.group = group2 group2.volumes = objects.VolumeList.get_all_by_generic_group(self.ctxt, group2.id) rccg_name = self.driver._get_rccg_name(group1) self.sim._rccg_state_transition('wait', self.sim._rcconsistgrp_list[rccg_name]) rccg_name = self.driver._get_rccg_name(group2) self.sim._rccg_state_transition('wait', self.sim._rcconsistgrp_list[rccg_name]) volumes.extend(vols1) volumes.extend(vols2) 
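# Expected failover updates for the volumes added to the replication groups above (mm_vol1, mm_vol2 and gm_vol3).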
expected_list1 = [{'updates': {'replication_status': fields.ReplicationStatus.FAILED_OVER, 'status': 'available'}, 'volume_id': mm_vol1['id']}, {'updates': {'replication_status': fields.ReplicationStatus.FAILED_OVER, 'status': 'in-use'}, 'volume_id': mm_vol2['id']}, {'updates': {'replication_status': fields.ReplicationStatus.FAILED_OVER, 'status': 'available'}, 'volume_id': gm_vol3['id']}] failover_expect.extend(expected_list1) grp_expected = [{'group_id': group1.id, 'updates': {'replication_status': fields.ReplicationStatus.FAILED_OVER, 'status': fields.GroupStatus.AVAILABLE}}, {'group_id': group2.id, 'updates': {'replication_status': fields.ReplicationStatus.FAILED_OVER, 'status': fields.GroupStatus.AVAILABLE}} ] # Already failback target_id, volume_list, groups_update = self.driver.failover_host( self.ctxt, volumes, 'default', [group1, group2]) self.assertIsNone(target_id) self.assertEqual([], volume_list) self.assertEqual([], groups_update) # fail over operation target_id, volume_list, groups_update = self.driver.failover_host( self.ctxt, volumes, self.rep_target['backend_id'], [group1, group2]) self.assertEqual(self.rep_target['backend_id'], target_id) self.assertEqual(failover_expect, volume_list) self.assertEqual(grp_expected, groups_update) self.assertEqual(self.driver._state, self.driver._aux_state) self.assertTrue(update_volume_stats.called) mm_vol['status'] = 'available' mm_vol['previous_status'] = 'available' gm_vol['status'] = 'available' gm_vol['previous_status'] = 'in-use' gm_vol1['status'] = 'in-use' gm_vol1['previous_status'] = 'in-use' gm_vol2['status'] = 'available' gm_vol2['previous_status'] = 'in-use' gmcv_vol['status'] = 'available' gmcv_vol['previous_status'] = '' failback_expect = [{'updates': {'replication_status': fields.ReplicationStatus.ENABLED}, 'volume_id': mm_vol['id']}, {'updates': {'replication_status': fields.ReplicationStatus.ENABLED}, 'volume_id': gm_vol['id']}, {'updates': {'replication_status': fields.ReplicationStatus.ENABLED}, 'volume_id': gm_vol1['id']}, {'updates': {'replication_status': fields.ReplicationStatus.ENABLED}, 'volume_id': gm_vol2['id']}, {'updates': {'replication_status': fields.ReplicationStatus.ENABLED}, 'volume_id': gmcv_vol['id']}, {'updates': {'replication_status': fields.ReplicationStatus.ENABLED, 'status': 'available'}, 'volume_id': mm_vol1['id']}, {'updates': {'replication_status': fields.ReplicationStatus.ENABLED, 'status': 'in-use'}, 'volume_id': mm_vol2['id']}, {'updates': {'replication_status': fields.ReplicationStatus.ENABLED, 'status': 'available'}, 'volume_id': gm_vol3['id']}] grp_expected = [{'group_id': group1.id, 'updates': {'replication_status': fields.ReplicationStatus.ENABLED, 'status': fields.GroupStatus.AVAILABLE}}, {'group_id': group2.id, 'updates': {'replication_status': fields.ReplicationStatus.ENABLED, 'status': fields.GroupStatus.AVAILABLE}} ] # fail back operation target_id, volume_list, groups_update = self.driver.failover_host( self.ctxt, volumes, 'default', [group1, group2]) self.assertEqual('default', target_id) self.assertEqual(failback_expect, volume_list) self.assertEqual(grp_expected, groups_update) self.assertIsNone(self.driver._active_backend_id) self.assertEqual(SVC_POOLS, self.driver._get_backend_pools()) self.assertEqual(self.driver._state, self.driver._master_state) self.assertTrue(update_volume_stats.called) self.driver.delete_volume(mm_vol) self.driver.delete_volume(gm_vol) self.driver.delete_volume(gm_vol1) self.driver.delete_volume(gm_vol2) self.driver.delete_volume(gmcv_vol) 
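# Also remove the consistency groups created for this test.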
self.driver.delete_group(self.ctxt, group1, vols1) self.driver.delete_group(self.ctxt, group2, vols2) @mock.patch.object(storwize_svc_common.StorwizeSVCCommonDriver, '_update_volume_stats') def test_storwize_failback_normal_volumes(self, update_volume_stats): self.driver.configuration.set_override('replication_device', [self.rep_target]) self.driver.do_setup(self.ctxt) # Create replication volume. mm_vol, model_update = self._create_test_volume(self.mm_type) self.assertEqual(fields.ReplicationStatus.ENABLED, model_update['replication_status']) mm_vol['status'] = 'error' gm_vol, model_update = self._create_test_volume(self.gm_type) self.assertEqual(fields.ReplicationStatus.ENABLED, model_update['replication_status']) gm_vol['status'] = 'available' # Create gmcv replication. gmcv_vol, model_update = self._create_test_volume( self.gmcv_default_type) self.assertEqual(fields.ReplicationStatus.ENABLED, model_update['replication_status']) gmcv_vol['status'] = 'error' volumes = [mm_vol, gmcv_vol, gm_vol] rep_data0 = json.dumps({'previous_status': mm_vol['status']}) rep_data1 = json.dumps({'previous_status': gmcv_vol['status']}) failover_expect = [{'updates': {'replication_status': fields.ReplicationStatus.FAILED_OVER}, 'volume_id': gm_vol['id']}, {'updates': {'status': 'error', 'replication_driver_data': rep_data0}, 'volume_id': mm_vol['id']}, {'updates': {'status': 'error', 'replication_driver_data': rep_data1}, 'volume_id': gmcv_vol['id']}] # Already failback target_id, volume_list, groups_update = self.driver.failover_host( self.ctxt, volumes, 'default', []) self.assertIsNone(target_id) self.assertEqual([], volume_list) self.assertEqual([], groups_update) # fail over operation target_id, volume_list, groups_update = self.driver.failover_host( self.ctxt, volumes, self.rep_target['backend_id'], []) self.assertEqual(self.rep_target['backend_id'], target_id) self.assertEqual(failover_expect, volume_list) self.assertEqual([], groups_update) self.assertEqual(self.driver._state, self.driver._aux_state) self.assertTrue(update_volume_stats.called) # fail back operation mm_vol['replication_driver_data'] = json.dumps( {'previous_status': 'error'}) gmcv_vol['replication_driver_data'] = json.dumps( {'previous_status': 'error'}) gm_vol['status'] = 'in-use' gm_vol['previous_status'] = 'in-use' failback_expect = [{'updates': {'replication_status': fields.ReplicationStatus.ENABLED}, 'volume_id': gm_vol['id']}, {'updates': {'status': 'error', 'replication_driver_data': ''}, 'volume_id': mm_vol['id']}, {'updates': {'status': 'error', 'replication_driver_data': ''}, 'volume_id': gmcv_vol['id']}] target_id, volume_list, groups_update = self.driver.failover_host( self.ctxt, volumes, 'default', []) self.assertEqual('default', target_id) self.assertEqual(failback_expect, volume_list) self.assertEqual([], groups_update) self.assertIsNone(self.driver._active_backend_id) self.assertEqual(SVC_POOLS, self.driver._get_backend_pools()) self.assertEqual(self.driver._state, self.driver._master_state) self.assertTrue(update_volume_stats.called) self.driver.delete_volume(mm_vol) self.driver.delete_volume(gmcv_vol) @mock.patch.object(storwize_svc_common.StorwizeHelpers, 'get_system_info') @mock.patch.object(storwize_rep.StorwizeSVCReplicationManager, '_partnership_validate_create') def test_establish_partnership_with_local_sys(self, partnership_create, get_system_info): get_system_info.side_effect = [{'system_name': 'storwize-svc-sim'}, {'system_name':
'storwize-svc-sim'}] rep_mgr = self.driver._get_replica_mgr() rep_mgr.establish_target_partnership() self.assertFalse(partnership_create.called) @mock.patch.object(storwize_svc_common.StorwizeHelpers, 'get_system_info') def test_establish_target_partnership(self, get_system_info): source_system_name = 'storwize-svc-sim' target_system_name = 'aux-svc-sim' get_system_info.side_effect = [{'system_name': source_system_name}, {'system_name': target_system_name}] rep_mgr = self.driver._get_replica_mgr() rep_mgr.establish_target_partnership() partner_info = self.driver._helpers.get_partnership_info( source_system_name) self.assertIsNotNone(partner_info) self.assertEqual(partner_info['name'], source_system_name) partner_info = self.driver._helpers.get_partnership_info( source_system_name) self.assertIsNotNone(partner_info) self.assertEqual(partner_info['name'], source_system_name) @mock.patch.object(storwize_svc_common.StorwizeHelpers, 'get_partnership_info') @mock.patch.object(storwize_svc_common.StorwizeHelpers, 'chpartnership') def test_start_partnership(self, chpartnership, get_partnership_info): get_partnership_info.side_effect = [ None, {'partnership': 'fully_configured', 'id': '0'}, {'partnership': 'fully_configured_stopped', 'id': '0'}] rep_mgr = self.driver._get_replica_mgr() rep_mgr._partnership_start(rep_mgr._master_helpers, 'storwize-svc-sim') self.assertFalse(chpartnership.called) rep_mgr._partnership_start(rep_mgr._master_helpers, 'storwize-svc-sim') self.assertFalse(chpartnership.called) rep_mgr._partnership_start(rep_mgr._master_helpers, 'storwize-svc-sim') chpartnership.assert_called_once_with('0') @mock.patch.object(storwize_svc_common.StorwizeHelpers, 'start_relationship') def test_sync_replica_volumes_with_aux(self, start_relationship): # Create metro mirror replication. mm_vol = self._generate_vol_info(self.mm_type) tgt_volume = storwize_const.REPLICA_AUX_VOL_PREFIX + mm_vol['name'] # Create gmcv replication. 
gmcv_vol = self._generate_vol_info(self.gmcv_with_cps600_type) tgt_gmcv_volume = (storwize_const.REPLICA_AUX_VOL_PREFIX + gmcv_vol['name']) volumes = [mm_vol, gmcv_vol] fake_info = {'volume': 'fake', 'master_vdisk_name': 'fake', 'aux_vdisk_name': 'fake', 'name': 'fake_rcrel'} sync_state = {'state': storwize_const.REP_CONSIS_SYNC, 'primary': 'fake'} sync_state.update(fake_info) sync_copying_state = {'state': storwize_const.REP_CONSIS_COPYING, 'primary': 'fake'} sync_copying_state.update(fake_info) disconn_state = {'state': storwize_const.REP_IDL_DISC, 'primary': 'master'} disconn_state.update(fake_info) stop_state = {'state': storwize_const.REP_CONSIS_STOP, 'primary': 'aux'} stop_state.update(fake_info) with mock.patch.object(storwize_svc_common.StorwizeHelpers, 'get_relationship_info', mock.Mock(return_value=None)): self.driver._sync_with_aux(self.ctxt, volumes) self.assertFalse(start_relationship.called) with mock.patch.object(storwize_svc_common.StorwizeHelpers, 'get_relationship_info', mock.Mock(return_value=sync_state)): self.driver._sync_with_aux(self.ctxt, volumes) self.assertFalse(start_relationship.called) with mock.patch.object(storwize_svc_common.StorwizeHelpers, 'get_relationship_info', mock.Mock(return_value=sync_copying_state)): self.driver._sync_with_aux(self.ctxt, volumes) self.assertFalse(start_relationship.called) with mock.patch.object(storwize_svc_common.StorwizeHelpers, 'get_relationship_info', mock.Mock(return_value=disconn_state)): self.driver._sync_with_aux(self.ctxt, volumes) calls = [mock.call(tgt_volume, rcrel=fake_info['name']), mock.call(tgt_gmcv_volume, rcrel=fake_info['name'])] start_relationship.assert_has_calls(calls, any_order=True) self.assertEqual(2, start_relationship.call_count) start_relationship.reset_mock() with mock.patch.object(storwize_svc_common.StorwizeHelpers, 'get_relationship_info', mock.Mock(return_value=stop_state)): self.driver._sync_with_aux(self.ctxt, volumes) calls = [mock.call(tgt_volume, primary='aux', rcrel=fake_info['name']), mock.call(tgt_gmcv_volume, primary='aux', rcrel=fake_info['name'])] start_relationship.assert_has_calls(calls, any_order=True) self.assertEqual(2, start_relationship.call_count) self.driver.delete_volume(mm_vol) self.driver.delete_volume(gmcv_vol) @mock.patch.object(storwize_svc_common.StorwizeHelpers, 'get_relationship_info') @mock.patch('oslo_service.loopingcall.FixedIntervalLoopingCall', new=testutils.ZeroIntervalLoopingCall) def test_wait_replica_vol_ready(self, get_relationship_info): # Create metro mirror replication. mm_vol = self._generate_vol_info(self.mm_type) # Create gmcv replication. 
gmcv_vol = self._generate_vol_info(self.gmcv_with_cps900_type) fake_info = {'volume': 'fake', 'master_vdisk_name': 'fake', 'aux_vdisk_name': 'fake', 'primary': 'fake'} sync_state = {'state': storwize_const.REP_CONSIS_SYNC} sync_state.update(fake_info) sync_copying_state = {'state': storwize_const.REP_CONSIS_COPYING} sync_copying_state.update(fake_info) disconn_state = {'state': storwize_const.REP_IDL_DISC} disconn_state.update(fake_info) with mock.patch.object(storwize_svc_common.StorwizeHelpers, 'get_relationship_info', mock.Mock(return_value=None)): self.assertRaises(exception.VolumeBackendAPIException, self.driver._wait_replica_vol_ready, self.ctxt, mm_vol) with mock.patch.object(storwize_svc_common.StorwizeHelpers, 'get_relationship_info', mock.Mock(return_value=None)): self.assertRaises(exception.VolumeBackendAPIException, self.driver._wait_replica_vol_ready, self.ctxt, gmcv_vol) with mock.patch.object(storwize_svc_common.StorwizeHelpers, 'get_relationship_info', mock.Mock(return_value=sync_state)): self.driver._wait_replica_vol_ready(self.ctxt, mm_vol) with mock.patch.object(storwize_svc_common.StorwizeHelpers, 'get_relationship_info', mock.Mock(return_value=sync_copying_state)): self.driver._wait_replica_vol_ready(self.ctxt, gmcv_vol) with mock.patch.object(storwize_svc_common.StorwizeHelpers, 'get_relationship_info', mock.Mock(return_value=disconn_state)): self.assertRaises(exception.VolumeBackendAPIException, self.driver._wait_replica_vol_ready, self.ctxt, mm_vol) with mock.patch.object(storwize_svc_common.StorwizeHelpers, 'get_relationship_info', mock.Mock(return_value=disconn_state)): self.assertRaises(exception.VolumeBackendAPIException, self.driver._wait_replica_vol_ready, self.ctxt, gmcv_vol) # Replication groups operation def test_storwize_rep_group_create(self): self.driver.configuration.set_override('replication_device', [self.rep_target]) self.driver.do_setup(self.ctxt) # create group in db group = testutils.create_group(self.ctxt, volume_type_ids=[self.mm_type.id], group_type_id=self.rccg_type.id) model_update = self.driver.create_group(self.ctxt, group) self.assertEqual(fields.GroupStatus.AVAILABLE, model_update['status']) self.assertEqual(fields.ReplicationStatus.ENABLED, model_update['replication_status']) rccg_name = self.driver._get_rccg_name(group) rccg = self.driver._helpers.get_rccg(rccg_name) self.assertEqual(rccg['name'], rccg_name) self.driver.delete_group(self.ctxt, group, []) def test_storwize_rep_group_delete(self): self.driver.configuration.set_override('replication_device', [self.rep_target]) self.driver.do_setup(self.ctxt) mm_vol1, model_update = self._create_test_volume(self.mm_type) mm_vol2, model_update = self._create_test_volume(self.mm_type) vols = [mm_vol1, mm_vol2] group = self._create_test_rccg(self.rccg_type, [self.mm_type.id]) self.driver.update_group(self.ctxt, group, vols, []) (model_update, volumes_model_update) = self.driver.delete_group( self.ctxt, group, vols) for vol in vols: self.assertFalse(self.driver._helpers.is_vdisk_defined(vol.name)) self.assertIsNone(self.driver._helpers.get_rccg( self.driver._get_rccg_name(group))) for vol_update in volumes_model_update: self.assertEqual(fields.GroupStatus.DELETED, vol_update['status']) self.assertEqual(fields.GroupStatus.DELETED, model_update['status']) @ddt.data(('state', 'inconsistent_stopped'), ('primary', 'aux'), ('cycling_mode', 'multi'), ('cycle_period_seconds', '500')) @ddt.unpack def test_storwize_rep_group_update_error(self, state, value): """Test group update error.""" 
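# Each ddt case forces a single rccg attribute (state, primary, cycling_mode or cycle_period_seconds) to an unexpected value, so update_group is expected to report GroupStatus.ERROR without updating the volume lists.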
self.driver.configuration.set_override('replication_device', [self.rep_target]) self.driver.do_setup(self.ctxt) group = self._create_test_rccg(self.rccg_type, [self.mm_type.id]) mm_vol1, model_update = self._create_test_volume(self.mm_type) mm_vol2, model_update = self._create_test_volume(self.mm_type) self.driver.update_group(self.ctxt, group, [mm_vol1], []) rccg_name = self.driver._get_rccg_name(group) temp_state = self.sim._rcconsistgrp_list[rccg_name][state] self.sim._rcconsistgrp_list[rccg_name][state] = value (model_update, add_volumes_update, remove_volumes_update) = self.driver.update_group( self.ctxt, group, [mm_vol2], []) self.assertEqual(fields.GroupStatus.ERROR, model_update['status']) self.assertEqual([], add_volumes_update) self.assertEqual([], remove_volumes_update) self.sim._rcconsistgrp_list[rccg_name][state] = temp_state self.driver.delete_group(self.ctxt, group, [mm_vol1]) def test_storwize_rep_group_update(self): """Test group update.""" self.driver.configuration.set_override('replication_device', [self.rep_target]) self.driver.do_setup(self.ctxt) group = self._create_test_rccg(self.rccg_type, [self.mm_type.id]) mm_vol, model_update = self._create_test_volume(self.mm_type) gm_vol, model_update = self._create_test_volume(self.gm_type) with mock.patch.object(storwize_svc_common.StorwizeSVCCommonDriver, '_update_rccg_properties') as update_rccg_prop: add_vols = [mm_vol, gm_vol] (model_update, add_volumes_update, remove_volumes_update) = self.driver.update_group( self.ctxt, group, add_vols, []) update_rccg_prop.assert_called() self.assertEqual(1, update_rccg_prop.call_count) self.assertEqual(fields.GroupStatus.ERROR, model_update['status']) self.assertEqual([{'id': mm_vol.id, 'group_id': group.id}], add_volumes_update) self.assertEqual([], remove_volumes_update) self.driver.delete_group(self.ctxt, group, add_vols) group = self._create_test_rccg(self.rccg_type, [self.mm_type.id]) rccg_name = self.driver._get_rccg_name(group) # Create metro mirror replication. 
mm_vol1, model_update = self._create_test_volume(self.mm_type) rcrel = self.driver._helpers.get_relationship_info(mm_vol1.name) self.sim._rc_state_transition('wait', rcrel) mm_vol2, model_update = self._create_test_volume(self.mm_type) rcrel = self.driver._helpers.get_relationship_info(mm_vol2.name) self.sim._rc_state_transition('wait', rcrel) mm_vol3, model_update = self._create_test_volume(self.mm_type) rcrel = self.driver._helpers.get_relationship_info(mm_vol3.name) self.sim._rc_state_transition('wait', rcrel) mm_vol4, model_update = self._create_test_volume(self.mm_type) rcrel = self.driver._helpers.get_relationship_info(mm_vol4.name) self.sim._rc_state_transition('wait', rcrel) with mock.patch.object(storwize_svc_common.StorwizeSVCCommonDriver, '_update_rccg_properties') as update_rccg_prop: add_vols = [mm_vol1, mm_vol2] (model_update, add_volumes_update, remove_volumes_update) = self.driver.update_group( self.ctxt, group, add_vols, []) update_rccg_prop.assert_called() self.assertEqual(2, update_rccg_prop.call_count) self.assertEqual( rccg_name, self.driver._helpers.get_rccg_info(mm_vol1.name)['name']) self.assertEqual( rccg_name, self.driver._helpers.get_rccg_info(mm_vol2.name)['name']) self.assertEqual(fields.GroupStatus.AVAILABLE, model_update['status']) self.assertEqual([{'id': mm_vol1.id, 'group_id': group.id}, {'id': mm_vol2.id, 'group_id': group.id}], add_volumes_update) self.assertEqual([], remove_volumes_update) add_vols = [mm_vol3, mm_vol4] rmv_vols = [mm_vol1, mm_vol2] (model_update, add_volumes_update, remove_volumes_update) = self.driver.update_group( self.ctxt, group, add_volumes=add_vols, remove_volumes=rmv_vols) # Validating the rccg property value from metadata of the volume # mm_vol3 that is added to a group self.assertEqual(rccg_name, mm_vol3.metadata['Consistency Group Name']) # Validating the rccg property value from metadata of the volume # mm_vol1 that is removed from a group exp_rccg_name = '' self.assertEqual(exp_rccg_name, mm_vol1.metadata['Consistency Group Name']) self.assertIsNone(self.driver._helpers.get_rccg_info(mm_vol1.name)) self.assertIsNone(self.driver._helpers.get_rccg_info(mm_vol2.name)) self.assertEqual( rccg_name, self.driver._helpers.get_rccg_info(mm_vol3.name)['name']) self.assertEqual( rccg_name, self.driver._helpers.get_rccg_info(mm_vol4.name)['name']) self.assertEqual(fields.GroupStatus.AVAILABLE, model_update['status']) self.assertEqual([{'id': mm_vol3.id, 'group_id': group.id}, {'id': mm_vol4.id, 'group_id': group.id}], add_volumes_update) self.assertEqual([{'id': mm_vol1.id, 'group_id': None}, {'id': mm_vol2.id, 'group_id': None}], remove_volumes_update) self.driver.delete_group(self.ctxt, group, [mm_vol1, mm_vol2, mm_vol3, mm_vol4]) def test_storwize_rep_volume_rccg_properties_update(self): """Test rep volume rccg_properties update.""" self.driver.configuration.set_override('replication_device', [self.rep_target]) self.driver.do_setup(self.ctxt) group = self._create_test_rccg(self.rccg_type, [self.mm_type.id]) # Create metro mirror replication. 
mm_vol, model_update = self._create_test_volume(self.mm_type) # Validating the rccg property value of volume-metadata that is updated # during the call to the function _update_rccg_properties by passing # the parameter 'group' self.driver._update_rccg_properties(self.ctxt, mm_vol, group) rccg_name = self.driver._get_rccg_name(group) self.assertEqual(rccg_name, mm_vol.metadata['Consistency Group Name']) # Validating the rccg property value of volume-metadata that is updated # during the call to the function _update_rccg_properties by not # passing the parameter 'group' self.driver._update_rccg_properties(self.ctxt, mm_vol) exp_rccg_name = "" self.assertEqual(exp_rccg_name, mm_vol.metadata['Consistency Group Name']) @mock.patch.object(storwize_svc_common.StorwizeSSH, 'startrcconsistgrp') def test_storwize_enable_replication_error(self, startrccg): self.driver.configuration.set_override('replication_device', [self.rep_target]) self.driver.do_setup(self.ctxt) group = self._create_test_rccg(self.rccg_type, [self.mm_type.id]) rccg_name = self.driver._get_rccg_name(group) exp_mod_upd = {'replication_status': fields.ReplicationStatus.ENABLED} exp_mod_upd_err = {'replication_status': fields.ReplicationStatus.ERROR} # enable replication on empty group model_update, volumes_update = self.driver.enable_replication( self.ctxt, group, []) self.assertEqual(exp_mod_upd_err, model_update) self.assertEqual([], volumes_update) self.assertFalse(startrccg.called) # Create metro mirror replication. mm_vol1, model_update = self._create_test_volume(self.mm_type) vols = [mm_vol1] self.driver.update_group(self.ctxt, group, vols, []) exp_vols_upd = [ {'id': mm_vol1['id'], 'replication_status': exp_mod_upd['replication_status']}] exp_vols_upd_err = [ {'id': mm_vol1['id'], 'replication_status': exp_mod_upd_err['replication_status']}] with mock.patch.object(storwize_svc_common.StorwizeSSH, 'lsrcconsistgrp', side_effect=[None, {'primary': 'master', 'relationship_count': '1'}, {'primary': 'aux', 'relationship_count': '1'}, {'primary': 'master', 'relationship_count': '1'}]): startrccg.side_effect = [ None, None, exception.VolumeBackendAPIException(data='CMMVC6372W')] model_update, volumes_update = self.driver.enable_replication( self.ctxt, group, vols) self.assertEqual(exp_mod_upd_err, model_update) self.assertEqual(exp_vols_upd_err, volumes_update) self.assertFalse(startrccg.called) model_update, volumes_update = self.driver.enable_replication( self.ctxt, group, vols) self.assertEqual(exp_mod_upd, model_update) self.assertEqual(exp_vols_upd, volumes_update) startrccg.assert_called_with(rccg_name, 'master') model_update, volumes_update = self.driver.enable_replication( self.ctxt, group, vols) self.assertEqual(exp_mod_upd, model_update) self.assertEqual(exp_vols_upd, volumes_update) startrccg.assert_called_with(rccg_name, 'aux') model_update, volumes_update = self.driver.enable_replication( self.ctxt, group, vols) self.assertEqual(exp_mod_upd_err, model_update) self.assertEqual(exp_vols_upd_err, volumes_update) def test_storwize_enable_replication(self): self.driver.configuration.set_override('replication_device', [self.rep_target]) self.driver.do_setup(self.ctxt) group = self._create_test_rccg(self.rccg_type, [self.mm_type.id]) # Create metro mirror replication.
mm_vol1, model_update = self._create_test_volume(self.mm_type) mm_vol2, model_update = self._create_test_volume(self.mm_type) vols = [mm_vol1, mm_vol2] expect_model_update = {'replication_status': fields.ReplicationStatus.DISABLED} expect_vols_update = [ {'id': mm_vol1['id'], 'replication_status': expect_model_update['replication_status']}, {'id': mm_vol2['id'], 'replication_status': expect_model_update['replication_status']} ] self.driver.update_group(self.ctxt, group, vols, []) model_update, volumes_update = self.driver.disable_replication( self.ctxt, group, vols) self.assertEqual(expect_model_update, model_update) self.assertEqual(expect_vols_update, volumes_update) rccg_name = self.driver._get_rccg_name(group) rccg = self.driver._helpers.get_rccg(rccg_name) self.assertIn(rccg['state'], ['inconsistent_stopped', 'consistent_stopped']) self.driver.delete_group(self.ctxt, group, vols) def test_storwize_failover_group_error(self): self.driver.configuration.set_override('replication_device', [self.rep_target]) self.driver.do_setup(self.ctxt) group = self._create_test_rccg(self.rccg_type, [self.mm_type.id]) # Create metro mirror replication. mm_vol1, model_update = self._create_test_volume(self.mm_type) mm_vol2, model_update = self._create_test_volume(self.mm_type) vols = [mm_vol1, mm_vol2] self.driver._replica_enabled = False self.assertRaises(exception.UnableToFailOver, self.driver.failover_replication, self.ctxt, group, vols, self.rep_target['backend_id']) self.driver._replica_enabled = True self.assertRaises(exception.InvalidReplicationTarget, self.driver.failover_replication, self.ctxt, group, vols, self.fake_target['backend_id']) with mock.patch.object(storwize_svc_common.StorwizeSSH, 'stoprcconsistgrp') as stoprccg: stoprccg.side_effect = exception.VolumeBackendAPIException( data='CMMVC6071E') self.assertRaises(exception.UnableToFailOver, self.driver.failover_replication, self.ctxt, group, vols, self.rep_target['backend_id']) self.driver.delete_group(self.ctxt, group, vols) def test_storwize_failover_group_without_action(self): self.driver.configuration.set_override('replication_device', [self.rep_target]) self.driver.do_setup(self.ctxt) group = self._create_test_rccg(self.rccg_type, [self.mm_type.id]) mm_vol1, model_update = self._create_test_volume(self.mm_type) self.driver.update_group(self.ctxt, group, [mm_vol1], []) rccg_name = self.driver._get_rccg_name(group) self.sim._rccg_state_transition('wait', self.sim._rcconsistgrp_list[rccg_name]) self.sim._rcconsistgrp_list[rccg_name]['primary'] = 'aux' model_update = self.driver._rep_grp_failover(self.ctxt, group) self.assertEqual( {'replication_status': fields.ReplicationStatus.FAILED_OVER}, model_update) self.sim._rcconsistgrp_list[rccg_name]['primary'] = 'master' model_update = self.driver._rep_grp_failback(self.ctxt, group) self.assertEqual( {'replication_status': fields.ReplicationStatus.ENABLED}, model_update) self.driver.delete_group(self.ctxt, group, []) @ddt.data(({'replication_enabled': ' True', 'replication_type': ' metro'}, 'test_rep_metro'), ({'replication_enabled': ' True', 'replication_type': ' global'}, 'test_rep_gm')) @ddt.unpack def test_storwize_failover_replica_group(self, spec, type_name): self.driver.configuration.set_override('replication_device', [self.rep_target]) self.driver.do_setup(self.ctxt) type_ref = volume_types.create(self.ctxt, type_name, spec) rep_type = objects.VolumeType.get_by_id(self.ctxt, type_ref['id']) group = self._create_test_rccg(self.rccg_type, [rep_type.id]) vol1, model_update =
mm_vol1, model_update = self._create_test_volume(self.mm_type) mm_vol2, model_update = self._create_test_volume(self.mm_type) vols = [mm_vol1, mm_vol2] expect_model_update = {'replication_status': fields.ReplicationStatus.DISABLED} expect_vols_update = [ {'id': mm_vol1['id'], 'replication_status': expect_model_update['replication_status']}, {'id': mm_vol2['id'], 'replication_status': expect_model_update['replication_status']} ] self.driver.update_group(self.ctxt, group, vols, []) model_update, volumes_update = self.driver.disable_replication( self.ctxt, group, vols) self.assertEqual(expect_model_update, model_update) self.assertEqual(expect_vols_update, volumes_update) rccg_name = self.driver._get_rccg_name(group) rccg = self.driver._helpers.get_rccg(rccg_name) self.assertIn(rccg['state'], ['inconsistent_stopped', 'consistent_stopped']) self.driver.delete_group(self.ctxt, group, vols) def test_storwize_failover_group_error(self): self.driver.configuration.set_override('replication_device', [self.rep_target]) self.driver.do_setup(self.ctxt) group = self._create_test_rccg(self.rccg_type, [self.mm_type.id]) # Create metro mirror replication. mm_vol1, model_update = self._create_test_volume(self.mm_type) mm_vol2, model_update = self._create_test_volume(self.mm_type) vols = [mm_vol1, mm_vol2] self.driver._replica_enabled = False self.assertRaises(exception.UnableToFailOver, self.driver.failover_replication, self.ctxt, group, vols, self.rep_target['backend_id']) self.driver._replica_enabled = True self.assertRaises(exception.InvalidReplicationTarget, self.driver.failover_replication, self.ctxt, group, vols, self.fake_target['backend_id']) with mock.patch.object(storwize_svc_common.StorwizeSSH, 'stoprcconsistgrp') as stoprccg: stoprccg.side_effect = exception.VolumeBackendAPIException( data='CMMVC6071E') self.assertRaises(exception.UnableToFailOver, self.driver.failover_replication, self.ctxt, group, vols, self.rep_target['backend_id']) self.driver.delete_group(self.ctxt, group, vols) def test_storwize_failover_group_without_action(self): self.driver.configuration.set_override('replication_device', [self.rep_target]) self.driver.do_setup(self.ctxt) group = self._create_test_rccg(self.rccg_type, [self.mm_type.id]) mm_vol1, model_update = self._create_test_volume(self.mm_type) self.driver.update_group(self.ctxt, group, [mm_vol1], []) rccg_name = self.driver._get_rccg_name(group) self.sim._rccg_state_transition('wait', self.sim._rcconsistgrp_list[rccg_name]) self.sim._rcconsistgrp_list[rccg_name]['primary'] = 'aux' model_update = self.driver._rep_grp_failover(self.ctxt, group) self.assertEqual( {'replication_status': fields.ReplicationStatus.FAILED_OVER}, model_update) self.sim._rcconsistgrp_list[rccg_name]['primary'] = 'master' model_update = self.driver._rep_grp_failback(self.ctxt, group) self.assertEqual( {'replication_status': fields.ReplicationStatus.ENABLED}, model_update) self.driver.delete_group(self.ctxt, group, []) @ddt.data(({'replication_enabled': ' True', 'replication_type': ' metro'}, 'test_rep_metro'), ({'replication_enabled': ' True', 'replication_type': ' global'}, 'test_rep_gm')) @ddt.unpack def test_storwize_failover_replica_group(self, spec, type_name): self.driver.configuration.set_override('replication_device', [self.rep_target]) self.driver.do_setup(self.ctxt) type_ref = volume_types.create(self.ctxt, type_name, spec) rep_type = objects.VolumeType.get_by_id(self.ctxt, type_ref['id']) group = self._create_test_rccg(self.rccg_type, [rep_type.id]) vol1, model_update = 
self._create_test_volume(rep_type) vol2, model_update = self._create_test_volume(rep_type) vol2['status'] = 'in-use' vol3, model_update = self._create_test_volume(rep_type) vol3['status'] = 'available' vol3['previous_status'] = 'in-use' vols = [vol1, vol2, vol3] self.driver.update_group(self.ctxt, group, vols, []) rccg_name = self.driver._get_rccg_name(group) self.sim._rccg_state_transition('wait', self.sim._rcconsistgrp_list[rccg_name]) expected_list = [{'id': vol1['id'], 'replication_status': fields.ReplicationStatus.FAILED_OVER}, {'id': vol2['id'], 'replication_status': fields.ReplicationStatus.FAILED_OVER}, {'id': vol3['id'], 'replication_status': fields.ReplicationStatus.FAILED_OVER}] model_update, volumes_model_update = self.driver.failover_replication( self.ctxt, group, vols, self.rep_target['backend_id']) self.assertEqual( {'replication_status': fields.ReplicationStatus.FAILED_OVER}, model_update) self.assertEqual(expected_list, volumes_model_update) self.assertIsNone(self.driver._active_backend_id) self.assertEqual(self.driver._master_backend_helpers, self.driver._helpers) rccg = self.driver._helpers.get_rccg(rccg_name) self.assertEqual('aux', rccg['primary']) group.replication_status = fields.ReplicationStatus.FAILED_OVER model_update, volumes_model_update = self.driver.failover_replication( self.ctxt, group, vols, None) self.assertEqual( {'replication_status': fields.ReplicationStatus.FAILED_OVER}, model_update) self.assertEqual(expected_list, volumes_model_update) self.assertIsNone(self.driver._active_backend_id) self.assertEqual(self.driver._master_backend_helpers, self.driver._helpers) rccg = self.driver._helpers.get_rccg(rccg_name) self.assertEqual('aux', rccg['primary']) self.driver.delete_group(self.ctxt, group, vols) def test_failover_replica_group_by_force_access(self): self.driver.do_setup(self.ctxt) group = self._create_test_rccg(self.rccg_type, [self.mm_type.id]) mm_vol1, model_update = self._create_test_volume(self.mm_type) self.driver.update_group(self.ctxt, group, [mm_vol1], []) rccg_name = self.driver._get_rccg_name(group) self.sim._rccg_state_transition('wait', self.sim._rcconsistgrp_list[rccg_name]) with mock.patch.object(storwize_svc_common.StorwizeSSH, 'startrcconsistgrp') as startrcconsistgrp: self.driver.failover_replication(self.ctxt, group, [mm_vol1], None) startrcconsistgrp.assert_called_once_with(rccg_name, 'aux') with mock.patch.object(storwize_svc_common.StorwizeSSH, 'stoprcconsistgrp') as stoprccg: stoprccg.side_effect = exception.VolumeBackendAPIException( data='CMMVC6071E') self.assertRaises(exception.UnableToFailOver, self.driver.failover_replication, self.ctxt, group, [mm_vol1], self.rep_target['backend_id']) self.driver.delete_group(self.ctxt, group, [mm_vol1]) @ddt.data(({'replication_enabled': ' True', 'replication_type': ' metro'}, 'test_rep_metro'), ({'replication_enabled': ' True', 'replication_type': ' global'}, 'test_rep_gm_default')) @ddt.unpack def test_storwize_failback_replica_group(self, spec, type_name): self.driver.configuration.set_override('replication_device', [self.rep_target]) self.driver.do_setup(self.ctxt) type_ref = volume_types.create(self.ctxt, type_name, spec) rep_type = objects.VolumeType.get_by_id(self.ctxt, type_ref['id']) group = self._create_test_rccg(self.rccg_type, [rep_type.id]) vol1, model_update = self._create_test_volume(rep_type) vol2, model_update = self._create_test_volume(rep_type) vol2['status'] = 'in-use' vol3, model_update = self._create_test_volume(rep_type) vol3['status'] = 'available' 
vol3['previous_status'] = 'in-use' vols = [vol1, vol2, vol3] self.driver.update_group(self.ctxt, group, vols, []) rccg_name = self.driver._get_rccg_name(group) self.sim._rccg_state_transition('wait', self.sim._rcconsistgrp_list[rccg_name]) failover_expect = [{'id': vol1['id'], 'replication_status': fields.ReplicationStatus.FAILED_OVER}, {'id': vol2['id'], 'replication_status': fields.ReplicationStatus.FAILED_OVER}, {'id': vol3['id'], 'replication_status': fields.ReplicationStatus.FAILED_OVER}] model_update, volumes_model_update = self.driver.failover_replication( self.ctxt, group, vols, self.rep_target['backend_id']) self.assertEqual( {'replication_status': fields.ReplicationStatus.FAILED_OVER}, model_update) self.assertEqual(failover_expect, volumes_model_update) self.assertIsNone(self.driver._active_backend_id) self.assertEqual(self.driver._master_backend_helpers, self.driver._helpers) rccg = self.driver._helpers.get_rccg(rccg_name) self.assertEqual('aux', rccg['primary']) group.replication_status = fields.ReplicationStatus.FAILED_OVER model_update, volumes_model_update = self.driver.failover_replication( self.ctxt, group, vols, None) self.assertEqual( {'replication_status': fields.ReplicationStatus.FAILED_OVER}, model_update) self.assertEqual(failover_expect, volumes_model_update) self.assertIsNone(self.driver._active_backend_id) self.assertEqual(self.driver._master_backend_helpers, self.driver._helpers) rccg = self.driver._helpers.get_rccg(rccg_name) self.assertEqual('aux', rccg['primary']) self.sim._rccg_state_transition('wait', self.sim._rcconsistgrp_list[rccg_name]) vol1['status'] = 'available' vol1['previous_status'] = 'available' vol2['status'] = 'available' vol2['previous_status'] = 'in-use' vol3['status'] = 'in-use' vol3['previous_status'] = 'in-use' failback_expect = [{'id': vol1['id'], 'replication_status': fields.ReplicationStatus.ENABLED}, {'id': vol2['id'], 'replication_status': fields.ReplicationStatus.ENABLED}, {'id': vol3['id'], 'replication_status': fields.ReplicationStatus.ENABLED}] self.driver._active_backend_id = self.rep_target['backend_id'] model_update, volumes_model_update = self.driver.failover_replication( self.ctxt, group, vols, 'default') self.assertEqual( {'replication_status': fields.ReplicationStatus.ENABLED}, model_update) self.assertEqual(failback_expect, volumes_model_update) rccg = self.driver._helpers.get_rccg(rccg_name) self.assertEqual('master', rccg['primary']) group.replication_status = fields.ReplicationStatus.ENABLED model_update, volumes_model_update = self.driver.failover_replication( self.ctxt, group, vols, 'default') self.assertEqual( {'replication_status': fields.ReplicationStatus.ENABLED}, model_update) self.assertEqual(failback_expect, volumes_model_update) rccg = self.driver._helpers.get_rccg(rccg_name) self.assertEqual('master', rccg['primary']) self.driver.delete_group(self.ctxt, group, vols) @mock.patch.object(storwize_svc_common.StorwizeSSH, 'lsrcconsistgrp') @mock.patch.object(storwize_svc_common.StorwizeSSH, 'startrcconsistgrp') def test_sync_replica_group_with_aux(self, startrccg, lsrccg): self.driver.configuration.set_override('replication_device', [self.rep_target]) self.driver.do_setup(self.ctxt) rccg_name = 'fakerccg' sync_state = {'state': storwize_const.REP_CONSIS_SYNC, 'primary': 'fake', 'relationship_count': '1'} sync_copying_state = {'state': storwize_const.REP_CONSIS_COPYING, 'primary': 'fake', 'relationship_count': '1'} disconn_state = {'state': storwize_const.REP_IDL_DISC, 'primary': 'master', 'relationship_count': 
'1'} stop_state = {'state': storwize_const.REP_CONSIS_STOP, 'primary': 'aux', 'relationship_count': '1'} lsrccg.side_effect = [None, sync_state, sync_copying_state, disconn_state, stop_state] self.driver._sync_with_aux_grp(self.ctxt, rccg_name) self.assertFalse(startrccg.called) self.driver._sync_with_aux_grp(self.ctxt, rccg_name) self.assertFalse(startrccg.called) self.driver._sync_with_aux_grp(self.ctxt, rccg_name) self.assertFalse(startrccg.called) self.driver._sync_with_aux_grp(self.ctxt, rccg_name) startrccg.assert_called_once_with(rccg_name, 'master') self.driver._sync_with_aux_grp(self.ctxt, rccg_name) startrccg.assert_called_with(rccg_name, 'aux') @mock.patch.object(storwize_svc_common.StorwizeHelpers, 'initialize_host_info') @mock.patch.object(storwize_svc_common.StorwizeHelpers, 'get_host_from_connector') @mock.patch.object(storwize_svc_common.StorwizeHelpers, 'check_vol_mapped_to_host') def test_get_map_info_from_connector(self, is_mapped, get_host_from_conn, initialize_host_info): self.driver.configuration.set_override('replication_device', [self.rep_target]) self.driver.do_setup(self.ctxt) is_mapped.side_effect = [False, False, False, False, False] get_host_from_conn.side_effect = [None, 'fake-host', 'master-host', exception.VolumeBackendAPIException, 'master-host', None, None, 'aux-host'] non_rep_vol, model_update = self._create_test_volume( self.non_replica_type) non_rep_vol['status'] = 'in-use' mm_vol, model_update = self._create_test_volume(self.mm_type) mm_vol['status'] = 'in-use' connector = {} (info, host_name, vol_name, backend_helper, node_state) = self.driver._get_map_info_from_connector( mm_vol, connector, iscsi=False) self.assertEqual(info, {}) self.assertIsNone(host_name) self.assertEqual(vol_name, mm_vol.name) self.assertEqual(self.driver._master_backend_helpers, backend_helper) self.assertEqual(self.driver._master_state, node_state) initialize_host_info.assert_called() self.assertEqual(1, initialize_host_info.call_count) connector = {'host': 'storwize-svc-host', 'wwnns': ['20000090fa17311e', '20000090fa17311f'], 'wwpns': ['ff00000000000000', 'ff00000000000001'], 'initiator': 'iqn.1993-08.org.debian:01:eac5ccc1aaa'} self.assertRaises(exception.VolumeDriverException, self.driver._get_map_info_from_connector, non_rep_vol, connector, False) (info, host_name, vol_name, backend_helper, node_state) = self.driver._get_map_info_from_connector( non_rep_vol, connector, iscsi=False) self.assertEqual(info['driver_volume_type'], 'fibre_channel') self.assertEqual(host_name, 'fake-host') self.assertEqual(vol_name, non_rep_vol.name) self.assertEqual(self.driver._master_backend_helpers, backend_helper) self.assertEqual(self.driver._master_state, node_state) (info, host_name, vol_name, backend_helper, node_state) = self.driver._get_map_info_from_connector( mm_vol, connector, iscsi=True) self.assertEqual(info['driver_volume_type'], 'iscsi') self.assertIsNone(host_name) self.assertIsNone(vol_name) self.assertIsNone(backend_helper) self.assertIsNone(node_state) self.assertRaises(exception.VolumeDriverException, self.driver._get_map_info_from_connector, mm_vol, connector, False) (info, host_name, vol_name, backend_helper, node_state) = self.driver._get_map_info_from_connector( mm_vol, connector, iscsi=False) self.assertEqual(info['driver_volume_type'], 'fibre_channel') self.assertEqual(host_name, 'aux-host') self.assertEqual(vol_name, storwize_const.REPLICA_AUX_VOL_PREFIX + mm_vol.name) self.assertEqual(self.driver._aux_backend_helpers, backend_helper) 
self.assertEqual(self.driver._aux_state, node_state) @ddt.data(({'consistent_group_replication_enabled': ' True'}, {'replication_enabled': ' False'}), ({'consistent_group_replication_enabled': ' True'}, {'replication_enabled': ' True', 'replication_type': ' gmcv'})) @ddt.unpack def test_storwize_group_create_with_replication(self, grp_spec, vol_spec): self.driver.configuration.set_override('replication_device', [self.rep_target]) self.driver.do_setup(self.ctxt) """Test group create.""" gr_type_ref = group_types.create(self.ctxt, 'gr_type', grp_spec) gr_type = objects.GroupType.get_by_id(self.ctxt, gr_type_ref['id']) vol_type_ref = volume_types.create(self.ctxt, 'vol_type', vol_spec) group = testutils.create_group(self.ctxt, group_type_id=gr_type.id, volume_type_ids=[vol_type_ref['id']]) model_update = self.driver.create_group(self.ctxt, group) if vol_spec['replication_enabled'] == ' True': self.assertEqual(fields.GroupStatus.AVAILABLE, model_update['status']) else: self.assertEqual(fields.GroupStatus.ERROR, model_update['status']) @ddt.data((None, None), (None, SVC_TARGET_CHILD_POOL), (SVC_SOURCE_CHILD_POOL, None), (SVC_SOURCE_CHILD_POOL, SVC_TARGET_CHILD_POOL)) @mock.patch('oslo_service.loopingcall.FixedIntervalLoopingCall', new=testutils.ZeroIntervalLoopingCall) @ddt.unpack def test_create_group_from_src_with_gmcv_volume_with_childpool( self, svc_src_childpool, svc_tgt_childpool): # Set replication target self.driver.configuration.set_override('replication_device', [self.rep_target]) self.driver.do_setup(self.ctxt) # Create new volume_type template for gmcv pool = _get_test_pool() spec = {'replication_enabled': ' True', 'replication_type': ' gmcv', 'drivers:storwize_svc_src_child_pool': svc_src_childpool, 'drivers:storwize_svc_target_child_pool': svc_tgt_childpool} gmcv_childpool_type = self._create_replica_volume_type( False, opts=spec, vol_type_name='test_gmcv_childpool_type') # Create source group src_group = testutils.create_group( self.ctxt, volume_type_ids=[gmcv_childpool_type.id], group_type_id=self.rccg_type.id) model_update = self.driver.create_group(self.ctxt, src_group) self.assertEqual(fields.GroupStatus.AVAILABLE, model_update['status']) self.assertEqual(fields.ReplicationStatus.ENABLED, model_update['replication_status']) # Create gmcv volume src_volume = ( testutils.create_volume(self.ctxt, volume_type_id=gmcv_childpool_type.id, group_id=src_group.id, host='openstack@svc#%s' % pool)) model_update = self.driver.create_volume(src_volume) self.assertEqual(fields.ReplicationStatus.ENABLED, model_update['replication_status']) self._validate_replic_vol_creation(src_volume, True) # Check source gmcv change volumes are created on child storage pools src_chg_vol_storage_pool = ( svc_src_childpool if svc_src_childpool else _get_test_pool()) tgt_chg_vol_storage_pool = ( svc_tgt_childpool if svc_tgt_childpool else _get_test_pool()) src_change_vol_name = (storwize_const.REPLICA_CHG_VOL_PREFIX + src_volume.name) src_childpool_vols = self._get_pool_volumes(src_chg_vol_storage_pool) self.assertIn(src_change_vol_name, src_childpool_vols) tgt_change_vol_name = (storwize_const.REPLICA_CHG_VOL_PREFIX + storwize_const.REPLICA_AUX_VOL_PREFIX + src_volume.name) tgt_childpool_vols = self._get_pool_volumes(tgt_chg_vol_storage_pool) self.assertIn(tgt_change_vol_name, tgt_childpool_vols) rcrel = self.driver._helpers.get_relationship_info(src_volume.name) self.sim._rc_state_transition('wait', rcrel) src_volumes = self.db.volume_get_all_by_generic_group( self.ctxt.elevated(), src_group.id) # Add 
volume to source group add_volumes = [src_volume] delete_volumes = [] (model_update, add_volumes_update, remove_volumes_update) = self.driver.update_group(self.ctxt, src_group, add_volumes, delete_volumes) self.assertEqual(fields.GroupStatus.AVAILABLE, model_update['status']) self.assertEqual([{'id': src_volume.id, 'group_id': src_group.id}], add_volumes_update) self.assertEqual([], remove_volumes_update) # Create clone group from source group clone_group = ( testutils.create_group( self.ctxt, volume_type_ids=[gmcv_childpool_type.id], group_type_id=self.rccg_type.id)) clone_volume = ( testutils.create_volume(self.ctxt, volume_type_id=gmcv_childpool_type.id, group_id=clone_group.id, host='openstack@svc#%s' % pool)) clone_volumes = self.db.volume_get_all_by_generic_group( self.ctxt.elevated(), clone_group.id) with mock.patch.object( storwize_svc_common.StorwizeSVCCommonDriver, '_update_rccg_properties'): model_update, volumes_model_update = ( self.driver.create_group_from_src(self.ctxt, clone_group, clone_volumes, None, None, src_group, src_volumes)) self.assertEqual(fields.GroupStatus.AVAILABLE, model_update['status']) for vol_model_update in volumes_model_update: self.assertEqual(fields.VolumeStatus.AVAILABLE, vol_model_update['status']) src_chg_vol_storage_pool = ( svc_src_childpool if svc_src_childpool else _get_test_pool()) tgt_chg_vol_storage_pool = ( svc_tgt_childpool if svc_tgt_childpool else _get_test_pool()) src_change_vol_name = (storwize_const.REPLICA_CHG_VOL_PREFIX + clone_volume.name) src_childpool_vols = self._get_pool_volumes(src_chg_vol_storage_pool) self.assertIn(src_change_vol_name, src_childpool_vols) tgt_change_vol_name = (storwize_const.REPLICA_CHG_VOL_PREFIX + storwize_const.REPLICA_AUX_VOL_PREFIX + clone_volume.name) tgt_childpool_vols = self._get_pool_volumes(tgt_chg_vol_storage_pool) self.assertIn(tgt_change_vol_name, tgt_childpool_vols) # Create group from source_group_snapshot group_from_src_group_snapshot = ( testutils.create_group( self.ctxt, volume_type_ids=[gmcv_childpool_type.id], group_type_id=self.rccg_type.id)) vol_from_snapshot = ( testutils.create_volume(self.ctxt, volume_type_id=gmcv_childpool_type.id, group_id=group_from_src_group_snapshot.id, host='openstack@svc#%s' % pool)) group_volumes = self.db.volume_get_all_by_generic_group( self.ctxt.elevated(), group_from_src_group_snapshot.id) group_snapshot, snapshots = self._create_group_snapshot( src_group.id, group_type_id=self.rccg_type.id) with mock.patch.object( storwize_svc_common.StorwizeSVCCommonDriver, '_update_rccg_properties'): model_update, volumes_model_update = ( self.driver.create_group_from_src( self.ctxt, group_from_src_group_snapshot, group_volumes, group_snapshot, snapshots, None, None)) self.assertEqual(fields.GroupStatus.AVAILABLE, model_update['status']) for vol_model_update in volumes_model_update: self.assertEqual(fields.VolumeStatus.AVAILABLE, vol_model_update['status']) src_chg_vol_storage_pool = ( svc_src_childpool if svc_src_childpool else _get_test_pool()) tgt_chg_vol_storage_pool = ( svc_tgt_childpool if svc_tgt_childpool else _get_test_pool()) src_change_vol_name = (storwize_const.REPLICA_CHG_VOL_PREFIX + vol_from_snapshot.name) src_childpool_vols = self._get_pool_volumes(src_chg_vol_storage_pool) self.assertIn(src_change_vol_name, src_childpool_vols) tgt_change_vol_name = (storwize_const.REPLICA_CHG_VOL_PREFIX + storwize_const.REPLICA_AUX_VOL_PREFIX + vol_from_snapshot.name) tgt_childpool_vols = self._get_pool_volumes(tgt_chg_vol_storage_pool) 
self.assertIn(tgt_change_vol_name, tgt_childpool_vols) # Delete groups model_update = self.driver.delete_group(self.ctxt, group_from_src_group_snapshot, group_volumes) self.assertEqual(fields.GroupStatus.DELETED, model_update[0]['status']) with mock.patch( 'cinder.volume.volume_utils.' 'is_group_a_cg_snapshot_type') as is_group_a_cg_snapshot_type: is_group_a_cg_snapshot_type.return_value = True model_update = self.driver.delete_group_snapshot(self.ctxt, group_snapshot, snapshots) self.assertEqual(fields.GroupSnapshotStatus.DELETED, model_update[0]['status']) for snapshot in model_update[1]: self.assertEqual(fields.SnapshotStatus.DELETED, snapshot['status']) model_update = self.driver.delete_group(self.ctxt, clone_group, clone_volumes) self.assertEqual(fields.GroupStatus.DELETED, model_update[0]['status']) model_update = self.driver.delete_group(self.ctxt, src_group, src_volumes) self.assertEqual(fields.GroupStatus.DELETED, model_update[0]['status']) @ddt.data(({'volume_type': 'mm'}), ({'volume_type': 'gm'}), ({'volume_type': 'gmcv'}) ) @mock.patch('oslo_service.loopingcall.FixedIntervalLoopingCall', new=testutils.ZeroIntervalLoopingCall) @mock.patch.object(storwize_svc_common.StorwizeSVCCommonDriver, '_update_rccg_properties') def test_create_group_from_src_grp(self, vol_spec, update_rccg_properties): self.driver.configuration.set_override('replication_device', [self.rep_target]) pool = _get_test_pool() if vol_spec['volume_type'] == 'mm': vol_type_id = self.mm_type.id if vol_spec['volume_type'] == 'gm': vol_type_id = self.gm_type.id if vol_spec['volume_type'] == 'gmcv': vol_type_id = self.gmcv_default_type.id # create group in db src_group = testutils.create_group( self.ctxt, volume_type_ids=[vol_type_id], group_type_id=self.rccg_type.id) model_update = self.driver.create_group(self.ctxt, src_group) self.assertEqual(fields.ReplicationStatus.ENABLED, model_update['replication_status']) # Create volumes in db src_vol1 = ( testutils.create_volume(self.ctxt, volume_type_id=vol_type_id, group_id=src_group.id, host='openstack@svc#%s' % pool)) src_vol2 = ( testutils.create_volume(self.ctxt, volume_type_id=vol_type_id, group_id=src_group.id, host='openstack@svc#%s' % pool)) self.driver.create_volume(src_vol1) self.driver.create_volume(src_vol2) src_volumes = self.db.volume_get_all_by_generic_group( self.ctxt.elevated(), src_group.id) add_volumes = [src_vol1, src_vol2] del_volumes = [] # add volumes to group (model_update, add_volumes_update, remove_volumes_update) = self.driver.update_group(self.ctxt, src_group, add_volumes, del_volumes) # Clone group for source group clone_group = ( testutils.create_group( self.ctxt, volume_type_ids=[vol_type_id], group_type_id=self.rccg_type.id)) # Create volumes in db clone_vol1 = ( testutils.create_volume(self.ctxt, volume_type_id=vol_type_id, group_id=clone_group.id, host='openstack@svc#%s' % pool)) clone_vol2 = ( testutils.create_volume(self.ctxt, volume_type_id=vol_type_id, group_id=clone_group.id, host='openstack@svc#%s' % pool)) clone_volumes = self.db.volume_get_all_by_generic_group( self.ctxt.elevated(), clone_group.id) # Create group from source group model_update, volumes_model_update = ( self.driver.create_group_from_src(self.ctxt, clone_group, clone_volumes, None, None, src_group, src_volumes)) self.assertEqual('available', model_update['status']) for each_vol in volumes_model_update: self.assertEqual('available', each_vol['status']) model_update = self.driver.delete_group(self.ctxt, clone_group, [clone_vol1, clone_vol2]) 
self.assertEqual(fields.GroupStatus.DELETED, model_update[0]['status']) with (mock.patch.object(storwize_svc_common.StorwizeHelpers, 'create_rccg')) as create_rccg: with ((mock.patch.object( storwize_svc_common.StorwizeSVCCommonDriver, '_update_replication_grp'))) as update_rep_group: update_rep_group.return_value = (dict(), dict(), dict()) # Create group from source group model_update, volumes_model_update = ( self.driver.create_group_from_src(self.ctxt, clone_group, clone_volumes, None, None, src_group, src_volumes)) create_rccg.assert_called() self.assertEqual(1, create_rccg.call_count) update_rep_group.assert_called() self.assertEqual(1, update_rep_group.call_count) model_update = self.driver.delete_group( self.ctxt, clone_group, [clone_vol1, clone_vol2]) self.assertEqual(fields.GroupStatus.DELETED, model_update[0]['status']) model_update = self.driver.delete_group(self.ctxt, src_group, [src_vol1, src_vol2]) self.assertEqual(fields.GroupStatus.DELETED, model_update[0]['status']) @ddt.data(({'volume_type': 'mm'}), ({'volume_type': 'gm'}), ({'volume_type': 'gmcv'}) ) @mock.patch('oslo_service.loopingcall.FixedIntervalLoopingCall', new=testutils.ZeroIntervalLoopingCall) @mock.patch.object(storwize_svc_common.StorwizeSVCCommonDriver, '_update_rccg_properties') @mock.patch('cinder.volume.volume_utils.is_group_a_cg_snapshot_type') def test_create_group_from_grp_snapshot(self, vol_spec, is_group_a_cg_snap_type, update_rccg_properties): self.driver.configuration.set_override('replication_device', [self.rep_target]) is_group_a_cg_snap_type.return_value = False pool = _get_test_pool() if vol_spec['volume_type'] == 'mm': vol_type_id = self.mm_type.id if vol_spec['volume_type'] == 'gm': vol_type_id = self.gm_type.id if vol_spec['volume_type'] == 'gmcv': vol_type_id = self.gmcv_default_type.id # create group in db src_group = testutils.create_group( self.ctxt, volume_type_ids=[vol_type_id], group_type_id=self.rccg_type.id) model_update = self.driver.create_group(self.ctxt, src_group) self.assertEqual(fields.ReplicationStatus.ENABLED, model_update['replication_status']) # Create volumes in db src_vol1 = ( testutils.create_volume(self.ctxt, volume_type_id=vol_type_id, group_id=src_group.id, host='openstack@svc#%s' % pool)) src_vol2 = ( testutils.create_volume(self.ctxt, volume_type_id=vol_type_id, group_id=src_group.id, host='openstack@svc#%s' % pool)) self.driver.create_volume(src_vol1) self.driver.create_volume(src_vol2) add_volumes = [src_vol1, src_vol2] del_volumes = [] # add volumes to group (model_update, add_volumes_update, remove_volumes_update) = self.driver.update_group(self.ctxt, src_group, add_volumes, del_volumes) # Clone group for source group clone_group = ( testutils.create_group( self.ctxt, volume_type_ids=[vol_type_id], group_type_id=self.rccg_type.id)) # Create volumes in db clone_vol1 = ( testutils.create_volume(self.ctxt, volume_type_id=vol_type_id, group_id=clone_group.id, host='openstack@svc#%s' % pool)) clone_vol2 = ( testutils.create_volume(self.ctxt, volume_type_id=vol_type_id, group_id=clone_group.id, host='openstack@svc#%s' % pool)) clone_volumes = self.db.volume_get_all_by_generic_group( self.ctxt.elevated(), clone_group.id) # Create group snapshot group_snapshot, snapshots = self._create_group_snapshot( src_group.id, group_type_id=self.rccg_type.id) # Create group from source as group snapshot model_update, volumes_model_update = ( self.driver.create_group_from_src(self.ctxt, clone_group, clone_volumes, group_snapshot, snapshots, None, None)) 
self.assertEqual(fields.GroupStatus.AVAILABLE, model_update['status'], "CG create from src created passed") rccg_name = self.driver._get_rccg_name(clone_group) for each_vol in volumes_model_update: self.assertEqual('available', each_vol['status']) self.assertEqual(rccg_name, each_vol['metadata']['Consistency Group Name']) model_update = self.driver.delete_group(self.ctxt, clone_group, [clone_vol1, clone_vol2]) self.assertEqual(fields.GroupStatus.DELETED, model_update[0]['status']) with (mock.patch.object(storwize_svc_common.StorwizeHelpers, 'create_rccg')) as create_rccg: with ((mock.patch.object( storwize_svc_common.StorwizeSVCCommonDriver, '_update_replication_grp'))) as update_rep_group: update_rep_group.return_value = (dict(), dict(), dict()) # Create group from source as group snapshot model_update, volumes_model_update = ( self.driver.create_group_from_src(self.ctxt, clone_group, clone_volumes, group_snapshot, snapshots, None, None)) create_rccg.assert_called() self.assertEqual(1, create_rccg.call_count) update_rep_group.assert_called() self.assertEqual(1, update_rep_group.call_count) model_update = ( self.driver.delete_group(self.ctxt, clone_group, [clone_vol1, clone_vol2])) self.assertEqual(fields.GroupStatus.DELETED, model_update[0]['status']) is_group_a_cg_snap_type.return_value = True model_update = ( self.driver.delete_group_snapshot(self.ctxt, group_snapshot, snapshots)) self.assertEqual(fields.GroupSnapshotStatus.DELETED, model_update[0]['status']) for volume in model_update[1]: self.assertEqual(fields.SnapshotStatus.DELETED, volume['status']) @ddt.data(({'volume_type': 'mm'}), ({'volume_type': 'gm'}), ({'volume_type': 'gmcv'}) ) @mock.patch('oslo_service.loopingcall.FixedIntervalLoopingCall', new=testutils.ZeroIntervalLoopingCall) def test_create_group_from_src_grp_with_invalid_vol(self, vol_spec): self.driver.configuration.set_override('replication_device', [self.rep_target]) pool = _get_test_pool() if vol_spec['volume_type'] == 'mm': vol_type_id = self.mm_type.id if vol_spec['volume_type'] == 'gm': vol_type_id = self.gm_type.id if vol_spec['volume_type'] == 'gmcv': vol_type_id = self.gmcv_default_type.id # create group in db src_group = testutils.create_group( self.ctxt, volume_type_ids=[vol_type_id], group_type_id=self.rccg_type.id) model_update = self.driver.create_group(self.ctxt, src_group) self.assertEqual(fields.ReplicationStatus.ENABLED, model_update['replication_status']) # Create volumes in db src_vol1 = ( testutils.create_volume(self.ctxt, volume_type_id=vol_type_id, group_id=src_group.id, host='openstack@svc#%s' % pool)) src_vol2 = ( testutils.create_volume(self.ctxt, volume_type_id=vol_type_id, group_id=src_group.id, host='openstack@svc#%s' % pool)) src_volumes = self.db.volume_get_all_by_generic_group( self.ctxt.elevated(), src_group.id) add_volumes = [src_vol1, src_vol2] del_volumes = [] # add volumes to group (model_update, add_volumes_update, remove_volumes_update) = self.driver.update_group(self.ctxt, src_group, add_volumes, del_volumes) # Clone group for source group clone_group = ( testutils.create_group( self.ctxt, volume_type_ids=[vol_type_id], group_type_id=self.rccg_type.id)) # Create volumes in db clone_vol1 = ( testutils.create_volume(self.ctxt, volume_type_id=vol_type_id, group_id=clone_group.id, host='openstack@svc#%s' % pool)) clone_vol2 = ( testutils.create_volume(self.ctxt, volume_type_id=vol_type_id, group_id=clone_group.id, host='openstack@svc#%s' % pool)) clone_volumes = self.db.volume_get_all_by_generic_group( self.ctxt.elevated(), 
clone_group.id) self.assertRaises(exception.VolumeDriverException, self.driver.create_group_from_src, self.ctxt, clone_group, clone_volumes, None, None, src_group, src_volumes) # Delete group model_update = self.driver.delete_group(self.ctxt, clone_group, [clone_vol1, clone_vol2]) self.assertEqual(fields.GroupStatus.DELETED, model_update[0]['status']) @ddt.data(({'volume_type': 'mm'}), ({'volume_type': 'gm'}), ({'volume_type': 'gmcv'}) ) @mock.patch('oslo_service.loopingcall.FixedIntervalLoopingCall', new=testutils.ZeroIntervalLoopingCall) @mock.patch('cinder.volume.volume_utils.is_group_a_cg_snapshot_type') def test_create_grp_from_grp_snapshot_invalid(self, vol_spec, is_group_a_cg_snap_type): self.driver.configuration.set_override('replication_device', [self.rep_target]) is_group_a_cg_snap_type.return_value = False pool = _get_test_pool() if vol_spec['volume_type'] == 'mm': vol_type_id = self.mm_type.id if vol_spec['volume_type'] == 'gm': vol_type_id = self.gm_type.id if vol_spec['volume_type'] == 'gmcv': vol_type_id = self.gmcv_default_type.id # create group in db src_group = testutils.create_group( self.ctxt, volume_type_ids=[vol_type_id], group_type_id=self.rccg_type.id) model_update = self.driver.create_group(self.ctxt, src_group) self.assertEqual(fields.ReplicationStatus.ENABLED, model_update['replication_status']) # Create volumes in db src_vol1 = ( testutils.create_volume(self.ctxt, volume_type_id=vol_type_id, group_id=src_group.id, host='openstack@svc#%s' % pool)) src_vol2 = ( testutils.create_volume(self.ctxt, volume_type_id=vol_type_id, group_id=src_group.id, host='openstack@svc#%s' % pool)) add_volumes = [src_vol1, src_vol2] del_volumes = [] # add volumes to group (model_update, add_volumes_update, remove_volumes_update) = self.driver.update_group(self.ctxt, src_group, add_volumes, del_volumes) # Clone group for source group clone_group = ( testutils.create_group( self.ctxt, volume_type_ids=[vol_type_id], group_type_id=self.rccg_type.id)) # Create volumes in db clone_vol1 = ( testutils.create_volume(self.ctxt, volume_type_id=vol_type_id, group_id=clone_group.id, host='openstack@svc#%s' % pool)) clone_vol2 = ( testutils.create_volume(self.ctxt, volume_type_id=vol_type_id, group_id=clone_group.id, host='openstack@svc#%s' % pool)) # Exception raised while creating group snapshot self.assertRaises(exception.VolumeDriverException, self._create_group_snapshot, src_group.id, group_type_id=self.rccg_type.id) # Delete group model_update = self.driver.delete_group(self.ctxt, clone_group, [clone_vol1, clone_vol2]) self.assertEqual(fields.GroupStatus.DELETED, model_update[0]['status']) @ddt.data(({'volume_type': 'mm'}), ({'volume_type': 'gm'}), ({'volume_type': 'gmcv'}) ) @mock.patch('oslo_service.loopingcall.FixedIntervalLoopingCall', new=testutils.ZeroIntervalLoopingCall) @mock.patch('cinder.volume.volume_utils.is_group_a_cg_snapshot_type') def test_create_grp_from_empty_grp_snapshot_inv(self, vol_spec, is_group_a_cg_snap_type): self.driver.configuration.set_override('replication_device', [self.rep_target]) is_group_a_cg_snap_type.return_value = False pool = _get_test_pool() if vol_spec['volume_type'] == 'mm': vol_type_id = self.mm_type.id if vol_spec['volume_type'] == 'gm': vol_type_id = self.gm_type.id if vol_spec['volume_type'] == 'gmcv': vol_type_id = self.gmcv_default_type.id # create group in db src_group = testutils.create_group( self.ctxt, volume_type_ids=[vol_type_id], group_type_id=self.rccg_type.id) model_update = self.driver.create_group(self.ctxt, src_group) 
self.assertEqual(fields.ReplicationStatus.ENABLED, model_update['replication_status']) # Create volumes in db src_vol1 = ( testutils.create_volume(self.ctxt, volume_type_id=vol_type_id, group_id=src_group.id, host='openstack@svc#%s' % pool)) src_vol2 = ( testutils.create_volume(self.ctxt, volume_type_id=vol_type_id, group_id=src_group.id, host='openstack@svc#%s' % pool)) self.driver.create_volume(src_vol1) self.driver.create_volume(src_vol2) add_volumes = [src_vol1, src_vol2] del_volumes = [] # add volumes to group (model_update, add_volumes_update, remove_volumes_update) = self.driver.update_group(self.ctxt, src_group, add_volumes, del_volumes) # Clone group for source group clone_group = ( testutils.create_group( self.ctxt, volume_type_ids=[vol_type_id], group_type_id=self.rccg_type.id)) # Create volumes in db clone_vol1 = ( testutils.create_volume(self.ctxt, volume_type_id=vol_type_id, group_id=clone_group.id, host='openstack@svc#%s' % pool)) clone_vol2 = ( testutils.create_volume(self.ctxt, volume_type_id=vol_type_id, group_id=clone_group.id, host='openstack@svc#%s' % pool)) clone_volumes = self.db.volume_get_all_by_generic_group( self.ctxt.elevated(), clone_group.id) # Create group snapshot group_snapshot, snapshots = self._create_group_snapshot_in_db( src_group.id) # Create group from source as group snapshot self.assertRaises(exception.VolumeDriverException, self.driver.create_group_from_src, self.ctxt, clone_group, clone_volumes, group_snapshot, snapshots, None, None) # Delete group model_update = self.driver.delete_group(self.ctxt, clone_group, [clone_vol1, clone_vol2]) self.assertEqual(fields.GroupStatus.DELETED, model_update[0]['status']) @ddt.data(({'volume_type': 'gm_type'}), ({'volume_type': 'mm_type'}), ({'volume_type': 'gmcv_default_type'}), ({'volume_type': 'non_replica_type'}) ) def test_update_replication_properties_on_create_volume(self, vol_spec): self.driver.configuration.set_override('replication_device', [self.rep_target]) self.driver.do_setup(self.ctxt) vol_type = getattr(self, vol_spec['volume_type']) with mock.patch.object( storwize_svc_common.StorwizeSVCCommonDriver, '_update_replication_properties') as update_rep_properties: volume, model_update = self._create_test_volume(vol_type) if vol_type == self.non_replica_type: self.assertFalse(update_rep_properties.called) else: self.assertTrue(update_rep_properties.called) self.driver.delete_volume(volume) # Create metro mirror replication. volume, model_update = self._create_test_volume(vol_type) if 'metadata' in model_update: expected_sync_attr_value = '' expected_freeze_time = '' expected_primary = 'master' expected_mirroring_state = 'inconsistent_copying' self.assertEqual(model_update['metadata']['Sync'], expected_sync_attr_value) self.assertEqual(model_update['metadata']['Freeze Time'], expected_freeze_time) self.assertEqual(model_update['metadata']['Primary'], expected_primary) self.assertEqual(model_update['metadata']['Mirroring State'], expected_mirroring_state) self.driver.delete_volume(volume) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/volume/drivers/ibm/test_xiv_proxy.py0000664000175000017500000027533000000000000025702 0ustar00zuulzuul00000000000000# Copyright (c) 2016 IBM Corporation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # from unittest import mock from xml.etree import ElementTree from cinder import context from cinder import exception from cinder import objects from cinder.objects import fields from cinder.tests.unit import fake_constants as fake from cinder.tests.unit import test from cinder.tests.unit import utils as testutils from cinder.tests.unit.volume.drivers.ibm import fake_pyxcli import cinder.volume.drivers.ibm.ibm_storage as storage from cinder.volume.drivers.ibm.ibm_storage import cryptish from cinder.volume.drivers.ibm.ibm_storage.xiv_proxy import XIVProxy from cinder.volume.drivers.ibm.ibm_storage import xiv_replication from cinder.volume import group_types errors = fake_pyxcli.pyxcli_client.errors mirroring = fake_pyxcli.pyxcli_client.mirroring mirrored_entities = fake_pyxcli.pyxcli_client.mirroring.mirrored_entities test_mock = mock.MagicMock() module_patcher = mock.MagicMock() test_mock.cinder.exception = exception TEST_LOG_PREFIX = storage.XIV_LOG_PREFIX TEST_VOLUME = { 'name': 'BLA', 'id': 23, 'size': 17, 'group_id': fake.CONSISTENCY_GROUP_ID, } TEST_GROUP_SPECS = { 'group_replication_enabled': ' True', 'replication_type': 'sync', } TEST_EXTRA_SPECS = { 'replication_enabled': ' False', } TEST_EXTRA_SPECS_REPL = { 'replication_enabled': ' True', 'replication_type': 'sync', } TEST_WWPNS = ["50017380FE020160", "50017380FE020161", "50017380FE020162"] TEST_INITIATOR = 'c5507606d5680e05' TEST_CONNECTOR = { 'ip': '129.123.123.123', 'initiator': TEST_INITIATOR, 'wwpns': [TEST_INITIATOR], } TEST_TARGET_MAP = {TEST_INITIATOR: TEST_WWPNS} TEST_HOST_ID = 11 TEST_HOST_NAME = 'WTF32' TEST_CHAP_NAME = 'WTF64' TEST_CHAP_SECRET = 'V1RGNjRfXw==' FC_TARGETS_OPTIMIZED = [ "50017380FE020160", "50017380FE020190", "50017380FE020192"] FC_TARGETS_OPTIMIZED_WITH_HOST = [ "50017380FE020160", "50017380FE020192"] FC_TARGETS_BEFORE_SORTING = [ "50017380FE020160", "50017380FE020161", "50017380FE020162", "50017380FE020190", "50017380FE020191", "50017380FE020192"] FC_TARGETS_AFTER_SORTING = [ "50017380FE020190", "50017380FE020160", "50017380FE020191", "50017380FE020161", "50017380FE020162", "50017380FE020192"] FC_PORT_LIST_OUTPUT = [ {'component_id': '1:FC_Port:4:1', 'port_state': 'Online', 'role': 'Target', 'wwpn': '50017380FE020160'}, {'component_id': '1:FC_Port:5:1', 'port_state': 'Link Problem', 'role': 'Target', 'wwpn': '50017380FE020161'}, {'component_id': '1:FC_Port:6:1', 'port_state': 'Online', 'role': 'Initiator', 'wwpn': '50017380FE020162'}, {'component_id': '1:FC_Port:7:1', 'port_state': 'Link Problem', 'role': 'Initiator', 'wwpn': '50017380FE020163'}, {'component_id': '1:FC_Port:8:1', 'port_state': 'Online', 'role': 'Target', 'wwpn': '50017380FE020190'}, {'component_id': '1:FC_Port:9:1', 'port_state': 'Link Problem', 'role': 'Target', 'wwpn': '50017380FE020191'}, {'component_id': '1:FC_Port:4:1', 'port_state': 'Online', 'role': 'Target', 'wwpn': '50017380FE020192'}, {'component_id': '1:FC_Port:5:1', 'port_state': 'Link Problem', 'role': 'Initiator', 'wwpn': '50017380FE020193'}] HOST_CONNECTIVITY_LIST = [ {'host': 'nova-compute-c5507606d5680e05', 'host_port': '10000000C97D26DB', 'local_fc_port': 
'1:FC_Port:4:1', 'local_iscsi_port': '', 'module': '1:Module:4', 'type': 'FC'}] HOST_CONNECTIVITY_LIST_UNKNOWN_HOST = [ {'host': 'nova-compute-c5507606d5680f115', 'host_port': '10000000C97D26DE', 'local_fc_port': '1:FC_Port:3:1', 'local_iscsi_port': '', 'module': '1:Module:3', 'type': 'FC'}] REPLICA_ID = 'WTF32' REPLICA_IP = '1.2.3.4' REPLICA_USER = 'WTF64' REPLICA_PASSWORD = 'WTFWTF' REPLICA_POOL = 'WTF64' REPLICA_PARAMS = { 'san_ip': REPLICA_IP, 'san_login': REPLICA_USER, 'san_password': cryptish.encrypt(REPLICA_PASSWORD), 'san_clustername': REPLICA_POOL } TEST_POOL = [ {'name': 'WTF32', 'size': 10026, 'empty_space': 6925}] class XIVProxyTest(test.TestCase): """Tests the main Proxy driver""" def setUp(self): """import at setup to ensure module patchers are in place""" super(XIVProxyTest, self).setUp() self.proxy = XIVProxy self.version = "cinder" self.proxy.configuration = {} self.ctxt = context.get_admin_context() self.default_storage_info = { 'user': "WTF32", 'password': cryptish.encrypt("WTF32"), 'address': "WTF32", 'vol_pool': "WTF32", 'management_ips': "WTF32", 'system_id': "WTF32" } self.proxy.configuration['replication_device'] = { 'backend_id': REPLICA_ID, 'san_ip': REPLICA_IP, 'san_user': REPLICA_USER, 'san_password': REPLICA_PASSWORD, } @mock.patch("cinder.volume.drivers.ibm.ibm_storage." "xiv_proxy.pyxcli") def test_wrong_pyxcli(self, mock_pyxcli): driver = mock.MagicMock() driver.VERSION = "VERSION" p = self.proxy( self.default_storage_info, mock.MagicMock(), test_mock.cinder.exception, driver) mock_pyxcli.version = '1.1.4' self.assertRaises(test_mock.cinder.exception.CinderException, p.setup, {}) @mock.patch("cinder.volume.drivers.ibm.ibm_storage" ".xiv_proxy.socket.getfqdn", new=mock.MagicMock( return_value='test_hostname')) def test_setup_should_fail_if_password_is_not_encrypted(self): """Passing an unencrypted password should raise an error""" storage_info = self.default_storage_info.copy() storage_info['password'] = "WTF32" p = self.proxy(storage_info, mock.MagicMock(), test_mock.cinder.exception) self.assertRaises(test_mock.cinder.exception.InvalidParameterValue, p.setup, {}) @mock.patch("cinder.volume.drivers.ibm.ibm_storage.xiv_proxy.client." "XCLIClient") def test_setup_should_fail_if_pool_is_invalid(self, mock_xcli): """Setup should raise exception if pool is invalid""" driver = mock.MagicMock() driver.VERSION = "VERSION" p = self.proxy( self.default_storage_info, mock.MagicMock(), test_mock.cinder.exception, driver) cmd = mock_xcli.connect_multiendpoint_ssl.return_value.cmd cmd.pool_list.return_value.as_list = [] self.assertRaises(test_mock.cinder.exception.VolumeBackendAPIException, p.setup, {}) @mock.patch("cinder.volume.drivers.ibm.ibm_storage.xiv_proxy.client." "XCLIClient") @mock.patch("cinder.volume.drivers.ibm.ibm_storage.xiv_proxy.socket." "getfqdn", new=mock.MagicMock( return_value='test_hostname')) def test_setup_should_fail_if_credentials_are_invalid(self, mock_xcli): """Passing invalid credentials should raise an error""" driver = mock.MagicMock() driver.VERSION = "VERSION" p = self.proxy( self.default_storage_info, mock.MagicMock(), test_mock.cinder.exception, driver) mock_xcli.connect_multiendpoint_ssl = mock.MagicMock( side_effect=errors.CredentialsError( 'bla', 'bla', ElementTree.Element("bla"))) self.assertRaises(test_mock.cinder.exception.NotAuthorized, p.setup, {}) @mock.patch("cinder.volume.drivers.ibm.ibm_storage." "xiv_proxy.client.XCLIClient") @mock.patch("cinder.volume.drivers.ibm.ibm_storage." 
"xiv_proxy.socket.getfqdn", new=mock.MagicMock( return_value='test_hostname')) def test_setup_should_fail_if_connection_is_invalid(self, mock_xcli): """Passing an invalid host to the setup should raise an error""" driver = mock.MagicMock() driver.VERSION = "VERSION" p = self.proxy( self.default_storage_info, mock.MagicMock(), test_mock.cinder.exception, driver) mock_xcli.connect_multiendpoint_ssl = mock.MagicMock( side_effect=errors.ConnectionError( 'bla', 'bla', ElementTree.Element("bla"))) self.assertRaises(test_mock.cinder.exception.HostNotFound, p.setup, {}) @mock.patch("cinder.volume.drivers.ibm.ibm_storage.xiv_proxy." "client.XCLIClient") @mock.patch("cinder.volume.drivers.ibm.ibm_storage." "xiv_proxy.storage.get_online_iscsi_ports", mock.MagicMock(return_value=['WTF32'])) @mock.patch("cinder.volume.drivers.ibm.ibm_storage." "xiv_proxy.socket.getfqdn", new=mock.MagicMock( return_value='test_hostname')) def test_setup_should_set_iqn_and_portal(self, mock_xcli): """Test setup Setup should retrieve values from xcli and set the IQN and Portal """ p = self.proxy( self.default_storage_info, mock.MagicMock(), test_mock.cinder.exception) cmd = mock_xcli.connect_multiendpoint_ssl.return_value.cmd item = cmd.config_get.return_value.as_dict.return_value.__getitem__ item.return_value.value = "BLA" p.setup({}) self.assertEqual("BLA", p.meta.get('ibm_storage_iqn')) self.assertEqual("WTF32:3260", p.meta.get('ibm_storage_portal')) @mock.patch("cinder.volume.drivers.ibm.ibm_storage.xiv_proxy." "client.XCLIClient") @mock.patch("cinder.volume.drivers.ibm.ibm_storage." "xiv_proxy.storage.get_online_iscsi_ports", mock.MagicMock(return_value=['WTF32'])) @mock.patch("cinder.volume.drivers.ibm.ibm_storage." "xiv_proxy.socket.getfqdn", new=mock.MagicMock( return_value='test_hostname')) @mock.patch("cinder.volume.drivers.ibm.ibm_storage." "xiv_proxy.XIVProxy._get_target_params", mock.MagicMock(return_value=REPLICA_PARAMS)) @mock.patch("cinder.volume.drivers.ibm.ibm_storage." "xiv_proxy.XIVProxy._get_target", mock.MagicMock(return_value="BLABLA")) def test_setup_should_succeed_if_replica_is_set(self, mock_xcli): """Test setup Setup should succeed if replica is set """ p = self.proxy( self.default_storage_info, mock.MagicMock(), test_mock.cinder.exception) cmd = mock_xcli.connect_multiendpoint_ssl.return_value.cmd item = cmd.config_get.return_value.as_dict.return_value.__getitem__ item.return_value.value = "BLA" SCHEDULE_LIST_RESPONSE = { '00:01:00': {'interval': 120}, '00:02:00': {'interval': 300}, '00:05:00': {'interval': 600}, '00:10:00': {'interval': 1200}, } cmd = mock_xcli.connect_multiendpoint_ssl.return_value.cmd cmd.schedule_list.return_value\ .as_dict.return_value = SCHEDULE_LIST_RESPONSE p.setup({}) @mock.patch("cinder.volume.drivers.ibm.ibm_storage.xiv_proxy." "client.XCLIClient") @mock.patch("cinder.volume.drivers.ibm.ibm_storage." "xiv_proxy.storage.get_online_iscsi_ports", mock.MagicMock(return_value=['WTF32'])) @mock.patch("cinder.volume.drivers.ibm.ibm_storage." "xiv_proxy.socket.getfqdn", new=mock.MagicMock( return_value='test_hostname')) @mock.patch("cinder.volume.drivers.ibm.ibm_storage." "xiv_proxy.XIVProxy._get_target_params", mock.MagicMock(return_value=REPLICA_PARAMS)) @mock.patch("cinder.volume.drivers.ibm.ibm_storage." 
"xiv_proxy.XIVProxy._get_target", mock.MagicMock(return_value="BLABLA")) def test_setup_should_fail_if_schedule_create_fails(self, mock_xcli): """Test setup Setup should fail if replica is set and schedule_create fails """ p = self.proxy( self.default_storage_info, mock.MagicMock(), test_mock.cinder.exception) cmd = mock_xcli.connect_multiendpoint_ssl.return_value.cmd item = cmd.config_get.return_value.as_dict.return_value.__getitem__ item.return_value.value = "BLA" cmd.schedule_list.return_value.as_dict.return_value = {} cmd.schedule_create.side_effect = ( errors.XCLIError('bla')) self.assertRaises(exception.VolumeBackendAPIException, p.setup, {}) def test_get_volume_stats(self): driver = mock.MagicMock() driver.VERSION = "VERSION" p = self.proxy( self.default_storage_info, mock.MagicMock(), test_mock.cinder.exception, driver) p.ibm_storage_cli = mock.MagicMock() p.ibm_storage_cli.cmd.pool_list.return_value.as_list = TEST_POOL stats = p.get_volume_stats() self.assertEqual("up", stats['backend_state']) p.ibm_storage_cli.cmd.pool_list.return_value.as_list = None stats = p.get_volume_stats(refresh=True) self.assertEqual("down", stats['backend_state']) def test_create_volume_should_call_xcli(self): """Create volume should call xcli with the correct parameters""" driver = mock.MagicMock() driver.VERSION = "VERSION" p = self.proxy( self.default_storage_info, mock.MagicMock(), test_mock.cinder.exception, driver) p.ibm_storage_cli = mock.MagicMock() volume = testutils.create_volume( self.ctxt, size=16, display_name='WTF32', volume_type_id=self.vt['id']) p.create_volume(volume) p.ibm_storage_cli.cmd.vol_create.assert_called_once_with( vol=volume.name, size_blocks=storage.gigabytes_to_blocks(16), pool='WTF32') def test_create_volume_from_snapshot(self): driver = mock.MagicMock() driver.VERSION = "VERSION" p = self.proxy( self.default_storage_info, mock.MagicMock(), test_mock.cinder.exception, driver) p.ibm_storage_cli = mock.MagicMock() volume = testutils.create_volume( self.ctxt, size=16, display_name='WTF32', volume_type_id=self.vt['id']) snapshot = testutils.create_snapshot(self.ctxt, volume.id) p.create_volume_from_snapshot(volume, snapshot) p.ibm_storage_cli.cmd.vol_copy.assert_called_once_with( vol_src=snapshot.name, vol_trg=volume.name) def test_create_volume_should_fail_if_no_pool_space(self): """Test create volume Create volume should raise an error if there's no pool space left """ driver = mock.MagicMock() driver.VERSION = "VERSION" p = self.proxy( self.default_storage_info, mock.MagicMock(), test_mock.cinder.exception, driver) p.ibm_storage_cli = mock.MagicMock() p.ibm_storage_cli.cmd.vol_create.side_effect = ( errors.PoolOutOfSpaceError( 'bla', 'bla', ElementTree.Element('bla'))) volume = testutils.create_volume( self.ctxt, size=16, display_name='WTF32', volume_type_id=self.vt['id']) ex = getattr(p, "_get_exception")() self.assertRaises(ex, p.create_volume, volume) @mock.patch("cinder.volume.drivers.ibm.ibm_storage." "xiv_replication.VolumeReplication.create_replication", mock.MagicMock()) @mock.patch("cinder.volume.drivers.ibm.ibm_storage." "xiv_replication.GroupReplication.create_replication", mock.MagicMock()) @mock.patch("cinder.volume.drivers.ibm.ibm_storage." "xiv_proxy.XIVProxy._get_target_params", mock.MagicMock(return_value=REPLICA_PARAMS)) @mock.patch("cinder.volume.drivers.ibm.ibm_storage." 
"xiv_proxy.XIVProxy._get_target", mock.MagicMock(return_value="BLABLA")) def test_enable_replication(self): """Test enable_replication""" driver = mock.MagicMock() driver.VERSION = "VERSION" p = self.proxy( self.default_storage_info, mock.MagicMock(), test_mock.cinder.exception, driver) p.ibm_storage_cli = mock.MagicMock() p._call_remote_xiv_xcli = mock.MagicMock() p._update_consistencygroup = mock.MagicMock() p.targets = {'tgt1': 'info1'} group = self._create_test_group('WTF') vol = testutils.create_volume(self.ctxt, volume_type_id=self.vt['id']) ret = p.enable_replication(self.ctxt, group, [vol]) self.assertEqual(( {'replication_status': fields.ReplicationStatus.ENABLED}, [{'id': vol['id'], 'replication_status': fields.ReplicationStatus.ENABLED}]), ret) @mock.patch("cinder.volume.drivers.ibm.ibm_storage." "xiv_replication.VolumeReplication.create_replication", mock.MagicMock()) @mock.patch("cinder.volume.drivers.ibm.ibm_storage." "xiv_replication.GroupReplication.create_replication", mock.MagicMock()) @mock.patch("cinder.volume.drivers.ibm.ibm_storage." "xiv_proxy.XIVProxy._get_target_params", mock.MagicMock(return_value=REPLICA_PARAMS)) @mock.patch("cinder.volume.drivers.ibm.ibm_storage." "xiv_proxy.XIVProxy._get_target", mock.MagicMock(return_value="BLABLA")) @mock.patch("cinder.volume.group_types.get_group_type_specs", mock.MagicMock(return_value=TEST_GROUP_SPECS)) def test_enable_replication_remote_cg_exists(self): """Test enable_replication""" driver = mock.MagicMock() driver.VERSION = "VERSION" p = self.proxy( self.default_storage_info, mock.MagicMock(), test_mock.cinder.exception, driver) p.ibm_storage_cli = mock.MagicMock() p._call_remote_xiv_xcli = mock.MagicMock() p._update_consistencygroup = mock.MagicMock() p.targets = {'tgt1': 'info1'} error = errors.CgNameExistsError('bla', 'bla', ElementTree.Element('bla')) p._call_remote_xiv_xcli.cmd.cg_create.side_effect = error group = self._create_test_group('WTF') vol = testutils.create_volume(self.ctxt, volume_type_id=self.vt['id']) ret = p.enable_replication(self.ctxt, group, [vol]) self.assertEqual(( {'replication_status': fields.ReplicationStatus.ENABLED}, [{'id': vol['id'], 'replication_status': fields.ReplicationStatus.ENABLED}]), ret) @mock.patch("cinder.volume.drivers.ibm.ibm_storage." "xiv_replication.VolumeReplication.delete_replication", mock.MagicMock()) @mock.patch("cinder.volume.group_types.get_group_type_specs", mock.MagicMock(return_value=TEST_GROUP_SPECS)) def test_disable_replication(self): """Test disable_replication""" driver = mock.MagicMock() driver.VERSION = "VERSION" p = self.proxy( self.default_storage_info, mock.MagicMock(), test_mock.cinder.exception, driver) p.ibm_storage_cli = mock.MagicMock() p._call_remote_xiv_xcli = mock.MagicMock() p._update_consistencygroup = mock.MagicMock() group = self._create_test_group('WTF') ret = p.disable_replication(self.ctxt, group, []) self.assertEqual(( {'replication_status': fields.ReplicationStatus.DISABLED}, []), ret) @mock.patch("cinder.volume.drivers.ibm.ibm_storage." "xiv_proxy.XIVProxy._using_default_backend", mock.MagicMock(return_value=False)) @mock.patch("cinder.volume.drivers.ibm.ibm_storage." "xiv_proxy.XIVProxy._get_target_params", mock.MagicMock(return_value={'san_clustername': "master"})) @mock.patch("cinder.volume.drivers.ibm.ibm_storage." "xiv_proxy.XIVProxy._init_xcli", mock.MagicMock()) @mock.patch("cinder.volume.drivers.ibm.ibm_storage." 
"xiv_proxy.XIVProxy._init_xcli", mock.MagicMock()) @mock.patch("cinder.volume.group_types.get_group_type_specs", mock.MagicMock(return_value=TEST_GROUP_SPECS)) @mock.patch("cinder.volume.drivers.ibm.ibm_storage." "xiv_replication.GroupReplication.failover", mock.MagicMock(return_value=(True, 'good'))) def test_failover_replication_with_default(self): driver = mock.MagicMock() driver.VERSION = "VERSION" p = self.proxy( self.default_storage_info, mock.MagicMock(), test_mock.cinder.exception, driver) group = self._create_test_group('WTF') group.replication_status = fields.ReplicationStatus.FAILED_OVER vol = testutils.create_volume(self.ctxt, volume_type_id=self.vt['id']) group_update, vol_update = p.failover_replication(self.ctxt, group, [vol], 'default') updates = {'status': 'available'} self.assertEqual(({'replication_status': 'enabled'}, [{'id': vol['id'], 'updates': updates}]), (group_update, vol_update)) @mock.patch("cinder.volume.drivers.ibm.ibm_storage." "xiv_proxy.XIVProxy._using_default_backend", mock.MagicMock(return_value=True)) @mock.patch("cinder.volume.drivers.ibm.ibm_storage." "xiv_proxy.XIVProxy._get_target_params", mock.MagicMock(return_value={'san_clustername': "master"})) @mock.patch("cinder.volume.drivers.ibm.ibm_storage." "xiv_proxy.XIVProxy._init_xcli", mock.MagicMock()) @mock.patch("cinder.volume.group_types.get_group_type_specs", mock.MagicMock(return_value=TEST_GROUP_SPECS)) @mock.patch("cinder.volume.drivers.ibm.ibm_storage." "xiv_replication.GroupReplication.failover", mock.MagicMock(return_value=(True, 'good'))) def test_failover_replication(self): driver = mock.MagicMock() driver.VERSION = "VERSION" p = self.proxy( self.default_storage_info, mock.MagicMock(), test_mock.cinder.exception, driver) group = self._create_test_group('WTF') failed_over = fields.ReplicationStatus.FAILED_OVER group.replication_status = failed_over vol = testutils.create_volume(self.ctxt, volume_type_id=self.vt['id']) group_update, vol_update = p.failover_replication(self.ctxt, group, [vol], 'secondary_id') failed_over = fields.ReplicationStatus.FAILED_OVER updates = {'status': failed_over} self.assertEqual(({'replication_status': failed_over}, [{'id': vol['id'], 'updates': updates}]), (group_update, vol_update)) def test_failover_resource_no_mirror(self): driver = mock.MagicMock() driver.VERSION = "VERSION" p = self.proxy( self.default_storage_info, mock.MagicMock(), test_mock.cinder.exception, driver) recovery_mgr = mock.MagicMock() recovery_mgr.is_mirror_active = mock.MagicMock() recovery_mgr.is_mirror_active.return_value = False group = self._create_test_group('WTF') ret = xiv_replication.Replication(p)._failover_resource( group, recovery_mgr, mock.MagicMock, 'cg', True) msg = ("%(rep_type)s %(res)s: no active mirroring and can not " "failback" % {'rep_type': 'cg', 'res': group['name']}) self.assertEqual((False, msg), ret) def test_failover_resource_mirror(self): driver = mock.MagicMock() driver.VERSION = "VERSION" p = self.proxy( self.default_storage_info, mock.MagicMock(), test_mock.cinder.exception, driver) recovery_mgr = mock.MagicMock() recovery_mgr.is_mirror_active = mock.MagicMock() recovery_mgr.is_mirror_active.return_value = True group = self._create_test_group('WTF') ret = xiv_replication.Replication(p)._failover_resource( group, recovery_mgr, mock.MagicMock, 'cg', True) self.assertEqual((True, None), ret) def test_failover_resource_change_role(self): driver = mock.MagicMock() driver.VERSION = "VERSION" p = self.proxy( self.default_storage_info, mock.MagicMock(), 
test_mock.cinder.exception, driver) recovery_mgr = mock.MagicMock() recovery_mgr.is_mirror_active = mock.MagicMock() recovery_mgr.is_mirror_active.return_value = True recovery_mgr.switch_roles.side_effect = ( errors.XCLIError('')) failover_rep_mgr = mock.MagicMock() failover_rep_mgr.change_role = mock.MagicMock() group = self._create_test_group('WTF') xiv_replication.Replication(p)._failover_resource( group, recovery_mgr, failover_rep_mgr, 'cg', True) failover_rep_mgr.change_role.assert_called_once_with( resource_id=group['name'], new_role='Slave') @mock.patch("cinder.volume.drivers.ibm.ibm_storage." "xiv_proxy.XIVProxy._get_target_params", mock.MagicMock(return_value=REPLICA_PARAMS)) def test_pool_with_replication_failover_back(self): driver = mock.MagicMock() driver.VERSION = "VERSION" p = self.proxy( self.default_storage_info, mock.MagicMock(), test_mock.cinder.exception, driver) pool_name = p._get_backend_pool() self.assertEqual(self.default_storage_info['vol_pool'], pool_name) p_failback = self.proxy( self.default_storage_info, mock.MagicMock(), test_mock.cinder.exception, driver, REPLICA_ID) pool_name = p_failback._get_backend_pool() self.assertEqual(REPLICA_POOL, pool_name) @mock.patch("cinder.volume.volume_utils.is_group_a_cg_snapshot_type", mock.MagicMock(return_value=True)) def test_create_volume_with_consistency_group(self): """Test Create volume with consistency_group""" driver = mock.MagicMock() driver.VERSION = "VERSION" p = self.proxy( self.default_storage_info, mock.MagicMock(), test_mock.cinder.exception, driver) p.ibm_storage_cli = mock.MagicMock() p._cg_name_from_volume = mock.MagicMock(return_value="cg") vol_type = testutils.create_volume_type(self.ctxt, name='WTF') volume = testutils.create_volume( self.ctxt, size=16, volume_type_id=vol_type.id) grp = self._create_test_group('WTF') volume.group = grp p.create_volume(volume) p.ibm_storage_cli.cmd.vol_create.assert_called_once_with( vol=volume['name'], size_blocks=storage.gigabytes_to_blocks(16), pool='WTF32') p.ibm_storage_cli.cmd.cg_add_vol.assert_called_once_with( vol=volume['name'], cg='cg') @mock.patch('pyxcli.mirroring.mirrored_entities.' 'MirroredEntities', mock.MagicMock()) @mock.patch('cinder.volume.volume_utils.is_group_a_type', mock.MagicMock(return_value=True)) @mock.patch("cinder.volume.drivers.ibm.ibm_storage." "xiv_proxy.XIVProxy._get_extra_specs", mock.MagicMock(return_value=TEST_EXTRA_SPECS_REPL)) @mock.patch("cinder.volume.drivers.ibm.ibm_storage." "xiv_replication.VolumeReplication.create_replication", mock.MagicMock()) def test_create_volume_with_consistency_group_diff_state(self): """Test Create volume with consistency_group but diff state""" driver = mock.MagicMock() driver.VERSION = "VERSION" p = self.proxy( self.default_storage_info, mock.MagicMock(), test_mock.cinder.exception, driver) p.ibm_storage_cli = mock.MagicMock() p._cg_name_from_volume = mock.MagicMock(return_value="cg") vol_type = testutils.create_volume_type(self.ctxt, name='WTF') volume = testutils.create_volume( self.ctxt, size=16, volume_type_id=vol_type.id, host=self._get_test_host()['name']) grp = self._create_test_group('WTF') grp['replication_status'] = 'enabled' volume.group = grp ex = getattr(p, "_get_exception")() self.assertRaises(ex, p.create_volume, volume) @mock.patch("cinder.volume.drivers.ibm.ibm_storage." "xiv_replication.VolumeReplication.create_replication", mock.MagicMock()) @mock.patch("cinder.volume.drivers.ibm.ibm_storage." 
"xiv_proxy.XIVProxy._get_qos_specs", mock.MagicMock(return_value=None)) @mock.patch("cinder.volume.drivers.ibm.ibm_storage." "xiv_proxy.XIVProxy._get_extra_specs", mock.MagicMock(return_value=TEST_EXTRA_SPECS_REPL)) def test_create_volume_with_replication(self): """Test Create volume with replication""" driver = mock.MagicMock() driver.VERSION = "VERSION" p = self.proxy( self.default_storage_info, mock.MagicMock(), test_mock.cinder.exception, driver) p.ibm_storage_cli = mock.MagicMock() volume = testutils.create_volume( self.ctxt, size=16, display_name='WTF32', volume_type_id=self.vt['id']) volume.group = None p.create_volume(volume) @mock.patch("cinder.volume.drivers.ibm.ibm_storage." "xiv_replication.VolumeReplication.create_replication", mock.MagicMock()) @mock.patch("cinder.volume.drivers.ibm.ibm_storage." "xiv_proxy.XIVProxy._get_qos_specs", mock.MagicMock(return_value=None)) @mock.patch("cinder.volume.drivers.ibm.ibm_storage." "xiv_proxy.XIVProxy._get_extra_specs", mock.MagicMock(return_value=TEST_EXTRA_SPECS_REPL)) def test_create_volume_with_replication_and_cg(self): """Test Create volume with replication and CG""" driver = mock.MagicMock() driver.VERSION = "VERSION" p = self.proxy( self.default_storage_info, mock.MagicMock(), test_mock.cinder.exception, driver) p.ibm_storage_cli = mock.MagicMock() volume = testutils.create_volume( self.ctxt, size=16, display_name='WTF32', volume_type_id=self.vt['id']) grp = testutils.create_group(self.ctxt, name='bla', group_type_id='1') volume.group = grp ex = getattr(p, "_get_exception")() self.assertRaises(ex, p.create_volume, volume) @mock.patch("cinder.volume.drivers.ibm.ibm_storage." "xiv_proxy.XIVProxy._get_qos_specs", mock.MagicMock(return_value=None)) @mock.patch("cinder.volume.drivers.ibm.ibm_storage." "xiv_proxy.XIVProxy._get_extra_specs", mock.MagicMock(return_value=TEST_EXTRA_SPECS_REPL)) def test_create_volume_with_replication_multiple_targets(self): """Test Create volume with replication and multiple targets""" driver = mock.MagicMock() driver.VERSION = "VERSION" p = self.proxy( self.default_storage_info, mock.MagicMock(), test_mock.cinder.exception, driver) p.ibm_storage_cli = mock.MagicMock() volume = testutils.create_volume( self.ctxt, size=16, display_name='WTF32', volume_type_id=self.vt['id']) volume.group = None ex = getattr(p, "_get_exception")() self.assertRaises(ex, p.create_volume, volume) def test_delete_volume_should_pass_the_correct_parameters(self): """Delete volume should call xcli with the correct parameters""" driver = mock.MagicMock() driver.VERSION = "VERSION" p = self.proxy( self.default_storage_info, mock.MagicMock(), test_mock.cinder.exception, driver) p.ibm_storage_cli = mock.MagicMock() p.ibm_storage_cli.cmd.vol_list.return_value.as_list = ['aa'] p.delete_volume({'name': 'WTF32'}) p.ibm_storage_cli.cmd.vol_delete.assert_called_once_with(vol='WTF32') @mock.patch("cinder.volume.drivers.ibm.ibm_storage." "xiv_replication.VolumeReplication.delete_replication", mock.MagicMock()) @mock.patch("cinder.volume.drivers.ibm.ibm_storage." 
"xiv_proxy.XIVProxy._get_extra_specs", mock.MagicMock(return_value=TEST_EXTRA_SPECS_REPL)) def test_delete_volume_with_replication(self): """Test Delete volume with replication""" driver = mock.MagicMock() driver.VERSION = "VERSION" p = self.proxy( self.default_storage_info, mock.MagicMock(), test_mock.cinder.exception, driver) p.ibm_storage_cli = mock.MagicMock() volume = {'size': 16, 'name': 'WTF32', 'volume_type_id': 'WTF'} p.delete_volume(volume) @mock.patch("cinder.volume.drivers.ibm.ibm_storage." "xiv_proxy.XIVProxy._get_extra_specs", mock.MagicMock(return_value=TEST_EXTRA_SPECS_REPL)) @mock.patch("cinder.volume.drivers.ibm.ibm_storage." "xiv_proxy.client.XCLIClient") @mock.patch("cinder.volume.drivers.ibm.ibm_storage." "xiv_proxy.XIVProxy._get_target_params", mock.MagicMock(return_value=REPLICA_PARAMS)) def test_failover_host(self, mock_xcli): """Test failover_host with valid target""" driver = mock.MagicMock() driver.VERSION = "VERSION" p = self.proxy( self.default_storage_info, mock.MagicMock(), test_mock.cinder.exception, driver) p.ibm_storage_cli = mock_xcli p.ibm_storage_cli.connect_multiendpoint_ssl.return_value mock_xcli.connect_multiendpoint_ssl.return_value = mock_xcli volume = {'id': 'WTF64', 'size': 16, 'name': 'WTF32', 'volume_type_id': 'WTF'} target = REPLICA_ID p.failover_host({}, [volume], target, []) def test_failover_host_invalid_target(self): """Test failover_host with invalid target""" driver = mock.MagicMock() driver.VERSION = "VERSION" p = self.proxy( self.default_storage_info, mock.MagicMock(), test_mock.cinder.exception, driver) volume = {'id': 'WTF64', 'size': 16, 'name': 'WTF32', 'volume_type_id': 'WTF'} target = 'Invalid' ex = getattr(p, "_get_exception")() self.assertRaises(ex, p.failover_host, {}, [volume], target, []) @mock.patch("cinder.volume.drivers.ibm.ibm_storage." "xiv_proxy.client.XCLIClient") @mock.patch("cinder.volume.drivers.ibm.ibm_storage." "xiv_proxy.XIVProxy._get_target_params", mock.MagicMock(return_value=REPLICA_PARAMS)) def test_failover_host_no_connection_to_target(self, mock_xcli): """Test failover_host that fails to connect to target""" driver = mock.MagicMock() driver.VERSION = "VERSION" p = self.proxy( self.default_storage_info, mock.MagicMock(), test_mock.cinder.exception, driver) p.ibm_storage_cli = mock_xcli p.ibm_storage_cli.connect_multiendpoint_ssl.return_value mock_xcli.connect_multiendpoint_ssl.side_effect = errors.XCLIError('') volume = {'id': 'WTF64', 'size': 16, 'name': 'WTF32', 'volume_type_id': 'WTF'} target = REPLICA_ID ex = getattr(p, "_get_exception")() self.assertRaises(ex, p.failover_host, {}, [volume], target, []) @mock.patch("cinder.volume.drivers.ibm.ibm_storage." "xiv_proxy.client.XCLIClient") @mock.patch("cinder.volume.drivers.ibm.ibm_storage." 
"xiv_proxy.XIVProxy._get_target_params", mock.MagicMock(return_value=REPLICA_PARAMS)) def test_failback_host(self, mock_xcli): """Test failing back after DR""" driver = mock.MagicMock() driver.VERSION = "VERSION" p = self.proxy( self.default_storage_info, mock.MagicMock(), test_mock.cinder.exception, driver) volume = {'id': 'WTF64', 'size': 16, 'name': 'WTF32', 'volume_type_id': 'WTF'} target = 'default' p.failover_host(None, [volume], target, []) def qos_test_empty_name_if_no_specs(self): """Test empty name in case no specs are specified""" driver = mock.MagicMock() driver.VERSION = "VERSION" p = self.proxy( self.default_storage_info, mock.MagicMock(), test_mock.cinder.exception, driver) perf_name = p._check_perf_class_on_backend({}) self.assertEqual('', perf_name) def test_qos_class_name_contains_qos_type(self): """Test backend naming Test if the naming convention is correct when getting the right specs with qos type """ driver = mock.MagicMock() driver.VERSION = "VERSION" p = self.proxy( self.default_storage_info, mock.MagicMock(), test_mock.cinder.exception, driver) p.ibm_storage_cli = mock.MagicMock() p.ibm_storage_cli.cmd.perf_class_list.return_value.as_list = [] perf_name = p._check_perf_class_on_backend({'bw': '100', 'type': 'independent'}) self.assertEqual('cinder-qos_bw_100_type_independent', perf_name) def test_qos_called_with_type_parameter(self): """Test xcli call for qos creation with type""" driver = mock.MagicMock() driver.VERSION = "VERSION" p = self.proxy( self.default_storage_info, mock.MagicMock(), test_mock.cinder.exception, driver) p.ibm_storage_cli = mock.MagicMock() p.ibm_storage_cli.cmd.perf_class_list.return_value.as_list = [] perf_name = p._check_perf_class_on_backend({'bw': '100', 'type': 'independent'}) p.ibm_storage_cli.cmd.perf_class_create.assert_called_once_with( perf_class=perf_name, type='independent') def test_qos_called_with_wrong_type_parameter(self): """Test xcli call for qos creation with wrong type""" driver = mock.MagicMock() driver.VERSION = "VERSION" p = self.proxy( self.default_storage_info, mock.MagicMock(), test_mock.cinder.exception, driver) p.ibm_storage_cli = mock.MagicMock() p.ibm_storage_cli.cmd.perf_class_list.return_value.as_list = [] p.ibm_storage_cli.cmd.perf_class_create.side_effect = ( errors.XCLIError('llegal value')) ex = getattr(p, "_get_exception")() self.assertRaises(ex, p._check_perf_class_on_backend, {'bw': '100', 'type': 'BAD'}) def test_qos_class_on_backend_name_correct(self): """Test backend naming Test if the naming convention is correct when getting the right specs """ driver = mock.MagicMock() driver.VERSION = "VERSION" p = self.proxy( self.default_storage_info, mock.MagicMock(), test_mock.cinder.exception, driver) p.ibm_storage_cli = mock.MagicMock() p.ibm_storage_cli.cmd.perf_class_list.return_value.as_list = [] perf_name = p._check_perf_class_on_backend({'bw': '100'}) self.assertEqual('cinder-qos_bw_100', perf_name) def test_qos_xcli_exception(self): driver = mock.MagicMock() driver.VERSION = "VERSION" p = self.proxy( self.default_storage_info, mock.MagicMock(), test_mock.cinder.exception, driver) p.ibm_storage_cli = mock.MagicMock() p.ibm_storage_cli.cmd.perf_class_list.side_effect = ( errors.XCLIError('')) ex = getattr(p, "_get_exception")() self.assertRaises(ex, p._check_perf_class_on_backend, {'bw': '100'}) @mock.patch("cinder.volume.drivers.ibm.ibm_storage." 
"xiv_proxy.XIVProxy._qos_create_kwargs_for_xcli", mock.MagicMock(return_value={})) def test_regex_from_perf_class_name(self): """Test type extraction from perf_class with Regex""" driver = mock.MagicMock() driver.VERSION = "VERSION" p = self.proxy( self.default_storage_info, mock.MagicMock(), test_mock.cinder.exception, driver) perf_class_names_list = [ {'class_name': 'cinder-qos_iops_1000_type_independent_bw_1000', 'type': 'independent'}, {'class_name': 'cinder-qos_iops_1000_bw_1000_type_shared', 'type': 'shared'}, {'class_name': 'cinder-qos_type_badtype_bw_1000', 'type': None}] for element in perf_class_names_list: _type = p._get_type_from_perf_class_name( perf_class_name=element['class_name']) self.assertEqual(element['type'], _type) @mock.patch("cinder.volume.drivers.ibm.ibm_storage." "xiv_proxy.XIVProxy._qos_create_kwargs_for_xcli", mock.MagicMock(return_value={})) def test_create_qos_class_with_type(self): """Test performance class creation with type""" driver = mock.MagicMock() driver.VERSION = "VERSION" p = self.proxy( self.default_storage_info, mock.MagicMock(), test_mock.cinder.exception, driver) p.ibm_storage_cli = mock.MagicMock() p.ibm_storage_cli.cmd.perf_class_set_rate.return_value = None p.ibm_storage_cli.cmd.perf_class_create.return_value = None perf_class_name = 'cinder-qos_iops_1000_type_independent_bw_1000' p_class_name = p._create_qos_class(perf_class_name=perf_class_name, specs=None) p.ibm_storage_cli.cmd.perf_class_create.assert_called_once_with( perf_class=perf_class_name, type='independent') self.assertEqual('cinder-qos_iops_1000_type_independent_bw_1000', p_class_name) @mock.patch("cinder.volume.drivers.ibm.ibm_storage." "xiv_proxy.XIVProxy._check_storage_version_for_qos_support", mock.MagicMock(return_value=True)) @mock.patch("cinder.volume.drivers.ibm.ibm_storage." "xiv_proxy.XIVProxy._get_qos_specs", mock.MagicMock(return_value='specs')) def test_qos_specs_exist_if_type_exists(self): """Test a case where type was found and qos were found""" driver = mock.MagicMock() driver.VERSION = "VERSION" p = self.proxy( self.default_storage_info, mock.MagicMock(), test_mock.cinder.exception, driver) volume = {'name': 'bla', 'volume_type_id': '7'} specs = p._qos_specs_from_volume(volume) self.assertEqual('specs', specs) @mock.patch("cinder.volume.drivers.ibm.ibm_storage." "xiv_proxy.XIVProxy._check_storage_version_for_qos_support", mock.MagicMock(return_value=True)) @mock.patch("cinder.volume.drivers.ibm.ibm_storage." "xiv_proxy.XIVProxy._get_qos_specs", mock.MagicMock(return_value=None)) def test_no_qos_but_type_exists(self): driver = mock.MagicMock() driver.VERSION = "VERSION" p = self.proxy( self.default_storage_info, mock.MagicMock(), test_mock.cinder.exception, driver) volume = {'name': 'bla', 'volume_type_id': '7'} specs = p._qos_specs_from_volume(volume) self.assertIsNone(specs) @mock.patch("cinder.volume.drivers.ibm.ibm_storage." "xiv_proxy.XIVProxy._check_storage_version_for_qos_support", mock.MagicMock(return_value=True)) @mock.patch("cinder.volume.drivers.ibm.ibm_storage." 
"xiv_proxy.XIVProxy._get_qos_specs", mock.MagicMock(return_value=None)) def test_qos_specs_doesnt_exist_if_no_type(self): """Test _qos_specs_from_volume Test a case where no type was defined and therefore no specs exist """ driver = mock.MagicMock() driver.VERSION = "VERSION" p = self.proxy( self.default_storage_info, mock.MagicMock(), test_mock.cinder.exception, driver) volume = {'name': 'bla'} specs = p._qos_specs_from_volume(volume) self.assertIsNone(specs) def test_manage_volume_should_call_xcli(self): """Manage volume should call xcli with the correct parameters""" driver = mock.MagicMock() driver.VERSION = "VERSION" p = self.proxy( self.default_storage_info, mock.MagicMock(), test_mock.cinder.exception, driver) p.ibm_storage_cli = mock.MagicMock() p.ibm_storage_cli.cmd.vol_list.return_value.as_list = [ {'name': 'WTF64', 'size': 34}] p.manage_volume(volume={'name': 'WTF32'}, reference={'source-name': 'WTF64'}) p.ibm_storage_cli.cmd.vol_list.assert_called_once_with( vol='WTF64') def test_manage_volume_should_return_volume_if_exists(self): """Manage volume should return with no errors""" driver = mock.MagicMock() driver.VERSION = "VERSION" p = self.proxy( self.default_storage_info, mock.MagicMock(), test_mock.cinder.exception, driver) p.ibm_storage_cli = mock.MagicMock() p.ibm_storage_cli.cmd.vol_list.return_value.as_list = [ {'name': 'WTF64', 'size': 34}] volume = {'name': 'WTF32'} p.manage_volume(volume=volume, reference={'source-name': 'WTF64'}) self.assertEqual(34, volume['size']) def test_manage_volume_should_raise_exception_if_not_exists(self): """Test manage_volume Manage volume should return with exception if volume does not exist """ driver = mock.MagicMock() driver.VERSION = "VERSION" p = self.proxy( self.default_storage_info, mock.MagicMock(), test_mock.cinder.exception, driver) p.ibm_storage_cli = mock.MagicMock() p.ibm_storage_cli.cmd.vol_list.return_value.as_list = [] ex = getattr(p, "_get_exception")() self.assertRaises(ex, p.manage_volume, volume={'name': 'WTF32'}, reference={'source-name': 'WTF64'}) def test_manage_volume_get_size_if_volume_exists(self): """Manage volume get size should return size""" driver = mock.MagicMock() driver.VERSION = "VERSION" p = self.proxy( self.default_storage_info, mock.MagicMock(), test_mock.cinder.exception, driver) p.ibm_storage_cli = mock.MagicMock() p.ibm_storage_cli.cmd.vol_list.return_value.as_list = [ {'name': 'WTF64', 'size': 34}] volume = {'name': 'WTF32'} size = p.manage_volume_get_size(volume=volume, reference={'source-name': 'WTF64'}) self.assertEqual(34, size) def test_retype_false_if_no_location(self): driver = mock.MagicMock() driver.VERSION = "VERSION" p = self.proxy( self.default_storage_info, mock.MagicMock(), test_mock.cinder.exception, driver) volume = {'display_name': 'vol'} new_type = {} new_type['name'] = "type1" host = {'capabilities': ''} diff = {} ret = p.retype({}, volume, new_type, diff, host) self.assertFalse(ret) def test_retype_false_if_dest_not_xiv_backend(self): driver = mock.MagicMock() driver.VERSION = "VERSION" p = self.proxy( self.default_storage_info, mock.MagicMock(), test_mock.cinder.exception, driver) host = {'capabilities': {'location_info': "IBM-XIV:host:pool"}} volume = {'display_name': 'vol', 'host': "origdest_orighost_origpool"} new_type = {'name': "type1"} diff = {} ret = p.retype({}, volume, new_type, diff, host) self.assertFalse(ret) def test_retype_true_if_dest_is_xiv_backend(self): driver = mock.MagicMock() driver.VERSION = "VERSION" p = self.proxy( self.default_storage_info, 
mock.MagicMock(), test_mock.cinder.exception, driver) p.migrate_volume = mock.MagicMock() p.migrate_volume.return_value = (True, None) p._qos_specs_from_volume = mock.MagicMock() p._get_qos_specs = mock.MagicMock() p._qos_specs_from_volume.return_value = {} p._get_qos_specs.return_value = {} host = {'capabilities': {'location_info': "IBM-XIV:host:pool"}} volume = {'display_name': 'vol', 'host': "IBM-XIV_host_pool"} new_type = {'name': "type1"} diff = {} ret = p.retype({}, volume, new_type, diff, host) self.assertTrue(ret) def test_manage_volume_get_size_should_raise_exception_if_not_exists(self): """Test manage_volume Manage volume get size should raise exception if volume does not exist """ driver = mock.MagicMock() driver.VERSION = "VERSION" p = self.proxy( self.default_storage_info, mock.MagicMock(), test_mock.cinder.exception, driver) p.ibm_storage_cli = mock.MagicMock() p.ibm_storage_cli.cmd.vol_list.return_value.as_list = [] ex = getattr(p, "_get_exception")() self.assertRaises(ex, p.manage_volume_get_size, volume={'name': 'WTF32'}, reference={'source-name': 'WTF64'}) def test_initialize_connection(self): """Test initialize_connection Ensure that initialize connection returns, all the correct connection values """ p = self.proxy( self.default_storage_info, mock.MagicMock(), test_mock.cinder.exception) p.ibm_storage_iqn = "BLAIQN" p.ibm_storage_portal = "BLAPORTAL" p.ibm_storage_cli = mock.MagicMock() p.ibm_storage_cli.cmd.vol_list.return_value.as_list = ['aa'] host = self._get_test_host() setattr( p, '_get_host_and_fc_targets', mock.MagicMock(return_value=( [], host))) setattr( p, '_vol_map_and_get_lun_id', mock.MagicMock(return_value=100)) p.volume_exists = mock.MagicMock(return_value=True) info = p.initialize_connection(TEST_VOLUME, {}) self.assertEqual( p.meta.get('ibm_storage_portal'), info['data']['target_portal']) self.assertEqual( p.meta.get('ibm_storage_iqn'), info['data']['target_iqn']) self.assertEqual(100, info['data']['target_lun']) def test_initialize_connection_no_initiator(self): """Initialize connection raises exception on missing initiator""" driver = mock.MagicMock() driver.VERSION = "VERSION" p = self.proxy( self.default_storage_info, mock.MagicMock(), test_mock.cinder.exception, driver) connector = TEST_CONNECTOR.copy() connector['initiator'] = None ex = getattr(p, "_get_exception")() self.assertRaises(ex, p.initialize_connection, TEST_VOLUME, connector) def test_initialize_connection_bad_iqn(self): """Initialize connection raises exception on bad formatted IQN""" driver = mock.MagicMock() driver.VERSION = "VERSION" p = self.proxy( self.default_storage_info, mock.MagicMock(), test_mock.cinder.exception, driver) connector = TEST_CONNECTOR.copy() # any string would pass for initiator connector['initiator'] = 5555 ex = getattr(p, "_get_exception")() self.assertRaises(ex, p.initialize_connection, TEST_VOLUME, connector) def test_get_fc_targets_returns_optimized_wwpns_list(self): driver = mock.MagicMock() driver.VERSION = "VERSION" p = self.proxy( self.default_storage_info, mock.MagicMock(), test_mock.cinder.exception, driver) p.ibm_storage_cli = mock.MagicMock() p.ibm_storage_cli.cmd.fc_port_list.return_value = FC_PORT_LIST_OUTPUT fc_targets = p._get_fc_targets(None) self.assertCountEqual(FC_TARGETS_OPTIMIZED, fc_targets) def test_get_fc_targets_returns_host_optimized_wwpns_list(self): driver = mock.MagicMock() driver.VERSION = "VERSION" p = self.proxy( self.default_storage_info, mock.MagicMock(), test_mock.cinder.exception, driver) hostname = 
storage.get_host_or_create_from_iqn(TEST_CONNECTOR) host = {'name': hostname} p.ibm_storage_cli = mock.MagicMock() p.ibm_storage_cli.cmd.fc_port_list.return_value = FC_PORT_LIST_OUTPUT p.ibm_storage_cli.cmd.host_connectivity_list.return_value = ( HOST_CONNECTIVITY_LIST) fc_targets = p._get_fc_targets(host) self.assertCountEqual(FC_TARGETS_OPTIMIZED_WITH_HOST, fc_targets, "FC targets are different from the expected") def test_get_fc_targets_returns_host_all_wwpns_list(self): driver = mock.MagicMock() driver.VERSION = "VERSION" p = self.proxy( self.default_storage_info, mock.MagicMock(), test_mock.cinder.exception, driver) hostname = storage.get_host_or_create_from_iqn(TEST_CONNECTOR) host = {'name': hostname} p.ibm_storage_cli = mock.MagicMock() p.ibm_storage_cli.cmd.fc_port_list.return_value = FC_PORT_LIST_OUTPUT p.ibm_storage_cli.cmd.host_connectivity_list.return_value = ( HOST_CONNECTIVITY_LIST_UNKNOWN_HOST) fc_targets = p._get_fc_targets(host) self.assertCountEqual(FC_TARGETS_OPTIMIZED, fc_targets, "FC targets are different from the expected") def test_define_fc_returns_all_wwpns_list(self): driver = mock.MagicMock() driver.VERSION = "VERSION" p = self.proxy( self.default_storage_info, mock.MagicMock(), test_mock.cinder.exception, driver) p.ibm_storage_cli = mock.MagicMock() p.ibm_storage_cli.cmd.fc_port_list.return_value = FC_PORT_LIST_OUTPUT p.ibm_storage_cli.fc_connectivity_list.return_value = () fc_targets = p._define_fc(p._define_host(TEST_CONNECTOR)) self.assertCountEqual(FC_TARGETS_OPTIMIZED, fc_targets, "FC targets are different from the expected") def test_define_ports_returns_sorted_wwpns_list(self): driver = mock.MagicMock() driver.VERSION = "VERSION" p = self.proxy( self.default_storage_info, mock.MagicMock(), test_mock.cinder.exception, driver) p._get_connection_type = mock.MagicMock( return_value=storage.XIV_CONNECTION_TYPE_FC) p._define_fc = mock.MagicMock(return_value=FC_TARGETS_BEFORE_SORTING) fc_targets = p._define_ports(self._get_test_host()) fc_result = list(map(lambda x: x[-1:], fc_targets)) expected_result = list(map(lambda x: x[-1:], FC_TARGETS_AFTER_SORTING)) self.assertEqual(expected_result, fc_result, "FC targets are different from the expected") def test_get_host_and_fc_targets_if_host_not_defined(self): """Test host and FC targets Tests that host and fc targets are provided if the host is not defined """ p = self.proxy( self.default_storage_info, mock.MagicMock(), test_mock.cinder.exception) p.meta = mock.MagicMock() p.meta.ibm_storage_iqn = "BLAIQN" p.meta.ibm_storage_portal = "BLAPORTAL" p.meta.openstack_version = "cinder-2013.2" pool = {'name': "WTF32", 'domain': 'pool_domain_bla'} p.ibm_storage_cli = mock.MagicMock() p.ibm_storage_cli.cmd.host_list.return_value.as_list = [] p.ibm_storage_cli.cmd.host_list_ports.return_value = [] p.ibm_storage_cli.cmd.pool_list.return_value.as_list = [pool] p._get_bunch_from_host = mock.MagicMock() p._get_bunch_from_host.return_value = { 'name': "nova-compute-%s" % TEST_INITIATOR, 'initiator': TEST_INITIATOR, 'id': 123, 'wwpns': 111, 'chap': 'chap', } fc_targets, host = getattr(p, '_get_host_and_fc_targets')( TEST_VOLUME, TEST_CONNECTOR) hostname = storage.get_host_or_create_from_iqn(TEST_CONNECTOR) p.ibm_storage_cli.cmd.host_define.assert_called_once_with( host=hostname, domain=pool.get('domain')) p.ibm_storage_cli.cmd.host_add_port.assert_called_once_with( host=hostname, iscsi_name=TEST_CONNECTOR['initiator']) def test_get_lun_id_if_host_already_mapped(self): """Test lun id Tests that a lun is provided if host is already 
mapped to other volumes """ driver = mock.MagicMock() driver.VERSION = "VERSION" p = self.proxy( self.default_storage_info, mock.MagicMock(), test_mock.cinder.exception, driver) p.ibm_storage_cli = mock.MagicMock() vol_mapping_list = p.ibm_storage_cli.cmd.vol_mapping_list vol_mapping_list.return_value.as_dict.return_value = {} lun1 = {'lun': 1} lun2 = {'lun': 2} p.ibm_storage_cli.cmd.mapping_list.return_value.as_list = [lun1, lun2] host = self._get_test_host() self.assertEqual( 3, getattr(p, '_vol_map_and_get_lun_id')( TEST_VOLUME, TEST_CONNECTOR, host)) def test_terminate_connection_should_call_unmap_vol(self): """Terminate connection should call unmap vol""" driver = mock.MagicMock() driver.VERSION = "VERSION" p = self.proxy( self.default_storage_info, mock.MagicMock(), test_mock.cinder.exception, driver) p._get_connection_type = mock.MagicMock( return_value=storage.XIV_CONNECTION_TYPE_FC) p._get_fc_targets = mock.MagicMock(return_value=TEST_WWPNS) p.ibm_storage_cli = mock.MagicMock() vol_mapping_ret = p.ibm_storage_cli.cmd.vol_mapping_list.return_value vol_mapping_ret.as_dict.return_value.has_keys.return_value = True p.ibm_storage_cli.cmd.vol_list.return_value.as_list = ['aa'] hostname = storage.get_host_or_create_from_iqn(TEST_CONNECTOR) host = { 'name': hostname, 'initiator': TEST_CONNECTOR['initiator'], 'id': 1 } TEST_CONNECTOR['wwpns'] = [TEST_INITIATOR] setattr(p, "_get_host", mock.MagicMock(return_value=host)) meta = p.terminate_connection(TEST_VOLUME, TEST_CONNECTOR) self.assertEqual( TEST_TARGET_MAP, meta['data']['initiator_target_map']) p.ibm_storage_cli.cmd.unmap_vol.assert_called_once_with( vol=TEST_VOLUME['name'], host=hostname) def test_terminate_connection_multiple_connections(self): # Terminate connection should not return meta if host is still # connected p = self.proxy( self.default_storage_info, mock.MagicMock(), test_mock.cinder.exception) p.ibm_storage_cli = mock.MagicMock() vol_dict = p.ibm_storage_cli.cmd.vol_mapping_list.return_value.as_dict vol_dict.return_value.has_keys.return_value = True p.ibm_storage_cli.cmd.vol_list.return_value.as_list = ['aa'] hostname = storage.get_host_or_create_from_iqn(TEST_CONNECTOR) host = { 'name': hostname, 'initiator': TEST_CONNECTOR['initiator'], 'id': 1 } TEST_CONNECTOR['wwpns'] = [TEST_INITIATOR] map_dict = p.ibm_storage_cli.cmd.mapping_list.return_value.as_dict map_dict.return_value.has_keys.return_value = host setattr(p, "_get_host", mock.MagicMock(return_value=host)) meta = p.terminate_connection(TEST_VOLUME, TEST_CONNECTOR) self.assertIsNone(meta) p.ibm_storage_cli.cmd.unmap_vol.assert_called_once_with( vol=TEST_VOLUME['name'], host=hostname) def test_attach_deleted_volume_should_fail_with_info_to_log(self): """Test attach deleted volume should fail with info to log""" driver = mock.MagicMock() driver.VERSION = "VERSION" p = self.proxy( self.default_storage_info, mock.MagicMock(), test_mock.cinder.exception, driver) p.ibm_storage_cli = mock.MagicMock() mock_log = mock.MagicMock() setattr(p, "_log", mock_log) p.ibm_storage_cli = mock.MagicMock() p.ibm_storage_cli.cmd.vol_mapping_list.side_effect = ( errors.VolumeBadNameError('bla', 'bla', ElementTree.Element('Bla'))) p._define_host_according_to_chap = mock.MagicMock() p._define_host_according_to_chap.return_value = dict(id=100) ex = getattr(p, "_get_exception")() self.assertRaises(ex, p.initialize_connection, TEST_VOLUME, TEST_CONNECTOR) def _get_test_host(self): host = { 'name': TEST_HOST_NAME, 'initiator': TEST_INITIATOR, 'id': TEST_HOST_ID, 'wwpns': [TEST_INITIATOR], 
'chap': (TEST_CHAP_NAME, TEST_CHAP_SECRET) } return host def _create_test_group(self, g_name='group', is_cg=True): extra_specs = {} if is_cg: extra_specs['consistent_group_snapshot_enabled'] = ' True' group_type = group_types.create(self.ctxt, g_name, extra_specs) return testutils.create_group(self.ctxt, host=self._get_test_host()['name'], group_type_id=group_type.id, volume_type_ids=[]) def _create_test_cgsnapshot(self, group_id): group_type = group_types.create( self.ctxt, 'group_snapshot', {'consistent_group_snapshot_enabled': ' True'}) return testutils.create_group_snapshot(self.ctxt, group_id=group_id, group_type_id=group_type.id) def test_create_generic_group(self): """test create generic group""" driver = mock.MagicMock() driver.VERSION = "VERSION" p = self.proxy( self.default_storage_info, mock.MagicMock(), test_mock.cinder.exception, driver) p.ibm_storage_cli = mock.MagicMock() group_obj = self._create_test_group(is_cg=False) self.assertRaises(NotImplementedError, p.create_group, {}, group_obj) def test_create_consistencygroup(self): """test a successful cg create""" driver = mock.MagicMock() driver.VERSION = "VERSION" p = self.proxy( self.default_storage_info, mock.MagicMock(), test_mock.cinder.exception, driver) p.ibm_storage_cli = mock.MagicMock() group_obj = self._create_test_group() model_update = p.create_group({}, group_obj) p.ibm_storage_cli.cmd.cg_create.assert_called_once_with( cg=p._cg_name_from_id(group_obj.id), pool='WTF32') self.assertEqual('available', model_update['status']) def test_create_consistencygroup_already_exists(self): """test create_consistenygroup when cg already exists""" driver = mock.MagicMock() driver.VERSION = "VERSION" p = self.proxy( self.default_storage_info, mock.MagicMock(), test_mock.cinder.exception, driver) p.ibm_storage_cli = mock.MagicMock() p.ibm_storage_cli.cmd.cg_create.side_effect = errors.CgNameExistsError( 'bla', 'bla', ElementTree.Element('bla')) ex = getattr(p, "_get_exception")() self.assertRaises(ex, p.create_group, {}, self._create_test_group()) def test_create_consistencygroup_reached_limit(self): """test create_consistenygroup when reached maximum CGs""" driver = mock.MagicMock() driver.VERSION = "VERSION" p = self.proxy( self.default_storage_info, mock.MagicMock(), test_mock.cinder.exception, driver) p.ibm_storage_cli = mock.MagicMock() p.ibm_storage_cli.cmd.cg_create.side_effect = ( errors.CgLimitReachedError( 'bla', 'bla', ElementTree.Element('bla'))) ex = getattr(p, "_get_exception")() self.assertRaises(ex, p.create_group, {}, self._create_test_group()) @mock.patch("cinder.volume.drivers.ibm.ibm_storage.xiv_proxy." 
"client.XCLIClient") def test_create_consistencygroup_with_replication(self, mock_xcli): """test create_consistenygroup when replication is set""" p = self.proxy( self.default_storage_info, mock.MagicMock(), test_mock.cinder.exception) p.ibm_storage_cli = mock.MagicMock() group_obj = self._create_test_group() vol_type = objects.VolumeType(context=self.ctxt, name='volume_type_rep', extra_specs=( {'replication_enabled': ' True', 'replication_type': 'sync'})) group_obj.volume_types = objects.VolumeTypeList(context=self.ctxt, objects=[vol_type]) model_update = p.create_group({}, group_obj) self.assertEqual('available', model_update['status']) def test_create_consistencygroup_from_src_cgsnapshot(self): """test a successful cg create from cgsnapshot""" driver = mock.MagicMock() driver.VERSION = "VERSION" p = self.proxy( self.default_storage_info, mock.MagicMock(), test_mock.cinder.exception, driver) p.ibm_storage_cli = mock.MagicMock() p.ibm_storage_cli.cmd.create_volume_from_snapshot.return_value = [] group_obj = self._create_test_group() cgsnap_group_obj = self._create_test_cgsnapshot(group_obj.id) volume = testutils.create_volume(self.ctxt, volume_type_id=self.vt['id']) snapshot = testutils.create_snapshot(self.ctxt, volume.id) model_update, vols_model_update = p.create_group_from_src( {}, group_obj, [volume], cgsnap_group_obj, [snapshot], None, None) p.ibm_storage_cli.cmd.cg_create.assert_called_once_with( cg=p._cg_name_from_id(group_obj.id), pool='WTF32') self.assertEqual('available', model_update['status']) def test_create_consistencygroup_from_src_cg(self): """test a successful cg create from consistencygroup""" driver = mock.MagicMock() driver.VERSION = "VERSION" p = self.proxy( self.default_storage_info, mock.MagicMock(), test_mock.cinder.exception, driver) p.ibm_storage_cli = mock.MagicMock() p.ibm_storage_cli.cmd.create_volume_from_snapshot.return_value = [] group_obj = self._create_test_group() src_group_obj = self._create_test_group(g_name='src_group') volume = testutils.create_volume(self.ctxt, volume_type_id=self.vt['id']) src_volume = testutils.create_volume(self.ctxt, volume_type_id=self.vt['id']) model_update, vols_model_update = p.create_group_from_src( {}, group_obj, [volume], None, None, src_group_obj, [src_volume]) p.ibm_storage_cli.cmd.cg_create.assert_called_once_with(cg=group_obj, pool='WTF32') self.assertEqual('available', model_update['status']) def test_create_consistencygroup_from_src_fails_cg_create_from_cgsnapshot( self): """test cg create from cgsnapshot fails on cg_create""" driver = mock.MagicMock() driver.VERSION = "VERSION" p = self.proxy( self.default_storage_info, mock.MagicMock(), test_mock.cinder.exception, driver) p.ibm_storage_cli = mock.MagicMock() p.ibm_storage_cli.cmd.cg_create.side_effect = errors.XCLIError( 'bla', 'bla', ElementTree.Element('bla')) group_obj = self._create_test_group() cgsnap_group_obj = self._create_test_cgsnapshot(group_obj.id) volume = testutils.create_volume(self.ctxt, volume_type_id=self.vt['id']) snapshot = testutils.create_snapshot(self.ctxt, volume.id) ex = getattr(p, "_get_exception")() self.assertRaises(ex, p.create_group_from_src, {}, group_obj, [volume], cgsnap_group_obj, [snapshot], None, None) def test_create_consistencygroup_from_src_fails_cg_create_from_cg(self): """test cg create from cg fails on cg_create""" driver = mock.MagicMock() driver.VERSION = "VERSION" p = self.proxy( self.default_storage_info, mock.MagicMock(), test_mock.cinder.exception, driver) p.ibm_storage_cli = mock.MagicMock() 
p.ibm_storage_cli.cmd.cg_create.side_effect = errors.XCLIError( 'bla', 'bla', ElementTree.Element('bla')) group_obj = self._create_test_group() src_group_obj = self._create_test_group(g_name='src_group') volume = testutils.create_volume(self.ctxt, volume_type_id=self.vt['id']) src_volume = testutils.create_volume(self.ctxt, volume_type_id=self.vt['id']) ex = getattr(p, "_get_exception")() self.assertRaises(ex, p.create_group_from_src, {}, group_obj, [volume], None, None, src_group_obj, [src_volume]) def test_create_consistencygroup_from_src_fails_vol_create_from_cgsnapshot( self): """test cg create from cgsnapshot fails on vol_create""" driver = mock.MagicMock() driver.VERSION = "VERSION" p = self.proxy( self.default_storage_info, mock.MagicMock(), test_mock.cinder.exception, driver) p.ibm_storage_cli = mock.MagicMock() p.ibm_storage_cli.cmd.vol_create.side_effect = errors.XCLIError( 'bla', 'bla', ElementTree.Element('bla')) group_obj = self._create_test_group() cgsnap_group_obj = self._create_test_cgsnapshot(group_obj.id) volume = testutils.create_volume(self.ctxt, volume_type_id=self.vt['id']) snapshot = testutils.create_snapshot(self.ctxt, volume.id) ex = getattr(p, "_get_exception")() self.assertRaises(ex, p.create_group_from_src, {}, group_obj, [volume], cgsnap_group_obj, [snapshot], None, None) def test_create_consistencygroup_from_src_fails_vol_create_from_cg(self): """test cg create from cg fails on vol_create""" driver = mock.MagicMock() driver.VERSION = "VERSION" p = self.proxy( self.default_storage_info, mock.MagicMock(), test_mock.cinder.exception, driver) p.ibm_storage_cli = mock.MagicMock() p.ibm_storage_cli.cmd.vol_create.side_effect = errors.XCLIError( 'bla', 'bla', ElementTree.Element('bla')) group_obj = self._create_test_group() src_group_obj = self._create_test_group(g_name='src_group') volume = testutils.create_volume(self.ctxt, volume_type_id=self.vt['id']) src_volume = testutils.create_volume(self.ctxt, volume_type_id=self.vt['id']) ex = getattr(p, "_get_exception")() self.assertRaises(ex, p.create_group_from_src, {}, group_obj, [volume], None, None, src_group_obj, [src_volume]) def test_create_consistencygroup_from_src_fails_vol_copy_from_cgsnapshot( self): """test cg create from cgsnapshot fails on vol_copy""" driver = mock.MagicMock() driver.VERSION = "VERSION" p = self.proxy( self.default_storage_info, mock.MagicMock(), test_mock.cinder.exception, driver) p.ibm_storage_cli = mock.MagicMock() p.ibm_storage_cli.cmd.vol_copy.side_effect = errors.XCLIError( 'bla', 'bla', ElementTree.Element('bla')) group_obj = self._create_test_group() cgsnap_group_obj = self._create_test_cgsnapshot(group_obj.id) volume = testutils.create_volume(self.ctxt, volume_type_id=self.vt['id']) snapshot = testutils.create_snapshot(self.ctxt, volume.id) ex = getattr(p, "_get_exception")() self.assertRaises(ex, p.create_group_from_src, {}, group_obj, [volume], cgsnap_group_obj, [snapshot], None, None) def test_create_consistencygroup_from_src_fails_vol_copy_from_cg(self): """test cg create from cg fails on vol_copy""" driver = mock.MagicMock() driver.VERSION = "VERSION" p = self.proxy( self.default_storage_info, mock.MagicMock(), test_mock.cinder.exception, driver) p.ibm_storage_cli = mock.MagicMock() p.ibm_storage_cli.cmd.vol_copy.side_effect = errors.XCLIError( 'bla', 'bla', ElementTree.Element('bla')) group_obj = self._create_test_group() src_group_obj = self._create_test_group(g_name='src_group') volume = testutils.create_volume(self.ctxt, volume_type_id=self.vt['id']) src_volume = 
testutils.create_volume(self.ctxt, volume_type_id=self.vt['id']) ex = getattr(p, "_get_exception")() self.assertRaises(ex, p.create_group_from_src, {}, group_obj, [volume], None, None, src_group_obj, [src_volume]) def test_delete_consistencygroup_with_no_volumes(self): """test a successful cg delete""" driver = mock.MagicMock() driver.VERSION = "VERSION" p = self.proxy( self.default_storage_info, mock.MagicMock(), test_mock.cinder.exception, driver) p.ibm_storage_cli = mock.MagicMock() group_obj = self._create_test_group() model_update, volumes = p.delete_group({}, group_obj, []) p.ibm_storage_cli.cmd.cg_delete.assert_called_once_with( cg=p._cg_name_from_id(group_obj.id)) self.assertEqual('deleted', model_update['status']) def test_delete_consistencygroup_not_exists(self): """test delete_consistenygroup when CG does not exist""" driver = mock.MagicMock() driver.VERSION = "VERSION" p = self.proxy( self.default_storage_info, mock.MagicMock(), test_mock.cinder.exception, driver) p.ibm_storage_cli = mock.MagicMock() p.ibm_storage_cli.cmd.cg_delete.side_effect = ( errors.CgDoesNotExistError( 'bla', 'bla', ElementTree.Element('bla'))) group_obj = self._create_test_group() model_update, volumes = p.delete_group({}, group_obj, []) p.ibm_storage_cli.cmd.cg_delete.assert_called_once_with( cg=p._cg_name_from_id(group_obj.id)) self.assertEqual('deleted', model_update['status']) def test_delete_consistencygroup_not_exists_2(self): """test delete_consistenygroup when CG does not exist bad name""" driver = mock.MagicMock() driver.VERSION = "VERSION" p = self.proxy( self.default_storage_info, mock.MagicMock(), test_mock.cinder.exception, driver) p.ibm_storage_cli = mock.MagicMock() p.ibm_storage_cli.cmd.cg_delete.side_effect = ( errors.CgBadNameError( 'bla', 'bla', ElementTree.Element('bla'))) group_obj = self._create_test_group() model_update, volumes = p.delete_group({}, group_obj, []) p.ibm_storage_cli.cmd.cg_delete.assert_called_once_with( cg=p._cg_name_from_id(group_obj.id)) self.assertEqual('deleted', model_update['status']) def test_delete_consistencygroup_not_empty(self): """test delete_consistenygroup when CG is not empty""" driver = mock.MagicMock() driver.VERSION = "VERSION" p = self.proxy( self.default_storage_info, mock.MagicMock(), test_mock.cinder.exception, driver) p.ibm_storage_cli = mock.MagicMock() p.ibm_storage_cli.cmd.cg_delete.side_effect = errors.CgNotEmptyError( 'bla', 'bla', ElementTree.Element('bla')) group_obj = self._create_test_group() ex = getattr(p, "_get_exception")() self.assertRaises(ex, p.delete_group, {}, group_obj, []) def test_delete_consistencygroup_replicated(self): """test delete cg when CG is not empty and replicated""" driver = mock.MagicMock() driver.VERSION = "VERSION" p = self.proxy( self.default_storage_info, mock.MagicMock(), test_mock.cinder.exception, driver) p.ibm_storage_cli = mock.MagicMock() group_obj = self._create_test_group() group_obj['replication_status'] = fields.ReplicationStatus.ENABLED ex = getattr(p, "_get_exception")() self.assertRaises(ex, p.delete_group, {}, group_obj, []) def test_delete_consistencygroup_faildover(self): """test delete cg when CG is faildover""" driver = mock.MagicMock() driver.VERSION = "VERSION" p = self.proxy( self.default_storage_info, mock.MagicMock(), test_mock.cinder.exception, driver) p.ibm_storage_cli = mock.MagicMock() group_obj = self._create_test_group() group_obj['replication_status'] = fields.ReplicationStatus.FAILED_OVER ex = getattr(p, "_get_exception")() self.assertRaises(ex, p.delete_group, {}, 
group_obj, []) def test_delete_consistencygroup_is_mirrored(self): """test delete_consistenygroup when CG is mirroring""" driver = mock.MagicMock() driver.VERSION = "VERSION" p = self.proxy( self.default_storage_info, mock.MagicMock(), test_mock.cinder.exception, driver) p.ibm_storage_cli = mock.MagicMock() p.ibm_storage_cli.cmd.cg_delete.side_effect = errors.CgHasMirrorError( 'bla', 'bla', ElementTree.Element('bla')) group_obj = self._create_test_group() ex = getattr(p, "_get_exception")() self.assertRaises(ex, p.delete_group, {}, group_obj, []) def test_update_consistencygroup(self): """test update_consistencygroup""" driver = mock.MagicMock() driver.VERSION = "VERSION" p = self.proxy( self.default_storage_info, mock.MagicMock(), test_mock.cinder.exception, driver) p.ibm_storage_cli = mock.MagicMock() group_obj = self._create_test_group() vol_add = testutils.create_volume(self.ctxt, display_name='WTF32', volume_type_id=self.vt['id']) vol_remove = testutils.create_volume(self.ctxt, display_name='WTF64', volume_type_id=self.vt['id']) model_update, add_model_update, remove_model_update = ( p.update_group({}, group_obj, [vol_add], [vol_remove])) p.ibm_storage_cli.cmd.cg_add_vol.assert_called_once_with( vol=vol_add['name'], cg=p._cg_name_from_id(group_obj.id)) p.ibm_storage_cli.cmd.cg_remove_vol.assert_called_once_with( vol=vol_remove['name']) self.assertEqual('available', model_update['status']) def test_update_consistencygroup_exception_in_add_vol(self): """test update_consistencygroup with exception in cg_add_vol""" driver = mock.MagicMock() driver.VERSION = "VERSION" p = self.proxy( self.default_storage_info, mock.MagicMock(), test_mock.cinder.exception, driver) p.ibm_storage_cli = mock.MagicMock() p.ibm_storage_cli.cmd.cg_add_vol.side_effect = errors.XCLIError( 'bla', 'bla', ElementTree.Element('bla')) group_obj = self._create_test_group() vol_add = testutils.create_volume(self.ctxt, display_name='WTF32', volume_type_id=self.vt['id']) ex = getattr(p, "_get_exception")() self.assertRaises(ex, p.update_group, {}, group_obj, [vol_add], []) def test_update_consistencygroup_exception_in_remove_vol(self): """test update_consistencygroup with exception in cg_remove_vol""" driver = mock.MagicMock() driver.VERSION = "VERSION" p = self.proxy( self.default_storage_info, mock.MagicMock(), test_mock.cinder.exception, driver) p.ibm_storage_cli = mock.MagicMock() p.ibm_storage_cli.cmd.cg_remove_vol.side_effect = errors.XCLIError( 'bla', 'bla', ElementTree.Element('bla')) group_obj = self._create_test_group() vol_remove = testutils.create_volume(self.ctxt, volume_type_id=self.vt['id']) ex = getattr(p, "_get_exception")() self.assertRaises(ex, p.update_group, {}, group_obj, [], [vol_remove]) def test_update_consistencygroup_remove_non_exist_vol_(self): """test update_group with exception in cg_remove_vol""" driver = mock.MagicMock() driver.VERSION = "VERSION" p = self.proxy( self.default_storage_info, mock.MagicMock(), test_mock.cinder.exception, driver) p.ibm_storage_cli = mock.MagicMock() p.ibm_storage_cli.cmd.cg_remove_vol.side_effect = ( errors.VolumeNotInConsGroup( 'bla', 'bla', ElementTree.Element('bla'))) group_obj = self._create_test_group() vol_remove = testutils.create_volume(self.ctxt, volume_type_id=self.vt['id']) model_update, add_model_update, remove_model_update = ( p.update_group({}, group_obj, [], [vol_remove])) p.ibm_storage_cli.cmd.cg_remove_vol.assert_called_once_with( vol=vol_remove['name']) self.assertEqual('available', model_update['status']) def test_create_cgsnapshot(self): 
"""test a successful cgsnapshot create""" driver = mock.MagicMock() driver.VERSION = "VERSION" p = self.proxy( self.default_storage_info, mock.MagicMock(), test_mock.cinder.exception, driver) p.ibm_storage_cli = mock.MagicMock() group_obj = self._create_test_group() cgsnap_group_obj = self._create_test_cgsnapshot(group_obj.id) model_update, snapshots_model_update = ( p.create_group_snapshot({}, cgsnap_group_obj, [])) p.ibm_storage_cli.cmd.cg_snapshots_create.assert_called_once_with( cg=p._cg_name_from_cgsnapshot(cgsnap_group_obj), snap_group=p._group_name_from_cgsnapshot_id( cgsnap_group_obj['id'])) self.assertEqual('available', model_update['status']) def test_create_cgsnapshot_is_empty(self): """test create_cgsnapshot when CG is empty""" driver = mock.MagicMock() driver.VERSION = "VERSION" p = self.proxy( self.default_storage_info, mock.MagicMock(), test_mock.cinder.exception, driver) p.ibm_storage_cli = mock.MagicMock() group_obj = self._create_test_group() cgsnap_group_obj = self._create_test_cgsnapshot(group_obj.id) p.ibm_storage_cli.cmd.cg_snapshots_create.side_effect = ( errors.CgEmptyError('bla', 'bla', ElementTree.Element('bla'))) ex = getattr(p, "_get_exception")() self.assertRaises(ex, p.create_group_snapshot, {}, cgsnap_group_obj, []) def test_create_cgsnapshot_cg_not_exist(self): """test create_cgsnapshot when CG does not exist""" driver = mock.MagicMock() driver.VERSION = "VERSION" p = self.proxy( self.default_storage_info, mock.MagicMock(), test_mock.cinder.exception, driver) p.ibm_storage_cli = mock.MagicMock() group_obj = self._create_test_group() cgsnap_group_obj = self._create_test_cgsnapshot(group_obj.id) p.ibm_storage_cli.cmd.cg_snapshots_create.side_effect = ( errors.CgDoesNotExistError( 'bla', 'bla', ElementTree.Element('bla'))) ex = getattr(p, "_get_exception")() self.assertRaises(ex, p.create_group_snapshot, {}, cgsnap_group_obj, []) def test_create_cgsnapshot_snapshot_limit(self): """test create_cgsnapshot when reached snapshot limit""" driver = mock.MagicMock() driver.VERSION = "VERSION" p = self.proxy( self.default_storage_info, mock.MagicMock(), test_mock.cinder.exception, driver) p.ibm_storage_cli = mock.MagicMock() group_obj = self._create_test_group() cgsnap_group_obj = self._create_test_cgsnapshot(group_obj.id) p.ibm_storage_cli.cmd.cg_snapshots_create.side_effect = ( errors.PoolSnapshotLimitReachedError( 'bla', 'bla', ElementTree.Element('bla'))) ex = getattr(p, "_get_exception")() self.assertRaises(ex, p.create_group_snapshot, {}, cgsnap_group_obj, []) def test_delete_cgsnapshot(self): """test a successful cgsnapshot delete""" driver = mock.MagicMock() driver.VERSION = "VERSION" p = self.proxy( self.default_storage_info, mock.MagicMock(), test_mock.cinder.exception, driver) p.ibm_storage_cli = mock.MagicMock() group_obj = self._create_test_group() cgsnap_group_obj = self._create_test_cgsnapshot(group_obj.id) model_update, snapshots_model_update = p.delete_group_snapshot( {}, cgsnap_group_obj, []) p.ibm_storage_cli.cmd.snap_group_delete.assert_called_once_with( snap_group=p._group_name_from_cgsnapshot_id( cgsnap_group_obj['id'])) self.assertEqual('deleted', model_update['status']) def test_delete_cgsnapshot_cg_does_not_exist(self): """test delete_cgsnapshot with bad CG name""" driver = mock.MagicMock() driver.VERSION = "VERSION" p = self.proxy( self.default_storage_info, mock.MagicMock(), test_mock.cinder.exception, driver) p.ibm_storage_cli = mock.MagicMock() group_obj = self._create_test_group() cgsnap_group_obj = 
self._create_test_cgsnapshot(group_obj.id) p.ibm_storage_cli.cmd.snap_group_delete.side_effect = ( errors.CgDoesNotExistError( 'bla', 'bla', ElementTree.Element('bla'))) ex = getattr(p, "_get_exception")() self.assertRaises(ex, p.delete_group_snapshot, {}, cgsnap_group_obj, []) def test_delete_cgsnapshot_no_space_left_for_snapshots(self): """test delete_cgsnapshot when no space left for snapshots""" driver = mock.MagicMock() driver.VERSION = "VERSION" p = self.proxy( self.default_storage_info, mock.MagicMock(), test_mock.cinder.exception, driver) p.ibm_storage_cli = mock.MagicMock() group_obj = self._create_test_group() cgsnap_group_obj = self._create_test_cgsnapshot(group_obj.id) p.ibm_storage_cli.cmd.snap_group_delete.side_effect = ( errors.PoolSnapshotLimitReachedError( 'bla', 'bla', ElementTree.Element('bla'))) ex = getattr(p, "_get_exception")() self.assertRaises(ex, p.delete_group_snapshot, {}, cgsnap_group_obj, []) def test_delete_cgsnapshot_with_empty_consistency_group(self): """test delete_cgsnapshot with empty consistency group""" driver = mock.MagicMock() driver.VERSION = "VERSION" p = self.proxy( self.default_storage_info, mock.MagicMock(), test_mock.cinder.exception, driver) p.ibm_storage_cli = mock.MagicMock() group_obj = self._create_test_group() cgsnap_group_obj = self._create_test_cgsnapshot(group_obj.id) p.ibm_storage_cli.cmd.snap_group_delete.side_effect = ( errors.CgEmptyError('bla', 'bla', ElementTree.Element('bla'))) ex = getattr(p, "_get_exception")() self.assertRaises(ex, p.delete_group_snapshot, {}, cgsnap_group_obj, []) def test_silent_delete_volume(self): """test _silent_delete_volume fails silently without exception""" driver = mock.MagicMock() driver.VERSION = "VERSION" p = self.proxy( self.default_storage_info, mock.MagicMock(), test_mock.cinder.exception, driver) p.ibm_storage_cli = mock.MagicMock() p.ibm_storage_cli.cmd.vol_delete.side_effect = errors.XCLIError( 'bla', 'bla', ElementTree.Element('bla')) # check no assertion occurs p._silent_delete_volume(TEST_VOLUME) @mock.patch("cinder.volume.volume_utils.group_get_by_id", mock.MagicMock()) @mock.patch("cinder.volume.volume_utils.is_group_a_cg_snapshot_type", mock.MagicMock(return_value=False)) def test_create_cloned_volume_calls_vol_create_and_copy(self): """test create_cloned_volume check if calls the appropriate xiv_backend functions are being called """ driver = mock.MagicMock() driver.VERSION = "VERSION" p = self.proxy( self.default_storage_info, mock.MagicMock(), test_mock.cinder.exception, driver) vol_src = testutils.create_volume(self.ctxt, display_name='bla', size=17, volume_type_id=self.vt['id']) vol_trg = testutils.create_volume(self.ctxt, display_name='bla', size=17, volume_type_id=self.vt['id']) p.ibm_storage_cli = mock.MagicMock() p._cg_name_from_volume = mock.MagicMock(return_value="cg") p.create_cloned_volume(vol_trg, vol_src) p._create_volume = test_mock.MagicMock() p.ibm_storage_cli.cmd.vol_create.assert_called_once_with( pool='WTF32', size_blocks=storage.gigabytes_to_blocks(17), vol=vol_trg['name']) p.ibm_storage_cli.cmd.vol_copy.assert_called_once_with( vol_src=vol_src['name'], vol_trg=vol_trg['name']) @mock.patch("cinder.volume.volume_utils.group_get_by_id", mock.MagicMock()) @mock.patch("cinder.volume.volume_utils.is_group_a_cg_snapshot_type", mock.MagicMock(return_value=False)) def test_handle_created_vol_properties_returns_vol_update(self): """test handle_created_vol_props returns replication enables if replication info is True """ driver = mock.MagicMock() driver.VERSION = "VERSION" 
p = self.proxy( self.default_storage_info, mock.MagicMock(), test_mock.cinder.exception, driver) self.mock_object(xiv_replication, 'VolumeReplication') grp = testutils.create_group(self.ctxt, name='bla', group_type_id='1') volume = testutils.create_volume(self.ctxt, display_name='bla', volume_type_id=self.vt['id']) volume.group = grp ret_val = p.handle_created_vol_properties({'enabled': True}, volume) self.assertEqual(ret_val, {'replication_status': 'enabled'}) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315577.2831202 cinder-27.0.0/cinder/tests/unit/volume/drivers/infortrend/0000775000175000017500000000000000000000000023613 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/volume/drivers/infortrend/__init__.py0000664000175000017500000000000000000000000025712 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/volume/drivers/infortrend/test_infortrend_cli.py0000664000175000017500000024170400000000000030235 0ustar00zuulzuul00000000000000# Copyright (c) 2015 Infortrend Technology, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
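# ---------------------------------------------------------------------------
# Editor's illustrative sketch (not part of the original file): the
# InfortrendCLITestData class below pairs raw "raidcmd" console dumps
# (the get_fake_* helpers) with their expected parsed results (the
# get_test_* helpers).  A parser test built on such fixtures is conceptually
# just the round-trip sketched here; `parser` stands in for whatever
# raidcmd_cli parsing callable the real tests exercise and is an assumption,
# not the actual API.
def _sketch_parser_roundtrip(data, parser):
    """Feed canned CLI text to a parser and compare with the canned result."""
    raw_output = data.get_fake_show_net()          # canned console text
    expected_rc, expected_rows = data.get_test_show_net()
    rc, rows = parser(raw_output)                  # hypothetical parser under test
    assert rc == expected_rc and rows == expected_rows
# ---------------------------------------------------------------------------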
from unittest import mock from cinder.tests.unit import test from cinder.volume.drivers.infortrend.raidcmd_cli import cli_factory as cli class InfortrendCLITestData(object): """CLI Test Data.""" # Infortrend entry fake_lv_id = ['5DE94FF775D81C30', '1234567890', 'HK3345678'] fake_partition_id = ['6A41315B0EDC8EB7', '51B4283E4E159173', '987654321', '123456789', '2667FE351FC505AE', '53F3E98141A2E871'] fake_pair_id = ['55D790F8350B036B', '095A184B0ED2DB10'] fake_snapshot_id = ['2C7A8D211F3B1E36', '60135EE53C14D5EB', '4884610D11FD3335', '5C44BE0A776A2804'] fake_snapshot_name = ['9e8b27e9-568c-44ca-bd7c-2c7af96ab248', '35e8ba6e-3372-4e67-8464-2b68758f3aeb', 'f69696ea-26fc-4f4c-97335-e3ce33ee563', 'cinder-unmanaged-f31d8326-c2d8-4668-'] fake_data_port_ip = ['172.27.0.1', '172.27.0.2', '172.27.0.3', '172.27.0.4', '172.27.0.5', '172.27.0.6'] fake_model = ['DS S12F-G2852-6'] fake_manage_port_ip = ['172.27.0.10'] fake_system_id = ['DEEC'] fake_host_ip = ['172.27.0.2'] fake_target_wwnns = ['100123D02300DEEC', '100123D02310DEEC'] fake_target_wwpns = ['110123D02300DEEC', '120123D02300DEEC', '110123D02310DEEC', '120123D02310DEEC'] fake_initiator_wwnns = ['2234567890123456', '2234567890543216'] fake_initiator_wwpns = ['1234567890123456', '1234567890543216'] fake_initiator_iqn = ['iqn.1991-05.com.infortrend:pc123', 'iqn.1991-05.com.infortrend:pc456'] fake_lun_map = [0, 1, 2] # cinder entry test_provider_location = [( 'system_id^%s@partition_id^%s') % ( int(fake_system_id[0], 16), fake_partition_id[0]), ( 'system_id^%s@partition_id^%s') % ( int(fake_system_id[0], 16), fake_partition_id[1]) ] test_volume = { 'id': '5aa119a8-d25b-45a7-8d1b-88e127885635', 'size': 1, 'name': 'Part-1', 'host': 'infortrend-server1@backend_1#LV-1', 'name_id': '5aa119a8-d25b-45a7-8d1b-88e127885635', 'provider_auth': None, 'project_id': 'project', 'display_name': None, 'display_description': 'Part-1', 'volume_type_id': None, 'status': 'available', 'provider_location': test_provider_location[0], 'volume_attachment': [], } test_volume_1 = { 'id': '5aa119a8-d25b-45a7-8d1b-88e127885634', 'size': 1, 'name': 'Part-1', 'host': 'infortrend-server1@backend_1#LV-1', 'name_id': '5aa119a8-d25b-45a7-8d1b-88e127885635', 'provider_auth': None, 'project_id': 'project', 'display_name': None, 'display_description': 'Part-1', 'volume_type_id': None, 'status': 'in-use', 'provider_location': test_provider_location[1], 'volume_attachment': [], } test_dst_volume = { 'id': '6bb119a8-d25b-45a7-8d1b-88e127885666', 'size': 1, 'name': 'Part-1-Copy', 'host': 'infortrend-server1@backend_1#LV-1', 'name_id': '6bb119a8-d25b-45a7-8d1b-88e127885666', 'provider_auth': None, 'project_id': 'project', 'display_name': None, '_name_id': '6bb119a8-d25b-45a7-8d1b-88e127885666', 'display_description': 'Part-1-Copy', 'volume_type_id': None, 'provider_location': '', 'volume_attachment': [], } test_ref_volume = { 'source-id': fake_partition_id[0], 'size': 1, } test_ref_volume_with_id = { 'source-id': '6bb119a8-d25b-45a7-8d1b-88e127885666', 'size': 1, } test_ref_volume_with_name = { 'source-name': 'import_into_openstack', 'size': 1, } test_snapshot = { 'id': 'ffa9bc5e-1172-4021-acaf-cdcd78a9584d', 'volume_id': test_volume['id'], 'volume_name': test_volume['name'], 'volume_size': 2, 'project_id': 'project', 'display_name': None, 'display_description': 'SI-1', 'volume_type_id': None, 'provider_location': fake_snapshot_id[0], } test_snapshot_without_provider_location = { 'id': 'ffa9bc5e-1172-4021-acaf-cdcd78a9584d', 'volume_id': test_volume['id'], 'volume_name': test_volume['name'], 
'volume_size': 2, 'project_id': 'project', 'display_name': None, 'display_description': 'SI-1', 'volume_type_id': None, } test_iqn = [( 'iqn.2002-10.com.infortrend:raid.uid%s.%s%s%s') % ( int(fake_system_id[0], 16), 1, 0, 1), ( 'iqn.2002-10.com.infortrend:raid.uid%s.%s%s%s') % ( int(fake_system_id[0], 16), 1, 0, 1), ( 'iqn.2002-10.com.infortrend:raid.uid%s.%s%s%s') % ( int(fake_system_id[0], 16), 2, 0, 1), ] test_iscsi_properties = { 'driver_volume_type': 'iscsi', 'data': { 'target_discovered': True, 'target_portal': '%s:3260' % fake_data_port_ip[2], 'target_iqn': test_iqn[0], 'target_lun': fake_lun_map[0], 'volume_id': test_volume['id'], }, } test_iscsi_properties_with_mcs = { 'driver_volume_type': 'iscsi', 'data': { 'target_discovered': True, 'target_portal': '%s:3260' % fake_data_port_ip[4], 'target_iqn': test_iqn[2], 'target_lun': fake_lun_map[0], 'volume_id': test_volume['id'], }, } test_iscsi_properties_with_mcs_1 = { 'driver_volume_type': 'iscsi', 'data': { 'target_discovered': True, 'target_portal': '%s:3260' % fake_data_port_ip[4], 'target_iqn': test_iqn[2], 'target_lun': fake_lun_map[1], 'volume_id': test_volume_1['id'], }, } test_iqn_empty_map = [( 'iqn.2002-10.com.infortrend:raid.uid%s.%s%s%s') % ( int(fake_system_id[0], 16), 0, 0, 1), ] test_iscsi_properties_empty_map = { 'driver_volume_type': 'iscsi', 'data': { 'target_discovered': True, 'target_portal': '%s:3260' % fake_data_port_ip[0], 'target_iqn': test_iqn_empty_map[0], 'target_lun': fake_lun_map[0], 'volume_id': test_volume['id'], }, } test_initiator_target_map = { fake_initiator_wwpns[0]: fake_target_wwpns[0:2], fake_initiator_wwpns[1]: fake_target_wwpns[0:2], } test_fc_properties = { 'driver_volume_type': 'fibre_channel', 'data': { 'target_discovered': True, 'target_lun': fake_lun_map[0], 'target_wwn': fake_target_wwpns[0:2], 'initiator_target_map': test_initiator_target_map, }, } test_initiator_target_map_specific_channel = { fake_initiator_wwpns[0]: [fake_target_wwpns[1]], fake_initiator_wwpns[1]: [fake_target_wwpns[1]], } test_fc_properties_with_specific_channel = { 'driver_volume_type': 'fibre_channel', 'data': { 'target_discovered': True, 'target_lun': fake_lun_map[0], 'target_wwn': [fake_target_wwpns[1]], 'initiator_target_map': test_initiator_target_map_specific_channel, }, } test_target_wwpns_map_multipath_r_model = [ fake_target_wwpns[0], fake_target_wwpns[2], fake_target_wwpns[1], fake_target_wwpns[3], ] test_initiator_target_map_multipath_r_model = { fake_initiator_wwpns[0]: test_target_wwpns_map_multipath_r_model[:], fake_initiator_wwpns[1]: test_target_wwpns_map_multipath_r_model[:], } test_fc_properties_multipath_r_model = { 'driver_volume_type': 'fibre_channel', 'data': { 'target_discovered': True, 'target_lun': fake_lun_map[0], 'target_wwn': test_target_wwpns_map_multipath_r_model[:], 'initiator_target_map': test_initiator_target_map_multipath_r_model, }, } test_initiator_target_map_zoning = { fake_initiator_wwpns[0].lower(): [x.lower() for x in fake_target_wwpns[0:2]], fake_initiator_wwpns[1].lower(): [x.lower() for x in fake_target_wwpns[0:2]], } test_fc_properties_zoning = { 'driver_volume_type': 'fibre_channel', 'data': { 'target_discovered': True, 'target_lun': fake_lun_map[0], 'target_wwn': [x.lower() for x in fake_target_wwpns[0:2]], 'initiator_target_map': test_initiator_target_map_zoning, }, } test_initiator_target_map_zoning_r_model = { fake_initiator_wwpns[0].lower(): [x.lower() for x in fake_target_wwpns[1:3]], fake_initiator_wwpns[1].lower(): [x.lower() for x in fake_target_wwpns[1:3]], } 
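# ---------------------------------------------------------------------------
# Editor's illustrative sketch (not part of the original fixtures): the
# *_initiator_target_map entries above and below all share one shape -- each
# initiator WWPN maps to the list of target WWPNs it may be zoned to, with
# the "zoning" variants additionally lower-casing every WWPN.  A hypothetical
# helper that rebuilds such a map from the fake WWPN lists:
def _sketch_build_initiator_target_map(initiator_wwpns, target_wwpns):
    """Map every initiator WWPN to the same lower-cased target WWPN list."""
    return {initiator.lower(): [target.lower() for target in target_wwpns]
            for initiator in initiator_wwpns}
# e.g. _sketch_build_initiator_target_map(fake_initiator_wwpns,
#                                         fake_target_wwpns[0:2])
# reproduces test_initiator_target_map_zoning above.
# ---------------------------------------------------------------------------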
test_fc_properties_zoning_r_model = { 'driver_volume_type': 'fibre_channel', 'data': { 'target_discovered': True, 'target_lun': fake_lun_map[0], 'target_wwn': [x.lower() for x in fake_target_wwpns[1:3]], 'initiator_target_map': test_initiator_target_map_zoning_r_model, }, } test_fc_terminate_conn_info = { 'driver_volume_type': 'fibre_channel', 'data': { 'initiator_target_map': test_initiator_target_map_zoning, }, } test_connector_iscsi = { 'ip': fake_host_ip[0], 'initiator': fake_initiator_iqn[0], 'host': 'infortrend-server1@backend_1', } test_connector_iscsi_1 = { 'ip': fake_host_ip[0], 'initiator': fake_initiator_iqn[1], 'host': 'infortrend-server1@backend_1', } test_connector_fc = { 'wwpns': fake_initiator_wwpns, 'wwnns': fake_initiator_wwnns, 'host': 'infortrend-server1@backend_1', } fake_pool = { 'pool_name': 'LV-2', 'pool_id': fake_lv_id[1], 'total_capacity_gb': 1000, 'free_capacity_gb': 1000, 'reserved_percentage': 0, 'QoS_support': False, 'thin_provisioning_support': False, } test_pools_full = [{ 'pool_name': 'LV-1', 'pool_id': fake_lv_id[0], 'location_info': 'Infortrend:' + fake_system_id[0], 'total_capacity_gb': round(857982.0 / 1024, 2), 'free_capacity_gb': round(841978.0 / 1024, 2), 'reserved_percentage': 0, 'QoS_support': False, 'thick_provisioning_support': True, 'thin_provisioning_support': False, }] test_volume_states_full = { 'volume_backend_name': 'infortrend_backend_1', 'vendor_name': 'Infortrend', 'driver_version': '99.99', 'storage_protocol': 'iSCSI', 'model_type': 'R', 'status': 'Connected', 'system_id': fake_system_id[0], 'pools': test_pools_full, } test_pools_thin = [{ 'pool_name': 'LV-1', 'pool_id': fake_lv_id[0], 'location_info': 'Infortrend:' + fake_system_id[0], 'total_capacity_gb': round(857982.0 / 1024, 2), 'free_capacity_gb': round(841978.0 / 1024, 2), 'reserved_percentage': 0, 'QoS_support': False, 'thick_provisioning_support': True, 'thin_provisioning_support': True, 'provisioned_capacity_gb': round((40000) / 1024, 2), 'max_over_subscription_ratio': 20.0, }] test_volume_states_thin = { 'volume_backend_name': 'infortrend_backend_1', 'vendor_name': 'Infortrend', 'driver_version': '99.99', 'storage_protocol': 'iSCSI', 'model_type': 'R', 'status': 'Connected', 'system_id': fake_system_id[0], 'pools': test_pools_thin, } test_host = { 'host': 'infortrend-server1@backend_1', 'capabilities': test_volume_states_thin, } test_migrate_volume_states = { 'volume_backend_name': 'infortrend_backend_1', 'vendor_name': 'Infortrend', 'driver_version': '99.99', 'storage_protocol': 'iSCSI', 'pool_name': 'LV-1', 'pool_id': fake_lv_id[1], 'location_info': 'Infortrend:' + fake_system_id[0], 'total_capacity_gb': round(857982.0 / 1024, 2), 'free_capacity_gb': round(841978.0 / 1024, 2), 'reserved_percentage': 0, 'QoS_support': False, } test_migrate_host = { 'host': 'infortrend-server1@backend_1#LV-2', 'capabilities': test_migrate_volume_states, } test_migrate_volume_states_2 = { 'volume_backend_name': 'infortrend_backend_1', 'vendor_name': 'Infortrend', 'driver_version': '99.99', 'storage_protocol': 'iSCSI', 'pool_name': 'LV-1', 'pool_id': fake_lv_id[1], 'location_info': 'Infortrend:' + fake_system_id[0], 'total_capacity_gb': round(857982.0 / 1024, 2), 'free_capacity_gb': round(841978.0 / 1024, 2), 'reserved_percentage': 0, 'QoS_support': False, } test_migrate_host_2 = { 'host': 'infortrend-server1@backend_1#LV-1', 'capabilities': test_migrate_volume_states_2, } fake_host = { 'host': 'infortrend-server1@backend_1', 'capabilities': {}, } fake_volume_id = [test_volume['id'], 
test_dst_volume['id']] fake_lookup_map = { '12345678': { 'initiator_port_wwn_list': [x.lower() for x in fake_initiator_wwpns], 'target_port_wwn_list': [x.lower() for x in fake_target_wwpns[0:2]], }, } fake_lookup_map_r_model = { '12345678': { 'initiator_port_wwn_list': [x.lower() for x in fake_initiator_wwpns[:]], 'target_port_wwn_list': [x.lower() for x in fake_target_wwpns[1:3]], }, } test_new_type = { 'name': 'type0', 'qos_specs_id': None, 'deleted': False, 'extra_specs': {'infortrend:provisioning': 'thin'}, 'id': '28c8f82f-416e-148b-b1ae-2556c032d3c0', } test_diff = {'extra_specs': {'infortrend:provisioning': ('full', 'thin')}} def get_fake_cli_failed(self): return """ ift cli command CLI: No selected device Return: 0x000c RAIDCmd:> """ def get_fake_cli_failed_with_network(self): return """ ift cli command CLI: Not exist: There is no such partition: 3345678 Return: 0x000b RAIDCmd:> """ def get_fake_cli_succeed(self): return """ ift cli command CLI: Successful: 0 mapping(s) shown Return: 0x0000 RAIDCmd:> """ def get_test_show_empty_list(self): return (0, []) def get_test_show_snapshot(self, partition_id=None, snapshot_id=None): if partition_id and snapshot_id: return (0, [{ 'Map': 'No', 'Partition-ID': partition_id, 'SI-ID': snapshot_id, 'Name': '---', 'Activated-time': 'Thu, Jan 09 01:33:11 2020', 'Index': '1', }]) else: return (0, [{ 'Map': 'No', 'Partition-ID': self.fake_partition_id[0], 'SI-ID': self.fake_snapshot_id[0], 'Name': '---', 'Activated-time': 'Thu, Jan 09 01:33:11 2020', 'Index': '1', }, { 'Map': 'No', 'Partition-ID': self.fake_partition_id[0], 'SI-ID': self.fake_snapshot_id[1], 'Name': '---', 'Activated-time': 'Thu, Jan 09 01:35:50 2020', 'Index': '2', }]) def get_test_show_snapshot_named(self): return (0, [{ 'Map': 'No', 'Partition-ID': self.fake_partition_id[0], 'SI-ID': self.fake_snapshot_id[0], 'Name': self.fake_snapshot_name[0], 'Activated-time': 'Thu, Jan 09 01:33:11 2020', 'Index': '1', }, { 'Map': 'No', 'Partition-ID': self.fake_partition_id[1], 'SI-ID': self.fake_snapshot_id[1], 'Name': self.fake_snapshot_name[1], 'Activated-time': 'Thu, Jan 09 01:35:50 2020', 'Index': '1', }]) def get_fake_show_snapshot(self): msg = r""" show si \/\/\/- \ / - \ / - \/-\/- Index SI-ID Name Partition-ID Map Activated-time --------------------------------------------------------------------------------- 1 %s --- %s No Thu, Jan 09 01:33:11 2020 2 %s --- %s No Thu, Jan 09 01:35:50 2020 CLI: Successful: 2 snapshot image(s) shown Return: 0x0000 RAIDCmd:> """ return msg % (self.fake_snapshot_id[0], self.fake_partition_id[0], self.fake_snapshot_id[1], self.fake_partition_id[0]) def get_test_show_snapshot_detail_filled_block(self): return (0, [{ 'Mapped': 'Yes', 'Created-time': 'Wed, Jun 10 10:57:16 2015', 'ID': self.fake_snapshot_id[0], 'Last-modification-time': 'Wed, Jun 10 10:57:16 2015', 'Description': '---', 'Total-filled-block': '1', 'LV-ID': self.fake_lv_id[0], 'Activation-schedule-time': 'Not Actived', 'Mapping': 'CH:0/ID:0/LUN:1', 'Index': '1', 'Used': '0', 'Name': '---', 'Valid-filled-block': '0', 'Partition-ID': self.fake_partition_id[0], }]) def get_test_show_snapshot_detail(self): return (0, [{ 'Mapped': 'Yes', 'Created-time': 'Wed, Jun 10 10:57:16 2015', 'ID': self.fake_snapshot_id[0], 'Last-modification-time': 'Wed, Jun 10 10:57:16 2015', 'Description': '---', 'Total-filled-block': '0', 'LV-ID': self.fake_lv_id[0], 'Activation-schedule-time': 'Not Actived', 'Mapping': 'CH:0/ID:0/LUN:1', 'Index': '1', 'Used': '0', 'Name': '---', 'Valid-filled-block': '0', 'Partition-ID': 
self.fake_partition_id[0], }]) def get_test_show_snapshot_get_manage(self): """Show 4 si for api `list si`: 1.Mapped 2.Managed 3.Free 4.WrongLV""" return (0, [{ 'ID': self.fake_snapshot_id[0], 'Index': '1', 'Name': self.fake_snapshot_name[0], 'Partition-ID': self.fake_partition_id[0], 'LV-ID': self.fake_lv_id[0], 'Created-time': 'Fri, Dec 23 07:54:33 2016', 'Last-modification-time': 'Fri, Dec 23 07:54:33 2016', 'Activated-time': 'Fri, Dec 23 08:29:41 2016', 'Activation-schedule-time': 'Not Actived', 'Used': '0', 'Valid-filled-block': '0', 'Total-filled-block': '0', 'Description': '---', 'Mapped': 'No', 'Mapping': '---', 'Backup-to-Cloud': 'false', 'Status': 'OK', 'Progress': '---', }, { 'ID': self.fake_snapshot_id[1], 'Index': '2', 'Name': self.fake_snapshot_name[1], 'Partition-ID': self.fake_partition_id[1], 'LV-ID': self.fake_lv_id[0], 'Created-time': 'Fri, Dec 23 07:54:33 2016', 'Last-modification-time': 'Fri, Dec 23 07:54:33 2016', 'Activated-time': 'Fri, Dec 23 08:29:41 2016', 'Activation-schedule-time': 'Not Actived', 'Used': '0', 'Valid-filled-block': '0', 'Total-filled-block': '0', 'Description': '---', 'Mapped': 'No', 'Mapping': '---', 'Backup-to-Cloud': 'false', 'Status': 'OK', 'Progress': '---' }, { 'ID': self.fake_snapshot_id[2], 'Index': '1', 'Name': self.fake_snapshot_name[2], 'Partition-ID': self.fake_partition_id[2], 'LV-ID': self.fake_lv_id[1], 'Created-time': 'Fri, Dec 23 07:54:33 2016', 'Last-modification-time': 'Fri, Dec 23 07:54:33 2016', 'Activated-time': 'Fri, Dec 23 08:29:41 2016', 'Activation-schedule-time': 'Not Actived', 'Used': '0', 'Valid-filled-block': '0', 'Total-filled-block': '0', 'Description': '---', 'Mapped': 'No', 'Mapping': '---', 'Backup-to-Cloud': 'false', 'Status': 'OK', 'Progress': '---', }, { 'ID': self.fake_snapshot_id[3], 'Index': '1', 'Name': 'test-get-snapshot-list', # Part ID from get_test_show_partition_detail() 'Partition-ID': '123123123123', 'LV-ID': '987654321', 'Created-time': 'Fri, Dec 23 07:54:33 2016', 'Last-modification-time': 'Fri, Dec 23 07:54:33 2016', 'Activated-time': 'Fri, Dec 23 08:29:41 2016', 'Activation-schedule-time': 'Not Actived', 'Used': '0', 'Valid-filled-block': '0', 'Total-filled-block': '0', 'Description': '---', 'Mapped': 'No', 'Mapping': '---', 'Backup-to-Cloud': 'false', 'Status': 'OK', 'Progress': '---' }]) def get_fake_show_snapshot_detail(self): msg = """ show si -l ID: %s Index: 1 Name: --- Partition-ID: %s LV-ID: %s Created-time: Wed, Jun 10 10:57:16 2015 Last-modification-time: Wed, Jun 10 10:57:16 2015 Activation-schedule-time: Not Actived Used: 0 Valid-filled-block: 0 Total-filled-block: 0 Description: --- Mapped: Yes Mapping: CH:0/ID:0/LUN:1 CLI: Successful: 1 snapshot image(s) shown Return: 0x0000 RAIDCmd:> """ return msg % (self.fake_snapshot_id[0], self.fake_partition_id[0], self.fake_lv_id[0]) def get_test_show_net(self): return (0, [{ 'Slot': 'slotA', 'MAC': '10D02380DEEC', 'ID': '1', 'IPv4': self.fake_data_port_ip[0], 'Mode': 'Disabled', 'IPv6': '---', }, { 'Slot': 'slotB', 'MAC': '10D02390DEEC', 'ID': '1', 'IPv4': self.fake_data_port_ip[1], 'Mode': 'Disabled', 'IPv6': '---', }, { 'Slot': 'slotA', 'MAC': '10D02340DEEC', 'ID': '2', 'IPv4': self.fake_data_port_ip[2], 'Mode': 'Disabled', 'IPv6': '---', }, { 'Slot': 'slotB', 'MAC': '10D02350DEEC', 'ID': '2', 'IPv4': self.fake_data_port_ip[3], 'Mode': 'Disabled', 'IPv6': '---', }, { 'Slot': 'slotA', 'MAC': '10D02310DEEC', 'ID': '4', 'IPv4': self.fake_data_port_ip[4], 'Mode': 'Disabled', 'IPv6': '---', }, { 'Slot': 'slotB', 'MAC': '10D02320DEEC', 'ID': 
'4', 'IPv4': self.fake_data_port_ip[5], 'Mode': 'Disabled', 'IPv6': '---', }, { 'Slot': '---', 'MAC': '10D023077124', 'ID': '32', 'IPv4': '172.27.1.1', 'Mode': 'Disabled', 'IPv6': '---', }]) def get_fake_show_net(self): msg = """ show net ID MAC Mode IPv4 Mode IPv6 Slot --------------------------------------------------------------- 1 10D02380DEEC DHCP %s Disabled --- slotA 1 10D02390DEEC DHCP %s Disabled --- slotB 2 10D02340DEEC DHCP %s Disabled --- slotA 2 10D02350DEEC DHCP %s Disabled --- slotB 4 10D02310DEEC DHCP %s Disabled --- slotA 4 10D02320DEEC DHCP %s Disabled --- slotB 32 10D023077124 DHCP 172.27.1.1 Disabled --- --- CLI: Successful: 2 record(s) found Return: 0x0000 RAIDCmd:> """ return msg % (self.fake_data_port_ip[0], self.fake_data_port_ip[1], self.fake_data_port_ip[2], self.fake_data_port_ip[3], self.fake_data_port_ip[4], self.fake_data_port_ip[5]) def get_test_show_net_detail(self): return (0, [{ 'Slot': 'slotA', 'IPv4-mode': 'DHCP', 'ID': '1', 'IPv6-address': '---', 'Net-mask': '---', 'IPv4-address': '---', 'Route': '---', 'Gateway': '---', 'IPv6-mode': 'Disabled', 'MAC': '00D023877124', 'Prefix-length': '---', }, { 'Slot': '---', 'IPv4-mode': 'DHCP', 'ID': '32', 'IPv6-address': '---', 'Net-mask': '255.255.240.0', 'IPv4-address': '172.27.112.245', 'Route': '---', 'Gateway': '172.27.127.254', 'IPv6-mode': 'Disabled', 'MAC': '00D023077124', 'Prefix-length': '---', }]) def get_fake_show_net_detail(self): msg = """ show net -l ID: 1 MAC: 00D023877124 IPv4-mode: DHCP IPv4-address: --- Net-mask: --- Gateway: --- IPv6-mode: Disabled IPv6-address: --- Prefix-length: --- Route: --- Slot: slotA ID: 32 MAC: 00D023077124 IPv4-mode: DHCP IPv4-address: 172.27.112.245 Net-mask: 255.255.240.0 Gateway: 172.27.127.254 IPv6-mode: Disabled IPv6-address: --- Prefix-length: --- Route: --- Slot: --- CLI: Successful: 3 record(s) found Return: 0x0000 RAIDCmd:> """ return msg def get_test_show_partition(self, volume_id=None, pool_id=None): result = [{ 'ID': self.fake_partition_id[0], 'Used': '20000', 'Name': self.fake_volume_id[0], 'Size': '20000', 'Min-reserve': '20000', 'LV-ID': self.fake_lv_id[0], }, { 'ID': self.fake_partition_id[1], 'Used': '20000', 'Name': self.fake_volume_id[1], 'Size': '20000', 'Min-reserve': '20000', 'LV-ID': self.fake_lv_id[0], }] if volume_id and pool_id: result.append({ 'ID': self.fake_partition_id[2], 'Used': '20000', 'Name': volume_id, 'Size': '20000', 'Min-reserve': '20000', 'LV-ID': pool_id, }) return (0, result) def get_fake_show_partition(self): msg = """ show part ID Name LV-ID Size Used Min-reserve --------------------------------------------------- %s %s %s 20000 20000 20000 %s %s %s 20000 20000 20000 CLI: Successful: 3 partition(s) shown Return: 0x0000 RAIDCmd:> """ return msg % (self.fake_partition_id[0], self.fake_volume_id[0], self.fake_lv_id[0], self.fake_partition_id[1], self.fake_volume_id[1], self.fake_lv_id[0]) def get_test_show_partition_detail_for_map( self, partition_id, mapped='true'): result = [{ 'LV-ID': self.fake_lv_id[0], 'Mapping': 'CH:1/ID:0/LUN:0, CH:1/ID:0/LUN:1', 'Used': '20000', 'Size': '20000', 'ID': partition_id, 'Progress': '---', 'Min-reserve': '20000', 'Last-modification-time': 'Wed, Jan 08 20:23:23 2020', 'Valid-filled-block': '100', 'Name': self.fake_volume_id[0], 'Mapped': mapped, 'Total-filled-block': '100', 'Creation-time': 'Wed, Jan 08 20:23:23 2020', }] return (0, result) def get_test_show_partition_detail(self, volume_id=None, pool_id=None): result = [{ 'LV-ID': self.fake_lv_id[0], 'Mapping': 'CH:1/ID:0/LUN:0, 
CH:1/ID:0/LUN:1, CH:4/ID:0/LUN:0', 'Used': '20000', 'Size': '20000', 'ID': self.fake_partition_id[0], 'Progress': '---', 'Min-reserve': '20000', 'Last-modification-time': 'Wed, Jan 08 20:23:23 2020', 'Valid-filled-block': '100', 'Name': self.fake_volume_id[0], 'Mapped': 'true', 'Total-filled-block': '100', 'Creation-time': 'Wed, Jan 08 20:23:23 2020', }, { 'LV-ID': self.fake_lv_id[0], 'Mapping': '---', 'Used': '20000', 'Size': '20000', 'ID': self.fake_partition_id[1], 'Progress': '---', 'Min-reserve': '20000', 'Last-modification-time': 'Sat, Jan 11 22:18:40 2020', 'Valid-filled-block': '100', 'Name': self.fake_volume_id[1], 'Mapped': 'false', 'Total-filled-block': '100', 'Creation-time': 'Sat, Jan 11 22:18:40 2020', }] if volume_id and pool_id: result.extend([{ 'LV-ID': pool_id, 'Mapping': '---', 'Used': '20000', 'Size': '20000', 'ID': self.fake_partition_id[2], 'Progress': '---', 'Min-reserve': '20000', 'Last-modification-time': 'Sat, Jan 15 22:18:40 2020', 'Valid-filled-block': '100', 'Name': volume_id, 'Mapped': 'false', 'Total-filled-block': '100', 'Creation-time': 'Sat, Jan 15 22:18:40 2020', }, { 'LV-ID': '987654321', 'Mapping': '---', 'Used': '20000', 'Size': '20000', 'ID': '123123123123', 'Progress': '---', 'Min-reserve': '20000', 'Last-modification-time': 'Sat, Jan 12 22:18:40 2020', 'Valid-filled-block': '100', 'Name': volume_id, 'Mapped': 'false', 'Total-filled-block': '100', 'Creation-time': 'Sat, Jan 15 22:18:40 2020', }, { 'LV-ID': self.fake_lv_id[0], 'Mapping': '---', 'Used': '20000', 'Size': '20000', 'ID': '6bb119a8-d25b-45a7-8d1b-88e127885666', 'Progress': '---', 'Min-reserve': '20000', 'Last-modification-time': 'Sat, Jan 16 22:18:40 2020', 'Valid-filled-block': '100', 'Name': volume_id, 'Mapped': 'false', 'Total-filled-block': '100', 'Creation-time': 'Sat, Jan 14 22:18:40 2020', }]) return (0, result) def get_fake_show_partition_detail(self): msg = """ show part -l ID: %s Name: %s LV-ID: %s Size: 20000 Used: 20000 Min-reserve: 20000 Creation-time: Wed, Jan 08 20:23:23 2020 Last-modification-time: Wed, Jan 08 20:23:23 2020 Valid-filled-block: 100 Total-filled-block: 100 Progress: --- Mapped: true Mapping: CH:1/ID:0/LUN:0, CH:1/ID:0/LUN:1, CH:4/ID:0/LUN:0 ID: %s Name: %s LV-ID: %s Size: 20000 Used: 20000 Min-reserve: 20000 Creation-time: Sat, Jan 11 22:18:40 2020 Last-modification-time: Sat, Jan 11 22:18:40 2020 Valid-filled-block: 100 Total-filled-block: 100 Progress: --- Mapped: false Mapping: --- CLI: Successful: 3 partition(s) shown Return: 0x0000 RAIDCmd:> """ return msg % (self.fake_partition_id[0], self.fake_volume_id[0], self.fake_lv_id[0], self.fake_partition_id[1], self.fake_volume_id[1], self.fake_lv_id[0]) def get_test_show_replica_detail_for_migrate( self, src_part_id, dst_part_id, volume_id, status='Completed'): result = [{ 'Pair-ID': self.fake_pair_id[0], 'Name': 'Cinder-Snapshot', 'Source-Device': 'DEEC', 'Source': src_part_id, 'Source-Type': 'LV-Partition', 'Source-Name': volume_id, 'Source-LV': '5DE94FF775D81C30', 'Source-VS': '2C482316298F7A4E', 'Source-Mapped': 'Yes', 'Target-Device': 'DEEC', 'Target': dst_part_id, 'Target-Type': 'LV-Partition', 'Target-Name': volume_id, 'Target-LV': '5DE94FF775D81C30', 'Target-VS': '033EA1FA4EA193EB', 'Target-Mapped': 'No', 'Type': 'Copy', 'Priority': 'Normal', 'Timeout': '---', 'Incremental': '---', 'Compression': '---', 'Status': status, 'Progress': '---', 'Created-time': '01/11/2020 22:20 PM', 'Sync-commence-time': '01/11/2020 22:20 PM', 'Split-time': '01/11/2020 22:20 PM', 'Completed-time': '01/11/2020 22:21 PM', 
'Description': '---', }] return (0, result) def get_test_show_replica_detail_for_si_sync_pair(self): result = [{ 'Pair-ID': self.fake_pair_id[0], 'Name': 'Cinder-Snapshot', 'Source-Device': 'DEEC', 'Source': self.fake_snapshot_id[0], 'Source-Type': 'LV-Partition', 'Source-Name': '', 'Source-LV': '5DE94FF775D81C30', 'Source-VS': '2C482316298F7A4E', 'Source-Mapped': 'Yes', 'Target-Device': 'DEEC', 'Target': self.fake_partition_id[1], 'Target-Type': 'LV-Partition', 'Target-Name': '', 'Target-LV': '5DE94FF775D81C30', 'Target-VS': '033EA1FA4EA193EB', 'Target-Mapped': 'No', 'Type': 'Copy', 'Priority': 'Normal', 'Timeout': '---', 'Incremental': '---', 'Compression': '---', 'Status': 'Copy', 'Progress': '---', 'Created-time': '01/11/2020 22:20 PM', 'Sync-commence-time': '01/11/2020 22:20 PM', 'Split-time': '01/11/2020 22:20 PM', 'Completed-time': '01/11/2020 22:21 PM', 'Description': '---', }] return (0, result) def get_test_show_replica_detail_for_sync_pair(self): result = [{ 'Pair-ID': self.fake_pair_id[0], 'Name': 'Cinder-Snapshot', 'Source-Device': 'DEEC', 'Source': self.fake_partition_id[0], 'Source-Type': 'LV-Partition', 'Source-Name': self.fake_volume_id[0], 'Source-LV': '5DE94FF775D81C30', 'Source-VS': '2C482316298F7A4E', 'Source-Mapped': 'Yes', 'Target-Device': 'DEEC', 'Target': self.fake_partition_id[1], 'Target-Type': 'LV-Partition', 'Target-Name': self.fake_volume_id[1], 'Target-LV': '5DE94FF775D81C30', 'Target-VS': '033EA1FA4EA193EB', 'Target-Mapped': 'No', 'Type': 'Copy', 'Priority': 'Normal', 'Timeout': '---', 'Incremental': '---', 'Compression': '---', 'Status': 'Copy', 'Progress': '---', 'Created-time': '01/11/2020 22:20 PM', 'Sync-commence-time': '01/11/2020 22:20 PM', 'Split-time': '01/11/2020 22:20 PM', 'Completed-time': '01/11/2020 22:21 PM', 'Description': '---', }] return (0, result) def get_test_show_replica_detail(self): result = [{ 'Pair-ID': '4BF246E26966F015', 'Name': 'Cinder-Snapshot', 'Source-Device': 'DEEC', 'Source': self.fake_partition_id[2], 'Source-Type': 'LV-Partition', 'Source-Name': 'Part-2', 'Source-LV': '5DE94FF775D81C30', 'Source-VS': '2C482316298F7A4E', 'Source-Mapped': 'No', 'Target-Device': 'DEEC', 'Target': self.fake_partition_id[3], 'Target-Type': 'LV-Partition', 'Target-Name': 'Part-1-Copy', 'Target-LV': '5DE94FF775D81C30', 'Target-VS': '714B80F0335F6E52', 'Target-Mapped': 'No', 'Type': 'Copy', 'Priority': 'Normal', 'Timeout': '---', 'Incremental': '---', 'Compression': '---', 'Status': 'Completed', 'Progress': '---', 'Created-time': '01/11/2020 22:20 PM', 'Sync-commence-time': '01/11/2020 22:20 PM', 'Split-time': '01/11/2020 22:20 PM', 'Completed-time': '01/11/2020 22:21 PM', 'Description': '---', }, { 'Pair-ID': self.fake_pair_id[0], 'Name': 'Cinder-Migrate', 'Source-Device': 'DEEC', 'Source': self.fake_partition_id[0], 'Source-Type': 'LV-Partition', 'Source-Name': self.fake_volume_id[0], 'Source-LV': '5DE94FF775D81C30', 'Source-VS': '2C482316298F7A4E', 'Source-Mapped': 'Yes', 'Target-Device': 'DEEC', 'Target': self.fake_partition_id[1], 'Target-Type': 'LV-Partition', 'Target-Name': self.fake_volume_id[1], 'Target-LV': '5DE94FF775D81C30', 'Target-VS': '033EA1FA4EA193EB', 'Target-Mapped': 'No', 'Type': 'Mirror', 'Priority': 'Normal', 'Timeout': '---', 'Incremental': '---', 'Compression': '---', 'Status': 'Mirror', 'Progress': '---', 'Created-time': '01/11/2020 22:20 PM', 'Sync-commence-time': '01/11/2020 22:20 PM', 'Split-time': '01/11/2020 22:20 PM', 'Completed-time': '01/11/2020 22:21 PM', 'Description': '---', }, { 'Pair-ID': self.fake_pair_id[1], 
'Name': 'Cinder-Migrate', 'Source-Device': 'DEEC', 'Source': self.fake_partition_id[4], 'Source-Type': 'LV-Partition', 'Source-Name': self.fake_volume_id[0], 'Source-LV': '5DE94FF775D81C30', 'Source-VS': '2C482316298F7A4E', 'Source-Mapped': 'No', 'Target-Device': 'DEEC', 'Target': self.fake_partition_id[5], 'Target-Type': 'LV-Partition', 'Target-Name': self.fake_volume_id[1], 'Target-LV': '5DE94FF775D81C30', 'Target-VS': '714B80F0335F6E52', 'Target-Mapped': 'Yes', 'Type': 'Mirror', 'Priority': 'Normal', 'Timeout': '---', 'Incremental': '---', 'Compression': '---', 'Status': 'Mirror', 'Progress': '---', 'Created-time': '01/11/2020 22:20 PM', 'Sync-commence-time': '01/11/2020 22:20 PM', 'Split-time': '01/11/2020 22:20 PM', 'Completed-time': '01/11/2020 22:21 PM', 'Description': '---', }] return (0, result) def get_fake_show_replica_detail(self): msg = """ show replica -l Pair-ID: 4BF246E26966F015 Name: Cinder-Snapshot Source-Device: DEEC Source: %s Source-Type: LV-Partition Source-Name: Part-2 Source-LV: 5DE94FF775D81C30 Source-VS: 2C482316298F7A4E Source-Mapped: No Target-Device: DEEC Target: %s Target-Type: LV-Partition Target-Name: Part-1-Copy Target-LV: 5DE94FF775D81C30 Target-VS: 714B80F0335F6E52 Target-Mapped: No Type: Copy Priority: Normal Timeout: --- Incremental: --- Compression: --- Status: Completed Progress: --- Created-time: 01/11/2020 22:20 PM Sync-commence-time: 01/11/2020 22:20 PM Split-time: 01/11/2020 22:20 PM Completed-time: 01/11/2020 22:21 PM Description: --- Pair-ID: %s Name: Cinder-Migrate Source-Device: DEEC Source: %s Source-Type: LV-Partition Source-Name: %s Source-LV: 5DE94FF775D81C30 Source-VS: 2C482316298F7A4E Source-Mapped: Yes Target-Device: DEEC Target: %s Target-Type: LV-Partition Target-Name: %s Target-LV: 5DE94FF775D81C30 Target-VS: 033EA1FA4EA193EB Target-Mapped: No Type: Mirror Priority: Normal Timeout: --- Incremental: --- Compression: --- Status: Mirror Progress: --- Created-time: 01/11/2020 22:20 PM Sync-commence-time: 01/11/2020 22:20 PM Split-time: 01/11/2020 22:20 PM Completed-time: 01/11/2020 22:21 PM Description: --- Pair-ID: %s Name: Cinder-Migrate Source-Device: DEEC Source: %s Source-Type: LV-Partition Source-Name: %s Source-LV: 5DE94FF775D81C30 Source-VS: 2C482316298F7A4E Source-Mapped: No Target-Device: DEEC Target: %s Target-Type: LV-Partition Target-Name: %s Target-LV: 5DE94FF775D81C30 Target-VS: 714B80F0335F6E52 Target-Mapped: Yes Type: Mirror Priority: Normal Timeout: --- Incremental: --- Compression: --- Status: Mirror Progress: --- Created-time: 01/11/2020 22:20 PM Sync-commence-time: 01/11/2020 22:20 PM Split-time: 01/11/2020 22:20 PM Completed-time: 01/11/2020 22:21 PM Description: --- CLI: Successful: 3 replication job(s) shown Return: 0x0000 RAIDCmd:> """ return msg % (self.fake_partition_id[2], self.fake_partition_id[3], self.fake_pair_id[0], self.fake_partition_id[0], self.fake_volume_id[0], self.fake_partition_id[1], self.fake_volume_id[1], self.fake_pair_id[1], self.fake_partition_id[4], self.fake_volume_id[0], self.fake_partition_id[5], self.fake_volume_id[1]) def get_test_show_lv(self): return (0, [{ 'Name': 'LV-1', 'LD-amount': '1', 'Available': '841978 MB', 'ID': self.fake_lv_id[0], 'Progress': '---', 'Size': '857982 MB', 'Status': 'On-line', }]) def get_fake_show_lv(self): msg = """ show lv ID Name LD-amount Size Available Progress Status -------------------------------------------------------------- %s LV-1 1 857982 MB 841978 MB --- On-line CLI: Successful: 1 Logical Volumes(s) shown Return: 0x0000 RAIDCmd:> """ return msg 
% self.fake_lv_id[0] def get_test_show_lv_detail(self): return (0, [{ 'Policy': 'Default', 'Status': 'On-line', 'ID': self.fake_lv_id[0], 'Available': '841978 MB', 'Expandable-size': '0 MB', 'Name': 'LV-1', 'Size': '857982 MB', 'LD-amount': '1', 'Progress': '---', }]) def get_fake_show_lv_detail(self): msg = """ show lv -l ID: %s Name: LV-1 LD-amount: 1 Size: 857982 MB Available: 841978 MB Expandable-size: 0 MB Policy: Default Progress: --- Status: On-line CLI: Successful: 1 Logical Volumes(s) shown Return: 0x0000 RAIDCmd:> """ return msg % self.fake_lv_id[0] def get_test_show_lv_tier_for_migration(self): return (0, [{ 'LV-Name': 'LV-1', 'LV-ID': self.fake_lv_id[1], 'Tier': '0', 'Size': '418.93 GB', 'Used': '10 GB(2.4%)', 'Data Service': '0 MB(0.0%)', 'Reserved Ratio': '10.0%', }, { 'LV-Name': 'LV-1', 'LV-ID': self.fake_lv_id[1], 'Tier': '3', 'Size': '931.02 GB', 'Used': '0 MB(0.0%)', 'Data Service': '0 MB(0.0%)', 'Reserved Ratio': '0.0%', }]) def get_test_show_lv_tier(self): return (0, [{ 'LV-Name': 'LV-1', 'LV-ID': self.fake_lv_id[0], 'Tier': '0', 'Size': '418.93 GB', 'Used': '10 GB(2.4%)', 'Data Service': '0 MB(0.0%)', 'Reserved Ratio': '10.0%', }, { 'LV-Name': 'LV-1', 'LV-ID': self.fake_lv_id[0], 'Tier': '3', 'Size': '931.02 GB', 'Used': '0 MB(0.0%)', 'Data Service': '0 MB(0.0%)', 'Reserved Ratio': '0.0%', }]) def get_fake_show_lv_tier(self): msg = """ show lv tier LV-Name LV-ID Tier Size Used Data Service Reserved Ratio ------------------------------------------------------------------------------ LV-1 %s 0 418.93 GB 10 GB(2.4%%) 0 MB(0.0%%) 10.0%% LV-1 %s 3 931.02 GB 0 MB(0.0%%) 0 MB(0.0%%) 0.0%% CLI: Successful: 2 lv tiering(s) shown Return: 0x0000 RAIDCmd:> """ return msg % (self.fake_lv_id[0], self.fake_lv_id[0]) def get_test_show_device(self): return (0, [{ 'ID': self.fake_system_id[0], 'Connected-IP': self.fake_manage_port_ip[0], 'Name': '---', 'Index': '0*', 'JBOD-ID': 'N/A', 'Capacity': '1.22 TB', 'Model': self.fake_model[0], 'Service-ID': '8445676', }]) def get_fake_show_device(self): msg = """ show device Index ID Model Name Connected-IP JBOD-ID Capacity Service-ID ------------------------------------------------------------------------ 0* %s %s --- %s N/A 1.22 TB 8445676 CLI: Successful: 1 device(s) found Return: 0x0000 RAIDCmd:> """ return msg % (self.fake_system_id[0], self.fake_model[0], self.fake_manage_port_ip[0]) def get_test_show_channel_single(self): return (0, [{ 'ID': '112', 'defClock': 'Auto', 'Type': 'FIBRE', 'Mode': 'Host', 'Width': '---', 'Ch': '0', 'MCS': 'N/A', 'curClock': '---', }, { 'ID': '0', 'defClock': 'Auto', 'Type': 'NETWORK', 'Mode': 'Host', 'Width': 'iSCSI', 'Ch': '1', 'MCS': '0', 'curClock': '---', }]) def get_test_show_channel_with_mcs(self): return (0, [{ 'ID': '112', 'defClock': 'Auto', 'Type': 'FIBRE', 'Mode': 'Host', 'Width': '---', 'Ch': '0', 'MCS': 'N/A', 'curClock': '---', }, { 'ID': '0', 'defClock': 'Auto', 'Type': 'NETWORK', 'Mode': 'Host', 'Width': 'iSCSI', 'Ch': '1', 'MCS': '1', 'curClock': '---', }, { 'ID': '0', 'defClock': 'Auto', 'Type': 'NETWORK', 'Mode': 'Host', 'Width': 'iSCSI', 'Ch': '2', 'MCS': '1', 'curClock': '---', }, { 'ID': '---', 'defClock': '6.0 Gbps', 'Type': 'SAS', 'Mode': 'Drive', 'Width': 'SAS', 'Ch': '3', 'MCS': 'N/A', 'curClock': '6.0 Gbps', }, { 'ID': '0', 'defClock': 'Auto', 'Type': 'NETWORK', 'Mode': 'Host', 'Width': 'iSCSI', 'Ch': '4', 'MCS': '2', 'curClock': '---', }, { 'ID': '112', 'defClock': 'Auto', 'Type': 'FIBRE', 'Mode': 'Host', 'Width': '---', 'Ch': '5', 'MCS': 'N/A', 'curClock': '---', }]) def 
get_test_show_channel_without_mcs(self): return (0, [{ 'ID': '112', 'defClock': 'Auto', 'Type': 'FIBRE', 'Mode': 'Host', 'Width': '---', 'Ch': '0', 'curClock': '---', }, { 'ID': '0', 'defClock': 'Auto', 'Type': 'NETWORK', 'Mode': 'Host', 'Width': 'iSCSI', 'Ch': '1', 'curClock': '---', }, { 'ID': '0', 'defClock': 'Auto', 'Type': 'NETWORK', 'Mode': 'Host', 'Width': 'iSCSI', 'Ch': '2', 'curClock': '---', }, { 'ID': '---', 'defClock': '6.0 Gbps', 'Type': 'SAS', 'Mode': 'Drive', 'Width': 'SAS', 'Ch': '3', 'curClock': '6.0 Gbps', }, { 'ID': '0', 'defClock': 'Auto', 'Type': 'NETWORK', 'Mode': 'Host', 'Width': 'iSCSI', 'Ch': '4', 'curClock': '---', }, { 'ID': '112', 'defClock': 'Auto', 'Type': 'FIBRE', 'Mode': 'Host', 'Width': '---', 'Ch': '5', 'curClock': '---', }]) def get_test_show_channel_with_diff_target_id(self): return (0, [{ 'ID': '32', 'defClock': 'Auto', 'Type': 'FIBRE', 'Mode': 'Host', 'Width': '---', 'Ch': '0', 'MCS': 'N/A', 'curClock': '---', }, { 'ID': '0', 'defClock': 'Auto', 'Type': 'NETWORK', 'Mode': 'Host', 'Width': 'iSCSI', 'Ch': '1', 'MCS': '0', 'curClock': '---', }, { 'ID': '0', 'defClock': 'Auto', 'Type': 'NETWORK', 'Mode': 'Host', 'Width': 'iSCSI', 'Ch': '2', 'MCS': '1', 'curClock': '---', }, { 'ID': '---', 'defClock': '6.0 Gbps', 'Type': 'SAS', 'Mode': 'Drive', 'Width': 'SAS', 'Ch': '3', 'MCS': 'N/A', 'curClock': '6.0 Gbps', }, { 'ID': '0', 'defClock': 'Auto', 'Type': 'NETWORK', 'Mode': 'Host', 'Width': 'iSCSI', 'Ch': '4', 'MCS': '2', 'curClock': '---', }, { 'ID': '48', 'defClock': 'Auto', 'Type': 'FIBRE', 'Mode': 'Host', 'Width': '---', 'Ch': '5', 'MCS': 'N/A', 'curClock': '---', }]) def get_test_show_channel(self): return (0, [{ 'ID': '112', 'defClock': 'Auto', 'Type': 'FIBRE', 'Mode': 'Host', 'Width': '---', 'Ch': '0', 'MCS': 'N/A', 'curClock': '---', }, { 'ID': '0', 'defClock': 'Auto', 'Type': 'NETWORK', 'Mode': 'Host', 'Width': 'iSCSI', 'Ch': '1', 'MCS': '0', 'curClock': '---', }, { 'ID': '0', 'defClock': 'Auto', 'Type': 'NETWORK', 'Mode': 'Host', 'Width': 'iSCSI', 'Ch': '2', 'MCS': '1', 'curClock': '---', }, { 'ID': '---', 'defClock': '6.0 Gbps', 'Type': 'SAS', 'Mode': 'Drive', 'Width': 'SAS', 'Ch': '3', 'MCS': 'N/A', 'curClock': '6.0 Gbps', }, { 'ID': '0', 'defClock': 'Auto', 'Type': 'NETWORK', 'Mode': 'Host', 'Width': 'iSCSI', 'Ch': '4', 'MCS': '2', 'curClock': '---', }, { 'ID': '112', 'defClock': 'Auto', 'Type': 'FIBRE', 'Mode': 'Host', 'Width': '---', 'Ch': '5', 'MCS': 'N/A', 'curClock': '---', }]) def get_fake_show_channel(self): msg = """ show ch Ch Mode Type defClock curClock Width ID MCS --------------------------------------------------------- 0 Host FIBRE Auto --- --- 112 N/A 1 Host NETWORK Auto --- iSCSI 0 0 2 Host NETWORK Auto --- iSCSI 0 1 3 Drive SAS 6.0 Gbps 6.0 Gbps SAS --- N/A 4 Host NETWORK Auto --- iSCSI 0 2 5 Host FIBRE Auto --- --- 112 N/A CLI: Successful: : 6 channel(s) shown Return: 0x0000 RAIDCmd:> """ return msg def get_test_show_channel_r_model_diff_target_id(self): return (0, [{ 'Mode': 'Host', 'AID': '32', 'defClock': 'Auto', 'MCS': 'N/A', 'Ch': '0', 'BID': '33', 'curClock': '---', 'Width': '---', 'Type': 'FIBRE', }, { 'Mode': 'Host', 'AID': '0', 'defClock': 'Auto', 'MCS': '0', 'Ch': '1', 'BID': '1', 'curClock': '---', 'Width': 'iSCSI', 'Type': 'NETWORK', }, { 'Mode': 'Host', 'AID': '0', 'defClock': 'Auto', 'MCS': '1', 'Ch': '2', 'BID': '1', 'curClock': '---', 'Width': 'iSCSI', 'Type': 'NETWORK', }, { 'Mode': 'Drive', 'AID': '---', 'defClock': '6.0 Gbps', 'MCS': 'N/A', 'Ch': '3', 'BID': '---', 'curClock': '6.0 Gbps', 'Width': 'SAS', 'Type': 
'SAS', }, { 'Mode': 'Host', 'AID': '0', 'defClock': 'Auto', 'MCS': '2', 'Ch': '4', 'BID': '1', 'curClock': '---', 'Width': 'iSCSI', 'Type': 'NETWORK', }, { 'Mode': 'Host', 'AID': '48', 'defClock': 'Auto', 'MCS': 'N/A', 'Ch': '5', 'BID': '49', 'curClock': '---', 'Width': '---', 'Type': 'FIBRE', }]) def get_test_show_channel_r_model(self): return (0, [{ 'Mode': 'Host', 'AID': '112', 'defClock': 'Auto', 'MCS': 'N/A', 'Ch': '0', 'BID': '113', 'curClock': '---', 'Width': '---', 'Type': 'FIBRE', }, { 'Mode': 'Host', 'AID': '0', 'defClock': 'Auto', 'MCS': '0', 'Ch': '1', 'BID': '1', 'curClock': '---', 'Width': 'iSCSI', 'Type': 'NETWORK', }, { 'Mode': 'Host', 'AID': '0', 'defClock': 'Auto', 'MCS': '1', 'Ch': '2', 'BID': '1', 'curClock': '---', 'Width': 'iSCSI', 'Type': 'NETWORK', }, { 'Mode': 'Drive', 'AID': '---', 'defClock': '6.0 Gbps', 'MCS': 'N/A', 'Ch': '3', 'BID': '---', 'curClock': '6.0 Gbps', 'Width': 'SAS', 'Type': 'SAS', }, { 'Mode': 'Host', 'AID': '0', 'defClock': 'Auto', 'MCS': '2', 'Ch': '4', 'BID': '1', 'curClock': '---', 'Width': 'iSCSI', 'Type': 'NETWORK', }, { 'Mode': 'Host', 'AID': '112', 'defClock': 'Auto', 'MCS': 'N/A', 'Ch': '5', 'BID': '113', 'curClock': '---', 'Width': '---', 'Type': 'FIBRE', }]) def get_fake_show_channel_r_model(self): msg = """ show ch Ch Mode Type defClock curClock Width AID BID MCS ---------------------------------------------------------------- 0 Host FIBRE Auto --- --- 112 113 N/A 1 Host NETWORK Auto --- iSCSI 0 1 0 2 Host NETWORK Auto --- iSCSI 0 1 1 3 Drive SAS 6.0 Gbps 6.0 Gbps SAS --- --- N/A 4 Host NETWORK Auto --- iSCSI 0 1 2 5 Host FIBRE Auto --- --- 112 113 N/A CLI: Successful: : 9 channel(s) shown Return: 0x0000 RAIDCmd:> """ return msg def get_show_map_with_lun_map_on_zoning(self): return (0, [{ 'Ch': '0', 'LUN': '0', 'Media': 'PART', 'Host-ID': self.fake_initiator_wwpns[0], 'Target': '112', 'Name': 'Part-1', 'ID': self.fake_partition_id[0], }]) def get_test_show_map(self, partition_id=None, channel_id=None): if partition_id and channel_id: return (0, [{ 'Ch': channel_id, 'LUN': '0', 'Media': 'PART', 'Host-ID': '---', 'Target': '0', 'Name': 'Part-1', 'ID': partition_id, }, { 'Ch': channel_id, 'LUN': '1', 'Media': 'PART', 'Host-ID': '---', 'Target': '0', 'Name': 'Part-1', 'ID': partition_id, }]) else: return (0, [{ 'Ch': '1', 'LUN': '0', 'Media': 'PART', 'Host-ID': self.fake_initiator_iqn[0], 'Target': '0', 'Name': 'Part-1', 'ID': self.fake_partition_id[0], }, { 'Ch': '1', 'LUN': '1', 'Media': 'PART', 'Host-ID': self.fake_initiator_iqn[0], 'Target': '0', 'Name': 'Part-1', 'ID': self.fake_partition_id[0], }, { 'Ch': '4', 'LUN': '0', 'Media': 'PART', 'Host-ID': self.fake_initiator_iqn[0], 'Target': '0', 'Name': 'Part-1', 'ID': self.fake_partition_id[0], }]) def get_test_show_map_fc(self): return (0, [{ 'Ch': '0', 'LUN': '0', 'Media': 'PART', 'Host-ID': self.fake_initiator_wwpns[0], 'Target': '112', 'Name': 'Part-1', 'ID': self.fake_partition_id[0], }, { 'Ch': '0', 'LUN': '0', 'Media': 'PART', 'Host-ID': self.fake_initiator_wwpns[1], 'Target': '112', 'Name': 'Part-1', 'ID': self.fake_partition_id[0], }, { 'Ch': '5', 'LUN': '0', 'Media': 'PART', 'Host-ID': self.fake_initiator_wwpns[0], 'Target': '112', 'Name': 'Part-1', 'ID': self.fake_partition_id[0], }, { 'Ch': '5', 'LUN': '0', 'Media': 'PART', 'Host-ID': self.fake_initiator_wwpns[1], 'Target': '112', 'Name': 'Part-1', 'ID': self.fake_partition_id[0], }]) def get_test_show_map_multimap(self): return (0, [{ 'Ch': '1', 'LUN': '0', 'Media': 'PART', 'Host-ID': '---', 'Target': '0', 'Name': 'Part-1', 
'ID': self.fake_partition_id[0], }, { 'Ch': '1', 'LUN': '1', 'Media': 'PART', 'Host-ID': '---', 'Target': '0', 'Name': 'Part-1', 'ID': self.fake_partition_id[0], }, { 'Ch': '4', 'LUN': '0', 'Media': 'PART', 'Host-ID': '210000E08B0AADE1', 'Target': '0', 'Name': 'Part-1', 'ID': self.fake_partition_id[0], }, { 'Ch': '4', 'LUN': '0', 'Media': 'PART', 'Host-ID': '210000E08B0AADE2', 'Target': '0', 'Name': 'Part-1', 'ID': self.fake_partition_id[0], }]) def get_fake_show_map(self): msg = """ show map Ch Target LUN Media Name ID Host-ID ----------------------------------------------------------- 1 0 0 PART Part-1 %s %s 1 0 1 PART Part-1 %s %s 4 0 0 PART Part-1 %s %s CLI: Successful: 3 mapping(s) shown Return: 0x0000 RAIDCmd:> """ return msg % (self.fake_partition_id[0], self.fake_initiator_iqn[0], self.fake_partition_id[0], self.fake_initiator_iqn[0], self.fake_partition_id[0], self.fake_initiator_iqn[0]) def get_test_show_license_full(self): return (0, { 'Local Volume Copy': { 'Support': False, 'Amount': '8/256', }, 'Synchronous Remote Mirror': { 'Support': False, 'Amount': '8/256', }, 'Snapshot': { 'Support': False, 'Amount': '1024/16384', }, 'Self-Encryption Drives': { 'Support': False, 'Amount': '---', }, 'Compression': { 'Support': False, 'Amount': '---', }, 'Local volume Mirror': { 'Support': False, 'Amount': '8/256', }, 'Storage Tiering': { 'Support': False, 'Amount': '---', }, 'Asynchronous Remote Mirror': { 'Support': False, 'Amount': '8/256', }, 'Scale-out': { 'Support': False, 'Amount': 'Not Support', }, 'Thin Provisioning': { 'Support': False, 'Amount': '---', }, 'Max JBOD': { 'Support': False, 'Amount': '15', }, 'EonPath': { 'Support': False, 'Amount': '---', } }) def get_test_show_license_thin(self): return (0, { 'Local Volume Copy': { 'Support': False, 'Amount': '8/256', }, 'Synchronous Remote Mirror': { 'Support': False, 'Amount': '8/256', }, 'Snapshot': { 'Support': False, 'Amount': '1024/16384', }, 'Self-Encryption Drives': { 'Support': False, 'Amount': '---', }, 'Compression': { 'Support': False, 'Amount': '---', }, 'Local volume Mirror': { 'Support': False, 'Amount': '8/256', }, 'Storage Tiering': { 'Support': False, 'Amount': '---', }, 'Asynchronous Remote Mirror': { 'Support': False, 'Amount': '8/256', }, 'Scale-out': { 'Support': False, 'Amount': 'Not Support', }, 'Thin Provisioning': { 'Support': True, 'Amount': '---', }, 'Max JBOD': { 'Support': False, 'Amount': '15', }, 'EonPath': { 'Support': False, 'Amount': '---', } }) def get_fake_show_license(self): msg = """ show license License Amount(Partition/Subsystem) Expired ------------------------------------------------------------------ EonPath --- Expired Scale-out Not Support --- Snapshot 1024/16384 Expired Local Volume Copy 8/256 Expired Local volume Mirror 8/256 Expired Synchronous Remote Mirror 8/256 Expired Asynchronous Remote Mirror 8/256 Expired Compression --- Expired Thin Provisioning --- Expired Storage Tiering --- Expired Max JBOD 15 Expired Self-Encryption Drives --- Expired CLI: Successful Return: 0x0000 RAIDCmd:> """ return msg def get_test_show_wwn_with_g_model(self): return (0, [{ 'ID': 'ID:112', 'WWPN': self.fake_target_wwpns[0], 'CH': '0', 'WWNN': self.fake_target_wwnns[0], }, { 'ID': 'ID:112', 'WWPN': self.fake_target_wwpns[1], 'CH': '5', 'WWNN': self.fake_target_wwnns[0], }]) def get_test_show_wwn_with_diff_target_id(self): return (0, [{ 'ID': 'AID:32', 'WWPN': self.fake_target_wwpns[0], 'CH': '0', 'WWNN': self.fake_target_wwnns[0], }, { 'ID': 'BID:33', 'WWPN': self.fake_target_wwpns[2], 'CH': '0', 
'WWNN': self.fake_target_wwnns[1], }, { 'ID': 'AID:48', 'WWPN': self.fake_target_wwpns[1], 'CH': '5', 'WWNN': self.fake_target_wwnns[0], }, { 'ID': 'BID:49', 'WWPN': self.fake_target_wwpns[3], 'CH': '5', 'WWNN': self.fake_target_wwnns[1], }]) def get_test_show_wwn(self): return (0, [{ 'ID': 'AID:112', 'WWPN': self.fake_target_wwpns[0], 'CH': '0', 'WWNN': self.fake_target_wwnns[0], }, { 'ID': 'BID:113', 'WWPN': self.fake_target_wwpns[2], 'CH': '0', 'WWNN': self.fake_target_wwnns[1], }, { 'ID': 'AID:112', 'WWPN': self.fake_target_wwpns[1], 'CH': '5', 'WWNN': self.fake_target_wwnns[0], }, { 'ID': 'BID:113', 'WWPN': self.fake_target_wwpns[3], 'CH': '5', 'WWNN': self.fake_target_wwnns[1], }]) def get_fake_show_wwn(self): msg = """ show wwn WWN entries in controller for host channels: CH ID WWNN WWPN ------------------------------------------------- 0 AID:112 %s %s 0 BID:113 %s %s 5 AID:112 %s %s 5 BID:113 %s %s CLI: Successful Return: 0x0000 RAIDCmd:> """ return msg % (self.fake_target_wwnns[0], self.fake_target_wwpns[0], self.fake_target_wwnns[1], self.fake_target_wwpns[2], self.fake_target_wwnns[0], self.fake_target_wwpns[1], self.fake_target_wwnns[1], self.fake_target_wwpns[3]) def get_test_show_iqn(self): return (0, [{ 'Name': self.fake_initiator_iqn[0][-16:], 'IQN': self.fake_initiator_iqn[0], 'User': '---', 'Password': '---', 'Target': '---', 'Target-Password': '---', 'IP': '0.0.0.0', 'Mask': '0.0.0.0', }]) def get_fake_show_iqn(self): msg = """ show iqn Detected host IQN: IQN ---------------------------------------- %s List of initiator IQN(s): -------------------------- Name: %s IQN: %s User: --- Password: --- Target: --- Target-Password: --- IP: 0.0.0.0 Mask: 0.0.0.0 CLI: Successful: 1 initiator iqn(s) shown Return: 0x0000 RAIDCmd:> """ return msg % (self.fake_initiator_iqn[0], self.fake_initiator_iqn[0][-16:], self.fake_initiator_iqn[0]) def get_test_show_host(self): return (0, [{ 'Fibre connection option': 'Point to point only', 'Max queued count': '1024', 'Max LUN per ID': '64', 'CHAP': 'Disabled', 'Jumbo frame': 'Disabled', 'Max concurrent LUN connection': '4', 'LUN connection reserved tags': '4', 'Peripheral device type': 'No Device Present (Type=0x7f)', 'Peripheral device qualifier': 'Connected', 'Removable media support': 'Disabled', 'LUN applicability': 'First Undefined LUN', 'Supported CHS Cylinder': 'Variable', 'Supported CHS Head': 'Variable', 'Supported CHS Sector': 'Variable', }]) def get_fake_show_host(self): msg = """ show host Fibre connection option: Point to point only Max queued count: 1024 Max LUN per ID: 64 CHAP: Disabled Jumbo frame: Disabled Max concurrent LUN connection: 4 LUN connection reserved tags: 4 Peripheral device type: No Device Present (Type=0x7f) Peripheral device qualifier: Connected Removable media support: Disabled LUN applicability: First Undefined LUN Supported CHS Cylinder: Variable Supported CHS Head: Variable Supported CHS Sector: Variable CLI: Successful Return: 0x0000 RAIDCmd:> """ return msg def get_fake_discovery(self, target_iqns, target_portals): template = '%s,1 %s' if len(target_iqns) == 1: result = template % (target_portals[0], target_iqns[0]) return (0, result) result = [] for i in range(len(target_iqns)): result.append(template % ( target_portals[i], target_iqns[i])) return (0, '\n'.join(result)) class Fake_cinder_object(object): id = None def __init__(self, test_volume): self.id = test_volume class Fake_cinder_snapshot(Fake_cinder_object): provider_location = None def __init__(self, id, provider_location): self.id = id 
self.provider_location = provider_location fake_cinder_volumes = [Fake_cinder_object(test_dst_volume['id'])] fake_cinder_snapshots = [Fake_cinder_object(fake_snapshot_name[1])] class InfortrendCLITestCase(test.TestCase): CommandList = ['CreateLD', 'CreateLV', 'CreatePartition', 'DeletePartition', 'CreateMap', 'DeleteMap', 'CreateSnapshot', 'DeleteSnapshot', 'CreateReplica', 'DeleteReplica', 'CreateIQN', 'DeleteIQN', 'ShowLD', 'ShowLV', 'ShowPartition', 'ShowSnapshot', 'ShowDevice', 'ShowChannel', 'ShowDisk', 'ShowMap', 'ShowNet', 'ShowLicense', 'ShowWWN', 'ShowReplica', 'ShowIQN', 'ShowHost', 'ConnectRaid', 'SetPartition', 'SetLV'] def __init__(self, *args, **kwargs): super(InfortrendCLITestCase, self).__init__(*args, **kwargs) self.cli_data = InfortrendCLITestData() def _cli_set(self, cli, fake_result): cli_conf = { 'path': '', 'password': '', 'ip': '', 'cli_retry_time': 1, 'raidcmd_timeout': 60, 'cli_cache': False, 'pid': 12345, 'fd': 10, } cli = cli(cli_conf) cli._execute = mock.Mock(return_value=fake_result) return cli def _cli_multi_set(self, cli, fake_result_list): cli_conf = { 'path': '', 'password': '', 'ip': '', 'cli_retry_time': 5, 'raidcmd_timeout': 60, 'cli_cache': False, 'pid': 12345, 'fd': 10, } cli = cli(cli_conf) cli._execute = mock.Mock(side_effect=fake_result_list) return cli def _test_command_succeed(self, command): fake_cli_succeed = self.cli_data.get_fake_cli_succeed() test_command = self._cli_set(command, fake_cli_succeed) rc, out = test_command.execute() self.assertEqual(0, rc) def _test_command_failed(self, command): fake_cli_failed = self.cli_data.get_fake_cli_failed() test_command = self._cli_set(command, fake_cli_failed) rc, out = test_command.execute() self.assertEqual(int('0x000c', 16), rc) def _test_command_failed_retry_succeed(self, log_error, command): log_error.reset_mock() LOG_ERROR_STR = ( 'Retry %(retry)s times: %(method)s Failed %(rc)s: %(reason)s' ) fake_result_list = [ self.cli_data.get_fake_cli_failed(), self.cli_data.get_fake_cli_failed_with_network(), self.cli_data.get_fake_cli_succeed(), ] test_command = self._cli_multi_set(command, fake_result_list) rc, out = test_command.execute() self.assertEqual(11, rc) expect_log_error = [ mock.call(LOG_ERROR_STR, { 'retry': 1, 'method': test_command.__class__.__name__, 'rc': int('0x000c', 16), 'reason': 'No selected device', }), mock.call(LOG_ERROR_STR, { 'retry': 2, 'method': test_command.__class__.__name__, 'rc': int('0x000b', 16), 'reason': 'Not exist: There is no such partition: 3345678', }) ] log_error.assert_has_calls(expect_log_error) def _test_command_failed_retry_timeout(self, log_error, command): log_error.reset_mock() LOG_ERROR_STR = ( 'Retry %(retry)s times: %(method)s Failed %(rc)s: %(reason)s' ) fake_result_list = [ self.cli_data.get_fake_cli_failed(), self.cli_data.get_fake_cli_failed(), self.cli_data.get_fake_cli_failed(), self.cli_data.get_fake_cli_failed(), self.cli_data.get_fake_cli_failed(), ] test_command = self._cli_multi_set(command, fake_result_list) rc, out = test_command.execute() self.assertEqual(int('0x000c', 16), rc) self.assertEqual('No selected device', out) expect_log_error = [ mock.call(LOG_ERROR_STR, { 'retry': 1, 'method': test_command.__class__.__name__, 'rc': int('0x000c', 16), 'reason': 'No selected device', }), mock.call(LOG_ERROR_STR, { 'retry': 2, 'method': test_command.__class__.__name__, 'rc': int('0x000c', 16), 'reason': 'No selected device', }), mock.call(LOG_ERROR_STR, { 'retry': 3, 'method': test_command.__class__.__name__, 'rc': int('0x000c', 16), 'reason': 'No 
selected device', }), mock.call(LOG_ERROR_STR, { 'retry': 4, 'method': test_command.__class__.__name__, 'rc': int('0x000c', 16), 'reason': 'No selected device', }), mock.call(LOG_ERROR_STR, { 'retry': 5, 'method': test_command.__class__.__name__, 'rc': int('0x000c', 16), 'reason': 'No selected device', }) ] log_error.assert_has_calls(expect_log_error) def _test_show_command(self, fake_data, test_data, command, *params): test_command = self._cli_set(command, fake_data) rc, out = test_command.execute(*params) self.assertEqual(test_data[0], rc) if isinstance(out, list): for i in range(len(test_data[1])): self.assertDictEqual(test_data[1][i], out[i]) else: self.assertDictEqual(test_data[1], out) @mock.patch.object(cli.LOG, 'debug', mock.Mock()) def test_cli_all_command_execute(self): for command in self.CommandList: self._test_command_succeed(getattr(cli, command)) self._test_command_failed(getattr(cli, command)) @mock.patch.object(cli.LOG, 'error') def test_cli_all_command_execute_retry_succeed(self, log_error): for command in self.CommandList: self._test_command_failed_retry_succeed( log_error, getattr(cli, command)) @mock.patch.object(cli.LOG, 'error') def test_cli_all_command_execute_retry_timeout(self, log_error): for command in self.CommandList: self._test_command_failed_retry_timeout( log_error, getattr(cli, command)) @mock.patch.object(cli.LOG, 'debug', mock.Mock()) def test_show_snapshot(self): self._test_show_command( self.cli_data.get_fake_show_snapshot(), self.cli_data.get_test_show_snapshot(), cli.ShowSnapshot) @mock.patch.object(cli.LOG, 'debug', mock.Mock()) def test_show_snapshot_detail(self): self._test_show_command( self.cli_data.get_fake_show_snapshot_detail(), self.cli_data.get_test_show_snapshot_detail(), cli.ShowSnapshot, '-l') @mock.patch.object(cli.LOG, 'debug', mock.Mock()) def test_show_net(self): self._test_show_command( self.cli_data.get_fake_show_net(), self.cli_data.get_test_show_net(), cli.ShowNet) @mock.patch.object(cli.LOG, 'debug', mock.Mock()) def test_show_detail_net(self): self._test_show_command( self.cli_data.get_fake_show_net_detail(), self.cli_data.get_test_show_net_detail(), cli.ShowNet, '-l') @mock.patch.object(cli.LOG, 'debug', mock.Mock()) def test_show_partition(self): self._test_show_command( self.cli_data.get_fake_show_partition(), self.cli_data.get_test_show_partition(), cli.ShowPartition) @mock.patch.object(cli.LOG, 'debug', mock.Mock()) def test_show_partition_detail(self): self._test_show_command( self.cli_data.get_fake_show_partition_detail(), self.cli_data.get_test_show_partition_detail(), cli.ShowPartition, '-l') @mock.patch.object(cli.LOG, 'debug', mock.Mock()) def test_show_lv(self): self._test_show_command( self.cli_data.get_fake_show_lv(), self.cli_data.get_test_show_lv(), cli.ShowLV) @mock.patch.object(cli.LOG, 'debug', mock.Mock()) def test_show_lv_detail(self): self._test_show_command( self.cli_data.get_fake_show_lv_detail(), self.cli_data.get_test_show_lv_detail(), cli.ShowLV, '-l') @mock.patch.object(cli.LOG, 'debug', mock.Mock()) def test_show_lv_tier(self): self._test_show_command( self.cli_data.get_fake_show_lv_tier(), self.cli_data.get_test_show_lv_tier(), cli.ShowLV, 'tier') @mock.patch.object(cli.LOG, 'debug', mock.Mock()) def test_show_device(self): self._test_show_command( self.cli_data.get_fake_show_device(), self.cli_data.get_test_show_device(), cli.ShowDevice) @mock.patch.object(cli.LOG, 'debug', mock.Mock()) def test_show_channel(self): self._test_show_command( self.cli_data.get_fake_show_channel(), 
        self.cli_data.get_test_show_channel(),
        cli.ShowChannel)

    @mock.patch.object(cli.LOG, 'debug', mock.Mock())
    def test_show_channel_with_r_model(self):
        self._test_show_command(
            self.cli_data.get_fake_show_channel_r_model(),
            self.cli_data.get_test_show_channel_r_model(),
            cli.ShowChannel)

    @mock.patch.object(cli.LOG, 'debug', mock.Mock())
    def test_show_map(self):
        self._test_show_command(
            self.cli_data.get_fake_show_map(),
            self.cli_data.get_test_show_map(),
            cli.ShowMap)

    @mock.patch.object(cli.LOG, 'debug', mock.Mock())
    def test_show_license(self):
        self._test_show_command(
            self.cli_data.get_fake_show_license(),
            self.cli_data.get_test_show_license_full(),
            cli.ShowLicense)

    @mock.patch.object(cli.LOG, 'debug', mock.Mock())
    def test_show_replica_detail(self):
        self._test_show_command(
            self.cli_data.get_fake_show_replica_detail(),
            self.cli_data.get_test_show_replica_detail(),
            cli.ShowReplica, '-l')

    @mock.patch.object(cli.LOG, 'debug', mock.Mock())
    def test_show_wwn(self):
        self._test_show_command(
            self.cli_data.get_fake_show_wwn(),
            self.cli_data.get_test_show_wwn(),
            cli.ShowWWN)

    @mock.patch.object(cli.LOG, 'debug', mock.Mock())
    def test_show_iqn(self):
        self._test_show_command(
            self.cli_data.get_fake_show_iqn(),
            self.cli_data.get_test_show_iqn(),
            cli.ShowIQN)

    @mock.patch.object(cli.LOG, 'debug', mock.Mock())
    def test_show_host(self):
        self._test_show_command(
            self.cli_data.get_fake_show_host(),
            self.cli_data.get_test_show_host(),
            cli.ShowHost)

cinder-27.0.0/cinder/tests/unit/volume/drivers/infortrend/test_infortrend_common.py

# Copyright (c) 2015 Infortrend Technology, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
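"""Unit tests for the Infortrend common_cli driver (FC and iSCSI variants).

These tests exercise common_cli.InfortrendCommon against mocked raidcmd CLI
responses provided by test_infortrend_cli.InfortrendCLITestData.
"""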
import copy
from unittest import mock

from cinder import exception
from cinder.tests.unit import test
from cinder.tests.unit import utils
from cinder.tests.unit.volume.drivers.infortrend import test_infortrend_cli
from cinder.volume import configuration
from cinder.volume.drivers.infortrend.raidcmd_cli import common_cli
from cinder.volume import volume_utils

SUCCEED = (0, '')
FAKE_ERROR_RETURN = (-1, '')


class InfortrendTestCase(test.TestCase):

    def __init__(self, *args, **kwargs):
        super(InfortrendTestCase, self).__init__(*args, **kwargs)

    def setUp(self):
        super(InfortrendTestCase, self).setUp()
        self.cli_data = test_infortrend_cli.InfortrendCLITestData()
        self.configuration = configuration.Configuration(None)
        self.configuration.append_config_values = mock.Mock(return_value=0)
        self.configuration.safe_get = self._fake_safe_get

    def _fake_safe_get(self, key):
        return getattr(self.configuration, key)

    def _driver_setup(self, mock_commands, configuration=None):
        if configuration is None:
            configuration = self.configuration
        self.driver = self._get_driver(configuration)

        mock_commands_execute = self._mock_command_execute(mock_commands)
        mock_cli = mock.Mock(side_effect=mock_commands_execute)

        self.driver._execute_command = mock_cli

    def _get_driver(self, conf):
        raise NotImplementedError

    def _mock_command_execute(self, mock_commands):
        def fake_execute_command(cli_type, *args, **kwargs):
            if cli_type in mock_commands.keys():
                if isinstance(mock_commands[cli_type], list):
                    ret = mock_commands[cli_type][0]
                    del mock_commands[cli_type][0]
                    return ret
                elif isinstance(mock_commands[cli_type], tuple):
                    return mock_commands[cli_type]
                else:
                    return mock_commands[cli_type](*args, **kwargs)
            return FAKE_ERROR_RETURN
        return fake_execute_command

    def _mock_show_lv_for_migrate(self, *args, **kwargs):
        if 'tier' in args:
            return self.cli_data.get_test_show_lv_tier_for_migration()
        return self.cli_data.get_test_show_lv()

    def _mock_show_lv(self, *args, **kwargs):
        if 'tier' in args:
            return self.cli_data.get_test_show_lv_tier()
        return self.cli_data.get_test_show_lv()

    def _assert_cli_has_calls(self, expect_cli_cmd):
        self.driver._execute_command.assert_has_calls(expect_cli_cmd)


class InfortrendFCCommonTestCase(InfortrendTestCase):

    def __init__(self, *args, **kwargs):
        super(InfortrendFCCommonTestCase, self).__init__(*args, **kwargs)

    def setUp(self):
        super(InfortrendFCCommonTestCase, self).setUp()
        self.configuration.volume_backend_name = 'infortrend_backend_1'
        self.configuration.san_ip = self.cli_data.fake_manage_port_ip[0]
        self.configuration.san_password = '111111'
        self.configuration.infortrend_provisioning = 'full'
        self.configuration.infortrend_tiering = '0'
        self.configuration.infortrend_pools_name = ['LV-1', 'LV-2']
        self.configuration.infortrend_slots_a_channels_id = [0, 5]
        self.configuration.infortrend_slots_b_channels_id = [0, 5]
        self.pool_dict = {
            'LV-1': self.cli_data.fake_lv_id[0],
            'LV-2': self.cli_data.fake_lv_id[1],
        }

    @mock.patch.object(
        common_cli.InfortrendCommon, '_init_raidcmd', mock.Mock())
    @mock.patch.object(
        common_cli.InfortrendCommon, '_init_raid_connection', mock.Mock())
    @mock.patch.object(
        common_cli.InfortrendCommon, '_set_raidcmd', mock.Mock())
    def _get_driver(self, conf):
        driver = common_cli.InfortrendCommon('FC', configuration=conf)
        driver.do_setup()
        driver.pool_dict = self.pool_dict
        return driver

    def test_normal_channel(self):
        test_map_dict = {
            'slot_a': {'0': [], '5': []},
            'slot_b': {},
        }
        test_target_dict = {
            'slot_a': {'0': '112', '5': '112'},
            'slot_b': {},
        }
        mock_commands = {
            'ShowChannel':
self.cli_data.get_test_show_channel(), } self._driver_setup(mock_commands) self.driver._init_map_info() self.assertDictEqual(test_map_dict, self.driver.map_dict) self.assertDictEqual(test_target_dict, self.driver.target_dict) def test_normal_channel_with_r_model(self): test_map_dict = { 'slot_a': {'0': [], '5': []}, 'slot_b': {'0': [], '5': []}, } test_target_dict = { 'slot_a': {'0': '112', '5': '112'}, 'slot_b': {'0': '113', '5': '113'}, } mock_commands = { 'ShowChannel': self.cli_data.get_test_show_channel_r_model(), } self._driver_setup(mock_commands) self.driver._init_map_info() self.assertDictEqual(test_map_dict, self.driver.map_dict) self.assertDictEqual(test_target_dict, self.driver.target_dict) @mock.patch.object(common_cli.LOG, 'info', mock.Mock()) def test_initialize_connection(self): test_volume = self.cli_data.test_volume test_connector = self.cli_data.test_connector_fc mock_commands = { 'ShowChannel': self.cli_data.get_test_show_channel_without_mcs(), 'ShowMap': self.cli_data.get_test_show_map(), 'CreateMap': SUCCEED, 'ShowWWN': self.cli_data.get_test_show_wwn_with_g_model(), 'ShowDevice': self.cli_data.get_test_show_device(), } self._driver_setup(mock_commands) properties = self.driver.initialize_connection( test_volume, test_connector) self.assertDictEqual(self.cli_data.test_fc_properties, properties) @mock.patch.object(common_cli.LOG, 'info', mock.Mock()) def test_initialize_connection_specific_channel(self): test_volume = self.cli_data.test_volume test_connector = self.cli_data.test_connector_fc configuration = copy.copy(self.configuration) configuration.infortrend_slots_a_channels_id = '5' mock_commands = { 'ShowChannel': self.cli_data.get_test_show_channel(), 'ShowMap': self.cli_data.get_test_show_map(), 'CreateMap': SUCCEED, 'ShowWWN': self.cli_data.get_test_show_wwn_with_g_model(), 'ShowDevice': self.cli_data.get_test_show_device(), } self._driver_setup(mock_commands, configuration) properties = self.driver.initialize_connection( test_volume, test_connector) self.assertDictEqual( self.cli_data.test_fc_properties_with_specific_channel, properties) @mock.patch.object(common_cli.LOG, 'info', mock.Mock()) def test_initialize_connection_with_diff_target_id(self): test_volume = self.cli_data.test_volume test_connector = self.cli_data.test_connector_fc test_initiator_wwpns = test_connector['wwpns'] test_partition_id = self.cli_data.fake_partition_id[0] configuration = copy.copy(self.configuration) configuration.infortrend_slots_a_channels_id = '5' mock_commands = { 'ShowChannel': self.cli_data.get_test_show_channel_with_diff_target_id(), 'ShowMap': self.cli_data.get_test_show_map(), 'CreateMap': SUCCEED, 'ShowWWN': self.cli_data.get_test_show_wwn_with_g_model(), 'ShowDevice': self.cli_data.get_test_show_device(), } self._driver_setup(mock_commands, configuration) properties = self.driver.initialize_connection( test_volume, test_connector) expect_cli_cmd = [ mock.call('ShowDevice'), mock.call('ShowChannel'), mock.call('ShowWWN'), mock.call('ShowMap', 'part=%s' % test_partition_id), mock.call('ShowMap'), mock.call('CreateMap', 'part', test_partition_id, '5', '48', '0', 'wwn=%s' % test_initiator_wwpns[0]), mock.call('CreateMap', 'part', test_partition_id, '5', '48', '0', 'wwn=%s' % test_initiator_wwpns[1]), ] self._assert_cli_has_calls(expect_cli_cmd) self.assertDictEqual( self.cli_data.test_fc_properties_with_specific_channel, properties) @mock.patch.object(common_cli.LOG, 'info', mock.Mock()) def test_initialize_connection_multipath_with_r_model(self): test_volume = 
self.cli_data.test_volume test_connector = copy.deepcopy(self.cli_data.test_connector_fc) mock_commands = { 'ShowChannel': self.cli_data.get_test_show_channel_r_model(), 'ShowMap': self.cli_data.get_test_show_map(), 'CreateMap': SUCCEED, 'ShowWWN': self.cli_data.get_test_show_wwn(), 'ShowDevice': self.cli_data.get_test_show_device(), } self._driver_setup(mock_commands) properties = self.driver.initialize_connection( test_volume, test_connector) self.assertDictEqual( self.cli_data.test_fc_properties_multipath_r_model, properties) def test_initialize_connection_with_get_wwn_fail(self): test_volume = self.cli_data.test_volume test_connector = self.cli_data.test_connector_fc mock_commands = { 'ShowChannel': self.cli_data.get_test_show_channel(), 'ShowMap': self.cli_data.get_test_show_map(), 'CreateMap': SUCCEED, 'ShowWWN': FAKE_ERROR_RETURN, } self._driver_setup(mock_commands) self.assertRaises( common_cli.InfortrendCliException, self.driver.initialize_connection, test_volume, test_connector) @mock.patch.object(common_cli.LOG, 'info', mock.Mock()) def test_initialize_connection_with_zoning(self): test_volume = self.cli_data.test_volume test_connector = self.cli_data.test_connector_fc test_initiator_wwpns = test_connector['wwpns'] test_partition_id = self.cli_data.fake_partition_id[0] test_all_target_wwpns = self.cli_data.fake_target_wwpns[0:2] test_lookup_map = self.cli_data.fake_lookup_map mock_commands = { 'ShowChannel': self.cli_data.get_test_show_channel(), 'ShowMap': self.cli_data.get_test_show_map(), 'CreateMap': SUCCEED, 'ShowWWN': self.cli_data.get_test_show_wwn_with_g_model(), 'ShowDevice': self.cli_data.get_test_show_device(), } self._driver_setup(mock_commands) self.driver.fc_lookup_service = mock.Mock() get_device_mapping_from_network = ( self.driver.fc_lookup_service.get_device_mapping_from_network ) get_device_mapping_from_network.return_value = test_lookup_map properties = self.driver.initialize_connection( test_volume, test_connector) get_device_mapping_from_network.assert_has_calls( [mock.call(test_connector['wwpns'], test_all_target_wwpns)]) expect_cli_cmd = [ mock.call('ShowDevice'), mock.call('ShowChannel'), mock.call('ShowWWN'), mock.call('ShowMap', 'part=%s' % test_partition_id), mock.call('ShowMap'), mock.call('CreateMap', 'part', test_partition_id, '0', '112', '0', 'wwn=%s' % test_initiator_wwpns[0]), mock.call('CreateMap', 'part', test_partition_id, '5', '112', '0', 'wwn=%s' % test_initiator_wwpns[0]), mock.call('CreateMap', 'part', test_partition_id, '0', '112', '0', 'wwn=%s' % test_initiator_wwpns[1]), mock.call('CreateMap', 'part', test_partition_id, '5', '112', '0', 'wwn=%s' % test_initiator_wwpns[1]), ] self._assert_cli_has_calls(expect_cli_cmd) self.assertDictEqual( self.cli_data.test_fc_properties_zoning, properties) @mock.patch.object(common_cli.LOG, 'info', mock.Mock()) def test_initialize_connection_with_zoning_r_model(self): test_volume = self.cli_data.test_volume test_connector = self.cli_data.test_connector_fc test_initiator_wwpns = test_connector['wwpns'] test_partition_id = self.cli_data.fake_partition_id[0] test_all_target_wwpns = self.cli_data.fake_target_wwpns[:] test_all_target_wwpns[1] = self.cli_data.fake_target_wwpns[2] test_all_target_wwpns[2] = self.cli_data.fake_target_wwpns[1] test_lookup_map = self.cli_data.fake_lookup_map_r_model mock_commands = { 'ShowChannel': self.cli_data.get_test_show_channel_r_model(), 'ShowMap': self.cli_data.get_test_show_map(), 'CreateMap': SUCCEED, 'ShowWWN': self.cli_data.get_test_show_wwn(), 'ShowDevice': 
self.cli_data.get_test_show_device(), } self._driver_setup(mock_commands) self.driver.fc_lookup_service = mock.Mock() get_device_mapping_from_network = ( self.driver.fc_lookup_service.get_device_mapping_from_network ) get_device_mapping_from_network.return_value = test_lookup_map properties = self.driver.initialize_connection( test_volume, test_connector) get_device_mapping_from_network.assert_has_calls( [mock.call(test_connector['wwpns'], test_all_target_wwpns)]) expect_cli_cmd = [ mock.call('ShowDevice'), mock.call('ShowChannel'), mock.call('ShowWWN'), mock.call('ShowMap', 'part=%s' % test_partition_id), mock.call('ShowMap'), mock.call('CreateMap', 'part', test_partition_id, '5', '112', '0', 'wwn=%s' % test_initiator_wwpns[0]), mock.call('CreateMap', 'part', test_partition_id, '0', '113', '0', 'wwn=%s' % test_initiator_wwpns[0]), mock.call('CreateMap', 'part', test_partition_id, '5', '112', '0', 'wwn=%s' % test_initiator_wwpns[1]), mock.call('CreateMap', 'part', test_partition_id, '0', '113', '0', 'wwn=%s' % test_initiator_wwpns[1]), ] self._assert_cli_has_calls(expect_cli_cmd) self.assertDictEqual( self.cli_data.test_fc_properties_zoning_r_model, properties) @mock.patch.object(common_cli.LOG, 'info', mock.Mock()) def test_initialize_connection_with_zoning_r_model_diff_target_id(self): test_volume = self.cli_data.test_volume test_connector = self.cli_data.test_connector_fc test_initiator_wwpns = test_connector['wwpns'] test_partition_id = self.cli_data.fake_partition_id[0] test_all_target_wwpns = self.cli_data.fake_target_wwpns[:] test_all_target_wwpns[1] = self.cli_data.fake_target_wwpns[2] test_all_target_wwpns[2] = self.cli_data.fake_target_wwpns[1] test_lookup_map = self.cli_data.fake_lookup_map_r_model mock_commands = { 'ShowChannel': self.cli_data.get_test_show_channel_r_model_diff_target_id(), 'ShowMap': self.cli_data.get_test_show_map(), 'CreateMap': SUCCEED, 'ShowWWN': self.cli_data.get_test_show_wwn_with_diff_target_id(), 'ShowDevice': self.cli_data.get_test_show_device(), } self._driver_setup(mock_commands) self.driver.fc_lookup_service = mock.Mock() get_device_mapping_from_network = ( self.driver.fc_lookup_service.get_device_mapping_from_network ) get_device_mapping_from_network.return_value = test_lookup_map properties = self.driver.initialize_connection( test_volume, test_connector) get_device_mapping_from_network.assert_has_calls( [mock.call(test_connector['wwpns'], test_all_target_wwpns)]) expect_cli_cmd = [ mock.call('ShowDevice'), mock.call('ShowChannel'), mock.call('ShowWWN'), mock.call('ShowMap', 'part=%s' % test_partition_id), mock.call('ShowMap'), mock.call('CreateMap', 'part', test_partition_id, '5', '48', '0', 'wwn=%s' % test_initiator_wwpns[0]), mock.call('CreateMap', 'part', test_partition_id, '0', '33', '0', 'wwn=%s' % test_initiator_wwpns[0]), mock.call('CreateMap', 'part', test_partition_id, '5', '48', '0', 'wwn=%s' % test_initiator_wwpns[1]), mock.call('CreateMap', 'part', test_partition_id, '0', '33', '0', 'wwn=%s' % test_initiator_wwpns[1]), ] self._assert_cli_has_calls(expect_cli_cmd) self.assertDictEqual( self.cli_data.test_fc_properties_zoning_r_model, properties) @mock.patch.object(common_cli.LOG, 'info', mock.Mock()) def test_terminate_connection(self): test_volume = self.cli_data.test_volume test_partition_id = self.cli_data.fake_partition_id[0] test_connector = self.cli_data.test_connector_fc mock_commands = { 'DeleteMap': SUCCEED, 'ShowMap': [self.cli_data.get_test_show_map_fc(), self.cli_data.get_test_show_empty_list()], 'ShowWWN': SUCCEED, 
'ShowDevice': self.cli_data.get_test_show_device(), } self._driver_setup(mock_commands) self.driver.terminate_connection(test_volume, test_connector) expect_cli_cmd = [ mock.call('ShowDevice'), mock.call('ShowMap', 'part=%s' % test_partition_id), mock.call('DeleteMap', 'part', test_partition_id, '0', '112', '0', '-y'), mock.call('DeleteMap', 'part', test_partition_id, '5', '112', '0', '-y'), mock.call('ShowMap'), mock.call('ShowWWN'), ] self._assert_cli_has_calls(expect_cli_cmd) @mock.patch.object(common_cli.LOG, 'info', mock.Mock()) def test_terminate_connection_with_zoning(self): test_volume = self.cli_data.test_volume test_partition_id = self.cli_data.fake_partition_id[0] test_connector = self.cli_data.test_connector_fc test_all_target_wwpns = self.cli_data.fake_target_wwpns[:2] test_lookup_map = self.cli_data.fake_lookup_map mock_commands = { 'DeleteMap': SUCCEED, 'ShowMap': [self.cli_data.get_test_show_map_fc(), self.cli_data.get_test_show_empty_list()], 'ShowWWN': self.cli_data.get_test_show_wwn_with_g_model(), 'ShowDevice': self.cli_data.get_test_show_device(), } self._driver_setup(mock_commands) self.driver.map_dict = { 'slot_a': {'0': [], '5': []}, 'slot_b': {}, } self.driver.fc_lookup_service = mock.Mock() get_device_mapping_from_network = ( self.driver.fc_lookup_service.get_device_mapping_from_network ) get_device_mapping_from_network.return_value = test_lookup_map conn_info = self.driver.terminate_connection( test_volume, test_connector) get_device_mapping_from_network.assert_has_calls( [mock.call(test_connector['wwpns'], test_all_target_wwpns)]) expect_cli_cmd = [ mock.call('ShowMap', 'part=%s' % test_partition_id), mock.call('DeleteMap', 'part', test_partition_id, '0', '112', '0', '-y'), mock.call('DeleteMap', 'part', test_partition_id, '5', '112', '0', '-y'), mock.call('ShowMap'), mock.call('ShowWWN'), ] self._assert_cli_has_calls(expect_cli_cmd) self.assertDictEqual( self.cli_data.test_fc_terminate_conn_info, conn_info) @mock.patch.object(common_cli.LOG, 'info', mock.Mock()) def test_terminate_connection_with_zoning_and_lun_map_exist(self): test_volume = self.cli_data.test_volume test_partition_id = self.cli_data.fake_partition_id[0] test_connector = self.cli_data.test_connector_fc mock_commands = { 'DeleteMap': SUCCEED, 'ShowMap': self.cli_data.get_show_map_with_lun_map_on_zoning(), 'ShowDevice': self.cli_data.get_test_show_device(), } self._driver_setup(mock_commands) self.driver.map_dict = { 'slot_a': {'0': [], '5': []}, 'slot_b': {}, } self.driver.target_dict = { 'slot_a': {'0': '112', '5': '112'}, 'slot_b': {}, } self.driver.fc_lookup_service = mock.Mock() conn_info = self.driver.terminate_connection( test_volume, test_connector) expect_cli_cmd = [ mock.call('ShowMap', 'part=%s' % test_partition_id), mock.call('DeleteMap', 'part', test_partition_id, '0', '112', '0', '-y'), mock.call('ShowMap'), ] expect_conn_info = {'driver_volume_type': 'fibre_channel', 'data': {}} self._assert_cli_has_calls(expect_cli_cmd) self.assertEqual(expect_conn_info, conn_info) class InfortrendiSCSICommonTestCase(InfortrendTestCase): def __init__(self, *args, **kwargs): super(InfortrendiSCSICommonTestCase, self).__init__(*args, **kwargs) def setUp(self): super(InfortrendiSCSICommonTestCase, self).setUp() self.configuration.volume_backend_name = 'infortrend_backend_1' self.configuration.san_ip = self.cli_data.fake_manage_port_ip[0] self.configuration.san_password = '111111' self.configuration.infortrend_provisioning = 'full' self.configuration.infortrend_tiering = '0' 
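        # The attributes below (pool names and per-slot channel IDs) round
        # out a minimal fake Infortrend iSCSI backend configuration; the
        # values mirror the shared fake CLI data used throughout these tests.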
        self.configuration.infortrend_pools_name = ['LV-1', 'LV-2']
        self.configuration.infortrend_slots_a_channels_id = [1, 2, 4]
        self.configuration.infortrend_slots_b_channels_id = [1, 2, 4]
        self.pool_dict = {
            'LV-1': self.cli_data.fake_lv_id[0],
            'LV-2': self.cli_data.fake_lv_id[1],
        }

    @mock.patch.object(
        common_cli.InfortrendCommon, '_init_raidcmd', mock.Mock())
    @mock.patch.object(
        common_cli.InfortrendCommon, '_init_raid_connection', mock.Mock())
    @mock.patch.object(
        common_cli.InfortrendCommon, '_set_raidcmd', mock.Mock())
    def _get_driver(self, conf):
        driver = common_cli.InfortrendCommon('iSCSI', configuration=conf)
        driver.do_setup()
        driver.pool_dict = self.pool_dict
        return driver

    @mock.patch.object(common_cli.LOG, 'warning')
    def test_create_map_warning_return_code(self, log_warning):
        FAKE_RETURN_CODE = (20, '')
        mock_commands = {
            'CreateMap': FAKE_RETURN_CODE,
        }
        self._driver_setup(mock_commands)
        self.driver._execute('CreateMap')
        self.assertEqual(1, log_warning.call_count)

    @mock.patch.object(common_cli.LOG, 'warning')
    def test_delete_map_warning_return_code(self, log_warning):
        FAKE_RETURN_CODE = (11, '')
        mock_commands = {
            'DeleteMap': FAKE_RETURN_CODE,
        }
        self._driver_setup(mock_commands)
        self.driver._execute('DeleteMap')
        self.assertEqual(1, log_warning.call_count)

    @mock.patch.object(common_cli.LOG, 'warning')
    def test_create_iqn_warning_return_code(self, log_warning):
        FAKE_RETURN_CODE = (20, '')
        mock_commands = {
            'CreateIQN': FAKE_RETURN_CODE,
        }
        self._driver_setup(mock_commands)
        self.driver._execute('CreateIQN')
        self.assertEqual(1, log_warning.call_count)

    @mock.patch.object(common_cli.LOG, 'warning')
    def test_delete_iqn_warning_return_code_has_map(self, log_warning):
        FAKE_RETURN_CODE = (20, '')
        mock_commands = {
            'DeleteIQN': FAKE_RETURN_CODE,
        }
        self._driver_setup(mock_commands)
        self.driver._execute('DeleteIQN')
        self.assertEqual(1, log_warning.call_count)

    @mock.patch.object(common_cli.LOG, 'warning')
    def test_delete_iqn_warning_return_code_no_such_name(self, log_warning):
        FAKE_RETURN_CODE = (11, '')
        mock_commands = {
            'DeleteIQN': FAKE_RETURN_CODE,
        }
        self._driver_setup(mock_commands)
        self.driver._execute('DeleteIQN')
        self.assertEqual(1, log_warning.call_count)

    def test_normal_channel(self):
        test_map_dict = {
            'slot_a': {'1': [], '2': [], '4': []},
            'slot_b': {},
        }
        test_target_dict = {
            'slot_a': {'1': '0', '2': '0', '4': '0'},
            'slot_b': {},
        }
        mock_commands = {
            'ShowChannel': self.cli_data.get_test_show_channel(),
        }
        self._driver_setup(mock_commands)
        self.driver._init_map_info()
        self.assertDictEqual(test_map_dict, self.driver.map_dict)
        self.assertDictEqual(test_target_dict, self.driver.target_dict)

    def test_normal_channel_with_multipath(self):
        test_map_dict = {
            'slot_a': {'1': [], '2': [], '4': []},
            'slot_b': {'1': [], '2': [], '4': []},
        }
        test_target_dict = {
            'slot_a': {'1': '0', '2': '0', '4': '0'},
            'slot_b': {'1': '1', '2': '1', '4': '1'},
        }
        mock_commands = {
            'ShowChannel': self.cli_data.get_test_show_channel_r_model(),
        }
        self._driver_setup(mock_commands)
        self.driver._init_map_info()
        self.assertDictEqual(test_map_dict, self.driver.map_dict)
        self.assertDictEqual(test_target_dict, self.driver.target_dict)

    def test_specific_channel(self):
        configuration = copy.copy(self.configuration)
        configuration.infortrend_slots_a_channels_id = '2, 4'
        test_map_dict = {
            'slot_a': {'2': [], '4': []},
            'slot_b': {},
        }
        test_target_dict = {
            'slot_a': {'2': '0', '4': '0'},
            'slot_b': {},
        }
        mock_commands = {
            'ShowChannel': self.cli_data.get_test_show_channel(),
        }
        self._driver_setup(mock_commands, configuration)
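        # With infortrend_slots_a_channels_id narrowed to '2, 4',
        # _init_map_info() is expected to populate map_dict/target_dict for
        # those slot A channels only, as the assertions below verify.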
self.driver._init_map_info() self.assertDictEqual(test_map_dict, self.driver.map_dict) self.assertDictEqual(test_target_dict, self.driver.target_dict) def test_update_mcs_dict(self): configuration = copy.copy(self.configuration) configuration.use_multipath_for_image_xfer = True test_mcs_dict = { 'slot_a': {'1': ['1', '2'], '2': ['4']}, 'slot_b': {}, } mock_commands = { 'ShowChannel': self.cli_data.get_test_show_channel_with_mcs(), } self._driver_setup(mock_commands, configuration) self.driver._init_map_info() self.assertDictEqual(test_mcs_dict, self.driver.mcs_dict) def test_mapping_info_with_mpio_no_mcs(self): configuration = copy.copy(self.configuration) configuration.use_multipath_for_image_xfer = True fake_mcs_dict = { 'slot_a': {'1': ['1'], '2': ['2'], '4': ['4']}, 'slot_b': {'1': ['1'], '2': ['2'], '4': ['4']}, } lun_list = list(range(0, 127)) fake_map_dict = { 'slot_a': {'1': lun_list[2:], '2': lun_list[:], '4': lun_list[1:]}, 'slot_b': {'1': lun_list[:], '2': lun_list[:], '4': lun_list[:]}, } test_map_chl = { 'slot_a': ['1', '2', '4'], 'slot_b': ['1', '2', '4'], } test_map_lun = ['2'] self.driver = self._get_driver(configuration) self.driver.mcs_dict = fake_mcs_dict self.driver.map_dict = fake_map_dict map_chl, map_lun = self.driver._get_mapping_info_with_mpio() map_chl['slot_a'].sort() map_chl['slot_b'].sort() self.assertDictEqual(test_map_chl, map_chl) self.assertEqual(test_map_lun, map_lun) def test_mapping_info_with_mcs(self): configuration = copy.copy(self.configuration) configuration.use_multipath_for_image_xfer = True fake_mcs_dict = { 'slot_a': {'0': ['1', '2'], '2': ['4']}, 'slot_b': {'0': ['1', '2']}, } lun_list = list(range(0, 127)) fake_map_dict = { 'slot_a': {'1': lun_list[2:], '2': lun_list[:], '4': lun_list[1:]}, 'slot_b': {'1': lun_list[:], '2': lun_list[:]}, } test_map_chl = { 'slot_a': ['1', '4'], 'slot_b': ['1'], } test_map_lun = ['2'] self.driver = self._get_driver(configuration) self.driver.mcs_dict = fake_mcs_dict self.driver.map_dict = fake_map_dict map_chl, map_lun = self.driver._get_mapping_info_with_mpio() map_chl['slot_a'].sort() map_chl['slot_b'].sort() self.assertDictEqual(test_map_chl, map_chl) self.assertEqual(test_map_lun, map_lun) def test_mapping_info_with_mcs_multi_group(self): configuration = copy.copy(self.configuration) configuration.use_multipath_for_image_xfer = True fake_mcs_dict = { 'slot_a': {'0': ['1', '2'], '1': ['3', '4'], '2': ['5']}, 'slot_b': {'0': ['1', '2']}, } lun_list = list(range(0, 127)) fake_map_dict = { 'slot_a': { '1': lun_list[2:], '2': lun_list[3:], '3': lun_list[:], '4': lun_list[1:], '5': lun_list[:], }, 'slot_b': { '1': lun_list[:], '2': lun_list[:], }, } test_map_chl = { 'slot_a': ['1', '3', '5'], 'slot_b': ['1'], } test_map_lun = ['2'] self.driver = self._get_driver(configuration) self.driver.mcs_dict = fake_mcs_dict self.driver.map_dict = fake_map_dict map_chl, map_lun = self.driver._get_mapping_info_with_mpio() map_chl['slot_a'].sort() map_chl['slot_b'].sort() self.assertDictEqual(test_map_chl, map_chl) self.assertEqual(test_map_lun, map_lun) def test_specific_channel_with_multipath(self): configuration = copy.copy(self.configuration) configuration.infortrend_slots_a_channels_id = '1,2' test_map_dict = { 'slot_a': {'1': [], '2': []}, 'slot_b': {}, } test_target_dict = { 'slot_a': {'1': '0', '2': '0'}, 'slot_b': {}, } mock_commands = { 'ShowChannel': self.cli_data.get_test_show_channel(), } self._driver_setup(mock_commands, configuration) self.driver._init_map_info() self.assertDictEqual(test_map_dict, 
self.driver.map_dict) self.assertDictEqual(test_target_dict, self.driver.target_dict) def test_specific_channel_with_multipath_r_model(self): configuration = copy.copy(self.configuration) configuration.infortrend_slots_a_channels_id = '1,2' configuration.infortrend_slots_b_channels_id = '1' test_map_dict = { 'slot_a': {'1': [], '2': []}, 'slot_b': {'1': []}, } test_target_dict = { 'slot_a': {'1': '0', '2': '0'}, 'slot_b': {'1': '1'}, } mock_commands = { 'ShowChannel': self.cli_data.get_test_show_channel_r_model(), } self._driver_setup(mock_commands, configuration) self.driver._init_map_info() self.assertDictEqual(test_map_dict, self.driver.map_dict) self.assertDictEqual(test_target_dict, self.driver.target_dict) @mock.patch.object(common_cli.LOG, 'info') def test_create_volume(self, log_info): test_volume = self.cli_data.test_volume test_model_update = { 'provider_location': 'partition_id^%s@system_id^%s' % ( self.cli_data.fake_partition_id[0], int(self.cli_data.fake_system_id[0], 16) ) } mock_commands = { 'CreatePartition': SUCCEED, 'ShowPartition': self.cli_data.get_test_show_partition(), 'ShowDevice': self.cli_data.get_test_show_device(), 'ShowLV': self._mock_show_lv, } self._driver_setup(mock_commands) model_update = self.driver.create_volume(test_volume) self.assertDictEqual(test_model_update, model_update) self.assertEqual(1, log_info.call_count) @mock.patch.object(common_cli.LOG, 'info', mock.Mock()) def test_create_volume_with_create_fail(self): test_volume = self.cli_data.test_volume mock_commands = { 'CreatePartition': FAKE_ERROR_RETURN, 'ShowPartition': self.cli_data.get_test_show_partition(), 'ShowDevice': self.cli_data.get_test_show_device(), 'ShowLV': self._mock_show_lv, } self._driver_setup(mock_commands) self.assertRaises( common_cli.InfortrendCliException, self.driver.create_volume, test_volume) @mock.patch.object(common_cli.LOG, 'info') def test_delete_volume_with_mapped(self, log_info): test_volume = self.cli_data.test_volume test_partition_id = self.cli_data.fake_partition_id[0] mock_commands = { 'ShowPartition': self.cli_data.get_test_show_partition_detail_for_map( test_partition_id), 'DeleteMap': SUCCEED, 'DeletePartition': SUCCEED, } self._driver_setup(mock_commands) self.driver.delete_volume(test_volume) expect_cli_cmd = [ mock.call('ShowPartition', '-l'), mock.call('DeleteMap', 'part', test_partition_id, '-y'), mock.call('DeletePartition', test_partition_id, '-y'), ] self._assert_cli_has_calls(expect_cli_cmd) self.assertEqual(1, log_info.call_count) @mock.patch.object(common_cli.LOG, 'info') def test_delete_volume_without_mapped(self, log_info): test_volume = self.cli_data.test_volume_1 test_partition_id = self.cli_data.fake_partition_id[1] mock_commands = { 'ShowPartition': self.cli_data.get_test_show_partition_detail( test_volume['id'], '5DE94FF775D81C30'), 'DeletePartition': SUCCEED, } self._driver_setup(mock_commands) self.driver.delete_volume(test_volume) expect_cli_cmd = [ mock.call('ShowPartition', '-l'), mock.call('DeletePartition', test_partition_id, '-y'), ] self._assert_cli_has_calls(expect_cli_cmd) self.assertEqual(1, log_info.call_count) def test_delete_volume_with_delete_fail(self): test_volume = self.cli_data.test_volume test_partition_id = self.cli_data.fake_partition_id[0] mock_commands = { 'ShowPartition': self.cli_data.get_test_show_partition_detail_for_map( test_partition_id), 'ShowReplica': self.cli_data.get_test_show_replica_detail(), 'DeleteReplica': SUCCEED, 'ShowSnapshot': self.cli_data.get_test_show_snapshot(), 'DeleteSnapshot': SUCCEED, 
'ShowMap': self.cli_data.get_test_show_map(), 'DeleteMap': SUCCEED, 'DeletePartition': FAKE_ERROR_RETURN, } self._driver_setup(mock_commands) self.assertRaises( common_cli.InfortrendCliException, self.driver.delete_volume, test_volume) @mock.patch.object(common_cli.LOG, 'warning') def test_delete_volume_with_partiton_not_found(self, log_warning): test_volume = self.cli_data.test_volume mock_commands = { 'ShowPartition': self.cli_data.get_test_show_empty_list(), } self._driver_setup(mock_commands) self.driver.delete_volume(test_volume) self.assertEqual(1, log_warning.call_count) @mock.patch.object(common_cli.LOG, 'info') def test_delete_volume_without_provider(self, log_info): test_system_id = self.cli_data.fake_system_id[0] test_volume = copy.deepcopy(self.cli_data.test_volume) test_volume['provider_location'] = 'partition_id^%s@system_id^%s' % ( 'None', int(test_system_id, 16)) test_partition_id = self.cli_data.fake_partition_id[0] mock_commands = { 'ShowPartition': self.cli_data.get_test_show_partition_detail_for_map( test_partition_id), 'ShowReplica': self.cli_data.get_test_show_replica_detail(), 'DeleteReplica': SUCCEED, 'ShowSnapshot': self.cli_data.get_test_show_snapshot(), 'DeleteSnapshot': SUCCEED, 'ShowMap': self.cli_data.get_test_show_map(), 'DeleteMap': SUCCEED, 'DeletePartition': SUCCEED, } self._driver_setup(mock_commands) self.driver.delete_volume(test_volume) self.assertEqual(1, log_info.call_count) @mock.patch('oslo_service.loopingcall.FixedIntervalLoopingCall', new=utils.ZeroIntervalLoopingCall) @mock.patch.object(common_cli.LOG, 'info') def test_create_cloned_volume(self, log_info): fake_partition_id = self.cli_data.fake_partition_id[0] test_dst_volume = self.cli_data.test_dst_volume test_dst_volume_id = test_dst_volume['id'] test_src_volume = self.cli_data.test_volume test_dst_part_id = self.cli_data.fake_partition_id[1] test_model_update = { 'provider_location': 'partition_id^%s@system_id^%s' % ( self.cli_data.fake_partition_id[1], int(self.cli_data.fake_system_id[0], 16) ) } mock_commands = { 'CreatePartition': SUCCEED, 'ShowPartition': self.cli_data.get_test_show_partition(), 'ShowDevice': self.cli_data.get_test_show_device(), 'CreateReplica': SUCCEED, 'ShowLV': self._mock_show_lv, 'ShowReplica': self.cli_data.get_test_show_replica_detail_for_migrate( fake_partition_id, test_dst_part_id, test_dst_volume_id), 'DeleteReplica': SUCCEED, } self._driver_setup(mock_commands) model_update = self.driver.create_cloned_volume( test_dst_volume, test_src_volume) self.assertDictEqual(test_model_update, model_update) self.assertEqual(1, log_info.call_count) @mock.patch.object(common_cli.LOG, 'info', mock.Mock()) def test_create_cloned_volume_with_create_replica_fail(self): test_dst_volume = self.cli_data.test_dst_volume test_src_volume = self.cli_data.test_volume mock_commands = { 'CreatePartition': SUCCEED, 'ShowPartition': self.cli_data.get_test_show_partition(), 'ShowDevice': self.cli_data.get_test_show_device(), 'CreateReplica': FAKE_ERROR_RETURN, 'ShowLV': self._mock_show_lv, } self._driver_setup(mock_commands) self.assertRaises( common_cli.InfortrendCliException, self.driver.create_cloned_volume, test_dst_volume, test_src_volume) @mock.patch.object(common_cli.LOG, 'info', mock.Mock()) def test_create_export(self): test_volume = self.cli_data.test_volume test_model_update = { 'provider_location': test_volume['provider_location'], } self.driver = self._get_driver(self.configuration) model_update = self.driver.create_export(None, test_volume) 
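        # create_export appears to be a simple pass-through here: the model
        # update asserted below just echoes the volume's existing
        # provider_location, with no mocked CLI commands needed.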
self.assertDictEqual(test_model_update, model_update) @mock.patch.object(common_cli.LOG, 'info', mock.Mock()) def test_get_volume_stats_full(self): test_volume_states = self.cli_data.test_volume_states_full mock_commands = { 'InitCache': SUCCEED, 'ShowLicense': self.cli_data.get_test_show_license_full(), 'ShowLV': [self.cli_data.get_test_show_lv_tier(), self.cli_data.get_test_show_lv()], 'ShowDevice': self.cli_data.get_test_show_device(), 'CheckConnection': SUCCEED, } self._driver_setup(mock_commands) self.driver.VERSION = '99.99' self.driver.system_id = self.cli_data.fake_system_id[0] volume_states = self.driver.get_volume_stats(True) self.assertDictEqual.__self__.maxDiff = None self.assertDictEqual(test_volume_states, volume_states) @mock.patch.object(common_cli.LOG, 'info', mock.Mock()) def test_get_volume_stats_thin(self): test_volume_states = self.cli_data.test_volume_states_thin mock_commands = { 'InitCache': SUCCEED, 'ShowLicense': self.cli_data.get_test_show_license_thin(), 'ShowLV': [self.cli_data.get_test_show_lv_tier(), self.cli_data.get_test_show_lv()], 'ShowPartition': self.cli_data.get_test_show_partition_detail(), 'ShowDevice': self.cli_data.get_test_show_device(), 'CheckConnection': SUCCEED, } self._driver_setup(mock_commands) self.driver.VERSION = '99.99' self.driver.system_id = self.cli_data.fake_system_id[0] volume_states = self.driver.get_volume_stats(True) self.assertDictEqual.__self__.maxDiff = None self.assertDictEqual(test_volume_states, volume_states) def test_get_volume_stats_fail(self): mock_commands = { 'InitCache': SUCCEED, 'ShowLicense': self.cli_data.get_test_show_license_thin(), 'ShowLV': FAKE_ERROR_RETURN, } self._driver_setup(mock_commands) self.assertRaises( common_cli.InfortrendCliException, self.driver.get_volume_stats) @mock.patch.object(common_cli.LOG, 'info', mock.Mock()) def test_create_snapshot(self): fake_partition_id = self.cli_data.fake_partition_id[0] fake_snapshot_id = self.cli_data.fake_snapshot_id[0] mock_commands = { 'CreateSnapshot': SUCCEED, 'ShowSnapshot': self.cli_data.get_test_show_snapshot( partition_id=fake_partition_id, snapshot_id=fake_snapshot_id), 'ShowPartition': self.cli_data.get_test_show_partition(), } self._driver_setup(mock_commands) model_update = self.driver.create_snapshot(self.cli_data.test_snapshot) self.assertEqual(fake_snapshot_id, model_update['provider_location']) @mock.patch.object(common_cli.LOG, 'info', mock.Mock()) def test_create_snapshot_without_partition_id(self): fake_partition_id = self.cli_data.fake_partition_id[0] fake_snapshot_id = self.cli_data.fake_snapshot_id[0] test_snapshot = self.cli_data.test_snapshot mock_commands = { 'CreateSnapshot': SUCCEED, 'ShowSnapshot': self.cli_data.get_test_show_snapshot( partition_id=fake_partition_id, snapshot_id=fake_snapshot_id), 'ShowPartition': FAKE_ERROR_RETURN, } self._driver_setup(mock_commands) self.assertRaises( common_cli.InfortrendCliException, self.driver.create_snapshot, test_snapshot) def test_create_snapshot_with_create_fail(self): fake_partition_id = self.cli_data.fake_partition_id[0] fake_snapshot_id = self.cli_data.fake_snapshot_id[0] test_snapshot = self.cli_data.test_snapshot mock_commands = { 'CreateSnapshot': FAKE_ERROR_RETURN, 'ShowSnapshot': self.cli_data.get_test_show_snapshot( partition_id=fake_partition_id, snapshot_id=fake_snapshot_id), 'ShowPartition': self.cli_data.get_test_show_partition(), } self._driver_setup(mock_commands) self.assertRaises( common_cli.InfortrendCliException, self.driver.create_snapshot, test_snapshot) def 
test_create_snapshot_with_show_fail(self): test_snapshot = self.cli_data.test_snapshot mock_commands = { 'CreateSnapshot': SUCCEED, 'ShowSnapshot': FAKE_ERROR_RETURN, 'ShowPartition': self.cli_data.get_test_show_partition(), } self._driver_setup(mock_commands) self.assertRaises( common_cli.InfortrendCliException, self.driver.create_snapshot, test_snapshot) @mock.patch.object(common_cli.LOG, 'info') def test_delete_snapshot(self, log_info): test_snapshot = self.cli_data.test_snapshot mock_commands = { 'ShowReplica': self.cli_data.get_test_show_replica_detail(), 'DeleteSnapshot': SUCCEED, } self._driver_setup(mock_commands) self.driver.delete_snapshot(test_snapshot) self.assertEqual(1, log_info.call_count) @mock.patch.object(common_cli.LOG, 'warning') def test_delete_snapshot_without_provider_location(self, log_warning): test_snapshot = self.cli_data.test_snapshot_without_provider_location self.driver = self._get_driver(self.configuration) self.driver.delete_snapshot(test_snapshot) self.assertEqual(1, log_warning.call_count) def test_delete_snapshot_with_fail(self): test_snapshot = self.cli_data.test_snapshot mock_commands = { 'DeleteSnapshot': FAKE_ERROR_RETURN, } self._driver_setup(mock_commands) self.assertRaises( common_cli.InfortrendCliException, self.driver.delete_snapshot, test_snapshot) @mock.patch('oslo_service.loopingcall.FixedIntervalLoopingCall', new=utils.ZeroIntervalLoopingCall) @mock.patch.object(common_cli.LOG, 'info') def test_create_volume_from_snapshot(self, log_info): test_snapshot = self.cli_data.test_snapshot test_snapshot_id = self.cli_data.fake_snapshot_id[0] test_dst_volume = self.cli_data.test_dst_volume test_dst_volume_id = test_dst_volume['id'] test_dst_part_id = self.cli_data.fake_partition_id[1] test_model_update = { 'provider_location': 'partition_id^%s@system_id^%s' % ( self.cli_data.fake_partition_id[1], int(self.cli_data.fake_system_id[0], 16) ) } mock_commands = { 'CreatePartition': SUCCEED, 'ShowPartition': self.cli_data.get_test_show_partition(), 'ShowDevice': self.cli_data.get_test_show_device(), 'CreateReplica': SUCCEED, 'ShowReplica': self.cli_data.get_test_show_replica_detail_for_migrate( test_snapshot_id, test_dst_part_id, test_dst_volume_id), 'DeleteReplica': SUCCEED, } self._driver_setup(mock_commands) model_update = self.driver.create_volume_from_snapshot( test_dst_volume, test_snapshot) self.assertDictEqual(test_model_update, model_update) self.assertEqual(1, log_info.call_count) @mock.patch('oslo_service.loopingcall.FixedIntervalLoopingCall', new=utils.ZeroIntervalLoopingCall) @mock.patch.object(common_cli.LOG, 'info') def test_create_volume_from_snapshot_with_different_size(self, log_info): test_snapshot = self.cli_data.test_snapshot test_snapshot_id = self.cli_data.fake_snapshot_id[0] test_dst_volume = self.cli_data.test_dst_volume test_dst_volume['size'] = 10 test_dst_volume_id = test_dst_volume['id'].replace('-', '') test_dst_part_id = self.cli_data.fake_partition_id[1] test_model_update = { 'provider_location': 'partition_id^%s@system_id^%s' % ( self.cli_data.fake_partition_id[1], int(self.cli_data.fake_system_id[0], 16)) } mock_commands = { 'ShowSnapshot': self.cli_data.get_test_show_snapshot_detail_filled_block(), 'CreatePartition': SUCCEED, 'ShowPartition': self.cli_data.get_test_show_partition(), 'ShowDevice': self.cli_data.get_test_show_device(), 'CreateReplica': SUCCEED, 'ShowLV': self._mock_show_lv, 'ShowReplica': self.cli_data.get_test_show_replica_detail_for_migrate( test_snapshot_id, test_dst_part_id, test_dst_volume_id), 
'DeleteReplica': SUCCEED, } self._driver_setup(mock_commands) model_update = self.driver.create_volume_from_snapshot( test_dst_volume, test_snapshot) self.assertDictEqual(test_model_update, model_update) self.assertEqual(1, log_info.call_count) self.assertEqual(10, test_dst_volume['size']) def test_create_volume_from_snapshot_without_provider_location( self): test_snapshot = self.cli_data.test_snapshot_without_provider_location test_dst_volume = self.cli_data.test_dst_volume self.driver = self._get_driver(self.configuration) self.assertRaises( exception.VolumeBackendAPIException, self.driver.create_volume_from_snapshot, test_dst_volume, test_snapshot) @mock.patch.object(common_cli.LOG, 'info', mock.Mock()) def test_initialize_connection(self): test_volume = self.cli_data.test_volume test_partition_id = self.cli_data.fake_partition_id[0] test_connector = copy.deepcopy(self.cli_data.test_connector_iscsi) test_iscsi_properties = self.cli_data.test_iscsi_properties test_target_protal = [test_iscsi_properties['data']['target_portal']] test_target_iqn = [test_iscsi_properties['data']['target_iqn']] test_connector['multipath'] = False mock_commands = { 'ShowChannel': self.cli_data.get_test_show_channel(), 'ShowMap': self.cli_data.get_test_show_map(), 'ShowIQN': self.cli_data.get_test_show_iqn(), 'CreateMap': SUCCEED, 'ShowNet': self.cli_data.get_test_show_net(), 'ExecuteCommand': self.cli_data.get_fake_discovery( test_target_iqn, test_target_protal), 'ShowDevice': self.cli_data.get_test_show_device(), } self._driver_setup(mock_commands) properties = self.driver.initialize_connection( test_volume, test_connector) self.assertDictEqual(test_iscsi_properties, properties) expect_cli_cmd = [ mock.call('CreateMap', 'part', test_partition_id, '2', '0', '0', 'iqn=%s' % test_connector['initiator']), ] self._assert_cli_has_calls(expect_cli_cmd) @mock.patch.object(common_cli.LOG, 'info', mock.Mock()) def test_initialize_connection_with_iqn_not_exist(self): test_volume = self.cli_data.test_volume test_partition_id = self.cli_data.fake_partition_id[0] test_initiator = copy.deepcopy(self.cli_data.fake_initiator_iqn[1]) test_connector = copy.deepcopy(self.cli_data.test_connector_iscsi) test_iscsi_properties = self.cli_data.test_iscsi_properties test_target_protal = [test_iscsi_properties['data']['target_portal']] test_target_iqn = [test_iscsi_properties['data']['target_iqn']] test_connector['multipath'] = False test_connector['initiator'] = test_initiator mock_commands = { 'ShowChannel': self.cli_data.get_test_show_channel(), 'ShowMap': self.cli_data.get_test_show_map(), 'ShowIQN': self.cli_data.get_test_show_iqn(), 'CreateIQN': SUCCEED, 'CreateMap': SUCCEED, 'ShowNet': self.cli_data.get_test_show_net(), 'ExecuteCommand': self.cli_data.get_fake_discovery( test_target_iqn, test_target_protal), 'ShowDevice': self.cli_data.get_test_show_device(), } self._driver_setup(mock_commands) properties = self.driver.initialize_connection( test_volume, test_connector) self.assertDictEqual(test_iscsi_properties, properties) expect_cli_cmd = [ mock.call('ShowDevice'), mock.call('ShowChannel'), mock.call('ShowIQN'), mock.call('CreateIQN', test_initiator, test_initiator[-16:]), mock.call('ShowNet'), mock.call('ShowMap'), mock.call('ShowMap', 'part=6A41315B0EDC8EB7'), mock.call('CreateMap', 'part', test_partition_id, '2', '0', '0', 'iqn=%s' % test_connector['initiator']), ] self._assert_cli_has_calls(expect_cli_cmd) @mock.patch.object(common_cli.LOG, 'info', mock.Mock()) def test_initialize_connection_with_empty_map(self): 
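        # ShowMap returns an empty list here, i.e. no existing host mappings,
        # so the driver should build the iSCSI connection properties from
        # scratch (compared against test_iscsi_properties_empty_map below).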
test_volume = self.cli_data.test_volume test_connector = copy.deepcopy(self.cli_data.test_connector_iscsi) test_iscsi_properties = self.cli_data.test_iscsi_properties_empty_map test_target_protal = [test_iscsi_properties['data']['target_portal']] test_target_iqn = [test_iscsi_properties['data']['target_iqn']] test_connector['multipath'] = False mock_commands = { 'ShowChannel': self.cli_data.get_test_show_channel(), 'ShowMap': self.cli_data.get_test_show_empty_list(), 'ShowIQN': self.cli_data.get_test_show_iqn(), 'CreateMap': SUCCEED, 'ShowNet': self.cli_data.get_test_show_net(), 'ExecuteCommand': self.cli_data.get_fake_discovery( test_target_iqn, test_target_protal), 'ShowDevice': self.cli_data.get_test_show_device(), } self._driver_setup(mock_commands) properties = self.driver.initialize_connection( test_volume, test_connector) self.assertDictEqual( self.cli_data.test_iscsi_properties_empty_map, properties) def test_initialize_connection_with_create_map_fail(self): test_volume = self.cli_data.test_volume test_connector = self.cli_data.test_connector_iscsi mock_commands = { 'ShowChannel': self.cli_data.get_test_show_channel_r_model(), 'ShowMap': self.cli_data.get_test_show_map(), 'ShowIQN': self.cli_data.get_test_show_iqn(), 'CreateMap': FAKE_ERROR_RETURN, 'ShowNet': SUCCEED, 'ShowDevice': self.cli_data.get_test_show_device(), } self._driver_setup(mock_commands) self.assertRaises( common_cli.InfortrendCliException, self.driver.initialize_connection, test_volume, test_connector) def test_initialize_connection_with_get_ip_fail(self): test_volume = self.cli_data.test_volume test_connector = self.cli_data.test_connector_iscsi mock_commands = { 'ShowChannel': self.cli_data.get_test_show_channel(), 'ShowMap': self.cli_data.get_test_show_map(), 'ShowIQN': self.cli_data.get_test_show_iqn(), 'CreateMap': SUCCEED, 'ShowNet': FAKE_ERROR_RETURN, } self._driver_setup(mock_commands) self.assertRaises( common_cli.InfortrendCliException, self.driver.initialize_connection, test_volume, test_connector) @mock.patch.object(common_cli.LOG, 'info', mock.Mock()) def test_initialize_connection_with_mcs(self): configuration = copy.copy(self.configuration) test_volume = self.cli_data.test_volume_1 test_partition_id = self.cli_data.fake_partition_id[1] test_connector = copy.deepcopy(self.cli_data.test_connector_iscsi_1) test_iscsi_properties = self.cli_data.test_iscsi_properties_with_mcs_1 test_target_portal = [test_iscsi_properties['data']['target_portal']] test_target_iqn = [test_iscsi_properties['data']['target_iqn']] test_connector['multipath'] = False mock_commands = { 'ShowChannel': self.cli_data.get_test_show_channel_with_mcs(), 'ShowMap': self.cli_data.get_test_show_map(), 'ShowIQN': self.cli_data.get_test_show_iqn(), 'CreateIQN': SUCCEED, 'CreateMap': SUCCEED, 'ShowNet': self.cli_data.get_test_show_net(), 'ExecuteCommand': self.cli_data.get_fake_discovery( test_target_iqn, test_target_portal), 'ShowDevice': self.cli_data.get_test_show_device(), } self._driver_setup(mock_commands, configuration) properties = self.driver.initialize_connection( test_volume, test_connector) self.assertDictEqual(test_iscsi_properties, properties) expect_cli_cmd = [ mock.call('CreateMap', 'part', test_partition_id, '4', '0', '1', 'iqn=%s' % test_connector['initiator']), ] self._assert_cli_has_calls(expect_cli_cmd) @mock.patch.object(common_cli.LOG, 'info', mock.Mock()) def test_initialize_connection_with_exist_map(self): configuration = copy.copy(self.configuration) test_volume = self.cli_data.test_volume test_connector = 
copy.deepcopy(self.cli_data.test_connector_iscsi) test_iscsi_properties = self.cli_data.test_iscsi_properties_with_mcs test_target_portal = [test_iscsi_properties['data']['target_portal']] test_target_iqn = [test_iscsi_properties['data']['target_iqn']] test_connector['multipath'] = False mock_commands = { 'ShowChannel': self.cli_data.get_test_show_channel_with_mcs(), 'ShowMap': self.cli_data.get_test_show_map(), 'ShowIQN': self.cli_data.get_test_show_iqn(), 'CreateMap': SUCCEED, 'ShowNet': self.cli_data.get_test_show_net(), 'ExecuteCommand': self.cli_data.get_fake_discovery( test_target_iqn, test_target_portal), 'ShowDevice': self.cli_data.get_test_show_device(), } self._driver_setup(mock_commands, configuration) properties = self.driver.initialize_connection( test_volume, test_connector) self.assertDictEqual(test_iscsi_properties, properties) @mock.patch.object(common_cli.LOG, 'info', mock.Mock()) def test_extend_volume(self): test_volume = self.cli_data.test_volume test_partition_id = self.cli_data.fake_partition_id[0] test_new_size = 10 test_expand_size = test_new_size - test_volume['size'] mock_commands = { 'SetPartition': SUCCEED, } self._driver_setup(mock_commands) self.driver.extend_volume(test_volume, test_new_size) expect_cli_cmd = [ mock.call('SetPartition', 'expand', test_partition_id, 'size=%sGB' % test_expand_size), ] self._assert_cli_has_calls(expect_cli_cmd) @mock.patch.object(common_cli.LOG, 'info', mock.Mock()) def test_extend_volume_mb(self): test_volume = self.cli_data.test_volume test_partition_id = self.cli_data.fake_partition_id[0] test_new_size = 5.5 test_expand_size = round((test_new_size - test_volume['size']) * 1024) mock_commands = { 'SetPartition': SUCCEED, } self._driver_setup(mock_commands) self.driver.extend_volume(test_volume, test_new_size) expect_cli_cmd = [ mock.call('SetPartition', 'expand', test_partition_id, 'size=%sMB' % test_expand_size), ] self._assert_cli_has_calls(expect_cli_cmd) def test_extend_volume_fail(self): test_volume = self.cli_data.test_volume test_new_size = 10 mock_commands = { 'SetPartition': FAKE_ERROR_RETURN, } self._driver_setup(mock_commands) self.assertRaises( common_cli.InfortrendCliException, self.driver.extend_volume, test_volume, test_new_size) @mock.patch.object(common_cli.LOG, 'info', mock.Mock()) def test_terminate_connection(self): test_volume = self.cli_data.test_volume test_partition_id = self.cli_data.fake_partition_id[0] test_connector = self.cli_data.test_connector_iscsi mock_commands = { 'DeleteMap': SUCCEED, 'ShowMap': [self.cli_data.get_test_show_map(), self.cli_data.get_test_show_empty_list()], 'DeleteIQN': SUCCEED, 'ShowDevice': self.cli_data.get_test_show_device(), } self._driver_setup(mock_commands) self.driver.terminate_connection(test_volume, test_connector) expect_cli_cmd = [ mock.call('ShowDevice'), mock.call('ShowMap', 'part=%s' % test_partition_id), mock.call('DeleteMap', 'part', test_partition_id, '1', '0', '0', '-y'), mock.call('DeleteMap', 'part', test_partition_id, '1', '0', '1', '-y'), mock.call('DeleteMap', 'part', test_partition_id, '4', '0', '0', '-y'), mock.call('ShowMap'), mock.call('DeleteIQN', test_connector['initiator'][-16:]), ] self._assert_cli_has_calls(expect_cli_cmd) def test_terminate_connection_fail(self): test_volume = self.cli_data.test_volume test_connector = self.cli_data.test_connector_iscsi mock_commands = { 'DeleteMap': FAKE_ERROR_RETURN, } self._driver_setup(mock_commands) self.assertRaises( common_cli.InfortrendCliException, self.driver.terminate_connection, test_volume, 
test_connector) @mock.patch('oslo_service.loopingcall.FixedIntervalLoopingCall', new=utils.ZeroIntervalLoopingCall) def test_migrate_volume(self): test_host = copy.deepcopy(self.cli_data.test_migrate_host) fake_pool = copy.deepcopy(self.cli_data.fake_pool) test_volume = self.cli_data.test_volume test_volume_id = test_volume['id'] test_src_part_id = self.cli_data.fake_partition_id[0] test_dst_part_id = self.cli_data.fake_partition_id[2] test_pair_id = self.cli_data.fake_pair_id[0] test_model_update = { 'provider_location': 'partition_id^%s@system_id^%s' % ( test_dst_part_id, int(self.cli_data.fake_system_id[0], 16) ) } mock_commands = { 'CreatePartition': SUCCEED, 'ShowPartition': self.cli_data.get_test_show_partition( test_volume_id, fake_pool['pool_id']), 'CreateReplica': SUCCEED, 'ShowReplica': self.cli_data.get_test_show_replica_detail_for_migrate( test_src_part_id, test_dst_part_id, test_volume_id), 'DeleteReplica': SUCCEED, 'DeleteMap': SUCCEED, 'DeletePartition': SUCCEED, } self._driver_setup(mock_commands) self.driver.system_id = 'DEEC' rc, model_update = self.driver.migrate_volume(test_volume, test_host) expect_cli_cmd = [ mock.call('CreatePartition', fake_pool['pool_id'], test_volume['id'], 'size=%s' % (test_volume['size'] * 1024), ''), mock.call('ShowPartition'), mock.call('CreateReplica', 'Cinder-Migrate', 'part', test_src_part_id, 'part', test_dst_part_id, 'type=mirror'), mock.call('ShowReplica', '-l'), mock.call('DeleteReplica', test_pair_id, '-y'), mock.call('DeleteMap', 'part', test_src_part_id, '-y'), mock.call('DeletePartition', test_src_part_id, '-y'), ] self._assert_cli_has_calls(expect_cli_cmd) self.assertTrue(rc) self.assertDictEqual(test_model_update, model_update) @mock.patch.object(common_cli.LOG, 'error') def test_migrate_volume_with_invalid_storage(self, log_error): fake_host = self.cli_data.fake_host test_volume = self.cli_data.test_volume mock_commands = { 'ShowLV': self._mock_show_lv_for_migrate, } self._driver_setup(mock_commands) rc, model_update = self.driver.migrate_volume(test_volume, fake_host) self.assertFalse(rc) self.assertIsNone(model_update) self.assertEqual(1, log_error.call_count) @mock.patch('time.sleep') def test_migrate_volume_with_get_part_id_fail(self, mock_sleep): test_host = copy.deepcopy(self.cli_data.test_migrate_host) test_volume = self.cli_data.test_volume mock_commands = { 'CreatePartition': SUCCEED, 'ShowPartition': self.cli_data.get_test_show_partition(), 'DeleteMap': SUCCEED, 'CreateReplica': SUCCEED, 'CreateMap': SUCCEED, 'ShowLV': self._mock_show_lv_for_migrate, } self._driver_setup(mock_commands) self.driver.system_id = 'DEEC' self.assertRaises( exception.VolumeBackendAPIException, self.driver.migrate_volume, test_volume, test_host) mock_sleep.assert_called() def test_migrate_volume_with_create_replica_fail(self): test_host = copy.deepcopy(self.cli_data.test_migrate_host) fake_pool = copy.deepcopy(self.cli_data.fake_pool) test_volume = self.cli_data.test_volume mock_commands = { 'CreatePartition': SUCCEED, 'ShowPartition': self.cli_data.get_test_show_partition( test_volume['id'], fake_pool['pool_id']), 'DeleteMap': SUCCEED, 'CreateReplica': FAKE_ERROR_RETURN, 'CreateMap': SUCCEED, 'ShowLV': self._mock_show_lv_for_migrate, } self._driver_setup(mock_commands) self.assertRaises( common_cli.InfortrendCliException, self.driver.migrate_volume, test_volume, test_host) def test_manage_existing_get_size(self): test_volume = self.cli_data.test_volume test_ref_volume = self.cli_data.test_ref_volume_with_id test_pool = 
self.cli_data.fake_lv_id[0] test_ref_volume_id = test_ref_volume['source-id'] mock_commands = { 'ShowPartition': self.cli_data.get_test_show_partition_detail( 'cinder-unmanaged-%s' % test_ref_volume_id[:-17], test_pool), 'ShowMap': SUCCEED, } self._driver_setup(mock_commands) size = self.driver.manage_existing_get_size( test_volume, test_ref_volume) expect_cli_cmd = [ mock.call('ShowPartition', '-l'), ] self._assert_cli_has_calls(expect_cli_cmd) self.assertEqual(20, size) def test_manage_existing_get_size_with_name(self): test_volume = self.cli_data.test_volume test_ref_volume = self.cli_data.test_ref_volume_with_name test_pool = self.cli_data.fake_lv_id[0] mock_commands = { 'ShowPartition': self.cli_data.get_test_show_partition_detail( test_ref_volume['source-name'], test_pool), 'ShowMap': SUCCEED, } self._driver_setup(mock_commands) size = self.driver.manage_existing_get_size( test_volume, test_ref_volume) expect_cli_cmd = [ mock.call('ShowPartition', '-l'), ] self._assert_cli_has_calls(expect_cli_cmd) self.assertEqual(20, size) def test_manage_existing_get_size_in_use(self): test_volume = self.cli_data.test_volume test_ref_volume = self.cli_data.test_ref_volume test_pool = self.cli_data.fake_lv_id[0] test_ref_volume_id = test_ref_volume['source-id'] mock_commands = { 'ShowPartition': self.cli_data.get_test_show_partition_detail( 'cinder-unmanaged-%s' % test_ref_volume_id[:-17], test_pool), } self._driver_setup(mock_commands) self.assertRaises( exception.VolumeDriverException, self.driver.manage_existing_get_size, test_volume, test_ref_volume) def test_manage_existing_get_size_no_source_id(self): test_volume = self.cli_data.test_volume test_ref_volume = self.cli_data.test_dst_volume self.driver = self._get_driver(self.configuration) self.assertRaises( exception.ManageExistingInvalidReference, self.driver.manage_existing_get_size, test_volume, test_ref_volume) def test_manage_existing_get_size_show_part_fail(self): test_volume = self.cli_data.test_volume test_ref_volume = self.cli_data.test_ref_volume_with_id mock_commands = { 'ShowPartition': FAKE_ERROR_RETURN, } self._driver_setup(mock_commands) self.assertRaises( common_cli.InfortrendCliException, self.driver.manage_existing_get_size, test_volume, test_ref_volume) def test_manage_existing_get_size_with_not_exist(self): test_volume = self.cli_data.test_volume test_ref_volume = self.cli_data.test_ref_volume_with_id mock_commands = { 'ShowPartition': self.cli_data.get_test_show_partition_detail(), } self._driver_setup(mock_commands) self.assertRaises( exception.ManageExistingInvalidReference, self.driver.manage_existing_get_size, test_volume, test_ref_volume) @mock.patch.object(common_cli.LOG, 'info') def test_manage_existing(self, log_info): test_volume = self.cli_data.test_volume test_ref_volume = self.cli_data.test_ref_volume_with_id test_pool = self.cli_data.fake_lv_id[0] test_partition_id = self.cli_data.test_dst_volume['id'] test_ref_volume_id = test_ref_volume['source-id'] test_model_update = { 'provider_location': 'partition_id^%s@system_id^%s' % ( test_partition_id, int(self.cli_data.fake_system_id[0], 16) ) } mock_commands = { 'ShowPartition': self.cli_data.get_test_show_partition_detail( 'cinder-unmanaged-%s' % test_ref_volume_id[:-17], test_pool), 'SetPartition': SUCCEED, 'ShowDevice': self.cli_data.get_test_show_device(), } self._driver_setup(mock_commands) model_update = self.driver.manage_existing( test_volume, test_ref_volume) expect_cli_cmd = [ mock.call('ShowPartition', '-l'), mock.call('SetPartition', test_partition_id, 
'name=%s' % test_volume['id']), mock.call('ShowDevice'), ] self._assert_cli_has_calls(expect_cli_cmd) self.assertEqual(1, log_info.call_count) self.assertDictEqual(test_model_update, model_update) def test_manage_existing_rename_fail(self): test_volume = self.cli_data.test_volume test_ref_volume = self.cli_data.test_ref_volume_with_id test_pool = self.cli_data.fake_lv_id[0] test_ref_volume_id = test_ref_volume['source-id'] mock_commands = { 'ShowPartition': self.cli_data.get_test_show_partition_detail( 'cinder-unmanaged-%s' % test_ref_volume_id[:-17], test_pool), 'SetPartition': FAKE_ERROR_RETURN, } self._driver_setup(mock_commands) self.assertRaises( common_cli.InfortrendCliException, self.driver.manage_existing, test_volume, test_ref_volume) def test_manage_existing_with_part_not_found(self): test_volume = self.cli_data.test_volume test_ref_volume = self.cli_data.test_ref_volume_with_id mock_commands = { 'ShowPartition': self.cli_data.get_test_show_partition_detail(), 'SetPartition': SUCCEED, } self._driver_setup(mock_commands) self.assertRaises( exception.ManageExistingInvalidReference, self.driver.manage_existing, test_volume, test_ref_volume) @mock.patch.object(common_cli.LOG, 'info') def test_manage_existing_with_import(self, log_info): test_volume = self.cli_data.test_volume test_ref_volume = self.cli_data.test_ref_volume_with_name test_pool = self.cli_data.fake_lv_id[0] test_partition_id = self.cli_data.fake_partition_id[2] test_model_update = { 'provider_location': 'partition_id^%s@system_id^%s' % ( test_partition_id, int(self.cli_data.fake_system_id[0], 16) ) } mock_commands = { 'ShowPartition': self.cli_data.get_test_show_partition_detail( test_ref_volume['source-name'], test_pool), 'SetPartition': SUCCEED, 'ShowDevice': self.cli_data.get_test_show_device(), } self._driver_setup(mock_commands) model_update = self.driver.manage_existing( test_volume, test_ref_volume) expect_cli_cmd = [ mock.call('SetPartition', test_partition_id, 'name=%s' % test_volume['id']), ] self._assert_cli_has_calls(expect_cli_cmd) self.assertEqual(1, log_info.call_count) self.assertDictEqual(test_model_update, model_update) @mock.patch.object(common_cli.LOG, 'info') def test_unmanage(self, log_info): test_volume = self.cli_data.test_volume test_volume_id = test_volume['id'] test_partition_id = self.cli_data.fake_partition_id[0] mock_commands = { 'SetPartition': SUCCEED, } self._driver_setup(mock_commands) self.driver.unmanage(test_volume) expect_cli_cmd = [ mock.call( 'SetPartition', test_partition_id, 'name=cinder-unmanaged-%s' % test_volume_id[:-17]), ] self._assert_cli_has_calls(expect_cli_cmd) self.assertEqual(1, log_info.call_count) @mock.patch.object(common_cli.LOG, 'info') def test_retype_without_change(self, log_info): test_volume = self.cli_data.test_volume test_new_type = self.cli_data.test_new_type test_diff = {'extra_specs': {}} test_host = self.cli_data.test_migrate_host_2 self.driver = self._get_driver(self.configuration) rc = self.driver.retype( None, test_volume, test_new_type, test_diff, test_host) self.assertTrue(rc) self.assertEqual(1, log_info.call_count) @mock.patch.object(common_cli.LOG, 'warning') def test_retype_with_change_global_provision(self, log_warning): test_volume = self.cli_data.test_volume test_new_type = self.cli_data.test_new_type test_diff = self.cli_data.test_diff test_host = self.cli_data.test_migrate_host_2 self.driver = self._get_driver(self.configuration) rc = self.driver.retype( None, test_volume, test_new_type, test_diff, test_host) self.assertFalse(rc) 
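        # A global provisioning change is expected to be rejected: retype
        # returns False and emits exactly one warning, which the next
        # assertion checks.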
self.assertEqual(1, log_warning.call_count) @mock.patch.object(common_cli.LOG, 'warning') def test_retype_with_change_individual_provision(self, log_warning): test_volume = self.cli_data.test_volume test_host = self.cli_data.test_migrate_host_2 test_new_type = { 'name': 'type1', 'qos_specs_id': None, 'deleted': False, 'extra_specs': { 'infortrend:provisioning': 'LV-1:thin', }, 'id': '28c8f82f-416e-148b-b1ae-2556c032d3c0', } test_diff = { 'extra_specs': { 'infortrend:provisioning': ('LV-2:thin;LV-1:full', 'LV-1:thin') } } self.driver = self._get_driver(self.configuration) rc = self.driver.retype( None, test_volume, test_new_type, test_diff, test_host) self.assertFalse(rc) self.assertEqual(1, log_warning.call_count) @mock.patch.object(common_cli.LOG, 'warning') def test_retype_with_change_mixed_provision(self, log_warning): test_volume = self.cli_data.test_volume test_host = self.cli_data.test_migrate_host_2 test_new_type = { 'name': 'type1', 'qos_specs_id': None, 'deleted': False, 'extra_specs': { 'infortrend:provisioning': 'LV-1:thin', }, 'id': '28c8f82f-416e-148b-b1ae-2556c032d3c0', } test_diff = { 'extra_specs': { 'infortrend:provisioning': ('full', 'LV-1:thin') } } self.driver = self._get_driver(self.configuration) rc = self.driver.retype( None, test_volume, test_new_type, test_diff, test_host) self.assertFalse(rc) self.assertEqual(1, log_warning.call_count) def test_retype_with_change_same_provision(self): test_volume = self.cli_data.test_volume test_host = self.cli_data.test_migrate_host_2 test_new_type = { 'name': 'type1', 'qos_specs_id': None, 'deleted': False, 'extra_specs': { 'infortrend:provisioning': 'LV-1:thin', }, 'id': '28c8f82f-416e-148b-b1ae-2556c032d3c0', } test_diff = { 'extra_specs': { 'infortrend:provisioning': ('thin', 'LV-1:thin') } } self.driver = self._get_driver(self.configuration) rc = self.driver.retype( None, test_volume, test_new_type, test_diff, test_host) self.assertTrue(rc) def test_retype_with_change_global_tier(self): test_volume = self.cli_data.test_volume test_host = self.cli_data.test_migrate_host_2 test_new_type = { 'name': 'type1', 'qos_specs_id': None, 'deleted': False, 'extra_specs': { 'infortrend:provisioning': 'thin', 'infortrend:tiering': '2,3', }, 'id': '28c8f82f-416e-148b-b1ae-2556c032d3c0', } test_diff = { 'extra_specs': { 'infortrend:tiering': ('0,1', '2,3') } } mock_commands = { 'ShowLV': self._mock_show_lv(), 'SetPartition': SUCCEED, 'SetLV': SUCCEED, 'ShowPartition': self.cli_data.get_test_show_partition_detail(), } self._driver_setup(mock_commands) self.driver.tier_pools_dict = { self.cli_data.fake_lv_id[0]: [0, 1, 2, 3], } rc = self.driver.retype( None, test_volume, test_new_type, test_diff, test_host) self.assertTrue(rc) def test_retype_with_change_individual_tier(self): test_volume = self.cli_data.test_volume test_host = self.cli_data.test_migrate_host_2 test_new_type = { 'name': 'type1', 'qos_specs_id': None, 'deleted': False, 'extra_specs': { 'infortrend:provisioning': 'thin', 'infortrend:tiering': 'LV-1:2,3', }, 'id': '28c8f82f-416e-148b-b1ae-2556c032d3c0', } test_diff = { 'extra_specs': { 'infortrend:tiering': ('LV-1:0,1', 'LV-1:2,3') } } mock_commands = { 'ShowLV': self._mock_show_lv(), 'SetPartition': SUCCEED, 'SetLV': SUCCEED, 'ShowPartition': self.cli_data.get_test_show_partition_detail(), } self._driver_setup(mock_commands) self.driver.tier_pools_dict = { self.cli_data.fake_lv_id[0]: [0, 1, 2, 3], } rc = self.driver.retype( None, test_volume, test_new_type, test_diff, test_host) self.assertTrue(rc) def 
test_retype_change_tier_with_multi_settings(self): test_volume = self.cli_data.test_volume test_host = self.cli_data.test_migrate_host_2 test_new_type = { 'name': 'type1', 'qos_specs_id': None, 'deleted': False, 'extra_specs': { 'infortrend:provisioning': 'thin', 'infortrend:tiering': 'LV-2:0;LV-1:2,3', }, 'id': '28c8f82f-416e-148b-b1ae-2556c032d3c0', } test_diff = { 'extra_specs': { 'infortrend:tiering': ('LV-1:0,1', 'LV-2:0;LV-1:2,3') } } mock_commands = { 'ShowLV': self._mock_show_lv(), 'SetPartition': SUCCEED, 'SetLV': SUCCEED, 'ShowPartition': self.cli_data.get_test_show_partition_detail(), } self._driver_setup(mock_commands) self.driver.tier_pools_dict = { self.cli_data.fake_lv_id[0]: [0, 1, 2, 3], } rc = self.driver.retype( None, test_volume, test_new_type, test_diff, test_host) self.assertTrue(rc) def test_retype_change_with_tier_not_exist(self): test_volume = self.cli_data.test_volume test_host = self.cli_data.test_migrate_host_2 test_new_type = { 'name': 'type1', 'qos_specs_id': None, 'deleted': False, 'extra_specs': { 'infortrend:provisioning': 'thin', 'infortrend:tiering': 'LV-2:0;LV-1:2,3', }, 'id': '28c8f82f-416e-148b-b1ae-2556c032d3c0', } test_diff = { 'extra_specs': { 'infortrend:tiering': ('LV-1:0,1', 'LV-2:0;LV-1:2,3') } } mock_commands = { 'ShowLV': self._mock_show_lv(), } self._driver_setup(mock_commands) self.driver.tier_pools_dict = { self.cli_data.fake_lv_id[0]: [0, 1, 2], } self.assertRaises( exception.VolumeDriverException, self.driver.retype, None, test_volume, test_new_type, test_diff, test_host) @mock.patch.object(common_cli.LOG, 'warning') def test_retype_change_with_not_a_tier_pool(self, log_warning): test_volume = self.cli_data.test_volume test_host = self.cli_data.test_migrate_host_2 test_new_type = { 'name': 'type1', 'qos_specs_id': None, 'deleted': False, 'extra_specs': { 'infortrend:provisioning': 'full', 'infortrend:tiering': 'LV-1:2', }, 'id': '28c8f82f-416e-148b-b1ae-2556c032d3c0', } test_diff = { 'extra_specs': { 'infortrend:tiering': ('', 'LV-1:2') } } mock_commands = { 'ShowLV': self._mock_show_lv(), } self._driver_setup(mock_commands) self.driver.tier_pools_dict = { self.cli_data.fake_lv_id[2]: [0, 1, 2], } rc = self.driver.retype( None, test_volume, test_new_type, test_diff, test_host) self.assertTrue(rc) self.assertEqual(1, log_warning.call_count) @mock.patch.object(common_cli.LOG, 'info', mock.Mock()) def test_retype_with_migrate(self): fake_pool = copy.deepcopy(self.cli_data.fake_pool) test_host = copy.deepcopy(self.cli_data.test_migrate_host) test_volume = self.cli_data.test_volume test_volume_id = test_volume['id'] test_new_type = self.cli_data.test_new_type test_diff = self.cli_data.test_diff test_src_part_id = self.cli_data.fake_partition_id[0] test_dst_part_id = self.cli_data.fake_partition_id[2] test_pair_id = self.cli_data.fake_pair_id[0] test_model_update = { 'provider_location': 'partition_id^%s@system_id^%s' % ( test_dst_part_id, int(self.cli_data.fake_system_id[0], 16) ) } mock_commands = { 'ShowSnapshot': SUCCEED, 'CreatePartition': SUCCEED, 'ShowPartition': self.cli_data.get_test_show_partition( test_volume_id, fake_pool['pool_id']), 'CreateReplica': SUCCEED, 'ShowReplica': self.cli_data.get_test_show_replica_detail_for_migrate( test_src_part_id, test_dst_part_id, test_volume_id), 'DeleteReplica': SUCCEED, 'DeleteMap': SUCCEED, 'DeletePartition': SUCCEED, } self._driver_setup(mock_commands) self.driver.system_id = 'DEEC' rc, model_update = self.driver.retype( None, test_volume, test_new_type, test_diff, test_host) min_size = 
int(test_volume['size'] * 1024 * 0.2) create_params = 'init=disable min=%sMB' % min_size expect_cli_cmd = [ mock.call('ShowSnapshot', 'part=%s' % test_src_part_id), mock.call( 'CreatePartition', fake_pool['pool_id'], test_volume['id'], 'size=%s' % (test_volume['size'] * 1024), create_params, ), mock.call('ShowPartition'), mock.call( 'CreateReplica', 'Cinder-Migrate', 'part', test_src_part_id, 'part', test_dst_part_id, 'type=mirror' ), mock.call('ShowReplica', '-l'), mock.call('DeleteReplica', test_pair_id, '-y'), mock.call('DeleteMap', 'part', test_src_part_id, '-y'), mock.call('DeletePartition', test_src_part_id, '-y'), ] self._assert_cli_has_calls(expect_cli_cmd) self.assertTrue(rc) self.assertDictEqual(test_model_update, model_update) @mock.patch.object(common_cli.LOG, 'debug', mock.Mock()) @mock.patch.object(common_cli.LOG, 'info', mock.Mock()) def test_update_migrated_volume(self): src_volume = self.cli_data.test_volume dst_volume = copy.deepcopy(self.cli_data.test_dst_volume) test_dst_part_id = self.cli_data.fake_partition_id[1] dst_volume['provider_location'] = 'partition_id^%s@system_id^%s' % ( test_dst_part_id, int(self.cli_data.fake_system_id[0], 16)) test_model_update = { '_name_id': None, 'provider_location': dst_volume['provider_location'], } mock_commands = { 'SetPartition': SUCCEED, } self._driver_setup(mock_commands) model_update = self.driver.update_migrated_volume( None, src_volume, dst_volume, 'available') expect_cli_cmd = [ mock.call('SetPartition', test_dst_part_id, 'name=%s' % src_volume['id']), ] self._assert_cli_has_calls(expect_cli_cmd) self.assertDictEqual(test_model_update, model_update) @mock.patch.object(common_cli.LOG, 'debug', mock.Mock()) def test_update_migrated_volume_rename_fail(self): src_volume = self.cli_data.test_volume dst_volume = self.cli_data.test_dst_volume dst_volume['_name_id'] = 'fake_name_id' test_dst_part_id = self.cli_data.fake_partition_id[1] dst_volume['provider_location'] = 'partition_id^%s@system_id^%s' % ( test_dst_part_id, int(self.cli_data.fake_system_id[0], 16)) mock_commands = { 'SetPartition': FAKE_ERROR_RETURN } self._driver_setup(mock_commands) model_update = self.driver.update_migrated_volume( None, src_volume, dst_volume, 'available') self.assertEqual({'_name_id': 'fake_name_id'}, model_update) def test_get_extraspecs_set_with_default_setting(self): test_extraspecs = {} test_result = { 'global_provisioning': 'full', 'global_tiering': 'all', } self.driver = self._get_driver(self.configuration) result = self.driver._get_extraspecs_set(test_extraspecs) self.assertEqual(test_result, result) def test_get_extraspecs_set_with_global_settings(self): test_extraspecs = { 'infortrend:tiering': '1,2', 'infortrend:provisioning': 'thin', } test_result = { 'global_provisioning': 'thin', 'global_tiering': [1, 2], } self.driver = self._get_driver(self.configuration) result = self.driver._get_extraspecs_set(test_extraspecs) self.assertEqual(test_result, result) def test_get_extraspecs_set_with_tier_global_settings(self): test_extraspecs = { 'infortrend:tiering': '1,2', } test_result = { 'global_provisioning': 'full', 'global_tiering': [1, 2], } self.driver = self._get_driver(self.configuration) result = self.driver._get_extraspecs_set(test_extraspecs) self.assertEqual(test_result, result) def test_get_extraspecs_set_with_provision_global_settings(self): test_extraspecs = { 'infortrend:provisioning': 'thin', } test_result = { 'global_provisioning': 'thin', 'global_tiering': 'all', } self.driver = self._get_driver(self.configuration) result = 
self.driver._get_extraspecs_set(test_extraspecs) self.assertEqual(test_result, result) def test_get_extraspecs_set_with_individual_tier_settings(self): test_extraspecs = { 'infortrend:tiering': 'LV-0:0;LV-1:1,2', } test_result = { 'global_provisioning': 'full', 'global_tiering': 'all', 'LV-0': { 'tiering': [0], }, 'LV-1': { 'tiering': [1, 2], }, } self.driver = self._get_driver(self.configuration) self.driver.pool_dict = {'LV-0': '', 'LV-1': '', 'LV-2': ''} result = self.driver._get_extraspecs_set(test_extraspecs) self.assertEqual(test_result, result) @mock.patch.object(common_cli.LOG, 'warning') def test_get_extraspecs_set_with_lv0_not_set_in_config(self, log_warning): test_extraspecs = { 'infortrend:tiering': 'LV-0:0;LV-1:1,2', } test_result = { 'global_provisioning': 'full', 'global_tiering': 'all', 'LV-1': { 'tiering': [1, 2], }, } self.driver = self._get_driver(self.configuration) result = self.driver._get_extraspecs_set(test_extraspecs) self.assertEqual(test_result, result) self.assertEqual(1, log_warning.call_count) def test_get_extraspecs_set_with_individual_provision_settings(self): test_extraspecs = { 'infortrend:provisioning': 'LV-1:FULL; LV-2:Thin', } test_result = { 'global_provisioning': 'full', 'global_tiering': 'all', 'LV-1': { 'provisioning': 'full', }, 'LV-2': { 'provisioning': 'thin', }, } self.driver = self._get_driver(self.configuration) result = self.driver._get_extraspecs_set(test_extraspecs) self.assertEqual(test_result, result) def test_get_extraspecs_set_with_mixed_settings(self): test_extraspecs = { 'infortrend:provisioning': 'LV-1:FULL; LV-2:Thin', 'infortrend:tiering': '1,2', } test_result = { 'global_provisioning': 'full', 'global_tiering': [1, 2], 'LV-1': { 'provisioning': 'full', }, 'LV-2': { 'provisioning': 'thin', }, } self.driver = self._get_driver(self.configuration) result = self.driver._get_extraspecs_set(test_extraspecs) self.assertEqual(test_result, result) @mock.patch.object(common_cli.LOG, 'warning') def test_get_extraspecs_set_with_err_tier(self, log_warning): test_extraspecs = { 'infortrend:provisioning': 'LV-1:FULL; LV-2:Thin', 'infortrend:tiering': 'LV-1:4,3; LV-2:-1,0', } test_result = { 'global_provisioning': 'full', 'global_tiering': 'all', 'LV-1': { 'provisioning': 'full', 'tiering': 'Err:[3, 4]', }, 'LV-2': { 'provisioning': 'thin', 'tiering': 'Err:[0, -1]', }, } self.driver = self._get_driver(self.configuration) result = self.driver._get_extraspecs_set(test_extraspecs) self.assertEqual(test_result, result) self.assertEqual(2, log_warning.call_count) @mock.patch.object(common_cli.LOG, 'warning') def test_get_extraspecs_set_with_err_provision(self, log_warning): test_extraspecs = { 'infortrend:provisioning': 'LV-1:FOO; LV-2:Bar', 'infortrend:tiering': '1,2', } test_result = { 'global_provisioning': 'full', 'global_tiering': [1, 2], 'LV-1': { 'provisioning': 'Err:FOO', }, 'LV-2': { 'provisioning': 'Err:Bar', }, } self.driver = self._get_driver(self.configuration) result = self.driver._get_extraspecs_set(test_extraspecs) self.assertEqual(test_result, result) self.assertEqual(2, log_warning.call_count) def test_get_pool_extraspecs_global(self): test_extraspecs_set = { 'global_provisioning': 'full', 'global_tiering': 'all', 'LV-2': { 'provisioning': 'thin', }, } test_result = { 'provisioning': 'full', 'tiering': 'all', } self.driver = self._get_driver(self.configuration) result = self.driver._get_pool_extraspecs( 'LV-1', test_extraspecs_set) self.assertEqual(test_result, result) def test_get_pool_extraspecs_individual(self): test_extraspecs_set 
= { 'global_provisioning': 'full', 'global_tiering': [1, 2], 'LV-1': { 'provisioning': 'full', 'tiering': [0], }, 'LV-2': { 'provisioning': 'thin', }, } test_result = { 'provisioning': 'full', 'tiering': [0], } mock_commands = { 'ShowLV': self._mock_show_lv(), } self._driver_setup(mock_commands) result = self.driver._get_pool_extraspecs( 'LV-1', test_extraspecs_set) self.assertEqual(test_result, result) def test_get_pool_extraspecs_mixed(self): test_extraspecs_set = { 'global_provisioning': 'full', 'global_tiering': [1, 2], 'LV-1': { 'provisioning': 'full', }, 'LV-2': { 'provisioning': 'thin', }, } test_result = { 'provisioning': 'thin', 'tiering': [1, 2], } mock_commands = { 'ShowLV': self._mock_show_lv(), } self._driver_setup(mock_commands) result = self.driver._get_pool_extraspecs( 'LV-2', test_extraspecs_set) self.assertEqual(test_result, result) def test_get_pool_extraspecs_conflict(self): test_extraspecs_set = { 'global_provisioning': 'full', 'global_tiering': [1, 2], 'LV-1': { 'provisioning': 'full', }, 'LV-2': { 'provisioning': 'thin', }, } mock_commands = { 'ShowLV': self._mock_show_lv(), } self._driver_setup(mock_commands) self.assertRaises( exception.VolumeDriverException, self.driver._get_pool_extraspecs, 'LV-1', test_extraspecs_set) def test_get_manageable_volumes(self): fake_cinder_volumes = self.cli_data.fake_cinder_volumes mock_commands = { 'ShowPartition': self.cli_data.get_test_show_partition_detail( volume_id='hello-there', pool_id=self.cli_data.fake_lv_id[2]) } ans = [{ 'reference': { 'source-name': self.cli_data.fake_volume_id[0], 'source-id': self.cli_data.fake_partition_id[0], 'pool-name': 'LV-1' }, 'size': 20, 'safe_to_manage': False, 'reason_not_safe': 'Volume In-use', 'cinder_id': None, 'extra_info': None }, { 'reference': { 'source-name': self.cli_data.fake_volume_id[1], 'source-id': self.cli_data.fake_partition_id[1], 'pool-name': 'LV-1' }, 'size': 20, 'safe_to_manage': False, 'reason_not_safe': 'Already Managed', 'cinder_id': self.cli_data.fake_volume_id[1], 'extra_info': None }, { 'reference': { 'source-name': 'hello-there', 'source-id': '6bb119a8-d25b-45a7-8d1b-88e127885666', 'pool-name': 'LV-1' }, 'size': 20, 'safe_to_manage': True, 'reason_not_safe': None, 'cinder_id': None, 'extra_info': None }] self._driver_setup(mock_commands) result = self.driver.get_manageable_volumes(fake_cinder_volumes, None, 1000, 0, ['reference'], ['desc']) ans = volume_utils.paginate_entries_list(ans, None, 1000, 0, ['reference'], ['desc']) self.assertEqual(ans, result) def test_get_manageable_snapshots(self): fake_cinder_snapshots = self.cli_data.fake_cinder_snapshots mock_commands = { 'ShowSnapshot': self.cli_data.get_test_show_snapshot_get_manage(), 'ShowPartition': self.cli_data.get_test_show_partition_detail( volume_id='hello-there', pool_id=self.cli_data.fake_lv_id[2]) } self._driver_setup(mock_commands) ans = [{ 'reference': { 'source-id': self.cli_data.fake_snapshot_id[0], 'source-name': self.cli_data.fake_snapshot_name[0], }, 'size': 20, 'safe_to_manage': False, 'reason_not_safe': 'Volume In-use', 'cinder_id': None, 'extra_info': None, 'source_reference': { 'volume-id': self.cli_data.fake_volume_id[0] } }, { 'reference': { 'source-id': self.cli_data.fake_snapshot_id[1], 'source-name': self.cli_data.fake_snapshot_name[1], }, 'size': 20, 'safe_to_manage': False, 'reason_not_safe': 'Already Managed', 'cinder_id': self.cli_data.fake_snapshot_name[1], 'extra_info': None, 'source_reference': { 'volume-id': self.cli_data.fake_volume_id[1] } }, { 'reference': { 'source-id': 
self.cli_data.fake_snapshot_id[2], 'source-name': self.cli_data.fake_snapshot_name[2], }, 'size': 20, 'safe_to_manage': True, 'reason_not_safe': None, 'cinder_id': None, 'extra_info': None, 'source_reference': { 'volume-id': 'hello-there' } }] result = self.driver.get_manageable_snapshots(fake_cinder_snapshots, None, 1000, 0, ['reference'], ['desc']) ans = volume_utils.paginate_entries_list(ans, None, 1000, 0, ['reference'], ['desc']) self.assertEqual(ans, result) def test_manage_existing_snapshot(self): fake_snapshot = self.cli_data.fake_cinder_snapshots[0] fake_ref_from_id = { 'source-id': self.cli_data.fake_snapshot_id[1] } fake_ref_from_name = { 'source-name': self.cli_data.fake_snapshot_name[1] } mock_commands = { 'ShowSnapshot': self.cli_data.get_test_show_snapshot_named(), 'SetSnapshot': (0, None) } ans = {'provider_location': self.cli_data.fake_snapshot_id[1]} self._driver_setup(mock_commands) result_from_id = self.driver.manage_existing_snapshot( fake_snapshot, fake_ref_from_id) result_from_name = self.driver.manage_existing_snapshot( fake_snapshot, fake_ref_from_name) self.assertEqual(ans, result_from_id) self.assertEqual(ans, result_from_name) @mock.patch.object(common_cli.LOG, 'warning') def test_get_snapshot_ref_data_err_and_warning(self, mock_warning): fake_snapshot = self.cli_data.fake_cinder_snapshots[0] fake_ref_err1 = { 'invalid-key': 'invalid-content' } fake_ref_err2 = { 'source-id': 'invalid-content' } fake_ref_err_and_warning = { 'source-name': '---' } mock_commands = { 'ShowSnapshot': self.cli_data.get_test_show_snapshot_named() } self._driver_setup(mock_commands) self.assertRaises(exception.ManageExistingInvalidReference, self.driver.manage_existing_snapshot, fake_snapshot, fake_ref_err1) self.assertRaises(exception.ManageExistingInvalidReference, self.driver.manage_existing_snapshot, fake_snapshot, fake_ref_err2) self.assertRaises(exception.ManageExistingInvalidReference, self.driver.manage_existing_snapshot, fake_snapshot, fake_ref_err_and_warning) self.assertEqual(1, mock_warning.call_count) def test_manage_existing_snapshot_get_size(self): fake_snapshot = self.cli_data.fake_cinder_snapshots[0] fake_ref = { 'source-id': self.cli_data.fake_snapshot_id[1] } mock_commands = { 'ShowSnapshot': self.cli_data.get_test_show_snapshot_named(), 'ShowPartition': self.cli_data.get_test_show_partition() } self._driver_setup(mock_commands) result = self.driver.manage_existing_snapshot_get_size(fake_snapshot, fake_ref) self.assertEqual(20, result) def test_unmanage_snapshot(self): fake_snapshot = self.cli_data.Fake_cinder_snapshot( self.cli_data.fake_snapshot_name[1], self.cli_data.fake_snapshot_id[1] ) mock_commands = { 'SetSnapshot': (0, None), } expect_cli_cmd = [ mock.call( 'SetSnapshot', self.cli_data.fake_snapshot_id[1], 'name=cinder-unmanaged-%s' % self.cli_data.fake_snapshot_name[1][:-17] ) ] self._driver_setup(mock_commands) self.driver.unmanage_snapshot(fake_snapshot) self._assert_cli_has_calls(expect_cli_cmd) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315577.2831202 cinder-27.0.0/cinder/tests/unit/volume/drivers/inspur/0000775000175000017500000000000000000000000022761 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/volume/drivers/inspur/__init__.py0000664000175000017500000000000000000000000025060 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 
xustar000000000000000028 mtime=1759315577.2831202 cinder-27.0.0/cinder/tests/unit/volume/drivers/inspur/as13000/0000775000175000017500000000000000000000000023750 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/volume/drivers/inspur/as13000/__init__.py0000664000175000017500000000000000000000000026047 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/volume/drivers/inspur/as13000/test_as13000_driver.py0000664000175000017500000016373400000000000027741 0ustar00zuulzuul00000000000000# Copyright 2018 Inspur Corp. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Volume driver test for Inspur AS13000.""" import json import random import time from unittest import mock import ddt import eventlet import requests from cinder import context from cinder import exception from cinder.tests.unit import fake_snapshot from cinder.tests.unit import fake_volume from cinder.tests.unit import test from cinder.volume import configuration from cinder.volume.drivers.inspur.as13000 import as13000_driver from cinder.volume import volume_utils test_config = configuration.Configuration(None) test_config.san_ip = 'some_ip' test_config.san_api_port = 'as13000_api_port' test_config.san_login = 'username' test_config.san_password = 'password' test_config.as13000_ipsan_pools = ['fakepool'] test_config.as13000_meta_pool = 'meta_pool' test_config.use_chap_auth = True test_config.chap_username = 'fakeuser' test_config.chap_password = 'fakepass' class FakeResponse(object): def __init__(self, status, output): self.status_code = status self.text = 'return message' self._json = output def json(self): return self._json def close(self): pass @ddt.ddt class RestAPIExecutorTestCase(test.TestCase): def setUp(self): self.rest_api = as13000_driver.RestAPIExecutor( test_config.san_ip, test_config.san_api_port, test_config.san_login, test_config.san_password) super(RestAPIExecutorTestCase, self).setUp() def test_login(self): mock__login = self.mock_object(self.rest_api, '_login', mock.Mock(return_value='fake_token')) self.rest_api.login() mock__login.assert_called_once() self.assertEqual('fake_token', self.rest_api._token) def test__login(self): response = {'token': 'fake_token', 'expireTime': '7200', 'type': 0} mock_sra = self.mock_object(self.rest_api, 'send_rest_api', mock.Mock(return_value=response)) result = self.rest_api._login() self.assertEqual('fake_token', result) login_params = {'name': test_config.san_login, 'password': test_config.san_password} mock_sra.assert_called_once_with(method='security/token', params=login_params, request_type='post') def test_send_rest_api(self): expected = {'value': 'abc'} mock_sa = self.mock_object(self.rest_api, 'send_api', mock.Mock(return_value=expected)) result = self.rest_api.send_rest_api( method='fake_method', params='fake_params', 
request_type='fake_type') self.assertEqual(expected, result) mock_sa.assert_called_once_with( 'fake_method', 'fake_params', 'fake_type') def test_send_rest_api_retry(self): expected = {'value': 'abc'} mock_sa = self.mock_object( self.rest_api, 'send_api', mock.Mock(side_effect=(exception.VolumeDriverException, expected))) mock_login = self.mock_object(self.rest_api, 'login', mock.Mock()) result = self.rest_api.send_rest_api( method='fake_method', params='fake_params', request_type='fake_type' ) self.assertEqual(expected, result) mock_sa.assert_called_with( 'fake_method', 'fake_params', 'fake_type') mock_login.assert_called_once() def test_send_rest_api_3times_fail(self): mock_sa = self.mock_object( self.rest_api, 'send_api', mock.Mock( side_effect=(exception.VolumeDriverException))) mock_login = self.mock_object(self.rest_api, 'login', mock.Mock()) self.assertRaises( exception.VolumeDriverException, self.rest_api.send_rest_api, method='fake_method', params='fake_params', request_type='fake_type') mock_sa.assert_called_with('fake_method', 'fake_params', 'fake_type') mock_login.assert_called() def test_send_rest_api_backend_error_fail(self): side_effect = exception.VolumeBackendAPIException('fake_err_msg') mock_sa = self.mock_object(self.rest_api, 'send_api', mock.Mock(side_effect=side_effect)) mock_login = self.mock_object(self.rest_api, 'login') self.assertRaises(exception.VolumeBackendAPIException, self.rest_api.send_rest_api, method='fake_method', params='fake_params', request_type='fake_type') mock_sa.assert_called_with('fake_method', 'fake_params', 'fake_type') mock_login.assert_not_called() @ddt.data( {'method': 'fake_method', 'request_type': 'post', 'params': {'fake_param': 'fake_value'}}, {'method': 'fake_method', 'request_type': 'get', 'params': {'fake_param': 'fake_value'}}, {'method': 'fake_method', 'request_type': 'delete', 'params': {'fake_param': 'fake_value'}}, {'method': 'fake_method', 'request_type': 'put', 'params': {'fake_param': 'fake_value'}}, ) @ddt.unpack def test_send_api(self, method, params, request_type): self.rest_api._token = 'fake_token' if request_type in ('post', 'delete', 'put'): fake_output = {'code': 0, 'message': 'success'} elif request_type == 'get': fake_output = {'code': 0, 'data': 'fake_date'} mock_request = self.mock_object( requests, request_type, mock.Mock( return_value=FakeResponse( 200, fake_output))) self.rest_api.send_api( method, params=params, request_type=request_type) mock_request.assert_called_once_with( 'http://%s:%s/rest/%s' % (test_config.san_ip, test_config.san_api_port, method), data=json.dumps(params), headers={'X-Auth-Token': 'fake_token'}) @ddt.data({'method': r'security/token', 'params': {'name': test_config.san_login, 'password': test_config.san_password}, 'request_type': 'post'}, {'method': r'security/token', 'params': None, 'request_type': 'delete'}) @ddt.unpack def test_send_api_access_success(self, method, params, request_type): if request_type == 'post': fake_value = {'code': 0, 'data': { 'token': 'fake_token', 'expireTime': '7200', 'type': 0}} mock_requests = self.mock_object( requests, 'post', mock.Mock( return_value=FakeResponse( 200, fake_value))) result = self.rest_api.send_api(method, params, request_type) self.assertEqual(fake_value['data'], result) mock_requests.assert_called_once_with( 'http://%s:%s/rest/%s' % (test_config.san_ip, test_config.san_api_port, method), data=json.dumps(params), headers=None) if request_type == 'delete': fake_value = {'code': 0, 'message': 'Success!'} self.rest_api._token = 'fake_token' 
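        # The delete branch below checks the logout call shape, roughly:
        #   DELETE http://<san_ip>:<san_api_port>/rest/security/token
        #   headers: {'X-Auth-Token': <cached token>}, and no request body
        # i.e. the cached token must be sent as the X-Auth-Token header.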
mock_requests = self.mock_object( requests, 'delete', mock.Mock( return_value=FakeResponse( 200, fake_value))) self.rest_api.send_api(method, params, request_type) mock_requests.assert_called_once_with( 'http://%s:%s/rest/%s' % (test_config.san_ip, test_config.san_api_port, method), data=None, headers={'X-Auth-Token': 'fake_token'}) def test_send_api_wrong_access_fail(self): req_params = {'method': r'security/token', 'params': {'name': test_config.san_login, 'password': 'fake_password'}, 'request_type': 'post'} fake_value = {'message': ' User name or password error.', 'code': 400} mock_request = self.mock_object( requests, 'post', mock.Mock( return_value=FakeResponse( 200, fake_value))) self.assertRaises( exception.VolumeBackendAPIException, self.rest_api.send_api, method=req_params['method'], params=req_params['params'], request_type=req_params['request_type']) mock_request.assert_called_once_with( 'http://%s:%s/rest/%s' % (test_config.san_ip, test_config.san_api_port, req_params['method']), data=json.dumps( req_params['params']), headers=None) def test_send_api_token_overtime_fail(self): self.rest_api._token = 'fake_token' fake_value = {'method': 'fake_url', 'params': 'fake_params', 'reuest_type': 'post'} fake_out_put = {'message': 'Unauthorized access!', 'code': 301} mock_requests = self.mock_object( requests, 'post', mock.Mock( return_value=FakeResponse( 200, fake_out_put))) self.assertRaises(exception.VolumeDriverException, self.rest_api.send_api, method='fake_url', params='fake_params', request_type='post') mock_requests.assert_called_once_with( 'http://%s:%s/rest/%s' % (test_config.san_ip, test_config.san_api_port, fake_value['method']), data=json.dumps('fake_params'), headers={ 'X-Auth-Token': 'fake_token'}) def test_send_api_fail(self): self.rest_api._token = 'fake_token' fake_output = {'code': 999, 'message': 'fake_message'} mock_request = self.mock_object( requests, 'post', mock.Mock( return_value=FakeResponse( 200, fake_output))) self.assertRaises( exception.VolumeBackendAPIException, self.rest_api.send_api, method='fake_method', params='fake_params', request_type='post') mock_request.assert_called_once_with( 'http://%s:%s/rest/%s' % (test_config.san_ip, test_config.san_api_port, 'fake_method'), data=json.dumps('fake_params'), headers={'X-Auth-Token': 'fake_token'} ) @ddt.ddt class AS13000DriverTestCase(test.TestCase): def __init__(self, *args, **kwds): super(AS13000DriverTestCase, self).__init__(*args, **kwds) self._ctxt = context.get_admin_context() self.configuration = test_config def setUp(self): self.rest_api = as13000_driver.RestAPIExecutor( test_config.san_ip, test_config.san_api_port, test_config.san_login, test_config.san_password) self.as13000_san = as13000_driver.AS13000Driver( configuration=self.configuration) super(AS13000DriverTestCase, self).setUp() @ddt.data(None, 'pool1') def test_do_setup(self, meta_pool): mock_login = self.mock_object(as13000_driver.RestAPIExecutor, 'login', mock.Mock()) fake_nodes = [{'healthStatus': 1, 'ip': 'fakeip1'}, {'healthStatus': 1, 'ip': 'fakeip2'}, {'healthStatus': 1, 'ip': 'fakeip3'}] mock_gcs = self.mock_object(self.as13000_san, '_get_cluster_status', mock.Mock(return_value=fake_nodes)) fake_pools = { 'pool1': {'name': 'pool1', 'type': '1'}, 'pool2': {'name': 'pool2', 'type': 2} } mock_gpi = self.mock_object(self.as13000_san, '_get_pools_info', mock.Mock(return_value=fake_pools)) mock_cp = self.mock_object(self.as13000_san, '_check_pools', mock.Mock()) mock_cmp = self.mock_object(self.as13000_san, '_check_meta_pool', mock.Mock()) 
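        # With login, the cluster probe, the pool lookup and both pool checks
        # mocked out, the assertion below expects meta_pool to end up as
        # 'pool1' in both ddt cases, i.e. when no metadata pool is configured
        # do_setup() falls back to the first configured data pool.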
self.as13000_san.meta_pool = meta_pool self.as13000_san.pools = ['pool1', 'pool2'] self.as13000_san.do_setup(self._ctxt) mock_login.assert_called_once() mock_gcs.assert_called_once() if meta_pool: mock_gpi.assert_called_with(['pool1', 'pool2', 'pool1']) else: mock_gpi.assert_called_with(['pool1', 'pool2']) self.assertEqual('pool1', self.as13000_san.meta_pool) mock_cp.assert_called_once() mock_cmp.assert_called_once() def test_check_for_setup_error(self): mock_sg = self.mock_object(configuration.Configuration, 'safe_get', mock.Mock(return_value='fake_config')) self.as13000_san.nodes = [{'fakenode': 'fake_name'}] self.as13000_san.check_for_setup_error() mock_sg.assert_called() def test_check_for_setup_error_no_healthy_node_fail(self): mock_sg = self.mock_object(configuration.Configuration, 'safe_get', mock.Mock(return_value='fake_config')) self.as13000_san.nodes = [] self.assertRaises(exception.VolumeDriverException, self.as13000_san.check_for_setup_error) mock_sg.assert_called() def test_check_for_setup_error_no_config_fail(self): mock_sg = self.mock_object(configuration.Configuration, 'safe_get', mock.Mock(return_value=None)) self.as13000_san.nodes = [] self.assertRaises(exception.InvalidConfigurationValue, self.as13000_san.check_for_setup_error) mock_sg.assert_called() def test__check_pools(self): fake_pools_info = { 'pool1': {'name': 'pool1', 'type': '1'}, 'pool2': {'name': 'pool2', 'type': 1} } self.as13000_san.pools = ['pool1'] self.as13000_san.pools_info = fake_pools_info self.as13000_san._check_pools() def test__check_pools_fail(self): fake_pools_info = { 'pool1': {'name': 'pool1', 'type': '1'}, 'pool2': {'name': 'pool2', 'type': 1} } self.as13000_san.pools = ['pool0, pool1'] self.as13000_san.pools_info = fake_pools_info self.assertRaises(exception.InvalidInput, self.as13000_san._check_pools) def test__check_meta_pool(self): fake_pools_info = { 'pool1': {'name': 'pool1', 'type': 2}, 'pool2': {'name': 'pool2', 'type': 1} } self.as13000_san.meta_pool = 'pool2' self.as13000_san.pools_info = fake_pools_info self.as13000_san._check_meta_pool() @ddt.data(None, 'pool0', 'pool1') def test__check_meta_pool_failed(self, meta_pool): fake_pools_info = { 'pool1': {'name': 'pool1', 'type': 2}, 'pool2': {'name': 'pool2', 'type': 1} } self.as13000_san.meta_pool = meta_pool self.as13000_san.pools_info = fake_pools_info self.assertRaises(exception.InvalidInput, self.as13000_san._check_meta_pool) @mock.patch.object(as13000_driver.RestAPIExecutor, 'send_rest_api') def test_create_volume(self, mock_rest): volume = fake_volume.fake_volume_obj(self._ctxt, host='H@B#P') self.as13000_san.pools_info = {'P': {'name': 'P', 'type': 1}} self.as13000_san.meta_pool = 'meta_pool' self.as13000_san.create_volume(volume) mock_rest.assert_called_once_with( method='block/lvm', params={ "name": volume.name.replace('-', '_'), "capacity": volume.size * 1024, "dataPool": 'P', "dataPoolType": 1, "metaPool": 'meta_pool' }, request_type='post') @ddt.data(1, 2) def test_create_volume_from_snapshot(self, size): volume = fake_volume.fake_volume_obj(self._ctxt, size=size) volume2 = fake_volume.fake_volume_obj(self._ctxt) snapshot = fake_snapshot.fake_snapshot_obj(self._ctxt, volume=volume2) mock_eh = self.mock_object(volume_utils, 'extract_host', mock.Mock(return_value='fake_pool')) _tnd_mock = mock.Mock(side_effect=('source_volume', 'dest_volume', 'snapshot')) mock_tnd = self.mock_object(self.as13000_san, '_trans_name_down', _tnd_mock) mock_lock_op = self.mock_object(self.as13000_san, '_snapshot_lock_op', mock.Mock()) mock_rest = 
self.mock_object(as13000_driver.RestAPIExecutor, 'send_rest_api', mock.Mock()) mock_fv = self.mock_object(self.as13000_san, '_filling_volume', mock.Mock()) mock_wvf = self.mock_object(self.as13000_san, '_wait_volume_filled', mock.Mock()) mock_ev = self.mock_object(self.as13000_san, 'extend_volume', mock.Mock()) self.as13000_san.create_volume_from_snapshot(volume, snapshot) lock_op_calls = [ mock.call('lock', 'source_volume', 'snapshot', 'fake_pool'), mock.call('unlock', 'source_volume', 'snapshot', 'fake_pool') ] mock_lock_op.assert_has_calls(lock_op_calls) mock_fv.assert_called_once_with('dest_volume', 'fake_pool') mock_wvf.assert_called_once_with('dest_volume', 'fake_pool') mock_eh.assert_called() mock_tnd.assert_called() params = { 'originalLvm': 'source_volume', 'originalPool': 'fake_pool', 'originalSnap': 'snapshot', 'name': 'dest_volume', 'pool': 'fake_pool'} mock_rest.assert_called_once_with(method='snapshot/volume/cloneLvm', params=params, request_type='post') if size == 2: mock_ev.assert_called_once_with(volume, size) def test_create_volume_from_snapshot_fail(self): volume = fake_volume.fake_volume_obj(self._ctxt) snapshot = fake_snapshot.fake_snapshot_obj(self._ctxt, volume_size=10) self.assertRaises( exception.InvalidInput, self.as13000_san.create_volume_from_snapshot, volume, snapshot) @ddt.data(1, 2) def test_create_cloned_volume(self, size): volume = fake_volume.fake_volume_obj(self._ctxt, size=size) volume_src = fake_volume.fake_volume_obj(self._ctxt) mock_eh = self.mock_object(volume_utils, 'extract_host', mock.Mock(return_value='fake_pool')) mock_tnd = self.mock_object( self.as13000_san, '_trans_name_down', mock.Mock( side_effect=('fake_name1', 'fake_name2'))) mock_rest = self.mock_object(as13000_driver.RestAPIExecutor, 'send_rest_api', mock.Mock()) mock_ev = self.mock_object(self.as13000_san, 'extend_volume', mock.Mock()) self.as13000_san.create_cloned_volume(volume, volume_src) mock_eh.assert_called() mock_tnd.assert_called() method = 'block/lvm/clone' params = { 'srcVolumeName': 'fake_name2', 'srcPoolName': 'fake_pool', 'destVolumeName': 'fake_name1', 'destPoolName': 'fake_pool'} request_type = 'post' mock_rest.assert_called_once_with( method=method, params=params, request_type=request_type) if size == 2: mock_ev.assert_called_once_with(volume, size) def test_create_clone_volume_fail(self): volume = fake_volume.fake_volume_obj(self._ctxt) volume_source = fake_volume.fake_volume_obj(self._ctxt, size=2) self.assertRaises( exception.InvalidInput, self.as13000_san.create_cloned_volume, volume, volume_source) def test_extend_volume(self): volume = fake_volume.fake_volume_obj(self._ctxt) mock_tnd = self.mock_object( self.as13000_san, '_trans_name_down', mock.Mock( return_value='fake_name')) mock_cv = self.mock_object(self.as13000_san, '_check_volume', mock.Mock(return_value=True)) mock_eh = self.mock_object(volume_utils, 'extract_host', mock.Mock(return_value='fake_pool')) mock_rest = self.mock_object(as13000_driver.RestAPIExecutor, 'send_rest_api', mock.Mock()) self.as13000_san.extend_volume(volume, 10) mock_tnd.assert_called_once_with(volume.name) mock_cv.assert_called_once_with(volume) mock_eh.assert_called_once_with(volume.host, level='pool') method = 'block/lvm' request_type = 'put' params = {'pool': 'fake_pool', 'name': 'fake_name', 'newCapacity': 10240} mock_rest.assert_called_once_with(method=method, request_type=request_type, params=params) def test_extend_volume_fail(self): volume = fake_volume.fake_volume_obj(self._ctxt) mock_tnd = self.mock_object( 
self.as13000_san, '_trans_name_down', mock.Mock( return_value='fake_name')) mock_cv = self.mock_object(self.as13000_san, '_check_volume', mock.Mock(return_value=False)) self.assertRaises(exception.VolumeDriverException, self.as13000_san.extend_volume, volume, 10) mock_tnd.assert_called_once_with(volume.name) mock_cv.assert_called_once_with(volume) @ddt.data(True, False) def test_delete_volume(self, volume_exist): volume = fake_volume.fake_volume_obj(self._ctxt) mock_eh = self.mock_object(volume_utils, 'extract_host', mock.Mock(return_value='fake_pool')) mock_tnd = self.mock_object( self.as13000_san, '_trans_name_down', mock.Mock( return_value='fake_name')) mock_cv = self.mock_object(self.as13000_san, '_check_volume', mock.Mock(return_value=volume_exist)) mock_rest = self.mock_object(as13000_driver.RestAPIExecutor, 'send_rest_api', mock.Mock()) self.as13000_san.delete_volume(volume) mock_tnd.assert_called_once_with(volume.name) mock_cv.assert_called_once_with(volume) if volume_exist: mock_eh.assert_called_once_with(volume.host, level='pool') method = 'block/lvm?pool=%s&lvm=%s' % ('fake_pool', 'fake_name') request_type = 'delete' mock_rest.assert_called_once_with(method=method, request_type=request_type) def test_create_snapshot(self): volume = fake_volume.fake_volume_obj(self._ctxt) snapshot = fake_snapshot.fake_snapshot_obj(self._ctxt, volume=volume) mock_eh = self.mock_object(volume_utils, 'extract_host', mock.Mock(return_value='fake_pool')) mock_cv = self.mock_object(self.as13000_san, '_check_volume', mock.Mock(return_value=True)) mock_tnd = self.mock_object( self.as13000_san, '_trans_name_down', mock.Mock( side_effect=('fake_name', 'fake_snap'))) mock_rest = self.mock_object(as13000_driver.RestAPIExecutor, 'send_rest_api', mock.Mock()) self.as13000_san.create_snapshot(snapshot) mock_eh.assert_called_once_with(volume.host, level='pool') mock_tnd.assert_called() mock_cv.assert_called_once_with(snapshot.volume) method = 'snapshot/volume' params = {'snapName': 'fake_snap', 'volumeName': 'fake_name', 'poolName': 'fake_pool', 'snapType': 'r'} request_type = 'post' mock_rest.assert_called_once_with(method=method, params=params, request_type=request_type) def test_create_snapshot_fail(self): volume = fake_volume.fake_volume_obj(self._ctxt) snapshot = fake_snapshot.fake_snapshot_obj(self._ctxt, volume=volume) mock_cv = self.mock_object(self.as13000_san, '_check_volume', mock.Mock(return_value=False)) self.assertRaises(exception.VolumeDriverException, self.as13000_san.create_snapshot, snapshot) mock_cv.assert_called_once_with(snapshot.volume) def test_delete_snapshot(self): volume = fake_volume.fake_volume_obj(self._ctxt) snapshot = fake_snapshot.fake_snapshot_obj(self._ctxt, volume=volume) mock_eh = self.mock_object(volume_utils, 'extract_host', mock.Mock(return_value='fake_pool')) mock_cv = self.mock_object(self.as13000_san, '_check_volume', mock.Mock(return_value=True)) mock_tnd = self.mock_object( self.as13000_san, '_trans_name_down', mock.Mock( side_effect=('fake_name', 'fake_snap'))) mock_rest = self.mock_object(as13000_driver.RestAPIExecutor, 'send_rest_api', mock.Mock()) self.as13000_san.delete_snapshot(snapshot) mock_eh.assert_called_once_with(volume.host, level='pool') mock_tnd.assert_called() mock_cv.assert_called_once_with(snapshot.volume) method = ('snapshot/volume?snapName=%s&volumeName=%s&poolName=%s' % ('fake_snap', 'fake_name', 'fake_pool')) request_type = 'delete' mock_rest.assert_called_once_with(method=method, request_type=request_type) def test_delete_snapshot_fail(self): 
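        # If the snapshot's source volume is not found on the backend
        # (_check_volume returns False), delete_snapshot() is expected to
        # raise VolumeDriverException rather than issue a REST call.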
volume = fake_volume.fake_volume_obj(self._ctxt) snapshot = fake_snapshot.fake_snapshot_obj(self._ctxt, volume=volume) mock_cv = self.mock_object(self.as13000_san, '_check_volume', mock.Mock(return_value=False)) self.assertRaises(exception.VolumeDriverException, self.as13000_san.delete_snapshot, snapshot) mock_cv.assert_called_once_with(snapshot.volume) @mock.patch('time.time') @ddt.data(2000, 1000) def test__update_volume_stats(self, time_token, mock_time): mock_time.return_value = 5000 self.as13000_san.VENDOR = 'INSPUR' self.as13000_san.VERSION = 'V1.3.1' self.as13000_san.PROTOCOL = 'iSCSI' mock_sg = self.mock_object(configuration.Configuration, 'safe_get', mock.Mock(return_value='fake_backend_name')) fake_pool_backend = [{'pool_name': 'fake_pool'}, {'pool_name': 'fake_pool1'}] self.as13000_san.pools = ['fake_pool'] mock_gps = self.mock_object(self.as13000_san, '_get_pools_stats', mock.Mock(return_value=fake_pool_backend)) self.as13000_san._stats = None self.as13000_san._token_time = time_token self.as13000_san.token_available_time = 3600 mock_login = self.mock_object(as13000_driver.RestAPIExecutor, 'login') self.as13000_san._update_volume_stats() backend_data = {'driver_version': 'V1.3.1', 'pools': fake_pool_backend, 'storage_protocol': 'iSCSI', 'vendor_name': 'INSPUR', 'volume_backend_name': 'fake_backend_name'} self.assertEqual(backend_data, self.as13000_san._stats) mock_sg.assert_called_once_with('volume_backend_name') mock_gps.assert_called_once() if (time.time() - time_token) > 3600: mock_login.assert_called_once() else: mock_login.assert_not_called() @ddt.data((4, u'127.0.0.1', '3260'), (6, u'FF01::101', '3260')) @ddt.unpack def test__build_target_portal(self, version, ip, port): portal = self.as13000_san._build_target_portal(ip, port) if version == 4: self.assertEqual(portal, '127.0.0.1:3260') else: self.assertEqual(portal, '[FF01::101]:3260') @ddt.data((True, True, True), (True, True, False), (False, True, True), (False, True, False), (False, False, True), (False, False, False), (True, False, True), (True, False, False)) @ddt.unpack def test_initialize_connection(self, host_exist, multipath, chap_enabled): volume = fake_volume.fake_volume_obj(self._ctxt) connector = {'multipath': multipath, 'ip': 'fake_ip', 'host': 'fake_host'} self.as13000_san.configuration.use_chap_auth = chap_enabled fakenode = [{'name': 'fake_name1', 'ip': 'node_ip1'}, {'name': 'fake_name2', 'ip': 'node_ip2'}, {'name': 'fake_name3', 'ip': 'node_ip3'}] self.as13000_san.nodes = fakenode if multipath: mock_gtfc = self.mock_object( self.as13000_san, '_get_target_from_conn', mock.Mock(return_value=(host_exist, 'target_name', ['fake_name1', 'fake_name2']))) else: mock_gtfc = self.mock_object( self.as13000_san, '_get_target_from_conn', mock.Mock(return_value=(host_exist, 'target_name', ['fake_name1']))) mock_altt = self.mock_object(self.as13000_san, '_add_lun_to_target', mock.Mock()) mock_ct = self.mock_object(self.as13000_san, '_create_target', mock.Mock()) mock_ahtt = self.mock_object(self.as13000_san, '_add_host_to_target', mock.Mock()) mock_actt = self.mock_object(self.as13000_san, '_add_chap_to_target', mock.Mock()) mock_gli = self.mock_object(self.as13000_san, '_get_lun_id', mock.Mock(return_value='1')) mock_rr = self.mock_object(random, 'randint', mock.Mock(return_value='12345678')) mock_btp = self.mock_object(self.as13000_san, '_build_target_portal', mock.Mock(side_effect=['node_ip1:3260', 'node_ip2:3260', 'node_ip3:3260'])) connect_info = self.as13000_san.initialize_connection( volume, connector) 
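        # Expected connection properties, taken from the assertions below:
        # a single-path connector gets the singular keys, e.g.
        #   {'target_portal': 'node_ip1:3260', 'target_lun': 1,
        #    'target_iqn': 'target_name', ...}
        # while a multipath connector gets the plural target_portals /
        # target_luns / target_iqns lists; CHAP credentials are added only
        # when use_chap_auth is enabled.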
expect_conn_data = { 'target_discovered': True, 'volume_id': volume.id, } if host_exist: if multipath: expect_conn_data.update({ 'target_portals': ['node_ip1:3260', 'node_ip2:3260'], 'target_luns': [1] * 2, 'target_iqns': ['target_name'] * 2 }) else: expect_conn_data.update({ 'target_portal': 'node_ip1:3260', 'target_lun': 1, 'target_iqn': 'target_name' }) else: target_name = 'target.inspur.fake_host-12345678' if multipath: expect_conn_data.update({ 'target_portals': ['node_ip1:3260', 'node_ip2:3260', 'node_ip3:3260'], 'target_luns': [1] * 3, 'target_iqns': [target_name] * 3 }) else: expect_conn_data.update({ 'target_portal': 'node_ip1:3260', 'target_lun': 1, 'target_iqn': target_name }) if chap_enabled: expect_conn_data['auth_method'] = 'CHAP' expect_conn_data['auth_username'] = 'fakeuser' expect_conn_data['auth_password'] = 'fakepass' expect_datas = { 'driver_volume_type': 'iscsi', 'data': expect_conn_data } self.assertEqual(expect_datas, connect_info) mock_gtfc.assert_called_once_with('fake_ip') mock_altt.assert_called_once() if not host_exist: mock_ct.assert_called_once() mock_ahtt.assert_called_once() mock_rr.assert_called_once() if chap_enabled: mock_actt.assert_called_once() mock_gli.assert_called_once() mock_btp.assert_called() @ddt.data(True, False) def test_terminate_connection(self, delete_target): volume = fake_volume.fake_volume_obj(self._ctxt, host='fakehost') connector = {'multipath': False, 'ip': 'fake_ip', 'host': 'fake_host'} mock_tnd = self.mock_object(self.as13000_san, '_trans_name_down', mock.Mock(return_value='fake_volume')) fake_target_list = [{'hostIp': ['fake_ip'], 'name': 'target_name', 'lun': [ {'lvm': 'fake_volume', 'lunID': 'fake_id'}]}] mock_gtl = self.mock_object(self.as13000_san, '_get_target_list', mock.Mock(return_value=fake_target_list)) mock_dlft = self.mock_object(self.as13000_san, '_delete_lun_from_target', mock.Mock()) if delete_target: mock_gll = self.mock_object(self.as13000_san, '_get_lun_list', mock.Mock(return_value=[])) else: mock_gll = self.mock_object(self.as13000_san, '_get_lun_list', mock.Mock(return_value=[1, 2])) mock_dt = self.mock_object(self.as13000_san, '_delete_target', mock.Mock()) self.as13000_san.terminate_connection(volume, connector) mock_tnd.assert_called_once_with(volume.name) mock_gtl.assert_called_once() mock_dlft.assert_called_once_with(lun_id='fake_id', target_name='target_name') mock_gll.assert_called_once_with('target_name') if delete_target: mock_dt.assert_called_once_with('target_name') else: mock_dt.assert_not_called() @ddt.data(True, False) def test_terminate_connection_force(self, delete_target): volume = fake_volume.fake_volume_obj(self._ctxt, host='fakehost') connector = {} mock_tnd = self.mock_object(self.as13000_san, '_trans_name_down', mock.Mock(return_value='fake_volume')) fake_target_list = [{'hostIp': ['fake_hostIp'], 'name': 'target_name', 'lun': [{'lvm': 'fake_volume', 'lunID': 'fake_id'}]}] mock_gtl = self.mock_object(self.as13000_san, '_get_target_list', mock.Mock(return_value=fake_target_list)) mock_dlft = self.mock_object(self.as13000_san, '_delete_lun_from_target', mock.Mock()) if delete_target: mock_gll = self.mock_object(self.as13000_san, '_get_lun_list', mock.Mock(return_value=[])) else: mock_gll = self.mock_object(self.as13000_san, '_get_lun_list', mock.Mock(return_value=[1, 2])) mock_dt = self.mock_object(self.as13000_san, '_delete_target', mock.Mock()) self.as13000_san.terminate_connection(volume, connector) mock_tnd.assert_called_once_with(volume.name) mock_gtl.assert_called_once() 
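        # terminate_connection() should always unmap the LUN from the target;
        # the target itself is deleted only when its LUN list comes back
        # empty (the delete_target=True case), as asserted below.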
mock_dlft.assert_called_once_with(lun_id='fake_id', target_name='target_name') mock_gll.assert_called_once_with('target_name') if delete_target: mock_dt.assert_called_once_with('target_name') else: mock_dt.assert_not_called() @mock.patch.object(as13000_driver.RestAPIExecutor, 'send_rest_api') def test__get_pools_info(self, mock_rest): fake_pools_data = [{'name': 'pool1', 'type': 1}, {'name': 'pool2', 'type': 2}] mock_rest.return_value = fake_pools_data # get a partial of pools result_pools_info = self.as13000_san._get_pools_info(['pool1']) self.assertEqual(result_pools_info, {'pool1': {'name': 'pool1', 'type': 1}}) # get both exist pools result_pools_info = self.as13000_san._get_pools_info(['pool1', 'pool2']) self.assertEqual(result_pools_info, {'pool1': {'name': 'pool1', 'type': 1}, 'pool2': {'name': 'pool2', 'type': 2}}) # get pools not exist result_pools_info = self.as13000_san._get_pools_info(['pool1', 'pool2', 'pool3']) self.assertEqual(result_pools_info, {'pool1': {'name': 'pool1', 'type': 1}, 'pool2': {'name': 'pool2', 'type': 2}}) def test__get_pools_stats(self): pool_date = [{'ID': 'fake_id', 'name': 'fake_name', 'totalCapacity': '2t', 'usedCapacity': '300g'}] self.as13000_san.pools = ['fake_name'] mock_rest = self.mock_object(as13000_driver.RestAPIExecutor, 'send_rest_api', mock.Mock(return_value=pool_date)) mock_uc = self.mock_object(self.as13000_san, '_unit_convert', mock.Mock(side_effect=(2000, 300))) pool_info = { 'pool_name': 'fake_name', 'total_capacity_gb': 2000, 'free_capacity_gb': 1700, 'thin_provisioning_support': True, 'thick_provisioning_support': False, } result_pools = self.as13000_san._get_pools_stats() expect_pools = [pool_info] self.assertEqual(expect_pools, result_pools) mock_rest.assert_called_once_with(method='block/pool?type=2', request_type='get') mock_uc.assert_called() @ddt.data('fake_ip3', 'fake_ip5') def test__get_target_from_conn(self, host_ip): target_list = [ { 'hostIp': ['fake_ip1', 'fake_ip2'], 'name': 'fake_target_1', 'node': ['fake_node1', 'fake_node2'] }, { 'hostIp': ['fake_ip3', 'fake_ip4'], 'name': 'fake_target_2', 'node': ['fake_node4', 'fake_node3'] } ] mock_gtl = self.mock_object(self.as13000_san, '_get_target_list', mock.Mock(return_value=target_list)) host_exist, target_name, node = ( self.as13000_san._get_target_from_conn(host_ip)) if host_ip == 'fake_ip3': self.assertEqual((True, 'fake_target_2', ['fake_node4', 'fake_node3']), (host_exist, target_name, node)) else: self.assertEqual((False, None, None), (host_exist, target_name, node)) mock_gtl.assert_called_once() def test__get_target_list(self): mock_rest = self.mock_object(as13000_driver.RestAPIExecutor, 'send_rest_api', mock.Mock(return_value='fake_date')) method = 'block/target/detail' request_type = 'get' result = self.as13000_san._get_target_list() self.assertEqual('fake_date', result) mock_rest.assert_called_once_with(method=method, request_type=request_type) def test__create_target(self): mock_rest = self.mock_object(as13000_driver.RestAPIExecutor, 'send_rest_api', mock.Mock()) target_name = 'fake_name' target_node = 'fake_node' method = 'block/target' params = {'name': target_name, 'nodeName': target_node} request_type = 'post' self.as13000_san._create_target(target_name, target_node) mock_rest.assert_called_once_with(method=method, params=params, request_type=request_type) def test__delete_target(self): mock_rest = self.mock_object(as13000_driver.RestAPIExecutor, 'send_rest_api', mock.Mock()) target_name = 'fake_name' method = 'block/target?name=%s' % target_name request_type = 
'delete' self.as13000_san._delete_target(target_name) mock_rest.assert_called_once_with(method=method, request_type=request_type) def test__add_chap_to_target(self): mock_rest = self.mock_object(as13000_driver.RestAPIExecutor, 'send_rest_api', mock.Mock()) target_name = 'fake_name' chap_username = 'fake_user' chap_password = 'fake_pass' self.as13000_san._add_chap_to_target(target_name, chap_username, chap_password) method = 'block/chap/bond' params = {'target': target_name, 'user': chap_username, 'password': chap_password} request_type = 'post' mock_rest.assert_called_once_with(method=method, params=params, request_type=request_type) def test__add_host_to_target(self): mock_rest = self.mock_object(as13000_driver.RestAPIExecutor, 'send_rest_api', mock.Mock()) target_name = 'fake_name' host_ip = 'fake_ip' method = 'block/host' params = {'name': target_name, 'hostIp': host_ip} request_type = 'post' self.as13000_san._add_host_to_target(host_ip, target_name) mock_rest.assert_called_once_with(method=method, params=params, request_type=request_type) def test__add_lun_to_target(self): volume = fake_volume.fake_volume_obj(self._ctxt, host='fakehost') mock_eh = self.mock_object(volume_utils, 'extract_host', mock.Mock(return_value='fake_pool')) mock_tnd = self.mock_object(self.as13000_san, '_trans_name_down', mock.Mock(return_value='fake_name')) mock_rest = self.mock_object(as13000_driver.RestAPIExecutor, 'send_rest_api', mock.Mock()) target_name = 'fake_target' self.as13000_san._add_lun_to_target(target_name, volume) method = 'block/lun' params = {'name': target_name, 'pool': 'fake_pool', 'lvm': 'fake_name'} request_type = 'post' mock_eh.assert_called_once_with(volume.host, level='pool') mock_tnd.assert_called_once_with(volume.name) mock_rest.assert_called_once_with(method=method, params=params, request_type=request_type) def test__add_lun_to_target_retry_3times(self): volume = fake_volume.fake_volume_obj(self._ctxt, host='fakehost') mock_eh = self.mock_object(volume_utils, 'extract_host', mock.Mock(return_value='fake_pool')) mock_tnd = self.mock_object(self.as13000_san, '_trans_name_down', mock.Mock(return_value='fake_name')) mock_rest = self.mock_object( as13000_driver.RestAPIExecutor, 'send_rest_api', mock.Mock(side_effect=(exception.VolumeDriverException, mock.MagicMock()))) target_name = 'fake_target' self.as13000_san._add_lun_to_target(target_name, volume) method = 'block/lun' params = {'name': target_name, 'pool': 'fake_pool', 'lvm': 'fake_name'} request_type = 'post' mock_eh.assert_called_with(volume.host, level='pool') mock_tnd.assert_called_with(volume.name) mock_rest.assert_called_with(method=method, params=params, request_type=request_type) def test__add_lun_to_target_fail(self): volume = fake_volume.fake_volume_obj(self._ctxt, host='fakehost') mock_eh = self.mock_object(volume_utils, 'extract_host', mock.Mock(return_value='fake_pool')) mock_tnd = self.mock_object(self.as13000_san, '_trans_name_down', mock.Mock(return_value='fake_name')) mock_rest = self.mock_object( as13000_driver.RestAPIExecutor, 'send_rest_api', mock.Mock(side_effect=exception.VolumeDriverException)) target_name = 'fake_target' self.assertRaises(exception.VolumeDriverException, self.as13000_san._add_lun_to_target, target_name=target_name, volume=volume) method = 'block/lun' params = {'name': target_name, 'pool': 'fake_pool', 'lvm': 'fake_name'} request_type = 'post' mock_eh.assert_called_with(volume.host, level='pool') mock_tnd.assert_called_with(volume.name) mock_rest.assert_called_with(method=method, params=params, 
request_type=request_type) def test__delete_lun_from_target(self): mock_rest = self.mock_object(as13000_driver.RestAPIExecutor, 'send_rest_api', mock.Mock()) target_name = 'fake_target' lun_id = 'fake_id' self.as13000_san._delete_lun_from_target(target_name, lun_id) method = 'block/lun?name=%s&id=%s&force=1' % (target_name, lun_id) request_type = 'delete' mock_rest.assert_called_once_with(method=method, request_type=request_type) @ddt.data('lock', 'unlock') def test__snapshot_lock_op(self, operation): mock_rest = self.mock_object(as13000_driver.RestAPIExecutor, 'send_rest_api', mock.Mock()) vol_name = 'fake_volume' snap_name = 'fake_snapshot' pool_name = "fake_pool" self.as13000_san._snapshot_lock_op(operation, vol_name, snap_name, pool_name) method = 'snapshot/volume/' + operation request_type = 'post' params = {'snapName': snap_name, 'volumeName': vol_name, 'poolName': pool_name} mock_rest.assert_called_once_with(method=method, params=params, request_type=request_type) def test__filling_volume(self): mock_rest = self.mock_object(as13000_driver.RestAPIExecutor, 'send_rest_api', mock.Mock()) vol_name = 'fake_volume' pool_name = 'fake_pool' self.as13000_san._filling_volume(vol_name, pool_name) params = {'pool': 'fake_pool', 'name': 'fake_volume'} mock_rest.assert_called_once_with(method='block/lvm/filling', params=params, request_type='post') def test__wait_volume_filled(self): # Need to mock sleep as it is called by @utils.retry self.mock_object(time, 'sleep') expected = [{'name': 'fake_v1', 'lvmType': 1}] mock_gv = self.mock_object(self.as13000_san, '_get_volumes', mock.Mock(return_value=expected)) self.as13000_san._wait_volume_filled('fake_v1', 'fake_pool') mock_gv.assert_called_with('fake_pool') def test__wait_volume_filled_failed(self): # Need to mock sleep as it is called by @utils.retry self.mock_object(time, 'sleep') expected = [{'name': 'fake_v1', 'lvmType': 2}] mock_gv = self.mock_object(self.as13000_san, '_get_volumes', mock.Mock(return_value=expected)) self.assertRaises(exception.VolumeDriverException, self.as13000_san._wait_volume_filled, 'fake_v1', 'fake_pool') mock_gv.assert_called_with('fake_pool') def test__get_lun_list(self): target_name = 'fake_name' lun_list = ['lun_1', 'lun_2'] mock_rest = self.mock_object(as13000_driver.RestAPIExecutor, 'send_rest_api', mock.Mock(return_value=lun_list)) lun_result = self.as13000_san._get_lun_list(target_name) self.assertEqual(lun_list, lun_result) method = 'block/lun?name=%s' % target_name request_type = 'get' mock_rest.assert_called_once_with(method=method, request_type=request_type) @ddt.data(True, False) def test__check_volume(self, exist): volume = fake_volume.fake_volume_obj(self._ctxt, host='fakehost') mock_eh = self.mock_object(volume_utils, 'extract_host', mock.Mock(return_value='fake_pool')) mock_tnd = self.mock_object(self.as13000_san, '_trans_name_down', mock.Mock(return_value='fake_name')) mock_el = self.mock_object(eventlet, 'sleep', mock.Mock(return_value=None)) if exist: mock_gv = self.mock_object(self.as13000_san, '_get_volumes', mock.Mock(return_value=[ {'name': 'fake_name'}, {'name': 'fake_name2'}])) else: mock_gv = self.mock_object(self.as13000_san, '_get_volumes', mock.Mock(return_value=[ {'name': 'fake_name2'}, {'name': 'fake_name3'}])) expect = self.as13000_san._check_volume(volume) self.assertEqual(exist, expect) mock_eh.assert_called_once_with(volume.host, 'pool') mock_tnd.assert_called_once_with(volume.name) if exist: mock_gv.assert_called_once_with('fake_pool') else: mock_gv.assert_called() 
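        # In the volume-absent case only the weaker assert_called() is used
        # for _get_volumes and eventlet.sleep, which suggests _check_volume
        # re-polls the pool with a sleep between attempts before giving up.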
mock_el.assert_called() def test__get_volumes(self): volumes = [{'name': 'fake_name1'}, {'name': 'fake_name2'}, {'name': 'fake_name3'}] pool = 'fake_pool' mock_rest = self.mock_object(as13000_driver.RestAPIExecutor, 'send_rest_api', mock.Mock(return_value=volumes)) result = self.as13000_san._get_volumes(pool) method = 'block/lvm?pool=%s' % pool request_type = 'get' self.assertEqual(volumes, result) mock_rest.assert_called_once_with(method=method, request_type=request_type) def test__get_cluster_status(self): method = 'cluster/node' request_type = 'get' cluster = 'fake_cluster' mock_rest = self.mock_object(as13000_driver.RestAPIExecutor, 'send_rest_api', mock.Mock(return_value=cluster)) result = self.as13000_san._get_cluster_status() self.assertEqual(cluster, result) mock_rest.assert_called_once_with(method=method, request_type=request_type) @ddt.data(True, False) def test__get_lun_id(self, lun_exist): volume = fake_volume.fake_volume_obj(self._ctxt, host='fakehost') if lun_exist: lun_list = [{'id': '01', 'mappingLvm': r'fake_pool/fake_volume1'}, {'id': '02', 'mappingLvm': r'fake_pool/fake_volume2'}] else: lun_list = [{'id': '01', 'mappingLvm': r'fake_pool/fake_volume1'}, {'id': '02', 'mappingLvm': r'fake_pool/fake_volume0'}] mock_eh = self.mock_object(volume_utils, 'extract_host', mock.Mock(return_value='fake_pool')) mock_tnd = self.mock_object(self.as13000_san, '_trans_name_down', mock.Mock(return_value='fake_volume2')) mock_gll = self.mock_object(self.as13000_san, '_get_lun_list', mock.Mock(return_value=lun_list)) lun_id = self.as13000_san._get_lun_id(volume, 'fake_target') if lun_exist: self.assertEqual('02', lun_id) else: self.assertIsNone(lun_id) mock_eh.assert_called_once_with(volume.host, level='pool') mock_tnd.assert_called_once_with(volume.name) mock_gll.assert_called_once_with('fake_target') def test__trans_name_down(self): fake_name = 'test-abcd-1234_1234-234' expect = 'test_abcd_1234_1234_234' result = self.as13000_san._trans_name_down(fake_name) self.assertEqual(expect, result) @ddt.data('5000000000', '5000000k', '5000mb', '50G', '5TB', '5PB', '5EB') def test__unit_convert(self, capacity): trans = {'5000000000': '%.0f' % (float(5000000000) / (1024 ** 3)), '5000000k': '%.0f' % (float(5000000) / (1024 ** 2)), '5000mb': '%.0f' % (float(5000) / 1024), '50G': '%.0f' % float(50), '5TB': '%.0f' % (float(5) * 1024), '5PB': '%.0f' % (float(5) * (1024 ** 2)), '5EB': '%.0f' % (float(5) * (1024 ** 3))} expect = float(trans[capacity]) result = self.as13000_san._unit_convert(capacity) self.assertEqual(expect, result) ././@PaxHeader0000000000000000000000000000003200000000000011450 xustar000000000000000026 mtime=1759315577.28712 cinder-27.0.0/cinder/tests/unit/volume/drivers/inspur/instorage/0000775000175000017500000000000000000000000024754 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/volume/drivers/inspur/instorage/__init__.py0000664000175000017500000000000000000000000027053 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/volume/drivers/inspur/instorage/fakes.py0000664000175000017500000026003500000000000026425 0ustar00zuulzuul00000000000000# Copyright 2017 Inspur Corp. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # """Tests for the Inspur InStorage volume driver.""" import re from oslo_concurrency import processutils from oslo_utils import units from cinder import exception from cinder import utils from cinder.volume.drivers.inspur.instorage import instorage_const from cinder.volume.drivers.inspur.instorage import instorage_fc from cinder.volume.drivers.inspur.instorage import instorage_iscsi MCS_POOLS = ['openstack', 'openstack1'] def get_test_pool(get_all=False): if get_all: return MCS_POOLS else: return MCS_POOLS[0] class FakeInStorageMCSFcDriver(instorage_fc.InStorageMCSFCDriver): def __init__(self, *args, **kwargs): super(FakeInStorageMCSFcDriver, self).__init__(*args, **kwargs) def set_fake_storage(self, fake): self.fake_storage = fake def _run_ssh(self, cmd, check_exit_code=True, attempts=1): utils.check_ssh_injection(cmd) ret = self.fake_storage.execute_command(cmd, check_exit_code) return ret class FakeInStorageMCSISCSIDriver(instorage_iscsi.InStorageMCSISCSIDriver): def __init__(self, *args, **kwargs): super(FakeInStorageMCSISCSIDriver, self).__init__(*args, **kwargs) def set_fake_storage(self, fake): self.fake_storage = fake def _run_ssh(self, cmd, check_exit_code=True, attempts=1): utils.check_ssh_injection(cmd) ret = self.fake_storage.execute_command(cmd, check_exit_code) return ret class FakeInStorage(object): def __init__(self, pool_name): self._flags = {'instorage_mcs_volpool_name': pool_name} self._volumes_list = {} self._hosts_list = {} self._mappings_list = {} self._lcmappings_list = {} self._lcconsistgrp_list = {} self._rcrelationship_list = {} self._partnership_list = {} self._partnershipcandidate_list = {} self._system_list = {'instorage-mcs-sim': {'id': '0123456789ABCDEF', 'name': 'instorage-mcs-sim'}, 'aux-mcs-sim': {'id': 'ABCDEF0123456789', 'name': 'aux-mcs-sim'}} self._other_pools = {'openstack2': {}, 'openstack3': {}} self._next_cmd_error = { 'lsportip': '', 'lsfabric': '', 'lsiscsiauth': '', 'lsnodecanister': '', 'mkvdisk': '', 'lsvdisk': '', 'lslcmap': '', 'prestartlcmap': '', 'startlcmap': '', 'rmlcmap': '', 'lslicense': '', 'lsguicapabilities': '', 'lshost': '', 'lsrcrelationship': '' } self._errors = { 'CMMVC5701E': ('', 'CMMVC5701E No object ID was specified.'), 'CMMVC6035E': ('', 'CMMVC6035E The action failed as the ' 'object already exists.'), 'CMMVC5753E': ('', 'CMMVC5753E The specified object does not ' 'exist or is not a suitable candidate.'), 'CMMVC5707E': ('', 'CMMVC5707E Required parameters are missing.'), 'CMMVC6581E': ('', 'CMMVC6581E The command has failed because ' 'the maximum number of allowed iSCSI ' 'qualified names (IQNs) has been reached, ' 'or the IQN is already assigned or is not ' 'valid.'), 'CMMVC5754E': ('', 'CMMVC5754E The specified object does not ' 'exist, or the name supplied does not meet ' 'the naming rules.'), 'CMMVC6071E': ('', 'CMMVC6071E The VDisk-to-host mapping was ' 'not created because the VDisk is already ' 'mapped to a host.'), 'CMMVC5879E': ('', 'CMMVC5879E The VDisk-to-host mapping was ' 'not created because a VDisk is already ' 'mapped to this host with this SCSI LUN.'), 'CMMVC5840E': ('', 'CMMVC5840E The virtual disk 
(VDisk) was ' 'not deleted because it is mapped to a ' 'host or because it is part of a LocalCopy ' 'or Remote Copy mapping, or is involved in ' 'an image mode migrate.'), 'CMMVC6527E': ('', 'CMMVC6527E The name that you have entered ' 'is not valid. The name can contain letters, ' 'numbers, spaces, periods, dashes, and ' 'underscores. The name must begin with a ' 'letter or an underscore. The name must not ' 'begin or end with a space.'), 'CMMVC5871E': ('', 'CMMVC5871E The action failed because one or ' 'more of the configured port names is in a ' 'mapping.'), 'CMMVC5924E': ('', 'CMMVC5924E The LocalCopy mapping was not ' 'created because the source and target ' 'virtual disks (VDisks) are different sizes.'), 'CMMVC6303E': ('', 'CMMVC6303E The create failed because the ' 'source and target VDisks are the same.'), 'CMMVC7050E': ('', 'CMMVC7050E The command failed because at ' 'least one node in the I/O group does not ' 'support compressed VDisks.'), 'CMMVC6430E': ('', 'CMMVC6430E The command failed because the ' 'target and source managed disk groups must ' 'be different.'), 'CMMVC6353E': ('', 'CMMVC6353E The command failed because the ' 'copy specified does not exist.'), 'CMMVC6446E': ('', 'The command failed because the managed disk ' 'groups have different extent sizes.'), # Catch-all for invalid state transitions: 'CMMVC5903E': ('', 'CMMVC5903E The LocalCopy mapping was not ' 'changed because the mapping or consistency ' 'group is another state.'), 'CMMVC5709E': ('', 'CMMVC5709E [-%(VALUE)s] is not a supported ' 'parameter.'), 'CMMVC5982E': ('', 'CMMVC5982E The operation was not performed ' 'because it is not valid given the current ' 'relationship state.'), 'CMMVC5963E': ('', 'CMMVC5963E No direction has been defined.'), } self._lc_transitions = {'begin': {'make': 'idle_or_copied'}, 'idle_or_copied': {'prepare': 'preparing', 'delete': 'end', 'delete_force': 'end'}, 'preparing': {'flush_failed': 'stopped', 'wait': 'prepared'}, 'end': None, 'stopped': {'prepare': 'preparing', 'delete_force': 'end'}, 'prepared': {'stop': 'stopped', 'start': 'copying'}, 'copying': {'wait': 'idle_or_copied', 'stop': 'stopping'}, # Assume the worst case where stopping->stopped # rather than stopping idle_or_copied 'stopping': {'wait': 'stopped'}, } self._lc_cg_transitions = {'begin': {'make': 'empty'}, 'empty': {'add': 'idle_or_copied'}, 'idle_or_copied': {'prepare': 'preparing', 'delete': 'end', 'delete_force': 'end'}, 'preparing': {'flush_failed': 'stopped', 'wait': 'prepared'}, 'end': None, 'stopped': {'prepare': 'preparing', 'delete_force': 'end'}, 'prepared': {'stop': 'stopped', 'start': 'copying', 'delete_force': 'end', 'delete': 'end'}, 'copying': {'wait': 'idle_or_copied', 'stop': 'stopping', 'delete_force': 'end', 'delete': 'end'}, # Assume the case where stopping->stopped # rather than stopping idle_or_copied 'stopping': {'wait': 'stopped'}, } self._rc_transitions = {'inconsistent_stopped': {'start': 'inconsistent_copying', 'stop': 'inconsistent_stopped', 'delete': 'end', 'delete_force': 'end'}, 'inconsistent_copying': { 'wait': 'consistent_synchronized', 'start': 'inconsistent_copying', 'stop': 'inconsistent_stopped', 'delete': 'end', 'delete_force': 'end'}, 'consistent_synchronized': { 'start': 'consistent_synchronized', 'stop': 'consistent_stopped', 'stop_access': 'idling', 'delete': 'end', 'delete_force': 'end'}, 'consistent_stopped': {'start': 'consistent_synchronized', 'stop': 'consistent_stopped', 'delete': 'end', 'delete_force': 'end'}, 'end': None, 'idling': { 'start': 
'inconsistent_copying', 'stop': 'inconsistent_stopped', 'stop_access': 'idling', 'delete': 'end', 'delete_force': 'end'}, } def _state_transition(self, function, lcmap): if (function == 'wait' and 'wait' not in self._lc_transitions[lcmap['status']]): return ('', '') if lcmap['status'] == 'copying' and function == 'wait': if lcmap['copyrate'] != '0': if lcmap['progress'] == '0': lcmap['progress'] = '50' else: lcmap['progress'] = '100' lcmap['status'] = 'idle_or_copied' return ('', '') else: try: curr_state = lcmap['status'] lcmap['status'] = self._lc_transitions[curr_state][function] return ('', '') except Exception: return self._errors['CMMVC5903E'] def _lc_cg_state_transition(self, function, lc_consistgrp): if (function == 'wait' and 'wait' not in self._lc_transitions[lc_consistgrp['status']]): return ('', '') try: curr_state = lc_consistgrp['status'] new_state = self._lc_cg_transitions[curr_state][function] lc_consistgrp['status'] = new_state return ('', '') except Exception: return self._errors['CMMVC5903E'] # Find an unused ID @staticmethod def _find_unused_id(d): ids = [] for v in d.values(): ids.append(int(v['id'])) ids.sort() for index, n in enumerate(ids): if n > index: return str(index) return str(len(ids)) # Check if name is valid @staticmethod def _is_invalid_name(name): if re.match(r'^[a-zA-Z_][\w._-]*$', name): return False return True # Convert argument string to dictionary @staticmethod def _cmd_to_dict(arg_list): no_param_args = [ 'autodelete', 'bytes', 'compressed', 'force', 'nohdr', 'nofmtdisk', 'async', 'access', 'start' ] one_param_args = [ 'chapsecret', 'cleanrate', 'copy', 'copyrate', 'delim', 'intier', 'filtervalue', 'grainsize', 'hbawwpn', 'host', 'iogrp', 'iscsiname', 'mdiskgrp', 'name', 'rsize', 'scsi', 'size', 'source', 'target', 'unit', 'vdisk', 'warning', 'wwpn', 'primary', 'consistgrp', 'master', 'aux', 'cluster', 'linkbandwidthmbits', 'backgroundcopyrate' ] no_or_one_param_args = [ 'autoexpand', ] # Handle the special case of lsnode which is a two-word command # Use the one word version of the command internally if arg_list[0] in ('mcsinq', 'mcsop'): if arg_list[1] == 'lsnode': if len(arg_list) > 4: # e.g. mcsinq lsnode -delim ! ret = {'cmd': 'lsnode', 'node_id': arg_list[-1]} else: ret = {'cmd': 'lsnodecanister'} else: ret = {'cmd': arg_list[1]} arg_list.pop(0) else: ret = {'cmd': arg_list[0]} skip = False for i in range(1, len(arg_list)): if skip: skip = False continue # Check for a quoted command argument for volumes and strip # quotes so that the simulater can match it later. Just # match against test naming convensions for now. 
if arg_list[i][0] == '"' and ('volume' in arg_list[i] or 'snapshot' in arg_list[i]): arg_list[i] = arg_list[i][1:-1] if arg_list[i][0] == '-': if arg_list[i][1:] in no_param_args: ret[arg_list[i][1:]] = True elif arg_list[i][1:] in one_param_args: ret[arg_list[i][1:]] = arg_list[i + 1] skip = True elif arg_list[i][1:] in no_or_one_param_args: if i == (len(arg_list) - 1) or arg_list[i + 1][0] == '-': ret[arg_list[i][1:]] = True else: ret[arg_list[i][1:]] = arg_list[i + 1] skip = True else: raise exception.InvalidInput( reason='unrecognized argument %s' % arg_list[i]) else: ret['obj'] = arg_list[i] return ret @staticmethod def _print_info_cmd(rows, delim=' ', nohdr=False, **kwargs): """Generic function for printing information.""" if nohdr: del rows[0] for index in range(len(rows)): rows[index] = delim.join(rows[index]) return ('%s' % '\n'.join(rows), '') @staticmethod def _print_info_obj_cmd(header, row, delim=' ', nohdr=False): """Generic function for printing information for a specific object.""" objrows = [] for idx, val in enumerate(header): objrows.append([val, row[idx]]) if nohdr: for index in range(len(objrows)): objrows[index] = ' '.join(objrows[index][1:]) for index in range(len(objrows)): objrows[index] = delim.join(objrows[index]) return ('%s' % '\n'.join(objrows), '') @staticmethod def _convert_bytes_units(bytestr): num = int(bytestr) unit_array = ['B', 'KB', 'MB', 'GB', 'TB', 'PB'] unit_index = 0 while num > 1024: num = num / 1024 unit_index += 1 return '%d%s' % (num, unit_array[unit_index]) @staticmethod def _convert_units_bytes(num, unit): unit_array = ['B', 'KB', 'MB', 'GB', 'TB', 'PB'] unit_index = 0 while unit.lower() != unit_array[unit_index].lower(): num = num * 1024 unit_index += 1 return str(num) def _cmd_lslicense(self, **kwargs): rows = [None] * 3 rows[0] = ['used_compression_capacity', '0.08'] rows[1] = ['license_compression_capacity', '0'] if self._next_cmd_error['lslicense'] == 'no_compression': self._next_cmd_error['lslicense'] = '' rows[2] = ['license_compression_enclosures', '0'] else: rows[2] = ['license_compression_enclosures', '1'] return self._print_info_cmd(rows=rows, **kwargs) def _cmd_lsguicapabilities(self, **kwargs): rows = [None] * 2 if self._next_cmd_error['lsguicapabilities'] == 'no_compression': self._next_cmd_error['lsguicapabilities'] = '' rows[0] = ['license_scheme', '0'] else: rows[0] = ['license_scheme', '1813'] rows[1] = ['product_key', instorage_const.DEV_MODEL_INSTORAGE] return self._print_info_cmd(rows=rows, **kwargs) # Print mostly made-up stuff in the correct syntax def _cmd_lssystem(self, **kwargs): rows = [None] * 3 rows[0] = ['id', '0123456789ABCDEF'] rows[1] = ['name', 'instorage-mcs-sim'] rows[2] = ['code_level', '3.1.1.0 (build 87.0.1311291000)'] return self._print_info_cmd(rows=rows, **kwargs) def _cmd_lssystem_aux(self, **kwargs): rows = [None] * 3 rows[0] = ['id', 'ABCDEF0123456789'] rows[1] = ['name', 'aux-mcs-sim'] rows[2] = ['code_level', '3.1.1.0 (build 87.0.1311291000)'] return self._print_info_cmd(rows=rows, **kwargs) # Print mostly made-up stuff in the correct syntax, assume -bytes passed def _cmd_lsmdiskgrp(self, **kwargs): pool_num = len(self._flags['instorage_mcs_volpool_name']) rows = [] rows.append(['id', 'name', 'status', 'mdisk_count', 'vdisk_count', 'capacity', 'extent_size', 'free_capacity', 'virtual_capacity', 'used_capacity', 'real_capacity', 'overallocation', 'warning', 'in_tier', 'in_tier_status']) for i in range(pool_num): row_data = [str(i + 1), self._flags['instorage_mcs_volpool_name'][i], 'online', '1', 
str(len(self._volumes_list)), '3573412790272', '256', '3529926246400', '1693247906775', '26843545600', '38203734097', '47', '80', 'auto', 'inactive'] rows.append(row_data) rows.append([str(pool_num + 1), 'openstack2', 'online', '1', '0', '3573412790272', '256', '3529432325160', '1693247906775', '26843545600', '38203734097', '47', '80', 'auto', 'inactive']) rows.append([str(pool_num + 2), 'openstack3', 'online', '1', '0', '3573412790272', '128', '3529432325160', '1693247906775', '26843545600', '38203734097', '47', '80', 'auto', 'inactive']) if 'obj' not in kwargs: return self._print_info_cmd(rows=rows, **kwargs) else: pool_name = kwargs['obj'].strip('\'\"') if pool_name == kwargs['obj']: raise exception.InvalidInput( reason='obj missing quotes %s' % kwargs['obj']) elif pool_name in self._flags['instorage_mcs_volpool_name']: for each_row in rows: if pool_name in each_row: row = each_row break elif pool_name == 'openstack2': row = rows[-2] elif pool_name == 'openstack3': row = rows[-1] else: return self._errors['CMMVC5754E'] objrows = [] for idx, val in enumerate(rows[0]): objrows.append([val, row[idx]]) if 'nohdr' in kwargs: for index in range(len(objrows)): objrows[index] = ' '.join(objrows[index][1:]) if 'delim' in kwargs: for index in range(len(objrows)): objrows[index] = kwargs['delim'].join(objrows[index]) return ('%s' % '\n'.join(objrows), '') # Print mostly made-up stuff in the correct syntax def _cmd_lsnodecanister(self, **kwargs): rows = [None] * 3 rows[0] = ['id', 'name', 'UPS_serial_number', 'WWNN', 'status', 'IO_group_id', 'IO_group_name', 'config_node', 'UPS_unique_id', 'hardware', 'iscsi_name', 'iscsi_alias', 'panel_name', 'enclosure_id', 'canister_id', 'enclosure_serial_number'] rows[1] = [ '1', 'node1', '', '123456789ABCDEF0', 'online', '0', 'io_grp0', 'yes', '123456789ABCDEF0', '100', 'iqn.1982-01.com.inspur:1234.sim.node1', '', '01-1', '1', '1', '0123ABC'] rows[2] = [ '2', 'node2', '', '123456789ABCDEF1', 'online', '0', 'io_grp0', 'no', '123456789ABCDEF1', '100', 'iqn.1982-01.com.inspur:1234.sim.node2', '', '01-2', '1', '2', '0123ABC'] if self._next_cmd_error['lsnodecanister'] == 'header_mismatch': rows[0].pop(2) self._next_cmd_error['lsnodecanister'] = '' if self._next_cmd_error['lsnodecanister'] == 'remove_field': for row in rows: row.pop(0) self._next_cmd_error['lsnodecanister'] = '' return self._print_info_cmd(rows=rows, **kwargs) # Print information of every single node of MCS def _cmd_lsnode(self, **kwargs): node_infos = dict() node_infos['1'] = r'''id!1 name!node1 port_id!500507680210C744 port_status!active port_speed!8Gb port_id!500507680220C744 port_status!active port_speed!8Gb ''' node_infos['2'] = r'''id!2 name!node2 port_id!500507680220C745 port_status!active port_speed!8Gb port_id!500507680230C745 port_status!inactive port_speed!N/A ''' node_id = kwargs.get('node_id', None) stdout = node_infos.get(node_id, '') return stdout, '' # Print made up stuff for the ports def _cmd_lsportfc(self, **kwargs): node_1 = [None] * 7 node_1[0] = ['id', 'fc_io_port_id', 'port_id', 'type', 'port_speed', 'node_id', 'node_name', 'WWPN', 'nportid', 'status', 'attachment'] node_1[1] = ['0', '1', '1', 'fc', '8Gb', '1', 'node1', '5005076802132ADE', '012E00', 'active', 'switch'] node_1[2] = ['1', '2', '2', 'fc', '8Gb', '1', 'node1', '5005076802232ADE', '012E00', 'active', 'switch'] node_1[3] = ['2', '3', '3', 'fc', '8Gb', '1', 'node1', '5005076802332ADE', '9B0600', 'active', 'switch'] node_1[4] = ['3', '4', '4', 'fc', '8Gb', '1', 'node1', '5005076802432ADE', '012A00', 'active', 
'switch'] node_1[5] = ['4', '5', '5', 'fc', '8Gb', '1', 'node1', '5005076802532ADE', '014A00', 'active', 'switch'] node_1[6] = ['5', '6', '4', 'ethernet', 'N/A', '1', 'node1', '5005076802632ADE', '000000', 'inactive_unconfigured', 'none'] node_2 = [None] * 7 node_2[0] = ['id', 'fc_io_port_id', 'port_id', 'type', 'port_speed', 'node_id', 'node_name', 'WWPN', 'nportid', 'status', 'attachment'] node_2[1] = ['6', '7', '7', 'fc', '8Gb', '2', 'node2', '5005086802132ADE', '012E00', 'active', 'switch'] node_2[2] = ['7', '8', '8', 'fc', '8Gb', '2', 'node2', '5005086802232ADE', '012E00', 'active', 'switch'] node_2[3] = ['8', '9', '9', 'fc', '8Gb', '2', 'node2', '5005086802332ADE', '9B0600', 'active', 'switch'] node_2[4] = ['9', '10', '10', 'fc', '8Gb', '2', 'node2', '5005086802432ADE', '012A00', 'active', 'switch'] node_2[5] = ['10', '11', '11', 'fc', '8Gb', '2', 'node2', '5005086802532ADE', '014A00', 'active', 'switch'] node_2[6] = ['11', '12', '12', 'ethernet', 'N/A', '2', 'node2', '5005086802632ADE', '000000', 'inactive_unconfigured', 'none'] node_infos = [node_1, node_2] node_id = int(kwargs['filtervalue'].split('=')[1]) - 1 return self._print_info_cmd(rows=node_infos[node_id], **kwargs) # Print mostly made-up stuff in the correct syntax def _cmd_lsportip(self, **kwargs): if self._next_cmd_error['lsportip'] == 'ip_no_config': self._next_cmd_error['lsportip'] = '' ip_addr1 = '' ip_addr2 = '' gw = '' else: ip_addr1 = '1.234.56.78' ip_addr2 = '1.234.56.79' ip_addr3 = '1.234.56.80' ip_addr4 = '1.234.56.81' gw = '1.234.56.1' rows = [None] * 17 rows[0] = ['id', 'node_id', 'node_name', 'IP_address', 'mask', 'gateway', 'IP_address_6', 'prefix_6', 'gateway_6', 'MAC', 'duplex', 'state', 'speed', 'failover', 'link_state'] rows[1] = ['1', '1', 'node1', ip_addr1, '255.255.255.0', gw, '', '', '', '01:23:45:67:89:00', 'Full', 'online', '1Gb/s', 'no', 'active'] rows[2] = ['1', '1', 'node1', '', '', '', '', '', '', '01:23:45:67:89:00', 'Full', 'online', '1Gb/s', 'yes', ''] rows[3] = ['2', '1', 'node1', ip_addr3, '255.255.255.0', gw, '', '', '', '01:23:45:67:89:01', 'Full', 'configured', '1Gb/s', 'no', 'active'] rows[4] = ['2', '1', 'node1', '', '', '', '', '', '', '01:23:45:67:89:01', 'Full', 'unconfigured', '1Gb/s', 'yes', 'inactive'] rows[5] = ['3', '1', 'node1', '', '', '', '', '', '', '', '', 'unconfigured', '', 'no', ''] rows[6] = ['3', '1', 'node1', '', '', '', '', '', '', '', '', 'unconfigured', '', 'yes', ''] rows[7] = ['4', '1', 'node1', '', '', '', '', '', '', '', '', 'unconfigured', '', 'no', ''] rows[8] = ['4', '1', 'node1', '', '', '', '', '', '', '', '', 'unconfigured', '', 'yes', ''] rows[9] = ['1', '2', 'node2', ip_addr2, '255.255.255.0', gw, '', '', '', '01:23:45:67:89:02', 'Full', 'online', '1Gb/s', 'no', ''] rows[10] = ['1', '2', 'node2', '', '', '', '', '', '', '01:23:45:67:89:02', 'Full', 'online', '1Gb/s', 'yes', ''] rows[11] = ['2', '2', 'node2', ip_addr4, '255.255.255.0', gw, '', '', '', '01:23:45:67:89:03', 'Full', 'configured', '1Gb/s', 'no', 'inactive'] rows[12] = ['2', '2', 'node2', '', '', '', '', '', '', '01:23:45:67:89:03', 'Full', 'unconfigured', '1Gb/s', 'yes', ''] rows[13] = ['3', '2', 'node2', '', '', '', '', '', '', '', '', 'unconfigured', '', 'no', ''] rows[14] = ['3', '2', 'node2', '', '', '', '', '', '', '', '', 'unconfigured', '', 'yes', ''] rows[15] = ['4', '2', 'node2', '', '', '', '', '', '', '', '', 'unconfigured', '', 'no', ''] rows[16] = ['4', '2', 'node2', '', '', '', '', '', '', '', '', 'unconfigured', '', 'yes', ''] if self._next_cmd_error['lsportip'] == 
'header_mismatch': rows[0].pop(2) self._next_cmd_error['lsportip'] = '' if self._next_cmd_error['lsportip'] == 'remove_field': for row in rows: row.pop(1) self._next_cmd_error['lsportip'] = '' return self._print_info_cmd(rows=rows, **kwargs) def _cmd_lsfabric(self, **kwargs): if self._next_cmd_error['lsfabric'] == 'no_hosts': return ('', '') host_name = kwargs['host'].strip('\'\"') if 'host' in kwargs else None target_wwpn = kwargs['wwpn'] if 'wwpn' in kwargs else None host_infos = [] for hv in self._hosts_list.values(): if (not host_name) or (hv['host_name'] == host_name): if not target_wwpn or target_wwpn in hv['wwpns']: host_infos.append(hv) break if not len(host_infos): return ('', '') rows = [] rows.append(['remote_wwpn', 'remote_nportid', 'id', 'node_name', 'local_wwpn', 'local_port', 'local_nportid', 'state', 'name', 'cluster_name', 'type']) for host_info in host_infos: for wwpn in host_info['wwpns']: rows.append([wwpn, '123456', host_info['id'], 'nodeN', 'AABBCCDDEEFF0011', '1', '0123ABC', 'active', host_info['host_name'], '', 'host']) if self._next_cmd_error['lsfabric'] == 'header_mismatch': rows[0].pop(0) self._next_cmd_error['lsfabric'] = '' if self._next_cmd_error['lsfabric'] == 'remove_field': for row in rows: row.pop(0) self._next_cmd_error['lsfabric'] = '' if self._next_cmd_error['lsfabric'] == 'remove_rows': rows = [] return self._print_info_cmd(rows=rows, **kwargs) def _get_lcmap_info(self, vol_name): ret_vals = { 'fc_id': '', 'fc_name': '', 'lc_map_count': '0', } for lcmap in self._lcmappings_list.values(): if ((lcmap['source'] == vol_name) or (lcmap['target'] == vol_name)): ret_vals['fc_id'] = lcmap['id'] ret_vals['fc_name'] = lcmap['name'] ret_vals['lc_map_count'] = '1' return ret_vals # List information about vdisks def _cmd_lsvdisk(self, **kwargs): rows = [] rows.append(['id', 'name', 'IO_group_id', 'IO_group_name', 'status', 'mdisk_grp_id', 'mdisk_grp_name', 'capacity', 'type', 'FC_id', 'FC_name', 'RC_id', 'RC_name', 'vdisk_UID', 'lc_map_count', 'copy_count', 'fast_write_state', 'se_copy_count', 'RC_change']) for vol in self._volumes_list.values(): if (('filtervalue' not in kwargs) or (kwargs['filtervalue'] == 'name=' + vol['name']) or (kwargs['filtervalue'] == 'vdisk_UID=' + vol['uid'])): lcmap_info = self._get_lcmap_info(vol['name']) if 'bytes' in kwargs: cap = self._convert_bytes_units(vol['capacity']) else: cap = vol['capacity'] rows.append([str(vol['id']), vol['name'], vol['IO_group_id'], vol['IO_group_name'], 'online', '0', get_test_pool(), cap, 'striped', lcmap_info['fc_id'], lcmap_info['fc_name'], '', '', vol['uid'], lcmap_info['lc_map_count'], '1', 'empty', '1', 'no']) if 'obj' not in kwargs: return self._print_info_cmd(rows=rows, **kwargs) else: if kwargs['obj'] not in self._volumes_list: return self._errors['CMMVC5754E'] vol = self._volumes_list[kwargs['obj']] lcmap_info = self._get_lcmap_info(vol['name']) cap = vol['capacity'] cap_u = vol['used_capacity'] cap_r = vol['real_capacity'] cap_f = vol['free_capacity'] if 'bytes' not in kwargs: for item in [cap, cap_u, cap_r, cap_f]: item = self._convert_bytes_units(item) rows = [] rows.append(['id', str(vol['id'])]) rows.append(['name', vol['name']]) rows.append(['IO_group_id', vol['IO_group_id']]) rows.append(['IO_group_name', vol['IO_group_name']]) rows.append(['status', 'online']) rows.append(['capacity', cap]) rows.append(['formatted', vol['formatted']]) rows.append(['mdisk_id', '']) rows.append(['mdisk_name', '']) rows.append(['FC_id', lcmap_info['fc_id']]) rows.append(['FC_name', lcmap_info['fc_name']]) 
rows.append(['RC_id', vol['RC_id']]) rows.append(['RC_name', vol['RC_name']]) rows.append(['vdisk_UID', vol['uid']]) rows.append(['throttling', '0']) if self._next_cmd_error['lsvdisk'] == 'blank_pref_node': rows.append(['preferred_node_id', '']) self._next_cmd_error['lsvdisk'] = '' elif self._next_cmd_error['lsvdisk'] == 'no_pref_node': self._next_cmd_error['lsvdisk'] = '' else: rows.append(['preferred_node_id', '1']) rows.append(['fast_write_state', 'empty']) rows.append(['cache', 'readwrite']) rows.append(['udid', '']) rows.append(['lc_map_count', lcmap_info['lc_map_count']]) rows.append(['sync_rate', '50']) rows.append(['copy_count', '1']) rows.append(['se_copy_count', '0']) rows.append(['mirror_write_priority', 'latency']) rows.append(['RC_change', 'no']) for copy in vol['copies'].values(): rows.append(['copy_id', copy['id']]) rows.append(['status', copy['status']]) rows.append(['primary', copy['primary']]) rows.append(['mdisk_grp_id', copy['mdisk_grp_id']]) rows.append(['mdisk_grp_name', copy['mdisk_grp_name']]) rows.append(['type', 'striped']) rows.append(['used_capacity', cap_u]) rows.append(['real_capacity', cap_r]) rows.append(['free_capacity', cap_f]) rows.append(['in_tier', copy['in_tier']]) rows.append(['compressed_copy', copy['compressed_copy']]) rows.append(['autoexpand', vol['autoexpand']]) rows.append(['warning', vol['warning']]) rows.append(['grainsize', vol['grainsize']]) if 'nohdr' in kwargs: for index in range(len(rows)): rows[index] = ' '.join(rows[index][1:]) if 'delim' in kwargs: for index in range(len(rows)): rows[index] = kwargs['delim'].join(rows[index]) return ('%s' % '\n'.join(rows), '') def _cmd_lsiogrp(self, **kwargs): rows = [None] * 6 rows[0] = ['id', 'name', 'node_count', 'vdisk_count', 'host_count'] rows[1] = ['0', 'io_grp0', '2', '0', '4'] rows[2] = ['1', 'io_grp1', '2', '0', '4'] rows[3] = ['2', 'io_grp2', '0', '0', '4'] rows[4] = ['3', 'io_grp3', '0', '0', '4'] rows[5] = ['4', 'recovery_io_grp', '0', '0', '0'] return self._print_info_cmd(rows=rows, **kwargs) # List information about hosts def _cmd_lshost(self, **kwargs): if 'obj' not in kwargs: rows = [] rows.append(['id', 'name', 'port_count', 'iogrp_count', 'status']) found = False # Sort hosts by names to give predictable order for tests # depend on it. 
for host_name in sorted(self._hosts_list.keys()): host = self._hosts_list[host_name] filterstr = 'name=' + host['host_name'] if (('filtervalue' not in kwargs) or (kwargs['filtervalue'] == filterstr)): rows.append([host['id'], host['host_name'], '1', '4', 'offline']) found = True if found: return self._print_info_cmd(rows=rows, **kwargs) else: return ('', '') else: if self._next_cmd_error['lshost'] == 'missing_host': self._next_cmd_error['lshost'] = '' return self._errors['CMMVC5754E'] elif self._next_cmd_error['lshost'] == 'bigger_troubles': return self._errors['CMMVC6527E'] host_name = kwargs['obj'].strip('\'\"') if host_name not in self._hosts_list: return self._errors['CMMVC5754E'] if (self._next_cmd_error['lshost'] == 'fail_fastpath' and host_name == 'DifferentHost'): return self._errors['CMMVC5701E'] host = self._hosts_list[host_name] rows = [] rows.append(['id', host['id']]) rows.append(['name', host['host_name']]) rows.append(['port_count', '1']) rows.append(['type', 'generic']) rows.append(['mask', '1111']) rows.append(['iogrp_count', '4']) rows.append(['status', 'online']) for port in host['iscsi_names']: rows.append(['iscsi_name', port]) rows.append(['node_logged_in_count', '0']) rows.append(['state', 'offline']) for port in host['wwpns']: rows.append(['WWPN', port]) rows.append(['node_logged_in_count', '0']) rows.append(['state', 'active']) if 'nohdr' in kwargs: for index in range(len(rows)): rows[index] = ' '.join(rows[index][1:]) if 'delim' in kwargs: for index in range(len(rows)): rows[index] = kwargs['delim'].join(rows[index]) return ('%s' % '\n'.join(rows), '') # List iSCSI authorization information about hosts def _cmd_lsiscsiauth(self, **kwargs): if self._next_cmd_error['lsiscsiauth'] == 'no_info': self._next_cmd_error['lsiscsiauth'] = '' return ('', '') rows = [] rows.append(['type', 'id', 'name', 'iscsi_auth_method', 'iscsi_chap_secret']) for host in self._hosts_list.values(): method = 'none' secret = '' if 'chapsecret' in host: method = 'chap' secret = host['chapsecret'] rows.append(['host', host['id'], host['host_name'], method, secret]) return self._print_info_cmd(rows=rows, **kwargs) # List information about host->vdisk mappings def _cmd_lshostvdiskmap(self, **kwargs): host_name = kwargs['obj'].strip('\'\"') if host_name not in self._hosts_list: return self._errors['CMMVC5754E'] rows = [] rows.append(['id', 'name', 'SCSI_id', 'vdisk_id', 'vdisk_name', 'vdisk_UID']) for mapping in self._mappings_list.values(): if (host_name == '') or (mapping['host'] == host_name): volume = self._volumes_list[mapping['vol']] rows.append([mapping['id'], mapping['host'], mapping['lun'], volume['id'], volume['name'], volume['uid']]) return self._print_info_cmd(rows=rows, **kwargs) # List information about vdisk->host mappings def _cmd_lsvdiskhostmap(self, **kwargs): mappings_found = 0 vdisk_name = kwargs['obj'].strip('\'\"') if vdisk_name not in self._volumes_list: return self._errors['CMMVC5753E'] rows = [] rows.append(['id name', 'SCSI_id', 'host_id', 'host_name', 'vdisk_UID', 'IO_group_id', 'IO_group_name']) for mapping in self._mappings_list.values(): if (mapping['vol'] == vdisk_name): mappings_found += 1 volume = self._volumes_list[mapping['vol']] host = self._hosts_list[mapping['host']] rows.append([volume['id'], mapping['lun'], host['id'], host['host_name'], volume['uid'], volume['IO_group_id'], volume['IO_group_name']]) if mappings_found: return self._print_info_cmd(rows=rows, **kwargs) else: return ('', '') def _cmd_lsvdisklcmappings(self, **kwargs): if 'obj' not in kwargs: 
return self._errors['CMMVC5707E'] vdisk = kwargs['obj'] rows = [] rows.append(['id', 'name']) for v in self._lcmappings_list.values(): if v['source'] == vdisk or v['target'] == vdisk: rows.append([v['id'], v['name']]) return self._print_info_cmd(rows=rows, **kwargs) def _cmd_lslcmap(self, **kwargs): rows = [] rows.append(['id', 'name', 'source_vdisk_id', 'source_vdisk_name', 'target_vdisk_id', 'target_vdisk_name', 'group_id', 'group_name', 'status', 'progress', 'copy_rate', 'clean_progress', 'incremental', 'partner_FC_id', 'partner_FC_name', 'restoring', 'start_time', 'rc_controlled']) # Assume we always get a filtervalue argument filter_key = kwargs['filtervalue'].split('=')[0] filter_value = kwargs['filtervalue'].split('=')[1] to_delete = [] for k, v in self._lcmappings_list.items(): if str(v[filter_key]) == filter_value: source = self._volumes_list[v['source']] target = self._volumes_list[v['target']] self._state_transition('wait', v) if self._next_cmd_error['lslcmap'] == 'speed_up': self._next_cmd_error['lslcmap'] = '' curr_state = v['status'] while self._state_transition('wait', v) == ("", ""): if curr_state == v['status']: break curr_state = v['status'] if ((v['status'] == 'idle_or_copied' and v['autodelete'] and v['progress'] == '100') or (v['status'] == 'end')): to_delete.append(k) else: rows.append([v['id'], v['name'], source['id'], source['name'], target['id'], target['name'], '', '', v['status'], v['progress'], v['copyrate'], '100', 'off', '', '', 'no', '', 'no']) for d in to_delete: del self._lcmappings_list[d] return self._print_info_cmd(rows=rows, **kwargs) def _cmd_lslcconsistgrp(self, **kwargs): rows = [] if 'obj' not in kwargs: rows.append(['id', 'name', 'status', 'start_time']) for lcconsistgrp in self._lcconsistgrp_list.values(): rows.append([lcconsistgrp['id'], lcconsistgrp['name'], lcconsistgrp['status'], lcconsistgrp['start_time']]) return self._print_info_cmd(rows=rows, **kwargs) else: lcconsistgrp = None cg_id = 0 for cg_id in self._lcconsistgrp_list.keys(): if self._lcconsistgrp_list[cg_id]['name'] == kwargs['obj']: lcconsistgrp = self._lcconsistgrp_list[cg_id] rows = [] rows.append(['id', str(cg_id)]) rows.append(['name', lcconsistgrp['name']]) rows.append(['status', lcconsistgrp['status']]) rows.append(['autodelete', str(lcconsistgrp['autodelete'])]) rows.append(['start_time', str(lcconsistgrp['start_time'])]) for lcmap_id in lcconsistgrp['lcmaps'].keys(): rows.append(['FC_mapping_id', str(lcmap_id)]) rows.append(['FC_mapping_name', lcconsistgrp['lcmaps'][lcmap_id]]) if 'delim' in kwargs: for index in range(len(rows)): rows[index] = kwargs['delim'].join(rows[index]) self._lc_cg_state_transition('wait', lcconsistgrp) return ('%s' % '\n'.join(rows), '') def _cmd_lsvdiskcopy(self, **kwargs): if 'obj' not in kwargs: return self._errors['CMMVC5804E'] name = kwargs['obj'] vol = self._volumes_list[name] rows = [] rows.append(['vdisk_id', 'vdisk_name', 'copy_id', 'status', 'sync', 'primary', 'mdisk_grp_id', 'mdisk_grp_name', 'capacity', 'type', 'se_copy', 'in_tier', 'in_tier_status', 'compressed_copy']) for copy in vol['copies'].values(): rows.append([vol['id'], vol['name'], copy['id'], copy['status'], copy['sync'], copy['primary'], copy['mdisk_grp_id'], copy['mdisk_grp_name'], vol['capacity'], 'striped', 'yes', copy['in_tier'], 'inactive', copy['compressed_copy']]) if 'copy' not in kwargs: return self._print_info_cmd(rows=rows, **kwargs) else: copy_id = kwargs['copy'].strip('\'\"') if copy_id not in vol['copies']: return self._errors['CMMVC6353E'] copy = 
vol['copies'][copy_id] rows = [] rows.append(['vdisk_id', vol['id']]) rows.append(['vdisk_name', vol['name']]) rows.append(['capacity', vol['capacity']]) rows.append(['copy_id', copy['id']]) rows.append(['status', copy['status']]) rows.append(['sync', copy['sync']]) copy['sync'] = 'yes' rows.append(['primary', copy['primary']]) rows.append(['mdisk_grp_id', copy['mdisk_grp_id']]) rows.append(['mdisk_grp_name', copy['mdisk_grp_name']]) rows.append(['in_tier', copy['in_tier']]) rows.append(['in_tier_status', 'inactive']) rows.append(['compressed_copy', copy['compressed_copy']]) rows.append(['autoexpand', vol['autoexpand']]) if 'delim' in kwargs: for index in range(len(rows)): rows[index] = kwargs['delim'].join(rows[index]) return ('%s' % '\n'.join(rows), '') # list vdisk sync process def _cmd_lsvdisksyncprogress(self, **kwargs): if 'obj' not in kwargs: return self._errors['CMMVC5804E'] name = kwargs['obj'] copy_id = kwargs.get('copy', None) vol = self._volumes_list[name] rows = [] rows.append(['vdisk_id', 'vdisk_name', 'copy_id', 'progress', 'estimated_completion_time']) copy_found = False for copy in vol['copies'].values(): if not copy_id or copy_id == copy['id']: copy_found = True row = [vol['id'], name, copy['id']] if copy['sync'] == 'yes': row.extend(['100', '']) else: row.extend(['50', '140210115226']) copy['sync'] = 'yes' rows.append(row) if not copy_found: return self._errors['CMMVC5804E'] return self._print_info_cmd(rows=rows, **kwargs) def _cmd_lsrcrelationship(self, **kwargs): rows = [] rows.append(['id', 'name', 'master_cluster_id', 'master_cluster_name', 'master_vdisk_id', 'master_vdisk_name', 'aux_cluster_id', 'aux_cluster_name', 'aux_vdisk_id', 'aux_vdisk_name', 'consistency_group_id', 'primary', 'consistency_group_name', 'state', 'bg_copy_priority', 'progress', 'freeze_time', 'status', 'sync', 'copy_type', 'cycling_mode', 'cycle_period_seconds', 'master_change_vdisk_id', 'master_change_vdisk_name', 'aux_change_vdisk_id', 'aux_change_vdisk_name']) # Assume we always get a filtervalue argument filter_key = kwargs['filtervalue'].split('=')[0] filter_value = kwargs['filtervalue'].split('=')[1] for k, v in self._rcrelationship_list.items(): if str(v[filter_key]) == filter_value: self._rc_state_transition('wait', v) if self._next_cmd_error['lsrcrelationship'] == 'speed_up': self._next_cmd_error['lsrcrelationship'] = '' curr_state = v['status'] while self._rc_state_transition('wait', v) == ("", ""): if curr_state == v['status']: break curr_state = v['status'] rows.append([v['id'], v['name'], v['master_cluster_id'], v['master_cluster_name'], v['master_vdisk_id'], v['master_vdisk_name'], v['aux_cluster_id'], v['aux_cluster_name'], v['aux_vdisk_id'], v['aux_vdisk_name'], v['consistency_group_id'], v['primary'], v['consistency_group_name'], v['state'], v['bg_copy_priority'], v['progress'], v['freeze_time'], v['status'], v['sync'], v['copy_type'], v['cycling_mode'], v['cycle_period_seconds'], v['master_change_vdisk_id'], v['master_change_vdisk_name'], v['aux_change_vdisk_id'], v['aux_change_vdisk_name']]) return self._print_info_cmd(rows=rows, **kwargs) def _cmd_lspartnershipcandidate(self, **kwargs): rows = [None] * 4 master_sys = self._system_list['instorage-mcs-sim'] aux_sys = self._system_list['aux-mcs-sim'] rows[0] = ['id', 'configured', 'name'] rows[1] = [master_sys['id'], 'no', master_sys['name']] rows[2] = [aux_sys['id'], 'no', aux_sys['name']] rows[3] = ['0123456789001234', 'no', 'fake_mcs'] return self._print_info_cmd(rows=rows, **kwargs) def _cmd_lspartnership(self, **kwargs): 
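# _cmd_lspartnership lists partnership information; a 'local' entry for the master system is added on demand before the filtervalue filter is applied.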
rows = [] rows.append(['id', 'name', 'location', 'partnership', 'type', 'cluster_ip', 'event_log_sequence']) master_sys = self._system_list['instorage-mcs-sim'] if master_sys['name'] not in self._partnership_list: local_info = {} local_info['id'] = master_sys['id'] local_info['name'] = master_sys['name'] local_info['location'] = 'local' local_info['type'] = '' local_info['cluster_ip'] = '' local_info['event_log_sequence'] = '' local_info['chap_secret'] = '' local_info['linkbandwidthmbits'] = '' local_info['backgroundcopyrate'] = '' local_info['partnership'] = '' self._partnership_list[master_sys['id']] = local_info # Assume we always get a filtervalue argument filter_key = kwargs['filtervalue'].split('=')[0] filter_value = kwargs['filtervalue'].split('=')[1] for k, v in self._partnership_list.items(): if str(v[filter_key]) == filter_value: rows.append([v['id'], v['name'], v['location'], v['partnership'], v['type'], v['cluster_ip'], v['event_log_sequence']]) return self._print_info_cmd(rows=rows, **kwargs) def _get_mdiskgrp_id(self, mdiskgrp): grp_num = len(self._flags['instorage_mcs_volpool_name']) if mdiskgrp in self._flags['instorage_mcs_volpool_name']: for i in range(grp_num): if mdiskgrp == self._flags['instorage_mcs_volpool_name'][i]: return i + 1 elif mdiskgrp == 'openstack2': return grp_num + 1 elif mdiskgrp == 'openstack3': return grp_num + 2 else: return None # Create a vdisk def _cmd_mkvdisk(self, **kwargs): # We only save the id/uid, name, and size - all else will be made up volume_info = {} volume_info['id'] = self._find_unused_id(self._volumes_list) volume_info['uid'] = ('ABCDEF' * 3) + ('0' * 14) + volume_info['id'] mdiskgrp = kwargs['mdiskgrp'].strip('\'\"') if mdiskgrp == kwargs['mdiskgrp']: raise exception.InvalidInput( reason='mdiskgrp missing quotes %s' % kwargs['mdiskgrp']) mdiskgrp_id = self._get_mdiskgrp_id(mdiskgrp) volume_info['mdisk_grp_name'] = mdiskgrp volume_info['mdisk_grp_id'] = str(mdiskgrp_id) if 'name' in kwargs: volume_info['name'] = kwargs['name'].strip('\'\"') else: volume_info['name'] = 'vdisk' + volume_info['id'] # Assume size and unit are given, store it in bytes capacity = int(kwargs['size']) unit = kwargs['unit'] volume_info['capacity'] = self._convert_units_bytes(capacity, unit) volume_info['IO_group_id'] = kwargs['iogrp'] volume_info['IO_group_name'] = 'io_grp%s' % kwargs['iogrp'] volume_info['RC_name'] = '' volume_info['RC_id'] = '' if 'intier' in kwargs: if kwargs['intier'] == 'on': volume_info['in_tier'] = 'on' else: volume_info['in_tier'] = 'off' if 'rsize' in kwargs: volume_info['formatted'] = 'no' # Fake numbers volume_info['used_capacity'] = '786432' volume_info['real_capacity'] = '21474816' volume_info['free_capacity'] = '38219264' if 'warning' in kwargs: volume_info['warning'] = kwargs['warning'].rstrip('%') else: volume_info['warning'] = '80' if 'autoexpand' in kwargs: volume_info['autoexpand'] = 'on' else: volume_info['autoexpand'] = 'off' if 'grainsize' in kwargs: volume_info['grainsize'] = kwargs['grainsize'] else: volume_info['grainsize'] = '32' if 'compressed' in kwargs: volume_info['compressed_copy'] = 'yes' else: volume_info['compressed_copy'] = 'no' else: volume_info['used_capacity'] = volume_info['capacity'] volume_info['real_capacity'] = volume_info['capacity'] volume_info['free_capacity'] = '0' volume_info['warning'] = '' volume_info['autoexpand'] = '' volume_info['grainsize'] = '' volume_info['compressed_copy'] = 'no' volume_info['formatted'] = 'yes' if 'nofmtdisk' in kwargs: if kwargs['nofmtdisk']: volume_info['formatted'] = 
'no' vol_cp = {'id': '0', 'status': 'online', 'sync': 'yes', 'primary': 'yes', 'mdisk_grp_id': str(mdiskgrp_id), 'mdisk_grp_name': mdiskgrp, 'in_tier': volume_info['in_tier'], 'compressed_copy': volume_info['compressed_copy']} volume_info['copies'] = {'0': vol_cp} if volume_info['name'] in self._volumes_list: return self._errors['CMMVC6035E'] else: self._volumes_list[volume_info['name']] = volume_info return ('Virtual Disk, id [%s], successfully created' % (volume_info['id']), '') # Delete a vdisk def _cmd_rmvdisk(self, **kwargs): force = True if 'force' in kwargs else False if 'obj' not in kwargs: return self._errors['CMMVC5701E'] vol_name = kwargs['obj'].strip('\'\"') if vol_name not in self._volumes_list: return self._errors['CMMVC5753E'] if not force: for mapping in self._mappings_list.values(): if mapping['vol'] == vol_name: return self._errors['CMMVC5840E'] for lcmap in self._lcmappings_list.values(): if ((lcmap['source'] == vol_name) or (lcmap['target'] == vol_name)): return self._errors['CMMVC5840E'] del self._volumes_list[vol_name] return ('', '') def _cmd_expandvdisksize(self, **kwargs): if 'obj' not in kwargs: return self._errors['CMMVC5701E'] vol_name = kwargs['obj'].strip('\'\"') # Assume unit is gb if 'size' not in kwargs: return self._errors['CMMVC5707E'] size = int(kwargs['size']) if vol_name not in self._volumes_list: return self._errors['CMMVC5753E'] curr_size = int(self._volumes_list[vol_name]['capacity']) addition = size * units.Gi self._volumes_list[vol_name]['capacity'] = ( str(curr_size + addition)) return ('', '') def _add_port_to_host(self, host_info, **kwargs): if 'iscsiname' in kwargs: added_key = 'iscsi_names' added_val = kwargs['iscsiname'].strip('\'\"') elif 'hbawwpn' in kwargs: added_key = 'wwpns' added_val = kwargs['hbawwpn'].strip('\'\"') else: return self._errors['CMMVC5707E'] host_info[added_key].append(added_val) for v in self._hosts_list.values(): if v['id'] == host_info['id']: continue for port in v[added_key]: if port == added_val: return self._errors['CMMVC6581E'] return ('', '') # Make a host def _cmd_mkhost(self, **kwargs): host_info = {} host_info['id'] = self._find_unused_id(self._hosts_list) if 'name' in kwargs: host_name = kwargs['name'].strip('\'\"') else: host_name = 'host' + str(host_info['id']) if self._is_invalid_name(host_name): return self._errors['CMMVC6527E'] if host_name in self._hosts_list: return self._errors['CMMVC6035E'] host_info['host_name'] = host_name host_info['iscsi_names'] = [] host_info['wwpns'] = [] out, err = self._add_port_to_host(host_info, **kwargs) if not len(err): self._hosts_list[host_name] = host_info return ('Host, id [%s], successfully created' % (host_info['id']), '') else: return (out, err) # Add ports to an existing host def _cmd_addhostport(self, **kwargs): if 'obj' not in kwargs: return self._errors['CMMVC5701E'] host_name = kwargs['obj'].strip('\'\"') if host_name not in self._hosts_list: return self._errors['CMMVC5753E'] host_info = self._hosts_list[host_name] return self._add_port_to_host(host_info, **kwargs) # Change host properties def _cmd_chhost(self, **kwargs): if 'chapsecret' not in kwargs: return self._errors['CMMVC5707E'] secret = kwargs['obj'].strip('\'\"') if 'obj' not in kwargs: return self._errors['CMMVC5701E'] host_name = kwargs['obj'].strip('\'\"') if host_name not in self._hosts_list: return self._errors['CMMVC5753E'] self._hosts_list[host_name]['chapsecret'] = secret return ('', '') # Remove a host def _cmd_rmhost(self, **kwargs): if 'obj' not in kwargs: return self._errors['CMMVC5701E'] 
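# Removing a host fails with CMMVC5871E while any vdisk-to-host mapping for it still exists.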
host_name = kwargs['obj'].strip('\'\"') if host_name not in self._hosts_list: return self._errors['CMMVC5753E'] for v in self._mappings_list.values(): if (v['host'] == host_name): return self._errors['CMMVC5871E'] del self._hosts_list[host_name] return ('', '') # Create a vdisk-host mapping def _cmd_mkvdiskhostmap(self, **kwargs): mapping_info = {} mapping_info['id'] = self._find_unused_id(self._mappings_list) if 'host' not in kwargs: return self._errors['CMMVC5707E'] mapping_info['host'] = kwargs['host'].strip('\'\"') if 'scsi' in kwargs: mapping_info['lun'] = kwargs['scsi'].strip('\'\"') else: mapping_info['lun'] = mapping_info['id'] if 'obj' not in kwargs: return self._errors['CMMVC5707E'] mapping_info['vol'] = kwargs['obj'].strip('\'\"') if mapping_info['vol'] not in self._volumes_list: return self._errors['CMMVC5753E'] if mapping_info['host'] not in self._hosts_list: return self._errors['CMMVC5754E'] if mapping_info['vol'] in self._mappings_list: return self._errors['CMMVC6071E'] for v in self._mappings_list.values(): if ((v['host'] == mapping_info['host']) and (v['lun'] == mapping_info['lun'])): return self._errors['CMMVC5879E'] for v in self._mappings_list.values(): if (v['vol'] == mapping_info['vol']) and ('force' not in kwargs): return self._errors['CMMVC6071E'] self._mappings_list[mapping_info['id']] = mapping_info return ('Virtual Disk to Host map, id [%s], successfully created' % (mapping_info['id']), '') # Delete a vdisk-host mapping def _cmd_rmvdiskhostmap(self, **kwargs): if 'host' not in kwargs: return self._errors['CMMVC5707E'] host = kwargs['host'].strip('\'\"') if 'obj' not in kwargs: return self._errors['CMMVC5701E'] vol = kwargs['obj'].strip('\'\"') mapping_ids = [] for v in self._mappings_list.values(): if v['vol'] == vol: mapping_ids.append(v['id']) if not mapping_ids: return self._errors['CMMVC5753E'] this_mapping = None for mapping_id in mapping_ids: if self._mappings_list[mapping_id]['host'] == host: this_mapping = mapping_id if this_mapping is None: return self._errors['CMMVC5753E'] del self._mappings_list[this_mapping] return ('', '') # Create a LocalCopy mapping def _cmd_mklcmap(self, **kwargs): source = '' target = '' copyrate = kwargs['copyrate'] if 'copyrate' in kwargs else '50' if 'source' not in kwargs: return self._errors['CMMVC5707E'] source = kwargs['source'].strip('\'\"') if source not in self._volumes_list: return self._errors['CMMVC5754E'] if 'target' not in kwargs: return self._errors['CMMVC5707E'] target = kwargs['target'].strip('\'\"') if target not in self._volumes_list: return self._errors['CMMVC5754E'] if source == target: return self._errors['CMMVC6303E'] if (self._volumes_list[source]['capacity'] != self._volumes_list[target]['capacity']): return self._errors['CMMVC5754E'] lcmap_info = {} lcmap_info['source'] = source lcmap_info['target'] = target lcmap_info['id'] = self._find_unused_id(self._lcmappings_list) lcmap_info['name'] = 'lcmap' + lcmap_info['id'] lcmap_info['copyrate'] = copyrate lcmap_info['progress'] = '0' lcmap_info['autodelete'] = True if 'autodelete' in kwargs else False lcmap_info['status'] = 'idle_or_copied' # Add lcmap to consistency group if 'consistgrp' in kwargs: consistgrp = kwargs['consistgrp'] # if is digit, assume is cg id, else is cg name cg_id = 0 if not consistgrp.isdigit(): for consistgrp_key in self._lcconsistgrp_list.keys(): if (self._lcconsistgrp_list[consistgrp_key]['name'] == consistgrp): cg_id = consistgrp_key lcmap_info['consistgrp'] = consistgrp_key break else: if int(consistgrp) in 
self._lcconsistgrp_list.keys(): cg_id = int(consistgrp) # If can't find exist consistgrp id, return not exist error if not cg_id: return self._errors['CMMVC5754E'] lcmap_info['consistgrp'] = cg_id # Add lcmap to consistgrp self._lcconsistgrp_list[cg_id]['lcmaps'][lcmap_info['id']] = ( lcmap_info['name']) self._lc_cg_state_transition('add', self._lcconsistgrp_list[cg_id]) self._lcmappings_list[lcmap_info['id']] = lcmap_info return ('LocalCopy Mapping, id [' + lcmap_info['id'] + '], successfully created', '') def _cmd_prestartlcmap(self, **kwargs): if 'obj' not in kwargs: return self._errors['CMMVC5701E'] id_num = kwargs['obj'] if self._next_cmd_error['prestartlcmap'] == 'bad_id': id_num = -1 self._next_cmd_error['prestartlcmap'] = '' try: lcmap = self._lcmappings_list[id_num] except KeyError: return self._errors['CMMVC5753E'] return self._state_transition('prepare', lcmap) def _cmd_startlcmap(self, **kwargs): if 'obj' not in kwargs: return self._errors['CMMVC5701E'] id_num = kwargs['obj'] if self._next_cmd_error['startlcmap'] == 'bad_id': id_num = -1 self._next_cmd_error['startlcmap'] = '' try: lcmap = self._lcmappings_list[id_num] except KeyError: return self._errors['CMMVC5753E'] return self._state_transition('start', lcmap) def _cmd_stoplcmap(self, **kwargs): if 'obj' not in kwargs: return self._errors['CMMVC5701E'] id_num = kwargs['obj'] try: lcmap = self._lcmappings_list[id_num] except KeyError: return self._errors['CMMVC5753E'] return self._state_transition('stop', lcmap) def _cmd_rmlcmap(self, **kwargs): if 'obj' not in kwargs: return self._errors['CMMVC5701E'] id_num = kwargs['obj'] force = True if 'force' in kwargs else False if self._next_cmd_error['rmlcmap'] == 'bad_id': id_num = -1 self._next_cmd_error['rmlcmap'] = '' try: lcmap = self._lcmappings_list[id_num] except KeyError: return self._errors['CMMVC5753E'] function = 'delete_force' if force else 'delete' ret = self._state_transition(function, lcmap) if lcmap['status'] == 'end': del self._lcmappings_list[id_num] return ret def _cmd_chlcmap(self, **kwargs): if 'obj' not in kwargs: return self._errors['CMMVC5707E'] id_num = kwargs['obj'] try: lcmap = self._lcmappings_list[id_num] except KeyError: return self._errors['CMMVC5753E'] for key in ['name', 'copyrate', 'autodelete']: if key in kwargs: lcmap[key] = kwargs[key] return ('', '') # Create a LocalCopy mapping def _cmd_mklcconsistgrp(self, **kwargs): lcconsistgrp_info = {} lcconsistgrp_info['id'] = self._find_unused_id(self._lcconsistgrp_list) if 'name' in kwargs: lcconsistgrp_info['name'] = kwargs['name'].strip('\'\"') else: lcconsistgrp_info['name'] = 'lccstgrp' + lcconsistgrp_info['id'] if 'autodelete' in kwargs: lcconsistgrp_info['autodelete'] = True else: lcconsistgrp_info['autodelete'] = False lcconsistgrp_info['status'] = 'empty' lcconsistgrp_info['start_time'] = None lcconsistgrp_info['lcmaps'] = {} self._lcconsistgrp_list[lcconsistgrp_info['id']] = lcconsistgrp_info return ('LocalCopy Consistency Group, id [' + lcconsistgrp_info['id'] + '], successfully created', '') def _cmd_prestartlcconsistgrp(self, **kwargs): if 'obj' not in kwargs: return self._errors['CMMVC5701E'] cg_name = kwargs['obj'] cg_id = 0 for cg_id in self._lcconsistgrp_list.keys(): if cg_name == self._lcconsistgrp_list[cg_id]['name']: break return self._lc_cg_state_transition('prepare', self._lcconsistgrp_list[cg_id]) def _cmd_startlcconsistgrp(self, **kwargs): if 'obj' not in kwargs: return self._errors['CMMVC5701E'] cg_name = kwargs['obj'] cg_id = 0 for cg_id in self._lcconsistgrp_list.keys(): if 
cg_name == self._lcconsistgrp_list[cg_id]['name']: break return self._lc_cg_state_transition('start', self._lcconsistgrp_list[cg_id]) def _cmd_stoplcconsistgrp(self, **kwargs): if 'obj' not in kwargs: return self._errors['CMMVC5701E'] id_num = kwargs['obj'] try: lcconsistgrps = self._lcconsistgrp_list[id_num] except KeyError: return self._errors['CMMVC5753E'] return self._lc_cg_state_transition('stop', lcconsistgrps) def _cmd_rmlcconsistgrp(self, **kwargs): if 'obj' not in kwargs: return self._errors['CMMVC5701E'] cg_name = kwargs['obj'] force = True if 'force' in kwargs else False cg_id = 0 for cg_id in self._lcconsistgrp_list.keys(): if cg_name == self._lcconsistgrp_list[cg_id]['name']: break if not cg_id: return self._errors['CMMVC5753E'] lcconsistgrps = self._lcconsistgrp_list[cg_id] function = 'delete_force' if force else 'delete' ret = self._lc_cg_state_transition(function, lcconsistgrps) if lcconsistgrps['status'] == 'end': del self._lcconsistgrp_list[cg_id] return ret def _cmd_migratevdisk(self, **kwargs): if 'mdiskgrp' not in kwargs or 'vdisk' not in kwargs: return self._errors['CMMVC5707E'] mdiskgrp = kwargs['mdiskgrp'].strip('\'\"') vdisk = kwargs['vdisk'].strip('\'\"') if vdisk in self._volumes_list: curr_mdiskgrp = self._volumes_list else: for pool in self._other_pools: if vdisk in pool: curr_mdiskgrp = pool break else: return self._errors['CMMVC5754E'] if mdiskgrp == self._flags['instorage_mcs_volpool_name']: tgt_mdiskgrp = self._volumes_list elif mdiskgrp == 'openstack2': tgt_mdiskgrp = self._other_pools['openstack2'] elif mdiskgrp == 'openstack3': tgt_mdiskgrp = self._other_pools['openstack3'] else: return self._errors['CMMVC5754E'] if curr_mdiskgrp == tgt_mdiskgrp: return self._errors['CMMVC6430E'] vol = curr_mdiskgrp[vdisk] tgt_mdiskgrp[vdisk] = vol del curr_mdiskgrp[vdisk] return ('', '') def _cmd_addvdiskcopy(self, **kwargs): if 'obj' not in kwargs: return self._errors['CMMVC5701E'] vol_name = kwargs['obj'].strip('\'\"') if vol_name not in self._volumes_list: return self._errors['CMMVC5753E'] vol = self._volumes_list[vol_name] if 'mdiskgrp' not in kwargs: return self._errors['CMMVC5707E'] mdiskgrp = kwargs['mdiskgrp'].strip('\'\"') if mdiskgrp == kwargs['mdiskgrp']: raise exception.InvalidInput( reason='mdiskgrp missing quotes %s' % kwargs['mdiskgrp']) copy_info = {} copy_info['id'] = self._find_unused_id(vol['copies']) copy_info['status'] = 'online' copy_info['sync'] = 'no' copy_info['primary'] = 'no' copy_info['mdisk_grp_name'] = mdiskgrp copy_info['mdisk_grp_id'] = str(self._get_mdiskgrp_id(mdiskgrp)) if 'intier' in kwargs: if kwargs['intier'] == 'on': copy_info['in_tier'] = 'on' else: copy_info['in_tier'] = 'off' if 'rsize' in kwargs: if 'compressed' in kwargs: copy_info['compressed_copy'] = 'yes' else: copy_info['compressed_copy'] = 'no' vol['copies'][copy_info['id']] = copy_info return ('Vdisk [%(vid)s] copy [%(cid)s] successfully created' % {'vid': vol['id'], 'cid': copy_info['id']}, '') def _cmd_rmvdiskcopy(self, **kwargs): if 'obj' not in kwargs: return self._errors['CMMVC5701E'] vol_name = kwargs['obj'].strip('\'\"') if 'copy' not in kwargs: return self._errors['CMMVC5707E'] copy_id = kwargs['copy'].strip('\'\"') if vol_name not in self._volumes_list: return self._errors['CMMVC5753E'] vol = self._volumes_list[vol_name] if copy_id not in vol['copies']: return self._errors['CMMVC6353E'] del vol['copies'][copy_id] return ('', '') def _cmd_chvdisk(self, **kwargs): if 'obj' not in kwargs: return self._errors['CMMVC5701E'] vol_name = kwargs['obj'].strip('\'\"') vol 
= self._volumes_list[vol_name] kwargs.pop('obj') params = ['name', 'warning', 'udid', 'autoexpand', 'intier', 'primary'] for key, value in kwargs.items(): if key == 'intier': vol['in_tier'] = value continue if key == 'warning': vol['warning'] = value.rstrip('%') continue if key == 'name': vol['name'] = value del self._volumes_list[vol_name] self._volumes_list[value] = vol if key == 'primary': copies = self._volumes_list[vol_name]['copies'] if value == '0': copies['0']['primary'] = 'yes' copies['1']['primary'] = 'no' elif value == '1': copies['0']['primary'] = 'no' copies['1']['primary'] = 'yes' else: err = self._errors['CMMVC6353E'][1] % {'VALUE': key} return ('', err) if key in params: vol[key] = value else: err = self._errors['CMMVC5709E'][1] % {'VALUE': key} return ('', err) return ('', '') def _cmd_movevdisk(self, **kwargs): if 'obj' not in kwargs: return self._errors['CMMVC5701E'] vol_name = kwargs['obj'].strip('\'\"') vol = self._volumes_list[vol_name] if 'iogrp' not in kwargs: return self._errors['CMMVC5707E'] iogrp = kwargs['iogrp'] if iogrp.isdigit(): vol['IO_group_id'] = iogrp vol['IO_group_name'] = 'io_grp%s' % iogrp else: vol['IO_group_id'] = iogrp[6:] vol['IO_group_name'] = iogrp return ('', '') def _cmd_addvdiskaccess(self, **kwargs): if 'obj' not in kwargs: return self._errors['CMMVC5701E'] return ('', '') def _cmd_rmvdiskaccess(self, **kwargs): if 'obj' not in kwargs: return self._errors['CMMVC5701E'] return ('', '') def _add_host_to_list(self, connector): host_info = {} host_info['id'] = self._find_unused_id(self._hosts_list) host_info['host_name'] = connector['host'] host_info['iscsi_names'] = [] host_info['wwpns'] = [] if 'initiator' in connector: host_info['iscsi_names'].append(connector['initiator']) if 'wwpns' in connector: host_info['wwpns'] = host_info['wwpns'] + connector['wwpns'] self._hosts_list[connector['host']] = host_info def _host_in_list(self, host_name): for k in self._hosts_list: if k.startswith(host_name): return k return None # Replication related command # Create a remote copy def _cmd_mkrcrelationship(self, **kwargs): master_vol = '' aux_vol = '' aux_cluster = '' master_sys = self._system_list['instorage-mcs-sim'] aux_sys = self._system_list['aux-mcs-sim'] if 'master' not in kwargs: return self._errors['CMMVC5707E'] master_vol = kwargs['master'].strip('\'\"') if master_vol not in self._volumes_list: return self._errors['CMMVC5754E'] if 'aux' not in kwargs: return self._errors['CMMVC5707E'] aux_vol = kwargs['aux'].strip('\'\"') if aux_vol not in self._volumes_list: return self._errors['CMMVC5754E'] if 'cluster' not in kwargs: return self._errors['CMMVC5707E'] aux_cluster = kwargs['cluster'].strip('\'\"') if aux_cluster != aux_sys['name']: return self._errors['CMMVC5754E'] if (self._volumes_list[master_vol]['capacity'] != self._volumes_list[aux_vol]['capacity']): return self._errors['CMMVC5754E'] rcrel_info = {} rcrel_info['id'] = self._find_unused_id(self._rcrelationship_list) rcrel_info['name'] = 'rcrel' + rcrel_info['id'] rcrel_info['master_cluster_id'] = master_sys['id'] rcrel_info['master_cluster_name'] = master_sys['name'] rcrel_info['master_vdisk_id'] = self._volumes_list[master_vol]['id'] rcrel_info['master_vdisk_name'] = master_vol rcrel_info['aux_cluster_id'] = aux_sys['id'] rcrel_info['aux_cluster_name'] = aux_sys['name'] rcrel_info['aux_vdisk_id'] = self._volumes_list[aux_vol]['id'] rcrel_info['aux_vdisk_name'] = aux_vol rcrel_info['primary'] = 'master' rcrel_info['consistency_group_id'] = '' rcrel_info['consistency_group_name'] = '' 
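# A new relationship starts in the inconsistent_stopped state; _rc_state_transition moves it through the _rc_transitions table on start/stop/wait.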
rcrel_info['state'] = 'inconsistent_stopped' rcrel_info['bg_copy_priority'] = '50' rcrel_info['progress'] = '0' rcrel_info['freeze_time'] = '' rcrel_info['status'] = 'online' rcrel_info['sync'] = '' rcrel_info['copy_type'] = 'async' if 'async' in kwargs else 'sync' rcrel_info['cycling_mode'] = '' rcrel_info['cycle_period_seconds'] = '300' rcrel_info['master_change_vdisk_id'] = '' rcrel_info['master_change_vdisk_name'] = '' rcrel_info['aux_change_vdisk_id'] = '' rcrel_info['aux_change_vdisk_name'] = '' self._rcrelationship_list[rcrel_info['name']] = rcrel_info self._volumes_list[master_vol]['RC_name'] = rcrel_info['name'] self._volumes_list[master_vol]['RC_id'] = rcrel_info['id'] self._volumes_list[aux_vol]['RC_name'] = rcrel_info['name'] self._volumes_list[aux_vol]['RC_id'] = rcrel_info['id'] return ('RC Relationship, id [' + rcrel_info['id'] + '], successfully created', '') def _cmd_startrcrelationship(self, **kwargs): if 'obj' not in kwargs: return self._errors['CMMVC5701E'] id_num = kwargs['obj'] primary_vol = None if 'primary' in kwargs: primary_vol = kwargs['primary'].strip('\'\"') try: rcrel = self._rcrelationship_list[id_num] except KeyError: return self._errors['CMMVC5753E'] if rcrel['state'] == 'idling' and not primary_vol: return self._errors['CMMVC5963E'] self._rc_state_transition('start', rcrel) if primary_vol: self._rcrelationship_list[id_num]['primary'] = primary_vol return ('', '') def _cmd_stoprcrelationship(self, **kwargs): if 'obj' not in kwargs: return self._errors['CMMVC5701E'] id_num = kwargs['obj'] force_access = True if 'access' in kwargs else False try: rcrel = self._rcrelationship_list[id_num] except KeyError: return self._errors['CMMVC5753E'] function = 'stop_access' if force_access else 'stop' self._rc_state_transition(function, rcrel) if force_access: self._rcrelationship_list[id_num]['primary'] = '' return ('', '') def _cmd_switchrcrelationship(self, **kwargs): if 'obj' not in kwargs: return self._errors['CMMVC5707E'] id_num = kwargs['obj'] try: rcrel = self._rcrelationship_list[id_num] except KeyError: return self._errors['CMMVC5753E'] if rcrel['state'] == instorage_const.REP_CONSIS_SYNC: rcrel['primary'] = kwargs['primary'] return ('', '') else: return self._errors['CMMVC5753E'] def _cmd_rmrcrelationship(self, **kwargs): if 'obj' not in kwargs: return self._errors['CMMVC5701E'] id_num = kwargs['obj'] force = True if 'force' in kwargs else False try: rcrel = self._rcrelationship_list[id_num] except KeyError: return self._errors['CMMVC5753E'] function = 'delete_force' if force else 'delete' self._rc_state_transition(function, rcrel) if rcrel['state'] == 'end': self._volumes_list[rcrel['master_vdisk_name']]['RC_name'] = '' self._volumes_list[rcrel['master_vdisk_name']]['RC_id'] = '' self._volumes_list[rcrel['aux_vdisk_name']]['RC_name'] = '' self._volumes_list[rcrel['aux_vdisk_name']]['RC_id'] = '' del self._rcrelationship_list[id_num] return ('', '') def _rc_state_transition(self, function, rcrel): if (function == 'wait' and 'wait' not in self._rc_transitions[rcrel['state']]): return ('', '') if rcrel['state'] == 'inconsistent_copying' and function == 'wait': if rcrel['progress'] == '0': rcrel['progress'] = '50' else: rcrel['progress'] = '100' rcrel['state'] = 'consistent_synchronized' return ('', '') else: try: curr_state = rcrel['state'] rcrel['state'] = self._rc_transitions[curr_state][function] return ('', '') except Exception: return self._errors['CMMVC5982E'] def _cmd_mkippartnership(self, **kwargs): if 'clusterip' not in kwargs: return 
self._errors['CMMVC5707E'] clusterip = kwargs['master'].strip('\'\"') if 'linkbandwidthmbits' not in kwargs: return self._errors['CMMVC5707E'] bandwidth = kwargs['linkbandwidthmbits'].strip('\'\"') if 'backgroundcopyrate' not in kwargs: return self._errors['CMMVC5707E'] copyrate = kwargs['backgroundcopyrate'].strip('\'\"') if clusterip == '192.168.10.21': partner_info_id = self._system_list['instorage-mcs-sim']['id'] partner_info_name = self._system_list['instorage-mcs-sim']['name'] else: partner_info_id = self._system_list['aux-mcs-sim']['id'] partner_info_name = self._system_list['aux-mcs-sim']['name'] partner_info = {} partner_info['id'] = partner_info_id partner_info['name'] = partner_info_name partner_info['location'] = 'remote' partner_info['type'] = 'ipv4' partner_info['cluster_ip'] = clusterip partner_info['event_log_sequence'] = '' partner_info['chap_secret'] = '' partner_info['linkbandwidthmbits'] = bandwidth partner_info['backgroundcopyrate'] = copyrate partner_info['partnership'] = 'fully_configured' self._partnership_list[partner_info['id']] = partner_info return ('', '') def _cmd_mkfcpartnership(self, **kwargs): if 'obj' not in kwargs: return self._errors['CMMVC5701E'] peer_sys = kwargs['obj'] if 'linkbandwidthmbits' not in kwargs: return self._errors['CMMVC5707E'] bandwidth = kwargs['linkbandwidthmbits'].strip('\'\"') if 'backgroundcopyrate' not in kwargs: return self._errors['CMMVC5707E'] copyrate = kwargs['backgroundcopyrate'].strip('\'\"') partner_info = {} partner_info['id'] = self._system_list[peer_sys]['id'] partner_info['name'] = peer_sys partner_info['location'] = 'remote' partner_info['type'] = 'fc' partner_info['cluster_ip'] = '' partner_info['event_log_sequence'] = '' partner_info['chap_secret'] = '' partner_info['linkbandwidthmbits'] = bandwidth partner_info['backgroundcopyrate'] = copyrate partner_info['partnership'] = 'fully_configured' self._partnership_list[partner_info['id']] = partner_info return ('', '') def _cmd_chpartnership(self, **kwargs): if 'obj' not in kwargs: return self._errors['CMMVC5701E'] peer_sys = kwargs['obj'] if peer_sys not in self._partnership_list: return self._errors['CMMVC5753E'] partner_state = ('fully_configured' if 'start' in kwargs else 'fully_configured_stopped') self._partnership_list[peer_sys]['partnership'] = partner_state return ('', '') # The main function to run commands on the management simulator def execute_command(self, cmd, check_exit_code=True): try: kwargs = self._cmd_to_dict(cmd) except IndexError: return self._errors['CMMVC5707E'] command = kwargs.pop('cmd') func = getattr(self, '_cmd_' + command) out, err = func(**kwargs) if (check_exit_code) and (len(err) != 0): raise processutils.ProcessExecutionError(exit_code=1, stdout=out, stderr=err, cmd=' '.join(cmd)) return (out, err) # After calling this function, the next call to the specified command will # result in in the error specified def error_injection(self, cmd, error): self._next_cmd_error[cmd] = error def change_vdiskcopy_attr(self, vol_name, key, value, copy="primary"): if copy == 'primary': self._volumes_list[vol_name]['copies']['0'][key] = value elif copy == 'secondary': self._volumes_list[vol_name]['copies']['1'][key] = value else: msg = "The copy should be primary or secondary" raise exception.InvalidInput(reason=msg) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/volume/drivers/inspur/instorage/test_common.py0000664000175000017500000024151100000000000027661 
0ustar00zuulzuul00000000000000# Copyright 2017 Inspur Corp. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # """Tests for the Inspur InStorage volume driver.""" from unittest import mock import ddt from eventlet import greenthread from oslo_concurrency import processutils from oslo_config import cfg from oslo_utils import units import paramiko from cinder import context from cinder import db from cinder import exception from cinder import objects from cinder.objects import fields from cinder import ssh_utils from cinder.tests.unit import test from cinder.tests.unit import utils as testutils from cinder.tests.unit.volume.drivers.inspur.instorage import fakes from cinder.volume import configuration as conf from cinder.volume.drivers.inspur.instorage import ( replication as instorage_rep) from cinder.volume.drivers.inspur.instorage import instorage_common from cinder.volume.drivers.inspur.instorage import instorage_iscsi from cinder.volume import qos_specs from cinder.volume import volume_types from cinder.volume import volume_utils CONF = cfg.CONF @ddt.ddt class InStorageMCSCommonDriverTestCase(test.TestCase): def setUp(self): super(InStorageMCSCommonDriverTestCase, self).setUp() self._def_flags = {'san_ip': 'hostname', 'instorage_san_secondary_ip': 'secondaryname', 'san_login': 'user', 'san_password': 'pass', 'instorage_mcs_volpool_name': fakes.MCS_POOLS, 'instorage_mcs_localcopy_timeout': 20, 'instorage_mcs_localcopy_rate': 49, 'instorage_mcs_allow_tenant_qos': True} config = conf.Configuration(instorage_common.instorage_mcs_opts, conf.SHARED_CONF_GROUP) # Override any configs that may get set in __init__ self._reset_flags(config) self.driver = fakes.FakeInStorageMCSISCSIDriver(configuration=config) self._driver = instorage_iscsi.InStorageMCSISCSIDriver( configuration=config) wwpns = ['1234567890123450', '6543210987654325'] initiator = 'test.initiator.%s' % 123450 self._connector = {'ip': '1.234.56.78', 'host': 'instorage-mcs-test', 'wwpns': wwpns, 'initiator': initiator} self.sim = fakes.FakeInStorage(fakes.MCS_POOLS) self.driver.set_fake_storage(self.sim) self.ctxt = context.get_admin_context() self.ctxt = context.get_admin_context() self.driver.db = db self.driver.do_setup(None) self.driver.check_for_setup_error() self.driver._assistant.check_lcmapping_interval = 0 self.mock_object(instorage_iscsi.InStorageMCSISCSIDriver, 'DEFAULT_GR_SLEEP', 0) self.mock_object(greenthread, 'sleep') def _set_flag(self, flag, value, configuration=None): if not configuration: configuration = self.driver.configuration group = configuration.config_group self.override_config(flag, value, group) def _reset_flags(self, configuration=None): if not configuration: configuration = self.driver.configuration CONF.reset() for k, v in self._def_flags.items(): self._set_flag(k, v, configuration) def _assert_vol_exists(self, name, exists): is_vol_defined = self.driver._assistant.is_vdisk_defined(name) self.assertEqual(exists, is_vol_defined) def test_instorage_mcs_connectivity(self): # Make sure we detect if the 
pool doesn't exist no_exist_pool = 'i-dont-exist-%s' % 56789 self._set_flag('instorage_mcs_volpool_name', no_exist_pool) self.assertRaises(exception.InvalidInput, self.driver.do_setup, None) self._reset_flags() # Check the case where the user didn't configure IP addresses # as well as receiving unexpected results from the storage self.sim.error_injection('lsnodecanister', 'header_mismatch') self.assertRaises(exception.VolumeBackendAPIException, self.driver.do_setup, None) self.sim.error_injection('lsnodecanister', 'remove_field') self.assertRaises(exception.VolumeBackendAPIException, self.driver.do_setup, None) self.sim.error_injection('lsportip', 'header_mismatch') self.assertRaises(exception.VolumeBackendAPIException, self.driver.do_setup, None) self.sim.error_injection('lsportip', 'remove_field') self.assertRaises(exception.VolumeBackendAPIException, self.driver.do_setup, None) # Check with bad parameters self._set_flag('san_ip', '') self.assertRaises(exception.InvalidInput, self.driver.check_for_setup_error) self._reset_flags() self._set_flag('san_password', None) self._set_flag('san_private_key', None) self.assertRaises(exception.InvalidInput, self.driver.check_for_setup_error) self._reset_flags() self._set_flag('instorage_mcs_vol_grainsize', 42) self.assertRaises(exception.InvalidInput, self.driver.check_for_setup_error) self._reset_flags() self._set_flag('instorage_mcs_vol_compression', True) self._set_flag('instorage_mcs_vol_rsize', -1) self.assertRaises(exception.InvalidInput, self.driver.check_for_setup_error) self._reset_flags() self._set_flag('instorage_mcs_vol_iogrp', 5) self.assertRaises(exception.InvalidInput, self.driver.check_for_setup_error) self._reset_flags() self.sim.error_injection('lslicense', 'no_compression') self.sim.error_injection('lsguicapabilities', 'no_compression') self._set_flag('instorage_mcs_vol_compression', True) self.driver.do_setup(None) self.assertRaises(exception.InvalidInput, self.driver.check_for_setup_error) self._reset_flags() # Finally, check with good parameters self.driver.do_setup(None) @mock.patch.object(ssh_utils, 'SSHPool') @mock.patch.object(processutils, 'ssh_execute') def test_run_ssh_set_up_with_san_ip(self, mock_ssh_execute, mock_ssh_pool): ssh_cmd = ['mcsinq'] self._driver._run_ssh(ssh_cmd) mock_ssh_pool.assert_called_once_with( self._driver.configuration.san_ip, self._driver.configuration.san_ssh_port, self._driver.configuration.ssh_conn_timeout, self._driver.configuration.san_login, password=self._driver.configuration.san_password, privatekey=self._driver.configuration.san_private_key, min_size=self._driver.configuration.ssh_min_pool_conn, max_size=self._driver.configuration.ssh_max_pool_conn) @mock.patch.object(ssh_utils, 'SSHPool') @mock.patch.object(processutils, 'ssh_execute') def test_run_ssh_set_up_with_secondary_ip(self, mock_ssh_execute, mock_ssh_pool): mock_ssh_pool.side_effect = [paramiko.SSHException, mock.MagicMock()] ssh_cmd = ['mcsinq'] self._driver._run_ssh(ssh_cmd) mock_ssh_pool.assert_called_with( self._driver.configuration.instorage_san_secondary_ip, self._driver.configuration.san_ssh_port, self._driver.configuration.ssh_conn_timeout, self._driver.configuration.san_login, password=self._driver.configuration.san_password, privatekey=self._driver.configuration.san_private_key, min_size=self._driver.configuration.ssh_min_pool_conn, max_size=self._driver.configuration.ssh_max_pool_conn) @mock.patch.object(ssh_utils, 'SSHPool') @mock.patch.object(processutils, 'ssh_execute') def test_run_ssh_fail_to_secondary_ip(self, 
mock_ssh_execute, mock_ssh_pool): mock_ssh_execute.side_effect = [processutils.ProcessExecutionError, mock.MagicMock()] ssh_cmd = ['mcsinq'] self._driver._run_ssh(ssh_cmd) mock_ssh_pool.assert_called_with( self._driver.configuration.instorage_san_secondary_ip, self._driver.configuration.san_ssh_port, self._driver.configuration.ssh_conn_timeout, self._driver.configuration.san_login, password=self._driver.configuration.san_password, privatekey=self._driver.configuration.san_private_key, min_size=self._driver.configuration.ssh_min_pool_conn, max_size=self._driver.configuration.ssh_max_pool_conn) @mock.patch.object(ssh_utils, 'SSHPool') @mock.patch.object(processutils, 'ssh_execute') def test_run_secondary_ip_ssh_fail_to_san_ip(self, mock_ssh_execute, mock_ssh_pool): mock_ssh_pool.side_effect = [ paramiko.SSHException, mock.MagicMock( ip=self._driver.configuration.instorage_san_secondary_ip), mock.MagicMock()] mock_ssh_execute.side_effect = [processutils.ProcessExecutionError, mock.MagicMock()] ssh_cmd = ['mcsinq'] self._driver._run_ssh(ssh_cmd) mock_ssh_pool.assert_called_with( self._driver.configuration.san_ip, self._driver.configuration.san_ssh_port, self._driver.configuration.ssh_conn_timeout, self._driver.configuration.san_login, password=self._driver.configuration.san_password, privatekey=self._driver.configuration.san_private_key, min_size=self._driver.configuration.ssh_min_pool_conn, max_size=self._driver.configuration.ssh_max_pool_conn) @mock.patch.object(ssh_utils, 'SSHPool') @mock.patch.object(processutils, 'ssh_execute') def test_run_ssh_both_ip_set_failure(self, mock_ssh_execute, mock_ssh_pool): mock_ssh_pool.side_effect = [ paramiko.SSHException, mock.MagicMock(), mock.MagicMock()] mock_ssh_execute.side_effect = [processutils.ProcessExecutionError, processutils.ProcessExecutionError] ssh_cmd = ['mcsinq'] self.assertRaises(processutils.ProcessExecutionError, self._driver._run_ssh, ssh_cmd) @mock.patch.object(ssh_utils, 'SSHPool') @mock.patch.object(processutils, 'ssh_execute') def test_run_ssh_second_ip_not_set_failure(self, mock_ssh_execute, mock_ssh_pool): mock_ssh_execute.side_effect = [processutils.ProcessExecutionError, mock.MagicMock()] self._set_flag('instorage_san_secondary_ip', None) ssh_cmd = ['mcsinq'] self.assertRaises(processutils.ProcessExecutionError, self._driver._run_ssh, ssh_cmd) @mock.patch.object(ssh_utils, 'SSHPool') @mock.patch.object(processutils, 'ssh_execute') def test_run_ssh_consistent_active_ip(self, mock_ssh_execute, mock_ssh_pool): ssh_cmd = ['mcsinq'] self._driver._run_ssh(ssh_cmd) self._driver._run_ssh(ssh_cmd) self._driver._run_ssh(ssh_cmd) self.assertEqual(self._driver.configuration.san_ip, self._driver.active_ip) mock_ssh_execute.side_effect = [paramiko.SSHException, mock.MagicMock(), mock.MagicMock()] self._driver._run_ssh(ssh_cmd) self._driver._run_ssh(ssh_cmd) self.assertEqual(self._driver.configuration.instorage_san_secondary_ip, self._driver.active_ip) def _generate_vol_info(self, vol_name, vol_id): pool = fakes.get_test_pool() prop = {'mdisk_grp_name': pool, 'volume_type_id': self.vt['id']} if vol_name: prop.update(volume_name=vol_name, volume_id=vol_id, volume_size=10) else: prop.update(size=10, volume_type_id=None, mdisk_grp_name=pool, host='openstack@mcs#%s' % pool) vol = testutils.create_volume(self.ctxt, **prop) return vol def _generate_snapshot_info(self, vol): snap = testutils.create_snapshot(self.ctxt, vol.id) return snap def _create_volume(self, **kwargs): pool = fakes.get_test_pool() prop = {'host': 'openstack@mcs#%s' % pool, 
'size': 1, 'volume_type_id': self.vt['id']} for p in prop.keys(): if p not in kwargs: kwargs[p] = prop[p] vol = testutils.create_volume(self.ctxt, **kwargs) self.driver.create_volume(vol) return vol def _delete_volume(self, volume): self.driver.delete_volume(volume) db.volume_destroy(self.ctxt, volume['id']) def _create_group_in_db(self, **kwargs): group = testutils.create_group(self.ctxt, **kwargs) return group def _create_group(self, **kwargs): group = self._create_group_in_db(**kwargs) model_update = self.driver.create_group(self.ctxt, group) self.assertEqual(fields.GroupStatus.AVAILABLE, model_update['status'], "Group creation failed") return group def _create_group_snapshot_in_db(self, grp_id, **kwargs): group_snapshot = testutils.create_group_snapshot(self.ctxt, group_id=grp_id, **kwargs) snapshots = [] grp_id = group_snapshot['group_id'] volumes = db.volume_get_all_by_group(self.ctxt.elevated(), grp_id) if not volumes: msg = "Group is empty. No group snapshot will be created." raise exception.InvalidGroup(reason=msg) for volume in volumes: snapshots.append(testutils.create_snapshot( self.ctxt, volume['id'], group_snapshot.id, group_snapshot.name, group_snapshot.id, fields.SnapshotStatus.CREATING)) return group_snapshot, snapshots def _create_group_snapshot(self, grp_id, **kwargs): group_snapshot, snapshots = self._create_group_snapshot_in_db( grp_id, **kwargs) model_update, snapshots_model = ( self.driver.create_group_snapshot( self.ctxt, group_snapshot, snapshots)) self.assertEqual('available', model_update['status'], "Group snapshot creation failed") for snapshot in snapshots_model: self.assertEqual(fields.SnapshotStatus.AVAILABLE, snapshot['status']) return group_snapshot, snapshots def _create_test_vol(self, opts): ctxt = testutils.get_test_admin_context() type_ref = volume_types.create(ctxt, 'testtype', opts) volume = self._generate_vol_info(None, None) volume.volume_type_id = type_ref['id'] volume.volume_type = objects.VolumeType.get_by_id(ctxt, type_ref['id']) self.driver.create_volume(volume) attrs = self.driver._assistant.get_vdisk_attributes(volume['name']) self.driver.delete_volume(volume) volume_types.destroy(ctxt, type_ref['id']) return attrs def _get_default_opts(self): opt = {'rsize': 2, 'warning': 0, 'autoexpand': True, 'grainsize': 256, 'compression': False, 'intier': True, 'iogrp': '0', 'qos': None, 'replication': False} return opt @mock.patch.object(instorage_common.InStorageAssistant, 'add_vdisk_qos') @mock.patch.object(instorage_common.InStorageMCSCommonDriver, '_get_vdisk_params') def test_instorage_mcs_create_volume_with_qos(self, get_vdisk_params, add_vdisk_qos): vol = testutils.create_volume( self.ctxt, volume_type_id=self.vt['id']) fake_opts = self._get_default_opts() # If the qos is empty, chvdisk should not be called # for create_volume. get_vdisk_params.return_value = fake_opts self.driver.create_volume(vol) self._assert_vol_exists(vol['name'], True) self.assertFalse(add_vdisk_qos.called) self.driver.delete_volume(vol) # If the qos is not empty, chvdisk should be called # for create_volume. 
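# The qos dict parsed from the volume type (here {'IOThrottling': 5000}) # is expected to be handed through unchanged to # InStorageAssistant.add_vdisk_qos along with the volume name, which is # what the assert_called_once_with check below verifies.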
fake_opts['qos'] = {'IOThrottling': 5000} get_vdisk_params.return_value = fake_opts self.driver.create_volume(vol) self._assert_vol_exists(vol['name'], True) add_vdisk_qos.assert_called_once_with(vol['name'], fake_opts['qos']) self.driver.delete_volume(vol) self._assert_vol_exists(vol['name'], False) def test_instorage_mcs_snapshots(self): vol1 = self._create_volume() snap1 = self._generate_snapshot_info(vol1) # Test timeout and volume cleanup self._set_flag('instorage_mcs_localcopy_timeout', 1) self.assertRaises(exception.VolumeDriverException, self.driver.create_snapshot, snap1) self._assert_vol_exists(snap1['name'], False) self._reset_flags() # Test prestartlcmap failing with mock.patch.object( instorage_common.InStorageSSH, 'prestartlcmap') as prestart: prestart.side_effect = exception.VolumeBackendAPIException(data='') self.assertRaises(exception.VolumeBackendAPIException, self.driver.create_snapshot, snap1) self.sim.error_injection('lslcmap', 'speed_up') self.sim.error_injection('startlcmap', 'bad_id') self.assertRaises(exception.VolumeBackendAPIException, self.driver.create_snapshot, snap1) self._assert_vol_exists(snap1['name'], False) self.sim.error_injection('prestartlcmap', 'bad_id') self.assertRaises(exception.VolumeBackendAPIException, self.driver.create_snapshot, snap1) self._assert_vol_exists(snap1['name'], False) # Test successful snapshot self.driver.create_snapshot(snap1) self._assert_vol_exists(snap1['name'], True) # Try to create a snapshot from an non-existing volume - should fail snap_vol_src = self._generate_vol_info(None, None) snap_novol = self._generate_snapshot_info(snap_vol_src) self.assertRaises(exception.VolumeDriverException, self.driver.create_snapshot, snap_novol) # We support deleting a volume that has snapshots, so delete the volume # first self.driver.delete_volume(vol1) self.driver.delete_snapshot(snap1) def test_instorage_mcs_create_cloned_volume(self): vol1 = self._create_volume() vol2 = testutils.create_volume( self.ctxt, volume_type_id=self.vt['id']) vol3 = testutils.create_volume( self.ctxt, volume_type_id=self.vt['id']) # Try to clone where source size > target size vol1['size'] = vol2['size'] + 1 self.assertRaises(exception.InvalidInput, self.driver.create_cloned_volume, vol2, vol1) self._assert_vol_exists(vol2['name'], False) # Try to clone where source size = target size vol1['size'] = vol2['size'] self.sim.error_injection('lslcmap', 'speed_up') self.driver.create_cloned_volume(vol2, vol1) # validate copyrate was set on the local copy for i, lcmap in self.sim._lcmappings_list.items(): if lcmap['target'] == vol1['name']: self.assertEqual('49', lcmap['copyrate']) self._assert_vol_exists(vol2['name'], True) # Try to clone where source size < target size vol3['size'] = vol1['size'] + 1 self.sim.error_injection('lslcmap', 'speed_up') self.driver.create_cloned_volume(vol3, vol1) # Validate copyrate was set on the local copy for i, lcmap in self.sim._lcmappings_list.items(): if lcmap['target'] == vol1['name']: self.assertEqual('49', lcmap['copyrate']) self._assert_vol_exists(vol3['name'], True) # Delete in the 'opposite' order to make sure it works self.driver.delete_volume(vol3) self._assert_vol_exists(vol3['name'], False) self.driver.delete_volume(vol2) self._assert_vol_exists(vol2['name'], False) self.driver.delete_volume(vol1) self._assert_vol_exists(vol1['name'], False) def test_instorage_mcs_create_volume_from_snapshot(self): vol1 = self._create_volume(size=10) snap1 = self._generate_snapshot_info(vol1) self.driver.create_snapshot(snap1) vol2 = 
self._generate_vol_info(None, None) vol3 = self._generate_vol_info(None, None) # Try to create a volume from a non-existing snapshot snap_vol_src = self._generate_vol_info(None, None) snap_novol = self._generate_snapshot_info(snap_vol_src) vol_novol = self._generate_vol_info(None, None) self.assertRaises(exception.VolumeDriverException, self.driver.create_volume_from_snapshot, vol_novol, snap_novol) # Fail the snapshot with mock.patch.object( instorage_common.InStorageSSH, 'prestartlcmap') as prestart: prestart.side_effect = exception.VolumeBackendAPIException(data='') self.assertRaises(exception.VolumeBackendAPIException, self.driver.create_volume_from_snapshot, vol2, snap1) self._assert_vol_exists(vol2['name'], False) # Try to create where volume size < snapshot size snap1.volume_size += 1 self.assertRaises(exception.InvalidInput, self.driver.create_volume_from_snapshot, vol2, snap1) self._assert_vol_exists(vol2['name'], False) snap1.volume_size -= 1 # Try to create where volume size > snapshot size vol2['size'] += 1 self.sim.error_injection('lslcmap', 'speed_up') self.driver.create_volume_from_snapshot(vol2, snap1) self._assert_vol_exists(vol2['name'], True) vol2['size'] -= 1 # Try to create where volume size = snapshot size self.sim.error_injection('lslcmap', 'speed_up') self.driver.create_volume_from_snapshot(vol3, snap1) self._assert_vol_exists(vol3['name'], True) # Delete in the 'opposite' order to make sure it works self.driver.delete_volume(vol3) self._assert_vol_exists(vol3['name'], False) self.driver.delete_volume(vol2) self._assert_vol_exists(vol2['name'], False) self.driver.delete_snapshot(snap1) self._assert_vol_exists(snap1['name'], False) self.driver.delete_volume(vol1) self._assert_vol_exists(vol1['name'], False) @mock.patch.object(instorage_common.InStorageAssistant, 'add_vdisk_qos') def test_instorage_mcs_create_volfromsnap_clone_with_qos(self, add_vdisk_qos): vol1 = self._create_volume() snap1 = self._generate_snapshot_info(vol1) self.driver.create_snapshot(snap1) vol2 = self._generate_vol_info(None, None) vol3 = self._generate_vol_info(None, None) fake_opts = self._get_default_opts() # Succeed self.sim.error_injection('lslcmap', 'speed_up') # If the qos is empty, chvdisk should not be called # for create_volume_from_snapshot. with mock.patch.object(instorage_iscsi.InStorageMCSISCSIDriver, '_get_vdisk_params') as get_vdisk_params: get_vdisk_params.return_value = fake_opts self.driver.create_volume_from_snapshot(vol2, snap1) self._assert_vol_exists(vol2['name'], True) self.assertFalse(add_vdisk_qos.called) self.driver.delete_volume(vol2) # If the qos is not empty, chvdisk should be called # for create_volume_from_snapshot. fake_opts['qos'] = {'IOThrottling': 5000} get_vdisk_params.return_value = fake_opts self.driver.create_volume_from_snapshot(vol2, snap1) self._assert_vol_exists(vol2['name'], True) add_vdisk_qos.assert_called_once_with(vol2['name'], fake_opts['qos']) self.sim.error_injection('lslcmap', 'speed_up') # If the qos is empty, chvdisk should not be called # for create_volume_from_snapshot. add_vdisk_qos.reset_mock() fake_opts['qos'] = None get_vdisk_params.return_value = fake_opts self.driver.create_cloned_volume(vol3, vol2) self._assert_vol_exists(vol3['name'], True) self.assertFalse(add_vdisk_qos.called) self.driver.delete_volume(vol3) # If the qos is not empty, chvdisk should be called # for create_volume_from_snapshot. 
fake_opts['qos'] = {'IOThrottling': 5000} get_vdisk_params.return_value = fake_opts self.driver.create_cloned_volume(vol3, vol2) self._assert_vol_exists(vol3['name'], True) add_vdisk_qos.assert_called_once_with(vol3['name'], fake_opts['qos']) # Delete in the 'opposite' order to make sure it works self.driver.delete_volume(vol3) self._assert_vol_exists(vol3['name'], False) self.driver.delete_volume(vol2) self._assert_vol_exists(vol2['name'], False) self.driver.delete_snapshot(snap1) self._assert_vol_exists(snap1['name'], False) self.driver.delete_volume(vol1) self._assert_vol_exists(vol1['name'], False) def test_instorage_mcs_delete_vol_with_lcmap(self): vol1 = self._create_volume() # create two snapshots snap1 = self._generate_snapshot_info(vol1) snap2 = self._generate_snapshot_info(vol1) self.driver.create_snapshot(snap1) self.driver.create_snapshot(snap2) vol2 = self._generate_vol_info(None, None) vol3 = self._generate_vol_info(None, None) # Create vol from the second snapshot self.sim.error_injection('lslcmap', 'speed_up') self.driver.create_volume_from_snapshot(vol2, snap2) # validate copyrate was set on the local copy for i, lcmap in self.sim._lcmappings_list.items(): if lcmap['target'] == vol2['name']: self.assertEqual('copying', lcmap['status']) self._assert_vol_exists(vol2['name'], True) self.sim.error_injection('lslcmap', 'speed_up') self.driver.create_cloned_volume(vol3, vol2) # validate copyrate was set on the local copy for i, lcmap in self.sim._lcmappings_list.items(): if lcmap['target'] == vol3['name']: self.assertEqual('copying', lcmap['status']) self._assert_vol_exists(vol3['name'], True) # Delete in the 'opposite' order to make sure it works self.driver.delete_volume(vol3) self._assert_vol_exists(vol3['name'], False) self.driver.delete_volume(vol2) self._assert_vol_exists(vol2['name'], False) self.driver.delete_snapshot(snap2) self._assert_vol_exists(snap2['name'], False) self.driver.delete_snapshot(snap1) self._assert_vol_exists(snap1['name'], False) self.driver.delete_volume(vol1) self._assert_vol_exists(vol1['name'], False) def test_instorage_mcs_volumes(self): # Create a first volume volume = self._generate_vol_info(None, None) self.driver.create_volume(volume) self.driver.ensure_export(None, volume) # Do nothing self.driver.create_export(None, volume, {}) self.driver.remove_export(None, volume) # Make sure volume attributes are as they should be attributes = self.driver._assistant.get_vdisk_attributes(volume[ 'name']) attr_size = float(attributes['capacity']) / units.Gi # bytes to GB self.assertEqual(float(volume['size']), attr_size) pool = fakes.get_test_pool() self.assertEqual(pool, attributes['mdisk_grp_name']) # Try to create the volume again (should fail) self.assertRaises(exception.VolumeBackendAPIException, self.driver.create_volume, volume) # Try to delete a volume that doesn't exist (should not fail) vol_no_exist = self._generate_vol_info('i_dont_exist', '111111') self.driver.delete_volume(vol_no_exist) # Ensure export for volume that doesn't exist (should not fail) self.driver.ensure_export(None, vol_no_exist) # Delete the volume self.driver.delete_volume(volume) def test_instorage_mcs_volume_name(self): # Create a volume with space in name volume = self._create_volume() self.driver.ensure_export(None, volume) # Ensure lsvdisk can find the volume by name attributes = self.driver._assistant.get_vdisk_attributes(volume.name) self.assertIn('name', attributes) self.assertEqual(volume.name, attributes['name']) self.driver.delete_volume(volume) def 
test_instorage_mcs_volume_params(self): # Option test matrix # Option Value Covered by test # # rsize -1 1 # rsize 2 2,3 # warning 0 2 # warning 80 3 # autoexpand True 2 # autoexpand False 3 # grainsize 32 2 # grainsize 256 3 # compression True 4 # compression False 2,3 # intier True 1,3 # intier False 2 # iogrp 0 1 # iogrp 1 2 opts_list = [] chck_list = [] opts_list.append({'rsize': -1, 'intier': True, 'iogrp': '0'}) chck_list.append({'free_capacity': '0', 'in_tier': 'on', 'IO_group_id': '0'}) test_iogrp = '1' opts_list.append({'rsize': 2, 'compression': False, 'warning': 0, 'autoexpand': True, 'grainsize': 32, 'intier': False, 'iogrp': test_iogrp}) chck_list.append({'-free_capacity': '0', 'compressed_copy': 'no', 'warning': '0', 'autoexpand': 'on', 'grainsize': '32', 'in_tier': 'off', 'IO_group_id': (test_iogrp)}) opts_list.append({'rsize': 2, 'compression': False, 'warning': 80, 'autoexpand': False, 'grainsize': 256, 'intier': True}) chck_list.append({'-free_capacity': '0', 'compressed_copy': 'no', 'warning': '80', 'autoexpand': 'off', 'grainsize': '256', 'in_tier': 'on'}) opts_list.append({'rsize': 2, 'compression': True}) chck_list.append({'-free_capacity': '0', 'compressed_copy': 'yes'}) for idx in range(len(opts_list)): attrs = self._create_test_vol(opts_list[idx]) for k, v in chck_list[idx].items(): try: if k[0] == '-': k = k[1:] self.assertNotEqual(v, attrs[k]) else: self.assertEqual(v, attrs[k]) except processutils.ProcessExecutionError as e: if 'CMMVC7050E' not in e.stderr: raise def test_instorage_mcs_unicode_host_and_volume_names(self): # We'll check with iSCSI only - nothing protocol-dependent here self.driver.do_setup(None) rand_id = 56789 volume1 = self._generate_vol_info(None, None) self.driver.create_volume(volume1) self._assert_vol_exists(volume1['name'], True) self.assertRaises(exception.VolumeDriverException, self.driver._assistant.create_host, {'host': 12345}) # Add a host first to make life interesting (this host and # conn['host'] should be translated to the same prefix, and the # initiator should differentiate tmpconn1 = {'initiator': u'unicode:initiator1.%s' % rand_id, 'ip': '10.10.10.10', 'host': u'unicode.foo}.bar{.baz-%s' % rand_id} self.driver._assistant.create_host(tmpconn1) # Add a host with a different prefix tmpconn2 = {'initiator': u'unicode:initiator2.%s' % rand_id, 'ip': '10.10.10.11', 'host': u'unicode.hello.world-%s' % rand_id} self.driver._assistant.create_host(tmpconn2) conn = {'initiator': u'unicode:initiator3.%s' % rand_id, 'ip': '10.10.10.12', 'host': u'unicode.foo.bar.baz-%s' % rand_id} self.driver.initialize_connection(volume1, conn) host_name = self.driver._assistant.get_host_from_connector(conn) self.assertIsNotNone(host_name) self.driver.terminate_connection(volume1, conn) host_name = self.driver._assistant.get_host_from_connector(conn) self.assertIsNone(host_name) self.driver.delete_volume(volume1) # Clean up temporary hosts for tmpconn in [tmpconn1, tmpconn2]: host_name = self.driver._assistant.get_host_from_connector(tmpconn) self.assertIsNotNone(host_name) self.driver._assistant.delete_host(host_name) def test_instorage_mcs_delete_volume_snapshots(self): # Create a volume with two snapshots master = self._create_volume() # Delete a snapshot snap = self._generate_snapshot_info(master) self.driver.create_snapshot(snap) self._assert_vol_exists(snap['name'], True) self.driver.delete_snapshot(snap) self._assert_vol_exists(snap['name'], False) # Delete a volume with snapshots (regular) snap = self._generate_snapshot_info(master) 
self.driver.create_snapshot(snap) self._assert_vol_exists(snap['name'], True) self.driver.delete_volume(master) self._assert_vol_exists(master['name'], False) # Fail create volume from snapshot - will force delete the volume volfs = self._generate_vol_info(None, None) self.sim.error_injection('startlcmap', 'bad_id') self.sim.error_injection('lslcmap', 'speed_up') self.assertRaises(exception.VolumeBackendAPIException, self.driver.create_volume_from_snapshot, volfs, snap) self._assert_vol_exists(volfs['name'], False) # Create volume from snapshot and delete it volfs = self._generate_vol_info(None, None) self.sim.error_injection('lslcmap', 'speed_up') self.driver.create_volume_from_snapshot(volfs, snap) self._assert_vol_exists(volfs['name'], True) self.driver.delete_volume(volfs) self._assert_vol_exists(volfs['name'], False) # Create volume from snapshot and delete the snapshot volfs = self._generate_vol_info(None, None) self.sim.error_injection('lslcmap', 'speed_up') self.driver.create_volume_from_snapshot(volfs, snap) self.driver.delete_snapshot(snap) self._assert_vol_exists(snap['name'], False) # Fail create clone - will force delete the target volume clone = self._generate_vol_info(None, None) self.sim.error_injection('startlcmap', 'bad_id') self.sim.error_injection('lslcmap', 'speed_up') self.assertRaises(exception.VolumeBackendAPIException, self.driver.create_cloned_volume, clone, volfs) self._assert_vol_exists(clone['name'], False) # Create the clone, delete the source and target clone = self._generate_vol_info(None, None) self.sim.error_injection('lslcmap', 'speed_up') self.driver.create_cloned_volume(clone, volfs) self._assert_vol_exists(clone['name'], True) self.driver.delete_volume(volfs) self._assert_vol_exists(volfs['name'], False) self.driver.delete_volume(clone) self._assert_vol_exists(clone['name'], False) @ddt.data((True, None), (True, 5), (False, -1), (False, 100)) @ddt.unpack def test_instorage_mcs_get_volume_stats( self, is_thin_provisioning_enabled, rsize): self._set_flag('reserved_percentage', 25) self._set_flag('instorage_mcs_vol_rsize', rsize) stats = self.driver.get_volume_stats() for each_pool in stats['pools']: self.assertIn(each_pool['pool_name'], self._def_flags['instorage_mcs_volpool_name']) self.assertFalse(each_pool['multiattach']) self.assertLessEqual(each_pool['free_capacity_gb'], each_pool['total_capacity_gb']) self.assertEqual(25, each_pool['reserved_percentage']) self.assertEqual(is_thin_provisioning_enabled, each_pool['thin_provisioning_support']) self.assertEqual(not is_thin_provisioning_enabled, each_pool['thick_provisioning_support']) expected = 'instorage-mcs-sim' self.assertEqual(expected, stats['volume_backend_name']) for each_pool in stats['pools']: self.assertIn(each_pool['pool_name'], self._def_flags['instorage_mcs_volpool_name']) self.assertAlmostEqual(3328.0, each_pool['total_capacity_gb']) self.assertAlmostEqual(3287.5, each_pool['free_capacity_gb']) if is_thin_provisioning_enabled: self.assertAlmostEqual( 1576.96, each_pool['provisioned_capacity_gb']) def test_get_pool(self): ctxt = testutils.get_test_admin_context() type_ref = volume_types.create(ctxt, 'testtype', None) volume = self._generate_vol_info(None, None) volume.volume_type_id = type_ref['id'] volume.volume_type = objects.VolumeType.get_by_id(ctxt, type_ref['id']) self.driver.create_volume(volume) self.assertEqual(volume['mdisk_grp_name'], self.driver.get_pool(volume)) self.driver.delete_volume(volume) volume_types.destroy(ctxt, type_ref['id']) def 
test_instorage_mcs_extend_volume(self): volume = self._create_volume() self.driver.extend_volume(volume, '13') attrs = self.driver._assistant.get_vdisk_attributes(volume['name']) vol_size = int(attrs['capacity']) / units.Gi self.assertAlmostEqual(vol_size, 13) snap = self._generate_snapshot_info(volume) self.driver.create_snapshot(snap) self._assert_vol_exists(snap['name'], True) self.assertRaises(exception.VolumeDriverException, self.driver.extend_volume, volume, '16') self.driver.delete_snapshot(snap) self.driver.delete_volume(volume) @mock.patch.object(instorage_rep.InStorageMCSReplicationAsyncCopy, 'create_relationship') @mock.patch.object(instorage_rep.InStorageMCSReplicationAsyncCopy, 'extend_target_volume') @mock.patch.object(instorage_common.InStorageAssistant, 'delete_relationship') @mock.patch.object(instorage_common.InStorageAssistant, 'get_relationship_info') def _instorage_mcs_extend_volume_replication(self, get_relationship, delete_relationship, extend_target_volume, create_relationship): fake_target = mock.Mock() rep_type = 'async' self.driver.replications[rep_type] = ( self.driver.replication_factory(rep_type, fake_target)) volume = self._create_volume() volume['replication_status'] = 'enabled' fake_target_vol = 'vol-target-id' get_relationship.return_value = {'aux_vdisk_name': fake_target_vol} with mock.patch.object( self.driver, '_get_volume_replicated_type_mirror') as mirror_type: mirror_type.return_value = 'async' self.driver.extend_volume(volume, '13') attrs = self.driver._assistant.get_vdisk_attributes(volume['name']) vol_size = int(attrs['capacity']) / units.Gi self.assertAlmostEqual(vol_size, 13) delete_relationship.assert_called_once_with(volume['name']) extend_target_volume.assert_called_once_with(fake_target_vol, 12) create_relationship.assert_called_once_with(volume, fake_target_vol) self.driver.delete_volume(volume) def _instorage_mcs_extend_volume_replication_failover(self): volume = self._create_volume() volume['replication_status'] = 'failed-over' with mock.patch.object( self.driver, '_get_volume_replicated_type_mirror') as mirror_type: mirror_type.return_value = 'async' self.driver.extend_volume(volume, '13') attrs = self.driver._assistant.get_vdisk_attributes(volume['name']) vol_size = int(attrs['capacity']) / units.Gi self.assertAlmostEqual(vol_size, 13) self.driver.delete_volume(volume) def _check_loc_info(self, capabilities, expected): volume = self._create_volume() host = {'host': 'foo', 'capabilities': capabilities} ctxt = context.get_admin_context() moved, model_update = self.driver.migrate_volume(ctxt, volume, host) self.assertEqual(expected['moved'], moved) self.assertEqual(expected['model_update'], model_update) self.driver.delete_volume(volume) def test_instorage_mcs_migrate_bad_loc_info(self): self._check_loc_info({}, {'moved': False, 'model_update': None}) cap = {'location_info': 'foo'} self._check_loc_info(cap, {'moved': False, 'model_update': None}) cap = {'location_info': 'FooDriver:foo:bar'} self._check_loc_info(cap, {'moved': False, 'model_update': None}) cap = {'location_info': 'InStorageMCSDriver:foo:bar'} self._check_loc_info(cap, {'moved': False, 'model_update': None}) def test_instorage_mcs_volume_migrate(self): # Make sure we don't call migrate_volume_vdiskcopy self.driver.do_setup(None) loc = ('InStorageMCSDriver:' + self.driver._state['system_id'] + ':openstack2') cap = {'location_info': loc, 'extent_size': '256'} host = {'host': 'openstack@mcs#openstack2', 'capabilities': cap} ctxt = context.get_admin_context() volume = 
self._create_volume() volume['volume_type_id'] = None self.driver.migrate_volume(ctxt, volume, host) self._delete_volume(volume) def test_instorage_mcs_get_vdisk_params(self): self.driver.do_setup(None) fake_qos = {'qos:IOThrottling': '5000'} expected_qos = {'IOThrottling': 5000} fake_opts = self._get_default_opts() # The parameters returned should be the same as the default options, # if the QoS is empty. vol_type_empty_qos = self._create_volume_type_qos(True, None) type_id = vol_type_empty_qos['id'] params = self.driver._get_vdisk_params(type_id, volume_type=vol_type_empty_qos, volume_metadata=None) self.assertEqual(fake_opts, params) volume_types.destroy(self.ctxt, type_id) # If the QoS is set via the qos association with the volume type, # qos value should be set in the returned parameters. vol_type_qos = self._create_volume_type_qos(False, fake_qos) type_id = vol_type_qos['id'] # If type_id is not none and volume_type is none, it should work fine. params = self.driver._get_vdisk_params(type_id, volume_type=None, volume_metadata=None) self.assertEqual(expected_qos, params['qos']) # If type_id is not none and volume_type is not none, it should # work fine. params = self.driver._get_vdisk_params(type_id, volume_type=vol_type_qos, volume_metadata=None) self.assertEqual(expected_qos, params['qos']) # If type_id is none and volume_type is not none, it should work fine. params = self.driver._get_vdisk_params(None, volume_type=vol_type_qos, volume_metadata=None) self.assertEqual(expected_qos, params['qos']) # If both type_id and volume_type are none, no qos will be returned # in the parameter. params = self.driver._get_vdisk_params(None, volume_type=None, volume_metadata=None) self.assertIsNone(params['qos']) qos_spec = volume_types.get_volume_type_qos_specs(type_id) volume_types.destroy(self.ctxt, type_id) qos_specs.delete(self.ctxt, qos_spec['qos_specs']['id']) # If the QoS is set via the extra specs in the volume type, # qos value should be set in the returned parameters. vol_type_qos = self._create_volume_type_qos(True, fake_qos) type_id = vol_type_qos['id'] # If type_id is not none and volume_type is none, it should work fine. params = self.driver._get_vdisk_params(type_id, volume_type=None, volume_metadata=None) self.assertEqual(expected_qos, params['qos']) # If type_id is not none and volume_type is not none, # it should work fine. params = self.driver._get_vdisk_params(type_id, volume_type=vol_type_qos, volume_metadata=None) self.assertEqual(expected_qos, params['qos']) # If type_id is none and volume_type is not none, # it should work fine. params = self.driver._get_vdisk_params(None, volume_type=vol_type_qos, volume_metadata=None) self.assertEqual(expected_qos, params['qos']) # If both type_id and volume_type are none, no qos will be returned # in the parameter. params = self.driver._get_vdisk_params(None, volume_type=None, volume_metadata=None) self.assertIsNone(params['qos']) volume_types.destroy(self.ctxt, type_id) # If the QoS is set in the volume metadata, # qos value should be set in the returned parameters. metadata = [{'key': 'qos:IOThrottling', 'value': 4000}] expected_qos_metadata = {'IOThrottling': 4000} params = self.driver._get_vdisk_params(None, volume_type=None, volume_metadata=metadata) self.assertEqual(expected_qos_metadata, params['qos']) # If the QoS is set both in the metadata and the volume type, the one # in the volume type will take effect. 
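# With the values used here the metadata asks for IOThrottling=4000 # while the volume type carries IOThrottling=5000, so the returned qos # is expected to be {'IOThrottling': 5000}.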
vol_type_qos = self._create_volume_type_qos(True, fake_qos) type_id = vol_type_qos['id'] params = self.driver._get_vdisk_params(type_id, volume_type=None, volume_metadata=metadata) self.assertEqual(expected_qos, params['qos']) volume_types.destroy(self.ctxt, type_id) # If the QoS is set both via the qos association and the # extra specs, the one from the qos association will take effect. fake_qos_associate = {'qos:IOThrottling': '6000'} expected_qos_associate = {'IOThrottling': 6000} vol_type_qos = self._create_volume_type_qos_both(fake_qos, fake_qos_associate) type_id = vol_type_qos['id'] params = self.driver._get_vdisk_params(type_id, volume_type=None, volume_metadata=None) self.assertEqual(expected_qos_associate, params['qos']) qos_spec = volume_types.get_volume_type_qos_specs(type_id) volume_types.destroy(self.ctxt, type_id) qos_specs.delete(self.ctxt, qos_spec['qos_specs']['id']) @mock.patch.object(instorage_common.InStorageAssistant, 'disable_vdisk_qos') @mock.patch.object(instorage_common.InStorageAssistant, 'update_vdisk_qos') def test_instorage_mcs_retype_no_copy(self, update_vdisk_qos, disable_vdisk_qos): self.driver.do_setup(None) loc = ('InStorageMCSDriver:' + self.driver._state['system_id'] + ':openstack') cap = {'location_info': loc, 'extent_size': '128'} self.driver._stats = {'location_info': loc} host = {'host': 'openstack@mcs#openstack', 'capabilities': cap} ctxt = context.get_admin_context() key_specs_old = {'intier': False, 'warning': 2, 'autoexpand': True} key_specs_new = {'intier': True, 'warning': 5, 'autoexpand': False} old_type_ref = volume_types.create(ctxt, 'old', key_specs_old) new_type_ref = volume_types.create(ctxt, 'new', key_specs_new) diff, _equal = volume_types.volume_types_diff(ctxt, old_type_ref['id'], new_type_ref['id']) volume = self._generate_vol_info(None, None) old_type = objects.VolumeType.get_by_id(ctxt, old_type_ref['id']) volume['volume_type'] = old_type volume['host'] = host['host'] new_type = objects.VolumeType.get_by_id(ctxt, new_type_ref['id']) self.driver.create_volume(volume) self.driver.retype(ctxt, volume, new_type, diff, host) attrs = self.driver._assistant.get_vdisk_attributes(volume['name']) self.assertEqual('on', attrs['in_tier'], 'Volume retype failed') self.assertEqual('5', attrs['warning'], 'Volume retype failed') self.assertEqual('off', attrs['autoexpand'], 'Volume retype failed') self.driver.delete_volume(volume) fake_opts = self._get_default_opts() fake_opts_old = self._get_default_opts() fake_opts_old['qos'] = {'IOThrottling': 4000} fake_opts_qos = self._get_default_opts() fake_opts_qos['qos'] = {'IOThrottling': 5000} self.driver.create_volume(volume) with mock.patch.object(instorage_iscsi.InStorageMCSISCSIDriver, '_get_vdisk_params') as get_vdisk_params: # If qos is empty for both the source and target volumes, # add_vdisk_qos and disable_vdisk_qos will not be called for # retype. get_vdisk_params.side_effect = [fake_opts, fake_opts] self.driver.retype(ctxt, volume, new_type, diff, host) self.assertFalse(update_vdisk_qos.called) self.assertFalse(disable_vdisk_qos.called) self.driver.delete_volume(volume) self.driver.create_volume(volume) update_vdisk_qos.reset_mock() with mock.patch.object(instorage_iscsi.InStorageMCSISCSIDriver, '_get_vdisk_params') as get_vdisk_params: # If qos is specified for both source and target volumes, # add_vdisk_qos will be called for retype, and disable_vdisk_qos # will not be called. 
get_vdisk_params.side_effect = [fake_opts_old, fake_opts_qos] self.driver.retype(ctxt, volume, new_type, diff, host) update_vdisk_qos.assert_called_with(volume['name'], fake_opts_qos['qos']) self.assertFalse(disable_vdisk_qos.called) self.driver.delete_volume(volume) self.driver.create_volume(volume) update_vdisk_qos.reset_mock() with mock.patch.object(instorage_iscsi.InStorageMCSISCSIDriver, '_get_vdisk_params') as get_vdisk_params: # If qos is empty for source and speficied for target volume, # add_vdisk_qos will be called for retype, and disable_vdisk_qos # will not be called. get_vdisk_params.side_effect = [fake_opts, fake_opts_qos] self.driver.retype(ctxt, volume, new_type, diff, host) update_vdisk_qos.assert_called_with(volume['name'], fake_opts_qos['qos']) self.assertFalse(disable_vdisk_qos.called) self.driver.delete_volume(volume) self.driver.create_volume(volume) update_vdisk_qos.reset_mock() with mock.patch.object(instorage_iscsi.InStorageMCSISCSIDriver, '_get_vdisk_params') as get_vdisk_params: # If qos is empty for target volume and specified for source # volume, add_vdisk_qos will not be called for retype, and # disable_vdisk_qos will be called. get_vdisk_params.side_effect = [fake_opts_qos, fake_opts] self.driver.retype(ctxt, volume, new_type, diff, host) self.assertFalse(update_vdisk_qos.called) disable_vdisk_qos.assert_called_with(volume['name'], fake_opts_qos['qos']) self.driver.delete_volume(volume) def test_instorage_mcs_retype_only_change_iogrp(self): self.driver.do_setup(None) loc = ('InStorageMCSDriver:' + self.driver._state['system_id'] + ':openstack') cap = {'location_info': loc, 'extent_size': '128'} self.driver._stats = {'location_info': loc} host = {'host': 'openstack@mcs#openstack', 'capabilities': cap} ctxt = context.get_admin_context() key_specs_old = {'iogrp': 0} key_specs_new = {'iogrp': 1} old_type_ref = volume_types.create(ctxt, 'old', key_specs_old) new_type_ref = volume_types.create(ctxt, 'new', key_specs_new) diff, _equal = volume_types.volume_types_diff(ctxt, old_type_ref['id'], new_type_ref['id']) volume = self._generate_vol_info(None, None) old_type = objects.VolumeType.get_by_id(ctxt, old_type_ref['id']) volume['volume_type'] = old_type volume['host'] = host['host'] new_type = objects.VolumeType.get_by_id(ctxt, new_type_ref['id']) self.driver.create_volume(volume) attrs = self.driver._assistant.get_vdisk_attributes(volume['name']) self.assertEqual('0', attrs['IO_group_id'], 'Volume retype ' 'failed') self.driver.retype(ctxt, volume, new_type, diff, host) attrs = self.driver._assistant.get_vdisk_attributes(volume['name']) self.assertEqual('1', attrs['IO_group_id'], 'Volume retype ' 'failed') self.driver.delete_volume(volume) @mock.patch.object(instorage_common.InStorageAssistant, 'disable_vdisk_qos') @mock.patch.object(instorage_common.InStorageAssistant, 'update_vdisk_qos') def test_instorage_mcs_retype_need_copy(self, update_vdisk_qos, disable_vdisk_qos): self.driver.do_setup(None) loc = ('InStorageMCSDriver:' + self.driver._state['system_id'] + ':openstack') cap = {'location_info': loc, 'extent_size': '128'} self.driver._stats = {'location_info': loc} host = {'host': 'openstack@mcs#openstack', 'capabilities': cap} ctxt = context.get_admin_context() key_specs_old = {'compression': True, 'iogrp': 0} key_specs_new = {'compression': False, 'iogrp': 1} old_type_ref = volume_types.create(ctxt, 'old', key_specs_old) new_type_ref = volume_types.create(ctxt, 'new', key_specs_new) diff, _equal = volume_types.volume_types_diff(ctxt, old_type_ref['id'], 
new_type_ref['id']) volume = self._generate_vol_info(None, None) old_type = objects.VolumeType.get_by_id(ctxt, old_type_ref['id']) volume['volume_type'] = old_type volume['host'] = host['host'] new_type = objects.VolumeType.get_by_id(ctxt, new_type_ref['id']) self.driver.create_volume(volume) self.driver.retype(ctxt, volume, new_type, diff, host) attrs = self.driver._assistant.get_vdisk_attributes(volume['name']) self.assertEqual('no', attrs['compressed_copy']) self.assertEqual('1', attrs['IO_group_id'], 'Volume retype ' 'failed') self.driver.delete_volume(volume) fake_opts = self._get_default_opts() fake_opts_old = self._get_default_opts() fake_opts_old['qos'] = {'IOThrottling': 4000} fake_opts_qos = self._get_default_opts() fake_opts_qos['qos'] = {'IOThrottling': 5000} self.driver.create_volume(volume) with mock.patch.object(instorage_iscsi.InStorageMCSISCSIDriver, '_get_vdisk_params') as get_vdisk_params: # If qos is empty for both the source and target volumes, # add_vdisk_qos and disable_vdisk_qos will not be called for # retype. get_vdisk_params.side_effect = [fake_opts, fake_opts] self.driver.retype(ctxt, volume, new_type, diff, host) self.assertFalse(update_vdisk_qos.called) self.assertFalse(disable_vdisk_qos.called) self.driver.delete_volume(volume) self.driver.create_volume(volume) update_vdisk_qos.reset_mock() with mock.patch.object(instorage_iscsi.InStorageMCSISCSIDriver, '_get_vdisk_params') as get_vdisk_params: # If qos is specified for both source and target volumes, # add_vdisk_qos will be called for retype, and disable_vdisk_qos # will not be called. get_vdisk_params.side_effect = [fake_opts_old, fake_opts_qos] self.driver.retype(ctxt, volume, new_type, diff, host) update_vdisk_qos.assert_called_with(volume['name'], fake_opts_qos['qos']) self.assertFalse(disable_vdisk_qos.called) self.driver.delete_volume(volume) self.driver.create_volume(volume) update_vdisk_qos.reset_mock() with mock.patch.object(instorage_iscsi.InStorageMCSISCSIDriver, '_get_vdisk_params') as get_vdisk_params: # If qos is empty for source and speficied for target volume, # add_vdisk_qos will be called for retype, and disable_vdisk_qos # will not be called. get_vdisk_params.side_effect = [fake_opts, fake_opts_qos] self.driver.retype(ctxt, volume, new_type, diff, host) update_vdisk_qos.assert_called_with(volume['name'], fake_opts_qos['qos']) self.assertFalse(disable_vdisk_qos.called) self.driver.delete_volume(volume) self.driver.create_volume(volume) update_vdisk_qos.reset_mock() with mock.patch.object(instorage_iscsi.InStorageMCSISCSIDriver, '_get_vdisk_params') as get_vdisk_params: # If qos is empty for target volume and specified for source # volume, add_vdisk_qos will not be called for retype, and # disable_vdisk_qos will be called. 
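# The four qos cases in this test cover the retype matrix: empty->empty # touches nothing, set->set and empty->set call update_vdisk_qos with # the new value, and set->empty calls disable_vdisk_qos with the old # value.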
get_vdisk_params.side_effect = [fake_opts_qos, fake_opts] self.driver.retype(ctxt, volume, new_type, diff, host) self.assertFalse(update_vdisk_qos.called) disable_vdisk_qos.assert_called_with(volume['name'], fake_opts_qos['qos']) self.driver.delete_volume(volume) def test_set_storage_code_level_success(self): res = self.driver._assistant.get_system_info() self.assertEqual((3, 1, 1, 0), res['code_level'], 'Get code level error') @mock.patch.object(instorage_common.InStorageAssistant, 'rename_vdisk') def test_instorage_update_migrated_volume(self, rename_vdisk): ctxt = testutils.get_test_admin_context() backend_volume = self._create_volume() volume = self._create_volume() model_update = self.driver.update_migrated_volume(ctxt, volume, backend_volume, 'available') rename_vdisk.assert_called_once_with(backend_volume.name, volume.name) self.assertEqual({'_name_id': None}, model_update) rename_vdisk.reset_mock() rename_vdisk.side_effect = exception.VolumeBackendAPIException(data='') model_update = self.driver.update_migrated_volume(ctxt, volume, backend_volume, 'available') self.assertEqual({'_name_id': backend_volume.id}, model_update) rename_vdisk.reset_mock() rename_vdisk.side_effect = exception.VolumeBackendAPIException(data='') model_update = self.driver.update_migrated_volume(ctxt, volume, backend_volume, 'attached') self.assertEqual({'_name_id': backend_volume.id}, model_update) def test_instorage_vdisk_copy_ops(self): ctxt = testutils.get_test_admin_context() volume = self._create_volume() driver = self.driver dest_pool = volume_utils.extract_host(volume['host'], 'pool') new_ops = driver._assistant.add_vdisk_copy(volume['name'], dest_pool, None, self.driver._state, self.driver.configuration) self.driver._add_vdisk_copy_op(ctxt, volume, new_ops) self.assertEqual([new_ops], self.driver._vdiskcopyops[volume.id]['copyops'], 'InStorage driver add vdisk copy error.') self.driver._check_volume_copy_ops() self.driver._rm_vdisk_copy_op(ctxt, volume.id, new_ops[0], new_ops[1]) self.assertNotIn(volume.id, self.driver._vdiskcopyops, 'InStorage driver delete vdisk copy error') self._delete_volume(volume) def test_instorage_delete_with_vdisk_copy_ops(self): volume = self._create_volume() self.driver._vdiskcopyops = {volume['id']: {'name': volume.name, 'copyops': [('0', '1')]}} with mock.patch.object(self.driver, '_vdiskcopyops_loop'): self.assertIn(volume['id'], self.driver._vdiskcopyops) self.driver.delete_volume(volume) self.assertNotIn(volume['id'], self.driver._vdiskcopyops) def _create_volume_type_qos(self, extra_specs, fake_qos): # Generate a QoS volume type for volume. if extra_specs: spec = fake_qos type_ref = volume_types.create(self.ctxt, "qos_extra_specs", spec) else: type_ref = volume_types.create(self.ctxt, "qos_associate", None) if fake_qos: qos_ref = qos_specs.create(self.ctxt, 'qos-specs', fake_qos) qos_specs.associate_qos_with_type(self.ctxt, qos_ref['id'], type_ref['id']) qos_type = volume_types.get_volume_type(self.ctxt, type_ref['id']) return qos_type def _create_volume_type_qos_both(self, fake_qos, fake_qos_associate): type_ref = volume_types.create(self.ctxt, "qos_extra_specs", fake_qos) qos_ref = qos_specs.create(self.ctxt, 'qos-specs', fake_qos_associate) qos_specs.associate_qos_with_type(self.ctxt, qos_ref['id'], type_ref['id']) qos_type = volume_types.get_volume_type(self.ctxt, type_ref['id']) return qos_type def _create_replication_volume_type(self, enable): # Generate a volume type for volume repliation. 
if enable: spec = {'capabilities:replication': ' True'} type_ref = volume_types.create(self.ctxt, "replication_1", spec) else: spec = {'capabilities:replication': ' False'} type_ref = volume_types.create(self.ctxt, "replication_2", spec) replication_type = objects.VolumeType.get_by_id(self.ctxt, type_ref['id']) return replication_type def _create_consistency_group_volume_type(self): # Generate a volume type for volume consistencygroup. spec = {'capabilities:consistencygroup_support': ' True'} type_ref = volume_types.create(self.ctxt, "cg", spec) cg_type = volume_types.get_volume_type(self.ctxt, type_ref['id']) return cg_type def _create_group_volume_type(self): # Generate a volume type for volume group. spec = {'capabilities:group_support': ' True'} type_ref = volume_types.create(self.ctxt, "group", spec) group_type = volume_types.get_volume_type(self.ctxt, type_ref['id']) return group_type def _get_vdisk_uid(self, vdisk_name): """Return vdisk_UID for given vdisk. Given a vdisk by name, performs an lsvdisk command that extracts the vdisk_UID parameter and returns it. Returns None if the specified vdisk does not exist. """ vdisk_properties, _err = self.sim._cmd_lsvdisk(obj=vdisk_name, delim='!') # Iterate through each row until we find the vdisk_UID entry for row in vdisk_properties.split('\n'): words = row.split('!') if words[0] == 'vdisk_UID': return words[1] return None def _create_volume_and_return_uid(self, volume_name): """Creates a volume and returns its UID. Creates a volume with the specified name, and returns the UID that the InStorage controller allocated for it. We do this by executing a create_volume and then calling into the simulator to perform an lsvdisk directly. """ volume = self._generate_vol_info(None, None) self.driver.create_volume(volume) return (volume, self._get_vdisk_uid(volume['name'])) def test_manage_existing_get_size_bad_ref(self): """Error on manage with bad reference. This test case attempts to manage an existing volume but passes in a bad reference that the InStorage driver doesn't understand. We expect an exception to be raised. """ volume = self._generate_vol_info(None, None) ref = {} self.assertRaises(exception.ManageExistingInvalidReference, self.driver.manage_existing_get_size, volume, ref) def test_manage_existing_get_size_bad_uid(self): """Error when the specified UUID does not exist.""" volume = self._generate_vol_info(None, None) ref = {'source-id': 'bad_uid'} self.assertRaises(exception.ManageExistingInvalidReference, self.driver.manage_existing_get_size, volume, ref) def test_manage_existing_get_size_bad_name(self): """Error when the specified name does not exist.""" volume = self._generate_vol_info(None, None) ref = {'source-name': 'bad_name'} self.assertRaises(exception.ManageExistingInvalidReference, self.driver.manage_existing_get_size, volume, ref) def test_manage_existing_bad_ref(self): """Error on manage with bad reference. This test case attempts to manage an existing volume but passes in a bad reference that the InStorage driver doesn't understand. We expect an exception to be raised. """ # Error when neither UUID nor name are specified. volume = self._generate_vol_info(None, None) ref = {} self.assertRaises(exception.ManageExistingInvalidReference, self.driver.manage_existing, volume, ref) # Error when the specified UUID does not exist. 
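# A manage_existing reference is a dict carrying either 'source-id' # (the vdisk UID) or 'source-name'; an empty ref, or one naming a vdisk # that does not exist, is rejected with ManageExistingInvalidReference, # as the cases below exercise.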
volume = self._generate_vol_info(None, None) ref = {'source-id': 'bad_uid'} self.assertRaises(exception.ManageExistingInvalidReference, self.driver.manage_existing, volume, ref) # Error when the specified name does not exist. volume = self._generate_vol_info(None, None) ref = {'source-name': 'bad_name'} self.assertRaises(exception.ManageExistingInvalidReference, self.driver.manage_existing, volume, ref) @mock.patch.object(instorage_common.InStorageAssistant, 'get_vdisk_copy_attrs') def test_manage_existing_mismatch(self, get_vdisk_copy_attrs): ctxt = testutils.get_test_admin_context() _volume, uid = self._create_volume_and_return_uid('manage_test') opts = {'rsize': -1} type_thick_ref = volume_types.create(ctxt, 'testtype1', opts) opts = {'rsize': 2} type_thin_ref = volume_types.create(ctxt, 'testtype2', opts) opts = {'rsize': 2, 'compression': True} type_comp_ref = volume_types.create(ctxt, 'testtype3', opts) opts = {'rsize': -1, 'iogrp': 1} type_iogrp_ref = volume_types.create(ctxt, 'testtype4', opts) new_volume = self._generate_vol_info(None, None) ref = {'source-name': _volume['name']} fake_copy_thin = self._get_default_opts() fake_copy_thin['autoexpand'] = 'on' fake_copy_comp = self._get_default_opts() fake_copy_comp['autoexpand'] = 'on' fake_copy_comp['compressed_copy'] = 'yes' fake_copy_thick = self._get_default_opts() fake_copy_thick['autoexpand'] = '' fake_copy_thick['compressed_copy'] = 'no' fake_copy_no_comp = self._get_default_opts() fake_copy_no_comp['compressed_copy'] = 'no' valid_iogrp = self.driver._state['available_iogrps'] self.driver._state['available_iogrps'] = [9999] self.assertRaises(exception.ManageExistingVolumeTypeMismatch, self.driver.manage_existing, new_volume, ref) self.driver._state['available_iogrps'] = valid_iogrp get_vdisk_copy_attrs.side_effect = [fake_copy_thin, fake_copy_thick, fake_copy_no_comp, fake_copy_comp, fake_copy_thick, fake_copy_thick ] new_volume['volume_type_id'] = type_thick_ref['id'] self.assertRaises(exception.ManageExistingVolumeTypeMismatch, self.driver.manage_existing, new_volume, ref) new_volume['volume_type_id'] = type_thin_ref['id'] self.assertRaises(exception.ManageExistingVolumeTypeMismatch, self.driver.manage_existing, new_volume, ref) new_volume['volume_type_id'] = type_comp_ref['id'] self.assertRaises(exception.ManageExistingVolumeTypeMismatch, self.driver.manage_existing, new_volume, ref) new_volume['volume_type_id'] = type_thin_ref['id'] self.assertRaises(exception.ManageExistingVolumeTypeMismatch, self.driver.manage_existing, new_volume, ref) new_volume['volume_type_id'] = type_iogrp_ref['id'] self.assertRaises(exception.ManageExistingVolumeTypeMismatch, self.driver.manage_existing, new_volume, ref) new_volume['volume_type_id'] = type_thick_ref['id'] no_exist_pool = 'i-dont-exist-%s' % 56789 new_volume['host'] = 'openstack@mcs#%s' % no_exist_pool self.assertRaises(exception.ManageExistingVolumeTypeMismatch, self.driver.manage_existing, new_volume, ref) self._reset_flags() volume_types.destroy(ctxt, type_thick_ref['id']) volume_types.destroy(ctxt, type_comp_ref['id']) volume_types.destroy(ctxt, type_iogrp_ref['id']) def test_manage_existing_good_uid_not_mapped(self): """Tests managing a volume with no mappings. This test case attempts to manage an existing volume by UID, and we expect it to succeed. We verify that the backend volume was renamed to have the name of the Cinder volume that we asked for it to be associated with. """ # Create a volume as a way of getting a vdisk created, and find out the # UID of that vdisk. 
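# Managing an existing vdisk adopts it into Cinder by renaming it to # the new Cinder volume name; the UID captured here is used as the # 'source-id' reference and re-read afterwards to confirm the rename # took place.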
_volume, uid = self._create_volume_and_return_uid('manage_test') # Descriptor of the Cinder volume that we want to own the vdisk # referenced by uid. new_volume = self._generate_vol_info(None, None) # Submit the request to manage it. ref = {'source-id': uid} size = self.driver.manage_existing_get_size(new_volume, ref) self.assertEqual(10, size) self.driver.manage_existing(new_volume, ref) # Assert that there is a disk named after the new volume that has the # ID that we passed in, indicating that the disk has been renamed. uid_of_new_volume = self._get_vdisk_uid(new_volume['name']) self.assertEqual(uid, uid_of_new_volume) def test_manage_existing_good_name_not_mapped(self): """Tests managing a volume with no mappings. This test case attempts to manage an existing volume by name, and we expect it to succeed. We verify that the backend volume was renamed to have the name of the Cinder volume that we asked for it to be associated with. """ # Create a volume as a way of getting a vdisk created, and find out the # UID of that vdisk. _volume, uid = self._create_volume_and_return_uid('manage_test') # Descriptor of the Cinder volume that we want to own the vdisk # referenced by uid. new_volume = self._generate_vol_info(None, None) # Submit the request to manage it. ref = {'source-name': _volume['name']} size = self.driver.manage_existing_get_size(new_volume, ref) self.assertEqual(10, size) self.driver.manage_existing(new_volume, ref) # Assert that there is a disk named after the new volume that has the # ID that we passed in, indicating that the disk has been renamed. uid_of_new_volume = self._get_vdisk_uid(new_volume['name']) self.assertEqual(uid, uid_of_new_volume) def test_manage_existing_mapped(self): """Tests managing a mapped volume with no override. This test case attempts to manage an existing volume by UID, but the volume is mapped to a host, so we expect to see an exception raised. """ # Create a volume as a way of getting a vdisk created, and find out the # UUID of that vdisk. volume, uid = self._create_volume_and_return_uid('manage_test') # Map a host to the disk conn = {'initiator': u'unicode:initiator3', 'ip': '10.10.10.12', 'host': u'unicode.foo.bar.baz'} self.driver.initialize_connection(volume, conn) # Descriptor of the Cinder volume that we want to own the vdisk # referenced by uid. volume = self._generate_vol_info(None, None) ref = {'source-id': uid} # Attempt to manage this disk, and expect an exception because the # volume is already mapped. self.assertRaises(exception.ManageExistingInvalidReference, self.driver.manage_existing_get_size, volume, ref) ref = {'source-name': volume['name']} self.assertRaises(exception.ManageExistingInvalidReference, self.driver.manage_existing_get_size, volume, ref) def test_manage_existing_good_uid_mapped_with_override(self): """Tests managing a mapped volume with override. This test case attempts to manage an existing volume by UID, when it is already mapped to a host, but the ref specifies that this is OK. We verify that the backend volume was renamed to have the name of the Cinder volume that we asked for it to be associated with. """ # Create a volume as a way of getting a vdisk created, and find out the # UUID of that vdisk. 
volume, uid = self._create_volume_and_return_uid('manage_test') # Map a host to the disk conn = {'initiator': u'unicode:initiator3', 'ip': '10.10.10.12', 'host': u'unicode.foo.bar.baz'} self.driver.initialize_connection(volume, conn) # Descriptor of the Cinder volume that we want to own the vdisk # referenced by uid. new_volume = self._generate_vol_info(None, None) # Submit the request to manage it, specifying that it is OK to # manage a volume that is already attached. ref = {'source-id': uid, 'manage_if_in_use': True} size = self.driver.manage_existing_get_size(new_volume, ref) self.assertEqual(10, size) self.driver.manage_existing(new_volume, ref) # Assert that there is a disk named after the new volume that has the # ID that we passed in, indicating that the disk has been renamed. uid_of_new_volume = self._get_vdisk_uid(new_volume['name']) self.assertEqual(uid, uid_of_new_volume) def test_manage_existing_good_name_mapped_with_override(self): """Tests managing a mapped volume with override. This test case attempts to manage an existing volume by name, when it already mapped to a host, but the ref specifies that this is OK. We verify that the backend volume was renamed to have the name of the Cinder volume that we asked for it to be associated with. """ # Create a volume as a way of getting a vdisk created, and find out the # UUID of that vdisk. volume, uid = self._create_volume_and_return_uid('manage_test') # Map a host to the disk conn = {'initiator': u'unicode:initiator3', 'ip': '10.10.10.12', 'host': u'unicode.foo.bar.baz'} self.driver.initialize_connection(volume, conn) # Descriptor of the Cinder volume that we want to own the vdisk # referenced by uid. new_volume = self._generate_vol_info(None, None) # Submit the request to manage it, specifying that it is OK to # manage a volume that is already attached. ref = {'source-name': volume['name'], 'manage_if_in_use': True} size = self.driver.manage_existing_get_size(new_volume, ref) self.assertEqual(10, size) self.driver.manage_existing(new_volume, ref) # Assert that there is a disk named after the new volume that has the # ID that we passed in, indicating that the disk has been renamed. uid_of_new_volume = self._get_vdisk_uid(new_volume['name']) self.assertEqual(uid, uid_of_new_volume) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/volume/drivers/inspur/instorage/test_fc_driver.py0000664000175000017500000006623300000000000030342 0ustar00zuulzuul00000000000000# Copyright 2017 Inspur Corp. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
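# ---------------------------------------------------------------------------
# NOTE (editor's illustrative sketch, not driver code): the FC tests near the
# end of this file (test_make_initiator_target_all2all_map) expect an
# "all to all" initiator/target map, i.e. every initiator WWPN is mapped to
# the complete list of target WWPNs. A minimal, hypothetical stand-in with the
# same observable shape could look like this:
def _sketch_make_all2all_map(initiator_wwpns, target_wwpns):
    """Map each initiator WWPN to every target WWPN (illustration only)."""
    return {initiator: list(target_wwpns) for initiator in initiator_wwpns}
# Example: _sketch_make_all2all_map(['ff00000000000000'], ['bb00000000000000'])
# returns {'ff00000000000000': ['bb00000000000000']}.
# ---------------------------------------------------------------------------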
# """Tests for the Inspur InStorage volume driver.""" from unittest import mock from eventlet import greenthread from cinder import context import cinder.db from cinder import exception from cinder.tests.unit import test from cinder.tests.unit import utils as testutils from cinder.tests.unit.volume.drivers.inspur.instorage import fakes from cinder.volume import configuration as conf from cinder.volume.drivers.inspur.instorage import instorage_common from cinder.volume.drivers.inspur.instorage import instorage_fc from cinder.volume import volume_types class InStorageMCSFcDriverTestCase(test.TestCase): @mock.patch.object(greenthread, 'sleep') def setUp(self, mock_sleep): super(InStorageMCSFcDriverTestCase, self).setUp() self.fc_driver = fakes.FakeInStorageMCSFcDriver( configuration=conf.Configuration(None)) self._def_flags = {'san_ip': 'hostname', 'san_login': 'user', 'san_password': 'pass', 'instorage_mcs_volpool_name': ['openstack'], 'instorage_mcs_localcopy_timeout': 20, 'instorage_mcs_localcopy_rate': 49, 'instorage_mcs_allow_tenant_qos': True} wwpns = ['1234567890123458', '6543210987654323'] initiator = 'test.initiator.%s' % 123458 self._connector = {'ip': '1.234.56.78', 'host': 'instorage-mcs-test', 'wwpns': wwpns, 'initiator': initiator} self.sim = fakes.FakeInStorage(['openstack']) self.fc_driver.set_fake_storage(self.sim) self.ctxt = context.get_admin_context() self._reset_flags() self.ctxt = context.get_admin_context() self.db = cinder.db self.fc_driver.db = self.db self.fc_driver.do_setup(None) self.fc_driver.check_for_setup_error() self.fc_driver._assistant.check_lcmapping_interval = 0 def _set_flag(self, flag, value): group = self.fc_driver.configuration.config_group self.fc_driver.configuration.set_override(flag, value, group) def _reset_flags(self): self.fc_driver.configuration.local_conf.reset() for k, v in self._def_flags.items(): self._set_flag(k, v) def _create_volume(self, **kwargs): pool = fakes.get_test_pool() prop = {'host': 'openstack@mcs#%s' % pool, 'size': 1, 'volume_type_id': self.vt['id']} for p in prop.keys(): if p not in kwargs: kwargs[p] = prop[p] vol = testutils.create_volume(self.ctxt, **kwargs) self.fc_driver.create_volume(vol) return vol def _delete_volume(self, volume): self.fc_driver.delete_volume(volume) self.db.volume_destroy(self.ctxt, volume['id']) def _generate_vol_info(self, vol_name, vol_id): pool = fakes.get_test_pool() prop = {'mdisk_grp_name': pool} if vol_name: prop.update(volume_name=vol_name, volume_id=vol_id, volume_size=10) else: prop.update(size=10, volume_type_id=None, mdisk_grp_name=pool, host='openstack@mcs#%s' % pool) vol = testutils.create_volume(self.ctxt, **prop) return vol def _assert_vol_exists(self, name, exists): is_vol_defined = self.fc_driver._assistant.is_vdisk_defined(name) self.assertEqual(exists, is_vol_defined) def test_instorage_get_host_with_fc_connection(self): # Create a FC host del self._connector['initiator'] assistant = self.fc_driver._assistant host_name = assistant.create_host(self._connector) # Remove the first wwpn from connector, and then try get host wwpns = self._connector['wwpns'] wwpns.remove(wwpns[0]) host_name = assistant.get_host_from_connector(self._connector) self.assertIsNotNone(host_name) def test_instorage_get_host_with_fc_connection_with_volume(self): # create a FC volume extra_spec = {'capabilities:storage_protocol': ' FC'} vol_type_fc = volume_types.create(self.ctxt, 'FC', extra_spec) volume_fc = self._generate_vol_info(None, None) volume_fc['volume_type_id'] = vol_type_fc['id'] 
self.fc_driver.create_volume(volume_fc) connector = {'host': 'instorage-mcs-host', 'wwnns': ['20000090fa17311e', '20000090fa17311f'], 'wwpns': ['ff00000000000000', 'ff00000000000001'], 'initiator': 'iqn.1993-08.org.debian:01:eac5ccc1aaa'} self.fc_driver.initialize_connection(volume_fc, connector) # Create a FC host assistant = self.fc_driver._assistant # tell lsfabric to not return anything self.sim.error_injection('lsfabric', 'no_hosts') host_name = assistant.get_host_from_connector( connector, volume_fc['name']) self.assertIsNotNone(host_name) def test_instorage_get_host_from_connector_with_lshost_failure2(self): self._connector.pop('initiator') self._connector['wwpns'] = [] # Clearing will skip over fast-path assistant = self.fc_driver._assistant # Add a host to the simulator. We don't need it to match the # connector since we will force a bad failure for lshost. self.sim._cmd_mkhost(name='DifferentHost', hbawwpn='123456') # tell lshost to fail badly while called from # get_host_from_connector self.sim.error_injection('lshost', 'bigger_troubles') self.assertRaises(exception.VolumeBackendAPIException, assistant.get_host_from_connector, self._connector) def test_instorage_get_host_from_connector_not_found(self): self._connector.pop('initiator') assistant = self.fc_driver._assistant # Create some hosts. The first is not related to the connector and # we use the simulator for that. The second is for the connector. # We will force the missing_host error for the first host, but # then tolerate and find the second host on the slow path normally. self.sim._cmd_mkhost(name='instorage-mcs-test-3', hbawwpn='1234567') self.sim._cmd_mkhost(name='instorage-mcs-test-2', hbawwpn='2345678') self.sim._cmd_mkhost(name='instorage-mcs-test-1', hbawwpn='3456789') self.sim._cmd_mkhost(name='A-Different-host', hbawwpn='9345678') self.sim._cmd_mkhost(name='B-Different-host', hbawwpn='8345678') self.sim._cmd_mkhost(name='C-Different-host', hbawwpn='7345678') # tell lsfabric to skip rows so that we skip past fast path self.sim.error_injection('lsfabric', 'remove_rows') # Run test host_name = assistant.get_host_from_connector(self._connector) self.assertIsNone(host_name) def test_instorage_get_host_from_connector_fast_path(self): self._connector.pop('initiator') assistant = self.fc_driver._assistant # Create two hosts. Our lshost will return the hosts in sorted # Order. The extra host will be returned before the target # host. If we get detailed lshost info on our host without # gettting detailed info on the other host we used the fast path self.sim._cmd_mkhost(name='A-DifferentHost', hbawwpn='123456') assistant.create_host(self._connector) # tell lshost to fail while called from get_host_from_connector self.sim.error_injection('lshost', 'fail_fastpath') # tell lsfabric to skip rows so that we skip past fast path self.sim.error_injection('lsfabric', 'remove_rows') # Run test host_name = assistant.get_host_from_connector(self._connector) self.assertIsNotNone(host_name) # Need to assert that lshost was actually called. The way # we do that is check that the next simulator error for lshost # has not been reset. self.assertEqual(self.sim._next_cmd_error['lshost'], 'fail_fastpath', "lshost was not called in the simulator. 
The " "queued error still remains.") def test_instorage_initiator_multiple_wwpns_connected(self): # Generate us a test volume volume = self._create_volume() # Fibre Channel volume type extra_spec = {'capabilities:storage_protocol': ' FC'} vol_type = volume_types.create(self.ctxt, 'FC', extra_spec) volume['volume_type_id'] = vol_type['id'] # Make sure that the volumes have been created self._assert_vol_exists(volume['name'], True) # Set up one WWPN that won't match and one that will. self.fc_driver._state['storage_nodes']['1']['WWPN'] = [ '123456789ABCDEF0', 'AABBCCDDEEFF0010'] wwpns = ['ff00000000000000', 'ff00000000000001'] connector = {'host': 'instorage-mcs-test', 'wwpns': wwpns} with mock.patch.object(instorage_common.InStorageAssistant, 'get_conn_fc_wwpns') as get_mappings: mapped_wwpns = ['AABBCCDDEEFF0001', 'AABBCCDDEEFF0002', 'AABBCCDDEEFF0010', 'AABBCCDDEEFF0012'] get_mappings.return_value = mapped_wwpns # Initialize the connection init_ret = self.fc_driver.initialize_connection(volume, connector) # Make sure we return all wwpns which where mapped as part of the # connection self.assertEqual(mapped_wwpns, init_ret['data']['target_wwn']) def test_instorage_mcs_fc_validate_connector(self): conn_neither = {'host': 'host'} conn_iscsi = {'host': 'host', 'initiator': 'foo'} conn_fc = {'host': 'host', 'wwpns': 'bar', 'wwnns': 'foo'} conn_both = {'host': 'host', 'initiator': 'foo', 'wwpns': 'bar', 'wwnns': 'baz'} self.fc_driver.validate_connector(conn_fc) self.fc_driver.validate_connector(conn_both) self.assertRaises(exception.InvalidConnectorException, self.fc_driver.validate_connector, conn_iscsi) self.assertRaises(exception.InvalidConnectorException, self.fc_driver.validate_connector, conn_neither) def test_instorage_terminate_fc_connection(self): # create a FC volume volume_fc = self._create_volume() extra_spec = {'capabilities:storage_protocol': ' FC'} vol_type_fc = volume_types.create(self.ctxt, 'FC', extra_spec) volume_fc['volume_type_id'] = vol_type_fc['id'] connector = {'host': 'instorage-mcs-host', 'wwnns': ['20000090fa17311e', '20000090fa17311f'], 'wwpns': ['ff00000000000000', 'ff00000000000001'], 'initiator': 'iqn.1993-08.org.debian:01:eac5ccc1aaa'} self.fc_driver.initialize_connection(volume_fc, connector) self.fc_driver.terminate_connection(volume_fc, connector) @mock.patch.object(instorage_fc.InStorageMCSFCDriver, '_do_terminate_connection') def test_instorage_initialize_fc_connection_failure(self, term_conn): # create a FC volume volume_fc = self._create_volume() extra_spec = {'capabilities:storage_protocol': ' FC'} vol_type_fc = volume_types.create(self.ctxt, 'FC', extra_spec) volume_fc['volume_type_id'] = vol_type_fc['id'] connector = {'host': 'instorage-mcs-host', 'wwnns': ['20000090fa17311e', '20000090fa17311f'], 'wwpns': ['ff00000000000000', 'ff00000000000001'], 'initiator': 'iqn.1993-08.org.debian:01:eac5ccc1aaa'} self.fc_driver._state['storage_nodes'] = {} self.assertRaises(exception.VolumeBackendAPIException, self.fc_driver.initialize_connection, volume_fc, connector) term_conn.assert_called_once_with(volume_fc, connector) def test_instorage_terminate_fc_connection_multi_attach(self): # create a FC volume volume_fc = self._create_volume() extra_spec = {'capabilities:storage_protocol': ' FC'} vol_type_fc = volume_types.create(self.ctxt, 'FC', extra_spec) volume_fc['volume_type_id'] = vol_type_fc['id'] connector = {'host': 'instorage-mcs-host', 'wwnns': ['20000090fa17311e', '20000090fa17311f'], 'wwpns': ['ff00000000000000', 'ff00000000000001'], 'initiator': 
'iqn.1993-08.org.debian:01:eac5ccc1aaa'} connector2 = {'host': 'INSTORAGE-MCS-HOST', 'wwnns': ['30000090fa17311e', '30000090fa17311f'], 'wwpns': ['ffff000000000000', 'ffff000000000001'], 'initiator': 'iqn.1993-08.org.debian:01:eac5ccc1bbb'} # map and unmap the volume to two hosts normal case self.fc_driver.initialize_connection(volume_fc, connector) self.fc_driver.initialize_connection(volume_fc, connector2) # validate that the host entries are created for conn in [connector, connector2]: host = self.fc_driver._assistant.get_host_from_connector(conn) self.assertIsNotNone(host) self.fc_driver.terminate_connection(volume_fc, connector) self.fc_driver.terminate_connection(volume_fc, connector2) # validate that the host entries are deleted for conn in [connector, connector2]: host = self.fc_driver._assistant.get_host_from_connector(conn) self.assertIsNone(host) # map and unmap the volume to two hosts with the mapping gone self.fc_driver.initialize_connection(volume_fc, connector) self.fc_driver.initialize_connection(volume_fc, connector2) # Test multiple attachments case host_name = self.fc_driver._assistant.get_host_from_connector( connector2) self.fc_driver._assistant.unmap_vol_from_host( volume_fc['name'], host_name) host_name = self.fc_driver._assistant.get_host_from_connector( connector2) self.assertIsNotNone(host_name) with mock.patch.object(instorage_common.InStorageSSH, 'rmvdiskhostmap') as rmmap: rmmap.side_effect = Exception('boom') self.fc_driver.terminate_connection(volume_fc, connector2) host_name = self.fc_driver._assistant.get_host_from_connector( connector2) self.assertIsNone(host_name) # Test single attachment case self.fc_driver._assistant.unmap_vol_from_host( volume_fc['name'], host_name) with mock.patch.object(instorage_common.InStorageSSH, 'rmvdiskhostmap') as rmmap: rmmap.side_effect = Exception('boom') self.fc_driver.terminate_connection(volume_fc, connector) # validate that the host entries are deleted for conn in [connector, connector2]: host = self.fc_driver._assistant.get_host_from_connector(conn) self.assertIsNone(host) def test_instorage_initiator_target_map(self): # Generate us a test volume volume = self._create_volume() # FIbre Channel volume type extra_spec = {'capabilities:storage_protocol': ' FC'} vol_type = volume_types.create(self.ctxt, 'FC', extra_spec) volume['volume_type_id'] = vol_type['id'] # Make sure that the volumes have been created self._assert_vol_exists(volume['name'], True) wwpns = ['ff00000000000000', 'ff00000000000001'] connector = {'host': 'instorage-mcs-test', 'wwpns': wwpns} # Initialise the connection init_ret = self.fc_driver.initialize_connection(volume, connector) # Check that the initiator_target_map is as expected init_data = {'driver_volume_type': 'fibre_channel', 'data': {'initiator_target_map': {'ff00000000000000': ['AABBCCDDEEFF0011'], 'ff00000000000001': ['AABBCCDDEEFF0011']}, 'target_discovered': False, 'target_lun': 0, 'target_wwn': ['AABBCCDDEEFF0011'], 'volume_id': volume['id'] } } self.assertEqual(init_data, init_ret) # Terminate connection term_ret = self.fc_driver.terminate_connection(volume, connector) # Check that the initiator_target_map is as expected term_data = {'driver_volume_type': 'fibre_channel', 'data': {'initiator_target_map': {'ff00000000000000': ['5005076802432ADE', '5005076802332ADE', '5005076802532ADE', '5005076802232ADE', '5005076802132ADE', '5005086802132ADE', '5005086802332ADE', '5005086802532ADE', '5005086802232ADE', '5005086802432ADE'], 'ff00000000000001': ['5005076802432ADE', '5005076802332ADE', 
'5005076802532ADE', '5005076802232ADE', '5005076802132ADE', '5005086802132ADE', '5005086802332ADE', '5005086802532ADE', '5005086802232ADE', '5005086802432ADE']} } } self.assertCountEqual(term_data, term_ret) def test_instorage_mcs_fc_host_maps(self): # Create two volumes to be used in mappings ctxt = context.get_admin_context() volume1 = self._generate_vol_info(None, None) self.fc_driver.create_volume(volume1) volume2 = self._generate_vol_info(None, None) self.fc_driver.create_volume(volume2) # FIbre Channel volume type extra_spec = {'capabilities:storage_protocol': ' FC'} vol_type = volume_types.create(self.ctxt, 'FC', extra_spec) expected = {'driver_volume_type': 'fibre_channel', 'data': {'target_lun': 0, 'target_wwn': ['AABBCCDDEEFF0011'], 'target_discovered': False}} volume1['volume_type_id'] = vol_type['id'] volume2['volume_type_id'] = vol_type['id'] ret = self.fc_driver._assistant.get_host_from_connector( self._connector) self.assertIsNone(ret) # Make sure that the volumes have been created self._assert_vol_exists(volume1['name'], True) self._assert_vol_exists(volume2['name'], True) # Initialize connection from the first volume to a host ret = self.fc_driver.initialize_connection( volume1, self._connector) self.assertEqual(expected['driver_volume_type'], ret['driver_volume_type']) for k, v in expected['data'].items(): self.assertEqual(v, ret['data'][k]) # Initialize again, should notice it and do nothing ret = self.fc_driver.initialize_connection( volume1, self._connector) self.assertEqual(expected['driver_volume_type'], ret['driver_volume_type']) for k, v in expected['data'].items(): self.assertEqual(v, ret['data'][k]) # Try to delete the 1st volume (should fail because it is mapped) self.assertRaises(exception.VolumeBackendAPIException, self.fc_driver.delete_volume, volume1) # Check bad output from lsfabric for the 2nd volume for error in ['remove_field', 'header_mismatch']: self.sim.error_injection('lsfabric', error) self.assertRaises(exception.VolumeBackendAPIException, self.fc_driver.initialize_connection, volume2, self._connector) with mock.patch.object(instorage_common.InStorageAssistant, 'get_conn_fc_wwpns') as conn_fc_wwpns: conn_fc_wwpns.return_value = [] ret = self.fc_driver.initialize_connection(volume2, self._connector) ret = self.fc_driver.terminate_connection(volume1, self._connector) # For the first volume detach, ret['data'] should be empty # only ret['driver_volume_type'] returned self.assertEqual({}, ret['data']) self.assertEqual('fibre_channel', ret['driver_volume_type']) ret = self.fc_driver.terminate_connection(volume2, self._connector) self.assertEqual('fibre_channel', ret['driver_volume_type']) # wwpn is randomly created self.assertNotEqual({}, ret['data']) ret = self.fc_driver._assistant.get_host_from_connector( self._connector) self.assertIsNone(ret) # Test no preferred node self.sim.error_injection('lsvdisk', 'no_pref_node') self.assertRaises(exception.VolumeBackendAPIException, self.fc_driver.initialize_connection, volume1, self._connector) # Initialize connection from the second volume to the host with no # preferred node set if in simulation mode, otherwise, just # another initialize connection. 
self.sim.error_injection('lsvdisk', 'blank_pref_node') self.fc_driver.initialize_connection(volume2, self._connector) # Try to remove connection from host that doesn't exist (should fail) conn_no_exist = self._connector.copy() conn_no_exist['initiator'] = 'i_dont_exist' conn_no_exist['wwpns'] = ['0000000000000000'] self.assertRaises(exception.VolumeDriverException, self.fc_driver.terminate_connection, volume1, conn_no_exist) # Try to remove connection from volume that isn't mapped (should print # message but NOT fail) unmapped_vol = self._generate_vol_info(None, None) self.fc_driver.create_volume(unmapped_vol) self.fc_driver.terminate_connection(unmapped_vol, self._connector) self.fc_driver.delete_volume(unmapped_vol) # Remove the mapping from the 1st volume and delete it self.fc_driver.terminate_connection(volume1, self._connector) self.fc_driver.delete_volume(volume1) self._assert_vol_exists(volume1['name'], False) # Make sure our host still exists host_name = self.fc_driver._assistant.get_host_from_connector( self._connector) self.assertIsNotNone(host_name) # Remove the mapping from the 2nd volume. The host should # be automatically removed because there are no more mappings. self.fc_driver.terminate_connection(volume2, self._connector) # Check if we successfully terminate connections when the host is not # specified fake_conn = {'ip': '127.0.0.1', 'initiator': 'iqn.fake'} self.fc_driver.initialize_connection(volume2, self._connector) host_name = self.fc_driver._assistant.get_host_from_connector( self._connector) self.assertIsNotNone(host_name) self.fc_driver.terminate_connection(volume2, fake_conn) host_name = self.fc_driver._assistant.get_host_from_connector( self._connector) self.assertIsNone(host_name) self.fc_driver.delete_volume(volume2) self._assert_vol_exists(volume2['name'], False) # Delete volume types that we created volume_types.destroy(ctxt, vol_type['id']) ret = (self.fc_driver._assistant.get_host_from_connector( self._connector)) self.assertIsNone(ret) def test_instorage_mcs_fc_multi_host_maps(self): # Create a volume to be used in mappings ctxt = context.get_admin_context() volume = self._generate_vol_info(None, None) self.fc_driver.create_volume(volume) # Create volume types for protocols types = {} for protocol in ['FC']: opts = {'storage_protocol': ' ' + protocol} types[protocol] = volume_types.create(ctxt, protocol, opts) # Create a connector for the second 'host' wwpns = ['1234567890123459', '6543210987654324'] initiator = 'test.initiator.%s' % 123459 conn2 = {'ip': '1.234.56.79', 'host': 'instorage-mcs-test2', 'wwpns': wwpns, 'initiator': initiator} # Check protocols for FC volume['volume_type_id'] = types[protocol]['id'] # Make sure that the volume has been created self._assert_vol_exists(volume['name'], True) self.fc_driver.initialize_connection(volume, self._connector) self.fc_driver.initialize_connection(volume, conn2) self.fc_driver.terminate_connection(volume, conn2) self.fc_driver.terminate_connection(volume, self._connector) def test_add_vdisk_copy_fc(self): # Ensure only FC is available self.fc_driver._state['enabled_protocols'] = set(['FC']) volume = self._generate_vol_info(None, None) self.fc_driver.create_volume(volume) self.fc_driver.add_vdisk_copy(volume['name'], 'fake-pool', None) def test_make_initiator_target_all2all_map(self): initiator_wwpns = ['ff00000000000000', 'ff00000000000001'] target_wwpns = ['bb00000000000000', 'bb00000000000001'] expected = { 'ff00000000000000': ['bb00000000000000', 'bb00000000000001'], 'ff00000000000001': 
['bb00000000000000', 'bb00000000000001'] } ret = self.fc_driver.make_initiator_target_all2all_map(initiator_wwpns, target_wwpns) self.assertEqual(ret, expected) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/volume/drivers/inspur/instorage/test_helper_routines.py0000664000175000017500000002314300000000000031577 0ustar00zuulzuul00000000000000# Copyright 2017 Inspur Corp. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # """Tests for the Inspur InStorage volume driver.""" from unittest import mock import ddt from cinder import exception from cinder.tests.unit import test from cinder.tests.unit.volume.drivers.inspur.instorage import fakes from cinder.volume import configuration as conf from cinder.volume.drivers.inspur.instorage import instorage_common class CLIParserTestCase(test.TestCase): def test_empty(self): self.assertEqual(0, len( instorage_common.CLIParser(''))) self.assertEqual(0, len( instorage_common.CLIParser(('', 'stderr')))) def test_header(self): raw = r'''id!name 1!node1 2!node2 ''' resp = instorage_common.CLIParser(raw, with_header=True) self.assertEqual(2, len(resp)) self.assertEqual('1', resp[0]['id']) self.assertEqual('2', resp[1]['id']) def test_select(self): raw = r'''id!123 name!Bill name!Bill2 age!30 home address!s1 home address!s2 id! 
7 name!John name!John2 age!40 home address!s3 home address!s4 ''' resp = instorage_common.CLIParser(raw, with_header=False) self.assertEqual([('s1', 'Bill', 's1'), ('s2', 'Bill2', 's2'), ('s3', 'John', 's3'), ('s4', 'John2', 's4')], list(resp.select('home address', 'name', 'home address'))) def test_lsnode_all(self): raw = r'''id!name!UPS_serial_number!WWNN!status 1!node1!!500507680200C744!online 2!node2!!500507680200C745!online ''' resp = instorage_common.CLIParser(raw) self.assertEqual(2, len(resp)) self.assertEqual('1', resp[0]['id']) self.assertEqual('500507680200C744', resp[0]['WWNN']) self.assertEqual('2', resp[1]['id']) self.assertEqual('500507680200C745', resp[1]['WWNN']) def test_lsnode_single(self): raw = r'''id!1 port_id!500507680210C744 port_status!active port_speed!8Gb port_id!500507680240C744 port_status!inactive port_speed!8Gb ''' resp = instorage_common.CLIParser(raw, with_header=False) self.assertEqual(1, len(resp)) self.assertEqual('1', resp[0]['id']) self.assertEqual([('500507680210C744', 'active'), ('500507680240C744', 'inactive')], list(resp.select('port_id', 'port_status'))) class InStorageAssistantTestCase(test.TestCase): def setUp(self): super(InStorageAssistantTestCase, self).setUp() self.instorage_mcs_common = instorage_common.InStorageAssistant(None) self.mock_wait_time = mock.patch.object( instorage_common.InStorageAssistant, "WAIT_TIME", 0) @mock.patch.object(instorage_common.InStorageSSH, 'lslicense') @mock.patch.object(instorage_common.InStorageSSH, 'lsguicapabilities') def test_compression_enabled(self, lsguicapabilities, lslicense): fake_license_without_keys = {} fake_license = { 'license_compression_enclosures': '1', 'license_compression_capacity': '1' } fake_license_scheme = { 'compression': 'yes' } fake_license_invalid_scheme = { 'compression': 'no' } lslicense.side_effect = [fake_license_without_keys, fake_license_without_keys, fake_license, fake_license_without_keys] lsguicapabilities.side_effect = [fake_license_without_keys, fake_license_invalid_scheme, fake_license_scheme] self.assertFalse(self.instorage_mcs_common.compression_enabled()) self.assertFalse(self.instorage_mcs_common.compression_enabled()) self.assertTrue(self.instorage_mcs_common.compression_enabled()) self.assertTrue(self.instorage_mcs_common.compression_enabled()) @mock.patch.object(instorage_common.InStorageAssistant, 'get_vdisk_count_by_io_group') def test_select_io_group(self, get_vdisk_count_by_io_group): # given io groups opts = {} # system io groups state = {} fake_iog_vdc1 = {0: 100, 1: 50, 2: 50, 3: 300} fake_iog_vdc2 = {0: 2, 1: 1, 2: 200} fake_iog_vdc3 = {0: 2, 2: 200} fake_iog_vdc4 = {0: 100, 1: 100, 2: 100, 3: 100} fake_iog_vdc5 = {0: 10, 1: 1, 2: 200, 3: 300} get_vdisk_count_by_io_group.side_effect = [fake_iog_vdc1, fake_iog_vdc2, fake_iog_vdc3, fake_iog_vdc4, fake_iog_vdc5] opts['iogrp'] = '0,2' state['available_iogrps'] = [0, 1, 2, 3] iog = self.instorage_mcs_common.select_io_group(state, opts) self.assertIn(iog, state['available_iogrps']) self.assertEqual(2, iog) opts['iogrp'] = '0' state['available_iogrps'] = [0, 1, 2] iog = self.instorage_mcs_common.select_io_group(state, opts) self.assertIn(iog, state['available_iogrps']) self.assertEqual(0, iog) opts['iogrp'] = '1,2' state['available_iogrps'] = [0, 2] iog = self.instorage_mcs_common.select_io_group(state, opts) self.assertIn(iog, state['available_iogrps']) self.assertEqual(2, iog) opts['iogrp'] = ' 0, 1, 2 ' state['available_iogrps'] = [0, 1, 2, 3] iog = self.instorage_mcs_common.select_io_group(state, opts) 
self.assertIn(iog, state['available_iogrps']) # since vdisk count in all iogroups is same, it will pick the first self.assertEqual(0, iog) opts['iogrp'] = '0,1,2, 3' state['available_iogrps'] = [0, 1, 2, 3] iog = self.instorage_mcs_common.select_io_group(state, opts) self.assertIn(iog, state['available_iogrps']) self.assertEqual(1, iog) @ddt.ddt class InStorageSSHTestCase(test.TestCase): def setUp(self): super(InStorageSSHTestCase, self).setUp() self.fake_driver = fakes.FakeInStorageMCSISCSIDriver( configuration=conf.Configuration(None)) sim = fakes.FakeInStorage(['openstack']) self.fake_driver.set_fake_storage(sim) self.instorage_ssh = instorage_common.InStorageSSH( self.fake_driver._run_ssh) def test_mkvdiskhostmap(self): # mkvdiskhostmap should not be returning anything self.fake_driver.fake_storage._volumes_list['9999'] = { 'name': ' 9999', 'id': '0', 'uid': '0', 'IO_group_id': '0', 'IO_group_name': 'fakepool'} self.fake_driver.fake_storage._hosts_list['HOST1'] = { 'name': 'HOST1', 'id': '0', 'host_name': 'HOST1'} self.fake_driver.fake_storage._hosts_list['HOST2'] = { 'name': 'HOST2', 'id': '1', 'host_name': 'HOST2'} self.fake_driver.fake_storage._hosts_list['HOST3'] = { 'name': 'HOST3', 'id': '2', 'host_name': 'HOST3'} ret = self.instorage_ssh.mkvdiskhostmap('HOST1', '9999', '511', False) self.assertEqual('511', ret) ret = self.instorage_ssh.mkvdiskhostmap('HOST2', '9999', '512', True) self.assertEqual('512', ret) ret = self.instorage_ssh.mkvdiskhostmap('HOST3', '9999', None, True) self.assertIsNotNone(ret) with mock.patch.object( instorage_common.InStorageSSH, 'run_ssh_check_created') as run_ssh_check_created: ex = exception.VolumeBackendAPIException(data='CMMVC6071E') run_ssh_check_created.side_effect = ex self.assertRaises(exception.VolumeBackendAPIException, self.instorage_ssh.mkvdiskhostmap, 'HOST3', '9999', 511, True) @ddt.data((exception.VolumeBackendAPIException(data='CMMVC6372W'), None), (exception.VolumeBackendAPIException(data='CMMVC6372W'), {'name': 'fakevol', 'id': '0', 'uid': '0', 'IO_group_id': '0', 'IO_group_name': 'fakepool'}), (exception.VolumeBackendAPIException(data='error'), None)) @ddt.unpack def test_mkvdisk_with_warning(self, run_ssh_check, lsvol): opt = {'iogrp': 0} with mock.patch.object(instorage_common.InStorageSSH, 'run_ssh_check_created', side_effect=run_ssh_check): with mock.patch.object(instorage_common.InStorageSSH, 'lsvdisk', return_value=lsvol): if lsvol: ret = self.instorage_ssh.mkvdisk('fakevol', '1', 'gb', 'fakepool', opt, []) self.assertEqual('0', ret) else: self.assertRaises(exception.VolumeBackendAPIException, self.instorage_ssh.mkvdisk, 'fakevol', '1', 'gb', 'fakepool', opt, []) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/volume/drivers/inspur/instorage/test_iscsi_driver.py0000664000175000017500000005503600000000000031063 0ustar00zuulzuul00000000000000# Copyright 2017 Inspur Corp. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
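# ---------------------------------------------------------------------------
# NOTE (editor's illustrative sketch, not driver code): the connector
# validation tests below (test_instorage_mcs_iscsi_validate_connector) expect
# a connector to be accepted when it carries an iSCSI initiator IQN and to be
# rejected when it carries neither an initiator nor FC WWPNs. A hypothetical
# helper with the same observable behaviour:
def _sketch_connector_is_valid(connector, enabled_protocols=('iSCSI',)):
    """Return True if the connector can be used with the enabled protocols."""
    if 'iSCSI' in enabled_protocols and connector.get('initiator'):
        return True
    if 'FC' in enabled_protocols and connector.get('wwpns'):
        return True
    return False
# Example: _sketch_connector_is_valid({'host': 'h', 'initiator': 'iqn.x'}) is
# True, while _sketch_connector_is_valid({'host': 'h'}) is False.
# ---------------------------------------------------------------------------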
# """Tests for the Inspur InStorage volume driver.""" from unittest import mock from eventlet import greenthread from cinder import context import cinder.db from cinder import exception from cinder.tests.unit import test from cinder.tests.unit import utils as testutils from cinder.tests.unit.volume.drivers.inspur.instorage import fakes from cinder.volume import configuration as conf from cinder.volume.drivers.inspur.instorage import instorage_iscsi from cinder.volume import volume_types class InStorageMCSISCSIDriverTestCase(test.TestCase): def setUp(self): super(InStorageMCSISCSIDriverTestCase, self).setUp() self.mock_object(greenthread, 'sleep') self.iscsi_driver = fakes.FakeInStorageMCSISCSIDriver( configuration=conf.Configuration(None)) self._def_flags = {'san_ip': 'hostname', 'san_login': 'user', 'san_password': 'pass', 'instorage_mcs_volpool_name': ['openstack'], 'instorage_mcs_localcopy_timeout': 20, 'instorage_mcs_localcopy_rate': 49, 'instorage_mcs_allow_tenant_qos': True} wwpns = ['1234567890123456', '6543210987654321'] initiator = 'test.initiator.%s' % 123456 self._connector = {'ip': '1.234.56.78', 'host': 'instorage-mcs-test', 'wwpns': wwpns, 'initiator': initiator} self.sim = fakes.FakeInStorage(['openstack']) self.iscsi_driver.set_fake_storage(self.sim) self.ctxt = context.get_admin_context() self._reset_flags() self.ctxt = context.get_admin_context() self.db = cinder.db self.iscsi_driver.db = self.db self.iscsi_driver.do_setup(None) self.iscsi_driver.check_for_setup_error() self.iscsi_driver._assistant.check_lcmapping_interval = 0 def _set_flag(self, flag, value): group = self.iscsi_driver.configuration.config_group self.iscsi_driver.configuration.set_override(flag, value, group) def _reset_flags(self): self.iscsi_driver.configuration.local_conf.reset() for k, v in self._def_flags.items(): self._set_flag(k, v) def _create_volume(self, **kwargs): pool = fakes.get_test_pool() prop = {'host': 'openstack@mcs#%s' % pool, 'size': 1, 'volume_type_id': self.vt['id']} for p in prop.keys(): if p not in kwargs: kwargs[p] = prop[p] vol = testutils.create_volume(self.ctxt, **kwargs) self.iscsi_driver.create_volume(vol) return vol def _delete_volume(self, volume): self.iscsi_driver.delete_volume(volume) self.db.volume_destroy(self.ctxt, volume['id']) def _generate_vol_info(self, vol_name, vol_id): pool = fakes.get_test_pool() prop = {'mdisk_grp_name': pool} if vol_name: prop.update(volume_name=vol_name, volume_id=vol_id, volume_size=10) else: prop.update(size=10, volume_type_id=None, mdisk_grp_name=pool, host='openstack@mcs#%s' % pool) vol = testutils.create_volume(self.ctxt, **prop) return vol def _assert_vol_exists(self, name, exists): is_vol_defined = self.iscsi_driver._assistant.is_vdisk_defined(name) self.assertEqual(exists, is_vol_defined) def test_instorage_mcs_iscsi_validate_connector(self): conn_neither = {'host': 'host'} conn_iscsi = {'host': 'host', 'initiator': 'foo'} conn_fc = {'host': 'host', 'wwpns': 'bar'} conn_both = {'host': 'host', 'initiator': 'foo', 'wwpns': 'bar'} self.iscsi_driver._state['enabled_protocols'] = set(['iSCSI']) self.iscsi_driver.validate_connector(conn_iscsi) self.iscsi_driver.validate_connector(conn_both) self.assertRaises(exception.InvalidConnectorException, self.iscsi_driver.validate_connector, conn_fc) self.assertRaises(exception.InvalidConnectorException, self.iscsi_driver.validate_connector, conn_neither) self.iscsi_driver._state['enabled_protocols'] = set(['iSCSI', 'FC']) self.iscsi_driver.validate_connector(conn_iscsi) 
self.iscsi_driver.validate_connector(conn_both) self.assertRaises(exception.InvalidConnectorException, self.iscsi_driver.validate_connector, conn_neither) def test_instorage_terminate_iscsi_connection(self): # create a iSCSI volume volume_iSCSI = self._create_volume() extra_spec = {'capabilities:storage_protocol': ' iSCSI'} vol_type_iSCSI = volume_types.create(self.ctxt, 'iSCSI', extra_spec) volume_iSCSI['volume_type_id'] = vol_type_iSCSI['id'] connector = {'host': 'instorage-mcs-host', 'wwnns': ['20000090fa17311e', '20000090fa17311f'], 'wwpns': ['ff00000000000000', 'ff00000000000001'], 'initiator': 'iqn.1993-08.org.debian:01:eac5ccc1aaa'} self.iscsi_driver.initialize_connection(volume_iSCSI, connector) self.iscsi_driver.terminate_connection(volume_iSCSI, connector) @mock.patch.object(instorage_iscsi.InStorageMCSISCSIDriver, '_do_terminate_connection') def test_instorage_initialize_iscsi_connection_failure(self, term_conn): # create a iSCSI volume volume_iSCSI = self._create_volume() extra_spec = {'capabilities:storage_protocol': ' iSCSI'} vol_type_iSCSI = volume_types.create(self.ctxt, 'iSCSI', extra_spec) volume_iSCSI['volume_type_id'] = vol_type_iSCSI['id'] connector = {'host': 'instorage-mcs-host', 'wwnns': ['20000090fa17311e', '20000090fa17311f'], 'wwpns': ['ff00000000000000', 'ff00000000000001'], 'initiator': 'iqn.1993-08.org.debian:01:eac5ccc1aaa'} self.iscsi_driver._state['storage_nodes'] = {} self.assertRaises(exception.VolumeBackendAPIException, self.iscsi_driver.initialize_connection, volume_iSCSI, connector) term_conn.assert_called_once_with(volume_iSCSI, connector) def test_instorage_initialize_iscsi_connection_multihost(self): connector_a = {'host': 'instorage-mcs-host-a', 'wwnns': ['20000090fa17311e', '20000090fa17311f'], 'wwpns': ['ff00000000000000', 'ff00000000000001'], 'initiator': 'iqn.1993-08.org.debian:01:eac5ccc1aaa'} # host-volume map return value exp_path_a = {'driver_volume_type': 'iscsi', 'data': {'target_discovered': False, 'target_iqn': 'iqn.1982-01.com.inspur:1234.sim.node1', 'target_portal': '1.234.56.78:3260', 'target_lun': 0, 'auth_method': 'CHAP', 'discovery_auth_method': 'CHAP'}} connector_b = {'host': 'instorage-mcs-host-b', 'wwnns': ['30000090fa17311e', '30000090fa17311f'], 'wwpns': ['ff00000000000002', 'ff00000000000003'], 'initiator': 'iqn.1993-08.org.debian:01:eac5ccc1aab'} # host-volume map return value exp_path_b = {'driver_volume_type': 'iscsi', 'data': {'target_discovered': False, 'target_iqn': 'iqn.1982-01.com.inspur:1234.sim.node1', 'target_portal': '1.234.56.78:3260', 'target_lun': 1, 'auth_method': 'CHAP', 'discovery_auth_method': 'CHAP'}} volume_iSCSI = self._create_volume() extra_spec = {'capabilities:storage_protocol': ' iSCSI'} vol_type_iSCSI = volume_types.create(self.ctxt, 'iSCSI', extra_spec) volume_iSCSI['volume_type_id'] = vol_type_iSCSI['id'] # Make sure that the volumes have been created self._assert_vol_exists(volume_iSCSI['name'], True) # check that the hosts not exist ret = self.iscsi_driver._assistant.get_host_from_connector( connector_a) self.assertIsNone(ret) ret = self.iscsi_driver._assistant.get_host_from_connector( connector_b) self.assertIsNone(ret) # Initialize connection to map volume to host a ret = self.iscsi_driver.initialize_connection( volume_iSCSI, connector_a) self.assertEqual(exp_path_a['driver_volume_type'], ret['driver_volume_type']) # check host-volume map return value for k, v in exp_path_a['data'].items(): self.assertEqual(v, ret['data'][k]) ret = self.iscsi_driver._assistant.get_host_from_connector( 
connector_a) self.assertIsNotNone(ret) # Initialize connection to map volume to host b ret = self.iscsi_driver.initialize_connection( volume_iSCSI, connector_b) self.assertEqual(exp_path_b['driver_volume_type'], ret['driver_volume_type']) # check the return value for k, v in exp_path_b['data'].items(): self.assertEqual(v, ret['data'][k]) ret = self.iscsi_driver._assistant.get_host_from_connector( connector_b) self.assertIsNotNone(ret) def test_instorage_initialize_iscsi_connection_single_path(self): # Test the return value for _get_iscsi_properties connector = {'host': 'instorage-mcs-host', 'wwnns': ['20000090fa17311e', '20000090fa17311f'], 'wwpns': ['ff00000000000000', 'ff00000000000001'], 'initiator': 'iqn.1993-08.org.debian:01:eac5ccc1aaa'} # Expected single path host-volume map return value exp_s_path = {'driver_volume_type': 'iscsi', 'data': {'target_discovered': False, 'target_iqn': 'iqn.1982-01.com.inspur:1234.sim.node1', 'target_portal': '1.234.56.78:3260', 'target_lun': 0, 'auth_method': 'CHAP', 'discovery_auth_method': 'CHAP'}} volume_iSCSI = self._create_volume() extra_spec = {'capabilities:storage_protocol': ' iSCSI'} vol_type_iSCSI = volume_types.create(self.ctxt, 'iSCSI', extra_spec) volume_iSCSI['volume_type_id'] = vol_type_iSCSI['id'] # Make sure that the volumes have been created self._assert_vol_exists(volume_iSCSI['name'], True) # Check case where no hosts exist ret = self.iscsi_driver._assistant.get_host_from_connector( connector) self.assertIsNone(ret) # Initialize connection to map volume to a host ret = self.iscsi_driver.initialize_connection( volume_iSCSI, connector) self.assertEqual(exp_s_path['driver_volume_type'], ret['driver_volume_type']) # Check the single path host-volume map return value for k, v in exp_s_path['data'].items(): self.assertEqual(v, ret['data'][k]) ret = self.iscsi_driver._assistant.get_host_from_connector( connector) self.assertIsNotNone(ret) def test_instorage_initialize_iscsi_connection_multipath(self): # Test the return value for _get_iscsi_properties connector = {'host': 'instorage-mcs-host', 'wwnns': ['20000090fa17311e', '20000090fa17311f'], 'wwpns': ['ff00000000000000', 'ff00000000000001'], 'initiator': 'iqn.1993-08.org.debian:01:eac5ccc1aaa', 'multipath': True} # Expected multipath host-volume map return value exp_m_path = {'driver_volume_type': 'iscsi', 'data': {'target_discovered': False, 'target_iqn': 'iqn.1982-01.com.inspur:1234.sim.node1', 'target_portal': '1.234.56.78:3260', 'target_lun': 0, 'target_iqns': [ 'iqn.1982-01.com.inspur:1234.sim.node1', 'iqn.1982-01.com.inspur:1234.sim.node1', 'iqn.1982-01.com.inspur:1234.sim.node2'], 'target_portals': ['1.234.56.78:3260', '1.234.56.80:3260', '1.234.56.79:3260'], 'target_luns': [0, 0, 0], 'auth_method': 'CHAP', 'discovery_auth_method': 'CHAP'}} volume_iSCSI = self._create_volume() extra_spec = {'capabilities:storage_protocol': ' iSCSI'} vol_type_iSCSI = volume_types.create(self.ctxt, 'iSCSI', extra_spec) volume_iSCSI['volume_type_id'] = vol_type_iSCSI['id'] # Check case where no hosts exist ret = self.iscsi_driver._assistant.get_host_from_connector( connector) self.assertIsNone(ret) # Initialize connection to map volume to a host ret = self.iscsi_driver.initialize_connection( volume_iSCSI, connector) self.assertEqual(exp_m_path['driver_volume_type'], ret['driver_volume_type']) # Check the multipath host-volume map return value for k, v in exp_m_path['data'].items(): if k in ('target_iqns', 'target_portals'): # These are randomly ordered lists self.assertCountEqual(v, ret['data'][k]) 
else: self.assertEqual(v, ret['data'][k]) ret = self.iscsi_driver._assistant.get_host_from_connector( connector) self.assertIsNotNone(ret) def test_instorage_mcs_iscsi_host_maps(self): # Create two volumes to be used in mappings ctxt = context.get_admin_context() volume1 = self._generate_vol_info(None, None) self.iscsi_driver.create_volume(volume1) volume2 = self._generate_vol_info(None, None) self.iscsi_driver.create_volume(volume2) # Create volume types that we created types = {} for protocol in ['iSCSI']: opts = {'storage_protocol': ' ' + protocol} types[protocol] = volume_types.create(ctxt, protocol, opts) expected = {'iSCSI': {'driver_volume_type': 'iscsi', 'data': {'target_discovered': False, 'target_iqn': 'iqn.1982-01.com.inspur:1234.sim.node1', 'target_portal': '1.234.56.78:3260', 'target_lun': 0, 'auth_method': 'CHAP', 'discovery_auth_method': 'CHAP'}}} volume1['volume_type_id'] = types[protocol]['id'] volume2['volume_type_id'] = types[protocol]['id'] # Check case where no hosts exist ret = self.iscsi_driver._assistant.get_host_from_connector( self._connector) self.assertIsNone(ret) # Make sure that the volumes have been created self._assert_vol_exists(volume1['name'], True) self._assert_vol_exists(volume2['name'], True) # Initialize connection from the first volume to a host ret = self.iscsi_driver.initialize_connection( volume1, self._connector) self.assertEqual(expected[protocol]['driver_volume_type'], ret['driver_volume_type']) for k, v in expected[protocol]['data'].items(): self.assertEqual(v, ret['data'][k]) # Initialize again, should notice it and do nothing ret = self.iscsi_driver.initialize_connection( volume1, self._connector) self.assertEqual(expected[protocol]['driver_volume_type'], ret['driver_volume_type']) for k, v in expected[protocol]['data'].items(): self.assertEqual(v, ret['data'][k]) # Try to delete the 1st volume (should fail because it is mapped) self.assertRaises(exception.VolumeBackendAPIException, self.iscsi_driver.delete_volume, volume1) ret = self.iscsi_driver.terminate_connection(volume1, self._connector) ret = self.iscsi_driver._assistant.get_host_from_connector( self._connector) self.assertIsNone(ret) # Check cases with no auth set for host for auth_enabled in [True, False]: for host_exists in ['yes-auth', 'yes-noauth', 'no']: self._set_flag('instorage_mcs_iscsi_chap_enabled', auth_enabled) case = 'en' + str(auth_enabled) + 'ex' + str(host_exists) conn_na = {'initiator': 'test:init:%s' % 56789, 'ip': '11.11.11.11', 'host': 'host-%s' % case} if host_exists.startswith('yes'): self.sim._add_host_to_list(conn_na) if host_exists == 'yes-auth': kwargs = {'chapsecret': 'foo', 'obj': conn_na['host']} self.sim._cmd_chhost(**kwargs) volume1['volume_type_id'] = types['iSCSI']['id'] init_ret = self.iscsi_driver.initialize_connection(volume1, conn_na) host_name = self.sim._host_in_list(conn_na['host']) chap_ret = ( self.iscsi_driver._assistant.get_chap_secret_for_host( host_name)) if auth_enabled or host_exists == 'yes-auth': self.assertIn('auth_password', init_ret['data']) self.assertIsNotNone(chap_ret) else: self.assertNotIn('auth_password', init_ret['data']) self.assertIsNone(chap_ret) self.iscsi_driver.terminate_connection(volume1, conn_na) self._set_flag('instorage_mcs_iscsi_chap_enabled', True) # Test no preferred node self.sim.error_injection('lsvdisk', 'no_pref_node') self.assertRaises(exception.VolumeBackendAPIException, self.iscsi_driver.initialize_connection, volume1, self._connector) # Initialize connection from the second volume to the host with no # 
preferred node set if in simulation mode, otherwise, just # another initialize connection. self.sim.error_injection('lsvdisk', 'blank_pref_node') self.iscsi_driver.initialize_connection(volume2, self._connector) # Try to remove connection from host that doesn't exist (should fail) conn_no_exist = self._connector.copy() conn_no_exist['initiator'] = 'i_dont_exist' conn_no_exist['wwpns'] = ['0000000000000000'] self.assertRaises(exception.VolumeDriverException, self.iscsi_driver.terminate_connection, volume1, conn_no_exist) # Try to remove connection from volume that isn't mapped (should print # message but NOT fail) unmapped_vol = self._generate_vol_info(None, None) self.iscsi_driver.create_volume(unmapped_vol) self.iscsi_driver.terminate_connection(unmapped_vol, self._connector) self.iscsi_driver.delete_volume(unmapped_vol) # Remove the mapping from the 1st volume and delete it self.iscsi_driver.terminate_connection(volume1, self._connector) self.iscsi_driver.delete_volume(volume1) self._assert_vol_exists(volume1['name'], False) # Make sure our host still exists host_name = self.iscsi_driver._assistant.get_host_from_connector( self._connector) self.assertIsNotNone(host_name) # Remove the mapping from the 2nd volume. The host should # be automatically removed because there are no more mappings. self.iscsi_driver.terminate_connection(volume2, self._connector) # Check if we successfully terminate connections when the host is not # specified fake_conn = {'ip': '127.0.0.1', 'initiator': 'iqn.fake'} self.iscsi_driver.initialize_connection(volume2, self._connector) host_name = self.iscsi_driver._assistant.get_host_from_connector( self._connector) self.assertIsNotNone(host_name) self.iscsi_driver.terminate_connection(volume2, fake_conn) host_name = self.iscsi_driver._assistant.get_host_from_connector( self._connector) self.assertIsNone(host_name) self.iscsi_driver.delete_volume(volume2) self._assert_vol_exists(volume2['name'], False) # Delete volume types that we created for protocol in ['iSCSI']: volume_types.destroy(ctxt, types[protocol]['id']) # Check if our host still exists (it should not) ret = (self.iscsi_driver._assistant.get_host_from_connector( self._connector)) self.assertIsNone(ret) def test_add_vdisk_copy_iscsi(self): # Ensure only iSCSI is available self.iscsi_driver._state['enabled_protocols'] = set(['iSCSI']) volume = self._generate_vol_info(None, None) self.iscsi_driver.create_volume(volume) self.iscsi_driver.add_vdisk_copy(volume['name'], 'fake-pool', None) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/volume/drivers/inspur/instorage/test_replication.py0000664000175000017500000013413200000000000030702 0ustar00zuulzuul00000000000000# Copyright 2017 Inspur Corp. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
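# ---------------------------------------------------------------------------
# NOTE (editor's illustrative sketch, not driver code): the replication tests
# below opt volumes in or out of replication through volume-type extra specs
# (see _create_replica_volume_type further down); the values keep a leading
# space, matching the convention used in that helper. A hypothetical helper
# producing the same extra-spec dictionaries:
def _sketch_replication_extra_specs(enabled, rep_type='sync'):
    """Return extra specs for a replicated or non-replicated volume type."""
    if not enabled:
        return {'replication_enabled': ' False'}
    return {'replication_enabled': ' True',
            'replication_type': ' %s' % rep_type}  # ' sync' or ' async'
# Example: _sketch_replication_extra_specs(True, 'async') returns
# {'replication_enabled': ' True', 'replication_type': ' async'}.
# ---------------------------------------------------------------------------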
# """Tests for the Inspur InStorage volume driver.""" import json from unittest import mock from eventlet import greenthread from oslo_utils import units from cinder import context import cinder.db from cinder import exception from cinder.objects import fields from cinder.tests.unit import fake_constants as fake from cinder.tests.unit import test from cinder.tests.unit import utils as testutils from cinder.tests.unit.volume.drivers.inspur.instorage import fakes from cinder import utils from cinder.volume import configuration as conf from cinder.volume.drivers.inspur.instorage import ( replication as instorage_rep) from cinder.volume.drivers.inspur.instorage import instorage_common from cinder.volume.drivers.inspur.instorage import instorage_const from cinder.volume import volume_types class InStorageMCSReplicationTestCase(test.TestCase): def setUp(self): super(InStorageMCSReplicationTestCase, self).setUp() def _run_ssh_aux(cmd, check_exit_code=True, attempts=1): utils.check_ssh_injection(cmd) if len(cmd) > 2 and cmd[1] == 'lssystem': cmd[1] = 'lssystem_aux' ret = self.sim.execute_command(cmd, check_exit_code) return ret aux_connect_patcher = mock.patch( 'cinder.volume.drivers.inspur.instorage.' 'replication.InStorageMCSReplicationManager._run_ssh') self.aux_ssh_mock = aux_connect_patcher.start() self.addCleanup(aux_connect_patcher.stop) self.aux_ssh_mock.side_effect = _run_ssh_aux self.driver = fakes.FakeInStorageMCSISCSIDriver( configuration=conf.Configuration(None)) self.rep_target = {"backend_id": "mcs_aux_target_1", "san_ip": "192.168.10.22", "san_login": "admin", "san_password": "admin", "pool_name": fakes.get_test_pool()} self.fake_target = {"backend_id": "mcs_id_target", "san_ip": "192.168.10.23", "san_login": "admin", "san_password": "admin", "pool_name": fakes.get_test_pool()} self._def_flags = {'san_ip': '192.168.10.21', 'san_login': 'user', 'san_password': 'pass', 'instorage_mcs_volpool_name': fakes.MCS_POOLS, 'replication_device': [self.rep_target]} wwpns = ['1234567890123451', '6543210987654326'] initiator = 'test.initiator.%s' % 123451 self._connector = {'ip': '1.234.56.78', 'host': 'instorage-mcs-test', 'wwpns': wwpns, 'initiator': initiator} self.sim = fakes.FakeInStorage(fakes.MCS_POOLS) self.driver.set_fake_storage(self.sim) self.ctxt = context.get_admin_context() self._reset_flags() self.ctxt = context.get_admin_context() self.driver.db = cinder.db self.driver.do_setup(None) self.driver.check_for_setup_error() self._create_test_volume_types() self.mock_object(greenthread, 'sleep') def _set_flag(self, flag, value): group = self.driver.configuration.config_group self.driver.configuration.set_override(flag, value, group) def _reset_flags(self): self.driver.configuration.local_conf.reset() for k, v in self._def_flags.items(): self._set_flag(k, v) self.driver.configuration.set_override('replication_device', [self.rep_target]) def _assert_vol_exists(self, name, exists): is_vol_defined = self.driver._assistant.is_vdisk_defined(name) self.assertEqual(exists, is_vol_defined) def _generate_vol_info(self, vol_name, vol_id, vol_type=None): pool = fakes.get_test_pool() volume_type = self.non_replica_type if vol_type: volume_type = vol_type if vol_name: prop = {'volume_name': vol_name, 'volume_id': vol_id, 'volume_size': 10, 'mdisk_grp_name': pool} else: prop = {'size': 10, 'mdisk_grp_name': pool, 'host': 'openstack@mcs#%s' % pool, 'volume_type_id': volume_type['id']} vol = testutils.create_volume(self.ctxt, **prop) return vol def _generate_snapshot_info(self, vol): snap = 
testutils.create_snapshot(self.ctxt, vol.id) return snap def _create_replica_volume_type(self, enable, rep_type=instorage_const.SYNC): # Generate a volume type for volume repliation. if enable: if rep_type == instorage_const.SYNC: spec = {'replication_enabled': ' True', 'replication_type': ' sync'} type_name = 'rep_sync' else: spec = {'replication_enabled': ' True', 'replication_type': ' async'} type_name = 'rep_async' else: spec = {'replication_enabled': ' False'} type_name = "non_rep" db_rep_type = testutils.create_volume_type(self.ctxt, name=type_name, extra_specs=spec) rep_type = volume_types.get_volume_type(self.ctxt, db_rep_type.id) return rep_type def _create_test_volume_types(self): self.mm_type = self._create_replica_volume_type( True, rep_type=instorage_const.SYNC) self.gm_type = self._create_replica_volume_type( True, rep_type=instorage_const.ASYNC) self.non_replica_type = self._create_replica_volume_type(False) def _create_test_volume(self, rep_type): volume = self._generate_vol_info(None, None, rep_type) model_update = self.driver.create_volume(volume) return volume, model_update def _get_vdisk_uid(self, vdisk_name): vdisk_properties, _err = self.sim._cmd_lsvdisk(obj=vdisk_name, delim='!') for row in vdisk_properties.split('\n'): words = row.split('!') if words[0] == 'vdisk_UID': return words[1] return None def test_instorage_do_replication_setup_error(self): fake_targets = [self.rep_target, self.rep_target] self.driver.configuration.set_override('replication_device', [{"backend_id": "mcs_id_target"}]) self.assertRaises(exception.InvalidInput, self.driver._do_replication_setup) self.driver.configuration.set_override('replication_device', fake_targets) self.assertRaises(exception.InvalidInput, self.driver._do_replication_setup) self.driver._active_backend_id = 'fake_id' self.driver.configuration.set_override('replication_device', [self.rep_target]) self.assertRaises(exception.InvalidInput, self.driver._do_replication_setup) self.driver._active_backend_id = None self.driver._do_replication_setup() self.assertEqual(self.rep_target, self.driver._replica_target) @mock.patch.object(instorage_common.InStorageAssistant, 'replication_licensed') def test_instorage_setup_replication(self, replication_licensed): self.driver.configuration.set_override('replication_device', [self.rep_target]) self.driver._active_backend_id = None replication_licensed.side_effect = [False, True, True, True] self.driver._get_instorage_config() self.assertEqual(self.driver._assistant, self.driver._local_backend_assistant) self.assertFalse(self.driver._replica_enabled) self.driver._get_instorage_config() self.assertEqual(self.rep_target, self.driver._replica_target) self.assertTrue(self.driver._replica_enabled) self.driver._active_backend_id = self.rep_target['backend_id'] self.driver._get_instorage_config() self.assertEqual(self.driver._assistant, self.driver._aux_backend_assistant) self.assertTrue(self.driver._replica_enabled) self.driver._active_backend_id = None self.driver._get_instorage_config() def test_instorage_create_volume_with_mirror_replication(self): # Set replication target. self.driver.configuration.set_override('replication_device', [self.rep_target]) self.driver.do_setup(self.ctxt) # Create sync copy replication. volume, model_update = self._create_test_volume(self.mm_type) self.assertEqual('enabled', model_update['replication_status']) self._validate_replic_vol_creation(volume) self.driver.delete_volume(volume) self._validate_replic_vol_deletion(volume) # Create async copy replication. 
volume, model_update = self._create_test_volume(self.gm_type) self.assertEqual('enabled', model_update['replication_status']) self._validate_replic_vol_creation(volume) self.driver.delete_volume(volume) self._validate_replic_vol_deletion(volume) def _validate_replic_vol_creation(self, volume): # Create sync copy volume self._assert_vol_exists(volume['name'], True) self._assert_vol_exists( instorage_const.REPLICA_AUX_VOL_PREFIX + volume['name'], True) rel_info = self.driver._assistant.get_relationship_info(volume['name']) self.assertIsNotNone(rel_info) vol_rep_type = rel_info['copy_type'] rep_type = self.driver._get_volume_replicated_type(self.ctxt, volume) self.assertEqual(vol_rep_type, rep_type) self.assertEqual('master', rel_info['primary']) self.assertEqual(volume['name'], rel_info['master_vdisk_name']) self.assertEqual( instorage_const.REPLICA_AUX_VOL_PREFIX + volume['name'], rel_info['aux_vdisk_name']) self.assertEqual('inconsistent_copying', rel_info['state']) self.sim._rc_state_transition('wait', rel_info) self.assertEqual('consistent_synchronized', rel_info['state']) def _validate_replic_vol_deletion(self, volume): self._assert_vol_exists(volume['name'], False) self._assert_vol_exists( instorage_const.REPLICA_AUX_VOL_PREFIX + volume['name'], False) rel_info = self.driver._assistant.get_relationship_info(volume['name']) self.assertIsNone(rel_info) @mock.patch('oslo_service.loopingcall.FixedIntervalLoopingCall', new=testutils.ZeroIntervalLoopingCall) def test_instorage_create_snapshot_volume_with_mirror_replica(self): # Set replication target self.driver.configuration.set_override('replication_device', [self.rep_target]) self.driver.do_setup(self.ctxt) # Create sync copy replication volume. vol1, model_update = self._create_test_volume(self.mm_type) self.assertEqual('enabled', model_update['replication_status']) snap = self._generate_snapshot_info(vol1) self.driver.create_snapshot(snap) vol2 = self._generate_vol_info(None, None, self.mm_type) model_update = self.driver.create_volume_from_snapshot(vol2, snap) self.assertEqual('enabled', model_update['replication_status']) self._validate_replic_vol_creation(vol2) self.driver.delete_snapshot(snap) self.driver.delete_volume(vol1) self.driver.delete_volume(vol2) @mock.patch('oslo_service.loopingcall.FixedIntervalLoopingCall', new=testutils.ZeroIntervalLoopingCall) def test_instorage_create_cloned_volume_with_mirror_replica(self): # Set replication target self.driver.configuration.set_override('replication_device', [self.rep_target]) self.driver.do_setup(self.ctxt) # Create a source sync copy replication volume. src_volume, model_update = self._create_test_volume(self.mm_type) self.assertEqual('enabled', model_update['replication_status']) volume = self._generate_vol_info(None, None, self.mm_type) # Create a cloned volume from source volume. 
model_update = self.driver.create_cloned_volume(volume, src_volume) self.assertEqual('enabled', model_update['replication_status']) self._validate_replic_vol_creation(volume) self.driver.delete_volume(src_volume) self.driver.delete_volume(volume) def test_instorage_retype_from_mirror_to_none_replication(self): # Set replication target self.driver.configuration.set_override('replication_device', [self.rep_target]) self.driver.do_setup(self.ctxt) host = {'host': 'openstack@mcs#openstack'} volume, model_update = self._create_test_volume(self.mm_type) self.assertEqual('enabled', model_update['replication_status']) diff, _equal = volume_types.volume_types_diff( self.ctxt, self.mm_type['id'], self.gm_type['id']) # Change the mirror type self.assertRaises(exception.VolumeDriverException, self.driver.retype, self.ctxt, volume, self.gm_type, diff, host) diff, _equal = volume_types.volume_types_diff( self.ctxt, self.non_replica_type['id'], self.mm_type['id']) # Disable replica retyped, model_update = self.driver.retype( self.ctxt, volume, self.non_replica_type, diff, host) self.assertEqual('disabled', model_update['replication_status']) self._assert_vol_exists( instorage_const.REPLICA_AUX_VOL_PREFIX + volume['name'], False) self.driver.delete_volume(volume) self._assert_vol_exists(volume['name'], False) rel_info = self.driver._assistant.get_relationship_info(volume['name']) self.assertIsNone(rel_info) def test_instorage_retype_from_none_to_mirror_replication(self): # Set replication target self.driver.configuration.set_override('replication_device', [self.rep_target]) self.driver.do_setup(self.ctxt) host = {'host': 'openstack@mcs#openstack'} diff, _equal = volume_types.volume_types_diff( self.ctxt, self.non_replica_type['id'], self.mm_type['id']) volume, model_update = self._create_test_volume(self.non_replica_type) self.assertIsNone(model_update) # Enable replica retyped, model_update = self.driver.retype( self.ctxt, volume, self.mm_type, diff, host) volume['volume_type_id'] = self.mm_type['id'] self.assertEqual(fields.ReplicationStatus.ENABLED, model_update['replication_status']) self._validate_replic_vol_creation(volume) self.driver.delete_volume(volume) def test_instorage_extend_volume_replication(self): # Set replication target. self.driver.configuration.set_override('replication_device', [self.rep_target]) self.driver.do_setup(self.ctxt) # Create sync copy replication. volume, model_update = self._create_test_volume(self.mm_type) self.assertEqual('enabled', model_update['replication_status']) self.driver.extend_volume(volume, '13') attrs = self.driver._assistant.get_vdisk_attributes(volume['name']) vol_size = int(attrs['capacity']) / units.Gi self.assertAlmostEqual(vol_size, 13) attrs = self.driver._aux_backend_assistant.get_vdisk_attributes( instorage_const.REPLICA_AUX_VOL_PREFIX + volume['name']) vol_size = int(attrs['capacity']) / units.Gi self.assertAlmostEqual(vol_size, 13) self.driver.delete_volume(volume) self._validate_replic_vol_deletion(volume) def test_instorage_manage_existing_mismatch_with_volume_replication(self): # Set replication target. self.driver.configuration.set_override('replication_device', [self.rep_target]) self.driver.do_setup(self.ctxt) # Create replication volume. rep_volume, model_update = self._create_test_volume(self.mm_type) self.assertEqual(fields.ReplicationStatus.ENABLED, model_update['replication_status']) # Create non-replication volume. 
non_rep_volume, model_update = self._create_test_volume( self.non_replica_type) new_volume = self._generate_vol_info(None, None) ref = {'source-name': rep_volume['name']} new_volume['volume_type_id'] = self.non_replica_type['id'] self.assertRaises(exception.ManageExistingVolumeTypeMismatch, self.driver.manage_existing, new_volume, ref) ref = {'source-name': non_rep_volume['name']} new_volume['volume_type_id'] = self.mm_type['id'] self.assertRaises(exception.ManageExistingVolumeTypeMismatch, self.driver.manage_existing, new_volume, ref) ref = {'source-name': rep_volume['name']} new_volume['volume_type_id'] = self.gm_type['id'] self.assertRaises(exception.ManageExistingVolumeTypeMismatch, self.driver.manage_existing, new_volume, ref) self.driver.delete_volume(rep_volume) self.driver.delete_volume(new_volume) def test_instorage_manage_existing_with_volume_replication(self): # Set replication target. self.driver.configuration.set_override('replication_device', [self.rep_target]) self.driver.do_setup(self.ctxt) # Create replication volume. rep_volume, model_update = self._create_test_volume(self.mm_type) self.assertEqual('enabled', model_update['replication_status']) uid_of_master = self._get_vdisk_uid(rep_volume['name']) uid_of_aux = self._get_vdisk_uid( instorage_const.REPLICA_AUX_VOL_PREFIX + rep_volume['name']) new_volume = self._generate_vol_info(None, None, self.mm_type) ref = {'source-name': rep_volume['name']} self.driver.manage_existing(new_volume, ref) # Check the uid of the volume which has been renamed. uid_of_master_volume = self._get_vdisk_uid(new_volume['name']) uid_of_aux_volume = self._get_vdisk_uid( instorage_const.REPLICA_AUX_VOL_PREFIX + new_volume['name']) self.assertEqual(uid_of_master, uid_of_master_volume) self.assertEqual(uid_of_aux, uid_of_aux_volume) self.driver.delete_volume(rep_volume) def test_instorage_delete_volume_with_mirror_replication(self): # Set replication target. self.driver.configuration.set_override('replication_device', [self.rep_target]) self.driver.do_setup(self.ctxt) # Create sync copy replication. volume, model_update = self._create_test_volume(self.mm_type) self.assertEqual('enabled', model_update['replication_status']) self._validate_replic_vol_creation(volume) # Delete volume in non-failover state self.driver.delete_volume(volume) self._validate_replic_vol_deletion(volume) non_replica_vol, model_update = self._create_test_volume( self.non_replica_type) self.assertIsNone(model_update) volumes = [volume, non_replica_vol] # Delete volume in failover state self.driver.failover_host( self.ctxt, volumes, self.rep_target['backend_id']) # Delete non-replicate volume in a failover state self.assertRaises(exception.VolumeDriverException, self.driver.delete_volume, non_replica_vol) # Delete replicate volume in failover state self.driver.delete_volume(volume) self._validate_replic_vol_deletion(volume) self.driver.failover_host( self.ctxt, volumes, 'default') self.driver.delete_volume(non_replica_vol) self._assert_vol_exists(non_replica_vol['name'], False) @mock.patch.object(instorage_common.InStorageAssistant, 'delete_vdisk') @mock.patch.object(instorage_common.InStorageAssistant, 'delete_relationship') @mock.patch.object(instorage_common.InStorageAssistant, 'get_relationship_info') def test_delete_target_volume(self, get_relationship_info, delete_relationship, delete_vdisk): # Set replication target. 
self.driver.configuration.set_override('replication_device', [self.rep_target]) self.driver.do_setup(self.ctxt) fake_name = 'volume-%s' % fake.VOLUME_ID get_relationship_info.return_value = {'aux_vdisk_name': fake_name} self.driver._assistant.delete_rc_volume(fake_name) get_relationship_info.assert_called_once_with(fake_name) delete_relationship.assert_called_once_with(fake_name) delete_vdisk.assert_called_once_with(fake_name, False) @mock.patch.object(instorage_common.InStorageAssistant, 'delete_vdisk') @mock.patch.object(instorage_common.InStorageAssistant, 'delete_relationship') @mock.patch.object(instorage_common.InStorageAssistant, 'get_relationship_info') def test_delete_target_volume_no_relationship(self, get_relationship_info, delete_relationship, delete_vdisk): # Set replication target. self.driver.configuration.set_override('replication_device', [self.rep_target]) self.driver.do_setup(self.ctxt) fake_name = 'volume-%s' % fake.VOLUME_ID get_relationship_info.return_value = None self.driver._assistant.delete_rc_volume(fake_name) get_relationship_info.assert_called_once_with(fake_name) self.assertFalse(delete_relationship.called) self.assertTrue(delete_vdisk.called) @mock.patch.object(instorage_common.InStorageAssistant, 'delete_vdisk') @mock.patch.object(instorage_common.InStorageAssistant, 'delete_relationship') @mock.patch.object(instorage_common.InStorageAssistant, 'get_relationship_info') def test_delete_target_volume_fail(self, get_relationship_info, delete_relationship, delete_vdisk): fake_id = fake.VOLUME_ID fake_name = 'volume-%s' % fake_id get_relationship_info.return_value = {'aux_vdisk_name': fake_name} delete_vdisk.side_effect = Exception self.assertRaises(exception.VolumeDriverException, self.driver._assistant.delete_rc_volume, fake_name) get_relationship_info.assert_called_once_with(fake_name) delete_relationship.assert_called_once_with(fake_name) def test_instorage_failover_host_backend_error(self): self.driver.configuration.set_override('replication_device', [self.rep_target]) self.driver.do_setup(self.ctxt) # Create sync copy replication. mm_vol, model_update = self._create_test_volume(self.mm_type) self.assertEqual('enabled', model_update['replication_status']) volumes = [mm_vol] self.driver._replica_enabled = False self.assertRaises(exception.UnableToFailOver, self.driver.failover_host, self.ctxt, volumes, self.rep_target['backend_id']) self.driver._replica_enabled = True self.assertRaises(exception.InvalidReplicationTarget, self.driver.failover_host, self.ctxt, volumes, self.fake_target['backend_id']) with mock.patch.object(instorage_common.InStorageAssistant, 'get_system_info') as get_sys_info: get_sys_info.side_effect = [ exception.VolumeBackendAPIException(data='CMMVC6071E'), exception.VolumeBackendAPIException(data='CMMVC6071E')] self.assertRaises(exception.UnableToFailOver, self.driver.failover_host, self.ctxt, volumes, self.rep_target['backend_id']) self.driver._active_backend_id = self.rep_target['backend_id'] self.assertRaises(exception.UnableToFailOver, self.driver.failover_host, self.ctxt, volumes, 'default') self.driver.delete_volume(mm_vol) @mock.patch.object(instorage_common.InStorageAssistant, 'get_relationship_info') def test_failover_volume_relationship_error(self, get_relationship_info): # Create async copy replication. 
gm_vol, model_update = self._create_test_volume(self.gm_type) self.assertEqual('enabled', model_update['replication_status']) get_relationship_info.side_effect = [None, exception.VolumeDriverException] expected_list = [{'updates': {'replication_status': fields.ReplicationStatus.FAILOVER_ERROR, 'status': 'error'}, 'volume_id': gm_vol.id} ] volumes_update = self.driver._failover_replica_volumes(self.ctxt, [gm_vol]) self.assertEqual(expected_list, volumes_update) volumes_update = self.driver._failover_replica_volumes(self.ctxt, [gm_vol]) self.assertEqual(expected_list, volumes_update) @mock.patch.object(instorage_common.InStorageMCSCommonDriver, '_update_volume_stats') @mock.patch.object(instorage_common.InStorageMCSCommonDriver, '_update_instorage_state') def test_instorage_failover_host_replica_volumes(self, update_instorage_state, update_volume_stats): self.driver.configuration.set_override('replication_device', [self.rep_target]) self.driver.do_setup(self.ctxt) # Create sync copy replication. mm_vol, model_update = self._create_test_volume(self.mm_type) self.assertEqual('enabled', model_update['replication_status']) # Create async replication volume. gm_vol, model_update = self._create_test_volume(self.gm_type) self.assertEqual('enabled', model_update['replication_status']) volumes = [mm_vol, gm_vol] expected_list = [{'updates': {'replication_status': 'failed-over'}, 'volume_id': mm_vol['id']}, {'updates': {'replication_status': 'failed-over'}, 'volume_id': gm_vol['id']} ] target_id, volume_list = self.driver.failover_host( self.ctxt, volumes, self.rep_target['backend_id']) self.assertEqual(self.rep_target['backend_id'], target_id) self.assertEqual(expected_list, volume_list) self.assertEqual(self.driver._active_backend_id, target_id) self.assertEqual(self.driver._aux_backend_assistant, self.driver._assistant) self.assertEqual([self.driver._replica_target['pool_name']], self.driver._get_backend_pools()) self.assertTrue(update_instorage_state.called) self.assertTrue(update_volume_stats.called) self.driver.delete_volume(mm_vol) self.driver.delete_volume(gm_vol) target_id, volume_list = self.driver.failover_host( self.ctxt, volumes, None) self.assertEqual(self.rep_target['backend_id'], target_id) self.assertEqual([], volume_list) @mock.patch.object(instorage_common.InStorageMCSCommonDriver, '_update_volume_stats') @mock.patch.object(instorage_common.InStorageMCSCommonDriver, '_update_instorage_state') def test_instorage_failover_host_normal_volumes(self, update_instorage_state, update_volume_stats): self.driver.configuration.set_override('replication_device', [self.rep_target]) self.driver.do_setup(self.ctxt) # Create sync copy replication. mm_vol, model_update = self._create_test_volume(self.mm_type) self.assertEqual('enabled', model_update['replication_status']) mm_vol['status'] = 'in-use' # Create non-replication volume. 
non_replica_vol, model_update = self._create_test_volume( self.non_replica_type) self.assertIsNone(model_update) non_replica_vol['status'] = 'error' volumes = [mm_vol, non_replica_vol] rep_data1 = json.dumps({'previous_status': mm_vol['status']}) rep_data2 = json.dumps({'previous_status': non_replica_vol['status']}) expected_list = [{'updates': {'status': 'error', 'replication_driver_data': rep_data1}, 'volume_id': mm_vol['id']}, {'updates': {'status': 'error', 'replication_driver_data': rep_data2}, 'volume_id': non_replica_vol['id']}, ] target_id, volume_list = self.driver.failover_host( self.ctxt, volumes, self.rep_target['backend_id']) self.assertEqual(self.rep_target['backend_id'], target_id) self.assertEqual(expected_list, volume_list) self.assertEqual(self.driver._active_backend_id, target_id) self.assertEqual(self.driver._aux_backend_assistant, self.driver._assistant) self.assertEqual([self.driver._replica_target['pool_name']], self.driver._get_backend_pools()) self.assertTrue(update_instorage_state.called) self.assertTrue(update_volume_stats.called) target_id, volume_list = self.driver.failover_host( self.ctxt, volumes, None) self.assertEqual(self.rep_target['backend_id'], target_id) self.assertEqual([], volume_list) # Delete non-replicate volume in a failover state self.assertRaises(exception.VolumeDriverException, self.driver.delete_volume, non_replica_vol) self.driver.failover_host(self.ctxt, volumes, 'default') self.driver.delete_volume(mm_vol) self.driver.delete_volume(non_replica_vol) @mock.patch.object(instorage_common.InStorageAssistant, 'switch_relationship') @mock.patch.object(instorage_common.InStorageAssistant, 'stop_relationship') @mock.patch.object(instorage_common.InStorageAssistant, 'get_relationship_info') def test_failover_host_by_force_access(self, get_relationship_info, stop_relationship, switch_relationship): replica_obj = self.driver._get_replica_obj(instorage_const.SYNC) fake_vol_info = {'vol_id': '21345678-1234-5678-1234-567812345683', 'vol_name': 'fake-volume'} fake_vol = self._generate_vol_info(**fake_vol_info) target_vol = instorage_const.REPLICA_AUX_VOL_PREFIX + fake_vol['name'] context = mock.Mock get_relationship_info.side_effect = [{ 'aux_vdisk_name': 'replica-12345678-1234-5678-1234-567812345678', 'name': 'RC_name'}] switch_relationship.side_effect = exception.VolumeDriverException replica_obj.failover_volume_host(context, fake_vol) get_relationship_info.assert_called_once_with(target_vol) switch_relationship.assert_called_once_with('RC_name') stop_relationship.assert_called_once_with(target_vol, access=True) @mock.patch.object(instorage_common.InStorageMCSCommonDriver, '_update_volume_stats') @mock.patch.object(instorage_common.InStorageMCSCommonDriver, '_update_instorage_state') def test_instorage_failback_replica_volumes(self, update_instorage_state, update_volume_stats): self.driver.configuration.set_override('replication_device', [self.rep_target]) self.driver.do_setup(self.ctxt) # Create sync copy replication. mm_vol, model_update = self._create_test_volume(self.mm_type) self.assertEqual('enabled', model_update['replication_status']) # Create async copy replication. 
gm_vol, model_update = self._create_test_volume(self.gm_type) self.assertEqual('enabled', model_update['replication_status']) volumes = [gm_vol, mm_vol] failover_expect = [{'updates': {'replication_status': 'failed-over'}, 'volume_id': gm_vol['id']}, {'updates': {'replication_status': 'failed-over'}, 'volume_id': mm_vol['id']} ] failback_expect = [{'updates': {'replication_status': 'enabled', 'status': 'available'}, 'volume_id': gm_vol['id']}, {'updates': {'replication_status': 'enabled', 'status': 'available'}, 'volume_id': mm_vol['id']}, ] # Already failback target_id, volume_list = self.driver.failover_host( self.ctxt, volumes, 'default') self.assertIsNone(target_id) self.assertEqual([], volume_list) # fail over operation target_id, volume_list = self.driver.failover_host( self.ctxt, volumes, self.rep_target['backend_id']) self.assertEqual(self.rep_target['backend_id'], target_id) self.assertEqual(failover_expect, volume_list) self.assertTrue(update_instorage_state.called) self.assertTrue(update_volume_stats.called) # fail back operation target_id, volume_list = self.driver.failover_host( self.ctxt, volumes, 'default') self.assertEqual('default', target_id) self.assertEqual(failback_expect, volume_list) self.assertIsNone(self.driver._active_backend_id) self.assertEqual(fakes.MCS_POOLS, self.driver._get_backend_pools()) self.assertTrue(update_instorage_state.called) self.assertTrue(update_volume_stats.called) self.driver.delete_volume(mm_vol) self.driver.delete_volume(gm_vol) @mock.patch.object(instorage_common.InStorageMCSCommonDriver, '_update_volume_stats') @mock.patch.object(instorage_common.InStorageMCSCommonDriver, '_update_instorage_state') def test_instorage_failback_normal_volumes(self, update_instorage_state, update_volume_stats): self.driver.configuration.set_override('replication_device', [self.rep_target]) self.driver.do_setup(self.ctxt) # Create sync copy replication. mm_vol, model_update = self._create_test_volume(self.mm_type) self.assertEqual('enabled', model_update['replication_status']) mm_vol['status'] = 'in-use' # Create non-replication volume. 
non_replica_vol1, model_update = self._create_test_volume( self.non_replica_type) self.assertIsNone(model_update) non_replica_vol2, model_update = self._create_test_volume( self.non_replica_type) self.assertIsNone(model_update) non_replica_vol1['status'] = 'error' non_replica_vol2['status'] = 'available' volumes = [mm_vol, non_replica_vol1, non_replica_vol2] rep_data0 = json.dumps({'previous_status': mm_vol['status']}) rep_data1 = json.dumps({'previous_status': non_replica_vol1['status']}) rep_data2 = json.dumps({'previous_status': non_replica_vol2['status']}) failover_expect = [{'updates': {'status': 'error', 'replication_driver_data': rep_data0}, 'volume_id': mm_vol['id']}, {'updates': {'status': 'error', 'replication_driver_data': rep_data1}, 'volume_id': non_replica_vol1['id']}, {'updates': {'status': 'error', 'replication_driver_data': rep_data2}, 'volume_id': non_replica_vol2['id']}] failback_expect = [{'updates': {'status': 'in-use', 'replication_driver_data': ''}, 'volume_id': mm_vol['id']}, {'updates': {'status': 'error', 'replication_driver_data': ''}, 'volume_id': non_replica_vol1['id']}, {'updates': {'status': 'available', 'replication_driver_data': ''}, 'volume_id': non_replica_vol2['id']}] # Already failback target_id, volume_list = self.driver.failover_host( self.ctxt, volumes, 'default') self.assertIsNone(target_id) self.assertEqual([], volume_list) # fail over operation target_id, volume_list = self.driver.failover_host( self.ctxt, volumes, self.rep_target['backend_id']) self.assertEqual(self.rep_target['backend_id'], target_id) self.assertEqual(failover_expect, volume_list) self.assertTrue(update_instorage_state.called) self.assertTrue(update_volume_stats.called) # fail back operation mm_vol['replication_driver_data'] = json.dumps( {'previous_status': 'in-use'}) non_replica_vol1['replication_driver_data'] = json.dumps( {'previous_status': 'error'}) non_replica_vol2['replication_driver_data'] = json.dumps( {'previous_status': 'available'}) target_id, volume_list = self.driver.failover_host( self.ctxt, volumes, 'default') self.assertEqual('default', target_id) self.assertEqual(failback_expect, volume_list) self.assertIsNone(self.driver._active_backend_id) self.assertEqual(fakes.MCS_POOLS, self.driver._get_backend_pools()) self.assertTrue(update_instorage_state.called) self.assertTrue(update_volume_stats.called) self.driver.delete_volume(mm_vol) self.driver.delete_volume(non_replica_vol1) self.driver.delete_volume(non_replica_vol2) @mock.patch.object(instorage_common.InStorageAssistant, 'get_system_info') @mock.patch.object(instorage_rep.InStorageMCSReplicationManager, '_partnership_validate_create') def test_establish_partnership_with_local_sys(self, partnership_create, get_system_info): get_system_info.side_effect = [{'system_name': 'instorage-mcs-sim'}, {'system_name': 'instorage-mcs-sim'}] rep_mgr = self.driver._get_replica_mgr() rep_mgr.establish_target_partnership() self.assertFalse(partnership_create.called) @mock.patch.object(instorage_common.InStorageAssistant, 'get_system_info') def test_establish_target_partnership(self, get_system_info): source_system_name = 'instorage-mcs-sim' target_system_name = 'aux-mcs-sim' get_system_info.side_effect = [{'system_name': source_system_name}, {'system_name': target_system_name}] rep_mgr = self.driver._get_replica_mgr() rep_mgr.establish_target_partnership() partner_info = self.driver._assistant.get_partnership_info( source_system_name) self.assertIsNotNone(partner_info) self.assertEqual(source_system_name, 
partner_info['name']) partner_info = self.driver._assistant.get_partnership_info( source_system_name) self.assertIsNotNone(partner_info) self.assertEqual(source_system_name, partner_info['name']) @mock.patch.object(instorage_common.InStorageAssistant, 'start_relationship') def test_sync_replica_volumes_with_aux(self, start_relationship): # Create sync copy replication. mm_vol = self._generate_vol_info(None, None, self.mm_type) tgt_volume = instorage_const.REPLICA_AUX_VOL_PREFIX + mm_vol['name'] volumes = [mm_vol] fake_info = {'volume': 'fake', 'master_vdisk_name': 'fake', 'aux_vdisk_name': 'fake'} sync_state = {'state': instorage_const.REP_CONSIS_SYNC, 'primary': 'fake'} sync_state.update(fake_info) disconn_state = {'state': instorage_const.REP_IDL_DISC, 'primary': 'master'} disconn_state.update(fake_info) stop_state = {'state': instorage_const.REP_CONSIS_STOP, 'primary': 'aux'} stop_state.update(fake_info) with mock.patch.object(instorage_common.InStorageAssistant, 'get_relationship_info', mock.Mock(return_value=None)): self.driver._sync_with_aux(self.ctxt, volumes) self.assertFalse(start_relationship.called) with mock.patch.object(instorage_common.InStorageAssistant, 'get_relationship_info', mock.Mock(return_value=sync_state)): self.driver._sync_with_aux(self.ctxt, volumes) self.assertFalse(start_relationship.called) with mock.patch.object(instorage_common.InStorageAssistant, 'get_relationship_info', mock.Mock(return_value=disconn_state)): self.driver._sync_with_aux(self.ctxt, volumes) start_relationship.assert_called_once_with(tgt_volume) with mock.patch.object(instorage_common.InStorageAssistant, 'get_relationship_info', mock.Mock(return_value=stop_state)): self.driver._sync_with_aux(self.ctxt, volumes) start_relationship.assert_called_with(tgt_volume, primary='aux') self.driver.delete_volume(mm_vol) @mock.patch.object(instorage_common.InStorageAssistant, 'get_relationship_info') @mock.patch('oslo_service.loopingcall.FixedIntervalLoopingCall', new=testutils.ZeroIntervalLoopingCall) def test_wait_replica_vol_ready(self, get_relationship_info): # Create sync copy replication. 
mm_vol = self._generate_vol_info(None, None, self.mm_type) fake_info = {'volume': 'fake', 'master_vdisk_name': 'fake', 'aux_vdisk_name': 'fake', 'primary': 'fake'} sync_state = {'state': instorage_const.REP_CONSIS_SYNC} sync_state.update(fake_info) disconn_state = {'state': instorage_const.REP_IDL_DISC} disconn_state.update(fake_info) with mock.patch.object(instorage_common.InStorageAssistant, 'get_relationship_info', mock.Mock(return_value=None)): self.assertRaises(exception.VolumeBackendAPIException, self.driver._wait_replica_vol_ready, self.ctxt, mm_vol) with mock.patch.object(instorage_common.InStorageAssistant, 'get_relationship_info', mock.Mock(return_value=sync_state)): self.driver._wait_replica_vol_ready(self.ctxt, mm_vol) with mock.patch.object(instorage_common.InStorageAssistant, 'get_relationship_info', mock.Mock(return_value=disconn_state)): self.assertRaises(exception.VolumeBackendAPIException, self.driver._wait_replica_vol_ready, self.ctxt, mm_vol) ././@PaxHeader0000000000000000000000000000003200000000000011450 xustar000000000000000026 mtime=1759315577.28712 cinder-27.0.0/cinder/tests/unit/volume/drivers/lightos/0000775000175000017500000000000000000000000023112 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/volume/drivers/lightos/__init__.py0000664000175000017500000000000000000000000025211 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/volume/drivers/lightos/test_lightos_storage.py0000664000175000017500000012764400000000000027736 0ustar00zuulzuul00000000000000# Copyright (C) 2016-2022 Lightbits Labs Ltd. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
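# The tests in this module never contact a real LightOS cluster: each test
# replaces the REST client's ``send_cmd`` callable with an in-memory fake
# backed by the ``DBMock`` helper defined below (see
# ``send_cmd_default_mock`` in ``LightOSStorageVolumeDriverTest.setUp``).
# A minimal sketch of that stubbing pattern, using a hypothetical
# ``fake_send_cmd`` handler and made-up payloads, looks like:
#
#     import http.client as httpstatus
#
#     def fake_send_cmd(cmd, timeout, **kwargs):
#         # Answer only the commands the test cares about; fail loudly on
#         # anything unexpected.
#         if cmd == "get_cluster_info":
#             return (httpstatus.OK, {"UUID": "fake-uuid",
#                                     "subsystemNQN": "nqn.fake"})
#         raise RuntimeError("unhandled command: %s" % cmd)
#
#     driver.cluster.send_cmd = fake_send_cmd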
from copy import deepcopy import functools import hashlib import http.client as httpstatus import json import time from typing import Dict from typing import List from typing import Tuple from unittest import mock import uuid from cinder import context from cinder import db from cinder import exception from cinder.tests.unit import test from cinder.tests.unit import utils as test_utils from cinder.volume import configuration as conf from cinder.volume.drivers import lightos FAKE_LIGHTOS_CLUSTER_NODES: Dict[str, List] = { "nodes": [ {"UUID": "926e6df8-73e1-11ec-a624-000000000001", "nvmeEndpoint": "192.168.75.10:4420"}, {"UUID": "926e6df8-73e1-11ec-a624-000000000002", "nvmeEndpoint": "192.168.75.11:4420"}, {"UUID": "926e6df8-73e1-11ec-a624-000000000003", "nvmeEndpoint": "192.168.75.12:4420"} ] } IPV6_LIST = ['::192:168:75:10', '::192:168:75:11', '::192:168:75:12'] FAKE_LIGHTOS_CLUSTER_NODES_IPV6: Dict[str, List] = { "nodes": [ {"UUID": "926e6df8-73e1-11ec-a624-000000000001", "nvmeEndpoint": "[{}]:4420".format(IPV6_LIST[0])}, {"UUID": "926e6df8-73e1-11ec-a624-000000000002", "nvmeEndpoint": "[{}]:4420".format(IPV6_LIST[1])}, {"UUID": "926e6df8-73e1-11ec-a624-000000000003", "nvmeEndpoint": "[{}]:4420".format(IPV6_LIST[2])} ] } FAKE_LIGHTOS_CLUSTER_INFO: Dict[str, str] = { 'UUID': "926e6df8-73e1-11ec-a624-07ba3880f6cc", 'subsystemNQN': "nqn.2014-08.org.nvmexpress:NVMf:uuid:" "f4a89ce0-9fc2-4900-bfa3-00ad27995e7b" } FAKE_CLIENT_HOSTNQN = "hostnqn1" FAKE_HOST_IPS = ['10.10.0.1'] VOLUME_BACKEND_NAME = "lightos_backend" RESERVED_PERCENTAGE = 30 DEVICE_SCAN_ATTEMPTS_DEFAULT = 5 LIGHTOS_API_SERVICE_TIMEOUT = 30 VOLUME_BACKEND_NAME = "lightos_backend" RESERVED_PERCENTAGE = 30 DEFAULT_COMPRESSION = False class InitiatorConnectorFactoryMocker: @staticmethod def factory(protocol, root_helper, driver=None, use_multipath=False, device_scan_attempts=DEVICE_SCAN_ATTEMPTS_DEFAULT, arch=None, *args, **kwargs): return InitialConnectorMock() class InitialConnectorMock: nqn = FAKE_CLIENT_HOSTNQN found_discovery_client = True host_ips = FAKE_HOST_IPS def get_hostnqn(self): return self.__class__.nqn def find_dsc(self): return self.__class__.found_discovery_client def get_host_ips(self): return self.__class__.host_ips def get_connector_properties(self, root): return dict(nqn=self.__class__.nqn, found_dsc=self.__class__.found_discovery_client, host_ips=self.__class__.host_ips) def get_connector_properties(): connector = InitialConnectorMock() return connector.get_connector_properties(None) def get_vol_etag(volume): v = deepcopy(volume) v.pop("ETag", None) dump = json.dumps(v, sort_keys=True).encode('utf-8') return hashlib.md5(dump).hexdigest() class DBMock(object): def __init__(self): self.data = { "projects": {}, } def get_or_create_project(self, project_name) -> Dict: project = self.data["projects"].setdefault(project_name, {}) return project def get_project(self, project_name) -> Dict: project = self.data["projects"].get(project_name, None) return project if project else None def delete_project(self, project_name) -> Dict: assert project_name != "default", "can't delete default project" project = self.get_project(project_name) if not project: return None self.data["projects"].remove(project) return project def create_volume(self, volume) -> Tuple[int, Dict]: assert volume["project_name"] and volume["name"], "must be provided" project = self.get_or_create_project(volume["project_name"]) volumes = project.setdefault("volumes", []) existing_volume = next(iter([vol for vol in volumes if vol["name"] == volume["name"]]), 
None) if not existing_volume: volume["UUID"] = str(uuid.uuid4()) volumes.append(volume) return httpstatus.OK, volume return httpstatus.CONFLICT, None def get_volume_by_uuid(self, project_name, volume_uuid) -> Tuple[int, Dict]: assert project_name and volume_uuid, "must be provided" project = self.get_project(project_name) if not project: return httpstatus.NOT_FOUND, None proj_vols = project.get("volumes", None) if not proj_vols: return httpstatus.NOT_FOUND, None vol = next(iter([vol for vol in proj_vols if vol["UUID"] == volume_uuid]), None) return (httpstatus.OK, vol) if vol else (httpstatus.NOT_FOUND, None) def update_volume_by_uuid(self, project_name, volume_uuid, **kwargs) -> Tuple[int, Dict]: error_code, volume = self.get_volume_by_uuid(project_name, volume_uuid) if error_code != httpstatus.OK: return error_code, None etag = kwargs.get("etag", None) if etag: vol_etag = volume.get("ETag", None) if etag != vol_etag: return httpstatus.BAD_REQUEST, None if kwargs.get("size", None): volume["size"] = kwargs["size"] if kwargs.get("acl", None): volume["acl"] = {'values': kwargs.get('acl')} if kwargs.get("ip_acl", None): volume["IPAcl"] = {'values': kwargs.get('ip_acl')} volume["ETag"] = get_vol_etag(volume) return httpstatus.OK, volume def get_volume_by_name(self, project_name, volume_name) -> Tuple[int, Dict]: assert project_name and volume_name, "must be provided" project = self.get_project(project_name) if not project: return httpstatus.NOT_FOUND, None proj_vols = project.get("volumes", None) if not proj_vols: return httpstatus.NOT_FOUND, None vol = next(iter([vol for vol in proj_vols if vol["name"] == volume_name]), None) return (httpstatus.OK, vol) if vol else (httpstatus.NOT_FOUND, None) def delete_volume(self, project_name, volume_uuid) -> Tuple[int, Dict]: assert project_name and volume_uuid, "must be provided" project = self.get_project(project_name) if not project: return httpstatus.NOT_FOUND, None proj_vols = project.get("volumes", None) if not proj_vols: return httpstatus.NOT_FOUND, None for vol in proj_vols: if vol["UUID"] == volume_uuid: proj_vols.remove(vol) return httpstatus.OK, vol def update_volume(self, **kwargs): assert "project_name" in kwargs and kwargs["project_name"], \ "project_name must be provided" def create_snapshot(self, snapshot) -> Tuple[int, Dict]: assert snapshot["project_name"] and snapshot["name"], \ "must be provided" project = self.get_or_create_project(snapshot["project_name"]) snapshots = project.setdefault("snapshots", []) existing_snap = next(iter([snap for snap in snapshots if snap["name"] == snapshot["name"]]), None) if not existing_snap: snapshot["UUID"] = str(uuid.uuid4()) snapshots.append(snapshot) return httpstatus.OK, snapshot return httpstatus.CONFLICT, None def delete_snapshot(self, project_name, snapshot_uuid) -> Tuple[int, Dict]: assert project_name and snapshot_uuid, "must be provided" project = self.get_project(project_name) if not project: return httpstatus.NOT_FOUND, None proj_snaps = project.get("snapshots", None) if not proj_snaps: return httpstatus.NOT_FOUND, None for snap in proj_snaps: if snap["UUID"] == snapshot_uuid: proj_snaps.remove(snap) return httpstatus.OK, snap def get_snapshot_by_name(self, project_name, snapshot_name) -> Tuple[int, Dict]: assert project_name and snapshot_name, "must be provided" project = self.get_project(project_name) if not project: return httpstatus.NOT_FOUND, None proj_snaps = project.get("snapshots", None) if not proj_snaps: return httpstatus.NOT_FOUND, None snap = next(iter([snap for snap in 
proj_snaps if snap["name"] == snapshot_name]), None) return (httpstatus.OK, snap) if snap else (httpstatus.NOT_FOUND, None) def get_snapshot_by_uuid(self, project_name, snapshot_uuid) -> Tuple[int, Dict]: assert project_name and snapshot_uuid, "must be provided" project = self.get_project(project_name) if not project: return httpstatus.NOT_FOUND, None proj_snaps = project.get("snapshots", None) if not proj_snaps: return httpstatus.NOT_FOUND, None snap = next(iter([snap for snap in proj_snaps if snap["UUID"] == snapshot_uuid]), None) return (httpstatus.OK, snap) if snap else (httpstatus.NOT_FOUND, None) class LightOSStorageVolumeDriverTest(test.TestCase): def setUp(self): """Initialize LightOS Storage Driver.""" super(LightOSStorageVolumeDriverTest, self).setUp() configuration = mock.Mock(conf.Configuration) configuration.lightos_api_address = \ "10.10.10.71,10.10.10.72,10.10.10.73" configuration.lightos_api_port = 443 configuration.lightos_jwt = None configuration.lightos_snapshotname_prefix = 'openstack_' configuration.lightos_intermediate_snapshot_name_prefix = 'for_clone_' configuration.lightos_default_compression_enabled = ( DEFAULT_COMPRESSION) configuration.lightos_default_num_replicas = 3 configuration.lightos_use_ipacl = True configuration.num_volume_device_scan_tries = ( DEVICE_SCAN_ATTEMPTS_DEFAULT) configuration.lightos_api_service_timeout = LIGHTOS_API_SERVICE_TIMEOUT configuration.driver_ssl_cert_verify = False # for some reason this value is not initialized by the driver parent # configs configuration.volume_name_template = 'volume-%s' configuration.initiator_connector = ( "cinder.tests.unit.volume.drivers.lightos." "test_lightos_storage.InitiatorConnectorFactoryMocker") configuration.volume_backend_name = VOLUME_BACKEND_NAME configuration.reserved_percentage = RESERVED_PERCENTAGE configuration.lightos_api_service_snapshots_max_calls = 5 def mocked_safe_get(config, variable_name): if hasattr(config, variable_name): return config.__getattribute__(variable_name) else: return None configuration.safe_get = functools.partial(mocked_safe_get, configuration) self.driver = lightos.LightOSVolumeDriver(configuration=configuration) self.ctxt = context.get_admin_context() self.db: DBMock = DBMock() # define a default send_cmd override to return default values. 
def send_cmd_default_mock(cmd, timeout, **kwargs): if cmd == "get_nodes": return (httpstatus.OK, FAKE_LIGHTOS_CLUSTER_NODES) if cmd == "get_node": self.assertTrue(kwargs["UUID"]) for node in FAKE_LIGHTOS_CLUSTER_NODES["nodes"]: if kwargs["UUID"] == node["UUID"]: return (httpstatus.OK, node) return (httpstatus.NOT_FOUND, node) elif cmd == "get_cluster_info": return (httpstatus.OK, FAKE_LIGHTOS_CLUSTER_INFO) elif cmd == "create_volume": project_name = kwargs["project_name"] ipacl = ( {'values': ['ALLOW_NONE']} if self.driver.configuration.lightos_use_ipacl else {'values': ['ALLOW_ANY']}) volume = { "project_name": project_name, "name": kwargs["name"], "size": kwargs["size"], "n_replicas": kwargs["n_replicas"], "compression": kwargs["compression"], "src_snapshot_name": kwargs["src_snapshot_name"], "acl": {'values': kwargs.get('acl')}, "IPAcl": ipacl, "state": "Available", "qosPolicyUUID": kwargs.get("qos_policy", None) } volume["ETag"] = get_vol_etag(volume) code, new_vol = self.db.create_volume(volume) return (code, new_vol) elif cmd == "delete_volume": return self.db.delete_volume(kwargs["project_name"], kwargs["volume_uuid"]) elif cmd == "get_volume": return self.db.get_volume_by_uuid(kwargs["project_name"], kwargs["volume_uuid"]) elif cmd == "get_volume_by_name": return self.db.get_volume_by_name(kwargs["project_name"], kwargs["volume_name"]) elif cmd == "extend_volume": size = kwargs.get("size", None) return self.db.update_volume_by_uuid(kwargs["project_name"], kwargs["volume_uuid"], size=size) elif cmd == "create_snapshot": snapshot = { "project_name": kwargs.get("project_name", None), "name": kwargs.get("name", None), "state": "Available", } return self.db.create_snapshot(snapshot) elif cmd == "delete_snapshot": return self.db.delete_snapshot(kwargs["project_name"], kwargs["snapshot_uuid"]) elif cmd == "get_snapshot": return self.db.get_snapshot_by_uuid(kwargs["project_name"], kwargs["snapshot_uuid"]) elif cmd == "get_snapshot_by_name": return self.db.get_snapshot_by_name(kwargs["project_name"], kwargs["snapshot_name"]) elif cmd == "update_volume": return self.db.update_volume_by_uuid(**kwargs) else: raise RuntimeError( f"'{cmd}' is not implemented. kwargs: {kwargs}") self.driver.cluster.send_cmd = send_cmd_default_mock def test_setup_should_fail_if_lightos_client_cant_auth_cluster(self): """Verify lightos_client fail with bad auth.""" def side_effect(cmd, timeout): if cmd == "get_cluster_info": return (httpstatus.UNAUTHORIZED, None) else: raise RuntimeError() self.driver.cluster.send_cmd = side_effect self.assertRaises(exception.InvalidAuthKey, self.driver.do_setup, None) def test_setup_should_succeed(self): """Test that lightos_client succeed.""" self.driver.do_setup(None) def test_create_volume_should_succeed(self): """Test that lightos_client succeed.""" self.driver.do_setup(None) vol_type = test_utils.create_volume_type(self.ctxt, self, name='my_vol_type') volume = test_utils.create_volume(self.ctxt, size=4, volume_type_id=vol_type.id) self.driver.create_volume(volume) self.driver.delete_volume(volume) db.volume_destroy(self.ctxt, volume.id) def test_create_volume_ipacl_off(self): """Test that lightos_client succeed. 
ipacl false""" self.driver.configuration.lightos_use_ipacl = False self.driver.do_setup(None) vol_type = test_utils.create_volume_type(self.ctxt, self, name='my_vol_type') volume = test_utils.create_volume(self.ctxt, size=4, volume_type_id=vol_type.id) self.driver.create_volume(volume) self.driver.delete_volume(volume) db.volume_destroy(self.ctxt, volume.id) def test_create_volume_same_volume_twice_succeed(self): """Test that creating an already existing volume succeeds.""" self.driver.do_setup(None) vol_type = test_utils.create_volume_type(self.ctxt, self, name='my_vol_type') volume = test_utils.create_volume(self.ctxt, size=4, volume_type_id=vol_type.id) self.driver.create_volume(volume) self.driver.create_volume(volume) self.driver.delete_volume(volume) db.volume_destroy(self.ctxt, volume.id) def _create_volume_in_failed_state(self, vol_state): """Verify the scenario of a volume created in a failed state: the driver is expected to issue a deletion command and raise an exception. """ def send_cmd_mock(cmd, **kwargs): if cmd == "create_volume": project_name = kwargs["project_name"] ipacl = ( {'values': ['ALLOW_NONE']} if self.driver.configuration.lightos_use_ipacl else {'values': ['ALLOW_ANY']}) volume = { "project_name": project_name, "name": kwargs["name"], "size": kwargs["size"], "n_replicas": kwargs["n_replicas"], "compression": kwargs["compression"], "src_snapshot_name": kwargs["src_snapshot_name"], "acl": {'values': kwargs.get('acl')}, "IPAcl": ipacl, "state": vol_state, } volume["ETag"] = get_vol_etag(volume) code, new_vol = self.db.create_volume(volume) return (code, new_vol) else: return cluster_send_cmd(cmd, **kwargs) self.driver.do_setup(None) cluster_send_cmd = deepcopy(self.driver.cluster.send_cmd) self.driver.cluster.send_cmd = send_cmd_mock vol_type = test_utils.create_volume_type(self.ctxt, self, name='my_vol_type') volume = test_utils.create_volume(self.ctxt, size=4, volume_type_id=vol_type.id) self.assertRaises(exception.VolumeBackendAPIException, self.driver.create_volume, volume) proj = self.db.data["projects"][lightos.LIGHTOS_DEFAULT_PROJECT_NAME] actual_volumes = proj["volumes"] self.assertEqual(0, len(actual_volumes)) db.volume_destroy(self.ctxt, volume.id) def test_create_volume_in_failed_state(self): self._create_volume_in_failed_state("Failed") def test_create_volume_in_rollback_state(self): self._create_volume_in_failed_state("Rollback") def test_create_volume_in_migrating_state_succeed(self): """Verify the scenario of a volume created in a migrating state: the driver is expected to succeed. 
""" def send_cmd_mock(cmd, **kwargs): if cmd == "create_volume": project_name = kwargs["project_name"] ipacl = ( {'values': ['ALLOW_NONE']} if self.driver.configuration.lightos_use_ipacl else {'values': ['ALLOW_ANY']}) volume = { "project_name": project_name, "name": kwargs["name"], "size": kwargs["size"], "n_replicas": kwargs["n_replicas"], "compression": kwargs["compression"], "src_snapshot_name": kwargs["src_snapshot_name"], "acl": {'values': kwargs.get('acl')}, "IPAcl": ipacl, "state": "Migrating", } volume["ETag"] = get_vol_etag(volume) code, new_vol = self.db.create_volume(volume) return (code, new_vol) else: return cluster_send_cmd(cmd, **kwargs) self.driver.do_setup(None) cluster_send_cmd = deepcopy(self.driver.cluster.send_cmd) self.driver.cluster.send_cmd = send_cmd_mock vol_type = test_utils.create_volume_type(self.ctxt, self, name='my_vol_type') volume = test_utils.create_volume(self.ctxt, size=4, volume_type_id=vol_type.id) self.driver.create_volume(volume) proj = self.db.data["projects"][lightos.LIGHTOS_DEFAULT_PROJECT_NAME] actual_volumes = proj["volumes"] self.assertEqual(1, len(actual_volumes)) db.volume_destroy(self.ctxt, volume.id) def test_delete_volume_fail_if_not_created(self): """Test that lightos_client fail creating an already exists volume.""" self.driver.do_setup(None) vol_type = test_utils.create_volume_type(self.ctxt, self, name='my_vol_type') volume = test_utils.create_volume(self.ctxt, size=4, volume_type_id=vol_type.id) self.driver.delete_volume(volume) db.volume_destroy(self.ctxt, volume.id) def test_extend_volume_should_succeed(self): self.driver.do_setup(None) vol_type = test_utils.create_volume_type(self.ctxt, self, name='my_vol_type') volume = test_utils.create_volume(self.ctxt, size=4, volume_type_id=vol_type.id) self.driver.create_volume(volume) self.driver.extend_volume(volume, 6) self.driver.delete_volume(volume) db.volume_destroy(self.ctxt, volume.id) def test_get_volume_specs_compression_True(self): self.driver.do_setup(None) vol_type = test_utils.create_volume_type( self.ctxt, self, extra_specs={'compression': 'True'}, name='my_vol_typ1') vol_type2 = test_utils.create_volume_type( self.ctxt, self, extra_specs={'compression': ' True'}, name='my_vol_type2') vol_type3 = test_utils.create_volume_type( self.ctxt, self, name='my_vol_type3') volume1 = test_utils.create_volume(self.ctxt, size=4, volume_type_id=vol_type.id) volume2 = test_utils.create_volume(self.ctxt, size=4, volume_type_id=vol_type2.id) volume3 = test_utils.create_volume(self.ctxt, size=4, volume_type_id=vol_type3.id) compression, _, _, _ = self.driver._get_volume_specs(volume1) self.assertTrue(compression == "True") compression, _, _, _ = self.driver._get_volume_specs(volume2) self.assertTrue(compression == "True") compression, _, _, _ = self.driver._get_volume_specs(volume3) self.assertTrue(compression == "False") db.volume_destroy(self.ctxt, volume1.id) db.volume_destroy(self.ctxt, volume2.id) db.volume_destroy(self.ctxt, volume3.id) def test_get_volume_specs_compression_False(self): self.driver.do_setup(None) self.driver.configuration.lightos_default_compression_enabled = True vol_type = test_utils.create_volume_type( self.ctxt, self, extra_specs={'compression': 'False'}, name='my_vol_typ1') vol_type2 = test_utils.create_volume_type( self.ctxt, self, extra_specs={'compression': ' False'}, name='my_vol_type2') vol_type3 = test_utils.create_volume_type( self.ctxt, self, name='my_vol_type3') volume1 = test_utils.create_volume(self.ctxt, size=4, volume_type_id=vol_type.id) volume2 = 
test_utils.create_volume(self.ctxt, size=4, volume_type_id=vol_type2.id) volume3 = test_utils.create_volume(self.ctxt, size=4, volume_type_id=vol_type3.id) compression, _, _, _ = self.driver._get_volume_specs(volume1) self.assertTrue(compression == "False") compression, _, _, _ = self.driver._get_volume_specs(volume2) self.assertTrue(compression == "False") compression, _, _, _ = self.driver._get_volume_specs(volume3) self.assertTrue(compression == "True") db.volume_destroy(self.ctxt, volume1.id) db.volume_destroy(self.ctxt, volume2.id) db.volume_destroy(self.ctxt, volume3.id) def test_extend_volume_should_fail_if_volume_does_not_exist(self): self.driver.do_setup(None) vol_type = test_utils.create_volume_type(self.ctxt, self, name='my_vol_type') volume = test_utils.create_volume(self.ctxt, size=4, volume_type_id=vol_type.id) self.assertRaises(exception.VolumeNotFound, self.driver.extend_volume, volume, 6) db.volume_destroy(self.ctxt, volume.id) def test_create_snapshot(self): self.driver.do_setup(None) vol_type = test_utils.create_volume_type(self.ctxt, self, name='my_vol_type') volume = test_utils.create_volume(self.ctxt, size=4, volume_type_id=vol_type.id) snapshot = test_utils.create_snapshot(self.ctxt, volume_id=volume.id) self.driver.create_volume(volume) self.driver.create_snapshot(snapshot) self.driver.delete_volume(volume) db.volume_destroy(self.ctxt, volume.id) @mock.patch.object(time, "sleep", return_value=None) def test_create_snapshot_fail_bad_request(self, mock_sleep): def send_cmd_mock(cmd, **kwargs): if cmd == "create_snapshot": return (httpstatus.BAD_REQUEST, {}) else: return cluster_send_cmd(cmd, **kwargs) self.driver.do_setup(None) cluster_send_cmd = deepcopy(self.driver.cluster.send_cmd) self.driver.cluster.send_cmd = send_cmd_mock vol_type = test_utils.create_volume_type(self.ctxt, self, name='my_vol_type') volume = test_utils.create_volume(self.ctxt, size=4, volume_type_id=vol_type.id) snapshot = test_utils.create_snapshot(self.ctxt, volume_id=volume.id) self.driver.create_volume(volume) self.assertRaises(exception.VolumeBackendAPIException, self.driver.create_snapshot, snapshot) self.driver.delete_volume(volume) db.volume_destroy(self.ctxt, volume.id) def test_delete_snapshot(self): self.driver.do_setup(None) vol_type = test_utils.create_volume_type(self.ctxt, self, name='my_vol_type') volume = test_utils.create_volume(self.ctxt, size=4, volume_type_id=vol_type.id) snapshot = test_utils.create_snapshot(self.ctxt, volume_id=volume.id) self.driver.create_volume(volume) self.driver.create_snapshot(snapshot) self.driver.delete_snapshot(snapshot) self.driver.delete_volume(volume) db.volume_destroy(self.ctxt, volume.id) def test_create_volume_from_snapshot(self): self.driver.do_setup(None) vol_type = test_utils.create_volume_type(self.ctxt, self, name='my_vol_type') volume = test_utils.create_volume(self.ctxt, size=4, volume_type_id=vol_type.id) snapshot = test_utils.create_snapshot(self.ctxt, volume_id=volume.id) self.driver.create_volume_from_snapshot(volume, snapshot) proj = self.db.data["projects"][lightos.LIGHTOS_DEFAULT_PROJECT_NAME] actual_volumes = proj["volumes"] self.assertEqual(1, len(actual_volumes)) self.driver.delete_snapshot(snapshot) self.driver.delete_volume(volume) db.volume_destroy(self.ctxt, volume.id) db.snapshot_destroy(self.ctxt, snapshot.id) def test_initialize_connection(self): InitialConnectorMock.nqn = "hostnqn1" InitialConnectorMock.found_discovery_client = True InitialConnectorMock.host_ips = FAKE_HOST_IPS self.driver.do_setup(None) vol_type = 
test_utils.create_volume_type(self.ctxt, self, name='my_vol_type') volume = test_utils.create_volume(self.ctxt, size=4, volume_type_id=vol_type.id) self.driver.create_volume(volume) connection_props = \ self.driver.initialize_connection(volume, get_connector_properties()) self.assertIn('driver_volume_type', connection_props) self.assertEqual('lightos', connection_props['driver_volume_type']) self.assertEqual(FAKE_LIGHTOS_CLUSTER_INFO['subsystemNQN'], connection_props['data']['subsysnqn']) self.assertEqual( self.db.data['projects']['default']['volumes'][0]['UUID'], connection_props['data']['uuid']) self.assertEqual( self.db.data['projects']['default']['volumes'][0]['IPAcl'], {'values': FAKE_HOST_IPS}) self.driver.delete_volume(volume) db.volume_destroy(self.ctxt, volume.id) def test_initialize_connection_ipacl_disabled(self): self.driver.configuration.lightos_use_ipacl = False InitialConnectorMock.nqn = "hostnqn1" InitialConnectorMock.found_discovery_client = True self.driver.do_setup(None) vol_type = test_utils.create_volume_type(self.ctxt, self, name='my_vol_type') volume = test_utils.create_volume(self.ctxt, size=4, volume_type_id=vol_type.id) self.driver.create_volume(volume) connection_props = \ self.driver.initialize_connection(volume, get_connector_properties()) self.assertIn('driver_volume_type', connection_props) self.assertEqual('lightos', connection_props['driver_volume_type']) self.assertEqual(FAKE_LIGHTOS_CLUSTER_INFO['subsystemNQN'], connection_props['data']['subsysnqn']) self.assertEqual( self.db.data['projects']['default']['volumes'][0]['UUID'], connection_props['data']['uuid']) self.assertEqual( self.db.data['projects']['default']['volumes'][0]['IPAcl'], {'values': ['ALLOW_ANY']}) self.driver.delete_volume(volume) db.volume_destroy(self.ctxt, volume.id) def test_initialize_connection_mirgrating_volume(self): InitialConnectorMock.nqn = "hostnqn1" InitialConnectorMock.found_discovery_client = True def send_cmd_mock(cmd, **kwargs): if cmd == "create_volume": project_name = kwargs["project_name"] ipacl = ( {'values': ['ALLOW_NONE']} if self.driver.configuration.lightos_use_ipacl else {'values': ['ALLOW_ANY']}) volume = { "project_name": project_name, "name": kwargs["name"], "size": kwargs["size"], "n_replicas": kwargs["n_replicas"], "compression": kwargs["compression"], "src_snapshot_name": kwargs["src_snapshot_name"], "acl": {'values': kwargs.get('acl')}, "IPAcl": ipacl, "state": "Migrating", } volume["ETag"] = get_vol_etag(volume) code, new_vol = self.db.create_volume(volume) return (code, new_vol) else: return cluster_send_cmd(cmd, **kwargs) self.driver.do_setup(None) cluster_send_cmd = deepcopy(self.driver.cluster.send_cmd) self.driver.cluster.send_cmd = send_cmd_mock vol_type = test_utils.create_volume_type(self.ctxt, self, name='my_vol_type') volume = test_utils.create_volume(self.ctxt, size=4, volume_type_id=vol_type.id) self.driver.create_volume(volume) connection_props = ( self.driver.initialize_connection(volume, get_connector_properties())) self.assertIn('driver_volume_type', connection_props) self.assertEqual('lightos', connection_props['driver_volume_type']) self.assertEqual(FAKE_LIGHTOS_CLUSTER_INFO['subsystemNQN'], connection_props['data']['subsysnqn']) self.assertEqual( self.db.data['projects']['default']['volumes'][0]['UUID'], connection_props['data']['uuid']) self.driver.delete_volume(volume) db.volume_destroy(self.ctxt, volume.id) def test_initialize_connection_ipv6(self): def side_effect(cmd, timeout, **kwargs): if cmd == "get_nodes": return (httpstatus.OK, 
FAKE_LIGHTOS_CLUSTER_NODES_IPV6) else: return cluster_send_cmd(cmd, timeout, **kwargs) cluster_send_cmd = deepcopy(self.driver.cluster.send_cmd) self.driver.cluster.send_cmd = side_effect InitialConnectorMock.nqn = "hostnqn1" InitialConnectorMock.found_discovery_client = True self.driver.do_setup(None) vol_type = test_utils.create_volume_type(self.ctxt, self, name='my_vol_type') volume = test_utils.create_volume(self.ctxt, size=4, volume_type_id=vol_type.id) self.driver.create_volume(volume) connection_props = ( self.driver.initialize_connection(volume, get_connector_properties())) self.assertIn('driver_volume_type', connection_props) self.assertEqual('lightos', connection_props['driver_volume_type']) self.assertEqual(FAKE_LIGHTOS_CLUSTER_INFO['subsystemNQN'], connection_props['data']['subsysnqn']) self.assertEqual( self.db.data['projects']['default']['volumes'][0]['UUID'], connection_props['data']['uuid']) for connection in connection_props['data']['lightos_nodes']: self.assertIn(connection, IPV6_LIST) self.driver.delete_volume(volume) db.volume_destroy(self.ctxt, volume.id) def test_initialize_connection_no_hostnqn_should_fail(self): InitialConnectorMock.nqn = "" InitialConnectorMock.found_discovery_client = True self.driver.do_setup(None) vol_type = test_utils.create_volume_type(self.ctxt, self, name='my_vol_type') volume = test_utils.create_volume(self.ctxt, size=4, volume_type_id=vol_type.id) self.driver.create_volume(volume) self.assertRaises(exception.VolumeBackendAPIException, self.driver.initialize_connection, volume, get_connector_properties()) self.driver.delete_volume(volume) db.volume_destroy(self.ctxt, volume.id) def test_initialize_connection_no_dsc_should_fail(self): InitialConnectorMock.nqn = "hostnqn1" InitialConnectorMock.found_discovery_client = False self.driver.do_setup(None) vol_type = test_utils.create_volume_type(self.ctxt, self, name='my_vol_type') volume = test_utils.create_volume(self.ctxt, size=4, volume_type_id=vol_type.id) self.driver.create_volume(volume) self.assertRaises(exception.VolumeBackendAPIException, self.driver.initialize_connection, volume, get_connector_properties()) self.driver.delete_volume(volume) db.volume_destroy(self.ctxt, volume.id) def test_terminate_connection_with_hostnqn(self): InitialConnectorMock.nqn = "hostnqn1" InitialConnectorMock.found_discovery_client = True self.driver.do_setup(None) vol_type = test_utils.create_volume_type(self.ctxt, self, name='my_vol_type') volume = test_utils.create_volume(self.ctxt, size=4, volume_type_id=vol_type.id) self.driver.create_volume(volume) self.driver.terminate_connection(volume, get_connector_properties()) self.driver.delete_volume(volume) db.volume_destroy(self.ctxt, volume.id) def test_terminate_connection_with_empty_hostnqn_should_fail(self): InitialConnectorMock.nqn = "" InitialConnectorMock.found_discovery_client = True self.driver.do_setup(None) vol_type = test_utils.create_volume_type(self.ctxt, self, name='my_vol_type') volume = test_utils.create_volume(self.ctxt, size=4, volume_type_id=vol_type.id) self.driver.create_volume(volume) self.assertRaises(exception.VolumeBackendAPIException, self.driver.terminate_connection, volume, get_connector_properties()) self.driver.delete_volume(volume) db.volume_destroy(self.ctxt, volume.id) def test_force_terminate_connection_with_empty_hostnqn(self): InitialConnectorMock.nqn = "" InitialConnectorMock.found_discovery_client = True self.driver.do_setup(None) vol_type = test_utils.create_volume_type(self.ctxt, self, name='my_vol_type') volume = 
test_utils.create_volume(self.ctxt, size=4, volume_type_id=vol_type.id) self.driver.create_volume(volume) self.driver.terminate_connection(volume, get_connector_properties(), force=True) self.driver.delete_volume(volume) db.volume_destroy(self.ctxt, volume.id) def test_check_for_setup_error(self): InitialConnectorMock.nqn = "hostnqn1" InitialConnectorMock.found_discovery_client = True self.driver.do_setup(None) self.driver.check_for_setup_error() def test_check_for_setup_error_no_subsysnqn_should_fail(self): InitialConnectorMock.nqn = "hostnqn1" InitialConnectorMock.found_discovery_client = True self.driver.do_setup(None) self.driver.cluster.subsystemNQN = "" self.assertRaises(exception.VolumeBackendAPIException, self.driver.check_for_setup_error) def test_check_for_setup_error_no_hostnqn_should_fail(self): InitialConnectorMock.nqn = "" InitialConnectorMock.found_discovery_client = True self.driver.do_setup(None) self.assertRaises(exception.VolumeBackendAPIException, self.driver.check_for_setup_error) def test_check_ip_format(self): InitialConnectorMock.nqn = "" InitialConnectorMock.found_discovery_client = True self.driver.do_setup(None) host = "1.1.1.1" port = 8009 endpoint = self.driver.cluster._format_endpoint(host, port) self.assertEqual("1.1.1.1:8009", endpoint) host = "::1111" endpoint = self.driver.cluster._format_endpoint(host, port) self.assertEqual("[::1111]:8009", endpoint) def test_check_for_setup_error_no_dsc_should_succeed(self): InitialConnectorMock.nqn = "hostnqn1" InitialConnectorMock.found_discovery_client = False self.driver.do_setup(None) self.driver.check_for_setup_error() def test_create_clone(self): self.driver.do_setup(None) vol_type = test_utils.create_volume_type(self.ctxt, self, name='my_vol_type') volume = test_utils.create_volume(self.ctxt, size=4, volume_type_id=vol_type.id) clone = test_utils.create_volume(self.ctxt, size=4, volume_type_id=vol_type.id) self.driver.create_volume(volume) self.driver.create_cloned_volume(clone, volume) self.driver.delete_volume(volume) self.driver.delete_volume(clone) db.volume_destroy(self.ctxt, volume.id) db.volume_destroy(self.ctxt, clone.id) def test_get_volume_stats(self): """Test that lightos_client succeed.""" self.driver.do_setup(None) volumes_data = self.driver.get_volume_stats(refresh=False) assert len(volumes_data) == 0, "Expected empty config" volumes_data = self.driver.get_volume_stats(refresh=True) assert volumes_data['vendor_name'] == 'LightOS Storage', \ "Expected 'LightOS Storage', received %s" % \ volumes_data['vendor_name'] assert volumes_data['volume_backend_name'] == VOLUME_BACKEND_NAME, \ "Expected %s, received %s" % \ (VOLUME_BACKEND_NAME, volumes_data['volume_backend_name']) assert volumes_data['driver_version'] == self.driver.VERSION, \ "Expected %s, received %s" % \ (self.driver.VERSION, volumes_data['driver_version']) assert volumes_data['storage_protocol'] == "lightos", \ "Expected 'lightos', received %s" % \ volumes_data['storage_protocol'] assert volumes_data['reserved_percentage'] == RESERVED_PERCENTAGE, \ "Expected %d, received %s" % \ (RESERVED_PERCENTAGE, volumes_data['reserved_percentage']) assert volumes_data['QoS_support'] is True, \ "Expected False, received %s" % volumes_data['QoS_support'] assert volumes_data['online_extend_support'] is True, \ "Expected True, received %s" % \ volumes_data['online_extend_support'] assert volumes_data['thin_provisioning_support'] is True, \ "Expected True, received %s" % \ volumes_data['thin_provisioning_support'] assert volumes_data['compression'] == 
[True, False], \ "Expected [True, False], received %s" % volumes_data['compression'] assert volumes_data['multiattach'] is True, \ "Expected True, received %s" % volumes_data['multiattach'] assert volumes_data['free_capacity_gb'] == 'infinite', \ "Expected 'infinite', received %s" % \ volumes_data['free_capacity_gb'] ././@PaxHeader0000000000000000000000000000003200000000000011450 xustar000000000000000026 mtime=1759315577.28712 cinder-27.0.0/cinder/tests/unit/volume/drivers/nec/0000775000175000017500000000000000000000000022206 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/volume/drivers/nec/__init__.py0000664000175000017500000000000000000000000024305 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/volume/drivers/nec/test_volume.py0000664000175000017500000024115300000000000025134 0ustar00zuulzuul00000000000000# # Copyright (c) 2016 NEC Corporation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import time from unittest import mock import ddt from cinder import context from cinder import exception from cinder.objects import volume_attachment from cinder.tests.unit import fake_constants as constants from cinder.tests.unit.fake_volume import fake_volume_obj from cinder.tests.unit import test from cinder.volume import configuration as conf from cinder.volume.drivers.nec import cli from cinder.volume.drivers.nec import volume_common from cinder.volume.drivers.nec import volume_helper from cinder.volume import qos_specs from cinder.volume import volume_types xml_out = '''
M310
0000 LX 287RbQoP7VdwR1WsPC2fZT 1073741824 0000 --- MV
0001 backup_SDV0001 5368709120 0001 (invalid attribute) IV
0003 LX 31HxzqBiAFTUxxOlcVn3EA 1073741824 0001 --- RV
0004 LX 287RbQoP7VdwR1WsPC2fZT_back 1073741824 0000 --- RV
0005 LX 20000009910200140005 10737418240 0000 --- RV
0006 LX 287RbQoP7VdwR1WsPC2fZT_l 10737418240 0000 --- IV
0007 20000009910200140007 10737418240 0000 --- IV
0008 20000009910200140008 10737418240 0000 --- IV
0009 20000009910200140009 10737418240 0000 --- IV
000a 2000000991020012000A 6442450944 0000 --- IV
000b 2000000991020012000B 6442450944 0000 --- IV
000c 2000000991020012000C 6442450944 0000 --- IV
000d LX yEUHrXa5AHMjOZZLb93eP 6442450944 0001 --- IV
000e LX 4T7JpyqI3UuPlKeT9D3VQF 6442450944 0001 (invalid attribute) SV
000f LX 59V9KIi0ZHWJ5yvjCG5RQ4_d 6442450944 0001 --- IV
0011 LX 6EWPOChJkdSysJmpMAB9YR 6442450944 0001 --- IV
0fff Pool0000_SYV0FFF 8589934592 0000 (invalid attribute) ---
0000 281320357888 84020297728 197300060160
0001 89657442304 6710886400 82946555904
0002 1950988894208 18446744073441116160 1951257329664
0003 1950988894208 18446744073441116160 1951257329664
00-00 2100000991020012
00-01 2200000991020012
00-02 192.168.1.90 Link Down
00-03 192.168.1.91 Link Down
01-00 2900000991020012
01-01 2A00000991020012
01-02 192.168.2.92 Link Down
01-03 192.168.2.93 Link Up
LX OpenStack1
1000-0090-FAA0-786B
1000-0090-FAA0-786A
0000 0005
0001 0006
0002 0011
LX OpenStack3
1000-0090-FAA0-786D
1000-0090-FAA0-786C
0001 0011
LX OpenStack0 Multi-Target
192.168.1.90:3260
192.168.1.91:3260
192.168.2.92:3260
192.168.2.93:3260
iqn.1994-05.com.redhat:d1d8e8f23255
iqn.2001-03.target0000 0000 0000
iqn.2001-03.target0001 0001 0006
LX OpenStack2 Normal iqn.2001-03.target0002
192.168.1.94:3260
192.168.1.95:3260
192.168.2.96:3260
192.168.2.97:3260
iqn.1994-05.com.redhat:13a80ea272e
Command Completed Successfully!! 0
''' class DummyVolume(object): def __init__(self, volid, volsize=1): super(DummyVolume, self).__init__() self.id = volid self._name_id = None self.size = volsize self.status = None self.volume_type_id = None self.attach_status = None self.volume_attachment = None self.provider_location = None self.name = None @property def name_id(self): return self.id if not self._name_id else self._name_id @name_id.setter def name_id(self, value): self._name_id = value class DummySnapshot(object): def __init__(self, snapid): super(DummySnapshot, self).__init__() self.id = snapid self.volume_id = None @ddt.ddt class VolumeIDConvertTest(volume_helper.MStorageDSVDriver, test.TestCase): def setUp(self): super(VolumeIDConvertTest, self).setUp() self.mock_object(self, '_create_ismview_dir') self._set_config(conf.Configuration(None), 'dummy', 'dummy') self.do_setup(None) @ddt.data(("AAAAAAAA", "LX:37mA82"), ("BBBBBBBB", "LX:3R9ZwR")) @ddt.unpack def test_volumeid_should_change_62scale(self, volid, ldname): vol = DummyVolume(volid) actual = self._convert_id2name(vol) self.assertEqual(ldname, actual, "ID:%(volid)s should be change to %(ldname)s" % {'volid': volid, 'ldname': ldname}) @ddt.data(("AAAAAAAA", "LX:37mA82"), ("BBBBBBBB", "LX:3R9ZwR")) @ddt.unpack def test_snap_volumeid_should_change_62scale_andpostfix(self, snapid, ldname): snap = DummySnapshot(snapid) actual = self._convert_id2snapname(snap) self.assertEqual(ldname, actual, "ID:%(snapid)s should be change to %(ldname)s" % {'snapid': snapid, 'ldname': ldname}) @ddt.data(("AAAAAAAA", "LX:37mA82_m"), ("BBBBBBBB", "LX:3R9ZwR_m")) @ddt.unpack def test_ddrsnap_volumeid_should_change_62scale_and_m(self, volid, ldname): vol = DummyVolume(volid) actual = self._convert_id2migratename(vol) self.assertEqual(ldname, actual, "ID:%(volid)s should be change to %(ldname)s" % {'volid': volid, 'ldname': ldname}) def test_convert_deleteldname(self): ldname = self._convert_deleteldname('LX:287RbQoP7VdwR1WsPC2fZT') self.assertEqual(ldname, 'LX:287RbQoP7VdwR1WsPC2fZT_d') class NominatePoolLDTest(volume_helper.MStorageDSVDriver, test.TestCase): def setUp(self): super(NominatePoolLDTest, self).setUp() self.mock_object(self, '_create_ismview_dir') self._set_config(conf.Configuration(None), 'dummy', 'dummy') self.mock_object(self._cli, '_execute', return_value=('success', 0, 0)) self.mock_object(self._cli, 'view_all', return_value=xml_out) self.do_setup(None) self.xml = xml_out self._properties['cli_fip'] = '10.0.0.1' self._properties['pool_pools'] = {0, 1} self._properties['pool_backup_pools'] = {2, 3} (self.pools, self.lds, self.ldsets, self.used_ldns, self.hostports, self.max_ld_count) = self.configs(self.xml) pool_data = {'pool_num': 1, 'total': 1, 'free': 1, 'ld_list': []} volume = {'id': 'X'} self.test_pools = [] for var in range(0, 1025): pool_data['ld_list'].append(volume) self.test_pools = [pool_data] def test_getxml(self): self.assertIsNotNone(self.xml, "iSMview xml should not be None") def test_selectldn_for_normalvolume(self): ldn = self._select_ldnumber(self.used_ldns, self.max_ld_count) self.assertEqual(2, ldn, "selected ldn should be XXX") def test_selectpool_for_normalvolume(self): vol = DummyVolume(constants.VOLUME_ID, 10) pool = self._select_leastused_poolnumber(vol, self.pools, self.xml) self.assertEqual(1, pool, "selected pool should be 1") # config:pool_pools=[1] vol.size = 999999999999 with self.assertRaisesRegex(exception.VolumeBackendAPIException, 'No available pools found.'): pool = self._select_leastused_poolnumber(vol, self.pools, self.xml) def 
test_return_poolnumber(self): self.assertEqual(1, self._return_poolnumber(self.test_pools)) def test_selectpool_for_migratevolume(self): vol = DummyVolume("46045673-41e7-44a7-9333-02f07feab04b") self.VERSION = '9.99.9' dummyhost = {} dummyhost['capabilities'] = self._update_volume_status() pool = self._select_migrate_poolnumber(vol, self.pools, self.xml, dummyhost) self.assertEqual(1, pool, "selected pool should be 1") vol.id = "1febb976-86d0-42ed-9bc0-4aa3e158f27d" pool = self._select_migrate_poolnumber(vol, self.pools, self.xml, dummyhost) self.assertEqual(-1, pool, "selected pool is the same pool(return -1)") vol.size = 999999999999 with self.assertRaisesRegex(exception.VolumeBackendAPIException, 'No available pools found.'): pool = self._select_migrate_poolnumber(vol, self.pools, self.xml, dummyhost) def test_selectpool_for_snapvolume(self): savePool1 = self.pools[1]['free'] self.pools[1]['free'] = 0 vol = DummyVolume(constants.VOLUME_ID, 10) pool = self._select_dsv_poolnumber(vol, self.pools) self.assertEqual(2, pool, "selected pool should be 2") # config:pool_backup_pools=[2] self.pools[1]['free'] = savePool1 if len(self.pools[0]['ld_list']) == 1024: savePool2 = self.pools[2]['free'] savePool3 = self.pools[3]['free'] self.pools[2]['free'] = 0 self.pools[3]['free'] = 0 with self.assertRaisesRegex(exception.VolumeBackendAPIException, 'No available pools found.'): pool = self._select_dsv_poolnumber(vol, self.pools) self.pools[2]['free'] = savePool2 self.pools[3]['free'] = savePool3 vol.size = 999999999999 pool = self._select_dsv_poolnumber(vol, self.pools) self.assertEqual(2, pool, "selected pool should be 2") # config:pool_backup_pools=[2] def test_selectpool_for_ddrvolume(self): vol = DummyVolume(constants.VOLUME_ID, 10) pool = self._select_ddr_poolnumber(vol, self.pools, self.xml, 10) self.assertEqual(2, pool, "selected pool should be 2") # config:pool_backup_pools=[2] savePool2 = self.pools[2]['free'] savePool3 = self.pools[3]['free'] self.pools[2]['free'] = 0 self.pools[3]['free'] = 0 with self.assertRaisesRegex(exception.VolumeBackendAPIException, 'No available pools found.'): pool = self._select_ddr_poolnumber(vol, self.pools, self.xml, 10) self.pools[2]['free'] = savePool2 self.pools[3]['free'] = savePool3 vol.size = 999999999999 with self.assertRaisesRegex(exception.VolumeBackendAPIException, 'No available pools found.'): pool = self._select_ddr_poolnumber(vol, self.pools, self.xml, 999999999999) def test_selectpool_for_volddrvolume(self): vol = DummyVolume(constants.VOLUME_ID, 10) pool = self._select_volddr_poolnumber(vol, self.pools, self.xml, 10) self.assertEqual(1, pool, "selected pool should be 1") # config:pool_backup_pools=[2] savePool0 = self.pools[0]['free'] savePool1 = self.pools[1]['free'] self.pools[0]['free'] = 0 self.pools[1]['free'] = 0 with self.assertRaisesRegex(exception.VolumeBackendAPIException, 'No available pools found.'): pool = self._select_volddr_poolnumber(vol, self.pools, self.xml, 10) self.pools[0]['free'] = savePool0 self.pools[1]['free'] = savePool1 vol.size = 999999999999 with self.assertRaisesRegex(exception.VolumeBackendAPIException, 'No available pools found.'): pool = self._select_volddr_poolnumber(vol, self.pools, self.xml, 999999999999) class GetInformationTest(volume_helper.MStorageDSVDriver, test.TestCase): def setUp(self): super(GetInformationTest, self).setUp() self._set_config(conf.Configuration(None), 'dummy', 'dummy') self.do_setup(None) def test_get_ldset(self): self.xml = xml_out (self.pools, self.lds, self.ldsets, self.used_ldns, 
self.hostports, self.max_ld_count) = self.configs(self.xml) self._properties['ldset_name'] = '' ldset = self.get_ldset(self.ldsets) self.assertIsNone(ldset) self._properties['ldset_name'] = 'LX:OpenStack1' ldset = self.get_ldset(self.ldsets) self.assertEqual('LX:OpenStack1', ldset['ldsetname']) self._properties['ldset_name'] = 'LX:OpenStackX' with self.assertRaisesRegex(exception.NotFound, 'Logical Disk Set' ' `LX:OpenStackX`' ' could not be found.'): self.get_ldset(self.ldsets) class VolumeCreateTest(volume_helper.MStorageDSVDriver, test.TestCase): def setUp(self): super(VolumeCreateTest, self).setUp() self.mock_object(self, '_create_ismview_dir') self._set_config(conf.Configuration(None), 'dummy', 'dummy') self.mock_object(self._cli, '_execute', return_value=('success', 0, 0)) self.mock_object(self._cli, 'view_all', return_value=xml_out) self.do_setup(None) self.xml = xml_out def test_validate_migrate_volume(self): vol = DummyVolume("46045673-41e7-44a7-9333-02f07feab04b", 1) vol.status = 'available' self._validate_migrate_volume(vol, self.xml) vol.id = "AAAAAAAA" vol.status = 'available' with self.assertRaisesRegex(exception.NotFound, 'Logical Disk `LX:37mA82`' ' could not be found.'): self._validate_migrate_volume(vol, self.xml) def test_extend_volume(self): vol = DummyVolume("46045673-41e7-44a7-9333-02f07feab04b", 1) vol.status = 'available' self.extend_volume(vol, 10) vol.id = "00046058-d38e-7f60-67b7-59ed65e54225" # RV with self.assertRaisesRegex(exception.VolumeBackendAPIException, 'RPL Attribute Error. ' 'RPL Attribute = RV.'): self.extend_volume(vol, 10) class BindLDTest(volume_helper.MStorageDSVDriver, test.TestCase): def setUp(self): super(BindLDTest, self).setUp() self.mock_object(self, '_create_ismview_dir') self._set_config(conf.Configuration(None), 'dummy', 'dummy') self.mock_object(self._cli, '_execute', return_value=('success', 0, 0)) self.mock_object(self._cli, 'view_all', return_value=xml_out) self.do_setup(None) self.mock_object(self, '_bind_ld', return_value=(0, 0, 0)) def test_bindld_CreateVolume(self): vol = DummyVolume(constants.VOLUME_ID, 1) vol.migration_status = "success" self.create_volume(vol) self._bind_ld.assert_called_once_with( vol, vol.size, None, self._convert_id2name, self._select_leastused_poolnumber) def test_bindld_CreateCloneVolume(self): vol = DummyVolume(constants.VOLUME_ID, 1) vol.migration_status = "success" src = DummyVolume("46045673-41e7-44a7-9333-02f07feab04b", 1) self.mock_object(self._cli, 'query_BV_SV_status', return_value='snap/active') self.mock_object(self._cli, 'query_MV_RV_name', return_value='separated') self.mock_object(self._cli, 'backup_restore') self.create_cloned_volume(vol, src) self._bind_ld.assert_called_once_with( vol, vol.size, None, self._convert_id2name, self._select_leastused_poolnumber) self.mock_object(self._cli, 'get_pair_lds', return_value={'lds1', 'lds2', 'lds3'}) with self.assertRaisesRegex(exception.VolumeBackendAPIException, 'Cannot create clone volume. ' 'number of pairs reached 3. 
' 'ldname=LX:287RbQoP7VdwR1WsPC2fZT'): self.create_cloned_volume(vol, src) def test_bindld_CreateCloneWaitingInterval(self): self.assertEqual(10, cli.get_sleep_time_for_clone(0)) self.assertEqual(12, cli.get_sleep_time_for_clone(2)) self.assertEqual(60, cli.get_sleep_time_for_clone(19)) def test_delete_volume(self): ldname = "LX:287RbQoP7VdwR1WsPC2fZT" detached = self._detach_from_all(ldname, xml_out) self.assertTrue(detached) ldname = 'LX:31HxzqBiAFTUxxOlcVn3EA' detached = self._detach_from_all(ldname, xml_out) self.assertFalse(detached) vol = DummyVolume("1febb976-86d0-42ed-9bc0-4aa3e158f27d") with mock.patch.object(self._cli, 'unbind') as unbind_mock: self.delete_volume(vol) unbind_mock.assert_called_once_with('LX:yEUHrXa5AHMjOZZLb93eP') pools, lds, ldsets, used_ldns, hostports, max_ld_count = ( self.configs(xml_out)) vol = DummyVolume('1febb976-86d0-42ed-9bc0-4aa3e158f27d') vol._name_id = None with mock.patch.object(self._cli, 'unbind') as unbind_mock: self.delete_volume(vol) unbind_mock.assert_called_once_with('LX:yEUHrXa5AHMjOZZLb93eP') vol = DummyVolume('46045673-41e7-44a7-9333-02f07feab04b') vol._name_id = '1febb976-86d0-42ed-9bc0-4aa3e158f27d' with mock.patch.object(self._cli, 'unbind') as unbind_mock: self.delete_volume(vol) unbind_mock.assert_called_once_with('LX:yEUHrXa5AHMjOZZLb93eP') vol = DummyVolume(constants.VOLUME_ID) vol._name_id = 'a951f0eb-27ae-41a7-a5e5-604e721a16d4' with mock.patch.object(self._cli, 'unbind') as unbind_mock: self.delete_volume(vol) unbind_mock.assert_called_once_with('LX:59V9KIi0ZHWJ5yvjCG5RQ4_d') class BindLDTest_Snap(volume_helper.MStorageDSVDriver, test.TestCase): def setUp(self): super(BindLDTest_Snap, self).setUp() self.mock_object(self, '_create_ismview_dir') self._set_config(conf.Configuration(None), 'dummy', 'dummy') self.mock_object(self._cli, '_execute', return_value=('success', 0, 0)) self.mock_object(self._cli, 'view_all', return_value=xml_out) self.do_setup(None) self.mock_object(self, '_bind_ld', return_value=(0, 0, 0)) self.mock_object(self, '_create_snapshot') def test_bindld_CreateSnapshot(self): snap = DummySnapshot(constants.SNAPSHOT_ID) snap.volume_id = constants.VOLUME_ID self.create_snapshot(snap) self._create_snapshot.assert_called_once_with( snap, self._properties['diskarray_name']) def test_bindld_CreateFromSnapshot(self): vol = DummyVolume(constants.VOLUME_ID) vol.migration_status = "success" snap = DummySnapshot("63410c76-2f12-4473-873d-74a63dfcd3e2") snap.volume_id = "1febb976-86d0-42ed-9bc0-4aa3e158f27d" self.mock_object(self._cli, 'query_BV_SV_status', return_value='snap/active') self.mock_object(self._cli, 'backup_restore') self.create_volume_from_snapshot(vol, snap) self._bind_ld.assert_called_once_with( vol, 1, None, self._convert_id2name, self._select_volddr_poolnumber, 1) class ExportTest(volume_helper.MStorageDSVDriver, test.TestCase): def setUp(self): super(ExportTest, self).setUp() self.mock_object(self, '_create_ismview_dir') self._set_config(conf.Configuration(None), 'dummy', 'dummy') self.mock_object(self._cli, '_execute', return_value=('success', 0, 0)) self.mock_object(self._cli, 'view_all', return_value=xml_out) self.do_setup(None) def test_iscsi_initialize_connection(self): vol = DummyVolume("46045673-41e7-44a7-9333-02f07feab04b") connector = {'initiator': "iqn.1994-05.com.redhat:d1d8e8f23255", 'multipath': False} info = self.iscsi_initialize_connection(vol, connector) self.assertEqual('iscsi', info['driver_volume_type']) self.assertEqual('iqn.2001-03.target0000', info['data']['target_iqn']) 
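# For reference, a minimal sketch (an illustration, not the driver's documented
# contract) of the iSCSI connection info the assertions below expect:
#   {'driver_volume_type': 'iscsi',
#    'data': {'target_iqn': 'iqn.2001-03.target0000',
#             'target_portal': '192.168.1.90:3260',  # any portal from the fixture
#             'target_lun': 0}}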
self.assertIn(info['data']['target_portal'], ['192.168.1.90:3260', '192.168.1.91:3260', '192.168.2.92:3260', '192.168.2.93:3260']) self.assertEqual(0, info['data']['target_lun']) vol.id = "87d8d42f-7550-4f43-9a2b-fe722bf86941" with self.assertRaisesRegex(exception.NotFound, 'Logical Disk `LX:48L3QCi4npuqxPX0Lyeu8H`' ' could not be found.'): self.iscsi_initialize_connection(vol, connector) def test_iscsi_multipath_initialize_connection(self): vol = DummyVolume("46045673-41e7-44a7-9333-02f07feab04b") connector = {'initiator': "iqn.1994-05.com.redhat:d1d8e8f23255", 'multipath': True} info = self.iscsi_initialize_connection(vol, connector) self.assertEqual('iscsi', info['driver_volume_type']) self.assertEqual('iqn.2001-03.target0000', info['data']['target_iqn']) self.assertIn(info['data']['target_portal'], ['192.168.1.90:3260', '192.168.1.91:3260', '192.168.2.92:3260', '192.168.2.93:3260']) self.assertEqual(0, info['data']['target_lun']) self.assertEqual('iqn.2001-03.target0000', info['data']['target_iqns'][0]) self.assertEqual('iqn.2001-03.target0000', info['data']['target_iqns'][1]) self.assertEqual('iqn.2001-03.target0000', info['data']['target_iqns'][2]) self.assertEqual('iqn.2001-03.target0000', info['data']['target_iqns'][3]) self.assertEqual(info['data']['target_portals'][0], '192.168.1.90:3260') self.assertEqual(info['data']['target_portals'][1], '192.168.1.91:3260') self.assertEqual(info['data']['target_portals'][2], '192.168.2.92:3260') self.assertEqual(info['data']['target_portals'][3], '192.168.2.93:3260') self.assertEqual(0, info['data']['target_luns'][0]) self.assertEqual(0, info['data']['target_luns'][1]) self.assertEqual(0, info['data']['target_luns'][2]) self.assertEqual(0, info['data']['target_luns'][3]) def test_iscsi_terminate_connection(self): ctx = context.RequestContext('admin', 'fake', True) vol = fake_volume_obj(ctx, id='46045673-41e7-44a7-9333-02f07feab04b') connector = {'initiator': "iqn.1994-05.com.redhat:d1d8e8f23255", 'multipath': True, 'host': 'DummyHost'} attachment = { 'id': constants.ATTACHMENT_ID, 'volume_id': vol.id, 'connector': connector } attach_object = volume_attachment.VolumeAttachment(**attachment) attachment = volume_attachment.VolumeAttachmentList( objects=[attach_object]) vol.volume_attachment = attachment with mock.patch.object(self._cli, 'delldsetld', return_value=(True, '') ) as delldsetld_mock: ret = self._iscsi_terminate_connection(vol, connector) delldsetld_mock.assert_called_once_with( 'LX:OpenStack0', 'LX:287RbQoP7VdwR1WsPC2fZT') self.assertIsNone(ret) attachment1 = { 'id': constants.ATTACHMENT_ID, 'volume_id': vol.id, 'connector': connector } attachment2 = { 'id': constants.ATTACHMENT2_ID, 'volume_id': vol.id, 'connector': connector } attach_object1 = volume_attachment.VolumeAttachment(**attachment1) attach_object2 = volume_attachment.VolumeAttachment(**attachment2) attachments = volume_attachment.VolumeAttachmentList( objects=[attach_object1, attach_object2]) vol.volume_attachment = attachments with mock.patch.object(self._cli, 'delldsetld', return_value=(True, '') ) as delldsetld_mock: ret = self._iscsi_terminate_connection(vol, connector) delldsetld_mock.assert_not_called() self.assertIsNone(ret) def test_iscsi_terminate_connection_negative(self): ctx = context.RequestContext('admin', 'fake', True) vol = fake_volume_obj(ctx, id='46045673-41e7-44a7-9333-02f07feab04b') connector = {'initiator': "iqn.1994-05.com.redhat:d1d8e8f23255", 'multipath': True, 'host': 'DummyHost'} attachment = { 'id': constants.ATTACHMENT_ID, 'volume_id': vol.id, 
'connector': connector } attach_object = volume_attachment.VolumeAttachment(**attachment) attachment = volume_attachment.VolumeAttachmentList( objects=[attach_object]) vol.volume_attachment = attachment with self.assertRaisesRegex(exception.VolumeBackendAPIException, r'Failed to unregister Logical Disk from' r' Logical Disk Set \(iSM31064\)'): self.mock_object(self._cli, 'delldsetld', return_value=(False, 'iSM31064')) self._iscsi_terminate_connection(vol, connector) def test_fc_initialize_connection(self): ctx = context.RequestContext('admin', 'fake', True) vol = fake_volume_obj(ctx, id='46045673-41e7-44a7-9333-02f07feab04b') connector = {'wwpns': ["10000090FAA0786A", "10000090FAA0786B"], 'host': 'DummyHost'} attachment = { 'id': constants.ATTACHMENT_ID, 'volume_id': vol.id, 'connector': connector } attach_object = volume_attachment.VolumeAttachment(**attachment) attachment = volume_attachment.VolumeAttachmentList( objects=[attach_object]) vol.volume_attachment = attachment info = self._fc_initialize_connection(vol, connector) self.assertEqual('fibre_channel', info['driver_volume_type']) self.assertEqual('2100000991020012', info['data']['target_wwn'][0]) self.assertEqual('2200000991020012', info['data']['target_wwn'][1]) self.assertEqual('2900000991020012', info['data']['target_wwn'][2]) self.assertEqual('2A00000991020012', info['data']['target_wwn'][3]) self.assertEqual( '2100000991020012', info['data']['initiator_target_map']['10000090FAA0786A'][0]) self.assertEqual( '2100000991020012', info['data']['initiator_target_map']['10000090FAA0786B'][0]) self.assertEqual( '2200000991020012', info['data']['initiator_target_map']['10000090FAA0786A'][1]) self.assertEqual( '2200000991020012', info['data']['initiator_target_map']['10000090FAA0786B'][1]) self.assertEqual( '2900000991020012', info['data']['initiator_target_map']['10000090FAA0786A'][2]) self.assertEqual( '2900000991020012', info['data']['initiator_target_map']['10000090FAA0786B'][2]) self.assertEqual( '2A00000991020012', info['data']['initiator_target_map']['10000090FAA0786A'][3]) self.assertEqual( '2A00000991020012', info['data']['initiator_target_map']['10000090FAA0786B'][3]) with self.assertRaisesRegex(exception.VolumeBackendAPIException, r'Failed to unregister Logical Disk from' r' Logical Disk Set \(iSM31064\)'): self.mock_object(self._cli, 'delldsetld', return_value=(False, 'iSM31064')) self._fc_terminate_connection(vol, connector) ctx = context.RequestContext('admin', 'fake', True) vol = fake_volume_obj(ctx, id='46045673-41e7-44a7-9333-02f07feab04b') attachment = { 'id': constants.ATTACHMENT_ID, 'volume_id': vol.id, 'connector': connector } attach_object = volume_attachment.VolumeAttachment(**attachment) attachment = volume_attachment.VolumeAttachmentList( objects=[attach_object]) vol.volume_attachment = attachment with mock.patch.object(self._cli, 'delldsetld', return_value=(True, '') ) as delldsetld_mock: self._fc_terminate_connection(vol, connector) delldsetld_mock.assert_called_once_with( 'LX:OpenStack1', 'LX:287RbQoP7VdwR1WsPC2fZT') attachment1 = { 'id': constants.ATTACHMENT_ID, 'volume_id': vol.id, 'connector': connector } attachment2 = { 'id': constants.ATTACHMENT2_ID, 'volume_id': vol.id, 'connector': connector } attach_object1 = volume_attachment.VolumeAttachment(**attachment1) attach_object2 = volume_attachment.VolumeAttachment(**attachment2) attachments = volume_attachment.VolumeAttachmentList( objects=[attach_object1, attach_object2]) vol.volume_attachment = attachments with mock.patch.object(self._cli, 'delldsetld', 
return_value=(True, '') ) as delldsetld_mock: self._fc_terminate_connection(vol, connector) delldsetld_mock.assert_not_called() vol = fake_volume_obj(ctx, id='ccd662e5-2efe-4899-b12f-114b5cad81c3') connector = {'wwpns': ["10000090FAA0786A", "10000090FAA0786B"], 'host': 'HostA'} atchmnt = { 'id': constants.ATTACHMENT_ID, 'volume_id': vol.id, 'connector': connector } attach_object = volume_attachment.VolumeAttachment(**atchmnt) attachment = volume_attachment.VolumeAttachmentList( objects=[attach_object]) vol.volume_attachment = attachment info = self._fc_initialize_connection(vol, connector) self.assertEqual(2, info['data']['target_lun']) connector = {'wwpns': ["10000090FAA0786C", "10000090FAA0786D"], 'host': 'HostB'} atchmnt = { 'id': constants.ATTACHMENT_ID, 'volume_id': vol.id, 'connector': connector } attach_object = volume_attachment.VolumeAttachment(**atchmnt) attachment = volume_attachment.VolumeAttachmentList( objects=[attach_object]) vol.volume_attachment = attachment info = self._fc_initialize_connection(vol, connector) self.assertEqual(1, info['data']['target_lun']) def test_fc_terminate_connection(self): ctx = context.RequestContext('admin', 'fake', True) vol = fake_volume_obj(ctx, id='46045673-41e7-44a7-9333-02f07feab04b') connector = {'wwpns': ["10000090FAA0786A", "10000090FAA0786B"], 'host': 'DummyHost'} attachment = { 'id': constants.ATTACHMENT_ID, 'volume_id': vol.id, 'connector': connector } attach_object = volume_attachment.VolumeAttachment(**attachment) attachment = volume_attachment.VolumeAttachmentList( objects=[attach_object]) vol.volume_attachment = attachment info = self._fc_terminate_connection(vol, connector) self.assertEqual('fibre_channel', info['driver_volume_type']) self.assertEqual('2100000991020012', info['data']['target_wwn'][0]) self.assertEqual('2200000991020012', info['data']['target_wwn'][1]) self.assertEqual('2900000991020012', info['data']['target_wwn'][2]) self.assertEqual('2A00000991020012', info['data']['target_wwn'][3]) self.assertEqual( '2100000991020012', info['data']['initiator_target_map']['10000090FAA0786A'][0]) self.assertEqual( '2100000991020012', info['data']['initiator_target_map']['10000090FAA0786B'][0]) self.assertEqual( '2200000991020012', info['data']['initiator_target_map']['10000090FAA0786A'][1]) self.assertEqual( '2200000991020012', info['data']['initiator_target_map']['10000090FAA0786B'][1]) self.assertEqual( '2900000991020012', info['data']['initiator_target_map']['10000090FAA0786A'][2]) self.assertEqual( '2900000991020012', info['data']['initiator_target_map']['10000090FAA0786B'][2]) self.assertEqual( '2A00000991020012', info['data']['initiator_target_map']['10000090FAA0786A'][3]) self.assertEqual( '2A00000991020012', info['data']['initiator_target_map']['10000090FAA0786B'][3]) info = self._fc_terminate_connection(vol, None) self.assertEqual('fibre_channel', info['driver_volume_type']) self.assertEqual({}, info['data']) def test_is_multi_attachment(self): ctx = context.RequestContext('admin', 'fake', True) vol = fake_volume_obj(ctx, id=constants.VOLUME_ID) connector = {'wwpns': ["10000090FAA0786A", "10000090FAA0786B"], 'host': 'DummyHost'} attachment1 = { 'id': constants.ATTACHMENT_ID, 'volume_id': vol.id, 'connector': connector } attachment2 = { 'id': constants.ATTACHMENT2_ID, 'volume_id': vol.id, 'connector': connector } attach_object1 = volume_attachment.VolumeAttachment(**attachment1) attach_object2 = volume_attachment.VolumeAttachment(**attachment2) attachments = volume_attachment.VolumeAttachmentList( 
objects=[attach_object1, attach_object2]) vol.volume_attachment = attachments ret = self._is_multi_attachment(vol, connector) self.assertTrue(ret) attachments = volume_attachment.VolumeAttachmentList( objects=[attach_object1]) vol.volume_attachment = attachments ret = self._is_multi_attachment(vol, connector) self.assertFalse(ret) vol.volume_attachment = None ret = self._is_multi_attachment(vol, connector) self.assertFalse(ret) class DeleteDSVVolume_test(volume_helper.MStorageDSVDriver, test.TestCase): def setUp(self): super(DeleteDSVVolume_test, self).setUp() self.mock_object(self, '_create_ismview_dir') self._set_config(conf.Configuration(None), 'dummy', 'dummy') self.mock_object(self._cli, '_execute', return_value=('success', 0, 0)) self.mock_object(self._cli, 'view_all', return_value=xml_out) self.do_setup(None) def test_delete_snapshot(self): self.mock_object(self._cli, 'query_BV_SV_status', return_value='snap/active') snap = DummySnapshot(constants.SNAPSHOT_ID) snap.volume_id = constants.VOLUME_ID ret = self.delete_snapshot(snap) self.assertIsNone(ret) class NonDisruptiveBackup_test(volume_helper.MStorageDSVDriver, test.TestCase): def setUp(self): super(NonDisruptiveBackup_test, self).setUp() self.mock_object(self, '_create_ismview_dir') self._set_config(conf.Configuration(None), 'dummy', 'dummy') self.mock_object(self._cli, '_execute', return_value=('success', 0, 0)) self.mock_object(self._cli, 'view_all', return_value=xml_out) self.mock_object(self._cli, 'query_BV_SV_status', return_value='snap/active') self.do_setup(None) self.xml = xml_out (self.pools, self.lds, self.ldsets, self.used_ldns, self.hostports, self.max_ld_count) = self.configs(self.xml) def test_validate_ld_exist(self): vol = DummyVolume("46045673-41e7-44a7-9333-02f07feab04b") ldname = self._validate_ld_exist( self.lds, vol.id, self._properties['ld_name_format']) self.assertEqual('LX:287RbQoP7VdwR1WsPC2fZT', ldname) vol.id = "00000000-0000-0000-0000-6b6d96553b4b" with self.assertRaisesRegex(exception.NotFound, 'Logical Disk `LX:XXXXXXXX`' ' could not be found.'): self._validate_ld_exist( self.lds, vol.id, self._properties['ld_name_format']) def test_validate_iscsildset_exist(self): connector = {'initiator': "iqn.1994-05.com.redhat:d1d8e8f23255"} ldset = self._validate_iscsildset_exist(self.ldsets, connector) self.assertEqual('LX:OpenStack0', ldset['ldsetname']) connector = {'initiator': "iqn.1994-05.com.redhat:d1d8e8f232XX"} mock_data = {'ldsetname': 'LX:redhatd1d8e8f23', 'protocol': 'iSCSI', 'mode': 'Multi-Target', 'portal_list': ['1.1.1.1:3260', '2.2.2.2:3260'], 'lds': {}, 'initiator_list': ['iqn.1994-05.com.redhat:d1d8e8f232XX']} mock_ldset = {} mock_ldset['LX:redhatd1d8e8f23'] = mock_data self.mock_object( self, 'configs', return_value=(None, None, mock_ldset, None, None, None)) ldset = self._validate_iscsildset_exist(self.ldsets, connector) self.assertEqual('LX:redhatd1d8e8f23', ldset['ldsetname']) self.assertEqual('iqn.1994-05.com.redhat:d1d8e8f232XX', ldset['initiator_list'][0]) def test_validate_fcldset_exist(self): connector = {'wwpns': ["10000090FAA0786A", "10000090FAA0786B"]} ldset = self._validate_fcldset_exist(self.ldsets, connector) self.assertEqual('LX:OpenStack1', ldset['ldsetname']) connector = {'wwpns': ["10000090FAA0786X", "10000090FAA0786Y"]} mock_data = {'ldsetname': 'LX:10000090FAA0786X', 'lds': {}, 'protocol': 'FC', 'wwpn': ["1000-0090-FAA0-786X", "1000-0090-FAA0-786Y"], 'port': []} mock_ldset = {} mock_ldset['LX:10000090FAA0786X'] = mock_data self.mock_object( self, 'configs', 
return_value=(None, None, mock_ldset, None, None, None)) ldset = self._validate_fcldset_exist(self.ldsets, connector) self.assertEqual('LX:10000090FAA0786X', ldset['ldsetname']) self.assertEqual('1000-0090-FAA0-786X', ldset['wwpn'][0]) self.assertEqual('1000-0090-FAA0-786Y', ldset['wwpn'][1]) def test_enumerate_iscsi_portals(self): connector = {'initiator': "iqn.1994-05.com.redhat:d1d8e8f23255"} ldset = self._validate_iscsildset_exist(self.ldsets, connector) self.assertEqual('LX:OpenStack0', ldset['ldsetname']) portal = self._enumerate_iscsi_portals(self.hostports, ldset) self.assertEqual('192.168.1.90:3260', portal[0]) self.assertEqual('192.168.1.91:3260', portal[1]) self.assertEqual('192.168.2.92:3260', portal[2]) self.assertEqual('192.168.2.93:3260', portal[3]) def test_initialize_connection_snapshot(self): snap = DummySnapshot('46045673-41e7-44a7-9333-02f07feab04b') snap.volume_id = "92dbc7f4-dbc3-4a87-aef4-d5a2ada3a9af" connector = {'initiator': "iqn.1994-05.com.redhat:d1d8e8f23255", 'multipath': True} ret = self.iscsi_initialize_connection_snapshot(snap, connector) self.assertIsNotNone(ret) self.assertEqual('iscsi', ret['driver_volume_type']) connector = {'wwpns': ["10000090FAA0786A", "10000090FAA0786B"]} ret = self.fc_initialize_connection_snapshot(snap, connector) self.assertIsNotNone(ret) self.assertEqual('fibre_channel', ret['driver_volume_type']) ldset_lds0 = {'ldsetname': 'LX:OpenStack1', 'lds': {}, 'protocol': 'FC', 'wwpn': ['1000-0090-FAA0-786A', '1000-0090-FAA0-786B'], 'port': []} ldset_lds1 = {'ldsetname': 'LX:OpenStack1', 'lds': {16: {'ldn': 16, 'lun': 0}}, 'protocol': 'FC', 'wwpn': ['1000-0090-FAA0-786A', '1000-0090-FAA0-786B'], 'port': []} ldset_lds2 = {'ldsetname': 'LX:OpenStack1', 'lds': {6: {'ldn': 6, 'lun': 1}}, 'protocol': 'FC', 'wwpn': ['1000-0090-FAA0-786A', '1000-0090-FAA0-786B'], 'port': []} return_ldset = [ldset_lds0, ldset_lds1, ldset_lds2] self.mock_object(self, '_validate_fcldset_exist', side_effect=return_ldset) mocker = self.mock_object(self._cli, 'addldsetld', mock.Mock(wraps=self._cli.addldsetld)) connector = {'wwpns': ["10000090FAA0786A", "10000090FAA0786B"]} ret = self.fc_initialize_connection_snapshot(snap, connector) self.assertIsNotNone(ret) self.assertEqual('fibre_channel', ret['driver_volume_type']) mocker.assert_any_call('LX:OpenStack1', 'LX:__ControlVolume_10h', 0) mocker.assert_any_call('LX:OpenStack1', 'LX:287RbQoP7VdwR1WsPC2fZT_l', 1) def test_terminate_connection_snapshot(self): ctx = context.RequestContext('admin', 'fake', True) snap = fake_volume_obj(ctx, id="46045673-41e7-44a7-9333-02f07feab04b") connector = {'initiator': 'iqn.1994-05.com.redhat:d1d8e8f23255', 'host': 'DummyHost'} attachment = { 'id': constants.ATTACHMENT_ID, 'volume_id': snap.id, 'connector': connector } attach_object = volume_attachment.VolumeAttachment(**attachment) attachment = volume_attachment.VolumeAttachmentList( objects=[attach_object]) snap.volume_attachment = attachment self.iscsi_terminate_connection_snapshot(snap, connector) connector = {'wwpns': ["10000090FAA0786A", "10000090FAA0786B"], 'host': 'DummyHost'} attachment = { 'id': constants.ATTACHMENT_ID, 'volume_id': snap.id, 'connector': connector } attach_object = volume_attachment.VolumeAttachment(**attachment) attachment = volume_attachment.VolumeAttachmentList( objects=[attach_object]) snap.volume_attachment = attachment mocker = self.mock_object(self, '_is_multi_attachment', mock.Mock(wraps=self._is_multi_attachment)) ret = self.fc_terminate_connection_snapshot(snap, connector, is_snapshot=True) 
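# The snapshot termination path should return FC connection info while skipping
# the multi-attachment check; the wrapped mock is expected to stay uncalled.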
self.assertEqual('fibre_channel', ret['driver_volume_type']) mocker.assert_not_called() def test_remove_export_snapshot(self): snap = DummySnapshot('46045673-41e7-44a7-9333-02f07feab04b') self.remove_export_snapshot(None, snap) def test_backup_use_temp_snapshot(self): ret = self.backup_use_temp_snapshot() self.assertTrue(ret) class VolumeStats_test(volume_helper.MStorageDSVDriver, test.TestCase): def setUp(self): super(VolumeStats_test, self).setUp() self._set_config(conf.Configuration(None), 'dummy', 'dummy') self.do_setup(None) self._properties['cli_fip'] = '10.0.0.1' self._properties['pool_pools'] = {0, 1} self._properties['pool_backup_pools'] = {2, 3} self.VERSION = '9.99.9' def test_update_volume_status(self): self.mock_object(volume_common.MStorageVolumeCommon, 'parse_xml', side_effect=Exception) stats = self._update_volume_status() self.assertEqual('dummy', stats.get('volume_backend_name')) self.assertEqual('NEC', stats.get('vendor_name')) self.assertEqual(self.VERSION, stats.get('driver_version')) self.assertEqual('10.0.0.1', stats.get('location_info').split(':')[0]) self.assertEqual('0,1', stats.get('location_info').split(':')[1]) class GetFreeLun_test(volume_helper.MStorageDSVDriver, test.TestCase): def setUp(self): super(GetFreeLun_test, self).setUp() self.do_setup(None) def test_get_free_lun_iscsi_multi(self): ldset = {'protocol': 'iSCSI', 'mode': 'Multi-Target', 'lds': {}} target_lun = self._get_free_lun(ldset) self.assertIsNone(target_lun) def test_get_free_lun_iscsi_lun0(self): ldset = {'protocol': 'iSCSI', 'mode': 'Normal', 'lds': {}} target_lun = self._get_free_lun(ldset) self.assertEqual(0, target_lun) def test_get_free_lun_iscsi_lun2(self): ld0 = {'lun': 0} ld1 = {'lun': 1} ld3 = {'lun': 3} ldsetlds = {} ldsetlds[0] = ld0 ldsetlds[1] = ld1 ldsetlds[3] = ld3 ldset = {'protocol': 'iSCSI', 'mode': 'Normal', 'lds': ldsetlds} target_lun = self._get_free_lun(ldset) self.assertEqual(2, target_lun) def test_get_free_lun_fc_lun1(self): ld0 = {'lun': 0} ldsetlds = {} ldsetlds[0] = ld0 ldset = {'lds': ldsetlds, 'protocol': 'FC'} target_lun = self._get_free_lun(ldset) self.assertEqual(1, target_lun) class Migrate_test(volume_helper.MStorageDSVDriver, test.TestCase): def setUp(self): super(Migrate_test, self).setUp() self.mock_object(self, '_create_ismview_dir') self._set_config(conf.Configuration(None), 'dummy', 'dummy') self.mock_object(self._cli, '_execute', return_value=('success', 0, 0)) self.mock_object(self._cli, 'view_all', return_value=xml_out) self.mock_object(self, '_bind_ld', return_value=(0, 0, 0)) self.mock_object(self._cli, 'backup_restore') self.mock_object(volume_types, 'get_volume_type', return_value={}) self.mock_object(qos_specs, 'get_qos_specs', return_value={}) self.do_setup(None) self._properties['cli_fip'] = '10.0.0.1' self._properties['pool_pools'] = {0, 1} self._properties['pool_backup_pools'] = {2, 3} self.newvol = DummyVolume(constants.VOLUME_ID) self.sourcevol = DummyVolume(constants.VOLUME2_ID) self.host = {} self.VERSION = '9.99.9' self.host['capabilities'] = self._update_volume_status() self.xml = xml_out def test_update_migrate_volume(self): newvol = DummyVolume(constants.VOLUME_ID) sourcevol = DummyVolume(constants.VOLUME2_ID) update_data = self.update_migrated_volume(None, sourcevol, newvol, 'available') self.assertIsNone(update_data['_name_id']) self.assertIsNone(update_data['provider_location']) def test_migrate_volume(self): vol = DummyVolume(constants.VOLUME2_ID) vol.status = 'available' moved, __ = self.migrate_volume(None, vol, self.host) 
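# An 'available' volume is expected to migrate successfully; the 'in-use' and
# 'creating' cases exercised below should report no migration without raising.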
self.assertTrue(moved) vol = DummyVolume(constants.VOLUME2_ID) vol.status = 'in-use' moved, __ = self.migrate_volume(None, vol, self.host) self.assertFalse(moved) vol.id = "87d8d42f-7550-4f43-9a2b-fe722bf86941" with self.assertRaisesRegex(exception.NotFound, 'Logical Disk `LX:48L3QCi4npuqxPX0Lyeu8H`' ' could not be found.'): self._validate_migrate_volume(vol, xml_out) vol.id = '46045673-41e7-44a7-9333-02f07feab04b' vol.status = 'creating' moved, __ = self.migrate_volume(None, vol, self.host) self.assertFalse(moved) vol.id = "92dbc7f4-dbc3-4a87-aef4-d5a2ada3a9af" vol.status = 'available' with self.assertRaisesRegex(exception.VolumeBackendAPIException, r'Specified Logical Disk ' r'LX:4T7JpyqI3UuPlKeT9D3VQF has an ' r'invalid attribute ' r'\(\(invalid attribute\)\).'): self._validate_migrate_volume(vol, xml_out) def test_retype_volume(self): vol = DummyVolume(constants.VOLUME2_ID) diff = {'encryption': {}, 'qos_specs': {}, 'extra_specs': {u'volume_backend_name': (u'Storage1', u'Storage2')}} new_type = {'id': constants.VOLUME_TYPE_ID} retyped = self.retype(None, vol, new_type, diff, self.host) self.assertTrue(retyped) volume_type = {'name': u'Bronze', 'qos_specs_id': u'57223246-1d49-4565-860f-bbbee6cee122', 'deleted': False, 'created_at': '2019-01-08 08:48:20', 'updated_at': '2019-01-08 08:48:29', 'extra_specs': {}, 'is_public': True, 'deleted_at': None, 'id': u'33cd6136-0465-4ee0-82fa-b5f3a9138249', 'description': None} specs = {'specs': {u'lowerlimit': u'500', u'upperlimit': u'2000'}} volume_types.get_volume_type.return_value = volume_type qos_specs.get_qos_specs.return_value = specs diff = {'encryption': {}, 'qos_specs': {'consumer': (u'back-end', u'back-end'), u'lowerlimit': (u'1000', u'500'), u'upperlimit': (u'3000', u'2000')}, 'extra_specs': {u'volume_backend_name': (u'Storage', None)}} retyped = self.retype(None, vol, new_type, diff, self.host) self.assertTrue(retyped) diff = {'encryption': {}, 'qos_specs': {'consumer': (u'back-end', None), u'lowerlimit': (u'1000', u'500'), u'upperlimit': (u'3000', u'2000')}, 'extra_specs': {}} retyped = self.retype(None, vol, new_type, diff, self.host) self.assertTrue(retyped) vol.attach_status = 'attached' diff = {'encryption': {}, 'qos_specs': {}, 'extra_specs': {u'volume_backend_name': (u'Storage1', u'Storage2')}} retyped = self.retype(None, vol, new_type, diff, self.host) self.assertFalse(retyped) def test_validate_retype_volume(self): vol = DummyVolume("87d8d42f-7550-4f43-9a2b-fe722bf86941") with self.assertRaisesRegex(exception.NotFound, 'Logical Disk `LX:48L3QCi4npuqxPX0Lyeu8H`' ' could not be found.'): self._validate_retype_volume(vol, xml_out) vol = DummyVolume("92dbc7f4-dbc3-4a87-aef4-d5a2ada3a9af") with self.assertRaisesRegex(exception.VolumeBackendAPIException, r'Specified Logical Disk ' r'LX:4T7JpyqI3UuPlKeT9D3VQF has an ' r'invalid attribute ' r'\(\(invalid attribute\)\).'): self._validate_retype_volume(vol, xml_out) def test_spec_is_changed(self): extra_specs = {u'volume_backend_name': (u'Storage', None)} equal = self._spec_is_changed(extra_specs, 'volume_backend_name') self.assertTrue(equal) extra_specs = {u'volume_backend_name': (u'Storage', u'Storage')} equal = self._spec_is_changed(extra_specs, 'volume_backend_name') self.assertFalse(equal) def test_check_same_backend(self): diff = {'encryption': {}, 'qos_specs': {'consumer': (u'back-end', u'back-end'), u'upperlimit': (u'3000', u'2000'), u'lowerlimit': (u'1000', u'500')}, 'extra_specs': {u'volume_backend_name': (u'Storage', None)}} qos = self._check_same_backend(diff) 
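# _check_same_backend should report False for this diff (volume_backend_name
# changes from 'Storage' to None); the cases below cover an unchanged backend
# name (True) and a changed name combined with an extra spec (False).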
self.assertFalse(qos) diff['extra_specs'] = {u'volume_backend_name': (u'Storage', u'Storage')} qos = self._check_same_backend(diff) self.assertTrue(qos) diff['extra_specs'] = {u'volume_backend_name': (u'Storage', None), u'dummy_specs': None} qos = self._check_same_backend(diff) self.assertFalse(qos) class ManageUnmanage_test(volume_helper.MStorageDSVDriver, test.TestCase): def setUp(self): super(ManageUnmanage_test, self).setUp() self.mock_object(self, '_create_ismview_dir') self._set_config(conf.Configuration(None), 'dummy', 'dummy') self.mock_object(self._cli, 'view_all', return_value=xml_out) self.do_setup(None) self._properties['pool_pools'] = {0} self._properties['pool_backup_pools'] = {1} def test_is_manageable_volume(self): ld_ok_iv = {'pool_num': 0, 'RPL Attribute': 'IV', 'Purpose': '---'} ld_ok_bv = {'pool_num': 0, 'RPL Attribute': 'BV', 'Purpose': 'INV'} ld_ng_pool = {'pool_num': 1, 'RPL Attribute': 'IV', 'Purpose': '---'} ld_ng_rpl1 = {'pool_num': 0, 'RPL Attribute': 'MV', 'Purpose': 'INV'} ld_ng_rpl2 = {'pool_num': 0, 'RPL Attribute': 'RV', 'Purpose': 'INV'} ld_ng_rpl3 = {'pool_num': 0, 'RPL Attribute': 'SV', 'Purpose': 'INV'} ld_ng_purp = {'pool_num': 0, 'RPL Attribute': 'IV', 'Purpose': 'INV'} self.assertTrue(self._is_manageable_volume(ld_ok_iv)) self.assertTrue(self._is_manageable_volume(ld_ok_bv)) self.assertFalse(self._is_manageable_volume(ld_ng_pool)) self.assertFalse(self._is_manageable_volume(ld_ng_rpl1)) self.assertFalse(self._is_manageable_volume(ld_ng_rpl2)) self.assertFalse(self._is_manageable_volume(ld_ng_rpl3)) self.assertFalse(self._is_manageable_volume(ld_ng_purp)) def test_get_manageable_volumes(self): current_volumes = [] volumes = self.get_manageable_volumes(current_volumes, None, 100, 0, ['reference'], ['dec']) self.assertEqual('LX:287RbQoP7VdwR1WsPC2fZT', volumes[2]['reference']['source-name']) current_volumes = [] volumes = self.get_manageable_volumes(current_volumes, None, 100, 0, ['reference'], ['asc']) self.assertEqual(' :2000000991020012000A', volumes[0]['reference']['source-name']) self.assertEqual(10, len(volumes)) volume = {'id': '46045673-41e7-44a7-9333-02f07feab04b'} current_volumes = [] current_volumes.append(volume) volumes = self.get_manageable_volumes(current_volumes, None, 100, 0, ['reference'], ['dec']) self.assertFalse(volumes[2]['safe_to_manage']) self.assertFalse(volumes[3]['safe_to_manage']) self.assertTrue(volumes[4]['safe_to_manage']) def test_manage_existing(self): self.mock_object(self._cli, 'changeldname') current_volumes = [] volumes = self.get_manageable_volumes(current_volumes, None, 100, 0, ['reference'], ['dec']) newvol = DummyVolume(constants.VOLUME_ID) self.manage_existing(newvol, volumes[4]['reference']) self._cli.changeldname.assert_called_once_with( None, 'LX:vD03hJCiHvGpvP4iSevKk', ' :20000009910200140009') with self.assertRaisesRegex(exception.ManageExistingInvalidReference, 'Specified resource is already in-use.'): self.manage_existing(newvol, volumes[3]['reference']) volume = {'source-name': 'LX:yEUHrXa5AHMjOZZLb93eP'} with self.assertRaisesRegex(exception.ManageExistingVolumeTypeMismatch, 'Volume type is unmatched.'): self.manage_existing(newvol, volume) def test_manage_existing_get_size(self): current_volumes = [] volumes = self.get_manageable_volumes(current_volumes, None, 100, 0, ['reference'], ['dec']) newvol = DummyVolume(constants.VOLUME_ID) size_in_gb = self.manage_existing_get_size(newvol, volumes[3]['reference']) self.assertEqual(10, size_in_gb) class ManageUnmanage_Snap_test(volume_helper.MStorageDSVDriver, 
test.TestCase): def setUp(self): super(ManageUnmanage_Snap_test, self).setUp() self.mock_object(self, '_create_ismview_dir') self._set_config(conf.Configuration(None), 'dummy', 'dummy') self.mock_object(self._cli, 'view_all', return_value=xml_out) self.do_setup(None) self._properties['pool_pools'] = {0} self._properties['pool_backup_pools'] = {1} def test_is_manageable_snapshot(self): ld_ok_sv1 = {'pool_num': 1, 'RPL Attribute': 'SV', 'Purpose': 'INV'} ld_ok_sv2 = {'pool_num': 1, 'RPL Attribute': 'SV', 'Purpose': '---'} ld_ng_pool = {'pool_num': 0, 'RPL Attribute': 'SV', 'Purpose': 'INV'} ld_ng_rpl1 = {'pool_num': 1, 'RPL Attribute': 'MV', 'Purpose': 'INV'} ld_ng_rpl2 = {'pool_num': 1, 'RPL Attribute': 'RV', 'Purpose': 'INV'} ld_ng_rpl3 = {'pool_num': 1, 'RPL Attribute': 'IV', 'Purpose': '---'} ld_ng_rpl4 = {'pool_num': 1, 'RPL Attribute': 'BV', 'Purpose': 'INV'} self.assertTrue(self._is_manageable_snapshot(ld_ok_sv1)) self.assertTrue(self._is_manageable_snapshot(ld_ok_sv2)) self.assertFalse(self._is_manageable_snapshot(ld_ng_pool)) self.assertFalse(self._is_manageable_snapshot(ld_ng_rpl1)) self.assertFalse(self._is_manageable_snapshot(ld_ng_rpl2)) self.assertFalse(self._is_manageable_snapshot(ld_ng_rpl3)) self.assertFalse(self._is_manageable_snapshot(ld_ng_rpl4)) def test_get_manageable_snapshots(self): self.mock_object(self._cli, 'get_bvname', return_value='yEUHrXa5AHMjOZZLb93eP') current_snapshots = [] volumes = self.get_manageable_snapshots(current_snapshots, None, 100, 0, ['reference'], ['asc']) self.assertEqual('LX:4T7JpyqI3UuPlKeT9D3VQF', volumes[0]['reference']['source-name']) def test_manage_existing_snapshot(self): self.mock_object(self._cli, 'changeldname') self.mock_object(self._cli, 'get_bvname', return_value='yEUHrXa5AHMjOZZLb93eP') current_snapshots = [] snaps = self.get_manageable_snapshots(current_snapshots, None, 100, 0, ['reference'], ['asc']) newsnap = DummySnapshot('46045673-41e7-44a7-9333-02f07feab04b') newsnap.volume_id = "1febb976-86d0-42ed-9bc0-4aa3e158f27d" self.manage_existing_snapshot(newsnap, snaps[0]['reference']) self._cli.changeldname.assert_called_once_with( None, 'LX:287RbQoP7VdwR1WsPC2fZT', 'LX:4T7JpyqI3UuPlKeT9D3VQF') newsnap.volume_id = "AAAAAAAA" with self.assertRaisesRegex(exception.ManageExistingInvalidReference, 'Snapshot source is unmatch.'): self.manage_existing_snapshot(newsnap, snaps[0]['reference']) self._cli.get_bvname.return_value = "2000000991020012000C" newsnap.volume_id = "00046058-d38e-7f60-67b7-59ed6422520c" snap = {'source-name': ' :2000000991020012000B'} with self.assertRaisesRegex(exception.ManageExistingVolumeTypeMismatch, 'Volume type is unmatched.'): self.manage_existing_snapshot(newsnap, snap) def test_manage_existing_snapshot_get_size(self): self.mock_object(self._cli, 'get_bvname', return_value='yEUHrXa5AHMjOZZLb93eP') current_snapshots = [] snaps = self.get_manageable_snapshots(current_snapshots, None, 100, 0, ['reference'], ['asc']) newsnap = DummySnapshot('46045673-41e7-44a7-9333-02f07feab04b') newsnap.volume_id = "1febb976-86d0-42ed-9bc0-4aa3e158f27d" size_in_gb = self.manage_existing_snapshot_get_size( newsnap, snaps[0]['reference']) self.assertEqual(6, size_in_gb) class RevertToSnapshotTestCase(volume_helper.MStorageDSVDriver, test.TestCase): def setUp(self): super(RevertToSnapshotTestCase, self).setUp() self._set_config(conf.Configuration(None), 'dummy', 'dummy') self.do_setup(None) self.mock_object(self._cli, 'view_all', return_value=xml_out) def test_revert_to_snapshot(self): vol = 
DummyVolume("1febb976-86d0-42ed-9bc0-4aa3e158f27d") snap = DummySnapshot("63410c76-2f12-4473-873d-74a63dfcd3e2") self.mock_object(time, 'sleep') self.mock_object(self._cli, '_execute', return_value=('success', 0, 0)) self.mock_object(self._cli, 'query_BV_SV_status', return_value='snap/active') self.revert_to_snapshot(None, vol, snap) self._cli._execute.assert_called_once_with( 'iSMsc_restore -bv yEUHrXa5AHMjOZZLb93eP -bvflg ld ' '-sv 31HxzqBiAFTUxxOlcVn3EA -svflg ld -derivsv keep -nowait') vol.id = constants.VOLUME_ID with self.assertRaisesRegex(exception.NotFound, 'Logical Disk `LX:vD03hJCiHvGpvP4iSevKk` ' 'has unbound already.'): self.revert_to_snapshot(None, vol, snap) vol.id = '1febb976-86d0-42ed-9bc0-4aa3e158f27d' snap.id = constants.SNAPSHOT_ID with self.assertRaisesRegex(exception.NotFound, 'Logical Disk `LX:18FkaTGqa43xSFL8aX4A2N` ' 'has unbound already.'): self.revert_to_snapshot(None, vol, snap) snap.id = '63410c76-2f12-4473-873d-74a63dfcd3e2' self.mock_object(self._cli, 'query_BV_SV_status', return_value='rst/exec') with self.assertRaisesRegex(exception.VolumeBackendAPIException, 'The snapshot does not exist or is ' 'not in snap/active status. ' 'bvname=LX:yEUHrXa5AHMjOZZLb93eP, ' 'svname=LX:31HxzqBiAFTUxxOlcVn3EA, ' 'status=rst/exec'): self.revert_to_snapshot(None, vol, snap) return_status = ['snap/active', 'rst/exec', 'snap/active'] self.mock_object(self._cli, 'query_BV_SV_status', side_effect=return_status) self.revert_to_snapshot(None, vol, snap) return_status = ['snap/active', 'rst/exec', 'snap/fault'] self.mock_object(self._cli, 'query_BV_SV_status', side_effect=return_status) with self.assertRaisesRegex(exception.VolumeBackendAPIException, 'Failed to restore from snapshot. ' 'bvname=LX:yEUHrXa5AHMjOZZLb93eP, ' 'svname=LX:31HxzqBiAFTUxxOlcVn3EA, ' 'status=snap/fault'): self.revert_to_snapshot(None, vol, snap) class SetQosSpec_test(volume_helper.MStorageDSVDriver, test.TestCase): def setUp(self): super(SetQosSpec_test, self).setUp() self._set_config(conf.Configuration(None), 'dummy', 'dummy') self.mock_object(self._cli, '_execute', return_value=('success', 0, 0)) self.do_setup(None) def test_set_qos_spec(self): volume_type = {'name': u'Bronze', 'qos_specs_id': u'57223246-1d49-4565-860f-bbbee6cee122', 'deleted': False, 'created_at': '2019-01-08 08:48:20', 'updated_at': '2019-01-08 08:48:29', 'extra_specs': {}, 'is_public': True, 'deleted_at': None, 'id': u'33cd6136-0465-4ee0-82fa-b5f3a9138249', 'description': None} voltype_qos_specs = {'specs': {u'lowerlimit': u'500', u'upperlimit': u'2000', 'upperreport': None}} self.mock_object(volume_types, 'get_volume_type', return_value=volume_type) self.mock_object(qos_specs, 'get_qos_specs', return_value=voltype_qos_specs) ldname = 'LX:287RbQoP7VdwR1WsPC2fZT' volume_type_id = '33cd6136-0465-4ee0-82fa-b5f3a9138249' ret = self._set_qos_spec(ldname, volume_type_id) self.assertIsNone(ret) def test_get_qos_parameters(self): specs = {} qos_params = self.get_qos_parameters(specs, True) self.assertEqual(0, qos_params['upperlimit']) self.assertEqual(0, qos_params['lowerlimit']) self.assertEqual('off', qos_params['upperreport']) specs = {} qos_params = self.get_qos_parameters(specs, False) self.assertIsNone(qos_params['upperlimit']) self.assertIsNone(qos_params['lowerlimit']) self.assertIsNone(qos_params['upperreport']) specs = {u'upperlimit': u'1000', u'lowerlimit': u'500', u'upperreport': u'off'} qos_params = self.get_qos_parameters(specs, False) self.assertEqual(1000, qos_params['upperlimit']) self.assertEqual(500, qos_params['lowerlimit']) 
self.assertEqual('off', qos_params['upperreport']) specs = {u'upperreport': u'on'} qos_params = self.get_qos_parameters(specs, False) self.assertIsNone(qos_params['upperlimit']) self.assertIsNone(qos_params['lowerlimit']) self.assertEqual('on', qos_params['upperreport']) specs = {u'upperreport': u'aaa'} qos_params = self.get_qos_parameters(specs, False) self.assertIsNone(qos_params['upperlimit']) self.assertIsNone(qos_params['lowerlimit']) self.assertIsNone(qos_params['upperreport']) specs = {u'upperlimit': u'1000001', u'lowerlimit': u'500'} with self.assertRaisesRegex(exception.InvalidConfigurationValue, 'Value "1000001" is not valid for ' 'configuration option "upperlimit"'): self.get_qos_parameters(specs, False) specs = {u'upperlimit': u'aaa', u'lowerlimit': u'500'} with self.assertRaisesRegex(exception.InvalidConfigurationValue, 'Value "aaa" is not valid for ' 'configuration option "upperlimit"'): self.get_qos_parameters(specs, False) specs = {u'upperlimit': u'1000', u'lowerlimit': u'aaa'} with self.assertRaisesRegex(exception.InvalidConfigurationValue, 'Value "aaa" is not valid for ' 'configuration option "lowerlimit"'): self.get_qos_parameters(specs, False) specs = {u'upperlimit': u'1000', u'lowerlimit': u'1'} with self.assertRaisesRegex(exception.InvalidConfigurationValue, 'Value "1" is not valid for ' 'configuration option "lowerlimit"'): self.get_qos_parameters(specs, False) ././@PaxHeader0000000000000000000000000000003200000000000011450 xustar000000000000000026 mtime=1759315577.28712 cinder-27.0.0/cinder/tests/unit/volume/drivers/nec/v/0000775000175000017500000000000000000000000022453 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/volume/drivers/nec/v/__init__.py0000664000175000017500000000000000000000000024552 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/volume/drivers/nec/v/test_internal_nec_rest_fc.py0000664000175000017500000015256700000000000030252 0ustar00zuulzuul00000000000000# Copyright (C) 2021, 2024, NEC corporation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
# """Unit tests for NEC Driver.""" import functools from unittest import mock from oslo_config import cfg import requests from requests import models from cinder import context as cinder_context from cinder import db from cinder.db.sqlalchemy import api as sqlalchemy_api from cinder import exception from cinder.objects import group_snapshot as obj_group_snap from cinder.objects import snapshot as obj_snap from cinder.tests.unit import fake_group from cinder.tests.unit import fake_group_snapshot from cinder.tests.unit import fake_snapshot from cinder.tests.unit import fake_volume from cinder.tests.unit import test from cinder.volume import configuration as conf from cinder.volume import driver from cinder.volume.drivers.hitachi import hbsd_common from cinder.volume.drivers.hitachi import hbsd_rest from cinder.volume.drivers.hitachi import hbsd_rest_api from cinder.volume.drivers.nec.v import nec_v_fc from cinder.volume import volume_types from cinder.volume import volume_utils from cinder.zonemanager import utils as fczm_utils # Configuration parameter values CONFIG_MAP = { 'serial': '886000123456', 'my_ip': '127.0.0.1', 'rest_server_ip_addr': '172.16.18.108', 'rest_server_ip_port': '23451', 'port_id': 'CL1-A', 'host_grp_name': 'NEC-0123456789abcdef', 'host_mode': 'LINUX/IRIX', 'host_wwn': '0123456789abcdef', 'target_wwn': '1111111123456789', 'user_id': 'user', 'user_pass': 'password', 'pool_name': 'test_pool', 'auth_user': 'auth_user', 'auth_password': 'auth_password', } # Dummy response for FC zoning device mapping DEVICE_MAP = { 'fabric_name': { 'initiator_port_wwn_list': [CONFIG_MAP['host_wwn']], 'target_port_wwn_list': [CONFIG_MAP['target_wwn']]}} DEFAULT_CONNECTOR = { 'host': 'host', 'ip': CONFIG_MAP['my_ip'], 'wwpns': [CONFIG_MAP['host_wwn']], 'multipath': False, } CTXT = cinder_context.get_admin_context() TEST_VOLUME = [] for i in range(5): volume = {} volume['id'] = '00000000-0000-0000-0000-{0:012d}'.format(i) volume['name'] = 'test-volume{0:d}'.format(i) volume['volume_type_id'] = '00000000-0000-0000-0000-{0:012d}'.format(i) if i == 3 or i == 4: volume['provider_location'] = None else: volume['provider_location'] = '{0:d}'.format(i) volume['size'] = 128 if i == 2: volume['status'] = 'in-use' elif i == 4: volume['status'] = None else: volume['status'] = 'available' volume = fake_volume.fake_volume_obj(CTXT, **volume) volume.volume_type = fake_volume.fake_volume_type_obj(CTXT) TEST_VOLUME.append(volume) def _volume_get(context, volume_id): """Return predefined volume info.""" return TEST_VOLUME[int(volume_id.replace("-", ""))] TEST_SNAPSHOT = [] snapshot = {} snapshot['id'] = '10000000-0000-0000-0000-{0:012d}'.format(0) snapshot['name'] = 'TEST_SNAPSHOT{0:d}'.format(0) snapshot['provider_location'] = '{0:d}'.format(1) snapshot['status'] = 'available' snapshot['volume_id'] = '00000000-0000-0000-0000-{0:012d}'.format(0) snapshot['volume'] = _volume_get(None, snapshot['volume_id']) snapshot['volume_name'] = 'test-volume{0:d}'.format(0) snapshot['volume_size'] = 128 snapshot = obj_snap.Snapshot._from_db_object( CTXT, obj_snap.Snapshot(), fake_snapshot.fake_db_snapshot(**snapshot)) TEST_SNAPSHOT.append(snapshot) TEST_GROUP = [] for i in range(2): group = {} group['id'] = '20000000-0000-0000-0000-{0:012d}'.format(i) group['status'] = 'available' group = fake_group.fake_group_obj(CTXT, **group) TEST_GROUP.append(group) TEST_GROUP_SNAP = [] group_snapshot = {} group_snapshot['id'] = '30000000-0000-0000-0000-{0:012d}'.format(0) group_snapshot['status'] = 'available' group_snapshot = 
obj_group_snap.GroupSnapshot._from_db_object( CTXT, obj_group_snap.GroupSnapshot(), fake_group_snapshot.fake_db_group_snapshot(**group_snapshot)) TEST_GROUP_SNAP.append(group_snapshot) # Dummy response for REST API POST_SESSIONS_RESULT = { "token": "b74777a3-f9f0-4ea8-bd8f-09847fac48d3", "sessionId": 0, } GET_PORTS_RESULT = { "data": [ { "portId": CONFIG_MAP['port_id'], "portType": "FIBRE", "portAttributes": [ "TAR", "MCU", "RCU", "ELUN" ], "fabricMode": True, "portConnection": "PtoP", "lunSecuritySetting": True, "wwn": CONFIG_MAP['target_wwn'], }, ], } GET_HOST_WWNS_RESULT = { "data": [ { "hostGroupNumber": 0, "hostWwn": CONFIG_MAP['host_wwn'], }, ], } COMPLETED_SUCCEEDED_RESULT = { "status": "Completed", "state": "Succeeded", "affectedResources": ('a/b/c/1',), } COMPLETED_FAILED_RESULT_LU_DEFINED = { "status": "Completed", "state": "Failed", "error": { "errorCode": { "SSB1": "B958", "SSB2": "015A", }, }, } GET_LDEV_RESULT = { "emulationType": "OPEN-V-CVS", "blockCapacity": 2097152, "attributes": ["CVS", "DP"], "status": "NML", "poolId": 30, "label": "00000000000000000000000000000000", } GET_LDEV_RESULT_MAPPED = { "emulationType": "OPEN-V-CVS", "blockCapacity": 2097152, "attributes": ["CVS", "DP"], "status": "NML", "ports": [ { "portId": CONFIG_MAP['port_id'], "hostGroupNumber": 0, "hostGroupName": CONFIG_MAP['host_grp_name'], "lun": 1 }, ], } GET_LDEV_RESULT_SNAP = { "emulationType": "OPEN-V-CVS", "blockCapacity": 2097152, "attributes": ["CVS", "DP"], "status": "NML", "poolId": 30, "label": "10000000000000000000000000000000", } GET_LDEV_RESULT_PAIR = { "emulationType": "OPEN-V-CVS", "blockCapacity": 2097152, "attributes": ["CVS", "DP", "SS"], "status": "NML", "label": "00000000000000000000000000000000", } GET_LDEV_RESULT_PAIR_SNAP = { "emulationType": "OPEN-V-CVS", "blockCapacity": 2097152, "attributes": ["CVS", "DP", "SS"], "status": "NML", "label": "10000000000000000000000000000000", } GET_SNAPSHOTS_RESULT = { "data": [ { "primaryOrSecondary": "S-VOL", "status": "PSUS", "pvolLdevId": 0, "muNumber": 1, "svolLdevId": 1, }, ], } GET_SNAPSHOTS_RESULT_PAIR = { "data": [ { "primaryOrSecondary": "S-VOL", "status": "PAIR", "pvolLdevId": 0, "muNumber": 1, "svolLdevId": 1, }, ], } GET_SNAPSHOTS_RESULT_BUSY = { "data": [ { "primaryOrSecondary": "P-VOL", "status": "PSUP", "pvolLdevId": 0, "muNumber": 1, "svolLdevId": 1, }, ], } GET_POOLS_RESULT = { "data": [ { "poolId": 30, "poolName": CONFIG_MAP['pool_name'], "availableVolumeCapacity": 480144, "totalPoolCapacity": 507780, "totalLocatedCapacity": 71453172, "virtualVolumeCapacityRate": -1, }, ], } GET_LUNS_RESULT = { "data": [ { "ldevId": 0, "lun": 1, }, ], } GET_HOST_GROUP_RESULT = { "hostGroupName": CONFIG_MAP['host_grp_name'], } GET_HOST_GROUPS_RESULT = { "data": [ { "hostGroupNumber": 0, "portId": CONFIG_MAP['port_id'], "hostGroupName": "NEC-test", }, ], } GET_LDEVS_RESULT = { "data": [ { "ldevId": 0, "label": "15960cc738c94c5bb4f1365be5eeed44", }, { "ldevId": 1, "label": "15960cc738c94c5bb4f1365be5eeed45", }, ], } NOTFOUND_RESULT = { "data": [], } ERROR_RESULT = { "errorSource": "", "message": "", "solution": "", "messageId": "", "errorCode": { "SSB1": "", "SSB2": "", } } def _brick_get_connector_properties(multipath=False, enforce_multipath=False): """Return a predefined connector object.""" return DEFAULT_CONNECTOR def reduce_retrying_time(func): @functools.wraps(func) def wrapper(*args, **kwargs): backup_lock_waittime = hbsd_rest_api._LOCK_TIMEOUT backup_exec_max_waittime = hbsd_rest_api._REST_TIMEOUT backup_job_api_response_timeout = ( 
hbsd_rest_api._JOB_API_RESPONSE_TIMEOUT) backup_get_api_response_timeout = ( hbsd_rest_api._GET_API_RESPONSE_TIMEOUT) backup_extend_waittime = hbsd_rest_api._EXTEND_TIMEOUT backup_exec_retry_interval = hbsd_rest_api._EXEC_RETRY_INTERVAL backup_rest_server_restart_timeout = ( hbsd_rest_api._REST_SERVER_RESTART_TIMEOUT) backup_state_transition_timeout = ( hbsd_rest._STATE_TRANSITION_TIMEOUT) hbsd_rest_api._LOCK_TIMEOUT = 0.01 hbsd_rest_api._REST_TIMEOUT = 0.01 hbsd_rest_api._JOB_API_RESPONSE_TIMEOUT = 0.01 hbsd_rest_api._GET_API_RESPONSE_TIMEOUT = 0.01 hbsd_rest_api._EXTEND_TIMEOUT = 0.01 hbsd_rest_api._EXEC_RETRY_INTERVAL = 0.004 hbsd_rest_api._REST_SERVER_RESTART_TIMEOUT = 0.02 hbsd_rest._STATE_TRANSITION_TIMEOUT = 0.01 func(*args, **kwargs) hbsd_rest_api._LOCK_TIMEOUT = backup_lock_waittime hbsd_rest_api._REST_TIMEOUT = backup_exec_max_waittime hbsd_rest_api._JOB_API_RESPONSE_TIMEOUT = ( backup_job_api_response_timeout) hbsd_rest_api._GET_API_RESPONSE_TIMEOUT = ( backup_get_api_response_timeout) hbsd_rest_api._EXTEND_TIMEOUT = backup_extend_waittime hbsd_rest_api._EXEC_RETRY_INTERVAL = backup_exec_retry_interval hbsd_rest_api._REST_SERVER_RESTART_TIMEOUT = ( backup_rest_server_restart_timeout) hbsd_rest._STATE_TRANSITION_TIMEOUT = ( backup_state_transition_timeout) return wrapper class FakeLookupService(): """Dummy FC zoning mapping lookup service class.""" def get_device_mapping_from_network(self, initiator_wwns, target_wwns): """Return predefined FC zoning mapping.""" return DEVICE_MAP class FakeResponse(): def __init__(self, status_code, data=None, headers=None): self.status_code = status_code self.data = data self.text = data self.content = data self.headers = {'Content-Type': 'json'} if headers is None else headers def json(self): return self.data class VStorageRESTFCDriverTest(test.TestCase): """Unit test class for NEC REST interface fibre channel module.""" test_existing_ref = {'source-id': '1'} test_existing_ref_name = { 'source-name': '15960cc7-38c9-4c5b-b4f1-365be5eeed45'} def setUp(self): """Set up the test environment.""" def _set_required(opts, required): for opt in opts: opt.required = required # Initialize Cinder and avoid checking driver options. 
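        # The required REST/common driver options are temporarily flagged as
        # optional so that the base test.TestCase setUp can run against the
        # mocked Configuration object; they are restored to required
        # immediately after super().setUp() returns.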
rest_required_opts = [ opt for opt in hbsd_rest.REST_VOLUME_OPTS if opt.required] common_required_opts = [ opt for opt in hbsd_common.COMMON_VOLUME_OPTS if opt.required] _set_required(rest_required_opts, False) _set_required(common_required_opts, False) super(VStorageRESTFCDriverTest, self).setUp() _set_required(rest_required_opts, True) _set_required(common_required_opts, True) self.configuration = mock.Mock(conf.Configuration) self.ctxt = cinder_context.get_admin_context() self._setup_config() self._setup_driver() def _setup_config(self): """Set configuration parameter values.""" self.configuration.config_group = "REST" self.configuration.volume_backend_name = "RESTFC" self.configuration.volume_driver = ( "cinder.volume.drivers.nec.v.nec_v_fc.VStorageFCDriver") self.configuration.reserved_percentage = "0" self.configuration.use_multipath_for_image_xfer = False self.configuration.enforce_multipath_for_image_xfer = False self.configuration.max_over_subscription_ratio = 500.0 self.configuration.driver_ssl_cert_verify = False self.configuration.nec_v_storage_id = CONFIG_MAP['serial'] self.configuration.nec_v_pools = ["30"] self.configuration.nec_v_snap_pool = None self.configuration.nec_v_ldev_range = "0-1" self.configuration.nec_v_target_ports = [CONFIG_MAP['port_id']] self.configuration.nec_v_compute_target_ports = [ CONFIG_MAP['port_id']] self.configuration.nec_v_group_create = True self.configuration.nec_v_group_delete = True self.configuration.nec_v_copy_speed = 3 self.configuration.nec_v_copy_check_interval = 3 self.configuration.nec_v_async_copy_check_interval = 10 self.configuration.san_login = CONFIG_MAP['user_id'] self.configuration.san_password = CONFIG_MAP['user_pass'] self.configuration.san_ip = CONFIG_MAP[ 'rest_server_ip_addr'] self.configuration.san_api_port = CONFIG_MAP[ 'rest_server_ip_port'] self.configuration.nec_v_rest_disable_io_wait = True self.configuration.nec_v_rest_tcp_keepalive = True self.configuration.nec_v_discard_zero_page = True self.configuration.nec_v_rest_number = "0" self.configuration.nec_v_lun_timeout = hbsd_rest._LUN_TIMEOUT self.configuration.nec_v_lun_retry_interval = ( hbsd_rest._LUN_RETRY_INTERVAL) self.configuration.nec_v_restore_timeout = hbsd_rest._RESTORE_TIMEOUT self.configuration.nec_v_state_transition_timeout = 2 self.configuration.nec_v_lock_timeout = hbsd_rest_api._LOCK_TIMEOUT self.configuration.nec_v_rest_timeout = 3 self.configuration.nec_v_extend_timeout = ( hbsd_rest_api._EXTEND_TIMEOUT) self.configuration.nec_v_exec_retry_interval = ( hbsd_rest_api._EXEC_RETRY_INTERVAL) self.configuration.nec_v_rest_connect_timeout = ( hbsd_rest_api._DEFAULT_CONNECT_TIMEOUT) self.configuration.nec_v_rest_job_api_response_timeout = ( hbsd_rest_api._JOB_API_RESPONSE_TIMEOUT) self.configuration.nec_v_rest_get_api_response_timeout = ( hbsd_rest_api._GET_API_RESPONSE_TIMEOUT) self.configuration.nec_v_rest_server_busy_timeout = ( hbsd_rest_api._REST_SERVER_BUSY_TIMEOUT) self.configuration.nec_v_rest_keep_session_loop_interval = ( hbsd_rest_api._KEEP_SESSION_LOOP_INTERVAL) self.configuration.nec_v_rest_another_ldev_mapped_retry_timeout = ( hbsd_rest_api._ANOTHER_LDEV_MAPPED_RETRY_TIMEOUT) self.configuration.nec_v_rest_tcp_keepidle = ( hbsd_rest_api._TCP_KEEPIDLE) self.configuration.nec_v_rest_tcp_keepintvl = ( hbsd_rest_api._TCP_KEEPINTVL) self.configuration.nec_v_rest_tcp_keepcnt = ( hbsd_rest_api._TCP_KEEPCNT) self.configuration.nec_v_host_mode_options = [] self.configuration.nec_v_zoning_request = False self.configuration.san_thin_provision = True 
self.configuration.san_private_key = '' self.configuration.san_clustername = '' self.configuration.san_ssh_port = '22' self.configuration.san_is_local = False self.configuration.ssh_conn_timeout = '30' self.configuration.ssh_min_pool_conn = '1' self.configuration.ssh_max_pool_conn = '5' self.configuration.use_chap_auth = True self.configuration.chap_username = CONFIG_MAP['auth_user'] self.configuration.chap_password = CONFIG_MAP['auth_password'] self.configuration.safe_get = self._fake_safe_get CONF = cfg.CONF CONF.my_ip = CONFIG_MAP['my_ip'] def _fake_safe_get(self, value): """Retrieve a configuration value avoiding throwing an exception.""" try: val = getattr(self.configuration, value) except AttributeError: val = None return val @mock.patch.object(requests.Session, "request") @mock.patch.object( volume_utils, 'brick_get_connector_properties', side_effect=_brick_get_connector_properties) def _setup_driver( self, brick_get_connector_properties=None, request=None): """Set up the driver environment.""" self.driver = nec_v_fc.VStorageFCDriver( configuration=self.configuration, db=db) request.side_effect = [FakeResponse(200, POST_SESSIONS_RESULT), FakeResponse(200, GET_PORTS_RESULT), FakeResponse(200, GET_HOST_WWNS_RESULT)] self.driver.do_setup(None) self.driver.check_for_setup_error() self.driver.local_path(None) self.driver.create_export(None, None, None) self.driver.ensure_export(None, None) self.driver.remove_export(None, None) self.driver.create_export_snapshot(None, None, None) self.driver.remove_export_snapshot(None, None) # stop the Loopingcall within the do_setup treatment self.driver.common.client.keep_session_loop.stop() def tearDown(self): self.client = None super(VStorageRESTFCDriverTest, self).tearDown() # API test cases @mock.patch.object(requests.Session, "request") @mock.patch.object( volume_utils, 'brick_get_connector_properties', side_effect=_brick_get_connector_properties) def test_do_setup(self, brick_get_connector_properties, request): drv = nec_v_fc.VStorageFCDriver( configuration=self.configuration, db=db) self._setup_config() request.side_effect = [FakeResponse(200, POST_SESSIONS_RESULT), FakeResponse(200, GET_PORTS_RESULT), FakeResponse(200, GET_HOST_WWNS_RESULT)] drv.do_setup(None) self.assertEqual( {CONFIG_MAP['port_id']: CONFIG_MAP['target_wwn']}, drv.common.storage_info['wwns']) self.assertEqual(1, brick_get_connector_properties.call_count) self.assertEqual(3, request.call_count) # stop the Loopingcall within the do_setup treatment self.driver.common.client.keep_session_loop.stop() self.driver.common.client.keep_session_loop.wait() @mock.patch.object(requests.Session, "request") @mock.patch.object( volume_utils, 'brick_get_connector_properties', side_effect=_brick_get_connector_properties) def test_do_setup_create_hg(self, brick_get_connector_properties, request): """Normal case: The host group not exists.""" drv = nec_v_fc.VStorageFCDriver( configuration=self.configuration, db=db) self._setup_config() request.side_effect = [FakeResponse(200, POST_SESSIONS_RESULT), FakeResponse(200, GET_PORTS_RESULT), FakeResponse(200, NOTFOUND_RESULT), FakeResponse(200, NOTFOUND_RESULT), FakeResponse(200, NOTFOUND_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)] drv.do_setup(None) self.assertEqual( {CONFIG_MAP['port_id']: CONFIG_MAP['target_wwn']}, drv.common.storage_info['wwns']) self.assertEqual(1, brick_get_connector_properties.call_count) self.assertEqual(8, 
request.call_count) # stop the Loopingcall within the do_setup treatment self.driver.common.client.keep_session_loop.stop() self.driver.common.client.keep_session_loop.wait() @mock.patch.object(requests.Session, "request") @mock.patch.object( volume_utils, 'brick_get_connector_properties', side_effect=_brick_get_connector_properties) def test_do_setup_pool_name(self, brick_get_connector_properties, request): """Normal case: Specify a pool name instead of pool id""" drv = nec_v_fc.VStorageFCDriver( configuration=self.configuration) self._setup_config() tmp_pools = self.configuration.hitachi_pools self.configuration.hitachi_pools = [CONFIG_MAP['pool_name']] request.side_effect = [FakeResponse(200, POST_SESSIONS_RESULT), FakeResponse(200, GET_POOLS_RESULT), FakeResponse(200, GET_PORTS_RESULT), FakeResponse(200, GET_HOST_WWNS_RESULT)] drv.do_setup(None) self.assertEqual( {CONFIG_MAP['port_id']: CONFIG_MAP['target_wwn']}, drv.common.storage_info['wwns']) self.assertEqual(1, brick_get_connector_properties.call_count) self.assertEqual(4, request.call_count) self.configuration.hitachi_pools = tmp_pools # stop the Loopingcall within the do_setup treatment self.driver.common.client.keep_session_loop.stop() self.driver.common.client.keep_session_loop.wait() @mock.patch.object(requests.Session, "request") @mock.patch.object(volume_types, 'get_volume_type_extra_specs') @mock.patch.object(volume_types, 'get_volume_type_qos_specs') def test_create_volume( self, get_volume_type_qos_specs, get_volume_type_extra_specs, request): request.return_value = FakeResponse(202, COMPLETED_SUCCEEDED_RESULT) get_volume_type_extra_specs.return_value = {} get_volume_type_qos_specs.return_value = {'qos_specs': None} self.driver.common._stats = {} self.driver.common._stats['pools'] = [ {'location_info': {'pool_id': 30}}] ret = self.driver.create_volume(TEST_VOLUME[4]) self.assertEqual('1', ret['provider_location']) self.assertEqual(2, request.call_count) @reduce_retrying_time @mock.patch.object(requests.Session, "request") @mock.patch.object(volume_types, 'get_volume_type_extra_specs') @mock.patch.object(volume_types, 'get_volume_type_qos_specs') def test_create_volume_timeout( self, get_volume_type_qos_specs, get_volume_type_extra_specs, request): request.return_value = FakeResponse( 500, ERROR_RESULT, headers={'Content-Type': 'json'}) get_volume_type_extra_specs.return_value = {} get_volume_type_qos_specs.return_value = {'qos_specs': None} self.driver.common._stats = {} self.driver.common._stats['pools'] = [ {'location_info': {'pool_id': 30}}] self.assertRaises(exception.VolumeDriverException, self.driver.create_volume, TEST_VOLUME[4]) self.assertGreater(request.call_count, 1) @mock.patch.object(requests.Session, "request") def test_delete_volume(self, request): request.side_effect = [FakeResponse(200, GET_LDEV_RESULT), FakeResponse(200, GET_LDEV_RESULT), FakeResponse(200, GET_LDEV_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)] self.driver.delete_volume(TEST_VOLUME[0]) self.assertEqual(4, request.call_count) @mock.patch.object(requests.Session, "request") def test_delete_volume_temporary_busy(self, request): request.side_effect = [FakeResponse(200, GET_LDEV_RESULT_PAIR), FakeResponse(200, GET_SNAPSHOTS_RESULT_BUSY), FakeResponse(200, GET_LDEV_RESULT), FakeResponse(200, GET_LDEV_RESULT), FakeResponse(200, GET_LDEV_RESULT), FakeResponse(200, GET_LDEV_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)] self.driver.delete_volume(TEST_VOLUME[0]) self.assertEqual(7, request.call_count) @reduce_retrying_time 
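    # reduce_retrying_time (defined earlier in this module) temporarily
    # shrinks the hbsd_rest / hbsd_rest_api timeout and retry-interval
    # constants to a few milliseconds, so the busy-timeout path below is
    # exercised without waiting for the real retry windows.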
@mock.patch.object(requests.Session, "request") def test_delete_volume_busy_timeout(self, request): request.side_effect = [FakeResponse(200, GET_LDEV_RESULT_PAIR), FakeResponse(200, GET_SNAPSHOTS_RESULT_BUSY), FakeResponse(200, GET_LDEV_RESULT_PAIR), FakeResponse(200, GET_LDEV_RESULT_PAIR), FakeResponse(200, GET_LDEV_RESULT_PAIR)] self.assertRaises(exception.VolumeDriverException, self.driver.delete_volume, TEST_VOLUME[0]) self.assertGreater(request.call_count, 2) @mock.patch.object(requests.Session, "request") def test_extend_volume(self, request): request.side_effect = [FakeResponse(200, GET_LDEV_RESULT), FakeResponse(200, GET_LDEV_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)] self.driver.extend_volume(TEST_VOLUME[0], 256) self.assertEqual(3, request.call_count) @mock.patch.object(driver.FibreChannelDriver, "get_goodness_function") @mock.patch.object(driver.FibreChannelDriver, "get_filter_function") @mock.patch.object(requests.Session, "request") def test__update_volume_stats( self, request, get_filter_function, get_goodness_function): request.return_value = FakeResponse(200, GET_POOLS_RESULT) get_filter_function.return_value = None get_goodness_function.return_value = None self.driver._update_volume_stats() self.assertEqual( 'NEC', self.driver._stats['vendor_name']) self.assertTrue(self.driver._stats["pools"][0]['multiattach']) self.assertEqual(1, request.call_count) self.assertEqual(1, get_filter_function.call_count) self.assertEqual(1, get_goodness_function.call_count) @mock.patch.object(requests.Session, "request") @mock.patch.object(sqlalchemy_api, 'volume_get', side_effect=_volume_get) @mock.patch.object(volume_types, 'get_volume_type_extra_specs') @mock.patch.object(volume_types, 'get_volume_type_qos_specs') def test_create_snapshot( self, get_volume_type_qos_specs, get_volume_type_extra_specs, volume_get, request): request.side_effect = [FakeResponse(200, GET_LDEV_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(200, GET_SNAPSHOTS_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)] get_volume_type_extra_specs.return_value = {} get_volume_type_qos_specs.return_value = {'qos_specs': None} self.driver.common._stats = {} self.driver.common._stats['pools'] = [ {'location_info': {'pool_id': 30}}] ret = self.driver.create_snapshot(TEST_SNAPSHOT[0]) self.assertEqual('1', ret['provider_location']) self.assertEqual(5, request.call_count) @mock.patch.object(requests.Session, "request") def test_delete_snapshot(self, request): request.side_effect = [FakeResponse(200, GET_LDEV_RESULT_PAIR_SNAP), FakeResponse(200, NOTFOUND_RESULT), FakeResponse(200, GET_SNAPSHOTS_RESULT), FakeResponse(200, GET_SNAPSHOTS_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(200, GET_LDEV_RESULT), FakeResponse(200, GET_LDEV_RESULT), FakeResponse(200, GET_LDEV_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)] self.driver.delete_snapshot(TEST_SNAPSHOT[0]) self.assertEqual(10, request.call_count) @mock.patch.object(requests.Session, "request") def test_delete_snapshot_no_pair(self, request): """Normal case: Delete a snapshot without pair.""" request.side_effect = [FakeResponse(200, GET_LDEV_RESULT_SNAP), FakeResponse(200, GET_LDEV_RESULT), FakeResponse(200, GET_LDEV_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)] self.driver.delete_snapshot(TEST_SNAPSHOT[0]) self.assertEqual(4, request.call_count) @mock.patch.object(requests.Session, "request") 
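    # Stacked mock.patch.object decorators are applied bottom-up, so the
    # generated mocks arrive in reverse order: get_volume_type_qos_specs
    # (innermost) is the first argument after self and request (outermost)
    # is the last.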
@mock.patch.object(volume_types, 'get_volume_type_extra_specs') @mock.patch.object(volume_types, 'get_volume_type_qos_specs') def test_create_cloned_volume( self, get_volume_type_qos_specs, get_volume_type_extra_specs, request): request.side_effect = [FakeResponse(200, GET_LDEV_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(200, GET_SNAPSHOTS_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)] get_volume_type_extra_specs.return_value = {} get_volume_type_qos_specs.return_value = {'qos_specs': None} self.driver.common._stats = {} self.driver.common._stats['pools'] = [ {'location_info': {'pool_id': 30}}] vol = self.driver.create_cloned_volume(TEST_VOLUME[0], TEST_VOLUME[1]) self.assertEqual('1', vol['provider_location']) self.assertEqual(5, request.call_count) @mock.patch.object(requests.Session, "request") @mock.patch.object(volume_types, 'get_volume_type_extra_specs') @mock.patch.object(volume_types, 'get_volume_type_qos_specs') def test_create_volume_from_snapshot( self, get_volume_type_qos_specs, get_volume_type_extra_specs, request): request.side_effect = [FakeResponse(200, GET_LDEV_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(200, GET_SNAPSHOTS_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)] get_volume_type_extra_specs.return_value = {} get_volume_type_qos_specs.return_value = {'qos_specs': None} self.driver.common._stats = {} self.driver.common._stats['pools'] = [ {'location_info': {'pool_id': 30}}] vol = self.driver.create_volume_from_snapshot( TEST_VOLUME[0], TEST_SNAPSHOT[0]) self.assertEqual('1', vol['provider_location']) self.assertEqual(5, request.call_count) @mock.patch.object(fczm_utils, "add_fc_zone") @mock.patch.object(requests.Session, "request") @mock.patch.object(volume_types, 'get_volume_type_extra_specs') def test_initialize_connection( self, get_volume_type_extra_specs, request, add_fc_zone): self.driver.common.conf.hitachi_zoning_request = True self.driver.common._lookup_service = FakeLookupService() get_volume_type_extra_specs.return_value = {} request.side_effect = [FakeResponse(200, GET_HOST_WWNS_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)] ret = self.driver.initialize_connection( TEST_VOLUME[0], DEFAULT_CONNECTOR) self.assertEqual('fibre_channel', ret['driver_volume_type']) self.assertEqual([CONFIG_MAP['target_wwn']], ret['data']['target_wwn']) self.assertEqual(1, ret['data']['target_lun']) self.assertEqual(2, request.call_count) self.assertEqual(1, add_fc_zone.call_count) @mock.patch.object(fczm_utils, "add_fc_zone") @mock.patch.object(requests.Session, "request") @mock.patch.object(volume_types, 'get_volume_type_extra_specs') def test_initialize_connection_already_mapped( self, get_volume_type_extra_specs, request, add_fc_zone): """Normal case: ldev have already mapped.""" self.driver.common.conf.hitachi_zoning_request = True self.driver.common._lookup_service = FakeLookupService() get_volume_type_extra_specs.return_value = {} request.side_effect = [ FakeResponse(200, GET_HOST_WWNS_RESULT), FakeResponse(202, COMPLETED_FAILED_RESULT_LU_DEFINED), FakeResponse(200, GET_LUNS_RESULT), ] ret = self.driver.initialize_connection( TEST_VOLUME[0], DEFAULT_CONNECTOR) self.assertEqual('fibre_channel', ret['driver_volume_type']) self.assertEqual([CONFIG_MAP['target_wwn']], ret['data']['target_wwn']) self.assertEqual(1, ret['data']['target_lun']) self.assertEqual(3, request.call_count) self.assertEqual(1, 
add_fc_zone.call_count) @mock.patch.object(fczm_utils, "add_fc_zone") @mock.patch.object(requests.Session, "request") @mock.patch.object(volume_types, 'get_volume_type_extra_specs') def test_initialize_connection_shared_target( self, get_volume_type_extra_specs, request, add_fc_zone): """Normal case: A target shared with other systems.""" self.driver.common.conf.hitachi_zoning_request = True self.driver.common._lookup_service = FakeLookupService() get_volume_type_extra_specs.return_value = {} request.side_effect = [FakeResponse(200, NOTFOUND_RESULT), FakeResponse(200, NOTFOUND_RESULT), FakeResponse(200, GET_HOST_GROUPS_RESULT), FakeResponse(200, GET_HOST_WWNS_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)] ret = self.driver.initialize_connection( TEST_VOLUME[0], DEFAULT_CONNECTOR) self.assertEqual('fibre_channel', ret['driver_volume_type']) self.assertEqual([CONFIG_MAP['target_wwn']], ret['data']['target_wwn']) self.assertEqual(1, ret['data']['target_lun']) self.assertEqual(5, request.call_count) self.assertEqual(1, add_fc_zone.call_count) @mock.patch.object(fczm_utils, "remove_fc_zone") @mock.patch.object(requests.Session, "request") def test_terminate_connection(self, request, remove_fc_zone): self.driver.common.conf.hitachi_zoning_request = True self.driver.common._lookup_service = FakeLookupService() request.side_effect = [FakeResponse(200, GET_HOST_WWNS_RESULT), FakeResponse(200, GET_LDEV_RESULT_MAPPED), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(200, NOTFOUND_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)] self.driver.terminate_connection(TEST_VOLUME[2], DEFAULT_CONNECTOR) self.assertEqual(5, request.call_count) self.assertEqual(1, remove_fc_zone.call_count) @mock.patch.object(fczm_utils, "remove_fc_zone") @mock.patch.object(requests.Session, "request") def test_terminate_connection_not_connector(self, request, remove_fc_zone): """Normal case: Connector is None.""" self.driver.common.conf.hitachi_zoning_request = True self.driver.common._lookup_service = FakeLookupService() request.side_effect = [FakeResponse(200, GET_LDEV_RESULT_MAPPED), FakeResponse(200, GET_HOST_GROUP_RESULT), FakeResponse(200, GET_HOST_WWNS_RESULT), FakeResponse(200, GET_HOST_WWNS_RESULT), FakeResponse(200, GET_LDEV_RESULT_MAPPED), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(200, NOTFOUND_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)] self.driver.terminate_connection(TEST_VOLUME[2], None) self.assertEqual(8, request.call_count) self.assertEqual(1, remove_fc_zone.call_count) @mock.patch.object(fczm_utils, "remove_fc_zone") @mock.patch.object(requests.Session, "request") def test_terminate_connection_not_lun(self, request, remove_fc_zone): """Normal case: Lun already not exist.""" self.driver.common.conf.hitachi_zoning_request = True self.driver.common._lookup_service = FakeLookupService() request.side_effect = [FakeResponse(200, GET_HOST_WWNS_RESULT), FakeResponse(200, GET_LDEV_RESULT)] self.driver.terminate_connection(TEST_VOLUME[2], DEFAULT_CONNECTOR) self.assertEqual(2, request.call_count) self.assertEqual(1, remove_fc_zone.call_count) @mock.patch.object(fczm_utils, "add_fc_zone") @mock.patch.object(requests.Session, "request") def test_initialize_connection_snapshot(self, request, add_fc_zone): self.driver.common.conf.hitachi_zoning_request = True self.driver.common._lookup_service = FakeLookupService() request.side_effect = [FakeResponse(200, GET_HOST_WWNS_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)] ret = 
self.driver.initialize_connection_snapshot( TEST_SNAPSHOT[0], DEFAULT_CONNECTOR) self.assertEqual('fibre_channel', ret['driver_volume_type']) self.assertEqual([CONFIG_MAP['target_wwn']], ret['data']['target_wwn']) self.assertEqual(1, ret['data']['target_lun']) self.assertEqual(2, request.call_count) self.assertEqual(1, add_fc_zone.call_count) @mock.patch.object(fczm_utils, "remove_fc_zone") @mock.patch.object(requests.Session, "request") def test_terminate_connection_snapshot(self, request, remove_fc_zone): self.driver.common.conf.hitachi_zoning_request = True self.driver.common._lookup_service = FakeLookupService() request.side_effect = [FakeResponse(200, GET_HOST_WWNS_RESULT), FakeResponse(200, GET_LDEV_RESULT_MAPPED), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(200, NOTFOUND_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)] self.driver.terminate_connection_snapshot( TEST_SNAPSHOT[0], DEFAULT_CONNECTOR) self.assertEqual(5, request.call_count) self.assertEqual(1, remove_fc_zone.call_count) @mock.patch.object(requests.Session, "request") @mock.patch.object(volume_types, 'get_volume_type_qos_specs') def test_manage_existing(self, get_volume_type_qos_specs, request): request.side_effect = [FakeResponse(200, GET_LDEV_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(200, GET_LDEVS_RESULT)] get_volume_type_qos_specs.return_value = {'qos_specs': None} ret = self.driver.manage_existing( TEST_VOLUME[0], self.test_existing_ref) self.assertEqual('1', ret['provider_location']) self.assertEqual(3, request.call_count) @mock.patch.object(requests.Session, "request") @mock.patch.object(volume_types, 'get_volume_type_qos_specs') def test_manage_existing_name(self, get_volume_type_qos_specs, request): request.side_effect = [FakeResponse(200, GET_LDEVS_RESULT), FakeResponse(200, GET_LDEV_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(200, GET_LDEVS_RESULT)] get_volume_type_qos_specs.return_value = {'qos_specs': None} ret = self.driver.manage_existing( TEST_VOLUME[0], self.test_existing_ref_name) self.assertEqual('1', ret['provider_location']) self.assertEqual(4, request.call_count) @mock.patch.object(requests.Session, "request") @mock.patch.object(volume_types, 'get_volume_type_qos_specs') def test_manage_existing_get_size( self, get_volume_type_qos_specs, request): request.return_value = FakeResponse(200, GET_LDEV_RESULT) get_volume_type_qos_specs.return_value = {'qos_specs': None} self.driver.manage_existing_get_size( TEST_VOLUME[0], self.test_existing_ref) self.assertEqual(1, request.call_count) @mock.patch.object(requests.Session, "request") def test_manage_existing_get_size_name(self, request): request.side_effect = [FakeResponse(200, GET_LDEVS_RESULT), FakeResponse(200, GET_LDEV_RESULT)] self.driver.manage_existing_get_size( TEST_VOLUME[0], self.test_existing_ref_name) self.assertEqual(2, request.call_count) @mock.patch.object(requests.Session, "request") def test_unmanage(self, request): request.side_effect = [FakeResponse(200, GET_LDEV_RESULT), FakeResponse(200, GET_LDEV_RESULT)] self.driver.unmanage(TEST_VOLUME[0]) self.assertEqual(2, request.call_count) @mock.patch.object(requests.Session, "request") def test_copy_image_to_volume(self, request): image_service = 'fake_image_service' image_id = 'fake_image_id' request.return_value = FakeResponse(202, COMPLETED_SUCCEEDED_RESULT) with mock.patch.object(driver.VolumeDriver, 'copy_image_to_volume') \ as mock_copy_image: self.driver.copy_image_to_volume( self.ctxt, TEST_VOLUME[0], 
image_service, image_id) mock_copy_image.assert_called_with( self.ctxt, TEST_VOLUME[0], image_service, image_id, disable_sparse=False) self.assertEqual(1, request.call_count) @mock.patch.object(requests.Session, "request") def test_update_migrated_volume(self, request): request.return_value = FakeResponse(202, COMPLETED_SUCCEEDED_RESULT) ret = self.driver.update_migrated_volume( self.ctxt, TEST_VOLUME[0], TEST_VOLUME[1], "available") self.assertEqual(1, request.call_count) actual = ({'_name_id': TEST_VOLUME[1]['id'], 'provider_location': TEST_VOLUME[1]['provider_location']}) self.assertEqual(actual, ret) def test_unmanage_snapshot(self): """The driver don't support unmange_snapshot.""" self.assertRaises( NotImplementedError, self.driver.unmanage_snapshot, TEST_SNAPSHOT[0]) @mock.patch.object(requests.Session, "request") def test_retype(self, request): request.return_value = FakeResponse(200, GET_LDEV_RESULT) new_specs = {'nec:test': 'test'} new_type_ref = volume_types.create(self.ctxt, 'new', new_specs) new_type = volume_types.get_volume_type(self.ctxt, new_type_ref['id']) diff = {} host = { 'capabilities': { 'location_info': { 'pool_id': 30, }, }, } ret = self.driver.retype( self.ctxt, TEST_VOLUME[0], new_type, diff, host) self.assertEqual(1, request.call_count) self.assertTrue(ret) def test_backup_use_temp_snapshot(self): self.assertTrue(self.driver.backup_use_temp_snapshot()) @mock.patch.object(requests.Session, "request") def test_revert_to_snapshot(self, request): request.side_effect = [FakeResponse(200, GET_LDEV_RESULT_PAIR), FakeResponse(200, GET_SNAPSHOTS_RESULT), FakeResponse(200, GET_SNAPSHOTS_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(200, GET_SNAPSHOTS_RESULT)] self.driver.revert_to_snapshot( self.ctxt, TEST_VOLUME[0], TEST_SNAPSHOT[0]) self.assertEqual(5, request.call_count) def test_session___call__(self): session = self.driver.common.client.Session('id', 'token') req = models.Response() ret = session.__call__(req) self.assertEqual('Session token', ret.headers['Authorization']) def test_create_group(self): ret = self.driver.create_group(self.ctxt, TEST_GROUP[0]) self.assertIsNone(ret) @mock.patch.object(requests.Session, "request") def test_delete_group(self, request): request.side_effect = [FakeResponse(200, GET_LDEV_RESULT), FakeResponse(200, GET_LDEV_RESULT), FakeResponse(200, GET_LDEV_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)] ret = self.driver.delete_group( self.ctxt, TEST_GROUP[0], [TEST_VOLUME[0]]) self.assertEqual(4, request.call_count) actual = ( {'status': TEST_GROUP[0]['status']}, [{'id': TEST_VOLUME[0]['id'], 'status': 'deleted'}] ) self.assertTupleEqual(actual, ret) @mock.patch.object(requests.Session, "request") @mock.patch.object(volume_types, 'get_volume_type_extra_specs') @mock.patch.object(volume_types, 'get_volume_type_qos_specs') def test_create_group_from_src_volume( self, get_volume_type_qos_specs, get_volume_type_extra_specs, request): get_volume_type_extra_specs.return_value = {} get_volume_type_qos_specs.return_value = {'qos_specs': None} request.side_effect = [FakeResponse(200, GET_LDEV_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(200, GET_SNAPSHOTS_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)] self.driver.common._stats = {} self.driver.common._stats['pools'] = [ {'location_info': {'pool_id': 30}}] ret = self.driver.create_group_from_src( self.ctxt, TEST_GROUP[1], [TEST_VOLUME[1]], source_group=TEST_GROUP[0], 
source_vols=[TEST_VOLUME[0]] ) self.assertEqual(5, request.call_count) actual = ( None, [{'id': TEST_VOLUME[1]['id'], 'provider_location': '1'}]) self.assertTupleEqual(actual, ret) @mock.patch.object(requests.Session, "request") @mock.patch.object(volume_types, 'get_volume_type_extra_specs') @mock.patch.object(volume_types, 'get_volume_type_qos_specs') def test_create_group_from_src_snapshot( self, get_volume_type_qos_specs, get_volume_type_extra_specs, request): request.side_effect = [FakeResponse(200, GET_LDEV_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(200, GET_SNAPSHOTS_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)] get_volume_type_extra_specs.return_value = {} get_volume_type_qos_specs.return_value = {'qos_specs': None} self.driver.common._stats = {} self.driver.common._stats['pools'] = [ {'location_info': {'pool_id': 30}}] ret = self.driver.create_group_from_src( self.ctxt, TEST_GROUP[0], [TEST_VOLUME[0]], group_snapshot=TEST_GROUP_SNAP[0], snapshots=[TEST_SNAPSHOT[0]] ) self.assertEqual(5, request.call_count) actual = ( None, [{'id': TEST_VOLUME[0]['id'], 'provider_location': '1'}]) self.assertTupleEqual(actual, ret) @mock.patch.object(volume_types, 'get_volume_type_qos_specs') def test_create_group_from_src_volume_error( self, get_volume_type_qos_specs): get_volume_type_qos_specs.return_value = {'qos_specs': None} self.assertRaises( exception.VolumeDriverException, self.driver.create_group_from_src, self.ctxt, TEST_GROUP[1], [TEST_VOLUME[1]], source_group=TEST_GROUP[0], source_vols=[TEST_VOLUME[3]] ) @mock.patch.object(volume_utils, 'is_group_a_cg_snapshot_type') def test_update_group(self, is_group_a_cg_snapshot_type): is_group_a_cg_snapshot_type.return_value = False ret = self.driver.update_group( self.ctxt, TEST_GROUP[0], add_volumes=[TEST_VOLUME[0]]) self.assertTupleEqual((None, None, None), ret) @mock.patch.object(volume_utils, 'is_group_a_cg_snapshot_type') def test_update_group_error(self, is_group_a_cg_snapshot_type): is_group_a_cg_snapshot_type.return_value = True self.assertRaises( exception.VolumeDriverException, self.driver.update_group, self.ctxt, TEST_GROUP[0], add_volumes=[TEST_VOLUME[3]], remove_volumes=[TEST_VOLUME[0]] ) @mock.patch.object(requests.Session, "request") @mock.patch.object(sqlalchemy_api, 'volume_get', side_effect=_volume_get) @mock.patch.object(volume_utils, 'is_group_a_cg_snapshot_type') @mock.patch.object(volume_types, 'get_volume_type_extra_specs') @mock.patch.object(volume_types, 'get_volume_type_qos_specs') def test_create_group_snapshot_non_cg( self, get_volume_type_qos_specs, get_volume_type_extra_specs, is_group_a_cg_snapshot_type, volume_get, request): is_group_a_cg_snapshot_type.return_value = False get_volume_type_extra_specs.return_value = {} get_volume_type_qos_specs.return_value = {'qos_specs': None} request.side_effect = [FakeResponse(200, GET_LDEV_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(200, GET_SNAPSHOTS_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)] self.driver.common._stats = {} self.driver.common._stats['pools'] = [ {'location_info': {'pool_id': 30}}] ret = self.driver.create_group_snapshot( self.ctxt, TEST_GROUP_SNAP[0], [TEST_SNAPSHOT[0]] ) self.assertEqual(5, request.call_count) actual = ( {'status': 'available'}, [{'id': TEST_SNAPSHOT[0]['id'], 'provider_location': '1', 'status': 'available'}] ) self.assertTupleEqual(actual, ret) 
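    # The consistency-group variant below uses a different canned REST
    # sequence: the snapshot pair is first reported in PAIR status
    # (GET_SNAPSHOTS_RESULT_PAIR) and only reaches PSUS after one more
    # successful job response, which is why six requests are expected
    # instead of the five in the non-CG case above.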
@mock.patch.object(requests.Session, "request") @mock.patch.object(sqlalchemy_api, 'volume_get', side_effect=_volume_get) @mock.patch.object(volume_utils, 'is_group_a_cg_snapshot_type') @mock.patch.object(volume_types, 'get_volume_type_extra_specs') @mock.patch.object(volume_types, 'get_volume_type_qos_specs') def test_create_group_snapshot_cg( self, get_volume_type_qos_specs, get_volume_type_extra_specs, is_group_a_cg_snapshot_type, volume_get, request): is_group_a_cg_snapshot_type.return_value = True get_volume_type_extra_specs.return_value = {} get_volume_type_qos_specs.return_value = {'qos_specs': None} request.side_effect = [FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(200, GET_SNAPSHOTS_RESULT_PAIR), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(200, GET_SNAPSHOTS_RESULT)] self.driver.common._stats = {} self.driver.common._stats['pools'] = [ {'location_info': {'pool_id': 30}}] ret = self.driver.create_group_snapshot( self.ctxt, TEST_GROUP_SNAP[0], [TEST_SNAPSHOT[0]] ) self.assertEqual(6, request.call_count) actual = ( None, [{'id': TEST_SNAPSHOT[0]['id'], 'provider_location': '1', 'status': 'available'}] ) self.assertTupleEqual(actual, ret) @mock.patch.object(requests.Session, "request") def test_delete_group_snapshot(self, request): request.side_effect = [FakeResponse(200, GET_LDEV_RESULT_PAIR_SNAP), FakeResponse(200, NOTFOUND_RESULT), FakeResponse(200, GET_SNAPSHOTS_RESULT), FakeResponse(200, GET_SNAPSHOTS_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(200, GET_LDEV_RESULT), FakeResponse(200, GET_LDEV_RESULT), FakeResponse(200, GET_LDEV_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)] ret = self.driver.delete_group_snapshot( self.ctxt, TEST_GROUP_SNAP[0], [TEST_SNAPSHOT[0]]) self.assertEqual(10, request.call_count) actual = ( {'status': TEST_GROUP_SNAP[0]['status']}, [{'id': TEST_SNAPSHOT[0]['id'], 'status': 'deleted'}] ) self.assertTupleEqual(actual, ret) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/volume/drivers/nec/v/test_internal_nec_rest_iscsi.py0000664000175000017500000013461700000000000030770 0ustar00zuulzuul00000000000000# Copyright (C) 2021, 2024, NEC corporation # # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
# """Unit tests for NEC Driver.""" from unittest import mock from oslo_config import cfg import requests from cinder import context as cinder_context from cinder import db from cinder.db.sqlalchemy import api as sqlalchemy_api from cinder import exception from cinder.objects import group_snapshot as obj_group_snap from cinder.objects import snapshot as obj_snap from cinder.tests.unit import fake_group from cinder.tests.unit import fake_group_snapshot from cinder.tests.unit import fake_snapshot from cinder.tests.unit import fake_volume from cinder.tests.unit import test from cinder.volume import configuration as conf from cinder.volume import driver from cinder.volume.drivers.hitachi import hbsd_common from cinder.volume.drivers.hitachi import hbsd_rest from cinder.volume.drivers.hitachi import hbsd_rest_api from cinder.volume.drivers.nec.v import nec_v_iscsi from cinder.volume import volume_types from cinder.volume import volume_utils # Configuration parameter values CONFIG_MAP = { 'serial': '886000123456', 'my_ip': '127.0.0.1', 'rest_server_ip_addr': '172.16.18.108', 'rest_server_ip_port': '23451', 'port_id': 'CL1-A', 'host_grp_name': 'NEC-127.0.0.1', 'host_mode': 'LINUX/IRIX', 'host_iscsi_name': 'iqn.nec-test-host', 'target_iscsi_name': 'iqn.nec-test-target', 'user_id': 'user', 'user_pass': 'password', 'ipv4Address': '111.22.333.44', 'tcpPort': '5555', 'pool_name': 'test_pool', 'auth_user': 'auth_user', 'auth_password': 'auth_password', } DEFAULT_CONNECTOR = { 'host': 'host', 'ip': CONFIG_MAP['my_ip'], 'initiator': CONFIG_MAP['host_iscsi_name'], 'multipath': False, } CTXT = cinder_context.get_admin_context() TEST_VOLUME = [] for i in range(5): volume = {} volume['id'] = '00000000-0000-0000-0000-{0:012d}'.format(i) volume['name'] = 'test-volume{0:d}'.format(i) volume['volume_type_id'] = '00000000-0000-0000-0000-{0:012d}'.format(i) if i == 3 or i == 4: volume['provider_location'] = None else: volume['provider_location'] = '{0:d}'.format(i) volume['size'] = 128 if i == 2: volume['status'] = 'in-use' elif i == 4: volume['status'] = None else: volume['status'] = 'available' volume = fake_volume.fake_volume_obj(CTXT, **volume) volume.volume_type = fake_volume.fake_volume_type_obj(CTXT) TEST_VOLUME.append(volume) def _volume_get(context, volume_id): """Return predefined volume info.""" return TEST_VOLUME[int(volume_id.replace("-", ""))] TEST_SNAPSHOT = [] snapshot = {} snapshot['id'] = '10000000-0000-0000-0000-{0:012d}'.format(0) snapshot['name'] = 'TEST_SNAPSHOT{0:d}'.format(0) snapshot['provider_location'] = '{0:d}'.format(1) snapshot['status'] = 'available' snapshot['volume_id'] = '00000000-0000-0000-0000-{0:012d}'.format(0) snapshot['volume'] = _volume_get(None, snapshot['volume_id']) snapshot['volume_name'] = 'test-volume{0:d}'.format(0) snapshot['volume_size'] = 128 snapshot = obj_snap.Snapshot._from_db_object( CTXT, obj_snap.Snapshot(), fake_snapshot.fake_db_snapshot(**snapshot)) TEST_SNAPSHOT.append(snapshot) TEST_GROUP = [] for i in range(2): group = {} group['id'] = '20000000-0000-0000-0000-{0:012d}'.format(i) group['status'] = 'available' group = fake_group.fake_group_obj(CTXT, **group) TEST_GROUP.append(group) TEST_GROUP_SNAP = [] group_snapshot = {} group_snapshot['id'] = '30000000-0000-0000-0000-{0:012d}'.format(0) group_snapshot['status'] = 'available' group_snapshot = obj_group_snap.GroupSnapshot._from_db_object( CTXT, obj_group_snap.GroupSnapshot(), fake_group_snapshot.fake_db_group_snapshot(**group_snapshot)) TEST_GROUP_SNAP.append(group_snapshot) # Dummy response for REST API 
POST_SESSIONS_RESULT = { "token": "b74777a3-f9f0-4ea8-bd8f-09847fac48d3", "sessionId": 0, } GET_PORTS_RESULT = { "data": [ { "portId": CONFIG_MAP['port_id'], "portType": "ISCSI", "portAttributes": [ "TAR", "MCU", "RCU", "ELUN" ], "portSpeed": "AUT", "loopId": "00", "fabricMode": False, "lunSecuritySetting": True, }, ], } GET_PORT_RESULT = { "ipv4Address": CONFIG_MAP['ipv4Address'], "tcpPort": CONFIG_MAP['tcpPort'], } GET_HOST_ISCSIS_RESULT = { "data": [ { "hostGroupNumber": 0, "iscsiName": CONFIG_MAP['host_iscsi_name'], }, ], } GET_HOST_GROUP_RESULT = { "hostGroupName": CONFIG_MAP['host_grp_name'], "iscsiName": CONFIG_MAP['target_iscsi_name'], } GET_HOST_GROUPS_RESULT = { "data": [ { "hostGroupNumber": 0, "portId": CONFIG_MAP['port_id'], "hostGroupName": "NEC-test", "iscsiName": CONFIG_MAP['target_iscsi_name'], }, ], } COMPLETED_SUCCEEDED_RESULT = { "status": "Completed", "state": "Succeeded", "affectedResources": ('a/b/c/1',), } GET_LDEV_RESULT = { "emulationType": "OPEN-V-CVS", "blockCapacity": 2097152, "attributes": ["CVS", "DP"], "status": "NML", "poolId": 30, "label": "00000000000000000000000000000000", } GET_LDEV_RESULT_MAPPED = { "emulationType": "OPEN-V-CVS", "blockCapacity": 2097152, "attributes": ["CVS", "DP"], "status": "NML", "ports": [ { "portId": CONFIG_MAP['port_id'], "hostGroupNumber": 0, "hostGroupName": CONFIG_MAP['host_grp_name'], "lun": 1 }, ], } GET_LDEV_RESULT_SNAP = { "emulationType": "OPEN-V-CVS", "blockCapacity": 2097152, "attributes": ["CVS", "DP"], "status": "NML", "poolId": 30, "label": "10000000000000000000000000000000", } GET_LDEV_RESULT_PAIR = { "emulationType": "OPEN-V-CVS", "blockCapacity": 2097152, "attributes": ["CVS", "DP", "SS"], "status": "NML", "label": "00000000000000000000000000000000", } GET_LDEV_RESULT_PAIR_SNAP = { "emulationType": "OPEN-V-CVS", "blockCapacity": 2097152, "attributes": ["CVS", "DP", "SS"], "status": "NML", "label": "10000000000000000000000000000000", } GET_POOLS_RESULT = { "data": [ { "poolId": 30, "poolName": CONFIG_MAP['pool_name'], "availableVolumeCapacity": 480144, "totalPoolCapacity": 507780, "totalLocatedCapacity": 71453172, "virtualVolumeCapacityRate": -1, }, ], } GET_SNAPSHOTS_RESULT = { "data": [ { "primaryOrSecondary": "S-VOL", "status": "PSUS", "pvolLdevId": 0, "muNumber": 1, "svolLdevId": 1, }, ], } GET_SNAPSHOTS_RESULT_PAIR = { "data": [ { "primaryOrSecondary": "S-VOL", "status": "PAIR", "pvolLdevId": 0, "muNumber": 1, "svolLdevId": 1, }, ], } GET_LDEVS_RESULT = { "data": [ { "ldevId": 0, "label": "15960cc738c94c5bb4f1365be5eeed44", }, { "ldevId": 1, "label": "15960cc738c94c5bb4f1365be5eeed45", }, ], } NOTFOUND_RESULT = { "data": [], } def _brick_get_connector_properties(multipath=False, enforce_multipath=False): """Return a predefined connector object.""" return DEFAULT_CONNECTOR class FakeResponse(): def __init__(self, status_code, data=None, headers=None): self.status_code = status_code self.data = data self.text = data self.content = data self.headers = {'Content-Type': 'json'} if headers is None else headers def json(self): return self.data class VStorageRESTISCSIDriverTest(test.TestCase): """Unit test class for NEC REST interface iSCSI module.""" test_existing_ref = {'source-id': '1'} test_existing_ref_name = { 'source-name': '15960cc7-38c9-4c5b-b4f1-365be5eeed45'} def setUp(self): """Set up the test environment.""" def _set_required(opts, required): for opt in opts: opt.required = required # Initialize Cinder and avoid checking driver options. 
rest_required_opts = [ opt for opt in hbsd_rest.REST_VOLUME_OPTS if opt.required] common_required_opts = [ opt for opt in hbsd_common.COMMON_VOLUME_OPTS if opt.required] _set_required(rest_required_opts, False) _set_required(common_required_opts, False) super(VStorageRESTISCSIDriverTest, self).setUp() _set_required(rest_required_opts, True) _set_required(common_required_opts, True) self.configuration = mock.Mock(conf.Configuration) self.ctxt = cinder_context.get_admin_context() self._setup_config() self._setup_driver() def _setup_config(self): """Set configuration parameter values.""" self.configuration.config_group = "REST" self.configuration.volume_backend_name = "RESTISCSI" self.configuration.volume_driver = ( "cinder.volume.drivers.nec.v.nec_v_iscsi.VStorageISCSIDriver") self.configuration.reserved_percentage = "0" self.configuration.use_multipath_for_image_xfer = False self.configuration.enforce_multipath_for_image_xfer = False self.configuration.max_over_subscription_ratio = 500.0 self.configuration.driver_ssl_cert_verify = False self.configuration.nec_v_storage_id = CONFIG_MAP['serial'] self.configuration.nec_v_pools = ["30"] self.configuration.nec_v_snap_pool = None self.configuration.nec_v_ldev_range = "0-1" self.configuration.nec_v_target_ports = [CONFIG_MAP['port_id']] self.configuration.nec_v_compute_target_ports = [ CONFIG_MAP['port_id']] self.configuration.nec_v_group_create = True self.configuration.nec_v_group_delete = True self.configuration.nec_v_copy_speed = 3 self.configuration.nec_v_copy_check_interval = 3 self.configuration.nec_v_async_copy_check_interval = 10 self.configuration.san_login = CONFIG_MAP['user_id'] self.configuration.san_password = CONFIG_MAP['user_pass'] self.configuration.san_ip = CONFIG_MAP[ 'rest_server_ip_addr'] self.configuration.san_api_port = CONFIG_MAP[ 'rest_server_ip_port'] self.configuration.nec_v_rest_disable_io_wait = True self.configuration.nec_v_rest_tcp_keepalive = True self.configuration.nec_v_discard_zero_page = True self.configuration.nec_v_rest_number = "0" self.configuration.nec_v_lun_timeout = hbsd_rest._LUN_TIMEOUT self.configuration.nec_v_lun_retry_interval = ( hbsd_rest._LUN_RETRY_INTERVAL) self.configuration.nec_v_restore_timeout = hbsd_rest._RESTORE_TIMEOUT self.configuration.nec_v_state_transition_timeout = ( hbsd_rest._STATE_TRANSITION_TIMEOUT) self.configuration.nec_v_lock_timeout = hbsd_rest_api._LOCK_TIMEOUT self.configuration.nec_v_rest_timeout = hbsd_rest_api._REST_TIMEOUT self.configuration.nec_v_extend_timeout = ( hbsd_rest_api._EXTEND_TIMEOUT) self.configuration.nec_v_exec_retry_interval = ( hbsd_rest_api._EXEC_RETRY_INTERVAL) self.configuration.nec_v_rest_connect_timeout = ( hbsd_rest_api._DEFAULT_CONNECT_TIMEOUT) self.configuration.nec_v_rest_job_api_response_timeout = ( hbsd_rest_api._JOB_API_RESPONSE_TIMEOUT) self.configuration.nec_v_rest_get_api_response_timeout = ( hbsd_rest_api._GET_API_RESPONSE_TIMEOUT) self.configuration.nec_v_rest_server_busy_timeout = ( hbsd_rest_api._REST_SERVER_BUSY_TIMEOUT) self.configuration.nec_v_rest_keep_session_loop_interval = ( hbsd_rest_api._KEEP_SESSION_LOOP_INTERVAL) self.configuration.nec_v_rest_another_ldev_mapped_retry_timeout = ( hbsd_rest_api._ANOTHER_LDEV_MAPPED_RETRY_TIMEOUT) self.configuration.nec_v_rest_tcp_keepidle = ( hbsd_rest_api._TCP_KEEPIDLE) self.configuration.nec_v_rest_tcp_keepintvl = ( hbsd_rest_api._TCP_KEEPINTVL) self.configuration.nec_v_rest_tcp_keepcnt = ( hbsd_rest_api._TCP_KEEPCNT) self.configuration.nec_v_host_mode_options = [] 
self.configuration.use_chap_auth = True self.configuration.chap_username = CONFIG_MAP['auth_user'] self.configuration.chap_password = CONFIG_MAP['auth_password'] self.configuration.san_thin_provision = True self.configuration.san_private_key = '' self.configuration.san_clustername = '' self.configuration.san_ssh_port = '22' self.configuration.san_is_local = False self.configuration.ssh_conn_timeout = '30' self.configuration.ssh_min_pool_conn = '1' self.configuration.ssh_max_pool_conn = '5' self.configuration.safe_get = self._fake_safe_get CONF = cfg.CONF CONF.my_ip = CONFIG_MAP['my_ip'] def _fake_safe_get(self, value): """Retrieve a configuration value avoiding throwing an exception.""" try: val = getattr(self.configuration, value) except AttributeError: val = None return val @mock.patch.object(requests.Session, "request") @mock.patch.object( volume_utils, 'brick_get_connector_properties', side_effect=_brick_get_connector_properties) def _setup_driver( self, brick_get_connector_properties=None, request=None): """Set up the driver environment.""" self.driver = nec_v_iscsi.VStorageISCSIDriver( configuration=self.configuration, db=db) request.side_effect = [FakeResponse(200, POST_SESSIONS_RESULT), FakeResponse(200, GET_PORTS_RESULT), FakeResponse(200, GET_PORT_RESULT), FakeResponse(200, GET_HOST_ISCSIS_RESULT), FakeResponse(200, GET_HOST_GROUP_RESULT)] self.driver.do_setup(None) self.driver.check_for_setup_error() self.driver.local_path(None) self.driver.create_export(None, None, None) self.driver.ensure_export(None, None) self.driver.remove_export(None, None) self.driver.create_export_snapshot(None, None, None) self.driver.remove_export_snapshot(None, None) # stop the Loopingcall within the do_setup treatment self.driver.common.client.keep_session_loop.stop() def tearDown(self): self.client = None super(VStorageRESTISCSIDriverTest, self).tearDown() # API test cases def test_driverinfo(self): drv = nec_v_iscsi.VStorageISCSIDriver( configuration=self.configuration, db=db) self.assertEqual(drv.common.driver_info['version'], "1.0.0") self.assertEqual(drv.common.driver_info['proto'], "iSCSI") self.assertEqual(drv.common.driver_info['hba_id'], "initiator") self.assertEqual(drv.common.driver_info['hba_id_type'], "iSCSI initiator IQN") self.assertEqual(drv.common.driver_info['msg_id']['target'].msg_id, 309) self.assertEqual(drv.common.driver_info['volume_backend_name'], "NECiSCSI") self.assertEqual(drv.common.driver_info['volume_type'], "iscsi") self.assertEqual(drv.common.driver_info['param_prefix'], "nec_v") self.assertEqual(drv.common.driver_info['vendor_name'], "NEC") self.assertEqual(drv.common.driver_info['driver_prefix'], "NEC") self.assertEqual(drv.common.driver_info['driver_file_prefix'], "nec") self.assertEqual(drv.common.driver_info['target_prefix'], "NEC-") self.assertEqual(drv.common.driver_info['hdp_vol_attr'], "DP") self.assertEqual(drv.common.driver_info['hdt_vol_attr'], "DT") self.assertEqual(drv.common.driver_info['nvol_ldev_type'], "DP-VOL") self.assertEqual(drv.common.driver_info['target_iqn_suffix'], ".nec-target") self.assertEqual(drv.common.driver_info['pair_attr'], "SS") @mock.patch.object(requests.Session, "request") @mock.patch.object( volume_utils, 'brick_get_connector_properties', side_effect=_brick_get_connector_properties) def test_do_setup(self, brick_get_connector_properties, request): drv = nec_v_iscsi.VStorageISCSIDriver( configuration=self.configuration, db=db) self._setup_config() request.side_effect = [FakeResponse(200, POST_SESSIONS_RESULT), FakeResponse(200, 
GET_PORTS_RESULT), FakeResponse(200, GET_PORT_RESULT), FakeResponse(200, GET_HOST_ISCSIS_RESULT), FakeResponse(200, GET_HOST_GROUP_RESULT)] drv.do_setup(None) self.assertEqual( {CONFIG_MAP['port_id']: '%(ip)s:%(port)s' % { 'ip': CONFIG_MAP['ipv4Address'], 'port': CONFIG_MAP['tcpPort']}}, drv.common.storage_info['portals']) self.assertEqual(1, brick_get_connector_properties.call_count) self.assertEqual(5, request.call_count) # stop the Loopingcall within the do_setup treatment self.driver.common.client.keep_session_loop.stop() self.driver.common.client.keep_session_loop.wait() @mock.patch.object(requests.Session, "request") @mock.patch.object( volume_utils, 'brick_get_connector_properties', side_effect=_brick_get_connector_properties) def test_do_setup_create_hg(self, brick_get_connector_properties, request): """Normal case: The host group not exists.""" drv = nec_v_iscsi.VStorageISCSIDriver( configuration=self.configuration, db=db) self._setup_config() request.side_effect = [FakeResponse(200, POST_SESSIONS_RESULT), FakeResponse(200, GET_PORTS_RESULT), FakeResponse(200, GET_PORT_RESULT), FakeResponse(200, NOTFOUND_RESULT), FakeResponse(200, NOTFOUND_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)] drv.do_setup(None) self.assertEqual( {CONFIG_MAP['port_id']: '%(ip)s:%(port)s' % { 'ip': CONFIG_MAP['ipv4Address'], 'port': CONFIG_MAP['tcpPort']}}, drv.common.storage_info['portals']) self.assertEqual(1, brick_get_connector_properties.call_count) self.assertEqual(8, request.call_count) # stop the Loopingcall within the do_setup treatment self.driver.common.client.keep_session_loop.stop() self.driver.common.client.keep_session_loop.wait() @mock.patch.object(requests.Session, "request") def test_extend_volume(self, request): request.side_effect = [FakeResponse(200, GET_LDEV_RESULT), FakeResponse(200, GET_LDEV_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)] self.driver.extend_volume(TEST_VOLUME[0], 256) self.assertEqual(3, request.call_count) @mock.patch.object(driver.ISCSIDriver, "get_goodness_function") @mock.patch.object(driver.ISCSIDriver, "get_filter_function") @mock.patch.object(requests.Session, "request") def test__update_volume_stats( self, request, get_filter_function, get_goodness_function): request.return_value = FakeResponse(200, GET_POOLS_RESULT) get_filter_function.return_value = None get_goodness_function.return_value = None self.driver._update_volume_stats() self.assertEqual( 'NEC', self.driver._stats['vendor_name']) self.assertTrue(self.driver._stats["pools"][0]['multiattach']) self.assertEqual(1, request.call_count) self.assertEqual(1, get_filter_function.call_count) self.assertEqual(1, get_goodness_function.call_count) @mock.patch.object(requests.Session, "request") @mock.patch.object(volume_types, 'get_volume_type_extra_specs') @mock.patch.object(volume_types, 'get_volume_type_qos_specs') def test_create_volume( self, get_volume_type_qos_specs, get_volume_type_extra_specs, request): request.return_value = FakeResponse(202, COMPLETED_SUCCEEDED_RESULT) get_volume_type_extra_specs.return_value = {} get_volume_type_qos_specs.return_value = {'qos_specs': None} self.driver.common._stats = {} self.driver.common._stats['pools'] = [ {'location_info': {'pool_id': 30}}] ret = self.driver.create_volume(TEST_VOLUME[4]) self.assertEqual('1', ret['provider_location']) self.assertEqual(2, request.call_count) @mock.patch.object(requests.Session, "request") def test_delete_volume(self, 
request): request.side_effect = [FakeResponse(200, GET_LDEV_RESULT), FakeResponse(200, GET_LDEV_RESULT), FakeResponse(200, GET_LDEV_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)] self.driver.delete_volume(TEST_VOLUME[0]) self.assertEqual(4, request.call_count) @mock.patch.object(requests.Session, "request") @mock.patch.object(sqlalchemy_api, 'volume_get', side_effect=_volume_get) @mock.patch.object(volume_types, 'get_volume_type_extra_specs') @mock.patch.object(volume_types, 'get_volume_type_qos_specs') def test_create_snapshot( self, get_volume_type_qos_specs, get_volume_type_extra_specs, volume_get, request): request.side_effect = [FakeResponse(200, GET_LDEV_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(200, GET_SNAPSHOTS_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)] get_volume_type_extra_specs.return_value = {} get_volume_type_qos_specs.return_value = {'qos_specs': None} self.driver.common._stats = {} self.driver.common._stats['pools'] = [ {'location_info': {'pool_id': 30}}] ret = self.driver.create_snapshot(TEST_SNAPSHOT[0]) self.assertEqual('1', ret['provider_location']) self.assertEqual(5, request.call_count) @mock.patch.object(requests.Session, "request") def test_delete_snapshot(self, request): request.side_effect = [FakeResponse(200, GET_LDEV_RESULT_SNAP), FakeResponse(200, GET_LDEV_RESULT), FakeResponse(200, GET_LDEV_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)] self.driver.delete_snapshot(TEST_SNAPSHOT[0]) self.assertEqual(4, request.call_count) @mock.patch.object(requests.Session, "request") @mock.patch.object(volume_types, 'get_volume_type_extra_specs') @mock.patch.object(volume_types, 'get_volume_type_qos_specs') def test_create_cloned_volume( self, get_volume_type_qos_specs, get_volume_type_extra_specs, request): request.side_effect = [FakeResponse(200, GET_LDEV_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(200, GET_SNAPSHOTS_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)] get_volume_type_extra_specs.return_value = {} get_volume_type_qos_specs.return_value = {'qos_specs': None} self.driver.common._stats = {} self.driver.common._stats['pools'] = [ {'location_info': {'pool_id': 30}}] vol = self.driver.create_cloned_volume(TEST_VOLUME[0], TEST_VOLUME[1]) self.assertEqual('1', vol['provider_location']) self.assertEqual(5, request.call_count) @mock.patch.object(requests.Session, "request") @mock.patch.object(volume_types, 'get_volume_type_extra_specs') @mock.patch.object(volume_types, 'get_volume_type_qos_specs') def test_create_volume_from_snapshot( self, get_volume_type_qos_specs, get_volume_type_extra_specs, request): request.side_effect = [FakeResponse(200, GET_LDEV_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(200, GET_SNAPSHOTS_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)] get_volume_type_extra_specs.return_value = {} get_volume_type_qos_specs.return_value = {'qos_specs': None} self.driver.common._stats = {} self.driver.common._stats['pools'] = [ {'location_info': {'pool_id': 30}}] vol = self.driver.create_volume_from_snapshot( TEST_VOLUME[0], TEST_SNAPSHOT[0]) self.assertEqual('1', vol['provider_location']) self.assertEqual(5, request.call_count) @mock.patch.object(requests.Session, "request") @mock.patch.object(volume_types, 'get_volume_type_extra_specs') def test_initialize_connection( self, 
get_volume_type_extra_specs, request): request.side_effect = [FakeResponse(200, GET_HOST_ISCSIS_RESULT), FakeResponse(200, GET_HOST_GROUP_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)] get_volume_type_extra_specs.return_value = {} ret = self.driver.initialize_connection( TEST_VOLUME[0], DEFAULT_CONNECTOR) self.assertEqual('iscsi', ret['driver_volume_type']) self.assertEqual( '%(ip)s:%(port)s' % { 'ip': CONFIG_MAP['ipv4Address'], 'port': CONFIG_MAP['tcpPort'], }, ret['data']['target_portal']) self.assertEqual(CONFIG_MAP['target_iscsi_name'], ret['data']['target_iqn']) self.assertEqual('CHAP', ret['data']['auth_method']) self.assertEqual(CONFIG_MAP['auth_user'], ret['data']['auth_username']) self.assertEqual( CONFIG_MAP['auth_password'], ret['data']['auth_password']) self.assertEqual(1, ret['data']['target_lun']) self.assertEqual(3, request.call_count) @mock.patch.object(requests.Session, "request") @mock.patch.object(volume_types, 'get_volume_type_extra_specs') def test_initialize_connection_shared_target( self, get_volume_type_extra_specs, request): """Normal case: A target shared with other systems.""" request.side_effect = [FakeResponse(200, NOTFOUND_RESULT), FakeResponse(200, GET_HOST_GROUPS_RESULT), FakeResponse(200, GET_HOST_ISCSIS_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)] get_volume_type_extra_specs.return_value = {} ret = self.driver.initialize_connection( TEST_VOLUME[0], DEFAULT_CONNECTOR) self.assertEqual('iscsi', ret['driver_volume_type']) self.assertEqual( '%(ip)s:%(port)s' % { 'ip': CONFIG_MAP['ipv4Address'], 'port': CONFIG_MAP['tcpPort'], }, ret['data']['target_portal']) self.assertEqual(CONFIG_MAP['target_iscsi_name'], ret['data']['target_iqn']) self.assertEqual('CHAP', ret['data']['auth_method']) self.assertEqual(CONFIG_MAP['auth_user'], ret['data']['auth_username']) self.assertEqual( CONFIG_MAP['auth_password'], ret['data']['auth_password']) self.assertEqual(1, ret['data']['target_lun']) self.assertEqual(4, request.call_count) @mock.patch.object(requests.Session, "request") def test_terminate_connection(self, request): request.side_effect = [FakeResponse(200, GET_HOST_ISCSIS_RESULT), FakeResponse(200, GET_HOST_GROUP_RESULT), FakeResponse(200, GET_LDEV_RESULT_MAPPED), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(200, NOTFOUND_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)] self.driver.terminate_connection(TEST_VOLUME[2], DEFAULT_CONNECTOR) self.assertEqual(6, request.call_count) @mock.patch.object(requests.Session, "request") def test_terminate_connection_not_connector(self, request): """Normal case: Connector is None.""" request.side_effect = [FakeResponse(200, GET_LDEV_RESULT_MAPPED), FakeResponse(200, GET_HOST_GROUP_RESULT), FakeResponse(200, GET_HOST_ISCSIS_RESULT), FakeResponse(200, GET_HOST_GROUPS_RESULT), FakeResponse(200, GET_HOST_ISCSIS_RESULT), FakeResponse(200, GET_LDEV_RESULT_MAPPED), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(200, NOTFOUND_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)] self.driver.terminate_connection(TEST_VOLUME[2], None) self.assertEqual(9, request.call_count) @mock.patch.object(requests.Session, "request") def test_initialize_connection_snapshot(self, request): request.side_effect = [FakeResponse(200, GET_HOST_ISCSIS_RESULT), FakeResponse(200, GET_HOST_GROUP_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)] ret = self.driver.initialize_connection_snapshot( TEST_SNAPSHOT[0], DEFAULT_CONNECTOR) self.assertEqual('iscsi', ret['driver_volume_type']) self.assertEqual( 
'%(ip)s:%(port)s' % { 'ip': CONFIG_MAP['ipv4Address'], 'port': CONFIG_MAP['tcpPort'], }, ret['data']['target_portal']) self.assertEqual(CONFIG_MAP['target_iscsi_name'], ret['data']['target_iqn']) self.assertEqual('CHAP', ret['data']['auth_method']) self.assertEqual(CONFIG_MAP['auth_user'], ret['data']['auth_username']) self.assertEqual( CONFIG_MAP['auth_password'], ret['data']['auth_password']) self.assertEqual(1, ret['data']['target_lun']) self.assertEqual(3, request.call_count) @mock.patch.object(requests.Session, "request") def test_terminate_connection_snapshot(self, request): request.side_effect = [FakeResponse(200, GET_HOST_ISCSIS_RESULT), FakeResponse(200, GET_HOST_GROUP_RESULT), FakeResponse(200, GET_LDEV_RESULT_MAPPED), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(200, NOTFOUND_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)] self.driver.terminate_connection_snapshot( TEST_SNAPSHOT[0], DEFAULT_CONNECTOR) self.assertEqual(6, request.call_count) @mock.patch.object(requests.Session, "request") @mock.patch.object(volume_types, 'get_volume_type_qos_specs') def test_manage_existing(self, get_volume_type_qos_specs, request): request.side_effect = [FakeResponse(200, GET_LDEV_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(200, GET_LDEVS_RESULT)] get_volume_type_qos_specs.return_value = {'qos_specs': None} ret = self.driver.manage_existing( TEST_VOLUME[0], self.test_existing_ref) self.assertEqual('1', ret['provider_location']) self.assertEqual(3, request.call_count) @mock.patch.object(requests.Session, "request") @mock.patch.object(volume_types, 'get_volume_type_qos_specs') def test_manage_existing_name(self, get_volume_type_qos_specs, request): request.side_effect = [FakeResponse(200, GET_LDEVS_RESULT), FakeResponse(200, GET_LDEV_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(200, GET_LDEVS_RESULT)] get_volume_type_qos_specs.return_value = {'qos_specs': None} ret = self.driver.manage_existing( TEST_VOLUME[0], self.test_existing_ref_name) self.assertEqual('1', ret['provider_location']) self.assertEqual(4, request.call_count) @mock.patch.object(requests.Session, "request") def test_manage_existing_get_size(self, request): request.return_value = FakeResponse(200, GET_LDEV_RESULT) self.driver.manage_existing_get_size( TEST_VOLUME[0], self.test_existing_ref) self.assertEqual(1, request.call_count) @mock.patch.object(requests.Session, "request") def test_manage_existing_get_size_name(self, request): request.side_effect = [FakeResponse(200, GET_LDEVS_RESULT), FakeResponse(200, GET_LDEV_RESULT)] self.driver.manage_existing_get_size( TEST_VOLUME[0], self.test_existing_ref_name) self.assertEqual(2, request.call_count) @mock.patch.object(requests.Session, "request") def test_unmanage(self, request): request.side_effect = [FakeResponse(200, GET_LDEV_RESULT), FakeResponse(200, GET_LDEV_RESULT)] self.driver.unmanage(TEST_VOLUME[0]) self.assertEqual(2, request.call_count) @mock.patch.object(requests.Session, "request") def test_copy_image_to_volume(self, request): image_service = 'fake_image_service' image_id = 'fake_image_id' request.return_value = FakeResponse(202, COMPLETED_SUCCEEDED_RESULT) with mock.patch.object(driver.VolumeDriver, 'copy_image_to_volume') \ as mock_copy_image: self.driver.copy_image_to_volume( self.ctxt, TEST_VOLUME[0], image_service, image_id) mock_copy_image.assert_called_with( self.ctxt, TEST_VOLUME[0], image_service, image_id, disable_sparse=False) self.assertEqual(1, request.call_count) 
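    # NOTE: The tests above and below share a common mocking pattern:
    # requests.Session.request is patched and fed a list of FakeResponse
    # objects via side_effect, one entry per REST API call the driver is
    # expected to issue (session creation, LDEV lookups, asynchronous job
    # results), and each test then asserts request.call_count to verify the
    # exact length of that call sequence.  A minimal, hypothetical test
    # following the same pattern would look like:
    #
    #     @mock.patch.object(requests.Session, "request")
    #     def test_example(self, request):
    #         request.side_effect = [
    #             FakeResponse(200, GET_LDEV_RESULT),
    #             FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)]
    #         # ...invoke the driver method under test here...
    #         self.assertEqual(2, request.call_count)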
@mock.patch.object(requests.Session, "request") def test_update_migrated_volume(self, request): request.return_value = FakeResponse(202, COMPLETED_SUCCEEDED_RESULT) ret = self.driver.update_migrated_volume( self.ctxt, TEST_VOLUME[0], TEST_VOLUME[1], "available") self.assertEqual(1, request.call_count) actual = ({'_name_id': TEST_VOLUME[1]['id'], 'provider_location': TEST_VOLUME[1]['provider_location']}) self.assertEqual(actual, ret) def test_unmanage_snapshot(self): """The driver does not support unmanage_snapshot.""" self.assertRaises( NotImplementedError, self.driver.unmanage_snapshot, TEST_SNAPSHOT[0]) @mock.patch.object(requests.Session, "request") def test_retype(self, request): request.return_value = FakeResponse(200, GET_LDEV_RESULT) new_specs = {'nec:test': 'test'} new_type_ref = volume_types.create(self.ctxt, 'new', new_specs) new_type = volume_types.get_volume_type(self.ctxt, new_type_ref['id']) diff = {} host = { 'capabilities': { 'location_info': { 'pool_id': 30, }, }, } ret = self.driver.retype( self.ctxt, TEST_VOLUME[0], new_type, diff, host) self.assertEqual(1, request.call_count) self.assertTrue(ret) def test_backup_use_temp_snapshot(self): self.assertTrue(self.driver.backup_use_temp_snapshot()) @mock.patch.object(requests.Session, "request") def test_revert_to_snapshot(self, request): request.side_effect = [FakeResponse(200, GET_LDEV_RESULT_PAIR), FakeResponse(200, GET_SNAPSHOTS_RESULT), FakeResponse(200, GET_SNAPSHOTS_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(200, GET_SNAPSHOTS_RESULT)] self.driver.revert_to_snapshot( self.ctxt, TEST_VOLUME[0], TEST_SNAPSHOT[0]) self.assertEqual(5, request.call_count) def test_create_group(self): ret = self.driver.create_group(self.ctxt, TEST_GROUP[0]) self.assertIsNone(ret) @mock.patch.object(requests.Session, "request") def test_delete_group(self, request): request.side_effect = [FakeResponse(200, GET_LDEV_RESULT), FakeResponse(200, GET_LDEV_RESULT), FakeResponse(200, GET_LDEV_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)] ret = self.driver.delete_group( self.ctxt, TEST_GROUP[0], [TEST_VOLUME[0]]) self.assertEqual(4, request.call_count) actual = ( {'status': TEST_GROUP[0]['status']}, [{'id': TEST_VOLUME[0]['id'], 'status': 'deleted'}] ) self.assertTupleEqual(actual, ret) @mock.patch.object(requests.Session, "request") @mock.patch.object(volume_types, 'get_volume_type_extra_specs') @mock.patch.object(volume_types, 'get_volume_type_qos_specs') def test_create_group_from_src_volume( self, get_volume_type_qos_specs, get_volume_type_extra_specs, request): request.side_effect = [FakeResponse(200, GET_LDEV_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(200, GET_SNAPSHOTS_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)] get_volume_type_extra_specs.return_value = {} get_volume_type_qos_specs.return_value = {'qos_specs': None} self.driver.common._stats = {} self.driver.common._stats['pools'] = [ {'location_info': {'pool_id': 30}}] ret = self.driver.create_group_from_src( self.ctxt, TEST_GROUP[1], [TEST_VOLUME[1]], source_group=TEST_GROUP[0], source_vols=[TEST_VOLUME[0]] ) self.assertEqual(5, request.call_count) actual = ( None, [{'id': TEST_VOLUME[1]['id'], 'provider_location': '1'}]) self.assertTupleEqual(actual, ret) @mock.patch.object(requests.Session, "request") @mock.patch.object(volume_types, 'get_volume_type_extra_specs') @mock.patch.object(volume_types, 'get_volume_type_qos_specs') def test_create_group_from_src_snapshot( self,
get_volume_type_qos_specs, get_volume_type_extra_specs, request): request.side_effect = [FakeResponse(200, GET_LDEV_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(200, GET_SNAPSHOTS_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)] get_volume_type_extra_specs.return_value = {} get_volume_type_qos_specs.return_value = {'qos_specs': None} self.driver.common._stats = {} self.driver.common._stats['pools'] = [ {'location_info': {'pool_id': 30}}] ret = self.driver.create_group_from_src( self.ctxt, TEST_GROUP[0], [TEST_VOLUME[0]], group_snapshot=TEST_GROUP_SNAP[0], snapshots=[TEST_SNAPSHOT[0]] ) self.assertEqual(5, request.call_count) actual = ( None, [{'id': TEST_VOLUME[0]['id'], 'provider_location': '1'}]) self.assertTupleEqual(actual, ret) @mock.patch.object(volume_types, 'get_volume_type_qos_specs') def test_create_group_from_src_volume_error( self, get_volume_type_qos_specs): get_volume_type_qos_specs.return_value = {'qos_specs': None} self.assertRaises( exception.VolumeDriverException, self.driver.create_group_from_src, self.ctxt, TEST_GROUP[1], [TEST_VOLUME[1]], source_group=TEST_GROUP[0], source_vols=[TEST_VOLUME[3]] ) @mock.patch.object(volume_utils, 'is_group_a_cg_snapshot_type') def test_update_group(self, is_group_a_cg_snapshot_type): is_group_a_cg_snapshot_type.return_value = False ret = self.driver.update_group( self.ctxt, TEST_GROUP[0], add_volumes=[TEST_VOLUME[0]]) self.assertTupleEqual((None, None, None), ret) @mock.patch.object(volume_utils, 'is_group_a_cg_snapshot_type') def test_update_group_error(self, is_group_a_cg_snapshot_type): is_group_a_cg_snapshot_type.return_value = True self.assertRaises( exception.VolumeDriverException, self.driver.update_group, self.ctxt, TEST_GROUP[0], add_volumes=[TEST_VOLUME[3]], remove_volumes=[TEST_VOLUME[0]] ) @mock.patch.object(requests.Session, "request") @mock.patch.object(sqlalchemy_api, 'volume_get', side_effect=_volume_get) @mock.patch.object(volume_utils, 'is_group_a_cg_snapshot_type') @mock.patch.object(volume_types, 'get_volume_type_extra_specs') @mock.patch.object(volume_types, 'get_volume_type_qos_specs') def test_create_group_snapshot_non_cg( self, get_volume_type_qos_specs, get_volume_type_extra_specs, is_group_a_cg_snapshot_type, volume_get, request): is_group_a_cg_snapshot_type.return_value = False get_volume_type_extra_specs.return_value = {} get_volume_type_qos_specs.return_value = {'qos_specs': None} request.side_effect = [FakeResponse(200, GET_LDEV_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(200, GET_SNAPSHOTS_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)] self.driver.common._stats = {} self.driver.common._stats['pools'] = [ {'location_info': {'pool_id': 30}}] ret = self.driver.create_group_snapshot( self.ctxt, TEST_GROUP_SNAP[0], [TEST_SNAPSHOT[0]] ) self.assertEqual(5, request.call_count) actual = ( {'status': 'available'}, [{'id': TEST_SNAPSHOT[0]['id'], 'provider_location': '1', 'status': 'available'}] ) self.assertTupleEqual(actual, ret) @mock.patch.object(requests.Session, "request") @mock.patch.object(sqlalchemy_api, 'volume_get', side_effect=_volume_get) @mock.patch.object(volume_utils, 'is_group_a_cg_snapshot_type') @mock.patch.object(volume_types, 'get_volume_type_extra_specs') @mock.patch.object(volume_types, 'get_volume_type_qos_specs') def test_create_group_snapshot_cg( self, get_volume_type_qos_specs, get_volume_type_extra_specs, 
is_group_a_cg_snapshot_type, volume_get, request): is_group_a_cg_snapshot_type.return_value = True get_volume_type_extra_specs.return_value = {} get_volume_type_qos_specs.return_value = {'qos_specs': None} request.side_effect = [FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(200, GET_SNAPSHOTS_RESULT_PAIR), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(200, GET_SNAPSHOTS_RESULT)] self.driver.common._stats = {} self.driver.common._stats['pools'] = [ {'location_info': {'pool_id': 30}}] ret = self.driver.create_group_snapshot( self.ctxt, TEST_GROUP_SNAP[0], [TEST_SNAPSHOT[0]] ) self.assertEqual(6, request.call_count) actual = ( None, [{'id': TEST_SNAPSHOT[0]['id'], 'provider_location': '1', 'status': 'available'}] ) self.assertTupleEqual(actual, ret) @mock.patch.object(requests.Session, "request") def test_delete_group_snapshot(self, request): request.side_effect = [FakeResponse(200, GET_LDEV_RESULT_PAIR_SNAP), FakeResponse(200, NOTFOUND_RESULT), FakeResponse(200, GET_SNAPSHOTS_RESULT), FakeResponse(200, GET_SNAPSHOTS_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT), FakeResponse(200, GET_LDEV_RESULT), FakeResponse(200, GET_LDEV_RESULT), FakeResponse(200, GET_LDEV_RESULT), FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)] ret = self.driver.delete_group_snapshot( self.ctxt, TEST_GROUP_SNAP[0], [TEST_SNAPSHOT[0]]) self.assertEqual(10, request.call_count) actual = ( {'status': TEST_GROUP_SNAP[0]['status']}, [{'id': TEST_SNAPSHOT[0]['id'], 'status': 'deleted'}] ) self.assertTupleEqual(actual, ret) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/volume/drivers/nec/v/test_nec_rest_fc.py0000664000175000017500000004240700000000000026345 0ustar00zuulzuul00000000000000# Copyright (C) 2021, 2024, NEC corporation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
# """Unit tests for NEC Driver.""" from unittest import mock from oslo_config import cfg import requests from cinder import context as cinder_context from cinder import db from cinder.tests.unit import fake_volume from cinder.tests.unit import test from cinder.volume import configuration as conf from cinder.volume.drivers.hitachi import hbsd_rest from cinder.volume.drivers.hitachi import hbsd_rest_api from cinder.volume.drivers.nec.v import nec_v_fc from cinder.volume.drivers.nec.v import nec_v_rest from cinder.volume import volume_types from cinder.volume import volume_utils # Configuration parameter values CONFIG_MAP = { 'serial': '886000123456', 'my_ip': '127.0.0.1', 'rest_server_ip_addr': '172.16.18.108', 'rest_server_ip_port': '23451', 'port_id': 'CL1-A', 'host_grp_name': 'NEC-0123456789abcdef', 'host_mode': 'LINUX/IRIX', 'host_wwn': '0123456789abcdef', 'target_wwn': '1111111123456789', 'user_id': 'user', 'user_pass': 'password', 'pool_name': 'test_pool', 'auth_user': 'auth_user', 'auth_password': 'auth_password', } # Dummy response DEFAULT_CONNECTOR = { 'host': 'host', 'ip': CONFIG_MAP['my_ip'], 'wwpns': [CONFIG_MAP['host_wwn']], 'multipath': False, } CTXT = cinder_context.get_admin_context() TEST_VOLUME = [] volume = {} volume['id'] = '00000000-0000-0000-0000-{0:012d}'.format(0) volume['name'] = 'test-volume{0:d}'.format(0) volume['volume_type_id'] = '00000000-0000-0000-0000-{0:012d}'.format(0) volume = fake_volume.fake_volume_obj(CTXT, **volume) volume.volume_type = fake_volume.fake_volume_type_obj(CTXT) TEST_VOLUME.append(volume) # Dummy response for REST API POST_SESSIONS_RESULT = { "token": "b74777a3-f9f0-4ea8-bd8f-09847fac48d3", "sessionId": 0, } GET_PORTS_RESULT = { "data": [ { "portId": CONFIG_MAP['port_id'], "portType": "FIBRE", "portAttributes": [ "TAR", "MCU", "RCU", "ELUN" ], "fabricMode": True, "portConnection": "PtoP", "lunSecuritySetting": True, "wwn": CONFIG_MAP['target_wwn'], }, ], } GET_HOST_WWNS_RESULT = { "data": [ { "hostGroupNumber": 0, "hostWwn": CONFIG_MAP['host_wwn'], }, ], } COMPLETED_SUCCEEDED_RESULT = { "status": "Completed", "state": "Succeeded", "affectedResources": ('a/b/c/1',), } def _brick_get_connector_properties(multipath=False, enforce_multipath=False): """Return a predefined connector object.""" return DEFAULT_CONNECTOR class FakeResponse(): def __init__(self, status_code, data=None, headers=None): self.status_code = status_code self.data = data self.text = data self.content = data self.headers = {'Content-Type': 'json'} if headers is None else headers def json(self): return self.data class VStorageRESTFCDriverTest(test.TestCase): """Unit test class for NEC REST interface fibre channel module.""" test_existing_ref = {'source-id': '1'} test_existing_ref_name = { 'source-name': '15960cc7-38c9-4c5b-b4f1-365be5eeed45'} def setUp(self): """Set up the test environment.""" def _set_required(opts, required): for opt in opts: opt.required = required # Initialize Cinder and avoid checking driver options. 
rest_required_opts = [ opt for opt in nec_v_rest.REST_VOLUME_OPTS if opt.required] common_required_opts = [ opt for opt in nec_v_rest.COMMON_VOLUME_OPTS if opt.required] _set_required(rest_required_opts, False) _set_required(common_required_opts, False) super(VStorageRESTFCDriverTest, self).setUp() _set_required(rest_required_opts, True) _set_required(common_required_opts, True) self.configuration = mock.Mock(conf.Configuration) self.ctxt = cinder_context.get_admin_context() self._setup_config() self._setup_driver() def _setup_config(self): """Set configuration parameter values.""" self.configuration.config_group = "REST" self.configuration.volume_backend_name = "RESTFC" self.configuration.volume_driver = ( "cinder.volume.drivers.nec.v.nec_v_fc.VStorageFCDriver") self.configuration.reserved_percentage = "0" self.configuration.use_multipath_for_image_xfer = False self.configuration.enforce_multipath_for_image_xfer = False self.configuration.max_over_subscription_ratio = 500.0 self.configuration.driver_ssl_cert_verify = False self.configuration.nec_v_storage_id = CONFIG_MAP['serial'] self.configuration.nec_v_pools = ["30"] self.configuration.nec_v_snap_pool = None self.configuration.nec_v_ldev_range = "0-1" self.configuration.nec_v_target_ports = [CONFIG_MAP['port_id']] self.configuration.nec_v_compute_target_ports = [ CONFIG_MAP['port_id']] self.configuration.nec_v_group_create = True self.configuration.nec_v_group_delete = True self.configuration.nec_v_copy_speed = 3 self.configuration.nec_v_copy_check_interval = 3 self.configuration.nec_v_async_copy_check_interval = 10 self.configuration.san_login = CONFIG_MAP['user_id'] self.configuration.san_password = CONFIG_MAP['user_pass'] self.configuration.san_ip = CONFIG_MAP[ 'rest_server_ip_addr'] self.configuration.san_api_port = CONFIG_MAP[ 'rest_server_ip_port'] self.configuration.nec_v_rest_disable_io_wait = True self.configuration.nec_v_rest_tcp_keepalive = True self.configuration.nec_v_discard_zero_page = True self.configuration.nec_v_rest_number = "0" self.configuration.nec_v_lun_timeout = hbsd_rest._LUN_TIMEOUT self.configuration.nec_v_lun_retry_interval = ( hbsd_rest._LUN_RETRY_INTERVAL) self.configuration.nec_v_restore_timeout = hbsd_rest._RESTORE_TIMEOUT self.configuration.nec_v_state_transition_timeout = ( hbsd_rest._STATE_TRANSITION_TIMEOUT) self.configuration.nec_v_lock_timeout = hbsd_rest_api._LOCK_TIMEOUT self.configuration.nec_v_rest_timeout = hbsd_rest_api._REST_TIMEOUT self.configuration.nec_v_extend_timeout = ( hbsd_rest_api._EXTEND_TIMEOUT) self.configuration.nec_v_exec_retry_interval = ( hbsd_rest_api._EXEC_RETRY_INTERVAL) self.configuration.nec_v_rest_connect_timeout = ( hbsd_rest_api._DEFAULT_CONNECT_TIMEOUT) self.configuration.nec_v_rest_job_api_response_timeout = ( hbsd_rest_api._JOB_API_RESPONSE_TIMEOUT) self.configuration.nec_v_rest_get_api_response_timeout = ( hbsd_rest_api._GET_API_RESPONSE_TIMEOUT) self.configuration.nec_v_rest_server_busy_timeout = ( hbsd_rest_api._REST_SERVER_BUSY_TIMEOUT) self.configuration.nec_v_rest_keep_session_loop_interval = ( hbsd_rest_api._KEEP_SESSION_LOOP_INTERVAL) self.configuration.nec_v_rest_another_ldev_mapped_retry_timeout = ( hbsd_rest_api._ANOTHER_LDEV_MAPPED_RETRY_TIMEOUT) self.configuration.nec_v_rest_tcp_keepidle = ( hbsd_rest_api._TCP_KEEPIDLE) self.configuration.nec_v_rest_tcp_keepintvl = ( hbsd_rest_api._TCP_KEEPINTVL) self.configuration.nec_v_rest_tcp_keepcnt = ( hbsd_rest_api._TCP_KEEPCNT) self.configuration.nec_v_host_mode_options = [] 
self.configuration.nec_v_zoning_request = False self.configuration.san_thin_provision = True self.configuration.san_private_key = '' self.configuration.san_clustername = '' self.configuration.san_ssh_port = '22' self.configuration.san_is_local = False self.configuration.ssh_conn_timeout = '30' self.configuration.ssh_min_pool_conn = '1' self.configuration.ssh_max_pool_conn = '5' self.configuration.use_chap_auth = True self.configuration.chap_username = CONFIG_MAP['auth_user'] self.configuration.chap_password = CONFIG_MAP['auth_password'] self.configuration.safe_get = self._fake_safe_get CONF = cfg.CONF CONF.my_ip = CONFIG_MAP['my_ip'] def _fake_safe_get(self, value): """Retrieve a configuration value avoiding throwing an exception.""" try: val = getattr(self.configuration, value) except AttributeError: val = None return val @mock.patch.object(requests.Session, "request") @mock.patch.object( volume_utils, 'brick_get_connector_properties', side_effect=_brick_get_connector_properties) def _setup_driver( self, brick_get_connector_properties=None, request=None): """Set up the driver environment.""" self.driver = nec_v_fc.VStorageFCDriver( configuration=self.configuration, db=db) request.side_effect = [FakeResponse(200, POST_SESSIONS_RESULT), FakeResponse(200, GET_PORTS_RESULT), FakeResponse(200, GET_HOST_WWNS_RESULT)] self.driver.do_setup(None) self.driver.check_for_setup_error() self.driver.local_path(None) # stop the Loopingcall within the do_setup treatment self.driver.common.client.keep_session_loop.stop() def tearDown(self): self.client = None super(VStorageRESTFCDriverTest, self).tearDown() # API test cases def test_configuration(self): drv = nec_v_fc.VStorageFCDriver( configuration=self.configuration, db=db) self.assertEqual(drv.configuration.hitachi_storage_id, drv.configuration.nec_v_storage_id) self.assertEqual(drv.configuration.hitachi_pools, drv.configuration.nec_v_pools) self.assertEqual(drv.configuration.hitachi_snap_pool, drv.configuration.nec_v_snap_pool) self.assertEqual(drv.configuration.hitachi_ldev_range, drv.configuration.nec_v_ldev_range) self.assertEqual(drv.configuration.hitachi_target_ports, drv.configuration.nec_v_target_ports) self.assertEqual(drv.configuration.hitachi_compute_target_ports, drv.configuration.nec_v_compute_target_ports) self.assertEqual(drv.configuration.hitachi_group_create, drv.configuration.nec_v_group_create) self.assertEqual(drv.configuration.hitachi_group_delete, drv.configuration.nec_v_group_delete) self.assertEqual(drv.configuration.hitachi_copy_speed, drv.configuration.nec_v_copy_speed) self.assertEqual(drv.configuration.hitachi_copy_check_interval, drv.configuration.nec_v_copy_check_interval) self.assertEqual(drv.configuration.hitachi_async_copy_check_interval, drv.configuration.nec_v_async_copy_check_interval) self.assertEqual(drv.configuration.hitachi_rest_disable_io_wait, drv.configuration.nec_v_rest_disable_io_wait) self.assertEqual(drv.configuration.hitachi_rest_tcp_keepalive, drv.configuration.nec_v_rest_tcp_keepalive) self.assertEqual(drv.configuration.hitachi_discard_zero_page, drv.configuration.nec_v_discard_zero_page) self.assertEqual(drv.configuration.hitachi_lun_timeout, drv.configuration.nec_v_lun_timeout) self.assertEqual(drv.configuration.hitachi_lun_retry_interval, drv.configuration.nec_v_lun_retry_interval) self.assertEqual(drv.configuration.hitachi_restore_timeout, drv.configuration.nec_v_restore_timeout) self.assertEqual(drv.configuration.hitachi_state_transition_timeout, drv.configuration.nec_v_state_transition_timeout) 
self.assertEqual(drv.configuration.hitachi_lock_timeout, drv.configuration.nec_v_lock_timeout) self.assertEqual(drv.configuration.hitachi_rest_timeout, drv.configuration.nec_v_rest_timeout) self.assertEqual(drv.configuration.hitachi_extend_timeout, drv.configuration.nec_v_extend_timeout) self.assertEqual(drv.configuration.hitachi_exec_retry_interval, drv.configuration.nec_v_exec_retry_interval) self.assertEqual(drv.configuration.hitachi_rest_connect_timeout, drv.configuration.nec_v_rest_connect_timeout) self.assertEqual( drv.configuration.hitachi_rest_job_api_response_timeout, drv.configuration.nec_v_rest_job_api_response_timeout) self.assertEqual( drv.configuration.hitachi_rest_get_api_response_timeout, drv.configuration.nec_v_rest_get_api_response_timeout) self.assertEqual(drv.configuration.hitachi_rest_server_busy_timeout, drv.configuration.nec_v_rest_server_busy_timeout) self.assertEqual( drv.configuration.hitachi_rest_keep_session_loop_interval, drv.configuration.nec_v_rest_keep_session_loop_interval) self.assertEqual( drv.configuration.hitachi_rest_another_ldev_mapped_retry_timeout, drv.configuration.nec_v_rest_another_ldev_mapped_retry_timeout) self.assertEqual(drv.configuration.hitachi_rest_tcp_keepidle, drv.configuration.nec_v_rest_tcp_keepidle) self.assertEqual(drv.configuration.hitachi_rest_tcp_keepintvl, drv.configuration.nec_v_rest_tcp_keepintvl) self.assertEqual(drv.configuration.hitachi_rest_tcp_keepcnt, drv.configuration.nec_v_rest_tcp_keepcnt) self.assertEqual(drv.configuration.hitachi_host_mode_options, drv.configuration.nec_v_host_mode_options) self.assertEqual(drv.configuration.hitachi_zoning_request, drv.configuration.nec_v_zoning_request) def test_driverinfo(self): drv = nec_v_fc.VStorageFCDriver( configuration=self.configuration, db=db) self.assertEqual(drv.common.driver_info['version'], "1.0.0") self.assertEqual(drv.common.driver_info['proto'], "FC") self.assertEqual(drv.common.driver_info['hba_id'], "wwpns") self.assertEqual(drv.common.driver_info['hba_id_type'], "World Wide Name") self.assertEqual(drv.common.driver_info['msg_id']['target'].msg_id, 308) self.assertEqual(drv.common.driver_info['volume_backend_name'], "NECFC") self.assertEqual(drv.common.driver_info['volume_type'], "fibre_channel") self.assertEqual(drv.common.driver_info['param_prefix'], "nec_v") self.assertEqual(drv.common.driver_info['vendor_name'], "NEC") self.assertEqual(drv.common.driver_info['driver_prefix'], "NEC") self.assertEqual(drv.common.driver_info['driver_file_prefix'], "nec") self.assertEqual(drv.common.driver_info['target_prefix'], "NEC-") self.assertEqual(drv.common.driver_info['hdp_vol_attr'], "DP") self.assertEqual(drv.common.driver_info['hdt_vol_attr'], "DT") self.assertEqual(drv.common.driver_info['nvol_ldev_type'], "DP-VOL") self.assertEqual(drv.common.driver_info['target_iqn_suffix'], ".nec-target") self.assertEqual(drv.common.driver_info['pair_attr'], "SS") @mock.patch.object(requests.Session, "request") @mock.patch.object(volume_types, 'get_volume_type_extra_specs') @mock.patch.object(volume_types, 'get_volume_type_qos_specs') def test_create_volume( self, get_volume_type_qos_specs, get_volume_type_extra_specs, request): request.return_value = FakeResponse(202, COMPLETED_SUCCEEDED_RESULT) get_volume_type_extra_specs.return_value = {} get_volume_type_qos_specs.return_value = {'qos_specs': None} self.driver.common._stats = {} self.driver.common._stats['pools'] = [ {'location_info': {'pool_id': 30}}] ret = self.driver.create_volume(TEST_VOLUME[0]) self.assertEqual('1', 
ret['provider_location']) self.assertEqual(2, request.call_count) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/volume/drivers/nec/v/test_nec_rest_iscsi.py0000664000175000017500000004337200000000000027071 0ustar00zuulzuul00000000000000# Copyright (C) 2021, 2024, NEC corporation # # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # """Unit tests for NEC Driver.""" from unittest import mock from oslo_config import cfg import requests from cinder import context as cinder_context from cinder import db from cinder.tests.unit import fake_volume from cinder.tests.unit import test from cinder.volume import configuration as conf from cinder.volume.drivers.hitachi import hbsd_rest from cinder.volume.drivers.hitachi import hbsd_rest_api from cinder.volume.drivers.nec.v import nec_v_iscsi from cinder.volume.drivers.nec.v import nec_v_rest from cinder.volume import volume_types from cinder.volume import volume_utils # Configuration parameter values CONFIG_MAP = { 'serial': '886000123456', 'my_ip': '127.0.0.1', 'rest_server_ip_addr': '172.16.18.108', 'rest_server_ip_port': '23451', 'port_id': 'CL1-A', 'host_grp_name': 'NEC-127.0.0.1', 'host_mode': 'LINUX/IRIX', 'host_iscsi_name': 'iqn.nec-test-host', 'target_iscsi_name': 'iqn.nec-test-target', 'user_id': 'user', 'user_pass': 'password', 'ipv4Address': '111.22.333.44', 'tcpPort': '5555', 'auth_user': 'auth_user', 'auth_password': 'auth_password', } DEFAULT_CONNECTOR = { 'host': 'host', 'ip': CONFIG_MAP['my_ip'], 'initiator': CONFIG_MAP['host_iscsi_name'], 'multipath': False, } CTXT = cinder_context.get_admin_context() TEST_VOLUME = [] volume = {} volume['id'] = '00000000-0000-0000-0000-{0:012d}'.format(0) volume['name'] = 'test-volume{0:d}'.format(0) volume['volume_type_id'] = '00000000-0000-0000-0000-{0:012d}'.format(0) volume = fake_volume.fake_volume_obj(CTXT, **volume) volume.volume_type = fake_volume.fake_volume_type_obj(CTXT) TEST_VOLUME.append(volume) # Dummy response for REST API POST_SESSIONS_RESULT = { "token": "b74777a3-f9f0-4ea8-bd8f-09847fac48d3", "sessionId": 0, } GET_PORTS_RESULT = { "data": [ { "portId": CONFIG_MAP['port_id'], "portType": "ISCSI", "portAttributes": [ "TAR", "MCU", "RCU", "ELUN" ], "portSpeed": "AUT", "loopId": "00", "fabricMode": False, "lunSecuritySetting": True, }, ], } GET_PORT_RESULT = { "ipv4Address": CONFIG_MAP['ipv4Address'], "tcpPort": CONFIG_MAP['tcpPort'], } GET_HOST_ISCSIS_RESULT = { "data": [ { "hostGroupNumber": 0, "iscsiName": CONFIG_MAP['host_iscsi_name'], }, ], } GET_HOST_GROUP_RESULT = { "hostGroupName": CONFIG_MAP['host_grp_name'], "iscsiName": CONFIG_MAP['target_iscsi_name'], } GET_HOST_GROUPS_RESULT = { "data": [ { "hostGroupNumber": 0, "portId": CONFIG_MAP['port_id'], "hostGroupName": "NEC-test", "iscsiName": CONFIG_MAP['target_iscsi_name'], }, ], } COMPLETED_SUCCEEDED_RESULT = { "status": "Completed", "state": "Succeeded", "affectedResources": ('a/b/c/1',), } def _brick_get_connector_properties(multipath=False, 
enforce_multipath=False): """Return a predefined connector object.""" return DEFAULT_CONNECTOR class FakeResponse(): def __init__(self, status_code, data=None, headers=None): self.status_code = status_code self.data = data self.text = data self.content = data self.headers = {'Content-Type': 'json'} if headers is None else headers def json(self): return self.data class VStorageRESTISCSIDriverTest(test.TestCase): """Unit test class for NEC REST interface iSCSI module.""" test_existing_ref = {'source-id': '1'} test_existing_ref_name = { 'source-name': '15960cc7-38c9-4c5b-b4f1-365be5eeed45'} def setUp(self): """Set up the test environment.""" def _set_required(opts, required): for opt in opts: opt.required = required # Initialize Cinder and avoid checking driver options. rest_required_opts = [ opt for opt in nec_v_rest.REST_VOLUME_OPTS if opt.required] common_required_opts = [ opt for opt in nec_v_rest.COMMON_VOLUME_OPTS if opt.required] _set_required(rest_required_opts, False) _set_required(common_required_opts, False) super(VStorageRESTISCSIDriverTest, self).setUp() _set_required(rest_required_opts, True) _set_required(common_required_opts, True) self.configuration = mock.Mock(conf.Configuration) self.ctxt = cinder_context.get_admin_context() self._setup_config() self._setup_driver() def _setup_config(self): """Set configuration parameter values.""" self.configuration.config_group = "REST" self.configuration.volume_backend_name = "RESTISCSI" self.configuration.volume_driver = ( "cinder.volume.drivers.nec.v.nec_v_iscsi.VStorageISCSIDriver") self.configuration.reserved_percentage = "0" self.configuration.use_multipath_for_image_xfer = False self.configuration.enforce_multipath_for_image_xfer = False self.configuration.max_over_subscription_ratio = 500.0 self.configuration.driver_ssl_cert_verify = False self.configuration.nec_v_storage_id = CONFIG_MAP['serial'] self.configuration.nec_v_pools = ["30"] self.configuration.nec_v_snap_pool = None self.configuration.nec_v_ldev_range = "0-1" self.configuration.nec_v_target_ports = [CONFIG_MAP['port_id']] self.configuration.nec_v_compute_target_ports = [ CONFIG_MAP['port_id']] self.configuration.nec_v_group_create = True self.configuration.nec_v_group_delete = True self.configuration.nec_v_copy_speed = 3 self.configuration.nec_v_copy_check_interval = 3 self.configuration.nec_v_async_copy_check_interval = 10 self.configuration.san_login = CONFIG_MAP['user_id'] self.configuration.san_password = CONFIG_MAP['user_pass'] self.configuration.san_ip = CONFIG_MAP[ 'rest_server_ip_addr'] self.configuration.san_api_port = CONFIG_MAP[ 'rest_server_ip_port'] self.configuration.nec_v_rest_disable_io_wait = True self.configuration.nec_v_rest_tcp_keepalive = True self.configuration.nec_v_discard_zero_page = True self.configuration.nec_v_rest_number = "0" self.configuration.nec_v_lun_timeout = hbsd_rest._LUN_TIMEOUT self.configuration.nec_v_lun_retry_interval = ( hbsd_rest._LUN_RETRY_INTERVAL) self.configuration.nec_v_restore_timeout = hbsd_rest._RESTORE_TIMEOUT self.configuration.nec_v_state_transition_timeout = ( hbsd_rest._STATE_TRANSITION_TIMEOUT) self.configuration.nec_v_lock_timeout = hbsd_rest_api._LOCK_TIMEOUT self.configuration.nec_v_rest_timeout = hbsd_rest_api._REST_TIMEOUT self.configuration.nec_v_extend_timeout = ( hbsd_rest_api._EXTEND_TIMEOUT) self.configuration.nec_v_exec_retry_interval = ( hbsd_rest_api._EXEC_RETRY_INTERVAL) self.configuration.nec_v_rest_connect_timeout = ( hbsd_rest_api._DEFAULT_CONNECT_TIMEOUT) 
self.configuration.nec_v_rest_job_api_response_timeout = ( hbsd_rest_api._JOB_API_RESPONSE_TIMEOUT) self.configuration.nec_v_rest_get_api_response_timeout = ( hbsd_rest_api._GET_API_RESPONSE_TIMEOUT) self.configuration.nec_v_rest_server_busy_timeout = ( hbsd_rest_api._REST_SERVER_BUSY_TIMEOUT) self.configuration.nec_v_rest_keep_session_loop_interval = ( hbsd_rest_api._KEEP_SESSION_LOOP_INTERVAL) self.configuration.nec_v_rest_another_ldev_mapped_retry_timeout = ( hbsd_rest_api._ANOTHER_LDEV_MAPPED_RETRY_TIMEOUT) self.configuration.nec_v_rest_tcp_keepidle = ( hbsd_rest_api._TCP_KEEPIDLE) self.configuration.nec_v_rest_tcp_keepintvl = ( hbsd_rest_api._TCP_KEEPINTVL) self.configuration.nec_v_rest_tcp_keepcnt = ( hbsd_rest_api._TCP_KEEPCNT) self.configuration.nec_v_host_mode_options = [] self.configuration.use_chap_auth = True self.configuration.chap_username = CONFIG_MAP['auth_user'] self.configuration.chap_password = CONFIG_MAP['auth_password'] self.configuration.san_thin_provision = True self.configuration.san_private_key = '' self.configuration.san_clustername = '' self.configuration.san_ssh_port = '22' self.configuration.san_is_local = False self.configuration.ssh_conn_timeout = '30' self.configuration.ssh_min_pool_conn = '1' self.configuration.ssh_max_pool_conn = '5' self.configuration.safe_get = self._fake_safe_get CONF = cfg.CONF CONF.my_ip = CONFIG_MAP['my_ip'] def _fake_safe_get(self, value): """Retrieve a configuration value avoiding throwing an exception.""" try: val = getattr(self.configuration, value) except AttributeError: val = None return val @mock.patch.object(requests.Session, "request") @mock.patch.object( volume_utils, 'brick_get_connector_properties', side_effect=_brick_get_connector_properties) def _setup_driver( self, brick_get_connector_properties=None, request=None): """Set up the driver environment.""" self.driver = nec_v_iscsi.VStorageISCSIDriver( configuration=self.configuration, db=db) request.side_effect = [FakeResponse(200, POST_SESSIONS_RESULT), FakeResponse(200, GET_PORTS_RESULT), FakeResponse(200, GET_PORT_RESULT), FakeResponse(200, GET_HOST_ISCSIS_RESULT), FakeResponse(200, GET_HOST_GROUP_RESULT)] self.driver.do_setup(None) self.driver.check_for_setup_error() self.driver.local_path(None) # stop the Loopingcall within the do_setup treatment self.driver.common.client.keep_session_loop.stop() def tearDown(self): self.client = None super(VStorageRESTISCSIDriverTest, self).tearDown() # API test cases def test_configuration(self): drv = nec_v_iscsi.VStorageISCSIDriver( configuration=self.configuration, db=db) self.assertEqual(drv.configuration.hitachi_storage_id, drv.configuration.nec_v_storage_id) self.assertEqual(drv.configuration.hitachi_pools, drv.configuration.nec_v_pools) self.assertEqual(drv.configuration.hitachi_snap_pool, drv.configuration.nec_v_snap_pool) self.assertEqual(drv.configuration.hitachi_ldev_range, drv.configuration.nec_v_ldev_range) self.assertEqual(drv.configuration.hitachi_target_ports, drv.configuration.nec_v_target_ports) self.assertEqual(drv.configuration.hitachi_compute_target_ports, drv.configuration.nec_v_compute_target_ports) self.assertEqual(drv.configuration.hitachi_group_create, drv.configuration.nec_v_group_create) self.assertEqual(drv.configuration.hitachi_group_delete, drv.configuration.nec_v_group_delete) self.assertEqual(drv.configuration.hitachi_copy_speed, drv.configuration.nec_v_copy_speed) self.assertEqual(drv.configuration.hitachi_copy_check_interval, drv.configuration.nec_v_copy_check_interval) 
self.assertEqual(drv.configuration.hitachi_async_copy_check_interval, drv.configuration.nec_v_async_copy_check_interval) self.assertEqual(drv.configuration.hitachi_rest_disable_io_wait, drv.configuration.nec_v_rest_disable_io_wait) self.assertEqual(drv.configuration.hitachi_rest_tcp_keepalive, drv.configuration.nec_v_rest_tcp_keepalive) self.assertEqual(drv.configuration.hitachi_discard_zero_page, drv.configuration.nec_v_discard_zero_page) self.assertEqual(drv.configuration.hitachi_lun_timeout, drv.configuration.nec_v_lun_timeout) self.assertEqual(drv.configuration.hitachi_lun_retry_interval, drv.configuration.nec_v_lun_retry_interval) self.assertEqual(drv.configuration.hitachi_restore_timeout, drv.configuration.nec_v_restore_timeout) self.assertEqual(drv.configuration.hitachi_state_transition_timeout, drv.configuration.nec_v_state_transition_timeout) self.assertEqual(drv.configuration.hitachi_lock_timeout, drv.configuration.nec_v_lock_timeout) self.assertEqual(drv.configuration.hitachi_rest_timeout, drv.configuration.nec_v_rest_timeout) self.assertEqual(drv.configuration.hitachi_extend_timeout, drv.configuration.nec_v_extend_timeout) self.assertEqual(drv.configuration.hitachi_exec_retry_interval, drv.configuration.nec_v_exec_retry_interval) self.assertEqual(drv.configuration.hitachi_rest_connect_timeout, drv.configuration.nec_v_rest_connect_timeout) self.assertEqual( drv.configuration.hitachi_rest_job_api_response_timeout, drv.configuration.nec_v_rest_job_api_response_timeout) self.assertEqual( drv.configuration.hitachi_rest_get_api_response_timeout, drv.configuration.nec_v_rest_get_api_response_timeout) self.assertEqual(drv.configuration.hitachi_rest_server_busy_timeout, drv.configuration.nec_v_rest_server_busy_timeout) self.assertEqual( drv.configuration.hitachi_rest_keep_session_loop_interval, drv.configuration.nec_v_rest_keep_session_loop_interval) self.assertEqual( drv.configuration.hitachi_rest_another_ldev_mapped_retry_timeout, drv.configuration.nec_v_rest_another_ldev_mapped_retry_timeout) self.assertEqual(drv.configuration.hitachi_rest_tcp_keepidle, drv.configuration.nec_v_rest_tcp_keepidle) self.assertEqual(drv.configuration.hitachi_rest_tcp_keepintvl, drv.configuration.nec_v_rest_tcp_keepintvl) self.assertEqual(drv.configuration.hitachi_rest_tcp_keepcnt, drv.configuration.nec_v_rest_tcp_keepcnt) self.assertEqual(drv.configuration.hitachi_host_mode_options, drv.configuration.nec_v_host_mode_options) def test_driverinfo(self): drv = nec_v_iscsi.VStorageISCSIDriver( configuration=self.configuration, db=db) self.assertEqual(drv.common.driver_info['version'], "1.0.0") self.assertEqual(drv.common.driver_info['proto'], "iSCSI") self.assertEqual(drv.common.driver_info['hba_id'], "initiator") self.assertEqual(drv.common.driver_info['hba_id_type'], "iSCSI initiator IQN") self.assertEqual(drv.common.driver_info['msg_id']['target'].msg_id, 309) self.assertEqual(drv.common.driver_info['volume_backend_name'], "NECiSCSI") self.assertEqual(drv.common.driver_info['volume_type'], "iscsi") self.assertEqual(drv.common.driver_info['param_prefix'], "nec_v") self.assertEqual(drv.common.driver_info['vendor_name'], "NEC") self.assertEqual(drv.common.driver_info['driver_prefix'], "NEC") self.assertEqual(drv.common.driver_info['driver_file_prefix'], "nec") self.assertEqual(drv.common.driver_info['target_prefix'], "NEC-") self.assertEqual(drv.common.driver_info['hdp_vol_attr'], "DP") self.assertEqual(drv.common.driver_info['hdt_vol_attr'], "DT") self.assertEqual(drv.common.driver_info['nvol_ldev_type'], 
"DP-VOL") self.assertEqual(drv.common.driver_info['target_iqn_suffix'], ".nec-target") self.assertEqual(drv.common.driver_info['pair_attr'], "SS") @mock.patch.object(requests.Session, "request") @mock.patch.object(volume_types, 'get_volume_type_extra_specs') @mock.patch.object(volume_types, 'get_volume_type_qos_specs') def test_create_volume( self, get_volume_type_qos_specs, get_volume_type_extra_specs, request): request.return_value = FakeResponse(202, COMPLETED_SUCCEEDED_RESULT) get_volume_type_extra_specs.return_value = {} get_volume_type_qos_specs.return_value = {'qos_specs': None} self.driver.common._stats = {} self.driver.common._stats['pools'] = [ {'location_info': {'pool_id': 30}}] ret = self.driver.create_volume(TEST_VOLUME[0]) self.assertEqual('1', ret['provider_location']) self.assertEqual(2, request.call_count) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315577.2911203 cinder-27.0.0/cinder/tests/unit/volume/drivers/netapp/0000775000175000017500000000000000000000000022730 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/volume/drivers/netapp/__init__.py0000664000175000017500000000000000000000000025027 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315577.2911203 cinder-27.0.0/cinder/tests/unit/volume/drivers/netapp/dataontap/0000775000175000017500000000000000000000000024703 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/volume/drivers/netapp/dataontap/__init__.py0000664000175000017500000000000000000000000027002 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315577.2951202 cinder-27.0.0/cinder/tests/unit/volume/drivers/netapp/dataontap/client/0000775000175000017500000000000000000000000026161 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/volume/drivers/netapp/dataontap/client/__init__.py0000664000175000017500000000000000000000000030260 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/volume/drivers/netapp/dataontap/client/fakes.py0000664000175000017500000030662700000000000027642 0ustar00zuulzuul00000000000000# Copyright (c) - 2015, Tom Barron. All rights reserved. # Copyright (c) - 2016 Mike Rooney. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from unittest import mock import urllib from lxml import etree from cinder.tests.unit.volume.drivers.netapp.dataontap import fakes as fake import cinder.volume.drivers.netapp.dataontap.client.api as netapp_api FAKE_VOL_XML = b""" open123 online 0 0 0 false false """ FAKE_XML1 = b"""\ abc\ abc\ """ FAKE_XML2 = b"""somecontent""" FAKE_NA_ELEMENT = netapp_api.NaElement(etree.XML(FAKE_VOL_XML)) FAKE_INVOKE_DATA = 'somecontent' FAKE_XML_STR = 'abc' FAKE_API_NAME = 'volume-get-iter' FAKE_API_NAME_ELEMENT = netapp_api.NaElement(FAKE_API_NAME) FAKE_NA_SERVER_STR = '127.0.0.1' FAKE_NA_SERVER = netapp_api.NaServer(FAKE_NA_SERVER_STR) FAKE_NA_SERVER_API_1_5 = netapp_api.NaServer(FAKE_NA_SERVER_STR) FAKE_NA_SERVER_API_1_5.set_vfiler('filer') FAKE_NA_SERVER_API_1_5.set_api_version(1, 5) FAKE_NA_SERVER_API_1_14 = netapp_api.NaServer(FAKE_NA_SERVER_STR) FAKE_NA_SERVER_API_1_14.set_vserver('server') FAKE_NA_SERVER_API_1_14.set_api_version(1, 14) FAKE_NA_SERVER_API_1_20 = netapp_api.NaServer(FAKE_NA_SERVER_STR) FAKE_NA_SERVER_API_1_20.set_vfiler('filer') FAKE_NA_SERVER_API_1_20.set_vserver('server') FAKE_NA_SERVER_API_1_20.set_api_version(1, 20) VOLUME_VSERVER_NAME = 'fake_vserver' VOLUME_NAMES = ('volume1', 'volume2') VOLUME_NAME = 'volume1' NAMESPACE_NAME = '/vol/vol1/namespace1' HOST_NQN = 'nqn.1992-01.example.com:host' DEST_VOLUME_NAME = 'volume-dest' LUN_NAME = 'fake-lun-name' DEST_LUN_NAME = 'new-fake-lun-name' FILE_NAME = 'fake-file-name' DEST_FILE_NAME = 'new-fake-file-name' FAKE_UUID = 'b32bab78-82be-11ec-a8a3-0242ac120002' FAKE_QUERY = {'volume-attributes': None} FAKE_DES_ATTR = {'volume-attributes': ['volume-id-attributes', 'volume-space-attributes', 'volume-state-attributes', 'volume-qos-attributes']} FAKE_CALL_ARGS_LIST = [mock.call(80), mock.call(8088), mock.call(443), mock.call(8488)] FAKE_RESULT_API_ERR_REASON = netapp_api.NaElement('result') FAKE_RESULT_API_ERR_REASON.add_attr('errno', '000') FAKE_RESULT_API_ERR_REASON.add_attr('reason', 'fake_reason') FAKE_RESULT_API_ERRNO_INVALID = netapp_api.NaElement('result') FAKE_RESULT_API_ERRNO_INVALID.add_attr('errno', '000') FAKE_RESULT_API_ERRNO_VALID = netapp_api.NaElement('result') FAKE_RESULT_API_ERRNO_VALID.add_attr('errno', '14956') FAKE_RESULT_SUCCESS = netapp_api.NaElement('result') FAKE_RESULT_SUCCESS.add_attr('status', 'passed') FAKE_HTTP_OPENER = urllib.request.build_opener() INITIATOR_IQN = 'iqn.2015-06.com.netapp:fake_iqn' USER_NAME = 'fake_user' PASSWORD = 'passw0rd' ENCRYPTED_PASSWORD = 'B351F145DA527445' NO_RECORDS_RESPONSE = etree.XML(""" 0 """) VOLUME_GET_NAME_RESPONSE = etree.XML(""" %(volume)s %(vserver)s 1 """ % {'volume': VOLUME_NAMES[0], 'vserver': VOLUME_VSERVER_NAME}) INVALID_GET_ITER_RESPONSE_NO_ATTRIBUTES = etree.XML(""" 1 fake_tag """) INVALID_GET_ITER_RESPONSE_NO_RECORDS = etree.XML(""" fake_tag """) INVALID_RESPONSE = etree.XML(""" 1 """) GET_OPERATIONAL_LIF_ADDRESSES_RESPONSE = etree.XML(""" 2
%(address1)s
%(address2)s
""" % {"address1": "1.2.3.4", "address2": "99.98.97.96"}) QOS_POLICY_GROUP_GET_ITER_RESPONSE = etree.XML(""" 30KB/S 1 53 fake_qos_policy_group_name user_defined 12496028-b641-11e5-abbd-123478563412 cinder-iscsi 1 """) VOLUME_LIST_INFO_RESPONSE = etree.XML(""" vol0 64_bit online 1441193750528 3161096192 1438032654336 0 vfiler0 aggr0 volume true false false false vol1 64_bit online 1441193750528 3161096192 1438032654336 0 vfiler0 aggr0 volume true false false false vol2 64_bit offline 1441193750528 3161096192 1438032654336 0 vfiler0 aggr0 volume true false false false vol3 64_bit online 1441193750528 3161096192 1438032654336 0 vfiler0 aggr0 volume true false false false """) SNAPSHOT_INFO_FOR_PRESENT_NOT_BUSY_SNAPSHOT_CMODE = etree.XML(""" %(snapshot_name)s False %(vol_name)s abcd-ef01-2345-6789 1 """ % { 'snapshot_name': fake.SNAPSHOT['name'], 'vol_name': fake.SNAPSHOT['volume_id'], }) SNAPSHOT_INFO_FOR_PRESENT_BUSY_SNAPSHOT_CMODE = etree.XML(""" %(snapshot_name)s True %(vol_name)s 1 """ % { 'snapshot_name': fake.SNAPSHOT['name'], 'vol_name': fake.SNAPSHOT['volume_id'], }) NODE_NAME = 'fake_node1' NODE_NAMES = ('fake_node1', 'fake_node2') VOLUME_AGGREGATE_NAME = 'fake_aggr1' VOLUME_AGGREGATE_NAMES = ('fake_aggr1', 'fake_aggr2') AGGR_GET_ITER_RESPONSE = etree.XML(""" false 64_bit 1758646411 aggr 512 30384 96 30384 30384 30384 243191 96 0 4082368507 cluster3-01 4082368507 cluster3-01 off 0 active block 3 cfo true false true false false false unmirrored online 1 true false /%(aggr1)s/plex0 normal,active block false false false /%(aggr1)s/plex0/rg0 0 0 0 on 16 raid_dp, normal raid_dp online false 0 0 true true 0 0 0 0 0 0 0 0 0 245760 0 95 45670400 943718400 898048000 0 898048000 897802240 1 0 0 %(aggr1)s 15863632-ea49-49a8-9c88-2bd2d57c6d7a cluster3-01 unknown false 64_bit 706602229 aggr 528 31142 96 31142 31142 31142 1945584 96 0 4082368507 cluster3-01 4082368507 cluster3-01 off 0 active block 10 sfo false false true false false false unmirrored online 1 true false /%(aggr2)s/plex0 normal,active block false false false /%(aggr2)s/plex0/rg0 0 0 block false false false /%(aggr2)s/plex0/rg1 0 0 0 on 8 raid4, normal raid4 online false 0 0 true true 0 0 0 0 0 0 0 0 0 425984 0 15 6448431104 7549747200 1101316096 0 1101316096 1100890112 2 0 0 %(aggr2)s 2a741934-1aaf-42dd-93ca-aaf231be108a cluster3-01 not_striped 2 """ % { 'aggr1': VOLUME_AGGREGATE_NAMES[0], 'aggr2': VOLUME_AGGREGATE_NAMES[1], }) AGGR_GET_SPACE_RESPONSE = etree.XML(""" /%(aggr1)s/plex0 /%(aggr1)s/plex0/rg0 45670400 943718400 898048000 %(aggr1)s /%(aggr2)s/plex0 /%(aggr2)s/plex0/rg0 /%(aggr2)s/plex0/rg1 4267659264 7549747200 3282087936 %(aggr2)s 2 """ % { 'aggr1': VOLUME_AGGREGATE_NAMES[0], 'aggr2': VOLUME_AGGREGATE_NAMES[1], }) AGGR_GET_NODE_RESPONSE = etree.XML(""" %(node)s %(aggr)s 1 """ % { 'aggr': VOLUME_AGGREGATE_NAME, 'node': NODE_NAME, }) AGGREGATE_RAID_TYPE = 'raid_dp' AGGR_GET_ITER_SSC_RESPONSE = etree.XML(""" /%(aggr)s/plex0 /%(aggr)s/plex0/rg0 %(raid)s true %(node)s %(aggr)s 1 """ % { 'aggr': VOLUME_AGGREGATE_NAME, 'raid': AGGREGATE_RAID_TYPE, 'node': NODE_NAME, }) AGGR_INFO_SSC = { 'name': VOLUME_AGGREGATE_NAME, 'raid-type': AGGREGATE_RAID_TYPE, 'is-hybrid': True, 'node-name': NODE_NAME, } AGGR_SIZE_TOTAL = 107374182400 AGGR_SIZE_AVAILABLE = 59055800320 AGGR_USED_PERCENT = 45 AGGR_SIZE_USED = 58888957952 AGGR_GET_ITER_CAPACITY_RESPONSE = etree.XML(""" %(used)s %(total_size)s %(available_size)s %(aggr)s 1 """ % { 'aggr': VOLUME_AGGREGATE_NAME, 'used': AGGR_USED_PERCENT, 'available_size': AGGR_SIZE_AVAILABLE, 'total_size': 
AGGR_SIZE_TOTAL, }) VOLUME_STATE_ONLINE = 'online' VOLUME_GET_ITER_STATE_ATTR_STR = """ flexgroup %(state)s """ % { 'state': VOLUME_STATE_ONLINE } VOLUME_GET_ITER_STATE_ATTR = etree.XML(VOLUME_GET_ITER_STATE_ATTR_STR) VOLUME_GET_ITER_STATE_RESPONSE = etree.XML(""" 1 %(volume)s """ % { 'volume': VOLUME_GET_ITER_STATE_ATTR_STR, }) VOLUME_SIZE_TOTAL = 19922944 VOLUME_SIZE_AVAILABLE = 19791872 VOLUME_GET_ITER_CAPACITY_ATTR_STR = """ flexgroup %(available_size)s %(total_size)s """ % { 'available_size': VOLUME_SIZE_AVAILABLE, 'total_size': VOLUME_SIZE_TOTAL, } VOLUME_GET_ITER_CAPACITY_ATTR = etree.XML(VOLUME_GET_ITER_CAPACITY_ATTR_STR) VOLUME_GET_ITER_CAPACITY_RESPONSE = etree.XML(""" 1 %(volume)s """ % { 'volume': VOLUME_GET_ITER_CAPACITY_ATTR_STR, }) VOLUME_GET_ITER_STYLE_RESPONSE = etree.XML(""" 3 flexgroup flexgroup-constituent flexgroup-constituent """) VOLUME_FLEXGROUP_STYLE = etree.XML(""" flexgroup """) VOLUME_GET_ITER_SAME_STYLE_RESPONSE = etree.XML(""" 3 flexvol flexvol flexvol """) VOLUME_GET_ITER_LIST_RESPONSE = etree.XML(""" %(volume1)s %(vserver)s %(volume2)s %(vserver)s 2 """ % { 'volume1': VOLUME_NAMES[0], 'volume2': VOLUME_NAMES[1], 'vserver': VOLUME_VSERVER_NAME, }) VOLUME_GET_ITER_SSC_RESPONSE_STR = """ %(aggr)s /%(volume)s %(volume)s %(vserver)s rw flexvol false false fake_qos_policy_group_name true none 5 12345 default c.utf_8 """ % { 'aggr': VOLUME_AGGREGATE_NAMES[0], 'volume': VOLUME_NAMES[0], 'vserver': VOLUME_VSERVER_NAME, } VOLUME_GET_ITER_SSC_RESPONSE_ATTR = etree.XML( VOLUME_GET_ITER_SSC_RESPONSE_STR) VOLUME_GET_ITER_SSC_RESPONSE = etree.XML(""" %(volume)s 1 """ % { 'volume': VOLUME_GET_ITER_SSC_RESPONSE_STR, }) VOLUME_GET_ITER_SSC_RESPONSE_STR_FLEXGROUP = """ %(aggr)s /%(volume)s %(volume)s %(vserver)s rw flexgroup false false fake_qos_policy_group_name true none 5 12345 default c.utf_8 """ % { 'aggr': VOLUME_AGGREGATE_NAMES[0], 'volume': VOLUME_NAMES[0], 'vserver': VOLUME_VSERVER_NAME, } VOLUME_GET_ITER_SSC_RESPONSE_ATTR_FLEXGROUP = etree.XML( VOLUME_GET_ITER_SSC_RESPONSE_STR_FLEXGROUP) VOLUME_GET_ITER_SSC_RESPONSE_FLEXGROUP = etree.XML(""" %(volume)s 1 """ % { 'volume': VOLUME_GET_ITER_SSC_RESPONSE_STR_FLEXGROUP, }) VOLUME_INFO_SSC = { 'name': VOLUME_NAMES[0], 'vserver': VOLUME_VSERVER_NAME, 'junction-path': '/%s' % VOLUME_NAMES[0], 'aggregate': [VOLUME_AGGREGATE_NAMES[0]], 'space-guarantee-enabled': True, 'language': 'c.utf_8', 'percentage-snapshot-reserve': '5', 'snapshot-policy': 'default', 'type': 'rw', 'size': '12345', 'space-guarantee': 'none', 'qos-policy-group': 'fake_qos_policy_group_name', 'style-extended': 'flexvol', } VOLUME_INFO_SSC_FLEXGROUP = { 'name': VOLUME_NAMES[0], 'vserver': VOLUME_VSERVER_NAME, 'junction-path': '/%s' % VOLUME_NAMES[0], 'aggregate': [VOLUME_AGGREGATE_NAMES[0]], 'space-guarantee-enabled': True, 'language': 'c.utf_8', 'percentage-snapshot-reserve': '5', 'snapshot-policy': 'default', 'type': 'rw', 'size': '12345', 'space-guarantee': 'none', 'qos-policy-group': 'fake_qos_policy_group_name', 'style-extended': 'flexgroup', } SIS_GET_ITER_SSC_RESPONSE = etree.XML(""" false enabled 211106232532992 703687441776640 1 """) VOLUME_DEDUPE_INFO_SSC = { 'compression': False, 'dedupe': True, 'logical-data-size': 211106232532992, 'logical-data-limit': 703687441776640, } SIS_GET_ITER_SSC_NO_LOGICAL_DATA_RESPONSE = etree.XML(""" false disabled 1 """) VOLUME_DEDUPE_INFO_SSC_NO_LOGICAL_DATA = { 'compression': False, 'dedupe': False, 'logical-data-size': 0, 'logical-data-limit': 1, } CLONE_SPLIT_STATUS_RESPONSE = etree.XML(""" 1234 316659348799488 
""") VOLUME_CLONE_SPLIT_STATUS = { 'unsplit-size': 316659348799488, 'unsplit-clone-count': 1234, } CLONE_SPLIT_STATUS_NO_DATA_RESPONSE = etree.XML(""" """) VOLUME_GET_ITER_ENCRYPTION_SSC_RESPONSE = etree.XML(""" true %(aggr)s /%(volume)s %(volume)s %(vserver)s rw false false fake_qos_policy_group_name true none 5 12345 default c.utf_8 1 """ % { 'aggr': VOLUME_AGGREGATE_NAMES[0], 'volume': VOLUME_NAMES[0], 'vserver': VOLUME_VSERVER_NAME, }) STORAGE_DISK_GET_ITER_RESPONSE_PAGE_1 = etree.XML(""" cluster3-01:v4.16 cluster3-01:v4.17 cluster3-01:v4.18 cluster3-01:v4.19 cluster3-01:v4.20 cluster3-01:v4.21 cluster3-01:v4.22 cluster3-01:v4.24 cluster3-01:v4.25 cluster3-01:v4.26 next_tag_1 10 """) STORAGE_DISK_GET_ITER_RESPONSE_PAGE_2 = etree.XML(""" cluster3-01:v4.27 cluster3-01:v4.28 cluster3-01:v4.29 cluster3-01:v4.32 cluster3-01:v5.16 cluster3-01:v5.17 cluster3-01:v5.18 cluster3-01:v5.19 cluster3-01:v5.20 cluster3-01:v5.21 next_tag_2 10 """) STORAGE_DISK_GET_ITER_RESPONSE_PAGE_3 = etree.XML(""" cluster3-01:v5.22 cluster3-01:v5.24 cluster3-01:v5.25 cluster3-01:v5.26 cluster3-01:v5.27 cluster3-01:v5.28 cluster3-01:v5.29 cluster3-01:v5.32 8 """) AGGREGATE_DISK_TYPES = ['SATA', 'SSD'] STORAGE_DISK_GET_ITER_RESPONSE = etree.XML(""" cluster3-01:v5.19 %(type0)s cluster3-01:v5.20 %(type0)s cluster3-01:v5.20 %(type1)s cluster3-01:v5.20 %(type1)s 4 """ % { 'type0': AGGREGATE_DISK_TYPES[0], 'type1': AGGREGATE_DISK_TYPES[1], }) SYSTEM_USER_CAPABILITY_GET_ITER_RESPONSE = etree.XML(""" object api,api2,api3 operation 1 """) PERF_OBJECT_COUNTER_TOTAL_CP_MSECS_LABELS = [ 'SETUP', 'PRE_P0', 'P0_SNAP_DEL', 'P1_CLEAN', 'P1_QUOTA', 'IPU_DISK_ADD', 'P2V_INOFILE', 'P2V_INO_PUB', 'P2V_INO_PRI', 'P2V_FSINFO', 'P2V_DLOG1', 'P2V_DLOG2', 'P2V_REFCOUNT', 'P2V_TOPAA', 'P2V_DF_SCORES_SUB', 'P2V_BM', 'P2V_SNAP', 'P2V_DF_SCORES', 'P2V_VOLINFO', 'P2V_CONT', 'P2A_INOFILE', 'P2A_INO', 'P2A_DLOG1', 'P2A_HYA', 'P2A_DLOG2', 'P2A_FSINFO', 'P2A_IPU_BITMAP_GROW', 'P2A_REFCOUNT', 'P2A_TOPAA', 'P2A_HYABC', 'P2A_BM', 'P2A_SNAP', 'P2A_VOLINFO', 'P2_FLUSH', 'P2_FINISH', 'P3_WAIT', 'P3V_VOLINFO', 'P3A_VOLINFO', 'P3_FINISH', 'P4_FINISH', 'P5_FINISH', ] PERF_OBJECT_COUNTER_LIST_INFO_WAFL_RESPONSE = etree.XML(""" No. of times 8.3 names are accessed per second. 
access_8_3_names diag rate per_sec Array of counts of different types of CPs wafl_timer generated CP snapshot generated CP wafl_avail_bufs generated CP dirty_blk_cnt generated CP full NV-log generated CP,back-to-back CP flush generated CP,sync generated CP deferred back-to-back CP low mbufs generated CP low datavecs generated CP nvlog replay takeover time limit CP cp_count diag delta array none total_cp_msecs Array of percentage time spent in different phases of CP %(labels)s cp_phase_times diag percent array percent """ % {'labels': ','.join(PERF_OBJECT_COUNTER_TOTAL_CP_MSECS_LABELS)}) PERF_OBJECT_GET_INSTANCES_SYSTEM_RESPONSE_CMODE = etree.XML(""" avg_processor_busy 5674745133134 system %(node1)s:kernel:system avg_processor_busy 4077649009234 system %(node2)s:kernel:system 1453412013 """ % {'node1': NODE_NAMES[0], 'node2': NODE_NAMES[1]}) PERF_OBJECT_INSTANCE_LIST_INFO_ITER_RESPONSE = etree.XML(""" system %(node)s:kernel:system 1 """ % {'node': NODE_NAME}) PERF_OBJECT_INSTANCE_LIST_INFO_RESPONSE = etree.XML(""" processor0 processor1 """) SYSTEM_GET_INFO_RESPONSE = etree.XML(""" %(node)s 4082368508 SIMBOX SIMBOX NetApp 4082368508 2593 NetApp VSim 999999 2 1599 0x40661 15 2199023255552 17592186044416 500 true """ % {'node': NODE_NAME}) ISCSI_INITIATOR_GET_AUTH_ELEM = etree.XML(""" %s """ % INITIATOR_IQN) ISCSI_INITIATOR_AUTH_LIST_INFO_FAILURE = etree.XML(""" """ % INITIATOR_IQN) CLUSTER_NAME = 'fake_cluster' REMOTE_CLUSTER_NAME = 'fake_cluster_2' CLUSTER_ADDRESS_1 = 'fake_cluster_address' CLUSTER_ADDRESS_2 = 'fake_cluster_address_2' VSERVER_NAME = 'fake_vserver' DEST_VSERVER_NAME = 'fake_dest_vserver' VSERVER_NAME_2 = 'fake_vserver_2' ADMIN_VSERVER_NAME = 'fake_admin_vserver' NODE_VSERVER_NAME = 'fake_node_vserver' SM_SOURCE_VSERVER = 'fake_source_vserver' SM_SOURCE_VOLUME = 'fake_source_volume' SM_DEST_VSERVER = 'fake_destination_vserver' SM_DEST_VOLUME = 'fake_destination_volume' SM_SOURCE_CG = 'fake_source_cg' SM_DESTINATION_CG = 'fake_destination_cg' IGROUP_NAME = 'openstack-d9b4194f-5f65-4952-fake-26c911f1e4b2' LUN_NAME_PATH = '/vol/volume-fake/lun-path-fake-1234' CLUSTER_PEER_GET_ITER_RESPONSE = etree.XML(""" %(addr1)s %(addr2)s available %(cluster)s fake_uuid %(addr1)s %(remote_cluster)s fake_serial_number 60 1 """ % { 'addr1': CLUSTER_ADDRESS_1, 'addr2': CLUSTER_ADDRESS_2, 'cluster': CLUSTER_NAME, 'remote_cluster': REMOTE_CLUSTER_NAME, }) CLUSTER_PEER_POLICY_GET_RESPONSE = etree.XML(""" false 8 """) FILE_SIZES_BY_DIR_GET_ITER_RESPONSE = etree.XML(""" %(name)s 1024 1 """ % { 'name': fake.VOLUME_NAME }) LUN_SIZES_BY_VOLUME_GET_ITER_RESPONSE = etree.XML(""" %(path)s 1024 1 """ % { 'path': fake.VOLUME_PATH }) VSERVER_PEER_GET_ITER_RESPONSE = etree.XML(""" snapmirror %(cluster)s peered %(vserver2)s %(vserver1)s 2 """ % { 'cluster': CLUSTER_NAME, 'vserver1': VSERVER_NAME, 'vserver2': VSERVER_NAME_2 }) SNAPMIRROR_GET_ITER_RESPONSE = etree.XML(""" %(vserver)s:%(volume2)s %(volume2)s fake_destination_node %(vserver)s fake_snapshot 1442701782 false true 2187 109 1442701890 test:manila 1171456 initialize 0 snapmirrored fake_snapshot 1442701782 DPDefault v2 ea8bfcc6-5f1d-11e5-8446-123478563412 idle data_protection daily %(vserver)s:%(volume1)s %(volume1)s %(vserver)s fake_destination_vserver 1 """ % { 'volume1': VOLUME_NAMES[0], 'volume2': VOLUME_NAMES[1], 'vserver': VOLUME_VSERVER_NAME, }) SNAPMIRROR_GET_ITER_FILTERED_RESPONSE = etree.XML(""" fake_destination_vserver fake_destination_volume true snapmirrored daily fake_source_vserver fake_source_volume 1 """) SNAPMIRROR_INITIALIZE_RESULT = 
etree.XML(""" succeeded """) VSERVER_DATA_LIST_RESPONSE = etree.XML(""" %(vserver)s data 1 """ % {'vserver': VSERVER_NAME}) GET_CLUSTER_NAME_RESPONSE = etree.XML(""" %(cluster)s """ % {'cluster': CLUSTER_NAME}) START_LUN_MOVE_RESPONSE = etree.XML(""" %(job_uuid)s """ % {'job_uuid': fake.JOB_UUID}) GET_LUN_MOVE_STATUS_RESPONSE = etree.XML(""" complete """) START_LUN_COPY_RESPONSE = etree.XML(""" %(job_uuid)s """ % {'job_uuid': fake.JOB_UUID}) GET_LUN_COPY_STATUS_RESPONSE = etree.XML(""" complete """) CANCEL_LUN_COPY_RESPONSE = etree.XML(""" """) START_FILE_COPY_RESPONSE = etree.XML(""" %(job_uuid)s """ % {'job_uuid': fake.JOB_UUID}) GET_FILE_COPY_STATUS_RESPONSE = etree.XML(""" complete 1 """) DESTROY_FILE_COPY_RESPONSE = etree.XML(""" """) VOLUME_GET_ITER_SSC_RESPONSE_STR_FLEXGROUP_REST = { "uuid": "2407b637-119c-11ec-a4fb", "language": "c.utf_8", "name": VOLUME_NAMES[0], "style": "flexgroup", "is_svm_root": False, "type": "rw", "aggregates": [ { "name": VOLUME_AGGREGATE_NAMES[0] } ], "error_state": { "is_inconsistent": False }, "nas": { "path": '/' + VOLUME_NAMES[0] }, "snapshot_policy": { "name": "default", "uuid": "e7b0f455-fc15-11ea-b64a" }, "svm": { "name": VOLUME_VSERVER_NAME }, "space": { "size": 12345, "snapshot": { "reserve_percent": 5 } }, "qos": { "policy": { "name": "fake_qos_policy_group_name" } }, "guarantee": { "type": "none", "honored": True }, "_links": { "self": { "href": "/api/storage/volumes/2407b637-119c-11ec-a4fb" } } } VOLUME_GET_ITER_SSC_RESPONSE_FLEXGROUP_REST = { "records": [ VOLUME_GET_ITER_SSC_RESPONSE_STR_FLEXGROUP_REST, ], "num_records": 1, "_links": { "self": { "href": "/api/storage/volumes" } } } VOLUME_GET_ITER_SSC_RESPONSE_STR_REST = { "uuid": "2407b637-119c-11ec-a4fb", "language": "c.utf_8", "name": VOLUME_NAMES[0], "style": "flexvol", "is_svm_root": False, "type": "rw", "aggregates": [ { "name": VOLUME_AGGREGATE_NAMES[0] } ], "error_state": { "is_inconsistent": False }, "nas": { "path": '/' + VOLUME_NAMES[0] }, "snapshot_policy": { "name": "default", "uuid": "e7b0f455-fc15-11ea-b64a" }, "svm": { "name": VOLUME_VSERVER_NAME }, "space": { "size": 12345, "snapshot": { "reserve_percent": 5 } }, "qos": { "policy": { "name": "fake_qos_policy_group_name" } }, "guarantee": { "type": "none", "honored": True }, "efficiency": { "compression": "none", "dedupe": "none", "cross_volume_dedupe": "none", "compaction": "none", "schedule": "-", "volume_path": "/vol/" + VOLUME_NAMES[0], "state": "disabled", "policy": { "name": "-" } }, "_links": { "self": { "href": "/api/storage/volumes/2407b637-119c-11ec-a4fb" } } } VOLUME_GET_ITER_SSC_RESPONSE_REST = { "records": [ VOLUME_GET_ITER_SSC_RESPONSE_STR_REST, ], "num_records": 1, "_links": { "self": { "href": "/api/storage/volumes" } } } VOLUME_GET_ITER_RESPONSE_LIST_REST = [ { "uuid": "2407b637-119c-11ec-a4fb-00a0b89c9a78", "name": VOLUME_NAMES[0], "state": "online", "style": "flexvol", "is_svm_root": False, "type": "rw", "error_state": { "is_inconsistent": False }, "_links": { "self": { "href": "/api/storage/volumes/2407b637-119c-11ec-a4fb" } } }, { "uuid": "2c190609-d51c-11eb-b83a", "name": VOLUME_NAMES[1], "state": "online", "style": "flexvol", "is_svm_root": False, "type": "rw", "error_state": { "is_inconsistent": False }, "_links": { "self": { "href": "/api/storage/volumes/2c190609-d51c-11eb-b83a" } } } ] VOLUME_GET_ITER_LIST_RESPONSE_REST = { "records": [ VOLUME_GET_ITER_RESPONSE_LIST_REST[0], VOLUME_GET_ITER_RESPONSE_LIST_REST[1], ], "num_records": 2, "_links": { "self": { "href": "/api/storage/volumes" } } } 
VOLUME_ITEM_SIMPLE_RESPONSE_REST = { "uuid": "2407b637-119c-11ec-a4fb-00a0b89c9a78", "name": VOLUME_NAMES[0], "style": 'flexvol', "_links": { "self": { "href": "/api/storage/volumes/2407b637-119c-11ec-a4fb-00a0b89c9a78" } } } VOLUME_LIST_SIMPLE_RESPONSE_REST = { "records": [ VOLUME_ITEM_SIMPLE_RESPONSE_REST ], "num_records": 1, "_links": { "self": { "href": "/api/storage/volumes" } } } NO_RECORDS_RESPONSE_REST = { "records": [], "num_records": 0, } VOLUME_GET_ITER_RESPONSE_REST_PAGE = { "records": [ VOLUME_GET_ITER_RESPONSE_LIST_REST[0], VOLUME_GET_ITER_RESPONSE_LIST_REST[0], VOLUME_GET_ITER_RESPONSE_LIST_REST[0], VOLUME_GET_ITER_RESPONSE_LIST_REST[0], VOLUME_GET_ITER_RESPONSE_LIST_REST[0], VOLUME_GET_ITER_RESPONSE_LIST_REST[0], VOLUME_GET_ITER_RESPONSE_LIST_REST[0], VOLUME_GET_ITER_RESPONSE_LIST_REST[0], VOLUME_GET_ITER_RESPONSE_LIST_REST[0], VOLUME_GET_ITER_RESPONSE_LIST_REST[0], ], "num_records": 10, "_links": { "self": { "href": "/api/storage/volumes?fields=name&max_records=2" }, "next": { "href": "/api/storage/volumes?" f"start.uuid={VOLUME_GET_ITER_RESPONSE_LIST_REST[0]['uuid']}" "&fields=name&max_records=2" } } } VOLUME_GET_ITER_RESPONSE_REST_LAST_PAGE = { "records": [ VOLUME_GET_ITER_RESPONSE_LIST_REST[0], VOLUME_GET_ITER_RESPONSE_LIST_REST[0], VOLUME_GET_ITER_RESPONSE_LIST_REST[0], VOLUME_GET_ITER_RESPONSE_LIST_REST[0], VOLUME_GET_ITER_RESPONSE_LIST_REST[0], VOLUME_GET_ITER_RESPONSE_LIST_REST[0], VOLUME_GET_ITER_RESPONSE_LIST_REST[0], VOLUME_GET_ITER_RESPONSE_LIST_REST[0], ], "num_records": 8, } INVALID_GET_ITER_RESPONSE_NO_RECORDS_REST = { "num_records": 1, } INVALID_GET_ITER_RESPONSE_NO_NUM_RECORDS_REST = { "records": [], } VOLUME_GET_ITER_STYLE_RESPONSE_REST = { "records": [ { "style": "flexgroup", }, ], "num_records": 1, } VOLUME_FLEXGROUP_STYLE_REST = \ VOLUME_GET_ITER_STYLE_RESPONSE_REST["records"][0] VOLUME_GET_ITER_SAME_STYLE_RESPONSE_REST = { "records": [ { "style": "flexvol", }, { "style": "flexvol", }, { "style": "flexvol", }, ], "num_records": 3, } GET_NUM_RECORDS_RESPONSE_REST = { "num_records": 1, } AGGR_GET_ITER_RESPONSE_REST = { "records": [ { "uuid": "6aad2b76-a069-47e9-93ee-e501ebf2cdd2", "name": VOLUME_AGGREGATE_NAMES[1], "node": { "uuid": "2ac8f13a-fc16-11ea-8799-52540006bba9", "name": NODE_NAME }, "home_node": { "uuid": "2ac8f13a-fc16-11ea-8799-52540006bba9", "name": NODE_NAME }, "snapshot": { "files_total": 0, "files_used": 0, "max_files_available": 0, "max_files_used": 0 }, "space": { "footprint": 58491584512, "footprint_percent": 4, "block_storage": { "size": AGGR_SIZE_TOTAL, "available": AGGR_SIZE_AVAILABLE, "used": AGGR_SIZE_USED, "inactive_user_data": 0, "inactive_user_data_percent": 0, "full_threshold_percent": 98, "physical_used": 7706808320, "physical_used_percent": 1, "aggregate_metadata": 397373440, "aggregate_metadata_percent": 0, "used_including_snapshot_reserve": 58888957952, "used_including_snapshot_reserve_percent": 4, "data_compacted_count": 0, "data_compaction_space_saved": 0, "data_compaction_space_saved_percent": 0, "volume_deduplication_shared_count": 0, "volume_deduplication_space_saved": 0, "volume_deduplication_space_saved_percent": 0 }, "snapshot": { "used_percent": 0, "available": 0, "total": 0, "used": 0, "reserve_percent": 0 }, "cloud_storage": { "used": 0 }, "efficiency": { "savings": 0, "ratio": 1, "logical_used": 117510144 }, "efficiency_without_snapshots": { "savings": 0, "ratio": 1, "logical_used": 9617408 }, "efficiency_without_snapshots_flexclones": { "savings": 0, "ratio": 1, "logical_used": 9617408 } }, "state": "online", 
"snaplock_type": "non_snaplock", "create_time": "2020-09-21T14:45:11+00:00", "data_encryption": { "software_encryption_enabled": False, "drive_protection_enabled": False }, "block_storage": { "primary": { "disk_count": 1, "disk_class": "virtual", "raid_type": "raid0", "raid_size": 8, "checksum_style": "advanced_zoned", "disk_type": "vm_disk" }, "hybrid_cache": { "enabled": False }, "mirror": { "enabled": False, "state": "unmirrored" }, "plexes": [ { "name": "plex0" } ], "storage_type": "hdd" }, "cloud_storage": { "attach_eligible": True }, "inactive_data_reporting": { "enabled": False }, "metric": { "timestamp": "2021-12-21T13:25:15Z", "duration": "PT15S", "status": "ok", "throughput": { "read": 0, "write": 13107, "other": 0, "total": 13107 }, "latency": { "read": 0, "write": 2659, "other": 0, "total": 2659 }, "iops": { "read": 0, "write": 0, "other": 0, "total": 0 } }, "statistics": { "timestamp": "2021-12-21T13:25:21Z", "status": "ok", "throughput_raw": { "read": 3699994624, "write": 111813349376, "other": 0, "total": 115513344000 }, "latency_raw": { "read": 1884163936, "write": 9308463160, "other": 0, "total": 11192627096 }, "iops_raw": { "read": 242498, "write": 4871034, "other": 0, "total": 5113532 } } }, { "uuid": "ad20dafb-1dcb-483a-b457-012ae9225062", "name": VOLUME_AGGREGATE_NAMES[0], "node": { "uuid": "2ac8f13a-fc16-11ea-8799-52540006bba9", "name": NODE_NAME }, "home_node": { "uuid": "2ac8f13a-fc16-11ea-8799-52540006bba9", "name": NODE_NAME }, "snapshot": { "files_total": 0, "files_used": 0, "max_files_available": 0, "max_files_used": 0 }, "space": { "footprint": 172316893184, "footprint_percent": 14, "block_storage": { "size": 1271819509760, "available": 1099709939712, "used": 172109570048, "inactive_user_data": 0, "inactive_user_data_percent": 0, "full_threshold_percent": 98, "physical_used": 27038863360, "physical_used_percent": 2, "aggregate_metadata": 0, "aggregate_metadata_percent": 0, "used_including_snapshot_reserve": 172109570048, "used_including_snapshot_reserve_percent": 14, "data_compacted_count": 0, "data_compaction_space_saved": 0, "data_compaction_space_saved_percent": 0, "volume_deduplication_shared_count": 0, "volume_deduplication_space_saved": 0, "volume_deduplication_space_saved_percent": 0 }, "snapshot": { "used_percent": 0, "available": 0, "total": 0, "used": 0, "reserve_percent": 0 }, "cloud_storage": { "used": 0 }, "efficiency": { "savings": 74937720832, "ratio": 9.238858947247071, "logical_used": 84033363968 }, "efficiency_without_snapshots": { "savings": 0, "ratio": 1, "logical_used": 7005036544 }, "efficiency_without_snapshots_flexclones": { "savings": 0, "ratio": 1, "logical_used": 7005036544 } }, "state": "online", "snaplock_type": "non_snaplock", "create_time": "2020-09-21T14:44:51+00:00", "data_encryption": { "software_encryption_enabled": False, "drive_protection_enabled": False }, "block_storage": { "primary": { "disk_count": 1, "disk_class": "virtual", "raid_type": "raid0", "raid_size": 8, "checksum_style": "advanced_zoned", "disk_type": "vm_disk" }, "hybrid_cache": { "enabled": False }, "mirror": { "enabled": False, "state": "unmirrored" }, "plexes": [ { "name": "plex0" } ], "storage_type": "hdd" }, "cloud_storage": { "attach_eligible": True }, "inactive_data_reporting": { "enabled": False }, "metric": { "timestamp": "2021-12-21T13:25:15Z", "duration": "PT15S", "status": "ok", "throughput": { "read": 0, "write": 27033, "other": 0, "total": 27033 }, "latency": { "read": 0, "write": 1173, "other": 0, "total": 1173 }, "iops": { "read": 0, "write": 
0, "other": 0, "total": 0 } }, "statistics": { "timestamp": "2021-12-21T13:25:21Z", "status": "ok", "throughput_raw": { "read": 5740912640, "write": 132358234112, "other": 0, "total": 138099146752 }, "latency_raw": { "read": 15095876198, "write": 12140289450, "other": 0, "total": 27236165648 }, "iops_raw": { "read": 535930, "write": 6011240, "other": 0, "total": 6547170 } } } ], "num_records": 2 } LUN_GET_ITER_REST = { "records": [ { "uuid": "bd6baab3-4842-45b6-b627-45b305ed2e84", "svm": { "uuid": "fake-uuid", "name": "vserver-name", }, "name": "/vol/nahim_dev_vol01/volume-fake-uuid", "location": { "logical_unit": "volume-fake-uuid", "node": { "name": "node-name", "uuid": "fake-uuid", }, "volume": { "uuid": "fake-uuid", "name": "nahim_dev_vol01", } }, "auto_delete": False, "class": "regular", "create_time": "2021-12-09T14:07:31+00:00", "enabled": True, "lun_maps": [ { "logical_unit_number": 0, "igroup": { "uuid": "fake-uuid", "name": "openstack-fake-uuid", }, } ], "os_type": "linux", "serial_number": "ZlAFA?QMnBdX", "space": { "scsi_thin_provisioning_support_enabled": False, "size": 10737418240, "used": 3474366464, "guarantee": { "requested": False, "reserved": False } }, "status": { "container_state": "online", "mapped": True, "read_only": False, "state": "online" }, "vvol": { "is_bound": False }, "metric": { "timestamp": "2021-12-23T20:36:00Z", "duration": "PT15S", "status": "ok", "throughput": { "read": 0, "write": 0, "other": 0, "total": 0 }, "iops": { "read": 0, "write": 0, "other": 0, "total": 0 }, "latency": { "read": 0, "write": 0, "other": 0, "total": 0 } }, "statistics": { "timestamp": "2021-12-23T20:36:02Z", "status": "ok", "throughput_raw": { "read": 1078230528, "write": 3294724096, "other": 0, "total": 4372954624 }, "iops_raw": { "read": 16641, "write": 51257, "other": 59, "total": 67957 }, "latency_raw": { "read": 2011655, "write": 1235068755, "other": 1402, "total": 1237081812 } }, }, { "uuid": "dff549b8-fabe-466b-8608-871a6493b492", "svm": { "uuid": "fake-uuid", "name": "vserver-name", "_links": { "self": { "href": "/api/svm/svms/fake-uuid" } } }, "name": "/vol/nahim_dev_vol01/volume-fake-uuid", "location": { "logical_unit": "volume-fake-uuid", "node": { "name": "node-name", "uuid": "fake-uuid", "_links": { "self": { "href": "/api/cluster/nodes/fake-uuid" } } }, "volume": { "uuid": "fake-uuid", "name": "nahim_dev_vol01", "_links": { "self": { "href": "/api/storage/volumes/fake-uuid" } } } }, "auto_delete": False, "class": "regular", "create_time": "2021-12-14T18:12:38+00:00", "enabled": True, "os_type": "linux", "serial_number": "ZlAFA?QMnBdf", "space": { "scsi_thin_provisioning_support_enabled": False, "size": 5368709120, "used": 0, "guarantee": { "requested": False, "reserved": False } }, "status": { "container_state": "online", "mapped": False, "read_only": False, "state": "online" }, "vvol": { "is_bound": False }, } ], "num_records": 2, } LUN_GET_ITER_RESULT = [ { 'Vserver': LUN_GET_ITER_REST['records'][0]['svm']['name'], 'Volume': LUN_GET_ITER_REST['records'][0]['location']['volume']['name'], 'Size': LUN_GET_ITER_REST['records'][0]['space']['size'], 'Qtree': (LUN_GET_ITER_REST['records'][0]['location'] .get('qtree', {}).get('name', '')), 'Path': LUN_GET_ITER_REST['records'][0]['name'], 'OsType': LUN_GET_ITER_REST['records'][0]['os_type'], 'SpaceReserved': LUN_GET_ITER_REST['records'][0]['space']['guarantee']['requested'], 'SpaceAllocated': LUN_GET_ITER_REST['records'][0]['space'] ['scsi_thin_provisioning_support_enabled'], 'UUID': 
LUN_GET_ITER_REST['records'][0]['uuid'], }, { 'Vserver': LUN_GET_ITER_REST['records'][1]['svm']['name'], 'Volume': LUN_GET_ITER_REST['records'][1]['location']['volume']['name'], 'Size': LUN_GET_ITER_REST['records'][1]['space']['size'], 'Qtree': (LUN_GET_ITER_REST['records'][1]['location'] .get('qtree', {}).get('name', '')), 'Path': LUN_GET_ITER_REST['records'][1]['name'], 'OsType': LUN_GET_ITER_REST['records'][1]['os_type'], 'SpaceReserved': LUN_GET_ITER_REST['records'][1]['space']['guarantee']['requested'], 'SpaceAllocated': LUN_GET_ITER_REST['records'][1]['space'] ['scsi_thin_provisioning_support_enabled'], 'UUID': LUN_GET_ITER_REST['records'][1]['uuid'], }, ] FILE_DIRECTORY_GET_ITER_REST = { "_links": { "next": { "href": "/api/resourcelink" }, "self": { "href": "/api/resourcelink" } }, "num_records": 2, "records": [ { "_links": { "metadata": { "href": "/api/resourcelink" }, "self": { "href": "/api/resourcelink" } }, "name": "test_file", "path": "d1/d2/d3", "size": 200, "type": "file" }, { "_links": { "metadata": { "href": "/api/resourcelink" }, "self": { "href": "/api/resourcelink" } }, "name": "test_file_2", "path": "d1/d2/d3", "size": 250, "type": "file" } ] } FILE_DIRECTORY_GET_ITER_RESULT_REST = [ { 'name': FILE_DIRECTORY_GET_ITER_REST['records'][0]['name'], 'file-size': float(FILE_DIRECTORY_GET_ITER_REST['records'][0]['size']) }, { 'name': FILE_DIRECTORY_GET_ITER_REST['records'][1]['name'], 'file-size': float(FILE_DIRECTORY_GET_ITER_REST['records'][1]['size']) } ] LUN_GET_MOVEMENT_REST = { "_links": { "self": { "href": "/api/resourcelink" } }, "name": "/vol/volume1/qtree1/lun1", "uuid": "1cd8a442-86d1-11e0-ae1c-123478563412", "movement": { "progress": { "elapsed": 0, "failure": { "arguments": [ { "code": "string", "message": "string" } ], "code": "4", "message": "entry doesn't exist", "target": "uuid" }, "percent_complete": 0, "state": "preparing", "volume_snapshot_blocked": True } } } LUN_GET_COPY_REST = { "_links": { "self": { "href": "/api/resourcelink" } }, "name": "/vol/volume1/qtree1/lun1", "uuid": "1cd8a442-86d1-11e0-ae1c-123478563412", "copy": { "source": { "_links": { "self": { "href": "/api/resourcelink" } }, "progress": { "elapsed": 0, "failure": { "arguments": [ { "code": "string", "message": "string" } ], "code": "4", "message": "entry doesn't exist", "target": "uuid" }, "percent_complete": 0, "state": "preparing", "volume_snapshot_blocked": True }, } }, } VOLUME_GET_ITER_STATE_RESPONSE_REST = { "records": [ { "uuid": "c19aef05-ac60-4211-9fe4-3ef8c8816c83", "name": "fake_volume", "state": VOLUME_STATE_ONLINE, "style": "flexvol", "nas": { "path": "/fake/vol" }, } ], "num_records": 1, } GET_OPERATIONAL_LIF_ADDRESSES_RESPONSE_REST = { 'records': [ { 'uuid': 'fake_uuid_1', 'name': 'vserver_name', 'ip': {'address': '1.2.3.4'}, 'state': 'up' }, { 'uuid': 'fake_uuid_2', 'name': 'vserver_name', 'ip': {'address': '99.98.97.96'}, 'state': 'up' } ], 'num_records': 2 } ERROR_RESPONSE_REST = { "error": { "code": 1100, "message": "fake error", } } FAKE_ACTION_ENDPOINT = '/fake_endpoint' FAKE_BASE_ENDPOINT = '/fake_api' FAKE_HEADERS = {'header': 'fake_header'} FAKE_BODY = {'body': 'fake_body'} FAKE_HTTP_QUERY = {'type': 'fake_type'} FAKE_FORMATTED_HTTP_QUERY = '?type=fake_type' JOB_RESPONSE_REST = { "job": { "uuid": FAKE_UUID, "_links": { "self": { "href": f"/api/cluster/jobs/{FAKE_UUID}" } } } } VSERVER_DATA_LIST_RESPONSE_REST = { 'records': [ { 'name': VSERVER_NAME }, { 'name': VSERVER_NAME_2 } ], 'num_records': 2, } PERF_COUNTER_LIST_INFO_WAFL_RESPONSE_REST = { 'name': 'wafl', 
'counter_schemas': [ { 'name': 'cp_phase_times', 'description': 'Array of percentage time spent in different phases' + ' of Consistency Point (CP).', 'type': 'percent', 'unit': 'percent', 'denominator': { 'name': 'total_cp_msecs' } } ], } PERF_COUNTER_TOTAL_CP_MSECS_LABELS_REST = [ 'cp_setup', 'cp_pre_p0', 'cp_p0_snap_del', 'cp_p1_clean', 'cp_p1_quota', 'cp_ipu_disk_add', 'cp_p2v_inofile', 'cp_p2v_ino_pub', 'cp_p2v_ino_pri', 'cp_p2v_fsinfo', 'cp_p2v_dlog1', 'cp_p2v_dlog2', 'cp_p2v_refcount', 'cp_p2v_topaa', 'cp_p2v_df_scores_sub', 'cp_p2v_bm', 'cp_p2v_snap', 'cp_p2v_df_scores', 'cp_p2v_volinfo', 'cp_p2v_cont', 'cp_p2a_inofile', 'cp_p2a_ino', 'cp_p2a_dlog1', 'cp_p2a_hya', 'cp_p2a_dlog2', 'cp_p2a_fsinfo', 'cp_p2a_ipu_bitmap_grow', 'cp_p2a_refcount', 'cp_p2a_topaa', 'cp_p2a_hyabc', 'cp_p2a_bm', 'cp_p2a_snap', 'cp_p2a_volinfo', 'cp_p2_flush', 'cp_p2_finish', 'cp_p3_wait', 'cp_p3v_volinfo', 'cp_p3a_volinfo', 'cp_p3_finish', 'cp_p4_finish', 'cp_p5_finish', ] PERF_COUNTER_TOTAL_CP_MSECS_LABELS_RESULT = [ label[3:] for label in PERF_COUNTER_TOTAL_CP_MSECS_LABELS_REST ] PERF_COUNTER_TOTAL_CP_MSECS_VALUES_REST = [ 0, 3112, 3, 0, 0, 3, 757, 0, 99, 0, 26, 0, 22, 1, 0, 194, 4, 224, 359, 222, 0, 0, 0, 0, 0, 0, 82, 0, 0, 0, 0, 0, 0, 62, 0, 133, 16, 35, 334219, 43, 2218, 20, 0, ] PERF_COUNTER_TABLE_ROWS_WAFL = { 'records': [ { 'id': NODE_NAME + ':wafl', 'counters': [ { 'name': 'cp_phase_times', 'values': PERF_COUNTER_TOTAL_CP_MSECS_VALUES_REST, 'labels': PERF_COUNTER_TOTAL_CP_MSECS_LABELS_REST } ], } ], 'num_records': 1, } PERF_COUNTER_DOMAIN_BUSY_LABELS = [ 'exempt', 'ha', 'host_os', 'idle', 'kahuna', 'kahuna_legacy', 'none', 'nwk_exempt', 'network', 'protocol', 'raid', 'raid_exempt', 'sm_exempt', 'ssan_exempt', 'storage', 'target', 'unclassified', 'wafl_exempt', 'wafl_mpcleaner', 'xor_exempt', 'ssan_exempt2', 'exempt_ise', 'zombie', ] PERF_COUNTER_DOMAIN_BUSY_VALUES_1 = [ 83071627197, 1334877, 19459898, 588539096, 11516887, 14878622, 18, 647698, 20, 229232646, 4310322, 441035, 12946782, 57837913, 38765442, 1111004351701, 1497335, 949657, 109890, 768027, 21, 14, 13 ] PERF_COUNTER_DOMAIN_BUSY_VALUES_2 = [ 1191129018056, 135991, 22842513, 591213798, 9449562, 15345460, 0, 751656, 0, 162605694, 3927323, 511160, 7644403, 29696759, 21787992, 3585552592, 1058902, 957296, 87811, 499766, 0, 0, 0 ] PERF_COUNTER_ELAPSED_TIME_1 = 1199265469753 PERF_COUNTER_ELAPSED_TIME_2 = 1199265469755 PERF_GET_INSTANCES_PROCESSOR_RESPONSE_REST = { 'records': [ { 'counter_table': { 'name': 'processor' }, 'id': NODE_NAME + ':processor0', 'counters': [ { 'name': 'domain_busy_percent', 'values': PERF_COUNTER_DOMAIN_BUSY_VALUES_1, 'labels': PERF_COUNTER_DOMAIN_BUSY_LABELS }, { 'name': 'elapsed_time', 'value': PERF_COUNTER_ELAPSED_TIME_1, } ], }, { 'counter_table': { 'name': 'processor' }, 'id': NODE_NAME + ':processor1', 'counters': [ { 'name': 'domain_busy_percent', 'values': PERF_COUNTER_DOMAIN_BUSY_VALUES_2, 'labels': PERF_COUNTER_DOMAIN_BUSY_LABELS }, { 'name': 'elapsed_time', 'value': PERF_COUNTER_ELAPSED_TIME_2, } ], } ], 'num_records': 2, } PERF_COUNTERS_PROCESSOR_EXPECTED = [ { 'instance-name': 'processor', 'instance-uuid': NODE_NAME + ':processor0', 'node-name': NODE_NAME, 'timestamp': mock.ANY, 'domain_busy': ','.join([str(v) for v in PERF_COUNTER_DOMAIN_BUSY_VALUES_1]) }, { 'instance-name': 'processor', 'instance-uuid': NODE_NAME + ':processor0', 'node-name': NODE_NAME, 'timestamp': mock.ANY, 'processor_elapsed_time': PERF_COUNTER_ELAPSED_TIME_1 }, { 'instance-name': 'processor', 'instance-uuid': NODE_NAME + ':processor1', 
'node-name': NODE_NAME, 'timestamp': mock.ANY, 'domain_busy': ','.join([str(v) for v in PERF_COUNTER_DOMAIN_BUSY_VALUES_2]) }, { 'instance-name': 'processor', 'instance-uuid': NODE_NAME + ':processor1', 'node-name': NODE_NAME, 'timestamp': mock.ANY, 'processor_elapsed_time': PERF_COUNTER_ELAPSED_TIME_2 }, ] SINGLE_IGROUP_REST = { "svm": { "uuid": FAKE_UUID, "name": VOLUME_VSERVER_NAME, }, "uuid": FAKE_UUID, "name": "openstack-e6bf1584-bfb3-4cdb-950d-525bf6f26b53", "protocol": "iscsi", "os_type": "linux", "initiators": [ { "name": "iqn.1993-08.org.fake:01:5b67769f5c5e", } ], } IGROUP_GET_ITER_REST = { "records": [ SINGLE_IGROUP_REST ], "num_records": 1, } IGROUP_GET_ITER_MULT_REST = { "records": [ SINGLE_IGROUP_REST, SINGLE_IGROUP_REST ], "num_records": 2, } IGROUP_GET_ITER_INITS_REST = { "records": [ { "svm": { "uuid": FAKE_UUID, "name": VOLUME_VSERVER_NAME, }, "uuid": FAKE_UUID, "name": "openstack-e6bf1584-bfb3-4cdb-950d-525bf6f26b53", "protocol": "iscsi", "os_type": "linux", "initiators": [ { "name": "iqn.1993-08.org.fake:01:5b67769f5c5e", }, { "name": "iqn.1993-08.org.fake:02:5b67769f5c5e", } ], } ], "num_records": 1, } GET_LUN_MAP_REST = { "records": [ { "svm": { "uuid": FAKE_UUID, "name": VSERVER_NAME, }, "lun": { "uuid": "6c2969dc-b022-434c-b7cd-9240bs975187", "name": LUN_NAME_PATH, }, "igroup": { "uuid": "08088517-a6f5-11ec-82cc-00a0b89c9a78", "name": IGROUP_NAME, }, "logical_unit_number": 0, } ], "num_records": 1, } FC_INTERFACE_REST = { "records": [ { "data_protocol": "fcp", "location": { "port": { "name": "0a", "uuid": FAKE_UUID, "node": { "name": "node1" } }, "node": { "name": "node1", "uuid": FAKE_UUID, } }, "wwpn": "20:00:00:50:56:b4:13:a8", "name": "lif1", "uuid": FAKE_UUID, "state": "up", "port_address": "5060F", "wwnn": "20:00:00:50:56:b4:13:01", "comment": "string", "svm": { "name": VOLUME_VSERVER_NAME, "uuid": FAKE_UUID, }, "enabled": True } ], "num_records": 1 } GET_LUN_MAPS = { "records": [ { "svm": { "uuid": "77deec3a-38ea-11ec-aca8-00a0b89c9a78", "name": VOLUME_NAME, }, "uuid": "99809170-a92c-11ec-82cc-0aa0b89c9a78", "name": "openstack-626d20dc-c420-4a5a-929c-59178d64f2c5", "initiators": [ { "name": "iqn.2005-03.org.open-iscsi:49ebe8a87d1", } ], "lun_maps": [ { "logical_unit_number": 0, "lun": { "name": LUN_NAME_PATH, "uuid": "91e83a0a-72c3-4278-9a24-f2f8135aa5db", "node": { "name": CLUSTER_NAME, "uuid": "9eff6c76-fc13-11ea-8799-525a0006bba9", }, }, } ], } ], "num_records": 1, } SNAPMIRROR_GET_ITER_RESPONSE_REST = { "records": [ { "uuid": FAKE_UUID, "source": { "path": SM_SOURCE_VSERVER + ':' + SM_SOURCE_VOLUME, "svm": { "name": SM_SOURCE_VSERVER } }, "destination": { "path": SM_DEST_VSERVER + ':' + SM_DEST_VOLUME, "svm": { "name": SM_DEST_VSERVER } }, "policy": { "type": "async" }, "state": "snapmirrored", "transfer": {"state": "success"}, "healthy": True } ], "num_records": 1, } GET_LUN_MAPS_NO_MAPS = { "records": [ { "svm": { "uuid": "77deec3a-38ea-11ec-aca8-00a0b89c9a78", "name": VOLUME_NAME, }, "uuid": "99809170-a92c-11ec-82cc-0aa0b89c9a78", "name": "openstack-626d20dc-c420-4a5a-929c-59178d64f2c5", "initiators": [ { "name": "iqn.2005-03.org.open-iscsi:49ebe8a87d1", } ], } ], "num_records": 1, } GET_ISCSI_SERVICE_DETAILS_REST = { "records": [ { "svm": { "uuid": FAKE_UUID, "name": VOLUME_VSERVER_NAME, }, "target": { "name": INITIATOR_IQN }, } ], "num_records": 1, } CHECK_ISCSI_INITIATOR_REST = { "records": [ { "svm": { "uuid": FAKE_UUID, "name": VOLUME_VSERVER_NAME, }, "initiator": INITIATOR_IQN, } ], "num_records": 1, } GET_ISCSI_TARGET_DETAILS_REST = { "records": 
[ { "uuid": FAKE_UUID, "name": VOLUME_VSERVER_NAME, "ip": { "address": "192.168.1.254" }, "enabled": True, "services": [ "data_core", "data_iscsi" ], } ], "num_records": 1, } VOLUME_GET_ITER_CAPACITY_RESPONSE_REST = { "records": [ { "uuid": FAKE_UUID, "name": VOLUME_NAME, "space": { "available": VOLUME_SIZE_AVAILABLE, "afs_total": VOLUME_SIZE_TOTAL }, } ], "num_records": 1, } REST_GET_SNAPMIRRORS_RESPONSE = [{ 'destination-volume': SM_DEST_VOLUME, 'destination-vserver': SM_DEST_VSERVER, 'is-healthy': True, 'lag-time': None, 'last-transfer-end-timestamp': None, 'mirror-state': 'snapmirrored', 'relationship-status': 'idle', 'source-volume': SM_SOURCE_VOLUME, 'source-vserver': SM_SOURCE_VSERVER, 'uuid': FAKE_UUID, 'transferring-state': 'success', }] TRANSFERS_GET_ITER_REST = { "records": [ { "uuid": FAKE_UUID, "state": "transferring" }, { "uuid": FAKE_UUID, "state": "failed" } ], "num_records": 2, } JOB_SUCCESSFUL_REST = { "uuid": FAKE_UUID, "description": "Fake description", "state": "success", "message": "success", "code": 0, "start_time": "2022-02-18T20:08:03+00:00", "end_time": "2022-02-18T20:08:04+00:00", } JOB_ERROR_REST = { "uuid": FAKE_UUID, "description": "Fake description", "state": "failure", "message": "failure", "code": -1, "start_time": "2022-02-18T20:08:03+00:00", "end_time": "2022-02-18T20:08:04+00:00", } GET_CLUSTER_NAME_RESPONSE_REST = { "name": CLUSTER_NAME, "uuid": "fake-cluster-uuid" } # ASA r2 specific cluster info response GET_CLUSTER_INFO_RESPONSE_REST = { "name": "jayaanancluster-1", "_links": { "self": { "href": "/api/cluster" } } } # ASA r2 specific cluster capacity response GET_CLUSTER_CAPACITY_RESPONSE_REST = { "efficiency_without_snapshots": { "ratio": 1, "logical_used": 692224 }, "block_storage": { "size": 234712203264, "available": 117230436352, "physical_used": 117481766912, "physical_used_percent": 50, "total_metadata_used": 117481066496, "log_and_recovery_metadata": 117356101632, "delayed_frees": 87412736, "full_threshold_percent": 98, "nearly_full_threshold_percent": 95 }, "metric": { "timestamp": "2025-08-05T08:56:45Z", "status": "inconsistent_old_data", "duration": "PT15S", "available_size": 117230436352, "used_size": 117481766912, "total_size": 234712203264 }, "_links": { "self": { "href": "/api/storage/cluster?fields=**" } } } GET_VSERVER_PEERS_RECORDS_REST = [ { "_links": { "self": { "href": "/api/resourcelink" } }, "applications": [ "snapmirror", "lun_copy" ], "name": CLUSTER_NAME, "peer": { "cluster": { "_links": { "self": { "href": "/api/resourcelink" } }, "name": REMOTE_CLUSTER_NAME, "uuid": "fake-cluster-uuid-2" }, "svm": { "_links": { "self": { "href": "/api/resourcelink" } }, "name": VSERVER_NAME_2, "uuid": "fake-svm-uuid-2" } }, "state": "peered", "svm": { "_links": { "self": { "href": "/api/resourcelink" } }, "name": VSERVER_NAME, "uuid": "fake-svm-uuid" }, "uuid": "fake-cluster-uuid" } ] GET_VSERVER_PEERS_RESPONSE_REST = { "_links": { "next": { "href": "/api/resourcelink" }, "self": { "href": "/api/resourcelink" } }, "num_records": 1, "records": GET_VSERVER_PEERS_RECORDS_REST } GET_NAMESPACE_RESPONSE_REST = { "records": [ { "uuid": "fake_uuid1", "svm": { "name": "fake_vserver1" }, "name": "/vol/fake_vol_001/test", "location": { "volume": { "name": "fake_vol_001" } }, "os_type": "linux", "space": { "block_size": 9999, "size": 999999, "guarantee": { "requested": True } }, }, { "uuid": "fake_uuid2", "svm": { "name": "fake_vserver2" }, "name": "/vol/fake_vol_002/test", "location": { "volume": { "name": "fake_vol_002" } }, "os_type": "linux", 
"space": { "block_size": 8888, "size": 8888888, "guarantee": { "requested": True } }, } ], "num_records": 2, } SUBSYSTEM = 'openstack-fake_subsystem' SUBSYSTEM_UUID = 'fake_subsystem_uuid1' TARGET_NQN = 'nqn.1992-01.example.com:target' HOST_NQN = 'nqn.1992-01.example.com:host' GET_SUBSYSTEM_RESPONSE_REST = { "records": [ { "uuid": "fake_uuid1", "name": SUBSYSTEM, "os_type": "linux", "target_nqn": TARGET_NQN, } ], "num_records": 1, } GET_SUBSYSTEM_MAP_RESPONSE_REST = { "records": [ { "namespace": { "uuid": FAKE_UUID, }, "subsystem": { "name": SUBSYSTEM, "uuid": FAKE_UUID, }, "svm": { "name": VSERVER_NAME }, }, ], "num_records": 1, } GET_INTERFACES_NVME_REST = { 'records': [ { "ip": { "address": "10.10.10.10", } } ], 'num_records': 1 } GET_AGGREGATE_STORAGE_TYPES_RESPONSE_REST = { "records": [ { "uuid": "3e5e2865-af43-4d82-a808-8a7222cf0369", "name": "dataFA_2_p0_i1", "block_storage": { "storage_type": "ssd", "primary": { "disk_class": "solid_state", "raid_size": 29, "disk_type": "ssd" } } } ], "num_records": 1 } GET_AGGREGATE_STORAGE_TYPES_MULTIPLE_RESPONSE_REST = { "records": [ { "uuid": "3e5e2865-af43-4d82-a808-8a7222cf0369", "name": "dataFA_2_p0_i1", "block_storage": { "storage_type": "ssd", "primary": { "disk_class": "solid_state", "disk_type": "ssd" } } }, { "uuid": "4f6f3976-bg54-5e93-b919-9b8333dg1480", "name": "dataFA_2_p0_i2", "block_storage": { "storage_type": "ssd", "primary": { "disk_class": "solid_state", "disk_type": "ssd" } } } ], "num_records": 2 } GET_AGGREGATE_STORAGE_TYPES_EMPTY_RESPONSE_REST = { "records": [], "num_records": 0 } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/volume/drivers/netapp/dataontap/client/test_api.py0000664000175000017500000012150200000000000030344 0ustar00zuulzuul00000000000000# Copyright (c) 2014 Ben Swartzlander. All rights reserved. # Copyright (c) 2014 Navneet Singh. All rights reserved. # Copyright (c) 2014 Clinton Knight. All rights reserved. # Copyright (c) 2014 Alex Meade. All rights reserved. # Copyright (c) 2014 Bob Callaway. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""Tests for NetApp API layer""" from unittest import mock import urllib import ddt from lxml import etree from oslo_serialization import jsonutils from oslo_utils import netutils import paramiko import requests from requests import auth from cinder import exception from cinder.i18n import _ from cinder.tests.unit import test from cinder.tests.unit.volume.drivers.netapp.dataontap.client import ( fakes as zapi_fakes) from cinder.volume.drivers.netapp.dataontap.client import api as netapp_api @ddt.ddt class NetAppApiServerTests(test.TestCase): """Test case for NetApp API server methods""" def setUp(self): self.root = netapp_api.NaServer('127.0.0.1') super(NetAppApiServerTests, self).setUp() @ddt.data( {'host': '127.0.0.1', 'ssl_cert_path': None, 'port': 8080, 'api_trace_pattern': None }, {'host': '127.0.0.1', 'ssl_cert_path': '/test/fake_cert.pem', 'port': 8080, 'api_trace_pattern': 'pattern' }, ) @ddt.unpack def test__init__ssl_verify(self, host, ssl_cert_path, port, api_trace_pattern): with mock.patch( 'cinder.volume.drivers.netapp.utils.setup_api_trace_pattern' ) as mock_trace: server = netapp_api.NaServer( host=host, ssl_cert_path=ssl_cert_path, port=port, api_trace_pattern=api_trace_pattern ) self.assertEqual(server._host, host) self.assertEqual(server._port, str(port) if port else None) self.assertEqual(server._refresh_conn, True) if api_trace_pattern: mock_trace.assert_called_once_with(api_trace_pattern) else: mock_trace.assert_not_called() @ddt.data(None, 'ftp') def test_set_transport_type_value_error(self, transport_type): """Tests setting an invalid transport type""" self.assertRaises(ValueError, self.root.set_transport_type, transport_type) @ddt.data({'params': {'transport_type': 'http', 'server_type_filer': 'filer'}}, {'params': {'transport_type': 'http', 'server_type_filer': 'xyz'}}, {'params': {'transport_type': 'https', 'server_type_filer': 'filer'}}, {'params': {'transport_type': 'https', 'server_type_filer': 'xyz'}}) @ddt.unpack def test_set_transport_type_valid(self, params): """Tests setting a valid transport type""" self.root._server_type = params['server_type_filer'] mock_invoke = self.mock_object(self.root, 'set_port') self.root.set_transport_type(params['transport_type']) expected_call_args = zapi_fakes.FAKE_CALL_ARGS_LIST self.assertIn(mock_invoke.call_args, expected_call_args) @ddt.data('stor', 'STORE', '') def test_set_server_type_value_error(self, server_type): """Tests Value Error on setting the wrong server type""" self.assertRaises(ValueError, self.root.set_server_type, server_type) @ddt.data('!&', '80na', '') def test_set_port__value_error(self, port): """Tests Value Error on trying to set port with a non-integer""" self.assertRaises(ValueError, self.root.set_port, port) @ddt.data('!&', '80na', '') def test_set_timeout_value_error(self, timeout): """Tests Value Error on trying to set port with a non-integer""" self.assertRaises(ValueError, self.root.set_timeout, timeout) @ddt.data({'params': {'major': 1, 'minor': '20a'}}, {'params': {'major': '20a', 'minor': 1}}, {'params': {'major': '!*', 'minor': '20a'}}) @ddt.unpack def test_set_api_version_value_error(self, params): """Tests Value Error on setting values incompatible with integer""" self.assertRaises(ValueError, self.root.set_api_version, **params) def test_set_api_version_valid(self): """Tests no Error on setting values compatible with integer""" args = {'major': '20', 'minor': 1} self.root.set_api_version(**args) @ddt.data({'params': {'result': zapi_fakes.FAKE_RESULT_API_ERR_REASON}}, {'params': 
{'result': zapi_fakes.FAKE_RESULT_API_ERRNO_INVALID}}, {'params': {'result': zapi_fakes.FAKE_RESULT_API_ERRNO_VALID}}) @ddt.unpack def test_invoke_successfully_naapi_error(self, params): """Tests invoke successfully raising NaApiError""" self.mock_object(self.root, 'send_http_request', return_value=params['result']) self.assertRaises(netapp_api.NaApiError, self.root.invoke_successfully, zapi_fakes.FAKE_NA_ELEMENT) def test_invoke_successfully_no_error(self): """Tests invoke successfully with no errors""" self.mock_object(self.root, 'send_http_request', return_value=zapi_fakes.FAKE_RESULT_SUCCESS) self.assertEqual(zapi_fakes.FAKE_RESULT_SUCCESS.to_string(), self.root.invoke_successfully( zapi_fakes.FAKE_NA_ELEMENT).to_string()) def test__create_request(self): """Tests method _create_request""" self.root._ns = zapi_fakes.FAKE_XML_STR self.root._api_version = '1.20' self.mock_object(self.root, '_enable_tunnel_request') self.mock_object(netapp_api.NaElement, 'add_child_elem') self.mock_object(netapp_api.NaElement, 'to_string', return_value=zapi_fakes.FAKE_XML_STR) mock_invoke = self.mock_object(urllib.request, 'Request') self.root._create_request(zapi_fakes.FAKE_NA_ELEMENT, True) self.assertTrue(mock_invoke.called) @ddt.data({'params': {'server': zapi_fakes.FAKE_NA_SERVER_API_1_5}}, {'params': {'server': zapi_fakes.FAKE_NA_SERVER_API_1_14}}) @ddt.unpack def test__enable_tunnel_request__value_error(self, params): """Tests value errors with creating tunnel request""" self.assertRaises(ValueError, params['server']._enable_tunnel_request, 'test') def test__enable_tunnel_request_valid(self): """Tests creating tunnel request with correct values""" netapp_elem = zapi_fakes.FAKE_NA_ELEMENT server = zapi_fakes.FAKE_NA_SERVER_API_1_20 mock_invoke = self.mock_object(netapp_elem, 'add_attr') expected_call_args = [mock.call('vfiler', 'filer'), mock.call('vfiler', 'server')] server._enable_tunnel_request(netapp_elem) self.assertEqual(expected_call_args, mock_invoke.call_args_list) def test__parse_response__naapi_error(self): """Tests NaApiError on no response""" self.assertRaises(netapp_api.NaApiError, self.root._parse_response, None) def test__parse_response_no_error(self): """Tests parse function with appropriate response""" mock_invoke = self.mock_object(etree, 'XML', return_value='xml') self.root._parse_response(zapi_fakes.FAKE_XML_STR) mock_invoke.assert_called_with(zapi_fakes.FAKE_XML_STR) def test_build_opener_with_certificate_auth(self): """Tests whether build opener works with """ """valid certificate parameters""" self.root._private_key_file = 'fake_key.pem' self.root._certificate_file = 'fake_cert.pem' auth_handler = self.mock_object(self.root, '_create_certificate_auth_handler', mock.Mock(return_value='fake_auth')) expected_opener = 'fake_auth' self.mock_object(urllib.request, 'build_opener', auth_handler) self.root._build_opener() self.assertEqual(self.root._opener, expected_opener) self.root._create_certificate_auth_handler.assert_called() def test__build_opener_default(self): """Tests whether build opener works with """ """default(basic auth) parameters""" mock_invoke = self.mock_object(urllib.request, 'build_opener') self.root._build_opener() self.assertTrue(mock_invoke.called) @mock.patch('ssl._create_unverified_context') @mock.patch('urllib.request.build_opener') def test_build_opener_with_ssl_verification_disabled( self, mock_build_opener, mock_unverified_context): self.root._ssl_verify = False mock_unverified_context.return_value = 'mock_unverified_context' self.root._build_opener() 
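        # With _ssl_verify disabled, _build_opener() is expected to create an
        # unverified SSL context and hand it to urllib's opener.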
mock_unverified_context.assert_called_once() mock_build_opener.assert_called_once() @mock.patch('urllib.request.HTTPPasswordMgrWithDefaultRealm') @mock.patch('urllib.request.build_opener') def test_build_opener_with_basic_auth(self, mock_build_opener, mock_password_mgr): self.root._username = 'user' self.root._password = 'pass' mock_password_mgr.return_value = mock.Mock() self.root._build_opener() mock_password_mgr.assert_called_once() mock_build_opener.assert_called_once() @ddt.data(None, zapi_fakes.FAKE_XML_STR) def test_send_http_request_value_error(self, na_element): """Tests whether invalid NaElement parameter causes error""" self.assertRaises(ValueError, self.root.send_http_request, na_element) def test_send_http_request_http_error(self): """Tests handling of HTTPError""" na_element = zapi_fakes.FAKE_NA_ELEMENT self.mock_object(self.root, '_create_request', return_value=('abc', zapi_fakes.FAKE_NA_ELEMENT)) self.mock_object(netapp_api, 'LOG') self.root._opener = zapi_fakes.FAKE_HTTP_OPENER self.mock_object(self.root, '_build_opener') self.mock_object(self.root._opener, 'open', side_effect=urllib.error.HTTPError(url='', hdrs='', fp=None, code='401', msg='httperror')) self.assertRaises(netapp_api.NaApiError, self.root.send_http_request, na_element) def test_send_http_request_unknown_exception(self): """Tests handling of Unknown Exception""" na_element = zapi_fakes.FAKE_NA_ELEMENT self.mock_object(self.root, '_create_request', return_value=('abc', zapi_fakes.FAKE_NA_ELEMENT)) mock_log = self.mock_object(netapp_api, 'LOG') self.root._opener = zapi_fakes.FAKE_HTTP_OPENER self.mock_object(self.root, '_build_opener') self.mock_object(self.root._opener, 'open', side_effect=Exception) self.assertRaises(netapp_api.NaApiError, self.root.send_http_request, na_element) self.assertEqual(1, mock_log.exception.call_count) def test_send_http_request_valid(self): """Tests the method send_http_request with valid parameters""" na_element = zapi_fakes.FAKE_NA_ELEMENT self.mock_object(self.root, '_create_request', return_value=('abc', zapi_fakes.FAKE_NA_ELEMENT)) self.mock_object(netapp_api, 'LOG') self.root._opener = zapi_fakes.FAKE_HTTP_OPENER self.mock_object(self.root, '_build_opener') self.mock_object(self.root, '_get_result', return_value=zapi_fakes.FAKE_NA_ELEMENT) opener_mock = self.mock_object(self.root._opener, 'open') opener_mock.read.side_effect = ['resp1', 'resp2'] self.root.send_http_request(na_element) @ddt.data('192.168.1.0', '127.0.0.1', '0.0.0.0', '::ffff:8', 'fdf8:f53b:82e4::53', '2001::1', 'fe80::200::abcd', '2001:0000:4136:e378:8000:63bf:3fff:fdd2') def test__get_url(self, host): port = '80' root = netapp_api.NaServer(host, port=port) protocol = root.TRANSPORT_TYPE_HTTP url = root.URL_FILER if netutils.is_valid_ipv6(host): host = netutils.escape_ipv6(host) result = '%s://%s:%s/%s' % (protocol, host, port, url) url = root._get_url() self.assertEqual(result, url) class NetAppApiElementTransTests(test.TestCase): """Test case for NetApp API element translations.""" def test_translate_struct_dict_unique_key(self): """Tests if dict gets properly converted to NaElements.""" root = netapp_api.NaElement('root') child = {'e1': 'v1', 'e2': 'v2', 'e3': 'v3'} root.translate_struct(child) self.assertEqual(3, len(root.get_children())) self.assertEqual('v1', root.get_child_content('e1')) self.assertEqual('v2', root.get_child_content('e2')) self.assertEqual('v3', root.get_child_content('e3')) def test_translate_struct_dict_nonunique_key(self): """Tests if list/dict gets properly converted to 
NaElements.""" root = netapp_api.NaElement('root') child = [{'e1': 'v1', 'e2': 'v2'}, {'e1': 'v3'}] root.translate_struct(child) self.assertEqual(3, len(root.get_children())) children = root.get_children() for c in children: if c.get_name() == 'e1': self.assertIn(c.get_content(), ['v1', 'v3']) else: self.assertEqual('v2', c.get_content()) def test_translate_struct_list(self): """Tests if list gets properly converted to NaElements.""" root = netapp_api.NaElement('root') child = ['e1', 'e2'] root.translate_struct(child) self.assertEqual(2, len(root.get_children())) self.assertIsNone(root.get_child_content('e1')) self.assertIsNone(root.get_child_content('e2')) def test_translate_struct_tuple(self): """Tests if tuple gets properly converted to NaElements.""" root = netapp_api.NaElement('root') child = ('e1', 'e2') root.translate_struct(child) self.assertEqual(2, len(root.get_children())) self.assertIsNone(root.get_child_content('e1')) self.assertIsNone(root.get_child_content('e2')) def test_translate_invalid_struct(self): """Tests if invalid data structure raises exception.""" root = netapp_api.NaElement('root') child = 'random child element' self.assertRaises(ValueError, root.translate_struct, child) def test_setter_builtin_types(self): """Tests str, int, float get converted to NaElement.""" root = netapp_api.NaElement('root') root['e1'] = 'v1' root['e2'] = 1 root['e3'] = 2.0 root['e4'] = 8 self.assertEqual(4, len(root.get_children())) self.assertEqual('v1', root.get_child_content('e1')) self.assertEqual('1', root.get_child_content('e2')) self.assertEqual('2.0', root.get_child_content('e3')) self.assertEqual('8', root.get_child_content('e4')) def test_setter_na_element(self): """Tests na_element gets appended as child.""" root = netapp_api.NaElement('root') root['e1'] = netapp_api.NaElement('nested') self.assertEqual(1, len(root.get_children())) e1 = root.get_child_by_name('e1') self.assertIsInstance(e1, netapp_api.NaElement) self.assertIsInstance(e1.get_child_by_name('nested'), netapp_api.NaElement) def test_setter_child_dict(self): """Tests dict is appended as child to root.""" root = netapp_api.NaElement('root') root['d'] = {'e1': 'v1', 'e2': 'v2'} e1 = root.get_child_by_name('d') self.assertIsInstance(e1, netapp_api.NaElement) sub_ch = e1.get_children() self.assertEqual(2, len(sub_ch)) for c in sub_ch: self.assertIn(c.get_name(), ['e1', 'e2']) if c.get_name() == 'e1': self.assertEqual('v1', c.get_content()) else: self.assertEqual('v2', c.get_content()) def test_setter_child_list_tuple(self): """Tests list/tuple are appended as child to root.""" root = netapp_api.NaElement('root') root['l'] = ['l1', 'l2'] root['t'] = ('t1', 't2') l_element = root.get_child_by_name('l') self.assertIsInstance(l_element, netapp_api.NaElement) t = root.get_child_by_name('t') self.assertIsInstance(t, netapp_api.NaElement) for le in l_element.get_children(): self.assertIn(le.get_name(), ['l1', 'l2']) for te in t.get_children(): self.assertIn(te.get_name(), ['t1', 't2']) def test_setter_no_value(self): """Tests key with None value.""" root = netapp_api.NaElement('root') root['k'] = None self.assertIsNone(root.get_child_content('k')) def test_setter_invalid_value(self): """Tests invalid value raises exception.""" root = netapp_api.NaElement('root') try: root['k'] = netapp_api.NaServer('localhost') except Exception as e: if not isinstance(e, TypeError): self.fail(_('Error not a TypeError.')) def test_setter_invalid_key(self): """Tests invalid value raises exception.""" root = netapp_api.NaElement('root') try: 
root[None] = 'value' except Exception as e: if not isinstance(e, KeyError): self.fail(_('Error not a KeyError.')) def test_getter_key_error(self): """Tests invalid key raises exception""" root = netapp_api.NaElement('root') self.mock_object(root, 'get_child_by_name', return_value=None) self.mock_object(root, 'has_attr', return_value=None) self.assertRaises(KeyError, netapp_api.NaElement.__getitem__, root, '123') def test_getter_na_element_list(self): """Tests returning NaElement list""" root = netapp_api.NaElement('root') root['key'] = ['val1', 'val2'] self.assertEqual(root.get_child_by_name('key').get_name(), root.__getitem__('key').get_name()) def test_getter_child_text(self): """Tests NaElement having no children""" root = netapp_api.NaElement('root') root.set_content('FAKE_CONTENT') self.mock_object(root, 'get_child_by_name', return_value=root) self.assertEqual('FAKE_CONTENT', root.__getitem__('root')) def test_getter_child_attr(self): """Tests invalid key raises exception""" root = netapp_api.NaElement('root') root.add_attr('val', 'FAKE_VALUE') self.assertEqual('FAKE_VALUE', root.__getitem__('val')) def test_add_node_with_children(self): """Tests adding a child node with its own children""" root = netapp_api.NaElement('root') self.mock_object(netapp_api.NaElement, 'create_node_with_children', return_value=zapi_fakes.FAKE_INVOKE_DATA) mock_invoke = self.mock_object(root, 'add_child_elem') root.add_node_with_children('options') mock_invoke.assert_called_with(zapi_fakes.FAKE_INVOKE_DATA) def test_create_node_with_children(self): """Tests adding a child node with its own children""" root = netapp_api.NaElement('root') self.mock_object(root, 'add_new_child', return_value='abc') result_xml = str(root.create_node_with_children( 'options', test1=zapi_fakes.FAKE_XML_STR, test2=zapi_fakes.FAKE_XML_STR)) # No ordering is guaranteed for elements in this XML. 
self.assertTrue(result_xml.startswith(""), result_xml) self.assertIn("abc", result_xml) self.assertIn("abc", result_xml) self.assertTrue(result_xml.rstrip().endswith(""), result_xml) def test_add_new_child(self): """Tests adding a child node with its own children""" root = netapp_api.NaElement('root') self.mock_object(netapp_api.NaElement, '_convert_entity_refs', return_value=zapi_fakes.FAKE_INVOKE_DATA) root.add_new_child('options', zapi_fakes.FAKE_INVOKE_DATA) self.assertEqual(zapi_fakes.FAKE_XML2, root.to_string()) def test_get_attr_names_empty_attr(self): """Tests _elements.attrib being empty""" root = netapp_api.NaElement('root') self.assertEqual([], root.get_attr_names()) def test_get_attr_names(self): """Tests _elements.attrib being non-empty""" root = netapp_api.NaElement('root') root.add_attr('attr1', 'a1') root.add_attr('attr2', 'a2') self.assertEqual(['attr1', 'attr2'], root.get_attr_names()) @ddt.ddt class SSHUtilTests(test.TestCase): """Test Cases for SSH API invocation.""" def setUp(self): super(SSHUtilTests, self).setUp() self.mock_object(netapp_api.SSHUtil, '_init_ssh_pool') self.sshutil = netapp_api.SSHUtil('127.0.0.1', 'fake_user', 'fake_password') def test_execute_command(self): ssh = mock.Mock(paramiko.SSHClient) stdin, stdout, stderr = self._mock_ssh_channel_files( paramiko.ChannelFile) self.mock_object(ssh, 'exec_command', return_value=(stdin, stdout, stderr)) wait_on_stdout = self.mock_object(self.sshutil, '_wait_on_stdout') stdout_read = self.mock_object(stdout, 'read', return_value='') self.sshutil.execute_command(ssh, 'ls') wait_on_stdout.assert_called_once_with(stdout, netapp_api.SSHUtil.RECV_TIMEOUT) stdout_read.assert_called_once_with() def test_execute_read_exception(self): ssh = mock.Mock(paramiko.SSHClient) exec_command = self.mock_object(ssh, 'exec_command') exec_command.side_effect = paramiko.SSHException('Failure') wait_on_stdout = self.mock_object(self.sshutil, '_wait_on_stdout') self.assertRaises(paramiko.SSHException, self.sshutil.execute_command, ssh, 'ls') wait_on_stdout.assert_not_called() @ddt.data(b'Password:', b'Password: ', b'Password: \n\n', b'Fake response \r\n Password: \n\n') def test_execute_command_with_prompt(self, response): ssh = mock.Mock(paramiko.SSHClient) stdin, stdout, stderr = self._mock_ssh_channel_files(paramiko.Channel) stdout_read = self.mock_object(stdout.channel, 'recv', return_value=response) stdin_write = self.mock_object(stdin, 'write') self.mock_object(ssh, 'exec_command', return_value=(stdin, stdout, stderr)) wait_on_stdout = self.mock_object(self.sshutil, '_wait_on_stdout') self.sshutil.execute_command_with_prompt(ssh, 'sudo ls', 'Password:', 'easypass') wait_on_stdout.assert_called_once_with(stdout, netapp_api.SSHUtil.RECV_TIMEOUT) stdout_read.assert_called_once_with(999) stdin_write.assert_called_once_with('easypass' + '\n') def test_execute_command_unexpected_response(self): ssh = mock.Mock(paramiko.SSHClient) stdin, stdout, stderr = self._mock_ssh_channel_files(paramiko.Channel) stdout_read = self.mock_object(stdout.channel, 'recv', return_value=b'bad response') self.mock_object(ssh, 'exec_command', return_value=(stdin, stdout, stderr)) wait_on_stdout = self.mock_object(self.sshutil, '_wait_on_stdout') self.assertRaises(exception.VolumeBackendAPIException, self.sshutil.execute_command_with_prompt, ssh, 'sudo ls', 'Password:', 'easypass') wait_on_stdout.assert_called_once_with(stdout, netapp_api.SSHUtil.RECV_TIMEOUT) stdout_read.assert_called_once_with(999) def test_wait_on_stdout(self): stdout = mock.Mock() 
stdout.channel = mock.Mock(paramiko.Channel) exit_status = self.mock_object(stdout.channel, 'exit_status_ready', return_value=False) self.sshutil._wait_on_stdout(stdout, 1) exit_status.assert_any_call() self.assertGreater(exit_status.call_count, 2) def _mock_ssh_channel_files(self, channel): stdin = mock.Mock() stdin.channel = mock.Mock(channel) stdout = mock.Mock() stdout.channel = mock.Mock(channel) stderr = mock.Mock() stderr.channel = mock.Mock(channel) return stdin, stdout, stderr @ddt.ddt class NetAppRestApiServerTests(test.TestCase): """Test case for NetApp REST API server methods.""" def setUp(self): self.rest_client = netapp_api.RestNaServer('127.0.0.1') super(NetAppRestApiServerTests, self).setUp() @ddt.data(None, 'my_cert') def test__init__ssl_verify(self, ssl_cert_path): client = netapp_api.RestNaServer('127.0.0.1', ssl_cert_path=ssl_cert_path) if ssl_cert_path: self.assertEqual(ssl_cert_path, client._ssl_verify) else: self.assertTrue(client._ssl_verify) @ddt.data(None, 'ftp') def test_set_transport_type_value_error(self, transport_type): self.assertRaises(ValueError, self.rest_client.set_transport_type, transport_type) @ddt.data('http', 'https') def test_set_transport_type_valid(self, transport_type): """Tests setting a valid transport type""" self.rest_client.set_transport_type(transport_type) self.assertEqual(self.rest_client._protocol, transport_type) @ddt.data('!&', '80na', '') def test_set_port__value_error(self, port): self.assertRaises(ValueError, self.rest_client.set_port, port) @ddt.data( {'port': None, 'protocol': 'http', 'expected_port': '80'}, {'port': None, 'protocol': 'https', 'expected_port': '443'}, {'port': '111', 'protocol': None, 'expected_port': '111'} ) @ddt.unpack def test_set_port(self, port, protocol, expected_port): self.rest_client._protocol = protocol self.rest_client.set_port(port=port) self.assertEqual(expected_port, self.rest_client._port) @ddt.data('!&', '80na', '') def test_set_timeout_value_error(self, timeout): self.assertRaises(ValueError, self.rest_client.set_timeout, timeout) @ddt.data({'params': {'major': 1, 'minor': '20a'}}, {'params': {'major': '20a', 'minor': 1}}, {'params': {'major': '!*', 'minor': '20a'}}) @ddt.unpack def test_set_api_version_value_error(self, params): self.assertRaises(ValueError, self.rest_client.set_api_version, **params) def test_set_api_version_valid(self): args = {'major': '20', 'minor': 1} self.rest_client.set_api_version(**args) self.assertEqual(self.rest_client._api_major_version, 20) self.assertEqual(self.rest_client._api_minor_version, 1) self.assertEqual(self.rest_client._api_version, "20.1") def test_invoke_successfully_naapi_error(self): self.mock_object(self.rest_client, '_build_headers', return_value={}) self.mock_object(self.rest_client, '_get_base_url', return_value='') self.mock_object(self.rest_client, 'send_http_request', return_value=(10, zapi_fakes.ERROR_RESPONSE_REST)) self.assertRaises(netapp_api.NaApiError, self.rest_client.invoke_successfully, zapi_fakes.FAKE_ACTION_ENDPOINT, 'get') @ddt.data(None, {'fields': 'fake_fields'}) def test_invoke_successfully(self, query): mock_build_header = self.mock_object( self.rest_client, '_build_headers', return_value=zapi_fakes.FAKE_HEADERS) mock_base = self.mock_object( self.rest_client, '_get_base_url', return_value=zapi_fakes.FAKE_BASE_ENDPOINT) mock_add_query = self.mock_object( self.rest_client, '_add_query_params_to_url', return_value=zapi_fakes.FAKE_ACTION_ENDPOINT) http_code = 200 mock_send_http = self.mock_object( self.rest_client, 
'send_http_request', return_value=(http_code, zapi_fakes.NO_RECORDS_RESPONSE_REST)) code, response = self.rest_client.invoke_successfully( zapi_fakes.FAKE_ACTION_ENDPOINT, 'get', body=zapi_fakes.FAKE_BODY, query=query, enable_tunneling=True) self.assertEqual(response, zapi_fakes.NO_RECORDS_RESPONSE_REST) self.assertEqual(code, http_code) mock_build_header.assert_called_once_with(True) mock_base.assert_called_once_with() self.assertEqual(bool(query), mock_add_query.called) mock_send_http.assert_called_once_with( 'get', zapi_fakes.FAKE_BASE_ENDPOINT + zapi_fakes.FAKE_ACTION_ENDPOINT, zapi_fakes.FAKE_BODY, zapi_fakes.FAKE_HEADERS) @ddt.data( {'error': requests.HTTPError(), 'raised': netapp_api.NaApiError}, {'error': Exception, 'raised': netapp_api.NaApiError}) @ddt.unpack def test_send_http_request_http_error(self, error, raised): self.mock_object(netapp_api, 'LOG') self.mock_object(self.rest_client, '_build_session') self.rest_client._session = mock.Mock() self.mock_object( self.rest_client, '_get_request_method', mock.Mock( return_value=mock.Mock(side_effect=error))) self.assertRaises(raised, self.rest_client.send_http_request, 'get', zapi_fakes.FAKE_ACTION_ENDPOINT, zapi_fakes.FAKE_BODY, zapi_fakes.FAKE_HEADERS) @ddt.data( { 'resp_content': zapi_fakes.NO_RECORDS_RESPONSE_REST, 'body': zapi_fakes.FAKE_BODY, 'timeout': 10, }, { 'resp_content': zapi_fakes.NO_RECORDS_RESPONSE_REST, 'body': zapi_fakes.FAKE_BODY, 'timeout': None, }, { 'resp_content': zapi_fakes.NO_RECORDS_RESPONSE_REST, 'body': None, 'timeout': None, }, { 'resp_content': None, 'body': None, 'timeout': None, } ) @ddt.unpack def test_send_http_request(self, resp_content, body, timeout): if timeout: self.rest_client._timeout = timeout self.mock_object(netapp_api, 'LOG') mock_json_dumps = self.mock_object( jsonutils, 'dumps', mock.Mock(return_value='fake_dump_body')) mock_build_session = self.mock_object( self.rest_client, '_build_session') _mock_session = mock.Mock() self.rest_client._session = _mock_session response = mock.Mock() response.content = resp_content response.status_code = 10 mock_post = mock.Mock(return_value=response) mock_get_request_method = self.mock_object( self.rest_client, '_get_request_method', mock.Mock( return_value=mock_post)) mock_json_loads = self.mock_object( jsonutils, 'loads', mock.Mock(return_value='fake_loads_response')) code, res = self.rest_client.send_http_request( 'post', zapi_fakes.FAKE_ACTION_ENDPOINT, body, zapi_fakes.FAKE_HEADERS) expected_res = 'fake_loads_response' if resp_content else {} self.assertEqual(expected_res, res) self.assertEqual(10, code) self.assertEqual(bool(body), mock_json_dumps.called) self.assertEqual(bool(resp_content), mock_json_loads.called) mock_build_session.assert_called_once_with(zapi_fakes.FAKE_HEADERS) mock_get_request_method.assert_called_once_with('post', _mock_session) expected_data = 'fake_dump_body' if body else {} if timeout: mock_post.assert_called_once_with( zapi_fakes.FAKE_ACTION_ENDPOINT, data=expected_data, timeout=timeout) else: mock_post.assert_called_once_with(zapi_fakes.FAKE_ACTION_ENDPOINT, data=expected_data) @ddt.data( {'host': '192.168.1.0', 'port': '80', 'protocol': 'http'}, {'host': '0.0.0.0', 'port': '443', 'protocol': 'https'}, {'host': '::ffff:8', 'port': '80', 'protocol': 'http'}, {'host': 'fdf8:f53b:82e4::53', 'port': '443', 'protocol': 'https'}) @ddt.unpack def test__get_base_url(self, host, port, protocol): client = netapp_api.RestNaServer(host, port=port, transport_type=protocol) expected_host = f'[{host}]' if ':' in host else host 
expected_url = '%s://%s:%s/api/' % (protocol, expected_host, port) url = client._get_base_url() self.assertEqual(expected_url, url) def test__add_query_params_to_url(self): formatted_url = self.rest_client._add_query_params_to_url( zapi_fakes.FAKE_ACTION_ENDPOINT, zapi_fakes.FAKE_HTTP_QUERY) expected_formatted_url = zapi_fakes.FAKE_ACTION_ENDPOINT expected_formatted_url += zapi_fakes.FAKE_FORMATTED_HTTP_QUERY self.assertEqual(expected_formatted_url, formatted_url) @ddt.data('post', 'get', 'put', 'delete', 'patch') def test_get_request_method(self, method): _mock_session = mock.Mock() _mock_session.post = mock.Mock() _mock_session.get = mock.Mock() _mock_session.put = mock.Mock() _mock_session.delete = mock.Mock() _mock_session.patch = mock.Mock() res = self.rest_client._get_request_method(method, _mock_session) expected_method = getattr(_mock_session, method) self.assertEqual(expected_method, res) def test__str__(self): fake_host = 'fake_host' client = netapp_api.RestNaServer(fake_host) expected_str = "server: %s" % fake_host self.assertEqual(expected_str, str(client)) def test_get_transport_type(self): expected_protocol = 'fake_protocol' self.rest_client._protocol = expected_protocol res = self.rest_client.get_transport_type() self.assertEqual(expected_protocol, res) @ddt.data(None, ('1', '0')) def test_get_api_version(self, api_version): if api_version: self.rest_client._api_version = str(api_version) (self.rest_client._api_major_version, _) = api_version (_, self.rest_client._api_minor_version) = api_version res = self.rest_client.get_api_version() self.assertEqual(api_version, res) @ddt.data(None, '9.10') def test_get_ontap_version(self, ontap_version): if ontap_version: self.rest_client._ontap_version = ontap_version res = self.rest_client.get_ontap_version() self.assertEqual(ontap_version, res) def test_set_vserver(self): expected_vserver = 'fake_vserver' self.rest_client.set_vserver(expected_vserver) self.assertEqual(expected_vserver, self.rest_client._vserver) def test_get_vserver(self): expected_vserver = 'fake_vserver' self.rest_client._vserver = expected_vserver res = self.rest_client.get_vserver() self.assertEqual(expected_vserver, res) def test__build_session_with_basic_auth(self): """Tests whether build session works with """ """default(basic auth) parameters""" fake_session = mock.Mock() mock_requests_session = self.mock_object( requests, 'Session', mock.Mock(return_value=fake_session)) mock_auth = self.mock_object( self.rest_client, '_create_basic_auth_handler', mock.Mock(return_value='fake_auth')) self.rest_client._ssl_verify = 'fake_ssl' self.rest_client._build_session(zapi_fakes.FAKE_HEADERS) self.assertEqual(fake_session, self.rest_client._session) self.assertEqual('fake_auth', self.rest_client._session.auth) self.assertEqual('fake_ssl', self.rest_client._session.verify) self.assertEqual(zapi_fakes.FAKE_HEADERS, self.rest_client._session.headers) mock_requests_session.assert_called_once_with() mock_auth.assert_called_once_with() def test__build_session_with_certificate_auth(self): """Tests whether build session works with """ """valid certificate parameters""" self.rest_client._private_key_file = 'fake_key.pem' self.rest_client._certificate_file = 'fake_cert.pem' self.rest_client._certificate_host_validation = False fake_session = mock.Mock() mock_requests_session = self.mock_object( requests, 'Session', mock.Mock(return_value=fake_session)) mock_auth = self.mock_object( self.rest_client, '_create_certificate_auth_handler', mock.Mock(return_value=('fake_cert', 
'fake_verify'))) self.rest_client._build_session(zapi_fakes.FAKE_HEADERS) self.assertEqual(fake_session, self.rest_client._session) self.assertEqual(('fake_cert', 'fake_verify'), (self.rest_client._session.cert, self.rest_client._session.verify)) self.assertEqual(zapi_fakes.FAKE_HEADERS, self.rest_client._session.headers) mock_requests_session.assert_called_once_with() mock_auth.assert_called_once_with() @ddt.data(True, False) def test__build_headers(self, enable_tunneling): self.rest_client._vserver = zapi_fakes.VSERVER_NAME res = self.rest_client._build_headers(enable_tunneling) expected = { "Accept": "application/json", "Content-Type": "application/json" } if enable_tunneling: expected["X-Dot-SVM-Name"] = zapi_fakes.VSERVER_NAME self.assertEqual(expected, res) def test__create_basic_auth_handler(self): username = 'fake_username' password = 'fake_password' client = netapp_api.RestNaServer('10.1.1.1', username=username, password=password) res = client._create_basic_auth_handler() expected = auth.HTTPBasicAuth(username, password) self.assertEqual(expected.__dict__, res.__dict__) def test__create_certificate_auth_handler_default(self): """Test whether create certificate auth handler """ """works with default params""" self.rest_client._private_key_file = 'fake_key.pem' self.rest_client._certificate_file = 'fake_cert.pem' self.rest_client._certificate_host_validation = False cert = self.rest_client._certificate_file, \ self.rest_client._private_key_file self.rest_client._session = mock.Mock() if not self.rest_client._certificate_host_validation: self.assertFalse(self.rest_client._certificate_host_validation) res = self.rest_client._create_certificate_auth_handler() self.assertEqual(res, (cert, self.rest_client._certificate_host_validation)) def test__create_certificate_auth_handler_with_host_validation(self): """Test whether create certificate auth handler """ """works with host validation enabled""" self.rest_client._private_key_file = 'fake_key.pem' self.rest_client._certificate_file = 'fake_cert.pem' self.rest_client._ca_certificate_file = 'fake_ca_cert.crt' self.rest_client._certificate_host_validation = True cert = self.rest_client._certificate_file, \ self.rest_client._private_key_file self.rest_client._session = mock.Mock() if self.rest_client._certificate_host_validation: self.assertTrue(self.rest_client._certificate_host_validation) res = self.rest_client._create_certificate_auth_handler() self.assertEqual(res, (cert, self.rest_client._ca_certificate_file)) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/volume/drivers/netapp/dataontap/client/test_client_base.py0000664000175000017500000010114500000000000032044 0ustar00zuulzuul00000000000000# Copyright (c) 2014 Alex Meade. All rights reserved. # Copyright (c) 2015 Tom Barron. All rights reserved. # Copyright (c) 2016 Mike Rooney. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
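# ---------------------------------------------------------------------------
# Illustrative sketch only (not part of the original module): a rough picture
# of how the ZAPI helpers mocked in the tests below are combined by the
# production client. The 'conn' argument is an assumption for illustration --
# it stands in for an already-configured netapp_api server connection -- and
# the exact set of child elements is abbreviated; the element construction and
# invoke_successfully(request, True) calls mirror what the tests assert.
# ---------------------------------------------------------------------------
def _example_lun_create_by_size(conn, volume_name, lun_name, size_bytes):
    """Build and send a 'lun-create-by-size' request, as the tests expect."""
    from cinder.volume.drivers.netapp.dataontap.client import api as netapp_api

    path = '/vol/%s/%s' % (volume_name, lun_name)
    request = netapp_api.NaElement.create_node_with_children(
        'lun-create-by-size',
        **{'path': path,
           'size': size_bytes,
           'ostype': 'linux',
           'space-reservation-enabled': 'true'})
    # The second positional argument enables tunneling, matching the
    # invoke_successfully(mock.ANY, True) assertions in this file.
    return conn.invoke_successfully(request, True)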
import time from unittest import mock import uuid import ddt from lxml import etree from cinder import exception from cinder.tests.unit import test from cinder.tests.unit.volume.drivers.netapp.dataontap.client import ( fakes as fake_client) import cinder.tests.unit.volume.drivers.netapp.dataontap.fakes as fake from cinder.volume.drivers.netapp.dataontap.client import api as netapp_api from cinder.volume.drivers.netapp.dataontap.client import client_base CONNECTION_INFO = {'hostname': 'hostname', 'transport_type': 'https', 'port': 443, 'username': 'admin', 'password': 'passw0rd', 'api_trace_pattern': 'fake_regex', 'private_key_file': 'fake_private_key.pem', 'certificate_file': 'fake_cert.pem', 'ca_certificate_file': 'fake_ca_cert.crt', 'certificate_host_validation': 'False' } @ddt.ddt class NetAppBaseClientTestCase(test.TestCase): def setUp(self): super(NetAppBaseClientTestCase, self).setUp() self.mock_object(client_base, 'LOG') self.mock_object(client_base.Client, '_init_ssh_client') self.client = client_base.Client(**CONNECTION_INFO) self.client.connection = mock.MagicMock() self.client.connection.get_api_version.return_value = (1, 100) self.client.ssh_client = mock.MagicMock() self.connection = self.client.connection self.fake_volume = str(uuid.uuid4()) self.fake_lun = str(uuid.uuid4()) self.fake_size = '1024' self.fake_metadata = {'OsType': 'linux', 'SpaceReserved': 'true', 'SpaceAllocated': 'true'} self.mock_send_request = self.mock_object( self.client.connection, 'send_request') def test_get_ontapi_version(self): version_response = netapp_api.NaElement( etree.XML(""" 1 19 """)) self.connection.invoke_successfully.return_value = version_response major, minor = self.client.get_ontapi_version(cached=False) self.assertEqual('1', major) self.assertEqual('19', minor) def test_get_ontapi_version_cached(self): self.connection.get_api_version.return_value = (1, 20) major, minor = self.client.get_ontapi_version() self.assertEqual(1, self.connection.get_api_version.call_count) self.assertEqual(1, major) self.assertEqual(20, minor) def test_check_is_naelement(self): element = netapp_api.NaElement('name') self.assertIsNone(self.client.check_is_naelement(element)) self.assertRaises(ValueError, self.client.check_is_naelement, None) @ddt.data({'ontap_version': (9, 4, 0), 'space_reservation': 'true', 'space_alloc': 'true'}, {'ontap_version': (9, 4, 0), 'space_reservation': 'false', 'space_alloc': 'false'}, {'ontap_version': (9, 6, 0), 'space_reservation': 'true', 'space_alloc': 'true'}, {'ontap_version': (9, 6, 0), 'space_reservation': 'false', 'space_alloc': 'false'}, ) @ddt.unpack def test_create_lun(self, ontap_version, space_reservation, space_alloc): expected_path = '/vol/%s/%s' % (self.fake_volume, self.fake_lun) self.fake_metadata['SpaceReserved'] = space_reservation self.fake_metadata['SpaceAllocated'] = space_alloc expected_space_reservation = space_reservation self.mock_object(self.client, 'get_ontap_version', return_value=ontap_version) mock_resize_lun = self.mock_object( client_base.Client, 'do_direct_resize') mock_set_space_reservation = self.mock_object( client_base.Client, 'set_lun_space_reservation') mock_validate_qos_policy_group = self.mock_object( client_base.Client, '_validate_qos_policy_group') initial_size = self.fake_size if ontap_version < (9, 5, 0): initial_size = fake.MAX_SIZE_FOR_A_LUN expected_space_reservation = 'false' with mock.patch.object(netapp_api.NaElement, 'create_node_with_children', ) as mock_create_node: self.client.create_lun(self.fake_volume, self.fake_lun, 
self.fake_size, self.fake_metadata) mock_validate_qos_policy_group.assert_called_once() mock_create_node.assert_called_with( 'lun-create-by-size', **{'path': expected_path, 'size': initial_size, 'ostype': self.fake_metadata['OsType'], 'space-reservation-enabled': expected_space_reservation, 'space-allocation-enabled': space_alloc}) self.connection.invoke_successfully.assert_called_with( mock.ANY, True) if ontap_version < (9, 5, 0): mock_resize_lun.assert_called_once_with( expected_path, self.fake_size) if ontap_version < (9, 5, 0) and space_reservation == 'true': mock_set_space_reservation.assert_called_once_with( expected_path, True) else: mock_set_space_reservation.assert_not_called() @ddt.data({'ontap_version': (9, 4, 0), 'space_reservation': 'true', 'space_alloc': 'true'}, {'ontap_version': (9, 4, 0), 'space_reservation': 'false', 'space_alloc': 'false'}, {'ontap_version': (9, 6, 0), 'space_reservation': 'true', 'space_alloc': 'true'}, {'ontap_version': (9, 6, 0), 'space_reservation': 'false', 'space_alloc': 'false'}, ) @ddt.unpack def test_create_lun_exact_size(self, ontap_version, space_reservation, space_alloc): expected_path = '/vol/%s/%s' % (self.fake_volume, self.fake_lun) self.connection.get_api_version.return_value = (1, 110) self.fake_metadata['SpaceReserved'] = space_reservation self.fake_metadata['SpaceAllocated'] = space_alloc expected_space_reservation = self.fake_metadata['SpaceReserved'] self.mock_object(self.client, 'get_ontap_version', return_value=ontap_version) mock_resize_lun = self.mock_object( client_base.Client, 'do_direct_resize') mock_set_space_reservation = self.mock_object( client_base.Client, 'set_lun_space_reservation') mock_validate_qos_policy_group = self.mock_object( client_base.Client, '_validate_qos_policy_group') initial_size = self.fake_size if ontap_version < (9, 5, 0): initial_size = fake.MAX_SIZE_FOR_A_LUN expected_space_reservation = 'false' with mock.patch.object(netapp_api.NaElement, 'create_node_with_children', ) as mock_create_node: self.client.create_lun(self.fake_volume, self.fake_lun, self.fake_size, self.fake_metadata) mock_validate_qos_policy_group.assert_called_once() mock_create_node.assert_called_with( 'lun-create-by-size', **{'path': expected_path, 'size': initial_size, 'ostype': self.fake_metadata['OsType'], 'use-exact-size': 'true', 'space-reservation-enabled': expected_space_reservation, 'space-allocation-enabled': space_alloc}) self.connection.invoke_successfully.assert_called_with( mock.ANY, True) if ontap_version < (9, 5, 0): mock_resize_lun.assert_called_once_with( expected_path, self.fake_size) if ontap_version < (9, 5, 0) and space_reservation == 'true': mock_set_space_reservation.assert_called_once_with( expected_path, True) else: mock_set_space_reservation.assert_not_called() @ddt.data({'ontap_version': (9, 4, 0), 'space_reservation': 'true', 'space_alloc': 'true'}, {'ontap_version': (9, 4, 0), 'space_reservation': 'false', 'space_alloc': 'false'}, {'ontap_version': (9, 6, 0), 'space_reservation': 'true', 'space_alloc': 'true'}, {'ontap_version': (9, 6, 0), 'space_reservation': 'false', 'space_alloc': 'false'}, ) @ddt.unpack def test_create_lun_with_qos_policy_group_name( self, ontap_version, space_reservation, space_alloc): expected_path = '/vol/%s/%s' % (self.fake_volume, self.fake_lun) expected_qos_group_name = 'qos_1' mock_request = mock.Mock() self.fake_metadata['SpaceReserved'] = space_reservation self.fake_metadata['SpaceAllocated'] = space_alloc expected_space_reservation = self.fake_metadata['SpaceReserved'] 
self.mock_object(self.client, 'get_ontap_version', return_value=ontap_version) mock_resize_lun = self.mock_object( client_base.Client, 'do_direct_resize') mock_set_space_reservation = self.mock_object( client_base.Client, 'set_lun_space_reservation') mock_validate_qos_policy_group = self.mock_object( client_base.Client, '_validate_qos_policy_group') initial_size = self.fake_size if ontap_version < (9, 5, 0): initial_size = fake.MAX_SIZE_FOR_A_LUN expected_space_reservation = 'false' with mock.patch.object(netapp_api.NaElement, 'create_node_with_children', return_value=mock_request ) as mock_create_node: self.client.create_lun( self.fake_volume, self.fake_lun, self.fake_size, self.fake_metadata, qos_policy_group_name=expected_qos_group_name) mock_validate_qos_policy_group.assert_called_once() mock_create_node.assert_called_with( 'lun-create-by-size', **{'path': expected_path, 'size': initial_size, 'ostype': self.fake_metadata['OsType'], 'space-reservation-enabled': expected_space_reservation, 'space-allocation-enabled': space_alloc}) mock_request.add_new_child.assert_called_with( 'qos-policy-group', expected_qos_group_name) self.connection.invoke_successfully.assert_called_with( mock.ANY, True) if ontap_version < (9, 5, 0): mock_resize_lun.assert_called_once_with( expected_path, self.fake_size) if ontap_version < (9, 5, 0) and space_reservation == 'true': mock_set_space_reservation.assert_called_once_with( expected_path, True) else: mock_set_space_reservation.assert_not_called() def test_get_ontap_version(self): version_response = netapp_api.NaElement( fake.SYSTEM_GET_VERSION_RESPONSE) self.connection.invoke_successfully.return_value = ( version_response) result = self.client.get_ontap_version(cached=False) self.assertEqual((9, 6, 0), result) def test_get_ontap_version_cached(self): self.connection.get_ontap_version.return_value = (9, 6, 0) result = self.client.get_ontap_version() self.connection.get_ontap_version.assert_called_once_with() self.assertEqual((9, 6, 0), result) def test_set_lun_space_reservation(self): path = '/vol/%s/%s' % (self.fake_volume, self.fake_lun) with mock.patch.object(netapp_api.NaElement, 'create_node_with_children', ) as mock_set_space_reservation: self.client.set_lun_space_reservation(path, True) mock_set_space_reservation.assert_called_once_with( 'lun-set-space-reservation-info', **{'path': path, 'enable': 'True'}) self.connection.invoke_successfully.assert_called_once_with( mock.ANY, True) @ddt.data((9, 4, 0), (9, 6, 0)) def test_create_lun_raises_on_failure(self, ontap_version): self.connection.invoke_successfully = mock.Mock( side_effect=netapp_api.NaApiError) self.mock_object(self.client, 'get_ontap_version', return_value=ontap_version) mock_validate_qos_policy_group = self.mock_object( client_base.Client, '_validate_qos_policy_group') self.assertRaises(netapp_api.NaApiError, self.client.create_lun, self.fake_volume, self.fake_lun, self.fake_size, self.fake_metadata) mock_validate_qos_policy_group.assert_called_once() def test_destroy_lun(self): path = '/vol/%s/%s' % (self.fake_volume, self.fake_lun) with mock.patch.object(netapp_api.NaElement, 'create_node_with_children', ) as mock_create_node: self.client.destroy_lun(path) mock_create_node.assert_called_once_with( 'lun-destroy', **{'path': path}) self.connection.invoke_successfully.assert_called_once_with( mock.ANY, True) def test_destroy_lun_force(self): path = '/vol/%s/%s' % (self.fake_volume, self.fake_lun) mock_request = mock.Mock() with mock.patch.object(netapp_api.NaElement, 
'create_node_with_children', return_value=mock_request ) as mock_create_node: self.client.destroy_lun(path) mock_create_node.assert_called_once_with('lun-destroy', **{'path': path}) mock_request.add_new_child.assert_called_once_with('force', 'true') self.connection.invoke_successfully.assert_called_once_with( mock.ANY, True) def test_map_lun(self): path = '/vol/%s/%s' % (self.fake_volume, self.fake_lun) igroup = 'igroup' expected_lun_id = 'my_lun' mock_response = mock.Mock() self.connection.invoke_successfully.return_value = mock_response mock_response.get_child_content.return_value = expected_lun_id with mock.patch.object(netapp_api.NaElement, 'create_node_with_children', ) as mock_create_node: actual_lun_id = self.client.map_lun(path, igroup) mock_create_node.assert_called_once_with( 'lun-map', **{'path': path, 'initiator-group': igroup}) self.connection.invoke_successfully.assert_called_once_with( mock.ANY, True) self.assertEqual(expected_lun_id, actual_lun_id) def test_map_lun_with_lun_id(self): path = '/vol/%s/%s' % (self.fake_volume, self.fake_lun) igroup = 'igroup' expected_lun_id = 'my_lun' mock_response = mock.Mock() self.connection.invoke_successfully.return_value = mock_response mock_response.get_child_content.return_value = expected_lun_id with mock.patch.object(netapp_api.NaElement, 'create_node_with_children', ) as mock_create_node: actual_lun_id = self.client.map_lun(path, igroup, lun_id=expected_lun_id) mock_create_node.assert_called_once_with( 'lun-map', **{'path': path, 'initiator-group': igroup}) self.connection.invoke_successfully.assert_called_once_with( mock.ANY, True) self.assertEqual(expected_lun_id, actual_lun_id) def test_map_lun_with_api_error(self): path = '/vol/%s/%s' % (self.fake_volume, self.fake_lun) igroup = 'igroup' self.connection.invoke_successfully.side_effect =\ netapp_api.NaApiError() with mock.patch.object(netapp_api.NaElement, 'create_node_with_children', ) as mock_create_node: self.assertRaises(netapp_api.NaApiError, self.client.map_lun, path, igroup) mock_create_node.assert_called_once_with( 'lun-map', **{'path': path, 'initiator-group': igroup}) self.connection.invoke_successfully.assert_called_once_with( mock.ANY, True) def test_unmap_lun(self): path = '/vol/%s/%s' % (self.fake_volume, self.fake_lun) igroup = 'igroup' mock_response = mock.Mock() self.connection.invoke_successfully.return_value = mock_response with mock.patch.object(netapp_api.NaElement, 'create_node_with_children', ) as mock_create_node: self.client.unmap_lun(path, igroup) mock_create_node.assert_called_once_with( 'lun-unmap', **{'path': path, 'initiator-group': igroup}) self.connection.invoke_successfully.assert_called_once_with( mock.ANY, True) def test_unmap_lun_with_api_error(self): path = '/vol/%s/%s' % (self.fake_volume, self.fake_lun) igroup = 'igroup' self.connection.invoke_successfully.side_effect =\ netapp_api.NaApiError() with mock.patch.object(netapp_api.NaElement, 'create_node_with_children', ) as mock_create_node: self.assertRaises(netapp_api.NaApiError, self.client.unmap_lun, path, igroup) mock_create_node.assert_called_once_with( 'lun-unmap', **{'path': path, 'initiator-group': igroup}) def test_unmap_lun_already_unmapped(self): path = '/vol/%s/%s' % (self.fake_volume, self.fake_lun) igroup = 'igroup' EINVALIDINPUTERROR = '13115' self.connection.invoke_successfully.side_effect =\ netapp_api.NaApiError(code=EINVALIDINPUTERROR) with mock.patch.object(netapp_api.NaElement, 'create_node_with_children', ) as mock_create_node: self.client.unmap_lun(path, igroup) 
mock_create_node.assert_called_once_with( 'lun-unmap', **{'path': path, 'initiator-group': igroup}) self.connection.invoke_successfully.assert_called_once_with( mock.ANY, True) def test_unmap_lun_lun_not_mapped_in_group(self): path = '/vol/%s/%s' % (self.fake_volume, self.fake_lun) igroup = 'igroup' EVDISK_ERROR_NO_SUCH_LUNMAP = '9016' self.connection.invoke_successfully.side_effect =\ netapp_api.NaApiError(code=EVDISK_ERROR_NO_SUCH_LUNMAP) with mock.patch.object(netapp_api.NaElement, 'create_node_with_children', ) as mock_create_node: self.client.unmap_lun(path, igroup) mock_create_node.assert_called_once_with( 'lun-unmap', **{'path': path, 'initiator-group': igroup}) self.connection.invoke_successfully.assert_called_once_with( mock.ANY, True) def test_create_igroup(self): igroup = 'igroup' with mock.patch.object(netapp_api.NaElement, 'create_node_with_children', ) as mock_create_node: self.client.create_igroup(igroup) mock_create_node.assert_called_once_with( 'igroup-create', **{'initiator-group-name': igroup, 'initiator-group-type': 'iscsi', 'os-type': 'default'}) self.connection.invoke_successfully.assert_called_once_with( mock.ANY, True) def test_add_igroup_initiator(self): igroup = 'igroup' initiator = 'initiator' with mock.patch.object(netapp_api.NaElement, 'create_node_with_children', ) as mock_create_node: self.client.add_igroup_initiator(igroup, initiator) mock_create_node.assert_called_once_with( 'igroup-add', **{'initiator-group-name': igroup, 'initiator': initiator}) self.connection.invoke_successfully.assert_called_once_with( mock.ANY, True) def test_do_direct_resize(self): path = '/vol/%s/%s' % (self.fake_volume, self.fake_lun) new_size = 1024 mock_request = mock.Mock() with mock.patch.object(netapp_api.NaElement, 'create_node_with_children', return_value=mock_request ) as mock_create_node: self.client.do_direct_resize(path, new_size) mock_create_node.assert_called_once_with( 'lun-resize', **{'path': path, 'size': new_size}) mock_request.add_new_child.assert_called_once_with( 'force', 'true') self.connection.invoke_successfully.assert_called_once_with( mock.ANY, True) def test_do_direct_resize_not_forced(self): path = '/vol/%s/%s' % (self.fake_volume, self.fake_lun) new_size = 1024 mock_request = mock.Mock() with mock.patch.object(netapp_api.NaElement, 'create_node_with_children', return_value=mock_request ) as mock_create_node: self.client.do_direct_resize(path, new_size, force=False) mock_create_node.assert_called_once_with( 'lun-resize', **{'path': path, 'size': new_size}) self.assertFalse(mock_request.add_new_child.called) self.connection.invoke_successfully.assert_called_once_with( mock.ANY, True) def test_get_lun_geometry(self): expected_keys = set(['size', 'bytes_per_sector', 'sectors_per_track', 'tracks_per_cylinder', 'cylinders', 'max_resize']) path = '/vol/%s/%s' % (self.fake_volume, self.fake_lun) mock_response = mock.Mock() self.connection.invoke_successfully.return_value = mock_response geometry = self.client.get_lun_geometry(path) self.assertEqual(expected_keys, set(geometry.keys())) def test_get_lun_geometry_with_api_error(self): path = '/vol/%s/%s' % (self.fake_volume, self.fake_lun) self.connection.invoke_successfully.side_effect =\ netapp_api.NaApiError() geometry = self.client.get_lun_geometry(path) self.assertEqual({}, geometry) def test_get_volume_options(self): fake_response = netapp_api.NaElement('volume') fake_response.add_node_with_children('options', test='blah') self.connection.invoke_successfully.return_value = fake_response options = 
self.client.get_volume_options('volume') self.assertEqual(1, len(options)) def test_get_volume_options_with_no_options(self): fake_response = netapp_api.NaElement('options') self.connection.invoke_successfully.return_value = fake_response options = self.client.get_volume_options('volume') self.assertEqual([], options) def test_move_lun(self): path = '/vol/%s/%s' % (self.fake_volume, self.fake_lun) new_path = '/vol/%s/%s' % (self.fake_volume, self.fake_lun) fake_response = netapp_api.NaElement('options') self.connection.invoke_successfully.return_value = fake_response self.client.move_lun(path, new_path) self.connection.invoke_successfully.assert_called_once_with( mock.ANY, True) def test_get_igroup_by_initiators(self): self.assertRaises(NotImplementedError, self.client.get_igroup_by_initiators, fake.FC_FORMATTED_INITIATORS) def test_get_fc_target_wwpns(self): self.assertRaises(NotImplementedError, self.client.get_fc_target_wwpns) def test_has_luns_mapped_to_initiator(self): initiator = fake.FC_FORMATTED_INITIATORS[0] version_response = netapp_api.NaElement( etree.XML(""" /vol/cinder1/volume-9be956b3-9854-4a5c-a7f5-13a16da52c9c openstack-4b57a80b-ebca-4d27-bd63-48ac5408d08b 0 /vol/cinder1/volume-ac90433c-a560-41b3-9357-7f3f80071eb5 openstack-4b57a80b-ebca-4d27-bd63-48ac5408d08b 1 """)) self.connection.invoke_successfully.return_value = version_response self.assertTrue(self.client._has_luns_mapped_to_initiator(initiator)) def test_has_luns_mapped_to_initiator_not_mapped(self): initiator = fake.FC_FORMATTED_INITIATORS[0] version_response = netapp_api.NaElement( etree.XML(""" """)) self.connection.invoke_successfully.return_value = version_response self.assertFalse(self.client._has_luns_mapped_to_initiator(initiator)) @mock.patch.object(client_base.Client, '_has_luns_mapped_to_initiator') def test_has_luns_mapped_to_initiators(self, mock_has_luns_mapped_to_initiator): initiators = fake.FC_FORMATTED_INITIATORS mock_has_luns_mapped_to_initiator.return_value = True self.assertTrue(self.client.has_luns_mapped_to_initiators(initiators)) @mock.patch.object(client_base.Client, '_has_luns_mapped_to_initiator') def test_has_luns_mapped_to_initiators_not_mapped( self, mock_has_luns_mapped_to_initiator): initiators = fake.FC_FORMATTED_INITIATORS mock_has_luns_mapped_to_initiator.return_value = False self.assertFalse(self.client.has_luns_mapped_to_initiators(initiators)) def test_get_performance_counter_info(self): self.mock_send_request.return_value = netapp_api.NaElement( fake_client.PERF_OBJECT_COUNTER_LIST_INFO_WAFL_RESPONSE) result = self.client.get_performance_counter_info('wafl', 'cp_phase_times') expected = { 'name': 'cp_phase_times', 'base-counter': 'total_cp_msecs', 'labels': fake_client.PERF_OBJECT_COUNTER_TOTAL_CP_MSECS_LABELS, } self.assertEqual(expected, result) perf_object_counter_list_info_args = {'objectname': 'wafl'} self.mock_send_request.assert_called_once_with( 'perf-object-counter-list-info', perf_object_counter_list_info_args, enable_tunneling=False) def test_get_performance_counter_info_not_found(self): self.mock_send_request.return_value = netapp_api.NaElement( fake_client.PERF_OBJECT_COUNTER_LIST_INFO_WAFL_RESPONSE) self.assertRaises(exception.NotFound, self.client.get_performance_counter_info, 'wafl', 'invalid') def test_delete_snapshot(self): api_args = { 'volume': fake.SNAPSHOT['volume_id'], 'snapshot': fake.SNAPSHOT['name'], } self.mock_object(self.client.connection, 'send_request') self.client.delete_snapshot(api_args['volume'], api_args['snapshot']) asserted_api_args = { 
'volume': api_args['volume'], 'snapshot': api_args['snapshot'], } self.client.connection.send_request.assert_called_once_with( 'snapshot-delete', asserted_api_args) def test_create_cg_snapshot(self): self.mock_object(self.client, '_start_cg_snapshot', return_value=fake.CONSISTENCY_GROUP_ID) self.mock_object(self.client, '_commit_cg_snapshot') self.client.create_cg_snapshot([fake.CG_VOLUME_NAME], fake.CG_SNAPSHOT_NAME) self.client._commit_cg_snapshot.assert_called_once_with( fake.CONSISTENCY_GROUP_ID) def test_create_cg_snapshot_no_id(self): self.mock_object(self.client, '_start_cg_snapshot', return_value=None) self.assertRaises(exception.VolumeBackendAPIException, self.client.create_cg_snapshot, [fake.CG_VOLUME_NAME], fake.CG_SNAPSHOT_NAME) def test_start_cg_snapshot(self): snapshot_init = { 'snapshot': fake.CG_SNAPSHOT_NAME, 'timeout': 'relaxed', 'volumes': [{'volume-name': fake.CG_VOLUME_NAME}], } self.mock_object(self.client.connection, 'send_request') self.client._start_cg_snapshot([fake.CG_VOLUME_NAME], snapshot_init['snapshot']) self.client.connection.send_request.assert_called_once_with( 'cg-start', snapshot_init) def test_commit_cg_snapshot(self): snapshot_commit = {'cg-id': fake.CG_VOLUME_ID} self.mock_object(self.client.connection, 'send_request') self.client._commit_cg_snapshot(snapshot_commit['cg-id']) self.client.connection.send_request.assert_called_once_with( 'cg-commit', {'cg-id': snapshot_commit['cg-id']}) def test_wait_for_busy_snapshot_raise_exception(self): BUSY_SNAPSHOT = dict(fake.SNAPSHOT) BUSY_SNAPSHOT['busy'] = True # Need to mock sleep as it is called by @utils.retry self.mock_object(time, 'sleep') mock_get_snapshot = self.mock_object(self.client, 'get_snapshot', return_value=BUSY_SNAPSHOT) self.assertRaises(exception.SnapshotIsBusy, self.client.wait_for_busy_snapshot, fake.FLEXVOL, fake.SNAPSHOT_NAME) calls = [ mock.call(fake.FLEXVOL, fake.SNAPSHOT_NAME), mock.call(fake.FLEXVOL, fake.SNAPSHOT_NAME), mock.call(fake.FLEXVOL, fake.SNAPSHOT_NAME), ] mock_get_snapshot.assert_has_calls(calls) def test_rename_snapshot(self): self.mock_object(self.client.connection, 'send_request') self.client.rename_snapshot( fake.SNAPSHOT['volume_id'], fake.SNAPSHOT_NAME, client_base.DELETED_PREFIX + fake.SNAPSHOT_NAME) api_args = { 'volume': fake.SNAPSHOT['volume_id'], 'current-name': fake.SNAPSHOT_NAME, 'new-name': client_base.DELETED_PREFIX + fake.SNAPSHOT_NAME, } self.client.connection.send_request.assert_called_once_with( 'snapshot-rename', api_args) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/volume/drivers/netapp/dataontap/client/test_client_cmode.py0000664000175000017500000053541500000000000032234 0ustar00zuulzuul00000000000000# Copyright (c) 2014 Alex Meade. All rights reserved. # Copyright (c) 2015 Dustin Schoenbrun. All rights reserved. # Copyright (c) 2015 Tom Barron. All rights reserved. # Copyright (c) 2016 Mike Rooney. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
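# ---------------------------------------------------------------------------
# Illustrative sketch only (not part of the original module): the paging
# contract that test_send_iter_request below asserts. The function name and
# its arguments are assumptions for illustration; the real logic lives in
# client_cmode.Client.send_iter_request. Each page is requested with
# 'max-records', and while the response carries a non-empty 'next-tag' that
# tag is echoed back as 'tag' on the following call.
# ---------------------------------------------------------------------------
def _example_iter_paging(connection, api_name, api_args, page_length):
    """Page through an ONTAP iter API using 'max-records' and 'next-tag'."""
    args = dict(api_args)
    args['max-records'] = page_length
    total_records = 0
    while True:
        # Each page is a netapp_api.NaElement response object.
        page = connection.send_request(api_name, args, enable_tunneling=True)
        total_records += int(page.get_child_content('num-records') or 0)
        next_tag = page.get_child_content('next-tag')
        if not next_tag:
            # The last page reports an empty 'next-tag', ending the loop.
            break
        args['tag'] = next_tag
    return total_records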
import copy import time from unittest import mock import uuid import ddt from lxml import etree from oslo_utils import units import paramiko from cinder import exception from cinder import ssh_utils from cinder.tests.unit import test from cinder.tests.unit.volume.drivers.netapp.dataontap.client import ( fakes as fake_client) from cinder.tests.unit.volume.drivers.netapp.dataontap import fakes as fake from cinder.volume.drivers.netapp.dataontap.client import api as netapp_api from cinder.volume.drivers.netapp.dataontap.client import client_base from cinder.volume.drivers.netapp.dataontap.client import client_cmode from cinder.volume.drivers.netapp import utils as netapp_utils CONNECTION_INFO = {'hostname': 'hostname', 'transport_type': 'https', 'port': 443, 'username': 'admin', 'password': 'passw0rd', 'vserver': 'fake_vserver', 'api_trace_pattern': 'fake_regex', 'private_key_file': 'fake_private_key.pem', 'certificate_file': 'fake_cert.pem', 'ca_certificate_file': 'fake_ca_cert.crt', 'certificate_host_validation': 'False'} @ddt.ddt class NetAppCmodeClientTestCase(test.TestCase): def setUp(self): super(NetAppCmodeClientTestCase, self).setUp() self.mock_object(client_cmode.Client, '_init_ssh_client') # store the original reference so we can call it later in # test__get_cluster_nodes_info self.original_get_cluster_nodes_info = ( client_cmode.Client._get_cluster_nodes_info) self.mock_object(client_cmode.Client, '_get_cluster_nodes_info', return_value=fake.HYBRID_SYSTEM_NODES_INFO) self.mock_object(client_cmode.Client, 'get_ontap_version', return_value='9.6') with mock.patch.object(client_cmode.Client, 'get_ontapi_version', return_value=(1, 20)): self.client = client_cmode.Client(**CONNECTION_INFO) self.client.ssh_client = mock.MagicMock() self.client.connection = mock.MagicMock() self.connection = self.client.connection self.vserver = CONNECTION_INFO['vserver'] self.fake_volume = str(uuid.uuid4()) self.fake_lun = str(uuid.uuid4()) self.mock_send_request = self.mock_object( self.client.connection, 'send_request') def _mock_api_error(self, code='fake'): return mock.Mock(side_effect=netapp_api.NaApiError(code=code)) def test_has_records(self): result = self.client._has_records(netapp_api.NaElement( fake_client.QOS_POLICY_GROUP_GET_ITER_RESPONSE)) self.assertTrue(result) def test_has_records_not_found(self): result = self.client._has_records( netapp_api.NaElement(fake_client.NO_RECORDS_RESPONSE)) self.assertFalse(result) @ddt.data((fake_client.AGGR_GET_ITER_RESPONSE, 2), (fake_client.NO_RECORDS_RESPONSE, 0)) @ddt.unpack def test_get_record_count(self, response, expected): api_response = netapp_api.NaElement(response) result = self.client._get_record_count(api_response) self.assertEqual(expected, result) def test_get_records_count_invalid(self): api_response = netapp_api.NaElement( fake_client.INVALID_GET_ITER_RESPONSE_NO_RECORDS) self.assertRaises(netapp_utils.NetAppDriverException, self.client._get_record_count, api_response) @ddt.data(True, False) def test_send_iter_request(self, enable_tunneling): api_responses = [ netapp_api.NaElement( fake_client.STORAGE_DISK_GET_ITER_RESPONSE_PAGE_1), netapp_api.NaElement( fake_client.STORAGE_DISK_GET_ITER_RESPONSE_PAGE_2), netapp_api.NaElement( fake_client.STORAGE_DISK_GET_ITER_RESPONSE_PAGE_3), ] mock_send_request = self.mock_object( self.client.connection, 'send_request', side_effect=copy.deepcopy(api_responses)) storage_disk_get_iter_args = { 'desired-attributes': { 'storage-disk-info': { 'disk-name': None, } } } result = self.client.send_iter_request( 
'storage-disk-get-iter', api_args=storage_disk_get_iter_args, enable_tunneling=enable_tunneling, max_page_length=10) num_records = result.get_child_content('num-records') self.assertEqual('28', num_records) next_tag = result.get_child_content('next-tag') self.assertEqual('', next_tag) args1 = copy.deepcopy(storage_disk_get_iter_args) args1['max-records'] = 10 args2 = copy.deepcopy(storage_disk_get_iter_args) args2['max-records'] = 10 args2['tag'] = 'next_tag_1' args3 = copy.deepcopy(storage_disk_get_iter_args) args3['max-records'] = 10 args3['tag'] = 'next_tag_2' mock_send_request.assert_has_calls([ mock.call('storage-disk-get-iter', args1, enable_tunneling=enable_tunneling), mock.call('storage-disk-get-iter', args2, enable_tunneling=enable_tunneling), mock.call('storage-disk-get-iter', args3, enable_tunneling=enable_tunneling), ]) def test_send_iter_request_single_page(self): api_response = netapp_api.NaElement( fake_client.STORAGE_DISK_GET_ITER_RESPONSE) mock_send_request = self.mock_object(self.client.connection, 'send_request', return_value=api_response) storage_disk_get_iter_args = { 'desired-attributes': { 'storage-disk-info': { 'disk-name': None, } } } result = self.client.send_iter_request( 'storage-disk-get-iter', api_args=storage_disk_get_iter_args, max_page_length=10) num_records = result.get_child_content('num-records') self.assertEqual('4', num_records) args = copy.deepcopy(storage_disk_get_iter_args) args['max-records'] = 10 mock_send_request.assert_has_calls([ mock.call('storage-disk-get-iter', args, enable_tunneling=True), ]) def test_send_iter_request_not_found(self): api_response = netapp_api.NaElement(fake_client.NO_RECORDS_RESPONSE) mock_send_request = self.mock_object(self.client.connection, 'send_request', return_value=api_response) result = self.client.send_iter_request('storage-disk-get-iter') num_records = result.get_child_content('num-records') self.assertEqual('0', num_records) args = {'max-records': client_cmode.DEFAULT_MAX_PAGE_LENGTH} mock_send_request.assert_has_calls([ mock.call('storage-disk-get-iter', args, enable_tunneling=True), ]) @ddt.data(fake_client.INVALID_GET_ITER_RESPONSE_NO_ATTRIBUTES, fake_client.INVALID_GET_ITER_RESPONSE_NO_RECORDS) def test_send_iter_request_invalid(self, fake_response): api_response = netapp_api.NaElement(fake_response) self.mock_object(self.client.connection, 'send_request', return_value=api_response) self.assertRaises(netapp_utils.NetAppDriverException, self.client.send_iter_request, 'storage-disk-get-iter') @ddt.data((fake.AFF_SYSTEM_NODE_GET_ITER_RESPONSE, fake.AFF_SYSTEM_NODES_INFO), (fake.FAS_SYSTEM_NODE_GET_ITER_RESPONSE, fake.FAS_SYSTEM_NODES_INFO), (fake_client.NO_RECORDS_RESPONSE, []), (fake.HYBRID_SYSTEM_NODE_GET_ITER_RESPONSE, fake.HYBRID_SYSTEM_NODES_INFO)) @ddt.unpack def test__get_cluster_nodes_info(self, response, expected): client_cmode.Client._get_cluster_nodes_info = ( self.original_get_cluster_nodes_info) nodes_response = netapp_api.NaElement(response) self.mock_object(client_cmode.Client, 'send_iter_request', return_value=nodes_response) result = self.client._get_cluster_nodes_info() self.assertEqual(expected, result) def test_list_vservers(self): api_response = netapp_api.NaElement( fake_client.VSERVER_DATA_LIST_RESPONSE) self.mock_object(self.client, 'send_iter_request', return_value=api_response) result = self.client.list_vservers() vserver_get_iter_args = { 'query': { 'vserver-info': { 'vserver-type': 'data' } }, 'desired-attributes': { 'vserver-info': { 'vserver-name': None } } } 
self.client.send_iter_request.assert_has_calls([ mock.call('vserver-get-iter', vserver_get_iter_args, enable_tunneling=False)]) self.assertListEqual([fake_client.VSERVER_NAME], result) def test_list_vservers_node_type(self): api_response = netapp_api.NaElement( fake_client.VSERVER_DATA_LIST_RESPONSE) self.mock_object(self.client, 'send_iter_request', return_value=api_response) result = self.client.list_vservers(vserver_type='node') vserver_get_iter_args = { 'query': { 'vserver-info': { 'vserver-type': 'node' } }, 'desired-attributes': { 'vserver-info': { 'vserver-name': None } } } self.client.send_iter_request.assert_has_calls([ mock.call('vserver-get-iter', vserver_get_iter_args, enable_tunneling=False)]) self.assertListEqual([fake_client.VSERVER_NAME], result) def test_list_vservers_not_found(self): api_response = netapp_api.NaElement( fake_client.NO_RECORDS_RESPONSE) self.mock_object(self.client.connection, 'send_request', return_value=api_response) result = self.client.list_vservers(vserver_type='data') self.assertListEqual([], result) @ddt.data((1, 21), (1, 100), (2, 0)) def test_get_ems_log_destination_vserver(self, ontapi_version): self.mock_object(self.client, 'get_ontapi_version', return_value=ontapi_version) mock_list_vservers = self.mock_object( self.client, 'list_vservers', return_value=[fake_client.ADMIN_VSERVER_NAME]) result = self.client._get_ems_log_destination_vserver() mock_list_vservers.assert_called_once_with(vserver_type='admin') self.assertEqual(fake_client.ADMIN_VSERVER_NAME, result) def test_get_ems_log_destination_vserver_legacy(self): self.mock_object(self.client, 'get_ontapi_version', return_value=(1, 15)) mock_list_vservers = self.mock_object( self.client, 'list_vservers', return_value=[fake_client.NODE_VSERVER_NAME]) result = self.client._get_ems_log_destination_vserver() mock_list_vservers.assert_called_once_with(vserver_type='node') self.assertEqual(fake_client.NODE_VSERVER_NAME, result) def test_get_ems_log_destination_no_cluster_creds(self): self.mock_object(self.client, 'get_ontapi_version', return_value=(1, 21)) mock_list_vservers = self.mock_object( self.client, 'list_vservers', side_effect=[[], [fake_client.VSERVER_NAME]]) result = self.client._get_ems_log_destination_vserver() mock_list_vservers.assert_has_calls([ mock.call(vserver_type='admin'), mock.call(vserver_type='data')]) self.assertEqual(fake_client.VSERVER_NAME, result) def test_get_ems_log_destination_vserver_not_found(self): self.mock_object(self.client, 'get_ontapi_version', return_value=(1, 21)) mock_list_vservers = self.mock_object( self.client, 'list_vservers', return_value=[]) self.assertRaises(exception.NotFound, self.client._get_ems_log_destination_vserver) mock_list_vservers.assert_has_calls([ mock.call(vserver_type='admin'), mock.call(vserver_type='data'), mock.call(vserver_type='node')]) def test_get_iscsi_target_details_no_targets(self): response = netapp_api.NaElement( etree.XML(""" 1 """)) self.connection.invoke_successfully.return_value = response target_list = self.client.get_iscsi_target_details() self.assertEqual([], target_list) def test_get_iscsi_target_details(self): expected_target = { "address": "127.0.0.1", "port": "1337", "interface-enabled": "true", "tpgroup-tag": "7777", } response = netapp_api.NaElement( etree.XML(""" 1 %(address)s %(port)s %(interface-enabled)s %(tpgroup-tag)s """ % expected_target)) self.connection.invoke_successfully.return_value = response target_list = self.client.get_iscsi_target_details() self.assertEqual([expected_target], target_list) def 
test_get_iscsi_service_details_with_no_iscsi_service(self): response = netapp_api.NaElement( etree.XML(""" 0 """)) self.connection.invoke_successfully.return_value = response iqn = self.client.get_iscsi_service_details() self.assertIsNone(iqn) def test_get_iscsi_service_details(self): expected_iqn = 'iqn.1998-01.org.openstack.iscsi:name1' response = netapp_api.NaElement( etree.XML(""" 1 %s """ % expected_iqn)) self.connection.invoke_successfully.return_value = response iqn = self.client.get_iscsi_service_details() self.assertEqual(expected_iqn, iqn) def test_get_lun_list(self): response = netapp_api.NaElement( etree.XML(""" 2 """)) self.connection.invoke_successfully.return_value = response luns = self.client.get_lun_list() self.assertEqual(2, len(luns)) def test_get_lun_list_with_multiple_pages(self): response = netapp_api.NaElement( etree.XML(""" 2 fake-next """)) response_2 = netapp_api.NaElement( etree.XML(""" 2 """)) self.connection.invoke_successfully.side_effect = [response, response_2] luns = self.client.get_lun_list() self.assertEqual(4, len(luns)) def test_get_lun_map_no_luns_mapped(self): path = '/vol/%s/%s' % (self.fake_volume, self.fake_lun) response = netapp_api.NaElement( etree.XML(""" 0 """)) self.connection.invoke_successfully.return_value = response lun_map = self.client.get_lun_map(path) self.assertEqual([], lun_map) def test_get_lun_map(self): path = '/vol/%s/%s' % (self.fake_volume, self.fake_lun) expected_lun_map = { "initiator-group": "igroup", "lun-id": "1337", "vserver": "vserver", } response = netapp_api.NaElement( etree.XML(""" 1 %(lun-id)s %(initiator-group)s %(vserver)s """ % expected_lun_map)) self.connection.invoke_successfully.return_value = response lun_map = self.client.get_lun_map(path) self.assertEqual([expected_lun_map], lun_map) def test_get_lun_map_multiple_pages(self): path = '/vol/%s/%s' % (self.fake_volume, self.fake_lun) expected_lun_map = { "initiator-group": "igroup", "lun-id": "1337", "vserver": "vserver", } response = netapp_api.NaElement( etree.XML(""" 1 %(lun-id)s %(initiator-group)s %(vserver)s blah """ % expected_lun_map)) response_2 = netapp_api.NaElement( etree.XML(""" 1 %(lun-id)s %(initiator-group)s %(vserver)s """ % expected_lun_map)) self.connection.invoke_successfully.side_effect = [response, response_2] lun_map = self.client.get_lun_map(path) self.assertEqual([expected_lun_map, expected_lun_map], lun_map) def test_get_igroup_by_initiator_none_found(self): initiator = 'initiator' response = netapp_api.NaElement( etree.XML(""" 0 """)) self.connection.invoke_successfully.return_value = response igroup = self.client.get_igroup_by_initiators([initiator]) self.assertEqual([], igroup) def test_get_igroup_by_initiators(self): initiators = ['11:22:33:44:55:66:77:88'] expected_igroup = { 'initiator-group-os-type': 'default', 'initiator-group-type': 'fcp', 'initiator-group-name': 'openstack-igroup1', } response = netapp_api.NaElement( etree.XML(""" true %(initiator-group-name)s default false 0 %(initiator-group-type)s true f8aa707a-57fa-11e4-ad08-123478563412 false 11:22:33:44:55:66:77:88 cinder-iscsi 1 """ % expected_igroup)) self.connection.invoke_successfully.return_value = response igroups = self.client.get_igroup_by_initiators(initiators) # make these lists of dicts comparable using hashable dictionaries igroups = set( [netapp_utils.hashabledict(igroup) for igroup in igroups]) expected = set([netapp_utils.hashabledict(expected_igroup)]) self.assertSetEqual(igroups, expected) def test_get_igroup_by_initiators_multiple(self): initiators = 
['11:22:33:44:55:66:77:88', '88:77:66:55:44:33:22:11'] expected_igroup = { 'initiator-group-os-type': 'default', 'initiator-group-type': 'fcp', 'initiator-group-name': 'openstack-igroup1', } response = netapp_api.NaElement( etree.XML(""" true %(initiator-group-name)s default false 0 %(initiator-group-type)s true f8aa707a-57fa-11e4-ad08-123478563412 false 11:22:33:44:55:66:77:88 88:77:66:55:44:33:22:11 cinder-iscsi 1 """ % expected_igroup)) self.connection.invoke_successfully.return_value = response igroups = self.client.get_igroup_by_initiators(initiators) # make these lists of dicts comparable using hashable dictionaries igroups = set( [netapp_utils.hashabledict(igroup) for igroup in igroups]) expected = set([netapp_utils.hashabledict(expected_igroup)]) self.assertSetEqual(igroups, expected) def test_get_igroup_by_initiators_multiple_pages(self): initiator = '11:22:33:44:55:66:77:88' expected_igroup1 = { 'initiator-group-os-type': 'default', 'initiator-group-type': 'fcp', 'initiator-group-name': 'openstack-igroup1', } expected_igroup2 = { 'initiator-group-os-type': 'default', 'initiator-group-type': 'fcp', 'initiator-group-name': 'openstack-igroup2', } response_1 = netapp_api.NaElement( etree.XML(""" true %(initiator-group-name)s default false 0 %(initiator-group-type)s true f8aa707a-57fa-11e4-ad08-123478563412 false 11:22:33:44:55:66:77:88 cinder-iscsi 12345 1 """ % expected_igroup1)) response_2 = netapp_api.NaElement( etree.XML(""" true %(initiator-group-name)s default false 0 %(initiator-group-type)s true f8aa707a-57fa-11e4-ad08-123478563412 false 11:22:33:44:55:66:77:88 cinder-iscsi 1 """ % expected_igroup2)) self.connection.invoke_successfully.side_effect = [response_1, response_2] igroups = self.client.get_igroup_by_initiators([initiator]) # make these lists of dicts comparable using hashable dictionaries igroups = set( [netapp_utils.hashabledict(igroup) for igroup in igroups]) expected = set([netapp_utils.hashabledict(expected_igroup1), netapp_utils.hashabledict(expected_igroup2)]) self.assertSetEqual(igroups, expected) @ddt.data(True, False) def test__validate_qos_policy_group_none_adaptive(self, is_adaptive): self.client.features.add_feature('ADAPTIVE_QOS', supported=True) self.client._validate_qos_policy_group( is_adaptive=is_adaptive, spec=None) def test__validate_qos_policy_group_none_adaptive_no_support(self): self.client.features.add_feature('ADAPTIVE_QOS', supported=False) self.assertRaises( netapp_utils.NetAppDriverException, self.client._validate_qos_policy_group, is_adaptive=True, spec=None) @ddt.data(True, False) def test__validate_qos_policy_group_no_qos_min_support(self, is_adaptive): spec = {'min_throughput': '10'} self.assertRaises( netapp_utils.NetAppDriverException, self.client._validate_qos_policy_group, is_adaptive=is_adaptive, spec=spec, qos_min_support=False) def test__validate_qos_policy_group_no_block_size_support(self): self.client.features.add_feature( 'ADAPTIVE_QOS_BLOCK_SIZE', supported=False) spec = {'block_size': '4K'} self.assertRaises( netapp_utils.NetAppDriverException, self.client._validate_qos_policy_group, is_adaptive=True, spec=spec) def test__validate_qos_policy_group_no_expected_iops_allocation_support( self): self.client.features.add_feature( 'ADAPTIVE_QOS_EXPECTED_IOPS_ALLOCATION', supported=False) spec = {'expected_iops_allocation': 'used-space'} self.assertRaises( netapp_utils.NetAppDriverException, self.client._validate_qos_policy_group, is_adaptive=True, spec=spec) def test__validate_qos_policy_group_adaptive_qos_spec(self): 
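        # With ADAPTIVE_QOS, ADAPTIVE_QOS_BLOCK_SIZE and
        # ADAPTIVE_QOS_EXPECTED_IOPS_ALLOCATION all supported, a fully
        # populated adaptive spec should validate without raising.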
self.client.features.add_feature('ADAPTIVE_QOS', supported=True) self.client.features.add_feature( 'ADAPTIVE_QOS_BLOCK_SIZE', supported=True) self.client.features.add_feature( 'ADAPTIVE_QOS_EXPECTED_IOPS_ALLOCATION', supported=True) spec = { 'expected_iops': '128IOPS/GB', 'peak_iops': '512IOPS/GB', 'expected_iops_allocation': 'used-space', 'peak_iops_allocation': 'used-space', 'absolute_min_iops': '64IOPS', 'block_size': '4K', } self.client._validate_qos_policy_group(is_adaptive=True, spec=spec) def test_clone_lun(self): self.client.clone_lun( 'volume', 'fakeLUN', 'newFakeLUN', qos_policy_group_name=fake.QOS_POLICY_GROUP_NAME) self.assertEqual(1, self.connection.invoke_successfully.call_count) @ddt.data({'supports_is_backup': True, 'is_snapshot': True}, {'supports_is_backup': True, 'is_snapshot': False}, {'supports_is_backup': False, 'is_snapshot': True}, {'supports_is_backup': False, 'is_snapshot': False}) @ddt.unpack def test_clone_lun_is_snapshot(self, supports_is_backup, is_snapshot): self.client.features.add_feature('BACKUP_CLONE_PARAM', supported=supports_is_backup) self.client.clone_lun( 'volume', 'fakeLUN', 'newFakeLUN', is_snapshot=is_snapshot) clone_create_args = { 'volume': 'volume', 'source-path': 'fakeLUN', 'destination-path': 'newFakeLUN', 'space-reserve': 'true', } if is_snapshot and supports_is_backup: clone_create_args['is-backup'] = 'true' self.connection.invoke_successfully.assert_called_once_with( netapp_api.NaElement.create_node_with_children( 'clone-create', **clone_create_args), True) @ddt.data(0, 1) def test_clone_lun_is_sub_clone(self, block_count): self.client.clone_lun( 'volume', 'fakeLUN', 'newFakeLUN', block_count=block_count) clone_create_args = { 'volume': 'volume', 'source-path': 'fakeLUN', 'destination-path': 'newFakeLUN', } is_sub_clone = block_count > 0 if not is_sub_clone: clone_create_args['space-reserve'] = 'true' # build the expected request expected_clone_create_request = \ netapp_api.NaElement.create_node_with_children( 'clone-create', **clone_create_args) # add expected fields in the request if it's a sub-clone if is_sub_clone: block_ranges = netapp_api.NaElement("block-ranges") block_range = \ netapp_api.NaElement.create_node_with_children( 'block-range', **{'source-block-number': '0', 'destination-block-number': '0', 'block-count': '1'}) block_ranges.add_child_elem(block_range) expected_clone_create_request.add_child_elem(block_ranges) self.connection.invoke_successfully.assert_called_once_with( expected_clone_create_request, True) def test_clone_lun_multiple_zapi_calls(self): """Test for when lun clone requires more than one zapi call.""" # Max clone size per call = 2^18 blocks * 512 bytes/block = 128 MB # Force 2 calls bc = 2 ** 18 * 2 self.client.clone_lun('volume', 'fakeLUN', 'newFakeLUN', block_count=bc) self.assertEqual(2, self.connection.invoke_successfully.call_count) def test_get_lun_by_args(self): response = netapp_api.NaElement( etree.XML(""" 2 """)) self.connection.invoke_successfully.return_value = response lun = self.client.get_lun_by_args() self.assertEqual(1, len(lun)) def test_get_lun_by_args_no_lun_found(self): response = netapp_api.NaElement( etree.XML(""" 2 """)) self.connection.invoke_successfully.return_value = response lun = self.client.get_lun_by_args() self.assertEqual(0, len(lun)) def test_get_lun_by_args_with_args_specified(self): path = '/vol/%s/%s' % (self.fake_volume, self.fake_lun) response = netapp_api.NaElement( etree.XML(""" 2 """)) self.connection.invoke_successfully.return_value = response lun = 
self.client.get_lun_by_args(path=path) __, _args, __ = self.connection.invoke_successfully.mock_calls[0] actual_request = _args[0] query = actual_request.get_child_by_name('query') lun_info_args = query.get_child_by_name('lun-info').get_children() # Assert request is made with correct arguments self.assertEqual('path', lun_info_args[0].get_name()) self.assertEqual(path, lun_info_args[0].get_content()) self.assertEqual(1, len(lun)) def test_file_assign_qos(self): api_args = { 'volume': fake.FLEXVOL, 'qos-policy-group-name': fake.QOS_POLICY_GROUP_NAME, 'file': fake.NFS_FILE_PATH, 'vserver': self.vserver } self.client.file_assign_qos(fake.FLEXVOL, fake.QOS_POLICY_GROUP_NAME, False, fake.NFS_FILE_PATH) self.mock_send_request.assert_has_calls([ mock.call('file-assign-qos', api_args, False)]) def test_set_lun_qos_policy_group(self): api_args = { 'path': fake.LUN_PATH, 'qos-policy-group': fake.QOS_POLICY_GROUP_NAME, } self.client.set_lun_qos_policy_group( fake.LUN_PATH, fake.QOS_POLICY_GROUP_NAME) self.mock_send_request.assert_has_calls([ mock.call('lun-set-qos-policy-group', api_args)]) def test_provision_qos_policy_group_no_qos_policy_group_info(self): mock_qos_policy_group_create = self.mock_object( self.client, 'qos_policy_group_create') self.client.provision_qos_policy_group(qos_policy_group_info=None, qos_min_support=True) mock_qos_policy_group_create.assert_not_called() def test_provision_qos_policy_group_no_legacy_no_spec(self): mock_qos_policy_group_exists = self.mock_object( self.client, 'qos_policy_group_exists') mock_qos_policy_group_create = self.mock_object( self.client, 'qos_policy_group_create') mock_qos_policy_group_modify = self.mock_object( self.client, 'qos_policy_group_modify') self.client.provision_qos_policy_group(qos_policy_group_info={}, qos_min_support=False) mock_qos_policy_group_exists.assert_not_called() mock_qos_policy_group_create.assert_not_called() mock_qos_policy_group_modify.assert_not_called() def test_provision_qos_policy_group_legacy_qos_policy_group_info(self): mock_qos_policy_group_create = self.mock_object( self.client, 'qos_policy_group_create') self.client.provision_qos_policy_group( qos_policy_group_info=fake.QOS_POLICY_GROUP_INFO_LEGACY, qos_min_support=True) mock_qos_policy_group_create.assert_not_called() def test_provision_qos_policy_group_with_qos_spec_create_with_min(self): self.mock_object(self.client, 'qos_policy_group_exists', return_value=False) mock_qos_policy_group_create = self.mock_object( self.client, 'qos_policy_group_create') mock_qos_policy_group_modify = self.mock_object( self.client, 'qos_policy_group_modify') self.client.provision_qos_policy_group(fake.QOS_POLICY_GROUP_INFO, True) mock_qos_policy_group_create.assert_called_once_with({ 'policy_name': fake.QOS_POLICY_GROUP_NAME, 'min_throughput': fake.MIN_IOPS, 'max_throughput': fake.MAX_IOPS, }) mock_qos_policy_group_modify.assert_not_called() def test_provision_qos_policy_group_with_qos_spec_create_with_aqos(self): self.client.features.add_feature('ADAPTIVE_QOS', supported=True) self.client.features.add_feature( 'ADAPTIVE_QOS_BLOCK_SIZE', supported=True) self.client.features.add_feature( 'ADAPTIVE_QOS_EXPECTED_IOPS_ALLOCATION', supported=True) self.mock_object(self.client, 'qos_policy_group_exists', return_value=False) mock_qos_policy_group_create = self.mock_object( self.client, 'qos_policy_group_create') mock_qos_policy_group_modify = self.mock_object( self.client, 'qos_policy_group_modify') mock_qos_adaptive_policy_group_create = self.mock_object( self.client, 
'qos_adaptive_policy_group_create') mock_qos_adaptive_policy_group_modify = self.mock_object( self.client, 'qos_adaptive_policy_group_modify') self.client.provision_qos_policy_group( fake.ADAPTIVE_QOS_POLICY_GROUP_INFO, False) mock_qos_adaptive_policy_group_create.assert_called_once_with( fake.ADAPTIVE_QOS_SPEC) mock_qos_adaptive_policy_group_modify.assert_not_called() mock_qos_policy_group_create.assert_not_called() mock_qos_policy_group_modify.assert_not_called() def test_provision_qos_policy_group_with_qos_spec_create_unsupported(self): mock_qos_policy_group_exists = self.mock_object( self.client, 'qos_policy_group_exists') mock_qos_policy_group_create = self.mock_object( self.client, 'qos_policy_group_create') mock_qos_policy_group_modify = self.mock_object( self.client, 'qos_policy_group_modify') self.assertRaises( netapp_utils.NetAppDriverException, self.client.provision_qos_policy_group, fake.QOS_POLICY_GROUP_INFO, False) mock_qos_policy_group_exists.assert_not_called() mock_qos_policy_group_create.assert_not_called() mock_qos_policy_group_modify.assert_not_called() def test_provision_qos_policy_group_with_invalid_qos_spec(self): self.mock_object(self.client, '_validate_qos_policy_group', side_effect=netapp_utils.NetAppDriverException) mock_policy_group_spec_is_adaptive = self.mock_object( netapp_utils, 'is_qos_policy_group_spec_adaptive') mock_qos_policy_group_exists = self.mock_object( self.client, 'qos_policy_group_exists') mock_qos_policy_group_create = self.mock_object( self.client, 'qos_policy_group_create') mock_qos_policy_group_modify = self.mock_object( self.client, 'qos_policy_group_modify') self.assertRaises( netapp_utils.NetAppDriverException, self.client.provision_qos_policy_group, fake.QOS_POLICY_GROUP_INFO, False) mock_policy_group_spec_is_adaptive.assert_called_once_with( fake.QOS_POLICY_GROUP_INFO) mock_qos_policy_group_exists.assert_not_called() mock_qos_policy_group_create.assert_not_called() mock_qos_policy_group_modify.assert_not_called() def test_provision_qos_policy_group_with_qos_spec_create(self): self.mock_object(self.client, 'qos_policy_group_exists', return_value=False) mock_qos_policy_group_create = self.mock_object( self.client, 'qos_policy_group_create') mock_qos_policy_group_modify = self.mock_object( self.client, 'qos_policy_group_modify') self.client.provision_qos_policy_group(fake.QOS_POLICY_GROUP_INFO_MAX, True) mock_qos_policy_group_create.assert_has_calls([ mock.call({ 'policy_name': fake.QOS_POLICY_GROUP_NAME, 'max_throughput': fake.MAX_THROUGHPUT, })]) mock_qos_policy_group_modify.assert_not_called() def test_provision_qos_policy_group_with_qos_spec_modify_with_min(self): self.mock_object(self.client, 'qos_policy_group_exists', return_value=True) mock_qos_policy_group_create = self.mock_object( self.client, 'qos_policy_group_create') mock_qos_policy_group_modify = self.mock_object( self.client, 'qos_policy_group_modify') self.client.provision_qos_policy_group(fake.QOS_POLICY_GROUP_INFO, True) mock_qos_policy_group_create.assert_not_called() mock_qos_policy_group_modify.assert_has_calls([ mock.call({ 'policy_name': fake.QOS_POLICY_GROUP_NAME, 'min_throughput': fake.MIN_IOPS, 'max_throughput': fake.MAX_IOPS, })]) def test_provision_qos_policy_group_with_qos_spec_modify(self): self.mock_object(self.client, 'qos_policy_group_exists', return_value=True) mock_qos_policy_group_create = self.mock_object( self.client, 'qos_policy_group_create') mock_qos_policy_group_modify = self.mock_object( self.client, 'qos_policy_group_modify') 
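        # qos_policy_group_exists returns True here, so provisioning is
        # expected to modify the existing policy group rather than create
        # a new one.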
self.client.provision_qos_policy_group(fake.QOS_POLICY_GROUP_INFO_MAX, True) mock_qos_policy_group_create.assert_not_called() mock_qos_policy_group_modify.assert_has_calls([ mock.call({ 'policy_name': fake.QOS_POLICY_GROUP_NAME, 'max_throughput': fake.MAX_THROUGHPUT, })]) def test_provision_qos_policy_group_with_qos_spec_modify_with_aqos(self): self.client.features.add_feature('ADAPTIVE_QOS', supported=True) self.client.features.add_feature( 'ADAPTIVE_QOS_BLOCK_SIZE', supported=True) self.client.features.add_feature( 'ADAPTIVE_QOS_EXPECTED_IOPS_ALLOCATION', supported=True) self.mock_object(self.client, 'qos_policy_group_exists', return_value=True) mock_qos_policy_group_create = self.mock_object( self.client, 'qos_policy_group_create') mock_qos_policy_group_modify = self.mock_object( self.client, 'qos_policy_group_modify') mock_qos_adaptive_policy_group_create = self.mock_object( self.client, 'qos_adaptive_policy_group_create') mock_qos_adaptive_policy_group_modify = self.mock_object( self.client, 'qos_adaptive_policy_group_modify') self.client.provision_qos_policy_group( fake.ADAPTIVE_QOS_POLICY_GROUP_INFO, False) mock_qos_adaptive_policy_group_modify.assert_called_once_with( fake.ADAPTIVE_QOS_SPEC) mock_qos_adaptive_policy_group_create.assert_not_called() mock_qos_policy_group_create.assert_not_called() mock_qos_policy_group_modify.assert_not_called() def test_qos_policy_group_exists(self): self.mock_send_request.return_value = netapp_api.NaElement( fake_client.QOS_POLICY_GROUP_GET_ITER_RESPONSE) result = self.client.qos_policy_group_exists( fake.QOS_POLICY_GROUP_NAME) api_args = { 'query': { 'qos-policy-group-info': { 'policy-group': fake.QOS_POLICY_GROUP_NAME, }, }, 'desired-attributes': { 'qos-policy-group-info': { 'policy-group': None, }, }, } self.mock_send_request.assert_has_calls([ mock.call('qos-policy-group-get-iter', api_args, False)]) self.assertTrue(result) def test_qos_policy_group_exists_not_found(self): self.mock_send_request.return_value = netapp_api.NaElement( fake_client.NO_RECORDS_RESPONSE) result = self.client.qos_policy_group_exists( fake.QOS_POLICY_GROUP_NAME) self.assertFalse(result) def test_qos_policy_group_create(self): api_args = { 'policy-group': fake.QOS_POLICY_GROUP_NAME, 'min-throughput': '0', 'max-throughput': fake.MAX_THROUGHPUT, 'vserver': self.vserver, } self.client.qos_policy_group_create({ 'policy_name': fake.QOS_POLICY_GROUP_NAME, 'min_throughput': '0', 'max_throughput': fake.MAX_THROUGHPUT, }) self.mock_send_request.assert_has_calls([ mock.call('qos-policy-group-create', api_args, False)]) def test_qos_adaptive_policy_group_create(self): api_args = { 'policy-group': fake.QOS_POLICY_GROUP_NAME, 'expected-iops': '%sIOPS/GB' % fake.EXPECTED_IOPS_PER_GB, 'peak-iops': '%sIOPS/GB' % fake.PEAK_IOPS_PER_GB, 'expected-iops-allocation': fake.EXPECTED_IOPS_ALLOCATION, 'peak-iops-allocation': fake.PEAK_IOPS_ALLOCATION, 'block-size': fake.BLOCK_SIZE, 'vserver': self.vserver, } self.client.qos_adaptive_policy_group_create({ 'policy_name': fake.QOS_POLICY_GROUP_NAME, 'expected_iops': '%sIOPS/GB' % fake.EXPECTED_IOPS_PER_GB, 'peak_iops': '%sIOPS/GB' % fake.PEAK_IOPS_PER_GB, 'expected_iops_allocation': fake.EXPECTED_IOPS_ALLOCATION, 'peak_iops_allocation': fake.PEAK_IOPS_ALLOCATION, 'block_size': fake.BLOCK_SIZE, }) self.mock_send_request.assert_has_calls([ mock.call('qos-adaptive-policy-group-create', api_args, False)]) def test_qos_policy_group_modify(self): api_args = { 'policy-group': fake.QOS_POLICY_GROUP_NAME, 'min-throughput': '0', 'max-throughput': 
fake.MAX_THROUGHPUT, } self.client.qos_policy_group_modify({ 'policy_name': fake.QOS_POLICY_GROUP_NAME, 'min_throughput': '0', 'max_throughput': fake.MAX_THROUGHPUT, }) self.mock_send_request.assert_has_calls([ mock.call('qos-policy-group-modify', api_args, False)]) def test_qos_adaptive_policy_group_modify(self): api_args = { 'policy-group': fake.QOS_POLICY_GROUP_NAME, 'expected-iops': '%sIOPS/GB' % fake.EXPECTED_IOPS_PER_GB, 'peak-iops': '%sIOPS/GB' % fake.PEAK_IOPS_PER_GB, 'expected-iops-allocation': fake.EXPECTED_IOPS_ALLOCATION, 'peak-iops-allocation': fake.PEAK_IOPS_ALLOCATION, 'block-size': fake.BLOCK_SIZE, } self.client.qos_adaptive_policy_group_modify({ 'policy_name': fake.QOS_POLICY_GROUP_NAME, 'expected_iops': '%sIOPS/GB' % fake.EXPECTED_IOPS_PER_GB, 'peak_iops': '%sIOPS/GB' % fake.PEAK_IOPS_PER_GB, 'expected_iops_allocation': fake.EXPECTED_IOPS_ALLOCATION, 'peak_iops_allocation': fake.PEAK_IOPS_ALLOCATION, 'block_size': fake.BLOCK_SIZE, }) self.mock_send_request.assert_has_calls([ mock.call('qos-adaptive-policy-group-modify', api_args, False)]) def test_qos_policy_group_rename(self): new_name = 'new-' + fake.QOS_POLICY_GROUP_NAME api_args = { 'policy-group-name': fake.QOS_POLICY_GROUP_NAME, 'new-name': new_name, } self.client.qos_policy_group_rename( fake.QOS_POLICY_GROUP_NAME, new_name) self.mock_send_request.assert_has_calls([ mock.call('qos-policy-group-rename', api_args, False)]) def test_mark_qos_policy_group_for_deletion_no_qos_policy_group_info(self): mock_rename = self.mock_object(self.client, 'qos_policy_group_rename') mock_remove = self.mock_object(self.client, 'remove_unused_qos_policy_groups') self.client.mark_qos_policy_group_for_deletion( qos_policy_group_info=None) self.assertEqual(0, mock_rename.call_count) self.assertEqual(0, mock_remove.call_count) def test_mark_qos_policy_group_for_deletion_legacy_qos_policy(self): mock_rename = self.mock_object(self.client, 'qos_policy_group_rename') mock_remove = self.mock_object(self.client, 'remove_unused_qos_policy_groups') self.client.mark_qos_policy_group_for_deletion( qos_policy_group_info=fake.QOS_POLICY_GROUP_INFO_LEGACY) self.assertEqual(0, mock_rename.call_count) self.assertEqual(1, mock_remove.call_count) @ddt.data(True, False) def test_mark_qos_policy_group_for_deletion_w_qos_spec(self, is_adaptive): mock_rename = self.mock_object(self.client, 'qos_policy_group_rename') mock_remove = self.mock_object(self.client, 'remove_unused_qos_policy_groups') mock_log = self.mock_object(client_cmode.LOG, 'warning') new_name = 'deleted_cinder_%s' % fake.QOS_POLICY_GROUP_NAME self.client.mark_qos_policy_group_for_deletion( qos_policy_group_info=fake.QOS_POLICY_GROUP_INFO_MAX, is_adaptive=is_adaptive) mock_rename.assert_has_calls([ mock.call(fake.QOS_POLICY_GROUP_NAME, new_name, is_adaptive)]) self.assertEqual(0, mock_log.call_count) self.assertEqual(1, mock_remove.call_count) @ddt.data(True, False) def test_mark_qos_policy_group_for_deletion_exception_path(self, is_adaptive): mock_rename = self.mock_object(self.client, 'qos_policy_group_rename') mock_rename.side_effect = netapp_api.NaApiError mock_remove = self.mock_object(self.client, 'remove_unused_qos_policy_groups') mock_log = self.mock_object(client_cmode.LOG, 'warning') new_name = 'deleted_cinder_%s' % fake.QOS_POLICY_GROUP_NAME self.client.mark_qos_policy_group_for_deletion( qos_policy_group_info=fake.QOS_POLICY_GROUP_INFO_MAX, is_adaptive=is_adaptive) mock_rename.assert_has_calls([ mock.call(fake.QOS_POLICY_GROUP_NAME, new_name, is_adaptive)]) self.assertEqual(1, 
mock_log.call_count) self.assertEqual(1, mock_remove.call_count) def test_remove_unused_qos_policy_groups(self): mock_log = self.mock_object(client_cmode.LOG, 'debug') api_args = { 'query': { 'qos-policy-group-info': { 'policy-group': 'deleted_cinder_*', 'vserver': self.vserver, } }, 'max-records': 3500, 'continue-on-failure': 'true', 'return-success-list': 'false', 'return-failure-list': 'false', } self.client.remove_unused_qos_policy_groups() self.mock_send_request.assert_has_calls([ mock.call('qos-policy-group-delete-iter', api_args, False)]) self.assertEqual(0, mock_log.call_count) def test_remove_unused_qos_policy_groups_api_error(self): self.client.features.add_feature('ADAPTIVE_QOS', supported=True) mock_log = self.mock_object(client_cmode.LOG, 'debug') qos_query = { 'qos-policy-group-info': { 'policy-group': 'deleted_cinder_*', 'vserver': self.vserver, } } adaptive_qos_query = { 'qos-adaptive-policy-group-info': { 'policy-group': 'deleted_cinder_*', 'vserver': self.vserver, } } qos_api_args = { 'query': qos_query, 'max-records': 3500, 'continue-on-failure': 'true', 'return-success-list': 'false', 'return-failure-list': 'false', } adaptive_qos_api_args = { 'query': adaptive_qos_query, 'max-records': 3500, 'continue-on-failure': 'true', 'return-success-list': 'false', 'return-failure-list': 'false', } self.mock_send_request.side_effect = netapp_api.NaApiError self.client.remove_unused_qos_policy_groups() self.mock_send_request.assert_has_calls([ mock.call('qos-policy-group-delete-iter', qos_api_args, False), mock.call('qos-adaptive-policy-group-delete-iter', adaptive_qos_api_args, False), ]) self.assertEqual(2, mock_log.call_count) @mock.patch('cinder.volume.volume_utils.resolve_hostname', return_value='192.168.1.101') def test_get_if_info_by_ip_not_found(self, mock_resolve_hostname): fake_ip = '192.168.1.101' response = netapp_api.NaElement( etree.XML(""" 0 """)) self.connection.invoke_successfully.return_value = response self.assertRaises(exception.NotFound, self.client.get_if_info_by_ip, fake_ip) @mock.patch('cinder.volume.volume_utils.resolve_hostname', return_value='192.168.1.101') def test_get_if_info_by_ip(self, mock_resolve_hostname): fake_ip = '192.168.1.101' response = netapp_api.NaElement( etree.XML(""" 1 fake_vserver """)) self.connection.invoke_successfully.return_value = response results = self.client.get_if_info_by_ip(fake_ip) self.assertEqual(1, len(results)) def test_get_vol_by_junc_vserver_not_found(self): fake_vserver = 'fake_vserver' fake_junc = 'fake_junction_path' response = netapp_api.NaElement( etree.XML(""" 0 """)) self.connection.invoke_successfully.return_value = response self.assertRaises(exception.NotFound, self.client.get_vol_by_junc_vserver, fake_vserver, fake_junc) def test_get_vol_by_junc_vserver(self): fake_vserver = 'fake_vserver' fake_junc = 'fake_junction_path' expected_flex_vol = 'fake_flex_vol' volume_attr_str = (""" %(flex_vol)s """ % {'flex_vol': expected_flex_vol}) volume_attr = netapp_api.NaElement(etree.XML(volume_attr_str)) response = netapp_api.NaElement( etree.XML(""" 1 %(vol)s """ % {'vol': volume_attr_str})) self.connection.invoke_successfully.return_value = response mock_get_unique_vol = self.mock_object( self.client, 'get_unique_volume', return_value=volume_attr) actual_flex_vol = self.client.get_vol_by_junc_vserver(fake_vserver, fake_junc) self.assertEqual(expected_flex_vol, actual_flex_vol) mock_get_unique_vol.assert_called_once_with(response) def test_clone_file(self): expected_flex_vol = "fake_flex_vol" expected_src_path = 
"fake_src_path" expected_dest_path = "fake_dest_path" self.connection.get_api_version.return_value = (1, 20) self.client.clone_file(expected_flex_vol, expected_src_path, expected_dest_path, self.vserver, source_snapshot=fake.CG_SNAPSHOT_ID) __, _args, __ = self.connection.invoke_successfully.mock_calls[0] actual_request = _args[0] actual_flex_vol = actual_request.get_child_by_name('volume') \ .get_content() actual_src_path = actual_request \ .get_child_by_name('source-path').get_content() actual_dest_path = actual_request.get_child_by_name( 'destination-path').get_content() self.assertEqual(expected_flex_vol, actual_flex_vol) self.assertEqual(expected_src_path, actual_src_path) self.assertEqual(expected_dest_path, actual_dest_path) req_snapshot_child = actual_request.get_child_by_name('snapshot-name') self.assertEqual(fake.CG_SNAPSHOT_ID, req_snapshot_child.get_content()) self.assertIsNone(actual_request.get_child_by_name( 'destination-exists')) def test_clone_file_when_destination_exists(self): expected_flex_vol = "fake_flex_vol" expected_src_path = "fake_src_path" expected_dest_path = "fake_dest_path" self.connection.get_api_version.return_value = (1, 20) self.client.clone_file(expected_flex_vol, expected_src_path, expected_dest_path, self.vserver, dest_exists=True) __, _args, __ = self.connection.invoke_successfully.mock_calls[0] actual_request = _args[0] actual_flex_vol = actual_request.get_child_by_name('volume') \ .get_content() actual_src_path = actual_request \ .get_child_by_name('source-path').get_content() actual_dest_path = actual_request.get_child_by_name( 'destination-path').get_content() self.assertEqual(expected_flex_vol, actual_flex_vol) self.assertEqual(expected_src_path, actual_src_path) self.assertEqual(expected_dest_path, actual_dest_path) self.assertEqual('true', actual_request.get_child_by_name( 'destination-exists').get_content()) def test_clone_file_when_destination_exists_and_version_less_than_1_20( self): expected_flex_vol = "fake_flex_vol" expected_src_path = "fake_src_path" expected_dest_path = "fake_dest_path" self.connection.get_api_version.return_value = (1, 19) self.client.clone_file(expected_flex_vol, expected_src_path, expected_dest_path, self.vserver, dest_exists=True) __, _args, __ = self.connection.invoke_successfully.mock_calls[0] actual_request = _args[0] actual_flex_vol = actual_request.get_child_by_name('volume') \ .get_content() actual_src_path = actual_request \ .get_child_by_name('source-path').get_content() actual_dest_path = actual_request.get_child_by_name( 'destination-path').get_content() self.assertEqual(expected_flex_vol, actual_flex_vol) self.assertEqual(expected_src_path, actual_src_path) self.assertEqual(expected_dest_path, actual_dest_path) self.assertIsNone(actual_request.get_child_by_name( 'destination-exists')) @ddt.data({'supports_is_backup': True, 'is_snapshot': True}, {'supports_is_backup': True, 'is_snapshot': False}, {'supports_is_backup': False, 'is_snapshot': True}, {'supports_is_backup': False, 'is_snapshot': False}) @ddt.unpack def test_clone_file_is_snapshot(self, supports_is_backup, is_snapshot): self.connection.get_api_version.return_value = (1, 20) self.client.features.add_feature('BACKUP_CLONE_PARAM', supported=supports_is_backup) self.client.clone_file( 'volume', 'fake_source', 'fake_destination', 'fake_vserver', is_snapshot=is_snapshot) clone_create_args = { 'volume': 'volume', 'source-path': 'fake_source', 'destination-path': 'fake_destination', } if is_snapshot and supports_is_backup: clone_create_args['is-backup'] 
= 'true' self.connection.invoke_successfully.assert_called_once_with( netapp_api.NaElement.create_node_with_children( 'clone-create', **clone_create_args), True) def test_get_file_usage(self): expected_bytes = "2048" fake_vserver = 'fake_vserver' fake_path = 'fake_path' response = netapp_api.NaElement( etree.XML(""" %(unique-bytes)s """ % {'unique-bytes': expected_bytes})) self.connection.invoke_successfully.return_value = response actual_bytes = self.client.get_file_usage(fake_vserver, fake_path) self.assertEqual(expected_bytes, actual_bytes) def test_check_cluster_api(self): self.client.features.USER_CAPABILITY_LIST = True mock_check_cluster_api_legacy = self.mock_object( self.client, '_check_cluster_api_legacy') mock_check_cluster_api = self.mock_object( self.client, '_check_cluster_api', return_value=True) result = self.client.check_cluster_api('object', 'operation', 'api') self.assertTrue(result) self.assertFalse(mock_check_cluster_api_legacy.called) mock_check_cluster_api.assert_called_once_with( 'object', 'operation', 'api') def test_check_cluster_api_legacy(self): self.client.features.USER_CAPABILITY_LIST = False mock_check_cluster_api_legacy = self.mock_object( self.client, '_check_cluster_api_legacy', return_value=True) mock_check_cluster_api = self.mock_object( self.client, '_check_cluster_api') result = self.client.check_cluster_api('object', 'operation', 'api') self.assertTrue(result) self.assertFalse(mock_check_cluster_api.called) mock_check_cluster_api_legacy.assert_called_once_with('api') def test__check_cluster_api(self): api_response = netapp_api.NaElement( fake_client.SYSTEM_USER_CAPABILITY_GET_ITER_RESPONSE) self.mock_send_request.return_value = api_response result = self.client._check_cluster_api('object', 'operation', 'api') system_user_capability_get_iter_args = { 'query': { 'capability-info': { 'object-name': 'object', 'operation-list': { 'operation-info': { 'name': 'operation', }, }, }, }, 'desired-attributes': { 'capability-info': { 'operation-list': { 'operation-info': { 'api-name': None, }, }, }, }, } self.mock_send_request.assert_called_once_with( 'system-user-capability-get-iter', system_user_capability_get_iter_args, False) self.assertTrue(result) @ddt.data(fake_client.SYSTEM_USER_CAPABILITY_GET_ITER_RESPONSE, fake_client.NO_RECORDS_RESPONSE) def test__check_cluster_api_not_found(self, response): api_response = netapp_api.NaElement(response) self.mock_send_request.return_value = api_response result = self.client._check_cluster_api('object', 'operation', 'api4') self.assertFalse(result) @ddt.data('volume-get-iter', 'volume-get', 'aggr-options-list-info') def test__check_cluster_api_legacy(self, api): api_response = netapp_api.NaElement(fake_client.NO_RECORDS_RESPONSE) self.mock_send_request.return_value = api_response result = self.client._check_cluster_api_legacy(api) self.assertTrue(result) self.mock_send_request.assert_called_once_with(api, enable_tunneling=False) @ddt.data(netapp_api.EAPIPRIVILEGE, netapp_api.EAPINOTFOUND) def test__check_cluster_api_legacy_insufficient_privileges(self, code): self.mock_send_request.side_effect = netapp_api.NaApiError(code=code) result = self.client._check_cluster_api_legacy('volume-get-iter') self.assertFalse(result) self.mock_send_request.assert_called_once_with('volume-get-iter', enable_tunneling=False) def test__check_cluster_api_legacy_api_error(self): self.mock_send_request.side_effect = netapp_api.NaApiError() result = self.client._check_cluster_api_legacy('volume-get-iter') self.assertTrue(result) 
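        # An NaApiError other than EAPIPRIVILEGE/EAPINOTFOUND does not prove
        # the API is unavailable, so the legacy check still reports True.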
self.mock_send_request.assert_called_once_with('volume-get-iter', enable_tunneling=False) def test__check_cluster_api_legacy_invalid_api(self): self.assertRaises(ValueError, self.client._check_cluster_api_legacy, 'fake_api') def test_get_operational_lif_addresses(self): expected_result = ['1.2.3.4', '99.98.97.96'] api_response = netapp_api.NaElement( fake_client.GET_OPERATIONAL_LIF_ADDRESSES_RESPONSE) self.mock_object(self.client, 'send_iter_request', return_value=api_response) address_list = self.client.get_operational_lif_addresses() net_interface_get_iter_args = { 'query': { 'net-interface-info': { 'operational-status': 'up' } }, 'desired-attributes': { 'net-interface-info': { 'address': None, } } } self.client.send_iter_request.assert_called_once_with( 'net-interface-get-iter', net_interface_get_iter_args) self.assertEqual(expected_result, address_list) @ddt.data({'junction_path': '/fake/vol'}, {'name': 'fake_volume'}, {'junction_path': '/fake/vol', 'name': 'fake_volume'}) def test_get_volume_state(self, kwargs): api_response = netapp_api.NaElement( fake_client.VOLUME_GET_ITER_STATE_RESPONSE) mock_send_iter_request = self.mock_object( self.client, 'send_iter_request', return_value=api_response) volume_response = netapp_api.NaElement( fake_client.VOLUME_GET_ITER_STATE_ATTR) mock_get_unique_vol = self.mock_object( self.client, 'get_unique_volume', return_value=volume_response) state = self.client.get_volume_state(**kwargs) volume_id_attributes = {} if 'junction_path' in kwargs: volume_id_attributes['junction-path'] = kwargs['junction_path'] if 'name' in kwargs: volume_id_attributes['name'] = kwargs['name'] volume_get_iter_args = { 'query': { 'volume-attributes': { 'volume-id-attributes': volume_id_attributes, } }, 'desired-attributes': { 'volume-attributes': { 'volume-id-attributes': { 'style-extended': None, }, 'volume-state-attributes': { 'state': None } } }, } mock_send_iter_request.assert_called_once_with( 'volume-get-iter', volume_get_iter_args) mock_get_unique_vol.assert_called_once_with(api_response) self.assertEqual(fake_client.VOLUME_STATE_ONLINE, state) @ddt.data({'flexvol_path': '/fake/vol'}, {'flexvol_name': 'fake_volume'}, {'flexvol_path': '/fake/vol', 'flexvol_name': 'fake_volume'}) def test_get_flexvol_capacity(self, kwargs): api_response = netapp_api.NaElement( fake_client.VOLUME_GET_ITER_CAPACITY_RESPONSE) mock_send_iter_request = self.mock_object( self.client, 'send_iter_request', return_value=api_response) volume_response = netapp_api.NaElement( fake_client.VOLUME_GET_ITER_CAPACITY_ATTR) mock_get_unique_vol = self.mock_object( self.client, 'get_unique_volume', return_value=volume_response) capacity = self.client.get_flexvol_capacity(**kwargs) volume_id_attributes = {} if 'flexvol_path' in kwargs: volume_id_attributes['junction-path'] = kwargs['flexvol_path'] if 'flexvol_name' in kwargs: volume_id_attributes['name'] = kwargs['flexvol_name'] volume_get_iter_args = { 'query': { 'volume-attributes': { 'volume-id-attributes': volume_id_attributes, } }, 'desired-attributes': { 'volume-attributes': { 'volume-id-attributes': { 'style-extended': None, }, 'volume-space-attributes': { 'size-available': None, 'size-total': None, } } }, } mock_send_iter_request.assert_called_once_with( 'volume-get-iter', volume_get_iter_args) mock_get_unique_vol.assert_called_once_with(api_response) self.assertEqual(fake_client.VOLUME_SIZE_TOTAL, capacity['size-total']) self.assertEqual(fake_client.VOLUME_SIZE_AVAILABLE, capacity['size-available']) def test_get_flexvol_capacity_not_found(self): 
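        # A no-records response means the flexvol cannot be located, which
        # is surfaced as a NetAppDriverException.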
self.mock_send_request.return_value = netapp_api.NaElement( fake_client.NO_RECORDS_RESPONSE) self.assertRaises(netapp_utils.NetAppDriverException, self.client.get_flexvol_capacity, flexvol_path='fake_path') def test_list_flexvols(self): api_response = netapp_api.NaElement( fake_client.VOLUME_GET_ITER_LIST_RESPONSE) self.mock_object(self.client, 'send_iter_request', return_value=api_response) result = self.client.list_flexvols() volume_get_iter_args = { 'query': { 'volume-attributes': { 'volume-id-attributes': { 'type': 'rw', 'style': 'flex', }, 'volume-state-attributes': { 'is-vserver-root': 'false', 'is-inconsistent': 'false', 'is-invalid': 'false', 'state': 'online', }, }, }, 'desired-attributes': { 'volume-attributes': { 'volume-id-attributes': { 'name': None, }, }, }, } self.client.send_iter_request.assert_called_once_with( 'volume-get-iter', volume_get_iter_args) self.assertEqual(list(fake_client.VOLUME_NAMES), result) def test_list_flexvols_not_found(self): api_response = netapp_api.NaElement( fake_client.NO_RECORDS_RESPONSE) self.mock_object(self.client, 'send_iter_request', return_value=api_response) result = self.client.list_flexvols() self.assertEqual([], result) @ddt.data(False, True) def test_get_flexvol(self, is_flexgroup): if is_flexgroup: api_response = netapp_api.NaElement( fake_client.VOLUME_GET_ITER_SSC_RESPONSE_FLEXGROUP) volume_response = netapp_api.NaElement( fake_client.VOLUME_GET_ITER_SSC_RESPONSE_ATTR_FLEXGROUP) else: api_response = netapp_api.NaElement( fake_client.VOLUME_GET_ITER_SSC_RESPONSE) volume_response = netapp_api.NaElement( fake_client.VOLUME_GET_ITER_SSC_RESPONSE_ATTR) self.mock_object(self.client, 'send_iter_request', return_value=api_response) mock_get_unique_vol = self.mock_object( self.client, 'get_unique_volume', return_value=volume_response) result = self.client.get_flexvol( flexvol_name=fake_client.VOLUME_NAMES[0], flexvol_path='/%s' % fake_client.VOLUME_NAMES[0]) volume_get_iter_args = { 'query': { 'volume-attributes': { 'volume-id-attributes': { 'name': fake_client.VOLUME_NAMES[0], 'junction-path': '/' + fake_client.VOLUME_NAMES[0], 'type': 'rw', 'style': 'flex', }, 'volume-state-attributes': { 'is-vserver-root': 'false', 'is-inconsistent': 'false', 'is-invalid': 'false', 'state': 'online', }, }, }, 'desired-attributes': { 'volume-attributes': { 'volume-id-attributes': { 'name': None, 'owning-vserver-name': None, 'junction-path': None, 'type': None, 'aggr-list': { 'aggr-name': None, }, 'containing-aggregate-name': None, 'style-extended': None, }, 'volume-mirror-attributes': { 'is-data-protection-mirror': None, 'is-replica-volume': None, }, 'volume-space-attributes': { 'is-space-guarantee-enabled': None, 'space-guarantee': None, 'percentage-snapshot-reserve': None, 'size': None, }, 'volume-qos-attributes': { 'policy-group-name': None, }, 'volume-snapshot-attributes': { 'snapshot-policy': None, }, 'volume-language-attributes': { 'language-code': None, } }, }, } self.client.send_iter_request.assert_called_once_with( 'volume-get-iter', volume_get_iter_args) mock_get_unique_vol.assert_called_once_with(api_response) if is_flexgroup: self.assertEqual(fake_client.VOLUME_INFO_SSC_FLEXGROUP, result) else: self.assertEqual(fake_client.VOLUME_INFO_SSC, result) def test_create_flexvol(self): self.mock_object(self.client.connection, 'send_request') self.client.create_flexvol( fake_client.VOLUME_NAME, fake_client.VOLUME_AGGREGATE_NAME, 100) volume_create_args = { 'containing-aggr-name': fake_client.VOLUME_AGGREGATE_NAME, 'size': '100g', 'volume': 
fake_client.VOLUME_NAME, 'volume-type': 'rw', 'junction-path': '/%s' % fake_client.VOLUME_NAME, } self.client.connection.send_request.assert_called_once_with( 'volume-create', volume_create_args) @ddt.data('dp', 'rw', None) def test_create_volume_with_extra_specs(self, volume_type): self.mock_object(self.client, 'enable_flexvol_dedupe') self.mock_object(self.client, 'enable_flexvol_compression') self.mock_object(self.client.connection, 'send_request') self.client.create_flexvol( fake_client.VOLUME_NAME, fake_client.VOLUME_AGGREGATE_NAME, 100, space_guarantee_type='volume', language='en-US', snapshot_policy='default', dedupe_enabled=True, compression_enabled=True, snapshot_reserve=15, volume_type=volume_type) volume_create_args = { 'containing-aggr-name': fake_client.VOLUME_AGGREGATE_NAME, 'size': '100g', 'volume': fake_client.VOLUME_NAME, 'space-reserve': 'volume', 'language-code': 'en-US', 'volume-type': volume_type, 'percentage-snapshot-reserve': '15', } if volume_type != 'dp': volume_create_args['snapshot-policy'] = 'default' volume_create_args['junction-path'] = ('/%s' % fake_client.VOLUME_NAME) self.client.connection.send_request.assert_called_with( 'volume-create', volume_create_args) self.client.enable_flexvol_dedupe.assert_called_once_with( fake_client.VOLUME_NAME) self.client.enable_flexvol_compression.assert_called_once_with( fake_client.VOLUME_NAME) def test_create_volume_async(self): self.mock_object(self.client.connection, 'send_request') self.client.create_volume_async( fake_client.VOLUME_NAME, [fake_client.VOLUME_AGGREGATE_NAME], 100, volume_type='dp') volume_create_args = { 'aggr-list': [{'aggr-name': fake_client.VOLUME_AGGREGATE_NAME}], 'size': 100 * units.Gi, 'volume-name': fake_client.VOLUME_NAME, 'volume-type': 'dp' } self.client.connection.send_request.assert_called_once_with( 'volume-create-async', volume_create_args) @ddt.data('dp', 'rw', None) def test_create_volume_async_with_extra_specs(self, volume_type): self.mock_object(self.client.connection, 'send_request') self.client.create_volume_async( fake_client.VOLUME_NAME, [fake_client.VOLUME_AGGREGATE_NAME], 100, space_guarantee_type='volume', language='en-US', snapshot_policy='default', snapshot_reserve=15, volume_type=volume_type) volume_create_args = { 'aggr-list': [{'aggr-name': fake_client.VOLUME_AGGREGATE_NAME}], 'size': 100 * units.Gi, 'volume-name': fake_client.VOLUME_NAME, 'space-reserve': 'volume', 'language-code': 'en-US', 'volume-type': volume_type, 'percentage-snapshot-reserve': '15', } if volume_type != 'dp': volume_create_args['snapshot-policy'] = 'default' volume_create_args['junction-path'] = ('/%s' % fake_client.VOLUME_NAME) self.client.connection.send_request.assert_called_with( 'volume-create-async', volume_create_args) def test_flexvol_exists(self): api_response = netapp_api.NaElement( fake_client.VOLUME_GET_NAME_RESPONSE) self.mock_object(self.client, 'send_iter_request', return_value=api_response) result = self.client.flexvol_exists(fake_client.VOLUME_NAME) volume_get_iter_args = { 'query': { 'volume-attributes': { 'volume-id-attributes': { 'name': fake_client.VOLUME_NAME } } }, 'desired-attributes': { 'volume-attributes': { 'volume-id-attributes': { 'name': None } } } } self.client.send_iter_request.assert_has_calls([ mock.call('volume-get-iter', volume_get_iter_args)]) self.assertTrue(result) def test_flexvol_exists_not_found(self): api_response = netapp_api.NaElement(fake_client.NO_RECORDS_RESPONSE) self.mock_object(self.client.connection, 'send_request', return_value=api_response) 
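        # An empty (no-records) response means the flexvol does not exist.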
self.assertFalse(self.client.flexvol_exists(fake_client.VOLUME_NAME)) def test_rename_flexvol(self): self.mock_object(self.client.connection, 'send_request') self.client.rename_flexvol(fake_client.VOLUME_NAME, 'new_name') volume_rename_api_args = { 'volume': fake_client.VOLUME_NAME, 'new-volume-name': 'new_name', } self.client.connection.send_request.assert_called_once_with( 'volume-rename', volume_rename_api_args) def test_mount_flexvol_default_junction_path(self): self.mock_object(self.client.connection, 'send_request') self.client.mount_flexvol(fake_client.VOLUME_NAME) volume_mount_args = { 'volume-name': fake_client.VOLUME_NAME, 'junction-path': '/%s' % fake_client.VOLUME_NAME, } self.client.connection.send_request.assert_has_calls([ mock.call('volume-mount', volume_mount_args)]) def test_mount_flexvol(self): self.mock_object(self.client.connection, 'send_request') fake_path = '/fake_path' self.client.mount_flexvol(fake_client.VOLUME_NAME, junction_path=fake_path) volume_mount_args = { 'volume-name': fake_client.VOLUME_NAME, 'junction-path': fake_path, } self.client.connection.send_request.assert_has_calls([ mock.call('volume-mount', volume_mount_args)]) def test_enable_volume_dedupe_async(self): self.mock_object(self.client.connection, 'send_request') self.client.enable_volume_dedupe_async(fake_client.VOLUME_NAME) sis_enable_args = {'volume-name': fake_client.VOLUME_NAME} self.client.connection.send_request.assert_called_once_with( 'sis-enable-async', sis_enable_args) def test_disable_volume_dedupe_async(self): self.mock_object(self.client.connection, 'send_request') self.client.disable_volume_dedupe_async(fake_client.VOLUME_NAME) sis_enable_args = {'volume-name': fake_client.VOLUME_NAME} self.client.connection.send_request.assert_called_once_with( 'sis-disable-async', sis_enable_args) def test_enable_volume_compression_async(self): self.mock_object(self.client.connection, 'send_request') self.client.enable_volume_compression_async(fake_client.VOLUME_NAME) sis_set_config_args = { 'volume-name': fake_client.VOLUME_NAME, 'enable-compression': 'true' } self.client.connection.send_request.assert_called_once_with( 'sis-set-config-async', sis_set_config_args) def test_disable_volume_compression_async(self): self.mock_object(self.client.connection, 'send_request') self.client.disable_volume_compression_async(fake_client.VOLUME_NAME) sis_set_config_args = { 'volume-name': fake_client.VOLUME_NAME, 'enable-compression': 'false' } self.client.connection.send_request.assert_called_once_with( 'sis-set-config-async', sis_set_config_args) def test_enable_flexvol_dedupe(self): self.mock_object(self.client.connection, 'send_request') self.client.enable_flexvol_dedupe(fake_client.VOLUME_NAME) sis_enable_args = {'path': '/vol/%s' % fake_client.VOLUME_NAME} self.client.connection.send_request.assert_called_once_with( 'sis-enable', sis_enable_args) def test_disable_flexvol_dedupe(self): self.mock_object(self.client.connection, 'send_request') self.client.disable_flexvol_dedupe(fake_client.VOLUME_NAME) sis_disable_args = {'path': '/vol/%s' % fake_client.VOLUME_NAME} self.client.connection.send_request.assert_called_once_with( 'sis-disable', sis_disable_args) def test_enable_flexvol_compression(self): self.mock_object(self.client.connection, 'send_request') self.client.enable_flexvol_compression(fake_client.VOLUME_NAME) sis_set_config_args = { 'path': '/vol/%s' % fake_client.VOLUME_NAME, 'enable-compression': 'true' } self.client.connection.send_request.assert_called_once_with( 'sis-set-config', 
sis_set_config_args) def test_disable_flexvol_compression(self): self.mock_object(self.client.connection, 'send_request') self.client.disable_flexvol_compression(fake_client.VOLUME_NAME) sis_set_config_args = { 'path': '/vol/%s' % fake_client.VOLUME_NAME, 'enable-compression': 'false' } self.client.connection.send_request.assert_called_once_with( 'sis-set-config', sis_set_config_args) def test_get_flexvol_dedupe_info(self): api_response = netapp_api.NaElement( fake_client.SIS_GET_ITER_SSC_RESPONSE) self.mock_object(self.client, 'send_iter_request', return_value=api_response) result = self.client.get_flexvol_dedupe_info( fake_client.VOLUME_NAMES[0]) sis_get_iter_args = { 'query': { 'sis-status-info': { 'path': '/vol/%s' % fake_client.VOLUME_NAMES[0], }, }, 'desired-attributes': { 'sis-status-info': { 'state': None, 'is-compression-enabled': None, 'logical-data-size': None, 'logical-data-limit': None, }, }, } self.client.send_iter_request.assert_called_once_with( 'sis-get-iter', sis_get_iter_args) self.assertEqual(fake_client.VOLUME_DEDUPE_INFO_SSC, result) def test_get_flexvol_dedupe_info_no_logical_data_values(self): api_response = netapp_api.NaElement( fake_client.SIS_GET_ITER_SSC_NO_LOGICAL_DATA_RESPONSE) self.mock_object(self.client, 'send_iter_request', return_value=api_response) result = self.client.get_flexvol_dedupe_info( fake_client.VOLUME_NAMES[0]) self.assertEqual(fake_client.VOLUME_DEDUPE_INFO_SSC_NO_LOGICAL_DATA, result) def test_get_flexvol_dedupe_info_not_found(self): api_response = netapp_api.NaElement( fake_client.NO_RECORDS_RESPONSE) self.mock_object(self.client, 'send_iter_request', return_value=api_response) result = self.client.get_flexvol_dedupe_info( fake_client.VOLUME_NAMES[0]) self.assertEqual(fake_client.VOLUME_DEDUPE_INFO_SSC_NO_LOGICAL_DATA, result) def test_get_flexvol_dedupe_info_api_error(self): self.mock_object(self.client, 'send_iter_request', side_effect=self._mock_api_error()) result = self.client.get_flexvol_dedupe_info( fake_client.VOLUME_NAMES[0]) self.assertEqual(fake_client.VOLUME_DEDUPE_INFO_SSC_NO_LOGICAL_DATA, result) def test_get_flexvol_dedupe_info_api_insufficient_privileges(self): api_error = netapp_api.NaApiError(code=netapp_api.EAPIPRIVILEGE) self.mock_object(self.client, 'send_iter_request', side_effect=api_error) result = self.client.get_flexvol_dedupe_info( fake_client.VOLUME_NAMES[0]) self.assertEqual(fake_client.VOLUME_DEDUPE_INFO_SSC_NO_LOGICAL_DATA, result) def test_get_flexvol_dedupe_used_percent(self): self.client.features.add_feature('CLONE_SPLIT_STATUS') mock_get_flexvol_dedupe_info = self.mock_object( self.client, 'get_flexvol_dedupe_info', return_value=fake_client.VOLUME_DEDUPE_INFO_SSC) mock_get_clone_split_info = self.mock_object( self.client, 'get_clone_split_info', return_value=fake_client.VOLUME_CLONE_SPLIT_STATUS) result = self.client.get_flexvol_dedupe_used_percent( fake_client.VOLUME_NAMES[0]) self.assertEqual(75.0, result) mock_get_flexvol_dedupe_info.assert_called_once_with( fake_client.VOLUME_NAMES[0]) mock_get_clone_split_info.assert_called_once_with( fake_client.VOLUME_NAMES[0]) def test_get_flexvol_dedupe_used_percent_not_supported(self): self.client.features.add_feature('CLONE_SPLIT_STATUS', supported=False) mock_get_flexvol_dedupe_info = self.mock_object( self.client, 'get_flexvol_dedupe_info', return_value=fake_client.VOLUME_DEDUPE_INFO_SSC) mock_get_clone_split_info = self.mock_object( self.client, 'get_clone_split_info', return_value=fake_client.VOLUME_CLONE_SPLIT_STATUS) result = 
self.client.get_flexvol_dedupe_used_percent( fake_client.VOLUME_NAMES[0]) self.assertEqual(0.0, result) self.assertFalse(mock_get_flexvol_dedupe_info.called) self.assertFalse(mock_get_clone_split_info.called) def test_get_clone_split_info(self): api_response = netapp_api.NaElement( fake_client.CLONE_SPLIT_STATUS_RESPONSE) self.mock_object(self.client.connection, 'send_request', return_value=api_response) result = self.client.get_clone_split_info(fake_client.VOLUME_NAMES[0]) self.assertEqual(fake_client.VOLUME_CLONE_SPLIT_STATUS, result) self.client.connection.send_request.assert_called_once_with( 'clone-split-status', {'volume-name': fake_client.VOLUME_NAMES[0]}) def test_get_clone_split_info_api_error(self): self.mock_object(self.client.connection, 'send_request', side_effect=self._mock_api_error()) result = self.client.get_clone_split_info(fake_client.VOLUME_NAMES[0]) expected = {'unsplit-size': 0, 'unsplit-clone-count': 0} self.assertEqual(expected, result) def test_get_clone_split_info_no_data(self): api_response = netapp_api.NaElement( fake_client.CLONE_SPLIT_STATUS_NO_DATA_RESPONSE) self.mock_object(self.client.connection, 'send_request', return_value=api_response) result = self.client.get_clone_split_info(fake_client.VOLUME_NAMES[0]) expected = {'unsplit-size': 0, 'unsplit-clone-count': 0} self.assertEqual(expected, result) def test_is_flexvol_mirrored(self): api_response = netapp_api.NaElement( fake_client.SNAPMIRROR_GET_ITER_RESPONSE) self.mock_object(self.client, 'send_iter_request', return_value=api_response) result = self.client.is_flexvol_mirrored( fake_client.VOLUME_NAMES[0], fake_client.VOLUME_VSERVER_NAME) snapmirror_get_iter_args = { 'query': { 'snapmirror-info': { 'source-vserver': fake_client.VOLUME_VSERVER_NAME, 'source-volume': fake_client.VOLUME_NAMES[0], 'mirror-state': 'snapmirrored', 'relationship-type': 'data_protection', }, }, 'desired-attributes': { 'snapmirror-info': None, }, } self.client.send_iter_request.assert_called_once_with( 'snapmirror-get-iter', snapmirror_get_iter_args) self.assertTrue(result) def test_is_flexvol_mirrored_not_mirrored(self): api_response = netapp_api.NaElement( fake_client.NO_RECORDS_RESPONSE) self.mock_object(self.client.connection, 'send_request', return_value=api_response) result = self.client.is_flexvol_mirrored( fake_client.VOLUME_NAMES[0], fake_client.VOLUME_VSERVER_NAME) self.assertFalse(result) def test_is_flexvol_mirrored_api_error(self): self.mock_object(self.client.connection, 'send_request', side_effect=self._mock_api_error()) result = self.client.is_flexvol_mirrored( fake_client.VOLUME_NAMES[0], fake_client.VOLUME_VSERVER_NAME) self.assertFalse(result) def test_is_flexvol_encrypted(self): api_response = netapp_api.NaElement( fake_client.VOLUME_GET_ITER_ENCRYPTION_SSC_RESPONSE) self.client.features.add_feature('FLEXVOL_ENCRYPTION') self.mock_object(self.client, 'send_iter_request', return_value=api_response) result = self.client.is_flexvol_encrypted( fake_client.VOLUME_NAMES[0], fake_client.VOLUME_VSERVER_NAME) volume_get_iter_args = { 'query': { 'volume-attributes': { 'encrypt': 'true', 'volume-id-attributes': { 'name': fake_client.VOLUME_NAME, 'owning-vserver-name': fake_client.VOLUME_VSERVER_NAME, } } }, 'desired-attributes': { 'volume-attributes': { 'encrypt': None, } } } self.client.send_iter_request.assert_called_once_with( 'volume-get-iter', volume_get_iter_args) self.assertTrue(result) def test_is_flexvol_encrypted_unsupported_version(self): self.client.features.add_feature('FLEXVOL_ENCRYPTION', supported=False) 
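        # Without FLEXVOL_ENCRYPTION support the volume is simply reported
        # as not encrypted.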
result = self.client.is_flexvol_encrypted( fake_client.VOLUME_NAMES[0], fake_client.VOLUME_VSERVER_NAME) self.assertFalse(result) def test_is_flexvol_encrypted_no_records_found(self): api_response = netapp_api.NaElement( fake_client.NO_RECORDS_RESPONSE) self.mock_object(self.client.connection, 'send_request', return_value=api_response) result = self.client.is_flexvol_encrypted( fake_client.VOLUME_NAMES[0], fake_client.VOLUME_VSERVER_NAME) self.assertFalse(result) def test_is_flexvol_encrypted_api_error(self): self.mock_object(self.client.connection, 'send_request', side_effect=self._mock_api_error()) result = self.client.is_flexvol_encrypted( fake_client.VOLUME_NAMES[0], fake_client.VOLUME_VSERVER_NAME) self.assertFalse(result) def test_get_aggregates(self): api_response = netapp_api.NaElement( fake_client.AGGR_GET_ITER_RESPONSE) self.mock_object(self.client.connection, 'send_request', return_value=api_response) result = self.client._get_aggregates() self.client.connection.send_request.assert_has_calls([ mock.call('aggr-get-iter', {}, enable_tunneling=False)]) self.assertListEqual( [aggr.to_string() for aggr in api_response.get_child_by_name( 'attributes-list').get_children()], [aggr.to_string() for aggr in result]) def test_get_aggregates_with_filters(self): api_response = netapp_api.NaElement( fake_client.AGGR_GET_SPACE_RESPONSE) self.mock_object(self.client.connection, 'send_request', return_value=api_response) desired_attributes = { 'aggr-attributes': { 'aggregate-name': None, 'aggr-space-attributes': { 'size-total': None, 'size-available': None, } } } result = self.client._get_aggregates( aggregate_names=fake_client.VOLUME_AGGREGATE_NAMES, desired_attributes=desired_attributes) aggr_get_iter_args = { 'query': { 'aggr-attributes': { 'aggregate-name': '|'.join( fake_client.VOLUME_AGGREGATE_NAMES), } }, 'desired-attributes': desired_attributes } self.client.connection.send_request.assert_has_calls([ mock.call('aggr-get-iter', aggr_get_iter_args, enable_tunneling=False)]) self.assertListEqual( [aggr.to_string() for aggr in api_response.get_child_by_name( 'attributes-list').get_children()], [aggr.to_string() for aggr in result]) def test_get_aggregates_not_found(self): api_response = netapp_api.NaElement(fake_client.NO_RECORDS_RESPONSE) self.mock_object(self.client.connection, 'send_request', return_value=api_response) result = self.client._get_aggregates() self.client.connection.send_request.assert_has_calls([ mock.call('aggr-get-iter', {}, enable_tunneling=False)]) self.assertListEqual([], result) def test_get_node_for_aggregate(self): api_response = netapp_api.NaElement( fake_client.AGGR_GET_NODE_RESPONSE).get_child_by_name( 'attributes-list').get_children() self.mock_object(self.client, '_get_aggregates', return_value=api_response) result = self.client.get_node_for_aggregate( fake_client.VOLUME_AGGREGATE_NAME) desired_attributes = { 'aggr-attributes': { 'aggregate-name': None, 'aggr-ownership-attributes': { 'home-name': None, }, }, } self.client._get_aggregates.assert_has_calls([ mock.call( aggregate_names=[fake_client.VOLUME_AGGREGATE_NAME], desired_attributes=desired_attributes)]) self.assertEqual(fake_client.NODE_NAME, result) def test_get_node_for_aggregate_none_requested(self): result = self.client.get_node_for_aggregate(None) self.assertIsNone(result) def test_get_node_for_aggregate_api_not_found(self): api_error = self._mock_api_error(netapp_api.EAPINOTFOUND) self.mock_object(self.client.connection, 'send_request', side_effect=api_error) result = 
self.client.get_node_for_aggregate( fake_client.VOLUME_AGGREGATE_NAME) self.assertIsNone(result) def test_get_node_for_aggregate_api_error(self): self.mock_object(self.client.connection, 'send_request', self._mock_api_error()) self.assertRaises(netapp_api.NaApiError, self.client.get_node_for_aggregate, fake_client.VOLUME_AGGREGATE_NAME) def test_get_node_for_aggregate_not_found(self): api_response = netapp_api.NaElement(fake_client.NO_RECORDS_RESPONSE) self.mock_object(self.client.connection, 'send_request', return_value=api_response) result = self.client.get_node_for_aggregate( fake_client.VOLUME_AGGREGATE_NAME) self.assertIsNone(result) def test_get_aggregate_none_specified(self): result = self.client.get_aggregate('') self.assertEqual({}, result) def test_get_aggregate(self): api_response = netapp_api.NaElement( fake_client.AGGR_GET_ITER_SSC_RESPONSE).get_child_by_name( 'attributes-list').get_children() self.mock_object(self.client, '_get_aggregates', return_value=api_response) result = self.client.get_aggregate(fake_client.VOLUME_AGGREGATE_NAME) desired_attributes = { 'aggr-attributes': { 'aggregate-name': None, 'aggr-raid-attributes': { 'raid-type': None, 'is-hybrid': None, }, 'aggr-ownership-attributes': { 'home-name': None, }, }, } self.client._get_aggregates.assert_has_calls([ mock.call( aggregate_names=[fake_client.VOLUME_AGGREGATE_NAME], desired_attributes=desired_attributes)]) expected = { 'name': fake_client.VOLUME_AGGREGATE_NAME, 'raid-type': 'raid_dp', 'is-hybrid': True, 'node-name': fake_client.NODE_NAME, } self.assertEqual(expected, result) def test_get_aggregate_not_found(self): api_response = netapp_api.NaElement(fake_client.NO_RECORDS_RESPONSE) self.mock_object(self.client.connection, 'send_request', return_value=api_response) result = self.client.get_aggregate(fake_client.VOLUME_AGGREGATE_NAME) self.assertEqual({}, result) def test_get_aggregate_api_error(self): self.mock_object(self.client.connection, 'send_request', side_effect=self._mock_api_error()) result = self.client.get_aggregate(fake_client.VOLUME_AGGREGATE_NAME) self.assertEqual({}, result) def test_get_aggregate_api_not_found(self): api_error = netapp_api.NaApiError(code=netapp_api.EAPINOTFOUND) self.mock_object(self.client.connection, 'send_iter_request', side_effect=api_error) result = self.client.get_aggregate(fake_client.VOLUME_AGGREGATE_NAME) self.assertEqual({}, result) @ddt.data({'types': {'FCAL'}, 'expected': ['FCAL']}, {'types': {'SATA', 'SSD'}, 'expected': ['SATA', 'SSD']},) @ddt.unpack def test_get_aggregate_disk_types(self, types, expected): mock_get_aggregate_disk_types = self.mock_object( self.client, '_get_aggregate_disk_types', return_value=types) result = self.client.get_aggregate_disk_types( fake_client.VOLUME_AGGREGATE_NAME) self.assertCountEqual(expected, result) mock_get_aggregate_disk_types.assert_called_once_with( fake_client.VOLUME_AGGREGATE_NAME) def test_get_aggregate_disk_types_not_found(self): mock_get_aggregate_disk_types = self.mock_object( self.client, '_get_aggregate_disk_types', return_value=set()) result = self.client.get_aggregate_disk_types( fake_client.VOLUME_AGGREGATE_NAME) self.assertIsNone(result) mock_get_aggregate_disk_types.assert_called_once_with( fake_client.VOLUME_AGGREGATE_NAME) def test_get_aggregate_disk_types_api_not_found(self): api_error = netapp_api.NaApiError(code=netapp_api.EAPINOTFOUND) self.mock_object(self.client, 'send_iter_request', side_effect=api_error) result = self.client.get_aggregate_disk_types( fake_client.VOLUME_AGGREGATE_NAME) 
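        # EAPINOTFOUND is swallowed and reported as no disk type information.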
self.assertIsNone(result) def test_get_aggregate_disk_types_shared(self): self.client.features.add_feature('ADVANCED_DISK_PARTITIONING') mock_get_aggregate_disk_types = self.mock_object( self.client, '_get_aggregate_disk_types', side_effect=[set(['SSD']), set(['SATA'])]) result = self.client.get_aggregate_disk_types( fake_client.VOLUME_AGGREGATE_NAME) self.assertIsInstance(result, list) self.assertCountEqual(['SATA', 'SSD'], result) mock_get_aggregate_disk_types.assert_has_calls([ mock.call(fake_client.VOLUME_AGGREGATE_NAME), mock.call(fake_client.VOLUME_AGGREGATE_NAME, shared=True), ]) def test__get_aggregate_disk_types(self): api_response = netapp_api.NaElement( fake_client.STORAGE_DISK_GET_ITER_RESPONSE) self.mock_object(self.client, 'send_iter_request', return_value=api_response) result = self.client._get_aggregate_disk_types( fake_client.VOLUME_AGGREGATE_NAME) storage_disk_get_iter_args = { 'query': { 'storage-disk-info': { 'disk-raid-info': { 'disk-aggregate-info': { 'aggregate-name': fake_client.VOLUME_AGGREGATE_NAME, }, }, }, }, 'desired-attributes': { 'storage-disk-info': { 'disk-raid-info': { 'effective-disk-type': None, }, }, }, } self.client.send_iter_request.assert_called_once_with( 'storage-disk-get-iter', storage_disk_get_iter_args, enable_tunneling=False) expected = set(fake_client.AGGREGATE_DISK_TYPES) self.assertEqual(expected, result) def test__get_aggregate_disk_types_shared(self): api_response = netapp_api.NaElement( fake_client.STORAGE_DISK_GET_ITER_RESPONSE) self.mock_object(self.client, 'send_iter_request', return_value=api_response) result = self.client._get_aggregate_disk_types( fake_client.VOLUME_AGGREGATE_NAME, shared=True) storage_disk_get_iter_args = { 'query': { 'storage-disk-info': { 'disk-raid-info': { 'disk-shared-info': { 'aggregate-list': { 'shared-aggregate-info': { 'aggregate-name': fake_client.VOLUME_AGGREGATE_NAME, }, }, }, }, }, }, 'desired-attributes': { 'storage-disk-info': { 'disk-raid-info': { 'effective-disk-type': None, }, }, }, } self.client.send_iter_request.assert_called_once_with( 'storage-disk-get-iter', storage_disk_get_iter_args, enable_tunneling=False) expected = set(fake_client.AGGREGATE_DISK_TYPES) self.assertEqual(expected, result) def test__get_aggregate_disk_types_not_found(self): api_response = netapp_api.NaElement(fake_client.NO_RECORDS_RESPONSE) self.mock_object(self.client, 'send_iter_request', return_value=api_response) result = self.client._get_aggregate_disk_types( fake_client.VOLUME_AGGREGATE_NAME) self.assertEqual(set(), result) def test__get_aggregate_disk_types_api_error(self): self.mock_object(self.client, 'send_iter_request', side_effect=self._mock_api_error()) result = self.client._get_aggregate_disk_types( fake_client.VOLUME_AGGREGATE_NAME) self.assertEqual(set([]), result) def test_get_aggregate_capacities(self): aggr1_capacities = { 'percent-used': 50, 'size-available': 100.0, 'size-total': 200.0, } aggr2_capacities = { 'percent-used': 75, 'size-available': 125.0, 'size-total': 500.0, } mock_get_aggregate_capacity = self.mock_object( self.client, 'get_aggregate_capacity', side_effect=[aggr1_capacities, aggr2_capacities]) result = self.client.get_aggregate_capacities(['aggr1', 'aggr2']) expected = { 'aggr1': aggr1_capacities, 'aggr2': aggr2_capacities, } self.assertEqual(expected, result) mock_get_aggregate_capacity.assert_has_calls([ mock.call('aggr1'), mock.call('aggr2'), ]) def test_get_aggregate_capacities_not_found(self): mock_get_aggregate_capacity = self.mock_object( self.client, 'get_aggregate_capacity', 
side_effect=[{}, {}]) result = self.client.get_aggregate_capacities(['aggr1', 'aggr2']) expected = { 'aggr1': {}, 'aggr2': {}, } self.assertEqual(expected, result) mock_get_aggregate_capacity.assert_has_calls([ mock.call('aggr1'), mock.call('aggr2'), ]) def test_get_aggregate_capacities_not_list(self): result = self.client.get_aggregate_capacities('aggr1') self.assertEqual({}, result) def test_get_aggregate_capacity(self): api_response = netapp_api.NaElement( fake_client.AGGR_GET_ITER_CAPACITY_RESPONSE).get_child_by_name( 'attributes-list').get_children() self.mock_object(self.client, '_get_aggregates', return_value=api_response) result = self.client.get_aggregate_capacity( fake_client.VOLUME_AGGREGATE_NAME) desired_attributes = { 'aggr-attributes': { 'aggr-space-attributes': { 'percent-used-capacity': None, 'size-available': None, 'size-total': None, }, }, } self.client._get_aggregates.assert_has_calls([ mock.call( aggregate_names=[fake_client.VOLUME_AGGREGATE_NAME], desired_attributes=desired_attributes)]) expected = { 'percent-used': float(fake_client.AGGR_USED_PERCENT), 'size-available': float(fake_client.AGGR_SIZE_AVAILABLE), 'size-total': float(fake_client.AGGR_SIZE_TOTAL), } self.assertEqual(expected, result) def test_get_aggregate_capacity_not_found(self): api_response = netapp_api.NaElement(fake_client.NO_RECORDS_RESPONSE) self.mock_object(self.client.connection, 'send_request', return_value=api_response) result = self.client.get_aggregate_capacity( fake_client.VOLUME_AGGREGATE_NAME) self.assertEqual({}, result) def test_get_aggregate_capacity_api_error(self): self.mock_object(self.client.connection, 'send_request', side_effect=self._mock_api_error()) result = self.client.get_aggregate_capacity( fake_client.VOLUME_AGGREGATE_NAME) self.assertEqual({}, result) def test_get_aggregate_capacity_api_not_found(self): api_error = netapp_api.NaApiError(code=netapp_api.EAPINOTFOUND) self.mock_object( self.client.connection, 'send_request', side_effect=api_error) result = self.client.get_aggregate_capacity( fake_client.VOLUME_AGGREGATE_NAME) self.assertEqual({}, result) def test_get_performance_instance_uuids(self): self.mock_send_request.return_value = netapp_api.NaElement( fake_client.PERF_OBJECT_INSTANCE_LIST_INFO_ITER_RESPONSE) result = self.client.get_performance_instance_uuids( 'system', fake_client.NODE_NAME) expected = [fake_client.NODE_NAME + ':kernel:system'] self.assertEqual(expected, result) perf_object_instance_list_info_iter_args = { 'objectname': 'system', 'query': { 'instance-info': { 'uuid': fake_client.NODE_NAME + ':*', } } } self.mock_send_request.assert_called_once_with( 'perf-object-instance-list-info-iter', perf_object_instance_list_info_iter_args, enable_tunneling=False) def test_get_performance_counters(self): self.mock_send_request.return_value = netapp_api.NaElement( fake_client.PERF_OBJECT_GET_INSTANCES_SYSTEM_RESPONSE_CMODE) instance_uuids = [ fake_client.NODE_NAMES[0] + ':kernel:system', fake_client.NODE_NAMES[1] + ':kernel:system', ] counter_names = ['avg_processor_busy'] result = self.client.get_performance_counters('system', instance_uuids, counter_names) expected = [ { 'avg_processor_busy': '5674745133134', 'instance-name': 'system', 'instance-uuid': instance_uuids[0], 'node-name': fake_client.NODE_NAMES[0], 'timestamp': '1453412013', }, { 'avg_processor_busy': '4077649009234', 'instance-name': 'system', 'instance-uuid': instance_uuids[1], 'node-name': fake_client.NODE_NAMES[1], 'timestamp': '1453412013' }, ] self.assertEqual(expected, result) 
perf_object_get_instances_args = { 'objectname': 'system', 'instance-uuids': [ {'instance-uuid': instance_uuid} for instance_uuid in instance_uuids ], 'counters': [ {'counter': counter} for counter in counter_names ], } self.mock_send_request.assert_called_once_with( 'perf-object-get-instances', perf_object_get_instances_args, enable_tunneling=False) def test_check_iscsi_initiator_exists_when_no_initiator_exists(self): self.connection.invoke_successfully = mock.Mock( side_effect=netapp_api.NaApiError) initiator = fake_client.INITIATOR_IQN initiator_exists = self.client.check_iscsi_initiator_exists(initiator) self.assertFalse(initiator_exists) def test_check_iscsi_initiator_exists_when_initiator_exists(self): self.connection.invoke_successfully = mock.Mock() initiator = fake_client.INITIATOR_IQN initiator_exists = self.client.check_iscsi_initiator_exists(initiator) self.assertTrue(initiator_exists) def test_set_iscsi_chap_authentication_no_previous_initiator(self): self.connection.invoke_successfully = mock.Mock() self.mock_object(self.client, 'check_iscsi_initiator_exists', return_value=False) ssh = mock.Mock(paramiko.SSHClient) sshpool = mock.Mock(ssh_utils.SSHPool) self.client.ssh_client.ssh_pool = sshpool self.mock_object(self.client.ssh_client, 'execute_command_with_prompt') sshpool.item().__enter__ = mock.Mock(return_value=ssh) sshpool.item().__exit__ = mock.Mock(return_value=False) self.client.set_iscsi_chap_authentication(fake_client.INITIATOR_IQN, fake_client.USER_NAME, fake_client.PASSWORD) command = ('iscsi security create -vserver fake_vserver ' '-initiator-name iqn.2015-06.com.netapp:fake_iqn ' '-auth-type CHAP -user-name fake_user') self.client.ssh_client.execute_command_with_prompt.assert_has_calls( [mock.call(ssh, command, 'Password:', fake_client.PASSWORD)] ) def test_set_iscsi_chap_authentication_with_preexisting_initiator(self): self.connection.invoke_successfully = mock.Mock() self.mock_object(self.client, 'check_iscsi_initiator_exists', return_value=True) ssh = mock.Mock(paramiko.SSHClient) sshpool = mock.Mock(ssh_utils.SSHPool) self.client.ssh_client.ssh_pool = sshpool self.mock_object(self.client.ssh_client, 'execute_command_with_prompt') sshpool.item().__enter__ = mock.Mock(return_value=ssh) sshpool.item().__exit__ = mock.Mock(return_value=False) self.client.set_iscsi_chap_authentication(fake_client.INITIATOR_IQN, fake_client.USER_NAME, fake_client.PASSWORD) command = ('iscsi security modify -vserver fake_vserver ' '-initiator-name iqn.2015-06.com.netapp:fake_iqn ' '-auth-type CHAP -user-name fake_user') self.client.ssh_client.execute_command_with_prompt.assert_has_calls( [mock.call(ssh, command, 'Password:', fake_client.PASSWORD)] ) def test_set_iscsi_chap_authentication_with_ssh_exception(self): self.connection.invoke_successfully = mock.Mock() self.mock_object(self.client, 'check_iscsi_initiator_exists', return_value=True) ssh = mock.Mock(paramiko.SSHClient) sshpool = mock.Mock(ssh_utils.SSHPool) self.client.ssh_client.ssh_pool = sshpool sshpool.item().__enter__ = mock.Mock(return_value=ssh) sshpool.item().__enter__.side_effect = paramiko.SSHException( 'Connection Failure') sshpool.item().__exit__ = mock.Mock(return_value=False) self.assertRaises(exception.VolumeBackendAPIException, self.client.set_iscsi_chap_authentication, fake_client.INITIATOR_IQN, fake_client.USER_NAME, fake_client.PASSWORD) def test_get_snapshot_if_snapshot_present_not_busy(self): expected_vol_name = fake.SNAPSHOT['volume_id'] expected_snapshot_name = fake.SNAPSHOT['name'] response = 
netapp_api.NaElement( fake_client.SNAPSHOT_INFO_FOR_PRESENT_NOT_BUSY_SNAPSHOT_CMODE) self.mock_send_request.return_value = response snapshot = self.client.get_snapshot(expected_vol_name, expected_snapshot_name) self.assertEqual(expected_vol_name, snapshot['volume']) self.assertEqual(expected_snapshot_name, snapshot['name']) self.assertEqual(set([]), snapshot['owners']) self.assertFalse(snapshot['busy']) def test_get_snapshot_if_snapshot_present_busy(self): expected_vol_name = fake.SNAPSHOT['volume_id'] expected_snapshot_name = fake.SNAPSHOT['name'] response = netapp_api.NaElement( fake_client.SNAPSHOT_INFO_FOR_PRESENT_BUSY_SNAPSHOT_CMODE) self.mock_send_request.return_value = response snapshot = self.client.get_snapshot(expected_vol_name, expected_snapshot_name) self.assertEqual(expected_vol_name, snapshot['volume']) self.assertEqual(expected_snapshot_name, snapshot['name']) self.assertEqual(set([]), snapshot['owners']) self.assertTrue(snapshot['busy']) def test_get_snapshot_if_snapshot_not_present(self): expected_vol_name = fake.SNAPSHOT['volume_id'] expected_snapshot_name = fake.SNAPSHOT['name'] response = netapp_api.NaElement(fake_client.NO_RECORDS_RESPONSE) self.mock_send_request.return_value = response self.assertRaises(exception.SnapshotNotFound, self.client.get_snapshot, expected_vol_name, expected_snapshot_name) def test_create_cluster_peer(self): self.mock_object(self.client.connection, 'send_request') self.client.create_cluster_peer(['fake_address_1', 'fake_address_2'], 'fake_user', 'fake_password', 'fake_passphrase') cluster_peer_create_args = { 'peer-addresses': [ {'remote-inet-address': 'fake_address_1'}, {'remote-inet-address': 'fake_address_2'}, ], 'user-name': 'fake_user', 'password': 'fake_password', 'passphrase': 'fake_passphrase', } self.client.connection.send_request.assert_has_calls([ mock.call('cluster-peer-create', cluster_peer_create_args)]) def test_get_cluster_peers(self): api_response = netapp_api.NaElement( fake_client.CLUSTER_PEER_GET_ITER_RESPONSE) self.mock_object(self.client, 'send_iter_request', return_value=api_response) result = self.client.get_cluster_peers() cluster_peer_get_iter_args = {} self.client.send_iter_request.assert_has_calls([ mock.call('cluster-peer-get-iter', cluster_peer_get_iter_args)]) expected = [{ 'active-addresses': [ fake_client.CLUSTER_ADDRESS_1, fake_client.CLUSTER_ADDRESS_2 ], 'availability': 'available', 'cluster-name': fake_client.CLUSTER_NAME, 'cluster-uuid': 'fake_uuid', 'peer-addresses': [fake_client.CLUSTER_ADDRESS_1], 'remote-cluster-name': fake_client.REMOTE_CLUSTER_NAME, 'serial-number': 'fake_serial_number', 'timeout': '60', }] self.assertEqual(expected, result) def test_get_cluster_peers_single(self): api_response = netapp_api.NaElement( fake_client.CLUSTER_PEER_GET_ITER_RESPONSE) self.mock_object(self.client, 'send_iter_request', return_value=api_response) self.client.get_cluster_peers( remote_cluster_name=fake_client.CLUSTER_NAME) cluster_peer_get_iter_args = { 'query': { 'cluster-peer-info': { 'remote-cluster-name': fake_client.CLUSTER_NAME, } }, } self.client.send_iter_request.assert_has_calls([ mock.call('cluster-peer-get-iter', cluster_peer_get_iter_args)]) def test_get_cluster_peers_not_found(self): api_response = netapp_api.NaElement(fake_client.NO_RECORDS_RESPONSE) self.mock_object(self.client, 'send_iter_request', return_value=api_response) result = self.client.get_cluster_peers( remote_cluster_name=fake_client.CLUSTER_NAME) self.assertEqual([], result) self.assertTrue(self.client.send_iter_request.called) def 
test_delete_cluster_peer(self): self.mock_object(self.client.connection, 'send_request') self.client.delete_cluster_peer(fake_client.CLUSTER_NAME) cluster_peer_delete_args = {'cluster-name': fake_client.CLUSTER_NAME} self.client.connection.send_request.assert_has_calls([ mock.call('cluster-peer-delete', cluster_peer_delete_args)]) def test_get_cluster_peer_policy(self): self.client.features.add_feature('CLUSTER_PEER_POLICY') api_response = netapp_api.NaElement( fake_client.CLUSTER_PEER_POLICY_GET_RESPONSE) self.mock_object(self.client.connection, 'send_request', return_value=api_response) result = self.client.get_cluster_peer_policy() expected = { 'is-unauthenticated-access-permitted': False, 'passphrase-minimum-length': 8, } self.assertEqual(expected, result) self.assertTrue(self.client.connection.send_request.called) def test_get_cluster_peer_policy_not_supported(self): result = self.client.get_cluster_peer_policy() self.assertEqual({}, result) def test_set_cluster_peer_policy_not_supported(self): self.mock_object(self.client.connection, 'send_request') self.client.set_cluster_peer_policy() self.assertFalse(self.client.connection.send_request.called) def test_set_cluster_peer_policy_no_arguments(self): self.client.features.add_feature('CLUSTER_PEER_POLICY') self.mock_object(self.client.connection, 'send_request') self.client.set_cluster_peer_policy() self.assertFalse(self.client.connection.send_request.called) def test_set_cluster_peer_policy(self): self.client.features.add_feature('CLUSTER_PEER_POLICY') self.mock_object(self.client.connection, 'send_request') self.client.set_cluster_peer_policy( is_unauthenticated_access_permitted=True, passphrase_minimum_length=12) cluster_peer_policy_modify_args = { 'is-unauthenticated-access-permitted': 'true', 'passphrase-minlength': '12', } self.client.connection.send_request.assert_has_calls([ mock.call('cluster-peer-policy-modify', cluster_peer_policy_modify_args)]) def test_create_vserver_peer(self): self.mock_object(self.client.connection, 'send_request') self.client.create_vserver_peer('fake_vserver', 'fake_vserver_peer') vserver_peer_create_args = { 'vserver': 'fake_vserver', 'peer-vserver': 'fake_vserver_peer', 'applications': [ {'vserver-peer-application': 'snapmirror'}, ], } self.client.connection.send_request.assert_has_calls([ mock.call('vserver-peer-create', vserver_peer_create_args, enable_tunneling=False)]) def test_delete_vserver_peer(self): self.mock_object(self.client.connection, 'send_request') self.client.delete_vserver_peer('fake_vserver', 'fake_vserver_peer') vserver_peer_delete_args = { 'vserver': 'fake_vserver', 'peer-vserver': 'fake_vserver_peer', } self.client.connection.send_request.assert_has_calls([ mock.call('vserver-peer-delete', vserver_peer_delete_args)]) def test_accept_vserver_peer(self): self.mock_object(self.client.connection, 'send_request') self.client.accept_vserver_peer('fake_vserver', 'fake_vserver_peer') vserver_peer_accept_args = { 'vserver': 'fake_vserver', 'peer-vserver': 'fake_vserver_peer', } self.client.connection.send_request.assert_has_calls([ mock.call('vserver-peer-accept', vserver_peer_accept_args)]) def test_get_file_sizes_by_dir(self): api_response = netapp_api.NaElement( fake_client.FILE_SIZES_BY_DIR_GET_ITER_RESPONSE) self.mock_object(self.client, 'send_iter_request', return_value=api_response) result = self.client.get_file_sizes_by_dir(fake.NETAPP_VOLUME) get_get_file_sizes_by_dir_get_iter_args = { 'path': '/vol/%s' % fake.NETAPP_VOLUME, 'query': { 'file-info': { 'file-type': 'file', } }, 
'desired-attributes': { 'file-info': { 'name': None, 'file-size': None } }, } self.client.send_iter_request.assert_has_calls([ mock.call('file-list-directory-iter', get_get_file_sizes_by_dir_get_iter_args, max_page_length=100)]) expected = [{ 'name': fake.VOLUME_NAME, 'file-size': float(1024) }] self.assertEqual(expected, result) def test_get_file_sizes_by_dir_not_found(self): api_response = netapp_api.NaElement(fake_client.NO_RECORDS_RESPONSE) self.mock_object(self.client, 'send_iter_request', return_value=api_response) result = self.client.get_file_sizes_by_dir(fake.NETAPP_VOLUME) self.assertEqual([], result) self.assertTrue(self.client.send_iter_request.called) def test_get_lun_sizes_by_volume(self): api_response = netapp_api.NaElement( fake_client.LUN_SIZES_BY_VOLUME_GET_ITER_RESPONSE) self.mock_object(self.client, 'send_iter_request', return_value=api_response) result = self.client.get_lun_sizes_by_volume(fake.NETAPP_VOLUME) get_lun_sizes_by_volume_get_iter_args = { 'query': { 'lun-info': { 'volume': fake.NETAPP_VOLUME, 'vserver': fake_client.VSERVER_NAME } }, 'desired-attributes': { 'lun-info': { 'path': None, 'size': None } }, } self.client.send_iter_request.assert_has_calls([ mock.call('lun-get-iter', get_lun_sizes_by_volume_get_iter_args, max_page_length=100)]) expected = [{ 'path': fake.VOLUME_PATH, 'size': float(1024) }] self.assertEqual(expected, result) def test_get_lun_sizes_by_volume_not_found(self): api_response = netapp_api.NaElement(fake_client.NO_RECORDS_RESPONSE) self.mock_object(self.client, 'send_iter_request', return_value=api_response) result = self.client.get_lun_sizes_by_volume(fake.NETAPP_VOLUME) self.assertEqual([], result) self.assertTrue(self.client.send_iter_request.called) def test_get_vserver_peers(self): api_response = netapp_api.NaElement( fake_client.VSERVER_PEER_GET_ITER_RESPONSE) self.mock_object(self.client, 'send_iter_request', return_value=api_response) result = self.client.get_vserver_peers( vserver_name=fake_client.VSERVER_NAME, peer_vserver_name=fake_client.VSERVER_NAME_2) vserver_peer_get_iter_args = { 'query': { 'vserver-peer-info': { 'vserver': fake_client.VSERVER_NAME, 'peer-vserver': fake_client.VSERVER_NAME_2, } } } self.client.send_iter_request.assert_has_calls([ mock.call('vserver-peer-get-iter', vserver_peer_get_iter_args, enable_tunneling=False)]) expected = [{ 'vserver': 'fake_vserver', 'peer-vserver': 'fake_vserver_2', 'peer-state': 'peered', 'peer-cluster': 'fake_cluster', 'applications': ['snapmirror'], }] self.assertEqual(expected, result) def test_get_vserver_peers_not_found(self): api_response = netapp_api.NaElement(fake_client.NO_RECORDS_RESPONSE) self.mock_object(self.client, 'send_iter_request', return_value=api_response) result = self.client.get_vserver_peers( vserver_name=fake_client.VSERVER_NAME, peer_vserver_name=fake_client.VSERVER_NAME_2) self.assertEqual([], result) self.assertTrue(self.client.send_iter_request.called) def test_ensure_snapmirror_v2(self): self.assertIsNone(self.client._ensure_snapmirror_v2()) def test_ensure_snapmirror_v2_not_supported(self): self.client.features.add_feature('SNAPMIRROR_V2', supported=False) self.assertRaises(netapp_utils.NetAppDriverException, self.client._ensure_snapmirror_v2) @ddt.data({'schedule': 'fake_schedule', 'policy': 'fake_policy'}, {'schedule': None, 'policy': None}) @ddt.unpack def test_create_snapmirror(self, schedule, policy): self.mock_object(self.client.connection, 'send_request') sm_source_cg = None sm_destination_cg = None self.client.create_snapmirror( 
fake_client.SM_SOURCE_VSERVER, fake_client.SM_SOURCE_VOLUME, fake_client.SM_DEST_VSERVER, fake_client.SM_DEST_VOLUME, sm_source_cg, sm_destination_cg, schedule=schedule, policy=policy) snapmirror_create_args = { 'source-vserver': fake_client.SM_SOURCE_VSERVER, 'source-volume': fake_client.SM_SOURCE_VOLUME, 'destination-vserver': fake_client.SM_DEST_VSERVER, 'destination-volume': fake_client.SM_DEST_VOLUME, 'relationship-type': 'data_protection', } if schedule: snapmirror_create_args['schedule'] = schedule if policy: snapmirror_create_args['policy'] = policy self.client.connection.send_request.assert_has_calls([ mock.call('snapmirror-create', snapmirror_create_args)]) def test_create_snapmirror_already_exists(self): api_error = netapp_api.NaApiError(code=netapp_api.ERELATION_EXISTS) self.mock_object( self.client.connection, 'send_request', side_effect=api_error) self.client.create_snapmirror( fake_client.SM_SOURCE_VSERVER, fake_client.SM_SOURCE_VOLUME, fake_client.SM_DEST_VSERVER, fake_client.SM_DEST_VOLUME) snapmirror_create_args = { 'source-vserver': fake_client.SM_SOURCE_VSERVER, 'source-volume': fake_client.SM_SOURCE_VOLUME, 'destination-vserver': fake_client.SM_DEST_VSERVER, 'destination-volume': fake_client.SM_DEST_VOLUME, 'relationship-type': 'data_protection', } self.client.connection.send_request.assert_has_calls([ mock.call('snapmirror-create', snapmirror_create_args)]) def test_create_snapmirror_error(self): api_error = netapp_api.NaApiError(code=0) self.mock_object( self.client.connection, 'send_request', side_effect=api_error) self.assertRaises(netapp_api.NaApiError, self.client.create_snapmirror, fake_client.SM_SOURCE_VSERVER, fake_client.SM_SOURCE_VOLUME, fake_client.SM_DEST_VSERVER, fake_client.SM_DEST_VOLUME) self.assertTrue(self.client.connection.send_request.called) @ddt.data( { 'source_snapshot': 'fake_snapshot', 'transfer_priority': 'fake_priority' }, { 'source_snapshot': None, 'transfer_priority': None } ) @ddt.unpack def test_initialize_snapmirror(self, source_snapshot, transfer_priority): api_response = netapp_api.NaElement( fake_client.SNAPMIRROR_INITIALIZE_RESULT) self.mock_object(self.client.connection, 'send_request', return_value=api_response) result = self.client.initialize_snapmirror( fake_client.SM_SOURCE_VSERVER, fake_client.SM_SOURCE_VOLUME, fake_client.SM_DEST_VSERVER, fake_client.SM_DEST_VOLUME, source_snapshot=source_snapshot, transfer_priority=transfer_priority) snapmirror_initialize_args = { 'source-vserver': fake_client.SM_SOURCE_VSERVER, 'source-volume': fake_client.SM_SOURCE_VOLUME, 'destination-vserver': fake_client.SM_DEST_VSERVER, 'destination-volume': fake_client.SM_DEST_VOLUME, } if source_snapshot: snapmirror_initialize_args['source-snapshot'] = source_snapshot if transfer_priority: snapmirror_initialize_args['transfer-priority'] = transfer_priority self.client.connection.send_request.assert_has_calls([ mock.call('snapmirror-initialize', snapmirror_initialize_args)]) expected = { 'operation-id': None, 'status': 'succeeded', 'jobid': None, 'error-code': None, 'error-message': None } self.assertEqual(expected, result) @ddt.data(True, False) def test_release_snapmirror(self, relationship_info_only): self.mock_object(self.client.connection, 'send_request') self.client.release_snapmirror( fake_client.SM_SOURCE_VSERVER, fake_client.SM_SOURCE_VOLUME, fake_client.SM_DEST_VSERVER, fake_client.SM_DEST_VOLUME, relationship_info_only=relationship_info_only) snapmirror_release_args = { 'query': { 'snapmirror-destination-info': { 'source-vserver': 
fake_client.SM_SOURCE_VSERVER, 'source-volume': fake_client.SM_SOURCE_VOLUME, 'destination-vserver': fake_client.SM_DEST_VSERVER, 'destination-volume': fake_client.SM_DEST_VOLUME, 'relationship-info-only': ('true' if relationship_info_only else 'false'), } } } self.client.connection.send_request.assert_has_calls([ mock.call('snapmirror-release-iter', snapmirror_release_args)]) def test_quiesce_snapmirror(self): self.mock_object(self.client.connection, 'send_request') self.client.quiesce_snapmirror( fake_client.SM_SOURCE_VSERVER, fake_client.SM_SOURCE_VOLUME, fake_client.SM_DEST_VSERVER, fake_client.SM_DEST_VOLUME) snapmirror_quiesce_args = { 'source-vserver': fake_client.SM_SOURCE_VSERVER, 'source-volume': fake_client.SM_SOURCE_VOLUME, 'destination-vserver': fake_client.SM_DEST_VSERVER, 'destination-volume': fake_client.SM_DEST_VOLUME, } self.client.connection.send_request.assert_has_calls([ mock.call('snapmirror-quiesce', snapmirror_quiesce_args)]) @ddt.data(True, False) def test_abort_snapmirror(self, clear_checkpoint): self.mock_object(self.client.connection, 'send_request') self.client.abort_snapmirror( fake_client.SM_SOURCE_VSERVER, fake_client.SM_SOURCE_VOLUME, fake_client.SM_DEST_VSERVER, fake_client.SM_DEST_VOLUME, clear_checkpoint=clear_checkpoint) snapmirror_abort_args = { 'source-vserver': fake_client.SM_SOURCE_VSERVER, 'source-volume': fake_client.SM_SOURCE_VOLUME, 'destination-vserver': fake_client.SM_DEST_VSERVER, 'destination-volume': fake_client.SM_DEST_VOLUME, 'clear-checkpoint': 'true' if clear_checkpoint else 'false', } self.client.connection.send_request.assert_has_calls([ mock.call('snapmirror-abort', snapmirror_abort_args)]) def test_abort_snapmirror_no_transfer_in_progress(self): api_error = netapp_api.NaApiError( code=netapp_api.ENOTRANSFER_IN_PROGRESS) self.mock_object( self.client.connection, 'send_request', side_effect=api_error) self.client.abort_snapmirror( fake_client.SM_SOURCE_VSERVER, fake_client.SM_SOURCE_VOLUME, fake_client.SM_DEST_VSERVER, fake_client.SM_DEST_VOLUME) snapmirror_abort_args = { 'source-vserver': fake_client.SM_SOURCE_VSERVER, 'source-volume': fake_client.SM_SOURCE_VOLUME, 'destination-vserver': fake_client.SM_DEST_VSERVER, 'destination-volume': fake_client.SM_DEST_VOLUME, 'clear-checkpoint': 'false', } self.client.connection.send_request.assert_has_calls([ mock.call('snapmirror-abort', snapmirror_abort_args)]) def test_abort_snapmirror_error(self): api_error = netapp_api.NaApiError(code=0) self.mock_object( self.client.connection, 'send_request', side_effect=api_error) self.assertRaises(netapp_api.NaApiError, self.client.abort_snapmirror, fake_client.SM_SOURCE_VSERVER, fake_client.SM_SOURCE_VOLUME, fake_client.SM_DEST_VSERVER, fake_client.SM_DEST_VOLUME) def test_break_snapmirror(self): self.mock_object(self.client.connection, 'send_request') self.client.break_snapmirror( fake_client.SM_SOURCE_VSERVER, fake_client.SM_SOURCE_VOLUME, fake_client.SM_DEST_VSERVER, fake_client.SM_DEST_VOLUME) snapmirror_break_args = { 'source-vserver': fake_client.SM_SOURCE_VSERVER, 'source-volume': fake_client.SM_SOURCE_VOLUME, 'destination-vserver': fake_client.SM_DEST_VSERVER, 'destination-volume': fake_client.SM_DEST_VOLUME, } self.client.connection.send_request.assert_has_calls([ mock.call('snapmirror-break', snapmirror_break_args)]) @ddt.data( { 'schedule': 'fake_schedule', 'policy': 'fake_policy', 'tries': 5, 'max_transfer_rate': 1024, }, { 'schedule': None, 'policy': None, 'tries': None, 'max_transfer_rate': None, } ) @ddt.unpack def 
test_modify_snapmirror(self, schedule, policy, tries, max_transfer_rate): self.mock_object(self.client.connection, 'send_request') self.client.modify_snapmirror( fake_client.SM_SOURCE_VSERVER, fake_client.SM_SOURCE_VOLUME, fake_client.SM_DEST_VSERVER, fake_client.SM_DEST_VOLUME, schedule=schedule, policy=policy, tries=tries, max_transfer_rate=max_transfer_rate) snapmirror_modify_args = { 'source-vserver': fake_client.SM_SOURCE_VSERVER, 'source-volume': fake_client.SM_SOURCE_VOLUME, 'destination-vserver': fake_client.SM_DEST_VSERVER, 'destination-volume': fake_client.SM_DEST_VOLUME, } if schedule: snapmirror_modify_args['schedule'] = schedule if policy: snapmirror_modify_args['policy'] = policy if tries: snapmirror_modify_args['tries'] = tries if max_transfer_rate: snapmirror_modify_args['max-transfer-rate'] = max_transfer_rate self.client.connection.send_request.assert_has_calls([ mock.call('snapmirror-modify', snapmirror_modify_args)]) def test_delete_snapmirror(self): self.mock_object(self.client.connection, 'send_request') self.client.delete_snapmirror( fake_client.SM_SOURCE_VSERVER, fake_client.SM_SOURCE_VOLUME, fake_client.SM_DEST_VSERVER, fake_client.SM_DEST_VOLUME) snapmirror_delete_args = { 'query': { 'snapmirror-info': { 'source-vserver': fake_client.SM_SOURCE_VSERVER, 'source-volume': fake_client.SM_SOURCE_VOLUME, 'destination-vserver': fake_client.SM_DEST_VSERVER, 'destination-volume': fake_client.SM_DEST_VOLUME, } } } self.client.connection.send_request.assert_has_calls([ mock.call('snapmirror-destroy-iter', snapmirror_delete_args)]) def test_update_snapmirror(self): self.mock_object(self.client.connection, 'send_request') self.client.update_snapmirror( fake_client.SM_SOURCE_VSERVER, fake_client.SM_SOURCE_VOLUME, fake_client.SM_DEST_VSERVER, fake_client.SM_DEST_VOLUME) snapmirror_update_args = { 'source-vserver': fake_client.SM_SOURCE_VSERVER, 'source-volume': fake_client.SM_SOURCE_VOLUME, 'destination-vserver': fake_client.SM_DEST_VSERVER, 'destination-volume': fake_client.SM_DEST_VOLUME, } self.client.connection.send_request.assert_has_calls([ mock.call('snapmirror-update', snapmirror_update_args)]) def test_update_snapmirror_already_transferring(self): api_error = netapp_api.NaApiError( code=netapp_api.ETRANSFER_IN_PROGRESS) self.mock_object( self.client.connection, 'send_request', side_effect=api_error) self.client.update_snapmirror( fake_client.SM_SOURCE_VSERVER, fake_client.SM_SOURCE_VOLUME, fake_client.SM_DEST_VSERVER, fake_client.SM_DEST_VOLUME) snapmirror_update_args = { 'source-vserver': fake_client.SM_SOURCE_VSERVER, 'source-volume': fake_client.SM_SOURCE_VOLUME, 'destination-vserver': fake_client.SM_DEST_VSERVER, 'destination-volume': fake_client.SM_DEST_VOLUME, } self.client.connection.send_request.assert_has_calls([ mock.call('snapmirror-update', snapmirror_update_args)]) def test_update_snapmirror_already_transferring_two(self): api_error = netapp_api.NaApiError(code=netapp_api.EANOTHER_OP_ACTIVE) self.mock_object( self.client.connection, 'send_request', side_effect=api_error) self.client.update_snapmirror( fake_client.SM_SOURCE_VSERVER, fake_client.SM_SOURCE_VOLUME, fake_client.SM_DEST_VSERVER, fake_client.SM_DEST_VOLUME) snapmirror_update_args = { 'source-vserver': fake_client.SM_SOURCE_VSERVER, 'source-volume': fake_client.SM_SOURCE_VOLUME, 'destination-vserver': fake_client.SM_DEST_VSERVER, 'destination-volume': fake_client.SM_DEST_VOLUME, } self.client.connection.send_request.assert_has_calls([ mock.call('snapmirror-update', snapmirror_update_args)]) def 
test_update_snapmirror_error(self): api_error = netapp_api.NaApiError(code=0) self.mock_object( self.client.connection, 'send_request', side_effect=api_error) self.assertRaises(netapp_api.NaApiError, self.client.update_snapmirror, fake_client.SM_SOURCE_VSERVER, fake_client.SM_SOURCE_VOLUME, fake_client.SM_DEST_VSERVER, fake_client.SM_DEST_VOLUME) def test_resume_snapmirror(self): self.mock_object(self.client.connection, 'send_request') self.client.resume_snapmirror( fake_client.SM_SOURCE_VSERVER, fake_client.SM_SOURCE_VOLUME, fake_client.SM_DEST_VSERVER, fake_client.SM_DEST_VOLUME) snapmirror_resume_args = { 'source-vserver': fake_client.SM_SOURCE_VSERVER, 'source-volume': fake_client.SM_SOURCE_VOLUME, 'destination-vserver': fake_client.SM_DEST_VSERVER, 'destination-volume': fake_client.SM_DEST_VOLUME, } self.client.connection.send_request.assert_has_calls([ mock.call('snapmirror-resume', snapmirror_resume_args)]) def test_resume_snapmirror_not_quiesed(self): api_error = netapp_api.NaApiError( code=netapp_api.ERELATION_NOT_QUIESCED) self.mock_object( self.client.connection, 'send_request', side_effect=api_error) self.client.resume_snapmirror( fake_client.SM_SOURCE_VSERVER, fake_client.SM_SOURCE_VOLUME, fake_client.SM_DEST_VSERVER, fake_client.SM_DEST_VOLUME) snapmirror_resume_args = { 'source-vserver': fake_client.SM_SOURCE_VSERVER, 'source-volume': fake_client.SM_SOURCE_VOLUME, 'destination-vserver': fake_client.SM_DEST_VSERVER, 'destination-volume': fake_client.SM_DEST_VOLUME, } self.client.connection.send_request.assert_has_calls([ mock.call('snapmirror-resume', snapmirror_resume_args)]) def test_resume_snapmirror_error(self): api_error = netapp_api.NaApiError(code=0) self.mock_object( self.client.connection, 'send_request', side_effect=api_error) self.assertRaises(netapp_api.NaApiError, self.client.resume_snapmirror, fake_client.SM_SOURCE_VSERVER, fake_client.SM_SOURCE_VOLUME, fake_client.SM_DEST_VSERVER, fake_client.SM_DEST_VOLUME) def test_resync_snapmirror(self): self.mock_object(self.client.connection, 'send_request') self.client.resync_snapmirror( fake_client.SM_SOURCE_VSERVER, fake_client.SM_SOURCE_VOLUME, fake_client.SM_DEST_VSERVER, fake_client.SM_DEST_VOLUME) snapmirror_resync_args = { 'source-vserver': fake_client.SM_SOURCE_VSERVER, 'source-volume': fake_client.SM_SOURCE_VOLUME, 'destination-vserver': fake_client.SM_DEST_VSERVER, 'destination-volume': fake_client.SM_DEST_VOLUME, } self.client.connection.send_request.assert_has_calls([ mock.call('snapmirror-resync', snapmirror_resync_args)]) def test__get_snapmirrors(self): api_response = netapp_api.NaElement( fake_client.SNAPMIRROR_GET_ITER_RESPONSE) self.mock_object(self.client, 'send_iter_request', return_value=api_response) desired_attributes = { 'snapmirror-info': { 'source-vserver': None, 'source-volume': None, 'destination-vserver': None, 'destination-volume': None, 'is-healthy': None, } } result = self.client._get_snapmirrors( fake_client.SM_SOURCE_VSERVER, fake_client.SM_SOURCE_VOLUME, fake_client.SM_DEST_VSERVER, fake_client.SM_DEST_VOLUME, desired_attributes=desired_attributes) snapmirror_get_iter_args = { 'query': { 'snapmirror-info': { 'source-vserver': fake_client.SM_SOURCE_VSERVER, 'source-volume': fake_client.SM_SOURCE_VOLUME, 'destination-vserver': fake_client.SM_DEST_VSERVER, 'destination-volume': fake_client.SM_DEST_VOLUME, }, }, 'desired-attributes': { 'snapmirror-info': { 'source-vserver': None, 'source-volume': None, 'destination-vserver': None, 'destination-volume': None, 'is-healthy': None, }, }, } 
self.client.send_iter_request.assert_has_calls([ mock.call('snapmirror-get-iter', snapmirror_get_iter_args)]) self.assertEqual(1, len(result)) def test__get_snapmirrors_not_found(self): api_response = netapp_api.NaElement(fake_client.NO_RECORDS_RESPONSE) self.mock_object(self.client, 'send_iter_request', return_value=api_response) result = self.client._get_snapmirrors() self.client.send_iter_request.assert_has_calls([ mock.call('snapmirror-get-iter', {})]) self.assertEqual([], result) def test_get_snapmirrors(self): api_response = netapp_api.NaElement( fake_client.SNAPMIRROR_GET_ITER_FILTERED_RESPONSE) self.mock_object(self.client, 'send_iter_request', return_value=api_response) desired_attributes = ['source-vserver', 'source-volume', 'destination-vserver', 'destination-volume', 'is-healthy', 'mirror-state', 'schedule'] result = self.client.get_snapmirrors( fake_client.SM_SOURCE_VSERVER, fake_client.SM_SOURCE_VOLUME, fake_client.SM_DEST_VSERVER, fake_client.SM_DEST_VOLUME, desired_attributes=desired_attributes) snapmirror_get_iter_args = { 'query': { 'snapmirror-info': { 'source-vserver': fake_client.SM_SOURCE_VSERVER, 'source-volume': fake_client.SM_SOURCE_VOLUME, 'destination-vserver': fake_client.SM_DEST_VSERVER, 'destination-volume': fake_client.SM_DEST_VOLUME, }, }, 'desired-attributes': { 'snapmirror-info': { 'source-vserver': None, 'source-volume': None, 'destination-vserver': None, 'destination-volume': None, 'is-healthy': None, 'mirror-state': None, 'schedule': None, }, }, } expected = [{ 'source-vserver': fake_client.SM_SOURCE_VSERVER, 'source-volume': fake_client.SM_SOURCE_VOLUME, 'destination-vserver': fake_client.SM_DEST_VSERVER, 'destination-volume': fake_client.SM_DEST_VOLUME, 'is-healthy': 'true', 'mirror-state': 'snapmirrored', 'schedule': 'daily', }] self.client.send_iter_request.assert_has_calls([ mock.call('snapmirror-get-iter', snapmirror_get_iter_args)]) self.assertEqual(expected, result) def test_get_provisioning_options_from_flexvol(self): self.mock_object(self.client, 'get_flexvol', return_value=fake_client.VOLUME_INFO_SSC) self.mock_object(self.client, 'get_flexvol_dedupe_info', return_value=fake_client.VOLUME_DEDUPE_INFO_SSC) expected_prov_opts = { 'aggregate': ['fake_aggr1'], 'compression_enabled': False, 'dedupe_enabled': True, 'language': 'c.utf_8', 'size': 1, 'snapshot_policy': 'default', 'snapshot_reserve': '5', 'space_guarantee_type': 'none', 'volume_type': 'rw', 'is_flexgroup': False, } actual_prov_opts = self.client.get_provisioning_options_from_flexvol( fake_client.VOLUME_NAME) self.assertEqual(expected_prov_opts, actual_prov_opts) def test_wait_for_busy_snapshot(self): # Need to mock sleep as it is called by @utils.retry self.mock_object(time, 'sleep') mock_get_snapshot = self.mock_object( self.client, 'get_snapshot', return_value=fake.SNAPSHOT ) self.client.wait_for_busy_snapshot(fake.FLEXVOL, fake.SNAPSHOT_NAME) mock_get_snapshot.assert_called_once_with(fake.FLEXVOL, fake.SNAPSHOT_NAME) def test_wait_for_busy_snapshot_raise_exception(self): # Need to mock sleep as it is called by @utils.retry self.mock_object(time, 'sleep') BUSY_SNAPSHOT = dict(fake.SNAPSHOT) BUSY_SNAPSHOT['busy'] = True mock_get_snapshot = self.mock_object( self.client, 'get_snapshot', return_value=BUSY_SNAPSHOT ) self.assertRaises(exception.SnapshotIsBusy, self.client.wait_for_busy_snapshot, fake.FLEXVOL, fake.SNAPSHOT_NAME) calls = [ mock.call(fake.FLEXVOL, fake.SNAPSHOT_NAME), mock.call(fake.FLEXVOL, fake.SNAPSHOT_NAME), mock.call(fake.FLEXVOL, fake.SNAPSHOT_NAME), ] 
mock_get_snapshot.assert_has_calls(calls) @ddt.data({ 'mock_return': fake_client.SNAPSHOT_INFO_FOR_PRESENT_NOT_BUSY_SNAPSHOT_CMODE, 'expected': [{ 'name': fake.SNAPSHOT_NAME, 'instance_id': 'abcd-ef01-2345-6789', 'volume_name': fake.SNAPSHOT['volume_id'], }] }, { 'mock_return': fake_client.NO_RECORDS_RESPONSE, 'expected': [], }) @ddt.unpack def test_get_snapshots_marked_for_deletion(self, mock_return, expected): api_response = netapp_api.NaElement(mock_return) self.mock_object(self.client.connection, 'send_request', return_value=api_response) result = self.client.get_snapshots_marked_for_deletion() api_args = { 'query': { 'snapshot-info': { 'name': client_base.DELETED_PREFIX + '*', 'vserver': self.vserver, 'busy': 'false' }, }, 'desired-attributes': { 'snapshot-info': { 'name': None, 'volume': None, 'snapshot-instance-uuid': None, } }, } self.client.connection.send_request.assert_called_once_with( 'snapshot-get-iter', api_args) self.assertListEqual(expected, result) @ddt.data(True, False) def test_is_qos_min_supported(self, supported): self.client.features.add_feature('test', supported=supported) mock_name = self.mock_object(netapp_utils, 'qos_min_feature_name', return_value='test') result = self.client.is_qos_min_supported(True, 'node') mock_name.assert_called_once_with(True, 'node') self.assertEqual(result, supported) def test_is_qos_min_supported_invalid_node(self): mock_name = self.mock_object(netapp_utils, 'qos_min_feature_name', return_value='invalid_feature') result = self.client.is_qos_min_supported(True, 'node') mock_name.assert_called_once_with(True, 'node') self.assertFalse(result) def test_is_qos_min_supported_none_node(self): result = self.client.is_qos_min_supported(True, None) self.assertFalse(result) def test_get_unique_volume(self): api_response = netapp_api.NaElement( fake_client.VOLUME_GET_ITER_STYLE_RESPONSE) volume_elem = netapp_api.NaElement(fake_client.VOLUME_FLEXGROUP_STYLE) volume_id_attr = self.client.get_unique_volume(api_response) xml_exp = str(volume_elem).replace(" ", "").replace("\n", "") xml_res = str(volume_id_attr).replace(" ", "").replace("\n", "") self.assertEqual(xml_exp, xml_res) def test_get_unique_volume_raise_exception(self): api_response = netapp_api.NaElement( fake_client.VOLUME_GET_ITER_SAME_STYLE_RESPONSE) self.assertRaises(exception.VolumeBackendAPIException, self.client.get_unique_volume, api_response) def test_get_cluster_name(self): api_response = netapp_api.NaElement( fake_client.GET_CLUSTER_NAME_RESPONSE) mock_send_request = self.mock_object( self.client.connection, 'send_request', return_value=api_response) api_args = { 'desired-attributes': { 'cluster-identity-info': { 'cluster-name': None, } } } result = self.client.get_cluster_name() mock_send_request.assert_called_once_with('cluster-identity-get', api_args, enable_tunneling=False) self.assertEqual(fake_client.CLUSTER_NAME, result) @ddt.data((fake_client.LUN_NAME, fake_client.DEST_VOLUME_NAME, None, fake_client.VOLUME_NAME), (fake_client.LUN_NAME, None, fake_client.DEST_LUN_NAME, fake_client.DEST_VOLUME_NAME)) @ddt.unpack def test_start_lun_move(self, src_lun_name, src_ontap_vol, dest_lun_name, dest_ontap_vol): api_response = netapp_api.NaElement( fake_client.START_LUN_MOVE_RESPONSE) mock_send_request = self.mock_object( self.client.connection, 'send_request', return_value=api_response) result = self.client.start_lun_move(src_lun_name, dest_ontap_vol, src_ontap_volume=src_ontap_vol, dest_lun_name=dest_lun_name) api_args = { 'paths': [{ 'lun-path-pair': { 'destination-path': 
'/vol/%s/%s' % (dest_ontap_vol, src_lun_name if dest_lun_name is None else dest_lun_name), 'source-path': '/vol/%s/%s' % (dest_ontap_vol if src_ontap_vol is None else src_ontap_vol, src_lun_name) } }] } mock_send_request.assert_called_once_with('lun-move-start', api_args) self.assertEqual(fake.JOB_UUID, result) def test_get_lun_move_status(self): api_response = netapp_api.NaElement( fake_client.GET_LUN_MOVE_STATUS_RESPONSE) mock_send_request = self.mock_object( self.client.connection, 'send_request', return_value=api_response) result = self.client.get_lun_move_status(fake.JOB_UUID) api_args = { 'query': { 'lun-move-info': { 'job-uuid': fake.JOB_UUID } } } mock_send_request.assert_called_once_with('lun-move-get-iter', api_args) expected = { 'job-status': 'complete', 'last-failure-reason': None } self.assertEqual(expected, result) @ddt.data((fake_client.LUN_NAME, None, fake_client.VSERVER_NAME, fake_client.DEST_LUN_NAME, fake_client.DEST_VOLUME_NAME, fake_client.DEST_VSERVER_NAME), (fake_client.LUN_NAME, fake_client.VOLUME_NAME, None, fake_client.DEST_LUN_NAME, fake_client.DEST_VOLUME_NAME, fake_client.DEST_VSERVER_NAME), (fake_client.LUN_NAME, fake_client.VOLUME_NAME, fake_client.VSERVER_NAME, None, fake_client.DEST_VOLUME_NAME, fake_client.DEST_VSERVER_NAME)) @ddt.unpack def test_start_lun_copy(self, src_lun_name, src_ontap_vol, src_vserver, dest_lun_name, dest_ontap_vol, dest_vserver): api_response = netapp_api.NaElement( fake_client.START_LUN_COPY_RESPONSE) mock_send_request = self.mock_object( self.client.connection, 'send_request', return_value=api_response) result = self.client.start_lun_copy(src_lun_name, dest_ontap_vol, dest_vserver, src_ontap_volume=src_ontap_vol, src_vserver=src_vserver, dest_lun_name=dest_lun_name) api_args = { 'source-vserver': (dest_vserver if not src_vserver else src_vserver), 'destination-vserver': dest_vserver, 'paths': [{ 'lun-path-pair': { 'destination-path': '/vol/%s/%s' % (dest_ontap_vol, src_lun_name if dest_lun_name is None else dest_lun_name), 'source-path': '/vol/%s/%s' % (dest_ontap_vol if src_ontap_vol is None else src_ontap_vol, src_lun_name) } }] } mock_send_request.assert_called_once_with('lun-copy-start', api_args, enable_tunneling=False) self.assertEqual(fake.JOB_UUID, result) def test_get_lun_copy_status(self): api_response = netapp_api.NaElement( fake_client.GET_LUN_COPY_STATUS_RESPONSE) mock_send_request = self.mock_object( self.client.connection, 'send_request', return_value=api_response) result = self.client.get_lun_copy_status(fake.JOB_UUID) api_args = { 'query': { 'lun-copy-info': { 'job-uuid': fake.JOB_UUID } } } mock_send_request.assert_called_once_with('lun-copy-get-iter', api_args, enable_tunneling=False) expected = { 'job-status': 'complete', 'last-failure-reason': None } self.assertEqual(expected, result) @ddt.data((fake_client.FILE_NAME, None, fake_client.DEST_VOLUME_NAME, fake_client.DEST_VOLUME_NAME), (fake_client.FILE_NAME, fake_client.VOLUME_NAME, None, fake_client.DEST_VOLUME_NAME)) @ddt.unpack def test_start_file_copy(self, src_file_name, src_ontap_vol, dest_file_name, dest_ontap_vol): api_response = netapp_api.NaElement( fake_client.START_FILE_COPY_RESPONSE) mock_send_request = self.mock_object( self.client.connection, 'send_request', return_value=api_response) result = self.client.start_file_copy(src_file_name, dest_ontap_vol, src_ontap_volume=src_ontap_vol, dest_file_name=dest_file_name) api_args = { 'source-paths': [{ 'sfod-operation-path': '%s/%s' % (dest_ontap_vol if src_ontap_vol is None else src_ontap_vol, 
src_file_name) }], 'destination-paths': [{ 'sfod-operation-path': '%s/%s' % (dest_ontap_vol, src_file_name if dest_file_name is None else dest_file_name) }], } mock_send_request.assert_called_once_with('file-copy-start', api_args, enable_tunneling=False) self.assertEqual(fake.JOB_UUID, result) def test_get_file_copy_status(self): api_response = netapp_api.NaElement( fake_client.GET_FILE_COPY_STATUS_RESPONSE) mock_send_request = self.mock_object( self.client.connection, 'send_request', return_value=api_response) result = self.client.get_file_copy_status(fake.JOB_UUID) api_args = { 'query': { 'file-copy-info': { 'job-uuid': fake.JOB_UUID } } } mock_send_request.assert_called_once_with('file-copy-get-iter', api_args, enable_tunneling=False) expected = { 'job-status': 'complete', 'last-failure-reason': None } self.assertEqual(expected, result) def test_destroy_file_copy(self): api_response = netapp_api.NaElement( fake_client.DESTROY_FILE_COPY_RESPONSE) mock_send_request = self.mock_object( self.client.connection, 'send_request', return_value=api_response) result = self.client.destroy_file_copy(fake.JOB_UUID) api_args = { 'job-uuid': fake.JOB_UUID, 'file-index': 0 } mock_send_request.assert_called_once_with('file-copy-destroy', api_args, enable_tunneling=False) self.assertIsNone(result) def test_destroy_file_copy_error(self): mock_send_request = self.mock_object(self.client.connection, 'send_request', side_effect=netapp_api.NaApiError) self.assertRaises(netapp_utils.NetAppDriverException, self.client.destroy_file_copy, fake.JOB_UUID) api_args = { 'job-uuid': fake.JOB_UUID, 'file-index': 0 } mock_send_request.assert_called_once_with('file-copy-destroy', api_args, enable_tunneling=False) def test_cancel_lun_copy(self): api_response = netapp_api.NaElement( fake_client.CANCEL_LUN_COPY_RESPONSE) mock_send_request = self.mock_object( self.client.connection, 'send_request', return_value=api_response) result = self.client.cancel_lun_copy(fake.JOB_UUID) api_args = { 'job-uuid': fake.JOB_UUID } mock_send_request.assert_called_once_with('lun-copy-cancel', api_args, enable_tunneling=False) self.assertIsNone(result) def test_cancel_lun_copy_error(self): mock_send_request = self.mock_object(self.client.connection, 'send_request', side_effect=netapp_api.NaApiError) self.assertRaises(netapp_utils.NetAppDriverException, self.client.cancel_lun_copy, fake.JOB_UUID) api_args = { 'job-uuid': fake.JOB_UUID } mock_send_request.assert_called_once_with('lun-copy-cancel', api_args, enable_tunneling=False) def test_rename_file(self): self.mock_object(self.client.connection, 'send_request') orig_file_name = '/vol/fake_vol/volume-%s' % self.fake_volume new_file_name = '/vol/fake_vol/new-volume-%s' % self.fake_volume self.client.rename_file(orig_file_name, new_file_name) api_args = { 'from-path': orig_file_name, 'to-path': new_file_name, } self.client.connection.send_request.assert_called_once_with( 'file-rename-file', api_args) def test_check_api_permissions(self): mock_log = self.mock_object(client_cmode.LOG, 'warning') self.mock_object(self.client, 'check_cluster_api', return_value=True) self.client.check_api_permissions() self.client.check_cluster_api.assert_has_calls( [mock.call(*key) for key in client_cmode.SSC_API_MAP.keys()]) self.assertEqual(0, mock_log.call_count) def test_check_api_permissions_failed_ssc_apis(self): def check_cluster_api(object_name, operation_name, api): if api != 'volume-get-iter': return False return True self.mock_object(self.client, 'check_cluster_api', side_effect=check_cluster_api) 
mock_log = self.mock_object(client_cmode.LOG, 'warning') self.client.check_api_permissions() self.assertEqual(1, mock_log.call_count) def test_check_api_permissions_failed_volume_api(self): def check_cluster_api(object_name, operation_name, api): if api == 'volume-get-iter': return False return True self.mock_object(self.client, 'check_cluster_api', side_effect=check_cluster_api) mock_log = self.mock_object(client_cmode.LOG, 'warning') self.assertRaises(exception.VolumeBackendAPIException, self.client.check_api_permissions) self.assertEqual(0, mock_log.call_count)
cinder-27.0.0/cinder/tests/unit/volume/drivers/netapp/dataontap/client/test_client_cmode_rest.py
# Copyright (c) 2014 Alex Meade. All rights reserved. # Copyright (c) 2015 Dustin Schoenbrun. All rights reserved. # Copyright (c) 2015 Tom Barron. All rights reserved. # Copyright (c) 2016 Mike Rooney. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import copy from unittest import mock import uuid import ddt from oslo_utils import units from cinder import exception from cinder.tests.unit import test from cinder.tests.unit.volume.drivers.netapp.dataontap.client import ( fakes as fake_client) from cinder.tests.unit.volume.drivers.netapp.dataontap import fakes as fake from cinder.volume.drivers.netapp.dataontap.client import api as netapp_api from cinder.volume.drivers.netapp.dataontap.client import client_base from cinder.volume.drivers.netapp.dataontap.client import client_cmode from cinder.volume.drivers.netapp.dataontap.client import client_cmode_rest from cinder.volume.drivers.netapp import utils as netapp_utils CONNECTION_INFO = {'hostname': 'hostname', 'transport_type': 'https', 'port': 443, 'username': 'admin', 'password': 'passw0rd', 'vserver': 'fake_vserver', 'ssl_cert_path': 'fake_ca', 'api_trace_pattern': 'fake_regex', 'private_key_file': 'fake_private_key.pem', 'certificate_file': 'fake_cert.pem', 'ca_certificate_file': 'fake_ca_cert.crt', 'certificate_host_validation': 'False', 'is_disaggregated': 'False', } @ddt.ddt class NetAppRestCmodeClientTestCase(test.TestCase): def setUp(self): super(NetAppRestCmodeClientTestCase, self).setUp() # Setup Client mocks self.mock_object(client_cmode.Client, '_init_ssh_client') # store the original reference so we can call it later in # test__get_cluster_nodes_info self.original_get_cluster_nodes_info = ( client_cmode.Client._get_cluster_nodes_info) self.mock_object(client_cmode.Client, '_get_cluster_nodes_info', return_value=fake.HYBRID_SYSTEM_NODES_INFO) self.mock_object(client_cmode.Client, 'get_ontap_version', return_value=(9, 11, 1)) self.mock_object(client_cmode.Client, 'get_ontapi_version', return_value=(1, 20)) # Setup RestClient mocks self.mock_object(client_cmode_rest.RestClient, '_init_ssh_client') # store the original reference so we can call it later in # test__get_cluster_nodes_info 
self.original_get_cluster_nodes_info = ( client_cmode_rest.RestClient._get_cluster_nodes_info) # Temporary fix because the function is under implementation if not hasattr(client_cmode_rest.RestClient, '_get_cluster_nodes_info'): setattr(client_cmode_rest.RestClient, '_get_cluster_nodes_info', None) self.original_get_cluster_nodes_info = ( client_cmode_rest.RestClient._get_cluster_nodes_info) self.mock_object(client_cmode_rest.RestClient, '_get_cluster_nodes_info', return_value=fake.HYBRID_SYSTEM_NODES_INFO) self.mock_object(client_cmode_rest.RestClient, 'get_ontap_version', return_value=(9, 11, 1)) with mock.patch.object(client_cmode_rest.RestClient, 'get_ontap_version', return_value=(9, 11, 1)): self.client = client_cmode_rest.RestClient(**CONNECTION_INFO) self.client.ssh_client = mock.MagicMock() self.client.connection = mock.MagicMock() self.connection = self.client.connection self.vserver = CONNECTION_INFO['vserver'] self.fake_volume = str(uuid.uuid4()) self.fake_lun = str(uuid.uuid4()) # this line interferes in test__get_cluster_nodes_info # self.mock_send_request = self.mock_object( # self.client, 'send_request') def _mock_api_error(self, code='fake'): return mock.Mock(side_effect=netapp_api.NaApiError(code=code)) def test_send_request(self): expected = 'fake_response' mock_get_records = self.mock_object( self.client, 'get_records', mock.Mock(return_value=expected)) res = self.client.send_request( fake_client.FAKE_ACTION_ENDPOINT, 'get', body=fake_client.FAKE_BODY, query=fake_client.FAKE_HTTP_QUERY, enable_tunneling=False) self.assertEqual(expected, res) mock_get_records.assert_called_once_with( fake_client.FAKE_ACTION_ENDPOINT, fake_client.FAKE_HTTP_QUERY, False, 10000) def test_send_request_post(self): expected = (201, 'fake_response') mock_invoke = self.mock_object( self.client.connection, 'invoke_successfully', mock.Mock(return_value=expected)) res = self.client.send_request( fake_client.FAKE_ACTION_ENDPOINT, 'post', body=fake_client.FAKE_BODY, query=fake_client.FAKE_HTTP_QUERY, enable_tunneling=False) self.assertEqual(expected[1], res) mock_invoke.assert_called_once_with( fake_client.FAKE_ACTION_ENDPOINT, 'post', body=fake_client.FAKE_BODY, query=fake_client.FAKE_HTTP_QUERY, enable_tunneling=False) def test_send_request_wait(self): expected = (202, fake_client.JOB_RESPONSE_REST) mock_invoke = self.mock_object( self.client.connection, 'invoke_successfully', mock.Mock(return_value=expected)) mock_wait = self.mock_object( self.client, '_wait_job_result', mock.Mock(return_value=expected[1])) res = self.client.send_request( fake_client.FAKE_ACTION_ENDPOINT, 'post', body=fake_client.FAKE_BODY, query=fake_client.FAKE_HTTP_QUERY, enable_tunneling=False) self.assertEqual(expected[1], res) mock_invoke.assert_called_once_with( fake_client.FAKE_ACTION_ENDPOINT, 'post', body=fake_client.FAKE_BODY, query=fake_client.FAKE_HTTP_QUERY, enable_tunneling=False) mock_wait.assert_called_once_with( expected[1]['job']['_links']['self']['href'][4:]) @ddt.data(True, False) def test_get_records(self, enable_tunneling): api_responses = [ (200, fake_client.VOLUME_GET_ITER_RESPONSE_REST_PAGE), (200, fake_client.VOLUME_GET_ITER_RESPONSE_REST_PAGE), (200, fake_client.VOLUME_GET_ITER_RESPONSE_REST_LAST_PAGE), ] mock_invoke = self.mock_object( self.client.connection, 'invoke_successfully', side_effect=copy.deepcopy(api_responses)) query = { 'fields': 'name' } result = self.client.get_records( '/storage/volumes/', query=query, enable_tunneling=enable_tunneling, max_page_length=10) num_records = 
result['num_records'] self.assertEqual(28, num_records) self.assertEqual(28, len(result['records'])) expected_records = [] expected_records.extend(api_responses[0][1]['records']) expected_records.extend(api_responses[1][1]['records']) expected_records.extend(api_responses[2][1]['records']) self.assertEqual(expected_records, result['records']) next_tag = result.get('next') self.assertIsNone(next_tag) expected_query = copy.deepcopy(query) expected_query['max_records'] = 10 next_url_1 = api_responses[0][1]['_links']['next']['href'][4:] next_url_2 = api_responses[1][1]['_links']['next']['href'][4:] mock_invoke.assert_has_calls([ mock.call('/storage/volumes/', 'get', query=expected_query, enable_tunneling=enable_tunneling), mock.call(next_url_1, 'get', query=None, enable_tunneling=enable_tunneling), mock.call(next_url_2, 'get', query=None, enable_tunneling=enable_tunneling), ]) def test_get_records_single_page(self): api_response = ( 200, fake_client.VOLUME_GET_ITER_RESPONSE_REST_LAST_PAGE) mock_invoke = self.mock_object(self.client.connection, 'invoke_successfully', return_value=api_response) query = { 'fields': 'name' } result = self.client.get_records( '/storage/volumes/', query=query, max_page_length=10) num_records = result['num_records'] self.assertEqual(8, num_records) self.assertEqual(8, len(result['records'])) next_tag = result.get('next') self.assertIsNone(next_tag) args = copy.deepcopy(query) args['max_records'] = 10 mock_invoke.assert_has_calls([ mock.call('/storage/volumes/', 'get', query=args, enable_tunneling=True), ]) def test_get_records_not_found(self): api_response = (200, fake_client.NO_RECORDS_RESPONSE_REST) mock_invoke = self.mock_object(self.client.connection, 'invoke_successfully', return_value=api_response) result = self.client.get_records('/storage/volumes/') num_records = result['num_records'] self.assertEqual(0, num_records) self.assertEqual(0, len(result['records'])) args = { 'max_records': client_cmode_rest.DEFAULT_MAX_PAGE_LENGTH } mock_invoke.assert_has_calls([ mock.call('/storage/volumes/', 'get', query=args, enable_tunneling=True), ]) def test_get_records_timeout(self): # To simulate timeout, max_records is 30, but the API returns less # records and fill the 'next url' pointing to the next page. 
max_records = 30 api_responses = [ (200, fake_client.VOLUME_GET_ITER_RESPONSE_REST_PAGE), (200, fake_client.VOLUME_GET_ITER_RESPONSE_REST_PAGE), (200, fake_client.VOLUME_GET_ITER_RESPONSE_REST_LAST_PAGE), ] mock_invoke = self.mock_object( self.client.connection, 'invoke_successfully', side_effect=copy.deepcopy(api_responses)) query = { 'fields': 'name' } result = self.client.get_records( '/storage/volumes/', query=query, max_page_length=max_records) num_records = result['num_records'] self.assertEqual(28, num_records) self.assertEqual(28, len(result['records'])) expected_records = [] expected_records.extend(api_responses[0][1]['records']) expected_records.extend(api_responses[1][1]['records']) expected_records.extend(api_responses[2][1]['records']) self.assertEqual(expected_records, result['records']) next_tag = result.get('next', None) self.assertIsNone(next_tag) args1 = copy.deepcopy(query) args1['max_records'] = max_records next_url_1 = api_responses[0][1]['_links']['next']['href'][4:] next_url_2 = api_responses[1][1]['_links']['next']['href'][4:] mock_invoke.assert_has_calls([ mock.call('/storage/volumes/', 'get', query=args1, enable_tunneling=True), mock.call(next_url_1, 'get', query=None, enable_tunneling=True), mock.call(next_url_2, 'get', query=None, enable_tunneling=True), ]) def test__get_unique_volume(self): api_response = fake_client.VOLUME_GET_ITER_STYLE_RESPONSE_REST result = self.client._get_unique_volume(api_response["records"]) expected = fake_client.VOLUME_FLEXGROUP_STYLE_REST self.assertEqual(expected, result) def test__get_unique_volume_raise_exception(self): api_response = fake_client.VOLUME_GET_ITER_SAME_STYLE_RESPONSE_REST self.assertRaises(exception.VolumeBackendAPIException, self.client._get_unique_volume, api_response["records"]) @ddt.data(fake.REST_FIELDS, None) def test__get_volume_by_args(self, fields): mock_get_unique_vol = self.mock_object( self.client, '_get_unique_volume', return_value=fake_client.VOLUME_GET_ITER_SSC_RESPONSE_STR_REST) mock_send_request = self.mock_object( self.client, 'send_request', return_value=fake_client.VOLUME_GET_ITER_SSC_RESPONSE_REST) volume = self.client._get_volume_by_args( vol_name=fake.VOLUME_NAME, vol_path=fake.VOLUME_PATH, vserver=fake.VSERVER_NAME, fields=fields) self.assertEqual(fake_client.VOLUME_GET_ITER_SSC_RESPONSE_STR_REST, volume) mock_get_unique_vol.assert_called_once_with( fake_client.VOLUME_GET_ITER_SSC_RESPONSE_REST['records']) expected_query = { 'type': 'rw', 'style': 'flex*', 'is_svm_root': 'false', 'error_state.is_inconsistent': 'false', 'state': 'online', 'name': fake.VOLUME_NAME, 'nas.path': fake.VOLUME_PATH, 'svm.name': fake.VSERVER_NAME, 'fields': 'name,style' if not fields else fields, } mock_send_request.assert_called_once_with('/storage/volumes/', 'get', query=expected_query) @ddt.data(False, True) def test_get_flexvol(self, is_flexgroup): if is_flexgroup: api_response = \ fake_client.VOLUME_GET_ITER_SSC_RESPONSE_FLEXGROUP_REST volume_response = \ fake_client.VOLUME_GET_ITER_SSC_RESPONSE_STR_FLEXGROUP_REST else: api_response = fake_client.VOLUME_GET_ITER_SSC_RESPONSE_REST volume_response = \ fake_client.VOLUME_GET_ITER_SSC_RESPONSE_STR_REST self.mock_object(self.client, 'send_request', return_value=api_response) mock_get_unique_vol = self.mock_object( self.client, '_get_volume_by_args', return_value=volume_response) result = self.client.get_flexvol( flexvol_name=fake_client.VOLUME_NAMES[0], flexvol_path='/%s' % fake_client.VOLUME_NAMES[0]) fields = ('aggregates.name,name,svm.name,nas.path,' 
'type,guarantee.honored,guarantee.type,' 'space.snapshot.reserve_percent,space.size,' 'qos.policy.name,snapshot_policy,language,style') mock_get_unique_vol.assert_called_once_with( vol_name=fake_client.VOLUME_NAMES[0], vol_path='/%s' % fake_client.VOLUME_NAMES[0], fields=fields) if is_flexgroup: self.assertEqual(fake_client.VOLUME_INFO_SSC_FLEXGROUP, result) else: self.assertEqual(fake_client.VOLUME_INFO_SSC, result) def test_list_flexvols(self): api_response = fake_client.VOLUME_GET_ITER_LIST_RESPONSE_REST self.mock_object(self.client, 'send_request', return_value=api_response) result = self.client.list_flexvols() query = { 'type': 'rw', 'style': 'flex*', # Match both 'flexvol' and 'flexgroup' 'is_svm_root': 'false', 'error_state.is_inconsistent': 'false', # 'is-invalid': 'false', 'state': 'online', 'fields': 'name' } self.client.send_request.assert_called_once_with( '/storage/volumes/', 'get', query=query) self.assertEqual(list(fake_client.VOLUME_NAMES), result) def test_list_flexvols_not_found(self): api_response = fake_client.NO_RECORDS_RESPONSE_REST self.mock_object(self.client, 'send_request', return_value=api_response) result = self.client.list_flexvols() self.assertEqual([], result) def test_is_flexvol_mirrored(self): api_response = fake_client.GET_NUM_RECORDS_RESPONSE_REST self.mock_object(self.client, 'send_request', return_value=api_response) result = self.client.is_flexvol_mirrored( fake_client.VOLUME_NAMES[0], fake_client.VOLUME_VSERVER_NAME) query = { 'source.path': fake_client.VOLUME_VSERVER_NAME + ':' + fake_client.VOLUME_NAMES[0], 'state': 'snapmirrored', 'return_records': 'false', } self.client.send_request.assert_called_once_with( '/snapmirror/relationships/', 'get', query=query) self.assertTrue(result) def test_is_flexvol_mirrored_not_mirrored(self): api_response = fake_client.NO_RECORDS_RESPONSE_REST self.mock_object(self.client, 'send_request', return_value=api_response) result = self.client.is_flexvol_mirrored( fake_client.VOLUME_NAMES[0], fake_client.VOLUME_VSERVER_NAME) self.assertFalse(result) def test_is_flexvol_mirrored_api_error(self): self.mock_object(self.client, 'send_request', side_effect=self._mock_api_error()) result = self.client.is_flexvol_mirrored( fake_client.VOLUME_NAMES[0], fake_client.VOLUME_VSERVER_NAME) self.assertFalse(result) def test_is_flexvol_encrypted(self): api_response = fake_client.GET_NUM_RECORDS_RESPONSE_REST self.client.features.add_feature('FLEXVOL_ENCRYPTION') self.mock_object(self.client, 'send_request', return_value=api_response) result = self.client.is_flexvol_encrypted( fake_client.VOLUME_NAME, fake_client.VOLUME_VSERVER_NAME) query = { 'encryption.enabled': 'true', 'name': fake_client.VOLUME_NAME, 'svm.name': fake_client.VOLUME_VSERVER_NAME, 'return_records': 'false', } self.client.send_request.assert_called_once_with( '/storage/volumes/', 'get', query=query) self.assertTrue(result) def test_is_flexvol_encrypted_unsupported_version(self): self.client.features.add_feature('FLEXVOL_ENCRYPTION', supported=False) result = self.client.is_flexvol_encrypted( fake_client.VOLUME_NAMES[0], fake_client.VOLUME_VSERVER_NAME) self.assertFalse(result) def test_is_flexvol_encrypted_no_records_found(self): api_response = fake_client.NO_RECORDS_RESPONSE_REST self.mock_object(self.client, 'send_request', return_value=api_response) result = self.client.is_flexvol_encrypted( fake_client.VOLUME_NAMES[0], fake_client.VOLUME_VSERVER_NAME) self.assertFalse(result) def test_is_flexvol_encrypted_api_error(self): self.mock_object(self.client, 
'send_request', side_effect=self._mock_api_error()) result = self.client.is_flexvol_encrypted( fake_client.VOLUME_NAMES[0], fake_client.VOLUME_VSERVER_NAME) self.assertFalse(result) @ddt.data({'types': {'FCAL'}, 'expected': ['FCAL']}, {'types': {'SATA', 'SSD'}, 'expected': ['SATA', 'SSD']},) @ddt.unpack def test_get_aggregate_disk_types(self, types, expected): mock_get_aggregate_disk_types = self.mock_object( self.client, '_get_aggregate_disk_types', return_value=types) result = self.client.get_aggregate_disk_types( fake_client.VOLUME_AGGREGATE_NAME) self.assertCountEqual(expected, result) mock_get_aggregate_disk_types.assert_called_once_with( fake_client.VOLUME_AGGREGATE_NAME) def test_get_aggregate_disk_types_not_found(self): mock_get_aggregate_disk_types = self.mock_object( self.client, '_get_aggregate_disk_types', return_value=set()) result = self.client.get_aggregate_disk_types( fake_client.VOLUME_AGGREGATE_NAME) self.assertIsNone(result) mock_get_aggregate_disk_types.assert_called_once_with( fake_client.VOLUME_AGGREGATE_NAME) def test_get_aggregate_disk_types_api_not_found(self): api_error = netapp_api.NaApiError() self.mock_object(self.client, 'send_request', side_effect=api_error) result = self.client.get_aggregate_disk_types( fake_client.VOLUME_AGGREGATE_NAME) self.assertIsNone(result) def test__get_aggregates(self): api_response = fake_client.AGGR_GET_ITER_RESPONSE_REST mock_send_request = self.mock_object(self.client, 'send_request', return_value=api_response) result = self.client._get_aggregates() mock_send_request.assert_has_calls( [mock.call('/storage/aggregates', 'get', query={}, enable_tunneling=False)]) self.assertEqual(result, api_response['records']) def test__get_aggregates_with_filters(self): api_response = fake_client.AGGR_GET_ITER_RESPONSE_REST mock_send_request = self.mock_object(self.client, 'send_request', return_value=api_response) query = { 'fields': 'space.block_storage.size,space.block_storage.available', 'name': ','.join(fake_client.VOLUME_AGGREGATE_NAMES), } result = self.client._get_aggregates( aggregate_names=fake_client.VOLUME_AGGREGATE_NAMES, fields=query['fields']) mock_send_request.assert_has_calls([ mock.call('/storage/aggregates', 'get', query=query, enable_tunneling=False)]) self.assertEqual(result, api_response['records']) def test__get_aggregates_not_found(self): api_response = fake_client.NO_RECORDS_RESPONSE_REST mock_send_request = self.mock_object(self.client, 'send_request', return_value=api_response) result = self.client._get_aggregates() mock_send_request.assert_has_calls([ mock.call('/storage/aggregates', 'get', query={}, enable_tunneling=False)]) self.assertEqual([], result) def test_get_aggregate_none_specified(self): result = self.client.get_aggregate('') self.assertEqual({}, result) def test_get_aggregate(self): api_response = [fake_client.AGGR_GET_ITER_RESPONSE_REST['records'][1]] mock__get_aggregates = self.mock_object(self.client, '_get_aggregates', return_value=api_response) response = self.client.get_aggregate(fake_client.VOLUME_AGGREGATE_NAME) fields = ('name,block_storage.primary.raid_type,' 'block_storage.storage_type,home_node.name') mock__get_aggregates.assert_has_calls([ mock.call( aggregate_names=[fake_client.VOLUME_AGGREGATE_NAME], fields=fields)]) expected = { 'name': fake_client.VOLUME_AGGREGATE_NAME, 'raid-type': 'raid0', 'is-hybrid': False, 'node-name': fake_client.NODE_NAME, } self.assertEqual(expected, response) def test_get_aggregate_not_found(self): api_response = fake_client.NO_RECORDS_RESPONSE_REST 
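        # An empty record set should make get_aggregate return an empty dict.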
self.mock_object(self.client, 'send_request', return_value=api_response) result = self.client.get_aggregate(fake_client.VOLUME_AGGREGATE_NAME) self.assertEqual({}, result) def test_get_aggregate_api_error(self): self.mock_object(self.client, 'send_request', side_effect=self._mock_api_error()) result = self.client.get_aggregate(fake_client.VOLUME_AGGREGATE_NAME) self.assertEqual({}, result) def test_get_aggregate_api_not_found(self): api_error = netapp_api.NaApiError(code=netapp_api.REST_API_NOT_FOUND) self.mock_object(self.client, 'send_request', side_effect=api_error) result = self.client.get_aggregate(fake_client.VOLUME_AGGREGATE_NAME) self.assertEqual({}, result) @ddt.data(True, False) def test_is_qos_min_supported(self, supported): self.client.features.add_feature('test', supported=supported) mock_name = self.mock_object(netapp_utils, 'qos_min_feature_name', return_value='test') result = self.client.is_qos_min_supported(True, 'node') mock_name.assert_called_once_with(True, 'node') self.assertEqual(result, supported) def test_is_qos_min_supported_invalid_node(self): mock_name = self.mock_object(netapp_utils, 'qos_min_feature_name', return_value='invalid_feature') result = self.client.is_qos_min_supported(True, 'node') mock_name.assert_called_once_with(True, 'node') self.assertFalse(result) def test_is_qos_min_supported_none_node(self): result = self.client.is_qos_min_supported(True, None) self.assertFalse(result) def test_get_flexvol_dedupe_info(self): api_response = fake_client.VOLUME_GET_ITER_SSC_RESPONSE_REST mock_send_request = self.mock_object(self.client, 'send_request', return_value=api_response) result = self.client.get_flexvol_dedupe_info( fake_client.VOLUME_NAMES[0]) query = { 'efficiency.volume_path': '/vol/%s' % fake_client.VOLUME_NAMES[0], 'fields': 'efficiency.state,efficiency.compression' } mock_send_request.assert_called_once_with( '/storage/volumes', 'get', query=query) self.assertEqual( fake_client.VOLUME_DEDUPE_INFO_SSC_NO_LOGICAL_DATA, result) def test_get_flexvol_dedupe_info_no_logical_data_values(self): api_response = fake_client.VOLUME_GET_ITER_SSC_RESPONSE_REST self.mock_object(self.client, 'send_request', return_value=api_response) result = self.client.get_flexvol_dedupe_info( fake_client.VOLUME_NAMES[0]) self.assertEqual(fake_client.VOLUME_DEDUPE_INFO_SSC_NO_LOGICAL_DATA, result) def test_get_flexvol_dedupe_info_not_found(self): api_response = fake_client.NO_RECORDS_RESPONSE_REST self.mock_object(self.client, 'send_request', return_value=api_response) result = self.client.get_flexvol_dedupe_info( fake_client.VOLUME_NAMES[0]) self.assertEqual(fake_client.VOLUME_DEDUPE_INFO_SSC_NO_LOGICAL_DATA, result) def test_get_flexvol_dedupe_info_api_error(self): self.mock_object(self.client, 'send_request', side_effect=self._mock_api_error()) result = self.client.get_flexvol_dedupe_info( fake_client.VOLUME_NAMES[0]) self.assertEqual(fake_client.VOLUME_DEDUPE_INFO_SSC_NO_LOGICAL_DATA, result) def test_get_flexvol_dedupe_info_api_insufficient_privileges(self): api_error = netapp_api.NaApiError(code=netapp_api.EAPIPRIVILEGE) self.mock_object(self.client, 'send_request', side_effect=api_error) result = self.client.get_flexvol_dedupe_info( fake_client.VOLUME_NAMES[0]) self.assertEqual(fake_client.VOLUME_DEDUPE_INFO_SSC_NO_LOGICAL_DATA, result) def test_get_lun_list(self): response = fake_client.LUN_GET_ITER_REST self.mock_object(self.client, 'send_request', return_value=response) expected_result = fake_client.LUN_GET_ITER_RESULT luns = self.client.get_lun_list() 
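        # The fake LUN_GET_ITER_REST response carries two LUN records.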
self.assertEqual(expected_result, luns) self.assertEqual(2, len(luns)) def test_get_lun_list_no_records(self): response = fake_client.NO_RECORDS_RESPONSE_REST self.mock_object(self.client, 'send_request', return_value=response) luns = self.client.get_lun_list() self.assertEqual([], luns) def test_get_lun_sizes_by_volume(self): volume_name = fake_client.VOLUME_NAME query = { 'location.volume.name': volume_name, 'svm.name': fake_client.VSERVER_NAME, 'fields': 'space.size,name' } response = fake_client.LUN_GET_ITER_REST expected_result = [] for lun in fake_client.LUN_GET_ITER_RESULT: expected_result.append({ 'size': lun['Size'], 'path': lun['Path'], }) self.mock_object(self.client, 'send_request', return_value=response) luns = self.client.get_lun_sizes_by_volume(volume_name) self.assertEqual(expected_result, luns) self.assertEqual(2, len(luns)) self.client.send_request.assert_called_once_with( '/storage/luns/', 'get', query=query) def test_get_lun_sizes_by_volume_no_records(self): volume_name = fake_client.VOLUME_NAME vserver = fake_client.VSERVER_NAME query = { 'location.volume.name': volume_name, 'svm.name': vserver, 'fields': 'space.size,name' } response = fake_client.NO_RECORDS_RESPONSE_REST self.mock_object(self.client, 'send_request', return_value=response) luns = self.client.get_lun_sizes_by_volume(volume_name) self.assertEqual([], luns) self.client.send_request.assert_called_once_with( '/storage/luns/', 'get', query=query) def test_get_lun_by_args(self): response = fake_client.LUN_GET_ITER_REST mock_send_request = self.mock_object( self.client, 'send_request', return_value=response) lun_info_args = { 'vserver': fake.VSERVER_NAME, 'path': fake.LUN_PATH, 'uuid': fake.UUID1, } luns = self.client.get_lun_by_args(**lun_info_args) query = { 'svm.name': fake.VSERVER_NAME, 'name': fake.LUN_PATH, 'uuid': fake.UUID1, 'fields': 'svm.name,location.volume.name,space.size,' 'location.qtree.name,name,os_type,' 'space.scsi_thin_provisioning_support_enabled,' 'space.guarantee.requested,uuid' } mock_send_request.assert_called_once_with( '/storage/luns/', 'get', query=query) self.assertEqual(2, len(luns)) def test_get_lun_by_args_no_lun_found(self): response = fake_client.NO_RECORDS_RESPONSE_REST self.mock_object(self.client, 'send_request', return_value=response) luns = self.client.get_lun_by_args() self.assertEqual([], luns) def test_get_lun_by_args_with_one_arg(self): path = '/vol/%s/%s' % (self.fake_volume, self.fake_lun) response = fake_client.LUN_GET_ITER_REST mock_send_request = self.mock_object( self.client, 'send_request', return_value=response) luns = self.client.get_lun_by_args(path=path) query = { 'name': path, 'fields': 'svm.name,location.volume.name,space.size,' 'location.qtree.name,name,os_type,' 'space.scsi_thin_provisioning_support_enabled,' 'space.guarantee.requested,uuid' } mock_send_request.assert_called_once_with( '/storage/luns/', 'get', query=query) self.assertEqual(2, len(luns)) def test_get_file_sizes_by_dir(self): volume = fake_client.VOLUME_ITEM_SIMPLE_RESPONSE_REST query = { 'type': 'file', 'fields': 'size,name' } response = fake_client.FILE_DIRECTORY_GET_ITER_REST expected_result = fake_client.FILE_DIRECTORY_GET_ITER_RESULT_REST self.mock_object(self.client, '_get_volume_by_args', return_value=volume) self.mock_object(self.client, 'send_request', return_value=response) files = self.client.get_file_sizes_by_dir(volume['name']) self.assertEqual(expected_result, files) self.assertEqual(2, len(files)) self.client.send_request.assert_called_once_with( 
f'/storage/volumes/{volume["uuid"]}/files', 'get', query=query) def test_get_file_sizes_by_dir_no_records(self): volume = fake_client.VOLUME_ITEM_SIMPLE_RESPONSE_REST query = { 'type': 'file', 'fields': 'size,name' } api_error = netapp_api.NaApiError(code=netapp_api.REST_NO_SUCH_FILE) self.mock_object(self.client, '_get_volume_by_args', return_value=volume) self.mock_object(self.client, 'send_request', side_effect=api_error) files = self.client.get_file_sizes_by_dir(volume['name']) self.assertEqual([], files) self.assertEqual(0, len(files)) self.client.send_request.assert_called_once_with( f'/storage/volumes/{volume["uuid"]}/files', 'get', query=query) def test_get_file_sizes_by_dir_exception(self): volume = fake_client.VOLUME_ITEM_SIMPLE_RESPONSE_REST api_error = netapp_api.NaApiError(code=0) self.mock_object(self.client, '_get_volume_by_args', return_value=volume) self.mock_object(self.client, 'send_request', side_effect=api_error) self.assertRaises(netapp_api.NaApiError, self.client.get_file_sizes_by_dir, volume['name']) @ddt.data({'junction_path': '/fake/vol'}, {'name': 'fake_volume'}, {'junction_path': '/fake/vol', 'name': 'fake_volume'}) def test_get_volume_state(self, kwargs): query_args = {} query_args['fields'] = 'state' if 'name' in kwargs: query_args['name'] = kwargs['name'] if 'junction_path' in kwargs: query_args['nas.path'] = kwargs['junction_path'] response = fake_client.VOLUME_GET_ITER_STATE_RESPONSE_REST mock_send_request = self.mock_object( self.client, 'send_request', return_value=response) state = self.client.get_volume_state(**kwargs) mock_send_request.assert_called_once_with( '/storage/volumes/', 'get', query=query_args) self.assertEqual(fake_client.VOLUME_STATE_ONLINE, state) def test_delete_snapshot(self): volume = fake_client.VOLUME_GET_ITER_SSC_RESPONSE_STR_REST self.mock_object( self.client, '_get_volume_by_args', return_value=volume) snap_name = fake.SNAPSHOT["name"] self.mock_object(self.client, 'send_request') self.client.delete_snapshot(volume["name"], snap_name) self.client._get_volume_by_args.assert_called_once_with( vol_name=volume["name"]) self.client.send_request.assert_called_once_with( f'/storage/volumes/{volume["uuid"]}/snapshots' f'?name={snap_name}', 'delete') def test_get_operational_lif_addresses(self): expected_result = ['1.2.3.4', '99.98.97.96'] api_response = fake_client.GET_OPERATIONAL_LIF_ADDRESSES_RESPONSE_REST mock_send_request = self.mock_object(self.client, 'send_request', return_value=api_response) address_list = self.client.get_operational_lif_addresses() query = { 'state': 'up', 'fields': 'ip.address', } mock_send_request.assert_called_once_with( '/network/ip/interfaces/', 'get', query=query) self.assertEqual(expected_result, address_list) def test__list_vservers(self): api_response = fake_client.VSERVER_DATA_LIST_RESPONSE_REST self.mock_object(self.client, 'send_request', return_value=api_response) result = self.client._list_vservers() query = { 'fields': 'name', } self.client.send_request.assert_has_calls([ mock.call('/svm/svms', 'get', query=query, enable_tunneling=False)]) self.assertListEqual( [fake_client.VSERVER_NAME, fake_client.VSERVER_NAME_2], result) def test_list_vservers_not_found(self): api_response = fake_client.NO_RECORDS_RESPONSE_REST self.mock_object(self.client, 'send_request', return_value=api_response) result = self.client._list_vservers() self.assertListEqual([], result) def test_get_ems_log_destination_vserver(self): mock_list_vservers = self.mock_object( self.client, '_list_vservers', 
return_value=[fake_client.VSERVER_NAME]) result = self.client._get_ems_log_destination_vserver() mock_list_vservers.assert_called_once_with() self.assertEqual(fake_client.VSERVER_NAME, result) def test_get_ems_log_destination_vserver_not_found(self): mock_list_vservers = self.mock_object( self.client, '_list_vservers', return_value=[]) self.assertRaises(exception.NotFound, self.client._get_ems_log_destination_vserver) mock_list_vservers.assert_called_once_with() def test_send_ems_log_message(self): message_dict = { 'computer-name': '25-dev-vm', 'event-source': 'Cinder driver NetApp_iSCSI_Cluster_direct', 'app-version': '20.1.0.dev|vendor|Linux-5.4.0-120-generic-x86_64', 'category': 'provisioning', 'log-level': '5', 'auto-support': 'false', 'event-id': '1', 'event-description': '{"pools": {"vserver": "vserver_name",' + '"aggregates": [], "flexvols": ["flexvol_01"]}}' } body = { 'computer_name': message_dict['computer-name'], 'event_source': message_dict['event-source'], 'app_version': message_dict['app-version'], 'category': message_dict['category'], 'severity': 'notice', 'autosupport_required': message_dict['auto-support'] == 'true', 'event_id': message_dict['event-id'], 'event_description': message_dict['event-description'], } self.mock_object(self.client, '_get_ems_log_destination_vserver', return_value='vserver_name') self.mock_object(self.client, 'send_request') self.client.send_ems_log_message(message_dict) self.client.send_request.assert_called_once_with( '/support/ems/application-logs', 'post', body=body) @ddt.data('cp_phase_times', 'domain_busy') def test_get_performance_counter_info(self, counter_name): response1 = fake_client.PERF_COUNTER_LIST_INFO_WAFL_RESPONSE_REST response2 = fake_client.PERF_COUNTER_TABLE_ROWS_WAFL object_name = 'wafl' mock_send_request = self.mock_object( self.client, 'send_request', side_effect=[response1, response2]) result = self.client.get_performance_counter_info(object_name, counter_name) expected = { 'name': 'cp_phase_times', 'base-counter': 'total_cp_msecs', 'labels': fake_client.PERF_COUNTER_TOTAL_CP_MSECS_LABELS_RESULT, } query1 = { 'counter_schemas.name': counter_name, 'fields': 'counter_schemas.*' } query2 = { 'counters.name': counter_name, 'fields': 'counters.*' } if counter_name == 'domain_busy': expected['name'] = 'domain_busy' expected['labels'] = ( fake_client.PERF_COUNTER_TOTAL_CP_MSECS_LABELS_REST) query1['counter_schemas.name'] = 'domain_busy_percent' query2['counters.name'] = 'domain_busy_percent' self.assertEqual(expected, result) mock_send_request.assert_has_calls([ mock.call(f'/cluster/counter/tables/{object_name}', 'get', query=query1, enable_tunneling=False), mock.call(f'/cluster/counter/tables/{object_name}/rows', 'get', query=query2, enable_tunneling=False), ]) def test_get_performance_counter_info_not_found_rows(self): response1 = fake_client.PERF_COUNTER_LIST_INFO_WAFL_RESPONSE_REST response2 = fake_client.NO_RECORDS_RESPONSE_REST object_name = 'wafl' counter_name = 'cp_phase_times' self.mock_object( self.client, 'send_request', side_effect=[response1, response2]) result = self.client.get_performance_counter_info(object_name, counter_name) expected = { 'name': 'cp_phase_times', 'base-counter': 'total_cp_msecs', 'labels': [], } self.assertEqual(expected, result) def test_get_performance_instance_uuids(self): response = fake_client.PERF_COUNTER_TABLE_ROWS_WAFL mock_send_request = self.mock_object( self.client, 'send_request', return_value=response) object_name = 'wafl' result = self.client.get_performance_instance_uuids( 
object_name, fake_client.NODE_NAME) expected = [fake_client.NODE_NAME + ':wafl'] self.assertEqual(expected, result) query = { 'id': fake_client.NODE_NAME + ':*', } mock_send_request.assert_called_once_with( f'/cluster/counter/tables/{object_name}/rows', 'get', query=query, enable_tunneling=False) def test_get_performance_counters(self): response = fake_client.PERF_GET_INSTANCES_PROCESSOR_RESPONSE_REST mock_send_request = self.mock_object( self.client, 'send_request', return_value=response) instance_uuids = [ fake_client.NODE_NAME + ':processor0', fake_client.NODE_NAME + ':processor1', ] object_name = 'processor' counter_names = ['domain_busy', 'processor_elapsed_time'] rest_counter_names = ['domain_busy_percent', 'elapsed_time'] result = self.client.get_performance_counters(object_name, instance_uuids, counter_names) expected = fake_client.PERF_COUNTERS_PROCESSOR_EXPECTED self.assertEqual(expected, result) query = { 'id': '|'.join(instance_uuids), 'counters.name': '|'.join(rest_counter_names), 'fields': 'id,counter_table.name,counters.*', } mock_send_request.assert_called_once_with( f'/cluster/counter/tables/{object_name}/rows', 'get', query=query, enable_tunneling=False) def test_get_aggregate_capacities(self): aggr1_capacities = { 'percent-used': 50, 'size-available': 100.0, 'size-total': 200.0, } aggr2_capacities = { 'percent-used': 75, 'size-available': 125.0, 'size-total': 500.0, } mock_get_aggregate_capacity = self.mock_object( self.client, '_get_aggregate_capacity', side_effect=[aggr1_capacities, aggr2_capacities]) result = self.client.get_aggregate_capacities(['aggr1', 'aggr2']) expected = { 'aggr1': aggr1_capacities, 'aggr2': aggr2_capacities, } self.assertEqual(expected, result) mock_get_aggregate_capacity.assert_has_calls([ mock.call('aggr1'), mock.call('aggr2'), ]) def test_get_aggregate_capacities_not_found(self): mock_get_aggregate_capacity = self.mock_object( self.client, '_get_aggregate_capacity', side_effect=[{}, {}]) result = self.client.get_aggregate_capacities(['aggr1', 'aggr2']) expected = { 'aggr1': {}, 'aggr2': {}, } self.assertEqual(expected, result) mock_get_aggregate_capacity.assert_has_calls([ mock.call('aggr1'), mock.call('aggr2'), ]) def test_get_aggregate_capacities_not_list(self): result = self.client.get_aggregate_capacities('aggr1') self.assertEqual({}, result) def test__get_aggregate_capacity(self): api_response = fake_client.AGGR_GET_ITER_RESPONSE_REST['records'] mock_get_aggregates = self.mock_object(self.client, '_get_aggregates', return_value=api_response) result = self.client._get_aggregate_capacity( fake_client.VOLUME_AGGREGATE_NAME) fields = ('space.block_storage.available,space.block_storage.size,' 'space.block_storage.used') mock_get_aggregates.assert_has_calls([ mock.call(aggregate_names=[fake_client.VOLUME_AGGREGATE_NAME], fields=fields)]) available = float(fake_client.AGGR_SIZE_AVAILABLE) total = float(fake_client.AGGR_SIZE_TOTAL) used = float(fake_client.AGGR_SIZE_USED) percent_used = int((used * 100) // total) expected = { 'percent-used': percent_used, 'size-available': available, 'size-total': total, } self.assertEqual(expected, result) def test__get_aggregate_capacity_not_found(self): api_response = fake_client.NO_RECORDS_RESPONSE_REST self.mock_object(self.client, 'send_request', return_value=api_response) result = self.client._get_aggregate_capacity( fake_client.VOLUME_AGGREGATE_NAME) self.assertEqual({}, result) def test__get_aggregate_capacity_api_error(self): self.mock_object(self.client, 'send_request', 
side_effect=self._mock_api_error()) result = self.client._get_aggregate_capacity( fake_client.VOLUME_AGGREGATE_NAME) self.assertEqual({}, result) def test__get_aggregate_capacity_api_not_found(self): api_error = netapp_api.NaApiError(code=netapp_api.REST_API_NOT_FOUND) self.mock_object( self.client, 'send_request', side_effect=api_error) result = self.client._get_aggregate_capacity( fake_client.VOLUME_AGGREGATE_NAME) self.assertEqual({}, result) def test_get_node_for_aggregate(self): api_response = fake_client.AGGR_GET_ITER_RESPONSE_REST['records'] mock_get_aggregates = self.mock_object(self.client, '_get_aggregates', return_value=api_response) result = self.client.get_node_for_aggregate( fake_client.VOLUME_AGGREGATE_NAME) fields = 'home_node.name' mock_get_aggregates.assert_has_calls([ mock.call( aggregate_names=[fake_client.VOLUME_AGGREGATE_NAME], fields=fields)]) self.assertEqual(fake_client.NODE_NAME, result) def test_get_node_for_aggregate_none_requested(self): result = self.client.get_node_for_aggregate(None) self.assertIsNone(result) def test_get_node_for_aggregate_api_not_found(self): api_error = netapp_api.NaApiError(code=netapp_api.REST_API_NOT_FOUND) self.mock_object(self.client, 'send_request', side_effect=api_error) result = self.client.get_node_for_aggregate( fake_client.VOLUME_AGGREGATE_NAME) self.assertIsNone(result) def test_get_node_for_aggregate_api_error(self): self.mock_object(self.client, 'send_request', self._mock_api_error()) self.assertRaises(netapp_api.NaApiError, self.client.get_node_for_aggregate, fake_client.VOLUME_AGGREGATE_NAME) def test_get_node_for_aggregate_not_found(self): api_response = fake_client.NO_RECORDS_RESPONSE_REST self.mock_object(self.client, 'send_request', return_value=api_response) result = self.client.get_node_for_aggregate( fake_client.VOLUME_AGGREGATE_NAME) self.assertIsNone(result) @ddt.data(None, {'legacy': 'fake'}, {}) def test_provision_qos_policy_group_invalid_policy_info(self, policy_info): self.mock_object(self.client, '_validate_qos_policy_group') self.mock_object(self.client, '_get_qos_first_policy_group_by_name') self.mock_object(self.client, '_create_qos_policy_group') self.mock_object(self.client, '_modify_qos_policy_group') self.client.provision_qos_policy_group(policy_info, False) self.client._validate_qos_policy_group.assert_not_called() self.client._get_qos_first_policy_group_by_name.assert_not_called() self.client._create_qos_policy_group.assert_not_called() self.client._modify_qos_policy_group.assert_not_called() @ddt.data(True, False) def test_provision_qos_policy_group_qos_policy_create(self, is_adaptive): policy_info = fake.QOS_POLICY_GROUP_INFO policy_spec = fake.QOS_POLICY_GROUP_SPEC if is_adaptive: policy_info = fake.ADAPTIVE_QOS_POLICY_GROUP_INFO policy_spec = fake.ADAPTIVE_QOS_SPEC self.mock_object(self.client, '_validate_qos_policy_group') self.mock_object(self.client, '_get_qos_first_policy_group_by_name', return_value=None) self.mock_object(self.client, '_create_qos_policy_group') self.mock_object(self.client, '_modify_qos_policy_group') self.client.provision_qos_policy_group(policy_info, True) self.client._validate_qos_policy_group.assert_called_once_with( is_adaptive, spec=policy_spec, qos_min_support=True) (self.client._get_qos_first_policy_group_by_name. 
assert_called_once_with(policy_spec['policy_name'])) self.client._create_qos_policy_group.assert_called_once_with( policy_spec, is_adaptive) self.client._modify_qos_policy_group.assert_not_called() @ddt.data(True, False) def test_provision_qos_policy_group_qos_policy_modify(self, is_adaptive): policy_rest_item = fake.QOS_POLICY_BY_NAME_RESPONSE_REST['records'][0] policy_info = fake.QOS_POLICY_GROUP_INFO policy_spec = fake.QOS_POLICY_GROUP_SPEC if is_adaptive: policy_info = fake.ADAPTIVE_QOS_POLICY_GROUP_INFO policy_spec = fake.ADAPTIVE_QOS_SPEC self.mock_object(self.client, '_validate_qos_policy_group') self.mock_object(self.client, '_get_qos_first_policy_group_by_name', return_value=policy_rest_item) self.mock_object(self.client, '_create_qos_policy_group') self.mock_object(self.client, '_modify_qos_policy_group') self.client.provision_qos_policy_group(policy_info, True) self.client._validate_qos_policy_group.assert_called_once_with( is_adaptive, spec=policy_spec, qos_min_support=True) (self.client._get_qos_first_policy_group_by_name. assert_called_once_with(policy_spec['policy_name'])) self.client._create_qos_policy_group.assert_not_called() self.client._modify_qos_policy_group.assert_called_once_with( policy_spec, is_adaptive, policy_rest_item) @ddt.data(True, False) def test__get_qos_first_policy_group_by_name(self, is_empty): qos_rest_records = [] qos_item = fake.QOS_POLICY_BY_NAME_RESPONSE_REST['records'][0] if not is_empty: qos_rest_records = fake.QOS_POLICY_BY_NAME_RESPONSE_REST['records'] self.mock_object(self.client, '_get_qos_policy_group_by_name', return_value=qos_rest_records) result = self.client._get_qos_first_policy_group_by_name( qos_item['name']) self.client._get_qos_policy_group_by_name.assert_called_once_with( qos_item['name'] ) if not is_empty: self.assertEqual(qos_item, result) else: self.assertTrue(result is None) @ddt.data(True, False) def test__get_qos_policy_group_by_name(self, is_empty): qos_rest_response = {} qos_rest_records = [] qos_name = fake.QOS_POLICY_BY_NAME_RESPONSE_REST['records'][0]['name'] if not is_empty: qos_rest_response = fake.QOS_POLICY_BY_NAME_RESPONSE_REST qos_rest_records = qos_rest_response['records'] self.mock_object(self.client, 'send_request', return_value=qos_rest_response) result = self.client._get_qos_policy_group_by_name(qos_name) self.client.send_request.assert_called_once_with( '/storage/qos/policies/', 'get', query={'name': qos_name}) self.assertEqual(qos_rest_records, result) @ddt.data(True, False) def test__qos_spec_to_api_args(self, is_adaptive): policy_spec = copy.deepcopy(fake.QOS_POLICY_GROUP_SPEC) expected_args = fake.QOS_POLICY_GROUP_API_ARGS_REST if is_adaptive: policy_spec = fake.ADAPTIVE_QOS_SPEC expected_args = fake.ADAPTIVE_QOS_API_ARGS_REST result = self.client._qos_spec_to_api_args( policy_spec, is_adaptive, vserver=fake.VSERVER_NAME) self.assertEqual(expected_args, result) def test__qos_spec_to_api_args_bps(self): policy_spec = copy.deepcopy(fake.QOS_POLICY_GROUP_SPEC_BPS) expected_args = fake.QOS_POLICY_GROUP_API_ARGS_REST_BPS result = self.client._qos_spec_to_api_args( policy_spec, False, vserver=fake.VSERVER_NAME) self.assertEqual(expected_args, result) @ddt.data('100IOPS', '100iops', '100B/s', '100b/s') def test__sanitize_qos_spec_value(self, value): result = self.client._sanitize_qos_spec_value(value) self.assertEqual(100, result) @ddt.data(True, False) def test__create_qos_policy_group(self, is_adaptive): self.client.vserver = fake.VSERVER_NAME policy_spec = fake.QOS_POLICY_GROUP_SPEC body_args = 
fake.QOS_POLICY_GROUP_API_ARGS_REST if is_adaptive: policy_spec = fake.ADAPTIVE_QOS_SPEC body_args = fake.ADAPTIVE_QOS_API_ARGS_REST self.mock_object(self.client, '_qos_spec_to_api_args', return_value=body_args) self.mock_object(self.client, 'send_request') self.client._create_qos_policy_group(policy_spec, is_adaptive) self.client._qos_spec_to_api_args.assert_called_once_with( policy_spec, is_adaptive, vserver=fake.VSERVER_NAME) self.client.send_request.assert_called_once_with( '/storage/qos/policies/', 'post', body=body_args, enable_tunneling=False) @ddt.data((False, False), (False, True), (True, False), (True, True)) @ddt.unpack def test__modify_qos_policy_group(self, is_adaptive, same_name): self.client.vserver = fake.VSERVER_NAME policy_spec = fake.QOS_POLICY_GROUP_SPEC body_args = copy.deepcopy(fake.QOS_POLICY_GROUP_API_ARGS_REST) if is_adaptive: policy_spec = fake.ADAPTIVE_QOS_SPEC body_args = copy.deepcopy(fake.ADAPTIVE_QOS_API_ARGS_REST) expected_body_args = copy.deepcopy(body_args) qos_group_item = copy.deepcopy( fake.QOS_POLICY_BY_NAME_RESPONSE_REST['records'][0]) if same_name: qos_group_item['name'] = policy_spec['policy_name'] expected_body_args.pop('name') self.mock_object(self.client, '_qos_spec_to_api_args', return_value=body_args) self.mock_object(self.client, 'send_request') self.client._modify_qos_policy_group( policy_spec, is_adaptive, qos_group_item) self.client._qos_spec_to_api_args.assert_called_once_with( policy_spec, is_adaptive) self.client.send_request.assert_called_once_with( f'/storage/qos/policies/{qos_group_item["uuid"]}', 'patch', body=expected_body_args, enable_tunneling=False) def test_get_vol_by_junc_vserver(self): api_response = fake_client.VOLUME_LIST_SIMPLE_RESPONSE_REST volume_response = fake_client.VOLUME_ITEM_SIMPLE_RESPONSE_REST file_path = f'/vol/{fake_client.VOLUME_NAMES[0]}/cinder-vol' self.mock_object(self.client, 'send_request', return_value=api_response) self.mock_object(self.client, '_get_unique_volume', return_value=volume_response) result = self.client.get_vol_by_junc_vserver( fake_client.VOLUME_VSERVER_NAME, file_path) query = { 'type': 'rw', 'style': 'flex*', 'is_svm_root': 'false', 'error_state.is_inconsistent': 'false', 'state': 'online', 'nas.path': file_path, 'svm.name': fake_client.VOLUME_VSERVER_NAME, 'fields': 'name,style' } self.client.send_request.assert_called_once_with( '/storage/volumes/', 'get', query=query) self.client._get_unique_volume.assert_called_once_with( api_response["records"]) self.assertEqual(volume_response['name'], result) def test_file_assign_qos(self): volume = fake_client.VOLUME_GET_ITER_SSC_RESPONSE_STR_REST self.mock_object( self.client, '_get_volume_by_args', return_value=volume) self.mock_object(self.client, 'send_request') self.client.file_assign_qos( volume['name'], fake.QOS_POLICY_GROUP_NAME, True, fake.VOLUME_NAME) self.client._get_volume_by_args.assert_called_once_with(volume['name']) body = {'qos_policy.name': fake.QOS_POLICY_GROUP_NAME} self.client.send_request.assert_called_once_with( f'/storage/volumes/{volume["uuid"]}/files/{fake.VOLUME_NAME}', 'patch', body=body, enable_tunneling=False) @ddt.data(None, {}) def test_mark_qos_policy_group_for_deletion_invalid_policy(self, policy_info): self.mock_object(self.client, '_rename_qos_policy_group') self.mock_object(self.client, 'remove_unused_qos_policy_groups') self.client.mark_qos_policy_group_for_deletion(policy_info, False) self.client._rename_qos_policy_group.assert_not_called() if policy_info is None: 
self.client.remove_unused_qos_policy_groups.assert_not_called() else: (self.client.remove_unused_qos_policy_groups .assert_called_once_with()) @ddt.data((False, False), (False, True), (True, False), (True, True)) @ddt.unpack def test_mark_qos_policy_group_for_deletion(self, is_adaptive, has_error): policy_info = fake.QOS_POLICY_GROUP_INFO if is_adaptive: policy_info = fake.ADAPTIVE_QOS_POLICY_GROUP_INFO current_name = policy_info['spec']['policy_name'] deleted_name = client_base.DELETED_PREFIX + current_name self.mock_object(self.client, 'remove_unused_qos_policy_groups') if has_error: self.mock_object(self.client, '_rename_qos_policy_group', side_effect=self._mock_api_error()) else: self.mock_object(self.client, '_rename_qos_policy_group') self.client.mark_qos_policy_group_for_deletion( policy_info, is_adaptive) self.client._rename_qos_policy_group.assert_called_once_with( current_name, deleted_name) self.client.remove_unused_qos_policy_groups.assert_called_once_with() def test__rename_qos_policy_group(self): self.mock_object(self.client, 'send_request') new_policy_name = 'fake_new_policy' self.client._rename_qos_policy_group(fake.QOS_POLICY_GROUP_NAME, new_policy_name) body = {'name': new_policy_name} query = {'name': fake.QOS_POLICY_GROUP_NAME} self.client.send_request.assert_called_once_with( '/storage/qos/policies/', 'patch', body=body, query=query, enable_tunneling=False) def test_remove_unused_qos_policy_groups(self): deleted_preffix = f'{client_base.DELETED_PREFIX}*' self.mock_object(self.client, 'send_request') self.client.remove_unused_qos_policy_groups() query = {'name': deleted_preffix} self.client.send_request.assert_called_once_with( '/storage/qos/policies', 'delete', query=query) def test_create_lun(self): metadata = copy.deepcopy(fake_client.LUN_GET_ITER_RESULT[0]) path = f'/vol/{fake.VOLUME_NAME}/{fake.LUN_NAME}' size = 2048 initial_size = size qos_policy_group_is_adaptive = False self.mock_object(self.client, '_validate_qos_policy_group') self.mock_object(self.client, 'send_request') body = { 'name': path, 'space.size': str(initial_size), 'os_type': metadata['OsType'], 'space.guarantee.requested': metadata['SpaceReserved'], 'space.scsi_thin_provisioning_support_enabled': metadata['SpaceAllocated'], 'qos_policy.name': fake.QOS_POLICY_GROUP_NAME } self.client.create_lun( fake.VOLUME_NAME, fake.LUN_NAME, size, metadata, qos_policy_group_name=fake.QOS_POLICY_GROUP_NAME, qos_policy_group_is_adaptive=qos_policy_group_is_adaptive) self.client._validate_qos_policy_group.assert_called_once_with( qos_policy_group_is_adaptive) self.client.send_request.assert_called_once_with( '/storage/luns', 'post', body=body) def test_do_direct_resize(self): lun_path = f'/vol/{fake_client.VOLUME_NAMES[0]}/cinder-lun' new_size_bytes = '1073741824' body = {'name': lun_path, 'space.size': new_size_bytes} self.mock_object(self.client, '_lun_update_by_path') self.client.do_direct_resize(lun_path, new_size_bytes) self.client._lun_update_by_path.assert_called_once_with(lun_path, body) @ddt.data(True, False) def test__get_lun_by_path(self, is_empty): lun_path = f'/vol/{fake_client.VOLUME_NAMES[0]}/cinder-lun' lun_response = fake_client.LUN_GET_ITER_REST lun_records = fake_client.LUN_GET_ITER_REST['records'] if is_empty: lun_response = {} lun_records = [] self.mock_object(self.client, 'send_request', return_value=lun_response) result = self.client._get_lun_by_path(lun_path) query = {'name': lun_path} self.client.send_request.assert_called_once_with( '/storage/luns', 'get', query=query) 
self.assertEqual(result, lun_records) @ddt.data(True, False) def test__get_first_lun_by_path(self, is_empty): lun_path = f'/vol/{fake_client.VOLUME_NAMES[0]}/cinder-lun' lun_records = fake_client.LUN_GET_ITER_REST['records'] lun_item = lun_records[0] if is_empty: lun_records = [] self.mock_object(self.client, '_get_lun_by_path', return_value=lun_records) result = self.client._get_first_lun_by_path(lun_path) self.client._get_lun_by_path.assert_called_once_with( lun_path, fields=None) if is_empty: self.assertTrue(result is None) else: self.assertEqual(result, lun_item) def test__lun_update_by_path(self): lun_path = f'/vol/{fake_client.VOLUME_NAMES[0]}/cinder-lun' lun_item = fake_client.LUN_GET_ITER_REST['records'][0] new_size_bytes = '1073741824' body = { 'name': lun_path, 'space.guarantee.requested': 'True', 'space.size': new_size_bytes } self.mock_object(self.client, '_get_first_lun_by_path', return_value=lun_item) self.mock_object(self.client, 'send_request') self.client._lun_update_by_path(lun_path, body) self.client._get_first_lun_by_path.assert_called_once_with(lun_path) self.client.send_request.assert_called_once_with( f'/storage/luns/{lun_item["uuid"]}', 'patch', body=body) def test__lun_update_by_path_not_found(self): lun_path = f'/vol/{fake_client.VOLUME_NAMES[0]}/cinder-lun' lun_item = None new_size_bytes = '1073741824' body = { 'name': lun_path, 'space.guarantee.requested': 'True', 'space.size': new_size_bytes } self.mock_object(self.client, '_get_first_lun_by_path', return_value=lun_item) self.mock_object(self.client, 'send_request') self.assertRaises( netapp_api.NaApiError, self.client._lun_update_by_path, lun_path, body ) self.client._get_first_lun_by_path.assert_called_once_with(lun_path) self.client.send_request.assert_not_called() def test__validate_qos_policy_group_unsupported_qos(self): is_adaptive = True self.client.features.ADAPTIVE_QOS = False self.assertRaises( netapp_utils.NetAppDriverException, self.client._validate_qos_policy_group, is_adaptive ) def test__validate_qos_policy_group_no_spec(self): is_adaptive = True self.client.features.ADAPTIVE_QOS = True result = self.client._validate_qos_policy_group(is_adaptive) self.assertTrue(result is None) def test__validate_qos_policy_group_unsupported_feature(self): is_adaptive = True self.client.features.ADAPTIVE_QOS = True spec = { 'min_throughput': fake.MIN_IOPS_REST } self.assertRaises( netapp_utils.NetAppDriverException, self.client._validate_qos_policy_group, is_adaptive, spec=spec, qos_min_support=False ) @ddt.data(True, False) def test__validate_qos_policy_group(self, is_adaptive): self.client.features.ADAPTIVE_QOS = True spec = { 'max_throughput': fake.MAX_IOPS_REST, 'min_throughput': fake.MIN_IOPS_REST } self.client._validate_qos_policy_group( is_adaptive, spec=spec, qos_min_support=True) def test_delete_file(self): """Delete file at path.""" path_to_file = fake.VOLUME_PATH volume_response = fake_client.VOLUME_LIST_SIMPLE_RESPONSE_REST volume_item = fake_client.VOLUME_ITEM_SIMPLE_RESPONSE_REST volume_name = path_to_file.split('/')[2] relative_path = '/'.join(path_to_file.split('/')[3:]) query = { 'type': 'rw', 'style': 'flex*', # Match both 'flexvol' and 'flexgroup' 'is_svm_root': 'false', 'error_state.is_inconsistent': 'false', 'state': 'online', 'name': volume_name, 'fields': 'name,style' } self.mock_object(self.client, 'send_request', return_value=volume_response) self.mock_object(self.client, '_get_unique_volume', return_value=volume_item) self.client.delete_file(path_to_file) relative_path = 
relative_path.replace('/', '%2F').replace('.', '%2E') self.client.send_request.assert_has_calls([ mock.call('/storage/volumes/', 'get', query=query), mock.call(f'/storage/volumes/{volume_item["uuid"]}' + f'/files/{relative_path}', 'delete') ]) self.client._get_unique_volume.assert_called_once_with( volume_response['records']) def test_get_igroup_by_initiators_none_found(self): initiator = 'initiator' expected_response = fake_client.NO_RECORDS_RESPONSE_REST self.mock_object(self.client, 'send_request', return_value=expected_response) igroup_list = self.client.get_igroup_by_initiators([initiator]) self.assertEqual([], igroup_list) def test_get_igroup_by_initiators(self): initiators = ['iqn.1993-08.org.fake:01:5b67769f5c5e'] expected_igroup = [{ 'initiator-group-os-type': 'linux', 'initiator-group-type': 'iscsi', 'initiator-group-name': 'openstack-e6bf1584-bfb3-4cdb-950d-525bf6f26b53' }] expected_query = { 'svm.name': fake_client.VOLUME_VSERVER_NAME, 'initiators.name': ','.join(initiators), 'fields': 'name,protocol,os_type' } self.mock_object(self.client, 'send_request', return_value=fake_client.IGROUP_GET_ITER_REST) igroup_list = self.client.get_igroup_by_initiators(initiators) self.client.send_request.assert_called_once_with( '/protocols/san/igroups', 'get', query=expected_query) self.assertEqual(expected_igroup, igroup_list) def test_get_igroup_by_initiators_multiple(self): initiators = ['iqn.1993-08.org.fake:01:5b67769f5c5e', 'iqn.1993-08.org.fake:02:5b67769f5c5e'] expected_igroup = [{ 'initiator-group-os-type': 'linux', 'initiator-group-type': 'iscsi', 'initiator-group-name': 'openstack-e6bf1584-bfb3-4cdb-950d-525bf6f26b53' }] expected_query = { 'svm.name': fake_client.VOLUME_VSERVER_NAME, 'initiators.name': ','.join(initiators), 'fields': 'name,protocol,os_type' } self.mock_object(self.client, 'send_request', return_value=fake_client.IGROUP_GET_ITER_INITS_REST) igroup_list = self.client.get_igroup_by_initiators(initiators) self.client.send_request.assert_called_once_with( '/protocols/san/igroups', 'get', query=expected_query) self.assertEqual(expected_igroup, igroup_list) def test_get_igroup_by_initiators_multiple_records(self): initiators = ['iqn.1993-08.org.fake:01:5b67769f5c5e'] expected_element = { 'initiator-group-os-type': 'linux', 'initiator-group-type': 'iscsi', 'initiator-group-name': 'openstack-e6bf1584-bfb3-4cdb-950d-525bf6f26b53' } expected_igroup = [expected_element, expected_element] self.mock_object(self.client, 'send_request', return_value=fake_client.IGROUP_GET_ITER_MULT_REST) igroup_list = self.client.get_igroup_by_initiators(initiators) self.assertEqual(expected_igroup, igroup_list) def test_add_igroup_initiator(self): igroup = 'fake_igroup' initiator = 'fake_initator' mock_return = fake_client.IGROUP_GET_ITER_REST expected_uuid = fake_client.IGROUP_GET_ITER_REST['records'][0]['uuid'] mock_send_request = self.mock_object(self.client, 'send_request', return_value = mock_return) self.client.add_igroup_initiator(igroup, initiator) expected_body = { 'name': initiator } mock_send_request.assert_has_calls([ mock.call('/protocols/san/igroups/' + expected_uuid + '/initiators', 'post', body=expected_body)]) def test_create_igroup(self): igroup = 'fake_igroup' igroup_type = 'fake_type' os_type = 'fake_os' body = { 'name': igroup, 'protocol': igroup_type, 'os_type': os_type, } self.mock_object(self.client, 'send_request') self.client.create_igroup(igroup, igroup_type, os_type) self.client.send_request.assert_called_once_with( '/protocols/san/igroups', 'post', body=body) 
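    # The ddt values below exercise mapping without an explicit LUN ID as
    # well as with the explicit IDs 0 and 4095.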
@ddt.data(None, 0, 4095) def test_map_lun(self, lun_id): fake_record = fake_client.GET_LUN_MAP_REST['records'][0] path = fake_record['lun']['name'] igroup_name = fake_record['igroup']['name'] mock_send_request = self.mock_object( self.client, 'send_request', return_value=fake_client.GET_LUN_MAP_REST) result = self.client.map_lun(path, igroup_name, lun_id) self.assertEqual(0, result) expected_body = { 'lun.name': path, 'igroup.name': igroup_name, } if lun_id is not None: expected_body['logical_unit_number'] = lun_id mock_send_request.assert_has_calls([ mock.call('/protocols/san/lun-maps', 'post', body=expected_body, query={'return_records': 'true'})]) def test_get_lun_map(self): fake_record = fake_client.GET_LUN_MAP_REST['records'][0] path = fake_record['lun']['name'] expected_lun_map = [{ 'initiator-group': fake_record['igroup']['name'], 'lun-id': fake_record['logical_unit_number'], 'vserver': fake_record['svm']['name'], }] expected_query = { 'lun.name': path, 'fields': 'igroup.name,logical_unit_number,svm.name', } self.mock_object(self.client, 'send_request', return_value=fake_client.GET_LUN_MAP_REST) lun_map = self.client.get_lun_map(path) self.assertEqual(observed=lun_map, expected=expected_lun_map) self.client.send_request.assert_called_once_with( '/protocols/san/lun-maps', 'get', query=expected_query) def test_get_lun_map_no_luns_mapped(self): fake_record = fake_client.GET_LUN_MAP_REST['records'][0] path = fake_record['lun']['name'] expected_lun_map = [] expected_query = { 'lun.name': path, 'fields': 'igroup.name,logical_unit_number,svm.name', } self.mock_object(self.client, 'send_request', return_value = fake_client.NO_RECORDS_RESPONSE_REST) lun_map = self.client.get_lun_map(path) self.assertEqual(observed=lun_map, expected=expected_lun_map) self.client.send_request.assert_called_once_with( '/protocols/san/lun-maps', 'get', query=expected_query) def test_get_fc_target_wwpns(self): fake_record = fake_client.FC_INTERFACE_REST['records'][0] expected_wwpns = [fake_record['wwpn']] expected_query = { 'fields': 'wwpn' } self.mock_object(self.client, 'send_request', return_value = fake_client.FC_INTERFACE_REST) wwpns = self.client.get_fc_target_wwpns() self.assertEqual(observed=wwpns, expected=expected_wwpns) self.client.send_request.assert_called_once_with( '/network/fc/interfaces', 'get', query=expected_query) def test_get_fc_target_wwpns_not_found(self): expected_wwpns = [] expected_query = { 'fields': 'wwpn' } self.mock_object(self.client, 'send_request', return_value = fake_client.NO_RECORDS_RESPONSE_REST) wwpns = self.client.get_fc_target_wwpns() self.assertEqual(observed=wwpns, expected=expected_wwpns) self.client.send_request.assert_called_once_with( '/network/fc/interfaces', 'get', query=expected_query) def test_unmap_lun(self): get_uuid_response = fake_client.GET_LUN_MAP_REST mock_send_request = self.mock_object( self.client, 'send_request', side_effect=[get_uuid_response, None]) self.client.unmap_lun(fake_client.LUN_NAME_PATH, fake_client.IGROUP_NAME) query_uuid = { 'igroup.name': fake_client.IGROUP_NAME, 'lun.name': fake_client.LUN_NAME_PATH, 'fields': 'lun.uuid,igroup.uuid' } lun_uuid = get_uuid_response['records'][0]['lun']['uuid'] igroup_uuid = get_uuid_response['records'][0]['igroup']['uuid'] mock_send_request.assert_has_calls([ mock.call('/protocols/san/lun-maps', 'get', query=query_uuid), mock.call(f'/protocols/san/lun-maps/{lun_uuid}/{igroup_uuid}', 'delete'), ]) def test_unmap_lun_with_api_error(self): get_uuid_response = fake_client.GET_LUN_MAP_REST mock_send_request = 
self.mock_object( self.client, 'send_request', side_effect=[get_uuid_response, netapp_api.NaApiError()]) self.assertRaises(netapp_api.NaApiError, self.client.unmap_lun, fake_client.LUN_NAME_PATH, fake_client.IGROUP_NAME) query_uuid = { 'igroup.name': fake_client.IGROUP_NAME, 'lun.name': fake_client.LUN_NAME_PATH, 'fields': 'lun.uuid,igroup.uuid' } lun_uuid = get_uuid_response['records'][0]['lun']['uuid'] igroup_uuid = get_uuid_response['records'][0]['igroup']['uuid'] mock_send_request.assert_has_calls([ mock.call('/protocols/san/lun-maps', 'get', query=query_uuid), mock.call(f'/protocols/san/lun-maps/{lun_uuid}/{igroup_uuid}', 'delete'), ]) def test_unmap_lun_invalid_input(self): get_uuid_response = fake_client.NO_RECORDS_RESPONSE_REST mock_send_request = self.mock_object( self.client, 'send_request', side_effect=[get_uuid_response, None]) self.client.unmap_lun(fake_client.LUN_NAME_PATH, fake_client.IGROUP_NAME) query_uuid = { 'igroup.name': fake_client.IGROUP_NAME, 'lun.name': fake_client.LUN_NAME_PATH, 'fields': 'lun.uuid,igroup.uuid' } mock_send_request.assert_called_once_with( '/protocols/san/lun-maps', 'get', query=query_uuid) def test_unmap_lun_not_mapped_in_group(self): get_uuid_response = fake_client.GET_LUN_MAP_REST # Exception REST_NO_SUCH_LUN_MAP is handled inside the function # and should not be re-raised mock_send_request = self.mock_object( self.client, 'send_request', side_effect=[ get_uuid_response, netapp_api.NaApiError( code=netapp_api.REST_NO_SUCH_LUN_MAP)]) self.client.unmap_lun(fake_client.LUN_NAME_PATH, fake_client.IGROUP_NAME) query_uuid = { 'igroup.name': fake_client.IGROUP_NAME, 'lun.name': fake_client.LUN_NAME_PATH, 'fields': 'lun.uuid,igroup.uuid' } lun_uuid = get_uuid_response['records'][0]['lun']['uuid'] igroup_uuid = get_uuid_response['records'][0]['igroup']['uuid'] mock_send_request.assert_has_calls([ mock.call('/protocols/san/lun-maps', 'get', query=query_uuid), mock.call(f'/protocols/san/lun-maps/{lun_uuid}/{igroup_uuid}', 'delete'), ]) def test_has_luns_mapped_to_initiators(self): initiators = ['iqn.2005-03.org.open-iscsi:49ebe8a87d1'] api_response = fake_client.GET_LUN_MAPS mock_send_request = self.mock_object( self.client, 'send_request', return_value=api_response) self.assertTrue(self.client.has_luns_mapped_to_initiators(initiators)) query = { 'initiators.name': ','.join(initiators), 'fields': 'lun_maps' } mock_send_request.assert_called_once_with( '/protocols/san/igroups', 'get', query=query) def test_has_luns_mapped_to_initiators_no_records(self): initiators = ['iqn.2005-03.org.open-iscsi:49ebe8a87d1'] api_response = fake_client.NO_RECORDS_RESPONSE_REST mock_send_request = self.mock_object( self.client, 'send_request', return_value=api_response) self.assertFalse(self.client.has_luns_mapped_to_initiators(initiators)) query = { 'initiators.name': ','.join(initiators), 'fields': 'lun_maps' } mock_send_request.assert_called_once_with( '/protocols/san/igroups', 'get', query=query) def test_has_luns_mapped_to_initiators_not_mapped(self): initiators = ['iqn.2005-03.org.open-iscsi:49ebe8a87d1'] api_response = fake_client.GET_LUN_MAPS_NO_MAPS mock_send_request = self.mock_object( self.client, 'send_request', return_value=api_response) self.assertFalse(self.client.has_luns_mapped_to_initiators(initiators)) query = { 'initiators.name': ','.join(initiators), 'fields': 'lun_maps' } mock_send_request.assert_called_once_with( '/protocols/san/igroups', 'get', query=query) def test_iscsi_service_details(self): fake_record = 
fake_client.GET_ISCSI_SERVICE_DETAILS_REST['records'][0] expected_iqn = fake_record['target']['name'] expected_query = { 'fields': 'target.name' } mock_send_request = self.mock_object( self.client, 'send_request', return_value=fake_client.GET_ISCSI_SERVICE_DETAILS_REST) iqn = self.client.get_iscsi_service_details() self.assertEqual(expected_iqn, iqn) mock_send_request.assert_called_once_with( '/protocols/san/iscsi/services', 'get', query=expected_query) def test_iscsi_service_details_not_found(self): expected_iqn = None expected_query = { 'fields': 'target.name' } mock_send_request = self.mock_object( self.client, 'send_request', return_value=fake_client.NO_RECORDS_RESPONSE_REST) iqn = self.client.get_iscsi_service_details() self.assertEqual(expected_iqn, iqn) mock_send_request.assert_called_once_with( '/protocols/san/iscsi/services', 'get', query=expected_query) def test_check_iscsi_initiator_exists(self): fake_record = fake_client.CHECK_ISCSI_INITIATOR_REST['records'][0] iqn = fake_record['initiator'] expected_query = { 'initiator': iqn } mock_send_request = self.mock_object( self.client, 'send_request', return_value=fake_client.CHECK_ISCSI_INITIATOR_REST) initiator_exists = self.client.check_iscsi_initiator_exists(iqn) self.assertEqual(expected=True, observed=initiator_exists) mock_send_request.assert_called_once_with( '/protocols/san/iscsi/credentials', 'get', query=expected_query) def test_check_iscsi_initiator_exists_not_found(self): fake_record = fake_client.CHECK_ISCSI_INITIATOR_REST['records'][0] iqn = fake_record['initiator'] expected_query = { 'initiator': iqn } mock_send_request = self.mock_object( self.client, 'send_request', return_value=fake_client.NO_RECORDS_RESPONSE_REST) initiator_exists = self.client.check_iscsi_initiator_exists(iqn) self.assertEqual(expected=False, observed=initiator_exists) mock_send_request.assert_called_once_with( '/protocols/san/iscsi/credentials', 'get', query=expected_query) def test_get_iscsi_target_details(self): fake_record = fake_client.GET_ISCSI_TARGET_DETAILS_REST['records'][0] expected_details = [{ 'address': fake_record['ip']['address'], 'port': 3260, 'tpgroup-tag': None, 'interface-enabled': fake_record['enabled'], }] expected_query = { 'services': 'data_iscsi', 'fields': 'ip.address,enabled' } mock_send_request = self.mock_object( self.client, 'send_request', return_value=fake_client.GET_ISCSI_TARGET_DETAILS_REST) details = self.client.get_iscsi_target_details() self.assertEqual(expected_details, details) mock_send_request.assert_called_once_with('/network/ip/interfaces', 'get', query=expected_query) def test_get_iscsi_target_details_no_details(self): expected_details = [] expected_query = { 'services': 'data_iscsi', 'fields': 'ip.address,enabled' } mock_send_request = self.mock_object( self.client, 'send_request', return_value=fake_client.NO_RECORDS_RESPONSE_REST) details = self.client.get_iscsi_target_details() self.assertEqual(expected_details, details) mock_send_request.assert_called_once_with('/network/ip/interfaces', 'get', query=expected_query) def test_move_lun(self): fake_cur_path = '/vol/fake_vol/fake_lun_cur' fake_new_path = '/vol/fake_vol/fake_lun_new' expected_query = { 'svm.name': self.vserver, 'name': fake_cur_path, } expected_body = { 'name': fake_new_path, } mock_send_request = self.mock_object(self.client, 'send_request') self.client.move_lun(fake_cur_path, fake_new_path) mock_send_request.assert_called_once_with( '/storage/luns/', 'patch', query=expected_query, body=expected_body) @ddt.data(True, False) def 
test_clone_file_snapshot(self, overwrite_dest): fake_volume = fake_client.VOLUME_ITEM_SIMPLE_RESPONSE_REST self.client.features.BACKUP_CLONE_PARAM = True fake_name = fake.NFS_VOLUME['name'] fake_new_name = fake.SNAPSHOT_NAME api_version = (1, 19) expected_body = { 'volume': { 'uuid': fake_volume['uuid'], 'name': fake_volume['name'] }, 'source_path': fake_name, 'destination_path': fake_new_name, 'is_backup': True } if overwrite_dest: api_version = (1, 20) expected_body['overwrite_destination'] = True self.mock_object(self.client, 'send_request') self.mock_object(self.client, '_get_volume_by_args', return_value=fake_volume) self.mock_object(self.client.connection, 'get_api_version', return_value=api_version) self.client.clone_file( fake_volume['name'], fake_name, fake_new_name, fake.VSERVER_NAME, is_snapshot=True, dest_exists=overwrite_dest) self.client.send_request.assert_has_calls([ mock.call('/storage/file/clone', 'post', body=expected_body), ]) def test_clone_lun(self): self.client.vserver = fake.VSERVER_NAME expected_body = { 'svm': { 'name': fake.VSERVER_NAME }, 'name': f'/vol/{fake.VOLUME_NAME}/{fake.SNAPSHOT_NAME}', 'clone': { 'source': { 'name': f'/vol/{fake.VOLUME_NAME}/{fake.LUN_NAME}', } }, 'space': { 'guarantee': { 'requested': True, } }, 'qos_policy': { 'name': fake.QOS_POLICY_GROUP_NAME, } } mock_send_request = self.mock_object( self.client, 'send_request', return_value=None) mock_validate_policy = self.mock_object( self.client, '_validate_qos_policy_group') self.client.clone_lun( volume=fake.VOLUME_NAME, name=fake.LUN_NAME, new_name=fake.SNAPSHOT_NAME, qos_policy_group_name=fake.QOS_POLICY_GROUP_NAME, is_snapshot=True) mock_validate_policy.assert_called_once_with(False) mock_send_request.assert_called_once_with( '/storage/luns', 'post', body=expected_body) @ddt.data(True, False) def test_destroy_lun(self, force=True): path = f'/vol/{fake_client.VOLUME_NAME}/{fake_client.FILE_NAME}' query = {} query['name'] = path query['svm'] = fake_client.VOLUME_VSERVER_NAME if force: query['allow_delete_while_mapped'] = 'true' self.mock_object(self.client, 'send_request') self.client.destroy_lun(path, force) self.client.send_request.assert_called_once_with('/storage/luns/', 'delete', query=query) def test_get_flexvol_capacity(self, ): api_response = fake_client.VOLUME_GET_ITER_CAPACITY_RESPONSE_REST volume_response = api_response['records'][0] mock_get_unique_vol = self.mock_object( self.client, '_get_volume_by_args', return_value=volume_response) capacity = self.client.get_flexvol_capacity( flexvol_path=fake.VOLUME_PATH, flexvol_name=fake.VOLUME_NAME) mock_get_unique_vol.assert_called_once_with( vol_name=fake.VOLUME_NAME, vol_path=fake.VOLUME_PATH, fields='name,space.available,space.afs_total') self.assertEqual(float(fake_client.VOLUME_SIZE_TOTAL), capacity['size-total']) self.assertEqual(float(fake_client.VOLUME_SIZE_AVAILABLE), capacity['size-available']) def test_get_flexvol_capacity_not_found(self): self.mock_object( self.client, '_get_volume_by_args', side_effect=exception.VolumeBackendAPIException(data="fake")) self.assertRaises(netapp_utils.NetAppDriverException, self.client.get_flexvol_capacity, flexvol_path='fake_path') def test_check_api_permissions(self): mock_log = self.mock_object(client_cmode_rest.LOG, 'warning') self.mock_object(self.client, 'check_cluster_api', return_value=True) self.client.check_api_permissions() self.client.check_cluster_api.assert_has_calls( [mock.call(key) for key in client_cmode_rest.SSC_API_MAP.keys()]) self.assertEqual(0, mock_log.call_count) def 
test_check_api_permissions_failed_ssc_apis(self): def check_cluster_api(api): if api != '/storage/volumes': return False return True self.mock_object(self.client, 'check_cluster_api', side_effect=check_cluster_api) mock_log = self.mock_object(client_cmode_rest.LOG, 'warning') self.client.check_api_permissions() self.assertEqual(1, mock_log.call_count) def test_check_api_permissions_failed_volume_api(self): def check_cluster_api(api): if api == '/storage/volumes': return False return True self.mock_object(self.client, 'check_cluster_api', side_effect=check_cluster_api) mock_log = self.mock_object(client_cmode_rest.LOG, 'warning') self.assertRaises(exception.VolumeBackendAPIException, self.client.check_api_permissions) self.assertEqual(0, mock_log.call_count) def test_check_cluster_api(self): endpoint_api = '/storage/volumes' endpoint_request = '/storage/volumes?return_records=false' mock_send_request = self.mock_object(self.client, 'send_request', return_value=True) result = self.client.check_cluster_api(endpoint_api) mock_send_request.assert_has_calls([mock.call(endpoint_request, 'get', enable_tunneling=False)]) self.assertTrue(result) def test_check_cluster_api_error(self): endpoint_api = '/storage/volumes' api_error = netapp_api.NaApiError(code=netapp_api.REST_UNAUTHORIZED) self.mock_object(self.client, 'send_request', side_effect=[api_error]) result = self.client.check_cluster_api(endpoint_api) self.assertFalse(result) def test_get_provisioning_options_from_flexvol(self): self.mock_object(self.client, 'get_flexvol', return_value=fake_client.VOLUME_INFO_SSC) self.mock_object(self.client, 'get_flexvol_dedupe_info', return_value=fake_client.VOLUME_DEDUPE_INFO_SSC) expected_prov_opts = { 'aggregate': ['fake_aggr1'], 'compression_enabled': False, 'dedupe_enabled': True, 'language': 'c.utf_8', 'size': 1, 'snapshot_policy': 'default', 'snapshot_reserve': '5', 'space_guarantee_type': 'none', 'volume_type': 'rw', 'is_flexgroup': False, } actual_prov_opts = self.client.get_provisioning_options_from_flexvol( fake_client.VOLUME_NAME) self.assertEqual(expected_prov_opts, actual_prov_opts) def test_flexvol_exists(self): api_response = fake_client.GET_NUM_RECORDS_RESPONSE_REST mock_send_request = self.mock_object(self.client, 'send_request', return_value=api_response) result = self.client.flexvol_exists(fake_client.VOLUME_NAME) query = { 'name': fake_client.VOLUME_NAME, 'return_records': 'false' } mock_send_request.assert_has_calls([ mock.call('/storage/volumes/', 'get', query=query)]) self.assertTrue(result) def test_flexvol_exists_not_found(self): api_response = fake_client.NO_RECORDS_RESPONSE_REST self.mock_object(self.client, 'send_request', return_value=api_response) self.assertFalse(self.client.flexvol_exists(fake_client.VOLUME_NAME)) @ddt.data(fake_client.VOLUME_AGGREGATE_NAME, [fake_client.VOLUME_AGGREGATE_NAME], [fake_client.VOLUME_AGGREGATE_NAMES[0], fake_client.VOLUME_AGGREGATE_NAMES[1]]) def test_create_volume_async(self, aggregates): self.mock_object(self.client, 'send_request') self.client.create_volume_async( fake_client.VOLUME_NAME, aggregates, 100, volume_type='dp') body = { 'name': fake_client.VOLUME_NAME, 'size': 100 * units.Gi, 'type': 'dp' } if isinstance(aggregates, list): body['style'] = 'flexgroup' body['aggregates'] = [{'name': aggr} for aggr in aggregates] else: body['style'] = 'flexvol' body['aggregates'] = [{'name': aggregates}] self.client.send_request.assert_called_once_with( '/storage/volumes/', 'post', body=body, wait_on_accepted=False) @ddt.data('dp', 'rw', None) def 
test_create_volume_async_with_extra_specs(self, volume_type): self.mock_object(self.client, 'send_request') aggregates = [fake_client.VOLUME_AGGREGATE_NAME] snapshot_policy = 'default' size = 100 space_guarantee_type = 'volume' language = 'en-US' snapshot_reserve = 15 self.client.create_volume_async( fake_client.VOLUME_NAME, aggregates, size, space_guarantee_type=space_guarantee_type, language=language, snapshot_policy=snapshot_policy, snapshot_reserve=snapshot_reserve, volume_type=volume_type) body = { 'name': fake_client.VOLUME_NAME, 'size': size * units.Gi, 'type': volume_type, 'guarantee': {'type': space_guarantee_type}, 'space': {'snapshot': {'reserve_percent': str(snapshot_reserve)}}, 'language': language, } if isinstance(aggregates, list): body['style'] = 'flexgroup' body['aggregates'] = [{'name': aggr} for aggr in aggregates] else: body['style'] = 'flexvol' body['aggregates'] = [{'name': aggregates}] if volume_type == 'dp': snapshot_policy = None else: body['nas'] = {'path': '/%s' % fake_client.VOLUME_NAME} if snapshot_policy is not None: body['snapshot_policy'] = {'name': snapshot_policy} self.client.send_request.assert_called_once_with( '/storage/volumes/', 'post', body=body, wait_on_accepted=False) def test_create_flexvol(self): aggregates = [fake_client.VOLUME_AGGREGATE_NAME] size = 100 mock_response = { 'job': { 'uuid': fake.JOB_UUID, } } self.mock_object(self.client, 'send_request', return_value=mock_response) expected_response = { 'status': None, 'jobid': fake.JOB_UUID, 'error-code': None, 'error-message': None } response = self.client.create_volume_async(fake_client.VOLUME_NAME, aggregates, size_gb = size) self.assertEqual(expected_response, response) def test_enable_volume_dedupe_async(self): query = { 'name': fake_client.VOLUME_NAME, 'fields': 'uuid,style', } # This is needed because the first calling to send_request inside # enable_volume_dedupe_async must return a valid uuid for the given # volume name. mock_response = { 'records': [ { 'uuid': fake.JOB_UUID, 'name': fake_client.VOLUME_NAME, "style": 'flexgroup', } ], "num_records": 1, } body = { 'efficiency': {'dedupe': 'background'} } mock_send_request = self.mock_object(self.client, 'send_request', return_value=mock_response) call_list = [mock.call('/storage/volumes/', 'patch', body=body, query=query, wait_on_accepted=False)] self.client.enable_volume_dedupe_async(fake_client.VOLUME_NAME) mock_send_request.assert_has_calls(call_list) def test_enable_volume_compression_async(self): query = { 'name': fake_client.VOLUME_NAME, } # This is needed because the first calling to send_request inside # enable_volume_compression_async must return a valid uuid for the # given volume name. 
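        # A hedged illustration (comment only; nothing here is executed by
        # the test): the single mocked send_request return value stands in
        # for the client's volume lookup, which presumably yields a record
        # whose uuid is read roughly as
        #     volume_uuid = response['records'][0]['uuid']
        # before the efficiency PATCH on '/storage/volumes/' asserted below
        # is issued, which is why the mocked response must carry a valid
        # uuid for the given volume name.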
mock_response = { 'records': [ { 'uuid': fake.JOB_UUID, 'name': fake_client.VOLUME_NAME, "style": 'flexgroup', } ], "num_records": 1, } body = { 'efficiency': {'compression': 'background'} } mock_send_request = self.mock_object(self.client, 'send_request', return_value=mock_response) call_list = [mock.call('/storage/volumes/', 'patch', body=body, query=query, wait_on_accepted=False)] self.client.enable_volume_compression_async(fake_client.VOLUME_NAME) mock_send_request.assert_has_calls(call_list) def test__get_snapmirrors(self): api_response = fake_client.SNAPMIRROR_GET_ITER_RESPONSE_REST mock_send_request = self.mock_object(self.client, 'send_request', return_value=api_response) result = self.client._get_snapmirrors( fake_client.SM_SOURCE_VSERVER, fake_client.SM_SOURCE_VOLUME, fake_client.SM_DEST_VSERVER, fake_client.SM_DEST_VOLUME) query = { 'source.path': (fake_client.SM_SOURCE_VSERVER + ':' + fake_client.SM_SOURCE_VOLUME), 'destination.path': (fake_client.SM_DEST_VSERVER + ':' + fake_client.SM_DEST_VOLUME), 'fields': 'state,source.svm.name,source.path,destination.svm.name,' 'destination.path,transfer.state,transfer.end_time,' 'lag_time,healthy,uuid' } mock_send_request.assert_called_once_with('/snapmirror/relationships', 'get', query=query) self.assertEqual(1, len(result)) def test__get_snapmirrors_not_found(self): api_response = fake_client.NO_RECORDS_RESPONSE_REST mock_send_request = self.mock_object(self.client, 'send_request', return_value=api_response) result = self.client._get_snapmirrors( fake_client.SM_SOURCE_VSERVER, fake_client.SM_SOURCE_VOLUME, fake_client.SM_DEST_VSERVER, fake_client.SM_DEST_VOLUME) query = { 'source.path': (fake_client.SM_SOURCE_VSERVER + ':' + fake_client.SM_SOURCE_VOLUME), 'destination.path': (fake_client.SM_DEST_VSERVER + ':' + fake_client.SM_DEST_VOLUME), 'fields': 'state,source.svm.name,source.path,destination.svm.name,' 'destination.path,transfer.state,transfer.end_time,' 'lag_time,healthy,uuid' } mock_send_request.assert_called_once_with('/snapmirror/relationships', 'get', query=query) self.assertEqual([], result) def test_get_snapmirrors(self): api_response = fake_client.SNAPMIRROR_GET_ITER_RESPONSE_REST mock_send_request = self.mock_object(self.client, 'send_request', return_value=api_response) result = self.client.get_snapmirrors( fake_client.SM_SOURCE_VSERVER, fake_client.SM_SOURCE_VOLUME, fake_client.SM_DEST_VSERVER, fake_client.SM_DEST_VOLUME) expected = fake_client.REST_GET_SNAPMIRRORS_RESPONSE query = { 'source.path': (fake_client.SM_SOURCE_VSERVER + ':' + fake_client.SM_SOURCE_VOLUME), 'destination.path': (fake_client.SM_DEST_VSERVER + ':' + fake_client.SM_DEST_VOLUME), 'fields': 'state,source.svm.name,source.path,destination.svm.name,' 'destination.path,transfer.state,transfer.end_time,' 'lag_time,healthy,uuid' } mock_send_request.assert_called_once_with('/snapmirror/relationships', 'get', query=query) self.assertEqual(expected, result) @ddt.data({'policy': 'fake_policy'}, {'policy': None}) @ddt.unpack def test_create_snapmirror(self, policy): api_responses = [ { "job": { "uuid": fake_client.FAKE_UUID, }, }, ] self.mock_object(self.client, 'send_request', side_effect = copy.deepcopy(api_responses)) self.client.create_snapmirror( fake_client.SM_SOURCE_VSERVER, fake_client.SM_SOURCE_VOLUME, fake_client.SM_DEST_VSERVER, fake_client.SM_DEST_VOLUME, policy=policy) body = { 'source': { 'path': (fake_client.SM_SOURCE_VSERVER + ':' + fake_client.SM_SOURCE_VOLUME), }, 'destination': { 'path': (fake_client.SM_DEST_VSERVER + ':' + 
fake_client.SM_DEST_VOLUME) } } if policy: body['policy'] = {'name': policy} self.client.send_request.assert_has_calls([ mock.call('/snapmirror/relationships/', 'post', body=body)]) @ddt.data( { 'policy': None, 'sm_source_cg': fake_client.SM_SOURCE_CG, 'sm_destination_cg': fake_client.SM_DESTINATION_CG, }, { 'policy': None, 'sm_source_cg': None, 'sm_destination_cg': None, }, { 'policy': 'AutomatedFailOver', 'sm_source_cg': fake_client.SM_SOURCE_CG, 'sm_destination_cg': fake_client.SM_DESTINATION_CG, }, { 'policy': 'AutomatedFailOver', 'sm_source_cg': None, 'sm_destination_cg': None, }, ) @ddt.unpack def test_create_snapmirror_active_sync(self, policy, sm_source_cg, sm_destination_cg): """Tests creation of snapmirror with active sync""" api_responses = [ { "job": { "uuid": fake_client.FAKE_UUID, }, }, ] body = {} self.mock_object(self.client, 'send_request', side_effect = copy.deepcopy(api_responses)) self.client.create_snapmirror( fake_client.SM_SOURCE_VSERVER, fake_client.SM_SOURCE_VOLUME, fake_client.SM_DEST_VSERVER, fake_client.SM_DEST_VOLUME, sm_source_cg, sm_destination_cg, policy=policy) if sm_source_cg is not None and sm_destination_cg is not None: body = { 'source': { 'path': fake_client.SM_SOURCE_VSERVER + ':/cg/' + sm_source_cg, 'consistency_group_volumes': [ {'name': fake_client.SM_SOURCE_VOLUME}] }, 'destination': { 'path': fake_client.SM_DEST_VSERVER + ':/cg/' + sm_destination_cg, 'consistency_group_volumes': [ {'name': fake_client.SM_DEST_VOLUME}] } } else: body = { 'source': { 'path': fake_client.SM_SOURCE_VSERVER + ':' + fake_client.SM_SOURCE_VOLUME }, 'destination': { 'path': fake_client.SM_DEST_VSERVER + ':' + fake_client.SM_DEST_VOLUME }, } if policy: body['policy'] = {'name': policy} if bool(body): self.client.send_request.assert_has_calls([ mock.call('/snapmirror/relationships/', 'post', body=body)]) def test_create_snapmirror_already_exists(self): api_responses = netapp_api.NaApiError( code=netapp_api.REST_ERELATION_EXISTS) self.mock_object(self.client, 'send_request', side_effect=api_responses) response = self.client.create_snapmirror( fake_client.SM_SOURCE_VSERVER, fake_client.SM_SOURCE_VOLUME, fake_client.SM_DEST_VSERVER, fake_client.SM_DEST_VOLUME, schedule=None, policy=None, relationship_type='data_protection') self.assertIsNone(response) self.assertTrue(self.client.send_request.called) def test_create_snapmirror_error(self): self.mock_object(self.client, 'send_request', side_effect=netapp_api.NaApiError(code=123)) self.assertRaises(netapp_api.NaApiError, self.client.create_snapmirror, fake_client.SM_SOURCE_VSERVER, fake_client.SM_SOURCE_VOLUME, fake_client.SM_DEST_VSERVER, fake_client.SM_DEST_VOLUME, schedule=None, policy=None, relationship_type='data_protection') self.assertTrue(self.client.send_request.called) def test_create_ontap_consistency_group(self): """Tests creation of consistency group for active sync policies""" api_responses = [ { "job": { "uuid": fake_client.FAKE_UUID, }, }, ] self.mock_object(self.client, 'send_request', side_effect = copy.deepcopy(api_responses)) self.client.create_ontap_consistency_group( fake_client.SM_SOURCE_VSERVER, fake_client.SM_SOURCE_VOLUME, fake_client.SM_SOURCE_CG) body = { 'svm': { 'name': fake_client.SM_SOURCE_VSERVER }, 'name': fake_client.SM_SOURCE_CG, 'volumes': [{ 'name': fake_client.SM_SOURCE_VOLUME, "provisioning_options": {"action": "add"} }] } self.client.send_request.assert_has_calls([ mock.call('/application/consistency-groups/', 'post', body=body)]) def test__set_snapmirror_state(self): api_responses = [ 
fake_client.SNAPMIRROR_GET_ITER_RESPONSE_REST, { "job": { "uuid": fake_client.FAKE_UUID }, "num_records": 1 } ] expected_body = {'state': 'snapmirrored'} self.mock_object(self.client, 'send_request', side_effect=copy.deepcopy(api_responses)) result = self.client._set_snapmirror_state( 'snapmirrored', fake_client.SM_SOURCE_VSERVER, fake_client.SM_SOURCE_VOLUME, fake_client.SM_DEST_VSERVER, fake_client.SM_DEST_VOLUME) self.client.send_request.assert_has_calls([ mock.call('/snapmirror/relationships/' + fake_client.FAKE_UUID, 'patch', body=expected_body, wait_on_accepted=True)]) expected = { 'operation-id': None, 'status': None, 'jobid': fake_client.FAKE_UUID, 'error-code': None, 'error-message': None, 'relationship-uuid': fake_client.FAKE_UUID } self.assertEqual(expected, result) def test_initialize_snapmirror(self): expected_job = { 'operation-id': None, 'status': None, 'jobid': fake_client.FAKE_UUID, 'error-code': None, 'error-message': None, } mock_set_snapmirror_state = self.mock_object( self.client, '_set_snapmirror_state', return_value=expected_job) result = self.client.initialize_snapmirror( fake_client.SM_SOURCE_VSERVER, fake_client.SM_SOURCE_VOLUME, fake_client.SM_DEST_VSERVER, fake_client.SM_DEST_VOLUME) mock_set_snapmirror_state.assert_called_once_with( 'snapmirrored', fake_client.SM_SOURCE_VSERVER, fake_client.SM_SOURCE_VOLUME, fake_client.SM_DEST_VSERVER, fake_client.SM_DEST_VOLUME, wait_result=False) self.assertEqual(expected_job, result) @ddt.data(True, False) def test_abort_snapmirror(self, clear_checkpoint): self.mock_object( self.client, 'get_snapmirrors', return_value=fake_client.REST_GET_SNAPMIRRORS_RESPONSE) responses = [fake_client.TRANSFERS_GET_ITER_REST, None, None] self.mock_object(self.client, 'send_request', side_effect=copy.deepcopy(responses)) self.client.abort_snapmirror( fake_client.SM_SOURCE_VSERVER, fake_client.SM_SOURCE_VOLUME, fake_client.SM_DEST_VSERVER, fake_client.SM_DEST_VOLUME, clear_checkpoint=clear_checkpoint) body = {'state': 'hard_aborted' if clear_checkpoint else 'aborted'} query = {'state': 'transferring'} self.client.send_request.assert_has_calls([ mock.call('/snapmirror/relationships/' + fake_client.FAKE_UUID + '/transfers/', 'get', query=query), mock.call('/snapmirror/relationships/' + fake_client.FAKE_UUID + '/transfers/' + fake_client.FAKE_UUID, 'patch', body=body)]) self.client.get_snapmirrors.assert_called_once_with( fake_client.SM_SOURCE_VSERVER, fake_client.SM_SOURCE_VOLUME, fake_client.SM_DEST_VSERVER, fake_client.SM_DEST_VOLUME) def test_abort_snapmirror_no_transfer_in_progress(self): self.mock_object(self.client, 'send_request', return_value=fake_client.NO_RECORDS_RESPONSE_REST) self.mock_object( self.client, 'get_snapmirrors', return_value=fake_client.REST_GET_SNAPMIRRORS_RESPONSE) self.assertRaises(netapp_api.NaApiError, self.client.abort_snapmirror, fake_client.SM_SOURCE_VSERVER, fake_client.SM_SOURCE_VOLUME, fake_client.SM_DEST_VSERVER, fake_client.SM_DEST_VOLUME, clear_checkpoint=True) query = {'state': 'transferring'} self.client.send_request.assert_has_calls([ mock.call('/snapmirror/relationships/' + fake_client.FAKE_UUID + '/transfers/', 'get', query=query)]) def test_delete_snapmirror(self): response_list = [fake_client.SNAPMIRROR_GET_ITER_RESPONSE_REST, fake_client.JOB_RESPONSE_REST, fake_client.JOB_SUCCESSFUL_REST] self.mock_object(self.client, 'send_request', side_effect=copy.deepcopy(response_list)) self.client.delete_snapmirror( fake_client.SM_SOURCE_VSERVER, fake_client.SM_SOURCE_VOLUME, fake_client.SM_DEST_VSERVER, 
fake_client.SM_DEST_VOLUME) query_uuid = {} query_uuid['source.path'] = (fake_client.SM_SOURCE_VSERVER + ':' + fake_client.SM_SOURCE_VOLUME) query_uuid['destination.path'] = (fake_client.SM_DEST_VSERVER + ':' + fake_client.SM_DEST_VOLUME) query_uuid['fields'] = 'uuid' query_delete = {"destination_only": "true"} self.client.send_request.assert_has_calls([ mock.call('/snapmirror/relationships/', 'get', query=query_uuid), mock.call('/snapmirror/relationships/' + fake_client.FAKE_UUID, 'delete', query=query_delete)]) def test_delete_snapmirror_timeout(self): # when a timeout happens, an exception is thrown by send_request api_error = netapp_api.NaRetryableError() self.mock_object(self.client, 'send_request', side_effect=api_error) self.assertRaises(netapp_api.NaRetryableError, self.client.delete_snapmirror, fake_client.SM_SOURCE_VSERVER, fake_client.SM_SOURCE_VOLUME, fake_client.SM_DEST_VSERVER, fake_client.SM_DEST_VOLUME) @ddt.data('async', 'sync') def test_resume_snapmirror(self, snapmirror_policy): snapmirror_response = copy.deepcopy( fake_client.SNAPMIRROR_GET_ITER_RESPONSE_REST) snapmirror_response['records'][0]['policy'] = { 'type': snapmirror_policy} if snapmirror_policy == 'async': snapmirror_response['state'] = 'snapmirrored' elif snapmirror_policy == 'sync': snapmirror_response['state'] = 'in_sync' response_list = [snapmirror_response, fake_client.JOB_RESPONSE_REST, snapmirror_response] self.mock_object(self.client, 'send_request', side_effect=copy.deepcopy(response_list)) self.client.resync_snapmirror( fake_client.SM_SOURCE_VSERVER, fake_client.SM_SOURCE_VOLUME, fake_client.SM_DEST_VSERVER, fake_client.SM_DEST_VOLUME) query_uuid = {} query_uuid['source.path'] = (fake_client.SM_SOURCE_VSERVER + ':' + fake_client.SM_SOURCE_VOLUME) query_uuid['destination.path'] = (fake_client.SM_DEST_VSERVER + ':' + fake_client.SM_DEST_VOLUME) query_uuid['fields'] = 'uuid,policy.type' body_resync = {} if snapmirror_policy == 'async': body_resync['state'] = 'snapmirrored' elif snapmirror_policy == 'sync': body_resync['state'] = 'in_sync' self.client.send_request.assert_has_calls([ mock.call('/snapmirror/relationships/', 'get', query=query_uuid), mock.call('/snapmirror/relationships/' + fake_client.FAKE_UUID, 'patch', body=body_resync)]) def test_resume_snapmirror_not_found(self): query_uuid = {} query_uuid['source.path'] = (fake_client.SM_SOURCE_VSERVER + ':' + fake_client.SM_SOURCE_VOLUME) query_uuid['destination.path'] = (fake_client.SM_DEST_VSERVER + ':' + fake_client.SM_DEST_VOLUME) query_uuid['fields'] = 'uuid,policy.type' self.mock_object( self.client, 'send_request', return_value={'records': []}) self.assertRaises( netapp_api.NaApiError, self.client.resume_snapmirror, fake_client.SM_SOURCE_VSERVER, fake_client.SM_SOURCE_VOLUME, fake_client.SM_DEST_VSERVER, fake_client.SM_DEST_VOLUME) self.client.send_request.assert_called_once_with( '/snapmirror/relationships/', 'get', query=query_uuid) def test_resume_snapmirror_api_error(self): query_resume = {} query_resume['source.path'] = (fake_client.SM_SOURCE_VSERVER + ':' + fake_client.SM_SOURCE_VOLUME) query_resume['destination.path'] = (fake_client.SM_DEST_VSERVER + ':' + fake_client.SM_DEST_VOLUME) query_uuid = copy.deepcopy(query_resume) query_uuid['fields'] = 'uuid,policy.type' api_error = netapp_api.NaApiError(code=0) self.mock_object( self.client, 'send_request', side_effect=[fake_client.SNAPMIRROR_GET_ITER_RESPONSE_REST, api_error]) self.assertRaises(netapp_api.NaApiError, self.client.resume_snapmirror, fake_client.SM_SOURCE_VSERVER, 
fake_client.SM_SOURCE_VOLUME, fake_client.SM_DEST_VSERVER, fake_client.SM_DEST_VOLUME) @ddt.data(True, False) def test_release_snapmirror(self, relationship_info_only): response_list = [fake_client.SNAPMIRROR_GET_ITER_RESPONSE_REST, fake_client.JOB_RESPONSE_REST, fake_client.JOB_SUCCESSFUL_REST] self.mock_object(self.client, 'send_request', side_effect=copy.deepcopy(response_list)) self.client.release_snapmirror( fake_client.SM_SOURCE_VSERVER, fake_client.SM_SOURCE_VOLUME, fake_client.SM_DEST_VSERVER, fake_client.SM_DEST_VOLUME, relationship_info_only) query_uuid = {} query_uuid['list_destinations_only'] = 'true' query_uuid['source.path'] = (fake_client.SM_SOURCE_VSERVER + ':' + fake_client.SM_SOURCE_VOLUME) query_uuid['destination.path'] = (fake_client.SM_DEST_VSERVER + ':' + fake_client.SM_DEST_VOLUME) query_uuid['fields'] = 'uuid' query_release = {} if relationship_info_only: # release WITHOUT removing related snapshots query_release['source_info_only'] = 'true' else: # release and REMOVING all related snapshots query_release['source_only'] = 'true' self.client.send_request.assert_has_calls([ mock.call('/snapmirror/relationships/', 'get', query=query_uuid), mock.call('/snapmirror/relationships/' + fake_client.FAKE_UUID, 'delete', query=query_release)]) def test_release_snapmirror_timeout(self): # when a timeout happens, an exception is thrown by send_request api_error = netapp_api.NaRetryableError() self.mock_object(self.client, 'send_request', side_effect=api_error) self.assertRaises(netapp_api.NaRetryableError, self.client.release_snapmirror, fake_client.SM_SOURCE_VSERVER, fake_client.SM_SOURCE_VOLUME, fake_client.SM_DEST_VSERVER, fake_client.SM_DEST_VOLUME) @ddt.data('async', 'sync') def test_resync_snapmirror(self, snapmirror_policy): snapmirror_response = copy.deepcopy( fake_client.SNAPMIRROR_GET_ITER_RESPONSE_REST) snapmirror_response['records'][0]['policy'] = { 'type': snapmirror_policy} if snapmirror_policy == 'async': snapmirror_response['state'] = 'snapmirrored' elif snapmirror_policy == 'sync': snapmirror_response['state'] = 'in_sync' response_list = [snapmirror_response, fake_client.JOB_RESPONSE_REST, snapmirror_response] self.mock_object(self.client, 'send_request', side_effect=copy.deepcopy(response_list)) self.client.resync_snapmirror( fake_client.SM_SOURCE_VSERVER, fake_client.SM_SOURCE_VOLUME, fake_client.SM_DEST_VSERVER, fake_client.SM_DEST_VOLUME) query_uuid = {} query_uuid['source.path'] = (fake_client.SM_SOURCE_VSERVER + ':' + fake_client.SM_SOURCE_VOLUME) query_uuid['destination.path'] = (fake_client.SM_DEST_VSERVER + ':' + fake_client.SM_DEST_VOLUME) query_uuid['fields'] = 'uuid,policy.type' body_resync = {} if snapmirror_policy == 'async': body_resync['state'] = 'snapmirrored' elif snapmirror_policy == 'sync': body_resync['state'] = 'in_sync' self.client.send_request.assert_has_calls([ mock.call('/snapmirror/relationships/', 'get', query=query_uuid), mock.call('/snapmirror/relationships/' + fake_client.FAKE_UUID, 'patch', body=body_resync)]) def test_resync_snapmirror_timeout(self): api_error = netapp_api.NaRetryableError() self.mock_object(self.client, 'resume_snapmirror', side_effect=api_error) self.assertRaises(netapp_api.NaRetryableError, self.client.resync_snapmirror, fake_client.SM_SOURCE_VSERVER, fake_client.SM_SOURCE_VOLUME, fake_client.SM_DEST_VSERVER, fake_client.SM_DEST_VOLUME) def test_quiesce_snapmirror(self): expected_job = { 'operation-id': None, 'status': None, 'jobid': fake_client.FAKE_UUID, 'error-code': None, 'error-message': None, 
'relationship-uuid': fake_client.FAKE_UUID, } mock_set_snapmirror_state = self.mock_object( self.client, '_set_snapmirror_state', return_value=expected_job) result = self.client.quiesce_snapmirror( fake_client.SM_SOURCE_VSERVER, fake_client.SM_SOURCE_VOLUME, fake_client.SM_DEST_VSERVER, fake_client.SM_DEST_VOLUME) mock_set_snapmirror_state.assert_called_once_with( 'paused', fake_client.SM_SOURCE_VSERVER, fake_client.SM_SOURCE_VOLUME, fake_client.SM_DEST_VSERVER, fake_client.SM_DEST_VOLUME) self.assertEqual(expected_job, result) def test_break_snapmirror(self): fake_snapmirror = fake_client.REST_GET_SNAPMIRRORS_RESPONSE fake_uuid = fake_snapmirror[0]['uuid'] fake_body = {'state': 'broken_off'} self.mock_object(self.client, 'send_request') mock_get_snap = self.mock_object( self.client, '_get_snapmirrors', mock.Mock(return_value=fake_snapmirror)) self.client.break_snapmirror( fake_client.SM_SOURCE_VSERVER, fake_client.SM_SOURCE_VOLUME, fake_client.SM_DEST_VSERVER, fake_client.SM_DEST_VOLUME) mock_get_snap.assert_called_once() self.client.send_request.assert_called_once_with( f'/snapmirror/relationships/{fake_uuid}', 'patch', body=fake_body) def test_break_snapmirror_not_found(self): self.mock_object( self.client, 'send_request', return_value={'records': []}) self.assertRaises( netapp_utils.NetAppDriverException, self.client.break_snapmirror, fake_client.SM_SOURCE_VSERVER, fake_client.SM_SOURCE_VOLUME, fake_client.SM_DEST_VSERVER, fake_client.SM_DEST_VOLUME) def test__break_snapmirror_error(self): fake_snapmirror = fake_client.REST_GET_SNAPMIRRORS_RESPONSE self.mock_object(self.client, '_get_snapmirrors', return_value=fake_snapmirror) self.mock_object(self.client, 'send_request', side_effect=self._mock_api_error()) self.assertRaises(netapp_api.NaApiError, self.client.break_snapmirror, fake_client.SM_SOURCE_VSERVER, fake_client.SM_SOURCE_VOLUME, fake_client.SM_DEST_VSERVER, fake_client.SM_DEST_VOLUME) def test__break_snapmirror_exception(self): fake_snapmirror = copy.deepcopy( fake_client.REST_GET_SNAPMIRRORS_RESPONSE) fake_snapmirror[0]['transferring-state'] = 'error' self.mock_object( self.client, '_get_snapmirrors', mock.Mock(return_value=fake_snapmirror)) self.assertRaises(netapp_utils.NetAppDriverException, self.client.break_snapmirror, fake_client.SM_SOURCE_VSERVER, fake_client.SM_SOURCE_VOLUME, fake_client.SM_DEST_VSERVER, fake_client.SM_DEST_VOLUME) def test_update_snapmirror(self): snapmirrors = fake_client.REST_GET_SNAPMIRRORS_RESPONSE self.mock_object(self.client, 'send_request') self.mock_object(self.client, 'get_snapmirrors', return_value=snapmirrors) self.client.update_snapmirror( fake_client.SM_SOURCE_VSERVER, fake_client.SM_SOURCE_VOLUME, fake_client.SM_DEST_VSERVER, fake_client.SM_DEST_VOLUME) self.client.send_request.assert_has_calls([ mock.call('/snapmirror/relationships/' + snapmirrors[0]['uuid'] + '/transfers/', 'post', wait_on_accepted=False)]) def test_update_snapmirror_no_records(self): self.mock_object(self.client, 'send_request') self.mock_object(self.client, 'get_snapmirrors', return_value=[]) self.assertRaises(netapp_utils.NetAppDriverException, self.client.update_snapmirror, fake_client.SM_SOURCE_VSERVER, fake_client.SM_SOURCE_VOLUME, fake_client.SM_DEST_VSERVER, fake_client.SM_DEST_VOLUME) self.client.send_request.assert_not_called() def test_update_snapmirror_exception(self): snapmirrors = fake_client.REST_GET_SNAPMIRRORS_RESPONSE api_error = netapp_api.NaApiError( code=netapp_api.REST_UPDATE_SNAPMIRROR_FAILED) self.mock_object(self.client, 'send_request', 
side_effect=api_error) self.mock_object(self.client, 'get_snapmirrors', return_value=snapmirrors) self.assertRaises(netapp_api.NaApiError, self.client.update_snapmirror, fake_client.SM_SOURCE_VSERVER, fake_client.SM_SOURCE_VOLUME, fake_client.SM_DEST_VSERVER, fake_client.SM_DEST_VOLUME) self.client.send_request.assert_has_calls([ mock.call('/snapmirror/relationships/' + snapmirrors[0]['uuid'] + '/transfers/', 'post', wait_on_accepted=False)]) def test_mount_flexvol(self): volumes = fake_client.VOLUME_GET_ITER_SSC_RESPONSE_REST self.mock_object(self.client, 'send_request', side_effect=[volumes, None]) fake_path = '/fake_path' fake_vol_name = volumes['records'][0]['name'] body = { 'nas.path': fake_path } query = { 'name': fake_vol_name } self.client.mount_flexvol(fake_client.VOLUME_NAME, junction_path=fake_path) self.client.send_request.assert_has_calls([ mock.call('/storage/volumes', 'patch', body=body, query=query)]) def test_mount_flexvol_default_junction_path(self): volumes = fake_client.VOLUME_GET_ITER_SSC_RESPONSE_REST self.mock_object(self.client, 'send_request', side_effect=[volumes, None]) fake_vol_name = volumes['records'][0]['name'] body = { 'nas.path': '/' + fake_client.VOLUME_NAME } query = { 'name': fake_vol_name } self.client.mount_flexvol(fake_client.VOLUME_NAME) self.client.send_request.assert_has_calls([ mock.call('/storage/volumes', 'patch', body=body, query=query)]) def test_get_cluster_name(self): query = {'fields': 'name'} self.mock_object( self.client, 'send_request', return_value=fake_client.GET_CLUSTER_NAME_RESPONSE_REST) result = self.client.get_cluster_name() self.client.send_request.assert_called_once_with( '/cluster', 'get', query=query, enable_tunneling=False) self.assertEqual( fake_client.GET_CLUSTER_NAME_RESPONSE_REST['name'], result) @ddt.data( (fake_client.VSERVER_NAME, fake_client.VSERVER_NAME_2), (fake_client.VSERVER_NAME, None), (None, fake_client.VSERVER_NAME_2), (None, None)) @ddt.unpack def test_get_vserver_peers(self, svm_name, peer_svm_name): query = { 'fields': 'svm.name,state,peer.svm.name,peer.cluster.name,' 'applications' } if peer_svm_name: query['name'] = peer_svm_name if svm_name: query['svm.name'] = svm_name vserver_info = fake_client.GET_VSERVER_PEERS_RECORDS_REST[0] expected_result = [{ 'vserver': vserver_info['svm']['name'], 'peer-vserver': vserver_info['peer']['svm']['name'], 'peer-state': vserver_info['state'], 'peer-cluster': vserver_info['peer']['cluster']['name'], 'applications': vserver_info['applications'], }] self.mock_object( self.client, 'send_request', return_value=fake_client.GET_VSERVER_PEERS_RESPONSE_REST) result = self.client.get_vserver_peers( vserver_name=svm_name, peer_vserver_name=peer_svm_name) self.client.send_request.assert_called_once_with( '/svm/peers', 'get', query=query, enable_tunneling=False) self.assertEqual(expected_result, result) def test_get_vserver_peers_empty(self): vserver_peers_response = copy.deepcopy( fake_client.GET_VSERVER_PEERS_RESPONSE_REST) vserver_peers_response['records'] = [] vserver_peers_response['num_records'] = 0 query = { 'fields': 'svm.name,state,peer.svm.name,peer.cluster.name,' 'applications' } self.mock_object( self.client, 'send_request', return_value=vserver_peers_response) result = self.client.get_vserver_peers() self.client.send_request.assert_called_once_with( '/svm/peers', 'get', query=query, enable_tunneling=False) self.assertEqual([], result) @ddt.data(['snapmirror', 'lun_copy'], None) def test_create_vserver_peer(self, applications): body = { 'svm.name': 
fake_client.VSERVER_NAME, 'name': fake_client.VSERVER_NAME_2, 'applications': applications if applications else ['snapmirror'] } self.mock_object(self.client, 'send_request') self.client.create_vserver_peer( fake_client.VSERVER_NAME, fake_client.VSERVER_NAME_2, vserver_peer_application=applications) self.client.send_request.assert_called_once_with( '/svm/peers', 'post', body=body, enable_tunneling=False) @ddt.data( (fake.VOLUME_NAME, fake.LUN_NAME), (None, fake.LUN_NAME), (fake.VOLUME_NAME, None), (None, None) ) @ddt.unpack def test_start_lun_move(self, src_vol, dest_lun): src_lun = f'src-lun-{fake.LUN_NAME}' dest_vol = f'dest-vol-{fake.VOLUME_NAME}' src_path = f'/vol/{src_vol if src_vol else dest_vol}/{src_lun}' dest_path = f'/vol/{dest_vol}/{dest_lun if dest_lun else src_lun}' body = {'name': dest_path} self.mock_object(self.client, '_lun_update_by_path') result = self.client.start_lun_move( src_lun, dest_vol, src_ontap_volume=src_vol, dest_lun_name=dest_lun) self.client._lun_update_by_path.assert_called_once_with( src_path, body) self.assertEqual(dest_path, result) @ddt.data(fake_client.LUN_GET_MOVEMENT_REST, None) def test_get_lun_move_status(self, lun_moved): dest_path = f'/vol/{fake.VOLUME_NAME}/{fake.LUN_NAME}' move_status = None if lun_moved: move_progress = lun_moved['movement']['progress'] move_status = { 'job-status': move_progress['state'], 'last-failure-reason': move_progress['failure']['message'] } self.mock_object(self.client, '_get_first_lun_by_path', return_value=lun_moved) result = self.client.get_lun_move_status(dest_path) self.client._get_first_lun_by_path.assert_called_once_with( dest_path, fields='movement.progress') self.assertEqual(move_status, result) @ddt.data( (fake.VOLUME_NAME, fake.LUN_NAME), (None, fake.LUN_NAME), (fake.VOLUME_NAME, None), (None, None) ) @ddt.unpack def test_start_lun_copy(self, src_vol, dest_lun): src_lun = f'src-lun-{fake.LUN_NAME}' dest_vol = f'dest-vol-{fake.VOLUME_NAME}' dest_vserver = f'dest-vserver-{fake.VSERVER_NAME}' src_path = f'/vol/{src_vol if src_vol else dest_vol}/{src_lun}' dest_path = f'/vol/{dest_vol}/{dest_lun if dest_lun else src_lun}' body = { 'name': dest_path, 'copy.source.name': src_path, 'svm.name': dest_vserver } self.mock_object(self.client, 'send_request') result = self.client.start_lun_copy( src_lun, dest_vol, dest_vserver, src_ontap_volume=src_vol, src_vserver=fake_client.VSERVER_NAME, dest_lun_name=dest_lun) self.client.send_request.assert_called_once_with( '/storage/luns', 'post', body=body, enable_tunneling=False) self.assertEqual(dest_path, result) @ddt.data(fake_client.LUN_GET_COPY_REST, None) def test_get_lun_copy_status(self, lun_copied): dest_path = f'/vol/{fake.VOLUME_NAME}/{fake.LUN_NAME}' copy_status = None if lun_copied: copy_progress = lun_copied['copy']['source']['progress'] copy_status = { 'job-status': copy_progress['state'], 'last-failure-reason': copy_progress['failure']['message'] } self.mock_object(self.client, '_get_first_lun_by_path', return_value=lun_copied) result = self.client.get_lun_copy_status(dest_path) self.client._get_first_lun_by_path.assert_called_once_with( dest_path, fields='copy.source.progress') self.assertEqual(copy_status, result) def test_cancel_lun_copy(self): dest_path = f'/vol/{fake_client.VOLUME_NAME}/{fake_client.FILE_NAME}' query = { 'name': dest_path, 'svm.name': fake_client.VSERVER_NAME } self.mock_object(self.client, 'send_request') self.client.cancel_lun_copy(dest_path) self.client.send_request.assert_called_once_with('/storage/luns/', 'delete', query=query) def 
test_cancel_lun_copy_exception(self): dest_path = f'/vol/{fake_client.VOLUME_NAME}/{fake_client.FILE_NAME}' query = { 'name': dest_path, 'svm.name': fake_client.VSERVER_NAME } self.mock_object(self.client, 'send_request', side_effect=self._mock_api_error()) self.assertRaises( netapp_utils.NetAppDriverException, self.client.cancel_lun_copy, dest_path) self.client.send_request.assert_called_once_with('/storage/luns/', 'delete', query=query) # TODO(rfluisa): Add ddt data with None values for optional parameters to # improve coverage. def test_start_file_copy(self): volume = fake_client.VOLUME_ITEM_SIMPLE_RESPONSE_REST file_name = fake_client.FILE_NAME dest_ontap_volume = fake_client.VOLUME_NAME src_ontap_volume = dest_ontap_volume dest_file_name = file_name response = {'job': {'uuid': 'fake-uuid'}} body = { 'files_to_copy': [ { 'source': { 'path': f'{src_ontap_volume}/{file_name}', 'volume': { 'uuid': volume['uuid'] } }, 'destination': { 'path': f'{dest_ontap_volume}/{dest_file_name}', 'volume': { 'uuid': volume['uuid'] } } } ] } self.mock_object(self.client, '_get_volume_by_args', return_value=volume) self.mock_object(self.client, 'send_request', return_value=response) result = self.client.start_file_copy( file_name, dest_ontap_volume, src_ontap_volume=src_ontap_volume, dest_file_name=dest_file_name) self.client.send_request.assert_called_once_with( '/storage/file/copy', 'post', body=body, enable_tunneling=False) self.assertEqual(response['job']['uuid'], result) # TODO(rfluisa): Add ddt data with None values for possible api responses # to improve coverage. def test_get_file_copy_status(self): job_uuid = fake_client.FAKE_UUID query = {} query['fields'] = '*' response = { 'state': 'fake-state', 'error': { 'message': 'fake-error-message' } } expected_result = { 'job-status': response['state'], 'last-failure-reason': response['error']['message'] } self.mock_object(self.client, 'send_request', return_value=response) result = self.client.get_file_copy_status(job_uuid) self.client.send_request.assert_called_once_with( f'/cluster/jobs/{job_uuid}', 'get', query=query, enable_tunneling=False) self.assertEqual(expected_result, result) @ddt.data(('success', 'complete'), ('failure', 'destroyed')) @ddt.unpack def test_get_file_copy_status_translate_state(self, from_state, to_state): job_uuid = fake_client.FAKE_UUID query = {} query['fields'] = '*' response = { 'state': from_state, 'error': { 'message': 'fake-error-message' } } expected_result = { 'job-status': to_state, 'last-failure-reason': response['error']['message'] } self.mock_object(self.client, 'send_request', return_value=response) result = self.client.get_file_copy_status(job_uuid) self.client.send_request.assert_called_once_with( f'/cluster/jobs/{job_uuid}', 'get', query=query, enable_tunneling=False) self.assertEqual(expected_result, result) def test_rename_file(self): volume = fake_client.VOLUME_ITEM_SIMPLE_RESPONSE_REST orig_file_name = f'/vol/{fake_client.VOLUME_NAMES[0]}/cinder-vol' new_file_name = f'/vol/{fake_client.VOLUME_NAMES[0]}/new-cinder-vol' body = {'path': new_file_name.split('/')[3]} self.mock_object(self.client, 'send_request') self.mock_object(self.client, '_get_volume_by_args', return_value=volume) self.client.rename_file(orig_file_name, new_file_name) orig_file_name = orig_file_name.split('/')[3] self.client.send_request.assert_called_once_with( f'/storage/volumes/{volume["uuid"]}/files/{orig_file_name}', 'patch', body=body) self.client._get_volume_by_args.assert_called_once_with( vol_name=fake_client.VOLUME_NAMES[0]) def 
test_get_namespace_list(self): response = fake_client.GET_NAMESPACE_RESPONSE_REST fake_query = { 'svm.name': 'fake_vserver', 'fields': 'svm.name,location.volume.name,space.size,' 'location.qtree.name,name,os_type,' 'space.guarantee.requested,uuid' } expected_result = [ { 'Vserver': 'fake_vserver1', 'Volume': 'fake_vol_001', 'Size': 999999, 'Qtree': '', 'Path': '/vol/fake_vol_001/test', 'OsType': 'linux', 'SpaceReserved': True, 'UUID': 'fake_uuid1' }, { 'Vserver': 'fake_vserver2', 'Volume': 'fake_vol_002', 'Size': 8888888, 'Qtree': '', 'Path': '/vol/fake_vol_002/test', 'OsType': 'linux', 'SpaceReserved': True, 'UUID': 'fake_uuid2' }, ] self.mock_object(self.client, 'send_request', return_value=response) result = self.client.get_namespace_list() self.client.send_request.assert_called_once_with( '/storage/namespaces/', 'get', query=fake_query) self.assertEqual(expected_result, result) def test_get_namespace_list_no_response(self): response = fake_client.NO_RECORDS_RESPONSE_REST fake_query = { 'svm.name': 'fake_vserver', 'fields': 'svm.name,location.volume.name,space.size,' 'location.qtree.name,name,os_type,' 'space.guarantee.requested,uuid' } self.mock_object(self.client, 'send_request', return_value=response) result = self.client.get_namespace_list() self.client.send_request.assert_called_once_with( '/storage/namespaces/', 'get', query=fake_query) self.assertEqual([], result) def test_destroy_namespace(self): fake_query = { 'name': '/vol/fake_vol_001/test', 'svm': 'fake_vserver' } self.mock_object(self.client, 'send_request') self.client.destroy_namespace('/vol/fake_vol_001/test', force=False) self.client.send_request.assert_called_once_with( '/storage/namespaces', 'delete', query=fake_query) def test_destroy_namespace_force_true(self): fake_query = { 'name': '/vol/fake_vol_001/test', 'svm': 'fake_vserver', 'allow_delete_while_mapped': 'true' } self.mock_object(self.client, 'send_request') self.client.destroy_namespace('/vol/fake_vol_001/test', force=True) self.client.send_request.assert_called_once_with( '/storage/namespaces', 'delete', query=fake_query) def test_clone_namespace(self): fake_body = { 'svm': { 'name': 'fake_vserver' }, 'name': '/vol/fake_volume/fake_new_name', 'clone': { 'source': { 'name': '/vol/fake_volume/fake_name', } } } self.mock_object(self.client, 'send_request') self.client.clone_namespace('fake_volume', 'fake_name', 'fake_new_name') self.client.send_request.assert_called_once_with( '/storage/namespaces', 'post', body=fake_body) def test_get_namespace_by_args(self): response = fake_client.GET_NAMESPACE_RESPONSE_REST lun_info_args = { 'vserver': fake.VSERVER_NAME, 'path': fake.LUN_PATH, 'uuid': fake.UUID1} fake_query = { 'fields': 'svm.name,location.volume.name,space.size,' 'location.qtree.name,name,os_type,' 'space.guarantee.requested,uuid,space.block_size', 'svm.name': fake.VSERVER_NAME, 'name': fake.LUN_PATH, 'uuid': fake.UUID1, } expected_result = [ { 'Vserver': 'fake_vserver1', 'Volume': 'fake_vol_001', 'Size': 999999, 'Qtree': '', 'Path': '/vol/fake_vol_001/test', 'OsType': 'linux', 'SpaceReserved': True, 'UUID': 'fake_uuid1', 'BlockSize': 9999 }, { 'Vserver': 'fake_vserver2', 'Volume': 'fake_vol_002', 'Size': 8888888, 'Qtree': '', 'Path': '/vol/fake_vol_002/test', 'OsType': 'linux', 'SpaceReserved': True, 'UUID': 'fake_uuid2', 'BlockSize': 8888 }, ] self.mock_object(self.client, 'send_request', return_value=response) result = self.client.get_namespace_by_args(**lun_info_args) self.client.send_request.assert_called_once_with( '/storage/namespaces', 'get', 
query=fake_query) self.assertEqual(expected_result, result) def test_get_namespace_by_args_no_response(self): response = fake_client.NO_RECORDS_RESPONSE_REST lun_info_args = { 'vserver': fake.VSERVER_NAME, 'path': fake.LUN_PATH, 'uuid': fake.UUID1} fake_query = { 'fields': 'svm.name,location.volume.name,space.size,' 'location.qtree.name,name,os_type,' 'space.guarantee.requested,uuid,space.block_size', 'svm.name': fake.VSERVER_NAME, 'name': fake.LUN_PATH, 'uuid': fake.UUID1, } self.mock_object(self.client, 'send_request', return_value=response) result = self.client.get_namespace_by_args(**lun_info_args) self.client.send_request.assert_called_once_with( '/storage/namespaces', 'get', query=fake_query) self.assertEqual([], result) def test_namespace_resize(self): fake_body = {'space.size': 9999} fake_query = {'name': fake.LUN_PATH} self.mock_object(self.client, 'send_request') self.client.namespace_resize(fake.LUN_PATH, 9999) self.client.send_request.assert_called_once_with( '/storage/namespaces', 'patch', body=fake_body, query=fake_query) def test_get_namespace_sizes_by_volume(self): response = fake_client.GET_NAMESPACE_RESPONSE_REST fake_query = { 'location.volume.name': 'fake_volume', 'svm.name': fake_client.VSERVER_NAME, 'fields': 'space.size,name' } expected_result = [ { 'path': '/vol/fake_vol_001/test', 'size': 999999, }, { 'path': '/vol/fake_vol_002/test', 'size': 8888888, }, ] self.mock_object(self.client, 'send_request', return_value=response) result = self.client.get_namespace_sizes_by_volume('fake_volume') self.client.send_request.assert_called_once_with( '/storage/namespaces', 'get', query=fake_query) self.assertEqual(expected_result, result) def test_get_namespace_sizes_by_volume_no_response(self): response = fake_client.NO_RECORDS_RESPONSE_REST fake_query = { 'location.volume.name': 'fake_volume', 'svm.name': fake_client.VSERVER_NAME, 'fields': 'space.size,name' } self.mock_object(self.client, 'send_request', return_value=response) result = self.client.get_namespace_sizes_by_volume('fake_volume') self.client.send_request.assert_called_once_with( '/storage/namespaces', 'get', query=fake_query) self.assertEqual([], result) def test_create_namespace(self): """Issues API request for creating namespace on volume.""" self.mock_object(self.client, 'send_request') self.client.create_namespace( fake_client.VOLUME_NAME, fake_client.NAMESPACE_NAME, fake_client.VOLUME_SIZE_TOTAL, {'OsType': 'linux'}) path = f'/vol/{fake_client.VOLUME_NAME}/{fake_client.NAMESPACE_NAME}' body = { 'name': path, 'space.size': str(fake_client.VOLUME_SIZE_TOTAL), 'os_type': 'linux', } self.client.send_request.assert_called_once_with( '/storage/namespaces', 'post', body=body) def test_create_namespace_error(self): api_error = netapp_api.NaApiError(code=0) self.mock_object(self.client, 'send_request', side_effect=api_error) self.assertRaises( netapp_api.NaApiError, self.client.create_namespace, fake_client.VOLUME_NAME, fake_client.NAMESPACE_NAME, fake_client.VOLUME_SIZE_TOTAL, {'OsType': 'linux'}) def test_get_subsystem_by_host(self): response = fake_client.GET_SUBSYSTEM_RESPONSE_REST self.mock_object(self.client, 'send_request', return_value=response) res = self.client.get_subsystem_by_host(fake_client.HOST_NQN) expected_res = [ {'name': fake_client.SUBSYSTEM, 'os_type': 'linux'}] self.assertEqual(expected_res, res) query = { 'svm.name': self.client.vserver, 'hosts.nqn': fake_client.HOST_NQN, 'fields': 'name,os_type', 'name': 'openstack-*', } self.client.send_request.assert_called_once_with( 
'/protocols/nvme/subsystems', 'get', query=query) def test_get_subsystem_by_path(self): response = fake_client.GET_SUBSYSTEM_RESPONSE_REST self.mock_object(self.client, 'send_request', return_value=response) res = self.client.get_subsystem_by_path(fake_client.NAMESPACE_NAME) expected_res = [{'name': fake_client.SUBSYSTEM, 'os_type': 'linux'}] self.assertEqual(expected_res, res) query = { 'svm.name': self.client.vserver, 'subsystem_maps.namespace.name': fake_client.NAMESPACE_NAME, 'fields': 'name,os_type', 'name': 'openstack-*', } self.client.send_request.assert_called_once_with( '/protocols/nvme/subsystems', 'get', query=query) def test_get_subsystem_by_path_no_records(self): response = fake_client.NO_RECORDS_RESPONSE_REST self.mock_object(self.client, 'send_request', return_value=response) res = self.client.get_subsystem_by_path(fake_client.NAMESPACE_NAME) self.assertEqual([], res) query = { 'svm.name': self.client.vserver, 'subsystem_maps.namespace.name': fake_client.NAMESPACE_NAME, 'fields': 'name,os_type', 'name': 'openstack-*', } self.client.send_request.assert_called_once_with( '/protocols/nvme/subsystems', 'get', query=query) def test_create_subsystem(self): self.mock_object(self.client, 'send_request') self.client.create_subsystem(fake_client.SUBSYSTEM, 'linux', fake_client.HOST_NQN) body = { 'svm.name': self.client.vserver, 'name': fake_client.SUBSYSTEM, 'os_type': 'linux', 'hosts': [{'nqn': fake_client.HOST_NQN}] } self.client.send_request.assert_called_once_with( '/protocols/nvme/subsystems', 'post', body=body) def test_get_namespace_map(self): response = fake_client.GET_SUBSYSTEM_MAP_RESPONSE_REST self.mock_object(self.client, 'send_request', return_value=response) res = self.client.get_namespace_map(fake_client.NAMESPACE_NAME) expected_res = [ {'subsystem': fake_client.SUBSYSTEM, 'subsystem_uuid': fake_client.FAKE_UUID, 'uuid': fake_client.FAKE_UUID, 'vserver': fake_client.VSERVER_NAME}] self.assertEqual(expected_res, res) query = { 'namespace.name': fake_client.NAMESPACE_NAME, 'fields': 'subsystem.name,namespace.uuid,svm.name,' 'subsystem.uuid', } self.client.send_request.assert_called_once_with( '/protocols/nvme/subsystem-maps', 'get', query=query) def test_map_namespace(self): response = fake_client.GET_SUBSYSTEM_MAP_RESPONSE_REST self.mock_object(self.client, 'send_request', return_value=response) res = self.client.map_namespace(fake_client.NAMESPACE_NAME, fake_client.SUBSYSTEM) self.assertEqual(fake_client.FAKE_UUID, res) body = { 'namespace.name': fake_client.NAMESPACE_NAME, 'subsystem.name': fake_client.SUBSYSTEM } self.client.send_request.assert_called_once_with( '/protocols/nvme/subsystem-maps', 'post', body=body, query={'return_records': 'true'}) def test_map_namespace_error(self): api_error = netapp_api.NaApiError(code=0) self.mock_object(self.client, 'send_request', side_effect=api_error) self.assertRaises( netapp_api.NaApiError, self.client.map_namespace, fake_client.VOLUME_NAME, fake_client.SUBSYSTEM) @ddt.data( {'response': fake_client.GET_SUBSYSTEM_RESPONSE_REST, 'expected': fake_client.TARGET_NQN}, {'response': fake_client.NO_RECORDS_RESPONSE_REST, 'expected': None}) @ddt.unpack def test_get_nvme_subsystem_nqn(self, response, expected): self.mock_object(self.client, 'send_request', return_value=response) res = self.client.get_nvme_subsystem_nqn(fake_client.SUBSYSTEM) self.assertEqual(expected, res) query = { 'fields': 'target_nqn', 'name': fake_client.SUBSYSTEM, 'svm.name': self.client.vserver } self.client.send_request.assert_called_once_with( 
'/protocols/nvme/subsystems', 'get', query=query) def test_get_nvme_target_portals(self): response = fake_client.GET_INTERFACES_NVME_REST self.mock_object(self.client, 'send_request', return_value=response) res = self.client.get_nvme_target_portals() expected = ["10.10.10.10"] self.assertEqual(expected, res) query = { 'services': 'data_nvme_tcp', 'fields': 'ip.address', 'enabled': 'true', } self.client.send_request.assert_called_once_with( '/network/ip/interfaces', 'get', query=query) def test_unmap_namespace(self): self.mock_object(self.client, 'send_request') self.client.unmap_namespace(fake_client.NAMESPACE_NAME, fake_client.SUBSYSTEM) query = { 'subsystem.name': fake_client.SUBSYSTEM, 'namespace.name': fake_client.NAMESPACE_NAME, } self.client.send_request.assert_called_once_with( '/protocols/nvme/subsystem-maps', 'delete', query=query) def test_unmap_host_with_subsystem(self): url = ( f'/protocols/nvme/subsystems/{fake_client.SUBSYSTEM_UUID}/' f'hosts/{fake_client.HOST_NQN}' ) self.mock_object(self.client, 'send_request') self.client.unmap_host_with_subsystem( fake_client.HOST_NQN, fake_client.SUBSYSTEM_UUID ) self.client.send_request.assert_called_once_with(url, 'delete') def test_unmap_host_with_subsystem_api_error(self): url = ( f'/protocols/nvme/subsystems/{fake_client.SUBSYSTEM_UUID}/' f'hosts/{fake_client.HOST_NQN}' ) api_error = netapp_api.NaApiError(code=123, message='fake_error') self.mock_object(self.client, 'send_request', side_effect=api_error) mock_log_warning = self.mock_object(client_cmode_rest.LOG, 'warning') self.client.unmap_host_with_subsystem( fake_client.HOST_NQN, fake_client.SUBSYSTEM_UUID ) self.client.send_request.assert_called_once_with(url, 'delete') mock_log_warning.assert_called_once_with( "Failed to unmap host from subsystem. 
" "Host NQN: %(host_nqn)s, Subsystem UUID: %(subsystem_uuid)s, " "Error Code: %(code)s, Error Message: %(message)s", {'host_nqn': fake_client.HOST_NQN, 'subsystem_uuid': fake_client.SUBSYSTEM_UUID, 'code': api_error.code, 'message': api_error.message}) def test_map_host_with_subsystem(self): url = f'/protocols/nvme/subsystems/{fake_client.SUBSYSTEM_UUID}/hosts' body_post = {'nqn': fake_client.HOST_NQN} self.mock_object(self.client, 'send_request') self.client.map_host_with_subsystem( fake_client.HOST_NQN, fake_client.SUBSYSTEM_UUID ) self.client.send_request.assert_called_once_with( url, 'post', body=body_post ) def test_map_host_with_subsystem_already_mapped(self): url = f'/protocols/nvme/subsystems/{fake_client.SUBSYSTEM_UUID}/hosts' body_post = {'nqn': fake_client.HOST_NQN} api_error = ( netapp_api.NaApiError( code=netapp_api.REST_HOST_ALREADY_MAPPED_TO_SUBSYSTEM, message='fake_error') ) self.mock_object(self.client, 'send_request', side_effect=api_error) mock_log_info = self.mock_object(client_cmode_rest.LOG, 'info') self.client.map_host_with_subsystem( fake_client.HOST_NQN, fake_client.SUBSYSTEM_UUID ) self.client.send_request.assert_called_once_with( url, 'post', body=body_post ) mock_log_info.assert_called_once_with( "Host %(host_nqn)s is already mapped to subsystem" " %(subsystem_uuid)s ", {'host_nqn': fake_client.HOST_NQN, 'subsystem_uuid': fake_client.SUBSYSTEM_UUID } ) def test_map_host_with_subsystem_api_error(self): url = f'/protocols/nvme/subsystems/{fake_client.SUBSYSTEM_UUID}/hosts' body_post = {'nqn': fake_client.HOST_NQN} api_error = netapp_api.NaApiError(code=123, message='fake_error') self.mock_object(self.client, 'send_request', side_effect=api_error) mock_log_error = self.mock_object(client_cmode_rest.LOG, 'error') self.assertRaises(netapp_api.NaApiError, self.client.map_host_with_subsystem, fake_client.HOST_NQN, fake_client.SUBSYSTEM_UUID ) self.client.send_request.assert_called_once_with( url, 'post', body=body_post ) mock_log_error.assert_called_once_with( "Error mapping host to subsystem. Code :" "%(code)s, Message: %(message)s", {'code': api_error.code, 'message': api_error.message}) ././@PaxHeader0000000000000000000000000000020600000000000011453 xustar0000000000000000112 path=cinder-27.0.0/cinder/tests/unit/volume/drivers/netapp/dataontap/client/test_client_cmode_rest_asar2.py 22 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/volume/drivers/netapp/dataontap/client/test_client_cmode_rest_asar2.0000664000175000017500000011555300000000000034005 0ustar00zuulzuul00000000000000# Copyright (c) 2025 NetApp, Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import copy from unittest import mock from unittest.mock import patch import uuid import ddt from cinder.tests.unit import test from cinder.tests.unit.volume.drivers.netapp.dataontap.client import ( fakes as fake_client) from cinder.tests.unit.volume.drivers.netapp.dataontap import fakes as fake from cinder.volume.drivers.netapp.dataontap.client import api as netapp_api from cinder.volume.drivers.netapp.dataontap.client import client_cmode from cinder.volume.drivers.netapp.dataontap.client import client_cmode_rest from cinder.volume.drivers.netapp.dataontap.client.client_cmode_rest_asar2\ import RestClientASAr2 from cinder.volume.drivers.netapp import utils as netapp_utils CONNECTION_INFO = {'hostname': 'hostname', 'transport_type': 'https', 'port': 443, 'username': 'admin', 'password': 'passw0rd', 'vserver': 'fake_vserver', 'ssl_cert_path': 'fake_ca', 'api_trace_pattern': 'fake_regex', 'private_key_file': 'fake_private_key.pem', 'certificate_file': 'fake_cert.pem', 'ca_certificate_file': 'fake_ca_cert.crt', 'certificate_host_validation': 'False', 'is_disaggregated': 'True', # ASA r2 is disaggregated } @ddt.ddt class NetAppRestCmodeASAr2ClientTestCase(test.TestCase): def setUp(self): super(NetAppRestCmodeASAr2ClientTestCase, self).setUp() # Setup Client mocks self.mock_object(client_cmode.Client, '_init_ssh_client') # store the original reference so we can call it later in # test__get_cluster_nodes_info self.original_get_cluster_nodes_info = ( client_cmode.Client._get_cluster_nodes_info) self.mock_object(client_cmode.Client, '_get_cluster_nodes_info', return_value=fake.HYBRID_SYSTEM_NODES_INFO) self.mock_object(client_cmode.Client, 'get_ontap_version', return_value=(9, 16, 1)) self.mock_object(client_cmode.Client, 'get_ontapi_version', return_value=(0, 0)) # Setup RestClient mocks self.mock_object(client_cmode_rest.RestClient, '_init_ssh_client') self.original_get_cluster_nodes_info = ( client_cmode_rest.RestClient._get_cluster_nodes_info) if not hasattr(client_cmode_rest.RestClient, '_get_cluster_nodes_info'): setattr(client_cmode_rest.RestClient, '_get_cluster_nodes_info', None) self.original_get_cluster_nodes_info = ( client_cmode_rest.RestClient._get_cluster_nodes_info) self.mock_object(client_cmode_rest.RestClient, '_get_cluster_nodes_info', return_value=fake.HYBRID_SYSTEM_NODES_INFO) self.mock_object(client_cmode_rest.RestClient, 'get_ontap_version', return_value=(9, 16, 1)) # Setup ASA r2 specific mocks self.mock_object(RestClientASAr2, '_init_ssh_client') self.mock_object(RestClientASAr2, '_get_cluster_nodes_info', return_value=fake.HYBRID_SYSTEM_NODES_INFO) self.mock_object(RestClientASAr2, 'get_ontap_version', return_value=(9, 16, 1)) with mock.patch.object(RestClientASAr2, 'get_ontap_version', return_value=(9, 16, 1)): self.client = RestClientASAr2(**CONNECTION_INFO) self.client.ssh_client = mock.MagicMock() self.client.connection = mock.MagicMock() self.connection = self.client.connection self.vserver = CONNECTION_INFO['vserver'] self.fake_volume = str(uuid.uuid4()) self.fake_lun = str(uuid.uuid4()) def _mock_api_error(self, code='fake'): return mock.Mock(side_effect=netapp_api.NaApiError(code=code)) def test_initialization(self): """Test ASA r2 client initialization.""" self.assertIsInstance(self.client, RestClientASAr2) self.assertIsInstance(self.client, client_cmode_rest.RestClient) def test_init_asar2_features(self): """Test ASA r2 specific features initialization.""" # Test that _init_asar2_features is called during initialization with mock.patch.object(RestClientASAr2, 
'_init_asar2_features') as mock_init: with mock.patch.object(RestClientASAr2, 'get_ontap_version', return_value=(9, 16, 1)): RestClientASAr2(**CONNECTION_INFO) mock_init.assert_called_once() @ddt.data(True, False) def test_get_ontapi_version(self, cached): """Test that ASA r2 returns (0, 0) for ONTAPI version.""" result = self.client.get_ontapi_version(cached=cached) expected = (0, 0) self.assertEqual(expected, result) def test_getattr_missing_method(self): """Test __getattr__ behavior for missing methods.""" result = getattr(self.client, 'nonexistent_method', None) self.assertIsNone(result) def test_send_request_inherits_from_parent(self): """Test that send_request inherits behavior from parent class.""" expected = 'fake_response' mock_get_records = self.mock_object( self.client, 'get_records', mock.Mock(return_value=expected)) res = self.client.send_request( fake_client.FAKE_ACTION_ENDPOINT, 'get', body=fake_client.FAKE_BODY, query=fake_client.FAKE_HTTP_QUERY, enable_tunneling=False) self.assertEqual(expected, res) mock_get_records.assert_called_once_with( fake_client.FAKE_ACTION_ENDPOINT, fake_client.FAKE_HTTP_QUERY, False, 10000) def test_send_request_post_inherits_from_parent(self): """Test that send_request POST inherits behavior from parent class.""" expected = (201, 'fake_response') mock_invoke = self.mock_object( self.client.connection, 'invoke_successfully', mock.Mock(return_value=expected)) res = self.client.send_request( fake_client.FAKE_ACTION_ENDPOINT, 'post', body=fake_client.FAKE_BODY, query=fake_client.FAKE_HTTP_QUERY, enable_tunneling=False) self.assertEqual(expected[1], res) mock_invoke.assert_called_once_with( fake_client.FAKE_ACTION_ENDPOINT, 'post', body=fake_client.FAKE_BODY, query=fake_client.FAKE_HTTP_QUERY, enable_tunneling=False) @ddt.data( {'enable_tunneling': True}, {'enable_tunneling': False} ) @ddt.unpack def test_get_records_inherits_from_parent(self, enable_tunneling): """Test that get_records inherits behavior from parent class.""" api_responses = [ (200, fake_client.VOLUME_GET_ITER_RESPONSE_REST_PAGE), (200, fake_client.VOLUME_GET_ITER_RESPONSE_REST_PAGE), (200, fake_client.VOLUME_GET_ITER_RESPONSE_REST_LAST_PAGE), ] self.mock_object( self.client.connection, 'invoke_successfully', side_effect=copy.deepcopy(api_responses)) query = { 'fields': 'name' } result = self.client.get_records( '/storage/volumes/', query=query, enable_tunneling=enable_tunneling, max_page_length=10) num_records = result['num_records'] self.assertEqual(28, num_records) self.assertEqual(28, len(result['records'])) expected_records = [] expected_records.extend(api_responses[0][1]['records']) expected_records.extend(api_responses[1][1]['records']) expected_records.extend(api_responses[2][1]['records']) self.assertEqual(expected_records, result['records']) def test_send_ems_log_message_inherits_from_parent(self): """Test send_ems_log_message inherits behavior""" message_dict = { 'computer-name': '25-dev-vm', 'event-source': 'Cinder driver NetApp_iSCSI_ASAr2_direct', 'app-version': 'dummy app version', 'category': 'provisioning', 'log-level': '5', 'auto-support': 'false', 'event-id': '1', 'event-description': '{"pools": {"vserver": "vserver_name",' + '"aggregates": [], "flexvols": ["flexvol_01"]}}' } body = { 'computer_name': message_dict['computer-name'], 'event_source': message_dict['event-source'], 'app_version': message_dict['app-version'], 'category': message_dict['category'], 'severity': 'notice', 'autosupport_required': message_dict['auto-support'] == 'true', 'event_id': 
message_dict['event-id'], 'event_description': message_dict['event-description'], } self.mock_object(self.client, '_get_ems_log_destination_vserver', return_value='vserver_name') self.mock_object(self.client, 'send_request') self.client.send_ems_log_message(message_dict) self.client.send_request.assert_called_once_with( '/support/ems/application-logs', 'post', body=body) def test_inheritance_all_parent_methods_available(self): """Test that ASA r2 client has access to all parent methods.""" # Test that common parent methods are available parent_methods = [ 'send_request', 'get_records', 'send_ems_log_message' ] for method_name in parent_methods: self.assertTrue(hasattr(self.client, method_name), f"Method {method_name} should be available") self.assertTrue(callable(getattr(self.client, method_name)), f"Method {method_name} should be callable") def test_asar2_specific_ontapi_not_supported(self): """Test that ASA r2 specifically doesn't support ONTAPI.""" # This is a key differentiator for ASA r2 result = self.client.get_ontapi_version() self.assertEqual((0, 0), result) # No change for cached version result_cached = self.client.get_ontapi_version(cached=True) self.assertEqual((0, 0), result_cached) def test_disaggregated_platform_connection_info(self): """Test ASA r2 client works with disaggregated platform settings.""" # Verify the connection info includes disaggregated flag self.assertEqual('True', CONNECTION_INFO['is_disaggregated']) # Test that client can be initialized with disaggregated settings disaggregated_info = CONNECTION_INFO.copy() disaggregated_info['is_disaggregated'] = 'True' with mock.patch.object(RestClientASAr2, 'get_ontap_version', return_value=(9, 18, 1)): client = RestClientASAr2(**disaggregated_info) self.assertIsInstance(client, RestClientASAr2) def test_get_cluster_info_success(self): """Test successful cluster info retrieval.""" expected_response = fake_client.GET_CLUSTER_INFO_RESPONSE_REST self.mock_object(self.client, 'send_request', return_value=expected_response) result = self.client.get_cluster_info() expected_query = {'fields': 'name,disaggregated'} self.client.send_request.assert_called_once_with( '/cluster', 'get', query=expected_query, enable_tunneling=False) self.assertEqual(expected_response, result) def test_get_cluster_info_exception(self): """Test exception handling during cluster info retrieval.""" self.mock_object(self.client, 'send_request', side_effect=Exception("API error")) result = self.client.get_cluster_info() expected_query = {'fields': 'name,disaggregated'} self.client.send_request.assert_called_once_with( '/cluster', 'get', query=expected_query, enable_tunneling=False) self.assertIsNone(result) def test_get_cluster_info_empty_response(self): """Test cluster info retrieval with empty response.""" self.mock_object(self.client, 'send_request', return_value={}) result = self.client.get_cluster_info() expected_query = {'fields': 'name,disaggregated'} self.client.send_request.assert_called_once_with( '/cluster', 'get', query=expected_query, enable_tunneling=False) self.assertEqual({}, result) def test_get_cluster_info_netapp_api_error(self): """Test NetApp API error handling during cluster info retrieval.""" self.mock_object(self.client, 'send_request', side_effect=netapp_api.NaApiError("NetApp API error")) result = self.client.get_cluster_info() expected_query = {'fields': 'name,disaggregated'} self.client.send_request.assert_called_once_with( '/cluster', 'get', query=expected_query, enable_tunneling=False) self.assertIsNone(result) def 
test_get_cluster_capacity_success(self): """Test successful cluster capacity retrieval.""" expected_response = fake_client.GET_CLUSTER_CAPACITY_RESPONSE_REST self.mock_object(self.client, 'send_request', return_value=expected_response) result = self.client.get_cluster_capacity() expected_query =\ {'fields': 'block_storage.size,block_storage.available'} self.client.send_request.assert_called_once_with( '/storage/cluster', 'get', query=expected_query, enable_tunneling=False) expected_capacity = { 'size-total': float(expected_response['block_storage']['size']), 'size-available': float(expected_response['block_storage']['available']) } self.assertEqual(expected_capacity, result) def test_get_cluster_capacity_no_response(self): """Test cluster capacity retrieval with no response.""" self.mock_object(self.client, 'send_request', return_value=None) result = self.client.get_cluster_capacity() expected_query =\ {'fields': 'block_storage.size,block_storage.available'} self.client.send_request.assert_called_once_with( '/storage/cluster', 'get', query=expected_query, enable_tunneling=False) self.assertEqual({}, result) def test_get_cluster_capacity_missing_block_storage(self): """Test cluster capacity retrieval with missing block_storage.""" response = {'some_other_field': 'value'} self.mock_object(self.client, 'send_request', return_value=response) result = self.client.get_cluster_capacity() expected_query =\ {'fields': 'block_storage.size,block_storage.available'} self.client.send_request.assert_called_once_with( '/storage/cluster', 'get', query=expected_query, enable_tunneling=False) expected_capacity = { 'size-total': 0.0, 'size-available': 0.0 } self.assertEqual(expected_capacity, result) def test_get_cluster_capacity_partial_block_storage(self): """Test cluster capacity retrieval with partial block_storage.""" response = { 'block_storage': { 'size': 1000000000, # missing 'available' field } } self.mock_object(self.client, 'send_request', return_value=response) result = self.client.get_cluster_capacity() expected_query =\ {'fields': 'block_storage.size,block_storage.available'} self.client.send_request.assert_called_once_with( '/storage/cluster', 'get', query=expected_query, enable_tunneling=False) expected_capacity = { 'size-total': 1000000000.0, 'size-available': 0.0 } self.assertEqual(expected_capacity, result) def test_get_cluster_capacity_exception(self): """Test exception handling during cluster capacity retrieval.""" self.mock_object(self.client, 'send_request', side_effect=Exception("API error")) self.assertRaises(netapp_utils.NetAppDriverException, self.client.get_cluster_capacity) expected_query =\ {'fields': 'block_storage.size,block_storage.available'} self.client.send_request.assert_called_once_with( '/storage/cluster', 'get', query=expected_query, enable_tunneling=False) def test_get_cluster_capacity_netapp_api_error(self): """Test NetApp API error handling during cluster capacity retrieval.""" self.mock_object(self.client, 'send_request', side_effect=netapp_api.NaApiError("NetApp API error")) self.assertRaises(netapp_utils.NetAppDriverException, self.client.get_cluster_capacity) expected_query =\ {'fields': 'block_storage.size,block_storage.available'} self.client.send_request.assert_called_once_with( '/storage/cluster', 'get', query=expected_query, enable_tunneling=False) def test_get_aggregate_disk_types_success(self): """Test successful aggregate disk types retrieval.""" expected_response =\ fake_client.GET_AGGREGATE_STORAGE_TYPES_RESPONSE_REST self.mock_object(self.client, 
'send_request', return_value=expected_response) result = self.client.get_aggregate_disk_types() expected_query = {'fields': 'name,block_storage.storage_type'} self.client.send_request.assert_called_once_with( '/storage/aggregates', 'get', query=expected_query, enable_tunneling=False) # Should return array of storage types self.assertEqual(['ssd'], result) def test_get_aggregate_disk_types_multiple_records(self): """Test aggregate disk types retrieval with multiple records.""" expected_response =\ fake_client.GET_AGGREGATE_STORAGE_TYPES_MULTIPLE_RESPONSE_REST self.mock_object(self.client, 'send_request', return_value=expected_response) result = self.client.get_aggregate_disk_types() expected_query = {'fields': 'name,block_storage.storage_type'} self.client.send_request.assert_called_once_with( '/storage/aggregates', 'get', query=expected_query, enable_tunneling=False) # Should return array with all storage types including duplicates self.assertEqual(['ssd', 'ssd'], result) def test_get_aggregate_disk_types_empty_records(self): """Test aggregate disk types retrieval with empty records.""" expected_response =\ fake_client.GET_AGGREGATE_STORAGE_TYPES_EMPTY_RESPONSE_REST self.mock_object(self.client, 'send_request', return_value=expected_response) result = self.client.get_aggregate_disk_types() expected_query = {'fields': 'name,block_storage.storage_type'} self.client.send_request.assert_called_once_with( '/storage/aggregates', 'get', query=expected_query, enable_tunneling=False) self.assertIsNone(result) def test_get_aggregate_disk_types_missing_block_storage(self): """Test aggregate disk types retrieval with missing block_storage.""" response = { "records": [ { "uuid": "3e5e2865-af43-4d82-a808-8a7222cf0369", "name": "dataFA_2_p0_i1", # missing block_storage field } ], "num_records": 1 } self.mock_object(self.client, 'send_request', return_value=response) result = self.client.get_aggregate_disk_types() expected_query = {'fields': 'name,block_storage.storage_type'} self.client.send_request.assert_called_once_with( '/storage/aggregates', 'get', query=expected_query, enable_tunneling=False) self.assertEqual([], result) def test_get_aggregate_disk_types_missing_storage_type(self): """Test aggregate disk types retrieval with missing storage_type.""" response = { "records": [ { "uuid": "3e5e2865-af43-4d82-a808-8a7222cf0369", "name": "dataFA_2_p0_i1", "block_storage": { "primary": { "disk_class": "solid_state", "disk_type": "ssd" } # missing storage_type field } } ], "num_records": 1 } self.mock_object(self.client, 'send_request', return_value=response) result = self.client.get_aggregate_disk_types() expected_query = {'fields': 'name,block_storage.storage_type'} self.client.send_request.assert_called_once_with( '/storage/aggregates', 'get', query=expected_query, enable_tunneling=False) self.assertEqual([], result) def test_get_aggregate_disk_types_netapp_api_error(self): """Test NetApp API error handling.""" self.mock_object(self.client, 'send_request', side_effect=netapp_api.NaApiError("NetApp API error")) self.assertRaises(netapp_utils.NetAppDriverException, self.client.get_aggregate_disk_types) expected_query = {'fields': 'name,block_storage.storage_type'} self.client.send_request.assert_called_once_with( '/storage/aggregates', 'get', query=expected_query, enable_tunneling=False) def test_get_performance_counter_info_not_supported(self): """Performance counter info raises NetAppDriverException.""" self.assertRaises(netapp_utils.NetAppDriverException, self.client.get_performance_counter_info, 
'system', 'cpu_busy') def test_get_performance_instance_uuids_not_supported(self): """Performance instance UUIDs raises NetAppDriverException.""" self.assertRaises(netapp_utils.NetAppDriverException, self.client.get_performance_instance_uuids, 'system', 'node1') def test_get_performance_counters_not_supported(self): """Performance counters raises NetAppDriverException.""" self.assertRaises(netapp_utils.NetAppDriverException, self.client.get_performance_counters, 'system', ['uuid1'], ['cpu_busy']) def test_create_lun(self): metadata = copy.deepcopy(fake_client.LUN_GET_ITER_RESULT[0]) name = fake.LUN_NAME size = 2048 initial_size = size qos_policy_group_is_adaptive = False self.mock_object(self.client, '_validate_qos_policy_group') self.mock_object(self.client, 'send_request') body = { 'name': name, 'space.size': str(initial_size), 'os_type': metadata['OsType'], 'qos_policy.name': fake.QOS_POLICY_GROUP_NAME } self.client.create_lun( fake.VOLUME_NAME, fake.LUN_NAME, size, metadata, qos_policy_group_name=fake.QOS_POLICY_GROUP_NAME, qos_policy_group_is_adaptive=qos_policy_group_is_adaptive) self.client.send_request.assert_called_once_with( '/storage/luns', 'post', body=body) @patch('cinder.volume.drivers.netapp.dataontap.client.' 'client_cmode_rest_asar2.RestClientASAr2.send_request') def test_create_lun_handles_qos_policy(self, mock_send_request): mock_send_request.return_value = None self.client.create_lun(fake_client.VOLUME_NAME, fake_client.LUN_NAME, 1024, {"OsType": "linux"}, qos_policy_group_name=( fake.QOS_POLICY_GROUP_NAME), ) mock_send_request.assert_called_once_with( '/storage/luns', 'post', body={ 'name': fake_client.LUN_NAME.replace("-", "_"), 'space.size': '1024', 'os_type': 'linux', 'qos_policy.name': fake.QOS_POLICY_GROUP_NAME } ) @patch('cinder.volume.drivers.netapp.dataontap.client.' 'client_cmode_rest_asar2.RestClientASAr2.send_request') def test_create_lun_raises_error_on_failure(self, mock_send_request): mock_send_request.side_effect = netapp_api.NaApiError self.assertRaises( netapp_api.NaApiError, self.client.create_lun, fake.VOLUME_NAME, fake.LUN_NAME, 1024, {"OsType": "linux"} ) @patch('cinder.volume.drivers.netapp.dataontap.client.' 'client_cmode_rest_asar2.RestClientASAr2.send_request') def test_destroy_lun(self, mock_send_request): mock_send_request.return_value = None self.client.destroy_lun(fake.LUN_PATH, force=True) lun_name = self.client._get_backend_lun_or_namespace( fake.LUN_PATH ) mock_send_request.assert_called_once_with( '/storage/luns/', 'delete', query={ 'name': lun_name, 'allow_delete_while_mapped': 'true', } ) @patch('cinder.volume.drivers.netapp.dataontap.client.' 'client_cmode_rest_asar2.RestClientASAr2.send_request') def test_destroy_lun_handles_non_forced_deletion(self, mock_send_request): mock_send_request.return_value = None self.client.destroy_lun(fake.LUN_PATH, force=False) lun_name = self.client._get_backend_lun_or_namespace( fake.LUN_PATH ) mock_send_request.assert_called_once_with( '/storage/luns/', 'delete', query={ 'name': lun_name, } ) @patch('cinder.volume.drivers.netapp.dataontap.client.' 'client_cmode_rest_asar2.RestClientASAr2.send_request') def test_destroy_lun_raises_error_on_failure(self, mock_send_request): mock_send_request.side_effect = netapp_api.NaApiError self.assertRaises( netapp_api.NaApiError, self.client.destroy_lun, fake.LUN_PATH, force=True, ) @mock.patch('cinder.volume.drivers.netapp.dataontap.client.' 
'client_cmode_rest_asar2.RestClientASAr2.send_request') def test_create_namespace(self, mock_send_request): self.client.create_namespace(fake_client.VOLUME_NAME, fake_client.NAMESPACE_NAME, 2048, {'OsType': 'linux'} ) mock_send_request.assert_called_once_with( '/storage/namespaces', 'post', body={ 'name': fake_client.NAMESPACE_NAME, 'space.size': '2048', 'os_type': 'linux' } ) @mock.patch('cinder.volume.drivers.netapp.dataontap.client.' 'client_cmode_rest_asar2.RestClientASAr2.send_request') def test_destroy_namespace(self, mock_send_request): self.client._get_backend_lun_or_namespace = mock.Mock( return_value=fake.NAMESPACE_NAME) self.client.destroy_namespace(fake.PATH_NAMESPACE, force=True) mock_send_request.assert_called_once_with( '/storage/namespaces', 'delete', query={ 'name': fake.NAMESPACE_NAME, 'svm': self.client.vserver, 'allow_delete_while_mapped': 'true' } ) @mock.patch('cinder.volume.drivers.netapp.dataontap.client.' 'client_cmode_rest.RestClient.map_lun') def test_map_lun(self, mock_super_map_lun): self.client._get_backend_lun_or_namespace = mock.Mock( return_value=fake.LUN_NAME ) mock_super_map_lun.return_value = 'result' result = self.client.map_lun(fake.LUN_PATH, 'igroup1', 42) self.client._get_backend_lun_or_namespace.assert_called_once_with( fake.LUN_PATH ) mock_super_map_lun.assert_called_once_with(fake.LUN_NAME, 'igroup1', 42) self.assertEqual(result, 'result') @mock.patch('cinder.volume.drivers.netapp.dataontap.' 'client.client_cmode_rest.RestClient.get_lun_map') def test_get_lun_map(self, mock_super_get_lun_map): self.client._get_backend_lun_or_namespace = mock.Mock( return_value=fake.LUN_NAME) mock_super_get_lun_map.return_value = [ {'initiator-group': 'igroup1', 'lun-id': 1, 'vserver': 'svm1'} ] result = self.client.get_lun_map(fake.LUN_NAME) self.client._get_backend_lun_or_namespace.assert_called_once_with( fake.LUN_NAME ) mock_super_get_lun_map.assert_called_once_with(fake.LUN_NAME) self.assertEqual(result, [ {'initiator-group': 'igroup1', 'lun-id': 1, 'vserver': 'svm1'} ]) @mock.patch('cinder.volume.drivers.netapp.dataontap.' 'client.client_cmode_rest.RestClient.unmap_lun') def test_unmap_lun(self, mock_super_unmap_lun): self.client._get_backend_lun_or_namespace = mock.Mock( return_value=fake.LUN_NAME ) self.client.unmap_lun(fake.LUN_NAME, fake.IGROUP1, ) self.client._get_backend_lun_or_namespace.assert_called_once_with( fake.LUN_NAME ) mock_super_unmap_lun.assert_called_once_with( fake.LUN_NAME, fake.IGROUP1 ) @mock.patch('cinder.volume.drivers.netapp.dataontap.client.' 'client_cmode_rest.RestClient.get_lun_by_args') def test_get_lun_by_args_with_path(self, mock_super_get_lun_by_args): self.client._get_backend_lun_or_namespace = mock.Mock( return_value=fake.LUN_NAME ) mock_super_get_lun_by_args.return_value = 'result' path_arg = {'path': fake.LUN_PATH} result = self.client.get_lun_by_args(path=path_arg) self.client._get_backend_lun_or_namespace.assert_called_once_with( path_arg ) self.assertEqual(path_arg['path'], fake.LUN_NAME) mock_super_get_lun_by_args.assert_called_once_with(path=path_arg) self.assertEqual(result, 'result') @mock.patch('cinder.volume.drivers.netapp.dataontap.client.' 'client_cmode_rest.RestClient.get_lun_by_args') def test_get_lun_by_args_without_path(self, mock_super_get_lun_by_args): mock_super_get_lun_by_args.return_value = 'result' result = self.client.get_lun_by_args(path=None) mock_super_get_lun_by_args.assert_called_once_with(path=None) self.assertEqual(result, 'result') @mock.patch('cinder.volume.drivers.netapp.dataontap.client.' 
'client_cmode_rest.RestClient.map_namespace') def test_maps_namespace(self, mock_super_map_namespace): self.client._get_backend_lun_or_namespace = mock.Mock( return_value=fake.NAMESPACE_NAME ) mock_super_map_namespace.return_value = 'namespace-uuid' result = self.client.map_namespace(fake.PATH_NAMESPACE, fake.SUBSYSTEM) self.client._get_backend_lun_or_namespace.assert_called_once_with( fake.PATH_NAMESPACE ) mock_super_map_namespace.assert_called_once_with( fake.NAMESPACE_NAME, fake.SUBSYSTEM ) self.assertEqual(result, 'namespace-uuid') @mock.patch('cinder.volume.drivers.netapp.dataontap.client.' 'client_cmode_rest.RestClient.map_namespace') def test_maps_namespace_with_path_containing_hyphens( self, mock_super_map_namespace): self.client._get_backend_lun_or_namespace = mock.Mock( return_value='name_space_2') mock_super_map_namespace.return_value = 'uuid-2' result = self.client.map_namespace('/vol/vol1/name-space-2', 'subsystem2') self.client._get_backend_lun_or_namespace.assert_called_once_with( '/vol/vol1/name-space-2') mock_super_map_namespace.assert_called_once_with( 'name_space_2', 'subsystem2') self.assertEqual(result, 'uuid-2') @mock.patch('cinder.volume.drivers.netapp.dataontap.client.' 'client_cmode_rest.RestClient.unmap_namespace') def test_unmaps_namespace_with_valid_path_and_subsystem( self, mock_super_unmap_namespace): self.client._get_backend_lun_or_namespace = mock.Mock( return_value=fake.NAMESPACE_NAME) self.client.unmap_namespace(fake.PATH_NAMESPACE, fake.SUBSYSTEM) self.client._get_backend_lun_or_namespace.assert_called_once_with( fake.PATH_NAMESPACE ) mock_super_unmap_namespace.assert_called_once_with( fake.NAMESPACE_NAME, fake.SUBSYSTEM ) @mock.patch('cinder.volume.drivers.netapp.dataontap.client.' 'client_cmode_rest.RestClient.unmap_namespace') def test_unmaps_namespace_with_path_containing_special_characters( self, mock_super_unmap_namespace): self.client._get_backend_lun_or_namespace = mock.Mock( return_value='namespace_special') self.client.unmap_namespace('/vol/vol1/namespace-special', 'subsystem2') self.client._get_backend_lun_or_namespace.assert_called_once_with( '/vol/vol1/namespace-special') mock_super_unmap_namespace.assert_called_once_with( 'namespace_special', 'subsystem2' ) @mock.patch('cinder.volume.drivers.netapp.dataontap.client.' 'client_cmode_rest.RestClient.get_namespace_map') def test_get_namespace_map(self, mock_super_get_namespace_map): self.client._get_backend_lun_or_namespace = mock.Mock( return_value=fake.NAMESPACE_NAME ) mock_super_get_namespace_map.return_value = { 'namespace_map': 'details'} result = self.client.get_namespace_map(fake.PATH_NAMESPACE) self.client._get_backend_lun_or_namespace.assert_called_once_with( fake.PATH_NAMESPACE ) mock_super_get_namespace_map.assert_called_once_with( fake.NAMESPACE_NAME) self.assertEqual(result, {'namespace_map': 'details'}) @mock.patch('cinder.volume.drivers.netapp.dataontap.client.' 
'client_cmode_rest.RestClient.get_namespace_map') def test_get_namespace_map_with_path_containing_special_characters( self, mock_super_get_namespace_map): self.client._get_backend_lun_or_namespace = mock.Mock( return_value='namespace_special') mock_super_get_namespace_map.return_value = { 'namespace_map': 'special_details'} result = self.client.get_namespace_map('/vol/vol1/namespace-special') self.client._get_backend_lun_or_namespace.assert_called_once_with( '/vol/vol1/namespace-special') mock_super_get_namespace_map.assert_called_once_with( 'namespace_special') self.assertEqual(result, { 'namespace_map': 'special_details' }) @mock.patch('cinder.volume.drivers.netapp.dataontap.client.' 'client_cmode_rest.RestClient.get_namespace_map') def test_returns_none_when_namespace_map_not_found( self, mock_super_get_namespace_map): mock_super_get_namespace_map.return_value = None result = self.client.get_namespace_map('/vol/vol1/namespace3') mock_super_get_namespace_map.assert_called_once_with('namespace3') self.assertIsNone(result) @mock.patch( 'cinder.volume.drivers.netapp.dataontap.client.' 'client_cmode_rest_asar2.RestClientASAr2._lun_update_by_path') def test_resizes_lun(self, mock_lun_update_by_path): self.client._get_backend_lun_or_namespace = mock.Mock( return_value=fake.LUN_NAME) self.client.do_direct_resize(fake.LUN_PATH, 10) self.client._get_backend_lun_or_namespace.assert_called_once_with( fake.LUN_PATH) mock_lun_update_by_path.assert_called_once_with( fake.LUN_NAME, {'name': fake.LUN_NAME, 'space.size': 10}) @mock.patch( 'cinder.volume.drivers.netapp.dataontap.client.' 'client_cmode_rest_asar2.RestClientASAr2._lun_update_by_path' ) def test_resize_lun_with_invalid_path(self, mock_lun_update_by_path): self.client._get_backend_lun_or_namespace = mock.Mock( return_value=None) self.client.do_direct_resize('/vol/vol1/invalid_lun', 53) self.client._get_backend_lun_or_namespace.assert_called_once_with( '/vol/vol1/invalid_lun') mock_lun_update_by_path.assert_not_called() @mock.patch('cinder.volume.drivers.netapp.dataontap.' 'client.client_cmode_rest_asar2.RestClientASAr2.send_request') def test_resizes_namespace(self, mock_send_request): self.client._get_backend_lun_or_namespace = mock.Mock( return_value=fake.NAMESPACE_NAME) self.client.namespace_resize(fake.PATH_NAMESPACE, fake.SIZE) self.client._get_backend_lun_or_namespace.assert_called_once_with( fake.PATH_NAMESPACE) mock_send_request.assert_called_once_with( '/storage/namespaces', 'patch', body={'space.size': fake.SIZE}, query={'name': fake.NAMESPACE_NAME} ) @mock.patch('cinder.volume.drivers.netapp.dataontap.' 'client.client_cmode_rest_asar2.RestClientASAr2.send_request') def test_resize_namespace_with_invalid_path(self, mock_send_request): self.client._get_backend_lun_or_namespace = mock.Mock( return_value=None) self.client.namespace_resize('/vol/vol1/invalid_namespace', 5368) self.client._get_backend_lun_or_namespace.assert_called_once_with( '/vol/vol1/invalid_namespace') mock_send_request.assert_not_called() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/volume/drivers/netapp/dataontap/fakes.py0000664000175000017500000007236300000000000026361 0ustar00zuulzuul00000000000000# Copyright (c) - 2014, Clinton Knight. All rights reserved. # Copyright (c) - 2015, Tom Barron. All rights reserved. # Copyright (c) - 2016 Chuck Fouts. All rights reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from lxml import etree from cinder.tests.unit import fake_constants from cinder.volume.drivers.netapp.dataontap.client import api as netapp_api VOLUME_ID = 'f10d1a84-9b7b-427e-8fec-63c48b509a56' LUN_ID = 'ee6b4cc7-477b-4016-aa0c-7127b4e3af86' LUN_HANDLE = 'fake_lun_handle' NAMESPACE_HANDLE = 'fake_namespace_handle' LUN_NAME = 'lun1' NAMESPACE_NAME = 'namespace1' LUN_SIZE = 3 LUN_TABLE = {LUN_NAME: None} SIZE = 1024 HOST_NAME = 'fake.host.name' BACKEND_NAME = 'fake_backend_name' POOL_NAME = 'aggr1' SHARE_IP = '192.168.99.24' IPV6_ADDRESS = 'fe80::6e40:8ff:fe8a:130' EXPORT_PATH = '/fake/export/path' NFS_SHARE = '%s:%s' % (SHARE_IP, EXPORT_PATH) NFS_SHARE_IPV6 = '[%s]:%s' % (IPV6_ADDRESS, EXPORT_PATH) HOST_STRING = '%s@%s#%s' % (HOST_NAME, BACKEND_NAME, POOL_NAME) NFS_HOST_STRING = '%s@%s#%s' % (HOST_NAME, BACKEND_NAME, NFS_SHARE) AGGREGATE = 'aggr1' FLEXVOL = 'openstack-flexvol' NFS_FILE_PATH = 'nfsvol' PATH = '/vol/%s/%s' % (POOL_NAME, LUN_NAME) PATH_NAMESPACE = '/vol/%s/%s' % (POOL_NAME, NAMESPACE_NAME) IMAGE_FILE_ID = 'img-cache-imgid' PROVIDER_LOCATION = 'fake_provider_location' NFS_HOST = 'nfs-host1' NFS_SHARE_PATH = '/export' NFS_EXPORT_1 = '%s:%s' % (NFS_HOST, NFS_SHARE_PATH) NFS_EXPORT_2 = 'nfs-host2:/export' MOUNT_POINT = '/mnt/nfs' ATTACHED = 'attached' DETACHED = 'detached' DEST_POOL_NAME = 'dest-aggr' DEST_VSERVER_NAME = 'dest-vserver' DEST_BACKEND_NAME = 'dest-backend' DEST_HOST_STRING = '%s@%s#%s' % (HOST_NAME, DEST_BACKEND_NAME, DEST_POOL_NAME) DEST_EXPORT_PATH = '/fake/export/dest-path' DEST_NFS_SHARE = '%s:%s' % (SHARE_IP, DEST_EXPORT_PATH) CLUSTER_NAME = 'fake-cluster-name' DEST_CLUSTER_NAME = 'fake-dest-cluster-name' JOB_UUID = 'fb132b04-6422-43ce-9451-ee819f0131a4' LUN_METADATA = { 'OsType': None, 'SpaceReserved': 'true', 'SpaceAllocated': 'false', 'Path': PATH, 'Qtree': None, 'Volume': POOL_NAME, } LUN_METADATA_WITH_SPACE_ALLOCATION = { 'OsType': None, 'SpaceReserved': 'true', 'Path': PATH, 'SpaceAllocated': 'true', 'Qtree': None, 'Volume': POOL_NAME, } NAMESPACE_METADATA = { 'OsType': None, 'Path': PATH_NAMESPACE, 'Qtree': None, 'Volume': POOL_NAME, } VOLUME = { 'name': LUN_NAME, 'size': SIZE, 'id': VOLUME_ID, 'host': HOST_STRING, 'attach_status': DETACHED, } NAMESPACE_VOLUME = { 'name': NAMESPACE_NAME, 'size': SIZE, 'id': VOLUME_ID, 'host': HOST_STRING, 'attach_status': DETACHED, } NFS_VOLUME = { 'name': NFS_FILE_PATH, 'size': SIZE, 'id': VOLUME_ID, 'host': NFS_HOST_STRING, 'provider_location': PROVIDER_LOCATION, } FAKE_MANAGE_VOLUME = { 'name': 'volume-new-managed-123', 'id': 'volume-new-managed-123', } FAKE_IMAGE_LOCATION = ( None, [ # valid metadata { 'metadata': { 'share_location': 'nfs://host/path', 'mountpoint': '/opt/stack/data/glance', 'id': 'abc-123', 'type': 'nfs' }, 'url': 'file:///opt/stack/data/glance/image-id-0' }, # missing metadata { 'metadata': {}, 'url': 'file:///opt/stack/data/glance/image-id-1' }, # missing location_type { 'metadata': {'location_type': None}, 'url': 'file:///opt/stack/data/glance/image-id-2' }, # 
non-nfs location_type { 'metadata': {'location_type': 'not-NFS'}, 'url': 'file:///opt/stack/data/glance/image-id-3' }, # missing share_location { 'metadata': {'location_type': 'nfs', 'share_location': None}, 'url': 'file:///opt/stack/data/glance/image-id-4'}, # missing mountpoint { 'metadata': { 'location_type': 'nfs', 'share_location': 'nfs://host/path', # Pre-kilo we documented "mount_point" 'mount_point': '/opt/stack/data/glance' }, 'url': 'file:///opt/stack/data/glance/image-id-5' }, # Valid metadata { 'metadata': { 'share_location': 'nfs://host/path', 'mountpoint': '/opt/stack/data/glance', 'id': 'abc-123', 'type': 'nfs', }, 'url': 'file:///opt/stack/data/glance/image-id-6' } ] ) NETAPP_VOLUME = 'fake_netapp_volume' VFILER = 'fake_netapp_vfiler' UUID1 = '12345678-1234-5678-1234-567812345678' LUN_PATH = '/vol/vol0/%s' % LUN_NAME VSERVER_NAME = 'openstack-vserver' FC_VOLUME = {'name': 'fake_volume'} FC_INITIATORS = ['21000024ff406cc3', '21000024ff406cc2'] FC_FORMATTED_INITIATORS = ['21:00:00:24:ff:40:6c:c3', '21:00:00:24:ff:40:6c:c2'] FC_TARGET_WWPNS = ['500a098280feeba5', '500a098290feeba5', '500a098190feeba5', '500a098180feeba5'] FC_FORMATTED_TARGET_WWPNS = ['50:0a:09:82:80:fe:eb:a5', '50:0a:09:82:90:fe:eb:a5', '50:0a:09:81:90:fe:eb:a5', '50:0a:09:81:80:fe:eb:a5'] FC_CONNECTOR = {'ip': '1.1.1.1', 'host': 'fake_host', 'wwnns': ['20000024ff406cc3', '20000024ff406cc2'], 'wwpns': ['21000024ff406cc3', '21000024ff406cc2']} FC_I_T_MAP = {'21000024ff406cc3': ['500a098280feeba5', '500a098290feeba5'], '21000024ff406cc2': ['500a098190feeba5', '500a098180feeba5']} FC_I_T_MAP_COMPLETE = {'21000024ff406cc3': FC_TARGET_WWPNS, '21000024ff406cc2': FC_TARGET_WWPNS} FC_FABRIC_MAP = {'fabricB': {'target_port_wwn_list': ['500a098190feeba5', '500a098180feeba5'], 'initiator_port_wwn_list': ['21000024ff406cc2']}, 'fabricA': {'target_port_wwn_list': ['500a098290feeba5', '500a098280feeba5'], 'initiator_port_wwn_list': ['21000024ff406cc3']}} FC_TARGET_INFO = {'driver_volume_type': 'fibre_channel', 'data': {'target_lun': 1, 'initiator_target_map': FC_I_T_MAP, 'target_wwn': FC_TARGET_WWPNS, 'target_discovered': True}} FC_TARGET_INFO_EMPTY = {'driver_volume_type': 'fibre_channel', 'data': {}} FC_TARGET_INFO_UNMAP = {'driver_volume_type': 'fibre_channel', 'data': {'target_wwn': FC_TARGET_WWPNS, 'initiator_target_map': FC_I_T_MAP}} ISCSI_ONE_MAP_LIST = [{'initiator-group': 'openstack-faketgt1', 'vserver': 'vserver_123', 'lun-id': '1'}] ISCSI_MULTI_MAP_LIST = [{'initiator-group': 'openstack-faketgt1', 'vserver': 'vserver_123', 'lun-id': '1'}, {'initiator-group': 'openstack-faketgt2', 'vserver': 'vserver_123', 'lun-id': '2'} ] ISCSI_EMPTY_MAP_LIST = [] IGROUP1_NAME = 'openstack-igroup1' IGROUP1 = { 'initiator-group-os-type': 'linux', 'initiator-group-type': 'fcp', 'initiator-group-name': IGROUP1_NAME, } CUSTOM_IGROUP = { 'initiator-group-os-type': 'linux', 'initiator-group-type': 'fcp', 'initiator-group-name': 'node1', } ISCSI_VOLUME = { 'name': 'fake_volume', 'id': 'fake_id', 'provider_auth': 'fake provider auth', 'provider_location': 'iscsi:/dummy_path' } ISCSI_LUN = {'name': ISCSI_VOLUME, 'lun_id': 42} ISCSI_SERVICE_IQN = 'fake_iscsi_service_iqn' ISCSI_CONNECTION_PROPERTIES = { 'data': { 'auth_method': 'fake_method', 'auth_password': 'auth', 'auth_username': 'provider', 'discard': True, 'discovery_auth_method': 'fake_method', 'discovery_auth_username': 'provider', 'discovery_auth_password': 'auth', 'target_discovered': False, 'target_iqn': ISCSI_SERVICE_IQN, 'target_lun': 42, 'target_portal': '1.2.3.4:3260', 
'volume_id': 'fake_id', }, 'driver_volume_type': 'iscsi', } ISCSI_CONNECTOR = { 'ip': '1.1.1.1', 'host': 'fake_host', 'initiator': 'fake_initiator_iqn', } ISCSI_TARGET_DETAILS_LIST = [ {'address': '5.6.7.8', 'port': '3260'}, {'address': '1.2.3.4', 'port': '3260'}, {'address': '99.98.97.96', 'port': '3260'}, ] IPV4_ADDRESS = '192.168.14.2' NFS_SHARE_IPV4 = IPV4_ADDRESS + ':' + EXPORT_PATH RESERVED_PERCENTAGE = 7 MAX_OVER_SUBSCRIPTION_RATIO = 19.0 TOTAL_BYTES = 4797892092432 AVAILABLE_BYTES = 13479932478 CAPACITY_VALUES = (TOTAL_BYTES, AVAILABLE_BYTES) CAPACITIES = {'size-total': TOTAL_BYTES, 'size-available': AVAILABLE_BYTES} IGROUP1 = {'initiator-group-os-type': 'linux', 'initiator-group-type': 'fcp', 'initiator-group-name': IGROUP1_NAME} QOS_SPECS = {} EXTRA_SPECS = {'netapp:space_allocation': ' True'} MAX_THROUGHPUT = '21734278B/s' MIN_IOPS = '256iops' MAX_IOPS = '512iops' MAX_BPS = '1000000B/s' QOS_POLICY_GROUP_NAME = 'fake_qos_policy_group_name' QOS_POLICY_GROUP_INFO_LEGACY = { 'legacy': 'legacy-' + QOS_POLICY_GROUP_NAME, 'spec': None, } QOS_POLICY_GROUP_SPEC = { 'min_throughput': MIN_IOPS, 'max_throughput': MAX_IOPS, 'policy_name': QOS_POLICY_GROUP_NAME, } QOS_POLICY_GROUP_SPEC_BPS = { 'max_throughput': MAX_BPS, 'policy_name': QOS_POLICY_GROUP_NAME, } QOS_POLICY_GROUP_SPEC_MAX = { 'max_throughput': MAX_THROUGHPUT, 'policy_name': QOS_POLICY_GROUP_NAME, } EXPECTED_IOPS_PER_GB = '128' PEAK_IOPS_PER_GB = '512' EXPECTED_IOPS_ALLOCATION = 'used-space' PEAK_IOPS_ALLOCATION = 'used-space' ABSOLUTE_MIN_IOPS = '75' BLOCK_SIZE = 'ANY' ADAPTIVE_QOS_SPEC = { 'policy_name': QOS_POLICY_GROUP_NAME, 'expected_iops': EXPECTED_IOPS_PER_GB, 'peak_iops': PEAK_IOPS_PER_GB, 'expected_iops_allocation': EXPECTED_IOPS_ALLOCATION, 'peak_iops_allocation': PEAK_IOPS_ALLOCATION, 'absolute_min_iops': ABSOLUTE_MIN_IOPS, 'block_size': BLOCK_SIZE, } QOS_POLICY_GROUP_INFO = {'legacy': None, 'spec': QOS_POLICY_GROUP_SPEC} QOS_POLICY_GROUP_INFO_MAX = {'legacy': None, 'spec': QOS_POLICY_GROUP_SPEC_MAX} ADAPTIVE_QOS_POLICY_GROUP_INFO = { 'legacy': None, 'spec': ADAPTIVE_QOS_SPEC, } CLONE_SOURCE_NAME = 'fake_clone_source_name' CLONE_SOURCE_ID = 'fake_clone_source_id' CLONE_SOURCE_SIZE = 1024 CLONE_SOURCE = { 'size': CLONE_SOURCE_SIZE, 'name': CLONE_SOURCE_NAME, 'id': CLONE_SOURCE_ID, } CLONE_DESTINATION_NAME = 'fake_clone_destination_name' CLONE_DESTINATION_SIZE = 1041 CLONE_DESTINATION_ID = 'fake_clone_destination_id' CLONE_DESTINATION = { 'size': CLONE_DESTINATION_SIZE, 'name': CLONE_DESTINATION_NAME, 'id': CLONE_DESTINATION_ID, } VOLUME_NAME = 'volume-fake_volume_id' VOLUME_PATH = '/vol/%s/%s' % (NETAPP_VOLUME, VOLUME_NAME) MOUNT_PATH = '168.10.16.11:/' + VOLUME_ID SNAPSHOT_NAME = 'fake_snapshot_name' SNAPSHOT_LUN_HANDLE = 'fake_snapshot_lun_handle' SNAPSHOT_NAMESPACE_HANDLE = 'fake_snapshot_namespace_handle' SNAPSHOT_MOUNT = '/fake/mount/path' SNAPSHOT = { 'name': SNAPSHOT_NAME, 'volume_size': SIZE, 'volume_id': VOLUME_ID, 'volume_name': VOLUME_NAME, 'volume_type_id': 'fake_id', 'busy': False, 'id': 'fake_id' } SNAPSHOT_VOLUME = { 'id': VOLUME_ID, 'name': VOLUME_NAME } LUN_WITH_METADATA = { 'handle': 'vserver_fake:/vol/fake_flexvol/volume-fake-uuid', 'name': 'volume-fake-uuid', 'size': 20971520, 'metadata': { 'Vserver': 'vserver_fake', 'Volume': 'fake_flexvol', 'Qtree': None, 'Path': '/vol/fake_flexvol/volume-fake-uuid', 'OsType': 'linux', 'SpaceReserved': 'false', 'UUID': 'fake-uuid' } } NAMESPACE_WITH_METADATA = { 'handle': 'vserver_fake:/vol/fake_flexvol/volume-fake-uuid', 'name': 'volume-fake-uuid', 'size': 
20971520, 'metadata': { 'Vserver': 'vserver_fake', 'Volume': 'fake_flexvol', 'Qtree': None, 'Path': '/vol/fake_flexvol/volume-fake-uuid', 'OsType': 'linux', 'SpaceReserved': 'false', 'UUID': 'fake-uuid' } } VOLUME_REF = {'name': 'fake_vref_name', 'size': 42} FAKE_CMODE_VOLUMES = ['open123', 'mixed', 'open321'] FAKE_CMODE_POOL_MAP = { 'open123': { 'pool_name': 'open123', }, 'mixed': { 'pool_name': 'mixed', }, 'open321': { 'pool_name': 'open321', }, } FAKE_CLUSTER_INFO = { 'name': 'jayaanancluster-1', '_links': { 'self': { 'href': '/api/cluster' } } } FAKE_CLUSTER_POOL_MAP = { 'jayaanancluster-1': {'pool_name': 'jayaanancluster-1'} } FILE_LIST = ['file1', 'file2', 'file3'] FAKE_LUN = netapp_api.NaElement.create_node_with_children( 'lun-info', **{'alignment': 'indeterminate', 'block-size': '512', 'comment': '', 'creation-timestamp': '1354536362', 'is-space-alloc-enabled': 'false', 'is-space-reservation-enabled': 'true', 'mapped': 'false', 'multiprotocol-type': 'linux', 'online': 'true', 'path': '/vol/fakeLUN/fakeLUN', 'prefix-size': '0', 'qtree': '', 'read-only': 'false', 'serial-number': '2FfGI$APyN68', 'share-state': 'none', 'size': '20971520', 'size-used': '0', 'staging': 'false', 'suffix-size': '0', 'uuid': 'cec1f3d7-3d41-11e2-9cf4-123478563412', 'volume': 'fakeLUN', 'vserver': 'fake_vserver'}) FAKE_LUN_GET_ITER_RESULT = [ { 'Vserver': 'fake_vserver', 'Volume': 'fake_volume', 'Size': 123, 'Qtree': 'fake_qtree', 'Path': 'fake_path', 'OsType': 'fake_os', 'SpaceReserved': 'true', 'UUID': 'fake-uuid', }, ] CG_VOLUME_NAME = 'fake_cg_volume' CG_GROUP_NAME = 'fake_consistency_group' CG_POOL_NAME = 'cdot' SOURCE_CG_VOLUME_NAME = 'fake_source_cg_volume' CG_VOLUME_ID = 'fake_cg_volume_id' CG_VOLUME_SIZE = 100 SOURCE_CG_VOLUME_ID = 'fake_source_cg_volume_id' CONSISTENCY_GROUP_NAME = 'fake_cg' SOURCE_CONSISTENCY_GROUP_ID = 'fake_source_cg_id' CONSISTENCY_GROUP_ID = 'fake_cg_id' CG_SNAPSHOT_ID = 'fake_cg_snapshot_id' CG_SNAPSHOT_NAME = 'snapshot-' + CG_SNAPSHOT_ID CG_VOLUME_SNAPSHOT_ID = 'fake_cg_volume_snapshot_id' CG_LUN_METADATA = { 'OsType': None, 'Path': '/vol/aggr1/fake_cg_volume', 'SpaceReserved': 'true', 'Qtree': None, 'Volume': POOL_NAME, } SOURCE_CG_VOLUME = { 'name': SOURCE_CG_VOLUME_NAME, 'size': CG_VOLUME_SIZE, 'id': SOURCE_CG_VOLUME_ID, 'host': 'hostname@backend#cdot', 'consistencygroup_id': None, 'status': 'fake_status', } CG_VOLUME = { 'name': CG_VOLUME_NAME, 'size': 100, 'id': CG_VOLUME_ID, 'host': 'hostname@backend#' + CG_POOL_NAME, 'consistencygroup_id': CONSISTENCY_GROUP_ID, 'status': 'fake_status', } SOURCE_CONSISTENCY_GROUP = { 'id': SOURCE_CONSISTENCY_GROUP_ID, 'status': 'fake_status', } CONSISTENCY_GROUP = { 'id': CONSISTENCY_GROUP_ID, 'status': 'fake_status', 'name': CG_GROUP_NAME, } CG_CONTEXT = {} CG_SNAPSHOT = { 'id': CG_SNAPSHOT_ID, 'name': CG_SNAPSHOT_NAME, 'volume_size': CG_VOLUME_SIZE, 'consistencygroup_id': CONSISTENCY_GROUP_ID, 'status': 'fake_status', 'volume_id': 'fake_source_volume_id', } CG_VOLUME_SNAPSHOT = { 'name': CG_SNAPSHOT_NAME, 'volume_size': CG_VOLUME_SIZE, 'cgsnapshot_id': CG_SNAPSHOT_ID, 'id': CG_VOLUME_SNAPSHOT_ID, 'status': 'fake_status', 'volume_id': CG_VOLUME_ID, } AFF_SYSTEM_NODE_GET_ITER_RESPONSE = etree.XML(""" AFFA400 aff-node1 true false AFFA400 aff-node2 true false 2 """) FAS_SYSTEM_NODE_GET_ITER_RESPONSE = etree.XML(""" FAS2554 fas-node1 false false FAS2554 fas-node2 false false 2 """) HYBRID_SYSTEM_NODE_GET_ITER_RESPONSE = etree.XML(""" select-node false true FDvM300 c190-node true false AFF-C190 2 """) AFF_NODE = { 'model': 'AFFA400', 
'is_all_flash': True, 'is_all_flash_select': False, } AFF_NODE_1 = AFF_NODE.copy() AFF_NODE_1['name'] = 'aff-node1' AFF_NODE_2 = AFF_NODE.copy() AFF_NODE_2['name'] = 'aff-node2' FAS_NODE = { 'model': 'FAS2554', 'is_all_flash': False, 'is_all_flash_select': False, } FAS_NODE_1 = FAS_NODE.copy() FAS_NODE_1['name'] = 'fas-node1' FAS_NODE_2 = FAS_NODE.copy() FAS_NODE_2['name'] = 'fas-node2' SELECT_NODE = { 'model': 'FDvM300', 'is_all_flash': False, 'is_all_flash_select': True, 'name': 'select-node', } C190_NODE = { 'model': 'AFF-C190', 'is_all_flash': True, 'is_all_flash_select': False, 'name': 'c190-node', } AFF_SYSTEM_NODES_INFO = [AFF_NODE_1, AFF_NODE_2] FAS_SYSTEM_NODES_INFO = [FAS_NODE_1, FAS_NODE_2] HYBRID_SYSTEM_NODES_INFO = [SELECT_NODE, C190_NODE] SYSTEM_GET_VERSION_RESPONSE = etree.XML(""" 1395426307 true NetApp Release 9.6P2: Fri Jul 19 06:06:59 UTC 2019 9 6 0 """) VG_VOLUME_NAME = 'fake_vg_volume' VG_GROUP_NAME = 'fake_volume_group' VG_POOL_NAME = 'cdot' SOURCE_VG_VOLUME_NAME = 'fake_source_vg_volume' VG_VOLUME_ID = 'fake_vg_volume_id' VG_VOLUME_SIZE = 100 SOURCE_VG_VOLUME_ID = 'fake_source_vg_volume_id' VOLUME_GROUP_NAME = 'fake_vg' SOURCE_VOLUME_GROUP_ID = 'fake_source_vg_id' VOLUME_GROUP_ID = 'fake_vg_id' VG_SNAPSHOT_ID = 'fake_vg_snapshot_id' VG_SNAPSHOT_NAME = 'snapshot-' + VG_SNAPSHOT_ID VG_VOLUME_SNAPSHOT_ID = 'fake_vg_volume_snapshot_id' MIN_SIZE_FOR_A_LUN = '4194304' MAX_SIZE_FOR_A_LUN = '17555678822400' VG_LUN_METADATA = { 'OsType': None, 'Path': '/vol/aggr1/fake_vg_volume', 'SpaceReserved': 'true', 'Qtree': None, 'Volume': POOL_NAME, } SOURCE_VG_VOLUME = { 'name': SOURCE_VG_VOLUME_NAME, 'size': VG_VOLUME_SIZE, 'id': SOURCE_VG_VOLUME_ID, 'host': 'hostname@backend#cdot', 'volumegroup_id': None, 'status': 'fake_status', 'provider_location': PROVIDER_LOCATION, } VG_VOLUME = { 'name': VG_VOLUME_NAME, 'size': 100, 'id': VG_VOLUME_ID, 'host': 'hostname@backend#' + VG_POOL_NAME, 'volumegroup_id': VOLUME_GROUP_ID, 'status': 'fake_status', 'provider_location': PROVIDER_LOCATION, } SOURCE_VOLUME_GROUP = { 'id': SOURCE_VOLUME_GROUP_ID, 'status': 'fake_status', } VOLUME_GROUP = { 'id': VOLUME_GROUP_ID, 'status': 'fake_status', 'name': VG_GROUP_NAME, 'host': 'fake_host', } VG_CONTEXT = {} VG_SNAPSHOT = { 'id': VG_SNAPSHOT_ID, 'name': VG_SNAPSHOT_NAME, 'volume_size': VG_VOLUME_SIZE, 'volumegroup_id': VOLUME_GROUP_ID, 'status': 'fake_status', 'volume_id': 'fake_source_volume_id', 'volume': VG_VOLUME, } VG_VOLUME_SNAPSHOT = { 'name': VG_SNAPSHOT_NAME, 'volume_size': VG_VOLUME_SIZE, 'vgsnapshot_id': VG_SNAPSHOT_ID, 'id': VG_VOLUME_SNAPSHOT_ID, 'status': 'fake_status', 'volume_id': VG_VOLUME_ID, } class test_volume(object): def __getitem__(self, key): return getattr(self, key) test_volume = test_volume() test_volume.id = {'vserver': 'openstack', 'name': 'vola'} test_volume.aggr = { 'disk_type': 'SSD', 'ha_policy': 'cfo', 'junction': '/vola', 'name': 'aggr1', 'raid_type': 'raiddp', } test_volume.export = {'path': NFS_SHARE} test_volume.sis = {'dedup': False, 'compression': False} test_volume.state = { 'status': 'online', 'vserver_root': False, 'junction_active': True, } test_volume.qos = {'qos_policy_group': None} test_volume.host = 'fakehost@backbackend#fakepool' test_volume.name = 'fakename' test_volume.size = SIZE test_volume.multiattach = False class test_namespace_volume(object): def __getitem__(self, key): return getattr(self, key) test_namespace_volume = test_namespace_volume() test_namespace_volume.name = NAMESPACE_NAME test_namespace_volume.size = SIZE test_namespace_volume.id = 
VOLUME_ID test_namespace_volume.host = HOST_STRING test_namespace_volume.attach_status = DETACHED class test_snapshot(object): pass def __getitem__(self, key): return getattr(self, key) test_snapshot = test_snapshot() test_snapshot.id = fake_constants.SNAPSHOT_ID test_snapshot.name = 'snapshot-%s' % test_snapshot.id test_snapshot.volume_id = fake_constants.VOLUME_ID test_snapshot.provider_location = PROVIDER_LOCATION class test_iscsi_attachment(object): def __getattr__(self, key): return getattr(self, key) test_iscsi_attachment = test_iscsi_attachment() test_iscsi_attachment.connector = ISCSI_CONNECTOR def get_fake_net_interface_get_iter_response(): return etree.XML(""" 1
FAKE_IP
""") def get_fake_ifs(): return [{'vserver': VSERVER_NAME}] AFF_SYSTEM_NODE_GET_ITER_RESPONSE_REST = { "records": [ { "uuid": "9eff6c76-fc13-11ea-8799-525400", "name": "aff-node1", "model": "AFFA400", "is_all_flash_optimized": True, "is_all_flash_select_optimized": False, "_links": { "self": { "href": "/api/cluster/nodes/9eff6c76-fc13-11ea-8799-525400" } } }, { "uuid": "9eff6c76-fc13-11ea-8799-52540006bba9", "name": "aff-node2", "model": "AFFA400", "is_all_flash_optimized": True, "is_all_flash_select_optimized": False, "_links": { "self": { "href": "/api/cluster/nodes/9eff6c76-fc13-11ea-8799-525400" } } } ], "num_records": 2, "_links": { "self": { "href": "/api/cluster/nodes?fields=model,name," "is_all_flash_optimized,is_all_flash_select_optimized" } } } FAS_SYSTEM_NODE_GET_ITER_RESPONSE_REST = { "records": [ { "uuid": "9eff6c76-fc13-11ea-8799-52540006bba9", "name": "fas-node1", "model": "FAS2554", "is_all_flash_optimized": False, "is_all_flash_select_optimized": False, "_links": { "self": { "href": "/api/cluster/nodes/9eff6c76-fc13-11ea-8799-525400" } } }, { "uuid": "9eff6c76-fc13-11ea-8799-52540006bba9", "name": "fas-node2", "model": "FAS2554", "is_all_flash_optimized": False, "is_all_flash_select_optimized": False, "_links": { "self": { "href": "/api/cluster/nodes/9eff6c76-fc13-11ea-8799-525400" } } } ], "num_records": 2, "_links": { "self": { "href": "/api/cluster/nodes?fields=model,name," "is_all_flash_optimized,is_all_flash_select_optimized" } } } HYBRID_SYSTEM_NODE_GET_ITER_RESPONSE_REST = { "records": [ { "uuid": "9eff6c76-fc13-11ea-8799-52540006bba9", "name": "select-node", "model": "FDvM300", "is_all_flash_optimized": False, "is_all_flash_select_optimized": True, "_links": { "self": { "href": "/api/cluster/nodes/9eff6c76-fc13-11ea-8799-525400" } } }, { "uuid": "9eff6c76-fc13-11ea-8799-52540006bba9", "name": "c190-node", "model": "AFF-C190", "is_all_flash_optimized": True, "is_all_flash_select_optimized": False, "_links": { "self": { "href": "/api/cluster/nodes/9eff6c76-fc13-11ea-8799-525400" } } } ], "num_records": 2, "_links": { "self": { "href": "/api/cluster/nodes?fields=model,name," "is_all_flash_optimized,is_all_flash_select_optimized" } } } QOS_POLICY_BY_NAME_RESPONSE_REST = { "records": [ { "uuid": "9eff6c76-fc13-11ea-8799-52540006bba9", "name": "openstack-cd-uuid", "_links": { "self": { "href": "/api/storage/qos/policies/" "9eff6c76-fc13-11ea-8799-52540006bba9" } } } ], "num_records": 1, "_links": { "self": { "href": "/api/storage/qos/policies?fields=name" } } } QOS_SPECS_REST = {} MAX_THROUGHPUT_REST = '21734278' MIN_IOPS_REST = '256' MAX_IOPS_REST = '512' MAX_BPS_REST = '1' QOS_POLICY_GROUP_INFO_LEGACY_REST = { 'legacy': 'legacy-' + QOS_POLICY_GROUP_NAME, 'spec': None, } QOS_POLICY_GROUP_SPEC_REST = { 'min_throughput': MIN_IOPS_REST, 'max_throughput': MAX_IOPS_REST, 'policy_name': QOS_POLICY_GROUP_NAME, } QOS_POLICY_GROUP_API_ARGS_REST = { 'name': QOS_POLICY_GROUP_NAME, 'svm': { 'name': VSERVER_NAME }, 'fixed': { 'max_throughput_iops': int(MAX_IOPS_REST), 'min_throughput_iops': int(MIN_IOPS_REST) } } QOS_POLICY_GROUP_API_ARGS_REST_BPS = { 'name': QOS_POLICY_GROUP_NAME, 'svm': { 'name': VSERVER_NAME }, 'fixed': { 'max_throughput_mbps': int(MAX_BPS_REST), } } QOS_POLICY_GROUP_SPEC_MAX_REST = { 'max_throughput': MAX_THROUGHPUT_REST, 'policy_name': QOS_POLICY_GROUP_NAME, } EXPECTED_IOPS_PER_GB_REST = '128' PEAK_IOPS_PER_GB_REST = '512' PEAK_IOPS_ALLOCATION_REST = 'used-space' EXPECTED_IOPS_ALLOCATION_REST = 'used-space' ABSOLUTE_MIN_IOPS_REST = '75' BLOCK_SIZE_REST = 'ANY' 
ADAPTIVE_QOS_SPEC_REST = { 'policy_name': QOS_POLICY_GROUP_NAME, 'expected_iops': EXPECTED_IOPS_PER_GB_REST, 'expected_iops_allocation': EXPECTED_IOPS_ALLOCATION_REST, 'peak_iops': PEAK_IOPS_PER_GB_REST, 'peak_iops_allocation': PEAK_IOPS_ALLOCATION_REST, 'absolute_min_iops': ABSOLUTE_MIN_IOPS_REST, 'block_size': BLOCK_SIZE_REST, } ADAPTIVE_QOS_API_ARGS_REST = { 'name': QOS_POLICY_GROUP_NAME, 'svm': { 'name': VSERVER_NAME }, 'adaptive': { 'absolute_min_iops': int(ABSOLUTE_MIN_IOPS_REST), 'expected_iops': int(EXPECTED_IOPS_PER_GB_REST), 'expected_iops_allocation': EXPECTED_IOPS_ALLOCATION_REST, 'peak_iops': int(PEAK_IOPS_PER_GB_REST), 'peak_iops_allocation': PEAK_IOPS_ALLOCATION_REST, 'block_size': BLOCK_SIZE_REST, } } QOS_POLICY_GROUP_INFO_REST = { 'legacy': None, 'spec': QOS_POLICY_GROUP_SPEC_REST} QOS_POLICY_GROUP_INFO_MAX_REST = { 'legacy': None, 'spec': QOS_POLICY_GROUP_SPEC_MAX_REST} ADAPTIVE_QOS_POLICY_GROUP_INFO_REST = { 'legacy': None, 'spec': ADAPTIVE_QOS_SPEC_REST, } REST_FIELDS = 'uuid,name,style' SUBSYSTEM = 'openstack-fake-subsystem' MAPPED_SUBSYSTEM = 'openstack-fake-mapped_subsystem' HOST_NQN = 'nqn.1992-01.example.com:string' TARGET_NQN = 'nqn.1992-01.example.com:target' ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315577.2951202 cinder-27.0.0/cinder/tests/unit/volume/drivers/netapp/dataontap/performance/0000775000175000017500000000000000000000000027204 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/volume/drivers/netapp/dataontap/performance/__init__.py0000664000175000017500000000000000000000000031303 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/volume/drivers/netapp/dataontap/performance/fakes.py0000664000175000017500000005331300000000000030654 0ustar00zuulzuul00000000000000# Copyright (c) 2016 Clinton Knight # All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
NODE = 'cluster1-01' COUNTERS_T1 = [ { 'node-name': 'cluster1-01', 'instance-uuid': 'cluster1-01:kernel:system', 'avg_processor_busy': '29078861388', 'instance-name': 'system', 'timestamp': '1453573776', }, { 'node-name': 'cluster1-01', 'instance-uuid': 'cluster1-01:kernel:system', 'cpu_elapsed_time': '1063283283681', 'instance-name': 'system', 'timestamp': '1453573776', }, { 'node-name': 'cluster1-01', 'instance-uuid': 'cluster1-01:kernel:system', 'cpu_elapsed_time1': '1063283283681', 'instance-name': 'system', 'timestamp': '1453573776', }, { 'cp_phase_times:p2a_snap': '714', 'cp_phase_times:p4_finish': '14897', 'cp_phase_times:setup': '581', 'cp_phase_times:p2a_dlog1': '6019', 'cp_phase_times:p2a_dlog2': '2328', 'cp_phase_times:p2v_cont': '2479', 'cp_phase_times:p2v_volinfo': '1138', 'cp_phase_times:p2v_bm': '3484', 'cp_phase_times:p2v_fsinfo': '2031', 'cp_phase_times:p2a_inofile': '356', 'cp_phase_times': '581,5007,1840,9832,498,0,839,799,1336,2031,0,377,' '427,1058,354,3484,5135,1460,1138,2479,356,1373' ',6019,9,2328,2257,229,493,1275,0,6059,714,530215,' '21603833,0,0,3286,11075940,22001,14897,36', 'cp_phase_times:p2v_dlog2': '377', 'instance-name': 'wafl', 'cp_phase_times:p3_wait': '0', 'cp_phase_times:p2a_bm': '6059', 'cp_phase_times:p1_quota': '498', 'cp_phase_times:p2v_inofile': '839', 'cp_phase_times:p2a_refcount': '493', 'cp_phase_times:p2a_fsinfo': '2257', 'cp_phase_times:p2a_hyabc': '0', 'cp_phase_times:p2a_volinfo': '530215', 'cp_phase_times:pre_p0': '5007', 'cp_phase_times:p2a_hya': '9', 'cp_phase_times:p0_snap_del': '1840', 'cp_phase_times:p2a_ino': '1373', 'cp_phase_times:p2v_df_scores_sub': '354', 'cp_phase_times:p2v_ino_pub': '799', 'cp_phase_times:p2a_ipu_bitmap_grow': '229', 'cp_phase_times:p2v_refcount': '427', 'timestamp': '1453573776', 'cp_phase_times:p2v_dlog1': '0', 'cp_phase_times:p2_finish': '0', 'cp_phase_times:p1_clean': '9832', 'node-name': 'cluster1-01', 'instance-uuid': 'cluster1-01:kernel:wafl', 'cp_phase_times:p3a_volinfo': '11075940', 'cp_phase_times:p2a_topaa': '1275', 'cp_phase_times:p2_flush': '21603833', 'cp_phase_times:p2v_df_scores': '1460', 'cp_phase_times:ipu_disk_add': '0', 'cp_phase_times:p2v_snap': '5135', 'cp_phase_times:p5_finish': '36', 'cp_phase_times:p2v_ino_pri': '1336', 'cp_phase_times:p3v_volinfo': '3286', 'cp_phase_times:p2v_topaa': '1058', 'cp_phase_times:p3_finish': '22001', }, { 'node-name': 'cluster1-01', 'instance-uuid': 'cluster1-01:kernel:wafl', 'total_cp_msecs': '33309624', 'instance-name': 'wafl', 'timestamp': '1453573776', }, { 'domain_busy:kahuna': '2712467226', 'timestamp': '1453573777', 'domain_busy:cifs': '434036', 'domain_busy:raid_exempt': '28', 'node-name': 'cluster1-01', 'instance-uuid': 'cluster1-01:kernel:processor0', 'domain_busy:target': '6460782', 'domain_busy:nwk_exempt': '20', 'domain_busy:raid': '722094140', 'domain_busy:storage': '2253156562', 'instance-name': 'processor0', 'domain_busy:cluster': '34', 'domain_busy:wafl_xcleaner': '51275254', 'domain_busy:wafl_exempt': '1243553699', 'domain_busy:protocol': '54', 'domain_busy': '1028851855595,2712467226,2253156562,5688808118,' '722094140,28,6460782,59,434036,1243553699,51275254,' '61237441,34,54,11,20,5254181873,13656398235,452215', 'domain_busy:nwk_legacy': '5254181873', 'domain_busy:dnscache': '59', 'domain_busy:exempt': '5688808118', 'domain_busy:hostos': '13656398235', 'domain_busy:sm_exempt': '61237441', 'domain_busy:nwk_exclusive': '11', 'domain_busy:idle': '1028851855595', 'domain_busy:ssan_exempt': '452215', }, { 'node-name': 'cluster1-01', 
'instance-uuid': 'cluster1-01:kernel:processor0', 'processor_elapsed_time': '1063283843318', 'instance-name': 'processor0', 'timestamp': '1453573777', }, { 'domain_busy:kahuna': '1978024846', 'timestamp': '1453573777', 'domain_busy:cifs': '318584', 'domain_busy:raid_exempt': '0', 'node-name': 'cluster1-01', 'instance-uuid': 'cluster1-01:kernel:processor1', 'domain_busy:target': '3330956', 'domain_busy:nwk_exempt': '0', 'domain_busy:raid': '722235930', 'domain_busy:storage': '1498890708', 'instance-name': 'processor1', 'domain_busy:cluster': '0', 'domain_busy:wafl_xcleaner': '50122685', 'domain_busy:wafl_exempt': '1265921369', 'domain_busy:protocol': '0', 'domain_busy': '1039557880852,1978024846,1498890708,3734060289,' '722235930,0,3330956,0,318584,1265921369,50122685,' '36417362,0,0,0,0,2815252976,10274810484,393451', 'domain_busy:nwk_legacy': '2815252976', 'domain_busy:dnscache': '0', 'domain_busy:exempt': '3734060289', 'domain_busy:hostos': '10274810484', 'domain_busy:sm_exempt': '36417362', 'domain_busy:nwk_exclusive': '0', 'domain_busy:idle': '1039557880852', 'domain_busy:ssan_exempt': '393451', }, { 'node-name': 'cluster1-01', 'instance-uuid': 'cluster1-01:kernel:processor1', 'processor_elapsed_time': '1063283843321', 'instance-name': 'processor1', 'timestamp': '1453573777', } ] COUNTERS_T2 = [ { 'node-name': 'cluster1-01', 'instance-uuid': 'cluster1-01:kernel:system', 'avg_processor_busy': '29081228905', 'instance-name': 'system', 'timestamp': '1453573834', }, { 'node-name': 'cluster1-01', 'instance-uuid': 'cluster1-01:kernel:system', 'cpu_elapsed_time': '1063340792148', 'instance-name': 'system', 'timestamp': '1453573834', }, { 'node-name': 'cluster1-01', 'instance-uuid': 'cluster1-01:kernel:system', 'cpu_elapsed_time1': '1063340792148', 'instance-name': 'system', 'timestamp': '1453573834', }, { 'cp_phase_times:p2a_snap': '714', 'cp_phase_times:p4_finish': '14897', 'cp_phase_times:setup': '581', 'cp_phase_times:p2a_dlog1': '6019', 'cp_phase_times:p2a_dlog2': '2328', 'cp_phase_times:p2v_cont': '2479', 'cp_phase_times:p2v_volinfo': '1138', 'cp_phase_times:p2v_bm': '3484', 'cp_phase_times:p2v_fsinfo': '2031', 'cp_phase_times:p2a_inofile': '356', 'cp_phase_times': '581,5007,1840,9832,498,0,839,799,1336,2031,0,377,' '427,1058,354,3484,5135,1460,1138,2479,356,1373,' '6019,9,2328,2257,229,493,1275,0,6059,714,530215,' '21604863,0,0,3286,11076392,22001,14897,36', 'cp_phase_times:p2v_dlog2': '377', 'instance-name': 'wafl', 'cp_phase_times:p3_wait': '0', 'cp_phase_times:p2a_bm': '6059', 'cp_phase_times:p1_quota': '498', 'cp_phase_times:p2v_inofile': '839', 'cp_phase_times:p2a_refcount': '493', 'cp_phase_times:p2a_fsinfo': '2257', 'cp_phase_times:p2a_hyabc': '0', 'cp_phase_times:p2a_volinfo': '530215', 'cp_phase_times:pre_p0': '5007', 'cp_phase_times:p2a_hya': '9', 'cp_phase_times:p0_snap_del': '1840', 'cp_phase_times:p2a_ino': '1373', 'cp_phase_times:p2v_df_scores_sub': '354', 'cp_phase_times:p2v_ino_pub': '799', 'cp_phase_times:p2a_ipu_bitmap_grow': '229', 'cp_phase_times:p2v_refcount': '427', 'timestamp': '1453573834', 'cp_phase_times:p2v_dlog1': '0', 'cp_phase_times:p2_finish': '0', 'cp_phase_times:p1_clean': '9832', 'node-name': 'cluster1-01', 'instance-uuid': 'cluster1-01:kernel:wafl', 'cp_phase_times:p3a_volinfo': '11076392', 'cp_phase_times:p2a_topaa': '1275', 'cp_phase_times:p2_flush': '21604863', 'cp_phase_times:p2v_df_scores': '1460', 'cp_phase_times:ipu_disk_add': '0', 'cp_phase_times:p2v_snap': '5135', 'cp_phase_times:p5_finish': '36', 'cp_phase_times:p2v_ino_pri': '1336', 
'cp_phase_times:p3v_volinfo': '3286', 'cp_phase_times:p2v_topaa': '1058', 'cp_phase_times:p3_finish': '22001', }, { 'node-name': 'cluster1-01', 'instance-uuid': 'cluster1-01:kernel:wafl', 'total_cp_msecs': '33311106', 'instance-name': 'wafl', 'timestamp': '1453573834', }, { 'domain_busy:kahuna': '2712629374', 'timestamp': '1453573834', 'domain_busy:cifs': '434036', 'domain_busy:raid_exempt': '28', 'node-name': 'cluster1-01', 'instance-uuid': 'cluster1-01:kernel:processor0', 'domain_busy:target': '6461082', 'domain_busy:nwk_exempt': '20', 'domain_busy:raid': '722136824', 'domain_busy:storage': '2253260824', 'instance-name': 'processor0', 'domain_busy:cluster': '34', 'domain_busy:wafl_xcleaner': '51277506', 'domain_busy:wafl_exempt': '1243637154', 'domain_busy:protocol': '54', 'domain_busy': '1028906640232,2712629374,2253260824,5689093500,' '722136824,28,6461082,59,434036,1243637154,51277506,' '61240335,34,54,11,20,5254491236,13657992139,452215', 'domain_busy:nwk_legacy': '5254491236', 'domain_busy:dnscache': '59', 'domain_busy:exempt': '5689093500', 'domain_busy:hostos': '13657992139', 'domain_busy:sm_exempt': '61240335', 'domain_busy:nwk_exclusive': '11', 'domain_busy:idle': '1028906640232', 'domain_busy:ssan_exempt': '452215', }, { 'node-name': 'cluster1-01', 'instance-uuid': 'cluster1-01:kernel:processor0', 'processor_elapsed_time': '1063341351916', 'instance-name': 'processor0', 'timestamp': '1453573834', }, { 'domain_busy:kahuna': '1978217049', 'timestamp': '1453573834', 'domain_busy:cifs': '318584', 'domain_busy:raid_exempt': '0', 'node-name': 'cluster1-01', 'instance-uuid': 'cluster1-01:kernel:processor1', 'domain_busy:target': '3331147', 'domain_busy:nwk_exempt': '0', 'domain_busy:raid': '722276805', 'domain_busy:storage': '1498984059', 'instance-name': 'processor1', 'domain_busy:cluster': '0', 'domain_busy:wafl_xcleaner': '50126176', 'domain_busy:wafl_exempt': '1266039846', 'domain_busy:protocol': '0', 'domain_busy': '1039613222253,1978217049,1498984059,3734279672,' '722276805,0,3331147,0,318584,1266039846,50126176,' '36419297,0,0,0,0,2815435865,10276068104,393451', 'domain_busy:nwk_legacy': '2815435865', 'domain_busy:dnscache': '0', 'domain_busy:exempt': '3734279672', 'domain_busy:hostos': '10276068104', 'domain_busy:sm_exempt': '36419297', 'domain_busy:nwk_exclusive': '0', 'domain_busy:idle': '1039613222253', 'domain_busy:ssan_exempt': '393451', }, { 'node-name': 'cluster1-01', 'instance-uuid': 'cluster1-01:kernel:processor1', 'processor_elapsed_time': '1063341351919', 'instance-name': 'processor1', 'timestamp': '1453573834', }, ] SYSTEM_INSTANCE_UUIDS = ['cluster1-01:kernel:system'] SYSTEM_INSTANCE_NAMES = ['system'] SYSTEM_COUNTERS = [ { 'node-name': 'cluster1-01', 'instance-uuid': 'cluster1-01:kernel:system', 'avg_processor_busy': '27877641199', 'instance-name': 'system', 'timestamp': '1453524928', }, { 'node-name': 'cluster1-01', 'instance-uuid': 'cluster1-01:kernel:system', 'cpu_elapsed_time': '1014438541279', 'instance-name': 'system', 'timestamp': '1453524928', }, { 'node-name': 'cluster1-01', 'instance-uuid': 'cluster1-01:kernel:system', 'cpu_elapsed_time1': '1014438541279', 'instance-name': 'system', 'timestamp': '1453524928', }, ] WAFL_INSTANCE_UUIDS = ['cluster1-01:kernel:wafl'] WAFL_INSTANCE_NAMES = ['wafl'] WAFL_COUNTERS = [ { 'cp_phase_times': '563,4844,1731,9676,469,0,821,763,1282,1937,0,359,' '418,1048,344,3344,4867,1397,1101,2380,356,1318,' '5954,9,2236,2190,228,476,1221,0,5838,696,515588,' '20542954,0,0,3122,10567367,20696,13982,36', 'node-name': 'cluster1-01', 
'instance-uuid': 'cluster1-01:kernel:wafl', 'instance-name': 'wafl', 'timestamp': '1453523339', }, { 'node-name': 'cluster1-01', 'instance-uuid': 'cluster1-01:kernel:wafl', 'total_cp_msecs': '31721222', 'instance-name': 'wafl', 'timestamp': '1453523339', }, ] WAFL_CP_PHASE_TIMES_COUNTER_INFO = { 'labels': [ 'SETUP', 'PRE_P0', 'P0_SNAP_DEL', 'P1_CLEAN', 'P1_QUOTA', 'IPU_DISK_ADD', 'P2V_INOFILE', 'P2V_INO_PUB', 'P2V_INO_PRI', 'P2V_FSINFO', 'P2V_DLOG1', 'P2V_DLOG2', 'P2V_REFCOUNT', 'P2V_TOPAA', 'P2V_DF_SCORES_SUB', 'P2V_BM', 'P2V_SNAP', 'P2V_DF_SCORES', 'P2V_VOLINFO', 'P2V_CONT', 'P2A_INOFILE', 'P2A_INO', 'P2A_DLOG1', 'P2A_HYA', 'P2A_DLOG2', 'P2A_FSINFO', 'P2A_IPU_BITMAP_GROW', 'P2A_REFCOUNT', 'P2A_TOPAA', 'P2A_HYABC', 'P2A_BM', 'P2A_SNAP', 'P2A_VOLINFO', 'P2_FLUSH', 'P2_FINISH', 'P3_WAIT', 'P3V_VOLINFO', 'P3A_VOLINFO', 'P3_FINISH', 'P4_FINISH', 'P5_FINISH', ], 'name': 'cp_phase_times', } EXPANDED_WAFL_COUNTERS = [ { 'cp_phase_times:p2a_snap': '696', 'cp_phase_times:p4_finish': '13982', 'cp_phase_times:setup': '563', 'cp_phase_times:p2a_dlog1': '5954', 'cp_phase_times:p2a_dlog2': '2236', 'cp_phase_times:p2v_cont': '2380', 'cp_phase_times:p2v_volinfo': '1101', 'cp_phase_times:p2v_bm': '3344', 'cp_phase_times:p2v_fsinfo': '1937', 'cp_phase_times:p2a_inofile': '356', 'cp_phase_times': '563,4844,1731,9676,469,0,821,763,1282,1937,0,359,' '418,1048,344,3344,4867,1397,1101,2380,356,1318,' '5954,9,2236,2190,228,476,1221,0,5838,696,515588,' '20542954,0,0,3122,10567367,20696,13982,36', 'cp_phase_times:p2v_dlog2': '359', 'instance-name': 'wafl', 'cp_phase_times:p3_wait': '0', 'cp_phase_times:p2a_bm': '5838', 'cp_phase_times:p1_quota': '469', 'cp_phase_times:p2v_inofile': '821', 'cp_phase_times:p2a_refcount': '476', 'cp_phase_times:p2a_fsinfo': '2190', 'cp_phase_times:p2a_hyabc': '0', 'cp_phase_times:p2a_volinfo': '515588', 'cp_phase_times:pre_p0': '4844', 'cp_phase_times:p2a_hya': '9', 'cp_phase_times:p0_snap_del': '1731', 'cp_phase_times:p2a_ino': '1318', 'cp_phase_times:p2v_df_scores_sub': '344', 'cp_phase_times:p2v_ino_pub': '763', 'cp_phase_times:p2a_ipu_bitmap_grow': '228', 'cp_phase_times:p2v_refcount': '418', 'timestamp': '1453523339', 'cp_phase_times:p2v_dlog1': '0', 'cp_phase_times:p2_finish': '0', 'cp_phase_times:p1_clean': '9676', 'node-name': 'cluster1-01', 'instance-uuid': 'cluster1-01:kernel:wafl', 'cp_phase_times:p3a_volinfo': '10567367', 'cp_phase_times:p2a_topaa': '1221', 'cp_phase_times:p2_flush': '20542954', 'cp_phase_times:p2v_df_scores': '1397', 'cp_phase_times:ipu_disk_add': '0', 'cp_phase_times:p2v_snap': '4867', 'cp_phase_times:p5_finish': '36', 'cp_phase_times:p2v_ino_pri': '1282', 'cp_phase_times:p3v_volinfo': '3122', 'cp_phase_times:p2v_topaa': '1048', 'cp_phase_times:p3_finish': '20696', }, { 'node-name': 'cluster1-01', 'instance-uuid': 'cluster1-01:kernel:wafl', 'total_cp_msecs': '31721222', 'instance-name': 'wafl', 'timestamp': '1453523339', }, ] PROCESSOR_INSTANCE_UUIDS = [ 'cluster1-01:kernel:processor0', 'cluster1-01:kernel:processor1', ] PROCESSOR_INSTANCE_NAMES = ['processor0', 'processor1'] PROCESSOR_COUNTERS = [ { 'node-name': 'cluster1-01', 'instance-uuid': 'cluster1-01:kernel:processor0', 'domain_busy': '980648687811,2597164534,2155400686,5443901498,' '690280568,28,6180773,59,413895,1190100947,48989575,' '58549809,34,54,11,20,5024141791,13136260754,452215', 'instance-name': 'processor0', 'timestamp': '1453524150', }, { 'node-name': 'cluster1-01', 'instance-uuid': 'cluster1-01:kernel:processor0', 'processor_elapsed_time': '1013660714257', 'instance-name': 
'processor0', 'timestamp': '1453524150', }, { 'node-name': 'cluster1-01', 'instance-uuid': 'cluster1-01:kernel:processor1', 'domain_busy': '990957980543,1891766637,1433411516,3572427934,' '691372324,0,3188648,0,305947,1211235777,47954620,' '34832715,0,0,0,0,2692084482,9834648927,393451', 'instance-name': 'processor1', 'timestamp': '1453524150', }, { 'node-name': 'cluster1-01', 'instance-uuid': 'cluster1-01:kernel:processor1', 'processor_elapsed_time': '1013660714261', 'instance-name': 'processor1', 'timestamp': '1453524150', }, ] PROCESSOR_DOMAIN_BUSY_COUNTER_INFO = { 'labels': [ 'idle', 'kahuna', 'storage', 'exempt', 'raid', 'raid_exempt', 'target', 'dnscache', 'cifs', 'wafl_exempt', 'wafl_xcleaner', 'sm_exempt', 'cluster', 'protocol', 'nwk_exclusive', 'nwk_exempt', 'nwk_legacy', 'hostOS', 'ssan_exempt', ], 'name': 'domain_busy', } EXPANDED_PROCESSOR_COUNTERS = [ { 'domain_busy:kahuna': '2597164534', 'timestamp': '1453524150', 'domain_busy:cifs': '413895', 'domain_busy:raid_exempt': '28', 'node-name': 'cluster1-01', 'instance-uuid': 'cluster1-01:kernel:processor0', 'domain_busy:target': '6180773', 'domain_busy:nwk_exempt': '20', 'domain_busy:raid': '690280568', 'domain_busy:storage': '2155400686', 'instance-name': 'processor0', 'domain_busy:cluster': '34', 'domain_busy:wafl_xcleaner': '48989575', 'domain_busy:wafl_exempt': '1190100947', 'domain_busy:protocol': '54', 'domain_busy': '980648687811,2597164534,2155400686,5443901498,' '690280568,28,6180773,59,413895,1190100947,48989575,' '58549809,34,54,11,20,5024141791,13136260754,452215', 'domain_busy:nwk_legacy': '5024141791', 'domain_busy:dnscache': '59', 'domain_busy:exempt': '5443901498', 'domain_busy:hostos': '13136260754', 'domain_busy:sm_exempt': '58549809', 'domain_busy:nwk_exclusive': '11', 'domain_busy:idle': '980648687811', 'domain_busy:ssan_exempt': '452215', }, { 'node-name': 'cluster1-01', 'instance-uuid': 'cluster1-01:kernel:processor0', 'processor_elapsed_time': '1013660714257', 'instance-name': 'processor0', 'timestamp': '1453524150', }, { 'domain_busy:kahuna': '1891766637', 'timestamp': '1453524150', 'domain_busy:cifs': '305947', 'domain_busy:raid_exempt': '0', 'node-name': 'cluster1-01', 'instance-uuid': 'cluster1-01:kernel:processor1', 'domain_busy:target': '3188648', 'domain_busy:nwk_exempt': '0', 'domain_busy:raid': '691372324', 'domain_busy:storage': '1433411516', 'instance-name': 'processor1', 'domain_busy:cluster': '0', 'domain_busy:wafl_xcleaner': '47954620', 'domain_busy:wafl_exempt': '1211235777', 'domain_busy:protocol': '0', 'domain_busy': '990957980543,1891766637,1433411516,3572427934,' '691372324,0,3188648,0,305947,1211235777,47954620,' '34832715,0,0,0,0,2692084482,9834648927,393451', 'domain_busy:nwk_legacy': '2692084482', 'domain_busy:dnscache': '0', 'domain_busy:exempt': '3572427934', 'domain_busy:hostos': '9834648927', 'domain_busy:sm_exempt': '34832715', 'domain_busy:nwk_exclusive': '0', 'domain_busy:idle': '990957980543', 'domain_busy:ssan_exempt': '393451', }, { 'node-name': 'cluster1-01', 'instance-uuid': 'cluster1-01:kernel:processor1', 'processor_elapsed_time': '1013660714261', 'instance-name': 'processor1', 'timestamp': '1453524150', }, ] ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/volume/drivers/netapp/dataontap/performance/test_perf_base.py0000664000175000017500000003542400000000000032553 0ustar00zuulzuul00000000000000# Copyright (c) 2016 Clinton Knight # All rights reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from unittest import mock import ddt from cinder import exception from cinder.tests.unit import test from cinder.tests.unit.volume.drivers.netapp.dataontap.performance \ import fakes as fake from cinder.volume.drivers.netapp.dataontap.performance import perf_base @ddt.ddt class PerformanceLibraryTestCase(test.TestCase): def setUp(self): super(PerformanceLibraryTestCase, self).setUp() with mock.patch.object(perf_base.PerformanceLibrary, '_init_counter_info'): self.zapi_client = mock.Mock() self.perf_library = perf_base.PerformanceLibrary(self.zapi_client) self.perf_library.system_object_name = 'system' self.perf_library.avg_processor_busy_base_counter_name = ( 'cpu_elapsed_time1') def test_init(self): mock_zapi_client = mock.Mock() mock_init_counter_info = self.mock_object( perf_base.PerformanceLibrary, '_init_counter_info') library = perf_base.PerformanceLibrary(mock_zapi_client) self.assertEqual(mock_zapi_client, library.zapi_client) mock_init_counter_info.assert_called_once_with() def test_init_counter_info(self): self.perf_library._init_counter_info() self.assertIsNone(self.perf_library.system_object_name) self.assertIsNone( self.perf_library.avg_processor_busy_base_counter_name) def test_get_node_utilization_kahuna_overutilized(self): mock_get_kahuna_utilization = self.mock_object( self.perf_library, '_get_kahuna_utilization', return_value=61.0) mock_get_average_cpu_utilization = self.mock_object( self.perf_library, '_get_average_cpu_utilization', return_value=25.0) result = self.perf_library._get_node_utilization('fake1', 'fake2', 'fake_node') self.assertAlmostEqual(100.0, result) mock_get_kahuna_utilization.assert_called_once_with('fake1', 'fake2') self.assertFalse(mock_get_average_cpu_utilization.called) @ddt.data({'cpu': -0.01, 'cp_time': 10000, 'poll_time': 0}, {'cpu': 1.01, 'cp_time': 0, 'poll_time': 1000}, {'cpu': 0.50, 'cp_time': 0, 'poll_time': 0}) @ddt.unpack def test_get_node_utilization_zero_time(self, cpu, cp_time, poll_time): mock_get_kahuna_utilization = self.mock_object( self.perf_library, '_get_kahuna_utilization', return_value=59.0) mock_get_average_cpu_utilization = self.mock_object( self.perf_library, '_get_average_cpu_utilization', return_value=cpu) mock_get_total_consistency_point_time = self.mock_object( self.perf_library, '_get_total_consistency_point_time', return_value=cp_time) mock_get_consistency_point_p2_flush_time = self.mock_object( self.perf_library, '_get_consistency_point_p2_flush_time', return_value=cp_time) mock_get_total_time = self.mock_object( self.perf_library, '_get_total_time', return_value=poll_time) mock_get_adjusted_consistency_point_time = self.mock_object( self.perf_library, '_get_adjusted_consistency_point_time') result = self.perf_library._get_node_utilization('fake1', 'fake2', 'fake_node') expected = max(min(100.0, 100.0 * cpu), 0) self.assertEqual(expected, result) mock_get_kahuna_utilization.assert_called_once_with('fake1', 'fake2') mock_get_average_cpu_utilization.assert_called_once_with('fake1', 
'fake2') mock_get_total_consistency_point_time.assert_called_once_with('fake1', 'fake2') mock_get_consistency_point_p2_flush_time.assert_called_once_with( 'fake1', 'fake2') mock_get_total_time.assert_called_once_with('fake1', 'fake2', 'total_cp_msecs') self.assertFalse(mock_get_adjusted_consistency_point_time.called) @ddt.data({'cpu': 0.75, 'adjusted_cp_time': 8000, 'expected': 80}, {'cpu': 0.80, 'adjusted_cp_time': 7500, 'expected': 80}, {'cpu': 0.50, 'adjusted_cp_time': 11000, 'expected': 100}) @ddt.unpack def test_get_node_utilization(self, cpu, adjusted_cp_time, expected): mock_get_kahuna_utilization = self.mock_object( self.perf_library, '_get_kahuna_utilization', return_value=59.0) mock_get_average_cpu_utilization = self.mock_object( self.perf_library, '_get_average_cpu_utilization', return_value=cpu) mock_get_total_consistency_point_time = self.mock_object( self.perf_library, '_get_total_consistency_point_time', return_value=90.0) mock_get_consistency_point_p2_flush_time = self.mock_object( self.perf_library, '_get_consistency_point_p2_flush_time', return_value=50.0) mock_get_total_time = self.mock_object( self.perf_library, '_get_total_time', return_value=10000) mock_get_adjusted_consistency_point_time = self.mock_object( self.perf_library, '_get_adjusted_consistency_point_time', return_value=adjusted_cp_time) result = self.perf_library._get_node_utilization('fake1', 'fake2', 'fake_node') self.assertEqual(expected, result) mock_get_kahuna_utilization.assert_called_once_with('fake1', 'fake2') mock_get_average_cpu_utilization.assert_called_once_with('fake1', 'fake2') mock_get_total_consistency_point_time.assert_called_once_with('fake1', 'fake2') mock_get_consistency_point_p2_flush_time.assert_called_once_with( 'fake1', 'fake2') mock_get_total_time.assert_called_once_with('fake1', 'fake2', 'total_cp_msecs') mock_get_adjusted_consistency_point_time.assert_called_once_with( 90.0, 50.0) def test_get_node_utilization_calculation_error(self): self.mock_object(self.perf_library, '_get_kahuna_utilization', return_value=59.0) self.mock_object(self.perf_library, '_get_average_cpu_utilization', return_value=25.0) self.mock_object(self.perf_library, '_get_total_consistency_point_time', return_value=90.0) self.mock_object(self.perf_library, '_get_consistency_point_p2_flush_time', return_value=50.0) self.mock_object(self.perf_library, '_get_total_time', return_value=10000) self.mock_object(self.perf_library, '_get_adjusted_consistency_point_time', side_effect=ZeroDivisionError) result = self.perf_library._get_node_utilization('fake1', 'fake2', 'fake_node') self.assertEqual(perf_base.DEFAULT_UTILIZATION, result) def test_get_kahuna_utilization(self): mock_get_performance_counter = self.mock_object( self.perf_library, '_get_performance_counter_average_multi_instance', return_value=[0.2, 0.3]) result = self.perf_library._get_kahuna_utilization('fake_t1', 'fake_t2') self.assertAlmostEqual(50.0, result) mock_get_performance_counter.assert_called_once_with( 'fake_t1', 'fake_t2', 'domain_busy:kahuna', 'processor_elapsed_time') def test_get_average_cpu_utilization(self): mock_get_performance_counter_average = self.mock_object( self.perf_library, '_get_performance_counter_average', return_value=0.45) result = self.perf_library._get_average_cpu_utilization('fake_t1', 'fake_t2') self.assertAlmostEqual(0.45, result) mock_get_performance_counter_average.assert_called_once_with( 'fake_t1', 'fake_t2', 'avg_processor_busy', 'cpu_elapsed_time1') def test_get_total_consistency_point_time(self): 
mock_get_performance_counter_delta = self.mock_object( self.perf_library, '_get_performance_counter_delta', return_value=500) result = self.perf_library._get_total_consistency_point_time( 'fake_t1', 'fake_t2') self.assertEqual(500, result) mock_get_performance_counter_delta.assert_called_once_with( 'fake_t1', 'fake_t2', 'total_cp_msecs') def test_get_consistency_point_p2_flush_time(self): mock_get_performance_counter_delta = self.mock_object( self.perf_library, '_get_performance_counter_delta', return_value=500) result = self.perf_library._get_consistency_point_p2_flush_time( 'fake_t1', 'fake_t2') self.assertEqual(500, result) mock_get_performance_counter_delta.assert_called_once_with( 'fake_t1', 'fake_t2', 'cp_phase_times:p2_flush') def test_get_total_time(self): mock_find_performance_counter_timestamp = self.mock_object( self.perf_library, '_find_performance_counter_timestamp', side_effect=[100, 105]) result = self.perf_library._get_total_time('fake_t1', 'fake_t2', 'fake_counter') self.assertEqual(5000, result) mock_find_performance_counter_timestamp.assert_has_calls([ mock.call('fake_t1', 'fake_counter'), mock.call('fake_t2', 'fake_counter')]) def test_get_adjusted_consistency_point_time(self): result = self.perf_library._get_adjusted_consistency_point_time( 500, 200) self.assertAlmostEqual(360.0, result) def test_get_performance_counter_delta(self): result = self.perf_library._get_performance_counter_delta( fake.COUNTERS_T1, fake.COUNTERS_T2, 'total_cp_msecs') self.assertEqual(1482, result) def test_get_performance_counter_average(self): result = self.perf_library._get_performance_counter_average( fake.COUNTERS_T1, fake.COUNTERS_T2, 'domain_busy:kahuna', 'processor_elapsed_time', 'processor0') self.assertAlmostEqual(0.00281954360981, result) def test_get_performance_counter_average_multi_instance(self): result = ( self.perf_library._get_performance_counter_average_multi_instance( fake.COUNTERS_T1, fake.COUNTERS_T2, 'domain_busy:kahuna', 'processor_elapsed_time')) expected = [0.002819543609809441, 0.0033421611147606135] self.assertAlmostEqual(expected, result) def test_find_performance_counter_value(self): result = self.perf_library._find_performance_counter_value( fake.COUNTERS_T1, 'domain_busy:kahuna', instance_name='processor0') self.assertEqual('2712467226', result) def test_find_performance_counter_value_not_found(self): self.assertRaises( exception.NotFound, self.perf_library._find_performance_counter_value, fake.COUNTERS_T1, 'invalid', instance_name='processor0') def test_find_performance_counter_timestamp(self): result = self.perf_library._find_performance_counter_timestamp( fake.COUNTERS_T1, 'domain_busy') self.assertEqual('1453573777', result) def test_find_performance_counter_timestamp_not_found(self): self.assertRaises( exception.NotFound, self.perf_library._find_performance_counter_timestamp, fake.COUNTERS_T1, 'invalid', instance_name='processor0') def test_expand_performance_array(self): counter_info = { 'labels': ['idle', 'kahuna', 'storage', 'exempt'], 'name': 'domain_busy', } self.zapi_client.get_performance_counter_info = mock.Mock( return_value=counter_info) counter = { 'node-name': 'cluster1-01', 'instance-uuid': 'cluster1-01:kernel:processor0', 'domain_busy': '969142314286,2567571412,2131582146,5383861579', 'instance-name': 'processor0', 'timestamp': '1453512244', } self.perf_library._expand_performance_array('wafl', 'domain_busy', counter) modified_counter = { 'node-name': 'cluster1-01', 'instance-uuid': 'cluster1-01:kernel:processor0', 'domain_busy': 
'969142314286,2567571412,2131582146,5383861579', 'instance-name': 'processor0', 'timestamp': '1453512244', 'domain_busy:idle': '969142314286', 'domain_busy:kahuna': '2567571412', 'domain_busy:storage': '2131582146', 'domain_busy:exempt': '5383861579', } self.assertEqual(modified_counter, counter) def test_get_base_counter_name(self): counter_info = { 'base-counter': 'cpu_elapsed_time', 'labels': [], 'name': 'avg_processor_busy', } self.zapi_client.get_performance_counter_info = mock.Mock( return_value=counter_info) result = self.perf_library._get_base_counter_name( 'system:constituent', 'avg_processor_busy') self.assertEqual('cpu_elapsed_time', result) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/volume/drivers/netapp/dataontap/performance/test_perf_cmode.py0000664000175000017500000004615000000000000032726 0ustar00zuulzuul00000000000000# Copyright (c) 2016 Clinton Knight # All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from unittest import mock import ddt from cinder.tests.unit import test from cinder.tests.unit.volume.drivers.netapp.dataontap.performance \ import fakes as fake from cinder.volume.drivers.netapp.dataontap.client import api as netapp_api from cinder.volume.drivers.netapp.dataontap.performance import perf_base from cinder.volume.drivers.netapp.dataontap.performance import perf_cmode @ddt.ddt class PerformanceCmodeLibraryTestCase(test.TestCase): def setUp(self): super(PerformanceCmodeLibraryTestCase, self).setUp() with mock.patch.object(perf_cmode.PerformanceCmodeLibrary, '_init_counter_info'): self.zapi_client = mock.Mock() self.perf_library = perf_cmode.PerformanceCmodeLibrary( self.zapi_client) self.perf_library.system_object_name = 'system' self.perf_library.avg_processor_busy_base_counter_name = ( 'cpu_elapsed_time1') self._set_up_fake_pools() def _set_up_fake_pools(self): self.fake_volumes = { 'pool1': { 'netapp_aggregate': 'aggr1', }, 'pool2': { 'netapp_aggregate': 'aggr2', }, 'pool3': { 'netapp_aggregate': 'aggr2', }, 'pool4': { 'netapp_aggregate': ['aggr1', 'aggr2'], } } self.fake_aggrs = set(['aggr1', 'aggr2', 'aggr3']) self.fake_nodes = set(['node1', 'node2']) self.fake_aggr_node_map = { 'aggr1': 'node1', 'aggr2': 'node2', 'aggr3': 'node2', } def test_init_counter_info_not_supported(self): self.zapi_client.features.SYSTEM_METRICS = False self.zapi_client.features.SYSTEM_CONSTITUENT_METRICS = False mock_get_base_counter_name = self.mock_object( self.perf_library, '_get_base_counter_name') self.perf_library._init_counter_info() self.assertIsNone(self.perf_library.system_object_name) self.assertIsNone( self.perf_library.avg_processor_busy_base_counter_name) self.assertFalse(mock_get_base_counter_name.called) @ddt.data({ 'system_constituent': False, 'base_counter': 'cpu_elapsed_time1', }, { 'system_constituent': True, 'base_counter': 'cpu_elapsed_time', }) @ddt.unpack def test_init_counter_info_api_error(self, system_constituent, base_counter): 
self.zapi_client.features.SYSTEM_METRICS = True self.zapi_client.features.SYSTEM_CONSTITUENT_METRICS = ( system_constituent) self.mock_object(self.perf_library, '_get_base_counter_name', side_effect=netapp_api.NaApiError) self.perf_library._init_counter_info() self.assertEqual( base_counter, self.perf_library.avg_processor_busy_base_counter_name) def test_init_counter_info_system(self): self.zapi_client.features.SYSTEM_METRICS = True self.zapi_client.features.SYSTEM_CONSTITUENT_METRICS = False mock_get_base_counter_name = self.mock_object( self.perf_library, '_get_base_counter_name', return_value='cpu_elapsed_time1') self.perf_library._init_counter_info() self.assertEqual('system', self.perf_library.system_object_name) self.assertEqual( 'cpu_elapsed_time1', self.perf_library.avg_processor_busy_base_counter_name) mock_get_base_counter_name.assert_called_once_with( 'system', 'avg_processor_busy') def test_init_counter_info_system_constituent(self): self.zapi_client.features.SYSTEM_METRICS = False self.zapi_client.features.SYSTEM_CONSTITUENT_METRICS = True mock_get_base_counter_name = self.mock_object( self.perf_library, '_get_base_counter_name', return_value='cpu_elapsed_time') self.perf_library._init_counter_info() self.assertEqual('system:constituent', self.perf_library.system_object_name) self.assertEqual( 'cpu_elapsed_time', self.perf_library.avg_processor_busy_base_counter_name) mock_get_base_counter_name.assert_called_once_with( 'system:constituent', 'avg_processor_busy') @test.testtools.skip("launchpad bug 1715915") def test_update_performance_cache(self): self.perf_library.performance_counters = { 'node1': list(range(11, 21)), 'node2': list(range(21, 31)), } mock_get_aggregates_for_pools = self.mock_object( self.perf_library, '_get_aggregates_for_pools', return_value=self.fake_aggrs) mock_get_nodes_for_aggregates = self.mock_object( self.perf_library, '_get_nodes_for_aggregates', return_value=(self.fake_nodes, self.fake_aggr_node_map)) mock_get_node_utilization_counters = self.mock_object( self.perf_library, '_get_node_utilization_counters', side_effect=[21, 31]) mock_get_node_utilization = self.mock_object( self.perf_library, '_get_node_utilization', side_effect=[25, 75]) self.perf_library.update_performance_cache(self.fake_volumes) expected_performance_counters = { 'node1': list(range(12, 22)), 'node2': list(range(22, 32)), } self.assertEqual(expected_performance_counters, self.perf_library.performance_counters) expected_pool_utilization = {'pool1': 25, 'pool2': 75, 'pool3': 75, 'pool4': perf_base.DEFAULT_UTILIZATION} self.assertEqual(expected_pool_utilization, self.perf_library.pool_utilization) mock_get_aggregates_for_pools.assert_called_once_with( self.fake_volumes) mock_get_nodes_for_aggregates.assert_called_once_with(self.fake_aggrs) mock_get_node_utilization_counters.assert_has_calls([ mock.call('node1'), mock.call('node2')]) mock_get_node_utilization.assert_has_calls([ mock.call(12, 21, 'node1'), mock.call(22, 31, 'node2')]) @test.testtools.skip("launchpad bug #1715915") def test_update_performance_cache_first_pass(self): mock_get_aggregates_for_pools = self.mock_object( self.perf_library, '_get_aggregates_for_pools', return_value=self.fake_aggrs) mock_get_nodes_for_aggregates = self.mock_object( self.perf_library, '_get_nodes_for_aggregates', return_value=(self.fake_nodes, self.fake_aggr_node_map)) mock_get_node_utilization_counters = self.mock_object( self.perf_library, '_get_node_utilization_counters', side_effect=[11, 21]) mock_get_node_utilization = self.mock_object( 
self.perf_library, '_get_node_utilization', side_effect=[25, 75]) self.perf_library.update_performance_cache(self.fake_volumes) expected_performance_counters = {'node1': [11], 'node2': [21]} self.assertEqual(expected_performance_counters, self.perf_library.performance_counters) expected_pool_utilization = { 'pool1': perf_base.DEFAULT_UTILIZATION, 'pool2': perf_base.DEFAULT_UTILIZATION, 'pool3': perf_base.DEFAULT_UTILIZATION, 'pool4': perf_base.DEFAULT_UTILIZATION, } self.assertEqual(expected_pool_utilization, self.perf_library.pool_utilization) mock_get_aggregates_for_pools.assert_called_once_with( self.fake_volumes) mock_get_nodes_for_aggregates.assert_called_once_with(self.fake_aggrs) mock_get_node_utilization_counters.assert_has_calls([ mock.call('node1'), mock.call('node2')]) self.assertFalse(mock_get_node_utilization.called) def test_update_performance_cache_unknown_nodes(self): self.perf_library.performance_counters = { 'node1': range(11, 21), 'node2': range(21, 31), } mock_get_aggregates_for_pools = self.mock_object( self.perf_library, '_get_aggregates_for_pools', return_value=self.fake_aggrs) mock_get_nodes_for_aggregates = self.mock_object( self.perf_library, '_get_nodes_for_aggregates', return_value=(set(), {})) mock_get_node_utilization_counters = self.mock_object( self.perf_library, '_get_node_utilization_counters', side_effect=[11, 21]) mock_get_node_utilization = self.mock_object( self.perf_library, '_get_node_utilization', side_effect=[25, 75]) self.perf_library.update_performance_cache(self.fake_volumes) expected_performance_counters = { 'node1': range(11, 21), 'node2': range(21, 31), } self.assertEqual(expected_performance_counters, self.perf_library.performance_counters) expected_pool_utilization = { 'pool1': perf_base.DEFAULT_UTILIZATION, 'pool2': perf_base.DEFAULT_UTILIZATION, 'pool3': perf_base.DEFAULT_UTILIZATION, 'pool4': perf_base.DEFAULT_UTILIZATION, } self.assertEqual(expected_pool_utilization, self.perf_library.pool_utilization) mock_get_aggregates_for_pools.assert_called_once_with( self.fake_volumes) mock_get_nodes_for_aggregates.assert_called_once_with(self.fake_aggrs) self.assertFalse(mock_get_node_utilization_counters.called) self.assertFalse(mock_get_node_utilization.called) def test_update_performance_cache_counters_unavailable(self): self.perf_library.performance_counters = { 'node1': range(11, 21), 'node2': range(21, 31), } mock_get_aggregates_for_pools = self.mock_object( self.perf_library, '_get_aggregates_for_pools', return_value=self.fake_aggrs) mock_get_nodes_for_aggregates = self.mock_object( self.perf_library, '_get_nodes_for_aggregates', return_value=(self.fake_nodes, self.fake_aggr_node_map)) mock_get_node_utilization_counters = self.mock_object( self.perf_library, '_get_node_utilization_counters', side_effect=[None, None]) mock_get_node_utilization = self.mock_object( self.perf_library, '_get_node_utilization', side_effect=[25, 75]) self.perf_library.update_performance_cache(self.fake_volumes) expected_performance_counters = { 'node1': range(11, 21), 'node2': range(21, 31), } self.assertEqual(expected_performance_counters, self.perf_library.performance_counters) expected_pool_utilization = { 'pool1': perf_base.DEFAULT_UTILIZATION, 'pool2': perf_base.DEFAULT_UTILIZATION, 'pool3': perf_base.DEFAULT_UTILIZATION, 'pool4': perf_base.DEFAULT_UTILIZATION, } self.assertEqual(expected_pool_utilization, self.perf_library.pool_utilization) mock_get_aggregates_for_pools.assert_called_once_with( self.fake_volumes) 
mock_get_nodes_for_aggregates.assert_called_once_with(self.fake_aggrs) mock_get_node_utilization_counters.assert_has_calls([ mock.call('node1'), mock.call('node2')], any_order=True) self.assertFalse(mock_get_node_utilization.called) def test_update_performance_cache_not_supported(self): self.zapi_client.features.SYSTEM_METRICS = False self.zapi_client.features.SYSTEM_CONSTITUENT_METRICS = False mock_get_aggregates_for_pools = self.mock_object( self.perf_library, '_get_aggregates_for_pools') self.perf_library.update_performance_cache(self.fake_volumes) expected_performance_counters = {} self.assertEqual(expected_performance_counters, self.perf_library.performance_counters) expected_pool_utilization = {} self.assertEqual(expected_pool_utilization, self.perf_library.pool_utilization) self.assertFalse(mock_get_aggregates_for_pools.called) @ddt.data({'pool': 'pool1', 'expected': 10.0}, {'pool': 'pool3', 'expected': perf_base.DEFAULT_UTILIZATION}) @ddt.unpack def test_get_node_utilization_for_pool(self, pool, expected): self.perf_library.pool_utilization = {'pool1': 10.0, 'pool2': 15.0} result = self.perf_library.get_node_utilization_for_pool(pool) self.assertAlmostEqual(expected, result) def test__update_for_failover(self): self.mock_object(self.perf_library, 'update_performance_cache') mock_client = mock.Mock(name='FAKE_ZAPI_CLIENT') self.perf_library._update_for_failover(mock_client, self.fake_volumes) self.assertEqual(mock_client, self.perf_library.zapi_client) self.perf_library.update_performance_cache.assert_called_once_with( self.fake_volumes) def test_get_aggregates_for_pools(self): result = self.perf_library._get_aggregates_for_pools(self.fake_volumes) expected_aggregate_names = set(['aggr1', 'aggr2']) self.assertEqual(expected_aggregate_names, result) def test_get_nodes_for_aggregates(self): aggregate_names = ['aggr1', 'aggr2', 'aggr3'] aggregate_nodes = ['node1', 'node2', 'node2'] mock_get_node_for_aggregate = self.mock_object( self.zapi_client, 'get_node_for_aggregate', side_effect=aggregate_nodes) result = self.perf_library._get_nodes_for_aggregates(aggregate_names) self.assertEqual(2, len(result)) result_node_names, result_aggr_node_map = result expected_node_names = set(['node1', 'node2']) expected_aggr_node_map = dict(zip(aggregate_names, aggregate_nodes)) self.assertEqual(expected_node_names, result_node_names) self.assertEqual(expected_aggr_node_map, result_aggr_node_map) mock_get_node_for_aggregate.assert_has_calls([ mock.call('aggr1'), mock.call('aggr2'), mock.call('aggr3')]) def test_get_node_utilization_counters(self): mock_get_node_utilization_system_counters = self.mock_object( self.perf_library, '_get_node_utilization_system_counters', return_value=['A', 'B', 'C']) mock_get_node_utilization_wafl_counters = self.mock_object( self.perf_library, '_get_node_utilization_wafl_counters', return_value=['D', 'E', 'F']) mock_get_node_utilization_processor_counters = self.mock_object( self.perf_library, '_get_node_utilization_processor_counters', return_value=['G', 'H', 'I']) result = self.perf_library._get_node_utilization_counters(fake.NODE) expected = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I'] self.assertEqual(expected, result) mock_get_node_utilization_system_counters.assert_called_once_with( fake.NODE) mock_get_node_utilization_wafl_counters.assert_called_once_with( fake.NODE) mock_get_node_utilization_processor_counters.assert_called_once_with( fake.NODE) def test_get_node_utilization_counters_api_error(self): self.mock_object(self.perf_library, 
'_get_node_utilization_system_counters', side_effect=netapp_api.NaApiError) result = self.perf_library._get_node_utilization_counters(fake.NODE) self.assertIsNone(result) def test_get_node_utilization_system_counters(self): mock_get_performance_instance_uuids = self.mock_object( self.zapi_client, 'get_performance_instance_uuids', return_value=fake.SYSTEM_INSTANCE_UUIDS) mock_get_performance_counters = self.mock_object( self.zapi_client, 'get_performance_counters', return_value=fake.SYSTEM_COUNTERS) result = self.perf_library._get_node_utilization_system_counters( fake.NODE) self.assertEqual(fake.SYSTEM_COUNTERS, result) mock_get_performance_instance_uuids.assert_called_once_with( 'system', fake.NODE) mock_get_performance_counters.assert_called_once_with( 'system', fake.SYSTEM_INSTANCE_UUIDS, ['avg_processor_busy', 'cpu_elapsed_time1', 'cpu_elapsed_time']) def test_get_node_utilization_wafl_counters(self): mock_get_performance_instance_uuids = self.mock_object( self.zapi_client, 'get_performance_instance_uuids', return_value=fake.WAFL_INSTANCE_UUIDS) mock_get_performance_counters = self.mock_object( self.zapi_client, 'get_performance_counters', return_value=fake.WAFL_COUNTERS) mock_get_performance_counter_info = self.mock_object( self.zapi_client, 'get_performance_counter_info', return_value=fake.WAFL_CP_PHASE_TIMES_COUNTER_INFO) result = self.perf_library._get_node_utilization_wafl_counters( fake.NODE) self.assertEqual(fake.EXPANDED_WAFL_COUNTERS, result) mock_get_performance_instance_uuids.assert_called_once_with( 'wafl', fake.NODE) mock_get_performance_counters.assert_called_once_with( 'wafl', fake.WAFL_INSTANCE_UUIDS, ['total_cp_msecs', 'cp_phase_times']) mock_get_performance_counter_info.assert_called_once_with( 'wafl', 'cp_phase_times') def test_get_node_utilization_processor_counters(self): mock_get_performance_instance_uuids = self.mock_object( self.zapi_client, 'get_performance_instance_uuids', return_value=fake.PROCESSOR_INSTANCE_UUIDS) mock_get_performance_counters = self.mock_object( self.zapi_client, 'get_performance_counters', return_value=fake.PROCESSOR_COUNTERS) self.mock_object( self.zapi_client, 'get_performance_counter_info', return_value=fake.PROCESSOR_DOMAIN_BUSY_COUNTER_INFO) result = self.perf_library._get_node_utilization_processor_counters( fake.NODE) self.assertEqual(fake.EXPANDED_PROCESSOR_COUNTERS, result) mock_get_performance_instance_uuids.assert_called_once_with( 'processor', fake.NODE) mock_get_performance_counters.assert_called_once_with( 'processor', fake.PROCESSOR_INSTANCE_UUIDS, ['domain_busy', 'processor_elapsed_time']) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/volume/drivers/netapp/dataontap/test_block_base.py0000664000175000017500000026064700000000000030417 0ustar00zuulzuul00000000000000# Copyright (c) 2014 Alex Meade. All rights reserved. # Copyright (c) 2014 Clinton Knight. All rights reserved. # Copyright (c) 2014 Andrew Kerr. All rights reserved. # Copyright (c) 2015 Tom Barron. All rights reserved. # Copyright (c) 2015 Goutham Pacha Ravi. All rights reserved. # Copyright (c) 2015 Dustin Schoenbrun. All rights reserved. # Copyright (c) 2016 Chuck Fouts. All rights reserved. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Mock unit tests for the NetApp block storage library""" from concurrent.futures import ThreadPoolExecutor import copy import itertools from unittest import mock import uuid import ddt from oslo_log import versionutils from oslo_utils import units from cinder import context from cinder import exception from cinder.objects import fields from cinder.tests.unit import fake_volume from cinder.tests.unit import test from cinder.tests.unit.volume.drivers.netapp.dataontap import fakes as fake import cinder.tests.unit.volume.drivers.netapp.fakes as na_fakes from cinder.volume.drivers.netapp.dataontap import block_base from cinder.volume.drivers.netapp.dataontap.client import api as netapp_api from cinder.volume.drivers.netapp.dataontap.utils import loopingcalls from cinder.volume.drivers.netapp import utils as na_utils from cinder.volume import volume_utils @ddt.ddt class NetAppBlockStorageLibraryTestCase(test.TestCase): def setUp(self): super(NetAppBlockStorageLibraryTestCase, self).setUp() kwargs = { 'configuration': self.get_config_base(), 'host': 'openstack@netappblock', } self.library = block_base.NetAppBlockStorageLibrary( 'driver', 'protocol', **kwargs) self.library.zapi_client = mock.Mock() self.zapi_client = self.library.zapi_client self.mock_request = mock.Mock() self.ctxt = context.RequestContext('fake', 'fake', auth_token=True) def get_config_base(self): return na_fakes.create_configuration() @mock.patch.object(versionutils, 'report_deprecated_feature') def test_get_reserved_percentage_default_multipler(self, mock_report): default = 1.2 reserved_percentage = 20.0 self.library.configuration.netapp_size_multiplier = default self.library.configuration.reserved_percentage = reserved_percentage result = self.library._get_reserved_percentage() self.assertEqual(reserved_percentage, result) self.assertFalse(mock_report.called) @mock.patch.object(versionutils, 'report_deprecated_feature') def test_get_reserved_percentage(self, mock_report): multiplier = 2.0 self.library.configuration.netapp_size_multiplier = multiplier result = self.library._get_reserved_percentage() reserved_ratio = round(1 - (1 / multiplier), 2) reserved_percentage = 100 * int(reserved_ratio) self.assertEqual(reserved_percentage, result) msg = ('The "netapp_size_multiplier" configuration option is ' 'deprecated and will be removed in the Mitaka release. ' 'Please set "reserved_percentage = %d" instead.' 
% result) mock_report.assert_called_once_with(block_base.LOG, msg) @mock.patch.object(block_base.NetAppBlockStorageLibrary, '_get_lun_attr', mock.Mock(return_value={'Volume': 'FAKE_CMODE_VOL1'})) def test_get_pool(self): pool = self.library.get_pool({'name': 'volume-fake-uuid'}) self.assertEqual('FAKE_CMODE_VOL1', pool) @mock.patch.object(block_base.NetAppBlockStorageLibrary, '_get_lun_attr', mock.Mock(return_value=None)) def test_get_pool_no_metadata(self): pool = self.library.get_pool({'name': 'volume-fake-uuid'}) self.assertIsNone(pool) @mock.patch.object(block_base.NetAppBlockStorageLibrary, '_get_lun_attr', mock.Mock(return_value=dict())) def test_get_pool_volume_unknown(self): pool = self.library.get_pool({'name': 'volume-fake-uuid'}) self.assertIsNone(pool) def test_create_volume(self): volume_size_in_bytes = int(fake.SIZE) * units.Gi self.mock_object(na_utils, 'get_volume_extra_specs', return_value={}) self.mock_object(na_utils, 'log_extra_spec_warnings') self.mock_object(block_base, 'LOG') self.mock_object(volume_utils, 'extract_host', return_value=fake.POOL_NAME) self.mock_object(self.library, '_setup_qos_for_volume', return_value=fake.QOS_POLICY_GROUP_INFO) self.mock_object(self.library, '_create_lun') self.mock_object(self.library, '_create_lun_handle') self.mock_object(self.library, '_add_lun_to_table') self.mock_object(self.library, '_mark_qos_policy_group_for_deletion') self.mock_object(self.library, '_get_volume_model_update') self.library.create_volume(fake.VOLUME) self.library._create_lun.assert_called_once_with( fake.POOL_NAME, fake.LUN_NAME, volume_size_in_bytes, fake.LUN_METADATA, fake.QOS_POLICY_GROUP_NAME, False) self.library._get_volume_model_update.assert_called_once_with( fake.VOLUME) self.assertEqual( 0, self.library. _mark_qos_policy_group_for_deletion.call_count) self.assertEqual(0, block_base.LOG.error.call_count) def test_create_volume_space_allocation_extra_spec_false(self): volume_size_in_bytes = int(fake.SIZE) * units.Gi self.mock_object(na_utils, 'get_volume_extra_specs', return_value={ 'netapp:space_allocation': ' False' } ) self.mock_object(na_utils, 'log_extra_spec_warnings') self.mock_object(block_base, 'LOG') self.mock_object(volume_utils, 'extract_host', return_value=fake.POOL_NAME) self.mock_object(self.library, '_setup_qos_for_volume', return_value=fake.QOS_POLICY_GROUP_INFO) self.mock_object(self.library, '_create_lun') self.mock_object(self.library, '_create_lun_handle') self.mock_object(self.library, '_add_lun_to_table') self.mock_object(self.library, '_mark_qos_policy_group_for_deletion') self.mock_object(self.library, '_get_volume_model_update') self.library.create_volume(fake.VOLUME) self.library._create_lun.assert_called_once_with( fake.POOL_NAME, fake.LUN_NAME, volume_size_in_bytes, fake.LUN_METADATA, fake.QOS_POLICY_GROUP_NAME, False) self.library._get_volume_model_update.assert_called_once_with( fake.VOLUME) self.assertEqual( 0, self.library._mark_qos_policy_group_for_deletion.call_count) self.assertEqual(0, block_base.LOG.error.call_count) def test_create_volume_space_allocation_extra_spec_true(self): volume_size_in_bytes = int(fake.SIZE) * units.Gi self.mock_object(na_utils, 'get_volume_extra_specs', return_value={ 'netapp:space_allocation': ' True' } ) self.mock_object(na_utils, 'log_extra_spec_warnings') self.mock_object(block_base, 'LOG') self.mock_object(volume_utils, 'extract_host', return_value=fake.POOL_NAME) self.mock_object(self.library, '_setup_qos_for_volume', return_value=fake.QOS_POLICY_GROUP_INFO) 
self.mock_object(self.library, '_create_lun') self.mock_object(self.library, '_create_lun_handle') self.mock_object(self.library, '_add_lun_to_table') self.mock_object(self.library, '_mark_qos_policy_group_for_deletion') self.mock_object(self.library, '_get_volume_model_update') self.library.create_volume(fake.VOLUME) self.library._create_lun.assert_called_once_with( fake.POOL_NAME, fake.LUN_NAME, volume_size_in_bytes, fake.LUN_METADATA_WITH_SPACE_ALLOCATION, fake.QOS_POLICY_GROUP_NAME, False) self.library._get_volume_model_update.assert_called_once_with( fake.VOLUME) self.assertEqual( 0, self.library._mark_qos_policy_group_for_deletion.call_count) self.assertEqual(0, block_base.LOG.error.call_count) def test_create_volume_no_pool(self): self.mock_object(volume_utils, 'extract_host', return_value=None) self.assertRaises(exception.InvalidHost, self.library.create_volume, fake.VOLUME) def test_space_allocation_exception_path(self): self.mock_object(block_base, 'LOG') self.mock_object(na_utils, 'get_volume_extra_specs', return_value={'netapp:space_allocation': 'xyz'}) self.mock_object(self.library, '_setup_qos_for_volume', return_value=fake.QOS_POLICY_GROUP_INFO) self.mock_object(self.library, '_create_lun', side_effect=Exception) self.mock_object(self.library, '_mark_qos_policy_group_for_deletion') self.assertRaises(exception.VolumeBackendAPIException, self.library.create_volume, fake.VOLUME) def test_create_volume_exception_path(self): self.mock_object(block_base, 'LOG') self.mock_object(na_utils, 'get_volume_extra_specs', return_value={}) self.mock_object(self.library, '_setup_qos_for_volume', return_value=fake.QOS_POLICY_GROUP_INFO) self.mock_object(self.library, '_create_lun', side_effect=Exception) self.mock_object(self.library, '_mark_qos_policy_group_for_deletion') self.assertRaises(exception.VolumeBackendAPIException, self.library.create_volume, fake.VOLUME) self.assertEqual(1, self.library. _mark_qos_policy_group_for_deletion.call_count) self.assertEqual(1, block_base.LOG.exception.call_count) def test_create_volume_no_pool_provided_by_scheduler(self): volume_copy = copy.deepcopy(fake.VOLUME) # Set up fake volume whose 'host' field is missing pool information. 
volume_copy['host'] = '%s@%s' % (fake.HOST_NAME, fake.BACKEND_NAME) self.assertRaises(exception.InvalidHost, self.library.create_volume, volume_copy) @mock.patch.object(block_base.NetAppBlockStorageLibrary, '_get_lun_attr') @mock.patch.object(block_base.NetAppBlockStorageLibrary, '_get_or_create_igroup') def test_map_lun(self, mock_get_or_create_igroup, mock_get_lun_attr): os = 'linux' protocol = 'fcp' self.library.host_type = 'linux' mock_get_lun_attr.return_value = {'Path': fake.LUN_PATH, 'OsType': os} mock_get_or_create_igroup.return_value = (fake.IGROUP1_NAME, os, 'iscsi') self.zapi_client.map_lun.return_value = '1' self.mock_object(self.library, '_is_active_sync_configured', return_value=False) lun_id = self.library._map_lun('fake_volume', fake.FC_FORMATTED_INITIATORS, protocol, None) self.assertEqual('1', lun_id) mock_get_or_create_igroup.assert_called_once_with( fake.FC_FORMATTED_INITIATORS, protocol, os) self.zapi_client.map_lun.assert_called_once_with( fake.LUN_PATH, fake.IGROUP1_NAME, lun_id=None) @mock.patch.object(block_base.NetAppBlockStorageLibrary, '_get_lun_attr') @mock.patch.object(block_base.NetAppBlockStorageLibrary, '_get_or_create_igroup') @mock.patch.object(block_base, 'LOG', mock.Mock()) def test_map_lun_mismatch_host_os( self, mock_get_or_create_igroup, mock_get_lun_attr): os = 'windows' protocol = 'fcp' self.library.host_type = 'linux' mock_get_lun_attr.return_value = {'Path': fake.LUN_PATH, 'OsType': os} mock_get_or_create_igroup.return_value = (fake.IGROUP1_NAME, os, 'iscsi') self.mock_object(self.library, '_is_active_sync_configured', return_value=False) self.library._map_lun('fake_volume', fake.FC_FORMATTED_INITIATORS, protocol, None) mock_get_or_create_igroup.assert_called_once_with( fake.FC_FORMATTED_INITIATORS, protocol, self.library.host_type) self.zapi_client.map_lun.assert_called_once_with( fake.LUN_PATH, fake.IGROUP1_NAME, lun_id=None) self.assertEqual(1, block_base.LOG.warning.call_count) @mock.patch.object(block_base.NetAppBlockStorageLibrary, '_get_lun_attr') @mock.patch.object(block_base.NetAppBlockStorageLibrary, '_get_or_create_igroup') @mock.patch.object(block_base.NetAppBlockStorageLibrary, '_find_mapped_lun_igroup') def test_map_lun_preexisting(self, mock_find_mapped_lun_igroup, mock_get_or_create_igroup, mock_get_lun_attr): os = 'linux' protocol = 'fcp' mock_get_lun_attr.return_value = {'Path': fake.LUN_PATH, 'OsType': os} mock_get_or_create_igroup.return_value = (fake.IGROUP1_NAME, os, 'iscsi') mock_find_mapped_lun_igroup.return_value = (fake.IGROUP1_NAME, '2') self.zapi_client.map_lun.side_effect = netapp_api.NaApiError self.mock_object(self.library, '_is_active_sync_configured', return_value=False) lun_id = self.library._map_lun( 'fake_volume', fake.FC_FORMATTED_INITIATORS, protocol, None) self.assertEqual('2', lun_id) mock_find_mapped_lun_igroup.assert_called_once_with( fake.LUN_PATH, fake.FC_FORMATTED_INITIATORS) @mock.patch.object(block_base.NetAppBlockStorageLibrary, '_get_lun_attr') @mock.patch.object(block_base.NetAppBlockStorageLibrary, '_get_or_create_igroup') @mock.patch.object(block_base.NetAppBlockStorageLibrary, '_find_mapped_lun_igroup') def test_map_lun_api_error(self, mock_find_mapped_lun_igroup, mock_get_or_create_igroup, mock_get_lun_attr): os = 'linux' protocol = 'fcp' mock_get_lun_attr.return_value = {'Path': fake.LUN_PATH, 'OsType': os} mock_get_or_create_igroup.return_value = (fake.IGROUP1_NAME, os, 'iscsi') mock_find_mapped_lun_igroup.return_value = (None, None) self.zapi_client.map_lun.side_effect = 
netapp_api.NaApiError self.assertRaises(netapp_api.NaApiError, self.library._map_lun, 'fake_volume', fake.FC_FORMATTED_INITIATORS, protocol, None) def test__is_multiattached_true(self): volume = copy.deepcopy(fake.test_volume) volume.multiattach = True volume.volume_attachment = [ fake.test_iscsi_attachment, fake.test_iscsi_attachment, ] self.assertTrue(self.library._is_multiattached( volume, fake.ISCSI_CONNECTOR)) def test__is_multiattached_false(self): volume1 = copy.deepcopy(fake.test_volume) volume1.multiattach = True volume1.volume_attachment = [] volume2 = copy.deepcopy(fake.test_volume) volume2.multiattach = False volume2.volume_attachment = [] self.assertFalse(self.library._is_multiattached( volume1, fake.ISCSI_CONNECTOR)) self.assertFalse(self.library._is_multiattached( volume2, fake.ISCSI_CONNECTOR)) @mock.patch.object(block_base.NetAppBlockStorageLibrary, '_find_mapped_lun_igroup') def test_unmap_lun_empty(self, mock_find_mapped_lun_igroup): self.zapi_client.get_lun_map.return_value = fake.ISCSI_ONE_MAP_LIST self.library._unmap_lun(fake.LUN_PATH, fake.ISCSI_EMPTY_MAP_LIST) mock_find_mapped_lun_igroup.assert_not_called() self.zapi_client.get_lun_map.assert_called_once_with(fake.LUN_PATH) self.zapi_client.unmap_lun.assert_called_once_with( fake.LUN_PATH, fake.ISCSI_ONE_MAP_LIST[0]['initiator-group']) @mock.patch.object(block_base.NetAppBlockStorageLibrary, '_find_mapped_lun_igroup') def test_unmap_lun_detach_one(self, mock_find_mapped_lun_igroup): fake_ini_group = fake.ISCSI_ONE_MAP_LIST[0]['initiator-group'] mock_find_mapped_lun_igroup.return_value = (fake_ini_group, 1) self.zapi_client.get_lun_map.return_value = fake.ISCSI_ONE_MAP_LIST self.library._unmap_lun(fake.LUN_PATH, fake.ISCSI_ONE_MAP_LIST) mock_find_mapped_lun_igroup.assert_called_once_with( fake.LUN_PATH, fake.ISCSI_ONE_MAP_LIST) self.zapi_client.get_lun_map.assert_not_called() self.zapi_client.unmap_lun.assert_called_once_with( fake.LUN_PATH, fake_ini_group) @mock.patch.object(block_base.NetAppBlockStorageLibrary, '_find_mapped_lun_igroup') def test_unmap_lun_empty_detach_all(self, mock_find_mapped_lun_igroup): self.zapi_client.get_lun_map.return_value = fake.ISCSI_MULTI_MAP_LIST self.library._unmap_lun(fake.LUN_PATH, fake.ISCSI_EMPTY_MAP_LIST) mock_find_mapped_lun_igroup.assert_not_called() self.zapi_client.get_lun_map.assert_called_once_with(fake.LUN_PATH) calls = [mock.call(fake.LUN_PATH, fake.ISCSI_MULTI_MAP_LIST[0]['initiator-group']), mock.call(fake.LUN_PATH, fake.ISCSI_MULTI_MAP_LIST[1]['initiator-group'])] self.zapi_client.unmap_lun.assert_has_calls(calls) @mock.patch.object(block_base.NetAppBlockStorageLibrary, '_unmap_lun') def test_terminate_connection_iscsi_multiattach(self, mock_unmap_lun): volume = copy.deepcopy(fake.test_volume) volume.multiattach = True volume.volume_attachment = [ fake.test_iscsi_attachment, fake.test_iscsi_attachment, ] self.library.terminate_connection_iscsi(volume, fake.ISCSI_CONNECTOR) mock_unmap_lun.assert_not_called() @mock.patch.object(block_base.NetAppBlockStorageLibrary, '_unmap_lun') @mock.patch.object(block_base.NetAppBlockStorageLibrary, '_get_lun_attr') def test_terminate_connection_iscsi_last_attachment(self, mock_get_lun_attr, mock_unmap_lun): mock_get_lun_attr.return_value = {'Path': fake.PATH} volume = copy.deepcopy(fake.test_volume) volume.multiattach = True volume.volume_attachment = [fake.test_iscsi_attachment] self.library.terminate_connection_iscsi(volume, fake.ISCSI_CONNECTOR) mock_unmap_lun.assert_called_once_with( fake.PATH, 
                                              [fake.ISCSI_CONNECTOR['initiator']])

    @mock.patch.object(block_base.NetAppBlockStorageLibrary, '_unmap_lun')
    @mock.patch.object(block_base.NetAppBlockStorageLibrary, '_get_lun_attr')
    def test_terminate_connection_iscsi_all_initiators(self, mock_get_lun_attr,
                                                       mock_unmap_lun):
        mock_get_lun_attr.return_value = {'Path': fake.PATH}
        volume = copy.deepcopy(fake.test_volume)
        volume.multiattach = True
        volume.volume_attachment = [
            fake.test_iscsi_attachment,
            fake.test_iscsi_attachment,
        ]
        self.library.terminate_connection_iscsi(volume, None)
        mock_unmap_lun.assert_called_once_with(fake.PATH, [])

    def test_find_mapped_lun_igroup(self):
        self.assertRaises(NotImplementedError,
                          self.library._find_mapped_lun_igroup,
                          fake.LUN_PATH, fake.FC_FORMATTED_INITIATORS)

    def test_has_luns_mapped_to_initiators(self):
        self.zapi_client.has_luns_mapped_to_initiators.return_value = True
        self.assertTrue(self.library._has_luns_mapped_to_initiators(
            fake.FC_FORMATTED_INITIATORS))
        self.zapi_client.has_luns_mapped_to_initiators.assert_called_once_with(
            fake.FC_FORMATTED_INITIATORS)

    def test_get_or_create_igroup_preexisting(self):
        self.zapi_client.get_igroup_by_initiators.return_value = [fake.IGROUP1]
        self.library._create_igroup_add_initiators = mock.Mock()
        igroup_name, host_os, ig_type = self.library._get_or_create_igroup(
            fake.FC_FORMATTED_INITIATORS, 'fcp', 'linux')
        self.assertEqual(fake.IGROUP1_NAME, igroup_name)
        self.assertEqual('linux', host_os)
        self.assertEqual('fcp', ig_type)
        self.zapi_client.get_igroup_by_initiators.assert_called_once_with(
            fake.FC_FORMATTED_INITIATORS)
        self.assertEqual(
            0, self.library._create_igroup_add_initiators.call_count)

    @ddt.data([], [fake.CUSTOM_IGROUP])
    @mock.patch.object(uuid, 'uuid4', mock.Mock(return_value=fake.UUID1))
    def test_get_or_create_igroup_none_preexisting(self, igroups):
        """Test _get_or_create_igroup when no OpenStack igroup exists."""
        # We only care about the OpenStack igroups, so we must have the same
        # result if there are no igroups and if the igroup is a custom one.
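        # Illustrative only (naming inferred from the assertions below, value is
        # a placeholder): a newly created OpenStack igroup is named
        # 'openstack-' + str(uuid.uuid4()), e.g.
        # 'openstack-12345678-1234-5678-1234-567812345678'.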
self.zapi_client.get_igroup_by_initiators.return_value = igroups igroup_name, os, ig_type = self.library._get_or_create_igroup( fake.FC_FORMATTED_INITIATORS, 'fcp', 'linux') self.assertEqual('openstack-' + fake.UUID1, igroup_name) self.zapi_client.create_igroup.assert_called_once_with( igroup_name, 'fcp', 'linux') self.assertEqual(len(fake.FC_FORMATTED_INITIATORS), self.zapi_client.add_igroup_initiator.call_count) self.assertEqual('linux', os) self.assertEqual('fcp', ig_type) def test_get_fc_target_wwpns(self): self.assertRaises(NotImplementedError, self.library._get_fc_target_wwpns) @mock.patch.object(block_base.NetAppBlockStorageLibrary, '_build_initiator_target_map') @mock.patch.object(block_base.NetAppBlockStorageLibrary, '_map_lun') def test_initialize_connection_fc(self, mock_map_lun, mock_build_initiator_target_map): self.maxDiff = None mock_map_lun.return_value = '1' mock_build_initiator_target_map.return_value = (fake.FC_TARGET_WWPNS, fake.FC_I_T_MAP, 4) target_info = self.library.initialize_connection_fc(fake.FC_VOLUME, fake.FC_CONNECTOR) self.assertDictEqual(target_info, fake.FC_TARGET_INFO) mock_map_lun.assert_called_once_with( 'fake_volume', fake.FC_FORMATTED_INITIATORS, 'fcp', None) @mock.patch.object(block_base.NetAppBlockStorageLibrary, '_build_initiator_target_map') @mock.patch.object(block_base.NetAppBlockStorageLibrary, '_map_lun') def test_initialize_connection_fc_no_wwpns( self, mock_map_lun, mock_build_initiator_target_map): mock_map_lun.return_value = '1' mock_build_initiator_target_map.return_value = (None, None, 0) self.assertRaises(exception.VolumeBackendAPIException, self.library.initialize_connection_fc, fake.FC_VOLUME, fake.FC_CONNECTOR) @mock.patch.object(block_base.NetAppBlockStorageLibrary, '_has_luns_mapped_to_initiators') @mock.patch.object(block_base.NetAppBlockStorageLibrary, '_unmap_lun') @mock.patch.object(block_base.NetAppBlockStorageLibrary, '_get_lun_attr') def test_terminate_connection_fc(self, mock_get_lun_attr, mock_unmap_lun, mock_has_luns_mapped_to_initiators): mock_get_lun_attr.return_value = {'Path': fake.LUN_PATH} mock_unmap_lun.return_value = None mock_has_luns_mapped_to_initiators.return_value = True volume = copy.deepcopy(fake.test_volume) target_info = self.library.terminate_connection_fc(volume, fake.FC_CONNECTOR) self.assertDictEqual(target_info, fake.FC_TARGET_INFO_EMPTY) mock_unmap_lun.assert_called_once_with(fake.LUN_PATH, fake.FC_FORMATTED_INITIATORS) @mock.patch.object(block_base.NetAppBlockStorageLibrary, '_build_initiator_target_map') @mock.patch.object(block_base.NetAppBlockStorageLibrary, '_has_luns_mapped_to_initiators') @mock.patch.object(block_base.NetAppBlockStorageLibrary, '_unmap_lun') @mock.patch.object(block_base.NetAppBlockStorageLibrary, '_get_lun_attr') def test_terminate_connection_fc_no_more_luns( self, mock_get_lun_attr, mock_unmap_lun, mock_has_luns_mapped_to_initiators, mock_build_initiator_target_map): mock_get_lun_attr.return_value = {'Path': fake.LUN_PATH} mock_unmap_lun.return_value = None mock_has_luns_mapped_to_initiators.return_value = False mock_build_initiator_target_map.return_value = (fake.FC_TARGET_WWPNS, fake.FC_I_T_MAP, 4) volume = copy.deepcopy(fake.test_volume) target_info = self.library.terminate_connection_fc(volume, fake.FC_CONNECTOR) self.assertDictEqual(target_info, fake.FC_TARGET_INFO_UNMAP) @mock.patch.object(block_base.NetAppBlockStorageLibrary, '_has_luns_mapped_to_initiators') @mock.patch.object(block_base.NetAppBlockStorageLibrary, '_unmap_lun') 
@mock.patch.object(block_base.NetAppBlockStorageLibrary, '_get_lun_attr') def test_terminate_connection_fc_multiattach( self, mock_get_lun_attr, mock_unmap_lun, mock_has_luns_mapped_to_initiators): volume = copy.deepcopy(fake.test_volume) volume.multiattach = True volume.volume_attachment = [ {'attach_status': fake.ATTACHED, 'attached_host': fake.HOST_NAME}, {'attach_status': fake.ATTACHED, 'attached_host': fake.HOST_NAME}, ] mock_get_lun_attr.return_value = {'Path': fake.LUN_PATH} mock_unmap_lun.return_value = None mock_has_luns_mapped_to_initiators.return_value = True self.library.terminate_connection_fc(volume, fake.FC_CONNECTOR) mock_unmap_lun.assert_called_once_with(fake.LUN_PATH, fake.FC_FORMATTED_INITIATORS) @mock.patch.object(block_base.NetAppBlockStorageLibrary, '_has_luns_mapped_to_initiators') @mock.patch.object(block_base.NetAppBlockStorageLibrary, '_unmap_lun') @mock.patch.object(block_base.NetAppBlockStorageLibrary, '_get_lun_attr') def test_terminate_connection_fc_last_attachment( self, mock_get_lun_attr, mock_unmap_lun, mock_has_luns_mapped_to_initiators): volume = copy.deepcopy(fake.test_volume) volume.multiattach = True volume.volume_attachment = [ {'attach_status': fake.ATTACHED, 'attached_host': fake.HOST_NAME}, ] mock_get_lun_attr.return_value = {'Path': fake.LUN_PATH} mock_unmap_lun.return_value = None mock_has_luns_mapped_to_initiators.return_value = True self.library.terminate_connection_fc(volume, fake.FC_CONNECTOR) mock_unmap_lun.assert_called_once_with(fake.LUN_PATH, fake.FC_FORMATTED_INITIATORS) @mock.patch.object(block_base.NetAppBlockStorageLibrary, '_has_luns_mapped_to_initiators') @mock.patch.object(block_base.NetAppBlockStorageLibrary, '_unmap_lun') @mock.patch.object(block_base.NetAppBlockStorageLibrary, '_get_lun_attr') def test_terminate_connection_fc_multiattach_cleanup( self, mock_get_lun_attr, mock_unmap_lun, mock_has_luns_mapped_to_initiators): volume = copy.deepcopy(fake.test_volume) volume.multiattach = True volume.volume_attachment = [ {'attach_status': fake.ATTACHED, 'attached_host': fake.HOST_NAME}, {'attach_status': fake.ATTACHED, 'attached_host': fake.HOST_NAME}, ] connector = fake.FC_CONNECTOR mock_get_lun_attr.return_value = {'Path': fake.LUN_PATH} mock_unmap_lun.return_value = None mock_has_luns_mapped_to_initiators.return_value = True def terminate_connection(*args, **kwargs): self.library.terminate_connection_fc(volume, connector) # Run the termination operation in parallel using ThreadPoolExecutor with ThreadPoolExecutor(max_workers=2) as executor: list(executor.map(terminate_connection, range(2))) # Ensure that the LUN maps are cleaned up correctly for both # parallel operations self.assertEqual(mock_unmap_lun.call_count, 2) @mock.patch.object(block_base.NetAppBlockStorageLibrary, '_get_fc_target_wwpns') def test_build_initiator_target_map_no_lookup_service( self, mock_get_fc_target_wwpns): self.library.lookup_service = None mock_get_fc_target_wwpns.return_value = fake.FC_FORMATTED_TARGET_WWPNS (target_wwpns, init_targ_map, num_paths) = \ self.library._build_initiator_target_map(fake.FC_CONNECTOR) self.assertSetEqual(set(fake.FC_TARGET_WWPNS), set(target_wwpns)) self.assertDictEqual(fake.FC_I_T_MAP_COMPLETE, init_targ_map) self.assertEqual(0, num_paths) @mock.patch.object(block_base.NetAppBlockStorageLibrary, '_get_fc_target_wwpns') def test_build_initiator_target_map_with_lookup_service( self, mock_get_fc_target_wwpns): self.library.lookup_service = mock.Mock() self.library.lookup_service.get_device_mapping_from_network.\ return_value 
= fake.FC_FABRIC_MAP mock_get_fc_target_wwpns.return_value = fake.FC_FORMATTED_TARGET_WWPNS (target_wwpns, init_targ_map, num_paths) = \ self.library._build_initiator_target_map(fake.FC_CONNECTOR) self.assertSetEqual(set(fake.FC_TARGET_WWPNS), set(target_wwpns)) for i in fake.FC_I_T_MAP: for t in fake.FC_I_T_MAP[i]: self.assertIn(t, init_targ_map[i]) self.assertEqual(4, num_paths) @mock.patch.object(na_utils, 'check_flags') def test_do_setup_san_configured(self, mock_check_flags): self.library.configuration.netapp_lun_ostype = 'windows' self.library.configuration.netapp_host_type = 'solaris' self.library.configuration.netapp_lun_space_reservation = 'disabled' self.library.do_setup(mock.Mock()) self.assertTrue(mock_check_flags.called) self.assertEqual('windows', self.library.lun_ostype) self.assertEqual('solaris', self.library.host_type) @mock.patch.object(na_utils, 'check_flags') def test_do_setup_san_unconfigured(self, mock_check_flags): self.library.configuration.netapp_lun_ostype = None self.library.configuration.netapp_host_type = None self.library.configuration.netapp_lun_space_reservation = 'enabled' self.library.do_setup(mock.Mock()) self.assertTrue(mock_check_flags.called) self.assertEqual('linux', self.library.lun_ostype) self.assertEqual('linux', self.library.host_type) def test_do_setup_space_reservation_disabled(self): self.mock_object(na_utils, 'check_flags') self.library.configuration.netapp_lun_ostype = None self.library.configuration.netapp_host_type = None self.library.configuration.netapp_lun_space_reservation = 'disabled' self.library.do_setup(mock.Mock()) self.assertEqual('false', self.library.lun_space_reservation) def test_do_setup_space_reservation_enabled(self): self.mock_object(na_utils, 'check_flags') self.library.configuration.netapp_lun_ostype = None self.library.configuration.netapp_host_type = None self.library.configuration.netapp_lun_space_reservation = 'enabled' self.library.do_setup(mock.Mock()) self.assertEqual('true', self.library.lun_space_reservation) def test_get_existing_vol_with_manage_ref_no_source_info(self): self.assertRaises(exception.ManageExistingInvalidReference, self.library._get_existing_vol_with_manage_ref, {}) def test_get_existing_vol_manage_not_found(self): self.zapi_client.get_lun_by_args.return_value = [] self.assertRaises(exception.ManageExistingInvalidReference, self.library._get_existing_vol_with_manage_ref, {'source-name': 'lun_path'}) self.assertEqual(1, self.zapi_client.get_lun_by_args.call_count) def test_get_existing_vol_manage_lun_by_path(self): self.library.vserver = 'fake_vserver' self.zapi_client.get_lun_by_args.return_value = ['lun0', 'lun1'] mock_lun = block_base.NetAppLun( 'lun0', 'lun0', '3', {'UUID': 'fake_uuid'}) self.mock_object(block_base.NetAppBlockStorageLibrary, '_extract_lun_info', return_value=mock_lun) existing_ref = {'source-name': 'fake_path'} lun = self.library._get_existing_vol_with_manage_ref(existing_ref) self.zapi_client.get_lun_by_args.assert_called_once_with( path='fake_path') self.library._extract_lun_info.assert_called_once_with('lun0') self.assertEqual('lun0', lun.name) def test_get_existing_vol_manage_lun_by_uuid(self): self.library.vserver = 'fake_vserver' self.zapi_client.get_lun_by_args.return_value = ['lun0', 'lun1'] mock_lun = block_base.NetAppLun( 'lun0', 'lun0', '3', {'UUID': 'fake_uuid'}) self.mock_object(block_base.NetAppBlockStorageLibrary, '_extract_lun_info', return_value=mock_lun) existing_ref = {'source-id': 'fake_uuid'} lun = 
self.library._get_existing_vol_with_manage_ref(existing_ref) self.zapi_client.get_lun_by_args.assert_called_once_with( uuid='fake_uuid') self.library._extract_lun_info.assert_called_once_with('lun0') self.assertEqual('lun0', lun.name) def test_get_existing_vol_manage_lun_invalid_mode(self): self.assertRaises(exception.ManageExistingInvalidReference, self.library._get_existing_vol_with_manage_ref, {'source-id': 'src_id'}) def test_get_existing_vol_manage_lun_invalid_lun(self): self.zapi_client.get_lun_by_args.return_value = ['lun0', 'lun1'] self.mock_object(block_base.NetAppBlockStorageLibrary, '_is_lun_valid_on_storage', side_effect=[False, True]) mock_lun0 = block_base.NetAppLun( 'lun0', 'lun0', '3', {'UUID': 'src_id_0'}) mock_lun1 = block_base.NetAppLun( 'lun1', 'lun1', '5', {'UUID': 'src_id_1'}) self.mock_object(block_base.NetAppBlockStorageLibrary, '_extract_lun_info', side_effect=[mock_lun0, mock_lun1]) lun = self.library._get_existing_vol_with_manage_ref( {'source-name': 'lun_path'}) self.assertEqual(1, self.zapi_client.get_lun_by_args.call_count) self.library._extract_lun_info.assert_has_calls([ mock.call('lun0'), mock.call('lun1'), ]) self.assertEqual('lun1', lun.name) @mock.patch.object(block_base.NetAppBlockStorageLibrary, '_get_existing_vol_with_manage_ref', mock.Mock(return_value=block_base.NetAppLun( 'handle', 'name', '1073742824', {}))) def test_manage_existing_get_size(self): size = self.library.manage_existing_get_size( {'id': 'vol_id'}, {'ref': 'ref'}) self.assertEqual(2, size) self.library._get_existing_vol_with_manage_ref.assert_called_once_with( {'ref': 'ref'}) @ddt.data(None, {'replication_status': fields.ReplicationStatus.ENABLED}) def test_manage_existing_lun_name_matches(self, model_update): volume = fake_volume.fake_volume_obj(self.ctxt) existing_ref = {'source-name': 'fake_path'} mock_lun = block_base.NetAppLun( volume['name'], volume['name'], '3', {'UUID': 'fake_uuid', 'Path': 'p'}) self.mock_object(self.library, '_get_existing_vol_with_manage_ref', return_value=mock_lun) self.mock_object(na_utils, 'get_volume_extra_specs', return_value=fake.EXTRA_SPECS) self.mock_object(self.library, '_check_volume_type_for_lun', return_value=True) self.mock_object(self.library, '_setup_qos_for_volume') self.mock_object(na_utils, 'get_qos_policy_group_name_from_info', return_value=None) self.mock_object(self.library, '_add_lun_to_table') self.mock_object(self.library, '_get_volume_model_update', return_value=model_update) mock_info_log = self.mock_object(block_base.LOG, 'info') actual_update = self.library.manage_existing(volume, existing_ref) self.assertEqual(model_update, actual_update) self.assertEqual(2, mock_info_log.call_count) self.library._add_lun_to_table.assert_called_once_with(mock_lun) @ddt.data(*itertools.product((None, 'fake_qos_policy_group_name'), (True, False))) @ddt.unpack def test_manage_existing_rename_lun(self, qos_policy_group_name, is_qos_policy_group_spec_adaptive): expected_update = ( {'replication_status': fields.ReplicationStatus.ENABLED}) volume = fake_volume.fake_volume_obj(self.ctxt) existing_ref = {'source-name': 'fake_path'} mock_lun = block_base.NetAppLun( 'lun0', 'lun0', '3', {'UUID': 'fake_uuid', 'Path': fake.LUN_PATH}) self.mock_object(self.library, '_get_existing_vol_with_manage_ref', return_value=mock_lun) self.mock_object(na_utils, 'get_volume_extra_specs', return_value=fake.EXTRA_SPECS) self.mock_object(self.library, '_check_volume_type_for_lun', return_value=True) self.mock_object(self.library, '_setup_qos_for_volume') 
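        # Hedged note (illustrative, taken from the fixtures rather than the
        # driver source): the rename target asserted further below,
        # '/vol/vol0/%s' % volume['name'], follows the ONTAP LUN path
        # convention '/vol/<flexvol>/<lun_name>'; 'vol0' is simply the fake
        # flexvol these tests use.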
self.mock_object(na_utils, 'get_qos_policy_group_name_from_info', return_value=qos_policy_group_name) self.mock_object(na_utils, 'is_qos_policy_group_spec_adaptive', return_value=is_qos_policy_group_spec_adaptive) self.mock_object(self.library, '_add_lun_to_table') self.mock_object(self.library, '_get_volume_model_update', return_value=expected_update) self.mock_object(self.zapi_client, 'set_lun_qos_policy_group') mock_info_log = self.mock_object(block_base.LOG, 'info') actual_update = self.library.manage_existing(volume, existing_ref) expected_new_path = '/vol/vol0/%s' % volume['name'] self.assertEqual(expected_update, actual_update) self.assertEqual(1, mock_info_log.call_count) self.library._add_lun_to_table.assert_called_once_with(mock_lun) if qos_policy_group_name: (self.zapi_client.set_lun_qos_policy_group. assert_called_once_with(expected_new_path, qos_policy_group_name, is_qos_policy_group_spec_adaptive)) else: self.assertFalse( self.zapi_client.set_lun_qos_policy_group.called) @mock.patch.object(block_base.LOG, 'info') def test_unmanage(self, log): mock_lun = block_base.NetAppLun('handle', 'name', '1', {'Path': 'p', 'UUID': 'uuid'}) self.library._get_lun_from_table = mock.Mock(return_value=mock_lun) self.library.unmanage({'name': 'vol'}) self.library._get_lun_from_table.assert_called_once_with('vol') self.assertEqual(1, log.call_count) def test_check_vol_type_for_lun(self): result = self.library._check_volume_type_for_lun( 'vol', 'lun', 'existing_ref', {}) self.assertIsNone(result) def test_is_lun_valid_on_storage(self): self.assertTrue(self.library._is_lun_valid_on_storage('lun')) def test_initialize_connection_iscsi(self): target_details_list = fake.ISCSI_TARGET_DETAILS_LIST volume = fake.ISCSI_VOLUME connector = fake.ISCSI_CONNECTOR self.mock_object(block_base.NetAppBlockStorageLibrary, '_map_lun', return_value=fake.ISCSI_LUN['lun_id']) self.zapi_client.get_iscsi_target_details.return_value = ( target_details_list) self.mock_object(block_base.NetAppBlockStorageLibrary, '_get_targets_from_list', return_value=target_details_list) self.mock_object(block_base.NetAppBlockStorageLibrary, '_is_space_alloc_enabled', return_value=True) self.zapi_client.get_iscsi_service_details.return_value = ( fake.ISCSI_SERVICE_IQN) self.mock_object(na_utils, 'get_iscsi_connection_properties', return_value=fake.ISCSI_CONNECTION_PROPERTIES) target_info = self.library.initialize_connection_iscsi(volume, connector) self.assertEqual( fake.ISCSI_CONNECTION_PROPERTIES['data']['auth_method'], target_info['data']['auth_method']) self.assertEqual( fake.ISCSI_CONNECTION_PROPERTIES['data']['auth_password'], target_info['data']['auth_password']) self.assertIn('auth_password', target_info['data']) self.assertEqual( fake.ISCSI_CONNECTION_PROPERTIES['data']['discovery_auth_method'], target_info['data']['discovery_auth_method']) self.assertEqual( fake.ISCSI_CONNECTION_PROPERTIES['data'] ['discovery_auth_password'], target_info['data']['discovery_auth_password']) self.assertIn('auth_password', target_info['data']) self.assertEqual( fake.ISCSI_CONNECTION_PROPERTIES['data'] ['discovery_auth_username'], target_info['data']['discovery_auth_username']) self.assertEqual( fake.ISCSI_CONNECTION_PROPERTIES['data'] ['discard'], target_info['data']['discard']) self.assertEqual(fake.ISCSI_CONNECTION_PROPERTIES, target_info) block_base.NetAppBlockStorageLibrary._map_lun.assert_called_once_with( fake.ISCSI_VOLUME['name'], [fake.ISCSI_CONNECTOR['initiator']], 'iscsi', None) 
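        # Illustrative sketch (assumed shape, not copied from the driver): the
        # target_info validated above follows Cinder's usual iSCSI connection
        # form, roughly:
        #   {'driver_volume_type': 'iscsi',
        #    'data': {'auth_method': 'CHAP',
        #             'auth_username': '...', 'auth_password': '...',
        #             'discovery_auth_method': 'CHAP',
        #             'discovery_auth_username': '...',
        #             'discovery_auth_password': '...',
        #             'discard': ..., ...}}
        # The concrete values asserted here all come from
        # fake.ISCSI_CONNECTION_PROPERTIES.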
self.zapi_client.get_iscsi_target_details.assert_called_once_with() block_base.NetAppBlockStorageLibrary._get_targets_from_list\ .assert_called_once_with( target_details_list) self.zapi_client.get_iscsi_service_details.assert_called_once_with() def test_initialize_connection_iscsi_no_target_list(self): volume = fake.ISCSI_VOLUME connector = fake.ISCSI_CONNECTOR self.mock_object(block_base.NetAppBlockStorageLibrary, '_map_lun', return_value=fake.ISCSI_LUN['lun_id']) self.zapi_client.get_iscsi_target_details.return_value = None self.mock_object(block_base.NetAppBlockStorageLibrary, '_get_targets_from_list') self.mock_object(block_base.NetAppBlockStorageLibrary, '_is_space_alloc_enabled', return_value=True) self.mock_object(na_utils, 'get_iscsi_connection_properties', return_value=fake.ISCSI_CONNECTION_PROPERTIES) self.assertRaises(exception.VolumeBackendAPIException, self.library.initialize_connection_iscsi, volume, connector) self.assertEqual( 0, block_base.NetAppBlockStorageLibrary ._get_targets_from_list.call_count) self.assertEqual( 0, block_base.NetAppBlockStorageLibrary ._is_space_alloc_enabled.call_count) self.assertEqual( 0, self.zapi_client.get_iscsi_service_details.call_count) self.assertEqual( 0, na_utils.get_iscsi_connection_properties.call_count) def test_initialize_connection_iscsi_no_preferred_target(self): volume = fake.ISCSI_VOLUME connector = fake.ISCSI_CONNECTOR self.mock_object(block_base.NetAppBlockStorageLibrary, '_map_lun', return_value=fake.ISCSI_LUN['lun_id']) self.zapi_client.get_iscsi_target_details.return_value = None self.mock_object(block_base.NetAppBlockStorageLibrary, '_get_targets_from_list', return_value=None) self.mock_object(block_base.NetAppBlockStorageLibrary, '_is_space_alloc_enabled', return_value=True) self.mock_object(na_utils, 'get_iscsi_connection_properties') self.assertRaises(exception.VolumeBackendAPIException, self.library.initialize_connection_iscsi, volume, connector) self.assertEqual(0, self.zapi_client .get_iscsi_service_details.call_count) self.assertEqual(0, block_base.NetAppBlockStorageLibrary ._is_space_alloc_enabled.call_count) self.assertEqual(0, na_utils.get_iscsi_connection_properties .call_count) def test_initialize_connection_iscsi_no_iscsi_service_details(self): target_details_list = fake.ISCSI_TARGET_DETAILS_LIST volume = fake.ISCSI_VOLUME connector = fake.ISCSI_CONNECTOR self.mock_object(block_base.NetAppBlockStorageLibrary, '_map_lun', return_value=fake.ISCSI_LUN['lun_id']) self.zapi_client.get_iscsi_target_details.return_value = ( target_details_list) self.mock_object(block_base.NetAppBlockStorageLibrary, '_get_targets_from_list', return_value=target_details_list) self.zapi_client.get_iscsi_service_details.return_value = None self.mock_object(na_utils, 'get_iscsi_connection_properties') self.assertRaises(exception.VolumeBackendAPIException, self.library.initialize_connection_iscsi, volume, connector) block_base.NetAppBlockStorageLibrary._map_lun.assert_called_once_with( fake.ISCSI_VOLUME['name'], [fake.ISCSI_CONNECTOR['initiator']], 'iscsi', None) self.zapi_client.get_iscsi_target_details.assert_called_once_with() block_base.NetAppBlockStorageLibrary._get_targets_from_list\ .assert_called_once_with(target_details_list) def test_get_target_details_list(self): target_details_list = fake.ISCSI_TARGET_DETAILS_LIST result = self.library._get_targets_from_list(target_details_list) self.assertEqual(target_details_list, result) def test_get_preferred_target_from_empty_list(self): target_details_list = [] result = 
self.library._get_targets_from_list(target_details_list) self.assertFalse(bool(result)) def test_get_targets_from_list_with_one_interface_disabled(self): target_details_list = copy.deepcopy(fake.ISCSI_TARGET_DETAILS_LIST) target_details_list[0]['interface-enabled'] = 'false' result = self.library._get_targets_from_list(target_details_list) self.assertEqual(target_details_list[1:], result) def test_get_targets_from_list_with_all_interfaces_disabled(self): target_details_list = copy.deepcopy(fake.ISCSI_TARGET_DETAILS_LIST) for target in target_details_list: target['interface-enabled'] = 'false' result = self.library._get_targets_from_list(target_details_list) self.assertEqual(target_details_list, result) def test_get_targets_from_list_with_filter(self): target_details_list = fake.ISCSI_TARGET_DETAILS_LIST filter = [target_detail['address'] for target_detail in target_details_list[1:]] result = self.library._get_targets_from_list(target_details_list, filter) self.assertEqual(target_details_list[1:], result) @mock.patch.object(na_utils, 'check_flags', mock.Mock()) @mock.patch.object(block_base, 'LOG', mock.Mock()) def test_setup_error_invalid_lun_os(self): self.library.configuration.netapp_lun_ostype = 'unknown' self.library.do_setup(mock.Mock()) self.assertRaises(na_utils.NetAppDriverException, self.library.check_for_setup_error) block_base.LOG.error.assert_called_once_with(mock.ANY) @mock.patch.object(na_utils, 'check_flags', mock.Mock()) @mock.patch.object(block_base, 'LOG', mock.Mock()) def test_setup_error_invalid_host_type(self): self.library.configuration.netapp_lun_ostype = 'linux' self.library.configuration.netapp_host_type = 'future_os' self.library.do_setup(mock.Mock()) self.assertRaises(na_utils.NetAppDriverException, self.library.check_for_setup_error) block_base.LOG.error.assert_called_once_with(mock.ANY) @mock.patch.object(na_utils, 'check_flags', mock.Mock()) def test_check_for_setup_error_both_config(self): self.library.configuration.netapp_lun_ostype = 'linux' self.library.configuration.netapp_host_type = 'linux' self.library.do_setup(mock.Mock()) self.zapi_client.get_lun_list.return_value = ['lun1'] self.library._extract_and_populate_luns = mock.Mock() mock_looping_start_tasks = self.mock_object( self.library.loopingcalls, 'start_tasks') self.library.check_for_setup_error() self.library._extract_and_populate_luns.assert_called_once_with( ['lun1']) mock_looping_start_tasks.assert_called_once_with() @mock.patch.object(na_utils, 'check_flags', mock.Mock()) def test_check_for_setup_error_no_os_host(self): mock_start_tasks = self.mock_object( self.library.loopingcalls, 'start_tasks') self.library.configuration.netapp_lun_ostype = None self.library.configuration.netapp_host_type = None self.library.do_setup(mock.Mock()) self.zapi_client.get_lun_list.return_value = ['lun1'] self.library._extract_and_populate_luns = mock.Mock() self.library.check_for_setup_error() self.library._extract_and_populate_luns.assert_called_once_with( ['lun1']) mock_start_tasks.assert_called_once_with() def test_delete_volume(self): mock_delete_lun = self.mock_object(self.library, '_delete_lun') self.library.delete_volume(fake.VOLUME) mock_delete_lun.assert_called_once_with(fake.LUN_NAME) def test_delete_lun(self): mock_get_lun_attr = self.mock_object(self.library, '_get_lun_attr') mock_get_lun_attr.return_value = fake.LUN_METADATA self.library.zapi_client = mock.Mock() self.library.lun_table = fake.LUN_TABLE self.library._delete_lun(fake.LUN_NAME) mock_get_lun_attr.assert_called_once_with( fake.LUN_NAME, 
'metadata') self.library.zapi_client.destroy_lun.assert_called_once_with(fake.PATH) def test_delete_lun_no_metadata(self): self.mock_object(self.library, '_get_lun_attr', return_value=None) self.library.zapi_client = mock.Mock() self.library.lun_table = fake.LUN_TABLE self.mock_object(self.library, 'zapi_client') self.library._delete_lun(fake.LUN_NAME) self.library._get_lun_attr.assert_called_once_with( fake.LUN_NAME, 'metadata') self.assertEqual(0, self.library.zapi_client.destroy_lun.call_count) self.assertEqual(0, self.zapi_client. mark_qos_policy_group_for_deletion.call_count) @mock.patch.object(block_base, 'LOG', mock.Mock()) def test_delete_lun_missing_lun(self): mock_get_lun_attr = self.mock_object(self.library, '_get_lun_attr') mock_get_lun_attr.return_value = fake.LUN_METADATA self.library.zapi_client = mock.Mock() error = netapp_api.NaApiError(code=netapp_api.EOBJECTNOTFOUND) self.mock_object(self.library.zapi_client, 'destroy_lun', side_effect=error) self.library.lun_table = {fake.LUN_NAME: None} self.library._delete_lun(fake.LUN_NAME) mock_get_lun_attr.assert_called_once_with( fake.LUN_NAME, 'metadata') self.library.zapi_client.destroy_lun.assert_called_once_with(fake.PATH) block_base.LOG.error.assert_not_called() block_base.LOG.warning.assert_called_once() self.assertEqual({}, self.library.lun_table) @mock.patch.object(block_base, 'LOG', mock.Mock()) def test_delete_lun_client_exception(self): mock_get_lun_attr = self.mock_object(self.library, '_get_lun_attr') mock_get_lun_attr.return_value = fake.LUN_METADATA self.library.zapi_client = mock.Mock() self.mock_object(self.library.zapi_client, 'destroy_lun', side_effect=netapp_api.NaApiError) self.assertRaises(na_utils.NetAppDriverException, self.library._delete_lun, fake.LUN_NAME) block_base.LOG.error.assert_not_called() block_base.LOG.warning.assert_not_called() def test_delete_snapshot(self): mock_delete_lun = self.mock_object(self.library, '_delete_lun') self.library.delete_snapshot(fake.SNAPSHOT) mock_delete_lun.assert_called_once_with(fake.SNAPSHOT_NAME) def test_clone_source_to_destination(self): self.mock_object(na_utils, 'get_volume_extra_specs', return_value=fake.EXTRA_SPECS) self.mock_object(self.library, '_setup_qos_for_volume', return_value=fake.QOS_POLICY_GROUP_INFO) self.mock_object(self.library, '_clone_lun') self.mock_object(self.library, '_extend_volume') self.mock_object(self.library, 'delete_volume') self.mock_object(self.library, '_mark_qos_policy_group_for_deletion') self.mock_object(self.library, '_get_volume_model_update', return_value={'key': 'value'}) self.library.lun_space_reservation = 'false' retval = self.library._clone_source_to_destination( fake.CLONE_SOURCE, fake.CLONE_DESTINATION) self.assertEqual({'key': 'value'}, retval) na_utils.get_volume_extra_specs.assert_called_once_with( fake.CLONE_DESTINATION) self.library._setup_qos_for_volume.assert_called_once_with( fake.CLONE_DESTINATION, fake.EXTRA_SPECS) self.library._clone_lun.assert_called_once_with( fake.CLONE_SOURCE_NAME, fake.CLONE_DESTINATION_NAME, space_reserved='false', qos_policy_group_name=fake.QOS_POLICY_GROUP_NAME, qos_policy_group_is_adaptive=False) self.library._extend_volume.assert_called_once_with( fake.CLONE_DESTINATION, fake.CLONE_DESTINATION_SIZE, fake.QOS_POLICY_GROUP_NAME) self.assertEqual(0, self.library.delete_volume.call_count) self.assertEqual(0, self.library. 
_mark_qos_policy_group_for_deletion.call_count) def test_clone_source_to_destination_exception_path(self): self.mock_object(na_utils, 'get_volume_extra_specs', return_value=fake.EXTRA_SPECS) self.mock_object(self.library, '_setup_qos_for_volume', return_value=fake.QOS_POLICY_GROUP_INFO) self.mock_object(self.library, '_clone_lun') self.mock_object(self.library, '_extend_volume', side_effect=Exception) self.mock_object(self.library, 'delete_volume') self.mock_object(self.library, '_mark_qos_policy_group_for_deletion') self.library.lun_space_reservation = 'true' self.assertRaises(exception.VolumeBackendAPIException, self.library._clone_source_to_destination, fake.CLONE_SOURCE, fake.CLONE_DESTINATION) na_utils.get_volume_extra_specs.assert_called_once_with( fake.CLONE_DESTINATION) self.library._setup_qos_for_volume.assert_called_once_with( fake.CLONE_DESTINATION, fake.EXTRA_SPECS) self.library._clone_lun.assert_called_once_with( fake.CLONE_SOURCE_NAME, fake.CLONE_DESTINATION_NAME, space_reserved='true', qos_policy_group_name=fake.QOS_POLICY_GROUP_NAME, qos_policy_group_is_adaptive=False) self.library._extend_volume.assert_called_once_with( fake.CLONE_DESTINATION, fake.CLONE_DESTINATION_SIZE, fake.QOS_POLICY_GROUP_NAME) self.assertEqual(1, self.library.delete_volume.call_count) self.assertEqual(1, self.library. _mark_qos_policy_group_for_deletion.call_count) def test_create_lun(self): self.assertRaises(NotImplementedError, self.library._create_lun, fake.VOLUME_ID, fake.LUN_ID, fake.SIZE, fake.LUN_METADATA) def test_clone_lun(self): self.assertRaises(NotImplementedError, self.library._clone_lun, fake.VOLUME_ID, 'new-' + fake.VOLUME_ID) def test_create_snapshot(self): fake_lun = block_base.NetAppLun(fake.LUN_HANDLE, fake.LUN_ID, fake.LUN_SIZE, fake.LUN_METADATA) mock_clone_lun = self.mock_object(self.library, '_clone_lun') self.mock_object(self.library, '_get_lun_from_table', return_value=fake_lun) self.library.create_snapshot(fake.SNAPSHOT) mock_clone_lun.assert_called_once_with( fake_lun.name, fake.SNAPSHOT_NAME, space_reserved='false', is_snapshot=True) def test_create_volume_from_snapshot(self): mock_do_clone = self.mock_object(self.library, '_clone_source_to_destination') source = { 'name': fake.SNAPSHOT['name'], 'size': fake.SNAPSHOT['volume_size'] } self.library.create_volume_from_snapshot(fake.VOLUME, fake.SNAPSHOT) mock_do_clone.assert_has_calls([ mock.call(source, fake.VOLUME)]) def test_create_cloned_volume(self): fake_lun = block_base.NetAppLun(fake.LUN_HANDLE, fake.LUN_ID, fake.LUN_SIZE, fake.LUN_METADATA) mock_get_lun_from_table = self.mock_object(self.library, '_get_lun_from_table') mock_get_lun_from_table.return_value = fake_lun mock_do_clone = self.mock_object(self.library, '_clone_source_to_destination') source = { 'name': fake_lun.name, 'size': fake.VOLUME_REF['size'] } self.library.create_cloned_volume(fake.VOLUME, fake.VOLUME_REF) mock_do_clone.assert_has_calls([ mock.call(source, fake.VOLUME)]) def test_extend_volume(self): new_size = 100 volume_copy = copy.copy(fake.VOLUME) volume_copy['size'] = new_size mock_get_volume_extra_specs = self.mock_object( na_utils, 'get_volume_extra_specs', return_value=fake.EXTRA_SPECS) mock_setup_qos_for_volume = self.mock_object( self.library, '_setup_qos_for_volume', return_value=fake.QOS_POLICY_GROUP_INFO) mock_extend_volume = self.mock_object(self.library, '_extend_volume') self.library.extend_volume(fake.VOLUME, new_size) mock_get_volume_extra_specs.assert_called_once_with(fake.VOLUME) 
mock_setup_qos_for_volume.assert_called_once_with(volume_copy, fake.EXTRA_SPECS) mock_extend_volume.assert_called_once_with(fake.VOLUME, new_size, fake.QOS_POLICY_GROUP_NAME) def test_extend_volume_api_error(self): new_size = 100 volume_copy = copy.copy(fake.VOLUME) volume_copy['size'] = new_size mock_get_volume_extra_specs = self.mock_object( na_utils, 'get_volume_extra_specs', return_value=fake.EXTRA_SPECS) mock_setup_qos_for_volume = self.mock_object( self.library, '_setup_qos_for_volume', return_value=fake.QOS_POLICY_GROUP_INFO) mock_extend_volume = self.mock_object( self.library, '_extend_volume', side_effect=netapp_api.NaApiError) self.assertRaises(netapp_api.NaApiError, self.library.extend_volume, fake.VOLUME, new_size) mock_get_volume_extra_specs.assert_called_once_with(fake.VOLUME) mock_setup_qos_for_volume.assert_has_calls([ mock.call(volume_copy, fake.EXTRA_SPECS), mock.call(fake.VOLUME, fake.EXTRA_SPECS)]) mock_extend_volume.assert_called_once_with( fake.VOLUME, new_size, fake.QOS_POLICY_GROUP_NAME) @ddt.data((9, 4, 0), (9, 6, 0)) def test__extend_volume_direct(self, ontap_version): current_size = fake.LUN_SIZE current_size_bytes = current_size * units.Gi new_size = fake.LUN_SIZE * 2 new_size_bytes = new_size * units.Gi max_size = fake.LUN_SIZE * 10 max_size_bytes = max_size * units.Gi mock_get_ontap_version = self.mock_object( self.library.zapi_client, 'get_ontap_version', return_value=ontap_version) fake_lun = block_base.NetAppLun(fake.LUN_HANDLE, fake.LUN_ID, current_size_bytes, fake.LUN_METADATA) mock_get_lun_from_table = self.mock_object( self.library, '_get_lun_from_table', return_value=fake_lun) fake_lun_geometry = {'max_resize': str(max_size_bytes)} mock_get_lun_geometry = self.mock_object( self.library.zapi_client, 'get_lun_geometry', return_value=fake_lun_geometry) mock_do_direct_resize = self.mock_object(self.library.zapi_client, 'do_direct_resize') mock_do_sub_clone_resize = self.mock_object(self.library, '_do_sub_clone_resize') self.library.lun_table = {fake.VOLUME['name']: fake_lun} self.library._extend_volume(fake.VOLUME, new_size, 'fake_qos_policy') mock_get_ontap_version.assert_called_once_with(cached=True) mock_get_lun_from_table.assert_called_once_with(fake.VOLUME['name']) if ontap_version < (9, 5, 0): mock_get_lun_geometry.assert_called_once_with( fake.LUN_METADATA['Path']) else: mock_get_lun_geometry.assert_not_called() mock_do_direct_resize.assert_called_once_with( fake.LUN_METADATA['Path'], str(new_size_bytes)) self.assertFalse(mock_do_sub_clone_resize.called) self.assertEqual(str(new_size_bytes), self.library.lun_table[fake.VOLUME['name']].size) @ddt.data((9, 4, 0), (9, 6, 0)) def test__extend_attached_volume_direct(self, ontap_version): current_size = fake.LUN_SIZE current_size_bytes = current_size * units.Gi new_size = fake.LUN_SIZE * 2 new_size_bytes = new_size * units.Gi max_size = fake.LUN_SIZE * 10 max_size_bytes = max_size * units.Gi volume_copy = copy.copy(fake.VOLUME) volume_copy['size'] = new_size volume_copy['attach_status'] = fake.ATTACHED mock_get_ontap_version = self.mock_object( self.library.zapi_client, 'get_ontap_version', return_value=ontap_version) fake_lun = block_base.NetAppLun(fake.LUN_HANDLE, fake.LUN_ID, current_size_bytes, fake.LUN_METADATA) mock_get_lun_from_table = self.mock_object( self.library, '_get_lun_from_table', return_value=fake_lun) fake_lun_geometry = {'max_resize': str(max_size_bytes)} mock_get_lun_geometry = self.mock_object( self.library.zapi_client, 'get_lun_geometry', return_value=fake_lun_geometry) 
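        # Illustrative sketch of the version gate these mocks exercise (inferred
        # from the assertions below, not copied from the driver source):
        #   if ontap_version < (9, 5, 0):
        #       geometry = zapi_client.get_lun_geometry(path)  # check 'max_resize'
        #   zapi_client.do_direct_resize(path, str(new_size_bytes))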
mock_do_direct_resize = self.mock_object(self.library.zapi_client, 'do_direct_resize') mock_do_sub_clone_resize = self.mock_object(self.library, '_do_sub_clone_resize') self.library.lun_table = {volume_copy['name']: fake_lun} self.library._extend_volume(volume_copy, new_size, 'fake_qos_policy') mock_get_lun_from_table.assert_called_once_with(volume_copy['name']) mock_get_ontap_version.assert_called_once_with(cached=True) if ontap_version < (9, 5, 0): mock_get_lun_geometry.assert_called_once_with( fake.LUN_METADATA['Path']) else: mock_get_lun_geometry.assert_not_called() mock_do_direct_resize.assert_called_once_with( fake.LUN_METADATA['Path'], str(new_size_bytes)) self.assertFalse(mock_do_sub_clone_resize.called) self.assertEqual(str(new_size_bytes), self.library.lun_table[volume_copy['name']].size) @ddt.data((9, 4, 0), (9, 6, 0)) def test__extend_volume_clone(self, ontap_version): current_size = fake.LUN_SIZE current_size_bytes = current_size * units.Gi new_size = fake.LUN_SIZE * 20 new_size_bytes = new_size * units.Gi max_size = fake.LUN_SIZE * 10 max_size_bytes = max_size * units.Gi mock_get_ontap_version = self.mock_object( self.library.zapi_client, 'get_ontap_version', return_value=ontap_version) fake_lun = block_base.NetAppLun(fake.LUN_HANDLE, fake.LUN_ID, current_size_bytes, fake.LUN_METADATA) mock_get_lun_from_table = self.mock_object( self.library, '_get_lun_from_table', return_value=fake_lun) fake_lun_geometry = {'max_resize': str(max_size_bytes)} mock_get_lun_geometry = self.mock_object( self.library.zapi_client, 'get_lun_geometry', return_value=fake_lun_geometry) mock_do_direct_resize = self.mock_object(self.library.zapi_client, 'do_direct_resize') mock_do_sub_clone_resize = self.mock_object(self.library, '_do_sub_clone_resize') self.library.lun_table = {fake.VOLUME['name']: fake_lun} self.library._extend_volume(fake.VOLUME, new_size, 'fake_qos_policy') mock_get_ontap_version.assert_called_once_with(cached=True) mock_get_lun_from_table.assert_called_once_with(fake.VOLUME['name']) if ontap_version < (9, 5, 0): self.assertFalse(mock_do_direct_resize.called) mock_get_lun_geometry.assert_called_once_with( fake.LUN_METADATA['Path']) mock_do_sub_clone_resize.assert_called_once_with( fake.LUN_METADATA['Path'], str(new_size_bytes), qos_policy_group_name='fake_qos_policy') else: mock_get_lun_geometry.assert_not_called() mock_do_sub_clone_resize.assert_not_called() mock_do_direct_resize.assert_called_once_with( fake.LUN_METADATA['Path'], str(new_size_bytes)) self.assertEqual(str(new_size_bytes), self.library.lun_table[fake.VOLUME['name']].size) @ddt.data((9, 4, 0), (9, 6, 0)) def test__extend_attached_volume_clone_error(self, ontap_version): current_size = fake.LUN_SIZE current_size_bytes = current_size * units.Gi new_size = fake.LUN_SIZE * 20 new_size_bytes = new_size * units.Gi max_size = fake.LUN_SIZE * 10 max_size_bytes = max_size * units.Gi volume_copy = copy.copy(fake.VOLUME) volume_copy['attach_status'] = fake.ATTACHED mock_get_ontap_version = self.mock_object( self.library.zapi_client, 'get_ontap_version', return_value=ontap_version) fake_lun = block_base.NetAppLun(fake.LUN_HANDLE, fake.LUN_ID, str(current_size_bytes), fake.LUN_METADATA) mock_get_lun_from_table = self.mock_object( self.library, '_get_lun_from_table', return_value=fake_lun) fake_lun_geometry = {'max_resize': str(max_size_bytes)} mock_get_lun_geometry = self.mock_object( self.library.zapi_client, 'get_lun_geometry', return_value=fake_lun_geometry) mock_do_direct_resize = self.mock_object(self.library.zapi_client, 
'do_direct_resize') mock_do_sub_clone_resize = self.mock_object(self.library, '_do_sub_clone_resize') self.library.lun_table = {volume_copy['name']: fake_lun} # (throne82) This error occurs only with versions older than 9.5 if ontap_version < (9, 5, 0): self.assertRaises(exception.VolumeBackendAPIException, self.library._extend_volume, volume_copy, new_size, fake.QOS_POLICY_GROUP_NAME) self.assertFalse(mock_do_direct_resize.called) self.assertFalse(mock_do_sub_clone_resize.called) mock_get_lun_geometry.assert_called_once_with( fake.LUN_METADATA['Path']) self.assertEqual(str(current_size_bytes), self.library.lun_table[volume_copy['name']].size) else: self.library._extend_volume(volume_copy, new_size, fake.QOS_POLICY_GROUP_NAME) mock_do_direct_resize.assert_called_once_with( fake.LUN_METADATA['Path'], str(new_size_bytes)) mock_do_sub_clone_resize.assert_not_called() mock_get_lun_geometry.assert_not_called() self.assertEqual(str(new_size_bytes), self.library.lun_table[volume_copy['name']].size) mock_get_ontap_version.assert_called_once_with(cached=True) mock_get_lun_from_table.assert_called_once_with( volume_copy['name']) @ddt.data((9, 4, 0), (9, 6, 0)) def test__extend_volume_no_change(self, ontap_version): current_size = fake.LUN_SIZE current_size_bytes = current_size * units.Gi new_size = fake.LUN_SIZE max_size = fake.LUN_SIZE * 10 max_size_bytes = max_size * units.Gi volume_copy = copy.copy(fake.VOLUME) volume_copy['size'] = new_size mock_get_ontap_version = self.mock_object( self.library.zapi_client, 'get_ontap_version') fake_lun = block_base.NetAppLun(fake.LUN_HANDLE, fake.LUN_ID, current_size_bytes, fake.LUN_METADATA) mock_get_lun_from_table = self.mock_object( self.library, '_get_lun_from_table', return_value=fake_lun) fake_lun_geometry = {'max_resize': str(max_size_bytes)} mock_get_lun_geometry = self.mock_object( self.library.zapi_client, 'get_lun_geometry', return_value=fake_lun_geometry) mock_do_direct_resize = self.mock_object(self.library.zapi_client, 'do_direct_resize') mock_do_sub_clone_resize = self.mock_object(self.library, '_do_sub_clone_resize') self.library.lun_table = {volume_copy['name']: fake_lun} self.library._extend_volume(volume_copy, new_size, 'fake_qos_policy') mock_get_lun_from_table.assert_called_once_with(volume_copy['name']) self.assertFalse(mock_get_lun_geometry.called) self.assertFalse(mock_do_direct_resize.called) self.assertFalse(mock_do_sub_clone_resize.called) self.assertFalse(mock_get_ontap_version.called) def test_do_sub_clone_resize(self): fake_lun = block_base.NetAppLun(fake.LUN_HANDLE, fake.LUN_ID, fake.LUN_SIZE, fake.LUN_METADATA) new_lun_size = fake.LUN_SIZE * 10 new_lun_name = 'new-%s' % fake.LUN_NAME block_count = fake.LUN_SIZE * units.Gi / 512 mock_get_lun_from_table = self.mock_object( self.library, '_get_lun_from_table', return_value=fake_lun) mock_get_vol_option = self.mock_object( self.library, '_get_vol_option', return_value='off') mock_get_lun_block_count = self.mock_object( self.library, '_get_lun_block_count', return_value=block_count) mock_create_lun = self.mock_object( self.library.zapi_client, 'create_lun') mock_clone_lun = self.mock_object(self.library, '_clone_lun') mock_post_sub_clone_resize = self.mock_object( self.library, '_post_sub_clone_resize') mock_destroy_lun = self.mock_object( self.library.zapi_client, 'destroy_lun') self.library._do_sub_clone_resize(fake.LUN_PATH, new_lun_size, fake.QOS_POLICY_GROUP_NAME) mock_get_lun_from_table.assert_called_once_with(fake.LUN_NAME) mock_get_vol_option.assert_called_once_with('vol0', 
'compression') mock_get_lun_block_count.assert_called_once_with(fake.LUN_PATH) mock_create_lun.assert_called_once_with( 'vol0', new_lun_name, new_lun_size, fake.LUN_METADATA, qos_policy_group_name=fake.QOS_POLICY_GROUP_NAME) mock_clone_lun.assert_called_once_with( fake.LUN_NAME, new_lun_name, block_count=block_count) mock_post_sub_clone_resize.assert_called_once_with(fake.LUN_PATH) self.assertFalse(mock_destroy_lun.called) def test_do_sub_clone_resize_compression_on(self): fake_lun = block_base.NetAppLun(fake.LUN_HANDLE, fake.LUN_ID, fake.LUN_SIZE, fake.LUN_METADATA) new_lun_size = fake.LUN_SIZE * 10 block_count = fake.LUN_SIZE * units.Gi / 512 mock_get_lun_from_table = self.mock_object( self.library, '_get_lun_from_table', return_value=fake_lun) mock_get_vol_option = self.mock_object( self.library, '_get_vol_option', return_value='on') mock_get_lun_block_count = self.mock_object( self.library, '_get_lun_block_count', return_value=block_count) mock_create_lun = self.mock_object( self.library.zapi_client, 'create_lun') mock_clone_lun = self.mock_object(self.library, '_clone_lun') mock_post_sub_clone_resize = self.mock_object( self.library, '_post_sub_clone_resize') mock_destroy_lun = self.mock_object( self.library.zapi_client, 'destroy_lun') self.assertRaises(exception.VolumeBackendAPIException, self.library._do_sub_clone_resize, fake.LUN_PATH, new_lun_size, fake.QOS_POLICY_GROUP_NAME) mock_get_lun_from_table.assert_called_once_with(fake.LUN_NAME) mock_get_vol_option.assert_called_once_with('vol0', 'compression') self.assertFalse(mock_get_lun_block_count.called) self.assertFalse(mock_create_lun.called) self.assertFalse(mock_clone_lun.called) self.assertFalse(mock_post_sub_clone_resize.called) self.assertFalse(mock_destroy_lun.called) def test_do_sub_clone_resize_no_blocks(self): fake_lun = block_base.NetAppLun(fake.LUN_HANDLE, fake.LUN_ID, fake.LUN_SIZE, fake.LUN_METADATA) new_lun_size = fake.LUN_SIZE * 10 block_count = 0 mock_get_lun_from_table = self.mock_object( self.library, '_get_lun_from_table', return_value=fake_lun) mock_get_vol_option = self.mock_object( self.library, '_get_vol_option', return_value='off') mock_get_lun_block_count = self.mock_object( self.library, '_get_lun_block_count', return_value=block_count) mock_create_lun = self.mock_object( self.library.zapi_client, 'create_lun') mock_clone_lun = self.mock_object(self.library, '_clone_lun') mock_post_sub_clone_resize = self.mock_object( self.library, '_post_sub_clone_resize') mock_destroy_lun = self.mock_object( self.library.zapi_client, 'destroy_lun') self.assertRaises(exception.VolumeBackendAPIException, self.library._do_sub_clone_resize, fake.LUN_PATH, new_lun_size, fake.QOS_POLICY_GROUP_NAME) mock_get_lun_from_table.assert_called_once_with(fake.LUN_NAME) mock_get_vol_option.assert_called_once_with('vol0', 'compression') mock_get_lun_block_count.assert_called_once_with(fake.LUN_PATH) self.assertFalse(mock_create_lun.called) self.assertFalse(mock_clone_lun.called) self.assertFalse(mock_post_sub_clone_resize.called) self.assertFalse(mock_destroy_lun.called) def test_do_sub_clone_resize_create_error(self): fake_lun = block_base.NetAppLun(fake.LUN_HANDLE, fake.LUN_ID, fake.LUN_SIZE, fake.LUN_METADATA) new_lun_size = fake.LUN_SIZE * 10 new_lun_name = 'new-%s' % fake.LUN_NAME block_count = fake.LUN_SIZE * units.Gi / 512 mock_get_lun_from_table = self.mock_object( self.library, '_get_lun_from_table', return_value=fake_lun) mock_get_vol_option = self.mock_object( self.library, '_get_vol_option', return_value='off') 
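        # The block_count fixture above uses the 512-byte block convention shared
        # by these resize tests: block_count = fake.LUN_SIZE * units.Gi / 512.
        # For example, a 1 GiB LUN gives 1 * 1024**3 / 512 = 2097152 blocks
        # (illustrative figure; fake.LUN_SIZE defines the size actually used here).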
mock_get_lun_block_count = self.mock_object( self.library, '_get_lun_block_count', return_value=block_count) mock_create_lun = self.mock_object( self.library.zapi_client, 'create_lun', side_effect=netapp_api.NaApiError) mock_clone_lun = self.mock_object(self.library, '_clone_lun') mock_post_sub_clone_resize = self.mock_object( self.library, '_post_sub_clone_resize') mock_destroy_lun = self.mock_object( self.library.zapi_client, 'destroy_lun') self.assertRaises(netapp_api.NaApiError, self.library._do_sub_clone_resize, fake.LUN_PATH, new_lun_size, fake.QOS_POLICY_GROUP_NAME) mock_get_lun_from_table.assert_called_once_with(fake.LUN_NAME) mock_get_vol_option.assert_called_once_with('vol0', 'compression') mock_get_lun_block_count.assert_called_once_with(fake.LUN_PATH) mock_create_lun.assert_called_once_with( 'vol0', new_lun_name, new_lun_size, fake.LUN_METADATA, qos_policy_group_name=fake.QOS_POLICY_GROUP_NAME) self.assertFalse(mock_clone_lun.called) self.assertFalse(mock_post_sub_clone_resize.called) self.assertFalse(mock_destroy_lun.called) def test_do_sub_clone_resize_clone_error(self): fake_lun = block_base.NetAppLun(fake.LUN_HANDLE, fake.LUN_ID, fake.LUN_SIZE, fake.LUN_METADATA) new_lun_size = fake.LUN_SIZE * 10 new_lun_name = 'new-%s' % fake.LUN_NAME new_lun_path = '/vol/vol0/%s' % new_lun_name block_count = fake.LUN_SIZE * units.Gi / 512 mock_get_lun_from_table = self.mock_object( self.library, '_get_lun_from_table', return_value=fake_lun) mock_get_vol_option = self.mock_object( self.library, '_get_vol_option', return_value='off') mock_get_lun_block_count = self.mock_object( self.library, '_get_lun_block_count', return_value=block_count) mock_create_lun = self.mock_object( self.library.zapi_client, 'create_lun') mock_clone_lun = self.mock_object( self.library, '_clone_lun', side_effect=netapp_api.NaApiError) mock_post_sub_clone_resize = self.mock_object( self.library, '_post_sub_clone_resize') mock_destroy_lun = self.mock_object( self.library.zapi_client, 'destroy_lun') self.assertRaises(netapp_api.NaApiError, self.library._do_sub_clone_resize, fake.LUN_PATH, new_lun_size, fake.QOS_POLICY_GROUP_NAME) mock_get_lun_from_table.assert_called_once_with(fake.LUN_NAME) mock_get_vol_option.assert_called_once_with('vol0', 'compression') mock_get_lun_block_count.assert_called_once_with(fake.LUN_PATH) mock_create_lun.assert_called_once_with( 'vol0', new_lun_name, new_lun_size, fake.LUN_METADATA, qos_policy_group_name=fake.QOS_POLICY_GROUP_NAME) mock_clone_lun.assert_called_once_with( fake.LUN_NAME, new_lun_name, block_count=block_count) self.assertFalse(mock_post_sub_clone_resize.called) mock_destroy_lun.assert_called_once_with(new_lun_path) def test_configure_chap_generate_username_and_password(self): """Ensure that a CHAP username and password are generated.""" initiator_name = fake.ISCSI_CONNECTOR['initiator'] username, password = self.library._configure_chap(initiator_name) self.assertEqual(na_utils.DEFAULT_CHAP_USER_NAME, username) self.assertIsNotNone(password) self.assertEqual(len(password), na_utils.CHAP_SECRET_LENGTH) def test_add_chap_properties(self): """Ensure that CHAP properties are added to the properties dictionary """ properties = {'data': {}} self.library._add_chap_properties(properties, 'user1', 'pass1') data = properties['data'] self.assertEqual('CHAP', data['auth_method']) self.assertEqual('user1', data['auth_username']) self.assertEqual('pass1', data['auth_password']) self.assertEqual('CHAP', data['discovery_auth_method']) self.assertEqual('user1', 
data['discovery_auth_username']) self.assertEqual('pass1', data['discovery_auth_password']) def test_add_looping_tasks(self): mock_add_task = self.mock_object(self.library.loopingcalls, 'add_task') self.library.configuration.netapp_disaggregated_platform = False mock_call_snap_cleanup = self.mock_object( self.library, '_delete_snapshots_marked_for_deletion') mock_call_ems_logging = self.mock_object( self.library, '_handle_ems_logging') self.library._add_looping_tasks() mock_add_task.assert_has_calls([ mock.call(mock_call_snap_cleanup, loopingcalls.ONE_MINUTE, loopingcalls.ONE_MINUTE), mock.call(mock_call_ems_logging, loopingcalls.ONE_HOUR)]) def test_delete_snapshots_marked_for_deletion(self): snapshots = [{ 'name': fake.SNAPSHOT_NAME, 'volume_name': fake.VOLUME['name'] }] mock_get_snapshots_marked = self.mock_object( self.zapi_client, 'get_snapshots_marked_for_deletion') mock_get_snapshots_marked.return_value = snapshots mock_delete_snapshot = self.mock_object( self.zapi_client, 'delete_snapshot') self.library._delete_snapshots_marked_for_deletion() mock_get_snapshots_marked.assert_called_once_with() mock_delete_snapshot.assert_called_once_with( fake.VOLUME['name'], fake.SNAPSHOT_NAME) def test_delete_lun_from_table(self): fake_lun = block_base.NetAppLun(fake.LUN_HANDLE, fake.LUN_ID, fake.LUN_SIZE, fake.LUN_METADATA) self.library.lun_table = {fake_lun.name: fake_lun} self.library._delete_lun_from_table(fake_lun.name) self.assertEqual({}, self.library.lun_table) def test_delete_lun_from_table_not_found(self): fake_lun = block_base.NetAppLun(fake.LUN_HANDLE, fake.LUN_ID, fake.LUN_SIZE, fake.LUN_METADATA) self.library.lun_table = {fake_lun.name: fake_lun} self.library._delete_lun_from_table('another-fake-lun') self.assertEqual({fake_lun.name: fake_lun}, self.library.lun_table) def test_add_looping_tasks_traditional_platform(self): """Test _add_looping_tasks with AFF platform""" mock_add_task = self.mock_object(self.library.loopingcalls, 'add_task') self.library.configuration.netapp_disaggregated_platform = False mock_call_snap_cleanup = self.mock_object( self.library, '_delete_snapshots_marked_for_deletion') mock_call_ems_logging = self.mock_object( self.library, '_handle_ems_logging') self.library._add_looping_tasks() # Traditional platform should include snapshot cleanup task mock_add_task.assert_has_calls([ mock.call(mock_call_snap_cleanup, loopingcalls.ONE_MINUTE, loopingcalls.ONE_MINUTE), mock.call(mock_call_ems_logging, loopingcalls.ONE_HOUR)]) def test_add_looping_tasks_disaggregated_platform(self): """Test _add_looping_tasks with disaggregated platform""" mock_add_task = self.mock_object(self.library.loopingcalls, 'add_task') self.library.configuration.netapp_disaggregated_platform = True mock_call_snap_cleanup = self.mock_object( self.library, '_delete_snapshots_marked_for_deletion') mock_call_ems_logging = self.mock_object( self.library, '_handle_ems_logging') self.library._add_looping_tasks() # Disaggregated platform should NOT include snapshot cleanup task mock_add_task.assert_has_calls([ mock.call(mock_call_ems_logging, loopingcalls.ONE_HOUR)]) # Verify snapshot cleanup is not called mock_call_snap_cleanup.assert_not_called() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/volume/drivers/netapp/dataontap/test_block_cmode.py0000664000175000017500000024671300000000000030572 0ustar00zuulzuul00000000000000# Copyright (c) 2014 Alex Meade. All rights reserved. 
# Copyright (c) 2014 Clinton Knight. All rights reserved. # Copyright (c) 2015 Tom Barron. All rights reserved. # Copyright (c) 2016 Mike Rooney. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Mock unit tests for the NetApp block storage C-mode library.""" from unittest import mock import ddt from cinder import exception from cinder.objects import fields from cinder.tests.unit import fake_volume from cinder.tests.unit import test from cinder.tests.unit import utils as test_utils import cinder.tests.unit.volume.drivers.netapp.dataontap.fakes as fake from cinder.tests.unit.volume.drivers.netapp.dataontap.utils import fakes as\ fake_utils import cinder.tests.unit.volume.drivers.netapp.fakes as na_fakes from cinder.volume.drivers.netapp.dataontap import block_base from cinder.volume.drivers.netapp.dataontap import block_cmode from cinder.volume.drivers.netapp.dataontap.client import api as netapp_api from cinder.volume.drivers.netapp.dataontap.client import client_base from cinder.volume.drivers.netapp.dataontap.client import client_cmode from cinder.volume.drivers.netapp.dataontap.performance import perf_cmode from cinder.volume.drivers.netapp.dataontap.utils import capabilities from cinder.volume.drivers.netapp.dataontap.utils import data_motion from cinder.volume.drivers.netapp.dataontap.utils import loopingcalls from cinder.volume.drivers.netapp.dataontap.utils import utils as dot_utils from cinder.volume.drivers.netapp import utils as na_utils from cinder.volume import volume_utils @ddt.ddt class NetAppBlockStorageCmodeLibraryTestCase(test.TestCase): """Test case for NetApp's C-Mode iSCSI library.""" def setUp(self): super(NetAppBlockStorageCmodeLibraryTestCase, self).setUp() kwargs = { 'configuration': self.get_config_cmode(), 'host': 'openstack@cdotblock', } self.library = block_cmode.NetAppBlockStorageCmodeLibrary( 'driver', 'protocol', **kwargs) self.library.zapi_client = mock.Mock() self.zapi_client = self.library.zapi_client self.library.perf_library = mock.Mock() self.library.ssc_library = mock.Mock() self.library.vserver = mock.Mock() self.fake_lun = block_base.NetAppLun(fake.LUN_HANDLE, fake.LUN_NAME, fake.SIZE, None) self.fake_snapshot_lun = block_base.NetAppLun( fake.SNAPSHOT_LUN_HANDLE, fake.SNAPSHOT_NAME, fake.SIZE, None) self.mock_object(self.library, 'lun_table') self.library.lun_table = { fake.LUN_NAME: self.fake_lun, fake.SNAPSHOT_NAME: self.fake_snapshot_lun, } self.mock_object(block_base.NetAppBlockStorageLibrary, 'delete_volume') def get_config_cmode(self): config = na_fakes.create_configuration_cmode() config.netapp_storage_protocol = 'iscsi' config.netapp_login = 'admin' config.netapp_password = 'pass' config.netapp_server_hostname = '127.0.0.1' config.netapp_transport_type = 'https' config.netapp_server_port = '443' config.netapp_vserver = 'openstack' config.netapp_api_trace_pattern = 'fake_regex' return config @ddt.data(fake.AFF_SYSTEM_NODES_INFO, fake.FAS_SYSTEM_NODES_INFO, fake.HYBRID_SYSTEM_NODES_INFO) @mock.patch.object(client_base.Client, 
'get_ontap_version', return_value='9.6') @mock.patch.object(perf_cmode, 'PerformanceCmodeLibrary', mock.Mock()) @mock.patch.object(client_base.Client, 'get_ontapi_version', mock.MagicMock(return_value=(1, 20))) @mock.patch.object(capabilities.CapabilitiesLibrary, 'cluster_user_supported') @mock.patch.object(capabilities.CapabilitiesLibrary, 'check_api_permissions') @mock.patch.object(na_utils, 'check_flags') @mock.patch.object(block_base.NetAppBlockStorageLibrary, 'do_setup') def test_do_setup(self, cluster_nodes_info, super_do_setup, mock_check_flags, mock_check_api_permissions, mock_cluster_user_supported, mock_get_ontap_version): self.mock_object(client_base.Client, '_init_ssh_client') mock_get_cluster_nodes_info = self.mock_object( client_cmode.Client, '_get_cluster_nodes_info', return_value=cluster_nodes_info) self.mock_object( dot_utils, 'get_backend_configuration', return_value=self.get_config_cmode()) context = mock.Mock() self.library.do_setup(context) super_do_setup.assert_called_once_with(context) self.assertEqual(1, mock_check_flags.call_count) mock_check_api_permissions.assert_called_once_with() mock_cluster_user_supported.assert_called_once_with() mock_get_ontap_version.assert_called_once_with(cached=False) mock_get_cluster_nodes_info.assert_called_once_with() @ddt.data(fake.AFF_SYSTEM_NODES_INFO, fake.FAS_SYSTEM_NODES_INFO, fake.HYBRID_SYSTEM_NODES_INFO) @mock.patch.object(client_base.Client, 'get_ontap_version', return_value='9.6') @mock.patch.object(perf_cmode, 'PerformanceCmodeLibrary', mock.Mock()) @mock.patch.object(client_base.Client, 'get_ontapi_version', mock.MagicMock(return_value=(1, 20))) @mock.patch.object(capabilities.CapabilitiesLibrary, 'cluster_user_supported') @mock.patch.object(capabilities.CapabilitiesLibrary, 'check_api_permissions') @mock.patch.object(na_utils, 'check_flags') @mock.patch.object(block_base.NetAppBlockStorageLibrary, 'do_setup') def test_do_setup_with_replication(self, cluster_nodes_info, super_do_setup, mock_check_flags, mock_check_api_permissions, mock_cluster_user_supported, mock_get_ontap_version): """Tests setup method when replication is enabled""" self.mock_object(client_base.Client, '_init_ssh_client') mock_get_cluster_nodes_info = self.mock_object( client_cmode.Client, '_get_cluster_nodes_info', return_value=cluster_nodes_info) self.mock_object( dot_utils, 'get_backend_configuration', return_value=self.get_config_cmode()) context = mock.Mock() self.replication_enabled = True self.replication_policy = "AutomatedFailOver" self.replication_backends = ['target_1', 'target_2'] self.mock_object(self.library, 'get_replication_backend_names', return_value=self.replication_backends) self.mock_object(self.library, 'get_replication_policy', return_value=self.replication_policy) self.library.do_setup(context) super_do_setup.assert_called_once_with(context) self.assertEqual(1, mock_check_flags.call_count) mock_check_api_permissions.assert_called_once_with() mock_cluster_user_supported.assert_called_once_with() mock_get_ontap_version.assert_called_once_with(cached=False) mock_get_cluster_nodes_info.assert_called_once_with() def test_check_for_setup_error(self): super_check_for_setup_error = self.mock_object( block_base.NetAppBlockStorageLibrary, 'check_for_setup_error') mock_get_pool_map = self.mock_object( self.library, '_get_flexvol_to_pool_map', return_value={'fake_map': None}) mock_add_looping_tasks = self.mock_object( self.library, '_add_looping_tasks') self.library.check_for_setup_error() self.assertEqual(1, 
super_check_for_setup_error.call_count) self.assertEqual(1, mock_add_looping_tasks.call_count) mock_get_pool_map.assert_called_once_with() mock_add_looping_tasks.assert_called_once_with() def test_check_for_setup_error_no_filtered_pools(self): self.mock_object(block_base.NetAppBlockStorageLibrary, 'check_for_setup_error') self.mock_object(self.library, '_add_looping_tasks') self.mock_object( self.library, '_get_flexvol_to_pool_map', return_value={}) self.assertRaises(na_utils.NetAppDriverException, self.library.check_for_setup_error) @ddt.data({'replication_enabled': True, 'failed_over': False, 'cluster_credentials': True}, {'replication_enabled': True, 'failed_over': True, 'cluster_credentials': True}, {'replication_enabled': False, 'failed_over': False, 'cluster_credentials': False}) @ddt.unpack def test_handle_housekeeping_tasks( self, replication_enabled, failed_over, cluster_credentials): self.library.using_cluster_credentials = cluster_credentials ensure_mirrors = self.mock_object(data_motion.DataMotionMixin, 'ensure_snapmirrors') self.mock_object(self.library.ssc_library, 'get_ssc_flexvol_names', return_value=fake_utils.SSC.keys()) mock_remove_unused_qos_policy_groups = self.mock_object( self.zapi_client, 'remove_unused_qos_policy_groups') self.library.replication_enabled = replication_enabled self.library.failed_over = failed_over self.library._handle_housekeeping_tasks() if self.library.using_cluster_credentials: mock_remove_unused_qos_policy_groups.assert_called_once_with() else: mock_remove_unused_qos_policy_groups.assert_not_called() if replication_enabled and not failed_over: ensure_mirrors.assert_called_once_with( self.library.configuration, self.library.backend_name, fake_utils.SSC.keys()) else: self.assertFalse(ensure_mirrors.called) def test_handle_ems_logging(self): volume_list = ['vol0', 'vol1', 'vol2'] self.mock_object( self.library.ssc_library, 'get_ssc_flexvol_names', return_value=volume_list) self.mock_object( dot_utils, 'build_ems_log_message_0', return_value='fake_base_ems_log_message') self.mock_object( dot_utils, 'build_ems_log_message_1', return_value='fake_pool_ems_log_message') mock_send_ems_log_message = self.mock_object( self.zapi_client, 'send_ems_log_message') self.library._handle_ems_logging() mock_send_ems_log_message.assert_has_calls([ mock.call('fake_base_ems_log_message'), mock.call('fake_pool_ems_log_message'), ]) dot_utils.build_ems_log_message_0.assert_called_once_with( self.library.driver_name, self.library.app_version) dot_utils.build_ems_log_message_1.assert_called_once_with( self.library.driver_name, self.library.app_version, self.library.vserver, volume_list, []) def test_find_mapped_lun_igroup(self): igroups = [fake.IGROUP1, fake.CUSTOM_IGROUP] self.zapi_client.get_igroup_by_initiators.return_value = igroups lun_maps = [ {'initiator-group': fake.IGROUP1_NAME, 'lun-id': '1', 'vserver': fake.VSERVER_NAME}, {'initiator-group': fake.CUSTOM_IGROUP['initiator-group-name'], 'lun-id': '2', 'vserver': fake.VSERVER_NAME} ] self.zapi_client.get_lun_map.return_value = lun_maps (igroup, lun_id) = self.library._find_mapped_lun_igroup( fake.LUN_PATH, fake.FC_FORMATTED_INITIATORS) self.assertEqual(fake.IGROUP1_NAME, igroup) self.assertEqual('1', lun_id) def test_find_mapped_lun_igroup_initiator_mismatch(self): self.zapi_client.get_igroup_by_initiators.return_value = [] lun_maps = [{'initiator-group': fake.IGROUP1_NAME, 'lun-id': '1', 'vserver': fake.VSERVER_NAME}] self.zapi_client.get_lun_map.return_value = lun_maps (igroup, lun_id) = 
self.library._find_mapped_lun_igroup( fake.LUN_PATH, fake.FC_FORMATTED_INITIATORS) self.assertIsNone(igroup) self.assertIsNone(lun_id) def test_find_mapped_lun_igroup_name_mismatch(self): igroups = [{'initiator-group-os-type': 'linux', 'initiator-group-type': 'fcp', 'initiator-group-name': 'igroup2'}] self.zapi_client.get_igroup_by_initiators.return_value = igroups lun_maps = [{'initiator-group': fake.IGROUP1_NAME, 'lun-id': '1', 'vserver': fake.VSERVER_NAME}] self.zapi_client.get_lun_map.return_value = lun_maps (igroup, lun_id) = self.library._find_mapped_lun_igroup( fake.LUN_PATH, fake.FC_FORMATTED_INITIATORS) self.assertIsNone(igroup) self.assertIsNone(lun_id) def test_find_mapped_lun_igroup_no_igroup_prefix(self): igroups = [fake.CUSTOM_IGROUP] expected_igroup = fake.CUSTOM_IGROUP['initiator-group-name'] self.zapi_client.get_igroup_by_initiators.return_value = igroups lun_maps = [{'initiator-group': expected_igroup, 'lun-id': '1', 'vserver': fake.VSERVER_NAME}] self.zapi_client.get_lun_map.return_value = lun_maps (igroup, lun_id) = self.library._find_mapped_lun_igroup( fake.LUN_PATH, fake.FC_FORMATTED_INITIATORS) self.assertEqual(expected_igroup, igroup) self.assertEqual('1', lun_id) def test_clone_lun_zero_block_count(self): """Test for when clone lun is not passed a block count.""" self.library._get_lun_attr = mock.Mock(return_value={'Volume': 'fakeLUN'}) self.library.zapi_client = mock.Mock() lun = fake.FAKE_LUN_GET_ITER_RESULT self.library.zapi_client.get_lun_by_args.return_value = lun self.library._add_lun_to_table = mock.Mock() self.library._clone_lun('fakeLUN', 'newFakeLUN', 'false') self.library.zapi_client.clone_lun.assert_called_once_with( 'fakeLUN', 'fakeLUN', 'newFakeLUN', 'false', block_count=0, dest_block=0, src_block=0, qos_policy_group_name=None, qos_policy_group_is_adaptive=False, source_snapshot=None, is_snapshot=False) def test_clone_lun_blocks(self): """Test for when clone lun is passed block information.""" block_count = 10 src_block = 10 dest_block = 30 self.library._get_lun_attr = mock.Mock(return_value={'Volume': 'fakeLUN'}) self.library.zapi_client = mock.Mock() lun = fake.FAKE_LUN_GET_ITER_RESULT self.library.zapi_client.get_lun_by_args.return_value = lun self.library._add_lun_to_table = mock.Mock() self.library._clone_lun('fakeLUN', 'newFakeLUN', 'false', block_count=block_count, src_block=src_block, dest_block=dest_block) self.library.zapi_client.clone_lun.assert_called_once_with( 'fakeLUN', 'fakeLUN', 'newFakeLUN', 'false', block_count=block_count, dest_block=dest_block, src_block=src_block, qos_policy_group_name=None, qos_policy_group_is_adaptive=False, source_snapshot=None, is_snapshot=False) def test_clone_lun_no_space_reservation(self): """Test for when space_reservation is not passed.""" self.library._get_lun_attr = mock.Mock(return_value={'Volume': 'fakeLUN'}) self.library.zapi_client = mock.Mock() self.library.lun_space_reservation = 'false' lun = fake.FAKE_LUN_GET_ITER_RESULT self.library.zapi_client.get_lun_by_args.return_value = lun self.library._add_lun_to_table = mock.Mock() self.library._clone_lun('fakeLUN', 'newFakeLUN', is_snapshot=True) self.library.zapi_client.clone_lun.assert_called_once_with( 'fakeLUN', 'fakeLUN', 'newFakeLUN', 'false', block_count=0, dest_block=0, src_block=0, qos_policy_group_name=None, qos_policy_group_is_adaptive=False, source_snapshot=None, is_snapshot=True) def test_clone_lun_busy_exception(self): """Test for when clone lun is throwing device busy error.""" self.library._get_lun_attr = mock.Mock( 
return_value={'Volume': 'fakeLUN'}) self.library.zapi_client = mock.Mock() lun = fake.FAKE_LUN_GET_ITER_RESULT self.library.zapi_client.get_lun_by_args.return_value = lun self.library._add_lun_to_table = mock.Mock() msg = 'Device busy' self.mock_object(self.library.zapi_client, 'clone_lun', mock.Mock(side_effect=netapp_api.NaApiError( message=msg))) self.mock_object(self.library, '_retry_clone_lun', mock.Mock(return_value=None) ) self.library._clone_lun('fakeLUN', 'newFakeLUN', is_snapshot=True) self.library.zapi_client.clone_lun.assert_called_once_with( 'fakeLUN', 'fakeLUN', 'newFakeLUN', 'true', block_count=0, dest_block=0, src_block=0, qos_policy_group_name=None, qos_policy_group_is_adaptive=False, source_snapshot=None, is_snapshot=True) def test__retry_clone_lun_success(self): self.library.zapi_client = mock.Mock() self.library._retry_clone_lun('fakeSourceLUN', 'fakeLUN', 'newFakeLUN', 'false', ) self.library.zapi_client.clone_lun.assert_called_once_with( 'fakeSourceLUN', 'fakeLUN', 'newFakeLUN', 'false', block_count=0, dest_block=0, src_block=0, qos_policy_group_name=None, qos_policy_group_is_adaptive=False, source_snapshot=None, is_snapshot=False) def test_retry_clone_lun_failure(self): self.library.zapi_client = mock.Mock() self.mock_object(self.library.zapi_client, 'clone_lun', mock.Mock( side_effect=na_utils.NetAppDriverException), ) self.assertRaises( na_utils.NetAppDriverException, self.library._retry_clone_lun, 'fakeLUN', 'fakeLUN', 'newFakeLUN', 'false', ) def test_get_fc_target_wwpns(self): ports = [fake.FC_FORMATTED_TARGET_WWPNS[0], fake.FC_FORMATTED_TARGET_WWPNS[1]] self.zapi_client.get_fc_target_wwpns.return_value = ports result = self.library._get_fc_target_wwpns() self.assertSetEqual(set(ports), set(result)) def test_create_lun(self): self.library._create_lun( fake.VOLUME_ID, fake.LUN_ID, fake.LUN_SIZE, fake.LUN_METADATA) self.library.zapi_client.create_lun.assert_called_once_with( fake.VOLUME_ID, fake.LUN_ID, fake.LUN_SIZE, fake.LUN_METADATA, None, False) @ddt.data({'replication_backends': [], 'cluster_credentials': False, 'report_provisioned_capacity': False}, {'replication_backends': ['target_1', 'target_2'], 'cluster_credentials': True, 'report_provisioned_capacity': True}) @ddt.unpack def test_get_pool_stats(self, replication_backends, cluster_credentials, report_provisioned_capacity): self.library.using_cluster_credentials = cluster_credentials conf = self.library.configuration conf.netapp_driver_reports_provisioned_capacity = ( report_provisioned_capacity) ssc = { 'vola': { 'pool_name': 'vola', 'thick_provisioning_support': True, 'thin_provisioning_support': False, 'netapp_thin_provisioned': 'false', 'netapp_compression': 'false', 'netapp_mirrored': 'false', 'netapp_dedup': 'true', 'netapp_aggregate': 'aggr1', 'netapp_raid_type': 'raid_dp', 'netapp_disk_type': 'SSD', 'netapp_is_flexgroup': 'false', }, } mock_get_ssc = self.mock_object(self.library.ssc_library, 'get_ssc', return_value=ssc) mock_get_aggrs = self.mock_object(self.library.ssc_library, 'get_ssc_aggregates', return_value=['aggr1']) self.mock_object(self.library, 'get_replication_backend_names', return_value=replication_backends) self.library.reserved_percentage = 5 self.library.max_over_subscription_ratio = 10 self.library.perf_library.get_node_utilization_for_pool = ( mock.Mock(return_value=30.0)) mock_capacities = { 'size-total': 10737418240.0, 'size-available': 2147483648.0, } luns_provisioned_cap = [{ 'path': '/vol/volume-ae947c9b-2392-4956-b373-aaac4521f37e', 'size': 5368709120.0 # 5GB }, { 'path': 
'/vol/snapshot-527eedad-a431-483d-b0ca-18995dd65b66', 'size': 1073741824.0 # 1GB }] self.mock_object(self.zapi_client, 'get_flexvol_capacity', return_value=mock_capacities) self.mock_object(self.zapi_client, 'get_lun_sizes_by_volume', return_value=luns_provisioned_cap) self.mock_object(self.zapi_client, 'get_flexvol_dedupe_used_percent', return_value=55.0) aggr_capacities = { 'aggr1': { 'percent-used': 45, 'size-available': 59055800320.0, 'size-total': 107374182400.0, }, } mock_get_aggr_capacities = self.mock_object( self.zapi_client, 'get_aggregate_capacities', return_value=aggr_capacities) result = self.library._get_pool_stats(filter_function='filter', goodness_function='goodness') expected = [{ 'pool_name': 'vola', 'QoS_support': True, 'consistencygroup_support': True, 'consistent_group_snapshot_enabled': True, 'reserved_percentage': 5, 'max_over_subscription_ratio': 10.0, 'multiattach': True, 'total_capacity_gb': 10.0, 'free_capacity_gb': 2.0, 'netapp_dedupe_used_percent': 55.0, 'netapp_aggregate_used_percent': 45, 'utilization': 30.0, 'filter_function': 'filter', 'goodness_function': 'goodness', 'thick_provisioning_support': True, 'thin_provisioning_support': False, 'netapp_thin_provisioned': 'false', 'netapp_compression': 'false', 'netapp_mirrored': 'false', 'netapp_dedup': 'true', 'netapp_aggregate': 'aggr1', 'netapp_raid_type': 'raid_dp', 'netapp_disk_type': 'SSD', 'replication_enabled': False, 'online_extend_support': True, 'netapp_is_flexgroup': 'false', 'total_volumes': 2, }] if report_provisioned_capacity: expected[0].update({'provisioned_capacity_gb': 5.0}) expected[0].update({'QoS_support': cluster_credentials}) if not cluster_credentials: expected[0].update({ 'netapp_aggregate_used_percent': 0, 'netapp_dedupe_used_percent': 0 }) if replication_backends: expected[0].update({ 'replication_enabled': True, 'replication_count': len(replication_backends), 'replication_targets': replication_backends, 'replication_type': 'async', }) self.assertEqual(expected, result) mock_get_ssc.assert_called_once_with() if cluster_credentials: mock_get_aggrs.assert_called_once_with() mock_get_aggr_capacities.assert_called_once_with(['aggr1']) @ddt.data({}, None) def test_get_pool_stats_no_ssc_vols(self, ssc): mock_get_ssc = self.mock_object(self.library.ssc_library, 'get_ssc', return_value=ssc) pools = self.library._get_pool_stats() self.assertListEqual([], pools) mock_get_ssc.assert_called_once_with() @ddt.data(r'open+|demix+', 'open.+', r'.+\d', '^((?!mix+).)*$', 'open123, open321') def test_get_pool_map_match_selected_pools(self, patterns): self.library.configuration.netapp_pool_name_search_pattern = patterns mock_list_flexvols = self.mock_object( self.zapi_client, 'list_flexvols', return_value=fake.FAKE_CMODE_VOLUMES) result = self.library._get_flexvol_to_pool_map() expected = { 'open123': { 'pool_name': 'open123', }, 'open321': { 'pool_name': 'open321', }, } self.assertEqual(expected, result) mock_list_flexvols.assert_called_once_with() @ddt.data('', 'mix.+|open.+', '.+', 'open123, mixed, open321', '.*?') def test_get_pool_map_match_all_pools(self, patterns): self.library.configuration.netapp_pool_name_search_pattern = patterns mock_list_flexvols = self.mock_object( self.zapi_client, 'list_flexvols', return_value=fake.FAKE_CMODE_VOLUMES) result = self.library._get_flexvol_to_pool_map() self.assertEqual(fake.FAKE_CMODE_POOL_MAP, result) mock_list_flexvols.assert_called_once_with() def test_get_pool_map_invalid_conf(self): """Verify an exception is raised if the regex pattern is invalid""" 
self.library.configuration.netapp_pool_name_search_pattern = '(.+' self.assertRaises(exception.InvalidConfigurationValue, self.library._get_flexvol_to_pool_map) @ddt.data('abc|stackopen|openstack|abc*', 'abc', 'stackopen', 'openstack', 'abc*', '^$') def test_get_pool_map_non_matching_patterns(self, patterns): self.library.configuration.netapp_pool_name_search_pattern = patterns mock_list_flexvols = self.mock_object( self.zapi_client, 'list_flexvols', return_value=fake.FAKE_CMODE_VOLUMES) result = self.library._get_flexvol_to_pool_map() self.assertEqual({}, result) mock_list_flexvols.assert_called_once_with() def test_update_ssc_disaggregated_platform(self): """Test _update_ssc with disaggregated platform (ASA r2).""" self.library.configuration.netapp_disaggregated_platform = True mock_get_cluster_pool_map = self.mock_object( self.library, '_get_cluster_to_pool_map', return_value=fake.FAKE_CLUSTER_INFO) result = self.library._update_ssc() self.assertIsNone(result) mock_get_cluster_pool_map.assert_called_once_with() self.library.ssc_library.update_ssc_asa.assert_called_once_with( fake.FAKE_CLUSTER_INFO) def test_update_ssc(self): """Test _update_ssc with traditional platform (flexvol).""" mock_get_pool_map = self.mock_object( self.library, '_get_flexvol_to_pool_map', return_value=fake.FAKE_CMODE_VOLUMES) result = self.library._update_ssc() self.assertIsNone(result) mock_get_pool_map.assert_called_once_with() self.library.ssc_library.update_ssc.assert_called_once_with( fake.FAKE_CMODE_VOLUMES) def test_delete_volume(self): self.mock_object(na_utils, 'get_valid_qos_policy_group_info', return_value=fake.QOS_POLICY_GROUP_INFO) self.mock_object(self.library, '_mark_qos_policy_group_for_deletion') self.library.delete_volume(fake.VOLUME) (block_base.NetAppBlockStorageLibrary.delete_volume. assert_called_once_with(fake.VOLUME)) na_utils.get_valid_qos_policy_group_info.assert_called_once_with( fake.VOLUME) (self.library._mark_qos_policy_group_for_deletion. assert_called_once_with(fake.QOS_POLICY_GROUP_INFO)) def test_delete_volume_get_valid_qos_policy_group_info_exception(self): self.mock_object(na_utils, 'get_valid_qos_policy_group_info', side_effect=exception.Invalid) self.mock_object(self.library, '_mark_qos_policy_group_for_deletion') self.library.delete_volume(fake.VOLUME) (block_base.NetAppBlockStorageLibrary.delete_volume. assert_called_once_with(fake.VOLUME)) (self.library._mark_qos_policy_group_for_deletion. 
assert_called_once_with(None)) def test_setup_qos_for_volume(self): self.mock_object(na_utils, 'get_valid_qos_policy_group_info', return_value=fake.QOS_POLICY_GROUP_INFO) self.mock_object(self.zapi_client, 'provision_qos_policy_group') mock_is_qos_min_supported = self.mock_object(self.library.ssc_library, 'is_qos_min_supported', return_value=True) mock_extract_host = self.mock_object(volume_utils, 'extract_host', return_value=fake.POOL_NAME) result = self.library._setup_qos_for_volume(fake.VOLUME, fake.EXTRA_SPECS) self.assertEqual(fake.QOS_POLICY_GROUP_INFO, result) self.zapi_client.provision_qos_policy_group.\ assert_called_once_with(fake.QOS_POLICY_GROUP_INFO, True) mock_is_qos_min_supported.assert_called_once_with(fake.POOL_NAME) mock_extract_host.assert_called_once_with(fake.VOLUME['host'], level='pool') def test_setup_qos_for_volume_exception_path(self): self.mock_object(na_utils, 'get_valid_qos_policy_group_info', side_effect=exception.Invalid) self.mock_object(self.zapi_client, 'provision_qos_policy_group') self.assertRaises(exception.VolumeBackendAPIException, self.library._setup_qos_for_volume, fake.VOLUME, fake.EXTRA_SPECS) self.assertEqual(0, self.zapi_client. provision_qos_policy_group.call_count) @ddt.data(True, False) def test_mark_qos_policy_group_for_deletion(self, is_adaptive): self.mock_object(self.zapi_client, 'mark_qos_policy_group_for_deletion') self.mock_object(na_utils, 'is_qos_policy_group_spec_adaptive', return_value=is_adaptive) self.library._mark_qos_policy_group_for_deletion( fake.QOS_POLICY_GROUP_INFO) self.zapi_client.mark_qos_policy_group_for_deletion\ .assert_called_once_with(fake.QOS_POLICY_GROUP_INFO, is_adaptive) def test_unmanage(self): self.mock_object(na_utils, 'get_valid_qos_policy_group_info', return_value=fake.QOS_POLICY_GROUP_INFO) self.mock_object(self.library, '_mark_qos_policy_group_for_deletion') self.mock_object(block_base.NetAppBlockStorageLibrary, 'unmanage') self.library.unmanage(fake.VOLUME) na_utils.get_valid_qos_policy_group_info.assert_called_once_with( fake.VOLUME) self.library._mark_qos_policy_group_for_deletion\ .assert_called_once_with(fake.QOS_POLICY_GROUP_INFO) block_base.NetAppBlockStorageLibrary.unmanage.assert_called_once_with( fake.VOLUME) def test_unmanage_w_invalid_qos_policy(self): self.mock_object(na_utils, 'get_valid_qos_policy_group_info', side_effect=exception.Invalid) self.mock_object(self.library, '_mark_qos_policy_group_for_deletion') self.mock_object(block_base.NetAppBlockStorageLibrary, 'unmanage') self.library.unmanage(fake.VOLUME) na_utils.get_valid_qos_policy_group_info.assert_called_once_with( fake.VOLUME) self.library._mark_qos_policy_group_for_deletion\ .assert_called_once_with(None) block_base.NetAppBlockStorageLibrary.unmanage.assert_called_once_with( fake.VOLUME) @ddt.data(True, False) def test_manage_existing_lun_same_name(self, is_adaptive): mock_lun = block_base.NetAppLun('handle', 'name', '1', {'Path': '/vol/FAKE_CMODE_VOL1/name'}) self.library._get_existing_vol_with_manage_ref = mock.Mock( return_value=mock_lun) self.mock_object(na_utils, 'get_volume_extra_specs') self.mock_object(na_utils, 'log_extra_spec_warnings') self.library._check_volume_type_for_lun = mock.Mock() self.library._setup_qos_for_volume = mock.Mock() self.mock_object(na_utils, 'get_qos_policy_group_name_from_info', return_value=fake.QOS_POLICY_GROUP_NAME) self.mock_object(na_utils, 'is_qos_policy_group_spec_adaptive', return_value=is_adaptive) self.library._add_lun_to_table = mock.Mock() self.zapi_client.move_lun = mock.Mock() 
mock_set_lun_qos_policy_group = self.mock_object( self.zapi_client, 'set_lun_qos_policy_group') self.library.manage_existing({'name': 'name'}, {'ref': 'ref'}) self.library._get_existing_vol_with_manage_ref.assert_called_once_with( {'ref': 'ref'}) self.assertEqual(1, self.library._check_volume_type_for_lun.call_count) self.assertEqual(1, self.library._add_lun_to_table.call_count) self.assertEqual(0, self.zapi_client.move_lun.call_count) self.assertEqual(1, mock_set_lun_qos_policy_group.call_count) def test_manage_existing_lun_new_path(self): mock_lun = block_base.NetAppLun( 'handle', 'name', '1', {'Path': '/vol/FAKE_CMODE_VOL1/name'}) self.library._get_existing_vol_with_manage_ref = mock.Mock( return_value=mock_lun) self.mock_object(na_utils, 'get_volume_extra_specs') self.mock_object(na_utils, 'log_extra_spec_warnings') self.library._check_volume_type_for_lun = mock.Mock() self.library._setup_qos_for_volume = mock.Mock() self.mock_object(na_utils, 'get_qos_policy_group_name_from_info', return_value=None) self.mock_object(na_utils, 'is_qos_policy_group_spec_adaptive', return_value=False) self.library._add_lun_to_table = mock.Mock() self.zapi_client.move_lun = mock.Mock() self.library.manage_existing({'name': 'volume'}, {'ref': 'ref'}) self.assertEqual( 2, self.library._get_existing_vol_with_manage_ref.call_count) self.assertEqual(1, self.library._check_volume_type_for_lun.call_count) self.assertEqual(1, self.library._add_lun_to_table.call_count) self.zapi_client.move_lun.assert_called_once_with( '/vol/FAKE_CMODE_VOL1/name', '/vol/FAKE_CMODE_VOL1/volume') @ddt.data({'secondary_id': 'dev0', 'configured_targets': ['dev1']}, {'secondary_id': 'dev3', 'configured_targets': ['dev1', 'dev2']}, {'secondary_id': 'dev1', 'configured_targets': []}, {'secondary_id': None, 'configured_targets': []}) @ddt.unpack def test_failover_host_invalid_replication_target(self, secondary_id, configured_targets): """This tests executes a method in the DataMotionMixin.""" self.library.backend_name = 'dev0' self.mock_object(data_motion.DataMotionMixin, 'get_replication_backend_names', return_value=configured_targets) complete_failover_call = self.mock_object( data_motion.DataMotionMixin, '_complete_failover') self.assertRaises(exception.InvalidReplicationTarget, self.library.failover_host, 'fake_context', [], secondary_id=secondary_id) self.assertFalse(complete_failover_call.called) def test_failover_host_unable_to_failover(self): """This tests executes a method in the DataMotionMixin.""" self.library.backend_name = 'dev0' self.mock_object( data_motion.DataMotionMixin, '_complete_failover', side_effect=na_utils.NetAppDriverException) self.mock_object(data_motion.DataMotionMixin, 'get_replication_backend_names', return_value=['dev1', 'dev2']) self.mock_object(self.library.ssc_library, 'get_ssc_flexvol_names', return_value=fake_utils.SSC.keys()) self.mock_object(self.library, '_update_zapi_client') self.assertRaises(exception.UnableToFailOver, self.library.failover_host, 'fake_context', [], secondary_id='dev1') data_motion.DataMotionMixin._complete_failover.assert_called_once_with( 'dev0', ['dev1', 'dev2'], fake_utils.SSC.keys(), [], failover_target='dev1') self.assertFalse(self.library._update_zapi_client.called) def test_failover_host(self): """This tests executes a method in the DataMotionMixin.""" self.library.backend_name = 'dev0' self.mock_object(data_motion.DataMotionMixin, '_complete_failover', return_value=('dev1', [])) self.mock_object(data_motion.DataMotionMixin, 'get_replication_backend_names', 
return_value=['dev1', 'dev2']) self.mock_object(self.library.ssc_library, 'get_ssc_flexvol_names', return_value=fake_utils.SSC.keys()) self.mock_object(self.library, '_update_zapi_client') actual_active, vol_updates, __ = self.library.failover_host( 'fake_context', [], secondary_id='dev1', groups=[]) data_motion.DataMotionMixin._complete_failover.assert_called_once_with( 'dev0', ['dev1', 'dev2'], fake_utils.SSC.keys(), [], failover_target='dev1') self.library._update_zapi_client.assert_called_once_with('dev1') self.assertTrue(self.library.failed_over) self.assertEqual('dev1', self.library.failed_over_backend_name) self.assertEqual('dev1', actual_active) self.assertEqual([], vol_updates) @ddt.data({'secondary_id': 'dev0', 'configured_targets': ['dev1']}, {'secondary_id': 'dev3', 'configured_targets': ['dev1', 'dev2']}, {'secondary_id': 'dev1', 'configured_targets': []}, {'secondary_id': None, 'configured_targets': []}) @ddt.unpack def test_failover_invalid_replication_target(self, secondary_id, configured_targets): """This tests executes a method in the DataMotionMixin.""" self.library.backend_name = 'dev0' self.mock_object(data_motion.DataMotionMixin, 'get_replication_backend_names', return_value=configured_targets) complete_failover_call = self.mock_object( data_motion.DataMotionMixin, '_complete_failover') self.assertRaises(exception.InvalidReplicationTarget, self.library.failover, 'fake_context', [], secondary_id=secondary_id) self.assertFalse(complete_failover_call.called) def test_failover_unable_to_failover(self): """This tests executes a method in the DataMotionMixin.""" self.library.backend_name = 'dev0' self.mock_object(data_motion.DataMotionMixin, '_complete_failover', side_effect=na_utils.NetAppDriverException) self.mock_object(data_motion.DataMotionMixin, 'get_replication_backend_names', return_value=['dev1', 'dev2']) self.mock_object(self.library.ssc_library, 'get_ssc_flexvol_names', return_value=fake_utils.SSC.keys()) self.mock_object(self.library, '_update_zapi_client') self.assertRaises(exception.UnableToFailOver, self.library.failover, 'fake_context', [], secondary_id='dev1') data_motion.DataMotionMixin._complete_failover.assert_called_once_with( 'dev0', ['dev1', 'dev2'], fake_utils.SSC.keys(), [], failover_target='dev1') self.assertFalse(self.library._update_zapi_client.called) def test_failover(self): """This tests executes a method in the DataMotionMixin.""" self.library.backend_name = 'dev0' self.mock_object(data_motion.DataMotionMixin, '_complete_failover', return_value=('dev1', [])) self.mock_object(data_motion.DataMotionMixin, 'get_replication_backend_names', return_value=['dev1', 'dev2']) self.mock_object(self.library.ssc_library, 'get_ssc_flexvol_names', return_value=fake_utils.SSC.keys()) self.mock_object(self.library, '_update_zapi_client') actual_active, vol_updates, __ = self.library.failover( 'fake_context', [], secondary_id='dev1', groups=[]) data_motion.DataMotionMixin._complete_failover.assert_called_once_with( 'dev0', ['dev1', 'dev2'], fake_utils.SSC.keys(), [], failover_target='dev1') def test_failover_completed(self): self.mock_object(self.library, '_update_zapi_client') self.library.failover_completed('fake_context', secondary_id='dev1') self.library._update_zapi_client.assert_called_once_with('dev1') def test_add_looping_tasks(self): mock_update_ssc = self.mock_object(self.library, '_update_ssc') mock_handle_housekeeping = self.mock_object( self.library, '_handle_housekeeping_tasks') mock_add_task = self.mock_object(self.library.loopingcalls, 
'add_task') mock_super_add_looping_tasks = self.mock_object( block_base.NetAppBlockStorageLibrary, '_add_looping_tasks') self.library._add_looping_tasks() mock_update_ssc.assert_called_once_with() mock_add_task.assert_has_calls([ mock.call(mock_update_ssc, loopingcalls.ONE_HOUR, loopingcalls.ONE_HOUR), mock.call(mock_handle_housekeeping, loopingcalls.TEN_MINUTES, 0)]) mock_super_add_looping_tasks.assert_called_once_with() def test_get_backing_flexvol_names(self): mock_ssc_library = self.mock_object( self.library.ssc_library, 'get_ssc') self.library._get_backing_flexvol_names() mock_ssc_library.assert_called_once_with() def test_create_group(self): model_update = self.library.create_group( fake.VOLUME_GROUP) self.assertEqual('available', model_update['status']) def test_delete_group_volume_delete_failure(self): self.mock_object(block_cmode, 'LOG') self.mock_object(self.library, '_delete_lun', side_effect=Exception) model_update, volumes = self.library.delete_group( fake.VOLUME_GROUP, [fake.VG_VOLUME]) self.assertEqual('deleted', model_update['status']) self.assertEqual('error_deleting', volumes[0]['status']) self.assertEqual(1, block_cmode.LOG.exception.call_count) def test_update_group(self): model_update, add_volumes_update, remove_volumes_update = ( self.library.update_group(fake.VOLUME_GROUP)) self.assertIsNone(model_update) self.assertIsNone(add_volumes_update) self.assertIsNone(remove_volumes_update) def test_delete_group_not_found(self): self.mock_object(block_cmode, 'LOG') self.mock_object(self.library, '_get_lun_attr', return_value=None) model_update, volumes = self.library.delete_group( fake.VOLUME_GROUP, [fake.VG_VOLUME]) self.assertEqual(0, block_cmode.LOG.error.call_count) self.assertEqual(0, block_cmode.LOG.info.call_count) self.assertEqual('deleted', model_update['status']) self.assertEqual('deleted', volumes[0]['status']) def test_create_group_snapshot_raise_exception(self): self.mock_object(volume_utils, 'is_group_a_cg_snapshot_type', return_value=True) mock_extract_host = self.mock_object( volume_utils, 'extract_host', return_value=fake.POOL_NAME) self.mock_object(self.zapi_client, 'create_cg_snapshot', side_effect=netapp_api.NaApiError) self.assertRaises(na_utils.NetAppDriverException, self.library.create_group_snapshot, fake.VOLUME_GROUP, [fake.VG_SNAPSHOT]) mock_extract_host.assert_called_once_with( fake.VG_SNAPSHOT['volume']['host'], level='pool') def test_create_group_snapshot(self): self.mock_object(volume_utils, 'is_group_a_cg_snapshot_type', return_value=False) fake_lun = block_base.NetAppLun(fake.LUN_HANDLE, fake.LUN_ID, fake.LUN_SIZE, fake.LUN_METADATA) self.mock_object(self.library, '_get_lun_from_table', return_value=fake_lun) mock__clone_lun = self.mock_object(self.library, '_clone_lun') model_update, snapshots_model_update = ( self.library.create_group_snapshot(fake.VOLUME_GROUP, [fake.SNAPSHOT])) self.assertIsNone(model_update) self.assertIsNone(snapshots_model_update) mock__clone_lun.assert_called_once_with(fake_lun.name, fake.SNAPSHOT['name'], space_reserved='false', is_snapshot=True) def test_create_consistent_group_snapshot(self): self.mock_object(volume_utils, 'is_group_a_cg_snapshot_type', return_value=True) self.mock_object(volume_utils, 'extract_host', return_value=fake.POOL_NAME) mock_create_cg_snapshot = self.mock_object( self.zapi_client, 'create_cg_snapshot') mock__clone_lun = self.mock_object(self.library, '_clone_lun') mock_wait_for_busy_snapshot = self.mock_object( self.zapi_client, 'wait_for_busy_snapshot') mock_delete_snapshot = 
self.mock_object( self.zapi_client, 'delete_snapshot') model_update, snapshots_model_update = ( self.library.create_group_snapshot(fake.VOLUME_GROUP, [fake.VG_SNAPSHOT])) self.assertIsNone(model_update) self.assertIsNone(snapshots_model_update) mock_create_cg_snapshot.assert_called_once_with( set([fake.POOL_NAME]), fake.VOLUME_GROUP['id']) mock__clone_lun.assert_called_once_with( fake.VG_SNAPSHOT['volume']['name'], fake.VG_SNAPSHOT['name'], source_snapshot=fake.VOLUME_GROUP['id']) mock_wait_for_busy_snapshot.assert_called_once_with( fake.POOL_NAME, fake.VOLUME_GROUP['id']) mock_delete_snapshot.assert_called_once_with( fake.POOL_NAME, fake.VOLUME_GROUP['id']) @ddt.data(None, {'replication_status': fields.ReplicationStatus.ENABLED}) def test_create_group_from_src_snapshot(self, volume_model_update): mock_clone_source_to_destination = self.mock_object( self.library, '_clone_source_to_destination', return_value=volume_model_update) actual_return_value = self.library.create_group_from_src( fake.VOLUME_GROUP, [fake.VOLUME], group_snapshot=fake.VG_SNAPSHOT, snapshots=[fake.VG_VOLUME_SNAPSHOT]) clone_source_to_destination_args = { 'name': fake.VG_SNAPSHOT['name'], 'size': fake.VG_SNAPSHOT['volume_size'], } mock_clone_source_to_destination.assert_called_once_with( clone_source_to_destination_args, fake.VOLUME) if volume_model_update: volume_model_update['id'] = fake.VOLUME['id'] expected_return_value = ((None, [volume_model_update]) if volume_model_update else (None, [])) self.assertEqual(expected_return_value, actual_return_value) @ddt.data(None, {'replication_status': fields.ReplicationStatus.ENABLED}) def test_create_group_from_src_group(self, volume_model_update): lun_name = fake.SOURCE_VG_VOLUME['name'] mock_lun = block_base.NetAppLun( lun_name, lun_name, '3', {'UUID': 'fake_uuid'}) self.mock_object(self.library, '_get_lun_from_table', return_value=mock_lun) mock_clone_source_to_destination = self.mock_object( self.library, '_clone_source_to_destination', return_value=volume_model_update) actual_return_value = self.library.create_group_from_src( fake.VOLUME_GROUP, [fake.VOLUME], source_group=fake.SOURCE_VOLUME_GROUP, source_vols=[fake.SOURCE_VG_VOLUME]) clone_source_to_destination_args = { 'name': fake.SOURCE_VG_VOLUME['name'], 'size': fake.SOURCE_VG_VOLUME['size'], } if volume_model_update: volume_model_update['id'] = fake.VOLUME['id'] expected_return_value = ((None, [volume_model_update]) if volume_model_update else (None, [])) mock_clone_source_to_destination.assert_called_once_with( clone_source_to_destination_args, fake.VOLUME) self.assertEqual(expected_return_value, actual_return_value) def test_delete_group_snapshot(self): mock__delete_lun = self.mock_object(self.library, '_delete_lun') model_update, snapshots_model_update = ( self.library.delete_group_snapshot(fake.VOLUME_GROUP, [fake.VG_SNAPSHOT])) self.assertIsNone(model_update) self.assertIsNone(snapshots_model_update) mock__delete_lun.assert_called_once_with(fake.VG_SNAPSHOT['name']) def test_move_lun(self): self.library.configuration.netapp_migrate_volume_timeout = 1 fake_job_status = {'job-status': 'complete'} mock_start_lun_move = self.mock_object(self.zapi_client, 'start_lun_move', return_value=fake.JOB_UUID) mock_get_lun_move_status = self.mock_object( self.zapi_client, 'get_lun_move_status', return_value=fake_job_status) ctxt = mock.Mock() vol_fields = { 'id': fake.VOLUME_ID, 'name': fake.VOLUME_NAME, 'status': fields.VolumeStatus.AVAILABLE } fake_vol = fake_volume.fake_volume_obj(ctxt, **vol_fields) result = 
self.library._move_lun( fake_vol, fake.POOL_NAME, fake.DEST_POOL_NAME, dest_lun_name=fake.VOLUME_NAME) mock_start_lun_move.assert_called_with( fake_vol.name, fake.DEST_POOL_NAME, src_ontap_volume=fake.POOL_NAME, dest_lun_name=fake.VOLUME_NAME) mock_get_lun_move_status.assert_called_once_with(fake.JOB_UUID) self.assertIsNone(result) @ddt.data(('data', na_utils.NetAppDriverTimeout), ('destroyed', na_utils.NetAppDriverException)) @ddt.unpack @mock.patch('oslo_service.loopingcall.FixedIntervalWithTimeoutLoopingCall', new=test_utils.ZeroIntervalWithTimeoutLoopingCall) def test_move_lun_error(self, status_on_error, move_exception): self.library.configuration.netapp_migrate_volume_timeout = 1 fake_job_status = { 'job-status': status_on_error, 'last-failure-reason': None } mock_start_lun_move = self.mock_object(self.zapi_client, 'start_lun_move', return_value=fake.JOB_UUID) mock_get_lun_move_status = self.mock_object( self.zapi_client, 'get_lun_move_status', return_value=fake_job_status) ctxt = mock.Mock() vol_fields = { 'id': fake.VOLUME_ID, 'name': fake.VOLUME_NAME, 'status': fields.VolumeStatus.AVAILABLE } fake_vol = fake_volume.fake_volume_obj(ctxt, **vol_fields) self.assertRaises(move_exception, self.library._move_lun, fake_vol, fake.POOL_NAME, fake.DEST_POOL_NAME, dest_lun_name=fake.VOLUME_NAME) mock_start_lun_move.assert_called_with( fake_vol.name, fake.DEST_POOL_NAME, src_ontap_volume=fake.POOL_NAME, dest_lun_name=fake.VOLUME_NAME) mock_get_lun_move_status.assert_called_with(fake.JOB_UUID) def test_cancel_lun_copy(self): mock_cancel_lun_copy = self.mock_object(self.zapi_client, 'cancel_lun_copy') mock_get_client_for_backend = self.mock_object( dot_utils, 'get_client_for_backend', return_value=self.zapi_client) mock_destroy_lun = self.mock_object(self.zapi_client, 'destroy_lun') ctxt = mock.Mock() vol_fields = { 'id': fake.VOLUME_ID, 'name': fake.VOLUME_NAME, 'status': fields.VolumeStatus.AVAILABLE } fake_vol = fake_volume.fake_volume_obj(ctxt, **vol_fields) result = self.library._cancel_lun_copy(fake.JOB_UUID, fake_vol, fake.DEST_POOL_NAME, fake.DEST_BACKEND_NAME) mock_cancel_lun_copy.assert_called_once_with(fake.JOB_UUID) mock_get_client_for_backend.assert_not_called() mock_destroy_lun.assert_not_called() self.assertIsNone(result) def test_cancel_lun_copy_force_destroy_lun(self): mock_cancel_lun_copy = self.mock_object( self.zapi_client, 'cancel_lun_copy', side_effect=na_utils.NetAppDriverException) mock_get_client_for_backend = self.mock_object( dot_utils, 'get_client_for_backend', return_value=self.zapi_client) mock_destroy_lun = self.mock_object(self.zapi_client, 'destroy_lun') ctxt = mock.Mock() vol_fields = { 'id': fake.VOLUME_ID, 'name': fake.VOLUME_NAME, 'status': fields.VolumeStatus.AVAILABLE } fake_vol = fake_volume.fake_volume_obj(ctxt, **vol_fields) result = self.library._cancel_lun_copy(fake.JOB_UUID, fake_vol, fake.DEST_POOL_NAME, fake.DEST_BACKEND_NAME) mock_cancel_lun_copy.assert_called_once_with(fake.JOB_UUID) mock_get_client_for_backend.assert_called_once_with( fake.DEST_BACKEND_NAME) fake_lun_path = '/vol/%s/%s' % (fake.DEST_POOL_NAME, fake_vol.name) mock_destroy_lun.assert_called_once_with(fake_lun_path) self.assertIsNone(result) def test_cancel_lun_copy_error_on_force_destroy_lun(self): mock_cancel_lun_copy = self.mock_object( self.zapi_client, 'cancel_lun_copy', side_effect=na_utils.NetAppDriverException) mock_get_client_for_backend = self.mock_object( dot_utils, 'get_client_for_backend', return_value=self.zapi_client) mock_destroy_lun = self.mock_object( 
self.zapi_client, 'destroy_lun', side_effect=na_utils.NetAppDriverException) ctxt = mock.Mock() vol_fields = { 'id': fake.VOLUME_ID, 'name': fake.VOLUME_NAME, 'status': fields.VolumeStatus.AVAILABLE } fake_vol = fake_volume.fake_volume_obj(ctxt, **vol_fields) result = self.library._cancel_lun_copy(fake.JOB_UUID, fake_vol, fake.DEST_POOL_NAME, fake.DEST_BACKEND_NAME) mock_cancel_lun_copy.assert_called_once_with(fake.JOB_UUID) mock_get_client_for_backend.assert_called_once_with( fake.DEST_BACKEND_NAME) fake_lun_path = '/vol/%s/%s' % (fake.DEST_POOL_NAME, fake_vol.name) mock_destroy_lun.assert_called_once_with(fake_lun_path) self.assertIsNone(result) def test_copy_lun(self): self.library.configuration.netapp_migrate_volume_timeout = 1 fake_job_status = {'job-status': 'complete'} mock_start_lun_copy = self.mock_object(self.zapi_client, 'start_lun_copy', return_value=fake.JOB_UUID) mock_get_lun_copy_status = self.mock_object( self.zapi_client, 'get_lun_copy_status', return_value=fake_job_status) mock_cancel_lun_copy = self.mock_object( self.library, '_cancel_lun_copy') ctxt = mock.Mock() vol_fields = { 'id': fake.VOLUME_ID, 'name': fake.VOLUME_NAME, 'status': fields.VolumeStatus.AVAILABLE } fake_vol = fake_volume.fake_volume_obj(ctxt, **vol_fields) result = self.library._copy_lun( fake_vol, fake.POOL_NAME, fake.VSERVER_NAME, fake.DEST_POOL_NAME, fake.DEST_VSERVER_NAME, dest_lun_name=fake.VOLUME_NAME, dest_backend_name=fake.DEST_BACKEND_NAME, cancel_on_error=True) mock_start_lun_copy.assert_called_with( fake_vol.name, fake.DEST_POOL_NAME, fake.DEST_VSERVER_NAME, src_ontap_volume=fake.POOL_NAME, src_vserver=fake.VSERVER_NAME, dest_lun_name=fake.VOLUME_NAME) mock_get_lun_copy_status.assert_called_once_with(fake.JOB_UUID) mock_cancel_lun_copy.assert_not_called() self.assertIsNone(result) @ddt.data(('data', na_utils.NetAppDriverTimeout), ('destroyed', na_utils.NetAppDriverException)) @ddt.unpack @mock.patch('oslo_service.loopingcall.FixedIntervalWithTimeoutLoopingCall', new=test_utils.ZeroIntervalWithTimeoutLoopingCall) def test_copy_lun_error(self, status_on_error, copy_exception): self.library.configuration.netapp_migrate_volume_timeout = 1 fake_job_status = { 'job-status': status_on_error, 'last-failure-reason': None } mock_start_lun_copy = self.mock_object(self.zapi_client, 'start_lun_copy', return_value=fake.JOB_UUID) mock_get_lun_copy_status = self.mock_object( self.zapi_client, 'get_lun_copy_status', return_value=fake_job_status) mock_cancel_lun_copy = self.mock_object( self.library, '_cancel_lun_copy') ctxt = mock.Mock() vol_fields = { 'id': fake.VOLUME_ID, 'name': fake.VOLUME_NAME, 'status': fields.VolumeStatus.AVAILABLE } fake_vol = fake_volume.fake_volume_obj(ctxt, **vol_fields) self.assertRaises(copy_exception, self.library._copy_lun, fake_vol, fake.POOL_NAME, fake.VSERVER_NAME, fake.DEST_POOL_NAME, fake.DEST_VSERVER_NAME, dest_lun_name=fake.VOLUME_NAME, dest_backend_name=fake.DEST_BACKEND_NAME, cancel_on_error=True) mock_start_lun_copy.assert_called_with( fake_vol.name, fake.DEST_POOL_NAME, fake.DEST_VSERVER_NAME, src_ontap_volume=fake.POOL_NAME, src_vserver=fake.VSERVER_NAME, dest_lun_name=fake.VOLUME_NAME) mock_get_lun_copy_status.assert_called_with(fake.JOB_UUID) mock_cancel_lun_copy.assert_called_once_with( fake.JOB_UUID, fake_vol, fake.DEST_POOL_NAME, dest_backend_name=fake.DEST_BACKEND_NAME) def test_migrate_volume_to_pool(self): mock_move_lun = self.mock_object(self.library, '_move_lun') mock_finish_migrate_volume_to_pool = 
self.mock_object( self.library, '_finish_migrate_volume_to_pool') ctxt = mock.Mock() vol_fields = {'id': fake.VOLUME_ID, 'name': fake.VOLUME_NAME} fake_vol = fake_volume.fake_volume_obj(ctxt, **vol_fields) updates = self.library._migrate_volume_to_pool(fake_vol, fake.POOL_NAME, fake.DEST_POOL_NAME, fake.VSERVER_NAME, fake.DEST_BACKEND_NAME) mock_move_lun.assert_called_once_with(fake_vol, fake.POOL_NAME, fake.DEST_POOL_NAME) mock_finish_migrate_volume_to_pool.assert_called_once_with( fake_vol, fake.DEST_POOL_NAME) self.assertEqual({}, updates) def test_migrate_volume_to_pool_lun_move_error(self): mock_move_lun = self.mock_object( self.library, '_move_lun', side_effect=na_utils.NetAppDriverException) mock_finish_migrate_volume_to_pool = self.mock_object( self.library, '_finish_migrate_volume_to_pool') ctxt = mock.Mock() vol_fields = {'id': fake.VOLUME_ID, 'name': fake.VOLUME_NAME} fake_vol = fake_volume.fake_volume_obj(ctxt, **vol_fields) self.assertRaises(na_utils.NetAppDriverException, self.library._migrate_volume_to_pool, fake_vol, fake.POOL_NAME, fake.DEST_POOL_NAME, fake.VSERVER_NAME, fake.DEST_BACKEND_NAME) mock_move_lun.assert_called_once_with(fake_vol, fake.POOL_NAME, fake.DEST_POOL_NAME) mock_finish_migrate_volume_to_pool.assert_not_called() def test_migrate_volume_to_pool_lun_move_timeout(self): mock_move_lun = self.mock_object( self.library, '_move_lun', side_effect=na_utils.NetAppDriverTimeout) mock_finish_migrate_volume_to_pool = self.mock_object( self.library, '_finish_migrate_volume_to_pool') ctxt = mock.Mock() vol_fields = {'id': fake.VOLUME_ID, 'name': fake.VOLUME_NAME} fake_vol = fake_volume.fake_volume_obj(ctxt, **vol_fields) updates = self.library._migrate_volume_to_pool(fake_vol, fake.POOL_NAME, fake.DEST_POOL_NAME, fake.VSERVER_NAME, fake.DEST_BACKEND_NAME) mock_move_lun.assert_called_once_with(fake_vol, fake.POOL_NAME, fake.DEST_POOL_NAME) mock_finish_migrate_volume_to_pool.assert_called_once_with( fake_vol, fake.DEST_POOL_NAME) self.assertEqual({'status': fields.VolumeStatus.MAINTENANCE}, updates) def test_finish_migrate_volume_to_pool(self): ctxt = mock.Mock() vol_fields = {'id': fake.VOLUME_ID, 'name': fake.VOLUME_NAME} fake_vol = fake_volume.fake_volume_obj(ctxt, **vol_fields) fake_lun_cache = block_base.NetAppLun(fake.LUN_HANDLE, fake.LUN_NAME, fake.SIZE, None) mock_get_lun_from_table = self.mock_object(self.library, '_get_lun_from_table', return_value=fake_lun_cache) self.library._finish_migrate_volume_to_pool(fake_vol, fake.DEST_POOL_NAME) mock_get_lun_from_table.assert_called_once_with(fake_vol.name) expected = { 'Path': '/vol/%s/%s' % (fake.DEST_POOL_NAME, fake_vol.name), 'Volume': fake.DEST_POOL_NAME } self.assertEqual(expected, fake_lun_cache.metadata) def test_migrate_volume_to_vserver(self): self.library.using_cluster_credentials = True self.library.backend_name = fake.BACKEND_NAME mock_create_vserver_peer = self.mock_object( self.library, 'create_vserver_peer') mock_copy_lun = self.mock_object(self.library, '_copy_lun') mock_finish_migrate_volume_to_vserver = self.mock_object( self.library, '_finish_migrate_volume_to_vserver') ctxt = mock.Mock() vol_fields = { 'id': fake.VOLUME_ID, 'name': fake.VOLUME_NAME, 'status': fields.VolumeStatus.AVAILABLE } fake_vol = fake_volume.fake_volume_obj(ctxt, **vol_fields) updates = self.library._migrate_volume_to_vserver( fake_vol, fake.POOL_NAME, fake.VSERVER_NAME, fake.DEST_POOL_NAME, fake.DEST_VSERVER_NAME, fake.DEST_BACKEND_NAME) mock_create_vserver_peer.assert_called_once_with( fake.VSERVER_NAME, fake.BACKEND_NAME, 
fake.DEST_VSERVER_NAME, ['lun_copy']) mock_copy_lun.assert_called_once_with( fake_vol, fake.POOL_NAME, fake.VSERVER_NAME, fake.DEST_POOL_NAME, fake.DEST_VSERVER_NAME, dest_backend_name=fake.DEST_BACKEND_NAME, cancel_on_error=True) mock_finish_migrate_volume_to_vserver.assert_called_once_with(fake_vol) self.assertEqual({}, updates) @ddt.data(na_utils.NetAppDriverException, na_utils.NetAppDriverTimeout) def test_migrate_volume_to_vserver_error_on_copy(self, copy_error): self.library.using_cluster_credentials = True self.library.backend_name = fake.BACKEND_NAME mock_create_vserver_peer = self.mock_object( self.library, 'create_vserver_peer') mock_copy_lun = self.mock_object( self.library, '_copy_lun', side_effect=copy_error) mock_finish_migrate_volume_to_vserver = self.mock_object( self.library, '_finish_migrate_volume_to_vserver') ctxt = mock.Mock() vol_fields = { 'id': fake.VOLUME_ID, 'name': fake.VOLUME_NAME, 'status': fields.VolumeStatus.AVAILABLE } fake_vol = fake_volume.fake_volume_obj(ctxt, **vol_fields) self.assertRaises(copy_error, self.library._migrate_volume_to_vserver, fake_vol, fake.POOL_NAME, fake.VSERVER_NAME, fake.DEST_POOL_NAME, fake.DEST_VSERVER_NAME, fake.DEST_BACKEND_NAME) mock_create_vserver_peer.assert_called_once_with( fake.VSERVER_NAME, fake.BACKEND_NAME, fake.DEST_VSERVER_NAME, ['lun_copy']) mock_copy_lun.assert_called_once_with( fake_vol, fake.POOL_NAME, fake.VSERVER_NAME, fake.DEST_POOL_NAME, fake.DEST_VSERVER_NAME, dest_backend_name=fake.DEST_BACKEND_NAME, cancel_on_error=True) mock_finish_migrate_volume_to_vserver.assert_not_called() def test_migrate_volume_to_vserver_volume_is_not_available(self): self.library.using_cluster_credentials = True mock_create_vserver_peer = self.mock_object( self.library, 'create_vserver_peer') mock_copy_lun = self.mock_object(self.library, '_copy_lun') mock_finish_migrate_volume_to_vserver = self.mock_object( self.library, '_finish_migrate_volume_to_vserver') ctxt = mock.Mock() vol_fields = { 'id': fake.VOLUME_ID, 'name': fake.VOLUME_NAME, 'status': fields.VolumeStatus.IN_USE } fake_vol = fake_volume.fake_volume_obj(ctxt, **vol_fields) self.assertRaises(exception.InvalidVolume, self.library._migrate_volume_to_vserver, fake_vol, fake.POOL_NAME, fake.VSERVER_NAME, fake.DEST_POOL_NAME, fake.DEST_VSERVER_NAME, fake.DEST_BACKEND_NAME) mock_create_vserver_peer.assert_not_called() mock_copy_lun.assert_not_called() mock_finish_migrate_volume_to_vserver.assert_not_called() def test_migrate_volume_to_vserver_invalid_vserver_peer_applications(self): self.library.using_cluster_credentials = True self.library.backend_name = fake.VSERVER_NAME mock_create_vserver_peer = self.mock_object( self.library, 'create_vserver_peer', side_effect=na_utils.NetAppDriverException) mock_copy_lun = self.mock_object( self.library, '_copy_lun') mock_finish_migrate_volume_to_vserver = self.mock_object( self.library, '_finish_migrate_volume_to_vserver') ctxt = mock.Mock() vol_fields = { 'id': fake.VOLUME_ID, 'name': fake.VOLUME_NAME, 'status': fields.VolumeStatus.AVAILABLE } fake_vol = fake_volume.fake_volume_obj(ctxt, **vol_fields) self.assertRaises(na_utils.NetAppDriverException, self.library._migrate_volume_to_vserver, fake_vol, fake.POOL_NAME, fake.VSERVER_NAME, fake.DEST_POOL_NAME, fake.DEST_VSERVER_NAME, fake.DEST_BACKEND_NAME) mock_create_vserver_peer.assert_called_once_with( fake.VSERVER_NAME, fake.VSERVER_NAME, fake.DEST_VSERVER_NAME, ['lun_copy']) mock_copy_lun.assert_not_called() 
mock_finish_migrate_volume_to_vserver.assert_not_called() def test_finish_migrate_volume_to_vserver(self): mock_delete_volume = self.mock_object(self.library, 'delete_volume') mock_delete_lun_from_table = self.mock_object( self.library, '_delete_lun_from_table') ctxt = mock.Mock() vol_fields = { 'id': fake.VOLUME_ID, 'name': fake.VOLUME_NAME, 'status': fields.VolumeStatus.AVAILABLE } fake_vol = fake_volume.fake_volume_obj(ctxt, **vol_fields) self.library._finish_migrate_volume_to_vserver(fake_vol) mock_delete_volume.assert_called_once_with(fake_vol) mock_delete_lun_from_table.assert_called_once_with(fake_vol.name) def test_migrate_volume(self): ctx = mock.Mock() self.library.backend_name = fake.BACKEND_NAME self.library.configuration.netapp_vserver = fake.VSERVER_NAME mock_migrate_volume_ontap_assisted = self.mock_object( self.library, 'migrate_volume_ontap_assisted', return_value={}) vol_fields = { 'id': fake.VOLUME_ID, 'name': fake.VOLUME_NAME, 'status': fields.VolumeStatus.AVAILABLE } fake_vol = fake_volume.fake_volume_obj(ctx, **vol_fields) result = self.library.migrate_volume(ctx, fake_vol, fake.DEST_HOST_STRING) mock_migrate_volume_ontap_assisted.assert_called_once_with( fake_vol, fake.DEST_HOST_STRING, fake.BACKEND_NAME, fake.VSERVER_NAME) self.assertEqual({}, result) def test_revert_to_snapshot(self): mock__revert_to_snapshot = self.mock_object(self.library, '_revert_to_snapshot') self.library.revert_to_snapshot(fake.SNAPSHOT_VOLUME, fake.SNAPSHOT) mock__revert_to_snapshot.assert_called_once_with(fake.SNAPSHOT_VOLUME, fake.SNAPSHOT) def test_revert_to_snapshot_revert_failed(self): self.mock_object(self.library, '_revert_to_snapshot', side_effect=Exception) self.assertRaises(exception.VolumeBackendAPIException, self.library.revert_to_snapshot, fake.SNAPSHOT_VOLUME, fake.SNAPSHOT) def test__revert_to_snapshot(self): lun_obj = block_base.NetAppLun(fake.LUN_WITH_METADATA['handle'], fake.LUN_WITH_METADATA['name'], fake.LUN_WITH_METADATA['size'], fake.LUN_WITH_METADATA['metadata']) lun_name = lun_obj.name new_lun_name = 'new-%s' % fake.SNAPSHOT['name'] flexvol_name = lun_obj.metadata['Volume'] mock__clone_snapshot = self.mock_object( self.library, '_clone_snapshot', return_value=new_lun_name) mock__get_lun_from_table = self.mock_object( self.library, '_get_lun_from_table', return_value=lun_obj) mock__swap_luns = self.mock_object(self.library, '_swap_luns') mock_destroy_lun = self.mock_object(self.library.zapi_client, 'destroy_lun') self.library._revert_to_snapshot(fake.SNAPSHOT_VOLUME, fake.SNAPSHOT) mock__clone_snapshot.assert_called_once_with(fake.SNAPSHOT['name']) mock__get_lun_from_table.assert_called_once_with( fake.SNAPSHOT_VOLUME['name']) mock__swap_luns.assert_called_once_with(lun_name, new_lun_name, flexvol_name) mock_destroy_lun.assert_not_called() @ddt.data(False, True) def test__revert_to_snapshot_swap_exception(self, delete_lun_exception): lun_obj = block_base.NetAppLun(fake.LUN_WITH_METADATA['handle'], fake.LUN_WITH_METADATA['name'], fake.LUN_WITH_METADATA['size'], fake.LUN_WITH_METADATA['metadata']) new_lun_name = 'new-%s' % fake.SNAPSHOT['name'] flexvol_name = lun_obj.metadata['Volume'] new_lun_path = '/vol/%s/%s' % (flexvol_name, new_lun_name) side_effect = Exception if delete_lun_exception else lambda: True self.mock_object( self.library, '_clone_snapshot', return_value=new_lun_name) self.mock_object( self.library, '_get_lun_from_table', return_value=lun_obj) swap_exception = 
exception.VolumeBackendAPIException(data="data") self.mock_object(self.library, '_swap_luns', side_effect=swap_exception) mock_destroy_lun = self.mock_object(self.library.zapi_client, 'destroy_lun', side_effect=side_effect) self.assertRaises(exception.VolumeBackendAPIException, self.library._revert_to_snapshot, fake.SNAPSHOT_VOLUME, fake.SNAPSHOT) mock_destroy_lun.assert_called_once_with(new_lun_path) def test__clone_snapshot(self): lun_obj = block_base.NetAppLun(fake.LUN_WITH_METADATA['handle'], fake.LUN_WITH_METADATA['name'], fake.LUN_WITH_METADATA['size'], fake.LUN_WITH_METADATA['metadata']) new_snap_name = 'new-%s' % fake.SNAPSHOT['name'] snapshot_path = lun_obj.metadata['Path'] block_count = 40960 mock__get_lun_from_table = self.mock_object( self.library, '_get_lun_from_table', return_value=lun_obj) mock__get_lun_block_count = self.mock_object( self.library, '_get_lun_block_count', return_value=block_count) mock__clone_lun = self.mock_object(self.library, '_clone_lun') self.library._clone_snapshot(fake.SNAPSHOT['name']) mock__get_lun_from_table.assert_called_once_with(fake.SNAPSHOT['name']) mock__get_lun_block_count.assert_called_once_with(snapshot_path) mock__clone_lun.assert_called_once_with(fake.SNAPSHOT['name'], new_snap_name, space_reserved='false', is_snapshot=True) def test__clone_snapshot_invalid_block_count(self): lun_obj = block_base.NetAppLun(fake.LUN_WITH_METADATA['handle'], fake.LUN_WITH_METADATA['name'], fake.LUN_WITH_METADATA['size'], fake.LUN_WITH_METADATA['metadata']) self.mock_object(self.library, '_get_lun_from_table', return_value=lun_obj) self.mock_object(self.library, '_get_lun_block_count', return_value=0) self.assertRaises(exception.VolumeBackendAPIException, self.library._clone_snapshot, fake.SNAPSHOT['name']) def test__clone_snapshot_clone_exception(self): lun_obj = block_base.NetAppLun(fake.LUN_WITH_METADATA['handle'], fake.LUN_WITH_METADATA['name'], fake.LUN_WITH_METADATA['size'], fake.LUN_WITH_METADATA['metadata']) new_snap_name = 'new-%s' % fake.SNAPSHOT['name'] snapshot_path = lun_obj.metadata['Path'] flexvol_name = lun_obj.metadata['Volume'] new_lun_path = '/vol/%s/%s' % (flexvol_name, new_snap_name) block_count = 40960 mock__get_lun_from_table = self.mock_object( self.library, '_get_lun_from_table', return_value=lun_obj) mock__get_lun_block_count = self.mock_object( self.library, '_get_lun_block_count', return_value=block_count) side_effect = exception.VolumeBackendAPIException(data='data') mock__clone_lun = self.mock_object(self.library, '_clone_lun', side_effect=side_effect) mock_destroy_lun = self.mock_object(self.library.zapi_client, 'destroy_lun') self.assertRaises(exception.VolumeBackendAPIException, self.library._clone_snapshot, fake.SNAPSHOT['name']) mock__get_lun_from_table.assert_called_once_with(fake.SNAPSHOT['name']) mock__get_lun_block_count.assert_called_once_with(snapshot_path) mock__clone_lun.assert_called_once_with(fake.SNAPSHOT['name'], new_snap_name, space_reserved='false', is_snapshot=True) mock_destroy_lun.assert_called_once_with(new_lun_path) def test__swap_luns(self): original_lun = fake.LUN_WITH_METADATA['name'] new_lun = 'new-%s' % fake.SNAPSHOT['name'] flexvol = fake.LUN_WITH_METADATA['metadata']['Volume'] tmp_lun = 'tmp-%s' % original_lun path = "/vol/%s/%s" % (flexvol, original_lun) # original path tmp_path = "/vol/%s/%s" % (flexvol, tmp_lun) new_path = "/vol/%s/%s" % (flexvol, new_lun) mock_move_lun = self.mock_object( self.library.zapi_client, 'move_lun', return_value=True) mock_destroy_lun = self.mock_object( 
self.library.zapi_client, 'destroy_lun', return_value=True) self.library._swap_luns(original_lun, new_lun, flexvol) mock_move_lun.assert_has_calls([ mock.call(path, tmp_path), mock.call(new_path, path) ]) mock_destroy_lun.assert_called_once_with(tmp_path) @ddt.data((True, False), (False, False), (False, True)) @ddt.unpack def test__swap_luns_move_exception(self, first_move_exception, move_back_exception): original_lun = fake.LUN_WITH_METADATA['name'] new_lun = 'new-%s' % fake.SNAPSHOT['name'] flexvol = fake.LUN_WITH_METADATA['metadata']['Volume'] side_effect = Exception def _side_effect_skip(): return True if not first_move_exception and not move_back_exception: side_effect = [_side_effect_skip, Exception, _side_effect_skip] elif not first_move_exception: side_effect = [_side_effect_skip, Exception, Exception] tmp_lun = 'tmp-%s' % original_lun path = "/vol/%s/%s" % (flexvol, original_lun) # original path tmp_path = "/vol/%s/%s" % (flexvol, tmp_lun) new_path = "/vol/%s/%s" % (flexvol, new_lun) mock_move_lun = self.mock_object(self.library.zapi_client, 'move_lun', side_effect=side_effect) self.assertRaises(exception.VolumeBackendAPIException, self.library._swap_luns, original_lun, new_lun, flexvol) if first_move_exception: mock_move_lun.assert_called_once_with(path, tmp_path) else: mock_move_lun.assert_has_calls([ mock.call(path, tmp_path), mock.call(new_path, path), mock.call(tmp_path, path) ]) def test__swap_luns_destroy_exception(self): original_lun = fake.LUN_WITH_METADATA['name'] new_lun = 'new-%s' % fake.SNAPSHOT['name'] flexvol = fake.LUN_WITH_METADATA['metadata']['Volume'] tmp_lun = 'tmp-%s' % original_lun path = "/vol/%s/%s" % (flexvol, original_lun) tmp_path = "/vol/%s/%s" % (flexvol, tmp_lun) new_path = "/vol/%s/%s" % (flexvol, new_lun) mock_move_lun = self.mock_object( self.library.zapi_client, 'move_lun', return_value=True) mock_destroy_lun = self.mock_object( self.library.zapi_client, 'destroy_lun', side_effect=Exception) self.library._swap_luns(original_lun, new_lun, flexvol) mock_move_lun.assert_has_calls([ mock.call(path, tmp_path), mock.call(new_path, path) ]) mock_destroy_lun.assert_called_once_with(tmp_path) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/volume/drivers/netapp/dataontap/test_block_driver_interfaces.py0000664000175000017500000000465100000000000033172 0ustar00zuulzuul00000000000000# Copyright (c) 2015 Clinton Knight. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""Mock unit tests for the NetApp block storage driver interfaces""" from collections import abc from cinder.tests.unit import test from cinder.volume.drivers.netapp.dataontap import block_cmode from cinder.volume.drivers.netapp.dataontap import fc_cmode from cinder.volume.drivers.netapp.dataontap import iscsi_cmode class NetAppBlockStorageDriverInterfaceTestCase(test.TestCase): def setUp(self): super(NetAppBlockStorageDriverInterfaceTestCase, self).setUp() self.mock_object(block_cmode.NetAppBlockStorageCmodeLibrary, '__init__', return_value=None) self.iscsi_cmode_driver = iscsi_cmode.NetAppCmodeISCSIDriver() self.fc_cmode_driver = fc_cmode.NetAppCmodeFibreChannelDriver() def test_driver_interfaces_match(self): """Ensure the NetApp block storage driver interfaces match. The two block storage Cinder drivers from NetApp (iSCSI/FC) are merely passthrough shim layers atop a common block storage library. Bugs have been introduced when a Cinder method was exposed via a subset of those driver shims. This test ensures they remain in sync and the library features are uniformly available in the four drivers. """ # Get local functions of each driver interface iscsi_cmode = self._get_local_functions(self.iscsi_cmode_driver) fc_cmode = self._get_local_functions(self.fc_cmode_driver) # Ensure NetApp block storage driver shims are identical self.assertSetEqual(iscsi_cmode, fc_cmode) def _get_local_functions(self, obj): """Get function names of an object without superclass functions.""" return set([key for key, value in type(obj).__dict__.items() if isinstance(value, abc.Callable)]) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/volume/drivers/netapp/dataontap/test_fc_cmode.py0000664000175000017500000000403000000000000030050 0ustar00zuulzuul00000000000000# All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""Mock unit tests for NetApp Data ONTAP FibreChannel storage systems.""" from unittest import mock from cinder import context from cinder.tests.unit import test from cinder.tests.unit.volume.drivers.netapp.dataontap import fakes as fake import cinder.tests.unit.volume.drivers.netapp.fakes as na_fakes from cinder.volume.drivers.netapp.dataontap import fc_cmode class NetAppCmodeFibreChannelDriverTestCase(test.TestCase): def setUp(self): super(NetAppCmodeFibreChannelDriverTestCase, self).setUp() kwargs = { 'configuration': self.get_config_base(), 'host': 'openstack@netappblock', } self.library = fc_cmode.NetAppCmodeFibreChannelDriver(**kwargs) self.library.zapi_client = mock.Mock() self.zapi_client = self.library.zapi_client self.mock_request = mock.Mock() self.ctxt = context.RequestContext('fake', 'fake', auth_token=True) def get_config_base(self): return na_fakes.create_configuration() def test_revert_to_snapshot(self): mock_revert_to_snapshot = self.mock_object(self.library.library, 'revert_to_snapshot') self.library.revert_to_snapshot(self.ctxt, fake.SNAPSHOT_VOLUME, fake.SNAPSHOT) mock_revert_to_snapshot.assert_called_once_with(fake.SNAPSHOT_VOLUME, fake.SNAPSHOT) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/volume/drivers/netapp/dataontap/test_iscsi_cmode.py0000664000175000017500000000403200000000000030574 0ustar00zuulzuul00000000000000# All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Mock unit tests for NetApp Data ONTAP (C-mode) iSCSI storage systems.""" from unittest import mock from cinder import context from cinder.tests.unit import test from cinder.tests.unit.volume.drivers.netapp.dataontap import fakes as fake import cinder.tests.unit.volume.drivers.netapp.fakes as na_fakes from cinder.volume.drivers.netapp.dataontap import iscsi_cmode class NetAppCmodeFibreChannelDriverTestCase(test.TestCase): def setUp(self): super(NetAppCmodeFibreChannelDriverTestCase, self).setUp() kwargs = { 'configuration': self.get_config_base(), 'host': 'openstack@netappblock', } self.library = iscsi_cmode.NetAppCmodeISCSIDriver(**kwargs) self.library.zapi_client = mock.Mock() self.zapi_client = self.library.zapi_client self.mock_request = mock.Mock() self.ctxt = context.RequestContext('fake', 'fake', auth_token=True) def get_config_base(self): return na_fakes.create_configuration() def test_revert_to_snapshot(self): mock_revert_to_snapshot = self.mock_object(self.library.library, 'revert_to_snapshot') self.library.revert_to_snapshot(self.ctxt, fake.SNAPSHOT_VOLUME, fake.SNAPSHOT) mock_revert_to_snapshot.assert_called_once_with(fake.SNAPSHOT_VOLUME, fake.SNAPSHOT) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/volume/drivers/netapp/dataontap/test_nfs_base.py0000664000175000017500000014705600000000000030111 0ustar00zuulzuul00000000000000# Copyright (c) 2014 Andrew Kerr. All rights reserved. 
# Copyright (c) 2015 Tom Barron. All rights reserved. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Unit tests for the NetApp NFS storage driver.""" import copy import os import time from unittest import mock import ddt from os_brick.remotefs import remotefs as remotefs_brick from oslo_concurrency import processutils from oslo_utils import units from cinder import context from cinder import exception from cinder.objects import fields from cinder.tests.unit import fake_snapshot from cinder.tests.unit import fake_volume from cinder.tests.unit import test from cinder.tests.unit.volume.drivers.netapp.dataontap import fakes as fake from cinder import utils from cinder.volume.drivers.netapp.dataontap.client import api as netapp_api from cinder.volume.drivers.netapp.dataontap import nfs_base from cinder.volume.drivers.netapp.dataontap.utils import loopingcalls from cinder.volume.drivers.netapp import utils as na_utils from cinder.volume.drivers import nfs from cinder.volume.drivers import remotefs from cinder.volume import volume_utils @ddt.ddt class NetAppNfsDriverTestCase(test.TestCase): def setUp(self): super(NetAppNfsDriverTestCase, self).setUp() configuration = mock.Mock() configuration.reserved_percentage = 0 configuration.nfs_mount_point_base = '/mnt/test' configuration.reserved_percentage = 0 configuration.max_over_subscription_ratio = 1.1 self.fake_nfs_export_1 = fake.NFS_EXPORT_1 self.fake_nfs_export_2 = fake.NFS_EXPORT_2 self.fake_mount_point = fake.MOUNT_POINT self.ctxt = context.RequestContext('fake', 'fake', auth_token=True) kwargs = { 'configuration': configuration, 'host': 'openstack@netappnfs', } with mock.patch.object(utils, 'get_root_helper', return_value=mock.Mock()): with mock.patch.object(remotefs_brick, 'RemoteFsClient', return_value=mock.Mock()): self.driver = nfs_base.NetAppNfsDriver(**kwargs) self.driver.db = mock.Mock() self.driver.zapi_client = mock.Mock() self.zapi_client = self.driver.zapi_client @mock.patch.object(nfs.NfsDriver, 'do_setup') @mock.patch.object(na_utils, 'check_flags') def test_do_setup(self, mock_check_flags, mock_super_do_setup): self.driver.do_setup(mock.Mock()) self.assertTrue(mock_check_flags.called) self.assertTrue(mock_super_do_setup.called) def test_get_share_capacity_info(self): mock_get_capacity = self.mock_object(self.driver, '_get_capacity_info') mock_get_capacity.return_value = fake.CAPACITY_VALUES expected_total_capacity_gb = na_utils.round_down( fake.TOTAL_BYTES / units.Gi, '0.01') expected_free_capacity_gb = (na_utils.round_down( fake.AVAILABLE_BYTES / units.Gi, '0.01')) expected_reserved_percentage = round( self.driver.configuration.reserved_percentage) result = self.driver._get_share_capacity_info(fake.NFS_SHARE) self.assertEqual(expected_total_capacity_gb, result['total_capacity_gb']) self.assertEqual(expected_free_capacity_gb, result['free_capacity_gb']) self.assertEqual(expected_reserved_percentage, round(result['reserved_percentage'])) def test_get_capacity_info_ipv4_share(self): expected = fake.CAPACITY_VALUES 
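# _get_capacity_info is expected to parse the export path out of the share
# string and query zapi_client.get_flexvol_capacity(flexvol_path=...); this
# test and the IPv6 variant below assert that both share formats resolve to
# the same flexvol path.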
get_capacity = self.driver.zapi_client.get_flexvol_capacity get_capacity.return_value = fake.CAPACITIES result = self.driver._get_capacity_info(fake.NFS_SHARE_IPV4) self.assertEqual(expected, result) get_capacity.assert_has_calls([ mock.call(flexvol_path=fake.EXPORT_PATH)]) def test_get_capacity_info_ipv6_share(self): expected = fake.CAPACITY_VALUES get_capacity = self.driver.zapi_client.get_flexvol_capacity get_capacity.return_value = fake.CAPACITIES result = self.driver._get_capacity_info(fake.NFS_SHARE_IPV6) self.assertEqual(expected, result) get_capacity.assert_has_calls([ mock.call(flexvol_path=fake.EXPORT_PATH)]) def test_get_pool(self): pool = self.driver.get_pool({'provider_location': 'fake-share'}) self.assertEqual('fake-share', pool) @ddt.data(None, {'replication_status': fields.ReplicationStatus.ENABLED}) def test_create_volume(self, model_update): self.mock_object(self.driver, '_ensure_shares_mounted') self.mock_object(self.driver, '_ensure_flexgroup_not_in_cg') self.mock_object(na_utils, 'get_volume_extra_specs') self.mock_object(self.driver, '_do_create_volume') self.mock_object(self.driver, '_do_qos_for_volume') self.mock_object(self.driver, '_get_volume_model_update', return_value=model_update) expected = {'provider_location': fake.NFS_SHARE} if model_update: expected.update(model_update) actual = self.driver.create_volume(fake.NFS_VOLUME) self.assertEqual(expected, actual) def test_create_volume_no_pool(self): volume = copy.deepcopy(fake.NFS_VOLUME) volume['host'] = '%s@%s' % (fake.HOST_NAME, fake.BACKEND_NAME) self.mock_object(self.driver, '_ensure_shares_mounted') self.mock_object(self.driver, '_ensure_flexgroup_not_in_cg') self.assertRaises(exception.InvalidHost, self.driver.create_volume, volume) def test_create_volume_exception(self): self.mock_object(self.driver, '_ensure_shares_mounted') self.mock_object(self.driver, '_ensure_flexgroup_not_in_cg') self.mock_object(na_utils, 'get_volume_extra_specs') mock_create = self.mock_object(self.driver, '_do_create_volume') mock_create.side_effect = Exception self.assertRaises(exception.VolumeBackendAPIException, self.driver.create_volume, fake.NFS_VOLUME) @ddt.data(None, {'key': 'value'}) def test_clone_source_to_destination_volume(self, model_update): self.mock_object(self.driver, '_get_volume_location', return_value=fake.POOL_NAME) self.mock_object(na_utils, 'get_volume_extra_specs', return_value=fake.EXTRA_SPECS) self.mock_object( self.driver, '_clone_with_extension_check') self.mock_object(self.driver, '_do_qos_for_volume') self.mock_object(self.driver, '_get_volume_model_update', return_value=model_update) expected = {'provider_location': fake.POOL_NAME} if model_update: expected.update(model_update) result = self.driver._clone_source_to_destination_volume( fake.CLONE_SOURCE, fake.CLONE_DESTINATION) self.assertEqual(expected, result) def test_clone_source_to_destination_volume_with_do_qos_exception(self): self.mock_object(self.driver, '_get_volume_location', return_value=fake.POOL_NAME) self.mock_object(na_utils, 'get_volume_extra_specs', return_value=fake.EXTRA_SPECS) self.mock_object( self.driver, '_clone_with_extension_check') self.mock_object(self.driver, '_do_qos_for_volume', side_effect=Exception) self.assertRaises( exception.VolumeBackendAPIException, self.driver._clone_source_to_destination_volume, fake.CLONE_SOURCE, fake.CLONE_DESTINATION) def test_clone_with_extension_check_equal_sizes(self): clone_source = copy.deepcopy(fake.CLONE_SOURCE) clone_source['size'] = fake.VOLUME['size'] self.mock_object(self.driver, 
'_clone_backing_file_for_volume') self.mock_object(self.driver, 'local_path') mock_discover = self.mock_object(self.driver, '_discover_file_till_timeout') mock_discover.return_value = True self.mock_object(self.driver, '_set_rw_permissions') mock_extend_volume = self.mock_object(self.driver, 'extend_volume') self.driver._clone_with_extension_check(clone_source, fake.NFS_VOLUME) self.assertEqual(0, mock_extend_volume.call_count) def test_clone_with_extension_check_unequal_sizes(self): clone_source = copy.deepcopy(fake.CLONE_SOURCE) clone_source['size'] = fake.VOLUME['size'] + 1 self.mock_object(self.driver, '_clone_backing_file_for_volume') self.mock_object(self.driver, 'local_path') mock_discover = self.mock_object(self.driver, '_discover_file_till_timeout') mock_discover.return_value = True self.mock_object(self.driver, '_set_rw_permissions') mock_extend_volume = self.mock_object(self.driver, 'extend_volume') self.driver._clone_with_extension_check(clone_source, fake.NFS_VOLUME) self.assertEqual(1, mock_extend_volume.call_count) def test_clone_with_extension_check_extend_exception(self): clone_source = copy.deepcopy(fake.CLONE_SOURCE) clone_source['size'] = fake.VOLUME['size'] + 1 self.mock_object(self.driver, '_clone_backing_file_for_volume') self.mock_object(self.driver, 'local_path') mock_discover = self.mock_object(self.driver, '_discover_file_till_timeout') mock_discover.return_value = True self.mock_object(self.driver, '_set_rw_permissions') mock_extend_volume = self.mock_object(self.driver, 'extend_volume') mock_extend_volume.side_effect = Exception mock_cleanup = self.mock_object(self.driver, '_cleanup_volume_on_failure') self.assertRaises(exception.CinderException, self.driver._clone_with_extension_check, clone_source, fake.NFS_VOLUME) self.assertEqual(1, mock_cleanup.call_count) def test_clone_with_extension_check_no_discovery(self): self.mock_object(self.driver, '_clone_backing_file_for_volume') self.mock_object(self.driver, 'local_path') self.mock_object(self.driver, '_set_rw_permissions') mock_discover = self.mock_object(self.driver, '_discover_file_till_timeout') mock_discover.return_value = False self.assertRaises(exception.CinderException, self.driver._clone_with_extension_check, fake.CLONE_SOURCE, fake.NFS_VOLUME) @ddt.data(True, False) def test_create_volume_from_snapshot(self, is_flexgroup): provider_location = fake.POOL_NAME volume = fake.VOLUME expected_source = { 'name': fake.SNAPSHOT_NAME, 'size': fake.SIZE, 'id': fake.VOLUME_ID, } mock_clone_call = self.mock_object( self.driver, '_clone_source_to_destination_volume', return_value=provider_location) self.mock_object(self.driver, '_ensure_flexgroup_not_in_cg') self.mock_object(self.driver, '_is_flexgroup', return_value=is_flexgroup) self.mock_object(self.driver, '_is_flexgroup_clone_file_supported', return_value=not is_flexgroup) mock_super_create = self.mock_object( nfs.NfsDriver, 'create_volume_from_snapshot', return_value=provider_location) mock_do_qos = self.mock_object( self.driver, '_do_qos_for_file_flexgroup', return_value=provider_location) retval = self.driver.create_volume_from_snapshot(volume, fake.SNAPSHOT) self.assertEqual(provider_location, retval) if is_flexgroup: mock_clone_call.assert_not_called() mock_super_create.assert_called_once_with(volume, fake.SNAPSHOT) mock_do_qos.assert_called_once_with(volume, provider_location) else: mock_clone_call.assert_called_once_with(expected_source, volume) mock_do_qos.assert_not_called() mock_super_create.assert_not_called() @ddt.data(True, False) def 
test_create_cloned_volume(self, is_flexgroup): provider_location = fake.POOL_NAME volume = fake.VOLUME src_vref = fake.CLONE_SOURCE mock_clone_call = self.mock_object( self.driver, '_clone_source_to_destination_volume', return_value=provider_location) self.mock_object(self.driver, '_ensure_flexgroup_not_in_cg') self.mock_object(self.driver, '_is_flexgroup', return_value=is_flexgroup) self.mock_object(self.driver, '_is_flexgroup_clone_file_supported', return_value=not is_flexgroup) mock_super_create = self.mock_object( nfs.NfsDriver, 'create_cloned_volume', return_value=provider_location) mock_do_qos = self.mock_object( self.driver, '_do_qos_for_file_flexgroup', return_value=provider_location) result = self.driver.create_cloned_volume(volume, src_vref) self.assertEqual(provider_location, result) if is_flexgroup: mock_clone_call.assert_not_called() mock_super_create.assert_called_once_with(volume, src_vref) mock_do_qos.assert_called_once_with(volume, provider_location) else: mock_clone_call.assert_called_once_with(src_vref, volume) mock_do_qos.assert_not_called() mock_super_create.assert_not_called() def test_do_qos_for_file_flexgroup(self): volume = {'provider_location': 'fake'} extra_specs = 'fake_extra' model = {'provider_location': 'fake'} vol_model = {'replication': 'fake'} expected_model = { 'replication': vol_model['replication'], 'provider_location': model['provider_location'], } self.mock_object(self.driver, '_do_qos_for_volume') self.mock_object(self.driver, '_get_volume_model_update', return_value=vol_model) mock_extra = self.mock_object(na_utils, 'get_volume_extra_specs', return_value=extra_specs) model_updated = self.driver._do_qos_for_file_flexgroup(volume, model) self.assertEqual(model_updated, expected_model) mock_extra.assert_called_once_with(volume) def test_do_qos_for_file_flexgroup_error(self): self.mock_object(na_utils, 'get_volume_extra_specs', side_effect=exception.NotFound) self.assertRaises(exception.VolumeBackendAPIException, self.driver._do_qos_for_file_flexgroup, fake.VOLUME, 'fake_model') def test_do_qos_for_volume(self): self.assertRaises(NotImplementedError, self.driver._do_qos_for_volume, fake.NFS_VOLUME, fake.EXTRA_SPECS) @ddt.data((True, False), (False, True), (True, True), (False, False)) @ddt.unpack def test_create_snapshot(self, is_flexgroup, is_flexgroup_clone_file_supported): self.mock_object(self.driver, '_is_flexgroup', return_value=is_flexgroup) self.mock_object(self.driver, '_is_flexgroup_clone_file_supported', return_value=is_flexgroup_clone_file_supported) mock_clone_backing_file_for_volume = self.mock_object( self.driver, '_clone_backing_file_for_volume') mock_snap_flexgroup = self.mock_object( self.driver, '_create_snapshot_for_flexgroup') self.driver.create_snapshot(fake.SNAPSHOT) if (is_flexgroup and (self.driver.configuration.safe_get ('netapp_use_legacy_client') or not is_flexgroup_clone_file_supported)): mock_snap_flexgroup.assert_called_once_with(fake.SNAPSHOT) mock_clone_backing_file_for_volume.assert_not_called() else: mock_snap_flexgroup.assert_not_called() mock_clone_backing_file_for_volume.assert_called_once_with( fake.SNAPSHOT['volume_name'], fake.SNAPSHOT['name'], fake.SNAPSHOT['volume_id'], is_snapshot=True) def test_create_snapshot_for_flexgroup(self): source_vol = { 'id': fake.SNAPSHOT['volume_id'], 'name': fake.SNAPSHOT['volume_name'], 'volume_type_id': fake.SNAPSHOT['volume_type_id'], } snap_vol = { 'name': '%s.%s' % (fake.SNAPSHOT['volume_name'], fake.SNAPSHOT['id']), 'host': fake.HOST_NAME, } mock_super_snapshot = 
self.mock_object(nfs.NfsDriver, 'create_snapshot') mock_extra_specs = self.mock_object(na_utils, 'get_volume_extra_specs') mock_extra_specs.return_value = fake.EXTRA_SPECS mock_get_info = self.mock_object(na_utils, 'get_valid_qos_policy_group_info') mock_get_info.return_value = fake.QOS_POLICY_GROUP_INFO mock_get_host = self.mock_object(self.driver, '_get_volume_host') mock_get_host.return_value = fake.HOST_NAME mock_set_policy = self.mock_object(self.driver, '_set_qos_policy_group_on_volume') self.driver._create_snapshot_for_flexgroup(fake.SNAPSHOT) mock_super_snapshot.assert_has_calls([ mock.call(fake.SNAPSHOT)]) mock_get_host.assert_has_calls([ mock.call(source_vol['id'])]) mock_extra_specs.assert_has_calls([ mock.call(source_vol)]) mock_get_info.assert_has_calls([ mock.call(source_vol, fake.EXTRA_SPECS)]) mock_set_policy.assert_has_calls([ mock.call(snap_vol, fake.QOS_POLICY_GROUP_INFO, False)]) def test_create_snapshot_for_flexgroup_error(self): self.mock_object(nfs.NfsDriver, 'create_snapshot', side_effect=exception.NotFound) self.assertRaises(exception.VolumeBackendAPIException, self.driver._create_snapshot_for_flexgroup, fake.SNAPSHOT) def test_set_qos_policy_group_on_volume(self): self.assertRaises(NotImplementedError, self.driver._set_qos_policy_group_on_volume, fake.NFS_VOLUME, fake.QOS_POLICY_GROUP_INFO, False) @ddt.data(True, False) def test_delete_snapshot(self, is_flexgroup): updates = { 'name': fake.SNAPSHOT_NAME, 'volume_size': fake.SIZE, 'volume_id': fake.VOLUME_ID, 'volume_name': fake.VOLUME_NAME, 'busy': False, } snapshot = fake_snapshot.fake_snapshot_obj(self.ctxt, **updates) self.mock_object(self.driver, '_delete_file') self.mock_object(self.driver, '_is_flexgroup', return_value=is_flexgroup) self.mock_object(self.driver, '_is_flexgroup_clone_file_supported', return_value=not is_flexgroup) mock_super_delete = self.mock_object(nfs.NfsDriver, 'delete_snapshot') self.driver.delete_snapshot(snapshot) if is_flexgroup: mock_super_delete.assert_called_once_with(snapshot) self.driver._delete_file.assert_not_called() else: mock_super_delete.assert_not_called() self.driver._delete_file.assert_called_once_with( snapshot.volume_id, snapshot.name) @ddt.data(fake.NFS_SHARE, fake.NFS_SHARE_IPV6) def test__get_volume_location(self, provider): volume_id = fake.VOLUME_ID self.mock_object(self.driver, '_get_provider_location', return_value=provider) retval = self.driver._get_volume_location(volume_id) self.assertEqual(provider, retval) def test__clone_backing_file_for_volume(self): self.assertRaises(NotImplementedError, self.driver._clone_backing_file_for_volume, fake.VOLUME_NAME, fake.CLONE_SOURCE_NAME, fake.VOLUME_ID, share=None) def test__is_flexgroup(self): self.assertRaises(NotImplementedError, self.driver._is_flexgroup) def test__get_provider_location(self): updates = {'provider_location': fake.PROVIDER_LOCATION} volume = fake_volume.fake_volume_obj(self.ctxt, **updates) self.mock_object(self.driver.db, 'volume_get', return_value=volume) retval = self.driver._get_provider_location(fake.VOLUME_ID) self.assertEqual(fake.PROVIDER_LOCATION, retval) def test__get_volume_host(self): updates = {'host': fake.HOST_NAME} volume = fake_volume.fake_volume_obj(self.ctxt, **updates) self.mock_object(self.driver.db, 'volume_get', return_value=volume) retval = self.driver._get_volume_host(fake.VOLUME_ID) self.assertEqual(fake.HOST_NAME, retval) @ddt.data(None, processutils.ProcessExecutionError) def test__volume_not_present(self, side_effect): self.mock_object(self.driver, '_get_volume_path') 
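# The ddt data above feeds either None or ProcessExecutionError into
# _try_execute; _volume_not_present should report the volume as absent
# exactly when the probe command fails, which the final
# assertEqual(side_effect is not None, retval) verifies.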
self.mock_object(self.driver, '_try_execute', side_effect=side_effect) retval = self.driver._volume_not_present( fake.MOUNT_PATH, fake.VOLUME_NAME) self.assertEqual(side_effect is not None, retval) @mock.patch.object(time, 'sleep') def test__try_execute_exception(self, patched_sleep): self.mock_object(self.driver, '_execute', side_effect=processutils.ProcessExecutionError) mock_exception_log = self.mock_object(nfs_base.LOG, 'exception') self.driver.configuration.num_shell_tries = 3 self.assertRaises(processutils.ProcessExecutionError, self.driver._try_execute, 'fake-command', attr1='val1', attr2='val2') self.assertEqual(2, mock_exception_log.call_count) self.driver._execute.assert_has_calls([ mock.call('fake-command', attr1='val1', attr2='val2'), mock.call('fake-command', attr1='val1', attr2='val2'), mock.call('fake-command', attr1='val1', attr2='val2')]) self.assertEqual(2, time.sleep.call_count) patched_sleep.assert_has_calls([mock.call(1), mock.call(4)]) def test__update_volume_stats(self): self.assertRaises(NotImplementedError, self.driver._update_volume_stats) def test_copy_image_to_volume_base_exception(self): mock_info_log = self.mock_object(nfs_base.LOG, 'info') self.mock_object(self.driver, '_ensure_flexgroup_not_in_cg') self.mock_object(remotefs.RemoteFSDriver, 'copy_image_to_volume', side_effect=exception.NfsException) self.assertRaises(exception.NfsException, self.driver.copy_image_to_volume, 'fake_context', fake.NFS_VOLUME, 'fake_img_service', fake.IMAGE_FILE_ID) mock_info_log.assert_not_called() def test_copy_image_to_volume(self): mock_log = self.mock_object(nfs_base, 'LOG') self.mock_object(self.driver, '_is_flexgroup', return_value=False) self.mock_object(self.driver, '_is_flexgroup_clone_file_supported', return_value=True) self.mock_object(self.driver, '_ensure_flexgroup_not_in_cg') mock_copy_image = self.mock_object( remotefs.RemoteFSDriver, 'copy_image_to_volume') mock_register_image = self.mock_object( self.driver, '_register_image_in_cache') self.driver.copy_image_to_volume('fake_context', fake.NFS_VOLUME, 'fake_img_service', fake.IMAGE_FILE_ID) mock_copy_image.assert_called_once_with( 'fake_context', fake.NFS_VOLUME, 'fake_img_service', fake.IMAGE_FILE_ID, disable_sparse=False) self.assertEqual(1, mock_log.info.call_count) mock_register_image.assert_called_once_with( fake.NFS_VOLUME, fake.IMAGE_FILE_ID) @ddt.data(None, Exception) def test__register_image_in_cache(self, exc): mock_log = self.mock_object(nfs_base, 'LOG') self.mock_object(self.driver, '_do_clone_rel_img_cache', side_effect=exc) retval = self.driver._register_image_in_cache( fake.NFS_VOLUME, fake.IMAGE_FILE_ID) self.assertIsNone(retval) self.assertEqual(exc is not None, mock_log.warning.called) self.assertEqual(1, mock_log.info.call_count) @ddt.data(True, False) def test_do_clone_rel_img_cache(self, path_exists): self.mock_object(nfs_base.LOG, 'info') self.mock_object(utils, 'synchronized', return_value=lambda f: f) self.mock_object(self.driver, '_get_mount_point_for_share', return_value='dir') self.mock_object(os.path, 'exists', return_value=path_exists) self.mock_object(self.driver, '_clone_backing_file_for_volume') self.mock_object(os, 'utime') retval = self.driver._do_clone_rel_img_cache( fake.CLONE_SOURCE_NAME, fake.CLONE_DESTINATION_NAME, fake.NFS_SHARE, 'fake_cache_file') self.assertIsNone(retval) self.assertTrue(self.driver._get_mount_point_for_share.called) if not path_exists: self.driver._clone_backing_file_for_volume.assert_called_once_with( fake.CLONE_SOURCE_NAME, fake.CLONE_DESTINATION_NAME, 
share=fake.NFS_SHARE, volume_id=None) os.utime.assert_called_once_with( 'dir/' + fake.CLONE_SOURCE_NAME, None) else: self.driver._clone_backing_file_for_volume.assert_not_called() os.utime.assert_not_called() os.path.exists.assert_called_once_with( 'dir/' + fake.CLONE_DESTINATION_NAME) def test_cleanup_volume_on_failure(self): path = '%s/%s' % (fake.NFS_SHARE, fake.NFS_VOLUME['name']) mock_local_path = self.mock_object(self.driver, 'local_path') mock_local_path.return_value = path mock_exists_check = self.mock_object(os.path, 'exists') mock_exists_check.return_value = True mock_delete = self.mock_object(self.driver, '_delete_file_at_path') self.driver._cleanup_volume_on_failure(fake.NFS_VOLUME) mock_delete.assert_has_calls([mock.call(path)]) def test_cleanup_volume_on_failure_no_path(self): self.mock_object(self.driver, 'local_path') mock_exists_check = self.mock_object(os.path, 'exists') mock_exists_check.return_value = False mock_delete = self.mock_object(self.driver, '_delete_file_at_path') self.driver._cleanup_volume_on_failure(fake.NFS_VOLUME) self.assertEqual(0, mock_delete.call_count) @ddt.data((fake.NFS_SHARE, fake.SHARE_IP), (fake.NFS_SHARE_IPV6, fake.IPV6_ADDRESS)) @ddt.unpack def test_get_export_ip_path_volume_id_provided(self, provider_location, ip): mock_get_host_ip = self.mock_object(self.driver, '_get_provider_location') mock_get_host_ip.return_value = provider_location expected = (ip, fake.EXPORT_PATH) result = self.driver._get_export_ip_path(fake.VOLUME_ID) self.assertEqual(expected, result) @ddt.data((fake.NFS_SHARE, fake.SHARE_IP, fake.EXPORT_PATH), (fake.NFS_SHARE_IPV6, fake.IPV6_ADDRESS, fake.EXPORT_PATH)) @ddt.unpack def test_get_export_ip_path_share_provided(self, share, ip, path): expected = (ip, path) result = self.driver._get_export_ip_path(share=share) self.assertEqual(expected, result) def test_get_export_ip_path_volume_id_and_share_provided(self): mock_get_host_ip = self.mock_object(self.driver, '_get_provider_location') mock_get_host_ip.return_value = fake.NFS_SHARE_IPV4 expected = (fake.IPV4_ADDRESS, fake.EXPORT_PATH) result = self.driver._get_export_ip_path( fake.VOLUME_ID, fake.NFS_SHARE) self.assertEqual(expected, result) def test_get_export_ip_path_no_args(self): self.assertRaises(exception.InvalidInput, self.driver._get_export_ip_path) def test_construct_image_url_loc(self): img_loc = fake.FAKE_IMAGE_LOCATION locations = self.driver._construct_image_nfs_url(img_loc) self.assertIn("nfs://host/path/image-id-0", locations) self.assertIn("nfs://host/path/image-id-6", locations) self.assertEqual(2, len(locations)) def test_construct_image_url_direct(self): img_loc = ("nfs://host/path/image-id", None) locations = self.driver._construct_image_nfs_url(img_loc) self.assertIn("nfs://host/path/image-id", locations) @ddt.data(None, 'raw', 'qcow2') @mock.patch('cinder.objects.volume.Volume.get_by_id') def test_extend_volume(self, file_format, mock_get): volume = fake_volume.fake_volume_obj(self.ctxt) if file_format: volume.admin_metadata = {'format': file_format} mock_get.return_value = volume new_size = 100 volume_copy = copy.copy(volume) volume_copy['size'] = new_size path = '%s/%s' % (fake.NFS_SHARE, fake.NFS_VOLUME['name']) self.mock_object(self.driver, 'local_path', return_value=path) mock_resize_image_file = self.mock_object(self.driver, '_resize_image_file') mock_get_volume_extra_specs = self.mock_object( na_utils, 'get_volume_extra_specs', return_value=fake.EXTRA_SPECS) mock_do_qos_for_volume = self.mock_object(self.driver, '_do_qos_for_volume') 
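# Act and assert: extend_volume should resize the backing image file at the
# local path and then re-apply QoS using a copy of the volume object that
# carries the new size.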
self.driver.extend_volume(volume, new_size) mock_resize_image_file.assert_called_once_with(path, new_size, file_format=file_format) mock_get_volume_extra_specs.assert_called_once_with(volume) mock_do_qos_for_volume.assert_called_once_with(volume_copy, fake.EXTRA_SPECS, cleanup=False) @mock.patch('cinder.objects.volume.Volume.get_by_id') def test_extend_volume_resize_error(self, mock_get): volume = fake_volume.fake_volume_obj(self.ctxt) mock_get.return_value = volume new_size = 100 volume_copy = copy.copy(volume) volume_copy['size'] = new_size path = '%s/%s' % (fake.NFS_SHARE, fake.NFS_VOLUME['name']) self.mock_object(self.driver, 'local_path', return_value=path) mock_resize_image_file = self.mock_object( self.driver, '_resize_image_file', side_effect=netapp_api.NaApiError) mock_get_volume_extra_specs = self.mock_object( na_utils, 'get_volume_extra_specs', return_value=fake.EXTRA_SPECS) mock_do_qos_for_volume = self.mock_object(self.driver, '_do_qos_for_volume') self.assertRaises(exception.VolumeBackendAPIException, self.driver.extend_volume, volume, new_size) mock_resize_image_file.assert_called_once_with(path, new_size, file_format=None) self.assertFalse(mock_get_volume_extra_specs.called) self.assertFalse(mock_do_qos_for_volume.called) @mock.patch('cinder.objects.volume.Volume.get_by_id') def test_extend_volume_qos_error(self, mock_get): volume = fake_volume.fake_volume_obj(self.ctxt) mock_get.return_value = volume new_size = 100 volume_copy = copy.copy(volume) volume_copy['size'] = new_size path = '%s/%s' % (fake.NFS_SHARE, fake.NFS_VOLUME['name']) self.mock_object(self.driver, 'local_path', return_value=path) mock_resize_image_file = self.mock_object(self.driver, '_resize_image_file') mock_get_volume_extra_specs = self.mock_object( na_utils, 'get_volume_extra_specs', return_value=fake.EXTRA_SPECS) mock_do_qos_for_volume = self.mock_object( self.driver, '_do_qos_for_volume', side_effect=netapp_api.NaApiError) self.assertRaises(exception.VolumeBackendAPIException, self.driver.extend_volume, volume, new_size) mock_resize_image_file.assert_called_once_with(path, new_size, file_format=None) mock_get_volume_extra_specs.assert_called_once_with(volume) mock_do_qos_for_volume.assert_called_once_with(volume_copy, fake.EXTRA_SPECS, cleanup=False) def test_is_share_clone_compatible(self): self.assertRaises(NotImplementedError, self.driver._is_share_clone_compatible, fake.NFS_VOLUME, fake.NFS_SHARE) def test_get_share_mount_and_vol_from_vol_ref(self): self.mock_object(volume_utils, 'resolve_hostname', return_value='10.12.142.11') self.mock_object(os.path, 'isfile', return_value=True) self.driver._mounted_shares = [self.fake_nfs_export_1] vol_path = "%s/%s" % (self.fake_nfs_export_1, 'test_file_name') vol_ref = {'source-name': vol_path} self.driver._ensure_shares_mounted = mock.Mock() self.driver._get_mount_point_for_share = mock.Mock( return_value=self.fake_mount_point) (share, mount, file_path) = ( self.driver._get_share_mount_and_vol_from_vol_ref(vol_ref)) self.assertEqual(self.fake_nfs_export_1, share) self.assertEqual(self.fake_mount_point, mount) self.assertEqual('test_file_name', file_path) def test_get_share_mount_and_vol_from_vol_ref_with_bad_ref(self): self.mock_object(volume_utils, 'resolve_hostname', return_value='10.12.142.11') self.driver._mounted_shares = [self.fake_nfs_export_1] vol_ref = {'source-id': '1234546'} self.driver._ensure_shares_mounted = mock.Mock() self.driver._get_mount_point_for_share = mock.Mock( return_value=self.fake_mount_point) 
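# A manage-existing reference that only carries 'source-id' (no
# 'source-name' path under a mounted share) must be rejected with
# ManageExistingInvalidReference.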
self.assertRaises(exception.ManageExistingInvalidReference, self.driver._get_share_mount_and_vol_from_vol_ref, vol_ref) def test_get_share_mount_and_vol_from_vol_ref_where_not_found(self): self.mock_object(volume_utils, 'resolve_hostname', return_value='10.12.142.11') self.driver._mounted_shares = [self.fake_nfs_export_1] vol_path = "%s/%s" % (self.fake_nfs_export_2, 'test_file_name') vol_ref = {'source-name': vol_path} self.driver._ensure_shares_mounted = mock.Mock() self.driver._get_mount_point_for_share = mock.Mock( return_value=self.fake_mount_point) self.assertRaises(exception.ManageExistingInvalidReference, self.driver._get_share_mount_and_vol_from_vol_ref, vol_ref) def test_get_share_mount_and_vol_from_vol_ref_where_is_dir(self): self.mock_object(volume_utils, 'resolve_hostname', return_value='10.12.142.11') self.driver._mounted_shares = [self.fake_nfs_export_1] vol_ref = {'source-name': self.fake_nfs_export_2} self.driver._ensure_shares_mounted = mock.Mock() self.driver._get_mount_point_for_share = mock.Mock( return_value=self.fake_mount_point) self.assertRaises(exception.ManageExistingInvalidReference, self.driver._get_share_mount_and_vol_from_vol_ref, vol_ref) @ddt.data(None, {'replication_status': fields.ReplicationStatus.ENABLED}) def test_manage_existing(self, model_update): self.mock_object(utils, 'get_file_size', return_value=1074253824) self.driver._mounted_shares = [self.fake_nfs_export_1] test_file = 'test_file_name' volume = fake.FAKE_MANAGE_VOLUME vol_path = "%s/%s" % (self.fake_nfs_export_1, test_file) vol_ref = {'source-name': vol_path} self.driver._check_volume_type = mock.Mock() self.mock_object(self.driver, '_execute') self.driver._ensure_shares_mounted = mock.Mock() self.driver._get_mount_point_for_share = mock.Mock( return_value=self.fake_mount_point) self.driver._get_share_mount_and_vol_from_vol_ref = mock.Mock( return_value=(self.fake_nfs_export_1, self.fake_mount_point, test_file)) mock_get_specs = self.mock_object(na_utils, 'get_volume_extra_specs') mock_get_specs.return_value = {} self.mock_object(self.driver, '_do_qos_for_volume') self.mock_object(self.driver, '_get_volume_model_update', return_value=model_update) actual_model_update = self.driver.manage_existing(volume, vol_ref) self.assertEqual( self.fake_nfs_export_1, actual_model_update['provider_location']) if model_update: self.assertEqual(model_update['replication_status'], actual_model_update['replication_status']) else: self.assertNotIn('replication_status', actual_model_update) self.driver._check_volume_type.assert_called_once_with( volume, self.fake_nfs_export_1, test_file, {}) def test_manage_existing_move_fails(self): self.mock_object(utils, 'get_file_size', return_value=1074253824) self.driver._mounted_shares = [self.fake_nfs_export_1] test_file = 'test_file_name' volume = fake.FAKE_MANAGE_VOLUME vol_path = "%s/%s" % (self.fake_nfs_export_1, test_file) vol_ref = {'source-name': vol_path} self.driver._check_volume_type = mock.Mock() self.driver._ensure_shares_mounted = mock.Mock() self.driver._get_mount_point_for_share = mock.Mock( return_value=self.fake_mount_point) self.driver._get_share_mount_and_vol_from_vol_ref = mock.Mock( return_value=(self.fake_nfs_export_1, self.fake_mount_point, test_file)) self.driver._execute = mock.Mock( side_effect=processutils.ProcessExecutionError) mock_get_specs = self.mock_object(na_utils, 'get_volume_extra_specs') mock_get_specs.return_value = {} self.mock_object(self.driver, '_do_qos_for_volume') self.assertRaises(exception.VolumeBackendAPIException, 
self.driver.manage_existing, volume, vol_ref) def test_unmanage(self): mock_log = self.mock_object(nfs_base, 'LOG') volume = {'id': '123', 'provider_location': '/share'} retval = self.driver.unmanage(volume) self.assertIsNone(retval) self.assertEqual(1, mock_log.info.call_count) def test_manage_existing_get_size(self): test_file = 'test_file_name' self.driver._get_share_mount_and_vol_from_vol_ref = mock.Mock( return_value=(self.fake_nfs_export_1, self.fake_mount_point, test_file)) self.mock_object(utils, 'get_file_size', return_value=1073741824) self.driver._mounted_shares = [self.fake_nfs_export_1] volume = fake.FAKE_MANAGE_VOLUME vol_path = "%s/%s" % (self.fake_nfs_export_1, test_file) vol_ref = {'source-name': vol_path} self.driver._ensure_shares_mounted = mock.Mock() self.driver._get_mount_point_for_share = mock.Mock( return_value=self.fake_mount_point) vol_size = self.driver.manage_existing_get_size(volume, vol_ref) self.assertEqual(1, vol_size) def test_manage_existing_get_size_round_up(self): test_file = 'test_file_name' self.driver._get_share_mount_and_vol_from_vol_ref = mock.Mock( return_value=(self.fake_nfs_export_1, self.fake_mount_point, test_file)) self.mock_object(utils, 'get_file_size', return_value=1073760270) self.driver._mounted_shares = [self.fake_nfs_export_1] volume = fake.FAKE_MANAGE_VOLUME vol_path = "%s/%s" % (self.fake_nfs_export_1, test_file) vol_ref = {'source-name': vol_path} self.driver._ensure_shares_mounted = mock.Mock() self.driver._get_mount_point_for_share = mock.Mock( return_value=self.fake_mount_point) vol_size = self.driver.manage_existing_get_size(volume, vol_ref) self.assertEqual(2, vol_size) def test_manage_existing_get_size_error(self): test_file = 'test_file_name' self.driver._get_share_mount_and_vol_from_vol_ref = mock.Mock( return_value=(self.fake_nfs_export_1, self.fake_mount_point, test_file)) self.driver._mounted_shares = [self.fake_nfs_export_1] volume = fake.FAKE_MANAGE_VOLUME vol_path = "%s/%s" % (self.fake_nfs_export_1, test_file) vol_ref = {'source-name': vol_path} self.driver._ensure_shares_mounted = mock.Mock() self.driver._get_mount_point_for_share = mock.Mock( return_value=self.fake_mount_point) self.assertRaises(exception.VolumeBackendAPIException, self.driver.manage_existing_get_size, volume, vol_ref) @ddt.data(True, False) def test_delete_file(self, volume_not_present): mock_get_provider_location = self.mock_object( self.driver, '_get_provider_location') mock_get_provider_location.return_value = fake.NFS_SHARE mock_volume_not_present = self.mock_object( self.driver, '_volume_not_present') mock_volume_not_present.return_value = volume_not_present mock_get_volume_path = self.mock_object( self.driver, '_get_volume_path') mock_get_volume_path.return_value = fake.PATH mock_delete = self.mock_object(self.driver, '_delete') self.driver._delete_file(fake.CG_VOLUME_ID, fake.CG_VOLUME_NAME) mock_get_provider_location.assert_called_once_with(fake.CG_VOLUME_ID) mock_volume_not_present.assert_called_once_with( fake.NFS_SHARE, fake.CG_VOLUME_NAME) if not volume_not_present: mock_get_volume_path.assert_called_once_with( fake.NFS_SHARE, fake.CG_VOLUME_NAME) mock_delete.assert_called_once_with(fake.PATH) def test_delete_file_volume_not_present(self): mock_get_provider_location = self.mock_object( self.driver, '_get_provider_location') mock_get_provider_location.return_value = fake.NFS_SHARE mock_volume_not_present = self.mock_object( self.driver, '_volume_not_present') mock_volume_not_present.return_value = True mock_get_volume_path = 
self.mock_object( self.driver, '_get_volume_path') mock_delete = self.mock_object(self.driver, '_delete') self.driver._delete_file(fake.CG_VOLUME_ID, fake.CG_VOLUME_NAME) mock_get_provider_location.assert_called_once_with(fake.CG_VOLUME_ID) mock_volume_not_present.assert_called_once_with( fake.NFS_SHARE, fake.CG_VOLUME_NAME) mock_get_volume_path.assert_not_called() mock_delete.assert_not_called() def test_check_for_setup_error(self): super_check_for_setup_error = self.mock_object( nfs.NfsDriver, 'check_for_setup_error') mock_start_tasks = self.mock_object( self.driver.loopingcalls, 'start_tasks') self.driver.check_for_setup_error() super_check_for_setup_error.assert_called_once_with() mock_start_tasks.assert_called_once_with() def test_add_looping_tasks(self): mock_add_task = self.mock_object(self.driver.loopingcalls, 'add_task') mock_call_snap_cleanup = self.mock_object( self.driver, '_delete_snapshots_marked_for_deletion') mock_call_ems_logging = self.mock_object( self.driver, '_handle_ems_logging') mock_call_clean_image_cache = self.mock_object( self.driver, '_clean_image_cache') # image cache cleanup task can be configured with custom timeout cache_cleanup_interval = loopingcalls.ONE_HOUR self.driver.configuration.netapp_nfs_image_cache_cleanup_interval = ( cache_cleanup_interval) self.driver._add_looping_tasks() mock_add_task.assert_has_calls([ mock.call(mock_call_snap_cleanup, loopingcalls.ONE_MINUTE, loopingcalls.ONE_MINUTE), mock.call(mock_call_ems_logging, loopingcalls.ONE_HOUR), mock.call(mock_call_clean_image_cache, cache_cleanup_interval) ]) def test__clone_from_cache(self): image_id = 'fake_image_id' cache_result = [ ('fakepool_bad1', '/fakepath/img-cache-1'), ('fakepool', '/fakepath/img-cache-2'), ('fakepool_bad2', '/fakepath/img-cache-3'), ] mock_call__is_share_clone_compatible = self.mock_object( self.driver, '_is_share_clone_compatible') mock_call__is_share_clone_compatible.return_value = True mock_call__do_clone_rel_img_cache = self.mock_object( self.driver, '_do_clone_rel_img_cache') cloned = self.driver._clone_from_cache(fake.test_volume, image_id, cache_result) self.assertTrue(cloned) mock_call__is_share_clone_compatible.assert_called_once_with( fake.test_volume, 'fakepool') mock_call__do_clone_rel_img_cache.assert_called_once_with( '/fakepath/img-cache-2', 'fakename', 'fakepool', '/fakepath/img-cache-2' ) def test__clone_from_cache_not_found(self): image_id = 'fake_image_id' cache_result = [ ('fakepool_bad1', '/fakepath/img-cache-1'), ('fakepool_bad2', '/fakepath/img-cache-2'), ('fakepool_bad3', '/fakepath/img-cache-3'), ] mock_call__is_share_clone_compatible = self.mock_object( self.driver, '_is_share_clone_compatible') mock_call__do_clone_rel_img_cache = self.mock_object( self.driver, '_do_clone_rel_img_cache') cloned = self.driver._clone_from_cache(fake.test_volume, image_id, cache_result) self.assertFalse(cloned) mock_call__is_share_clone_compatible.assert_not_called() mock_call__do_clone_rel_img_cache.assert_not_called() def test__find_share(self): mock_extract = self.mock_object(volume_utils, 'extract_host', return_value=fake.POOL_NAME) pool_name = self.driver._find_share(fake.VOLUME) self.assertEqual(pool_name, fake.POOL_NAME) mock_extract.assert_called_once_with(fake.VOLUME['host'], level='pool') def test__find_share_error(self): mock_extract = self.mock_object(volume_utils, 'extract_host', return_value=None) self.assertRaises(exception.InvalidHost, self.driver._find_share, fake.VOLUME) mock_extract.assert_called_once_with(fake.VOLUME['host'], level='pool') 
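# The tests in this class share one arrange/act/assert shape built on
# self.mock_object() and ddt parametrization. A minimal sketch of that
# pattern, using hypothetical names and not part of the upstream suite,
# would look like:
#
#     @ddt.data(True, False)
#     def test_example_pattern(self, flag):
#         mock_helper = self.mock_object(self.driver, '_is_flexgroup',
#                                        return_value=flag)
#         self.assertEqual(flag, self.driver._is_flexgroup())
#         mock_helper.assert_called_once_with()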
def test__ensure_flexgroup_not_in_cg_raises(self): self.mock_object(self.driver, '_is_flexgroup', return_value=True) self.mock_object(volume_utils, 'is_group_a_cg_snapshot_type', return_value=True) fake_v1 = { 'group': 'fake_group', 'host': 'fake_host', 'id': 'fake_id' } self.assertRaises(na_utils.NetAppDriverException, self.driver._ensure_flexgroup_not_in_cg, fake_v1) def test__is_flexgroup_clone_file_supported(self): self.assertRaises(NotImplementedError, self.driver._is_flexgroup_clone_file_supported) def test_update_migrated_volume(self): self.assertRaises(NotImplementedError, self.driver.update_migrated_volume, self.ctxt, fake.test_volume, mock.sentinel.new_volume, mock.sentinel.original_status) @ddt.data({'is_flexgroup': False, 'is_flexgroup_supported': False}, {'is_flexgroup': False, 'is_flexgroup_supported': True}, {'is_flexgroup': True, 'is_flexgroup_supported': False}, {'is_flexgroup': True, 'is_flexgroup_supported': True}) @ddt.unpack def test_revert_to_snapshot(self, is_flexgroup, is_flexgroup_supported): context = {} self.mock_object(self.driver, '_is_flexgroup', return_value=is_flexgroup) self.mock_object(self.driver, '_is_flexgroup_clone_file_supported', return_value=is_flexgroup_supported) mock_revert_to_snapshot = self.mock_object( remotefs.RemoteFSSnapDriverDistributed, 'revert_to_snapshot') mock__revert_to_snapshot = self.mock_object(self.driver, '_revert_to_snapshot') self.driver.revert_to_snapshot(context, fake.SNAPSHOT_VOLUME, fake.SNAPSHOT) if is_flexgroup and not is_flexgroup_supported: mock_revert_to_snapshot.assert_called_once_with( context, fake.SNAPSHOT_VOLUME, fake.SNAPSHOT) mock__revert_to_snapshot.assert_not_called() else: mock__revert_to_snapshot.assert_called_once_with( fake.SNAPSHOT_VOLUME, fake.SNAPSHOT) mock_revert_to_snapshot.assert_not_called() def test__revert_to_snapshot(self): self.assertRaises(NotImplementedError, self.driver._revert_to_snapshot, fake.SNAPSHOT_VOLUME, fake.SNAPSHOT) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/volume/drivers/netapp/dataontap/test_nfs_cmode.py0000664000175000017500000033015700000000000030262 0ustar00zuulzuul00000000000000# Copyright (c) 2014 Andrew Kerr. All rights reserved. # Copyright (c) 2015 Tom Barron. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""Mock unit tests for the NetApp cmode nfs storage driver.""" import hashlib from unittest import mock import uuid import ddt from os_brick.remotefs import remotefs as remotefs_brick from oslo_utils import units from cinder import exception from cinder.image import image_utils from cinder.objects import fields from cinder.tests.unit import fake_volume from cinder.tests.unit import test from cinder.tests.unit import utils as test_utils from cinder.tests.unit.volume.drivers.netapp.dataontap import fakes as fake from cinder.tests.unit.volume.drivers.netapp.dataontap.utils import fakes as \ fake_ssc from cinder.tests.unit.volume.drivers.netapp import fakes as na_fakes from cinder import utils from cinder.volume.drivers.netapp.dataontap.client import api as netapp_api from cinder.volume.drivers.netapp.dataontap.client import client_cmode from cinder.volume.drivers.netapp.dataontap import nfs_base from cinder.volume.drivers.netapp.dataontap import nfs_cmode from cinder.volume.drivers.netapp.dataontap.performance import perf_cmode from cinder.volume.drivers.netapp.dataontap.utils import capabilities from cinder.volume.drivers.netapp.dataontap.utils import data_motion from cinder.volume.drivers.netapp.dataontap.utils import loopingcalls from cinder.volume.drivers.netapp.dataontap.utils import utils as dot_utils from cinder.volume.drivers.netapp import utils as na_utils from cinder.volume.drivers import nfs from cinder.volume import volume_utils @ddt.ddt class NetAppCmodeNfsDriverTestCase(test.TestCase): def setUp(self): super(NetAppCmodeNfsDriverTestCase, self).setUp() kwargs = { 'configuration': self.get_config_cmode(), 'host': 'openstack@nfscmode', } with mock.patch.object(utils, 'get_root_helper', return_value=mock.Mock()): with mock.patch.object(remotefs_brick, 'RemoteFsClient', return_value=mock.Mock()): self.driver = nfs_cmode.NetAppCmodeNfsDriver(**kwargs) self.driver._mounted_shares = [fake.NFS_SHARE] self.driver.ssc_vols = True self.driver.vserver = fake.VSERVER_NAME self.driver.ssc_enabled = True self.driver.perf_library = mock.Mock() self.driver.ssc_library = mock.Mock() self.driver.zapi_client = mock.Mock() self.driver.using_cluster_credentials = True def get_config_cmode(self): config = na_fakes.create_configuration_cmode() config.netapp_storage_protocol = 'nfs' config.netapp_login = 'admin' config.netapp_password = 'pass' config.netapp_server_hostname = '127.0.0.1' config.netapp_transport_type = 'http' config.netapp_server_port = '80' config.netapp_vserver = fake.VSERVER_NAME config.netapp_copyoffload_tool_path = 'copyoffload_tool_path' config.netapp_api_trace_pattern = 'fake_regex' return config @ddt.data({'active_backend_id': None, 'targets': ['dev1', 'dev2']}, {'active_backend_id': None, 'targets': []}, {'active_backend_id': 'dev1', 'targets': []}, {'active_backend_id': 'dev1', 'targets': ['dev1', 'dev2']}) @ddt.unpack def test_init_driver_for_replication(self, active_backend_id, targets): kwargs = { 'configuration': self.get_config_cmode(), 'host': 'openstack@nfscmode', 'active_backend_id': active_backend_id, } self.mock_object(data_motion.DataMotionMixin, 'get_replication_backend_names', return_value=targets) with mock.patch.object(utils, 'get_root_helper', return_value=mock.Mock()): with mock.patch.object(remotefs_brick, 'RemoteFsClient', return_value=mock.Mock()): nfs_driver = nfs_cmode.NetAppCmodeNfsDriver(**kwargs) self.assertEqual(active_backend_id, nfs_driver.failed_over_backend_name) self.assertEqual(active_backend_id is not None, nfs_driver.failed_over) 
self.assertEqual(len(targets) > 0, nfs_driver.replication_enabled) @mock.patch.object(perf_cmode, 'PerformanceCmodeLibrary', mock.Mock()) @mock.patch.object(client_cmode, 'Client', mock.Mock()) @mock.patch.object(capabilities.CapabilitiesLibrary, 'cluster_user_supported') @mock.patch.object(capabilities.CapabilitiesLibrary, 'check_api_permissions') @mock.patch.object(nfs.NfsDriver, 'do_setup') @mock.patch.object(na_utils, 'check_flags') def test_do_setup(self, mock_check_flags, mock_super_do_setup, mock_check_api_permissions, mock_cluster_user_supported): self.mock_object( dot_utils, 'get_backend_configuration', return_value=self.get_config_cmode()) self.driver.do_setup(mock.Mock()) self.assertTrue(mock_check_flags.called) self.assertTrue(mock_super_do_setup.called) mock_check_api_permissions.assert_called_once_with() mock_cluster_user_supported.assert_called_once_with() def test__update_volume_stats(self): mock_debug_log = self.mock_object(nfs_cmode.LOG, 'debug') self.mock_object(self.driver, 'get_filter_function') self.mock_object(self.driver, 'get_goodness_function') self.driver.zapi_client = mock.Mock() self.mock_object(self.driver, '_get_pool_stats', return_value={}) expected_stats = { 'driver_version': self.driver.VERSION, 'pools': {}, 'sparse_copy_volume': True, 'replication_enabled': False, 'storage_protocol': 'nfs', 'vendor_name': 'NetApp', 'volume_backend_name': 'NetApp_NFS_Cluster_direct', } retval = self.driver._update_volume_stats() self.assertIsNone(retval) self.assertEqual(1, mock_debug_log.call_count) self.assertEqual(expected_stats, self.driver._stats) @ddt.data({'replication_backends': [], 'cluster_credentials': False, 'is_fg': False, 'report_provisioned_capacity': True}, {'replication_backends': ['target_1', 'target_2'], 'cluster_credentials': True, 'is_fg': False, 'report_provisioned_capacity': False}, {'replication_backends': ['target_1', 'target_2'], 'cluster_credentials': True, 'is_fg': True, 'report_provisioned_capacity': False} ) @ddt.unpack def test_get_pool_stats(self, replication_backends, cluster_credentials, is_fg, report_provisioned_capacity): self.driver.using_cluster_credentials = cluster_credentials conf = self.driver.configuration conf.netapp_driver_reports_provisioned_capacity = ( report_provisioned_capacity) self.driver.zapi_client = mock.Mock() ssc = { 'vola': { 'pool_name': '10.10.10.10:/vola', 'thick_provisioning_support': True, 'thin_provisioning_support': False, 'netapp_thin_provisioned': 'false', 'netapp_compression': 'false', 'netapp_mirrored': 'false', 'netapp_dedup': 'true', 'netapp_aggregate': ['aggr1'] if is_fg else 'aggr1', 'netapp_raid_type': ['raid_dp'] if is_fg else 'raid_dp', 'netapp_disk_type': ['SSD'] if is_fg else 'SSD', 'consistent_group_snapshot_enabled': True, 'netapp_is_flexgroup': 'true' if is_fg else 'false', }, } mock_get_ssc = self.mock_object(self.driver.ssc_library, 'get_ssc', return_value=ssc) mock_get_aggrs = self.mock_object(self.driver.ssc_library, 'get_ssc_aggregates', return_value=['aggr1']) self.mock_object(self.driver, 'get_replication_backend_names', return_value=replication_backends) total_capacity_gb = na_utils.round_down( fake.TOTAL_BYTES // units.Gi, '0.01') free_capacity_gb = na_utils.round_down( fake.AVAILABLE_BYTES // units.Gi, '0.01') capacity = { 'reserved_percentage': fake.RESERVED_PERCENTAGE, 'max_over_subscription_ratio': fake.MAX_OVER_SUBSCRIPTION_RATIO, 'total_capacity_gb': total_capacity_gb, 'free_capacity_gb': free_capacity_gb, } files_provisioned_cap = [{ 'name': 
                'volume-ae947c9b-2392-4956-b373-aaac4521f37e',
            'file-size': 5368709120.0  # 5GB
        }, {
            'name': 'snapshot-527eedad-a431-483d-b0ca-18995dd65b66',
            'file-size': 1073741824.0  # 1GB
        }]
        self.mock_object(self.driver,
                         '_get_share_capacity_info',
                         return_value=capacity)
        self.mock_object(self.driver.zapi_client,
                         'get_file_sizes_by_dir',
                         return_value=files_provisioned_cap)
        self.mock_object(self.driver.zapi_client,
                         'get_flexvol_dedupe_used_percent',
                         return_value=55.0)

        aggr_capacities = {
            'aggr1': {
                'percent-used': 45,
                'size-available': 59055800320.0,
                'size-total': 107374182400.0,
            },
        }
        mock_get_aggr_capacities = self.mock_object(
            self.driver.zapi_client, 'get_aggregate_capacities',
            return_value=aggr_capacities)

        self.driver.perf_library.get_node_utilization_for_pool = (
            mock.Mock(return_value=30.0))

        result = self.driver._get_pool_stats(filter_function='filter',
                                             goodness_function='goodness')

        expected = [{
            'pool_name': '10.10.10.10:/vola',
            'reserved_percentage': fake.RESERVED_PERCENTAGE,
            'max_over_subscription_ratio': fake.MAX_OVER_SUBSCRIPTION_RATIO,
            'multiattach': True,
            'total_capacity_gb': total_capacity_gb,
            'free_capacity_gb': free_capacity_gb,
            'netapp_dedupe_used_percent': 55.0,
            'netapp_aggregate_used_percent': 45,
            'utilization': 30.0,
            'filter_function': 'filter',
            'goodness_function': 'goodness',
            'thick_provisioning_support': True,
            'thin_provisioning_support': False,
            'netapp_thin_provisioned': 'false',
            'netapp_compression': 'false',
            'netapp_mirrored': 'false',
            'netapp_dedup': 'true',
            'netapp_aggregate': 'aggr1',
            'netapp_raid_type': 'raid_dp',
            'netapp_disk_type': 'SSD',
            'consistencygroup_support': True,
            'consistent_group_snapshot_enabled': True,
            'replication_enabled': False,
            'online_extend_support': False,
            'netapp_is_flexgroup': 'false',
        }]
        if report_provisioned_capacity:
            expected[0].update({'provisioned_capacity_gb': 5.0})

        expected[0].update({'QoS_support': cluster_credentials})
        if not cluster_credentials:
            expected[0].update({
                'netapp_aggregate_used_percent': 0,
                'netapp_dedupe_used_percent': 0,
            })

        if replication_backends:
            expected[0].update({
                'replication_enabled': True,
                'replication_count': len(replication_backends),
                'replication_targets': replication_backends,
                'replication_type': 'async',
            })

        if is_fg:
            expected[0].update({
                'netapp_is_flexgroup': 'true',
                'netapp_disk_type': ['SSD'],
                'netapp_raid_type': ['raid_dp'],
                'netapp_aggregate': ['aggr1'],
                'netapp_dedupe_used_percent': 0,
                'consistencygroup_support': False,
                'consistent_group_snapshot_enabled': False,
                'multiattach': False,
            })

        self.assertEqual(expected, result)
        mock_get_ssc.assert_called_once_with()
        if cluster_credentials:
            mock_get_aggrs.assert_called_once_with()
            mock_get_aggr_capacities.assert_called_once_with(['aggr1'])

    @ddt.data({}, None)
    def test_get_pool_stats_no_ssc_vols(self, ssc):
        mock_get_ssc = self.mock_object(self.driver.ssc_library,
                                        'get_ssc',
                                        return_value=ssc)

        pools = self.driver._get_pool_stats()

        self.assertListEqual([], pools)
        mock_get_ssc.assert_called_once_with()

    def test_update_ssc(self):
        mock_ensure_shares_mounted = self.mock_object(
            self.driver, '_ensure_shares_mounted')
        mock_get_pool_map = self.mock_object(
            self.driver, '_get_flexvol_to_pool_map',
            return_value='fake_map')
        mock_update_ssc = self.mock_object(
            self.driver.ssc_library, 'update_ssc')

        result = self.driver._update_ssc()

        self.assertIsNone(result)
        mock_ensure_shares_mounted.assert_called_once_with()
        mock_get_pool_map.assert_called_once_with()
        mock_update_ssc.assert_called_once_with('fake_map')

    def test_get_pool_map(self):
        self.driver.zapi_client = mock.Mock()
mock_get_operational_lif_addresses = self.mock_object( self.driver.zapi_client, 'get_operational_lif_addresses', return_value=[fake.SHARE_IP]) mock_resolve_hostname = self.mock_object( volume_utils, 'resolve_hostname', return_value=fake.SHARE_IP) mock_get_flexvol = self.mock_object( self.driver.zapi_client, 'get_flexvol', return_value={'name': fake.NETAPP_VOLUME}) result = self.driver._get_flexvol_to_pool_map() expected = { fake.NETAPP_VOLUME: { 'pool_name': fake.NFS_SHARE, }, } self.assertEqual(expected, result) mock_get_operational_lif_addresses.assert_called_once_with() mock_resolve_hostname.assert_called_once_with(fake.SHARE_IP) mock_get_flexvol.assert_called_once_with(flexvol_path=fake.EXPORT_PATH) def test_get_pool_map_address_not_found(self): self.driver.zapi_client = mock.Mock() self.mock_object(self.driver.zapi_client, 'get_operational_lif_addresses', return_value=[]) self.mock_object(volume_utils, 'resolve_hostname', return_value=fake.SHARE_IP) result = self.driver._get_flexvol_to_pool_map() self.assertEqual({}, result) def test_get_pool_map_flexvol_not_found(self): self.driver.zapi_client = mock.Mock() self.mock_object(self.driver.zapi_client, 'get_operational_lif_addresses', return_value=[fake.SHARE_IP]) self.mock_object(volume_utils, 'resolve_hostname', return_value=fake.SHARE_IP) side_effect = exception.VolumeBackendAPIException(data='fake_data') self.mock_object(self.driver.zapi_client, 'get_flexvol', side_effect=side_effect) result = self.driver._get_flexvol_to_pool_map() self.assertEqual({}, result) @ddt.data(['/mnt/img-id1', '/mnt/img-id2'], []) def test__shortlist_del_eligible_files(self, old_files): self.driver.zapi_client = mock.Mock() self.driver.zapi_client.get_file_usage = mock.Mock(return_value='1000') mock_debug_log = self.mock_object(nfs_cmode.LOG, 'debug') self.mock_object(self.driver, '_get_vserver_and_exp_vol', return_value=('openstack', 'fake_share')) expected_list = [(o, '1000') for o in old_files] observed_list = self.driver._shortlist_del_eligible_files( 'fake_ip:fake_share', old_files) self.assertEqual(expected_list, observed_list) self.assertEqual(1, mock_debug_log.call_count) @ddt.data({'ip': None, 'shares': None}, {'ip': 'fake_ip', 'shares': ['fip:/fsh1']}) @ddt.unpack def test__share_match_for_ip_no_match(self, ip, shares): def side_effect(arg): if arg == 'fake_ip': return 'openstack' return None self.mock_object(self.driver, '_get_vserver_for_ip', side_effect=side_effect) mock_debug_log = self.mock_object(nfs_cmode.LOG, 'debug') retval = self.driver._share_match_for_ip(ip, shares) self.assertIsNone(retval) self.assertEqual(1, mock_debug_log.call_count) def test__share_match_for_ip(self): shares = ['fip:/fsh1'] self.mock_object(self.driver, '_get_vserver_for_ip', return_value='openstack') mock_debug_log = self.mock_object(nfs_cmode.LOG, 'debug') retval = self.driver._share_match_for_ip('fip', shares) self.assertEqual('fip:/fsh1', retval) self.assertEqual(1, mock_debug_log.call_count) def test__get_vserver_for_ip_ignores_zapi_exception(self): self.driver.zapi_client = mock.Mock() self.driver.zapi_client.get_if_info_by_ip = mock.Mock( side_effect=exception.NotFound) vserver = self.driver._get_vserver_for_ip('FAKE_IP') self.assertIsNone(vserver) def test__get_vserver_for_ip(self): self.driver.zapi_client = mock.Mock() self.driver.zapi_client.get_if_info_by_ip = mock.Mock( return_value=fake.get_fake_ifs()) vserver = self.driver._get_vserver_for_ip('FAKE_IP') self.assertEqual(fake.VSERVER_NAME, vserver) def test_check_for_setup_error(self): 
mock_add_looping_tasks = self.mock_object( self.driver, '_add_looping_tasks') mock_contains_fg = self.mock_object( self.driver.ssc_library, 'contains_flexgroup_pool', return_value=False) self.driver.zapi_client = mock.Mock(features=mock.Mock( FLEXGROUP=True)) super_check_for_setup_error = self.mock_object( nfs_base.NetAppNfsDriver, 'check_for_setup_error') self.driver.check_for_setup_error() self.assertEqual(1, super_check_for_setup_error.call_count) self.assertEqual(1, mock_add_looping_tasks.call_count) mock_add_looping_tasks.assert_called_once_with() mock_contains_fg.assert_called_once_with() def test_check_for_setup_error_fail(self): mock_add_looping_tasks = self.mock_object( self.driver, '_add_looping_tasks') mock_contains_fg = self.mock_object( self.driver.ssc_library, 'contains_flexgroup_pool', return_value=True) self.driver.zapi_client = mock.Mock(features=mock.Mock( FLEXGROUP=False)) self.assertRaises( na_utils.NetAppDriverException, self.driver.check_for_setup_error) self.assertEqual(1, mock_add_looping_tasks.call_count) mock_add_looping_tasks.assert_called_once_with() mock_contains_fg.assert_called_once_with() @ddt.data({'replication_enabled': True, 'failed_over': False, 'cluster_credentials': True}, {'replication_enabled': True, 'failed_over': True, 'cluster_credentials': True}, {'replication_enabled': False, 'failed_over': False, 'cluster_credentials': False}) @ddt.unpack def test_handle_housekeeping_tasks( self, replication_enabled, failed_over, cluster_credentials): self.driver.using_cluster_credentials = cluster_credentials ensure_mirrors = self.mock_object(data_motion.DataMotionMixin, 'ensure_snapmirrors') self.mock_object(self.driver.ssc_library, 'get_ssc_flexvol_names', return_value=fake_ssc.SSC.keys()) mock_remove_unused_qos_policy_groups = self.mock_object( self.driver.zapi_client, 'remove_unused_qos_policy_groups') self.driver.replication_enabled = replication_enabled self.driver.failed_over = failed_over self.driver._handle_housekeeping_tasks() if self.driver.using_cluster_credentials: mock_remove_unused_qos_policy_groups.assert_called_once_with() else: mock_remove_unused_qos_policy_groups.assert_not_called() if replication_enabled and not failed_over: ensure_mirrors.assert_called_once_with( self.driver.configuration, self.driver.backend_name, fake_ssc.SSC.keys()) else: self.assertFalse(ensure_mirrors.called) def test_handle_ems_logging(self): volume_list = ['vol0', 'vol1', 'vol2'] self.mock_object( self.driver, '_get_backing_flexvol_names', return_value=volume_list) self.mock_object( dot_utils, 'build_ems_log_message_0', return_value='fake_base_ems_log_message') self.mock_object( dot_utils, 'build_ems_log_message_1', return_value='fake_pool_ems_log_message') mock_send_ems_log_message = self.mock_object( self.driver.zapi_client, 'send_ems_log_message') self.driver._handle_ems_logging() mock_send_ems_log_message.assert_has_calls([ mock.call('fake_base_ems_log_message'), mock.call('fake_pool_ems_log_message'), ]) dot_utils.build_ems_log_message_0.assert_called_once_with( self.driver.driver_name, self.driver.app_version) dot_utils.build_ems_log_message_1.assert_called_once_with( self.driver.driver_name, self.driver.app_version, self.driver.vserver, volume_list, []) def test_delete_volume(self): fake_provider_location = 'fake_provider_location' fake_volume = {'provider_location': fake_provider_location} self.mock_object(self.driver, '_delete_backing_file_for_volume') self.mock_object(na_utils, 'get_valid_qos_policy_group_info', return_value=fake.QOS_POLICY_GROUP_INFO) 
self.mock_object(na_utils, 'is_qos_policy_group_spec_adaptive', return_value=False) self.driver.delete_volume(fake_volume) self.driver._delete_backing_file_for_volume.assert_called_once_with( fake_volume) na_utils.get_valid_qos_policy_group_info.assert_called_once_with( fake_volume) na_utils.is_qos_policy_group_spec_adaptive.assert_called_once_with( fake.QOS_POLICY_GROUP_INFO) (self.driver.zapi_client.mark_qos_policy_group_for_deletion. assert_called_once_with(fake.QOS_POLICY_GROUP_INFO, False)) def test_delete_volume_exception_path(self): fake_provider_location = 'fake_provider_location' fake_volume = {'provider_location': fake_provider_location} self.mock_object(self.driver, '_delete_backing_file_for_volume') self.mock_object(na_utils, 'get_valid_qos_policy_group_info', return_value=fake.QOS_POLICY_GROUP_INFO) self.mock_object(na_utils, 'is_qos_policy_group_spec_adaptive', return_value=False) self.mock_object( self.driver.zapi_client, 'mark_qos_policy_group_for_deletion', side_effect=na_utils.NetAppDriverException) self.driver.delete_volume(fake_volume) self.driver._delete_backing_file_for_volume.assert_called_once_with( fake_volume) na_utils.get_valid_qos_policy_group_info.assert_called_once_with( fake_volume) na_utils.is_qos_policy_group_spec_adaptive.assert_called_once_with( fake.QOS_POLICY_GROUP_INFO) (self.driver.zapi_client.mark_qos_policy_group_for_deletion. assert_called_once_with(fake.QOS_POLICY_GROUP_INFO, False)) def test_delete_backing_file_for_volume(self): mock_filer_delete = self.mock_object(self.driver, '_delete_file') mock_super_delete = self.mock_object(nfs_base.NetAppNfsDriver, 'delete_volume') mock_flexgroup = self.mock_object(self.driver, '_is_flexgroup', return_value=False) mock_clone_file = self.mock_object( self.driver, '_is_flexgroup_clone_file_supported', return_value=True) self.driver._delete_backing_file_for_volume(fake.NFS_VOLUME) mock_flexgroup.assert_called_once_with(host=fake.NFS_VOLUME['host']) mock_clone_file.assert_not_called() mock_filer_delete.assert_called_once_with( fake.NFS_VOLUME['id'], fake.NFS_VOLUME['name']) self.assertEqual(0, mock_super_delete.call_count) @ddt.data(True, False) def test_delete_backing_file_for_volume_exception_path(self, super_exc): mock_flexgroup = self.mock_object(self.driver, '_is_flexgroup', return_value=False) mock_clone_file = self.mock_object( self.driver, '_is_flexgroup_clone_file_supported', return_value=True) mock_exception_log = self.mock_object(nfs_cmode.LOG, 'exception') exception_call_count = 2 if super_exc else 1 mock_filer_delete = self.mock_object(self.driver, '_delete_file') mock_filer_delete.side_effect = [Exception] mock_super_delete = self.mock_object(nfs_base.NetAppNfsDriver, 'delete_volume') if super_exc: mock_super_delete.side_effect = [Exception] self.driver._delete_backing_file_for_volume(fake.NFS_VOLUME) mock_flexgroup.assert_called_once_with(host=fake.NFS_VOLUME['host']) mock_clone_file.assert_not_called() mock_filer_delete.assert_called_once_with( fake.NFS_VOLUME['id'], fake.NFS_VOLUME['name']) mock_super_delete.assert_called_once_with(fake.NFS_VOLUME) self.assertEqual(exception_call_count, mock_exception_log.call_count) @ddt.data(True, False) def test_delete_snapshot(self, is_flexgroup): mock_delete_backing = self.mock_object( self.driver, '_delete_backing_file_for_snapshot') self.mock_object(self.driver, '_is_flexgroup', return_value=is_flexgroup) self.mock_object(self.driver, '_is_flexgroup_clone_file_supported', return_value=not is_flexgroup) mock_super_delete = 
self.mock_object(nfs_base.NetAppNfsDriver, 'delete_snapshot') self.driver.delete_snapshot(fake.test_snapshot) if is_flexgroup: mock_super_delete.assert_called_once_with(fake.test_snapshot) mock_delete_backing.assert_not_called() else: mock_super_delete.assert_not_called() mock_delete_backing.assert_called_once_with(fake.test_snapshot) def test_delete_backing_file_for_snapshot(self): mock_filer_delete = self.mock_object(self.driver, '_delete_file') mock_super_delete = self.mock_object(nfs_base.NetAppNfsDriver, 'delete_snapshot') self.driver._delete_backing_file_for_snapshot(fake.test_snapshot) mock_filer_delete.assert_called_once_with( fake.test_snapshot['volume_id'], fake.test_snapshot['name']) self.assertEqual(0, mock_super_delete.call_count) @ddt.data(True, False) def test_delete_backing_file_for_snapshot_exception_path(self, super_exc): mock_exception_log = self.mock_object(nfs_cmode.LOG, 'exception') exception_call_count = 2 if super_exc else 1 mock_filer_delete = self.mock_object(self.driver, '_delete_file') mock_filer_delete.side_effect = [Exception] mock_super_delete = self.mock_object(nfs_base.NetAppNfsDriver, 'delete_snapshot') if super_exc: mock_super_delete.side_effect = [Exception] self.driver._delete_backing_file_for_snapshot(fake.test_snapshot) mock_filer_delete.assert_called_once_with( fake.test_snapshot['volume_id'], fake.test_snapshot['name']) mock_super_delete.assert_called_once_with(fake.test_snapshot) self.assertEqual(exception_call_count, mock_exception_log.call_count) def test_delete_file(self): mock_get_vs_ip = self.mock_object(self.driver, '_get_export_ip_path') mock_get_vs_ip.return_value = (fake.SHARE_IP, fake.EXPORT_PATH) mock_get_vserver = self.mock_object(self.driver, '_get_vserver_for_ip') mock_get_vserver.return_value = fake.VSERVER_NAME mock_zapi_get_vol = self.driver.zapi_client.get_vol_by_junc_vserver mock_zapi_get_vol.return_value = fake.FLEXVOL mock_zapi_delete = self.driver.zapi_client.delete_file self.driver._delete_file( fake.test_snapshot['volume_id'], fake.test_snapshot['name']) mock_get_vs_ip.assert_called_once_with( volume_id=fake.test_snapshot['volume_id']) mock_get_vserver.assert_called_once_with(fake.SHARE_IP) mock_zapi_get_vol.assert_called_once_with( fake.VSERVER_NAME, fake.EXPORT_PATH) mock_zapi_delete.assert_called_once_with( '/vol/%s/%s' % (fake.FLEXVOL, fake.test_snapshot['name'])) def test_do_qos_for_volume_no_exception(self): mock_get_info = self.mock_object(na_utils, 'get_valid_qos_policy_group_info') mock_get_info.return_value = fake.QOS_POLICY_GROUP_INFO mock_provision_qos = self.driver.zapi_client.provision_qos_policy_group mock_set_policy = self.mock_object(self.driver, '_set_qos_policy_group_on_volume') mock_error_log = self.mock_object(nfs_cmode.LOG, 'error') mock_debug_log = self.mock_object(nfs_cmode.LOG, 'debug') mock_cleanup = self.mock_object(self.driver, '_cleanup_volume_on_failure') mock_is_qos_min_supported = self.mock_object(self.driver.ssc_library, 'is_qos_min_supported', return_value=True) mock_extract_host = self.mock_object(volume_utils, 'extract_host', return_value=fake.POOL_NAME) self.driver._do_qos_for_volume(fake.NFS_VOLUME, fake.EXTRA_SPECS) mock_get_info.assert_has_calls([ mock.call(fake.NFS_VOLUME, fake.EXTRA_SPECS)]) mock_provision_qos.assert_has_calls([ mock.call(fake.QOS_POLICY_GROUP_INFO, True)]) mock_set_policy.assert_has_calls([ mock.call(fake.NFS_VOLUME, fake.QOS_POLICY_GROUP_INFO, False)]) mock_is_qos_min_supported.assert_called_once_with(fake.POOL_NAME) 
mock_extract_host.assert_called_once_with(fake.NFS_VOLUME['host'], level='pool') self.assertEqual(0, mock_error_log.call_count) self.assertEqual(0, mock_debug_log.call_count) self.assertEqual(0, mock_cleanup.call_count) def test_do_qos_for_volume_exception_w_cleanup(self): mock_get_info = self.mock_object(na_utils, 'get_valid_qos_policy_group_info') mock_get_info.return_value = fake.QOS_POLICY_GROUP_INFO mock_provision_qos = self.driver.zapi_client.provision_qos_policy_group mock_set_policy = self.mock_object(self.driver, '_set_qos_policy_group_on_volume') mock_set_policy.side_effect = netapp_api.NaApiError mock_error_log = self.mock_object(nfs_cmode.LOG, 'error') mock_debug_log = self.mock_object(nfs_cmode.LOG, 'debug') mock_cleanup = self.mock_object(self.driver, '_cleanup_volume_on_failure') mock_is_qos_min_supported = self.mock_object(self.driver.ssc_library, 'is_qos_min_supported', return_value=True) mock_extract_host = self.mock_object(volume_utils, 'extract_host', return_value=fake.POOL_NAME) self.assertRaises(netapp_api.NaApiError, self.driver._do_qos_for_volume, fake.NFS_VOLUME, fake.EXTRA_SPECS) mock_get_info.assert_has_calls([ mock.call(fake.NFS_VOLUME, fake.EXTRA_SPECS)]) mock_provision_qos.assert_has_calls([ mock.call(fake.QOS_POLICY_GROUP_INFO, True)]) mock_set_policy.assert_has_calls([ mock.call(fake.NFS_VOLUME, fake.QOS_POLICY_GROUP_INFO, False)]) mock_is_qos_min_supported.assert_called_once_with(fake.POOL_NAME) mock_extract_host.assert_called_once_with(fake.NFS_VOLUME['host'], level='pool') self.assertEqual(1, mock_error_log.call_count) self.assertEqual(1, mock_debug_log.call_count) mock_cleanup.assert_has_calls([ mock.call(fake.NFS_VOLUME)]) def test_do_qos_for_volume_exception_no_cleanup(self): mock_get_info = self.mock_object(na_utils, 'get_valid_qos_policy_group_info') mock_get_info.side_effect = exception.Invalid mock_provision_qos = self.driver.zapi_client.provision_qos_policy_group mock_set_policy = self.mock_object(self.driver, '_set_qos_policy_group_on_volume') mock_error_log = self.mock_object(nfs_cmode.LOG, 'error') mock_debug_log = self.mock_object(nfs_cmode.LOG, 'debug') mock_cleanup = self.mock_object(self.driver, '_cleanup_volume_on_failure') self.assertRaises(exception.Invalid, self.driver._do_qos_for_volume, fake.NFS_VOLUME, fake.EXTRA_SPECS, cleanup=False) mock_get_info.assert_has_calls([ mock.call(fake.NFS_VOLUME, fake.EXTRA_SPECS)]) self.assertEqual(0, mock_provision_qos.call_count) self.assertEqual(0, mock_set_policy.call_count) self.assertEqual(1, mock_error_log.call_count) self.assertEqual(0, mock_debug_log.call_count) self.assertEqual(0, mock_cleanup.call_count) def test_set_qos_policy_group_on_volume(self): mock_get_name_from_info = self.mock_object( na_utils, 'get_qos_policy_group_name_from_info') mock_get_name_from_info.return_value = fake.QOS_POLICY_GROUP_NAME mock_extract_host = self.mock_object(volume_utils, 'extract_host') mock_extract_host.return_value = fake.NFS_SHARE mock_get_flex_vol_name =\ self.driver.zapi_client.get_vol_by_junc_vserver mock_get_flex_vol_name.return_value = fake.FLEXVOL mock_file_assign_qos = self.driver.zapi_client.file_assign_qos self.driver._set_qos_policy_group_on_volume( fake.NFS_VOLUME, fake.QOS_POLICY_GROUP_INFO, False) mock_get_name_from_info.assert_has_calls([ mock.call(fake.QOS_POLICY_GROUP_INFO)]) mock_extract_host.assert_has_calls([ mock.call(fake.NFS_HOST_STRING, level='pool')]) mock_get_flex_vol_name.assert_has_calls([ mock.call(fake.VSERVER_NAME, fake.EXPORT_PATH)]) mock_file_assign_qos.assert_has_calls([ 
mock.call(fake.FLEXVOL, fake.QOS_POLICY_GROUP_NAME, False, fake.NFS_VOLUME['name'])]) def test_set_qos_policy_group_on_volume_no_info(self): mock_get_name_from_info = self.mock_object( na_utils, 'get_qos_policy_group_name_from_info') mock_extract_host = self.mock_object(volume_utils, 'extract_host') mock_get_flex_vol_name =\ self.driver.zapi_client.get_vol_by_junc_vserver mock_file_assign_qos = self.driver.zapi_client.file_assign_qos self.driver._set_qos_policy_group_on_volume(fake.NFS_VOLUME, None, False) self.assertEqual(0, mock_get_name_from_info.call_count) self.assertEqual(0, mock_extract_host.call_count) self.assertEqual(0, mock_get_flex_vol_name.call_count) self.assertEqual(0, mock_file_assign_qos.call_count) def test_set_qos_policy_group_on_volume_no_name(self): mock_get_name_from_info = self.mock_object( na_utils, 'get_qos_policy_group_name_from_info') mock_get_name_from_info.return_value = None mock_extract_host = self.mock_object(volume_utils, 'extract_host') mock_get_flex_vol_name =\ self.driver.zapi_client.get_vol_by_junc_vserver mock_file_assign_qos = self.driver.zapi_client.file_assign_qos self.driver._set_qos_policy_group_on_volume( fake.NFS_VOLUME, fake.QOS_POLICY_GROUP_INFO, False) mock_get_name_from_info.assert_has_calls([ mock.call(fake.QOS_POLICY_GROUP_INFO)]) self.assertEqual(0, mock_extract_host.call_count) self.assertEqual(0, mock_get_flex_vol_name.call_count) self.assertEqual(0, mock_file_assign_qos.call_count) @ddt.data({'share': None, 'is_snapshot': False}, {'share': None, 'is_snapshot': True}, {'share': 'fake_share', 'is_snapshot': False}, {'share': 'fake_share', 'is_snapshot': True}) @ddt.unpack def test_clone_backing_file_for_volume(self, share, is_snapshot): mock_get_vserver_and_exp_vol = self.mock_object( self.driver, '_get_vserver_and_exp_vol', return_value=(fake.VSERVER_NAME, fake.FLEXVOL)) self.driver._clone_backing_file_for_volume( fake.FLEXVOL, 'fake_clone', fake.VOLUME_ID, share=share, is_snapshot=is_snapshot) mock_get_vserver_and_exp_vol.assert_called_once_with( fake.VOLUME_ID, share) self.driver.zapi_client.clone_file.assert_called_once_with( fake.FLEXVOL, fake.FLEXVOL, 'fake_clone', fake.VSERVER_NAME, is_snapshot=is_snapshot) def test__clone_backing_file_for_volume(self): self.driver.zapi_client.get_if_info_by_ip = mock.Mock( return_value=[{'ip': 'fake_ip'}]) self.driver.zapi_client.get_vol_by_junc_vserver = mock.Mock( return_value='nfsvol') self.mock_object(self.driver, '_get_export_ip_path', return_value=('127.0.0.1', 'fakepath')) retval = self.driver._clone_backing_file_for_volume( 'vol', 'clone', 'vol_id', share='share', is_snapshot=True) self.assertIsNone(retval) self.driver.zapi_client.clone_file.assert_called_once_with( 'nfsvol', 'vol', 'clone', None, is_snapshot=True) def test_copy_from_img_service_copyoffload_nonexistent_binary_path(self): self.mock_object(nfs_cmode.LOG, 'debug') drv = self.driver context = object() volume = {'id': 'vol_id', 'name': 'name', 'host': 'openstack@nfscmode#192.128.1.1:/mnt_point'} image_service = mock.Mock() image_service.get_location.return_value = (mock.Mock(), mock.Mock()) image_service.show.return_value = {'size': 0} image_id = 'image_id' drv._client = mock.Mock() drv._client.get_api_version = mock.Mock(return_value=(1, 20)) nfs_base.NetAppNfsDriver._find_image_in_cache = mock.Mock( return_value=[]) drv._construct_image_nfs_url = mock.Mock(return_value=["nfs://1"]) drv._check_get_nfs_path_segs = mock.Mock( return_value=("test:test", "dr")) drv._get_ip_verify_on_cluster = 
mock.Mock(return_value=("192.128.1.1", "vserver")) drv._get_mount_point_for_share = mock.Mock(return_value='mnt_point') drv._check_share_can_hold_size = mock.Mock() # Raise error as if the copyoffload file can not be found drv._clone_file_dst_exists = mock.Mock(side_effect=OSError()) drv._discover_file_till_timeout = mock.Mock() # Verify the original error is propagated self.assertRaises(OSError, drv._copy_from_img_service, context, volume, image_service, image_id) drv._discover_file_till_timeout.assert_not_called() @ddt.data(True, False) def test_copy_from_img_service_raw_copyoffload_workflow_success( self, use_tool): drv = self.driver volume = {'id': 'vol_id', 'name': 'name', 'size': 1, 'host': 'openstack@nfscmode#ip1:/mnt_point'} image_id = 'image_id' context = object() image_service = mock.Mock() image_service.get_location.return_value = ('nfs://ip1/openstack/img', None) image_service.show.return_value = {'size': 1, 'disk_format': 'raw'} drv._check_get_nfs_path_segs =\ mock.Mock(return_value=('ip1', '/openstack')) drv._get_ip_verify_on_cluster = mock.Mock(return_value=('ip1', 'vserver')) drv._get_host_ip = mock.Mock(return_value='ip2') drv._get_export_path = mock.Mock(return_value='/exp_path') drv._get_provider_location = mock.Mock(return_value='share') drv._execute = mock.Mock() drv._copy_file = mock.Mock() drv._get_mount_point_for_share = mock.Mock(return_value='mnt_point') drv._discover_file_till_timeout = mock.Mock(return_value=True) img_inf = mock.Mock() img_inf.file_format = 'raw' image_utils.qemu_img_info.return_value = img_inf drv._check_share_can_hold_size = mock.Mock() drv._move_nfs_file = mock.Mock(return_value=True) drv._delete_file_at_path = mock.Mock() drv._clone_file_dst_exists = mock.Mock() drv._post_clone_image = mock.Mock() retval = drv._copy_from_img_service( context, volume, image_service, image_id, use_copyoffload_tool=use_tool) self.assertTrue(retval) drv._get_ip_verify_on_cluster.assert_any_call('ip1') drv._check_share_can_hold_size.assert_called_with( 'ip1:/mnt_point', 1) if use_tool: self.assertEqual(1, drv._execute.call_count) self.assertEqual(0, drv._copy_file.call_count) else: self.assertEqual(1, drv._copy_file.call_count) self.assertEqual(0, drv._execute.call_count) @mock.patch.object(image_utils, 'convert_image') @mock.patch.object(image_utils, 'qemu_img_info') @mock.patch('os.path.exists') @mock.patch('cinder.privsep.path') def test_copy_from_img_service_qcow2_copyoffload_workflow_success( self, mock_touch, mock_exists, mock_qemu_img_info, mock_cvrt_image): drv = self.driver cinder_mount_point_base = '/opt/stack/data/cinder/mnt/' # To get the cinder mount point directory, we use: mount_dir = hashlib.md5( '203.0.113.122:/cinder-flexvol1'.encode('utf-8'), usedforsecurity=False).hexdigest() cinder_mount_point = cinder_mount_point_base + mount_dir destination_copied_file = ( '/cinder-flexvol1/a155308c-0290-497b-b278-4cdd01de0253' ) volume = {'id': 'vol_id', 'name': 'name', 'size': 1, 'host': 'openstack@nfscmode#203.0.113.122:/cinder-flexvol1'} image_id = 'image_id' context = object() image_service = mock.Mock() image_service.get_location.return_value = ( 'nfs://203.0.113.122/glance-flexvol1', None) image_service.show.return_value = {'size': 1, 'disk_format': 'qcow2'} drv._check_get_nfs_path_segs = ( mock.Mock(return_value=('203.0.113.122', '/openstack')) ) drv._get_ip_verify_on_cluster = mock.Mock( return_value=('203.0.113.122', 'vserver')) drv._execute = mock.Mock() drv._execute_as_root = False drv._get_mount_point_for_share = mock.Mock( 
return_value=cinder_mount_point) img_inf = mock.Mock() img_inf.file_format = 'raw' mock_qemu_img_info.return_value = img_inf drv._check_share_can_hold_size = mock.Mock() drv._move_nfs_file = mock.Mock(return_value=True) drv._delete_file_at_path = mock.Mock() drv._clone_file_dst_exists = mock.Mock() drv._post_clone_image = mock.Mock() self.mock_object(uuid, 'uuid4', mock.Mock( return_value='a155308c-0290-497b-b278-4cdd01de0253')) retval = drv._copy_from_img_service( context, volume, image_service, image_id) self.assertTrue(retval) drv._get_ip_verify_on_cluster.assert_any_call('203.0.113.122') drv._check_share_can_hold_size.assert_called_with( '203.0.113.122:/cinder-flexvol1', 1) # _execute must be called once for copy-offload and again to touch # the top directory to refresh cache drv._execute.assert_has_calls( [ mock.call( 'copyoffload_tool_path', '203.0.113.122', '203.0.113.122', '/openstack/glance-flexvol1', destination_copied_file, run_as_root=False, check_exit_code=0 ) ] ) self.assertEqual(1, drv._execute.call_count) self.assertEqual(2, drv._delete_file_at_path.call_count) self.assertEqual(1, drv._clone_file_dst_exists.call_count) def test_copy_from_cache_copyoffload_success(self): drv = self.driver volume = {'id': 'vol_id', 'name': 'name', 'size': 1, 'host': 'openstack@nfscmode#192.128.1.1:/exp_path'} image_id = 'image_id' cache_result = [('ip1:/openstack', 'img-cache-imgid')] drv._get_ip_verify_on_cluster = mock.Mock(return_value=('ip1', 'vserver')) drv._execute = mock.Mock() drv._register_image_in_cache = mock.Mock() drv._post_clone_image = mock.Mock() copied = drv._copy_from_cache(volume, image_id, cache_result) self.assertTrue(copied) drv._get_ip_verify_on_cluster.assert_any_call('ip1') drv._execute.assert_called_once_with( 'copyoffload_tool_path', 'ip1', 'ip1', '/openstack/img-cache-imgid', '/exp_path/name', run_as_root=False, check_exit_code=0) def test_unmanage(self): mock_get_info = self.mock_object(na_utils, 'get_valid_qos_policy_group_info') mock_get_info.return_value = fake.QOS_POLICY_GROUP_INFO mock_mark_for_deletion =\ self.driver.zapi_client.mark_qos_policy_group_for_deletion super_unmanage = self.mock_object(nfs_base.NetAppNfsDriver, 'unmanage') self.driver.unmanage(fake.NFS_VOLUME) mock_get_info.assert_has_calls([mock.call(fake.NFS_VOLUME)]) mock_mark_for_deletion.assert_has_calls([ mock.call(fake.QOS_POLICY_GROUP_INFO)]) super_unmanage.assert_has_calls([mock.call(fake.NFS_VOLUME)]) def test_unmanage_invalid_qos(self): mock_get_info = self.mock_object(na_utils, 'get_valid_qos_policy_group_info') mock_get_info.side_effect = exception.Invalid super_unmanage = self.mock_object(nfs_base.NetAppNfsDriver, 'unmanage') self.driver.unmanage(fake.NFS_VOLUME) mock_get_info.assert_has_calls([mock.call(fake.NFS_VOLUME)]) super_unmanage.assert_has_calls([mock.call(fake.NFS_VOLUME)]) def test_add_looping_tasks(self): mock_update_ssc = self.mock_object(self.driver, '_update_ssc') mock_handle_housekeeping = self.mock_object( self.driver, '_handle_housekeeping_tasks') mock_add_task = self.mock_object(self.driver.loopingcalls, 'add_task') mock_super_add_looping_tasks = self.mock_object( nfs_base.NetAppNfsDriver, '_add_looping_tasks') self.driver._add_looping_tasks() mock_update_ssc.assert_called_once_with() mock_add_task.assert_has_calls([ mock.call(mock_update_ssc, loopingcalls.ONE_HOUR, loopingcalls.ONE_HOUR), mock.call(mock_handle_housekeeping, loopingcalls.TEN_MINUTES, 0)]) mock_super_add_looping_tasks.assert_called_once_with() @ddt.data({'type_match': True, 'expected': True}, 
{'type_match': False, 'expected': False}) @ddt.unpack def test_is_share_clone_compatible(self, type_match, expected): mock_get_flexvol_name_for_share = self.mock_object( self.driver, '_get_flexvol_name_for_share', return_value='fake_flexvol') mock_is_share_vol_type_match = self.mock_object( self.driver, '_is_share_vol_type_match', return_value=type_match) result = self.driver._is_share_clone_compatible(fake.VOLUME, fake.NFS_SHARE) self.assertEqual(expected, result) mock_get_flexvol_name_for_share.assert_called_once_with(fake.NFS_SHARE) mock_is_share_vol_type_match.assert_called_once() @ddt.data({'flexvols': ['volume1', 'volume2'], 'expected': True}, {'flexvols': ['volume3', 'volume4'], 'expected': False}, {'flexvols': [], 'expected': False}) @ddt.unpack def test_is_share_vol_type_match(self, flexvols, expected): mock_get_volume_extra_specs = self.mock_object( na_utils, 'get_volume_extra_specs', return_value='fake_extra_specs') mock_get_matching_flexvols_for_extra_specs = self.mock_object( self.driver.ssc_library, 'get_matching_flexvols_for_extra_specs', return_value=flexvols) result = self.driver._is_share_vol_type_match(fake.VOLUME, fake.NFS_SHARE, 'volume1') self.assertEqual(expected, result) mock_get_volume_extra_specs.assert_called_once_with(fake.VOLUME) mock_get_matching_flexvols_for_extra_specs.assert_called_once_with( 'fake_extra_specs') @ddt.data({'share': 'volume1', 'expected': 'volume1'}, {'share': 'volume3', 'expected': None}) @ddt.unpack def test_get_flexvol_name_for_share(self, share, expected): mock_get_ssc = self.mock_object( self.driver.ssc_library, 'get_ssc', return_value=fake_ssc.SSC) result = self.driver._get_flexvol_name_for_share(share) self.assertEqual(expected, result) mock_get_ssc.assert_called_once_with() def test_get_flexvol_name_for_share_no_ssc_vols(self): mock_get_ssc = self.mock_object( self.driver.ssc_library, 'get_ssc', return_value={}) result = self.driver._get_flexvol_name_for_share('fake_share') self.assertIsNone(result) mock_get_ssc.assert_called_once_with() def test_find_image_location_with_local_copy(self): local_share = '/share' cache_result = [ ('ip1:/openstack', 'img-cache-imgid'), ('ip2:/openstack', 'img-cache-imgid'), (local_share, 'img-cache-imgid'), ('ip3:/openstack', 'img-cache-imgid'), ] mock_extract_host = self.mock_object(volume_utils, 'extract_host') mock_extract_host.return_value = local_share cache_copy, found_local_copy = self.driver._find_image_location( cache_result, fake.VOLUME) self.assertEqual(cache_result[2], cache_copy) self.assertTrue(found_local_copy) def test_find_image_location_with_remote_copy(self): cache_result = [('ip1:/openstack', 'img-cache-imgid')] mock_extract_host = self.mock_object(volume_utils, 'extract_host') mock_extract_host.return_value = '/share' cache_copy, found_local_copy = self.driver._find_image_location( cache_result, fake.VOLUME) self.assertEqual(cache_result[0], cache_copy) self.assertFalse(found_local_copy) def test_find_image_location_without_cache_copy(self): cache_result = [] mock_extract_host = self.mock_object(volume_utils, 'extract_host') mock_extract_host.return_value = '/share' cache_copy, found_local_copy = self.driver._find_image_location( cache_result, fake.VOLUME) self.assertIsNone(cache_copy) self.assertFalse(found_local_copy) def test_clone_file_dest_exists(self): self.driver._get_vserver_and_exp_vol = mock.Mock( return_value=(fake.VSERVER_NAME, fake.EXPORT_PATH)) self.driver.zapi_client.clone_file = mock.Mock() self.driver._clone_file_dst_exists( fake.NFS_SHARE, fake.IMAGE_FILE_ID, 
fake.VOLUME['name'], dest_exists=True) self.driver._get_vserver_and_exp_vol.assert_called_once_with( share=fake.NFS_SHARE) self.driver.zapi_client.clone_file.assert_called_once_with( fake.EXPORT_PATH, fake.IMAGE_FILE_ID, fake.VOLUME['name'], fake.VSERVER_NAME, dest_exists=True) @ddt.data((fake.NFS_SHARE, fake.SHARE_IP), (fake.NFS_SHARE_IPV6, fake.IPV6_ADDRESS)) @ddt.unpack def test_get_source_ip_and_path(self, share, ip): self.driver._get_ip_verify_on_cluster = mock.Mock( return_value=(ip, fake.VSERVER_NAME)) src_ip, src_vserver, src_share, src_path = ( self.driver._get_source_ip_and_path( share, fake.IMAGE_FILE_ID)) self.assertEqual(ip, src_ip) self.assertEqual(fake.VSERVER_NAME, src_vserver) self.assertEqual(fake.EXPORT_PATH, src_share) assert_path = fake.EXPORT_PATH + '/' + fake.IMAGE_FILE_ID self.assertEqual(assert_path, src_path) self.driver._get_ip_verify_on_cluster.assert_called_once_with(ip) def test_get_destination_ip_and_path(self): self.driver._get_ip_verify_on_cluster = mock.Mock( return_value=(fake.SHARE_IP, fake.VSERVER_NAME)) mock_extract_host = self.mock_object(volume_utils, 'extract_host') mock_extract_host.return_value = fake.NFS_SHARE dest_ip, dest_vserver, dest_path = ( self.driver._get_destination_ip_and_path(fake.VOLUME)) self.assertEqual(fake.VSERVER_NAME, dest_vserver) self.assertEqual(fake.SHARE_IP, dest_ip) assert_path = fake.EXPORT_PATH + '/' + fake.LUN_NAME self.assertEqual(assert_path, dest_path) self.driver._get_ip_verify_on_cluster.assert_called_once_with( fake.SHARE_IP) def test_clone_image_copyoffload_from_cache_success(self): drv = self.driver context = object() volume = {'id': 'vol_id', 'name': 'name', 'host': 'openstack@nfscmode#192.128.1.1:/mnt_point'} image_service = object() image_location = 'img-loc' image_id = 'image_id' image_meta = {'id': image_id} drv.zapi_client = mock.Mock() drv.zapi_client.get_ontapi_version = mock.Mock(return_value=(1, 20)) nfs_base.NetAppNfsDriver._find_image_in_cache = mock.Mock( return_value=[('share', 'img')]) nfs_base.NetAppNfsDriver._direct_nfs_clone = mock.Mock( return_value=False) drv._copy_from_cache = mock.Mock(return_value=True) drv._is_flexgroup = mock.Mock(return_value=False) drv._is_flexgroup_clone_file_supported = mock.Mock(return_value=True) drv.clone_image(context, volume, image_location, image_meta, image_service) drv._copy_from_cache.assert_called_once_with( volume, image_id, [('share', 'img')]) drv.clone_image(context, volume, image_location, image_meta, image_service) def test_clone_image_flexgroup(self): self.driver._is_flexgroup = mock.Mock(return_value=True) mock_clone_file = self.mock_object( self.driver, '_is_flexgroup_clone_file_supported', return_value=False) volume = {'host': 'openstack@nfscmode#192.128.1.1:/mnt_point'} context = object() model, cloned = self.driver.clone_image( context, volume, 'fake_loc', 'fake_img', 'fake_img_service') self.assertFalse(cloned) self.assertIsNone(model) self.driver._is_flexgroup.assert_called_once_with(host=volume['host']) mock_clone_file.assert_called_once_with() @ddt.data(True, False) def test_clone_image_from_img_service(self, use_tool): drv = self.driver context = object() volume = {'id': 'vol_id', 'name': 'name', 'host': 'openstack@nfscmode#192.128.1.1:/mnt_point', 'provider_location': '192.128.1.1:/mnt_point'} image_service = object() image_id = 'image_id' image_meta = {'id': image_id} image_location = 'img-loc' drv.zapi_client = mock.Mock() drv.zapi_client.get_ontapi_version = mock.Mock(return_value=(1, 20)) nfs_base.NetAppNfsDriver._find_image_in_cache = 
mock.Mock( return_value=[]) nfs_base.NetAppNfsDriver._direct_nfs_clone = mock.Mock( return_value=False) nfs_base.NetAppNfsDriver._post_clone_image = mock.Mock( return_value=True) drv._copy_from_img_service = mock.Mock(return_value=True) drv._is_flexgroup = mock.Mock(return_value=False) drv._is_flexgroup_clone_file_supported = mock.Mock(return_value=True) if not use_tool: drv.configuration.netapp_copyoffload_tool_path = None retval = drv.clone_image( context, volume, image_location, image_meta, image_service) self.assertEqual(retval, ( {'provider_location': '192.128.1.1:/mnt_point', 'bootable': True}, True)) drv._copy_from_img_service.assert_called_once_with( context, volume, image_service, image_id, use_copyoffload_tool=use_tool) def test_clone_image_copyoffload_failure(self): mock_log = self.mock_object(nfs_cmode, 'LOG') drv = self.driver context = object() volume = {'id': 'vol_id', 'name': 'name', 'host': 'host'} image_service = object() image_id = 'image_id' image_meta = {'id': image_id} image_location = 'img-loc' drv.zapi_client = mock.Mock() drv.zapi_client.get_ontapi_version = mock.Mock(return_value=(1, 20)) nfs_base.NetAppNfsDriver._find_image_in_cache = mock.Mock( return_value=[]) nfs_base.NetAppNfsDriver._direct_nfs_clone = mock.Mock( return_value=False) drv._copy_from_img_service = mock.Mock(side_effect=Exception()) drv._is_flexgroup = mock.Mock(return_value=False) drv._is_flexgroup_clone_file_supported = mock.Mock(return_value=True) retval = drv.clone_image( context, volume, image_location, image_meta, image_service) self.assertEqual(retval, ({'bootable': False, 'provider_location': None}, False)) drv._copy_from_img_service.assert_called_once_with( context, volume, image_service, image_id, use_copyoffload_tool=True) mock_log.info.assert_not_called() @ddt.data(True, False) def test_copy_from_remote_cache(self, use_tool): source_ip = '192.0.1.1' source_path = '/openstack/img-cache-imgid' source_vserver = 'fake_vserver' source_share = 'vol_fake' cache_copy = ('192.0.1.1:/openstack', fake.IMAGE_FILE_ID) dest_vserver = 'fake_dest_vserver' dest_path = fake.EXPORT_PATH + '/' + fake.VOLUME['name'] self.driver._execute = mock.Mock() self.driver._copy_file = mock.Mock() self.driver._get_source_ip_and_path = mock.Mock( return_value=( source_ip, source_vserver, source_share, source_path)) self.driver._get_destination_ip_and_path = mock.Mock( return_value=(fake.SHARE_IP, dest_vserver, dest_path)) self.driver._register_image_in_cache = mock.Mock() ctxt = mock.Mock() vol_fields = {'id': fake.VOLUME_ID, 'name': fake.VOLUME_NAME} fake_vol = fake_volume.fake_volume_obj(ctxt, **vol_fields) self.driver._copy_from_remote_cache( fake_vol, fake.IMAGE_FILE_ID, cache_copy, use_copyoffload_tool=use_tool) if use_tool: self.driver._execute.assert_called_once_with( 'copyoffload_tool_path', source_ip, fake.SHARE_IP, source_path, dest_path, run_as_root=False, check_exit_code=0) self.driver._copy_file.assert_not_called() else: dest_share_path = dest_path.rsplit("/", 1)[0] self.driver._copy_file.assert_called_once_with( fake.IMAGE_FILE_ID, fake.IMAGE_FILE_ID, source_share, source_vserver, dest_share_path, dest_vserver, dest_backend_name=self.driver.backend_name, dest_file_name=fake_vol.name) self.driver._execute.assert_not_called() self.driver._get_source_ip_and_path.assert_called_once_with( cache_copy[0], fake.IMAGE_FILE_ID) self.driver._get_destination_ip_and_path.assert_called_once_with( fake_vol) self.driver._register_image_in_cache.assert_called_once_with( fake_vol, fake.IMAGE_FILE_ID) @ddt.data(True, 
False) def test_copy_from_cache_workflow_remote_location(self, use_tool): cache_result = [('ip1:/openstack', fake.IMAGE_FILE_ID), ('ip2:/openstack', fake.IMAGE_FILE_ID), ('ip3:/openstack', fake.IMAGE_FILE_ID)] self.driver._find_image_location = mock.Mock(return_value=[ cache_result[0], False]) self.driver._copy_from_remote_cache = mock.Mock() self.driver._post_clone_image = mock.Mock() if not use_tool: self.driver.configuration.netapp_copyoffload_tool_path = None copied = self.driver._copy_from_cache( fake.VOLUME, fake.IMAGE_FILE_ID, cache_result) self.assertTrue(copied) if use_tool: self.driver._copy_from_remote_cache.assert_called_once_with( fake.VOLUME, fake.IMAGE_FILE_ID, cache_result[0]) else: self.driver._copy_from_remote_cache.assert_called_once_with( fake.VOLUME, fake.IMAGE_FILE_ID, cache_result[0], use_copyoffload_tool=False) def test_copy_from_cache_workflow_local_location(self): local_share = '/share' cache_result = [ ('ip1:/openstack', 'img-cache-imgid'), ('ip2:/openstack', 'img-cache-imgid'), (local_share, 'img-cache-imgid'), ('ip3:/openstack', 'img-cache-imgid'), ] self.driver._find_image_location = mock.Mock(return_value=[ cache_result[2], True]) self.driver._clone_file_dst_exists = mock.Mock() self.driver._post_clone_image = mock.Mock() copied = self.driver._copy_from_cache( fake.VOLUME, fake.IMAGE_FILE_ID, cache_result) self.assertTrue(copied) self.driver._clone_file_dst_exists.assert_called_once_with( local_share, fake.IMAGE_FILE_ID, fake.VOLUME['name'], dest_exists=True) def test_copy_from_cache_workflow_no_location(self): cache_result = [] self.driver._find_image_location = mock.Mock( return_value=(None, False)) copied = self.driver._copy_from_cache( fake.VOLUME, fake.IMAGE_FILE_ID, cache_result) self.assertFalse(copied) @ddt.data(True, False) def test_copy_from_cache_workflow_exception(self, use_tool): cache_result = [('ip1:/openstack', fake.IMAGE_FILE_ID)] self.driver._find_image_location = mock.Mock(return_value=[ cache_result[0], False]) self.driver._copy_from_remote_cache = mock.Mock( side_effect=Exception) self.driver._post_clone_image = mock.Mock() if not use_tool: self.driver.configuration.netapp_copyoffload_tool_path = None copied = self.driver._copy_from_cache( fake.VOLUME, fake.IMAGE_FILE_ID, cache_result) self.assertFalse(copied) if use_tool: self.driver._copy_from_remote_cache.assert_called_once_with( fake.VOLUME, fake.IMAGE_FILE_ID, cache_result[0]) else: self.driver._copy_from_remote_cache.assert_called_once_with( fake.VOLUME, fake.IMAGE_FILE_ID, cache_result[0], use_copyoffload_tool=False) self.assertFalse(self.driver._post_clone_image.called) @ddt.data({'secondary_id': 'dev0', 'configured_targets': ['dev1']}, {'secondary_id': 'dev3', 'configured_targets': ['dev1', 'dev2']}, {'secondary_id': 'dev1', 'configured_targets': []}, {'secondary_id': None, 'configured_targets': []}) @ddt.unpack def test_failover_host_invalid_replication_target(self, secondary_id, configured_targets): """This tests executes a method in the DataMotionMixin.""" self.driver.backend_name = 'dev0' self.mock_object(data_motion.DataMotionMixin, 'get_replication_backend_names', return_value=configured_targets) complete_failover_call = self.mock_object( data_motion.DataMotionMixin, '_complete_failover') self.assertRaises(exception.InvalidReplicationTarget, self.driver.failover_host, 'fake_context', [], secondary_id=secondary_id) self.assertFalse(complete_failover_call.called) def test_failover_host_unable_to_failover(self): """This tests executes a method in the DataMotionMixin.""" 
self.driver.backend_name = 'dev0' self.mock_object(data_motion.DataMotionMixin, '_complete_failover', side_effect=na_utils.NetAppDriverException) self.mock_object(data_motion.DataMotionMixin, 'get_replication_backend_names', return_value=['dev1', 'dev2']) self.mock_object(self.driver.ssc_library, 'get_ssc_flexvol_names', return_value=fake_ssc.SSC.keys()) self.mock_object(self.driver, '_update_zapi_client') self.assertRaises(exception.UnableToFailOver, self.driver.failover_host, 'fake_context', [], secondary_id='dev1') data_motion.DataMotionMixin._complete_failover.assert_called_once_with( 'dev0', ['dev1', 'dev2'], fake_ssc.SSC.keys(), [], failover_target='dev1') self.assertFalse(self.driver._update_zapi_client.called) def test_failover_host(self): """This tests executes a method in the DataMotionMixin.""" self.driver.backend_name = 'dev0' self.mock_object(data_motion.DataMotionMixin, '_complete_failover', return_value=('dev1', [])) self.mock_object(data_motion.DataMotionMixin, 'get_replication_backend_names', return_value=['dev1', 'dev2']) self.mock_object(self.driver.ssc_library, 'get_ssc_flexvol_names', return_value=fake_ssc.SSC.keys()) self.mock_object(self.driver, '_update_zapi_client') actual_active, vol_updates, __ = self.driver.failover_host( 'fake_context', [], secondary_id='dev1', groups=[]) data_motion.DataMotionMixin._complete_failover.assert_called_once_with( 'dev0', ['dev1', 'dev2'], fake_ssc.SSC.keys(), [], failover_target='dev1') self.driver._update_zapi_client.assert_called_once_with('dev1') self.assertTrue(self.driver.failed_over) self.assertEqual('dev1', self.driver.failed_over_backend_name) self.assertEqual('dev1', actual_active) self.assertEqual([], vol_updates) @ddt.data({'secondary_id': 'dev0', 'configured_targets': ['dev1']}, {'secondary_id': 'dev3', 'configured_targets': ['dev1', 'dev2']}, {'secondary_id': 'dev1', 'configured_targets': []}, {'secondary_id': None, 'configured_targets': []}) @ddt.unpack def test_failover_invalid_replication_target(self, secondary_id, configured_targets): """This tests executes a method in the DataMotionMixin.""" self.driver.backend_name = 'dev0' self.mock_object(data_motion.DataMotionMixin, 'get_replication_backend_names', return_value=configured_targets) complete_failover_call = self.mock_object( data_motion.DataMotionMixin, '_complete_failover') self.assertRaises(exception.InvalidReplicationTarget, self.driver.failover, 'fake_context', [], secondary_id=secondary_id) self.assertFalse(complete_failover_call.called) def test_failover_unable_to_failover(self): """This tests executes a method in the DataMotionMixin.""" self.driver.backend_name = 'dev0' self.mock_object(data_motion.DataMotionMixin, '_complete_failover', side_effect=na_utils.NetAppDriverException) self.mock_object(data_motion.DataMotionMixin, 'get_replication_backend_names', return_value=['dev1', 'dev2']) self.mock_object(self.driver.ssc_library, 'get_ssc_flexvol_names', return_value=fake_ssc.SSC.keys()) self.mock_object(self.driver, '_update_zapi_client') self.assertRaises(exception.UnableToFailOver, self.driver.failover, 'fake_context', [], secondary_id='dev1') data_motion.DataMotionMixin._complete_failover.assert_called_once_with( 'dev0', ['dev1', 'dev2'], fake_ssc.SSC.keys(), [], failover_target='dev1') self.assertFalse(self.driver._update_zapi_client.called) def test_failover(self): """This tests executes a method in the DataMotionMixin.""" self.driver.backend_name = 'dev0' self.mock_object(data_motion.DataMotionMixin, '_complete_failover', return_value=('dev1', [])) 
self.mock_object(data_motion.DataMotionMixin, 'get_replication_backend_names', return_value=['dev1', 'dev2']) self.mock_object(self.driver.ssc_library, 'get_ssc_flexvol_names', return_value=fake_ssc.SSC.keys()) self.mock_object(self.driver, '_update_zapi_client') actual_active, vol_updates, __ = self.driver.failover( 'fake_context', [], secondary_id='dev1', groups=[]) data_motion.DataMotionMixin._complete_failover.assert_called_once_with( 'dev0', ['dev1', 'dev2'], fake_ssc.SSC.keys(), [], failover_target='dev1') def test_failover_completed(self): self.mock_object(self.driver, '_update_zapi_client') self.driver.failover_completed('fake_context', secondary_id='dev1') self.driver._update_zapi_client.assert_called_once_with('dev1') self.assertTrue(self.driver.failed_over) self.assertEqual('dev1', self.driver.failed_over_backend_name) def test_delete_group_snapshot(self): mock_delete_backing_file = self.mock_object( self.driver, '_delete_backing_file_for_snapshot') snapshots = [fake.VG_SNAPSHOT] model_update, snapshots_model_update = ( self.driver.delete_group_snapshot( fake.VG_CONTEXT, fake.VG_SNAPSHOT, snapshots)) mock_delete_backing_file.assert_called_once_with(fake.VG_SNAPSHOT) self.assertIsNone(model_update) self.assertIsNone(snapshots_model_update) def test_get_snapshot_backing_flexvol_names(self): snapshots = [ {'volume': {'host': 'hostA@192.168.99.25#/fake/volume1'}}, {'volume': {'host': 'hostA@192.168.1.01#/fake/volume2'}}, {'volume': {'host': 'hostA@192.168.99.25#/fake/volume3'}}, {'volume': {'host': 'hostA@192.168.99.25#/fake/volume1'}}, ] ssc = { 'volume1': {'pool_name': '/fake/volume1', }, 'volume2': {'pool_name': '/fake/volume2', }, 'volume3': {'pool_name': '/fake/volume3', }, } mock_get_ssc = self.mock_object(self.driver.ssc_library, 'get_ssc') mock_get_ssc.return_value = ssc hosts = [snap['volume']['host'] for snap in snapshots] flexvols = self.driver._get_flexvol_names_from_hosts(hosts) mock_get_ssc.assert_called_once_with() self.assertEqual(3, len(flexvols)) self.assertIn('volume1', flexvols) self.assertIn('volume2', flexvols) self.assertIn('volume3', flexvols) def test_get_backing_flexvol_names(self): mock_ssc_library = self.mock_object( self.driver.ssc_library, 'get_ssc') self.driver._get_backing_flexvol_names() mock_ssc_library.assert_called_once_with() def test_create_group(self): mock_flexgroup = self.mock_object(self.driver, '_is_flexgroup', return_value=False) self.mock_object(volume_utils, 'is_group_a_cg_snapshot_type', return_value=False) model_update = self.driver.create_group( fake.VG_CONTEXT, fake.VOLUME_GROUP) self.assertEqual('available', model_update['status']) mock_flexgroup.assert_called_once_with(host=fake.VOLUME_GROUP['host']) def test_create_group_raises(self): mock_flexgroup = self.mock_object(self.driver, '_is_flexgroup', return_value=True) mock_is_cg = self.mock_object(volume_utils, 'is_group_a_cg_snapshot_type', return_value=True) self.assertRaises( na_utils.NetAppDriverException, self.driver.create_group, fake.VG_CONTEXT, fake.VOLUME_GROUP) mock_flexgroup.assert_called_once_with(host=fake.VOLUME_GROUP['host']) mock_is_cg.assert_called_once_with(fake.VOLUME_GROUP) def test_update_group(self): mock_is_cg = self.mock_object( volume_utils, 'is_group_a_cg_snapshot_type', return_value=False) model_update, add_volumes_update, remove_volumes_update = ( self.driver.update_group(fake.VG_CONTEXT, "foo")) self.assertIsNone(add_volumes_update) self.assertIsNone(remove_volumes_update) mock_is_cg.assert_called_once_with("foo") def test_update_group_raises(self): 
mock_is_cg = self.mock_object( volume_utils, 'is_group_a_cg_snapshot_type', return_value=True) mock_is_flexgroup = self.mock_object( self.driver, '_is_flexgroup', return_value=True) self.assertRaises( na_utils.NetAppDriverException, self.driver.update_group, fake.VG_CONTEXT, "foo", add_volumes=[fake.VOLUME]) mock_is_cg.assert_called_once_with("foo") mock_is_flexgroup.assert_called_once_with(host=fake.VOLUME['host']) @ddt.data(None, {'replication_status': fields.ReplicationStatus.ENABLED}) def test_create_group_from_src(self, volume_model_update): volume_model_update = volume_model_update or {} volume_model_update.update( {'provider_location': fake.PROVIDER_LOCATION}) mock_create_volume_from_snapshot = self.mock_object( self.driver, 'create_volume_from_snapshot', return_value=volume_model_update) model_update, volumes_model_update = ( self.driver.create_group_from_src( fake.VG_CONTEXT, fake.VOLUME_GROUP, [fake.VOLUME], group_snapshot=fake.VG_SNAPSHOT, sorted_snapshots=[fake.SNAPSHOT])) expected_volumes_model_updates = [{'id': fake.VOLUME['id']}] expected_volumes_model_updates[0].update(volume_model_update) mock_create_volume_from_snapshot.assert_called_once_with( fake.VOLUME, fake.SNAPSHOT) self.assertIsNone(model_update) self.assertEqual(expected_volumes_model_updates, volumes_model_update) @ddt.data(None, {'replication_status': fields.ReplicationStatus.ENABLED}) def test_create_group_from_src_source_vols(self, volume_model_update): self.driver.zapi_client = mock.Mock() mock_get_snapshot_flexvols = self.mock_object( self.driver, '_get_flexvol_names_from_hosts') mock_get_snapshot_flexvols.return_value = (set([fake.VG_POOL_NAME])) mock_clone_backing_file = self.mock_object( self.driver, '_clone_backing_file_for_volume') fake_snapshot_name = 'snapshot-temp-' + fake.VOLUME_GROUP['id'] mock_busy = self.mock_object( self.driver.zapi_client, 'wait_for_busy_snapshot') self.mock_object(self.driver, '_get_volume_model_update', return_value=volume_model_update) mock_is_flexgroup = self.mock_object(self.driver, '_is_flexgroup', return_value=False) model_update, volumes_model_update = ( self.driver.create_group_from_src( fake.VG_CONTEXT, fake.VOLUME_GROUP, [fake.VG_VOLUME], source_group=fake.VOLUME_GROUP, sorted_source_vols=[fake.SOURCE_VG_VOLUME])) expected_volumes_model_updates = [{ 'id': fake.VG_VOLUME['id'], 'provider_location': fake.PROVIDER_LOCATION, }] if volume_model_update: expected_volumes_model_updates[0].update(volume_model_update) mock_is_flexgroup.assert_called_once_with( host=fake.SOURCE_VG_VOLUME['host']) mock_get_snapshot_flexvols.assert_called_once_with( [fake.SOURCE_VG_VOLUME['host']]) self.driver.zapi_client.create_cg_snapshot.assert_called_once_with( set([fake.VG_POOL_NAME]), fake_snapshot_name) mock_clone_backing_file.assert_called_once_with( fake.SOURCE_VG_VOLUME['name'], fake.VG_VOLUME['name'], fake.SOURCE_VG_VOLUME['id'], source_snapshot=fake_snapshot_name) mock_busy.assert_called_once_with( fake.VG_POOL_NAME, fake_snapshot_name) self.driver.zapi_client.delete_snapshot.assert_called_once_with( fake.VG_POOL_NAME, fake_snapshot_name) self.assertIsNone(model_update) self.assertEqual(expected_volumes_model_updates, volumes_model_update) @ddt.data( {'error': na_utils.NetAppDriverException, 'is_cg': True}, {'error': NotImplementedError, 'is_cg': False}) @ddt.unpack def test_create_group_from_src_raises(self, error, is_cg): self.mock_object(volume_utils, 'is_group_a_cg_snapshot_type', return_value=is_cg) mock_is_flexgroup = self.mock_object(self.driver, '_is_flexgroup', 
return_value=True) self.assertRaises( error, self.driver.create_group_from_src, fake.VG_CONTEXT, fake.VOLUME_GROUP, [fake.VG_VOLUME], source_group=fake.VOLUME_GROUP, sorted_source_vols=[fake.SOURCE_VG_VOLUME]) mock_is_flexgroup.assert_called_once_with( host=fake.SOURCE_VG_VOLUME['host']) def test_create_group_from_src_invalid_parms(self): model_update, volumes_model_update = ( self.driver.create_group_from_src( fake.VG_CONTEXT, fake.VOLUME_GROUP, [fake.VOLUME])) self.assertIn('error', model_update['status']) def test_create_group_snapshot_raise_exception(self): mock_is_cg_snapshot = self.mock_object( volume_utils, 'is_group_a_cg_snapshot_type', return_value=True) mock__get_flexvol_names = self.mock_object( self.driver, '_get_flexvol_names_from_hosts') self.mock_object(self.driver, '_is_flexgroup', return_value=False) self.mock_object(self.driver.zapi_client, 'create_cg_snapshot', side_effect=netapp_api.NaApiError) self.assertRaises(na_utils.NetAppDriverException, self.driver.create_group_snapshot, fake.VG_CONTEXT, fake.VOLUME_GROUP, [fake.VG_SNAPSHOT]) mock_is_cg_snapshot.assert_called_once_with(fake.VOLUME_GROUP) mock__get_flexvol_names.assert_called_once_with( [fake.VG_SNAPSHOT['volume']['host']]) def test_create_group_snapshot(self): mock_is_cg_snapshot = self.mock_object( volume_utils, 'is_group_a_cg_snapshot_type', return_value=False) mock_create_snapshot = self.mock_object( self.driver, 'create_snapshot') model_update, snapshots_model_update = ( self.driver.create_group_snapshot(fake.VG_CONTEXT, fake.VOLUME_GROUP, [fake.SNAPSHOT])) self.assertIsNone(model_update) self.assertIsNone(snapshots_model_update) mock_is_cg_snapshot.assert_called_once_with(fake.VOLUME_GROUP) mock_create_snapshot.assert_called_once_with(fake.SNAPSHOT) def test_create_consistent_group_snapshot(self): mock_is_cg_snapshot = self.mock_object( volume_utils, 'is_group_a_cg_snapshot_type', return_value=True) self.driver.zapi_client = mock.Mock() mock_get_snapshot_flexvols = self.mock_object( self.driver, '_get_flexvol_names_from_hosts') mock_get_snapshot_flexvols.return_value = (set([fake.VG_POOL_NAME])) mock_clone_backing_file = self.mock_object( self.driver, '_clone_backing_file_for_volume') mock_busy = self.mock_object( self.driver.zapi_client, 'wait_for_busy_snapshot') mock_is_flexgroup = self.mock_object( self.driver, '_is_flexgroup') mock_is_flexgroup.return_value = False model_update, snapshots_model_update = ( self.driver.create_group_snapshot(fake.VG_CONTEXT, fake.VOLUME_GROUP, [fake.VG_SNAPSHOT])) self.assertIsNone(model_update) self.assertIsNone(snapshots_model_update) mock_is_flexgroup.assert_called_once_with( host=fake.VG_SNAPSHOT['volume']['host']) mock_is_cg_snapshot.assert_called_once_with(fake.VOLUME_GROUP) mock_get_snapshot_flexvols.assert_called_once_with( [fake.VG_SNAPSHOT['volume']['host']]) self.driver.zapi_client.create_cg_snapshot.assert_called_once_with( set([fake.VG_POOL_NAME]), fake.VOLUME_GROUP_ID) mock_clone_backing_file.assert_called_once_with( fake.VG_SNAPSHOT['volume']['name'], fake.VG_SNAPSHOT['name'], fake.VG_SNAPSHOT['volume']['id'], source_snapshot=fake.VOLUME_GROUP_ID) mock_busy.assert_called_once_with( fake.VG_POOL_NAME, fake.VOLUME_GROUP_ID) self.driver.zapi_client.delete_snapshot.assert_called_once_with( fake.VG_POOL_NAME, fake.VOLUME_GROUP_ID) def test_create_consistent_group_snapshot_flexgroup(self): mock_is_cg_snapshot = self.mock_object( volume_utils, 'is_group_a_cg_snapshot_type', return_value=True) mock_is_flexgroup = self.mock_object( self.driver, '_is_flexgroup') 
mock_is_flexgroup.return_value = True self.assertRaises(na_utils.NetAppDriverException, self.driver.create_group_snapshot, fake.VG_CONTEXT, fake.VOLUME_GROUP, [fake.VG_SNAPSHOT]) mock_is_cg_snapshot.assert_called_once_with(fake.VOLUME_GROUP) mock_is_flexgroup.assert_called_once_with( host=fake.VG_SNAPSHOT['volume']['host']) def test_create_group_snapshot_busy_snapshot(self): self.mock_object(volume_utils, 'is_group_a_cg_snapshot_type', return_value=True) mock_is_flexgroup = self.mock_object( self.driver, '_is_flexgroup') mock_is_flexgroup.return_value = False self.driver.zapi_client = mock.Mock() snapshot = fake.VG_SNAPSHOT snapshot['volume'] = fake.VG_VOLUME mock_get_snapshot_flexvols = self.mock_object( self.driver, '_get_flexvol_names_from_hosts') mock_get_snapshot_flexvols.return_value = (set([fake.VG_POOL_NAME])) mock_clone_backing_file = self.mock_object( self.driver, '_clone_backing_file_for_volume') mock_busy = self.mock_object( self.driver.zapi_client, 'wait_for_busy_snapshot') mock_busy.side_effect = exception.SnapshotIsBusy(snapshot['name']) mock_mark_snapshot_for_deletion = self.mock_object( self.driver.zapi_client, 'mark_snapshot_for_deletion') self.driver.create_group_snapshot( fake.VG_CONTEXT, fake.VG_SNAPSHOT, [snapshot]) mock_get_snapshot_flexvols.assert_called_once_with( [snapshot['volume']['host']]) mock_is_flexgroup.assert_called_once_with( host=snapshot['volume']['host']) self.driver.zapi_client.create_cg_snapshot.assert_called_once_with( set([fake.VG_POOL_NAME]), fake.VG_SNAPSHOT_ID) mock_clone_backing_file.assert_called_once_with( snapshot['volume']['name'], snapshot['name'], snapshot['volume']['id'], source_snapshot=fake.VG_SNAPSHOT_ID) mock_busy.assert_called_once_with( fake.VG_POOL_NAME, fake.VG_SNAPSHOT_ID) self.driver.zapi_client.delete_snapshot.assert_not_called() mock_mark_snapshot_for_deletion.assert_called_once_with( fake.VG_POOL_NAME, fake.VG_SNAPSHOT_ID) def test_delete_group_volume_delete_failure(self): self.mock_object(self.driver, 'delete_volume', side_effect=Exception) model_update, volumes = self.driver.delete_group( fake.VG_CONTEXT, fake.VOLUME_GROUP, [fake.VG_VOLUME]) self.assertEqual('deleted', model_update['status']) self.assertEqual('error_deleting', volumes[0]['status']) def test_delete_group(self): mock_delete_file = self.mock_object( self.driver, 'delete_volume') model_update, volumes = self.driver.delete_group( fake.VG_CONTEXT, fake.VOLUME_GROUP, [fake.VG_VOLUME]) self.assertEqual('deleted', model_update['status']) self.assertEqual('deleted', volumes[0]['status']) mock_delete_file.assert_called_once_with(fake.VG_VOLUME) def test__is_flexgroup_clone_file_supported(self): self.driver.zapi_client = mock.Mock(features=mock.Mock( FLEXGROUP_CLONE_FILE=True)) is_fg_clone = self.driver._is_flexgroup_clone_file_supported() self.assertTrue(is_fg_clone) def test_copy_file(self): self.driver.configuration.netapp_migrate_volume_timeout = 1 fake_job_status = {'job-status': 'complete'} mock_start_file_copy = self.mock_object(self.driver.zapi_client, 'start_file_copy', return_value=fake.JOB_UUID) mock_get_file_copy_status = self.mock_object( self.driver.zapi_client, 'get_file_copy_status', return_value=fake_job_status) mock_cancel_file_copy = self.mock_object( self.driver, '_cancel_file_copy') result = self.driver._copy_file( fake.VOLUME_NAME, fake.VOLUME_ID, fake.POOL_NAME, fake.VSERVER_NAME, fake.DEST_POOL_NAME, fake.DEST_VSERVER_NAME, dest_file_name=fake.VOLUME_NAME, dest_backend_name=fake.DEST_BACKEND_NAME, cancel_on_error=True) 
mock_start_file_copy.assert_called_with( fake.VOLUME_NAME, fake.DEST_POOL_NAME, src_ontap_volume=fake.POOL_NAME, dest_file_name=fake.VOLUME_NAME) mock_get_file_copy_status.assert_called_with(fake.JOB_UUID) mock_cancel_file_copy.assert_not_called() self.assertIsNone(result) @ddt.data(('data', na_utils.NetAppDriverTimeout), ('destroyed', na_utils.NetAppDriverException), ('destroyed', na_utils.NetAppDriverException)) @ddt.unpack @mock.patch('oslo_service.loopingcall.FixedIntervalWithTimeoutLoopingCall', new=test_utils.ZeroIntervalWithTimeoutLoopingCall) def test_copy_file_error(self, status_on_error, copy_exception): self.driver.configuration.netapp_migrate_volume_timeout = 1 fake_job_status = { 'job-status': status_on_error, 'last-failure-reason': None } mock_start_file_copy = self.mock_object(self.driver.zapi_client, 'start_file_copy', return_value=fake.JOB_UUID) mock_get_file_copy_status = self.mock_object( self.driver.zapi_client, 'get_file_copy_status', return_value=fake_job_status) mock_cancel_file_copy = self.mock_object( self.driver, '_cancel_file_copy') self.assertRaises(copy_exception, self.driver._copy_file, fake.VOLUME_NAME, fake.VOLUME_ID, fake.POOL_NAME, fake.VSERVER_NAME, fake.DEST_POOL_NAME, fake.DEST_VSERVER_NAME, dest_file_name=fake.VOLUME_NAME, dest_backend_name=fake.DEST_BACKEND_NAME, cancel_on_error=True) mock_start_file_copy.assert_called_with( fake.VOLUME_NAME, fake.DEST_POOL_NAME, src_ontap_volume=fake.POOL_NAME, dest_file_name=fake.VOLUME_NAME) mock_get_file_copy_status.assert_called_with(fake.JOB_UUID) mock_cancel_file_copy.assert_called_once_with( fake.JOB_UUID, fake.VOLUME_NAME, fake.DEST_POOL_NAME, dest_backend_name=fake.DEST_BACKEND_NAME) def test_migrate_volume_to_vserver(self): self.driver.backend_name = fake.BACKEND_NAME mock_copy_file = self.mock_object(self.driver, '_copy_file') mock_create_vserver_peer = self.mock_object(self.driver, 'create_vserver_peer') mock_finish_volume_migration = self.mock_object( self.driver, '_finish_volume_migration', return_value={}) ctxt = mock.Mock() vol_fields = {'id': fake.VOLUME_ID, 'name': fake.VOLUME_NAME} fake_vol = fake_volume.fake_volume_obj(ctxt, **vol_fields) updates = self.driver._migrate_volume_to_vserver( fake_vol, fake.NFS_SHARE, fake.VSERVER_NAME, fake.DEST_NFS_SHARE, fake.DEST_VSERVER_NAME, fake.DEST_BACKEND_NAME) mock_copy_file.assert_called_once_with( fake_vol.name, fake_vol.id, fake.EXPORT_PATH[1:], fake.VSERVER_NAME, fake.DEST_EXPORT_PATH[1:], fake.DEST_VSERVER_NAME, dest_backend_name=fake.DEST_BACKEND_NAME, cancel_on_error=True) mock_create_vserver_peer.assert_called_once_with( fake.VSERVER_NAME, fake.BACKEND_NAME, fake.DEST_VSERVER_NAME, ['file_copy']) mock_finish_volume_migration.assert_called_once_with( fake_vol, fake.DEST_NFS_SHARE) self.assertEqual({}, updates) def test_migrate_volume_create_vserver_peer_error(self): self.driver.backend_name = fake.BACKEND_NAME mock_copy_file = self.mock_object( self.driver, '_copy_file', side_effect=na_utils.NetAppDriverException) mock_create_vserver_peer = self.mock_object( self.driver, 'create_vserver_peer', side_effect=na_utils.NetAppDriverException) mock_finish_volume_migration = self.mock_object( self.driver, '_finish_volume_migration') ctxt = mock.Mock() vol_fields = {'id': fake.VOLUME_ID, 'name': fake.VOLUME_NAME} fake_vol = fake_volume.fake_volume_obj(ctxt, **vol_fields) self.assertRaises( na_utils.NetAppDriverException, self.driver._migrate_volume_to_vserver, fake_vol, fake.NFS_SHARE, fake.VSERVER_NAME, fake.DEST_NFS_SHARE, fake.DEST_VSERVER_NAME, 
fake.DEST_BACKEND_NAME) mock_create_vserver_peer.assert_called_once_with( fake.VSERVER_NAME, fake.BACKEND_NAME, fake.DEST_VSERVER_NAME, ['file_copy']) mock_copy_file.assert_not_called() mock_finish_volume_migration.assert_not_called() def test_migrate_volume_to_vserver_file_copy_error(self): self.driver.backend_name = fake.BACKEND_NAME mock_create_vserver_peer = self.mock_object( self.driver, 'create_vserver_peer') mock_copy_file = self.mock_object( self.driver, '_copy_file', side_effect=na_utils.NetAppDriverException) mock_finish_volume_migration = self.mock_object( self.driver, '_finish_volume_migration') ctxt = mock.Mock() vol_fields = {'id': fake.VOLUME_ID, 'name': fake.VOLUME_NAME} fake_vol = fake_volume.fake_volume_obj(ctxt, **vol_fields) self.assertRaises( na_utils.NetAppDriverException, self.driver._migrate_volume_to_vserver, fake_vol, fake.NFS_SHARE, fake.VSERVER_NAME, fake.DEST_NFS_SHARE, fake.DEST_VSERVER_NAME, fake.DEST_BACKEND_NAME) mock_create_vserver_peer.assert_called_once_with( fake.VSERVER_NAME, fake.BACKEND_NAME, fake.DEST_VSERVER_NAME, ['file_copy']) mock_copy_file.assert_called_once_with( fake_vol.name, fake_vol.id, fake.EXPORT_PATH[1:], fake.VSERVER_NAME, fake.DEST_EXPORT_PATH[1:], fake.DEST_VSERVER_NAME, dest_backend_name=fake.DEST_BACKEND_NAME, cancel_on_error=True) mock_finish_volume_migration.assert_not_called() def test_migrate_volume_to_vserver_file_copy_timeout(self): self.driver.backend_name = fake.BACKEND_NAME mock_create_vserver_peer = self.mock_object( self.driver, 'create_vserver_peer') mock_copy_file = self.mock_object( self.driver, '_copy_file', side_effect=na_utils.NetAppDriverTimeout) mock_finish_volume_migration = self.mock_object( self.driver, '_finish_volume_migration') ctxt = mock.Mock() vol_fields = {'id': fake.VOLUME_ID, 'name': fake.VOLUME_NAME} fake_vol = fake_volume.fake_volume_obj(ctxt, **vol_fields) self.assertRaises( na_utils.NetAppDriverTimeout, self.driver._migrate_volume_to_vserver, fake_vol, fake.NFS_SHARE, fake.VSERVER_NAME, fake.DEST_NFS_SHARE, fake.DEST_VSERVER_NAME, fake.DEST_BACKEND_NAME) mock_create_vserver_peer.assert_called_once_with( fake.VSERVER_NAME, fake.BACKEND_NAME, fake.DEST_VSERVER_NAME, ['file_copy']) mock_copy_file.assert_called_once_with( fake_vol.name, fake_vol.id, fake.EXPORT_PATH[1:], fake.VSERVER_NAME, fake.DEST_EXPORT_PATH[1:], fake.DEST_VSERVER_NAME, dest_backend_name=fake.DEST_BACKEND_NAME, cancel_on_error=True) mock_finish_volume_migration.assert_not_called() def test_migrate_volume_to_pool(self): mock_copy_file = self.mock_object(self.driver, '_copy_file') mock_finish_volume_migration = self.mock_object( self.driver, '_finish_volume_migration', return_value={}) ctxt = mock.Mock() vol_fields = {'id': fake.VOLUME_ID, 'name': fake.VOLUME_NAME} fake_vol = fake_volume.fake_volume_obj(ctxt, **vol_fields) updates = self.driver._migrate_volume_to_pool(fake_vol, fake.NFS_SHARE, fake.DEST_NFS_SHARE, fake.VSERVER_NAME, fake.DEST_BACKEND_NAME) mock_copy_file.assert_called_once_with( fake_vol.name, fake_vol.id, fake.EXPORT_PATH[1:], fake.VSERVER_NAME, fake.DEST_EXPORT_PATH[1:], fake.VSERVER_NAME, dest_backend_name=fake.DEST_BACKEND_NAME, cancel_on_error=True) mock_finish_volume_migration.assert_called_once_with( fake_vol, fake.DEST_NFS_SHARE) self.assertEqual({}, updates) def test_migrate_volume_to_pool_file_copy_error(self): mock_copy_file = self.mock_object( self.driver, '_copy_file', side_effect=na_utils.NetAppDriverException) mock_finish_volume_migration = self.mock_object( self.driver, '_finish_volume_migration') 
ctxt = mock.Mock() vol_fields = {'id': fake.VOLUME_ID, 'name': fake.VOLUME_NAME} fake_vol = fake_volume.fake_volume_obj(ctxt, **vol_fields) self.assertRaises( na_utils.NetAppDriverException, self.driver._migrate_volume_to_pool, fake_vol, fake.NFS_SHARE, fake.DEST_NFS_SHARE, fake.VSERVER_NAME, fake.DEST_BACKEND_NAME) mock_copy_file.assert_called_once_with( fake_vol.name, fake_vol.id, fake.EXPORT_PATH[1:], fake.VSERVER_NAME, fake.DEST_EXPORT_PATH[1:], fake.VSERVER_NAME, dest_backend_name=fake.DEST_BACKEND_NAME, cancel_on_error=True) mock_finish_volume_migration.assert_not_called() def test_migrate_volume_to_pool_file_copy_timeout(self): mock_copy_file = self.mock_object( self.driver, '_copy_file', side_effect=na_utils.NetAppDriverTimeout) mock_finish_volume_migration = self.mock_object( self.driver, '_finish_volume_migration') ctxt = mock.Mock() vol_fields = {'id': fake.VOLUME_ID, 'name': fake.VOLUME_NAME} fake_vol = fake_volume.fake_volume_obj(ctxt, **vol_fields) self.assertRaises( na_utils.NetAppDriverTimeout, self.driver._migrate_volume_to_pool, fake_vol, fake.NFS_SHARE, fake.DEST_NFS_SHARE, fake.VSERVER_NAME, fake.DEST_BACKEND_NAME) mock_copy_file.assert_called_once_with( fake_vol.name, fake_vol.id, fake.EXPORT_PATH[1:], fake.VSERVER_NAME, fake.DEST_EXPORT_PATH[1:], fake.VSERVER_NAME, dest_backend_name=fake.DEST_BACKEND_NAME, cancel_on_error=True) mock_finish_volume_migration.assert_not_called() def test_finish_volume_migration(self): mock_delete_volume = self.mock_object(self.driver, 'delete_volume') ctxt = mock.Mock() vol_fields = {'id': fake.VOLUME_ID, 'host': 'fakeHost@%s#%s' % (fake.BACKEND_NAME, fake.POOL_NAME)} fake_vol = fake_volume.fake_volume_obj(ctxt, **vol_fields) result = self.driver._finish_volume_migration(fake_vol, fake.DEST_POOL_NAME) mock_delete_volume.assert_called_once_with(fake_vol) expected = {'provider_location': fake.DEST_POOL_NAME} self.assertEqual(expected, result) def test_migrate_volume(self): ctx = mock.Mock() self.driver.backend_name = fake.BACKEND_NAME self.driver.netapp_vserver = fake.VSERVER_NAME mock_migrate_volume_ontap_assisted = self.mock_object( self.driver, 'migrate_volume_ontap_assisted', return_value={}) vol_fields = { 'id': fake.VOLUME_ID, 'name': fake.VOLUME_NAME, 'status': fields.VolumeStatus.AVAILABLE } fake_vol = fake_volume.fake_volume_obj(ctx, **vol_fields) result = self.driver.migrate_volume(ctx, fake_vol, fake.DEST_HOST_STRING) mock_migrate_volume_ontap_assisted.assert_called_once_with( fake_vol, fake.DEST_HOST_STRING, fake.BACKEND_NAME, fake.VSERVER_NAME) self.assertEqual({}, result) def test_migrate_volume_not_in_available_status(self): ctx = mock.Mock() self.driver.backend_name = fake.BACKEND_NAME self.driver.netapp_vserver = fake.VSERVER_NAME mock_migrate_volume_ontap_assisted = self.mock_object( self.driver, 'migrate_volume_ontap_assisted', return_value={}) vol_fields = { 'id': fake.VOLUME_ID, 'name': fake.VOLUME_NAME, 'status': fields.VolumeStatus.IN_USE } fake_vol = fake_volume.fake_volume_obj(ctx, **vol_fields) migrated, updates = self.driver.migrate_volume(ctx, fake_vol, fake.DEST_HOST_STRING) mock_migrate_volume_ontap_assisted.assert_not_called() self.assertFalse(migrated) self.assertEqual({}, updates) def test__revert_to_snapshot(self): mock_clone_backing_file_for_volume = self.mock_object( self.driver, '_clone_backing_file_for_volume') mock_get_export_ip_path = self.mock_object( self.driver, '_get_export_ip_path', return_value=(fake.SHARE_IP, fake.EXPORT_PATH)) mock_get_vserver_for_ip = self.mock_object( self.driver, 
'_get_vserver_for_ip', return_value=fake.VSERVER_NAME) mock_get_vol_by_junc_vserver = self.mock_object( self.driver.zapi_client, 'get_vol_by_junc_vserver', return_value=fake.FLEXVOL) mock_swap_files = self.mock_object(self.driver, '_swap_files') mock_delete_file = self.mock_object(self.driver.zapi_client, 'delete_file') self.driver._revert_to_snapshot(fake.SNAPSHOT_VOLUME, fake.SNAPSHOT) mock_clone_backing_file_for_volume.assert_called_once_with( fake.SNAPSHOT['name'], 'new-%s' % fake.SNAPSHOT['name'], fake.SNAPSHOT_VOLUME['id'], is_snapshot=False) mock_get_export_ip_path.assert_called_once_with( volume_id=fake.SNAPSHOT_VOLUME['id']) mock_get_vserver_for_ip.assert_called_once_with(fake.SHARE_IP) mock_get_vol_by_junc_vserver.assert_called_once_with( fake.VSERVER_NAME, fake.EXPORT_PATH) mock_swap_files.assert_called_once_with( fake.FLEXVOL, fake.SNAPSHOT_VOLUME['name'], 'new-%s' % fake.SNAPSHOT['name']) mock_delete_file.assert_not_called() @ddt.data(False, True) def test__revert_to_snapshot_swap_exception(self, delete_exception): new_snap_name = 'new-%s' % fake.SNAPSHOT['name'] new_file_path = '/vol/%s/%s' % (fake.FLEXVOL, new_snap_name) self.mock_object(self.driver, '_clone_backing_file_for_volume') self.mock_object(self.driver, '_get_export_ip_path', return_value=(fake.SHARE_IP, fake.EXPORT_PATH)) self.mock_object(self.driver, '_get_vserver_for_ip', return_value=fake.VSERVER_NAME) self.mock_object(self.driver.zapi_client, 'get_vol_by_junc_vserver', return_value=fake.FLEXVOL) swap_exception = exception.VolumeBackendAPIException(data="data") self.mock_object(self.driver, '_swap_files', side_effect=swap_exception) side_effect = Exception if delete_exception else lambda: True mock_delete_file = self.mock_object(self.driver.zapi_client, 'delete_file', side_effect=side_effect) self.assertRaises(exception.VolumeBackendAPIException, self.driver._revert_to_snapshot, fake.SNAPSHOT_VOLUME, fake.SNAPSHOT) mock_delete_file.assert_called_once_with(new_file_path) def test__swap_files(self): new_file = 'new-%s' % fake.SNAPSHOT['name'] new_file_path = '/vol/%s/%s' % (fake.FLEXVOL, new_file) original_file_path = '/vol/%s/%s' % (fake.FLEXVOL, fake.VOLUME_NAME) tmp_file_path = '/vol/%s/tmp-%s' % (fake.FLEXVOL, fake.VOLUME_NAME) mock_rename_file = self.mock_object( self.driver.zapi_client, 'rename_file') mock_delete_file = self.mock_object( self.driver.zapi_client, 'delete_file') self.driver._swap_files(fake.FLEXVOL, fake.VOLUME_NAME, new_file) mock_rename_file.assert_has_calls([ mock.call(original_file_path, tmp_file_path), mock.call(new_file_path, original_file_path)]) mock_delete_file.assert_called_once_with(tmp_file_path) @ddt.data((True, False), (False, False), (False, True)) @ddt.unpack def test__swap_files_rename_exception(self, first_exception, rollback_exception): new_file = 'new-%s' % fake.SNAPSHOT['name'] new_file_path = '/vol/%s/%s' % (fake.FLEXVOL, new_file) original_file_path = '/vol/%s/%s' % (fake.FLEXVOL, fake.VOLUME_NAME) tmp_file_path = '/vol/%s/tmp-%s' % (fake.FLEXVOL, fake.VOLUME_NAME) side_effect = None def _skip_side_effect(): return True if not first_exception and not rollback_exception: side_effect = [_skip_side_effect, exception.VolumeBackendAPIException(data="data"), _skip_side_effect] elif not first_exception and rollback_exception: side_effect = [_skip_side_effect, exception.VolumeBackendAPIException(data="data"), exception.VolumeBackendAPIException(data="data")] else: side_effect = exception.VolumeBackendAPIException(data="data") mock_rename_file = 
self.mock_object(self.driver.zapi_client, 'rename_file', side_effect=side_effect) self.assertRaises( na_utils.NetAppDriverException, self.driver._swap_files, fake.FLEXVOL, fake.VOLUME_NAME, new_file) if not first_exception: mock_rename_file.assert_has_calls([ mock.call(original_file_path, tmp_file_path), mock.call(new_file_path, original_file_path), mock.call(tmp_file_path, original_file_path)]) else: mock_rename_file.assert_called_once_with(original_file_path, tmp_file_path) def test__swap_files_delete_exception(self): new_file = 'new-%s' % fake.SNAPSHOT['name'] self.mock_object(self.driver.zapi_client, 'rename_file') side_effect = exception.VolumeBackendAPIException(data="data") self.mock_object(self.driver.zapi_client, 'delete_file', side_effect=side_effect) self.driver._swap_files(fake.FLEXVOL, fake.VOLUME_NAME, new_file) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/volume/drivers/netapp/dataontap/test_nvme_cmode.py0000664000175000017500000000274700000000000030442 0ustar00zuulzuul00000000000000# All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Mock unit tests for NetApp Data ONTAP NVMe storage systems.""" from unittest import mock from cinder import context from cinder.tests.unit import test import cinder.tests.unit.volume.drivers.netapp.fakes as na_fakes from cinder.volume.drivers.netapp.dataontap import nvme_cmode class NetAppCmodeNVMeDriverTestCase(test.TestCase): def setUp(self): super(NetAppCmodeNVMeDriverTestCase, self).setUp() kwargs = { 'configuration': self.get_config_base(), 'host': 'openstack@netappblock', } self.library = nvme_cmode.NetAppCmodeNVMeDriver(**kwargs) self.library.zapi_client = mock.Mock() self.zapi_client = self.library.zapi_client self.mock_request = mock.Mock() self.ctxt = context.RequestContext('fake', 'fake', auth_token=True) def get_config_base(self): return na_fakes.create_configuration() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/volume/drivers/netapp/dataontap/test_nvme_library.py0000664000175000017500000014341600000000000031016 0ustar00zuulzuul00000000000000# Copyright (c) 2023 NetApp, Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License.
"""Mock unit tests for the NetApp block storage library""" from concurrent.futures import ThreadPoolExecutor import copy from unittest import mock from unittest.mock import patch import ddt from oslo_utils import units from cinder import context from cinder import exception from cinder.tests.unit import test from cinder.tests.unit.volume.drivers.netapp.dataontap import fakes as fake import cinder.tests.unit.volume.drivers.netapp.fakes as na_fakes from cinder.volume.drivers.netapp.dataontap.client import api as netapp_api from cinder.volume.drivers.netapp.dataontap import nvme_library from cinder.volume.drivers.netapp.dataontap.performance import perf_cmode from cinder.volume.drivers.netapp.dataontap.utils import capabilities from cinder.volume.drivers.netapp.dataontap.utils import loopingcalls from cinder.volume.drivers.netapp.dataontap.utils import utils as dot_utils from cinder.volume.drivers.netapp import utils as na_utils from cinder.volume import volume_utils @ddt.ddt class NetAppNVMeStorageLibraryTestCase(test.TestCase): def setUp(self): super(NetAppNVMeStorageLibraryTestCase, self).setUp() config = na_fakes.create_configuration_cmode() config.netapp_storage_protocol = 'nvme' config.netapp_login = 'admin' config.netapp_password = 'pass' config.netapp_server_hostname = '127.0.0.1' config.netapp_transport_type = 'https' config.netapp_server_port = '443' config.netapp_vserver = 'openstack' config.netapp_api_trace_pattern = 'fake_regex' kwargs = { 'configuration': config, 'host': 'openstack@netappnvme', } self.library = nvme_library.NetAppNVMeStorageLibrary( 'driver', 'protocol', **kwargs) self.library.client = mock.Mock() self.client = self.library.client self.mock_request = mock.Mock() self.ctxt = context.RequestContext('fake', 'fake', auth_token=True) self.vserver = fake.VSERVER_NAME self.library.perf_library = mock.Mock() self.library.ssc_library = mock.Mock() self.library.vserver = mock.Mock() # fakes objects. 
self.fake_namespace = nvme_library.NetAppNamespace( fake.NAMESPACE_HANDLE, fake.NAMESPACE_NAME, fake.SIZE, fake.NAMESPACE_METADATA) self.fake_snapshot_namespace = nvme_library.NetAppNamespace( fake.SNAPSHOT_NAMESPACE_HANDLE, fake.SNAPSHOT_NAME, fake.SIZE, None) self.mock_object(self.library, 'namespace_table') self.library.namespace_table = { fake.NAMESPACE_NAME: self.fake_namespace, fake.SNAPSHOT_NAME: self.fake_snapshot_namespace, } @mock.patch.object(perf_cmode, 'PerformanceCmodeLibrary', mock.Mock()) @mock.patch.object(capabilities.CapabilitiesLibrary, 'cluster_user_supported') @mock.patch.object(capabilities.CapabilitiesLibrary, 'check_api_permissions') @mock.patch.object(na_utils, 'check_flags') def test_do_setup_san_unconfigured(self, mock_check_flags, mock_check_api_permissions, mock_cluster_user_supported): self.library.configuration.netapp_namespace_ostype = None self.library.configuration.netapp_host_type = None self.library.backend_name = 'fake_backend' fake_client = mock.Mock() fake_client.vserver = 'fake_vserver' self.mock_object(dot_utils, 'get_client_for_backend', return_value=fake_client) self.library.do_setup(mock.Mock()) self.assertTrue(mock_check_flags.called) mock_check_api_permissions.assert_called_once_with() mock_cluster_user_supported.assert_called_once_with() self.assertEqual('linux', self.library.namespace_ostype) self.assertEqual('linux', self.library.host_type) dot_utils.get_client_for_backend.assert_called_once_with( 'fake_backend', force_rest=True) def test_check_for_setup_error(self): self.mock_object(self.library, '_get_flexvol_to_pool_map', return_value=fake.POOL_NAME) self.mock_object(self.library, '_add_looping_tasks') self.library.namespace_ostype = 'linux' self.library.host_type = 'linux' self.mock_object(self.library.client, 'get_namespace_list', return_value='fake_namespace_list') self.mock_object(self.library, '_extract_and_populate_namespaces') self.mock_object(self.library.loopingcalls, 'start_tasks') self.library.check_for_setup_error() self.library._get_flexvol_to_pool_map.assert_called_once_with() self.library._add_looping_tasks.assert_called_once_with() self.library.client.get_namespace_list.assert_called_once_with() self.library._extract_and_populate_namespaces.assert_called_once_with( 'fake_namespace_list') self.library.loopingcalls.start_tasks.assert_called_once_with() @ddt.data( {'pool_map': None, 'namespace': 'linux', 'host': 'linux'}, {'pool_map': 'fake_map', 'namespace': 'fake', 'host': 'linux'}, {'pool_map': 'fake_map', 'namespace': 'linux', 'host': 'fake'}) @ddt.unpack def test_check_for_setup_error_error(self, pool_map, namespace, host): self.mock_object(self.library, '_get_flexvol_to_pool_map', return_value=pool_map) self.library.namespace_ostype = namespace self.library.host_type = host self.mock_object(self.library, '_add_looping_tasks') self.assertRaises( na_utils.NetAppDriverException, self.library.check_for_setup_error) def test_check_for_setup_error_disaggregated(self): self.library.configuration.netapp_disaggregated_platform = True self.mock_object(self.library, '_get_cluster_to_pool_map', return_value=fake.POOL_NAME) self.mock_object(self.library, '_add_looping_tasks') self.library.namespace_ostype = 'linux' self.library.host_type = 'linux' self.mock_object(self.library.client, 'get_namespace_list', return_value='fake_namespace_list') self.mock_object(self.library, '_extract_and_populate_namespaces') self.mock_object(self.library.loopingcalls, 'start_tasks') self.library.check_for_setup_error() 
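# With netapp_disaggregated_platform enabled, setup resolves pools through the cluster-to-pool map (mocked above) rather than the per-FlexVol map; the remaining setup steps are verified below.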
self.library._add_looping_tasks.assert_called_once_with() self.library.client.get_namespace_list.assert_called_once_with() self.library._extract_and_populate_namespaces.assert_called_once_with( 'fake_namespace_list') self.library.loopingcalls.start_tasks.assert_called_once_with() @ddt.data( {'pool_map': None, 'namespace': 'linux', 'host': 'linux'}, {'pool_map': 'fake_map', 'namespace': 'fake', 'host': 'linux'}, {'pool_map': 'fake_map', 'namespace': 'linux', 'host': 'fake'}) @ddt.unpack def test_check_for_setup_error_error_disaggregated( self, pool_map, namespace, host): self.library.configuration.netapp_disaggregated_platform = True self.mock_object(self.library, '_get_cluster_to_pool_map', return_value=pool_map) self.library.namespace_ostype = namespace self.library.host_type = host self.mock_object(self.library, '_add_looping_tasks') self.assertRaises( na_utils.NetAppDriverException, self.library.check_for_setup_error) def test_create_volume(self): volume_size_in_bytes = int(fake.SIZE) * units.Gi self.mock_object(volume_utils, 'extract_host', return_value=fake.POOL_NAME) self.mock_object(self.library.client, 'create_namespace') self.mock_object(self.library, '_create_namespace_handle') self.mock_object(self.library, '_add_namespace_to_table') volume1 = copy.deepcopy(fake.test_volume) self.library.create_volume(volume1) fake_metadata = { 'OsType': self.library.namespace_ostype, 'Path': '/vol/aggr1/fakename', 'Volume': 'aggr1', 'Qtree': None } self.library.client.create_namespace.assert_called_once_with( fake.POOL_NAME, 'fakename', volume_size_in_bytes, fake_metadata) self.library._create_namespace_handle.assert_called_once_with( fake_metadata) def test_create_namespace_handle(self): self.library.vserver = fake.VSERVER_NAME res = self.library._create_namespace_handle(fake.NAMESPACE_METADATA) self.assertEqual(f'{fake.VSERVER_NAME}:{fake.PATH_NAMESPACE}', res) def test__extract_namespace_info(self): self.mock_object(self.library, '_create_namespace_handle', return_value=fake.NAMESPACE_HANDLE) namespace = {'Path': fake.PATH_NAMESPACE, 'Size': fake.SIZE} res = self.library._extract_namespace_info(namespace) self.assertEqual(fake.NAMESPACE_NAME, res.name) self.library._create_namespace_handle.assert_called_once_with( namespace) def test__extract_and_populate_namespaces(self): self.mock_object(self.library, '_extract_namespace_info', return_value='fake_namespace') self.mock_object(self.library, '_add_namespace_to_table') self.library._extract_and_populate_namespaces([fake.NAMESPACE_NAME]) self.library._extract_namespace_info.assert_called_once_with( fake.NAMESPACE_NAME) self.library._add_namespace_to_table.assert_called_once_with( 'fake_namespace') def test__add_namespace_to_table(self): namespace = nvme_library.NetAppNamespace( fake.NAMESPACE_HANDLE, 'fake_namespace2', fake.SIZE, None) self.library._add_namespace_to_table(namespace) has_namespace = 'fake_namespace2' in self.library.namespace_table self.assertTrue(has_namespace) self.assertEqual(namespace, self.library.namespace_table['fake_namespace2']) def test__add_namespace_to_table_error(self): self.assertRaises( exception.VolumeBackendAPIException, self.library._add_namespace_to_table, 'fake' ) def test__get_namespace_from_table_error(self): self.mock_object(self.library.client, 'get_namespace_list', return_value='fake_list') self.mock_object(self.library, '_extract_and_populate_namespaces') self.assertRaises( exception.VolumeNotFound, self.library._get_namespace_from_table, 'fake') 
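# A lookup miss refreshes the namespace table (list namespaces, then repopulate) before VolumeNotFound is raised, so both refresh calls are still expected.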
self.library.client.get_namespace_list.assert_called_once_with() self.library._extract_and_populate_namespaces.assert_called_once_with( 'fake_list') def test__get_namespace_from_table(self): res = self.library._get_namespace_from_table(fake.NAMESPACE_NAME) self.assertEqual(self.fake_namespace, res) @ddt.data(exception.VolumeNotFound, netapp_api.NaApiError) def test__get_namespace_attr_error(self, error_obj): self.mock_object(self.library, '_get_namespace_from_table', side_effect=error_obj) res = self.library._get_namespace_attr('namespace', 'name') self.assertIsNone(res) def test__get_namespace_attr(self): self.mock_object(self.library, '_get_namespace_from_table', return_value=self.fake_namespace) res = self.library._get_namespace_attr('namespace', 'name') self.assertEqual(fake.NAMESPACE_NAME, res) def test_create_volume_error(self): self.mock_object(volume_utils, 'extract_host', return_value=fake.POOL_NAME) self.mock_object(self.library.client, 'create_namespace', side_effect=exception.VolumeBackendAPIException) self.mock_object(self.library, '_create_namespace_handle') self.mock_object(self.library, '_add_namespace_to_table') self.assertRaises( exception.VolumeBackendAPIException, self.library.create_volume, copy.deepcopy(fake.test_volume)) def test__update_ssc(self): mock_get_flexvol = self.mock_object( self.library, '_get_flexvol_to_pool_map', return_value='fake_pool_map') self.library.ssc_library.update_ssc = mock.Mock() self.library._update_ssc() mock_get_flexvol.assert_called_once_with() self.library.ssc_library.update_ssc.assert_called_once_with( 'fake_pool_map') def test__update_ssc_disaggregated_platform(self): self.library.configuration.netapp_disaggregated_platform = True mock_get_cluster_pool_map = self.mock_object( self.library, '_get_cluster_to_pool_map', return_value=fake.FAKE_CLUSTER_POOL_MAP) self.library.ssc_library.update_ssc_asa = mock.Mock() self.library._update_ssc() mock_get_cluster_pool_map.assert_called_once_with() self.library.ssc_library.update_ssc_asa.assert_called_once_with( fake.FAKE_CLUSTER_POOL_MAP) def test__find_mapped_namespace_subsystem(self): self.mock_object(self.library.client, 'get_subsystem_by_host', return_value=[{'name': fake.SUBSYSTEM}]) self.mock_object( self.library.client, 'get_namespace_map', return_value=[{ 'subsystem_uuid': fake.UUID1, 'subsystem': fake.SUBSYSTEM, 'uuid': fake.UUID1 }]) subsystem_uuid, subsystem, n_uuid =\ self.library._find_mapped_namespace_subsystem( fake.NAMESPACE_NAME, fake.HOST_NQN ) self.assertEqual(fake.SUBSYSTEM, subsystem) self.assertEqual(fake.UUID1, n_uuid) self.library.client.get_subsystem_by_host.assert_called_once_with( fake.HOST_NQN) self.library.client.get_namespace_map.assert_called_once_with( fake.NAMESPACE_NAME) def test_delete_volume(self): self.mock_object(self.library, '_delete_namespace') self.library.delete_volume(fake.NAMESPACE_VOLUME) self.library._delete_namespace.assert_called_once_with( fake.NAMESPACE_NAME) def test__delete_namespace(self): namespace = copy.deepcopy(fake.NAMESPACE_WITH_METADATA) self.mock_object(self.library, '_get_namespace_attr', return_value=namespace['metadata']) self.mock_object(self.library.client, 'destroy_namespace') self.library._delete_namespace(fake.NAMESPACE_NAME) self.library._get_namespace_attr.assert_called_once_with( fake.NAMESPACE_NAME, 'metadata') self.library.client.destroy_namespace.assert_called_once_with( namespace['metadata']['Path']) has_namespace = fake.NAMESPACE_NAME in self.library.namespace_table self.assertFalse(has_namespace) def 
test__delete_namespace_not_found(self): namespace = copy.deepcopy(fake.NAMESPACE_WITH_METADATA) self.mock_object(self.library, '_get_namespace_attr', return_value=namespace['metadata']) error = netapp_api.NaApiError( code=netapp_api.REST_NAMESPACE_EOBJECTNOTFOUND[0]) self.mock_object(self.library.client, 'destroy_namespace', side_effect=error) self.library._delete_namespace(fake.NAMESPACE_NAME) self.library._get_namespace_attr.assert_called_once_with( fake.NAMESPACE_NAME, 'metadata') self.library.client.destroy_namespace.assert_called_once_with( namespace['metadata']['Path']) has_namespace = fake.NAMESPACE_NAME in self.library.namespace_table self.assertFalse(has_namespace) def test__delete_namespace_error(self): namespace = copy.deepcopy(fake.NAMESPACE_WITH_METADATA) self.mock_object(self.library, '_get_namespace_attr', return_value=namespace['metadata']) self.mock_object(self.library.client, 'destroy_namespace', side_effect=netapp_api.NaApiError) self.assertRaises(na_utils.NetAppDriverException, self.library._delete_namespace, fake.NAMESPACE_NAME) def test__delete_namespace_no_metadata(self): self.mock_object(self.library, '_get_namespace_attr', return_value=None) self.mock_object(self.library.client, 'destroy_namespace') self.library._delete_namespace(fake.NAMESPACE_NAME) self.library._get_namespace_attr.assert_called_once_with( fake.NAMESPACE_NAME, 'metadata') self.library.client.destroy_namespace.assert_not_called() def test_add_looping_tasks(self): mock_add_task = self.mock_object(self.library.loopingcalls, 'add_task') self.mock_object(self.library, '_update_ssc') self.library._add_looping_tasks() self.library._update_ssc.assert_called_once_with() mock_add_task.assert_has_calls([ mock.call(self.library._update_ssc, loopingcalls.ONE_HOUR, loopingcalls.ONE_HOUR), mock.call(self.library._handle_ems_logging, loopingcalls.ONE_HOUR)]) def test_handle_ems_logging(self): volume_list = ['vol0', 'vol1', 'vol2'] self.mock_object( self.library.ssc_library, 'get_ssc_flexvol_names', return_value=volume_list) self.mock_object( dot_utils, 'build_ems_log_message_0', return_value='fake_base_ems_log_message') self.mock_object( dot_utils, 'build_ems_log_message_1', return_value='fake_pool_ems_log_message') mock_send_ems_log_message = self.mock_object( self.client, 'send_ems_log_message') self.library._handle_ems_logging() mock_send_ems_log_message.assert_has_calls([ mock.call('fake_base_ems_log_message'), mock.call('fake_pool_ems_log_message'), ]) dot_utils.build_ems_log_message_0.assert_called_once_with( self.library.driver_name, self.library.app_version) dot_utils.build_ems_log_message_1.assert_called_once_with( self.library.driver_name, self.library.app_version, self.library.vserver, volume_list, []) def test_get_pool(self): namespace = copy.deepcopy(fake.NAMESPACE_WITH_METADATA) self.mock_object(self.library, '_get_namespace_attr', return_value=namespace['metadata']) res = self.library.get_pool(fake.VOLUME) self.assertEqual('fake_flexvol', res) self.library._get_namespace_attr.assert_called_once_with( fake.LUN_NAME, 'metadata') def test_delete_snapshot(self): mock__delete = self.mock_object(self.library, '_delete_namespace') self.library.delete_snapshot(fake.SNAPSHOT) mock__delete.assert_called_once_with(fake.SNAPSHOT_NAME) def test_create_volume_from_snapshot(self): self.mock_object(self.library, '_clone_source_to_destination') self.library.create_volume_from_snapshot(fake.NAMESPACE_VOLUME, fake.SNAPSHOT) self.library._clone_source_to_destination.assert_called_once_with( {'name': 
fake.SNAPSHOT_NAME, 'size': fake.SIZE}, fake.NAMESPACE_VOLUME) def test_create_cloned_volume(self): self.mock_object(self.library, '_get_namespace_from_table', return_value=self.fake_namespace) self.mock_object(self.library, '_clone_source_to_destination') src_volume = {'size': fake.SIZE, 'name': 'fake_name'} self.library.create_cloned_volume(fake.NAMESPACE_VOLUME, src_volume) self.library._get_namespace_from_table.assert_called_once_with( 'fake_name') self.library._clone_source_to_destination.assert_called_once_with( {'name': fake.NAMESPACE_NAME, 'size': fake.SIZE}, fake.NAMESPACE_VOLUME) def test_clone_source_to_destination(self): self.mock_object(self.library, '_clone_namespace') self.mock_object(self.library, '_extend_volume') self.mock_object(self.library, 'delete_volume') source_vol = {'size': fake.SIZE, 'name': 'fake_source'} dest_size = fake.SIZE + 12 dest_vol = {'size': dest_size, 'name': 'fake_dest'} self.library._clone_source_to_destination(source_vol, dest_vol) self.library._clone_namespace.assert_called_once_with( 'fake_source', 'fake_dest') self.library._extend_volume.assert_called_once_with( dest_vol, dest_size) self.library.delete_volume.assert_not_called() def test_clone_source_to_destination_clone_error(self): self.mock_object(self.library, '_clone_namespace', side_effect=exception.VolumeBackendAPIException) self.mock_object(self.library, '_extend_volume') self.mock_object(self.library, 'delete_volume') source_vol = {'size': fake.SIZE, 'name': 'fake_source'} dest_size = fake.SIZE + 12 dest_vol = {'size': dest_size, 'name': 'fake_dest'} self.assertRaises( exception.VolumeBackendAPIException, self.library._clone_source_to_destination, source_vol, dest_vol) def test_clone_source_to_destination_extend_error(self): self.mock_object(self.library, '_clone_namespace') self.mock_object(self.library, '_extend_volume', side_effect=exception.VolumeBackendAPIException) self.mock_object(self.library, 'delete_volume') source_vol = {'size': fake.SIZE, 'name': 'fake_source'} dest_size = fake.SIZE + 12 dest_vol = {'size': dest_size, 'name': 'fake_dest'} self.assertRaises( exception.VolumeBackendAPIException, self.library._clone_source_to_destination, source_vol, dest_vol) @ddt.data(True, False) def test_get_volume_stats(self, refresh): self.library._stats = 'fake_stats' self.mock_object(self.library, '_update_volume_stats') res = self.library.get_volume_stats(refresh, filter_function='filter', goodness_function='good') self.assertEqual('fake_stats', res) if refresh: self.library._update_volume_stats.assert_called_once_with( filter_function='filter', goodness_function='good') else: self.library._update_volume_stats.assert_not_called() def test__update_volume_stats(self): self.library.VERSION = '1.0.0' self.library.driver_protocol = 'nvme' self.mock_object(self.library, '_get_pool_stats', return_value='fake_pools') self.library._update_volume_stats(filter_function='filter', goodness_function='good') expected_ssc = { 'volume_backend_name': 'driver', 'vendor_name': 'NetApp', 'driver_version': '1.0.0', 'pools': 'fake_pools', 'sparse_copy_volume': True, 'replication_enabled': False, 'storage_protocol': 'nvme', } self.assertEqual(expected_ssc, self.library._stats) @ddt.data({'cluster_credentials': False, 'report_provisioned_capacity': False}, {'cluster_credentials': True, 'report_provisioned_capacity': True}) @ddt.unpack def test_get_pool_stats(self, cluster_credentials, report_provisioned_capacity): self.library.using_cluster_credentials = cluster_credentials conf = self.library.configuration 
conf.netapp_driver_reports_provisioned_capacity = ( report_provisioned_capacity) ssc = { 'vola': { 'pool_name': 'vola', 'thick_provisioning_support': True, 'thin_provisioning_support': False, 'netapp_thin_provisioned': 'false', 'netapp_compression': 'false', 'netapp_mirrored': 'false', 'netapp_dedup': 'true', 'netapp_aggregate': 'aggr1', 'netapp_raid_type': 'raid_dp', 'netapp_disk_type': 'SSD', 'netapp_is_flexgroup': 'false', }, } mock_get_ssc = self.mock_object(self.library.ssc_library, 'get_ssc', return_value=ssc) mock_get_aggrs = self.mock_object(self.library.ssc_library, 'get_ssc_aggregates', return_value=['aggr1']) self.library.reserved_percentage = 5 self.library.max_over_subscription_ratio = 10 self.library.perf_library.get_node_utilization_for_pool = ( mock.Mock(return_value=30.0)) mock_capacities = { 'size-total': 10737418240.0, 'size-available': 2147483648.0, } namespaces_provisioned_cap = [{ 'path': '/vol/volume-ae947c9b-2392-4956-b373-aaac4521f37e', 'size': 5368709120.0 # 5GB }, { 'path': '/vol/snapshot-527eedad-a431-483d-b0ca-18995dd65b66', 'size': 1073741824.0 # 1GB }] self.mock_object(self.client, 'get_flexvol_capacity', return_value=mock_capacities) self.mock_object(self.client, 'get_namespace_sizes_by_volume', return_value=namespaces_provisioned_cap) self.mock_object(self.client, 'get_flexvol_dedupe_used_percent', return_value=55.0) aggr_capacities = { 'aggr1': { 'percent-used': 45, 'size-available': 59055800320.0, 'size-total': 107374182400.0, }, } mock_get_aggr_capacities = self.mock_object( self.client, 'get_aggregate_capacities', return_value=aggr_capacities) result = self.library._get_pool_stats(filter_function='filter', goodness_function='goodness') expected = [{ 'pool_name': 'vola', 'QoS_support': False, 'consistencygroup_support': True, 'consistent_group_snapshot_enabled': True, 'reserved_percentage': 5, 'max_over_subscription_ratio': 10, 'multiattach': True, 'total_capacity_gb': 10.0, 'free_capacity_gb': 2.0, 'netapp_dedupe_used_percent': 55.0, 'netapp_aggregate_used_percent': 45, 'utilization': 30.0, 'filter_function': 'filter', 'goodness_function': 'goodness', 'thick_provisioning_support': True, 'thin_provisioning_support': False, 'netapp_thin_provisioned': 'false', 'netapp_compression': 'false', 'netapp_mirrored': 'false', 'netapp_dedup': 'true', 'netapp_aggregate': 'aggr1', 'netapp_raid_type': 'raid_dp', 'netapp_disk_type': 'SSD', 'online_extend_support': True, 'netapp_is_flexgroup': 'false', 'total_volumes': 2, }] if report_provisioned_capacity: expected[0].update({'provisioned_capacity_gb': 5.0}) if not cluster_credentials: expected[0].update({ 'netapp_aggregate_used_percent': 0, 'netapp_dedupe_used_percent': 0.0 }) self.assertEqual(expected, result) mock_get_ssc.assert_called_once_with() if cluster_credentials: mock_get_aggrs.assert_called_once_with() mock_get_aggr_capacities.assert_called_once_with(['aggr1']) @ddt.data({}, None) def test_get_pool_stats_no_ssc_vols(self, ssc): mock_get_ssc = self.mock_object(self.library.ssc_library, 'get_ssc', return_value=ssc) pools = self.library._get_pool_stats() self.assertListEqual([], pools) mock_get_ssc.assert_called_once_with() @ddt.data(r'open+|demix+', 'open.+', r'.+\d', '^((?!mix+).)*$', 'open123, open321') def test_get_pool_map_match_selected_pools(self, patterns): self.library.configuration.netapp_pool_name_search_pattern = patterns mock_list_flexvols = self.mock_object( self.library.client, 'list_flexvols', return_value=fake.FAKE_CMODE_VOLUMES) result = self.library._get_flexvol_to_pool_map() expected = { 
'open123': { 'pool_name': 'open123', }, 'open321': { 'pool_name': 'open321', }, } self.assertEqual(expected, result) mock_list_flexvols.assert_called_once_with() @ddt.data('', 'mix.+|open.+', '.+', 'open123, mixed, open321', '.*?') def test_get_pool_map_match_all_pools(self, patterns): self.library.configuration.netapp_pool_name_search_pattern = patterns mock_list_flexvols = self.mock_object( self.library.client, 'list_flexvols', return_value=fake.FAKE_CMODE_VOLUMES) result = self.library._get_flexvol_to_pool_map() self.assertEqual(fake.FAKE_CMODE_POOL_MAP, result) mock_list_flexvols.assert_called_once_with() def test_get_pool_map_invalid_conf(self): """Verify an exception is raised if the regex pattern is invalid""" self.library.configuration.netapp_pool_name_search_pattern = '(.+' self.assertRaises(exception.InvalidConfigurationValue, self.library._get_flexvol_to_pool_map) @ddt.data('abc|stackopen|openstack|abc*', 'abc', 'stackopen', 'openstack', 'abc*', '^$') def test_get_pool_map_non_matching_patterns(self, patterns): self.library.configuration.netapp_pool_name_search_pattern = patterns mock_list_flexvols = self.mock_object( self.library.client, 'list_flexvols', return_value=fake.FAKE_CMODE_VOLUMES) result = self.library._get_flexvol_to_pool_map() self.assertEqual({}, result) mock_list_flexvols.assert_called_once_with() def test_create_snapshot(self): self.mock_object(self.library, '_create_snapshot') self.library.create_snapshot('fake_snap') self.library._create_snapshot.assert_called_once_with('fake_snap') def test__create_snapshot(self): self.mock_object(self.library, '_get_namespace_from_table', return_value=self.fake_namespace) self.mock_object(self.library, '_clone_namespace') self.library._create_snapshot(fake.SNAPSHOT) self.library._get_namespace_from_table.assert_called_once_with( fake.VOLUME_NAME) self.library._clone_namespace.assert_called_once_with( fake.NAMESPACE_NAME, fake.SNAPSHOT_NAME) def test__clone_namespace_error(self): self.mock_object(self.library, '_get_namespace_attr', return_value=fake.NAMESPACE_METADATA) self.mock_object(self.library.client, 'clone_namespace') self.mock_object(self.library.client, 'get_namespace_by_args', return_value=[]) self.assertRaises( exception.VolumeBackendAPIException, self.library._clone_namespace, fake.NAMESPACE_NAME, 'fake_new_name') def test__clone_namespace(self): self.mock_object(self.library, '_get_namespace_attr', return_value=fake.NAMESPACE_METADATA) self.mock_object(self.library.client, 'clone_namespace') fake_namespace_res = { 'Vserver': fake.VSERVER_NAME, 'Path': fake.NAMESPACE_NAME, 'Size': 1024 } self.mock_object(self.library.client, 'get_namespace_by_args', return_value=[fake_namespace_res]) self.mock_object(self.library, '_add_namespace_to_table') self.library._clone_namespace(fake.NAMESPACE_NAME, 'fake_new_name') self.library._get_namespace_attr.assert_called_once_with( fake.NAMESPACE_NAME, 'metadata') self.library.client.clone_namespace.assert_called_once_with( fake.POOL_NAME, fake.NAMESPACE_NAME, 'fake_new_name') self.library.client.get_namespace_by_args.assert_called_once() self.library._add_namespace_to_table.assert_called_once() def test_ensure_export(self): self.mock_object(self.library, '_get_namespace_attr', return_value='fake_handle') res = self.library.ensure_export(mock.Mock(), fake.NAMESPACE_VOLUME) self.assertEqual({'provider_location': 'fake_handle'}, res) self.library._get_namespace_attr.assert_called_once_with( fake.NAMESPACE_NAME, 'handle') def test_create_export(self): self.mock_object(self.library, 
'_get_namespace_attr', return_value='fake_handle') res = self.library.create_export(mock.Mock(), fake.NAMESPACE_VOLUME) self.assertEqual({'provider_location': 'fake_handle'}, res) self.library._get_namespace_attr.assert_called_once_with( fake.NAMESPACE_NAME, 'handle') def test__extend_volume(self): self.mock_object(self.library, '_get_namespace_from_table', return_value=self.fake_namespace) self.mock_object(self.library.client, 'namespace_resize') self.library._extend_volume(fake.NAMESPACE_VOLUME, fake.SIZE) new_bytes = str(int(fake.SIZE) * units.Gi) self.assertEqual(new_bytes, self.fake_namespace.size) self.library._get_namespace_from_table.assert_called_once_with( fake.NAMESPACE_NAME) self.library.client.namespace_resize.assert_called_once_with( fake.PATH_NAMESPACE, new_bytes) def test__map_namespace(self): self.library.host_type = 'win' fake_namespace_metadata = [{ 'subsystem': 'fake_subsystem', 'subsystem_uuid': 'fake_subsystem_uuid', 'uuid': 'fake_uuid' }] self.mock_object(self.library, '_get_namespace_attr', return_value=fake.NAMESPACE_METADATA) self.mock_object(self.library.client, 'map_namespace', return_value=fake.UUID1) self.mock_object(self.library.client, 'get_namespace_map', return_value=fake_namespace_metadata) host_nqn = 'fake_host_nqn' name = 'fake_namespace_name' subsystem_name, ns_uuid = self.library._map_namespace(name, host_nqn) self.assertEqual(subsystem_name, 'fake_subsystem') self.assertEqual(ns_uuid, 'fake_uuid') self.library.client.map_host_with_subsystem.assert_called_once_with( host_nqn, 'fake_subsystem_uuid' ) def test_initialize_connection(self): self.mock_object(self.library, '_map_namespace', return_value=(fake.SUBSYSTEM, fake.UUID1)) self.mock_object(self.library.client, 'get_nvme_subsystem_nqn', return_value=fake.TARGET_NQN) self.mock_object(self.library.client, 'get_nvme_target_portals', return_value=['fake_ip']) res = self.library.initialize_connection( fake.NAMESPACE_VOLUME, {'nqn': fake.HOST_NQN}) expected_conn_info = { "driver_volume_type": "nvmeof", "data": { "target_nqn": fake.TARGET_NQN, "host_nqn": fake.HOST_NQN, "portals": [('fake_ip', 4420, 'tcp')], "vol_uuid": fake.UUID1 } } self.assertEqual(expected_conn_info, res) self.library._map_namespace.assert_called_once_with( fake.NAMESPACE_NAME, fake.HOST_NQN) self.library.client.get_nvme_subsystem_nqn.assert_called_once_with( fake.SUBSYSTEM) self.library.client.get_nvme_target_portals.assert_called_once_with() def test_initialize_connection_error_no_host(self): self.mock_object(self.library, '_map_namespace', return_value=(fake.SUBSYSTEM, fake.UUID1)) self.mock_object(self.library.client, 'get_nvme_subsystem_nqn', return_value=fake.TARGET_NQN) self.mock_object(self.library.client, 'get_nvme_target_portals', return_value=['fake_ip']) self.assertRaises( exception.VolumeBackendAPIException, self.library.initialize_connection, fake.NAMESPACE_VOLUME, {}) def test_initialize_connection_error_no_target(self): self.mock_object(self.library, '_map_namespace', return_value=(fake.SUBSYSTEM, fake.UUID1)) self.mock_object(self.library.client, 'get_nvme_subsystem_nqn', return_value=None) self.mock_object(self.library.client, 'get_nvme_target_portals', return_value=['fake_ip']) self.assertRaises( exception.VolumeBackendAPIException, self.library.initialize_connection, fake.NAMESPACE_VOLUME, {'nqn': fake.HOST_NQN}) def test_initialize_connection_error_no_portals(self): self.mock_object(self.library, '_map_namespace', return_value=(fake.SUBSYSTEM, fake.UUID1)) self.mock_object(self.library.client, 
'get_nvme_subsystem_nqn', return_value=fake.TARGET_NQN) self.mock_object(self.library.client, 'get_nvme_target_portals', return_value=[]) self.assertRaises( exception.VolumeBackendAPIException, self.library.initialize_connection, fake.NAMESPACE_VOLUME, {'nqn': fake.HOST_NQN}) @ddt.data(fake.HOST_NQN, None) def test__unmap_namespace(self, host_nqn): mock_find = self.mock_object( self.library, '_find_mapped_namespace_subsystem', return_value=(fake.UUID1, fake.SUBSYSTEM, 'fake')) self.mock_object(self.library.client, 'get_namespace_map', return_value=[{'subsystem': fake.SUBSYSTEM}]) self.mock_object(self.library.client, 'unmap_namespace') self.library._unmap_namespace(fake.PATH_NAMESPACE, host_nqn) if host_nqn: mock_find.assert_called_once_with(fake.PATH_NAMESPACE, fake.HOST_NQN) self.library.client.get_namespace_map.assert_not_called() else: self.library._find_mapped_namespace_subsystem.assert_not_called() @ddt.data(None, {'nqn': fake.HOST_NQN}) def test_terminate_connection(self, connector): self.mock_object(self.library, '_get_namespace_attr', return_value=fake.NAMESPACE_METADATA) self.mock_object(self.library, '_unmap_namespace') self.mock_object(na_utils, 'is_multiattach_to_host', return_value=False) namespace_volume = copy.deepcopy(fake.test_namespace_volume) self.library.terminate_connection(namespace_volume, connector) self.library._get_namespace_attr.assert_called_once_with( namespace_volume.name, 'metadata') host = connector['nqn'] if connector else None self.library._unmap_namespace(fake.PATH_NAMESPACE, host) if connector: na_utils.is_multiattach_to_host.assert_called_once_with( namespace_volume, connector) @mock.patch.object(na_utils, 'is_multiattach_to_host', return_value=False) def test_terminate_connection_parallel(self, mock_is_multiattach_to_host): def execute_terminate_connection(connector): mock_log = patch('self.library.LOG').start() self.library.terminate_connection(fake.NAMESPACE_VOLUME, connector) self.library._get_namespace_attr.assert_called_once_with( fake.NAMESPACE_NAME, 'metadata') host = connector['nqn'] if connector else None self.library._unmap_namespace.assert_called_once_with( fake.PATH_NAMESPACE, host) if connector: mock_is_multiattach_to_host.assert_called_once_with( fake.NAMESPACE_VOLUME, connector) else: mock_log.debug.assert_called_with('Unmapping namespace ' '%(name)s from all hosts.', {'name': fake. 
NAMESPACE_VOLUME['name']}) mock_log.stop() connector_list = [None, {'nqn': fake.HOST_NQN}] with ThreadPoolExecutor(max_workers=2) as executor: executor.map(execute_terminate_connection, connector_list) def test_create_group(self): model_update = self.library.create_group( fake.VOLUME_GROUP) self.assertEqual('available', model_update['status']) def test_delete_group_volume_delete_failure(self): self.mock_object(nvme_library, 'LOG') self.mock_object(self.library, '_delete_namespace', side_effect=Exception) model_update, volumes = self.library.delete_group( fake.VOLUME_GROUP, [fake.VG_VOLUME]) self.assertEqual('deleted', model_update['status']) self.assertEqual('error_deleting', volumes[0]['status']) self.assertEqual(1, nvme_library.LOG.exception.call_count) def test_update_group(self): model_update, add_volumes_update, remove_volumes_update = ( self.library.update_group(fake.VOLUME_GROUP)) self.assertIsNone(model_update) self.assertIsNone(add_volumes_update) self.assertIsNone(remove_volumes_update) def test_delete_group_not_found(self): self.mock_object(nvme_library, 'LOG') self.mock_object(self.library, '_get_namespace_attr', return_value=None) model_update, volumes = self.library.delete_group( fake.VOLUME_GROUP, [fake.VG_VOLUME]) self.assertEqual(0, nvme_library.LOG.error.call_count) self.assertEqual(0, nvme_library.LOG.info.call_count) self.assertEqual('deleted', model_update['status']) self.assertEqual('deleted', volumes[0]['status']) def test_create_group_snapshot_raise_exception(self): self.mock_object(volume_utils, 'is_group_a_cg_snapshot_type', return_value=True) mock_extract_host = self.mock_object( volume_utils, 'extract_host', return_value=fake.POOL_NAME) self.mock_object(self.client, 'create_cg_snapshot', side_effect=netapp_api.NaApiError) self.assertRaises(na_utils.NetAppDriverException, self.library.create_group_snapshot, fake.VOLUME_GROUP, [fake.VG_SNAPSHOT]) mock_extract_host.assert_called_once_with( fake.VG_SNAPSHOT['volume']['host'], level='pool') def test_create_group_snapshot(self): self.mock_object(volume_utils, 'is_group_a_cg_snapshot_type', return_value=False) self.mock_object(self.library, '_get_namespace_from_table', return_value=self.fake_namespace) mock_clone_namespace = self.mock_object(self.library, '_clone_namespace') model_update, snapshots_model_update = ( self.library.create_group_snapshot(fake.VOLUME_GROUP, [fake.SNAPSHOT])) self.assertIsNone(model_update) self.assertIsNone(snapshots_model_update) mock_clone_namespace.assert_called_once_with(self.fake_namespace.name, fake.SNAPSHOT['name']) def test_create_consistent_group_snapshot(self): self.mock_object(volume_utils, 'is_group_a_cg_snapshot_type', return_value=True) self.mock_object(volume_utils, 'extract_host', return_value=fake.POOL_NAME) mock_create_cg_snapshot = self.mock_object( self.client, 'create_cg_snapshot') mock_clone_namespace = self.mock_object(self.library, '_clone_namespace') mock_wait_for_busy_snapshot = self.mock_object( self.client, 'wait_for_busy_snapshot') mock_delete_snapshot = self.mock_object( self.client, 'delete_snapshot') model_update, snapshots_model_update = ( self.library.create_group_snapshot(fake.VOLUME_GROUP, [fake.VG_SNAPSHOT])) self.assertIsNone(model_update) self.assertIsNone(snapshots_model_update) mock_create_cg_snapshot.assert_called_once_with( set([fake.POOL_NAME]), fake.VOLUME_GROUP['id']) mock_clone_namespace.assert_called_once_with( fake.VG_SNAPSHOT['volume']['name'], fake.VG_SNAPSHOT['name'], ) mock_wait_for_busy_snapshot.assert_called_once_with( fake.POOL_NAME, 
fake.VOLUME_GROUP['id']) mock_delete_snapshot.assert_called_once_with( fake.POOL_NAME, fake.VOLUME_GROUP['id']) def test_create_group_from_src_snapshot(self): mock_clone_source_to_destination = self.mock_object( self.library, '_clone_source_to_destination') actual_return_value = self.library.create_group_from_src( fake.VOLUME_GROUP, [fake.VOLUME], group_snapshot=fake.VG_SNAPSHOT, snapshots=[fake.VG_VOLUME_SNAPSHOT]) clone_source_to_destination_args = { 'name': fake.VG_SNAPSHOT['name'], 'size': fake.VG_SNAPSHOT['volume_size'], } mock_clone_source_to_destination.assert_called_once_with( clone_source_to_destination_args, fake.VOLUME) expected_return_value = (None, []) self.assertEqual(expected_return_value, actual_return_value) def test_create_group_from_src_group(self): namespace_name = fake.SOURCE_VG_VOLUME['name'] mock_namespace = nvme_library.NetAppNamespace( namespace_name, namespace_name, '3', {'UUID': 'fake_uuid'}) self.mock_object(self.library, '_get_namespace_from_table', return_value=mock_namespace) mock_clone_source_to_destination = self.mock_object( self.library, '_clone_source_to_destination') actual_return_value = self.library.create_group_from_src( fake.VOLUME_GROUP, [fake.VOLUME], source_group=fake.SOURCE_VOLUME_GROUP, source_vols=[fake.SOURCE_VG_VOLUME]) clone_source_to_destination_args = { 'name': fake.SOURCE_VG_VOLUME['name'], 'size': fake.SOURCE_VG_VOLUME['size'], } expected_return_value = (None, []) mock_clone_source_to_destination.assert_called_once_with( clone_source_to_destination_args, fake.VOLUME) self.assertEqual(expected_return_value, actual_return_value) def test_delete_group_snapshot(self): mock_delete_namespace = self.mock_object(self.library, '_delete_namespace') model_update, snapshots_model_update = ( self.library.delete_group_snapshot(fake.VOLUME_GROUP, [fake.VG_SNAPSHOT])) self.assertIsNone(model_update) self.assertIsNone(snapshots_model_update) mock_delete_namespace.assert_called_once_with(fake.VG_SNAPSHOT['name']) def test_netapp_disaggregated_platform_config_true(self): """Test behavior when netapp_disaggregated_platform is True.""" self.library.configuration.netapp_disaggregated_platform = True # Mock the cluster pool map method mock_cluster_pool_map = self.mock_object( self.library, '_get_cluster_to_pool_map', return_value=fake.FAKE_CLUSTER_POOL_MAP) # Test _update_ssc uses cluster pool mapping self.library.ssc_library.update_ssc_asa = mock.Mock() self.library._update_ssc() mock_cluster_pool_map.assert_called_once_with() self.library.ssc_library.update_ssc_asa.assert_called_once_with( fake.FAKE_CLUSTER_POOL_MAP) def test_netapp_disaggregated_platform_config_false(self): """Test behavior when netapp_disaggregated_platform is False.""" self.library.configuration.netapp_disaggregated_platform = False mock_flexvol_pool_map = self.mock_object( self.library, '_get_flexvol_to_pool_map', return_value=fake.POOL_NAME) # Test _update_ssc uses flexvol pool mapping self.library.ssc_library.update_ssc = mock.Mock() self.library._update_ssc() mock_flexvol_pool_map.assert_called_once_with() self.library.ssc_library.update_ssc.assert_called_once_with( fake.POOL_NAME) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315577.2991202 cinder-27.0.0/cinder/tests/unit/volume/drivers/netapp/dataontap/utils/0000775000175000017500000000000000000000000026043 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 
cinder-27.0.0/cinder/tests/unit/volume/drivers/netapp/dataontap/utils/__init__.py0000664000175000017500000000000000000000000030142 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315528.0 cinder-27.0.0/cinder/tests/unit/volume/drivers/netapp/dataontap/utils/fakes.py0000664000175000017500000001403500000000000027511 0ustar00zuulzuul00000000000000# Copyright (c) 2016 Clinton Knight # All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from cinder.volume import configuration from cinder.volume import driver from cinder.volume.drivers.netapp import options as na_opts SSC_VSERVER = 'fake_vserver' SSC_VOLUMES = ('volume1', 'volume2') SSC_VOLUME_MAP = { SSC_VOLUMES[0]: { 'pool_name': SSC_VOLUMES[0], }, SSC_VOLUMES[1]: { 'pool_name': SSC_VOLUMES[1], }, } SSC_AGGREGATES = ('aggr1', 'aggr2') SSC = { 'volume1': { 'thick_provisioning_support': True, 'thin_provisioning_support': False, 'netapp_thin_provisioned': 'false', 'netapp_aggregate': 'aggr1', 'netapp_compression': 'false', 'netapp_dedup': 'true', 'netapp_mirrored': 'false', 'netapp_raid_type': 'raid_dp', 'netapp_disk_type': ['SSD'], 'netapp_hybrid_aggregate': 'false', 'netapp_flexvol_encryption': 'true', 'netapp_qos_min_support': 'true', 'pool_name': 'volume1', 'netapp_is_flexgroup': 'false', }, 'volume2': { 'thick_provisioning_support': False, 'thin_provisioning_support': True, 'netapp_thin_provisioned': 'true', 'netapp_aggregate': 'aggr2', 'netapp_compression': 'true', 'netapp_dedup': 'true', 'netapp_mirrored': 'true', 'netapp_raid_type': 'raid_dp', 'netapp_disk_type': ['FCAL', 'SSD'], 'netapp_hybrid_aggregate': 'true', 'netapp_flexvol_encryption': 'false', 'netapp_qos_min_support': 'false', 'pool_name': 'volume2', 'netapp_is_flexgroup': 'false', }, } SSC_FLEXVOL_INFO = { 'volume1': { 'thick_provisioning_support': True, 'thin_provisioning_support': False, 'netapp_thin_provisioned': 'false', 'netapp_aggregate': 'aggr1', 'netapp_is_flexgroup': 'false', }, 'volume2': { 'thick_provisioning_support': False, 'thin_provisioning_support': True, 'netapp_thin_provisioned': 'true', 'netapp_aggregate': 'aggr2', 'netapp_is_flexgroup': 'false', }, } SSC_DEDUPE_INFO = { 'volume1': { 'netapp_dedup': 'true', 'netapp_compression': 'false', }, 'volume2': { 'netapp_dedup': 'true', 'netapp_compression': 'true', }, } SSC_ENCRYPTION_INFO = { 'volume1': { 'netapp_flexvol_encryption': 'true', }, 'volume2': { 'netapp_flexvol_encryption': 'false', }, } SSC_QOS_MIN_INFO = { 'volume1': { 'netapp_qos_min_support': 'true', }, 'volume2': { 'netapp_qos_min_support': 'false', }, } SSC_VOLUME_COUNT_INFO = { 'volume1': { 'total_volumes': 3, }, 'volume2': { 'total_volumes': 2, }, } SSC_LUNS_BY_SIZES = [ { 'path': '/vol/volume-ae947c9b-2392-4956-b373-aaac4521f37e', 'size': 5368709120.0 }, { 'path': '/vol/snapshot-527eedad-a431-483d-b0ca-18995dd65b66', 'size': 1073741824.0 } ] SSC_NAMESPACES_BY_SIZES = [ { 'path': '/vol/namespace-ae947c9b-2392-4956-b373-aaac4521f37e', 'size': 5379821234.0 }, { 'path': 
'/vol/namespace-527eedad-a431-483d-b0ca-18995dd65b66', 'size': 4673741874.0 } ] SSC_MIRROR_INFO = { 'volume1': { 'netapp_mirrored': 'false', }, 'volume2': { 'netapp_mirrored': 'true', }, } SSC_AGGREGATE_INFO = { 'volume1': { 'netapp_disk_type': ['SSD'], 'netapp_raid_type': 'raid_dp', 'netapp_hybrid_aggregate': 'false', 'netapp_node_name': 'node1', }, 'volume2': { 'netapp_disk_type': ['FCAL', 'SSD'], 'netapp_raid_type': 'raid_dp', 'netapp_hybrid_aggregate': 'true', 'netapp_node_name': 'node2', }, } PROVISIONING_OPTS_FLEXGROUP = { 'aggregate': ['fake_aggregate'], 'thin_provisioned': True, 'snapshot_policy': None, 'language': 'en_US', 'dedupe_enabled': True, 'compression_enabled': True, 'snapshot_reserve': '12', 'volume_type': 'rw', 'size': 20, 'is_flexgroup': True, } PROVISIONING_OPTS = { 'aggregate': ['fake_aggregate'], 'thin_provisioned': True, 'snapshot_policy': None, 'language': 'en_US', 'dedupe_enabled': True, 'compression_enabled': True, 'snapshot_reserve': '12', 'volume_type': 'rw', 'size': 20, 'is_flexgroup': False, } ENCRYPTED_PROVISIONING_OPTS = { 'aggregate': ['fake_aggregate'], 'thin_provisioned': True, 'snapshot_policy': None, 'language': 'en_US', 'dedupe_enabled': False, 'compression_enabled': False, 'snapshot_reserve': '12', 'volume_type': 'rw', 'size': 20, 'encrypt': 'true', 'is_flexgroup': False, } def get_fake_cmode_config(backend_name): config = configuration.Configuration(driver.volume_opts, config_group=backend_name) config.append_config_values(na_opts.netapp_proxy_opts) config.append_config_values(na_opts.netapp_connection_opts) config.append_config_values(na_opts.netapp_transport_opts) config.append_config_values(na_opts.netapp_basicauth_opts) config.append_config_values(na_opts.netapp_certificateauth_opts) config.append_config_values(na_opts.netapp_provisioning_opts) config.append_config_values(na_opts.netapp_cluster_opts) config.append_config_values(na_opts.netapp_san_opts) config.append_config_values(na_opts.netapp_replication_opts) config.append_config_values(na_opts.netapp_support_opts) return config ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315528.0 cinder-27.0.0/cinder/tests/unit/volume/drivers/netapp/dataontap/utils/test_capabilities.py0000664000175000017500000005621200000000000032113 0ustar00zuulzuul00000000000000# Copyright (c) 2016 Clinton Knight # All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
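# Unit tests for cinder.volume.drivers.netapp.dataontap.utils.capabilities.
# The cases below exercise CapabilitiesLibrary storage service catalog (SSC)
# collection and extra-spec matching for both the iSCSI and NVMe protocols.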
import collections import copy from unittest import mock import ddt from cinder.tests.unit import test from cinder.tests.unit.volume.drivers.netapp.dataontap.client import ( fakes as fake_client) import cinder.tests.unit.volume.drivers.netapp.dataontap.utils.fakes as fake import cinder.tests.unit.volume.drivers.netapp.fakes as na_fakes from cinder.volume.drivers.netapp.dataontap.utils import capabilities @ddt.ddt class CapabilitiesLibraryTestCase(test.TestCase): def setUp(self): super(CapabilitiesLibraryTestCase, self).setUp() self.zapi_client = mock.Mock() self.configuration = self.get_config_cmode() self.ssc_library = capabilities.CapabilitiesLibrary( 'iSCSI', fake.SSC_VSERVER, self.zapi_client, self.configuration) self.ssc_library.ssc = fake.SSC self.ssc_library_nvme = capabilities.CapabilitiesLibrary( 'NVMe', fake.SSC_VSERVER, self.zapi_client, self.configuration) def get_config_cmode(self): config = na_fakes.create_configuration_cmode() config.volume_backend_name = 'fake_backend' return config def test_get_ssc(self): result = self.ssc_library.get_ssc() self.assertEqual(fake.SSC, result) self.assertIsNot(fake.SSC, result) def test_get_ssc_flexvol_names(self): result = self.ssc_library.get_ssc_flexvol_names() self.assertCountEqual(fake.SSC_VOLUMES, result) def test_get_ssc_for_flexvol(self): result = self.ssc_library.get_ssc_for_flexvol(fake.SSC_VOLUMES[0]) self.assertEqual(fake.SSC.get(fake.SSC_VOLUMES[0]), result) self.assertIsNot(fake.SSC.get(fake.SSC_VOLUMES[0]), result) def test_get_ssc_for_flexvol_not_found(self): result = self.ssc_library.get_ssc_for_flexvol('invalid') self.assertEqual({}, result) def test_get_ssc_aggregates(self): result = self.ssc_library.get_ssc_aggregates() self.assertCountEqual(list(fake.SSC_AGGREGATES), result) def test_is_qos_min_supported(self): ssc_pool = fake.SSC.get(fake.SSC_VOLUMES[0]) is_qos_min = ssc_pool['netapp_qos_min_support'] == 'true' result = self.ssc_library.is_qos_min_supported(ssc_pool['pool_name']) self.assertEqual(is_qos_min, result) def test_is_qos_min_supported_not_found(self): result = self.ssc_library.is_qos_min_supported('invalid_pool') self.assertFalse(result) @ddt.data('nfs', 'iscsi') def test_update_ssc(self, protocol): mock_get_ssc_flexvol_info = self.mock_object( self.ssc_library, '_get_ssc_flexvol_info', side_effect=[fake.SSC_FLEXVOL_INFO['volume1'], fake.SSC_FLEXVOL_INFO['volume2']]) mock_get_ssc_dedupe_info = self.mock_object( self.ssc_library, '_get_ssc_dedupe_info', side_effect=[fake.SSC_DEDUPE_INFO['volume1'], fake.SSC_DEDUPE_INFO['volume2']]) mock_get_ssc_mirror_info = self.mock_object( self.ssc_library, '_get_ssc_mirror_info', side_effect=[fake.SSC_MIRROR_INFO['volume1'], fake.SSC_MIRROR_INFO['volume2']]) mock_get_ssc_aggregate_info = self.mock_object( self.ssc_library, '_get_ssc_aggregate_info', side_effect=[fake.SSC_AGGREGATE_INFO['volume1'], fake.SSC_AGGREGATE_INFO['volume2']]) mock_get_ssc_encryption_info = self.mock_object( self.ssc_library, '_get_ssc_encryption_info', side_effect=[fake.SSC_ENCRYPTION_INFO['volume1'], fake.SSC_ENCRYPTION_INFO['volume2']]) mock_get_ssc_qos_min_info = self.mock_object( self.ssc_library, '_get_ssc_qos_min_info', side_effect=[fake.SSC_QOS_MIN_INFO['volume1'], fake.SSC_QOS_MIN_INFO['volume2']]) if protocol != 'nfs': mock_get_ssc_volume_count_info = self.mock_object( self.ssc_library, '_get_ssc_volume_count_info', side_effect=[fake.SSC_QOS_MIN_INFO['volume1'], fake.SSC_QOS_MIN_INFO['volume2']]) else: mock_get_ssc_volume_count_info = self.mock_object( self.ssc_library, 
'_get_ssc_volume_count_info', side_effect=None) ordered_ssc = collections.OrderedDict() ordered_ssc['volume1'] = fake.SSC_VOLUME_MAP['volume1'] ordered_ssc['volume2'] = fake.SSC_VOLUME_MAP['volume2'] result = self.ssc_library.update_ssc(ordered_ssc) if protocol != 'nfs': mock_get_ssc_volume_count_info.assert_has_calls([ mock.call('volume1'), mock.call('volume2')]) else: self.ssc_library._get_ssc_volume_count_info(fake.SSC_VOLUMES[0]).\ assert_not_called() self.assertIsNone(result) self.assertEqual(fake.SSC, self.ssc_library.ssc) mock_get_ssc_flexvol_info.assert_has_calls([ mock.call('volume1'), mock.call('volume2')]) mock_get_ssc_dedupe_info.assert_has_calls([ mock.call('volume1'), mock.call('volume2')]) mock_get_ssc_mirror_info.assert_has_calls([ mock.call('volume1'), mock.call('volume2')]) mock_get_ssc_aggregate_info.assert_has_calls([ mock.call('aggr1', is_flexgroup=False), mock.call('aggr2', is_flexgroup=False)]) mock_get_ssc_encryption_info.assert_has_calls([ mock.call('volume1'), mock.call('volume2')]) mock_get_ssc_qos_min_info.assert_has_calls([ mock.call('node1'), mock.call('node2')]) def test__update_for_failover(self): self.mock_object(self.ssc_library, 'update_ssc') flexvol_map = {'volume1': fake.SSC_VOLUME_MAP['volume1']} mock_client = mock.Mock(name='FAKE_ZAPI_CLIENT') self.ssc_library._update_for_failover(mock_client, flexvol_map) self.assertEqual(mock_client, self.ssc_library.zapi_client) self.ssc_library.update_ssc.assert_called_once_with(flexvol_map) @ddt.data({'lun_space_guarantee': True}, {'lun_space_guarantee': False}) @ddt.unpack def test_get_ssc_flexvol_info_thin_block(self, lun_space_guarantee): self.ssc_library.configuration.netapp_lun_space_reservation = \ 'enabled' if lun_space_guarantee else 'disabled' self.mock_object(self.ssc_library.zapi_client, 'get_flexvol', return_value=fake_client.VOLUME_INFO_SSC) result = self.ssc_library._get_ssc_flexvol_info( fake_client.VOLUME_NAMES[0]) expected = { 'netapp_thin_provisioned': 'true', 'thick_provisioning_support': False, 'thin_provisioning_support': True, 'netapp_aggregate': 'fake_aggr1', 'netapp_is_flexgroup': 'false', } self.assertEqual(expected, result) self.zapi_client.get_flexvol.assert_called_once_with( flexvol_name=fake_client.VOLUME_NAMES[0]) @ddt.data({'vol_space_guarantee': 'file', 'lun_space_guarantee': True}, {'vol_space_guarantee': 'volume', 'lun_space_guarantee': True}) @ddt.unpack def test_get_ssc_flexvol_info_thick_block(self, vol_space_guarantee, lun_space_guarantee): self.ssc_library.configuration.netapp_lun_space_reservation = \ 'enabled' if lun_space_guarantee else 'disabled' fake_volume_info_ssc = copy.deepcopy(fake_client.VOLUME_INFO_SSC) fake_volume_info_ssc['space-guarantee'] = vol_space_guarantee self.mock_object(self.ssc_library.zapi_client, 'get_flexvol', return_value=fake_volume_info_ssc) result = self.ssc_library._get_ssc_flexvol_info( fake_client.VOLUME_NAMES[0]) expected = { 'netapp_thin_provisioned': 'false', 'thick_provisioning_support': lun_space_guarantee, 'thin_provisioning_support': not lun_space_guarantee, 'netapp_aggregate': 'fake_aggr1', 'netapp_is_flexgroup': 'false', } self.assertEqual(expected, result) self.zapi_client.get_flexvol.assert_called_once_with( flexvol_name=fake_client.VOLUME_NAMES[0]) @ddt.data({'nfs_sparsed_volumes': True}, {'nfs_sparsed_volumes': False}) @ddt.unpack def test_get_ssc_flexvol_info_thin_file(self, nfs_sparsed_volumes): self.ssc_library.protocol = 'nfs' self.ssc_library.configuration.nfs_sparsed_volumes = \ nfs_sparsed_volumes 
self.mock_object(self.ssc_library.zapi_client, 'get_flexvol', return_value=fake_client.VOLUME_INFO_SSC) result = self.ssc_library._get_ssc_flexvol_info( fake_client.VOLUME_NAMES[0]) expected = { 'netapp_thin_provisioned': 'true', 'thick_provisioning_support': False, 'thin_provisioning_support': True, 'netapp_aggregate': 'fake_aggr1', 'netapp_is_flexgroup': 'false', } self.assertEqual(expected, result) self.zapi_client.get_flexvol.assert_called_once_with( flexvol_name=fake_client.VOLUME_NAMES[0]) @ddt.data({'vol_space_guarantee': 'file', 'nfs_sparsed_volumes': True}, {'vol_space_guarantee': 'volume', 'nfs_sparsed_volumes': False}) @ddt.unpack def test_get_ssc_flexvol_info_thick_file(self, vol_space_guarantee, nfs_sparsed_volumes): self.ssc_library.protocol = 'nfs' self.ssc_library.configuration.nfs_sparsed_volumes = \ nfs_sparsed_volumes fake_volume_info_ssc = copy.deepcopy(fake_client.VOLUME_INFO_SSC) fake_volume_info_ssc['space-guarantee'] = vol_space_guarantee self.mock_object(self.ssc_library.zapi_client, 'get_flexvol', return_value=fake_volume_info_ssc) result = self.ssc_library._get_ssc_flexvol_info( fake_client.VOLUME_NAMES[0]) expected = { 'netapp_thin_provisioned': 'false', 'thick_provisioning_support': not nfs_sparsed_volumes, 'thin_provisioning_support': nfs_sparsed_volumes, 'netapp_aggregate': 'fake_aggr1', 'netapp_is_flexgroup': 'false', } self.assertEqual(expected, result) self.zapi_client.get_flexvol.assert_called_once_with( flexvol_name=fake_client.VOLUME_NAMES[0]) @ddt.data([], ['netapp_dedup'], ['netapp_compression']) def test_get_ssc_dedupe_info(self, invalid_extra_specs): self.ssc_library.invalid_extra_specs = invalid_extra_specs self.mock_object( self.ssc_library.zapi_client, 'get_flexvol_dedupe_info', return_value=fake_client.VOLUME_DEDUPE_INFO_SSC) result = self.ssc_library._get_ssc_dedupe_info( fake_client.VOLUME_NAMES[0]) if invalid_extra_specs: expected = { 'netapp_dedup': 'false', 'netapp_compression': 'false', } self.zapi_client.get_flexvol_dedupe_info.assert_not_called() else: expected = { 'netapp_dedup': 'true', 'netapp_compression': 'false', } self.zapi_client.get_flexvol_dedupe_info.assert_called_once_with( fake_client.VOLUME_NAMES[0]) self.assertEqual(expected, result) def test_get_ssc_encryption_info(self): self.mock_object( self.ssc_library.zapi_client, 'is_flexvol_encrypted', return_value=True) result = self.ssc_library._get_ssc_encryption_info( fake_client.VOLUME_NAMES[0]) expected = { 'netapp_flexvol_encryption': 'true', } self.assertEqual(expected, result) self.zapi_client.is_flexvol_encrypted.assert_called_once_with( fake_client.VOLUME_NAMES[0], fake_client.VOLUME_VSERVER_NAME) @ddt.data(True, False) def test_get_ssc_mirror_info(self, mirrored): self.mock_object( self.ssc_library.zapi_client, 'is_flexvol_mirrored', return_value=mirrored) result = self.ssc_library._get_ssc_mirror_info( fake_client.VOLUME_NAMES[0]) expected = {'netapp_mirrored': 'true' if mirrored else 'false'} self.assertEqual(expected, result) self.zapi_client.is_flexvol_mirrored.assert_called_once_with( fake_client.VOLUME_NAMES[0], fake.SSC_VSERVER) @ddt.data({'invalid_extra_specs': [], 'is_fg': False}, {'invalid_extra_specs': ['netapp_raid_type'], 'is_fg': False}, {'invalid_extra_specs': [], 'is_fg': True}) @ddt.unpack def test_get_ssc_aggregate_info(self, invalid_extra_specs, is_fg): self.ssc_library.invalid_extra_specs = invalid_extra_specs self.mock_object( self.ssc_library.zapi_client, 'get_aggregate', return_value=fake_client.AGGR_INFO_SSC) self.mock_object( 
self.ssc_library.zapi_client, 'get_aggregate_disk_types', return_value=fake_client.AGGREGATE_DISK_TYPES) aggr_name = fake_client.VOLUME_AGGREGATE_NAME if is_fg: aggr_name = [fake_client.VOLUME_AGGREGATE_NAME] result = self.ssc_library._get_ssc_aggregate_info(aggr_name, is_flexgroup=is_fg) if invalid_extra_specs: expected = { 'netapp_disk_type': None, 'netapp_raid_type': None, 'netapp_hybrid_aggregate': None, 'netapp_node_name': None, } self.zapi_client.get_aggregate.assert_not_called() self.zapi_client.get_aggregate_disk_types.assert_not_called() else: expected = { 'netapp_disk_type': fake_client.AGGREGATE_DISK_TYPES, 'netapp_raid_type': fake_client.AGGREGATE_RAID_TYPE, 'netapp_hybrid_aggregate': 'true', 'netapp_node_name': fake_client.NODE_NAME, } if is_fg: result['netapp_disk_type'] = sorted( result['netapp_disk_type']) expected['netapp_disk_type'] = sorted( expected['netapp_disk_type']) expected['netapp_raid_type'] = [ fake_client.AGGREGATE_RAID_TYPE] expected['netapp_node_name'] = [ fake_client.NODE_NAME] expected['netapp_hybrid_aggregate'] = ['true'] self.zapi_client.get_aggregate.assert_called_once_with( fake_client.VOLUME_AGGREGATE_NAME) self.zapi_client.get_aggregate_disk_types.assert_called_once_with( fake_client.VOLUME_AGGREGATE_NAME) self.assertEqual(expected, result) def test_get_ssc_aggregate_info_not_found(self): self.ssc_library.invalid_extra_specs = ['netapp_raid_type'] self.mock_object( self.ssc_library.zapi_client, 'get_aggregate', return_value={}) self.mock_object( self.ssc_library.zapi_client, 'get_aggregate_disk_types', return_value=None) result = self.ssc_library._get_ssc_aggregate_info( fake_client.VOLUME_AGGREGATE_NAME) expected = { 'netapp_disk_type': None, 'netapp_raid_type': None, 'netapp_hybrid_aggregate': None, 'netapp_node_name': None, } self.assertEqual(expected, result) def test_get_matching_flexvols_for_extra_specs(self): specs = { 'thick_provisioning_support': ' False', 'netapp_compression': 'true', 'netapp_dedup': 'true', 'netapp_mirrored': 'true', 'netapp_raid_type': 'raid_dp', 'netapp_disk_type': 'FCAL', 'non_ssc_key': 'fake_value', } result = self.ssc_library.get_matching_flexvols_for_extra_specs(specs) self.assertEqual(['volume2'], result) @ddt.data( { 'flexvol_info': { 'netapp_dedup': 'true', }, 'extra_specs': { 'netapp_dedup': 'true', 'non_ssc_key': 'fake_value', } }, { 'flexvol_info': fake.SSC['volume1'], 'extra_specs': { 'netapp_disk_type': 'SSD', 'pool_name': 'volume1', } }, { 'flexvol_info': fake.SSC['volume2'], 'extra_specs': { 'netapp_disk_type': 'SSD', 'netapp_hybrid_aggregate': 'true', } } ) @ddt.unpack def test_flexvol_matches_extra_specs(self, flexvol_info, extra_specs): result = self.ssc_library._flexvol_matches_extra_specs(flexvol_info, extra_specs) self.assertTrue(result) @ddt.data( { 'flexvol_info': { 'netapp_dedup': 'true', }, 'extra_specs': { 'netapp_dedup': 'false', 'non_ssc_key': 'fake_value', } }, { 'flexvol_info': fake.SSC['volume2'], 'extra_specs': { 'netapp_disk_type': 'SSD', 'pool_name': 'volume1', } }, { 'flexvol_info': fake.SSC['volume2'], 'extra_specs': { 'netapp_disk_type': 'SATA', } } ) @ddt.unpack def test_flexvol_matches_extra_specs_no_match(self, flexvol_info, extra_specs): result = self.ssc_library._flexvol_matches_extra_specs(flexvol_info, extra_specs) self.assertFalse(result) @ddt.data(('SSD', 'SSD'), ('SSD', ['SSD', 'FCAL'])) @ddt.unpack def test_extra_spec_matches(self, extra_spec_value, ssc_flexvol_value): result = self.ssc_library._extra_spec_matches(extra_spec_value, ssc_flexvol_value) self.assertTrue(result) 
@ddt.data(('SSD', 'FCAL'), ('SSD', ['FCAL'])) @ddt.unpack def test_extra_spec_matches_no_match(self, extra_spec_value, ssc_flexvol_value): result = self.ssc_library._extra_spec_matches(extra_spec_value, ssc_flexvol_value) self.assertFalse(result) def test_modify_extra_specs_for_comparison(self): specs = { 'thick_provisioning_support': ' False', 'thin_provisioning_support': ' true', 'netapp_compression': 'true', } result = self.ssc_library._modify_extra_specs_for_comparison(specs) expected = { 'thick_provisioning_support': False, 'thin_provisioning_support': True, 'netapp_compression': 'true', } self.assertEqual(expected, result) @ddt.data([], ['netapp_dedup'], ['netapp_compression']) def test_cluster_user_supported(self, invalid_extra_specs): self.ssc_library.invalid_extra_specs = invalid_extra_specs if invalid_extra_specs: self.assertFalse(self.ssc_library.cluster_user_supported()) else: self.assertTrue(self.ssc_library.cluster_user_supported()) def test_get_ssc_qos_min_info(self): self.mock_object( self.ssc_library.zapi_client, 'is_qos_min_supported', return_value=True) result = self.ssc_library._get_ssc_qos_min_info('node') expected = { 'netapp_qos_min_support': 'true', } self.assertEqual(expected, result) self.zapi_client.is_qos_min_supported.assert_called_once_with(False, 'node') @ddt.data(False, True) def test_get_ssc_qos_min_info_flexgroup(self, qos_min_support): self.mock_object( self.ssc_library.zapi_client, 'is_qos_min_supported', return_value=qos_min_support) result = self.ssc_library._get_ssc_qos_min_info(['node']) expected = { 'netapp_qos_min_support': 'true' if qos_min_support else 'false', } self.assertEqual(expected, result) self.zapi_client.is_qos_min_supported.assert_called_once_with(False, 'node') @ddt.data('iscsi', 'fc', 'nvme') def test_get_ssc_volume_count_info(self, protocol): self.ssc_library = self.ssc_library_nvme if protocol == 'nvme' else \ self.ssc_library self.mock_object(self.ssc_library.zapi_client, 'get_namespace_sizes_by_volume', return_value=fake.SSC_NAMESPACES_BY_SIZES) self.mock_object(self.ssc_library.zapi_client, 'get_lun_sizes_by_volume', return_value=fake.SSC_LUNS_BY_SIZES) result = self.ssc_library._get_ssc_volume_count_info( fake_client.VOLUME_NAMES[0]) expected = {'total_volumes': 2} self.assertEqual(expected, result) if protocol != 'nvme': self.zapi_client.get_lun_sizes_by_volume.\ assert_called_once_with(fake_client.VOLUME_NAMES[0]) self.zapi_client.get_namespace_sizes_by_volume.assert_not_called() else: self.zapi_client.get_namespace_sizes_by_volume.\ assert_called_once_with(fake_client.VOLUME_NAMES[0]) self.zapi_client.get_lun_sizes_by_volume.assert_not_called() @ddt.data(True, False) def test_is_flexgroup(self, is_fg): pool_name = 'fake_pool' self.ssc_library.ssc = { pool_name: { 'pool_name': pool_name, 'netapp_is_flexgroup': 'true' if is_fg else 'false', }, } if not is_fg: pool_name = 'no_pool' is_fg_returned = self.ssc_library.is_flexgroup(pool_name) self.assertEqual(is_fg_returned, is_fg) @ddt.data(True, False) def test_contains_flexgroup(self, contains_fg): self.ssc_library.ssc = { 'fake_pool': { 'netapp_is_flexgroup': 'true' if contains_fg else 'false', }, } contains_fg_returned = self.ssc_library.contains_flexgroup_pool() self.assertEqual(contains_fg_returned, contains_fg) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/volume/drivers/netapp/dataontap/utils/test_data_motion.py0000664000175000017500000022653200000000000031764 
0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import copy import time from unittest import mock import ddt from oslo_config import cfg from cinder import exception from cinder.tests.unit import fake_volume from cinder.tests.unit import test from cinder.tests.unit import utils as test_utils from cinder.tests.unit.volume.drivers.netapp.dataontap import fakes as \ dataontap_fakes from cinder.tests.unit.volume.drivers.netapp.dataontap.utils import fakes from cinder.volume import configuration from cinder.volume import driver from cinder.volume.drivers.netapp.dataontap.client import api as netapp_api from cinder.volume.drivers.netapp.dataontap.client import client_cmode from cinder.volume.drivers.netapp.dataontap.utils import data_motion from cinder.volume.drivers.netapp.dataontap.utils import utils from cinder.volume.drivers.netapp import options as na_opts from cinder.volume.drivers.netapp import utils as na_utils CONF = cfg.CONF @ddt.ddt class NetAppCDOTDataMotionMixinTestCase(test.TestCase): def setUp(self): super(NetAppCDOTDataMotionMixinTestCase, self).setUp() self.dm_mixin = data_motion.DataMotionMixin() self.src_backend = 'backend1' self.dest_backend = 'backend2' self.src_vserver = 'source_vserver' self.dest_vserver = 'dest_vserver' self._setup_mock_config() self.mock_cmode_client = self.mock_object(client_cmode, 'Client') self.src_flexvol_name = 'volume_c02d497a_236c_4852_812a_0d39373e312a' self.dest_flexvol_name = self.src_flexvol_name self.src_cg = '' self.dest_cg = '' self.active_sync_policy = False self.replication_policy = 'MirrorAllSnapshots' self.mock_src_client = mock.Mock() self.mock_dest_client = mock.Mock() self.config = fakes.get_fake_cmode_config(self.src_backend) self.mock_object(utils, 'get_backend_configuration', side_effect=[self.mock_dest_config, self.mock_src_config]) self.mock_object(utils, 'get_client_for_backend', side_effect=[self.mock_dest_client, self.mock_src_client]) def _setup_mock_config(self): self.mock_src_config = configuration.Configuration( driver.volume_opts, config_group=self.src_backend) self.mock_dest_config = configuration.Configuration( driver.volume_opts, config_group=self.dest_backend) for config in (self.mock_src_config, self.mock_dest_config): config.append_config_values(na_opts.netapp_proxy_opts) config.append_config_values(na_opts.netapp_connection_opts) config.append_config_values(na_opts.netapp_transport_opts) config.append_config_values(na_opts.netapp_basicauth_opts) config.append_config_values(na_opts.netapp_certificateauth_opts) config.append_config_values(na_opts.netapp_provisioning_opts) config.append_config_values(na_opts.netapp_cluster_opts) config.append_config_values(na_opts.netapp_san_opts) config.append_config_values(na_opts.netapp_replication_opts) config.netapp_snapmirror_quiesce_timeout = 10 CONF.set_override('netapp_vserver', self.src_vserver, group=self.src_backend) CONF.set_override('netapp_vserver', self.dest_vserver, group=self.dest_backend) @ddt.data(None, [], [{'some_key': 'some_value'}]) def 
test_get_replication_backend_names_none(self, replication_device): CONF.set_override('replication_device', replication_device, group=self.src_backend) devices = self.dm_mixin.get_replication_backend_names(self.config) self.assertEqual(0, len(devices)) @ddt.data([{'backend_id': 'xyzzy'}, {'backend_id': 'spoon!'}], [{'backend_id': 'foobar'}]) def test_get_replication_backend_names_valid(self, replication_device): CONF.set_override('replication_device', replication_device, group=self.src_backend) devices = self.dm_mixin.get_replication_backend_names(self.config) self.assertEqual(len(replication_device), len(devices)) def test_get_snapmirrors(self): self.mock_object(self.mock_dest_client, 'get_snapmirrors') self.dm_mixin.get_snapmirrors(self.src_backend, self.dest_backend, self.src_flexvol_name, self.dest_flexvol_name) self.mock_dest_client.get_snapmirrors.assert_called_with( self.src_vserver, self.src_flexvol_name, self.dest_vserver, self.dest_flexvol_name, desired_attributes=['relationship-status', 'mirror-state', 'source-vserver', 'source-volume', 'destination-vserver', 'destination-volume', 'last-transfer-end-timestamp', 'lag-time']) self.assertEqual(1, self.mock_dest_client.get_snapmirrors.call_count) @ddt.data([], ['backend1'], ['backend1', 'backend2']) def test_get_replication_backend_stats(self, replication_backend_names): self.mock_object(self.dm_mixin, 'get_replication_backend_names', return_value=replication_backend_names) enabled_stats = { 'replication_count': len(replication_backend_names), 'replication_targets': replication_backend_names, 'replication_type': 'async', } expected_stats = { 'replication_enabled': len(replication_backend_names) > 0, } if len(replication_backend_names) > 0: expected_stats.update(enabled_stats) actual_stats = self.dm_mixin.get_replication_backend_stats(self.config) self.assertDictEqual(expected_stats, actual_stats) @ddt.data(None, [], [{'backend_id': 'replication_backend_2', 'aggr2': 'aggr20'}]) def test_get_replication_aggregate_map_none(self, replication_aggr_map): self.mock_object(utils, 'get_backend_configuration', return_value=self.config) CONF.set_override('netapp_replication_aggregate_map', replication_aggr_map, group=self.src_backend) aggr_map = self.dm_mixin._get_replication_aggregate_map( self.src_backend, 'replication_backend_1') self.assertEqual(0, len(aggr_map)) @ddt.data([{'backend_id': 'replication_backend_1', 'aggr1': 'aggr10'}], [{'backend_id': 'replication_backend_1', 'aggr1': 'aggr10'}, {'backend_id': 'replication_backend_2', 'aggr2': 'aggr20'}]) def test_get_replication_aggregate_map_valid(self, replication_aggr_map): self.mock_object(utils, 'get_backend_configuration', return_value=self.config) CONF.set_override('netapp_replication_aggregate_map', replication_aggr_map, group=self.src_backend) aggr_map = self.dm_mixin._get_replication_aggregate_map( self.src_backend, 'replication_backend_1') self.assertDictEqual({'aggr1': 'aggr10'}, aggr_map) @ddt.data({'dest_exists': True, 'is_flexgroup': False}, {'dest_exists': True, 'is_flexgroup': True}, {'dest_exists': False, 'is_flexgroup': False}, {'dest_exists': False, 'is_flexgroup': True}) @ddt.unpack def test_create_snapmirror_dest_flexvol_exists(self, dest_exists, is_flexgroup): mock_dest_client = mock.Mock() mock_src_client = mock.Mock() self.mock_object(mock_dest_client, 'flexvol_exists', return_value=dest_exists) self.mock_object(mock_dest_client, 'get_snapmirrors', return_value=None) create_destination_flexvol = self.mock_object( self.dm_mixin, 'create_destination_flexvol') 
self.mock_object(utils, 'get_client_for_backend', side_effect=[mock_dest_client, mock_src_client]) mock_provisioning_options = mock.Mock() mock_provisioning_options.get.return_value = is_flexgroup self.mock_object(mock_src_client, 'get_provisioning_options_from_flexvol', return_value=mock_provisioning_options) self.dm_mixin.create_snapmirror(self.src_backend, self.dest_backend, self.src_flexvol_name, self.dest_flexvol_name, self.replication_policy) if not dest_exists: create_destination_flexvol.assert_called_once_with( self.src_backend, self.dest_backend, self.src_flexvol_name, self.dest_flexvol_name, pool_is_flexgroup=is_flexgroup) else: self.assertFalse(create_destination_flexvol.called) sync_mirror = False mock_dest_client.create_snapmirror.assert_called_once_with( self.src_vserver, self.src_flexvol_name, self.dest_vserver, self.dest_flexvol_name, self.src_cg, self.dest_cg, schedule='hourly', policy=self.replication_policy, relationship_type=('extended_data_protection' if is_flexgroup or sync_mirror else 'data_protection')) mock_dest_client.initialize_snapmirror.assert_called_once_with( self.src_vserver, self.src_flexvol_name, self.dest_vserver, self.dest_flexvol_name, self.active_sync_policy) def test_create_snapmirror_cleanup_on_geometry_has_changed(self): mock_dest_client = mock.Mock() mock_src_client = mock.Mock() self.mock_object(mock_dest_client, 'flexvol_exists', return_value=True) self.mock_object(mock_dest_client, 'get_snapmirrors', return_value=None) create_destination_flexvol = self.mock_object( self.dm_mixin, 'create_destination_flexvol') mock_delete_snapshot = self.mock_object( self.dm_mixin, 'delete_snapmirror' ) self.mock_object(utils, 'get_client_for_backend', side_effect=[mock_dest_client, mock_src_client]) geometry_exception_message = ("Geometry of the destination FlexGroup " "has been changed since the SnapMirror " "relationship was created.") mock_dest_client.initialize_snapmirror.side_effect = [ netapp_api.NaApiError(code=netapp_api.EAPIERROR, message=geometry_exception_message), ] mock_provisioning_options = mock.Mock() mock_provisioning_options.get.return_value = False self.mock_object(mock_src_client, 'get_provisioning_options_from_flexvol', return_value=mock_provisioning_options) self.assertRaises(na_utils.GeometryHasChangedOnDestination, self.dm_mixin.create_snapmirror, self.src_backend, self.dest_backend, self.src_flexvol_name, self.dest_flexvol_name, self.replication_policy) self.assertFalse(create_destination_flexvol.called) mock_dest_client.create_snapmirror.assert_called_once_with( self.src_vserver, self.src_flexvol_name, self.dest_vserver, self.dest_flexvol_name, self.src_cg, self.dest_cg, schedule='hourly', policy=self.replication_policy, relationship_type='data_protection') mock_dest_client.initialize_snapmirror.assert_called_once_with( self.src_vserver, self.src_flexvol_name, self.dest_vserver, self.dest_flexvol_name, self.active_sync_policy) mock_delete_snapshot.assert_called_once_with( self.src_backend, self.dest_backend, self.src_flexvol_name, self.dest_flexvol_name) @ddt.data('uninitialized', 'broken-off', 'snapmirrored') def test_create_snapmirror_snapmirror_exists_state(self, mirror_state): mock_dest_client = mock.Mock() existing_snapmirrors = [{'mirror-state': mirror_state}] self.mock_object(self.dm_mixin, 'create_destination_flexvol') self.mock_object(utils, 'get_client_for_backend', return_value=mock_dest_client) self.mock_object(mock_dest_client, 'flexvol_exists', return_value=True) self.mock_object(mock_dest_client, 'get_snapmirrors', 
return_value=existing_snapmirrors) self.dm_mixin.create_snapmirror(self.src_backend, self.dest_backend, self.src_flexvol_name, self.dest_flexvol_name, self.replication_policy) self.assertFalse(mock_dest_client.create_snapmirror.called) self.assertFalse(mock_dest_client.initialize_snapmirror.called) self.assertFalse(self.dm_mixin.create_destination_flexvol.called) if mirror_state == 'snapmirrored': self.assertFalse(mock_dest_client.resume_snapmirror.called) self.assertFalse(mock_dest_client.resync_snapmirror.called) else: mock_dest_client.resume_snapmirror.assert_called_once_with( self.src_vserver, self.src_flexvol_name, self.dest_vserver, self.dest_flexvol_name) mock_dest_client.resync_snapmirror.assert_called_once_with( self.src_vserver, self.src_flexvol_name, self.dest_vserver, self.dest_flexvol_name) @ddt.data('resume_snapmirror', 'resync_snapmirror') def test_create_snapmirror_snapmirror_exists_repair_exception(self, failed_call): mock_dest_client = mock.Mock() mock_exception_log = self.mock_object(data_motion.LOG, 'exception') existing_snapmirrors = [{'mirror-state': 'broken-off'}] self.mock_object(self.dm_mixin, 'create_destination_flexvol') self.mock_object(utils, 'get_client_for_backend', return_value=mock_dest_client) self.mock_object(mock_dest_client, 'flexvol_exists', return_value=True) self.mock_object(mock_dest_client, 'get_snapmirrors', return_value=existing_snapmirrors) self.mock_object(mock_dest_client, failed_call, side_effect=netapp_api.NaApiError) self.dm_mixin.create_snapmirror(self.src_backend, self.dest_backend, self.src_flexvol_name, self.dest_flexvol_name, self.replication_policy) self.assertFalse(mock_dest_client.create_snapmirror.called) self.assertFalse(mock_dest_client.initialize_snapmirror.called) self.assertFalse(self.dm_mixin.create_destination_flexvol.called) mock_dest_client.resume_snapmirror.assert_called_once_with( self.src_vserver, self.src_flexvol_name, self.dest_vserver, self.dest_flexvol_name) if failed_call == 'resync_snapmirror': mock_dest_client.resync_snapmirror.assert_called_once_with( self.src_vserver, self.src_flexvol_name, self.dest_vserver, self.dest_flexvol_name) self.assertEqual(1, mock_exception_log.call_count) def test_delete_snapmirror(self): mock_src_client = mock.Mock() mock_dest_client = mock.Mock() self.mock_object(utils, 'get_client_for_backend', side_effect=[mock_dest_client, mock_src_client]) self.dm_mixin.delete_snapmirror(self.src_backend, self.dest_backend, self.src_flexvol_name, self.dest_flexvol_name) mock_dest_client.abort_snapmirror.assert_called_once_with( self.src_vserver, self.src_flexvol_name, self.dest_vserver, self.dest_flexvol_name, clear_checkpoint=False) mock_dest_client.delete_snapmirror.assert_called_once_with( self.src_vserver, self.src_flexvol_name, self.dest_vserver, self.dest_flexvol_name) mock_src_client.release_snapmirror.assert_called_once_with( self.src_vserver, self.src_flexvol_name, self.dest_vserver, self.dest_flexvol_name) def test_delete_snapmirror_does_not_exist(self): """Ensure delete succeeds when the snapmirror does not exist.""" mock_src_client = mock.Mock() mock_dest_client = mock.Mock() mock_dest_client.abort_snapmirror.side_effect = netapp_api.NaApiError( code=netapp_api.EAPIERROR) self.mock_object(utils, 'get_client_for_backend', side_effect=[mock_dest_client, mock_src_client]) self.dm_mixin.delete_snapmirror(self.src_backend, self.dest_backend, self.src_flexvol_name, self.dest_flexvol_name) mock_dest_client.abort_snapmirror.assert_called_once_with( self.src_vserver, self.src_flexvol_name, 
self.dest_vserver, self.dest_flexvol_name, clear_checkpoint=False) mock_dest_client.delete_snapmirror.assert_called_once_with( self.src_vserver, self.src_flexvol_name, self.dest_vserver, self.dest_flexvol_name) mock_src_client.release_snapmirror.assert_called_once_with( self.src_vserver, self.src_flexvol_name, self.dest_vserver, self.dest_flexvol_name) def test_delete_snapmirror_error_deleting(self): """Ensure delete succeeds when the snapmirror does not exist.""" mock_src_client = mock.Mock() mock_dest_client = mock.Mock() mock_dest_client.delete_snapmirror.side_effect = netapp_api.NaApiError( code=netapp_api.ESOURCE_IS_DIFFERENT ) self.mock_object(utils, 'get_client_for_backend', side_effect=[mock_dest_client, mock_src_client]) self.dm_mixin.delete_snapmirror(self.src_backend, self.dest_backend, self.src_flexvol_name, self.dest_flexvol_name) mock_dest_client.abort_snapmirror.assert_called_once_with( self.src_vserver, self.src_flexvol_name, self.dest_vserver, self.dest_flexvol_name, clear_checkpoint=False) mock_dest_client.delete_snapmirror.assert_called_once_with( self.src_vserver, self.src_flexvol_name, self.dest_vserver, self.dest_flexvol_name) mock_src_client.release_snapmirror.assert_called_once_with( self.src_vserver, self.src_flexvol_name, self.dest_vserver, self.dest_flexvol_name) def test_delete_snapmirror_error_releasing(self): """Ensure delete succeeds when the snapmirror does not exist.""" mock_src_client = mock.Mock() mock_dest_client = mock.Mock() mock_src_client.release_snapmirror.side_effect = ( netapp_api.NaApiError(code=netapp_api.EOBJECTNOTFOUND)) self.mock_object(utils, 'get_client_for_backend', side_effect=[mock_dest_client, mock_src_client]) self.dm_mixin.delete_snapmirror(self.src_backend, self.dest_backend, self.src_flexvol_name, self.dest_flexvol_name) mock_dest_client.abort_snapmirror.assert_called_once_with( self.src_vserver, self.src_flexvol_name, self.dest_vserver, self.dest_flexvol_name, clear_checkpoint=False) mock_dest_client.delete_snapmirror.assert_called_once_with( self.src_vserver, self.src_flexvol_name, self.dest_vserver, self.dest_flexvol_name) mock_src_client.release_snapmirror.assert_called_once_with( self.src_vserver, self.src_flexvol_name, self.dest_vserver, self.dest_flexvol_name) def test_delete_snapmirror_without_release(self): mock_src_client = mock.Mock() mock_dest_client = mock.Mock() self.mock_object(utils, 'get_client_for_backend', side_effect=[mock_dest_client, mock_src_client]) self.dm_mixin.delete_snapmirror(self.src_backend, self.dest_backend, self.src_flexvol_name, self.dest_flexvol_name, release=False) mock_dest_client.abort_snapmirror.assert_called_once_with( self.src_vserver, self.src_flexvol_name, self.dest_vserver, self.dest_flexvol_name, clear_checkpoint=False) mock_dest_client.delete_snapmirror.assert_called_once_with( self.src_vserver, self.src_flexvol_name, self.dest_vserver, self.dest_flexvol_name) self.assertFalse(mock_src_client.release_snapmirror.called) def test_delete_snapmirror_source_unreachable(self): mock_src_client = mock.Mock() mock_dest_client = mock.Mock() self.mock_object(utils, 'get_client_for_backend', side_effect=[mock_dest_client, Exception]) self.dm_mixin.delete_snapmirror(self.src_backend, self.dest_backend, self.src_flexvol_name, self.dest_flexvol_name) mock_dest_client.abort_snapmirror.assert_called_once_with( self.src_vserver, self.src_flexvol_name, self.dest_vserver, self.dest_flexvol_name, clear_checkpoint=False) mock_dest_client.delete_snapmirror.assert_called_once_with( self.src_vserver, 
self.src_flexvol_name, self.dest_vserver, self.dest_flexvol_name) self.assertFalse(mock_src_client.release_snapmirror.called) def test_quiesce_then_abort_timeout(self): self.mock_object(time, 'sleep') mock_get_snapmirrors = mock.Mock( return_value=[{'relationship-status': 'transferring'}]) self.mock_object(self.mock_dest_client, 'get_snapmirrors', mock_get_snapmirrors) self.dm_mixin.quiesce_then_abort(self.src_backend, self.dest_backend, self.src_flexvol_name, self.dest_flexvol_name) self.mock_dest_client.get_snapmirrors.assert_called_with( self.src_vserver, self.src_flexvol_name, self.dest_vserver, self.dest_flexvol_name, desired_attributes=['relationship-status', 'mirror-state']) self.assertEqual(2, self.mock_dest_client.get_snapmirrors.call_count) self.mock_dest_client.quiesce_snapmirror.assert_called_with( self.src_vserver, self.src_flexvol_name, self.dest_vserver, self.dest_flexvol_name) self.mock_dest_client.abort_snapmirror.assert_called_once_with( self.src_vserver, self.src_flexvol_name, self.dest_vserver, self.dest_flexvol_name, clear_checkpoint=False) def test_update_snapmirror(self): self.mock_object(self.mock_dest_client, 'get_snapmirrors') self.dm_mixin.update_snapmirror(self.src_backend, self.dest_backend, self.src_flexvol_name, self.dest_flexvol_name) self.mock_dest_client.update_snapmirror.assert_called_once_with( self.src_vserver, self.src_flexvol_name, self.dest_vserver, self.dest_flexvol_name) def test_create_vserver_peer(self): mock_get_client_for_backend = self.mock_object( utils, 'get_client_for_backend') get_vserver_peer_response = [] mock_get_vserver_peers = mock_get_client_for_backend.return_value.\ get_vserver_peers mock_get_vserver_peers.return_value = get_vserver_peer_response mock_create_vserver_peer = mock_get_client_for_backend.return_value.\ create_vserver_peer mock_create_vserver_peer.return_value = None peer_applications = ['snapmirror'] result = self.dm_mixin.create_vserver_peer( dataontap_fakes.VSERVER_NAME, self.src_backend, dataontap_fakes.DEST_VSERVER_NAME, peer_applications) mock_get_vserver_peers.assert_called_once_with( dataontap_fakes.VSERVER_NAME, dataontap_fakes.DEST_VSERVER_NAME) mock_create_vserver_peer.assert_called_once_with( dataontap_fakes.VSERVER_NAME, dataontap_fakes.DEST_VSERVER_NAME, vserver_peer_application=peer_applications) self.assertIsNone(result) def test_create_vserver_peer_already_exists(self): mock_get_client_for_backend = self.mock_object( utils, 'get_client_for_backend') get_vserver_peer_response = [{ 'vserver': dataontap_fakes.VSERVER_NAME, 'peer-vserver': dataontap_fakes.DEST_VSERVER_NAME, 'peer-state': 'peered', 'peer-cluster': dataontap_fakes.CLUSTER_NAME, 'applications': ['snapmirror'] }] mock_get_vserver_peers = mock_get_client_for_backend.return_value. \ get_vserver_peers mock_get_vserver_peers.return_value = get_vserver_peer_response mock_create_vserver_peer = mock_get_client_for_backend.return_value. 
\ create_vserver_peer mock_create_vserver_peer.return_value = None peer_applications = ['snapmirror'] result = self.dm_mixin.create_vserver_peer( dataontap_fakes.VSERVER_NAME, self.src_backend, dataontap_fakes.DEST_VSERVER_NAME, peer_applications) mock_get_vserver_peers.assert_called_once_with( dataontap_fakes.VSERVER_NAME, dataontap_fakes.DEST_VSERVER_NAME) mock_create_vserver_peer.assert_not_called() self.assertIsNone(result) def test_create_vserver_peer_application_not_defined(self): mock_get_client_for_backend = self.mock_object( utils, 'get_client_for_backend') get_vserver_peer_response = [{ 'vserver': dataontap_fakes.VSERVER_NAME, 'peer-vserver': dataontap_fakes.DEST_VSERVER_NAME, 'peer-state': 'peered', 'peer-cluster': dataontap_fakes.CLUSTER_NAME, 'applications': ['snapmirror'] }] mock_get_vserver_peers = mock_get_client_for_backend.return_value. \ get_vserver_peers mock_get_vserver_peers.return_value = get_vserver_peer_response mock_create_vserver_peer = mock_get_client_for_backend.return_value. \ create_vserver_peer mock_create_vserver_peer.return_value = None peer_applications = ['not a snapmirror application'] self.assertRaises(na_utils.NetAppDriverException, self.dm_mixin.create_vserver_peer, dataontap_fakes.VSERVER_NAME, self.src_backend, dataontap_fakes.DEST_VSERVER_NAME, peer_applications) mock_get_vserver_peers.assert_called_once_with( dataontap_fakes.VSERVER_NAME, dataontap_fakes.DEST_VSERVER_NAME) mock_create_vserver_peer.assert_not_called() def test_quiesce_then_abort_wait_for_quiesced(self): self.mock_object(time, 'sleep') self.mock_object(self.mock_dest_client, 'get_snapmirrors', side_effect=[ [{'relationship-status': 'transferring'}], [{'relationship-status': 'quiesced'}]]) self.dm_mixin.quiesce_then_abort(self.src_backend, self.dest_backend, self.src_flexvol_name, self.dest_flexvol_name) self.mock_dest_client.get_snapmirrors.assert_called_with( self.src_vserver, self.src_flexvol_name, self.dest_vserver, self.dest_flexvol_name, desired_attributes=['relationship-status', 'mirror-state']) self.assertEqual(2, self.mock_dest_client.get_snapmirrors.call_count) self.mock_dest_client.quiesce_snapmirror.assert_called_once_with( self.src_vserver, self.src_flexvol_name, self.dest_vserver, self.dest_flexvol_name) def test_break_snapmirror(self): self.mock_object(self.dm_mixin, 'quiesce_then_abort') self.dm_mixin.break_snapmirror(self.src_backend, self.dest_backend, self.src_flexvol_name, self.dest_flexvol_name) self.dm_mixin.quiesce_then_abort.assert_called_once_with( self.src_backend, self.dest_backend, self.src_flexvol_name, self.dest_flexvol_name) self.mock_dest_client.break_snapmirror.assert_called_once_with( self.src_vserver, self.src_flexvol_name, self.dest_vserver, self.dest_flexvol_name) self.mock_dest_client.mount_flexvol.assert_called_once_with( self.dest_flexvol_name) def test_break_snapmirror_wait_for_quiesced(self): self.mock_object(self.dm_mixin, 'quiesce_then_abort') self.dm_mixin.break_snapmirror(self.src_backend, self.dest_backend, self.src_flexvol_name, self.dest_flexvol_name) self.dm_mixin.quiesce_then_abort.assert_called_once_with( self.src_backend, self.dest_backend, self.src_flexvol_name, self.dest_flexvol_name,) self.mock_dest_client.break_snapmirror.assert_called_once_with( self.src_vserver, self.src_flexvol_name, self.dest_vserver, self.dest_flexvol_name) self.mock_dest_client.mount_flexvol.assert_called_once_with( self.dest_flexvol_name) def test_resync_snapmirror(self): self.dm_mixin.resync_snapmirror(self.src_backend, self.dest_backend, 
self.src_flexvol_name, self.dest_flexvol_name) self.mock_dest_client.resync_snapmirror.assert_called_once_with( self.src_vserver, self.src_flexvol_name, self.dest_vserver, self.dest_flexvol_name) def test_resume_snapmirror(self): self.dm_mixin.resume_snapmirror(self.src_backend, self.dest_backend, self.src_flexvol_name, self.dest_flexvol_name) self.mock_dest_client.resume_snapmirror.assert_called_once_with( self.src_vserver, self.src_flexvol_name, self.dest_vserver, self.dest_flexvol_name) @ddt.data({'size': 1, 'aggr_map': {}, 'is_flexgroup': False}, {'size': 1, 'aggr_map': {'aggr02': 'aggr20'}, 'is_flexgroup': False}, {'size': None, 'aggr_map': {'aggr01': 'aggr10'}, 'is_flexgroup': False}, {'size': 1, 'aggr_map': {'aggr01': 'aggr10'}, 'is_flexgroup': True}) @ddt.unpack def test_create_destination_flexvol_exception(self, size, aggr_map, is_flexgroup): self.mock_object( self.mock_src_client, 'get_provisioning_options_from_flexvol', return_value={'size': size, 'aggregate': ['aggr1'], 'is_flexgroup': is_flexgroup}) self.mock_object(self.dm_mixin, '_get_replication_aggregate_map', return_value=aggr_map) self.mock_object(self.dm_mixin, '_get_replication_volume_online_timeout', return_value=2) self.mock_object(self.mock_dest_client, 'get_volume_state', return_value='online') mock_client_call = self.mock_object( self.mock_dest_client, 'create_flexvol') self.assertRaises(na_utils.NetAppDriverException, self.dm_mixin.create_destination_flexvol, self.src_backend, self.dest_backend, self.src_flexvol_name, self.dest_flexvol_name) if size and is_flexgroup is False: self.dm_mixin._get_replication_aggregate_map.\ assert_called_once_with(self.src_backend, self.dest_backend) elif is_flexgroup is False: self.assertFalse( self.dm_mixin._get_replication_aggregate_map.called) self.assertFalse(mock_client_call.called) @ddt.data('mixed', None) @mock.patch('oslo_service.loopingcall.FixedIntervalWithTimeoutLoopingCall', new=test_utils.ZeroIntervalWithTimeoutLoopingCall) def test_create_destination_flexgroup_online_timeout(self, volume_state): aggr_map = { fakes.PROVISIONING_OPTS_FLEXGROUP['aggregate'][0]: 'aggr01', 'aggr20': 'aggr02', } provisioning_opts = copy.deepcopy(fakes.PROVISIONING_OPTS_FLEXGROUP) expected_prov_opts = copy.deepcopy(fakes.PROVISIONING_OPTS_FLEXGROUP) expected_prov_opts.pop('volume_type', None) expected_prov_opts.pop('size', None) expected_prov_opts.pop('aggregate', None) expected_prov_opts.pop('is_flexgroup', None) self.mock_object( self.mock_src_client, 'get_provisioning_options_from_flexvol', return_value=provisioning_opts) self.mock_object(self.dm_mixin, '_get_replication_aggregate_map', return_value=aggr_map) self.mock_object(self.dm_mixin, '_get_replication_volume_online_timeout', return_value=2) mock_create_volume_async = self.mock_object(self.mock_dest_client, 'create_volume_async') mock_volume_state = self.mock_object(self.mock_dest_client, 'get_volume_state', return_value=volume_state) self.mock_object(self.mock_src_client, 'is_flexvol_encrypted', return_value=False) mock_dedupe_enabled = self.mock_object( self.mock_dest_client, 'enable_volume_dedupe_async') mock_compression_enabled = self.mock_object( self.mock_dest_client, 'enable_volume_compression_async') self.assertRaises(na_utils.NetAppDriverException, self.dm_mixin.create_destination_flexvol, self.src_backend, self.dest_backend, self.src_flexvol_name, self.dest_flexvol_name, pool_is_flexgroup=True) expected_prov_opts.pop('dedupe_enabled') expected_prov_opts.pop('compression_enabled') 
mock_create_volume_async.assert_called_once_with( self.dest_flexvol_name, ['aggr01'], fakes.PROVISIONING_OPTS_FLEXGROUP['size'], volume_type='dp', **expected_prov_opts) mock_volume_state.assert_called_with( name=self.dest_flexvol_name) mock_dedupe_enabled.assert_not_called() mock_compression_enabled.assert_not_called() @ddt.data('flexvol', 'flexgroup') def test_create_destination_flexvol(self, volume_style): provisioning_opts = copy.deepcopy(fakes.PROVISIONING_OPTS) aggr_map = { provisioning_opts['aggregate'][0]: 'aggr01', 'aggr20': 'aggr02', } expected_prov_opts = copy.deepcopy(provisioning_opts) expected_prov_opts.pop('volume_type', None) expected_prov_opts.pop('size', None) expected_prov_opts.pop('aggregate', None) expected_prov_opts.pop('is_flexgroup', None) mock_is_flexvol_encrypted = self.mock_object( self.mock_src_client, 'is_flexvol_encrypted', return_value=False) self.mock_object(self.dm_mixin, '_get_replication_aggregate_map', return_value=aggr_map) self.mock_object(self.dm_mixin, '_get_replication_volume_online_timeout', return_value=2) mock_volume_state = self.mock_object(self.mock_dest_client, 'get_volume_state', return_value='online') pool_is_flexgroup = False if volume_style == 'flexgroup': pool_is_flexgroup = True provisioning_opts = copy.deepcopy( fakes.PROVISIONING_OPTS_FLEXGROUP) self.mock_object(self.dm_mixin, '_get_replication_volume_online_timeout', return_value=2) mock_create_volume_async = self.mock_object(self.mock_dest_client, 'create_volume_async') mock_dedupe_enabled = self.mock_object( self.mock_dest_client, 'enable_volume_dedupe_async') mock_compression_enabled = self.mock_object( self.mock_dest_client, 'enable_volume_compression_async') else: mock_create_flexvol = self.mock_object(self.mock_dest_client, 'create_flexvol') mock_get_provisioning_opts_call = self.mock_object( self.mock_src_client, 'get_provisioning_options_from_flexvol', return_value=provisioning_opts) retval = self.dm_mixin.create_destination_flexvol( self.src_backend, self.dest_backend, self.src_flexvol_name, self.dest_flexvol_name, pool_is_flexgroup=pool_is_flexgroup) self.assertIsNone(retval) mock_get_provisioning_opts_call.assert_called_once_with( self.src_flexvol_name) self.dm_mixin._get_replication_aggregate_map.assert_called_once_with( self.src_backend, self.dest_backend) if volume_style == 'flexgroup': expected_prov_opts.pop('dedupe_enabled') expected_prov_opts.pop('compression_enabled') mock_create_volume_async.assert_called_once_with( self.dest_flexvol_name, ['aggr01'], fakes.PROVISIONING_OPTS_FLEXGROUP['size'], volume_type='dp', **expected_prov_opts) mock_volume_state.assert_called_once_with( name=self.dest_flexvol_name) mock_dedupe_enabled.assert_called_once_with( self.dest_flexvol_name) mock_compression_enabled.assert_called_once_with( self.dest_flexvol_name) else: mock_create_flexvol.assert_called_once_with( self.dest_flexvol_name, 'aggr01', fakes.PROVISIONING_OPTS['size'], volume_type='dp', **expected_prov_opts) mock_is_flexvol_encrypted.assert_called_once_with( self.src_flexvol_name, self.src_vserver) def test_create_encrypted_destination_flexvol(self): aggr_map = { fakes.ENCRYPTED_PROVISIONING_OPTS['aggregate'][0]: 'aggr01', 'aggr20': 'aggr02', } provisioning_opts = copy.deepcopy(fakes.ENCRYPTED_PROVISIONING_OPTS) expected_prov_opts = copy.deepcopy(fakes.ENCRYPTED_PROVISIONING_OPTS) expected_prov_opts.pop('volume_type', None) expected_prov_opts.pop('size', None) expected_prov_opts.pop('aggregate', None) expected_prov_opts.pop('is_flexgroup', None) mock_get_provisioning_opts_call 
= self.mock_object( self.mock_src_client, 'get_provisioning_options_from_flexvol', return_value=provisioning_opts) mock_is_flexvol_encrypted = self.mock_object( self.mock_src_client, 'is_flexvol_encrypted', return_value=True) self.mock_object(self.dm_mixin, '_get_replication_aggregate_map', return_value=aggr_map) self.mock_object(self.dm_mixin, '_get_replication_volume_online_timeout', return_value=2) self.mock_object(self.mock_dest_client, 'get_volume_state', return_value='online') mock_client_call = self.mock_object( self.mock_dest_client, 'create_flexvol') retval = self.dm_mixin.create_destination_flexvol( self.src_backend, self.dest_backend, self.src_flexvol_name, self.dest_flexvol_name) self.assertIsNone(retval) mock_get_provisioning_opts_call.assert_called_once_with( self.src_flexvol_name) self.dm_mixin._get_replication_aggregate_map.assert_called_once_with( self.src_backend, self.dest_backend) mock_client_call.assert_called_once_with( self.dest_flexvol_name, 'aggr01', fakes.ENCRYPTED_PROVISIONING_OPTS['size'], volume_type='dp', **expected_prov_opts) mock_is_flexvol_encrypted.assert_called_once_with( self.src_flexvol_name, self.src_vserver) def test_ensure_snapmirrors(self): flexvols = ['nvol1', 'nvol2'] replication_backends = ['fallback1', 'fallback2'] self.mock_object(self.dm_mixin, 'get_replication_backend_names', return_value=replication_backends) self.mock_object(self.dm_mixin, 'create_snapmirror') expected_calls = [ mock.call(self.src_backend, replication_backends[0], flexvols[0], flexvols[0], self.replication_policy), mock.call(self.src_backend, replication_backends[0], flexvols[1], flexvols[1], self.replication_policy), mock.call(self.src_backend, replication_backends[1], flexvols[0], flexvols[0], self.replication_policy), mock.call(self.src_backend, replication_backends[1], flexvols[1], flexvols[1], self.replication_policy), ] retval = self.dm_mixin.ensure_snapmirrors(self.mock_src_config, self.src_backend, flexvols) self.assertIsNone(retval) self.dm_mixin.get_replication_backend_names.assert_called_once_with( self.mock_src_config) self.dm_mixin.create_snapmirror.assert_has_calls(expected_calls) def test_ensure_snapmirrors_number_of_tries_exceeded(self): flexvols = ['nvol1'] replication_backends = ['fallback1'] mock_error_log = self.mock_object(data_motion.LOG, 'error') self.mock_object(self.dm_mixin, 'get_replication_backend_names', return_value=replication_backends) self.mock_object(self.dm_mixin, 'create_snapmirror', side_effect=na_utils.GeometryHasChangedOnDestination) self.assertRaises(na_utils.GeometryHasChangedOnDestination, self.dm_mixin.ensure_snapmirrors, self.mock_src_config, self.src_backend, flexvols) self.dm_mixin.get_replication_backend_names.assert_called_once_with( self.mock_src_config) excepted_call = mock.call( self.src_backend, replication_backends[0], flexvols[0], flexvols[0], self.replication_policy) self.dm_mixin.create_snapmirror.assert_has_calls([ excepted_call, excepted_call, excepted_call ]) mock_error_log.assert_called() def test_break_snapmirrors(self): flexvols = ['nvol1', 'nvol2'] replication_backends = ['fallback1', 'fallback2'] side_effects = [None, netapp_api.NaApiError, None, None] self.mock_object(self.dm_mixin, 'get_replication_backend_names', return_value=replication_backends) self.mock_object(self.dm_mixin, 'break_snapmirror', side_effect=side_effects) mock_exc_log = self.mock_object(data_motion.LOG, 'exception') expected_calls = [ mock.call(self.src_backend, replication_backends[0], flexvols[0], flexvols[0]), 
mock.call(self.src_backend, replication_backends[0], flexvols[1], flexvols[1]), mock.call(self.src_backend, replication_backends[1], flexvols[0], flexvols[0]), mock.call(self.src_backend, replication_backends[1], flexvols[1], flexvols[1]), ] failed_to_break = self.dm_mixin.break_snapmirrors( self.mock_src_config, self.src_backend, flexvols, 'fallback1') self.assertEqual(1, len(failed_to_break)) self.assertEqual(1, mock_exc_log.call_count) self.dm_mixin.get_replication_backend_names.assert_called_once_with( self.mock_src_config) self.dm_mixin.break_snapmirror.assert_has_calls(expected_calls) def test_update_snapmirrors(self): flexvols = ['nvol1', 'nvol2'] replication_backends = ['fallback1', 'fallback2'] self.mock_object(self.dm_mixin, 'get_replication_backend_names', return_value=replication_backends) side_effects = [None, netapp_api.NaApiError, None, None] self.mock_object(self.dm_mixin, 'update_snapmirror', side_effect=side_effects) expected_calls = [ mock.call(self.src_backend, replication_backends[0], flexvols[0], flexvols[0]), mock.call(self.src_backend, replication_backends[0], flexvols[1], flexvols[1]), mock.call(self.src_backend, replication_backends[1], flexvols[0], flexvols[0]), mock.call(self.src_backend, replication_backends[1], flexvols[1], flexvols[1]), ] retval = self.dm_mixin.update_snapmirrors(self.mock_src_config, self.src_backend, flexvols) self.assertIsNone(retval) self.dm_mixin.get_replication_backend_names.assert_called_once_with( self.mock_src_config) self.dm_mixin.update_snapmirror.assert_has_calls(expected_calls) @ddt.data([{'destination-volume': 'nvol3', 'lag-time': '3223'}, {'destination-volume': 'nvol5', 'lag-time': '32'}], []) def test__choose_failover_target_no_failover_targets(self, snapmirrors): flexvols = ['nvol1', 'nvol2'] replication_backends = ['fallback1', 'fallback2'] mock_debug_log = self.mock_object(data_motion.LOG, 'debug') self.mock_object(self.dm_mixin, 'get_snapmirrors', return_value=snapmirrors) target = self.dm_mixin._choose_failover_target( self.src_backend, flexvols, replication_backends) self.assertIsNone(target) self.assertEqual(2, mock_debug_log.call_count) def test__choose_failover_target(self): flexvols = ['nvol1', 'nvol2'] replication_backends = ['fallback1', 'fallback2'] target_1_snapmirrors = [ {'destination-volume': 'nvol3', 'lag-time': '12'}, {'destination-volume': 'nvol1', 'lag-time': '1541'}, {'destination-volume': 'nvol2', 'lag-time': '16'}, ] target_2_snapmirrors = [ {'destination-volume': 'nvol2', 'lag-time': '717'}, {'destination-volume': 'nvol1', 'lag-time': '323'}, {'destination-volume': 'nvol3', 'lag-time': '720'}, ] mock_debug_log = self.mock_object(data_motion.LOG, 'debug') self.mock_object(self.dm_mixin, 'get_snapmirrors', side_effect=[target_1_snapmirrors, target_2_snapmirrors]) target = self.dm_mixin._choose_failover_target( self.src_backend, flexvols, replication_backends) self.assertEqual('fallback2', target) self.assertFalse(mock_debug_log.called) def test__failover_host_to_same_host(self): """Tests failover host to same host throws error""" # Mock the required attributes self.dm_mixin.backend_name = "backend1" secondary_id = "backend1" volumes = [] # Assert that an exception is raised self.assertRaises(exception.InvalidReplicationTarget, self.dm_mixin._failover_host, volumes, secondary_id) def test__failover_host_to_default(self): """Tests failover host to default sets the old primary as a """ """new primary""" # Mock the required attributes self.dm_mixin.backend_name = "backend1" secondary_id = "default" volumes = 
[{'id': 'volume1', 'host': 'backend1#pool1'}] # Mock the necessary methods self.dm_mixin._update_zapi_client = mock.Mock() self.get_replication_backend_names = mock.Mock(return_value= ["backend1"]) # Call the method result = self.dm_mixin._failover_host(volumes, secondary_id) # Assert the expected result expected_result = ("backend1", [{'volume_id': 'volume1', 'updates': {'replication_status': 'enabled'}}], []) self.assertEqual(result, expected_result) self.assertTrue(self.dm_mixin._update_zapi_client.called) def test__failover_host_to_custom_host(self): """Tests failover host to custom host sets the secondary as a new primary.""" # Mock the required attributes self.dm_mixin.backend_name = "backend1" secondary_id = "backend2" volumes = [{'id': 'volume1', 'host': 'backend1#pool1'}] # Mock the necessary methods self.dm_mixin._complete_failover = \ mock.Mock(return_value= ("backend2", [{'volume_id': 'volume1', 'updates': {'replication_status': 'enabled'}}])) self.dm_mixin._update_zapi_client = mock.Mock() self.dm_mixin.configuration = self.config self.dm_mixin.get_replication_backend_names = \ mock.Mock(return_value=["backend1", "backend2"]) self.mock_object(utils, 'get_backend_configuration') volume_list = ['pool1', 'vol1', 'vol2'] self.dm_mixin.ssc_library = mock.Mock() self.mock_object(self.dm_mixin.ssc_library, 'get_ssc_flexvol_names', return_value=volume_list) # Call the method result = self.dm_mixin._failover_host(volumes, secondary_id) # Assert the expected result expected_result = ("backend2", [{'volume_id': 'volume1', 'updates': {'replication_status': 'enabled'}}], []) self.assertEqual(result, expected_result) self.assertTrue(self.dm_mixin._complete_failover.called) self.assertTrue(self.dm_mixin._update_zapi_client.called) def test__failover_host_without_replication_targets(self): """Tests failover host to a target which doesn't exist.""" # Mock the required attributes self.dm_mixin.backend_name = "backend1" secondary_id = "backend2" volumes = [{'id': 'volume1', 'host': 'backend1#pool1'}] # Mock the necessary methods self.dm_mixin._complete_failover = \ mock.Mock(return_value=("backend2", [{'volume_id': 'volume1', 'updates': {'replication_status': 'enabled'}}])) self.dm_mixin._update_zapi_client = mock.Mock() self.dm_mixin.configuration = self.config self.dm_mixin.get_replication_backend_names = \ mock.Mock(return_value=[]) self.mock_object(utils, 'get_backend_configuration') self.dm_mixin.host = "host1" # Assert that an exception is raised self.assertRaises(exception.InvalidReplicationTarget, self.dm_mixin._failover_host, volumes, secondary_id) def test__failover_host_secondary_id_not_in_replication_target(self): """Tests failover host to custom host whose id is not in the replication target list.""" # Mock the required attributes self.dm_mixin.backend_name = "backend1" secondary_id = "backend3" volumes = [{'id': 'volume1', 'host': 'backend1#pool1'}] # Mock the necessary methods self.dm_mixin._complete_failover = \ mock.Mock(return_value=("backend2", [{'volume_id': 'volume1', 'updates': {'replication_status': 'enabled'}}])) self.dm_mixin._update_zapi_client = mock.Mock() self.dm_mixin.configuration = self.config self.dm_mixin.get_replication_backend_names = \ mock.Mock(return_value=["backend1", "backend2"]) self.mock_object(utils, 'get_backend_configuration') self.dm_mixin.host = "host1" # Assert that an exception is raised self.assertRaises(exception.InvalidReplicationTarget, self.dm_mixin._failover_host, volumes, secondary_id) def
test__failover_host_no_suitable_target(self): """Tests failover host to a host which is not a suitable secondary.""" # Mock the required attributes self.dm_mixin.backend_name = "backend1" secondary_id = "backend2" volumes = [{'id': 'volume1', 'host': 'backend1#pool1'}] # Mock the necessary methods self.mock_object(data_motion.DataMotionMixin, '_complete_failover', side_effect=na_utils.NetAppDriverException) self.dm_mixin.configuration = self.config self.dm_mixin.get_replication_backend_names = \ mock.Mock(return_value=["backend1", "backend2"]) self.mock_object(utils, 'get_backend_configuration') volume_list = ['pool1', 'vol1', 'vol2'] self.dm_mixin.ssc_library = mock.Mock() self.mock_object(self.dm_mixin.ssc_library, 'get_ssc_flexvol_names', return_value=volume_list) # Assert that an exception is raised self.assertRaises(exception.UnableToFailOver, self.dm_mixin._failover_host, volumes, secondary_id) def test__failover_to_same_host(self): """Tests failover to same host throws error""" # Mock the required attributes self.dm_mixin.backend_name = "backend1" secondary_id = "backend1" volumes = [] # Assert that an exception is raised self.assertRaises(exception.InvalidReplicationTarget, self.dm_mixin._failover, 'fake_context', volumes, secondary_id) def test__failover_to_default(self): """Tests failover to default sets the old primary as a new primary""" # Mock the required attributes self.dm_mixin.backend_name = "backend1" secondary_id = "default" volumes = [{'id': 'volume1', 'host': 'backend1#pool1'}] # Mock the necessary methods self.dm_mixin._update_zapi_client = mock.Mock() self.get_replication_backend_names = \ mock.Mock(return_value=["backend1"]) # Call the method result = self.dm_mixin._failover('fake_context', volumes, secondary_id) # Assert the expected result expected_result = ("backend1", [{'volume_id': 'volume1', 'updates': {'replication_status': 'enabled'}}], []) self.assertEqual(result, expected_result) self.assertTrue(self.dm_mixin._update_zapi_client.called) def test__failover_to_custom_host(self): """Tests failover to custom host sets the secondary as a new primary.""" # Mock the required attributes self.dm_mixin.backend_name = "backend1" secondary_id = "backend2" volumes = [{'id': 'volume1', 'host': 'backend1#pool1'}] # Mock the necessary methods self.dm_mixin._complete_failover = \ mock.Mock(return_value=("backend2", [{'volume_id': 'volume1', 'updates': {'replication_status': 'enabled'}}])) self.dm_mixin.configuration = self.config self.dm_mixin.get_replication_backend_names = \ mock.Mock(return_value=["backend1", "backend2"]) self.mock_object(utils, 'get_backend_configuration') volume_list = ['pool1', 'vol1', 'vol2'] self.dm_mixin.ssc_library = mock.Mock() self.mock_object(self.dm_mixin.ssc_library, 'get_ssc_flexvol_names', return_value=volume_list) # Call the method result = self.dm_mixin._failover('fake_context', volumes, secondary_id) # Assert the expected result expected_result = ("backend2", [{'volume_id': 'volume1', 'updates': {'replication_status': 'enabled'}}], []) self.assertEqual(result, expected_result) self.assertTrue(self.dm_mixin._complete_failover.called) def test__failover_without_replication_targets(self): """Tests failover to a target which doesn't exist.""" # Mock the required attributes self.dm_mixin.backend_name = "backend1" secondary_id = "backend2" volumes = [{'id': 'volume1', 'host': 'backend1#pool1'}] # Mock the necessary methods self.dm_mixin._complete_failover = \ mock.Mock(return_value=("backend2", [{'volume_id': 'volume1', 'updates':
{'replication_status': 'enabled'}}])) self.dm_mixin._update_zapi_client = mock.Mock() self.dm_mixin.configuration = self.config self.dm_mixin.get_replication_backend_names = \ mock.Mock(return_value=[]) self.mock_object(utils, 'get_backend_configuration') self.dm_mixin.host = "host1" # Assert that an exception is raised self.assertRaises(exception.InvalidReplicationTarget, self.dm_mixin._failover, 'fake_context', volumes, secondary_id) def test__failover_secondary_id_not_in_replication_target(self): """Tests failover to custom host whose id is not there """ """in replication target list""" # Mock the required attributes self.dm_mixin.backend_name = "backend1" secondary_id = "backend3" volumes = [{'id': 'volume1', 'host': 'backend1#pool1'}] # Mock the necessary methods self.dm_mixin._complete_failover = \ mock.Mock(return_value=("backend2", [{'volume_id': 'volume1', 'updates': {'replication_status': 'enabled'}}])) self.dm_mixin._update_zapi_client = mock.Mock() self.dm_mixin.configuration = self.config self.dm_mixin.get_replication_backend_names = \ mock.Mock(return_value=["backend1", "backend2"]) self.mock_object(utils, 'get_backend_configuration') self.dm_mixin.host = "host1" # Assert that an exception is raised self.assertRaises(exception.InvalidReplicationTarget, self.dm_mixin._failover, 'fake_context', volumes, secondary_id) def test__failover_no_suitable_target(self): """Tests failover to a host which is not a suitable secondary """ # Mock the required attributes self.dm_mixin.backend_name = "backend1" secondary_id = "backend2" volumes = [{'id': 'volume1', 'host': 'backend1#pool1'}] self.mock_object(data_motion.DataMotionMixin, '_complete_failover', side_effect=na_utils.NetAppDriverException) self.dm_mixin.configuration = self.config self.dm_mixin.get_replication_backend_names = \ mock.Mock(return_value=["backend1", "backend2"]) self.mock_object(utils, 'get_backend_configuration') volume_list = ['pool1', 'vol1', 'vol2'] self.dm_mixin.ssc_library = mock.Mock() self.mock_object(self.dm_mixin.ssc_library, 'get_ssc_flexvol_names', return_value=volume_list) # Assert that an exception is raised self.assertRaises(exception.UnableToFailOver, self.dm_mixin._failover, 'fake_context', volumes, secondary_id) def test__complete_failover_no_suitable_target(self): flexvols = ['nvol1', 'nvol2'] replication_backends = ['fallback1', 'fallback2'] self.mock_object(self.dm_mixin, '_choose_failover_target', return_value=None) self.mock_object(utils, 'get_backend_configuration') self.mock_object(self.dm_mixin, 'update_snapmirrors') self.mock_object(self.dm_mixin, 'break_snapmirrors') self.assertRaises(na_utils.NetAppDriverException, self.dm_mixin._complete_failover, self.src_backend, replication_backends, flexvols, [], failover_target=None) self.assertFalse(utils.get_backend_configuration.called) self.assertFalse(self.dm_mixin.update_snapmirrors.called) self.assertFalse(self.dm_mixin.break_snapmirrors.called) @ddt.data('fallback1', None) def test__complete_failover(self, failover_target): flexvols = ['nvol1', 'nvol2', 'nvol3'] replication_backends = ['fallback1', 'fallback2'] volumes = [ {'id': 'xyzzy', 'host': 'openstack@backend1#nvol1'}, {'id': 'foobar', 'host': 'openstack@backend1#nvol2'}, {'id': 'waldofred', 'host': 'openstack@backend1#nvol3'}, ] expected_volume_updates = [ { 'volume_id': 'xyzzy', 'updates': {'replication_status': 'failed-over'}, }, { 'volume_id': 'foobar', 'updates': {'replication_status': 'failed-over'}, }, { 'volume_id': 'waldofred', 'updates': {'replication_status': 'error'}, }, ] 
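# NOTE: break_snapmirrors is mocked below to report 'nvol3' as failed to break, so the volume hosted on that flexvol ('waldofred') is expected to end up with replication_status 'error' while the volumes on 'nvol1' and 'nvol2' become 'failed-over'.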
expected_active_backend_name = failover_target or 'fallback2' self.mock_object(self.dm_mixin, '_choose_failover_target', return_value='fallback2') self.mock_object(utils, 'get_backend_configuration') self.mock_object(self.dm_mixin, 'update_snapmirrors') self.mock_object(self.dm_mixin, 'break_snapmirrors', return_value=['nvol3']) actual_active_backend_name, actual_volume_updates = ( self.dm_mixin._complete_failover( self.src_backend, replication_backends, flexvols, volumes, failover_target=failover_target) ) self.assertEqual(expected_active_backend_name, actual_active_backend_name) self.assertEqual(expected_volume_updates, actual_volume_updates) def test_migrate_volume_ontap_assisted_is_same_pool(self): ctxt = mock.Mock() vol_fields = {'id': dataontap_fakes.VOLUME_ID, 'host': dataontap_fakes.HOST_STRING} fake_vol = fake_volume.fake_volume_obj(ctxt, **vol_fields) fake_dest_host = {'host': dataontap_fakes.HOST_STRING} self.dm_mixin._migrate_volume_to_pool = mock.Mock() mock_migrate_volume_to_pool = self.dm_mixin._migrate_volume_to_pool self.dm_mixin._migrate_volume_to_vserver = mock.Mock() mock_migrate_volume_to_vserver = ( self.dm_mixin._migrate_volume_to_vserver) migrated, updates = self.dm_mixin.migrate_volume_ontap_assisted( fake_vol, fake_dest_host, dataontap_fakes.BACKEND_NAME, dataontap_fakes.DEST_VSERVER_NAME) mock_migrate_volume_to_pool.assert_not_called() mock_migrate_volume_to_vserver.assert_not_called() self.assertTrue(migrated) self.assertEqual({}, updates) def test_migrate_volume_ontap_assisted_same_pool_different_backend(self): CONF.set_override('netapp_vserver', dataontap_fakes.DEST_VSERVER_NAME, group=self.dest_backend) ctxt = mock.Mock() vol_fields = {'id': dataontap_fakes.VOLUME_ID, 'host': dataontap_fakes.HOST_STRING} fake_vol = fake_volume.fake_volume_obj(ctxt, **vol_fields) fake_dest_host = {'host': '%s@%s#%s' % ( dataontap_fakes.HOST_NAME, dataontap_fakes.DEST_BACKEND_NAME, dataontap_fakes.POOL_NAME)} self.dm_mixin.using_cluster_credentials = True self.mock_src_client.get_cluster_name.return_value = ( dataontap_fakes.CLUSTER_NAME) self.mock_dest_client.get_cluster_name.return_value = ( dataontap_fakes.CLUSTER_NAME) self.dm_mixin._migrate_volume_to_pool = mock.Mock() mock_migrate_volume_to_pool = self.dm_mixin._migrate_volume_to_pool self.dm_mixin._migrate_volume_to_vserver = mock.Mock() mock_migrate_volume_to_vserver = ( self.dm_mixin._migrate_volume_to_vserver) migrated, updates = self.dm_mixin.migrate_volume_ontap_assisted( fake_vol, fake_dest_host, dataontap_fakes.BACKEND_NAME, dataontap_fakes.DEST_VSERVER_NAME) utils.get_backend_configuration.assert_called_once_with( dataontap_fakes.DEST_BACKEND_NAME) utils.get_client_for_backend.assert_has_calls( [mock.call(dataontap_fakes.DEST_BACKEND_NAME), mock.call(dataontap_fakes.BACKEND_NAME)]) self.mock_src_client.get_cluster_name.assert_called() self.mock_dest_client.get_cluster_name.assert_called() mock_migrate_volume_to_pool.assert_not_called() mock_migrate_volume_to_vserver.assert_not_called() self.assertTrue(migrated) self.assertEqual({}, updates) def test_migrate_volume_ontap_assisted_invalid_creds(self): ctxt = mock.Mock() vol_fields = {'id': dataontap_fakes.VOLUME_ID, 'host': dataontap_fakes.HOST_STRING} fake_vol = fake_volume.fake_volume_obj(ctxt, **vol_fields) fake_dest_host = {'host': dataontap_fakes.DEST_HOST_STRING} self.dm_mixin.using_cluster_credentials = False self.mock_dest_config.netapp_vserver = dataontap_fakes.VSERVER_NAME self.dm_mixin._migrate_volume_to_pool = mock.Mock() mock_migrate_volume_to_pool = 
self.dm_mixin._migrate_volume_to_pool self.dm_mixin._migrate_volume_to_vserver = mock.Mock() mock_migrate_volume_to_vserver = ( self.dm_mixin._migrate_volume_to_vserver) migrated, updates = self.dm_mixin.migrate_volume_ontap_assisted( fake_vol, fake_dest_host, dataontap_fakes.BACKEND_NAME, dataontap_fakes.DEST_VSERVER_NAME) utils.get_backend_configuration.assert_not_called() utils.get_client_for_backend.assert_not_called() self.mock_src_client.get_cluster_name.assert_not_called() self.mock_dest_client.get_cluster_name.assert_not_called() mock_migrate_volume_to_pool.assert_not_called() mock_migrate_volume_to_vserver.assert_not_called() self.assertFalse(migrated) self.assertEqual({}, updates) def test_migrate_volume_ontap_assisted_dest_pool_not_in_same_cluster(self): CONF.set_override('netapp_vserver', dataontap_fakes.DEST_VSERVER_NAME, group=self.dest_backend) ctxt = mock.Mock() vol_fields = {'id': dataontap_fakes.VOLUME_ID, 'host': dataontap_fakes.HOST_STRING} fake_vol = fake_volume.fake_volume_obj(ctxt, **vol_fields) fake_dest_host = {'host': dataontap_fakes.DEST_HOST_STRING} self.dm_mixin.using_cluster_credentials = True self.mock_src_client.get_cluster_name.return_value = ( dataontap_fakes.CLUSTER_NAME) self.mock_dest_client.get_cluster_name.return_value = ( dataontap_fakes.DEST_CLUSTER_NAME) self.dm_mixin._migrate_volume_to_pool = mock.Mock() mock_migrate_volume_to_pool = self.dm_mixin._migrate_volume_to_pool self.dm_mixin._migrate_volume_to_vserver = mock.Mock() mock_migrate_volume_to_vserver = ( self.dm_mixin._migrate_volume_to_vserver) migrated, updates = self.dm_mixin.migrate_volume_ontap_assisted( fake_vol, fake_dest_host, dataontap_fakes.BACKEND_NAME, dataontap_fakes.DEST_VSERVER_NAME) utils.get_backend_configuration.assert_called_once_with( dataontap_fakes.DEST_BACKEND_NAME) utils.get_client_for_backend.assert_has_calls( [mock.call(dataontap_fakes.DEST_BACKEND_NAME), mock.call(dataontap_fakes.BACKEND_NAME)]) self.mock_src_client.get_cluster_name.assert_called() self.mock_dest_client.get_cluster_name.assert_called() mock_migrate_volume_to_pool.assert_not_called() mock_migrate_volume_to_vserver.assert_not_called() self.assertFalse(migrated) self.assertEqual({}, updates) @ddt.data((dataontap_fakes.BACKEND_NAME, True), (dataontap_fakes.DEST_BACKEND_NAME, False)) @ddt.unpack def test_migrate_volume_ontap_assisted_same_vserver(self, dest_backend_name, is_same_backend): CONF.set_override('netapp_vserver', dataontap_fakes.VSERVER_NAME, group=self.dest_backend) ctxt = mock.Mock() vol_fields = {'id': dataontap_fakes.VOLUME_ID, 'host': dataontap_fakes.HOST_STRING} fake_vol = fake_volume.fake_volume_obj(ctxt, **vol_fields) fake_dest_host = {'host': '%s@%s#%s' % ( dataontap_fakes.HOST_NAME, dest_backend_name, dataontap_fakes.DEST_POOL_NAME)} self.dm_mixin.using_cluster_credentials = True self.mock_src_client.get_cluster_name.return_value = ( dataontap_fakes.CLUSTER_NAME) self.mock_dest_client.get_cluster_name.return_value = ( dataontap_fakes.CLUSTER_NAME) self.dm_mixin._migrate_volume_to_pool = mock.Mock() mock_migrate_volume_to_pool = self.dm_mixin._migrate_volume_to_pool mock_migrate_volume_to_pool.return_value = {} self.dm_mixin._migrate_volume_to_vserver = mock.Mock() mock_migrate_volume_to_vserver = ( self.dm_mixin._migrate_volume_to_vserver) migrated, updates = self.dm_mixin.migrate_volume_ontap_assisted( fake_vol, fake_dest_host, dataontap_fakes.BACKEND_NAME, dataontap_fakes.VSERVER_NAME) if is_same_backend: utils.get_backend_configuration.assert_not_called() 
utils.get_client_for_backend.assert_not_called() self.mock_src_client.get_cluster_name.assert_not_called() self.mock_dest_client.get_cluster_name.assert_not_called() else: utils.get_backend_configuration.assert_called_once_with( dest_backend_name) utils.get_client_for_backend.assert_has_calls( [mock.call(dest_backend_name), mock.call(dataontap_fakes.BACKEND_NAME)]) self.mock_src_client.get_cluster_name.assert_called() self.mock_dest_client.get_cluster_name.assert_called() mock_migrate_volume_to_pool.assert_called_once_with( fake_vol, dataontap_fakes.POOL_NAME, dataontap_fakes.DEST_POOL_NAME, dataontap_fakes.VSERVER_NAME, dest_backend_name) mock_migrate_volume_to_vserver.assert_not_called() self.assertTrue(migrated) self.assertEqual({}, updates) def test_migrate_volume_different_vserver(self): CONF.set_override('netapp_vserver', dataontap_fakes.DEST_VSERVER_NAME, group=self.dest_backend) ctxt = mock.Mock() vol_fields = {'id': dataontap_fakes.VOLUME_ID, 'host': dataontap_fakes.HOST_STRING} fake_vol = fake_volume.fake_volume_obj(ctxt, **vol_fields) fake_dest_host = {'host': dataontap_fakes.DEST_HOST_STRING} self.dm_mixin.using_cluster_credentials = True self.mock_src_client.get_cluster_name.return_value = ( dataontap_fakes.CLUSTER_NAME) self.mock_dest_client.get_cluster_name.return_value = ( dataontap_fakes.CLUSTER_NAME) self.dm_mixin._migrate_volume_to_pool = mock.Mock() mock_migrate_volume_to_pool = self.dm_mixin._migrate_volume_to_pool self.dm_mixin._migrate_volume_to_vserver = mock.Mock() mock_migrate_volume_to_vserver = ( self.dm_mixin._migrate_volume_to_vserver) mock_migrate_volume_to_vserver.return_value = {} migrated, updates = self.dm_mixin.migrate_volume_ontap_assisted( fake_vol, fake_dest_host, dataontap_fakes.BACKEND_NAME, dataontap_fakes.VSERVER_NAME) utils.get_backend_configuration.assert_called_once_with( dataontap_fakes.DEST_BACKEND_NAME) utils.get_client_for_backend.assert_has_calls( [mock.call(dataontap_fakes.DEST_BACKEND_NAME), mock.call(dataontap_fakes.BACKEND_NAME)]) self.mock_src_client.get_cluster_name.assert_called() self.mock_dest_client.get_cluster_name.assert_called() mock_migrate_volume_to_pool.assert_not_called() mock_migrate_volume_to_vserver.assert_called_once_with( fake_vol, dataontap_fakes.POOL_NAME, dataontap_fakes.VSERVER_NAME, dataontap_fakes.DEST_POOL_NAME, dataontap_fakes.DEST_VSERVER_NAME, dataontap_fakes.DEST_BACKEND_NAME) self.assertTrue(migrated) self.assertEqual({}, updates) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/volume/drivers/netapp/dataontap/utils/test_loopingcalls.py0000664000175000017500000000441400000000000032145 0ustar00zuulzuul00000000000000# Copyright (c) 2016 Chuck Fouts. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
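# NOTE: The tests below exercise the LoopingCalls helper used by the NetApp Data ONTAP driver: add_task() registers a looping task with an interval and an optional initial delay, and start_tasks() starts each registered task with those values.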
from unittest import mock from oslo_service import loopingcall from cinder.tests.unit import test from cinder.volume.drivers.netapp.dataontap.utils import loopingcalls class LoopingCallsTestCase(test.TestCase): def setUp(self): super(LoopingCallsTestCase, self).setUp() self.mock_first_looping_task = mock.Mock() self.mock_second_looping_task = mock.Mock() self.mock_loopingcall = self.mock_object( loopingcall, 'FixedIntervalLoopingCall', side_effect=[self.mock_first_looping_task, self.mock_second_looping_task] ) self.loopingcalls = loopingcalls.LoopingCalls() def test_add_task(self): interval = 3600 initial_delay = 5 self.loopingcalls.add_task(self.mock_first_looping_task, interval) self.loopingcalls.add_task( self.mock_second_looping_task, interval, initial_delay) self.assertEqual(2, len(self.loopingcalls.tasks)) self.assertEqual(interval, self.loopingcalls.tasks[0].interval) self.assertEqual(initial_delay, self.loopingcalls.tasks[1].initial_delay) def test_start_tasks(self): interval = 3600 initial_delay = 5 self.loopingcalls.add_task(self.mock_first_looping_task, interval) self.loopingcalls.add_task( self.mock_second_looping_task, interval, initial_delay) self.loopingcalls.start_tasks() self.mock_first_looping_task.start.assert_called_once_with( interval, 0) self.mock_second_looping_task.start.assert_called_once_with( interval, initial_delay) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/volume/drivers/netapp/dataontap/utils/test_utils.py0000664000175000017500000002512700000000000030623 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
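# NOTE: The tests below cover the Data ONTAP utility helpers: locating a backend's configuration section, building a ZAPI or REST client depending on netapp_use_legacy_client, constructing EMS log messages, and mapping a cluster to its pool via get_cluster_to_pool_map.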
import json import socket from unittest import mock import ddt from oslo_config import cfg from cinder import exception from cinder.tests.unit import test from cinder.tests.unit.volume.drivers.netapp.dataontap.utils import fakes from cinder.volume.drivers.netapp.dataontap.client import client_cmode from cinder.volume.drivers.netapp.dataontap.client import client_cmode_rest from cinder.volume.drivers.netapp.dataontap.utils import utils CONF = cfg.CONF @ddt.ddt class NetAppCDOTDataMotionTestCase(test.TestCase): def setUp(self): super(NetAppCDOTDataMotionTestCase, self).setUp() self.backend = 'backend1' self.mock_cmode_client = self.mock_object(client_cmode, 'Client') self.mock_cmode_rest_client = self.mock_object( client_cmode_rest, 'RestClient') self.config = fakes.get_fake_cmode_config(self.backend) CONF.set_override('volume_backend_name', self.backend, group=self.backend) CONF.set_override('netapp_transport_type', 'https', group=self.backend) CONF.set_override('netapp_login', 'fake_user', group=self.backend) CONF.set_override('netapp_password', 'fake_password', group=self.backend) CONF.set_override('netapp_server_hostname', 'fake_hostname', group=self.backend) CONF.set_override('netapp_server_port', 8866, group=self.backend) CONF.set_override('netapp_api_trace_pattern', "fake_regex", group=self.backend) CONF.set_override('netapp_ssl_cert_path', 'fake_ca', group=self.backend) CONF.set_override('netapp_private_key_file', 'fake_private_key.pem', group=self.backend) CONF.set_override('netapp_certificate_file', 'fake_cert.pem', group=self.backend) CONF.set_override('netapp_ca_certificate_file', 'fake_ca_cert.crt', group=self.backend) CONF.set_override('netapp_certificate_host_validation', False, group=self.backend) def test_get_backend_configuration(self): self.mock_object(utils, 'CONF') CONF.set_override('netapp_vserver', 'fake_vserver', group=self.backend) utils.CONF.list_all_sections.return_value = [self.backend] config = utils.get_backend_configuration(self.backend) self.assertEqual('fake_vserver', config.netapp_vserver) def test_get_backend_configuration_different_backend_name(self): self.mock_object(utils, 'CONF') CONF.set_override('netapp_vserver', 'fake_vserver', group=self.backend) CONF.set_override('volume_backend_name', 'fake_backend_name', group=self.backend) utils.CONF.list_all_sections.return_value = [self.backend] config = utils.get_backend_configuration(self.backend) self.assertEqual('fake_vserver', config.netapp_vserver) self.assertEqual('fake_backend_name', config.volume_backend_name) @ddt.data([], ['fake_backend1', 'fake_backend2']) def test_get_backend_configuration_not_configured(self, conf_sections): self.mock_object(utils, 'CONF') utils.CONF.list_all_sections.return_value = conf_sections self.assertRaises(exception.ConfigNotFound, utils.get_backend_configuration, self.backend) @ddt.data(True, False) def test_get_client_for_backend(self, use_legacy): self.config.netapp_use_legacy_client = use_legacy self.mock_object(utils, 'get_backend_configuration', return_value=self.config) utils.get_client_for_backend(self.backend) if use_legacy: self.mock_cmode_client.assert_called_once_with( hostname='fake_hostname', password='fake_password', username='fake_user', transport_type='https', port=8866, trace=mock.ANY, vserver=None, api_trace_pattern="fake_regex", ssl_cert_path='fake_ca', private_key_file='fake_private_key.pem', certificate_file='fake_cert.pem', ca_certificate_file='fake_ca_cert.crt', certificate_host_validation=False) self.mock_cmode_rest_client.assert_not_called() 
else: self.mock_cmode_rest_client.assert_called_once_with( hostname='fake_hostname', password='fake_password', username='fake_user', transport_type='https', port=8866, trace=mock.ANY, vserver=None, api_trace_pattern="fake_regex", ssl_cert_path='fake_ca', async_rest_timeout=60, private_key_file='fake_private_key.pem', certificate_file='fake_cert.pem', ca_certificate_file='fake_ca_cert.crt', certificate_host_validation=False) self.mock_cmode_client.assert_not_called() @ddt.data(True, False) def test_get_client_for_backend_with_vserver(self, use_legacy): self.config.netapp_use_legacy_client = use_legacy self.mock_object(utils, 'get_backend_configuration', return_value=self.config) CONF.set_override('netapp_vserver', 'fake_vserver', group=self.backend) utils.get_client_for_backend(self.backend) if use_legacy: self.mock_cmode_client.assert_called_once_with( hostname='fake_hostname', password='fake_password', username='fake_user', transport_type='https', port=8866, trace=mock.ANY, vserver='fake_vserver', api_trace_pattern="fake_regex", ssl_cert_path='fake_ca', private_key_file='fake_private_key.pem', certificate_file='fake_cert.pem', ca_certificate_file='fake_ca_cert.crt', certificate_host_validation=False) self.mock_cmode_rest_client.assert_not_called() else: self.mock_cmode_rest_client.assert_called_once_with( hostname='fake_hostname', password='fake_password', username='fake_user', transport_type='https', port=8866, trace=mock.ANY, vserver='fake_vserver', api_trace_pattern="fake_regex", ssl_cert_path='fake_ca', async_rest_timeout = 60, private_key_file='fake_private_key.pem', certificate_file='fake_cert.pem', ca_certificate_file='fake_ca_cert.crt', certificate_host_validation=False) self.mock_cmode_client.assert_not_called() @ddt.ddt class NetAppDataOntapUtilsTestCase(test.TestCase): def test_build_ems_log_message_0(self): self.mock_object( socket, 'gethostname', return_value='fake_hostname') result = utils.build_ems_log_message_0( 'fake_driver_name', 'fake_app_version') expected = { 'computer-name': 'fake_hostname', 'event-source': 'Cinder driver fake_driver_name', 'app-version': 'fake_app_version', 'category': 'provisioning', 'log-level': '5', 'auto-support': 'false', 'event-id': '0', 'event-description': 'OpenStack Cinder connected to cluster node', } self.assertEqual(expected, result) def test_build_ems_log_message_1(self): self.mock_object( socket, 'gethostname', return_value='fake_hostname') aggregate_pools = ['aggr1', 'aggr2'] flexvol_pools = ['vol1', 'vol2'] result = utils.build_ems_log_message_1( 'fake_driver_name', 'fake_app_version', 'fake_vserver', flexvol_pools, aggregate_pools) pool_info = { 'pools': { 'vserver': 'fake_vserver', 'aggregates': aggregate_pools, 'flexvols': flexvol_pools, }, } self.assertDictEqual(pool_info, json.loads(result['event-description'])) result['event-description'] = '' expected = { 'computer-name': 'fake_hostname', 'event-source': 'Cinder driver fake_driver_name', 'app-version': 'fake_app_version', 'category': 'provisioning', 'log-level': '5', 'auto-support': 'false', 'event-id': '1', 'event-description': '', } self.assertEqual(expected, result) def test_get_cluster_to_pool_map_success(self): """Test successful cluster-to-pool mapping.""" mock_client = mock.Mock() self.mock_object( mock_client, 'get_cluster_info', return_value={ 'name': 'cluster1', 'disaggregated': True }) expected_pool_map = { 'cluster1': {'pool_name': 'cluster1'} } result = utils.get_cluster_to_pool_map(mock_client) self.assertEqual(expected_pool_map, result) 
mock_client.get_cluster_info.assert_called_once() def test_get_cluster_to_pool_map_disaggregated_false_raises_exception(self): """Test that disaggregated=False raises InvalidConfigurationValue.""" mock_client = mock.Mock() self.mock_object( mock_client, 'get_cluster_info', return_value={ 'name': 'cluster1', 'disaggregated': False }) self.assertRaises( exception.InvalidConfigurationValue, utils.get_cluster_to_pool_map, mock_client) def test_get_cluster_to_pool_map_disaggregated_missing_exception(self): mock_client = mock.Mock() self.mock_object( mock_client, 'get_cluster_info', return_value={ 'name': 'cluster1', }) self.assertRaises( exception.InvalidConfigurationValue, utils.get_cluster_to_pool_map, mock_client) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/volume/drivers/netapp/fakes.py0000664000175000017500000001445200000000000024401 0ustar00zuulzuul00000000000000# Copyright (c) - 2014, Clinton Knight All rights reserved. # Copyright (c) - 2015, Alex Meade. All Rights Reserved. # Copyright (c) - 2015, Rushil Chugh. All Rights Reserved. # Copyright (c) - 2015, Tom Barron. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from cinder.tests.unit import fake_volume from cinder.volume import configuration as conf import cinder.volume.drivers.netapp.options as na_opts ISCSI_FAKE_LUN_ID = 1 ISCSI_FAKE_IQN = 'iqn.1993-08.org.debian:01:10' ISCSI_FAKE_IQN2 = 'iqn.1993-08.org.debian:01:11' ISCSI_FAKE_ADDRESS_IPV4 = '10.63.165.216' ISCSI_FAKE_ADDRESS2_IPV4 = '10.63.165.217' ISCSI_FAKE_ADDRESS_IPV6 = 'fe80::72a4:a152:aad9:30d9' ISCSI_FAKE_PORT = '2232' ISCSI_FAKE_VOLUME = {'id': 'fake_id'} ISCSI_FAKE_TARGET = {} ISCSI_FAKE_TARGET['address'] = ISCSI_FAKE_ADDRESS_IPV4 ISCSI_FAKE_TARGET['port'] = ISCSI_FAKE_PORT ISCSI_FAKE_VOLUME = {'id': 'fake_id', 'provider_auth': 'None stack password'} ISCSI_FAKE_VOLUME_NO_AUTH = {'id': 'fake_id', 'provider_auth': ''} ISCSI_MP_TARGET_INFO_DICT = {'target_discovered': False, 'target_portal': '10.63.165.216:2232', 'target_portals': ['10.63.165.216:2232', '10.63.165.217:2232'], 'target_iqn': ISCSI_FAKE_IQN, 'target_iqns': [ISCSI_FAKE_IQN, ISCSI_FAKE_IQN2], 'target_lun': ISCSI_FAKE_LUN_ID, 'target_luns': [ISCSI_FAKE_LUN_ID] * 2, 'volume_id': ISCSI_FAKE_VOLUME['id'], 'auth_method': 'None', 'auth_username': 'stack', 'auth_password': 'password'} FC_ISCSI_TARGET_INFO_DICT = {'target_discovered': False, 'target_portal': '10.63.165.216:2232', 'target_iqn': ISCSI_FAKE_IQN, 'target_lun': ISCSI_FAKE_LUN_ID, 'volume_id': ISCSI_FAKE_VOLUME['id'], 'auth_method': 'None', 'auth_username': 'stack', 'auth_password': 'password'} FC_ISCSI_TARGET_INFO_DICT_IPV6 = {'target_discovered': False, 'target_portal': '[fe80::72a4:a152:aad9:30d9]:2232', 'target_iqn': ISCSI_FAKE_IQN, 'target_lun': ISCSI_FAKE_LUN_ID, 'volume_id': ISCSI_FAKE_VOLUME['id']} VOLUME_NAME = 'fake_volume_name' VOLUME_ID = '80113942-01fd-4114-aaee-9d73ecb536d5' VOLUME_TYPE_ID = '20c9718a-9256-4bf8-9f94-1c6f4e7f0c84' VOLUME =
fake_volume.fake_volume_obj(None, name=VOLUME_NAME, size=42, id=VOLUME_ID, host='fake_host@fake_backend#fake_pool', volume_type_id=VOLUME_TYPE_ID) SNAPSHOT_NAME = 'fake_snapshot_name' SNAPSHOT_ID = 'fake_snapshot_id' SNAPSHOT = { 'name': SNAPSHOT_NAME, 'id': SNAPSHOT_ID, 'volume_id': VOLUME_ID, 'volume_name': VOLUME_NAME, 'volume_size': 42, } QOS_SPECS = {} EXTRA_SPECS = {} MAX_THROUGHPUT_BPS = '21734278B/s' QOS_POLICY_GROUP_NAME = 'fake_qos_policy_group_name' LEGACY_EXTRA_SPECS = {'netapp:qos_policy_group': QOS_POLICY_GROUP_NAME} EXPECTED_IOPS_PER_GB = '128' PEAK_IOPS_PER_GB = '512' EXPECTED_IOPS_ALLOCATION = 'used-space' PEAK_IOPS_ALLOCATION = 'used-space' ABSOLUTE_MIN_IOPS = '75' BLOCK_SIZE = 'ANY' ADAPTIVE_QOS_SPEC = { 'expectedIOPSperGiB': EXPECTED_IOPS_PER_GB, 'peakIOPSperGiB': PEAK_IOPS_PER_GB, 'expectedIOPSAllocation': EXPECTED_IOPS_ALLOCATION, 'peakIOPSAllocation': PEAK_IOPS_ALLOCATION, 'absoluteMinIOPS': ABSOLUTE_MIN_IOPS, 'blockSize': BLOCK_SIZE, } LEGACY_QOS = { 'policy_name': QOS_POLICY_GROUP_NAME, } QOS_POLICY_GROUP_SPEC = { 'max_throughput': MAX_THROUGHPUT_BPS, 'policy_name': 'openstack-%s' % VOLUME_ID, } QOS_POLICY_GROUP_INFO_NONE = {'legacy': None, 'spec': None} QOS_POLICY_GROUP_INFO = {'legacy': None, 'spec': QOS_POLICY_GROUP_SPEC} ADAPTIVE_QOS_POLICY_GROUP_SPEC = { 'expected_iops': '128IOPS/GB', 'peak_iops': '512IOPS/GB', 'expected_iops_allocation': 'used-space', 'peak_iops_allocation': 'used-space', 'absolute_min_iops': '75IOPS', 'block_size': 'ANY', 'policy_name': 'openstack-%s' % VOLUME_ID, } LEGACY_QOS_POLICY_GROUP_INFO = { 'legacy': LEGACY_QOS, 'spec': None, } INVALID_QOS_POLICY_GROUP_INFO_LEGACY_AND_SPEC = { 'legacy': LEGACY_QOS, 'spec': QOS_POLICY_GROUP_SPEC, } INVALID_QOS_POLICY_GROUP_INFO_STANDARD_AND_ADAPTIVE = { 'legacy': None, 'spec': {**QOS_POLICY_GROUP_SPEC, **ADAPTIVE_QOS_SPEC}, } QOS_SPECS_ID = 'fake_qos_specs_id' QOS_SPEC = {'maxBPS': 21734278} OUTER_BACKEND_QOS_SPEC = { 'id': QOS_SPECS_ID, 'specs': QOS_SPEC, 'consumer': 'back-end', } OUTER_FRONTEND_QOS_SPEC = { 'id': QOS_SPECS_ID, 'specs': QOS_SPEC, 'consumer': 'front-end', } OUTER_BOTH_QOS_SPEC = { 'id': QOS_SPECS_ID, 'specs': QOS_SPEC, 'consumer': 'both', } VOLUME_TYPE = {'id': VOLUME_TYPE_ID, 'qos_specs_id': QOS_SPECS_ID} def create_configuration(): config = conf.Configuration(None) config.append_config_values(na_opts.netapp_connection_opts) config.append_config_values(na_opts.netapp_transport_opts) config.append_config_values(na_opts.netapp_basicauth_opts) config.append_config_values(na_opts.netapp_certificateauth_opts) config.append_config_values(na_opts.netapp_provisioning_opts) return config def create_configuration_cmode(): config = create_configuration() config.append_config_values(na_opts.netapp_cluster_opts) return config ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/volume/drivers/netapp/test_common.py0000664000175000017500000001150400000000000025632 0ustar00zuulzuul00000000000000# Copyright (c) 2014 Clinton Knight. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. from cinder import exception from cinder.tests.unit import test import cinder.tests.unit.volume.drivers.netapp.fakes as na_fakes import cinder.volume.drivers.netapp.common as na_common import cinder.volume.drivers.netapp.dataontap.fc_cmode as fc_cmode import cinder.volume.drivers.netapp.utils as na_utils class NetAppDriverFactoryTestCase(test.TestCase): def setUp(self): super(NetAppDriverFactoryTestCase, self).setUp() self.mock_object(na_common, 'LOG') def test_new(self): self.mock_object(na_utils.OpenStackInfo, 'info', return_value='fake_info') mock_create_driver = self.mock_object(na_common.NetAppDriver, 'create_driver') config = na_fakes.create_configuration() config.netapp_storage_family = 'fake_family' config.netapp_storage_protocol = 'fake_protocol' kwargs = {'configuration': config} na_common.NetAppDriver(**kwargs) kwargs['app_version'] = 'fake_info' mock_create_driver.assert_called_with('fake_family', 'fake_protocol', *(), **kwargs) def test_new_missing_config(self): self.mock_object(na_utils.OpenStackInfo, 'info') self.mock_object(na_common.NetAppDriver, 'create_driver') self.assertRaises(exception.InvalidInput, na_common.NetAppDriver, **{}) def test_new_missing_family(self): self.mock_object(na_utils.OpenStackInfo, 'info') self.mock_object(na_common.NetAppDriver, 'create_driver') config = na_fakes.create_configuration() config.netapp_storage_protocol = 'fake_protocol' config.netapp_storage_family = None kwargs = {'configuration': config} self.assertRaises(exception.InvalidInput, na_common.NetAppDriver, **kwargs) def test_new_missing_protocol(self): self.mock_object(na_utils.OpenStackInfo, 'info') self.mock_object(na_common.NetAppDriver, 'create_driver') config = na_fakes.create_configuration() config.netapp_storage_family = 'fake_family' kwargs = {'configuration': config} self.assertRaises(exception.InvalidInput, na_common.NetAppDriver, **kwargs) def test_create_driver(self): def get_full_class_name(obj): return obj.__module__ + '.' 
+ obj.__class__.__name__ kwargs = { 'configuration': na_fakes.create_configuration(), 'app_version': 'fake_info', 'host': 'fakehost@fakebackend', } registry = na_common.NETAPP_UNIFIED_DRIVER_REGISTRY for family in registry: for protocol, full_class_name in registry[family].items(): driver = na_common.NetAppDriver.create_driver( family, protocol, **kwargs) self.assertEqual(full_class_name, get_full_class_name(driver)) def test_create_driver_case_insensitive(self): kwargs = { 'configuration': na_fakes.create_configuration(), 'app_version': 'fake_info', 'host': 'fakehost@fakebackend', } driver = na_common.NetAppDriver.create_driver('ONTAP_CLUSTER', 'FC', **kwargs) self.assertIsInstance(driver, fc_cmode.NetAppCmodeFibreChannelDriver) def test_create_driver_invalid_family(self): kwargs = { 'configuration': na_fakes.create_configuration(), 'app_version': 'fake_info', 'host': 'fakehost@fakebackend', } self.assertRaises(exception.InvalidInput, na_common.NetAppDriver.create_driver, 'kardashian', 'iscsi', **kwargs) def test_create_driver_invalid_protocol(self): kwargs = { 'configuration': na_fakes.create_configuration(), 'app_version': 'fake_info', 'host': 'fakehost@fakebackend', } self.assertRaises(exception.InvalidInput, na_common.NetAppDriver.create_driver, 'ontap_cluster', 'carrier_pigeon', **kwargs) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/volume/drivers/netapp/test_utils.py0000664000175000017500000014206400000000000025510 0ustar00zuulzuul00000000000000# Copyright (c) 2014 Clinton Knight. All rights reserved. # Copyright (c) 2015 Tom Barron. All rights reserved. # Copyright (c) 2016 Michael Price. All rights reserved. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
""" Mock unit tests for the NetApp driver utility module """ import copy import platform from unittest import mock import ddt from oslo_concurrency import processutils as putils from cinder import context from cinder import exception from cinder.tests.unit import test from cinder.tests.unit.volume.drivers.netapp.dataontap.client import ( fakes as zapi_fakes) from cinder.tests.unit.volume.drivers.netapp.dataontap import fakes import cinder.tests.unit.volume.drivers.netapp.fakes as fake from cinder import version from cinder.volume.drivers.netapp.dataontap.client import api as netapp_api from cinder.volume.drivers.netapp import utils as na_utils from cinder.volume import qos_specs from cinder.volume import volume_types @ddt.ddt class NetAppDriverUtilsTestCase(test.TestCase): @mock.patch.object(na_utils, 'LOG', mock.Mock()) def test_validate_instantiation_proxy(self): kwargs = {'netapp_mode': 'proxy'} na_utils.validate_instantiation(**kwargs) na_utils.LOG.warning.assert_not_called() @mock.patch.object(na_utils, 'LOG', mock.Mock()) def test_validate_instantiation_no_proxy(self): kwargs = {'netapp_mode': 'asdf'} na_utils.validate_instantiation(**kwargs) na_utils.LOG.warning.assert_called_once() def test_check_flags(self): class TestClass(object): pass required_flags = ['flag1', 'flag2'] configuration = TestClass() setattr(configuration, 'flag1', 'value1') setattr(configuration, 'flag3', 'value3') self.assertRaises(exception.InvalidInput, na_utils.check_flags, required_flags, configuration) setattr(configuration, 'flag2', 'value2') self.assertIsNone(na_utils.check_flags(required_flags, configuration)) def test_to_bool(self): self.assertTrue(na_utils.to_bool(True)) self.assertTrue(na_utils.to_bool('true')) self.assertTrue(na_utils.to_bool('yes')) self.assertTrue(na_utils.to_bool('y')) self.assertTrue(na_utils.to_bool(1)) self.assertTrue(na_utils.to_bool('1')) self.assertFalse(na_utils.to_bool(False)) self.assertFalse(na_utils.to_bool('false')) self.assertFalse(na_utils.to_bool('asdf')) self.assertFalse(na_utils.to_bool('no')) self.assertFalse(na_utils.to_bool('n')) self.assertFalse(na_utils.to_bool(0)) self.assertFalse(na_utils.to_bool('0')) self.assertFalse(na_utils.to_bool(2)) self.assertFalse(na_utils.to_bool('2')) def test_set_safe_attr(self): fake_object = mock.Mock() fake_object.fake_attr = None # test initial checks self.assertFalse(na_utils.set_safe_attr(None, fake_object, None)) self.assertFalse(na_utils.set_safe_attr(fake_object, None, None)) self.assertFalse(na_utils.set_safe_attr(fake_object, 'fake_attr', None)) # test value isn't changed if it shouldn't be and retval is False fake_object.fake_attr = 'fake_value' self.assertFalse(na_utils.set_safe_attr(fake_object, 'fake_attr', 'fake_value')) self.assertEqual('fake_value', fake_object.fake_attr) # test value is changed if it should be and retval is True self.assertTrue(na_utils.set_safe_attr(fake_object, 'fake_attr', 'new_fake_value')) self.assertEqual('new_fake_value', fake_object.fake_attr) def test_round_down(self): self.assertAlmostEqual(na_utils.round_down(5.567), 5.56) self.assertAlmostEqual(na_utils.round_down(5.567, '0.00'), 5.56) self.assertAlmostEqual(na_utils.round_down(5.567, '0.0'), 5.5) self.assertAlmostEqual(na_utils.round_down(5.567, '0'), 5) self.assertAlmostEqual(na_utils.round_down(0, '0.00'), 0) self.assertAlmostEqual(na_utils.round_down(-5.567), -5.56) self.assertAlmostEqual(na_utils.round_down(-5.567, '0.00'), -5.56) self.assertAlmostEqual(na_utils.round_down(-5.567, '0.0'), -5.5) 
self.assertAlmostEqual(na_utils.round_down(-5.567, '0'), -5) def test_iscsi_connection_properties(self): actual_properties = na_utils.get_iscsi_connection_properties( fake.ISCSI_FAKE_LUN_ID, fake.ISCSI_FAKE_VOLUME, [fake.ISCSI_FAKE_IQN, fake.ISCSI_FAKE_IQN2], [fake.ISCSI_FAKE_ADDRESS_IPV4, fake.ISCSI_FAKE_ADDRESS2_IPV4], [fake.ISCSI_FAKE_PORT, fake.ISCSI_FAKE_PORT]) actual_properties_mapped = actual_properties['data'] self.assertDictEqual(actual_properties_mapped, fake.ISCSI_MP_TARGET_INFO_DICT) def test_iscsi_connection_properties_single_iqn(self): actual_properties = na_utils.get_iscsi_connection_properties( fake.ISCSI_FAKE_LUN_ID, fake.ISCSI_FAKE_VOLUME, fake.ISCSI_FAKE_IQN, [fake.ISCSI_FAKE_ADDRESS_IPV4, fake.ISCSI_FAKE_ADDRESS2_IPV4], [fake.ISCSI_FAKE_PORT, fake.ISCSI_FAKE_PORT]) actual_properties_mapped = actual_properties['data'] expected = copy.deepcopy(fake.ISCSI_MP_TARGET_INFO_DICT) expected['target_iqns'][1] = expected['target_iqns'][0] self.assertDictEqual(expected, actual_properties_mapped) def test_iscsi_connection_lun_id_type_str(self): FAKE_LUN_ID = '1' actual_properties = na_utils.get_iscsi_connection_properties( FAKE_LUN_ID, fake.ISCSI_FAKE_VOLUME, fake.ISCSI_FAKE_IQN, [fake.ISCSI_FAKE_ADDRESS_IPV4], [fake.ISCSI_FAKE_PORT]) actual_properties_mapped = actual_properties['data'] self.assertIs(int, type(actual_properties_mapped['target_lun'])) self.assertDictEqual(actual_properties_mapped, fake.FC_ISCSI_TARGET_INFO_DICT) def test_iscsi_connection_lun_id_type_dict(self): FAKE_LUN_ID = {'id': 'fake_id'} self.assertRaises(TypeError, na_utils.get_iscsi_connection_properties, FAKE_LUN_ID, fake.ISCSI_FAKE_VOLUME, fake.ISCSI_FAKE_IQN, [fake.ISCSI_FAKE_ADDRESS_IPV4], [fake.ISCSI_FAKE_PORT]) def test_iscsi_connection_properties_ipv6(self): actual_properties = na_utils.get_iscsi_connection_properties( '1', fake.ISCSI_FAKE_VOLUME_NO_AUTH, fake.ISCSI_FAKE_IQN, [fake.ISCSI_FAKE_ADDRESS_IPV6], [fake.ISCSI_FAKE_PORT]) self.assertDictEqual(actual_properties['data'], fake.FC_ISCSI_TARGET_INFO_DICT_IPV6) def test_get_volume_extra_specs(self): fake_extra_specs = {'fake_key': 'fake_value'} fake_volume_type = {'extra_specs': fake_extra_specs} fake_volume = {'volume_type_id': fake.VOLUME_TYPE_ID} self.mock_object(context, 'get_admin_context') self.mock_object(volume_types, 'get_volume_type', return_value=fake_volume_type) self.mock_object(na_utils, 'log_extra_spec_warnings') result = na_utils.get_volume_extra_specs(fake_volume) self.assertEqual(fake_extra_specs, result) def test_trace_filter_func_api(self): na_utils.setup_api_trace_pattern("^(?!(perf)).*$") na_element = zapi_fakes.FAKE_NA_ELEMENT all_args = {'na_element': na_element} self.assertTrue(na_utils.trace_filter_func_api(all_args)) def test_trace_filter_func_api_invalid(self): all_args = {'fake': 'not_na_element'} self.assertTrue(na_utils.trace_filter_func_api(all_args)) def test_trace_filter_func_api_filtered(self): na_utils.setup_api_trace_pattern("^(?!(perf)).*$") na_element = netapp_api.NaElement("perf-object-counter-list-info") all_args = {'na_element': na_element} self.assertFalse(na_utils.trace_filter_func_api(all_args)) def test_get_volume_extra_specs_no_type_id(self): fake_volume = {} self.mock_object(context, 'get_admin_context') self.mock_object(volume_types, 'get_volume_type') self.mock_object(na_utils, 'log_extra_spec_warnings') result = na_utils.get_volume_extra_specs(fake_volume) self.assertEqual({}, result) def test_get_volume_extra_specs_no_volume_type(self): fake_volume = {'volume_type_id': fake.VOLUME_TYPE_ID} 
self.mock_object(context, 'get_admin_context') self.mock_object(volume_types, 'get_volume_type', return_value=None) self.mock_object(na_utils, 'log_extra_spec_warnings') result = na_utils.get_volume_extra_specs(fake_volume) self.assertEqual({}, result) def test_log_extra_spec_warnings_obsolete_specs(self): mock_log = self.mock_object(na_utils.LOG, 'warning') na_utils.log_extra_spec_warnings({'netapp:raid_type': 'raid4'}) mock_log.assert_called_once() def test_log_extra_spec_warnings_deprecated_specs(self): mock_log = self.mock_object(na_utils.LOG, 'warning') na_utils.log_extra_spec_warnings({'netapp_thick_provisioned': 'true'}) mock_log.assert_called_once() def test_validate_qos_spec(self): qos_spec = fake.QOS_SPEC # Just return without raising an exception. na_utils.validate_qos_spec(qos_spec) def test_validate_qos_spec_none(self): qos_spec = None # Just return without raising an exception. na_utils.validate_qos_spec(qos_spec) def test_validate_qos_spec_adaptive(self): # Just return without raising an exception. na_utils.validate_qos_spec(fake.ADAPTIVE_QOS_SPEC) def test_validate_qos_spec_keys_weirdly_cased(self): qos_spec = {'mAxIopS': 33000, 'mInIopS': 0} # Just return without raising an exception. na_utils.validate_qos_spec(qos_spec) def test_validate_qos_spec_bad_key_max_flops(self): qos_spec = {'maxFlops': 33000} self.assertRaises(exception.Invalid, na_utils.validate_qos_spec, qos_spec) def test_validate_qos_spec_bad_key_min_bps(self): qos_spec = {'minBps': 33000} self.assertRaises(exception.Invalid, na_utils.validate_qos_spec, qos_spec) def test_validate_qos_spec_bad_key_min_bps_per_gib(self): qos_spec = {'minBPSperGiB': 33000} self.assertRaises(exception.Invalid, na_utils.validate_qos_spec, qos_spec) def test_validate_qos_spec_bad_key_combination_max_iops_max_bps(self): qos_spec = {'maxIOPS': 33000, 'maxBPS': 10000000} self.assertRaises(exception.Invalid, na_utils.validate_qos_spec, qos_spec) def test_validate_qos_spec_bad_key_combination_miniops_miniopspergib(self): qos_spec = {'minIOPS': 33000, 'minIOPSperGiB': 10000000} self.assertRaises(exception.Invalid, na_utils.validate_qos_spec, qos_spec) def test_validate_qos_spec_bad_key_combination_aqos_qos_max(self): qos_spec = {'peakIOPSperGiB': 33000, 'maxIOPS': 33000} self.assertRaises(exception.Invalid, na_utils.validate_qos_spec, qos_spec) def test_validate_qos_spec_bad_key_combination_aqos_qos_min(self): qos_spec = {'absoluteMinIOPS': 33000, 'minIOPS': 33000} self.assertRaises(exception.Invalid, na_utils.validate_qos_spec, qos_spec) def test_validate_qos_spec_bad_key_combination_aqos_qos_min_max(self): qos_spec = { 'expectedIOPSperGiB': 33000, 'minIOPS': 33000, 'maxIOPS': 33000, } self.assertRaises(exception.Invalid, na_utils.validate_qos_spec, qos_spec) def test_validate_qos_spec_adaptive_and_non_adaptive(self): qos_spec = fake.INVALID_QOS_POLICY_GROUP_INFO_STANDARD_AND_ADAPTIVE self.assertRaises(exception.Invalid, na_utils.validate_qos_spec, qos_spec) def test_map_qos_spec_none(self): qos_spec = None result = na_utils.map_qos_spec(qos_spec, fake.VOLUME) self.assertIsNone(result) def test_map_qos_spec_bad_key_combination_miniops_maxbpspergib(self): qos_spec = {'minIOPS': 33000, 'maxBPSperGiB': 10000000} self.assertRaises(exception.Invalid, na_utils.map_qos_spec, qos_spec, fake.VOLUME) def test_map_qos_spec_bad_key_combination_min_iops_max_bps(self): qos_spec = {'minIOPS': 33000, 'maxBPS': 10000000} self.assertRaises(exception.Invalid, na_utils.map_qos_spec, qos_spec, fake.VOLUME) def 
test_map_qos_spec_miniops_greater_than_maxiops(self): qos_spec = {'minIOPS': 33001, 'maxIOPS': 33000} self.assertRaises(exception.Invalid, na_utils.map_qos_spec, qos_spec, fake.VOLUME) def test_map_qos_spec_maxiops(self): qos_spec = {'maxIOPs': 33000} mock_get_name = self.mock_object(na_utils, 'get_qos_policy_group_name') mock_get_name.return_value = 'fake_qos_policy' expected = { 'policy_name': 'fake_qos_policy', 'max_throughput': '33000iops', } result = na_utils.map_qos_spec(qos_spec, fake.VOLUME) self.assertEqual(expected, result) def test_map_qos_spec_maxiopspergib(self): qos_spec = {'maxIOPSperGiB': 1000} mock_get_name = self.mock_object(na_utils, 'get_qos_policy_group_name') mock_get_name.return_value = 'fake_qos_policy' expected = { 'policy_name': 'fake_qos_policy', 'max_throughput': '42000iops', } result = na_utils.map_qos_spec(qos_spec, fake.VOLUME) self.assertEqual(expected, result) def test_map_qos_spec_miniopspergib_maxiopspergib(self): qos_spec = {'minIOPSperGiB': 1000, 'maxIOPSperGiB': 1000} mock_get_name = self.mock_object(na_utils, 'get_qos_policy_group_name') mock_get_name.return_value = 'fake_qos_policy' expected = { 'policy_name': 'fake_qos_policy', 'min_throughput': '42000iops', 'max_throughput': '42000iops', } result = na_utils.map_qos_spec(qos_spec, fake.VOLUME) self.assertEqual(expected, result) def test_map_qos_spec_maxbps(self): qos_spec = {'maxBPS': 1000000} mock_get_name = self.mock_object(na_utils, 'get_qos_policy_group_name') mock_get_name.return_value = 'fake_qos_policy' expected = { 'policy_name': 'fake_qos_policy', 'max_throughput': '1000000B/s', } result = na_utils.map_qos_spec(qos_spec, fake.VOLUME) self.assertEqual(expected, result) def test_map_qos_spec_maxbpspergib(self): qos_spec = {'maxBPSperGiB': 100000} mock_get_name = self.mock_object(na_utils, 'get_qos_policy_group_name') mock_get_name.return_value = 'fake_qos_policy' expected = { 'policy_name': 'fake_qos_policy', 'max_throughput': '4200000B/s', } result = na_utils.map_qos_spec(qos_spec, fake.VOLUME) self.assertEqual(expected, result) def test_map_qos_spec_no_key_present(self): qos_spec = {} mock_get_name = self.mock_object(na_utils, 'get_qos_policy_group_name') mock_get_name.return_value = 'fake_qos_policy' expected = { 'policy_name': 'fake_qos_policy', } result = na_utils.map_qos_spec(qos_spec, fake.VOLUME) self.assertEqual(expected, result) def test_map_qos_spec_miniops_maxiops(self): qos_spec = {'minIOPs': 25000, 'maxIOPs': 33000} mock_get_name = self.mock_object(na_utils, 'get_qos_policy_group_name') mock_get_name.return_value = 'fake_qos_policy' expected = { 'policy_name': 'fake_qos_policy', 'min_throughput': '25000iops', 'max_throughput': '33000iops', } result = na_utils.map_qos_spec(qos_spec, fake.VOLUME) self.assertEqual(expected, result) def test_map_aqos_spec(self): qos_spec = { 'expectedIOPSperGiB': '128', 'peakIOPSperGiB': '512', 'expectedIOPSAllocation': 'used-space', 'peakIOPSAllocation': 'used-space', 'absoluteMinIOPS': '75', 'blockSize': 'ANY', } mock_get_name = self.mock_object(na_utils, 'get_qos_policy_group_name') mock_get_name.return_value = 'fake_qos_policy' expected = { 'expected_iops': '128IOPS/GB', 'peak_iops': '512IOPS/GB', 'expected_iops_allocation': 'used-space', 'peak_iops_allocation': 'used-space', 'absolute_min_iops': '75IOPS', 'block_size': 'ANY', 'policy_name': 'fake_qos_policy', } result = na_utils.map_aqos_spec(qos_spec, fake.VOLUME) self.assertEqual(expected, result) @ddt.data({'expectedIOPSperGiB': '528', 'peakIOPSperGiB': '128'}, {'expectedIOPSperGiB': '528'}) 
def test_map_aqos_spec_error(self, qos_spec): mock_get_name = self.mock_object(na_utils, 'get_qos_policy_group_name') mock_get_name.return_value = 'fake_qos_policy' self.assertRaises(exception.Invalid, na_utils.map_aqos_spec, qos_spec, fake.VOLUME) def test_is_qos_adaptive_adaptive_spec(self): aqos_spec = fake.ADAPTIVE_QOS_SPEC self.assertTrue(na_utils.is_qos_adaptive(aqos_spec)) def test_is_qos_adaptive_weirdly_cased_adaptive_spec(self): aqos_spec = {'expecTEDiopsPERgib': '128IOPS/GB'} self.assertTrue(na_utils.is_qos_adaptive(aqos_spec)) def test_is_qos_adaptive_non_adaptive_spec(self): qos_spec = fake.QOS_SPEC self.assertFalse(na_utils.is_qos_adaptive(qos_spec)) def test_is_qos_policy_group_spec_adaptive_adaptive_spec(self): aqos_spec = { 'spec': { 'expected_iops': '128IOPS/GB', 'peak_iops': '512IOPS/GB', 'expected_iops_allocation': 'used-space', 'absolute_min_iops': '75IOPS', 'block_size': 'ANY', 'policy_name': 'fake_policy_name', } } self.assertTrue(na_utils.is_qos_policy_group_spec_adaptive(aqos_spec)) def test_is_qos_policy_group_spec_adaptive_none(self): qos_spec = None self.assertFalse(na_utils.is_qos_policy_group_spec_adaptive(qos_spec)) def test_is_qos_policy_group_spec_adaptive_legacy(self): qos_spec = { 'legacy': fake.LEGACY_QOS, } self.assertFalse(na_utils.is_qos_policy_group_spec_adaptive(qos_spec)) def test_is_qos_policy_group_spec_adaptive_non_adaptive_spec(self): qos_spec = { 'spec': { 'max_throughput': '21834289B/s', 'policy_name': 'fake_policy_name', } } self.assertFalse(na_utils.is_qos_policy_group_spec_adaptive(qos_spec)) def test_policy_group_qos_spec_is_adaptive_invalid_spec(self): qos_spec = { 'spec': { 'max_flops': '512', 'policy_name': 'fake_policy_name', } } self.assertFalse(na_utils.is_qos_policy_group_spec_adaptive(qos_spec)) def test_map_dict_to_lower(self): original = {'UPperKey': 'Value'} expected = {'upperkey': 'Value'} result = na_utils.map_dict_to_lower(original) self.assertEqual(expected, result) def test_get_qos_policy_group_name(self): expected = 'openstack-%s' % fake.VOLUME_ID result = na_utils.get_qos_policy_group_name(fake.VOLUME) self.assertEqual(expected, result) def test_get_qos_policy_group_name_no_id(self): delattr(fake.VOLUME, '_obj_id') try: result = na_utils.get_qos_policy_group_name(fake.VOLUME) finally: fake.VOLUME._obj_id = fake.VOLUME_ID self.assertIsNone(result) def test_get_qos_policy_group_name_migrated_volume(self): fake.VOLUME._name_id = fake.VOLUME_ID try: expected = 'openstack-' + fake.VOLUME.name_id result = na_utils.get_qos_policy_group_name(fake.VOLUME) finally: fake.VOLUME._name_id = None self.assertEqual(expected, result) def test_get_qos_policy_group_name_from_info(self): expected = 'openstack-%s' % fake.VOLUME_ID result = na_utils.get_qos_policy_group_name_from_info( fake.QOS_POLICY_GROUP_INFO) self.assertEqual(expected, result) def test_get_qos_policy_group_name_from_info_no_info(self): result = na_utils.get_qos_policy_group_name_from_info(None) self.assertIsNone(result) def test_get_qos_policy_group_name_from_legacy_info(self): expected = fake.QOS_POLICY_GROUP_NAME result = na_utils.get_qos_policy_group_name_from_info( fake.LEGACY_QOS_POLICY_GROUP_INFO) self.assertEqual(expected, result) def test_get_qos_policy_group_name_from_spec_info(self): expected = 'openstack-%s' % fake.VOLUME_ID result = na_utils.get_qos_policy_group_name_from_info( fake.QOS_POLICY_GROUP_INFO) self.assertEqual(expected, result) def test_get_qos_policy_group_name_from_none_qos_info(self): expected = None result = 
na_utils.get_qos_policy_group_name_from_info( fake.QOS_POLICY_GROUP_INFO_NONE) self.assertEqual(expected, result) def test_get_valid_qos_policy_group_info_exception_path(self): mock_get_volume_type = self.mock_object(na_utils, 'get_volume_type_from_volume') mock_get_volume_type.side_effect = exception.VolumeTypeNotFound expected = fake.QOS_POLICY_GROUP_INFO_NONE result = na_utils.get_valid_qos_policy_group_info(fake.VOLUME) self.assertEqual(expected, result) def test_get_valid_qos_policy_group_info_volume_type_none(self): mock_get_volume_type = self.mock_object(na_utils, 'get_volume_type_from_volume') mock_get_volume_type.return_value = None expected = fake.QOS_POLICY_GROUP_INFO_NONE result = na_utils.get_valid_qos_policy_group_info(fake.VOLUME) self.assertEqual(expected, result) def test_get_valid_qos_policy_group_info_no_info(self): mock_get_volume_type = self.mock_object(na_utils, 'get_volume_type_from_volume') mock_get_volume_type.return_value = fake.VOLUME_TYPE mock_get_legacy_qos_policy = self.mock_object(na_utils, 'get_legacy_qos_policy') mock_get_legacy_qos_policy.return_value = None mock_get_valid_qos_spec_from_volume_type = self.mock_object( na_utils, 'get_valid_backend_qos_spec_from_volume_type') mock_get_valid_qos_spec_from_volume_type.return_value = None expected = fake.QOS_POLICY_GROUP_INFO_NONE result = na_utils.get_valid_qos_policy_group_info(fake.VOLUME) self.assertEqual(expected, result) def test_get_valid_legacy_qos_policy_group_info(self): mock_get_volume_type = self.mock_object(na_utils, 'get_volume_type_from_volume') mock_get_volume_type.return_value = fake.VOLUME_TYPE mock_get_legacy_qos_policy = self.mock_object(na_utils, 'get_legacy_qos_policy') mock_get_legacy_qos_policy.return_value = fake.LEGACY_QOS mock_get_valid_qos_spec_from_volume_type = self.mock_object( na_utils, 'get_valid_backend_qos_spec_from_volume_type') mock_get_valid_qos_spec_from_volume_type.return_value = None result = na_utils.get_valid_qos_policy_group_info(fake.VOLUME) self.assertEqual(fake.LEGACY_QOS_POLICY_GROUP_INFO, result) def test_get_valid_spec_qos_policy_group_info(self): mock_get_volume_type = self.mock_object(na_utils, 'get_volume_type_from_volume') mock_get_volume_type.return_value = fake.VOLUME_TYPE mock_get_legacy_qos_policy = self.mock_object(na_utils, 'get_legacy_qos_policy') mock_get_legacy_qos_policy.return_value = None mock_get_valid_qos_spec_from_volume_type = self.mock_object( na_utils, 'get_valid_backend_qos_spec_from_volume_type') mock_get_valid_qos_spec_from_volume_type.return_value =\ fake.QOS_POLICY_GROUP_SPEC result = na_utils.get_valid_qos_policy_group_info(fake.VOLUME) self.assertEqual(fake.QOS_POLICY_GROUP_INFO, result) def test_get_valid_backend_qos_spec_from_volume_type_no_spec(self): mock_get_spec = self.mock_object( na_utils, 'get_backend_qos_spec_from_volume_type') mock_get_spec.return_value = None mock_map_qos_spec = self.mock_object( na_utils, 'map_qos_spec') mock_map_aqos_spec = self.mock_object( na_utils, 'map_aqos_spec') result = na_utils.get_valid_backend_qos_spec_from_volume_type( fake.VOLUME, fake.VOLUME_TYPE) self.assertIsNone(result) mock_map_qos_spec.assert_not_called() mock_map_aqos_spec.assert_not_called() def test_get_valid_backend_qos_spec_from_volume_type(self): mock_get_spec = self.mock_object( na_utils, 'get_backend_qos_spec_from_volume_type') mock_get_spec.return_value = fake.QOS_SPEC mock_map_aqos_spec = self.mock_object( na_utils, 'map_aqos_spec') result = na_utils.get_valid_backend_qos_spec_from_volume_type( fake.VOLUME, fake.VOLUME_TYPE) 
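# NOTE (editor's aside): the tests above pin down the resolution order for
# QoS information on a volume type -- a legacy policy wins when present, a
# backend QoS spec is mapped otherwise, combining both is invalid, and
# per-GiB keys are scaled by the volume size (the fake volume is 42 GiB, so
# {'maxIOPSperGiB': 1000} maps to a '42000iops' max throughput).  The
# helper below is a hedged sketch of that decision order with made-up
# names; it is not the cinder.volume.drivers.netapp.utils implementation.
def _sketch_resolve_qos(legacy_policy, spec, volume_size_gib):
    if legacy_policy and spec:
        raise ValueError('legacy policy and QoS spec may not be combined')
    if legacy_policy:
        return {'legacy': legacy_policy}
    if not spec:
        return {'spec': None}
    lowered = {key.lower(): value for key, value in spec.items()}
    mapped = {}
    if 'maxiops' in lowered:
        mapped['max_throughput'] = '%diops' % int(lowered['maxiops'])
    if 'maxiopspergib' in lowered:
        mapped['max_throughput'] = '%diops' % (
            int(lowered['maxiopspergib']) * volume_size_gib)
    return {'spec': mapped}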
self.assertEqual(fake.QOS_POLICY_GROUP_SPEC, result) mock_map_aqos_spec.assert_not_called() def test_get_valid_backend_qos_spec_from_volume_type_adaptive(self): mock_get_spec = self.mock_object( na_utils, 'get_backend_qos_spec_from_volume_type') mock_get_spec.return_value = fake.ADAPTIVE_QOS_SPEC mock_map_qos_spec = self.mock_object( na_utils, 'map_qos_spec') result = na_utils.get_valid_backend_qos_spec_from_volume_type( fake.VOLUME, fake.VOLUME_TYPE) self.assertEqual(fake.ADAPTIVE_QOS_POLICY_GROUP_SPEC, result) mock_map_qos_spec.assert_not_called() def test_get_backend_qos_spec_from_volume_type_no_qos_specs_id(self): volume_type = copy.deepcopy(fake.VOLUME_TYPE) del volume_type['qos_specs_id'] mock_get_context = self.mock_object(context, 'get_admin_context') result = na_utils.get_backend_qos_spec_from_volume_type(volume_type) self.assertIsNone(result) mock_get_context.assert_not_called() def test_get_backend_qos_spec_from_volume_type_no_qos_spec(self): volume_type = fake.VOLUME_TYPE self.mock_object(context, 'get_admin_context') mock_get_specs = self.mock_object(qos_specs, 'get_qos_specs') mock_get_specs.return_value = None result = na_utils.get_backend_qos_spec_from_volume_type(volume_type) self.assertIsNone(result) def test_get_backend_qos_spec_from_volume_type_with_frontend_spec(self): volume_type = fake.VOLUME_TYPE self.mock_object(context, 'get_admin_context') mock_get_specs = self.mock_object(qos_specs, 'get_qos_specs') mock_get_specs.return_value = fake.OUTER_FRONTEND_QOS_SPEC result = na_utils.get_backend_qos_spec_from_volume_type(volume_type) self.assertIsNone(result) def test_get_backend_qos_spec_from_volume_type_with_backend_spec(self): volume_type = fake.VOLUME_TYPE self.mock_object(context, 'get_admin_context') mock_get_specs = self.mock_object(qos_specs, 'get_qos_specs') mock_get_specs.return_value = fake.OUTER_BACKEND_QOS_SPEC result = na_utils.get_backend_qos_spec_from_volume_type(volume_type) self.assertEqual(fake.QOS_SPEC, result) def test_get_backend_qos_spec_from_volume_type_with_both_spec(self): volume_type = fake.VOLUME_TYPE self.mock_object(context, 'get_admin_context') mock_get_specs = self.mock_object(qos_specs, 'get_qos_specs') mock_get_specs.return_value = fake.OUTER_BOTH_QOS_SPEC result = na_utils.get_backend_qos_spec_from_volume_type(volume_type) self.assertEqual(fake.QOS_SPEC, result) def test_check_for_invalid_qos_spec_combination_legacy(self): na_utils.check_for_invalid_qos_spec_combination( fake.LEGACY_QOS_POLICY_GROUP_INFO, fake.VOLUME_TYPE) def test_check_for_invalid_qos_spec_combination_spec(self): na_utils.check_for_invalid_qos_spec_combination( fake.QOS_POLICY_GROUP_INFO, fake.VOLUME_TYPE) def test_check_for_invalid_qos_spec_combination_legacy_and_spec(self): self.assertRaises(exception.Invalid, na_utils.check_for_invalid_qos_spec_combination, fake.INVALID_QOS_POLICY_GROUP_INFO_LEGACY_AND_SPEC, fake.VOLUME_TYPE) def test_get_legacy_qos_policy(self): extra_specs = fake.LEGACY_EXTRA_SPECS expected = {'policy_name': fake.QOS_POLICY_GROUP_NAME} result = na_utils.get_legacy_qos_policy(extra_specs) self.assertEqual(expected, result) def test_get_legacy_qos_policy_no_policy_name(self): extra_specs = fake.EXTRA_SPECS result = na_utils.get_legacy_qos_policy(extra_specs) self.assertIsNone(result) @ddt.data(("192.168.99.24:/fake/export/path", "192.168.99.24", "/fake/export/path"), ("127.0.0.1:/", "127.0.0.1", "/"), ("[f180::30d9]:/path_to-export/3.1/this folder", "f180::30d9", "/path_to-export/3.1/this folder"), ("[::]:/", "::", "/"), 
("[2001:db8::1]:/fake_export", "2001:db8::1", "/fake_export")) @ddt.unpack def test_get_export_host_junction_path(self, share, host, junction_path): result_host, result_path = na_utils.get_export_host_junction_path( share) self.assertEqual(host, result_host) self.assertEqual(junction_path, result_path) @ddt.data("192.14.21.0/wrong_export", "192.14.21.0:8080:/wrong_export" "2001:db8::1:/wrong_export", "[2001:db8::1:/wrong_export", "2001:db8::1]:/wrong_export") def test_get_export_host_junction_path_with_invalid_exports(self, share): self.assertRaises(na_utils.NetAppDriverException, na_utils.get_export_host_junction_path, share) @ddt.data(True, False) def test_qos_min_feature_name(self, is_nfs): name = 'node' feature_name = na_utils.qos_min_feature_name(is_nfs, name) if is_nfs: self.assertEqual('QOS_MIN_NFS_' + name, feature_name) else: self.assertEqual('QOS_MIN_BLOCK_' + name, feature_name) self.assertEqual('QOS_MIN_NFS_', na_utils.qos_min_feature_name(True, None)) self.assertEqual('QOS_MIN_BLOCK_', na_utils.qos_min_feature_name(False, None)) def test__is_multiattach_to_host_no_attachments(self): volume = copy.deepcopy(fakes.test_volume) volume.multiattach = True volume.volume_attachment = [] result = na_utils.is_multiattach_to_host(volume, {'host': fakes.HOST_NAME}) self.assertFalse(result) def test__is_multiattach_to_host_multiattach_disabled(self): volume = copy.deepcopy(fakes.test_volume) result = na_utils.is_multiattach_to_host(volume, {'host': fakes.HOST_NAME}) self.assertFalse(result) def test__is_multiattach_to_host_single_attachment(self): volume = copy.deepcopy(fakes.test_volume) volume.multiattach = True volume.volume_attachment = [ {'attach_status': fakes.ATTACHED, 'attached_host': fakes.HOST_NAME} ] result = na_utils.is_multiattach_to_host(volume, fakes.FC_CONNECTOR) self.assertFalse(result) def test__is_multiattach_to_host_on_same_host(self): volume = copy.deepcopy(fakes.test_volume) volume.multiattach = True volume.volume_attachment = [ {'attach_status': fakes.ATTACHED, 'attached_host': fakes.HOST_NAME }, {'attach_status': fakes.ATTACHED, 'attached_host': fakes.HOST_NAME } ] result = na_utils.is_multiattach_to_host(volume, {'host': fakes.HOST_NAME}) self.assertTrue(result) def test__is_multiattach_to_host_on_different_host(self): volume = copy.deepcopy(fakes.test_volume) volume.multiattach = True volume.volume_attachment = [ {'attach_status': fakes.ATTACHED, 'attached_host': "fake_host1"}, {'attach_status': fakes.ATTACHED, 'attached_host': "fake_host2"}, ] result = na_utils.is_multiattach_to_host(volume, {'host': "fake_host1"}) self.assertFalse(result) class OpenStackInfoTestCase(test.TestCase): UNKNOWN_VERSION = 'unknown version' UNKNOWN_RELEASE = 'unknown release' UNKNOWN_VENDOR = 'unknown vendor' UNKNOWN_PLATFORM = 'unknown platform' VERSION_STRING_RET_VAL = 'fake_version_1' RELEASE_STRING_RET_VAL = 'fake_release_1' PLATFORM_RET_VAL = 'fake_platform_1' VERSION_INFO_VERSION = 'fake_version_2' VERSION_INFO_RELEASE = 'fake_release_2' RPM_INFO_VERSION = 'fake_version_3' RPM_INFO_RELEASE = 'fake_release_3' RPM_INFO_VENDOR = 'fake vendor 3' PUTILS_RPM_RET_VAL = ('fake_version_3 fake_release_3 fake vendor 3', '') NO_PKG_FOUND = ('', 'whatever') PUTILS_DPKG_RET_VAL = ('epoch:upstream_version-debian_revision', '') DEB_RLS = 'upstream_version-debian_revision' DEB_VENDOR = 'debian_revision' def test_openstack_info_init(self): info = na_utils.OpenStackInfo() self.assertEqual(self.UNKNOWN_VERSION, info._version) self.assertEqual(self.UNKNOWN_RELEASE, info._release) 
self.assertEqual(self.UNKNOWN_VENDOR, info._vendor) self.assertEqual(self.UNKNOWN_PLATFORM, info._platform) @mock.patch.object(version.version_info, 'version_string', mock.Mock(return_value=VERSION_STRING_RET_VAL)) def test_update_version_from_version_string(self): info = na_utils.OpenStackInfo() info._update_version_from_version_string() self.assertEqual(self.VERSION_STRING_RET_VAL, info._version) self.assertEqual(self.UNKNOWN_RELEASE, info._release) self.assertEqual(self.UNKNOWN_VENDOR, info._vendor) self.assertEqual(self.UNKNOWN_PLATFORM, info._platform) @mock.patch.object(version.version_info, 'version_string', mock.Mock(side_effect=Exception)) def test_xcption_in_update_version_from_version_string(self): info = na_utils.OpenStackInfo() info._update_version_from_version_string() self.assertEqual(self.UNKNOWN_VERSION, info._version) self.assertEqual(self.UNKNOWN_RELEASE, info._release) self.assertEqual(self.UNKNOWN_VENDOR, info._vendor) self.assertEqual(self.UNKNOWN_PLATFORM, info._platform) @mock.patch.object(version.version_info, 'release_string', mock.Mock(return_value=RELEASE_STRING_RET_VAL)) def test_update_release_from_release_string(self): info = na_utils.OpenStackInfo() info._update_release_from_release_string() self.assertEqual(self.UNKNOWN_VERSION, info._version) self.assertEqual(self.RELEASE_STRING_RET_VAL, info._release) self.assertEqual(self.UNKNOWN_VENDOR, info._vendor) self.assertEqual(self.UNKNOWN_PLATFORM, info._platform) @mock.patch.object(version.version_info, 'release_string', mock.Mock(side_effect=Exception)) def test_xcption_in_update_release_from_release_string(self): info = na_utils.OpenStackInfo() info._update_release_from_release_string() self.assertEqual(self.UNKNOWN_VERSION, info._version) self.assertEqual(self.UNKNOWN_RELEASE, info._release) self.assertEqual(self.UNKNOWN_VENDOR, info._vendor) self.assertEqual(self.UNKNOWN_PLATFORM, info._platform) @mock.patch.object(platform, 'platform', mock.Mock(return_value=PLATFORM_RET_VAL)) def test_update_platform(self): info = na_utils.OpenStackInfo() info._update_platform() self.assertEqual(self.UNKNOWN_VERSION, info._version) self.assertEqual(self.UNKNOWN_RELEASE, info._release) self.assertEqual(self.UNKNOWN_VENDOR, info._vendor) self.assertEqual(self.PLATFORM_RET_VAL, info._platform) @mock.patch.object(platform, 'platform', mock.Mock(side_effect=Exception)) def test_xcption_in_update_platform(self): info = na_utils.OpenStackInfo() info._update_platform() self.assertEqual(self.UNKNOWN_VERSION, info._version) self.assertEqual(self.UNKNOWN_RELEASE, info._release) self.assertEqual(self.UNKNOWN_VENDOR, info._vendor) self.assertEqual(self.UNKNOWN_PLATFORM, info._platform) @mock.patch.object(na_utils.OpenStackInfo, '_get_version_info_version', mock.Mock(return_value=VERSION_INFO_VERSION)) @mock.patch.object(na_utils.OpenStackInfo, '_get_version_info_release', mock.Mock(return_value=VERSION_INFO_RELEASE)) def test_update_info_from_version_info(self): info = na_utils.OpenStackInfo() info._update_info_from_version_info() self.assertEqual(self.VERSION_INFO_VERSION, info._version) self.assertEqual(self.VERSION_INFO_RELEASE, info._release) self.assertEqual(self.UNKNOWN_VENDOR, info._vendor) self.assertEqual(self.UNKNOWN_PLATFORM, info._platform) @mock.patch.object(na_utils.OpenStackInfo, '_get_version_info_version', mock.Mock(return_value='')) @mock.patch.object(na_utils.OpenStackInfo, '_get_version_info_release', mock.Mock(return_value=None)) def test_no_info_from_version_info(self): info = na_utils.OpenStackInfo() 
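# NOTE (editor's aside): the OpenStackInfo tests above all exercise the
# same defensive pattern -- each _update_* helper consults one source of
# version/release/vendor/platform data and leaves the "unknown ..."
# defaults untouched when that source raises or returns nothing.  A hedged,
# generic sketch of that pattern (illustrative names, not the na_utils
# class):
class _SketchPackageInfo(object):
    def __init__(self):
        self.version = 'unknown version'
        self.release = 'unknown release'

    def update_from(self, version_source, release_source):
        # Each source is tried independently so that a failure in one does
        # not clobber values already gathered from another.
        try:
            value = version_source()
            if value:
                self.version = value
        except Exception:
            pass
        try:
            value = release_source()
            if value:
                self.release = value
        except Exception:
            pass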
info._update_info_from_version_info() self.assertEqual(self.UNKNOWN_VERSION, info._version) self.assertEqual(self.UNKNOWN_RELEASE, info._release) self.assertEqual(self.UNKNOWN_VENDOR, info._vendor) self.assertEqual(self.UNKNOWN_PLATFORM, info._platform) @mock.patch.object(na_utils.OpenStackInfo, '_get_version_info_version', mock.Mock(return_value=VERSION_INFO_VERSION)) @mock.patch.object(na_utils.OpenStackInfo, '_get_version_info_release', mock.Mock(side_effect=Exception)) def test_xcption_in_info_from_version_info(self): info = na_utils.OpenStackInfo() info._update_info_from_version_info() self.assertEqual(self.VERSION_INFO_VERSION, info._version) self.assertEqual(self.UNKNOWN_RELEASE, info._release) self.assertEqual(self.UNKNOWN_VENDOR, info._vendor) self.assertEqual(self.UNKNOWN_PLATFORM, info._platform) @mock.patch.object(putils, 'execute', mock.Mock(return_value=PUTILS_RPM_RET_VAL)) def test_update_info_from_rpm(self): info = na_utils.OpenStackInfo() found_package = info._update_info_from_rpm() self.assertEqual(self.RPM_INFO_VERSION, info._version) self.assertEqual(self.RPM_INFO_RELEASE, info._release) self.assertEqual(self.RPM_INFO_VENDOR, info._vendor) self.assertEqual(self.UNKNOWN_PLATFORM, info._platform) self.assertTrue(found_package) @mock.patch.object(putils, 'execute', mock.Mock(return_value=NO_PKG_FOUND)) def test_update_info_from_rpm_no_pkg_found(self): info = na_utils.OpenStackInfo() found_package = info._update_info_from_rpm() self.assertEqual(self.UNKNOWN_VERSION, info._version) self.assertEqual(self.UNKNOWN_RELEASE, info._release) self.assertEqual(self.UNKNOWN_VENDOR, info._vendor) self.assertEqual(self.UNKNOWN_PLATFORM, info._platform) self.assertFalse(found_package) @mock.patch.object(putils, 'execute', mock.Mock(side_effect=Exception)) def test_xcption_in_update_info_from_rpm(self): info = na_utils.OpenStackInfo() found_package = info._update_info_from_rpm() self.assertEqual(self.UNKNOWN_VERSION, info._version) self.assertEqual(self.UNKNOWN_RELEASE, info._release) self.assertEqual(self.UNKNOWN_VENDOR, info._vendor) self.assertEqual(self.UNKNOWN_PLATFORM, info._platform) self.assertFalse(found_package) @mock.patch.object(putils, 'execute', mock.Mock(return_value=PUTILS_DPKG_RET_VAL)) def test_update_info_from_dpkg(self): info = na_utils.OpenStackInfo() found_package = info._update_info_from_dpkg() self.assertEqual(self.UNKNOWN_VERSION, info._version) self.assertEqual(self.DEB_RLS, info._release) self.assertEqual(self.DEB_VENDOR, info._vendor) self.assertEqual(self.UNKNOWN_PLATFORM, info._platform) self.assertTrue(found_package) @mock.patch.object(putils, 'execute', mock.Mock(return_value=NO_PKG_FOUND)) def test_update_info_from_dpkg_no_pkg_found(self): info = na_utils.OpenStackInfo() found_package = info._update_info_from_dpkg() self.assertEqual(self.UNKNOWN_VERSION, info._version) self.assertEqual(self.UNKNOWN_RELEASE, info._release) self.assertEqual(self.UNKNOWN_VENDOR, info._vendor) self.assertEqual(self.UNKNOWN_PLATFORM, info._platform) self.assertFalse(found_package) @mock.patch.object(putils, 'execute', mock.Mock(side_effect=Exception)) def test_xcption_in_update_info_from_dpkg(self): info = na_utils.OpenStackInfo() found_package = info._update_info_from_dpkg() self.assertEqual(self.UNKNOWN_VERSION, info._version) self.assertEqual(self.UNKNOWN_RELEASE, info._release) self.assertEqual(self.UNKNOWN_VENDOR, info._vendor) self.assertEqual(self.UNKNOWN_PLATFORM, info._platform) self.assertFalse(found_package) @mock.patch.object(na_utils.OpenStackInfo, 
'_update_version_from_version_string', mock.Mock()) @mock.patch.object(na_utils.OpenStackInfo, '_update_release_from_release_string', mock.Mock()) @mock.patch.object(na_utils.OpenStackInfo, '_update_platform', mock.Mock()) @mock.patch.object(na_utils.OpenStackInfo, '_update_info_from_version_info', mock.Mock()) @mock.patch.object(na_utils.OpenStackInfo, '_update_info_from_rpm', mock.Mock(return_value=True)) @mock.patch.object(na_utils.OpenStackInfo, '_update_info_from_dpkg') def test_update_openstack_info_rpm_pkg_found(self, mock_updt_from_dpkg): info = na_utils.OpenStackInfo() info._update_openstack_info() self.assertFalse(mock_updt_from_dpkg.called) @mock.patch.object(na_utils.OpenStackInfo, '_update_version_from_version_string', mock.Mock()) @mock.patch.object(na_utils.OpenStackInfo, '_update_release_from_release_string', mock.Mock()) @mock.patch.object(na_utils.OpenStackInfo, '_update_platform', mock.Mock()) @mock.patch.object(na_utils.OpenStackInfo, '_update_info_from_version_info', mock.Mock()) @mock.patch.object(na_utils.OpenStackInfo, '_update_info_from_rpm', mock.Mock(return_value=False)) @mock.patch.object(na_utils.OpenStackInfo, '_update_info_from_dpkg') def test_update_openstack_info_rpm_pkg_not_found(self, mock_updt_from_dpkg): info = na_utils.OpenStackInfo() info._update_openstack_info() self.assertTrue(mock_updt_from_dpkg.called) @ddt.ddt class FeaturesTestCase(test.TestCase): def setUp(self): super(FeaturesTestCase, self).setUp() self.features = na_utils.Features() def test_init(self): self.assertSetEqual(set(), self.features.defined_features) def test_add_feature_default(self): self.features.add_feature('FEATURE_1') self.assertTrue(self.features.FEATURE_1.supported) self.assertIn('FEATURE_1', self.features.defined_features) @ddt.data(True, False) def test_add_feature(self, value): self.features.add_feature('FEATURE_2', value) self.assertEqual(value, bool(self.features.FEATURE_2)) self.assertEqual(value, self.features.FEATURE_2.supported) self.assertIsNone(self.features.FEATURE_2.minimum_version) self.assertIn('FEATURE_2', self.features.defined_features) @ddt.data((True, '1'), (False, 2), (False, None), (True, None)) @ddt.unpack def test_add_feature_min_version(self, enabled, min_version): self.features.add_feature('FEATURE_2', enabled, min_version=min_version) self.assertEqual(enabled, bool(self.features.FEATURE_2)) self.assertEqual(enabled, self.features.FEATURE_2.supported) self.assertEqual(min_version, self.features.FEATURE_2.minimum_version) self.assertIn('FEATURE_2', self.features.defined_features) @ddt.data('True', 'False', 0, 1, 1.0, None, [], {}, (True,)) def test_add_feature_type_error(self, value): self.assertRaises(TypeError, self.features.add_feature, 'FEATURE_3', value) self.assertNotIn('FEATURE_3', self.features.defined_features) def test_get_attr_missing(self): self.assertRaises(AttributeError, getattr, self.features, 'FEATURE_4') @ddt.ddt class BitSetTestCase(test.TestCase): def test_default(self): self.assertEqual(na_utils.BitSet(0), na_utils.BitSet()) def test_set(self): bitset = na_utils.BitSet(0) bitset.set(16) self.assertEqual(na_utils.BitSet(1 << 16), bitset) def test_unset(self): bitset = na_utils.BitSet(1 << 16) bitset.unset(16) self.assertEqual(na_utils.BitSet(0), bitset) def test_is_set(self): bitset = na_utils.BitSet(1 << 16) self.assertTrue(bool(bitset.is_set(16))) def test_not_equal(self): set1 = na_utils.BitSet(1 << 15) set2 = na_utils.BitSet(1 << 16) self.assertNotEqual(set1, set2) def test_repr(self): raw_val = 1 << 16 actual = 
repr(na_utils.BitSet(raw_val)) expected = str(raw_val) self.assertEqual(actual, expected) def test_str(self): raw_val = 1 << 16 actual = str(na_utils.BitSet(raw_val)) expected = bin(raw_val) self.assertEqual(actual, expected) def test_int(self): val = 1 << 16 actual = int(int(na_utils.BitSet(val))) self.assertEqual(val, actual) def test_and(self): actual = na_utils.BitSet(1 << 16 | 1 << 15) actual &= 1 << 16 self.assertEqual(na_utils.BitSet(1 << 16), actual) def test_or(self): actual = na_utils.BitSet() actual |= 1 << 16 self.assertEqual(na_utils.BitSet(1 << 16), actual) def test_invert(self): actual = na_utils.BitSet(1 << 16) actual = ~actual self.assertEqual(~(1 << 16), actual) def test_xor(self): actual = na_utils.BitSet(1 << 16) actual ^= 1 << 16 self.assertEqual(na_utils.BitSet(), actual) def test_lshift(self): actual = na_utils.BitSet(1) actual <<= 16 self.assertEqual(na_utils.BitSet(1 << 16), actual) def test_rshift(self): actual = na_utils.BitSet(1 << 16) actual >>= 16 self.assertEqual(na_utils.BitSet(1), actual) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315577.2991202 cinder-27.0.0/cinder/tests/unit/volume/drivers/nexenta/0000775000175000017500000000000000000000000023103 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/volume/drivers/nexenta/__init__.py0000664000175000017500000000000000000000000025202 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/volume/drivers/nexenta/test_nexenta.py0000664000175000017500000006651200000000000026170 0ustar00zuulzuul00000000000000# Copyright 2016 Nexenta Systems, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
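# NOTE (editor's aside on the BitSet tests that close the NetApp utility
# suite above): they describe a small int-backed wrapper where
# set(n)/unset(n)/is_set(n) manipulate bit n and the usual bitwise and
# string conversions are delegated to the underlying integer.  A minimal
# sketch with assumed names, not the na_utils.BitSet implementation:
class _SketchBitSet(object):
    def __init__(self, value=0):
        self._value = int(value)

    def set(self, bit):
        self._value |= 1 << bit
        return self

    def unset(self, bit):
        self._value &= ~(1 << bit)
        return self

    def is_set(self, bit):
        return bool(self._value & (1 << bit))

    def __int__(self):
        return self._value

    def __eq__(self, other):
        return int(self) == int(other)

    def __repr__(self):
        return str(self._value)

    def __str__(self):
        return bin(self._value)


# Mirroring the expectations above: _SketchBitSet().set(16) == 1 << 16,
# repr(_SketchBitSet(1 << 16)) == str(1 << 16) and
# str(_SketchBitSet(1 << 16)) == bin(1 << 16).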
"""Unit tests for OpenStack Cinder volume driver.""" from unittest import mock from unittest.mock import patch from oslo_utils import units from cinder import context from cinder import db from cinder.tests.unit import test from cinder.volume import configuration as conf from cinder.volume.drivers.nexenta import iscsi from cinder.volume.drivers.nexenta import jsonrpc from cinder.volume.drivers.nexenta import nfs from cinder.volume.drivers.nexenta import utils class TestNexentaISCSIDriver(test.TestCase): TEST_VOLUME_NAME = 'volume1' TEST_VOLUME_NAME2 = 'volume2' TEST_VOLUME_NAME3 = 'volume3' TEST_SNAPSHOT_NAME = 'snapshot1' TEST_VOLUME_REF = { 'name': TEST_VOLUME_NAME, 'size': 1, 'id': '1', 'status': 'available' } TEST_VOLUME_REF2 = { 'name': TEST_VOLUME_NAME2, 'size': 1, 'id': '2', 'status': 'in-use' } TEST_VOLUME_REF3 = { 'name': TEST_VOLUME_NAME3, 'size': 3, 'id': '3', 'status': 'in-use' } TEST_SNAPSHOT_REF = { 'name': TEST_SNAPSHOT_NAME, 'volume_name': TEST_VOLUME_NAME, 'volume_size': 1, } def __init__(self, method): super(TestNexentaISCSIDriver, self).__init__(method) def setUp(self): super(TestNexentaISCSIDriver, self).setUp() self.cfg = mock.Mock(spec=conf.Configuration) self.ctxt = context.get_admin_context() self.cfg.nexenta_dataset_description = '' self.cfg.nexenta_host = '1.1.1.1' self.cfg.nexenta_user = 'admin' self.cfg.nexenta_password = 'nexenta' self.cfg.nexenta_volume = 'cinder' self.cfg.nexenta_rest_port = 2000 self.cfg.nexenta_rest_protocol = 'http' self.cfg.nexenta_iscsi_target_portal_port = 3260 self.cfg.nexenta_target_prefix = 'iqn:' self.cfg.nexenta_target_group_prefix = 'cinder/' self.cfg.nexenta_blocksize = '8K' self.cfg.nexenta_sparse = True self.cfg.nexenta_dataset_compression = 'on' self.cfg.nexenta_dataset_dedup = 'off' self.cfg.nexenta_rrmgr_compression = 1 self.cfg.nexenta_rrmgr_tcp_buf_size = 1024 self.cfg.nexenta_rrmgr_connections = 2 self.cfg.reserved_percentage = 20 self.nms_mock = mock.Mock() for mod in ['volume', 'zvol', 'iscsitarget', 'appliance', 'stmf', 'scsidisk', 'snapshot']: setattr(self.nms_mock, mod, mock.Mock()) self.mock_object(jsonrpc, 'NexentaJSONProxy', return_value=self.nms_mock) self.drv = iscsi.NexentaISCSIDriver( configuration=self.cfg) self.drv.db = db self.drv.do_setup(self.ctxt) def test_check_do_setup(self): self.assertEqual('http', self.drv.nms_protocol) def test_check_for_setup_error(self): self.nms_mock.volume.object_exists.return_value = False self.assertRaises(LookupError, self.drv.check_for_setup_error) def test_local_path(self): self.assertRaises(NotImplementedError, self.drv.local_path, '') def test_create_volume(self): self.drv.create_volume(self.TEST_VOLUME_REF) self.nms_mock.zvol.create.assert_called_with( 'cinder/%s' % self.TEST_VOLUME_REF['name'], '1G', self.cfg.nexenta_blocksize, self.cfg.nexenta_sparse) def test_delete_volume(self): self.drv._collect_garbage = lambda vol: vol self.nms_mock.zvol.get_child_props.return_value = ( {'origin': 'cinder/volume0@snapshot'}) self.drv.delete_volume(self.TEST_VOLUME_REF) self.nms_mock.zvol.get_child_props.assert_called_with( 'cinder/volume1', 'origin') self.nms_mock.zvol.destroy.assert_called_with( 'cinder/volume1', '') self.nms_mock.zvol.get_child_props.assert_called_with( 'cinder/volume1', 'origin') self.nms_mock.zvol.destroy.assert_called_with('cinder/volume1', '') self.drv.delete_volume(self.TEST_VOLUME_REF) self.nms_mock.zvol.get_child_props.assert_called_with( 'cinder/volume1', 'origin') def test_create_cloned_volume(self): vol = self.TEST_VOLUME_REF2 src_vref = 
self.TEST_VOLUME_REF snapshot = { 'volume_name': src_vref['name'], 'name': 'cinder-clone-snapshot-%s' % vol['id'], } self.drv.create_cloned_volume(vol, src_vref) self.nms_mock.zvol.create_snapshot.assert_called_with( 'cinder/%s' % src_vref['name'], snapshot['name'], '') self.nms_mock.zvol.clone.assert_called_with( 'cinder/%s@%s' % (src_vref['name'], snapshot['name']), 'cinder/%s' % vol['name']) def test_migrate_volume(self): self.drv._collect_garbage = lambda vol: vol volume = self.TEST_VOLUME_REF host = { 'capabilities': { 'vendor_name': 'Nexenta', 'location_info': 'NexentaISCSIDriver:1.1.1.1:cinder', 'free_capacity_gb': 1, 'iscsi_target_portal_port': 3260, 'nms_url': 'http://admin:password@1.1.1.1:2000' } } snapshot = { 'volume_name': volume['name'], 'name': 'cinder-migrate-snapshot-%s' % volume['id'], } volume_name = 'cinder/%s' % volume['name'] self.nms_mock.appliance.ssh_list_bindings.return_value = ( {'0': [True, True, True, '1.1.1.1']}) self.nms_mock.zvol.get_child_props.return_value = None self.drv.migrate_volume(None, volume, host) self.nms_mock.zvol.create_snapshot.assert_called_with( 'cinder/%s' % volume['name'], snapshot['name'], '') src = '%(volume)s/%(zvol)s@%(snapshot)s' % { 'volume': 'cinder', 'zvol': volume['name'], 'snapshot': snapshot['name'] } dst = '1.1.1.1:cinder' cmd = ' '.join(['rrmgr -s zfs -c 1 -q -e -w 1024 -n 2', src, dst]) self.nms_mock.appliance.execute.assert_called_with(cmd) snapshot_name = 'cinder/%(volume)s@%(snapshot)s' % { 'volume': volume['name'], 'snapshot': snapshot['name'] } self.nms_mock.snapshot.destroy.assert_called_with(snapshot_name, '') self.nms_mock.zvol.destroy.assert_called_with(volume_name, '') self.nms_mock.snapshot.destroy.assert_called_with( 'cinder/%(volume)s@%(snapshot)s' % { 'volume': volume['name'], 'snapshot': snapshot['name'] }, '') def test_create_snapshot(self): self.drv.create_snapshot(self.TEST_SNAPSHOT_REF) self.nms_mock.zvol.create_snapshot.assert_called_with( 'cinder/volume1', 'snapshot1', '') def test_create_volume_from_snapshot(self): self._create_volume_db_entry() self.drv.create_volume_from_snapshot(self.TEST_VOLUME_REF3, self.TEST_SNAPSHOT_REF) self.nms_mock.zvol.clone.assert_called_with( 'cinder/volume1@snapshot1', 'cinder/volume3') self.nms_mock.zvol.set_child_prop.assert_called_with( 'cinder/volume3', 'volsize', '3G') def test_delete_snapshot(self): self._create_volume_db_entry() self.drv._collect_garbage = lambda vol: vol self.drv.delete_snapshot(self.TEST_SNAPSHOT_REF) self.nms_mock.snapshot.destroy.assert_called_with( 'cinder/volume1@snapshot1', '') # Check that exception not raised if snapshot does not exist self.drv.delete_snapshot(self.TEST_SNAPSHOT_REF) self.nms_mock.snapshot.destroy.side_effect = ( utils.NexentaException('does not exist')) self.nms_mock.snapshot.destroy.assert_called_with( 'cinder/volume1@snapshot1', '') def _mock_all_export_methods(self, fail=False): self.assertTrue(self.nms_mock.stmf.list_targets.called) self.nms_mock.iscsitarget.create_target.assert_called_with( {'target_name': 'iqn:1.1.1.1-0'}) self.nms_mock.stmf.list_targetgroups() zvol_name = 'cinder/volume1' self.nms_mock.stmf.create_targetgroup.assert_called_with( 'cinder/1.1.1.1-0') self.nms_mock.stmf.list_targetgroup_members.assert_called_with( 'cinder/1.1.1.1-0') self.nms_mock.scsidisk.lu_exists.assert_called_with(zvol_name) self.nms_mock.scsidisk.create_lu.assert_called_with(zvol_name, {}) def _stub_all_export_methods(self): self.nms_mock.scsidisk.lu_exists.return_value = False self.nms_mock.scsidisk.lu_shared.side_effect = ( 
utils.NexentaException(['does not exist for zvol'])) self.nms_mock.scsidisk.create_lu.return_value = {'lun': 0} self.nms_mock.stmf.list_targets.return_value = [] self.nms_mock.stmf.list_targetgroups.return_value = [] self.nms_mock.stmf.list_targetgroup_members.return_value = [] self.nms_mock._get_target_name.return_value = ['iqn:1.1.1.1-0'] self.nms_mock.iscsitarget.create_targetgroup.return_value = ({ 'target_name': 'cinder/1.1.1.1-0'}) self.nms_mock.scsidisk.add_lun_mapping_entry.return_value = {'lun': 0} def test_create_export(self): self._stub_all_export_methods() retval = self.drv.create_export({}, self.TEST_VOLUME_REF, None) self._mock_all_export_methods() location = '%(host)s:%(port)s,1 %(name)s %(lun)s' % { 'host': self.cfg.nexenta_host, 'port': self.cfg.nexenta_iscsi_target_portal_port, 'name': 'iqn:1.1.1.1-0', 'lun': '0' } self.assertEqual({'provider_location': location}, retval) def test_ensure_export(self): self._stub_all_export_methods() self.drv.ensure_export({}, self.TEST_VOLUME_REF) self._mock_all_export_methods() def test_remove_export(self): self.nms_mock.stmf.list_targets.return_value = ['iqn:1.1.1.1-0'] self.nms_mock.stmf.list_targetgroups.return_value = ( ['cinder/1.1.1.1-0']) self.nms_mock.stmf.list_targetgroup_members.return_value = ( ['iqn:1.1.1.1-0']) self.drv.remove_export({}, self.TEST_VOLUME_REF) self.assertTrue(self.nms_mock.stmf.list_targets.called) self.assertTrue(self.nms_mock.stmf.list_targetgroups.called) self.nms_mock.scsidisk.delete_lu.assert_called_with('cinder/volume1') def test_get_volume_stats(self): stats = {'size': '5368709120G', 'used': '5368709120G', 'available': '5368709120G', 'health': 'ONLINE'} self.nms_mock.volume.get_child_props.return_value = stats stats = self.drv.get_volume_stats(True) self.assertEqual('iSCSI', stats['storage_protocol']) self.assertEqual(5368709120.0, stats['total_capacity_gb']) self.assertEqual(5368709120.0, stats['free_capacity_gb']) self.assertEqual(20, stats['reserved_percentage']) self.assertFalse(stats['QoS_support']) def test_collect_garbage__snapshot(self): name = 'cinder/v1@s1' self.drv._mark_as_garbage(name) self.nms_mock.zvol.get_child_props.return_value = None self.drv._collect_garbage(name) self.nms_mock.snapshot.destroy.assert_called_with(name, '') self.assertNotIn(name, self.drv._needless_objects) def test_collect_garbage__volume(self): name = 'cinder/v1' self.drv._mark_as_garbage(name) self.nms_mock.zvol.get_child_props.return_value = None self.drv._collect_garbage(name) self.nms_mock.zvol.destroy.assert_called_with(name, '') self.assertNotIn(name, self.drv._needless_objects) def _create_volume_db_entry(self): vol = { 'id': '1', 'size': 1, 'status': 'available', 'provider_location': self.TEST_VOLUME_NAME, 'volume_type_id': self.vt['id'] } return db.volume_create(self.ctxt, vol)['id'] class TestNexentaNfsDriver(test.TestCase): TEST_VOLUME_NAME = 'volume1' TEST_VOLUME_NAME2 = 'volume2' TEST_VOLUME_NAME3 = 'volume3' TEST_SNAPSHOT_NAME = 'snapshot1' TEST_VOLUME_REF = { 'name': TEST_VOLUME_NAME, 'size': 1, 'id': '1', 'status': 'available' } TEST_VOLUME_REF2 = { 'name': TEST_VOLUME_NAME2, 'size': 2, 'id': '2', 'status': 'in-use' } TEST_VOLUME_REF3 = { 'name': TEST_VOLUME_NAME2, 'id': '2', 'status': 'in-use' } TEST_SNAPSHOT_REF = { 'name': TEST_SNAPSHOT_NAME, 'volume_name': TEST_VOLUME_NAME, 'volume_size': 1, 'volume_id': 1 } TEST_EXPORT1 = 'host1:/volumes/stack/share' TEST_NMS1 = 'http://admin:nexenta@host1:2000' TEST_EXPORT2 = 'host2:/volumes/stack/share' TEST_NMS2 = 'http://admin:nexenta@host2:2000' 
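# NOTE (editor's aside): two string formats asserted by the legacy Nexenta
# iSCSI tests above are the iSCSI provider location
# "<host>:<port>,1 <target iqn> <lun>" (test_create_export) and the rrmgr
# replication command used for migration (test_migrate_volume).  The
# helpers below are hedged sketches that only rebuild those strings; the
# names are illustrative and they are not part of the driver's API.
def _sketch_provider_location(host, port, target_iqn, lun):
    return '%(host)s:%(port)s,1 %(name)s %(lun)s' % {
        'host': host,
        'port': port,
        'name': target_iqn,
        'lun': lun,
    }


def _sketch_rrmgr_command(src_snapshot, dst, compression=1,
                          tcp_buf_size=1024, connections=2):
    # Matches the command asserted above, e.g.
    # 'rrmgr -s zfs -c 1 -q -e -w 1024 -n 2 <src> <dst>'.
    return ' '.join(['rrmgr -s zfs -c %d -q -e -w %d -n %d' % (
        compression, tcp_buf_size, connections), src_snapshot, dst])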
TEST_EXPORT2_OPTIONS = '-o intr' TEST_FILE_NAME = 'test.txt' TEST_SHARES_CONFIG_FILE = '/etc/cinder/nexenta-shares.conf' TEST_SHARE_SVC = 'svc:/network/nfs/server:default' TEST_SHARE_OPTS = { 'read_only': '', 'read_write': '*', 'recursive': 'true', 'anonymous_rw': 'true', 'extra_options': 'anon=0', 'root': 'nobody' } def _create_volume_db_entry(self): vol = { 'id': '1', 'size': 1, 'status': 'available', 'provider_location': self.TEST_EXPORT1, 'volume_type_id': self.vt['id'] } self.drv.share2nms = {self.TEST_EXPORT1: self.nms_mock} return db.volume_create(self.ctxt, vol)['id'] def setUp(self): super(TestNexentaNfsDriver, self).setUp() self.ctxt = context.get_admin_context() self.cfg = mock.Mock(spec=conf.Configuration) self.cfg.nexenta_dataset_description = '' self.cfg.nexenta_shares_config = None self.cfg.nexenta_mount_point_base = '$state_path/mnt' self.cfg.nexenta_sparsed_volumes = True self.cfg.nexenta_dataset_compression = 'on' self.cfg.nexenta_dataset_dedup = 'off' self.cfg.nexenta_rrmgr_compression = 1 self.cfg.nexenta_rrmgr_tcp_buf_size = 1024 self.cfg.nexenta_rrmgr_connections = 2 self.cfg.nfs_mount_point_base = '/mnt/test' self.cfg.nfs_mount_options = None self.cfg.nas_mount_options = None self.cfg.nexenta_nms_cache_volroot = False self.cfg.nfs_mount_attempts = 3 self.cfg.reserved_percentage = 20 self.cfg.max_over_subscription_ratio = 20.0 self.nms_mock = mock.Mock() for mod in ('appliance', 'folder', 'server', 'volume', 'netstorsvc', 'snapshot', 'netsvc'): setattr(self.nms_mock, mod, mock.Mock()) self.nms_mock.__hash__ = lambda *_, **__: 1 self.mock_object(jsonrpc, 'NexentaJSONProxy', return_value=self.nms_mock) self.drv = nfs.NexentaNfsDriver(configuration=self.cfg) self.drv.shares = {} self.drv.share2nms = {} def test_check_for_setup_error(self): self.drv.share2nms = { 'host1:/volumes/stack/share': self.nms_mock } self.nms_mock.server.get_prop.return_value = '/volumes' self.nms_mock.volume.object_exists.return_value = True self.nms_mock.folder.object_exists.return_value = True share_opts = { 'read_write': '*', 'read_only': '', 'root': 'nobody', 'extra_options': 'anon=0', 'recursive': 'true', 'anonymous_rw': 'true', } self.nms_mock.netstorsvc.get_shared_folders.return_value = '' self.nms_mock.folder.get_child_props.return_value = { 'available': 1, 'used': 1} self.drv.check_for_setup_error() self.nms_mock.netstorsvc.share_folder.assert_called_with( 'svc:/network/nfs/server:default', 'stack/share', share_opts) self.nms_mock.server.get_prop.return_value = '/volumes' self.nms_mock.volume.object_exists.return_value = False self.assertRaises(LookupError, self.drv.check_for_setup_error) self.nms_mock.server.get_prop.return_value = '/volumes' self.nms_mock.volume.object_exists.return_value = True self.nms_mock.folder.object_exists.return_value = False self.assertRaises(LookupError, self.drv.check_for_setup_error) def test_initialize_connection(self): self.drv.shares = { self.TEST_EXPORT1: None } volume = { 'provider_location': self.TEST_EXPORT1, 'name': 'volume' } result = self.drv.initialize_connection(volume, None) self.assertEqual('%s/volume' % self.TEST_EXPORT1, result['data']['export']) def test_do_create_volume(self): volume = { 'provider_location': self.TEST_EXPORT1, 'size': 1, 'name': 'volume-1' } self.drv.shares = {self.TEST_EXPORT1: None} self.drv.share2nms = {self.TEST_EXPORT1: self.nms_mock} compression = self.cfg.nexenta_dataset_compression self.nms_mock.folder.get_child_props.return_value = { 'available': 1, 'used': 1} self.nms_mock.server.get_prop.return_value = 
'/volumes' self.nms_mock.netsvc.get_confopts('svc:/network/nfs/server:default', 'configure').AndReturn({ 'nfs_server_versmax': { 'current': u'3'}}) self.nms_mock.netsvc.get_confopts.return_value = { 'nfs_server_versmax': {'current': 4}} self.nms_mock._ensure_share_mounted.return_value = True self.drv._do_create_volume(volume) self.nms_mock.folder.create_with_props.assert_called_with( 'stack', 'share/volume-1', {'compression': compression}) self.nms_mock.netstorsvc.share_folder.assert_called_with( self.TEST_SHARE_SVC, 'stack/share/volume-1', self.TEST_SHARE_OPTS) mock_chmod = self.nms_mock.appliance.execute mock_chmod.assert_called_with( 'chmod ugo+rw /volumes/stack/share/volume-1/volume') mock_truncate = self.nms_mock.appliance.execute mock_truncate.side_effect = utils.NexentaException( 'fake_exception') self.nms_mock.server.get_prop.return_value = '/volumes' self.nms_mock.folder.get_child_props.return_value = { 'available': 1, 'used': 1} self.assertRaises(utils.NexentaException, self.drv._do_create_volume, volume) def test_create_sparsed_file(self): self.drv._create_sparsed_file(self.nms_mock, '/tmp/path', 1) self.nms_mock.appliance.execute.assert_called_with( 'truncate --size 1G /tmp/path') def test_create_regular_file(self): self.drv._create_regular_file(self.nms_mock, '/tmp/path', 1) self.nms_mock.appliance.execute.assert_called_with( 'dd if=/dev/zero of=/tmp/path bs=1M count=1024') @patch('cinder.volume.drivers.remotefs.' 'RemoteFSDriver._ensure_shares_mounted') @patch('cinder.volume.drivers.nexenta.nfs.' 'NexentaNfsDriver._get_volroot') @patch('cinder.volume.drivers.nexenta.nfs.' 'NexentaNfsDriver._get_nfs_server_version') def test_create_larger_volume_from_snap(self, version, volroot, ensure): version.return_value = 4 volroot.return_value = 'volroot' self._create_volume_db_entry() self.drv.create_volume_from_snapshot(self.TEST_VOLUME_REF2, self.TEST_SNAPSHOT_REF) self.nms_mock.appliance.execute.assert_called_with( 'truncate --size 2G /volumes/stack/share/volume2/volume') @patch('cinder.volume.drivers.remotefs.' 'RemoteFSDriver._ensure_shares_mounted') @patch('cinder.volume.drivers.nexenta.nfs.' 'NexentaNfsDriver._get_volroot') @patch('cinder.volume.drivers.nexenta.nfs.' 
'NexentaNfsDriver._get_nfs_server_version') def test_create_volume_from_snapshot(self, version, volroot, ensure): version.return_value = 4 volroot.return_value = 'volroot' self._create_volume_db_entry() self.drv.create_volume_from_snapshot(self.TEST_VOLUME_REF, self.TEST_SNAPSHOT_REF) self.nms_mock.appliance.execute.assert_not_called() self.drv.create_volume_from_snapshot(self.TEST_VOLUME_REF3, self.TEST_SNAPSHOT_REF) self.nms_mock.appliance.execute.assert_not_called() def test_set_rw_permissions_for_all(self): path = '/tmp/path' self.drv._set_rw_permissions_for_all(self.nms_mock, path) self.nms_mock.appliance.execute.assert_called_with( 'chmod ugo+rw %s' % path) def test_local_path(self): volume = {'provider_location': self.TEST_EXPORT1, 'name': 'volume-1'} path = self.drv.local_path(volume) self.assertEqual( '$state_path/mnt/b3f660847a52b29ac330d8555e4ad669/volume-1/volume', path ) def test_remote_path(self): volume = {'provider_location': self.TEST_EXPORT1, 'name': 'volume-1'} path = self.drv.remote_path(volume) self.assertEqual('/volumes/stack/share/volume-1/volume', path) def test_share_folder(self): self.drv._share_folder(self.nms_mock, 'stack', 'share/folder') path = 'stack/share/folder' self.nms_mock.netstorsvc.share_folder.assert_called_with( self.TEST_SHARE_SVC, path, self.TEST_SHARE_OPTS) def test_load_shares_config(self): self.drv.configuration.nfs_shares_config = ( self.TEST_SHARES_CONFIG_FILE) config_data = [ '%s %s' % (self.TEST_EXPORT1, self.TEST_NMS1), '# %s %s' % (self.TEST_EXPORT2, self.TEST_NMS2), '', '%s %s %s' % (self.TEST_EXPORT2, self.TEST_NMS2, self.TEST_EXPORT2_OPTIONS) ] with mock.patch.object(self.drv, '_read_config_file') as \ mock_read_config_file: mock_read_config_file.return_value = config_data self.drv._load_shares_config( self.drv.configuration.nfs_shares_config) self.assertIn(self.TEST_EXPORT1, self.drv.shares) self.assertIn(self.TEST_EXPORT2, self.drv.shares) self.assertEqual(2, len(self.drv.shares)) self.assertIn(self.TEST_EXPORT1, self.drv.share2nms) self.assertIn(self.TEST_EXPORT2, self.drv.share2nms) self.assertEqual(2, len(self.drv.share2nms.keys())) self.assertEqual(self.TEST_EXPORT2_OPTIONS, self.drv.shares[self.TEST_EXPORT2]) def test_get_capacity_info(self): self.drv.share2nms = {self.TEST_EXPORT1: self.nms_mock} self.nms_mock.server.get_prop.return_value = '/volumes' self.nms_mock.folder.get_child_props.return_value = { 'available': '1G', 'used': '2G' } total, free, allocated = self.drv._get_capacity_info(self.TEST_EXPORT1) self.assertEqual(3 * units.Gi, total) self.assertEqual(units.Gi, free) self.assertEqual(2 * units.Gi, allocated) def test_get_share_datasets(self): self.drv.share2nms = {self.TEST_EXPORT1: self.nms_mock} self.nms_mock.server.get_prop.return_value = '/volumes' volume_name, folder_name = ( self.drv._get_share_datasets(self.TEST_EXPORT1)) self.assertEqual('stack', volume_name) self.assertEqual('share', folder_name) def test_delete_snapshot(self): self.drv.share2nms = {self.TEST_EXPORT1: self.nms_mock} self._create_volume_db_entry() self.nms_mock.server.get_prop.return_value = '/volumes' self.drv.delete_snapshot({'volume_id': '1', 'name': 'snapshot1'}) self.nms_mock.snapshot.destroy.assert_called_with( 'stack/share/volume-1@snapshot1', '') @mock.patch('os_brick.remotefs.remotefs.' 
'RemoteFsClient._read_mounts') def test_delete_volume(self, list_mount_points): self.drv.share2nms = {self.TEST_EXPORT1: self.nms_mock} self._create_volume_db_entry() self.drv._ensure_share_mounted = lambda *_, **__: 0 self.drv._execute = lambda *_, **__: 0 self.nms_mock.server.get_prop.return_value = '/volumes' self.nms_mock.folder.get_child_props.return_value = { 'available': 1, 'used': 1} self.drv.delete_volume({ 'id': '1', 'name': 'volume-1', 'provider_location': self.TEST_EXPORT1 }) self.nms_mock.folder.destroy.assert_called_with( 'stack/share/volume-1', '-r') list_mount_points.assert_called_once() # Check that exception not raised if folder does not exist on # NexentaStor appliance. mock = self.nms_mock.folder.destroy mock.side_effect = utils.NexentaException('Folder does not exist') self.drv.delete_volume({ 'id': '1', 'name': 'volume-1', 'provider_location': self.TEST_EXPORT1 }) class TestNexentaUtils(test.TestCase): def test_str2size(self): values_to_test = ( # Test empty value (None, 0), ('', 0), ('0', 0), ('12', 12), # Test int values (10, 10), # Test bytes string ('1b', 1), ('1B', 1), ('1023b', 1023), ('0B', 0), # Test other units ('1M', units.Mi), ('1.0M', units.Mi), ) for value, result in values_to_test: self.assertEqual(result, utils.str2size(value)) # Invalid format value self.assertRaises(ValueError, utils.str2size, 'A') def test_str2gib_size(self): self.assertEqual(1, utils.str2gib_size('1024M')) self.assertEqual(300 * units.Mi // units.Gi, utils.str2gib_size('300M')) self.assertEqual(1.2 * units.Ti // units.Gi, utils.str2gib_size('1.2T')) self.assertRaises(ValueError, utils.str2gib_size, 'A') def test_parse_nms_url(self): urls = ( ('http://192.168.1.1/', (False, 'http', 'admin', 'nexenta', '192.168.1.1', '2000', '/rest/nms/')), ('http://192.168.1.1:8080', (False, 'http', 'admin', 'nexenta', '192.168.1.1', '8080', '/rest/nms/')), ('https://root:password@192.168.1.1:8080', (False, 'https', 'root', 'password', '192.168.1.1', '8080', '/rest/nms/')), ) for url, result in urls: self.assertEqual(result, utils.parse_nms_url(url)) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/volume/drivers/nexenta/test_nexenta5_iscsi.py0000664000175000017500000012341600000000000027444 0ustar00zuulzuul00000000000000# Copyright 2019 Nexenta Systems, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
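# NOTE (editor's aside): the TestNexentaUtils cases above pin down a size
# parser -- None/'' map to 0, bare integers pass through, a 'b'/'B' suffix
# means bytes, 'K'/'M'/'G'/'T' suffixes scale by binary units ('1M' is
# 1048576), and anything else raises ValueError.  A hedged sketch matching
# just those visible cases (not the Nexenta utils.str2size code):
def _sketch_str2size(value):
    if not value:
        return 0
    if isinstance(value, int):
        return value
    text = str(value).strip()
    scales = {'B': 1, 'K': 1024, 'M': 1024 ** 2,
              'G': 1024 ** 3, 'T': 1024 ** 4}
    suffix = text[-1].upper()
    if suffix in scales:
        number, scale = text[:-1], scales[suffix]
    else:
        number, scale = text, 1
    try:
        return int(float(number) * scale)
    except ValueError:
        raise ValueError('invalid size value: %s' % value)


# Examples mirroring the table above: _sketch_str2size('1023b') == 1023,
# _sketch_str2size('1.0M') == 1048576, and _sketch_str2size('A') raises.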
"""Unit tests for OpenStack Cinder volume driver.""" from unittest import mock import uuid from oslo_utils import units from cinder import context from cinder import db from cinder.tests.unit.consistencygroup.fake_cgsnapshot import ( fake_cgsnapshot_obj as fake_cgsnapshot) from cinder.tests.unit.consistencygroup.fake_consistencygroup import ( fake_consistencyobject_obj as fake_cgroup) from cinder.tests.unit import fake_constants as fake from cinder.tests.unit.fake_snapshot import fake_snapshot_obj as fake_snapshot from cinder.tests.unit.fake_volume import fake_volume_obj as fake_volume from cinder.tests.unit import test from cinder.volume import configuration as conf from cinder.volume.drivers.nexenta.ns5 import iscsi from cinder.volume.drivers.nexenta.ns5 import jsonrpc class TestNexentaISCSIDriver(test.TestCase): def setUp(self): super(TestNexentaISCSIDriver, self).setUp() self.ctxt = context.get_admin_context() self.cfg = mock.Mock(spec=conf.Configuration) self.cfg.volume_backend_name = 'nexenta_iscsi' self.cfg.nexenta_group_snapshot_template = 'group-snapshot-%s' self.cfg.nexenta_origin_snapshot_template = 'origin-snapshot-%s' self.cfg.nexenta_dataset_description = '' self.cfg.nexenta_host = '1.1.1.1' self.cfg.nexenta_user = 'admin' self.cfg.nexenta_password = 'nexenta' self.cfg.nexenta_volume = 'cinder' self.cfg.nexenta_rest_port = 8443 self.cfg.nexenta_use_https = False self.cfg.nexenta_iscsi_target_portal_port = 3260 self.cfg.nexenta_target_prefix = 'iqn:cinder' self.cfg.nexenta_target_group_prefix = 'cinder' self.cfg.nexenta_ns5_blocksize = 32 self.cfg.nexenta_sparse = True self.cfg.nexenta_lu_writebackcache_disabled = True self.cfg.nexenta_dataset_compression = 'on' self.cfg.nexenta_dataset_dedup = 'off' self.cfg.reserved_percentage = 20 self.cfg.nexenta_host_group_prefix = 'hg' self.cfg.nexenta_volume = 'pool' self.cfg.driver_ssl_cert_verify = False self.cfg.nexenta_luns_per_target = 20 self.cfg.driver_ssl_cert_verify = False self.cfg.nexenta_iscsi_target_portals = '1.1.1.1:3260,2.2.2.2:3260' self.cfg.nexenta_iscsi_target_host_group = 'all' self.cfg.nexenta_rest_address = '1.1.1.1' self.cfg.nexenta_rest_backoff_factor = 1 self.cfg.nexenta_rest_retry_count = 3 self.cfg.nexenta_rest_connect_timeout = 1 self.cfg.nexenta_rest_read_timeout = 1 self.cfg.nexenta_volume_group = 'vg' self.cfg.safe_get = self.fake_safe_get self.nef_mock = mock.Mock() self.mock_object(jsonrpc, 'NefRequest', return_value=self.nef_mock) self.drv = iscsi.NexentaISCSIDriver( configuration=self.cfg) self.drv.db = db self.drv.do_setup(self.ctxt) def fake_safe_get(self, key): try: value = getattr(self.cfg, key) except AttributeError: value = None return value def fake_uuid4(): return uuid.UUID('38d18a48-b791-4046-b523-a84aad966310') def test_do_setup(self): self.assertIsNone(self.drv.do_setup(self.ctxt)) @mock.patch('cinder.volume.drivers.nexenta.ns5.' 'jsonrpc.NefServices.get') @mock.patch('cinder.volume.drivers.nexenta.ns5.' 'jsonrpc.NefVolumeGroups.create') @mock.patch('cinder.volume.drivers.nexenta.ns5.' 
'jsonrpc.NefVolumeGroups.get') def test_check_for_setup_error(self, volume_group_get, volume_group_create, service_get): path = self.drv.root_path bs = self.cfg.nexenta_ns5_blocksize * units.Ki name = 'iscsit' state = 'online' volume_group_get.return_value = {'path': path} service_get.return_value = {'name': name, 'state': state} self.assertIsNone(self.drv.check_for_setup_error()) volume_group_get.assert_called_with(path) service_get.assert_called_with(name) volume_group_get.side_effect = jsonrpc.NefException({ 'message': 'Failed to open dataset', 'code': 'ENOENT' }) volume_group_create.return_value = {} self.assertIsNone(self.drv.check_for_setup_error()) volume_group_get.assert_called_with(path) payload = {'path': path, 'volumeBlockSize': bs} volume_group_create.assert_called_with(payload) service_get.assert_called_with(name) state = 'offline' volume_group_get.return_value = {'path': path} service_get.return_value = {'name': name, 'state': state} self.assertRaises(jsonrpc.NefException, self.drv.check_for_setup_error) @mock.patch('cinder.volume.drivers.nexenta.ns5.' 'jsonrpc.NefVolumes.create') def test_create_volume(self, create_volume): volume = fake_volume(self.ctxt) self.assertIsNone(self.drv.create_volume(volume)) path = self.drv._get_volume_path(volume) size = volume['size'] * units.Gi bs = self.cfg.nexenta_ns5_blocksize * units.Ki payload = { 'path': path, 'volumeSize': size, 'volumeBlockSize': bs, 'compressionMode': self.cfg.nexenta_dataset_compression, 'sparseVolume': self.cfg.nexenta_sparse } create_volume.assert_called_with(payload) @mock.patch('cinder.volume.drivers.nexenta.ns5.' 'jsonrpc.NefVolumes.delete') def test_delete_volume(self, delete_volume): volume = fake_volume(self.ctxt) self.assertIsNone(self.drv.delete_volume(volume)) path = self.drv._get_volume_path(volume) payload = {'snapshots': True} delete_volume.assert_called_with(path, payload) @mock.patch('cinder.volume.drivers.nexenta.ns5.' 'jsonrpc.NefVolumes.set') def test_extend_volume(self, extend_volume): volume = fake_volume(self.ctxt) size = volume['size'] * 2 self.assertIsNone(self.drv.extend_volume(volume, size)) path = self.drv._get_volume_path(volume) size = size * units.Gi payload = {'volumeSize': size} extend_volume.assert_called_with(path, payload) @mock.patch('cinder.volume.drivers.nexenta.ns5.' 'jsonrpc.NefSnapshots.delete') def test_delete_snapshot(self, delete_snapshot): volume = fake_volume(self.ctxt) snapshot = fake_snapshot(self.ctxt) snapshot.volume = volume delete_snapshot.return_value = {} self.assertIsNone(self.drv.delete_snapshot(snapshot)) path = self.drv._get_snapshot_path(snapshot) payload = {'defer': True} delete_snapshot.assert_called_with(path, payload) def test_snapshot_revert_use_temp_snapshot(self): result = self.drv.snapshot_revert_use_temp_snapshot() expected = False self.assertEqual(expected, result) @mock.patch('cinder.volume.drivers.nexenta.ns5.' 'jsonrpc.NefVolumes.rollback') def test_revert_to_snapshot(self, rollback_volume): volume = fake_volume(self.ctxt) snapshot = fake_snapshot(self.ctxt) snapshot.volume = volume rollback_volume.return_value = {} self.assertIsNone( self.drv.revert_to_snapshot(self.ctxt, volume, snapshot) ) path = self.drv._get_volume_path(volume) payload = {'snapshot': snapshot['name']} rollback_volume.assert_called_with(path, payload) @mock.patch('cinder.volume.drivers.nexenta.ns5.iscsi.' 'NexentaISCSIDriver.delete_snapshot') @mock.patch('cinder.volume.drivers.nexenta.ns5.iscsi.' 
'NexentaISCSIDriver.create_volume_from_snapshot') @mock.patch('cinder.volume.drivers.nexenta.ns5.iscsi.' 'NexentaISCSIDriver.create_snapshot') def test_create_cloned_volume(self, create_snapshot, create_volume, delete_snapshot): volume = fake_volume(self.ctxt) clone_spec = {'id': fake.VOLUME2_ID} clone = fake_volume(self.ctxt, **clone_spec) create_snapshot.return_value = {} create_volume.return_value = {} delete_snapshot.return_value = {} self.assertIsNone(self.drv.create_cloned_volume(clone, volume)) snapshot = { 'name': self.drv.origin_snapshot_template % clone['id'], 'volume_id': volume['id'], 'volume_name': volume['name'], 'volume_size': volume['size'] } create_snapshot.assert_called_with(snapshot) create_volume.assert_called_with(clone, snapshot) create_volume.side_effect = jsonrpc.NefException({ 'message': 'Failed to create volume', 'code': 'EBUSY' }) self.assertRaises(jsonrpc.NefException, self.drv.create_cloned_volume, clone, volume) create_snapshot.side_effect = jsonrpc.NefException({ 'message': 'Failed to open dataset', 'code': 'ENOENT' }) self.assertRaises(jsonrpc.NefException, self.drv.create_cloned_volume, clone, volume) @mock.patch('cinder.volume.drivers.nexenta.ns5.' 'jsonrpc.NefSnapshots.create') def test_create_snapshot(self, create_snapshot): volume = fake_volume(self.ctxt) snapshot = fake_snapshot(self.ctxt) snapshot.volume = volume create_snapshot.return_value = {} self.assertIsNone(self.drv.create_snapshot(snapshot)) path = self.drv._get_snapshot_path(snapshot) payload = {'path': path} create_snapshot.assert_called_with(payload) @mock.patch('cinder.volume.drivers.nexenta.ns5.' 'iscsi.NexentaISCSIDriver.extend_volume') @mock.patch('cinder.volume.drivers.nexenta.ns5.' 'jsonrpc.NefSnapshots.clone') def test_create_volume_from_snapshot(self, clone_snapshot, extend_volume): volume = fake_volume(self.ctxt) snapshot = fake_snapshot(self.ctxt) snapshot.volume = volume clone_size = 10 clone_spec = { 'id': fake.VOLUME2_ID, 'size': clone_size } clone = fake_volume(self.ctxt, **clone_spec) snapshot_path = self.drv._get_snapshot_path(snapshot) clone_path = self.drv._get_volume_path(clone) clone_snapshot.return_value = {} extend_volume.return_value = None self.assertIsNone( self.drv.create_volume_from_snapshot(clone, snapshot) ) clone_payload = {'targetPath': clone_path} clone_snapshot.assert_called_with(snapshot_path, clone_payload) extend_volume.assert_called_with(clone, clone_size) @mock.patch('cinder.volume.drivers.nexenta.ns5.' 'jsonrpc.NefLunMappings.list') @mock.patch('cinder.volume.drivers.nexenta.ns5.' 'iscsi.NexentaISCSIDriver._create_target_group') @mock.patch('cinder.volume.drivers.nexenta.ns5.' 'iscsi.NexentaISCSIDriver._create_target') @mock.patch('cinder.volume.drivers.nexenta.ns5.' 'iscsi.NexentaISCSIDriver._target_group_props') @mock.patch('cinder.volume.drivers.nexenta.ns5.' 'iscsi.NexentaISCSIDriver._get_host_portals') @mock.patch('cinder.volume.drivers.nexenta.ns5.' 
'iscsi.NexentaISCSIDriver._get_host_group') @mock.patch('uuid.uuid4', fake_uuid4) def test_initialize_connection(self, get_host_group, get_host_portals, get_target_group_props, create_target, create_target_group, list_mappings): volume = fake_volume(self.ctxt) host_iqn = 'iqn:cinder-client' target_iqn = 'iqn:cinder-target' connector = {'initiator': host_iqn, 'multipath': True} host_group = 'cinder-host-group' target_group = 'cinder-target-group' target_portals = self.cfg.nexenta_iscsi_target_portals.split(',') get_host_group.return_value = host_group get_host_portals.return_value = { target_iqn: target_portals } list_mappings.return_value = [{ 'id': '309F9B9013CF627A00000000', 'lun': 0, 'hostGroup': host_group, 'targetGroup': target_group }] get_target_group_props.return_value = { target_iqn: target_portals } create_target.return_value = {} create_target_group.return_value = {} result = self.drv.initialize_connection(volume, connector) expected = { 'driver_volume_type': 'iscsi', 'data': { 'target_discovered': False, 'encrypted': False, 'qos_specs': None, 'target_luns': [0] * len(target_portals), 'access_mode': 'rw', 'volume_id': volume['id'], 'target_portals': target_portals, 'target_iqns': [target_iqn] * len(target_portals) } } self.assertEqual(expected, result) @mock.patch('cinder.volume.drivers.nexenta.ns5.' 'jsonrpc.NefLunMappings.delete') @mock.patch('cinder.volume.drivers.nexenta.ns5.' 'jsonrpc.NefLunMappings.list') @mock.patch('cinder.volume.drivers.nexenta.ns5.' 'iscsi.NexentaISCSIDriver._get_host_group') def test_terminate_connection(self, get_host_group, list_mappings, delete_mapping): volume = fake_volume(self.ctxt) host_group = 'cinder-host-group' target_group = 'cinder-target-group' connector = {'initiator': 'iqn:test'} get_host_group.return_value = host_group list_mappings.return_value = [{ 'id': '309F9B9013CF627A00000000', 'lun': 0, 'hostGroup': host_group, 'targetGroup': target_group }] delete_mapping.return_value = {} expected = {'driver_volume_type': 'iscsi', 'data': {}} result = self.drv.terminate_connection(volume, connector) self.assertEqual(expected, result) def test_create_export(self): volume = fake_volume(self.ctxt) connector = {'initiator': 'iqn:test'} self.assertIsNone( self.drv.create_export(self.ctxt, volume, connector) ) def test_ensure_export(self): volume = fake_volume(self.ctxt) self.assertIsNone( self.drv.ensure_export(self.ctxt, volume) ) def test_remove_export(self): volume = fake_volume(self.ctxt) self.assertIsNone( self.drv.remove_export(self.ctxt, volume) ) @mock.patch('cinder.volume.drivers.nexenta.ns5.' 'jsonrpc.NefVolumeGroups.get') def test_get_volume_stats(self, get_volume_group): available = 100 used = 75 get_volume_group.return_value = { 'bytesAvailable': available * units.Gi, 'bytesUsed': used * units.Gi } result = self.drv.get_volume_stats(True) payload = {'fields': 'bytesAvailable,bytesUsed'} get_volume_group.assert_called_with(self.drv.root_path, payload) self.assertEqual(self.drv._stats, result) @mock.patch('cinder.volume.drivers.nexenta.ns5.' 
'jsonrpc.NefVolumeGroups.get') def test_update_volume_stats(self, get_volume_group): available = 8 used = 2 get_volume_group.return_value = { 'bytesAvailable': available * units.Gi, 'bytesUsed': used * units.Gi } location_info = '%(driver)s:%(host)s:%(pool)s/%(group)s' % { 'driver': self.drv.__class__.__name__, 'host': self.cfg.nexenta_host, 'pool': self.cfg.nexenta_volume, 'group': self.cfg.nexenta_volume_group, } expected = { 'vendor_name': 'Nexenta', 'dedup': self.cfg.nexenta_dataset_dedup, 'compression': self.cfg.nexenta_dataset_compression, 'description': self.cfg.nexenta_dataset_description, 'driver_version': self.drv.VERSION, 'storage_protocol': 'iSCSI', 'sparsed_volumes': self.cfg.nexenta_sparse, 'total_capacity_gb': used + available, 'free_capacity_gb': available, 'reserved_percentage': self.cfg.reserved_percentage, 'QoS_support': False, 'multiattach': True, 'consistencygroup_support': True, 'consistent_group_snapshot_enabled': True, 'volume_backend_name': self.cfg.volume_backend_name, 'location_info': location_info, 'iscsi_target_portal_port': ( self.cfg.nexenta_iscsi_target_portal_port), 'nef_url': self.cfg.nexenta_rest_address, 'nef_port': self.cfg.nexenta_rest_port } self.assertIsNone(self.drv._update_volume_stats()) self.assertEqual(expected, self.drv._stats) def test__get_volume_path(self): volume = fake_volume(self.ctxt) result = self.drv._get_volume_path(volume) expected = '%s/%s/%s' % (self.cfg.nexenta_volume, self.cfg.nexenta_volume_group, volume['name']) self.assertEqual(expected, result) def test__get_snapshot_path(self): volume = fake_volume(self.ctxt) snapshot = fake_snapshot(self.ctxt) snapshot.volume = volume result = self.drv._get_snapshot_path(snapshot) expected = '%s/%s/%s@%s' % (self.cfg.nexenta_volume, self.cfg.nexenta_volume_group, snapshot['volume_name'], snapshot['name']) self.assertEqual(expected, result) def test__get_target_group_name(self): target_iqn = '%s-test' % self.cfg.nexenta_target_prefix result = self.drv._get_target_group_name(target_iqn) expected = '%s-test' % self.cfg.nexenta_target_group_prefix self.assertEqual(expected, result) def test__get_target_name(self): target_group = '%s-test' % self.cfg.nexenta_target_group_prefix result = self.drv._get_target_name(target_group) expected = '%s-test' % self.cfg.nexenta_target_prefix self.assertEqual(expected, result) @mock.patch('cinder.volume.drivers.nexenta.ns5.' 'jsonrpc.NefNetAddresses.list') def test__get_host_addresses(self, list_addresses): expected = ['1.1.1.1', '2.2.2.2', '3.3.3.3'] return_value = [] for address in expected: return_value.append({ 'addressType': 'static', 'address': '%s/24' % address }) list_addresses.return_value = return_value result = self.drv._get_host_addresses() self.assertEqual(expected, result) @mock.patch('cinder.volume.drivers.nexenta.ns5.' 'iscsi.NexentaISCSIDriver._get_host_addresses') def test__get_host_portals(self, list_addresses): list_addresses.return_value = ['1.1.1.1', '2.2.2.2', '3.3.3.3'] expected = ['1.1.1.1:3260', '2.2.2.2:3260'] result = self.drv._get_host_portals() self.assertEqual(expected, result) @mock.patch('cinder.volume.drivers.nexenta.ns5.' 'jsonrpc.NefTargets.list') @mock.patch('cinder.volume.drivers.nexenta.ns5.' 
'jsonrpc.NefTargetsGroups.list') def test__target_group_props(self, list_target_groups, list_targets): host_portals = ['1.1.1.1:3260', '2.2.2.2:3260'] target_group = 'cinder-test' list_target_groups.return_value = [{ 'name': target_group, 'members': [ 'iqn:cinder-test' ] }] list_targets.return_value = [{ 'name': 'iqn:cinder-test', 'portals': [ { 'address': '1.1.1.1', 'port': 3260 }, { 'address': '2.2.2.2', 'port': 3260 } ] }] expected = {'iqn:cinder-test': host_portals} result = self.drv._target_group_props(target_group, host_portals) self.assertEqual(expected, result) @mock.patch('cinder.volume.drivers.nexenta.ns5.' 'jsonrpc.NefTargetsGroups.create') def test__create_target_group(self, create_target_group): name = 'name' members = ['a', 'b', 'c'] create_target_group.return_value = {} self.assertIsNone(self.drv._create_target_group(name, members)) payload = {'name': name, 'members': members} create_target_group.assert_called_with(payload) @mock.patch('cinder.volume.drivers.nexenta.ns5.' 'jsonrpc.NefTargetsGroups.set') def test__update_target_group(self, update_target_group): name = 'name' members = ['a', 'b', 'c'] update_target_group.return_value = {} self.assertIsNone(self.drv._update_target_group(name, members)) payload = {'members': members} update_target_group.assert_called_with(name, payload) @mock.patch('cinder.volume.drivers.nexenta.ns5.' 'jsonrpc.NefLunMappings.delete') def test__delete_lun_mapping(self, delete_mapping): name = 'name' delete_mapping.return_value = {} self.assertIsNone(self.drv._delete_lun_mapping(name)) delete_mapping.assert_called_with(name) @mock.patch('cinder.volume.drivers.nexenta.ns5.' 'jsonrpc.NefTargets.create') def test__create_target(self, create_target): name = 'name' portals = ['1.1.1.1:3260', '2.2.2.2:3260'] create_target.return_value = {} self.assertIsNone(self.drv._create_target(name, portals)) payload = { 'name': name, 'portals': [ { 'address': '1.1.1.1', 'port': 3260 }, { 'address': '2.2.2.2', 'port': 3260 } ] } create_target.assert_called_with(payload) @mock.patch('cinder.volume.drivers.nexenta.ns5.' 'jsonrpc.NefHostGroups.list') def test__get_host_group(self, get_hostgroup): member = 'member1' get_hostgroup.return_value = [ { 'name': 'name1', 'members': [ 'member1', 'member2', 'member3' ] }, { 'name': 'name2', 'members': [ 'member4', 'member5', 'member6' ] } ] expected = 'name1' result = self.drv._get_host_group(member) self.assertEqual(expected, result) @mock.patch('cinder.volume.drivers.nexenta.ns5.' 'jsonrpc.NefHostGroups.create') def test__create_host_group(self, create_host_group): name = 'name' members = ['a', 'b', 'c'] create_host_group.return_value = {} self.assertIsNone(self.drv._create_host_group(name, members)) payload = {'name': name, 'members': members} create_host_group.assert_called_with(payload) def test__s2d(self): portals = ['1.1.1.1:3260', '2.2.2.2:3260'] expected = [ { 'address': '1.1.1.1', 'port': 3260 }, { 'address': '2.2.2.2', 'port': 3260 } ] result = self.drv._s2d(portals) self.assertEqual(expected, result) def test__d2s(self): portals = [ { 'address': '1.1.1.1', 'port': 3260 }, { 'address': '2.2.2.2', 'port': 3260 } ] expected = ['1.1.1.1:3260', '2.2.2.2:3260'] result = self.drv._d2s(portals) self.assertEqual(expected, result) def test_create_consistencygroup(self): cgroup = fake_cgroup(self.ctxt) result = self.drv.create_consistencygroup(self.ctxt, cgroup) expected = {} self.assertEqual(expected, result) @mock.patch('cinder.volume.drivers.nexenta.ns5.' 
'iscsi.NexentaISCSIDriver.delete_volume') def test_delete_consistencygroup(self, delete_volume): cgroup = fake_cgroup(self.ctxt) volume1 = fake_volume(self.ctxt) volume2_spec = {'id': fake.VOLUME2_ID} volume2 = fake_volume(self.ctxt, **volume2_spec) volumes = [volume1, volume2] delete_volume.return_value = {} result = self.drv.delete_consistencygroup(self.ctxt, cgroup, volumes) expected = ({}, []) self.assertEqual(expected, result) def test_update_consistencygroup(self): cgroup = fake_cgroup(self.ctxt) volume1 = fake_volume(self.ctxt) volume2_spec = {'id': fake.VOLUME2_ID} volume2 = fake_volume(self.ctxt, **volume2_spec) volume3_spec = {'id': fake.VOLUME3_ID} volume3 = fake_volume(self.ctxt, **volume3_spec) volume4_spec = {'id': fake.VOLUME4_ID} volume4 = fake_volume(self.ctxt, **volume4_spec) add_volumes = [volume1, volume2] remove_volumes = [volume3, volume4] result = self.drv.update_consistencygroup(self.ctxt, cgroup, add_volumes, remove_volumes) expected = ({}, [], []) self.assertEqual(expected, result) @mock.patch('cinder.volume.drivers.nexenta.ns5.' 'jsonrpc.NefSnapshots.delete') @mock.patch('cinder.volume.drivers.nexenta.ns5.' 'jsonrpc.NefSnapshots.rename') @mock.patch('cinder.volume.drivers.nexenta.ns5.' 'jsonrpc.NefSnapshots.create') def test_create_cgsnapshot(self, create_snapshot, rename_snapshot, delete_snapshot): cgsnapshot = fake_cgsnapshot(self.ctxt) volume = fake_volume(self.ctxt) snapshot = fake_snapshot(self.ctxt) snapshot.volume = volume snapshots = [snapshot] cgsnapshot_name = ( self.cfg.nexenta_group_snapshot_template % cgsnapshot['id']) cgsnapshot_path = '%s@%s' % (self.drv.root_path, cgsnapshot_name) snapshot_path = '%s/%s@%s' % (self.drv.root_path, snapshot['volume_name'], cgsnapshot_name) create_snapshot.return_value = {} rename_snapshot.return_value = {} delete_snapshot.return_value = {} result = self.drv.create_cgsnapshot(self.ctxt, cgsnapshot, snapshots) create_payload = {'path': cgsnapshot_path, 'recursive': True} create_snapshot.assert_called_with(create_payload) rename_payload = {'newName': snapshot['name']} rename_snapshot.assert_called_with(snapshot_path, rename_payload) delete_payload = {'defer': True, 'recursive': True} delete_snapshot.assert_called_with(cgsnapshot_path, delete_payload) expected = ({}, []) self.assertEqual(expected, result) @mock.patch('cinder.volume.drivers.nexenta.ns5.' 'iscsi.NexentaISCSIDriver.delete_snapshot') def test_delete_cgsnapshot(self, delete_snapshot): cgsnapshot = fake_cgsnapshot(self.ctxt) volume = fake_volume(self.ctxt) snapshot = fake_snapshot(self.ctxt) snapshot.volume = volume snapshots = [snapshot] delete_snapshot.return_value = {} result = self.drv.delete_cgsnapshot(self.ctxt, cgsnapshot, snapshots) delete_snapshot.assert_called_with(snapshot) expected = ({}, []) self.assertEqual(expected, result) @mock.patch('cinder.volume.drivers.nexenta.ns5.' 
'iscsi.NexentaISCSIDriver.create_volume_from_snapshot') def test_create_consistencygroup_from_src_snapshots(self, create_volume): cgroup = fake_cgroup(self.ctxt) cgsnapshot = fake_cgsnapshot(self.ctxt) volume = fake_volume(self.ctxt) snapshot = fake_snapshot(self.ctxt) snapshot.volume = volume snapshots = [snapshot] clone_spec = {'id': fake.VOLUME2_ID} clone = fake_volume(self.ctxt, **clone_spec) clones = [clone] create_volume.return_value = {} result = self.drv.create_consistencygroup_from_src(self.ctxt, cgroup, clones, cgsnapshot, snapshots, None, None) create_volume.assert_called_with(clone, snapshot) expected = ({}, []) self.assertEqual(expected, result) @mock.patch('cinder.volume.drivers.nexenta.ns5.' 'jsonrpc.NefSnapshots.delete') @mock.patch('cinder.volume.drivers.nexenta.ns5.' 'iscsi.NexentaISCSIDriver.create_volume_from_snapshot') @mock.patch('cinder.volume.drivers.nexenta.ns5.' 'jsonrpc.NefSnapshots.create') def test_create_consistencygroup_from_src_volumes(self, create_snapshot, create_volume, delete_snapshot): src_cgroup = fake_cgroup(self.ctxt) dst_cgroup_spec = {'id': fake.CONSISTENCY_GROUP2_ID} dst_cgroup = fake_cgroup(self.ctxt, **dst_cgroup_spec) src_volume = fake_volume(self.ctxt) src_volumes = [src_volume] dst_volume_spec = {'id': fake.VOLUME2_ID} dst_volume = fake_volume(self.ctxt, **dst_volume_spec) dst_volumes = [dst_volume] create_snapshot.return_value = {} create_volume.return_value = {} delete_snapshot.return_value = {} result = self.drv.create_consistencygroup_from_src(self.ctxt, dst_cgroup, dst_volumes, None, None, src_cgroup, src_volumes) snapshot_name = ( self.cfg.nexenta_origin_snapshot_template % dst_cgroup['id']) snapshot_path = '%s@%s' % (self.drv.root_path, snapshot_name) create_payload = {'path': snapshot_path, 'recursive': True} create_snapshot.assert_called_with(create_payload) snapshot = { 'name': snapshot_name, 'volume_id': src_volume['id'], 'volume_name': src_volume['name'], 'volume_size': src_volume['size'] } create_volume.assert_called_with(dst_volume, snapshot) delete_payload = {'defer': True, 'recursive': True} delete_snapshot.assert_called_with(snapshot_path, delete_payload) expected = ({}, []) self.assertEqual(expected, result) @mock.patch('cinder.volume.drivers.nexenta.ns5.' 'jsonrpc.NefVolumes.list') def test__get_existing_volume(self, list_volumes): volume = fake_volume(self.ctxt) parent = self.drv.root_path name = volume['name'] size = volume['size'] path = self.drv._get_volume_path(volume) list_volumes.return_value = [{ 'name': name, 'path': path, 'volumeSize': size * units.Gi }] result = self.drv._get_existing_volume({'source-name': name}) payload = { 'parent': parent, 'fields': 'name,path,volumeSize', 'name': name } list_volumes.assert_called_with(payload) expected = { 'name': name, 'path': path, 'size': size } self.assertEqual(expected, result) def test__check_already_managed_snapshot(self): volume = fake_volume(self.ctxt) snapshot = fake_snapshot(self.ctxt) snapshot.volume = volume result = self.drv._check_already_managed_snapshot(snapshot) expected = False self.assertEqual(expected, result) @mock.patch('cinder.volume.drivers.nexenta.ns5.' 
'jsonrpc.NefSnapshots.list') def test__get_existing_snapshot(self, list_snapshots): volume = fake_volume(self.ctxt) snapshot = fake_snapshot(self.ctxt) snapshot.volume = volume name = snapshot['name'] path = self.drv._get_snapshot_path(snapshot) parent = self.drv._get_volume_path(volume) list_snapshots.return_value = [{ 'name': name, 'path': path }] payload = {'source-name': name} result = self.drv._get_existing_snapshot(snapshot, payload) payload = { 'parent': parent, 'fields': 'name,path', 'recursive': False, 'name': name } list_snapshots.assert_called_with(payload) expected = { 'name': name, 'path': path, 'volume_name': volume['name'], 'volume_size': volume['size'] } self.assertEqual(expected, result) @mock.patch('cinder.volume.drivers.nexenta.ns5.' 'jsonrpc.NefVolumes.rename') @mock.patch('cinder.volume.drivers.nexenta.ns5.' 'jsonrpc.NefLunMappings.list') @mock.patch('cinder.volume.drivers.nexenta.ns5.' 'iscsi.NexentaISCSIDriver._get_existing_volume') def test_manage_existing(self, get_existing_volume, list_mappings, rename_volume): existing_volume = fake_volume(self.ctxt) manage_volume_spec = {'id': fake.VOLUME2_ID} manage_volume = fake_volume(self.ctxt, **manage_volume_spec) existing_name = existing_volume['name'] existing_path = self.drv._get_volume_path(existing_volume) existing_size = existing_volume['size'] manage_path = self.drv._get_volume_path(manage_volume) get_existing_volume.return_value = { 'name': existing_name, 'path': existing_path, 'size': existing_size } list_mappings.return_value = [] payload = {'source-name': existing_name} self.assertIsNone(self.drv.manage_existing(manage_volume, payload)) get_existing_volume.assert_called_with(payload) payload = {'volume': existing_path} list_mappings.assert_called_with(payload) payload = {'newPath': manage_path} rename_volume.assert_called_with(existing_path, payload) @mock.patch('cinder.volume.drivers.nexenta.ns5.iscsi.' 'NexentaISCSIDriver._get_existing_volume') def test_manage_existing_get_size(self, get_volume): volume = fake_volume(self.ctxt) name = volume['name'] size = volume['size'] path = self.drv._get_volume_path(volume) get_volume.return_value = { 'name': name, 'path': path, 'size': size } payload = {'source-name': name} result = self.drv.manage_existing_get_size(volume, payload) expected = size self.assertEqual(expected, result) @mock.patch('cinder.volume.drivers.nexenta.ns5.' 'jsonrpc.NefVolumes.list') def test_get_manageable_volumes(self, list_volumes): volume = fake_volume(self.ctxt) volumes = [volume] name = volume['name'] size = volume['size'] path = self.drv._get_volume_path(volume) guid = 12345 parent = self.drv.root_path list_volumes.return_value = [{ 'name': name, 'path': path, 'guid': guid, 'volumeSize': size * units.Gi }] result = self.drv.get_manageable_volumes(volumes, None, 1, 0, 'size', 'asc') payload = { 'parent': parent, 'fields': 'name,guid,path,volumeSize', 'recursive': False } list_volumes.assert_called_with(payload) expected = [{ 'cinder_id': volume['id'], 'extra_info': None, 'reason_not_safe': 'Volume already managed', 'reference': { 'source-guid': guid, 'source-name': volume['name'] }, 'safe_to_manage': False, 'size': volume['size'] }] self.assertEqual(expected, result) def test_unmanage(self): volume = fake_volume(self.ctxt) self.assertIsNone(self.drv.unmanage(volume)) @mock.patch('cinder.volume.drivers.nexenta.ns5.' 'jsonrpc.NefSnapshots.rename') @mock.patch('cinder.volume.drivers.nexenta.ns5.' 
'iscsi.NexentaISCSIDriver._get_existing_snapshot') def test_manage_existing_snapshot(self, get_existing_snapshot, rename_snapshot): volume = fake_volume(self.ctxt) existing_snapshot = fake_snapshot(self.ctxt) existing_snapshot.volume = volume manage_snapshot_spec = {'id': fake.SNAPSHOT2_ID} manage_snapshot = fake_snapshot(self.ctxt, **manage_snapshot_spec) manage_snapshot.volume = volume existing_name = existing_snapshot['name'] manage_name = manage_snapshot['name'] volume_name = volume['name'] volume_size = volume['size'] existing_path = self.drv._get_snapshot_path(existing_snapshot) get_existing_snapshot.return_value = { 'name': existing_name, 'path': existing_path, 'volume_name': volume_name, 'volume_size': volume_size } rename_snapshot.return_value = {} payload = {'source-name': existing_name} self.assertIsNone( self.drv.manage_existing_snapshot(manage_snapshot, payload) ) get_existing_snapshot.assert_called_with(manage_snapshot, payload) payload = {'newName': manage_name} rename_snapshot.assert_called_with(existing_path, payload) @mock.patch('cinder.volume.drivers.nexenta.ns5.' 'iscsi.NexentaISCSIDriver._get_existing_snapshot') def test_manage_existing_snapshot_get_size(self, get_snapshot): volume = fake_volume(self.ctxt) snapshot = fake_snapshot(self.ctxt) snapshot.volume = volume snapshot_name = snapshot['name'] volume_name = volume['name'] volume_size = volume['size'] snapshot_path = self.drv._get_snapshot_path(snapshot) get_snapshot.return_value = { 'name': snapshot_name, 'path': snapshot_path, 'volume_name': volume_name, 'volume_size': volume_size } payload = {'source-name': snapshot_name} result = self.drv.manage_existing_snapshot_get_size(volume, payload) expected = volume['size'] self.assertEqual(expected, result) @mock.patch('cinder.objects.VolumeList.get_all_by_host') @mock.patch('cinder.volume.drivers.nexenta.ns5.' 'jsonrpc.NefSnapshots.list') def test_get_manageable_snapshots(self, list_snapshots, list_volumes): volume = fake_volume(self.ctxt) volumes = [volume] snapshot = fake_snapshot(self.ctxt) snapshot.volume = volume snapshots = [snapshot] guid = 12345 name = snapshot['name'] path = self.drv._get_snapshot_path(snapshot) parent = self.drv._get_volume_path(volume) list_snapshots.return_value = [{ 'name': name, 'path': path, 'guid': guid, 'parent': parent, 'hprService': '', 'snaplistId': '' }] list_volumes.return_value = volumes result = self.drv.get_manageable_snapshots(snapshots, None, 1, 0, 'size', 'asc') payload = { 'parent': self.drv.root_path, 'fields': 'name,guid,path,parent,hprService,snaplistId', 'recursive': True } list_snapshots.assert_called_with(payload) expected = [{ 'cinder_id': snapshot['id'], 'extra_info': None, 'reason_not_safe': 'Snapshot already managed', 'source_reference': { 'name': volume['name'] }, 'reference': { 'source-guid': guid, 'source-name': snapshot['name'] }, 'safe_to_manage': False, 'size': volume['size'] }] self.assertEqual(expected, result) def test_unmanage_snapshot(self): volume = fake_volume(self.ctxt) snapshot = fake_snapshot(self.ctxt) snapshot.volume = volume self.assertIsNone(self.drv.unmanage_snapshot(snapshot)) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/volume/drivers/nexenta/test_nexenta5_jsonrpc.py0000664000175000017500000012634000000000000030007 0ustar00zuulzuul00000000000000# Copyright 2019 Nexenta Systems, Inc. # All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Unit tests for NexentaStor 5 REST API helper.""" import copy import hashlib import json import posixpath from unittest import mock import urllib import uuid import requests from cinder.tests.unit import test from cinder.volume import configuration as conf from cinder.volume.drivers.nexenta.ns5 import jsonrpc class FakeNefProxy(object): def __init__(self): self.scheme = 'https' self.port = 8443 self.hosts = ['1.1.1.1', '2.2.2.2'] self.host = self.hosts[0] self.root = 'pool/share' self.username = 'username' self.password = 'password' self.retries = 3 self.timeout = 5 self.session = mock.Mock() self.session.headers = {} def __getattr__(self, name): pass def delay(self, interval): pass def delete_bearer(self): pass def update_lock(self): pass def update_token(self, token): pass def update_host(self, host): pass def url(self, path): return '%s://%s:%s/%s' % (self.scheme, self.host, self.port, path) class TestNefException(test.TestCase): def test_message(self): message = 'test message 1' result = jsonrpc.NefException(message) self.assertIn(message, result.msg) def test_message_kwargs(self): code = 'EAGAIN' message = 'test message 2' result = jsonrpc.NefException(message, code=code) self.assertEqual(code, result.code) self.assertIn(message, result.msg) def test_no_message_kwargs(self): code = 'ESRCH' message = 'test message 3' result = jsonrpc.NefException(None, code=code, message=message) self.assertEqual(code, result.code) self.assertIn(message, result.msg) def test_message_plus_kwargs(self): code = 'ENODEV' message1 = 'test message 4' message2 = 'test message 5' result = jsonrpc.NefException(message1, code=code, message=message2) self.assertEqual(code, result.code) self.assertIn(message2, result.msg) def test_dict(self): code = 'ENOENT' message = 'test message 4' result = jsonrpc.NefException({'code': code, 'message': message}) self.assertEqual(code, result.code) self.assertIn(message, result.msg) def test_kwargs(self): code = 'EPERM' message = 'test message 5' result = jsonrpc.NefException(code=code, message=message) self.assertEqual(code, result.code) self.assertIn(message, result.msg) def test_dict_kwargs(self): code = 'EINVAL' message = 'test message 6' result = jsonrpc.NefException({'code': code}, message=message) self.assertEqual(code, result.code) self.assertIn(message, result.msg) def test_defaults(self): code = 'EBADMSG' message = 'NexentaError' result = jsonrpc.NefException() self.assertEqual(code, result.code) self.assertIn(message, result.msg) class TestNefRequest(test.TestCase): def setUp(self): super(TestNefRequest, self).setUp() self.proxy = FakeNefProxy() def fake_response(self, method, path, payload, code, content): request = requests.PreparedRequest() request.method = method request.url = self.proxy.url(path) request.headers = {'Content-Type': 'application/json'} request.body = None if method in ['get', 'delete']: request.params = payload elif method in ['put', 'post']: request.data = json.dumps(payload) response = requests.Response() 
response.request = request response.status_code = code if content: response._content = json.dumps(content) else: response._content = '' return response def test___call___invalid_method(self): method = 'unsupported' instance = jsonrpc.NefRequest(self.proxy, method) path = 'parent/child' self.assertRaises(jsonrpc.NefException, instance, path) def test___call___none_path(self): method = 'get' instance = jsonrpc.NefRequest(self.proxy, method) self.assertRaises(jsonrpc.NefException, instance, None) def test___call___empty_path(self): method = 'get' instance = jsonrpc.NefRequest(self.proxy, method) self.assertRaises(jsonrpc.NefException, instance, '') @mock.patch('cinder.volume.drivers.nexenta.ns5.' 'jsonrpc.NefRequest.request') def test___call___get(self, request): method = 'get' instance = jsonrpc.NefRequest(self.proxy, method) path = 'parent/child' payload = {} content = {'name': 'snapshot'} response = self.fake_response(method, path, payload, 200, content) request.return_value = response result = instance(path, payload) request.assert_called_with(method, path) self.assertEqual(content, result) @mock.patch('cinder.volume.drivers.nexenta.ns5.' 'jsonrpc.NefRequest.request') def test___call___get_payload(self, request): method = 'get' instance = jsonrpc.NefRequest(self.proxy, method) path = 'parent/child' payload = {'key': 'value'} content = {'name': 'snapshot'} response = self.fake_response(method, path, payload, 200, content) request.return_value = response result = instance(path, payload) params = {'params': payload} request.assert_called_with(method, path, **params) self.assertEqual(content, result) @mock.patch('cinder.volume.drivers.nexenta.ns5.' 'jsonrpc.NefRequest.request') def test___call___get_data_payload(self, request): method = 'get' instance = jsonrpc.NefRequest(self.proxy, method) path = 'parent/child' payload = {'key': 'value'} data = [ { 'name': 'fs1', 'path': 'pool/fs1' }, { 'name': 'fs2', 'path': 'pool/fs2' } ] content = {'data': data} response = self.fake_response(method, path, payload, 200, content) request.return_value = response instance.data = data result = instance(path, payload) params = {'params': payload} request.assert_called_with(method, path, **params) self.assertEqual(data, result) def test___call___get_invalid_payload(self): method = 'get' instance = jsonrpc.NefRequest(self.proxy, method) path = 'parent/child' payload = 'bad data' self.assertRaises(jsonrpc.NefException, instance, path, payload) @mock.patch('cinder.volume.drivers.nexenta.ns5.' 'jsonrpc.NefRequest.request') def test___call___delete(self, request): method = 'delete' instance = jsonrpc.NefRequest(self.proxy, method) path = 'parent/child' payload = {} content = {'name': 'snapshot'} response = self.fake_response(method, path, payload, 200, content) request.return_value = response result = instance(path, payload) request.assert_called_with(method, path) self.assertEqual(content, result) @mock.patch('cinder.volume.drivers.nexenta.ns5.' 
'jsonrpc.NefRequest.request') def test___call___delete_payload(self, request): method = 'delete' instance = jsonrpc.NefRequest(self.proxy, method) path = 'parent/child' payload = {'key': 'value'} content = {'name': 'snapshot'} response = self.fake_response(method, path, payload, 200, content) request.return_value = response result = instance(path, payload) params = {'params': payload} request.assert_called_with(method, path, **params) self.assertEqual(content, result) def test___call___delete_invalid_payload(self): method = 'delete' instance = jsonrpc.NefRequest(self.proxy, method) path = 'parent/child' payload = 'bad data' self.assertRaises(jsonrpc.NefException, instance, path, payload) @mock.patch('cinder.volume.drivers.nexenta.ns5.' 'jsonrpc.NefRequest.request') def test___call___post(self, request): method = 'post' instance = jsonrpc.NefRequest(self.proxy, method) path = 'parent/child' payload = {} content = None response = self.fake_response(method, path, payload, 200, content) request.return_value = response result = instance(path, payload) request.assert_called_with(method, path) self.assertEqual(content, result) @mock.patch('cinder.volume.drivers.nexenta.ns5.' 'jsonrpc.NefRequest.request') def test___call___post_payload(self, request): method = 'post' instance = jsonrpc.NefRequest(self.proxy, method) path = 'parent/child' payload = {'key': 'value'} content = None response = self.fake_response(method, path, payload, 200, content) request.return_value = response result = instance(path, payload) params = {'data': json.dumps(payload)} request.assert_called_with(method, path, **params) self.assertEqual(content, result) def test___call___post_invalid_payload(self): method = 'post' instance = jsonrpc.NefRequest(self.proxy, method) path = 'parent/child' payload = 'bad data' self.assertRaises(jsonrpc.NefException, instance, path, payload) @mock.patch('cinder.volume.drivers.nexenta.ns5.' 'jsonrpc.NefRequest.request') def test___call___put(self, request): method = 'put' instance = jsonrpc.NefRequest(self.proxy, method) path = 'parent/child' payload = {} content = None response = self.fake_response(method, path, payload, 200, content) request.return_value = response result = instance(path, payload) request.assert_called_with(method, path) self.assertEqual(content, result) @mock.patch('cinder.volume.drivers.nexenta.ns5.' 'jsonrpc.NefRequest.request') def test___call___put_payload(self, request): method = 'put' instance = jsonrpc.NefRequest(self.proxy, method) path = 'parent/child' payload = {'key': 'value'} content = None response = self.fake_response(method, path, payload, 200, content) request.return_value = response result = instance(path, payload) params = {'data': json.dumps(payload)} request.assert_called_with(method, path, **params) self.assertEqual(content, result) def test___call___put_invalid_payload(self): method = 'put' instance = jsonrpc.NefRequest(self.proxy, method) path = 'parent/child' payload = 'bad data' self.assertRaises(jsonrpc.NefException, instance, path, payload) @mock.patch('cinder.volume.drivers.nexenta.ns5.' 'jsonrpc.NefRequest.request') def test___call___non_ok_response(self, request): method = 'get' instance = jsonrpc.NefRequest(self.proxy, method) path = 'parent/child' payload = {'key': 'value'} content = {'code': 'ENOENT', 'message': 'error'} response = self.fake_response(method, path, payload, 500, content) request.return_value = response self.assertRaises(jsonrpc.NefException, instance, path, payload) @mock.patch('cinder.volume.drivers.nexenta.ns5.' 
'jsonrpc.NefRequest.failover') @mock.patch('cinder.volume.drivers.nexenta.ns5.' 'jsonrpc.NefRequest.request') def test___call___request_after_failover(self, request, failover): method = 'post' instance = jsonrpc.NefRequest(self.proxy, method) path = 'parent/child' payload = {'key': 'value'} content = None response = self.fake_response(method, path, payload, 200, content) request.side_effect = [requests.exceptions.Timeout, response] failover.return_value = True result = instance(path, payload) params = {'data': json.dumps(payload)} request.assert_called_with(method, path, **params) self.assertEqual(content, result) @mock.patch('cinder.volume.drivers.nexenta.ns5.' 'jsonrpc.NefRequest.failover') @mock.patch('cinder.volume.drivers.nexenta.ns5.' 'jsonrpc.NefRequest.request') def test___call___request_failover_error(self, request, failover): method = 'put' instance = jsonrpc.NefRequest(self.proxy, method) path = 'parent/child' payload = {'key': 'value'} request.side_effect = requests.exceptions.Timeout failover.return_value = False self.assertRaises(requests.exceptions.Timeout, instance, path, payload) def test_hook_default(self): method = 'post' instance = jsonrpc.NefRequest(self.proxy, method) path = 'parent/child' payload = {'key': 'value'} content = {'name': 'dataset'} response = self.fake_response(method, path, payload, 303, content) result = instance.hook(response) self.assertEqual(response, result) def test_hook_200_empty(self): method = 'delete' instance = jsonrpc.NefRequest(self.proxy, method) path = 'storage/filesystems' payload = {'force': True} content = None response = self.fake_response(method, path, payload, 200, content) result = instance.hook(response) self.assertEqual(response, result) def test_hook_201_empty(self): method = 'post' instance = jsonrpc.NefRequest(self.proxy, method) path = 'storage/snapshots' payload = {'path': 'parent/child@name'} content = None response = self.fake_response(method, path, payload, 201, content) result = instance.hook(response) self.assertEqual(response, result) def test_hook_500_empty(self): method = 'get' instance = jsonrpc.NefRequest(self.proxy, method) path = 'storage/pools' payload = {'poolName': 'tank'} content = None response = self.fake_response(method, path, payload, 500, content) self.assertRaises(jsonrpc.NefException, instance.hook, response) def test_hook_200_bad_content(self): method = 'get' instance = jsonrpc.NefRequest(self.proxy, method) path = 'storage/volumes' payload = {'name': 'test'} content = None response = self.fake_response(method, path, payload, 200, content) response._content = 'bad_content' self.assertRaises(jsonrpc.NefException, instance.hook, response) @mock.patch('cinder.volume.drivers.nexenta.ns5.' 'jsonrpc.NefRequest.request') @mock.patch('cinder.volume.drivers.nexenta.ns5.' 
'jsonrpc.NefRequest.auth') def test_hook_401(self, auth, request): method = 'get' instance = jsonrpc.NefRequest(self.proxy, method) path = 'parent/child' payload = {'key': 'value'} content = {'code': 'EAUTH'} response = self.fake_response(method, path, payload, 401, content) auth.return_value = True content2 = {'name': 'test'} response2 = self.fake_response(method, path, payload, 200, content2) request.return_value = response2 self.proxy.session.send.return_value = content2 result = instance.hook(response) self.assertEqual(content2, result) def test_hook_401_max_retries(self): method = 'get' instance = jsonrpc.NefRequest(self.proxy, method) instance.stat[401] = self.proxy.retries path = 'parent/child' payload = {'key': 'value'} content = {'code': 'EAUTH'} response = self.fake_response(method, path, payload, 401, content) self.assertRaises(jsonrpc.NefException, instance.hook, response) def test_hook_404_nested(self): method = 'get' instance = jsonrpc.NefRequest(self.proxy, method) instance.lock = True path = 'parent/child' payload = {'key': 'value'} content = {'code': 'ENOENT'} response = self.fake_response(method, path, payload, 404, content) result = instance.hook(response) self.assertEqual(response, result) def test_hook_404_max_retries(self): method = 'get' instance = jsonrpc.NefRequest(self.proxy, method) instance.stat[404] = self.proxy.retries path = 'parent/child' payload = {'key': 'value'} content = {'code': 'ENOENT'} response = self.fake_response(method, path, payload, 404, content) self.assertRaises(jsonrpc.NefException, instance.hook, response) @mock.patch('cinder.volume.drivers.nexenta.ns5.' 'jsonrpc.NefRequest.failover') def test_hook_404_failover_error(self, failover): method = 'get' instance = jsonrpc.NefRequest(self.proxy, method) path = 'parent/child' payload = {'key': 'value'} content = {'code': 'ENOENT'} response = self.fake_response(method, path, payload, 404, content) failover.return_value = False result = instance.hook(response) self.assertEqual(response, result) @mock.patch('cinder.volume.drivers.nexenta.ns5.' 'jsonrpc.NefRequest.request') @mock.patch('cinder.volume.drivers.nexenta.ns5.' 'jsonrpc.NefRequest.failover') def test_hook_404_failover_ok(self, failover, request): method = 'get' instance = jsonrpc.NefRequest(self.proxy, method) path = 'parent/child' payload = {'key': 'value'} content = {'code': 'ENOENT'} response = self.fake_response(method, path, payload, 404, content) failover.return_value = True content2 = {'name': 'test'} response2 = self.fake_response(method, path, payload, 200, content2) request.return_value = response2 result = instance.hook(response) self.assertEqual(response2, result) def test_hook_500_permanent(self): method = 'get' instance = jsonrpc.NefRequest(self.proxy, method) path = 'parent/child' payload = {'key': 'value'} content = {'code': 'EINVAL'} response = self.fake_response(method, path, payload, 500, content) self.assertRaises(jsonrpc.NefException, instance.hook, response) def test_hook_500_busy_max_retries(self): method = 'get' instance = jsonrpc.NefRequest(self.proxy, method) instance.stat[500] = self.proxy.retries path = 'parent/child' payload = {'key': 'value'} content = {'code': 'EBUSY'} response = self.fake_response(method, path, payload, 500, content) self.assertRaises(jsonrpc.NefException, instance.hook, response) @mock.patch('cinder.volume.drivers.nexenta.ns5.' 
'jsonrpc.NefRequest.request') def test_hook_500_busy_ok(self, request): method = 'get' instance = jsonrpc.NefRequest(self.proxy, method) path = 'parent/child' payload = {'key': 'value'} content = {'code': 'EBUSY'} response = self.fake_response(method, path, payload, 500, content) content2 = {'name': 'test'} response2 = self.fake_response(method, path, payload, 200, content2) request.return_value = response2 result = instance.hook(response) self.assertEqual(response2, result) def test_hook_201_no_monitor(self): method = 'get' instance = jsonrpc.NefRequest(self.proxy, method) path = 'parent/child' payload = {'key': 'value'} content = {'monitor': 'unknown'} response = self.fake_response(method, path, payload, 202, content) self.assertRaises(jsonrpc.NefException, instance.hook, response) @mock.patch('cinder.volume.drivers.nexenta.ns5.' 'jsonrpc.NefRequest.request') def test_hook_201_ok(self, request): method = 'delete' instance = jsonrpc.NefRequest(self.proxy, method) path = 'parent/child' payload = {'key': 'value'} content = { 'links': [{ 'rel': 'monitor', 'href': '/jobStatus/jobID' }] } response = self.fake_response(method, path, payload, 202, content) content2 = None response2 = self.fake_response(method, path, payload, 201, content2) request.return_value = response2 result = instance.hook(response) self.assertEqual(response2, result) def test_200_no_data(self): method = 'get' instance = jsonrpc.NefRequest(self.proxy, method) path = 'parent/child' payload = {'key': 'value'} content = {'name': 'test'} response = self.fake_response(method, path, payload, 200, content) result = instance.hook(response) self.assertEqual(response, result) def test_200_pagination_end(self): method = 'get' instance = jsonrpc.NefRequest(self.proxy, method) path = 'parent/child' payload = {'key': 'value'} content = {'data': 'value'} response = self.fake_response(method, path, payload, 200, content) result = instance.hook(response) self.assertEqual(response, result) @mock.patch('cinder.volume.drivers.nexenta.ns5.' 'jsonrpc.NefRequest.request') def test_200_pagination_next(self, request): method = 'get' instance = jsonrpc.NefRequest(self.proxy, method) path = 'parent/child' payload = {'key': 'value'} content = { 'data': [{ 'name': 'test' }], 'links': [{ 'rel': 'next', 'href': path }] } response = self.fake_response(method, path, payload, 200, content) response2 = self.fake_response(method, path, payload, 200, content) request.return_value = response2 result = instance.hook(response) self.assertEqual(response2, result) def test_request(self): method = 'get' instance = jsonrpc.NefRequest(self.proxy, method) path = 'parent/child' payload = {'key': 'value'} expected = {'name': 'dataset'} url = self.proxy.url(path) kwargs = payload.copy() kwargs['timeout'] = self.proxy.timeout kwargs['hooks'] = {'response': instance.hook} self.proxy.session.request.return_value = expected result = instance.request(method, path, **payload) self.proxy.session.request.assert_called_with(method, url, **kwargs) self.assertEqual(expected, result) @mock.patch('cinder.volume.drivers.nexenta.ns5.' 
'jsonrpc.NefRequest.request') def test_auth(self, request): method = 'get' instance = jsonrpc.NefRequest(self.proxy, method) method = 'post' path = 'auth/login' payload = { 'data': json.dumps({ 'username': self.proxy.username, 'password': self.proxy.password }) } content = {'token': 'test'} response = self.fake_response(method, path, payload, 200, content) request.return_value = response instance.auth() request.assert_called_with(method, path, **payload) @mock.patch('cinder.volume.drivers.nexenta.ns5.' 'jsonrpc.NefRequest.request') def test_auth_error(self, request): method = 'get' instance = jsonrpc.NefRequest(self.proxy, method) method = 'post' path = 'auth/login' payload = { 'data': json.dumps({ 'username': self.proxy.username, 'password': self.proxy.password }) } content = {'data': 'noauth'} response = self.fake_response(method, path, payload, 200, content) request.return_value = response self.assertRaises(jsonrpc.NefException, instance.auth) @mock.patch('cinder.volume.drivers.nexenta.ns5.' 'jsonrpc.NefRequest.request') def test_failover(self, request): method = 'get' instance = jsonrpc.NefRequest(self.proxy, method) path = self.proxy.root payload = {} content = {'path': path} response = self.fake_response(method, path, payload, 200, content) request.return_value = response result = instance.failover() request.assert_called_with(method, path) expected = True self.assertEqual(expected, result) @mock.patch('cinder.volume.drivers.nexenta.ns5.' 'jsonrpc.NefRequest.request') def test_failover_timeout(self, request): method = 'get' instance = jsonrpc.NefRequest(self.proxy, method) path = self.proxy.root payload = {} content = {'path': path} response = self.fake_response(method, path, payload, 200, content) request.side_effect = [requests.exceptions.Timeout, response] result = instance.failover() request.assert_called_with(method, path) expected = True self.assertEqual(expected, result) @mock.patch('cinder.volume.drivers.nexenta.ns5.' 'jsonrpc.NefRequest.request') def test_failover_404(self, request): method = 'get' instance = jsonrpc.NefRequest(self.proxy, method) path = self.proxy.root payload = {} content = {} response = self.fake_response(method, path, payload, 404, content) request.side_effect = [response, response] result = instance.failover() request.assert_called_with(method, path) expected = False self.assertEqual(expected, result) @mock.patch('cinder.volume.drivers.nexenta.ns5.' 
'jsonrpc.NefRequest.request') def test_failover_error(self, request): method = 'get' instance = jsonrpc.NefRequest(self.proxy, method) path = self.proxy.root request.side_effect = [ requests.exceptions.Timeout, requests.exceptions.ConnectionError ] result = instance.failover() request.assert_called_with(method, path) expected = False self.assertEqual(expected, result) def test_getpath(self): method = 'get' rel = 'monitor' href = 'jobStatus/jobID' content = { 'links': [ [1, 2], 'bad link', { 'rel': 'next', 'href': href }, { 'rel': rel, 'href': href } ] } instance = jsonrpc.NefRequest(self.proxy, method) result = instance.getpath(content, rel) expected = href self.assertEqual(expected, result) def test_getpath_no_content(self): method = 'get' rel = 'next' content = None instance = jsonrpc.NefRequest(self.proxy, method) result = instance.getpath(content, rel) expected = None self.assertEqual(expected, result) def test_getpath_no_links(self): method = 'get' rel = 'next' content = {'a': 'b'} instance = jsonrpc.NefRequest(self.proxy, method) result = instance.getpath(content, rel) expected = None self.assertEqual(expected, result) def test_getpath_no_rel(self): method = 'get' rel = 'next' content = { 'links': [ { 'rel': 'monitor', 'href': '/jobs/jobID' } ] } instance = jsonrpc.NefRequest(self.proxy, method) result = instance.getpath(content, rel) expected = None self.assertEqual(expected, result) def test_getpath_no_href(self): method = 'get' rel = 'next' content = { 'links': [ { 'rel': rel } ] } instance = jsonrpc.NefRequest(self.proxy, method) result = instance.getpath(content, rel) expected = None self.assertEqual(expected, result) class TestNefCollections(test.TestCase): def setUp(self): super(TestNefCollections, self).setUp() self.proxy = mock.Mock() self.instance = jsonrpc.NefCollections(self.proxy) def test_path(self): path = 'path/to/item name + - & # $ = 0' result = self.instance.path(path) quoted_path = urllib.parse.quote_plus(path) expected = posixpath.join(self.instance.root, quoted_path) self.assertEqual(expected, result) def test_get(self): name = 'parent/child' payload = {'key': 'value'} expected = {'name': 'dataset'} path = self.instance.path(name) self.proxy.get.return_value = expected result = self.instance.get(name, payload) self.proxy.get.assert_called_with(path, payload) self.assertEqual(expected, result) def test_set(self): name = 'parent/child' payload = {'key': 'value'} expected = None path = self.instance.path(name) self.proxy.put.return_value = expected result = self.instance.set(name, payload) self.proxy.put.assert_called_with(path, payload) self.assertEqual(expected, result) def test_list(self): payload = {'key': 'value'} expected = [{'name': 'dataset'}] self.proxy.get.return_value = expected result = self.instance.list(payload) self.proxy.get.assert_called_with(self.instance.root, payload) self.assertEqual(expected, result) def test_create(self): payload = {'key': 'value'} expected = None self.proxy.post.return_value = expected result = self.instance.create(payload) self.proxy.post.assert_called_with(self.instance.root, payload) self.assertEqual(expected, result) def test_create_exist(self): payload = {'key': 'value'} expected = None self.proxy.post.side_effect = jsonrpc.NefException(code='EEXIST') result = self.instance.create(payload) self.proxy.post.assert_called_with(self.instance.root, payload) self.assertEqual(expected, result) def test_create_error(self): payload = {'key': 'value'} self.proxy.post.side_effect = jsonrpc.NefException(code='EBUSY') 
self.assertRaises(jsonrpc.NefException, self.instance.create, payload) self.proxy.post.assert_called_with(self.instance.root, payload) def test_delete(self): name = 'parent/child' payload = {'key': 'value'} expected = None path = self.instance.path(name) self.proxy.delete.return_value = expected result = self.instance.delete(name, payload) self.proxy.delete.assert_called_with(path, payload) self.assertEqual(expected, result) def test_delete_not_found(self): name = 'parent/child' payload = {'key': 'value'} expected = None path = self.instance.path(name) self.proxy.delete.side_effect = jsonrpc.NefException(code='ENOENT') result = self.instance.delete(name, payload) self.proxy.delete.assert_called_with(path, payload) self.assertEqual(expected, result) def test_delete_error(self): name = 'parent/child' payload = {'key': 'value'} path = self.instance.path(name) self.proxy.delete.side_effect = jsonrpc.NefException(code='EINVAL') self.assertRaises(jsonrpc.NefException, self.instance.delete, name, payload) self.proxy.delete.assert_called_with(path, payload) class TestNefSettings(test.TestCase): def setUp(self): super(TestNefSettings, self).setUp() self.proxy = mock.Mock() self.instance = jsonrpc.NefSettings(self.proxy) def test_create(self): payload = {'key': 'value'} result = self.instance.create(payload) expected = NotImplemented self.assertEqual(expected, result) def test_delete(self): name = 'parent/child' payload = {'key': 'value'} result = self.instance.delete(name, payload) expected = NotImplemented self.assertEqual(expected, result) class TestNefDatasets(test.TestCase): def setUp(self): super(TestNefDatasets, self).setUp() self.proxy = mock.Mock() self.instance = jsonrpc.NefDatasets(self.proxy) def test_rename(self): name = 'parent/child' payload = {'key': 'value'} expected = None path = self.instance.path(name) path = posixpath.join(path, 'rename') self.proxy.post.return_value = expected result = self.instance.rename(name, payload) self.proxy.post.assert_called_with(path, payload) self.assertEqual(expected, result) class TestNefSnapshots(test.TestCase): def setUp(self): super(TestNefSnapshots, self).setUp() self.proxy = mock.Mock() self.instance = jsonrpc.NefSnapshots(self.proxy) def test_clone(self): name = 'parent/child' payload = {'key': 'value'} expected = None path = self.instance.path(name) path = posixpath.join(path, 'clone') self.proxy.post.return_value = expected result = self.instance.clone(name, payload) self.proxy.post.assert_called_with(path, payload) self.assertEqual(expected, result) class TestNefVolumeGroups(test.TestCase): def setUp(self): super(TestNefVolumeGroups, self).setUp() self.proxy = mock.Mock() self.instance = jsonrpc.NefVolumeGroups(self.proxy) def test_rollback(self): name = 'parent/child' payload = {'key': 'value'} expected = None path = self.instance.path(name) path = posixpath.join(path, 'rollback') self.proxy.post.return_value = expected result = self.instance.rollback(name, payload) self.proxy.post.assert_called_with(path, payload) self.assertEqual(expected, result) class TestNefVolumes(test.TestCase): def setUp(self): super(TestNefVolumes, self).setUp() self.proxy = mock.Mock() self.instance = jsonrpc.NefVolumes(self.proxy) def test_promote(self): name = 'parent/child' payload = {'key': 'value'} expected = None path = self.instance.path(name) path = posixpath.join(path, 'promote') self.proxy.post.return_value = expected result = self.instance.promote(name, payload) self.proxy.post.assert_called_with(path, payload) self.assertEqual(expected, result) class 
TestNefFilesystems(test.TestCase): def setUp(self): super(TestNefFilesystems, self).setUp() self.proxy = mock.Mock() self.instance = jsonrpc.NefFilesystems(self.proxy) def test_mount(self): name = 'parent/child' payload = {'key': 'value'} expected = None path = self.instance.path(name) path = posixpath.join(path, 'mount') self.proxy.post.return_value = expected result = self.instance.mount(name, payload) self.proxy.post.assert_called_with(path, payload) self.assertEqual(expected, result) def test_unmount(self): name = 'parent/child' payload = {'key': 'value'} expected = None path = self.instance.path(name) path = posixpath.join(path, 'unmount') self.proxy.post.return_value = expected result = self.instance.unmount(name, payload) self.proxy.post.assert_called_with(path, payload) self.assertEqual(expected, result) def test_acl(self): name = 'parent/child' payload = {'key': 'value'} expected = None path = self.instance.path(name) path = posixpath.join(path, 'acl') self.proxy.post.return_value = expected result = self.instance.acl(name, payload) self.proxy.post.assert_called_with(path, payload) self.assertEqual(expected, result) class TestNefHpr(test.TestCase): def setUp(self): super(TestNefHpr, self).setUp() self.proxy = mock.Mock() self.instance = jsonrpc.NefHpr(self.proxy) def test_activate(self): payload = {'key': 'value'} expected = None path = posixpath.join(self.instance.root, 'activate') self.proxy.post.return_value = expected result = self.instance.activate(payload) self.proxy.post.assert_called_with(path, payload) self.assertEqual(expected, result) def test_start(self): name = 'parent/child' payload = {'key': 'value'} expected = None path = posixpath.join(self.instance.path(name), 'start') self.proxy.post.return_value = expected result = self.instance.start(name, payload) self.proxy.post.assert_called_with(path, payload) self.assertEqual(expected, result) class TestNefProxy(test.TestCase): def setUp(self): super(TestNefProxy, self).setUp() self.cfg = mock.Mock(spec=conf.Configuration) self.cfg.nexenta_use_https = True self.cfg.driver_ssl_cert_verify = True self.cfg.nexenta_user = 'user' self.cfg.nexenta_password = 'pass' self.cfg.nexenta_rest_address = '1.1.1.1,2.2.2.2' self.cfg.nexenta_rest_port = 8443 self.cfg.nexenta_rest_backoff_factor = 1 self.cfg.nexenta_rest_retry_count = 3 self.cfg.nexenta_rest_connect_timeout = 1 self.cfg.nexenta_rest_read_timeout = 1 self.cfg.nas_host = '3.3.3.3' self.cfg.nas_share_path = 'pool/path/to/share' self.nef_mock = mock.Mock() self.mock_object(jsonrpc, 'NefRequest', return_value=self.nef_mock) self.proto = 'nfs' self.proxy = jsonrpc.NefProxy(self.proto, self.cfg.nas_share_path, self.cfg) def test___init___http(self): proto = 'nfs' cfg = copy.copy(self.cfg) cfg.nexenta_use_https = False result = jsonrpc.NefProxy(proto, cfg.nas_share_path, cfg) self.assertIsInstance(result, jsonrpc.NefProxy) def test___init___no_rest_port_http(self): proto = 'nfs' cfg = copy.copy(self.cfg) cfg.nexenta_rest_port = 0 cfg.nexenta_use_https = False result = jsonrpc.NefProxy(proto, cfg.nas_share_path, cfg) self.assertIsInstance(result, jsonrpc.NefProxy) def test___init___no_rest_port_https(self): proto = 'nfs' cfg = copy.copy(self.cfg) cfg.nexenta_rest_port = 0 cfg.nexenta_use_https = True result = jsonrpc.NefProxy(proto, cfg.nas_share_path, cfg) self.assertIsInstance(result, jsonrpc.NefProxy) def test___init___iscsi(self): proto = 'iscsi' cfg = copy.copy(self.cfg) result = jsonrpc.NefProxy(proto, cfg.nas_share_path, cfg) self.assertIsInstance(result, jsonrpc.NefProxy) 
def test___init___nfs_no_rest_address(self): proto = 'nfs' cfg = copy.copy(self.cfg) cfg.nexenta_rest_address = '' result = jsonrpc.NefProxy(proto, cfg.nas_share_path, cfg) self.assertIsInstance(result, jsonrpc.NefProxy) def test___init___iscsi_no_rest_address(self): proto = 'iscsi' cfg = copy.copy(self.cfg) cfg.nexenta_rest_address = '' cfg.nexenta_host = '4.4.4.4' result = jsonrpc.NefProxy(proto, cfg.nas_share_path, cfg) self.assertIsInstance(result, jsonrpc.NefProxy) def test___init___invalid_storage_protocol(self): proto = 'invalid' cfg = copy.copy(self.cfg) self.assertRaises(jsonrpc.NefException, jsonrpc.NefProxy, proto, cfg.nas_share_path, cfg) @mock.patch('requests.packages.urllib3.disable_warnings') def test___init___no_ssl_cert_verify(self, disable_warnings): proto = 'nfs' cfg = copy.copy(self.cfg) cfg.driver_ssl_cert_verify = False disable_warnings.return_value = None result = jsonrpc.NefProxy(proto, cfg.nas_share_path, cfg) disable_warnings.assert_called() self.assertIsInstance(result, jsonrpc.NefProxy) def test_delete_bearer(self): self.assertIsNone(self.proxy.delete_bearer()) self.assertNotIn('Authorization', self.proxy.session.headers) self.proxy.session.headers['Authorization'] = 'Bearer token' self.assertIsNone(self.proxy.delete_bearer()) self.assertNotIn('Authorization', self.proxy.session.headers) def test_update_bearer(self): token = 'token' bearer = 'Bearer %s' % token self.assertNotIn('Authorization', self.proxy.session.headers) self.assertIsNone(self.proxy.update_bearer(token)) self.assertIn('Authorization', self.proxy.session.headers) self.assertEqual(self.proxy.session.headers['Authorization'], bearer) def test_update_token(self): token = 'token' bearer = 'Bearer %s' % token self.assertIsNone(self.proxy.update_token(token)) self.assertEqual(self.proxy.tokens[self.proxy.host], token) self.assertEqual(self.proxy.session.headers['Authorization'], bearer) def test_update_host(self): token = 'token' bearer = 'Bearer %s' % token host = self.cfg.nexenta_rest_address self.proxy.tokens[host] = token self.assertIsNone(self.proxy.update_host(host)) self.assertEqual(self.proxy.session.headers['Authorization'], bearer) def test_skip_update_host(self): host = 'nonexistent' self.assertIsNone(self.proxy.update_host(host)) @mock.patch('cinder.volume.drivers.nexenta.ns5.' 'jsonrpc.NefSettings.get') def test_update_lock(self, get_settings): guid = uuid.uuid4().hex settings = {'value': guid} get_settings.return_value = settings self.assertIsNone(self.proxy.update_lock()) path = ('%s:%s' % (guid, self.proxy.path)).encode('utf-8') expected = hashlib.md5(path, usedforsecurity=False).hexdigest() self.assertEqual(expected, self.proxy.lock) def test_url(self): path = '/path/to/api' result = self.proxy.url(path) expected = '%s://%s:%s%s' % (self.proxy.scheme, self.proxy.host, self.proxy.port, path) self.assertEqual(expected, result) @mock.patch('eventlet.greenthread.sleep') def test_delay(self, sleep): sleep.return_value = None for attempt in range(0, 10): expected = int(self.proxy.backoff_factor * (2 ** (attempt - 1))) self.assertIsNone(self.proxy.delay(attempt)) sleep.assert_called_with(expected) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/volume/drivers/nexenta/test_nexenta5_nfs.py0000664000175000017500000014404200000000000027116 0ustar00zuulzuul00000000000000# Copyright 2019 Nexenta Systems, Inc. # All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Unit tests for OpenStack Cinder volume driver.""" import hashlib import os from unittest import mock from oslo_utils import units from cinder import context from cinder import db from cinder.tests.unit.consistencygroup.fake_cgsnapshot import ( fake_cgsnapshot_obj as fake_cgsnapshot) from cinder.tests.unit.consistencygroup.fake_consistencygroup import ( fake_consistencyobject_obj as fake_cgroup) from cinder.tests.unit import fake_constants as fake from cinder.tests.unit.fake_snapshot import fake_snapshot_obj as fake_snapshot from cinder.tests.unit.fake_volume import fake_volume_obj as fake_volume from cinder.tests.unit.image import fake as fake_image from cinder.tests.unit import test from cinder.volume import configuration as conf from cinder.volume.drivers.nexenta.ns5 import jsonrpc from cinder.volume.drivers.nexenta.ns5 import nfs class TestNexentaNfsDriver(test.TestCase): def setUp(self): super(TestNexentaNfsDriver, self).setUp() self.ctxt = context.get_admin_context() self.cfg = mock.Mock(spec=conf.Configuration) self.cfg.volume_backend_name = 'nexenta_nfs' self.cfg.nexenta_group_snapshot_template = 'group-snapshot-%s' self.cfg.nexenta_origin_snapshot_template = 'origin-snapshot-%s' self.cfg.nexenta_dataset_description = '' self.cfg.nexenta_mount_point_base = '$state_path/mnt' self.cfg.nexenta_sparsed_volumes = True self.cfg.nexenta_qcow2_volumes = False self.cfg.nexenta_dataset_compression = 'on' self.cfg.nexenta_dataset_dedup = 'off' self.cfg.nfs_mount_point_base = '/mnt/test' self.cfg.nfs_mount_attempts = 3 self.cfg.nas_mount_options = 'vers=4' self.cfg.reserved_percentage = 20 self.cfg.nexenta_use_https = False self.cfg.driver_ssl_cert_verify = False self.cfg.nexenta_user = 'user' self.cfg.nexenta_password = 'pass' self.cfg.max_over_subscription_ratio = 20.0 self.cfg.nas_host = '1.1.1.2' self.cfg.nexenta_rest_address = '1.1.1.1' self.cfg.nexenta_rest_port = 8443 self.cfg.nexenta_rest_backoff_factor = 1 self.cfg.nexenta_rest_retry_count = 3 self.cfg.nexenta_rest_connect_timeout = 1 self.cfg.nexenta_rest_read_timeout = 1 self.cfg.nas_share_path = 'pool/share' self.cfg.nfs_mount_options = '-o vers=4' self.cfg.safe_get = self.fake_safe_get self.nef_mock = mock.Mock() self.mock_object(jsonrpc, 'NefRequest', return_value=self.nef_mock) self.drv = nfs.NexentaNfsDriver(configuration=self.cfg) self.drv.db = db self.drv.do_setup(self.ctxt) def fake_safe_get(self, key): try: value = getattr(self.cfg, key) except AttributeError: value = None return value def test_do_setup(self): self.assertIsNone(self.drv.do_setup(self.ctxt)) @mock.patch('cinder.volume.drivers.nexenta.ns5.' 'jsonrpc.NefNfs.get') @mock.patch('cinder.volume.drivers.nexenta.ns5.' 'jsonrpc.NefServices.get') @mock.patch('cinder.volume.drivers.nexenta.ns5.' 'jsonrpc.NefFilesystems.set') @mock.patch('cinder.volume.drivers.nexenta.ns5.' 
'jsonrpc.NefFilesystems.get') def test_check_for_setup_error(self, get_filesystem, set_filesystem, get_service, get_nfs): get_filesystem.return_value = { 'mountPoint': '/path/to/volume', 'nonBlockingMandatoryMode': False, 'smartCompression': False, 'isMounted': True } get_service.return_value = { 'state': 'online' } get_nfs.return_value = { 'shareState': 'online' } self.assertIsNone(self.drv.check_for_setup_error()) get_filesystem.assert_called_with(self.drv.root_path) set_filesystem.assert_not_called() get_service.assert_called_with('nfs') get_nfs.assert_called_with(self.drv.root_path) get_filesystem.return_value = { 'mountPoint': '/path/to/volume', 'nonBlockingMandatoryMode': True, 'smartCompression': True, 'isMounted': True } set_filesystem.return_value = {} payload = { 'nonBlockingMandatoryMode': False, 'smartCompression': False } self.assertIsNone(self.drv.check_for_setup_error()) get_filesystem.assert_called_with(self.drv.root_path) set_filesystem.assert_called_with(self.drv.root_path, payload) get_service.assert_called_with('nfs') get_nfs.assert_called_with(self.drv.root_path) get_filesystem.return_value = { 'mountPoint': '/path/to/volume', 'nonBlockingMandatoryMode': False, 'smartCompression': True, 'isMounted': True } payload = { 'smartCompression': False } set_filesystem.return_value = {} self.assertIsNone(self.drv.check_for_setup_error()) get_filesystem.assert_called_with(self.drv.root_path) set_filesystem.assert_called_with(self.drv.root_path, payload) get_service.assert_called_with('nfs') get_nfs.assert_called_with(self.drv.root_path) get_filesystem.return_value = { 'mountPoint': '/path/to/volume', 'nonBlockingMandatoryMode': True, 'smartCompression': False, 'isMounted': True } payload = { 'nonBlockingMandatoryMode': False } set_filesystem.return_value = {} self.assertIsNone(self.drv.check_for_setup_error()) get_filesystem.assert_called_with(self.drv.root_path) set_filesystem.assert_called_with(self.drv.root_path, payload) get_service.assert_called_with('nfs') get_nfs.assert_called_with(self.drv.root_path) get_filesystem.return_value = { 'mountPoint': 'none', 'nonBlockingMandatoryMode': False, 'smartCompression': False, 'isMounted': False } self.assertRaises(jsonrpc.NefException, self.drv.check_for_setup_error) get_filesystem.return_value = { 'mountPoint': '/path/to/volume', 'nonBlockingMandatoryMode': False, 'smartCompression': False, 'isMounted': False } self.assertRaises(jsonrpc.NefException, self.drv.check_for_setup_error) get_service.return_value = { 'state': 'online' } self.assertRaises(jsonrpc.NefException, self.drv.check_for_setup_error) get_nfs.return_value = { 'shareState': 'offline' } self.assertRaises(jsonrpc.NefException, self.drv.check_for_setup_error) @mock.patch('cinder.volume.drivers.nexenta.ns5.' 'nfs.NexentaNfsDriver._unmount_volume') @mock.patch('cinder.volume.drivers.nexenta.ns5.' 'jsonrpc.NefFilesystems.delete') @mock.patch('cinder.volume.drivers.nexenta.ns5.' 'jsonrpc.NefFilesystems.set') @mock.patch('cinder.volume.drivers.nexenta.ns5.' 'nfs.NexentaNfsDriver._create_regular_file') @mock.patch('cinder.volume.drivers.nexenta.ns5.' 'nfs.NexentaNfsDriver._create_sparsed_file') @mock.patch('cinder.volume.drivers.nexenta.ns5.' 'nfs.NexentaNfsDriver.local_path') @mock.patch('cinder.volume.drivers.nexenta.ns5.' 'nfs.NexentaNfsDriver._mount_volume') @mock.patch('cinder.volume.drivers.nexenta.ns5.' 'nfs.NexentaNfsDriver._set_volume_acl') @mock.patch('cinder.volume.drivers.nexenta.ns5.' 
'jsonrpc.NefFilesystems.create') def test_create_volume(self, create_volume, set_volume_acl, mount_volume, get_volume_local_path, create_sparsed_file, created_regular_file, set_volume, delete_volume, umount_volume): volume = fake_volume(self.ctxt) local_path = '/local/volume/path' create_volume.return_value = {} set_volume_acl.return_value = {} mount_volume.return_value = True get_volume_local_path.return_value = local_path create_sparsed_file.return_value = True created_regular_file.return_value = True set_volume.return_value = {} delete_volume.return_value = {} umount_volume.return_value = {} with mock.patch.object(self.drv, 'sparsed_volumes', True): self.assertIsNone(self.drv.create_volume(volume)) create_sparsed_file.assert_called_with(local_path, volume['size']) with mock.patch.object(self.drv, 'sparsed_volumes', False): self.assertIsNone(self.drv.create_volume(volume)) created_regular_file.assert_called_with(local_path, volume['size']) volume_path = self.drv._get_volume_path(volume) payload = { 'path': volume_path, 'compressionMode': 'off' } create_volume.assert_called_with(payload) set_volume_acl.assert_called_with(volume) payload = {'compressionMode': self.cfg.nexenta_dataset_compression} set_volume.assert_called_with(volume_path, payload) umount_volume.assert_called_with(volume) @mock.patch('cinder.volume.drivers.nexenta.ns5.' 'nfs.NexentaNfsDriver._unmount_volume') @mock.patch('cinder.volume.drivers.remotefs.' 'RemoteFSDriver.copy_image_to_volume') @mock.patch('cinder.volume.drivers.nexenta.ns5.' 'nfs.NexentaNfsDriver._mount_volume') def test_copy_image_to_volume(self, mount_volume, copy_image_to_volume, unmount_volume): volume = fake_volume(self.ctxt) image_service = fake_image.FakeImageService() image = image_service.images[fake.IMAGE_ID] mount_volume.return_value = True copy_image_to_volume.return_value = True unmount_volume.return_value = True self.drv.copy_image_to_volume(self.ctxt, volume, image_service, image['id']) mount_volume.assert_called_with(volume) copy_image_to_volume.assert_called_with(self.ctxt, volume, image_service, image['id']) unmount_volume.assert_called_with(volume) @mock.patch('cinder.volume.drivers.nexenta.ns5.' 'nfs.NexentaNfsDriver._unmount_volume') @mock.patch('cinder.volume.drivers.remotefs.' 'RemoteFSSnapDriverDistributed.copy_volume_to_image') @mock.patch('cinder.volume.drivers.nexenta.ns5.' 'nfs.NexentaNfsDriver._mount_volume') def test_copy_volume_to_image(self, mount_volume, copy_volume_to_image, unmount_volume): volume = fake_volume(self.ctxt) image_service = fake_image.FakeImageService() image = image_service.images[fake.IMAGE_ID] mount_volume.return_value = True copy_volume_to_image.return_value = True unmount_volume.return_value = True self.drv.copy_volume_to_image(self.ctxt, volume, image_service, image) mount_volume.assert_called_with(volume) copy_volume_to_image.assert_called_with(self.ctxt, volume, image_service, image) unmount_volume.assert_called_with(volume) @mock.patch('os.rmdir') @mock.patch('cinder.privsep.fs.umount') @mock.patch('os_brick.remotefs.remotefs.' 'RemoteFsClient._read_mounts') @mock.patch('cinder.volume.drivers.nfs.' 
'NfsDriver._get_mount_point_for_share') def test__ensure_share_unmounted(self, get_mount_point, list_mount_points, unmount_filesystem, remove_mount_point): mount_point = '/mount/point1' get_mount_point.return_value = mount_point list_mount_points.return_value = [ mount_point, '/mount/point2', '/mount/point3' ] unmount_filesystem.return_value = True remove_mount_point.return_value = True share = '1.1.1.1:/path/to/volume' self.assertIsNone(self.drv._ensure_share_unmounted(share)) get_mount_point.assert_called_with(share) unmount_filesystem.assert_called_with(mount_point) remove_mount_point.assert_called_with(mount_point) @mock.patch('cinder.volume.drivers.nfs.' 'NfsDriver._ensure_share_mounted') @mock.patch('cinder.volume.drivers.nexenta.ns5.' 'jsonrpc.NefFilesystems.get') def test__mount_volume(self, get_filesystem, mount_share): volume = fake_volume(self.ctxt) mount_point = '/path/to/volume' get_filesystem.return_value = { 'mountPoint': mount_point, 'isMounted': True } mount_share.return_value = True self.assertIsNone(self.drv._mount_volume(volume)) path = self.drv._get_volume_path(volume) payload = {'fields': 'mountPoint,isMounted'} get_filesystem.assert_called_with(path, payload) share = '%s:%s' % (self.drv.nas_host, mount_point) mount_share.assert_called_with(share) @mock.patch('cinder.volume.drivers.nexenta.ns5.' 'nfs.NexentaNfsDriver._ensure_share_unmounted') @mock.patch('cinder.volume.drivers.nexenta.ns5.' 'nfs.NexentaNfsDriver._get_volume_share') def test__unmount_volume(self, get_share, unmount_share): volume = fake_volume(self.ctxt) mount_point = '/path/to/volume' share = '%s:%s' % (self.drv.nas_host, mount_point) get_share.return_value = share unmount_share.return_value = True self.assertIsNone(self.drv._unmount_volume(volume)) get_share.assert_called_with(volume) unmount_share.assert_called_with(share) @mock.patch('cinder.volume.drivers.remotefs.' 'RemoteFSDriver._create_qcow2_file') @mock.patch('cinder.volume.drivers.remotefs.' 'RemoteFSDriver._create_sparsed_file') def test__create_sparsed_file(self, create_sparsed_file, create_qcow2_file): create_sparsed_file.return_value = True create_qcow2_file.return_value = True path = '/path/to/file' size = 1 with mock.patch.object(self.cfg, 'nexenta_qcow2_volumes', True): self.assertIsNone(self.drv._create_sparsed_file(path, size)) create_qcow2_file.assert_called_with(path, size) with mock.patch.object(self.cfg, 'nexenta_qcow2_volumes', False): self.assertIsNone(self.drv._create_sparsed_file(path, size)) create_sparsed_file.assert_called_with(path, size) @mock.patch('cinder.volume.drivers.nexenta.ns5.' 'nfs.NexentaNfsDriver.delete_volume') @mock.patch('cinder.volume.drivers.nexenta.ns5.' 'jsonrpc.NefHpr.delete') @mock.patch('cinder.volume.drivers.nexenta.ns5.' 'jsonrpc.NefHpr.get') @mock.patch('cinder.volume.drivers.nexenta.ns5.' 'jsonrpc.NefHpr.start') @mock.patch('cinder.volume.drivers.nexenta.ns5.' 
'jsonrpc.NefHpr.create') def test_migrate_volume(self, create_service, start_service, get_service, delete_service, delete_volume): create_service.return_value = {} start_service.return_value = {} get_service.return_value = { 'state': 'disabled' } delete_service.return_value = {} delete_volume.return_value = {} volume = fake_volume(self.ctxt) dst_host = '4.4.4.4' dst_port = 8443 dst_path = 'tank/nfs' location_info = 'NexentaNfsDriver:%s:/%s' % (dst_host, dst_path) host = { 'host': 'stack@nexenta_nfs#fake_nfs', 'capabilities': { 'vendor_name': 'Nexenta', 'nef_url': dst_host, 'nef_port': dst_port, 'storage_protocol': 'NFS', 'free_capacity_gb': 32, 'location_info': location_info } } result = self.drv.migrate_volume(self.ctxt, volume, host) expected = (True, None) svc = 'cinder-migrate-%s' % volume['name'] src = self.drv._get_volume_path(volume) dst = '%s/%s' % (dst_path, volume['name']) payload = { 'name': svc, 'sourceDataset': src, 'destinationDataset': dst, 'type': 'scheduled', 'sendShareNfs': True, 'isSource': True, 'remoteNode': { 'host': dst_host, 'port': dst_port } } create_service.assert_called_with(payload) start_service.assert_called_with(svc) get_service.assert_called_with(svc) payload = { 'destroySourceSnapshots': True, 'destroyDestinationSnapshots': True } delete_service.assert_called_with(svc, payload) delete_volume.assert_called_with(volume) self.assertEqual(expected, result) @mock.patch('cinder.volume.drivers.nexenta.ns5.' 'nfs.NexentaNfsDriver._unmount_volume') def test_terminate_connection(self, unmount_volume): unmount_volume.return_value = True volume = fake_volume(self.ctxt) connector = { 'initiator': 'iqn:cinder-client', 'multipath': True } self.assertIsNone(self.drv.terminate_connection(volume, connector)) unmount_volume.assert_called_with(volume) @mock.patch('cinder.volume.drivers.nexenta.ns5.' 'nfs.NexentaNfsDriver._get_volume_share') def test_initialize_connection(self, get_share): volume = fake_volume(self.ctxt) path = self.drv._get_volume_path(volume) share = '%s:/%s' % (self.drv.nas_host, path) get_share.return_value = share connector = { 'initiator': 'iqn:cinder-client', 'multipath': True } result = self.drv.initialize_connection(volume, connector) get_share.assert_called_with(volume) base = self.cfg.nexenta_mount_point_base expected = { 'driver_volume_type': 'nfs', 'mount_point_base': base, 'data': { 'export': share, 'name': 'volume' } } self.assertEqual(expected, result) def test_ensure_export(self): volume = fake_volume(self.ctxt) self.assertIsNone(self.drv.ensure_export(self.ctxt, volume)) @mock.patch('cinder.volume.drivers.nexenta.ns5.' 'jsonrpc.NefFilesystems.delete') @mock.patch('cinder.volume.drivers.nexenta.ns5.' 'nfs.NexentaNfsDriver._unmount_volume') def test_delete_volume(self, unmount_volume, delete_filesystem): volume = fake_volume(self.ctxt) path = self.drv._get_volume_path(volume) unmount_volume.return_value = {} delete_filesystem.return_value = {} self.assertIsNone(self.drv.delete_volume(volume)) unmount_volume.assert_called_with(volume) payload = {'force': True, 'snapshots': True} delete_filesystem.assert_called_with(path, payload) @mock.patch('os.rmdir') def test__delete(self, rmdir): rmdir.return_value = True path = '/path/to/volume/mountpoint' self.assertIsNone(self.drv._delete(path)) rmdir.assert_called_with(path) @mock.patch('cinder.volume.drivers.nexenta.ns5.' 'nfs.NexentaNfsDriver._unmount_volume') @mock.patch('oslo_concurrency.processutils.execute') @mock.patch('cinder.volume.drivers.nexenta.ns5.' 
'nfs.NexentaNfsDriver.local_path') @mock.patch('cinder.volume.drivers.nexenta.ns5.' 'nfs.NexentaNfsDriver._mount_volume') def test_extend_volume(self, mount_volume, get_volume_local_path, execute_command, unmount_volume): volume = fake_volume(self.ctxt) root_helper = 'sudo cinder-rootwrap /etc/cinder/rootwrap.conf' local_path = '/path/to/volume/file' new_size = volume['size'] * 2 bs = 1 * units.Mi seek = volume['size'] * units.Ki count = (new_size - volume['size']) * units.Ki mount_volume.return_value = True get_volume_local_path.return_value = local_path execute_command.return_value = True unmount_volume.return_value = True with mock.patch.object(self.drv, 'sparsed_volumes', False): self.assertIsNone(self.drv.extend_volume(volume, new_size)) execute_command.assert_called_with('dd', 'if=/dev/zero', 'of=%s' % local_path, 'bs=%d' % bs, 'seek=%d' % seek, 'count=%d' % count, run_as_root=True, root_helper=root_helper) with mock.patch.object(self.drv, 'sparsed_volumes', True): self.assertIsNone(self.drv.extend_volume(volume, new_size)) execute_command.assert_called_with('truncate', '-s', '%dG' % new_size, local_path, run_as_root=True, root_helper=root_helper) mount_volume.assert_called_with(volume) unmount_volume.assert_called_with(volume) @mock.patch('cinder.volume.drivers.nexenta.ns5.' 'jsonrpc.NefSnapshots.create') def test_create_snapshot(self, create_snapshot): volume = fake_volume(self.ctxt) snapshot = fake_snapshot(self.ctxt) snapshot.volume = volume create_snapshot.return_value = {} self.assertIsNone(self.drv.create_snapshot(snapshot)) path = self.drv._get_snapshot_path(snapshot) payload = {'path': path} create_snapshot.assert_called_with(payload) @mock.patch('cinder.volume.drivers.nexenta.ns5.' 'jsonrpc.NefSnapshots.delete') def test_delete_snapshot(self, delete_snapshot): volume = fake_volume(self.ctxt) snapshot = fake_snapshot(self.ctxt) snapshot.volume = volume delete_snapshot.return_value = {} self.assertIsNone(self.drv.delete_snapshot(snapshot)) path = self.drv._get_snapshot_path(snapshot) payload = {'defer': True} delete_snapshot.assert_called_with(path, payload) def test_snapshot_revert_use_temp_snapshot(self): result = self.drv.snapshot_revert_use_temp_snapshot() expected = False self.assertEqual(expected, result) @mock.patch('cinder.volume.drivers.nexenta.ns5.' 'jsonrpc.NefFilesystems.rollback') def test_revert_to_snapshot(self, rollback_volume): volume = fake_volume(self.ctxt) snapshot = fake_snapshot(self.ctxt) snapshot.volume = volume rollback_volume.return_value = {} self.assertIsNone( self.drv.revert_to_snapshot(self.ctxt, volume, snapshot) ) path = self.drv._get_volume_path(volume) payload = {'snapshot': snapshot['name']} rollback_volume.assert_called_with(path, payload) @mock.patch('cinder.volume.drivers.nexenta.ns5.' 'nfs.NexentaNfsDriver.extend_volume') @mock.patch('cinder.volume.drivers.nexenta.ns5.' 'jsonrpc.NefFilesystems.mount') @mock.patch('cinder.volume.drivers.nexenta.ns5.' 'jsonrpc.NefFilesystems.unmount') @mock.patch('cinder.volume.drivers.nexenta.ns5.' 
'jsonrpc.NefSnapshots.clone') def test_create_volume_from_snapshot(self, clone_snapshot, unmount_filesystem, mount_filesystem, extend_volume): volume = fake_volume(self.ctxt) snapshot = fake_snapshot(self.ctxt) snapshot.volume = volume clone_size = 10 clone_spec = { 'id': fake.VOLUME2_ID, 'size': clone_size } clone = fake_volume(self.ctxt, **clone_spec) snapshot_path = self.drv._get_snapshot_path(snapshot) clone_path = self.drv._get_volume_path(clone) clone_snapshot.return_value = {} unmount_filesystem.return_value = {} mount_filesystem.return_value = {} extend_volume.return_value = None self.assertIsNone( self.drv.create_volume_from_snapshot(clone, snapshot) ) clone_payload = {'targetPath': clone_path} clone_snapshot.assert_called_with(snapshot_path, clone_payload) unmount_filesystem.assert_called_with(clone_path) mount_filesystem.assert_called_with(clone_path) extend_volume.assert_called_with(clone, clone_size) @mock.patch('cinder.volume.drivers.nexenta.ns5.' 'nfs.NexentaNfsDriver.delete_snapshot') @mock.patch('cinder.volume.drivers.nexenta.ns5.' 'nfs.NexentaNfsDriver.create_volume_from_snapshot') @mock.patch('cinder.volume.drivers.nexenta.ns5.' 'nfs.NexentaNfsDriver.create_snapshot') def test_create_cloned_volume(self, create_snapshot, create_volume, delete_snapshot): volume = fake_volume(self.ctxt) clone_spec = {'id': fake.VOLUME2_ID} clone = fake_volume(self.ctxt, **clone_spec) create_snapshot.return_value = {} create_volume.return_value = {} delete_snapshot.return_value = {} self.assertIsNone(self.drv.create_cloned_volume(clone, volume)) snapshot = { 'name': self.drv.origin_snapshot_template % clone['id'], 'volume_id': volume['id'], 'volume_name': volume['name'], 'volume_size': volume['size'] } create_snapshot.assert_called_with(snapshot) create_volume.assert_called_with(clone, snapshot) create_volume.side_effect = jsonrpc.NefException({ 'message': 'Failed to create volume', 'code': 'EBUSY' }) self.assertRaises(jsonrpc.NefException, self.drv.create_cloned_volume, clone, volume) create_snapshot.side_effect = jsonrpc.NefException({ 'message': 'Failed to open dataset', 'code': 'ENOENT' }) self.assertRaises(jsonrpc.NefException, self.drv.create_cloned_volume, clone, volume) def test_create_consistencygroup(self): cgroup = fake_cgroup(self.ctxt) result = self.drv.create_consistencygroup(self.ctxt, cgroup) expected = {} self.assertEqual(expected, result) @mock.patch('cinder.volume.drivers.nexenta.ns5.' 'nfs.NexentaNfsDriver.delete_volume') def test_delete_consistencygroup(self, delete_volume): cgroup = fake_cgroup(self.ctxt) volume1 = fake_volume(self.ctxt) volume2_spec = {'id': fake.VOLUME2_ID} volume2 = fake_volume(self.ctxt, **volume2_spec) volumes = [volume1, volume2] delete_volume.return_value = {} result = self.drv.delete_consistencygroup(self.ctxt, cgroup, volumes) expected = ({}, []) self.assertEqual(expected, result) def test_update_consistencygroup(self): cgroup = fake_cgroup(self.ctxt) volume1 = fake_volume(self.ctxt) volume2_spec = {'id': fake.VOLUME2_ID} volume2 = fake_volume(self.ctxt, **volume2_spec) volume3_spec = {'id': fake.VOLUME3_ID} volume3 = fake_volume(self.ctxt, **volume3_spec) volume4_spec = {'id': fake.VOLUME4_ID} volume4 = fake_volume(self.ctxt, **volume4_spec) add_volumes = [volume1, volume2] remove_volumes = [volume3, volume4] result = self.drv.update_consistencygroup(self.ctxt, cgroup, add_volumes, remove_volumes) expected = ({}, [], []) self.assertEqual(expected, result) @mock.patch('cinder.volume.drivers.nexenta.ns5.' 
'jsonrpc.NefSnapshots.delete') @mock.patch('cinder.volume.drivers.nexenta.ns5.' 'jsonrpc.NefSnapshots.rename') @mock.patch('cinder.volume.drivers.nexenta.ns5.' 'jsonrpc.NefSnapshots.create') def test_create_cgsnapshot(self, create_snapshot, rename_snapshot, delete_snapshot): cgsnapshot = fake_cgsnapshot(self.ctxt) volume = fake_volume(self.ctxt) snapshot = fake_snapshot(self.ctxt) snapshot.volume = volume snapshots = [snapshot] cgsnapshot_name = ( self.cfg.nexenta_group_snapshot_template % cgsnapshot['id']) cgsnapshot_path = '%s@%s' % (self.drv.root_path, cgsnapshot_name) snapshot_path = '%s/%s@%s' % (self.drv.root_path, snapshot['volume_name'], cgsnapshot_name) create_snapshot.return_value = {} rename_snapshot.return_value = {} delete_snapshot.return_value = {} result = self.drv.create_cgsnapshot(self.ctxt, cgsnapshot, snapshots) create_payload = {'path': cgsnapshot_path, 'recursive': True} create_snapshot.assert_called_with(create_payload) rename_payload = {'newName': snapshot['name']} rename_snapshot.assert_called_with(snapshot_path, rename_payload) delete_payload = {'defer': True, 'recursive': True} delete_snapshot.assert_called_with(cgsnapshot_path, delete_payload) expected = ({}, []) self.assertEqual(expected, result) @mock.patch('cinder.volume.drivers.nexenta.ns5.' 'nfs.NexentaNfsDriver.delete_snapshot') def test_delete_cgsnapshot(self, delete_snapshot): cgsnapshot = fake_cgsnapshot(self.ctxt) snapshot = fake_snapshot(self.ctxt) volume = fake_volume(self.ctxt) snapshot.volume = volume snapshots = [snapshot] delete_snapshot.return_value = {} result = self.drv.delete_cgsnapshot(self.ctxt, cgsnapshot, snapshots) delete_snapshot.assert_called_with(snapshot) expected = ({}, []) self.assertEqual(expected, result) @mock.patch('cinder.volume.drivers.nexenta.ns5.' 'nfs.NexentaNfsDriver.create_volume_from_snapshot') def test_create_consistencygroup_from_src_snapshots(self, create_volume): cgroup = fake_cgroup(self.ctxt) cgsnapshot = fake_cgsnapshot(self.ctxt) volume = fake_volume(self.ctxt) snapshot = fake_snapshot(self.ctxt) snapshot.volume = volume snapshots = [snapshot] clone_spec = {'id': fake.VOLUME2_ID} clone = fake_volume(self.ctxt, **clone_spec) clones = [clone] create_volume.return_value = {} result = self.drv.create_consistencygroup_from_src(self.ctxt, cgroup, clones, cgsnapshot, snapshots, None, None) create_volume.assert_called_with(clone, snapshot) expected = ({}, []) self.assertEqual(expected, result) @mock.patch('cinder.volume.drivers.nexenta.ns5.' 'jsonrpc.NefSnapshots.delete') @mock.patch('cinder.volume.drivers.nexenta.ns5.' 'nfs.NexentaNfsDriver.create_volume_from_snapshot') @mock.patch('cinder.volume.drivers.nexenta.ns5.' 
'jsonrpc.NefSnapshots.create') def test_create_consistencygroup_from_src_volumes(self, create_snapshot, create_volume, delete_snapshot): src_cgroup = fake_cgroup(self.ctxt) dst_cgroup_spec = {'id': fake.CONSISTENCY_GROUP2_ID} dst_cgroup = fake_cgroup(self.ctxt, **dst_cgroup_spec) src_volume = fake_volume(self.ctxt) src_volumes = [src_volume] dst_volume_spec = {'id': fake.VOLUME2_ID} dst_volume = fake_volume(self.ctxt, **dst_volume_spec) dst_volumes = [dst_volume] create_snapshot.return_value = {} create_volume.return_value = {} delete_snapshot.return_value = {} result = self.drv.create_consistencygroup_from_src(self.ctxt, dst_cgroup, dst_volumes, None, None, src_cgroup, src_volumes) snapshot_name = ( self.cfg.nexenta_origin_snapshot_template % dst_cgroup['id']) snapshot_path = '%s@%s' % (self.drv.root_path, snapshot_name) create_payload = {'path': snapshot_path, 'recursive': True} create_snapshot.assert_called_with(create_payload) snapshot = { 'name': snapshot_name, 'volume_id': src_volume['id'], 'volume_name': src_volume['name'], 'volume_size': src_volume['size'] } create_volume.assert_called_with(dst_volume, snapshot) delete_payload = {'defer': True, 'recursive': True} delete_snapshot.assert_called_with(snapshot_path, delete_payload) expected = ({}, []) self.assertEqual(expected, result) @mock.patch('cinder.volume.drivers.nexenta.ns5.' 'nfs.NexentaNfsDriver._get_volume_share') def test__local_volume_dir(self, get_share): volume = fake_volume(self.ctxt) share = '1.1.1.1:/path/to/share' get_share.return_value = share result = self.drv._local_volume_dir(volume) get_share.assert_called_with(volume) share = share.encode('utf-8') digest = hashlib.md5(share, usedforsecurity=False).hexdigest() expected = os.path.join(self.cfg.nexenta_mount_point_base, digest) self.assertEqual(expected, result) @mock.patch('cinder.volume.drivers.nexenta.ns5.' 'nfs.NexentaNfsDriver._local_volume_dir') def test_local_path(self, get_local): volume = fake_volume(self.ctxt) local_dir = '/path/to' get_local.return_value = local_dir result = self.drv.local_path(volume) get_local.assert_called_with(volume) expected = os.path.join(local_dir, 'volume') self.assertEqual(expected, result) @mock.patch('cinder.volume.drivers.nexenta.ns5.' 'jsonrpc.NefFilesystems.acl') def test__set_volume_acl(self, set_acl): volume = fake_volume(self.ctxt) set_acl.return_value = {} path = self.drv._get_volume_path(volume) payload = { 'type': 'allow', 'principal': 'everyone@', 'permissions': ['full_set'], 'flags': ['file_inherit', 'dir_inherit'] } self.assertIsNone(self.drv._set_volume_acl(volume)) set_acl.assert_called_with(path, payload) @mock.patch('cinder.volume.drivers.nexenta.ns5.' 
'jsonrpc.NefFilesystems.get') def test__get_volume_share(self, get_filesystem): volume = fake_volume(self.ctxt) path = self.drv._get_volume_path(volume) mount_point = '/path/to' get_filesystem.return_value = {'mountPoint': mount_point} result = self.drv._get_volume_share(volume) payload = {'fields': 'mountPoint'} get_filesystem.assert_called_with(path, payload) expected = '%s:%s' % (self.drv.nas_host, mount_point) self.assertEqual(expected, result) def test__get_volume_path(self): volume = fake_volume(self.ctxt) result = self.drv._get_volume_path(volume) expected = '%s/%s' % (self.drv.root_path, volume['name']) self.assertEqual(expected, result) def test__get_snapshot_path(self): volume = fake_volume(self.ctxt) snapshot = fake_snapshot(self.ctxt) snapshot.volume = volume result = self.drv._get_snapshot_path(snapshot) expected = '%s/%s@%s' % (self.drv.root_path, snapshot['volume_name'], snapshot['name']) self.assertEqual(expected, result) @mock.patch('cinder.volume.drivers.nexenta.ns5.' 'jsonrpc.NefFilesystems.get') def test_get_volume_stats(self, get_filesystem): available = 100 used = 75 get_filesystem.return_value = { 'mountPoint': '/path/to', 'bytesAvailable': available * units.Gi, 'bytesUsed': used * units.Gi } result = self.drv.get_volume_stats(True) payload = {'fields': 'mountPoint,bytesAvailable,bytesUsed'} get_filesystem.assert_called_with(self.drv.root_path, payload) self.assertEqual(self.drv._stats, result) @mock.patch('cinder.volume.drivers.nexenta.ns5.' 'jsonrpc.NefFilesystems.get') def test_update_volume_stats(self, get_filesystem): available = 8 used = 2 share = '%s:/%s' % (self.drv.nas_host, self.drv.root_path) get_filesystem.return_value = { 'mountPoint': '/%s' % self.drv.root_path, 'bytesAvailable': available * units.Gi, 'bytesUsed': used * units.Gi } location_info = '%(driver)s:%(share)s' % { 'driver': self.drv.__class__.__name__, 'share': share } expected = { 'vendor_name': 'Nexenta', 'dedup': self.cfg.nexenta_dataset_dedup, 'compression': self.cfg.nexenta_dataset_compression, 'description': self.cfg.nexenta_dataset_description, 'nef_url': self.cfg.nexenta_rest_address, 'nef_port': self.cfg.nexenta_rest_port, 'driver_version': self.drv.VERSION, 'storage_protocol': 'NFS', 'sparsed_volumes': self.cfg.nexenta_sparsed_volumes, 'total_capacity_gb': used + available, 'free_capacity_gb': available, 'reserved_percentage': self.cfg.reserved_percentage, 'QoS_support': False, 'multiattach': True, 'consistencygroup_support': True, 'consistent_group_snapshot_enabled': True, 'volume_backend_name': self.cfg.volume_backend_name, 'location_info': location_info, 'nfs_mount_point_base': self.cfg.nexenta_mount_point_base } self.assertIsNone(self.drv._update_volume_stats()) self.assertEqual(expected, self.drv._stats) @mock.patch('cinder.volume.drivers.nexenta.ns5.' 
'jsonrpc.NefFilesystems.list') def test__get_existing_volume(self, list_filesystems): volume = fake_volume(self.ctxt) parent = self.drv.root_path name = volume['name'] path = self.drv._get_volume_path(volume) list_filesystems.return_value = [{ 'name': name, 'path': path }] result = self.drv._get_existing_volume({'source-name': name}) payload = { 'path': path, 'parent': parent, 'fields': 'path', 'recursive': False } list_filesystems.assert_called_with(payload) expected = { 'name': name, 'path': path } self.assertEqual(expected, result) def test__check_already_managed_snapshot(self): volume = fake_volume(self.ctxt) snapshot = fake_snapshot(self.ctxt) snapshot.volume = volume result = self.drv._check_already_managed_snapshot(snapshot) expected = False self.assertEqual(expected, result) @mock.patch('cinder.volume.drivers.nexenta.ns5.' 'jsonrpc.NefSnapshots.list') def test__get_existing_snapshot(self, list_snapshots): volume = fake_volume(self.ctxt) snapshot = fake_snapshot(self.ctxt) snapshot.volume = volume name = snapshot['name'] path = self.drv._get_snapshot_path(snapshot) parent = self.drv._get_volume_path(volume) list_snapshots.return_value = [{ 'name': name, 'path': path }] payload = {'source-name': name} result = self.drv._get_existing_snapshot(snapshot, payload) payload = { 'parent': parent, 'fields': 'name,path', 'recursive': False, 'name': name } list_snapshots.assert_called_with(payload) expected = { 'name': name, 'path': path, 'volume_name': volume['name'], 'volume_size': volume['size'] } self.assertEqual(expected, result) @mock.patch('cinder.volume.drivers.nexenta.ns5.' 'jsonrpc.NefFilesystems.rename') @mock.patch('cinder.volume.drivers.nexenta.ns5.' 'nfs.NexentaNfsDriver._get_existing_volume') def test_manage_existing(self, get_existing_volume, rename_volume): existing_volume = fake_volume(self.ctxt) manage_volume_spec = {'id': fake.VOLUME2_ID} manage_volume = fake_volume(self.ctxt, **manage_volume_spec) existing_name = existing_volume['name'] existing_path = self.drv._get_volume_path(existing_volume) manage_path = self.drv._get_volume_path(manage_volume) get_existing_volume.return_value = { 'name': existing_name, 'path': existing_path } rename_volume.return_value = {} payload = {'source-name': existing_name} self.assertIsNone(self.drv.manage_existing(manage_volume, payload)) get_existing_volume.assert_called_with(payload) payload = {'newPath': manage_path} rename_volume.assert_called_with(existing_path, payload) @mock.patch('cinder.volume.drivers.nexenta.ns5.' 'nfs.NexentaNfsDriver._unmount_volume') @mock.patch('os.path.getsize') @mock.patch('cinder.volume.drivers.nexenta.ns5.' 'nfs.NexentaNfsDriver.local_path') @mock.patch('cinder.volume.drivers.nexenta.ns5.' 'nfs.NexentaNfsDriver._mount_volume') @mock.patch('cinder.volume.drivers.nexenta.ns5.' 'nfs.NexentaNfsDriver._set_volume_acl') @mock.patch('cinder.volume.drivers.nexenta.ns5.' 
'nfs.NexentaNfsDriver._get_existing_volume') def test_manage_existing_get_size(self, get_volume, set_acl, mount_volume, get_local, get_size, unmount_volume): volume = fake_volume(self.ctxt) name = volume['name'] size = volume['size'] path = self.drv._get_volume_path(volume) get_volume.return_value = { 'name': name, 'path': path } set_acl.return_value = {} mount_volume.return_value = True get_local.return_value = '/path/to/volume/file' get_size.return_value = size * units.Gi unmount_volume.return_value = True payload = {'source-name': name} result = self.drv.manage_existing_get_size(volume, payload) expected = size self.assertEqual(expected, result) @mock.patch('cinder.volume.drivers.nexenta.ns5.' 'jsonrpc.NefFilesystems.list') def test_get_manageable_volumes(self, list_filesystems): volume = fake_volume(self.ctxt) volumes = [volume] size = volume['size'] path = self.drv._get_volume_path(volume) guid = 12345 parent = self.drv.root_path list_filesystems.return_value = [{ 'guid': guid, 'parent': parent, 'path': path, 'bytesUsed': size * units.Gi }] result = self.drv.get_manageable_volumes(volumes, None, 1, 0, 'size', 'asc') payload = { 'parent': parent, 'fields': 'guid,parent,path,bytesUsed', 'recursive': False } list_filesystems.assert_called_with(payload) expected = [{ 'cinder_id': volume['id'], 'extra_info': None, 'reason_not_safe': 'Volume already managed', 'reference': { 'source-guid': guid, 'source-name': volume['name'] }, 'safe_to_manage': False, 'size': volume['size'] }] self.assertEqual(expected, result) def test_unmanage(self): volume = fake_volume(self.ctxt) self.assertIsNone(self.drv.unmanage(volume)) @mock.patch('cinder.volume.drivers.nexenta.ns5.' 'jsonrpc.NefSnapshots.rename') @mock.patch('cinder.volume.drivers.nexenta.ns5.' 'nfs.NexentaNfsDriver._get_existing_snapshot') def test_manage_existing_snapshot(self, get_existing_snapshot, rename_snapshot): volume = fake_volume(self.ctxt) existing_snapshot = fake_snapshot(self.ctxt) existing_snapshot.volume = volume manage_snapshot_spec = {'id': fake.SNAPSHOT2_ID} manage_snapshot = fake_snapshot(self.ctxt, **manage_snapshot_spec) manage_snapshot.volume = volume existing_name = existing_snapshot['name'] manage_name = manage_snapshot['name'] volume_name = volume['name'] volume_size = volume['size'] existing_path = self.drv._get_snapshot_path(existing_snapshot) get_existing_snapshot.return_value = { 'name': existing_name, 'path': existing_path, 'volume_name': volume_name, 'volume_size': volume_size } rename_snapshot.return_value = {} payload = {'source-name': existing_name} self.assertIsNone( self.drv.manage_existing_snapshot(manage_snapshot, payload) ) get_existing_snapshot.assert_called_with(manage_snapshot, payload) payload = {'newName': manage_name} rename_snapshot.assert_called_with(existing_path, payload) @mock.patch('cinder.volume.drivers.nexenta.ns5.' 
'nfs.NexentaNfsDriver._get_existing_snapshot') def test_manage_existing_snapshot_get_size(self, get_snapshot): volume = fake_volume(self.ctxt) snapshot = fake_snapshot(self.ctxt) snapshot.volume = volume snapshot_name = snapshot['name'] volume_name = volume['name'] volume_size = volume['size'] snapshot_path = self.drv._get_snapshot_path(snapshot) get_snapshot.return_value = { 'name': snapshot_name, 'path': snapshot_path, 'volume_name': volume_name, 'volume_size': volume_size } payload = {'source-name': snapshot_name} result = self.drv.manage_existing_snapshot_get_size(volume, payload) expected = volume['size'] self.assertEqual(expected, result) @mock.patch('cinder.objects.VolumeList.get_all_by_host') @mock.patch('cinder.volume.drivers.nexenta.ns5.' 'jsonrpc.NefSnapshots.list') def test_get_manageable_snapshots(self, list_snapshots, list_volumes): volume = fake_volume(self.ctxt) volumes = [volume] snapshot = fake_snapshot(self.ctxt) snapshot.volume = volume snapshots = [snapshot] guid = 12345 name = snapshot['name'] path = self.drv._get_snapshot_path(snapshot) parent = self.drv._get_volume_path(volume) list_snapshots.return_value = [{ 'name': name, 'path': path, 'guid': guid, 'parent': parent, 'hprService': '', 'snaplistId': '' }] list_volumes.return_value = volumes result = self.drv.get_manageable_snapshots(snapshots, None, 1, 0, 'size', 'asc') payload = { 'parent': self.drv.root_path, 'fields': 'name,guid,path,parent,hprService,snaplistId', 'recursive': True } list_snapshots.assert_called_with(payload) expected = [{ 'cinder_id': snapshot['id'], 'extra_info': None, 'reason_not_safe': 'Snapshot already managed', 'source_reference': { 'name': volume['name'] }, 'reference': { 'source-guid': guid, 'source-name': snapshot['name'] }, 'safe_to_manage': False, 'size': volume['size'] }] self.assertEqual(expected, result) def test_unmanage_snapshot(self): volume = fake_volume(self.ctxt) snapshot = fake_snapshot(self.ctxt) snapshot.volume = volume self.assertIsNone(self.drv.unmanage_snapshot(snapshot)) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315577.3031204 cinder-27.0.0/cinder/tests/unit/volume/drivers/open_e/0000775000175000017500000000000000000000000022706 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/volume/drivers/open_e/__init__.py0000664000175000017500000000000000000000000025005 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/volume/drivers/open_e/test_common.py0000664000175000017500000002120300000000000025605 0ustar00zuulzuul00000000000000# Copyright (c) 2023 Open-E, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
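"""Unit tests for the Open-E JovianDSS jdss_common naming helpers."""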
import copy from cinder import exception from cinder.tests.unit import test from cinder.volume.drivers.open_e.jovian_common import jdss_common as jcom UUID_1 = '12345678-1234-1234-1234-000000000001' UUID_2 = '12345678-1234-1234-1234-000000000002' UUID_S1 = '12345678-1234-1234-1234-100000000001' UUID_S2 = '12345678-1234-1234-1234-100000000002' V_UUID_1 = 'v_12345678-1234-1234-1234-000000000001' V_UUID_2 = 'v_12345678-1234-1234-1234-000000000002' V_UUID_3 = 'v_12345678-1234-1234-1234-000000000003' S_UUID_1 = f's_{UUID_S1}_{UUID_1}' S_UUID_2 = f's_{UUID_S2}_{UUID_1}' T_UUID_1 = 't_12345678-1234-1234-1234-000000000001' VOLUME_GET_THAT_IS_CLONE = { "origin": f"Pool-0/{V_UUID_1}@{S_UUID_1}", "relatime": None, "acltype": None, "vscan": None, "full_name": f"Pool-0/{jcom.vname(UUID_2)}", "userrefs": None, "primarycache": "all", "logbias": "latency", "creation": "1695078560", "sync": "always", "is_clone": True, "dedup": "off", "sharenfs": None, "receive_resume_token": None, "volsize": "1073741824", "referenced": "57344", "sharesmb": None, "createtxg": "19812058", "reservation": "0", "scontext": None, "mountpoint": None, "casesensitivity": None, "guid": "4947994863040470005", "usedbyrefreservation": "0", "dnodesize": None, "written": "0", "logicalused": "0", "compressratio": "1.00", "rootcontext": "none", "default_scsi_id": "5c02d042ed8dbce2", "type": "volume", "compression": "lz4", "snapdir": None, "overlay": None, "encryption": "off", "xattr": None, "volmode": "default", "copies": "1", "snapshot_limit": "18446744073709551615", "aclinherit": None, "defcontext": "none", "readonly": "off", "version": None, "recordsize": None, "filesystem_limit": None, "mounted": None, "mlslabel": "none", "secondarycache": "all", "refreservation": "0", "available": "954751713280", "san:volume_id": "5c02d042ed8dbce2570c8d5dc276dd6a2431e138", "encryptionroot": None, "exec": None, "refquota": None, "refcompressratio": "1.00", "quota": None, "utf8only": None, "keylocation": "none", "snapdev": "hidden", "snapshot_count": "18446744073709551615", "fscontext": "none", "clones": None, "canmount": None, "keystatus": None, "atime": None, "usedbysnapshots": "0", "normalization": None, "usedbychildren": "0", "volblocksize": "65536", "usedbydataset": "0", "objsetid": "19228", "name": "a2", "defer_destroy": None, "pbkdf2iters": "0", "checksum": "on", "redundant_metadata": "all", "filesystem_count": None, "devices": None, "keyformat": "none", "setuid": None, "used": "0", "logicalreferenced": "28672", "context": "none", "zoned": None, "nbmand": None, } SNAPSHOT_GET = { 'referenced': '57344', 'userrefs': '0', 'primarycache': 'all', 'creation': '2023-06-28 16:49:33', 'volsize': '1073741824', 'createtxg': '18402390', 'guid': '15554334551928551694', 'compressratio': '1.00', 'rootcontext': 'none', 'encryption': 'off', 'defcontext': 'none', 'written': '0', 'type': 'snapshot', 'secondarycache': 'all', 'used': '0', 'refcompressratio': '1.00', 'fscontext': 'none', 'objsetid': '106843', 'name': S_UUID_1, 'defer_destroy': 'off', 'san:volume_id': 'e82c7fcbd78df0ffe67d363412e5091421d313ca', 'mlslabel': 'none', 'logicalreferenced': '28672', 'context': 'none'} SNAPSHOT_MULTIPLE_CLONES = { 'referenced': '57344', 'userrefs': '0', 'primarycache': 'all', 'creation': '2023-06-28 18:44:49', 'volsize': '1073741824', 'createtxg': '18403768', 'guid': '18319280142829358721', 'compressratio': '1.00', 'rootcontext': 'none', 'encryption': 'off', 'defcontext': 'none', 'written': '0', 'type': 'snapshot', 'secondarycache': 'all', 'used': '0', 'refcompressratio': 
'1.00', 'fscontext': 'none', 'objsetid': '107416', 'clones': f'Pool-0/{V_UUID_2},Pool-0/{V_UUID_3}', 'name': S_UUID_1, 'defer_destroy': 'off', 'san:volume_id': 'e82c7fcbd78df0ffe67d363412e5091421d313ca', 'mlslabel': 'none', 'logicalreferenced': '28672', 'context': 'none'} SNAPSHOTS_GET_NO_CLONES = [ {'referenced': '57344', 'userrefs': '0', 'primarycache': 'all', 'creation': '2023-06-28 16:49:33', 'volsize': '1073741824', 'createtxg': '18402390', 'guid': '15554334551928551694', 'compressratio': '1.00', 'rootcontext': 'none', 'encryption': 'off', 'defcontext': 'none', 'written': '0', 'type': 'snapshot', 'secondarycache': 'all', 'used': '0', 'refcompressratio': '1.00', 'fscontext': 'none', 'objsetid': '106843', 'name': S_UUID_1, 'defer_destroy': 'off', 'san:volume_id': 'e82c7fcbd78df0ffe67d363412e5091421d313ca', 'mlslabel': 'none', 'logicalreferenced': '28672', 'context': 'none'}, {'referenced': '57344', 'userrefs': '0', 'primarycache': 'all', 'creation': '2023-06-28 18:44:49', 'volsize': '1073741824', 'createtxg': '18403768', 'guid': '18319280142829358721', 'compressratio': '1.00', 'rootcontext': 'none', 'encryption': 'off', 'defcontext': 'none', 'written': '0', 'type': 'snapshot', 'secondarycache': 'all', 'used': '0', 'refcompressratio': '1.00', 'fscontext': 'none', 'objsetid': '107416', 'name': S_UUID_2, 'defer_destroy': 'off', 'san:volume_id': 'e82c7fcbd78df0ffe67d363412e5091421d313ca', 'mlslabel': 'none', 'logicalreferenced': '28672', 'context': 'none'}] class TestOpenEJovianDSSCommon(test.TestCase): def test_is_volume(self): self.assertFalse(jcom.is_volume("asdasd")) self.assertFalse(jcom.is_volume(UUID_1)) self.assertTrue(jcom.is_volume(V_UUID_1)) def test_is_snapshot(self): self.assertFalse(jcom.is_snapshot("asdasd")) self.assertFalse(jcom.is_snapshot(UUID_S1)) self.assertTrue(jcom.is_snapshot(S_UUID_1)) def test_idname(self): self.assertEqual(UUID_1, jcom.idname(V_UUID_1)) self.assertEqual(UUID_S1, jcom.idname(S_UUID_1)) self.assertEqual(UUID_1, jcom.idname(T_UUID_1)) self.assertRaises(exception.VolumeDriverException, jcom.idname, 'asd') def test_vname(self): self.assertEqual(V_UUID_1, jcom.vname(UUID_1)) self.assertEqual(V_UUID_1, jcom.vname(V_UUID_1)) self.assertRaises(exception.VolumeDriverException, jcom.vname, S_UUID_1) def test_sname_to_id(self): self.assertEqual((UUID_S1, UUID_1), jcom.sname_to_id(S_UUID_1)) def test_sid_from_sname(self): self.assertEqual(UUID_S1, jcom.sid_from_sname(S_UUID_1)) def test_vid_from_sname(self): self.assertEqual(UUID_1, jcom.vid_from_sname(S_UUID_1)) def test_sname(self): self.assertEqual(S_UUID_1, jcom.sname(UUID_S1, UUID_1)) def test_sname_from_snap(self): snap = copy.deepcopy(SNAPSHOT_GET) self.assertEqual(S_UUID_1, jcom.sname_from_snap(snap)) def test_is_hidden(self): self.assertTrue(jcom.is_hidden(T_UUID_1)) self.assertFalse(jcom.is_hidden(S_UUID_1)) def test_origin_snapshot(self): vol = copy.deepcopy(VOLUME_GET_THAT_IS_CLONE) self.assertEqual(S_UUID_1, jcom.origin_snapshot(vol)) def test_origin_volume(self): vol = copy.deepcopy(VOLUME_GET_THAT_IS_CLONE) self.assertEqual(V_UUID_1, jcom.origin_volume(vol)) def test_snapshot_clones(self): clones = [V_UUID_2, V_UUID_3] snap = copy.deepcopy(SNAPSHOT_MULTIPLE_CLONES) self.assertEqual(clones, jcom.snapshot_clones(snap)) def test_hidden(self): self.assertEqual(T_UUID_1, jcom.hidden(V_UUID_1)) def test_get_newest_snapshot_name(self): snaps = copy.deepcopy(SNAPSHOTS_GET_NO_CLONES) self.assertEqual(S_UUID_2, jcom.get_newest_snapshot_name(snaps)) 
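# Illustrative sketch, not part of the upstream module: the fixture constants at
# the top of this file (V_UUID_1, S_UUID_1, T_UUID_1) follow the jdss_common
# naming scheme exercised by TestOpenEJovianDSSCommon -- a volume dataset is the
# Cinder id prefixed with 'v_', hidden datasets use 't_', and a snapshot name
# embeds both the snapshot id and its parent volume id. The helper names below
# are hypothetical and exist only to spell out that mapping.
def _sketch_vname(volume_id):
    # regular volume dataset name, e.g. 'v_12345678-...'
    return 'v_' + volume_id


def _sketch_sname(snapshot_id, volume_id):
    # snapshot name carries both ids so the parent volume can be recovered later
    return 's_%s_%s' % (snapshot_id, volume_id)


# These hold for the fixtures defined above.
assert _sketch_vname(UUID_1) == V_UUID_1
assert _sketch_sname(UUID_S1, UUID_1) == S_UUID_1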
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/volume/drivers/open_e/test_driver.py0000664000175000017500000017237500000000000025631 0ustar00zuulzuul00000000000000# Copyright (c) 2020 Open-E, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import copy from unittest import mock from oslo_utils import units as o_units from cinder import context from cinder.tests.unit import fake_volume from cinder.tests.unit import test from cinder.volume.drivers.open_e.jovian_common import driver from cinder.volume.drivers.open_e.jovian_common import exception as jexc from cinder.volume.drivers.open_e.jovian_common import jdss_common as jcom UUID_1 = '12345678-1234-1234-1234-000000000001' UUID_2 = '12345678-1234-1234-1234-000000000002' UUID_3 = '12345678-1234-1234-1234-000000000003' UUID_4 = '12345678-1234-1234-1234-000000000004' UUID_S1 = '12345678-1234-1234-1234-100000000001' UUID_S2 = '12345678-1234-1234-1234-100000000002' UUID_S3 = '12345678-1234-1234-1234-100000000003' UUID_S4 = '12345678-1234-1234-1234-100000000004' CONFIG_OK = { 'san_hosts': ['192.168.0.2'], 'san_api_port': 82, 'driver_use_ssl': 'false', 'jovian_rest_send_repeats': 3, 'jovian_recovery_delay': 60, 'jovian_user': 'admin', 'jovian_password': 'password', 'jovian_ignore_tpath': [], 'target_port': 3260, 'jovian_pool': 'Pool-0', 'target_prefix': 'iqn.2020-04.com.open-e.cinder:', 'chap_password_len': 12, 'san_thin_provision': False, 'jovian_block_size': '128K' } CONFIG_BLOCK_SIZE = { 'san_hosts': ['192.168.0.2'], 'san_api_port': 82, 'driver_use_ssl': 'false', 'jovian_rest_send_repeats': 3, 'jovian_recovery_delay': 60, 'jovian_user': 'admin', 'jovian_password': 'password', 'jovian_ignore_tpath': [], 'target_port': 3260, 'jovian_pool': 'Pool-0', 'target_prefix': 'iqn.2020-04.com.open-e.cinder:', 'chap_password_len': 12, 'san_thin_provision': False, 'jovian_block_size': '64K' } CONFIG_BAD_BLOCK_SIZE = { 'san_hosts': ['192.168.0.2'], 'san_api_port': 82, 'driver_use_ssl': 'false', 'jovian_rest_send_repeats': 3, 'jovian_recovery_delay': 60, 'jovian_user': 'admin', 'jovian_password': 'password', 'jovian_ignore_tpath': [], 'target_port': 3260, 'jovian_pool': 'Pool-0', 'target_prefix': 'iqn.2020-04.com.open-e.cinder:', 'chap_password_len': 12, 'san_thin_provision': False, 'jovian_block_size': '61K' } CONFIG_BACKEND_NAME = { 'san_hosts': ['192.168.0.2'], 'san_api_port': 82, 'driver_use_ssl': 'true', 'jovian_rest_send_repeats': 3, 'jovian_recovery_delay': 60, 'jovian_user': 'admin', 'jovian_password': 'password', 'jovian_ignore_tpath': [], 'target_port': 3260, 'jovian_pool': 'Pool-0', 'target_prefix': 'iqn.2020-04.com.open-e.cinder:', 'chap_password_len': 12, 'san_thin_provision': False, 'volume_backend_name': 'JovianDSS', 'reserved_percentage': 10, 'jovian_block_size': '128K' } CONFIG_MULTI_HOST = { 'san_hosts': ['192.168.0.2', '192.168.0.3'], 'san_api_port': 82, 'driver_use_ssl': 'true', 'jovian_rest_send_repeats': 3, 'jovian_recovery_delay': 60, 
'jovian_user': 'admin', 'jovian_password': 'password', 'jovian_ignore_tpath': [], 'target_port': 3260, 'jovian_pool': 'Pool-0', 'target_prefix': 'iqn.2020-04.com.open-e.cinder:', 'chap_password_len': 12, 'san_thin_provision': False, 'volume_backend_name': 'JovianDSS', 'reserved_percentage': 10, 'jovian_block_size': '128K' } VOLUME_GET_NO_SNAPSHOTS = { "origin": None, "relatime": None, "acltype": None, "vscan": None, "full_name": f"Pool-0/{jcom.vname(UUID_1)}", "userrefs": None, "primarycache": "all", "logbias": "latency", "creation": "1695048563", "sync": "always", "is_clone": False, "dedup": "off", "sharenfs": None, "receive_resume_token": None, "volsize": "1073741824", "referenced": "57344", "sharesmb": None, "createtxg": "19806101", "reservation": "0", "scontext": None, "mountpoint": None, "casesensitivity": None, "guid": "13628065397986503663", "usedbyrefreservation": "1079975936", "dnodesize": None, "written": "57344", "logicalused": "28672", "compressratio": "1.00", "rootcontext": "none", "default_scsi_id": "9e697f6e11336500", "type": "volume", "compression": "lz4", "snapdir": None, "overlay": None, "encryption": "off", "xattr": None, "volmode": "default", "copies": "1", "snapshot_limit": "18446744073709551615", "aclinherit": None, "defcontext": "none", "readonly": "off", "version": None, "recordsize": None, "filesystem_limit": None, "mounted": None, "mlslabel": "none", "secondarycache": "all", "refreservation": "1080033280", "available": "955831783424", "san:volume_id": "9e697f6e11336500480c13e4467b7964bed4b02e", "encryptionroot": None, "exec": None, "refquota": None, "refcompressratio": "1.00", "quota": None, "utf8only": None, "keylocation": "none", "snapdev": "hidden", "snapshot_count": "18446744073709551615", "fscontext": "none", "clones": None, "canmount": None, "keystatus": None, "atime": None, "usedbysnapshots": "0", "normalization": None, "usedbychildren": "0", "volblocksize": "65536", "usedbydataset": "57344", "objsetid": "18142", "name": "a1", "defer_destroy": None, "pbkdf2iters": "0", "checksum": "on", "redundant_metadata": "all", "filesystem_count": None, "devices": None, "keyformat": "none", "setuid": None, "used": "1080033280", "logicalreferenced": "28672", "context": "none", "zoned": None, "nbmand": None, } VOLUME_GET_THAT_IS_CLONE = { "origin": f"Pool-0/{jcom.vname(UUID_1)}@{jcom.sname(UUID_S1, UUID_1)}", "relatime": None, "acltype": None, "vscan": None, "full_name": f"Pool-0/{jcom.vname(UUID_2)}", "userrefs": None, "primarycache": "all", "logbias": "latency", "creation": "1695078560", "sync": "always", "is_clone": True, "dedup": "off", "sharenfs": None, "receive_resume_token": None, "volsize": "1073741824", "referenced": "57344", "sharesmb": None, "createtxg": "19812058", "reservation": "0", "scontext": None, "mountpoint": None, "casesensitivity": None, "guid": "4947994863040470005", "usedbyrefreservation": "0", "dnodesize": None, "written": "0", "logicalused": "0", "compressratio": "1.00", "rootcontext": "none", "default_scsi_id": "5c02d042ed8dbce2", "type": "volume", "compression": "lz4", "snapdir": None, "overlay": None, "encryption": "off", "xattr": None, "volmode": "default", "copies": "1", "snapshot_limit": "18446744073709551615", "aclinherit": None, "defcontext": "none", "readonly": "off", "version": None, "recordsize": None, "filesystem_limit": None, "mounted": None, "mlslabel": "none", "secondarycache": "all", "refreservation": "0", "available": "954751713280", "san:volume_id": "5c02d042ed8dbce2570c8d5dc276dd6a2431e138", "encryptionroot": None, "exec": None, 
"refquota": None, "refcompressratio": "1.00", "quota": None, "utf8only": None, "keylocation": "none", "snapdev": "hidden", "snapshot_count": "18446744073709551615", "fscontext": "none", "clones": None, "canmount": None, "keystatus": None, "atime": None, "usedbysnapshots": "0", "normalization": None, "usedbychildren": "0", "volblocksize": "65536", "usedbydataset": "0", "objsetid": "19228", "name": "a2", "defer_destroy": None, "pbkdf2iters": "0", "checksum": "on", "redundant_metadata": "all", "filesystem_count": None, "devices": None, "keyformat": "none", "setuid": None, "used": "0", "logicalreferenced": "28672", "context": "none", "zoned": None, "nbmand": None, } SNAPSHOTS_GET_NO_CLONES = [ {'referenced': '57344', 'userrefs': '0', 'primarycache': 'all', 'creation': '2023-06-28 16:49:33', 'volsize': '1073741824', 'createtxg': '18402390', 'guid': '15554334551928551694', 'compressratio': '1.00', 'rootcontext': 'none', 'encryption': 'off', 'defcontext': 'none', 'written': '0', 'type': 'snapshot', 'secondarycache': 'all', 'used': '0', 'refcompressratio': '1.00', 'fscontext': 'none', 'objsetid': '106843', 'name': jcom.sname(UUID_S1, UUID_1), 'defer_destroy': 'off', 'san:volume_id': 'e82c7fcbd78df0ffe67d363412e5091421d313ca', 'mlslabel': 'none', 'logicalreferenced': '28672', 'context': 'none'}, {'referenced': '57344', 'userrefs': '0', 'primarycache': 'all', 'creation': '2023-06-28 18:44:49', 'volsize': '1073741824', 'createtxg': '18403768', 'guid': '18319280142829358721', 'compressratio': '1.00', 'rootcontext': 'none', 'encryption': 'off', 'defcontext': 'none', 'written': '0', 'type': 'snapshot', 'secondarycache': 'all', 'used': '0', 'refcompressratio': '1.00', 'fscontext': 'none', 'objsetid': '107416', 'name': jcom.sname(UUID_S2, UUID_1), 'defer_destroy': 'off', 'san:volume_id': 'e82c7fcbd78df0ffe67d363412e5091421d313ca', 'mlslabel': 'none', 'logicalreferenced': '28672', 'context': 'none'}] SNAPSHOTS_GET_INTERMEDIATE_SNAP = [ {'referenced': '57344', 'userrefs': '0', 'primarycache': 'all', 'creation': '2023-06-28 16:49:33', 'volsize': '1073741824', 'createtxg': '18402390', 'guid': '15554334551928551694', 'compressratio': '1.00', 'rootcontext': 'none', 'encryption': 'off', 'defcontext': 'none', 'written': '0', 'type': 'snapshot', 'secondarycache': 'all', 'used': '0', 'refcompressratio': '1.00', 'fscontext': 'none', 'objsetid': '106843', 'name': jcom.vname(UUID_S1), 'defer_destroy': 'off', 'san:volume_id': 'e82c7fcbd78df0ffe67d363412e5091421d313ca', 'mlslabel': 'none', 'logicalreferenced': '28672', 'context': 'none'}] SNAPSHOTS_GET_ONE_CLONE = [ {'referenced': '57344', 'userrefs': '0', 'primarycache': 'all', 'creation': '2023-06-28 16:49:33', 'volsize': '1073741824', 'createtxg': '18402390', 'guid': '15554334551928551694', 'compressratio': '1.00', 'rootcontext': 'none', 'encryption': 'off', 'defcontext': 'none', 'written': '0', 'type': 'snapshot', 'secondarycache': 'all', 'used': '0', 'refcompressratio': '1.00', 'fscontext': 'none', 'objsetid': '106843', 'name': jcom.sname(UUID_S1, UUID_1), 'defer_destroy': 'off', 'san:volume_id': 'e82c7fcbd78df0ffe67d363412e5091421d313ca', 'mlslabel': 'none', 'logicalreferenced': '28672', 'context': 'none'}, {'referenced': '57344', 'userrefs': '0', 'primarycache': 'all', 'creation': '2023-06-28 18:44:49', 'volsize': '1073741824', 'createtxg': '18403768', 'guid': '18319280142829358721', 'compressratio': '1.00', 'rootcontext': 'none', 'encryption': 'off', 'defcontext': 'none', 'written': '0', 'type': 'snapshot', 'secondarycache': 'all', 'used': '0', 
'refcompressratio': '1.00', 'fscontext': 'none', 'objsetid': '107416', 'clones': 'Pool-0/' + jcom.vname(UUID_2), 'name': jcom.sname(UUID_S2, UUID_1), 'defer_destroy': 'off', 'san:volume_id': 'e82c7fcbd78df0ffe67d363412e5091421d313ca', 'mlslabel': 'none', 'logicalreferenced': '28672', 'context': 'none'}] SNAPSHOTS_GET_MULTIPLE_CLONES = [ {'referenced': '57344', 'userrefs': '0', 'primarycache': 'all', 'creation': '2023-06-28 16:49:33', 'volsize': '1073741824', 'createtxg': '18402390', 'guid': '15554334551928551694', 'compressratio': '1.00', 'rootcontext': 'none', 'encryption': 'off', 'defcontext': 'none', 'written': '0', 'type': 'snapshot', 'secondarycache': 'all', 'used': '0', 'refcompressratio': '1.00', 'fscontext': 'none', 'objsetid': '106843', 'name': jcom.sname(UUID_S1, UUID_1), 'defer_destroy': 'off', 'san:volume_id': 'e82c7fcbd78df0ffe67d363412e5091421d313ca', 'mlslabel': 'none', 'logicalreferenced': '28672', 'context': 'none'}, {'referenced': '57344', 'userrefs': '0', 'primarycache': 'all', 'creation': '2023-06-28 18:44:49', 'volsize': '1073741824', 'createtxg': '18403768', 'guid': '18319280142829358721', 'compressratio': '1.00', 'rootcontext': 'none', 'encryption': 'off', 'defcontext': 'none', 'written': '0', 'type': 'snapshot', 'secondarycache': 'all', 'used': '0', 'refcompressratio': '1.00', 'fscontext': 'none', 'objsetid': '107416', 'clones': f'Pool-0/{jcom.vname(UUID_2)},Pool-0/{jcom.vname(UUID_3)}', 'name': jcom.sname(UUID_S2, UUID_1), 'defer_destroy': 'off', 'san:volume_id': 'e82c7fcbd78df0ffe67d363412e5091421d313ca', 'mlslabel': 'none', 'logicalreferenced': '28672', 'context': 'none'}] SNAPSHOT_GET_ONE_CLONE = { "referenced": "57344", "userrefs": "0", "primarycache": "all", "creation": "2023-09-19 01:08:25", "volsize": "1073741824", "createtxg": "19812047", "guid": "7433980076067517643", "compressratio": "1.00", "rootcontext": "none", "encryption": "off", "defcontext": "none", "written": "57344", "type": "snapshot", "secondarycache": "all", "used": "0", "refcompressratio": "1.00", "fscontext": "none", "clones": f"Pool-0/{jcom.vname(UUID_2)}", "objsetid": "19220", "defer_destroy": "off", "san:volume_id": "9e697f6e11336500480c13e4467b7964bed4b02e", "mlslabel": "none", "logicalreferenced": "28672", "context": "none" } SNAPSHOTS_CASCADE_1 = [ {"name": jcom.sname(UUID_S1, UUID_1), "clones": "Pool-0/" + jcom.sname(UUID_S1, UUID_1)}, {"name": jcom.sname(UUID_S1, UUID_2), "clones": "Pool-0/" + jcom.sname(UUID_S1, UUID_2)}, {"name": jcom.sname(UUID_S1, UUID_3), "clones": "Pool-0/" + jcom.sname(UUID_S1, UUID_3)}] SNAPSHOTS_CASCADE_2 = [ {"name": jcom.sname(UUID_S1, UUID_1), "clones": "Pool-0/" + jcom.sname(UUID_S1, UUID_1)}, {"name": jcom.vname(UUID_2), "clones": "Pool-0/" + jcom.vname(UUID_2)}, {"name": jcom.sname(UUID_S1, UUID_3), "clones": "Pool-0/" + jcom.sname(UUID_S1, UUID_3)}] SNAPSHOTS_CASCADE_3 = [ {"name": jcom.vname(UUID_4), "clones": "Pool-0/" + jcom.vname(UUID_4)}] SNAPSHOTS_EMPTY = [] SNAPSHOTS_CLONE = [ {"name": jcom.vname(UUID_1), "clones": "Pool-0/" + jcom.vname(UUID_1)}] SNAPSHOTS_GARBAGE = [ {"name": jcom.sname(UUID_S1, UUID_1), "clones": "Pool-0/" + jcom.vname(UUID_2)}, {"name": jcom.sname(UUID_S1, UUID_2), "clones": ""}] SNAPSHOTS_RECURSIVE_1 = [ {"name": jcom.sname(UUID_S1, UUID_1), "clones": "Pool-0/" + jcom.sname(UUID_S1, UUID_1)}, {"name": jcom.sname(UUID_S1, UUID_2), "clones": "Pool-0/" + jcom.hidden(UUID_2)}] SNAPSHOTS_RECURSIVE_CHAIN_1 = [ {"name": jcom.sname(UUID_S1, UUID_3), "clones": "Pool-0/" + jcom.hidden(UUID_3)}] SNAPSHOTS_RECURSIVE_CHAIN_2 = [ 
{"name": jcom.vname(UUID_2), "clones": "Pool-0/" + jcom.hidden(UUID_2)}] def get_jdss_exceptions(): out = [jexc.JDSSException(reason="Testing"), jexc.JDSSRESTException(request="ra request", reason="Testing"), jexc.JDSSRESTProxyException(host="test_host", reason="Testing"), jexc.JDSSResourceNotFoundException(res="test_resource"), jexc.JDSSVolumeNotFoundException(volume="test_volume"), jexc.JDSSSnapshotNotFoundException(snapshot="test_snapshot"), jexc.JDSSResourceExistsException(res="test_resource"), jexc.JDSSSnapshotExistsException(snapshot="test_snapshot"), jexc.JDSSVolumeExistsException(volume="test_volume"), jexc.JDSSSnapshotIsBusyException(snapshot="test_snapshot")] return out class TestOpenEJovianDSSDriver(test.TestCase): def get_jdss_driver(self, config): ctx = context.get_admin_context() cfg = mock.Mock() cfg.append_config_values.return_value = None cfg.get = lambda val, default: config.get(val, default) jdssd = driver.JovianDSSDriver(cfg) lib_to_patch = ('cinder.volume.drivers.open_e.jovian_common.driver.' 'rest.JovianRESTAPI') with mock.patch(lib_to_patch) as ra: ra.is_pool_exists.return_value = True jdssd.ra = mock.Mock() return jdssd, ctx def start_patches(self, patches): for p in patches: p.start() def stop_patches(self, patches): for p in patches: p.stop() def test_create_volume(self): jdssd, ctx = self.get_jdss_driver(CONFIG_OK) vol = fake_volume.fake_volume_obj(ctx) vol.id = UUID_1 vol.size = 1 jdssd.ra.create_lun.return_value = None jdssd.create_volume(vol.id, 1) create_vol_expected = [mock.call(jcom.vname(vol.id), 1073741824, sparse=False, block_size=None)] jdssd.create_volume(vol.id, 1, sparse=True) create_vol_expected += [mock.call(jcom.vname(vol.id), 1073741824, sparse=True, block_size=None)] jdssd.create_volume(vol.id, 1, sparse=True, block_size="64K") create_vol_expected += [mock.call(jcom.vname(vol.id), 1073741824, sparse=True, block_size="64K")] jdssd.ra.create_lun.assert_has_calls(create_vol_expected) def test_promote_newest_delete_no_snapshots(self): jdssd, ctx = self.get_jdss_driver(CONFIG_OK) vname = jcom.vname(UUID_1) # test provide empty snapshot list snapshots = [] jdssd.ra.get_snapshots.return_value = [] resp = {'data': {"vscan": None, "full_name": "Pool-0/v_" + UUID_1, "userrefs": None, "primarycache": "all", "logbias": "latency", "creation": "1591543140", "sync": "always", "is_clone": False, "dedup": "off", "sharenfs": None, "receive_resume_token": None, "volsize": "1073741824"}, 'error': None, 'code': 200} jdssd.ra.get_lun.return_value = resp jdssd.ra.delete_lun.return_value = None jdssd._promote_newest_delete(vname, snapshots) delete_vol_expected = [mock.call(vname, force_umount=True, recursively_children=True)] jdssd.ra.delete_lun.assert_has_calls(delete_vol_expected) # test provide none as snapshot list snapshots = None jdssd.ra.get_snapshots.return_value = [] resp = {'data': {"vscan": None, "full_name": "Pool-0/v_" + UUID_1, "userrefs": None, "primarycache": "all", "logbias": "latency", "creation": "1591543140", "sync": "always", "is_clone": False, "dedup": "off", "sharenfs": None, "receive_resume_token": None, "volsize": "1073741824"}, 'error': None, 'code': 200} jdssd.ra.get_lun.return_value = resp jdssd.ra.delete_lun.return_value = None jdssd._promote_newest_delete(vname, snapshots) delete_vol_expected = [mock.call(vname, force_umount=True, recursively_children=True)] jdssd.ra.delete_lun.assert_has_calls(delete_vol_expected) def test_promote_newest_delete_has_snapshots(self): '''Test promote-remove on volume with snapshots We should sucessevely 
remove volume if it have snapshots with no clones. Also no promote should be called. ''' jdssd, ctx = self.get_jdss_driver(CONFIG_OK) vname = jcom.vname(UUID_1) snapshots = copy.deepcopy(SNAPSHOTS_GET_NO_CLONES) jdssd.ra.get_snapshots.return_value = [] resp = {"vscan": None, "full_name": "Pool-0/v_" + UUID_1, "userrefs": None, "primarycache": "all", "logbias": "latency", "creation": "1591543140", "sync": "always", "is_clone": False, "dedup": "off", "sharenfs": None, "receive_resume_token": None, "volsize": "1073741824"}, jdssd.ra.get_lun.return_value = resp jdssd.ra.delete_lun.return_value = None jdssd._promote_newest_delete(vname, snapshots) delete_vol_expected = [mock.call(vname, force_umount=True, recursively_children=True)] jdssd.ra.promote.assert_not_called() jdssd.ra.delete_lun.assert_has_calls(delete_vol_expected) def test_promote_newest_delete_has_clone(self): '''Test promote-remove on volume with clone We should sucessevely remove volume if it have snapshot with no clone. ''' jdssd, ctx = self.get_jdss_driver(CONFIG_OK) vname = jcom.vname(UUID_1) snapshots = copy.deepcopy(SNAPSHOTS_GET_ONE_CLONE) self.assertEqual(f'Pool-0/{jcom.vname(UUID_2)}', snapshots[1]['clones']) jdssd.ra.get_snapshots.return_value = [] resp = {'data': {"vscan": None, "full_name": "Pool-0/v_" + UUID_1, "userrefs": None, "primarycache": "all", "logbias": "latency", "creation": "1591543140", "sync": "always", "is_clone": False, "dedup": "off", "sharenfs": None, "receive_resume_token": None, "volsize": "1073741824"}, 'error': None, 'code': 200} jdssd.ra.get_lun.return_value = resp jdssd.ra.delete_lun.return_value = None jdssd._promote_newest_delete(vname, snapshots) delete_vol_expected = [mock.call(vname, force_umount=True, recursively_children=True)] promote_vol_expected = [mock.call(vname, jcom.sname(UUID_S2, UUID_1), jcom.vname(UUID_2))] jdssd.ra.promote.assert_has_calls(promote_vol_expected) jdssd.ra.delete_lun.assert_has_calls(delete_vol_expected) def test_promote_newest_delete_has_multiple_clones(self): '''Test promote-remove on volume with clone We should sucessevely remove volume if it have snapshot with no clone. ''' jdssd, ctx = self.get_jdss_driver(CONFIG_OK) vname = jcom.vname(UUID_1) snapshots = copy.deepcopy(SNAPSHOTS_GET_MULTIPLE_CLONES) jdssd.ra.get_snapshots.return_value = [] resp = {'data': {"vscan": None, "full_name": "Pool-0/s_" + UUID_S2, "userrefs": None, "primarycache": "all", "logbias": "latency", "creation": "1591543140", "sync": "always", "is_clone": False, "dedup": "off", "sharenfs": None, "receive_resume_token": None, "volsize": "1073741824"}, 'error': None, 'code': 200} jdssd.ra.get_lun.return_value = resp jdssd.ra.delete_lun.return_value = None jdssd._promote_newest_delete(vname, snapshots) delete_vol_expected = [mock.call(vname, force_umount=True, recursively_children=True)] promote_vol_expected = [mock.call(vname, jcom.sname(UUID_S2, UUID_1), jcom.vname(UUID_3))] jdssd.ra.promote.assert_has_calls(promote_vol_expected) jdssd.ra.delete_lun.assert_has_calls(delete_vol_expected) def test_delete_vol_with_source_snap_no_snap(self): '''Test _delete_vol_with_source_snap We should sucessevely remove volume with no snapshots. 
''' jdssd, ctx = self.get_jdss_driver(CONFIG_OK) vname = jcom.vname(UUID_1) jdssd.ra.get_lun.return_value = copy.deepcopy(VOLUME_GET_NO_SNAPSHOTS) jdssd.ra.delete_lun.return_value = None delete_vol_expected = [mock.call(vname, force_umount=True, recursively_children=False)] jdssd._delete_vol_with_source_snap(vname, recursive=False) jdssd.ra.delete_lun.assert_has_calls(delete_vol_expected) jdssd.ra.delete_snapshot.assert_not_called() def test_delete_vol_with_source_snap(self): '''Test _delete_vol_with_source_snap We should sucessevely remove volume that is clone. We should not remove source snapshot if that snapshot is not related to volume to remove ''' # Snapshot does belong to parent volume jdssd, ctx = self.get_jdss_driver(CONFIG_OK) vname = jcom.vname(UUID_2) jdssd.ra.get_lun.return_value = \ copy.deepcopy(VOLUME_GET_THAT_IS_CLONE) jdssd.ra.delete_lun.return_value = None delete_vol_expected = [mock.call(vname, force_umount=True, recursively_children=False)] jdssd._delete_vol_with_source_snap(vname, recursive=False) jdssd.ra.delete_lun.assert_has_calls(delete_vol_expected) jdssd.ra.delete_snapshot.assert_not_called() def test_delete_vol_with_source_snap_snap_delete(self): # Snapshot belongs to volume to delete jdssd, ctx = self.get_jdss_driver(CONFIG_OK) vname = jcom.vname(UUID_2) lun_info = copy.deepcopy(VOLUME_GET_THAT_IS_CLONE) origin = f"Pool-0/{jcom.vname(UUID_1)}@{jcom.sname(UUID_S2, UUID_2)}" lun_info['origin'] = origin jdssd.ra.get_lun.return_value = lun_info jdssd.ra.delete_lun.return_value = None delete_vol_expected = [mock.call(vname, force_umount=True, recursively_children=False)] delete_snapshot_expected = [mock.call(jcom.vname(UUID_1), jcom.sname(UUID_S2, UUID_2), recursively_children=True, force_umount=True)] jdssd._delete_vol_with_source_snap(vname, recursive=False) jdssd.ra.delete_lun.assert_has_calls(delete_vol_expected) jdssd.ra.delete_snapshot.assert_has_calls(delete_snapshot_expected) def test_clean_garbage_resources(self): # Make sure that we request list of snapshots if none is provide # Make sure we remove intermediate volume like snapshot if it has # no volumes associated with it jdssd, ctx = self.get_jdss_driver(CONFIG_OK) vname = jcom.vname(UUID_1) snap_list = copy.deepcopy(SNAPSHOTS_GET_INTERMEDIATE_SNAP) get_snapshots_expectes = [mock.call(jcom.vname(UUID_1))] delete_snapshot_expected = [mock.call(jcom.vname(UUID_1), jcom.vname(UUID_S1), force_umount=True)] jdssd.ra.get_snapshots.side_effect = [snap_list, []] ret = jdssd._clean_garbage_resources(vname, snapshots=None) self.assertEqual([], ret) jdssd.ra.get_snapshots.assert_has_calls(get_snapshots_expectes) jdssd.ra.delete_snapshot.assert_has_calls(delete_snapshot_expected) def test_clean_garbage_resources_do_nothing(self): jdssd, ctx = self.get_jdss_driver(CONFIG_OK) vname = jcom.vname(UUID_1) snap_list = SNAPSHOTS_GET_ONE_CLONE.copy() get_snapshots_expectes = [mock.call(jcom.vname(UUID_1))] jdssd.ra.get_snapshots.side_effect = [snap_list, snap_list] ret = jdssd._clean_garbage_resources(vname, snapshots=None) self.assertEqual(SNAPSHOTS_GET_ONE_CLONE, ret) jdssd.ra.get_snapshots.assert_has_calls(get_snapshots_expectes) jdssd.ra.delete_snapshot.assert_not_called() def test_clean_garbage_resources_clean_hidden(self): jdssd, ctx = self.get_jdss_driver(CONFIG_OK) vname = jcom.vname(UUID_1) snap_list = copy.deepcopy(SNAPSHOTS_GET_ONE_CLONE) snap_list[1]['clones'] = f"Pool-0/{jcom.hidden(UUID_2)}" snap_list[1]['name'] = jcom.sname(UUID_S2, UUID_1) get_snapshots_expectes = [mock.call(jcom.vname(UUID_1)), 
mock.call(jcom.vname(UUID_1))] jdssd.ra.get_snapshots.side_effect = [snap_list, SNAPSHOTS_GET_NO_CLONES] with mock.patch.object(jdssd, '_promote_newest_delete') as pnd: pnd.side_effect = [None] ret = jdssd._clean_garbage_resources(vname, snapshots=None) self.assertEqual(SNAPSHOTS_GET_NO_CLONES, ret) pnd.assert_has_calls([mock.call(jcom.hidden(UUID_2))]) jdssd.ra.get_snapshots.assert_has_calls(get_snapshots_expectes) jdssd.ra.delete_snapshot.assert_not_called() def test_clean_garbage_resources_clean_snapshot(self): jdssd, ctx = self.get_jdss_driver(CONFIG_OK) vname = jcom.vname(UUID_1) snap_list = copy.deepcopy(SNAPSHOTS_GET_ONE_CLONE) snap_list[1]['clones'] = f"Pool-0/{jcom.sname(UUID_S2, UUID_1)}" snap_list[1]['name'] = jcom.sname(UUID_S2, UUID_1) get_snapshots_expectes = [mock.call(jcom.vname(UUID_1))] jdssd.ra.get_snapshots.side_effect = [snap_list] with mock.patch.object(jdssd, '_promote_newest_delete') as pnd: pnd.side_effect = [None] ret = jdssd._clean_garbage_resources(vname, snapshots=None) pnd.assert_not_called() self.assertEqual(snap_list, ret) jdssd.ra.get_snapshots.assert_has_calls(get_snapshots_expectes) jdssd.ra.delete_snapshot.assert_not_called() def test_list_busy_snapshots(self): # Check operation with regular clone jdssd, ctx = self.get_jdss_driver(CONFIG_OK) vname = jcom.vname(UUID_1) snap_list = copy.deepcopy(SNAPSHOTS_GET_ONE_CLONE) ret = jdssd._list_busy_snapshots(vname, snap_list) self.assertEqual([SNAPSHOTS_GET_ONE_CLONE[1]], ret) # Check hidden clone snap_list = copy.deepcopy(SNAPSHOTS_GET_ONE_CLONE) snap_list[0]['clones'] = f"Pool-0/{jcom.hidden(UUID_2)}" snap_list[0]['name'] = jcom.sname(UUID_S2, UUID_1) ret = jdssd._list_busy_snapshots(vname, snap_list) self.assertEqual(snap_list, ret) # Check exlude dedicated volume flag snap_list = copy.deepcopy(SNAPSHOTS_GET_ONE_CLONE) ret = jdssd._list_busy_snapshots(vname, snap_list, exclude_dedicated_volumes=True) self.assertEqual([], ret) def _clean_volume_snapshots_mount_points(self): # Single attached snapshot case vname = jcom.vname(UUID_1) jdssd, ctx = self.get_jdss_driver(CONFIG_OK) snap_list = copy.deepcopy(SNAPSHOTS_GET_ONE_CLONE) cname = jcom.sname(UUID_S2, UUID_1) snap_list[1]['clones'] = cname = f'Pool-0/{cname}' ret_list = copy.deepcopy(SNAPSHOTS_GET_ONE_CLONE) ret_list[1].pop('clones') jdssd.ra.get_snapshots.return_value = ret_list with mock.patch.object(jdssd, '_delete_volume'): ret = jdssd._clean_volume_snapshots_mount_points(vname, snap_list) jdssd._delete_volume.assert_called_once_with(cname, cascade=True) jdssd.ra.get_snapshots.assert_called_once_with(vname) self.assertEqual(ret_list, ret) # Multiple attached snapshot case vname = jcom.vname(UUID_1) jdssd, ctx = self.get_jdss_driver(CONFIG_OK) snap_list = copy.deepcopy(SNAPSHOTS_GET_ONE_CLONE) cname0 = jcom.sname(UUID_S1, UUID_1) cname1 = jcom.sname(UUID_S2, UUID_1) snap_list[0]['clones'] = cname = f'Pool-0/{cname0}' snap_list[1]['clones'] = cname = f'Pool-0/{cname1}' ret_list = copy.deepcopy(SNAPSHOTS_GET_ONE_CLONE) ret_list[0].pop('clones') ret_list[1].pop('clones') jdssd.ra.get_snapshots.return_value = ret_list del_vol_expected = [mock.call(jcom.vname(cname0), cascade=True), mock.call(jcom.vname(cname1), cascade=True)] with mock.patch.object(jdssd, '_delete_volume'): ret = jdssd._clean_volume_snapshots_mount_points(vname, snap_list) jdssd._delete_volume.assert_has_calls(del_vol_expected) jdssd.ra.get_snapshots.assert_called_once_with(vname) self.assertEqual(ret_list, ret) def test_delete_volume_no_snap(self): vname = jcom.vname(UUID_1) jdssd, ctx = 
self.get_jdss_driver(CONFIG_OK) jdssd.ra.delete_lun.return_value = None del_lun_exp = [mock.call( jcom.vname(UUID_1), force_umount=True, recursively_children=False)] jdssd._delete_volume(vname) jdssd.ra.delete_lun.assert_has_calls(del_lun_exp) jdssd.ra.get_snapshots.assert_not_called() def test_delete_volume_cascade_with_clones(self): vname = jcom.vname(UUID_1) jdssd, ctx = self.get_jdss_driver(CONFIG_OK) jdssd.ra.delete_lun.side_effect = [ jexc.JDSSResourceIsBusyException(res=vname)] del_lun_exp = [mock.call(jcom.vname(UUID_1), force_umount=True, recursively_children=True)] snap_list = copy.deepcopy(SNAPSHOTS_GET_ONE_CLONE) get_snap_exp = [mock.call(jcom.vname(UUID_1))] jdssd.ra.get_snapshots.side_effect = [snap_list] pnd_exp = [mock.call(jcom.vname(UUID_1), snapshots=snap_list)] with mock.patch.object(jdssd, '_promote_newest_delete'): jdssd._delete_volume(vname, cascade=True) jdssd._promote_newest_delete.assert_has_calls(pnd_exp) jdssd.ra.delete_lun.assert_has_calls(del_lun_exp) jdssd.ra.get_snapshots.assert_has_calls(get_snap_exp) def test_delete_volume(self): jdssd, ctx = self.get_jdss_driver(CONFIG_OK) vname = jcom.vname(UUID_1) with mock.patch.object(jdssd, '_delete_volume'): jdssd.delete_volume(UUID_1) jdssd._delete_volume.assert_called_once_with(vname, cascade=False) def test_get_provider_location(self): jdssd, ctx = self.get_jdss_driver(CONFIG_OK) host = CONFIG_OK["san_hosts"][0] port = CONFIG_OK["target_port"] target_name = CONFIG_OK["target_prefix"] + UUID_1 patches = [mock.patch.object( jdssd.ra, "get_active_host", return_value=host)] out = '{host}:{port},1 {name} 0'.format( host=host, port=port, name=target_name ) self.start_patches(patches) self.assertEqual(out, jdssd.get_provider_location(UUID_1)) self.stop_patches(patches) def test_get_target_name(self): jdssd, ctx = self.get_jdss_driver(CONFIG_OK) ret = jdssd._get_target_name(UUID_1) self.assertEqual(ret, f'iqn.2020-04.com.open-e.cinder:{UUID_1}') def test_get_iscsi_properties(self): jdssd, ctx = self.get_jdss_driver(CONFIG_OK) provider_auth = 'chap user_name 123456789012' multipath = True target_name = CONFIG_OK['target_prefix'] + UUID_1 ret = jdssd._get_iscsi_properties(UUID_1, provider_auth, multipath=multipath) expected = {'auth_method': 'chap', 'auth_password': '123456789012', 'auth_username': 'user_name', 'target_discovered': False, 'target_iqns': [target_name], 'target_lun': 0, 'target_luns': [0], 'target_portals': ['192.168.0.2:3260']} self.assertEqual(expected, ret) def test_get_iscsi_properties_multipath(self): jdssd, ctx = self.get_jdss_driver(CONFIG_MULTI_HOST) provider_auth = 'chap user_name 123456789012' target_name = CONFIG_OK['target_prefix'] + UUID_1 ret = jdssd._get_iscsi_properties(UUID_1, provider_auth, multipath=True) expected = {'auth_method': 'chap', 'auth_password': '123456789012', 'auth_username': 'user_name', 'target_discovered': False, 'target_iqns': [target_name, target_name], 'target_lun': 0, 'target_luns': [0, 0], 'target_portals': ['192.168.0.2:3260', '192.168.0.3:3260']} self.assertEqual(expected, ret) def test_remove_target_volume(self): jdssd, ctx = self.get_jdss_driver(CONFIG_OK) target_name = CONFIG_OK['target_prefix'] + UUID_1 jdssd.ra.detach_target_vol.return_value = None jdssd.ra.delete_target.return_value = None jdssd._remove_target_volume(UUID_1, jcom.vname(UUID_1)) jdssd.ra.detach_target_vol.assert_called_once_with(target_name, jcom.vname(UUID_1)) jdssd.ra.delete_target.assert_called_with(target_name) def test_remove_target_volume_no_target(self): jdssd, ctx = 
self.get_jdss_driver(CONFIG_OK) target_name = CONFIG_OK['target_prefix'] + UUID_1 vname = jcom.vname(UUID_1) jdssd.ra.detach_target_vol.return_value = None jdssd.ra.detach_target_vol.side_effect = ( jexc.JDSSResourceNotFoundException(res=target_name)) jdssd.ra.delete_target.return_value = None jdssd._remove_target_volume(UUID_1, vname) jdssd.ra.detach_target_vol.assert_called_once_with(target_name, jcom.vname(UUID_1)) jdssd.ra.delete_target.assert_called_with(target_name) def test_remove_target_volume_fail_to_detach(self): jdssd, ctx = self.get_jdss_driver(CONFIG_OK) target_name = CONFIG_OK['target_prefix'] + UUID_1 jdssd.ra.detach_target_vol.side_effect = ( jexc.JDSSRESTException(reason='running test', request='test')) jdssd.ra.delete_target.return_value = None self.assertRaises(jexc.JDSSException, jdssd._remove_target_volume, UUID_1, jcom.vname(UUID_1)) jdssd.ra.detach_target_vol.assert_called_once_with( target_name, jcom.vname(UUID_1)) jdssd.ra.delete_target.assert_not_called() def test_remove_target_volume_fail_to_delete(self): jdssd, ctx = self.get_jdss_driver(CONFIG_OK) target_name = CONFIG_OK['target_prefix'] + UUID_1 jdssd.ra.detach_target_vol.return_value = None jdssd.ra.delete_target.side_effect = ( jexc.JDSSRESTException(reason='running test', request='test')) self.assertRaises(jexc.JDSSException, jdssd._remove_target_volume, UUID_1, jcom.vname(UUID_1)) jdssd.ra.detach_target_vol.assert_called_once_with(target_name, jcom.vname(UUID_1)) jdssd.ra.delete_target.assert_called_with(target_name) def test_ensure_export(self): jdssd, ctx = self.get_jdss_driver(CONFIG_MULTI_HOST) provider_auth = 'chap user_name 123456789012' with mock.patch.object(jdssd, "_ensure_target_volume"): jdssd.ensure_export(UUID_1, provider_auth) jdssd._ensure_target_volume.assert_called_once_with( UUID_1, jcom.vname(UUID_1), provider_auth) def test_initialize_connection(self): # Test Ok jdssd, ctx = self.get_jdss_driver(CONFIG_MULTI_HOST) volume_id = UUID_1 provider_auth = 'chap user_name 123456789012' multipath = True target_name = CONFIG_OK['target_prefix'] + UUID_1 properties = {'auth_method': 'chap', 'auth_password': '123456789012', 'auth_username': 'user_name', 'target_discovered': False, 'target_iqns': [target_name, target_name], 'target_lun': 0, 'target_luns': [0, 0], 'target_portals': ['192.168.0.2:3260', '192.168.0.3:3260']} con_info = { 'driver_volume_type': 'iscsi', 'data': properties, } vname = jcom.vname(volume_id) with mock.patch.object(jdssd, '_ensure_target_volume'): ret = jdssd.initialize_connection(volume_id, provider_auth, multipath=multipath) jdssd._ensure_target_volume.assert_called_with(UUID_1, vname, provider_auth) self.assertEqual(con_info, ret) # Test initialize for snapshot jdssd, ctx = self.get_jdss_driver(CONFIG_MULTI_HOST) volume_id = UUID_1 snapshot_id = UUID_S1 provider_auth = 'chap user_name 123456789012' multipath = True target_name = CONFIG_OK['target_prefix'] + UUID_S1 properties = {'auth_method': 'chap', 'auth_password': '123456789012', 'auth_username': 'user_name', 'target_discovered': False, 'target_iqns': [target_name, target_name], 'target_lun': 0, 'target_luns': [0, 0], 'target_portals': ['192.168.0.2:3260', '192.168.0.3:3260']} con_info = { 'driver_volume_type': 'iscsi', 'data': properties, } sname = jcom.sname(snapshot_id, volume_id) with mock.patch.object(jdssd, '_ensure_target_volume'): ret = jdssd.initialize_connection(volume_id, provider_auth, snapshot_id=snapshot_id, multipath=multipath) jdssd._ensure_target_volume.assert_called_with(UUID_S1, sname, provider_auth, 
mode='ro') self.assertEqual(con_info, ret) # Test no auth jdssd, ctx = self.get_jdss_driver(CONFIG_MULTI_HOST) volume_id = UUID_1 provider_auth = None multipath = True target_name = CONFIG_OK['target_prefix'] + UUID_1 properties = {'auth_method': 'chap', 'auth_password': '123456789012', 'auth_username': 'user_name', 'target_discovered': False, 'target_iqns': [target_name, target_name], 'target_lun': 0, 'target_luns': [0, 0], 'target_portals': ['192.168.0.2:3260', '192.168.0.3:3260']} con_info = { 'driver_volume_type': 'iscsi', 'data': properties, } sname = jcom.sname(snapshot_id, volume_id) with mock.patch.object(jdssd, '_ensure_target_volume'): self.assertRaises(jexc.JDSSException, jdssd.initialize_connection, volume_id, provider_auth, multipath=multipath) jdssd._ensure_target_volume.assert_not_called() def test_create_target_volume(self): jdssd, ctx = self.get_jdss_driver(CONFIG_OK) vid = jcom.vname(UUID_1) target_name = CONFIG_OK['target_prefix'] + UUID_1 provider_auth = 'chap user_name 123456789012' cred = {'name': 'user_name', 'password': '123456789012'} patches = [ mock.patch.object(jdssd, "_attach_target_volume"), mock.patch.object(jdssd, "_set_target_credentials")] self.start_patches(patches) jdssd._create_target_volume(UUID_1, vid, provider_auth) jdssd.ra.create_target.assert_called_once_with(target_name, use_chap=True) jdssd._attach_target_volume.assert_called_once_with( target_name, jcom.vname(UUID_1)) jdssd._set_target_credentials.assert_called_once_with( target_name, cred) self.stop_patches(patches) def test_create_target_volume_for_snapshot_attachment(self): jdssd, ctx = self.get_jdss_driver(CONFIG_OK) vid = jcom.sname(UUID_S1, UUID_1) target_name = CONFIG_OK['target_prefix'] + UUID_S1 provider_auth = 'chap user_name 123456789012' cred = {'name': 'user_name', 'password': '123456789012'} patches = [ mock.patch.object(jdssd, "_attach_target_volume"), mock.patch.object(jdssd, "_set_target_credentials")] self.start_patches(patches) jdssd._create_target_volume(UUID_S1, vid, provider_auth) jdssd.ra.create_target.assert_called_once_with(target_name, use_chap=True) jdssd._attach_target_volume.assert_called_once_with( target_name, jcom.sname(UUID_S1, UUID_1)) jdssd._set_target_credentials.assert_called_once_with( target_name, cred) self.stop_patches(patches) def test_attach_target_volume(self): jdssd, ctx = self.get_jdss_driver(CONFIG_BACKEND_NAME) target_name = CONFIG_OK['target_prefix'] + UUID_1 vname = jcom.vname(UUID_1) jdssd.ra.attach_target_vol.return_value = None jdssd.ra.delete_target.return_value = None jdssd._attach_target_volume(target_name, vname) jdssd.ra.attach_target_vol.assert_called_once_with( target_name, vname) jdssd.ra.delete_target.assert_not_called() ex = jexc.JDSSResourceExistsException(res=target_name) jdssd.ra.attach_target_vol.side_effect = ex self.assertRaises(jexc.JDSSException, jdssd._attach_target_volume, target_name, vname) jdssd.ra.delete_target.assert_called_once_with(target_name) def test_set_target_credentials(self): jdssd, ctx = self.get_jdss_driver(CONFIG_BACKEND_NAME) target_name = CONFIG_BACKEND_NAME['target_prefix'] + UUID_1 cred = {'name': 'user_name', 'password': '123456789012'} jdssd.ra.create_target_user.return_value = None jdssd.ra.delete_target.return_value = None jdssd._set_target_credentials(target_name, cred) jdssd.ra.create_target_user.assert_called_once_with( target_name, cred) jdssd.ra.delete_target.assert_not_called() ex = jexc.JDSSResourceExistsException(res=target_name) jdssd.ra.create_target_user.side_effect = ex 
self.assertRaises(jexc.JDSSException, jdssd._set_target_credentials, target_name, cred) jdssd.ra.delete_target.assert_called_once_with(target_name) def test_clone_object(self): # test ok jdssd, ctx = self.get_jdss_driver(CONFIG_OK) ovname = jcom.vname(UUID_1) sname = jcom.sname(UUID_S1, UUID_1) cvname = jcom.vname(UUID_2) jdssd._clone_object(cvname, sname, ovname, sparse=True) jdssd.ra.create_snapshot.assesrt_not_called() jdssd.ra.create_volume_from_snapshot.assert_called_once_with( cvname, sname, ovname, sparse=True) # test create snapshot jdssd, ctx = self.get_jdss_driver(CONFIG_OK) ovname = jcom.vname(UUID_1) sname = jcom.sname(UUID_S1, UUID_1) cvname = jcom.vname(UUID_2) jdssd._clone_object(cvname, sname, ovname, sparse=True) jdssd.ra.create_snapshot.assesrt_not_called() jdssd.ra.create_volume_from_snapshot.assert_called_once_with( cvname, sname, ovname, sparse=True) # test create from snapshot failed jdssd, ctx = self.get_jdss_driver(CONFIG_OK) ovname = jcom.vname(UUID_1) sname = jcom.sname(UUID_S1, UUID_1) cvname = jcom.vname(UUID_2) jdssd.ra.create_volume_from_snapshot.side_effect = [ jexc.JDSSVolumeExistsException(volume=cvname)] self.assertRaises(jexc.JDSSException, jdssd._clone_object, cvname, sname, ovname, sparse=True) jdssd.ra.create_snapshot.assesrt_not_called() jdssd.ra.create_volume_from_snapshot.assert_called_once_with( cvname, sname, ovname, sparse=True) jdssd.ra.delete_snapshot(ovname, sname, force_umount=True, recursively_children=True) def test_resize_volume(self): jdssd, ctx = self.get_jdss_driver(CONFIG_OK) vname = jcom.vname(UUID_1) jdssd.resize_volume(UUID_1, 2) jdssd.ra.extend_lun.assert_called_once_with(vname, o_units.Gi * 2) def test_create_cloned_volume(self): # test ok jdssd, ctx = self.get_jdss_driver(CONFIG_OK) cvname = jcom.vname(UUID_2) vname = jcom.vname(UUID_1) jdssd.ra.get_lun.return_value = copy.deepcopy(VOLUME_GET_THAT_IS_CLONE) with mock.patch.object(jdssd, '_clone_object'): jdssd.create_cloned_volume(UUID_2, UUID_1, 1, sparse=False) jdssd._clone_object.assert_called_once_with( cvname, cvname, vname, sparse=False, create_snapshot=True) # test clone from snapshot jdssd, ctx = self.get_jdss_driver(CONFIG_OK) cvname = jcom.vname(UUID_2) vname = jcom.vname(UUID_1) sname = jcom.sname(UUID_S1, UUID_1) jdssd.ra.get_lun.return_value = copy.deepcopy(VOLUME_GET_THAT_IS_CLONE) with mock.patch.object(jdssd, '_clone_object'): jdssd.create_cloned_volume(UUID_2, UUID_1, 1, snapshot_name=UUID_S1, sparse=False) jdssd._clone_object.assert_called_once_with( cvname, sname, vname, sparse=False, create_snapshot=False) # test extend jdssd, ctx = self.get_jdss_driver(CONFIG_OK) cvname = jcom.vname(UUID_2) vname = jcom.vname(UUID_1) get_vol = copy.deepcopy(VOLUME_GET_THAT_IS_CLONE) get_vol['volsize'] = "1073145824" jdssd.ra.get_lun.return_value = get_vol with mock.patch.object(jdssd, '_clone_object'), \ mock.patch.object(jdssd, "resize_volume"): jdssd.create_cloned_volume(UUID_2, UUID_1, 1, sparse=False) jdssd._clone_object.assert_called_once_with( cvname, cvname, vname, sparse=False, create_snapshot=True) jdssd.resize_volume.assert_called_once_with(UUID_2, 1) def test_create_snapshot(self): jdssd, ctx = self.get_jdss_driver(CONFIG_BACKEND_NAME) jdssd.create_snapshot(UUID_S1, UUID_1) vname = jcom.vname(UUID_1) sname = jcom.sname(UUID_S1, UUID_1) jdssd.ra.create_snapshot.assert_called_once_with(vname, sname) def test_create_export_snapshot(self): jdssd, ctx = self.get_jdss_driver(CONFIG_BACKEND_NAME) provider_auth = 'chap user_name 123456789012' sname = jcom.sname(UUID_S1, 
UUID_1) vname = jcom.vname(UUID_1) with mock.patch.object(jdssd, '_clone_object'), \ mock.patch.object(jdssd, '_ensure_target_volume'): jdssd.create_export_snapshot(UUID_S1, UUID_1, provider_auth) jdssd._clone_object.assert_called_once_with(sname, sname, vname, sparse=True, create_snapshot=False) jdssd._ensure_target_volume(UUID_S1, sname, provider_auth) def test_remove_export(self): jdssd, ctx = self.get_jdss_driver(CONFIG_OK) vname = jcom.vname(UUID_1) patches = [ mock.patch.object( jdssd, "_remove_target_volume", return_value=None)] self.start_patches(patches) jdssd.remove_export(UUID_1) jdssd._remove_target_volume.assert_called_once_with(UUID_1, vname) self.stop_patches(patches) def test_remove_export_snapshot(self): # remove ok jdssd, ctx = self.get_jdss_driver(CONFIG_OK) with mock.patch.object(jdssd, "_remove_target_volume"), \ mock.patch.object(jdssd, "_delete_volume"): jdssd.remove_export_snapshot(UUID_S1, UUID_1) jdssd._delete_volume.assert_called_once() jdssd._remove_target_volume.assert_called_once() # remove export failed jdssd, ctx = self.get_jdss_driver(CONFIG_OK) vname = jcom.sname(UUID_S1, UUID_1) with mock.patch.object(jdssd, "_remove_target_volume"), \ mock.patch.object(jdssd, "_delete_volume"): jdssd._remove_target_volume.side_effect = [ jexc.JDSSResourceIsBusyException(res=vname)] self.assertRaises(jexc.JDSSResourceIsBusyException, jdssd.remove_export_snapshot, UUID_S1, UUID_1) jdssd._delete_volume.assert_called_once() jdssd._remove_target_volume.assert_called_once() def test_delete_snapshot(self): # Delete ok, letion of snapshot with no clones jdssd, ctx = self.get_jdss_driver(CONFIG_OK) vname = jcom.vname(UUID_1) sname = jcom.sname(UUID_S1, UUID_1) jdssd.ra.delete_snapshot.side_effect = [None] jdssd._delete_snapshot(vname, sname) jdssd.ra.delete_snapshot.assert_called_once_with(vname, sname, force_umount=True) # Test deletion of snapshot with clones jdssd, ctx = self.get_jdss_driver(CONFIG_OK) vname = jcom.vname(UUID_1) sname = jcom.sname(UUID_S1, UUID_1) side_eff = [jexc.JDSSSnapshotIsBusyException(snapshot=sname), None] jdssd.ra.delete_snapshot.side_effect = side_eff side_eff = [copy.deepcopy(SNAPSHOT_GET_ONE_CLONE)] jdssd.ra.get_snapshot.side_effect = side_eff with mock.patch.object(jdssd, '_promote_newest_delete'): jdssd._delete_snapshot(vname, sname) jdssd._promote_newest_delete.assert_not_called() jdssd.ra.delete_snapshot.assert_called_once_with(vname, sname, force_umount=True) # Test deletion of attached snapshot jdssd, ctx = self.get_jdss_driver(CONFIG_OK) vname = jcom.vname(UUID_1) sname = jcom.sname(UUID_S1, UUID_1) side_eff = [jexc.JDSSSnapshotIsBusyException(snapshot=sname), None] jdssd.ra.delete_snapshot.side_effect = side_eff get_snap = copy.deepcopy(SNAPSHOT_GET_ONE_CLONE) get_snap['clones'] = f"Pool-0/{sname}" side_eff = [get_snap] jdssd.ra.get_snapshot.side_effect = side_eff delete_snap_expected = [mock.call(vname, sname, force_umount=True), mock.call(vname, sname, force_umount=True)] with mock.patch.object(jdssd, '_promote_newest_delete'): jdssd._delete_snapshot(vname, sname) jdssd._promote_newest_delete.assert_called_once_with(sname) jdssd.ra.delete_snapshot.assert_has_calls(delete_snap_expected) def test_delete_snapshot_wrapper(self): jdssd, ctx = self.get_jdss_driver(CONFIG_OK) volume_name = UUID_1 snapshot_name = UUID_S1 with mock.patch.object(jdssd, "_delete_snapshot"): jdssd.delete_snapshot(volume_name, snapshot_name) jdssd._delete_snapshot.assert_called_once_with( jcom.vname(UUID_1), jcom.sname(UUID_S1, UUID_1)) def 
test_ensure_target_volume(self): jdssd, ctx = self.get_jdss_driver(CONFIG_OK) id = UUID_1 vid = jcom.vname(UUID_1) target_name = CONFIG_OK['target_prefix'] + UUID_1 provider_auth = 'chap user_name 123456789012' cred = {'name': 'user_name'} patches = [ mock.patch.object(jdssd, "_attach_target_volume"), mock.patch.object(jdssd, "_set_target_credentials"), mock.patch.object(jdssd, "_attach_target_volume")] jdssd.ra.is_target.return_value = True jdssd.ra.is_target_lun.return_value = True jdssd.ra.get_target_user.return_value = [cred] self.start_patches(patches) jdssd._ensure_target_volume(id, vid, provider_auth) jdssd.ra.is_target.assert_called_once_with(target_name) jdssd.ra.is_target_lun.assert_called_once_with(target_name, vid) jdssd.ra.get_target_user.assert_called_once_with(target_name) jdssd.ra.delete_target_user.assert_not_called() jdssd._set_target_credentials.assert_not_called() self.stop_patches(patches) def test_ensure_target_volume_not_attached(self): jdssd, ctx = self.get_jdss_driver(CONFIG_OK) id = UUID_1 vid = jcom.vname(UUID_1) target_name = CONFIG_OK['target_prefix'] + UUID_1 provider_auth = 'chap user_name 123456789012' cred = {'name': 'user_name'} patches = [ mock.patch.object(jdssd, "_attach_target_volume"), mock.patch.object(jdssd, "_set_target_credentials"), mock.patch.object(jdssd, "_attach_target_volume")] jdssd.ra.is_target.return_value = True jdssd.ra.is_target_lun.return_value = False jdssd.ra.get_target_user.return_value = [cred] self.start_patches(patches) jdssd._ensure_target_volume(id, vid, provider_auth) jdssd.ra.is_target.assert_called_once_with(target_name) jdssd.ra.is_target_lun.assert_called_once_with(target_name, vid) jdssd._attach_target_volume.assert_called_once_with( target_name, vid) jdssd.ra.get_target_user.assert_called_once_with(target_name) jdssd.ra.delete_target_user.assert_not_called() jdssd._set_target_credentials.assert_not_called() self.stop_patches(patches) def test_ensure_target_volume_no_target(self): jdssd, ctx = self.get_jdss_driver(CONFIG_OK) id = UUID_1 vid = jcom.vname(UUID_1) target_name = CONFIG_OK['target_prefix'] + UUID_1 provider_auth = 'chap user_name 123456789012' cred = {'name': 'user_name'} patches = [ mock.patch.object(jdssd, "_create_target_volume"), mock.patch.object(jdssd, "_attach_target_volume"), mock.patch.object(jdssd, "_set_target_credentials"), mock.patch.object(jdssd, "_attach_target_volume")] jdssd.ra.is_target.return_value = False jdssd.ra.get_target_user.return_value = cred['name'] self.start_patches(patches) jdssd._ensure_target_volume(id, vid, provider_auth) jdssd.ra.is_target.assert_called_once_with(target_name) jdssd._create_target_volume.assert_called_once_with(id, vid, provider_auth) jdssd.ra.is_target_lun.assert_not_called() self.stop_patches(patches) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/volume/drivers/open_e/test_iscsi.py0000664000175000017500000006303500000000000025440 0ustar00zuulzuul00000000000000# Copyright (c) 2020 Open-E, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. import re from unittest import mock from oslo_utils import units as o_units from cinder import context from cinder import exception from cinder.tests.unit import fake_snapshot from cinder.tests.unit import fake_volume from cinder.tests.unit import test from cinder.volume.drivers.open_e import iscsi from cinder.volume.drivers.open_e.jovian_common import exception as jexc from cinder.volume.drivers.open_e.jovian_common import jdss_common as jcom UUID_1 = '12345678-1234-1234-1234-000000000001' UUID_2 = '12345678-1234-1234-1234-000000000002' UUID_3 = '12345678-1234-1234-1234-000000000003' UUID_4 = '12345678-1234-1234-1234-000000000004' UUID_S1 = '12345678-1234-1234-1234-100000000001' UUID_S2 = '12345678-1234-1234-1234-100000000002' UUID_S3 = '12345678-1234-1234-1234-100000000003' UUID_S4 = '12345678-1234-1234-1234-100000000004' CONFIG_OK = { 'san_hosts': ['192.168.0.2'], 'san_api_port': 82, 'driver_use_ssl': 'true', 'jovian_rest_send_repeats': 3, 'jovian_recovery_delay': 60, 'jovian_user': 'admin', 'jovian_password': 'password', 'jovian_ignore_tpath': [], 'target_port': 3260, 'jovian_pool': 'Pool-0', 'target_prefix': 'iqn.2020-04.com.open-e.cinder:', 'chap_password_len': 12, 'san_thin_provision': False, 'jovian_block_size': '128K' } CONFIG_BLOCK_SIZE = { 'san_hosts': ['192.168.0.2'], 'san_api_port': 82, 'driver_use_ssl': 'true', 'jovian_rest_send_repeats': 3, 'jovian_recovery_delay': 60, 'jovian_user': 'admin', 'jovian_password': 'password', 'jovian_ignore_tpath': [], 'target_port': 3260, 'jovian_pool': 'Pool-0', 'target_prefix': 'iqn.2020-04.com.open-e.cinder:', 'chap_password_len': 12, 'san_thin_provision': False, 'jovian_block_size': '64K' } CONFIG_BAD_BLOCK_SIZE = { 'san_hosts': ['192.168.0.2'], 'san_api_port': 82, 'driver_use_ssl': 'true', 'jovian_rest_send_repeats': 3, 'jovian_recovery_delay': 60, 'jovian_user': 'admin', 'jovian_password': 'password', 'jovian_ignore_tpath': [], 'target_port': 3260, 'jovian_pool': 'Pool-0', 'target_prefix': 'iqn.2020-04.com.open-e.cinder:', 'chap_password_len': 12, 'san_thin_provision': False, 'jovian_block_size': '61K' } CONFIG_BACKEND_NAME = { 'san_hosts': ['192.168.0.2'], 'san_api_port': 82, 'driver_use_ssl': 'true', 'jovian_rest_send_repeats': 3, 'jovian_recovery_delay': 60, 'jovian_user': 'admin', 'jovian_password': 'password', 'jovian_ignore_tpath': [], 'target_port': 3260, 'jovian_pool': 'Pool-0', 'target_prefix': 'iqn.2020-04.com.open-e.cinder:', 'chap_password_len': 12, 'san_thin_provision': False, 'volume_backend_name': 'JovianDSS', 'reserved_percentage': 10, 'jovian_block_size': '128K' } CONFIG_MULTI_HOST = { 'san_hosts': ['192.168.0.2', '192.168.0.3'], 'san_api_port': 82, 'driver_use_ssl': 'true', 'jovian_rest_send_repeats': 3, 'jovian_recovery_delay': 60, 'jovian_user': 'admin', 'jovian_password': 'password', 'jovian_ignore_tpath': [], 'target_port': 3260, 'jovian_pool': 'Pool-0', 'target_prefix': 'iqn.2020-04.com.open-e.cinder:', 'chap_password_len': 12, 'san_thin_provision': False, 'volume_backend_name': 'JovianDSS', 'reserved_percentage': 10, 'jovian_block_size': '128K' } SNAPSHOTS_CASCADE_1 = [ {"name": jcom.sname(UUID_S1, UUID_1), "clones": "Pool-0/" + jcom.sname(UUID_S1, UUID_1)}, {"name": jcom.sname(UUID_S1, UUID_2), "clones": "Pool-0/" + jcom.sname(UUID_S1, UUID_2)}, {"name": jcom.sname(UUID_S1, UUID_3), "clones": "Pool-0/" + jcom.sname(UUID_S1, UUID_3)}] SNAPSHOTS_CASCADE_2 = [ {"name": jcom.sname(UUID_S1, UUID_1), "clones": 
"Pool-0/" + jcom.sname(UUID_S1, UUID_1)}, {"name": jcom.vname(UUID_2), "clones": "Pool-0/" + jcom.vname(UUID_2)}, {"name": jcom.sname(UUID_S1, UUID_3), "clones": "Pool-0/" + jcom.sname(UUID_S1, UUID_3)}] SNAPSHOTS_CASCADE_3 = [ {"name": jcom.vname(UUID_4), "clones": "Pool-0/" + jcom.vname(UUID_4)}] SNAPSHOTS_EMPTY = [] SNAPSHOTS_CLONE = [ {"name": jcom.vname(UUID_1), "clones": "Pool-0/" + jcom.vname(UUID_1)}] SNAPSHOTS_GARBAGE = [ {"name": jcom.sname(UUID_S1, UUID_1), "clones": "Pool-0/" + jcom.vname(UUID_2)}, {"name": jcom.sname(UUID_S1, UUID_2), "clones": ""}] SNAPSHOTS_RECURSIVE_1 = [ {"name": jcom.sname(UUID_S1, UUID_1), "clones": "Pool-0/" + jcom.sname(UUID_S1, UUID_1)}, {"name": jcom.sname(UUID_S1, UUID_2), "clones": "Pool-0/" + jcom.hidden(UUID_2)}] SNAPSHOTS_RECURSIVE_CHAIN_1 = [ {"name": jcom.sname(UUID_S1, UUID_3), "clones": "Pool-0/" + jcom.hidden(UUID_3)}] SNAPSHOTS_RECURSIVE_CHAIN_2 = [ {"name": jcom.vname(UUID_2), "clones": "Pool-0/" + jcom.hidden(UUID_2)}] def get_jdss_exceptions(): out = [jexc.JDSSException(reason="Testing"), jexc.JDSSRESTException(request="ra request", reason="Testing"), jexc.JDSSRESTProxyException(host="test_host", reason="Testing"), jexc.JDSSResourceNotFoundException(res="test_resource"), jexc.JDSSVolumeNotFoundException(volume="test_volume"), jexc.JDSSSnapshotNotFoundException(snapshot="test_snapshot"), jexc.JDSSResourceExistsException(res="test_resource"), jexc.JDSSSnapshotExistsException(snapshot="test_snapshot"), jexc.JDSSVolumeExistsException(volume="test_volume"), jexc.JDSSResourceIsBusyException(res="test_resource"), jexc.JDSSSnapshotIsBusyException(snapshot="test_snapshot"), jexc.JDSSOSException(message="Some os error")] return out class TestOpenEJovianDSSISCSIDriver(test.TestCase): def get_iscsi_driver(self, config): ctx = context.get_admin_context() cfg = mock.Mock() cfg.append_config_values.return_value = None cfg.get = lambda val, default: config.get(val, default) jdssd = iscsi.JovianISCSIDriver() jdssd.configuration = cfg lib_to_patch = ('cinder.volume.drivers.open_e.jovian_common.rest.' 
'JovianRESTAPI') with mock.patch(lib_to_patch) as ra: ra.is_pool_exists.return_value = True jdssd.do_setup(ctx) jdssd.ra = mock.Mock() jdssd.driver = mock.Mock() return jdssd, ctx def start_patches(self, patches): for p in patches: p.start() def stop_patches(self, patches): for p in patches: p.stop() def test_check_for_setup_error(self): cfg = mock.Mock() cfg.append_config_values.return_value = None jdssd = iscsi.JovianISCSIDriver() jdssd.configuration = cfg jdssd.ra = mock.Mock() jdssd.driver = mock.Mock() # No IP jdssd.driver.rest_config_is_ok.return_value = True jdssd.jovian_hosts = [] jdssd.block_size = ['64K'] self.assertRaises(exception.VolumeDriverException, jdssd.check_for_setup_error) # No pool detected jdssd.driver.rest_config_is_ok.return_value = False jdssd.jovian_hosts = ['192.168.0.2'] jdssd.block_size = ['64K'] self.assertRaises(exception.VolumeDriverException, jdssd.check_for_setup_error) # Bad block size jdssd.driver.rest_config_is_ok.return_value = True jdssd.jovian_hosts = ['192.168.0.2', '192.168.0.3'] jdssd.block_size = ['61K'] self.assertRaises(exception.InvalidConfigurationValue, jdssd.check_for_setup_error) def test_get_provider_info(self): jdssd, ctx = self.get_iscsi_driver(CONFIG_OK) host = CONFIG_OK["san_hosts"][0] port = CONFIG_OK["target_port"] target_name = CONFIG_OK["target_prefix"] + UUID_1 location = '{host}:{port},1 {name} 0'.format( host=host, port=port, name=target_name ) jdssd.driver.get_provider_location.return_value = location ret = jdssd._get_provider_info(UUID_1) jdssd.driver.get_provider_location.assert_called_once_with(UUID_1) self.assertEqual(location, ret['provider_location']) cred_format = (r"CHAP [0-9,a-z,A-Z]{{{name_len}}} " "[0-9,a-z,A-Z]{{{pass_len}}}").format( name_len=8, pass_len=CONFIG_OK['chap_password_len']) self.assertIsNotNone(re.match(cred_format, ret['provider_auth'])) def test_create_volume(self): jdssd, ctx = self.get_iscsi_driver(CONFIG_OK) vol = fake_volume.fake_volume_obj(ctx) vol.id = UUID_1 vol.size = 1 host = CONFIG_OK["san_hosts"][0] port = CONFIG_OK["target_port"] target_name = CONFIG_OK["target_prefix"] + UUID_1 location = '{host}:{port},1 {name} 0'.format( host=host, port=port, name=target_name ) jdssd.driver.get_provider_location.return_value = location ret = jdssd.create_volume(vol) self.assertEqual(location, ret['provider_location']) cred_format = (r"CHAP [0-9,a-z,A-Z]{{{name_len}}} " "[0-9,a-z,A-Z]{{{pass_len}}}").format( name_len=8, pass_len=CONFIG_OK['chap_password_len']) jdssd.driver.create_volume.assert_called_once_with( vol.id, vol.size, sparse=False, block_size="128K") self.assertIsNotNone(re.match(cred_format, ret['provider_auth'])) self.assertEqual(location, ret['provider_location']) def test_create_volume_small_block(self): jdssd, ctx = self.get_iscsi_driver(CONFIG_BLOCK_SIZE) vol = fake_volume.fake_volume_obj(ctx) vol.id = UUID_1 vol.size = 1 host = CONFIG_OK["san_hosts"][0] port = CONFIG_OK["target_port"] target_name = CONFIG_OK["target_prefix"] + UUID_1 location = '{host}:{port},1 {name} 0'.format( host=host, port=port, name=target_name ) jdssd.driver.create_volume.return_value = None jdssd.driver.get_provider_location.return_value = location jdssd.ra.get_active_host.return_value = host ret = jdssd.create_volume(vol) jdssd.driver.create_volume.assert_called_once_with( vol.id, vol.size, sparse=False, block_size="64K") location = '{host}:{port},1 {name} 0'.format( host=host, port=port, name=target_name ) self.assertEqual(location, ret['provider_location']) cred_format = (r"CHAP [0-9,a-z,A-Z]{{{name_len}}} 
" "[0-9,a-z,A-Z]{{{pass_len}}}").format( name_len=8, pass_len=CONFIG_OK['chap_password_len']) self.assertIsNotNone(re.match(cred_format, ret['provider_auth'])) def test_delete_volume_cascade(self): # Volume with 3 snapshots and 1 clone of a snapshots # We should delete childless snapshots # and then cal for volume deletion jdssd, ctx = self.get_iscsi_driver(CONFIG_OK) vol = fake_volume.fake_volume_obj(ctx) vol.id = UUID_1 jdssd.driver.delete_volume.return_value = None jdssd.delete_volume(vol, cascade=True) jdssd.driver.delete_volume.assert_called_once_with(UUID_1, cascade=True) def test_delete_volume_exceptions(self): jdssd, ctx = self.get_iscsi_driver(CONFIG_OK) vol = fake_volume.fake_volume_obj(ctx) vol.id = UUID_1 for exc in get_jdss_exceptions(): jdssd.driver.delete_volume.side_effect = exc try: jdssd.delete_volume(vol, cascade=False) except Exception as err: self.assertIsInstance(err, exception.VolumeBackendAPIException) def test_extend_volume(self): jdssd, ctx = self.get_iscsi_driver(CONFIG_OK) vol = fake_volume.fake_volume_obj(ctx) vol.id = UUID_1 jdssd.driver.resize_volume.return_value = None jdssd.extend_volume(vol, 2) jdssd.driver.resize_volume.assert_called_once_with( UUID_1, 2) def test_extend_volume_exceptions(self): jdssd, ctx = self.get_iscsi_driver(CONFIG_OK) vol = fake_volume.fake_volume_obj(ctx) vol.id = UUID_1 for exc in get_jdss_exceptions(): try: jdssd.extend_volume(vol, 2) except Exception as err: self.assertIsInstance(err, exception.VolumeBackendAPIException) def test_create_cloned_volume(self): jdssd, ctx = self.get_iscsi_driver(CONFIG_OK) sparse = CONFIG_OK['san_thin_provision'] orig_vol = fake_volume.fake_volume_obj(ctx) orig_vol.id = UUID_1 orig_vol.size = 1 clone_vol = fake_volume.fake_volume_obj(ctx) clone_vol.id = UUID_2 clone_vol.size = 1 host = CONFIG_OK["san_hosts"][0] port = CONFIG_OK["target_port"] target_name = CONFIG_OK["target_prefix"] + UUID_2 location = '{host}:{port},1 {name} 0'.format( host=host, port=port, name=target_name ) cred_format = (r"CHAP [0-9,a-z,A-Z]{{{name_len}}} " "[0-9,a-z,A-Z]{{{pass_len}}}").format( name_len=8, pass_len=CONFIG_OK['chap_password_len']) jdssd.driver.get_provider_location.return_value = location jdssd.driver.create_cloned_volume.return_value = None ret = jdssd.create_cloned_volume(clone_vol, orig_vol) jdssd.driver.create_cloned_volume.assert_called_once_with( clone_vol.id, orig_vol.id, clone_vol.size, sparse=sparse) self.assertEqual(location, ret['provider_location']) self.assertIsNotNone(re.match(cred_format, ret['provider_auth'])) self.assertEqual(location, ret['provider_location']) def test_create_volume_from_snapshot(self): jdssd, ctx = self.get_iscsi_driver(CONFIG_OK) orig_snap = fake_snapshot.fake_snapshot_obj(ctx) orig_snap.id = UUID_S1 orig_snap.volume_id = UUID_1 clone_vol = fake_volume.fake_volume_obj(ctx) clone_vol.id = UUID_2 clone_vol.size = 2 host = CONFIG_OK["san_hosts"][0] port = CONFIG_OK["target_port"] target_name = CONFIG_OK["target_prefix"] + UUID_2 location = '{host}:{port},1 {name} 0'.format( host=host, port=port, name=target_name ) cred_format = (r"CHAP [0-9,a-z,A-Z]{{{name_len}}} " "[0-9,a-z,A-Z]{{{pass_len}}}").format( name_len=8, pass_len=CONFIG_OK['chap_password_len']) patches = [ mock.patch.object( jdssd, "_get_provider_auth", return_value=cred_format)] jdssd.driver.get_provider_location.return_value = location jdssd.driver.create_cloned_volume.return_value = None self.start_patches(patches) ret = jdssd.create_volume_from_snapshot(clone_vol, orig_snap) 
jdssd.driver.create_cloned_volume.assert_called_once_with( clone_vol.id, orig_snap.volume_id, clone_vol.size, snapshot_name=orig_snap.id) self.stop_patches(patches) self.assertEqual(location, ret['provider_location']) self.assertEqual(cred_format, ret['provider_auth']) def test_create_snapshot(self): jdssd, ctx = self.get_iscsi_driver(CONFIG_OK) snap = fake_snapshot.fake_snapshot_obj(ctx, id=UUID_S1) snap.volume_id = UUID_1 jdssd.driver.create_snapshot.return_value = None jdssd.create_snapshot(snap) jdssd.driver.create_snapshot.assert_called_once_with(UUID_S1, UUID_1) def test_delete_snapshot(self): jdssd, ctx = self.get_iscsi_driver(CONFIG_OK) snap = fake_snapshot.fake_snapshot_obj(ctx, id=UUID_S1, volume_id=UUID_1) jdssd.driver.delete_snapshot.return_value = None jdssd.delete_snapshot(snap) jdssd.driver.delete_snapshot.assert_called_once_with(UUID_1, UUID_S1) def test_delete_snapshot_exceptions(self): jdssd, ctx = self.get_iscsi_driver(CONFIG_OK) snap = fake_snapshot.fake_snapshot_obj(ctx, id=UUID_1) for exc in get_jdss_exceptions(): jdssd.driver.delete_snapshot.side_effect = exc try: ret = jdssd.delete_snapshot(snap) if isinstance(exc, jexc.JDSSVolumeNotFoundException): self.assertTrue(ret is None) except Exception as err: self.assertIsInstance(err, exception.VolumeBackendAPIException) def test_local_path(self): jdssd, ctx = self.get_iscsi_driver(CONFIG_OK) vol = fake_snapshot.fake_snapshot_obj(ctx, id=UUID_1) self.assertRaises(NotImplementedError, jdssd.local_path, vol) def test_get_provider_auth(self): jdssd, ctx = self.get_iscsi_driver(CONFIG_OK) auth = jdssd._get_provider_auth() cred_format = (r"CHAP [0-9,a-z,A-Z]{{{name_len}}} " "[0-9,a-z,A-Z]{{{pass_len}}}").format( name_len=8, pass_len=CONFIG_OK['chap_password_len']) self.assertIsNotNone(re.match(cred_format, auth)) def test_get_provider_auth_long(self): long_pass_config = CONFIG_OK.copy() long_pass_config['chap_password_len'] = 16 jdssd, ctx = self.get_iscsi_driver(long_pass_config) auth = jdssd._get_provider_auth() cred_format = (r"CHAP [0-9,a-z,A-Z]{{{name_len}}} " "[0-9,a-z,A-Z]{{{pass_len}}}").format( name_len=8, pass_len=16) self.assertIsNotNone(re.match(cred_format, auth)) def test_create_export(self): jdssd, ctx = self.get_iscsi_driver(CONFIG_OK) vol = fake_volume.fake_volume_obj(ctx, id=UUID_1) host = CONFIG_OK["san_hosts"][0] port = CONFIG_OK["target_port"] target_name = CONFIG_OK["target_prefix"] + UUID_1 location = '{host}:{port},1 {name} 0'.format( host=host, port=port, name=target_name ) jdssd.driver.get_provider_location.return_value = location jdssd.driver.get_provider_location.return_value = location ret = jdssd.create_export(ctx, vol, "connector") jdssd.driver.ensure_export.assert_called_once_with(vol.id, mock.ANY) self.assertEqual(location, ret["provider_location"]) def test_ensure_export(self): jdssd, ctx = self.get_iscsi_driver(CONFIG_OK) vol = fake_volume.fake_volume_obj(ctx) vol.id = UUID_1 host = CONFIG_OK["san_hosts"][0] port = CONFIG_OK["target_port"] target_name = CONFIG_OK["target_prefix"] + UUID_1 location = '{host}:{port},1 {name} 0'.format( host=host, port=port, name=target_name ) auth = 'chap user_name 123456789012' jdssd.driver.get_provider_location.return_value = location with mock.patch.object(jdssd, '_get_provider_auth'): jdssd._get_provider_auth.return_value = auth ret = jdssd.ensure_export(ctx, vol) jdssd._get_provider_auth.assert_called_once() jdssd.driver.ensure_export.assert_called_once_with(vol.id, auth) self.assertEqual(location, ret["provider_location"]) def test_remove_export(self): 
jdssd, ctx = self.get_iscsi_driver(CONFIG_OK) vol = fake_volume.fake_volume_obj(ctx, id=UUID_1) jdssd.remove_export(ctx, vol) jdssd.driver.remove_export.assert_called_once_with(UUID_1) def test_update_volume_stats(self): jdssd, ctx = self.get_iscsi_driver(CONFIG_BACKEND_NAME) location_info = 'JovianISCSIDriver:192.168.0.2:Pool-0' correct_out = { 'vendor_name': 'Open-E', 'driver_version': "1.0.3", 'storage_protocol': 'iSCSI', 'total_capacity_gb': 100, 'free_capacity_gb': 50, 'reserved_percentage': 10, 'volume_backend_name': CONFIG_BACKEND_NAME['volume_backend_name'], 'QoS_support': False, 'location_info': location_info, 'multiattach': True } jdssd.ra.get_pool_stats.return_value = { 'size': 100 * o_units.Gi, 'available': 50 * o_units.Gi} jdssd.ra.get_active_host.return_value = CONFIG_OK['san_hosts'][0] jdssd._update_volume_stats() self.assertEqual(correct_out, jdssd._stats) def test_get_iscsi_properties(self): jdssd, ctx = self.get_iscsi_driver(CONFIG_OK) provider_auth = 'chap user_name 123456789012' target_name = CONFIG_OK['target_prefix'] + UUID_1 with mock.patch.object(jdssd.ra, "get_active_host"): jdssd.ra.get_active_host.return_value = CONFIG_OK['san_hosts'][0] ret = jdssd._get_iscsi_properties(UUID_1, provider_auth, multipath=False) expected = {'auth_method': 'chap', 'auth_password': '123456789012', 'auth_username': 'user_name', 'target_discovered': False, 'target_iqn': target_name, 'target_lun': 0, 'target_portal': '192.168.0.2:3260'} self.assertEqual(expected, ret) def test_get_iscsi_properties_multipath(self): jdssd, ctx = self.get_iscsi_driver(CONFIG_MULTI_HOST) provider_auth = 'chap user_name 123456789012' target_name = CONFIG_OK['target_prefix'] + UUID_1 ret = jdssd._get_iscsi_properties(UUID_1, provider_auth, multipath=True) expected = {'auth_method': 'chap', 'auth_password': '123456789012', 'auth_username': 'user_name', 'target_discovered': False, 'target_iqns': [target_name, target_name], 'target_lun': 0, 'target_luns': [0, 0], 'target_portals': ['192.168.0.2:3260', '192.168.0.3:3260']} self.assertEqual(expected, ret) def test_initialize_connection(self): jdssd, ctx = self.get_iscsi_driver(CONFIG_MULTI_HOST) vol = fake_volume.fake_volume_obj(ctx, id=UUID_1) vol.provider_auth = 'chap user_name 123456789012' connector = {'multipath': False, 'ip': '172.16.0.2'} target_name = CONFIG_OK['target_prefix'] + UUID_1 properties = {'auth_method': 'chap', 'auth_password': '123456789012', 'auth_username': 'user_name', 'target_discovered': False, 'target_iqn': target_name, 'target_lun': 0, 'target_portal': '192.168.0.2:3260'} con_info = { 'driver_volume_type': 'iscsi', 'data': properties, } jdssd.driver.initialize_connection.return_value = con_info init_con_exp = [mock.call(vol.id, vol.provider_auth, multipath=False)] with mock.patch.object(jdssd.ra, "get_active_host"): jdssd.ra.get_active_host.return_value = CONFIG_OK["san_hosts"][0] ret = jdssd.initialize_connection(vol, connector) jdssd.driver.initialize_connection.assert_has_calls(init_con_exp) self.assertEqual(con_info, ret) def test_initialize_connection_snapshot(self): jdssd, ctx = self.get_iscsi_driver(CONFIG_MULTI_HOST) snap = fake_snapshot.fake_snapshot_obj(ctx, id=UUID_1) snap.provider_auth = 'chap user_name 123456789012' connector = {'multipath': True, 'ip': '172.16.0.2'} target_name = CONFIG_OK['target_prefix'] + UUID_1 properties = {'auth_method': 'chap', 'auth_password': '123456789012', 'auth_username': 'user_name', 'target_discovered': False, 'target_iqns': [target_name, target_name], 'target_lun': 0, 'target_luns': [0, 0], 
'target_portals': ['192.168.0.2:3260', '192.168.0.3:3260']} con_info = { 'driver_volume_type': 'iscsi', 'data': properties, } init_con_exp = [mock.call(snap.volume_id, snap.provider_auth, snapshot_id=snap.id, multipath=True)] jdssd.driver.initialize_connection.return_value = con_info ret = jdssd.initialize_connection_snapshot(snap, connector) jdssd.driver.initialize_connection.assert_has_calls(init_con_exp) self.assertEqual(con_info, ret) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/volume/drivers/open_e/test_rest.py0000664000175000017500000014755600000000000025316 0ustar00zuulzuul00000000000000# Copyright (c) 2020 Open-E, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from unittest import mock from oslo_utils import units as o_units from cinder import context from cinder.tests.unit import test from cinder.volume.drivers.open_e.jovian_common import exception as jexc from cinder.volume.drivers.open_e.jovian_common import jdss_common as jcom from cinder.volume.drivers.open_e.jovian_common import rest UUID_1 = '12345678-1234-1234-1234-000000000001' UUID_2 = '12345678-1234-1234-1234-000000000002' UUID_3 = '12345678-1234-1234-1234-000000000003' UUID_S1 = '12345678-1234-1234-1234-100000000001' UUID_S2 = '12345678-1234-1234-1234-100000000002' UUID_S3 = '12345678-1234-1234-1234-100000000003' CONFIG_OK = { 'san_hosts': ['192.168.0.2'], 'san_api_port': 82, 'driver_use_ssl': 'true', 'jovian_rest_send_repeats': 3, 'jovian_recovery_delay': 60, 'san_login': 'admin', 'san_password': 'password', 'jovian_ignore_tpath': [], 'target_port': 3260, 'jovian_pool': 'Pool-0', 'target_prefix': 'iqn.2020-04.com.open-e.cinder:', 'chap_password_len': 12, 'san_thin_provision': False, 'jovian_block_size': '128K' } class TestOpenEJovianRESTAPI(test.TestCase): def get_rest(self, config): ctx = context.get_admin_context() cfg = mock.Mock() cfg.append_config_values.return_value = None cfg.safe_get = lambda val: config[val] cfg.get = lambda val, default: config.get(val, default) jdssr = rest.JovianRESTAPI(config) jdssr.rproxy = mock.Mock() return jdssr, ctx def test_get_active_host(self): jrest, ctx = self.get_rest(CONFIG_OK) jrest.rproxy.get_active_host.return_value = "test_data" ret = jrest.get_active_host() self.assertEqual("test_data", ret) def test_is_pool_exists(self): jrest, ctx = self.get_rest(CONFIG_OK) resp = {'code': 200, 'error': None} jrest.rproxy.pool_request.return_value = resp self.assertTrue(jrest.is_pool_exists()) err = {'errorid': 12} resp = {'code': 404, 'error': err} jrest.rproxy.pool_request.return_value = resp self.assertFalse(jrest.is_pool_exists()) pool_request_expected = [ mock.call('GET', ''), mock.call('GET', '')] jrest.rproxy.pool_request.assert_has_calls(pool_request_expected) def get_iface_info(self): jrest, ctx = self.get_rest(CONFIG_OK) resp = { 'code': 200, 'error': None} jrest.rproxy.pool_request.return_value = resp self.assertTrue(jrest.is_pool_exists()) def test_get_luns(self): 
jrest, ctx = self.get_rest(CONFIG_OK) resp = {'data': [{ 'vscan': None, 'full_name': 'Pool-0/' + UUID_1, 'userrefs': None, 'primarycache': 'all', 'logbias': 'latency', 'creation': '1591543140', 'sync': 'always', 'is_clone': False, 'dedup': 'off', 'sharenfs': None, 'receive_resume_token': None, 'volsize': '1073741824'}], 'error': None, 'code': 200} jrest.rproxy.pool_request.return_value = resp self.assertEqual(resp['data'], jrest.get_luns()) err = {'errorid': 12, 'message': 'test failure'} resp = {'code': 404, 'data': None, 'error': err} jrest.rproxy.pool_request.return_value = resp self.assertRaises(jexc.JDSSRESTException, jrest.get_luns) get_luns_expected = [ mock.call('GET', "/volumes"), mock.call('GET', "/volumes")] jrest.rproxy.pool_request.assert_has_calls(get_luns_expected) def test_create_lun(self): jrest, ctx = self.get_rest(CONFIG_OK) resp = {'data': { 'vscan': None, 'full_name': 'Pool-0/' + jcom.vname(UUID_1), 'userrefs': None, 'primarycache': 'all', 'logbias': 'latency', 'creation': '1591543140', 'sync': 'always', 'is_clone': False, 'dedup': 'off', 'sharenfs': None, 'receive_resume_token': None, 'volsize': '1073741824'}, 'error': None, 'code': 200} jbody = { 'name': jcom.vname(UUID_1), 'size': "1073741824", 'sparse': False } jbody_sparse = { 'name': jcom.vname(UUID_1), 'size': "1073741824", 'sparse': True } jrest.rproxy.pool_request.return_value = resp self.assertIsNone(jrest.create_lun(jcom.vname(UUID_1), o_units.Gi)) err = {'errno': '5', 'message': 'test failure'} resp = {'code': 404, 'data': None, 'error': err} jrest.rproxy.pool_request.return_value = resp self.assertRaises(jexc.JDSSRESTException, jrest.create_lun, jcom.vname(UUID_1), o_units.Gi, sparse=True) addr = "/volumes" create_lun_expected = [ mock.call('POST', addr, json_data=jbody), mock.call('POST', addr, json_data=jbody_sparse)] jrest.rproxy.pool_request.assert_has_calls(create_lun_expected) def test_extend_lun(self): jrest, ctx = self.get_rest(CONFIG_OK) resp = {'data': None, 'error': None, 'code': 201} jbody = { 'size': "2147483648", } jrest.rproxy.pool_request.return_value = resp self.assertIsNone(jrest.extend_lun(jcom.vname(UUID_1), 2 * o_units.Gi)) err = {'message': 'test failure'} resp = {'code': 500, 'data': None, 'error': err} jrest.rproxy.pool_request.return_value = resp self.assertRaises(jexc.JDSSRESTException, jrest.extend_lun, jcom.vname(UUID_1), 2 * o_units.Gi) addr = "/volumes/" + jcom.vname(UUID_1) create_lun_expected = [ mock.call('PUT', addr, json_data=jbody), mock.call('PUT', addr, json_data=jbody)] jrest.rproxy.pool_request.assert_has_calls(create_lun_expected) def test_is_lun(self): jrest, ctx = self.get_rest(CONFIG_OK) resp = {'data': { "vscan": None, "full_name": "Pool-0/" + jcom.vname(UUID_1), "userrefs": None, "primarycache": "all", "logbias": "latency", "creation": "1591543140", "sync": "always", "is_clone": False, "dedup": "off", "sharenfs": None, "receive_resume_token": None, "volsize": "1073741824"}, 'error': None, 'code': 200} jrest.rproxy.pool_request.return_value = resp self.assertTrue(jrest.is_lun(jcom.vname(UUID_1))) err = {'errno': 1, 'message': ('Zfs resource: Pool-0/' + jcom.vname(UUID_1) + ' not found in this collection.')} resp = {'code': 500, 'data': None, 'error': err} jrest.rproxy.pool_request.return_value = resp self.assertEqual(False, jrest.is_lun(jcom.vname(UUID_1))) jrest.rproxy.pool_request.side_effect = ( jexc.JDSSRESTProxyException(host='test_host', reason='test')) self.assertRaises(jexc.JDSSRESTProxyException, jrest.is_lun, 'v_' + UUID_1) def test_get_lun(self): 
jrest, ctx = self.get_rest(CONFIG_OK) resp = {'data': {"vscan": None, "full_name": "Pool-0/v_" + UUID_1, "userrefs": None, "primarycache": "all", "logbias": "latency", "creation": "1591543140", "sync": "always", "is_clone": False, "dedup": "off", "sharenfs": None, "receive_resume_token": None, "volsize": "1073741824"}, 'error': None, 'code': 200} jrest.rproxy.pool_request.return_value = resp self.assertEqual(resp['data'], jrest.get_lun('v_' + UUID_1)) err = {'errno': 1, 'message': ('Zfs resource: Pool-0/v_' + UUID_1 + ' not found in this collection.')} resp = {'code': 500, 'data': None, 'error': err} jrest.rproxy.pool_request.return_value = resp self.assertRaises(jexc.JDSSResourceNotFoundException, jrest.get_lun, 'v_' + UUID_1) jrest.rproxy.pool_request.return_value = resp self.assertRaises(jexc.JDSSResourceNotFoundException, jrest.get_lun, 'v_' + UUID_1) err = {'errno': 10, 'message': ('Test error')} resp = {'code': 500, 'data': None, 'error': err} jrest.rproxy.pool_request.return_value = resp self.assertRaises(jexc.JDSSException, jrest.get_lun, 'v_' + UUID_1) def test_modify_lun(self): jrest, ctx = self.get_rest(CONFIG_OK) resp = {'data': None, 'error': None, 'code': 201} req = {'name': 'v_' + UUID_2} jrest.rproxy.pool_request.return_value = resp self.assertIsNone(jrest.modify_lun('v_' + UUID_1, prop=req)) err = {'errno': 1, 'message': ('Zfs resource: Pool-0/v_' + UUID_1 + ' not found in this collection.')} resp = {'code': 500, 'data': None, 'error': err} jrest.rproxy.pool_request.return_value = resp self.assertRaises(jexc.JDSSResourceNotFoundException, jrest.modify_lun, 'v_' + UUID_1, prop=req) err = {'errno': 10, 'message': ('Test error')} resp = {'code': 500, 'data': None, 'error': err} jrest.rproxy.pool_request.return_value = resp self.assertRaises(jexc.JDSSException, jrest.modify_lun, 'v_' + UUID_1, prop=req) addr = "/volumes/v_" + UUID_1 modify_lun_expected = [ mock.call('PUT', addr, json_data=req), mock.call('PUT', addr, json_data=req), mock.call('PUT', addr, json_data=req)] jrest.rproxy.pool_request.assert_has_calls(modify_lun_expected) def test_make_readonly_lun(self): jrest, ctx = self.get_rest(CONFIG_OK) resp = {'data': None, 'error': None, 'code': 201} req = {'property_name': 'readonly', 'property_value': 'on'} jrest.rproxy.pool_request.return_value = resp self.assertIsNone(jrest.modify_lun('v_' + UUID_1, prop=req)) addr = "/volumes/v_" + UUID_1 modify_lun_expected = [mock.call('PUT', addr, json_data=req)] jrest.rproxy.pool_request.assert_has_calls(modify_lun_expected) def test_delete_lun(self): jrest, ctx = self.get_rest(CONFIG_OK) # Delete OK resp = {'data': None, 'error': None, 'code': 204} jrest.rproxy.pool_request.return_value = resp self.assertIsNone(jrest.delete_lun('v_' + UUID_1)) addr = "/volumes/v_" + UUID_1 delete_lun_expected = [mock.call('DELETE', addr)] jrest.rproxy.pool_request.assert_has_calls(delete_lun_expected) # No volume to delete err = {'errno': 1, 'message': ('Zfs resource: Pool-0/v_' + UUID_1 + ' not found in this collection.')} resp = {'code': 500, 'data': None, 'error': err} jrest.rproxy.pool_request.return_value = resp self.assertIsNone(jrest.delete_lun('v_' + UUID_1)) delete_lun_expected += [mock.call('DELETE', addr)] jrest.rproxy.pool_request.assert_has_calls(delete_lun_expected) # Volume has snapshots msg = ("cannot destroy 'Pool-0/{vol}': volume has children\nuse '-r'" " to destroy the following datasets:\nPool-0/{vol}@s1") msg = msg.format(vol='v_' + UUID_1) url = "http://192.168.0.2:82/api/v3/pools/Pool-0/volumes/" + UUID_1 err = {"class": 
"zfslib.wrap.zfs.ZfsCmdError", "errno": 1000, "message": msg, "url": url} resp = { 'code': 500, 'data': None, 'error': err} delete_lun_expected += [mock.call('DELETE', addr)] jrest.rproxy.pool_request.return_value = resp self.assertRaises( jexc.JDSSResourceIsBusyException, jrest.delete_lun, 'v_' + UUID_1) jrest.rproxy.pool_request.assert_has_calls(delete_lun_expected) def test_delete_lun_args(self): jrest, ctx = self.get_rest(CONFIG_OK) addr = "/volumes/v_" + UUID_1 # Delete OK resp = {'data': None, 'error': None, 'code': 204} req = {'recursively_children': True, 'force_umount': True} delete_lun_expected = [mock.call('DELETE', addr, json_data=req)] jrest.rproxy.pool_request.return_value = resp self.assertIsNone( jrest.delete_lun('v_' + UUID_1, recursively_children=True, force_umount=True)) jrest.rproxy.pool_request.assert_has_calls(delete_lun_expected) def test_is_target(self): jrest, ctx = self.get_rest(CONFIG_OK) tname = CONFIG_OK['target_prefix'] + UUID_1 addr = '/san/iscsi/targets/{}'.format(tname) data = {'incoming_users_active': True, 'name': tname, 'allow_ip': [], 'outgoing_user': None, 'active': True, 'conflicted': False, 'deny_ip': []} resp = {'data': data, 'error': None, 'code': 200} is_target_expected = [mock.call('GET', addr)] jrest.rproxy.pool_request.return_value = resp self.assertTrue(jrest.is_target(tname)) msg = "Target {} not exists.".format(tname) url = ("http://{addr}:{port}/api/v3/pools/Pool-0/" "san/iscsi/targets/{target}") url = url.format(addr=CONFIG_OK['san_hosts'][0], port=CONFIG_OK['san_api_port'], target=tname) err = {"class": "opene.exceptions.ItemNotFoundError", "message": msg, "url": url} resp = {'data': None, 'error': err, 'code': 404} is_target_expected += [mock.call('GET', addr)] jrest.rproxy.pool_request.return_value = resp self.assertEqual(False, jrest.is_target(tname)) jrest.rproxy.pool_request.assert_has_calls(is_target_expected) def test_create_target(self): jrest, ctx = self.get_rest(CONFIG_OK) # Create OK tname = CONFIG_OK['target_prefix'] + UUID_1 addr = '/san/iscsi/targets' data = {'incoming_users_active': True, 'name': tname, 'allow_ip': [], 'outgoing_user': None, 'active': True, 'conflicted': False, 'deny_ip': []} resp = {'data': data, 'error': None, 'code': 201} req = {'name': tname, 'active': True, 'incoming_users_active': True} jrest.rproxy.pool_request.return_value = resp create_target_expected = [mock.call('POST', addr, json_data=req)] self.assertIsNone(jrest.create_target(tname)) # Target exists tname = CONFIG_OK['target_prefix'] + UUID_1 addr = '/san/iscsi/targets' data = {'incoming_users_active': True, 'name': tname, 'allow_ip': [], 'outgoing_user': None, 'active': True, 'conflicted': False, 'deny_ip': []} resp = {'data': data, 'error': None, 'code': 201} url = ("http://{addr}:{port}/api/v3/pools/Pool-0/" "san/iscsi/targets") url = url.format(addr=CONFIG_OK['san_hosts'][0], port=CONFIG_OK['san_api_port']) msg = "Target with name {} is already present on Pool-0.".format(tname) err = {"class": "opene.san.target.base.iscsi.TargetNameConflictError", "message": msg, "url": url} resp = {'data': None, 'error': err, 'code': 409} jrest.rproxy.pool_request.return_value = resp create_target_expected += [mock.call('POST', addr, json_data=req)] self.assertRaises(jexc.JDSSResourceExistsException, jrest.create_target, tname) # Unknown error tname = CONFIG_OK['target_prefix'] + UUID_1 addr = "/san/iscsi/targets" resp = {'data': data, 'error': None, 'code': 500} url = ("http://{addr}:{port}/api/v3/pools/Pool-0/" "san/iscsi/targets") url = 
url.format(addr=CONFIG_OK['san_hosts'][0], port=CONFIG_OK['san_api_port']) msg = "Target with name {} faced some fatal failure.".format(tname) err = {"class": "some test error", "message": msg, "url": url, "errno": 123} resp = {'data': None, 'error': err, 'code': 500} jrest.rproxy.pool_request.return_value = resp create_target_expected += [mock.call('POST', addr, json_data=req)] self.assertRaises(jexc.JDSSException, jrest.create_target, tname) jrest.rproxy.pool_request.assert_has_calls(create_target_expected) def test_delete_target(self): jrest, ctx = self.get_rest(CONFIG_OK) # Delete OK tname = CONFIG_OK['target_prefix'] + UUID_1 addr = '/san/iscsi/targets/{}'.format(tname) resp = {'data': None, 'error': None, 'code': 204} jrest.rproxy.pool_request.return_value = resp delete_target_expected = [mock.call('DELETE', addr)] self.assertIsNone(jrest.delete_target(tname)) # Delete no such target url = ("http://{addr}:{port}/api/v3/pools/Pool-0/" "san/iscsi/targets") url = url.format(addr=CONFIG_OK['san_hosts'][0], port=CONFIG_OK['san_api_port']) err = {"class": "opene.exceptions.ItemNotFoundError", "message": "Target {} not exists.".format(tname), "url": url} resp = {'data': None, 'error': err, 'code': 404} jrest.rproxy.pool_request.return_value = resp delete_target_expected += [mock.call('DELETE', addr)] self.assertRaises(jexc.JDSSResourceNotFoundException, jrest.delete_target, tname) # Delete unknown error err = {"class": "some test error", "message": "test error message", "url": url, "errno": 123} resp = {'data': None, 'error': err, 'code': 500} jrest.rproxy.pool_request.return_value = resp delete_target_expected += [mock.call('DELETE', addr)] self.assertRaises(jexc.JDSSException, jrest.delete_target, tname) jrest.rproxy.pool_request.assert_has_calls(delete_target_expected) def test_create_target_user(self): jrest, ctx = self.get_rest(CONFIG_OK) # Modify OK tname = CONFIG_OK['target_prefix'] + UUID_1 addr = '/san/iscsi/targets/{}/incoming-users'.format(tname) chap_cred = {"name": "chapuser", "password": "123456789012"} resp = {'data': None, 'error': None, 'code': 201} jrest.rproxy.pool_request.return_value = resp expected = [mock.call('POST', addr, json_data=chap_cred)] self.assertIsNone(jrest.create_target_user(tname, chap_cred)) # No such target url = ("http://{addr}:{port}/api/v3/pools/Pool-0/" "san/iscsi/targets") url = url.format(addr=CONFIG_OK['san_hosts'][0], port=CONFIG_OK['san_api_port']) err = {"class": "opene.exceptions.ItemNotFoundError", "message": "Target {} not exists.".format(tname), "url": url} resp = {'data': None, 'error': err, 'code': 404} jrest.rproxy.pool_request.return_value = resp expected += [mock.call('POST', addr, json_data=chap_cred)] self.assertRaises(jexc.JDSSResourceNotFoundException, jrest.create_target_user, tname, chap_cred) # Unknown error err = {"class": "some test error", "message": "test error message", "url": url, "errno": 123} resp = {'data': None, 'error': err, 'code': 500} jrest.rproxy.pool_request.return_value = resp expected += [mock.call('POST', addr, json_data=chap_cred)] self.assertRaises(jexc.JDSSException, jrest.create_target_user, tname, chap_cred) jrest.rproxy.pool_request.assert_has_calls(expected) def test_get_target_user(self): jrest, ctx = self.get_rest(CONFIG_OK) # Get OK tname = CONFIG_OK['target_prefix'] + UUID_1 addr = '/san/iscsi/targets/{}/incoming-users'.format(tname) chap_users = {"name": "chapuser"} resp = {'data': chap_users, 'error': None, 'code': 200} jrest.rproxy.pool_request.return_value = resp get_target_user_expected = 
[mock.call('GET', addr)] self.assertEqual(chap_users, jrest.get_target_user(tname)) # No such target url = ("http://{addr}:{port}/api/v3/pools/Pool-0/" "san/iscsi/targets") url = url.format(addr=CONFIG_OK['san_hosts'][0], port=CONFIG_OK['san_api_port']) err = {"class": "opene.exceptions.ItemNotFoundError", "message": "Target {} not exists.".format(tname), "url": url} resp = {'data': None, 'error': err, 'code': 404} jrest.rproxy.pool_request.return_value = resp get_target_user_expected += [mock.call('GET', addr)] self.assertRaises(jexc.JDSSResourceNotFoundException, jrest.get_target_user, tname) # Unknown error err = {"class": "some test error", "message": "test error message", "url": url, "errno": 123} resp = {'data': None, 'error': err, 'code': 500} jrest.rproxy.pool_request.return_value = resp get_target_user_expected += [mock.call('GET', addr)] self.assertRaises(jexc.JDSSException, jrest.get_target_user, tname) jrest.rproxy.pool_request.assert_has_calls(get_target_user_expected) def test_delete_target_user(self): jrest, ctx = self.get_rest(CONFIG_OK) # Delete OK tname = CONFIG_OK['target_prefix'] + UUID_1 user = "chapuser" addr = '/san/iscsi/targets/{}/incoming-users/chapuser'.format(tname) resp = {'data': None, 'error': None, 'code': 204} jrest.rproxy.pool_request.return_value = resp delete_target_user_expected = [mock.call('DELETE', addr)] self.assertIsNone(jrest.delete_target_user(tname, user)) # No such user url = ("http://{addr}:{port}/api/v3/pools/Pool-0/" "san/iscsi/targets/{tname}/incoming-user/{chapuser}") url = url.format(addr=CONFIG_OK['san_hosts'][0], port=CONFIG_OK['san_api_port'], tname=tname, chapuser=user) err = {"class": "opene.exceptions.ItemNotFoundError", "message": "User {} not exists.".format(user), "url": url} resp = {'data': None, 'error': err, 'code': 404} jrest.rproxy.pool_request.return_value = resp delete_target_user_expected += [mock.call('DELETE', addr)] self.assertRaises(jexc.JDSSResourceNotFoundException, jrest.delete_target_user, tname, user) # Unknown error err = {"class": "some test error", "message": "test error message", "url": url, "errno": 123} resp = {'data': None, 'error': err, 'code': 500} jrest.rproxy.pool_request.return_value = resp delete_target_user_expected += [mock.call('DELETE', addr)] self.assertRaises(jexc.JDSSException, jrest.delete_target_user, tname, user) jrest.rproxy.pool_request.assert_has_calls(delete_target_user_expected) def test_is_target_lun(self): jrest, ctx = self.get_rest(CONFIG_OK) # lun present tname = CONFIG_OK['target_prefix'] + UUID_1 vname = jcom.vname(UUID_1) addr = '/san/iscsi/targets/{target}/luns/{lun}'.format( target=tname, lun=vname) data = { "block_size": 512, "device_handler": "vdisk_fileio", "lun": 0, "mode": "wt", "name": vname, "prod_id": "Storage", "scsi_id": "99e2c883331edf87"} resp = {'data': data, 'error': None, 'code': 200} jrest.rproxy.pool_request.return_value = resp is_target_lun_expected = [mock.call('GET', addr)] self.assertTrue(jrest.is_target_lun(tname, vname)) url = "http://{ip}:{port}/api/v3/pools/Pool-0{addr}" url = url.format(ip=CONFIG_OK['san_hosts'][0], port=CONFIG_OK['san_api_port'], tname=tname, addr=addr) msg = "volume name {lun} is not attached to target {target}" msg = msg.format(lun=vname, target=tname) err = {"class": "opene.exceptions.ItemNotFoundError", "message": msg, "url": url} resp = {'data': None, 'error': err, 'code': 404} jrest.rproxy.pool_request.return_value = resp is_target_lun_expected += [mock.call('GET', addr)] self.assertEqual(False, jrest.is_target_lun(tname, 
vname)) err = {"class": "some test error", "message": "test error message", "url": url, "errno": 123} resp = {'data': None, 'error': err, 'code': 500} jrest.rproxy.pool_request.return_value = resp is_target_lun_expected += [mock.call('GET', addr)] self.assertRaises(jexc.JDSSException, jrest.is_target_lun, tname, vname) jrest.rproxy.pool_request.assert_has_calls(is_target_lun_expected) def test_attach_target_vol(self): jrest, ctx = self.get_rest(CONFIG_OK) # attach ok tname = CONFIG_OK['target_prefix'] + UUID_1 vname = jcom.vname(UUID_1) addr = '/san/iscsi/targets/{}/luns'.format(tname) jbody = {"name": vname, "lun": 0} data = {"block_size": 512, "device_handler": "vdisk_fileio", "lun": 0, "mode": "wt", "name": vname, "prod_id": "Storage", "scsi_id": "99e2c883331edf87"} resp = {'data': data, 'error': None, 'code': 201} jrest.rproxy.pool_request.return_value = resp attach_target_vol_expected = [ mock.call('POST', addr, json_data=jbody)] self.assertIsNone(jrest.attach_target_vol(tname, vname)) # attach with mode and lun jrest, ctx = self.get_rest(CONFIG_OK) tname = CONFIG_OK['target_prefix'] + UUID_1 vname = jcom.vname(UUID_1) addr = '/san/iscsi/targets/{}/luns'.format(tname) jbody = {"name": vname, "lun": 1, "mode": 'ro'} data = {"block_size": 512, "device_handler": "vdisk_fileio", "lun": 0, "mode": "ro", "name": vname, "prod_id": "Storage", "scsi_id": "99e2c883331edf87"} resp = {'data': data, 'error': None, 'code': 201} jrest.rproxy.pool_request.return_value = resp attach_target_vol_expected = [ mock.call('POST', addr, json_data=jbody)] self.assertIsNone(jrest.attach_target_vol(tname, vname, lun_id=1, mode='ro')) jrest.rproxy.pool_request.assert_has_calls(attach_target_vol_expected) # lun attached already jrest, ctx = self.get_rest(CONFIG_OK) jbody = {"name": vname, "lun": 0} url = 'http://85.14.118.246:11582/api/v3/pools/Pool-0/{}'.format(addr) msg = 'Volume /dev/Pool-0/{} is already used.'.format(vname) err = {"class": "opene.exceptions.ItemConflictError", "message": msg, "url": url} resp = {'data': None, 'error': err, 'code': 409} jrest.rproxy.pool_request.return_value = resp attach_target_vol_expected += [ mock.call('POST', addr, json_data=jbody)] self.assertRaises(jexc.JDSSResourceExistsException, jrest.attach_target_vol, tname, vname) # no such target jrest, ctx = self.get_rest(CONFIG_OK) jbody = {"name": vname, "lun": 0} url = 'http://85.14.118.246:11582/api/v3/pools/Pool-0/{}'.format(addr) msg = 'Target {} not exists.'.format(vname) err = {"class": "opene.exceptions.ItemNotFoundError", "message": msg, "url": url} resp = {'data': None, 'error': err, 'code': 404} jrest.rproxy.pool_request.return_value = resp attach_target_vol_expected = [ mock.call('POST', addr, json_data=jbody)] self.assertRaises(jexc.JDSSResourceNotFoundException, jrest.attach_target_vol, tname, vname) # error unknown jrest, ctx = self.get_rest(CONFIG_OK) jbody = {"name": vname, "lun": 0} url = 'http://85.14.118.246:11582/api/v3/pools/Pool-0/{}'.format(addr) msg = 'Target {} not exists.'.format(vname) err = {"class": "some test error", "message": "test error message", "url": url, "errno": 123} resp = {'data': None, 'error': err, 'code': 500} jrest.rproxy.pool_request.return_value = resp attach_target_vol_expected = [ mock.call('POST', addr, json_data=jbody)] self.assertRaises(jexc.JDSSException, jrest.attach_target_vol, tname, vname) jrest.rproxy.pool_request.assert_has_calls(attach_target_vol_expected) # error incorrect mode jrest, ctx = self.get_rest(CONFIG_OK) jbody = {"name": vname, "lun": 0} url = 
'http://85.14.118.246:11582/api/v3/pools/Pool-0/{}'.format(addr) attach_target_vol_expected = [ mock.call('POST', addr, json_data=jbody)] self.assertRaises(jexc.JDSSException, jrest.attach_target_vol, tname, vname, mode='bad') jrest.rproxy.pool_request.assert_not_called() def test_detach_target_vol(self): jrest, ctx = self.get_rest(CONFIG_OK) # detach target vol ok tname = CONFIG_OK['target_prefix'] + UUID_1 vname = jcom.vname(UUID_1) addr = '/san/iscsi/targets/{tar}/luns/{vol}'.format( tar=tname, vol=vname) resp = {'data': None, 'error': None, 'code': 204} jrest.rproxy.pool_request.return_value = resp detach_target_vol_expected = [ mock.call('DELETE', addr)] self.assertIsNone(jrest.detach_target_vol(tname, vname)) # no such target url = 'http://85.14.118.246:11582/api/v3/pools/Pool-0/{}'.format(addr) msg = 'Target {} not exists.'.format(vname) err = {"class": "opene.exceptions.ItemNotFoundError", "message": msg, "url": url} resp = {'data': None, 'error': err, 'code': 404} jrest.rproxy.pool_request.return_value = resp detach_target_vol_expected += [ mock.call('DELETE', addr)] self.assertRaises(jexc.JDSSResourceNotFoundException, jrest.detach_target_vol, tname, vname) # error unknown url = 'http://85.14.118.246:11582/api/v3/pools/Pool-0/{}'.format(addr) msg = 'Target {} not exists.'.format(vname) err = {"class": "some test error", "message": "test error message", "url": url, "errno": 125} resp = {'data': None, 'error': err, 'code': 500} jrest.rproxy.pool_request.return_value = resp detach_target_vol_expected += [ mock.call('DELETE', addr)] self.assertRaises(jexc.JDSSException, jrest.detach_target_vol, tname, vname) jrest.rproxy.pool_request.assert_has_calls(detach_target_vol_expected) def test_create_snapshot(self): jrest, ctx = self.get_rest(CONFIG_OK) vname = jcom.vname(UUID_1) sname = jcom.sname(UUID_S1, UUID_1) data = {'name': jcom.sname(UUID_S2, UUID_1)} resp = {'data': data, 'error': None, 'code': 201} jrest.rproxy.pool_request.return_value = resp self.assertIsNone(jrest.create_snapshot(vname, sname)) def test_create_snapshot_exception(self): jrest, ctx = self.get_rest(CONFIG_OK) vname = jcom.vname(UUID_1) sname = jcom.sname(UUID_S1, UUID_1) addr = '/volumes/{vol}/snapshots'.format(vol=vname) req = {'snapshot_name': sname} url = ('http://192.168.0.2:82/api/v3/pools/Pool-0/volumes/{vol}/' 'snapshots').format(vol=UUID_1) resp = {'data': None, 'error': { 'class': "zfslib.zfsapi.resources.ZfsResourceError", 'errno': 1, 'message': ('Zfs resource: Pool-0/{vol} not found in ' 'this collection.'.format(vol=vname)), "url": url}, 'code': 500} jrest.rproxy.pool_request.return_value = resp create_snapshot_expected = [ mock.call('POST', addr, json_data=req)] self.assertRaises(jexc.JDSSVolumeNotFoundException, jrest.create_snapshot, vname, sname) # snapshot exists resp = {'data': None, 'error': { 'class': "zfslib.zfsapi.resources.ZfsResourceError", 'errno': 5, 'message': 'Resource Pool-0/{vol}@{snap} already exists.', 'url': url}, 'code': 500} jrest.rproxy.pool_request.return_value = resp create_snapshot_expected += [mock.call('POST', addr, json_data=req)] self.assertRaises(jexc.JDSSSnapshotExistsException, jrest.create_snapshot, vname, sname) # error unknown err = {"class": "some test error", "message": "test error message", "url": url, "errno": 123} resp = {'data': None, 'error': err, 'code': 500} jrest.rproxy.pool_request.return_value = resp create_snapshot_expected += [mock.call('POST', addr, json_data=req)] self.assertRaises(jexc.JDSSException, jrest.create_snapshot, vname, sname) 
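# Editorial note: the error dicts fed into pool_request above and below imply
# how rest.JovianRESTAPI translates REST error payloads ({'class', 'errno',
# 'message'}) into jexc exception types for snapshot creation.  The helper
# below only sketches that mapping as these assertions exercise it, relying on
# this module's existing jexc import; it is not the driver's actual branching
# logic.
def _example_classify_create_snapshot_error(error):
    # errno 1: the parent zvol is missing -> volume-not-found
    if error.get('errno') == 1:
        return jexc.JDSSVolumeNotFoundException
    # errno 5: a snapshot with that name already exists
    if error.get('errno') == 5:
        return jexc.JDSSSnapshotExistsException
    # anything unrecognised surfaces as the generic JDSSException
    return jexc.JDSSException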
jrest.rproxy.pool_request.assert_has_calls(create_snapshot_expected) def test_create_volume_from_snapshot(self): jrest, ctx = self.get_rest(CONFIG_OK) vname = jcom.vname(UUID_1) sname = jcom.sname(UUID_S1, UUID_1) cname = jcom.vname(UUID_3) addr = '/volumes/{vol}/clone'.format(vol=vname) jbody = { 'name': cname, 'snapshot': sname, 'sparse': False } data = { "origin": "Pool-0/{vol}@{snap}".format(vol=vname, snap=sname), "is_clone": True, "full_name": "Pool-0/{}".format(cname), "name": cname } resp = {'data': data, 'error': None, 'code': 201} jrest.rproxy.pool_request.return_value = resp create_volume_from_snapshot_expected = [ mock.call('POST', addr, json_data=jbody)] self.assertIsNone(jrest.create_volume_from_snapshot(cname, sname, vname)) jrest.rproxy.pool_request.assert_has_calls( create_volume_from_snapshot_expected) def test_create_volume_from_snapshot_exception(self): jrest, ctx = self.get_rest(CONFIG_OK) vname = jcom.vname(UUID_1) sname = jcom.sname(UUID_S2, UUID_2) cname = jcom.vname(UUID_3) addr = '/volumes/{vol}/clone'.format(vol=vname) jbody = { 'name': cname, 'snapshot': sname, 'sparse': False } # volume DNE url = ('http://192.168.0.2:82/api/v3/pools/Pool-0/volumes/{vol}/' 'clone').format(vol=UUID_1) resp = {'data': None, 'error': { 'class': "zfslib.zfsapi.resources.ZfsResourceError", 'errno': 1, 'message': ('Zfs resource: Pool-0/{vol} not found in ' 'this collection.'.format(vol=vname)), "url": url}, 'code': 500} jrest.rproxy.pool_request.return_value = resp create_volume_from_snapshot_expected = [ mock.call('POST', addr, json_data=jbody)] self.assertRaises(jexc.JDSSResourceNotFoundException, jrest.create_volume_from_snapshot, cname, sname, vname) # clone exists resp = {'data': None, 'error': { "class": "zfslib.wrap.zfs.ZfsCmdError", "errno": 100, "message": ("cannot create 'Pool-0/{}': " "dataset already exists").format(vname), 'url': url}, 'code': 500} jrest.rproxy.pool_request.return_value = resp create_volume_from_snapshot_expected += [ mock.call('POST', addr, json_data=jbody)] self.assertRaises(jexc.JDSSResourceExistsException, jrest.create_volume_from_snapshot, cname, sname, vname) # error unknown err = {"class": "some test error", "message": "test error message", "url": url, "errno": 123} resp = {'data': None, 'error': err, 'code': 500} jrest.rproxy.pool_request.return_value = resp create_volume_from_snapshot_expected += [ mock.call('POST', addr, json_data=jbody)] self.assertRaises(jexc.JDSSException, jrest.create_volume_from_snapshot, cname, sname, vname) jrest.rproxy.pool_request.assert_has_calls( create_volume_from_snapshot_expected) def test_rollback_volume_to_snapshot(self): jrest, ctx = self.get_rest(CONFIG_OK) vname = jcom.vname(UUID_1) sname = jcom.sname(UUID_S2, UUID_2) req = ('/volumes/{vol}/snapshots/' '{snap}/rollback').format(vol=vname, snap=sname) resp = {'data': None, 'error': None, 'code': 200} jrest.rproxy.pool_request.return_value = resp rollback_volume_to_snapshot_expected = [ mock.call('POST', req)] self.assertIsNone(jrest.rollback_volume_to_snapshot(vname, sname)) jrest.rproxy.pool_request.assert_has_calls( rollback_volume_to_snapshot_expected) def test_rollback_volume_to_snapshot_exception(self): jrest, ctx = self.get_rest(CONFIG_OK) vname = jcom.vname(UUID_1) sname = jcom.sname(UUID_S2, UUID_2) req = ('/volumes/{vol}/snapshots/' '{snap}/rollback').format(vol=vname, snap=sname) # volume DNE msg = ('Zfs resource: Pool-0/{vname}' ' not found in this collection.').format(vname=vname) url = ('http://192.168.0.2:82/api/v3/pools/Pool-0/volumes/{vol}/' 
'snapshots/{snap}/rollback').format(vol=vname, snap=sname) err = {"class": "zfslib.zfsapi.resources.ZfsResourceError", "message": msg, "url": url, "errno": 123} resp = {'data': None, 'error': err, 'code': 500} jrest.rproxy.pool_request.return_value = resp rollback_volume_to_snapshot_expected = [ mock.call('POST', req)] self.assertRaises(jexc.JDSSException, jrest.rollback_volume_to_snapshot, vname, sname) jrest.rproxy.pool_request.assert_has_calls( rollback_volume_to_snapshot_expected) # error unknown err = {"class": "some test error", "message": "test error message", "url": url, "errno": 123} resp = {'data': None, 'error': err, 'code': 500} jrest.rproxy.pool_request.return_value = resp rollback_volume_to_snapshot_expected += [ mock.call('POST', req)] self.assertRaises(jexc.JDSSException, jrest.rollback_volume_to_snapshot, vname, sname) jrest.rproxy.pool_request.assert_has_calls( rollback_volume_to_snapshot_expected) def test_delete_snapshot(self): jrest, ctx = self.get_rest(CONFIG_OK) vname = jcom.vname(UUID_1) sname = jcom.sname(UUID_S2, UUID_2) addr = '/volumes/{vol}/snapshots/{snap}'.format(vol=vname, snap=sname) jbody = { 'recursively_children': True, 'force_umount': True } resp = {'data': None, 'error': None, 'code': 204} jrest.rproxy.pool_request.return_value = resp delete_snapshot_expected = [mock.call('DELETE', addr, json_data={})] self.assertIsNone(jrest.delete_snapshot(vname, sname)) delete_snapshot_expected += [ mock.call('DELETE', addr, json_data=jbody)] self.assertIsNone(jrest.delete_snapshot(vname, sname, recursively_children=True, force_umount=True)) jrest.rproxy.pool_request.assert_has_calls(delete_snapshot_expected) def test_delete_snapshot_exception(self): jrest, ctx = self.get_rest(CONFIG_OK) vname = jcom.vname(UUID_1) sname = jcom.sname(UUID_S2, UUID_1) cname = jcom.sname(UUID_S3, UUID_1) addr = '/volumes/{vol}/snapshots/{snap}'.format(vol=vname, snap=sname) # snapshot busy url = ('http://192.168.0.2:82/api/v3/pools/Pool-0/volumes/{vol}/' 'snapshots/{snap}').format(vol=vname, snap=sname) msg = ('cannot destroy "Pool-0/{vol}@{snap}": snapshot has dependent ' 'clones use "-R" to destroy the following datasets: ' 'Pool-0/{clone}').format(vol=vname, snap=sname, clone=cname) err = {'class': 'zfslib.wrap.zfs.ZfsCmdError', 'message': msg, 'url': url, 'errno': 1000} resp = {'data': None, 'error': err, 'code': 500} jrest.rproxy.pool_request.return_value = resp delete_snapshot_expected = [ mock.call('DELETE', addr, json_data={})] self.assertRaises(jexc.JDSSSnapshotIsBusyException, jrest.delete_snapshot, vname, sname) # error unknown err = {"class": "some test error", "message": "test error message", "url": url, "errno": 123} resp = {'data': None, 'error': err, 'code': 500} jrest.rproxy.pool_request.return_value = resp delete_snapshot_expected += [mock.call('DELETE', addr, json_data={})] self.assertRaises(jexc.JDSSException, jrest.delete_snapshot, vname, sname) jrest.rproxy.pool_request.assert_has_calls(delete_snapshot_expected) def test_get_snapshots(self): jrest, ctx = self.get_rest(CONFIG_OK) vname = jcom.vname(UUID_1) addr = '/volumes/{vol}/snapshots'.format(vol=vname) data = {"results": 2, "entries": {"referenced": "65536", "name": jcom.sname(UUID_S1, UUID_1), "defer_destroy": "off", "userrefs": "0", "primarycache": "all", "type": "snapshot", "creation": "2015-5-27 16:8:35", "refcompressratio": "1.00x", "compressratio": "1.00x", "written": "65536", "used": "0", "clones": "", "mlslabel": "none", "secondarycache": "all"}} resp = {'data': data, 'error': None, 'code': 200} 
jrest.rproxy.pool_request.return_value = resp get_snapshots_expected = [mock.call('GET', addr)] self.assertEqual(data['entries'], jrest.get_snapshots(vname)) jrest.rproxy.pool_request.assert_has_calls(get_snapshots_expected) def test_get_snapshots_exception(self): jrest, ctx = self.get_rest(CONFIG_OK) vname = jcom.vname(UUID_1) addr = '/volumes/{vol}/snapshots'.format(vol=vname) url = ('http://192.168.0.2:82/api/v3/pools/Pool-0/volumes/{vol}/' 'snapshots').format(vol=vname) err = {"class": "zfslib.zfsapi.resources.ZfsResourceError", "message": ('Zfs resource: Pool-0/{vol} not found in ' 'this collection.').format(vol=vname), "url": url, "errno": 1} resp = {'data': None, 'error': err, 'code': 500} jrest.rproxy.pool_request.return_value = resp get_snapshots_expected = [mock.call('GET', addr)] self.assertRaises(jexc.JDSSResourceNotFoundException, jrest.get_snapshots, vname) # error unknown err = {"class": "some test error", "message": "test error message", "url": url, "errno": 123} resp = {'data': None, 'error': err, 'code': 500} jrest.rproxy.pool_request.return_value = resp get_snapshots_expected += [ mock.call('GET', addr)] self.assertRaises(jexc.JDSSException, jrest.get_snapshots, vname) jrest.rproxy.pool_request.assert_has_calls(get_snapshots_expected) def test_get_pool_stats(self): jrest, ctx = self.get_rest(CONFIG_OK) addr = '' data = {"available": "950040707072", "status": 26, "name": "Pool-0", "scan": None, "encryption": {"enabled": False}, "iostats": { "read": "0", "write": "0", "chksum": "0"}, "vdevs": [{"name": "wwn-0x5000cca3a8cddb2f", "iostats": {"read": "0", "write": "0", "chksum": "0"}, "disks": [{"origin": "local", "led": "off", "name": "sdc", "iostats": {"read": "0", "write": "0", "chksum": "0"}, "health": "ONLINE", "sn": "JPW9K0N20ZGXWE", "path": None, "model": "Hitachi HUA72201", "id": "wwn-0x5000cca3a8cddb2f", "size": 1000204886016}], "health": "ONLINE", "vdev_replacings": [], "vdev_spares": [], "type": ""}], "health": "ONLINE", "operation": "none", "id": "12413634663904564349", "size": "996432412672"} resp = {'data': data, 'error': None, 'code': 200} jrest.rproxy.pool_request.return_value = resp get_pool_stats_expected = [mock.call('GET', addr)] self.assertEqual(data, jrest.get_pool_stats()) jrest.rproxy.pool_request.assert_has_calls(get_pool_stats_expected) def test_get_pool_stats_exception(self): jrest, ctx = self.get_rest(CONFIG_OK) addr = '' url = 'http://192.168.0.2:82/api/v3/pools/Pool-0/' err = {'class': 'zfslib.zfsapi.zpool.ZpoolError', 'message': "Given zpool 'Pool-0' doesn't exists.", "url": url, "errno": 1} resp = {'data': None, 'error': err, 'code': 500} jrest.rproxy.pool_request.return_value = resp get_pool_stats_expected = [mock.call('GET', addr)] self.assertRaises(jexc.JDSSException, jrest.get_pool_stats) jrest.rproxy.pool_request.assert_has_calls(get_pool_stats_expected) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/volume/drivers/open_e/test_rest_proxy.py0000664000175000017500000002367200000000000026547 0ustar00zuulzuul00000000000000# Copyright (c) 2020 Open-E, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import json from unittest import mock import requests from cinder import exception from cinder.tests.unit import test from cinder.volume.drivers.open_e.jovian_common import exception as jexc from cinder.volume.drivers.open_e.jovian_common import rest_proxy UUID_1 = '12345678-1234-1234-1234-000000000001' UUID_2 = '12345678-1234-1234-1234-000000000002' UUID_3 = '12345678-1234-1234-1234-000000000003' CONFIG_OK = { 'san_hosts': ['192.168.0.2'], 'san_api_port': 82, 'driver_use_ssl': 'true', 'driver_ssl_cert_verify': True, 'driver_ssl_cert_path': '/etc/cinder/joviandss.crt', 'jovian_rest_send_repeats': 3, 'jovian_recovery_delay': 60, 'san_login': 'admin', 'san_password': 'password', 'jovian_ignore_tpath': [], 'target_port': 3260, 'jovian_pool': 'Pool-0', 'target_prefix': 'iqn.2020-04.com.open-e.cinder:', 'chap_password_len': 12, 'san_thin_provision': False, 'jovian_block_size': '128K' } CONFIG_BAD_IP = { 'san_hosts': ['asd'], 'san_api_port': 82, 'driver_use_ssl': 'true', 'driver_ssl_cert_verify': True, 'driver_ssl_cert_path': '/etc/cinder/joviandss.crt', 'jovian_rest_send_repeats': 3, 'jovian_recovery_delay': 60, 'san_login': 'admin', 'san_password': 'password', 'jovian_ignore_tpath': [], 'target_port': 3260, 'jovian_pool': 'Pool-0', 'target_prefix': 'iqn.2020-04.com.open-e.cinder:', 'chap_password_len': 12, 'san_thin_provision': False, 'jovian_block_size': '128K' } CONFIG_MULTIHOST = { 'san_hosts': ['192.168.0.2', '192.168.0.3', '192.168.0.4'], 'san_api_port': 82, 'driver_use_ssl': 'true', 'driver_ssl_cert_verify': True, 'driver_ssl_cert_path': '/etc/cinder/joviandss.crt', 'jovian_rest_send_repeats': 3, 'jovian_recovery_delay': 60, 'san_login': 'admin', 'san_password': 'password', 'jovian_ignore_tpath': [], 'target_port': 3260, 'jovian_pool': 'Pool-0', 'target_prefix': 'iqn.2020-04.com.open-e.cinder:', 'chap_password_len': 12, 'san_thin_provision': False, 'jovian_block_size': '128K' } class TestOpenEJovianRESTProxy(test.TestCase): def start_patches(self, patches): for p in patches: p.start() def stop_patches(self, patches): for p in patches: p.stop() def test_init(self): self.assertRaises(exception.InvalidConfigurationValue, rest_proxy.JovianDSSRESTProxy, CONFIG_BAD_IP) def test_get_base_url(self): proxy = rest_proxy.JovianDSSRESTProxy(CONFIG_OK) url = proxy._get_base_url() exp = '{proto}://{host}:{port}/api/v3'.format( proto='https', host='192.168.0.2', port='82') self.assertEqual(exp, url) def test_next_host(self): proxy = rest_proxy.JovianDSSRESTProxy(CONFIG_MULTIHOST) self.assertEqual(0, proxy.active_host) proxy._next_host() self.assertEqual(1, proxy.active_host) proxy._next_host() self.assertEqual(2, proxy.active_host) proxy._next_host() self.assertEqual(0, proxy.active_host) def test_request(self): proxy = rest_proxy.JovianDSSRESTProxy(CONFIG_MULTIHOST) patches = [ mock.patch.object(requests, "Request", return_value="request"), mock.patch.object(proxy.session, "prepare_request", return_value="out_data"), mock.patch.object(proxy, "_send", return_value="out_data")] addr = 'https://192.168.0.2:82/api/v3/pools/Pool-0' self.start_patches(patches) proxy.request('GET', '/pools/Pool-0') 
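# Editorial note: test_request_host_failure below relies on the proxy's
# failover behaviour: when _send raises a ConnectionError the proxy advances
# active_host and retries the same request against the next address from
# san_hosts.  The function below is a minimal sketch of such a loop, written
# under the assumption of a simple round-robin retry and relying on this
# module's existing requests import; it is illustrative only and not the
# actual JovianDSSRESTProxy.request implementation.
def _example_request_with_failover(hosts, send, url_template, attempts=None):
    # cycle through the configured hosts, reraising only if every one fails
    attempts = attempts if attempts is not None else len(hosts)
    last_error = None
    for i in range(attempts):
        host = hosts[i % len(hosts)]
        try:
            return send(url_template.format(host=host))
        except requests.exceptions.ConnectionError as error:
            last_error = error
    raise last_error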
requests.Request.assert_called_once_with('GET', addr) self.stop_patches(patches) def test_request_host_failure(self): proxy = rest_proxy.JovianDSSRESTProxy(CONFIG_MULTIHOST) patches = [ mock.patch.object(requests, "Request", return_value="request"), mock.patch.object(proxy.session, "prepare_request", return_value="out_data"), mock.patch.object(proxy, "_send", return_value="out_data")] request_expected = [ mock.call('GET', 'https://192.168.0.2:82/api/v3/pools/Pool-0'), mock.call('GET', 'https://192.168.0.3:82/api/v3/pools/Pool-0'), mock.call('GET', 'https://192.168.0.4:82/api/v3/pools/Pool-0')] self.start_patches(patches) proxy._send.side_effect = [ requests.exceptions.ConnectionError(), requests.exceptions.ConnectionError(), "out_data"] proxy.request('GET', '/pools/Pool-0') self.assertEqual(2, proxy.active_host) requests.Request.assert_has_calls(request_expected) self.stop_patches(patches) def test_pool_request(self): proxy = rest_proxy.JovianDSSRESTProxy(CONFIG_OK) patches = [mock.patch.object(proxy, "request")] req = '/pools/Pool-0/volumes' self.start_patches(patches) proxy.pool_request('GET', '/volumes') proxy.request.assert_called_once_with('GET', req, json_data=None) self.stop_patches(patches) def test_send(self): proxy = rest_proxy.JovianDSSRESTProxy(CONFIG_MULTIHOST) json_data = {"data": [{"available": "949998694400", "status": 26, "name": "Pool-0", "scan": None, "encryption": {"enabled": False}, "iostats": {"read": "0", "write": "0", "chksum": "0"}, "vdevs": [{}], "health": "ONLINE", "operation": "none", "id": "12413634663904564349", "size": "996432412672"}], "error": None} session_ret = mock.Mock() session_ret.text = json.dumps(json_data) session_ret.status_code = 200 patches = [mock.patch.object(proxy.session, "send", return_value=session_ret)] pr = 'prepared_request' self.start_patches(patches) ret = proxy._send(pr) proxy.session.send.assert_called_once_with(pr) self.assertEqual(0, proxy.active_host) self.assertEqual(200, ret['code']) self.assertEqual(json_data['data'], ret['data']) self.assertEqual(json_data['error'], ret['error']) self.stop_patches(patches) def test_request_host_change(self): proxy = rest_proxy.JovianDSSRESTProxy(CONFIG_MULTIHOST) patches = [ mock.patch.object(requests, "Request", return_value="request"), mock.patch.object(proxy.session, "prepare_request", return_value="out_data"), mock.patch.object(proxy, "_send", return_value="out_data")] request_expected = [ mock.call('GET', 'https://192.168.0.2:82/api/v3/pools/Pool-0'), mock.call('GET', 'https://192.168.0.3:82/api/v3/pools/Pool-0'), mock.call('GET', 'https://192.168.0.4:82/api/v3/pools/Pool-0'), mock.call('GET', 'https://192.168.0.2:82/api/v3/pools/Pool-0')] self.start_patches(patches) proxy._send.side_effect = [ requests.exceptions.ConnectionError(), requests.exceptions.ConnectionError(), requests.exceptions.ConnectionError(), "out_data"] proxy.request('GET', '/pools/Pool-0') self.assertEqual(0, proxy.active_host) requests.Request.assert_has_calls(request_expected) self.stop_patches(patches) def test_send_jsondecode_error(self): proxy = rest_proxy.JovianDSSRESTProxy(CONFIG_MULTIHOST) session_ret = mock.Mock() session_ret.text = "{ some-bad-json" session_ret.status_code = 200 patches = [mock.patch.object(proxy.session, "send")] pr = 'prepared_request' self.start_patches(patches) side_effect = [session_ret] * 3 proxy.session.send.side_effect = side_effect send_expected = [mock.call(pr)] * 3 self.assertRaises(json.JSONDecodeError, proxy._send, pr) proxy.session.send.assert_has_calls(send_expected) def 
test_handle_500(self): error = {"class": "exceptions.OSError", "errno": 17, "message": ""} json_data = {"data": None, "error": error} session_ret = mock.Mock() session_ret.text = json.dumps(json_data) session_ret.status_code = 500 self.assertRaises(jexc.JDSSOSException, rest_proxy.JovianDSSRESTProxy._handle_500, session_ret) session_ret.status_code = 200 json_data = {"data": None, "error": None} session_ret.text = json.dumps(json_data) self.assertIsNone( rest_proxy.JovianDSSRESTProxy._handle_500(session_ret)) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315577.3031204 cinder-27.0.0/cinder/tests/unit/volume/drivers/sandstone/0000775000175000017500000000000000000000000023437 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/volume/drivers/sandstone/__init__.py0000664000175000017500000000000000000000000025536 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/volume/drivers/sandstone/test_sds_client.py0000664000175000017500000006464100000000000027212 0ustar00zuulzuul00000000000000# Copyright (c) 2019 SandStone data Technologies Co., Ltd # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
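# Editorial note: the FakeSession class defined below hands canned REST
# replies to sds_client.RestCmd by keying JSON bodies on a fragment of the
# request URL.  test_utils.FakeBaseSession itself is not shown in this file;
# the commented sketch below is only a guess at how such a stub typically
# dispatches, included to make the method_map structure easier to follow, and
# is not the real FakeBaseSession implementation.
#
#     class ExampleFakeBaseSession(object):
#         method_map = {'post': {}}
#
#         def post(self, url, data=None, **kwargs):
#             # return a mocked response whose payload is the first canned
#             # body whose key occurs in the requested URL
#             for fragment, body in self.method_map['post'].items():
#                 if fragment in url:
#                     response = mock.Mock()
#                     response.json.return_value = body
#                     return response
#             raise AssertionError('unexpected URL: %s' % url)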
"""Unittest for sds_client.""" import json from unittest import mock import requests from cinder.tests.unit import test from cinder.tests.unit.volume.drivers.sandstone import test_utils from cinder.volume.drivers.sandstone import sds_client class FakeSession(test_utils.FakeBaseSession): """Fake request session.""" method_map = { 'post': { 'capacity': {'data': {'capacity_bytes': 1024, 'free_bytes': 1024}}, 'pool/list': {'data': [{'status': {'progress': 100}, 'pool_name': 'fake_pool', 'realname': 'fake_pool', 'storage_policy': 'fake_replicate', 'domain_name': 'fake_domain', 'pool_id': 3, 'policy_type': 'replicated', 'size': 2}]}, 'resource/initiator/list': {'data': { 'results': [{'iqn': 'fake_iqn', 'type': 'iscsi'}]}}, 'resource/target/get_target_acl_list': {'data': { 'results': [{'autodiscovery': 'yes', 'name': 'fake_iqn', 'approved': 'yes', 'manual': 'no', 'ip': ''}]}}, 'block/gateway/server/list': {'data': [{ 'networks': [{'hostid': 'node0001', 'address': '1.1.1.1', 'type': 'iSCSI'}]}]}, 'resource/target/list': {'data': { 'results': [{'status': 'fake_state', 'node': ['node0001'], 'name': 'fake_target', 'type': 'iSCSI', 'gateway': [{ 'hostid': 'node0001', 'networks': [{ 'hostid': 'node0001', 'type': 'iSCSI', 'address': 'fake_address'}], 'hostip': 'fake_hostip'}]}]}}, 'resource/target/get_chap_list': {'data': [{ 'user': 'fake_chapuser', 'level': 'level1'}]}, 'resource/target/get_luns': {'data': { 'results': [{'lid': 1, 'name': 'fake_lun', 'pool_id': 1}]}}, 'resource/lun/list': {'data': { 'results': [{'volumeName': 'fake_lun', 'pool_id': 1, 'capacity_bytes': 1024}]}}, 'delaytask/list': {'data': { 'results': [{'status': 'completed', 'run_status': 'completed', 'executor': 'LunFlatten', 'progress': 100, 'parameter': {'pool_id': 1, 'lun_name': 'fake_lun'}}]}}, 'resource/snapshot/list': {'data': { 'results': [{'snapName': 'fake_snapshot', 'lunName': 'fake_lun'}]}}, } } class TestSdsclient(test.TestCase): """Testcase sds client.""" def setUp(self): """Setup.""" super(TestSdsclient, self).setUp() self.mock_object(requests, 'Session', FakeSession) self.client = sds_client.RestCmd('192.168.200.100', 'fake_user', 'fake_password', True) self.client.login() def test_login(self): """Test login and check headers.""" self.assertEqual('https://192.168.200.100', self.client.session.headers['Referer']) self.assertEqual('fake_token', self.client.session.headers['X-XSRF-Token']) self.assertEqual('XSRF-TOKEN=fake_token; username=fake_user; ' 'sdsom_sessionid=fake_session', self.client.session.headers['Cookie']) def test_logout(self): """Test logout.""" retval = self.client.logout() self.assertIsNone(retval) def test_query_capacity_info(self): """Test query cluster capacity.""" with mock.patch.object(self.client.session, 'post', wraps=self.client.session.post) as mocker: retval = self.client.query_capacity_info() mocker.assert_called_once_with( 'https://192.168.200.100/api/storage/' 'capacity') self.assertDictEqual({'capacity_bytes': 1024, 'free_bytes': 1024}, retval) def test_query_pool_info(self): """Test query pool status.""" with mock.patch.object(self.client.session, 'post', wraps=self.client.session.post) as mocker: retval = self.client.query_pool_info() mocker.assert_called_once_with( 'https://192.168.200.100/api/storage/' 'pool/list') self.assertListEqual([{'status': {'progress': 100}, 'realname': 'fake_pool', 'pool_name': 'fake_pool', 'storage_policy': 'fake_replicate', 'domain_name': 'fake_domain', 'pool_id': 3, 'policy_type': 'replicated', 'size': 2}], retval) def test_create_initiator(self): """Test 
create initiator.""" with mock.patch.object(self.client.session, 'post', wraps=self.client.session.post) as mocker: retval = self.client.create_initiator( initiator_name='fake_iqn') data = json.dumps( {'iqn': 'fake_iqn', 'type': 'iSCSI', 'remark': 'Cinder iSCSI'}) mocker.assert_called_with( 'https://192.168.200.100/api/storage/' 'resource/initiator/create', data=data) self.assertIsNone(retval) @mock.patch.object(sds_client.RestCmd, "_judge_delaytask_status") def test_add_initiator_to_target(self, mock__judge_delaytask_status): """Test add initiator to target.""" with mock.patch.object(self.client.session, 'post', wraps=self.client.session.post) as mocker: mock__judge_delaytask_status.return_value = None retval = self.client.add_initiator_to_target( target_name='fake_target', initiator_name='fake_iqn') data = json.dumps( {'targetName': 'fake_target', 'iqns': [{'ip': '', 'iqn': 'fake_iqn'}]}) mocker.assert_called_with( 'https://192.168.200.100/api/storage/' 'resource/target/add_initiator_to_target', data=data) self.assertIsNone(retval) def test_query_initiator_by_name(self): """Test query initiator exist or not.""" with mock.patch.object(self.client.session, 'post', wraps=self.client.session.post) as mocker: retval = self.client.query_initiator_by_name( initiator_name='fake_iqn') data = json.dumps( {'initiatorMark': '', 'pageno': 1, 'pagesize': 1000, 'type': 'iSCSI'}) mocker.assert_called_once_with( 'https://192.168.200.100/api/storage/' 'resource/initiator/list', data=data) self.assertDictEqual({'iqn': 'fake_iqn', 'type': 'iscsi'}, retval) def test_query_target_initiatoracl(self): """Test query target related initiator info.""" with mock.patch.object(self.client.session, 'post', wraps=self.client.session.post) as mocker: retval = self.client.query_target_initiatoracl( target_name='fake_target', initiator_name='fake_iqn') data = json.dumps( {'pageno': 1, 'pagesize': 1000, 'targetName': 'fake_target'}) mocker.assert_called_once_with( 'https://192.168.200.100/api/storage/' 'resource/target/get_target_acl_list', data=data) self.assertListEqual([{'autodiscovery': 'yes', 'name': 'fake_iqn', 'approved': 'yes', 'manual': 'no', 'ip': ''}], retval) def test_query_node_by_targetips(self): """Test query node id and node ip, relation dict.""" with mock.patch.object(self.client.session, 'post', wraps=self.client.session.post) as mocker: retval = self.client.query_node_by_targetips( target_ips=['1.1.1.1']) mocker.assert_called_once_with( 'https://192.168.200.100/api/storage/' 'block/gateway/server/list') self.assertDictEqual({'1.1.1.1': 'node0001'}, retval) def test_query_target_by_name(self): """Test query target exist or not.""" with mock.patch.object(self.client.session, 'post', wraps=self.client.session.post) as mocker: retval = self.client.query_target_by_name( target_name='fake_target') data = json.dumps( {'pageno': 1, 'pagesize': 1000, "thirdParty": [0, 1], "targetMark": ""}) mocker.assert_called_once_with( 'https://192.168.200.100/api/storage/' 'resource/target/list', data=data) self.assertDictEqual({ 'status': 'fake_state', 'node': ['node0001'], 'name': 'fake_target', 'type': 'iSCSI', 'gateway': [{'hostid': 'node0001', 'networks': [{'hostid': 'node0001', 'type': 'iSCSI', 'address': 'fake_address'}], 'hostip': 'fake_hostip'}]}, retval) def test_create_target(self): """Test create target.""" with mock.patch.object(self.client.session, 'post', wraps=self.client.session.post) as mocker: retval = self.client.create_target(target_name='fake_target', targetip_to_hostid= {'1.1.1.1': 'node0001', 
'1.1.1.2': 'node0002', '1.1.1.3': 'node0003'}) tip_to_hid = {'1.1.1.1': 'node0001', '1.1.1.2': 'node0002', '1.1.1.3': 'node0003'} data = json.dumps( {"type": "iSCSI", "readOnly": 0, "thirdParty": 1, "targetName": "fake_target", "networks": [{"hostid": host_id, "address": address} for address, host_id in tip_to_hid.items()]}) mocker.assert_called_with( 'https://192.168.200.100/api/storage/' 'resource/target/create', data=data) self.assertIsNone(retval) def test_add_chap_by_target(self): """Test add chap to target.""" with mock.patch.object(self.client.session, 'post', wraps=self.client.session.post) as mocker: retval = self.client.add_chap_by_target( target_name='fake_target', username='fake_chapuser', password='fake_chappassword') data = json.dumps( {"password": "fake_chappassword", "user": "fake_chapuser", "targetName": "fake_target"}) mocker.assert_called_once_with( 'https://192.168.200.100/api/storage/' 'resource/target/add_chap', data=data) self.assertIsNone(retval) def test_query_chapinfo_by_target(self): """Test query target chap info.""" with mock.patch.object(self.client.session, 'post', wraps=self.client.session.post) as mocker: retval = self.client.query_chapinfo_by_target( target_name='fake_target', username='fake_chapuser') data = json.dumps({"targetName": "fake_target"}) mocker.assert_called_once_with( 'https://192.168.200.100/api/storage/' 'resource/target/get_chap_list', data=data) self.assertDictEqual({'user': 'fake_chapuser', 'level': 'level1'}, retval) def test_create_lun(self): """Test create lun.""" with mock.patch.object(self.client.session, 'post', wraps=self.client.session.post) as mocker: retval = self.client.create_lun(capacity_bytes=1024, poolid=1, volume_name='fake_lun') data = json.dumps({"capacity_bytes": 1024, "poolId": 1, "priority": "normal", "qosSettings": {}, "volumeName": 'fake_lun'}) mocker.assert_called_with( 'https://192.168.200.100/api/storage/' 'resource/lun/add', data=data) self.assertIsNone(retval) def test_delete_lun(self): """Test delete lun.""" with mock.patch.object(self.client.session, 'post', wraps=self.client.session.post) as mocker: retval = self.client.delete_lun(poolid=1, volume_name='fake_lun') data = json.dumps({"delayTime": 0, "volumeNameList": [{ "poolId": 1, "volumeName": "fake_lun"}]}) mocker.assert_called_once_with( 'https://192.168.200.100/api/storage/' 'resource/lun/batch_delete', data=data) self.assertIsNone(retval) def test_extend_lun(self): """Test resize lun.""" with mock.patch.object(self.client.session, 'post', wraps=self.client.session.post) as mocker: retval = self.client.extend_lun(capacity_bytes=2048, poolid=1, volume_name='fake_lun') data = json.dumps({"capacity_bytes": 2048, "poolId": 1, "volumeName": 'fake_lun'}) mocker.assert_called_once_with( 'https://192.168.200.100/api/storage/' 'resource/lun/resize', data=data) self.assertIsNone(retval) @mock.patch.object(sds_client.RestCmd, "_judge_delaytask_status") @mock.patch.object(sds_client.RestCmd, "query_lun_by_name") def test_unmap_lun(self, mock_query_lun_by_name, mock__judge_delaytask_status): """Test unmap lun from target.""" with mock.patch.object(self.client.session, 'post', wraps=self.client.session.post) as mocker: mock__judge_delaytask_status.return_value = None lun_uuid = "c5c8533c-4ce0-11ea-bc01-005056a736f8" mock_query_lun_by_name.return_value = {'uuid': lun_uuid} retval = self.client.unmap_lun(target_name='fake_target', poolid=1, volume_name='fake_lun', pool_name='fake_pool') data = json.dumps({"targetName": "fake_target", "targetLunList": [lun_uuid], 
"targetSnapList": []}) mocker.assert_called_once_with( 'https://192.168.200.100/api/storage/' 'resource/target/unmap_luns', data=data) self.assertIsNone(retval) @mock.patch.object(sds_client.RestCmd, "_judge_delaytask_status") @mock.patch.object(sds_client.RestCmd, "query_lun_by_name") def test_mapping_lun(self, mock_query_lun_by_name, mock__judge_delaytask_status): """Test map lun to target.""" with mock.patch.object(self.client.session, 'post', wraps=self.client.session.post) as mocker: mock__judge_delaytask_status.return_value = None lun_uuid = "c5c8533c-4ce0-11ea-bc01-005056a736f8" mock_query_lun_by_name.return_value = {'uuid': lun_uuid} retval = self.client.mapping_lun( target_name='fake_target', poolid=1, volume_name='fake_lun', pool_name='fake_pool') data = json.dumps( {"targetName": 'fake_target', "targetLunList": [lun_uuid], "targetSnapList": []}) mocker.assert_called_with( 'https://192.168.200.100/api/storage/' 'resource/target/map_luns', data=data) self.assertIsNone(retval) def test_query_target_lunacl(self): """Test query target related lun info.""" with mock.patch.object(self.client.session, 'post', wraps=self.client.session.post) as mocker: retval = self.client.query_target_lunacl(target_name='fake_target', poolid=1, volume_name='fake_lun') data = json.dumps({"pageno": 1, "pagesize": 1000, "pools": [1], "targetName": "fake_target"}) mocker.assert_called_once_with( 'https://192.168.200.100/api/storage/' 'resource/target/get_luns', data=data) self.assertEqual(1, retval) def test_query_lun_by_name(self): """Test query lun exist or not.""" with mock.patch.object(self.client.session, 'post', wraps=self.client.session.post) as mocker: retval = self.client.query_lun_by_name( volume_name='fake_lun', poolid=1) data = json.dumps( {"pageno": 1, "pagesize": 1000, "volumeMark": "fake_lun", "sortType": "time", "sortOrder": "desc", "pools": [1], "thirdParty": [0, 1]}) mocker.assert_called_once_with( 'https://192.168.200.100/api/storage/' 'resource/lun/list', data=data) self.assertDictEqual({'volumeName': 'fake_lun', 'pool_id': 1, 'capacity_bytes': 1024}, retval) @mock.patch.object(sds_client.RestCmd, "_judge_delaytask_status") def test_create_snapshot(self, mock__judge_delaytask_status): """Test create snapshot.""" with mock.patch.object(self.client.session, 'post', wraps=self.client.session.post) as mocker: mock__judge_delaytask_status.return_value = None retval = self.client.create_snapshot(poolid=1, volume_name='fake_lun', snapshot_name='fake_snapshot') data = json.dumps( {"lunName": "fake_lun", "poolId": 1, "remark": "Cinder iSCSI snapshot.", "snapName": "fake_snapshot"}) mocker.assert_called_with( 'https://192.168.200.100/api/storage/' 'resource/snapshot/add', data=data) self.assertIsNone(retval) @mock.patch.object(sds_client.RestCmd, "_judge_delaytask_status") def test_delete_snapshot(self, mock__judge_delaytask_status): """Test delete snapshot.""" with mock.patch.object(self.client.session, 'post', wraps=self.client.session.post) as mocker: mock__judge_delaytask_status.return_value = None retval = self.client.delete_snapshot(poolid=1, volume_name='fake_lun', snapshot_name='fake_snapshot') data = json.dumps( {"lunName": "fake_lun", "poolId": 1, "snapName": "fake_snapshot"}) mocker.assert_called_once_with( 'https://192.168.200.100/api/storage/' 'resource/snapshot/delete', data=data) self.assertIsNone(retval) @mock.patch.object(sds_client.RestCmd, "flatten_lun") @mock.patch.object(sds_client.RestCmd, "_judge_delaytask_status") def test_create_lun_from_snapshot(self, 
mock__judge_delaytask_status, mock_flatten_lun): """Test create lun from snapshot.""" with mock.patch.object(self.client.session, 'post', wraps=self.client.session.post) as mocker: mock__judge_delaytask_status.return_value = None mock_flatten_lun.return_value = None retval = self.client.create_lun_from_snapshot( snapshot_name='fake_snapshot', src_volume_name='fake_src_lun', poolid=1, dst_volume_name='fake_dst_lun') data = json.dumps( {"snapshot": {"poolId": 1, "lunName": "fake_src_lun", "snapName": "fake_snapshot"}, "cloneLun": {"lunName": "fake_dst_lun", "poolId": 1}}) mocker.assert_called_once_with( 'https://192.168.200.100/api/storage/' 'resource/snapshot/clone', data=data) self.assertIsNone(retval) @mock.patch.object(sds_client.RestCmd, "_judge_delaytask_status") def test_flatten_lun(self, mock__judge_delaytask_status): """Test flatten lun.""" with mock.patch.object(self.client.session, 'post', wraps=self.client.session.post) as mocker: mock__judge_delaytask_status.return_value = None retval = self.client.flatten_lun(volume_name='fake_lun', poolid=1) data = json.dumps( {"poolId": 1, "volumeName": "fake_lun"}) mocker.assert_called_once_with( 'https://192.168.200.100/api/storage/' 'resource/lun/flatten', data=data) self.assertIsNone(retval) def test_query_flatten_lun_process(self): """Test query flatten process.""" with mock.patch.object(self.client.session, 'post', wraps=self.client.session.post) as mocker: retval = self.client.query_flatten_lun_process( poolid=1, volume_name='fake_lun') data = json.dumps({"pageno": 1, "pagesize": 20}) mocker.assert_called_once_with( 'https://192.168.200.100/api/om/' 'delaytask/list', data=data) self.assertDictEqual({'status': 'completed', 'run_status': 'completed', 'executor': 'LunFlatten', 'progress': 100, 'parameter': {'pool_id': 1, 'lun_name': 'fake_lun'}}, retval) @mock.patch.object(sds_client.RestCmd, "create_snapshot") @mock.patch.object(sds_client.RestCmd, "create_lun_from_snapshot") @mock.patch.object(sds_client.RestCmd, "flatten_lun") @mock.patch.object(sds_client.RestCmd, "delete_snapshot") def test_create_lun_from_lun(self, mock_delete_snapshot, mock_flatten_lun, mock_create_lun_from_snapshot, mock_create_snapshot): """Test create clone lun.""" self.client = sds_client.RestCmd( "https://192.168.200.100", "fake_user", "fake_password", True) mock_create_snapshot.return_value = {'success': 1} mock_create_lun_from_snapshot.return_value = {'success': 1} mock_flatten_lun.return_value = {'success': 1} mock_delete_snapshot.return_value = {'success': 1} retval = self.client.create_lun_from_lun( dst_volume_name='fake_dst_lun', poolid=1, src_volume_name='fake_src_lun') self.assertIsNone(retval) def test_query_snapshot_by_name(self): """Test query snapshot exist or not.""" with mock.patch.object(self.client.session, 'post', wraps=self.client.session.post) as mocker: retval = self.client.query_snapshot_by_name( volume_name='fake_lun', poolid=1, snapshot_name='fake_snapshot') data = json.dumps( {"lunName": "fake_lun", "pageno": 1, "pagesize": 1000, "poolId": 1, "snapMark": ""}) mocker.assert_called_once_with( 'https://192.168.200.100/api/storage/' 'resource/snapshot/list', data=data) self.assertListEqual([{'snapName': 'fake_snapshot', 'lunName': 'fake_lun'}], retval) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/volume/drivers/sandstone/test_sds_driver.py0000664000175000017500000005243000000000000027220 0ustar00zuulzuul00000000000000# Copyright (c) 2019 SandStone 
data Technologies Co., Ltd # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Unittest for sds_driver.""" from unittest import mock import uuid import ddt from oslo_utils import units from cinder import exception from cinder import objects from cinder.tests.unit import test from cinder.volume import configuration as config from cinder.volume.drivers.san import san from cinder.volume.drivers.sandstone import sds_client from cinder.volume.drivers.sandstone import sds_driver class FakeSdsBaseDriver(sds_driver.SdsBaseDriver): """Fake sds base driver.""" def __init__(self): """Init conf client pool sds_client.""" self.configuration = config.Configuration(None) self.configuration.append_config_values(sds_driver.sds_opts) self.configuration.append_config_values(san.san_opts) self.configuration.suppress_requests_ssl_warnings = True self.client = None self.poolid = 1 self.VERSION = '1.0' self.address = "192.168.200.100" self.user = "fake_user" self.password = "fake_password" self.pool = "fake_pool_name" self.iscsi_info = {"iqn.1994-05.com.redhat:899c5f9d15d": "1.1.1.1,1.1.1.2,1.1.1.3"} self.default_target_ips = ["1.1.1.1", "1.1.1.2", "1.1.1.3"] self.default_chap_info = "1234567891234,123456789123" @ddt.ddt class TestSdsBaseDriver(test.TestCase): """Testcase sds base driver.""" def setUp(self): """Setup.""" super(TestSdsBaseDriver, self).setUp() self.fake_driver = FakeSdsBaseDriver() self.fake_driver.client = sds_client.RestCmd('192.168.200.100', 'fake_user', 'fake_password', True) def test_do_setup(self): """Do setup.""" self.fake_driver.client = sds_client.RestCmd( 'fake_rest_ip', 'user', 'password', True) self.fake_driver.configuration.san_ip = 'fake_rest_ip' self.fake_driver.configuration.san_login = 'fake_san_user' self.fake_driver.configuration.san_password = 'fake_san_password' self.fake_driver.do_setup('context') @mock.patch.object(sds_client.RestCmd, 'query_pool_info') @mock.patch.object(sds_client.RestCmd, 'get_poolid_from_poolname') @mock.patch.object(sds_client.RestCmd, 'login') def test_check_for_setup_error(self, mock_login, mock_get_poolid_from_poolname, mock_query_pool_info): """Test whether pool status is healthy or not.""" result1 = [ {'status': {'progress': 33, 'state': ['degraded'], 'flags': 4}, 'pool_name': 'fake_pool_name', 'used': 1792950890, 'display_name': 'data', 'replicated_size': 2, 'storage_policy': '2', 'domain_name': 'sandstone', 'pool_id': 3, 'min_size': 1, 'erasure_code_profile': '', 'policy_type': 'replicated', 'rule_id': 1, 'size': 2}, {'status': {'progress': 33, 'state': ['degraded'], 'flags': 4}, 'pool_name': 'vms1', 'used': 1792950890, 'display_name': 'data', 'replicated_size': 2, 'storage_policy': '2', 'domain_name': 'sandstone', 'pool_id': 3, 'min_size': 1, 'erasure_code_profile': '', 'policy_type': 'replicated', 'rule_id': 1, 'size': 2}] result2 = [ {'status': {'progress': 33, 'state': ['degraded'], 'flags': 4}, 'pool_name': 'vms', 'used': 1792950890, 'display_name': 'data', 'replicated_size': 2, 'storage_policy':
'2', 'domain_name': 'sandstone', 'pool_id': 3, 'min_size': 1, 'erasure_code_profile': '', 'policy_type': 'replicated', 'rule_id': 1, 'size': 2}, {'status': {'progress': 33, 'state': ['degraded'], 'flags': 4}, 'pool_name': 'vms1', 'used': 1792950890, 'display_name': 'data', 'replicated_size': 2, 'storage_policy': '2', 'domain_name': 'sandstone', 'pool_id': 3, 'min_size': 1, 'erasure_code_profile': '', 'policy_type': 'replicated', 'rule_id': 1, 'size': 2}] mock_login.return_value = {"success": 1} mock_get_poolid_from_poolname.return_value = ( {"fake_pool_name": 3}) mock_query_pool_info.return_value = result1 retval = self.fake_driver.check_for_setup_error() self.assertIsNone(retval) mock_query_pool_info.return_value = result2 try: self.fake_driver.check_for_setup_error() except Exception as e: self.assertEqual(exception.InvalidInput, type(e)) @mock.patch.object(sds_client.RestCmd, 'query_capacity_info') def test__update_volume_stats(self, mock_query_capacity_info): """Get cluster capacity.""" result1 = { "capacity_bytes": 2 * units.Gi, "free_bytes": units.Gi } mock_query_capacity_info.return_value = result1 retval = self.fake_driver._update_volume_stats( pool_name="fake_pool_name") self.assertDictEqual( {"pools": [dict( pool_name="fake_pool_name", vendor_name='SandStone USP', driver_version=self.fake_driver.VERSION, total_capacity_gb=2.0, free_capacity_gb=1.0, QoS_support=True, thin_provisioning_support=True, multiattach=False,) ]}, retval) mock_query_capacity_info.assert_called_once_with() @mock.patch.object(sds_driver.SdsBaseDriver, 'get_volume_stats') def test_get_volume_stats(self, mock_get_volume_stats): """Get cluster capacities.""" result1 = {"pool": dict( pool_name="fake_pool_name", total_capacity_gb=2.0, free_capacity_gb=1.0, QoS_support=True, thin_provisioning_support=True, multiattach=False,)} mock_get_volume_stats.return_value = result1 retval = self.fake_driver.get_volume_stats() self.assertDictEqual( {"pool": dict( pool_name="fake_pool_name", total_capacity_gb=2.0, free_capacity_gb=1.0, QoS_support=True, thin_provisioning_support=True, multiattach=False, )}, retval) @mock.patch.object(sds_client.RestCmd, 'create_lun') def test_create_volume(self, mock_create_lun): """Test create volume.""" volume = objects.Volume(_name_id=uuid.uuid4(), size=1) mock_create_lun.return_value = {'success': 1} retval = self.fake_driver.create_volume(volume=volume) self.assertIsNone(retval) @mock.patch.object(sds_client.RestCmd, 'delete_lun') def test_delete_volume(self, mock_delete_): """Test delete volume.""" mock_delete_.return_value = {'success': 1} volume = objects.Volume(_name_id=uuid.uuid4(), size=1) retval = self.fake_driver.delete_volume(volume) self.assertIsNone(retval) @mock.patch.object(sds_client.RestCmd, 'extend_lun') @mock.patch.object(sds_client.RestCmd, 'create_lun_from_snapshot') def test_create_volume_from_snapshot(self, mock_lun_from_snapshot, mock_extend_lun): """Test create new volume from snapshot of src volume.""" volume = objects.Volume(_name_id=uuid.uuid4(), size=1) snapshot = objects.Snapshot( id=uuid.uuid4(), volume_size=2, volume=volume) mock_lun_from_snapshot.return_value = {'success': 1} mock_extend_lun.return_value = {'success': 1} retval = self.fake_driver.create_volume_from_snapshot(volume, snapshot) self.assertIsNone(retval) @mock.patch.object(sds_client.RestCmd, 'extend_lun') @mock.patch.object(sds_client.RestCmd, 'create_lun_from_lun') @mock.patch.object(sds_driver.SdsBaseDriver, '_check_volume_exist') def test_create_cloned_volume(self,
mock__check_volume_exist, mock_create_lun_from_lun, mock_extend_lun): """Test create clone volume.""" mock__check_volume_exist.return_value = True mock_create_lun_from_lun.return_value = {'success': 1} mock_extend_lun.return_value = {'success': 1} dst_volume = objects.Volume(_name_id=uuid.uuid4(), size=2) src_volume = objects.Volume(_name_id=uuid.uuid4(), size=1) retval = self.fake_driver.create_cloned_volume(dst_volume, src_volume) self.assertIsNone(retval) @mock.patch.object(sds_client.RestCmd, 'query_lun_by_name') def test__check_volume_exist(self, mock_query_lun_by_name): """Test volume exist or not.""" mock_query_lun_by_name.return_value = {'success': 1} volume = objects.Volume(_name_id=uuid.uuid4(), size=1) retval = self.fake_driver._check_volume_exist(volume) self.assertEqual({'success': 1}, retval) @mock.patch.object(sds_client.RestCmd, 'extend_lun') @mock.patch.object(sds_driver.SdsBaseDriver, '_check_volume_exist') def test_extend_volume(self, mock__check_volume_exist, mock_extend_lun): """Test resize volume.""" volume = objects.Volume(_name_id=uuid.uuid4(), size=1) new_size = 3 mock__check_volume_exist.return_value = { 'capacity_bytes': units.Gi * 1} mock_extend_lun.return_value = {'success': 1} retval = self.fake_driver.extend_volume(volume, new_size) self.assertIsNone(retval) @mock.patch.object(sds_client.RestCmd, 'create_snapshot') def test_create_snapshot(self, mock_create_snapshot): """Test create snapshot of volume.""" volume = objects.Volume(_name_id=uuid.uuid4(), size=1) snapshot = objects.Snapshot( id=uuid.uuid4(), volume_size=2, volume=volume) mock_create_snapshot.return_value = {'success': 1} retval = self.fake_driver.create_snapshot(snapshot) self.assertIsNone(retval) @mock.patch.object(sds_client.RestCmd, 'query_snapshot_by_name') def test__check_snapshot_exist(self, mock_query_snapshot_by_name): """Test snapshot exist or not.""" volume = objects.Volume(_name_id=uuid.uuid4(), size=1) snapshot = objects.Snapshot( id=uuid.uuid4(), volume_size=2, volume=volume) mock_query_snapshot_by_name.return_value = {'success': 1} retval = self.fake_driver._check_snapshot_exist(snapshot) self.assertEqual({'success': 1}, retval) @mock.patch.object(sds_client.RestCmd, 'delete_snapshot') @mock.patch.object(sds_driver.SdsBaseDriver, '_check_snapshot_exist') def test_delete_snapshot(self, mock__check_snapshot_exist, mock_delete_snapshot): """Test delete snapshot.""" volume = objects.Volume(_name_id=uuid.uuid4(), size=1) snapshot = objects.Snapshot( id=uuid.uuid4(), volume_size=2, volume=volume) mock__check_snapshot_exist.return_value = True mock_delete_snapshot.return_value = {'success': 1} retval = self.fake_driver.delete_snapshot(snapshot) self.assertIsNone(retval) class FakeSdsISCSIDriver(sds_driver.SdsISCSIDriver): """Fake sds iscsi driver, include attach, detach.""" def __init__(self): """Init conf client pool.""" self.configuration = config.Configuration(None) self.client = None self.address = "192.168.200.100" self.user = "fake_user" self.password = "fake_password" self.pool = "fake_pool_name" self.poolid = 1 self.iscsi_info = {"iqn.1994-05.com.redhat:899c5f9d15d": "1.1.1.1,1.1.1.2,1.1.1.3"} self.default_target_ips = ["1.1.1.1", "1.1.1.2", "1.1.1.3"] self.chap_username = "123456789123" self.chap_password = "1234567891234" @ddt.ddt class TestSdsISCSIDriver(test.TestCase): """Testcase sds iscsi driver, include attach, detach.""" def setUp(self): """Setup.""" super(TestSdsISCSIDriver, self).setUp() self.fake_driver = FakeSdsISCSIDriver() self.fake_driver.client = 
sds_client.RestCmd("192.168.200.100", "fake_user", "fake_password", True) @mock.patch.object(sds_client.RestCmd, 'query_target_by_name') def test__check_target_exist(self, mock_query_target_by_name): """Test target exist or not.""" target_name = 'test_driver' mock_query_target_by_name.return_value = {'success': 1} retval = self.fake_driver._check_target_exist(target_name) self.assertEqual({'success': 1}, retval) @mock.patch.object(sds_client.RestCmd, 'query_initiator_by_name') def test__check_initiator_exist(self, mock_query_initiator_by_name): """Test initiator exist or not.""" initiator_name = 'test_driver' mock_query_initiator_by_name.return_value = {'success': 1} retval = self.fake_driver._check_initiator_exist(initiator_name) self.assertEqual({'success': 1}, retval) @mock.patch.object(sds_client.RestCmd, 'query_target_initiatoracl') def test__check_target_added_initiator(self, mock_query_target_initiatoracl): """Test target added the initiator.""" mock_query_target_initiatoracl.return_value = {'success': 1} target_name, initiator_name = 'test_driver', 'initiator_name' retval = self.fake_driver._check_target_added_initiator(target_name, initiator_name) self.assertEqual({'success': 1}, retval) @mock.patch.object(sds_client.RestCmd, 'query_target_lunacl') def test__check_target_added_lun(self, mock_query_target_lunacl): """Test target added the lun.""" mock_query_target_lunacl.return_value = {'success': 1} target_name, pool_name, volume_name = ('ccc', self.fake_driver.pool, 'fcc') retval = self.fake_driver._check_target_added_lun(target_name, pool_name, volume_name) self.assertEqual({'success': 1}, retval) @mock.patch.object(sds_client.RestCmd, 'query_chapinfo_by_target') def test__check_target_added_chap(self, mock_query_chapinfo_by_target): """Test target added chapuser.""" mock_query_chapinfo_by_target.return_value = {'success': 1} target_name, user_name = 'ccc', 'fcc' retval = self.fake_driver._check_target_added_chap(target_name, user_name) self.assertEqual({'success': 1}, retval) def test__get_target_ip(self): """Test get target from targetip.""" initiator = 'iqn.1994-05.com.redhat:899c5f9d15d' retval_target_ips = \ self.fake_driver._get_target_ip(initiator) self.assertListEqual(['1.1.1.1', '1.1.1.2', '1.1.1.3'], retval_target_ips) self.fake_driver.default_target_ips = \ ["1.1.1.1"] initiator = 'vms' retval_target_ips = \ self.fake_driver._get_target_ip(initiator) self.assertListEqual(["1.1.1.1"], retval_target_ips) @mock.patch.object(sds_client.RestCmd, 'add_chap_by_target') @mock.patch.object(sds_driver.SdsISCSIDriver, '_check_target_added_chap') @mock.patch.object(sds_driver.SdsISCSIDriver, '_check_target_added_lun') @mock.patch.object(sds_client.RestCmd, 'mapping_lun') @mock.patch.object(sds_client.RestCmd, 'add_initiator_to_target') @mock.patch.object(sds_driver.SdsISCSIDriver, '_check_target_added_initiator') @mock.patch.object(sds_client.RestCmd, 'create_initiator') @mock.patch.object(sds_driver.SdsISCSIDriver, '_check_initiator_exist') @mock.patch.object(sds_client.RestCmd, 'create_target') @mock.patch.object(sds_client.RestCmd, 'query_node_by_targetips') @mock.patch.object(sds_driver.SdsISCSIDriver, '_check_target_exist') @mock.patch.object(sds_driver.SdsISCSIDriver, '_get_target_ip') def test_initialize_connection(self, mock__get_target_ip, mock__check_target_exist, mock_query_node_by_targetips, mock_create_target, mock__check_initiator_exist, mock_create_initiator, mock__check_target_added_initiator, mock_add_initiator_to_target, mock_mapping_lun, 
mock__check_target_added_lun, mock__check_target_added_chap, mock_add_chap_by_target): """Test attach volume to kvm.""" mock__get_target_ip.return_value = (['1.1.1.1', '1.1.1.2', '1.1.1.3']) mock__check_target_exist.return_value = False mock__check_initiator_exist.return_value = False mock__check_target_added_initiator.result_value = False mock__check_target_added_chap.return_value = False mock_query_node_by_targetips.return_value = {'host_id', 'address'} mock_create_target.return_value = {'success': 1} mock_create_initiator.return_value = {'success': 1} mock_add_initiator_to_target.result_value = {'success': 1} mock_mapping_lun.return_value = {'success': 1} mock__check_target_added_lun.return_value = 1 mock_add_chap_by_target.return_value = {'success': 1} volume1, connector1 = (objects.Volume(id=uuid.uuid4(), _name_id=uuid.uuid4(), size=1), {'initiator': 'iqn.1994-05.com.redhat:899c5f9d15d', 'multipath': True}) initiator_name = connector1['initiator'] iqn_end = initiator_name.split(':', 1)[1] target_head = 'iqn.2014-10.com.szsandstone:storage:' target_name = target_head + iqn_end result1 = { 'driver_volume_type': 'iscsi', 'data': {'target_discovered': True, 'target_portals': ['1.1.1.1:3260', '1.1.1.2:3260', '1.1.1.3:3260'], 'volume_id': volume1.id, 'auth_method': 'CHAP', 'auth_username': '123456789123', 'auth_password': '1234567891234', 'target_iqns': [target_name, target_name, target_name], 'target_luns': [1, 1, 1]}} retval = self.fake_driver.initialize_connection(volume1, connector1) self.assertDictEqual(result1, retval) volume2, connector2 = (objects.Volume(id=uuid.uuid4(), _name_id=uuid.uuid4(), size=2), {'initiator': 'iqn.1994-05.com.redhat:899c5f9d15d'}) mock__get_target_ip.return_value = (['1.1.1.1', '1.1.1.2', '1.1.1.3']) initiator_name = connector2['initiator'] iqn_end = initiator_name.split(':', 1)[1] target_head = 'iqn.2014-10.com.szsandstone:storage:' target_name = target_head + iqn_end result2 = {'driver_volume_type': 'iscsi', 'data': {'target_discovered': True, 'target_portal': '1.1.1.1:3260', 'volume_id': volume2.id, 'target_iqn': target_name, 'target_lun': 1, 'auth_method': 'CHAP', 'auth_username': '123456789123', 'auth_password': '1234567891234'}} retval = self.fake_driver.initialize_connection(volume2, connector2) self.assertDictEqual(result2, retval) @mock.patch.object(sds_client.RestCmd, 'unmap_lun') def test_terminate_connection(self, mock_unmap_lun): """Test detach volume from kvm.""" volume, connector = (objects.Volume(_name_id=uuid.uuid4(), size=1), {'initiator': 'iqn.1994-05.com.redhat:899c5f9d15d'}) mock_unmap_lun.result_value = {'success': 1} retval = self.fake_driver.terminate_connection(volume, connector) self.assertIsNone(retval) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/volume/drivers/sandstone/test_utils.py0000664000175000017500000000336400000000000026216 0ustar00zuulzuul00000000000000# Copyright (c) 2019 ShenZhen SandStone Data Technologies Co., Ltd. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. import json import re import requests class FakeBaseSession(requests.Session): """Redefine get and post method, fake it.""" method_map = {} def _get_response(self, method, url): url_map = self.method_map.get(method, {}) tmp = None data = {} for k in url_map: if re.search(k, url): if not tmp or len(tmp) < len(k): data = url_map[k] tmp = k resp_content = {'success': 1} resp_content.update(data) resp = requests.Response() resp.cookies['XSRF-TOKEN'] = 'fake_token' resp.headers['Referer'] = 'fake_refer' resp.headers['Set-Cookie'] = 'sdsom_sessionid=fake_session;' resp.status_code = 200 resp.encoding = 'utf-8' resp._content = json.dumps(resp_content).encode('utf-8') return resp def get(self, url, **kwargs): """Redefine get method.""" return self._get_response('get', url) def post(self, url, **kwargs): """Redefine post method.""" return self._get_response('post', url) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315577.3031204 cinder-27.0.0/cinder/tests/unit/volume/drivers/solidfire/0000775000175000017500000000000000000000000023421 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/volume/drivers/solidfire/__init__.py0000664000175000017500000000000000000000000025520 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/volume/drivers/solidfire/scaled_iops_invalid_data.json0000664000175000017500000000073100000000000031301 0ustar00zuulzuul00000000000000{ "test_max_greater_than_burst": [ { "burstIOPS": 2, "maxIOPS": 3, "minIOPS": "100", "scaleMin": "2", "scaledIOPS": "True", "size": 2 } ], "test_min_greater_than_max_burst": [ { "burstIOPS": 2, "maxIOPS": 2, "minIOPS": "100", "scaleMin": "3", "scaledIOPS": "True", "size": 2 } ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/volume/drivers/solidfire/scaled_iops_test_data.json0000664000175000017500000000604100000000000030632 0ustar00zuulzuul00000000000000{ "test_capping_the_maximum_of_minIOPS": [ { "burstIOPS": "200000", "maxIOPS": "200000", "minIOPS": "14950", "scaleMin": "100", "scaledIOPS": "True", "size": 2 }, { "burstIOPS": 200000, "maxIOPS": 200000, "minIOPS": 15000 } ], "test_capping_the_maximums": [ { "burstIOPS": "190000", "maxIOPS": "190000", "minIOPS": "100", "scaleBurst": "10003", "scaleMax": "10002", "scaleMin": "2", "scaledIOPS": "True", "size": 2 }, { "burstIOPS": 200000, "maxIOPS": 200000, "minIOPS": 102 } ], "test_capping_the_minimum": [ { "burstIOPS": "300", "maxIOPS": "200", "minIOPS": "50", "scaleBurst": "2", "scaleMax": "2", "scaleMin": "2", "scaledIOPS": "True", "size": 2 }, { "burstIOPS": 302, "maxIOPS": 202, "minIOPS": 100 } ], "test_regular_QoS": [ { "burstIOPS": "200", "maxIOPS": "200", "minIOPS": "100", "size": 1 }, { "burstIOPS": 200, "maxIOPS": 200, "minIOPS": 100 } ], "test_scaled_QoS_with_size_1": [ { "burstIOPS": "300", "maxIOPS": "200", "minIOPS": "100", "scaleBurst": "2", "scaleMax": "2", "scaleMin": "2", "scaledIOPS": "True", "size": 1 }, { "burstIOPS": 300, "maxIOPS": 200, "minIOPS": 100 } ], "test_scaled_QoS_with_size_2": [ { "burstIOPS": "300", "maxIOPS": "200", "minIOPS": "100", "scaleBurst": "2", "scaleMax": "2", "scaleMin": "2", 
"scaledIOPS": "True", "size": 2 }, { "burstIOPS": 302, "maxIOPS": 202, "minIOPS": 102 } ], "test_scoped_regular_QoS": [ { "qos:burstIOPS": "200", "qos:maxIOPS": "200", "qos:minIOPS": "100", "size": 1 }, { "burstIOPS": 200, "maxIOPS": 200, "minIOPS": 100 } ], "test_when_no_valid_QoS_values_present": [ { "key": "value", "size": 2 }, {} ], "test_without_presence_of_the_scaled_flag": [ { "burstIOPS": "300", "maxIOPS": "200", "minIOPS": "100", "scaleBurst": "2", "scaleMax": "2", "scaleMin": "2", "size": 2 }, { "burstIOPS": 300, "maxIOPS": 200, "minIOPS": 100 } ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/volume/drivers/solidfire/test_solidfire.py0000664000175000017500000056252400000000000027030 0ustar00zuulzuul00000000000000# # Copyright 2012 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from copy import deepcopy import re from unittest import mock from unittest.mock import call from unittest.mock import MagicMock from ddt import data from ddt import ddt from ddt import file_data from ddt import unpack from oslo_service import loopingcall from oslo_utils import timeutils from oslo_utils import units from cinder import context from cinder import exception from cinder.objects import fields from cinder.tests.unit.api import fakes from cinder.tests.unit import fake_group_snapshot from cinder.tests.unit import fake_snapshot from cinder.tests.unit import fake_volume from cinder.tests.unit import test from cinder.tests.unit import utils as test_utils from cinder.volume import configuration as conf from cinder.volume.drivers import solidfire from cinder.volume import qos_specs from cinder.volume import volume_types class mock_vref(object): def __init__(self): self._name_id = None self.admin_metadata = {} self.attach_status = 'detached' self.id = '262b9ce2-a71a-4fbe-830c-c20c5596caea' self.project_id = '52423d9394ad4c67b3b9034da58cedbc' self.provider_id = '5 4 6ecebf5d-5521-4ce1-80f3-358ebc1b9cdc' self.size = 20 def __setitem__(self, item, value): self.__dict__[item] = value def __getitem__(self, item): return self.__dict__[item] def get(self, item, arg2 = None): return self.__dict__[item] f_uuid = ['262b9ce2-a71a-4fbe-830c-c20c5596caea', '362b9ce2-a71a-4fbe-830c-c20c5596caea'] @ddt class SolidFireVolumeTestCase(test.TestCase): EXPECTED_QOS = {'minIOPS': 110, 'burstIOPS': 1530, 'maxIOPS': 1020} def setUp(self): self.ctxt = context.get_admin_context() self.configuration = conf.BackendGroupConfiguration( [], conf.SHARED_CONF_GROUP) self.configuration.sf_allow_tenant_qos = True self.configuration.san_is_local = True self.configuration.sf_emulate_512 = True self.configuration.sf_account_prefix = 'cinder' self.configuration.reserved_percentage = 25 self.configuration.target_helper = None self.configuration.sf_svip = None self.configuration.sf_volume_prefix = 'UUID-' self.configuration.sf_enable_vag = False self.configuration.replication_device = [] 
self.configuration.max_over_subscription_ratio = 2 super(SolidFireVolumeTestCase, self).setUp() self.mock_object(solidfire.SolidFireDriver, '_issue_api_request', self.fake_issue_api_request) self.mock_object(solidfire.SolidFireDriver, '_get_provisioned_capacity_iops', return_value=(0, 0)) self.expected_qos_results = {'minIOPS': 1000, 'maxIOPS': 10000, 'burstIOPS': 20000} vol_updates = {'project_id': 'testprjid', 'name': 'testvol', 'size': 1, 'id': 'a720b3c0-d1f0-11e1-9b23-0800200c9a66', 'volume_type_id': '3332b568-96fd-475e-afd7-b7a867bc5255', 'created_at': timeutils.utcnow(), 'attributes': {'uuid': '262b9ce2-a71a-4fbe-830c-c20c5596caea'}} ctx = context.get_admin_context() self.mock_volume = fake_volume.fake_volume_obj(ctx, **vol_updates) self.vol = test_utils.create_volume( self.ctxt, volume_id='b831c4d1-d1f0-11e1-9b23-0800200c9a66') self.snap = test_utils.create_snapshot( self.ctxt, volume_id=self.vol.id) self.fake_sfaccount = {'accountID': 25, 'targetSecret': 'shhhh', 'username': 'prefix-testprjid', 'volumes': [6, 7, 20]} self.fake_sfvol = {'volumeID': 6, 'name': 'test_volume', 'accountID': 25, 'sliceCount': 1, 'totalSize': 1 * units.Gi, 'enable512e': True, 'access': "readWrite", 'status': "active", 'attributes': {'uuid': f_uuid[0]}, 'qos': None, 'iqn': 'super_fake_iqn'} self.fake_primary_cluster = ( {'endpoint': {'passwd': 'admin', 'port': 443, 'url': 'https://192.168.139.11:443', 'svip': '10.10.8.11', 'mvip': '10.10.8.12', 'login': 'admin'}, 'name': 'volume-f0632d53-d836-474c-a5bc-478ef18daa32', 'clusterPairID': 33, 'uuid': 'f0632d53-d836-474c-a5bc-478ef18daa32', 'svip': '10.10.8.11', 'mvipNodeID': 1, 'repCount': 1, 'encryptionAtRestState': 'disabled', 'attributes': {}, 'mvip': '10.10.8.12', 'ensemble': ['10.10.5.130'], 'svipNodeID': 1}) self.fake_secondary_cluster = ( {'endpoint': {'passwd': 'admin', 'port': 443, 'url': 'https://192.168.139.102:443', 'svip': '10.10.8.134', 'mvip': '192.168.139.102', 'login': 'admin'}, 'name': 'AutoTest2-6AjG-FOR-TEST-ONLY', 'clusterPairID': 331, 'clusterAPIVersion': '9.4', 'uuid': '9c499d4b-8fff-48b4-b875-27601d5d9889', 'svip': '10.10.23.2', 'mvipNodeID': 1, 'repCount': 1, 'encryptionAtRestState': 'disabled', 'attributes': {}, 'mvip': '192.168.139.102', 'ensemble': ['10.10.5.130'], 'svipNodeID': 1}) self.cluster_pairs = ( [{'uniqueID': 'lu9f', 'endpoint': {'passwd': 'admin', 'port': 443, 'url': 'https://192.168.139.102:443', 'svip': '10.10.8.134', 'mvip': '192.168.139.102', 'login': 'admin'}, 'name': 'AutoTest2-6AjG-FOR-TEST-ONLY', 'clusterPairID': 33, 'clusterAPIVersion': '9.4', 'uuid': '9c499d4b-8fff-48b4-b875-27601d5d9889', 'svip': '10.10.23.2', 'mvipNodeID': 1, 'repCount': 1, 'encryptionAtRestState': 'disabled', 'attributes': {}, 'mvip': '192.168.139.102', 'ensemble': ['10.10.5.130'], 'svipNodeID': 1}]) self.mvip = '192.168.139.102' self.svip = '10.10.8.134' self.fake_sfsnap_name = '%s%s' % (self.configuration.sf_volume_prefix, self.snap.id) self.fake_sfsnaps = [{'snapshotID': '5', 'name': self.fake_sfsnap_name, 'volumeID': 6}] def fake_issue_api_request(self, method, params, version='1.0', endpoint=None, timeout=None): if method == 'GetClusterCapacity': data = {} if version == '1.0': data = {'result': {'clusterCapacity': { 'maxProvisionedSpace': 107374182400, 'usedSpace': 1073741824, 'compressionPercent': 100, 'deduplicationPercent': 100, 'thinProvisioningPercent': 100, 'maxUsedSpace': 53687091200}}} elif version == '8.0': data = {'result': {'clusterCapacity': { 'usedMetadataSpaceInSnapshots': 16476454912, 'maxUsedMetadataSpace': 432103337164, 
'activeBlockSpace': 616690857535, 'uniqueBlocksUsedSpace': 628629229316, 'totalOps': 7092186135, 'peakActiveSessions': 0, 'uniqueBlocks': 519489473, 'maxOverProvisionableSpace': 276546135777280, 'zeroBlocks': 8719571984, 'provisionedSpace': 19938551005184, 'maxUsedSpace': 8402009333760, 'peakIOPS': 0, 'timestamp': '2019-04-24T12:08:22Z', 'currentIOPS': 0, 'usedSpace': 628629229316, 'activeSessions': 0, 'nonZeroBlocks': 1016048624, 'maxProvisionedSpace': 55309227155456, 'usedMetadataSpace': 16476946432, 'averageIOPS': 0, 'snapshotNonZeroBlocks': 1606, 'maxIOPS': 200000, 'clusterRecentIOSize': 0}}} return data elif method == 'GetClusterInfo': results = { 'result': {'clusterInfo': {'name': 'fake-cluster', 'mvip': '1.1.1.1', 'svip': '1.1.1.1', 'uniqueID': 'unqid', 'repCount': 2, 'uuid': '53c8be1e-89e2-4f7f-a2e3-7cb84c47e0ec', 'attributes': {}}}} return results elif method == 'GetClusterVersionInfo': return {'id': None, 'result': {'softwareVersionInfo': {'pendingVersion': '8.2.1.4', 'packageName': '', 'currentVersion': '8.2.1.4', 'nodeID': 0, 'startTime': ''}, 'clusterVersion': '8.2.1.4', 'clusterAPIVersion': '8.2'}} elif method == 'AddAccount' and version == '1.0': return {'result': {'accountID': 25}, 'id': 1} elif method == 'GetAccountByName' and version == '1.0': results = {'result': {'account': {'accountID': 25, 'username': params['username'], 'status': 'active', 'initiatorSecret': '123456789012', 'targetSecret': '123456789012', 'attributes': {}, 'volumes': [6, 7, 20]}}, "id": 1} return results elif method == 'CreateVolume' and version == '1.0': return {'result': {'volumeID': 5}, 'id': 1} elif method == 'CreateSnapshot' and version == '6.0': return {'result': {'snapshotID': 5}, 'id': 1} elif method == 'DeleteVolume' and version == '1.0': return {'result': {}, 'id': 1} elif method == 'ModifyVolume' and version == '5.0': return {'result': {}, 'id': 1} elif method == 'CloneVolume': return {'result': {'volumeID': 6}, 'id': 2} elif method == 'ModifyVolume': return {'result': {}, 'id': 1} elif method == 'ListVolumesForAccount' and version == '1.0': test_name = 'OS-VOLID-a720b3c0-d1f0-11e1-9b23-0800200c9a66' result = {'result': { 'volumes': [{'volumeID': 5, 'name': test_name, 'accountID': 25, 'sliceCount': 1, 'totalSize': 1 * units.Gi, 'enable512e': True, 'access': "readWrite", 'status': "active", 'attributes': {'uuid': f_uuid[0]}, 'qos': None, 'iqn': test_name}]}} return result elif method == 'ListActiveVolumes': test_name = "existing_volume" result = {'result': { 'volumes': [{'volumeID': 5, 'name': test_name, 'accountID': 8, 'sliceCount': 1, 'totalSize': int(1.75 * units.Gi), 'enable512e': True, 'access': "readWrite", 'status': "active", 'attributes': {}, 'qos': None, 'iqn': test_name}]}} return result elif method == 'ListVolumes': test_name = "get_sfvol_by_cinder" result = {'result': { 'volumes': [{'volumeID': 5, 'name': test_name, 'accountID': 8, 'sliceCount': 1, 'totalSize': int(1.75 * units.Gi), 'enable512e': True, 'access': "readWrite", 'status': "active", 'attributes': {'uuid': f_uuid[0]}, 'qos': None, 'iqn': test_name}, {'volumeID': 15, 'name': test_name, 'accountID': 8, 'sliceCount': 1, 'totalSize': int(1.75 * units.Gi), 'enable512e': True, 'access': "readWrite", 'status': "active", 'attributes': {'uuid': f_uuid[1]}, 'qos': None, 'iqn': test_name}]}} if params and params.get('startVolumeID', None): volumes = result['result']['volumes'] selected_volumes = [v for v in volumes if v.get('volumeID') != params['startVolumeID']] result['result']['volumes'] = selected_volumes else: result = 
{'result': {'volumes': []}} return result elif method == 'DeleteSnapshot': return {'result': {}} elif method == 'GetClusterVersionInfo': return {'result': {'clusterAPIVersion': '8.0'}} elif method == 'StartVolumePairing': return {'result': {'volumePairingKey': 'fake-pairing-key'}} elif method == 'RollbackToSnapshot': return { "id": 1, "result": { "checksum": "0x0", "snapshot": { "attributes": {}, "checksum": "0x0", "createTime": "2016-04-04T17:27:32Z", "enableRemoteReplication": "false", "expirationReason": "None", "expirationTime": "null", "groupID": 0, "groupSnapshotUUID": f_uuid[0], "name": "test1-copy", "snapshotID": 1, "snapshotUUID": f_uuid[1], "status": "done", "totalSize": 5000658944, "virtualVolumeID": "null", "volumeID": 1 }, "snapshotID": 1 } } elif method == 'ListAccounts': return { 'result': { 'accounts': [{ 'accountID': 5, 'targetSecret': 'shhhh', 'username': 'prefix-testprjid' }] } } elif method == 'ListSnapshots': raise exception.VolumeNotFound('test clone unconfigured image') else: # Crap, unimplemented API call in Fake return None def fake_issue_api_request_fails(self, method, params, version='1.0', endpoint=None): response = {'error': {'code': 000, 'name': 'DummyError', 'message': 'This is a fake error response'}, 'id': 1} msg = ('Error (%s) encountered during ' 'SolidFire API call.' % response['error']['name']) raise solidfire.SolidFireAPIException(message=msg) def fake_set_qos_by_volume_type(self, type_id, ctxt): return {'minIOPS': 500, 'maxIOPS': 1000, 'burstIOPS': 1000} def fake_volume_get(self, key, default=None): return {'qos': 'fast'} def fake_update_cluster_status(self): return def fake_get_cluster_version_info(self): return def fake_get_model_info(self, account, vid, endpoint=None): return {'fake': 'fake-model'} @mock.patch.object(solidfire.SolidFireDriver, '_issue_api_request') def test_create_volume_with_qos_type(self, _mock_issue_api_request): _mock_issue_api_request.side_effect = self.fake_issue_api_request testvol = {'project_id': 'testprjid', 'name': 'testvol', 'size': 1, 'id': 'a720b3c0-d1f0-11e1-9b23-0800200c9a66', 'volume_type_id': '3332b568-96fd-475e-afd7-b7a867bc5255', 'created_at': timeutils.utcnow()} fake_sfaccounts = [{'accountID': 5, 'targetSecret': 'shhhh', 'username': 'prefix-testprjid'}] test_type = {'name': 'sf-1', 'qos_specs_id': 'fb0576d7-b4b5-4cad-85dc-ca92e6a497d1', 'deleted': False, 'created_at': '2014-02-06 04:58:11', 'updated_at': None, 'extra_specs': {}, 'deleted_at': None, 'id': 'e730e97b-bc7d-4af3-934a-32e59b218e81'} test_qos_spec = {'id': 'asdfafdasdf', 'specs': {'minIOPS': '1000', 'maxIOPS': '2000', 'burstIOPS': '3000'}} ctx = context.get_admin_context() testvol = fake_volume.fake_volume_obj(ctx, **testvol) def _fake_get_volume_type(ctxt, type_id): return test_type def _fake_get_qos_spec(ctxt, spec_id): return test_qos_spec def _fake_do_volume_create(account, params): params['provider_location'] = '1.1.1.1 iqn 0' return params sfv = solidfire.SolidFireDriver(configuration=self.configuration) with mock.patch.object(sfv, '_get_sfaccounts_for_tenant', return_value=fake_sfaccounts), \ mock.patch.object(sfv, '_get_account_create_availability', return_value=fake_sfaccounts[0]), \ mock.patch.object(sfv, '_do_volume_create', side_effect=_fake_do_volume_create), \ mock.patch.object(volume_types, 'get_volume_type', side_effect=_fake_get_volume_type), \ mock.patch.object(qos_specs, 'get_qos_specs', side_effect=_fake_get_qos_spec): self.assertEqual({'burstIOPS': 3000, 'minIOPS': 1000, 'maxIOPS': 2000}, sfv.create_volume(testvol)['qos']) 
@mock.patch.object(solidfire.SolidFireDriver, '_issue_api_request') def test_create_volume(self, _mock_issue_api_request): _mock_issue_api_request.side_effect = self.fake_issue_api_request testvol = {'project_id': 'testprjid', 'name': 'testvol', 'size': 1, 'id': 'a720b3c0-d1f0-11e1-9b23-0800200c9a66', 'volume_type_id': None, 'created_at': timeutils.utcnow()} fake_sfaccounts = [{'accountID': 5, 'targetSecret': 'shhhh', 'username': 'prefix-testprjid'}] ctx = context.get_admin_context() testvol = fake_volume.fake_volume_obj(ctx, **testvol) sfv = solidfire.SolidFireDriver(configuration=self.configuration) with mock.patch.object(sfv, '_get_sfaccounts_for_tenant', return_value=fake_sfaccounts), \ mock.patch.object(sfv, '_get_account_create_availability', return_value=fake_sfaccounts[0]): model_update = sfv.create_volume(testvol) self.assertIsNotNone(model_update) self.assertNotIn('provider_geometry', model_update) @mock.patch.object(solidfire.SolidFireDriver, '_issue_api_request') def test_create_volume_non_512e(self, _mock_issue_api_request): _mock_issue_api_request.side_effect = self.fake_issue_api_request testvol = {'project_id': 'testprjid', 'name': 'testvol', 'size': 1, 'id': 'a720b3c0-d1f0-11e1-9b23-0800200c9a66', 'volume_type_id': None, 'created_at': timeutils.utcnow()} ctx = context.get_admin_context() testvol = fake_volume.fake_volume_obj(ctx, **testvol) fake_sfaccounts = [{'accountID': 5, 'targetSecret': 'shhhh', 'username': 'prefix-testprjid'}] sfv = solidfire.SolidFireDriver(configuration=self.configuration) with mock.patch.object(sfv, '_get_sfaccounts_for_tenant', return_value=fake_sfaccounts), \ mock.patch.object(sfv, '_issue_api_request', side_effect=self.fake_issue_api_request), \ mock.patch.object(sfv, '_get_account_create_availability', return_value=fake_sfaccounts[0]): self.configuration.sf_emulate_512 = False model_update = sfv.create_volume(testvol) self.configuration.sf_emulate_512 = True self.assertEqual('4096 4096', model_update.get('provider_geometry', None)) def test_create_delete_snapshot(self): ctx = context.get_admin_context() testvol = fake_volume.fake_volume_obj(ctx) testsnap_dict = {'project_id': 'testprjid', 'name': testvol.name, 'volume_size': testvol.size, 'id': 'b831c4d1-d1f0-11e1-9b23-0800200c9a66', 'volume_id': testvol.id, 'volume_type_id': None, 'created_at': timeutils.utcnow(), 'provider_id': '8 99 None', 'volume': testvol} testsnap = fake_snapshot.fake_snapshot_obj(ctx, **testsnap_dict) sfv = solidfire.SolidFireDriver(configuration=self.configuration) fake_uuid = 'UUID-b831c4d1-d1f0-11e1-9b23-0800200c9a66' with mock.patch.object( solidfire.SolidFireDriver, '_get_sf_snapshots', return_value=[{'snapshotID': '5', 'name': fake_uuid, 'volumeID': 5}]), \ mock.patch.object(sfv, '_get_sfaccounts_for_tenant', return_value=[{'accountID': 5, 'username': 'prefix-testprjid'}]), \ mock.patch.object(sfv, '_retrieve_replication_settings', return_value=["Async", {}]), \ mock.patch.object(sfv, '_get_sf_volume', return_value={'volumeID': 33}): sfv.create_snapshot(testsnap) sfv.delete_snapshot(testsnap) @mock.patch.object(solidfire.SolidFireDriver, '_issue_api_request') def test_create_clone(self, _mock_issue_api_request): _mock_issue_api_request.side_effect = self.fake_issue_api_request _fake_get_snaps = [{'snapshotID': 5, 'name': 'testvol'}] _fake_get_volume = ( {'volumeID': 99, 'name': 'UUID-a720b3c0-d1f0-11e1-9b23-0800200c9a66', 'attributes': {}}) updates_vol_a = {'project_id': 'testprjid', 'name': 'testvol', 'size': 1, 'id': 'a720b3c0-d1f0-11e1-9b23-0800200c9a66', 
'volume_type_id': None, 'created_at': timeutils.utcnow()} updates_vol_b = {'project_id': 'testprjid', 'name': 'testvol', 'size': 1, 'id': 'b831c4d1-d1f0-11e1-9b23-0800200c9a66', 'volume_type_id': None, 'created_at': timeutils.utcnow()} fake_model_info = { 'provider_id': '%s %s cluster-id-01' % ( self.fake_sfvol['volumeID'], self.fake_sfaccount['accountID']) } ctx = context.get_admin_context() testvol = fake_volume.fake_volume_obj(ctx, **updates_vol_a) testvol_b = fake_volume.fake_volume_obj(ctx, **updates_vol_b) sfv = solidfire.SolidFireDriver(configuration=self.configuration) with mock.patch.object(sfv, '_get_sf_snapshots', return_value=_fake_get_snaps), \ mock.patch.object(sfv, '_get_sf_volume', return_value=_fake_get_volume), \ mock.patch.object(sfv, '_issue_api_request', side_effect=self.fake_issue_api_request), \ mock.patch.object(sfv, '_get_sfaccounts_for_tenant', return_value=[]), \ mock.patch.object(sfv, '_get_model_info', return_value=fake_model_info): sfv.create_cloned_volume(testvol_b, testvol) def test_initialize_connector_with_blocksizes(self): connector = {'initiator': 'iqn.2012-07.org.fake:01'} testvol = {'project_id': 'testprjid', 'name': 'testvol', 'size': 1, 'id': 'a720b3c0-d1f0-11e1-9b23-0800200c9a66', 'volume_type_id': None, 'provider_location': '10.10.7.1:3260 iqn.2010-01.com.' 'solidfire:87hg.uuid-2cc06226-cc' '74-4cb7-bd55-14aed659a0cc.4060 0', 'provider_auth': 'CHAP stack-1-a60e2611875f40199931f2' 'c76370d66b 2FE0CQ8J196R', 'provider_geometry': '4096 4096', 'created_at': timeutils.utcnow(), } sfv = solidfire.SolidFireDriver(configuration=self.configuration) properties = sfv.initialize_connection(testvol, connector) self.assertEqual('4096', properties['data']['physical_block_size']) self.assertEqual('4096', properties['data']['logical_block_size']) self.assertTrue(properties['data']['discard']) def test_create_volume_fails(self): # NOTE(JDG) This test just fakes update_cluster_status # this is inentional for this test self.mock_object(solidfire.SolidFireDriver, '_update_cluster_status', self.fake_update_cluster_status) self.mock_object(solidfire.SolidFireDriver, '_issue_api_request', self.fake_issue_api_request) testvol = {'project_id': 'testprjid', 'name': 'testvol', 'size': 1, 'id': 'a720b3c0-d1f0-11e1-9b23-0800200c9a66', 'created_at': timeutils.utcnow()} sfv = solidfire.SolidFireDriver(configuration=self.configuration) self.mock_object(solidfire.SolidFireDriver, '_issue_api_request', self.fake_issue_api_request_fails) try: sfv.create_volume(testvol) self.fail("Should have thrown Error") except Exception: pass def test_create_sfaccount(self): sfv = solidfire.SolidFireDriver(configuration=self.configuration) self.mock_object(solidfire.SolidFireDriver, '_issue_api_request', self.fake_issue_api_request) account = sfv._create_sfaccount('some-name') self.assertIsNotNone(account) def test_create_sfaccount_fails(self): sfv = solidfire.SolidFireDriver(configuration=self.configuration) self.mock_object(solidfire.SolidFireDriver, '_issue_api_request', self.fake_issue_api_request_fails) self.assertRaises(solidfire.SolidFireAPIException, sfv._create_sfaccount, 'project-id') def test_get_sfaccounts_for_tenant(self): sfv = solidfire.SolidFireDriver(configuration=self.configuration) self.mock_object(solidfire.SolidFireDriver, '_issue_api_request', self.fake_issue_api_request) accounts = sfv._get_sfaccounts_for_tenant('some-name') self.assertIsNotNone(accounts) def test_get_sfaccounts_for_tenant_fails(self): sfv = solidfire.SolidFireDriver(configuration=self.configuration) 
self.mock_object(solidfire.SolidFireDriver, '_issue_api_request', self.fake_issue_api_request_fails) self.assertRaises(solidfire.SolidFireAPIException, sfv._get_sfaccounts_for_tenant, 'some-name') def test_get_sfaccount_by_name(self): sfv = solidfire.SolidFireDriver(configuration=self.configuration) self.mock_object(solidfire.SolidFireDriver, '_issue_api_request', self.fake_issue_api_request) account = sfv._get_sfaccount_by_name('some-name') self.assertIsNotNone(account) def test_get_account_create_availability_no_account(self): fake_sfaccounts = [] sfv = solidfire.SolidFireDriver(configuration=self.configuration) sfaccount = sfv._get_account_create_availability(fake_sfaccounts) self.assertIsNone(sfaccount) def test_get_account_create_availability(self): fake_sfaccounts = [{'accountID': 29, 'targetSecret': 'shhhh', 'username': 'prefix-testprjid', 'volumes': [6, 7, 20]}] sfv = solidfire.SolidFireDriver(configuration=self.configuration) sfaccount = sfv._get_account_create_availability(fake_sfaccounts) self.assertIsNotNone(sfaccount) self.assertEqual(sfaccount['accountID'], fake_sfaccounts[0]['accountID']) def test_get_account_create_availability_primary_full(self): fake_sfaccounts = [{'accountID': 30, 'targetSecret': 'shhhh', 'username': 'prefix-testprjid'}] get_sfaccount_result = {'accountID': 31, 'targetSecret': 'shhhh', 'username': 'prefix-testprjid_'} get_vol_result = list(range(1, 2001)) sfv = solidfire.SolidFireDriver(configuration=self.configuration) with mock.patch.object(sfv, '_get_sfaccounts_for_tenant', return_value=fake_sfaccounts), \ mock.patch.object(sfv, '_get_volumes_for_account', return_value=get_vol_result): sfaccount = sfv._get_account_create_availability(fake_sfaccounts) self.assertIsNotNone(sfaccount) self.assertEqual(sfaccount['username'], get_sfaccount_result['username']) def test_get_account_create_availability_both_full(self): fake_sfaccounts = [{'accountID': 32, 'targetSecret': 'shhhh', 'username': 'prefix-testprjid'}, {'accountID': 33, 'targetSecret': 'shhhh', 'username': 'prefix-testprjid_'}] get_vol_result = list(range(1, 2001)) sfv = solidfire.SolidFireDriver(configuration=self.configuration) with mock.patch.object(sfv, '_get_sfaccounts_for_tenant', return_value=fake_sfaccounts), \ mock.patch.object(sfv, '_get_volumes_for_account', return_value=get_vol_result): sfaccount = sfv._get_account_create_availability(fake_sfaccounts) self.assertIsNone(sfaccount) def test_get_create_account(self): fake_sfaccounts = [{'accountID': 34, 'targetSecret': 'shhhh', 'username': 'prefix-testprjid'}, {'accountID': 35, 'targetSecret': 'shhhh', 'username': 'prefix-testprjid_'}] get_vol_result = list(range(1, 2001)) sfv = solidfire.SolidFireDriver(configuration=self.configuration) with mock.patch.object(sfv, '_get_sfaccounts_for_tenant', return_value=fake_sfaccounts), \ mock.patch.object(sfv, '_get_volumes_for_account', return_value=get_vol_result): sfaccount = sfv._get_account_create_availability(fake_sfaccounts) self.assertRaises(solidfire.SolidFireDriverException, sfv._get_create_account, sfaccount) def test_get_sfaccount_by_name_fails(self): sfv = solidfire.SolidFireDriver(configuration=self.configuration) self.mock_object(solidfire.SolidFireDriver, '_issue_api_request', self.fake_issue_api_request_fails) self.assertRaises(solidfire.SolidFireAPIException, sfv._get_sfaccount_by_name, 'some-name') def test_get_sfvol_by_cinder_vref_no_provider_id(self): fake_sfaccounts = [{'accountID': 25, 'targetSecret': 'shhhh', 'username': 'prefix-testprjid', 'volumes': [6, 7, 20]}] 
self.mock_vref = mock_vref() vol_result = {'volumeID': 5, 'name': 'test_volume', 'accountID': 25, 'sliceCount': 1, 'totalSize': 1 * units.Gi, 'enable512e': True, 'access': "readWrite", 'status': "active", 'attributes': {'uuid': f_uuid[0]}, 'qos': None, 'iqn': 'super_fake_iqn'} mod_conf = self.configuration mod_conf.sf_enable_vag = True sfv = solidfire.SolidFireDriver(configuration=mod_conf) with mock.patch.object(sfv, '_get_sfaccounts_for_tenant', return_value = fake_sfaccounts), \ mock.patch.object(sfv, '_issue_api_request', side_effect = self.fake_issue_api_request): self.mock_vref['provider_id'] = None sfvol = sfv._get_sfvol_by_cinder_vref(self.mock_vref) self.assertIsNotNone(sfvol) self.assertEqual(sfvol['attributes']['uuid'], vol_result['attributes']['uuid']) self.assertEqual(sfvol['volumeID'], vol_result['volumeID']) def test_get_sfvol_by_cinder_vref_no_provider_id_nomatch(self): fake_sfaccounts = [{'accountID': 5, 'targetSecret': 'shhhh', 'username': 'prefix-testprjid', 'volumes': [5, 6, 7, 8]}] self.mock_vref = mock_vref() mod_conf = self.configuration mod_conf.sf_enable_vag = True sfv = solidfire.SolidFireDriver(configuration=mod_conf) with mock.patch.object(sfv, '_get_sfaccounts_for_tenant', return_value = fake_sfaccounts), \ mock.patch.object(sfv, '_issue_api_request', side_effect = self.fake_issue_api_request): self.mock_vref['provider_id'] = None self.mock_vref['id'] = '142b9c32-a71A-4fbe-830c-c20c5596caea' sfvol = sfv._get_sfvol_by_cinder_vref(self.mock_vref) self.assertIsNone(sfvol) def test_get_sfvol_by_cinder_vref_nomatch(self): fake_sfaccounts = [{'accountID': 5, 'targetSecret': 'shhhh', 'username': 'prefix-testprjid', 'volumes': [5, 6, 7, 8]}] self.mock_vref = mock_vref() mod_conf = self.configuration mod_conf.sf_enable_vag = True sfv = solidfire.SolidFireDriver(configuration=mod_conf) with mock.patch.object(sfv, '_get_sfaccounts_for_tenant', return_value = fake_sfaccounts), \ mock.patch.object(sfv, '_issue_api_request', side_effect = self.fake_issue_api_request): p_i = '324 8 6ecebf5d-5521-4ce1-80f3-358ebc1b9cdc' self.mock_vref['provider_id'] = p_i self.mock_vref['id'] = '142b9c32-a71A-4fbe-830c-c20c5596caea' sfvol = sfv._get_sfvol_by_cinder_vref(self.mock_vref) self.assertIsNone(sfvol) def test_get_sfvol_by_cinder_vref(self): fake_sfaccounts = [{'accountID': 5, 'targetSecret': 'shhhh', 'username': 'prefix-testprjid', 'volumes': [5, 6, 7, 8]}] self.mock_vref = mock_vref() get_vol_result = {'volumeID': 5, 'name': 'test_volume', 'accountID': 25, 'sliceCount': 1, 'totalSize': 1 * units.Gi, 'enable512e': True, 'access': "readWrite", 'status': "active", 'attributes': {'uuid': f_uuid[0]}, 'qos': None, 'iqn': 'super_fake_iqn'} sfv = solidfire.SolidFireDriver(configuration=self.configuration) with mock.patch.object(sfv, '_get_sfaccounts_for_tenant', return_value = fake_sfaccounts), \ mock.patch.object(sfv, '_issue_api_request', side_effect = self.fake_issue_api_request): sfvol = sfv._get_sfvol_by_cinder_vref(self.mock_vref) self.assertIsNotNone(sfvol) self.assertEqual(get_vol_result['volumeID'], sfvol['volumeID']) def test_delete_volume(self): vol_id = 'a720b3c0-d1f0-11e1-9b23-0800200c9a66' testvol = test_utils.create_volume( self.ctxt, id=vol_id, display_name='test_volume', provider_id='1 5 None', multiattach=False) fake_sfaccounts = [{'accountID': 5, 'name': 'testprjid', 'targetSecret': 'shhhh', 'username': 'john-wayne'}] get_vol_result = {'volumeID': 5, 'name': 'test_volume', 'accountID': 25, 'sliceCount': 1, 'totalSize': 1 * units.Gi, 'enable512e': True, 'access': 
"readWrite", 'status': "active", 'attributes': {}, 'qos': None, 'iqn': 'super_fake_iqn'} mod_conf = self.configuration mod_conf.sf_enable_vag = True sfv = solidfire.SolidFireDriver(configuration=mod_conf) with mock.patch.object(sfv, '_get_sfaccounts_for_tenant', return_value=fake_sfaccounts), \ mock.patch.object(sfv, '_get_sfvol_by_cinder_vref', return_value=get_vol_result), \ mock.patch.object(sfv, '_issue_api_request'), \ mock.patch.object(sfv, '_remove_volume_from_vags') as rem_vol: sfv.delete_volume(testvol) rem_vol.assert_not_called() def test_delete_multiattach_volume(self): vol_id = 'a720b3c0-d1f0-11e1-9b23-0800200c9a66' testvol = test_utils.create_volume( self.ctxt, id=vol_id, display_name='test_volume', provider_id='1 5 None', multiattach=True) fake_sfaccounts = [{'accountID': 5, 'targetSecret': 'shhhh', 'username': 'prefix-testprjid'}] get_vol_result = {'volumeID': 5, 'name': 'test_volume', 'accountID': 25, 'sliceCount': 1, 'totalSize': 1 * units.Gi, 'enable512e': True, 'access': "readWrite", 'status': "active", 'attributes': {}, 'qos': None, 'iqn': 'super_fake_iqn'} mod_conf = self.configuration mod_conf.sf_enable_vag = True sfv = solidfire.SolidFireDriver(configuration=mod_conf) with mock.patch.object(sfv, '_get_sfaccounts_for_tenant', return_value=fake_sfaccounts), \ mock.patch.object(sfv, '_get_sfvol_by_cinder_vref', return_value=get_vol_result), \ mock.patch.object(sfv, '_issue_api_request'), \ mock.patch.object(sfv, '_remove_volume_from_vags') as rem_vol: sfv.delete_volume(testvol) rem_vol.assert_called_with(get_vol_result['volumeID']) def test_delete_volume_no_volume_on_backend(self): fake_sfaccounts = [{'accountID': 5, 'targetSecret': 'shhhh', 'username': 'prefix-testprjid'}] fake_no_volumes = [] testvol = test_utils.create_volume(self.ctxt) sfv = solidfire.SolidFireDriver(configuration=self.configuration) with mock.patch.object(sfv, '_get_sfaccounts_for_tenant', return_value=fake_sfaccounts), \ mock.patch.object(sfv, '_get_volumes_for_account', return_value=fake_no_volumes): sfv.delete_volume(testvol) def test_delete_snapshot_no_snapshot_on_backend(self): fake_sfaccounts = [{'accountID': 5, 'targetSecret': 'shhhh', 'username': 'prefix-testprjid'}] fake_no_volumes = [] testvol = test_utils.create_volume( self.ctxt, volume_id='b831c4d1-d1f0-11e1-9b23-0800200c9a66') testsnap = test_utils.create_snapshot( self.ctxt, volume_id=testvol.id) sfv = solidfire.SolidFireDriver(configuration=self.configuration) with mock.patch.object(sfv, '_get_sfaccounts_for_tenant', return_value=fake_sfaccounts), \ mock.patch.object(sfv, '_get_volumes_for_account', return_value=fake_no_volumes): sfv.delete_snapshot(testsnap) def fake_ext_qos_issue_api_request(self, method, params, version='1.0', endpoint=None): EXPECTED_SIZE = 2 << 30 # 2147483648 size + increase if method == 'ModifyVolume': response = {'error': {'code': 0, 'name': 'Extend Volume', 'message': 'extend fail, size/scale-iops'}, 'id': 1} if params.get('totalSize', None) != EXPECTED_SIZE: msg = ('Error (%s) encountered during ' 'SolidFire API call.' % response['error']['name']) raise solidfire.SolidFireAPIException(message=msg) if params.get('qos', None) != SolidFireVolumeTestCase.EXPECTED_QOS: msg = ('Error (%s) encountered during ' 'SolidFire API call.' 
% response['error']['name']) raise solidfire.SolidFireAPIException(message=msg) return {'result': {}, 'id': 1} elif method == 'GetAccountByName' and version == '1.0': results = {'result': {'account': {'accountID': 25, 'username': params['username'], 'status': 'active', 'initiatorSecret': '123456789012', 'targetSecret': '123456789012', 'attributes': {}, 'volumes': [6, 7, 20]}}, "id": 1} return results elif method == 'ListVolumesForAccount' and version == '1.0': test_name = 'OS-VOLID-a720b3c0-d1f0-11e1-9b23-0800200c9a66' result = {'result': { 'volumes': [{'volumeID': 5, 'name': test_name, 'accountID': 25, 'sliceCount': 1, 'totalSize': 1 * units.Gi, 'enable512e': True, 'access': "readWrite", 'status': "active", 'attributes': {}, 'qos': None, 'iqn': test_name}]}} return result else: return None def test_extend_volume(self): self.mock_object(solidfire.SolidFireDriver, '_issue_api_request', self.fake_issue_api_request) testvol = {'project_id': 'testprjid', 'name': 'test_volume', 'size': 1, 'id': 'a720b3c0-d1f0-11e1-9b23-0800200c9a66', 'created_at': timeutils.utcnow()} sfv = solidfire.SolidFireDriver(configuration=self.configuration) sfv.extend_volume(testvol, 2) def test_extend_volume_with_scaled_qos(self): size = 1 self.mock_object(solidfire.SolidFireDriver, '_issue_api_request', self.fake_issue_api_request) sfv = solidfire.SolidFireDriver(configuration=self.configuration) qos_ref = qos_specs.create(self.ctxt, 'qos-specs-1', {'minIOPS': '100', 'maxIOPS': '1000', 'burstIOPS': '1500', 'scaledIOPS': 'True', 'scaleMin': '10', 'scaleMax': '20', 'scaleBurst': '30'}) type_ref = volume_types.create(self.ctxt, "type1", {'qos:minIOPS': '1000', 'qos:maxIOPS': '10000', 'qos:burstIOPS': '20000'}) qos_specs.associate_qos_with_type(self.ctxt, qos_ref['id'], type_ref['id']) qos = sfv._set_qos_by_volume_type(self.ctxt, type_ref['id'], size + 1) self.assertEqual(SolidFireVolumeTestCase.EXPECTED_QOS, qos) def test_extend_volume_fails_no_volume(self): self.mock_object(solidfire.SolidFireDriver, '_issue_api_request', self.fake_issue_api_request) testvol = {'project_id': 'testprjid', 'name': 'no-name', 'size': 1, 'id': 'not-found'} sfv = solidfire.SolidFireDriver(configuration=self.configuration) self.assertRaises(exception.VolumeNotFound, sfv.extend_volume, testvol, 2) def test_extend_volume_fails_account_lookup(self): # NOTE(JDG) This test just fakes update_cluster_status # this is intentional for this test self.mock_object(solidfire.SolidFireDriver, '_update_cluster_status', self.fake_update_cluster_status) self.mock_object(solidfire.SolidFireDriver, '_issue_api_request', self.fake_issue_api_request) testvol = {'project_id': 'testprjid', 'name': 'no-name', 'size': 1, 'id': 'a720b3c0-d1f0-11e1-9b23-0800200c9a66', 'created_at': timeutils.utcnow()} sfv = solidfire.SolidFireDriver(configuration=self.configuration) self.mock_object(solidfire.SolidFireDriver, '_issue_api_request', self.fake_issue_api_request_fails) self.assertRaises(solidfire.SolidFireAPIException, sfv.extend_volume, testvol, 2) @mock.patch.object(solidfire.SolidFireDriver, '_get_sfaccount') @mock.patch.object(solidfire.SolidFireDriver, '_get_sf_volume') @mock.patch.object(solidfire.SolidFireDriver, '_retrieve_qos_setting') @mock.patch.object(solidfire.SolidFireDriver, '_issue_api_request') @mock.patch.object(solidfire.SolidFireDriver, '_retrieve_replication_settings') @mock.patch.object(solidfire.SolidFireDriver, '_create_cluster_reference') def test_extend_replicated_volume(self, mock_create_cluster_reference, mock_retrieve_replication_settings, 
mock_issue_api_request, mock_retrieve_qos_setting, mock_get_sf_volume, mock_get_sfaccount): mock_create_cluster_reference.return_value = { 'mvip': self.mvip, 'svip': self.svip} mock_retrieve_replication_settings.return_value = "Async" mock_retrieve_qos_setting.return_value = None self.fake_sfvol['volumePairs'] = [{'remoteVolumeID': 26}] mock_get_sf_volume.return_value = self.fake_sfvol mock_get_sfaccount.return_value = self.fake_sfaccount ctx = context.get_admin_context() utc_now = timeutils.utcnow().isoformat() vol_fields = { 'id': f_uuid[0], 'created_at': utc_now } vol = fake_volume.fake_volume_obj(ctx, **vol_fields) sfv = solidfire.SolidFireDriver(configuration=self.configuration) sfv.replication_enabled = True sfv.cluster_pairs = self.cluster_pairs sfv.active_cluster['mvip'] = self.mvip sfv.active_cluster['svip'] = self.svip mock_issue_api_request.reset_mock() # pylint: disable=assignment-from-no-return updates = sfv.extend_volume(vol, vol.size + 10) # pylint: enable=assignment-from-no-return self.assertIsNone(updates) modify_params = { 'volumeID': self.fake_sfvol['volumeID'], 'totalSize': int((vol.size + 10) * units.Gi), 'qos': None } modify_params2 = modify_params.copy() modify_params2['volumeID'] = 26 expected_calls = [ mock.call("ModifyVolume", modify_params, version='5.0'), mock.call("ModifyVolume", modify_params2, version='5.0', endpoint=self.cluster_pairs[0]['endpoint']) ] mock_issue_api_request.assert_has_calls(expected_calls) mock_create_cluster_reference.assert_called() mock_retrieve_replication_settings.assert_called_with(vol) mock_retrieve_qos_setting.assert_called_with(vol, vol.size + 10) mock_get_sf_volume.assert_called_with( vol.id, {'accountID': self.fake_sfaccount['accountID']}) mock_get_sfaccount.assert_called_with(vol.project_id) def test_set_by_qos_spec_with_scoping(self): size = 1 sfv = solidfire.SolidFireDriver(configuration=self.configuration) qos_ref = qos_specs.create(self.ctxt, 'qos-specs-1', {'qos:minIOPS': '1000', 'qos:maxIOPS': '10000', 'qos:burstIOPS': '20000'}) type_ref = volume_types.create(self.ctxt, "type1", {"qos:minIOPS": "100", "qos:burstIOPS": "300", "qos:maxIOPS": "200"}) qos_specs.associate_qos_with_type(self.ctxt, qos_ref['id'], type_ref['id']) qos = sfv._set_qos_by_volume_type(self.ctxt, type_ref['id'], size) self.assertEqual(self.expected_qos_results, qos) def test_set_by_qos_spec(self): size = 1 sfv = solidfire.SolidFireDriver(configuration=self.configuration) qos_ref = qos_specs.create(self.ctxt, 'qos-specs-1', {'minIOPS': '1000', 'maxIOPS': '10000', 'burstIOPS': '20000'}) type_ref = volume_types.create(self.ctxt, "type1", {"qos:minIOPS": "100", "qos:burstIOPS": "300", "qos:maxIOPS": "200"}) qos_specs.associate_qos_with_type(self.ctxt, qos_ref['id'], type_ref['id']) qos = sfv._set_qos_by_volume_type(self.ctxt, type_ref['id'], size) self.assertEqual(self.expected_qos_results, qos) @file_data("scaled_iops_test_data.json") @unpack def test_scaled_qos_spec_by_type(self, argument): sfv = solidfire.SolidFireDriver(configuration=self.configuration) size = argument[0].pop('size') type_ref = volume_types.create(self.ctxt, "type1", argument[0]) qos = sfv._set_qos_by_volume_type(self.ctxt, type_ref['id'], size) self.assertEqual(argument[1], qos) @file_data("scaled_iops_invalid_data.json") @unpack def test_set_scaled_qos_by_type_invalid(self, inputs): sfv = solidfire.SolidFireDriver(configuration=self.configuration) size = inputs[0].pop('size') type_ref = volume_types.create(self.ctxt, "type1", inputs[0]) self.assertRaises(exception.InvalidQoSSpecs, 
sfv._set_qos_by_volume_type, self.ctxt, type_ref['id'], size) def test_accept_transfer(self): sfv = solidfire.SolidFireDriver(configuration=self.configuration) self.mock_object(solidfire.SolidFireDriver, '_issue_api_request', self.fake_issue_api_request) testvol = {'project_id': 'testprjid', 'name': 'test_volume', 'size': 1, 'id': 'a720b3c0-d1f0-11e1-9b23-0800200c9a66', 'created_at': timeutils.utcnow()} expected = {'provider_auth': 'CHAP cinder-new_project 123456789012'} self.assertEqual(expected, sfv.accept_transfer(self.ctxt, testvol, 'new_user', 'new_project')) def test_accept_transfer_volume_not_found_raises(self): sfv = solidfire.SolidFireDriver(configuration=self.configuration) self.mock_object(solidfire.SolidFireDriver, '_issue_api_request', self.fake_issue_api_request) testvol = {'project_id': 'testprjid', 'name': 'test_volume', 'size': 1, 'id': 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa', 'created_at': timeutils.utcnow()} self.assertRaises(exception.VolumeNotFound, sfv.accept_transfer, self.ctxt, testvol, 'new_user', 'new_project') def test_retype(self): sfv = solidfire.SolidFireDriver(configuration=self.configuration) self.mock_object(solidfire.SolidFireDriver, '_issue_api_request', self.fake_issue_api_request) type_ref = volume_types.create(self.ctxt, "type1", {"qos:minIOPS": "500", "qos:burstIOPS": "2000", "qos:maxIOPS": "1000"}) diff = {'encryption': {}, 'qos_specs': {}, 'extra_specs': {'qos:burstIOPS': ('10000', u'2000'), 'qos:minIOPS': ('1000', u'500'), 'qos:maxIOPS': ('10000', u'1000')}} host = None updates = {'project_id': 'testprjid', 'name': 'test_volume', 'size': 1, 'id': 'a720b3c0-d1f0-11e1-9b23-0800200c9a66', 'created_at': timeutils.utcnow()} ctx = context.get_admin_context() testvol = fake_volume.fake_volume_obj(ctx, **updates) migrated, updates = sfv.retype(self.ctxt, testvol, type_ref, diff, host) self.assertTrue(migrated) self.assertEqual({}, updates) def test_retype_with_qos_spec(self): test_type = {'name': 'sf-1', 'qos_specs_id': 'fb0576d7-b4b5-4cad-85dc-ca92e6a497d1', 'deleted': False, 'created_at': '2014-02-06 04:58:11', 'updated_at': None, 'extra_specs': {}, 'deleted_at': None, 'id': 'e730e97b-bc7d-4af3-934a-32e59b218e81'} test_qos_spec = {'id': 'asdfafdasdf', 'specs': {'minIOPS': '1000', 'maxIOPS': '2000', 'burstIOPS': '3000'}} def _fake_get_volume_type(ctxt, type_id): return test_type def _fake_get_qos_spec(ctxt, spec_id): return test_qos_spec self.mock_object(solidfire.SolidFireDriver, '_issue_api_request', self.fake_issue_api_request) self.mock_object(volume_types, 'get_volume_type', _fake_get_volume_type) self.mock_object(qos_specs, 'get_qos_specs', _fake_get_qos_spec) sfv = solidfire.SolidFireDriver(configuration=self.configuration) diff = {'encryption': {}, 'extra_specs': {}, 'qos_specs': {'burstIOPS': ('10000', '2000'), 'minIOPS': ('1000', '500'), 'maxIOPS': ('10000', '1000')}} host = None updates = {'project_id': 'testprjid', 'name': 'test_volume', 'size': 1, 'id': 'a720b3c0-d1f0-11e1-9b23-0800200c9a66', 'created_at': timeutils.utcnow()} ctx = context.get_admin_context() testvol = fake_volume.fake_volume_obj(ctx, **updates) sfv = solidfire.SolidFireDriver(configuration=self.configuration) migrated, updates = sfv.retype(self.ctxt, testvol, test_type, diff, host) self.assertTrue(migrated) self.assertEqual({}, updates) @mock.patch.object(solidfire.SolidFireDriver, '_get_sfaccount') @mock.patch.object(solidfire.SolidFireDriver, '_get_sf_volume') @mock.patch.object(solidfire.SolidFireDriver, '_set_rep_by_volume_type') 
@mock.patch.object(solidfire.SolidFireDriver, '_retrieve_replication_settings') @mock.patch.object(solidfire.SolidFireDriver, '_get_default_volume_params') @mock.patch.object(solidfire.SolidFireDriver, '_replicate_volume') @mock.patch.object(solidfire.SolidFireDriver, '_disable_replication') @mock.patch.object(solidfire.SolidFireDriver, '_set_qos_by_volume_type') def test_retype_replicated(self, mock_set_qos_by_volume_type, mock_disable_replication, mock_replicate_volume, mock_get_default_volume_params, mock_retrieve_replication_settings, mock_set_rep_by_volume_type, mock_get_sf_volume, mock_get_sfaccount): all_mocks = locals() mock_get_sf_volume.return_value = None mock_get_sfaccount.return_value = self.fake_sfaccount mock_retrieve_replication_settings.return_value = 'Async' ctx = context.get_admin_context() type_fields = {'extra_specs': {'replication_enabled': ' True'}, 'id': fakes.get_fake_uuid()} src_vol_type = fake_volume.fake_volume_type_obj(ctx, **type_fields) fake_provider_id = "%s %s %s" % ( self.fake_sfvol['volumeID'], fakes.FAKE_UUID, self.cluster_pairs[0]['uuid']) utc_now = timeutils.utcnow().isoformat() vol_fields = { 'id': fakes.FAKE_UUID, 'created_at': utc_now, 'volume_type': src_vol_type, 'volume_type_id': src_vol_type.id, 'provider_id': fake_provider_id } vol = fake_volume.fake_volume_obj(ctx, **vol_fields) dst_vol_type = fake_volume.fake_volume_type_obj(ctx) sfv = solidfire.SolidFireDriver(configuration=self.configuration) sfv.replication_enabled = True sfv.cluster_pairs = self.cluster_pairs sfv.active_cluster['mvip'] = self.mvip sfv.active_cluster['svip'] = self.svip self.assertRaises(exception.VolumeNotFound, sfv.retype, ctx, vol, dst_vol_type, None, None) mock_get_sfaccount.assert_called_once_with(vol.project_id) mock_get_sf_volume.assert_called_once_with( vol.id, {'accountID': self.fake_sfaccount['accountID']}) mock_get_sfaccount.reset_mock() mock_get_sf_volume.reset_mock() expected = {"key": "value"} mock_get_sf_volume.return_value = self.fake_sfvol mock_replicate_volume.return_value = expected mock_set_rep_by_volume_type.side_effect = [src_vol_type, dst_vol_type] retyped, updates = sfv.retype(ctx, vol, dst_vol_type, None, None) self.assertDictEqual(expected, updates) mock_get_sfaccount.assert_called_once_with(vol.project_id) mock_get_sf_volume.assert_called_once_with( vol.id, {'accountID': self.fake_sfaccount['accountID']}) mock_get_default_volume_params.assert_called() mock_disable_replication.assert_not_called() mock_replicate_volume.assert_called_once() mock_retrieve_replication_settings.assert_called_once() mock_set_qos_by_volume_type.assert_called_once() expected = {} for mk in all_mocks.values(): if isinstance(mk, mock.MagicMock): mk.reset_mock() mock_set_rep_by_volume_type.side_effect = [src_vol_type, None] retyped, updates = sfv.retype(ctx, vol, dst_vol_type, None, None) self.assertDictEqual(expected, updates) mock_get_sfaccount.assert_called_once_with(vol.project_id) mock_get_sf_volume.assert_called_once_with( vol.id, {'accountID': self.fake_sfaccount['accountID']}) mock_get_default_volume_params.assert_not_called() mock_disable_replication.assert_called_with(vol) mock_replicate_volume.assert_not_called() mock_retrieve_replication_settings.assert_not_called() mock_set_qos_by_volume_type.assert_called_once() @mock.patch.object(solidfire.SolidFireDriver, '_issue_api_request') @mock.patch.object(solidfire.SolidFireDriver, '_create_cluster_reference') def test_update_cluster_status(self, mock_create_cluster_reference, mock_issue_api_request): 
mock_create_cluster_reference.return_value = { 'mvip': self.mvip, 'svip': self.svip} fake_results = {'result': {'clusterCapacity': { 'usedMetadataSpaceInSnapshots': 16476454912, 'maxUsedMetadataSpace': 432103337164, 'activeBlockSpace': 616690857535, 'uniqueBlocksUsedSpace': 628629229316, 'totalOps': 7092186135, 'peakActiveSessions': 0, 'uniqueBlocks': 519489473, 'maxOverProvisionableSpace': 276546135777280, 'zeroBlocks': 8719571984, 'provisionedSpace': 19938551005184, 'maxUsedSpace': 8402009333760, 'peakIOPS': 0, 'timestamp': '2019-04-24T12:08:22Z', 'currentIOPS': 0, 'usedSpace': 628629229316, 'activeSessions': 0, 'nonZeroBlocks': 1016048624, 'maxProvisionedSpace': 55309227155456, 'usedMetadataSpace': 16476946432, 'averageIOPS': 0, 'snapshotNonZeroBlocks': 1606, 'maxIOPS': 200000, 'clusterRecentIOSize': 0}}} results_with_zero = fake_results.copy() results_with_zero['result']['clusterCapacity']['nonZeroBlocks'] = 0 mock_issue_api_request.return_value = fake_results driver_defined_stats = ['volume_backend_name', 'vendor_name', 'driver_version', 'storage_protocol', 'consistencygroup_support', 'consistent_group_snapshot_enabled', 'replication_enabled', 'active_cluster_mvip', 'reserved_percentage', 'QoS_support', 'multiattach', 'total_capacity_gb', 'free_capacity_gb', 'compression_percent', 'deduplication_percent', 'thin_provision_percent', 'provisioned_iops', 'current_iops', 'average_iops', 'max_iops', 'peak_iops', 'thin_provisioning_support', 'provisioned_capacity_gb', 'max_over_subscription_ratio'] sfv = solidfire.SolidFireDriver(configuration=self.configuration) sfv.configuration.sf_provisioning_calc = 'usedSpace' sfv.active_cluster['mvip'] = self.mvip sfv.active_cluster['svip'] = self.svip sfv._update_cluster_status() for key in driver_defined_stats: if sfv.cluster_stats.get(key, None) is None: msg = 'Key %s should be present at driver stats.' 
% key raise exception.CinderException(message=msg) for key in driver_defined_stats: self.assertIn(key, driver_defined_stats) mock_create_cluster_reference.assert_called() mock_issue_api_request.assert_called_with('GetClusterCapacity', {}, version='8.0') mock_issue_api_request.reset_mock() mock_issue_api_request.return_value = results_with_zero sfv._update_cluster_status() self.assertEqual(100, sfv.cluster_stats['compression_percent']) self.assertEqual(100, sfv.cluster_stats['deduplication_percent']) self.assertEqual(100, sfv.cluster_stats['thin_provision_percent']) mock_create_cluster_reference.assert_called() mock_issue_api_request.assert_called_with('GetClusterCapacity', {}, version='8.0') def test_update_cluster_status_mvip_unreachable(self): self.mock_object(solidfire.SolidFireDriver, '_issue_api_request', self.fake_issue_api_request) sfv = solidfire.SolidFireDriver(configuration=self.configuration) with mock.patch.object(sfv, '_issue_api_request', side_effect=self.fake_issue_api_request_fails): sfv._update_cluster_status() self.assertEqual(0, sfv.cluster_stats['free_capacity_gb']) self.assertEqual(0, sfv.cluster_stats['total_capacity_gb']) def test_manage_existing_volume(self): external_ref = {'name': 'existing volume', 'source-id': 5} updates = {'project_id': 'testprjid', 'name': 'testvol', 'size': 1, 'id': 'a720b3c0-d1f0-11e1-9b23-0800200c9a66', 'created_at': timeutils.utcnow()} ctx = context.get_admin_context() testvol = fake_volume.fake_volume_obj(ctx, **updates) self.mock_object(solidfire.SolidFireDriver, '_issue_api_request', self.fake_issue_api_request) sfv = solidfire.SolidFireDriver(configuration=self.configuration) model_update = sfv.manage_existing(testvol, external_ref) self.assertIsNotNone(model_update) self.assertNotIn('provider_geometry', model_update) def test_manage_existing_get_size(self): external_ref = {'name': 'existing volume', 'source-id': 5} testvol = {'project_id': 'testprjid', 'name': 'testvol', 'size': 1, 'id': 'a720b3c0-d1f0-11e1-9b23-0800200c9a66', 'created_at': timeutils.utcnow()} mock_issue_api_request = self.mock_object(solidfire.SolidFireDriver, '_issue_api_request') mock_issue_api_request.side_effect = self.fake_issue_api_request sfv = solidfire.SolidFireDriver(configuration=self.configuration) size = sfv.manage_existing_get_size(testvol, external_ref) self.assertEqual(2, size) @mock.patch.object(solidfire.SolidFireDriver, '_issue_api_request') @mock.patch.object(solidfire.SolidFireDriver, '_get_create_account') @mock.patch.object(solidfire.SolidFireDriver, '_get_default_volume_params') @mock.patch.object(solidfire.SolidFireDriver, '_retrieve_replication_settings') @mock.patch.object(solidfire.SolidFireDriver, '_replicate_volume') @mock.patch.object(solidfire.SolidFireDriver, '_get_model_info') @mock.patch.object(solidfire.SolidFireDriver, '_update_cluster_status') @mock.patch.object(solidfire.SolidFireDriver, '_create_cluster_reference') def test_manage_existing_replicated_fail( self, mock_create_cluster_reference, mock_update_cluster_status, mock_get_model_info, mock_replicate_volume, mock_retrieve_replication_settings, mock_get_default_volume_params, mock_get_create_account, mock_issue_api_request): mock_retrieve_replication_settings.return_value = 'Async' mock_get_default_volume_params.return_value = {'totalSize': 50} mock_get_create_account.return_value = self.fake_sfaccount mock_replicate_volume.side_effect = solidfire.SolidFireAPIException ctx = context.get_admin_context() type_fields = {'extra_specs': {'replication_enabled': ' True'}, 'id': 
fakes.get_fake_uuid()} vol_type = fake_volume.fake_volume_type_obj(ctx, **type_fields) fake_provider_id = "%s %s %s" % ( self.fake_sfvol['volumeID'], fakes.FAKE_UUID, self.cluster_pairs[0]['uuid']) utc_now = timeutils.utcnow().isoformat() vol_fields = { 'id': fakes.FAKE_UUID, 'created_at': utc_now, 'volume_type': vol_type, 'volume_type_id': vol_type.id, 'provider_id': fake_provider_id } vol = fake_volume.fake_volume_obj(ctx, **vol_fields) sfv = solidfire.SolidFireDriver(configuration=self.configuration) sfv.replication_enabled = True sfv.active_cluster['mvip'] = self.mvip sfv.active_cluster['svip'] = self.svip external_ref = {} self.assertRaises(solidfire.SolidFireAPIException, sfv.manage_existing, vol, external_ref) self.fake_sfvol['volumePairs'] = [{'remoteVolumeID': 26}] mock_issue_api_request.return_value = { 'result': {'volumes': [self.fake_sfvol]}} external_ref = {'source-id': 6, 'name': 'new-being-managed'} self.assertRaises(solidfire.SolidFireDriverException, sfv.manage_existing, vol, external_ref) mock_get_default_volume_params.return_value = {'totalSize': 50} self.fake_sfvol['volumePairs'] = [] mock_issue_api_request.return_value = { 'result': {'volumes': [self.fake_sfvol]}} self.assertRaises(solidfire.SolidFireAPIException, sfv.manage_existing, vol, external_ref) modify_attributes = {'uuid': vol.id, 'is_clone': 'False', 'os_imported_at': utc_now + "+00:00", 'old_name': 'new-being-managed'} modify_params1 = {'volumeID': self.fake_sfvol['volumeID'], 'attributes': modify_attributes} modify_params2 = {'volumeID': self.fake_sfvol['volumeID'], 'attributes': self.fake_sfvol['attributes']} calls = [mock.call('ListActiveVolumes', {'startVolumeID': self.fake_sfvol['volumeID'], 'limit': 1}), mock.call('ModifyVolume', modify_params1, version='5.0'), mock.call('ModifyVolume', modify_params2, version='5.0')] mock_issue_api_request.assert_has_calls(calls) mock_get_model_info.assert_not_called() mock_create_cluster_reference.assert_called_once() mock_update_cluster_status.assert_called_once() mock_replicate_volume.assert_called() mock_retrieve_replication_settings.assert_called_with(vol) mock_get_default_volume_params.assert_called_with(vol) mock_get_create_account.assert_called_with(vol.project_id) @mock.patch.object(solidfire.SolidFireDriver, '_get_sfaccount') @mock.patch.object(solidfire.SolidFireDriver, '_get_sf_volume') @mock.patch.object(solidfire.SolidFireDriver, '_set_rep_by_volume_type') @mock.patch.object(solidfire.SolidFireDriver, '_retrieve_replication_settings') @mock.patch.object(solidfire.SolidFireDriver, '_get_default_volume_params') @mock.patch.object(solidfire.SolidFireDriver, '_replicate_volume') @mock.patch.object(solidfire.SolidFireDriver, '_disable_replication') @mock.patch.object(solidfire.SolidFireDriver, '_set_qos_by_volume_type') def test_manage_existing_replicated( self, mock_set_qos_by_volume_type, mock_disable_replication, mock_replicate_volume, mock_get_default_volume_params, mock_retrieve_replication_settings, mock_set_rep_by_volume_type, mock_get_sf_volume, mock_get_sfaccount): mock_get_sf_volume.return_value = None mock_get_sfaccount.return_value = self.fake_sfaccount mock_retrieve_replication_settings.return_value = 'Async' ctx = context.get_admin_context() type_fields = {'extra_specs': {'replication_enabled': ' True'}, 'id': fakes.get_fake_uuid()} src_vol_type = fake_volume.fake_volume_type_obj(ctx, **type_fields) fake_provider_id = "%s %s %s" % ( self.fake_sfvol['volumeID'], fakes.FAKE_UUID, self.cluster_pairs[0]['uuid']) utc_now = 
timeutils.utcnow().isoformat() vol_fields = { 'id': fakes.FAKE_UUID, 'created_at': utc_now, 'volume_type': src_vol_type, 'volume_type_id': src_vol_type.id, 'provider_id': fake_provider_id } vol = fake_volume.fake_volume_obj(ctx, **vol_fields) dst_vol_type = fake_volume.fake_volume_type_obj(ctx) sfv = solidfire.SolidFireDriver(configuration=self.configuration) sfv.replication_enabled = True sfv.cluster_pairs = self.cluster_pairs sfv.active_cluster['mvip'] = self.mvip sfv.active_cluster['svip'] = self.svip self.assertRaises(exception.VolumeNotFound, sfv.retype, ctx, vol, dst_vol_type, None, None) mock_get_sfaccount.assert_called_once_with(vol.project_id) mock_get_sf_volume.assert_called_once_with( vol.id, {'accountID': self.fake_sfaccount['accountID']}) mock_get_sfaccount.reset_mock() mock_get_sf_volume.reset_mock() expected = {"key": "value"} mock_get_sf_volume.return_value = self.fake_sfvol mock_replicate_volume.return_value = expected mock_set_rep_by_volume_type.side_effect = [src_vol_type, dst_vol_type] retyped, updates = sfv.retype(ctx, vol, dst_vol_type, None, None) self.assertDictEqual(expected, updates) mock_get_sfaccount.assert_called_once_with(vol.project_id) mock_get_sf_volume.assert_called_once_with( vol.id, {'accountID': self.fake_sfaccount['accountID']}) mock_get_default_volume_params.assert_called() mock_disable_replication.assert_not_called() mock_replicate_volume.assert_called_once() mock_retrieve_replication_settings.assert_called_once() mock_set_qos_by_volume_type.assert_called_once() mock_set_rep_by_volume_type.assert_called() @mock.patch.object(solidfire.SolidFireDriver, '_issue_api_request') def test_create_volume_for_migration(self, _mock_issue_api_request): _mock_issue_api_request.side_effect = self.fake_issue_api_request testvol = {'project_id': 'testprjid', 'name': 'testvol', 'size': 1, 'id': 'b830b3c0-d1f0-11e1-9b23-1900200c9a77', 'volume_type_id': None, 'created_at': timeutils.utcnow(), 'migration_status': 'target:' 'a720b3c0-d1f0-11e1-9b23-0800200c9a66'} ctx = context.get_admin_context() testvol = fake_volume.fake_volume_obj(ctx, **testvol) fake_sfaccounts = [{'accountID': 5, 'targetSecret': 'shhhh', 'username': 'prefix-testprjid'}] def _fake_do_v_create(project_id, params): cvol = { 'name': 'UUID-a720b3c0-d1f0-11e1-9b23-0800200c9a66', 'attributes': { 'uuid': 'a720b3c0-d1f0-11e1-9b23-0800200c9a66', 'migration_uuid': 'b830b3c0-d1f0-11e1-9b23-1900200c9a77' } } return cvol sfv = solidfire.SolidFireDriver(configuration=self.configuration) with mock.patch.object(sfv, '_get_sfaccounts_for_tenant', return_value=fake_sfaccounts), \ mock.patch.object(sfv, '_get_account_create_availability', return_value=fake_sfaccounts[0]), \ mock.patch.object(sfv, '_do_volume_create', side_effect=_fake_do_v_create): sf_vol_object = sfv.create_volume(testvol) self.assertEqual('a720b3c0-d1f0-11e1-9b23-0800200c9a66', sf_vol_object['attributes']['uuid']) self.assertEqual('b830b3c0-d1f0-11e1-9b23-1900200c9a77', sf_vol_object['attributes']['migration_uuid']) self.assertEqual('UUID-a720b3c0-d1f0-11e1-9b23-0800200c9a66', sf_vol_object['name']) def test_init_volume_mappings(self): sfv = solidfire.SolidFireDriver(configuration=self.configuration) vid_1 = 'c9125d6d-22ff-4cc3-974d-d4e350df9c91' vid_2 = '79883868-6933-47a1-a362-edfbf8d55a18' sid_1 = 'e3caa4fa-485e-45ca-970e-1d3e693a2520' project_1 = 'e6fb073c-11f0-4f4c-897c-90e7c7c4bcf8' project_2 = '4ff32607-305c-4a6b-a51a-0dd33124eecf' vrefs = [{'id': vid_1, 'project_id': project_1, 'provider_id': None}, {'id': vid_2, 'project_id': project_2, 
'provider_id': 22}] snaprefs = [{'id': sid_1, 'project_id': project_1, 'provider_id': None, 'volume_id': vid_1}] sf_vols = [{'volumeID': 99, 'name': 'UUID-' + vid_1, 'accountID': 100}, {'volumeID': 22, 'name': 'UUID-' + vid_2, 'accountID': 200}] sf_snaps = [{'snapshotID': 1, 'name': 'UUID-' + sid_1, 'volumeID': 99}] def _fake_issue_api_req(method, params, version=0): if 'ListActiveVolumes' in method: return {'result': {'volumes': sf_vols}} if 'ListSnapshots' in method: return {'result': {'snapshots': sf_snaps}} with mock.patch.object(sfv, '_issue_api_request', side_effect=_fake_issue_api_req): volume_updates, snapshot_updates = sfv.update_provider_info( vrefs, snaprefs) self.assertEqual('99 100 53c8be1e-89e2-4f7f-a2e3-7cb84c47e0ec', volume_updates[0]['provider_id']) self.assertEqual(1, len(volume_updates)) self.assertEqual('1 99 53c8be1e-89e2-4f7f-a2e3-7cb84c47e0ec', snapshot_updates[0]['provider_id']) self.assertEqual(1, len(snapshot_updates)) def test_get_sf_volume_missing_attributes(self): sfv = solidfire.SolidFireDriver(configuration=self.configuration) test_name = "existing_volume" fake_response = {'result': { 'volumes': [{'volumeID': 5, 'name': test_name, 'accountID': 8, 'sliceCount': 1, 'totalSize': 1 * units.Gi, 'enable512e': True, 'access': "readWrite", 'status': "active", 'qos': None, 'iqn': test_name}]}} def _fake_issue_api_req(method, params, version=0, endpoint=None): return fake_response with mock.patch.object( sfv, '_issue_api_request', side_effect=_fake_issue_api_req): self.assertEqual(5, sfv._get_sf_volume(test_name, 8)['volumeID']) def test_sf_init_conn_with_vag(self): # Verify with the _enable_vag conf set that we correctly create a VAG. mod_conf = self.configuration mod_conf.sf_enable_vag = True sfv = solidfire.SolidFireDriver(configuration=mod_conf) testvol = {'project_id': 'testprjid', 'name': 'testvol', 'size': 1, 'id': 'a720b3c0-d1f0-11e1-9b23-0800200c9a66', 'volume_type_id': None, 'provider_location': '10.10.7.1:3260 iqn.2010-01.com.' 'solidfire:87hg.uuid-2cc06226-cc' '74-4cb7-bd55-14aed659a0cc.4060 0', 'provider_auth': 'CHAP stack-1-a60e2611875f40199931f2' 'c76370d66b 2FE0CQ8J196R', 'provider_geometry': '4096 4096', 'created_at': timeutils.utcnow(), 'provider_id': "1 1 1" } connector = {'initiator': 'iqn.2012-07.org.fake:01'} provider_id = testvol['provider_id'] vol_id = int(provider_id.split()[0]) vag_id = 1 with mock.patch.object(sfv, '_safe_create_vag', return_value=vag_id) as create_vag, \ mock.patch.object(sfv, '_add_volume_to_vag') as add_vol: sfv._sf_initialize_connection(testvol, connector) create_vag.assert_called_with(connector['initiator'], vol_id) add_vol.assert_called_with(vol_id, connector['initiator'], vag_id) def test_sf_term_conn_with_vag_rem_vag(self): # Verify we correctly remove an empty VAG on detach. mod_conf = self.configuration mod_conf.sf_enable_vag = True sfv = solidfire.SolidFireDriver(configuration=mod_conf) testvol = {'project_id': 'testprjid', 'name': 'testvol', 'size': 1, 'id': 'a720b3c0-d1f0-11e1-9b23-0800200c9a66', 'volume_type_id': None, 'provider_location': '10.10.7.1:3260 iqn.2010-01.com.' 
                                        'solidfire:87hg.uuid-2cc06226-cc'
                                        '74-4cb7-bd55-14aed659a0cc.4060 0',
                   'provider_auth': 'CHAP stack-1-a60e2611875f40199931f2'
                                    'c76370d66b 2FE0CQ8J196R',
                   'provider_geometry': '4096 4096',
                   'created_at': timeutils.utcnow(),
                   'provider_id': "1 1 1",
                   'multiattach': False
                   }
        connector = {'initiator': 'iqn.2012-07.org.fake:01'}
        vag_id = 1
        vags = [{'attributes': {},
                 'deletedVolumes': [],
                 'initiators': [connector['initiator']],
                 'name': 'fakeiqn',
                 'volumeAccessGroupID': vag_id,
                 'volumes': [1],
                 'virtualNetworkIDs': []}]

        with mock.patch.object(sfv, '_get_vags_by_name',
                               return_value=vags), \
                mock.patch.object(sfv, '_remove_vag') as rem_vag:
            sfv._sf_terminate_connection(testvol, connector, False)
            rem_vag.assert_called_with(vag_id)

    def test_sf_term_conn_with_vag_rem_vol(self):
        # Verify we correctly remove the volume from a non-empty VAG.
        mod_conf = self.configuration
        mod_conf.sf_enable_vag = True
        sfv = solidfire.SolidFireDriver(configuration=mod_conf)
        testvol = {'project_id': 'testprjid',
                   'name': 'testvol',
                   'size': 1,
                   'id': 'a720b3c0-d1f0-11e1-9b23-0800200c9a66',
                   'volume_type_id': None,
                   'provider_location': '10.10.7.1:3260 iqn.2010-01.com.'
                                        'solidfire:87hg.uuid-2cc06226-cc'
                                        '74-4cb7-bd55-14aed659a0cc.4060 0',
                   'provider_auth': 'CHAP stack-1-a60e2611875f40199931f2'
                                    'c76370d66b 2FE0CQ8J196R',
                   'provider_geometry': '4096 4096',
                   'created_at': timeutils.utcnow(),
                   'provider_id': "1 1 1",
                   'multiattach': False
                   }
        provider_id = testvol['provider_id']
        vol_id = int(provider_id.split()[0])
        connector = {'initiator': 'iqn.2012-07.org.fake:01'}
        vag_id = 1
        vags = [{'attributes': {},
                 'deletedVolumes': [],
                 'initiators': [connector['initiator']],
                 'name': 'fakeiqn',
                 'volumeAccessGroupID': vag_id,
                 'volumes': [1, 2],
                 'virtualNetworkIDs': []}]

        with mock.patch.object(sfv, '_get_vags_by_name',
                               return_value=vags), \
                mock.patch.object(sfv,
                                  '_remove_volume_from_vag') as rem_vag:
            sfv._sf_terminate_connection(testvol, connector, False)
            rem_vag.assert_called_with(vol_id, vag_id)

    def test_sf_term_conn_without_connector(self):
        # Verify we correctly force the deletion of a volume.
        mod_conf = self.configuration
        mod_conf.sf_enable_vag = True
        sfv = solidfire.SolidFireDriver(configuration=mod_conf)
        testvol = {'project_id': 'testprjid',
                   'name': 'testvol',
                   'size': 1,
                   'id': 'a720b3c0-d1f0-11e1-9b23-0800200c9a66',
                   'volume_type_id': None,
                   'provider_location': '10.10.7.1:3260 iqn.2010-01.com.'
                                        'solidfire:87hg.uuid-2cc06226-cc'
                                        '74-4cb7-bd55-14aed659a0cc.4060 0',
                   'provider_auth': 'CHAP stack-1-a60e2611875f40199931f2'
                                    'c76370d66b 2FE0CQ8J196R',
                   'provider_geometry': '4096 4096',
                   'created_at': timeutils.utcnow(),
                   'provider_id': "1 1 1",
                   'multiattach': False
                   }
        provider_id = testvol['provider_id']
        vol_id = int(provider_id.split()[0])
        vag_id = 1
        vags = [{'attributes': {},
                 'deletedVolumes': [],
                 'initiators': ['iqn.2012-07.org.fake:01'],
                 'name': 'fakeiqn',
                 'volumeAccessGroupID': vag_id,
                 'volumes': [1, 2],
                 'virtualNetworkIDs': []}]

        with mock.patch.object(sfv, '_get_vags_by_volume',
                               return_value=vags), \
                mock.patch.object(sfv,
                                  '_remove_volume_from_vags') as rem_vags:
            sfv._sf_terminate_connection(testvol, None, False)
            rem_vags.assert_called_with(vol_id)

    def test_safe_create_vag_simple(self):
        # Test the sunny day call straight into _create_vag.
        sfv = solidfire.SolidFireDriver(configuration=self.configuration)
        iqn = 'fake_iqn'
        vol_id = 1
        with mock.patch.object(sfv, '_get_vags_by_name',
                               return_value=[]), \
                mock.patch.object(sfv, '_create_vag') as mock_create_vag:
            sfv._safe_create_vag(iqn, vol_id)
            mock_create_vag.assert_called_with(iqn, vol_id)

    def test_safe_create_vag_matching_vag(self):
        # VAG exists, reuse it.
        sfv = solidfire.SolidFireDriver(configuration=self.configuration)
        iqn = 'TESTIQN'
        vags = [{'attributes': {},
                 'deletedVolumes': [],
                 'initiators': [iqn],
                 'name': iqn,
                 'volumeAccessGroupID': 1,
                 'volumes': [1, 2],
                 'virtualNetworkIDs': []}]

        with mock.patch.object(sfv, '_get_vags_by_name',
                               return_value=vags), \
                mock.patch.object(sfv, '_create_vag') as create_vag, \
                mock.patch.object(sfv, '_add_initiator_to_vag') as add_iqn:
            vag_id = sfv._safe_create_vag(iqn, None)
            self.assertEqual(vag_id, vags[0]['volumeAccessGroupID'])
            create_vag.assert_not_called()
            add_iqn.assert_not_called()

    def test_safe_create_vag_reuse_vag(self):
        # Reuse a matching VAG.
        sfv = solidfire.SolidFireDriver(configuration=self.configuration)
        iqn = 'TESTIQN'
        vags = [{'attributes': {},
                 'deletedVolumes': [],
                 'initiators': [],
                 'name': iqn,
                 'volumeAccessGroupID': 1,
                 'volumes': [1, 2],
                 'virtualNetworkIDs': []}]
        vag_id = vags[0]['volumeAccessGroupID']

        with mock.patch.object(sfv, '_get_vags_by_name',
                               return_value=vags), \
                mock.patch.object(sfv, '_add_initiator_to_vag',
                                  return_value=vag_id) as add_init:
            res_vag_id = sfv._safe_create_vag(iqn, None)
            self.assertEqual(res_vag_id, vag_id)
            add_init.assert_called_with(iqn, vag_id)

    def test_create_vag_iqn_fail(self):
        # Attempt to create a VAG with an already in-use initiator.
        sfv = solidfire.SolidFireDriver(configuration=self.configuration)
        iqn = 'TESTIQN'
        vag_id = 1
        vol_id = 42

        def throw_request(method, params, version):
            msg = 'xExceededLimit: {}'.format(params['initiators'][0])
            raise solidfire.SolidFireAPIException(message=msg)

        with mock.patch.object(sfv, '_issue_api_request',
                               side_effect=throw_request), \
                mock.patch.object(sfv, '_safe_create_vag',
                                  return_value=vag_id) as create_vag, \
                mock.patch.object(sfv, '_purge_vags') as purge_vags:
            res_vag_id = sfv._create_vag(iqn, vol_id)
            self.assertEqual(res_vag_id, vag_id)
            create_vag.assert_called_with(iqn, vol_id)
            purge_vags.assert_not_called()

    def test_create_vag_limit_fail(self):
        # Attempt to create a VAG with VAG limit reached.
        sfv = solidfire.SolidFireDriver(configuration=self.configuration)
        iqn = 'TESTIQN'
        vag_id = 1
        vol_id = 42

        def throw_request(method, params, version):
            msg = 'xExceededLimit'
            raise solidfire.SolidFireAPIException(message=msg)

        with mock.patch.object(sfv, '_issue_api_request',
                               side_effect=throw_request), \
                mock.patch.object(sfv, '_safe_create_vag',
                                  return_value=vag_id) as create_vag, \
                mock.patch.object(sfv, '_purge_vags') as purge_vags:
            res_vag_id = sfv._create_vag(iqn, vol_id)
            self.assertEqual(res_vag_id, vag_id)
            create_vag.assert_called_with(iqn, vol_id)
            purge_vags.assert_called_with()

    def test_add_initiator_duplicate(self):
        # Thrown exception should yield vag_id.
        sfv = solidfire.SolidFireDriver(configuration=self.configuration)
        iqn = 'TESTIQN'
        vag_id = 1

        def throw_request(method, params, version):
            msg = 'xAlreadyInVolumeAccessGroup'
            raise solidfire.SolidFireAPIException(message=msg)

        with mock.patch.object(sfv, '_issue_api_request',
                               side_effect=throw_request):
            res_vag_id = sfv._add_initiator_to_vag(iqn, vag_id)
            self.assertEqual(vag_id, res_vag_id)

    def test_add_initiator_missing_vag(self):
        # Thrown exception should result in create_vag call.
        sfv = solidfire.SolidFireDriver(configuration=self.configuration)
        iqn = 'TESTIQN'
        vag_id = 1

        def throw_request(method, params, version):
            msg = 'xVolumeAccessGroupIDDoesNotExist'
            raise solidfire.SolidFireAPIException(message=msg)

        with mock.patch.object(sfv, '_issue_api_request',
                               side_effect=throw_request), \
                mock.patch.object(sfv, '_safe_create_vag',
                                  return_value=vag_id) as mock_create_vag:
            res_vag_id = sfv._add_initiator_to_vag(iqn, vag_id)
            self.assertEqual(vag_id, res_vag_id)
            mock_create_vag.assert_called_with(iqn)

    def test_add_volume_to_vag_duplicate(self):
        # Thrown exception should yield vag_id.
        sfv = solidfire.SolidFireDriver(configuration=self.configuration)
        iqn = 'TESTIQN'
        vag_id = 1
        vol_id = 42

        def throw_request(method, params, version):
            msg = 'xAlreadyInVolumeAccessGroup'
            raise solidfire.SolidFireAPIException(message=msg)

        with mock.patch.object(sfv, '_issue_api_request',
                               side_effect=throw_request):
            res_vag_id = sfv._add_volume_to_vag(vol_id, iqn, vag_id)
            self.assertEqual(res_vag_id, vag_id)

    def test_add_volume_to_vag_missing_vag(self):
        # Thrown exception should yield vag_id.
        sfv = solidfire.SolidFireDriver(configuration=self.configuration)
        iqn = 'TESTIQN'
        vag_id = 1
        vol_id = 42

        def throw_request(method, params, version):
            msg = 'xVolumeAccessGroupIDDoesNotExist'
            raise solidfire.SolidFireAPIException(message=msg)

        with mock.patch.object(sfv, '_issue_api_request',
                               side_effect=throw_request), \
                mock.patch.object(sfv, '_safe_create_vag',
                                  return_value=vag_id) as mock_create_vag:
            res_vag_id = sfv._add_volume_to_vag(vol_id, iqn, vag_id)
            self.assertEqual(res_vag_id, vag_id)
            mock_create_vag.assert_called_with(iqn, vol_id)

    def test_remove_volume_from_vag_missing_volume(self):
        # Volume not in VAG; the error should be swallowed.
        sfv = solidfire.SolidFireDriver(configuration=self.configuration)
        vag_id = 1
        vol_id = 42

        def throw_request(method, params, version):
            msg = 'xNotInVolumeAccessGroup'
            raise solidfire.SolidFireAPIException(message=msg)

        with mock.patch.object(sfv, '_issue_api_request',
                               side_effect=throw_request):
            sfv._remove_volume_from_vag(vol_id, vag_id)

    def test_remove_volume_from_vag_missing_vag(self):
        # VAG does not exist; the error should be swallowed.
        sfv = solidfire.SolidFireDriver(configuration=self.configuration)
        vag_id = 1
        vol_id = 42

        def throw_request(method, params, version):
            msg = 'xVolumeAccessGroupIDDoesNotExist'
            raise solidfire.SolidFireAPIException(message=msg)

        with mock.patch.object(sfv, '_issue_api_request',
                               side_effect=throw_request):
            sfv._remove_volume_from_vag(vol_id, vag_id)

    def test_remove_volume_from_vag_unknown_exception(self):
        # An unexpected error should be re-raised.
        sfv = solidfire.SolidFireDriver(configuration=self.configuration)
        vag_id = 1
        vol_id = 42

        def throw_request(method, params, version):
            msg = 'xUnknownException'
            raise solidfire.SolidFireAPIException(message=msg)

        with mock.patch.object(sfv, '_issue_api_request',
                               side_effect=throw_request):
            self.assertRaises(solidfire.SolidFireAPIException,
                              sfv._remove_volume_from_vag,
                              vol_id,
                              vag_id)

    def test_remove_volume_from_vags(self):
        # Remove volume from several VAGs.
        sfv = solidfire.SolidFireDriver(configuration=self.configuration)
        vol_id = 42
        vags = [{'volumeAccessGroupID': 1,
                 'volumes': [vol_id]},
                {'volumeAccessGroupID': 2,
                 'volumes': [vol_id, 43]}]

        with mock.patch.object(sfv, '_get_vags_by_volume',
                               return_value=vags), \
                mock.patch.object(sfv,
                                  '_remove_volume_from_vag') as rem_vol:
            sfv._remove_volume_from_vags(vol_id)
            self.assertEqual(len(vags), rem_vol.call_count)

    def test_purge_vags(self):
        # Remove subset of VAGs.
sfv = solidfire.SolidFireDriver(configuration=self.configuration) vags = [{'initiators': [], 'volumeAccessGroupID': 1, 'deletedVolumes': [], 'volumes': [], 'attributes': {'openstack': True}}, {'initiators': [], 'volumeAccessGroupID': 2, 'deletedVolumes': [], 'volumes': [], 'attributes': {'openstack': False}}, {'initiators': [], 'volumeAccessGroupID': 3, 'deletedVolumes': [1], 'volumes': [], 'attributes': {'openstack': True}}, {'initiators': [], 'volumeAccessGroupID': 4, 'deletedVolumes': [], 'volumes': [1], 'attributes': {'openstack': True}}, {'initiators': ['fakeiqn'], 'volumeAccessGroupID': 5, 'deletedVolumes': [], 'volumes': [], 'attributes': {'openstack': True}}] with mock.patch.object(sfv, '_base_get_vags', return_value=vags), \ mock.patch.object(sfv, '_remove_vag') as rem_vag: sfv._purge_vags() # Of the vags provided there is only one that is valid for purge # based on the limits of no initiators, volumes, deleted volumes, # and features the openstack attribute. self.assertEqual(1, rem_vag.call_count) rem_vag.assert_called_with(1) def test_sf_create_group_snapshot(self): # Sunny day group snapshot creation. sfv = solidfire.SolidFireDriver(configuration=self.configuration) name = 'great_gsnap_name' sf_volumes = [{'volumeID': 1}, {'volumeID': 42}] expected_params = {'name': name, 'volumes': [1, 42]} fake_result = {'result': 'contrived_test'} with mock.patch.object(sfv, '_issue_api_request', return_value=fake_result) as fake_api: res = sfv._sf_create_group_snapshot(name, sf_volumes) self.assertEqual('contrived_test', res) fake_api.assert_called_with('CreateGroupSnapshot', expected_params, version='7.0') def test_group_snapshot_creator_sunny(self): sfv = solidfire.SolidFireDriver(configuration=self.configuration) gsnap_name = 'great_gsnap_name' prefix = sfv.configuration.sf_volume_prefix vol_uuids = ['one', 'two', 'three'] active_vols = [{'name': prefix + 'one'}, {'name': prefix + 'two'}, {'name': prefix + 'three'}] with mock.patch.object(sfv, '_get_all_active_volumes', return_value=active_vols), \ mock.patch.object(sfv, '_sf_create_group_snapshot', return_value=None) as create: sfv._group_snapshot_creator(gsnap_name, vol_uuids) create.assert_called_with(gsnap_name, active_vols) def test_group_snapshot_creator_rainy(self): sfv = solidfire.SolidFireDriver(configuration=self.configuration) gsnap_name = 'great_gsnap_name' prefix = sfv.configuration.sf_volume_prefix vol_uuids = ['one', 'two', 'three'] active_vols = [{'name': prefix + 'one'}, {'name': prefix + 'two'}] with mock.patch.object(sfv, '_get_all_active_volumes', return_value=active_vols): self.assertRaises(solidfire.SolidFireDriverException, sfv._group_snapshot_creator, gsnap_name, vol_uuids) def test_create_temp_group_snapshot(self): sfv = solidfire.SolidFireDriver(configuration=self.configuration) cg = {'id': 'great_gsnap_name'} prefix = sfv.configuration.sf_volume_prefix tmp_name = prefix + cg['id'] + '-tmp' vols = [{'id': 'one'}, {'id': 'two'}, {'id': 'three'}] with mock.patch.object(sfv, '_group_snapshot_creator', return_value=None) as create: sfv._create_temp_group_snapshot(cg, vols) create.assert_called_with(tmp_name, ['one', 'two', 'three']) def test_list_group_snapshots(self): sfv = solidfire.SolidFireDriver(configuration=self.configuration) res = {'result': {'groupSnapshots': 'a_thing'}} with mock.patch.object(sfv, '_issue_api_request', return_value=res): result = sfv._list_group_snapshots() self.assertEqual('a_thing', result) def test_get_group_snapshot_by_name(self): sfv = 
solidfire.SolidFireDriver(configuration=self.configuration) fake_snaps = [{'name': 'a_fantastic_name'}] with mock.patch.object(sfv, '_list_group_snapshots', return_value=fake_snaps): result = sfv._get_group_snapshot_by_name('a_fantastic_name') self.assertEqual(fake_snaps[0], result) def test_delete_group_snapshot(self): sfv = solidfire.SolidFireDriver(configuration=self.configuration) gsnap_id = 1 with mock.patch.object(sfv, '_issue_api_request') as api_req: sfv._delete_group_snapshot(gsnap_id) api_req.assert_called_with('DeleteGroupSnapshot', {'groupSnapshotID': gsnap_id}, version='7.0') def test_delete_cgsnapshot_by_name(self): sfv = solidfire.SolidFireDriver(configuration=self.configuration) fake_gsnap = {'groupSnapshotID': 42} with mock.patch.object(sfv, '_get_group_snapshot_by_name', return_value=fake_gsnap), \ mock.patch.object(sfv, '_delete_group_snapshot') as del_stuff: sfv._delete_cgsnapshot_by_name('does not matter') del_stuff.assert_called_with(fake_gsnap['groupSnapshotID']) def test_delete_cgsnapshot_by_name_rainy(self): sfv = solidfire.SolidFireDriver(configuration=self.configuration) with mock.patch.object(sfv, '_get_group_snapshot_by_name', return_value=None): self.assertRaises(solidfire.SolidFireDriverException, sfv._delete_cgsnapshot_by_name, 'does not matter') def test_find_linked_snapshot(self): sfv = solidfire.SolidFireDriver(configuration=self.configuration) group_snap = {'members': [{'volumeID': 1}, {'volumeID': 2}]} source_vol = {'volumeID': 1} with mock.patch.object(sfv, '_get_sf_volume', return_value=source_vol) as get_vol: res = sfv._find_linked_snapshot('fake_uuid', group_snap) self.assertEqual(source_vol, res) get_vol.assert_called_with('fake_uuid') def test_create_consisgroup_from_src_cgsnapshot(self): sfv = solidfire.SolidFireDriver(configuration=self.configuration) ctxt = None group = {} volumes = [{'id': 'one'}, {'id': 'two'}, {'id': 'three'}] cgsnapshot = {'id': 'great_uuid'} snapshots = [{'id': 'snap_id_1', 'volume_id': 'one'}, {'id': 'snap_id_2', 'volume_id': 'two'}, {'id': 'snap_id_3', 'volume_id': 'three'}] source_cg = None source_vols = None group_snap = {} name = sfv.configuration.sf_volume_prefix + cgsnapshot['id'] kek = (None, None, {}) with mock.patch.object(sfv, '_get_group_snapshot_by_name', return_value=group_snap) as get_snap, \ mock.patch.object(sfv, '_find_linked_snapshot'), \ mock.patch.object(sfv, '_do_clone_volume', return_value=kek): model, vol_models = sfv._create_consistencygroup_from_src( ctxt, group, volumes, cgsnapshot, snapshots, source_cg, source_vols) get_snap.assert_called_with(name) self.assertEqual( {'status': fields.GroupStatus.AVAILABLE}, model) def test_create_consisgroup_from_src_source_cg(self): sfv = solidfire.SolidFireDriver(configuration=self.configuration) ctxt = None group = {} volumes = [{'id': 'one', 'source_volid': 'source_one'}, {'id': 'two', 'source_volid': 'source_two'}, {'id': 'three', 'source_volid': 'source_three'}] cgsnapshot = {'id': 'great_uuid'} snapshots = None source_cg = {'id': 'fantastic_cg'} source_vols = [1, 2, 3] source_snap = None group_snap = {} kek = (None, None, {}) with mock.patch.object(sfv, '_create_temp_group_snapshot', return_value=source_cg['id']), \ mock.patch.object(sfv, '_get_group_snapshot_by_name', return_value=group_snap) as get_snap, \ mock.patch.object(sfv, '_find_linked_snapshot', return_value=source_snap), \ mock.patch.object(sfv, '_do_clone_volume', return_value=kek), \ mock.patch.object(sfv, '_delete_cgsnapshot_by_name'): model, vol_models = 
sfv._create_consistencygroup_from_src( ctxt, group, volumes, cgsnapshot, snapshots, source_cg, source_vols) get_snap.assert_called_with(source_cg['id']) self.assertEqual( {'status': fields.GroupStatus.AVAILABLE}, model) def test_create_cgsnapshot(self): sfv = solidfire.SolidFireDriver(configuration=self.configuration) ctxt = None cgsnapshot = {'id': 'acceptable_cgsnap_id'} snapshots = [{'volume_id': 'one'}, {'volume_id': 'two'}] pfx = sfv.configuration.sf_volume_prefix active_vols = [{'name': pfx + 'one'}, {'name': pfx + 'two'}] with mock.patch.object(sfv, '_get_all_active_volumes', return_value=active_vols), \ mock.patch.object(sfv, '_sf_create_group_snapshot') as create_gsnap: sfv._create_cgsnapshot(ctxt, cgsnapshot, snapshots) create_gsnap.assert_called_with(pfx + cgsnapshot['id'], active_vols) def test_create_cgsnapshot_rainy(self): sfv = solidfire.SolidFireDriver(configuration=self.configuration) ctxt = None cgsnapshot = {'id': 'acceptable_cgsnap_id'} snapshots = [{'volume_id': 'one'}, {'volume_id': 'two'}] pfx = sfv.configuration.sf_volume_prefix active_vols = [{'name': pfx + 'one'}] with mock.patch.object(sfv, '_get_all_active_volumes', return_value=active_vols), \ mock.patch.object(sfv, '_sf_create_group_snapshot'): self.assertRaises(solidfire.SolidFireDriverException, sfv._create_cgsnapshot, ctxt, cgsnapshot, snapshots) def test_create_vol_from_cgsnap(self): # cgsnaps on the backend yield numerous identically named snapshots. # create_volume_from_snapshot now searches for the correct snapshot. sfv = solidfire.SolidFireDriver(configuration=self.configuration) source = {'group_snapshot_id': 'typical_cgsnap_id', 'volume_id': 'typical_vol_id', 'id': 'no_id_4_u'} name = (self.configuration.sf_volume_prefix + source.get('group_snapshot_id')) with mock.patch.object(sfv, '_get_group_snapshot_by_name', return_value={}) as get, \ mock.patch.object(sfv, '_create_clone_from_sf_snapshot', return_value='model'): result = sfv.create_volume_from_snapshot({}, source) get.assert_called_once_with(name) self.assertEqual('model', result) @mock.patch('cinder.volume.volume_utils.is_group_a_cg_snapshot_type') def test_create_group_cg(self, group_cg_test): sfv = solidfire.SolidFireDriver(configuration=self.configuration) group_cg_test.return_value = True group = mock.MagicMock() result = sfv.create_group(self.ctxt, group) self.assertEqual(result, {'status': fields.GroupStatus.AVAILABLE}) group_cg_test.assert_called_once_with(group) @mock.patch('cinder.volume.volume_utils.is_group_a_cg_snapshot_type') def test_delete_group_snap_cg(self, group_cg_test): sfv = solidfire.SolidFireDriver(configuration=self.configuration) group_cg_test.return_value = True cgsnapshot = fake_group_snapshot.fake_group_snapshot_obj( mock.MagicMock()) snapshots = fake_snapshot.fake_snapshot_obj(mock.MagicMock()) with mock.patch.object(sfv, '_delete_cgsnapshot', return_value={}) as _del_mock: model_update = sfv.delete_group_snapshot(self.ctxt, cgsnapshot, snapshots) _del_mock.assert_called_once_with(self.ctxt, cgsnapshot, snapshots) self.assertEqual({}, model_update) @mock.patch('cinder.volume.volume_utils.is_group_a_cg_snapshot_type') def test_delete_group_snap(self, group_cg_test): sfv = solidfire.SolidFireDriver(configuration=self.configuration) group_cg_test.return_value = False cgsnapshot = fake_group_snapshot.fake_group_snapshot_obj( mock.MagicMock()) snapshots = fake_snapshot.fake_snapshot_obj(mock.MagicMock()) with mock.patch.object(sfv, '_delete_cgsnapshot', return_value={}) as _del_mock: 
self.assertRaises(NotImplementedError, sfv.delete_group_snapshot, self.ctxt, cgsnapshot, snapshots) _del_mock.assert_not_called() @mock.patch('cinder.volume.volume_utils.is_group_a_cg_snapshot_type') def test_create_group_rainy(self, group_cg_test): sfv = solidfire.SolidFireDriver(configuration=self.configuration) group_cg_test.return_value = False group = mock.MagicMock() self.assertRaises(NotImplementedError, sfv.create_group, self.ctxt, group) group_cg_test.assert_called_once_with(group) @mock.patch('cinder.volume.volume_utils.is_group_a_cg_snapshot_type') def test_create_group_from_src_rainy(self, group_cg_test): sfv = solidfire.SolidFireDriver(configuration=self.configuration) group_cg_test.return_value = False group = mock.MagicMock() volumes = [mock.MagicMock()] self.assertRaises(NotImplementedError, sfv.create_group_from_src, self.ctxt, group, volumes) group_cg_test.assert_called_once_with(group) @mock.patch('cinder.volume.volume_utils.is_group_a_cg_snapshot_type') def test_create_group_from_src_cg(self, group_cg_test): sfv = solidfire.SolidFireDriver(configuration=self.configuration) group_cg_test.return_value = True group = mock.MagicMock() volumes = [mock.MagicMock()] ret = 'things' with mock.patch.object(sfv, '_create_consistencygroup_from_src', return_value=ret): result = sfv.create_group_from_src(self.ctxt, group, volumes) self.assertEqual(ret, result) group_cg_test.assert_called_once_with(group) @mock.patch('cinder.volume.volume_utils.is_group_a_cg_snapshot_type') def test_create_group_snapshot_rainy(self, group_cg_test): sfv = solidfire.SolidFireDriver(configuration=self.configuration) group_cg_test.return_value = False group_snapshot = mock.MagicMock() snapshots = [mock.MagicMock()] self.assertRaises(NotImplementedError, sfv.create_group_snapshot, self.ctxt, group_snapshot, snapshots) group_cg_test.assert_called_once_with(group_snapshot) @mock.patch('cinder.volume.volume_utils.is_group_a_cg_snapshot_type') def test_create_group_snapshot(self, group_cg_test): sfv = solidfire.SolidFireDriver(configuration=self.configuration) group_cg_test.return_value = True group_snapshot = mock.MagicMock() snapshots = [mock.MagicMock()] ret = 'things' with mock.patch.object(sfv, '_create_cgsnapshot', return_value=ret): result = sfv.create_group_snapshot(self.ctxt, group_snapshot, snapshots) self.assertEqual(ret, result) group_cg_test.assert_called_once_with(group_snapshot) @mock.patch('cinder.volume.volume_utils.is_group_a_cg_snapshot_type') def test_delete_group_rainy(self, group_cg_test): sfv = solidfire.SolidFireDriver(configuration=self.configuration) group_cg_test.return_value = False group = mock.MagicMock() volumes = [mock.MagicMock()] self.assertRaises(NotImplementedError, sfv.delete_group, self.ctxt, group, volumes) group_cg_test.assert_called_once_with(group) @mock.patch('cinder.volume.volume_utils.is_group_a_cg_snapshot_type') def test_delete_group(self, group_cg_test): sfv = solidfire.SolidFireDriver(configuration=self.configuration) group_cg_test.return_value = True group = mock.MagicMock() volumes = [mock.MagicMock()] ret = 'things' with mock.patch.object(sfv, '_delete_consistencygroup', return_value=ret): result = sfv.delete_group(self.ctxt, group, volumes) self.assertEqual(ret, result) group_cg_test.assert_called_once_with(group) @mock.patch('cinder.volume.volume_utils.is_group_a_cg_snapshot_type') def test_update_group_rainy(self, group_cg_test): sfv = solidfire.SolidFireDriver(configuration=self.configuration) group_cg_test.return_value = False group = mock.MagicMock() 
self.assertRaises(NotImplementedError, sfv.update_group, self.ctxt, group) group_cg_test.assert_called_once_with(group) @mock.patch('cinder.volume.volume_utils.is_group_a_cg_snapshot_type') def test_update_group(self, group_cg_test): sfv = solidfire.SolidFireDriver(configuration=self.configuration) group_cg_test.return_value = True group = mock.MagicMock() ret = 'things' with mock.patch.object(sfv, '_update_consistencygroup', return_value=ret): result = sfv.update_group(self.ctxt, group) self.assertEqual(ret, result) group_cg_test.assert_called_once_with(group) def test_getattr_failure(self): sfv = solidfire.SolidFireDriver(configuration=self.configuration) try: sfv.foo() self.fail("Should have thrown Error") except Exception: pass @data('Async', 'Sync', 'SnapshotsOnly') @mock.patch.object(volume_types, 'get_volume_type') def test_set_rep_by_volume_type(self, mode, mock_get_volume_type): mock_get_volume_type.return_value = { 'name': 'sf-1', 'deleted': False, 'created_at': '2014-02-06 04:58:11', 'updated_at': None, 'extra_specs': {'replication_enabled': ' True', 'solidfire:replication_mode': mode}, 'deleted_at': None, 'id': '290edb2a-f5ea-11e5-9ce9-5e5517507c66'} rep_opts = {} sfv = solidfire.SolidFireDriver(configuration=self.configuration) sfv.cluster_pairs = self.cluster_pairs ctxt = None type_id = '290edb2a-f5ea-11e5-9ce9-5e5517507c66' rep_opts['rep_type'] = mode self.assertEqual(rep_opts, sfv._set_rep_by_volume_type(ctxt, type_id)) mock_get_volume_type.assert_called() def test_replicate_volume(self): replication_status = fields.ReplicationStatus.ENABLED fake_vol = {'project_id': 1, 'volumeID': 1, 'size': 1} params = {'attributes': {}} rep_info = {'rep_type': 'Async'} sf_account = {'initiatorSecret': 'shhh', 'targetSecret': 'dont-tell'} model_update = {'provider_id': '1 2 xxxx'} sfv = solidfire.SolidFireDriver(configuration=self.configuration) sfv.cluster_pairs = self.cluster_pairs with mock.patch.object(sfv, '_issue_api_request', self.fake_issue_api_request), \ mock.patch.object(sfv, '_get_sfaccount_by_name', return_value={'accountID': 1}), \ mock.patch.object(sfv, '_do_volume_create', return_value=model_update): self.assertEqual({'replication_status': replication_status}, sfv._replicate_volume(fake_vol, params, sf_account, rep_info)) def test_pythons_try_except(self): def _fake_retrieve_rep(vol): raise solidfire.SolidFireAPIException fake_type = {'extra_specs': {}} sfv = solidfire.SolidFireDriver(configuration=self.configuration) with mock.patch.object(sfv, '_get_create_account', return_value={'accountID': 5}), \ mock.patch.object(sfv, '_retrieve_qos_setting', return_value=None), \ mock.patch.object(sfv, '_do_volume_create', return_value={'provider_id': '1 2 xxxx'}), \ mock.patch.object(volume_types, 'get_volume_type', return_value=fake_type), \ mock.patch.object(sfv, '_retrieve_replication_settings', side_effect=_fake_retrieve_rep): self.assertRaises(solidfire.SolidFireAPIException, sfv.create_volume, self.mock_volume) def test_extract_sf_attributes_from_extra_specs(self): type_id = '290edb2a-f5ea-11e5-9ce9-5e5517507c66' fake_type = {'extra_specs': {'SFAttribute:foo': 'bar', 'SFAttribute:biz': 'baz'}} expected = [{'foo': 'bar'}, {'biz': 'baz'}] sfv = solidfire.SolidFireDriver(configuration=self.configuration) with mock.patch.object(volume_types, 'get_volume_type', return_value=fake_type): res = sfv._extract_sf_attributes_from_extra_specs(type_id) self.assertCountEqual(expected, res) def test_build_endpoint_with_kwargs(self): sfv = 
solidfire.SolidFireDriver(configuration=self.configuration) expected_ep = {'passwd': 'nunyabiz', 'port': 888, 'url': 'https://1.2.3.4:888', 'svip': None, 'mvip': '1.2.3.4', 'login': 'JohnWayne'} ep = sfv._build_endpoint_info(mvip='1.2.3.4', login='JohnWayne', password='nunyabiz', port=888) self.assertEqual(expected_ep, ep) # Make sure we pick up defaults for those not specified expected_ep = {'passwd': 'nunyabiz', 'url': 'https://1.2.3.4:443', 'svip': None, 'mvip': '1.2.3.4', 'login': 'admin', 'port': 443} ep = sfv._build_endpoint_info(mvip='1.2.3.4', password='nunyabiz') self.assertEqual(expected_ep, ep) # Make sure we add brackets for IPv6 MVIP expected_ep = {'passwd': 'nunyabiz', 'url': 'https://[ff00::00]:443', 'svip': None, 'mvip': 'ff00::00', 'login': 'admin', 'port': 443} ep = sfv._build_endpoint_info(mvip='ff00::00', password='nunyabiz') self.assertEqual(expected_ep, ep) def test_generate_random_string(self): sfv = solidfire.SolidFireDriver(configuration=self.configuration) a = sfv._generate_random_string(12) self.assertEqual(len(a), 12) self.assertIsNotNone(re.match(r'[A-Z0-9]{12}', a), a) @mock.patch.object(solidfire.SolidFireDriver, '_get_sfaccount') @mock.patch.object(solidfire.SolidFireDriver, '_get_sf_volume') @mock.patch.object(solidfire.SolidFireDriver, '_get_sf_snapshots') @mock.patch.object(solidfire.SolidFireDriver, '_issue_api_request') def test_revert_to_snapshot_success(self, mock_issue_api_request, mock_get_sf_snapshots, mock_get_sf_volume, mock_get_sfaccount): mock_issue_api_request.side_effect = self.fake_issue_api_request mock_get_sfaccount.return_value = self.fake_sfaccount mock_get_sf_volume.return_value = self.fake_sfvol mock_get_sf_snapshots.return_value = self.fake_sfsnaps expected_params = {'accountID': 25, 'volumeID': 6, 'snapshotID': '5', 'saveCurrentState': 'false'} sfv = solidfire.SolidFireDriver(configuration=self.configuration) # Success path sfv.revert_to_snapshot(self.ctxt, self.vol, self.snap) mock_issue_api_request.assert_called_with( 'RollbackToSnapshot', expected_params, version='6.0') @mock.patch.object(solidfire.SolidFireDriver, '_get_sfaccount') @mock.patch.object(solidfire.SolidFireDriver, '_get_sf_volume') @mock.patch.object(solidfire.SolidFireDriver, '_get_sf_snapshots') @mock.patch.object(solidfire.SolidFireDriver, '_issue_api_request') def test_revert_to_snapshot_fail_vol_not_found( self, mock_issue_api_request, mock_get_sf_snapshots, mock_get_sf_volume, mock_get_sfaccount): mock_issue_api_request.side_effect = self.fake_issue_api_request mock_get_sfaccount.return_value = self.fake_sfaccount mock_get_sf_volume.return_value = None mock_get_sf_snapshots.return_value = [] sfv = solidfire.SolidFireDriver(configuration=self.configuration) # Volume not found mock_get_sf_volume.return_value = None self.assertRaises(exception.VolumeNotFound, sfv.revert_to_snapshot, self.ctxt, self.vol, self.snap) @mock.patch.object(solidfire.SolidFireDriver, '_get_sfaccount') @mock.patch.object(solidfire.SolidFireDriver, '_get_sf_volume') @mock.patch.object(solidfire.SolidFireDriver, '_get_sf_snapshots') @mock.patch.object(solidfire.SolidFireDriver, '_issue_api_request') def test_revert_to_snapshot_fail_snap_not_found( self, mock_issue_api_request, mock_get_sf_snapshots, mock_get_sf_volume, mock_get_sfaccount): mock_issue_api_request.side_effect = self.fake_issue_api_request mock_get_sfaccount.return_value = self.fake_sfaccount mock_get_sf_volume.return_value = self.fake_sfvol mock_get_sf_snapshots.return_value = [] sfv = 
solidfire.SolidFireDriver(configuration=self.configuration) # Snapshot not found mock_get_sf_snapshots.return_value = [] self.assertRaises(exception.VolumeSnapshotNotFound, sfv.revert_to_snapshot, self.ctxt, self.vol, self.snap) @mock.patch.object(solidfire.SolidFireDriver, '_get_create_account') @mock.patch.object(solidfire.SolidFireDriver, '_set_cluster_pairs') @mock.patch.object(solidfire.SolidFireDriver, '_snapshot_discovery') @mock.patch.object(solidfire.SolidFireDriver, '_issue_api_request') @mock.patch.object(solidfire.SolidFireDriver, '_get_model_info') @mock.patch.object(solidfire.SolidFireDriver, '_update_attributes') @mock.patch.object(solidfire.SolidFireDriver, '_update_cluster_status') @mock.patch.object(solidfire.SolidFireDriver, '_set_cluster_pairs') @mock.patch.object(solidfire.SolidFireDriver, '_get_default_volume_params') @mock.patch.object(solidfire.SolidFireDriver, '_retrieve_replication_settings') @mock.patch.object(solidfire.SolidFireDriver, '_replicate_volume') @mock.patch.object(solidfire.SolidFireDriver, '_create_cluster_reference') def test_do_clone_volume_rep_disabled(self, mock_create_cluster_reference, mock_replicate_volume, mock_retrieve_replication_settings, mock_get_default_volume_params, mock_set_cluster_pairs, mock_update_cluster_status, mock_update_attributes, mock_get_model_info, mock_issue_api_request, mock_snapshot_discovery, mock_test_set_cluster_pairs, mock_get_create_account): all_mocks = locals() def reset_mocks(): for mk in all_mocks.values(): if isinstance(mk, mock.MagicMock): mk.reset_mock() sf_volume_params = {'volumeID': 1, 'snapshotID': 2, 'newSize': 3} mock_snapshot_discovery.return_value = (sf_volume_params, True, self.fake_sfvol) mock_get_create_account.return_value = self.fake_sfaccount ctx = context.get_admin_context() vol_fields = {'updated_at': timeutils.utcnow(), 'created_at': timeutils.utcnow()} src_vol = fake_volume.fake_volume_obj(ctx) dst_vol = fake_volume.fake_volume_obj(ctx, **vol_fields) mock_create_cluster_reference.return_value = { 'mvip': self.mvip, 'svip': self.svip} self.configuration.sf_volume_clone_timeout = 1 sfv = solidfire.SolidFireDriver(configuration=self.configuration) sfv.replication_enabled = False reset_mocks() mock_issue_api_request.return_value = { 'error': {'code': 000, 'name': 'DummyError', 'message': 'This is a fake error response'}, 'id': 1} self.assertRaises(solidfire.SolidFireAPIException, sfv._do_clone_volume, src_vol.id, dst_vol, sf_src_snap=self.fake_sfsnaps[0]) clone_vol_params = { 'snapshotID': self.fake_sfsnaps[0]['snapshotID'], 'volumeID': self.fake_sfsnaps[0]['volumeID'], 'newSize': dst_vol.size * units.Gi, 'name': '%(prefix)s%(id)s' % { 'prefix': self.configuration.sf_volume_prefix, 'id': dst_vol.id}, 'newAccountID': self.fake_sfaccount['accountID']} mock_get_create_account.assert_called_with(dst_vol.project_id) mock_issue_api_request.assert_called_once_with( 'CloneVolume', clone_vol_params, version='6.0') mock_test_set_cluster_pairs.assert_not_called() mock_update_attributes.assert_not_called() mock_get_model_info.assert_not_called() mock_snapshot_discovery.assert_not_called() reset_mocks() mock_issue_api_request.side_effect = self.fake_issue_api_request mock_get_default_volume_params.return_value = {} mock_get_model_info.return_value = None self.assertRaises(solidfire.SolidFireAPIException, sfv._do_clone_volume, src_vol.id, dst_vol, sf_src_snap=self.fake_sfsnaps[0]) mock_get_create_account.assert_called_with(dst_vol.project_id) calls = [mock.call('CloneVolume', clone_vol_params, 
version='6.0'), mock.call('ModifyVolume', {'volumeID': 6})] mock_issue_api_request.assert_has_calls(calls) mock_test_set_cluster_pairs.assert_not_called() mock_update_attributes.assert_not_called() mock_get_model_info.assert_called() mock_snapshot_discovery.assert_not_called() reset_mocks() mock_retrieve_replication_settings.return_value = 'Async' update = {'replication_status': fields.ReplicationStatus.ENABLED} mock_replicate_volume.side_effect = solidfire.SolidFireDriverException mock_update_attributes.return_value = {'result': {}, 'id': 1} mock_get_model_info.return_value = { 'provider_location': '1.1.1.1 iqn 0', 'provider_auth': 'CHAP stack-1-a60e2611875f40199931f2c76370d66b ' '2FE0CQ8J196R', 'provider_id': '%s %s cluster-id-01' % ( self.fake_sfvol['volumeID'], self.fake_sfaccount['accountID']) } data, account, updates = sfv._do_clone_volume( src_vol.id, dst_vol, sf_src_snap=self.fake_sfsnaps[0]) self.assertEqual({'result': {}, 'id': 1}, data) self.assertEqual(25, account['accountID']) self.assertEqual(self.fake_sfvol['volumeID'], int(updates['provider_id'].split()[0])) mock_get_create_account.assert_called_with(dst_vol.project_id) calls = [mock.call('CloneVolume', clone_vol_params, version='6.0'), mock.call('ModifyVolume', {'volumeID': 6})] mock_issue_api_request.assert_has_calls(calls) mock_test_set_cluster_pairs.assert_not_called() mock_update_attributes.assert_not_called() mock_get_model_info.assert_called_once() mock_snapshot_discovery.assert_not_called() @mock.patch.object(solidfire.SolidFireDriver, '_get_create_account') @mock.patch.object(solidfire.SolidFireDriver, '_retrieve_qos_setting') @mock.patch.object(solidfire.SolidFireDriver, '_extract_sf_attributes_from_extra_specs') def test_get_default_volume_params( self, mock_extract_sf_attributes_from_extra_specs, mock_retrieve_qos_setting, mock_get_create_account): mock_extract_sf_attributes_from_extra_specs.return_value = [{ 'key1': 'value1', 'key2': 'value2' }] mock_retrieve_qos_setting.return_value = None mock_get_create_account.return_value = self.fake_sfaccount ctx = context.get_admin_context() type_fields = {'extra_specs': {'replication_enabled': ' True'}} vol_type = fake_volume.fake_volume_type_obj(ctx, **type_fields) utc_now = timeutils.utcnow().isoformat() vol_fields = { 'id': fakes.FAKE_UUID, 'created_at': utc_now, 'volume_type': vol_type, 'volume_type_id': vol_type.id } vol = fake_volume.fake_volume_obj(ctx, **vol_fields) vol_name = '%s%s' % (self.configuration.sf_volume_prefix, vol.id) expected_attr = { 'uuid': vol.id, 'is_clone': False, 'created_at': utc_now + "+00:00", 'cinder-name': vol.get('display_name', ""), 'key1': 'value1', 'key2': 'value2', } expected_params = { 'name': vol_name, 'accountID': self.fake_sfaccount['accountID'], 'sliceCount': 1, 'totalSize': int(vol.size * units.Gi), 'enable512e': self.configuration.sf_emulate_512, 'attributes': expected_attr, 'qos': None } sfv = solidfire.SolidFireDriver(configuration=self.configuration) sfv.replication_enabled = True params = sfv._get_default_volume_params(vol, False) self.assertDictEqual(expected_params, params) mock_extract_sf_attributes_from_extra_specs.assert_called() mock_retrieve_qos_setting.assert_called() mock_get_create_account.assert_called() @mock.patch.object(solidfire.SolidFireDriver, '_get_sfvol_by_cinder_vref') def test_disable_replication_fail(self, mock_get_sfvol_by_cinder_vref): self.fake_sfvol['volumePairs'] = [] mock_get_sfvol_by_cinder_vref.return_value = self.fake_sfvol ctx = context.get_admin_context() utc_now = 
timeutils.utcnow().isoformat() vol_fields = { 'id': f_uuid[0], 'created_at': utc_now } vol = fake_volume.fake_volume_obj(ctx, **vol_fields) sfv = solidfire.SolidFireDriver(configuration=self.configuration) sfv.replication_enabled = True sfv.cluster_pairs = self.cluster_pairs expected = {'replication_status': fields.ReplicationStatus.DISABLED} updates = sfv._disable_replication(vol) self.assertDictEqual(expected, updates) @mock.patch.object(solidfire.SolidFireDriver, '_get_sfvol_by_cinder_vref') @mock.patch.object(solidfire.SolidFireDriver, '_issue_api_request') @mock.patch.object(solidfire.SolidFireDriver, '_create_cluster_reference') def test_disable_replication(self, mock_create_cluster_reference, mock_issue_api_request, mock_get_sfvol_by_cinder_vref): mock_create_cluster_reference.return_value = { 'mvip': self.mvip, 'svip': self.svip} self.fake_sfvol['volumePairs'] = [{"remoteVolumeID": 26}] mock_get_sfvol_by_cinder_vref.return_value = self.fake_sfvol ctx = context.get_admin_context() utc_now = timeutils.utcnow().isoformat() vol_fields = { 'id': f_uuid[0], 'created_at': utc_now } vol = fake_volume.fake_volume_obj(ctx, **vol_fields) sfv = solidfire.SolidFireDriver(configuration=self.configuration) sfv.replication_enabled = True sfv.cluster_pairs = self.cluster_pairs sfv.active_cluster['mvip'] = self.mvip sfv.active_cluster['svip'] = self.svip expected = {'replication_status': fields.ReplicationStatus.DISABLED} mock_issue_api_request.reset_mock() updates = sfv._disable_replication(vol) self.assertDictEqual(expected, updates) expected = [ mock.call("RemoveVolumePair", {'volumeID': self.fake_sfvol['volumeID']}, '8.0'), mock.call("RemoveVolumePair", {'volumeID': 26}, '8.0', endpoint=sfv.cluster_pairs[0]['endpoint']), mock.call("DeleteVolume", {'volumeID': 26}, endpoint=sfv.cluster_pairs[0]['endpoint']), mock.call("PurgeDeletedVolume", {'volumeID': 26}, endpoint=sfv.cluster_pairs[0]['endpoint']) ] mock_issue_api_request.assert_has_calls(expected) mock_create_cluster_reference.assert_called() mock_get_sfvol_by_cinder_vref.assert_called() @mock.patch.object(solidfire.SolidFireDriver, '_set_cluster_pairs') @mock.patch.object(solidfire.SolidFireDriver, 'failover') @mock.patch.object(solidfire.SolidFireDriver, 'failover_completed') def test_failover_host(self, mock_failover_completed, mock_failover, mock_set_cluster_pairs): fake_context = None fake_cinder_vols = [{'id': 'testvol1'}, {'id': 'testvol2'}] fake_failover_updates = [{'volume_id': 'testvol1', 'updates': { 'replication_status': 'failed-over'}}, {'volume_id': 'testvol2', 'updates': { 'replication_status': 'failed-over'}}] mock_failover.return_value = "secondary", fake_failover_updates, [] drv_args = {'active_backend_id': None} sfv = solidfire.SolidFireDriver(configuration=self.configuration, **drv_args) cluster_id, updates, _ = sfv.failover_host( fake_context, fake_cinder_vols, secondary_id='secondary', groups=None) mock_failover.assert_called_once_with(fake_context, fake_cinder_vols, "secondary", None) mock_failover_completed.assert_called_once_with(fake_context, "secondary") self.assertEqual(cluster_id, "secondary") self.assertEqual(fake_failover_updates, updates) @mock.patch.object(solidfire.SolidFireDriver, '_set_cluster_pairs') @mock.patch.object(solidfire.SolidFireDriver, '_create_cluster_reference') def test_failover_completed(self, mock_create_cluster_reference, mock_set_cluster_pairs): ctx = context.get_admin_context() drv_args = {'active_backend_id': None} sfv = solidfire.SolidFireDriver(configuration=self.configuration, 
**drv_args) sfv.cluster_pairs = self.cluster_pairs sfv.failover_completed(ctx, "secondary") self.assertTrue(sfv.failed_over) self.assertDictEqual(sfv.active_cluster, sfv.cluster_pairs[0]) mock_create_cluster_reference.return_value = self.fake_primary_cluster sfv.failover_completed(ctx, '') self.assertFalse(sfv.failed_over) mock_create_cluster_reference.assert_called() self.assertDictEqual(sfv.active_cluster, self.fake_primary_cluster) @mock.patch.object(solidfire.SolidFireDriver, '_issue_api_request') @mock.patch.object(solidfire.SolidFireDriver, '_create_cluster_reference') @mock.patch.object(solidfire.SolidFireDriver, '_set_cluster_pairs') @mock.patch.object(solidfire.SolidFireDriver, '_update_cluster_status') @mock.patch.object(solidfire.SolidFireDriver, '_map_sf_volumes') @mock.patch.object(solidfire.SolidFireDriver, '_failover_volume') @mock.patch.object(solidfire.SolidFireDriver, '_get_create_account') def test_failover(self, mock_get_create_account, mock_failover_volume, mock_map_sf_volumes, mock_update_cluster_status, mock_set_cluster_pairs, mock_create_cluster_reference, mock_issue_api_request): all_mocks = locals() def reset_mocks(): for mk in all_mocks.values(): if isinstance(mk, mock.MagicMock): mk.reset_mock() ctx = context.get_admin_context() vol_fields = {'updated_at': timeutils.utcnow(), 'created_at': timeutils.utcnow()} cinder_vols = [] sf_vols = [] for i in range(1, 6): vol = fake_volume.fake_volume_obj(ctx, **vol_fields) sf_vol = self.fake_sfvol.copy() sf_vol['volumeID'] = i sf_vol['name'] = '%s%s' % (self.configuration.sf_volume_prefix, vol.id) sf_vol['access'] = 'replicationTarget' sf_vol['attributes'] = {'uuid': vol.id} sf_vol['cinder_id'] = vol.id sf_vols.append(sf_vol) cinder_vols.append(vol) mock_map_sf_volumes.return_value = sf_vols self.configuration.replication_device = [] reset_mocks() drv_args = {'active_backend_id': None} sfv = solidfire.SolidFireDriver(configuration=self.configuration, **drv_args) self.assertRaises(exception.UnableToFailOver, sfv.failover, ctx, cinder_vols, 'fake', None) mock_map_sf_volumes.assert_not_called() fake_replication_device = {'backend_id': 'fake', 'mvip': '0.0.0.0', 'login': 'fake_login', 'password': 'fake_pwd'} self.configuration.replication_device = [fake_replication_device] reset_mocks() drv_args = {'active_backend_id': ''} sfv = solidfire.SolidFireDriver(configuration=self.configuration, **drv_args) sfv.replication_enabled = True self.assertRaises(exception.InvalidReplicationTarget, sfv.failover, ctx, cinder_vols, 'default', None) mock_map_sf_volumes.assert_not_called() reset_mocks() drv_args = {'active_backend_id': None} sfv = solidfire.SolidFireDriver(configuration=self.configuration, **drv_args) sfv.replication_enabled = True self.assertRaises(exception.InvalidReplicationTarget, sfv.failover, ctx, cinder_vols, secondary_id='not_fake_id', groups=None) mock_map_sf_volumes.assert_not_called() mock_create_cluster_reference.return_value = self.cluster_pairs[0] reset_mocks() drv_args = {'active_backend_id': 'fake'} sfv = solidfire.SolidFireDriver(configuration=self.configuration, **drv_args) sfv.cluster_pairs = self.cluster_pairs sfv.cluster_pairs[0]['backend_id'] = 'fake' sfv.replication_enabled = True cluster_id, updates, _ = sfv.failover_host( ctx, cinder_vols, secondary_id='default', groups=None) self.assertEqual(5, len(updates)) for update in updates: self.assertEqual(fields.ReplicationStatus.ENABLED, update['updates']['replication_status']) self.assertEqual('', cluster_id) mock_get_create_account.assert_called() 
mock_failover_volume.assert_called() mock_map_sf_volumes.assert_called() mock_update_cluster_status.assert_called() mock_create_cluster_reference.assert_called() reset_mocks() drv_args = {'active_backend_id': None} sfv = solidfire.SolidFireDriver(configuration=self.configuration, **drv_args) sfv.cluster_pairs = self.cluster_pairs sfv.cluster_pairs[0]['backend_id'] = 'fake' sfv.replication_enabled = True cluster_id, updates, _ = sfv.failover( ctx, cinder_vols, secondary_id='fake', groups=None) self.assertEqual(5, len(updates)) for update in updates: self.assertEqual(fields.ReplicationStatus.FAILED_OVER, update['updates']['replication_status']) self.assertEqual('fake', cluster_id) mock_get_create_account.assert_called() mock_failover_volume.assert_called() mock_map_sf_volumes.assert_called() mock_update_cluster_status.assert_called() mock_create_cluster_reference.assert_called() @mock.patch.object(solidfire.SolidFireDriver, '_issue_api_request') @mock.patch.object(solidfire.SolidFireDriver, '_create_cluster_reference') @mock.patch.object(solidfire.SolidFireDriver, '_update_cluster_status') def test_failover_volume(self, mock_update_cluster_status, mock_create_cluster_reference, mock_issue_api_request): all_mocks = locals() def reset_mocks(): for mk in all_mocks.values(): if isinstance(mk, mock.MagicMock): mk.reset_mock() mock_issue_api_request.return_value = self.fake_sfaccount sfv = solidfire.SolidFireDriver(configuration=self.configuration) sfv.replication_enabled = True fake_src_sfvol = {'volumeID': 600, 'name': 'test_volume', 'accountID': 25, 'sliceCount': 1, 'totalSize': 1 * units.Gi, 'enable512e': True, 'access': "replicationTarget", 'status': "active", 'attributes': {'uuid': f_uuid[0]}, 'qos': None, 'iqn': 'super_fake_iqn'} expected_src_params = {'volumeID': fake_src_sfvol['volumeID'], 'access': 'replicationTarget'} expected_tgt_params = {'volumeID': self.fake_sfvol['volumeID'], 'access': 'readWrite'} sfv._failover_volume(self.fake_sfvol, self.cluster_pairs[0], fake_src_sfvol) mock_issue_api_request.assert_has_calls( [mock.call("ModifyVolume", expected_src_params), mock.call("ModifyVolume", expected_tgt_params, endpoint=self.cluster_pairs[0]['endpoint'])] ) reset_mocks() sfv._failover_volume(self.fake_sfvol, self.cluster_pairs[0]) mock_issue_api_request.assert_called_with( "ModifyVolume", expected_tgt_params, endpoint=self.cluster_pairs[0]['endpoint'] ) @mock.patch('oslo_service.loopingcall.FixedIntervalWithTimeoutLoopingCall') @mock.patch.object(solidfire.SolidFireDriver, '_issue_api_request') @mock.patch.object(solidfire.SolidFireDriver, '_create_cluster_reference') @mock.patch.object(solidfire.SolidFireDriver, '_get_cluster_pair') @mock.patch.object(solidfire.SolidFireDriver, '_create_remote_pairing') def test_get_or_create_cluster_pairing( self, mock_create_remote_pairing, mock_get_cluster_pair, mock_create_cluster_reference, mock_issue_api_request, mock_looping_call): fake_remote_pair_connected = {'status': 'Connected'} mock_get_cluster_pair.side_effect = [None, fake_remote_pair_connected] sfv = solidfire.SolidFireDriver(configuration=self.configuration) result = sfv._get_or_create_cluster_pairing( self.fake_secondary_cluster, check_connected=True) mock_get_cluster_pair.assert_has_calls( [call(self.fake_secondary_cluster), call(self.fake_secondary_cluster)]) mock_create_remote_pairing.assert_called_with( self.fake_secondary_cluster) mock_looping_call.assert_not_called() self.assertEqual(fake_remote_pair_connected, result) @mock.patch.object(solidfire.SolidFireDriver, 
'_issue_api_request') @mock.patch.object(solidfire.SolidFireDriver, '_create_cluster_reference') @mock.patch.object(solidfire.SolidFireDriver, '_get_cluster_pair') @mock.patch.object(solidfire.SolidFireDriver, '_create_remote_pairing') def test_get_or_create_cluster_pairing_check_connected_true( self, mock_create_remote_pairing, mock_get_cluster_pair, mock_create_cluster_reference, mock_issue_api_request): fake_remote_pair_misconfigured = {'status': 'Misconfigured'} fake_remote_pair_connected = {'status': 'Connected'} mock_get_cluster_pair.side_effect = [None, fake_remote_pair_misconfigured, fake_remote_pair_connected] sfv = solidfire.SolidFireDriver(configuration=self.configuration) result = sfv._get_or_create_cluster_pairing( self.fake_secondary_cluster, check_connected=True) mock_get_cluster_pair.assert_has_calls( [call(self.fake_secondary_cluster), call(self.fake_secondary_cluster), call(self.fake_secondary_cluster)]) mock_create_remote_pairing.assert_called_with( self.fake_secondary_cluster) self.assertEqual(fake_remote_pair_connected, result) @mock.patch.object(solidfire.SolidFireDriver, '_issue_api_request') @mock.patch.object(solidfire.SolidFireDriver, '_update_cluster_status') @mock.patch.object(solidfire.SolidFireDriver, '_create_cluster_reference') def test_get_cluster_pair(self, mock_create_cluster_reference, mock_update_cluster_status, mock_issue_api_request): fake_cluster_pair = { 'result': { 'clusterPairs': [{ 'mvip': self.fake_secondary_cluster['mvip'] }] } } mock_issue_api_request.return_value = fake_cluster_pair sfv = solidfire.SolidFireDriver(configuration=self.configuration) result = sfv._get_cluster_pair(self.fake_secondary_cluster) mock_issue_api_request.assert_called_with('ListClusterPairs', {}, version='8.0') self.assertEqual( fake_cluster_pair['result']['clusterPairs'][0], result) @mock.patch.object(solidfire.SolidFireDriver, '_issue_api_request') @mock.patch.object(solidfire.SolidFireDriver, '_update_cluster_status') @mock.patch.object(solidfire.SolidFireDriver, '_create_cluster_reference') def test_get_cluster_pair_remote_not_found(self, mock_create_cluster_reference, mock_update_cluster_status, mock_issue_api_request): fake_cluster_pair = { 'result': { 'clusterPairs': [] } } mock_issue_api_request.return_value = fake_cluster_pair sfv = solidfire.SolidFireDriver(configuration=self.configuration) result = sfv._get_cluster_pair(self.fake_secondary_cluster) mock_issue_api_request.assert_called_with('ListClusterPairs', {}, version='8.0') self.assertIsNone(result) @mock.patch.object(solidfire.SolidFireDriver, '_issue_api_request') @mock.patch.object(solidfire.SolidFireDriver, '_update_cluster_status') @mock.patch.object(solidfire.SolidFireDriver, '_create_cluster_reference') def _create_volume_pairing(self, mock_issue_api_request, mock_update_cluster_status, mock_create_cluster_reference): ctx = context.get_admin_context() type_fields = {'id': fakes.get_fake_uuid()} src_vol_type = fake_volume.fake_volume_type_obj(ctx, **type_fields) fake_src_sf_volid = 1111 vol_fields = { 'id': fakes.get_fake_uuid(), 'volume_type': src_vol_type, 'host': 'fakeHost@fakeBackend#fakePool', 'status': 'in-use', 'provider_id': "%s %s %s" % (fake_src_sf_volid, fakes.get_fake_uuid(), self.fake_primary_cluster['uuid']) } vol = fake_volume.fake_volume_obj(ctx, **vol_fields) fake_dst_cluster_ref = deepcopy(self.fake_secondary_cluster) fake_dst_sf_volid = 9999 fake_dst_volume = { 'provider_id': "%s %s %s" % (fake_dst_sf_volid, fakes.get_fake_uuid(), fake_dst_cluster_ref['uuid']) } 
fake_start_volume_pairing = {'result': {'volumePairingKey': 'CAFE'}} mock_issue_api_request.side_effect = [MagicMock(), fake_start_volume_pairing] sfv = solidfire.SolidFireDriver(configuration=self.configuration) sfv._create_volume_pairing(vol, fake_dst_volume, fake_dst_cluster_ref) src_params = {'volumeID': fake_src_sf_volid, 'mode': "Sync"} dst_params = {'volumeID': fake_dst_sf_volid, 'volumePairingKey': 'CAFE'} mock_issue_api_request.assert_has_calls([ call('RemoveVolumePair', src_params, '8.0'), call('StartVolumePairing', src_params, '8.0'), call('CompleteVolumePairing', dst_params, '8.0', endpoint=fake_dst_cluster_ref['endpoint'])]) @mock.patch('cinder.volume.drivers.solidfire.retry') @mock.patch.object(solidfire.SolidFireDriver, '_issue_api_request') @mock.patch.object(solidfire.SolidFireDriver, '_update_cluster_status') @mock.patch.object(solidfire.SolidFireDriver, '_create_cluster_reference') def _create_volume_pairing_timeout(self, mock_issue_api_request, mock_update_cluster_status, mock_create_cluster_reference, mock_retry): ctx = context.get_admin_context() fake_src_sf_volid = 1111 vol_fields = { 'provider_id': "%s %s %s" % (fake_src_sf_volid, fakes.get_fake_uuid(), self.fake_primary_cluster['uuid']) } vol = fake_volume.fake_volume_obj(ctx, **vol_fields) fake_dst_cluster_ref = deepcopy(self.fake_secondary_cluster) fake_dst_sf_volid = 9999 fake_dst_volume = { 'provider_id': "%s %s %s" % (fake_dst_sf_volid, fakes.get_fake_uuid(), fake_dst_cluster_ref['uuid']) } mock_retry.side_effect = solidfire.SolidFireReplicationPairingError() sfv = solidfire.SolidFireDriver(configuration=self.configuration) mock_issue_api_request.reset_mock() self.assertRaises(solidfire.SolidFireReplicationPairingError, sfv._create_volume_pairing, vol, fake_dst_volume, fake_dst_cluster_ref) @mock.patch.object(solidfire.SolidFireDriver, '_do_intercluster_volume_migration') def test_migrate_volume_volume_is_not_available( self, mock_do_intercluster_volume_migration): ctx = context.get_admin_context() vol_fields = { 'status': 'in-use' } vol = fake_volume.fake_volume_obj(ctx, **vol_fields) host = {'host': 'fakeHost@anotherFakeBackend#fakePool'} sfv = solidfire.SolidFireDriver(configuration=self.configuration) self.assertRaises(exception.InvalidVolume, sfv.migrate_volume, ctx, vol, host) mock_do_intercluster_volume_migration.assert_not_called() @mock.patch.object(solidfire.SolidFireDriver, '_do_intercluster_volume_migration') def test_migrate_volume_volume_is_replicated( self, mock_do_intercluster_volume_migration): ctx = context.get_admin_context() type_fields = {'extra_specs': {'replication_enabled': ' True'}, 'id': fakes.get_fake_uuid()} src_vol_type = fake_volume.fake_volume_type_obj(ctx, **type_fields) vol_fields = { 'id': fakes.get_fake_uuid(), 'volume_type': src_vol_type } vol = fake_volume.fake_volume_obj(ctx, **vol_fields) vol.volume_type = src_vol_type host = {'host': 'fakeHost@fakeBackend#fakePool'} sfv = solidfire.SolidFireDriver(configuration=self.configuration) self.assertRaises(exception.InvalidVolume, sfv.migrate_volume, ctx, vol, host) mock_do_intercluster_volume_migration.assert_not_called() @mock.patch.object(solidfire.SolidFireDriver, '_do_intercluster_volume_migration') def test_migrate_volume_retyping_status( self, mock_do_intercluster_volume_migration): ctx = context.get_admin_context() type_fields = {'id': fakes.get_fake_uuid()} src_vol_type = fake_volume.fake_volume_type_obj(ctx, **type_fields) vol_fields = { 'id': fakes.get_fake_uuid(), 'volume_type': src_vol_type, 'host': 
'fakeHost@fakeBackend#fakePool', 'status': 'retyping' } vol = fake_volume.fake_volume_obj(ctx, **vol_fields) vol.volume_type = src_vol_type host = {'host': 'fakeHost@fakeBackend#fakePool'} sfv = solidfire.SolidFireDriver(configuration=self.configuration) result = sfv.migrate_volume(ctx, vol, host) mock_do_intercluster_volume_migration.assert_not_called() self.assertEqual((True, {}), result) @mock.patch.object(solidfire.SolidFireDriver, '_do_intercluster_volume_migration') def test_migrate_volume_same_host_and_backend( self, mock_do_intercluster_volume_migration): ctx = context.get_admin_context() type_fields = {'id': fakes.get_fake_uuid()} src_vol_type = fake_volume.fake_volume_type_obj(ctx, **type_fields) vol_fields = { 'id': fakes.get_fake_uuid(), 'volume_type': src_vol_type, 'host': 'fakeHost@fakeBackend#fakePool' } vol = fake_volume.fake_volume_obj(ctx, **vol_fields) vol.volume_type = src_vol_type host = {'host': 'fakeHost@fakeBackend#fakePool'} sfv = solidfire.SolidFireDriver(configuration=self.configuration) result = sfv.migrate_volume(ctx, vol, host) mock_do_intercluster_volume_migration.assert_not_called() self.assertEqual((True, {}), result) @mock.patch('cinder.volume.volume_utils.get_backend_configuration') @mock.patch.object(solidfire.SolidFireDriver, '_do_intercluster_volume_migration') def test_migrate_volume_different_host_same_backend( self, mock_do_intercluster_volume_migration, mock_get_backend_configuration): ctx = context.get_admin_context() type_fields = {'id': fakes.get_fake_uuid()} src_vol_type = fake_volume.fake_volume_type_obj(ctx, **type_fields) vol_fields = { 'id': fakes.get_fake_uuid(), 'volume_type': src_vol_type, 'host': 'fakeHost@fakeBackend#fakePool' } vol = fake_volume.fake_volume_obj(ctx, **vol_fields) vol.volume_type = src_vol_type host = {'host': 'anotherFakeHost@fakeBackend#fakePool'} sfv = solidfire.SolidFireDriver(configuration=self.configuration) result = sfv.migrate_volume(ctx, vol, host) mock_get_backend_configuration.assert_not_called() mock_do_intercluster_volume_migration.assert_not_called() self.assertEqual((True, {}), result) @mock.patch('cinder.volume.volume_utils.get_backend_configuration') @mock.patch.object(solidfire.SolidFireDriver, '_do_intercluster_volume_migration') def test_migrate_volume_config_stanza_not_found( self, mock_do_intercluster_volume_migration, mock_get_backend_configuration): ctx = context.get_admin_context() type_fields = {'id': fakes.get_fake_uuid()} src_vol_type = fake_volume.fake_volume_type_obj(ctx, **type_fields) vol_fields = { 'id': fakes.get_fake_uuid(), 'volume_type': src_vol_type, 'host': 'fakeHost@fakeBackend#fakePool' } vol = fake_volume.fake_volume_obj(ctx, **vol_fields) vol.volume_type = src_vol_type host = {'host': 'fakeHost@anotherFakeBackend#fakePool'} mock_get_backend_configuration.side_effect = \ exception.ConfigNotFound('error') sfv = solidfire.SolidFireDriver(configuration=self.configuration) self.assertRaises(exception.VolumeMigrationFailed, sfv.migrate_volume, ctx, vol, host) mock_get_backend_configuration.assert_called_with( 'anotherFakeBackend', sfv.get_driver_options()) mock_do_intercluster_volume_migration.assert_not_called() @mock.patch.object(solidfire.SolidFireDriver, '_do_intercluster_volume_migration') @mock.patch('cinder.volume.volume_utils.get_backend_configuration') def test_migrate_volume_different_backend_same_cluster( self, mock_get_backend_configuration, mock_do_intercluster_volume_migration): ctx = context.get_admin_context() type_fields = {'id': fakes.get_fake_uuid()} src_vol_type 
= fake_volume.fake_volume_type_obj(ctx, **type_fields) vol_fields = { 'id': fakes.get_fake_uuid(), 'volume_type': src_vol_type, 'host': 'fakeHost@fakeBackend#fakePool' } vol = fake_volume.fake_volume_obj(ctx, **vol_fields) vol.volume_type = src_vol_type host = {'host': 'fakeHost@anotherFakeBackend#fakePool'} dst_config = conf.BackendGroupConfiguration( [], conf.SHARED_CONF_GROUP) dst_config.san_ip = '10.10.10.10' mock_get_backend_configuration.return_value = dst_config sfv = solidfire.SolidFireDriver(configuration=self.configuration) sfv.active_cluster['mvip'] = '10.10.10.10' result = sfv.migrate_volume(ctx, vol, host) mock_get_backend_configuration.assert_called_with( 'anotherFakeBackend', sfv.get_driver_options()) mock_do_intercluster_volume_migration.assert_not_called() self.assertEqual((True, {}), result) @mock.patch.object(solidfire.SolidFireDriver, '_do_intercluster_volume_migration') @mock.patch('cinder.volume.volume_utils.get_backend_configuration') def test_migrate_volume_different_cluster( self, mock_get_backend_configuration, mock_do_intercluster_volume_migration): ctx = context.get_admin_context() type_fields = {'id': fakes.get_fake_uuid()} src_vol_type = fake_volume.fake_volume_type_obj(ctx, **type_fields) vol_fields = { 'id': fakes.get_fake_uuid(), 'volume_type': src_vol_type, 'host': 'fakeHost@fakeBackend#fakePool' } vol = fake_volume.fake_volume_obj(ctx, **vol_fields) vol.volume_type = src_vol_type host = {'host': 'fakeHost@anotherFakeBackend#fakePool'} dst_config = conf.BackendGroupConfiguration( [], conf.SHARED_CONF_GROUP) dst_config.san_ip = '10.10.10.10' mock_get_backend_configuration.return_value = dst_config mock_do_intercluster_volume_migration.return_value = {} sfv = solidfire.SolidFireDriver(configuration=self.configuration) sfv.active_cluster['mvip'] = '20.20.20.20' result = sfv.migrate_volume(ctx, vol, host) mock_do_intercluster_volume_migration.assert_called() self.assertEqual((True, {}), result) @mock.patch.object(solidfire.SolidFireDriver, '_build_endpoint_info') @mock.patch.object(solidfire.SolidFireDriver, '_create_cluster_reference') @mock.patch.object(solidfire.SolidFireDriver, '_setup_intercluster_volume_migration') @mock.patch.object(solidfire.SolidFireDriver, '_do_intercluster_volume_migration_data_sync') @mock.patch.object(solidfire.SolidFireDriver, '_cleanup_intercluster_volume_migration') def test_do_intercluster_volume_migration( self, mock_cleanup_intercluster_volume_migration, mock_do_intercluster_volume_migration_data_sync, mock_setup_intercluster_volume_migration, mock_create_cluster_reference, mock_build_endpoint_info): vol_fields = { 'id': fakes.get_fake_uuid() } vol = fake_volume.fake_volume_obj(context.get_admin_context(), **vol_fields) host = {'host': 'fakeHost@anotherFakeBackend#fakePool'} dst_config = conf.BackendGroupConfiguration( [], conf.SHARED_CONF_GROUP) fake_dst_endpoint = deepcopy(self.fake_secondary_cluster['endpoint']) fake_dst_cluster_ref = deepcopy(self.fake_secondary_cluster) mock_build_endpoint_info.return_value = fake_dst_endpoint mock_create_cluster_reference.return_value = fake_dst_cluster_ref fake_dst_volume = { 'provider_id': "%s %s %s" % (9999, fakes.get_fake_uuid(), fake_dst_cluster_ref['uuid']) } mock_setup_intercluster_volume_migration.return_value = \ fake_dst_volume sfv = solidfire.SolidFireDriver(configuration=self.configuration) result = sfv._do_intercluster_volume_migration(vol, host, dst_config) mock_build_endpoint_info.assert_called_once_with( backend_conf=dst_config) 
mock_create_cluster_reference.assert_called_with(fake_dst_endpoint) mock_setup_intercluster_volume_migration.assert_called_with( vol, fake_dst_cluster_ref) mock_do_intercluster_volume_migration_data_sync.assert_called_with( vol, None, 9999, fake_dst_cluster_ref) mock_cleanup_intercluster_volume_migration.assert_called_with( vol, 9999, fake_dst_cluster_ref) self.assertEqual(fake_dst_volume, result) @mock.patch.object(solidfire.SolidFireDriver, '_create_cluster_reference') @mock.patch.object(solidfire.SolidFireDriver, '_get_create_account') @mock.patch.object(solidfire.SolidFireDriver, '_get_default_volume_params') @mock.patch.object(solidfire.SolidFireDriver, '_do_volume_create') @mock.patch.object(solidfire.SolidFireDriver, '_create_volume_pairing') @mock.patch.object(solidfire.SolidFireDriver, '_issue_api_request') @mock.patch.object(solidfire.SolidFireDriver, '_get_or_create_cluster_pairing') def test_setup_intercluster_volume_migration( self, mock_get_or_create_cluster_pairing, mock_issue_api_request, mock_create_volume_pairing, mock_do_volume_create, mock_get_default_volume_params, mock_get_create_account, mock_create_cluster_reference): fake_project_id = fakes.get_fake_uuid() vol_fields = { 'id': fakes.get_fake_uuid(), 'project_id': fake_project_id } vol = fake_volume.fake_volume_obj(context.get_admin_context(), **vol_fields) fake_dst_cluster_ref = deepcopy(self.fake_secondary_cluster) fake_sfaccount = {'username': 'fakeAccount'} mock_get_create_account.return_value = fake_sfaccount fake_vol_default_params = {'name': 'someFakeVolumeName'} mock_get_default_volume_params.return_value = fake_vol_default_params fake_dst_volume = {'volumeID': 9999} mock_do_volume_create.return_value = fake_dst_volume sfv = solidfire.SolidFireDriver(configuration=self.configuration) mock_issue_api_request.reset_mock() result = sfv._setup_intercluster_volume_migration( vol, fake_dst_cluster_ref) mock_get_or_create_cluster_pairing.assert_called_with( fake_dst_cluster_ref, check_connected=True) mock_get_create_account.assert_called_with( fake_project_id, endpoint=fake_dst_cluster_ref['endpoint']) mock_get_default_volume_params.assert_called_with(vol, fake_sfaccount) mock_do_volume_create.assert_called_with( fake_sfaccount, fake_vol_default_params, endpoint=fake_dst_cluster_ref['endpoint']) mock_issue_api_request.assert_not_called() mock_create_volume_pairing.assert_called_with( vol, fake_dst_volume, fake_dst_cluster_ref) self.assertEqual(fake_dst_volume, result) @mock.patch.object(solidfire.SolidFireDriver, '_create_cluster_reference') @mock.patch.object(solidfire.SolidFireDriver, '_get_create_account') @mock.patch.object(solidfire.SolidFireDriver, '_get_default_volume_params') @mock.patch.object(solidfire.SolidFireDriver, '_do_volume_create') @mock.patch.object(solidfire.SolidFireDriver, '_create_volume_pairing') @mock.patch.object(solidfire.SolidFireDriver, '_issue_api_request') @mock.patch.object(solidfire.SolidFireDriver, '_get_or_create_cluster_pairing') def test_setup_intercluster_volume_migration_rollback( self, mock_get_or_create_cluster_pairing, mock_issue_api_request, mock_create_volume_pairing, mock_do_volume_create, mock_get_default_volume_params, mock_get_create_account, mock_create_cluster_reference): fake_project_id = fakes.get_fake_uuid() fake_src_sf_volid = 1111 vol_fields = { 'id': fakes.get_fake_uuid(), 'project_id': fake_project_id, 'provider_id': "%s %s %s" % (fake_src_sf_volid, fakes.get_fake_uuid(), self.fake_primary_cluster['uuid']) } vol = 
fake_volume.fake_volume_obj(context.get_admin_context(), **vol_fields) fake_dst_cluster_ref = deepcopy(self.fake_secondary_cluster) fake_dst_sf_volid = 9999 fake_dst_volume = { 'provider_id': "%s %s %s" % (fake_dst_sf_volid, fakes.get_fake_uuid(), fake_dst_cluster_ref['uuid']) } mock_do_volume_create.return_value = fake_dst_volume mock_create_volume_pairing.side_effect = \ solidfire.SolidFireReplicationPairingError() sfv = solidfire.SolidFireDriver(configuration=self.configuration) self.assertRaises(solidfire.SolidFireReplicationPairingError, sfv._setup_intercluster_volume_migration, vol, fake_dst_cluster_ref) src_params = {'volumeID': fake_src_sf_volid} dst_params = {'volumeID': fake_dst_sf_volid} mock_issue_api_request.assert_has_calls([ call('RemoveVolumePair', src_params, '8.0'), call('RemoveVolumePair', dst_params, '8.0', endpoint=fake_dst_cluster_ref["endpoint"]), call('DeleteVolume', dst_params, endpoint=fake_dst_cluster_ref["endpoint"]), call('PurgeDeletedVolume', dst_params, endpoint=fake_dst_cluster_ref["endpoint"])]) @mock.patch.object(solidfire.SolidFireDriver, '_do_intercluster_volume_migration_complete_data_sync') @mock.patch.object(solidfire.SolidFireDriver, '_get_sf_volume') @mock.patch.object(solidfire.SolidFireDriver, '_create_cluster_reference') @mock.patch.object(solidfire.SolidFireDriver, '_issue_api_request') def test_do_intercluster_volume_migration_data_sync( self, mock_issue_api_request, mock_create_cluster_reference, mock_get_sf_volume, mock_do_intercluster_volume_migration_complete_data_sync): fake_src_sf_volid = 1111 vol_fields = { 'id': fakes.get_fake_uuid(), 'provider_id': "%s %s %s" % (fake_src_sf_volid, fakes.get_fake_uuid(), self.fake_primary_cluster['uuid']) } vol = fake_volume.fake_volume_obj(context.get_admin_context(), **vol_fields) fake_dst_cluster_ref = deepcopy(self.fake_secondary_cluster) fake_dst_sf_volid = 9999 fake_sfaccount = {'accountID': 'fakeAccountID'} mock_get_sf_volume.return_value = { 'volumePairs': [{'remoteReplication': {'state': 'Active'}}] } sfv = solidfire.SolidFireDriver(configuration=self.configuration) sfv._do_intercluster_volume_migration_data_sync(vol, fake_sfaccount, fake_dst_sf_volid, fake_dst_cluster_ref) params = {'volumeID': fake_dst_sf_volid, 'access': 'replicationTarget'} mock_issue_api_request.assert_called_with( 'ModifyVolume', params, '8.0', endpoint=fake_dst_cluster_ref['endpoint']) vol_params = {'accountID': fake_sfaccount['accountID']} mock_get_sf_volume.assert_called_with(vol.id, vol_params) mock_do_intercluster_volume_migration_complete_data_sync\ .assert_called_with(fake_dst_sf_volid, fake_dst_cluster_ref) @mock.patch('oslo_service.loopingcall.FixedIntervalWithTimeoutLoopingCall') @mock.patch.object(solidfire.SolidFireDriver, '_get_sf_volume') @mock.patch.object(solidfire.SolidFireDriver, '_do_intercluster_volume_migration_complete_data_sync') @mock.patch.object(solidfire.SolidFireDriver, '_create_cluster_reference') @mock.patch.object(solidfire.SolidFireDriver, '_issue_api_request') def test_do_intercluster_volume_migration_data_sync_timeout( self, mock_issue_api_request, mock_create_cluster_reference, mock_do_intercluster_volume_migration_complete_data_sync, mock_get_sf_volume, mock_looping_call): fake_src_sf_volid = 1111 vol_fields = { 'id': fakes.get_fake_uuid(), 'provider_id': "%s %s %s" % (fake_src_sf_volid, fakes.get_fake_uuid(), self.fake_primary_cluster['uuid']) } vol = fake_volume.fake_volume_obj(context.get_admin_context(), **vol_fields) fake_dst_cluster_ref = deepcopy(self.fake_secondary_cluster) 
fake_dst_sf_volid = 9999 fake_sfaccount = {'accountID': 'fakeAccountID'} mock_looping_call.return_value.start.return_value.wait.side_effect = ( loopingcall.LoopingCallTimeOut()) sfv = solidfire.SolidFireDriver(configuration=self.configuration) self.assertRaises(solidfire.SolidFireDataSyncTimeoutError, sfv._do_intercluster_volume_migration_data_sync, vol, fake_sfaccount, fake_dst_sf_volid, fake_dst_cluster_ref) mock_get_sf_volume.assert_not_called() mock_do_intercluster_volume_migration_complete_data_sync\ .assert_not_called() @mock.patch.object(solidfire.SolidFireDriver, '_create_cluster_reference') @mock.patch.object(solidfire.SolidFireDriver, '_issue_api_request') def test_do_intercluster_volume_migration_complete_data_sync( self, mock_issue_api_request, mock_create_cluster_reference): fake_src_sf_volid = 1111 fake_dst_cluster_ref = deepcopy(self.fake_secondary_cluster) sfv = solidfire.SolidFireDriver(configuration=self.configuration) sfv._do_intercluster_volume_migration_complete_data_sync( fake_src_sf_volid, fake_dst_cluster_ref) params = {'volumeID': fake_src_sf_volid, 'access': 'readWrite'} mock_issue_api_request.assert_called_with( 'ModifyVolume', params, '8.0', endpoint=fake_dst_cluster_ref['endpoint']) @mock.patch.object(solidfire.SolidFireDriver, '_create_cluster_reference') @mock.patch.object(solidfire.SolidFireDriver, '_issue_api_request') def test_cleanup_intercluster_volume_migration( self, mock_issue_api_request, mock_create_cluster_reference): fake_src_sf_volid = 1111 vol_fields = { 'id': fakes.get_fake_uuid(), 'provider_id': "%s %s %s" % (fake_src_sf_volid, fakes.get_fake_uuid(), self.fake_primary_cluster['uuid']) } vol = fake_volume.fake_volume_obj(context.get_admin_context(), **vol_fields) fake_dst_cluster_ref = deepcopy(self.fake_secondary_cluster) fake_dst_sf_volid = 9999 sfv = solidfire.SolidFireDriver(configuration=self.configuration) sfv._cleanup_intercluster_volume_migration(vol, fake_dst_sf_volid, fake_dst_cluster_ref) src_params = {'volumeID': fake_src_sf_volid} dst_params = {'volumeID': fake_dst_sf_volid} mock_issue_api_request.assert_has_calls([ call('RemoveVolumePair', dst_params, '8.0', endpoint=fake_dst_cluster_ref["endpoint"]), call('RemoveVolumePair', src_params, '8.0'), call('DeleteVolume', src_params), call('PurgeDeletedVolume', src_params)]) @data(True, False) @mock.patch.object(solidfire.SolidFireDriver, '_create_cluster_reference') @mock.patch.object(solidfire.SolidFireDriver, '_set_cluster_pairs') @mock.patch.object(solidfire.SolidFireDriver, '_update_cluster_status') @mock.patch.object(solidfire.SolidFireDriver, '_issue_api_request') def test_list_volumes_by_name(self, has_endpoint, mock_issue_api_request, mock_update_cluster_status, mock_set_cluster_pairs, mock_create_cluster_reference): fake_sf_volume_name = 'fake-vol-name' vol_fields = { 'id': fakes.get_fake_uuid(), 'name': fake_sf_volume_name } vol = fake_volume.fake_volume_obj( context.get_admin_context(), **vol_fields) fake_endpoint = None volumes_list = [vol] if has_endpoint: fake_endpoint = self.fake_primary_cluster["endpoint"] volumes_list = [] mock_issue_api_request.return_value = { 'result': {'volumes': volumes_list}} sfv = solidfire.SolidFireDriver(configuration=self.configuration) result = ( sfv._list_volumes_by_name(fake_sf_volume_name, endpoint=fake_endpoint)) mock_issue_api_request.assert_called_once_with( 'ListVolumes', {'volumeName': fake_sf_volume_name}, version='8.0', endpoint=fake_endpoint) self.assertEqual(result, volumes_list) 
././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315577.3031204 cinder-27.0.0/cinder/tests/unit/volume/drivers/synology/0000775000175000017500000000000000000000000023324 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/volume/drivers/synology/__init__.py0000664000175000017500000000000000000000000025423 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/volume/drivers/synology/test_synology_common.py0000664000175000017500000020261500000000000030176 0ustar00zuulzuul00000000000000# Copyright (c) 2016 Synology Co., Ltd. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Tests for the Synology iSCSI volume driver.""" import copy from http import HTTPStatus import json import math from unittest import mock from cryptography.hazmat.backends import default_backend from cryptography.hazmat.primitives.asymmetric import padding from cryptography.hazmat.primitives.asymmetric import rsa import ddt from oslo_utils import units import requests from cinder import context from cinder import exception from cinder.tests.unit import fake_constants as fake from cinder.tests.unit import fake_snapshot from cinder.tests.unit import fake_volume from cinder.tests.unit import test from cinder.volume import configuration as conf from cinder.volume.drivers.synology import synology_common as common VOLUME_ID = fake.VOLUME_ID TARGET_NAME_PREFIX = 'Cinder-Target-' IP = '10.0.0.1' IQN = 'iqn.2000-01.com.synology:' + TARGET_NAME_PREFIX + VOLUME_ID TRG_ID = 1 CHAP_AUTH_USERNAME = 'username' CHAP_AUTH_PASSWORD = 'password' VOLUME = { '_name_id': '', 'name': fake.VOLUME_NAME, 'id': VOLUME_ID, 'display_name': 'fake_volume', 'size': 10, 'provider_location': '%s:3260,%d %s 1' % (IP, TRG_ID, IQN), 'provider_auth': 'CHAP %(user)s %(pass)s' % { 'user': CHAP_AUTH_USERNAME, 'pass': CHAP_AUTH_PASSWORD}, } NEW_VOLUME_ID = fake.VOLUME2_ID IQN2 = 'iqn.2000-01.com.synology:' + TARGET_NAME_PREFIX + NEW_VOLUME_ID NEW_TRG_ID = 2 NEW_VOLUME = { 'name': fake.VOLUME2_NAME, 'id': NEW_VOLUME_ID, 'display_name': 'new_fake_volume', 'size': 10, 'provider_location': '%s:3260,%d %s 1' % (IP, NEW_TRG_ID, IQN2), } SNAPSHOT_ID = fake.SNAPSHOT_ID DS_SNAPSHOT_UUID = 'ca86a56a-40d8-4210-974c-ef15dbf01cba' SNAPSHOT_METADATA = { 'snap-meta1': 'value1', 'snap-meta2': 'value2', 'snap-meta3': 'value3', } SNAPSHOT = { 'name': fake.SNAPSHOT_NAME, 'id': SNAPSHOT_ID, 'volume_id': VOLUME_ID, 'volume_name': VOLUME['name'], 'volume_size': 10, 'display_name': 'fake_snapshot', 'volume': VOLUME, 'metadata': SNAPSHOT_METADATA, } SNAPSHOT_INFO = { 'is_action_locked': False, 'snapshot_id': 1, 'status': 'Healthy', 'uuid': DS_SNAPSHOT_UUID, } INITIATOR_IQN = 'iqn.1993-08.org.debian:01:604af6a341' CONNECTOR = { 'initiator': INITIATOR_IQN, } CONTEXT = { } LOCAL_PATH = '/dev/isda' 
IMAGE_SERVICE = 'image_service' IMAGE_ID = 1 IMAGE_META = { 'id': IMAGE_ID } POOL_NAME = 'volume1' NODE_UUID = '72003c93-2db2-4f00-a169-67c5eae86bb1' NODE_UUID2 = '8e1e8b82-1ef9-4157-a4bf-e069355386c2' HOST = { 'capabilities': { 'pool_name': 'volume2', 'backend_info': 'Synology:iscsi:' + NODE_UUID, }, } POOL_INFO = { 'display_name': 'Volume 1', 'raid_type': 'raid_1', 'readonly': False, 'fs_type': 'ext4', 'location': 'internal', 'eppool_used_byte': '139177984', 'size_total_byte': '487262806016', 'volume_id': 1, 'size_free_byte': '486521139200', 'container': 'internal', 'volume_path': '/volume1', 'single_volume': True } LUN_UUID = 'e1315f33-ba35-42c3-a3e7-5a06958eca30' LUN_INFO = { 'status': '', 'is_action_locked': False, 'name': VOLUME['name'], 'extent_size': 0, 'allocated_size': 0, 'uuid': LUN_UUID, 'is_mapped': True, 'lun_id': 3, 'location': '/volume2', 'restored_time': 0, 'type': 143, 'size': 1073741824 } FAKE_API = 'SYNO.Fake.API' FAKE_METHOD = 'fake' FAKE_PATH = 'fake.cgi' class MockResponse(object): def __init__(self, json_data, status_code): self.json_data = json_data self.status_code = status_code def json(self): return self.json_data class SynoSessionTestCase(test.TestCase): @mock.patch('requests.post', return_value=MockResponse( {'data': {'sid': 'sid'}, 'success': True}, HTTPStatus.OK)) def setUp(self, _mock_post): super(SynoSessionTestCase, self).setUp() self.host = '127.0.0.1' self.port = 5001 self.username = 'admin' self.password = 'admin' self.https = True self.ssl_verify = False self.one_time_pass = None self.device_id = None self.session = common.Session(self.host, self.port, self.username, self.password, self.https, self.ssl_verify, self.one_time_pass, self.device_id) self.session.__class__.__del__ = lambda x: x def test_query(self): out = { 'maxVersion': 3, 'minVersion': 1, 'path': FAKE_PATH, 'requestFormat': 'JSON' } data = { 'api': 'SYNO.API.Info', 'version': 1, 'method': 'query', 'query': FAKE_API } requests.post = mock.Mock(side_effect=[ MockResponse({ 'data': { FAKE_API: out }, 'success': True }, HTTPStatus.OK), MockResponse({ 'data': { FAKE_API: out } }, HTTPStatus.OK), ]) result = self.session.query(FAKE_API) requests.post.assert_called_once_with( 'https://127.0.0.1:5001/webapi/query.cgi', data=data, verify=self.ssl_verify) self.assertDictEqual(out, result) result = self.session.query(FAKE_API) self.assertIsNone(result) def test__random_AES_passphrase(self): lengths_to_test = [0, 1, 10, 128, 501, 1024, 4096] for test_length in lengths_to_test: self.assertEqual( test_length, len(self.session._random_AES_passphrase(test_length)) ) def test__encrypt_RSA(self): # Initialize a fixed 1024 bit public/private key pair public_numbers = rsa.RSAPublicNumbers( int('10001', 16), int('c42eadf905d47388d84baeec2d5391ba7f91b35912933032c9c8a32d6358' '9cef1dfe532138adfad41fd41910cd12fbc05b8876f70aa1340fccf3227d' '087d1e47256c60ae49abee7c779815ec085265518791da38168a0597091d' '4c6ff10c0fa6616f250b85edfb4066f655695e304c0dc40c26fc11541e4c' '1be47771fcc1d257cccbb656015c5daed64aad7c8ae024f82531b7e637f4' '87530b77498d1bc7247687541fbbaa01112866da06f30185dde15131e89e' '27b30f07f10ddef23dd4da7bf3e216c733a4004415c9d1dd9bd5032e8b55' '4eb56efa9cd5cd1b416e0e55c903536787454ca3d3aba87edb70768f630c' 'beab3781848ff5ee40edfaee57ac87c9', 16) ) private_numbers = rsa.RSAPrivateNumbers( int('f0aa7e45ffb23ca683e1b01a9e1d77e5affaf9afa0094fb1eb89a3c8672b' '43ab9beb11e4ecdd2c8f88738db56be4149c55c28379480ac68a5727ba28' '4a47565579dbf083167a2845f5f267598febde3f7b12ba10da32ad2edff8' 
'4efd019498e0d8e03f6ddb8a5e80cdb862da9c0c921571fdb56ae7e0480a' 'de846e328517aa23', 16), int('d0ae9ce41716c4bdac074423d57e540b6f48ee42d9b06bdac3b3421ea2ae' 'e21088b3ae50acfe168edefda722dc15bc456bba76a98b8035ffa4da12dc' 'a92bad582c935791f9a48b416f53c728fd1866c8ecf2ca00dfa667a962d3' 'c9818cce540c5e9d2ef8843c5adfde0938ac8b5e2c592838c422ffac43ff' '4a4907c129de7723', 16), int('3733cf5e58069cefefb4f4269ee67a0619695d26fe340e86ec0299efe699' '83a741305421eff9fcaf7db947c8537c38fcba84debccaefeb5f5ad33b6c' '255c578dbb7910875a5197cccc362e4cf9567e0dfff0c98fa8bff3acb932' 'd6545566886ccfd3df7fab92f874f9c3eceab6472ecf5ccff2945127f352' '8532b76d8aaadb4dbcf0e5bae8c9c8597511e0771942f12e29bbee1ceef5' '4a6ba97e0096354b13ae4ca22e9be1a551a1bc8db9392de6bbad99b956b5' 'bb4b7f5094086e6eefd432066102a228bc18012cc31a7777e2e657eb115a' '9d718d413f2bd7a448a783c049afaaf127486b2c17feebb930e7ac8e6a07' 'd9c843beedfa8cec52e1aba98099baa5', 16), int('c8ab1050e36c457ffe550f56926235d7b18d8de5af86340a413fe9edae80' '77933e9599bd0cf73a318feff1c7c4e74f7c2f51d9f82566beb71906ca04' 'd0327d3d16379a6a633286241778004ec05f46581e11b64d58f28a4e9c77' '59bd423519e7d94dd9f58ae9ebf47013ff71124eb4fbe6a94a3c928d02e4' 'f536ecff78d40b8b', 16), int('5bb873a2d8f71bf015dd77b89c4c931a1786a19a665de179dccc3c4284d4' '82ee2b7776256573a46c955c3d8ad7db01ce2d645e6574b81c83c96c4420' '1286ed00b54ee98d72813ce7bccbc0dca629847bc99188f1cb5b3372c2ca' '3d6620824b74c85d23d8fd1e1dff09735a22947b06d90511b63b7fceb270' '51b139a45007c4ab', 16), int('cfeff2a88112512b327999eb926a0564c431ebed2e1456f51d274e4e6d7d' 'd75d5b26339bbca2807aa71008e9a08bd9fa0e53e3960e3b6e8c6e1a46d2' 'b8e89b218d3b453f7ed0020504d1679374cd884ae3bb3b88b54fb429f082' 'fa4e9d3f296c59d5d89fe16b0931dcf062bc309cf122c722c13ffb0fa0c5' '77d0abddcc655017', 16), public_numbers ) private_key = private_numbers.private_key(default_backend()) # run the _encrypt_RSA method original_text = 'test _encrypt_RSA' encrypted_text = self.session._encrypt_RSA( public_numbers.n, public_numbers.e, original_text ) # decrypt the output using the corresponding private key decrypted_bytes = private_key.decrypt( encrypted_text, padding.PKCS1v15() ) decrypted_text = decrypted_bytes.decode('ascii') self.assertEqual(original_text, decrypted_text) def test__encrypt_params(self): # setup mock cipherkey = 'cipherkey' self.session._get_enc_info = mock.Mock(return_value={ 'public_key': 'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa', 'cipherkey': cipherkey, 'ciphertoken': 'ciphertoken', 'server_time': 1111111111, }) self.session._encrypt_RSA = mock.Mock( return_value=b'1234567890abcdef' ) self.session._encrypt_AES = mock.Mock( return_value=b'fedcba0987654321' ) # call the method params = { 'account': 'account', 'passwd': 'passwd', 'session': 'sessionid', 'format': 'sid' } encrypted_data = self.session._encrypt_params(params) # check the format of the output self.assertDictEqual( json.loads(encrypted_data[cipherkey]), {'rsa': 'MTIzNDU2Nzg5MGFiY2RlZg==', 'aes': 'ZmVkY2JhMDk4NzY1NDMyMQ=='} ) @ddt.ddt class SynoAPIRequestTestCase(test.TestCase): @mock.patch('requests.post') def setUp(self, _mock_post): super(SynoAPIRequestTestCase, self).setUp() self.host = '127.0.0.1' self.port = 5001 self.username = 'admin' self.password = 'admin' self.https = True self.ssl_verify = False self.one_time_pass = None self.device_id = None self.request = common.APIRequest(self.host, self.port, self.username, self.password, self.https, self.ssl_verify, self.one_time_pass, self.device_id) self.request._APIRequest__session._sid = 'sid' self.request._APIRequest__session.__class__.__del__ 
= lambda x: x @mock.patch.object(common, 'Session') def test_new_session(self, _mock_session): self.device_id = 'did' self.request = common.APIRequest(self.host, self.port, self.username, self.password, self.https, self.ssl_verify, self.one_time_pass, self.device_id) result = self.request.new_session() self.assertIsNone(result) def test__start(self): out = { 'maxVersion': 3, 'minVersion': 1, 'path': FAKE_PATH, 'requestFormat': 'JSON' } self.request._APIRequest__session.query = mock.Mock(return_value=out) result = self.request._start(FAKE_API, 3) (self.request._APIRequest__session.query. assert_called_once_with(FAKE_API)) self.assertEqual(FAKE_PATH, result) out.update(maxVersion=2) self.assertRaises(exception.APIException, self.request._start, FAKE_API, 3) def test__encode_param(self): param = { 'api': FAKE_API, 'method': FAKE_METHOD, 'version': 1, '_sid': 'sid' } self.request._jsonFormat = True result = self.request._encode_param(param) self.assertIsInstance(result, str) def test_request(self): version = 1 self.request._start = mock.Mock(return_value='fake.cgi') self.request._encode_param = mock.Mock(side_effect=lambda x: x) self.request.new_session = mock.Mock() requests.post = mock.Mock(side_effect=[ MockResponse({'success': True}, HTTPStatus.OK), MockResponse({'error': {'code': HTTPStatus.SWITCHING_PROTOCOLS}, 'success': False}, HTTPStatus.OK), MockResponse({'error': {'code': HTTPStatus.SWITCHING_PROTOCOLS}}, HTTPStatus.OK), MockResponse({}, HTTPStatus.INTERNAL_SERVER_ERROR) ]) result = self.request.request(FAKE_API, FAKE_METHOD, version) self.assertDictEqual({'success': True}, result) result = self.request.request(FAKE_API, FAKE_METHOD, version) self.assertDictEqual( {'error': {'code': HTTPStatus.SWITCHING_PROTOCOLS}, 'success': False}, result) self.assertRaises(exception.MalformedResponse, self.request.request, FAKE_API, FAKE_METHOD, version) result = self.request.request(FAKE_API, FAKE_METHOD, version) self.assertDictEqual( {'http_status': HTTPStatus.INTERNAL_SERVER_ERROR}, result) @mock.patch.object(common.LOG, 'debug') @ddt.data(105, 119) def test_request_auth_error(self, _code, _log): version = 1 self.request._start = mock.Mock(return_value='fake.cgi') self.request._encode_param = mock.Mock(side_effect=lambda x: x) self.request.new_session = mock.Mock() requests.post = mock.Mock(return_value= MockResponse({ 'error': {'code': _code}, 'success': False }, HTTPStatus.OK)) self.assertRaises(common.SynoAuthError, self.request.request, FAKE_API, FAKE_METHOD, version) class SynoCommonTestCase(test.TestCase): @mock.patch.object(common.SynoCommon, '_get_node_uuid', return_value=NODE_UUID) @mock.patch.object(common, 'APIRequest') def setUp(self, _request, _get_node_uuid): super(SynoCommonTestCase, self).setUp() self.conf = self.setup_configuration() self.common = common.SynoCommon(self.conf, 'iscsi') self.common.vendor_name = 'Synology' self.common.driver_type = 'iscsi' self.common.volume_backend_name = 'DiskStation' self.common.target_port = 3260 def setup_configuration(self): config = mock.Mock(spec=conf.Configuration) config.use_chap_auth = False config.target_protocol = 'iscsi' config.target_ip_address = IP config.target_port = 3260 config.synology_admin_port = 5000 config.synology_username = 'admin' config.synology_password = 'admin' config.synology_ssl_verify = True config.synology_one_time_pass = '123456' config.synology_pool_name = POOL_NAME config.volume_dd_blocksize = 1 config.target_prefix = 'iqn.2000-01.com.synology:' config.chap_username = 'abcd' config.chap_password = 'qwerty' 
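# The capacity options that follow (reserved_percentage, max_over_subscription_ratio) show up unchanged in the stats dict expected by test_update_volume_stats below.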
config.reserved_percentage = 0 config.max_over_subscription_ratio = 20 return config @mock.patch.object(common.SynoCommon, '_get_node_uuid', return_value=NODE_UUID) @mock.patch.object(common, 'APIRequest') def test___init__(self, _request, _get_node_uuid): self.conf.safe_get = (mock.Mock(side_effect=[ self.conf.target_ip_address, '', ''])) self.assertRaises(exception.InvalidConfigurationValue, self.common.__init__, self.conf, 'iscsi') self.assertRaises(exception.InvalidConfigurationValue, self.common.__init__, self.conf, 'iscsi') def test__get_node_uuid(self): out = { 'data': { 'nodes': [{ 'uuid': NODE_UUID }] }, 'success': True } self.common.exec_webapi = ( mock.Mock(side_effect=[ out, out, common.SynoAuthError(message='dont care')])) result = self.common._get_node_uuid() (self.common.exec_webapi. assert_called_with('SYNO.Core.ISCSI.Node', 'list', mock.ANY)) self.assertEqual(NODE_UUID, result) del out['data']['nodes'] self.assertRaises(exception.VolumeDriverException, self.common._get_node_uuid) self.assertRaises(common.SynoAuthError, self.common._get_node_uuid) def test__get_pool_info(self): out = { 'data': { 'volume': POOL_INFO }, 'success': True } self.common.exec_webapi = ( mock.Mock(side_effect=[ out, out, common.SynoAuthError(message='dont care')])) result = self.common._get_pool_info() (self.common.exec_webapi. assert_called_with('SYNO.Core.Storage.Volume', 'get', mock.ANY, volume_path='/' + POOL_NAME)) self.assertDictEqual(POOL_INFO, result) del out['data']['volume'] self.assertRaises(exception.MalformedResponse, self.common._get_pool_info) self.assertRaises(common.SynoAuthError, self.common._get_pool_info) self.conf.synology_pool_name = '' self.assertRaises(exception.InvalidConfigurationValue, self.common._get_pool_info) def test__get_pool_size(self): pool_info = copy.deepcopy(POOL_INFO) self.common._get_pool_info = mock.Mock(return_value=pool_info) result = self.common._get_pool_size() self.assertEqual((int(int(POOL_INFO['size_free_byte']) / units.Gi), int(int(POOL_INFO['size_total_byte']) / units.Gi), math.ceil((float(POOL_INFO['size_total_byte']) - float(POOL_INFO['size_free_byte']) - float(POOL_INFO['eppool_used_byte'])) / units.Gi)), result) del pool_info['size_free_byte'] self.assertRaises(exception.MalformedResponse, self.common._get_pool_size) def test__get_pool_lun_provisioned_size(self): out = { 'data': { 'luns': [{ 'lun_id': 1, 'location': '/' + POOL_NAME, 'size': 5368709120 }, { 'lun_id': 2, 'location': '/' + POOL_NAME, 'size': 3221225472 }] }, 'success': True } self.common.exec_webapi = mock.Mock(return_value=out) result = self.common._get_pool_lun_provisioned_size() (self.common.exec_webapi. assert_called_with('SYNO.Core.ISCSI.LUN', 'list', mock.ANY, location='/' + POOL_NAME)) self.assertEqual(int(math.ceil(float(5368709120 + 3221225472) / units.Gi)), result) def test__get_pool_lun_provisioned_size_error(self): out = { 'data': {}, 'success': True } self.common.exec_webapi = mock.Mock(return_value=out) self.assertRaises(exception.MalformedResponse, self.common._get_pool_lun_provisioned_size) self.conf.synology_pool_name = '' self.assertRaises(exception.InvalidConfigurationValue, self.common._get_pool_lun_provisioned_size) def test__get_lun_info(self): out = { 'data': { 'lun': LUN_INFO }, 'success': True } self.common.exec_webapi = ( mock.Mock(side_effect=[ out, out, common.SynoAuthError(message='dont care')])) result = self.common._get_lun_info(VOLUME['name'], ['is_mapped']) (self.common.exec_webapi. 
assert_called_with('SYNO.Core.ISCSI.LUN', 'get', mock.ANY, uuid=VOLUME['name'], additional=['is_mapped'])) self.assertDictEqual(LUN_INFO, result) del out['data']['lun'] self.assertRaises(exception.MalformedResponse, self.common._get_lun_info, VOLUME['name']) self.assertRaises(common.SynoAuthError, self.common._get_lun_info, VOLUME['name']) self.assertRaises(exception.InvalidParameterValue, self.common._get_lun_info, '') def test__get_lun_uuid(self): lun_info = copy.deepcopy(LUN_INFO) self.common._get_lun_info = ( mock.Mock(side_effect=[ lun_info, lun_info, common.SynoAuthError(message='dont care')])) result = self.common._get_lun_uuid(VOLUME['name']) self.assertEqual(LUN_UUID, result) del lun_info['uuid'] self.assertRaises(exception.MalformedResponse, self.common._get_lun_uuid, VOLUME['name']) self.assertRaises(common.SynoAuthError, self.common._get_lun_uuid, VOLUME['name']) self.assertRaises(exception.InvalidParameterValue, self.common._get_lun_uuid, '') def test__get_lun_status(self): lun_info = copy.deepcopy(LUN_INFO) self.common._get_lun_info = ( mock.Mock(side_effect=[ lun_info, lun_info, lun_info, common.SynoAuthError(message='dont care')])) result = self.common._get_lun_status(VOLUME['name']) self.assertEqual((lun_info['status'], lun_info['is_action_locked']), result) del lun_info['is_action_locked'] self.assertRaises(exception.MalformedResponse, self.common._get_lun_status, VOLUME['name']) del lun_info['status'] self.assertRaises(exception.MalformedResponse, self.common._get_lun_status, VOLUME['name']) self.assertRaises(common.SynoAuthError, self.common._get_lun_status, VOLUME['name']) self.assertRaises(exception.InvalidParameterValue, self.common._get_lun_status, '') def test__get_snapshot_info(self): out = { 'data': { 'snapshot': SNAPSHOT_INFO }, 'success': True } self.common.exec_webapi = ( mock.Mock(side_effect=[ out, out, common.SynoAuthError(message='dont care')])) result = self.common._get_snapshot_info(DS_SNAPSHOT_UUID, additional=['status']) (self.common.exec_webapi. 
assert_called_with('SYNO.Core.ISCSI.LUN', 'get_snapshot', mock.ANY, snapshot_uuid=DS_SNAPSHOT_UUID, additional=['status'])) self.assertDictEqual(SNAPSHOT_INFO, result) del out['data']['snapshot'] self.assertRaises(exception.MalformedResponse, self.common._get_snapshot_info, DS_SNAPSHOT_UUID) self.assertRaises(common.SynoAuthError, self.common._get_snapshot_info, DS_SNAPSHOT_UUID) self.assertRaises(exception.InvalidParameterValue, self.common._get_snapshot_info, '') def test__get_snapshot_status(self): snapshot_info = copy.deepcopy(SNAPSHOT_INFO) self.common._get_snapshot_info = ( mock.Mock(side_effect=[ snapshot_info, snapshot_info, snapshot_info, common.SynoAuthError(message='dont care')])) result = self.common._get_snapshot_status(DS_SNAPSHOT_UUID) self.assertEqual((snapshot_info['status'], snapshot_info['is_action_locked']), result) del snapshot_info['is_action_locked'] self.assertRaises(exception.MalformedResponse, self.common._get_snapshot_status, DS_SNAPSHOT_UUID) del snapshot_info['status'] self.assertRaises(exception.MalformedResponse, self.common._get_snapshot_status, DS_SNAPSHOT_UUID) self.assertRaises(common.SynoAuthError, self.common._get_snapshot_status, DS_SNAPSHOT_UUID) self.assertRaises(exception.InvalidParameterValue, self.common._get_snapshot_status, '') def test__get_metadata_value(self): ctxt = context.get_admin_context() fake_vol_obj = fake_volume.fake_volume_obj(ctxt) self.assertRaises(exception.VolumeMetadataNotFound, self.common._get_metadata_value, fake_vol_obj, 'no_such_key') fake_snap_obj = (fake_snapshot. fake_snapshot_obj(ctxt, expected_attrs=['metadata'])) self.assertRaises(exception.SnapshotMetadataNotFound, self.common._get_metadata_value, fake_snap_obj, 'no_such_key') meta = {'snapshot_metadata': [{'key': 'ds_snapshot_UUID', 'value': DS_SNAPSHOT_UUID}], 'expected_attrs': ['metadata']} fake_snap_obj = fake_snapshot.fake_snapshot_obj(ctxt, **meta) result = self.common._get_metadata_value(fake_snap_obj, 'ds_snapshot_UUID') self.assertEqual(DS_SNAPSHOT_UUID, result) self.assertRaises(exception.MetadataAbsent, self.common._get_metadata_value, SNAPSHOT, 'no_such_key') def test__target_create_with_chap_auth(self): out = { 'data': { 'target_id': TRG_ID }, 'success': True } trg_name = self.common.TARGET_NAME_PREFIX + VOLUME['id'] iqn = self.conf.target_prefix + trg_name self.conf.use_chap_auth = True self.common.exec_webapi = mock.Mock(return_value=out) self.conf.safe_get = ( mock.Mock(side_effect=[ self.conf.use_chap_auth, 'abcd', 'qwerty', self.conf.target_prefix])) result = self.common._target_create(VOLUME['id']) (self.common.exec_webapi. assert_called_with('SYNO.Core.ISCSI.Target', 'create', mock.ANY, name=trg_name, iqn=iqn, auth_type=1, user='abcd', password='qwerty', max_sessions=0)) self.assertEqual((IQN, TRG_ID, 'CHAP abcd qwerty'), result) def test__target_create_without_chap_auth(self): out = { 'data': { 'target_id': TRG_ID }, 'success': True } trg_name = self.common.TARGET_NAME_PREFIX + VOLUME['id'] iqn = self.conf.target_prefix + trg_name self.common.exec_webapi = mock.Mock(return_value=out) self.conf.safe_get = ( mock.Mock(side_effect=[ self.conf.use_chap_auth, self.conf.target_prefix])) result = self.common._target_create(VOLUME['id']) (self.common.exec_webapi. 
assert_called_with('SYNO.Core.ISCSI.Target', 'create', mock.ANY, name=trg_name, iqn=iqn, auth_type=0, user='', password='', max_sessions=0)) self.assertEqual((IQN, TRG_ID, ''), result) def test__target_create_error(self): out = { 'data': { }, 'success': True } self.common.exec_webapi = ( mock.Mock(side_effect=[ out, common.SynoAuthError(message='dont care')])) self.conf.safe_get = ( mock.Mock(side_effect=[ self.conf.use_chap_auth, self.conf.target_prefix, self.conf.use_chap_auth, self.conf.target_prefix])) self.assertRaises(exception.VolumeDriverException, self.common._target_create, VOLUME['id']) self.assertRaises(common.SynoAuthError, self.common._target_create, VOLUME['id']) self.assertRaises(exception.InvalidParameterValue, self.common._target_create, '') def test__target_delete(self): out = { 'success': True } self.common.exec_webapi = ( mock.Mock(side_effect=[ out, common.SynoAuthError(message='dont care')])) result = self.common._target_delete(TRG_ID) (self.common.exec_webapi. assert_called_with('SYNO.Core.ISCSI.Target', 'delete', mock.ANY, target_id=str(TRG_ID))) self.assertIsNone(result) self.assertRaises(common.SynoAuthError, self.common._target_delete, TRG_ID) self.assertRaises(exception.InvalidParameterValue, self.common._target_delete, -1) def test__lun_map_unmap_target(self): out = { 'success': True } self.common.exec_webapi = ( mock.Mock(side_effect=[ out, out, common.SynoAuthError(message='dont care')])) self.common._get_lun_uuid = mock.Mock(return_value=LUN_UUID) result = self.common._lun_map_unmap_target(VOLUME['name'], True, TRG_ID) self.common._get_lun_uuid.assert_called_with(VOLUME['name']) (self.common.exec_webapi. assert_called_with('SYNO.Core.ISCSI.LUN', 'map_target', mock.ANY, uuid=LUN_UUID, target_ids=[str(TRG_ID)])) self.assertIsNone(result) result = self.common._lun_map_unmap_target(VOLUME['name'], False, TRG_ID) (self.common.exec_webapi. 
assert_called_with('SYNO.Core.ISCSI.LUN', 'unmap_target', mock.ANY, uuid=LUN_UUID, target_ids=[str(TRG_ID)])) self.assertIsNone(result) self.assertRaises(common.SynoAuthError, self.common._lun_map_unmap_target, VOLUME['name'], True, TRG_ID) self.assertRaises(exception.InvalidParameterValue, self.common._lun_map_unmap_target, mock.ANY, mock.ANY, -1) def test__lun_map_target(self): self.common._lun_map_unmap_target = mock.Mock() result = self.common._lun_map_target(VOLUME, TRG_ID) self.common._lun_map_unmap_target.assert_called_with(VOLUME, True, TRG_ID) self.assertIsNone(result) def test__lun_ummap_target(self): self.common._lun_map_unmap_target = mock.Mock() result = self.common._lun_unmap_target(VOLUME, TRG_ID) self.common._lun_map_unmap_target.assert_called_with(VOLUME, False, TRG_ID) self.assertIsNone(result) def test__modify_lun_name(self): out = { 'success': True } self.common.exec_webapi = ( mock.Mock(side_effect=[ out, common.SynoAuthError(message='dont care')])) result = self.common._modify_lun_name(VOLUME['name'], NEW_VOLUME['name']) self.assertIsNone(result) self.assertRaises(common.SynoAuthError, self.common._modify_lun_name, VOLUME['name'], NEW_VOLUME['name']) @mock.patch('eventlet.sleep') def test__check_lun_status_normal(self, _patched_sleep): self.common._get_lun_status = ( mock.Mock(side_effect=[ ('normal', True), ('normal', False), ('cloning', False), common.SynoLUNNotExist(message='dont care')])) result = self.common._check_lun_status_normal(VOLUME['name']) self.assertEqual(1, _patched_sleep.call_count) self.assertEqual([mock.call(2)], _patched_sleep.call_args_list) self.common._get_lun_status.assert_called_with(VOLUME['name']) self.assertTrue(result) result = self.common._check_lun_status_normal(VOLUME['name']) self.assertFalse(result) self.assertRaises(common.SynoLUNNotExist, self.common._check_lun_status_normal, VOLUME['name']) @mock.patch('eventlet.sleep') def test__check_snapshot_status_healthy(self, _patched_sleep): self.common._get_snapshot_status = ( mock.Mock(side_effect=[ ('Healthy', True), ('Healthy', False), ('Unhealthy', False), common.SynoLUNNotExist(message='dont care')])) result = self.common._check_snapshot_status_healthy(DS_SNAPSHOT_UUID) self.assertEqual(1, _patched_sleep.call_count) self.assertEqual([mock.call(2)], _patched_sleep.call_args_list) self.common._get_snapshot_status.assert_called_with(DS_SNAPSHOT_UUID) self.assertTrue(result) result = self.common._check_snapshot_status_healthy(DS_SNAPSHOT_UUID) self.assertFalse(result) self.assertRaises(common.SynoLUNNotExist, self.common._check_snapshot_status_healthy, DS_SNAPSHOT_UUID) def test__check_storage_response(self): out = { 'success': False } result = self.common._check_storage_response(out) self.assertEqual('Internal error', result[0]) self.assertIsInstance(result[1], (exception.VolumeBackendAPIException)) def test__check_iscsi_response(self): out = { 'success': False, 'error': { } } self.assertRaises(exception.MalformedResponse, self.common._check_iscsi_response, out) out['error'].update(code=18990505) result = self.common._check_iscsi_response(out, uuid=LUN_UUID) self.assertEqual('Bad LUN UUID [18990505]', result[0]) self.assertIsInstance(result[1], (common.SynoLUNNotExist)) out['error'].update(code=18990532) result = self.common._check_iscsi_response(out, snapshot_id=SNAPSHOT_ID) self.assertEqual('No such snapshot [18990532]', result[0]) self.assertIsInstance(result[1], (exception.SnapshotNotFound)) out['error'].update(code=12345678) result = self.common._check_iscsi_response(out, 
uuid=LUN_UUID) self.assertEqual('Internal error [12345678]', result[0]) self.assertIsInstance(result[1], (exception.VolumeBackendAPIException)) def test__check_ds_pool_status(self): info = copy.deepcopy(POOL_INFO) self.common._get_pool_info = mock.Mock(return_value=info) result = self.common._check_ds_pool_status() self.assertIsNone(result) info['readonly'] = True self.assertRaises(exception.VolumeDriverException, self.common._check_ds_pool_status) del info['readonly'] self.assertRaises(exception.MalformedResponse, self.common._check_ds_pool_status) def test__check_ds_version(self): ver1 = 'DSM 6.1-9999' ver2 = 'DSM UC 1.0-9999 Update 2' ver3 = 'DSM 6.0.1-9999 Update 2' ver4 = 'DSM 6.0-9999 Update 2' ver5 = 'DSM 5.2-9999' out = { 'data': { }, 'success': True } self.common.exec_webapi = mock.Mock(return_value=out) self.assertRaises(exception.MalformedResponse, self.common._check_ds_version) (self.common.exec_webapi. assert_called_with('SYNO.Core.System', 'info', mock.ANY, type='firmware')) out['data'].update(firmware_ver=ver1) result = self.common._check_ds_version() self.assertIsNone(result) out['data'].update(firmware_ver=ver2) result = self.common._check_ds_version() self.assertIsNone(result) out['data'].update(firmware_ver=ver3) self.assertRaises(exception.VolumeDriverException, self.common._check_ds_version) out['data'].update(firmware_ver=ver4) self.assertRaises(exception.VolumeDriverException, self.common._check_ds_version) out['data'].update(firmware_ver=ver5) self.assertRaises(exception.VolumeDriverException, self.common._check_ds_version) self.common.exec_webapi = ( mock.Mock(side_effect= common.SynoAuthError(message='dont care'))) self.assertRaises(common.SynoAuthError, self.common._check_ds_version) def test__check_ds_ability(self): out = { 'data': { 'support_storage_mgr': 'yes', 'support_iscsi_target': 'yes', 'support_vaai': 'yes', 'supportsnapshot': 'yes', }, 'success': True } self.common.exec_webapi = mock.Mock(return_value=out) result = self.common._check_ds_ability() self.assertIsNone(result) (self.common.exec_webapi. 
assert_called_with('SYNO.Core.System', 'info', mock.ANY, type='define')) out['data'].update(supportsnapshot='no') self.assertRaises(exception.VolumeDriverException, self.common._check_ds_ability) out['data'].update(support_vaai='no') self.assertRaises(exception.VolumeDriverException, self.common._check_ds_ability) out['data'].update(support_iscsi_target='no') self.assertRaises(exception.VolumeDriverException, self.common._check_ds_ability) out['data'].update(support_storage_mgr='no') self.assertRaises(exception.VolumeDriverException, self.common._check_ds_ability) out['data'].update(usbstation='yes') self.assertRaises(exception.VolumeDriverException, self.common._check_ds_ability) del out['data'] self.assertRaises(exception.MalformedResponse, self.common._check_ds_ability) self.common.exec_webapi = ( mock.Mock(side_effect= common.SynoAuthError(message='dont care'))) self.assertRaises(common.SynoAuthError, self.common._check_ds_ability) @mock.patch.object(common.LOG, 'exception') def test_check_response(self, _logexc): out = { 'success': True } bad_out1 = { 'api_info': { 'api': 'SYNO.Core.ISCSI.LUN', 'method': 'create', 'version': 1 }, 'success': False } bad_out2 = { 'api_info': { 'api': 'SYNO.Core.Storage.Volume', 'method': 'get', 'version': 1 }, 'success': False } bad_out3 = { 'api_info': { 'api': 'SYNO.Core.System', 'method': 'info', 'version': 1 }, 'success': False } self.common._check_iscsi_response = ( mock.Mock(return_value= ('Bad LUN UUID', common.SynoLUNNotExist(message='dont care')))) self.common._check_storage_response = ( mock.Mock(return_value= ('Internal error', exception. VolumeBackendAPIException(message='dont care')))) result = self.common.check_response(out) self.assertEqual(0, _logexc.call_count) self.assertIsNone(result) self.assertRaises(common.SynoLUNNotExist, self.common.check_response, bad_out1) self.assertRaises(exception.VolumeBackendAPIException, self.common.check_response, bad_out2) self.assertRaises(exception.VolumeBackendAPIException, self.common.check_response, bad_out3) def test_exec_webapi(self): api = 'SYNO.Fake.WebAPI' method = 'fake' version = 1 resp = {} bad_resp = { 'http_status': HTTPStatus.INTERNAL_SERVER_ERROR } expected = copy.deepcopy(resp) expected.update(api_info={'api': api, 'method': method, 'version': version}) self.common.synoexec = mock.Mock(side_effect=[resp, bad_resp]) result = self.common.exec_webapi(api, method, version, param1='value1', param2='value2') self.common.synoexec.assert_called_once_with(api, method, version, param1='value1', param2='value2') self.assertDictEqual(expected, result) self.assertRaises(common.SynoAPIHTTPError, self.common.exec_webapi, api, method, version, param1='value1', param2='value2') def test_get_ip(self): result = self.common.get_ip() self.assertEqual(self.conf.target_ip_address, result) def test_get_provider_location(self): self.common.get_ip = ( mock.Mock(return_value=self.conf.target_ip_address)) self.conf.safe_get = ( mock.Mock(return_value=['10.0.0.2', '10.0.0.3'])) expected = ('10.0.0.1:3260;10.0.0.2:3260;10.0.0.3:3260' + ',%(tid)d %(iqn)s 0') % {'tid': TRG_ID, 'iqn': IQN} result = self.common.get_provider_location(IQN, TRG_ID) self.assertEqual(expected, result) def test_is_lun_mapped(self): bad_lun_info = copy.deepcopy(LUN_INFO) del bad_lun_info['is_mapped'] self.common._get_lun_info = ( mock.Mock(side_effect=[ LUN_INFO, common.SynoAuthError(message='dont care'), bad_lun_info])) result = self.common.is_lun_mapped(VOLUME['name']) self.assertEqual(LUN_INFO['is_mapped'], result) 
self.assertRaises(common.SynoAuthError, self.common.is_lun_mapped, VOLUME['name']) self.assertRaises(exception.MalformedResponse, self.common.is_lun_mapped, VOLUME['name']) self.assertRaises(exception.InvalidParameterValue, self.common.is_lun_mapped, '') def test_check_for_setup_error(self): self.common._check_ds_pool_status = mock.Mock() self.common._check_ds_version = mock.Mock() self.common._check_ds_ability = mock.Mock() result = self.common.check_for_setup_error() self.common._check_ds_pool_status.assert_called_once_with() self.common._check_ds_version.assert_called_once_with() self.common._check_ds_ability.assert_called_once_with() self.assertIsNone(result) def test_update_volume_stats(self): self.common._get_pool_size = mock.Mock(return_value=(10, 100, 50)) self.common._get_pool_lun_provisioned_size = ( mock.Mock(return_value=300)) data = { 'volume_backend_name': 'DiskStation', 'vendor_name': 'Synology', 'storage_protocol': 'iscsi', 'consistencygroup_support': False, 'QoS_support': False, 'thin_provisioning_support': True, 'thick_provisioning_support': False, 'reserved_percentage': 0, 'free_capacity_gb': 10, 'total_capacity_gb': 100, 'provisioned_capacity_gb': 350, 'max_over_subscription_ratio': 20, 'target_ip_address': '10.0.0.1', 'pool_name': 'volume1', 'backend_info': 'Synology:iscsi:72003c93-2db2-4f00-a169-67c5eae86bb1' } result = self.common.update_volume_stats() self.assertDictEqual(data, result) def test_create_volume(self): out = { 'success': True } self.common.exec_webapi = ( mock.Mock(side_effect=[ out, out, common.SynoAuthError(message='dont care')])) self.common._check_lun_status_normal = ( mock.Mock(side_effect=[True, False, True])) result = self.common.create_volume(VOLUME) (self.common.exec_webapi. assert_called_with('SYNO.Core.ISCSI.LUN', 'create', mock.ANY, name=VOLUME['name'], type=self.common.CINDER_LUN, location='/' + self.conf.synology_pool_name, size=VOLUME['size'] * units.Gi)) self.assertIsNone(result) self.assertRaises(exception.VolumeDriverException, self.common.create_volume, VOLUME) self.assertRaises(common.SynoAuthError, self.common.create_volume, VOLUME) def test_delete_volume(self): out = { 'success': True } self.common._get_lun_uuid = mock.Mock(return_value=LUN_UUID) self.common.exec_webapi = ( mock.Mock(side_effect=[ out, common.SynoLUNNotExist(message='dont care'), common.SynoAuthError(message='dont care')])) result = self.common.delete_volume(VOLUME) self.common._get_lun_uuid.assert_called_with(VOLUME['name']) (self.common.exec_webapi. assert_called_with('SYNO.Core.ISCSI.LUN', 'delete', mock.ANY, uuid=LUN_UUID)) self.assertIsNone(result) result = self.common.delete_volume(VOLUME) self.assertIsNone(result) self.assertRaises(common.SynoAuthError, self.common.delete_volume, VOLUME) def test_create_cloned_volume(self): out = { 'success': True } new_volume = copy.deepcopy(NEW_VOLUME) new_volume['size'] = 20 self.common.exec_webapi = mock.Mock(return_value=out) self.common._get_lun_uuid = ( mock.Mock(side_effect=[ LUN_UUID, LUN_UUID, LUN_UUID, exception.InvalidParameterValue('dont care')])) self.common.extend_volume = mock.Mock() self.common._check_lun_status_normal = ( mock.Mock(side_effect=[True, True, False, False])) result = self.common.create_cloned_volume(new_volume, VOLUME) self.common._get_lun_uuid.assert_called_with(VOLUME['name']) (self.common.exec_webapi. 
assert_called_with('SYNO.Core.ISCSI.LUN', 'clone', mock.ANY, src_lun_uuid=LUN_UUID, dst_lun_name=new_volume['name'], is_same_pool=True, clone_type='CINDER')) (self.common._check_lun_status_normal. assert_called_with(new_volume['name'])) self.common.extend_volume.assert_called_once_with(new_volume, new_volume['size']) self.assertIsNone(result) new_volume['size'] = 10 result = self.common.create_cloned_volume(new_volume, VOLUME) self.assertIsNone(result) self.assertRaises(exception.VolumeDriverException, self.common.create_cloned_volume, new_volume, VOLUME) self.assertRaises(exception.InvalidParameterValue, self.common.create_cloned_volume, new_volume, VOLUME) def test_extend_volume(self): new_size = 20 out = { 'success': True } self.common.exec_webapi = mock.Mock(return_value=out) self.common._get_lun_uuid = ( mock.Mock(side_effect=[ LUN_UUID, exception.InvalidParameterValue('dont care')])) result = self.common.extend_volume(VOLUME, new_size) (self.common.exec_webapi. assert_called_with('SYNO.Core.ISCSI.LUN', 'set', mock.ANY, uuid=LUN_UUID, new_size=new_size * units.Gi)) self.assertIsNone(result) self.assertRaises(exception.ExtendVolumeError, self.common.extend_volume, VOLUME, new_size) def test_update_migrated_volume(self): expected = { '_name_id': None } self.common._modify_lun_name = mock.Mock(side_effect=[None, Exception]) result = self.common.update_migrated_volume(VOLUME, NEW_VOLUME) self.common._modify_lun_name.assert_called_with(NEW_VOLUME['name'], VOLUME['name']) self.assertDictEqual(expected, result) self.assertRaises(exception.VolumeMigrationFailed, self.common.update_migrated_volume, VOLUME, NEW_VOLUME) def test_create_snapshot(self): expected_result = { 'metadata': { self.common.METADATA_DS_SNAPSHOT_UUID: DS_SNAPSHOT_UUID } } expected_result['metadata'].update(SNAPSHOT['metadata']) out = { 'data': { 'snapshot_uuid': DS_SNAPSHOT_UUID, 'snapshot_id': SNAPSHOT_ID }, 'success': True } self.common.exec_webapi = mock.Mock(return_value=out) self.common._check_snapshot_status_healthy = ( mock.Mock(side_effect=[True, False])) result = self.common.create_snapshot(SNAPSHOT) (self.common.exec_webapi. assert_called_with('SYNO.Core.ISCSI.LUN', 'take_snapshot', mock.ANY, src_lun_uuid=SNAPSHOT['volume']['name'], is_app_consistent=False, is_locked=False, taken_by='Cinder', description='(Cinder) ' + SNAPSHOT['id'])) self.assertDictEqual(expected_result, result) self.assertRaises(exception.VolumeDriverException, self.common.create_snapshot, SNAPSHOT) def test_create_snapshot_error(self): out = { 'data': { 'snapshot_uuid': 1, 'snapshot_id': SNAPSHOT_ID }, 'success': True } self.common.exec_webapi = mock.Mock(return_value=out) self.assertRaises(exception.MalformedResponse, self.common.create_snapshot, SNAPSHOT) self.common.exec_webapi = ( mock.Mock(side_effect=common.SynoAuthError(reason='dont care'))) self.assertRaises(common.SynoAuthError, self.common.create_snapshot, SNAPSHOT) def test_delete_snapshot(self): out = { 'success': True } self.common.exec_webapi = mock.Mock(return_value=out) self.common._get_metadata_value = ( mock.Mock(side_effect=[ DS_SNAPSHOT_UUID, exception.SnapshotMetadataNotFound(message='dont care'), exception.MetadataAbsent])) result = self.common.delete_snapshot(SNAPSHOT) (self.common._get_metadata_value. assert_called_with(SNAPSHOT, self.common.METADATA_DS_SNAPSHOT_UUID)) (self.common.exec_webapi. 
assert_called_with('SYNO.Core.ISCSI.LUN', 'delete_snapshot', mock.ANY, snapshot_uuid=DS_SNAPSHOT_UUID, deleted_by='Cinder')) self.assertIsNone(result) result = self.common.delete_snapshot(SNAPSHOT) self.assertIsNone(result) self.assertRaises(exception.MetadataAbsent, self.common.delete_snapshot, SNAPSHOT) def test_create_volume_from_snapshot(self): out = { 'success': True } new_volume = copy.deepcopy(NEW_VOLUME) new_volume['size'] = 20 self.common.exec_webapi = mock.Mock(return_value=out) self.common._get_metadata_value = ( mock.Mock(side_effect=[ DS_SNAPSHOT_UUID, DS_SNAPSHOT_UUID, exception.SnapshotMetadataNotFound(message='dont care'), common.SynoAuthError(message='dont care')])) self.common._check_lun_status_normal = ( mock.Mock(side_effect=[True, False, True, True])) self.common.extend_volume = mock.Mock() result = self.common.create_volume_from_snapshot(new_volume, SNAPSHOT) (self.common._get_metadata_value. assert_called_with(SNAPSHOT, self.common.METADATA_DS_SNAPSHOT_UUID)) (self.common.exec_webapi. assert_called_with('SYNO.Core.ISCSI.LUN', 'clone_snapshot', mock.ANY, src_lun_uuid=SNAPSHOT['volume']['name'], snapshot_uuid=DS_SNAPSHOT_UUID, cloned_lun_name=new_volume['name'], clone_type='CINDER')) self.common.extend_volume.assert_called_once_with(new_volume, new_volume['size']) self.assertIsNone(result) self.assertRaises(exception.VolumeDriverException, self.common.create_volume_from_snapshot, new_volume, SNAPSHOT) self.assertRaises(exception.SnapshotMetadataNotFound, self.common.create_volume_from_snapshot, new_volume, SNAPSHOT) self.assertRaises(common.SynoAuthError, self.common.create_volume_from_snapshot, new_volume, SNAPSHOT) def test_get_iqn_and_trgid(self): location = '%s:3260,%d %s 1' % (IP, 1, IQN) result = self.common.get_iqn_and_trgid(location) self.assertEqual((IQN, 1), result) location = '' self.assertRaises(exception.InvalidParameterValue, self.common.get_iqn_and_trgid, location) location = 'BADINPUT' self.assertRaises(exception.InvalidInput, self.common.get_iqn_and_trgid, location) location = '%s:3260 %s 1' % (IP, IQN) self.assertRaises(exception.InvalidInput, self.common.get_iqn_and_trgid, location) def test_get_iscsi_properties(self): volume = copy.deepcopy(VOLUME) iscsi_properties = { 'target_discovered': False, 'target_iqn': IQN, 'target_portal': '%s:3260' % IP, 'volume_id': VOLUME['id'], 'access_mode': 'rw', 'discard': False, 'auth_method': 'CHAP', 'auth_username': CHAP_AUTH_USERNAME, 'auth_password': CHAP_AUTH_PASSWORD } self.common.get_ip = mock.Mock(return_value=IP) self.conf.safe_get = mock.Mock(return_value=[]) result = self.common.get_iscsi_properties(volume) self.assertDictEqual(iscsi_properties, result) volume['provider_location'] = '' self.assertRaises(exception.InvalidParameterValue, self.common.get_iscsi_properties, volume) def test_get_iscsi_properties_multipath(self): volume = copy.deepcopy(VOLUME) iscsi_properties = { 'target_discovered': False, 'target_iqn': IQN, 'target_iqns': [IQN] * 3, 'target_lun': 0, 'target_luns': [0] * 3, 'target_portal': '%s:3260' % IP, 'target_portals': ['%s:3260' % IP, '10.0.0.2:3260', '10.0.0.3:3260'], 'volume_id': VOLUME['id'], 'access_mode': 'rw', 'discard': False, 'auth_method': 'CHAP', 'auth_username': CHAP_AUTH_USERNAME, 'auth_password': CHAP_AUTH_PASSWORD } self.common.get_ip = mock.Mock(return_value=IP) self.conf.safe_get = mock.Mock(return_value=['10.0.0.2', '10.0.0.3']) result = self.common.get_iscsi_properties(volume) self.assertDictEqual(iscsi_properties, result) volume['provider_location'] = '' 
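# With provider_location blanked out, the lookup below is expected to raise InvalidParameterValue.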
self.assertRaises(exception.InvalidParameterValue, self.common.get_iscsi_properties, volume) def test_get_iscsi_properties_without_chap(self): volume = copy.deepcopy(VOLUME) iscsi_properties = { 'target_discovered': False, 'target_iqn': IQN, 'target_portal': '%s:3260' % IP, 'volume_id': VOLUME['id'], 'access_mode': 'rw', 'discard': False } self.common.get_ip = mock.Mock(return_value=IP) self.conf.safe_get = mock.Mock(return_value=[]) volume['provider_auth'] = 'abcde' result = self.common.get_iscsi_properties(volume) self.assertDictEqual(iscsi_properties, result) volume['provider_auth'] = '' result = self.common.get_iscsi_properties(volume) self.assertDictEqual(iscsi_properties, result) del volume['provider_auth'] result = self.common.get_iscsi_properties(volume) self.assertDictEqual(iscsi_properties, result) def test_create_iscsi_export(self): self.common._target_create = ( mock.Mock(return_value=(IQN, TRG_ID, VOLUME['provider_auth']))) self.common._lun_map_target = mock.Mock() iqn, trg_id, provider_auth = ( self.common.create_iscsi_export(VOLUME['name'], VOLUME['id'])) self.common._target_create.assert_called_with(VOLUME['id']) self.common._lun_map_target.assert_called_with(VOLUME['name'], trg_id) self.assertEqual((IQN, TRG_ID, VOLUME['provider_auth']), (iqn, trg_id, provider_auth)) def test_remove_iscsi_export(self): trg_id = TRG_ID self.common._lun_unmap_target = mock.Mock() self.common._target_delete = mock.Mock() result = self.common.remove_iscsi_export(VOLUME['name'], trg_id) self.assertIsNone(result) self.common._lun_unmap_target.assert_called_with(VOLUME['name'], TRG_ID) self.common._target_delete.assert_called_with(TRG_ID) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/volume/drivers/synology/test_synology_iscsi.py0000664000175000017500000003155100000000000030017 0ustar00zuulzuul00000000000000# Copyright (c) 2016 Synology Co., Ltd. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
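# NOTE: the driver-level tests in this module treat synology_iscsi.SynoISCSIDriver
# as a thin wrapper around SynoCommon (exercised in detail above): each test swaps
# self.driver.common (or one of its methods) for a mock and only checks that the
# driver delegates with the expected arguments. A minimal sketch of that pattern,
# using the fixtures defined below:
#
#     driver.common.create_volume = mock.Mock()
#     driver.create_volume(VOLUME)
#     driver.common.create_volume.assert_called_with(VOLUME)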
"""Tests for the Synology iSCSI volume driver.""" from unittest import mock from cinder import exception from cinder.tests.unit import fake_constants as fake from cinder.tests.unit import test from cinder.volume import configuration as conf from cinder.volume.drivers.synology import synology_common as common from cinder.volume.drivers.synology import synology_iscsi VOLUME_ID = fake.VOLUME_ID TARGET_NAME_PREFIX = 'Cinder-Target-' IP = '10.10.10.10' IQN = 'iqn.2000-01.com.synology:' + TARGET_NAME_PREFIX + VOLUME_ID TRG_ID = 1 VOLUME = { 'name': fake.VOLUME_NAME, 'id': VOLUME_ID, 'display_name': 'fake_volume', 'size': 10, 'provider_location': '%s:3260,%d %s 1' % (IP, TRG_ID, IQN), } NEW_VOLUME_ID = fake.VOLUME2_ID IQN2 = 'iqn.2000-01.com.synology:' + TARGET_NAME_PREFIX + NEW_VOLUME_ID NEW_TRG_ID = 2 NEW_VOLUME = { 'name': fake.VOLUME2_NAME, 'id': NEW_VOLUME_ID, 'display_name': 'new_fake_volume', 'size': 10, 'provider_location': '%s:3260,%d %s 1' % (IP, NEW_TRG_ID, IQN2), } SNAPSHOT_ID = fake.SNAPSHOT_ID SNAPSHOT = { 'name': fake.SNAPSHOT_NAME, 'id': SNAPSHOT_ID, 'volume_id': VOLUME_ID, 'volume_name': VOLUME['name'], 'volume_size': 10, 'display_name': 'fake_snapshot', } DS_SNAPSHOT_UUID = 'ca86a56a-40d8-4210-974c-ef15dbf01cba' SNAPSHOT_METADATA = { 'metadata': { 'ds_snapshot_UUID': DS_SNAPSHOT_UUID } } INITIATOR_IQN = 'iqn.1993-08.org.debian:01:604af6a341' CONNECTOR = { 'initiator': INITIATOR_IQN, } CONTEXT = { } LOCAL_PATH = '/dev/isda' IMAGE_SERVICE = 'image_service' IMAGE_ID = 1 IMAGE_META = { 'id': IMAGE_ID } NODE_UUID = '72003c93-2db2-4f00-a169-67c5eae86bb1' HOST = { } class SynoISCSIDriverTestCase(test.TestCase): @mock.patch.object(common.SynoCommon, '_get_node_uuid', return_value=NODE_UUID) @mock.patch.object(common, 'APIRequest') def setUp(self, _request, _get_node_uuid): super(SynoISCSIDriverTestCase, self).setUp() self.conf = self.setup_configuration() self.driver = synology_iscsi.SynoISCSIDriver(configuration=self.conf) self.driver.common = common.SynoCommon(self.conf, 'iscsi') def setup_configuration(self): config = mock.Mock(spec=conf.Configuration) config.use_chap_auth = False config.target_protocol = 'iscsi' config.target_ip_address = IP config.synology_admin_port = 5000 config.synology_username = 'admin' config.synology_password = 'admin' config.synology_ssl_verify = True config.synology_one_time_pass = '123456' config.volume_dd_blocksize = 1 return config def test_check_for_setup_error(self): self.driver.common.check_for_setup_error = mock.Mock() result = self.driver.check_for_setup_error() self.driver.common.check_for_setup_error.assert_called_with() self.assertIsNone(result) def test_create_volume(self): self.driver.common.create_volume = mock.Mock() result = self.driver.create_volume(VOLUME) self.driver.common.create_volume.assert_called_with(VOLUME) self.assertIsNone(result) def test_delete_volume(self): self.driver.common.delete_volume = mock.Mock() result = self.driver.delete_volume(VOLUME) self.driver.common.delete_volume.assert_called_with(VOLUME) self.assertIsNone(result) def test_create_cloned_volume(self): self.driver.common.create_cloned_volume = mock.Mock() result = self.driver.create_cloned_volume(VOLUME, NEW_VOLUME) self.driver.common.create_cloned_volume.assert_called_with( VOLUME, NEW_VOLUME) self.assertIsNone(result) def test_extend_volume(self): new_size = 20 self.driver.common.extend_volume = mock.Mock() result = self.driver.extend_volume(VOLUME, new_size) self.driver.common.extend_volume.assert_called_with( VOLUME, new_size) self.assertIsNone(result) def 
test_extend_volume_wrong_size(self): wrong_new_size = 1 self.driver.common.extend_volume = mock.Mock() result = self.driver.extend_volume(VOLUME, wrong_new_size) self.driver.common.extend_volume.assert_not_called() self.assertIsNone(result) def test_create_volume_from_snapshot(self): self.driver.common.create_volume_from_snapshot = mock.Mock() result = self.driver.create_volume_from_snapshot(VOLUME, SNAPSHOT) (self.driver.common. create_volume_from_snapshot.assert_called_with(VOLUME, SNAPSHOT)) self.assertIsNone(result) def test_update_migrated_volume(self): fake_ret = {'_name_id': VOLUME['id']} status = '' self.driver.common.update_migrated_volume = ( mock.Mock(return_value=fake_ret)) result = self.driver.update_migrated_volume(CONTEXT, VOLUME, NEW_VOLUME, status) (self.driver.common.update_migrated_volume. assert_called_with(VOLUME, NEW_VOLUME)) self.assertEqual(fake_ret, result) def test_create_snapshot(self): self.driver.common.create_snapshot = ( mock.Mock(return_value=SNAPSHOT_METADATA)) result = self.driver.create_snapshot(SNAPSHOT) self.driver.common.create_snapshot.assert_called_with(SNAPSHOT) self.assertDictEqual(SNAPSHOT_METADATA, result) def test_delete_snapshot(self): self.driver.common.delete_snapshot = mock.Mock() result = self.driver.delete_snapshot(SNAPSHOT) self.driver.common.delete_snapshot.assert_called_with(SNAPSHOT) self.assertIsNone(result) def test_get_volume_stats(self): self.driver.common.update_volume_stats = mock.MagicMock() result = self.driver.get_volume_stats(True) self.driver.common.update_volume_stats.assert_called_with() self.assertEqual(self.driver.stats, result) result = self.driver.get_volume_stats(False) self.driver.common.update_volume_stats.assert_called_with() self.assertEqual(self.driver.stats, result) def test_get_volume_stats_error(self): self.driver.common.update_volume_stats = ( mock.MagicMock(side_effect=exception.VolumeDriverException( message='dont care'))) self.assertRaises(exception.VolumeDriverException, self.driver.get_volume_stats, True) def test_create_export(self): provider_auth = 'CHAP username password' provider_location = '%s:3260,%d %s 1' % (IP, TRG_ID, IQN) self.driver.common.is_lun_mapped = mock.Mock(return_value=False) self.driver.common.create_iscsi_export = ( mock.Mock(return_value=(IQN, TRG_ID, provider_auth))) self.driver.common.get_provider_location = ( mock.Mock(return_value=provider_location)) result = self.driver.create_export(CONTEXT, VOLUME, CONNECTOR) self.driver.common.is_lun_mapped.assert_called_with(VOLUME['name']) (self.driver.common.create_iscsi_export. 
assert_called_with(VOLUME['name'], VOLUME['id'])) self.driver.common.get_provider_location.assert_called_with(IQN, TRG_ID) self.assertEqual(provider_location, result['provider_location']) self.assertEqual(provider_auth, result['provider_auth']) def test_create_export_is_mapped(self): self.driver.common.is_lun_mapped = mock.Mock(return_value=True) self.driver.common.create_iscsi_export = mock.Mock() self.driver.common.get_provider_location = mock.Mock() result = self.driver.create_export(CONTEXT, VOLUME, CONNECTOR) self.driver.common.is_lun_mapped.assert_called_with(VOLUME['name']) self.driver.common.create_iscsi_export.assert_not_called() self.driver.common.get_provider_location.assert_not_called() self.assertEqual({}, result) def test_create_export_error(self): provider_location = '%s:3260,%d %s 1' % (IP, TRG_ID, IQN) self.driver.common.is_lun_mapped = mock.Mock(return_value=False) self.driver.common.create_iscsi_export = ( mock.Mock(side_effect=exception.InvalidInput(reason='dont care'))) self.driver.common.get_provider_location = ( mock.Mock(return_value=provider_location)) self.assertRaises(exception.ExportFailure, self.driver.create_export, CONTEXT, VOLUME, CONNECTOR) self.driver.common.is_lun_mapped.assert_called_with(VOLUME['name']) self.driver.common.get_provider_location.assert_not_called() def test_remove_export(self): self.driver.common.is_lun_mapped = mock.Mock(return_value=True) self.driver.common.remove_iscsi_export = mock.Mock() self.driver.common.get_iqn_and_trgid = ( mock.Mock(return_value=('', TRG_ID))) _, trg_id = (self.driver.common. get_iqn_and_trgid(VOLUME['provider_location'])) result = self.driver.remove_export(CONTEXT, VOLUME) self.driver.common.is_lun_mapped.assert_called_with(VOLUME['name']) (self.driver.common.get_iqn_and_trgid. assert_called_with(VOLUME['provider_location'])) (self.driver.common.remove_iscsi_export. 
assert_called_with(VOLUME['name'], trg_id)) self.assertIsNone(result) def test_remove_export_not_mapped(self): self.driver.common.is_lun_mapped = mock.Mock(return_value=False) self.driver.common.remove_iscsi_export = mock.Mock() self.driver.common.get_iqn_and_trgid = mock.Mock() result = self.driver.remove_export(CONTEXT, VOLUME) self.driver.common.is_lun_mapped.assert_called_with(VOLUME['name']) self.driver.common.get_iqn_and_trgid.assert_not_called() self.driver.common.remove_iscsi_export.assert_not_called() self.assertIsNone(result) def test_remove_export_error(self): self.driver.common.is_lun_mapped = mock.Mock(return_value=True) self.driver.common.remove_iscsi_export = ( mock.Mock(side_effect= exception.RemoveExportException( volume=VOLUME, reason='dont care'))) self.assertRaises(exception.RemoveExportException, self.driver.remove_export, CONTEXT, VOLUME) def test_remove_export_error_get_lun_mapped(self): self.driver.common.remove_iscsi_export = mock.Mock() self.driver.common.get_iqn_and_trgid = mock.Mock() self.driver.common.is_lun_mapped = ( mock.Mock(side_effect=common.SynoLUNNotExist( message='dont care'))) result = self.driver.remove_export(CONTEXT, VOLUME) self.assertIsNone(result) self.driver.common.get_iqn_and_trgid.assert_not_called() self.driver.common.remove_iscsi_export.assert_not_called() def test_initialize_connection(self): iscsi_properties = { 'target_discovered': False, 'target_iqn': IQN, 'target_portal': '%s:3260' % self.conf.target_ip_address, 'volume_id': VOLUME['id'], 'access_mode': 'rw', 'discard': False } self.driver.common.get_iscsi_properties = ( mock.Mock(return_value=iscsi_properties)) self.conf.safe_get = mock.Mock(return_value='iscsi') result = self.driver.initialize_connection(VOLUME, CONNECTOR) self.driver.common.get_iscsi_properties.assert_called_with(VOLUME) self.conf.safe_get.assert_called_with('target_protocol') self.assertEqual('iscsi', result['driver_volume_type']) self.assertDictEqual(iscsi_properties, result['data']) def test_initialize_connection_error(self): self.driver.common.get_iscsi_properties = ( mock.Mock(side_effect=exception.InvalidInput(reason='dont care'))) self.assertRaises(exception.InvalidInput, self.driver.initialize_connection, VOLUME, CONNECTOR) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/volume/drivers/test_datera.py0000664000175000017500000004537600000000000024331 0ustar00zuulzuul00000000000000# Copyright 2020 Datera # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
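# NOTE: the Datera tests below rely on the external dfs_sdk client, which may not
# be installed in the unit-test environment; it is replaced in sys.modules with a
# MagicMock before the driver import, and the datc retry sleeps are zeroed so the
# failure-path tests return immediately. A minimal sketch of the same pattern for
# an interactive run (assumes only the standard cinder test dependencies):
#
#     import sys
#     from unittest import mock
#     sys.modules['dfs_sdk'] = mock.MagicMock()   # must precede the driver import
#     from cinder.volume.drivers.datera import datera_iscsi as datera  # noqa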
import sys from unittest import mock import uuid from cinder import context from cinder import exception from cinder.tests.unit import test from cinder.volume import configuration as conf from cinder.volume import volume_types sys.modules['dfs_sdk'] = mock.MagicMock() from cinder.volume.drivers.datera import datera_iscsi as datera # noqa datera.datc.DEFAULT_SI_SLEEP = 0 datera.datc.DEFAULT_SNAP_SLEEP = 0 OS_PREFIX = datera.datc.OS_PREFIX UNMANAGE_PREFIX = datera.datc.UNMANAGE_PREFIX DateraAPIException = datera.datc.DateraAPIException class DateraVolumeTestCasev22(test.TestCase): def setUp(self): self.cfg = mock.Mock(spec=conf.Configuration) self.cfg.san_ip = '127.0.0.1' self.cfg.datera_api_port = '7717' self.cfg.san_is_local = True self.cfg.datera_num_replicas = 1 self.cfg.datera_503_timeout = 0.01 self.cfg.datera_503_interval = 0.001 self.cfg.datera_debug = False self.cfg.san_login = 'user' self.cfg.san_password = 'pass' self.cfg.datera_tenant_id = '/root/test-tenant' self.cfg.driver_client_cert = None self.cfg.driver_client_cert_key = None self.cfg.datera_disable_profiler = False self.cfg.datera_ldap_server = "" self.cfg.datera_volume_type_defaults = {} self.cfg.datera_disable_template_override = False self.cfg.datera_disable_extended_metadata = False self.cfg.datera_enable_image_cache = False self.cfg.datera_image_cache_volume_type_id = "" self.cfg.filter_function = lambda: None self.cfg.goodness_function = lambda: None self.cfg.use_chap_auth = False self.cfg.chap_username = "" self.cfg.chap_password = "" super(DateraVolumeTestCasev22, self).setUp() mock_exec = mock.Mock() mock_exec.return_value = ('', '') self.driver = datera.DateraDriver(execute=mock_exec, configuration=self.cfg) self.driver.api = mock.MagicMock() self.driver.apiv = "2.2" self.driver.set_initialized() # No-op config getter self.driver.configuration.get = lambda *args, **kwargs: {} # self.addCleanup(self.api_patcher.stop) self.driver.datera_version = "3.3.3" def test_volume_create_success(self): testvol = _stub_volume() self.assertIsNone(self.driver.create_volume(testvol)) def test_volume_create_fails(self): testvol = _stub_volume() self.driver.api.app_instances.create.side_effect = DateraAPIException self.assertRaises(DateraAPIException, self.driver.create_volume, testvol) @mock.patch.object(volume_types, 'get_volume_type') def test_create_volume_with_extra_specs(self, mock_get_type): mock_get_type.return_value = { 'name': u'The Best', 'qos_specs_id': None, 'deleted': False, 'created_at': '2015-08-14 04:18:11', 'updated_at': None, 'extra_specs': { u'volume_backend_name': u'datera', u'qos:max_iops_read': u'2000', u'qos:max_iops_write': u'4000', u'qos:max_iops_total': u'4000' }, 'is_public': True, 'deleted_at': None, 'id': u'dffb4a83-b8fb-4c19-9f8c-713bb75db3b1', 'description': None } mock_volume = _stub_volume( volume_type_id='dffb4a83-b8fb-4c19-9f8c-713bb75db3b1' ) self.assertIsNone(self.driver.create_volume(mock_volume)) self.assertTrue(mock_get_type.called) def test_create_cloned_volume_success(self): testvol = _stub_volume() ref = _stub_volume(id=str(uuid.uuid4())) self.assertIsNone(self.driver.create_cloned_volume(testvol, ref)) def test_create_cloned_volume_success_larger(self): newsize = 2 testvol = _stub_volume(size=newsize) ref = _stub_volume(id=str(uuid.uuid4())) mock_extend = mock.MagicMock() self.driver._extend_volume_2_2 = mock_extend self.driver._extend_volume_2_1 = mock_extend self.driver.create_cloned_volume(testvol, ref) mock_extend.assert_called_once_with(testvol, newsize) def 
test_create_cloned_volume_fails(self): testvol = _stub_volume() ref = _stub_volume(id=str(uuid.uuid4())) self.driver.api.app_instances.create.side_effect = DateraAPIException self.assertRaises(DateraAPIException, self.driver.create_cloned_volume, testvol, ref) def test_delete_volume_success(self): testvol = _stub_volume() self.driver.api.app_instances.delete.return_value = {} self.assertIsNone(self.driver.delete_volume(testvol)) def test_delete_volume_not_found(self): testvol = _stub_volume() self.driver.api.app_instances.list.side_effect = exception.NotFound self.assertIsNone(self.driver.delete_volume(testvol)) def test_delete_volume_fails(self): testvol = _stub_volume() self.driver.api.app_instances.list.side_effect = DateraAPIException self.assertRaises(DateraAPIException, self.driver.delete_volume, testvol) def test_ensure_export_success(self): testvol = _stub_volume() ctxt = context.get_admin_context() self.assertIsNone(self.driver.ensure_export(ctxt, testvol, None)) def test_ensure_export_fails(self): # This can't fail because it's a no-op testvol = _stub_volume() ctxt = context.get_admin_context() self.assertIsNone(self.driver.ensure_export(ctxt, testvol, None)) def test_create_export_target_does_not_exist_success(self): testvol = _stub_volume() aimock = mock.MagicMock() simock = mock.MagicMock() simock.reload.return_value = simock aimock.storage_instances.list.return_value = [simock] simock.op_state = "available" self.driver.cvol_to_ai = mock.Mock() self.driver.cvol_to_ai.return_value = aimock self.assertIsNone(self.driver.create_export(None, testvol, None)) def test_create_export_fails(self): testvol = _stub_volume() aimock = mock.MagicMock() simock = mock.MagicMock() simock.reload.return_value = simock aimock.storage_instances.list.side_effect = DateraAPIException simock.op_state = "available" self.driver.cvol_to_ai = mock.Mock() self.driver.cvol_to_ai.return_value = aimock self.assertRaises(DateraAPIException, self.driver.create_export, None, testvol, None) def test_initialize_connection_success(self): testvol = _stub_volume() aimock = mock.MagicMock() simock = mock.MagicMock() simock.access = {"ips": ["test-ip"], "iqn": "test-iqn"} simock.reload.return_value = simock aimock.storage_instances.list.return_value = [simock] self.driver.cvol_to_ai = mock.Mock() self.driver.cvol_to_ai.return_value = aimock self.assertEqual(self.driver.initialize_connection(testvol, {}), {'data': {'discard': False, 'target_discovered': False, 'target_iqn': 'test-iqn', 'target_lun': 0, 'target_portal': 'test-ip:3260', 'volume_id': testvol['id']}, 'driver_volume_type': 'iscsi'}) def test_initialize_connection_fails(self): testvol = _stub_volume() aimock = mock.MagicMock() simock = mock.MagicMock() simock.access = {"ips": ["test-ip"], "iqn": "test-iqn"} simock.reload.return_value = simock aimock.storage_instances.list.side_effect = DateraAPIException self.driver.cvol_to_ai = mock.Mock() self.driver.cvol_to_ai.return_value = aimock self.assertRaises(DateraAPIException, self.driver.initialize_connection, testvol, {}) def test_detach_volume_success(self): testvol = _stub_volume() self.driver.cvol_to_ai = mock.MagicMock() aimock = mock.MagicMock() aimock.set.return_value = {} self.driver.cvol_to_ai.return_value = aimock ctxt = context.get_admin_context() self.assertIsNone(self.driver.detach_volume(ctxt, testvol)) def test_detach_volume_fails(self): testvol = _stub_volume() self.driver.cvol_to_ai = mock.MagicMock() aimock = mock.MagicMock() aimock.set.side_effect = DateraAPIException 
self.driver.cvol_to_ai.return_value = aimock ctxt = context.get_admin_context() self.assertRaises(DateraAPIException, self.driver.detach_volume, ctxt, testvol) def test_detach_volume_not_found(self): testvol = _stub_volume() self.driver.cvol_to_ai = mock.MagicMock() aimock = mock.MagicMock() aimock.set.side_effect = exception.NotFound self.driver.cvol_to_ai.return_value = aimock ctxt = context.get_admin_context() self.assertIsNone(self.driver.detach_volume(ctxt, testvol)) def test_create_snapshot_success(self): testsnap = _stub_snapshot(volume_id=str(uuid.uuid4())) volmock = mock.MagicMock() snapmock = mock.MagicMock() snapmock.reload.return_value = snapmock snapmock.uuid = testsnap['id'] snapmock.op_state = "available" volmock.snapshots.create.return_value = snapmock self.driver.cvol_to_dvol = mock.MagicMock() self.driver.cvol_to_dvol.return_value = volmock self.assertIsNone(self.driver.create_snapshot(testsnap)) def test_create_snapshot_fails(self): testsnap = _stub_snapshot(volume_id=str(uuid.uuid4())) self.driver.api.app_instances.list.side_effect = DateraAPIException self.assertRaises(DateraAPIException, self.driver.create_snapshot, testsnap) def test_delete_snapshot_success(self): testsnap = _stub_snapshot(volume_id=str(uuid.uuid4())) self.assertIsNone(self.driver.delete_snapshot(testsnap)) def test_delete_snapshot_not_found(self): testsnap = _stub_snapshot(volume_id=str(uuid.uuid4())) self.driver.cvol_to_dvol = mock.MagicMock() aimock = mock.MagicMock() aimock.snapshots.list.side_effect = exception.NotFound self.driver.cvol_to_dvol.return_value = aimock self.assertIsNone(self.driver.delete_snapshot(testsnap)) def test_delete_snapshot_fails(self): testsnap = _stub_snapshot(volume_id=str(uuid.uuid4())) self.driver.cvol_to_dvol = mock.MagicMock() aimock = mock.MagicMock() aimock.snapshots.list.side_effect = DateraAPIException self.driver.cvol_to_dvol.return_value = aimock self.assertRaises(DateraAPIException, self.driver.delete_snapshot, testsnap) def test_create_volume_from_snapshot_success(self): testsnap = _stub_snapshot(volume_id=str(uuid.uuid4())) testvol = _stub_volume() volmock = mock.MagicMock() snapmock = mock.MagicMock() snapmock.reload.return_value = snapmock snapmock.uuid = testsnap['id'] snapmock.op_state = "available" self.driver.cvol_to_dvol = mock.MagicMock() self.driver.cvol_to_dvol.return_value = volmock volmock.snapshots.list.return_value = [snapmock] self.assertIsNone(self.driver.create_volume_from_snapshot( testvol, testsnap)) def test_create_volume_from_snapshot_fails(self): testsnap = _stub_snapshot(volume_id=str(uuid.uuid4())) testvol = _stub_volume() self.driver.cvol_to_dvol = mock.MagicMock() aimock = mock.MagicMock() aimock.snapshots.list.side_effect = DateraAPIException self.driver.cvol_to_dvol.return_value = aimock self.assertRaises(DateraAPIException, self.driver.create_volume_from_snapshot, testvol, testsnap) def test_extend_volume_success(self): newsize = 2 testvol = _stub_volume() mockvol = mock.MagicMock() mockvol.size = newsize self.driver.cvol_to_dvol = mock.MagicMock() self.driver.cvol_to_dvol.return_value = mockvol self.driver._offline_flip_2_2 = mock.MagicMock() self.driver._offline_flip_2_1 = mock.MagicMock() self.assertIsNone(self.driver.extend_volume(testvol, newsize)) def test_extend_volume_fails(self): newsize = 2 testvol = _stub_volume() mockvol = mock.MagicMock() mockvol.size = newsize mockvol.set.side_effect = DateraAPIException self.driver.cvol_to_dvol = mock.MagicMock() self.driver.cvol_to_dvol.return_value = mockvol 
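# The offline-flip helpers are stubbed below; the DateraAPIException wired into mockvol.set is what extend_volume is expected to surface.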
self.driver._offline_flip_2_2 = mock.MagicMock() self.driver._offline_flip_2_1 = mock.MagicMock() self.assertRaises(DateraAPIException, self.driver.extend_volume, testvol, newsize) def test_manage_existing(self): existing_ref = {'source-name': "A:B:C:D"} testvol = _stub_volume() self.driver.cvol_to_ai = mock.MagicMock() self.assertIsNone(self.driver.manage_existing(testvol, existing_ref)) def test_manage_existing_wrong_ref(self): existing_ref = {'source-name': "ABCD"} testvol = _stub_volume() self.driver.cvol_to_ai = mock.MagicMock() self.assertRaises(exception.ManageExistingInvalidReference, self.driver.manage_existing, testvol, existing_ref) def test_manage_existing_get_size(self): existing_ref = {'source-name': "A:B:C:D"} testvol = _stub_volume() volmock = mock.MagicMock() volmock.size = testvol['size'] self.driver.cvol_to_dvol = mock.MagicMock() self.driver.cvol_to_dvol.return_value = volmock self.assertEqual(self.driver.manage_existing_get_size( testvol, existing_ref), testvol['size']) def test_manage_existing_get_size_wrong_ref(self): existing_ref = {'source-name': "ABCD"} testvol = _stub_volume() self.driver.cvol_to_ai = mock.MagicMock() self.assertRaises(exception.ManageExistingInvalidReference, self.driver.manage_existing_get_size, testvol, existing_ref) def test_get_manageable_volumes(self): testvol = _stub_volume() v1 = {'reference': {'source-name': 'some-ai:storage-1:volume-1'}, 'size': 1, 'safe_to_manage': True, 'reason_not_safe': '', 'cinder_id': None, 'extra_info': {'snapshots': '[]'}} v2 = {'reference': {'source-name': 'some-other-ai:storage-1:volume-1'}, 'size': 2, 'safe_to_manage': True, 'reason_not_safe': '', 'cinder_id': None, 'extra_info': {'snapshots': '[]'}} mock1 = mock.MagicMock() mock1.__getitem__.side_effect = ['some-ai'] mock1.name = 'some-ai' mocksi1 = mock.MagicMock() mocksi1.name = "storage-1" mocksi1.__getitem__.side_effect = [[mock.MagicMock()]] mock1.storage_instances.list.return_value = [mocksi1] mockvol1 = mock.MagicMock() mockvol1.name = "volume-1" mockvol1.size = v1['size'] mocksi1.volumes.list.return_value = [mockvol1] mock2 = mock.MagicMock() mock2.__getitem__.side_effect = ['some-other-ai'] mock2.name = 'some-other-ai' mocksi2 = mock.MagicMock() mocksi2.name = "storage-1" mocksi2.__getitem__.side_effect = [[mock.MagicMock()]] mock2.storage_instances.list.return_value = [mocksi2] mockvol2 = mock.MagicMock() mockvol2.name = "volume-1" mockvol2.size = v2['size'] mocksi2.volumes.list.return_value = [mockvol2] listmock = mock.MagicMock() listmock.return_value = [mock1, mock2] self.driver.api.app_instances.list = listmock marker = mock.MagicMock() limit = mock.MagicMock() offset = mock.MagicMock() sort_keys = mock.MagicMock() sort_dirs = mock.MagicMock() with mock.patch( 'cinder.volume.volume_utils.paginate_entries_list') \ as mpage: self.driver.get_manageable_volumes( [testvol], marker, limit, offset, sort_keys, sort_dirs) mpage.assert_called_once_with( [v1, v2], marker, limit, offset, sort_keys, sort_dirs) def test_unmanage(self): testvol = _stub_volume() self.assertIsNone(self.driver.unmanage(testvol)) class DateraVolumeTestCasev21(DateraVolumeTestCasev22): def setUp(self): super(DateraVolumeTestCasev21, self).setUp() self.driver.api = mock.MagicMock() self.driver.apiv = '2.1' def _stub_volume(*args, **kwargs): uuid = 'c20aba21-6ef6-446b-b374-45733b4883ba' name = 'volume-00000001' size = 1 volume = {} volume['id'] = kwargs.get('id', uuid) volume['project_id'] = "test-project" volume['display_name'] = kwargs.get('display_name', name) volume['size'] = 
kwargs.get('size', size) volume['provider_location'] = kwargs.get('provider_location', None) volume['volume_type_id'] = kwargs.get('volume_type_id', None) return volume def _stub_snapshot(*args, **kwargs): uuid = '0bb34f0c-fea4-48e0-bf96-591120ac7e3c' name = 'snapshot-00000001' volume_size = 1 snap = {} snap['id'] = kwargs.get('id', uuid) snap['project_id'] = "test-project" snap['display_name'] = kwargs.get('display_name', name) snap['volume_id'] = kwargs.get('volume_id', None) snap['volume_size'] = kwargs.get('volume_size', volume_size) return snap ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/volume/drivers/test_fujitsu_dx.py0000664000175000017500000027130300000000000025244 0ustar00zuulzuul00000000000000# Copyright (c) 2015 FUJITSU LIMITED # Copyright (c) 2012 EMC Corporation, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import tempfile from unittest import mock from oslo_utils import units from cinder import context from cinder import exception from cinder import ssh_utils from cinder.tests.unit import test from cinder.volume import configuration as conf from cinder.volume.drivers.fujitsu.eternus_dx import constants as CONSTANTS with mock.patch.dict('sys.modules', pywbem=mock.Mock()): from cinder.volume.drivers.fujitsu.eternus_dx \ import eternus_dx_cli from cinder.volume.drivers.fujitsu.eternus_dx \ import eternus_dx_common as dx_common from cinder.volume.drivers.fujitsu.eternus_dx \ import eternus_dx_fc as dx_fc from cinder.volume.drivers.fujitsu.eternus_dx \ import eternus_dx_iscsi as dx_iscsi PRIVATE_KEY_PATH = '/etc/cinder/eternus' CONFIG_FILE_NAME = 'cinder_fujitsu_eternus_dx.xml' STORAGE_SYSTEM = '172.16.0.2' CONF = """ 172.16.0.2 5988 testuser testpass 10.0.0.3 abcd1234_TPP abcd1234_RG abcd1234_OSVD abcd1234_TPP """ TEST_VOLUME = { 'id': '3d6eeb5d-109b-4435-b891-d01415178490', 'name': 'volume1', 'display_name': 'volume1', 'provider_location': None, 'metadata': {}, 'size': 1, 'host': 'controller@113#abcd1234_TPP' } TEST_VOLUME2 = { 'id': '98179912-2495-42e9-97f0-6a0d3511700a', 'name': 'volume2', 'display_name': 'volume2', 'provider_location': None, 'metadata': {}, 'size': 1, 'host': 'controller@113#abcd1234_RG' } TEST_SNAP = { 'id': 'f47a8da3-d9e2-46aa-831f-0ef04158d5a1', 'volume_name': 'volume-3d6eeb5d-109b-4435-b891-d01415178490', 'name': 'snap1', 'display_name': 'test_snapshot', 'volume': TEST_VOLUME, 'volume_id': '3d6eeb5d-109b-4435-b891-d01415178490', } TEST_CLONE = { 'name': 'clone1', 'size': 1, 'volume_name': 'vol1', 'id': '391fb914-8a55-4384-a747-588641db3b15', 'project_id': 'project', 'display_name': 'clone1', 'display_description': 'volume created from snapshot', 'metadata': {}, 'host': 'controller@113#abcd1234_TPP' } TEST_VOLUME_QOS = { 'id': '7bd8b81f-137d-4140-85ce-d00281c91c84', 'name': 'qos', 'display_name': 'qos', 'provider_location': None, 'metadata': {}, 'size': 1, 'host': 'controller@113#abcd1234_TPP' } ISCSI_INITIATOR = 
'iqn.1993-08.org.debian:01:8261afe17e4c' ISCSI_TARGET_IP = '10.0.0.3' ISCSI_TARGET_IQN = 'iqn.2000-09.com.fujitsu:storage-system.eternus-dxl:0' FC_TARGET_WWN = ['500000E0DA000001', '500000E0DA000002'] TEST_WWPN = ['0123456789111111', '0123456789222222'] TEST_CONNECTOR = {'initiator': ISCSI_INITIATOR, 'wwpns': TEST_WWPN} STORAGE_IP = '172.16.0.2' TEST_USER = 'testuser' TEST_PASSWORD = 'testpass' STOR_CONF_SVC = 'FUJITSU_StorageConfigurationService' CTRL_CONF_SVC = 'FUJITSU_ControllerConfigurationService' REPL_SVC = 'FUJITSU_ReplicationService' STOR_VOL = 'FUJITSU_StorageVolume' SCSI_PROT_CTR = 'FUJITSU_AffinityGroupController' STOR_HWID = 'FUJITSU_StorageHardwareID' STOR_HWID_MNG_SVC = 'FUJITSU_StorageHardwareIDManagementService' STOR_POOL = 'FUJITSU_RAIDStoragePool' STOR_POOLS = ['FUJITSU_ThinProvisioningPool', 'FUJITSU_RAIDStoragePool'] AUTH_PRIV = 'FUJITSU_AuthorizedPrivilege' STOR_SYNC = 'FUJITSU_StorageSynchronized' PROT_CTRL_UNIT = 'CIM_ProtocolControllerForUnit' STORAGE_TYPE = 'abcd1234_TPP' STORAGE_TYPE2 = 'abcd1234_RG' LUNMASKCTRL_IDS = ['AFG0010_CM00CA00P00', 'AFG0011_CM01CA00P00'] MAP_STAT = '0' VOL_STAT = '0' FAKE_CAPACITY = 1170368102400 FAKE_REMAIN = 1168220618752 FAKE_PROVISION = 1024 # Volume1 in pool abcd1234_TPP FAKE_LUN_ID1 = '600000E00D2A0000002A011500140000' FAKE_LUN_NO1 = '0x0014' # Snapshot1 in pool abcd1234_OSVD FAKE_LUN_ID2 = '600000E00D2A0000002A0115001E0000' FAKE_LUN_NO2 = '0x001E' FAKE_SDV_NO = '0x001E' # Volume2 in pool abcd1234_RG FAKE_LUN_ID3 = '600000E00D2800000028075301140000' FAKE_LUN_NO3 = '0x0114' # VolumeQoS in pool abcd1234_TPP FAKE_LUN_ID_QOS = '600000E00D2A0000002A011500140000' FAKE_LUN_NO_QOS = '0x0014' FAKE_SYSTEM_NAME = 'ET603SA4621302115' # abcd1234_TPP pool FAKE_USEGB = 1 # abcd1234_RG pool FAKE_USEGB2 = 2 FAKE_POOLS = [{ 'path': {'InstanceID': 'FUJITSU:TPP0004'}, 'pool_name': 'abcd1234_TPP', 'useable_capacity_gb': int( (FAKE_CAPACITY / units.Mi * 20 - FAKE_PROVISION) / 1024), 'multiattach': True, 'thick_provisioning_support': False, 'provisioned_capacity_gb': FAKE_USEGB, 'thin_provisioning_support': True, 'free_capacity_gb': int(FAKE_CAPACITY / units.Gi - FAKE_USEGB), 'total_capacity_gb': int(FAKE_CAPACITY / units.Gi), 'max_over_subscription_ratio': '20.0', }, { 'path': {'InstanceID': 'FUJITSU:RSP0005'}, 'pool_name': 'abcd1234_RG', 'useable_capacity_gb': int(FAKE_CAPACITY / units.Gi - FAKE_USEGB2), 'multiattach': True, 'thick_provisioning_support': True, 'provisioned_capacity_gb': FAKE_USEGB2, 'total_volumes': 2, 'thin_provisioning_support': False, 'free_capacity_gb': int((FAKE_REMAIN * 1.0 / units.Mi) / 1024), 'total_capacity_gb': int(FAKE_CAPACITY / units.Gi), 'fragment_capacity_mb': FAKE_REMAIN * 1.0 / units.Mi, 'max_over_subscription_ratio': 1, }] FAKE_STATS = { 'driver_version': '1.4.8', 'storage_protocol': 'iSCSI', 'vendor_name': 'FUJITSU', 'QoS_support': True, 'volume_backend_name': 'volume_backend_name', 'shared_targets': True, 'backend_state': 'up', 'pools': FAKE_POOLS, } FAKE_STATS2 = { 'driver_version': '1.4.8', 'storage_protocol': 'FC', 'vendor_name': 'FUJITSU', 'QoS_support': True, 'volume_backend_name': 'volume_backend_name', 'shared_targets': True, 'backend_state': 'up', 'pools': FAKE_POOLS, } # Volume1 in pool abcd1234_TPP FAKE_KEYBIND1 = { 'SystemName': STORAGE_SYSTEM, 'DeviceID': FAKE_LUN_ID1, } # Volume2 in pool abcd1234_RG FAKE_KEYBIND3 = { 'SystemName': STORAGE_SYSTEM, 'DeviceID': FAKE_LUN_ID3, } # Volume QOS in pool abcd1234_TPP FAKE_KEYBIND_QOS = { 'SystemName': STORAGE_SYSTEM, 'DeviceID': FAKE_LUN_ID_QOS, } # Volume1 
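# Each FAKE_LOCATION* dict below is stringified and stored as the volume's
# provider_location in the corresponding FAKE_MODEL_INFO* structure.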
FAKE_LOCATION1 = { 'classname': 'FUJITSU_StorageVolume', 'keybindings': FAKE_KEYBIND1, 'vol_name': 'FJosv_0qJ4rpOHgFE8ipcJOMfBmg==' } # Clone Volume FAKE_CLONE_LOCATION = { 'classname': 'FUJITSU_StorageVolume', 'keybindings': FAKE_KEYBIND1, 'vol_name': 'FJosv_UkCZqMFZW3SU_JzxjHiKfg==' } # Volume2 FAKE_LOCATION3 = { 'classname': 'FUJITSU_StorageVolume', 'keybindings': FAKE_KEYBIND3, 'vol_name': 'FJosv_4whcadwDac7ANKHA2O719A==' } # VolumeQOS FAKE_LOCATION_QOS = { 'classname': 'FUJITSU_StorageVolume', 'keybindings': FAKE_KEYBIND_QOS, 'vol_name': 'FJosv_mIsapeuZOaSXz4LYTqFcug==' } # Volume1 metadata info. # Here is a misspelling, and the right value should be "Thinprovisioning_POOL". # It would not be compatible with the metadata of the legacy volumes, # so this spelling mistake needs to be retained. FAKE_LUN_META1 = { 'FJ_Pool_Type': 'Thinporvisioning_POOL', 'FJ_Volume_No': FAKE_LUN_NO1, 'FJ_Volume_Name': 'FJosv_0qJ4rpOHgFE8ipcJOMfBmg==', 'FJ_Pool_Name': STORAGE_TYPE, 'FJ_Backend': FAKE_SYSTEM_NAME, } # Volume2 metadata info. FAKE_LUN_META3 = { 'FJ_Pool_Type': 'RAID_GROUP', 'FJ_Volume_No': FAKE_LUN_NO3, 'FJ_Volume_Name': 'FJosv_4whcadwDac7ANKHA2O719A==', 'FJ_Pool_Name': STORAGE_TYPE2, 'FJ_Backend': FAKE_SYSTEM_NAME, } # VolumeQOS metadata info FAKE_LUN_META_QOS = { 'FJ_Pool_Type': 'Thinporvisioning_POOL', 'FJ_Volume_No': FAKE_LUN_NO_QOS, 'FJ_Volume_Name': 'FJosv_mIsapeuZOaSXz4LYTqFcug==', 'FJ_Pool_Name': STORAGE_TYPE, 'FJ_Backend': FAKE_SYSTEM_NAME, } # Volume1 FAKE_MODEL_INFO1 = { 'provider_location': str(FAKE_LOCATION1), 'metadata': FAKE_LUN_META1, } # Volume2 FAKE_MODEL_INFO3 = { 'provider_location': str(FAKE_LOCATION3), 'metadata': FAKE_LUN_META3, } # VoluemQOS FAKE_MODEL_INFO_QOS = { 'provider_location': str(FAKE_LOCATION_QOS), 'metadata': FAKE_LUN_META_QOS, } FAKE_KEYBIND2 = { 'SystemName': STORAGE_SYSTEM, 'DeviceID': FAKE_LUN_ID2, } FAKE_LOCATION2 = { 'classname': 'FUJITSU_StorageVolume', 'keybindings': FAKE_KEYBIND2, 'vol_name': 'FJosv_OgEZj1mSvKRvIKOExKktlg==' } FAKE_SNAP_META = { 'FJ_Pool_Name': 'abcd1234_OSVD', 'FJ_SDV_Name': u'FJosv_OgEZj1mSvKRvIKOExKktlg==', 'FJ_SDV_No': FAKE_SDV_NO, 'FJ_Pool_Type': 2 } # Snapshot created on controller@113#abcd1234_TPP FAKE_SNAP_META2 = { 'FJ_Pool_Name': 'abcd1234_TPP', 'FJ_SDV_Name': 'FJosv_OgEZj1mSvKRvIKOExKktlg==', 'FJ_SDV_No': FAKE_SDV_NO, 'FJ_Pool_Type': 5 } FAKE_SNAP_INFO = { 'metadata': FAKE_SNAP_META, 'provider_location': str(FAKE_LOCATION2) } # Snapshot created on controller@113#abcd1234_TPP FAKE_SNAP_INFO2 = { 'metadata': FAKE_SNAP_META2, 'provider_location': str(FAKE_LOCATION2) } FAKE_LUN_META2 = { 'FJ_Pool_Type': 'Thinporvisioning_POOL', 'FJ_Volume_No': FAKE_LUN_NO1, 'FJ_Volume_Name': 'FJosv_OgEZj1mSvKRvIKOExKktlg==', 'FJ_Pool_Name': STORAGE_TYPE, 'FJ_Backend': FAKE_SYSTEM_NAME, } FAKE_CLONE_LUN_META = { 'FJ_Pool_Type': 'Thinporvisioning_POOL', 'FJ_Volume_No': FAKE_LUN_NO1, 'FJ_Volume_Name': 'FJosv_UkCZqMFZW3SU_JzxjHiKfg==', 'FJ_Pool_Name': STORAGE_TYPE, 'FJ_Backend': FAKE_SYSTEM_NAME, } FAKE_MODEL_INFO2 = { 'provider_location': str(FAKE_CLONE_LOCATION), 'metadata': FAKE_CLONE_LUN_META, } FAKE_CLI_OUTPUT = { "result": 0, 'rc': str(CONSTANTS.RC_OK), "message": 'TEST_MESSAGE' } # Constants for QOS MAX_IOPS = 4294967295 MAX_THROUGHPUT = 2097151 MIN_IOPS = 1 MIN_THROUGHPUT = 1 class FJ_StorageVolume(dict): pass class FJ_StoragePool(dict): pass class FJ_AffinityGroupController(dict): pass class FakeCIMInstanceName(dict): def fake_create_eternus_instance_name(self, classname, bindings): instancename = FakeCIMInstanceName() for key in 
bindings: instancename[key] = bindings[key] instancename.classname = classname instancename.namespace = 'root/eternus' return instancename def fake_enumerateinstances(self): instancename_1 = FakeCIMInstanceName() ret = [] instancename_1['ElementName'] = 'FJosv_0qJ4rpOHgFE8ipcJOMfBmg==' instancename_1['Purpose'] = '00228+0x06' instancename_1['Name'] = None instancename_1['DeviceID'] = FAKE_LUN_ID1 instancename_1['SystemName'] = STORAGE_SYSTEM ret.append(instancename_1) instancename_1.path = '' instancename_1.classname = 'FUJITSU_StorageVolume' snaps = FakeCIMInstanceName() snaps['ElementName'] = 'FJosv_OgEZj1mSvKRvIKOExKktlg==' snaps['Name'] = None snaps['DeviceID'] = FAKE_LUN_ID2 snaps['SystemName'] = STORAGE_SYSTEM ret.append(snaps) snaps.path = '' snaps.classname = 'FUJITSU_StorageVolume' map = FakeCIMInstanceName() map['ElementName'] = 'FJosv_hhJsV9lcMBvAPADrGqucwg==' map['Name'] = None ret.append(map) map.path = '' return ret class FakeEternusConnection(object): def InvokeMethod(self, MethodName, Service, ElementName=None, InPool=None, ElementType=None, TheElement=None, LUNames=None, Size=None, Type=None, Mode=None, Locality=None, InitiatorPortIDs=None, TargetPortIDs=None, DeviceAccesses=None, SyncType=None, SourceElement=None, TargetElement=None, Operation=None, CopyType=None, Synchronization=None, ProtocolControllers=None, TargetPool=None, WaitForCopyState=None): global MAP_STAT, VOL_STAT if MethodName == 'CreateOrModifyElementFromStoragePool': VOL_STAT = '1' rc = CONSTANTS.RC_OK vol = self._enum_volumes() if InPool.get('InstanceID') == 'FUJITSU:RSP0005': job = {'TheElement': vol[1].path} else: if ElementName == 'FJosv_OgEZj1mSvKRvIKOExKktlg==': job = {'TheElement': vol[3].path} else: job = {'TheElement': vol[0].path} elif MethodName == 'ReturnToStoragePool': VOL_STAT = '0' rc = CONSTANTS.RC_OK job = {} elif MethodName == 'GetReplicationRelationships': rc = CONSTANTS.RC_OK job = {'Synchronizations': []} elif MethodName == 'ExposePaths': MAP_STAT = '1' rc = CONSTANTS.RC_OK job = {} elif MethodName == 'HidePaths': MAP_STAT = '0' rc = CONSTANTS.RC_OK job = {} elif MethodName == 'CreateElementReplica': rc = CONSTANTS.RC_OK snap = self._enum_snapshots() job = {'TargetElement': snap[0].path} elif MethodName == 'CreateReplica': rc = CONSTANTS.RC_OK snap = self._enum_snapshots() job = {'TargetElement': snap[0].path} elif MethodName == 'ModifyReplicaSynchronization': rc = CONSTANTS.RC_OK job = {} else: raise exception.VolumeBackendAPIException(data="invoke method") return (rc, job) def EnumerateInstanceNames(self, name): result = [] if name == 'FUJITSU_StorageVolume': result = self._enum_volumes() elif name == 'FUJITSU_StorageConfigurationService': result = self._enum_confservice() elif name == 'FUJITSU_ReplicationService': result = self._enum_repservice() elif name == 'FUJITSU_ControllerConfigurationService': result = self._enum_ctrlservice() elif name == 'FUJITSU_AffinityGroupController': result = self._enum_afntyservice() elif name == 'FUJITSU_StorageHardwareIDManagementService': result = self._enum_sthwidmngsvc() elif name == 'CIM_ProtocolControllerForUnit': result = self._ref_unitnames() elif name == 'CIM_StoragePool': result = self._enum_pools() elif name == 'FUJITSU_SCSIProtocolEndpoint': result = self._enum_scsiport_endpoint() elif name == 'FUJITSU_IPProtocolEndpoint': result = self._enum_ipproto_endpoint() return result def EnumerateInstances(self, name, **param_dict): result = None if name == 'FUJITSU_StorageProduct': result = self._enum_sysnames() elif name == 
'FUJITSU_RAIDStoragePool': result = self._enum_pool_details('RAID') elif name == 'FUJITSU_ThinProvisioningPool': result = self._enum_pool_details('TPP') elif name == 'FUJITSU_SCSIProtocolEndpoint': result = self._enum_scsiport_endpoint() elif name == 'FUJITSU_iSCSIProtocolEndpoint': result = self._enum_iscsiprot_endpoint() elif name == 'FUJITSU_StorageHardwareID': result = self._enum_sthwid() elif name == 'CIM_SCSIProtocolEndpoint': result = self._enum_scsiport_endpoint() elif name == 'FUJITSU_StorageHardwareID': result = None elif name == 'FUJITSU_StorageVolume': instancename_1 = FakeCIMInstanceName() result = instancename_1.fake_enumerateinstances() else: result = None return result def GetInstance(self, objectpath, LocalOnly=False): try: name = objectpath['CreationClassName'] except KeyError: name = objectpath.classname result = None if name == 'FUJITSU_StorageVolume': result = self._getinstance_storagevolume(objectpath) elif name == 'FUJITSU_IPProtocolEndpoint': result = self._getinstance_ipprotocolendpoint(objectpath) elif name == 'CIM_ProtocolControllerForUnit': result = self._getinstance_unit(objectpath) elif name == 'FUJITSU_AffinityGroupController': result = self._getinstance_unit(objectpath) return result def Associators(self, objectpath, AssocClass=None, ResultClass='FUJITSU_StorageHardwareID'): result = None if ResultClass == 'FUJITSU_StorageHardwareID': result = self._assoc_hdwid() elif ResultClass == 'FUJITSU_iSCSIProtocolEndpoint': result = self._assoc_endpoint(objectpath) elif ResultClass == 'FUJITSU_StorageVolume': result = self._assoc_storagevolume(objectpath) elif ResultClass == 'FUJITSU_AuthorizedPrivilege': result = self._assoc_authpriv() elif AssocClass == 'FUJITSU_AllocatedFromStoragePool': result = self._assocnames_pool(objectpath) else: result = self._default_assoc(objectpath) return result def AssociatorNames(self, objectpath, AssocClass=None, ResultClass=SCSI_PROT_CTR): result = None if ResultClass == SCSI_PROT_CTR: result = self._assocnames_lunmaskctrl() elif ResultClass == 'FUJITSU_TCPProtocolEndpoint': result = self._assocnames_tcp_endpoint() elif ResultClass == 'FUJITSU_AffinityGroupController': result = self._assocnames_afngroup() elif (ResultClass == 'FUJITSU_StorageVolume' and AssocClass == 'FUJITSU_AllocatedFromStoragePool'): result = self._assocnames_volumelist(objectpath) else: result = self._default_assocnames(objectpath) return result def ReferenceNames(self, objectpath, ResultClass='CIM_ProtocolControllerForUnit'): result = [] if ResultClass == 'CIM_ProtocolControllerForUnit': if MAP_STAT == '1': result = self._ref_unitnames() else: result = [] elif ResultClass == 'FUJITSU_StorageSynchronized': result = self._ref_storage_sync() else: result = self._default_ref(objectpath) return result def _ref_unitnames(self): unitnames = [] unitname = FJ_AffinityGroupController() dependent = {} dependent['CreationClassName'] = STOR_VOL dependent['DeviceID'] = FAKE_LUN_ID1 dependent['SystemName'] = STORAGE_SYSTEM antecedent = {} antecedent['CreationClassName'] = SCSI_PROT_CTR antecedent['DeviceID'] = LUNMASKCTRL_IDS[0] antecedent['SystemName'] = STORAGE_SYSTEM unitname['Dependent'] = dependent unitname['Antecedent'] = antecedent unitname['CreationClassName'] = PROT_CTRL_UNIT unitname.path = unitname unitnames.append(unitname) unitname2 = FJ_AffinityGroupController() dependent2 = {} dependent2['CreationClassName'] = STOR_VOL dependent2['DeviceID'] = FAKE_LUN_ID1 dependent2['SystemName'] = STORAGE_SYSTEM antecedent2 = {} antecedent2['CreationClassName'] = 
SCSI_PROT_CTR antecedent2['DeviceID'] = LUNMASKCTRL_IDS[1] antecedent2['SystemName'] = STORAGE_SYSTEM unitname2['Dependent'] = dependent2 unitname2['Antecedent'] = antecedent2 unitname2['CreationClassName'] = PROT_CTRL_UNIT unitname2.path = unitname2 unitnames.append(unitname2) return unitnames def _ref_storage_sync(self): syncnames = [] cpsessions = {} synced = FakeCIMInstanceName() synced_keybindings = {} synced_keybindings['CreationClassName'] = STOR_VOL synced_keybindings['DeviceID'] = FAKE_LUN_ID2 synced_keybindings['SystemCreationClassName'] = \ 'FUJITSU_StorageComputerSystem' synced_keybindings['SystemName'] = STORAGE_SYSTEM synced['ClassName'] = STOR_VOL synced.keybindings = synced_keybindings cpsessions['SyncedElement'] = synced system = FakeCIMInstanceName() system_keybindings = {} system_keybindings['CreationClassName'] = STOR_VOL system_keybindings['DeviceID'] = FAKE_LUN_ID1 system_keybindings['SystemCreationClassName'] = \ 'FUJITSU_StorageComputerSystem' system_keybindings['SystemName'] = STORAGE_SYSTEM system['ClassName'] = STOR_VOL system.keybindings = system_keybindings cpsessions['SystemElement'] = system cpsessions['classname'] = STOR_SYNC syncnames.append(cpsessions) return syncnames def _default_ref(self, objectpath): return objectpath def _default_assoc(self, objectpath): return objectpath def _assocnames_lunmaskctrl(self): return self._enum_lunmaskctrls() def _assocnames_tcp_endpoint(self): return self._enum_tcp_endpoint() def _assocnames_afngroup(self): return self._enum_afntyservice() def _assocnames_volumelist(self, poolpath): volumelist = self._enum_volumes(force=True) inpool = [] for vol in volumelist: vol_pool = vol.get('poolpath') if poolpath['InstanceID'] == vol_pool: inpool.append(vol) return inpool def _assocnames_pool(self, volumepath): poollist = self._enum_pool_details('RAID') poollist += self._enum_pool_details('TPP') volpool = [] for pool in poollist: if volumepath['poolpath'] == pool['InstanceID']: volpool.append(pool) return volpool def _default_assocnames(self, objectpath): return objectpath def _assoc_authpriv(self): authprivs = [] iscsi = {} iscsi['InstanceID'] = ISCSI_INITIATOR authprivs.append(iscsi) fc = {} fc['InstanceID'] = TEST_WWPN[0] authprivs.append(fc) fc1 = {} fc1['InstanceID'] = TEST_WWPN[1] authprivs.append(fc1) return authprivs def _assoc_endpoint(self, objectpath): targetlist = [] tgtport1 = {} tgtport1['CreationClassName'] = 'FUJITSU_IPProtocolEndpoint' tgtport1['Name'] = ('iqn.2000-09.com.fujitsu:storage-system.' 
'eternus-dxl:0123456789,t,0x0009') targetlist.append(tgtport1) return targetlist def _getinstance_unit(self, objectpath): unit = FJ_AffinityGroupController() unit.path = None if MAP_STAT == '0': return unit dependent = {} dependent['CreationClassName'] = STOR_VOL dependent['DeviceID'] = FAKE_LUN_ID1 dependent['ElementName'] = TEST_VOLUME['name'] dependent['SystemName'] = STORAGE_SYSTEM antecedent = {} antecedent['CreationClassName'] = SCSI_PROT_CTR antecedent['DeviceID'] = LUNMASKCTRL_IDS[0] antecedent['SystemName'] = STORAGE_SYSTEM unit['Dependent'] = dependent unit['Antecedent'] = antecedent unit['CreationClassName'] = PROT_CTRL_UNIT unit['DeviceNumber'] = '0' unit.path = unit return unit def _enum_sysnames(self): sysnamelist = [] sysname = {} sysname['IdentifyingNumber'] = FAKE_SYSTEM_NAME sysnamelist.append(sysname) return sysnamelist def _enum_confservice(self): services = [] service = {} service['Name'] = 'FUJITSU:ETERNUS SMI-S Agent' service['SystemCreationClassName'] = 'FUJITSU_StorageComputerSystem' service['SystemName'] = STORAGE_SYSTEM service['CreationClassName'] = 'FUJITSU_StorageConfigurationService' services.append(service) return services def _enum_ctrlservice(self): services = [] service = {} service['SystemName'] = STORAGE_SYSTEM service['CreationClassName'] = 'FUJITSU_ControllerConfigurationService' services.append(service) return services def _enum_afntyservice(self): services = [] service = {} service['SystemName'] = STORAGE_SYSTEM service['CreationClassName'] = 'FUJITSU_AffinityGroupController' services.append(service) return services def _enum_repservice(self): services = [] service = {} service['Name'] = 'FUJITSU:ETERNUS SMI-S Agent' service['SystemCreationClassName'] = 'FUJITSU_StorageComputerSystem' service['SystemName'] = STORAGE_SYSTEM service['CreationClassName'] = 'FUJITSU_ReplicationService' services.append(service) return services def _enum_pools(self): pools = [] pool = {} pool['InstanceID'] = 'FUJITSU:RSP0004' pool['CreationClassName'] = 'FUJITSU_RAIDStoragePool' pools.append(pool) pool2 = {} pool2['InstanceID'] = 'FUJITSU:TPP0004' pool2['CreationClassName'] = 'FUJITSU_ThinProvisioningPool' pools.append(pool2) return pools def _enum_pool_details(self, pooltype): pools = [] pool = FJ_StoragePool() pool2 = FJ_StoragePool() if pooltype == 'RAID': pool['InstanceID'] = 'FUJITSU:RSP0004' pool['CreationClassName'] = 'FUJITSU_RAIDStoragePool' pool['ElementName'] = 'abcd1234_OSVD' pool['TotalManagedSpace'] = FAKE_CAPACITY pool['RemainingManagedSpace'] = FAKE_CAPACITY - 1 * units.Gi pool.path = FJ_StoragePool() pool.path['InstanceID'] = 'FUJITSU:RSP0004' pool.path.classname = 'FUJITSU_RAIDStoragePool' pools.append(pool) pool2['InstanceID'] = 'FUJITSU:RSP0005' pool2['CreationClassName'] = 'FUJITSU_RAIDStoragePool' pool2['ElementName'] = 'abcd1234_RG' pool2['TotalManagedSpace'] = FAKE_CAPACITY pool2['RemainingManagedSpace'] = FAKE_CAPACITY - 2 * units.Gi pool2.path = FJ_StoragePool() pool2.path['InstanceID'] = 'FUJITSU:RSP0005' pool2.path.classname = 'FUJITSU_RAIDStoragePool' pools.append(pool2) else: pool['InstanceID'] = 'FUJITSU:TPP0004' pool['CreationClassName'] = 'FUJITSU_ThinProvisioningPool' pool['ElementName'] = 'abcd1234_TPP' pool['TotalManagedSpace'] = FAKE_CAPACITY pool['RemainingManagedSpace'] = FAKE_CAPACITY - 1 * units.Gi pool.path = FJ_StoragePool() pool.path['InstanceID'] = 'FUJITSU:TPP0004' pool.path.classname = 'FUJITSU_ThinProvisioningPool' pools.append(pool) return pools def _enum_volumes(self, force=False): volumes = [] if VOL_STAT == '0' and not 
force: return volumes volume = FJ_StorageVolume() volume['name'] = TEST_VOLUME['name'] volume['poolpath'] = 'FUJITSU:TPP0004' volume['CreationClassName'] = 'FUJITSU_StorageVolume' volume['Name'] = FAKE_LUN_ID1 volume['DeviceID'] = FAKE_LUN_ID1 volume['SystemCreationClassName'] = 'FUJITSU_StorageComputerSystem' volume['SystemName'] = STORAGE_SYSTEM volume['ElementName'] = 'FJosv_0qJ4rpOHgFE8ipcJOMfBmg==' volume['volume_type_id'] = None volume.path = volume volume.path.classname = volume['CreationClassName'] name = { 'classname': 'FUJITSU_StorageVolume', 'keybindings': { 'CreationClassName': 'FUJITSU_StorageVolume', 'SystemName': STORAGE_SYSTEM, 'DeviceID': volume['DeviceID'], 'SystemCreationClassName': 'FUJITSU_StorageComputerSystem', }, } volume['provider_location'] = str(name) volume.path.keybindings = name['keybindings'] volumes.append(volume) volume3 = FJ_StorageVolume() volume3['name'] = TEST_VOLUME2['name'] volume3['poolpath'] = 'FUJITSU:RSP0005' volume3['CreationClassName'] = 'FUJITSU_StorageVolume' volume3['Name'] = FAKE_LUN_ID3 volume3['DeviceID'] = FAKE_LUN_ID3 volume3['SystemCreationClassName'] = 'FUJITSU_StorageComputerSystem' volume3['SystemName'] = STORAGE_SYSTEM volume3['ElementName'] = 'FJosv_4whcadwDac7ANKHA2O719A==' volume3['volume_type_id'] = None volume3.path = volume3 volume3.path.classname = volume3['CreationClassName'] name3 = { 'classname': 'FUJITSU_StorageVolume', 'keybindings': { 'CreationClassName': 'FUJITSU_StorageVolume', 'SystemName': STORAGE_SYSTEM, 'DeviceID': volume3['DeviceID'], 'SystemCreationClassName': 'FUJITSU_StorageComputerSystem', }, } volume3['provider_location'] = str(name3) volume3.path.keybindings = name3['keybindings'] volumes.append(volume3) snap_vol = FJ_StorageVolume() snap_vol['name'] = TEST_SNAP['name'] snap_vol['poolpath'] = 'FUJITSU:RSP0004' snap_vol['CreationClassName'] = 'FUJITSU_StorageVolume' snap_vol['Name'] = FAKE_LUN_ID2 snap_vol['DeviceID'] = FAKE_LUN_ID2 snap_vol['SystemCreationClassName'] = 'FUJITSU_StorageComputerSystem' snap_vol['SystemName'] = STORAGE_SYSTEM snap_vol['ElementName'] = 'FJosv_OgEZj1mSvKRvIKOExKktlg==' snap_vol.path = snap_vol snap_vol.path.classname = snap_vol['CreationClassName'] name2 = { 'classname': 'FUJITSU_StorageVolume', 'keybindings': { 'CreationClassName': 'FUJITSU_StorageVolume', 'SystemName': STORAGE_SYSTEM, 'DeviceID': snap_vol['DeviceID'], 'SystemCreationClassName': 'FUJITSU_StorageComputerSystem', }, } snap_vol['provider_location'] = str(name2) snap_vol.path.keybindings = name2['keybindings'] volumes.append(snap_vol) snap_vol2 = FJ_StorageVolume() snap_vol2['name'] = TEST_SNAP['name'] snap_vol2['poolpath'] = 'FUJITSU:TPP0004' snap_vol2['CreationClassName'] = 'FUJITSU_StorageVolume' snap_vol2['Name'] = FAKE_LUN_ID2 snap_vol2['DeviceID'] = FAKE_LUN_ID2 snap_vol2['SystemCreationClassName'] = 'FUJITSU_StorageComputerSystem' snap_vol2['SystemName'] = STORAGE_SYSTEM snap_vol2['ElementName'] = 'FJosv_OgEZj1mSvKRvIKOExKktlg==' snap_vol2.path = snap_vol snap_vol2.path.classname = snap_vol['CreationClassName'] name4 = { 'classname': 'FUJITSU_StorageVolume', 'keybindings': { 'CreationClassName': 'FUJITSU_StorageVolume', 'SystemName': STORAGE_SYSTEM, 'DeviceID': snap_vol['DeviceID'], 'SystemCreationClassName': 'FUJITSU_StorageComputerSystem', }, } snap_vol2['provider_location'] = str(name4) volumes.append(snap_vol2) clone_vol = FJ_StorageVolume() clone_vol['name'] = TEST_CLONE['name'] clone_vol['poolpath'] = 'FUJITSU:TPP0004' clone_vol['CreationClassName'] = 'FUJITSU_StorageVolume' clone_vol['ElementName'] = 
TEST_CLONE['name'] clone_vol['DeviceID'] = FAKE_LUN_ID2 clone_vol['SystemName'] = STORAGE_SYSTEM clone_vol['SystemCreationClassName'] = 'FUJITSU_StorageComputerSystem' clone_vol.path = clone_vol clone_vol.path.classname = clone_vol['CreationClassName'] name_clone = { 'classname': 'FUJITSU_StorageVolume', 'keybindings': { 'CreationClassName': 'FUJITSU_StorageVolume', 'SystemName': STORAGE_SYSTEM, 'DeviceID': clone_vol['DeviceID'], 'SystemCreationClassName': 'FUJITSU_StorageComputerSystem', }, } clone_vol['provider_location'] = str(name_clone) clone_vol.path.keybindings = name_clone['keybindings'] volumes.append(clone_vol) return volumes def _enum_snapshots(self): snapshots = [] snap = FJ_StorageVolume() snap['CreationClassName'] = 'FUJITSU_StorageVolume' snap['SystemName'] = STORAGE_SYSTEM snap['DeviceID'] = FAKE_LUN_ID2 snap['SystemCreationClassName'] = 'FUJITSU_StorageComputerSystem' snap.path = snap snap.path.classname = snap['CreationClassName'] snapshots.append(snap) return snapshots def _enum_lunmaskctrls(self): ctrls = [] ctrl = {} ctrl2 = {} if MAP_STAT == '1': ctrl['CreationClassName'] = SCSI_PROT_CTR ctrl['SystemName'] = STORAGE_SYSTEM ctrl['DeviceID'] = LUNMASKCTRL_IDS[0] ctrls.append(ctrl) ctrl2['CreationClassName'] = SCSI_PROT_CTR ctrl2['SystemName'] = STORAGE_SYSTEM ctrl2['DeviceID'] = LUNMASKCTRL_IDS[1] ctrls.append(ctrl2) return ctrls def _enum_scsiport_endpoint(self): targetlist = [] tgtport1 = {} tgtport1['Name'] = '1234567890000021' tgtport1['CreationClassName'] = 'FUJITSU_SCSIProtocolEndpoint' tgtport1['ConnectionType'] = 2 tgtport1['RAMode'] = 0 targetlist.append(tgtport1) return targetlist def _enum_ipproto_endpoint(self): targetlist = [] tgtport1 = {} tgtport1['CreationClassName'] = 'FUJITSU_IPProtocolEndpoint' tgtport1['NAME'] = 'IP_CM01CA00P00_00' targetlist.append(tgtport1) return targetlist def _enum_tcp_endpoint(self): targetlist = [] tgtport1 = {} tgtport1['CreationClassName'] = 'FUJITSU_TCPProtocolEndpoint' tgtport1['NAME'] = 'TCP_CM01CA00P00_00' targetlist.append(tgtport1) return targetlist def _enum_iscsiprot_endpoint(self): targetlist = [] tgtport1 = {} tgtport1['Name'] = ('iqn.2000-09.com.fujitsu:storage-system.' 'eternus-dxl:0123456789,t,0x0009') tgtport1['ConnectionType'] = 7 tgtport1['RAMode'] = 0 targetlist.append(tgtport1) return targetlist def _getinstance_storagevolume(self, objpath): instance = FJ_StorageVolume() volumes = self._enum_volumes() for volume in volumes: if volume['DeviceID'] == objpath['DeviceID']: instance = volume break if not instance: foundinstance = None else: foundinstance = instance return foundinstance def _getinstance_ipprotocolendpoint(self, objpath): instance = {} instance['IPv4Address'] = '10.0.0.3' return instance class FJFCDriverTestCase(test.TestCase): def __init__(self, *args, **kwargs): super(FJFCDriverTestCase, self).__init__(*args, **kwargs) def setUp(self): super(FJFCDriverTestCase, self).setUp() # Make fake xml-configuration file. self.config_file = tempfile.NamedTemporaryFile("w+", suffix='.xml') self.addCleanup(self.config_file.close) self.config_file.write(CONF) self.config_file.flush() # Make fake Object by using mock as configuration object. 
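        # Backend connection settings come from the XML file written above;
        # everything else is supplied through this mocked Configuration object.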
self.configuration = mock.Mock(spec=conf.Configuration) self.configuration.cinder_eternus_config_file = self.config_file.name self.configuration.safe_get = self.fake_safe_get self.configuration.max_over_subscription_ratio = '20.0' self.configuration.fujitsu_passwordless = False self.configuration.fujitsu_private_key_path = PRIVATE_KEY_PATH self.configuration.fujitsu_use_cli_copy = False self.mock_object(dx_common.FJDXCommon, '_get_eternus_connection', self.fake_eternus_connection) instancename = FakeCIMInstanceName() self.mock_object(dx_common.FJDXCommon, '_create_eternus_instance_name', instancename.fake_create_eternus_instance_name) self.mock_object(ssh_utils, 'SSHPool', mock.Mock()) self.mock_object(dx_common.FJDXCommon, '_get_qos_specs', return_value={}) self.mock_object(eternus_dx_cli.FJDXCLI, '_exec_cli_with_eternus', self.fake_exec_cli_with_eternus) # Set fc driver to self.driver. driver = dx_fc.FJDXFCDriver(configuration=self.configuration) self.driver = driver self.context = context.get_admin_context() def fake_exec_cli_with_eternus(self, exec_cmdline): if exec_cmdline == "show users": ret = ('\r\nCLI> %s\r\n00\r\n' '3B\r\nf.ce\tMaintainer\t01\t00' '\t00\t00\r\ntestuser\tSoftware' '\t01\t01\t00\t00\r\nCLI> ' % exec_cmdline) elif exec_cmdline.startswith('expand volume'): ret = '%s\r\n00\r\nCLI> ' % exec_cmdline elif exec_cmdline.startswith('set volume-qos'): ret = '%s\r\n00\r\n0001\r\nCLI> ' % exec_cmdline elif exec_cmdline.startswith('show volumes'): ret = ('\r\nCLI> %s\r\n00\r\n0560\r\n0000' '\tFJosv_0qJ4rpOHgFE8ipcJOMfBmg==' '\tA001\t0B\t00\t0000\tabcd1234_TPP' '\t0000000000200000\t00\t00' '\t00000000\t0050\tFF\t00\tFF' '\tFF\t20\tFF\tFFFF\t00' '\t600000E00D2A0000002A011500140000' '\t00\t00\tFF\tFF\tFFFFFFFF\t00' '\t00\tFF' % exec_cmdline) elif exec_cmdline.startswith('show enclosure-status'): ret = ('\r\nCLI> %s\r\n00\r\n' 'ETDX200S3_1\t01\tET203ACU\t4601417434\t280753\t20' '\t00\t00\t01\t02\t01001000\tV10L87-9000\t91\r\n02' '\r\n70000000\t30\r\nD0000100\t30\r\nCLI> ' % exec_cmdline) elif exec_cmdline.startswith('show volume-qos'): ret = ('\r\nCLI> %s\r\n00\r\n' '0002\t\r\n0000\tFJosv_0qJ4rpOHgFE8ipcJOMfBmg==\t0F' '\t\r\n0001\tFJosv_OgEZj1mSvKRvIKOExKktlg==\t0D' '\t\r\nCLI> ' % exec_cmdline) elif exec_cmdline.startswith('show copy-sessions'): ret = ('\r\nCLI> %s\r\n00\r\n0001\t\r\n' '0001\tFFFF\t01\t08\tFF\tFF\t03\t02\tFF\tFF\t05ABD7D2\t' '########################################\t' '########################################\t' '00000281\t00000286\t0001\t00\tFF\t0000000000000800\t' '0000000000000000\t0000000000000100\t0000000000000800\t' '04\t00\t00000000\t2020101009341400\t01\t10\tFFFF\tFFFF\t' '0000000000000000\tFFFFFFFFFFFFFFFF\tFFFFFFFFFFFFFFFF\tFF\t' 'FF\t64\t00\t07\t00\t00\t00\r\nCLI> ' % exec_cmdline) elif exec_cmdline.startswith('show qos-bandwidth-limit'): ret = ('\r\nCLI> %s\r\n00\r\n0010\t\r\n00\t0000ffff\t0000ffff' '\t0000ffff\t0000ffff\t0000ffff\t0000ffff\t0000ffff' '\t0000ffff\t0000ffff\t0000ffff\t0000ffff\t0000ffff\r\n' '01\t00000001\t00000001\t00000001\t00000001\t00000001' '\t00000001\t00000001\t00000001\t00000001\t00000001' '\t00000001\t00000001\r\n02\t00000002\t00000002\t00000002' '\t00000002\t00000002\t00000002\t00000002\t00000002' '\t00000002\t00000002\t00000002\t00000002\r\n03\t00000003' '\t00000003\t00000003\t00000003\t00000003\t00000003' '\t00000003\t00000003\t00000003\t00000003\t00000003' '\t00000003\r\n04\t00000004\t00000004\t00000004\t00000004' '\t00000004\t00000004\t00000004\t00000004\t00000004' 
'\t00000004\t00000004\t00000004\r\n05\t00000005\t00000005' '\t00000005\t00000005\t00000005\t00000005\t00000005' '\t00000005\t00000005\t00000005\t00000005\t00000005\r\n06' '\t00000006\t00000006\t00000006\t00000006\t00000006' '\t00000006\t00000006\t00000006\t00000006\t00000006' '\t00000006\t00000006\r\n07\t00000007\t00000007\t00000007' '\t00000007\t00000007\t00000007\t00000007\t00000007' '\t00000007\t00000007\t00000007\t00000007\r\n08\t00000008' '\t00000008\t00000008\t00000008\t00000008\t00000008' '\t00000008\t00000008\t00000008\t00000008\t00000008' '\t00000008\r\n09\t00000009\t00000009\t00000009\t00000009' '\t00000009\t00000009\t00000009\t00000009\t00000009' '\t00000009\t00000009\t00000009\r\n0a\t0000000a\t0000000a' '\t0000000a\t0000000a\t0000000a\t0000000a\t0000000a' '\t0000000a\t0000000a\t0000000a\t0000000a\t0000000a\r\n0b' '\t0000000b\t0000000b\t0000000b\t0000000b\t0000000b' '\t0000000b\t0000000b\t0000000b\t0000000b\t0000000b' '\t0000000b\t0000000b\r\n0c\t0000000c\t0000000c\t0000000c' '\t0000000c\t0000000c\t0000000c\t0000000c\t0000000c' '\t0000000c\t0000000c\t0000000c\t0000000c\r\n0d\t0000000d' '\t0000000d\t0000000d\t0000000d\t0000000d\t0000000d' '\t0000000d\t0000000d\t0000000d\t0000000d\t0000000d' '\t0000000d\r\n0e\t0000000e\t0000000e\t0000000e\t0000000e' '\t0000000e\t0000000e\t0000000e\t0000000e\t0000000e' '\t0000000e\t0000000e\t0000000e\r\n0f\t0000000f\t0000000f' '\t0000000f\t0000000f\t0000000f\t0000000f\t0000000f' '\t0000000f\t0000000f\t0000000f\t0000000f\t0000000f' '\r\nCLI> ' % exec_cmdline) elif exec_cmdline.startswith('set qos-bandwidth-limit'): ret = '%s\r\n00\r\n0001\r\nCLI> ' % exec_cmdline elif exec_cmdline.startswith('stop copy-session'): ret = '%s\r\n00\r\nCLI> ' % exec_cmdline elif exec_cmdline.startswith('start copy-snap-opc'): ret = '%s\r\n00\r\n0019\r\nCLI> ' % exec_cmdline else: ret = None return ret def fake_safe_get(self, str=None): return str def fake_eternus_connection(self): conn = FakeEternusConnection() return conn def volume_update(self, volume, diction): for key, value in diction.items(): volume[key] = value def test_get_volume_stats(self): ret = self.driver.get_volume_stats(True) self.assertEqual(FAKE_STATS2, ret) def test_create_and_delete_volume(self): model_info = self.driver.create_volume(TEST_VOLUME) self.volume_update(TEST_VOLUME, model_info) self.assertEqual(FAKE_MODEL_INFO1, model_info) model_info = self.driver.create_volume(TEST_VOLUME2) self.volume_update(TEST_VOLUME2, model_info) self.assertEqual(FAKE_MODEL_INFO3, model_info) self.driver.delete_volume(TEST_VOLUME) self.driver.delete_volume(TEST_VOLUME2) @mock.patch.object(dx_common.FJDXCommon, '_get_mapdata') def test_map_unmap(self, mock_mapdata): fake_data = {'target_wwn': FC_TARGET_WWN, 'target_lun': 0} mock_mapdata.return_value = fake_data fake_mapdata = dict(fake_data) fake_mapdata['initiator_target_map'] = { initiator: FC_TARGET_WWN for initiator in TEST_WWPN } fake_mapdata['volume_id'] = TEST_VOLUME['id'] fake_mapdata['target_discovered'] = True fake_info = {'driver_volume_type': 'fibre_channel', 'data': fake_mapdata} model_info = self.driver.create_volume(TEST_VOLUME) self.volume_update(TEST_VOLUME, model_info) self.assertEqual(FAKE_MODEL_INFO1, model_info) info = self.driver.initialize_connection(TEST_VOLUME, TEST_CONNECTOR) self.assertEqual(fake_info, info) # Call terminate_connection with connector. 
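        # After unmapping, the volume is attached again to confirm the mapping
        # can be rebuilt and still returns the same connection info.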
self.driver.terminate_connection(TEST_VOLUME, TEST_CONNECTOR) info = self.driver.initialize_connection(TEST_VOLUME, TEST_CONNECTOR) self.assertEqual(fake_info, info) # Call terminate_connection without connector. self.driver.terminate_connection(TEST_VOLUME, None) self.driver.delete_volume(TEST_VOLUME) def test_create_and_delete_snapshot_using_smis(self): model_info = self.driver.create_volume(TEST_VOLUME) self.volume_update(TEST_VOLUME, model_info) self.assertEqual(FAKE_MODEL_INFO1, model_info) snap_info = self.driver.create_snapshot(TEST_SNAP) self.volume_update(TEST_SNAP, snap_info) self.assertEqual(FAKE_SNAP_INFO, snap_info) self.driver.delete_snapshot(TEST_SNAP) self.driver.delete_volume(TEST_VOLUME) @mock.patch.object(dx_common, 'LOG') def test_create_and_delete_snapshot_using_cli(self, mock_log): self.configuration.fujitsu_use_cli_copy = True driver = dx_fc.FJDXFCDriver(configuration=self.configuration) self.driver = driver model_info = self.driver.create_volume(TEST_VOLUME) self.volume_update(TEST_VOLUME, model_info) self.assertEqual(FAKE_MODEL_INFO1, model_info) warning_msg = '_create_snapshot, Can not create SDV by SMI-S.' snap_info = self.driver.create_snapshot(TEST_SNAP) self.volume_update(TEST_SNAP, snap_info) self.assertEqual(FAKE_SNAP_INFO2, snap_info) mock_log.warning.assert_called_with(warning_msg) self.driver.delete_snapshot(TEST_SNAP) self.driver.delete_volume(TEST_VOLUME) def test_create_volume_from_snapshot(self): model_info = self.driver.create_volume(TEST_VOLUME) self.volume_update(TEST_VOLUME, model_info) self.assertEqual(FAKE_MODEL_INFO1, model_info) snap_info = self.driver.create_snapshot(TEST_SNAP) self.volume_update(TEST_SNAP, snap_info) self.assertEqual(FAKE_SNAP_INFO, snap_info) model_info = self.driver.create_volume_from_snapshot(TEST_CLONE, TEST_SNAP) self.volume_update(TEST_CLONE, model_info) self.assertEqual(FAKE_MODEL_INFO2, model_info) self.driver.delete_snapshot(TEST_SNAP) self.driver.delete_volume(TEST_CLONE) self.driver.delete_volume(TEST_VOLUME) def test_create_cloned_volume(self): model_info = self.driver.create_volume(TEST_VOLUME) self.volume_update(TEST_VOLUME, model_info) self.assertEqual(FAKE_MODEL_INFO1, model_info) model_info = self.driver.create_cloned_volume(TEST_CLONE, TEST_VOLUME) self.volume_update(TEST_CLONE, model_info) self.assertEqual(FAKE_MODEL_INFO2, model_info) self.driver.delete_volume(TEST_CLONE) self.driver.delete_volume(TEST_VOLUME) def test_extend_volume(self): # Test the extension of volume created on RaidGroup and # ThinProvisioningPool separately. 
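        # TEST_VOLUME lives in the abcd1234_TPP pool and TEST_VOLUME2 in the
        # abcd1234_RG RAID group (see the 'host' fields of the fixtures).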
TEST_VOLUME_LIST = [TEST_VOLUME, TEST_VOLUME2] FAKE_MODEL_INFO_LIST = [FAKE_MODEL_INFO1, FAKE_MODEL_INFO3] for i in range(len(TEST_VOLUME_LIST)): model_info = self.driver.create_volume(TEST_VOLUME_LIST[i]) self.volume_update(TEST_VOLUME_LIST[i], model_info) self.assertEqual(FAKE_MODEL_INFO_LIST[i], model_info) self.driver.extend_volume(TEST_VOLUME_LIST[i], 10) def test_create_volume_with_qos(self): self.driver.common._get_qos_specs = mock.Mock() self.driver.common._get_qos_specs.return_value = {'maxBWS': '700'} self.driver.common._set_qos = mock.Mock() model_info = self.driver.create_volume(TEST_VOLUME_QOS) self.volume_update(TEST_VOLUME, model_info) self.assertEqual(FAKE_MODEL_INFO_QOS, model_info) self.driver.common._set_qos.assert_called() def test_update_migrated_volume(self): model_info = self.driver.create_volume(TEST_VOLUME) self.volume_update(TEST_VOLUME, model_info) self.assertEqual(FAKE_MODEL_INFO1, model_info) model_info2 = self.driver.create_volume(TEST_VOLUME2) self.volume_update(TEST_VOLUME2, model_info2) self.assertEqual(FAKE_MODEL_INFO3, model_info2) model_update = self.driver.update_migrated_volume(self.context, TEST_VOLUME, TEST_VOLUME2, 'available') FAKE_MIGRATED_MODEL_UPDATE = { '_name_id': TEST_VOLUME2['id'], 'provider_location': model_info2['provider_location'] } self.assertEqual(FAKE_MIGRATED_MODEL_UPDATE, model_update) def test_revert_to_snapshot(self): self.driver.common.revert_to_snapshot = mock.Mock() model_info = self.driver.create_volume(TEST_VOLUME) self.volume_update(TEST_VOLUME, model_info) self.assertEqual(FAKE_MODEL_INFO1, model_info) snap_info = self.driver.create_snapshot(TEST_SNAP) self.volume_update(TEST_SNAP, snap_info) self.assertEqual(FAKE_SNAP_INFO, snap_info) self.driver.revert_to_snapshot(self.context, TEST_VOLUME, TEST_SNAP) self.driver.common.revert_to_snapshot.assert_called_with(TEST_VOLUME, TEST_SNAP) class FJISCSIDriverTestCase(test.TestCase): def __init__(self, *args, **kwargs): super(FJISCSIDriverTestCase, self).__init__(*args, **kwargs) def setUp(self): super(FJISCSIDriverTestCase, self).setUp() # Make fake xml-configuration file. self.config_file = tempfile.NamedTemporaryFile("w+", suffix='.xml') self.addCleanup(self.config_file.close) self.config_file.write(CONF) self.config_file.flush() # Make fake Object by using mock as configuration object. self.configuration = mock.Mock(spec=conf.Configuration) self.configuration.cinder_eternus_config_file = self.config_file.name self.configuration.safe_get = self.fake_safe_get self.configuration.max_over_subscription_ratio = '20.0' self.configuration.fujitsu_passwordless = False self.configuration.fujitsu_private_key_path = PRIVATE_KEY_PATH self.configuration.fujitsu_use_cli_copy = False self.mock_object(dx_common.FJDXCommon, '_get_eternus_connection', self.fake_eternus_connection) instancename = FakeCIMInstanceName() self.mock_object(dx_common.FJDXCommon, '_create_eternus_instance_name', instancename.fake_create_eternus_instance_name) self.mock_object(dx_common.FJDXCommon, '_get_mapdata_iscsi', self.fake_get_mapdata) self.mock_object(ssh_utils, 'SSHPool', mock.Mock()) self.mock_object(dx_common.FJDXCommon, '_get_qos_specs', return_value={}) self.mock_object(eternus_dx_cli.FJDXCLI, '_exec_cli_with_eternus', self.fake_exec_cli_with_eternus) # Set iscsi driver to self.driver. 
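        # Connection data is served by fake_get_mapdata (patched in above as
        # _get_mapdata_iscsi), so no real iSCSI discovery is performed.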
driver = dx_iscsi.FJDXISCSIDriver(configuration=self.configuration) self.driver = driver self.context = context.get_admin_context() def fake_exec_cli_with_eternus(self, exec_cmdline): if exec_cmdline == "show users": ret = ('\r\nCLI> %s\r\n00\r\n' '3B\r\nf.ce\tMaintainer\t01\t00' '\t00\t00\r\ntestuser\tSoftware' '\t01\t01\t00\t00\r\nCLI> ' % exec_cmdline) elif exec_cmdline.startswith('expand volume'): ret = '%s\r\n00\r\nCLI> ' % exec_cmdline elif exec_cmdline.startswith('set volume-qos'): ret = '%s\r\n00\r\n0001\r\nCLI> ' % exec_cmdline elif exec_cmdline.startswith('show volumes'): ret = ('\r\nCLI> %s\r\n00\r\n0560\r\n0000' '\tFJosv_0qJ4rpOHgFE8ipcJOMfBmg==' '\tA001\t0B\t00\t0000\tabcd1234_TPP' '\t0000000000200000\t00\t00' '\t00000000\t0050\tFF\t00\tFF' '\tFF\t20\tFF\tFFFF\t00' '\t600000E00D2A0000002A011500140000' '\t00\t00\tFF\tFF\tFFFFFFFF\t00' '\t00\tFF' % exec_cmdline) elif exec_cmdline.startswith('show enclosure-status'): ret = ('\r\nCLI> %s\r\n00\r\n' 'ETDX200S3_1\t01\tET203ACU\t4601417434\t280753\t20' '\t00\t00\t01\t02\t01001000\tV10L87-9000\t91\r\n02' '\r\n70000000\t30\r\nD0000100\t30\r\nCLI> ' % exec_cmdline) elif exec_cmdline.startswith('show volume-qos'): ret = ('\r\nCLI> %s\r\n00\r\n' '0002\t\r\n0000\tFJosv_0qJ4rpOHgFE8ipcJOMfBmg==\t0F' '\t\r\n0001\tFJosv_OgEZj1mSvKRvIKOExKktlg==\t0D' '\t\r\nCLI> ' % exec_cmdline) elif exec_cmdline.startswith('show copy-sessions'): ret = ('\r\nCLI> %s\r\n00\r\n0001\t\r\n' '0001\tFFFF\t01\t08\tFF\tFF\t03\t02\tFF\tFF\t05ABD7D2\t' '########################################\t' '########################################\t' '00000281\t00000286\t0001\t00\tFF\t0000000000000800\t' '0000000000000000\t0000000000000100\t0000000000000800\t' '04\t00\t00000000\t2020101009341400\t01\t10\tFFFF\tFFFF\t' '0000000000000000\tFFFFFFFFFFFFFFFF\tFFFFFFFFFFFFFFFF\tFF\t' 'FF\t64\t00\t07\t00\t00\t00\r\nCLI> ' % exec_cmdline) elif exec_cmdline.startswith('show qos-bandwidth-limit'): ret = ('\r\nCLI> %s\r\n00\r\n0010\t\r\n00\t0000ffff\t0000ffff' '\t0000ffff\t0000ffff\t0000ffff\t0000ffff\t0000ffff' '\t0000ffff\t0000ffff\t0000ffff\t0000ffff\t0000ffff\r\n' '01\t00000001\t00000001\t00000001\t00000001\t00000001' '\t00000001\t00000001\t00000001\t00000001\t00000001' '\t00000001\t00000001\r\n02\t00000002\t00000002\t00000002' '\t00000002\t00000002\t00000002\t00000002\t00000002' '\t00000002\t00000002\t00000002\t00000002\r\n03\t00000003' '\t00000003\t00000003\t00000003\t00000003\t00000003' '\t00000003\t00000003\t00000003\t00000003\t00000003' '\t00000003\r\n04\t00000004\t00000004\t00000004\t00000004' '\t00000004\t00000004\t00000004\t00000004\t00000004' '\t00000004\t00000004\t00000004\r\n05\t00000005\t00000005' '\t00000005\t00000005\t00000005\t00000005\t00000005' '\t00000005\t00000005\t00000005\t00000005\t00000005\r\n06' '\t00000006\t00000006\t00000006\t00000006\t00000006' '\t00000006\t00000006\t00000006\t00000006\t00000006' '\t00000006\t00000006\r\n07\t00000007\t00000007\t00000007' '\t00000007\t00000007\t00000007\t00000007\t00000007' '\t00000007\t00000007\t00000007\t00000007\r\n08\t00000008' '\t00000008\t00000008\t00000008\t00000008\t00000008' '\t00000008\t00000008\t00000008\t00000008\t00000008' '\t00000008\r\n09\t00000009\t00000009\t00000009\t00000009' '\t00000009\t00000009\t00000009\t00000009\t00000009' '\t00000009\t00000009\t00000009\r\n0a\t0000000a\t0000000a' '\t0000000a\t0000000a\t0000000a\t0000000a\t0000000a' '\t0000000a\t0000000a\t0000000a\t0000000a\t0000000a\r\n0b' '\t0000000b\t0000000b\t0000000b\t0000000b\t0000000b' '\t0000000b\t0000000b\t0000000b\t0000000b\t0000000b' 
'\t0000000b\t0000000b\r\n0c\t0000000c\t0000000c\t0000000c' '\t0000000c\t0000000c\t0000000c\t0000000c\t0000000c' '\t0000000c\t0000000c\t0000000c\t0000000c\r\n0d\t0000000d' '\t0000000d\t0000000d\t0000000d\t0000000d\t0000000d' '\t0000000d\t0000000d\t0000000d\t0000000d\t0000000d' '\t0000000d\r\n0e\t0000000e\t0000000e\t0000000e\t0000000e' '\t0000000e\t0000000e\t0000000e\t0000000e\t0000000e' '\t0000000e\t0000000e\t0000000e\r\n0f\t0000000f\t0000000f' '\t0000000f\t0000000f\t0000000f\t0000000f\t0000000f' '\t0000000f\t0000000f\t0000000f\t0000000f\t0000000f' '\r\nCLI> ' % exec_cmdline) elif exec_cmdline.startswith('set qos-bandwidth-limit'): ret = '%s\r\n00\r\n0001\r\nCLI> ' % exec_cmdline elif exec_cmdline.startswith('start copy-snap-opc'): ret = '%s\r\n00\r\n0019\r\nCLI> ' % exec_cmdline else: ret = None return ret def fake_safe_get(self, str=None): return str def fake_eternus_connection(self): conn = FakeEternusConnection() return conn def fake_get_mapdata(self, vol_instance, connector, target_portlist): multipath = connector.get('multipath', False) if multipath: return {'target_portals': [ISCSI_TARGET_IP], 'target_iqns': [ISCSI_TARGET_IQN], 'target_luns': [0]} else: return {'target_portal': ISCSI_TARGET_IP, 'target_iqns': ISCSI_TARGET_IQN, 'target_lun': 0} def volume_update(self, volume, diction): for key, value in diction.items(): volume[key] = value def test_get_volume_stats(self): ret = self.driver.get_volume_stats(True) self.assertEqual(FAKE_STATS, ret) def test_create_and_delete_volume(self): model_info = self.driver.create_volume(TEST_VOLUME) self.volume_update(TEST_VOLUME, model_info) self.assertEqual(FAKE_MODEL_INFO1, model_info) model_info = self.driver.create_volume(TEST_VOLUME2) self.volume_update(TEST_VOLUME2, model_info) self.assertEqual(FAKE_MODEL_INFO3, model_info) self.driver.delete_volume(TEST_VOLUME) self.driver.delete_volume(TEST_VOLUME2) def test_map_unmap(self): fake_mapdata = self.fake_get_mapdata(None, {}, None) fake_mapdata['volume_id'] = TEST_VOLUME['id'] fake_mapdata['target_discovered'] = True fake_info = {'driver_volume_type': 'iscsi', 'data': fake_mapdata} model_info = self.driver.create_volume(TEST_VOLUME) self.volume_update(TEST_VOLUME, model_info) self.assertEqual(FAKE_MODEL_INFO1, model_info) info = self.driver.initialize_connection(TEST_VOLUME, TEST_CONNECTOR) self.assertEqual(fake_info, info) # Call terminate_connection with connector. self.driver.terminate_connection(TEST_VOLUME, TEST_CONNECTOR) info = self.driver.initialize_connection(TEST_VOLUME, TEST_CONNECTOR) self.assertEqual(fake_info, info) # Call terminate_connection without connector. 
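        # With no connector the driver has to unmap the volume without any
        # initiator information.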
self.driver.terminate_connection(TEST_VOLUME, None) self.driver.delete_volume(TEST_VOLUME) def test_create_and_delete_snapshot_using_smis(self): model_info = self.driver.create_volume(TEST_VOLUME) self.volume_update(TEST_VOLUME, model_info) self.assertEqual(FAKE_MODEL_INFO1, model_info) snap_info = self.driver.create_snapshot(TEST_SNAP) self.volume_update(TEST_SNAP, snap_info) self.assertEqual(FAKE_SNAP_INFO, snap_info) self.driver.delete_snapshot(TEST_SNAP) self.driver.delete_volume(TEST_VOLUME) @mock.patch.object(dx_common, 'LOG') def test_create_and_delete_snapshot_using_cli(self, mock_log): self.configuration.fujitsu_use_cli_copy = True driver = dx_fc.FJDXFCDriver(configuration=self.configuration) self.driver = driver model_info = self.driver.create_volume(TEST_VOLUME) self.volume_update(TEST_VOLUME, model_info) self.assertEqual(FAKE_MODEL_INFO1, model_info) warning_msg = '_create_snapshot, Can not create SDV by SMI-S.' snap_info = self.driver.create_snapshot(TEST_SNAP) self.volume_update(TEST_SNAP, snap_info) self.assertEqual(FAKE_SNAP_INFO2, snap_info) mock_log.warning.assert_called_with(warning_msg) self.driver.delete_snapshot(TEST_SNAP) self.driver.delete_volume(TEST_VOLUME) def test_create_volume_from_snapshot(self): model_info = self.driver.create_volume(TEST_VOLUME) self.volume_update(TEST_VOLUME, model_info) self.assertEqual(FAKE_MODEL_INFO1, model_info) snap_info = self.driver.create_snapshot(TEST_SNAP) self.volume_update(TEST_SNAP, snap_info) self.assertEqual(FAKE_SNAP_INFO, snap_info) model_info = self.driver.create_volume_from_snapshot(TEST_CLONE, TEST_SNAP) self.volume_update(TEST_CLONE, model_info) self.assertEqual(FAKE_MODEL_INFO2, model_info) self.driver.delete_snapshot(TEST_SNAP) self.driver.delete_volume(TEST_CLONE) self.driver.delete_volume(TEST_VOLUME) def test_create_cloned_volume(self): model_info = self.driver.create_volume(TEST_VOLUME) self.volume_update(TEST_VOLUME, model_info) self.assertEqual(FAKE_MODEL_INFO1, model_info) model_info = self.driver.create_cloned_volume(TEST_CLONE, TEST_VOLUME) self.volume_update(TEST_CLONE, model_info) self.assertEqual(FAKE_MODEL_INFO2, model_info) self.driver.delete_volume(TEST_CLONE) self.driver.delete_volume(TEST_VOLUME) def test_extend_volume(self): # Test the extension of volume created on RaidGroup and # ThinProvisioningPool separately. 
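        # Same coverage as the FC variant: one thin-provisioned volume and one
        # RAID-group volume are created and then extended to 10 GB.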
TEST_VOLUME_LIST = [TEST_VOLUME, TEST_VOLUME2] FAKE_MODEL_INFO_LIST = [FAKE_MODEL_INFO1, FAKE_MODEL_INFO3] for i in range(len(TEST_VOLUME_LIST)): model_info = self.driver.create_volume(TEST_VOLUME_LIST[i]) self.volume_update(TEST_VOLUME_LIST[i], model_info) self.assertEqual(FAKE_MODEL_INFO_LIST[i], model_info) self.driver.extend_volume(TEST_VOLUME_LIST[i], 10) def test_create_volume_with_qos(self): self.driver.common._get_qos_specs = mock.Mock() self.driver.common._get_qos_specs.return_value = {'maxBWS': '700'} self.driver.common._set_qos = mock.Mock() model_info = self.driver.create_volume(TEST_VOLUME_QOS) self.volume_update(TEST_VOLUME, model_info) self.assertEqual(FAKE_MODEL_INFO_QOS, model_info) self.driver.common._set_qos.assert_called() def test_update_migrated_volume(self): model_info = self.driver.create_volume(TEST_VOLUME) self.volume_update(TEST_VOLUME, model_info) self.assertEqual(FAKE_MODEL_INFO1, model_info) model_info2 = self.driver.create_volume(TEST_VOLUME2) self.volume_update(TEST_VOLUME2, model_info2) self.assertEqual(FAKE_MODEL_INFO3, model_info2) model_update = self.driver.update_migrated_volume(self.context, TEST_VOLUME, TEST_VOLUME2, 'available') FAKE_MIGRATED_MODEL_UPDATE = { '_name_id': TEST_VOLUME2['id'], 'provider_location': model_info2['provider_location'] } self.assertEqual(FAKE_MIGRATED_MODEL_UPDATE, model_update) def test_revert_to_snapshot(self): self.driver.common.revert_to_snapshot = mock.Mock() model_info = self.driver.create_volume(TEST_VOLUME) self.volume_update(TEST_VOLUME, model_info) self.assertEqual(FAKE_MODEL_INFO1, model_info) snap_info = self.driver.create_snapshot(TEST_SNAP) self.volume_update(TEST_SNAP, snap_info) self.assertEqual(FAKE_SNAP_INFO, snap_info) self.driver.revert_to_snapshot(self.context, TEST_VOLUME, TEST_SNAP) self.driver.common.revert_to_snapshot.assert_called_with(TEST_VOLUME, TEST_SNAP) class FJCLITestCase(test.TestCase): def __init__(self, *args, **kwargs): super(FJCLITestCase, self).__init__(*args, **kwargs) def setUp(self): super(FJCLITestCase, self).setUp() self.mock_object(ssh_utils, 'SSHPool', mock.Mock()) self.mock_object(eternus_dx_cli.FJDXCLI, '_exec_cli_with_eternus', self.fake_exec_cli_with_eternus) cli = eternus_dx_cli.FJDXCLI(user=TEST_USER, storage_ip=STORAGE_IP, password=TEST_PASSWORD) self.cli = cli def create_fake_options(self, **kwargs): # Create options for CLI command. 
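        # Keyword arguments use underscores while the ETERNUS CLI expects
        # hyphenated option names, hence the replace() below.  For example,
        # create_fake_options(bandwidth_limit=2) yields {'bandwidth-limit': 2}.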
FAKE_OPTION_DICT = {} for key, value in kwargs.items(): processed_key = key.replace('_', '-') FAKE_OPTION_DICT[processed_key] = value FAKE_OPTION = {**FAKE_OPTION_DICT} return FAKE_OPTION def fake_exec_cli_with_eternus(self, exec_cmdline): if exec_cmdline == "show users": ret = ('\r\nCLI> %s\r\n00\r\n' '3B\r\nf.ce\tMaintainer\t01\t00' '\t00\t00\r\ntestuser\tSoftware' '\t01\t01\t00\t00\r\nCLI> ' % exec_cmdline) elif exec_cmdline.startswith('expand volume'): ret = '%s\r\n00\r\nCLI> ' % exec_cmdline elif exec_cmdline.startswith('set volume-qos'): ret = '%s\r\n00\r\n0001\r\nCLI> ' % exec_cmdline elif exec_cmdline.startswith('show volumes'): ret = ('\r\nCLI> %s\r\n00\r\n0560\r\n0000' '\tFJosv_0qJ4rpOHgFE8ipcJOMfBmg==' '\tA001\t0B\t00\t0000\tabcd1234_TPP' '\t0000000000200000\t00\t00' '\t00000000\t0050\tFF\t00\tFF' '\tFF\t20\tFF\tFFFF\t00' '\t600000E00D2A0000002A011500140000' '\t00\t00\tFF\tFF\tFFFFFFFF\t00' '\t00\tFF\r\n0001\tFJosv_OgEZj1mSvKRvIKOExKktlg==' '\tA001\t0B\t00\t0000\tabcd1234_OSVD' '\t0000000000200000\t00\t00\t00000000' '\t0050\tFF\t00\tFF\tFF\t20\tFF\tFFFF' '\t00\t600000E00D2A0000002A0115001E0000' '\t00\t00\tFF\tFF\tFFFFFFFF\t00' '\t00\tFF' % exec_cmdline) elif exec_cmdline.startswith('show enclosure-status'): ret = ('\r\nCLI> %s\r\n00\r\n' 'ETDX200S3_1\t01\tET203ACU\t4601417434\t280753\t20' '\t00\t00\t01\t02\t01001000\tV10L87-9000\t91\r\n02' '\r\n70000000\t30\r\nD0000100\t30\r\nCLI> ' % exec_cmdline) elif exec_cmdline.startswith('show volume-qos'): ret = ('\r\nCLI> %s\r\n00\r\n' '0001\r\n0000\tFJosv_0qJ4rpOHgFE8ipcJOMfBmg==\t01\t00\t00' '\r\nCLI> ' % exec_cmdline) elif exec_cmdline.startswith('show copy-sessions'): ret = ('\r\nCLI> %s\r\n00\r\n0001\t\r\n' '0001\tFFFF\t01\t08\tFF\tFF\t03\t02\tFF\tFF\t05ABD7D2\t' '########################################\t' '########################################\t' '00000281\t00000286\t0001\t00\tFF\t0000000000000800\t' '0000000000000000\t0000000000000100\t0000000000000800\t' '04\t00\t00000000\t2020101009341400\t01\t10\tFFFF\tFFFF\t' '0000000000000000\tFFFFFFFFFFFFFFFF\tFFFFFFFFFFFFFFFF\tFF\t' 'FF\t64\t00\t07\t00\t00\t00\r\nCLI> ' % exec_cmdline) elif exec_cmdline.startswith('show qos-bandwidth-limit'): ret = ('\r\nCLI> %s\r\n00\r\n0001\t\r\n00\t0000ffff\t0000ffff' '\t0000ffff\t0000ffff\t0000ffff\t0000ffff\t0000ffff' '\t0000ffff\t0000ffff\t0000ffff\t0000ffff\t0000ffff\r\n' 'CLI> ' % exec_cmdline) elif exec_cmdline.startswith('set qos-bandwidth-limit'): ret = '%s\r\n00\r\n0001\r\nCLI> ' % exec_cmdline elif exec_cmdline.startswith('stop copy-session'): ret = '%s\r\n00\r\nCLI> ' % exec_cmdline elif exec_cmdline.startswith('delete volume'): ret = '%s\r\n00\r\nCLI> ' % exec_cmdline elif exec_cmdline.startswith('start copy-snap-opc'): ret = '%s\r\n00\r\n0019\r\nCLI> ' % exec_cmdline elif exec_cmdline.startswith('start copy-opc'): ret = '%s\r\n00\r\n0019\r\nCLI> ' % exec_cmdline elif exec_cmdline.startswith('show cli-error-code'): ret = '%s\r\n00\r\n0001\r\n0001\tBad Value\r\nCLI> ' % exec_cmdline else: ret = None return ret def test_show_cli_error_message(self): FAKE_OPTION = {'error-code': '0001'} FAKE_MESSAGE = 'Bad Value' FAKE_MESSAGE_OUTPUT = {**FAKE_CLI_OUTPUT, 'message': FAKE_MESSAGE} ERROR_MESSAGE_OUTPUT = self.cli._show_cli_error_message(**FAKE_OPTION) self.assertEqual(FAKE_MESSAGE_OUTPUT, ERROR_MESSAGE_OUTPUT) def test_create_error_message(self): FAKE_CODE = '0001' FAKE_MSG = 'Bad Value' expected_error_message = ('E' + FAKE_CODE, FAKE_MSG) ERROR_MESSAGE = self.cli._create_error_message(FAKE_CODE, FAKE_MSG) 
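# _create_error_message is expected to pair the message text with the error
# code prefixed by 'E', matching the tuple built above.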
self.assertEqual(expected_error_message, ERROR_MESSAGE) def test_get_options(self): expected_option = " -bandwidth-limit 2" option = {"bandwidth-limit": 2} ret = self.cli._get_option(**option) self.assertEqual(expected_option, ret) def test_done_and_default_func(self): # Test function 'done' and '_default_func' in CLI file. self.cli.CMD_dic['check_user_role'] = mock.Mock() self.cli._default_func = mock.Mock( side_effect=Exception('Invalid function is specified')) cmd1 = 'check_user_role' self.cli.done(cmd1) self.cli.CMD_dic['check_user_role'].assert_called_with() cmd2 = 'test_run_cmd' cli_ex = None try: self.cli.done(cmd2) except Exception as ex: cli_ex = ex finally: self.cli._default_func.assert_called() self.assertEqual(str(cli_ex), "Invalid function is specified") def test_check_user_role(self): FAKE_ROLE = {**FAKE_CLI_OUTPUT, 'message': 'Software'} role = self.cli._check_user_role() self.assertEqual(FAKE_ROLE, role) def test_expand_volume(self): FAKE_VOLUME_NAME = 'FJosv_0qJ4rpOHgFE8ipcJOMfBmg==' FAKE_RG_NAME = 'abcd1234_RG' FAKE_SIZE = '10gb' FAKE_EXPAND_OPTION = self.create_fake_options( volume_name=FAKE_VOLUME_NAME, rg_name=FAKE_RG_NAME, size=FAKE_SIZE) EXPAND_OUTPUT = self.cli._expand_volume(**FAKE_EXPAND_OPTION) FAKE_EXPAND_OUTPUT = {**FAKE_CLI_OUTPUT, 'message': []} self.assertEqual(FAKE_EXPAND_OUTPUT, EXPAND_OUTPUT) def test_set_volume_qos(self): FAKE_VOLUME_NAME = 'FJosv_0qJ4rpOHgFE8ipcJOMfBmg==' FAKE_BANDWIDTH_LIMIT = 2 FAKE_QOS_OPTION = self.create_fake_options( volume_name=FAKE_VOLUME_NAME, bandwidth_limit=FAKE_BANDWIDTH_LIMIT) FAKE_VOLUME_NUMBER = ['0001'] FAKE_QOS_OUTPUT = {**FAKE_CLI_OUTPUT, 'message': FAKE_VOLUME_NUMBER} volume_number = self.cli._set_volume_qos(**FAKE_QOS_OPTION) self.assertEqual(FAKE_QOS_OUTPUT, volume_number) def test_show_copy_sessions(self): FAKE_COPY_SESSION = [{ 'Source Num': 641, 'Dest Num': 646, 'Type': 'Snap', 'Status': 'Active', 'Phase': 'Tracking', 'Session ID': 1, }] FAKE_COPY_SESSION_OUTPUT = {**FAKE_CLI_OUTPUT, 'message': FAKE_COPY_SESSION} cpdatalist = self.cli._show_copy_sessions() self.assertEqual(FAKE_COPY_SESSION_OUTPUT, cpdatalist) def test_show_pool_provision(self): FAKE_POOL_PROVISION_OPTION = self.create_fake_options( pool_name='abcd1234_TPP') FAKE_PROVISION = {**FAKE_CLI_OUTPUT, 'message': 2048.0} provision = self.cli._show_pool_provision(**FAKE_POOL_PROVISION_OPTION) self.assertEqual(FAKE_PROVISION, provision) def test_show_qos_bandwidth_limit(self): FAKE_QOS_BANDWIDTH_LIMIT = {'read_bytes_sec': 65535, 'read_iops_sec': 65535, 'read_limit': 0, 'total_bytes_sec': 65535, 'total_iops_sec': 65535, 'total_limit': 0, 'write_bytes_sec': 65535, 'write_iops_sec': 65535, 'write_limit': 0} FAKE_QOS_LIST = {**FAKE_CLI_OUTPUT, 'message': [FAKE_QOS_BANDWIDTH_LIMIT]} qos_list = self.cli._show_qos_bandwidth_limit() self.assertEqual(FAKE_QOS_LIST, qos_list) def test_set_qos_bandwidth_limit(self): FAKE_VOLUME_NAME = 'FJosv_0qJ4rpOHgFE8ipcJOMfBmg==' FAKE_READ_BANDWIDTH_LIMIT = 2 FAKE_WRITE_BANDWIDTH_LIMIT = 3 FAKE_QOS_OPTION = self.create_fake_options( volume_name=FAKE_VOLUME_NAME, read_bandwidth_limit=FAKE_READ_BANDWIDTH_LIMIT, write_bandwidth_limit=FAKE_WRITE_BANDWIDTH_LIMIT) FAKE_VOLUME_NUMBER = ['0001'] FAKE_QOS_OUTPUT = {**FAKE_CLI_OUTPUT, 'message': FAKE_VOLUME_NUMBER} volume_number = self.cli._set_qos_bandwidth_limit(**FAKE_QOS_OPTION) self.assertEqual(FAKE_QOS_OUTPUT, volume_number) def test_show_volume_qos(self): FAKE_VOLUME_QOS = {'total_limit': 1, 'read_limit': 0, 'write_limit': 0} FAKE_VQOS_DATA_LIST = {**FAKE_CLI_OUTPUT, 'message':
[FAKE_VOLUME_QOS]} vqos_datalist = self.cli._show_volume_qos() self.assertEqual(FAKE_VQOS_DATA_LIST, vqos_datalist) def test_show_enclosure_status(self): FAKE_VERSION = 'V10L87-9000' FAKE_VERSION_INFO = {**FAKE_CLI_OUTPUT, 'message': {'version': FAKE_VERSION}} versioninfo = self.cli._show_enclosure_status() self.assertEqual(FAKE_VERSION_INFO, versioninfo) def test_start_copy_snap_opc(self): FAKE_SNAP_OPC_OPTION = self.create_fake_options( mode='normal', source_volume_number=31, destination_volume_number=39, source_lba=0, destination=0, size=1 ) FAKE_OPC_ID = '0019' FAKE_OPC_INFO = {**FAKE_CLI_OUTPUT, 'message': [FAKE_OPC_ID]} opc_id = self.cli._start_copy_snap_opc(**FAKE_SNAP_OPC_OPTION) self.assertEqual(FAKE_OPC_INFO, opc_id) def test_stop_copy_session(self): FAKE_SESSION_ID = '0001' FAKE_STOP_OUTPUT = {**FAKE_CLI_OUTPUT, 'message': []} FAKE_STOP_COPY_SESSION_OPTION = self.create_fake_options( session_id=FAKE_SESSION_ID) stop_output = self.cli._stop_copy_session( **FAKE_STOP_COPY_SESSION_OPTION) self.assertEqual(FAKE_STOP_OUTPUT, stop_output) def test_start_copy_opc(self): FAKE_SNAP_OPC_OPTION = self.create_fake_options( source_volume_number=31, destination_volume_number=39, ) FAKE_OPC_ID = '0019' FAKE_OPC_INFO = {**FAKE_CLI_OUTPUT, 'message': [FAKE_OPC_ID]} opc_id = self.cli._start_copy_opc(**FAKE_SNAP_OPC_OPTION) self.assertEqual(FAKE_OPC_INFO, opc_id) def test_delete_volume(self): FAKE_VOLUME_NAME = 'FJosv_0qJ4rpOHgFE8ipcJOMfBmg==' FAKE_DELETE_OUTPUT = {**FAKE_CLI_OUTPUT, 'message': []} FAKE_DELETE_VOLUME_OPTION = self.create_fake_options( volume_name=FAKE_VOLUME_NAME) delete_output = self.cli._delete_volume(**FAKE_DELETE_VOLUME_OPTION) self.assertEqual(FAKE_DELETE_OUTPUT, delete_output) class FJCommonTestCase(test.TestCase): def __init__(self, *args, **kwargs): super(FJCommonTestCase, self).__init__(*args, **kwargs) def setUp(self): super(FJCommonTestCase, self).setUp() # Make fake xml-configuration file. self.config_file = tempfile.NamedTemporaryFile("w+", suffix='.xml') self.addCleanup(self.config_file.close) self.config_file.write(CONF) self.config_file.flush() # Make fake Object by using mock as configuration object. self.configuration = mock.Mock(spec=conf.Configuration) self.configuration.cinder_eternus_config_file = self.config_file.name self.configuration.safe_get = self.fake_safe_get self.configuration.max_over_subscription_ratio = '20.0' self.configuration.fujitsu_passwordless = False self.configuration.fujitsu_private_key_path = PRIVATE_KEY_PATH self.configuration.fujitsu_use_cli_copy = False self.mock_object(dx_common.FJDXCommon, '_get_eternus_connection', self.fake_eternus_connection) instancename = FakeCIMInstanceName() self.mock_object(dx_common.FJDXCommon, '_create_eternus_instance_name', instancename.fake_create_eternus_instance_name) self.mock_object(ssh_utils, 'SSHPool', mock.Mock()) self.mock_object(dx_common.FJDXCommon, '_get_qos_specs', return_value={}) self.mock_object(eternus_dx_cli.FJDXCLI, '_exec_cli_with_eternus', self.fake_exec_cli_with_eternus) # Set iscsi driver to self.driver. 
driver = dx_iscsi.FJDXISCSIDriver(configuration=self.configuration) self.driver = driver self.context = context.get_admin_context() def fake_exec_cli_with_eternus(self, exec_cmdline): if exec_cmdline == "show users": ret = ('\r\nCLI> %s\r\n00\r\n' '3B\r\nf.ce\tMaintainer\t01\t00' '\t00\t00\r\ntestuser\tSoftware' '\t01\t01\t00\t00\r\nCLI> ' % exec_cmdline) elif exec_cmdline.startswith('set volume-qos'): ret = '%s\r\n00\r\n0001\r\nCLI> ' % exec_cmdline elif exec_cmdline.startswith('show volumes'): ret = ('\r\nCLI> %s\r\n00\r\n0560\r\n0000' '\tFJosv_0qJ4rpOHgFE8ipcJOMfBmg==' '\tA001\t0B\t00\t0000\tabcd1234_TPP' '\t0000000000200000\t00\t00' '\t00000000\t0050\tFF\t00\tFF' '\tFF\t20\tFF\tFFFF\t00' '\t600000E00D2A0000002A011500140000' '\t00\t00\tFF\tFF\tFFFFFFFF\t00' '\t00\tFF\r\n0001\tFJosv_OgEZj1mSvKRvIKOExKktlg==' '\tA001\t0B\t00\t0000\tabcd1234_OSVD' '\t0000000000200000\t00\t00\t00000000' '\t0050\tFF\t00\tFF\tFF\t20\tFF\tFFFF' '\t00\t600000E00D2A0000002A0115001E0000' '\t00\t00\tFF\tFF\tFFFFFFFF\t00' '\t00\tFF' % exec_cmdline) elif exec_cmdline.startswith('show enclosure-status'): ret = ('\r\nCLI> %s\r\n00\r\n' 'ETDX200S3_1\t01\tET203ACU\t4601417434\t280753\t20' '\t00\t00\t01\t02\t01001000\tV10L87-9000\t91\r\n02' '\r\n70000000\t30\r\nD0000100\t30\r\nCLI> ' % exec_cmdline) elif exec_cmdline.startswith('show volume-qos'): ret = ('\r\nCLI> %s\r\n00\r\n' '0001\r\n0000\tFJosv_0qJ4rpOHgFE8ipcJOMfBmg==\t01\t00\t00' '\r\nCLI> ' % exec_cmdline) elif exec_cmdline.startswith('show copy-sessions'): ret = ('\r\nCLI> %s\r\n00\r\n0001\t\r\n' '0001\tFFFF\t01\t08\tFF\tFF\t03\t02\tFF\tFF\t05ABD7D2\t' '########################################\t' '########################################\t' '00000281\t00000286\t0001\t00\tFF\t0000000000000800\t' '0000000000000000\t0000000000000100\t0000000000000800\t' '04\t00\t00000000\t2020101009341400\t01\t10\tFFFF\tFFFF\t' '0000000000000000\tFFFFFFFFFFFFFFFF\tFFFFFFFFFFFFFFFF\tFF\t' 'FF\t64\t00\t07\t00\t00\t00\r\nCLI> ' % exec_cmdline) elif exec_cmdline.startswith('show qos-bandwidth-limit'): ret = ('\r\nCLI> %s\r\n00\r\n0001\t\r\n00\t0000ffff\t0000ffff' '\t0000ffff\t0000ffff\t0000ffff\t0000ffff\t0000ffff' '\t0000ffff\t0000ffff\t0000ffff\t0000ffff\t0000ffff\r\n' 'CLI> ' % exec_cmdline) elif exec_cmdline.startswith('set qos-bandwidth-limit'): ret = '%s\r\n00\r\n0001\r\nCLI> ' % exec_cmdline elif exec_cmdline.startswith('stop copy-session'): ret = '%s\r\n00\r\nCLI> ' % exec_cmdline else: ret = None return ret def fake_safe_get(self, str=None): return str def fake_eternus_connection(self): conn = FakeEternusConnection() return conn def test_get_volume_number(self): vol_instance = FakeCIMInstanceName() vol_instance['ElementName'] = 'FJosv_0qJ4rpOHgFE8ipcJOMfBmg==' vol_instance['Purpose'] = '00228+0x06' vol_instance['Name'] = None vol_instance['DeviceID'] = FAKE_LUN_ID1 vol_instance['SystemName'] = STORAGE_SYSTEM vol_instance.path = '' vol_instance.classname = 'FUJITSU_StorageVolume' volume_no = self.driver.common._get_volume_number(vol_instance) self.assertEqual(FAKE_LUN_NO1, volume_no) def volume_update(self, volume, diction): for key, value in diction.items(): volume[key] = value @mock.patch.object(ssh_utils, 'SSHPool') def test_ssh_to_storage_by_password(self, mock_ssh_pool): command = 'show_enclosure_status' self.driver.common.fjdxcli = {} self.driver.common._exec_eternus_cli(command) mock_ssh_pool.assert_called_with(STORAGE_IP, 22, None, TEST_USER, password=TEST_PASSWORD, max_size=2) @mock.patch.object(ssh_utils, 'SSHPool') def test_ssh_to_storage_by_key(self, mock_ssh_pool): 
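# With fujitsu_passwordless enabled, the SSH pool is expected to be opened
# with the configured private key instead of the password (asserted below).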
command = 'show_enclosure_status' self.configuration.fujitsu_passwordless = True driver = dx_iscsi.FJDXISCSIDriver(configuration=self.configuration) self.driver = driver self.driver.common.fjdxcli = {} self.driver.common._exec_eternus_cli(command) mock_ssh_pool.assert_called_with(STORAGE_IP, 22, None, TEST_USER, privatekey=PRIVATE_KEY_PATH, max_size=2) def test_exec_eternus_cli_success(self): command = 'show_enclosure_status' FAKE_CLI_ENCLOSURE_STATUS = (0, None, {'version': 'V10L87-9000'}) cli_enclosure_status = self.driver.common._exec_eternus_cli(command) self.assertEqual(FAKE_CLI_ENCLOSURE_STATUS, cli_enclosure_status) @mock.patch.object(eternus_dx_cli.FJDXCLI, '_exec_cli_with_eternus') def test_exec_eternus_cli_success_with_retry(self, mock_exec_cli_with_eternus): command = 'stop_copy_session' mock_exec_cli_with_eternus.side_effect = [ '\r\nCLI> stop copy-session\r\n01\r\n0060\r\nCLI> ', '\r\nCLI> show cli-error-code -error-code ' '0060\r\n00\r\n0060\r\n0060\tResource locked\r\nCLI> ', '\r\nCLI> stop copy-session\r\n00\r\nCLI> '] retry_msg = 'INFO:cinder.volume.drivers.fujitsu.eternus_dx.' \ 'eternus_dx_common:_exec_eternus_cli, retry, ' \ 'ip: 172.16.0.2, RetryCode: E0060, TryNum: 1.' FAKE_STOP_COPY_SESSION = (0, None, []) with self.assertLogs('cinder.volume.drivers.fujitsu.eternus_dx.' 'eternus_dx_common', level='INFO') as cm: cli_return = self.driver.common._exec_eternus_cli(command) self.assertIn(retry_msg, cm.output) self.assertEqual(FAKE_STOP_COPY_SESSION, cli_return) @mock.patch.object(eternus_dx_cli.FJDXCLI, '_exec_cli_with_eternus') def test_exec_eternus_cli_authentication_fail(self, mock_exec_cli_with_eternus): command = 'check_user_role' mock_exec_cli_with_eternus.side_effect = ( exception.VolumeBackendAPIException( 'Execute CLI command error. Error: Authentication failed.')) authentication_fail_msg = 'WARNING:' \ 'cinder.volume.drivers.fujitsu.eternus_dx.' \ 'eternus_dx_common:_exec_eternus_cli, ' \ 'retry, ip: 172.16.0.2, ' \ 'Message: Execute CLI command error. ' \ 'Error: Authentication failed., TryNum: 1.' FAKE_STOP_COPY_SESSION = (4, '4', 'Execute CLI command error. ' 'Error: Authentication failed.') with self.assertLogs('cinder.volume.drivers.fujitsu.eternus_dx.' 'eternus_dx_common', level='WARNING') as cm: cli_return = self.driver.common._exec_eternus_cli(command) self.assertIn(authentication_fail_msg, cm.output) self.assertEqual(FAKE_STOP_COPY_SESSION, cli_return) @mock.patch.object(eternus_dx_cli.FJDXCLI, '_exec_cli_with_eternus') @mock.patch.object(dx_common, 'LOG') def test_exec_eternus_cli_retry_exceed(self, mock_log, mock_exec_cli_with_eternus): command = 'stop_copy_session' mock_exec_cli_with_eternus.side_effect = [ '\r\nCLI> stop copy-session\r\n01\r\n0060\r\nCLI> ', '\r\nCLI> show cli-error-code -error-code ' '0060\r\n00\r\n0060\r\n0060\tResource locked\r\nCLI> '] * 3 exceed_msg = '_exec_eternus_cli, Retry was exceeded.'
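# Once the retry limit is exhausted, the call is expected to give up and
# return the last CLI error: return code 4 with code E0060 ('Resource locked').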
FAKE_STOP_COPY_SESSION = (4, 'E0060', 'Resource locked') cli_return = self.driver.common._exec_eternus_cli(command) mock_log.warning.assert_called_with(exceed_msg) self.assertEqual(FAKE_STOP_COPY_SESSION, cli_return) def test_get_eternus_model(self): ETERNUS_MODEL = self.driver.common._get_eternus_model() self.assertEqual(3, ETERNUS_MODEL) def test_get_metadata(self): model_info = self.driver.create_volume(TEST_VOLUME) self.volume_update(TEST_VOLUME, model_info) self.assertEqual(FAKE_MODEL_INFO1, model_info) TEST_METADATA = self.driver.common.get_metadata(TEST_VOLUME) self.assertEqual(FAKE_LUN_META1, TEST_METADATA) def test_is_qos_or_format_support(self): QOS_SUPPORT = \ self.driver.common._is_qos_or_format_support('QOS setting') self.assertTrue(QOS_SUPPORT) def test_get_qos_category_by_value(self): FAKE_QOS_KEY = 'maxBWS' FAKE_QOS_VALUE = 700 FAKE_QOS_DICT = {'bandwidth-limit': 2} QOS_Category_Dict = self.driver.common._get_qos_category_by_value( FAKE_QOS_KEY, FAKE_QOS_VALUE) self.assertEqual(FAKE_QOS_DICT, QOS_Category_Dict) def test_get_param(self): FAKE_QOS_SPEC_DICT = {'total_bytes_sec': 2137152, 'read_bytes_sec': 1068576, 'unsupported_key': 1234} EXPECTED_KEY_DICT = {'read_bytes_sec': int(FAKE_QOS_SPEC_DICT ['read_bytes_sec'] / units.Mi), 'read_iops_sec': MAX_IOPS, 'total_bytes_sec': int(FAKE_QOS_SPEC_DICT ['total_bytes_sec'] / units.Mi), 'total_iops_sec': MAX_IOPS} KEY_DICT = self.driver.common._get_param(FAKE_QOS_SPEC_DICT) self.assertEqual(EXPECTED_KEY_DICT, KEY_DICT) def test_check_iops(self): FAKE_QOS_KEY = 'total_iops_sec' FAKE_QOS_VALUE = 2137152 QOS_VALUE = self.driver.common._check_iops(FAKE_QOS_KEY, FAKE_QOS_VALUE) self.assertEqual(FAKE_QOS_VALUE, QOS_VALUE) def test_check_throughput(self): FAKE_QOS_KEY = 'total_bytes_sec' FAKE_QOS_VALUE = 2137152 QOS_VALUE = self.driver.common._check_throughput(FAKE_QOS_KEY, FAKE_QOS_VALUE) self.assertEqual(int(FAKE_QOS_VALUE / units.Mi), QOS_VALUE) def test_get_qos_category(self): FAKE_QOS_SPEC_DICT = {'total_bytes_sec': 2137152, 'read_bytes_sec': 1068576} FAKE_KEY_DICT = {'read_bytes_sec': int(FAKE_QOS_SPEC_DICT ['read_bytes_sec'] / units.Mi), 'read_iops_sec': MAX_IOPS, 'total_bytes_sec': int(FAKE_QOS_SPEC_DICT ['total_bytes_sec'] / units.Mi), 'total_iops_sec': MAX_IOPS} FAKE_RET_DICT = {'bandwidth-limit': FAKE_KEY_DICT['total_bytes_sec'], 'read-bandwidth-limit': FAKE_KEY_DICT['read_bytes_sec'], 'write-bandwidth-limit': 0} RET_DICT = self.driver.common._get_qos_category(FAKE_KEY_DICT) self.assertEqual(FAKE_RET_DICT, RET_DICT) @mock.patch.object(eternus_dx_cli.FJDXCLI, '_exec_cli_with_eternus') def test_set_limit(self, mock_exec_cli_with_eternus): exec_cmdline = 'set qos-bandwidth-limit -mode volume-qos ' \ '-bandwidth-limit 5 -iops 10000 -throughput 450' mock_exec_cli_with_eternus.return_value = \ '\r\nCLI> %s\r\n00\r\n0001\r\nCLI> ' % exec_cmdline FAKE_MODE = 'volume-qos' FAKE_LIMIT = 5 FAKE_IOPS = 10000 FAKE_THROUGHPUT = 450 self.driver.common._set_limit(FAKE_MODE, FAKE_LIMIT, FAKE_IOPS, FAKE_THROUGHPUT) mock_exec_cli_with_eternus.assert_called_with(exec_cmdline) def test_get_copy_sessions_list(self): FAKE_COPY_SESSION = [{ 'Source Num': 641, 'Dest Num': 646, 'Type': 'Snap', 'Status': 'Active', 'Phase': 'Tracking', 'Session ID': 1, }] copy_session_list = self.driver.common._get_copy_sessions_list() self.assertEqual(FAKE_COPY_SESSION, copy_session_list) def test_update_migrated_volume(self): model_info = self.driver.create_volume(TEST_VOLUME) self.volume_update(TEST_VOLUME, model_info) self.assertEqual(FAKE_MODEL_INFO1, model_info)
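# Create the destination volume, then check that the migrated-volume update
# points _name_id at it and carries its provider_location.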
model_info2 = self.driver.create_volume(TEST_VOLUME2) self.volume_update(TEST_VOLUME2, model_info2) self.assertEqual(FAKE_MODEL_INFO3, model_info2) model_update = self.driver.common.update_migrated_volume(self.context, TEST_VOLUME, TEST_VOLUME2) FAKE_MIGRATED_MODEL_UPDATE = { '_name_id': TEST_VOLUME2['id'], 'provider_location': model_info2['provider_location'] } self.assertEqual(FAKE_MIGRATED_MODEL_UPDATE, model_update) def test_create_snapshot(self): model_info = self.driver.create_volume(TEST_VOLUME) self.volume_update(TEST_VOLUME, model_info) self.assertEqual(FAKE_MODEL_INFO1, model_info) snap_info = self.driver.common._create_snapshot(TEST_SNAP) self.assertEqual(FAKE_SNAP_INFO, snap_info) self.driver.delete_volume(TEST_VOLUME) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/volume/drivers/test_gpfs.py0000664000175000017500000034532300000000000024023 0ustar00zuulzuul00000000000000 # Copyright IBM Corp. 2013 All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import os import tempfile from unittest import mock from oslo_concurrency import processutils from oslo_config import cfg from oslo_utils import timeutils from oslo_utils import units from cinder import context from cinder import exception from cinder import objects from cinder.objects import fields from cinder.tests.unit import fake_constants as fake from cinder.tests.unit import test from cinder.tests.unit import utils as test_utils from cinder import utils from cinder.volume import configuration as conf from cinder.volume.drivers.ibm import gpfs from cinder.volume import volume_types CONF = cfg.CONF class FakeQemuImgInfo(object): def __init__(self): self.file_format = None self.backing_file = None class GPFSDriverTestCase(test.TestCase): driver_name = "cinder.volume.drivers.gpfs.GPFSDriver" context = context.get_admin_context() def _execute_wrapper(self, cmd, *args, **kwargs): try: kwargs.pop('run_as_root') except KeyError: pass return utils.execute(cmd, *args, **kwargs) def setUp(self): super(GPFSDriverTestCase, self).setUp() self.volumes_path = tempfile.mkdtemp(prefix="gpfs_") self.images_dir = '%s/images' % self.volumes_path self.addCleanup(self._cleanup, self.images_dir, self.volumes_path) if not os.path.exists(self.volumes_path): os.mkdir(self.volumes_path) if not os.path.exists(self.images_dir): os.mkdir(self.images_dir) self.image_id = '70a599e0-31e7-49b7-b260-868f441e862b' self.driver = gpfs.GPFSDriver( configuration=conf.Configuration([], conf.SHARED_CONF_GROUP)) self.driver.gpfs_execute = self._execute_wrapper exec_patcher = mock.patch.object(self.driver, '_execute', self._execute_wrapper) exec_patcher.start() self.addCleanup(exec_patcher.stop) self.driver._cluster_id = '123456' self.driver._gpfs_device = '/dev/gpfs' self.driver._storage_pool = 'system' self.driver._encryption_state = 'yes' self.override_config('volume_driver', self.driver_name, conf.SHARED_CONF_GROUP) self.override_config('gpfs_mount_point_base', 
self.volumes_path, conf.SHARED_CONF_GROUP) self.context = context.get_admin_context() self.context.user_id = 'fake' self.context.project_id = 'fake' self.updated_at = timeutils.utcnow() CONF.gpfs_images_dir = self.images_dir def _cleanup(self, images_dir, volumes_path): try: os.rmdir(images_dir) os.rmdir(volumes_path) except OSError: pass def test_different(self): self.assertTrue(gpfs._different((True, False))) self.assertFalse(gpfs._different((True, True))) self.assertFalse(gpfs._different(None)) def test_sizestr(self): self.assertEqual('10G', gpfs._sizestr('10')) @mock.patch('cinder.utils.execute') def test_gpfs_local_execute(self, mock_exec): mock_exec.return_value = 'test' self.driver._gpfs_local_execute('test') expected = [mock.call('test', run_as_root=True)] self.assertEqual(expected, mock_exec.mock_calls) @mock.patch('cinder.utils.execute') def test_get_gpfs_state_ok(self, mock_exec): mock_exec.return_value = ('mmgetstate::HEADER:version:reserved:' 'reserved:nodeName:nodeNumber:state:quorum:' 'nodesUp:totalNodes:remarks:cnfsState:\n' 'mmgetstate::0:1:::devstack:3:active:2:3:3:' 'quorum node:(undefined):', '') self.assertTrue(self.driver._get_gpfs_state().splitlines()[1]. startswith('mmgetstate::0:1:::devstack')) @mock.patch('cinder.utils.execute') def test_get_gpfs_state_fail_mmgetstate(self, mock_exec): mock_exec.side_effect = processutils.ProcessExecutionError( stdout='test', stderr='test') self.assertRaises(exception.VolumeBackendAPIException, self.driver._get_gpfs_state) @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._get_gpfs_state') def test_check_gpfs_state_ok(self, mock_get_gpfs_state): mock_get_gpfs_state.return_value = ('mmgetstate::HEADER:version:' 'reserved:reserved:nodeName:' 'nodeNumber:state:quorum:nodesUp:' 'totalNodes:remarks:cnfsState:\n' 'mmgetstate::0:1:::devstack:3:' 'active:2:3:3:' 'quorum node:(undefined):') self.driver._check_gpfs_state() @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._get_gpfs_state') def test_check_gpfs_state_fail_not_active(self, mock_get_gpfs_state): mock_get_gpfs_state.return_value = ('mmgetstate::HEADER:version:' 'reserved:reserved:nodeName:' 'nodeNumber:state:quorum:nodesUp:' 'totalNodes:remarks:cnfsState:\n' 'mmgetstate::0:1:::devstack:3:' 'arbitrating:2:3:3:' 'quorum node:(undefined):') self.assertRaises(exception.VolumeBackendAPIException, self.driver._check_gpfs_state) @mock.patch('cinder.utils.execute') def test_same_filesystem_ok(self, mock_exec): # returns filesystem id in hex mock_exec.return_value = ('ef0009600000002\nef0009600000002\n', '') self.assertTrue(self.driver._same_filesystem('/path1', '/path2')) @mock.patch('cinder.utils.execute') def test_same_filesystem_not_ok(self, mock_exec): # returns filesystem id in hex mock_exec.return_value = ('ef0009600000002\n000000000000007\n', '') self.assertFalse(self.driver._same_filesystem('/path1', '/path2')) @mock.patch('cinder.utils.execute') def test_same_filesystem_failed(self, mock_exec): mock_exec.side_effect = processutils.ProcessExecutionError( stdout='test', stderr='test') self.assertRaises(exception.VolumeBackendAPIException, self.driver._same_filesystem, '', '') @mock.patch('cinder.utils.execute') def test_get_fs_from_path_ok(self, mock_exec): mock_exec.return_value = ('Filesystem 1K-blocks ' 'Used Available Use%% Mounted on\n' '%s 10485760 531968 9953792' ' 6%% /gpfs0' % self.driver._gpfs_device, '') self.assertEqual(self.driver._gpfs_device, self.driver._get_filesystem_from_path('/gpfs0')) @mock.patch('cinder.utils.execute') def 
test_get_fs_from_path_fail_path(self, mock_exec): mock_exec.return_value = ('Filesystem 1K-blocks ' 'Used Available Use% Mounted on\n' 'test 10485760 531968 ' '9953792 6% /gpfs0', '') self.assertNotEqual(self.driver._gpfs_device, self.driver._get_filesystem_from_path('/gpfs0')) @mock.patch('cinder.utils.execute') def test_get_fs_from_path_fail_raise(self, mock_exec): mock_exec.side_effect = processutils.ProcessExecutionError( stdout='test', stderr='test') self.assertRaises(exception.VolumeBackendAPIException, self.driver._get_filesystem_from_path, '/gpfs0') @mock.patch('cinder.utils.execute') def test_get_gpfs_cluster_id_ok(self, mock_exec): mock_exec.return_value = ('mmlsconfig::HEADER:version:reserved:' 'reserved:configParameter:value:nodeList:\n' 'mmlsconfig::0:1:::clusterId:%s::' % self.driver._cluster_id, '') self.assertEqual(self.driver._cluster_id, self.driver._get_gpfs_cluster_id()) @mock.patch('cinder.utils.execute') def test_get_gpfs_cluster_id_fail_id(self, mock_exec): mock_exec.return_value = ('mmlsconfig::HEADER.:version:reserved:' 'reserved:configParameter:value:nodeList:\n' 'mmlsconfig::0:1:::clusterId:test::', '') self.assertNotEqual(self.driver._cluster_id, self.driver._get_gpfs_cluster_id()) @mock.patch('cinder.utils.execute') def test_get_gpfs_cluster_id_fail_raise(self, mock_exec): mock_exec.side_effect = processutils.ProcessExecutionError( stdout='test', stderr='test') self.assertRaises(exception.VolumeBackendAPIException, self.driver._get_gpfs_cluster_id) @mock.patch('cinder.utils.execute') def test_get_fileset_from_path_ok(self, mock_exec): mock_exec.return_value = ('file name: /gpfs0\n' 'metadata replication: 1 max 2\n' 'data replication: 1 max 2\n' 'immutable: no\n' 'appendOnly: no\n' 'flags:\n' 'storage pool name: system\n' 'fileset name: root\n' 'snapshot name:\n' 'Windows attributes: DIRECTORY', '') self.driver._get_fileset_from_path('') @mock.patch('cinder.utils.execute') def test_get_fileset_from_path_fail_mmlsattr(self, mock_exec): mock_exec.side_effect = processutils.ProcessExecutionError( stdout='test', stderr='test') self.assertRaises(exception.VolumeBackendAPIException, self.driver._get_fileset_from_path, '') @mock.patch('cinder.utils.execute') def test_get_fileset_from_path_fail_find_fileset(self, mock_exec): mock_exec.return_value = ('file name: /gpfs0\n' 'metadata replication: 1 max 2\n' 'data replication: 1 max 2\n' 'immutable: no\n' 'appendOnly: no\n' 'flags:\n' 'storage pool name: system\n' '*** name: root\n' 'snapshot name:\n' 'Windows attributes: DIRECTORY', '') self.assertRaises(exception.VolumeBackendAPIException, self.driver._get_fileset_from_path, '') @mock.patch('cinder.utils.execute') def test_verify_gpfs_pool_ok(self, mock_exec): mock_exec.return_value = ('Storage pools in file system at \'/gpfs0\':' '\n' 'Name Id BlkSize Data ' 'Meta ' 'Total Data in (KB) Free Data in (KB) ' 'Total Meta in (KB) Free Meta in (KB)\n' 'system 0 256 KB yes ' 'yes ' ' 10485760 9953792 ( 95%) ' '10485760 9954560 ( 95%)', '') self.assertEqual('/dev/gpfs', self.driver._gpfs_device) self.assertTrue(self.driver._verify_gpfs_pool('/dev/gpfs')) @mock.patch('cinder.utils.execute') def test_verify_gpfs_pool_fail_pool(self, mock_exec): mock_exec.return_value = ('Storage pools in file system at \'/gpfs0\':' '\n' 'Name Id BlkSize Data ' 'Meta ' 'Total Data in (KB) Free Data in (KB) ' 'Total Meta in (KB) Free Meta in (KB)\n' 'test 0 256 KB yes ' 'yes' ' 10485760 9953792 ( 95%)' ' 10485760 9954560 ( 95%)', '') self.assertEqual('/dev/gpfs', self.driver._gpfs_device) 
self.assertTrue(self.driver._verify_gpfs_pool('/dev/gpfs')) @mock.patch('cinder.utils.execute') def test_verify_gpfs_pool_fail_raise(self, mock_exec): mock_exec.side_effect = processutils.ProcessExecutionError( stdout='test', stderr='test') self.assertFalse(self.driver._verify_gpfs_pool('/dev/gpfs')) @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._verify_gpfs_pool') @mock.patch('cinder.utils.execute') def test_update_volume_storage_pool_ok(self, mock_exec, mock_verify_pool): mock_verify_pool.return_value = True self.assertTrue(self.driver._update_volume_storage_pool('', 'system')) @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._verify_gpfs_pool') @mock.patch('cinder.utils.execute') def test_update_volume_storage_pool_ok_pool_none(self, mock_exec, mock_verify_pool): mock_verify_pool.return_value = True self.assertTrue(self.driver._update_volume_storage_pool('', None)) @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._verify_gpfs_pool') @mock.patch('cinder.utils.execute') def test_update_volume_storage_pool_fail_pool(self, mock_exec, mock_verify_pool): mock_verify_pool.return_value = False self.assertRaises(exception.VolumeBackendAPIException, self.driver._update_volume_storage_pool, '', 'system') @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._verify_gpfs_pool') @mock.patch('cinder.utils.execute') def test_update_volume_storage_pool_fail_mmchattr(self, mock_exec, mock_verify_pool): mock_exec.side_effect = processutils.ProcessExecutionError( stdout='test', stderr='test') mock_verify_pool.return_value = True self.assertFalse(self.driver._update_volume_storage_pool('', 'system')) @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.' '_get_filesystem_from_path') @mock.patch('cinder.utils.execute') def test_get_gpfs_fs_release_level_ok(self, mock_exec, mock_fs_from_path): mock_exec.return_value = ('mmlsfs::HEADER:version:reserved:reserved:' 'deviceName:fieldName:data:remarks:\n' 'mmlsfs::0:1:::gpfs:filesystemVersion:14.03 ' '(4.1.0.0):\n' 'mmlsfs::0:1:::gpfs:filesystemVersionLocal:' '14.03 (4.1.0.0):\n' 'mmlsfs::0:1:::gpfs:filesystemVersionManager' ':14.03 (4.1.0.0):\n' 'mmlsfs::0:1:::gpfs:filesystemVersion' 'Original:14.03 (4.1.0.0):\n' 'mmlsfs::0:1:::gpfs:filesystemHighest' 'Supported:14.03 (4.1.0.0):', '') mock_fs_from_path.return_value = '/dev/gpfs' self.assertEqual(('/dev/gpfs', 1403), self.driver._get_gpfs_fs_release_level('')) @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.' 
'_get_filesystem_from_path') @mock.patch('cinder.utils.execute') def test_get_gpfs_fs_release_level_fail_mmlsfs(self, mock_exec, mock_fs_from_path): mock_exec.side_effect = processutils.ProcessExecutionError( stdout='test', stderr='test') mock_fs_from_path.return_value = '/dev/gpfs' self.assertRaises(exception.VolumeBackendAPIException, self.driver._get_gpfs_fs_release_level, '') @mock.patch('cinder.utils.execute') def test_get_gpfs_cluster_release_level_ok(self, mock_exec): mock_exec.return_value = ('mmlsconfig::HEADER:version:reserved:' 'reserved:configParameter:value:nodeList:\n' 'mmlsconfig::0:1:::minReleaseLevel:1403::', '') self.assertEqual(1403, self.driver._get_gpfs_cluster_release_level()) @mock.patch('cinder.utils.execute') def test_get_gpfs_cluster_release_level_fail_mmlsconfig(self, mock_exec): mock_exec.side_effect = processutils.ProcessExecutionError( stdout='test', stderr='test') self.assertRaises(exception.VolumeBackendAPIException, self.driver._get_gpfs_cluster_release_level) @mock.patch('cinder.utils.execute') def test_is_gpfs_path_fail_mmlsattr(self, mock_exec): mock_exec.side_effect = processutils.ProcessExecutionError( stdout='test', stderr='test') self.assertRaises(exception.VolumeBackendAPIException, self.driver._is_gpfs_path, '/dummy/path') @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.' '_get_fileset_from_path') @mock.patch('cinder.utils.execute') def test_is_same_fileset_ok(self, mock_exec, mock_get_fileset_from_path): mock_get_fileset_from_path.return_value = True self.assertTrue(self.driver._is_same_fileset('', '')) mock_get_fileset_from_path.side_effect = [True, False] self.assertFalse(self.driver._is_same_fileset('', '')) @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.' '_get_available_capacity') @mock.patch('cinder.utils.execute') def test_same_cluster_ok(self, mock_exec, mock_avail_capacity): mock_avail_capacity.return_value = (10192683008, 10737418240) stats = self.driver.get_volume_stats() loc = stats['location_info'] cap = {'location_info': loc} host = {'host': 'foo', 'capabilities': cap} self.assertTrue(self.driver._same_cluster(host)) locinfo = stats['location_info'] + '_' loc = locinfo cap = {'location_info': loc} host = {'host': 'foo', 'capabilities': cap} self.assertFalse(self.driver._same_cluster(host)) @mock.patch('cinder.utils.execute') def test_set_rw_permission(self, mock_exec): self.driver._set_rw_permission('') @mock.patch('cinder.utils.execute') def test_can_migrate_locally(self, mock_exec): host = {'host': 'foo', 'capabilities': ''} self.assertIsNone(self.driver._can_migrate_locally(host)) loc = 'GPFSDriver:%s' % self.driver._cluster_id cap = {'location_info': loc} host = {'host': 'foo', 'capabilities': cap} self.assertIsNone(self.driver._can_migrate_locally(host)) loc = 'GPFSDriver_:%s:testpath' % self.driver._cluster_id cap = {'location_info': loc} host = {'host': 'foo', 'capabilities': cap} self.assertIsNone(self.driver._can_migrate_locally(host)) loc = 'GPFSDriver:%s:testpath' % (self.driver._cluster_id + '_') cap = {'location_info': loc} host = {'host': 'foo', 'capabilities': cap} self.assertIsNone(self.driver._can_migrate_locally(host)) loc = 'GPFSDriver:%s:testpath' % self.driver._cluster_id cap = {'location_info': loc} host = {'host': 'foo', 'capabilities': cap} self.assertEqual('testpath', self.driver._can_migrate_locally(host)) @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.' '_get_gpfs_encryption_status') @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.' 
'_get_gpfs_cluster_release_level') @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._verify_gpfs_pool') @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.' '_get_filesystem_from_path') @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.' '_get_gpfs_cluster_id') @mock.patch('cinder.utils.execute') def test_do_setup_ok(self, mock_exec, mock_get_gpfs_cluster_id, mock_get_filesystem_from_path, mock_verify_gpfs_pool, mock_get_gpfs_fs_rel_lev, mock_verify_encryption_state): ctxt = self.context mock_get_gpfs_cluster_id.return_value = self.driver._cluster_id mock_get_filesystem_from_path.return_value = '/dev/gpfs' mock_verify_gpfs_pool.return_value = True mock_get_gpfs_fs_rel_lev.return_value = 1405 mock_verify_encryption_state.return_value = 'Yes' self.driver.do_setup(ctxt) @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.' '_get_gpfs_cluster_release_level') @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._verify_gpfs_pool') @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.' '_get_filesystem_from_path') @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.' '_get_gpfs_cluster_id') @mock.patch('cinder.utils.execute') def test_do_setup_no_encryption(self, mock_exec, mock_get_gpfs_cluster_id, mock_get_filesystem_from_path, mock_verify_gpfs_pool, mock_get_gpfs_fs_rel_lev): ctxt = self.context mock_get_gpfs_cluster_id.return_value = self.driver._cluster_id mock_get_filesystem_from_path.return_value = '/dev/gpfs' mock_verify_gpfs_pool.return_value = True mock_get_gpfs_fs_rel_lev.return_value = 1403 self.driver.do_setup(ctxt) @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._verify_gpfs_pool') @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.' '_get_filesystem_from_path') @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.' '_get_gpfs_cluster_id') @mock.patch('cinder.utils.execute') def test_do_setup_fail_get_cluster_id(self, mock_exec, mock_get_gpfs_cluster_id, mock_get_filesystem_from_path, mock_verify_gpfs_pool): ctxt = self.context mock_get_gpfs_cluster_id.side_effect = ( processutils.ProcessExecutionError(stdout='test', stderr='test')) mock_get_filesystem_from_path.return_value = '/dev/gpfs' mock_verify_gpfs_pool.return_value = True self.assertRaises(exception.VolumeBackendAPIException, self.driver.do_setup, ctxt) @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._verify_gpfs_pool') @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.' '_get_filesystem_from_path') @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.' '_get_gpfs_cluster_id') @mock.patch('cinder.utils.execute') def test_do_setup_fail_get_fs_from_path(self, mock_exec, mock_get_gpfs_cluster_id, mock_get_fs_from_path, mock_verify_gpfs_pool): ctxt = self.context mock_get_gpfs_cluster_id.return_value = self.driver._cluster_id mock_get_fs_from_path.side_effect = ( processutils.ProcessExecutionError(stdout='test', stderr='test')) mock_verify_gpfs_pool.return_value = True self.assertRaises(exception.VolumeBackendAPIException, self.driver.do_setup, ctxt) @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._verify_gpfs_pool') @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.' '_get_filesystem_from_path') @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.' '_get_gpfs_cluster_id') @mock.patch('cinder.utils.execute') def test_do_setup_fail_volume(self, mock_exec, mock_get_gpfs_cluster_id, mock_get_filesystem_from_path, mock_verify_gpfs_pool): ctxt = self.context mock_get_gpfs_cluster_id. 
return_value = self.driver._cluster_id mock_get_filesystem_from_path.return_value = '/dev/gpfs' mock_verify_gpfs_pool.return_value = False self.assertRaises(exception.VolumeBackendAPIException, self.driver.do_setup, ctxt) @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._check_gpfs_state') @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._is_gpfs_path') @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.' '_get_gpfs_fs_release_level') def test_check_for_setup_error_fail_conf(self, mock_get_gpfs_fs_rel_lev, mock_is_gpfs_path, mock_check_gpfs_state): fake_fs = '/dev/gpfs' fake_fs_release = 1400 fake_cluster_release = 1201 # fail configuration.gpfs_mount_point_base is None org_value = self.driver.configuration.gpfs_mount_point_base self.override_config('gpfs_mount_point_base', None, conf.SHARED_CONF_GROUP) mock_get_gpfs_fs_rel_lev.return_value = (fake_fs, fake_fs_release) self.assertRaises(exception.VolumeBackendAPIException, self.driver.check_for_setup_error) self.override_config('gpfs_mount_point_base', org_value, conf.SHARED_CONF_GROUP) # fail configuration.gpfs_images_share_mode and # configuration.gpfs_images_dir is None self.override_config('gpfs_images_share_mode', 'copy', conf.SHARED_CONF_GROUP) self.override_config('gpfs_images_dir', None, conf.SHARED_CONF_GROUP) org_value_dir = self.driver.configuration.gpfs_images_dir self.assertRaises(exception.VolumeBackendAPIException, self.driver.check_for_setup_error) self.override_config('gpfs_images_dir', org_value_dir, conf.SHARED_CONF_GROUP) # fail configuration.gpfs_images_share_mode == 'copy_on_write' and not # self._same_filesystem(configuration.gpfs_mount_point_base, # configuration.gpfs_images_dir) self.override_config('gpfs_images_share_mode', 'copy_on_write', conf.SHARED_CONF_GROUP) with mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.' '_same_filesystem', return_value=False): self.assertRaises(exception.VolumeBackendAPIException, self.driver.check_for_setup_error) # fail self.configuration.gpfs_images_share_mode == 'copy_on_write' and # not self._is_same_fileset(self.configuration.gpfs_mount_point_base, # self.configuration.gpfs_images_dir) with mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.' '_is_same_fileset', return_value=False): self.assertRaises(exception.VolumeBackendAPIException, self.driver.check_for_setup_error) # fail directory is None self.override_config('gpfs_images_share_mode', None, conf.SHARED_CONF_GROUP) org_value_dir = self.driver.configuration.gpfs_images_dir self.override_config('gpfs_images_dir', None, conf.SHARED_CONF_GROUP) with mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.' '_get_gpfs_cluster_release_level', return_value=fake_cluster_release): self.driver.check_for_setup_error() self.override_config('gpfs_images_dir', org_value_dir, conf.SHARED_CONF_GROUP) # fail directory.startswith('/') org_value_mount = self.driver.configuration.gpfs_mount_point_base self.override_config('gpfs_mount_point_base', '_' + self.volumes_path, conf.SHARED_CONF_GROUP) self.override_config('gpfs_images_share_mode', None, conf.SHARED_CONF_GROUP) with mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.' 
'_get_gpfs_cluster_release_level', return_value=fake_cluster_release): self.assertRaises(exception.VolumeBackendAPIException, self.driver.check_for_setup_error) self.override_config('gpfs_mount_point_base', org_value_mount, conf.SHARED_CONF_GROUP) # fail os.path.isdir(directory) org_value_mount = self.driver.configuration.gpfs_mount_point_base self.override_config('gpfs_mount_point_base', self.volumes_path + '_', conf.SHARED_CONF_GROUP) org_value_dir = self.driver.configuration.gpfs_images_dir self.override_config('gpfs_images_dir', None, conf.SHARED_CONF_GROUP) with mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.' '_get_gpfs_cluster_release_level', return_value=fake_cluster_release): self.assertRaises(exception.VolumeBackendAPIException, self.driver.check_for_setup_error) self.override_config('gpfs_mount_point_base', org_value_mount, conf.SHARED_CONF_GROUP) self.override_config('gpfs_images_dir', org_value_dir, conf.SHARED_CONF_GROUP) # fail not cluster release level >= GPFS_CLONE_MIN_RELEASE org_fake_cluster_release = fake_cluster_release fake_cluster_release = 1105 self.override_config('gpfs_mount_point_base', self.volumes_path, conf.SHARED_CONF_GROUP) self.override_config('gpfs_images_dir', None, conf.SHARED_CONF_GROUP) with mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.' '_get_gpfs_cluster_release_level', return_value=fake_cluster_release): with mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.' '_get_gpfs_fs_release_level', return_value=(fake_fs, fake_fs_release)): self.assertRaises(exception.VolumeBackendAPIException, self.driver.check_for_setup_error) fake_cluster_release = org_fake_cluster_release # fail not fs release level >= GPFS_CLONE_MIN_RELEASE org_fake_fs_release = fake_fs_release fake_fs_release = 1105 self.override_config('gpfs_mount_point_base', self.volumes_path, conf.SHARED_CONF_GROUP) self.override_config('gpfs_images_dir', None, conf.SHARED_CONF_GROUP) with mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.' '_get_gpfs_cluster_release_level', return_value=fake_cluster_release): with mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.' '_get_gpfs_fs_release_level', return_value=(fake_fs, fake_fs_release)): self.assertRaises(exception.VolumeBackendAPIException, self.driver.check_for_setup_error) fake_fs_release = org_fake_fs_release @mock.patch('cinder.utils.execute') def test_create_sparse_file(self, mock_exec): self.driver._create_sparse_file('', 100) @mock.patch('cinder.utils.execute') def test_allocate_file_blocks(self, mock_exec): self.driver._allocate_file_blocks(os.path.join(self.images_dir, 'test'), 1) @mock.patch('cinder.utils.execute') def test_gpfs_change_attributes(self, mock_exec): options = [] options.extend(['-T', 'test']) self.driver._gpfs_change_attributes(options, self.images_dir) @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._mkfs') @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.' '_gpfs_change_attributes') def test_set_volume_attributes(self, mock_change_attributes, mock_mkfs): metadata = {'data_pool_name': 'test', 'replicas': 'test', 'dio': 'test', 'write_affinity_depth': 'test', 'block_group_factor': 'test', 'write_affinity_failure_group': 'test', 'fstype': 'test', 'fslabel': 'test', 'test': 'test'} self.driver._set_volume_attributes('', '', metadata) @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.' 
'_gpfs_change_attributes') def test_set_volume_attributes_no_attributes(self, mock_change_attributes): metadata = {} org_value = self.driver.configuration.gpfs_storage_pool self.override_config('gpfs_storage_pool', 'system', conf.SHARED_CONF_GROUP) self.driver._set_volume_attributes('', '', metadata) self.override_config('gpfs_storage_pool', org_value, conf.SHARED_CONF_GROUP) @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.' '_gpfs_change_attributes') def test_set_volume_attributes_no_options(self, mock_change_attributes): metadata = {} org_value = self.driver.configuration.gpfs_storage_pool self.override_config('gpfs_storage_pool', '', conf.SHARED_CONF_GROUP) self.driver._set_volume_attributes('', '', metadata) self.override_config('gpfs_storage_pool', org_value, conf.SHARED_CONF_GROUP) @mock.patch('cinder.utils.execute') @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.' '_allocate_file_blocks') @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.' '_set_volume_attributes') @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.' '_set_rw_permission') @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.' '_create_sparse_file') @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.local_path') @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.' '_verify_gpfs_path_state') def test_create_volume(self, mock_gpfs_path_state, mock_local_path, mock_sparse_file, mock_rw_permission, mock_set_volume_attributes, mock_allocate_file_blocks, mock_exec): mock_local_path.return_value = 'test' volume = self._fake_volume() value = {} value['value'] = 'test' org_value = self.driver.configuration.gpfs_sparse_volumes self.override_config('gpfs_sparse_volumes', False, conf.SHARED_CONF_GROUP) self.driver.create_volume(volume) self.override_config('gpfs_sparse_volumes', org_value, conf.SHARED_CONF_GROUP) @mock.patch('cinder.utils.execute') @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.' '_allocate_file_blocks') @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.' '_set_volume_attributes') @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.' '_set_rw_permission') @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.' '_create_sparse_file') @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.local_path') @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.' '_verify_gpfs_path_state') def test_create_volume_no_sparse_volume(self, mock_gpfs_path_state, mock_local_path, mock_sparse_file, mock_rw_permission, mock_set_volume_attributes, mock_allocate_file_blocks, mock_exec): mock_local_path.return_value = 'test' volume = self._fake_volume() value = {} value['value'] = 'test' org_value = self.driver.configuration.gpfs_sparse_volumes self.override_config('gpfs_sparse_volumes', True, conf.SHARED_CONF_GROUP) self.driver.create_volume(volume) self.override_config('gpfs_sparse_volumes', org_value, conf.SHARED_CONF_GROUP) @mock.patch('cinder.utils.execute') @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.' '_allocate_file_blocks') @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.' '_set_volume_attributes') @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.' '_set_rw_permission') @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.' '_create_sparse_file') @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.local_path') @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.' 
'_verify_gpfs_path_state') def test_create_volume_with_metadata(self, mock_gpfs_path_state, mock_local_path, mock_sparse_file, mock_rw_permission, mock_set_volume_attributes, mock_allocate_file_blocks, mock_exec): mock_local_path.return_value = 'test' volume = self._fake_volume() value = {} value['value'] = 'test' mock_set_volume_attributes.return_value = True metadata = {'fake_key': 'fake_value'} org_value = self.driver.configuration.gpfs_sparse_volumes self.override_config('gpfs_sparse_volumes', True, conf.SHARED_CONF_GROUP) self.driver.create_volume(volume) self.assertTrue(self.driver._set_volume_attributes(volume, 'test', metadata)) self.override_config('gpfs_sparse_volumes', org_value, conf.SHARED_CONF_GROUP) @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.' '_resize_volume_file') @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.' '_set_volume_attributes') @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._gpfs_redirect') @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.' '_set_rw_permission') @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._create_gpfs_copy') @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._gpfs_full_copy') @mock.patch('cinder.volume.drivers.ibm.gpfs.' 'GPFSDriver._get_snapshot_path') @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.local_path') def test_create_volume_from_snapshot(self, mock_local_path, mock_snapshot_path, mock_gpfs_full_copy, mock_create_gpfs_copy, mock_rw_permission, mock_gpfs_redirect, mock_set_volume_attributes, mock_resize_volume_file): mock_resize_volume_file.return_value = 5 * units.Gi volume = self._fake_volume() volume['group_id'] = None self.driver.db = mock.Mock() self.driver.db.volume_get = mock.Mock() self.driver.db.volume_get.return_value = volume snapshot = self._fake_snapshot() mock_snapshot_path.return_value = "/tmp/fakepath" self.assertEqual({'size': 5.0}, self.driver.create_volume_from_snapshot(volume, snapshot)) @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.' '_resize_volume_file') @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.' '_set_volume_attributes') @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._gpfs_redirect') @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.' '_set_rw_permission') @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._create_gpfs_copy') @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._gpfs_full_copy') @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.' '_get_snapshot_path') @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.local_path') def test_create_volume_from_snapshot_metadata(self, mock_local_path, mock_snapshot_path, mock_gpfs_full_copy, mock_create_gpfs_copy, mock_rw_permission, mock_gpfs_redirect, mock_set_volume_attributes, mock_resize_volume_file): mock_resize_volume_file.return_value = 5 * units.Gi volume = self._fake_volume() volume['group_id'] = None self.driver.db = mock.Mock() self.driver.db.volume_get = mock.Mock() self.driver.db.volume_get.return_value = volume snapshot = self._fake_snapshot() mock_snapshot_path.return_value = "/tmp/fakepath" mock_set_volume_attributes.return_value = True metadata = {'fake_key': 'fake_value'} self.assertTrue(self.driver._set_volume_attributes(volume, 'test', metadata)) self.assertEqual({'size': 5.0}, self.driver.create_volume_from_snapshot(volume, snapshot)) @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.' '_resize_volume_file') @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.' 
'_set_volume_attributes') @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.' '_set_rw_permission') @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.' '_create_gpfs_clone') @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._gpfs_full_copy') @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.local_path') def test_create_cloned_volume(self, mock_local_path, mock_gpfs_full_copy, mock_create_gpfs_clone, mock_rw_permission, mock_set_volume_attributes, mock_resize_volume_file): mock_resize_volume_file.return_value = 5 * units.Gi volume = self._fake_volume() src_volume = self._fake_volume() self.assertEqual({'size': 5.0}, self.driver.create_cloned_volume(volume, src_volume)) @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.' '_resize_volume_file') @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.' '_set_volume_attributes') @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.' '_set_rw_permission') @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.' '_create_gpfs_clone') @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._gpfs_full_copy') @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.local_path') def test_create_cloned_volume_with_metadata(self, mock_local_path, mock_gpfs_full_copy, mock_create_gpfs_clone, mock_rw_permission, mock_set_volume_attributes, mock_resize_volume_file): mock_resize_volume_file.return_value = 5 * units.Gi volume = self._fake_volume() src_volume = self._fake_volume() mock_set_volume_attributes.return_value = True metadata = {'fake_key': 'fake_value'} self.assertTrue(self.driver._set_volume_attributes(volume, 'test', metadata)) self.assertEqual({'size': 5.0}, self.driver.create_cloned_volume(volume, src_volume)) @mock.patch('cinder.utils.execute') def test_delete_gpfs_file_ok(self, mock_exec): mock_exec.side_effect = [('Parent Depth Parent inode File name\n' '------ ----- -------------- ---------\n' ' no 2 148488 ' '/gpfs0/test.txt', ''), ('', ''), ('', '')] self.driver._delete_gpfs_file(self.images_dir) self.driver._delete_gpfs_file(self.images_dir + '_') mock_exec.side_effect = [('Parent Depth Parent inode File name\n' '------ ----- -------------- ---------\n' ' ' '/gpfs0/test.txt', ''), ('', '')] self.driver._delete_gpfs_file(self.images_dir) @mock.patch('os.path.exists') @mock.patch('cinder.utils.execute') def test_delete_gpfs_file_ok_parent(self, mock_exec, mock_path_exists): mock_path_exists.side_effect = [True, False, False, True, False, False, True, False, False] mock_exec.side_effect = [('Parent Depth Parent inode File name\n' '------ ----- -------------- ---------\n' ' no 2 148488 ' '/gpfs0/test.txt', ''), ('/gpfs0/test.snap\ntest', ''), ('', '')] self.driver._delete_gpfs_file(self.images_dir) mock_exec.side_effect = [('Parent Depth Parent inode File name\n' '------ ----- -------------- ---------\n' ' no 2 148488 ' '/gpfs0/test.txt', ''), ('/gpfs0/test.ts\ntest', ''), ('', '')] self.driver._delete_gpfs_file(self.images_dir) mock_exec.side_effect = [('Parent Depth Parent inode File name\n' '------ ----- -------------- ---------\n' ' no 2 148488 ' '/gpfs0/test.txt', ''), ('/gpfs0/test.txt\ntest', ''), ('', '')] self.driver._delete_gpfs_file(self.images_dir) @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._delete_gpfs_file') @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.local_path') @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.' 
'_verify_gpfs_path_state') def test_delete_volume(self, mock_verify_gpfs_path_state, mock_local_path, mock_delete_gpfs_file): self.driver.delete_volume('') @mock.patch('cinder.utils.execute') def test_gpfs_redirect_ok(self, mock_exec): org_value = self.driver.configuration.gpfs_max_clone_depth self.override_config('gpfs_max_clone_depth', 1, conf.SHARED_CONF_GROUP) mock_exec.side_effect = [('Parent Depth Parent inode File name\n' '------ ----- -------------- ---------\n' ' no 2 148488 ' '/gpfs0/test.txt', ''), ('', '')] self.assertTrue(self.driver._gpfs_redirect('')) self.override_config('gpfs_max_clone_depth', 1, conf.SHARED_CONF_GROUP) mock_exec.side_effect = [('Parent Depth Parent inode File name\n' '------ ----- -------------- ---------\n' ' no 1 148488 ' '/gpfs0/test.txt', ''), ('', '')] self.assertFalse(self.driver._gpfs_redirect('')) self.override_config('gpfs_max_clone_depth', org_value, conf.SHARED_CONF_GROUP) @mock.patch('cinder.utils.execute') def test_gpfs_redirect_fail_depth(self, mock_exec): org_value = self.driver.configuration.gpfs_max_clone_depth self.override_config('gpfs_max_clone_depth', 0, conf.SHARED_CONF_GROUP) mock_exec.side_effect = [('Parent Depth Parent inode File name\n' '------ ----- -------------- ---------\n' ' no 2 148488 ' '/gpfs0/test.txt', ''), ('', '')] self.assertFalse(self.driver._gpfs_redirect('')) self.override_config('gpfs_max_clone_depth', org_value, conf.SHARED_CONF_GROUP) @mock.patch('cinder.utils.execute') def test_gpfs_redirect_fail_match(self, mock_exec): org_value = self.driver.configuration.gpfs_max_clone_depth self.override_config('gpfs_max_clone_depth', 1, conf.SHARED_CONF_GROUP) mock_exec.side_effect = [('Parent Depth Parent inode File name\n' '------ ----- -------------- ---------\n' ' 148488 ' '/gpfs0/test.txt', ''), ('', '')] self.assertFalse(self.driver._gpfs_redirect('')) self.override_config('gpfs_max_clone_depth', org_value, conf.SHARED_CONF_GROUP) @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._create_gpfs_snap') @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._create_gpfs_copy') @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._gpfs_redirect') @mock.patch('cinder.utils.execute') def test_create_gpfs_clone(self, mock_exec, mock_redirect, mock_cr_gpfs_cp, mock_cr_gpfs_snap): mock_redirect.return_value = True self.driver._create_gpfs_clone('', '') mock_redirect.side_effect = [True, False] self.driver._create_gpfs_clone('', '') @mock.patch('cinder.utils.execute') def test_create_gpfs_copy(self, mock_exec): self.driver._create_gpfs_copy('', '') @mock.patch('cinder.utils.execute') def test_create_gpfs_snap(self, mock_exec): self.driver._create_gpfs_snap('') self.driver._create_gpfs_snap('', '') @mock.patch('cinder.utils.execute') def test_is_gpfs_parent_file_ok(self, mock_exec): mock_exec.side_effect = [('Parent Depth Parent inode File name\n' '------ ----- -------------- ---------\n' ' yes 2 148488 ' '/gpfs0/test.txt', ''), ('Parent Depth Parent inode File name\n' '------ ----- -------------- ---------\n' ' no 2 148488 ' '/gpfs0/test.txt', '')] self.assertTrue(self.driver._is_gpfs_parent_file('')) self.assertFalse(self.driver._is_gpfs_parent_file('')) @mock.patch('cinder.objects.volume.Volume.get_by_id') @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._gpfs_redirect') @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.' 
'_set_rw_permission') @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._create_gpfs_snap') @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.local_path') @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.' '_get_snapshot_path') def test_create_snapshot(self, mock_get_snapshot_path, mock_local_path, mock_create_gpfs_snap, mock_set_rw_permission, mock_gpfs_redirect, mock_vol_get_by_id): mock_get_snapshot_path.return_value = "/tmp/fakepath" vol = self._fake_volume() mock_vol_get_by_id.return_value = vol self.driver.create_snapshot(self._fake_snapshot()) @mock.patch('cinder.utils.execute') @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.' '_get_snapshot_path') def test_delete_snapshot(self, mock_snapshot_path, mock_exec): snapshot = self._fake_snapshot() snapshot_path = "/tmp/fakepath" mock_snapshot_path.return_value = snapshot_path snapshot_ts_path = '%s.ts' % snapshot_path self.driver.delete_snapshot(snapshot) mock_exec.assert_any_call('mv', snapshot_path, snapshot_ts_path) mock_exec.assert_any_call('rm', '-f', snapshot_ts_path, check_exit_code=False) def test_ensure_export(self): self.assertIsNone(self.driver.ensure_export('', '')) def test_create_export(self): self.assertIsNone(self.driver.create_export('', '', {})) def test_remove_export(self): self.assertIsNone(self.driver.remove_export('', '')) @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.local_path') def test_initialize_connection(self, mock_local_path): volume = self._fake_volume() mock_local_path.return_value = "/tmp/fakepath" data = self.driver.initialize_connection(volume, '') self.assertEqual(volume.name, data['data']['name']) self.assertEqual("/tmp/fakepath", data['data']['device_path']) self.assertEqual('gpfs', data['driver_volume_type']) def test_terminate_connection(self): self.assertIsNone(self.driver.terminate_connection('', '')) def test_get_volume_stats(self): fake_avail = 80 * units.Gi fake_size = 2 * fake_avail with mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.' '_get_available_capacity', return_value=(fake_avail, fake_size)): stats = self.driver.get_volume_stats() self.assertEqual('GPFS', stats['volume_backend_name']) self.assertEqual('file', stats['storage_protocol']) self.assertEqual('True', stats['gpfs_encryption_rest']) stats = self.driver.get_volume_stats(True) self.assertEqual('GPFS', stats['volume_backend_name']) self.assertEqual('file', stats['storage_protocol']) self.assertEqual('True', stats['gpfs_encryption_rest']) @mock.patch('cinder.utils.execute') def test_get_gpfs_encryption_status_true(self, mock_exec): mock_exec.return_value = ('mmlsfs::HEADER:version:reserved:reserved:' 'deviceName:fieldName:data:remarks:\n' 'mmlsfs::0:1:::gpfs:encryption:Yes:', '') self.assertEqual('Yes', self.driver._get_gpfs_encryption_status()) @mock.patch('cinder.utils.execute') def test_get_gpfs_encryption_status_false(self, mock_exec): mock_exec.return_value = ('mmlsfs::HEADER:version:reserved:reserved:' 'deviceName:fieldName:data:remarks:\n' 'mmlsfs::0:1:::gpfs:encryption:No:', '') self.assertEqual('No', self.driver._get_gpfs_encryption_status()) @mock.patch('cinder.utils.execute') def test_get_gpfs_encryption_status_fail(self, mock_exec): mock_exec.side_effect = ( processutils.ProcessExecutionError(stdout='test', stderr='test')) self.assertRaises(exception.VolumeBackendAPIException, self.driver._get_gpfs_encryption_status) @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.' 
'_update_volume_stats') def test_get_volume_stats_none_stats(self, mock_upd_vol_stats): _stats_org = self.driver._stats self.driver._stats = mock.Mock() self.driver._stats.return_value = None self.driver.get_volume_stats() self.driver._stats = _stats_org @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._clone_image') def test_clone_image_pub(self, mock_exec): self.driver.clone_image('', '', '', {'id': 1}, '') @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._is_gpfs_path') def test_is_cloneable_ok(self, mock_is_gpfs_path): self.override_config('gpfs_images_share_mode', 'copy', conf.SHARED_CONF_GROUP) self.override_config('gpfs_images_dir', self.images_dir, conf.SHARED_CONF_GROUP) CONF.gpfs_images_dir = self.images_dir mock_is_gpfs_path.return_value = None self.assertEqual((True, None, os.path.join(CONF.gpfs_images_dir, '12345')), self.driver._is_cloneable('12345')) @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._is_gpfs_path') def test_is_cloneable_fail_path(self, mock_is_gpfs_path): self.override_config('gpfs_images_share_mode', 'copy', conf.SHARED_CONF_GROUP) CONF.gpfs_images_dir = self.images_dir mock_is_gpfs_path.side_effect = ( processutils.ProcessExecutionError(stdout='test', stderr='test')) self.assertNotEqual((True, None, os.path.join(CONF.gpfs_images_dir, '12345')), self.driver._is_cloneable('12345')) @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.' '_resize_volume_file') @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.' '_set_rw_permission') @mock.patch('cinder.image.image_utils.convert_image') @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._create_gpfs_copy') @mock.patch('cinder.image.image_utils.qemu_img_info') @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._create_gpfs_snap') @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.' '_is_gpfs_parent_file') @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.local_path') @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._is_cloneable') @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.' '_verify_gpfs_path_state') def test_clone_image_clonable(self, mock_verify_gpfs_path_state, mock_is_cloneable, mock_local_path, mock_is_gpfs_parent_file, mock_create_gpfs_snap, mock_qemu_img_info, mock_create_gpfs_copy, mock_conv_image, mock_set_rw_permission, mock_resize_volume_file): mock_is_cloneable.return_value = (True, 'test', self.images_dir) mock_is_gpfs_parent_file.return_value = False mock_qemu_img_info.return_value = self._fake_qemu_qcow2_image_info('') volume = self._fake_volume() self.assertEqual(({'provider_location': None}, True), self.driver._clone_image(volume, '', 1)) @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._is_cloneable') @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver' '._verify_gpfs_path_state') def test_clone_image_not_cloneable(self, mock_verify_gpfs_path_state, mock_is_cloneable): mock_is_cloneable.return_value = (False, 'test', self.images_dir) volume = self._fake_volume() self.assertEqual((None, False), self.driver._clone_image(volume, '', 1)) @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.' '_resize_volume_file') @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.' '_set_rw_permission') @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._create_gpfs_copy') @mock.patch('cinder.image.image_utils.qemu_img_info') @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._create_gpfs_snap') @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.' 
'_is_gpfs_parent_file') @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.local_path') @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._is_cloneable') @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.' '_verify_gpfs_path_state') def test_clone_image_format_raw_copy_on_write(self, mock_verify_gpfs_path_state, mock_is_cloneable, mock_local_path, mock_is_gpfs_parent_file, mock_create_gpfs_snap, mock_qemu_img_info, mock_create_gpfs_copy, mock_set_rw_permission, mock_resize_volume_file): mock_is_cloneable.return_value = (True, 'test', self.images_dir) mock_local_path.return_value = self.volumes_path mock_is_gpfs_parent_file.return_value = False mock_qemu_img_info.return_value = self._fake_qemu_raw_image_info('') volume = self._fake_volume() org_value = self.driver.configuration.gpfs_images_share_mode self.override_config('gpfs_images_share_mode', 'copy_on_write', conf.SHARED_CONF_GROUP) self.assertEqual(({'provider_location': None}, True), self.driver._clone_image(volume, '', 1)) mock_create_gpfs_snap.assert_called_once_with(self.images_dir) self.override_config('gpfs_images_share_mode', org_value, conf.SHARED_CONF_GROUP) @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.' '_resize_volume_file') @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.' '_set_rw_permission') @mock.patch('shutil.copyfile') @mock.patch('cinder.image.image_utils.qemu_img_info') @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.' '_is_gpfs_parent_file') @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.' '_get_volume_path') @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._is_cloneable') @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.' '_verify_gpfs_path_state') def test_clone_image_format_raw_copy(self, mock_verify_gpfs_path_state, mock_is_cloneable, mock_get_volume_path, mock_is_gpfs_parent_file, mock_qemu_img_info, mock_copyfile, mock_set_rw_permission, mock_resize_volume_file): mock_is_cloneable.return_value = (True, 'test', self.images_dir) mock_get_volume_path.return_value = self.volumes_path mock_qemu_img_info.return_value = self._fake_qemu_raw_image_info('') volume = self._fake_volume() org_value = self.driver.configuration.gpfs_images_share_mode self.override_config('gpfs_images_share_mode', 'copy', conf.SHARED_CONF_GROUP) self.assertEqual(({'provider_location': None}, True), self.driver._clone_image(volume, '', 1)) mock_copyfile.assert_called_once_with(self.images_dir, self.volumes_path) self.override_config('gpfs_images_share_mode', org_value, conf.SHARED_CONF_GROUP) @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.' '_resize_volume_file') @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.' '_set_rw_permission') @mock.patch('cinder.image.image_utils.convert_image') @mock.patch('cinder.image.image_utils.qemu_img_info') @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.local_path') @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._is_cloneable') @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.' 
'_verify_gpfs_path_state') def test_clone_image_format_qcow2(self, mock_verify_gpfs_path_state, mock_is_cloneable, mock_local_path, mock_qemu_img_info, mock_conv_image, mock_set_rw_permission, mock_resize_volume_file): mock_is_cloneable.return_value = (True, 'test', self.images_dir) mock_local_path.return_value = self.volumes_path mock_qemu_img_info.return_value = self._fake_qemu_qcow2_image_info('') volume = self._fake_volume() self.assertEqual(({'provider_location': None}, True), self.driver._clone_image(volume, '', 1)) mock_conv_image.assert_called_once_with(self.images_dir, self.volumes_path, 'raw') @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.' '_resize_volume_file') @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.local_path') @mock.patch('cinder.image.image_utils.fetch_to_raw') @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.' '_verify_gpfs_path_state') def test_copy_image_to_volume(self, mock_verify_gpfs_path_state, mock_fetch_to_raw, mock_local_path, mock_resize_volume_file): volume = self._fake_volume() self.driver.copy_image_to_volume('', volume, '', 1) @mock.patch('cinder.image.image_utils.qemu_img_info') @mock.patch('cinder.image.image_utils.resize_image') @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.local_path') def test_resize_volume_file_ok(self, mock_local_path, mock_resize_image, mock_qemu_img_info): volume = self._fake_volume() mock_qemu_img_info.return_value = self._fake_qemu_qcow2_image_info('') self.assertEqual(self._fake_qemu_qcow2_image_info('').virtual_size, self.driver._resize_volume_file(volume, 2000)) @mock.patch('cinder.image.image_utils.qemu_img_info') @mock.patch('cinder.image.image_utils.resize_image') @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.local_path') def test_resize_volume_file_fail(self, mock_local_path, mock_resize_image, mock_qemu_img_info): volume = self._fake_volume() mock_resize_image.side_effect = ( processutils.ProcessExecutionError(stdout='test', stderr='test')) mock_qemu_img_info.return_value = self._fake_qemu_qcow2_image_info('') self.assertRaises(exception.VolumeBackendAPIException, self.driver._resize_volume_file, volume, 2000) @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.' '_resize_volume_file') def test_extend_volume(self, mock_resize_volume_file): volume = self._fake_volume() self.driver.extend_volume(volume, 2000) @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.local_path') @mock.patch('cinder.image.image_utils.upload_volume') def test_copy_volume_to_image(self, mock_upload_volume, mock_local_path): volume = test_utils.create_volume( self.context, volume_type_id=fake.VOLUME_TYPE_ID, updated_at=self.updated_at) extra_specs = { 'image_service:store_id': 'fake-store' } test_utils.create_volume_type( self.context.elevated(), id=fake.VOLUME_TYPE_ID, name="test_type", extra_specs=extra_specs) self.driver.copy_volume_to_image('', volume, '', '') @mock.patch('cinder.utils.execute') @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.' '_get_volume_path') @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.' '_can_migrate_locally') def test_migrate_volume_ok(self, mock_local, volume_path, mock_exec): volume = self._fake_volume() host = {} host = {'host': 'foo', 'capabilities': {}} mock_local.return_value = (self.driver.configuration. gpfs_mount_point_base + '_') self.assertEqual((True, None), self.driver._migrate_volume(volume, host)) @mock.patch('cinder.utils.execute') @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.' 
'_can_migrate_locally') def test_migrate_volume_fail_dest_path(self, mock_local, mock_exec): volume = self._fake_volume() host = {} host = {'host': 'foo', 'capabilities': {}} mock_local.return_value = None self.assertEqual((False, None), self.driver._migrate_volume(volume, host)) @mock.patch('cinder.utils.execute') @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.' '_can_migrate_locally') def test_migrate_volume_fail_mpb(self, mock_local, mock_exec): volume = self._fake_volume() host = {} host = {'host': 'foo', 'capabilities': {}} mock_local.return_value = (self.driver.configuration. gpfs_mount_point_base) mock_exec.side_effect = ( processutils.ProcessExecutionError(stdout='test', stderr='test')) self.assertEqual((True, None), self.driver._migrate_volume(volume, host)) @mock.patch('cinder.utils.execute') @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.' '_get_volume_path') @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.' '_can_migrate_locally') def test_migrate_volume_fail_mv(self, mock_local, mock_path, mock_exec): volume = self._fake_volume() host = {} host = {'host': 'foo', 'capabilities': {}} mock_local.return_value = ( self.driver.configuration.gpfs_mount_point_base + '_') mock_exec.side_effect = ( processutils.ProcessExecutionError(stdout='test', stderr='test')) self.assertEqual((False, None), self.driver._migrate_volume(volume, host)) @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._migrate_volume') def test_migrate_volume_ok_pub(self, mock_migrate_volume): self.driver.migrate_volume('', '', '') @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._migrate_volume') @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.' '_update_volume_storage_pool') @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.local_path') @mock.patch('cinder.volume.drivers.ibm.gpfs._different') def test_retype_ok(self, mock_different, local_path, mock_strg_pool, mock_migrate_vol): ctxt = self.context (volume, new_type, diff, host) = self._fake_retype_arguments() self.driver.db = mock.Mock() mock_different.side_effect = [False, True, True] mock_strg_pool.return_value = True mock_migrate_vol.return_value = (True, True) self.assertTrue(self.driver.retype(ctxt, volume, new_type, diff, host)) @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._migrate_volume') @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.' '_update_volume_storage_pool') @mock.patch('cinder.volume.drivers.ibm.gpfs._different') def test_retype_diff_backend(self, mock_different, mock_strg_pool, mock_migrate_vol): ctxt = self.context (volume, new_type, diff, host) = self._fake_retype_arguments() mock_different.side_effect = [True, True, True] self.assertFalse(self.driver.retype(ctxt, volume, new_type, diff, host)) @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._migrate_volume') @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.' '_update_volume_storage_pool') @mock.patch('cinder.volume.drivers.ibm.gpfs._different') def test_retype_diff_pools_migrated(self, mock_different, mock_strg_pool, mock_migrate_vol): ctxt = self.context (volume, new_type, diff, host) = self._fake_retype_arguments() self.driver.db = mock.Mock() mock_different.side_effect = [False, False, True] mock_strg_pool.return_value = True mock_migrate_vol.return_value = (True, True) self.assertTrue(self.driver.retype(ctxt, volume, new_type, diff, host)) @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._migrate_volume') @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.' 
'_update_volume_storage_pool') @mock.patch('cinder.volume.drivers.ibm.gpfs._different') def test_retype_diff_pools(self, mock_different, mock_strg_pool, mock_migrate_vol): ctxt = self.context (volume, new_type, diff, host) = self._fake_retype_arguments() mock_different.side_effect = [False, False, True] mock_strg_pool.return_value = True mock_migrate_vol.return_value = (False, False) self.assertFalse(self.driver.retype(ctxt, volume, new_type, diff, host)) @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._migrate_volume') @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.' '_update_volume_storage_pool') @mock.patch('cinder.volume.drivers.ibm.gpfs._different') def test_retype_no_diff_hit(self, mock_different, mock_strg_pool, mock_migrate_vol): ctxt = self.context (volume, new_type, diff, host) = self._fake_retype_arguments() mock_different.side_effect = [False, False, False] self.assertFalse(self.driver.retype(ctxt, volume, new_type, diff, host)) @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.local_path') @mock.patch('cinder.utils.execute') def test_mkfs_ok(self, mock_exec, local_path): volume = self._fake_volume() self.driver._mkfs(volume, 'swap') self.driver._mkfs(volume, 'swap', 'test') self.driver._mkfs(volume, 'ext3', 'test') self.driver._mkfs(volume, 'vfat', 'test') @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.local_path') @mock.patch('cinder.utils.execute') def test_mkfs_fail_mk(self, mock_exec, local_path): volume = self._fake_volume() mock_exec.side_effect = ( processutils.ProcessExecutionError(stdout='test', stderr='test')) self.assertRaises(exception.VolumeBackendAPIException, self.driver._mkfs, volume, 'swap', 'test') @mock.patch('cinder.utils.execute') def test_get_available_capacity_ok(self, mock_exec): mock_exec.return_value = ('Filesystem 1-blocks Used ' 'Available Capacity Mounted on\n' '/dev/gpfs 10737418240 544735232 ' '10192683008 6%% /gpfs0', '') self.assertEqual((10192683008, 10737418240), self.driver._get_available_capacity('/gpfs0')) @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.' 
'_verify_gpfs_path_state') @mock.patch('cinder.utils.execute') def test_get_available_capacity_fail_mounted(self, mock_exec, mock_path_state): mock_path_state.side_effect = ( exception.VolumeBackendAPIException('test')) mock_exec.return_value = ('Filesystem 1-blocks Used ' 'Available Capacity Mounted on\n' '/dev/gpfs 10737418240 544735232 ' '10192683008 6%% /gpfs0', '') self.assertEqual((0, 0), self.driver._get_available_capacity('/gpfs0')) @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._is_gpfs_path') def test_verify_gpfs_path_state_ok(self, mock_is_gpfs_path): self.driver._verify_gpfs_path_state(self.images_dir) @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._is_gpfs_path') def test_verify_gpfs_path_state_fail_path(self, mock_is_gpfs_path): mock_is_gpfs_path.side_effect = ( processutils.ProcessExecutionError(stdout='test', stderr='test')) self.assertRaises(exception.VolumeBackendAPIException, self.driver._verify_gpfs_path_state, self.images_dir) @mock.patch('cinder.utils.execute') def test_create_consistencygroup(self, mock_exec): ctxt = self.context group = self._fake_group() self.driver._create_consistencygroup(ctxt, group) fsdev = self.driver._gpfs_device cgname = "consisgroup-%s" % group['id'] cgpath = os.path.join(self.driver.configuration.gpfs_mount_point_base, cgname) cmd = ['mmcrfileset', fsdev, cgname, '--inode-space', 'new'] mock_exec.assert_any_call(*cmd) cmd = ['mmlinkfileset', fsdev, cgname, '-J', cgpath] mock_exec.assert_any_call(*cmd) cmd = ['chmod', '770', cgpath] mock_exec.assert_any_call(*cmd) @mock.patch('cinder.utils.execute') def test_create_consistencygroup_fail(self, mock_exec): ctxt = self.context group = self._fake_group() mock_exec.side_effect = ( processutils.ProcessExecutionError(stdout='test', stderr='test')) self.assertRaises(exception.VolumeBackendAPIException, self.driver._create_consistencygroup, ctxt, group) @mock.patch('cinder.utils.execute') def test_delete_consistencygroup(self, mock_exec): ctxt = self.context group = self._fake_group() group['status'] = fields.ConsistencyGroupStatus.AVAILABLE volume = self._fake_volume() volume['status'] = 'available' volumes = [] volumes.append(volume) self.driver.db = mock.Mock() self.driver.db.volume_get_all_by_group = mock.Mock() self.driver.db.volume_get_all_by_group.return_value = volumes self.driver._delete_consistencygroup(ctxt, group, []) fsdev = self.driver._gpfs_device cgname = "consisgroup-%s" % group['id'] cmd = ['mmlsfileset', fsdev, cgname] mock_exec.assert_any_call(*cmd) cmd = ['mmunlinkfileset', fsdev, cgname, '-f'] mock_exec.assert_any_call(*cmd) cmd = ['mmdelfileset', fsdev, cgname, '-f'] mock_exec.assert_any_call(*cmd) @mock.patch('cinder.utils.execute') def test_delete_consistencygroup_no_fileset(self, mock_exec): ctxt = self.context group = self._fake_group() group['status'] = fields.ConsistencyGroupStatus.AVAILABLE volume = self._fake_volume() volume['status'] = 'available' volumes = [] volumes.append(volume) self.driver.db = mock.Mock() self.driver.db.volume_get_all_by_group = mock.Mock() self.driver.db.volume_get_all_by_group.return_value = volumes mock_exec.side_effect = ( processutils.ProcessExecutionError(exit_code=2)) self.driver._delete_consistencygroup(ctxt, group, []) fsdev = self.driver._gpfs_device cgname = "consisgroup-%s" % group['id'] cmd = ['mmlsfileset', fsdev, cgname] mock_exec.assert_called_once_with(*cmd) @mock.patch('cinder.utils.execute') def test_delete_consistencygroup_fail(self, mock_exec): ctxt = self.context group = self._fake_group() group['status'] = 
fields.ConsistencyGroupStatus.AVAILABLE self.driver.db = mock.Mock() self.driver.db.volume_get_all_by_group = mock.Mock() self.driver.db.volume_get_all_by_group.return_value = [] mock_exec.side_effect = ( processutils.ProcessExecutionError(stdout='test', stderr='test')) self.assertRaises(exception.VolumeBackendAPIException, self.driver._delete_consistencygroup, ctxt, group, []) def test_update_consistencygroup(self): ctxt = self.context group = self._fake_group() self.assertRaises(gpfs.GPFSDriverUnsupportedOperation, self.driver._update_consistencygroup, ctxt, group) def test_create_consisgroup_from_src(self): ctxt = self.context group = self._fake_group() self.assertRaises(gpfs.GPFSDriverUnsupportedOperation, self.driver._create_consistencygroup_from_src, ctxt, group, []) @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.create_snapshot') def test_create_cgsnapshot(self, mock_create_snap): ctxt = self.context cgsnap = self._fake_cgsnapshot() snapshot1 = self._fake_snapshot() model_update, snapshots = self.driver._create_cgsnapshot(ctxt, cgsnap, [snapshot1]) self.driver.create_snapshot.assert_called_once_with(snapshot1) self.assertEqual({'status': fields.ConsistencyGroupStatus.AVAILABLE}, model_update) self.assertEqual({'id': snapshot1.id, 'status': fields.SnapshotStatus.AVAILABLE}, snapshots[0]) @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.create_snapshot') def test_create_cgsnapshot_empty(self, mock_create_snap): ctxt = self.context cgsnap = self._fake_cgsnapshot() model_update, snapshots = self.driver._create_cgsnapshot(ctxt, cgsnap, []) self.assertFalse(self.driver.create_snapshot.called) self.assertEqual({'status': fields.ConsistencyGroupStatus.AVAILABLE}, model_update) @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.delete_snapshot') def test_delete_cgsnapshot(self, mock_delete_snap): ctxt = self.context cgsnap = self._fake_cgsnapshot() snapshot1 = self._fake_snapshot() model_update, snapshots = self.driver._delete_cgsnapshot(ctxt, cgsnap, [snapshot1]) self.driver.delete_snapshot.assert_called_once_with(snapshot1) self.assertEqual({'status': fields.ConsistencyGroupStatus.DELETED}, model_update) self.assertEqual({'id': snapshot1.id, 'status': fields.SnapshotStatus.DELETED}, snapshots[0]) @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.delete_snapshot') def test_delete_cgsnapshot_empty(self, mock_delete_snap): ctxt = self.context cgsnap = self._fake_cgsnapshot() model_update, snapshots = self.driver._delete_cgsnapshot(ctxt, cgsnap, []) self.assertFalse(self.driver.delete_snapshot.called) self.assertEqual({'status': fields.ConsistencyGroupStatus.DELETED}, model_update) def test_local_path_volume_not_in_cg(self): volume = self._fake_volume() volume['group_id'] = None volume_path = os.path.join( self.driver.configuration.gpfs_mount_point_base, volume['name'] ) ret = self.driver.local_path(volume) self.assertEqual(volume_path, ret) @mock.patch('cinder.db.get_by_id') @mock.patch('cinder.volume.volume_utils.is_group_a_cg_snapshot_type') def test_local_path_volume_in_cg(self, mock_group_cg_snapshot_type, mock_group_obj): mock_group_cg_snapshot_type.return_value = True volume = self._fake_volume() group = self._fake_group() mock_group_obj.return_value = group cgname = "consisgroup-%s" % volume['group_id'] volume_path = os.path.join( self.driver.configuration.gpfs_mount_point_base, cgname, volume['name'] ) ret = self.driver.local_path(volume) self.assertEqual(volume_path, ret) @mock.patch('cinder.context.get_admin_context') 
@mock.patch('cinder.objects.volume.Volume.get_by_id') @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.local_path') def test_get_snapshot_path(self, mock_local_path, mock_vol_get_by_id, mock_admin_context): volume = self._fake_volume() mock_vol_get_by_id.return_value = volume volume_path = self.volumes_path mock_local_path.return_value = volume_path snapshot = self._fake_snapshot() ret = self.driver._get_snapshot_path(snapshot) self.assertEqual( os.path.join(os.path.dirname(volume_path), snapshot.name), ret ) @mock.patch('cinder.utils.execute') def test_gpfs_full_copy(self, mock_exec): src = "/tmp/vol1" dest = "/tmp/vol2" self.driver._gpfs_full_copy(src, dest) mock_exec.assert_called_once_with('cp', src, dest, check_exit_code=True) def _fake_volume(self): volume = {} volume['id'] = fake.VOLUME_ID volume['display_name'] = 'test' volume['metadata'] = {'key1': 'val1'} volume['_name_id'] = None volume['size'] = 1000 volume['group_id'] = fake.CONSISTENCY_GROUP_ID return objects.Volume(self.context, **volume) def _fake_snapshot(self): snapshot = {} snapshot['id'] = fake.SNAPSHOT_ID snapshot['display_name'] = 'test-snap' snapshot['volume_size'] = 1000 snapshot['volume_id'] = fake.VOLUME_ID snapshot['status'] = 'available' snapshot['snapshot_metadata'] = [] return objects.Snapshot(context=self.context, **snapshot) def _fake_volume_in_cg(self): volume = self._fake_volume() volume.group_id = fake.CONSISTENCY_GROUP_ID return volume def _fake_group(self): group = {} group['name'] = 'test_group' group['id'] = fake.CONSISTENCY_GROUP_ID group['user_id'] = fake.USER_ID group['group_type_id'] = fake.GROUP_TYPE_ID group['project_id'] = fake.PROJECT_ID return objects.Group(self.context, **group) def _fake_cgsnapshot(self): snapshot = self._fake_snapshot() snapshot.group_id = fake.CONSISTENCY_GROUP_ID return snapshot def _fake_qemu_qcow2_image_info(self, path): data = FakeQemuImgInfo() data.file_format = 'qcow2' data.backing_file = None data.virtual_size = 1 * units.Gi return data def _fake_qemu_raw_image_info(self, path): data = FakeQemuImgInfo() data.file_format = 'raw' data.backing_file = None data.virtual_size = 1 * units.Gi return data def _fake_retype_arguments(self): ctxt = self.context loc = 'GPFSDriver:%s:testpath' % self.driver._cluster_id cap = {'location_info': loc} host = {'host': 'foo', 'capabilities': cap} key_specs_old = {'capabilities:storage_pool': 'bronze', 'volume_backend_name': 'backend1'} key_specs_new = {'capabilities:storage_pool': 'gold', 'volume_backend_name': 'backend1'} old_type_ref = volume_types.create(ctxt, 'old', key_specs_old) new_type_ref = volume_types.create(ctxt, 'new', key_specs_new) volume_types.get_volume_type(ctxt, old_type_ref['id']) new_type = volume_types.get_volume_type(ctxt, new_type_ref['id']) diff, _equal = volume_types.volume_types_diff(ctxt, old_type_ref['id'], new_type_ref['id']) volume = self._fake_volume() volume['host'] = 'foo' return (volume, new_type, diff, host) @mock.patch('cinder.volume.volume_utils.is_group_a_cg_snapshot_type') def test_create_group(self, mock_cg_snapshot_type): mock_cg_snapshot_type.return_value = False ctxt = self.context group = self._fake_group() self.assertRaises( NotImplementedError, self.driver.create_group, ctxt, group ) @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.' 
'_create_consistencygroup') @mock.patch('cinder.volume.volume_utils.is_group_a_cg_snapshot_type') def test_create_group_cg(self, mock_cg_snapshot_type, mock_consisgroup_create): mock_cg_snapshot_type.return_value = True ctxt = self.context group = self._fake_group() self.driver.create_group(ctxt, group) mock_consisgroup_create.assert_called_once_with(ctxt, group) @mock.patch('cinder.volume.volume_utils.is_group_a_cg_snapshot_type') def test_delete_group(self, mock_cg_snapshot_type): mock_cg_snapshot_type.return_value = False ctxt = self.context group = self._fake_group() volumes = [] self.assertRaises( NotImplementedError, self.driver.delete_group, ctxt, group, volumes ) @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.' '_delete_consistencygroup') @mock.patch('cinder.volume.volume_utils.is_group_a_cg_snapshot_type') def test_delete_group_cg(self, mock_cg_snapshot_type, mock_consisgroup_delete): mock_cg_snapshot_type.return_value = True ctxt = self.context group = self._fake_group() volumes = [] self.driver.delete_group(ctxt, group, volumes) mock_consisgroup_delete.assert_called_once_with(ctxt, group, volumes) @mock.patch('cinder.volume.volume_utils.is_group_a_cg_snapshot_type') def test_update_group(self, mock_cg_snapshot_type): mock_cg_snapshot_type.return_value = False ctxt = self.context group = self._fake_group() self.assertRaises( NotImplementedError, self.driver.update_group, ctxt, group ) @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.' '_update_consistencygroup') @mock.patch('cinder.volume.volume_utils.is_group_a_cg_snapshot_type') def test_update_group_cg(self, mock_cg_snapshot_type, mock_consisgroup_update): mock_cg_snapshot_type.return_value = True ctxt = self.context group = self._fake_group() self.driver.update_group(ctxt, group) mock_consisgroup_update.assert_called_once_with(ctxt, group, None, None) @mock.patch('cinder.volume.volume_utils.is_group_a_cg_snapshot_type') def test_create_group_snapshot(self, mock_cg_snapshot_type): mock_cg_snapshot_type.return_value = False ctxt = self.context group_snapshot = mock.MagicMock() snapshots = [mock.Mock()] self.assertRaises( NotImplementedError, self.driver.create_group_snapshot, ctxt, group_snapshot, snapshots ) @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.' '_create_cgsnapshot') @mock.patch('cinder.volume.volume_utils.is_group_a_cg_snapshot_type') def test_create_group_snapshot_cg(self, mock_cg_snapshot_type, mock_cgsnapshot_create): mock_cg_snapshot_type.return_value = True ctxt = self.context group_snapshot = mock.MagicMock() snapshots = [mock.Mock()] self.driver.create_group_snapshot(ctxt, group_snapshot, snapshots) mock_cgsnapshot_create.assert_called_once_with(ctxt, group_snapshot, snapshots) @mock.patch('cinder.volume.volume_utils.is_group_a_cg_snapshot_type') def test_delete_group_snapshot(self, mock_cg_snapshot_type): mock_cg_snapshot_type.return_value = False ctxt = self.context group_snapshot = mock.MagicMock() snapshots = [mock.Mock()] self.assertRaises( NotImplementedError, self.driver.delete_group_snapshot, ctxt, group_snapshot, snapshots ) @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.' 
'_delete_cgsnapshot') @mock.patch('cinder.volume.volume_utils.is_group_a_cg_snapshot_type') def test_delete_group_snapshot_cg(self, mock_cg_snapshot_type, mock_cgsnapshot_delete): mock_cg_snapshot_type.return_value = True ctxt = self.context group_snapshot = mock.MagicMock() snapshots = [mock.Mock()] self.driver.delete_group_snapshot(ctxt, group_snapshot, snapshots) mock_cgsnapshot_delete.assert_called_once_with(ctxt, group_snapshot, snapshots) @mock.patch('cinder.volume.volume_utils.is_group_a_cg_snapshot_type') def test_create_group_from_src(self, mock_cg_snapshot_type): mock_cg_snapshot_type.return_value = False ctxt = self.context group = self._fake_group() volumes = [] self.assertRaises( NotImplementedError, self.driver.create_group_from_src, ctxt, group, volumes ) @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.' '_create_consistencygroup_from_src') @mock.patch('cinder.volume.volume_utils.is_group_a_cg_snapshot_type') def test_create_group_from_src_cg(self, mock_cg_snapshot_type, mock_cg_clone_create): mock_cg_snapshot_type.return_value = True ctxt = self.context group = self._fake_group() volumes = [] self.driver.create_group_from_src(ctxt, group, volumes) mock_cg_clone_create.assert_called_once_with(ctxt, group, volumes, None, None, None, None) class GPFSRemoteDriverTestCase(test.TestCase): """Unit tests for GPFSRemoteDriver class""" @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSRemoteDriver.' '_get_active_gpfs_node_ip') @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSRemoteDriver.' '_run_ssh') def test_gpfs_remote_execute(self, mock_run_ssh, mock_active_gpfs_ip): configuration = conf.Configuration(None) self.driver = gpfs.GPFSRemoteDriver(configuration=configuration) self.driver._gpfs_remote_execute('test', check_exit_code=True) expected = [mock.call(('test',), True)] self.assertEqual(expected, mock_run_ssh.mock_calls) @mock.patch('paramiko.SSHClient', new=mock.MagicMock()) @mock.patch('os.path.isfile', return_value=True) @mock.patch('builtins.open') @mock.patch('os.path.expanduser') @mock.patch('paramiko.RSAKey.from_private_key_file') @mock.patch('oslo_concurrency.processutils.ssh_execute') def test_get_active_gpfs_node_ip(self, mock_ssh_execute, mock_pkey_file, mock_path, mock_open, mock_isfile): configuration = conf.Configuration(None) configuration.gpfs_hosts = ['10.0.0.1', '10.0.0.2'] configuration.gpfs_mount_point_base = '/gpfs' configuration.gpfs_private_key = '/test/fake_private_key' mmgetstate_fake_out = "mmgetstate::state:\nmmgetstate::active:" mock_ssh_execute.side_effect = [(mmgetstate_fake_out, ''), ('', '')] self.driver = gpfs.GPFSRemoteDriver(configuration=configuration) san_ip = self.driver._get_active_gpfs_node_ip() self.assertEqual('10.0.0.1', san_ip) @mock.patch('paramiko.SSHClient', new=mock.MagicMock()) @mock.patch('os.path.isfile', return_value=True) @mock.patch('builtins.open') @mock.patch('os.path.expanduser') @mock.patch('paramiko.RSAKey.from_private_key_file') @mock.patch('oslo_concurrency.processutils.ssh_execute') def test_get_active_gpfs_node_ip_with_password(self, mock_ssh_execute, mock_pkey_file, mock_path, mock_open, mock_isfile): configuration = conf.Configuration(None) configuration.gpfs_hosts = ['10.0.0.1', '10.0.0.2'] configuration.gpfs_mount_point_base = '/gpfs' configuration.gpfs_user_password = 'FakePassword' mmgetstate_fake_out = "mmgetstate::state:\nmmgetstate::active:" mock_ssh_execute.side_effect = [(mmgetstate_fake_out, ''), ('', '')] self.driver = gpfs.GPFSRemoteDriver(configuration=configuration) san_ip = 
self.driver._get_active_gpfs_node_ip() self.assertEqual('10.0.0.1', san_ip) @mock.patch('paramiko.SSHClient', new=mock.MagicMock()) @mock.patch('os.path.isfile', return_value=True) @mock.patch('builtins.open') def test_get_active_gpfs_node_ip_missing_key_and_password(self, mock_open, mock_isfile): configuration = conf.Configuration(None) configuration.gpfs_hosts = ['10.0.0.1', '10.0.0.2'] configuration.gpfs_mount_point_base = '/gpfs' self.driver = gpfs.GPFSRemoteDriver(configuration=configuration) self.assertRaises(exception.VolumeDriverException, self.driver._get_active_gpfs_node_ip) @mock.patch('paramiko.SSHClient', new=mock.MagicMock()) @mock.patch('os.path.isfile', return_value=True) @mock.patch('builtins.open') @mock.patch('os.path.expanduser') @mock.patch('paramiko.RSAKey.from_private_key_file') @mock.patch('oslo_concurrency.processutils.ssh_execute') def test_get_active_gpfs_node_ip_second(self, mock_ssh_execute, mock_pkey_file, mock_path, mock_open, mock_isfile): configuration = conf.Configuration(None) configuration.gpfs_hosts = ['10.0.0.1', '10.0.0.2'] configuration.gpfs_mount_point_base = '/gpfs' configuration.gpfs_private_key = '/test/fake_private_key' mmgetstate_active_fake_out = "mmgetstate::state:\nmmgetstate::active:" mmgetstate_down_fake_out = "mmgetstate::state:\nmmgetstate::down:" mock_ssh_execute.side_effect = [(mmgetstate_down_fake_out, ''), (mmgetstate_active_fake_out, ''), ('', '')] self.driver = gpfs.GPFSRemoteDriver(configuration=configuration) san_ip = self.driver._get_active_gpfs_node_ip() self.assertEqual('10.0.0.2', san_ip) @mock.patch('paramiko.SSHClient', new=mock.MagicMock()) def test_missing_ssh_host_key_config(self): configuration = conf.Configuration(None) configuration.gpfs_hosts = ['10.0.0.1', '10.0.0.2'] configuration.gpfs_hosts_key_file = None self.driver = gpfs.GPFSRemoteDriver(configuration=configuration) self.assertRaises(exception.ParameterNotFound, self.driver._get_active_gpfs_node_ip) @mock.patch('paramiko.SSHClient', new=mock.MagicMock()) @mock.patch('os.path.isfile', return_value=False) def test_init_missing_ssh_host_key_file(self, mock_is_file): configuration = conf.Configuration(None) configuration.gpfs_hosts = ['10.0.0.1', '10.0.0.2'] configuration.gpfs_hosts_key_file = '/test' self.flags(state_path='/var/lib/cinder') self.driver = gpfs.GPFSRemoteDriver(configuration=configuration) self.assertRaises(exception.InvalidInput, self.driver._get_active_gpfs_node_ip) @mock.patch('paramiko.SSHClient', new=mock.MagicMock()) @mock.patch('os.path.isfile', return_value=True) @mock.patch('builtins.open') @mock.patch('os.path.expanduser') @mock.patch('paramiko.RSAKey.from_private_key_file') @mock.patch('oslo_concurrency.processutils.ssh_execute') def test_get_active_gpfs_node_ip_exception(self, mock_ssh_execute, mock_pkey_file, mock_path, mock_open, mock_isfile): configuration = conf.Configuration(None) configuration.gpfs_hosts = ['10.0.0.1', '10.0.0.2'] configuration.gpfs_mount_point_base = '/gpfs' configuration.gpfs_private_key = "/test/fake_private_key" mmgetstate_down_fake_out = "mmgetstate::state:\nmmgetstate::down:" mock_ssh_execute.side_effect = [(mmgetstate_down_fake_out, ''), processutils.ProcessExecutionError( stderr='test')] self.driver = gpfs.GPFSRemoteDriver(configuration=configuration) self.assertRaises(exception.VolumeBackendAPIException, self.driver._get_active_gpfs_node_ip) class GPFSNFSDriverTestCase(test.TestCase): driver_name = "cinder.volume.drivers.gpfs.GPFSNFSDriver" TEST_NFS_EXPORT = 'nfs-host1:/export' TEST_SIZE_IN_GB = 1 
TEST_EXTEND_SIZE_IN_GB = 2 TEST_MNT_POINT = '/mnt/nfs' TEST_MNT_POINT_BASE = '/mnt' TEST_GPFS_MNT_POINT_BASE = '/export' TEST_LOCAL_PATH = '/mnt/nfs/volume-123' TEST_VOLUME_PATH = '/export/volume-123' TEST_SNAP_PATH = '/export/snapshot-123' def _execute_wrapper(self, cmd, *args, **kwargs): try: kwargs.pop('run_as_root') except KeyError: pass return utils.execute(cmd, *args, **kwargs) def _fake_volume(self): volume = {} volume['id'] = fake.VOLUME_ID volume['display_name'] = 'test' volume['metadata'] = {'key1': 'val1'} volume['_name_id'] = None volume['size'] = 1000 volume['group_id'] = fake.CONSISTENCY_GROUP_ID return objects.Volume(self.context, **volume) def _fake_group(self): group = {} group['name'] = 'test_group' group['id'] = fake.CONSISTENCY_GROUP_ID group['user_id'] = fake.USER_ID group['group_type_id'] = fake.GROUP_TYPE_ID group['project_id'] = fake.PROJECT_ID return objects.Group(self.context, **group) def _fake_snapshot(self): snapshot = {} snapshot['id'] = '12345' snapshot['name'] = 'test-snap' snapshot['volume_size'] = 1000 snapshot['volume_id'] = '123456' snapshot['status'] = 'available' return snapshot def setUp(self): super(GPFSNFSDriverTestCase, self).setUp() self.driver = gpfs.GPFSNFSDriver(configuration=conf. Configuration(None)) self.driver.gpfs_execute = self._execute_wrapper self.context = context.get_admin_context() self.context.user_id = 'fake' self.context.project_id = 'fake' @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSNFSDriver.' '_run_ssh') def test_gpfs_remote_execute(self, mock_run_ssh): mock_run_ssh.return_value = 'test' self.driver._gpfs_remote_execute('test', check_exit_code=True) expected = [mock.call(('test',), True)] self.assertEqual(expected, mock_run_ssh.mock_calls) @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSNFSDriver.' '_ensure_shares_mounted') def test_update_volume_stats(self, mock_ensure): """Check update volume stats.""" mock_ensure.return_value = True fake_avail = 80 * units.Gi fake_size = 2 * fake_avail fake_used = 10 * units.Gi with mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSNFSDriver.' '_get_capacity_info', return_value=(fake_avail, fake_size, fake_used)): stats = self.driver.get_volume_stats() self.assertEqual('GPFSNFS', stats['volume_backend_name']) self.assertEqual('file', stats['storage_protocol']) stats = self.driver.get_volume_stats(True) self.assertEqual('GPFSNFS', stats['volume_backend_name']) self.assertEqual('file', stats['storage_protocol']) @mock.patch('cinder.db.get_by_id') @mock.patch('cinder.volume.volume_utils.is_group_a_cg_snapshot_type') def test_get_volume_path(self, mock_group_cg_snapshot_type, mock_group): mock_group_cg_snapshot_type.return_value = True self.driver.configuration.gpfs_mount_point_base = ( self.TEST_GPFS_MNT_POINT_BASE) volume = self._fake_volume() group = self._fake_group() mock_group.return_value = group volume_path_in_cg = os.path.join(self.TEST_GPFS_MNT_POINT_BASE, 'consisgroup-' + fake.CONSISTENCY_GROUP_ID, 'volume-' + fake.VOLUME_ID) self.assertEqual(volume_path_in_cg, self.driver._get_volume_path(volume)) volume.group_id = None volume_path = os.path.join(self.TEST_GPFS_MNT_POINT_BASE, 'volume-' + fake.VOLUME_ID) self.assertEqual(volume_path, self.driver._get_volume_path(volume)) @mock.patch('cinder.db.get_by_id') @mock.patch('cinder.volume.volume_utils.is_group_a_cg_snapshot_type') @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSNFSDriver.' '_get_mount_point_for_share') @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSNFSDriver.' 
'_find_share') def test_local_path(self, mock_find_share, mock_mount_point, mock_group_cg_snapshot_type, mock_group): mock_mount_point.return_value = self.TEST_MNT_POINT_BASE mock_group_cg_snapshot_type.return_value = True volume = self._fake_volume() group = self._fake_group() mock_group.return_value = group mock_find_share.return_value = self.TEST_VOLUME_PATH local_volume_path_in_cg = os.path.join(self.TEST_MNT_POINT_BASE, 'consisgroup-' + fake.CONSISTENCY_GROUP_ID, 'volume-' + fake.VOLUME_ID) self.assertEqual(local_volume_path_in_cg, self.driver.local_path(volume)) volume.group_id = None local_volume_path = os.path.join(self.TEST_MNT_POINT_BASE, 'volume-' + fake.VOLUME_ID) self.assertEqual(local_volume_path, self.driver.local_path(volume)) @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSNFSDriver.' '_get_volume_path') def test_get_snapshot_path(self, mock_volume_path): volume = self._fake_volume() self.driver.db = mock.Mock() self.driver.db.volume_get = mock.Mock() self.driver.db.volume_get.return_value = volume mock_volume_path.return_value = os.path.join(self. TEST_GPFS_MNT_POINT_BASE, volume['name']) snapshot = self._fake_snapshot() self.assertEqual('/export/test-snap', self.driver._get_snapshot_path(snapshot)) @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSNFSDriver.' '_find_share') @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.' 'create_volume') def test_create_volume(self, mock_create_volume, mock_find_share): volume = self._fake_volume() mock_find_share.return_value = self.TEST_VOLUME_PATH self.assertEqual({'provider_location': self.TEST_VOLUME_PATH}, self.driver.create_volume(volume)) @mock.patch('os.path.dirname') @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.' '_delete_gpfs_file') @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSNFSDriver.' 'local_path') @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSNFSDriver.' '_get_volume_path') @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.' '_verify_gpfs_path_state') def test_delete_volume(self, mock_verify_gpfs_path_state, mock_volume_path, mock_local_path, mock_delete_gpfs_file, mock_dirname): mock_dirname.return_value = '/a/dir/' self.driver.delete_volume('') @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.' 'delete_snapshot') def test_delete_snapshot(self, mock_delete_snapshot): self.driver.delete_snapshot('') @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.' '_resize_volume_file') @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSNFSDriver.' '_find_share') @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.' '_create_volume_from_snapshot') def test_create_volume_from_snapshot(self, mock_create_volume_from_snapshot, mock_find_share, mock_resize_volume_file): volume = self._fake_volume() snapshot = self._fake_snapshot() mock_find_share.return_value = self.TEST_VOLUME_PATH self.assertEqual({'provider_location': self.TEST_VOLUME_PATH}, self.driver.create_volume_from_snapshot(volume, snapshot)) @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.' '_resize_volume_file') @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSNFSDriver.' '_find_share') @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.' 
                '_create_cloned_volume')
    def test_create_cloned_volume(self, mock_create_cloned_volume,
                                  mock_find_share,
                                  mock_resize_volume_file):
        volume = self._fake_volume()
        src_vref = self._fake_volume()
        mock_find_share.return_value = self.TEST_VOLUME_PATH
        self.assertEqual({'provider_location': self.TEST_VOLUME_PATH},
                         self.driver.create_cloned_volume(volume, src_vref))


cinder-27.0.0/cinder/tests/unit/volume/drivers/test_hedvig.py

# Copyright (c) 2017 Hedvig Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#

from unittest import mock

from cinder import context
from cinder import exception
from cinder.tests.unit import fake_snapshot
from cinder.tests.unit import fake_volume
from cinder.tests.unit import test
from cinder.volume import configuration as conf
from cinder.volume.drivers.hedvig import hedvig_cinder as hdvg
from cinder.volume import qos_specs
from cinder.volume import volume_types


def _fake_volume_type(*args, **kwargs):
    ctxt = context.get_admin_context()
    type_ref = volume_types.create(ctxt, "qos_extra_specs", {})
    qos_ref = qos_specs.create(ctxt, 'qos-specs', {})
    qos_specs.associate_qos_with_type(ctxt, qos_ref['id'], type_ref['id'])
    qos_type = volume_types.get_volume_type(ctxt, type_ref['id'])
    return qos_type


def _fake_volume(*args, **kwargs):
    qos_type = _fake_volume_type()
    return fake_volume.fake_volume_obj(context,
                                       name='hedvig',
                                       volume_type_id=qos_type['id'],
                                       volume_type=qos_type,
                                       volume_name='hedvig',
                                       display_name='hedvig',
                                       display_description='test volume',
                                       size=2)


class HedvigDriverTest(test.TestCase):
    def setUp(self):
        super(HedvigDriverTest, self).setUp()
        self.context = context.get_admin_context()
        self._create_fake_config()
        self.assertIsNone(self.driver.do_setup(self.ctxt))

    def _create_fake_config(self):
        self.configuration = mock.Mock(spec=conf.Configuration)
        self.configuration.san_ip = '1.0.0.1'
        self.configuration.san_login = 'dummy_user'
        self.configuration.san_password = 'dummy_password'
        self.configuration.san_clustername = 'dummy_cluster'
        self.configuration.san_is_local = False
        self.ctxt = context.get_admin_context()
        self.vol = fake_volume.fake_volume_obj(self.context)
        self.vol.volume_type = fake_volume.fake_volume_type_obj(self.context)
        self.snap = fake_snapshot.fake_snapshot_obj(self.context)
        self.snap.volume = self.vol
        self.driver = hdvg.HedvigISCSIDriver(configuration=self.configuration)

    @mock.patch('cinder.volume.drivers.hedvig.rest_client.RestClient'
                '.create_vdisk')
    def test_create_volume(self, *args, **keywargs):
        result = self.driver.create_volume(self.vol)
        self.assertIsNone(result)

    def test_create_volume_negative(self, *args, **keywargs):
        self.driver.hrs = exception.VolumeDriverException()
        self.assertRaises(exception.VolumeDriverException,
                          self.driver.create_volume, self.vol)

    @mock.patch('cinder.volume.drivers.hedvig.rest_client.RestClient'
                '.delete_vdisk')
    def test_create_delete_volume(self, *args, **keywargs):
        result = self.driver.delete_volume(self.vol)
        self.assertIsNone(result)

    def test_create_delete_volume_negative(self, *args, **keywargs):
        self.driver.hrs = exception.VolumeDriverException()
        self.assertRaises(exception.VolumeDriverException,
                          self.driver.delete_volume, self.vol)

    @mock.patch('cinder.volume.drivers.hedvig.rest_client.RestClient'
                '.resize_vdisk')
    def test_extend_volume(self, *args, **keywargs):
        self.assertIsNone(self.driver.extend_volume(self.vol, 10))

    def test_extend_volume_negative(self, *args, **keywargs):
        self.driver.hrs = exception.VolumeDriverException()
        self.assertRaises(exception.VolumeDriverException,
                          self.driver.extend_volume, self.vol, 10)

    @mock.patch('cinder.volume.drivers.hedvig.rest_client.RestClient'
                '.resize_vdisk')
    def test_extend_volume_shrinking(self, *args, **keywargs):
        volume = _fake_volume()
        self.assertRaises(exception.VolumeDriverException,
                          self.driver.extend_volume, volume, 1)

    @mock.patch('cinder.volume.drivers.hedvig.rest_client.RestClient'
                '.clone_vdisk')
    def test_create_cloned_volume(self, *args, **keywargs):
        result = self.driver.create_cloned_volume(self.vol, self.vol)
        self.assertIsNone(result)

    def test_create_cloned_volume_negative(self, *args, **keywargs):
        self.driver.hrs = exception.VolumeDriverException()
        self.assertRaises(exception.VolumeDriverException,
                          self.driver.create_cloned_volume,
                          self.vol, self.vol)

    @mock.patch('cinder.volume.drivers.hedvig.rest_client.RestClient'
                '.create_snapshot')
    def test_create_snapshot(self, *args, **keywargs):
        result = self.driver.create_snapshot(self.snap)
        self.assertIsNone(result)

    def test_create_snapshot_negative(self, *args, **keywargs):
        self.driver.hrs = exception.VolumeDriverException()
        self.assertRaises(exception.VolumeDriverException,
                          self.driver.create_snapshot, self.snap)

    @mock.patch('cinder.volume.drivers.hedvig.rest_client.RestClient'
                '.delete_snapshot')
    def test_delete_snapshot(self, *args, **keywargs):
        result = self.driver.delete_snapshot(self.snap)
        self.assertIsNone(result)

    def test_delete_snapshot_negative(self, *args, **keywargs):
        self.driver.hrs = exception.VolumeDriverException()
        self.assertRaises(exception.VolumeDriverException,
                          self.driver.delete_snapshot, self.snap)

    @mock.patch('cinder.volume.drivers.hedvig.rest_client.RestClient'
                '.clone_hedvig_snapshot')
    def test_create_volume_from_snapshot(self, *args, **keywargs):
        result = self.driver.create_volume_from_snapshot(self.vol, self.snap)
        self.assertIsNone(result)

    def test_create_volume_from_snapshot_negative(self, *args, **keywargs):
        self.driver.hrs = exception.VolumeDriverException()
        self.assertRaises(exception.VolumeDriverException,
                          self.driver.create_volume_from_snapshot,
                          self.vol, self.snap)

    def test_do_setup(self):
        self.driver.do_setup(self.context)

    def test_do_setup_san_ip_negative(self):
        self.configuration.san_ip = None
        # check the driver for setup errors
        self.assertRaises(exception.VolumeDriverException,
                          self.driver.do_setup, self.context)
        self.configuration.san_ip = "1.0.0.1"

    def test_do_setup_san_cluster_negative(self):
        self.configuration.san_clustername = None
        # check the driver for setup errors
        self.assertRaises(exception.VolumeDriverException,
                          self.driver.do_setup, self.context)
        self.configuration.san_clustername = "dummy_cluster"

    def test_do_setup_san_login_negative(self):
        self.configuration.san_login = None
        # check the driver for setup errors
        self.assertRaises(exception.VolumeDriverException,
                          self.driver.do_setup, self.context)
        self.configuration.san_login = "dummy_user"
    def test_do_setup_san_password_negative(self):
        self.configuration.san_password = None
        # check the driver for setup errors
        self.assertRaises(exception.VolumeDriverException,
                          self.driver.do_setup, self.context)
        self.configuration.san_password = "dummy_password"

    @mock.patch('cinder.volume.drivers.hedvig.rest_client.RestClient'
                '.list_targets')
    def test_hedvig_lookup_tgt(self, *args, **keywargs):
        host = "hostname"
        result = self.driver.hedvig_lookup_tgt(host)
        self.assertIsNone(result)

    def test_hedvig_lookup_tgt_negative(self, *args, **keywargs):
        host = "hostname"
        self.driver.hrs = exception.VolumeDriverException()
        self.assertRaises(exception.VolumeDriverException,
                          self.driver.hedvig_lookup_tgt, host)

    def test_hedvig_get_lun_negative(self, *args, **keywargs):
        host = "hostname"
        volname = "volume"
        self.driver.hrs = exception.VolumeDriverException()
        self.assertRaises(exception.VolumeDriverException,
                          self.driver.hedvig_get_lun, host, volname)

    @mock.patch('cinder.volume.drivers.hedvig.rest_client.RestClient'
                '.get_iqn')
    def test_hedvig_get_iqn(self, *args, **keywargs):
        host = "hostname"
        result = self.driver.hedvig_get_iqn(host)
        self.assertIsNotNone(result)

    def test_hedvig_get_iqn_negative(self, *args, **keywargs):
        host = "hostname"
        self.driver.hrs = exception.VolumeDriverException()
        self.assertRaises(exception.VolumeDriverException,
                          self.driver.hedvig_get_iqn, host)

    @mock.patch('cinder.volume.drivers.hedvig.rest_client.RestClient'
                '.list_targets')
    def test_terminate_connection_no_connector(self, *args, **keywargs):
        self.assertIsNone(self.driver.
                          terminate_connection(_fake_volume(), None))


cinder-27.0.0/cinder/tests/unit/volume/drivers/test_infinidat.py

# Copyright 2022 Infinidat Ltd.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Unit tests for INFINIDAT InfiniBox volume driver.""" import collections import copy import functools import itertools import platform import socket from unittest import mock import uuid import ddt from oslo_utils import units from cinder import exception from cinder.tests.unit import fake_constants as fake from cinder.tests.unit import test from cinder import version from cinder.volume import configuration from cinder.volume.drivers import infinidat TEST_LUN = 1 TEST_WWN_1 = '00:11:22:33:44:55:66:77' TEST_WWN_2 = '11:11:22:33:44:55:66:77' TEST_IP_ADDRESS1 = '1.1.1.1' TEST_IP_ADDRESS2 = '2.2.2.2' TEST_IP_ADDRESS3 = '3.3.3.3' TEST_IP_ADDRESS4 = '4.4.4.4' TEST_INITIATOR_IQN = 'iqn.2012-07.org.initiator:01' TEST_INITIATOR_IQN2 = 'iqn.2012-07.org.initiator:02' TEST_TARGET_IQN = 'iqn.2012-07.org.target:01' TEST_ISCSI_TCP_PORT1 = 3261 TEST_ISCSI_TCP_PORT2 = 3262 TEST_ISCSI_NAMESPACE1 = 'netspace1' TEST_ISCSI_NAMESPACE2 = 'netspace2' TEST_TARGET_PORTAL1 = '{}:{}'.format(TEST_IP_ADDRESS1, TEST_ISCSI_TCP_PORT1) TEST_TARGET_PORTAL2 = '{}:{}'.format(TEST_IP_ADDRESS2, TEST_ISCSI_TCP_PORT1) TEST_TARGET_PORTAL3 = '{}:{}'.format(TEST_IP_ADDRESS3, TEST_ISCSI_TCP_PORT2) TEST_TARGET_PORTAL4 = '{}:{}'.format(TEST_IP_ADDRESS4, TEST_ISCSI_TCP_PORT2) TEST_FC_PROTOCOL = 'fc' TEST_ISCSI_PROTOCOL = 'iscsi' TEST_VOLUME_SOURCE_NAME = 'test-volume' TEST_VOLUME_TYPE = 'MASTER' TEST_VOLUME_SOURCE_ID = 12345 TEST_VOLUME_METADATA = {'cinder_id': fake.VOLUME_ID} TEST_SNAPSHOT_SOURCE_NAME = 'test-snapshot' TEST_SNAPSHOT_SOURCE_ID = 67890 TEST_SNAPSHOT_METADATA = {'cinder_id': fake.SNAPSHOT_ID} TEST_POOL_NAME = 'pool' TEST_POOL_NAME2 = 'pool2' TEST_SYSTEM_SERIAL = 123 TEST_SYSTEM_SERIAL2 = 456 test_volume = mock.Mock(id=fake.VOLUME_ID, name_id=fake.VOLUME_ID, size=1, volume_type_id=fake.VOLUME_TYPE_ID, group_id=None, multiattach=False, volume_attachment=None) test_volume2 = mock.Mock(id=fake.VOLUME2_ID, name_id=fake.VOLUME2_ID, size=1, volume_type_id=None, group_id=None, multiattach=False, volume_attachment=None) test_volume3 = mock.Mock(id=fake.VOLUME3_ID, name_id=fake.VOLUME3_ID, size=1, volume_type_id=fake.VOLUME_TYPE_ID, group_id=fake.GROUP_ID, multiattach=True, volume_attachment=None) test_snapshot = mock.Mock(id=fake.SNAPSHOT_ID, volume=test_volume) test_clone = mock.Mock(id=fake.VOLUME4_ID, name_id=fake.VOLUME4_ID, size=1, volume_type_id=fake.VOLUME_TYPE_ID, group_id=None, multiattach=False, volume_attachment=None) test_group = mock.Mock(id=fake.GROUP_ID) test_snapgroup = mock.Mock(id=fake.GROUP_SNAPSHOT_ID, group=test_group) test_connector = dict(wwpns=[TEST_WWN_1], initiator=TEST_INITIATOR_IQN) test_connector2 = dict(wwpns=[TEST_WWN_2], initiator=TEST_INITIATOR_IQN2) test_connector3 = dict(wwpns=None, initiator=None) test_attachment1 = mock.Mock(connector=test_connector) test_attachment2 = mock.Mock(connector=test_connector2) test_attachment3 = mock.Mock(connector=None) def skip_driver_setup(func): @functools.wraps(func) def f(*args, **kwargs): return func(*args, **kwargs) f.__skip_driver_setup = True return f class FakeInfinisdkException(Exception): pass class InfiniboxDriverTestCaseBase(test.TestCase): def _test_skips_driver_setup(self): test_method_name = self.id().split('.')[-1] test_method = getattr(self, test_method_name) return getattr(test_method, '__skip_driver_setup', False) def setUp(self): super(InfiniboxDriverTestCaseBase, self).setUp() self.configuration = configuration.Configuration(None) self.configuration.append_config_values(infinidat.infinidat_opts) self.override_config('san_ip', 'infinibox', 
configuration.SHARED_CONF_GROUP) self.override_config('san_login', 'user', configuration.SHARED_CONF_GROUP) self.override_config('san_password', 'password', configuration.SHARED_CONF_GROUP) self.override_config('infinidat_pool_name', TEST_POOL_NAME) self.driver = infinidat.InfiniboxVolumeDriver( configuration=self.configuration) self._system = self._infinibox_mock() # mock external library dependencies infinisdk = self.patch("cinder.volume.drivers.infinidat.infinisdk") capacity = self.patch("cinder.volume.drivers.infinidat.capacity") self._log = self.patch("cinder.volume.drivers.infinidat.LOG") self._iqn = self.patch("cinder.volume.drivers.infinidat.iqn") self._wwn = self.patch("cinder.volume.drivers.infinidat.wwn") self._wwn.WWN = mock.Mock self._iqn.IQN = mock.Mock capacity.byte = 1 capacity.GiB = units.Gi infinisdk.core.exceptions.InfiniSDKException = FakeInfinisdkException infinisdk.InfiniBox.return_value = self._system if not self._test_skips_driver_setup(): self.driver.do_setup(None) def _infinibox_mock(self): result = mock.Mock() self._mock_volume = mock.Mock() self._mock_new_volume = mock.Mock() self._mock_volume.get_id.return_value = TEST_VOLUME_SOURCE_ID self._mock_volume.get_name.return_value = TEST_VOLUME_SOURCE_NAME self._mock_volume.get_type.return_value = TEST_VOLUME_TYPE self._mock_volume.get_pool_name.return_value = TEST_POOL_NAME self._mock_volume.get_size.return_value = 1 * units.Gi self._mock_volume.has_children.return_value = False self._mock_volume.get_qos_policy.return_value = None self._mock_volume.get_logical_units.return_value = [] self._mock_volume.get_all_metadata.return_value = {} self._mock_volume.create_snapshot.return_value = self._mock_volume self._mock_snapshot = mock.Mock() self._mock_snapshot.get_parent.return_value = self._mock_volume self._mock_host = mock.Mock() self._mock_host.get_luns.return_value = [] self._mock_host.map_volume().get_lun.return_value = TEST_LUN self._mock_pool = mock.Mock() self._mock_pool.get_free_physical_capacity.return_value = units.Gi self._mock_pool.get_physical_capacity.return_value = units.Gi self._mock_pool.get_volumes.return_value = [self._mock_volume] self._mock_name_space1 = mock.Mock() self._mock_name_space2 = mock.Mock() self._mock_name_space1.get_ips.return_value = [ mock.Mock(ip_address=TEST_IP_ADDRESS1, enabled=True)] self._mock_name_space2.get_ips.return_value = [ mock.Mock(ip_address=TEST_IP_ADDRESS3, enabled=True)] self._mock_name_space1.get_properties.return_value = mock.Mock( iscsi_iqn=TEST_TARGET_IQN, iscsi_tcp_port=TEST_ISCSI_TCP_PORT1) self._mock_name_space2.get_properties.return_value = mock.Mock( iscsi_iqn=TEST_TARGET_IQN, iscsi_tcp_port=TEST_ISCSI_TCP_PORT2) self._mock_group = mock.Mock() self._mock_qos_policy = mock.Mock() result.volumes.safe_get.return_value = self._mock_volume result.volumes.create.return_value = self._mock_volume result.pools.safe_get.return_value = self._mock_pool result.hosts.safe_get.return_value = self._mock_host result.cons_groups.safe_get.return_value = self._mock_group result.cons_groups.create.return_value = self._mock_group result.hosts.create.return_value = self._mock_host result.network_spaces.safe_get.return_value = self._mock_name_space1 result.components.nodes.get_all.return_value = [] result.qos_policies.create.return_value = self._mock_qos_policy result.qos_policies.safe_get.return_value = None result.get_serial.return_value = TEST_SYSTEM_SERIAL return result def _raise_infinisdk(self, *args, **kwargs): raise FakeInfinisdkException() @ddt.ddt class 
InfiniboxDriverTestCase(InfiniboxDriverTestCaseBase): def _generate_mock_object_metadata(self, cinder_object): return {"system": "openstack", "openstack_version": version.version_info.release_string(), "cinder_id": cinder_object.id, "cinder_name": cinder_object.name, "host.created_by": infinidat._INFINIDAT_CINDER_IDENTIFIER} def _validate_object_metadata(self, infinidat_object, cinder_object): infinidat_object.set_metadata_from_dict.assert_called_once_with( self._generate_mock_object_metadata(cinder_object)) def _generate_mock_host_metadata(self): return {"system": "openstack", "openstack_version": version.version_info.release_string(), "hostname": socket.gethostname(), "platform": platform.platform(), "host.created_by": infinidat._INFINIDAT_CINDER_IDENTIFIER} def _validate_host_metadata(self): self._mock_host.set_metadata_from_dict.assert_called_once_with( self._generate_mock_host_metadata()) @mock.patch('cinder.volume.drivers.infinidat.InfiniboxVolumeDriver.' '_get_oslo_driver_opts') def test_get_driver_options(self, _get_oslo_driver_opts): _get_oslo_driver_opts.return_value = [] result = self.driver.get_driver_options() actual = (infinidat.infinidat_opts) self.assertEqual(actual, result) @skip_driver_setup def test__setup_and_get_system_object(self): # This test should skip the driver setup, as it generates more calls to # the add_auto_retry, set_source_identifier and login methods: auth = (self.configuration.san_login, self.configuration.san_password) self.driver._setup_and_get_system_object( self.configuration.san_ip, auth) self._system.api.add_auto_retry.assert_called_once() self._system.api.set_source_identifier.assert_called_once_with( infinidat._INFINIDAT_CINDER_IDENTIFIER) self._system.login.assert_called_once() @skip_driver_setup @mock.patch('cinder.volume.drivers.infinidat.infinisdk', None) def test_do_setup_no_infinisdk(self): self.assertRaises(exception.VolumeDriverException, self.driver.do_setup, None) @mock.patch('cinder.volume.drivers.infinidat.infinisdk.InfiniBox') @ddt.data(True, False) def test_ssl_options(self, use_ssl, infinibox): auth = (self.configuration.san_login, self.configuration.san_password) self.override_config('driver_use_ssl', use_ssl) self.driver.do_setup(None) infinibox.assert_called_once_with(self.configuration.san_ip, auth=auth, use_ssl=use_ssl) def test_create_export_snapshot(self): self.assertIsNone(self.driver.create_export_snapshot( None, test_snapshot, test_connector)) def test_remove_export_snapshot(self): self.assertIsNone(self.driver.remove_export_snapshot( None, test_snapshot)) def test_backup_use_temp_snapshot(self): self.assertTrue(self.driver.backup_use_temp_snapshot()) @mock.patch('cinder.volume.drivers.infinidat.InfiniboxVolumeDriver.' '_get_infinidat_snapshot') def test_initialize_connection_snapshot(self, get_snapshot): result = self.driver.initialize_connection_snapshot( test_snapshot, test_connector) get_snapshot.assert_called_once_with(test_snapshot) self.assertEqual(1, result["data"]["target_lun"]) @mock.patch('cinder.volume.drivers.infinidat.InfiniboxVolumeDriver.' 
'_get_infinidat_volume') def test_initialize_connection(self, get_volume): self._system.hosts.safe_get.return_value = None result = self.driver.initialize_connection(test_volume, test_connector) get_volume.assert_called_once_with(test_volume) self.assertEqual(1, result["data"]["target_lun"]) def test_initialize_connection_host_exists(self): result = self.driver.initialize_connection(test_volume, test_connector) self.assertEqual(1, result["data"]["target_lun"]) def test_initialize_connection_mapping_exists(self): mock_mapping = mock.Mock() mock_mapping.get_volume.return_value = self._mock_volume mock_mapping.get_lun.return_value = 888 self._mock_host.get_luns.return_value = [mock_mapping] result = self.driver.initialize_connection(test_volume, test_connector) self.assertEqual(888, result["data"]["target_lun"]) def test_initialize_connection_mapping_not_found(self): mock_mapping = mock.Mock() mock_mapping.get_volume.return_value = None self._mock_host.get_luns.return_value = [mock_mapping] result = self.driver.initialize_connection(test_volume, test_connector) self.assertEqual(TEST_LUN, result["data"]["target_lun"]) def test_initialize_connection_volume_doesnt_exist(self): self._system.volumes.safe_get.return_value = None self.assertRaises(exception.VolumeNotFound, self.driver.initialize_connection, test_volume, test_connector) def test_initialize_connection_create_fails(self): self._system.hosts.safe_get.return_value = None self._system.hosts.create.side_effect = self._raise_infinisdk self.assertRaises(exception.VolumeBackendAPIException, self.driver.initialize_connection, test_volume, test_connector) def test_initialize_connection_map_fails(self): self._mock_host.map_volume.side_effect = self._raise_infinisdk self.assertRaises(exception.VolumeBackendAPIException, self.driver.initialize_connection, test_volume, test_connector) def test_initialize_connection_metadata(self): self._system.hosts.safe_get.return_value = None self.driver.initialize_connection(test_volume, test_connector) self._validate_host_metadata() @ddt.data({'connector': None, 'multiattach': True, 'attachment': [test_attachment1, test_attachment1]}, {'connector': test_connector3, 'multiattach': True, 'attachment': [test_attachment1, test_attachment1]}, {'connector': test_connector, 'multiattach': False, 'attachment': [test_attachment1]}, {'connector': test_connector, 'multiattach': True, 'attachment': None}, {'connector': test_connector, 'multiattach': True, 'attachment': [test_attachment2, test_attachment3]}) @ddt.unpack def test__is_volume_multiattached_negative(self, connector, multiattach, attachment): volume = copy.deepcopy(test_volume) volume.multiattach = multiattach volume.volume_attachment = attachment self.assertFalse(self.driver._is_volume_multiattached(volume, connector)) @mock.patch('cinder.volume.drivers.infinidat.InfiniboxVolumeDriver.' '_terminate_connection') @mock.patch('cinder.volume.drivers.infinidat.InfiniboxVolumeDriver.' '_get_infinidat_volume') @mock.patch('cinder.volume.drivers.infinidat.InfiniboxVolumeDriver.' 
'_is_volume_multiattached') def test_terminate_connection(self, volume_multiattached, get_volume, terminate_connection): volume = copy.deepcopy(test_volume) volume.volume_attachment = [test_attachment1] volume_multiattached.return_value = False get_volume.return_value = self._mock_volume self.assertFalse(self.driver.terminate_connection(volume, test_connector)) volume_multiattached.assert_called_once_with(volume, test_connector) get_volume.assert_called_once_with(volume) terminate_connection.assert_called_once_with(self._mock_volume, test_connector) @mock.patch('cinder.volume.drivers.infinidat.InfiniboxVolumeDriver.' '_terminate_connection') @mock.patch('cinder.volume.drivers.infinidat.InfiniboxVolumeDriver.' '_get_infinidat_snapshot') def test_terminate_connection_snapshot(self, get_snapshot, terminate_connection): get_snapshot.return_value = self._mock_snapshot self.assertIsNone(self.driver.terminate_connection_snapshot( test_snapshot, test_connector)) get_snapshot.assert_called_once_with(test_snapshot) terminate_connection.assert_called_once_with(self._mock_snapshot, test_connector) def test_terminate_connection_delete_host(self): self._mock_host.get_luns.return_value = [object()] volume = copy.deepcopy(test_volume) volume.volume_attachment = [test_attachment1] self.assertFalse(self.driver.terminate_connection(volume, test_connector)) self.assertEqual(0, self._mock_host.safe_delete.call_count) self._mock_host.get_luns.return_value = [] self.assertFalse(self.driver.terminate_connection(volume, test_connector)) self.assertEqual(1, self._mock_host.safe_delete.call_count) def test_terminate_connection_volume_doesnt_exist(self): self._system.volumes.safe_get.return_value = None self.assertRaises(exception.VolumeNotFound, self.driver.terminate_connection, test_volume, test_connector) def test_terminate_connection_api_fail(self): self._mock_host.unmap_volume.side_effect = self._raise_infinisdk self.assertRaises(exception.VolumeBackendAPIException, self.driver.terminate_connection, test_volume, test_connector) def test_get_volume_stats_refreshes(self): result = self.driver.get_volume_stats() self.assertEqual(1, result["free_capacity_gb"]) # change the "free space" in the pool self._mock_pool.get_free_physical_capacity.return_value = 0 # no refresh - free capacity should stay the same result = self.driver.get_volume_stats(refresh=False) self.assertEqual(1, result["free_capacity_gb"]) # refresh - free capacity should change to 0 result = self.driver.get_volume_stats(refresh=True) self.assertEqual(0, result["free_capacity_gb"]) def test_get_volume_stats_pool_not_found(self): self._system.pools.safe_get.return_value = None self.assertRaises(exception.VolumeDriverException, self.driver.get_volume_stats) def test_get_volume_stats_max_over_subscription_ratio(self): self.override_config('san_thin_provision', True) self.override_config('max_over_subscription_ratio', 10.0) result = self.driver.get_volume_stats() self.assertEqual('10.0', result['max_over_subscription_ratio']) self.assertTrue(result['thin_provisioning_support']) self.assertFalse(result['thick_provisioning_support']) @mock.patch("cinder.volume.volume_types.get_volume_type_qos_specs") def test_create_volume(self, *mocks): self.driver.create_volume(test_volume) def test_create_volume_pool_not_found(self): self._system.pools.safe_get.return_value = None self.assertRaises(exception.VolumeDriverException, self.driver.create_volume, test_volume) def test_create_volume_api_fail(self): self._system.pools.safe_get.side_effect = self._raise_infinisdk 
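# With the pool lookup raising FakeInfinisdkException, create_volume is
# expected to surface the SDK failure as a VolumeBackendAPIException.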
self.assertRaises(exception.VolumeBackendAPIException, self.driver.create_volume, test_volume) @mock.patch("cinder.volume.volume_types.get_volume_type_qos_specs") def test_create_volume_metadata(self, *mocks): self.driver.create_volume(test_volume) self._validate_object_metadata(self._mock_volume, test_volume) @mock.patch("cinder.volume.volume_types.get_volume_type_qos_specs") def test_create_volume_compression_enabled(self, *mocks): self.override_config('infinidat_use_compression', True) self.driver.create_volume(test_volume) self.assertTrue( self._system.volumes.create.call_args[1]["compression_enabled"] ) @mock.patch("cinder.volume.volume_types.get_volume_type_qos_specs") def test_create_volume_compression_disabled(self, *mocks): self.override_config('infinidat_use_compression', False) self.driver.create_volume(test_volume) self.assertFalse( self._system.volumes.create.call_args[1]["compression_enabled"] ) @mock.patch("cinder.volume.volume_types.get_volume_type_qos_specs") def test_create_volume_compression_default(self, *mocks): self.driver.create_volume(test_volume) self.assertNotIn( "compression_enabled", self._system.volumes.create.call_args[1] ) @mock.patch('cinder.volume.volume_types.get_volume_type_qos_specs') @mock.patch('cinder.volume.volume_utils.group_get_by_id') @mock.patch('cinder.volume.volume_utils.is_group_a_cg_snapshot_type', return_value=True) def test_create_volume_within_group(self, *mocks): self.driver.create_volume(test_volume3) self._mock_group.add_member.assert_called_once() @mock.patch('cinder.volume.volume_types.get_volume_type_qos_specs') @mock.patch('cinder.volume.volume_utils.group_get_by_id') @mock.patch('cinder.volume.volume_utils.is_group_a_cg_snapshot_type', return_value=False) def test_create_volume_within_no_cg_group(self, *mocks): self.driver.create_volume(test_volume3) self._mock_group.add_member.assert_not_called() def test_delete_volume(self): self.driver.delete_volume(test_volume) def test_delete_volume_doesnt_exist(self): self._system.volumes.safe_get.return_value = None # should not raise an exception self.driver.delete_volume(test_volume) def test_delete_volume_with_children(self): self._mock_volume.has_children.return_value = True self.assertRaises(exception.VolumeIsBusy, self.driver.delete_volume, test_volume) def test_extend_volume(self): self.driver.extend_volume(test_volume, 2) self._mock_volume.resize.assert_called_with(1 * units.Gi) def test_extend_volume_api_fail(self): self._mock_volume.resize.side_effect = self._raise_infinisdk self.assertRaises(exception.VolumeBackendAPIException, self.driver.extend_volume, test_volume, 2) def test_create_snapshot(self): self.driver.create_snapshot(test_snapshot) def test_create_snapshot_metadata(self): self._mock_volume.create_snapshot.return_value = self._mock_volume self.driver.create_snapshot(test_snapshot) self._validate_object_metadata(self._mock_volume, test_snapshot) def test_create_snapshot_volume_doesnt_exist(self): self._system.volumes.safe_get.return_value = None self.assertRaises(exception.VolumeNotFound, self.driver.create_snapshot, test_snapshot) def test_create_snapshot_api_fail(self): self._mock_volume.create_snapshot.side_effect = self._raise_infinisdk self.assertRaises(exception.VolumeBackendAPIException, self.driver.create_snapshot, test_snapshot) @mock.patch("cinder.volume.volume_utils.copy_volume") @mock.patch("cinder.volume.volume_utils.brick_get_connector") @mock.patch("cinder.volume.volume_utils.brick_get_connector_properties", return_value=test_connector) 
@mock.patch("cinder.volume.volume_types.get_volume_type_qos_specs") def test_create_volume_from_snapshot(self, *mocks): self.driver.create_volume_from_snapshot(test_clone, test_snapshot) def test_create_volume_from_snapshot_doesnt_exist(self): self._system.volumes.safe_get.return_value = None self.assertRaises(exception.SnapshotNotFound, self.driver.create_volume_from_snapshot, test_clone, test_snapshot) def test_create_volume_from_snapshot_create_fails(self): self._system.volumes.create.side_effect = self._raise_infinisdk self.assertRaises(exception.VolumeBackendAPIException, self.driver.create_volume_from_snapshot, test_clone, test_snapshot) @mock.patch("cinder.volume.volume_utils.brick_get_connector_properties", return_value=test_connector) @mock.patch("cinder.volume.volume_types.get_volume_type_qos_specs") def test_create_volume_from_snapshot_map_fails(self, *mocks): self._mock_host.map_volume.side_effect = self._raise_infinisdk self.assertRaises(exception.VolumeBackendAPIException, self.driver.create_volume_from_snapshot, test_clone, test_snapshot) @mock.patch('cinder.volume.volume_utils.brick_get_connector') @mock.patch('cinder.volume.volume_types.get_volume_type_qos_specs') @mock.patch('cinder.volume.volume_utils.brick_get_connector_properties') @mock.patch('cinder.volume.drivers.infinidat.InfiniboxVolumeDriver.' '_connect_device') def test_create_volume_from_snapshot_connect_fails(self, connect_device, connector_properties, *mocks): connector_properties.return_value = test_connector connect_device.side_effect = exception.DeviceUnavailable( path='/dev/sdb', reason='Block device required') self.assertRaises(exception.DeviceUnavailable, self.driver.create_volume_from_snapshot, test_clone, test_snapshot) def test_delete_snapshot(self): self.driver.delete_snapshot(test_snapshot) def test_delete_snapshot_doesnt_exist(self): self._system.volumes.safe_get.return_value = None # should not raise an exception self.driver.delete_snapshot(test_snapshot) def test_delete_snapshot_api_fail(self): self._mock_volume.safe_delete.side_effect = self._raise_infinisdk self.assertRaises(exception.VolumeBackendAPIException, self.driver.delete_snapshot, test_snapshot) @mock.patch('cinder.volume.drivers.infinidat.InfiniboxVolumeDriver.' 'delete_snapshot') @mock.patch('cinder.volume.drivers.infinidat.InfiniboxVolumeDriver.' 'create_volume_from_snapshot') @mock.patch('cinder.volume.drivers.infinidat.InfiniboxVolumeDriver.' 
'create_snapshot') @mock.patch('uuid.uuid4') def test_create_cloned_volume(self, mock_uuid, create_snapshot, create_volume_from_snapshot, delete_snapshot): mock_uuid.return_value = uuid.UUID(test_snapshot.id) snapshot_attributes = ('id', 'name', 'volume') Snapshot = collections.namedtuple('Snapshot', snapshot_attributes) snapshot_id = test_snapshot.id snapshot_name = self.configuration.snapshot_name_template % snapshot_id snapshot = Snapshot(id=snapshot_id, name=snapshot_name, volume=test_volume) self.driver.create_cloned_volume(test_clone, test_volume) create_snapshot.assert_called_once_with(snapshot) create_volume_from_snapshot.assert_called_once_with(test_clone, snapshot) delete_snapshot.assert_called_once_with(snapshot) def test_create_cloned_volume_create_fails(self): self._system.volumes.create.side_effect = self._raise_infinisdk self.assertRaises(exception.VolumeBackendAPIException, self.driver.create_cloned_volume, test_clone, test_volume) @mock.patch("cinder.volume.volume_utils.brick_get_connector_properties", return_value=test_connector) @mock.patch("cinder.volume.volume_types.get_volume_type_qos_specs") def test_create_cloned_volume_map_fails(self, *mocks): self._mock_host.map_volume.side_effect = self._raise_infinisdk self.assertRaises(exception.VolumeBackendAPIException, self.driver.create_cloned_volume, test_clone, test_volume) @mock.patch('cinder.volume.volume_utils.is_group_a_cg_snapshot_type', return_value=True) def test_create_group(self, *mocks): self.driver.create_group(None, test_group) @mock.patch('cinder.volume.volume_utils.is_group_a_cg_snapshot_type', return_value=False) def test_create_generic_group(self, *mocks): self.assertRaises(NotImplementedError, self.driver.create_group, None, test_group) @mock.patch('cinder.volume.volume_utils.is_group_a_cg_snapshot_type', return_value=True) def test_create_group_metadata(self, *mocks): self.driver.create_group(None, test_group) self._validate_object_metadata(self._mock_group, test_group) @mock.patch('cinder.volume.volume_utils.is_group_a_cg_snapshot_type', return_value=True) def test_create_group_twice(self, *mocks): self.driver.create_group(None, test_group) self.driver.create_group(None, test_group) @mock.patch('cinder.volume.volume_utils.is_group_a_cg_snapshot_type', return_value=True) def test_create_group_api_fail(self, *mocks): self._system.cons_groups.create.side_effect = self._raise_infinisdk self.assertRaises(exception.VolumeBackendAPIException, self.driver.create_group, None, test_group) @mock.patch('cinder.volume.volume_utils.is_group_a_cg_snapshot_type', return_value=True) def test_delete_group(self, *mocks): self.driver.delete_group(None, test_group, [test_volume]) @mock.patch('cinder.volume.volume_utils.is_group_a_cg_snapshot_type', return_value=False) def test_delete_generic_group(self, *mocks): self.assertRaises(NotImplementedError, self.driver.delete_group, None, test_group, [test_volume]) @mock.patch('cinder.volume.volume_utils.is_group_a_cg_snapshot_type', return_value=True) def test_delete_group_doesnt_exist(self, *mocks): self._system.cons_groups.safe_get.return_value = None self.driver.delete_group(None, test_group, [test_volume]) @mock.patch('cinder.volume.volume_utils.is_group_a_cg_snapshot_type', return_value=True) def test_delete_group_api_fail(self, *mocks): self._mock_group.safe_delete.side_effect = self._raise_infinisdk self.assertRaises(exception.VolumeBackendAPIException, self.driver.delete_group, None, test_group, [test_volume]) 
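# The update_group tests below follow the same pattern: group membership
# changes are only exercised for consistency-group types, and generic groups
# are expected to raise NotImplementedError.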
@mock.patch('cinder.volume.volume_utils.is_group_a_cg_snapshot_type', return_value=True) def test_update_group_add_and_remove(self, *mocks): self.driver.update_group(None, test_group, [test_volume], [test_volume]) @mock.patch('cinder.volume.volume_utils.is_group_a_cg_snapshot_type', return_value=False) def test_update_generic_group_add_and_remove(self, *mocks): self.assertRaises(NotImplementedError, self.driver.update_group, None, test_group, [test_volume], [test_volume]) @mock.patch('cinder.volume.volume_utils.is_group_a_cg_snapshot_type', return_value=True) def test_update_group_api_fail(self, *mocks): self._mock_group.add_member.side_effect = self._raise_infinisdk self.assertRaises(exception.VolumeBackendAPIException, self.driver.update_group, None, test_group, [test_volume], [test_volume]) @mock.patch("cinder.volume.volume_utils.copy_volume") @mock.patch("cinder.volume.volume_utils.brick_get_connector") @mock.patch("cinder.volume.volume_utils.brick_get_connector_properties", return_value=test_connector) @mock.patch('cinder.volume.volume_utils.is_group_a_cg_snapshot_type', return_value=True) @mock.patch("cinder.volume.volume_types.get_volume_type_qos_specs") def test_create_group_from_src_snaps(self, *mocks): self.driver.create_group_from_src(None, test_group, [test_volume], test_snapgroup, [test_snapshot], None, None) @mock.patch('cinder.volume.volume_utils.is_group_a_cg_snapshot_type', return_value=False) def test_create_genericgroup_from_src_snaps(self, *mocks): self.assertRaises(NotImplementedError, self.driver.create_group_from_src, None, test_group, [test_volume], test_snapgroup, [test_snapshot], None, None) @mock.patch('cinder.volume.volume_utils.is_group_a_cg_snapshot_type', return_value=True) def test_create_group_from_empty_sources(self, *mocks): self.assertRaises(exception.InvalidInput, self.driver.create_group_from_src, None, test_group, [test_volume], None, None, None, None) @mock.patch("cinder.volume.volume_utils.copy_volume") @mock.patch("cinder.volume.volume_utils.brick_get_connector") @mock.patch("cinder.volume.volume_utils.brick_get_connector_properties", return_value=test_connector) @mock.patch('cinder.volume.volume_utils.is_group_a_cg_snapshot_type', return_value=True) @mock.patch("cinder.volume.volume_types.get_volume_type_qos_specs") def test_create_group_from_src_vols(self, *mocks): self.driver.create_group_from_src(None, test_group, [test_volume], None, None, test_group, [test_volume]) @mock.patch('cinder.volume.volume_utils.is_group_a_cg_snapshot_type', return_value=True) def test_create_group_snap(self, *mocks): mock_snapgroup = mock.Mock() mock_snapgroup.get_members.return_value = [self._mock_snapshot, self._mock_snapshot] self._mock_volume.get_name.side_effect = [fake.VOLUME_NAME, fake.VOLUME2_NAME] self._mock_group.create_snapshot.return_value = mock_snapgroup self.driver.create_group_snapshot(None, test_snapgroup, [test_snapshot, test_snapshot]) @mock.patch('cinder.volume.volume_utils.is_group_a_cg_snapshot_type', return_value=False) def test_create_generic_group_snap(self, *mocks): self.assertRaises(NotImplementedError, self.driver.create_group_snapshot, None, test_snapgroup, [test_snapshot]) @mock.patch('cinder.volume.volume_utils.is_group_a_cg_snapshot_type', return_value=True) def test_create_group_snap_api_fail(self, *mocks): self._mock_group.create_snapshot.side_effect = self._raise_infinisdk self.assertRaises(exception.VolumeBackendAPIException, self.driver.create_group_snapshot, None, test_snapgroup, [test_snapshot]) 
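# Group snapshot deletion tests: deletion is only supported for
# consistency-group snapshot types; generic group snapshots raise
# NotImplementedError, and a missing snapgroup on the backend is tolerated.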
@mock.patch('cinder.volume.volume_utils.is_group_a_cg_snapshot_type', return_value=True) def test_delete_group_snap(self, *mocks): self.driver.delete_group_snapshot(None, test_snapgroup, [test_snapshot]) @mock.patch('cinder.volume.volume_utils.is_group_a_cg_snapshot_type', return_value=False) def test_delete_generic_group_snap(self, *mocks): self.assertRaises(NotImplementedError, self.driver.delete_group_snapshot, None, test_snapgroup, [test_snapshot]) @mock.patch('cinder.volume.volume_utils.is_group_a_cg_snapshot_type', return_value=True) def test_delete_group_snap_does_not_exist(self, *mocks): self._system.cons_groups.safe_get.return_value = None self.driver.delete_group_snapshot(None, test_snapgroup, [test_snapshot]) @mock.patch('cinder.volume.volume_utils.is_group_a_cg_snapshot_type', return_value=True) def test_delete_group_snap_invalid_group(self, *mocks): self._mock_group.is_snapgroup.return_value = False self.assertRaises(exception.InvalidGroupSnapshot, self.driver.delete_group_snapshot, None, test_snapgroup, [test_snapshot]) @mock.patch('cinder.volume.volume_utils.is_group_a_cg_snapshot_type', return_value=True) def test_delete_group_snap_api_fail(self, *mocks): self._mock_group.safe_delete.side_effect = self._raise_infinisdk self.assertRaises(exception.VolumeBackendAPIException, self.driver.delete_group_snapshot, None, test_snapgroup, [test_snapshot]) def test_snapshot_revert_use_temp_snapshot(self): result = self.driver.snapshot_revert_use_temp_snapshot() self.assertFalse(result) @ddt.data((1, 1), (1, 2)) @ddt.unpack def test_revert_to_snapshot_resize(self, volume_size, snapshot_size): volume = copy.deepcopy(test_volume) snapshot = copy.deepcopy(test_snapshot) snapshot.volume.size = snapshot_size self._system.volumes.safe_get.side_effect = [self._mock_snapshot, self._mock_volume, self._mock_volume] self._mock_volume.get_size.side_effect = [volume_size * units.Gi, volume_size * units.Gi] self.driver.revert_to_snapshot(None, volume, snapshot) self._mock_volume.restore.assert_called_once_with(self._mock_snapshot) if volume_size == snapshot_size: self._mock_volume.resize.assert_not_called() else: delta = (snapshot_size - volume_size) * units.Gi self._mock_volume.resize.assert_called_with(delta) @mock.patch('cinder.volume.volume_types.get_volume_type_qos_specs') def test_manage_existing_by_source_name(self, *mocks): existing_ref = {'source-name': TEST_VOLUME_SOURCE_NAME} self.driver.manage_existing(test_volume, existing_ref) @mock.patch('cinder.volume.volume_types.get_volume_type_qos_specs') def test_manage_existing_by_source_id(self, *mocks): existing_ref = {'source-id': TEST_VOLUME_SOURCE_ID} self.driver.manage_existing(test_volume, existing_ref) @mock.patch('cinder.volume.volume_types.get_volume_type_qos_specs') def test_manage_existing_by_invalid_source(self, *mocks): existing_ref = {'source-path': None} self.assertRaises(exception.ManageExistingInvalidReference, self.driver.manage_existing, test_volume, existing_ref) @mock.patch('cinder.volume.volume_types.get_volume_type_qos_specs') @mock.patch('cinder.volume.volume_utils.check_already_managed_volume', return_value=False) def test_manage_existing_not_managed(self, *mocks): self._mock_volume.get_all_metadata.return_value = ( TEST_VOLUME_METADATA) existing_ref = {'source-name': TEST_VOLUME_SOURCE_NAME} self.driver.manage_existing(test_volume, existing_ref) @mock.patch('cinder.volume.volume_types.get_volume_type_qos_specs') @mock.patch('cinder.volume.volume_utils.check_already_managed_volume', return_value=True) def 
test_manage_existing_already_managed(self, *mocks): self._mock_volume.get_all_metadata.return_value = ( TEST_VOLUME_METADATA) existing_ref = {'source-name': TEST_VOLUME_SOURCE_NAME} self.assertRaises(exception.ManageExistingAlreadyManaged, self.driver.manage_existing, test_volume, existing_ref) @mock.patch('cinder.volume.volume_types.get_volume_type_qos_specs') def test_manage_existing_invalid_pool(self, *mocks): existing_ref = {'source-name': TEST_VOLUME_SOURCE_NAME} self._mock_volume.get_pool_name.return_value = TEST_POOL_NAME2 self.assertRaises(exception.InvalidConfigurationValue, self.driver.manage_existing, test_volume, existing_ref) def test_manage_existing_get_size(self): existing_ref = {'source-name': TEST_VOLUME_SOURCE_NAME} size = self.driver.manage_existing_get_size(test_volume, existing_ref) self.assertEqual(test_volume.size, size) def test_get_manageable_volumes(self): cinder_volumes = [test_volume] self._mock_volume.is_snapshot.return_value = False self._mock_volume.get_all_metadata.return_value = { 'cinder_id': fake.VOLUME2_ID } self.driver.get_manageable_volumes(cinder_volumes, None, 1, 0, [], []) def test_get_manageable_volumes_already_managed(self): cinder_volumes = [test_volume] self._mock_volume.get_id.return_value = TEST_VOLUME_SOURCE_ID self._mock_volume.get_all_metadata.return_value = ( TEST_VOLUME_METADATA) self._mock_volume.is_snapshot.return_value = False self.driver.get_manageable_volumes(cinder_volumes, None, 1, 0, [], []) def test_get_manageable_volumes_but_snapshots(self): cinder_volumes = [test_volume] self._mock_volume.is_snapshot.return_value = True self.driver.get_manageable_volumes(cinder_volumes, None, 1, 0, [], []) def test_get_manageable_volumes_has_mappings(self): cinder_volumes = [test_volume] self._mock_volume.is_snapshot.return_value = False self._mock_volume.get_all_metadata.return_value = { 'cinder_id': fake.VOLUME2_ID } lun = mock.Mock() self._mock_volume.get_logical_units.return_value = [lun] self.driver.get_manageable_volumes(cinder_volumes, None, 1, 0, [], []) def test_get_manageable_volumes_has_snapshots(self): cinder_volumes = [test_volume] self._mock_volume.is_snapshot.return_value = False self._mock_volume.has_children.return_value = True self._mock_volume.get_all_metadata.return_value = { 'cinder_id': fake.VOLUME2_ID } self.driver.get_manageable_volumes(cinder_volumes, None, 1, 0, [], []) def test_unmanage(self): self.driver.unmanage(test_volume) @mock.patch('cinder.objects.Snapshot.exists', return_value=True) def test__check_already_managed_snapshot(self, *mocks): self.driver._check_already_managed_snapshot(test_snapshot.id) @mock.patch('cinder.volume.volume_types.get_volume_type_qos_specs') def test_manage_existing_snapshot_by_source_name(self, *mocks): existing_ref = {'source-name': TEST_SNAPSHOT_SOURCE_NAME} self.driver.manage_existing_snapshot(test_snapshot, existing_ref) @mock.patch('cinder.volume.volume_types.get_volume_type_qos_specs') def test_manage_existing_snapshot_by_source_id(self, *mocks): existing_ref = {'source-id': TEST_SNAPSHOT_SOURCE_ID} self.driver.manage_existing_snapshot(test_snapshot, existing_ref) @mock.patch('cinder.volume.volume_types.get_volume_type_qos_specs') def test_manage_existing_snapshot_but_volume(self, *mocks): existing_ref = {'source-id': TEST_SNAPSHOT_SOURCE_ID} self._mock_volume.is_snapshot.return_value = False self.assertRaises(exception.InvalidSnapshot, self.driver.manage_existing_snapshot, test_snapshot, existing_ref) @mock.patch('cinder.volume.volume_types.get_volume_type_qos_specs') def 
test_manage_existing_snapshot_by_invalid_source(self, *mocks): existing_ref = {'source-path': None} self.assertRaises(exception.ManageExistingInvalidReference, self.driver.manage_existing_snapshot, test_snapshot, existing_ref) @mock.patch('cinder.volume.volume_types.get_volume_type_qos_specs') def test_manage_existing_snapshot_by_non_cinder_id(self, *mocks): self._mock_volume.get_all_metadata.return_value = {'cinder_id': 'x'} existing_ref = {'source-id': TEST_SNAPSHOT_SOURCE_ID} self.driver.manage_existing_snapshot(test_snapshot, existing_ref) @mock.patch('cinder.volume.volume_types.get_volume_type_qos_specs') @mock.patch('cinder.volume.drivers.infinidat.InfiniboxVolumeDriver.' '_check_already_managed_snapshot', return_value=False) def test_manage_existing_snapshot_not_managed(self, *mocks): self._mock_volume.get_all_metadata.return_value = ( TEST_SNAPSHOT_METADATA) existing_ref = {'source-name': TEST_SNAPSHOT_SOURCE_NAME} self.driver.manage_existing(test_snapshot, existing_ref) @mock.patch('cinder.volume.volume_types.get_volume_type_qos_specs') @mock.patch('cinder.volume.drivers.infinidat.InfiniboxVolumeDriver.' '_check_already_managed_snapshot', return_value=True) def test_manage_existing_snapshot_already_managed(self, *mocks): self._mock_volume.get_all_metadata.return_value = ( TEST_SNAPSHOT_METADATA) existing_ref = {'source-name': TEST_SNAPSHOT_SOURCE_NAME} self.assertRaises(exception.ManageExistingAlreadyManaged, self.driver.manage_existing_snapshot, test_snapshot, existing_ref) @mock.patch('cinder.volume.volume_types.get_volume_type_qos_specs') def test_manage_existing_snapshot_invalid_pool(self, *mocks): existing_ref = {'source-name': TEST_SNAPSHOT_SOURCE_NAME} self._mock_volume.get_pool_name.return_value = TEST_POOL_NAME2 self.assertRaises(exception.InvalidConfigurationValue, self.driver.manage_existing_snapshot, test_snapshot, existing_ref) def test_manage_existing_snapshot_get_size(self): existing_ref = {'source-name': TEST_SNAPSHOT_SOURCE_NAME} size = self.driver.manage_existing_snapshot_get_size(test_volume, existing_ref) self.assertEqual(test_snapshot.volume.size, size) def test_get_manageable_snapshots(self): cinder_snapshots = [test_snapshot] self._mock_volume.is_snapshot.return_value = True self._mock_volume.get_all_metadata.return_value = { 'cinder_id': fake.SNAPSHOT2_ID } self.driver.get_manageable_snapshots(cinder_snapshots, None, 1, 0, [], []) def test_get_manageable_snapshots_already_managed(self): cinder_snapshots = [test_snapshot] self._mock_volume.get_id.return_value = TEST_SNAPSHOT_SOURCE_ID self._mock_volume.get_all_metadata.return_value = ( TEST_SNAPSHOT_METADATA) self._mock_volume.is_snapshot.return_value = True self.driver.get_manageable_snapshots(cinder_snapshots, None, 1, 0, [], []) def test_get_manageable_snapshots_but_volumes(self): cinder_snapshots = [test_snapshot] self._mock_volume.is_snapshot.return_value = False self.driver.get_manageable_snapshots(cinder_snapshots, None, 1, 0, [], []) def test_get_manageable_snapshots_has_mappings(self): cinder_snapshots = [test_snapshot] self._mock_volume.is_snapshot.return_value = True self._mock_volume.get_all_metadata.return_value = { 'cinder_id': fake.SNAPSHOT2_ID } lun = mock.Mock() self._mock_volume.get_logical_units.return_value = [lun] self.driver.get_manageable_snapshots(cinder_snapshots, None, 1, 0, [], []) def test_get_manageable_snapshots_has_clones(self): cinder_snapshots = [test_snapshot] self._mock_volume.is_snapshot.return_value = True self._mock_volume.has_children.return_value = True 
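# A backend snapshot that already has children (clones) is expected to be
# reported by get_manageable_snapshots as not safe to manage.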
self._mock_volume.get_all_metadata.return_value = { 'cinder_id': fake.SNAPSHOT2_ID } self.driver.get_manageable_snapshots(cinder_snapshots, None, 1, 0, [], []) def test_unmanage_snapshot(self): self.driver.unmanage_snapshot(test_snapshot) def test_terminate_connection_no_attachment_connector(self): volume = copy.deepcopy(test_volume) volume.multiattach = True volume.volume_attachment = [test_attachment3] self.assertFalse(self.driver.terminate_connection(volume, test_connector)) def test_terminate_connection_no_host(self): self._system.hosts.safe_get.return_value = None volume = copy.deepcopy(test_volume) volume.volume_attachment = [test_attachment1] self.assertFalse(self.driver.terminate_connection(volume, test_connector)) def test_terminate_connection_no_mapping(self): self._mock_host.unmap_volume.side_effect = KeyError volume = copy.deepcopy(test_volume) volume.volume_attachment = [test_attachment1] self.assertFalse(self.driver.terminate_connection(volume, test_connector)) def test_update_migrated_volume_new_volume_not_found(self): self._system.volumes.safe_get.side_effect = [ None, self._mock_volume] self.assertRaises(exception.VolumeNotFound, self.driver.update_migrated_volume, None, test_volume, test_volume2, 'available') @mock.patch('cinder.volume.drivers.infinidat.InfiniboxVolumeDriver.' '_set_cinder_object_metadata') def test_update_migrated_volume_volume_not_found(self, set_metadata): self._system.volumes.safe_get.side_effect = [ self._mock_new_volume, None] update = self.driver.update_migrated_volume(None, test_volume, test_volume2, 'available') expected = {'_name_id': None, 'provider_location': None} self.assertEqual(expected, update) set_metadata.assert_called_once_with(self._mock_new_volume, test_volume) @mock.patch('cinder.volume.drivers.infinidat.InfiniboxVolumeDriver.' '_set_cinder_object_metadata') def test_update_migrated_new_volume_rename_error(self, set_metadata): self._system.volumes.safe_get.side_effect = [ self._mock_new_volume, None] self._mock_new_volume.update_name.side_effect = [ FakeInfinisdkException] update = self.driver.update_migrated_volume(None, test_volume, test_volume2, 'available') expected = {'_name_id': test_volume2.name_id, 'provider_location': None} self.assertEqual(expected, update) set_metadata.assert_called_once_with(self._mock_new_volume, test_volume) @mock.patch('cinder.volume.drivers.infinidat.InfiniboxVolumeDriver.' 
'_set_cinder_object_metadata') def test_update_migrated(self, set_metadata): self._system.volumes.safe_get.side_effect = [ self._mock_new_volume, self._mock_volume] self._mock_new_volume.update_name.side_effect = None update = self.driver.update_migrated_volume(None, test_volume, test_volume2, 'available') expected = {'_name_id': test_volume2.name_id, 'provider_location': None} self.assertEqual(expected, update) set_metadata.assert_called_once_with(self._mock_new_volume, test_volume) self.assertEqual(0, self._log.error.call_count) @ddt.data(None, {}) def test_migrate_volume_no_host(self, host): expected = False, None update = self.driver.migrate_volume(None, test_volume, host) self.assertEqual(expected, update) @ddt.data(None, {}) def test_migrate_volume_no_capabilities(self, capabilities): expected = False, None host = {'capabilities': capabilities} update = self.driver.migrate_volume(None, test_volume, host) self.assertEqual(expected, update) @ddt.data(None, 123, 'location') def test_migrate_volume_invalid_location_info(self, location_info): expected = False, None capabilities = {'location_info': location_info} host = {'capabilities': capabilities} update = self.driver.migrate_volume(None, test_volume, host) self.assertEqual(expected, update) def test_migrate_volume_invalid_driver(self): expected = False, None location_info = 'vendor:0:/path' capabilities = {'location_info': location_info} host = {'capabilities': capabilities} update = self.driver.migrate_volume(None, test_volume, host) self.assertEqual(expected, update) def test_migrate_volume_invalid_serial(self): expected = False, None location_info = '%s:%s:%s' % (self.driver.__class__.__name__, TEST_SYSTEM_SERIAL2, TEST_POOL_NAME2) capabilities = {'location_info': location_info} host = {'capabilities': capabilities} update = self.driver.migrate_volume(None, test_volume, host) self.assertEqual(expected, update) def test_migrate_volume_same_pool(self): expected = True, None location_info = '%s:%s:%s' % (self.driver.__class__.__name__, TEST_SYSTEM_SERIAL, TEST_POOL_NAME) capabilities = {'location_info': location_info} host = {'capabilities': capabilities} update = self.driver.migrate_volume(None, test_volume, host) self.assertEqual(expected, update) def test_migrate_volume_no_pool(self): expected = False, None self._system.pools.safe_get.return_value = None location_info = '%s:%s:%s' % (self.driver.__class__.__name__, TEST_SYSTEM_SERIAL, TEST_POOL_NAME2) capabilities = {'location_info': location_info} host = {'capabilities': capabilities} update = self.driver.migrate_volume(None, test_volume, host) self.assertEqual(expected, update) def test_migrate_volume(self): expected = True, None location_info = '%s:%s:%s' % (self.driver.__class__.__name__, TEST_SYSTEM_SERIAL, TEST_POOL_NAME2) capabilities = {'location_info': location_info} host = {'capabilities': capabilities} update = self.driver.migrate_volume(None, test_volume, host) self.assertEqual(expected, update) @ddt.ddt class InfiniboxDriverTestCaseFC(InfiniboxDriverTestCaseBase): @ddt.data(*itertools.product(('UP', 'DOWN'), ('OK', 'ERROR'))) @ddt.unpack def test_initialize_connection_nodes_ports(self, link_state, port_state): node = mock.Mock() port = mock.Mock() port.get_link_state.return_value = link_state port.get_state.return_value = port_state node.get_fc_ports.return_value = [port] self._system.components.nodes.get_all.return_value = [node] result = self.driver.initialize_connection(test_volume, test_connector) self.assertEqual(1, result["data"]["target_lun"]) def 
test_initialize_connection_multiple_wwpns(self): connector = {'wwpns': [TEST_WWN_1, TEST_WWN_2]} result = self.driver.initialize_connection(test_volume, connector) self.assertEqual(1, result["data"]["target_lun"]) def test_validate_connector(self): fc_connector = {'wwpns': [TEST_WWN_1, TEST_WWN_2]} iscsi_connector = {'initiator': TEST_INITIATOR_IQN} self.driver.validate_connector(fc_connector) self.assertRaises(exception.InvalidConnectorException, self.driver.validate_connector, iscsi_connector) @ddt.data({'connector': test_connector, 'attachment': [test_attachment1, test_attachment1]}, {'connector': test_connector2, 'attachment': [test_attachment2, test_attachment2]}) @ddt.unpack def test__is_volume_multiattached_positive(self, connector, attachment): volume = copy.deepcopy(test_volume) volume.multiattach = True volume.volume_attachment = attachment self.assertTrue(self.driver._is_volume_multiattached(volume, connector)) def test_terminate_connection_multiattached_volume(self): volume = copy.deepcopy(test_volume) volume.multiattach = True volume.volume_attachment = [test_attachment1, test_attachment1] self.assertTrue(self.driver.terminate_connection(volume, test_connector)) def test_terminate_connection_force_detach(self): mock_infinidat_host = mock.Mock() mock_infinidat_host.get_ports.return_value = [ self._wwn.WWN(TEST_WWN_1)] mock_mapping = mock.Mock() mock_mapping.get_host.return_value = mock_infinidat_host self._mock_volume.get_logical_units.return_value = [mock_mapping] volume = copy.deepcopy(test_volume) volume.volume_attachment = [test_attachment1, test_attachment2] self.assertTrue(self.driver.terminate_connection(volume, None)) self._mock_host.unmap_volume.assert_called_once() self._mock_host.safe_delete.assert_called_once() @ddt.ddt class InfiniboxDriverTestCaseISCSI(InfiniboxDriverTestCaseBase): def setUp(self): super(InfiniboxDriverTestCaseISCSI, self).setUp() self.override_config('infinidat_storage_protocol', TEST_ISCSI_PROTOCOL) self.override_config('infinidat_iscsi_netspaces', [TEST_ISCSI_NAMESPACE1]) self.override_config('use_chap_auth', False) self.driver.do_setup(None) def test_setup_without_netspaces_configured(self): self.override_config('infinidat_iscsi_netspaces', []) self.assertRaises(exception.VolumeDriverException, self.driver.do_setup, None) def test_initialize_connection(self): result = self.driver.initialize_connection(test_volume, test_connector) expected = { 'driver_volume_type': TEST_ISCSI_PROTOCOL, 'data': { 'target_discovered': True, 'target_portal': TEST_TARGET_PORTAL1, 'target_iqn': TEST_TARGET_IQN, 'target_lun': TEST_LUN, 'target_portals': [ TEST_TARGET_PORTAL1 ], 'target_iqns': [ TEST_TARGET_IQN ], 'target_luns': [ TEST_LUN ] } } self.assertEqual(expected, result) def test_initialize_netspace_does_not_exist(self): self._system.network_spaces.safe_get.return_value = None self.assertRaises(exception.VolumeDriverException, self.driver.initialize_connection, test_volume, test_connector) def test_initialize_netspace_has_no_ips(self): self._mock_name_space1.get_ips.return_value = [] self.assertRaises(exception.VolumeDriverException, self.driver.initialize_connection, test_volume, test_connector) def test_initialize_connection_with_chap(self): self.override_config('use_chap_auth', True) result = self.driver.initialize_connection(test_volume, test_connector) self.assertEqual(1, result['data']['target_lun']) self.assertEqual('CHAP', result['data']['auth_method']) self.assertIn('auth_username', result['data']) self.assertIn('auth_password', result['data']) def 
test_initialize_connection_multiple_netspaces(self): self.override_config('infinidat_iscsi_netspaces', [TEST_ISCSI_NAMESPACE1, TEST_ISCSI_NAMESPACE2]) self._system.network_spaces.safe_get.side_effect = [ self._mock_name_space1, self._mock_name_space2] result = self.driver.initialize_connection(test_volume, test_connector) expected = { 'driver_volume_type': TEST_ISCSI_PROTOCOL, 'data': { 'target_discovered': True, 'target_portal': TEST_TARGET_PORTAL1, 'target_iqn': TEST_TARGET_IQN, 'target_lun': TEST_LUN, 'target_portals': [ TEST_TARGET_PORTAL1, TEST_TARGET_PORTAL3 ], 'target_iqns': [ TEST_TARGET_IQN, TEST_TARGET_IQN ], 'target_luns': [ TEST_LUN, TEST_LUN ] } } self.assertEqual(expected, result) def test_initialize_connection_multiple_netspaces_multipath(self): self.override_config('infinidat_iscsi_netspaces', [TEST_ISCSI_NAMESPACE1, TEST_ISCSI_NAMESPACE2]) self._system.network_spaces.safe_get.side_effect = [ self._mock_name_space1, self._mock_name_space2] self._mock_name_space1.get_ips.return_value = [ mock.Mock(ip_address=TEST_IP_ADDRESS1, enabled=True), mock.Mock(ip_address=TEST_IP_ADDRESS2, enabled=True)] self._mock_name_space2.get_ips.return_value = [ mock.Mock(ip_address=TEST_IP_ADDRESS3, enabled=True), mock.Mock(ip_address=TEST_IP_ADDRESS4, enabled=True)] result = self.driver.initialize_connection(test_volume, test_connector) expected = { 'driver_volume_type': TEST_ISCSI_PROTOCOL, 'data': { 'target_discovered': True, 'target_portal': TEST_TARGET_PORTAL1, 'target_iqn': TEST_TARGET_IQN, 'target_lun': TEST_LUN, 'target_portals': [ TEST_TARGET_PORTAL1, TEST_TARGET_PORTAL2, TEST_TARGET_PORTAL3, TEST_TARGET_PORTAL4 ], 'target_iqns': [ TEST_TARGET_IQN, TEST_TARGET_IQN, TEST_TARGET_IQN, TEST_TARGET_IQN ], 'target_luns': [ TEST_LUN, TEST_LUN, TEST_LUN, TEST_LUN ] } } self.assertEqual(expected, result) def test_initialize_connection_disabled_interface(self): self._mock_name_space1.get_ips.return_value = [ mock.Mock(ip_address=TEST_IP_ADDRESS1, enabled=False), mock.Mock(ip_address=TEST_IP_ADDRESS2, enabled=True)] result = self.driver.initialize_connection(test_volume, test_connector) expected = { 'driver_volume_type': TEST_ISCSI_PROTOCOL, 'data': { 'target_discovered': True, 'target_portal': TEST_TARGET_PORTAL2, 'target_iqn': TEST_TARGET_IQN, 'target_lun': TEST_LUN, 'target_portals': [ TEST_TARGET_PORTAL2 ], 'target_iqns': [ TEST_TARGET_IQN ], 'target_luns': [ TEST_LUN ] } } self.assertEqual(expected, result) def test_initialize_connection_multiple_interfaces(self): self._mock_name_space1.get_ips.return_value = [ mock.Mock(ip_address=TEST_IP_ADDRESS1, enabled=True), mock.Mock(ip_address=TEST_IP_ADDRESS2, enabled=True)] self._mock_name_space1.get_properties.return_value = mock.Mock( iscsi_iqn=TEST_TARGET_IQN, iscsi_tcp_port=TEST_ISCSI_TCP_PORT1) result = self.driver.initialize_connection(test_volume, test_connector) expected = { 'driver_volume_type': TEST_ISCSI_PROTOCOL, 'data': { 'target_discovered': True, 'target_portal': TEST_TARGET_PORTAL1, 'target_iqn': TEST_TARGET_IQN, 'target_lun': TEST_LUN, 'target_portals': [ TEST_TARGET_PORTAL1, TEST_TARGET_PORTAL2 ], 'target_iqns': [ TEST_TARGET_IQN, TEST_TARGET_IQN ], 'target_luns': [ TEST_LUN, TEST_LUN ] } } self.assertEqual(expected, result) @ddt.data({'connector': test_connector, 'attachment': [test_attachment1, test_attachment1]}, {'connector': test_connector2, 'attachment': [test_attachment2, test_attachment2]}) @ddt.unpack def test__is_volume_multiattached_positive(self, connector, attachment): volume = copy.deepcopy(test_volume) 
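# The module-level test_volume fixture is deep-copied here so that the
# multiattach/attachment mutations below do not leak into other tests.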
volume.multiattach = True volume.volume_attachment = attachment self.assertTrue(self.driver._is_volume_multiattached(volume, connector)) def test_terminate_connection(self): volume = copy.deepcopy(test_volume) volume.volume_attachment = [test_attachment1] self.assertFalse(self.driver.terminate_connection(volume, test_connector)) def test_terminate_connection_force_detach(self): mock_infinidat_host = mock.Mock() mock_infinidat_host.get_ports.return_value = [ self._iqn.IQN(TEST_TARGET_IQN)] mock_mapping = mock.Mock() mock_mapping.get_host.return_value = mock_infinidat_host self._mock_volume.get_logical_units.return_value = [mock_mapping] volume = copy.deepcopy(test_volume) volume.volume_attachment = [test_attachment1, test_attachment2] self.assertTrue(self.driver.terminate_connection(volume, None)) self._mock_host.unmap_volume.assert_called_once() self._mock_host.safe_delete.assert_called_once() def test_validate_connector(self): fc_connector = {'wwpns': [TEST_WWN_1, TEST_WWN_2]} iscsi_connector = {'initiator': TEST_INITIATOR_IQN} self.driver.validate_connector(iscsi_connector) self.assertRaises(exception.InvalidConnectorException, self.driver.validate_connector, fc_connector) class InfiniboxDriverTestCaseQoS(InfiniboxDriverTestCaseBase): @mock.patch("cinder.volume.volume_types.get_volume_type_qos_specs") def test_no_qos(self, qos_specs): qos_specs.return_value = None self.driver.create_volume(test_volume) self._system.qos_policies.create.assert_not_called() self._mock_qos_policy.assign_entity.assert_not_called() @mock.patch("cinder.volume.volume_types.get_volume_type_qos_specs") def test_qos_max_ipos(self, qos_specs): qos_specs.return_value = {'qos_specs': {'id': 'qos_name', 'consumer': 'back-end', 'specs': {'maxIOPS': 1000, 'maxBWS': None}}} self.driver.create_volume(test_volume) self._system.qos_policies.create.assert_called_once() self._mock_qos_policy.assign_entity.assert_called_once() @mock.patch("cinder.volume.volume_types.get_volume_type_qos_specs") def test_qos_max_bws(self, qos_specs): qos_specs.return_value = {'qos_specs': {'id': 'qos_name', 'consumer': 'back-end', 'specs': {'maxIOPS': None, 'maxBWS': 10000}}} self.driver.create_volume(test_volume) self._system.qos_policies.create.assert_called_once() self._mock_qos_policy.assign_entity.assert_called_once() @mock.patch("cinder.volume.volume_types.get_volume_type_qos_specs") def test_qos_no_compat(self, qos_specs): qos_specs.return_value = {'qos_specs': {'id': 'qos_name', 'consumer': 'back-end', 'specs': {'maxIOPS': 1000, 'maxBWS': 10000}}} self._system.compat.has_qos.return_value = False self.driver.create_volume(test_volume) self._system.qos_policies.create.assert_not_called() self._mock_qos_policy.assign_entity.assert_not_called() @mock.patch("cinder.volume.volume_types.get_volume_type_qos_specs") def test_qos_volume_type_id_none(self, qos_specs): qos_specs.return_value = {'qos_specs': {'id': 'qos_name', 'consumer': 'back-end', 'specs': {'maxIOPS': 1000, 'maxBWS': 10000}}} self.driver.create_volume(test_volume2) self._system.qos_policies.create.assert_not_called() self._mock_qos_policy.assign_entity.assert_not_called() @mock.patch("cinder.volume.volume_types.get_volume_type_qos_specs") def test_qos_no_specs(self, qos_specs): qos_specs.return_value = {'qos_specs': None} self.driver.create_volume(test_volume) self._system.qos_policies.create.assert_not_called() self._mock_qos_policy.assign_entity.assert_not_called() @mock.patch("cinder.volume.volume_types.get_volume_type_qos_specs") def test_qos_front_end(self, qos_specs): 
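# Front-end QoS specs are consumed by the front end (e.g. the hypervisor),
# not by the array, so the driver should neither create nor assign a backend
# QoS policy for them.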
qos_specs.return_value = {'qos_specs': {'id': 'qos_name', 'consumer': 'front-end', 'specs': {'maxIOPS': 1000, 'maxBWS': 10000}}} self.driver.create_volume(test_volume) self._system.qos_policies.create.assert_not_called() self._mock_qos_policy.assign_entity.assert_not_called() @mock.patch("cinder.volume.volume_types.get_volume_type_qos_specs") def test_qos_specs_empty(self, qos_specs): qos_specs.return_value = {'qos_specs': {'id': 'qos_name', 'consumer': 'back-end', 'specs': {'maxIOPS': None, 'maxBWS': None}}} self.driver.create_volume(test_volume) self._system.qos_policies.create.assert_not_called() self._mock_qos_policy.assign_entity.assert_not_called() @mock.patch("cinder.volume.volume_types.get_volume_type_qos_specs") def test_qos_policy_exists(self, qos_specs): qos_specs.return_value = {'qos_specs': {'id': 'qos_name', 'consumer': 'back-end', 'specs': {'maxIOPS': 1000, 'maxBWS': 10000}}} self._system.qos_policies.safe_get.return_value = self._mock_qos_policy self.driver.create_volume(test_volume) self._system.qos_policies.create.assert_not_called() self._mock_qos_policy.assign_entity.assert_called() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/volume/drivers/test_kaminario.py0000664000175000017500000006275100000000000025037 0ustar00zuulzuul00000000000000# Copyright (c) 2016 by Kaminario Technologies, Ltd. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
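# Illustrative note (a sketch of the test scaffolding below, not driver code):
# these tests swap the real krest REST client for the FakeKrest family of
# stubs, and the stacked mock.patch.object decorators hand their mocks to the
# test method bottom-up, i.e. the decorator closest to the function becomes
# the first extra argument. For example:
#
#     @mock.patch.object(volume_utils, 'brick_get_connector_properties')
#     @mock.patch.object(volume_utils, 'copy_volume')
#     def test_example(self, mock_copy_volume, mock_brick_get):
#         mock_brick_get.return_value = CONNECTOR
#         mock_copy_volume.return_value = None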
"""Unit tests for kaminario driver.""" import re import time from unittest import mock import ddt from oslo_utils import units from cinder import context from cinder import exception from cinder import objects from cinder.objects import fields from cinder.tests.unit import fake_snapshot from cinder.tests.unit import fake_volume from cinder.tests.unit import test from cinder.volume import configuration from cinder.volume.drivers.kaminario import kaminario_common from cinder.volume.drivers.kaminario import kaminario_fc from cinder.volume.drivers.kaminario import kaminario_iscsi from cinder.volume import volume_utils CONNECTOR = {'initiator': 'iqn.1993-08.org.debian:01:12aa12aa12aa', 'ip': '192.168.2.5', 'platform': 'x86_64', 'host': 'test-k2', 'wwpns': ['12341a2a00001234', '12341a2a00001235'], 'wwnns': ['12351a2a00001234', '12361a2a00001234'], 'os_type': 'linux2', 'multipath': False} class FakeK2Obj(object): id = 548 lun = 548 class FakeSaveObject(FakeK2Obj): def __init__(self, *args, **kwargs): item = kwargs.pop('item', 1) self.ntype = kwargs.get('ntype') self.ip_address = '10.0.0.%s' % item self.iscsi_qualified_target_name = "xyztlnxyz" self.snapshot = FakeK2Obj() self.name = 'test' self.pwwn = '50024f405330030%s' % item self.volume_group = self self.is_dedup = True self.size = units.Mi self.replication_status = None self.state = 'in_sync' self.generation_number = 548 self.current_role = 'target' self.current_snapshot_progress = 100 self.current_snapshot_id = None self.wan_port = None def refresh(self): return def save(self): return FakeSaveObject() def delete(self): return None class FakeSaveObjectExp(FakeSaveObject): def save(self): raise kaminario_common.KaminarioCinderDriverException("test") def delete(self): raise kaminario_common.KaminarioCinderDriverException("test") class FakeSearchObject(object): hits = [FakeSaveObject(item=1), FakeSaveObject(item=2)] total = 2 def __init__(self, *args): if args and "mappings" in args[0]: self.total = 0 class FakeSearchObjectExp(object): hits = [FakeSaveObjectExp()] total = 1 class FakeKrest(object): def search(self, *args, **argv): return FakeSearchObject(*args) def new(self, *args, **argv): return FakeSaveObject() class FakeKrestException(object): def search(self, *args, **argv): return FakeSearchObjectExp() def new(self, *args, **argv): return FakeSaveObjectExp() class Replication(object): backend_id = '10.0.0.1' login = 'login' password = 'password' rpo = 500 @ddt.ddt class TestKaminarioCommon(test.TestCase): driver = None conf = None def setUp(self): self._setup_config() self._setup_driver() super(TestKaminarioCommon, self).setUp() self.context = context.get_admin_context() self.vol = fake_volume.fake_volume_obj(self.context) self.vol.volume_type = fake_volume.fake_volume_type_obj(self.context) self.vol.volume_type.extra_specs = {'foo': None} self.snap = fake_snapshot.fake_snapshot_obj(self.context) self.snap.volume = self.vol self.patch('eventlet.sleep') def _setup_config(self): self.conf = mock.Mock(spec=configuration.Configuration) self.conf.kaminario_dedup_type_name = "dedup" self.conf.volume_dd_blocksize = 2 self.conf.disable_discovery = False self.conf.unique_fqdn_network = True self.conf.use_multipath_for_image_xfer = False self.conf.enforce_multipath_for_image_xfer = False def _setup_driver(self): self.driver = (kaminario_iscsi. 
KaminarioISCSIDriver(configuration=self.conf)) device = mock.Mock(return_value={'device': {'path': '/dev'}}) self.driver._connect_device = device self.driver.client = FakeKrest() def test_create_volume(self): """Test create_volume.""" result = self.driver.create_volume(self.vol) self.assertIsNone(result) def test_create_volume_with_exception(self): """Test create_volume_with_exception.""" self.driver.client = FakeKrestException() self.assertRaises(kaminario_common.KaminarioCinderDriverException, self.driver.create_volume, self.vol) def test_delete_volume(self): """Test delete_volume.""" result = self.driver.delete_volume(self.vol) self.assertIsNone(result) def test_delete_volume_with_exception(self): """Test delete_volume_with_exception.""" self.driver.client = FakeKrestException() self.assertRaises(kaminario_common.KaminarioCinderDriverException, self.driver.delete_volume, self.vol) def test_create_snapshot(self): """Test create_snapshot.""" self.snap.id = "253b2878-ec60-4793-ad19-e65496ec7aab" self.driver.client.new = mock.Mock() result = self.driver.create_snapshot(self.snap) self.assertIsNone(result) fake_object = self.driver.client.search().hits[0] self.driver.client.new.assert_called_once_with( "snapshots", short_name='cs-253b2878-ec60-4793-ad19-e65496ec7aab', source=fake_object, retention_policy=fake_object, is_auto_deleteable=False) def test_create_snapshot_with_exception(self): """Test create_snapshot_with_exception.""" self.driver.client = FakeKrestException() self.assertRaises(kaminario_common.KaminarioCinderDriverException, self.driver.create_snapshot, self.snap) def test_delete_snapshot(self): """Test delete_snapshot.""" result = self.driver.delete_snapshot(self.snap) self.assertIsNone(result) def test_delete_snapshot_with_exception(self): """Test delete_snapshot_with_exception.""" self.driver.client = FakeKrestException() self.assertRaises(kaminario_common.KaminarioCinderDriverException, self.driver.delete_snapshot, self.snap) @mock.patch.object(volume_utils, 'brick_get_connector_properties') @mock.patch.object(volume_utils, 'copy_volume') def test_create_volume_from_snapshot(self, mock_copy_volume, mock_brick_get): """Test create_volume_from_snapshot.""" mock_brick_get.return_value = CONNECTOR mock_copy_volume.return_value = None self.driver._kaminario_disconnect_volume = mock.Mock() result = self.driver.create_volume_from_snapshot(self.vol, self.snap) self.assertIsNone(result) @mock.patch.object(volume_utils, 'brick_get_connector_properties') @mock.patch.object(volume_utils, 'copy_volume') def test_create_volume_from_snapshot_with_exception(self, mock_copy_volume, mock_brick_get): """Test create_volume_from_snapshot_with_exception.""" mock_brick_get.return_value = CONNECTOR mock_copy_volume.return_value = None self.driver.client = FakeKrestException() self.assertRaises(kaminario_common.KaminarioCinderDriverException, self.driver.create_volume_from_snapshot, self.vol, self.snap) @mock.patch.object(volume_utils, 'brick_get_connector_properties') @mock.patch.object(volume_utils, 'copy_volume') def test_create_cloned_volume(self, mock_copy_volume, mock_brick_get): """Test create_cloned_volume.""" mock_brick_get.return_value = CONNECTOR mock_copy_volume.return_value = None self.driver._kaminario_disconnect_volume = mock.Mock() result = self.driver.create_cloned_volume(self.vol, self.vol) self.assertIsNone(result) @mock.patch.object(volume_utils, 'brick_get_connector_properties') @mock.patch.object(volume_utils, 'copy_volume') def test_create_cloned_volume_with_exception(self, 
mock_copy_volume, mock_brick_get): """Test create_cloned_volume_with_exception.""" mock_brick_get.return_value = CONNECTOR mock_copy_volume.return_value = None self.driver.terminate_connection = mock.Mock() self.driver.client = FakeKrestException() self.assertRaises(kaminario_common.KaminarioCinderDriverException, self.driver.create_cloned_volume, self.vol, self.vol) def test_extend_volume(self): """Test extend_volume.""" new_size = 256 result = self.driver.extend_volume(self.vol, new_size) self.assertIsNone(result) def test_extend_volume_with_exception(self): """Test extend_volume_with_exception.""" self.driver.client = FakeKrestException() new_size = 256 self.assertRaises(kaminario_common.KaminarioCinderDriverException, self.driver.extend_volume, self.vol, new_size) def test_initialize_connection_with_exception(self): """Test initialize_connection_with_exception.""" self.driver.client = FakeKrestException() self.assertRaises(kaminario_common.KaminarioCinderDriverException, self.driver.initialize_connection, self.vol, CONNECTOR) def test_get_lun_number(self): """Test _get_lun_number.""" host, host_rs, host_name = self.driver._get_host_object(CONNECTOR) result = self.driver._get_lun_number(self.vol, host) self.assertEqual(548, result) def test_get_volume_object(self): """Test _get_volume_object.""" result = self.driver._get_volume_object(self.vol) self.assertEqual(548, result.id) def test_get_host_object(self): """Test _get_host_object.""" host, host_rs, host_name = self.driver._get_host_object(CONNECTOR) self.assertEqual(548, host.id) self.assertEqual(2, host_rs.total) self.assertEqual('test-k2', host_name) def test_k2_initialize_connection(self): """Test k2_initialize_connection.""" result = self.driver.k2_initialize_connection(self.vol, CONNECTOR) self.assertEqual(548, result) @mock.patch.object(FakeSearchObject, 'total', 1) def test_manage_existing(self): """Test manage_existing.""" self.driver._get_replica_status = mock.Mock(return_value=False) result = self.driver.manage_existing(self.vol, {'source-name': 'test'}) self.assertIsNone(result) def test_manage_existing_exp(self): self.driver._get_replica_status = mock.Mock(return_value=True) self.assertRaises(exception.ManageExistingInvalidReference, self.driver.manage_existing, self.vol, {'source-name': 'test'}) def test_manage_vg_volumes(self): self.driver.nvol = 2 self.assertRaises(exception.ManageExistingInvalidReference, self.driver.manage_existing, self.vol, {'source-name': 'test'}) def test_manage_existing_get_size(self): """Test manage_existing_get_size.""" self.driver.client.search().hits[0].size = units.Mi result = self.driver.manage_existing_get_size(self.vol, {'source-name': 'test'}) self.assertEqual(1, result) def test_get_is_dedup(self): """Test _get_is_dedup.""" result = self.driver._get_is_dedup(self.vol.volume_type) self.assertTrue(result) def test_get_is_dedup_false(self): """Test _get_is_dedup_false.""" specs = {'kaminario:thin_prov_type': 'nodedup'} self.vol.volume_type.extra_specs = specs result = self.driver._get_is_dedup(self.vol.volume_type) self.assertFalse(result) def test_get_replica_status(self): """Test _get_replica_status.""" result = self.driver._get_replica_status(self.vol) self.assertTrue(result) def test_create_volume_replica(self): """Test _create_volume_replica.""" vg = FakeSaveObject() rep = Replication() self.driver.replica = rep session_name = self.driver.get_session_name('1234567890987654321') self.assertEqual('ssn-1234567890987654321', session_name) rsession_name = 
self.driver.get_rep_name(session_name) self.assertEqual('rssn-1234567890987654321', rsession_name) src_ssn = self.driver.client.new("replication/sessions").save() self.assertEqual('in_sync', src_ssn.state) result = self.driver._create_volume_replica(self.vol, vg, vg, rep.rpo) self.assertIsNone(result) def test_create_volume_replica_exp(self): """Test _create_volume_replica_exp.""" vg = FakeSaveObject() rep = Replication() self.driver.replica = rep self.driver.client = FakeKrestException() self.assertRaises(kaminario_common.KaminarioCinderDriverException, self.driver._create_volume_replica, self.vol, vg, vg, rep.rpo) def test_delete_by_ref(self): """Test _delete_by_ref.""" result = self.driver._delete_by_ref(self.driver.client, 'volume', 'name', 'message') self.assertIsNone(result) def test_failover_volume(self): """Test _failover_volume.""" self.driver.target = FakeKrest() session_name = self.driver.get_session_name('1234567890987654321') self.assertEqual('ssn-1234567890987654321', session_name) rsession_name = self.driver.get_rep_name(session_name) self.assertEqual('rssn-1234567890987654321', rsession_name) result = self.driver._failover_volume(self.vol) self.assertIsNone(result) @mock.patch.object(kaminario_common.KaminarioCinderDriver, '_check_for_status') @mock.patch.object(objects.service.Service, 'get_by_args') def test_failover_host(self, get_by_args, check_stauts): """Test failover_host.""" mock_args = mock.Mock() mock_args.active_backend_id = '10.0.0.1' self.vol.replication_status = 'failed-over' self.driver.configuration.san_ip = '10.0.0.1' get_by_args.side_effect = [mock_args, mock_args] self.driver.host = 'host' volumes = [self.vol, self.vol] self.driver.replica = Replication() self.driver.target = FakeKrest() self.driver.target.search().total = 1 self.driver.client.search().total = 1 backend_ip, res_volumes, __ = self.driver.failover_host( None, volumes, []) self.assertEqual('10.0.0.1', backend_ip) status = res_volumes[0]['updates']['replication_status'] self.assertEqual(fields.ReplicationStatus.FAILED_OVER, status) # different backend ip self.driver.configuration.san_ip = '10.0.0.2' self.driver.client.search().hits[0].state = 'in_sync' backend_ip, res_volumes, __ = self.driver.failover_host( None, volumes, []) self.assertEqual('10.0.0.2', backend_ip) status = res_volumes[0]['updates']['replication_status'] self.assertEqual(fields.ReplicationStatus.DISABLED, status) def test_delete_volume_replica(self): """Test _delete_volume_replica.""" self.driver.replica = Replication() self.driver.target = FakeKrest() session_name = self.driver.get_session_name('1234567890987654321') self.assertEqual('ssn-1234567890987654321', session_name) rsession_name = self.driver.get_rep_name(session_name) self.assertEqual('rssn-1234567890987654321', rsession_name) res = self.driver._delete_by_ref(self.driver.client, 'volumes', 'test', 'test') self.assertIsNone(res) result = self.driver._delete_volume_replica(self.vol, 'test', 'test') self.assertIsNone(result) src_ssn = self.driver.client.search("replication/sessions").hits[0] self.assertEqual('idle', src_ssn.state) def test_delete_volume_replica_exp(self): """Test _delete_volume_replica_exp.""" self.driver.replica = Replication() self.driver.target = FakeKrestException() self.driver._check_for_status = mock.Mock() self.assertRaises(kaminario_common.KaminarioCinderDriverException, self.driver._delete_volume_replica, self.vol, 'test', 'test') def test_get_is_replica(self): """Test get_is_replica.""" result = 
self.driver._get_is_replica(self.vol.volume_type) self.assertFalse(result) def test_get_is_replica_true(self): """Test get_is_replica_true.""" self.driver.replica = Replication() self.vol.volume_type.extra_specs = {'kaminario:replication': 'enabled'} result = self.driver._get_is_replica(self.vol.volume_type) self.assertTrue(result) def test_after_volume_copy(self): """Test after_volume_copy.""" result = self.driver.after_volume_copy(None, self.vol, self.vol.volume_type) self.assertIsNone(result) def test_retype(self): """Test retype.""" replica_status = self.driver._get_replica_status('test') self.assertTrue(replica_status) replica = self.driver._get_is_replica(self.vol.volume_type) self.assertFalse(replica) self.driver.replica = Replication() result = self.driver._add_replication(self.vol) self.assertIsNone(result) self.driver.target = FakeKrest() self.driver._check_for_status = mock.Mock() result = self.driver._delete_replication(self.vol) self.assertIsNone(result) self.driver._delete_volume_replica = mock.Mock() result = self.driver.retype(None, self.vol, self.vol.volume_type, None, None) self.assertTrue(result) new_vol_type = fake_volume.fake_volume_type_obj(self.context) new_vol_type.extra_specs = {'kaminario:thin_prov_type': 'nodedup'} result2 = self.driver.retype(None, self.vol, new_vol_type, None, None) self.assertFalse(result2) def test_add_replication(self): """"Test _add_replication.""" self.driver.replica = Replication() result = self.driver._add_replication(self.vol) self.assertIsNone(result) def test_delete_replication(self): """Test _delete_replication.""" self.driver.replica = Replication() self.driver.target = FakeKrest() self.driver._check_for_status = mock.Mock() result = self.driver._delete_replication(self.vol) self.assertIsNone(result) def test_create_failover_volume_replica(self): """Test _create_failover_volume_replica.""" self.driver.replica = Replication() self.driver.target = FakeKrest() self.driver.configuration.san_ip = '10.0.0.1' result = self.driver._create_failover_volume_replica(self.vol, 'test', 'test') self.assertIsNone(result) def test_create_volume_replica_user_snap(self): """Test create_volume_replica_user_snap.""" result = self.driver._create_volume_replica_user_snap(FakeKrest(), 'sess') self.assertEqual(548, result) def test_is_user_snap_sync_finished(self): """Test _is_user_snap_sync_finished.""" sess_mock = mock.Mock() sess_mock.refresh = mock.Mock() sess_mock.generation_number = 548 sess_mock.current_snapshot_id = None sess_mock.current_snapshot_progress = 100 sess_mock.current_snapshot_id = None self.driver.snap_updates = [{'tgt_ssn': sess_mock, 'gno': 548, 'stime': time.time()}] result = self.driver._is_user_snap_sync_finished() self.assertIsNone(result) def test_delete_failover_volume_replica(self): """Test _delete_failover_volume_replica.""" self.driver.target = FakeKrest() result = self.driver._delete_failover_volume_replica(self.vol, 'test', 'test') self.assertIsNone(result) def test_get_initiator_host_name(self): result = self.driver.get_initiator_host_name(CONNECTOR) self.assertEqual(CONNECTOR['host'], result) @ddt.data(True, False) def test_get_initiator_host_name_unique(self, in_shared): cfg = self._set_unique_fqdn_override(False, in_shared) self.mock_object(self.driver, 'configuration', cfg) result = self.driver.get_initiator_host_name(CONNECTOR) expected = re.sub('[:.]', '_', CONNECTOR['initiator'][::-1][:32]) self.assertEqual(expected, result) @ddt.ddt class TestKaminarioISCSI(TestKaminarioCommon): def test_get_target_info(self): 
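        # With FakeKrest in place, the two FakeSaveObject hits expose
        # ip_address 10.0.0.1/10.0.0.2 and the canned IQN 'xyztlnxyz', so the
        # iSCSI portal and IQN lists asserted here are derived directly from
        # that stub data (assuming the default iSCSI port 3260).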
"""Test get_target_info.""" iscsi_portals, target_iqns = self.driver.get_target_info(self.vol) self.assertEqual(['10.0.0.1:3260', '10.0.0.2:3260'], iscsi_portals) self.assertEqual(['xyztlnxyz', 'xyztlnxyz'], target_iqns) @ddt.data(True, False) def test_initialize_connection(self, multipath): """Test initialize_connection.""" connector = CONNECTOR.copy() connector['multipath'] = multipath self.driver.configuration.disable_discovery = False conn_info = self.driver.initialize_connection(self.vol, CONNECTOR) expected = { 'data': { 'target_discovered': True, 'target_iqn': 'xyztlnxyz', 'target_lun': 548, 'target_portal': '10.0.0.1:3260', }, 'driver_volume_type': 'iscsi', } self.assertEqual(expected, conn_info) def test_initialize_connection_multipath(self): """Test initialize_connection with multipath.""" connector = CONNECTOR.copy() connector['multipath'] = True self.driver.configuration.disable_discovery = True conn_info = self.driver.initialize_connection(self.vol, connector) expected = { 'data': { 'target_discovered': True, 'target_iqn': 'xyztlnxyz', 'target_iqns': ['xyztlnxyz', 'xyztlnxyz'], 'target_lun': 548, 'target_luns': [548, 548], 'target_portal': '10.0.0.1:3260', 'target_portals': ['10.0.0.1:3260', '10.0.0.2:3260'], }, 'driver_volume_type': 'iscsi', } self.assertEqual(expected, conn_info) def test_terminate_connection(self): """Test terminate_connection.""" result = self.driver.terminate_connection(self.vol, CONNECTOR) self.assertIsNone(result) def test_terminate_connection_without_connector(self): """Test terminate_connection_without_connector.""" result = self.driver.terminate_connection(self.vol, None) self.assertIsNone(result) @ddt.ddt class TestKaminarioFC(TestKaminarioCommon): def _setup_driver(self): self.driver = (kaminario_fc. KaminarioFCDriver(configuration=self.conf)) device = mock.Mock(return_value={'device': {'path': '/dev'}}) self.driver._connect_device = device self.driver.client = FakeKrest() self.driver._lookup_service = mock.Mock() def test_initialize_connection(self): """Test initialize_connection.""" conn_info = self.driver.initialize_connection(self.vol, CONNECTOR) self.assertIn('data', conn_info) self.assertIn('target_wwn', conn_info['data']) def test_get_target_info(self): """Test get_target_info.""" target_wwpn = self.driver.get_target_info(self.vol) self.assertEqual(['50024f4053300301', '50024f4053300302'], target_wwpn) def test_terminate_connection(self): """Test terminate_connection.""" result = self.driver.terminate_connection(self.vol, CONNECTOR) self.assertIn('data', result) def test_terminate_connection_without_connector(self): """Test terminate_connection_without_connector.""" result = self.driver.terminate_connection(self.vol, None) self.assertIn('data', result) @ddt.data(True, False) def test_get_initiator_host_name_unique(self, in_shared): cfg = self._set_unique_fqdn_override(False, in_shared) self.mock_object(self.driver, 'configuration', cfg) connector = CONNECTOR.copy() del connector['initiator'] result = self.driver.get_initiator_host_name(connector) expected = re.sub('[:.]', '_', connector['wwnns'][0][::-1][:32]) self.assertEqual(expected, result) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/volume/drivers/test_kioxia.py0000664000175000017500000011637500000000000024353 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import hashlib import unittest from unittest import mock from cinder import exception from cinder.tests.unit import test from cinder.volume import configuration as conf from cinder.volume.drivers.kioxia import entities from cinder.volume.drivers.kioxia import kumoscale as kioxia from cinder.volume.drivers.kioxia import rest_client VOL_BACKEND_NAME = 'kioxia_kumoscale_1' VOL_NAME = 'volume-c2fd04e3-320e-44eb-b-2' VOL_UUID = 'c20aba21-6ef6-446b-b374-45733b4883ba' VOL_SIZE = 10 VOL_PROTOCOL = 'NVMeoF' SNAP_UUID = 'c9ef9d49-0d26-44cb-b609-0b8bd2d3db77' CONN_UUID = '34206309-3733-4cc6-a7d5-9d4dbbe377da' CONN_HOST_NAME = 'devstack' CONN_NQN = 'nqn.2014-08.org.nvmexpress:uuid:' \ 'beaae2de-3a97-4be1-a739-6ac4bc5bf138' success_prov_response = entities.ProvisionerResponse(None, None, "Success", "Success") fail_prov_response = entities.ProvisionerResponse(None, None, "Failure", "Failure") prov_backend1 = entities.Backend(None, None, None, None, 'dummy-pid-1') prov_backend2 = entities.Backend(None, None, None, None, 'dummy-pid-2') prov_location1 = entities.Location(VOL_UUID, prov_backend1) prov_location2 = entities.Location(VOL_UUID, prov_backend2) prov_volume = entities.VolumeProv(VOL_UUID, None, None, None, None, None, None, None, None, None, None, True, None, [prov_location1, prov_location2]) prov_volumes_response = entities.ProvisionerResponse([prov_volume]) no_entities_prov_response = entities.ProvisionerResponse([], None, "Success") class KioxiaVolumeTestCase(test.TestCase): @mock.patch.object(rest_client.KioxiaProvisioner, 'get_info') @mock.patch.object(kioxia.KumoScaleBaseVolumeDriver, '_get_kumoscale') def setUp(self, mock_kumoscale, mock_get_info): mock_get_info.return_value = success_prov_response mock_kumoscale.return_value = \ rest_client.KioxiaProvisioner(['1.2.3.4'], 'cert', 'token') super(KioxiaVolumeTestCase, self).setUp() self.cfg = mock.Mock(spec=conf.Configuration) self.cfg.volume_backend_name = VOL_BACKEND_NAME self.cfg.url = 'dummyURL' self.cfg.token = 'dummy.dummy.Rf-dummy-dummy-lE' self.cfg.cafile = 'dummy' self.cfg.num_replicas = 1 self.cfg.block_size = 512 self.cfg.max_iops_per_gb = 1000 self.cfg.desired_iops_per_gb = 1000 self.cfg.max_bw_per_gb = 1000 self.cfg.desired_bw_per_gb = 1000 self.cfg.same_rack_allowed = False self.cfg.max_replica_down_time = 5 self.cfg.span_allowed = True self.cfg.vol_reserved_space_percentage = 20 self.cfg.provisioning_type = 'THIN' self.driver = kioxia.KumoScaleBaseVolumeDriver(configuration=self.cfg) self.driver.configuration.get = lambda *args, **kwargs: {} self.driver.num_replicas = 2 self.expected_stats = { 'volume_backend_name': VOL_BACKEND_NAME, 'vendor_name': 'KIOXIA', 'driver_version': self.driver.VERSION, 'storage_protocol': 'NVMeOF', 'consistencygroup_support': False, 'thin_provisioning_support': True, 'multiattach': False, 'total_capacity_gb': 1000, 'free_capacity_gb': 600 } @mock.patch.object(rest_client.KioxiaProvisioner, 'get_info') def test_get_kumoscale(self, mock_get_info): mock_get_info.return_value = success_prov_response result = self.driver._get_kumoscale('https://1.2.3.4:8090', 'token', 'cert') 
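        # The helper is expected to parse the management URL into its address
        # and port parts and to carry the token through to the provisioner
        # client; the assertions below check exactly that split
        # ('1.2.3.4' / '8090') plus the token.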
self.assertEqual(result.mgmt_ips, ['1.2.3.4']) self.assertEqual(result.port, '8090') self.assertEqual(result.token, 'token') @mock.patch.object(rest_client.KioxiaProvisioner, 'create_volume') def test_volume_create_success(self, mock_create_volume): testvol = _stub_volume() mock_create_volume.return_value = success_prov_response result = self.driver.create_volume(testvol) args, kwargs = mock_create_volume.call_args mock_call = args[0] self.assertEqual(mock_call.alias, testvol['name'][:27]) self.assertEqual(mock_call.capacity, testvol['size']) self.assertEqual(mock_call.uuid, testvol['id']) self.assertEqual(mock_call.protocol, VOL_PROTOCOL) self.assertIsNone(result) @mock.patch.object(rest_client.KioxiaProvisioner, 'create_volume') def test_volume_create_failure(self, mock_create_volume): testvol = _stub_volume() mock_create_volume.return_value = fail_prov_response self.assertRaises(exception.VolumeBackendAPIException, self.driver.create_volume, testvol) @mock.patch.object(rest_client.KioxiaProvisioner, 'create_volume') def test_volume_create_exception(self, mock_create_volume): testvol = _stub_volume() mock_create_volume.side_effect = Exception() self.assertRaises(exception.VolumeBackendAPIException, self.driver.create_volume, testvol) @mock.patch.object(rest_client.KioxiaProvisioner, 'delete_volume') def test_delete_volume_success(self, mock_delete_volume): testvol = _stub_volume() mock_delete_volume.return_value = success_prov_response result = self.driver.delete_volume(testvol) mock_delete_volume.assert_any_call(testvol['id']) self.assertIsNone(result) @mock.patch.object(rest_client.KioxiaProvisioner, 'delete_volume') def test_delete_volume_failure(self, mock_delete_volume): testvol = _stub_volume() mock_delete_volume.return_value = fail_prov_response self.assertRaises(exception.VolumeBackendAPIException, self.driver.delete_volume, testvol) @mock.patch.object(rest_client.KioxiaProvisioner, 'delete_volume') def test_delete_volume_exception(self, mock_delete_volume): testvol = _stub_volume() mock_delete_volume.side_effect = Exception() self.assertRaises(exception.VolumeBackendAPIException, self.driver.delete_volume, testvol) @mock.patch.object(rest_client.KioxiaProvisioner, 'get_backend_by_id') @mock.patch.object(rest_client.KioxiaProvisioner, 'get_targets') @mock.patch.object(rest_client.KioxiaProvisioner, 'get_volumes_by_uuid') @mock.patch.object(rest_client.KioxiaProvisioner, 'publish') @mock.patch.object(rest_client.KioxiaProvisioner, 'host_probe') def test_initialize_connection(self, mock_host_probe, mock_publish, mock_get_volumes_by_uuid, mock_get_targets, mock_get_backend_by_id): testvol = _stub_volume() testconn = _stub_connector() prov_target1 = TargetEntity('target.nqn', prov_backend1) prov_portal = PortalEntity('1.2.3.4', 4420, 'TCP') backend = BackendEntity([prov_portal]) prov_targets_response = entities.ProvisionerResponse([prov_target1]) mock_publish.return_value = success_prov_response mock_host_probe.return_value = success_prov_response mock_get_volumes_by_uuid.return_value = prov_volumes_response mock_get_targets.return_value = prov_targets_response mock_get_backend_by_id.return_value = \ entities.ProvisionerResponse([backend]) result = self.driver.initialize_connection(testvol, testconn) mock_host_probe.assert_any_call(testconn['nqn'], testconn['uuid'], testconn['host'], 'Agent', 'cinder-driver-0.1', 30) mock_publish.assert_any_call(testconn['uuid'], testvol['id']) mock_get_volumes_by_uuid.assert_any_call(testvol['id']) mock_get_targets.assert_any_call(testconn['uuid'], 
testvol['id']) mock_get_backend_by_id.assert_any_call('dummy-pid-1') expected_replica = {'portals': [('1.2.3.4', '4420', 'TCP')], 'target_nqn': 'target.nqn', 'vol_uuid': testvol['id']} expected_data = { 'vol_uuid': testvol['id'], 'alias': testvol['name'], 'writable': True, 'volume_replicas': [expected_replica], 'replica_count': 2 } expected_result = { 'driver_volume_type': 'nvmeof', 'data': expected_data } self.assertDictEqual(result, expected_result) @mock.patch.object(rest_client.KioxiaProvisioner, 'get_backend_by_id') @mock.patch.object(rest_client.KioxiaProvisioner, 'get_targets') @mock.patch.object(rest_client.KioxiaProvisioner, 'get_volumes_by_uuid') @mock.patch.object(rest_client.KioxiaProvisioner, 'publish') @mock.patch.object(rest_client.KioxiaProvisioner, 'host_probe') def test_initialize_connection_host_probe_failure(self, mock_host_probe, mock_publish, mock_get_volumes_by_uuid, mock_get_targets, mock_get_backend_by_id): testvol = _stub_volume() testconn = _stub_connector() prov_target = TargetEntity('target.nqn', prov_backend1) prov_portal = PortalEntity('1.2.3.4', 4420, 'TCP') backend = BackendEntity([prov_portal]) prov_targets_response = entities.ProvisionerResponse([prov_target]) mock_publish.return_value = success_prov_response mock_host_probe.return_value = fail_prov_response mock_get_volumes_by_uuid.return_value = prov_volumes_response mock_get_targets.return_value = prov_targets_response mock_get_backend_by_id.return_value = \ entities.ProvisionerResponse([backend]) self.assertRaises(exception.VolumeBackendAPIException, self.driver.initialize_connection, testvol, testconn) @mock.patch.object(rest_client.KioxiaProvisioner, 'get_backend_by_id') @mock.patch.object(rest_client.KioxiaProvisioner, 'get_targets') @mock.patch.object(rest_client.KioxiaProvisioner, 'get_volumes_by_uuid') @mock.patch.object(rest_client.KioxiaProvisioner, 'publish') @mock.patch.object(rest_client.KioxiaProvisioner, 'host_probe') def test_initialize_connection_host_probe_exception( self, mock_host_probe, mock_publish, mock_get_volumes_by_uuid, mock_get_targets, mock_get_backend_by_id): testvol = _stub_volume() testconn = _stub_connector() prov_target = TargetEntity('target.nqn', prov_backend1) prov_portal = PortalEntity('1.2.3.4', 4420, 'TCP') backend = BackendEntity([prov_portal]) prov_targets_response = entities.ProvisionerResponse([prov_target]) mock_publish.return_value = success_prov_response mock_host_probe.side_effect = Exception() mock_get_volumes_by_uuid.return_value = prov_volumes_response mock_get_targets.return_value = prov_targets_response mock_get_backend_by_id.return_value = \ entities.ProvisionerResponse([backend]) self.assertRaises(exception.VolumeBackendAPIException, self.driver.initialize_connection, testvol, testconn) @mock.patch.object(rest_client.KioxiaProvisioner, 'get_backend_by_id') @mock.patch.object(rest_client.KioxiaProvisioner, 'get_targets') @mock.patch.object(rest_client.KioxiaProvisioner, 'get_volumes_by_uuid') @mock.patch.object(rest_client.KioxiaProvisioner, 'publish') @mock.patch.object(rest_client.KioxiaProvisioner, 'host_probe') def test_initialize_connection_publish_failure(self, mock_host_probe, mock_publish, mock_get_volumes_by_uuid, mock_get_targets, mock_get_backend_by_id): testvol = _stub_volume() testconn = _stub_connector() prov_target = TargetEntity('target.nqn', prov_backend1) prov_portal = PortalEntity('1.2.3.4', 4420, 'TCP') backend = BackendEntity([prov_portal]) prov_targets_response = entities.ProvisionerResponse([prov_target]) 
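        # Everything except publish() is wired up to succeed below; publish()
        # alone returns the canned failure response, which should be enough
        # to make initialize_connection() surface a VolumeBackendAPIException.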
mock_publish.return_value = fail_prov_response mock_host_probe.return_value = success_prov_response mock_get_volumes_by_uuid.return_value = prov_volumes_response mock_get_targets.return_value = prov_targets_response mock_get_backend_by_id.return_value = \ entities.ProvisionerResponse([backend]) self.assertRaises(exception.VolumeBackendAPIException, self.driver.initialize_connection, testvol, testconn) @mock.patch.object(rest_client.KioxiaProvisioner, 'get_backend_by_id') @mock.patch.object(rest_client.KioxiaProvisioner, 'get_targets') @mock.patch.object(rest_client.KioxiaProvisioner, 'get_volumes_by_uuid') @mock.patch.object(rest_client.KioxiaProvisioner, 'publish') @mock.patch.object(rest_client.KioxiaProvisioner, 'host_probe') def test_initialize_connection_publish_exception(self, mock_host_probe, mock_publish, mock_get_volumes_by_uuid, mock_get_targets, mock_get_backend_by_id): testvol = _stub_volume() testconn = _stub_connector() prov_target = TargetEntity('target.nqn', prov_backend1) prov_portal = PortalEntity('1.2.3.4', 4420, 'TCP') backend = BackendEntity([prov_portal]) prov_targets_response = entities.ProvisionerResponse([prov_target]) mock_publish.side_effect = Exception() mock_host_probe.return_value = success_prov_response mock_get_volumes_by_uuid.return_value = prov_volumes_response mock_get_targets.return_value = prov_targets_response mock_get_backend_by_id.return_value = \ entities.ProvisionerResponse([backend]) self.assertRaises(exception.VolumeBackendAPIException, self.driver.initialize_connection, testvol, testconn) @mock.patch.object(rest_client.KioxiaProvisioner, 'get_backend_by_id') @mock.patch.object(rest_client.KioxiaProvisioner, 'get_targets') @mock.patch.object(rest_client.KioxiaProvisioner, 'get_volumes_by_uuid') @mock.patch.object(rest_client.KioxiaProvisioner, 'publish') @mock.patch.object(rest_client.KioxiaProvisioner, 'host_probe') def test_initialize_connection_volumes_failure(self, mock_host_probe, mock_publish, mock_get_volumes_by_uuid, mock_get_targets, mock_get_backend_by_id): testvol = _stub_volume() testconn = _stub_connector() prov_target = TargetEntity('target.nqn', prov_backend1) prov_portal = PortalEntity('1.2.3.4', 4420, 'TCP') backend = BackendEntity([prov_portal]) prov_targets_response = entities.ProvisionerResponse([prov_target]) mock_publish.return_value = success_prov_response mock_host_probe.return_value = success_prov_response mock_get_volumes_by_uuid.return_value = fail_prov_response mock_get_targets.return_value = prov_targets_response mock_get_backend_by_id.return_value = \ entities.ProvisionerResponse([backend]) self.assertRaises(exception.VolumeBackendAPIException, self.driver.initialize_connection, testvol, testconn) @mock.patch.object(rest_client.KioxiaProvisioner, 'get_backend_by_id') @mock.patch.object(rest_client.KioxiaProvisioner, 'get_targets') @mock.patch.object(rest_client.KioxiaProvisioner, 'get_volumes_by_uuid') @mock.patch.object(rest_client.KioxiaProvisioner, 'publish') @mock.patch.object(rest_client.KioxiaProvisioner, 'host_probe') def test_initialize_connection_no_volumes(self, mock_host_probe, mock_publish, mock_get_volumes_by_uuid, mock_get_targets, mock_get_backend_by_id): testvol = _stub_volume() testconn = _stub_connector() prov_target = TargetEntity('target.nqn', prov_backend1) prov_portal = PortalEntity('1.2.3.4', 4420, 'TCP') backend = BackendEntity([prov_portal]) prov_targets_response = entities.ProvisionerResponse([prov_target]) mock_publish.return_value = success_prov_response mock_host_probe.return_value = 
success_prov_response mock_get_volumes_by_uuid.return_value = no_entities_prov_response mock_get_targets.return_value = prov_targets_response mock_get_backend_by_id.return_value = \ entities.ProvisionerResponse([backend]) self.assertRaises(exception.VolumeBackendAPIException, self.driver.initialize_connection, testvol, testconn) @mock.patch.object(rest_client.KioxiaProvisioner, 'get_backend_by_id') @mock.patch.object(rest_client.KioxiaProvisioner, 'get_targets') @mock.patch.object(rest_client.KioxiaProvisioner, 'get_volumes_by_uuid') @mock.patch.object(rest_client.KioxiaProvisioner, 'publish') @mock.patch.object(rest_client.KioxiaProvisioner, 'host_probe') def test_initialize_connection_volumes_exception(self, mock_host_probe, mock_publish, mock_get_volumes_by_uuid, mock_get_targets, mock_get_backend_by_id): testvol = _stub_volume() testconn = _stub_connector() prov_target = TargetEntity('target.nqn', prov_backend1) prov_portal = PortalEntity('1.2.3.4', 4420, 'TCP') backend = BackendEntity([prov_portal]) prov_targets_response = entities.ProvisionerResponse([prov_target]) mock_publish.return_value = success_prov_response mock_host_probe.return_value = success_prov_response mock_get_volumes_by_uuid.side_effect = Exception() mock_get_targets.return_value = prov_targets_response mock_get_backend_by_id.return_value = \ entities.ProvisionerResponse([backend]) self.assertRaises(exception.VolumeBackendAPIException, self.driver.initialize_connection, testvol, testconn) @mock.patch.object(rest_client.KioxiaProvisioner, 'get_backend_by_id') @mock.patch.object(rest_client.KioxiaProvisioner, 'get_targets') @mock.patch.object(rest_client.KioxiaProvisioner, 'get_volumes_by_uuid') @mock.patch.object(rest_client.KioxiaProvisioner, 'publish') @mock.patch.object(rest_client.KioxiaProvisioner, 'host_probe') def test_initialize_connection_targets_failure(self, mock_host_probe, mock_publish, mock_get_volumes_by_uuid, mock_get_targets, mock_get_backend_by_id): testvol = _stub_volume() testconn = _stub_connector() prov_portal = PortalEntity('1.2.3.4', 4420, 'TCP') backend = BackendEntity([prov_portal]) mock_publish.return_value = success_prov_response mock_host_probe.return_value = success_prov_response mock_get_volumes_by_uuid.return_value = prov_volumes_response mock_get_targets.return_value = fail_prov_response mock_get_backend_by_id.return_value = \ entities.ProvisionerResponse([backend]) self.assertRaises(exception.VolumeBackendAPIException, self.driver.initialize_connection, testvol, testconn) @mock.patch.object(rest_client.KioxiaProvisioner, 'get_backend_by_id') @mock.patch.object(rest_client.KioxiaProvisioner, 'get_targets') @mock.patch.object(rest_client.KioxiaProvisioner, 'get_volumes_by_uuid') @mock.patch.object(rest_client.KioxiaProvisioner, 'publish') @mock.patch.object(rest_client.KioxiaProvisioner, 'host_probe') def test_initialize_connection_no_targets(self, mock_host_probe, mock_publish, mock_get_volumes_by_uuid, mock_get_targets, mock_get_backend_by_id): testvol = _stub_volume() testconn = _stub_connector() prov_portal = PortalEntity('1.2.3.4', 4420, 'TCP') backend = BackendEntity([prov_portal]) mock_publish.return_value = success_prov_response mock_host_probe.return_value = success_prov_response mock_get_volumes_by_uuid.return_value = prov_volumes_response mock_get_targets.return_value = no_entities_prov_response mock_get_backend_by_id.return_value = \ entities.ProvisionerResponse([backend]) self.assertRaises(exception.VolumeBackendAPIException, self.driver.initialize_connection, testvol, 
testconn) @mock.patch.object(rest_client.KioxiaProvisioner, 'get_backend_by_id') @mock.patch.object(rest_client.KioxiaProvisioner, 'get_targets') @mock.patch.object(rest_client.KioxiaProvisioner, 'get_volumes_by_uuid') @mock.patch.object(rest_client.KioxiaProvisioner, 'publish') @mock.patch.object(rest_client.KioxiaProvisioner, 'host_probe') def test_initialize_connection_targets_exception(self, mock_host_probe, mock_publish, mock_get_volumes_by_uuid, mock_get_targets, mock_get_backend_by_id): testvol = _stub_volume() testconn = _stub_connector() prov_portal = PortalEntity('1.2.3.4', 4420, 'TCP') backend = BackendEntity([prov_portal]) mock_publish.return_value = success_prov_response mock_host_probe.return_value = success_prov_response mock_get_volumes_by_uuid.return_value = prov_volumes_response mock_get_targets.side_effect = Exception() mock_get_backend_by_id.return_value = \ entities.ProvisionerResponse([backend]) self.assertRaises(exception.VolumeBackendAPIException, self.driver.initialize_connection, testvol, testconn) @mock.patch.object(rest_client.KioxiaProvisioner, 'get_backend_by_id') @mock.patch.object(rest_client.KioxiaProvisioner, 'get_targets') @mock.patch.object(rest_client.KioxiaProvisioner, 'get_volumes_by_uuid') @mock.patch.object(rest_client.KioxiaProvisioner, 'publish') @mock.patch.object(rest_client.KioxiaProvisioner, 'host_probe') def test_initialize_connection_backend_failure(self, mock_host_probe, mock_publish, mock_get_volumes_by_uuid, mock_get_targets, mock_get_backend_by_id): testvol = _stub_volume() testconn = _stub_connector() prov_target = TargetEntity('target.nqn', prov_backend1) prov_targets_response = entities.ProvisionerResponse([prov_target]) mock_publish.return_value = success_prov_response mock_host_probe.return_value = success_prov_response mock_get_volumes_by_uuid.return_value = prov_volumes_response mock_get_targets.return_value = prov_targets_response mock_get_backend_by_id.return_value = fail_prov_response self.assertRaises(exception.VolumeBackendAPIException, self.driver.initialize_connection, testvol, testconn) @mock.patch.object(rest_client.KioxiaProvisioner, 'get_backend_by_id') @mock.patch.object(rest_client.KioxiaProvisioner, 'get_targets') @mock.patch.object(rest_client.KioxiaProvisioner, 'get_volumes_by_uuid') @mock.patch.object(rest_client.KioxiaProvisioner, 'publish') @mock.patch.object(rest_client.KioxiaProvisioner, 'host_probe') def test_initialize_connection_no_backend(self, mock_host_probe, mock_publish, mock_get_volumes_by_uuid, mock_get_targets, mock_get_backend_by_id): testvol = _stub_volume() testconn = _stub_connector() prov_target = TargetEntity('target.nqn', prov_backend1) prov_targets_response = entities.ProvisionerResponse([prov_target]) mock_publish.return_value = success_prov_response mock_host_probe.return_value = success_prov_response mock_get_volumes_by_uuid.return_value = prov_volumes_response mock_get_targets.return_value = prov_targets_response mock_get_backend_by_id.return_value = no_entities_prov_response self.assertRaises(exception.VolumeBackendAPIException, self.driver.initialize_connection, testvol, testconn) @mock.patch.object(rest_client.KioxiaProvisioner, 'get_backend_by_id') @mock.patch.object(rest_client.KioxiaProvisioner, 'get_targets') @mock.patch.object(rest_client.KioxiaProvisioner, 'get_volumes_by_uuid') @mock.patch.object(rest_client.KioxiaProvisioner, 'publish') @mock.patch.object(rest_client.KioxiaProvisioner, 'host_probe') def test_initialize_connection_backend_exception(self, mock_host_probe, 
mock_publish, mock_get_volumes_by_uuid, mock_get_targets, mock_get_backend_by_id): testvol = _stub_volume() testconn = _stub_connector() prov_target = TargetEntity('target.nqn', prov_backend1) prov_targets_response = entities.ProvisionerResponse([prov_target]) mock_publish.return_value = success_prov_response mock_host_probe.return_value = success_prov_response mock_get_volumes_by_uuid.return_value = prov_volumes_response mock_get_targets.return_value = prov_targets_response mock_get_backend_by_id.side_effect = Exception() self.assertRaises(exception.VolumeBackendAPIException, self.driver.initialize_connection, testvol, testconn) @mock.patch.object(rest_client.KioxiaProvisioner, 'unpublish') def test_terminate_connection(self, mock_unpublish): testvol = _stub_volume() testconn = _stub_connector() mock_unpublish.return_value = success_prov_response result = self.driver.terminate_connection(testvol, testconn) mock_unpublish.assert_any_call(testconn['uuid'], testvol['id']) self.assertIsNone(result) @mock.patch.object(rest_client.KioxiaProvisioner, 'unpublish') def test_terminate_connection_unpublish_failure(self, mock_unpublish): testvol = _stub_volume() testconn = _stub_connector() mock_unpublish.return_value = fail_prov_response self.assertRaises(exception.VolumeBackendAPIException, self.driver.terminate_connection, testvol, testconn) @mock.patch.object(rest_client.KioxiaProvisioner, 'unpublish') def test_terminate_connection_unpublish_exception(self, mock_unpublish): testvol = _stub_volume() testconn = _stub_connector() mock_unpublish.side_effect = Exception() self.assertRaises(exception.VolumeBackendAPIException, self.driver.terminate_connection, testvol, testconn) @mock.patch.object(rest_client.KioxiaProvisioner, 'get_tenants') def test_get_volume_stats(self, mock_get_tenants): tenant = TenantEntity(1000, 400) mock_get_tenants.return_value = entities.ProvisionerResponse([tenant]) result = self.driver.get_volume_stats(True) mock_get_tenants.assert_any_call() self.assertDictEqual(result, self.expected_stats) @mock.patch.object(rest_client.KioxiaProvisioner, 'get_tenants') def test_get_volume_stats_tenants_failure(self, mock_get_tenants): mock_get_tenants.return_value = fail_prov_response self.expected_stats['total_capacity_gb'] = 'unknown' self.expected_stats['free_capacity_gb'] = 'unknown' self.assertDictEqual( self.driver.get_volume_stats(True), self.expected_stats) @mock.patch.object(rest_client.KioxiaProvisioner, 'get_tenants') def test_get_volume_stats_no_tenants(self, mock_get_tenants): mock_get_tenants.return_value = no_entities_prov_response self.expected_stats['total_capacity_gb'] = 'unknown' self.expected_stats['free_capacity_gb'] = 'unknown' self.assertDictEqual( self.driver.get_volume_stats(True), self.expected_stats) @mock.patch.object(rest_client.KioxiaProvisioner, 'get_tenants') def test_get_volume_stats_tenants_exception(self, mock_get_tenants): mock_get_tenants.side_effect = Exception() self.expected_stats['total_capacity_gb'] = 'unknown' self.expected_stats['free_capacity_gb'] = 'unknown' self.assertDictEqual( self.driver.get_volume_stats(True), self.expected_stats) @mock.patch.object(rest_client.KioxiaProvisioner, 'create_snapshot') def test_create_snapshot_success(self, mock_create_snapshot): testsnap = _stub_snapshot() mock_create_snapshot.return_value = success_prov_response result = self.driver.create_snapshot(testsnap) args, kwargs = mock_create_snapshot.call_args mock_call = args[0] self.assertEqual(mock_call.alias, testsnap['name']) 
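        # The remaining checks confirm that the snapshot entity handed to the
        # provisioner was built from the stub snapshot's volume_id and id,
        # and that a successful create_snapshot() returns None.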
self.assertEqual(mock_call.volumeID, testsnap['volume_id']) self.assertEqual(mock_call.snapshotID, testsnap['id']) self.assertIsNone(result) @mock.patch.object(rest_client.KioxiaProvisioner, 'create_snapshot') def test_create_snapshot_failure(self, mock_create_snapshot): testsnap = _stub_snapshot() mock_create_snapshot.return_value = fail_prov_response self.assertRaises(exception.VolumeBackendAPIException, self.driver.create_snapshot, testsnap) @mock.patch.object(rest_client.KioxiaProvisioner, 'create_snapshot') def test_create_snapshot_exception(self, mock_create_snapshot): testsnap = _stub_snapshot() mock_create_snapshot.side_effect = Exception() self.assertRaises(exception.VolumeBackendAPIException, self.driver.create_snapshot, testsnap) @mock.patch.object(rest_client.KioxiaProvisioner, 'delete_snapshot') def test_delete_snapshot_success(self, mock_delete_snapshot): testsnap = _stub_snapshot() mock_delete_snapshot.return_value = success_prov_response result = self.driver.delete_snapshot(testsnap) mock_delete_snapshot.assert_any_call(testsnap['id']) self.assertIsNone(result) @mock.patch.object(rest_client.KioxiaProvisioner, 'delete_snapshot') def test_delete_snapshot_failure(self, mock_delete_snapshot): testsnap = _stub_snapshot() mock_delete_snapshot.return_value = fail_prov_response self.assertRaises(exception.VolumeBackendAPIException, self.driver.delete_snapshot, testsnap) @mock.patch.object(rest_client.KioxiaProvisioner, 'delete_snapshot') def test_delete_snapshot_exception(self, mock_delete_snapshot): testsnap = _stub_snapshot() mock_delete_snapshot.side_effect = Exception() self.assertRaises(exception.VolumeBackendAPIException, self.driver.delete_snapshot, testsnap) @mock.patch.object(rest_client.KioxiaProvisioner, 'create_snapshot_volume') def test_create_volume_from_snapshot_success(self, mock_create_snapshot_volume): testsnap = _stub_snapshot() testvol = _stub_volume() mock_create_snapshot_volume.return_value = success_prov_response result = self.driver.create_volume_from_snapshot(testvol, testsnap) args, kwargs = mock_create_snapshot_volume.call_args mock_call = args[0] self.assertEqual(mock_call.alias, testvol['name']) self.assertEqual(mock_call.volumeID, testsnap['volume_id']) self.assertEqual(mock_call.snapshotID, testsnap['id']) self.assertEqual(mock_call.protocol, VOL_PROTOCOL) self.assertIsNone(result) @mock.patch.object(rest_client.KioxiaProvisioner, 'create_snapshot_volume') def test_create_volume_from_snapshot_failure(self, mock_create_snapshot_volume): testsnap = _stub_snapshot() testvol = _stub_volume() mock_create_snapshot_volume.return_value = fail_prov_response self.assertRaises(exception.VolumeBackendAPIException, self.driver.create_volume_from_snapshot, testvol, testsnap) @mock.patch.object(rest_client.KioxiaProvisioner, 'create_snapshot_volume') def test_create_volume_from_snapshot_exception( self, mock_create_snapshot_volume): testsnap = _stub_snapshot() testvol = _stub_volume() mock_create_snapshot_volume.side_effect = Exception() self.assertRaises(exception.VolumeBackendAPIException, self.driver.create_volume_from_snapshot, testvol, testsnap) @mock.patch.object(rest_client.KioxiaProvisioner, 'expand_volume') def test_extend_volume_success(self, mock_expand_volume): testvol = _stub_volume() mock_expand_volume.return_value = success_prov_response new_size = VOL_SIZE + 2 result = self.driver.extend_volume(testvol, new_size) mock_expand_volume.assert_any_call(new_size, testvol['id']) self.assertIsNone(result) @mock.patch.object(rest_client.KioxiaProvisioner, 
'expand_volume') def test_extend_volume_failure(self, mock_expand_volume): testvol = _stub_volume() mock_expand_volume.return_value = fail_prov_response new_size = VOL_SIZE + 2 self.assertRaises(exception.VolumeBackendAPIException, self.driver.extend_volume, testvol, new_size) @mock.patch.object(rest_client.KioxiaProvisioner, 'expand_volume') def test_extend_volume_exception(self, mock_expand_volume): testvol = _stub_volume() mock_expand_volume.side_effect = Exception() new_size = VOL_SIZE + 2 self.assertRaises(exception.VolumeBackendAPIException, self.driver.extend_volume, testvol, new_size) @mock.patch.object(rest_client.KioxiaProvisioner, 'clone_volume') def test_create_cloned_volume_success(self, mock_clone_volume): testvol = _stub_volume() mock_clone_volume.return_value = success_prov_response result = self.driver.create_cloned_volume(testvol, testvol) args, kwargs = mock_clone_volume.call_args mock_call = args[0] self.assertEqual(mock_call.alias, testvol['name']) self.assertEqual(mock_call.capacity, testvol['size']) self.assertEqual(mock_call.volumeId, testvol['id']) self.assertEqual(mock_call.sourceVolumeId, testvol['id']) self.assertIsNone(result) @mock.patch.object(rest_client.KioxiaProvisioner, 'clone_volume') def test_create_cloned_volume_failure(self, mock_clone_volume): testvol = _stub_volume() mock_clone_volume.return_value = fail_prov_response self.assertRaises(exception.VolumeBackendAPIException, self.driver.create_cloned_volume, testvol, testvol) @mock.patch.object(rest_client.KioxiaProvisioner, 'clone_volume') def test_create_cloned_volume_exception(self, mock_clone_volume): testvol = _stub_volume() mock_clone_volume.side_effect = Exception() self.assertRaises(exception.VolumeBackendAPIException, self.driver.create_cloned_volume, testvol, testvol) def test_convert_host_name(self): name = 'ks-node3-000c2960a794-000c2960a797' result = self.driver._convert_host_name(name) expected = hashlib.md5(name.encode('utf-8'), usedforsecurity=False).hexdigest() self.assertEqual(result, expected) def test_create_export(self): result = self.driver.create_export(None, None, None) self.assertIsNone(result) def test_ensure_export(self): result = self.driver.ensure_export(None, None) self.assertIsNone(result) def test_remove_export(self): result = self.driver.remove_export(None, None) self.assertIsNone(result) def test_check_for_setup_error(self): result = self.driver.check_for_setup_error() self.assertIsNone(result) def _stub_volume(*args, **kwargs): volume = {'id': kwargs.get('id', VOL_UUID), 'name': kwargs.get('name', VOL_NAME), 'project_id': "test-project", 'display_name': kwargs.get('display_name', VOL_NAME), 'size': kwargs.get('size', VOL_SIZE), 'provider_location': kwargs.get('provider_location', None), 'volume_type_id': kwargs.get('volume_type_id', None)} return volume def _stub_connector(*args, **kwargs): connector = {'uuid': kwargs.get('uuid', CONN_UUID), 'nqn': kwargs.get('nqn', CONN_NQN), 'host': kwargs.get('host', CONN_HOST_NAME)} return connector def _stub_snapshot(*args, **kwargs): volume = {'id': kwargs.get('id', SNAP_UUID), 'name': kwargs.get('name', 'snap2000'), 'volume_id': kwargs.get('id', VOL_UUID)} return volume class TenantEntity: def __init__(self, capacity, consumed): self.tenantId = '0' self.capacity = capacity self.consumedCapacity = consumed class TargetEntity: def __init__(self, name, backend): self.targetName = name self.backend = backend class BackendEntity: def __init__(self, portals): self.portals = portals class PortalEntity: def __init__(self, ip, port, 
transport): self.ip = ip self.port = port self.transport = transport if __name__ == '__main__': unittest.main() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/volume/drivers/test_linstordrv.py0000664000175000017500000014063200000000000025266 0ustar00zuulzuul00000000000000# Copyright (c) 2018-2019 LINBIT HA Solutions GmbH # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import copy from unittest import mock from oslo_utils import timeutils from cinder import exception as cinder_exception from cinder.tests.unit import test from cinder.volume import configuration as conf from cinder.volume.drivers import linstordrv as drv CINDER_UNKNOWN = 'unknown' DISKLESS = 'DISKLESS' LVM = 'LVM' LVM_THIN = 'LVM_THIN' ZFS = 'ZFS' ZFS_THIN = 'ZFS_THIN' DRIVER = 'cinder.volume.drivers.linstordrv.' RESOURCE = { 'name': 'CV_0348a7d3-3bb9-452d-9f40-2cf5ebfe9131', 'volume': { 'device_path': '/dev/drbd1000' } } RESOURCE_LIST = [{ 'layer_object': { 'children': [{ 'storage': { 'storage_volumes': [{ 'allocated_size_kib': 1048576, 'device_path': '/dev/vol/CV_0348a7d3-3bb9-452d-9f40-2cf5ebfe9131_00000', 'disk_state': '[]', 'usable_size_kib': 1048576, 'volume_number': 0}]}, 'type': 'STORAGE'}], 'drbd': { 'al_size': 32, 'al_stripes': 1, 'drbd_resource_definition': { 'al_stripe_size_kib': 32, 'al_stripes': 1, 'down': False, 'peer_slots': 7, 'port': 7005, 'secret': 'poQZ0Ad/Bq8DT9fA7ydB', 'transport_type': 'IP'}, 'drbd_volumes': [{ 'allocated_size_kib': 1044740, 'backing_device': '/dev/vol/CV_0348a7d3-3bb9-452d-9f40-2cf5ebfe9131_00000', 'device_path': '/dev/drbd1005', 'drbd_volume_definition': { 'minor_number': 1005, 'volume_number': 0}, 'usable_size_kib': 1044480}], 'node_id': 0, 'peer_slots': 7}, 'type': 'DRBD'}, 'name': 'CV_0348a7d3-3bb9-452d-9f40-2cf5ebfe9131', 'node_name': 'node-2', 'state': {'in_use': False}, 'uuid': 'a4ab4670-c5fc-4590-a3a2-39c4685c8c32', 'volumes': [{ 'allocated_size_kib': 45403, 'device_path': '/dev/drbd1005', 'layer_data_list': [{ 'data': { 'allocated_size_kib': 1044740, 'backing_device': '/dev/vol/CV_0348a7d3-3bb9-452d-9f40-2cf5ebfe9131_00000', 'device_path': '/dev/drbd1005', 'drbd_volume_definition': { 'minor_number': 1005, 'volume_number': 0}, 'usable_size_kib': 1044480}, 'type': 'DRBD'}, { 'data': { 'allocated_size_kib': 1048576, 'device_path': '/dev/vol/CV_0348a7d3-3bb9-452d-9f40-2cf5ebfe9131_00000', 'disk_state': '[]', 'usable_size_kib': 1048576, 'volume_number': 0}, 'type': 'STORAGE'} ], 'props': { 'RestoreFromResource': 'CV_123a2fdc-365f-472e-bb8e-484788712abc', 'RestoreFromSnapshot': 'SN_68edb708-48de-4da1-9953-b9de9da9f1b8' }, 'provider_kind': 'LVM_THIN', 'state': {'disk_state': 'UpToDate'}, 'storage_pool_name': 'DfltStorPool', 'uuid': 'e270ba0c-b284-4f21-85cc-602f132a2251', 'volume_number': 0}]}, { 'flags': ['DISKLESS'], 'layer_object': { 'children': [{ 'storage': { 'storage_volumes': [{ 'allocated_size_kib': 0, 'usable_size_kib': 1044480, 'volume_number': 0}]}, 'type': 'STORAGE'}], 
'drbd': { 'al_size': 32, 'al_stripes': 1, 'drbd_resource_definition': { 'al_stripe_size_kib': 32, 'al_stripes': 1, 'down': False, 'peer_slots': 7, 'port': 7005, 'secret': 'poQZ0Ad/Bq8DT9fA7ydB', 'transport_type': 'IP'}, 'drbd_volumes': [{ 'allocated_size_kib': 1044740, 'device_path': '/dev/drbd1005', 'drbd_volume_definition': { 'minor_number': 1005, 'volume_number': 0}, 'usable_size_kib': 1044480}], 'flags': ['DISKLESS'], 'node_id': 1, 'peer_slots': 7}, 'type': 'DRBD'}, 'name': 'CV_0348a7d3-3bb9-452d-9f40-2cf5ebfe9131', 'node_name': 'node-1', 'state': {'in_use': False}, 'uuid': '11e853df-6f66-4cd9-9fbc-f3f7cc98d5cf', 'volumes': [{ 'allocated_size_kib': 45403, 'device_path': '/dev/drbd1005', 'layer_data_list': [ { 'data': { 'allocated_size_kib': 1044740, 'device_path': '/dev/drbd1005', 'drbd_volume_definition': { 'minor_number': 1005, 'volume_number': 0}, 'usable_size_kib': 1044480}, 'type': 'DRBD' }, { 'data': { 'allocated_size_kib': 0, 'usable_size_kib': 1044480, 'volume_number': 0 }, 'type': 'STORAGE' } ], 'provider_kind': 'DISKLESS', 'state': {'disk_state': 'Diskless'}, 'storage_pool_name': 'DfltStorPool', 'uuid': '27b4aeec-2b42-41c9-b186-86afc8778046', 'volume_number': 0 }]}] RESOURCE_LIST_RESP = ['node-1', 'node-2'] SNAPSHOT_LIST_RESP = ['node-1'] DISKLESS_LIST_RESP = ['node-1'] RESOURCE_DFN_LIST = [{ 'layer_data': [ { 'data': { 'al_stripe_size_kib': 32, 'al_stripes': 1, 'down': False, 'peer_slots': 7, 'port': 7005, 'secret': 'poQZ0Ad/Bq8DT9fA7ydB', 'transport_type': 'IP' }, 'type': 'DRBD' }, { 'type': 'STORAGE' } ], 'name': 'CV_0348a7d3-3bb9-452d-9f40-2cf5ebfe9131', 'props': {'DrbdPrimarySetOn': 'node-1'}, 'uuid': '9a684294-6db4-40c8-bfeb-e5351200b9db' }] RESOURCE_DFN_LIST_RESP = [{ 'rd_name': u'CV_0348a7d3-3bb9-452d-9f40-2cf5ebfe9131', 'rd_uuid': u'9a684294-6db4-40c8-bfeb-e5351200b9db', }] NODES_LIST = [ { 'connection_status': 'ONLINE', 'name': 'node-1', 'net_interfaces': [{ 'address': '192.168.8.63', 'name': 'default', 'satellite_encryption_type': 'PLAIN', 'satellite_port': 3366, 'uuid': '9c5b727f-0c62-4040-9a33-96a4fd4aaac3'}], 'props': {'CurStltConnName': 'default'}, 'type': 'COMBINED', 'uuid': '69b88ffb-50d9-4576-9843-d7bf4724d043' }, { 'connection_status': 'ONLINE', 'name': 'node-2', 'net_interfaces': [{ 'address': '192.168.8.102', 'name': 'default', 'satellite_encryption_type': 'PLAIN', 'satellite_port': 3366, 'uuid': '3f911fc9-4f9b-4155-b9da-047d5242484c'}], 'props': {'CurStltConnName': 'default'}, 'type': 'SATELLITE', 'uuid': '26bde754-0f05-499c-a63c-9f4e5f30556e' } ] NODES_RESP = [ {'node_address': '192.168.8.63', 'node_name': 'node-1'}, {'node_address': '192.168.8.102', 'node_name': 'node-2'} ] STORAGE_POOL_DEF = [{'storage_pool_name': 'DfltStorPool'}] STORAGE_POOL_DEF_RESP = ['DfltStorPool'] STORAGE_POOL_LIST = [ { 'free_capacity': 104815656, 'free_space_mgr_name': 'node-2:DfltStorPool', 'node_name': 'node-2', 'props': { 'StorDriver/LvmVg': 'vol', 'StorDriver/ThinPool': 'thin_pool' }, 'provider_kind': 'LVM_THIN', 'static_traits': { 'Provisioning': 'Thin', 'SupportsSnapshots': 'true' }, 'storage_pool_name': 'DfltStorPool', 'total_capacity': 104857600, 'uuid': '004faf29-be1a-4d74-9470-038bcee2c611' }, { 'free_capacity': 9223372036854775807, 'free_space_mgr_name': 'node-1:DfltStorPool', 'node_name': 'node-1', 'provider_kind': 'DISKLESS', 'static_traits': {'SupportsSnapshots': 'false'}, 'storage_pool_name': 'DfltStorPool', 'total_capacity': 9223372036854775807, 'uuid': '897da09e-1316-45c0-a308-c07008af42df' } ] STORAGE_POOL_LIST_RESP = [ { 'driver_name': 'LVM_THIN', 
'node_name': 'node-2', 'sp_uuid': '004faf29-be1a-4d74-9470-038bcee2c611', 'sp_cap': 100.0, 'sp_free': 100, 'sp_name': u'DfltStorPool' }, { 'driver_name': 'DISKLESS', 'node_name': 'node-1', 'sp_uuid': '897da09e-1316-45c0-a308-c07008af42df', 'sp_allocated': 0.0, 'sp_cap': -1.0, 'sp_free': -1.0, 'sp_name': 'DfltStorPool' } ] VOLUME_STATS_RESP = { 'driver_version': '0.0.7', 'pools': [{ 'QoS_support': False, 'backend_state': 'up', 'filter_function': None, 'free_capacity_gb': 100, 'goodness_function': None, 'location_info': 'linstor://localhost', 'max_over_subscription_ratio': 0, 'multiattach': False, 'pool_name': 'lin-test-driver', 'provisioned_capacity_gb': 0.0, 'reserved_percentage': 0, 'thick_provisioning_support': False, 'thin_provisioning_support': True, 'total_capacity_gb': 100.0, 'total_volumes': 1, }], 'vendor_name': 'LINBIT', 'volume_backend_name': 'lin-test-driver' } CINDER_VOLUME = { 'id': '0348a7d3-3bb9-452d-9f40-2cf5ebfe9131', 'name': 'test-lin-vol', 'size': 1, 'volume_type_id': 'linstor', 'created_at': timeutils.utcnow() } SNAPSHOT = { 'id': '0348a7d3-3bb9-452d-9f40-2cf5ebfe9131', 'volume_id': '0348a7d3-3bb9-452d-9f40-2cf5ebfe9131', 'volume_size': 1 } VOLUME_NAMES = { 'linstor': 'CV_0348a7d3-3bb9-452d-9f40-2cf5ebfe9131', 'cinder': '0348a7d3-3bb9-452d-9f40-2cf5ebfe9131', 'snap': 'SN_0348a7d3-3bb9-452d-9f40-2cf5ebfe9131', } class LinstorAPIFakeDriver(object): def fake_api_ping(self): return 1234 def fake_api_resource_list(self): return RESOURCE_LIST def fake_api_node_list(self): return NODES_LIST def fake_api_storage_pool_dfn_list(self): return STORAGE_POOL_DEF def fake_api_storage_pool_list(self): return STORAGE_POOL_LIST def fake_api_resource_dfn_list(self): return RESOURCE_DFN_LIST def fake_api_snapshot_list(self): return SNAPSHOT_LIST_RESP class LinstorFakeResource(object): def __init__(self): self.volumes = [{'size': 1069547520}] self.id = 0 def delete(self): return True def is_diskless(self, host): if host in DISKLESS_LIST_RESP: return True else: return False class LinstorBaseDriverTestCase(test.TestCase): def __init__(self, *args, **kwargs): super(LinstorBaseDriverTestCase, self).__init__(*args, **kwargs) def setUp(self): super(LinstorBaseDriverTestCase, self).setUp() if drv is None: return self._mock = mock.Mock() self._fake_driver = LinstorAPIFakeDriver() self.configuration = mock.Mock(conf.Configuration) self.driver = drv.LinstorBaseDriver( configuration=self.configuration) self.driver.VERSION = '0.0.7' self.driver.default_rsc_size = 1 self.driver.default_vg_name = 'vg-1' self.driver.default_downsize_factor = int('4096') self.driver.default_pool = STORAGE_POOL_DEF_RESP[0] self.driver.host_name = 'node-1' self.driver.diskless = True self.driver.default_uri = 'linstor://localhost' self.driver.default_backend_name = 'lin-test-driver' self.driver.configuration.reserved_percentage = 0 self.driver.configuration.max_over_subscription_ratio = 0 self.driver.ap_count = 0 @mock.patch(DRIVER + 'LinstorBaseDriver._ping') def test_ping(self, m_ping): m_ping.return_value = self._fake_driver.fake_api_ping() val = self.driver._ping() expected = 1234 self.assertEqual(expected, val) @mock.patch('uuid.uuid4') def test_clean_uuid(self, m_uuid): m_uuid.return_value = u'bd6472d1-dc3c-4d41-a5f0-f44271c05680' val = self.driver._clean_uuid() expected = u'bd6472d1-dc3c-4d41-a5f0-f44271c05680' self.assertEqual(expected, val) @mock.patch('uuid.uuid4') def test_clean_uuid_with_braces(self, m_uuid): m_uuid.return_value = u'{bd6472d1-dc3c-4d41-a5f0-f44271c05680}' val = self.driver._clean_uuid() expected 
= u'bd6472d1-dc3c-4d41-a5f0-f44271c05680' m_uuid.assert_called_once() self.assertEqual(expected, val) # Test volume size conversions def test_unit_conversions_to_linstor_1GiB(self): val = self.driver._vol_size_to_linstor(1) expected = 1044480 # 1048575 - 4096 self.assertEqual(expected, val) def test_unit_conversions_to_linstor_2GiB(self): val = self.driver._vol_size_to_linstor(2) expected = 2093056 # 2097152 - 4096 self.assertEqual(expected, val) def test_unit_conversions_to_cinder(self): val = self.driver._vol_size_to_cinder(1048576) expected = 1 self.assertEqual(expected, val) def test_unit_conversions_to_cinder_2GiB(self): val = self.driver._vol_size_to_cinder(2097152) expected = 2 self.assertEqual(expected, val) def test_is_clean_volume_name(self): val = self.driver._is_clean_volume_name(VOLUME_NAMES['cinder'], drv.DM_VN_PREFIX) expected = VOLUME_NAMES['linstor'] self.assertEqual(expected, val) def test_is_clean_volume_name_invalid(self): wrong_uuid = 'bc3015e6-695f-4688-91f2-invaliduuid1' val = self.driver._is_clean_volume_name(wrong_uuid, drv.DM_VN_PREFIX) expected = None self.assertEqual(expected, val) def test_snapshot_name_from_cinder_snapshot(self): val = self.driver._snapshot_name_from_cinder_snapshot( SNAPSHOT) expected = VOLUME_NAMES['snap'] self.assertEqual(expected, val) def test_cinder_volume_name_from_drbd_resource(self): val = self.driver._cinder_volume_name_from_drbd_resource( VOLUME_NAMES['linstor']) expected = VOLUME_NAMES['cinder'] self.assertEqual(expected, val) def test_drbd_resource_name_from_cinder_snapshot(self): val = self.driver._drbd_resource_name_from_cinder_snapshot( SNAPSHOT) expected = VOLUME_NAMES['linstor'] self.assertEqual(expected, val) def test_drbd_resource_name_from_cinder_volume(self): val = self.driver._drbd_resource_name_from_cinder_volume( CINDER_VOLUME) expected = VOLUME_NAMES['linstor'] self.assertEqual(expected, val) @mock.patch(DRIVER + 'LinstorBaseDriver._get_api_resource_list') def test_get_rcs_path(self, m_rsc_list): m_rsc_list.return_value = self._fake_driver.fake_api_resource_list() val = self.driver._get_rsc_path(VOLUME_NAMES['linstor']) expected = '/dev/drbd1005' m_rsc_list.assert_called_once() self.assertEqual(expected, val) @mock.patch(DRIVER + 'LinstorBaseDriver._get_api_resource_list') def test_get_local_path(self, m_rsc_list): m_rsc_list.return_value = self._fake_driver.fake_api_resource_list() val = self.driver._get_local_path(CINDER_VOLUME) expected = '/dev/drbd1005' m_rsc_list.assert_called_once() self.assertEqual(expected, val) @mock.patch(DRIVER + 'LinstorBaseDriver._get_api_storage_pool_dfn_list') def test_get_spd(self, m_spd_list): m_spd_list.return_value = ( self._fake_driver.fake_api_storage_pool_dfn_list()) val = self.driver._get_spd() expected = STORAGE_POOL_DEF_RESP m_spd_list.assert_called_once() self.assertEqual(expected, val) @mock.patch(DRIVER + 'LinstorBaseDriver._get_api_storage_pool_list') def test_get_storage_pool(self, m_sp_list): m_sp_list.return_value = ( self._fake_driver.fake_api_storage_pool_list()) val = self.driver._get_storage_pool() expected = STORAGE_POOL_LIST_RESP m_sp_list.assert_called_once() self.assertEqual(expected, val) @mock.patch(DRIVER + 'LinstorBaseDriver._get_api_resource_dfn_list') def test_get_resource_definitions(self, m_rscd_list): m_rscd_list.return_value = ( self._fake_driver.fake_api_resource_dfn_list()) val = self.driver._get_resource_definitions() expected = RESOURCE_DFN_LIST_RESP m_rscd_list.assert_called_once() self.assertEqual(expected, val) @mock.patch(DRIVER + 
'LinstorBaseDriver._get_snapshot_nodes') def test_get_snapshot_nodes(self, m_rsc_list): m_rsc_list.return_value = self._fake_driver.fake_api_snapshot_list() val = self.driver._get_snapshot_nodes(VOLUME_NAMES['linstor']) expected = SNAPSHOT_LIST_RESP m_rsc_list.assert_called_once() self.assertEqual(expected, val) @mock.patch(DRIVER + 'LinstorBaseDriver._get_api_resource_list') def test_get_diskless_nodes(self, m_rsc_list): m_rsc_list.return_value = self._fake_driver.fake_api_resource_list() val = self.driver._get_diskless_nodes(RESOURCE['name']) expected = DISKLESS_LIST_RESP m_rsc_list.assert_called_once() self.assertEqual(expected, val) @mock.patch(DRIVER + 'LinstorBaseDriver._get_api_node_list') def test_get_linstor_nodes(self, m_node_list): m_node_list.return_value = self._fake_driver.fake_api_node_list() val = self.driver._get_linstor_nodes() expected = RESOURCE_LIST_RESP m_node_list.assert_called_once() self.assertEqual(expected, val) @mock.patch(DRIVER + 'LinstorBaseDriver._get_api_node_list') def test_get_nodes(self, m_node_list): m_node_list.return_value = self._fake_driver.fake_api_node_list() val = self.driver._get_nodes() expected = NODES_RESP m_node_list.assert_called_once() self.assertEqual(expected, val) @mock.patch(DRIVER + 'LinstorBaseDriver._api_rsc_size') @mock.patch(DRIVER + 'LinstorBaseDriver._api_rsc_is_diskless') @mock.patch(DRIVER + 'LinstorBaseDriver._get_api_resource_list') @mock.patch(DRIVER + 'LinstorBaseDriver.get_goodness_function') @mock.patch(DRIVER + 'LinstorBaseDriver.get_filter_function') @mock.patch(DRIVER + 'LinstorBaseDriver._get_api_resource_dfn_list') @mock.patch(DRIVER + 'LinstorBaseDriver._get_api_storage_pool_list') def test_get_volume_stats(self, m_sp_list, m_rscd_list, m_filter, m_goodness, m_rsc_list, m_diskless, m_rsc_size): m_sp_list.return_value = ( self._fake_driver.fake_api_storage_pool_list()) m_rscd_list.return_value = ( self._fake_driver.fake_api_resource_dfn_list()) m_filter.return_value = None m_goodness.return_value = None m_rsc_list.return_value = RESOURCE_LIST m_diskless.return_value = True m_rsc_size.return_value = 1069547520 val = self.driver._get_volume_stats() expected = VOLUME_STATS_RESP m_sp_list.assert_called_once() m_rscd_list.assert_called_once() self.assertEqual(expected, val) @mock.patch(DRIVER + 'LinstorBaseDriver._api_snapshot_create') def test_create_snapshot_fail(self, m_snap_create): m_snap_create.return_value = False self.assertRaises(cinder_exception.VolumeBackendAPIException, self.driver.create_snapshot, SNAPSHOT) @mock.patch(DRIVER + 'LinstorBaseDriver._api_snapshot_create') def test_create_snapshot_success(self, m_snap_create): m_snap_create.return_value = True # No exception should be raised self.assertIsNone(self.driver.create_snapshot(SNAPSHOT)) @mock.patch(DRIVER + 'LinstorBaseDriver._api_snapshot_delete') def test_delete_snapshot_fail(self, m_snap_delete): m_snap_delete.return_value = False self.assertRaises(cinder_exception.VolumeBackendAPIException, self.driver.delete_snapshot, SNAPSHOT) @mock.patch(DRIVER + 'LinstorBaseDriver._get_snapshot_nodes') @mock.patch(DRIVER + 'LinstorBaseDriver._api_snapshot_delete') def test_delete_snapshot_success(self, m_snap_delete, m_snap_nodes): m_snap_delete.return_value = True m_snap_nodes.return_value = self._fake_driver.fake_api_snapshot_list() # No exception should be raised self.driver.delete_snapshot(SNAPSHOT) @mock.patch(DRIVER + 'LinstorBaseDriver._api_rsc_dfn_delete') @mock.patch(DRIVER + 'LinstorBaseDriver._get_snapshot_nodes') @mock.patch(DRIVER + 
'LinstorBaseDriver._api_snapshot_delete') def test_delete_snapshot_success_cleanup_rd(self, m_snap_delete, m_snap_nodes, m_rd_delete): m_snap_delete.return_value = True m_snap_nodes.return_value = [] m_rd_delete.return_value = None # No exception should be raised self.driver.delete_snapshot(SNAPSHOT) # Resource Definition Delete should run once m_rd_delete.assert_called_once() @mock.patch(DRIVER + 'LinstorBaseDriver._get_api_storage_pool_list') @mock.patch(DRIVER + 'LinstorBaseDriver._get_api_volume_extend') @mock.patch(DRIVER + 'LinstorBaseDriver._api_rsc_create') @mock.patch(DRIVER + 'LinstorBaseDriver._api_snapshot_resource_restore') @mock.patch(DRIVER + 'LinstorBaseDriver._get_linstor_nodes') @mock.patch(DRIVER + 'LinstorBaseDriver._api_snapshot_volume_dfn_restore') @mock.patch(DRIVER + 'LinstorBaseDriver._check_api_reply') @mock.patch(DRIVER + 'LinstorBaseDriver._api_rsc_dfn_create') def test_create_volume_from_snapshot(self, m_rsc_dfn_create, m_api_reply, m_snap_vd_restore, m_lin_nodes, m_snap_rsc_restore, m_rsc_create, m_vol_extend, m_sp_list): m_rsc_dfn_create.return_value = True m_api_reply.return_value = True m_snap_vd_restore.return_value = True m_nodes = [] m_lin_nodes.return_value = m_nodes m_snap_rsc_restore.return_value = True m_rsc_create.return_value = True m_vol_extend.return_value = True m_sp_list.return_value = ( self._fake_driver.fake_api_storage_pool_list()) # No exception should be raised self.assertIsNone(self.driver.create_volume_from_snapshot( CINDER_VOLUME, SNAPSHOT)) @mock.patch(DRIVER + 'LinstorBaseDriver._get_api_storage_pool_list') @mock.patch(DRIVER + 'LinstorBaseDriver._get_api_volume_extend') @mock.patch(DRIVER + 'LinstorBaseDriver._api_rsc_create') @mock.patch(DRIVER + 'LinstorBaseDriver._api_snapshot_resource_restore') @mock.patch(DRIVER + 'LinstorBaseDriver._get_linstor_nodes') @mock.patch(DRIVER + 'LinstorBaseDriver._api_snapshot_volume_dfn_restore') @mock.patch(DRIVER + 'LinstorBaseDriver._check_api_reply') @mock.patch(DRIVER + 'LinstorBaseDriver._api_rsc_dfn_create') def test_create_volume_from_snapshot_fail_restore(self, m_rsc_dfn_create, m_api_reply, m_snap_vd_restore, m_lin_nodes, m_snap_rsc_restore, m_rsc_create, m_vol_extend, m_sp_list): m_rsc_dfn_create.return_value = True m_api_reply.return_value = True m_snap_vd_restore.return_value = True m_nodes = [] m_lin_nodes.return_value = m_nodes m_snap_rsc_restore.return_value = False m_rsc_create.return_value = True m_vol_extend.return_value = True m_sp_list.return_value = ( self._fake_driver.fake_api_storage_pool_list()) # Failing to restore a snapshot should raise an exception self.assertRaises(cinder_exception.VolumeBackendAPIException, self.driver.create_volume_from_snapshot, CINDER_VOLUME, SNAPSHOT) @mock.patch(DRIVER + 'LinstorBaseDriver.delete_volume') @mock.patch(DRIVER + 'LinstorBaseDriver._get_api_storage_pool_list') @mock.patch(DRIVER + 'LinstorBaseDriver._get_api_volume_extend') @mock.patch(DRIVER + 'LinstorBaseDriver._api_rsc_create') @mock.patch(DRIVER + 'LinstorBaseDriver._api_snapshot_resource_restore') @mock.patch(DRIVER + 'LinstorBaseDriver._get_linstor_nodes') @mock.patch(DRIVER + 'LinstorBaseDriver._api_snapshot_volume_dfn_restore') @mock.patch(DRIVER + 'LinstorBaseDriver._check_api_reply') @mock.patch(DRIVER + 'LinstorBaseDriver._api_rsc_dfn_create') def test_create_volume_from_snapshot_fail_extend(self, m_rsc_dfn_create, m_api_reply, m_snap_vd_restore, m_lin_nodes, m_snap_rsc_restore, m_rsc_create, m_vol_extend, m_sp_list, m_delete_volume): m_rsc_dfn_create.return_value = True 
m_api_reply.return_value = False m_snap_vd_restore.return_value = True m_nodes = [] m_lin_nodes.return_value = m_nodes m_snap_rsc_restore.return_value = True m_rsc_create.return_value = True m_vol_extend.return_value = True m_sp_list.return_value = ( self._fake_driver.fake_api_storage_pool_list()) m_delete_volume.return_value = True # Failing to extend the volume after a snapshot restoration should # raise an exception new_volume = CINDER_VOLUME new_volume['size'] = 2 self.assertRaises(cinder_exception.VolumeBackendAPIException, self.driver.create_volume_from_snapshot, new_volume, SNAPSHOT) @mock.patch(DRIVER + 'LinstorBaseDriver._check_api_reply') @mock.patch(DRIVER + 'LinstorBaseDriver._api_rsc_create') @mock.patch(DRIVER + 'LinstorBaseDriver._api_volume_dfn_create') @mock.patch(DRIVER + 'LinstorBaseDriver._api_rsc_dfn_create') @mock.patch(DRIVER + 'LinstorBaseDriver._api_storage_pool_create') @mock.patch(DRIVER + 'LinstorBaseDriver._get_api_storage_pool_dfn_list') @mock.patch(DRIVER + 'LinstorBaseDriver._get_api_node_list') @mock.patch(DRIVER + 'LinstorBaseDriver._get_api_storage_pool_list') def test_create_volume_fail_no_linstor_nodes(self, m_sp_list, m_node_list, m_spd_list, m_sp_create, m_rsc_dfn_create, m_vol_dfn_create, m_rsc_create, m_api_reply): m_sp_list.return_value = [] m_node_list.return_value = [] m_spd_list.return_value = ( self._fake_driver.fake_api_storage_pool_dfn_list()) m_sp_create.return_value = True m_rsc_dfn_create.return_value = True m_vol_dfn_create.return_value = True m_rsc_create.return_value = True m_api_reply.return_value = True test_volume = CINDER_VOLUME test_volume['migration_status'] = ('migrating:', str(VOLUME_NAMES['cinder'])) test_volume['display_name'] = 'test_volume' test_volume['host'] = 'node_one' test_volume['size'] = 1 self.assertRaises(cinder_exception.VolumeBackendAPIException, self.driver.create_volume, test_volume) @mock.patch(DRIVER + 'LinstorBaseDriver._check_api_reply') @mock.patch(DRIVER + 'LinstorBaseDriver._api_rsc_create') @mock.patch(DRIVER + 'LinstorBaseDriver._api_volume_dfn_create') @mock.patch(DRIVER + 'LinstorBaseDriver._api_rsc_dfn_create') @mock.patch(DRIVER + 'LinstorBaseDriver._api_storage_pool_create') @mock.patch(DRIVER + 'LinstorBaseDriver._get_api_storage_pool_dfn_list') @mock.patch(DRIVER + 'LinstorBaseDriver._get_api_node_list') @mock.patch(DRIVER + 'LinstorBaseDriver._get_api_storage_pool_list') def test_create_volume_fail_rsc_create(self, m_sp_list, m_node_list, m_spd_list, m_sp_create, m_rsc_dfn_create, m_vol_dfn_create, m_rsc_create, m_api_reply): m_sp_list.return_value = ( self._fake_driver.fake_api_storage_pool_list()) m_node_list.return_value = self._fake_driver.fake_api_node_list() m_spd_list.return_value = ( self._fake_driver.fake_api_storage_pool_dfn_list()) m_sp_create.return_value = True m_rsc_dfn_create.return_value = True m_vol_dfn_create.return_value = True m_rsc_create.return_value = True m_api_reply.return_value = False test_volume = CINDER_VOLUME test_volume['migration_status'] = ('migrating:', str(VOLUME_NAMES['cinder'])) test_volume['display_name'] = 'test_volume' test_volume['host'] = 'node_one' test_volume['size'] = 1 self.assertRaises(cinder_exception.VolumeBackendAPIException, self.driver.create_volume, test_volume) @mock.patch(DRIVER + 'LinstorBaseDriver._check_api_reply') @mock.patch(DRIVER + 'LinstorBaseDriver._api_rsc_create') @mock.patch(DRIVER + 'LinstorBaseDriver._api_volume_dfn_create') @mock.patch(DRIVER + 'LinstorBaseDriver._api_rsc_dfn_create') @mock.patch(DRIVER + 
'LinstorBaseDriver._api_storage_pool_create') @mock.patch(DRIVER + 'LinstorBaseDriver._get_api_storage_pool_dfn_list') @mock.patch(DRIVER + 'LinstorBaseDriver._get_api_node_list') @mock.patch(DRIVER + 'LinstorBaseDriver._get_api_storage_pool_list') def test_create_volume(self, m_sp_list, m_node_list, m_spd_list, m_sp_create, m_rsc_dfn_create, m_vol_dfn_create, m_rsc_create, m_api_reply): m_sp_list.return_value = ( self._fake_driver.fake_api_storage_pool_list()) m_node_list.return_value = self._fake_driver.fake_api_node_list() m_spd_list.return_value = ( self._fake_driver.fake_api_storage_pool_dfn_list()) m_sp_create.return_value = True m_rsc_dfn_create.return_value = True m_vol_dfn_create.return_value = True m_rsc_create.return_value = True m_api_reply.return_value = True test_volume = CINDER_VOLUME test_volume['migration_status'] = ('migrating:', str(VOLUME_NAMES['cinder'])) test_volume['display_name'] = 'test_volume' test_volume['host'] = 'node_one' test_volume['size'] = 1 val = self.driver.create_volume(test_volume) expected = {} self.assertEqual(expected, val) @mock.patch(DRIVER + 'LinstorBaseDriver._api_rsc_auto_delete') @mock.patch(DRIVER + 'LinstorBaseDriver._check_api_reply') @mock.patch(DRIVER + 'LinstorBaseDriver._api_rsc_dfn_delete') @mock.patch(DRIVER + 'LinstorBaseDriver._api_volume_dfn_delete') @mock.patch(DRIVER + 'LinstorBaseDriver._api_rsc_delete') @mock.patch(DRIVER + 'LinstorBaseDriver._get_api_resource_list') def test_delete_volume_fail_incomplete(self, m_rsc_list, m_rsc_delete, m_vol_dfn_delete, m_rsc_dfn_delete, m_api_reply, m_rsc_auto_delete): m_rsc_list.return_value = self._fake_driver.fake_api_resource_list() m_rsc_delete.return_value = True m_vol_dfn_delete.return_value = True m_rsc_dfn_delete.return_value = True m_api_reply.return_value = False m_rsc_auto_delete.return_value = True test_volume = CINDER_VOLUME test_volume['display_name'] = 'linstor_test' test_volume['host'] = 'node_one' test_volume['size'] = 1 self.assertRaises(cinder_exception.VolumeBackendAPIException, self.driver.delete_volume, test_volume) @mock.patch(DRIVER + 'LinstorBaseDriver._api_rsc_auto_delete') @mock.patch(DRIVER + 'LinstorBaseDriver._get_diskless_nodes') @mock.patch(DRIVER + 'LinstorBaseDriver._check_api_reply') @mock.patch(DRIVER + 'LinstorBaseDriver._api_rsc_dfn_delete') @mock.patch(DRIVER + 'LinstorBaseDriver._api_volume_dfn_delete') @mock.patch(DRIVER + 'LinstorBaseDriver._api_rsc_delete') @mock.patch(DRIVER + 'LinstorBaseDriver._get_api_resource_list') def test_delete_volume_fail_diskless_remove(self, m_rsc_list, m_rsc_delete, m_vol_dfn_delete, m_rsc_dfn_delete, m_api_reply, m_diskless, m_rsc_auto_delete): m_rsc_list.return_value = self._fake_driver.fake_api_resource_list() m_rsc_delete.return_value = False m_vol_dfn_delete.return_value = True m_rsc_dfn_delete.return_value = True m_api_reply.return_value = False m_diskless.return_value = ['foo'] m_rsc_auto_delete.return_value = True test_volume = CINDER_VOLUME test_volume['display_name'] = 'linstor_test' test_volume['host'] = 'node_one' test_volume['size'] = 1 # Raises exception for failing to delete a diskless resource self.assertRaises(cinder_exception.VolumeBackendAPIException, self.driver.delete_volume, test_volume) @mock.patch(DRIVER + 'LinstorBaseDriver._api_rsc_auto_delete') @mock.patch(DRIVER + 'LinstorBaseDriver._get_snapshot_nodes') @mock.patch(DRIVER + 'LinstorBaseDriver._get_diskless_nodes') @mock.patch(DRIVER + 'LinstorBaseDriver._check_api_reply') @mock.patch(DRIVER + 'LinstorBaseDriver._api_rsc_dfn_delete') 
@mock.patch(DRIVER + 'LinstorBaseDriver._api_volume_dfn_delete') @mock.patch(DRIVER + 'LinstorBaseDriver._api_rsc_delete') @mock.patch(DRIVER + 'LinstorBaseDriver._get_api_resource_list') def test_delete_volume_fail_diskful_remove(self, m_rsc_list, m_rsc_delete, m_vol_dfn_delete, m_rsc_dfn_delete, m_api_reply, m_diskless, m_snap_nodes, m_rsc_auto_delete): m_rsc_list.return_value = self._fake_driver.fake_api_resource_list() m_rsc_delete.return_value = False m_vol_dfn_delete.return_value = True m_rsc_dfn_delete.return_value = True m_api_reply.return_value = False m_diskless.return_value = [] m_snap_nodes.return_value = ['foo'] m_rsc_auto_delete.return_value = True test_volume = CINDER_VOLUME test_volume['display_name'] = 'linstor_test' test_volume['host'] = 'node_one' test_volume['size'] = 1 # Raises exception for failing to delete a diskful resource self.assertRaises(cinder_exception.VolumeBackendAPIException, self.driver.delete_volume, test_volume) @mock.patch(DRIVER + 'LinstorBaseDriver._api_rsc_auto_delete') @mock.patch(DRIVER + 'LinstorBaseDriver._get_snapshot_nodes') @mock.patch(DRIVER + 'LinstorBaseDriver._get_diskless_nodes') @mock.patch(DRIVER + 'LinstorBaseDriver._check_api_reply') @mock.patch(DRIVER + 'LinstorBaseDriver._api_rsc_dfn_delete') @mock.patch(DRIVER + 'LinstorBaseDriver._api_volume_dfn_delete') @mock.patch(DRIVER + 'LinstorBaseDriver._api_rsc_delete') @mock.patch(DRIVER + 'LinstorBaseDriver._get_api_resource_list') def test_delete_volume_fail_volume_definition(self, m_rsc_list, m_rsc_delete, m_vol_dfn_delete, m_rsc_dfn_delete, m_api_reply, m_diskless, m_snap_nodes, m_rsc_auto_delete): m_rsc_list.return_value = self._fake_driver.fake_api_resource_list() m_rsc_delete.return_value = True m_vol_dfn_delete.return_value = False m_rsc_dfn_delete.return_value = True m_api_reply.return_value = False m_diskless.return_value = [] m_snap_nodes.return_value = [] m_rsc_auto_delete.return_value = True test_volume = CINDER_VOLUME test_volume['display_name'] = 'linstor_test' test_volume['host'] = 'node_one' test_volume['size'] = 1 # Raises exception for failing to delete a volume definition self.assertRaises(cinder_exception.VolumeBackendAPIException, self.driver.delete_volume, test_volume) @mock.patch(DRIVER + 'LinstorBaseDriver._api_rsc_auto_delete') @mock.patch(DRIVER + 'LinstorBaseDriver._check_api_reply') @mock.patch(DRIVER + 'LinstorBaseDriver._api_rsc_dfn_delete') @mock.patch(DRIVER + 'LinstorBaseDriver._api_volume_dfn_delete') @mock.patch(DRIVER + 'LinstorBaseDriver._api_rsc_delete') @mock.patch(DRIVER + 'LinstorBaseDriver._get_api_resource_list') def test_delete_volume(self, m_rsc_list, m_rsc_delete, m_vol_dfn_delete, m_rsc_dfn_delete, m_api_reply, m_rsc_auto_delete): m_rsc_list.return_value = self._fake_driver.fake_api_resource_list() m_rsc_delete.return_value = True m_vol_dfn_delete.return_value = True m_rsc_dfn_delete.return_value = True m_api_reply.return_value = True m_rsc_auto_delete.return_value = True test_volume = CINDER_VOLUME test_volume['display_name'] = 'linstor_test' test_volume['host'] = 'node_one' test_volume['size'] = 1 val = self.driver.delete_volume(test_volume) expected = True self.assertEqual(expected, val) @mock.patch(DRIVER + 'LinstorBaseDriver._check_api_reply') @mock.patch(DRIVER + 'LinstorBaseDriver._get_api_volume_extend') def test_extend_volume_success(self, m_vol_extend, m_api_reply): m_vol_extend.return_value = True m_api_reply.return_value = True # No exception should be raised self.driver.extend_volume(CINDER_VOLUME, 2) @mock.patch(DRIVER + 
'LinstorBaseDriver._check_api_reply') @mock.patch(DRIVER + 'LinstorBaseDriver._get_api_volume_extend') def test_extend_volume_fail(self, m_vol_extend, m_api_reply): m_vol_extend.return_value = False m_api_reply.return_value = False self.assertRaises(cinder_exception.VolumeBackendAPIException, self.driver.extend_volume, CINDER_VOLUME, 2) def test_migrate_volume(self): m_ctxt = {} m_volume = {} m_host = '' val = self.driver.migrate_volume(m_ctxt, m_volume, m_host) expected = (False, None) self.assertEqual(expected, val) class LinstorIscsiDriverTestCase(test.TestCase): def __init__(self, *args, **kwargs): super(LinstorIscsiDriverTestCase, self).__init__(*args, **kwargs) def setUp(self): super(LinstorIscsiDriverTestCase, self).setUp() self._mock = mock.Mock() self._fake_driver = LinstorAPIFakeDriver() self.configuration = mock.Mock(conf.Configuration) self.configuration.iscsi_helper = 'tgtadm' self.driver = drv.LinstorIscsiDriver( configuration=self.configuration, h_name='tgtadm') self.driver.VERSION = '0.0.7' self.driver.default_rsc_size = 1 self.driver.default_vg_name = 'vg-1' self.driver.default_downsize_factor = int('4096') self.driver.default_pool = STORAGE_POOL_DEF_RESP[0] self.driver.host_name = 'node_one' self.driver.diskless = True self.driver.location_info = 'LinstorIscsi:linstor://localhost' self.driver.default_backend_name = 'lin-test-driver' self.driver.configuration.reserved_percentage = int('0') self.driver.configuration.max_over_subscription_ratio = int('0') @mock.patch(DRIVER + 'LinstorIscsiDriver._get_api_resource_list') @mock.patch(DRIVER + 'LinstorIscsiDriver._get_volume_stats') def test_iscsi_get_volume_stats(self, m_vol_stats, m_rsc_list): m_vol_stats.return_value = copy.deepcopy(VOLUME_STATS_RESP) m_rsc_list.return_value = RESOURCE_LIST val = self.driver.get_volume_stats() expected = copy.deepcopy(VOLUME_STATS_RESP) expected["storage_protocol"] = 'iSCSI' expected["pools"][0]['location_info'] = ( 'LinstorIscsiDriver:' + expected["pools"][0]['location_info']) self.assertEqual(expected, val) @mock.patch(DRIVER + 'linstor') def test_iscsi_check_for_setup_error_pass(self, m_linstor): m_linstor.return_value = True # No exception should be raised self.driver.check_for_setup_error() class LinstorDrbdDriverTestCase(test.TestCase): def __init__(self, *args, **kwargs): super(LinstorDrbdDriverTestCase, self).__init__(*args, **kwargs) def setUp(self): super(LinstorDrbdDriverTestCase, self).setUp() self._mock = mock.Mock() self._fake_driver = LinstorAPIFakeDriver() self.configuration = mock.Mock(conf.Configuration) self.driver = drv.LinstorDrbdDriver( configuration=self.configuration) self.driver.VERSION = '0.0.7' self.driver.default_rsc_size = 1 self.driver.default_vg_name = 'vg-1' self.driver.default_downsize_factor = int('4096') self.driver.default_pool = STORAGE_POOL_DEF_RESP[0] self.driver.host_name = 'node_one' self.driver.diskless = True self.driver.location_info = 'LinstorDrbd:linstor://localhost' self.driver.default_backend_name = 'lin-test-driver' self.driver.configuration.reserved_percentage = int('0') self.driver.configuration.max_over_subscription_ratio = int('0') @mock.patch(DRIVER + 'LinstorDrbdDriver._get_rsc_path') def test_drbd_return_drbd_config(self, m_rsc_path): m_rsc_path.return_value = '/dev/drbd1005' val = self.driver._return_drbd_config(CINDER_VOLUME) expected = { 'driver_volume_type': 'local', 'data': { "device_path": str(m_rsc_path.return_value) } } self.assertEqual(expected, val) @mock.patch(DRIVER + 'LinstorDrbdDriver._get_api_storage_pool_list') def 
    test_drbd_node_in_sp(self, m_sp_list):
        m_sp_list.return_value = (
            self._fake_driver.fake_api_storage_pool_list())

        val = self.driver._node_in_sp('node-1')
        self.assertTrue(val)

    @mock.patch(DRIVER + 'LinstorDrbdDriver._get_volume_stats')
    def test_drbd_get_volume_stats(self, m_vol_stats):
        m_vol_stats.return_value = copy.deepcopy(VOLUME_STATS_RESP)

        val = self.driver.get_volume_stats()
        expected = copy.deepcopy(VOLUME_STATS_RESP)
        expected["storage_protocol"] = 'DRBD'
        expected["pools"][0]['location_info'] = (
            'LinstorDrbdDriver:' + expected["pools"][0]['location_info'])
        self.assertEqual(expected, val)

    @mock.patch(DRIVER + 'linstor')
    def test_drbd_check_for_setup_error_pass(self, m_linstor):
        m_linstor.return_value = True
        # No exception should be raised
        self.driver.check_for_setup_error()

    @mock.patch(DRIVER + 'LinstorDrbdDriver._get_rsc_path')
    @mock.patch(DRIVER + 'LinstorDrbdDriver._check_api_reply')
    @mock.patch(DRIVER + 'LinstorDrbdDriver._api_rsc_create')
    @mock.patch(DRIVER + 'LinstorDrbdDriver._node_in_sp')
    def test_drbd_initialize_connection_pass(self, m_node_sp, m_rsc_create,
                                             m_check, m_rsc_path):
        m_node_sp.return_value = True
        m_rsc_create.return_value = True
        m_check.return_value = True
        m_rsc_path.return_value = '/dev/drbd1000'

        connector = {}
        connector["host"] = 'wp-u16-cinder-dev-lg'

        val = self.driver.initialize_connection(CINDER_VOLUME, connector)
        expected = {
            'driver_volume_type': 'local',
            'data': {
                "device_path": str(m_rsc_path.return_value)
            }
        }
        self.assertEqual(expected, val)

    @mock.patch(DRIVER + 'LinstorDrbdDriver._check_api_reply')
    @mock.patch(DRIVER + 'LinstorDrbdDriver._api_rsc_delete')
    @mock.patch(DRIVER + 'LinstorDrbdDriver._node_in_sp')
    def test_drbd_terminate_connection_pass(self, m_node_sp, m_rsc_create,
                                            m_check):
        m_node_sp.return_value = True
        m_rsc_create.return_value = True
        m_check.return_value = True

        connector = {}
        connector["host"] = 'wp-u16-cinder-dev-lg'

        # No exception should be raised
        self.driver.terminate_connection(CINDER_VOLUME, connector)
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0
cinder-27.0.0/cinder/tests/unit/volume/drivers/test_lvm_driver.py0000664000175000017500000015572000000000000025235 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os import socket from unittest import mock import ddt from oslo_concurrency import processutils from oslo_config import cfg from oslo_utils import importutils from cinder.brick.local_dev import lvm as brick_lvm from cinder import db from cinder import exception from cinder.objects import fields from cinder.tests import fake_driver from cinder.tests.unit.brick import fake_lvm from cinder.tests.unit import fake_constants as fake from cinder.tests.unit import fake_volume from cinder.tests.unit import utils as tests_utils from cinder.tests.unit.volume import test_driver from cinder.volume import configuration as conf from cinder.volume.drivers import lvm import cinder.volume.volume_utils from cinder.volume import volume_utils CONF = cfg.CONF fake_opt = [ cfg.StrOpt('fake_opt1', default='fake', help='fake opts') ] @ddt.ddt class LVMVolumeDriverTestCase(test_driver.BaseDriverTestCase): """Test case for VolumeDriver""" driver_name = "cinder.volume.drivers.lvm.LVMVolumeDriver" FAKE_VOLUME = {'name': 'test1', 'id': 'test1'} def test___init___share_target_not_supported(self): """Fail to use shared targets if target driver doesn't support it.""" original_import = importutils.import_object def wrap_target_as_no_shared_support(*args, **kwargs): res = original_import(*args, **kwargs) self.mock_object(res, 'SHARED_TARGET_SUPPORT', False) return res self.patch('oslo_utils.importutils.import_object', side_effect=wrap_target_as_no_shared_support) self.configuration.lvm_share_target = True self.assertRaises(exception.InvalidConfigurationValue, lvm.LVMVolumeDriver, configuration=self.configuration) def test___init___secondary_ips_not_supported(self): """Fail to use secondary ips if target driver doesn't support it.""" original_import = importutils.import_object def wrap_target_as_no_secondary_ips_support(*args, **kwargs): res = original_import(*args, **kwargs) self.mock_object(res, 'SECONDARY_IP_SUPPORT', False) return res self.patch('oslo_utils.importutils.import_object', side_effect=wrap_target_as_no_secondary_ips_support) self.configuration.target_secondary_ip_addresses = True self.assertRaises(exception.InvalidConfigurationValue, lvm.LVMVolumeDriver, configuration=self.configuration) def test___init___share_target_supported(self): """OK to use shared targets if target driver supports it.""" original_import = importutils.import_object def wrap_target_as_no_shared_support(*args, **kwargs): res = original_import(*args, **kwargs) self.mock_object(res, 'SHARED_TARGET_SUPPORT', True) return res self.patch('oslo_utils.importutils.import_object', side_effect=wrap_target_as_no_shared_support) self.configuration.lvm_share_target = True lvm.LVMVolumeDriver(configuration=self.configuration) @ddt.data(True, False) def test___init___share_target_not_requested(self, supports_shared): """For non shared it works regardless of target driver support.""" original_import = importutils.import_object def wrap_target_as_no_shared_support(*args, **kwargs): res = original_import(*args, **kwargs) self.mock_object(res, 'SHARED_TARGET_SUPPORT', supports_shared) return res self.patch('oslo_utils.importutils.import_object', side_effect=wrap_target_as_no_shared_support) self.configuration.lvm_share_target = False lvm.LVMVolumeDriver(configuration=self.configuration) @mock.patch.object(os.path, 'exists', return_value=True) @mock.patch.object(fake_driver.FakeLoggingVolumeDriver, 'create_export') def test_delete_volume_invalid_parameter(self, _mock_create_export, mock_exists): self.configuration.volume_clear = 'zero' 
self.configuration.volume_clear_size = 0 lvm_driver = lvm.LVMVolumeDriver(configuration=self.configuration) # Test volume without 'size' field and 'volume_size' field self.assertRaises(exception.InvalidParameterValue, lvm_driver._delete_volume, self.FAKE_VOLUME) @mock.patch.object(os.path, 'exists', return_value=False) @mock.patch.object(fake_driver.FakeLoggingVolumeDriver, 'create_export') def test_delete_volume_bad_path(self, _mock_create_export, mock_exists): self.configuration.volume_clear = 'zero' self.configuration.volume_clear_size = 0 self.configuration.volume_type = 'default' volume = dict(self.FAKE_VOLUME, size=1) lvm_driver = lvm.LVMVolumeDriver(configuration=self.configuration) self.assertRaises(exception.VolumeBackendAPIException, lvm_driver._delete_volume, volume) @mock.patch.object(volume_utils, 'clear_volume') @mock.patch.object(volume_utils, 'copy_volume') @mock.patch.object(fake_driver.FakeLoggingVolumeDriver, 'create_export') def test_delete_volume_thinlvm_snap(self, _mock_create_export, mock_copy, mock_clear): vg_obj = fake_lvm.FakeBrickLVM('cinder-volumes', False, None, 'default') self.configuration.volume_clear = 'zero' self.configuration.volume_clear_size = 0 self.configuration.lvm_type = 'thin' self.configuration.target_helper = 'tgtadm' lvm_driver = lvm.LVMVolumeDriver( configuration=self.configuration, vg_obj=vg_obj) uuid = '00000000-0000-0000-0000-c3aa7ee01536' fake_snapshot = {'name': 'volume-' + uuid, 'id': uuid, 'size': 123} lvm_driver._delete_volume(fake_snapshot, is_snapshot=True) @mock.patch.object(volume_utils, 'get_all_volume_groups', return_value=[{'name': 'cinder-volumes'}]) @mock.patch('cinder.brick.local_dev.lvm.LVM.get_lvm_version', return_value=(2, 2, 100)) def test_check_for_setup_error(self, _mock_get_version, vgs): vg_obj = fake_lvm.FakeBrickLVM('cinder-volumes', False, None, 'auto') configuration = conf.Configuration(fake_opt, 'fake_group') lvm_driver = lvm.LVMVolumeDriver( configuration=configuration, vg_obj=vg_obj) lvm_driver.delete_snapshot = mock.Mock() volume = tests_utils.create_volume(self.context, host=socket.gethostname()) volume_id = volume['id'] backup = {} backup['volume_id'] = volume_id backup['user_id'] = fake.USER_ID backup['project_id'] = fake.PROJECT_ID backup['host'] = socket.gethostname() backup['availability_zone'] = '1' backup['display_name'] = 'test_check_for_setup_error' backup['display_description'] = 'test_check_for_setup_error' backup['container'] = 'fake' backup['status'] = fields.BackupStatus.CREATING backup['fail_reason'] = '' backup['service'] = 'fake' backup['parent_id'] = None backup['size'] = 5 * 1024 * 1024 backup['object_count'] = 22 db.backup_create(self.context, backup) lvm_driver.check_for_setup_error() def test_retype_volume(self): vol = tests_utils.create_volume(self.context) new_type = fake.VOLUME_TYPE_ID diff = {} host = 'fake_host' retyped = self.volume.driver.retype(self.context, vol, new_type, diff, host) self.assertTrue(retyped) def test_update_migrated_volume(self): fake_volume_id = fake.VOLUME_ID fake_new_volume_id = fake.VOLUME2_ID fake_provider = 'fake_provider' original_volume_name = CONF.volume_name_template % fake_volume_id current_name = CONF.volume_name_template % fake_new_volume_id fake_volume = tests_utils.create_volume(self.context) fake_volume['id'] = fake_volume_id fake_new_volume = tests_utils.create_volume(self.context) fake_new_volume['id'] = fake_new_volume_id fake_new_volume['provider_location'] = fake_provider fake_vg = fake_lvm.FakeBrickLVM('cinder-volumes', False, None, 
'default') with mock.patch.object(self.volume.driver, 'vg') as vg: vg.return_value = fake_vg vg.rename_volume.return_value = None update = self.volume.driver.update_migrated_volume(self.context, fake_volume, fake_new_volume, 'available') vg.rename_volume.assert_called_once_with(current_name, original_volume_name) self.assertEqual({'_name_id': None, 'provider_location': None}, update) vg.rename_volume.reset_mock() vg.rename_volume.side_effect = processutils.ProcessExecutionError update = self.volume.driver.update_migrated_volume(self.context, fake_volume, fake_new_volume, 'available') vg.rename_volume.assert_called_once_with(current_name, original_volume_name) self.assertEqual({'_name_id': fake_new_volume_id, 'provider_location': fake_provider}, update) def test_create_volume_from_snapshot_none_sparse(self): with mock.patch.object(self.volume.driver, 'vg'), \ mock.patch.object(self.volume.driver, '_create_volume'), \ mock.patch.object(volume_utils, 'copy_volume') as mock_copy: # Test case for thick LVM src_volume = tests_utils.create_volume(self.context) snapshot_ref = tests_utils.create_snapshot(self.context, src_volume['id']) dst_volume = tests_utils.create_volume(self.context) self.volume.driver.create_volume_from_snapshot(dst_volume, snapshot_ref) volume_path = self.volume.driver.local_path(dst_volume) snapshot_path = self.volume.driver.local_path(snapshot_ref) volume_size = 1024 block_size = '1M' mock_copy.assert_called_with(snapshot_path, volume_path, volume_size, block_size, execute=self.volume.driver._execute, sparse=False) def test_create_volume_from_snapshot_sparse(self): self.configuration.lvm_type = 'thin' lvm_driver = lvm.LVMVolumeDriver(configuration=self.configuration) with mock.patch.object(lvm_driver, 'vg'): # Test case for thin LVM lvm_driver._sparse_copy_volume = True src_volume = tests_utils.create_volume(self.context) snapshot_ref = tests_utils.create_snapshot(self.context, src_volume['id']) dst_volume = tests_utils.create_volume(self.context) lvm_driver.create_volume_from_snapshot(dst_volume, snapshot_ref) def test_create_volume_from_snapshot_sparse_extend(self): self.configuration.lvm_type = 'thin' lvm_driver = lvm.LVMVolumeDriver(configuration=self.configuration) with mock.patch.object(lvm_driver, 'vg'), \ mock.patch.object(lvm_driver, 'extend_volume') as mock_extend: # Test case for thin LVM lvm_driver._sparse_copy_volume = True src_volume = tests_utils.create_volume(self.context) snapshot_ref = tests_utils.create_snapshot(self.context, src_volume['id']) dst_volume = tests_utils.create_volume(self.context) dst_volume['size'] = snapshot_ref['volume_size'] + 1 lvm_driver.create_volume_from_snapshot(dst_volume, snapshot_ref) mock_extend.assert_called_with(dst_volume, dst_volume['size']) @mock.patch.object(cinder.volume.volume_utils, 'get_all_volume_groups', return_value=[{'name': 'cinder-volumes'}]) @mock.patch('cinder.brick.local_dev.lvm.LVM.update_volume_group_info') @mock.patch('cinder.brick.local_dev.lvm.LVM.get_all_physical_volumes') @mock.patch('cinder.brick.local_dev.lvm.LVM.supports_thin_provisioning', return_value=True) def test_lvm_type_auto_thin_pool_exists(self, *_unused_mocks): configuration = conf.Configuration(fake_opt, 'fake_group') configuration.lvm_type = 'auto' vg_obj = fake_lvm.FakeBrickLVM('cinder-volumes', False, None, 'default') lvm_driver = lvm.LVMVolumeDriver(configuration=configuration, vg_obj=vg_obj) lvm_driver.check_for_setup_error() self.assertEqual('thin', lvm_driver.configuration.lvm_type) @mock.patch.object(cinder.volume.volume_utils, 
'get_all_volume_groups', return_value=[{'name': 'cinder-volumes'}]) @mock.patch.object(cinder.brick.local_dev.lvm.LVM, 'get_volumes', return_value=[]) @mock.patch('cinder.brick.local_dev.lvm.LVM.update_volume_group_info') @mock.patch('cinder.brick.local_dev.lvm.LVM.get_all_physical_volumes') @mock.patch('cinder.brick.local_dev.lvm.LVM.supports_thin_provisioning', return_value=True) def test_lvm_type_auto_no_lvs(self, *_unused_mocks): configuration = conf.Configuration(fake_opt, 'fake_group') configuration.lvm_type = 'auto' vg_obj = fake_lvm.FakeBrickLVM('cinder-volumes', False, None, 'default') lvm_driver = lvm.LVMVolumeDriver(configuration=configuration, vg_obj=vg_obj) lvm_driver.check_for_setup_error() self.assertEqual('thin', lvm_driver.configuration.lvm_type) @mock.patch.object(cinder.volume.volume_utils, 'get_all_volume_groups', return_value=[{'name': 'cinder-volumes'}]) @mock.patch('cinder.brick.local_dev.lvm.LVM.get_lv_info') @mock.patch('cinder.brick.local_dev.lvm.LVM.activate_lv') @mock.patch('cinder.brick.local_dev.lvm.LVM.' 'supports_lvchange_ignoreskipactivation') @mock.patch('cinder.brick.local_dev.lvm.LVM.update_volume_group_info') @mock.patch('cinder.brick.local_dev.lvm.LVM.get_all_physical_volumes') @mock.patch('cinder.brick.local_dev.lvm.LVM.supports_thin_provisioning', return_value=False) def test_lvm_type_auto_no_thin_support(self, *_unused_mocks): self.mock_object(cinder.brick.local_dev.lvm.LVM, 'get_lvm_version', return_value=(2, 2, 107)) configuration = conf.Configuration(fake_opt, 'fake_group') configuration.lvm_type = 'auto' lvm_driver = lvm.LVMVolumeDriver(configuration=configuration) lvm_driver.check_for_setup_error() self.assertEqual('default', lvm_driver.configuration.lvm_type) @mock.patch.object(cinder.volume.volume_utils, 'get_all_volume_groups', return_value=[{'name': 'cinder-volumes'}]) @mock.patch('cinder.brick.local_dev.lvm.LVM.get_lv_info') @mock.patch('cinder.brick.local_dev.lvm.LVM.activate_lv') @mock.patch('cinder.brick.local_dev.lvm.LVM.' 
'supports_lvchange_ignoreskipactivation') @mock.patch('cinder.brick.local_dev.lvm.LVM.update_volume_group_info') @mock.patch('cinder.brick.local_dev.lvm.LVM.get_all_physical_volumes') @mock.patch('cinder.brick.local_dev.lvm.LVM.get_volume') @mock.patch('cinder.brick.local_dev.lvm.LVM.supports_thin_provisioning', return_value=False) def test_lvm_type_auto_no_thin_pool(self, *_unused_mocks): self.mock_object(cinder.brick.local_dev.lvm.LVM, 'get_lvm_version', return_value=(2, 2, 107)) configuration = conf.Configuration(fake_opt, 'fake_group') configuration.lvm_type = 'auto' lvm_driver = lvm.LVMVolumeDriver(configuration=configuration) lvm_driver.check_for_setup_error() self.assertEqual('default', lvm_driver.configuration.lvm_type) @mock.patch.object(lvm.LVMVolumeDriver, 'extend_volume') def test_create_cloned_volume_by_thin_snapshot(self, mock_extend): self.configuration.lvm_type = 'thin' fake_vg = mock.Mock(fake_lvm.FakeBrickLVM('cinder-volumes', False, None, 'default')) lvm_driver = lvm.LVMVolumeDriver( configuration=self.configuration, vg_obj=fake_vg) fake_volume = tests_utils.create_volume(self.context, size=1) fake_new_volume = tests_utils.create_volume(self.context, size=2) lvm_driver.create_cloned_volume(fake_new_volume, fake_volume) fake_vg.create_lv_snapshot.assert_called_once_with( fake_new_volume['name'], fake_volume['name'], 'thin') mock_extend.assert_called_once_with(fake_new_volume, 2) fake_vg.activate_lv.assert_called_once_with( fake_new_volume['name'], is_snapshot=True, permanent=True) def test_lvm_migrate_volume_no_loc_info(self): host = {'capabilities': {}} vol = {'name': 'test', 'id': 1, 'size': 1, 'status': 'available'} moved, model_update = self.volume.driver.migrate_volume(self.context, vol, host) self.assertFalse(moved) self.assertIsNone(model_update) def test_lvm_migrate_volume_bad_loc_info(self): capabilities = {'location_info': 'foo'} host = {'capabilities': capabilities} vol = {'name': 'test', 'id': 1, 'size': 1, 'status': 'available'} moved, model_update = self.volume.driver.migrate_volume(self.context, vol, host) self.assertFalse(moved) self.assertIsNone(model_update) def test_lvm_migrate_volume_diff_driver(self): capabilities = {'location_info': 'FooDriver:foo:bar:default:0'} host = {'capabilities': capabilities} vol = {'name': 'test', 'id': 1, 'size': 1, 'status': 'available'} moved, model_update = self.volume.driver.migrate_volume(self.context, vol, host) self.assertFalse(moved) self.assertIsNone(model_update) def test_lvm_migrate_volume_diff_host(self): capabilities = {'location_info': 'LVMVolumeDriver:foo:bar:default:0'} host = {'capabilities': capabilities} vol = {'name': 'test', 'id': 1, 'size': 1, 'status': 'available'} moved, model_update = self.volume.driver.migrate_volume(self.context, vol, host) self.assertFalse(moved) self.assertIsNone(model_update) def test_lvm_migrate_volume_in_use(self): hostname = socket.gethostname() capabilities = {'location_info': 'LVMVolumeDriver:%s:bar' % hostname} host = {'capabilities': capabilities} vol = {'name': 'test', 'id': 1, 'size': 1, 'status': 'in-use'} moved, model_update = self.volume.driver.migrate_volume(self.context, vol, host) self.assertFalse(moved) self.assertIsNone(model_update) @mock.patch.object(volume_utils, 'get_all_volume_groups', return_value=[{'name': 'cinder-volumes'}]) def test_lvm_migrate_volume_same_volume_group(self, vgs): hostname = socket.gethostname() capabilities = {'location_info': 'LVMVolumeDriver:%s:' 'cinder-volumes:default:0' % hostname} host = {'capabilities': capabilities} vol = 
{'name': 'test', 'id': 1, 'size': 1, 'status': 'available'} self.volume.driver.vg = fake_lvm.FakeBrickLVM('cinder-volumes', False, None, 'default') self.assertRaises(exception.VolumeBackendAPIException, self.volume.driver.migrate_volume, self.context, vol, host) @mock.patch.object(lvm.LVMVolumeDriver, '_create_volume') @mock.patch.object(brick_lvm.LVM, 'get_all_physical_volumes') @mock.patch.object(brick_lvm.LVM, 'delete') @mock.patch.object(volume_utils, 'copy_volume', side_effect=processutils.ProcessExecutionError) @mock.patch.object(volume_utils, 'get_all_volume_groups', return_value=[{'name': 'cinder-volumes'}]) def test_lvm_migrate_volume_volume_copy_error(self, vgs, copy_volume, mock_delete, mock_pvs, mock_create): self.mock_object(cinder.brick.local_dev.lvm.LVM, 'get_lvm_version', return_value=(2, 2, 107)) hostname = socket.gethostname() capabilities = {'location_info': 'LVMVolumeDriver:%s:' 'cinder-volumes:default:0' % hostname} host = {'capabilities': capabilities} vol = {'name': 'test', 'id': 1, 'size': 1, 'status': 'available'} self.volume.driver.vg = fake_lvm.FakeBrickLVM('cinder-volumes-old', False, None, 'default') self.assertRaises(processutils.ProcessExecutionError, self.volume.driver.migrate_volume, self.context, vol, host) mock_delete.assert_called_once_with(vol) @mock.patch.object(volume_utils, 'get_all_volume_groups', return_value=[{'name': 'cinder-volumes-2'}]) def test_lvm_volume_group_missing(self, vgs): hostname = socket.gethostname() capabilities = {'location_info': 'LVMVolumeDriver:%s:' 'cinder-volumes-3:default:0' % hostname} host = {'capabilities': capabilities} vol = {'name': 'test', 'id': 1, 'size': 1, 'status': 'available'} self.volume.driver.vg = fake_lvm.FakeBrickLVM('cinder-volumes', False, None, 'default') moved, model_update = self.volume.driver.migrate_volume(self.context, vol, host) self.assertFalse(moved) self.assertIsNone(model_update) def test_lvm_migrate_volume_proceed(self): self.mock_object(cinder.brick.local_dev.lvm.LVM, 'get_lvm_version', return_value=(2, 2, 107)) hostname = socket.gethostname() capabilities = {'location_info': 'LVMVolumeDriver:%s:' 'cinder-volumes-2:default:0' % hostname} host = {'capabilities': capabilities} vol = {'name': 'testvol', 'id': 1, 'size': 2, 'status': 'available'} def fake_execute(*args, **kwargs): pass def get_all_volume_groups(): # NOTE(flaper87) Return just the destination # host to test the check of dest VG existence. 
return [{'name': 'cinder-volumes-2'}] def _fake_get_all_physical_volumes(obj, root_helper, vg_name): return [{}] with mock.patch.object(brick_lvm.LVM, 'get_all_physical_volumes', return_value = [{}]), \ mock.patch.object(self.volume.driver, '_execute') \ as mock_execute, \ mock.patch.object(volume_utils, 'copy_volume') as mock_copy, \ mock.patch.object(volume_utils, 'get_all_volume_groups', side_effect = get_all_volume_groups), \ mock.patch.object(self.volume.driver, '_delete_volume'): self.volume.driver.vg = fake_lvm.FakeBrickLVM('cinder-volumes', False, None, 'default') mock_execute.return_value = ("mock_outs", "mock_errs") moved, model_update = \ self.volume.driver.migrate_volume(self.context, vol, host) self.assertTrue(moved) self.assertIsNone(model_update) mock_copy.assert_called_once_with( '/dev/mapper/cinder--volumes-testvol', '/dev/mapper/cinder--volumes--2-testvol', 2048, '1M', execute=mock_execute, sparse=False) def test_lvm_migrate_volume_proceed_with_thin(self): self.mock_object(cinder.brick.local_dev.lvm.LVM, 'get_lvm_version', return_value=(2, 2, 107)) hostname = socket.gethostname() capabilities = {'location_info': 'LVMVolumeDriver:%s:' 'cinder-volumes-2:default:0' % hostname} host = {'capabilities': capabilities} vol = {'name': 'testvol', 'id': 1, 'size': 2, 'status': 'available'} def fake_execute(*args, **kwargs): pass def get_all_volume_groups(): # NOTE(flaper87) Return just the destination # host to test the check of dest VG existence. return [{'name': 'cinder-volumes-2'}] def _fake_get_all_physical_volumes(obj, root_helper, vg_name): return [{}] self.configuration.lvm_type = 'thin' lvm_driver = lvm.LVMVolumeDriver(configuration=self.configuration) with mock.patch.object(brick_lvm.LVM, 'get_all_physical_volumes', return_value = [{}]), \ mock.patch.object(lvm_driver, '_execute') \ as mock_execute, \ mock.patch.object(volume_utils, 'copy_volume') as mock_copy, \ mock.patch.object(volume_utils, 'get_all_volume_groups', side_effect = get_all_volume_groups), \ mock.patch.object(lvm_driver, '_delete_volume'): lvm_driver.vg = fake_lvm.FakeBrickLVM('cinder-volumes', False, None, 'default') lvm_driver._sparse_copy_volume = True mock_execute.return_value = ("mock_outs", "mock_errs") moved, model_update = \ lvm_driver.migrate_volume(self.context, vol, host) self.assertTrue(moved) self.assertIsNone(model_update) mock_copy.assert_called_once_with( '/dev/mapper/cinder--volumes-testvol', '/dev/mapper/cinder--volumes--2-testvol', 2048, '1M', execute=mock_execute, sparse=True) @staticmethod def _get_manage_existing_lvs(name): """Helper method used by the manage_existing tests below.""" lvs = [{'name': 'fake_lv', 'size': '1.75'}, {'name': 'fake_lv_bad_size', 'size': 'Not a float'}] for lv in lvs: if lv['name'] == name: return lv def _setup_stubs_for_manage_existing(self): """Helper to set up common stubs for the manage_existing tests.""" self.volume.driver.vg = fake_lvm.FakeBrickLVM('cinder-volumes', False, None, 'default') @mock.patch.object(db.sqlalchemy.api, 'volume_get', side_effect=exception.VolumeNotFound( volume_id='d8cd1feb-2dcc-404d-9b15-b86fe3bec0a1')) def test_lvm_manage_existing_not_found(self, mock_vol_get): self._setup_stubs_for_manage_existing() vol_name = 'volume-d8cd1feb-2dcc-404d-9b15-b86fe3bec0a1' ref = {'source-name': 'fake_lv'} vol = {'name': vol_name, 'id': fake.VOLUME_ID, 'size': 0} with mock.patch.object(self.volume.driver.vg, 'rename_volume'): model_update = self.volume.driver.manage_existing(vol, ref) self.assertIsNone(model_update) 
@mock.patch('cinder.db.sqlalchemy.api.resource_exists', return_value=True) def test_lvm_manage_existing_already_managed(self, exists_mock): self._setup_stubs_for_manage_existing() vol_name = 'volume-d8cd1feb-2dcc-404d-9b15-b86fe3bec0a1' ref = {'source-name': vol_name} vol = {'name': 'test', 'id': 1, 'size': 0} with mock.patch.object(self.volume.driver.vg, 'rename_volume'): self.assertRaises(exception.ManageExistingAlreadyManaged, self.volume.driver.manage_existing, vol, ref) def test_lvm_manage_existing(self): """Good pass on managing an LVM volume. This test case ensures that, when a logical volume with the specified name exists, and the size is as expected, no error is returned from driver.manage_existing, and that the rename_volume function is called in the Brick LVM code with the correct arguments. """ self._setup_stubs_for_manage_existing() ref = {'source-name': 'fake_lv'} vol = {'name': 'test', 'id': fake.VOLUME_ID, 'size': 0} def _rename_volume(old_name, new_name): self.assertEqual(ref['source-name'], old_name) self.assertEqual(vol['name'], new_name) with mock.patch.object(self.volume.driver.vg, 'rename_volume') as mock_rename_volume, \ mock.patch.object(self.volume.driver.vg, 'get_volume', self._get_manage_existing_lvs): mock_rename_volume.return_value = _rename_volume size = self.volume.driver.manage_existing_get_size(vol, ref) self.assertEqual(2, size) model_update = self.volume.driver.manage_existing(vol, ref) self.assertIsNone(model_update) def test_lvm_manage_existing_bad_size(self): """Make sure correct exception on bad size returned from LVM. This test case ensures that the correct exception is raised when the information returned for the existing LVs is not in the format that the manage_existing code expects. """ self._setup_stubs_for_manage_existing() ref = {'source-name': 'fake_lv_bad_size'} vol = {'name': 'test', 'id': fake.VOLUME_ID, 'size': 2} with mock.patch.object(self.volume.driver.vg, 'get_volume', self._get_manage_existing_lvs): self.assertRaises(exception.VolumeBackendAPIException, self.volume.driver.manage_existing_get_size, vol, ref) def test_lvm_manage_existing_bad_ref(self): """Error case where specified LV doesn't exist. This test case ensures that the correct exception is raised when the caller attempts to manage a volume that does not exist. """ self._setup_stubs_for_manage_existing() ref = {'source-name': 'fake_nonexistent_lv'} vol = {'name': 'test', 'id': 1, 'size': 0, 'status': 'available'} with mock.patch.object(self.volume.driver.vg, 'get_volume', self._get_manage_existing_lvs): self.assertRaises(exception.ManageExistingInvalidReference, self.volume.driver.manage_existing_get_size, vol, ref) def test_lvm_manage_existing_snapshot(self): """Good pass on managing an LVM snapshot. This test case ensures that, when a logical volume's snapshot with the specified name exists, and the size is as expected, no error is returned from driver.manage_existing_snapshot, and that the rename_volume function is called in the Brick LVM code with the correct arguments. 
""" self._setup_stubs_for_manage_existing() ref = {'source-name': 'fake_lv'} snp = {'name': 'test', 'id': fake.SNAPSHOT_ID, 'size': 0} def _rename_volume(old_name, new_name): self.assertEqual(ref['source-name'], old_name) self.assertEqual(snp['name'], new_name) with mock.patch.object(self.volume.driver.vg, 'rename_volume') as mock_rename_volume, \ mock.patch.object(self.volume.driver.vg, 'get_volume', self._get_manage_existing_lvs): mock_rename_volume.return_value = _rename_volume size = self.volume.driver.manage_existing_snapshot_get_size( snp, ref) self.assertEqual(2, size) model_update = self.volume.driver.manage_existing_snapshot( snp, ref) self.assertIsNone(model_update) def test_lvm_manage_existing_snapshot_bad_ref(self): """Error case where specified LV snapshot doesn't exist. This test case ensures that the correct exception is raised when the caller attempts to manage a snapshot that does not exist. """ self._setup_stubs_for_manage_existing() ref = {'source-name': 'fake_nonexistent_lv'} snp = { 'name': 'test', 'id': fake.SNAPSHOT_ID, 'size': 0, 'status': 'available', } with mock.patch.object(self.volume.driver.vg, 'get_volume', self._get_manage_existing_lvs): self.assertRaises( exception.ManageExistingInvalidReference, self.volume.driver.manage_existing_snapshot_get_size, snp, ref) def test_revert_snapshot(self): self._setup_stubs_for_manage_existing() self.configuration.lvm_type = 'auto' fake_volume = tests_utils.create_volume(self.context, display_name='fake_volume') fake_snapshot = tests_utils.create_snapshot( self.context, fake_volume.id) with mock.patch.object(self.volume.driver.vg, 'revert') as mock_revert, \ mock.patch.object(self.volume.driver.vg, 'create_lv_snapshot') as mock_create, \ mock.patch.object(self.volume.driver.vg, 'deactivate_lv') as mock_deactivate, \ mock.patch.object(self.volume.driver.vg, 'activate_lv') as mock_activate: self.volume.driver.revert_to_snapshot(self.context, fake_volume, fake_snapshot) mock_revert.assert_called_once_with( self.volume.driver._escape_snapshot(fake_snapshot.name)) mock_deactivate.assert_called_once_with(fake_volume.name) mock_activate.assert_called_once_with(fake_volume.name) mock_create.assert_called_once_with( self.volume.driver._escape_snapshot(fake_snapshot.name), fake_volume.name, self.configuration.lvm_type) def test_revert_thin_snapshot(self): configuration = conf.Configuration(fake_opt, 'fake_group') configuration.lvm_type = 'thin' lvm_driver = lvm.LVMVolumeDriver(configuration=configuration) fake_volume = tests_utils.create_volume(self.context, display_name='fake_volume') fake_snapshot = tests_utils.create_snapshot( self.context, fake_volume.id) self.assertRaises(NotImplementedError, lvm_driver.revert_to_snapshot, self.context, fake_volume, fake_snapshot) def test_lvm_manage_existing_snapshot_bad_size(self): """Make sure correct exception on bad size returned from LVM. This test case ensures that the correct exception is raised when the information returned for the existing LVs is not in the format that the manage_existing_snapshot code expects. 
""" self._setup_stubs_for_manage_existing() ref = {'source-name': 'fake_lv_bad_size'} snp = {'name': 'test', 'id': fake.SNAPSHOT_ID, 'size': 2} with mock.patch.object(self.volume.driver.vg, 'get_volume', self._get_manage_existing_lvs): self.assertRaises( exception.VolumeBackendAPIException, self.volume.driver.manage_existing_snapshot_get_size, snp, ref) def test_lvm_unmanage(self): volume = tests_utils.create_volume(self.context, status='available', size=1, host=CONF.host) ret = self.volume.driver.unmanage(volume) self.assertIsNone(ret) def test_lvm_get_manageable_volumes(self): cinder_vols = [{'id': '00000000-0000-0000-0000-000000000000'}] lvs = [{'name': 'volume-00000000-0000-0000-0000-000000000000', 'size': '1.75'}, {'name': 'volume-00000000-0000-0000-0000-000000000001', 'size': '3.0'}, {'name': 'snapshot-00000000-0000-0000-0000-000000000002', 'size': '2.2'}, {'name': 'myvol', 'size': '4.0'}] self.volume.driver.vg = mock.Mock() self.volume.driver.vg.get_volumes.return_value = lvs self.volume.driver.vg.lv_is_snapshot.side_effect = [False, False, True, False] self.volume.driver.vg.lv_is_open.side_effect = [True, False] res = self.volume.driver.get_manageable_volumes(cinder_vols, None, 1000, 0, ['size'], ['asc']) exp = [{'size': 2, 'reason_not_safe': 'already managed', 'extra_info': None, 'reference': {'source-name': 'volume-00000000-0000-0000-0000-000000000000'}, 'cinder_id': '00000000-0000-0000-0000-000000000000', 'safe_to_manage': False}, {'size': 3, 'reason_not_safe': 'volume in use', 'reference': {'source-name': 'volume-00000000-0000-0000-0000-000000000001'}, 'safe_to_manage': False, 'cinder_id': None, 'extra_info': None}, {'size': 4, 'reason_not_safe': None, 'safe_to_manage': True, 'reference': {'source-name': 'myvol'}, 'cinder_id': None, 'extra_info': None}] self.assertEqual(exp, res) def test_lvm_get_manageable_snapshots(self): cinder_snaps = [{'id': '00000000-0000-0000-0000-000000000000'}] lvs = [{'name': 'snapshot-00000000-0000-0000-0000-000000000000', 'size': '1.75'}, {'name': 'volume-00000000-0000-0000-0000-000000000001', 'size': '3.0'}, {'name': 'snapshot-00000000-0000-0000-0000-000000000002', 'size': '2.2'}, {'name': 'mysnap', 'size': '4.0'}] self.volume.driver.vg = mock.Mock() self.volume.driver.vg.get_volumes.return_value = lvs self.volume.driver.vg.lv_is_snapshot.side_effect = [True, False, True, True] self.volume.driver.vg.lv_is_open.side_effect = [True, False] self.volume.driver.vg.lv_get_origin.side_effect = [ 'volume-00000000-0000-0000-0000-000000000000', 'volume-00000000-0000-0000-0000-000000000002', 'myvol'] res = self.volume.driver.get_manageable_snapshots(cinder_snaps, None, 1000, 0, ['size'], ['asc']) exp = [{'size': 2, 'reason_not_safe': 'already managed', 'reference': {'source-name': 'snapshot-00000000-0000-0000-0000-000000000000'}, 'safe_to_manage': False, 'extra_info': None, 'cinder_id': '00000000-0000-0000-0000-000000000000', 'source_reference': {'source-name': 'volume-00000000-0000-0000-0000-000000000000'}}, {'size': 3, 'reason_not_safe': 'snapshot in use', 'reference': {'source-name': 'snapshot-00000000-0000-0000-0000-000000000002'}, 'safe_to_manage': False, 'extra_info': None, 'cinder_id': None, 'source_reference': {'source-name': 'volume-00000000-0000-0000-0000-000000000002'}}, {'size': 4, 'reason_not_safe': None, 'reference': {'source-name': 'mysnap'}, 'safe_to_manage': True, 'cinder_id': None, 'source_reference': {'source-name': 'myvol'}, 'extra_info': None}] self.assertEqual(exp, res) class LVMISCSITestCase(test_driver.BaseDriverTestCase): """Test Case 
for LVMISCSIDriver""" driver_name = "cinder.volume.drivers.lvm.LVMVolumeDriver" def setUp(self): super(LVMISCSITestCase, self).setUp() self.configuration = mock.Mock(conf.Configuration) self.configuration.target_prefix = 'iqn.2010-10.org.openstack:' self.configuration.target_ip_address = '0.0.0.0' self.configuration.target_port = 3260 def _attach_volume(self): """Attach volumes to an instance.""" volume_id_list = [] for index in range(3): vol = {} vol['size'] = 0 vol_ref = db.volume_create(self.context, vol) self.volume.create_volume(self.context, vol_ref) vol_ref = db.volume_get(self.context, vol_ref['id']) # each volume has a different mountpoint mountpoint = "/dev/sd" + chr((ord('b') + index)) instance_uuid = '12345678-1234-5678-1234-567812345678' db.volume_attached(self.context, vol_ref['id'], instance_uuid, 'host', mountpoint) volume_id_list.append(vol_ref['id']) return volume_id_list def test_do_iscsi_discovery(self): self.configuration = conf.Configuration(None) iscsi_driver = \ cinder.volume.targets.tgt.TgtAdm( configuration=self.configuration) ret = ("%s dummy" % CONF.target_ip_address, '') with mock.patch('cinder.utils.execute', return_value=ret): volume = {"name": "dummy", "host": "0.0.0.0", "id": "12345678-1234-5678-1234-567812345678"} iscsi_driver._do_iscsi_discovery(volume) def test_get_iscsi_properties(self): volume = {"provider_location": '', "id": "0", "provider_auth": "a b c", "attached_mode": "rw"} iscsi_driver = \ cinder.volume.targets.tgt.TgtAdm(configuration=self.configuration) iscsi_driver._do_iscsi_discovery = lambda v: "0.0.0.0:0000,0 iqn:iqn 0" result = iscsi_driver._get_iscsi_properties(volume) self.assertEqual("0.0.0.0:0000", result["target_portal"]) self.assertEqual("iqn:iqn", result["target_iqn"]) self.assertEqual(0, result["target_lun"]) def test_get_iscsi_properties_multiple_portals(self): volume = {"provider_location": '1.1.1.1:3260;2.2.2.2:3261,1 iqn:iqn 0', "id": "0", "provider_auth": "a b c", "attached_mode": "rw"} iscsi_driver = \ cinder.volume.targets.tgt.TgtAdm(configuration=self.configuration) result = iscsi_driver._get_iscsi_properties(volume) self.assertEqual("1.1.1.1:3260", result["target_portal"]) self.assertEqual("iqn:iqn", result["target_iqn"]) self.assertEqual(0, result["target_lun"]) self.assertEqual(["1.1.1.1:3260", "2.2.2.2:3261"], result["target_portals"]) self.assertEqual(["iqn:iqn", "iqn:iqn"], result["target_iqns"]) self.assertEqual([0, 0], result["target_luns"]) @mock.patch.object(brick_lvm.LVM, 'get_volumes', return_value=[{'vg': 'fake_vg', 'name': 'fake_vol', 'size': '1000'}]) @mock.patch.object(brick_lvm.LVM, 'get_all_physical_volumes') @mock.patch.object(brick_lvm.LVM, 'get_all_volume_groups', return_value=[{'name': 'cinder-volumes', 'size': '5.52', 'available': '0.52', 'lv_count': '2', 'uuid': 'vR1JU3-FAKE-C4A9-PQFh-Mctm'}]) @mock.patch('cinder.brick.local_dev.lvm.LVM.get_lvm_version', return_value=(2, 2, 100)) def test_get_volume_stats(self, _mock_get_version, mock_vgs, mock_pvs, mock_get_volumes): self.volume.driver.vg = brick_lvm.LVM('cinder-volumes', 'sudo') self.volume.driver._update_volume_stats() stats = self.volume.driver._stats self.assertEqual( float('5.52'), stats['pools'][0]['total_capacity_gb']) self.assertEqual( float('0.52'), stats['pools'][0]['free_capacity_gb']) self.assertEqual( float('5.0'), stats['pools'][0]['provisioned_capacity_gb']) self.assertEqual( int('1'), stats['pools'][0]['total_volumes']) self.assertFalse(stats['sparse_copy_volume']) # Check value of sparse_copy_volume for thin enabled case. 
# This value is set in check_for_setup_error. self.configuration = conf.Configuration(None) self.configuration.lvm_type = 'thin' self.configuration.target_helper = 'lioadm' vg_obj = fake_lvm.FakeBrickLVM('cinder-volumes', False, None, 'default') lvm_driver = lvm.LVMVolumeDriver( configuration=self.configuration, vg_obj=vg_obj) lvm_driver.check_for_setup_error() lvm_driver.vg = brick_lvm.LVM('cinder-volumes', 'sudo') lvm_driver._update_volume_stats() stats = lvm_driver._stats self.assertTrue(stats['sparse_copy_volume']) def test_validate_connector(self): iscsi_driver =\ cinder.volume.targets.tgt.TgtAdm( configuration=self.configuration) # Validate a valid connector connector = {'ip': '10.0.0.2', 'host': 'fakehost', 'initiator': 'iqn.2012-07.org.fake:01'} iscsi_driver.validate_connector(connector) # Validate a connector without the initiator connector = {'ip': '10.0.0.2', 'host': 'fakehost'} self.assertRaises(exception.InvalidConnectorException, iscsi_driver.validate_connector, connector) def test_multiattach_terminate_connection(self): # Ensure that target_driver.terminate_connection is only called when a # single active volume attachment remains per host for each volume. host1_connector = {'ip': '10.0.0.2', 'host': 'fakehost1', 'initiator': 'iqn.2012-07.org.fake:01'} host2_connector = {'ip': '10.0.0.3', 'host': 'fakehost2', 'initiator': 'iqn.2012-07.org.fake:02'} host1_attachment1 = fake_volume.volume_attachment_ovo( self.context) host1_attachment1.connector = host1_connector host1_attachment2 = fake_volume.volume_attachment_ovo( self.context) host1_attachment2.connector = host1_connector host2_attachment = fake_volume.volume_attachment_ovo(self.context) host2_attachment.connector = host2_connector # Create a multiattach volume object with two active attachments on # host1 and another single attachment on host2. vol = fake_volume.fake_volume_obj(self.context) vol.multiattach = True vol.volume_attachment.objects.append(host1_attachment1) vol.volume_attachment.objects.append(host1_attachment2) vol.volume_attachment.objects.append(host2_attachment) self.configuration = conf.Configuration(None) vg_obj = fake_lvm.FakeBrickLVM('cinder-volumes', False, None, 'default') lvm_driver = lvm.LVMVolumeDriver( configuration=self.configuration, vg_obj=vg_obj) mock_same = self.mock_object( lvm_driver.target_driver, 'are_same_connector', side_effect=lvm_driver.target_driver.are_same_connector) with mock.patch.object(lvm_driver.target_driver, 'terminate_connection') as mock_term_conn: # Verify that terminate_connection is not called against host1 when # there are multiple active attachments against that host. self.assertTrue(lvm_driver.terminate_connection(vol, host1_connector)) mock_term_conn.assert_not_called() self.assertEqual(3, mock_same.call_count) mock_same.assert_has_calls(( mock.call(host1_connector, host1_connector), mock.call(host1_connector, host1_connector), mock.call(host2_connector, host1_connector))) mock_same.reset_mock() # Verify that terminate_connection is called against either host # when only one active attachment per host is present. 
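            # NOTE(editor): illustrative sketch only, not the driver code
            # under test. What these assertions exercise amounts to counting
            # the volume's remaining attachments whose connector matches the
            # one being detached (via are_same_connector), e.g.:
            #
            #   matches = [a for a in vol.volume_attachment.objects
            #              if are_same_connector(a.connector, connector)]
            #   # only detach the target when this is the last matching
            #   # attachment, i.e. len(matches) < 2
            #
            # Removing host1_attachment1 below leaves a single attachment per
            # host, so the target driver's terminate_connection should now be
            # reached for both connectors.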
vol.volume_attachment.objects.remove(host1_attachment1) self.assertTrue(lvm_driver.terminate_connection(vol, host1_connector)) self.assertEqual(2, mock_same.call_count) mock_same.assert_has_calls(( mock.call(host1_connector, host1_connector), mock.call(host2_connector, host1_connector))) mock_same.reset_mock() self.assertTrue(lvm_driver.terminate_connection(vol, host2_connector)) self.assertEqual(2, mock_same.call_count) mock_same.assert_has_calls(( mock.call(host1_connector, host2_connector), mock.call(host2_connector, host2_connector))) mock_same.reset_mock() mock_term_conn.assert_has_calls([mock.call(vol, host1_connector), mock.call(vol, host2_connector)]) def test_multiattach_terminate_connection_with_differing_mountpoints(self): # Ensure that target_driver.terminate_connection is only called when a # single active volume attachment remains per host for each volume # regardless of the volumes being attached under differing mountpoints. # As seen reported in bug #1825957. connector_mountpoint_1 = {'ip': '10.0.0.2', 'host': 'fakehost1', 'mountpoint': '/dev/vdb', 'initiator': 'iqn.2012-07.org.fake:01'} connector_mountpoint_2 = {'ip': '10.0.0.2', 'host': 'fakehost1', 'mountpoint': '/dev/vdc', 'initiator': 'iqn.2012-07.org.fake:01'} attachment1 = fake_volume.volume_attachment_ovo(self.context) attachment1.connector = connector_mountpoint_1 attachment2 = fake_volume.volume_attachment_ovo(self.context) attachment2.connector = connector_mountpoint_2 # Create a multiattach volume object with two active attachments on # the same host using different mountpoints within the connectors vol = fake_volume.fake_volume_obj(self.context) vol.multiattach = True vol.volume_attachment.objects.append(attachment1) vol.volume_attachment.objects.append(attachment2) self.configuration = conf.Configuration(None) vg_obj = fake_lvm.FakeBrickLVM('cinder-volumes', False, None, 'default') lvm_driver = lvm.LVMVolumeDriver( configuration=self.configuration, vg_obj=vg_obj) with mock.patch.object(lvm_driver.target_driver, 'terminate_connection') as mock_term_conn: # Verify that terminate_connection is not called when there are # multiple active attachments against the same host even when their # mountpoints do not match. self.assertTrue(lvm_driver.terminate_connection( vol, connector_mountpoint_1)) mock_term_conn.assert_not_called() # Verify that terminate_connection is called against either host # when only one active attachment per host is present. vol.volume_attachment.objects.remove(attachment1) self.assertFalse(lvm_driver.terminate_connection( vol, connector_mountpoint_2)) mock_term_conn.assert_called_with(vol, connector_mountpoint_2) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/volume/drivers/test_macrosan_drivers.py0000664000175000017500000010760000000000000026417 0ustar00zuulzuul00000000000000# Copyright (c) 2019 MacroSAN Technologies Co., Ltd. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""Tests for macrosan drivers.""" from collections import UserDict import os import socket from unittest import mock from cinder import exception from cinder.tests.unit import test from cinder.volume import configuration as conf from cinder.volume.drivers.macrosan import devop_client from cinder.volume.drivers.macrosan import driver from cinder.volume import qos_specs from cinder.volume import volume_types from cinder.volume import volume_utils test_volume = ( UserDict({'name': 'volume-728ec287-bf30-4d2d-98a8-7f1bed3f59ce', 'volume_name': 'test', 'id': '728ec287-bf30-4d2d-98a8-7f1bed3f59ce', 'provider_auth': None, 'project_id': 'project', 'display_name': 'test', 'display_description': 'test', 'host': 'controller@macrosan#MacroSAN', 'size': 10, 'provider_location': 'macrosan uuid:0x00b34201-025b0000-46b35ae7-b7deec47'})) test_volume.size = 10 test_volume.volume_type_id = '36674caf-5314-468a-a8cb-baab4f71fe44' test_volume.volume_attachment = [] test_migrate_volume = { 'name': 'volume-d42b436a-54cc-480a-916c-275b0258ef59', 'size': 10, 'volume_name': 'test', 'id': 'd42b436a-54cc-480a-916c-275b0258ef59', 'volume_id': 'd42b436a-54cc-480a-916c-275b0258ef59', 'provider_auth': None, 'project_id': 'project', 'display_name': 'test', 'display_description': 'test', 'volume_type_id': '36674caf-5314-468a-a8cb-baab4f71fe44', '_name_id': None, 'host': 'controller@macrosan#MacroSAN', 'provider_location': 'macrosan uuid:0x00b34201-00180000-9ac35425-9e288d9a'} test_snap = {'name': 'volume-728ec287-bf30-4d2d-98a8-7f1bed3f59ce', 'size': 10, 'volume_name': 'test', 'id': 'aa2419a3-c144-46af-831b-e0d914d3957b', 'volume_id': '728ec287-bf30-4d2d-98a8-7f1bed3f59ce', 'provider_auth': None, 'project_id': 'project', 'display_name': 'test', 'display_description': 'test volume', 'volume_type_id': '36674caf-5314-468a-a8cb-baab4f71fe44', 'provider_location': 'pointid: 1', 'volume_size': 10, 'volume': test_volume} test_connector = {'initiator': 'iqn.1993-08.org.debian:01:62027e12fbc', 'wwpns': ['500b342001001805', '500b342001004605'], 'wwnns': ['21000024ff2003ec', '21000024ff2003ed'], 'host': 'controller' } fake_fabric_mapping = { 'switch1': { 'target_port_wwn_list': ['500b342001001805', '500b342001004605'], 'initiator_port_wwn_list': ['21000024ff2003ec', '21000024ff2003ed'] } } expected_iscsi_properties = {'target_discovered': False, 'target_portal': '192.168.251.1:3260', 'target_iqn': 'iqn.2010-05.com.macrosan.target:controller', 'target_lun': 0, 'target_iqns': ['iqn.2010-05.com.macrosan.target:controller', 'iqn.2010-05.com.macrosan.target:controller'], 'target_portals': ['192.168.251.1:3260', '192.168.251.2:3260'], 'target_luns': [0, 0], 'volume_id': '728ec287-bf30-4d2d-98a8-7f1bed3f59ce' } expected_iscsi_connection_data = { 'client': 'devstack', 'ports': [{'ip': '192.168.251.1', 'port': 'eth-1:0:0', 'port_name': 'iSCSI-Target-1:0:0', 'target': 'iqn.2010-05.com.macrosan.target:controller'}, {'ip': '192.168.251.2', 'port': 'eth-2:0:0', 'port_name': 'iSCSI-Target-2:0:0', 'target': 'iqn.2010-05.com.macrosan.target:controller'}]} expected_initr_port_map_tgtexist = { '21:00:00:24:ff:20:03:ec': [{'port_name': 'FC-Target-1:1:1', 'wwn': '50:0b:34:20:01:00:18:05'}, {'port_name': 'FC-Target-2:1:1', 'wwn': '50:0b:34:20:01:00:46:05'}], '21:00:00:24:ff:20:03:ed': [{'port_name': 'FC-Target-1:1:1', 'wwn': '50:0b:34:20:01:00:18:05'}, {'port_name': 'FC-Target-2:1:1', 'wwn': '50:0b:34:20:01:00:46:05'}]} expected_initr_port_map_tgtnotexist = {'21:00:00:24:ff:20:03:ec': [], '21:00:00:24:ff:20:03:ed': []} expected_fctgtexist_properties = 
{'target_lun': 0, 'target_discovered': True, 'target_wwn': ['500b342001001805', '500b342001004605'], 'volume_id': '728ec287-bf30-4d2d-98a8-7f1bed3f59ce' } class FakeMacroSANFCDriver(driver.MacroSANFCDriver): """Fake MacroSAN Storage, Rewrite some methods of MacroSANFCDriver.""" def do_setup(self): self.client = FakeClient(self.sp1_ipaddr, self.sp2_ipaddr, self.username + self.passwd) self.fcsan_lookup_service = FCSanLookupService() @property def _self_node_wwns(self): return ['21000024ff2003ec', '21000024ff2003ed'] def _snapshot_name(self, snapshotid): return "aa2419a3c14446af831be0d914d3957" def _get_client_name(self, host): return 'devstack' class FCSanLookupService(object): def get_device_mapping_from_network(self, initiator_list, target_list): return fake_fabric_mapping class DummyBrickGetConnector(object): def connect_volume(self, fake_con_data): return {'path': '/dev/mapper/3600b3429d72e349d93bad6597d0000df'} def disconnect_volume(self, fake_con_data, fake_device): return None class FakeMacroSANISCSIDriver(driver.MacroSANISCSIDriver): """Fake MacroSAN Storage, Rewrite some methods of MacroSANISCSIDriver.""" def do_setup(self): self.client = FakeClient(self.sp1_ipaddr, self.sp2_ipaddr, self.username + self.passwd) self.device_uuid = '0x00b34201-028100eb-4922a092-1d54b755' @property def _self_node_wwns(self): return ["iqn.1993-08.org.debian:01:62027e12fbc"] def _snapshot_name(self, snapshotid): return "aa2419a3c14446af831be0d914d3957" def _get_iscsi_ports(self, dev_client, host): if self.client.cmd_fail: raise exception.VolumeBackendAPIException(data='Command failed.') else: return [{'ip': '192.168.251.1', 'port_name': 'iSCSI-Target-1:0:0', 'port': 'eth-1:0:0', 'target': 'iqn.2010-05.com.macrosan.target:controller'}, {'ip': '192.168.251.2', 'port_name': 'iSCSI-Target-2:0:0', 'port': 'eth-2:0:0', 'target': 'iqn.2010-05.com.macrosan.target:controller'}] def _get_client_name(self, host): return 'devstack' def _attach_volume(self, context, volume, properties, remote=False): return super(FakeMacroSANISCSIDriver, self)._attach_volume( context, volume, properties, remote) def _detach_volume(self, context, attach_info, volume, properties, force=False, remote=False, ignore_errors=True): return super(FakeMacroSANISCSIDriver, self)._detach_volume( context, attach_info, volume, properties, force, remote, ignore_errors) class FakeClient(devop_client.Client): def __init__(self, sp1_ip, sp2_ip, secret_key): self.cmd_fail = False self.tgt_notexist = False def get_raid_list(self, pool): return [{'name': 'RAID-1', 'free_cap': 1749}] def get_client(self, name): return True def create_lun(self, name, owner, pool, raids, lun_mode, size, lun_params): return True def get_pool_cap(self, pool): return 1862, 1749, 0 def delete_lun(self, name): return True def setup_snapshot_resource(self, name, res_size, raids): pass def snapshot_resource_exists(self, name): return True def create_snapshot_point(self, lun_name, snapshot_name): if self.cmd_fail: raise exception.VolumeBackendAPIException(data='Command failed') else: return True def disable_snapshot(self, volume_name): if self.cmd_fail: raise exception.VolumeBackendAPIException(data='Command failed') else: return True def delete_snapshot_resource(self, volume_name): if self.cmd_fail: raise exception.VolumeBackendAPIException(data='Command failed') else: return True def snapshot_point_exists(self, lun_name, pointid): return True def lun_exists(self, name): return True def snapshot_enabled(self, lun_name): return True def create_snapshot_view(self, view_name, 
lun_name, pointid): if self.cmd_fail: raise exception.VolumeBackendAPIException(data='Command failed') else: return True def get_snapshot_pointid(self, lun_name, snapshot_name): if self.cmd_fail: raise exception.VolumeBackendAPIException(data='Command failed') else: return 1 def delete_snapshot_view(self, view_name): return True def delete_snapshot_point(self, lun_name, pointid): return True def copy_volume_from_view(self, lun_name, view_name): return True def snapshot_copy_task_completed(self, lun_name): return True def extend_lun(self, name, raids, size): return True def initiator_exists(self, initr_wwn): return True def get_device_uuid(self): return '0x00b34201-025b0000-46b35ae7-b7deec47' def is_initiator_mapped_to_client(self, initr_wwn, client_name): return True def unmap_lun_to_it(self, lun_name, initr_wwn, tgt_port_name): if self.cmd_fail: raise exception.VolumeBackendAPIException('Command failed.') else: return None def map_lun_to_it(self, lun_name, initr_wwn, tgt_port_name, lun_id=-1): if self.cmd_fail: raise exception.VolumeBackendAPIException('Command failed.') else: return None def map_target_to_initiator(self, tgt_port_name, initr_wwn): return True def get_it_unused_id_list(self, it_type, initr_wwn, tgt_port_name): if self.cmd_fail: raise exception.VolumeBackendAPIException('Command failed.') else: return [i for i in range(511)] def enable_lun_qos(self, name, strategy): if self.cmd_fail: raise Exception() else: return None def get_fc_initr_mapped_ports(self, initr_wwns): return {'21:00:00:24:ff:20:03:ec': [{'wwn': '50:0b:34:20:01:00:18:05', 'port_name': 'FC-Target-1:1:1'}, {'wwn': '50:0b:34:20:01:00:46:05', 'port_name': 'FC-Target-2:1:1'}], '21:00:00:24:ff:20:03:ed': [{'wwn': '50:0b:34:20:01:00:18:05', 'port_name': 'FC-Target-1:1:1'}, {'wwn': '50:0b:34:20:01:00:46:05', 'port_name': 'FC-Target-2:1:1'}] } def get_fc_ports(self): if self.tgt_notexist: return [{'sp': 1, 'refcnt': 0, 'port_name': 'FC-Target-1:1:1', 'initr': '', 'online': 0, 'wwn': '50:0b:34:20:01:00:18:05', 'port': 'FC-1:1:1'}, {'sp': 2, 'refcnt': 0, 'port_name': 'FC-Target-2:1:1', 'initr': '', 'online': 0, 'wwn': '50:0b:34:20:01:00:46:05', 'port': 'FC-2:1:1'}, ] else: return [{'sp': 1, 'refcnt': 0, 'port_name': 'FC-Target-1:1:1', 'initr': '', 'online': 1, 'wwn': '50:0b:34:20:01:00:18:05', 'port': 'FC-1:1:1'}, {'sp': 2, 'refcnt': 0, 'port_name': 'FC-Target-2:1:1', 'initr': '', 'online': 1, 'wwn': '50:0b:34:20:01:00:46:05', 'port': 'FC-2:1:1'}, ] def get_lun_uuid(self, lun_name): return '0x00b34201-025b0000-46b35ae7-b7deec47' def get_lun_name(self, lun_uuid): if lun_uuid == "0x00b34201-025b0000-46b35ae7-b7deec47": return '728ec287-bf30-4d2d-98a8-7f1bed3f59ce' if lun_uuid == "0x00b34201-00180000-9ac35425-9e288d9a": return 'd42b436a-54cc-480a-916c-275b0258ef59' def get_lun_name_from_rename_file(self, name): return None def backup_lun_name_to_rename_file(self, cur_name, original_name): return None def get_lun_id(self, tgt_name, lun_name, type='FC'): return 0 def get_view_lun_id(self, tgt_name, view_name, type='FC'): return 0 class MacroSANISCSIDriverTestCase(test.TestCase): def setUp(self): super(MacroSANISCSIDriverTestCase, self).setUp() self.configuration = mock.Mock(spec=conf.Configuration) self.configuration.san_ip = "172.192.251.1, 172.192.251.2" self.configuration.san_login = "openstack" self.configuration.san_password = "passwd" self.configuration.macrosan_sdas_ipaddrs = None self.configuration.macrosan_replication_ipaddrs = None self.configuration.san_thin_provision = False self.configuration.macrosan_pool = 
'Pool-1' self.configuration.macrosan_thin_lun_extent_size = 8 self.configuration.macrosan_thin_lun_low_watermark = 8 self.configuration.macrosan_thin_lun_high_watermark = 40 self.configuration.macrosan_force_unmap_itl = False self.configuration.macrosan_snapshot_resource_ratio = 0.3 self.configuration.macrosan_log_timing = True self.configuration.macrosan_client = \ ['devstack; device1; "eth-1:0:0"; "eth-2:0:0"'] self.configuration.macrosan_client_default = "eth-1:0:0;eth-2:0:0" self.driver = FakeMacroSANISCSIDriver(configuration=self.configuration) self.driver.do_setup() @mock.patch.object(volume_types, 'get_volume_type', return_value={'qos_specs_id': '99f3d240-1b20-4b7b-9321-c6b8b86243ff', 'extra_specs': {}}) @mock.patch.object(qos_specs, 'get_qos_specs', return_value={'specs': {'qos-strategy': 'QoS-1'}}) def test_create_volume(self, mock_volume_type, mock_qos): ret = self.driver.create_volume(test_volume) actual = ret['provider_location'] self.assertEqual(test_volume['provider_location'], actual) @mock.patch.object(volume_types, 'get_volume_type', return_value={'qos_specs_id': '99f3d240-1b20-4b7b-9321-c6b8b86243ff', 'extra_specs': {}}) @mock.patch.object(qos_specs, 'get_qos_specs', return_value={'specs': {'qos-strategy': 'QoS-1'}}) def test_create_qos_volume(self, mock_volume_type, mock_qos): test_volume.volume_type_id = 'a2ed23e0-76c4-426f-a574-a1327275e725' ret = self.driver.create_volume(test_volume) actual = ret['provider_location'] self.assertEqual(test_volume['provider_location'], actual) @mock.patch.object(volume_types, 'get_volume_type', return_value={'qos_specs_id': '99f3d240-1b20-4b7b-9321-c6b8b86243ff', 'extra_specs': {}}) @mock.patch.object(qos_specs, 'get_qos_specs', return_value={'specs': {'qos-strategy': 'QoS-1'}}) def test_delete_volume(self, mock_volume_type, mock_qos): self.driver.delete_volume(test_volume) def test_create_snapshot(self): self.driver.client.snappoid = True ret = self.driver.create_snapshot(test_snap) actual = ret['provider_location'] self.assertEqual(test_snap['provider_location'], actual) def test_delete_snapshot(self): self.driver.delete_snapshot(test_snap) @mock.patch.object(volume_types, 'get_volume_type', return_value={'qos_specs_id': '99f3d240-1b20-4b7b-9321-c6b8b86243ff', 'extra_specs': {}}) @mock.patch.object(qos_specs, 'get_qos_specs', return_value={'specs': {'qos-strategy': 'QoS-1'}}) @mock.patch.object(socket, 'gethostname', return_value='controller') @mock.patch.object(volume_utils, 'brick_get_connector', return_value=DummyBrickGetConnector()) @mock.patch.object(volume_utils, 'copy_volume', return_value=None) @mock.patch.object(os.path, 'realpath', return_value=None) def test_create_volume_from_snapshot(self, mock_volume_type, mock_qos, mock_hostname, mock_brick_get_connector, mock_copy_volume, mock_os_path): ret = self.driver.create_volume_from_snapshot(test_volume, test_snap) actual = ret['provider_location'] self.assertEqual(test_volume['provider_location'], actual) @mock.patch.object(volume_types, 'get_volume_type', return_value={'qos_specs_id': '99f3d240-1b20-4b7b-9321-c6b8b86243ff', 'extra_specs': {}}) @mock.patch.object(qos_specs, 'get_qos_specs', return_value={'specs': {'qos-strategy': 'QoS-1'}}) @mock.patch.object(socket, 'gethostname', return_value='controller') @mock.patch.object(volume_utils, 'brick_get_connector', return_value=DummyBrickGetConnector()) @mock.patch.object(volume_utils, 'copy_volume', return_value=None) @mock.patch.object(os.path, 'realpath', return_value=None) def test_create_cloned_volume(self, 
mock_volume_types, mock_qos, mock_hostname, mock_brick_get_connector, mock_copy_volume, mock_os_path): self.driver.client.snappoid = True ret = self.driver.create_cloned_volume(test_volume, test_volume) actual = ret['provider_location'] self.assertEqual(test_volume['provider_location'], actual) @mock.patch.object(volume_types, 'get_volume_type', return_value={'qos_specs_id': '99f3d240-1b20-4b7b-9321-c6b8b86243ff', 'extra_specs': {}}) @mock.patch.object(qos_specs, 'get_qos_specs', return_value={'specs': {'qos-strategy': 'QoS-1'}}) def test_extend_volume(self, mock_volume_type, mock_qos): self.driver.extend_volume(test_volume, 15) def test_update_migrated_volume(self): expected = {'_name_id': test_migrate_volume['id'], 'provider_location': test_migrate_volume['provider_location']} ret = self.driver.update_migrated_volume("", test_volume, test_migrate_volume) self.assertEqual(expected, ret) @mock.patch.object(volume_types, 'get_volume_type', return_value={'qos_specs_id': '99f3d240-1b20-4b7b-9321-c6b8b86243ff', 'extra_specs': {}}) @mock.patch.object(qos_specs, 'get_qos_specs', return_value={'specs': {'qos-strategy': 'QoS-1'}}) def test_initialize_connection(self, mock_volume_type, mock_qos): ret = self.driver.initialize_connection(test_volume, test_connector) self.assertEqual(expected_iscsi_properties, ret['data']) @mock.patch.object(volume_types, 'get_volume_type', return_value={'qos_specs_id': '99f3d240-1b20-4b7b-9321-c6b8b86243ff', 'extra_specs': {}}) @mock.patch.object(qos_specs, 'get_qos_specs', return_value={'specs': {'qos-strategy': 'QoS-1'}}) def test_terminate_connection(self, mock_volume_type, mock_qos): ret = self.driver.terminate_connection(test_volume, test_connector) self.assertEqual({'driver_volume_type': 'iSCSI', 'data': expected_iscsi_connection_data}, ret) def test_get_raid_list(self): expected = ["RAID-1"] ret = self.driver.get_raid_list(20) self.assertEqual(expected, ret) def test_get_volume_stats(self): ret = self.driver.get_volume_stats(True) expected = "iSCSI" self.assertEqual(expected, ret['storage_protocol']) @mock.patch.object(volume_types, 'get_volume_type', return_value={'qos_specs_id': '99f3d240-1b20-4b7b-9321-c6b8b86243ff', 'extra_specs': {}}) @mock.patch.object(qos_specs, 'get_qos_specs', return_value={'specs': {'qos-strategy': 'QoS-1'}}) def test_create_qos_volume_fail(self, mock_volume_type, mock_qos): test_volume.volume_type_id = 'a2ed23e0-76c4-426f-a574-a1327275e725' self.driver.client.cmd_fail = True self.assertRaises(exception.VolumeBackendAPIException, self.driver.create_volume, test_volume) def test_create_snapshot_fail(self): self.driver.client.cmd_fail = True self.assertRaises(exception.VolumeBackendAPIException, self.driver.create_snapshot, test_snap) @mock.patch.object(volume_types, 'get_volume_type', return_value={'qos_specs_id': '99f3d240-1b20-4b7b-9321-c6b8b86243ff', 'extra_specs': {}}) @mock.patch.object(qos_specs, 'get_qos_specs', return_value={'specs': {'qos-strategy': 'QoS-1'}}) @mock.patch.object(socket, 'gethostname', return_value='controller') @mock.patch.object(volume_utils, 'brick_get_connector', return_value=DummyBrickGetConnector()) @mock.patch.object(volume_utils, 'copy_volume', return_value=None) @mock.patch.object(os.path, 'realpath', return_value=None) def test_create_volume_from_snapshot_fail(self, mock_volume_type, mock_qos, mock_hostname, mock_brick_get_connector, mock_copy_volume, mock_os_path): self.driver.client.cmd_fail = True self.assertRaises(exception.VolumeBackendAPIException, self.driver.create_volume_from_snapshot, 
test_volume, test_snap) @mock.patch.object(volume_types, 'get_volume_type', return_value={'qos_specs_id': '99f3d240-1b20-4b7b-9321-c6b8b86243ff', 'extra_specs': {}}) @mock.patch.object(qos_specs, 'get_qos_specs', return_value={'specs': {'qos-strategy': 'QoS-1'}}) @mock.patch.object(socket, 'gethostname', return_value='controller') @mock.patch.object(volume_utils, 'brick_get_connector', return_value=DummyBrickGetConnector()) @mock.patch.object(volume_utils, 'copy_volume', return_value=None) @mock.patch.object(os.path, 'realpath', return_value=None) def test_create_cloned_volume_fail(self, mock_volume_types, mock_qos, mock_hostname, mock_brick_get_connector, mock_copy_volume, mock_os_path): self.driver.client.cmd_fail = True self.assertRaises(exception.VolumeBackendAPIException, self.driver.create_cloned_volume, test_volume, test_volume) @mock.patch.object(volume_types, 'get_volume_type', return_value={'qos_specs_id': '99f3d240-1b20-4b7b-9321-c6b8b86243ff', 'extra_specs': {}}) @mock.patch.object(qos_specs, 'get_qos_specs', return_value={'specs': {'qos-strategy': 'QoS-1'}}) def test_initialize_connection_fail(self, mock_volume_type, mock_qos): self.driver.client.cmd_fail = True self.assertRaises(exception.VolumeBackendAPIException, self.driver.initialize_connection, test_volume, test_connector) @mock.patch.object(volume_types, 'get_volume_type', return_value={'qos_specs_id': '99f3d240-1b20-4b7b-9321-c6b8b86243ff', 'extra_specs': {}}) @mock.patch.object(qos_specs, 'get_qos_specs', return_value={'specs': {'qos-strategy': 'QoS-1'}}) def test_terminate_connection_fail(self, mock_volume_type, mock_qos): self.driver.client.cmd_fail = True self.assertRaises(exception.VolumeBackendAPIException, self.driver.terminate_connection, test_volume, test_connector) def test_get_raid_list_fail(self): self.assertRaises(exception.VolumeBackendAPIException, self.driver.get_raid_list, 2000) class MacroSANFCDriverTestCase(test.TestCase): def setUp(self): super(MacroSANFCDriverTestCase, self).setUp() self.configuration = mock.Mock(spec=conf.Configuration) self.configuration.san_ip = \ "172.192.251.1, 172.192.251.2" self.configuration.san_login = "openstack" self.configuration.san_password = "passwd" self.configuration.macrosan_sdas_ipaddrs = None self.configuration.macrosan_replication_ipaddrs = None self.configuration.san_thin_provision = False self.configuration.macrosan_pool = 'Pool-1' self.configuration.macrosan_thin_lun_extent_size = 8 self.configuration.macrosan_thin_lun_low_watermark = 8 self.configuration.macrosan_thin_lun_high_watermark = 40 self.configuration.macrosan_force_unmap_itl = False self.configuration.macrosan_snapshot_resource_ratio = 0.3 self.configuration.macrosan_log_timing = True self.configuration.macrosan_host_name = 'devstack' self.configuration.macrosan_fc_use_sp_port_nr = 1 self.configuration.macrosan_fc_keep_mapped_ports = True self.configuration.macrosan_host_name = 'devstack' self.configuration.macrosan_client = \ ['devstack; device1; "eth-1:0:0"; "eth-2:0:0"'] self.configuration.macrosan_client_default = \ "eth-1:0:0;eth-2:0:0" self.driver = FakeMacroSANFCDriver(configuration=self.configuration) self.driver.do_setup() def test_get_initr_port_map_tgtnotexist(self): self.driver.client.tgt_notexist = True ret = self.driver._get_initr_port_map(self.driver.client, test_connector['wwpns']) self.assertEqual(expected_initr_port_map_tgtnotexist, ret) def test_get_initr_port_map_tgtexist(self): ret = self.driver._get_initr_port_map(self.driver.client, test_connector['wwpns']) 
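        # NOTE(editor): expected_initr_port_map_tgtexist (defined near the
        # top of this module) keys each initiator WWPN to the list of online
        # FC target ports it can reach, e.g.:
        #
        #   ret['21:00:00:24:ff:20:03:ec'][0]['port_name']
        #   # -> 'FC-Target-1:1:1'
        #
        # In the tgt_notexist case above, FakeClient.get_fc_ports reports the
        # ports as offline, so those lists come back empty.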
self.assertEqual(expected_initr_port_map_tgtexist, ret) @mock.patch.object(volume_types, 'get_volume_type', return_value={'qos_specs_id': '99f3d240-1b20-4b7b-9321-c6b8b86243ff', 'extra_specs': {}}) @mock.patch.object(qos_specs, 'get_qos_specs', return_value={'specs': {'qos-strategy': 'QoS-1'}}) def test_initialize_connection(self, mock_volume_types, mock_qos): ret = self.driver.initialize_connection(test_volume, test_connector) self.assertEqual(expected_fctgtexist_properties, ret['data']) @mock.patch.object(volume_types, 'get_volume_type', return_value={'qos_specs_id': '99f3d240-1b20-4b7b-9321-c6b8b86243ff', 'extra_specs': {}}) @mock.patch.object(qos_specs, 'get_qos_specs', return_value={'specs': {'qos-strategy': 'QoS-1'}}) def test_terminate_connection(self, mock_volume_types, mock_qos): ret = self.driver.terminate_connection(test_volume, test_connector) self.assertEqual({'driver_volume_type': 'fibre_channel', 'data': {}}, ret) @mock.patch.object(volume_types, 'get_volume_type', return_value={'qos_specs_id': '99f3d240-1b20-4b7b-9321-c6b8b86243ff', 'extra_specs': {}}) @mock.patch.object(qos_specs, 'get_qos_specs', return_value={'specs': {'qos-strategy': 'QoS-1'}}) @mock.patch.object(socket, 'gethostname', return_value='controller') @mock.patch.object(volume_utils, 'brick_get_connector', return_value=DummyBrickGetConnector()) @mock.patch.object(volume_utils, 'copy_volume', return_value=None) @mock.patch.object(os.path, 'realpath', return_value=None) def test_create_volume_from_snapshot(self, mock_volume_types, mock_qos, mock_hostname, mock_brick_get_connector, mock_copy_volume, mock_os_path): ret = self.driver.create_volume_from_snapshot(test_volume, test_snap) actual = ret['provider_location'] self.assertEqual(test_volume['provider_location'], actual) @mock.patch.object(volume_types, 'get_volume_type', return_value={ 'qos_specs_id': '99f3d240-1b20-4b7b-9321-c6b8b86243ff', 'extra_specs': {}}) @mock.patch.object(qos_specs, 'get_qos_specs', return_value={'specs': {'qos-strategy': 'QoS-1'}}) @mock.patch.object(socket, 'gethostname', return_value='controller') @mock.patch.object(volume_utils, 'brick_get_connector', return_value=DummyBrickGetConnector()) @mock.patch.object(volume_utils, 'copy_volume', return_value=None) @mock.patch.object(os.path, 'realpath', return_value=None) def test_create_cloned_volume(self, mock_volume_types, mock_qos, mock_hostname, mock_brick_get_connector, mock_copy_volume, mock_os_path): self.driver.client.snappoid = True ret = self.driver.create_cloned_volume(test_volume, test_volume) actual = ret['provider_location'] self.assertEqual(test_volume['provider_location'], actual) @mock.patch.object(volume_types, 'get_volume_type', return_value={'qos_specs_id': '99f3d240-1b20-4b7b-9321-c6b8b86243ff', 'extra_specs': {}}) @mock.patch.object(qos_specs, 'get_qos_specs', return_value={'specs': {'qos-strategy': 'QoS-1'}}) @mock.patch.object(socket, 'gethostname', return_value='controller') @mock.patch.object(volume_utils, 'brick_get_connector', return_value=DummyBrickGetConnector()) @mock.patch.object(volume_utils, 'copy_volume', return_value=None) @mock.patch.object(os.path, 'realpath', return_value=None) def test_create_volume_from_snapshot_fail(self, mock_volume_types, mock_qos, mock_hostname, mock_brick_get_connector, mock_copy_volume, mock_os_path): self.driver.client.cmd_fail = True self.assertRaises(exception.VolumeBackendAPIException, self.driver.create_volume_from_snapshot, test_volume, test_snap) @mock.patch.object(volume_types, 'get_volume_type', 
return_value={'qos_specs_id': '99f3d240-1b20-4b7b-9321-c6b8b86243ff', 'extra_specs': {}}) @mock.patch.object(qos_specs, 'get_qos_specs', return_value={'specs': {'qos-strategy': 'QoS-1'}}) @mock.patch.object(socket, 'gethostname', return_value='controller') @mock.patch.object(volume_utils, 'brick_get_connector', return_value=DummyBrickGetConnector()) @mock.patch.object(volume_utils, 'copy_volume', return_value=None) @mock.patch.object(os.path, 'realpath', return_value=None) def test_create_cloned_volume_fail(self, mock_volume_types, mock_qos, mock_hostname, mock_brick_get_connector, mock_copy_volume, mock_os_path): self.driver.client.cmd_fail = True self.assertRaises(exception.VolumeBackendAPIException, self.driver.create_cloned_volume, test_volume, test_volume) @mock.patch.object(volume_types, 'get_volume_type', return_value={'qos_specs_id': '99f3d240-1b20-4b7b-9321-c6b8b86243ff', 'extra_specs': {}}) @mock.patch.object(qos_specs, 'get_qos_specs', return_value={'specs': {'qos-strategy': 'QoS-1'}}) def test_initialize_connection_fail(self, mock_volume_types, mock_qos): self.driver.client.cmd_fail = True self.assertRaises(exception.VolumeBackendAPIException, self.driver.initialize_connection, test_volume, test_connector) @mock.patch.object(volume_types, 'get_volume_type', return_value={'qos_specs_id': '99f3d240-1b20-4b7b-9321-c6b8b86243ff', 'extra_specs': {}}) @mock.patch.object(qos_specs, 'get_qos_specs', return_value={'specs': {'qos-strategy': 'QoS-1'}}) def test_terminate_connection_fail(self, mock_volume_types, mock_qos): self.driver.client.cmd_fail = True self.assertRaises(exception.VolumeBackendAPIException, self.driver.terminate_connection, test_volume, test_connector) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/volume/drivers/test_nfs.py0000664000175000017500000022307500000000000023651 0ustar00zuulzuul00000000000000# Copyright (c) 2012 NetApp, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""Unit tests for the NFS driver module.""" import errno import os from unittest import mock import castellan import ddt from oslo_utils import imageutils from oslo_utils import units from cinder import context from cinder import exception from cinder.image import image_utils from cinder.tests.unit import fake_constants as fake from cinder.tests.unit import fake_snapshot from cinder.tests.unit import fake_volume from cinder.tests.unit.keymgr import fake as fake_keymgr from cinder.tests.unit import test from cinder.volume import configuration as conf from cinder.volume.drivers import nfs from cinder.volume.drivers import remotefs from cinder.volume import volume_utils class KeyObject(object): def get_encoded(arg): return "asdf".encode('utf-8') class RemoteFsDriverTestCase(test.TestCase): TEST_FILE_NAME = 'test.txt' TEST_EXPORT = 'nas-host1:/export' TEST_MNT_POINT = '/mnt/nas' def setUp(self): super(RemoteFsDriverTestCase, self).setUp() self._driver = remotefs.RemoteFSDriver() self.configuration = conf.Configuration(None) self.configuration.append_config_values(nfs.nfs_opts) self.configuration.append_config_values(remotefs.nas_opts) self.override_config('nas_secure_file_permissions', 'false') self.override_config('nas_secure_file_operations', 'false') self.override_config('nfs_snapshot_support', True) self.override_config('max_over_subscription_ratio', 1.0) self.override_config('reserved_percentage', 5) self._driver = remotefs.RemoteFSDriver( configuration=self.configuration) mock_exc = mock.patch.object(self._driver, '_execute') self._execute = mock_exc.start() self.addCleanup(mock_exc.stop) def test_create_sparsed_file(self): self._driver._create_sparsed_file('/path', 1) self._execute.assert_called_once_with('truncate', '-s', '1G', '/path', run_as_root=True) def test_create_regular_file(self): self._driver._create_regular_file('/path', 1) self._execute.assert_called_once_with('dd', 'if=/dev/zero', 'of=/path', 'bs=1M', 'count=1024', run_as_root=True) def test_create_qcow2_file(self): file_size = 1 self._driver._create_qcow2_file('/path', file_size) self._execute.assert_called_once_with('qemu-img', 'create', '-f', 'qcow2', '-o', 'preallocation=metadata', '/path', '%s' % str(file_size * units.Gi), run_as_root=True) def test_set_rw_permissions_for_all(self): self._driver._set_rw_permissions_for_all('/path') self._execute.assert_called_once_with('chmod', 'ugo+rw', '/path', run_as_root=True) @mock.patch.object(remotefs, 'LOG') def test_set_rw_permissions_with_secure_file_permissions(self, LOG): self._driver._mounted_shares = [self.TEST_EXPORT] self.override_config('nas_secure_file_permissions', 'true') self._driver._set_rw_permissions(self.TEST_FILE_NAME) self.assertFalse(LOG.warning.called) @mock.patch.object(remotefs, 'LOG') def test_set_rw_permissions_without_secure_file_permissions(self, LOG): self.override_config('nas_secure_file_permissions', 'false') self._driver._set_rw_permissions(self.TEST_FILE_NAME) self.assertTrue(LOG.warning.called) warn_msg = "%(path)s is being set with open permissions: %(perm)s" LOG.warning.assert_called_once_with( warn_msg, {'path': self.TEST_FILE_NAME, 'perm': 'ugo+rw'}) @mock.patch('os.path.join') @mock.patch('os.path.isfile', return_value=False) def test_determine_nas_security_options_when_auto_and_new_install( self, mock_isfile, mock_join): """Test the setting of the NAS Security Option In this test case, we will create the marker file. No pre-exxisting Cinder volumes found during bootup. 
""" self._driver._mounted_shares = [self.TEST_EXPORT] file_path = '%s/.cinderSecureEnvIndicator' % self.TEST_MNT_POINT is_new_install = True self._driver._ensure_shares_mounted = mock.Mock() nas_mount = self._driver._get_mount_point_for_share = mock.Mock( return_value=self.TEST_MNT_POINT) mock_join.return_value = file_path secure_file_permissions = 'auto' nas_option = self._driver._determine_nas_security_option_setting( secure_file_permissions, nas_mount, is_new_install) self.assertEqual('true', nas_option) secure_file_operations = 'auto' nas_option = self._driver._determine_nas_security_option_setting( secure_file_operations, nas_mount, is_new_install) self.assertEqual('true', nas_option) @mock.patch('os.path.join') @mock.patch('os.path.isfile') def test_determine_nas_security_options_when_auto_and_new_install_exists( self, isfile, join): """Test the setting of the NAS Security Option In this test case, the marker file already exists. Cinder volumes found during bootup. """ drv = self._driver drv._mounted_shares = [self.TEST_EXPORT] file_path = '%s/.cinderSecureEnvIndicator' % self.TEST_MNT_POINT is_new_install = False drv._ensure_shares_mounted = mock.Mock() nas_mount = drv._get_mount_point_for_share = mock.Mock( return_value=self.TEST_MNT_POINT) join.return_value = file_path isfile.return_value = True secure_file_permissions = 'auto' nas_option = drv._determine_nas_security_option_setting( secure_file_permissions, nas_mount, is_new_install) self.assertEqual('true', nas_option) secure_file_operations = 'auto' nas_option = drv._determine_nas_security_option_setting( secure_file_operations, nas_mount, is_new_install) self.assertEqual('true', nas_option) @mock.patch('os.path.join') @mock.patch('os.path.isfile') def test_determine_nas_security_options_when_auto_and_old_install(self, isfile, join): """Test the setting of the NAS Security Option In this test case, the marker file does not exist. There are also pre-existing Cinder volumes. """ drv = self._driver drv._mounted_shares = [self.TEST_EXPORT] file_path = '%s/.cinderSecureEnvIndicator' % self.TEST_MNT_POINT is_new_install = False drv._ensure_shares_mounted = mock.Mock() nas_mount = drv._get_mount_point_for_share = mock.Mock( return_value=self.TEST_MNT_POINT) join.return_value = file_path isfile.return_value = False secure_file_permissions = 'auto' nas_option = drv._determine_nas_security_option_setting( secure_file_permissions, nas_mount, is_new_install) self.assertEqual('false', nas_option) secure_file_operations = 'auto' nas_option = drv._determine_nas_security_option_setting( secure_file_operations, nas_mount, is_new_install) self.assertEqual('false', nas_option) def test_determine_nas_security_options_when_admin_set_true(self): """Test the setting of the NAS Security Option In this test case, the Admin set the flag to 'true'. 
""" drv = self._driver drv._mounted_shares = [self.TEST_EXPORT] is_new_install = False drv._ensure_shares_mounted = mock.Mock() nas_mount = drv._get_mount_point_for_share = mock.Mock( return_value=self.TEST_MNT_POINT) secure_file_permissions = 'true' nas_option = drv._determine_nas_security_option_setting( secure_file_permissions, nas_mount, is_new_install) self.assertEqual('true', nas_option) secure_file_operations = 'true' nas_option = drv._determine_nas_security_option_setting( secure_file_operations, nas_mount, is_new_install) self.assertEqual('true', nas_option) def test_determine_nas_security_options_when_admin_set_false(self): """Test the setting of the NAS Security Option In this test case, the Admin set the flag to 'false'. """ drv = self._driver drv._mounted_shares = [self.TEST_EXPORT] is_new_install = False drv._ensure_shares_mounted = mock.Mock() nas_mount = drv._get_mount_point_for_share = mock.Mock( return_value=self.TEST_MNT_POINT) secure_file_permissions = 'false' nas_option = drv._determine_nas_security_option_setting( secure_file_permissions, nas_mount, is_new_install) self.assertEqual('false', nas_option) secure_file_operations = 'false' nas_option = drv._determine_nas_security_option_setting( secure_file_operations, nas_mount, is_new_install) self.assertEqual('false', nas_option) @mock.patch.object(remotefs, 'LOG') def test_set_nas_security_options(self, LOG): """Test setting of NAS Security options. The RemoteFS driver will force set options to false. The derived objects will provide an inherited interface to properly set options. """ drv = self._driver is_new_install = False drv.set_nas_security_options(is_new_install) self.assertEqual('false', drv.configuration.nas_secure_file_operations) self.assertEqual('false', drv.configuration.nas_secure_file_permissions) self.assertTrue(LOG.warning.called) def test_secure_file_operations_enabled_true(self): """Test nas_secure_file_operations = 'true' Networked file system based drivers may support secure file operations. This test verifies the settings when secure. """ drv = self._driver self.override_config('nas_secure_file_operations', 'true') ret_flag = drv.secure_file_operations_enabled() self.assertTrue(ret_flag) def test_secure_file_operations_enabled_false(self): """Test nas_secure_file_operations = 'false' Networked file system based drivers may support secure file operations. This test verifies the settings when not secure. 
""" drv = self._driver self.override_config('nas_secure_file_operations', 'false') ret_flag = drv.secure_file_operations_enabled() self.assertFalse(ret_flag) # NFS configuration scenarios NFS_CONFIG1 = {'max_over_subscription_ratio': 1.0, 'reserved_percentage': 0, 'nfs_sparsed_volumes': True, 'nfs_qcow2_volumes': False, 'nas_secure_file_permissions': 'false', 'nas_secure_file_operations': 'false'} NFS_CONFIG2 = {'max_over_subscription_ratio': 10.0, 'reserved_percentage': 5, 'nfs_sparsed_volumes': False, 'nfs_qcow2_volumes': True, 'nas_secure_file_permissions': 'true', 'nas_secure_file_operations': 'true'} NFS_CONFIG3 = {'max_over_subscription_ratio': 15.0, 'reserved_percentage': 10, 'nfs_sparsed_volumes': False, 'nfs_qcow2_volumes': False, 'nas_secure_file_permissions': 'auto', 'nas_secure_file_operations': 'auto'} NFS_CONFIG4 = {'max_over_subscription_ratio': 20.0, 'reserved_percentage': 60, 'nfs_sparsed_volumes': True, 'nfs_qcow2_volumes': True, 'nas_secure_file_permissions': 'false', 'nas_secure_file_operations': 'true'} QEMU_IMG_INFO_OUT1 = """{ "filename": "%(volid)s", "format": "raw", "virtual-size": %(size_b)s, "actual-size": 173000 }""" QEMU_IMG_INFO_OUT2 = """{ "filename": "%(volid)s", "format": "qcow2", "virtual-size": %(size_b)s, "actual-size": 196000, "cluster-size": 65536, "format-specific": { "compat": "1.1", "lazy-refcounts": false, "refcount-bits": 16, "corrupt": false } }""" QEMU_IMG_INFO_OUT3 = """{ "filename": "volume-%(volid)s.%(snapid)s", "format": "qcow2", "virtual-size": %(size_b)s, "actual-size": 196000, "cluster-size": 65536, "backing-filename": "volume-%(volid)s", "backing-filename-format": "qcow2", "format-specific": { "compat": "1.1", "lazy-refcounts": false, "refcount-bits": 16, "corrupt": false } }""" QEMU_IMG_INFO_OUT4 = """{ "filename": "volume-%(volid)s.%(snapid)s", "format": "raw", "virtual-size": %(size_b)s, "actual-size": 196000, "cluster-size": 65536, "backing-filename": "volume-%(volid)s", "backing-filename-format": "raw" }""" QEMU_IMG_INFO_OUT5 = """{ "filename": "volume-%(volid)s.%(snapid)s", "format": "qcow2", "virtual-size": %(size_b)s, "actual-size": 196000, "encrypted": true, "cluster-size": 65536, "backing-filename": "volume-%(volid)s", "backing-filename-format": "raw", "format-specific": { "type": "luks", "data": { "ivgen-alg": "plain64", "hash-alg": "sha256", "cipher-alg": "aes-256", "uuid": "386f8626-33f0-4683-a517-78ddfe385e33", "cipher-mode": "xts", "slots": [ { "active": true, "iters": 1892498, "key offset": 4096, "stripes": 4000 }, { "active": false, "key offset": 262144 }, { "active": false, "key offset": 520192 }, { "active": false, "key offset": 778240 }, { "active": false, "key offset": 1036288 }, { "active": false, "key offset": 1294336 }, { "active": false, "key offset": 1552384 }, { "active": false, "key offset": 1810432 } ], "payload-offset": 2068480, "master-key-iters": 459347 }, "corrupt": false } }""" @ddt.ddt class NfsDriverTestCase(test.TestCase): """Test case for NFS driver.""" TEST_NFS_HOST = 'nfs-host1' TEST_NFS_SHARE_PATH = '/export' TEST_NFS_EXPORT1 = '%s:%s' % (TEST_NFS_HOST, TEST_NFS_SHARE_PATH) TEST_NFS_EXPORT2 = 'nfs-host2:/export' TEST_NFS_EXPORT2_OPTIONS = '-o intr' TEST_SIZE_IN_GB = 1 TEST_MNT_POINT = '/mnt/nfs' TEST_MNT_POINT_BASE_EXTRA_SLASH = '/opt/stack/data/cinder//mnt' TEST_MNT_POINT_BASE = '/mnt/test' TEST_LOCAL_PATH = '/mnt/nfs/volume-123' TEST_FILE_NAME = 'test.txt' TEST_SHARES_CONFIG_FILE = '/etc/cinder/test-shares.conf' TEST_NFS_EXPORT_SPACES = 'nfs-host3:/export this' TEST_MNT_POINT_SPACES = '/ 0 0 0 
/foo' def setUp(self): super(NfsDriverTestCase, self).setUp() self.configuration = conf.Configuration(None) self.configuration.append_config_values(nfs.nfs_opts) self.configuration.append_config_values(remotefs.nas_opts) self.override_config('max_over_subscription_ratio', 1.0) self.override_config('reserved_percentage', 5) self.override_config('nfs_shares_config', None) self.override_config('nfs_sparsed_volumes', True) self.override_config('reserved_percentage', 5.0) self.override_config('nfs_mount_point_base', self.TEST_MNT_POINT_BASE) self.override_config('nfs_mount_options', None) self.override_config('nfs_mount_attempts', 3) self.override_config('nfs_qcow2_volumes', False) self.override_config('nas_secure_file_permissions', 'false') self.override_config('nas_secure_file_operations', 'false') self.override_config('nas_host', None) self.override_config('nas_share_path', None) self.override_config('nas_mount_options', None) self.override_config('volume_dd_blocksize', '1M') self.mock_object(volume_utils, 'get_max_over_subscription_ratio', return_value=1) self.context = context.get_admin_context() def _set_driver(self, extra_confs=None): # Overide the default configs if extra_confs: for config_name, config_value in extra_confs.items(): setattr(self.configuration, config_name, config_value) self._driver = nfs.NfsDriver(configuration=self.configuration) self._driver.shares = {} self.mock_object(self._driver, '_execute') @ddt.data(NFS_CONFIG1, NFS_CONFIG2, NFS_CONFIG3, NFS_CONFIG4) def test_local_path(self, nfs_config): """local_path common use case.""" self.override_config('nfs_mount_point_base', self.TEST_MNT_POINT_BASE) self._set_driver(extra_confs=nfs_config) drv = self._driver volume = fake_volume.fake_volume_obj( self.context, provider_location=self.TEST_NFS_EXPORT1) self.assertEqual( '/mnt/test/2f4f60214cf43c595666dd815f0360a4/%s' % volume.name, drv.local_path(volume)) @ddt.data(NFS_CONFIG1, NFS_CONFIG2, NFS_CONFIG3, NFS_CONFIG4) def test_copy_image_to_volume(self, nfs_config): """resize_image common case usage.""" mock_resize = self.mock_object(image_utils, 'resize_image') mock_fetch = self.mock_object(image_utils, 'fetch_to_raw') self._set_driver() drv = self._driver volume = fake_volume.fake_volume_obj(self.context, size=self.TEST_SIZE_IN_GB) test_img_source = 'volume-%s' % volume.id self.mock_object(drv, 'local_path', return_value=test_img_source) data = mock.Mock() data.virtual_size = 1 * units.Gi self.mock_object(image_utils, 'qemu_img_info', return_value=data) drv.copy_image_to_volume(None, volume, None, None) mock_fetch.assert_called_once_with( None, None, None, test_img_source, mock.ANY, run_as_root=True, size=self.TEST_SIZE_IN_GB, disable_sparse=False) mock_resize.assert_called_once_with(test_img_source, self.TEST_SIZE_IN_GB, run_as_root=True) def test_get_mount_point_for_share(self): """_get_mount_point_for_share should calculate correct value.""" self._set_driver() drv = self._driver self.override_config('nfs_mount_point_base', self.TEST_MNT_POINT_BASE) self.assertEqual('/mnt/test/2f4f60214cf43c595666dd815f0360a4', drv._get_mount_point_for_share(self.TEST_NFS_EXPORT1)) def test_get_mount_point_for_share_given_extra_slash_in_state_path(self): """_get_mount_point_for_share should calculate correct value.""" # This test gets called with the extra slash self.override_config('nfs_mount_point_base', self.TEST_MNT_POINT_BASE_EXTRA_SLASH) # The driver gets called with the correct configuration and removes # the extra slash drv = nfs.NfsDriver(configuration=self.configuration) 
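        # NOTE(editor): illustrative only. The fixed directory component
        # asserted below ('2f4f60214cf43c595666dd815f0360a4') is the driver's
        # hash of TEST_NFS_EXPORT1 ('nfs-host1:/export'). Assuming it is a
        # plain MD5 hex digest (the 32-character value suggests as much), it
        # could be reproduced with:
        #
        #   import hashlib
        #   hashlib.md5(b'nfs-host1:/export').hexdigest()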
self.assertEqual('/opt/stack/data/cinder/mnt', drv.base) self.assertEqual( '/opt/stack/data/cinder/mnt/2f4f60214cf43c595666dd815f0360a4', drv._get_mount_point_for_share(self.TEST_NFS_EXPORT1)) def test_get_capacity_info(self): """_get_capacity_info should calculate correct value.""" self._set_driver() drv = self._driver stat_total_size = 2620544 stat_avail = 2129984 stat_output = '1 %d %d' % (stat_total_size, stat_avail) du_used = 490560 du_output = '%d /mnt' % du_used with mock.patch.object( drv, '_get_mount_point_for_share') as mock_get_mount: mock_get_mount.return_value = self.TEST_MNT_POINT drv._execute.side_effect = [(stat_output, None), (du_output, None)] self.assertEqual((stat_total_size, stat_avail, du_used), drv._get_capacity_info(self.TEST_NFS_EXPORT1)) mock_get_mount.assert_called_once_with(self.TEST_NFS_EXPORT1) calls = [mock.call('stat', '-f', '-c', '%S %b %a', self.TEST_MNT_POINT, run_as_root=True), mock.call('du', '-sb', '--apparent-size', '--exclude', '*snapshot*', self.TEST_MNT_POINT, run_as_root=True)] drv._execute.assert_has_calls(calls) def test_get_capacity_info_for_share_and_mount_point_with_spaces(self): """_get_capacity_info should calculate correct value.""" self._set_driver() drv = self._driver stat_total_size = 2620544 stat_avail = 2129984 stat_output = '1 %d %d' % (stat_total_size, stat_avail) du_used = 490560 du_output = '%d /mnt' % du_used with mock.patch.object( drv, '_get_mount_point_for_share') as mock_get_mount: mock_get_mount.return_value = self.TEST_MNT_POINT_SPACES drv._execute.side_effect = [(stat_output, None), (du_output, None)] self.assertEqual((stat_total_size, stat_avail, du_used), drv._get_capacity_info( self.TEST_NFS_EXPORT_SPACES)) mock_get_mount.assert_called_once_with( self.TEST_NFS_EXPORT_SPACES) calls = [mock.call('stat', '-f', '-c', '%S %b %a', self.TEST_MNT_POINT_SPACES, run_as_root=True), mock.call('du', '-sb', '--apparent-size', '--exclude', '*snapshot*', self.TEST_MNT_POINT_SPACES, run_as_root=True)] drv._execute.assert_has_calls(calls) def test_load_shares_config(self): self._set_driver() drv = self._driver drv.configuration.nfs_shares_config = self.TEST_SHARES_CONFIG_FILE with mock.patch.object( drv, '_read_config_file') as mock_read_config: config_data = [] config_data.append(self.TEST_NFS_EXPORT1) config_data.append('#' + self.TEST_NFS_EXPORT2) config_data.append('') config_data.append(self.TEST_NFS_EXPORT2 + ' ' + self.TEST_NFS_EXPORT2_OPTIONS) config_data.append('broken:share_format') mock_read_config.return_value = config_data drv._load_shares_config(drv.configuration.nfs_shares_config) mock_read_config.assert_called_once_with( self.TEST_SHARES_CONFIG_FILE) self.assertIn(self.TEST_NFS_EXPORT1, drv.shares) self.assertIn(self.TEST_NFS_EXPORT2, drv.shares) self.assertEqual(2, len(drv.shares)) self.assertEqual(self.TEST_NFS_EXPORT2_OPTIONS, drv.shares[self.TEST_NFS_EXPORT2]) def test_load_shares_config_nas_opts(self): self._set_driver() drv = self._driver drv.configuration.nas_host = self.TEST_NFS_HOST drv.configuration.nas_share_path = self.TEST_NFS_SHARE_PATH drv.configuration.nfs_shares_config = self.TEST_SHARES_CONFIG_FILE drv._load_shares_config(drv.configuration.nfs_shares_config) self.assertIn(self.TEST_NFS_EXPORT1, drv.shares) self.assertEqual(1, len(drv.shares)) def test_ensure_shares_mounted_should_save_mounting_successfully(self): """_ensure_shares_mounted should save share if mounted with success.""" self._set_driver() drv = self._driver config_data = [] config_data.append(self.TEST_NFS_EXPORT1) 
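# One valid export in the fake shares config; _read_config_file and
# _ensure_share_mounted are replaced with mocks below, so no real NFS
# mount is attempted by this test.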
drv.configuration.nfs_shares_config = self.TEST_SHARES_CONFIG_FILE with mock.patch.object( drv, '_read_config_file') as mock_read_config: with mock.patch.object( drv, '_ensure_share_mounted') as mock_ensure: mock_read_config.return_value = config_data drv._ensure_share_mounted(self.TEST_NFS_EXPORT1) mock_ensure.assert_called_once_with(self.TEST_NFS_EXPORT1) @mock.patch.object(remotefs, 'LOG') def test_ensure_shares_mounted_should_not_save_mounting_with_error(self, LOG): """_ensure_shares_mounted should not save share if failed to mount.""" self._set_driver() drv = self._driver config_data = [] config_data.append(self.TEST_NFS_EXPORT1) drv.configuration.nfs_shares_config = self.TEST_SHARES_CONFIG_FILE with mock.patch.object( drv, '_read_config_file') as mock_read_config: with mock.patch.object( drv, '_ensure_share_mounted') as mock_ensure: mock_read_config.return_value = config_data drv._ensure_share_mounted() self.assertEqual(0, len(drv._mounted_shares)) mock_ensure.assert_called_once_with() def test_find_share_should_throw_error_if_there_is_no_mounted_share(self): """_find_share should throw error if there is no mounted shares.""" self._set_driver() drv = self._driver drv._mounted_shares = [] self.assertRaises(exception.NfsNoSharesMounted, drv._find_share, self._simple_volume()) def test_find_share(self): """_find_share simple use case.""" self._set_driver() drv = self._driver drv._mounted_shares = [self.TEST_NFS_EXPORT1, self.TEST_NFS_EXPORT2] volume = fake_volume.fake_volume_obj(self.context, size=self.TEST_SIZE_IN_GB) with mock.patch.object( drv, '_get_capacity_info') as mock_get_capacity_info: mock_get_capacity_info.side_effect = [ (5 * units.Gi, 2 * units.Gi, 2 * units.Gi), (10 * units.Gi, 3 * units.Gi, 1 * units.Gi)] self.assertEqual(self.TEST_NFS_EXPORT2, drv._find_share(volume)) calls = [mock.call(self.TEST_NFS_EXPORT1), mock.call(self.TEST_NFS_EXPORT2)] mock_get_capacity_info.assert_has_calls(calls) self.assertEqual(2, mock_get_capacity_info.call_count) def test_find_share_should_throw_error_if_there_is_not_enough_space(self): """_find_share should throw error if there is no share to host vol.""" self._set_driver() drv = self._driver drv._mounted_shares = [self.TEST_NFS_EXPORT1, self.TEST_NFS_EXPORT2] with mock.patch.object( drv, '_get_capacity_info') as mock_get_capacity_info: mock_get_capacity_info.side_effect = [ (5 * units.Gi, 0, 5 * units.Gi), (10 * units.Gi, 0, 10 * units.Gi)] self.assertRaises(exception.NfsNoSuitableShareFound, drv._find_share, self._simple_volume()) calls = [mock.call(self.TEST_NFS_EXPORT1), mock.call(self.TEST_NFS_EXPORT2)] mock_get_capacity_info.assert_has_calls(calls) self.assertEqual(2, mock_get_capacity_info.call_count) def _simple_volume(self, size=10): loc = self.TEST_NFS_EXPORT1 return fake_volume.fake_volume_obj(self.context, display_name='volume_name', provider_location=loc, size=size) def _simple_encrypted_volume(self, size=10): loc = self.TEST_NFS_EXPORT1 info_dic = {'name': u'volume-0000000a', 'id': '55555555-222f-4b32-b585-9991b3bf0a99', 'size': size, 'encryption_key_id': fake.ENCRYPTION_KEY_ID} return fake_volume.fake_volume_obj(self.context, provider_location=loc, **info_dic) def test_get_provisioned_capacity(self): self._set_driver() drv = self._driver mock_execute = self.mock_object(drv, '_execute') mock_execute.return_value = ("148418423\t/dir", "") with mock.patch.object(drv, 'shares') as shares: shares.keys.return_value = {'192.0.2.1:/srv/nfs1'} shares.return_value = {'192.0.2.1:/srv/nfs1', ''} ret = drv._get_provisioned_capacity() 
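# du reports 148418423 bytes of apparent usage, i.e. roughly 0.138 GiB,
# which the driver is expected to report rounded to 0.14.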
self.assertEqual(ret, 0.14) @mock.patch('cinder.objects.volume.Volume.save') def test_create_sparsed_volume(self, mock_save): self._set_driver() drv = self._driver volume = self._simple_volume() self.override_config('nfs_sparsed_volumes', True) with mock.patch.object( drv, '_create_sparsed_file') as mock_create_sparsed_file: with mock.patch.object( drv, '_set_rw_permissions') as mock_set_rw_permissions: drv._do_create_volume(volume) mock_create_sparsed_file.assert_called_once_with(mock.ANY, mock.ANY) mock_set_rw_permissions.assert_called_once_with(mock.ANY) @mock.patch('cinder.objects.volume.Volume.save') def test_create_nonsparsed_volume(self, mock_save): self._set_driver() drv = self._driver self.override_config('nfs_sparsed_volumes', False) volume = self._simple_volume() with mock.patch.object( drv, '_create_regular_file') as mock_create_regular_file: with mock.patch.object( drv, '_set_rw_permissions') as mock_set_rw_permissions: drv._do_create_volume(volume) mock_create_regular_file.assert_called_once_with(mock.ANY, mock.ANY) mock_set_rw_permissions.assert_called_once_with(mock.ANY) @mock.patch.object(nfs, 'LOG') def test_create_volume_should_ensure_nfs_mounted(self, mock_log): """create_volume ensures shares provided in config are mounted.""" self._set_driver() drv = self._driver drv._find_share = mock.Mock() drv._find_share.return_value = self.TEST_NFS_EXPORT1 drv._do_create_volume = mock.Mock() with mock.patch.object( drv, '_ensure_share_mounted') as mock_ensure_share: drv._ensure_share_mounted() volume = fake_volume.fake_volume_obj(self.context, size=self.TEST_SIZE_IN_GB) drv.create_volume(volume) mock_ensure_share.assert_called_once_with() @mock.patch.object(nfs, 'LOG') def test_create_volume_should_return_provider_location(self, mock_log): """create_volume should return provider_location with found share.""" self._set_driver() drv = self._driver drv._ensure_shares_mounted = mock.Mock() drv._do_create_volume = mock.Mock() with mock.patch.object(drv, '_find_share') as mock_find_share: mock_find_share.return_value = self.TEST_NFS_EXPORT1 volume = fake_volume.fake_volume_obj(self.context, size=self.TEST_SIZE_IN_GB) result = drv.create_volume(volume) self.assertEqual(self.TEST_NFS_EXPORT1, result['provider_location']) mock_find_share.assert_called_once_with(volume) def test_delete_volume(self): """delete_volume simple test case.""" self._set_driver() drv = self._driver drv._ensure_share_mounted = mock.Mock() volume = fake_volume.fake_volume_obj( self.context, display_name='volume-123', provider_location=self.TEST_NFS_EXPORT1) with mock.patch.object(drv, 'local_path') as mock_local_path: mock_local_path.return_value = self.TEST_LOCAL_PATH drv.delete_volume(volume) mock_local_path.assert_called_with(volume) drv._execute.assert_called_once() def test_delete_should_ensure_share_mounted(self): """delete_volume should ensure that corresponding share is mounted.""" self._set_driver() drv = self._driver volume = fake_volume.fake_volume_obj( self.context, display_name='volume-123', provider_location=self.TEST_NFS_EXPORT1) with mock.patch.object(drv, '_ensure_share_mounted'): drv.delete_volume(volume) def test_delete_should_not_delete_if_provider_location_not_provided(self): """delete_volume shouldn't delete if provider_location missed.""" self._set_driver() drv = self._driver volume = fake_volume.fake_volume_obj(self.context, name='volume-123', provider_location=None) with mock.patch.object(drv, '_ensure_share_mounted'): drv.delete_volume(volume) self.assertFalse(drv._execute.called) def 
test_get_volume_stats(self): """get_volume_stats must fill the correct values.""" self._set_driver() drv = self._driver drv._mounted_shares = [self.TEST_NFS_EXPORT1, self.TEST_NFS_EXPORT2] with mock.patch.object( drv, '_ensure_shares_mounted') as mock_ensure_share: with mock.patch.object( drv, '_get_capacity_info') as mock_get_capacity_info: mock_get_capacity_info.side_effect = [ (10 * units.Gi, 2 * units.Gi, 2 * units.Gi), (20 * units.Gi, 3 * units.Gi, 3 * units.Gi)] drv._ensure_shares_mounted() drv.get_volume_stats() calls = [mock.call(self.TEST_NFS_EXPORT1), mock.call(self.TEST_NFS_EXPORT2)] mock_get_capacity_info.assert_has_calls(calls) self.assertTrue(mock_ensure_share.called) self.assertEqual(30.0, drv._stats['total_capacity_gb']) self.assertEqual(5.0, drv._stats['free_capacity_gb']) self.assertEqual(5, drv._stats['reserved_percentage']) self.assertTrue(drv._stats['sparse_copy_volume']) def test_get_volume_stats_with_non_zero_reserved_percentage(self): """get_volume_stats must fill the correct values.""" self.override_config('reserved_percentage', 10.0) drv = nfs.NfsDriver(configuration=self.configuration) drv._mounted_shares = [self.TEST_NFS_EXPORT1, self.TEST_NFS_EXPORT2] with mock.patch.object( drv, '_ensure_shares_mounted') as mock_ensure_share: with mock.patch.object( drv, '_get_capacity_info') as mock_get_capacity_info: mock_get_capacity_info.side_effect = [ (10 * units.Gi, 2 * units.Gi, 2 * units.Gi), (20 * units.Gi, 3 * units.Gi, 3 * units.Gi)] drv._ensure_shares_mounted() drv.get_volume_stats() calls = [mock.call(self.TEST_NFS_EXPORT1), mock.call(self.TEST_NFS_EXPORT2)] mock_get_capacity_info.assert_has_calls(calls) self.assertTrue(mock_ensure_share.called) self.assertEqual(30.0, drv._stats['total_capacity_gb']) self.assertEqual(5.0, drv._stats['free_capacity_gb']) self.assertEqual(10.0, drv._stats['reserved_percentage']) @ddt.data(True, False) def test_update_volume_stats(self, thin): self._set_driver() self._driver.configuration.max_over_subscription_ratio = 20.0 self._driver.configuration.reserved_percentage = 5.0 self._driver.configuration.nfs_sparsed_volumes = thin remotefs_volume_stats = { 'volume_backend_name': 'fake_backend_name', 'vendor_name': 'fake_vendor', 'driver_version': 'fake_version', 'storage_protocol': 'NFS', 'total_capacity_gb': 100.0, 'free_capacity_gb': 20.0, 'reserved_percentage': 5.0, 'QoS_support': False, } self.mock_object(remotefs.RemoteFSDriver, '_update_volume_stats') self._driver._stats = remotefs_volume_stats mock_get_provisioned_capacity = self.mock_object( self._driver, '_get_provisioned_capacity', return_value=25.0) self._driver._update_volume_stats() nfs_added_volume_stats = { 'provisioned_capacity_gb': 25.0 if thin else 80.0, 'max_over_subscription_ratio': 20.0, 'reserved_percentage': 5.0, 'thin_provisioning_support': thin, 'thick_provisioning_support': not thin, } expected = remotefs_volume_stats expected.update(nfs_added_volume_stats) self.assertEqual(expected, self._driver._stats) self.assertEqual(thin, mock_get_provisioned_capacity.called) def _check_is_share_eligible(self, total_size, total_available, total_allocated, requested_volume_size): self._set_driver() with mock.patch.object(self._driver, '_get_capacity_info')\ as mock_get_capacity_info: mock_get_capacity_info.return_value = (total_size, total_available, total_allocated) return self._driver._is_share_eligible('fake_share', requested_volume_size) def test_is_share_eligible(self): self._set_driver() total_size = 100.0 * units.Gi total_available = 90.0 * units.Gi total_allocated 
= 10.0 * units.Gi requested_volume_size = 1 # GiB self.assertTrue(self._check_is_share_eligible(total_size, total_available, total_allocated, requested_volume_size)) def test_share_eligibility_with_reserved_percentage(self): self._set_driver() total_size = 100.0 * units.Gi total_available = 4.0 * units.Gi total_allocated = 96.0 * units.Gi requested_volume_size = 1 # GiB # Check used > used_ratio statement entered self.assertFalse(self._check_is_share_eligible(total_size, total_available, total_allocated, requested_volume_size)) def test_is_share_eligible_above_oversub_ratio(self): self._set_driver() total_size = 100.0 * units.Gi total_available = 10.0 * units.Gi total_allocated = 90.0 * units.Gi requested_volume_size = 10 # GiB # Check apparent_available <= requested_volume_size statement entered self.assertFalse(self._check_is_share_eligible(total_size, total_available, total_allocated, requested_volume_size)) def test_is_share_eligible_reserved_space_above_oversub_ratio(self): self._set_driver() total_size = 100.0 * units.Gi total_available = 10.0 * units.Gi total_allocated = 100.0 * units.Gi requested_volume_size = 1 # GiB # Check total_allocated / total_size >= oversub_ratio # statement entered self.assertFalse(self._check_is_share_eligible(total_size, total_available, total_allocated, requested_volume_size)) @ddt.data(None, 'raw', 'qcow2') @mock.patch('cinder.objects.volume.Volume.get_by_id') def test_extend_volume(self, file_format, mock_get): """Extend a volume by 1.""" self._set_driver() drv = self._driver volume = fake_volume.fake_volume_obj( self.context, id='80ee16b6-75d2-4d54-9539-ffc1b4b0fb10', size=1, provider_location='nfs_share') if file_format: volume.admin_metadata = {'format': file_format} mock_get.return_value = volume local_vol_dir = 'dir' newSize = volume['size'] + 1 with mock.patch.object(image_utils, 'resize_image') as resize: with mock.patch.object(drv, '_local_volume_dir', return_value=local_vol_dir): with mock.patch.object(drv, '_is_share_eligible', return_value=True): with mock.patch.object(drv, '_is_file_size_equal', return_value=True): drv.extend_volume(volume, newSize) path = os.path.join(local_vol_dir, volume.name) resize.assert_called_once_with(path, newSize, run_as_root=True, file_format=file_format) def test_extend_volume_attached_fail(self): """Extend a volume by 1.""" self._set_driver() drv = self._driver volume = fake_volume.fake_volume_obj( self.context, id='80ee16b6-75d2-4d54-9539-ffc1b4b0fb10', size=1, provider_location='nfs_share') path = 'path' newSize = volume['size'] + 1 with mock.patch.object(drv, 'local_path', return_value=path): with mock.patch.object(drv, '_is_share_eligible', return_value=True): with mock.patch.object(drv, '_is_file_size_equal', return_value=True): with mock.patch.object(drv, '_is_volume_attached', return_value=True): self.assertRaises(exception.ExtendVolumeError, drv.extend_volume, volume, newSize) @mock.patch('cinder.objects.volume.Volume.get_by_id') def test_extend_volume_failure(self, mock_get): """Error during extend operation.""" self._set_driver() drv = self._driver volume = fake_volume.fake_volume_obj( self.context, id='80ee16b6-75d2-4d54-9539-ffc1b4b0fb10', size=1, provider_location='nfs_share') volume.admin_metadata = {'format': 'qcow2'} mock_get.return_value = volume with mock.patch.object(image_utils, 'resize_image'): with mock.patch.object(drv, 'local_path', return_value='path'): with mock.patch.object(drv, '_is_share_eligible', return_value=True): with mock.patch.object(drv, '_is_file_size_equal', 
return_value=False): self.assertRaises(exception.ExtendVolumeError, drv.extend_volume, volume, 2) def test_extend_volume_insufficient_space(self): """Insufficient space on nfs_share during extend operation.""" self._set_driver() drv = self._driver volume = fake_volume.fake_volume_obj( self.context, id='80ee16b6-75d2-4d54-9539-ffc1b4b0fb10', size=1, provider_location='nfs_share') with mock.patch.object(image_utils, 'resize_image'): with mock.patch.object(drv, 'local_path', return_value='path'): with mock.patch.object(drv, '_is_share_eligible', return_value=False): with mock.patch.object(drv, '_is_file_size_equal', return_value=False): self.assertRaises(exception.ExtendVolumeError, drv.extend_volume, volume, 2) def test_is_file_size_equal(self): """File sizes are equal.""" self._set_driver() drv = self._driver path = 'fake/path' volume_name = 'volume1' size = 2 data = mock.MagicMock() data.virtual_size = size * units.Gi with mock.patch.object(drv, '_qemu_img_info', return_value=data) as mock_qemu_img_info: self.assertTrue(drv._is_file_size_equal(path, volume_name, size)) mock_qemu_img_info.assert_called_once_with(path, volume_name) def test_is_file_size_equal_false(self): """File sizes are not equal.""" self._set_driver() drv = self._driver path = 'fake/path' volume_name = 'volume1' size = 2 data = mock.MagicMock() data.virtual_size = (size + 1) * units.Gi with mock.patch.object(drv, '_qemu_img_info', return_value=data) as mock_qemu_img_info: self.assertFalse(drv._is_file_size_equal(path, volume_name, size)) mock_qemu_img_info.assert_called_once_with(path, volume_name) @mock.patch.object(nfs, 'LOG') def test_set_nas_security_options_when_true(self, LOG): """Test higher level setting of NAS Security options. The NFS driver overrides the base method with a driver specific version. """ self._set_driver() drv = self._driver drv._mounted_shares = [self.TEST_NFS_EXPORT1] is_new_install = True drv._ensure_shares_mounted = mock.Mock() drv._get_mount_point_for_share = mock.Mock( return_value=self.TEST_MNT_POINT) drv._determine_nas_security_option_setting = mock.Mock( return_value='true') drv.set_nas_security_options(is_new_install) self.assertEqual('true', drv.configuration.nas_secure_file_operations) self.assertEqual('true', drv.configuration.nas_secure_file_permissions) self.assertFalse(LOG.warning.called) @mock.patch.object(nfs, 'LOG') def test_set_nas_security_options_when_false(self, LOG): """Test higher level setting of NAS Security options. The NFS driver overrides the base method with a driver specific version. 
""" self._set_driver() drv = self._driver drv._mounted_shares = [self.TEST_NFS_EXPORT1] is_new_install = False drv._ensure_shares_mounted = mock.Mock() drv._get_mount_point_for_share = mock.Mock( return_value=self.TEST_MNT_POINT) drv._determine_nas_security_option_setting = mock.Mock( return_value='false') drv.set_nas_security_options(is_new_install) self.assertEqual('false', drv.configuration.nas_secure_file_operations) self.assertEqual('false', drv.configuration.nas_secure_file_permissions) self.assertTrue(LOG.warning.called) def test_set_nas_security_options_exception_if_no_mounted_shares(self): """Ensure proper exception is raised if there are no mounted shares.""" self._set_driver() drv = self._driver drv._ensure_shares_mounted = mock.Mock() drv._mounted_shares = [] is_new_cinder_install = 'does not matter' self.assertRaises(exception.NfsNoSharesMounted, drv.set_nas_security_options, is_new_cinder_install) def test_ensure_share_mounted(self): """Case where the mount works the first time.""" self._set_driver() self.mock_object(self._driver._remotefsclient, 'mount', autospec=True) drv = self._driver drv.configuration.nfs_mount_attempts = 3 drv.shares = {self.TEST_NFS_EXPORT1: ''} drv._ensure_share_mounted(self.TEST_NFS_EXPORT1) drv._remotefsclient.mount.assert_called_once_with( self.TEST_NFS_EXPORT1, []) @mock.patch('time.sleep') def test_ensure_share_mounted_exception(self, _mock_sleep): """Make the configured number of attempts when mounts fail.""" num_attempts = 3 self._set_driver() self.mock_object(self._driver._remotefsclient, 'mount', side_effect=Exception) drv = self._driver drv.configuration.nfs_mount_attempts = num_attempts drv.shares = {self.TEST_NFS_EXPORT1: ''} self.assertRaises(exception.NfsException, drv._ensure_share_mounted, self.TEST_NFS_EXPORT1) self.assertEqual(num_attempts, drv._remotefsclient.mount.call_count) def test_ensure_share_mounted_at_least_one_attempt(self): """Make at least one mount attempt even if configured for less.""" min_num_attempts = 1 num_attempts = 0 self._set_driver() self.mock_object(self._driver._remotefsclient, 'mount', side_effect=Exception) drv = self._driver drv.configuration.nfs_mount_attempts = num_attempts drv.shares = {self.TEST_NFS_EXPORT1: ''} self.assertRaises(exception.NfsException, drv._ensure_share_mounted, self.TEST_NFS_EXPORT1) self.assertEqual(min_num_attempts, drv._remotefsclient.mount.call_count) @mock.patch('tempfile.NamedTemporaryFile') @ddt.data([NFS_CONFIG1, QEMU_IMG_INFO_OUT3, False], [NFS_CONFIG2, QEMU_IMG_INFO_OUT4, False], [NFS_CONFIG3, QEMU_IMG_INFO_OUT3, False], [NFS_CONFIG4, QEMU_IMG_INFO_OUT4, False], [NFS_CONFIG4, QEMU_IMG_INFO_OUT5, True]) @ddt.unpack def test_copy_volume_from_snapshot(self, nfs_conf, qemu_img_info, encryption, mock_temp_file): class DictObj(object): # convert a dict to object w/ attributes def __init__(self, d): self.__dict__ = d self._set_driver(extra_confs=nfs_conf) drv = self._driver src_encryption_key_id = None dest_encryption_key_id = None if encryption: mock_temp_file.return_value.__enter__.side_effect = [ DictObj({'name': '/tmp/imgfile'}), DictObj({'name': '/tmp/passfile'})] dest_volume = self._simple_encrypted_volume() src_volume = self._simple_encrypted_volume() key_mgr = fake_keymgr.fake_api() self.mock_object(castellan.key_manager, 'API', return_value=key_mgr) key_id = key_mgr.store(self.context, KeyObject()) src_volume.encryption_key_id = key_id dest_volume.encryption_key_id = key_id src_encryption_key_id = src_volume.encryption_key_id dest_encryption_key_id = 
dest_volume.encryption_key_id else: dest_volume = self._simple_volume() src_volume = self._simple_volume() # snapshot img_info fake_snap = fake_snapshot.fake_snapshot_obj(self.context) fake_snap.volume = src_volume img_out = qemu_img_info % {'volid': src_volume.id, 'snapid': fake_snap.id, 'size_gb': src_volume.size, 'size_b': src_volume.size * units.Gi} img_info = imageutils.QemuImgInfo(img_out, format='json') # backing file img_info img_out = QEMU_IMG_INFO_OUT1 % {'volid': src_volume.id, 'size_b': src_volume.size * units.Gi} bk_img_info = imageutils.QemuImgInfo(img_out, format='json') mock_img_info = self.mock_object(image_utils, 'qemu_img_info') mock_img_info.side_effect = [img_info, bk_img_info] mock_convert_image = self.mock_object(image_utils, 'convert_image') vol_dir = os.path.join(self.TEST_MNT_POINT_BASE, drv._get_hash_str(src_volume.provider_location)) src_vol_path = os.path.join(vol_dir, img_info.backing_file) dest_vol_path = os.path.join(vol_dir, dest_volume.name) info_path = os.path.join(vol_dir, src_volume.name) + '.info' snap_file = dest_volume.name + '.' + fake_snap.id snap_path = os.path.join(vol_dir, snap_file) size = dest_volume.size mock_read_info_file = self.mock_object(drv, '_read_info_file') mock_read_info_file.return_value = {'active': snap_file, fake_snap.id: snap_file} mock_permission = self.mock_object(drv, '_set_rw_permissions_for_all') drv._copy_volume_from_snapshot(fake_snap, dest_volume, size, src_encryption_key_id, dest_encryption_key_id) mock_read_info_file.assert_called_once_with(info_path) snap_info_call = mock.call(snap_path, force_share=True, run_as_root=True, allow_qcow2_backing_file=True) src_info_call = mock.call(src_vol_path, force_share=True, run_as_root=True, allow_qcow2_backing_file=True) self.assertEqual(2, mock_img_info.call_count) mock_img_info.assert_has_calls([snap_info_call, src_info_call]) used_qcow = nfs_conf['nfs_qcow2_volumes'] if encryption: mock_convert_image.assert_called_once_with( src_vol_path, dest_vol_path, 'luks', passphrase_file='/tmp/passfile', run_as_root=True, src_passphrase_file='/tmp/imgfile', data=bk_img_info) else: mock_convert_image.assert_called_once_with( src_vol_path, dest_vol_path, 'qcow2' if used_qcow else 'raw', run_as_root=True, data=bk_img_info) mock_permission.assert_called_once_with(dest_vol_path) @ddt.data([NFS_CONFIG1, QEMU_IMG_INFO_OUT3, 'available'], [NFS_CONFIG2, QEMU_IMG_INFO_OUT4, 'backing-up'], [NFS_CONFIG2, QEMU_IMG_INFO_OUT4, 'restoring'], [NFS_CONFIG3, QEMU_IMG_INFO_OUT3, 'available'], [NFS_CONFIG4, QEMU_IMG_INFO_OUT4, 'backing-up'], [NFS_CONFIG4, QEMU_IMG_INFO_OUT4, 'restoring']) @ddt.unpack @mock.patch('cinder.objects.volume.Volume.save') def test_create_volume_from_snapshot(self, nfs_conf, qemu_img_info, snap_status, mock_save): self._set_driver(extra_confs=nfs_conf) drv = self._driver # Volume source of the snapshot we are trying to clone from. We need it # to have a different id than the default provided. src_volume = self._simple_volume(size=10) src_volume.id = fake.VOLUME_ID src_volume_dir = os.path.join(self.TEST_MNT_POINT_BASE, drv._get_hash_str( src_volume.provider_location)) src_volume_path = os.path.join(src_volume_dir, src_volume.name) fake_snap = fake_snapshot.fake_snapshot_obj(self.context) # Fake snapshot based in the previous created volume snap_file = src_volume.name + '.' 
+ fake_snap.id fake_snap.volume = src_volume fake_snap.status = snap_status fake_snap.size = 10 # New fake volume where the snap will be copied new_volume = self._simple_volume(size=10) new_volume_dir = os.path.join(self.TEST_MNT_POINT_BASE, drv._get_hash_str( src_volume.provider_location)) new_volume_path = os.path.join(new_volume_dir, new_volume.name) # Mocks img_out = qemu_img_info % {'volid': src_volume.id, 'snapid': fake_snap.id, 'size_gb': src_volume.size, 'size_b': src_volume.size * units.Gi} img_info = imageutils.QemuImgInfo(img_out, format='json') mock_img_info = self.mock_object(image_utils, 'qemu_img_info') mock_img_info.return_value = img_info mock_ensure = self.mock_object(drv, '_ensure_shares_mounted') mock_find_share = self.mock_object(drv, '_find_share', return_value=self.TEST_NFS_EXPORT1) mock_read_info_file = self.mock_object(drv, '_read_info_file') mock_read_info_file.return_value = {'active': snap_file, fake_snap.id: snap_file} mock_convert_image = self.mock_object(image_utils, 'convert_image') self.mock_object(drv, '_create_qcow2_file') self.mock_object(drv, '_create_regular_file') self.mock_object(drv, '_create_regular_file') self.mock_object(drv, '_set_rw_permissions') self.mock_object(drv, '_read_file') ret = drv.create_volume_from_snapshot(new_volume, fake_snap) # Test asserts self.assertEqual(self.TEST_NFS_EXPORT1, ret['provider_location']) used_qcow = nfs_conf['nfs_qcow2_volumes'] mock_convert_image.assert_called_once_with( src_volume_path, new_volume_path, 'qcow2' if used_qcow else 'raw', run_as_root=True, data=img_info) mock_ensure.assert_called_once() mock_find_share.assert_called_once_with(new_volume) @ddt.data('error', 'creating', 'deleting', 'deleted', 'updating', 'error_deleting', 'unmanaging') def test_create_volume_from_snapshot_invalid_status(self, snap_status): """Expect an error when the snapshot's status is not 'available'.""" self._set_driver() drv = self._driver src_volume = self._simple_volume() fake_snap = fake_snapshot.fake_snapshot_obj(self.context) fake_snap.volume = src_volume fake_snap.status = snap_status new_volume = self._simple_volume() new_volume['size'] = fake_snap['volume_size'] self.assertRaises(exception.InvalidSnapshot, drv.create_volume_from_snapshot, new_volume, fake_snap) @ddt.data([NFS_CONFIG1, QEMU_IMG_INFO_OUT1], [NFS_CONFIG2, QEMU_IMG_INFO_OUT2], [NFS_CONFIG3, QEMU_IMG_INFO_OUT1], [NFS_CONFIG4, QEMU_IMG_INFO_OUT2]) @ddt.unpack def test_initialize_connection(self, nfs_confs, qemu_img_info): self._set_driver(extra_confs=nfs_confs) drv = self._driver volume = self._simple_volume() vol_dir = os.path.join(self.TEST_MNT_POINT_BASE, drv._get_hash_str(volume.provider_location)) vol_path = os.path.join(vol_dir, volume.name) mock_img_utils = self.mock_object(image_utils, 'qemu_img_info') img_out = qemu_img_info % {'volid': volume.id, 'size_gb': volume.size, 'size_b': volume.size * units.Gi} mock_img_utils.return_value = imageutils.QemuImgInfo(img_out, format='json') self.mock_object(drv, '_read_info_file', return_value={'active': "volume-%s" % volume.id}) conn_info = drv.initialize_connection(volume, None) mock_img_utils.assert_called_once_with(vol_path, force_share=True, run_as_root=True, allow_qcow2_backing_file=True) self.assertEqual('nfs', conn_info['driver_volume_type']) self.assertEqual(volume.name, conn_info['data']['name']) self.assertEqual(self.TEST_MNT_POINT_BASE, conn_info['mount_point_base']) @mock.patch.object(image_utils, 'qemu_img_info') def test_initialize_connection_raise_exception(self, mock_img_info): 
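# An image that qemu-img reports as "iso" is neither raw nor qcow2, so
# initialize_connection is expected to raise InvalidVolume.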
self._set_driver() drv = self._driver volume = self._simple_volume() qemu_img_output = """{ "filename": "%s", "format": "iso", "virtual-size": 10737418240, "actual-size": 173000 }""" % volume['name'] mock_img_info.return_value = imageutils.QemuImgInfo(qemu_img_output, format='json') self.assertRaisesRegex(exception.InvalidVolume, "must be a valid raw or qcow2 image", drv.initialize_connection, volume, None) @mock.patch.object(image_utils, 'qemu_img_info') def test_initialize_connection_raise_on_wrong_size(self, mock_img_info): self._set_driver() drv = self._driver volume = self._simple_volume() qemu_img_output = """{ "filename": "%s", "format": "qcow2", "virtual-size": 999999999999999, "actual-size": 173000 }""" % volume['name'] mock_img_info.return_value = imageutils.QemuImgInfo(qemu_img_output, format='json') self.assertRaisesRegex(exception.InvalidVolume, "virtual_size does not match", drv.initialize_connection, volume, None) def test_create_snapshot(self): self._set_driver() drv = self._driver volume = self._simple_volume() self.override_config('nfs_snapshot_support', True) fake_snap = fake_snapshot.fake_snapshot_obj(self.context) fake_snap.volume = volume vol_dir = os.path.join(self.TEST_MNT_POINT_BASE, drv._get_hash_str(self.TEST_NFS_EXPORT1)) snap_file = volume['name'] + '.' + fake_snap.id snap_path = os.path.join(vol_dir, snap_file) info_path = os.path.join(vol_dir, volume['name']) + '.info' with mock.patch.object(drv, '_local_path_volume_info', return_value=info_path), \ mock.patch.object(drv, '_read_info_file', return_value={}), \ mock.patch.object(drv, '_do_create_snapshot') \ as mock_do_create_snapshot, \ mock.patch.object(drv, '_check_snapshot_support') \ as mock_check_support, \ mock.patch.object(drv, '_write_info_file') \ as mock_write_info_file, \ mock.patch.object(drv, 'get_active_image_from_info', return_value=volume['name']), \ mock.patch.object(drv, '_get_new_snap_path', return_value=snap_path): self._driver.create_snapshot(fake_snap) mock_check_support.assert_called_once() mock_do_create_snapshot.assert_called_with(fake_snap, volume['name'], snap_path) mock_write_info_file.assert_called_with( info_path, {'active': snap_file, fake_snap.id: snap_file}) @ddt.data({'volume_status': 'available', 'original_provider': 'original_provider', 'rename_side_effect': None}, {'volume_status': 'available', 'original_provider': 'current_provider', 'rename_side_effect': None}, {'volume_status': 'in-use', 'original_provider': 'original_provider', 'rename_side_effect': None}, {'volume_status': 'available', 'original_provider': 'original_provider', 'rename_side_effect': OSError}) @ddt.unpack @mock.patch('os.rename') def test_update_migrated_volume(self, mock_rename, rename_side_effect, original_provider, volume_status): drv = nfs.NfsDriver(configuration=self.configuration) base_dir = '/dir_base/' current_path = base_dir + fake.VOLUME2_NAME current_provider = 'current_provider' mock_rename.side_effect = rename_side_effect volume = fake_volume.fake_volume_obj( self.context, id=fake.VOLUME_ID, provider_location=original_provider, _name_id=None) new_volume = fake_volume.fake_volume_obj( self.context, id=fake.VOLUME2_ID, provider_location=current_provider, _name_id=None) with mock.patch.object(drv, 'local_path') as local_path: local_path.return_value = current_path update = drv.update_migrated_volume(self.context, volume, new_volume, volume_status) if (volume_status == 'available' and original_provider != current_provider): original_path = base_dir + fake.VOLUME_NAME 
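# Only this branch (an 'available' volume whose provider_location changed)
# is expected to rename the file back to the original volume name.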
mock_rename.assert_called_once_with(current_path, original_path) else: mock_rename.assert_not_called() if mock_rename.call_count > 0 and rename_side_effect is None: self.assertEqual({'_name_id': None, 'provider_location': current_provider}, update) else: self.assertEqual({'_name_id': fake.VOLUME2_ID, 'provider_location': current_provider}, update) class NfsDriverDoSetupTestCase(test.TestCase): def setUp(self): super(NfsDriverDoSetupTestCase, self).setUp() self.context = mock.Mock() self.create_configuration() self.override_config('compute_api_class', 'unittest.mock.Mock') def create_configuration(self): config = conf.Configuration(None) config.append_config_values(nfs.nfs_opts) self.configuration = config def test_setup_should_throw_error_if_shares_config_not_configured(self): """do_setup should throw error if shares config is not configured.""" self.override_config('nfs_shares_config', None) drv = nfs.NfsDriver(configuration=self.configuration) mock_os_path_exists = self.mock_object(os.path, 'exists') with self.assertRaisesRegex(exception.NfsException, ".*no NFS config file configured.*"): drv.do_setup(self.context) self.assertEqual(0, mock_os_path_exists.call_count) def test_setup_should_throw_error_if_shares_file_does_not_exist(self): """do_setup should throw error if shares file does not exist.""" drv = nfs.NfsDriver(configuration=self.configuration) mock_os_path_exists = self.mock_object(os.path, 'exists') mock_os_path_exists.return_value = False with self.assertRaisesRegex(exception.NfsException, "NFS config file.*doesn't exist"): drv.do_setup(self.context) mock_os_path_exists.assert_has_calls( [mock.call(self.configuration.nfs_shares_config)]) def test_setup_should_not_throw_error_if_host_and_share_set(self): """do_setup shouldn't throw shares file error if host and share set.""" drv = nfs.NfsDriver(configuration=self.configuration) self.override_config('nas_host', 'nfs-host1') self.override_config('nas_share_path', '/export') mock_os_path_exists = self.mock_object(os.path, 'exists') mock_os_path_exists.return_value = False mock_set_nas_sec_options = self.mock_object(nfs.NfsDriver, 'set_nas_security_options') mock_set_nas_sec_options.return_value = True mock_execute = self.mock_object(drv, '_execute') mock_execute.return_value = True drv.do_setup(self.context) mock_os_path_exists.assert_not_called() def test_setup_throw_error_if_shares_file_does_not_exist_no_host(self): """do_setup should throw error if no shares file and no host set.""" drv = nfs.NfsDriver(configuration=self.configuration) self.override_config('nas_share_path', '/export') mock_os_path_exists = self.mock_object(os.path, 'exists') mock_os_path_exists.return_value = False with self.assertRaisesRegex(exception.NfsException, "NFS config file.*doesn't exist"): drv.do_setup(self.context) mock_os_path_exists.assert_has_calls( [mock.call(self.configuration.nfs_shares_config)]) def test_setup_throw_error_if_shares_file_does_not_exist_no_share(self): """do_setup should throw error if no shares file and no share set.""" drv = nfs.NfsDriver(configuration=self.configuration) self.override_config('nas_host', 'nfs-host1') mock_os_path_exists = self.mock_object(os.path, 'exists') mock_os_path_exists.return_value = False with self.assertRaisesRegex(exception.NfsException, "NFS config file.*doesn't exist"): drv.do_setup(self.context) mock_os_path_exists.assert_has_calls( [mock.call(self.configuration.nfs_shares_config)]) def test_setup_throw_error_if_shares_file_doesnt_exist_no_share_host(self): """do_setup should throw error if no 
shares file and no host/share."""
        drv = nfs.NfsDriver(configuration=self.configuration)

        mock_os_path_exists = self.mock_object(os.path, 'exists')
        mock_os_path_exists.return_value = False

        with self.assertRaisesRegex(exception.NfsException,
                                    "NFS config file.*doesn't exist"):
            drv.do_setup(self.context)

        mock_os_path_exists.assert_has_calls(
            [mock.call(self.configuration.nfs_shares_config)])

    def test_setup_should_throw_exception_if_nfs_client_is_not_installed(self):
        """do_setup should throw error if nfs client is not installed."""
        drv = nfs.NfsDriver(configuration=self.configuration)

        mock_os_path_exists = self.mock_object(os.path, 'exists')
        mock_os_path_exists.return_value = True
        mock_execute = self.mock_object(drv, '_execute')
        mock_execute.side_effect = OSError(
            errno.ENOENT, 'No such file or directory.')

        with self.assertRaisesRegex(exception.NfsException,
                                    'mount.nfs is not installed'):
            drv.do_setup(self.context)

        mock_os_path_exists.assert_has_calls(
            [mock.call(self.configuration.nfs_shares_config)])
        mock_execute.assert_has_calls(
            [mock.call('mount.nfs', check_exit_code=False, run_as_root=True)])

    def test_setup_should_throw_exception_if_mount_nfs_command_fails(self):
        """do_setup should throw error if mount.nfs fails with OSError

        This test covers the OSError path when mount.nfs is installed.
        """
        drv = nfs.NfsDriver(configuration=self.configuration)

        mock_os_path_exists = self.mock_object(os.path, 'exists')
        mock_os_path_exists.return_value = True
        mock_execute = self.mock_object(drv, '_execute')
        mock_execute.side_effect = OSError(
            errno.EPERM, 'Operation... BROKEN')

        with self.assertRaisesRegex(OSError, '.*Operation... BROKEN'):
            drv.do_setup(self.context)

        mock_os_path_exists.assert_has_calls(
            [mock.call(self.configuration.nfs_shares_config)])
        mock_execute.assert_has_calls(
            [mock.call('mount.nfs', check_exit_code=False, run_as_root=True)])

    def test_retype_is_there(self):
        """Ensure that driver.retype() is there."""
        drv = nfs.NfsDriver(configuration=self.configuration)
        v1 = fake_volume.fake_volume_obj(self.context)

        ret = drv.retype(self.context,
                         v1,
                         mock.sentinel.new_type,
                         mock.sentinel.diff,
                         mock.sentinel.host)

        self.assertEqual((False, None), ret)
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0
cinder-27.0.0/cinder/tests/unit/volume/drivers/test_prophetstor_dpl.py0000664000175000017500000011316100000000000026305 0ustar00zuulzuul00000000000000
# Copyright (c) 2014 ProphetStor, Inc.
# All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
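# The tests below exercise the ProphetStor DPL client and iSCSI driver
# against MagicMock doubles (self.DPL_MOCK): each test triggers a Cinder
# driver entry point and then asserts the HTTP method, the URI built from
# the DPL_VER_V1/DPL_OBJ_* constants, and the request body that the driver
# is expected to send, using the canned DATA_* payloads as backend replies.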
import errno from http import HTTPStatus import re from unittest import mock from oslo_utils import units from cinder import context from cinder import exception from cinder.objects import fields from cinder.tests.unit import fake_constants from cinder.tests.unit import fake_snapshot from cinder.tests.unit import test from cinder.tests.unit import utils as test_utils from cinder.volume import configuration as conf from cinder.volume.drivers.prophetstor import dpl_iscsi as DPLDRIVER from cinder.volume.drivers.prophetstor import dplcommon as DPLCOMMON from cinder.volume import group_types POOLUUID = 'ac33fc6e417440d5a1ef27d7231e1cc4' VOLUMEUUID = 'a000000000000000000000000000001' INITIATOR = 'iqn.2013-08.org.debian:01:aaaaaaaa' DATA_IN_CONNECTOR = {'initiator': INITIATOR} DATA_SERVER_INFO = 0, { 'metadata': {'vendor': 'ProphetStor', 'version': '1.5'}} DATA_POOLS = 0, { 'children': [POOLUUID] } DATA_POOLINFO = 0, { 'capabilitiesURI': '', 'children': [], 'childrenrange': '', 'completionStatus': 'Complete', 'metadata': {'available_capacity': 4294967296, 'ctime': 1390551362349, 'vendor': 'prophetstor', 'version': '1.5', 'display_description': 'Default Pool', 'display_name': 'default_pool', 'event_uuid': '4f7c4d679a664857afa4d51f282a516a', 'physical_device': {'cache': [], 'data': ['disk_uuid_0', 'disk_uuid_1', 'disk_uuid_2'], 'log': [], 'spare': []}, 'pool_uuid': POOLUUID, 'properties': {'raid_level': 'raid0'}, 'state': 'Online', 'used_capacity': 0, 'total_capacity': 4294967296, 'zpool_guid': '8173612007304181810'}, 'objectType': 'application/cdmi-container', 'percentComplete': 100} DATA_ASSIGNVDEV = 0, { 'children': [], 'childrenrange': '', 'completionStatus': 'Complete', 'domainURI': '', 'exports': {'Network/iSCSI': [ {'logical_unit_name': '', 'logical_unit_number': '101', 'permissions': [INITIATOR], 'portals': ['172.31.1.210:3260'], 'target_identifier': 'iqn.2013-09.com.prophetstor:hypervisor.886423051816' }]}, 'metadata': {'ctime': 0, 'event_uuid': 'c11e90287e9348d0b4889695f1ec4be5', 'type': 'volume'}, 'objectID': '', 'objectName': 'd827e23d403f4f12bb208a6fec208fd8', 'objectType': 'application/cdmi-container', 'parentID': '8daa374670af447e8efea27e16bf84cd', 'parentURI': '/dpl_volume', 'snapshots': [] } DATA_OUTPUT = 0, None MOD_OUTPUT = {'status': 'available'} DATA_IN_GROUP = {'id': 'fe2dbc51-5810-451d-ab2f-8c8a48d15bee', 'name': 'group123', 'description': 'des123', 'status': ''} DATA_IN_VOLUME = {'id': 'c11e902-87e9-348d-0b48-89695f1ec4be5', 'display_name': 'abc123', 'display_description': '', 'size': 10, 'host': "hostname@backend#%s" % POOLUUID} DATA_IN_VOLUME_VG = {'id': 'fe2dbc5-1581-0451-dab2-f8c8a48d15bee', 'display_name': 'abc123', 'display_description': '', 'size': 10, 'group_id': 'fe2dbc51-5810-451d-ab2f-8c8a48d15bee', 'status': 'available', 'host': "hostname@backend#%s" % POOLUUID} DATA_IN_REMOVE_VOLUME_VG = { 'id': 'fe2dbc515810451dab2f8c8a48d15bee', 'display_name': 'fe2dbc515810451dab2f8c8a48d15bee', 'display_description': '', 'size': 10, 'group_id': 'fe2dbc51-5810-451d-ab2f-8c8a48d15bee', 'status': 'available', 'host': "hostname@backend#%s" % POOLUUID} DATA_IN_VOLUME1 = {'id': 'c11e902-87e9-348d-0b48-89695f1ec4bef', 'display_name': 'abc456', 'display_description': '', 'size': 10, 'host': "hostname@backend#%s" % POOLUUID} DATA_IN_CG_SNAPSHOT = { 'group_id': 'fe2dbc51-5810-451d-ab2f-8c8a48d15bee', 'id': 'cgsnapshot1', 'name': 'cgsnapshot1', 'description': 'cgsnapshot1', 'status': ''} DATA_IN_SNAPSHOT = {'id': 'fe2dbc5-1581-0451-dab2-f8c8a48d15bee', 'volume_id': 
'c11e902-87e9-348d-0b48-89695f1ec4be5', 'display_name': 'snapshot1', 'display_description': '', 'volume_size': 5} DATA_OUT_SNAPSHOT_CG = { 'id': 'snapshot1', 'volume_id': 'c11e902-87e9-348d-0b48-89695f1ec4be5', 'display_name': 'snapshot1', 'display_description': '', 'group_snapshot_id': 'fe2dbc51-5810-451d-ab2f-8c8a48d15bee'} DATA_OUT_CG = { "objectType": "application/cdmi-container", "objectID": "fe2dbc515810451dab2f8c8a48d15bee", "objectName": "", "parentURI": "/dpl_volgroup", "parentID": "fe2dbc515810451dab2f8c8a48d15bee", "domainURI": "", "capabilitiesURI": "", "completionStatus": "Complete", "percentComplete": 100, "metadata": { "type": "volume|snapshot|replica", "volume_group_uuid": "", "origin_uuid": "", "snapshot_uuid": "", "display_name": "", "display_description": "", "ctime": 12345678, "total_capacity": 1024, "snapshot_used_capacity": 0, "maximum_snapshot": 1024, "snapshot_quota": 0, "state": "", "properties": { "snapshot_rotation": True, } }, "childrenrange": "", "children": [ 'fe2dbc515810451dab2f8c8a48d15bee', ], } class TestProphetStorDPLVolume(test.TestCase): def _gen_snapshot_url(self, vdevid, snapshotid): snapshot_url = '/%s/%s/%s' % (vdevid, DPLCOMMON.DPL_OBJ_SNAPSHOT, snapshotid) return snapshot_url def setUp(self): super(TestProphetStorDPLVolume, self).setUp() self.dplcmd = DPLCOMMON.DPLVolume('1.1.1.1', 8356, 'admin', 'password') self.DPL_MOCK = mock.MagicMock() self.dplcmd.objCmd = self.DPL_MOCK self.DPL_MOCK.send_cmd.return_value = DATA_OUTPUT def test_getserverinfo(self): self.dplcmd.get_server_info() self.DPL_MOCK.send_cmd.assert_called_once_with( 'GET', '/%s/%s/' % (DPLCOMMON.DPL_VER_V1, DPLCOMMON.DPL_OBJ_SYSTEM), None, [HTTPStatus.OK, HTTPStatus.ACCEPTED]) def test_createvdev(self): self.dplcmd.create_vdev(DATA_IN_VOLUME['id'], DATA_IN_VOLUME['display_name'], DATA_IN_VOLUME['display_description'], POOLUUID, int(DATA_IN_VOLUME['size']) * units.Gi) metadata = {} metadata['display_name'] = DATA_IN_VOLUME['display_name'] metadata['display_description'] = DATA_IN_VOLUME['display_description'] metadata['pool_uuid'] = POOLUUID metadata['total_capacity'] = int(DATA_IN_VOLUME['size']) * units.Gi metadata['maximum_snapshot'] = 1024 metadata['properties'] = dict(thin_provision=True) params = {} params['metadata'] = metadata self.DPL_MOCK.send_cmd.assert_called_once_with( 'PUT', '/%s/%s/%s/' % (DPLCOMMON.DPL_VER_V1, DPLCOMMON.DPL_OBJ_VOLUME, DATA_IN_VOLUME['id']), params, [HTTPStatus.OK, HTTPStatus.ACCEPTED, HTTPStatus.CREATED]) def test_extendvdev(self): self.dplcmd.extend_vdev(DATA_IN_VOLUME['id'], DATA_IN_VOLUME['display_name'], DATA_IN_VOLUME['display_description'], int(DATA_IN_VOLUME['size']) * units.Gi) metadata = {} metadata['display_name'] = DATA_IN_VOLUME['display_name'] metadata['display_description'] = DATA_IN_VOLUME['display_description'] metadata['total_capacity'] = int(DATA_IN_VOLUME['size']) * units.Gi metadata['maximum_snapshot'] = 1024 params = {} params['metadata'] = metadata self.DPL_MOCK.send_cmd.assert_called_once_with( 'PUT', '/%s/%s/%s/' % (DPLCOMMON.DPL_VER_V1, DPLCOMMON.DPL_OBJ_VOLUME, DATA_IN_VOLUME['id']), params, [HTTPStatus.OK, HTTPStatus.ACCEPTED, HTTPStatus.CREATED]) def test_deletevdev(self): self.dplcmd.delete_vdev(DATA_IN_VOLUME['id'], True) metadata = {} params = {} metadata['force'] = True params['metadata'] = metadata self.DPL_MOCK.send_cmd.assert_called_once_with( 'DELETE', '/%s/%s/%s/' % (DPLCOMMON.DPL_VER_V1, DPLCOMMON.DPL_OBJ_VOLUME, DATA_IN_VOLUME['id']), params, [HTTPStatus.OK, HTTPStatus.ACCEPTED, HTTPStatus.NOT_FOUND, 
HTTPStatus.NO_CONTENT]) def test_createvdevfromsnapshot(self): self.dplcmd.create_vdev_from_snapshot( DATA_IN_VOLUME['id'], DATA_IN_VOLUME['display_name'], DATA_IN_VOLUME['display_description'], DATA_IN_SNAPSHOT['id'], POOLUUID) metadata = {} params = {} metadata['snapshot_operation'] = 'copy' metadata['display_name'] = DATA_IN_VOLUME['display_name'] metadata['display_description'] = DATA_IN_VOLUME['display_description'] metadata['pool_uuid'] = POOLUUID metadata['maximum_snapshot'] = 1024 metadata['properties'] = dict(thin_provision=True) params['metadata'] = metadata params['copy'] = self._gen_snapshot_url(DATA_IN_VOLUME['id'], DATA_IN_SNAPSHOT['id']) self.DPL_MOCK.send_cmd.assert_called_once_with( 'PUT', '/%s/%s/%s/' % (DPLCOMMON.DPL_VER_V1, DPLCOMMON.DPL_OBJ_VOLUME, DATA_IN_VOLUME['id']), params, [HTTPStatus.OK, HTTPStatus.ACCEPTED, HTTPStatus.CREATED]) def test_getpool(self): self.dplcmd.get_pool(POOLUUID) self.DPL_MOCK.send_cmd.assert_called_once_with( 'GET', '/%s/%s/%s/' % (DPLCOMMON.DPL_VER_V1, DPLCOMMON.DPL_OBJ_POOL, POOLUUID), None, [HTTPStatus.OK, HTTPStatus.ACCEPTED]) def test_clonevdev(self): self.dplcmd.clone_vdev( DATA_IN_VOLUME['id'], DATA_IN_VOLUME1['id'], POOLUUID, DATA_IN_VOLUME['display_name'], DATA_IN_VOLUME['display_description'], int(DATA_IN_VOLUME['size']) * units.Gi ) metadata = {} params = {} metadata["snapshot_operation"] = "clone" metadata["display_name"] = DATA_IN_VOLUME['display_name'] metadata["display_description"] = DATA_IN_VOLUME['display_description'] metadata["pool_uuid"] = POOLUUID metadata["total_capacity"] = int(DATA_IN_VOLUME['size']) * units.Gi metadata['maximum_snapshot'] = 1024 metadata['properties'] = dict(thin_provision=True) params["metadata"] = metadata params["copy"] = DATA_IN_VOLUME['id'] self.DPL_MOCK.send_cmd.assert_called_once_with( 'PUT', '/%s/%s/%s/' % (DPLCOMMON.DPL_VER_V1, DPLCOMMON.DPL_OBJ_VOLUME, DATA_IN_VOLUME1['id']), params, [HTTPStatus.OK, HTTPStatus.CREATED, HTTPStatus.ACCEPTED]) def test_createvdevsnapshot(self): self.dplcmd.create_vdev_snapshot( DATA_IN_VOLUME['id'], DATA_IN_SNAPSHOT['id'], DATA_IN_SNAPSHOT['display_name'], DATA_IN_SNAPSHOT['display_description'] ) metadata = {} params = {} metadata['display_name'] = DATA_IN_SNAPSHOT['display_name'] metadata['display_description'] = ( DATA_IN_SNAPSHOT['display_description']) params['metadata'] = metadata params['snapshot'] = DATA_IN_SNAPSHOT['id'] self.DPL_MOCK.send_cmd.assert_called_once_with( 'PUT', '/%s/%s/%s/' % (DPLCOMMON.DPL_VER_V1, DPLCOMMON.DPL_OBJ_VOLUME, DATA_IN_VOLUME['id']), params, [HTTPStatus.OK, HTTPStatus.CREATED, HTTPStatus.ACCEPTED]) def test_getvdev(self): self.dplcmd.get_vdev(DATA_IN_VOLUME['id']) self.DPL_MOCK.send_cmd.assert_called_once_with( 'GET', '/%s/%s/%s/' % (DPLCOMMON.DPL_VER_V1, DPLCOMMON.DPL_OBJ_VOLUME, DATA_IN_VOLUME['id']), None, [HTTPStatus.OK, HTTPStatus.ACCEPTED, HTTPStatus.NOT_FOUND]) def test_getvdevstatus(self): self.dplcmd.get_vdev_status(DATA_IN_VOLUME['id'], '123456') self.DPL_MOCK.send_cmd.assert_called_once_with( 'GET', '/%s/%s/%s/?event_uuid=%s' % (DPLCOMMON.DPL_VER_V1, DPLCOMMON.DPL_OBJ_VOLUME, DATA_IN_VOLUME['id'], '123456'), None, [HTTPStatus.OK, HTTPStatus.NOT_FOUND]) def test_getpoolstatus(self): self.dplcmd.get_pool_status(POOLUUID, '123456') self.DPL_MOCK.send_cmd.assert_called_once_with( 'GET', '/%s/%s/%s/?event_uuid=%s' % (DPLCOMMON.DPL_VER_V1, DPLCOMMON.DPL_OBJ_POOL, POOLUUID, '123456'), None, [HTTPStatus.OK, HTTPStatus.NOT_FOUND]) def test_assignvdev(self): self.dplcmd.assign_vdev( DATA_IN_VOLUME['id'], 
'iqn.1993-08.org.debian:01:test1', '', '1.1.1.1:3260', 0 ) params = {} metadata = {} exports = {} metadata['export_operation'] = 'assign' exports['Network/iSCSI'] = {} target_info = {} target_info['logical_unit_number'] = 0 target_info['logical_unit_name'] = '' permissions = [] portals = [] portals.append('1.1.1.1:3260') permissions.append('iqn.1993-08.org.debian:01:test1') target_info['permissions'] = permissions target_info['portals'] = portals exports['Network/iSCSI'] = target_info params['metadata'] = metadata params['exports'] = exports self.DPL_MOCK.send_cmd.assert_called_once_with( 'PUT', '/%s/%s/%s/' % (DPLCOMMON.DPL_VER_V1, DPLCOMMON.DPL_OBJ_VOLUME, DATA_IN_VOLUME['id']), params, [HTTPStatus.OK, HTTPStatus.ACCEPTED, HTTPStatus.CREATED]) def test_unassignvdev(self): self.dplcmd.unassign_vdev(DATA_IN_VOLUME['id'], 'iqn.1993-08.org.debian:01:test1', '') params = {} metadata = {} exports = {} metadata['export_operation'] = 'unassign' params['metadata'] = metadata exports['Network/iSCSI'] = {} exports['Network/iSCSI']['target_identifier'] = '' permissions = [] permissions.append('iqn.1993-08.org.debian:01:test1') exports['Network/iSCSI']['permissions'] = permissions params['exports'] = exports self.DPL_MOCK.send_cmd.assert_called_once_with( 'PUT', '/%s/%s/%s/' % (DPLCOMMON.DPL_VER_V1, DPLCOMMON.DPL_OBJ_VOLUME, DATA_IN_VOLUME['id']), params, [HTTPStatus.OK, HTTPStatus.ACCEPTED, HTTPStatus.NO_CONTENT, HTTPStatus.NOT_FOUND]) def test_deletevdevsnapshot(self): self.dplcmd.delete_vdev_snapshot(DATA_IN_VOLUME['id'], DATA_IN_SNAPSHOT['id']) params = {} params['copy'] = self._gen_snapshot_url(DATA_IN_VOLUME['id'], DATA_IN_SNAPSHOT['id']) self.DPL_MOCK.send_cmd.assert_called_once_with( 'DELETE', '/%s/%s/%s/%s/%s/' % (DPLCOMMON.DPL_VER_V1, DPLCOMMON.DPL_OBJ_VOLUME, DATA_IN_VOLUME['id'], DPLCOMMON.DPL_OBJ_SNAPSHOT, DATA_IN_SNAPSHOT['id']), None, [HTTPStatus.OK, HTTPStatus.ACCEPTED, HTTPStatus.NO_CONTENT, HTTPStatus.NOT_FOUND]) def test_listvdevsnapshots(self): self.dplcmd.list_vdev_snapshots(DATA_IN_VOLUME['id']) self.DPL_MOCK.send_cmd.assert_called_once_with( 'GET', '/%s/%s/%s/%s/' % (DPLCOMMON.DPL_VER_V1, DPLCOMMON.DPL_OBJ_VOLUME, DATA_IN_VOLUME['id'], DPLCOMMON.DPL_OBJ_SNAPSHOT), None, [HTTPStatus.OK]) class TestProphetStorDPLDriver(test.TestCase): def __init__(self, method): super(TestProphetStorDPLDriver, self).__init__(method) def _conver_uuid2hex(self, strID): return strID.replace('-', '') def setUp(self): super(TestProphetStorDPLDriver, self).setUp() self.configuration = mock.Mock(conf.Configuration) self.configuration.san_ip = '1.1.1.1' self.configuration.dpl_port = 8356 self.configuration.san_login = 'admin' self.configuration.san_password = 'password' self.configuration.dpl_pool = POOLUUID self.configuration.target_port = 3260 self.configuration.san_is_local = False self.configuration.san_thin_provision = True self.configuration.driver_ssl_cert_verify = False self.configuration.driver_ssl_cert_path = None self.context = context.get_admin_context() self.DPL_MOCK = mock.MagicMock() self.DB_MOCK = mock.MagicMock() self.dpldriver = DPLDRIVER.DPLISCSIDriver( configuration=self.configuration) self.dpldriver.dpl = self.DPL_MOCK self.dpldriver.db = self.DB_MOCK self.dpldriver.do_setup(self.context) def test_get_volume_stats(self): self.DPL_MOCK.get_pool.return_value = DATA_POOLINFO self.DPL_MOCK.get_server_info.return_value = DATA_SERVER_INFO res = self.dpldriver.get_volume_stats(True) self.assertEqual('ProphetStor', res['vendor_name']) self.assertEqual('1.5', res['driver_version']) pool = 
res["pools"][0] self.assertEqual(4, pool['total_capacity_gb']) self.assertEqual(4, pool['free_capacity_gb']) self.assertEqual(0, pool['reserved_percentage']) self.assertFalse(pool['QoS_support']) def test_create_volume(self): volume = test_utils.create_volume( self.context, id=DATA_IN_VOLUME['id'], display_name=DATA_IN_VOLUME['display_name'], size=DATA_IN_VOLUME['size'], host=DATA_IN_VOLUME['host']) self.DPL_MOCK.create_vdev.return_value = DATA_OUTPUT self.dpldriver.create_volume(volume) self.DPL_MOCK.create_vdev.assert_called_once_with( self._conver_uuid2hex(volume.id), volume.display_name, volume.display_description, self.configuration.dpl_pool, int(volume.size) * units.Gi, True) def test_create_volume_without_pool(self): volume = test_utils.create_volume( self.context, id=DATA_IN_VOLUME['id'], display_name=DATA_IN_VOLUME['display_name'], size=DATA_IN_VOLUME['size'], host=DATA_IN_VOLUME['host']) self.DPL_MOCK.create_vdev.return_value = DATA_OUTPUT self.configuration.dpl_pool = "" volume.host = "host@backend" # missing pool self.assertRaises(exception.InvalidHost, self.dpldriver.create_volume, volume=volume) def test_create_volume_with_configuration_pool(self): volume = test_utils.create_volume( self.context, id=DATA_IN_VOLUME['id'], display_name=DATA_IN_VOLUME['display_name'], size=DATA_IN_VOLUME['size'], host="host@backend") self.DPL_MOCK.create_vdev.return_value = DATA_OUTPUT self.dpldriver.create_volume(volume) self.DPL_MOCK.create_vdev.assert_called_once_with( self._conver_uuid2hex(volume.id), volume.display_name, volume.display_description, self.configuration.dpl_pool, int(volume.size) * units.Gi, True) def test_create_volume_of_group(self): group_type = group_types.create( self.context, 'group', {'consistent_group_snapshot_enabled': ' True'} ) group = test_utils.create_group( self.context, id=fake_constants.CONSISTENCY_GROUP_ID, host='host@backend#unit_test_pool', group_type_id=group_type.id) self.DPL_MOCK.create_vdev.return_value = DATA_OUTPUT self.DPL_MOCK.join_vg.return_value = DATA_OUTPUT volume = test_utils.create_volume( self.context, id=DATA_IN_VOLUME_VG['id'], display_name=DATA_IN_VOLUME_VG['display_name'], size=DATA_IN_VOLUME_VG['size'], group_id=group.id, host=DATA_IN_VOLUME_VG['host']) self.dpldriver.create_volume(volume) self.DPL_MOCK.create_vdev.assert_called_once_with( self._conver_uuid2hex(volume.id), volume.display_name, volume.display_description, self.configuration.dpl_pool, int(volume.size) * units.Gi, True) self.DPL_MOCK.join_vg.assert_called_once_with( self._conver_uuid2hex(volume.id), self._conver_uuid2hex(volume.group_id)) def test_delete_volume(self): volume = test_utils.create_volume( self.context, id=DATA_IN_VOLUME['id'], display_name=DATA_IN_VOLUME['display_name'], size=DATA_IN_VOLUME['size'], host=DATA_IN_VOLUME['host']) self.DPL_MOCK.delete_vdev.return_value = DATA_OUTPUT self.dpldriver.delete_volume(volume) self.DPL_MOCK.delete_vdev.assert_called_once_with( self._conver_uuid2hex(volume.id)) def test_delete_volume_of_group(self): group_type = group_types.create( self.context, 'group', {'consistent_group_snapshot_enabled': ' True'} ) group = test_utils.create_group( self.context, id=fake_constants.CONSISTENCY_GROUP_ID, host='host@backend#unit_test_pool', group_type_id=group_type.id) volume = test_utils.create_volume( self.context, id=DATA_IN_VOLUME_VG['id'], display_name=DATA_IN_VOLUME_VG['display_name'], size=DATA_IN_VOLUME_VG['size'], group_id=group.id, host=DATA_IN_VOLUME_VG['host']) self.DPL_MOCK.delete_vdev.return_value = DATA_OUTPUT 
self.DPL_MOCK.leave_vg.return_volume = DATA_OUTPUT self.dpldriver.delete_volume(volume) self.DPL_MOCK.leave_vg.assert_called_once_with( self._conver_uuid2hex(volume.id), self._conver_uuid2hex(volume.group_id) ) self.DPL_MOCK.delete_vdev.assert_called_once_with( self._conver_uuid2hex(volume.id)) def test_create_volume_from_snapshot(self): self.DPL_MOCK.create_vdev_from_snapshot.return_value = DATA_OUTPUT self.DPL_MOCK.extend_vdev.return_value = DATA_OUTPUT volume = test_utils.create_volume( self.context, id=DATA_IN_VOLUME_VG['id'], display_name=DATA_IN_VOLUME_VG['display_name'], size=DATA_IN_VOLUME_VG['size'], host=DATA_IN_VOLUME_VG['host']) self.dpldriver.create_volume_from_snapshot( volume, DATA_IN_SNAPSHOT) self.DPL_MOCK.create_vdev_from_snapshot.assert_called_once_with( self._conver_uuid2hex(volume.id), volume.display_name, volume.display_description, self._conver_uuid2hex(volume.id), self.configuration.dpl_pool, True) self.DPL_MOCK.extend_vdev.assert_called_once_with( self._conver_uuid2hex(volume.id), volume.display_name, volume.display_description, volume.size * units.Gi) def test_create_cloned_volume(self): new_volume = test_utils.create_volume( self.context, id=DATA_IN_VOLUME1['id'], display_name=DATA_IN_VOLUME1['display_name'], size=DATA_IN_VOLUME1['size'], host=DATA_IN_VOLUME1['host']) src_volume = test_utils.create_volume( self.context, id=DATA_IN_VOLUME['id']) self.DPL_MOCK.clone_vdev.return_value = DATA_OUTPUT self.dpldriver.create_cloned_volume(new_volume, src_volume) self.DPL_MOCK.clone_vdev.assert_called_once_with( self._conver_uuid2hex(src_volume.id), self._conver_uuid2hex(new_volume.id), self.configuration.dpl_pool, new_volume.display_name, new_volume.display_description, int(new_volume.size) * units.Gi, True) def test_create_snapshot(self): self.DPL_MOCK.create_vdev_snapshot.return_value = DATA_OUTPUT self.dpldriver.create_snapshot(DATA_IN_SNAPSHOT) self.DPL_MOCK.create_vdev_snapshot.assert_called_once_with( self._conver_uuid2hex(DATA_IN_SNAPSHOT['volume_id']), self._conver_uuid2hex(DATA_IN_SNAPSHOT['id']), DATA_IN_SNAPSHOT['display_name'], DATA_IN_SNAPSHOT['display_description']) def test_delete_snapshot(self): self.DPL_MOCK.delete_vdev_snapshot.return_value = DATA_OUTPUT self.dpldriver.delete_snapshot(DATA_IN_SNAPSHOT) self.DPL_MOCK.delete_vdev_snapshot.assert_called_once_with( self._conver_uuid2hex(DATA_IN_SNAPSHOT['volume_id']), self._conver_uuid2hex(DATA_IN_SNAPSHOT['id'])) def test_initialize_connection(self): self.DPL_MOCK.assign_vdev.return_value = DATA_ASSIGNVDEV self.DPL_MOCK.get_vdev.return_value = DATA_ASSIGNVDEV res = self.dpldriver.initialize_connection(DATA_IN_VOLUME, DATA_IN_CONNECTOR) self.assertEqual('iscsi', res['driver_volume_type']) self.assertEqual(101, res['data']['target_lun']) self.assertTrue(res['data']['target_discovered']) self.assertEqual('172.31.1.210:3260', res['data']['target_portal']) self.assertEqual( 'iqn.2013-09.com.prophetstor:hypervisor.886423051816', res['data']['target_iqn']) def test_terminate_connection(self): self.DPL_MOCK.unassign_vdev.return_value = DATA_OUTPUT self.dpldriver.terminate_connection(DATA_IN_VOLUME, DATA_IN_CONNECTOR) self.DPL_MOCK.unassign_vdev.assert_called_once_with( self._conver_uuid2hex(DATA_IN_VOLUME['id']), DATA_IN_CONNECTOR['initiator']) def test_terminate_connection_volume_detached(self): self.DPL_MOCK.unassign_vdev.return_value = errno.ENODATA, None self.dpldriver.terminate_connection(DATA_IN_VOLUME, DATA_IN_CONNECTOR) self.DPL_MOCK.unassign_vdev.assert_called_once_with( 
self._conver_uuid2hex(DATA_IN_VOLUME['id']), DATA_IN_CONNECTOR['initiator']) def test_terminate_connection_failed(self): self.DPL_MOCK.unassign_vdev.return_value = errno.EFAULT, None ex = self.assertRaises( exception.VolumeBackendAPIException, self.dpldriver.terminate_connection, volume=DATA_IN_VOLUME, connector=DATA_IN_CONNECTOR) self.assertIsNotNone( re.match(r".*Flexvisor failed", ex.msg)) def test_get_pool_info(self): self.DPL_MOCK.get_pool.return_value = DATA_POOLINFO _, res = self.dpldriver._get_pool_info(POOLUUID) self.assertEqual(4294967296, res['metadata']['available_capacity']) self.assertEqual(1390551362349, res['metadata']['ctime']) self.assertEqual('Default Pool', res['metadata']['display_description']) self.assertEqual('default_pool', res['metadata']['display_name']) self.assertEqual('4f7c4d679a664857afa4d51f282a516a', res['metadata']['event_uuid']) self.assertEqual( {'cache': [], 'data': ['disk_uuid_0', 'disk_uuid_1', 'disk_uuid_2'], 'log': [], 'spare': []}, res['metadata']['physical_device']) self.assertEqual(POOLUUID, res['metadata']['pool_uuid']) self.assertEqual( {'raid_level': 'raid0'}, res['metadata']['properties']) self.assertEqual('Online', res['metadata']['state']) self.assertEqual(4294967296, res['metadata']['total_capacity']) self.assertEqual('8173612007304181810', res['metadata']['zpool_guid']) def test_create_group(self): group_type = group_types.create( self.context, 'group', {'consistent_group_snapshot_enabled': ' True'} ) group = test_utils.create_group( self.context, id=fake_constants.CONSISTENCY_GROUP_ID, host='host@backend#unit_test_pool', group_type_id=group_type.id) self.DPL_MOCK.create_vg.return_value = DATA_OUTPUT model_update = self.dpldriver.create_group(self.context, group) self.DPL_MOCK.create_vg.assert_called_once_with( self._conver_uuid2hex(fake_constants.CONSISTENCY_GROUP_ID), 'test_group', 'this is a test group') self.assertDictEqual({'status': ( fields.ConsistencyGroupStatus.AVAILABLE)}, model_update) def test_delete_group(self): group_type = group_types.create( self.context, 'group', {'consistent_group_snapshot_enabled': ' True'} ) group = test_utils.create_group( self.context, id=fake_constants.CONSISTENCY_GROUP_ID, host='host@backend#unit_test_pool', group_type_id=group_type.id) self.DB_MOCK.volume_get_all_by_group.return_value = ( [DATA_IN_VOLUME_VG]) self.DPL_MOCK.delete_vdev.return_value = DATA_OUTPUT self.DPL_MOCK.delete_cg.return_value = DATA_OUTPUT model_update, volumes = self.dpldriver.delete_group( self.context, group, []) self.DPL_MOCK.delete_vg.assert_called_once_with( self._conver_uuid2hex(fake_constants.CONSISTENCY_GROUP_ID)) self.DPL_MOCK.delete_vdev.assert_called_once_with( self._conver_uuid2hex((DATA_IN_VOLUME_VG['id']))) self.assertDictEqual({'status': ( fields.ConsistencyGroupStatus.DELETED)}, model_update) def test_update_group(self): group_type = group_types.create( self.context, 'group', {'consistent_group_snapshot_enabled': ' True'} ) self.DPL_MOCK.get_vg.return_value = (0, DATA_OUT_CG) self.DPL_MOCK.join_vg.return_value = DATA_OUTPUT self.DPL_MOCK.leave_vg.return_value = DATA_OUTPUT group = test_utils.create_group( self.context, id='fe2dbc51-5810-451d-ab2f-8c8a48d15bee', host='host@backend#unit_test_pool', group_type_id=group_type.id) vol_add = test_utils.create_volume( self.context, id=fake_constants.VOLUME2_ID, display_name=DATA_IN_VOLUME_VG['display_name'], size=DATA_IN_VOLUME_VG['size'], group_id='fe2dbc51-5810-451d-ab2f-8c8a48d15bee', host=DATA_IN_VOLUME_VG['host']) vol_del = test_utils.create_volume( 
self.context, id=DATA_IN_REMOVE_VOLUME_VG['id'], display_name=DATA_IN_REMOVE_VOLUME_VG['display_name'], size=DATA_IN_REMOVE_VOLUME_VG['size'], group_id='fe2dbc51-5810-451d-ab2f-8c8a48d15bee', host=DATA_IN_REMOVE_VOLUME_VG['host']) (model_update, add_vols, remove_vols) = ( self.dpldriver.update_group( self.context, group, [vol_add], [vol_del])) self.DPL_MOCK.join_vg.assert_called_once_with( self._conver_uuid2hex(vol_add.id), self._conver_uuid2hex(group.id)) self.DPL_MOCK.leave_vg.assert_called_once_with( self._conver_uuid2hex(vol_del.id), self._conver_uuid2hex(group.id)) self.assertDictEqual({'status': ( fields.ConsistencyGroupStatus.AVAILABLE)}, model_update) def test_update_group_exception_join(self): group_type = group_types.create( self.context, 'group', {'consistent_group_snapshot_enabled': ' True'} ) self.DPL_MOCK.get_vg.return_value = (0, DATA_OUT_CG) self.DPL_MOCK.join_vg.return_value = -1, None self.DPL_MOCK.leave_vg.return_value = DATA_OUTPUT volume = test_utils.create_volume( self.context, id=fake_constants.VOLUME2_ID, display_name=DATA_IN_VOLUME_VG['display_name'], size=DATA_IN_VOLUME_VG['size'], host=DATA_IN_VOLUME_VG['host']) group = test_utils.create_group( self.context, id=fake_constants.CONSISTENCY_GROUP_ID, host='host@backend#unit_test_pool', group_type_id=group_type.id) self.assertRaises(exception.VolumeBackendAPIException, self.dpldriver.update_group, context=None, group=group, add_volumes=[volume], remove_volumes=None) def test_update_group_exception_leave(self): group_type = group_types.create( self.context, 'group', {'consistent_group_snapshot_enabled': ' True'} ) self.DPL_MOCK.get_vg.return_value = (0, DATA_OUT_CG) self.DPL_MOCK.leave_vg.return_value = -1, None volume = test_utils.create_volume( self.context, id='fe2dbc51-5810-451d-ab2f-8c8a48d15bee', display_name=DATA_IN_VOLUME_VG['display_name'], size=DATA_IN_VOLUME_VG['size'], host=DATA_IN_VOLUME_VG['host']) group = test_utils.create_group( self.context, id=fake_constants.CONSISTENCY_GROUP_ID, host='host@backend#unit_test_pool', group_type_id=group_type.id) self.assertRaises(exception.VolumeBackendAPIException, self.dpldriver.update_group, context=None, group=group, add_volumes=None, remove_volumes=[volume]) @mock.patch( 'cinder.objects.snapshot.SnapshotList.get_all_for_group_snapshot') def test_create_group_snapshot(self, get_all_for_group_snapshot): group_type = group_types.create( self.context, 'group', {'consistent_group_snapshot_enabled': ' True'} ) snapshot_obj = fake_snapshot.fake_snapshot_obj(self.context) snapshot_obj.group_id = \ DATA_IN_CG_SNAPSHOT['group_id'] snapshot_obj.group_type_id = group_type.id get_all_for_group_snapshot.return_value = [snapshot_obj] self.DPL_MOCK.create_vdev_snapshot.return_value = DATA_OUTPUT model_update, snapshots = self.dpldriver.create_group_snapshot( self.context, snapshot_obj, []) self.assertDictEqual({'status': 'available'}, model_update) @mock.patch( 'cinder.objects.snapshot.SnapshotList.get_all_for_group_snapshot') def test_delete_group_snapshot(self, get_all_for_group_snapshot): group_type = group_types.create( self.context, 'group', {'consistent_group_snapshot_enabled': ' True'} ) snapshot_obj = fake_snapshot.fake_snapshot_obj(self.context) snapshot_obj.group_id = \ DATA_IN_CG_SNAPSHOT['group_id'] snapshot_obj.group_type_id = group_type.id get_all_for_group_snapshot.return_value = [snapshot_obj] self.DPL_MOCK.delete_group_snapshot.return_value = DATA_OUTPUT model_update, snapshots = self.dpldriver.delete_group_snapshot( self.context, snapshot_obj, []) 
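        # The assertion below checks that the group snapshot delete is
        # translated into a single delete_vdev_snapshot() call keyed by the
        # dash-less group id and snapshot id (the trailing True flag
        # presumably marks it as a group-level snapshot operation).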
self.DPL_MOCK.delete_vdev_snapshot.assert_called_once_with( self._conver_uuid2hex(snapshot_obj.group_id), self._conver_uuid2hex(snapshot_obj.id), True) self.assertDictEqual({'status': 'deleted'}, model_update) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315528.0 cinder-27.0.0/cinder/tests/unit/volume/drivers/test_pure.py0000664000175000017500000110075700000000000024040 0ustar00zuulzuul00000000000000# Copyright (c) 2024 Pure Storage, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from copy import deepcopy import json import pprint import sys from unittest import mock import ddt from oslo_utils import units from cinder import context from cinder import exception from cinder.objects import fields from cinder.objects import volume_type from cinder.tests.unit.consistencygroup import fake_cgsnapshot from cinder.tests.unit import fake_constants as fake from cinder.tests.unit import fake_group from cinder.tests.unit import fake_group_snapshot from cinder.tests.unit import fake_snapshot from cinder.tests.unit import fake_volume from cinder.tests.unit import test from cinder.volume import qos_specs from cinder.volume import volume_types from cinder.volume import volume_utils def fake_retry(exceptions, interval=1, retries=3, backoff_rate=2): def _decorator(f): return f return _decorator patch_retry = mock.patch('cinder.utils.retry', fake_retry) patch_retry.start() sys.modules['pypureclient'] = mock.Mock() from cinder.volume.drivers import pure # noqa # Only mock utils.retry for cinder.volume.drivers.pure import patch_retry.stop() # This part is copied from the Pure 2.x REST API code class Parameters(object): """A class for static parameter names. """ continuation_token = 'continuation_token' filter = 'filter' limit = 'limit' offset = 'offset' sort = 'sort' x_request_id = 'x_request_id' class Headers(object): """A class for static header names. """ api_token = 'api-token' authorization = 'Authorization' x_auth_token = 'x-auth-token' x_request_id = 'X-Request-ID' x_ratelimit_sec = 'X-RateLimit-Limit-second' x_ratelimit_min = 'X-RateLimit-Limit-minute' x_ratelimit_remaining_sec = 'X-RateLimit-Remaining-second' x_ratelimit_remaining_min = 'X-RateLimit-Remaining-minute' class ItemIterator(object): """An iterator for items of a collection returned by the server. """ def __init__(self, client, api_endpoint, kwargs, continuation_token, total_item_count, items, x_request_id, more_items_remaining=None, response_size_limit=1000): """Initialize an ItemIterator. Args: client (Client): A Pure1 Client that can call the API. api_endpoint (function): The function that corresponds to the internal API call. kwargs (dict): The kwargs of the initial call. continuation_token (str): The continuation token provided by the server. May be None. total_item_count (int): The total number of items available in the collection. items (list[object]): The items returned from the initial response. x_request_id (str): The X-Request-ID to use for all subsequent calls. 
""" self._response_size_limit = response_size_limit self._client = client self._api_endpoint = api_endpoint self._kwargs = kwargs self._continuation_token = '\'{}\''.format(continuation_token) self._total_item_count = total_item_count self._more_items_remaining = more_items_remaining self._items = items self._x_request_id = x_request_id self._index = 0 def __iter__(self): """Creates a new iterator. Returns: ItemIterator """ return self def __next__(self): """Get the next item in the collection. If there are no items left to get from the last response, it calls the API again to get more items. Returns: object Raises: StopIteration: If there are no more items to return, or if there was an error calling the API. """ # If we've reached the end of the desired limit, stop if Parameters.limit in self._kwargs and \ self._kwargs.get(Parameters.limit) <= self._index: raise StopIteration # If we've reached the end of all possible items, stop if self._total_item_count is not None and self._total_item_count \ <= self._index: raise StopIteration if self._response_size_limit is None: item_index = self._index else: item_index = self._index % self._response_size_limit # If we've reached the end of the current collection, get more data if item_index == len(self._items): if self._more_items_remaining is False: raise StopIteration self._refresh_data() # Return the next item in the current list if possible if item_index < len(self._items): to_return = self._items[item_index] self._index += 1 return to_return # If no new data was given, just stop raise StopIteration def __len__(self): """Get the length of collection. Number of items returned is not guaranteed to be the length of collection at the start. Returns: int """ return self._total_item_count or len(self._items) def _refresh_data(self): """Call the API to collect more items and updates the internal state. Raises: StopIteration: If there was an error calling the API. """ # Use continuation token if provided if Parameters.continuation_token in self._kwargs: self._kwargs[Parameters.continuation_token] = \ self._continuation_token else: # Use offset otherwise (no continuation token with sorts) self._kwargs[Parameters.offset] = len(self._items) if self._x_request_id is not None: self._kwargs[Parameters.x_request_id] = self._x_request_id # Call the API again and update internal state response, is_error = self._client._call_api(self._api_endpoint, self._kwargs) if is_error is True: raise StopIteration body, _, _ = response self._continuation_token = '\'{}\''.format(body.continuation_token) self._total_item_count = body.total_item_count self._items = body.items class ResponseHeaders(object): """An object that includes headers from the server response. """ def __init__(self, x_request_id, x_ratelimit_limit_second, x_ratelimit_limit_minute, x_ratelimit_remaining_second, x_ratelimit_remaining_minute): """Initialize a ResponseHeaders. Args: x_request_id (str): The X-Request-ID from the client or generated by the server. x_ratelimit_limit_second (int): The number of requests available per second. x_ratelimit_limit_minute (int): The number of requests available per minute. x_ratelimit_remaining_second (int): The number of requests remaining in that second. x_ratelimit_remaining_minute (int): The number of requests remaining in that minute. 
""" self.x_request_id = x_request_id self.x_ratelimit_limit_second = x_ratelimit_limit_second self.x_ratelimit_limit_minute = x_ratelimit_limit_minute self.x_ratelimit_remaining_second = x_ratelimit_remaining_second self.x_ratelimit_remaining_minute = x_ratelimit_remaining_minute def to_dict(self): """Return a dictionary of the class attributes. Returns: dict """ return self.__dict__ def __repr__(self): """Return a pretty formatted string of the object. Returns: str """ return pprint.pformat(self.to_dict()) def _create_response_headers(headers): response_headers = None if headers and headers.get(Headers.x_request_id, None): RH = ResponseHeaders(headers.get(Headers.x_request_id, None), headers.get(Headers.x_ratelimit_sec, None), headers.get(Headers.x_ratelimit_min, None), headers.get(Headers.x_ratelimit_remaining_sec, None), headers.get(Headers.x_ratelimit_remaining_min, None)) response_headers = RH return response_headers class Response(object): """An abstract response that is extended to a valid or error response. """ def __init__(self, status_code, headers): """Initialize a Response. Args: status_code (int): The HTTP status code. headers (dict): Response headers from the server. """ self.status_code = status_code self.headers = _create_response_headers(headers) class ValidResponse(Response): """A response that indicates the request was successful and has the returned data. """ def __init__(self, status_code, continuation_token, total_item_count, items, headers, total=None, more_items_remaining=None): """Initialize a ValidResponse. Args: status_code (int): The HTTP status code. continuation_token (str): An opaque token to iterate over a collection of resources. May be None. total_item_count (int): The total number of items available in the collection. items (ItemIterator): An iterator over the items in the collection. headers (dict): Response headers from the server. """ super(ValidResponse, self).__init__(status_code, headers) self.continuation_token = continuation_token self.total_item_count = total_item_count self.items = items if total is not None: self.total = total if more_items_remaining is not None: self.more_items_remaining = more_items_remaining def to_dict(self): """Return a dictionary of the class attributes. It will convert the items to a list of items by exhausting the iterator. If any items were previously iterated, they will be missed. Returns: dict """ new_dict = dict(self.__dict__) if isinstance(self.items, ItemIterator): new_dict['items'] = [item.to_dict() for item in list(self.items)] new_dict['headers'] = (self.headers.to_dict if self.headers is not None else None) if hasattr(self, 'total') and isinstance(self.total, list): new_dict['total'] = [item.to_dict() for item in self.total] return new_dict def __repr__(self): """Return a pretty formatted string of the object. Does not convert the items to a list of items by using the iterator. Returns: str """ new_dict = dict(self.__dict__) if self.headers: new_dict['headers'] = self.headers.to_dict() return pprint.pformat(new_dict) class ErrorResponse(Response): """A response that indicates there was an error with the request and has the list of errors. """ def __init__(self, status_code, errors, headers): """Initialize an ErrorResponse. Args: status_code (int): The HTTP status code. errors (list[ApiError]): The list of errors encountered. headers (dict): Response headers from the server. 
""" super(ErrorResponse, self).__init__(status_code, headers) self.errors = errors def to_dict(self): """Return a dictionary of the class attributes. Returns: dict """ new_dict = dict(self.__dict__) new_dict['errors'] = [err.to_dict() for err in new_dict['errors']] new_dict['headers'] = (self.headers.to_dict if self.headers is not None else None) return new_dict def __repr__(self): """Return a pretty formatted string of the object. Returns: str """ return pprint.pformat(self.to_dict()) # Simple implementation of dot notation dictionary class DotNotation(dict): __setattr__ = dict.__setitem__ __delattr__ = dict.__delitem__ def __init__(self, data): if isinstance(data, str): data = json.loads(data) for name, value in data.items(): setattr(self, name, self._wrap(value)) def __getattr__(self, attr): def _traverse(obj, attr): if self._is_indexable(obj): try: return obj[int(attr)] except Exception: return None elif isinstance(obj, dict): return obj.get(attr, None) else: return attr # if '.' in attr: # return reduce(_traverse, attr.split('.'), self) return self.get(attr, None) def _wrap(self, value): if self._is_indexable(value): # (!) recursive (!) return type(value)([self._wrap(v) for v in value]) elif isinstance(value, dict): return DotNotation(value) else: return value @staticmethod def _is_indexable(obj): return isinstance(obj, (tuple, list, set, frozenset)) def __deepcopy__(self, memo=None): return DotNotation(deepcopy(dict(self), memo=memo)) DRIVER_PATH = "cinder.volume.drivers.pure" BASE_DRIVER_OBJ = DRIVER_PATH + ".PureBaseVolumeDriver" ISCSI_DRIVER_OBJ = DRIVER_PATH + ".PureISCSIDriver" FC_DRIVER_OBJ = DRIVER_PATH + ".PureFCDriver" NVME_DRIVER_OBJ = DRIVER_PATH + ".PureNVMEDriver" ARRAY_OBJ = DRIVER_PATH + ".FlashArray" UNMANAGED_SUFFIX = "-unmanaged" GET_ARRAY_PRIMARY = {"version": "99.9.9", "name": "pure_target1", "id": "primary_array_id"} VALID_GET_ARRAY_PRIMARY = ValidResponse(200, None, 1, [DotNotation(GET_ARRAY_PRIMARY)], {}) GET_ARRAY_SECONDARY = {"version": "99.9.9", "name": "pure_target2", "id": "secondary_array_id"} VALID_GET_ARRAY_SECONDARY = ValidResponse(200, None, 1, [DotNotation(GET_ARRAY_SECONDARY)], {}) REPLICATION_TARGET_TOKEN = "12345678-abcd-1234-abcd-1234567890ab" REPLICATION_PROTECTION_GROUP = "cinder-group" REPLICATION_INTERVAL_IN_SEC = 3600 REPLICATION_RETENTION_SHORT_TERM = 14400 REPLICATION_RETENTION_LONG_TERM = 6 REPLICATION_RETENTION_LONG_TERM_PER_DAY = 3 PRIMARY_MANAGEMENT_IP = GET_ARRAY_PRIMARY["name"] API_TOKEN = "12345678-abcd-1234-abcd-1234567890ab" VOLUME_BACKEND_NAME = "Pure_iSCSI" ISCSI_PORT_NAMES = ["ct0.eth2", "ct0.eth3", "ct1.eth2", "ct1.eth3"] NVME_PORT_NAMES = ["ct0.eth8", "ct0.eth9", "ct1.eth8", "ct1.eth9"] FC_PORT_NAMES = ["ct0.fc2", "ct0.fc3", "ct1.fc2", "ct1.fc3"] # These two IP blocks should use the same prefix (see NVME_CIDR_FILTERED to # make sure changes make sense). Our arrays now have 4 IPv4 + 4 IPv6 ports. NVME_IPS = ["10.0.0." + str(i + 1) for i in range(len(NVME_PORT_NAMES))] NVME_IPS += ["[2001:db8::" + str(i + 1) + "]" for i in range(len(NVME_PORT_NAMES))] AC_NVME_IPS = ["10.0.0." 
+ str(i + 1 + len(NVME_PORT_NAMES)) for i in range(len(NVME_PORT_NAMES))] AC_NVME_IPS += ["[2001:db8::1:" + str(i + 1) + "]" for i in range(len(NVME_PORT_NAMES))] NVME_CIDR = "0.0.0.0/0" NVME_CIDR_V6 = "::/0" NVME_PORT = 4420 # Designed to filter out only one of the AC NVMe IPs, leaving the rest in NVME_CIDR_FILTERED = "10.0.0.0/29" # Include several IP / networks: 10.0.0.2, 10.0.0.3, 10.0.0.6, 10.0.0.7 NVME_CIDRS_FILTERED = ["10.0.0.2", "10.0.0.3", "2001:db8::1:2/127"] # These two IP blocks should use the same prefix (see ISCSI_CIDR_FILTERED to # make sure changes make sense). Our arrays now have 4 IPv4 + 4 IPv6 ports. ISCSI_IPS = ["10.0.0." + str(i + 1) for i in range(len(ISCSI_PORT_NAMES))] ISCSI_IPS += ["[2001:db8::1:" + str(i + 1) + "]" for i in range(len(ISCSI_PORT_NAMES))] AC_ISCSI_IPS = ["10.0.0." + str(i + 1 + len(ISCSI_PORT_NAMES)) for i in range(len(ISCSI_PORT_NAMES))] AC_ISCSI_IPS += ["[2001:db8::1:" + str(i + 1) + "]" for i in range(len(ISCSI_PORT_NAMES))] ISCSI_CIDR = "0.0.0.0/0" ISCSI_CIDR_V6 = "::/0" # Designed to filter out only one of the AC ISCSI IPs, leaving the rest in ISCSI_CIDR_FILTERED = '10.0.0.0/29' # Include several IP / networks: 10.0.0.2, 10.0.0.3, 10.0.0.6, 10.0.0.7 ISCSI_CIDRS_FILTERED = ['10.0.0.2', '10.0.0.3', '2001:db8::1:2/127'] FC_WWNS = ["21000024ff59fe9" + str(i + 1) for i in range(len(FC_PORT_NAMES))] AC_FC_WWNS = [ "21000024ff59fab" + str(i + 1) for i in range(len(FC_PORT_NAMES))] HOSTNAME = "computenode1" SYSTEM_UUID = "420456d9-ec23-e120-7084-a8ce8fde3990" CONNECTOR = { "host": HOSTNAME, "system_uuid": SYSTEM_UUID } PURE_HOST_NAME = ( pure.PureBaseVolumeDriver._generate_purity_host_name(CONNECTOR) ) PURE_HOST = { "name": PURE_HOST_NAME, "host_group": None, "nqns": [], "iqns": [], "wwns": [], } INITIATOR_NQN = ( "nqn.2014-08.org.nvmexpress:uuid:6953a373-c3f7-4ea8-ae77-105c393012ff" ) INITIATOR_IQN = "iqn.1993-08.org.debian:01:222" INITIATOR_WWN = "5001500150015081abc" NVME_CONNECTOR = { "nqn": INITIATOR_NQN, "host": HOSTNAME, "system_uuid": SYSTEM_UUID } ISCSI_CONNECTOR = { "initiator": INITIATOR_IQN, "name": HOSTNAME, "host": DotNotation({"name": HOSTNAME}), "system_uuid": SYSTEM_UUID } FC_CONNECTOR = { "wwpns": {INITIATOR_WWN}, "host": HOSTNAME, "system_uuid": SYSTEM_UUID } TARGET_NQN = "nqn.2010-06.com.purestorage:flasharray.12345abc" AC_TARGET_NQN = "nqn.2010-06.com.purestorage:flasharray.67890def" TARGET_IQN = "iqn.2010-06.com.purestorage:flasharray.12345abc" AC_TARGET_IQN = "iqn.2018-06.com.purestorage:flasharray.67890def" TARGET_WWN = "21000024ff59fe94" TARGET_PORT = "3260" TARGET_ROCE_PORT = "4420" INITIATOR_TARGET_MAP = { # _build_initiator_target_map() calls list(set()) on the list, # we must also call list(set()) to get the exact same order '5001500150015081abc': list(set(FC_WWNS)), } AC_INITIATOR_TARGET_MAP = { # _build_initiator_target_map() calls list(set()) on the list, # we must also call list(set()) to get the exact same order '5001500150015081abc': list(set(FC_WWNS + AC_FC_WWNS)), } DEVICE_MAPPING = { "fabric": { 'initiator_port_wwn_list': {INITIATOR_WWN}, 'target_port_wwn_list': FC_WWNS, }, } AC_DEVICE_MAPPING = { "fabric": { 'initiator_port_wwn_list': {INITIATOR_WWN}, 'target_port_wwn_list': FC_WWNS + AC_FC_WWNS, }, } # We now have IPv6 in addition to IPv4 on each interface NVME_PORTS = [{"name": name, "nqn": TARGET_NQN, "iqn": None, "portal": ip + ":" + TARGET_ROCE_PORT, "wwn": None, } for name, ip in zip(NVME_PORT_NAMES * 2, NVME_IPS)] AC_NVME_PORTS = [{"name": name, "nqn": AC_TARGET_NQN, "iqn": None, "portal": ip + ":" + 
TARGET_ROCE_PORT, "wwn": None, } for name, ip in zip(NVME_PORT_NAMES * 2, AC_NVME_IPS)] ISCSI_PORTS = [{"name": name, "iqn": TARGET_IQN, "portal": ip + ":" + TARGET_PORT, "nqn": None, "wwn": None, } for name, ip in zip(ISCSI_PORT_NAMES * 2, ISCSI_IPS)] AC_ISCSI_PORTS = [{"name": name, "iqn": AC_TARGET_IQN, "portal": ip + ":" + TARGET_PORT, "nqn": None, "wwn": None, } for name, ip in zip(ISCSI_PORT_NAMES * 2, AC_ISCSI_IPS)] FC_PORTS = [{"name": name, "iqn": None, "nqn": None, "portal": None, "wwn": wwn, } for name, wwn in zip(FC_PORT_NAMES, FC_WWNS)] AC_FC_PORTS = [{"name": name, "iqn": None, "nqn": None, "portal": None, "wwn": wwn, } for name, wwn in zip(FC_PORT_NAMES, AC_FC_WWNS)] NON_ISCSI_PORT = { "name": "ct0.fc1", "iqn": None, "nqn": None, "portal": None, "wwn": "5001500150015081", } ISCSI_LACP_PORTS = [ { "name": "lacp2", "iqn": TARGET_IQN, "nqn": None, "portal": None, "wwn": None, }, ] NVME_LACP_PORTS = [ { "name": "lacp0", "iqn": None, "nqn": TARGET_NQN, "portal": None, "wwn": None, }, { "name": "lacp1", "iqn": None, "nqn": TARGET_NQN, "portal": None, "wwn": None, }, ] NVME_PORTS_WITH = NVME_PORTS + [NON_ISCSI_PORT] + NVME_LACP_PORTS ISCSI_PORTS_WITH = ISCSI_PORTS + ISCSI_LACP_PORTS PORTS_WITH = ISCSI_PORTS + [NON_ISCSI_PORT] + ISCSI_LACP_PORTS PORTS_WITH = ISCSI_PORTS + [NON_ISCSI_PORT] PORTS_WITHOUT = [NON_ISCSI_PORT] TOTAL_CAPACITY = 50.0 USED_SPACE = 32.1 PROVISIONED_CAPACITY = 70.0 TOTAL_REDUCTION = 2.18 DEFAULT_OVER_SUBSCRIPTION = 20 SPACE_INFO = {"space": {"capacity": TOTAL_CAPACITY * units.Gi, "total_used": USED_SPACE * units.Gi}} SPACE_INFO_EMPTY = { "capacity": TOTAL_CAPACITY * units.Gi, "total": 0, } CTRL_INFO = {'mode': 'primary', 'mode_since': 1910956431807, 'model': 'dummy-model', 'name': 'CT0', 'status': 'ready', 'type': 'array_controller', 'version': '6.6.3'} CTRL_OBJ = ValidResponse(200, None, 1, [DotNotation(CTRL_INFO)], {}) PERF_INFO = { 'writes_per_sec': 318, 'usec_per_write_op': 255, 'output_per_sec': 234240, 'read_bytes_per_sec': 234240, 'reads_per_sec': 15, 'input_per_sec': 2827943, 'write_bytes_per_sec': 2827943, 'time': '2015-12-17T21:50:55Z', 'usec_per_read_op': 192, 'queue_depth': 4, # Deprecated - to be removed in 2026.1 cycle 'queue_usec_per_mirrored_write_op': 1, 'queue_usec_per_read_op': 2, 'queue_usec_per_write_op': 3, } PERF_INFO_RAW = [PERF_INFO] ARRAYS_SPACE_INFO = {'capacity': 53687091200, 'id': 'd4eca33c-xxx-yyyy-zzz-8615590fzzz', 'name': 'dummy-array', 'parity': 1.0, 'space': {'data_reduction': 4.084554444259789, 'shared': 34617664613455, 'snapshots': 1239024085076, 'system': 0, 'thin_provisioning': 0.8557968609746274, 'total_physical': 34467112550.4, 'total_provisioned': 75161927680, 'total_reduction': 21.020004503715246, 'unique': 2564030093034, 'virtual': 110211386607104}, 'time': 1713201705834} ISCSI_CONNECTION_INFO = { "driver_volume_type": "iscsi", "data": { "target_discovered": False, "discard": True, "target_luns": [1, 1, 1, 1], "addressing_mode": "SAM2", "target_iqns": [TARGET_IQN, TARGET_IQN, TARGET_IQN, TARGET_IQN], "target_portals": [ISCSI_IPS[0] + ":" + TARGET_PORT, ISCSI_IPS[1] + ":" + TARGET_PORT, ISCSI_IPS[2] + ":" + TARGET_PORT, ISCSI_IPS[3] + ":" + TARGET_PORT], "wwn": "3624a93709714b5cb91634c470002b2c8", }, } ISCSI_CONNECTION_INFO_V6 = { "driver_volume_type": "iscsi", "data": { "target_discovered": False, "discard": True, "target_luns": [1, 1, 1, 1], "addressing_mode": "SAM2", "target_iqns": [TARGET_IQN, TARGET_IQN, TARGET_IQN, TARGET_IQN], "target_portals": [ISCSI_IPS[4] + ":" + TARGET_PORT, ISCSI_IPS[5] + ":" + TARGET_PORT, 
ISCSI_IPS[6] + ":" + TARGET_PORT, ISCSI_IPS[7] + ":" + TARGET_PORT], "wwn": "3624a93709714b5cb91634c470002b2c8", }, } ISCSI_CONNECTION_INFO_AC = { "driver_volume_type": "iscsi", "data": { "target_discovered": False, "discard": True, "addressing_mode": "SAM2", "target_luns": [1, 1, 1, 1, 5, 5, 5, 5], "target_iqns": [TARGET_IQN, TARGET_IQN, TARGET_IQN, TARGET_IQN, AC_TARGET_IQN, AC_TARGET_IQN, AC_TARGET_IQN, AC_TARGET_IQN], "target_portals": [ISCSI_IPS[0] + ":" + TARGET_PORT, ISCSI_IPS[1] + ":" + TARGET_PORT, ISCSI_IPS[2] + ":" + TARGET_PORT, ISCSI_IPS[3] + ":" + TARGET_PORT, AC_ISCSI_IPS[0] + ":" + TARGET_PORT, AC_ISCSI_IPS[1] + ":" + TARGET_PORT, AC_ISCSI_IPS[2] + ":" + TARGET_PORT, AC_ISCSI_IPS[3] + ":" + TARGET_PORT], "wwn": "3624a93709714b5cb91634c470002b2c8", }, } ISCSI_CONNECTION_INFO_AC_FILTERED = { "driver_volume_type": "iscsi", "data": { "target_discovered": False, "discard": True, "addressing_mode": "SAM2", "target_luns": [1, 1, 1, 1, 5, 5, 5], # Final entry filtered by ISCSI_CIDR_FILTERED "target_iqns": [TARGET_IQN, TARGET_IQN, TARGET_IQN, TARGET_IQN, AC_TARGET_IQN, AC_TARGET_IQN, AC_TARGET_IQN], # Final entry filtered by ISCSI_CIDR_FILTERED "target_portals": [ISCSI_IPS[0] + ":" + TARGET_PORT, ISCSI_IPS[1] + ":" + TARGET_PORT, ISCSI_IPS[2] + ":" + TARGET_PORT, ISCSI_IPS[3] + ":" + TARGET_PORT, AC_ISCSI_IPS[0] + ":" + TARGET_PORT, AC_ISCSI_IPS[1] + ":" + TARGET_PORT, AC_ISCSI_IPS[2] + ":" + TARGET_PORT], "wwn": "3624a93709714b5cb91634c470002b2c8", }, } ISCSI_CONNECTION_INFO_AC_FILTERED_LIST = { "driver_volume_type": "iscsi", "data": { "target_discovered": False, "discard": True, "addressing_mode": "SAM2", "target_luns": [1, 1, 5, 5], # Final entry filtered by ISCSI_CIDR_FILTERED "target_iqns": [TARGET_IQN, TARGET_IQN, AC_TARGET_IQN, AC_TARGET_IQN], # Final entry filtered by ISCSI_CIDR_FILTERED "target_portals": [ISCSI_IPS[1] + ":" + TARGET_PORT, ISCSI_IPS[2] + ":" + TARGET_PORT, AC_ISCSI_IPS[5] + ":" + TARGET_PORT, # IPv6 AC_ISCSI_IPS[6] + ":" + TARGET_PORT], # IPv6 "wwn": "3624a93709714b5cb91634c470002b2c8", }, } NVME_CONNECTION_INFO = { "driver_volume_type": "nvmeof", "data": { "target_nqn": TARGET_NQN, "discard": True, "portals": [(NVME_IPS[0], NVME_PORT, "rdma"), (NVME_IPS[1], NVME_PORT, "rdma"), (NVME_IPS[2], NVME_PORT, "rdma"), (NVME_IPS[3], NVME_PORT, "rdma")], "volume_nguid": "0009714b5cb916324a9374c470002b2c8", }, } NVME_CONNECTION_INFO_V6 = { "driver_volume_type": "nvmeof", "data": { "target_nqn": TARGET_NQN, "discard": True, "portals": [(NVME_IPS[4].strip("[]"), NVME_PORT, "rdma"), (NVME_IPS[5].strip("[]"), NVME_PORT, "rdma"), (NVME_IPS[6].strip("[]"), NVME_PORT, "rdma"), (NVME_IPS[7].strip("[]"), NVME_PORT, "rdma")], "volume_nguid": "0009714b5cb916324a9374c470002b2c8", }, } NVME_CONNECTION_INFO_AC = { "driver_volume_type": "nvmeof", "data": { "target_nqn": TARGET_NQN, "discard": True, "portals": [ (NVME_IPS[0], NVME_PORT, "rdma"), (NVME_IPS[1], NVME_PORT, "rdma"), (NVME_IPS[2], NVME_PORT, "rdma"), (NVME_IPS[3], NVME_PORT, "rdma"), (AC_NVME_IPS[0], NVME_PORT, "rdma"), (AC_NVME_IPS[1], NVME_PORT, "rdma"), (AC_NVME_IPS[2], NVME_PORT, "rdma"), (AC_NVME_IPS[3], NVME_PORT, "rdma")], "volume_nguid": "0009714b5cb916324a9374c470002b2c8", }, } NVME_CONNECTION_INFO_AC_FILTERED = { "driver_volume_type": "nvmeof", "data": { "target_nqn": TARGET_NQN, "discard": True, # Final entry filtered by NVME_CIDR_FILTERED "portals": [ (NVME_IPS[0], NVME_PORT, "rdma"), (NVME_IPS[1], NVME_PORT, "rdma"), (NVME_IPS[2], NVME_PORT, "rdma"), (NVME_IPS[3], NVME_PORT, "rdma"), (AC_NVME_IPS[0], 
NVME_PORT, "rdma"), (AC_NVME_IPS[1], NVME_PORT, "rdma"), (AC_NVME_IPS[2], NVME_PORT, "rdma")], "volume_nguid": "0009714b5cb916324a9374c470002b2c8", }, } NVME_CONNECTION_INFO_AC_FILTERED_LIST = { "driver_volume_type": "nvmeof", "data": { "target_nqn": TARGET_NQN, "discard": True, # Final entry filtered by NVME_CIDR_FILTERED "portals": [ (NVME_IPS[1], NVME_PORT, "rdma"), (NVME_IPS[2], NVME_PORT, "rdma"), (AC_NVME_IPS[5].strip("[]"), NVME_PORT, "rdma"), # IPv6 (AC_NVME_IPS[6].strip("[]"), NVME_PORT, "rdma"), # IPv6 ], "volume_nguid": "0009714b5cb916324a9374c470002b2c8", }, } FC_CONNECTION_INFO = { "driver_volume_type": "fibre_channel", "data": { "target_wwn": FC_WWNS, "target_wwns": FC_WWNS, "target_lun": 1, "target_luns": [1, 1, 1, 1], "target_discovered": True, "addressing_mode": "SAM2", "initiator_target_map": INITIATOR_TARGET_MAP, "discard": True, "wwn": "3624a93709714b5cb91634c470002b2c8", }, } FC_CONNECTION_INFO_AC = { "driver_volume_type": "fibre_channel", "data": { "target_wwn": FC_WWNS + AC_FC_WWNS, "target_wwns": FC_WWNS + AC_FC_WWNS, "target_lun": 1, "target_luns": [1, 1, 1, 1, 5, 5, 5, 5], "target_discovered": True, "addressing_mode": "SAM2", "initiator_target_map": AC_INITIATOR_TARGET_MAP, "discard": True, "wwn": "3624a93709714b5cb91634c470002b2c8", }, } PURE_SNAPSHOT = { "created": "2015-05-27T17:34:33Z", "name": "vol1.snap1", "serial": "8343DFDE2DAFBE40000115E4", "size": 3221225472, "source": "vol1" } PURE_PGROUP = { "hgroups": None, "hosts": None, "name": "pg1", "source": "pure01", "targets": None, "volumes": ["v1"] } PGROUP_ON_TARGET_NOT_ALLOWED = { "name": "array1:replicated_pgroup", "hgroups": None, "source": "array1", "hosts": None, "volumes": ["array1:replicated_volume"], "time_remaining": None, "targets": [{"name": "array2", "allowed": False}]} PGROUP_ON_TARGET_ALLOWED = { "name": "array1:replicated_pgroup", "hgroups": None, "source": "array1", "hosts": None, "volumes": ["array1:replicated_volume"], "time_remaining": None, "allowed": True, "targets": [{"name": "array2", "allowed": True}]} REPLICATED_PGSNAPS = [ { "name": "array1:cinder-repl-pg.3", "created": "2014-12-04T22:59:38Z", "started": "2014-12-04T22:59:38Z", "completed": "2014-12-04T22:59:39Z", "source": "array1:cinder-repl-pg", "logical_data_transferred": 0, "progress": 1.0, "data_transferred": 318 }, { "name": "array1:cinder-repl-pg.2", "created": "2014-12-04T21:59:38Z", "started": "2014-12-04T21:59:38Z", "completed": "2014-12-04T21:59:39Z", "source": "array1:cinder-repl-pg", "logical_data_transferred": 0, "progress": 1.0, "data_transferred": 318 }, { "name": "array1:cinder-repl-pg.1", "created": "2014-12-04T20:59:38Z", "started": "2014-12-04T20:59:38Z", "completed": "2014-12-04T20:59:39Z", "source": "array1:cinder-repl-pg", "logical_data_transferred": 0, "progress": 1.0, "data_transferred": 318 }] REPLICATED_VOLUME_OBJS = [ fake_volume.fake_volume_obj( None, id=fake.VOLUME_ID, provider_id=("volume-%s-cinder" % fake.VOLUME_ID) ), fake_volume.fake_volume_obj( None, id=fake.VOLUME2_ID, provider_id=("volume-%s-cinder" % fake.VOLUME2_ID) ), fake_volume.fake_volume_obj( None, id=fake.VOLUME3_ID, provider_id=("volume-%s-cinder" % fake.VOLUME3_ID) ), ] REPLICATED_VOLUME_SNAPS = [ { "source": "array1:volume-%s-cinder" % fake.VOLUME_ID, "serial": "BBA481C01639104E0001D5F7", "created": "2014-12-04T22:59:38Z", "name": "array1:cinder-repl-pg.2.volume-%s-cinder" % fake.VOLUME_ID, "size": 1048576 }, { "source": "array1:volume-%s-cinder" % fake.VOLUME2_ID, "serial": "BBA481C01639104E0001D5F8", "created": 
"2014-12-04T22:59:38Z", "name": "array1:cinder-repl-pg.2.volume-%s-cinder" % fake.VOLUME2_ID, "size": 1048576 }, { "source": "array1:volume-%s-cinder" % fake.VOLUME3_ID, "serial": "BBA481C01639104E0001D5F9", "created": "2014-12-04T22:59:38Z", "name": "array1:cinder-repl-pg.2.volume-%s-cinder" % fake.VOLUME3_ID, "size": 1048576 } ] array_1 = {'status': 'online', 'id': '47966b2d-a1ed-4144-8cae-6332794562b8', 'name': 'fs83-14', 'mediator_status': 'online'} array_2 = {'status': 'online', 'id': '8ed17cf4-4650-4634-ab3d-f2ca165cd021', 'name': 'fs83-15', 'mediator_status': 'online'} pod_1 = dict(arrays = [array_1, array_2], source = None, name= 'cinder-pod') dotted_dict = DotNotation(pod_1) CINDER_POD = ValidResponse(200, None, 1, [dotted_dict], {}) VALID_ISCSI_PORTS = ValidResponse(200, None, 1, [DotNotation(ISCSI_PORTS[0]), DotNotation(ISCSI_PORTS[1]), DotNotation(ISCSI_PORTS[2]), DotNotation(ISCSI_PORTS[3])], {}) VALID_AC_ISCSI_PORTS = ValidResponse(200, None, 1, [DotNotation(AC_ISCSI_PORTS[0]), DotNotation(AC_ISCSI_PORTS[1]), DotNotation(AC_ISCSI_PORTS[2]), DotNotation(AC_ISCSI_PORTS[3])], {}) VALID_AC_ISCSI_PORTS_IPV6 = ValidResponse(200, None, 1, [DotNotation(AC_ISCSI_PORTS[4]), DotNotation(AC_ISCSI_PORTS[5]), DotNotation(AC_ISCSI_PORTS[6]), DotNotation(AC_ISCSI_PORTS[7])], {}) VALID_ISCSI_PORTS_IPV6 = ValidResponse(200, None, 1, [DotNotation(ISCSI_PORTS[4]), DotNotation(ISCSI_PORTS[5]), DotNotation(ISCSI_PORTS[6]), DotNotation(ISCSI_PORTS[7])], {}) VALID_FC_PORTS = ValidResponse(200, None, 1, [DotNotation(FC_PORTS[0]), DotNotation(FC_PORTS[1]), DotNotation(FC_PORTS[2]), DotNotation(FC_PORTS[3])], {}) VALID_AC_FC_PORTS = ValidResponse(200, None, 1, [DotNotation(AC_FC_PORTS[0]), DotNotation(AC_FC_PORTS[1]), DotNotation(AC_FC_PORTS[2]), DotNotation(AC_FC_PORTS[3])], {}) MANAGEABLE_PODS = [ { 'name': 'somepod', } ] MANAGEABLE_PURE_VOLS = [ { 'name': 'myVol1', 'id': fake.VOLUME_ID, 'serial': '8E9C7E588B16C1EA00048CCA', 'size': 3221225472, 'provisioned': 3221225472, 'space': {'total_provisioned': 3221225472}, 'created': '2016-08-05T17:26:34Z', 'source': None, 'connection_count': 0 }, { 'name': 'myVol2', 'id': fake.VOLUME2_ID, 'serial': '8E9C7E588B16C1EA00048CCB', 'size': 3221225472, 'provisioned': 3221225472, 'space': {'total_provisioned': 3221225472}, 'created': '2016-08-05T17:26:34Z', 'source': None, 'connection_count': 0 }, { 'name': 'myVol3', 'id': fake.VOLUME3_ID, 'serial': '8E9C7E588B16C1EA00048CCD', 'size': 3221225472, 'provisioned': 3221225472, 'space': {'total_provisioned': 3221225472}, 'created': '2016-08-05T17:26:34Z', 'source': None, 'connection_count': 0 } ] MANAGEABLE_PURE_VOL_REFS = [ { 'reference': {'name': 'myVol1'}, 'size': 3, 'safe_to_manage': True, 'reason_not_safe': '', 'cinder_id': None, 'extra_info': None, }, { 'reference': {'name': 'myVol2'}, 'size': 3, 'safe_to_manage': True, 'reason_not_safe': '', 'cinder_id': None, 'extra_info': None, }, { 'reference': {'name': 'myVol3'}, 'size': 3, 'safe_to_manage': True, 'reason_not_safe': '', 'cinder_id': None, 'extra_info': None, } ] MPV_REFS = ValidResponse(200, None, 3, [DotNotation(MANAGEABLE_PURE_VOL_REFS[0]), DotNotation(MANAGEABLE_PURE_VOL_REFS[1]), DotNotation(MANAGEABLE_PURE_VOL_REFS[2])], {}) MPV = ValidResponse(200, None, 3, [DotNotation(MANAGEABLE_PURE_VOLS[0]), DotNotation(MANAGEABLE_PURE_VOLS[1]), DotNotation(MANAGEABLE_PURE_VOLS[2])], {}) CONNECTION_DATA = {'host': {'name': 'utest'}, 'host_group': {}, 'lun': 1, 'nsid': 9753, 'protocol_endpoint': {}, 'volume': {'id': '78a9e55b-d9ef-37ce-0dbd-14de74ae35d4', 'name': 
'xVol1'}} CONN = ValidResponse(200, None, 1, [DotNotation(CONNECTION_DATA)], {}) vol_dict = {'id': '1e5177e7-95e5-4a0f-b170-e45f4b469f6a', 'name': 'volume-1e5177e7-95e5-4a0f-b170-e45f4b469f6a-cinder'} NCONNECTION_DATA = {'host': {'name': PURE_HOST_NAME}, 'host_group': {}, 'lun': 1, 'nsid': 9753, 'protocol_endpoint': {}, 'volume': vol_dict} NCONN = ValidResponse(200, None, 1, [DotNotation(NCONNECTION_DATA)], {}) AC_CONNECTION_DATA = [{'host': {'name': 'utest5'}, 'host_group': {}, 'lun': 5, 'nsid': 9755, 'protocol_endpoint': {}, 'volume': {'id': '78a9e55b-d9ef-37ce-0dbd-14de74ae35d5', 'name': 'xVol5'}}] AC_CONN = ValidResponse(200, None, 1, [DotNotation(AC_CONNECTION_DATA[0])], {}) MANAGEABLE_PURE_SNAPS = [ { 'name': 'volume-fd33de6e-56f6-452d-a7b6-451c11089a9f-cinder.snap1', 'serial': '8E9C7E588B16C1EA00048CCA', 'size': 3221225472, 'provisioned': 3221225472, 'created': '2016-08-05T17:26:34Z', 'source': {'name': 'volume-fd33de6e-56f6-452d-a7b6-451c11089a9f-cinder'}, }, { 'name': 'volume-fd33de6e-56f6-452d-a7b6-451c11089a9f-cinder.snap2', 'serial': '8E9C7E588B16C1EA00048CCB', 'size': 4221225472, 'provisioned': 4221225472, 'created': '2016-08-05T17:26:34Z', 'source': {'name': 'volume-fd33de6e-56f6-452d-a7b6-451c11089a9f-cinder'}, }, { 'name': 'volume-fd33de6e-56f6-452d-a7b6-451c11089a9f-cinder.snap3', 'serial': '8E9C7E588B16C1EA00048CCD', 'size': 5221225472, 'provisioned': 5221225472, 'created': '2016-08-05T17:26:34Z', 'source': {'name': 'volume-fd33de6e-56f6-452d-a7b6-451c11089a9f-cinder'}, } ] MANAGEABLE_PURE_SNAP_REFS = [ { 'reference': {'name': MANAGEABLE_PURE_SNAPS[0]['name']}, 'size': 3, 'safe_to_manage': True, 'reason_not_safe': None, 'cinder_id': None, 'extra_info': None, 'source_reference': {'name': MANAGEABLE_PURE_SNAPS[0]['source']['name']}, }, { 'reference': {'name': MANAGEABLE_PURE_SNAPS[1]['name']}, 'size': 4, 'safe_to_manage': True, 'reason_not_safe': None, 'cinder_id': None, 'extra_info': None, 'source_reference': {'name': MANAGEABLE_PURE_SNAPS[1]['source']['name']}, }, { 'reference': {'name': MANAGEABLE_PURE_SNAPS[2]['name']}, 'size': 5, 'safe_to_manage': True, 'reason_not_safe': None, 'cinder_id': None, 'extra_info': None, 'source_reference': {'name': MANAGEABLE_PURE_SNAPS[2]['source']['name']}, } ] MAX_SNAP_LENGTH = 96 MPS = ValidResponse(200, None, 3, [DotNotation(MANAGEABLE_PURE_SNAPS[0]), DotNotation(MANAGEABLE_PURE_SNAPS[1]), DotNotation(MANAGEABLE_PURE_SNAPS[2])], {}) MPS_REFS = ValidResponse(200, None, 3, [DotNotation(MANAGEABLE_PURE_SNAP_REFS[0]), DotNotation(MANAGEABLE_PURE_SNAP_REFS[1]), DotNotation(MANAGEABLE_PURE_SNAP_REFS[2])], {}) # unit for maxBWS is MB QOS_IOPS_BWS = {"maxIOPS": "100", "maxBWS": "1", "maxIOPS_per_GB": "0", "maxBWS_per_GB": "0"} QOS_IOPS_BWS_2 = {"maxIOPS": "1000", "maxBWS": "10", "maxIOPS_per_GB": "0", "maxBWS_per_GB": "0"} QOS_INVALID = {"maxIOPS": "100", "maxBWS": str(512 * 1024 + 1), "maxIOPS_per_GB": "0", "maxBWS_per_GB": "0"} QOS_ZEROS = {"maxIOPS": "0", "maxBWS": "0", "maxIOPS_per_GB": "0", "maxBWS_per_GB": "0"} QOS_IOPS = {"maxIOPS": "100"} QOS_BWS = {"maxBWS": "1"} MAX_IOPS = 100000000 MAX_BWS = 549755813888 MIN_IOPS = 100 MIN_BWS = 1048576 VGROUP = 'puretest-vgroup' ARRAY_RESPONSE = { 'status_code': 200 } INTERFACES = [ { 'name': 'ct0.eth4', 'services': ['nvme-tcp'], 'eth': {'address': '1.1.1.1', 'subtype': 'physical'}, }, { 'name': 'ct0.eth5', 'services': ['iscsi'], 'eth': {'address': '2.2.2.2', 'subtype': 'physical'}, }, { 'name': 'ct0.eth20', 'services': ['nvme-roce'], 'eth': {'address': '3.3.3.3', 'subtype': 'physical'} }, { 
'name': 'ct0.fc4', 'services': ['nvme-fc'], 'eth': {'address': None, 'subtype': 'physical'}, }, { 'name': 'lacp0', 'services': ['nvme-roce'], 'eth': {'address': '4.4.4.4', 'subtype': 'lacp_bond'}, }, { 'name': 'lacp1', 'services': ['nvme-tcp'], 'eth': {'address': '5.5.5.5', 'subtype': 'lacp_bond'}, }, { 'name': 'lacp2', 'services': ['iscsi'], 'eth': {'address': '6.6.6.6', 'subtype': 'lacp_bond'}, }, { 'name': 'ct0.fc1', 'services': ['scsi-fc'], 'eth': {'address': None, 'subtype': 'physical'}, } ] retype_meta = { 'metadata': { 'array_volume_name': ( 'cinder-pod::volume-1e5177e7-95e5-4a0f-' 'b170-e45f4b469f6a-cinder' ) }, 'replication_status': 'enabled' } retype_repl_off = { 'metadata': { 'array_volume_name': ( 'volume-1e5177e7-95e5-4a0f-' 'b170-e45f4b469f6a-cinder' ) }, 'replication_status': 'disabled' } retype_async = { 'metadata': { 'array_volume_name': ( 'volume-1e5177e7-95e5-4a0f-' 'b170-e45f4b469f6a-cinder' ) }, 'replication_status': 'enabled' } class PureDriverTestCase(test.TestCase): def setUp(self): super(PureDriverTestCase, self).setUp() self.mock_config = mock.Mock() self.mock_config.san_ip = PRIMARY_MANAGEMENT_IP self.mock_config.pure_api_token = API_TOKEN self.mock_config.volume_backend_name = VOLUME_BACKEND_NAME self.mock_config.safe_get.return_value = None self.mock_config.pure_eradicate_on_delete = False self.mock_config.driver_ssl_cert_verify = False self.mock_config.driver_ssl_cert_path = None self.mock_config.pure_iscsi_cidr = ISCSI_CIDR self.mock_config.pure_iscsi_cidr_list = None self.mock_config.pure_nvme_cidr = NVME_CIDR self.mock_config.pure_nvme_cidr_list = None self.mock_config.pure_nvme_transport = "roce" self.array = mock.Mock() self.array.get_arrays.return_value = VALID_GET_ARRAY_PRIMARY self.array.get.return_value = GET_ARRAY_PRIMARY self.array.array_name = GET_ARRAY_PRIMARY["name"] self.array.array_id = GET_ARRAY_PRIMARY["id"] self.async_array2 = mock.Mock() self.async_array2.get_arrays.return_value = VALID_GET_ARRAY_SECONDARY self.async_array2.array_name = GET_ARRAY_SECONDARY["name"] self.async_array2.array_id = GET_ARRAY_SECONDARY["id"] self.async_array2.get.return_value = GET_ARRAY_SECONDARY self.async_array2.replication_type = 'async' self.flasharray = pure.flasharray # self.purestorage_module = pure.flasharray # self.purestorage_module.PureHTTPError = FakePureStorageHTTPError def fake_get_array(self, *args, **kwargs): if 'action' in kwargs and kwargs['action'] == 'monitor': return ValidResponse(200, None, 1, [DotNotation(PERF_INFO_RAW)], {}) if 'space' in kwargs and kwargs['space'] is True: return ValidResponse(200, None, 1, [DotNotation(SPACE_INFO)], {}) return ValidResponse(200, None, 1, [DotNotation(GET_ARRAY_PRIMARY)], {}) def assert_error_propagates(self, mocks, func, *args, **kwargs): """Assert that errors from mocks propagate to func. Fail if exceptions raised by mocks are not seen when calling func(*args, **kwargs). Ensure that we are really seeing exceptions from the mocks by failing if just running func(*args, **kargs) raises an exception itself. 
""" func(*args, **kwargs) for mock_func in mocks: original_side_effect = mock_func.side_effect mock_func.side_effect = [pure.PureDriverException( reason='reason')] self.assertRaises(pure.PureDriverException, func, *args, **kwargs) mock_func.side_effect = original_side_effect @mock.patch('distro.name') def test_for_user_agent(self, mock_distro): mock_distro.return_value = 'MyFavouriteDistro' driver = pure.PureBaseVolumeDriver(configuration=self.mock_config) expected_agent = "OpenStack Cinder %s/%s (MyFavouriteDistro)" % ( driver.__class__.__name__, driver.VERSION ) self.assertEqual(expected_agent, driver._user_agent) class PureBaseSharedDriverTestCase(PureDriverTestCase): def setUp(self): super(PureBaseSharedDriverTestCase, self).setUp() self.driver = pure.PureBaseVolumeDriver(configuration=self.mock_config) self.driver._array = self.array self.mock_object(self.driver, '_get_current_array', return_value=self.array) self.driver._replication_pod_name = 'cinder-pod' self.driver._replication_pg_name = 'cinder-group' self.driver._ghost_pod_name = 'cinder-ghost-pod' def new_fake_vol(self, set_provider_id=True, fake_context=None, spec=None, type_extra_specs=None, type_qos_specs_id=None, type_qos_specs=None): if fake_context is None: fake_context = mock.MagicMock() if type_extra_specs is None: type_extra_specs = {} if spec is None: spec = {} voltype = fake_volume.fake_volume_type_obj(fake_context) voltype.extra_specs = type_extra_specs voltype.qos_specs_id = type_qos_specs_id voltype.qos_specs = type_qos_specs vol = fake_volume.fake_volume_obj(fake_context, **spec) repl_type = self.driver._get_replication_type_from_vol_type(voltype) vol_name = vol.name + '-cinder' if repl_type == 'sync': vol_name = 'cinder-pod::' + vol_name if set_provider_id: vol.provider_id = vol_name vol.volume_type = voltype vol.volume_type_id = voltype.id vol.volume_attachment = None return vol, vol_name def new_fake_snap(self, vol=None, group_snap=None): if vol: vol_name = vol.name + "-cinder" else: vol, vol_name = self.new_fake_vol() snap = fake_snapshot.fake_snapshot_obj(mock.MagicMock()) snap.volume_id = vol.id snap.volume = vol if group_snap is not None: snap.group_snapshot_id = group_snap.id snap.group_snapshot = group_snap snap_name = "%s.%s" % (vol_name, snap.name) return snap, snap_name def new_fake_group(self): group = fake_group.fake_group_obj(mock.MagicMock()) group_name = "consisgroup-%s-cinder" % group.id return group, group_name def new_fake_group_snap(self, group=None): if group: group_name = "consisgroup-%s-cinder" % group.id else: group, group_name = self.new_fake_group() group_snap = fake_group_snapshot.fake_group_snapshot_obj( mock.MagicMock()) group_snap_name = "%s.cgsnapshot-%s-cinder" % (group_name, group_snap.id) group_snap.group = group group_snap.group_id = group.id return group_snap, group_snap_name class PureBaseVolumeDriverGetCurrentArrayTestCase(PureDriverTestCase): def setUp(self): super(PureBaseVolumeDriverGetCurrentArrayTestCase, self).setUp() self.driver = pure.PureBaseVolumeDriver(configuration=self.mock_config) self.driver._array = self.array self.driver._replication_pod_name = 'cinder-pod' self.driver._replication_pg_name = 'cinder-group' # self.purestorage_module.Client.side_effect = None def test_get_current_array(self): self.driver._is_active_cluster_enabled = True self.array.array_id = '47966b2d-a1ed-4144-8cae-6332794562b8' self.array.get_pods.return_value = CINDER_POD self.driver._active_cluster_target_arrays = [self.array] self.driver._get_current_array() 
self.array.get_pods.assert_called_with(names=['cinder-pod']) @ddt.ddt(testNameFormat=ddt.TestNameFormat.INDEX_ONLY) class PureBaseVolumeDriverTestCase(PureBaseSharedDriverTestCase): def _setup_mocks_for_replication(self): # Mock config values self.mock_config.pure_replica_interval_default = ( REPLICATION_INTERVAL_IN_SEC) self.mock_config.pure_replica_retention_short_term_default = ( REPLICATION_RETENTION_SHORT_TERM) self.mock_config.pure_replica_retention_long_term_default = ( REPLICATION_RETENTION_LONG_TERM) self.mock_config.pure_replica_retention_long_term_default = ( REPLICATION_RETENTION_LONG_TERM_PER_DAY) self.mock_config.pure_replication_pg_name = 'cinder-group' self.mock_config.pure_replication_pod_name = 'cinder-pod' self.mock_config.safe_get.return_value = [ {"backend_id": self.driver._array.array_id, "managed_backend_name": None, "san_ip": "1.2.3.4", "api_token": "abc123"}] @mock.patch(BASE_DRIVER_OBJ + '._get_flasharray') @mock.patch(BASE_DRIVER_OBJ + '._generate_replication_retention') @mock.patch(BASE_DRIVER_OBJ + '._setup_replicated_pgroups') def test_parse_replication_configs_single_async_target( self, mock_setup_repl_pgroups, mock_generate_replication_retention, mock_getarray): retention = mock.MagicMock() mock_generate_replication_retention.return_value = retention mock_setup_repl_pgroups.return_value = None # Test single array configured self.mock_config.safe_get.return_value = [ {"backend_id": self.driver._array.id, "managed_backend_name": None, "san_ip": "1.2.3.4", "api_token": "abc123"}] mock_getarray().get_arrays.return_value = VALID_GET_ARRAY_PRIMARY self.mock_config.pure_replica_interval_default = ( REPLICATION_INTERVAL_IN_SEC) self.driver.parse_replication_configs() self.assertEqual(1, len(self.driver._replication_target_arrays)) self.assertEqual(mock_getarray(), self.driver._replication_target_arrays[0]) only_target_array = self.driver._replication_target_arrays[0] self.assertEqual(self.driver._array.id, only_target_array.backend_id) @mock.patch(BASE_DRIVER_OBJ + '._get_flasharray') @mock.patch(BASE_DRIVER_OBJ + '._generate_replication_retention') @mock.patch(BASE_DRIVER_OBJ + '._setup_replicated_pgroups') def test_parse_replication_configs_multiple_async_target( self, mock_setup_repl_pgroups, mock_generate_replication_retention, mock_getarray): retention = mock.MagicMock() mock_generate_replication_retention.return_value = retention mock_setup_repl_pgroups.return_value = None # Test multiple arrays configured self.mock_config.safe_get.return_value = [ {"backend_id": GET_ARRAY_PRIMARY["id"], "managed_backend_name": None, "san_ip": "1.2.3.4", "api_token": "abc123"}, {"backend_id": GET_ARRAY_SECONDARY["id"], "managed_backend_name": None, "san_ip": "1.2.3.5", "api_token": "abc124"}] mock_getarray.side_effect = [self.array, self.async_array2] self.mock_config.pure_replica_interval_default = ( REPLICATION_INTERVAL_IN_SEC) self.driver.parse_replication_configs() self.assertEqual(2, len(self.driver._replication_target_arrays)) self.assertEqual(self.array, self.driver._replication_target_arrays[0]) first_target_array = self.driver._replication_target_arrays[0] self.assertEqual(GET_ARRAY_PRIMARY["id"], first_target_array.backend_id) self.assertEqual( self.async_array2, self.driver._replication_target_arrays[1]) second_target_array = self.driver._replication_target_arrays[1] self.assertEqual(GET_ARRAY_SECONDARY["id"], second_target_array.backend_id) @mock.patch(BASE_DRIVER_OBJ + '._get_flasharray') @mock.patch(BASE_DRIVER_OBJ + '._generate_replication_retention') 
@mock.patch(BASE_DRIVER_OBJ + '._setup_replicated_pgroups') def test_parse_replication_configs_single_sync_target_non_uniform( self, mock_setup_repl_pgroups, mock_generate_replication_retention, mock_getarray): retention = mock.MagicMock() mock_generate_replication_retention.return_value = retention mock_setup_repl_pgroups.return_value = None self.mock_config.pure_replica_interval_default = ( REPLICATION_INTERVAL_IN_SEC) # Test single array configured self.mock_config.safe_get.return_value = [ { "backend_id": "foo", "managed_backend_name": None, "san_ip": "1.2.3.4", "api_token": "abc123", "type": "sync", } ] mock_getarray().get_arrays.return_value = VALID_GET_ARRAY_PRIMARY self.driver._storage_protocol = 'iSCSI' self.driver.parse_replication_configs() self.assertEqual(1, len(self.driver._replication_target_arrays)) self.assertEqual(mock_getarray(), self.driver._replication_target_arrays[0]) only_target_array = self.driver._replication_target_arrays[0] self.assertEqual("foo", only_target_array.backend_id) self.assertEqual([mock_getarray()], self.driver._active_cluster_target_arrays) self.assertEqual( 0, len(self.driver._uniform_active_cluster_target_arrays)) @mock.patch(BASE_DRIVER_OBJ + '._get_flasharray') @mock.patch(BASE_DRIVER_OBJ + '._generate_replication_retention') @mock.patch(BASE_DRIVER_OBJ + '._setup_replicated_pgroups') def test_parse_replication_configs_single_sync_target_uniform( self, mock_setup_repl_pgroups, mock_generate_replication_retention, mock_getarray): retention = mock.MagicMock() mock_generate_replication_retention.return_value = retention mock_setup_repl_pgroups.return_value = None self.mock_config.pure_replica_interval_default = ( REPLICATION_INTERVAL_IN_SEC) # Test single array configured self.mock_config.safe_get.return_value = [ { "backend_id": "foo", "managed_backend_name": None, "san_ip": "1.2.3.4", "api_token": "abc123", "type": "sync", "uniform": True, } ] mock_getarray().get_arrays.return_value = VALID_GET_ARRAY_PRIMARY self.driver._storage_protocol = 'iSCSI' self.driver.parse_replication_configs() self.assertEqual(1, len(self.driver._replication_target_arrays)) self.assertEqual(mock_getarray(), self.driver._replication_target_arrays[0]) only_target_array = self.driver._replication_target_arrays[0] self.assertEqual("foo", only_target_array.backend_id) self.assertEqual([mock_getarray()], self.driver._active_cluster_target_arrays) self.assertEqual( 1, len(self.driver._uniform_active_cluster_target_arrays)) self.assertEqual( mock_getarray(), self.driver._uniform_active_cluster_target_arrays[0]) @mock.patch(BASE_DRIVER_OBJ + '._generate_replication_retention') @mock.patch(BASE_DRIVER_OBJ + '._setup_replicated_pgroups') def test_do_setup_replicated(self, mock_setup_repl_pgroups, mock_generate_replication_retention): retention = mock.MagicMock() mock_generate_replication_retention.return_value = retention self._setup_mocks_for_replication() self.mock_config.safe_get.return_value = [ { "backend_id": "foo", "managed_backend_name": None, "san_ip": "1.2.3.4", "api_token": "abc123", "type": "async", } ] self.driver._get_flasharray = mock.MagicMock() self.driver._get_flasharray().\ get_arrays.return_value = VALID_GET_ARRAY_PRIMARY self.driver._replication_target_arrays = [self.async_array2] self.driver._storage_protocol = 'iSCSI' self.driver.do_setup(None) calls = [ mock.call(self.array, [self.async_array2, self.driver._get_flasharray()], 'cinder-group', REPLICATION_INTERVAL_IN_SEC * 1000, retention) ] mock_setup_repl_pgroups.assert_has_calls(calls) 
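    # Context note (a sketch, not part of these fixtures): the dicts returned
    # by the mocked safe_get() in the replication tests above mirror the
    # cinder.conf ``replication_device`` entries the driver would normally
    # parse, e.g. roughly:
    #
    #   replication_device = backend_id:foo,san_ip:1.2.3.4,api_token:abc123,type:sync,uniform:True
    #
    # The async-target tests omit ``type`` while the sync tests set it to
    # ``sync`` (optionally with ``uniform``); here the same data is injected
    # through mock_config.safe_get() instead of a real configuration file.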
@mock.patch(BASE_DRIVER_OBJ + '._setup_replicated_pods') @mock.patch(BASE_DRIVER_OBJ + '._generate_replication_retention') @mock.patch(BASE_DRIVER_OBJ + '._setup_replicated_pgroups') def test_do_setup_replicated_sync_rep(self, mock_setup_repl_pgroups, mock_generate_replication_retention, mock_setup_pods): retention = mock.MagicMock() mock_generate_replication_retention.return_value = retention self._setup_mocks_for_replication() self.mock_config.safe_get.return_value = [ { "backend_id": "foo", "managed_backend_name": None, "san_ip": "1.2.3.4", "api_token": "abc123", "type": "sync", } ] mock_sync_target = mock.MagicMock() mock_sync_target.get_arrays.return_value = VALID_GET_ARRAY_SECONDARY self.driver._get_flasharray = mock.MagicMock() self.driver._get_flasharray().\ get_arrays.return_value = VALID_GET_ARRAY_PRIMARY self.driver._active_cluster_target_arrays = [mock_sync_target] self.driver.configuration.pure_nvme_transport = "roce" self.driver._storage_protocol = 'iSCSI' self.driver.do_setup(None) mock_setup_pods.assert_has_calls([ mock.call(self.array, [mock_sync_target, self.driver._get_flasharray()], 'cinder-pod') ]) def test_update_provider_info_update_all(self): test_vols = [ self.new_fake_vol(spec={'id': fake.VOLUME_ID}, set_provider_id=False), self.new_fake_vol(spec={'id': fake.VOLUME2_ID}, set_provider_id=False), self.new_fake_vol(spec={'id': fake.VOLUME3_ID}, set_provider_id=False), ] vols = [] vol_names = [] for v in test_vols: vols.append(v[0]) vol_names.append(v[1]) self.driver._get_volume_type_extra_spec = mock.Mock( return_value={}) model_updates, _ = self.driver.update_provider_info(vols, None) self.assertEqual(len(test_vols), len(model_updates)) for update, vol_name in zip(model_updates, vol_names): self.assertEqual(vol_name, update['provider_id']) def test_update_provider_info_update_some(self): test_vols = [ self.new_fake_vol(spec={'id': fake.VOLUME_ID}, set_provider_id=True), self.new_fake_vol(spec={'id': fake.VOLUME2_ID}, set_provider_id=True), self.new_fake_vol(spec={'id': fake.VOLUME3_ID}, set_provider_id=False), ] vols = [] vol_names = [] for v in test_vols: vols.append(v[0]) vol_names.append(v[1]) self.driver._get_volume_type_extra_spec = mock.Mock( return_value={}) model_updates, _ = self.driver.update_provider_info(vols, None) self.assertEqual(1, len(model_updates)) self.assertEqual(vol_names[2], model_updates[0]['provider_id']) def test_update_provider_info_no_updates(self): test_vols = [ self.new_fake_vol(spec={'id': fake.VOLUME_ID}, set_provider_id=True), self.new_fake_vol(spec={'id': fake.VOLUME2_ID}, set_provider_id=True), self.new_fake_vol(spec={'id': fake.VOLUME3_ID}, set_provider_id=True), ] vols = [] for v in test_vols: vols.append(v[0]) model_updates, _ = self.driver.update_provider_info(vols, None) self.assertEqual(0, len(model_updates)) def test_generate_purity_host_name(self): connector = { "host": "really-long-string-thats-a-bit-too-long", "system_uuid": SYSTEM_UUID } result = self.driver._generate_purity_host_name(connector) self.assertTrue(result.startswith("really-long-string-that-")) self.assertTrue(result.endswith("-cinder")) self.assertEqual(63, len(result)) self.assertTrue(bool(pure.GENERATED_NAME.match(result))) connector["host"] = "!@#$%^-invalid&*" result = self.driver._generate_purity_host_name(connector) self.assertTrue(result.startswith("invalid---")) self.assertTrue(result.endswith("-cinder")) self.assertEqual(49, len(result)) self.assertIsNotNone(pure.GENERATED_NAME.match(result)) @mock.patch.object(volume_types, 'get_volume_type') 
@mock.patch(DRIVER_PATH + ".flasharray.VolumePost") def test_revert_to_snapshot(self, mock_fa, mock_get_volume_type): vol, vol_name = self.new_fake_vol(set_provider_id=True) mock_get_volume_type.return_value = vol.volume_type snap, snap_name = self.new_fake_snap(vol) mock_data = self.flasharray.VolumePost(source=self.flasharray. Reference(name=vol_name)) context = mock.MagicMock() self.driver.revert_to_snapshot(context, vol, snap) self.array.post_volumes.assert_called_with(names=[snap_name], overwrite=True, volume=mock_data) self.assert_error_propagates([self.array.post_volumes], self.driver.revert_to_snapshot, context, vol, snap) @mock.patch.object(volume_types, 'get_volume_type') @mock.patch(DRIVER_PATH + ".flasharray.VolumePost") def test_revert_to_snapshot_group(self, mock_fa, mock_get_volume_type): vol, vol_name = self.new_fake_vol(set_provider_id=True) mock_get_volume_type.return_value = vol.volume_type group, group_name = self.new_fake_group() group_snap, group_snap_name = self.new_fake_group_snap(group) snap, snap_name = self.new_fake_snap(vol, group_snap) mock_data = self.flasharray.VolumePost(source=self.flasharray. Reference(name=vol_name)) context = mock.MagicMock() self.driver.revert_to_snapshot(context, vol, snap) self.array.post_volumes.\ assert_called_with(names=[group_snap_name + '.' + vol_name], volume=mock_data, overwrite=True) self.assert_error_propagates([self.array.post_volumes], self.driver.revert_to_snapshot, context, vol, snap) @mock.patch(DRIVER_PATH + ".flasharray.VolumePost") def test_create_in_vgroup(self, mock_fa_post): vol, vol_name = self.new_fake_vol() mock_data = self.array.flasharray.VolumePost(provisioned=vol["size"]) mock_fa_post.return_value = mock_data self.driver.create_in_vgroup(self.array, vol_name, vol["size"], VGROUP, MAX_IOPS, MAX_BWS) self.array.post_volumes.\ assert_called_with(names=[VGROUP + "/" + vol_name], with_default_protection=False, volume=mock_data) iops_msg = (f"vg_maxIOPS QoS error. Must be more than {MIN_IOPS} " f"and less than {MAX_IOPS}") exc_out = self.assertRaises(exception.InvalidQoSSpecs, self.driver.create_in_vgroup, self.array, vol_name, vol["size"], VGROUP, 1, MAX_BWS) self.assertEqual(str(exc_out), iops_msg) bws_msg = (f"vg_maxBWS QoS error. Must be between {MIN_BWS} " f"and {MAX_BWS}") exc_out = self.assertRaises(exception.InvalidQoSSpecs, self.driver.create_in_vgroup, self.array, vol_name, vol["size"], VGROUP, MAX_IOPS, 1) self.assertEqual(str(exc_out), bws_msg) @mock.patch(DRIVER_PATH + ".flasharray.VolumePost") def test_create_from_snap_in_vgroup(self, mock_fa_post): vol, vol_name = self.new_fake_vol() snap, snap_name = self.new_fake_snap(vol) src_data = pure.flasharray.Reference(name=snap_name) mock_data = self.array.flasharray.VolumePost(source=src_data) mock_fa_post.return_value = mock_data self.driver.create_from_snap_in_vgroup(self.array, vol_name, vol["size"], VGROUP, MAX_IOPS, MAX_BWS) self.array.post_volumes.\ assert_called_with(names=[VGROUP + "/" + vol_name], with_default_protection=False, volume=mock_data) iops_msg = (f"vg_maxIOPS QoS error. Must be more than {MIN_IOPS} " f"and less than {MAX_IOPS}") exc_out = self.assertRaises(exception.InvalidQoSSpecs, self.driver.create_from_snap_in_vgroup, self.array, vol_name, vol["size"], VGROUP, 1, MAX_BWS) self.assertEqual(str(exc_out), iops_msg) bws_msg = (f"vg_maxBWS QoS error. 
Must be between {MIN_BWS} " f"and {MAX_BWS}") exc_out = self.assertRaises(exception.InvalidQoSSpecs, self.driver.create_from_snap_in_vgroup, self.array, vol_name, vol["size"], VGROUP, MAX_IOPS, 1) self.assertEqual(str(exc_out), bws_msg) @mock.patch(DRIVER_PATH + ".LOG") @mock.patch(DRIVER_PATH + ".flasharray.VolumeGroupPatch") def test_delete_vgroup_if_empty(self, mock_vg_patch, mock_logger): vol, vol_name = self.new_fake_vol() vgname = VGROUP + "/" + vol_name rsp = ValidResponse(200, None, 1, [DotNotation({"volume_count": 0, "name": vgname})], {}) self.array.get_volume_groups.return_value = rsp mock_data = pure.flasharray.VolumeGroupPatch(destroyed=True) self.driver._delete_vgroup_if_empty(self.array, vgname) self.array.patch_volume_groups.\ assert_called_with(names=[vgname], volume_group=mock_data) self.mock_config.pure_eradicate_on_delete = True self.driver._delete_vgroup_if_empty(self.array, vgname) self.array.delete_volume_groups.assert_called_with(names=[vgname]) err_rsp = ErrorResponse(400, [DotNotation({'message': 'vgroup delete failed'})], {}) self.array.delete_volume_groups.return_value = err_rsp self.driver._delete_vgroup_if_empty(self.array, vgname) mock_logger.warning.\ assert_called_with("Volume group deletion failed " "with message: %s", "vgroup delete failed") @mock.patch.object(volume_types, 'get_volume_type_extra_specs') def test__get_volume_type_extra_spec(self, mock_specs): vol, vol_name = self.new_fake_vol() vgname = VGROUP + "/" + vol_name mock_specs.return_value = {'vg_name': vgname} self.driver.\ _get_volume_type_extra_spec = mock.Mock(return_value={}) @mock.patch(DRIVER_PATH + ".LOG") @mock.patch(DRIVER_PATH + ".flasharray.VolumeGroupPost") def test_create_volume_group_if_not_exist(self, mock_vg_post, mock_logger): vol, vol_name = self.new_fake_vol() vgname = VGROUP + "/" + vol_name mock_qos = pure.flasharray.Qos(iops_limit='MAX_IOPS', bandwidth_limit='MAX_BWS') mock_data = pure.flasharray.VolumeGroupPost(qos=mock_qos) err_mock_data = pure.flasharray.VolumeGroupPatch(qos=mock_qos) self.driver._create_volume_group_if_not_exist(self.array, vgname, MAX_IOPS, MAX_BWS) self.array.post_volume_groups.\ assert_called_with(names=[vgname], volume_group=mock_data) err_rsp = ErrorResponse(400, [DotNotation({'message': 'already exists'})], {}) self.array.post_volume_groups.return_value = err_rsp self.driver._create_volume_group_if_not_exist(self.array, vgname, MAX_IOPS, MAX_BWS) self.array.patch_volume_groups.\ assert_called_with(names=[vgname], volume_group=err_mock_data) mock_logger.warning.\ assert_called_with("Skipping creation of vg %s since it " "already exists. 
Resetting QoS", vgname) patch_rsp = ErrorResponse(400, [DotNotation({'message': 'does not exist'})], {}) self.array.patch_volume_groups.return_value = patch_rsp self.driver._create_volume_group_if_not_exist(self.array, vgname, MAX_IOPS, MAX_BWS) mock_logger.warning.\ assert_called_with("Unable to change %(vgroup)s QoS, " "error message: %(error)s", {"vgroup": vgname, "error": 'does not exist'}) call_count = 0 def side_effect(*args, **kwargs): nonlocal call_count call_count += 1 if call_count > 1: # Return immediately on any recursive call return None else: # Call the actual method logic for the first invocation err_rsp = ErrorResponse(400, [DotNotation({'message': 'some error'})], {}) self.array.post_volume_groups.return_value = err_rsp rsp = ValidResponse(200, None, 1, [DotNotation({"destroyed": "true", "name": vgname})], {}) self.array.get_volume_groups.return_value = rsp return original_method(*args, **kwargs) original_method = self.driver._create_volume_group_if_not_exist with mock.patch.object(self.driver, '_create_volume_group_if_not_exist', side_effect=side_effect): self.driver._create_volume_group_if_not_exist(self.array, vgname, MAX_IOPS, MAX_BWS) mock_logger.warning.\ assert_called_with("Volume group %s is deleted but not" " eradicated - will recreate.", vgname) self.array.delete_volume_groups.assert_called_with(names=[vgname]) @mock.patch(DRIVER_PATH + ".flasharray.VolumePost") @mock.patch(BASE_DRIVER_OBJ + "._add_to_group_if_needed") @mock.patch(BASE_DRIVER_OBJ + "._get_replication_type_from_vol_type") def test_create_volume(self, mock_get_repl_type, mock_add_to_group, mock_fa): mock_get_repl_type.return_value = None vol_obj = fake_volume.fake_volume_obj(mock.MagicMock(), size=2) mock_data = self.array.flasharray.VolumePost(provisioned=2147483648) mock_fa.return_value = mock_data self.driver.create_volume(vol_obj) vol_name = vol_obj["name"] + "-cinder" self.array.post_volumes.assert_called_with(names=[vol_name], with_default_protection= False, volume=mock_data) mock_add_to_group.assert_called_once_with(vol_obj, vol_name) self.assert_error_propagates([mock_fa], self.driver.create_volume, vol_obj) @mock.patch(DRIVER_PATH + ".LOG") @mock.patch.object(volume_types, 'get_volume_type_extra_specs') def test_get_volume_type_extra_spec(self, mock_specs, mock_logger): vol, vol_name = self.new_fake_vol() vgname = VGROUP + "/" + vol_name mock_specs.return_value = {'flasharray:vg_name': vgname} spec_out = self.driver._get_volume_type_extra_spec(vol.volume_type_id, 'vg_name') assert spec_out == vgname mock_specs.return_value = {} default_value = "puretestvg" spec_out = self.driver.\ _get_volume_type_extra_spec(vol.volume_type_id, 'vg_name', default_value=default_value) mock_logger.debug.\ assert_called_with("Returning default spec value: %s.", default_value) mock_specs.return_value = {'flasharray:vg_name': vgname} possible_values = ['vgtest'] spec_out = self.driver.\ _get_volume_type_extra_spec(vol.volume_type_id, 'vg_name', possible_values=possible_values) mock_logger.debug.\ assert_called_with("Invalid spec value: %s specified.", vgname) mock_specs.return_value = {'flasharray:vg_name': vgname} possible_values = [vgname] spec_out = self.driver.\ _get_volume_type_extra_spec(vol.volume_type_id, 'vg_name', possible_values=possible_values) mock_logger.debug.\ assert_called_with("Returning spec value %s", vgname) @mock.patch(DRIVER_PATH + ".flasharray.VolumePost") @mock.patch(BASE_DRIVER_OBJ + "._add_to_group_if_needed") @mock.patch(BASE_DRIVER_OBJ + "._get_replication_type_from_vol_type") 
@mock.patch.object(volume_types, 'get_volume_type') def test_create_volume_from_snapshot(self, mock_get_volume_type, mock_get_replicated_type, mock_add_to_group, mock_fa): srcvol, _ = self.new_fake_vol() snap = fake_snapshot.fake_snapshot_obj(mock.MagicMock(), volume=srcvol) snap_name = snap["volume_name"] + "-cinder." + snap["name"] mock_get_replicated_type.return_value = None vol, vol_name = self.new_fake_vol(set_provider_id=False) mock_data = self.array.flasharray.VolumePost(names=[snap_name], source=pure.flasharray. Reference(name=vol_name), name=vol_name) mock_fa.return_value = mock_data mock_get_volume_type.return_value = vol.volume_type # Branch where extend unneeded self.driver.create_volume_from_snapshot(vol, snap) self.array.post_volumes.assert_called_with(names=[vol_name], with_default_protection= False, volume=mock_data) self.assertFalse(self.array.extend_volume.called) mock_add_to_group.assert_called_once_with(vol, vol_name) self.assert_error_propagates( [mock_fa], self.driver.create_volume_from_snapshot, vol, snap) self.assertFalse(self.array.extend_volume.called) @mock.patch(BASE_DRIVER_OBJ + "._extend_if_needed") @mock.patch(DRIVER_PATH + ".flasharray.VolumePost") @mock.patch(BASE_DRIVER_OBJ + "._add_to_group_if_needed") @mock.patch(BASE_DRIVER_OBJ + "._get_replication_type_from_vol_type") @mock.patch.object(volume_types, 'get_volume_type') def test_create_volume_from_snapshot_with_extend(self, mock_get_volume_type, mock_get_replicated_type, mock_add_to_group, mock_fa, mock_extend): srcvol, srcvol_name = self.new_fake_vol(spec={"size": 1}) snap = fake_snapshot.fake_snapshot_obj(mock.MagicMock(), volume=srcvol) snap_name = snap["volume_name"] + "-cinder." + snap["name"] mock_get_replicated_type.return_value = None vol, vol_name = self.new_fake_vol(set_provider_id=False, spec={"size": 2}) mock_data = self.array.flasharray.VolumePost(names=[snap_name], source=pure.flasharray. Reference(name=vol_name), name=vol_name) mock_fa.return_value = mock_data mock_get_volume_type.return_value = vol.volume_type self.driver.create_volume_from_snapshot(vol, snap) mock_extend.assert_called_with(self.array, vol_name, snap["volume_size"], vol["size"]) mock_add_to_group.assert_called_once_with(vol, vol_name) @mock.patch(DRIVER_PATH + ".flasharray.VolumePost") @mock.patch.object(volume_types, 'get_volume_type') def test_create_volume_from_snapshot_sync(self, mock_get_volume_type, mock_fa): repl_extra_specs = { 'replication_type': ' async', 'replication_enabled': ' true', } srcvol, _ = self.new_fake_vol(type_extra_specs=repl_extra_specs) snap, snap_name = self.new_fake_snap(vol=srcvol) vol, vol_name = self.new_fake_vol(set_provider_id=False, type_extra_specs=repl_extra_specs) mock_data = self.array.flasharray.VolumePost(names=[snap_name], source=pure.flasharray. 
Reference(name=vol_name), name=vol_name) mock_fa.return_value = mock_data mock_get_volume_type.return_value = vol.volume_type self.driver.create_volume_from_snapshot(vol, snap) self.array.post_volumes.assert_called_with(names=[vol_name], with_default_protection= False, volume=mock_data) @mock.patch(DRIVER_PATH + ".flasharray.VolumePost") @mock.patch(BASE_DRIVER_OBJ + "._add_to_group_if_needed") @mock.patch(BASE_DRIVER_OBJ + "._extend_if_needed", autospec=True) @mock.patch(BASE_DRIVER_OBJ + "._get_pgroup_snap_name_from_snapshot") @mock.patch(BASE_DRIVER_OBJ + "._get_replication_type_from_vol_type") @mock.patch.object(volume_types, 'get_volume_type') def test_create_volume_from_cgsnapshot(self, mock_get_volume_type, mock_get_replicated_type, mock_get_snap_name, mock_extend_if_needed, mock_add_to_group, mock_fa): cgroup = fake_group.fake_group_obj(mock.MagicMock()) cgsnap = fake_group_snapshot.fake_group_snapshot_obj(mock.MagicMock(), group=cgroup) vol, vol_name = self.new_fake_vol(spec={"group": cgroup}) mock_get_volume_type.return_value = vol.volume_type snap = fake_cgsnapshot.fake_cgsnapshot_obj(mock.MagicMock(), volume=vol) snap.cgsnapshot_id = cgsnap.id snap.cgsnapshot = cgsnap snap.volume_size = 1 snap_name = "consisgroup-%s-cinder.%s.%s-cinder" % ( cgroup.id, snap.id, vol.name ) mock_get_snap_name.return_value = snap_name mock_get_replicated_type.return_value = False mock_data = self.array.flasharray.VolumePost(names=[vol_name], source=pure.flasharray. Reference(name=vol_name), name=vol_name) mock_fa.return_value = mock_data self.driver.create_volume_from_snapshot(vol, snap, True) self.array.post_volumes.assert_called_with(names=[vol_name], with_default_protection= False, volume=mock_data) self.assertTrue(mock_extend_if_needed.called) mock_add_to_group.assert_called_with(vol, vol_name) # Tests cloning a volume that is not replicated type @mock.patch(DRIVER_PATH + ".flasharray.VolumePost") @mock.patch(BASE_DRIVER_OBJ + "._add_to_group_if_needed") @mock.patch(BASE_DRIVER_OBJ + "._get_replication_type_from_vol_type") @mock.patch.object(volume_types, 'get_volume_type') def test_create_cloned_volume(self, mock_get_volume_type, mock_get_replication_type, mock_add_to_group, mock_fa): vol, vol_name = self.new_fake_vol(set_provider_id=False) src_vol, src_name = self.new_fake_vol() mock_get_volume_type.return_value = vol.volume_type mock_data = self.array.flasharray.VolumePost(names=[vol_name], source= pure.flasharray. 
reference(name=src_name)) self.driver._get_volume_type_extra_spec = mock.Mock( return_value={}) mock_fa.return_value = mock_data mock_get_replication_type.return_value = None # Branch where extend unneeded self.driver.create_cloned_volume(vol, src_vol) self.array.post_volumes.assert_called_with(names=[vol_name], volume=mock_data) self.assertFalse(self.array.extend_volume.called) mock_add_to_group.assert_called_once_with(vol, vol_name) self.assert_error_propagates( [self.array.post_volumes], self.driver.create_cloned_volume, vol, src_vol) self.assertFalse(self.array.extend_volume.called) # Tests cloning a volume that is not replicated type with QoS @ddt.data( { "qos_name": "qos-iops-bws", "qos_spec": dict(QOS_IOPS_BWS), "qos_data": {"iops_limit": '100', "bandwidth_limit": '1048576'} }, { "qos_name": "qos-iops", "qos_spec": dict(QOS_IOPS), "qos_data": {"iops_limit": '100'} }, { "qos_name": "qos-bws", "qos_spec": dict(QOS_BWS), "qos_data": {"bandwidth_limit": '1048576'} }, ) @mock.patch(BASE_DRIVER_OBJ + "._get_qos_settings") @mock.patch(BASE_DRIVER_OBJ + ".set_qos") @mock.patch(DRIVER_PATH + ".flasharray.VolumePost") @mock.patch.object(volume_types, 'get_volume_type') def test_create_cloned_volume_qos(self, qos_info, mock_get_volume_type, mock_fa, mock_qos, mock_qos_specs): ctxt = context.get_admin_context() qos = qos_specs.create(ctxt, qos_info["qos_name"], qos_info["qos_spec"]) qos_data = self.flasharray.Qos(**qos_info["qos_data"]) vol, vol_name = self.new_fake_vol(set_provider_id=False) src_vol, src_name = self.new_fake_vol(spec={"size": 1}, type_qos_specs_id=qos.id) mock_get_volume_type.return_value = vol.volume_type mock_data = self.array.flasharray.VolumePost(names=[vol_name], source= pure.flasharray. reference(name=src_name), qos=qos_data) mock_fa.return_value = mock_data mock_qos_specs.return_value = qos self.mock_object(self.driver, '_get_volume_type_extra_spec', return_value={}) self.driver.create_cloned_volume(vol, src_vol) self.driver.set_qos.assert_called_with(self.array, vol_name, vol["size"], qos) @mock.patch(DRIVER_PATH + ".flasharray.VolumePost") @mock.patch.object(volume_types, 'get_volume_type') def test_create_cloned_volume_sync_rep(self, mock_get_volume_type, mock_fa): repl_extra_specs = { 'replication_type': ' sync', 'replication_enabled': ' true', } src_vol, src_name = self.new_fake_vol( type_extra_specs=repl_extra_specs) vol, vol_name = self.new_fake_vol(set_provider_id=False, type_extra_specs=repl_extra_specs) mock_get_volume_type.return_value = vol.volume_type mock_data = self.array.flasharray.VolumePost(names=[vol_name], source=pure.flasharray. 
reference(name=src_name)) mock_fa.return_value = mock_data # Branch where extend unneeded self.driver._get_volume_type_extra_spec = mock.Mock( return_value={}) self.driver.create_cloned_volume(vol, src_vol) self.array.post_volumes.assert_called_with(names=[vol_name], volume=mock_data) self.assertFalse(self.array.extend_volume.called) @mock.patch(BASE_DRIVER_OBJ + "._extend_if_needed") @mock.patch(DRIVER_PATH + ".flasharray.VolumePost") @mock.patch(BASE_DRIVER_OBJ + "._add_to_group_if_needed") @mock.patch(BASE_DRIVER_OBJ + "._get_replication_type_from_vol_type") @mock.patch.object(volume_types, 'get_volume_type') def test_create_cloned_volume_and_extend(self, mock_get_volume_type, mock_get_replication_type, mock_add_to_group, mock_fa, mock_extend): vol, vol_name = self.new_fake_vol(set_provider_id=False, spec={"size": 2}) mock_get_volume_type.return_value = vol.volume_type src_vol, src_name = self.new_fake_vol() mock_get_replication_type.return_value = None mock_data = self.array.flasharray.VolumePost(names=[vol_name], source= pure.flasharray. Reference(name=src_name), name=vol_name) mock_fa.return_value = mock_data self.driver._get_volume_type_extra_spec = mock.Mock( return_value={}) self.driver.create_cloned_volume(vol, src_vol) mock_extend.assert_called_with(self.array, vol_name, src_vol["size"], vol["size"]) mock_add_to_group.assert_called_once_with(vol, vol_name) # Tests cloning a volume that is part of a consistency group @mock.patch(BASE_DRIVER_OBJ + "._add_to_group_if_needed") @mock.patch(BASE_DRIVER_OBJ + "._get_replication_type_from_vol_type") @mock.patch.object(volume_types, 'get_volume_type') def test_create_cloned_volume_with_cgroup(self, mock_get_volume_type, mock_get_replication_type, mock_add_to_group): vol, vol_name = self.new_fake_vol(set_provider_id=False) mock_get_volume_type.return_value = vol.volume_type group = fake_group.fake_group_obj(mock.MagicMock()) self.driver._get_volume_type_extra_spec = mock.Mock( return_value={}) src_vol, _ = self.new_fake_vol(spec={"group_id": group.id}) mock_get_replication_type.return_value = None self.driver.create_cloned_volume(vol, src_vol) mock_add_to_group.assert_called_with(vol, vol_name) def test_delete_volume_already_deleted(self): vol, _ = self.new_fake_vol() self.array.get_connections.return_value = CONN self.driver.delete_volume(vol) self.assertFalse(self.array.delete_volumes.called) # Testing case where array.destroy_volume returns an exception # because volume has already been deleted self.array.get_connections.side_effect = None self.array.get_connections.return_value = ValidResponse(200, None, 1, [], {}) self.driver.delete_volume(vol) self.array.delete_connections.assert_called_with(host_names=['utest'], volume_names= [vol["provider_id"]]) self.assertTrue(self.array.patch_volumes.called) self.assertFalse(self.array.delete_volumes.called) def test_delete_volume(self): vol, vol_name = self.new_fake_vol() self.array.get_connections.return_value = CONN self.driver.delete_volume(vol) self.array.get_connections.assert_called() self.array.patch_volumes.assert_called() self.assertFalse(self.array.eradicate_volume.called) @mock.patch(DRIVER_PATH + ".LOG") @mock.patch(DRIVER_PATH + ".flasharray.VolumePatch") def test_delete_volume_error(self, mock_vol_patch, mock_logger): vol, vol_name = self.new_fake_vol() self.array.get_connections.return_value = ValidResponse(200, None, 1, [], {}) err_rsp = ErrorResponse(400, [DotNotation({'message': 'does not exist'})], {}) self.array.patch_volumes.return_value = err_rsp 
self.driver.delete_volume(vol) mock_logger.warning.\ assert_called_with('Volume deletion failed with message: %s', 'does not exist') @mock.patch(DRIVER_PATH + ".flasharray.VolumePatch") def test_delete_volume_eradicate_now(self, mock_vol_patch): vol, vol_name = self.new_fake_vol() self.array.get_connections.return_value = ValidResponse(200, None, 1, [], {}) self.mock_config.pure_eradicate_on_delete = True mock_data = self.array.flasharray.VolumePatch(names=[vol_name], volume=vol) mock_vol_patch.return_data = mock_data self.driver.delete_volume(vol) expected = [mock.call.flasharray.VolumePatch(names=[vol_name], volume=vol), mock.call.get_connections(volume_names = [vol_name]), mock.call.get_connections(volume_names = [vol_name]), mock.call.patch_volumes(names=[vol_name], volume=mock_vol_patch()), mock.call.delete_volumes(names=[vol_name])] self.array.assert_has_calls(expected) @mock.patch(DRIVER_PATH + ".flasharray.VolumePatch") def test_delete_connected_volume(self, mock_vol_patch): vol, vol_name = self.new_fake_vol() self.array.get_connections.return_value = CONN mock_data = self.array.flasharray.VolumePatch(names=[vol_name], volume=vol) mock_vol_patch.return_data = mock_data self.driver.delete_volume(vol) expected = [mock.call.flasharray.VolumePatch(names=[vol_name], volume=vol), mock.call.get_connections(volume_names = [vol_name]), mock.call.get_connections(volume_names = [vol_name]), mock.call.delete_connections(host_names=['utest'], volume_names = [vol_name]), mock.call.get_connections(host_names=['utest']), mock.call.patch_volumes(names=[vol_name], volume=mock_vol_patch()) ] self.array.assert_has_calls(expected) @mock.patch(DRIVER_PATH + ".flasharray.VolumePatch") def test_delete_not_connected_pod_volume(self, mock_vol_patch): type_spec = { 'replication_type': ' sync', 'replication_enabled': ' true', } vol, vol_name = self.new_fake_vol(type_extra_specs=type_spec) self.array.get_connections.return_value = ValidResponse(200, None, 1, [], {}) mock_data = self.array.flasharray.VolumePatch(names=[vol_name], volume=vol) mock_vol_patch.return_data = mock_data # Set the array to be in a sync-rep enabled version self.driver.delete_volume(vol) expected = [mock.call.flasharray.VolumePatch(names=[vol_name], volume=vol), mock.call.get_connections(volume_names = [vol_name]), mock.call.get_connections(volume_names = [vol_name]), mock.call.patch_volumes(names=[vol_name], volume=mock_vol_patch()) ] self.array.assert_has_calls(expected) @mock.patch(DRIVER_PATH + ".flasharray.VolumePatch") def test_delete_connected_pod_volume(self, mock_vol_patch): type_spec = { 'replication_type': ' sync', 'replication_enabled': ' true', } vol, vol_name = self.new_fake_vol(type_extra_specs=type_spec) self.array.get_connections.return_value = CONN mock_data = self.array.flasharray.VolumePatch(names=[vol_name], volume=vol) mock_vol_patch.return_data = mock_data # Set the array to be in a sync-rep enabled version self.driver.delete_volume(vol) expected = [mock.call.flasharray.VolumePatch(names=[vol_name], volume=vol), mock.call.get_connections(volume_names = [vol_name]), mock.call.get_connections(volume_names = [vol_name]), mock.call.delete_connections(host_names = ['utest'], volume_names = [vol_name]), mock.call.get_connections(host_names = ['utest']), mock.call.patch_volumes(names=[vol_name], volume=mock_vol_patch()) ] self.array.assert_has_calls(expected) @mock.patch(DRIVER_PATH + ".flasharray.VolumeSnapshotPost") def test_create_snapshot(self, mock_snap): vol, vol_name = self.new_fake_vol() snap = 
fake_snapshot.fake_snapshot_obj(mock.MagicMock(), volume=vol) suffix_name = snap['name'].split(".") mock_data = self.array.flasharray.VolumeSnapshotPost(suffix= suffix_name) mock_snap.return_value = mock_data self.driver.create_snapshot(snap) self.array.post_volume_snapshots.assert_called_with( source_names=[vol_name], volume_snapshot=mock_data ) self.assert_error_propagates([self.array.post_volume_snapshots], self.driver.create_snapshot, snap) @mock.patch(DRIVER_PATH + ".LOG") @mock.patch(DRIVER_PATH + ".flasharray.VolumeSnapshotPatch") def test_delete_snapshot_error(self, mock_snap_patch, mock_logger): vol, _ = self.new_fake_vol() snap = fake_snapshot.fake_snapshot_obj(mock.MagicMock(), volume=vol) err_rsp = ErrorResponse(400, [DotNotation({'message': 'does not exist'})], {}) self.array.patch_volume_snapshots.return_value = err_rsp self.driver.delete_snapshot(snap) mock_logger.warning.\ assert_called_with('Unable to delete snapshot, ' 'assuming already deleted. ' 'Error: %s', 'does not exist') @mock.patch(DRIVER_PATH + ".flasharray.VolumeSnapshotPatch") def test_delete_snapshot(self, mock_snap_patch): vol, _ = self.new_fake_vol() snap = fake_snapshot.fake_snapshot_obj(mock.MagicMock(), volume=vol) snap_name = snap["volume_name"] + "-cinder." + snap["name"] mock_data = self.array.flasharray.VolumeSnapshotPatch(destroyed=True) mock_snap_patch.return_value = mock_data self.driver.delete_snapshot(snap) expected = [mock.call.flasharray.VolumeSnapshotPatch(destroyed=True), mock.call.patch_volume_snapshots(names=[snap_name], volume_snapshot=mock_data)] self.array.assert_has_calls(expected) self.assertFalse(self.array.delete_volume_snapshots.called) @mock.patch(DRIVER_PATH + ".flasharray.VolumeSnapshotPatch") def test_delete_snapshot_eradicate_now(self, mock_snap_patch): vol, _ = self.new_fake_vol() snap = fake_snapshot.fake_snapshot_obj(mock.MagicMock(), volume=vol) snap_name = snap["volume_name"] + "-cinder." 
+ snap["name"] self.mock_config.pure_eradicate_on_delete = True mock_data = self.array.flasharray.VolumeSnapshotPatch(destroyed=True) mock_snap_patch.return_value = mock_data self.driver.delete_snapshot(snap) self.array.delete_volume_snapshots.assert_called_with(names= [snap_name]) self.assertTrue(self.array.delete_volume_snapshots.called) @mock.patch(BASE_DRIVER_OBJ + "._get_host", autospec=True) def test_terminate_connection(self, mock_host): vol, vol_name = self.new_fake_vol() pure_hosts = ValidResponse(200, None, 1, [DotNotation({"name": "some-host"})], {}) mock_host.return_value = pure_hosts.items self.array.get_connections.return_value = CONN # Branch with manually created host self.driver.terminate_connection(vol, ISCSI_CONNECTOR) self.array.delete_connections.\ assert_called_with(host_names=["some-host"], volume_names=[vol_name]) self.assertTrue(self.array.get_connections.called) self.assertTrue(self.array.delete_connections.called) self.assertFalse(self.array.delete_hosts.called) # Branch with host added to host group self.array.reset_mock() self.array.get_connections.\ return_value = ValidResponse(200, None, 1, [], {}) pure_hosts = ValidResponse(200, None, 1, [DotNotation(PURE_HOST.copy())], {}) mock_host.return_value = pure_hosts.items mock_host.return_value[0].update(hgroup="some-group") self.array.delete_hosts.\ return_value = ValidResponse(200, None, 1, [], {}) self.driver.terminate_connection(vol, ISCSI_CONNECTOR) self.array.delete_connections.\ assert_called_with(host_names=[PURE_HOST_NAME], volume_names=[vol_name]) self.assertTrue(self.array.get_connections.called) self.assertTrue(self.array.delete_hosts.called) # Branch with host still having connected volumes self.array.reset_mock() pure_hosts = ValidResponse(200, None, 1, [DotNotation(PURE_HOST.copy())], {}) self.array.get_host_connections.return_value = [ {"lun": 2, "name": PURE_HOST_NAME, "vol": "some-vol"}] mock_host.return_value = pure_hosts.items self.driver.terminate_connection(vol, ISCSI_CONNECTOR) self.array.delete_connections.\ assert_called_with(host_names=[PURE_HOST_NAME], volume_names=[vol_name]) self.assertTrue(self.array.get_connections.called) self.assertFalse(self.array.delete_host.called) # Branch where host gets deleted self.array.reset_mock() self.array.get_host_connections.\ return_value = ValidResponse(200, None, 1, [], {}) self.driver.terminate_connection(vol, ISCSI_CONNECTOR) self.array.delete_connections.\ assert_called_with(host_names=[PURE_HOST_NAME], volume_names=[vol_name]) self.assertTrue(self.array.get_connections.called) self.array.delete_hosts.assert_called_with(names=[PURE_HOST_NAME]) # Branch where connection is missing and the host is still deleted self.array.reset_mock() err_rsp = ErrorResponse(400, [DotNotation({'message': 'does not exist'})], {}) self.array.get_host_connections.return_value = err_rsp self.driver.terminate_connection(vol, ISCSI_CONNECTOR) self.array.delete_connections.\ assert_called_with(host_names=[PURE_HOST_NAME], volume_names=[vol_name]) self.assertTrue(self.array.get_connections.called) self.array.delete_hosts.assert_called_with(names=[PURE_HOST_NAME]) # Branch where an unexpected exception occurs self.array.reset_mock() err_rsp = ErrorResponse(500, [DotNotation({'message': 'Some other error'})], {}) self.array.get_host_connections.return_value = err_rsp self.driver.terminate_connection(vol, ISCSI_CONNECTOR) self.array.delete_connections.\ assert_called_with(host_names=[PURE_HOST_NAME], volume_names=[vol_name]) self.assertTrue(self.array.get_connections.called) 
self.array.delete_hosts.assert_called_with(names=[PURE_HOST_NAME]) @mock.patch(BASE_DRIVER_OBJ + "._disconnect_host") @mock.patch(BASE_DRIVER_OBJ + "._get_host", autospec=True) def test_terminate_connection_uniform_ac_remove_remote_hosts( self, mock_host, mock_disconnect): repl_extra_specs = { 'replication_type': ' sync', 'replication_enabled': ' true', } vol, vol_name = self.new_fake_vol(type_extra_specs=repl_extra_specs) self.driver._is_active_cluster_enabled = True mock_secondary = mock.MagicMock() self.driver._uniform_active_cluster_target_arrays = [mock_secondary] pure_hosts = ValidResponse(200, None, 2, [DotNotation({"name": "secondary-fa1:some-host1"}), DotNotation({"name": "some-host1"})], {}) mock_host.return_value = pure_hosts.items self.driver.terminate_connection(vol, ISCSI_CONNECTOR) mock_disconnect.assert_has_calls([ mock.call(mock_secondary, "secondary-fa1:some-host1", vol_name), mock.call(mock_secondary, "some-host1", vol_name) ]) @mock.patch(BASE_DRIVER_OBJ + "._disconnect_host") @mock.patch(BASE_DRIVER_OBJ + "._get_host", autospec=True) def test_terminate_connection_uniform_ac_no_remote_hosts( self, mock_host, mock_disconnect): repl_extra_specs = { 'replication_type': ' sync', 'replication_enabled': ' true', } vol, vol_name = self.new_fake_vol(type_extra_specs=repl_extra_specs) self.driver._is_active_cluster_enabled = True mock_secondary = mock.MagicMock() self.driver._uniform_active_cluster_target_arrays = [mock_secondary] pure_hosts = ValidResponse(200, None, 2, [DotNotation({"name": "some-host2"})], {}) mock_host.return_value = pure_hosts.items self.driver.terminate_connection(vol, ISCSI_CONNECTOR) mock_disconnect.assert_has_calls([ mock.call(self.array, "some-host2", vol_name), ]) def _test_terminate_connection_with_error(self, mock_host, error_text): vol, vol_name = self.new_fake_vol() mock_host.return_value = [DotNotation(PURE_HOST.copy())] self.array.reset_mock() self.array.get_host_connections.return_value = [] err_rsp = ErrorResponse(400, [DotNotation({'message': f"{error_text}"})], {}) self.array.delete_hosts.return_value = err_rsp pure_hosts = ValidResponse(200, None, 1, [], {}) self.array.get_connections.return_value = pure_hosts self.driver.terminate_connection(vol, DotNotation(ISCSI_CONNECTOR)) self.array.get_connections.\ assert_called_with(host_names=[PURE_HOST_NAME]) self.array.delete_hosts.\ assert_called_once_with(names=[PURE_HOST_NAME]) @mock.patch(BASE_DRIVER_OBJ + "._get_host", autospec=True) def test_terminate_connection_host_deleted(self, mock_host): self._test_terminate_connection_with_error(mock_host, 'does not exist') @mock.patch(BASE_DRIVER_OBJ + "._get_host", autospec=True) def test_terminate_connection_host_got_new_connections(self, mock_host): self._test_terminate_connection_with_error( mock_host, 'Host cannot be deleted due to existing connections.' 
) def test_terminate_connection_no_connector_with_host(self): vol, vol_name = self.new_fake_vol() # Show the volume having a connection connections = [ {"host": "h1", "name": vol_name}, {"host": "h2", "name": vol_name}, ] self.array.get_connections.\ return_value = ValidResponse(200, None, 1, [DotNotation(connections[0])], {}) self.driver.terminate_connection(vol, None) self.array.delete_connections.\ assert_called_with(host_names=[connections[0]["host"]], volume_names=[vol_name]) def test_terminate_connection_no_connector_no_host(self): vol, _ = self.new_fake_vol() # Show the volume not having a connection self.array.get_connections.return_value = [] self.array.get_connections.\ return_value = ValidResponse(200, None, 1, [], {}) self.driver.terminate_connection(vol, None) self.array.delete_connections.assert_not_called() @mock.patch(DRIVER_PATH + ".flasharray.VolumePatch") @mock.patch.object(volume_types, 'get_volume_type') def test_extend_volume(self, mock_get_volume_type, mock_fa): vol, vol_name = self.new_fake_vol(spec={"size": 1}) mock_get_volume_type.return_value = vol.volume_type mock_data = self.flasharray.VolumePatch(provisioned=3 * units.Gi) self.driver.extend_volume(vol, 3) self.array.patch_volumes.\ assert_called_with(names=[vol_name], volume=mock_data) self.assert_error_propagates([self.array.patch_volumes], self.driver.extend_volume, vol, 3) @ddt.data( dict( repl_types=[None], id=fake.GROUP_ID, expected_name=("consisgroup-%s-cinder" % fake.GROUP_ID) ), dict( repl_types=['async'], id=fake.GROUP_ID, expected_name=("consisgroup-%s-cinder" % fake.GROUP_ID) ), dict( repl_types=[None, 'async'], id=fake.GROUP_ID, expected_name=("consisgroup-%s-cinder" % fake.GROUP_ID) ), dict( repl_types=['sync'], id=fake.GROUP_ID, expected_name=("cinder-pod::consisgroup-%s-cinder" % fake.GROUP_ID) ), dict( repl_types=[None, 'sync'], id=fake.GROUP_ID, expected_name=("cinder-pod::consisgroup-%s-cinder" % fake.GROUP_ID) ), dict( repl_types=['trisync'], id=fake.GROUP_ID, expected_name=("cinder-pod::consisgroup-%s-cinder" % fake.GROUP_ID) ), dict( repl_types=[None, 'trisync'], id=fake.GROUP_ID, expected_name=("cinder-pod::consisgroup-%s-cinder" % fake.GROUP_ID) ), dict( repl_types=['sync', 'async'], id=fake.GROUP_ID, expected_name=("cinder-pod::consisgroup-%s-cinder" % fake.GROUP_ID) ), dict( repl_types=[None, 'sync', 'async'], id=fake.GROUP_ID, expected_name=("cinder-pod::consisgroup-%s-cinder" % fake.GROUP_ID) ), dict( repl_types=['trisync', 'sync', 'async'], id=fake.GROUP_ID, expected_name=("cinder-pod::consisgroup-%s-cinder" % fake.GROUP_ID) ), dict( repl_types=[None, 'trisync', 'sync', 'async'], id=fake.GROUP_ID, expected_name=("cinder-pod::consisgroup-%s-cinder" % fake.GROUP_ID) ), ) @ddt.unpack def test_get_pgroup_name(self, repl_types, id, expected_name): pgroup = fake_group.fake_group_obj(mock.MagicMock(), id=id) vol_types = [] for repl_type in repl_types: vol_type = fake_volume.fake_volume_type_obj(None) if repl_type is not None: repl_extra_specs = { 'replication_type': ' %s' % repl_type, 'replication_enabled': ' true', } vol_type.extra_specs = repl_extra_specs vol_types.append(vol_type) pgroup.volume_types = volume_type.VolumeTypeList(objects=vol_types) actual_name = self.driver._get_pgroup_name(pgroup) self.assertEqual(expected_name, actual_name) def test_get_pgroup_snap_suffix(self): cgsnap = { 'id': "4a2f7e3a-312a-40c5-96a8-536b8a0fe074" } expected_suffix = "cgsnapshot-%s-cinder" % cgsnap['id'] actual_suffix = self.driver._get_pgroup_snap_suffix(cgsnap) self.assertEqual(expected_suffix, 
actual_suffix) @mock.patch(BASE_DRIVER_OBJ + "._get_pgroup_name") def test_get_pgroup_snap_name(self, mock_get_pgroup_name): cg = fake_group.fake_group_obj(mock.MagicMock()) cgsnap = fake_group_snapshot.fake_group_snapshot_obj(mock.MagicMock()) cgsnap.group_id = cg.id cgsnap.group = cg group_name = "consisgroup-%s-cinder" % cg.id mock_get_pgroup_name.return_value = group_name expected_name = ("%(group_name)s.cgsnapshot-%(snap)s-cinder" % { "group_name": group_name, "snap": cgsnap.id}) actual_name = self.driver._get_pgroup_snap_name(cgsnap) self.assertEqual(expected_name, actual_name) def test_get_pgroup_snap_name_from_snapshot(self): vol, _ = self.new_fake_vol() cg = fake_group.fake_group_obj(mock.MagicMock()) cgsnap = fake_group_snapshot.fake_group_snapshot_obj(mock.MagicMock()) cgsnap.group_id = cg.id cgsnap.group = cg pgsnap_name_base = ( 'consisgroup-%s-cinder.cgsnapshot-%s-cinder.%s-cinder') pgsnap_name = pgsnap_name_base % (cg.id, cgsnap.id, vol.name) snap, _ = self.new_fake_snap(vol=vol, group_snap=cgsnap) actual_name = self.driver._get_pgroup_snap_name_from_snapshot( snap ) self.assertEqual(pgsnap_name, actual_name) @mock.patch(BASE_DRIVER_OBJ + "._group_potential_repl_types") def test_create_consistencygroup(self, mock_get_repl_types): cgroup = fake_group.fake_group_obj(mock.MagicMock()) mock_get_repl_types.return_value = set() model_update = self.driver.create_consistencygroup(None, cgroup) expected_name = "consisgroup-" + cgroup.id + "-cinder" self.driver._get_current_array.assert_called() self.array.post_protection_groups.assert_called_with(names= [expected_name]) self.assertEqual({'status': 'available'}, model_update) self.assert_error_propagates( [self.array.post_protection_groups], self.driver.create_consistencygroup, None, cgroup) @mock.patch(BASE_DRIVER_OBJ + "._group_potential_repl_types") def test_create_consistencygroup_in_pod(self, mock_get_repl_types): cgroup = fake_group.fake_group_obj(mock.MagicMock()) mock_get_repl_types.return_value = ['sync', 'async'] model_update = self.driver.create_consistencygroup(None, cgroup) expected_name = "cinder-pod::consisgroup-" + cgroup.id + "-cinder" self.array.post_protection_groups.assert_called_with(names= [expected_name]) self.assertEqual({'status': 'available'}, model_update) @mock.patch('cinder.volume.group_types.get_group_type_specs') @mock.patch(BASE_DRIVER_OBJ + ".create_volume_from_snapshot") @mock.patch(BASE_DRIVER_OBJ + ".create_consistencygroup") def test_create_consistencygroup_from_cgsnapshot(self, mock_create_cg, mock_create_vol, mock_gp_specs): ctxt = context.get_admin_context() mock_gp_specs.return_value = ' True' mock_group = fake_group.fake_group_obj( None, group_type_id=fake.GROUP_TYPE_ID) mock_cgsnapshot = mock.Mock() mock_snapshots = [mock.Mock() for i in range(5)] mock_volumes = [mock.Mock() for i in range(5)] self.driver.create_consistencygroup_from_src( ctxt, mock_group, mock_volumes, cgsnapshot=mock_cgsnapshot, snapshots=mock_snapshots, source_cg=None, source_vols=None ) mock_create_cg.assert_called_with(ctxt, mock_group, None) expected_calls = [mock.call(vol, snap, cgsnapshot=True) for vol, snap in zip(mock_volumes, mock_snapshots)] mock_create_vol.assert_has_calls(expected_calls, any_order=True) self.assert_error_propagates( [mock_create_vol, mock_create_cg], self.driver.create_consistencygroup_from_src, ctxt, mock_group, mock_volumes, cgsnapshot=mock_cgsnapshot, snapshots=mock_snapshots, source_cg=None, source_vols=None ) @mock.patch('cinder.volume.group_types.get_group_type_specs') 
@mock.patch(BASE_DRIVER_OBJ + ".create_consistencygroup") def test_create_consistencygroup_from_cg(self, mock_create_cg, mock_gp_specs): num_volumes = 5 ctxt = context.get_admin_context() mock_gp_specs.return_value = ' True' mock_group = fake_group.fake_group_obj( None, group_type_id=fake.GROUP_TYPE_ID) mock_source_cg = mock.MagicMock() mock_volumes = [mock.MagicMock() for i in range(num_volumes)] mock_source_vols = [mock.MagicMock() for i in range(num_volumes)] self.driver.create_consistencygroup_from_src( ctxt, mock_group, mock_volumes, source_cg=mock_source_cg, source_vols=mock_source_vols ) mock_create_cg.assert_called_with(ctxt, mock_group, None) self.assertTrue(self.array.post_protection_group_snapshots.called) self.assertTrue(self.array.patch_protection_group_snapshots.called) @mock.patch('cinder.volume.group_types.get_group_type_specs') @mock.patch(DRIVER_PATH + ".LOG") @mock.patch(DRIVER_PATH + ".flasharray.ProtectionGroup") @mock.patch(BASE_DRIVER_OBJ + ".delete_volume", autospec=True) def test_delete_consistencygroup(self, mock_delete_volume, mock_pg, mock_logger, mock_gp_specs): ctxt = context.get_admin_context() mock_gp_specs.return_value = ' True' mock_cgroup = fake_group.fake_group_obj(ctxt) mock_volume = fake_volume.fake_volume_obj(ctxt) self.array.patch_protection_groups.\ return_value = ValidResponse(200, None, 1, ['pgroup_name'], {}) mock_data = self.array.flasharray.ProtectionGroup(destroyed=True) mock_pg.return_value = mock_data model_update, volumes = self.driver.delete_consistencygroup( ctxt, mock_cgroup, [mock_volume]) expected_name = "consisgroup-%s-cinder" % mock_cgroup.id self.array.patch_protection_groups.\ assert_called_with(names=[expected_name], protection_group=mock_data) self.assertFalse(self.array.delete_protection_groups.called) self.assertIsNone(volumes) self.assertIsNone(model_update) mock_delete_volume.assert_called_with(self.driver, mock_volume) err_rsp = ErrorResponse(400, [DotNotation({'message': 'does not exist'})], {}) self.array.patch_protection_groups.return_value = err_rsp self.driver.delete_consistencygroup(ctxt, mock_cgroup, [mock_volume]) mock_logger.warning.\ assert_called_with('Unable to delete Protection Group: %s', None) self.assert_error_propagates( [self.array.patch_protection_groups], self.driver.delete_consistencygroup, ctxt, mock_cgroup, [mock_volume] ) def test_update_consistencygroup(self): group, group_name = self.new_fake_group() add_vols = [ self.new_fake_vol(spec={"id": fake.VOLUME_ID}), self.new_fake_vol(spec={"id": fake.VOLUME2_ID}), self.new_fake_vol(spec={"id": fake.VOLUME3_ID}), ] add_vol_objs = [] expected_addvollist = [] for vol in add_vols: add_vol_objs.append(vol[0]) expected_addvollist.append(vol[1]) remove_vols = [ self.new_fake_vol(spec={"id": fake.VOLUME4_ID}), self.new_fake_vol(spec={"id": fake.VOLUME5_ID}), ] rem_vol_objs = [] expected_remvollist = [] for vol in remove_vols: rem_vol_objs.append(vol[0]) expected_remvollist.append(vol[1]) self.driver.update_consistencygroup(mock.Mock(), group, add_vol_objs, rem_vol_objs) self.array.post_protection_groups_volumes.assert_called_with( group_names=[group_name], member_names=expected_addvollist ) self.array.delete_protection_groups_volumes.assert_called_with( group_names=[group_name], member_names=expected_remvollist ) def test_update_consistencygroup_no_add_vols(self): group, group_name = self.new_fake_group() remove_vols = [ self.new_fake_vol(spec={"id": fake.VOLUME4_ID}), self.new_fake_vol(spec={"id": fake.VOLUME5_ID}), ] rem_vol_objs = [] expected_remvollist = [] 
for vol in remove_vols: rem_vol_objs.append(vol[0]) expected_remvollist.append(vol[1]) self.driver.update_consistencygroup(mock.Mock(), group, None, rem_vol_objs) self.array.delete_protection_groups_volumes.assert_called_with( group_names=[group_name], member_names=expected_remvollist ) def test_update_consistencygroup_no_remove_vols(self): group, group_name = self.new_fake_group() add_vols = [ self.new_fake_vol(spec={"id": fake.VOLUME_ID}), self.new_fake_vol(spec={"id": fake.VOLUME2_ID}), self.new_fake_vol(spec={"id": fake.VOLUME3_ID}), ] add_vol_objs = [] expected_addvollist = [] for vol in add_vols: add_vol_objs.append(vol[0]) expected_addvollist.append(vol[1]) self.driver.update_consistencygroup(mock.Mock(), group, add_vol_objs, None) self.array.post_protection_groups_volumes.assert_called_with( group_names=[group_name], member_names=expected_addvollist ) def test_update_consistencygroup_no_vols(self): group, group_name = self.new_fake_group() self.driver.update_consistencygroup(mock.Mock(), group, None, None) self.array.post_protection_groups_volumes.assert_called_with( group_names=[group_name], member_names=[] ) self.array.delete_protection_groups_volumes.assert_called_with( group_names=[group_name], member_names=[] ) @mock.patch(DRIVER_PATH + ".flasharray.ProtectionGroupSnapshotPost") def test_create_cgsnapshot(self, mock_pgsnap): ctxt = context.get_admin_context() mock_group = fake_group.fake_group_obj(ctxt) mock_cgsnap = fake_group_snapshot.fake_group_snapshot_obj( ctxt, group_id=mock_group.id) mock_snap = fake_snapshot.fake_snapshot_obj(ctxt) suffix_name = mock_snap['name'].split(".") mock_data = self.array.flasharray.\ ProtectionGroupSnapshotPost(suffix=suffix_name) mock_pgsnap.return_value = mock_data # Avoid having the group snapshot object load from the db with mock.patch('cinder.objects.Group.get_by_id') as mock_get_group: mock_get_group.return_value = mock_group model_update, snapshots = self.driver.create_cgsnapshot( ctxt, mock_cgsnap, [mock_snap]) expected_pgroup_name = self.driver._get_pgroup_name(mock_group) self.array.post_protection_group_snapshots\ .assert_called_with(source_names=[expected_pgroup_name], protection_group_snapshot=mock_data) self.assertIsNone(model_update) self.assertIsNone(snapshots) self.assert_error_propagates( [self.array.post_protection_group_snapshots], self.driver.create_cgsnapshot, ctxt, mock_cgsnap, []) @ddt.data("does not exist", "has been destroyed") @mock.patch(DRIVER_PATH + ".LOG") @mock.patch(DRIVER_PATH + ".flasharray.ProtectionGroupSnapshotPatch") @mock.patch(BASE_DRIVER_OBJ + "._get_pgroup_snap_name", spec=pure.PureBaseVolumeDriver._get_pgroup_snap_name) def test_delete_cgsnapshot(self, error_text, mock_get_snap_name, mock_pgsnap_patch, mock_logger): snap_name = "consisgroup-4a2f7e3a-312a-40c5-96a8-536b8a0f" \ "e074-cinder.4a2f7e3a-312a-40c5-96a8-536b8a0fe075" mock_get_snap_name.return_value = snap_name mock_cgsnap = mock.Mock() mock_cgsnap.status = 'deleted' ctxt = context.get_admin_context() mock_snap = mock.Mock() mock_data = self.array.flasharray.\ ProtectionGroupSnapshotPatch(destroyed=True) mock_pgsnap_patch.return_value = mock_data model_update, snapshots = self.driver.delete_cgsnapshot(ctxt, mock_cgsnap, [mock_snap]) self.array.patch_protection_group_snapshots.\ assert_called_with(names=[snap_name], protection_group_snapshot=mock_data) self.assertFalse(self.array.delete_protection_group_snapshots.called) self.assertIsNone(model_update) self.assertIsNone(snapshots) err_rsp = ErrorResponse(400, [DotNotation({'message': 
f"{error_text}"})], {}) self.array.patch_protection_group_snapshots.return_value = err_rsp self.driver.delete_cgsnapshot(ctxt, mock_cgsnap, [mock_snap]) self.assertFalse(self.array.delete_protection_group_snapshots.called) mock_logger.warning.assert_called_with('Unable to delete ' 'Protection Group ' 'Snapshot: %s', f"{error_text}") self.assert_error_propagates( [self.array.patch_protection_group_snapshots], self.driver.delete_cgsnapshot, ctxt, mock_cgsnap, [mock_snap] ) @mock.patch(DRIVER_PATH + ".flasharray.ProtectionGroupSnapshotPatch") @mock.patch(BASE_DRIVER_OBJ + "._get_pgroup_snap_name", spec=pure.PureBaseVolumeDriver._get_pgroup_snap_name) def test_delete_cgsnapshot_eradicate_now(self, mock_get_snap_name, mock_pgsnap_patch): snap_name = "consisgroup-4a2f7e3a-312a-40c5-96a8-536b8a0f" \ "e074-cinder.4a2f7e3a-312a-40c5-96a8-536b8a0fe075" mock_get_snap_name.return_value = snap_name self.mock_config.pure_eradicate_on_delete = True mock_data = self.array.flasharray.ProtectionGroupSnapshotPatch( destroyed=True) mock_pgsnap_patch.return_value = mock_data model_update, snapshots = self.driver.delete_cgsnapshot(mock.Mock(), mock.Mock(), [mock.Mock()]) self.array.patch_protection_group_snapshots.\ assert_called_with(names=[snap_name], protection_group_snapshot=mock_data) self.array.delete_protection_group_snapshots.\ assert_called_with(names=[snap_name]) @mock.patch(BASE_DRIVER_OBJ + "._rename_volume_object") def test_manage_existing(self, mock_rename): ref_name = 'vol1' volume_ref = {'source-name': ref_name} self.array.get_volumes.return_value = MPV self.array.get_connections.return_value = [] vol, vol_name = self.new_fake_vol(set_provider_id=False) self.driver._get_volume_type_extra_spec = mock.Mock( return_value={}) mock_rsp = ValidResponse(200, None, 1, [{"group": {"name": "tstpg"}}], {}) self.array.get_protection_groups_volumes.return_value = mock_rsp tpg_rsp = ValidResponse(200, None, 1, [DotNotation({"target_count": 2})], {}) self.array.get_protection_groups.return_value = tpg_rsp self.driver.manage_existing(vol, volume_ref) mock_rename.assert_called_with(ref_name, vol_name, raise_not_exist=True, manage=True) @mock.patch(BASE_DRIVER_OBJ + '._validate_manage_existing_ref') @mock.patch(BASE_DRIVER_OBJ + "._rename_volume_object") def test_manage_existing_error_propagates(self, mock_rename, mock_validate): self.array.get_volumes.return_value = MPV self.array.get_connections.return_value = [] vol, _ = self.new_fake_vol(set_provider_id=False) self.driver._get_volume_type_extra_spec = mock.Mock( return_value={}) mock_rsp = ValidResponse(200, None, 1, [{"group": {"name": "tstpg"}}], {}) self.array.get_protection_groups_volumes.return_value = mock_rsp tpg_rsp = ValidResponse(200, None, 1, [DotNotation({"target_count": 2})], {}) self.array.get_protection_groups.return_value = tpg_rsp self.assert_error_propagates( [mock_rename, mock_validate], self.driver.manage_existing, vol, {'source-name': 'vol1'} ) def test_manage_existing_bad_ref(self): vol, _ = self.new_fake_vol(set_provider_id=False) self.assertRaises(exception.ManageExistingInvalidReference, self.driver.manage_existing, vol, {'bad_key': 'bad_value'}) self.assertRaises(exception.ManageExistingInvalidReference, self.driver.manage_existing, vol, {'source-name': ''}) self.assertRaises(exception.ManageExistingInvalidReference, self.driver.manage_existing, vol, {'source-name': None}) @mock.patch(BASE_DRIVER_OBJ + "._rename_volume_object") def test_manage_existing_with_connected_hosts(self, mock_rename): ref_name = 'vol1' vol, _ = 
self.new_fake_vol(set_provider_id=False) cvol = deepcopy(MANAGEABLE_PURE_VOLS) cvol[0]['connection_count'] = 1 self.array.get_volumes.\ return_value = ValidResponse(200, None, 1, [DotNotation(cvol[0])], {}) pg_rsp = ValidResponse( 200, None, 1, [{"group": {"name": "tstpg"}}], {}) self.array.get_protection_groups_volumes.return_value = pg_rsp tpg_rsp = ValidResponse( 200, None, 1, [DotNotation({"target_count": 2})], {}) self.array.get_protection_groups.return_value = tpg_rsp self.assertRaises(exception.ManageExistingInvalidReference, self.driver.manage_existing, vol, {'source-name': ref_name}) self.assertFalse(mock_rename.called) def test_manage_existing_get_size(self): ref_name = 'vol1' volume_ref = {'source-name': ref_name} expected_size = 3 self.array.get_volumes.return_value = MPV vol, _ = self.new_fake_vol(set_provider_id=False) size = self.driver.manage_existing_get_size(vol, volume_ref) self.assertEqual(expected_size, size) self.array.get_volumes.assert_called_with(names=[ref_name]) @mock.patch(BASE_DRIVER_OBJ + '._validate_manage_existing_ref') def test_manage_existing_get_size_error_propagates(self, mock_validate): self.array.get_volumes.return_value = mock.MagicMock() vol, _ = self.new_fake_vol(set_provider_id=False) self.assert_error_propagates([mock_validate], self.driver.manage_existing_get_size, vol, {'source-name': 'vol1'}) def test_manage_existing_get_size_bad_ref(self): vol, _ = self.new_fake_vol(set_provider_id=False) self.assertRaises(exception.ManageExistingInvalidReference, self.driver.manage_existing_get_size, vol, {'bad_key': 'bad_value'}) self.assertRaises(exception.ManageExistingInvalidReference, self.driver.manage_existing_get_size, vol, {'source-name': ''}) self.assertRaises(exception.ManageExistingInvalidReference, self.driver.manage_existing_get_size, vol, {'source-name': None}) @mock.patch(BASE_DRIVER_OBJ + "._rename_volume_object") def test_unmanage(self, mock_rename): vol, vol_name = self.new_fake_vol() unmanaged_vol_name = vol_name + UNMANAGED_SUFFIX self.driver.unmanage(vol) mock_rename.assert_called_with(vol_name, unmanaged_vol_name, manage=True) @mock.patch(BASE_DRIVER_OBJ + "._rename_volume_object") def test_unmanage_error_propagates(self, mock_rename): vol, _ = self.new_fake_vol() self.assert_error_propagates([mock_rename], self.driver.unmanage, vol) @mock.patch(BASE_DRIVER_OBJ + "._rename_volume_object") def test_unmanage_with_deleted_volume(self, mock_rename): vol, vol_name = self.new_fake_vol() unmanaged_vol_name = vol_name + UNMANAGED_SUFFIX self.driver.unmanage(vol) mock_rename.assert_called_with( vol_name, unmanaged_vol_name, manage=True) @mock.patch(DRIVER_PATH + ".LOG") @mock.patch(DRIVER_PATH + ".flasharray.VolumePatch") def test_unmanage_with_deleted_volume_error(self, mock_vol_patch, mock_logger): vol, vol_name = self.new_fake_vol() err_rsp = ErrorResponse(400, [DotNotation({'message': 'does not exist'})], {}) self.array.patch_volumes.return_value = err_rsp self.driver.unmanage(vol) mock_logger.warning.\ assert_called_with('Unable to rename %(old_name)s, ' 'error message: %(error)s', {'old_name': f"{vol_name}", 'error': 'does not exist'}) @mock.patch(BASE_DRIVER_OBJ + "._rename_volume_object") def test_manage_existing_snapshot(self, mock_rename): ref_name = PURE_SNAPSHOT['name'] snap_ref = {'source-name': ref_name} snap, snap_name = self.new_fake_snap() vol_rsp = ValidResponse(200, None, 1, [DotNotation(PURE_SNAPSHOT)], {}) self.array.get_volumes.return_value = vol_rsp self.array.get_volume_snapshots.return_value = MPV 
self.driver.manage_existing_snapshot(snap, snap_ref) mock_rename.assert_called_once_with(ref_name, snap_name, raise_not_exist=True, snapshot=True) self.array.get_volumes.\ assert_called_with(names=[PURE_SNAPSHOT['source']]) @mock.patch(BASE_DRIVER_OBJ + "._rename_volume_object") def test_manage_existing_snapshot_multiple_snaps_on_volume(self, mock_rename): ref_name = PURE_SNAPSHOT['name'] snap_ref = {'source-name': ref_name} pure_snaps = [PURE_SNAPSHOT] snap, snap_name = self.new_fake_snap() for i in range(5): pure_snap = PURE_SNAPSHOT.copy() pure_snap['name'] += str(i) pure_snaps.append(DotNotation(pure_snap)) vol_rsp = ValidResponse(200, None, 1, pure_snaps, {}) self.array.get_volumes.return_value = vol_rsp self.array.get_volume_snapshots.return_value = MPS self.driver.manage_existing_snapshot(snap, snap_ref) mock_rename.assert_called_once_with(ref_name, snap_name, raise_not_exist=True, snapshot=True) @mock.patch(BASE_DRIVER_OBJ + '._validate_manage_existing_ref') def test_manage_existing_snapshot_error_propagates(self, mock_validate): self.array.get_volumes.return_value = [PURE_SNAPSHOT] snap, _ = self.new_fake_snap() self.assert_error_propagates( [mock_validate], self.driver.manage_existing_snapshot, snap, {'source-name': PURE_SNAPSHOT['name']} ) def test_manage_existing_snapshot_bad_ref(self): snap, _ = self.new_fake_snap() self.assertRaises(exception.ManageExistingInvalidReference, self.driver.manage_existing_snapshot, snap, {'bad_key': 'bad_value'}) def test_manage_existing_snapshot_empty_ref(self): snap, _ = self.new_fake_snap() self.assertRaises(exception.ManageExistingInvalidReference, self.driver.manage_existing_snapshot, snap, {'source-name': ''}) def test_manage_existing_snapshot_none_ref(self): snap, _ = self.new_fake_snap() self.assertRaises(exception.ManageExistingInvalidReference, self.driver.manage_existing_snapshot, snap, {'source-name': None}) def test_manage_existing_snapshot_volume_ref_not_exist(self): snap, _ = self.new_fake_snap() err_rsp = ErrorResponse(400, [DotNotation({'message': 'does not exist'})], {}) self.array.get_volumes.return_value = err_rsp self.assertRaises(exception.ManageExistingInvalidReference, self.driver.manage_existing_snapshot, snap, {'source-name': 'non-existing-volume.snap1'}) def test_manage_existing_snapshot_ref_not_exist(self): ref_name = PURE_SNAPSHOT['name'] + '-fake' snap_ref = {'source-name': ref_name} snap, _ = self.new_fake_snap() err_rsp = ErrorResponse(400, [DotNotation({'message': 'does not exist'})], {}) self.array.get_volumes.return_value = err_rsp self.assertRaises(exception.ManageExistingInvalidReference, self.driver.manage_existing_snapshot, snap, snap_ref) def test_manage_existing_snapshot_get_size(self): ref_name = PURE_SNAPSHOT['name'] snap_ref = {'source-name': ref_name} self.array.get_volumes.return_value = MPV self.array.get_volume_snapshots.return_value = MPS snap, _ = self.new_fake_snap() size = self.driver.manage_existing_snapshot_get_size(snap, snap_ref) expected_size = 3.0 self.assertEqual(expected_size, size) self.array.get_volumes.\ assert_called_with(names=[PURE_SNAPSHOT['source']]) @mock.patch(BASE_DRIVER_OBJ + '._validate_manage_existing_ref') def test_manage_existing_snapshot_get_size_error_propagates(self, mock_valid): self.array.get_volumes.return_value = MPS snap, _ = self.new_fake_snap() self.assert_error_propagates( [mock_valid], self.driver.manage_existing_snapshot_get_size, snap, {'names': PURE_SNAPSHOT['name']} ) def test_manage_existing_snapshot_get_size_bad_ref(self): snap, _ = self.new_fake_snap() 
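        # A reference without a usable 'source-name' value cannot be resolved on
        # the array, so the get_size call is expected to fail up front with
        # ManageExistingInvalidReference for each of the bad refs below.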
self.assertRaises(exception.ManageExistingInvalidReference, self.driver.manage_existing_snapshot_get_size, snap, {'bad_key': 'bad_value'}) def test_manage_existing_snapshot_get_size_empty_ref(self): snap, _ = self.new_fake_snap() self.assertRaises(exception.ManageExistingInvalidReference, self.driver.manage_existing_snapshot_get_size, snap, {'source-name': ''}) def test_manage_existing_snapshot_get_size_none_ref(self): snap, _ = self.new_fake_snap() self.assertRaises(exception.ManageExistingInvalidReference, self.driver.manage_existing_snapshot_get_size, snap, {'source-name': None}) def test_manage_existing_snapshot_get_size_volume_ref_not_exist(self): snap, _ = self.new_fake_snap() err_rsp = ErrorResponse(400, [DotNotation({'message': 'does not exist'})], {}) self.array.get_volumes.return_value = err_rsp self.assertRaises(exception.ManageExistingInvalidReference, self.driver.manage_existing_snapshot_get_size, snap, {'source-name': 'non-existing-volume.snap1'}) @ddt.data( # 96 chars, will exceed allowable length 'volume-1e5177e7-95e5-4a0f-b170-e45f4b469f6a-cinder.' 'snapshot-253b2878-ec60-4793-ad19-e65496ec7aab', # short_name that will require no adjustment 'volume-1e5177e7-cinder.snapshot-e65496ec7aab') @mock.patch(DRIVER_PATH + ".flasharray.VolumePatch") @mock.patch(BASE_DRIVER_OBJ + "._get_snap_name") def test_unmanage_snapshot(self, fake_name, mock_get_snap_name, mock_vol_patch): snap, snap_name = self.new_fake_snap() mock_get_snap_name.return_value = fake_name mock_data = self.array.flasharray.VolumePatch(names='snap_name') mock_vol_patch.return_value = mock_data self.driver.unmanage_snapshot(snap) self.array.patch_volume_snapshots.\ assert_called_with(names=[fake_name], volume_snapshot=mock_data) @mock.patch(BASE_DRIVER_OBJ + "._rename_volume_object") def test_unmanage_snapshot_error_propagates(self, mock_rename): snap, _ = self.new_fake_snap() self.assert_error_propagates([mock_rename], self.driver.unmanage_snapshot, snap) @mock.patch(DRIVER_PATH + ".LOG") def test_unmanage_snapshot_with_deleted_snapshot(self, mock_logger): snap, snap_name = self.new_fake_snap() self.driver.unmanage_snapshot(snap) err_rsp = ErrorResponse(400, [DotNotation({'message': 'does not exist'})], {}) self.array.patch_volume_snapshots.return_value = err_rsp self.driver.unmanage_snapshot(snap) mock_logger.warning.\ assert_called_with('Unable to rename %(old_name)s, ' 'error message: %(error)s', {'old_name': f"{snap_name}", 'error': 'does not exist'}) def _test_get_manageable_things(self, pure_objs=MPV, expected_refs=MPV_REFS.items, pure_hosts = CONN, cinder_objs=list(), is_snapshot=False): self.array.get_connections.return_value = pure_hosts self.array.get_volume_snapshots.return_value = pure_objs self.array.get_volumes.return_value = pure_objs marker = mock.Mock() limit = mock.Mock() offset = mock.Mock() sort_keys = mock.Mock() sort_dirs = mock.Mock() with mock.patch('cinder.volume.volume_utils.' 'paginate_entries_list') as mpage: if is_snapshot: test_func = self.driver.get_manageable_snapshots else: test_func = self.driver.get_manageable_volumes test_func(cinder_objs, marker, limit, offset, sort_keys, sort_dirs) mpage.assert_called_once_with( expected_refs, marker, limit, offset, sort_keys, sort_dirs ) def test_get_manageable_volumes(self,): """Default success case. Given a list of pure volumes from the REST API, give back a list of volume references. 
""" self._test_get_manageable_things() def test_get_manageable_volumes_connected_vol(self): """Make sure volumes connected to hosts are flagged as unsafe.""" connected_vol = deepcopy(MPV.items) connected_vol[0]['name'] = 'xVol1' nexpected_refs = deepcopy(MPV_REFS.items) nexpected_refs[0]['reference'] = {'name': 'xVol1'} nexpected_refs[0]['safe_to_manage'] = False nexpected_refs[0]['reason_not_safe'] = 'Volume connected to host utest' del nexpected_refs[-2:] local_pure_objs = ValidResponse(200, None, 1, [DotNotation(connected_vol[0])], {}) self._test_get_manageable_things(pure_objs=local_pure_objs, expected_refs=nexpected_refs) nexpected_refs[0]['safe_to_manage'] = True def test_get_manageable_volumes_already_managed(self): """Make sure volumes already owned by cinder are flagged as unsafe.""" cinder_vol, cinder_vol_name = self.new_fake_vol() cinders_vols = [cinder_vol] # Have one of our vol names match up with the existing cinder volume purity_vols = deepcopy(MPV.items) purity_vols[0]['name'] = cinder_vol_name managed_expected_refs = deepcopy(MPV_REFS.items) managed_expected_refs[0]['reference'] = {'name': cinder_vol_name} managed_expected_refs[0]['safe_to_manage'] = False managed_expected_refs[0]['reason_not_safe'] = 'Volume already managed' managed_expected_refs[0]['cinder_id'] = cinder_vol.id local_pure_objs = ValidResponse(200, None, 3, [DotNotation(purity_vols[0]), DotNotation(purity_vols[1]), DotNotation(purity_vols[2])], {}) self._test_get_manageable_things(pure_objs=local_pure_objs, expected_refs=managed_expected_refs, cinder_objs=cinders_vols) managed_expected_refs[0]['safe_to_manage'] = True def test_get_manageable_volumes_no_pure_volumes(self): """Expect no refs to be found if no volumes are on Purity.""" self._test_get_manageable_things(pure_objs=ValidResponse(200, None, 0, [], {}), expected_refs=[]) def test_get_manageable_volumes_no_hosts(self): """Success case with no hosts on Purity.""" self._test_get_manageable_things(pure_hosts=ValidResponse(200, None, 0, [], {})) def test_get_manageable_snapshots(self): """Default success case. Given a list of pure snapshots from the REST API, give back a list of snapshot references. """ self._test_get_manageable_things( pure_objs=MPS, expected_refs=MPS_REFS.items, pure_hosts=ValidResponse(200, None, 1, [DotNotation(CONNECTION_DATA)], {}), is_snapshot=True ) def test_get_manageable_snapshots_already_managed(self): """Make sure snaps already owned by cinder are flagged as unsafe.""" cinder_vol, _ = self.new_fake_vol() cinder_snap = fake_snapshot.fake_snapshot_obj(mock.MagicMock()) cinder_snap.volume = cinder_vol cinder_snaps = [cinder_snap] purity_snaps = MPS.items.copy() purity_snaps[0]['name'] = 'volume-%s-cinder.snapshot-%s' % ( cinder_vol.id, cinder_snap.id ) expected_refs = MPS_REFS.items.copy() expected_refs[0]['reference'] = {'name': purity_snaps[0]['name']} expected_refs[0]['safe_to_manage'] = False expected_refs[0]['reason_not_safe'] = 'Snapshot already managed.' 
expected_refs[0]['cinder_id'] = cinder_snap.id self._test_get_manageable_things( pure_objs=ValidResponse(200, None, 3, [DotNotation(purity_snaps[0]), DotNotation(purity_snaps[1]), DotNotation(purity_snaps[2])], {}), expected_refs=expected_refs, cinder_objs=cinder_snaps, is_snapshot=True ) def test_get_manageable_snapshots_no_pure_snapshots(self): """Expect no refs to be found if no snapshots are on Purity.""" self._test_get_manageable_things(pure_objs=ValidResponse(200, None, 0, [], {}), pure_hosts=ValidResponse(200, None, 0, [], {}), expected_refs=[], is_snapshot=True) @ddt.data( # No replication change, non-replicated dict( current_spec={ 'replication_enabled': ' false', }, new_spec={ 'replication_type': ' async', 'replication_enabled': ' false', }, expected_model_update=None, expected_did_retype=True, expected_add_to_group=False, expected_remove_from_pgroup=False, ), # No replication change, async to async dict( current_spec={ 'replication_type': ' async', 'replication_enabled': ' true', 'other_spec': 'blah' }, new_spec={ 'replication_type': ' async', 'replication_enabled': ' true', 'other_spec': 'something new' }, expected_model_update=None, expected_did_retype=True, expected_add_to_group=False, expected_remove_from_pgroup=False, ), # No replication change, sync to sync dict( current_spec={ 'replication_type': ' sync', 'replication_enabled': ' true', 'other_spec': 'blah' }, new_spec={ 'replication_type': ' sync', 'replication_enabled': ' true', 'other_spec': 'something new' }, expected_model_update=None, expected_did_retype=True, expected_add_to_group=False, expected_remove_from_pgroup=False, ), # Turn on async rep dict( current_spec={ 'replication_enabled': ' false', }, new_spec={ 'replication_type': ' async', 'replication_enabled': ' true', }, expected_model_update={ "replication_status": fields.ReplicationStatus.ENABLED }, expected_did_retype=True, expected_add_to_group=True, expected_remove_from_pgroup=False, ), # Turn off async rep dict( current_spec={ 'replication_type': ' async', 'replication_enabled': ' true', }, new_spec={ 'replication_type': ' async', 'replication_enabled': ' false', }, expected_model_update={ "replication_status": fields.ReplicationStatus.DISABLED }, expected_did_retype=True, expected_add_to_group=False, expected_remove_from_pgroup=True, ), # Turn on sync rep dict( current_spec={ 'replication_enabled': ' false', }, new_spec={ 'replication_type': ' sync', 'replication_enabled': ' true', }, expected_model_update=retype_meta, expected_did_retype=True, expected_add_to_group=False, expected_remove_from_pgroup=False, ), # Turn on trisync rep dict( current_spec={ 'replication_enabled': ' false', }, new_spec={ 'replication_type': ' trisync', 'replication_enabled': ' true', }, expected_model_update=retype_meta, expected_did_retype=True, expected_add_to_group=False, expected_remove_from_pgroup=False, ), # Turn off sync rep dict( current_spec={ 'replication_type': ' sync', 'replication_enabled': ' true', }, new_spec={ 'replication_type': ' sync', 'replication_enabled': ' false', }, expected_model_update=retype_repl_off, expected_did_retype=True, expected_add_to_group=False, expected_remove_from_pgroup=False, ), # Turn off trisync rep dict( current_spec={ 'replication_type': ' trisync', 'replication_enabled': ' true', }, new_spec={ 'replication_type': ' trisync', 'replication_enabled': ' false', }, expected_model_update=retype_repl_off, expected_did_retype=True, expected_add_to_group=False, expected_remove_from_pgroup=False, ), # Change from async to sync rep dict( 
current_spec={ 'replication_type': ' async', 'replication_enabled': ' true', }, new_spec={ 'replication_type': ' sync', 'replication_enabled': ' true', }, expected_model_update=retype_meta, expected_did_retype=True, expected_add_to_group=False, expected_remove_from_pgroup=False, ), # Change from async to trisync rep dict( current_spec={ 'replication_type': ' async', 'replication_enabled': ' true', }, new_spec={ 'replication_type': ' trisync', 'replication_enabled': ' true', }, expected_model_update=retype_meta, expected_did_retype=True, expected_add_to_group=False, expected_remove_from_pgroup=False, ), # Change from sync to async rep dict( current_spec={ 'replication_type': ' sync', 'replication_enabled': ' true', }, new_spec={ 'replication_type': ' async', 'replication_enabled': ' true', }, expected_model_update=retype_async, expected_did_retype=True, expected_add_to_group=False, expected_remove_from_pgroup=False, ), # Change from trisync to async rep dict( current_spec={ 'replication_type': ' trisync', 'replication_enabled': ' true', }, new_spec={ 'replication_type': ' async', 'replication_enabled': ' true', }, expected_model_update=retype_async, expected_did_retype=True, expected_add_to_group=False, expected_remove_from_pgroup=False, ), # Change from trisync to sync rep dict( current_spec={ 'replication_type': ' trisync', 'replication_enabled': ' true', }, new_spec={ 'replication_type': ' sync', 'replication_enabled': ' true', }, expected_model_update=None, expected_did_retype=True, expected_add_to_group=False, expected_remove_from_pgroup=True, ), # Change from sync to trisync rep dict( current_spec={ 'replication_type': ' sync', 'replication_enabled': ' true', }, new_spec={ 'replication_type': ' trisync', 'replication_enabled': ' true', }, expected_model_update=None, expected_did_retype=True, expected_add_to_group=True, expected_remove_from_pgroup=False, ), ) @mock.patch(BASE_DRIVER_OBJ + "._stretch_replica") @mock.patch(BASE_DRIVER_OBJ + "._wait_for_stretch") @mock.patch(BASE_DRIVER_OBJ + "._create_pod_if_not_exist") @ddt.unpack def test_retype_replication(self, mock_pod, mock_wait_stretch, mock_stretch, current_spec, new_spec, expected_model_update, expected_did_retype, expected_add_to_group, expected_remove_from_pgroup): ctxt = context.get_admin_context() vol, vol_name = self.new_fake_vol(type_extra_specs=current_spec) secondary = mock.MagicMock() self.driver._active_cluster_target_arrays = [secondary] new_type = fake_volume.fake_volume_type_obj(ctxt) new_type.extra_specs = new_spec get_voltype = "cinder.objects.volume_type.VolumeType.get_by_name_or_id" with mock.patch(get_voltype) as mock_get_vol_type: mock_get_vol_type.return_value = new_type self.driver._get_volume_type_extra_spec = mock.Mock( return_value={}) pg_rsp = ValidResponse( 200, None, 1, [{"group": {"name": "cinder-group"}}], {}) self.array.get_protection_groups_volumes.return_value = pg_rsp did_retype, model_update = self.driver.retype( ctxt, vol, {"id": new_type.id, "extra_specs": new_spec}, None, # ignored by driver None, # ignored by driver ) self.assertEqual(expected_did_retype, did_retype) self.assertEqual(expected_model_update, model_update) if expected_add_to_group: if "trisync" not in new_type.extra_specs["replication_type"]: self.array.post_protection_groups_volumes.\ assert_called_once_with(group_names = [self.driver._replication_pg_name], member_names = [vol_name]) if expected_remove_from_pgroup: if "trisync" not in current_spec["replication_type"]: self.array.delete_protection_groups_volumes.\ 
assert_called_once_with(group_names = [self.driver._replication_pg_name], member_names = [vol_name]) @ddt.data( dict( specs={ 'replication_type': ' async', 'replication_enabled': ' true', }, expected_repl_type='async' ), dict( specs={ 'replication_type': ' sync', 'replication_enabled': ' true', }, expected_repl_type='sync' ), dict( specs={ 'replication_type': ' trisync', 'replication_enabled': ' true', }, expected_repl_type='trisync' ), dict( specs={ 'replication_type': ' async', 'replication_enabled': ' false', }, expected_repl_type=None ), dict( specs={ 'replication_type': ' sync', 'replication_enabled': ' false', }, expected_repl_type=None ), dict( specs={ 'not_replication_stuff': 'foo', 'replication_enabled': ' true', }, expected_repl_type='async' ), dict( specs=None, expected_repl_type=None ), dict( specs={ 'replication_type': ' super-turbo-repl-mode', 'replication_enabled': ' true', }, expected_repl_type=None ) ) @ddt.unpack def test_get_replication_type_from_vol_type(self, specs, expected_repl_type): voltype = fake_volume.fake_volume_type_obj(mock.MagicMock()) voltype.extra_specs = specs actual_type = self.driver._get_replication_type_from_vol_type(voltype) self.assertEqual(expected_repl_type, actual_type) @mock.patch(DRIVER_PATH + ".LOG") def test_does_pgroup_exist_not_exists(self, mock_logger): err_rsp = ErrorResponse(400, [DotNotation({'message': 'does not exist'})], {}) self.array.get_protection_groups.return_value = err_rsp exists = self.driver._does_pgroup_exist(self.array, "some_pgroup") self.assertFalse(exists) def test_does_pgroup_exist_exists(self): valid_rsp = ValidResponse(200, None, 1, [DotNotation(PGROUP_ON_TARGET_NOT_ALLOWED)], {}) self.array.get_protection_groups.return_value = valid_rsp exists = self.driver._does_pgroup_exist(self.array, "some_pgroup") self.assertTrue(exists) def test_does_pgroup_exist_error_propagates(self): err_rsp = ErrorResponse(400, [DotNotation({'message': 'does not exist'})], {}) self.array.get_protection_groups.return_value = err_rsp self.assert_error_propagates([self.array.get_protection_groups], self.driver._does_pgroup_exist, self.array, "some_pgroup") @mock.patch(BASE_DRIVER_OBJ + "._does_pgroup_exist") def test_wait_until_target_group_setting_propagates_ready(self, mock_exists): mock_exists.return_value = True self.driver._wait_until_target_group_setting_propagates( self.array, "some_pgroup" ) @mock.patch(BASE_DRIVER_OBJ + "._does_pgroup_exist") def test_wait_until_target_group_setting_propagates_not_ready(self, mock_exists): mock_exists.return_value = False self.assertRaises( pure.PureDriverException, self.driver._wait_until_target_group_setting_propagates, self.array, "some_pgroup" ) def test_wait_until_source_array_allowed_ready(self): pgtgt = ValidResponse(200, None, 1, [DotNotation(PGROUP_ON_TARGET_ALLOWED)], {}) self.array.get_protection_groups_targets.return_value = \ pgtgt self.driver._wait_until_source_array_allowed( self.array, "array1:replicated_pgroup",) def test_wait_until_source_array_allowed_not_ready(self): pgtgt = ValidResponse(200, None, 1, [DotNotation(PGROUP_ON_TARGET_NOT_ALLOWED)], {}) self.array.get_protection_groups_targets.return_value = \ pgtgt self.assertRaises( pure.PureDriverException, self.driver._wait_until_source_array_allowed, self.array, "some_pgroup", ) @mock.patch.object(volume_types, 'get_volume_type') def test_create_volume_replicated_async(self, mock_get_volume_type): repl_extra_specs = { 'replication_type': ' async', 'replication_enabled': ' true', } vol, vol_name = self.new_fake_vol(spec={"size": 
2}, type_extra_specs=repl_extra_specs) mock_get_volume_type.return_value = vol.volume_type self.driver.create_volume(vol) self.array.post_volumes.assert_called() self.array.post_protection_groups_volumes.assert_called_with( group_names=[REPLICATION_PROTECTION_GROUP], member_names=[vol["name"] + "-cinder"]) @mock.patch(DRIVER_PATH + ".flasharray.VolumePost") @mock.patch.object(volume_types, 'get_volume_type') def test_create_volume_replicated_sync(self, mock_get_volume_type, mock_fa): repl_extra_specs = { 'replication_type': ' sync', 'replication_enabled': ' true', } vol, vol_name = self.new_fake_vol(spec={"size": 2}, type_extra_specs=repl_extra_specs) mock_get_volume_type.return_value = vol.volume_type mock_data = self.array.flasharray.VolumePost(provisioned=2147483648) mock_fa.return_value = mock_data self.driver.create_volume(vol) self.array.post_volumes.assert_called_with(names=[vol_name], with_default_protection= False, volume=mock_data) def test_find_async_failover_target_no_repl_targets(self): self.driver._replication_target_arrays = [] self.assertRaises(pure.PureDriverException, self.driver._find_async_failover_target) @mock.patch(BASE_DRIVER_OBJ + '._get_latest_replicated_pg_snap') def test_find_async_failover_target(self, mock_get_snap): mock_backend_1 = mock.Mock() mock_backend_1.replication_type = 'async' mock_backend_2 = mock.Mock() mock_backend_2.replication_type = 'async' self.driver._replication_target_arrays = [mock_backend_1, mock_backend_2] mock_get_snap.return_value = REPLICATED_PGSNAPS[0] array, pg_snap = self.driver._find_async_failover_target() self.assertEqual(mock_backend_1, array) self.assertEqual(REPLICATED_PGSNAPS[0], pg_snap) @mock.patch(BASE_DRIVER_OBJ + '._get_latest_replicated_pg_snap') def test_find_async_failover_target_missing_pgsnap( self, mock_get_snap): mock_backend_1 = mock.Mock() mock_backend_1.replication_type = 'async' mock_backend_2 = mock.Mock() mock_backend_2.replication_type = 'async' self.driver._replication_target_arrays = [mock_backend_1, mock_backend_2] mock_get_snap.side_effect = [None, REPLICATED_PGSNAPS[0]] array, pg_snap = self.driver._find_async_failover_target() self.assertEqual(mock_backend_2, array) self.assertEqual(REPLICATED_PGSNAPS[0], pg_snap) @mock.patch(BASE_DRIVER_OBJ + '._get_latest_replicated_pg_snap') def test_find_async_failover_target_no_pgsnap( self, mock_get_snap): mock_backend = mock.Mock() mock_backend.replication_type = 'async' self.driver._replication_target_arrays = [mock_backend] mock_get_snap.return_value = None self.assertRaises(pure.PureDriverException, self.driver._find_async_failover_target) @mock.patch(BASE_DRIVER_OBJ + '._get_latest_replicated_pg_snap') def test_find_async_failover_target_error_propagates_no_secondary( self, mock_get_snap): mock_backend = mock.Mock() mock_backend.replication_type = 'async' self.driver._replication_target_arrays = [mock_backend] self.assert_error_propagates( [mock_get_snap], self.driver._find_async_failover_target ) def test_find_sync_failover_target_success(self): secondary = mock.MagicMock() self.driver._active_cluster_target_arrays = [secondary] secondary.get_pods.return_value = CINDER_POD secondary.array_id = CINDER_POD.items[0]['arrays'][1]['id'] actual_secondary = self.driver._find_sync_failover_target() self.assertEqual(secondary, actual_secondary) def test_find_sync_failover_target_no_ac_arrays(self): self.driver._active_cluster_target_arrays = [] actual_secondary = self.driver._find_sync_failover_target() self.assertIsNone(actual_secondary) def 
test_find_sync_failover_target_fail_to_get_pod(self): secondary = mock.MagicMock() self.driver._active_cluster_target_arrays = [secondary] secondary.array_id = CINDER_POD.items[0]['arrays'][1]['id'] actual_secondary = self.driver._find_sync_failover_target() self.assertIsNone(actual_secondary) def test_find_sync_failover_target_pod_status_error(self): secondary = mock.MagicMock() self.driver._active_cluster_target_arrays = [secondary] modified_array = deepcopy(array_2) modified_array['status'] = 'error' POD_WITH_ERR = dict(arrays = [array_1, modified_array], source = None, name= 'cinder-pod') secondary.get_pod.\ return_value = ValidResponse(200, None, 1, [DotNotation(POD_WITH_ERR)], {}) secondary.array_id = POD_WITH_ERR['arrays'][1]['id'] actual_secondary = self.driver._find_sync_failover_target() self.assertIsNone(actual_secondary) def test_enable_async_replication_if_needed_success(self): repl_extra_specs = { 'replication_type': ' async', 'replication_enabled': ' true', } vol, vol_name = self.new_fake_vol(type_extra_specs=repl_extra_specs) self.driver._enable_async_replication_if_needed(self.array, vol) self.array.post_protection_groups_volumes.assert_called_with( group_names=[self.driver._replication_pg_name], member_names=[vol_name] ) def test_enable_async_replication_if_needed_not_repl_type(self): vol_type = fake_volume.fake_volume_type_obj(mock.MagicMock()) vol_obj = fake_volume.fake_volume_obj(mock.MagicMock()) with mock.patch('cinder.objects.VolumeType.get_by_id') as mock_type: mock_type.return_value = vol_type self.driver._enable_async_replication_if_needed(self.array, vol_obj) self.assertFalse(self.array.set_pgroup.called) def test_enable_async_replication_if_needed_sync_skip(self): repl_extra_specs = { 'replication_type': ' sync', 'replication_enabled': ' true', } vol, vol_name = self.new_fake_vol(type_extra_specs=repl_extra_specs) self.driver._enable_async_replication_if_needed(self.array, vol) self.array.post_protection_groups_volumes.assert_not_called() def test_enable_async_replication_if_needed_error_propagates(self): repl_extra_specs = { 'replication_type': ' async', 'replication_enabled': ' true', } vol, _ = self.new_fake_vol(type_extra_specs=repl_extra_specs) self.driver._enable_async_replication_if_needed(self.array, vol) self.assert_error_propagates( [self.array.post_protection_groups_volumes], self.driver._enable_async_replication, self.array, vol ) @mock.patch(BASE_DRIVER_OBJ + "._cleanup_ghostpod") @mock.patch(BASE_DRIVER_OBJ + "._wait_for_stretch") @mock.patch(BASE_DRIVER_OBJ + "._stretch_replica") @mock.patch(BASE_DRIVER_OBJ + "._create_pod_if_not_exist") def test_enable_sync_replication( self, mock_pod, mock_stretch, mock_wait_stretch, mock_cleanup): vol, vol_name = self.new_fake_vol() ghost_pod_name = "cinder-ghost-pod-" + vol.id cpod = self.flasharray.Pod(name='cinder-pod') mock_rsp = ValidResponse(200, None, 1, [vol], {}) self.array.patch_volumes.return_value = mock_rsp self.driver._enable_sync_replication(self.array, vol, vol_name) mock_pod.assert_called_with(self.array, ghost_pod_name) mock_stretch.assert_called_with(self.array, vol, ghost_pod_name) mock_wait_stretch.assert_called_with(self.array, ghost_pod_name) self.array.patch_volumes.assert_called_with( names=[ ghost_pod_name + '::' + vol_name], volume=self.flasharray.VolumePatch( pod=cpod)) mock_cleanup.assert_called_with(self.array, ghost_pod_name) err_rsp = ErrorResponse(400, [DotNotation({'message': 'does not exist'})], {}) self.array.patch_volumes.return_value = err_rsp 
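        # Second invocation: patch_volumes now returns an error ('does not exist'),
        # simulating a failed move of the volume into the ghost pod. The assertion
        # below only requires a single cleanup call across both invocations.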
self.driver._enable_sync_replication(self.array, vol, vol_name) assert mock_cleanup.call_count == 1 def test_enable_sync_replication_sync(self): repl_extra_specs = { 'replication_type': ' sync', 'replication_enabled': ' true', } vol, vol_name = self.new_fake_vol(type_extra_specs=repl_extra_specs) cpod = self.flasharray.Pod(name='cinder-pod') self.driver._enable_sync_replication(self.array, vol, vol_name) self.array.patch_volumes.assert_called_with( names=[vol_name], volume=self.flasharray.VolumePatch(pod=cpod)) @mock.patch(BASE_DRIVER_OBJ + "._wait_for_stretch") @mock.patch(BASE_DRIVER_OBJ + '._setup_replicated_pods') @mock.patch(BASE_DRIVER_OBJ + "._create_pod_if_not_exist") def test_disable_sync_replication( self, mock_pod, mock_setup_pods, mock_wait_stretch): repl_extra_specs = { 'replication_type': ' sync', 'replication_enabled': ' true', } vol, vol_name = self.new_fake_vol(type_extra_specs=repl_extra_specs) ghost_pod_name = "cinder-ghost-pod-" + vol.id ghost_ref = self.flasharray.Reference(name=ghost_pod_name) self.driver._disable_sync_replication(self.array, vol, vol_name) self.array.patch_volumes.assert_called_with(names=[vol_name], volume=self.flasharray. VolumePatch(pod=ghost_ref)) mock_rsp = ValidResponse(200, None, 1, [vol], {}) self.array.patch_volumes.return_value = mock_rsp self.driver._disable_sync_replication(self.array, vol, vol_name) self.array.delete_pods.assert_called_with(names=[ghost_pod_name], eradicate_contents=True) assert vol.provider_id == vol_name.split('::')[-1] def test_enable_sync_replication_if_needed(self): repl_extra_specs = { 'replication_type': ' sync', 'replication_enabled': ' true', } vol, vol_name = self.new_fake_vol(type_extra_specs=repl_extra_specs) output = self.driver._enable_sync_replication_if_needed( self.array, vol, vol_name) assert output vol, vol_name = self.new_fake_vol() output = self.driver._enable_sync_replication_if_needed( self.array, vol, vol_name) assert output is False @mock.patch(BASE_DRIVER_OBJ + "._wait_for_stretch") @mock.patch(BASE_DRIVER_OBJ + '._setup_replicated_pods') @mock.patch(BASE_DRIVER_OBJ + "._create_pod_if_not_exist") def test_disable_sync_replication_if_needed( self, mock_pod, mock_setup_pods, mock_wait_stretch): repl_extra_specs = { 'replication_type': ' async', 'replication_enabled': ' true', } vol, vol_name = self.new_fake_vol(type_extra_specs=repl_extra_specs) output = self.driver._disable_sync_replication_if_needed( self.array, vol, vol_name) assert output repl_extra_specs = { 'replication_type': ' sync', 'replication_enabled': ' true', } vol, vol_name = self.new_fake_vol(type_extra_specs=repl_extra_specs) output = self.driver._disable_sync_replication_if_needed( self.array, vol, vol_name) assert output is False @mock.patch(BASE_DRIVER_OBJ + "._disable_async_replication") def test_disable_async_replication_if_needed(self, mock_async): repl_extra_specs = { 'replication_type': ' sync', 'replication_enabled': ' true', } vol, vol_name = self.new_fake_vol(type_extra_specs=repl_extra_specs) output = self.driver._disable_async_replication_if_needed( self.array, vol) assert output repl_extra_specs = { 'replication_type': ' async', 'replication_enabled': ' true', } vol, vol_name = self.new_fake_vol(type_extra_specs=repl_extra_specs) output = self.driver._disable_async_replication_if_needed( self.array, vol) assert output is False @mock.patch(DRIVER_PATH + ".excutils.save_and_reraise_exception") @mock.patch(DRIVER_PATH + ".LOG") @mock.patch(BASE_DRIVER_OBJ + '._setup_replicated_pods') def test_stretch_replica( self, 
mock_setup_pods, mock_logger, mock_excutils): vol, vol_name = self.new_fake_vol() ghost_pod_name = "cinder-ghost-pod-" + vol.id mock_rsp = ValidResponse( 200, None, 1, [{"group": {"name": "tstpg"}}], {}) self.array.get_protection_groups_volumes.return_value = mock_rsp mock_patch_rsp = ValidResponse(200, None, 1, [vol], {}) self.array.patch_volumes.return_value = mock_patch_rsp self.driver._stretch_replica(self.array, vol, ghost_pod_name) mock_setup_pods.assert_called() err_rsp = ErrorResponse(400, [DotNotation({'message': 'does not exist'})], {}) self.array.patch_volumes.return_value = err_rsp self.driver._stretch_replica(self.array, vol, ghost_pod_name) mock_logger.warning.\ assert_called_with('Unable to add volume to Ghost Pod: %s', 'does not exist') @mock.patch(DRIVER_PATH + ".LOG") @mock.patch(DRIVER_PATH + ".excutils.save_and_reraise_exception") def test_cleanup_ghostpod(self, mock_excutils, mock_logger): vol, vol_name = self.new_fake_vol() ghost_pod_name = "cinder-ghost-pod-" + vol.id mock_rsp = ValidResponse(200, None, 1, [], {}) self.array.delete_pods_arrays.return_value = mock_rsp self.driver._cleanup_ghostpod(self.array, ghost_pod_name) self.array.delete_pods.assert_called_with(names=[ghost_pod_name], eradicate_contents=True) err_rsp = ErrorResponse(400, [DotNotation({'message': 'does not exist'})], {}) self.array.delete_pods_arrays.return_value = err_rsp self.driver._cleanup_ghostpod(self.array, ghost_pod_name) mock_logger.warning. assert_called_with( 'Unable to unstretch ghost pod for deletion: %s', 'does not exist') def test_safemode_check(self): vol, vol_name = self.new_fake_vol() mock_rsp = ValidResponse( 200, None, 1, [{"group": {"name": "tstpg"}}], {}) self.array.get_protection_groups_volumes.return_value = mock_rsp mock_pg = ValidResponse( 200, None, 1, [DotNotation({"retention_lock": "ratcheted"})], {}) self.array.get_protection_groups.return_value = mock_pg exc_out = self.assertRaises(exception.ManageExistingInvalidReference, self.driver._safemode_check, self.array, {"source-name": vol_name}) self.assertIn( "SafeMode protected volume as its not supported", str(exc_out)) def test_check_repl(self): mock_rsp = ValidResponse( 200, None, 1, [DotNotation({"array_count": 2})], {}) self.array.get_pods.return_value = mock_rsp rtype = self.driver._check_repl(self.array, 'cinderpod::tstvol') assert rtype == 'sync' pg_rsp = ValidResponse( 200, None, 1, [{"group": {"name": "tstpg"}}], {}) self.array.get_protection_groups_volumes.return_value = pg_rsp tpg_rsp = ValidResponse( 200, None, 1, [DotNotation({"target_count": 2})], {}) self.array.get_protection_groups.return_value = tpg_rsp rtype = self.driver._check_repl(self.array, 'tstvol') assert rtype == 'async' @mock.patch(DRIVER_PATH + ".LOG") @mock.patch(BASE_DRIVER_OBJ + '._enable_sync_replication') def test_sync_retype_enable(self, mock_sync, mock_logger): vol, vol_name = self.new_fake_vol() secondary = mock.MagicMock() self.driver._active_cluster_target_arrays = [secondary] fvol = 'cinder-pod::' + vol_name model_update = {"replication_status": fields.ReplicationStatus.ENABLED, "metadata": {**vol.metadata, "array_volume_name": fvol} } out = self.driver._sync_retype_enable(vol) assert vol.provider_id == fvol assert out == model_update self.driver._active_cluster_target_arrays = [] self.driver._sync_retype_enable(vol) mock_logger.error.\ assert_called_with("Sync replication is not enabled on the array") def test_get_pgroups(self): pg_rsp = ValidResponse( 200, None, 1, [{"group": {"name": "tstpg"}}], {}) 
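        # _get_pgroups is expected to reduce the per-volume protection group
        # listing to a plain list of group names, hence ['tstpg'] below.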
self.array.get_protection_groups_volumes.return_value = pg_rsp out = self.driver._get_pgroups(self.array, 'tstvol') assert out == ['tstpg'] @mock.patch("time.sleep", return_value=None) @mock.patch("time.time") def test_wait_for_stretch(self, mock_time, mock_sleep): vol, vol_name = self.new_fake_vol() ghost_pod_name = "cinder-ghost-pod-" + vol.id mock_rsp0 = ValidResponse(200, None, 1, [DotNotation( {"arrays": [{"name": "array-tst", "status": "offline"}]})], {}) mock_rsp1 = ValidResponse(200, None, 1, [DotNotation( {"arrays": [{"name": "array-tst", "status": "online"}]})], {}) self.array.get_pods.side_effect = [mock_rsp0, mock_rsp1] self.driver._wait_for_stretch(self.array, ghost_pod_name) @mock.patch(DRIVER_PATH + ".flasharray.VolumePost") @mock.patch(BASE_DRIVER_OBJ + '._get_flasharray') @mock.patch(BASE_DRIVER_OBJ + '._find_async_failover_target') def test_failover_async(self, mock_find_failover_target, mock_get_array, mock_vol): secondary_device_id = 'foo' self.async_array2.backend_id = secondary_device_id self.driver._replication_target_arrays = [self.async_array2] pgout = ValidResponse(200, None, 1, [DotNotation(REPLICATED_PGSNAPS[0]), DotNotation(REPLICATED_PGSNAPS[1]), DotNotation(REPLICATED_PGSNAPS[2])], {}) volout = ValidResponse(200, None, 1, [DotNotation(REPLICATED_VOLUME_SNAPS[0]), DotNotation(REPLICATED_VOLUME_SNAPS[1]), DotNotation(REPLICATED_VOLUME_SNAPS[2])], {}) self.async_array2.get_volume_snapshots.return_value = volout array2 = mock.Mock() array2.backend_id = secondary_device_id array2.array_name = GET_ARRAY_SECONDARY['name'] array2.array_id = GET_ARRAY_SECONDARY['id'] mock_get_array.return_value = array2 target_array = self.async_array2 target_array.copy_volume = mock.Mock() mock_find_failover_target.return_value = ( target_array, pgout.items[0], ) array2.get_volume.return_value = volout.items context = mock.MagicMock() new_active_id, volume_updates, __ = self.driver.failover( context, REPLICATED_VOLUME_OBJS, None, [] ) self.assertEqual(secondary_device_id, new_active_id) expected_updates = [ { 'updates': { 'replication_status': fields.ReplicationStatus.FAILED_OVER }, 'volume_id': '1e5177e7-95e5-4a0f-b170-e45f4b469f6a' }, { 'updates': { 'replication_status': fields.ReplicationStatus.FAILED_OVER }, 'volume_id': '43a09914-e495-475f-b862-0bda3c8918e4' }, { 'updates': { 'replication_status': fields.ReplicationStatus.FAILED_OVER }, 'volume_id': '1b1cf149-219c-44ac-aee3-13121a7f86a7' } ] self.assertEqual(expected_updates, volume_updates) calls = [] for snap in REPLICATED_VOLUME_SNAPS: vol_name = snap['name'].split('.')[-1] calls.append(mock.call( with_default_protection=False, names=[vol_name], volume=mock_vol(), overwrite=True )) target_array.post_volumes.assert_has_calls(calls, any_order=True) @mock.patch(BASE_DRIVER_OBJ + '._find_sync_failover_target') def test_failover_sync(self, mock_find_failover_target): secondary_device_id = 'foo' mock_secondary = mock.MagicMock() mock_secondary.backend_id = secondary_device_id mock_secondary.replication_type = 'sync' self.driver._replication_target_arrays = [mock_secondary] mock_find_failover_target.return_value = mock_secondary rpod = 'cinder-pod::volume-1e5177e7-95e5-4a0f-b170-e45f4b469f6a-cinder' repvol = deepcopy(REPLICATED_VOLUME_SNAPS[1]) repvol['name'] = rpod volout = ValidResponse(200, None, 1, [DotNotation(REPLICATED_VOLUME_SNAPS[0]), DotNotation(repvol), DotNotation(REPLICATED_VOLUME_SNAPS[2])], {}) context = mock.MagicMock() sync_repl_extra_specs = { 'replication_type': ' sync', 'replication_enabled': ' true', } 
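        # Build one volume of each flavour for the failover call: sync-replicated,
        # async-replicated, and two non-replicated. Per the expected updates
        # further down, only the sync-replicated volume fails over; the others
        # are reported with an ERROR status.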
sync_replicated_vol, sync_replicated_vol_name = self.new_fake_vol( type_extra_specs=sync_repl_extra_specs, spec={'id': fake.VOLUME_ID} ) async_repl_extra_specs = { 'replication_type': ' async', 'replication_enabled': ' true', } async_replicated_vol, _ = self.new_fake_vol( type_extra_specs=async_repl_extra_specs, spec={'id': fake.VOLUME2_ID} ) not_replicated_vol, _ = self.new_fake_vol( spec={'id': fake.VOLUME3_ID} ) not_replicated_vol2, _ = self.new_fake_vol( spec={'id': fake.VOLUME4_ID} ) mock_secondary.get_connections.return_value = [ {"name": sync_replicated_vol_name} ] mock_secondary.get_volumes.return_value = volout new_active_id, volume_updates, __ = self.driver.failover( context, [ not_replicated_vol, async_replicated_vol, sync_replicated_vol, not_replicated_vol2 ], None, [] ) self.assertEqual(secondary_device_id, new_active_id) # only expect the sync rep'd vol to make it through the failover expected_updates = [ { 'updates': { 'status': fields.VolumeStatus.ERROR }, 'volume_id': not_replicated_vol.id }, { 'updates': { 'status': fields.VolumeStatus.ERROR }, 'volume_id': async_replicated_vol.id }, { 'updates': { 'replication_status': fields.ReplicationStatus.FAILED_OVER }, 'volume_id': sync_replicated_vol.id }, { 'updates': { 'status': fields.VolumeStatus.ERROR }, 'volume_id': not_replicated_vol2.id }, ] self.assertEqual(expected_updates, volume_updates) @mock.patch(BASE_DRIVER_OBJ + '._get_flasharray') @mock.patch(BASE_DRIVER_OBJ + '._find_async_failover_target') def test_async_failover_error_propagates(self, mock_find_failover_target, mock_get_array): pgout = ValidResponse(200, None, 1, [DotNotation(REPLICATED_PGSNAPS[0]), DotNotation(REPLICATED_PGSNAPS[1]), DotNotation(REPLICATED_PGSNAPS[2])], {}) volout = ValidResponse(200, None, 1, [DotNotation(REPLICATED_VOLUME_SNAPS[0]), DotNotation(REPLICATED_VOLUME_SNAPS[1]), DotNotation(REPLICATED_VOLUME_SNAPS[2])], {}) mock_find_failover_target.return_value = ( self.async_array2, pgout.items[0] ) self.async_array2.get_volume_snapshots.return_value = volout array2 = mock.Mock() array2.array_name = GET_ARRAY_SECONDARY['name'] array2.array_id = GET_ARRAY_SECONDARY['id'] mock_get_array.return_value = array2 array2.get_volume.return_value = volout.items self.assert_error_propagates( [mock_find_failover_target, self.async_array2.get_volume_snapshots], self.driver.failover, mock.Mock(), REPLICATED_VOLUME_OBJS, None ) def test_disable_replication_success(self): vol, vol_name = self.new_fake_vol() mock_rsp = ValidResponse( 200, None, 1, [{"group": {"name": "cinder-group"}}], {}) self.array.get_protection_groups_volumes.return_value = mock_rsp self.driver._disable_async_replication(vol) self.array.delete_protection_groups_volumes.assert_called_with( group_names=[self.driver._replication_pg_name], member_names=[vol_name] ) def test_disable_replication_error_propagates(self): vol, _ = self.new_fake_vol() mock_rsp = ValidResponse( 200, None, 1, [{"group": {"name": "cinder-group"}}], {}) self.array.get_protection_groups_volumes.return_value = mock_rsp self.assert_error_propagates( [self.array.delete_protection_groups_volumes], self.driver._disable_async_replication, vol ) @mock.patch(DRIVER_PATH + ".LOG") def test_disable_replication_already_disabled(self, mock_logger): vol, vol_name = self.new_fake_vol() err_rsp = ErrorResponse(400, [DotNotation({'message': 'could not be found'})], {}) self.array.delete_protection_groups_volumes.return_value = err_rsp mock_rsp = ValidResponse( 200, None, 1, [{"group": {"name": "cinder-group"}}], {}) 
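        # A 'could not be found' error from delete_protection_groups_volumes is
        # treated as "already disabled": the call should still complete and only
        # emit the warning asserted at the end of this test.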
self.array.get_protection_groups_volumes.return_value = mock_rsp self.driver._disable_async_replication(vol) self.array.delete_protection_groups_volumes.assert_called_with( group_names=[self.driver._replication_pg_name], member_names=[vol_name] ) mock_logger.warning.\ assert_called_with('Disable replication on volume failed:' ' already disabled: %s', 'could not be found') def test_get_flasharray_verify_https(self): san_ip = '1.2.3.4' api_token = 'abcdef' cert_path = '/my/ssl/certs' self.flasharray.Client.return_value = mock.MagicMock() self.flasharray.Client().get_arrays.return_value = \ self.fake_get_array() self.driver._get_flasharray(san_ip, api_token, verify_ssl=True, ssl_cert_path=cert_path) self.flasharray.Client.assert_called_with( target=san_ip, api_token=api_token, verify_ssl=True, ssl_cert=cert_path, user_agent=self.driver._user_agent, ) def test_get_wwn(self): vol = {'created': '2019-01-28T14:16:54Z', 'name': 'volume-fdc9892f-5af0-47c8-9d4a-5167ac29dc98-cinder', 'serial': '9714B5CB91634C470002B2C8', 'size': 3221225472, 'source': 'volume-a366b1ba-ec27-4ca3-9051-c301b75bc778-cinder'} self.array.get_volumes.return_value = ValidResponse(200, None, 1, [DotNotation (vol)], {}) returned_wwn = self.driver._get_wwn(vol['name']) expected_wwn = '3624a93709714b5cb91634c470002b2c8' self.assertEqual(expected_wwn, returned_wwn) @mock.patch.object(qos_specs, "get_qos_specs") def test_get_qos_settings_from_specs_id(self, mock_get_qos_specs): ctxt = context.get_admin_context() qos = qos_specs.create(ctxt, "qos-iops-bws", QOS_IOPS_BWS) mock_get_qos_specs.return_value = qos voltype = fake_volume.fake_volume_type_obj(mock.MagicMock()) voltype.qos_specs_id = qos.id voltype.extra_specs = QOS_IOPS_BWS_2 # test override extra_specs specs = self.driver._get_qos_settings(voltype) self.assertEqual(specs["maxIOPS"], int(QOS_IOPS_BWS["maxIOPS"])) self.assertEqual(specs["maxBWS"], int(QOS_IOPS_BWS["maxBWS"]) * 1024 * 1024) def test_get_qos_settings_from_extra_specs(self): voltype = fake_volume.fake_volume_type_obj(mock.MagicMock()) voltype.extra_specs = QOS_IOPS_BWS specs = self.driver._get_qos_settings(voltype) self.assertEqual(specs["maxIOPS"], int(QOS_IOPS_BWS["maxIOPS"])) self.assertEqual(specs["maxBWS"], int(QOS_IOPS_BWS["maxBWS"]) * 1024 * 1024) def test_get_qos_settings_set_zeros(self): voltype = fake_volume.fake_volume_type_obj(mock.MagicMock()) voltype.extra_specs = QOS_ZEROS specs = self.driver._get_qos_settings(voltype) self.assertEqual(specs["maxIOPS"], 0) self.assertEqual(specs["maxBWS"], 0) def test_get_qos_settings_set_one(self): voltype = fake_volume.fake_volume_type_obj(mock.MagicMock()) voltype.extra_specs = QOS_IOPS specs = self.driver._get_qos_settings(voltype) self.assertEqual(specs["maxIOPS"], int(QOS_IOPS["maxIOPS"])) self.assertEqual(specs["maxBWS"], 0) voltype.extra_specs = QOS_BWS specs = self.driver._get_qos_settings(voltype) self.assertEqual(specs["maxIOPS"], 0) self.assertEqual(specs["maxBWS"], int(QOS_BWS["maxBWS"]) * 1024 * 1024) def test_get_qos_settings_invalid(self): voltype = fake_volume.fake_volume_type_obj(mock.MagicMock()) voltype.extra_specs = QOS_INVALID self.assertRaises(exception.InvalidQoSSpecs, self.driver._get_qos_settings, voltype) @ddt.data( { "qos_name": "qos-iops-bws", "qos_spec": dict(QOS_IOPS_BWS), "qos_data": {"iops_limit": '100', "bandwidth_limit": '1048576'} }, { "qos_name": "qos-iops", "qos_spec": dict(QOS_IOPS), "qos_data": {"iops_limit": '100'} }, { "qos_name": "qos-bws", "qos_spec": dict(QOS_BWS), "qos_data": {"bandwidth_limit": '1048576'} }, ) 
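    # Each ddt scenario pairs Cinder QoS specs with the payload expected on the
    # array: maxIOPS maps directly to iops_limit, while maxBWS is multiplied by
    # 1024 * 1024 before being sent (e.g. 1 -> 1048576), matching the qos_data
    # values above.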
@mock.patch(DRIVER_PATH + ".flasharray.VolumePost") @mock.patch(BASE_DRIVER_OBJ + "._add_to_group_if_needed") @mock.patch(BASE_DRIVER_OBJ + "._get_replication_type_from_vol_type") @mock.patch.object(qos_specs, "get_qos_specs") @mock.patch.object(volume_types, 'get_volume_type') def test_create_volume_with_qos(self, qos_info, mock_get_volume_type, mock_get_qos_specs, mock_get_repl_type, mock_add_to_group, mock_create_favol): ctxt = context.get_admin_context() qos = qos_specs.create(ctxt, qos_info["qos_name"], qos_info["qos_spec"]) qos_data = self.flasharray.Qos(**qos_info["qos_data"]) vol, vol_name = self.new_fake_vol(spec={"size": 1}, type_qos_specs_id=qos.id) mock_data = self.flasharray.VolumePost(names=[vol_name], provisioned=vol["size"], qos=qos_data) mock_get_volume_type.return_value = vol.volume_type mock_get_qos_specs.return_value = qos mock_get_repl_type.return_value = None self.driver.create_volume(vol) self.array.post_volumes.\ assert_called_with(names=[vol_name], with_default_protection=False, volume=mock_data) mock_add_to_group.assert_called_with(vol, vol_name) self.assert_error_propagates([mock_create_favol], self.driver.create_volume, vol) @mock.patch(BASE_DRIVER_OBJ + ".set_qos") @mock.patch(DRIVER_PATH + ".flasharray.VolumePost") @mock.patch(BASE_DRIVER_OBJ + "._add_to_group_if_needed") @mock.patch(BASE_DRIVER_OBJ + "._get_replication_type_from_vol_type") @mock.patch.object(qos_specs, "get_qos_specs") @mock.patch.object(volume_types, 'get_volume_type') def test_create_volume_from_snapshot_with_qos(self, mock_get_volume_type, mock_get_qos_specs, mock_get_repl_type, mock_add_to_group, mock_fa, mock_qos): ctxt = context.get_admin_context() srcvol, _ = self.new_fake_vol() snap = fake_snapshot.fake_snapshot_obj(mock.MagicMock(), volume=srcvol) qos = qos_specs.create(ctxt, "qos-iops-bws", QOS_IOPS_BWS) vol, vol_name = self.new_fake_vol(set_provider_id=False, type_qos_specs_id=qos.id) mock_data = self.array.flasharray.VolumePost(names=[vol_name], source=pure. flasharray. 
Reference(name=vol_name), name=vol_name, qos={'maxIOPS': 100, 'maxBWS': 1048576, 'maxBWS': 1048576, 'maxIOPS_per_GB': 0, 'maxBWS_per_GB': 0}) mock_fa.return_value = mock_data mock_get_volume_type.return_value = vol.volume_type mock_get_qos_specs.return_value = qos mock_get_repl_type.return_value = None self.driver.create_volume_from_snapshot(vol, snap) self.array.post_volumes.assert_called_with(names=[vol_name], with_default_protection= False, volume=mock_data) self.driver.set_qos.assert_called_with(self.array, vol_name, 1, {'maxIOPS': 100, 'maxBWS': 1048576, 'maxIOPS_per_GB': 0, 'maxBWS_per_GB': 0}) self.assertFalse(self.array.extend_volume.called) mock_add_to_group.assert_called_once_with(vol, vol_name) self.assert_error_propagates( [self.array.post_volumes], self.driver.create_volume_from_snapshot, vol, snap) self.assertFalse(self.array.extend_volume.called) @mock.patch(BASE_DRIVER_OBJ + ".set_qos") @mock.patch.object(qos_specs, "get_qos_specs") @mock.patch.object(volume_types, 'get_volume_type') def test_manage_existing_with_qos(self, mock_get_volume_type, mock_get_qos_specs, mock_qos): ctxt = context.get_admin_context() ref_name = 'vol1' volume_ref = {'source-name': ref_name} qos = qos_specs.create(ctxt, "qos-iops-bws", QOS_IOPS_BWS) vol, vol_name = self.new_fake_vol(set_provider_id=False, type_qos_specs_id=qos.id) mock_get_volume_type.return_value = vol.volume_type mock_get_qos_specs.return_value = qos self.array.get_connections.return_value = [] self.array.get_volumes.return_value = MPV pg_rsp = ValidResponse( 200, None, 1, [{"group": {"name": "tstpg"}}], {}) self.array.get_protection_groups_volumes.return_value = pg_rsp tpg_rsp = ValidResponse( 200, None, 1, [DotNotation({"target_count": 2})], {}) self.array.get_protection_groups.return_value = tpg_rsp self.driver.manage_existing(vol, volume_ref) mock_qos.assert_called_with(self.array, vol_name, 3, {'maxIOPS': 100, 'maxBWS': 1048576, 'maxIOPS_per_GB': 0, 'maxBWS_per_GB': 0}) @mock.patch(DRIVER_PATH + ".flasharray.VolumePatch") def test_retype_qos(self, mock_fa): ctxt = context.get_admin_context() vol, vol_name = self.new_fake_vol() qos = qos_specs.create(ctxt, "qos-iops-bws", QOS_IOPS_BWS) new_type = fake_volume.fake_volume_type_obj(ctxt) new_type.qos_specs_id = qos.id mock_data = self.array.flasharray.\ VolumePatch(qos=self.flasharray. 
Qos(iops_limit=int(QOS_IOPS_BWS["maxIOPS"]), bandwidth_limit=int(QOS_IOPS_BWS["maxBWS"]) * 1024 * 1024)) mock_fa.return_value = mock_data get_voltype = "cinder.objects.volume_type.VolumeType.get_by_name_or_id" with mock.patch(get_voltype) as mock_get_vol_type: mock_get_vol_type.return_value = new_type self.driver._get_volume_type_extra_spec = mock.Mock( return_value={}) did_retype, model_update = self.driver.retype( ctxt, vol, new_type, None, # ignored by driver None, # ignored by driver ) self.array.patch_volumes.assert_called_with( names=[vol_name], volume=mock_data) self.assertTrue(did_retype) self.assertIsNone(model_update) @mock.patch(DRIVER_PATH + ".flasharray.VolumePatch") def test_retype_qos_reset_iops(self, mock_fa): ctxt = context.get_admin_context() vol, vol_name = self.new_fake_vol() new_type = fake_volume.fake_volume_type_obj(ctxt) mock_data = self.array.flasharray.\ VolumePatch(qos=self.flasharray.Qos(iops_limit='', bandwidth_limit='')) mock_fa.return_value = mock_data get_voltype = "cinder.objects.volume_type.VolumeType.get_by_name_or_id" with mock.patch(get_voltype) as mock_get_vol_type: mock_get_vol_type.return_value = new_type self.driver._get_volume_type_extra_spec = mock.Mock( return_value={}) did_retype, model_update = self.driver.retype( ctxt, vol, new_type, None, # ignored by driver None, # ignored by driver ) self.array.patch_volumes.assert_called_with( names=[vol_name], volume=mock_data) self.assertTrue(did_retype) self.assertIsNone(model_update) class PureISCSIDriverTestCase(PureBaseSharedDriverTestCase): def setUp(self): super(PureISCSIDriverTestCase, self).setUp() self.mock_config.use_chap_auth = False self.driver = pure.PureISCSIDriver(configuration=self.mock_config) self.driver._array = self.array self.mock_object(self.driver, '_get_current_array', return_value=self.array) self.driver._storage_protocol = 'iSCSI' self.mock_utils = mock.Mock() self.driver.driver_utils = self.mock_utils self.set_pure_hosts = ValidResponse(200, None, 1, [DotNotation(PURE_HOST.copy())], {}) def test_get_host(self): good_host = PURE_HOST.copy() good_host.update(iqns=INITIATOR_IQN) pure_bad_host = ValidResponse(200, None, 1, [], {}) pure_good_host = ValidResponse(200, None, 1, [DotNotation(good_host)], {}) self.array.get_hosts.return_value = pure_bad_host real_result = self.driver._get_host(self.array, ISCSI_CONNECTOR) self.assertEqual([], real_result) self.array.get_hosts.return_value = pure_good_host real_result = self.driver._get_host(self.array, ISCSI_CONNECTOR) self.assertEqual([good_host], real_result) self.assert_error_propagates([self.array.get_hosts], self.driver._get_host, self.array, ISCSI_CONNECTOR) @mock.patch(ISCSI_DRIVER_OBJ + "._get_wwn") @mock.patch(ISCSI_DRIVER_OBJ + "._connect") @mock.patch(ISCSI_DRIVER_OBJ + "._get_target_iscsi_ports") @mock.patch(BASE_DRIVER_OBJ + '._get_attachments') def test_initialize_connection(self, mock_attachments, mock_get_iscsi_ports, mock_connection, mock_get_wwn): vol, vol_name = self.new_fake_vol() mock_attachments.return_value = "Data", None mock_get_iscsi_ports.return_value = VALID_ISCSI_PORTS.items mock_get_wwn.return_value = '3624a93709714b5cb91634c470002b2c8' mock_connection.return_value = CONN.items result = deepcopy(ISCSI_CONNECTION_INFO) real_result = self.driver.initialize_connection(vol, ISCSI_CONNECTOR) self.assertDictEqual(result, real_result) mock_get_iscsi_ports.assert_called_with(self.array) mock_connection.assert_called_with(self.array, vol_name, ISCSI_CONNECTOR, None, None) 
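        # Failures while looking up iSCSI ports or creating the host connection
        # must propagate to the caller unchanged rather than being swallowed.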
self.assert_error_propagates([mock_get_iscsi_ports, mock_connection], self.driver.initialize_connection, vol, ISCSI_CONNECTOR) @mock.patch(ISCSI_DRIVER_OBJ + "._get_wwn") @mock.patch(ISCSI_DRIVER_OBJ + "._connect") @mock.patch(ISCSI_DRIVER_OBJ + "._get_target_iscsi_ports") @mock.patch(BASE_DRIVER_OBJ + '._get_attachments') def test_initialize_connection_ipv6(self, mock_attachments, mock_get_iscsi_ports, mock_connection, mock_get_wwn): vol, vol_name = self.new_fake_vol() mock_attachments.return_value = "Data", None mock_get_iscsi_ports.return_value = VALID_ISCSI_PORTS_IPV6.items mock_get_wwn.return_value = '3624a93709714b5cb91634c470002b2c8' mock_connection.return_value = CONN.items self.mock_config.pure_iscsi_cidr = ISCSI_CIDR_V6 result = deepcopy(ISCSI_CONNECTION_INFO_V6) real_result = self.driver.initialize_connection(vol, ISCSI_CONNECTOR) self.assertDictEqual(result, real_result) mock_get_iscsi_ports.assert_called_with(self.array) mock_connection.assert_called_with(self.array, vol_name, ISCSI_CONNECTOR, None, None) self.assert_error_propagates([mock_get_iscsi_ports, mock_connection], self.driver.initialize_connection, vol, ISCSI_CONNECTOR) @mock.patch(ISCSI_DRIVER_OBJ + "._get_wwn") @mock.patch(ISCSI_DRIVER_OBJ + "._connect") @mock.patch(ISCSI_DRIVER_OBJ + "._get_target_iscsi_ports") @mock.patch(BASE_DRIVER_OBJ + '._get_attachments') def test_initialize_connection_uniform_ac(self, mock_attachments, mock_get_iscsi_ports, mock_connection, mock_get_wwn): repl_extra_specs = { 'replication_type': ' sync', 'replication_enabled': ' true', } vol, vol_name = self.new_fake_vol(type_extra_specs=repl_extra_specs) mock_attachments.return_value = "Data", None mock_get_wwn.return_value = '3624a93709714b5cb91634c470002b2c8' result = deepcopy(ISCSI_CONNECTION_INFO_AC) self.driver._is_active_cluster_enabled = True mock_secondary = mock.MagicMock() mock_connection.side_effect = lambda *args, **kwargs: \ CONN.items if args and args[0] == self.array else AC_CONN.items mock_get_iscsi_ports.side_effect = lambda *args, **kwargs: \ VALID_ISCSI_PORTS.items if args and args[0] == self.array \ else VALID_AC_ISCSI_PORTS.items self.driver._uniform_active_cluster_target_arrays = [mock_secondary] real_result = self.driver.initialize_connection(vol, ISCSI_CONNECTOR) self.assertDictEqual(result, real_result) mock_get_iscsi_ports.assert_has_calls([ mock.call(self.array), mock.call(mock_secondary), ]) mock_connection.assert_has_calls([ mock.call(self.array, vol_name, ISCSI_CONNECTOR, None, None), mock.call(mock_secondary, vol_name, ISCSI_CONNECTOR, None, None), ]) @mock.patch(ISCSI_DRIVER_OBJ + "._get_wwn") @mock.patch(ISCSI_DRIVER_OBJ + "._connect") @mock.patch(ISCSI_DRIVER_OBJ + "._get_target_iscsi_ports") @mock.patch(BASE_DRIVER_OBJ + '._get_attachments') def test_initialize_connection_uniform_ac_cidr(self, mock_attachments, mock_get_iscsi_ports, mock_connection, mock_get_wwn): repl_extra_specs = { 'replication_type': ' sync', 'replication_enabled': ' true', } vol, vol_name = self.new_fake_vol(type_extra_specs=repl_extra_specs) mock_attachments.return_value = "Data", None mock_get_wwn.return_value = '3624a93709714b5cb91634c470002b2c8' result = deepcopy(ISCSI_CONNECTION_INFO_AC_FILTERED) self.driver._is_active_cluster_enabled = True # Set up some CIDRs to block: this will block only one of the # ActiveCluster addresses from above, so we should check that we only # get four+three results back self.driver.configuration.pure_iscsi_cidr = ISCSI_CIDR_FILTERED mock_secondary = mock.MagicMock() mock_connection.side_effect = 
lambda *args, **kwargs: \ CONN.items if args and args[0] == self.array else AC_CONN.items mock_get_iscsi_ports.side_effect = lambda *args, **kwargs: \ VALID_ISCSI_PORTS.items if args and args[0] == self.array \ else VALID_AC_ISCSI_PORTS.items self.driver._uniform_active_cluster_target_arrays = [mock_secondary] real_result = self.driver.initialize_connection(vol, ISCSI_CONNECTOR) self.assertDictEqual(result, real_result) mock_get_iscsi_ports.assert_has_calls([ mock.call(self.array), mock.call(mock_secondary), ]) mock_connection.assert_has_calls([ mock.call(self.array, vol_name, ISCSI_CONNECTOR, None, None), mock.call(mock_secondary, vol_name, ISCSI_CONNECTOR, None, None), ]) @mock.patch(ISCSI_DRIVER_OBJ + "._get_wwn") @mock.patch(ISCSI_DRIVER_OBJ + "._connect") @mock.patch(ISCSI_DRIVER_OBJ + "._get_target_iscsi_ports") @mock.patch(BASE_DRIVER_OBJ + '._get_attachments') def test_initialize_connection_uniform_ac_cidrs(self, mock_attachments, mock_get_iscsi_ports, mock_connection, mock_get_wwn): repl_extra_specs = { 'replication_type': ' sync', 'replication_enabled': ' true', } vol, vol_name = self.new_fake_vol(type_extra_specs=repl_extra_specs) mock_attachments.return_value = "Data", None mock_get_wwn.return_value = '3624a93709714b5cb91634c470002b2c8' result = deepcopy(ISCSI_CONNECTION_INFO_AC_FILTERED_LIST) self.driver._is_active_cluster_enabled = True # Set up some CIDRs to block: this will allow only 2 addresses from # each host of the ActiveCluster, so we should check that we only # get two+two results back self.driver.configuration.pure_iscsi_cidr_list = ISCSI_CIDRS_FILTERED mock_secondary = mock.MagicMock() mock_connection.side_effect = lambda *args, **kwargs: \ CONN.items if args and args[0] == self.array else AC_CONN.items mock_get_iscsi_ports.side_effect = lambda *args, **kwargs: \ VALID_ISCSI_PORTS.items if args and args[0] == self.array \ else VALID_AC_ISCSI_PORTS_IPV6.items self.driver._uniform_active_cluster_target_arrays = [mock_secondary] real_result = self.driver.initialize_connection(vol, ISCSI_CONNECTOR) self.assertDictEqual(result, real_result) mock_get_iscsi_ports.assert_has_calls([ mock.call(self.array), mock.call(mock_secondary), ]) mock_connection.assert_has_calls([ mock.call(self.array, vol_name, ISCSI_CONNECTOR, None, None), mock.call(mock_secondary, vol_name, ISCSI_CONNECTOR, None, None), ]) @mock.patch(ISCSI_DRIVER_OBJ + "._get_wwn") @mock.patch(ISCSI_DRIVER_OBJ + "._get_chap_credentials") @mock.patch(ISCSI_DRIVER_OBJ + "._connect") @mock.patch(ISCSI_DRIVER_OBJ + "._get_target_iscsi_ports") @mock.patch(BASE_DRIVER_OBJ + '._get_attachments') def test_initialize_connection_with_auth(self, mock_attachments, mock_get_iscsi_ports, mock_connection, mock_get_chap_creds, mock_get_wwn): vol, vol_name = self.new_fake_vol() self.maxDiff = None auth_type = "CHAP" chap_username = ISCSI_CONNECTOR["host"] chap_password = "password" mock_attachments.return_value = "Data", None mock_get_iscsi_ports.return_value = VALID_ISCSI_PORTS.items mock_get_wwn.return_value = '3624a93709714b5cb91634c470002b2c8' mock_connection.return_value = CONN.items result = deepcopy(ISCSI_CONNECTION_INFO) result["data"]["auth_method"] = auth_type result["data"]["auth_username"] = chap_username result["data"]["auth_password"] = chap_password self.mock_config.use_chap_auth = True mock_get_chap_creds.return_value = (chap_username, chap_password) # Branch where no credentials were generated real_result = self.driver.initialize_connection(vol, ISCSI_CONNECTOR) mock_connection.assert_called_with(self.array, 
vol_name, ISCSI_CONNECTOR, chap_username, chap_password) self.assertDictEqual(result, real_result) self.assert_error_propagates([mock_get_iscsi_ports, mock_connection], self.driver.initialize_connection, vol, ISCSI_CONNECTOR) @mock.patch(ISCSI_DRIVER_OBJ + "._get_wwn") @mock.patch(ISCSI_DRIVER_OBJ + "._connect") @mock.patch(ISCSI_DRIVER_OBJ + "._get_target_iscsi_ports") @mock.patch(BASE_DRIVER_OBJ + '._get_attachments') def test_initialize_connection_multipath(self, mock_attachments, mock_get_iscsi_ports, mock_connection, mock_get_wwn): vol, vol_name = self.new_fake_vol() mock_get_iscsi_ports.return_value = VALID_ISCSI_PORTS.items mock_attachments.return_value = "Data", None mock_get_wwn.return_value = '3624a93709714b5cb91634c470002b2c8' mock_connection.return_value = CONN.items multipath_connector = deepcopy(ISCSI_CONNECTOR) multipath_connector["multipath"] = True result = deepcopy(ISCSI_CONNECTION_INFO) real_result = self.driver.initialize_connection(vol, multipath_connector) self.assertDictEqual(result, real_result) mock_get_iscsi_ports.assert_called_with(self.array) mock_connection.assert_called_with(self.array, vol_name, multipath_connector, None, None) multipath_connector["multipath"] = False self.driver.initialize_connection(vol, multipath_connector) def test_get_target_iscsi_ports(self): self.array.get_controllers.return_value = CTRL_OBJ self.array.get_ports.return_value = VALID_ISCSI_PORTS self.array.get_network_interfaces.return_value = ValidResponse( 200, None, 1, [DotNotation(INTERFACES[1])], {}) ret = self.driver._get_target_iscsi_ports(self.array) self.assertEqual(ISCSI_PORTS[0:4], ret[0:4]) def test_get_target_iscsi_ports_with_iscsi_and_fc(self): self.array.get_controllers.return_value = CTRL_OBJ PORTS_DATA = [DotNotation(i) for i in ISCSI_PORTS_WITH] ifc_ports = ValidResponse(200, None, 1, PORTS_DATA, {}) self.array.get_ports.return_value = ifc_ports self.array.get_network_interfaces.return_value = ValidResponse( 200, None, 1, [DotNotation(INTERFACES[0])], {}) ret = self.driver._get_target_iscsi_ports(self.array) self.assertEqual(ISCSI_PORTS_WITH[0:9], ret[0:9]) def test_get_target_iscsi_ports_with_no_ports(self): # Should raise an exception if there are no ports self.array.get_controllers.return_value = CTRL_OBJ no_ports = ValidResponse(200, None, 1, [], {}) self.array.get_network_interfaces.return_value = ValidResponse( 200, None, 1, [], {}) self.array.get_ports.return_value = no_ports self.assertRaises(pure.PureDriverException, self.driver._get_target_iscsi_ports, self.array) def test_get_target_iscsi_ports_with_only_fc_ports(self): # Should raise an exception of there are no iscsi ports self.array.get_controllers.return_value = CTRL_OBJ PORTS_NOISCSI = [DotNotation(i) for i in PORTS_WITHOUT] self.array.get_network_interfaces.return_value = ValidResponse( 200, None, 1, [DotNotation(INTERFACES[3])], {}) self.array.get_ports.\ return_value = ValidResponse(200, None, 1, PORTS_NOISCSI, {}) self.assertRaises(pure.PureDriverException, self.driver._get_target_iscsi_ports, self.array) @mock.patch(DRIVER_PATH + ".flasharray.HostPatch") @mock.patch(DRIVER_PATH + ".flasharray.HostPost") @mock.patch(ISCSI_DRIVER_OBJ + "._get_host", autospec=True) @mock.patch(ISCSI_DRIVER_OBJ + "._generate_purity_host_name", spec=True) def test_connect(self, mock_generate, mock_host, mock_post_host, mock_patch_host): vol, vol_name = self.new_fake_vol() # Branch where host already exists pure_hosts = ValidResponse(200, None, 1, [DotNotation(PURE_HOST.copy())], {}) mock_host.return_value = 
pure_hosts.items self.array.post_connections.return_value = CONN real_result = self.driver._connect(self.array, vol_name, ISCSI_CONNECTOR, None, None) self.assertEqual([CONNECTION_DATA], real_result) mock_host.assert_called_with(self.driver, self.array, ISCSI_CONNECTOR, remote=False) self.assertFalse(mock_generate.called) self.assertFalse(self.array.post_hosts.called) self.array.post_connections.\ assert_called_with(host_names=[PURE_HOST_NAME], volume_names=[vol_name]) # Branch where new host is created empty_hosts = ValidResponse(200, None, 1, [], {}) mock_host.return_value = empty_hosts.items mock_generate.return_value = PURE_HOST_NAME real_result = self.driver._connect(self.array, vol_name, ISCSI_CONNECTOR, None, None) mock_host.assert_called_with(self.driver, self.array, ISCSI_CONNECTOR, remote=False) mock_generate.assert_called_with(ISCSI_CONNECTOR) self.array.post_hosts.assert_called_with(names=[PURE_HOST_NAME], host=mock_post_host()) self.assertFalse(self.array.patch_hosts.called) self.assertEqual([CONNECTION_DATA], real_result) mock_generate.reset_mock() self.array.reset_mock() self.assert_error_propagates( [mock_host, mock_generate, self.array.post_connections, self.array.post_hosts], self.driver._connect, self.array, vol_name, ISCSI_CONNECTOR, None, None) self.mock_config.use_chap_auth = True chap_user = ISCSI_CONNECTOR["host"] chap_password = "sOmEseCr3t" # Branch where chap is used and credentials already exist real_result = self.driver._connect(self.array, vol_name, ISCSI_CONNECTOR, chap_user, chap_password) self.assertEqual([CONNECTION_DATA], real_result) self.array.patch_hosts.assert_called_with(names=[PURE_HOST_NAME], host=mock_patch_host()) self.array.reset_mock() self.mock_config.use_chap_auth = False self.mock_config.safe_get.return_value = 'oracle-vm-server' # Branch where personality is set self.driver._connect(self.array, vol_name, ISCSI_CONNECTOR, None, None) self.assertEqual([CONNECTION_DATA], real_result) self.array.patch_hosts.\ assert_called_with(names=[PURE_HOST_NAME], host=mock_patch_host( personality='oracle-vm-server')) @mock.patch(DRIVER_PATH + ".LOG") @mock.patch(ISCSI_DRIVER_OBJ + "._get_host", autospec=True) def test_connect_already_connected(self, mock_host, mock_logger): vol, vol_name = self.new_fake_vol() mock_host.return_value = self.set_pure_hosts.items self.array.get_connections.return_value = NCONN err_rsp = ErrorResponse(400, [DotNotation({'message': 'already exists'})], {}) self.array.post_connections.return_value = err_rsp self.array.get_volumes.return_value = MPV actual = self.driver._connect(self.array, vol_name, ISCSI_CONNECTOR, None, None) mock_logger.debug.\ assert_called_with('Volume connection already exists for Purity ' 'host with message: %s', 'already exists') self.assertEqual(NCONN.items, actual) self.assertTrue(self.array.post_connections.called) self.assertTrue(bool(self.array.get_connections)) @mock.patch(ISCSI_DRIVER_OBJ + "._get_host", autospec=True) def test_connect_already_connected_list_hosts_empty(self, mock_host): vol, vol_name = self.new_fake_vol() mock_host.return_value = self.set_pure_hosts.items self.array.get_connections.return_value = CONN err_rsp = ErrorResponse(400, [DotNotation({'message': 'unknown'})], {}) self.array.post_connections.return_value = err_rsp self.assertRaises(pure.PureDriverException, self.driver._connect, self.array, vol_name, ISCSI_CONNECTOR, None, None) self.assertTrue(self.array.post_connections.called) self.assertTrue(bool(self.array.get_connections)) @mock.patch(ISCSI_DRIVER_OBJ + "._get_host", 
autospec=True) def test_connect_already_connected_list_hosts_exception(self, mock_host): vol, vol_name = self.new_fake_vol() hosts = deepcopy(PURE_HOST) hosts['name'] = 'utest' pure_hosts = ValidResponse(200, None, 1, [DotNotation(hosts)], {}) mock_host.return_value = pure_hosts.items self.array.get_connections.return_value = CONN err_con = ErrorResponse(400, [DotNotation({'message': 'Unknown Error'})], {}) self.array.post_connections.return_value = err_con self.array.get_volumes.return_value = MPV self.assertRaises(pure.PureDriverException, self.driver._connect, self.array, vol_name, ISCSI_CONNECTOR, None, None) self.assertTrue(self.array.post_connections.called) self.assertTrue(bool(self.array.get_connections)) @mock.patch(ISCSI_DRIVER_OBJ + "._generate_purity_host_name", spec=True) @mock.patch(ISCSI_DRIVER_OBJ + "._get_chap_secret_from_init_data") @mock.patch(ISCSI_DRIVER_OBJ + "._get_host", autospec=True) def test_connect_host_deleted(self, mock_host, mock_get_secret, mock_hname): vol, vol_name = self.new_fake_vol() empty_hosts = ValidResponse(200, None, 1, [], {}) mock_host.return_value = empty_hosts.items mock_hname.return_value = PURE_HOST_NAME self.mock_config.use_chap_auth = True mock_get_secret.return_value = 'abcdef' err_rsp = ErrorResponse(400, [DotNotation({'message': 'Host does not exist'})], {}) self.array.patch_hosts.return_value = err_rsp # Because we mocked out retry make sure we are raising the right # exception to allow for retries to happen. self.assertRaises(pure.PureRetryableException, self.driver._connect, self.array, vol_name, ISCSI_CONNECTOR, None, None) @mock.patch(ISCSI_DRIVER_OBJ + "._generate_purity_host_name", spec=True) @mock.patch(ISCSI_DRIVER_OBJ + "._get_host", autospec=True) def test_connect_iqn_already_in_use(self, mock_host, mock_hname): vol, vol_name = self.new_fake_vol() empty_hosts = ValidResponse(200, None, 1, [], {}) mock_host.return_value = empty_hosts.items mock_hname.return_value = PURE_HOST_NAME err_iqn = ErrorResponse(400, [DotNotation({'message': 'already in use'})], {}) self.array.post_hosts.return_value = err_iqn # Because we mocked out retry make sure we are raising the right # exception to allow for retries to happen. self.assertRaises(pure.PureRetryableException, self.driver._connect, self.array, vol_name, ISCSI_CONNECTOR, None, None) @mock.patch(ISCSI_DRIVER_OBJ + "._generate_purity_host_name", spec=True) @mock.patch(ISCSI_DRIVER_OBJ + "._get_host", autospec=True) def test_connect_create_host_already_exists(self, mock_host, mock_hname): vol, vol_name = self.new_fake_vol() mock_host.return_value = [] mock_hname.return_value = PURE_HOST_NAME err_iqn = ErrorResponse(400, [DotNotation({'message': 'already exists'})], {}) self.array.post_hosts.return_value = err_iqn # Because we mocked out retry make sure we are raising the right # exception to allow for retries to happen. 
self.assertRaises(pure.PureRetryableException, self.driver._connect, self.array, vol_name, ISCSI_CONNECTOR, None, None) @mock.patch(ISCSI_DRIVER_OBJ + "._generate_chap_secret") def test_get_chap_credentials_create_new(self, mock_generate_secret): self.mock_utils.get_driver_initiator_data.return_value = [] host = 'host1' expected_password = 'foo123' mock_generate_secret.return_value = expected_password self.mock_utils.insert_driver_initiator_data.return_value = True username, password = self.driver._get_chap_credentials(host, INITIATOR_IQN) self.assertEqual(host, username) self.assertEqual(expected_password, password) self.mock_utils.insert_driver_initiator_data.assert_called_once_with( INITIATOR_IQN, pure.CHAP_SECRET_KEY, expected_password ) @mock.patch(ISCSI_DRIVER_OBJ + "._generate_chap_secret") def test_get_chap_credentials_create_new_fail_to_set(self, mock_generate_secret): host = 'host1' expected_password = 'foo123' mock_generate_secret.return_value = 'badpassw0rd' self.mock_utils.insert_driver_initiator_data.return_value = False self.mock_utils.get_driver_initiator_data.side_effect = [ [], [{'key': pure.CHAP_SECRET_KEY, 'value': expected_password}], pure.PureDriverException(reason='this should never be hit'), ] username, password = self.driver._get_chap_credentials(host, INITIATOR_IQN) self.assertEqual(host, username) self.assertEqual(expected_password, password) class PureFCDriverTestCase(PureBaseSharedDriverTestCase): def setUp(self): super(PureFCDriverTestCase, self).setUp() self.driver = pure.PureFCDriver(configuration=self.mock_config) self.driver._storage_protocol = "FC" self.driver._array = self.array self.mock_object(self.driver, '_get_current_array', return_value=self.array) self.driver._lookup_service = mock.Mock() pure_hosts = ValidResponse(200, None, 1, [DotNotation(PURE_HOST.copy())], {}) def test_get_host(self): good_host = PURE_HOST.copy() good_host.update(wwn=["another-wrong-wwn", INITIATOR_WWN]) pure_bad_host = ValidResponse(200, None, 1, [], {}) pure_good_host = ValidResponse(200, None, 1, [DotNotation(good_host)], {}) self.array.get_hosts.return_value = pure_bad_host actual_result = self.driver._get_host(self.array, FC_CONNECTOR) self.assertEqual([], actual_result) self.array.get_hosts.return_value = pure_good_host actual_result = self.driver._get_host(self.array, FC_CONNECTOR) self.assertEqual([good_host], actual_result) self.assert_error_propagates([self.array.get_hosts], self.driver._get_host, self.array, FC_CONNECTOR) def test_get_host_uppercase_wwpn(self): expected_host = PURE_HOST.copy() expected_host['wwn'] = [INITIATOR_WWN] pure_hosts = ValidResponse(200, None, 1, [DotNotation(expected_host)], {}) self.array.get_hosts.return_value = pure_hosts connector = FC_CONNECTOR.copy() connector['wwpns'] = [wwpn.upper() for wwpn in FC_CONNECTOR['wwpns']] actual_result = self.driver._get_host(self.array, connector) self.assertEqual([expected_host], actual_result) @mock.patch(FC_DRIVER_OBJ + "._get_valid_ports") @mock.patch(FC_DRIVER_OBJ + "._get_wwn") @mock.patch(FC_DRIVER_OBJ + "._connect") @mock.patch(BASE_DRIVER_OBJ + '._get_attachments') def test_initialize_connection(self, mock_attachments, mock_connection, mock_get_wwn, mock_ports): vol, vol_name = self.new_fake_vol() mock_attachments.return_value = "Data", None lookup_service = self.driver._lookup_service (lookup_service.get_device_mapping_from_network. 
return_value) = DEVICE_MAPPING mock_get_wwn.return_value = '3624a93709714b5cb91634c470002b2c8' self.array.get_connections.return_value = CONN.items mock_connection.return_value = CONN.items mock_ports.return_value = VALID_FC_PORTS.items actual_result = self.driver.initialize_connection(vol, FC_CONNECTOR) self.assertDictEqual(FC_CONNECTION_INFO, actual_result) @mock.patch(FC_DRIVER_OBJ + "._get_valid_ports") @mock.patch(FC_DRIVER_OBJ + "._get_wwn") @mock.patch(FC_DRIVER_OBJ + "._connect") @mock.patch(BASE_DRIVER_OBJ + '._get_attachments') def test_initialize_connection_uniform_ac(self, mock_attachments, mock_connection, mock_get_wwn, mock_ports): repl_extra_specs = { 'replication_type': ' sync', 'replication_enabled': ' true', } vol, vol_name = self.new_fake_vol(type_extra_specs=repl_extra_specs) mock_attachments.return_value = "Data", None lookup_service = self.driver._lookup_service (lookup_service.get_device_mapping_from_network. return_value) = AC_DEVICE_MAPPING mock_get_wwn.return_value = '3624a93709714b5cb91634c470002b2c8' self.array.get_connections.return_value = CONN.items self.driver._is_active_cluster_enabled = True mock_secondary = mock.MagicMock() self.driver._uniform_active_cluster_target_arrays = [mock_secondary] mock_secondary.get_connections.return_value = AC_CONN.items mock_connection.side_effect = lambda *args, **kwargs: \ CONN.items if args and args[0] == self.array else AC_CONN.items mock_ports.side_effect = lambda *args, **kwargs: \ VALID_FC_PORTS.items if args and args[0] == self.array \ else VALID_AC_FC_PORTS.items actual_result = self.driver.initialize_connection(vol, FC_CONNECTOR) self.assertDictEqual(FC_CONNECTION_INFO_AC, actual_result) @mock.patch(DRIVER_PATH + ".flasharray.HostPatch") @mock.patch(DRIVER_PATH + ".flasharray.HostPost") @mock.patch(FC_DRIVER_OBJ + "._get_host", autospec=True) @mock.patch(FC_DRIVER_OBJ + "._generate_purity_host_name", spec=True) def test_connect(self, mock_generate, mock_host, mock_post_host, mock_patch_host): vol, vol_name = self.new_fake_vol() # Branch where host already exists pure_hosts = ValidResponse(200, None, 1, [DotNotation(PURE_HOST.copy())], {}) mock_host.return_value = pure_hosts.items self.array.get_connections.return_value = CONN self.array.post_connections.return_value = CONN real_result = self.driver._connect(self.array, vol_name, FC_CONNECTOR) self.assertEqual([CONNECTION_DATA], real_result) mock_host.assert_called_with(self.driver, self.array, FC_CONNECTOR, remote=False) self.assertFalse(mock_generate.called) self.assertFalse(self.array.create_host.called) self.array.post_connections.\ assert_called_with(host_names=[PURE_HOST_NAME], volume_names=[vol_name]) # Branch where new host is created empty_hosts = ValidResponse(200, None, 1, [], {}) mock_host.return_value = empty_hosts.items mock_generate.return_value = PURE_HOST_NAME real_result = self.driver._connect(self.array, vol_name, FC_CONNECTOR) mock_host.assert_called_with(self.driver, self.array, FC_CONNECTOR, remote=False) mock_generate.assert_called_with(FC_CONNECTOR) self.array.post_hosts.assert_called_with(names=[PURE_HOST_NAME], host=mock_post_host()) self.assertEqual([CONNECTION_DATA], real_result) mock_generate.reset_mock() self.array.reset_mock() self.assert_error_propagates( [mock_host, mock_generate, self.array.post_connections, self.array.post_hosts], self.driver._connect, self.array, vol_name, FC_CONNECTOR) self.mock_config.safe_get.return_value = 'oracle-vm-server' # Branch where personality is set self.driver._connect(self.array, vol_name, 
FC_CONNECTOR) self.assertEqual([CONNECTION_DATA], real_result) self.array.patch_hosts.\ assert_called_with(names=[PURE_HOST_NAME], host=mock_patch_host(personality= 'oracle-vm-server')) @mock.patch(DRIVER_PATH + ".LOG") @mock.patch(FC_DRIVER_OBJ + "._get_host", autospec=True) def test_connect_already_connected(self, mock_host, mock_logger): vol, vol_name = self.new_fake_vol() hosts = deepcopy(PURE_HOST) hosts['name'] = 'utest' pure_hosts = ValidResponse(200, None, 1, [DotNotation(hosts)], {}) mock_host.return_value = pure_hosts.items vdict = {'id': '1e5177e7-95e5-4a0f-b170-e45f4b469f6a', 'name': 'volume-1e5177e7-95e5-4a0f-b170-e45f4b469f6a-cinder'} NCONNECTION_DATA = {'host': {'name': 'utest'}, 'host_group': {}, 'lun': 1, 'nsid': None, 'protocol_endpoint': {}, 'volume': vdict} NCONN = ValidResponse(200, None, 1, [DotNotation(NCONNECTION_DATA)], {}) self.array.get_connections.return_value = NCONN pure_vol_copy = deepcopy(MANAGEABLE_PURE_VOLS) MPV = ValidResponse(200, None, 3, [DotNotation(pure_vol_copy[0]), DotNotation(pure_vol_copy[1]), DotNotation(pure_vol_copy[2])], {}) self.array.get_volumes.return_value = MPV err_rsp = ErrorResponse(400, [DotNotation({'message': 'already exists'})], {}) self.array.post_connections.return_value = err_rsp actual = self.driver._connect(self.array, vol_name, FC_CONNECTOR) mock_logger.debug.\ assert_called_with('Volume connection already exists for Purity ' 'host with message: %s', 'already exists') self.assertEqual(NCONN.items, actual) self.assertTrue(self.array.post_connections.called) self.assertTrue(bool(self.array.get_connections)) @mock.patch(FC_DRIVER_OBJ + "._get_host", autospec=True) def test_connect_already_connected_list_hosts_empty(self, mock_host): vol, vol_name = self.new_fake_vol() pure_hosts = ValidResponse(200, None, 1, [DotNotation(PURE_HOST)], {}) mock_host.return_value = pure_hosts.items self.array.get_volumes.return_value = MPV err_rsp = ErrorResponse(400, [DotNotation({'message': 'unknown'})], {}) self.array.get_connections.return_value = CONN self.array.post_connections.return_value = err_rsp self.assertRaises(pure.PureDriverException, self.driver._connect, self.array, vol_name, FC_CONNECTOR) self.assertTrue(self.array.post_connections.called) self.assertTrue(bool(self.array.get_connections)) @mock.patch(FC_DRIVER_OBJ + "._get_host", autospec=True) def test_connect_already_connected_list_hosts_exception(self, mock_host): vol, vol_name = self.new_fake_vol() hosts = deepcopy(PURE_HOST) hosts['name'] = 'utest' pure_hosts = ValidResponse(200, None, 1, [DotNotation(hosts)], {}) mock_host.return_value = pure_hosts.items err_rsp = ErrorResponse(400, [DotNotation({'message': 'Unknown Error'})], {}) self.array.get_connections.return_value = CONN self.array.post_connections.return_value = err_rsp self.assertRaises(pure.PureDriverException, self.driver._connect, self.array, vol_name, FC_CONNECTOR) self.assertTrue(self.array.post_connections.called) self.assertTrue(bool(self.array.get_connections)) @mock.patch(FC_DRIVER_OBJ + "._get_host", autospec=True) def test_connect_wwn_already_in_use(self, mock_host): vol, vol_name = self.new_fake_vol() mock_host.return_value = [] err_rsp = ErrorResponse(400, [DotNotation({'message': 'already in use'})], {}) self.array.post_hosts.return_value = err_rsp # Because we mocked out retry make sure we are raising the right # exception to allow for retries to happen. 
self.assertRaises(pure.PureRetryableException, self.driver._connect, self.array, vol_name, FC_CONNECTOR) @mock.patch(FC_DRIVER_OBJ + "._disconnect") def test_terminate_connection_uniform_ac(self, mock_disconnect): repl_extra_specs = { 'replication_type': ' sync', 'replication_enabled': ' true', } vol, vol_name = self.new_fake_vol(type_extra_specs=repl_extra_specs) fcls = self.driver._lookup_service fcls.get_device_mapping_from_network.return_value = AC_DEVICE_MAPPING self.driver._is_active_cluster_enabled = True mock_secondary = mock.MagicMock() self.driver._uniform_active_cluster_target_arrays = [mock_secondary] self.array.get_ports.return_value = FC_PORTS mock_secondary.list_ports.return_value = AC_FC_PORTS mock_disconnect.return_value = False self.driver.terminate_connection(vol, FC_CONNECTOR) mock_disconnect.assert_has_calls([ mock.call(mock_secondary, vol, FC_CONNECTOR, is_multiattach=False, remove_remote_hosts=True), mock.call(self.array, vol, FC_CONNECTOR, is_multiattach=False, remove_remote_hosts=False) ]) @ddt.ddt class PureVolumeUpdateStatsTestCase(PureBaseSharedDriverTestCase): def setUp(self): super(PureVolumeUpdateStatsTestCase, self).setUp() self.array.get_arrays.side_effect = self.fake_get_array @ddt.data(dict(reduction=10, config_ratio=5, expected_ratio=5, auto=False), dict(reduction=10, config_ratio=5, expected_ratio=10, auto=True), dict(reduction=1000, config_ratio=5, expected_ratio=5, auto=True)) @ddt.unpack def test_get_thin_provisioning(self, reduction, config_ratio, expected_ratio, auto): self.mock_object(volume_utils, 'get_max_over_subscription_ratio', return_value=expected_ratio) self.mock_config.pure_automatic_max_oversubscription_ratio = auto self.mock_config.max_over_subscription_ratio = config_ratio actual_ratio = self.driver._get_thin_provisioning(reduction) self.assertEqual(expected_ratio, actual_ratio) @ddt.data( dict( connections=[ {'status': 'connected', 'type': 'sync-replication'}, ], expected='sync'), dict( connections=[ {'status': 'connected', 'type': 'async-replication'} ], expected='async'), dict( connections=[ {'status': 'connected', 'type': 'async-replication'}, {'status': 'connected', 'type': 'sync-replication'}, {'status': 'connected', 'type': 'async-replication'} ], expected='trisync'), dict( connections=[ {'status': 'connected', 'type': 'async-replication'}, {'status': 'connected', 'type': 'async-replication'} ], expected='async'), dict( connections=[ {'status': 'connected', 'type': 'sync-replication'}, {'status': 'connected', 'type': 'sync-replication'} ], expected='sync'), dict( connections=[ {'status': 'connected', 'type': 'sync-replication'}, {'status': 'connected', 'type': 'async-replication'} ], expected='trisync'), dict( connections=[ {'status': 'connecting', 'type': 'sync-replication'} ], expected=None)) @ddt.unpack def test_get_replication_capability(self, connections, expected): clist = [DotNotation(connections[i]) for i in range(len(connections))] con_obj = ValidResponse(200, None, 1, clist, {}) self.array.get_array_connections.return_value = con_obj connection_status = self.driver._get_replication_capability() self.assertEqual(expected, connection_status) @mock.patch(BASE_DRIVER_OBJ + '._get_replication_capability') @mock.patch(BASE_DRIVER_OBJ + '.get_goodness_function') @mock.patch(BASE_DRIVER_OBJ + '.get_filter_function') @mock.patch(BASE_DRIVER_OBJ + '._get_thin_provisioning') def test_get_volume_stats(self, mock_get_thin_provisioning, mock_get_filter, mock_get_goodness, mock_get_replication_capability): filter_function = 
'capabilities.total_volumes < 10' goodness_function = '90' reserved_percentage = 12 SPACE_OBJ = ValidResponse(200, None, 1, [DotNotation(ARRAYS_SPACE_INFO)], {}) PERF_OBJ = ValidResponse(200, None, 1, [DotNotation(PERF_INFO)], {}) self.array.get_arrays_space.return_value = SPACE_OBJ self.array.get_arrays_performance.return_value = PERF_OBJ self.array.current_array.version.return_value = "6.2.0" pure_hosts = ValidResponse(200, None, 1, [DotNotation(PURE_HOST)], {}) self.array.get_hosts.return_value = pure_hosts self.array.get_volumes.return_value = MPV self.array.get_volume_snapshots.return_value = MPS pg = ValidResponse(200, None, 1, [DotNotation(PURE_PGROUP)], {}) self.array.get_protection_groups.return_value = \ pg self.mock_config.reserved_percentage = reserved_percentage mock_get_filter.return_value = filter_function mock_get_goodness.return_value = goodness_function mock_get_replication_capability.return_value = 'sync' mock_get_thin_provisioning.return_value = TOTAL_REDUCTION expected_result = { 'volume_backend_name': VOLUME_BACKEND_NAME, 'vendor_name': 'Pure Storage', 'driver_version': self.driver.VERSION, 'storage_protocol': None, 'consistencygroup_support': True, 'consistent_group_snapshot_enabled': True, 'consistent_group_replication_enabled': True, 'thin_provisioning_support': True, 'multiattach': True, 'QoS_support': True, 'total_capacity_gb': TOTAL_CAPACITY, 'free_capacity_gb': TOTAL_CAPACITY - USED_SPACE, 'reserved_percentage': reserved_percentage, 'provisioned_capacity': PROVISIONED_CAPACITY, 'max_over_subscription_ratio': TOTAL_REDUCTION, 'filter_function': filter_function, 'goodness_function': goodness_function, 'total_volumes': 3, 'total_snapshots': 3, 'total_hosts': 1, 'total_pgroups': 1, 'writes_per_sec': PERF_INFO['writes_per_sec'], 'reads_per_sec': PERF_INFO['reads_per_sec'], 'input_per_sec': PERF_INFO['input_per_sec'], 'output_per_sec': PERF_INFO['output_per_sec'], 'usec_per_read_op': PERF_INFO['usec_per_read_op'], 'usec_per_write_op': PERF_INFO['usec_per_write_op'], 'queue_depth': PERF_INFO['queue_depth'], 'queue_usec_per_mirrored_write_op': PERF_INFO[ 'queue_usec_per_mirrored_write_op' ], 'queue_usec_per_read_op': PERF_INFO['queue_usec_per_read_op'], 'queue_usec_per_write_op': PERF_INFO['queue_usec_per_write_op'], 'replication_capability': 'sync', 'replication_enabled': False, 'replication_type': [], 'replication_count': 0, 'replication_targets': [], } real_result = self.driver.get_volume_stats(refresh=True) self.assertDictEqual(expected_result, real_result) # Make sure when refresh=False we are using cached values and not # sending additional requests to the array. 
self.array.reset_mock() real_result = self.driver.get_volume_stats(refresh=False) self.assertDictEqual(expected_result, real_result) self.assertFalse(self.array.get_arrays.called) self.assertFalse(self.array.get_volumes.called) self.assertFalse(self.array.get_hosts.called) self.assertFalse(self.array.get_protection_groups.called) class PureVolumeGroupsTestCase(PureBaseSharedDriverTestCase): def setUp(self): super(PureVolumeGroupsTestCase, self).setUp() self.array.get_arrays.side_effect = self.fake_get_array self.ctxt = context.get_admin_context() self.driver.db = mock.Mock() self.driver.db.group_get = mock.Mock() @mock.patch(BASE_DRIVER_OBJ + '._add_volume_to_consistency_group') @mock.patch('cinder.volume.volume_utils.is_group_a_cg_snapshot_type') def test_add_to_group_if_needed(self, mock_is_cg, mock_add_to_cg): mock_is_cg.return_value = False volume, vol_name = self.new_fake_vol() group, _ = self.new_fake_group() volume.group = group volume.group_id = group.id self.driver._add_to_group_if_needed(volume, vol_name) mock_is_cg.assert_called_once_with(group) mock_add_to_cg.assert_not_called() @mock.patch(BASE_DRIVER_OBJ + '._add_volume_to_consistency_group') @mock.patch('cinder.volume.volume_utils.is_group_a_cg_snapshot_type') def test_add_to_group_if_needed_with_cg(self, mock_is_cg, mock_add_to_cg): mock_is_cg.return_value = True volume, vol_name = self.new_fake_vol() group, _ = self.new_fake_group() volume.group = group volume.group_id = group.id self.driver._add_to_group_if_needed(volume, vol_name) mock_is_cg.assert_called_once_with(group) mock_add_to_cg.assert_called_once_with( group, vol_name ) @mock.patch('cinder.volume.volume_utils.is_group_a_cg_snapshot_type') def test_create_group(self, mock_is_cg): mock_is_cg.return_value = False group = fake_group.fake_group_type_obj(None) self.assertRaises( NotImplementedError, self.driver.create_group, self.ctxt, group ) mock_is_cg.assert_called_once_with(group) @mock.patch('cinder.volume.volume_utils.is_group_a_cg_snapshot_type') def test_delete_group(self, mock_is_cg): mock_is_cg.return_value = False group = mock.MagicMock() volumes = [fake_volume.fake_volume_obj(None)] self.assertRaises( NotImplementedError, self.driver.delete_group, self.ctxt, group, volumes ) mock_is_cg.assert_called_once_with(group) @mock.patch('cinder.volume.volume_utils.is_group_a_cg_snapshot_type') def test_update_group(self, mock_is_cg): mock_is_cg.return_value = False group = mock.MagicMock() self.assertRaises( NotImplementedError, self.driver.update_group, self.ctxt, group ) mock_is_cg.assert_called_once_with(group) @mock.patch('cinder.volume.volume_utils.is_group_a_cg_snapshot_type') def test_create_group_from_src(self, mock_is_cg): mock_is_cg.return_value = False group = mock.MagicMock() volumes = [fake_volume.fake_volume_obj(None)] self.assertRaises( NotImplementedError, self.driver.create_group_from_src, self.ctxt, group, volumes ) mock_is_cg.assert_called_once_with(group) @mock.patch('cinder.volume.volume_utils.is_group_a_cg_snapshot_type') def test_create_group_snapshot(self, mock_is_cg): mock_is_cg.return_value = False group_snapshot = mock.MagicMock() snapshots = [fake_snapshot.fake_snapshot_obj(None)] self.assertRaises( NotImplementedError, self.driver.create_group_snapshot, self.ctxt, group_snapshot, snapshots ) mock_is_cg.assert_called_once_with(group_snapshot) @mock.patch('cinder.volume.volume_utils.is_group_a_cg_snapshot_type') def test_delete_group_snapshot(self, mock_is_cg): mock_is_cg.return_value = False group_snapshot = mock.MagicMock() snapshots = 
[fake_snapshot.fake_snapshot_obj(None)] self.assertRaises( NotImplementedError, self.driver.create_group_snapshot, self.ctxt, group_snapshot, snapshots ) mock_is_cg.assert_called_once_with(group_snapshot) @mock.patch(BASE_DRIVER_OBJ + '.create_consistencygroup') @mock.patch('cinder.volume.group_types.get_group_type_specs') def test_create_group_with_cg(self, mock_get_specs, mock_create_cg): self.driver._is_replication_enabled = True mock_get_specs.return_value = ' True' group = mock.MagicMock() self.driver.create_group(self.ctxt, group) mock_create_cg.assert_called_once_with(self.ctxt, group, None) self.driver._is_replication_enabled = False @mock.patch(BASE_DRIVER_OBJ + '.delete_consistencygroup') @mock.patch('cinder.volume.group_types.get_group_type_specs') def test_delete_group_with_cg(self, mock_get_specs, mock_delete_cg): mock_get_specs.return_value = ' True' group = mock.MagicMock() volumes = [fake_volume.fake_volume_obj(None)] self.driver.delete_group(self.ctxt, group, volumes) mock_delete_cg.assert_called_once_with(self.ctxt, group, volumes) @mock.patch(BASE_DRIVER_OBJ + '.update_consistencygroup') @mock.patch('cinder.volume.group_types.get_group_type_specs') def test_update_group_with_cg(self, mock_get_specs, mock_update_cg): mock_get_specs.return_value = ' True' group = mock.MagicMock() addvollist = [mock.Mock()] remvollist = [mock.Mock()] self.driver.update_group( self.ctxt, group, addvollist, remvollist ) mock_update_cg.assert_called_once_with( self.ctxt, group, addvollist, remvollist ) @mock.patch(BASE_DRIVER_OBJ + '.create_consistencygroup_from_src') @mock.patch('cinder.volume.group_types.get_group_type_specs') def test_create_group_from_src_with_cg(self, mock_get_specs, mock_create): mock_get_specs.return_value = ' True' group = mock.MagicMock() volumes = [mock.Mock()] group_snapshot = mock.Mock() snapshots = [mock.Mock()] source_group = mock.MagicMock() source_vols = [mock.Mock()] group_type = True self.driver.create_group_from_src( self.ctxt, group, volumes, group_snapshot, snapshots, source_group, source_vols ) mock_create.assert_called_once_with( self.ctxt, group, volumes, group_snapshot, snapshots, source_group, source_vols, group_type ) @mock.patch(BASE_DRIVER_OBJ + '.create_cgsnapshot') @mock.patch('cinder.volume.group_types.get_group_type_specs') def test_create_group_snapshot_with_cg(self, mock_get_specs, mock_create_cgsnap): mock_get_specs.return_value = ' True' group_snapshot = mock.MagicMock() snapshots = [mock.Mock()] self.driver.create_group_snapshot( self.ctxt, group_snapshot, snapshots ) mock_create_cgsnap.assert_called_once_with( self.ctxt, group_snapshot, snapshots ) @mock.patch(BASE_DRIVER_OBJ + '.delete_cgsnapshot') @mock.patch('cinder.volume.group_types.get_group_type_specs') def test_delete_group_snapshot_with_cg(self, mock_get_specs, mock_delete_cg): mock_get_specs.return_value = ' True' group_snapshot = mock.MagicMock() snapshots = [mock.Mock()] self.driver.delete_group_snapshot( self.ctxt, group_snapshot, snapshots ) mock_delete_cg.assert_called_once_with( self.ctxt, group_snapshot, snapshots ) class PureNVMEDriverTestCase(PureBaseSharedDriverTestCase): def setUp(self): super(PureNVMEDriverTestCase, self).setUp() self.driver = pure.PureNVMEDriver(configuration=self.mock_config) self.driver._array = self.array self.mock_object(self.driver, '_get_current_array', return_value=self.array) self.driver._storage_protocol = 'NVMe-RoCE' self.mock_utils = mock.Mock() self.driver.transport_type = "rdma" self.driver.driver_utils = self.mock_utils 
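# Canned single-host listing (descriptive comment added here): reused as the _get_host return value by the NVMe _connect tests in this class.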
self.set_pure_hosts = ValidResponse(200, None, 1, [DotNotation(PURE_HOST.copy())], {}) def test_get_host(self): good_host = deepcopy(PURE_HOST) good_host.update(nqns=[INITIATOR_NQN]) bad_host = ValidResponse(200, None, 1, [], {}) self.array.get_hosts.return_value = bad_host real_result = self.driver._get_host(self.array, NVME_CONNECTOR) self.assertEqual([], real_result) hostg = ValidResponse(200, None, 1, [DotNotation(good_host)], {}) self.array.get_hosts.return_value = hostg real_result = self.driver._get_host(self.array, NVME_CONNECTOR) self.assertEqual([good_host], real_result) self.assert_error_propagates( [self.array.get_hosts], self.driver._get_host, self.array, NVME_CONNECTOR, ) def test_get_nguid(self): vol = {'created': '2019-01-28T14:16:54Z', 'name': 'volume-fdc9892f-5af0-47c8-9d4a-5167ac29dc98-cinder', 'serial': '9714B5CB91634C470002B2C8', 'size': 3221225472, 'source': 'volume-a366b1ba-ec27-4ca3-9051-c301b75bc778-cinder'} volumes_nguid = ValidResponse(200, None, 1, [DotNotation(vol)], {}) self.array.get_volumes.return_value = volumes_nguid returned_nguid = self.driver._get_nguid(vol['name']) expected_nguid = '009714b5cb91634c24a937470002b2c8' self.assertEqual(expected_nguid, returned_nguid) @mock.patch(NVME_DRIVER_OBJ + "._get_nguid") @mock.patch(NVME_DRIVER_OBJ + "._get_wwn") @mock.patch(NVME_DRIVER_OBJ + "._connect") @mock.patch(NVME_DRIVER_OBJ + "._get_target_nvme_ports") @mock.patch(BASE_DRIVER_OBJ + '._get_attachments') def test_initialize_connection( self, mock_attachments, mock_get_nvme_ports, mock_connection, mock_get_wwn, mock_get_nguid ): vol, vol_name = self.new_fake_vol() nvme_ports = ValidResponse(200, None, 4, [DotNotation(NVME_PORTS[x]) for x in range(8)], {}) mock_get_nvme_ports.return_value = nvme_ports.items mock_attachments.return_value = "Data", None mock_get_wwn.return_value = "3624a93709714b5cb91634c470002b2c8" mock_get_nguid.return_value = "0009714b5cb916324a9374c470002b2c8" mock_connection.return_value = CONN.items result = deepcopy(NVME_CONNECTION_INFO) real_result = self.driver.initialize_connection(vol, NVME_CONNECTOR) self.maxDiff = None self.assertDictEqual(result, real_result) mock_get_nvme_ports.assert_called_with(self.array) mock_connection.assert_called_with( self.array, vol_name, NVME_CONNECTOR ) self.assert_error_propagates( [mock_get_nvme_ports, mock_connection], self.driver.initialize_connection, vol, NVME_CONNECTOR, ) @mock.patch(NVME_DRIVER_OBJ + "._get_nguid") @mock.patch(NVME_DRIVER_OBJ + "._get_wwn") @mock.patch(NVME_DRIVER_OBJ + "._connect") @mock.patch(NVME_DRIVER_OBJ + "._get_target_nvme_ports") @mock.patch(BASE_DRIVER_OBJ + '._get_attachments') def test_initialize_connection_ipv6( self, mock_attachments, mock_get_nvme_ports, mock_connection, mock_get_wwn, mock_get_nguid ): vol, vol_name = self.new_fake_vol() nvme_ports = ValidResponse(200, None, 4, [DotNotation(NVME_PORTS[x]) for x in range(8)], {}) mock_get_nvme_ports.return_value = nvme_ports.items mock_attachments.return_value = "Data", None mock_get_wwn.return_value = "3624a93709714b5cb91634c470002b2c8" mock_get_nguid.return_value = "0009714b5cb916324a9374c470002b2c8" mock_connection.return_value = CONN.items self.mock_config.pure_nvme_cidr = NVME_CIDR_V6 result = deepcopy(NVME_CONNECTION_INFO_V6) real_result = self.driver.initialize_connection(vol, NVME_CONNECTOR) self.maxDiff = None self.assertDictEqual(result, real_result) mock_get_nvme_ports.assert_called_with(self.array) mock_connection.assert_called_with( self.array, vol_name, NVME_CONNECTOR ) self.assert_error_propagates( 
[mock_get_nvme_ports, mock_connection], self.driver.initialize_connection, vol, NVME_CONNECTOR, ) @mock.patch(NVME_DRIVER_OBJ + "._get_nguid") @mock.patch(NVME_DRIVER_OBJ + "._get_wwn") @mock.patch(NVME_DRIVER_OBJ + "._connect") @mock.patch(NVME_DRIVER_OBJ + "._get_target_nvme_ports") @mock.patch(BASE_DRIVER_OBJ + '._get_attachments') def test_initialize_connection_uniform_ac( self, mock_attachments, mock_get_nvme_ports, mock_connection, mock_get_wwn, mock_get_nguid ): repl_extra_specs = { "replication_type": " sync", "replication_enabled": " true", } vol, vol_name = self.new_fake_vol(type_extra_specs=repl_extra_specs) nvme_p = ValidResponse(200, None, 8, [DotNotation(NVME_PORTS[x]) for x in range(8)], {}) ac_nvme_p = ValidResponse(200, None, 8, [DotNotation(AC_NVME_PORTS[x]) for x in range(8)], {}) mock_get_nvme_ports.side_effect = [nvme_p.items, ac_nvme_p.items] mock_attachments.return_value = "Data", None mock_get_wwn.return_value = "3624a93709714b5cb91634c470002b2c8" mock_get_nguid.return_value = "0009714b5cb916324a9374c470002b2c8" mock_connection.side_effect = lambda *args, **kwargs: \ CONN.items if args and args[0] == self.array else AC_CONN.items result = deepcopy(NVME_CONNECTION_INFO_AC) self.driver._is_active_cluster_enabled = True mock_secondary = mock.MagicMock() self.driver._uniform_active_cluster_target_arrays = [mock_secondary] real_result = self.driver.initialize_connection(vol, NVME_CONNECTOR) self.assertDictEqual(result, real_result) mock_get_nvme_ports.assert_has_calls( [ mock.call(self.array), mock.call(mock_secondary), ] ) mock_connection.assert_has_calls( [ mock.call(self.array, vol_name, NVME_CONNECTOR), mock.call( mock_secondary, vol_name, NVME_CONNECTOR), ] ) @mock.patch(NVME_DRIVER_OBJ + "._get_nguid") @mock.patch(NVME_DRIVER_OBJ + "._get_wwn") @mock.patch(NVME_DRIVER_OBJ + "._connect") @mock.patch(NVME_DRIVER_OBJ + "._get_target_nvme_ports") @mock.patch(BASE_DRIVER_OBJ + '._get_attachments') def test_initialize_connection_uniform_ac_cidr( self, mock_attachments, mock_get_nvme_ports, mock_connection, mock_get_wwn, mock_get_nguid ): repl_extra_specs = { "replication_type": " sync", "replication_enabled": " true", } nvme_p = ValidResponse(200, None, 8, [DotNotation(NVME_PORTS[x]) for x in range(8)], {}) ac_nvme_p = ValidResponse(200, None, 8, [DotNotation(AC_NVME_PORTS[x]) for x in range(8)], {}) vol, vol_name = self.new_fake_vol(type_extra_specs=repl_extra_specs) mock_get_nvme_ports.side_effect = [nvme_p.items, ac_nvme_p.items] mock_attachments.return_value = "Data", None mock_get_wwn.return_value = "3624a93709714b5cb91634c470002b2c8" mock_get_nguid.return_value = "0009714b5cb916324a9374c470002b2c8" mock_connection.side_effect = lambda *args, **kwargs: \ CONN.items if args and args[0] == self.array else AC_CONN.items result = deepcopy(NVME_CONNECTION_INFO_AC_FILTERED) self.driver._is_active_cluster_enabled = True # Set up some CIDRs to block: this will block only one of the # ActiveCluster addresses from above, so we should check that we only # get four+three results back self.driver.configuration.pure_nvme_cidr = NVME_CIDR_FILTERED mock_secondary = mock.MagicMock() self.driver._uniform_active_cluster_target_arrays = [mock_secondary] real_result = self.driver.initialize_connection(vol, NVME_CONNECTOR) self.assertDictEqual(result, real_result) mock_get_nvme_ports.assert_has_calls( [ mock.call(self.array), mock.call(mock_secondary), ] ) mock_connection.assert_has_calls( [ mock.call(self.array, vol_name, NVME_CONNECTOR), mock.call(mock_secondary, vol_name, NVME_CONNECTOR), ] ) @mock.patch(NVME_DRIVER_OBJ + "._get_nguid") 
@mock.patch(NVME_DRIVER_OBJ + "._get_wwn") @mock.patch(NVME_DRIVER_OBJ + "._connect") @mock.patch(NVME_DRIVER_OBJ + "._get_target_nvme_ports") @mock.patch(BASE_DRIVER_OBJ + '._get_attachments') def test_initialize_connection_uniform_ac_cidrs( self, mock_attachments, mock_get_nvme_ports, mock_connection, mock_get_wwn, mock_get_nguid ): repl_extra_specs = { "replication_type": " sync", "replication_enabled": " true", } vol, vol_name = self.new_fake_vol(type_extra_specs=repl_extra_specs) nvme_p = ValidResponse(200, None, 8, [DotNotation(NVME_PORTS[x]) for x in range(8)], {}) ac_nvme_p = ValidResponse(200, None, 8, [DotNotation(AC_NVME_PORTS[x]) for x in range(8)], {}) mock_get_nvme_ports.side_effect = [nvme_p.items, ac_nvme_p.items] mock_attachments.return_value = "Data", None mock_get_wwn.return_value = "3624a93709714b5cb91634c470002b2c8" mock_get_nguid.return_value = "0009714b5cb916324a9374c470002b2c8" mock_connection.side_effect = lambda *args, **kwargs: \ CONN.items if args and args[0] == self.array else AC_CONN.items result = deepcopy(NVME_CONNECTION_INFO_AC_FILTERED_LIST) self.driver._is_active_cluster_enabled = True # Set up some CIDRs to block: this will allow only 2 addresses from # each host of the ActiveCluster, so we should check that we only # get two+two results back self.driver.configuration.pure_nvme = NVME_CIDR self.driver.configuration.pure_nvme_cidr_list = NVME_CIDRS_FILTERED mock_secondary = mock.MagicMock() self.driver._uniform_active_cluster_target_arrays = [mock_secondary] real_result = self.driver.initialize_connection(vol, NVME_CONNECTOR) self.assertDictEqual(result, real_result) mock_get_nvme_ports.assert_has_calls( [ mock.call(self.array), mock.call(mock_secondary), ] ) mock_connection.assert_has_calls( [ mock.call(self.array, vol_name, NVME_CONNECTOR), mock.call(mock_secondary, vol_name, NVME_CONNECTOR), ] ) @mock.patch(NVME_DRIVER_OBJ + "._get_nguid") @mock.patch(NVME_DRIVER_OBJ + "._get_wwn") @mock.patch(NVME_DRIVER_OBJ + "._connect") @mock.patch(NVME_DRIVER_OBJ + "._get_target_nvme_ports") @mock.patch(BASE_DRIVER_OBJ + '._get_attachments') def test_initialize_connection_multipath( self, mock_attachments, mock_get_nvme_ports, mock_connection, mock_get_wwn, mock_get_nguid ): self.driver.configuration.pure_nvme_transport = "roce" vol, vol_name = self.new_fake_vol() nvme_ports = ValidResponse(200, None, 4, [DotNotation(NVME_PORTS[x]) for x in range(8)], {}) mock_get_nvme_ports.return_value = nvme_ports.items mock_attachments.return_value = "Data", None mock_get_wwn.return_value = "3624a93709714b5cb91634c470002b2c8" mock_get_nguid.return_value = "0009714b5cb916324a9374c470002b2c8" mock_connection.return_value = CONN.items multipath_connector = deepcopy(NVME_CONNECTOR) multipath_connector["multipath"] = True result = deepcopy(NVME_CONNECTION_INFO) real_result = self.driver.initialize_connection( vol, multipath_connector ) self.assertDictEqual(result, real_result) mock_get_nvme_ports.assert_called_with(self.array) mock_connection.assert_called_with( self.array, vol_name, multipath_connector ) multipath_connector["multipath"] = False self.driver.initialize_connection(vol, multipath_connector) def test_get_target_nvme_ports(self): ports = [{'name': 'CT0.ETH4', 'wwn': None, 'iqn': None, 'nqn': TARGET_NQN}, {'name': 'CT0.ETH5', 'wwn': None, 'iqn': TARGET_IQN, 'nqn': None}, {'name': 'CT0.ETH20', 'wwn': None, 'iqn': None, 'nqn': TARGET_NQN}, {'name': 'CT0.FC4', 'wwn': TARGET_WWN, 'iqn': None, 'nqn': TARGET_NQN}, {'name': 'LACP0', 'wwn': None, 'iqn': None, 'nqn': TARGET_NQN}, 
{'name': 'LACP1', 'wwn': None, 'iqn': None, 'nqn': TARGET_NQN}] # Test for the nvme-tcp port self.driver.configuration.pure_nvme_transport = "tcp" self.array.get_controllers.return_value = CTRL_OBJ nvme_interfaces = ValidResponse(200, None, 4, [DotNotation(INTERFACES[x]) for x in range(4)], {}) self.array.get_network_interfaces.return_value = nvme_interfaces nvme_ports = ValidResponse(200, None, 4, [DotNotation(ports[x]) for x in range(4)], {}) self.array.get_ports.return_value = nvme_ports ret = self.driver._get_target_nvme_ports(self.array) self.assertEqual([ports[0]], [ret[0]]) # Test for failure if no NVMe ports self.array.get_network_interfaces.return_value = nvme_interfaces non_nvme_ports = ValidResponse(200, None, 1, [DotNotation(ports[1])], {}) self.array.get_ports.return_value = non_nvme_ports self.assertRaises( pure.PureDriverException, self.driver._get_target_nvme_ports, self.array, ) # Test for the nvme-roce port self.driver.configuration.pure_nvme_transport = "roce" nvme_roce_interface = ValidResponse(200, None, 1, [DotNotation(INTERFACES[2])], {}) self.array.get_network_interfaces.return_value = nvme_roce_interface nvme_roce_ports = ValidResponse(200, None, 1, [DotNotation(ports[2])], {}) self.array.get_ports.return_value = nvme_roce_ports ret = self.driver._get_target_nvme_ports(self.array) self.assertEqual([ports[2]], [ret[0]]) # Test for the nvme-roce LACP port self.driver.configuration.pure_nvme_transport = "roce" nvme_roce_interface = ValidResponse(200, None, 1, [DotNotation(INTERFACES[4])], {}) self.array.get_network_interfaces.return_value = nvme_roce_interface nvme_roce_ports = ValidResponse(200, None, 1, [DotNotation(ports[4])], {}) self.array.get_ports.return_value = nvme_roce_ports ret = self.driver._get_target_nvme_ports(self.array) self.assertEqual([ports[4]], [ret[0]]) # Test for the nvme-tcp LACP port self.driver.configuration.pure_nvme_transport = "tcp" nvme_roce_interface = ValidResponse(200, None, 1, [DotNotation(INTERFACES[5])], {}) self.array.get_network_interfaces.return_value = nvme_roce_interface nvme_roce_ports = ValidResponse(200, None, 1, [DotNotation(ports[5])], {}) self.array.get_ports.return_value = nvme_roce_ports ret = self.driver._get_target_nvme_ports(self.array) self.assertEqual([ports[5]], [ret[0]]) # Test for empty dict if only nvme-fc port self.driver.configuration.pure_nvme_transport = "roce" nvme_fc_interface = ValidResponse(200, None, 1, [DotNotation(INTERFACES[3])], {}) self.array.get_network_interfaces.return_value = nvme_fc_interface nvme_fc_ports = ValidResponse(200, None, 1, [DotNotation(ports[3])], {}) self.array.get_ports.return_value = nvme_fc_ports ret = self.driver._get_target_nvme_ports(self.array) self.assertEqual([], ret) def test_get_target_nvme_ports_with_no_ports(self): # Should raise an exception if there are no ports self.array.get_controllers.return_value = CTRL_OBJ nvme_no_ports = ValidResponse(200, None, 1, [], {}) nvme_no_interfaces = ValidResponse(200, None, 1, [], {}) self.array.get_ports.return_value = nvme_no_ports self.array.get_network_interfaces.return_value = nvme_no_interfaces self.assertRaises( pure.PureDriverException, self.driver._get_target_nvme_ports, self.array, ) def test_get_target_nvme_ports_with_only_fc_ports(self): # Should raise an exception of there are no nvme ports self.array.get_controllers.return_value = CTRL_OBJ PORTS_NONVME = [DotNotation(i) for i in PORTS_WITHOUT] nvme_noports = ValidResponse(200, None, 1, PORTS_NONVME, {}) nvme_nointerfaces = ValidResponse(200, None, 1, 
[DotNotation(INTERFACES[3])], {}) self.array.get_ports.return_value = nvme_noports self.array.get_network_interfaces.return_value = nvme_nointerfaces self.assertRaises( pure.PureDriverException, self.driver._get_target_nvme_ports, self.array, ) @mock.patch(DRIVER_PATH + ".flasharray.HostPatch") @mock.patch(DRIVER_PATH + ".flasharray.HostPost") @mock.patch(NVME_DRIVER_OBJ + "._get_host", autospec=True) @mock.patch(NVME_DRIVER_OBJ + "._generate_purity_host_name", spec=True) def test_connect(self, mock_generate, mock_host, mock_post_host, mock_patch_host): vol, vol_name = self.new_fake_vol() # Branch where host already exists pure_hosts = ValidResponse(200, None, 1, [DotNotation(PURE_HOST.copy())], {}) mock_host.return_value = pure_hosts.items self.array.post_connections.return_value = CONN real_result = self.driver._connect( self.array, vol_name, NVME_CONNECTOR ) self.assertEqual([CONNECTION_DATA], real_result) mock_host.assert_called_with( self.driver, self.array, NVME_CONNECTOR, remote=False ) self.assertFalse(mock_generate.called) self.assertFalse(self.array.create_host.called) self.array.post_connections.\ assert_called_with(host_names=[PURE_HOST_NAME], volume_names=[vol_name]) # Branch where new host is created empty_hosts = ValidResponse(200, None, 1, [], {}) mock_host.return_value = empty_hosts.items mock_generate.return_value = PURE_HOST_NAME real_result = self.driver._connect( self.array, vol_name, NVME_CONNECTOR ) mock_host.assert_called_with( self.driver, self.array, NVME_CONNECTOR, remote=False ) mock_generate.assert_called_with(NVME_CONNECTOR) self.array.post_hosts.assert_called_with( names=[PURE_HOST_NAME], host=mock_post_host() ) self.assertFalse(self.array.set_host.called) self.assertEqual([CONNECTION_DATA], real_result) mock_generate.reset_mock() self.array.reset_mock() self.assert_error_propagates( [ mock_host, mock_generate, self.array.post_connections, self.array.post_hosts, ], self.driver._connect, self.array, vol_name, NVME_CONNECTOR, ) self.mock_config.safe_get.return_value = "oracle-vm-server" # Branch where personality is set self.driver._connect(self.array, vol_name, NVME_CONNECTOR) self.assertEqual([CONNECTION_DATA], real_result) self.array.patch_hosts.assert_called_with( names=[PURE_HOST_NAME], host=mock_patch_host() ) @mock.patch(DRIVER_PATH + ".LOG") @mock.patch(NVME_DRIVER_OBJ + "._get_host", autospec=True) def test_connect_already_connected(self, mock_host, mock_logger): vol, vol_name = self.new_fake_vol() mock_host.return_value = self.set_pure_hosts.items self.array.get_connections.return_value = NCONN err_rsp = ErrorResponse(400, [DotNotation({'message': 'already exists'})], {}) self.array.post_connections.return_value = err_rsp self.array.get_volumes.return_value = MPV actual = self.driver._connect(self.array, vol_name, NVME_CONNECTOR) mock_logger.debug.\ assert_called_with('Volume connection already exists for Purity ' 'host with message: %s', 'already exists') self.assertEqual(NCONN.items, actual) self.assertTrue(self.array.post_connections.called) self.assertTrue(bool(self.array.get_connections)) @mock.patch(NVME_DRIVER_OBJ + "._get_host", autospec=True) def test_connect_already_connected_list_hosts_empty(self, mock_host): vol, vol_name = self.new_fake_vol() mock_host.return_value = self.set_pure_hosts.items self.array.get_connections.return_value = CONN err_rsp = ErrorResponse(400, [DotNotation({'message': 'unknown'})], {}) self.array.post_connections.return_value = err_rsp self.assertRaises( pure.PureDriverException, self.driver._connect, self.array, 
vol_name, NVME_CONNECTOR, ) self.assertTrue(self.array.post_connections.called) self.assertTrue(bool(self.array.get_connections)) @mock.patch(NVME_DRIVER_OBJ + "._get_host", autospec=True) def test_connect_already_connected_list_hosts_exception(self, mock_host): vol, vol_name = self.new_fake_vol() hosts = deepcopy(PURE_HOST) hosts['name'] = 'utest' pure_hosts = ValidResponse(200, None, 1, [DotNotation(hosts)], {}) mock_host.return_value = pure_hosts.items err_rsp = ErrorResponse(400, [DotNotation({'message': 'Unknown Error'})], {}) self.array.get_connections.return_value = CONN self.array.post_connections.return_value = err_rsp self.assertRaises( pure.PureDriverException, self.driver._connect, self.array, vol_name, NVME_CONNECTOR, ) self.assertTrue(self.array.post_connections.called) self.assertTrue(bool(self.array.get_connections)) @mock.patch(NVME_DRIVER_OBJ + "._generate_purity_host_name", spec=True) @mock.patch(NVME_DRIVER_OBJ + "._get_host", autospec=True) def test_connect_nqn_already_in_use(self, mock_host, mock_hname): vol, vol_name = self.new_fake_vol() empty_hosts = ValidResponse(200, None, 1, [], {}) mock_host.return_value = empty_hosts.items mock_hname.return_value = PURE_HOST_NAME err_iqn = ErrorResponse(400, [DotNotation({'message': 'already in use'})], {}) self.array.post_hosts.return_value = err_iqn # Because we mocked out retry make sure we are raising the right # exception to allow for retries to happen. self.assertRaises( pure.PureRetryableException, self.driver._connect, self.array, vol_name, NVME_CONNECTOR, ) @mock.patch(NVME_DRIVER_OBJ + "._generate_purity_host_name", spec=True) @mock.patch(NVME_DRIVER_OBJ + "._get_host", autospec=True) def test_connect_create_host_already_exists(self, mock_host, mock_hname): vol, vol_name = self.new_fake_vol() mock_host.return_value = [] mock_hname.return_value = PURE_HOST_NAME err_iqn = ErrorResponse(400, [DotNotation({'message': 'already exists'})], {}) self.array.post_hosts.return_value = err_iqn # Because we mocked out retry make sure we are raising the right # exception to allow for retries to happen. self.assertRaises( pure.PureRetryableException, self.driver._connect, self.array, vol_name, NVME_CONNECTOR, )
cinder-27.0.0/cinder/tests/unit/volume/drivers/test_qnap.py
# Copyright (c) 2016 QNAP Systems, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import collections from unittest import mock import urllib from ddt import data from ddt import ddt from ddt import unpack import eventlet from lxml import etree as ET from oslo_utils import units import requests from cinder import exception from cinder.tests.unit import test from cinder import utils from cinder.volume import driver from cinder.volume.drivers import qnap FAKE_LUNNAA = {'LUNNAA': 'fakeLunNaa'} FAKE_USER = 'admin' FAKE_PASSWORD = 'qnapadmin' FAKE_PASSWORD_ENCODED = 'cW5hcGFkbWlu' # Base64 encoding of FAKE_PASSWORD FAKE_PARMS = collections.OrderedDict(pwd=FAKE_PASSWORD_ENCODED, serviceKey='1', user=FAKE_USER) global_sanitized_params = urllib.parse.urlencode(FAKE_PARMS) header = { 'charset': 'utf-8', 'Content-Type': 'application/x-www-form-urlencoded' } login_url = '/cgi-bin/authLogin.cgi?' fake_login_url = 'http://1.2.3.4:8080' + login_url get_basic_info_url = '/cgi-bin/authLogin.cgi' fake_get_basic_info_url = 'http://1.2.3.4:8080' + get_basic_info_url FAKE_RES_DETAIL_DATA_LOGIN = """ """ FAKE_RES_DETAIL_DATA_NO_AUTHPASSED = """ """ FAKE_RES_DETAIL_DATA_GETBASIC_INFO_TS = """ """ FAKE_RES_DETAIL_DATA_GETBASIC_INFO = """ """ FAKE_RES_DETAIL_DATA_GETBASIC_INFO_114 = """ """ FAKE_RES_DETAIL_DATA_GETBASIC_INFO_TES = """ """ FAKE_RES_DETAIL_DATA_GETBASIC_INFO_TES_433 = """ """ FAKE_RES_DETAIL_DATA_GETBASIC_INFO_UNSUPPORT = """ """ FAKE_RES_DETAIL_DATA_GETBASIC_INFO_UNSUPPORT_TS = """ """ FAKE_RES_DETAIL_DATA_GETBASIC_INFO_UNSUPPORT_TES = """ """ FAKE_RES_DETAIL_DATA_LUN_INFO = """ 1 """ FAKE_RES_DETAIL_DATA_LUN_INFO_FAIL = """ 1 """ FAKE_RES_DETAIL_DATA_SNAPSHOT_INFO = """ fakeSnapshotId fakeSnapshotName 0 """ FAKE_RES_DETAIL_DATA_SNAPSHOT_INFO_FAIL = """ fakeSnapshotId fakeSnapshotName -1 """ FAKE_RES_DETAIL_DATA_MAPPED_LUN_INFO = """ 2 """ FAKE_RES_DETAIL_DATA_ONE_LUN_INFO = """ """ FAKE_RES_DETAIL_DATA_MAPPED_ONE_LUN_INFO = """ """ FAKE_RES_DETAIL_DATA_SNAPSHOT = """ """ FAKE_RES_DETAIL_DATA_SNAPSHOT_WITHOUT_SNAPSHOT = """ """ FAKE_RES_DETAIL_DATA_SNAPSHOT_WITHOUT_LUN = """ """ FAKE_RES_DETAIL_DATA_SNAPSHOT_FAIL = """ """ FAKE_RES_DETAIL_DATA_SPECIFIC_POOL_INFO = """ """ FAKE_RES_DETAIL_DATA_SPECIFIC_POOL_INFO_FAIL = """ """ FAKE_RES_DETAIL_DATA_ISCSI_PORTAL_INFO = """ """ FAKE_RES_DETAIL_DATA_ETHERNET_IP = """ """ FAKE_RES_DETAIL_DATA_CREATE_LUN = """ """ FAKE_RES_DETAIL_DATA_CREATE_LUN_FAIL = """ """ FAKE_RES_DETAIL_DATA_CREATE_LUN_BUSY = """ """ FAKE_RES_DETAIL_DATA_CREATE_TARGET = """ """ FAKE_RES_DETAIL_DATA_CREATE_TARGET_FAIL = """ """ FAKE_RES_DETAIL_DATA_GET_ALL_ISCSI_PORTAL_SETTING = """ """ FAKE_RES_DETAIL_DATA_TARGET_INFO = """ """ FAKE_RES_DETAIL_DATA_TARGET_INFO_FAIL = """ """ FAKE_RES_DETAIL_DATA_TARGET_INFO_BY_INITIATOR = """ """ FAKE_RES_DETAIL_DATA_TARGET_INFO_BY_INITIATOR_FAIL = """ """ FAKE_RES_DETAIL_GET_ALL_ISCSI_PORTAL_SETTING = { 'data': FAKE_RES_DETAIL_DATA_GET_ALL_ISCSI_PORTAL_SETTING, 'error': None, 'http_status': 'fackStatus' } FAKE_RES_DETAIL_ISCSI_PORTAL_INFO = { 'data': FAKE_RES_DETAIL_DATA_ISCSI_PORTAL_INFO, 'error': None, 'http_status': 'fackStatus' } def create_configuration( username, password, management_url, san_iscsi_ip, poolname, thin_provision=True, compression=True, deduplication=False, ssd_cache=False, verify_ssl=True): """Create configuration.""" configuration = mock.Mock() configuration.san_login = username configuration.san_password = password configuration.qnap_management_url = management_url configuration.san_thin_provision = thin_provision configuration.qnap_compression = compression configuration.qnap_deduplication = deduplication 
configuration.qnap_ssd_cache = ssd_cache configuration.san_iscsi_ip = san_iscsi_ip configuration.qnap_poolname = poolname configuration.safe_get.return_value = 'QNAP' configuration.target_ip_address = '1.2.3.4' configuration.qnap_storage_protocol = 'iscsi' configuration.reserved_percentage = 0 configuration.use_chap_auth = False configuration.driver_ssl_cert_verify = verify_ssl return configuration class QnapDriverBaseTestCase(test.TestCase): """Base Class for the QnapDriver Tests.""" def setUp(self): """Setup the Qnap Driver Base TestCase.""" super(QnapDriverBaseTestCase, self).setUp() self.driver = None self.mock_session = None @staticmethod def sanitize(params): sanitized = {_key: str(_value) for _key, _value in params.items() if _value is not None} sanitized = utils.create_ordereddict(sanitized) return urllib.parse.urlencode(sanitized) class SnapshotClass(object): """Snapshot Class.""" volume = {} name = '' volume_name = '' volume_size = 0 metadata = {} def __init__(self, volume, volume_size): """Init.""" self.volume = volume self.volume_size = volume_size self.metadata = {'snapshot_id': 'fakeSnapshotId'} def __getitem__(self, arg): """Getitem.""" return { 'display_name': 'fakeSnapshotDisplayName', 'id': 'fakeSnapshotId', 'volume_size': self.volume_size, 'metadata': self.metadata }[arg] def __contains__(self, arg): """Getitem.""" return { 'display_name': 'fakeSnapshotDisplayName', 'id': 'fakeSnapshotId', 'volume_size': self.volume_size, 'metadata': self.metadata }[arg] class VolumeClass(object): """Volume Class.""" display_name = '' id = '' size = 0 name = '' volume_metadata = [] def __init__(self, display_name, id, size, name): """Init.""" self.display_name = display_name self.id = id self.size = size self.name = name self.volume_metadata = [{'key': 'LUNNAA', 'value': 'fakeLunNaa'}, {'key': 'LUNIndex', 'value': 'fakeLunIndex'}] self.metadata = {'LUNNAA': 'fakeLunNaa', 'LUNIndex': 'fakeLunIndex'} self.provider_location = '%(host)s:%(port)s,1 %(name)s %(tgt_lun)s' % { 'host': '1.2.3.4', 'port': '3260', 'name': 'fakeTargetIqn', 'tgt_lun': '1' } self.volume_type = { 'extra_specs': { 'qnap_thin_provision': 'True', 'qnap_compression': 'True', 'qnap_deduplication': 'False', 'qnap_ssd_cache': 'False' } } def __getitem__(self, arg): """Getitem.""" return { 'display_name': self.display_name, 'size': self.size, 'id': self.id, 'name': self.name, 'volume_metadata': self.volume_metadata, 'metadata': self.metadata, 'provider_location': self.provider_location, 'volume_type': self.volume_type }[arg] def __contains__(self, arg): """Getitem.""" return { 'display_name': self.display_name, 'size': self.size, 'id': self.id, 'name': self.name, 'volume_metadata': self.volume_metadata, 'metadata': self.metadata, 'provider_location': self.provider_location, 'volume_type': self.volume_type }[arg] def __setitem__(self, key, value): """Setitem.""" if key == 'display_name': self.display_name = value class FakeLoginResponse(object): """Fake login response.""" status_code = 'fackStatus' @property def text(self): """Mock response.text.""" return FAKE_RES_DETAIL_DATA_LOGIN class FakeNoAuthPassedResponse(object): """Fake no auth passed response.""" status_code = 'fackStatus' @property def text(self): """Mock response.read.""" return FAKE_RES_DETAIL_DATA_NO_AUTHPASSED class FakeGetBasicInfoResponse(object): """Fake GetBasicInfo response.""" status_code = 'fackStatus' @property def text(self): """Mock response.text.""" return FAKE_RES_DETAIL_DATA_GETBASIC_INFO class FakeGetBasicInfo114Response(object): """Fake 
GetBasicInfo114 response.""" status_code = 'fackStatus' @property def text(self): """Mock response.read.""" return FAKE_RES_DETAIL_DATA_GETBASIC_INFO_114 class FakeGetBasicInfoTsResponse(object): """Fake GetBasicInfoTs response.""" status_code = 'fackStatus' @property def text(self): """Mock response.read.""" return FAKE_RES_DETAIL_DATA_GETBASIC_INFO_TS class FakeGetBasicInfoTesResponse(object): """Fake GetBasicInfoTes response.""" status_code = 'fackStatus' @property def text(self): """Mock response.read.""" return FAKE_RES_DETAIL_DATA_GETBASIC_INFO_TES class FakeGetBasicInfoTes433Response(object): """Fake GetBasicInfoTes response.""" status_code = 'fackStatus' @property def text(self): """Mock response.read.""" return FAKE_RES_DETAIL_DATA_GETBASIC_INFO_TES_433 class FakeGetBasicInfoUnsupportResponse(object): """Fake GetBasicInfoUnsupport response.""" status_code = 'fackStatus' @property def text(self): """Mock response.read.""" return FAKE_RES_DETAIL_DATA_GETBASIC_INFO_UNSUPPORT class FakeGetBasicInfoUnsupportTsResponse(object): """Fake GetBasicInfoUnsupportTs response.""" status_code = 'fackStatus' @property def text(self): """Mock response.read.""" return FAKE_RES_DETAIL_DATA_GETBASIC_INFO_UNSUPPORT_TS class FakeGetBasicInfoUnsupportTesResponse(object): """Fake GetBasicInfoUnsupportTes response.""" status_code = 'fackStatus' @property def text(self): """Mock response.read.""" return FAKE_RES_DETAIL_DATA_GETBASIC_INFO_UNSUPPORT_TES class FakeLunInfoResponse(object): """Fake lun info response.""" status_code = 'fackStatus' @property def text(self): """Mock response.read.""" return FAKE_RES_DETAIL_DATA_LUN_INFO class FakeLunInfoFailResponse(object): """Fake lun info response.""" status_code = 'fackStatus' @property def text(self): """Mock response.read.""" return FAKE_RES_DETAIL_DATA_LUN_INFO_FAIL class FakeSnapshotInfoResponse(object): """Fake snapshot info response.""" status_code = 'fackStatus' @property def text(self): """Mock response.read.""" return FAKE_RES_DETAIL_DATA_SNAPSHOT_INFO class FakeSnapshotInfoFailResponse(object): """Fake snapshot info response.""" status_code = 'fackStatus' @property def text(self): """Mock response.read.""" return FAKE_RES_DETAIL_DATA_SNAPSHOT_INFO_FAIL class FakeOneLunInfoResponse(object): """Fake one lun info response.""" status_code = 'fackStatus' @property def text(self): """Mock response.read.""" return FAKE_RES_DETAIL_DATA_ONE_LUN_INFO class FakeMappedOneLunInfoResponse(object): """Fake one lun info response.""" status_code = 'fackStatus' @property def text(self): """Mock response.text.""" return FAKE_RES_DETAIL_DATA_MAPPED_ONE_LUN_INFO class FakePoolInfoResponse(object): """Fake pool info response.""" status_code = 'fackStatus' @property def text(self): """Mock response.text.""" return FAKE_RES_DETAIL_DATA_SPECIFIC_POOL_INFO class FakePoolInfoFailResponse(object): """Fake pool info response.""" status_code = 'fackStatus' @property def text(self): """Mock response.text.""" return FAKE_RES_DETAIL_DATA_SPECIFIC_POOL_INFO_FAIL class FakeCreateLunResponse(object): """Fake create lun response.""" status_code = 'fackStatus' @property def text(self): """Mock response.text.""" return FAKE_RES_DETAIL_DATA_CREATE_LUN class FakeCreateLunFailResponse(object): """Fake create lun response.""" status_code = 'fackStatus' @property def text(self): """Mock response.text.""" return FAKE_RES_DETAIL_DATA_CREATE_LUN_FAIL class FakeCreateLunBusyResponse(object): """Fake create lun response.""" status_code = 'fackStatus' @property def text(self): """Mock 
response.text.""" return FAKE_RES_DETAIL_DATA_CREATE_LUN_BUSY class FakeCreateTargetResponse(object): """Fake create target response.""" status_code = 'fackStatus' @property def text(self): """Mock response.text.""" return FAKE_RES_DETAIL_DATA_CREATE_TARGET class FakeCreateTargetFailResponse(object): """Fake create target response.""" status_code = 'fackStatus' @property def text(self): """Mock response.text.""" return FAKE_RES_DETAIL_DATA_CREATE_TARGET_FAIL class FakeCreateSnapshotResponse(object): """Fake Create snapshot inforesponse.""" status_code = 'fackStatus' @property def text(self): """Mock response.text.""" return FAKE_RES_DETAIL_DATA_SNAPSHOT class FakeCreateSnapshotWithoutSnapshotResponse(object): """Fake Create snapshot inforesponse.""" status_code = 'fackStatus' @property def text(self): """Mock response.text.""" return FAKE_RES_DETAIL_DATA_SNAPSHOT_WITHOUT_SNAPSHOT class FakeCreateSnapshotWithoutLunResponse(object): """Fake Create snapshot inforesponse.""" status_code = 'fackStatus' @property def text(self): """Mock response.text.""" return FAKE_RES_DETAIL_DATA_SNAPSHOT_WITHOUT_LUN class FakeCreateSnapshotFailResponse(object): """Fake Create snapshot inforesponse.""" status_code = 'fackStatus' @property def text(self): """Mock response.text.""" return FAKE_RES_DETAIL_DATA_SNAPSHOT_FAIL class FakeGetAllIscsiPortalSetting(object): """Fake get all iSCSI portal setting.""" status_code = 'fackStatus' @property def text(self): """Mock response.text.""" return FAKE_RES_DETAIL_DATA_GET_ALL_ISCSI_PORTAL_SETTING class FakeGetAllEthernetIp(object): """Fake get all ethernet ip setting.""" status_code = 'fackStatus' @property def text(self): """Mock response.text.""" return FAKE_RES_DETAIL_DATA_ETHERNET_IP class FakeTargetInfo(object): """Fake target info setting.""" status_code = 'fackStatus' @property def text(self): """Mock response.text.""" return FAKE_RES_DETAIL_DATA_TARGET_INFO class FakeTargetInfoFail(object): """Fake target info setting.""" status_code = 'fackStatus' @property def text(self): """Mock response.text.""" return FAKE_RES_DETAIL_DATA_TARGET_INFO_FAIL class FakeTargetInfoByInitiator(object): """Fake target info setting.""" status_code = 'fackStatus' @property def text(self): """Mock response.text.""" return FAKE_RES_DETAIL_DATA_TARGET_INFO_BY_INITIATOR class FakeTargetInfoByInitiatorFail(object): """Fake target info setting.""" status_code = 'fackStatus' @property def text(self): """Mock response.text.""" return FAKE_RES_DETAIL_DATA_TARGET_INFO_BY_INITIATOR_FAIL @ddt class QnapDriverLoginTestCase(QnapDriverBaseTestCase): """Tests do_setup api.""" def setUp(self): """Setup the Qnap Share Driver login TestCase.""" super(QnapDriverLoginTestCase, self).setUp() self.mock_object(requests, 'request') @data({'mng_url': 'http://1.2.3.4:8080', 'port': '8080', 'ssl': False, 'get_basic_info_response': FakeGetBasicInfoResponse()}, {'mng_url': 'https://1.2.3.4:443', 'port': '443', 'ssl': True, 'get_basic_info_response': FakeGetBasicInfoResponse()}, {'mng_url': 'http://1.2.3.4:8080', 'port': '8080', 'ssl': False, 'get_basic_info_response': FakeGetBasicInfoTsResponse()}, {'mng_url': 'https://1.2.3.4:443', 'port': '443', 'ssl': True, 'get_basic_info_response': FakeGetBasicInfoTsResponse()}, {'mng_url': 'http://1.2.3.4:8080', 'port': '8080', 'ssl': False, 'get_basic_info_response': FakeGetBasicInfoTesResponse()}, {'mng_url': 'https://1.2.3.4:443', 'port': '443', 'ssl': True, 'get_basic_info_response': FakeGetBasicInfoTesResponse()}, {'mng_url': 'http://1.2.3.4:8080', 'port': '8080', 
'ssl': False, 'get_basic_info_response': FakeGetBasicInfoTes433Response()}, {'mng_url': 'https://1.2.3.4:443', 'port': '443', 'ssl': True, 'get_basic_info_response': FakeGetBasicInfoTes433Response()} ) @unpack def test_do_setup_positive(self, mng_url, port, ssl, get_basic_info_response): """Test do_setup with http://1.2.3.4:8080.""" fake_login_response = FakeLoginResponse() fake_get_basic_info_response = get_basic_info_response mock_request = requests.request mock_request.side_effect = ([ fake_login_response, fake_get_basic_info_response, fake_login_response]) self.driver = qnap.QnapISCSIDriver( configuration=create_configuration( FAKE_USER, FAKE_PASSWORD, mng_url, '1.2.3.4', 'Storage Pool 1', True, verify_ssl=ssl)) self.driver.do_setup('context') self.assertEqual('fakeSid', self.driver.api_executor.sid) self.assertEqual(FAKE_USER, self.driver.api_executor.username) self.assertEqual(FAKE_PASSWORD, self.driver.api_executor.password) self.assertEqual('1.2.3.4', self.driver.api_executor.ip) self.assertEqual(port, self.driver.api_executor.port) self.assertEqual(ssl, self.driver.api_executor.ssl) @data({'mng_url': 'http://1.2.3.4:8080', 'port': '8080', 'ssl': False}, {'mng_url': 'https://1.2.3.4:443', 'port': '443', 'ssl': True}) @unpack def test_do_setup_negative_with_configuration_not_set(self, mng_url, port, ssl): """Test do_setup with http://1.2.3.4:8080.""" fake_login_response = FakeLoginResponse() fake_get_basic_info_response = FakeGetBasicInfoResponse() mock_request = requests.request mock_request.side_effect = ([ fake_login_response, fake_get_basic_info_response, fake_login_response]) self.driver = qnap.QnapISCSIDriver( configuration=create_configuration( FAKE_USER, FAKE_PASSWORD, mng_url, '1.2.3.4', 'Storage Pool 1', True, verify_ssl=ssl)) del self.driver.configuration.qnap_management_url self.assertRaises(exception.InvalidInput, self.driver.do_setup, 'context') @data({'mng_url': 'http://1.2.3.4:8080', 'port': '8080', 'ssl': False, 'get_basic_info_response': FakeGetBasicInfoUnsupportTsResponse()}, {'mng_url': 'https://1.2.3.4:443', 'port': '443', 'ssl': True, 'get_basic_info_response': FakeGetBasicInfoUnsupportTsResponse()}) @unpack def test_do_setup_negative_with_unsupport_nas(self, mng_url, port, ssl, get_basic_info_response): """Test do_setup with http://1.2.3.4:8080.""" fake_login_response = FakeLoginResponse() fake_get_basic_info_response = get_basic_info_response mock_request = requests.request mock_request.side_effect = ([ fake_login_response, fake_get_basic_info_response, fake_login_response]) self.driver = qnap.QnapISCSIDriver( configuration=create_configuration( FAKE_USER, FAKE_PASSWORD, mng_url, '1.2.3.4', 'Storage Pool 1', True, verify_ssl=ssl)) self.assertRaises(exception.VolumeDriverException, self.driver.do_setup, 'context') @data({'mng_url': 'http://1.2.3.4:8080', 'port': '8080', 'ssl': False}, {'mng_url': 'https://1.2.3.4:443', 'port': '443', 'ssl': True}) @unpack def test_check_for_setup_error(self, mng_url, port, ssl): """Test check_for_setup_error.""" fake_login_response = FakeLoginResponse() fake_get_basic_info_response = FakeGetBasicInfoResponse() mock_request = requests.request mock_request.side_effect = ([ fake_login_response, fake_get_basic_info_response, fake_login_response]) self.driver = qnap.QnapISCSIDriver( configuration=create_configuration( FAKE_USER, FAKE_PASSWORD, mng_url, '1.2.3.4', 'Storage Pool 1', True, verify_ssl=ssl)) self.driver.do_setup('context') self.driver.check_for_setup_error() self.assertEqual('fakeSid', self.driver.api_executor.sid) 
self.assertEqual(FAKE_USER, self.driver.api_executor.username) self.assertEqual(FAKE_PASSWORD, self.driver.api_executor.password) self.assertEqual('1.2.3.4', self.driver.api_executor.ip) self.assertEqual(port, self.driver.api_executor.port) self.assertEqual(ssl, self.driver.api_executor.ssl) @ddt class QnapDriverVolumeTestCase(QnapDriverBaseTestCase): """Tests volume related APIs.""" def get_lun_info_return_value(self): """Return the lun from get_lun_info method.""" root = ET.fromstring(FAKE_RES_DETAIL_DATA_LUN_INFO) lun_list = root.find('iSCSILUNList') lun_info_tree = lun_list.findall('LUNInfo') for lun in lun_info_tree: return lun def get_mapped_lun_info_return_value(self): """Return the mapped lun from get_lun_info method.""" root = ET.fromstring(FAKE_RES_DETAIL_DATA_MAPPED_LUN_INFO) lun_list = root.find('iSCSILUNList') lun_info_tree = lun_list.findall('LUNInfo') for lun in lun_info_tree: return lun def get_one_lun_info_return_value(self): """Return the lun from get_one_lun_info method.""" fake_one_lun_info_response = FakeOneLunInfoResponse() ret = {'data': fake_one_lun_info_response.text, 'error': None, 'http_status': fake_one_lun_info_response.status_code} return ret def get_mapped_one_lun_info_return_value(self): """Return the mapped lun from get_one_lun_info method.""" fake_mapped_one_lun_info_response = FakeMappedOneLunInfoResponse() ret = {'data': fake_mapped_one_lun_info_response.text, 'error': None, 'http_status': fake_mapped_one_lun_info_response.status_code} return ret def get_snapshot_info_return_value(self): """Return the snapshot from get_snapshot_info method.""" root = ET.fromstring(FAKE_RES_DETAIL_DATA_SNAPSHOT) snapshot_list = root.find('SnapshotList') snapshot_info_tree = snapshot_list.findall('row') for snapshot in snapshot_info_tree: return snapshot def get_target_info_return_value(self): """Return the target from get_target_info method.""" root = ET.fromstring(FAKE_RES_DETAIL_DATA_TARGET_INFO) target_info = root.find('targetInfo/row') return target_info @mock.patch.object(qnap.QnapISCSIDriver, '_get_volume_metadata') @mock.patch.object(qnap.QnapISCSIDriver, '_gen_random_name') @mock.patch('cinder.volume.drivers.qnap.QnapAPIExecutor') def test_create_volume_positive( self, mock_api_executor, mock_gen_random_name, mock_get_volume_metadata): """Test create_volume with fake_volume.""" fake_volume = VolumeClass( 'fakeDisplayName', 'fakeId', 100, 'fakeLunName') mock_api_executor.return_value.get_basic_info.return_value = ( 'ES1640dc ', 'ES1640dc ', '1.1.3') mock_api_return = mock_api_executor.return_value mock_api_return.get_lun_info.side_effect = [ None, self.get_lun_info_return_value()] mock_gen_random_name.return_value = 'fakeLun' mock_api_return.create_lun.return_value = 'fakeIndex' mock_get_volume_metadata.return_value = {} self.driver = qnap.QnapISCSIDriver( configuration=create_configuration( FAKE_USER, FAKE_PASSWORD, 'http://1.2.3.4:8080', '1.2.3.4', 'Pool1', True)) self.mock_object(eventlet, 'sleep') self.driver.do_setup('context') self.driver.create_volume(fake_volume) mock_api_return.create_lun.assert_called_once_with( fake_volume, self.driver.configuration.qnap_poolname, 'fakeLun', True, False, True, False) expected_call_list = [ mock.call(LUNName='fakeLun'), mock.call(LUNIndex='fakeIndex')] self.assertEqual( expected_call_list, mock_api_return.get_lun_info.call_args_list) @mock.patch.object( qnap.QnapISCSIDriver, '_get_lun_naa_from_volume_metadata') @mock.patch('cinder.volume.drivers.qnap.QnapAPIExecutor') def test_delete_volume_positive_without_mapped_lun( self, mock_api_executor, 
mock_get_lun_naa_from_volume_metadata): """Test delete_volume with fake_volume.""" fake_volume = VolumeClass( 'fakeDisplayName', 'fakeId', 100, 'fakeLunName') mock_api_executor.return_value.get_basic_info.return_value = ( 'ES1640dc ', 'ES1640dc ', '1.1.3') mock_get_lun_naa_from_volume_metadata.return_value = 'fakeLunNaa' mock_api_return = mock_api_executor.return_value mock_api_return.get_one_lun_info.return_value = ( self.get_one_lun_info_return_value()) mock_api_return.delete_lun.return_value = None self.driver = qnap.QnapISCSIDriver( configuration=create_configuration( FAKE_USER, FAKE_PASSWORD, 'http://1.2.3.4:8080', '1.2.3.4', 'Pool1', True)) self.driver.do_setup('context') self.driver.delete_volume(fake_volume) mock_api_return.delete_lun.assert_called_once_with( 'fakeLunIndex') @mock.patch.object( qnap.QnapISCSIDriver, '_get_lun_naa_from_volume_metadata') @mock.patch('cinder.volume.drivers.qnap.QnapAPIExecutor') def test_delete_volume_positive_with_mapped_lun( self, mock_api_executor, mock_get_lun_naa_from_volume_metadata): """Test delete_volume with fake_volume.""" fake_volume = VolumeClass( 'fakeDisplayName', 'fakeId', 100, 'fakeLunName') mock_api_executor.return_value.get_basic_info.return_value = ( 'ES1640dc ', 'ES1640dc ', '1.1.3') mock_get_lun_naa_from_volume_metadata.return_value = 'fakeLunNaa' mock_api_return = mock_api_executor.return_value mock_api_return.get_one_lun_info.return_value = ( self.get_mapped_one_lun_info_return_value()) mock_api_return.delete_lun.return_value = None self.driver = qnap.QnapISCSIDriver( configuration=create_configuration( FAKE_USER, FAKE_PASSWORD, 'http://1.2.3.4:8080', '1.2.3.4', 'Pool1', True)) self.driver.do_setup('context') self.driver.delete_volume(fake_volume) mock_api_return.delete_lun.assert_called_once_with( 'fakeLunIndex') @mock.patch.object( qnap.QnapISCSIDriver, '_get_lun_naa_from_volume_metadata') @mock.patch('cinder.volume.drivers.qnap.QnapAPIExecutor') def test_delete_volume_negative_without_lun_naa( self, mock_api_executor, mock_get_lun_naa_from_volume_metadata): """Test delete_volume with fake_volume.""" fake_volume = VolumeClass( 'fakeDisplayName', 'fakeId', 100, 'fakeLunName') mock_api_executor.return_value.get_basic_info.return_value = ( 'ES1640dc ', 'ES1640dc ', '1.1.3') mock_get_lun_naa_from_volume_metadata.return_value = '' mock_api_return = mock_api_executor.return_value mock_api_return.get_one_lun_info.return_value = ( self.get_one_lun_info_return_value()) mock_api_return.delete_lun.return_value = None self.driver = qnap.QnapISCSIDriver( configuration=create_configuration( FAKE_USER, FAKE_PASSWORD, 'http://1.2.3.4:8080', '1.2.3.4', 'Pool1', True)) self.driver.do_setup('context') self.driver.delete_volume(fake_volume) @mock.patch.object( qnap.QnapISCSIDriver, '_get_lun_naa_from_volume_metadata') @mock.patch.object(qnap.QnapISCSIDriver, '_create_snapshot_name') @mock.patch.object(qnap.QnapISCSIDriver, '_gen_random_name') @mock.patch.object(qnap.QnapISCSIDriver, '_get_volume_metadata') @mock.patch('cinder.volume.drivers.qnap.QnapAPIExecutor') def test_create_cloned_volume_volume_size_less_src_verf( self, mock_api_executor, mock_get_volume_metadata, mock_gen_random_name, mock_create_snapshot_name, mock_get_lun_naa_from_volume_metadata): """Test create cloned volume.""" fake_volume = VolumeClass( 'fakeDisplayName', 'fakeId', 90, 'fakeLunName') fake_src_vref = VolumeClass( 'fakeSrcVrefName', 'fakeId', 100, 'fakeSrcVref') mock_get_lun_naa_from_volume_metadata.return_value = 'fakeLunNaa' 
mock_api_executor.return_value.get_basic_info.return_value = ( 'ES1640dc ', 'ES1640dc ', '1.1.3') mock_get_volume_metadata.return_value = {} mock_api_executor.return_value.get_lun_info.side_effect = [ self.get_lun_info_return_value(), None, self.get_lun_info_return_value()] mock_gen_random_name.return_value = 'fakeLun' mock_create_snapshot_name.return_value = 'fakeSnapshot' mock_api_executor.return_value.get_snapshot_info.return_value = ( self.get_snapshot_info_return_value()) mock_api_executor.return_value.create_snapshot_api.return_value = ( 'fakeSnapshotId') mock_api_executor.return_value.clone_snapshot.return_value = None self.driver = qnap.QnapISCSIDriver( configuration=create_configuration( FAKE_USER, FAKE_PASSWORD, 'http://1.2.3.4:8080', '1.2.3.4', 'Pool1', True)) self.mock_object(eventlet, 'sleep') self.driver.do_setup('context') self.driver.create_cloned_volume(fake_volume, fake_src_vref) expected_call_list = [ mock.call(LUNNAA='fakeLunNaa'), mock.call(LUNName='fakeLun'), mock.call(LUNName='fakeLun')] self.assertEqual( expected_call_list, mock_api_executor.return_value.get_lun_info.call_args_list) expected_call_list = [ mock.call(lun_index='fakeLunIndex', snapshot_name='fakeSnapshot')] self.assertEqual( expected_call_list, mock_api_executor.return_value.get_snapshot_info.call_args_list) mock_api_return = mock_api_executor.return_value mock_api_return.create_snapshot_api.assert_called_once_with( 'fakeLunIndex', 'fakeSnapshot') mock_api_return.clone_snapshot.assert_called_once_with( 'fakeSnapshotId', 'fakeLun') @mock.patch.object( qnap.QnapISCSIDriver, '_get_lun_naa_from_volume_metadata') @mock.patch.object(qnap.QnapISCSIDriver, '_extend_lun') @mock.patch.object(qnap.QnapISCSIDriver, '_gen_random_name') @mock.patch.object(qnap.QnapISCSIDriver, '_get_volume_metadata') @mock.patch('cinder.volume.drivers.qnap.QnapAPIExecutor') def test_create_cloned_volume_volume_size_morethan_src_verf( self, mock_api_executor, mock_get_volume_metadata, mock_gen_random_name, mock_extend_lun, mock_get_lun_naa_from_volume_metadata): """Test create cloned volume.""" fake_volume = VolumeClass( 'fakeDisplayName', 'fakeId', 100, 'fakeLunName') fake_src_vref = VolumeClass( 'fakeSrcVrefName', 'fakeId', 90, 'fakeSrcVref') mock_get_lun_naa_from_volume_metadata.return_value = 'fakeLunNaa' mock_api_executor.return_value.get_basic_info.return_value = ( 'ES1640dc ', 'ES1640dc ', '1.1.3') mock_get_volume_metadata.return_value = FAKE_LUNNAA mock_api_executor.return_value.get_lun_info.side_effect = [ self.get_lun_info_return_value(), None, self.get_lun_info_return_value()] mock_gen_random_name.side_effect = ['fakeSnapshot', 'fakeLun'] mock_api_executor.return_value.get_snapshot_info.side_effect = [ None, self.get_snapshot_info_return_value()] mock_api_executor.return_value.create_snapshot_api.return_value = ( 'fakeSnapshotId') mock_api_executor.return_value.clone_snapshot.return_value = None mock_extend_lun.return_value = None self.driver = qnap.QnapISCSIDriver( configuration=create_configuration( FAKE_USER, FAKE_PASSWORD, 'http://1.2.3.4:8080', '1.2.3.4', 'Pool1', True)) self.mock_object(eventlet, 'sleep') self.driver.do_setup('context') self.driver.create_cloned_volume(fake_volume, fake_src_vref) mock_extend_lun.assert_called_once_with(fake_volume, 'fakeLunNaa') @mock.patch.object(qnap.QnapISCSIDriver, '_create_snapshot_name') @mock.patch('cinder.volume.drivers.qnap.QnapAPIExecutor') def test_create_snapshot_positive( self, mock_api_executor, mock_create_snapshot_name): """Test create snapshot.""" fake_volume = VolumeClass( 
'fakeDisplayName', 'fakeId', 100, 'fakeLunName') snapshot = SnapshotClass(fake_volume, 100) mock_api_executor.return_value.get_basic_info.return_value = ( 'ES1640dc ', 'ES1640dc ', '1.1.3') mock_api_executor.return_value.get_lun_info.return_value = ( self.get_lun_info_return_value()) mock_create_snapshot_name.return_value = 'fakeSnapshot' mock_api_executor.return_value.get_snapshot_info.side_effect = [ None, self.get_snapshot_info_return_value()] mock_api_executor.return_value.create_snapshot_api.return_value = ( 'fakeSnapshotId') self.driver = qnap.QnapISCSIDriver( configuration=create_configuration( FAKE_USER, FAKE_PASSWORD, 'http://1.2.3.4:8080', '1.2.3.4', 'Pool1', True)) self.mock_object(eventlet, 'sleep') self.driver.do_setup('context') self.driver.create_snapshot(snapshot) mock_api_return = mock_api_executor.return_value mock_api_return.get_lun_info.assert_called_once_with( LUNNAA='fakeLunNaa') expected_call_list = [ mock.call(lun_index='fakeLunIndex', snapshot_name='fakeSnapshot'), mock.call(lun_index='fakeLunIndex', snapshot_name='fakeSnapshot')] self.assertEqual( expected_call_list, mock_api_return.get_snapshot_info.call_args_list) mock_api_return.create_snapshot_api.assert_called_once_with( 'fakeLunIndex', 'fakeSnapshot') @mock.patch('cinder.volume.drivers.qnap.QnapAPIExecutor') def test_delete_snapshot_positive( self, mock_api_executor): """Test delete snapshot.""" fake_volume = VolumeClass( 'fakeDisplayName', 'fakeId', 100, 'fakeLunName') fake_snapshot = SnapshotClass(fake_volume, 100) mock_api_executor.return_value.get_basic_info.return_value = ( 'ES1640dc ', 'ES1640dc ', '1.1.3') mock_api_return = mock_api_executor.return_value mock_api_return.delete_snapshot_api.return_value = None self.driver = qnap.QnapISCSIDriver( configuration=create_configuration( FAKE_USER, FAKE_PASSWORD, 'http://1.2.3.4:8080', '1.2.3.4', 'Pool1', True)) self.driver.do_setup('context') self.driver.delete_snapshot(fake_snapshot) mock_api_return.delete_snapshot_api.assert_called_once_with( 'fakeSnapshotId') @mock.patch('cinder.volume.drivers.qnap.QnapAPIExecutor') def test_delete_snapshot_negative( self, mock_api_executor): """Test delete snapshot.""" fake_volume = VolumeClass( 'fakeDisplayName', 'fakeId', 100, 'fakeLunName') fake_snapshot = SnapshotClass(fake_volume, 100) mock_api_executor.return_value.get_basic_info.return_value = ( 'ES1640dc ', 'ES1640dc ', '1.1.3') mock_api_return = mock_api_executor.return_value mock_api_return.delete_snapshot_api.return_value = None self.driver = qnap.QnapISCSIDriver( configuration=create_configuration( FAKE_USER, FAKE_PASSWORD, 'http://1.2.3.4:8080', '1.2.3.4', 'Pool1', True)) self.driver.do_setup('context') fake_snapshot.metadata.pop('snapshot_id', None) self.driver.delete_snapshot(fake_snapshot) @mock.patch.object(qnap.QnapISCSIDriver, '_get_volume_metadata') @mock.patch.object(qnap.QnapISCSIDriver, '_extend_lun') @mock.patch.object(qnap.QnapISCSIDriver, '_gen_random_name') @mock.patch('cinder.volume.drivers.qnap.QnapAPIExecutor') def test_create_volume_from_snapshot_positive_volsize_more_snapshotvolsize( self, mock_api_executor, mock_gen_random_name, mock_extend_lun, mock_get_volume_metadata): """Test create volume from snapshot positive.""" fake_volume = VolumeClass( 'fakeDisplayName', 'fakeId', 100, 'fakeLunName') fake_snapshot = SnapshotClass(fake_volume, 90) mock_api_executor.return_value.get_basic_info.return_value = ( 'ES1640dc ', 'ES1640dc ', '1.1.3') mock_gen_random_name.return_value = 'fakeLun' mock_api_return = mock_api_executor.return_value 
mock_api_return.get_lun_info.side_effect = [ None, self.get_lun_info_return_value()] mock_api_return.clone_snapshot.return_value = None mock_api_return.create_snapshot_api.return_value = ( 'fakeSnapshotId') mock_extend_lun.return_value = None self.driver = qnap.QnapISCSIDriver( configuration=create_configuration( FAKE_USER, FAKE_PASSWORD, 'http://1.2.3.4:8080', '1.2.3.4', 'Pool1', True)) self.mock_object(eventlet, 'sleep') self.driver.do_setup('context') self.driver.create_volume_from_snapshot(fake_volume, fake_snapshot) expected_call_list = [ mock.call(LUNName='fakeLun'), mock.call(LUNName='fakeLun')] self.assertEqual( expected_call_list, mock_api_return.get_lun_info.call_args_list) mock_api_return.clone_snapshot.assert_called_once_with( 'fakeSnapshotId', 'fakeLun') mock_extend_lun.assert_called_once_with(fake_volume, 'fakeLunNaa') @mock.patch.object(qnap.QnapISCSIDriver, '_get_volume_metadata') @mock.patch.object(qnap.QnapISCSIDriver, '_extend_lun') @mock.patch.object(qnap.QnapISCSIDriver, '_gen_random_name') @mock.patch('cinder.volume.drivers.qnap.QnapAPIExecutor') def test_create_volume_from_snapshot_negative( self, mock_api_executor, mock_gen_random_name, mock_extend_lun, mock_get_volume_metadata): """Test create volume from snapshot positive.""" fake_volume = VolumeClass( 'fakeDisplayName', 'fakeId', 100, 'fakeLunName') fake_snapshot = SnapshotClass(fake_volume, 90) mock_api_executor.return_value.get_basic_info.return_value = ( 'ES1640dc ', 'ES1640dc ', '1.1.3') mock_gen_random_name.return_value = 'fakeLun' mock_api_return = mock_api_executor.return_value mock_api_return.get_lun_info.side_effect = [ None, self.get_lun_info_return_value()] mock_api_return.clone_snapshot.return_value = None mock_api_return.create_snapshot_api.return_value = ( 'fakeSnapshotId') mock_extend_lun.return_value = None self.driver = qnap.QnapISCSIDriver( configuration=create_configuration( FAKE_USER, FAKE_PASSWORD, 'http://1.2.3.4:8080', '1.2.3.4', 'Pool1', True)) self.driver.do_setup('context') fake_snapshot.metadata.pop('snapshot_id', None) self.assertRaises(exception.VolumeDriverException, self.driver.create_volume_from_snapshot, fake_volume, fake_snapshot) def get_specific_poolinfo_return_value(self): """Get specific pool info.""" root = ET.fromstring(FAKE_RES_DETAIL_DATA_SPECIFIC_POOL_INFO) pool_list = root.find('Pool_Index') pool_info_tree = pool_list.findall('row') for pool in pool_info_tree: return pool @mock.patch('cinder.volume.drivers.qnap.QnapAPIExecutor') def test_get_volume_stats( self, mock_api_executor): """Get volume stats.""" mock_api_executor.return_value.get_basic_info.return_value = ( 'ES1640dc ', 'ES1640dc ', '1.1.3') mock_api_return = mock_api_executor.return_value mock_api_return.get_specific_poolinfo.return_value = ( self.get_specific_poolinfo_return_value()) self.driver = qnap.QnapISCSIDriver( configuration=create_configuration( FAKE_USER, FAKE_PASSWORD, 'http://1.2.3.4:8080', '1.2.3.4', 'Pool1', True)) self.driver.do_setup('context') self.driver.VERSION = 'fakeVersion' expected_res = {'volume_backend_name': 'QNAP', 'vendor_name': 'QNAP', 'driver_version': 'fakeVersion', 'storage_protocol': 'iscsi'} single_pool = dict( pool_name=self.driver.configuration.qnap_poolname, total_capacity_gb=930213412209 / units.Gi, free_capacity_gb=928732941681 / units.Gi, provisioned_capacity_gb=1480470528 / units.Gi, reserved_percentage=self.driver.configuration.reserved_percentage, QoS_support=False, qnap_thin_provision=['True', 'False'], qnap_compression=['True', 'False'], qnap_deduplication=['True', 
'False'], qnap_ssd_cache=['True', 'False']) expected_res['pools'] = [single_pool] self.assertEqual( expected_res, self.driver.get_volume_stats(refresh=True)) mock_api_return.get_specific_poolinfo.assert_called_once_with( self.driver.configuration.qnap_poolname) @mock.patch.object(qnap.QnapISCSIDriver, '_extend_lun') @mock.patch('cinder.volume.drivers.qnap.QnapAPIExecutor') def test_extend_volume( self, mock_api_executor, mock_extend_lun): """Test extend volume.""" fake_volume = VolumeClass( 'fakeDisplayName', 'fakeId', 100, 'fakeLunName') mock_api_executor.return_value.get_basic_info.return_value = ( 'ES1640dc ', 'ES1640dc ', '1.1.3') self.driver = qnap.QnapISCSIDriver( configuration=create_configuration( FAKE_USER, FAKE_PASSWORD, 'http://1.2.3.4:8080', '1.2.3.4', 'Pool1', True)) self.driver.do_setup('context') self.driver.extend_volume(fake_volume, 'fakeSize') mock_extend_lun.assert_called_once_with(fake_volume, '') @mock.patch.object( qnap.QnapISCSIDriver, '_get_lun_naa_from_volume_metadata') @mock.patch('cinder.volume.drivers.qnap.QnapAPIExecutor') def test_extend_lun( self, mock_api_executor, mock_get_lun_naa_from_volume_metadata): """Test _extend_lun method.""" fake_volume = VolumeClass( 'fakeDisplayName', 'fakeId', 100, 'fakeLunName') mock_api_executor.return_value.get_basic_info.return_value = ( 'ES1640dc ', 'ES1640dc ', '1.1.3') mock_get_lun_naa_from_volume_metadata.return_value = 'fakeLunNaa' mock_api_return = mock_api_executor.return_value mock_api_return.get_lun_info.return_value = ( self.get_lun_info_return_value()) mock_api_return.edit_lun.return_value = None self.driver = qnap.QnapISCSIDriver( configuration=create_configuration( FAKE_USER, FAKE_PASSWORD, 'http://1.2.3.4:8080', '1.2.3.4', 'Pool1', True)) self.driver.do_setup('context') self.driver._extend_lun(fake_volume, '') mock_api_return.get_lun_info.assert_called_once_with( LUNNAA='fakeLunNaa') expect_lun = { 'LUNName': 'fakeLunName', 'LUNCapacity': fake_volume['size'], 'LUNIndex': 'fakeLunIndex', 'LUNThinAllocate': 'fakeLunThinAllocate', 'LUNPath': 'fakeLunPath', 'LUNStatus': '1'} mock_api_return.edit_lun.assert_called_once_with(expect_lun) @mock.patch.object(qnap.QnapISCSIDriver, '_get_lun_naa_from_volume_metadata') @mock.patch.object(qnap.QnapISCSIDriver, '_gen_random_name') @mock.patch('cinder.volume.drivers.qnap.QnapAPIExecutor') def test_create_export_positive_without_multipath( self, mock_api_executor, mock_gen_random_name, mock_get_lun_naa_from_volume_metadata): """Test create export.""" fake_volume = VolumeClass( 'fakeDisplayName', 'fakeId', 100, 'fakeLunName') fake_connector = {'initiator': 'fakeInitiatorIqn'} mock_api_executor.return_value.get_basic_info.return_value = ( 'ES1640dc ', 'ES1640dc ', '1.1.3') mock_api_return = mock_api_executor.return_value mock_api_return.get_lun_info.return_value = ( self.get_lun_info_return_value()) mock_api_return.get_iscsi_portal_info.return_value = ( FAKE_RES_DETAIL_ISCSI_PORTAL_INFO) mock_gen_random_name.return_value = 'fakeTargetName' mock_get_lun_naa_from_volume_metadata.return_value = 'fakeLunNaa' mock_api_return.create_target.return_value = 'fakeTargetIndex' mock_api_return.get_target_info.return_value = ( self.get_target_info_return_value()) mock_api_return.map_lun.return_value = None mock_api_return.get_ethernet_ip.return_value = ['1.2.3.4'], None self.driver = qnap.QnapISCSIDriver( configuration=create_configuration( FAKE_USER, FAKE_PASSWORD, 'http://1.2.3.4:8080', '1.2.3.4', 'Pool1', True)) self.driver.configuration.use_chap_auth = False 
self.driver.configuration.chap_username = '' self.driver.configuration.chap_password = '' self.driver.iscsi_port = 'fakeServicePort' self.mock_object(eventlet, 'sleep') self.driver.do_setup('context') expected_properties = '%(host)s:%(port)s,1 %(name)s %(tgt_lun)s' % { 'host': '1.2.3.4', 'port': 'fakeServicePort', 'name': 'fakeTargetIqn', 'tgt_lun': '1'} expected_return = { 'provider_location': expected_properties, 'provider_auth': None} self.assertEqual(expected_return, self.driver.create_export( 'context', fake_volume, fake_connector)) @mock.patch.object(qnap.QnapISCSIDriver, '_get_lun_naa_from_volume_metadata') @mock.patch.object(qnap.QnapISCSIDriver, '_gen_random_name') @mock.patch('cinder.volume.drivers.qnap.QnapAPIExecutor') def test_create_export_positive_with_multipath( self, mock_api_executor, mock_gen_random_name, mock_get_lun_naa_from_volume_metadata): """Test create export.""" fake_volume = VolumeClass( 'fakeDisplayName', 'fakeId', 100, 'fakeLunName') fake_connector = {'initiator': 'fakeInitiatorIqn', 'multipath': True} mock_api_executor.return_value.get_basic_info.return_value = ( 'ES1640dc ', 'ES1640dc ', '1.1.3') mock_api_return = mock_api_executor.return_value mock_api_return.get_lun_info.return_value = ( self.get_lun_info_return_value()) mock_api_return.get_iscsi_portal_info.return_value = ( FAKE_RES_DETAIL_ISCSI_PORTAL_INFO) mock_gen_random_name.return_value = 'fakeTargetName' mock_get_lun_naa_from_volume_metadata.return_value = 'fakeLunNaa' mock_api_return.create_target.return_value = 'fakeTargetIndex' mock_api_return.get_target_info.return_value = ( self.get_target_info_return_value()) mock_api_return.map_lun.return_value = None mock_api_return.get_ethernet_ip.return_value = ['1.2.3.4'], None self.driver = qnap.QnapISCSIDriver( configuration=create_configuration( FAKE_USER, FAKE_PASSWORD, 'http://1.2.3.4:8080', '1.2.3.4', 'Pool1', True)) self.driver.configuration.use_chap_auth = False self.driver.configuration.chap_username = '' self.driver.configuration.chap_password = '' self.driver.iscsi_port = 'fakeServicePort' self.mock_object(eventlet, 'sleep') self.driver.do_setup('context') expected_properties = '%(host)s:%(port)s,1 %(name)s %(tgt_lun)s' % { 'host': '1.2.3.4', 'port': 'fakeServicePort', 'name': 'fakeTargetIqn', 'tgt_lun': '1'} expected_return = { 'provider_location': expected_properties, 'provider_auth': None} self.assertEqual(expected_return, self.driver.create_export( 'context', fake_volume, fake_connector)) @mock.patch.object(qnap.QnapISCSIDriver, '_get_lun_naa_from_volume_metadata') @mock.patch.object(qnap.QnapISCSIDriver, '_gen_random_name') @mock.patch('cinder.volume.drivers.qnap.QnapAPIExecutor') def test_create_export_114( self, mock_api_executor, mock_gen_random_name, mock_get_lun_naa_from_volume_metadata): """Test create export.""" fake_volume = VolumeClass( 'fakeDisplayName', 'fakeId', 100, 'fakeLunName') fake_connector = {'initiator': 'fakeInitiatorIqn'} mock_api_executor.return_value.get_basic_info.return_value = ( 'ES1640dc ', 'ES1640dc ', '1.1.4') mock_api_return = mock_api_executor.return_value mock_api_return.get_one_lun_info.return_value = ( self.get_mapped_one_lun_info_return_value()) mock_api_return.get_iscsi_portal_info.return_value = ( FAKE_RES_DETAIL_ISCSI_PORTAL_INFO) mock_gen_random_name.return_value = 'fakeTargetName' mock_get_lun_naa_from_volume_metadata.return_value = 'fakeLunNaa' mock_api_return.create_target.return_value = 'fakeTargetIndex' mock_api_return.get_target_info.return_value = ( self.get_target_info_return_value()) 
mock_api_return.add_target_init.return_value = None mock_api_return.map_lun.return_value = None mock_api_return.get_ethernet_ip.return_value = ['1.2.3.4'], None self.driver = qnap.QnapISCSIDriver( configuration=create_configuration( FAKE_USER, FAKE_PASSWORD, 'http://1.2.3.4:8080', '1.2.3.4', 'Pool1', True)) self.driver.configuration.use_chap_auth = False self.driver.configuration.chap_username = '' self.driver.configuration.chap_password = '' self.driver.iscsi_port = 'fakeServicePort' self.mock_object(eventlet, 'sleep') self.driver.do_setup('context') expected_properties = '%(host)s:%(port)s,1 %(name)s %(tgt_lun)s' % { 'host': '1.2.3.4', 'port': 'fakeServicePort', 'name': 'fakeTargetIqn', 'tgt_lun': '1'} expected_return = { 'provider_location': expected_properties, 'provider_auth': None} self.assertEqual(expected_return, self.driver.create_export( 'context', fake_volume, fake_connector)) @mock.patch.object(qnap.QnapISCSIDriver, '_get_lun_naa_from_volume_metadata') @mock.patch.object(qnap.QnapISCSIDriver, '_gen_random_name') @mock.patch('cinder.volume.drivers.qnap.QnapAPIExecutorTS') @mock.patch('cinder.volume.drivers.qnap.QnapAPIExecutor') def test_create_export_positive_ts( self, mock_api_executor, mock_api_executor_ts, mock_gen_random_name, mock_get_lun_naa_from_volume_metadata): """Test create export.""" fake_volume = VolumeClass( 'fakeDisplayName', 'fakeId', 100, 'fakeLunName') fake_connector = {'initiator': 'fakeInitiatorIqn'} mock_api_executor.return_value.get_basic_info.return_value = ( 'TS-870U-RP ', 'TS-870U-RP ', '4.3.0') mock_api_executor_ts.return_value.get_basic_info.return_value = ( 'TS-870U-RP ', 'TS-870U-RP ', '4.3.0') mock_api_return = mock_api_executor_ts.return_value mock_api_return.get_one_lun_info.return_value = ( self.get_mapped_one_lun_info_return_value()) mock_api_return.get_iscsi_portal_info.return_value = ( FAKE_RES_DETAIL_ISCSI_PORTAL_INFO) mock_gen_random_name.return_value = 'fakeTargetName' mock_get_lun_naa_from_volume_metadata.return_value = 'fakeLunNaa' mock_api_return.create_target.return_value = 'fakeTargetIndex' mock_api_return.get_target_info.return_value = ( self.get_target_info_return_value()) mock_api_return.map_lun.return_value = None mock_api_return.get_ethernet_ip.return_value = ['1.2.3.4'], None self.driver = qnap.QnapISCSIDriver( configuration=create_configuration( FAKE_USER, FAKE_PASSWORD, 'http://1.2.3.4:8080', '1.2.3.4', 'Storage Pool 1', True)) self.driver.configuration.use_chap_auth = False self.driver.configuration.chap_username = '' self.driver.configuration.chap_password = '' self.driver.iscsi_port = 'fakeServicePort' self.mock_object(eventlet, 'sleep') self.driver.do_setup('context') expected_properties = '%(host)s:%(port)s,1 %(name)s %(tgt_lun)s' % { 'host': '1.2.3.4', 'port': 'fakeServicePort', 'name': 'fakeTargetIqn', 'tgt_lun': '1'} expected_return = { 'provider_location': expected_properties, 'provider_auth': None} self.assertEqual(expected_return, self.driver.create_export( 'context', fake_volume, fake_connector)) @mock.patch.object(qnap.QnapISCSIDriver, '_get_lun_naa_from_volume_metadata') @mock.patch('cinder.volume.drivers.qnap.QnapAPIExecutor') def test_create_export_negative_without_lun_naa( self, mock_api_executor, mock_get_lun_naa_from_volume_metadata): """Test create export.""" fake_volume = VolumeClass( 'fakeDisplayName', 'fakeId', 100, 'fakeLunName') fake_connector = {'initiator': 'fakeInitiatorIqn'} mock_api_executor.return_value.get_basic_info.return_value = ( 'ES1640dc ', 'ES1640dc ', '1.1.3') # mock_api_return = 
mock_api_executor.return_value mock_get_lun_naa_from_volume_metadata.return_value = '' self.driver = qnap.QnapISCSIDriver( configuration=create_configuration( FAKE_USER, FAKE_PASSWORD, 'http://1.2.3.4:8080', '1.2.3.4', 'Pool1', True)) self.driver.iscsi_port = 'fakeServicePort' self.driver.do_setup('context') self.assertRaises(exception.VolumeDriverException, self.driver.create_export, 'context', fake_volume, fake_connector) @mock.patch.object(qnap.QnapISCSIDriver, '_get_lun_naa_from_volume_metadata') @mock.patch('cinder.volume.drivers.qnap.QnapAPIExecutor') def test_initialize_connection_with_target_exist( self, mock_api_executor, mock_get_lun_naa_from_volume_metadata): """Test initialize connection.""" fake_volume = VolumeClass( 'fakeDisplayName', 'fakeId', 100, 'fakeLunName') fake_connector = {'initiator': 'fakeInitiatorIqn'} mock_api_executor.return_value.get_basic_info.return_value = ( 'ES1640dc ', 'ES1640dc ', '1.1.3') mock_api_return = mock_api_executor.return_value mock_api_return.get_iscsi_portal_info.return_value = ( FAKE_RES_DETAIL_ISCSI_PORTAL_INFO) mock_get_lun_naa_from_volume_metadata.return_value = 'fakeLunNaa' mock_api_return.get_lun_info.side_effect = [ self.get_lun_info_return_value(), self.get_lun_info_return_value()] mock_api_return.get_all_iscsi_portal_setting.return_value = ( FAKE_RES_DETAIL_GET_ALL_ISCSI_PORTAL_SETTING) mock_api_return.map_lun.return_value = None mock_api_return.get_ethernet_ip.return_value = ['1.2.3.4'], None self.driver = qnap.QnapISCSIDriver( configuration=create_configuration( FAKE_USER, FAKE_PASSWORD, 'http://1.2.3.4:8080', '1.2.3.4', 'Pool1', True)) self.driver.configuration.use_chap_auth = False self.driver.configuration.chap_username = '' self.driver.configuration.chap_password = '' self.driver.iscsi_port = 'fakeServicePort' self.driver.do_setup('context') expected_properties = { 'target_discovered': False, 'target_portal': '1.2.3.4:fakeServicePort', 'target_iqn': 'fakeTargetIqn', 'target_lun': 1, 'volume_id': fake_volume['id']} expected_return = { 'driver_volume_type': 'iscsi', 'data': expected_properties} self.assertEqual(expected_return, self.driver.initialize_connection( fake_volume, fake_connector)) @mock.patch('cinder.volume.drivers.qnap.QnapAPIExecutor') def test_initialize_connection_with_target_exist_negative_no_provider( self, mock_api_executor): """Test initialize connection.""" fake_volume = VolumeClass( 'fakeDisplayName', 'fakeId', 100, 'fakeLunName') fake_connector = {'initiator': 'fakeInitiatorIqn'} mock_api_executor.return_value.get_basic_info.return_value = ( 'ES1640dc ', 'ES1640dc ', '1.1.3') self.driver = qnap.QnapISCSIDriver( configuration=create_configuration( FAKE_USER, FAKE_PASSWORD, 'http://1.2.3.4:8080', '1.2.3.4', 'Pool1', True)) self.driver.do_setup('context') fake_volume.provider_location = None self.assertRaises(exception.InvalidParameterValue, self.driver.initialize_connection, fake_volume, fake_connector) @mock.patch('cinder.volume.drivers.qnap.QnapAPIExecutor') def test_initialize_connection_with_target_exist_negative_wrong_provider_1( self, mock_api_executor): """Test initialize connection.""" fake_volume = VolumeClass( 'fakeDisplayName', 'fakeId', 100, 'fakeLunName') fake_connector = {'initiator': 'fakeInitiatorIqn'} mock_api_executor.return_value.get_basic_info.return_value = ( 'ES1640dc ', 'ES1640dc ', '1.1.3') self.driver = qnap.QnapISCSIDriver( configuration=create_configuration( FAKE_USER, FAKE_PASSWORD, 'http://1.2.3.4:8080', '1.2.3.4', 'Pool1', True)) self.driver.do_setup('context') 
fake_volume.provider_location = ( '%(host)s:%(port)s,1%(name)s%(tgt_lun)s' % { 'host': '1.2.3.4', 'port': '3260', 'name': 'fakeTargetIqn', 'tgt_lun': '1' }) self.assertRaises(exception.InvalidInput, self.driver.initialize_connection, fake_volume, fake_connector) @mock.patch('cinder.volume.drivers.qnap.QnapAPIExecutor') def test_initialize_connection_with_target_exist_negative_wrong_provider_2( self, mock_api_executor): """Test initialize connection.""" fake_volume = VolumeClass( 'fakeDisplayName', 'fakeId', 100, 'fakeLunName') fake_connector = {'initiator': 'fakeInitiatorIqn'} mock_api_executor.return_value.get_basic_info.return_value = ( 'ES1640dc ', 'ES1640dc ', '1.1.3') self.driver = qnap.QnapISCSIDriver( configuration=create_configuration( FAKE_USER, FAKE_PASSWORD, 'http://1.2.3.4:8080', '1.2.3.4', 'Pool1', True)) self.driver.do_setup('context') fake_volume.provider_location = ( '%(host)s:%(port)s1 %(name)s %(tgt_lun)s' % { 'host': '1.2.3.4', 'port': '3260', 'name': 'fakeTargetIqn', 'tgt_lun': '1' }) self.assertRaises(exception.InvalidInput, self.driver.initialize_connection, fake_volume, fake_connector) @mock.patch('cinder.volume.drivers.qnap.QnapAPIExecutor') def test_terminate_connection_positive_with_lun_mapped( self, mock_api_executor): """Test terminate connection.""" fake_volume = VolumeClass( 'fakeDisplayName', 'fakeId', 100, 'fakeLunName') fake_connector = {'initiator': 'fakeInitiator'} mock_api_executor.return_value.get_basic_info.return_value = ( 'ES1640dc ', 'ES1640dc ', '1.1.3') mock_api_return = mock_api_executor.return_value mock_api_return.get_lun_info.return_value = ( self.get_mapped_lun_info_return_value()) mock_api_return.unmap_lun.return_value = None self.driver = qnap.QnapISCSIDriver( configuration=create_configuration( FAKE_USER, FAKE_PASSWORD, 'http://1.2.3.4:8080', '1.2.3.4', 'Pool1', True)) self.driver.do_setup('context') self.driver.terminate_connection(fake_volume, fake_connector) mock_api_return.get_lun_info.assert_called_once_with( LUNIndex='fakeLunIndex') mock_api_return.unmap_lun.assert_called_once_with( 'fakeLunIndex', '9') @mock.patch('cinder.volume.drivers.qnap.QnapAPIExecutor') def test_terminate_connection_positive_without_lun_mapped( self, mock_api_executor): """Test terminate connection.""" fake_volume = VolumeClass( 'fakeDisplayName', 'fakeId', 100, 'fakeLunName') fake_connector = {'initiator': 'fakeInitiator'} mock_api_executor.return_value.get_basic_info.return_value = ( 'ES1640dc ', 'ES1640dc ', '1.1.3') mock_api_return = mock_api_executor.return_value mock_api_return.get_lun_info.return_value = ( self.get_lun_info_return_value()) mock_api_return.unmap_lun.return_value = None self.driver = qnap.QnapISCSIDriver( configuration=create_configuration( FAKE_USER, FAKE_PASSWORD, 'http://1.2.3.4:8080', '1.2.3.4', 'Pool1', True)) self.driver.do_setup('context') self.driver.terminate_connection(fake_volume, fake_connector) mock_api_return.get_lun_info.assert_called_once_with( LUNIndex='fakeLunIndex') @mock.patch('cinder.volume.drivers.qnap.QnapAPIExecutor') def test_update_migrated_volume( self, mock_api_executor): """Test update migrated volume.""" fake_volume = VolumeClass( 'fakeDisplayName', 'fakeId', 100, 'fakeLunName') fake_new_volume = VolumeClass( 'fakeDisplayName', 'fakeId', 100, 'fakeLunName') mock_api_executor.return_value.get_basic_info.return_value = ( 'ES1640dc ', 'ES1640dc ', '1.1.3') self.driver = qnap.QnapISCSIDriver( configuration=create_configuration( FAKE_USER, FAKE_PASSWORD, 'http://1.2.3.4:8080', '1.2.3.4', 'Pool1', True)) 
self.driver.do_setup('context') self.driver.update_migrated_volume('context', fake_volume, fake_new_volume, 'fakeOriginalVolumeStatus') @data({ 'fake_spec': {}, 'expect_spec': { 'force': False, 'ignore_errors': False, 'remote': False } }, { 'fake_spec': { 'force': mock.sentinel.force, 'ignore_errors': mock.sentinel.ignore_errors, 'remote': mock.sentinel.remote }, 'expect_spec': { 'force': mock.sentinel.force, 'ignore_errors': mock.sentinel.ignore_errors, 'remote': mock.sentinel.remote } }) @unpack @mock.patch.object(driver.BaseVD, '_detach_volume') @mock.patch('cinder.volume.drivers.qnap.QnapAPIExecutor') def test_detach_volume( self, mock_api_executor, mock_detach_volume, fake_spec, expect_spec): """Test detach volume.""" mock_detach_volume.return_value = None mock_api_executor.return_value.get_basic_info.return_value = ( 'ES1640dc ', 'ES1640dc ', '1.1.3') self.driver = qnap.QnapISCSIDriver( configuration=create_configuration( FAKE_USER, FAKE_PASSWORD, 'http://1.2.3.4:8080', '1.2.3.4', 'Pool1', True)) self.driver.do_setup('context') self.driver._detach_volume('context', 'attach_info', 'volume', 'property', **fake_spec) mock_detach_volume.assert_called_once_with( 'context', 'attach_info', 'volume', 'property', **expect_spec) @mock.patch.object(driver.BaseVD, '_attach_volume') @mock.patch('cinder.volume.drivers.qnap.QnapAPIExecutor') def test_attach_volume( self, mock_api_executor, mock_attach_volume): """Test attach volume.""" mock_attach_volume.return_value = None mock_api_executor.return_value.get_basic_info.return_value = ( 'ES1640dc ', 'ES1640dc ', '1.1.3') self.driver = qnap.QnapISCSIDriver( configuration=create_configuration( FAKE_USER, FAKE_PASSWORD, 'http://1.2.3.4:8080', '1.2.3.4', 'Pool1', True)) self.driver.do_setup('context') self.driver._attach_volume('context', 'volume', 'properties') mock_attach_volume.assert_called_once_with( 'context', 'volume', 'properties', False) class QnapAPIExecutorEsTestCase(QnapDriverBaseTestCase): """Tests QnapAPIExecutor.""" @mock.patch('requests.request') def test_create_lun_positive_with_thin_allocate( self, mock_request): """Test create lun.""" fake_volume = VolumeClass( 'fakeDisplayName', 'fakeId', 100, 'fakeLunName') mock_request.side_effect = ([ FakeLoginResponse(), FakeGetBasicInfoResponse(), FakeLoginResponse(), FakeCreateLunResponse()]) self.driver = qnap.QnapISCSIDriver( configuration=create_configuration( FAKE_USER, FAKE_PASSWORD, 'http://1.2.3.4:8080', '1.2.3.4', 'Pool1', True)) self.driver.do_setup('context') self.assertEqual( 'fakeLunIndex', self.driver.api_executor.create_lun( fake_volume, 'fakepool', 'fakeLun', True, False, True, False)) fake_params = {} fake_params['func'] = 'add_lun' fake_params['FileIO'] = 'no' fake_params['LUNThinAllocate'] = '1' fake_params['LUNName'] = 'fakeLun' fake_params['LUNPath'] = 'fakeLun' fake_params['poolID'] = 'fakepool' fake_params['lv_ifssd'] = 'no' fake_params['compression'] = '1' fake_params['dedup'] = 'off' fake_params['LUNCapacity'] = 100 fake_params['lv_threshold'] = '80' fake_params['sid'] = 'fakeSid' fake_post_params = self.sanitize(fake_params) create_lun_url = ( 'http://1.2.3.4:8080/cgi-bin/disk/iscsi_lun_setting.cgi?' 
+ fake_post_params) expected_call_list = [ mock.call('POST', fake_login_url, data=global_sanitized_params, headers=header, verify=False), mock.call('GET', fake_get_basic_info_url, data=None, headers=None, verify=False), mock.call('POST', fake_login_url, data=global_sanitized_params, headers=header, verify=False), mock.call('GET', create_lun_url, data=None, headers=None, verify=False)] self.assertEqual(expected_call_list, mock_request.call_args_list) @mock.patch('requests.request') def test_create_lun_positive_without_thin_allocate( self, mock_request): """Test create lun.""" fake_volume = VolumeClass( 'fakeDisplayName', 'fakeId', 100, 'fakeLunName') mock_request.side_effect = ([ FakeLoginResponse(), FakeGetBasicInfoResponse(), FakeLoginResponse(), FakeCreateLunResponse()]) self.driver = qnap.QnapISCSIDriver( configuration=create_configuration( FAKE_USER, FAKE_PASSWORD, 'http://1.2.3.4:8080', '1.2.3.4', 'Pool1', True)) self.driver.do_setup('context') self.assertEqual( 'fakeLunIndex', self.driver.api_executor.create_lun( fake_volume, 'fakepool', 'fakeLun', False, False, True, False)) fake_params = {} fake_params['func'] = 'add_lun' fake_params['FileIO'] = 'no' fake_params['LUNThinAllocate'] = '0' fake_params['LUNName'] = 'fakeLun' fake_params['LUNPath'] = 'fakeLun' fake_params['poolID'] = 'fakepool' fake_params['lv_ifssd'] = 'no' fake_params['compression'] = '1' fake_params['dedup'] = 'off' fake_params['LUNCapacity'] = 100 fake_params['lv_threshold'] = '80' fake_params['sid'] = 'fakeSid' fake_post_params = self.sanitize(fake_params) create_lun_url = ( 'http://1.2.3.4:8080/cgi-bin/disk/iscsi_lun_setting.cgi?' + fake_post_params) expected_call_list = [ mock.call('POST', fake_login_url, data=global_sanitized_params, headers=header, verify=False), mock.call('GET', fake_get_basic_info_url, data=None, headers=None, verify=False), mock.call('POST', fake_login_url, data=global_sanitized_params, headers=header, verify=False), mock.call('GET', create_lun_url, data=None, headers=None, verify=False)] self.assertEqual(expected_call_list, mock_request.call_args_list) @mock.patch('requests.request') def test_create_lun_negative( self, mock_request): """Test create lun.""" fake_volume = VolumeClass( 'fakeDisplayName', 'fakeId', 100, 'fakeLunName') mock_request.side_effect = ([ FakeLoginResponse(), FakeGetBasicInfoResponse(), FakeLoginResponse(), FakeNoAuthPassedResponse()] + [ FakeLoginResponse(), FakeNoAuthPassedResponse()] * 4) self.driver = qnap.QnapISCSIDriver( configuration=create_configuration( FAKE_USER, FAKE_PASSWORD, 'http://1.2.3.4:8080', '1.2.3.4', 'Pool1', True)) self.driver.do_setup('context') self.assertRaises(exception.VolumeBackendAPIException, self.driver.api_executor.create_lun, fake_volume, 'fakepool', 'fakeLun', 'False', 'False', 'True', 'False') @mock.patch('requests.request') def test_create_lun_negative_with_wrong_result( self, mock_request): """Test create lun.""" fake_volume = VolumeClass( 'fakeDisplayName', 'fakeId', 100, 'fakeLunName') mock_request.side_effect = ([ FakeLoginResponse(), FakeGetBasicInfoResponse(), FakeLoginResponse(), FakeCreateLunFailResponse()]) self.driver = qnap.QnapISCSIDriver( configuration=create_configuration( FAKE_USER, FAKE_PASSWORD, 'http://1.2.3.4:8080', '1.2.3.4', 'Pool1', True)) self.driver.do_setup('context') self.assertRaises(exception.VolumeBackendAPIException, self.driver.api_executor.create_lun, fake_volume, 'fakepool', 'fakeLun', 'False', 'False', 'True', 'False') @mock.patch('requests.request') def test_delete_lun( self, mock_request): """Test 
delete lun.""" mock_request.side_effect = ([ FakeLoginResponse(), FakeGetBasicInfoResponse(), FakeLoginResponse(), FakeCreateLunResponse()]) self.driver = qnap.QnapISCSIDriver( configuration=create_configuration( FAKE_USER, FAKE_PASSWORD, 'http://1.2.3.4:8080', '1.2.3.4', 'Pool1', True)) self.driver.do_setup('context') self.driver.api_executor.delete_lun('fakeLunIndex') fake_params = {} fake_params['func'] = 'remove_lun' fake_params['run_background'] = '1' fake_params['ha_sync'] = '1' fake_params['LUNIndex'] = 'fakeLunIndex' fake_params['sid'] = 'fakeSid' fake_post_params = self.sanitize(fake_params) delete_lun_url = ( 'http://1.2.3.4:8080/cgi-bin/disk/iscsi_lun_setting.cgi?' + fake_post_params) expected_call_list = [ mock.call('POST', fake_login_url, data=global_sanitized_params, headers=header, verify=False), mock.call('GET', fake_get_basic_info_url, data=None, headers=None, verify=False), mock.call('POST', fake_login_url, data=global_sanitized_params, headers=header, verify=False), mock.call('GET', delete_lun_url, data=None, headers=None, verify=False)] self.assertEqual(expected_call_list, mock_request.call_args_list) @mock.patch('requests.request') def test_delete_lun_negative(self, mock_request): """Test delete lun.""" mock_request.side_effect = ([ FakeLoginResponse(), FakeGetBasicInfoResponse(), FakeLoginResponse(), FakeNoAuthPassedResponse()] + [ FakeLoginResponse(), FakeNoAuthPassedResponse()] * 4) self.driver = qnap.QnapISCSIDriver( configuration=create_configuration( FAKE_USER, FAKE_PASSWORD, 'http://1.2.3.4:8080', '1.2.3.4', 'Pool1', True)) self.driver.do_setup('context') self.assertRaises(exception.VolumeBackendAPIException, self.driver.api_executor.delete_lun, 'fakeLunIndex') @mock.patch('requests.request') def test_delete_lun_negative_with_wrong_result( self, mock_request): """Test delete lun.""" mock_request.side_effect = ([ FakeLoginResponse(), FakeGetBasicInfoResponse(), FakeLoginResponse(), FakeCreateLunFailResponse()]) self.driver = qnap.QnapISCSIDriver( configuration=create_configuration( FAKE_USER, FAKE_PASSWORD, 'http://1.2.3.4:8080', '1.2.3.4', 'Pool1', True)) self.driver.do_setup('context') self.assertRaises(exception.VolumeBackendAPIException, self.driver.api_executor.delete_lun, 'fakeLunIndex') @mock.patch('requests.request') def test_delete_lun_positive_with_busy_result( self, mock_request): """Test delete lun.""" mock_request.side_effect = ([ FakeLoginResponse(), FakeGetBasicInfoResponse(), FakeLoginResponse(), FakeCreateLunBusyResponse()]) self.driver = qnap.QnapISCSIDriver( configuration=create_configuration( FAKE_USER, FAKE_PASSWORD, 'http://1.2.3.4:8080', '1.2.3.4', 'Pool1', True)) self.driver.do_setup('context') self.driver.api_executor.delete_lun('fakeLunIndex') fake_params = {} fake_params['func'] = 'remove_lun' fake_params['run_background'] = '1' fake_params['ha_sync'] = '1' fake_params['LUNIndex'] = 'fakeLunIndex' fake_params['sid'] = 'fakeSid' fake_post_params = self.sanitize(fake_params) delete_lun_url = ( 'http://1.2.3.4:8080/cgi-bin/disk/iscsi_lun_setting.cgi?' 
+ fake_post_params) expected_call_list = [ mock.call('POST', fake_login_url, data=global_sanitized_params, headers=header, verify=False), mock.call('GET', fake_get_basic_info_url, data=None, headers=None, verify=False), mock.call('POST', fake_login_url, data=global_sanitized_params, headers=header, verify=False), mock.call('GET', delete_lun_url, data=None, headers=None, verify=False)] self.assertEqual(expected_call_list, mock_request.call_args_list) @mock.patch('requests.request') def test_get_specific_poolinfo( self, mock_request): """Test get specific pool info.""" mock_request.side_effect = ([ FakeLoginResponse(), FakeGetBasicInfoResponse(), FakeLoginResponse(), FakePoolInfoResponse()]) self.driver = qnap.QnapISCSIDriver( configuration=create_configuration( FAKE_USER, FAKE_PASSWORD, 'http://1.2.3.4:8080', '1.2.3.4', 'Pool1', True)) self.driver.do_setup('context') self.driver.api_executor.get_specific_poolinfo('fakePoolId') fake_params = {} fake_params['store'] = 'poolInfo' fake_params['func'] = 'extra_get' fake_params['poolID'] = 'fakePoolId' fake_params['Pool_Info'] = '1' fake_params['sid'] = 'fakeSid' fake_post_params = self.sanitize(fake_params) get_specific_poolinfo_url = ( 'http://1.2.3.4:8080/cgi-bin/disk/disk_manage.cgi?' + fake_post_params) expected_call_list = [ mock.call('POST', fake_login_url, data=global_sanitized_params, headers=header, verify=False), mock.call('GET', fake_get_basic_info_url, data=None, headers=None, verify=False), mock.call('POST', fake_login_url, data=global_sanitized_params, headers=header, verify=False), mock.call('GET', get_specific_poolinfo_url, data=None, headers=None, verify=False)] self.assertEqual(expected_call_list, mock_request.call_args_list) @mock.patch('requests.request') def test_get_specific_poolinfo_negative( self, mock_request): """Test get specific pool info.""" mock_request.side_effect = ([ FakeLoginResponse(), FakeGetBasicInfoResponse(), FakeLoginResponse(), FakeNoAuthPassedResponse()] + [ FakeLoginResponse(), FakeNoAuthPassedResponse()] * 4) self.driver = qnap.QnapISCSIDriver( configuration=create_configuration( FAKE_USER, FAKE_PASSWORD, 'http://1.2.3.4:8080', '1.2.3.4', 'Pool1', True)) self.driver.do_setup('context') self.assertRaises(exception.VolumeBackendAPIException, self.driver.api_executor.get_specific_poolinfo, 'Pool1') @mock.patch('requests.request') def test_get_specific_poolinfo_negative_with_wrong_result( self, mock_request): """Test get specific pool info.""" mock_request.side_effect = ([ FakeLoginResponse(), FakeGetBasicInfoResponse(), FakeLoginResponse(), FakePoolInfoFailResponse()]) self.driver = qnap.QnapISCSIDriver( configuration=create_configuration( FAKE_USER, FAKE_PASSWORD, 'http://1.2.3.4:8080', '1.2.3.4', 'Pool1', True)) self.driver.do_setup('context') self.assertRaises(exception.VolumeBackendAPIException, self.driver.api_executor.get_specific_poolinfo, 'Pool1') @mock.patch('requests.request') def test_create_target( self, mock_request): """Test create target.""" mock_request.side_effect = ([ FakeLoginResponse(), FakeGetBasicInfoResponse(), FakeLoginResponse(), FakeCreateTargetResponse()]) self.driver = qnap.QnapISCSIDriver( configuration=create_configuration( FAKE_USER, FAKE_PASSWORD, 'http://1.2.3.4:8080', '1.2.3.4', 'Pool1', True)) self.driver.do_setup('context') self.driver.api_executor.create_target('fakeTargetName', 'sca') fake_params = {} fake_params['func'] = 'add_target' fake_params['targetName'] = 'fakeTargetName' fake_params['targetAlias'] = 'fakeTargetName' fake_params['bTargetDataDigest'] = '0' 
fake_params['bTargetHeaderDigest'] = '0' fake_params['bTargetClusterEnable'] = '1' fake_params['controller_name'] = 'sca' fake_params['sid'] = 'fakeSid' fake_post_params = self.sanitize(fake_params) create_target_url = ( 'http://1.2.3.4:8080/cgi-bin/disk/iscsi_target_setting.cgi?' + fake_post_params) expected_call_list = [ mock.call('POST', fake_login_url, data=global_sanitized_params, headers=header, verify=False), mock.call('GET', fake_get_basic_info_url, data=None, headers=None, verify=False), mock.call('POST', fake_login_url, data=global_sanitized_params, headers=header, verify=False), mock.call('GET', create_target_url, data=None, headers=None, verify=False)] self.assertEqual(expected_call_list, mock_request.call_args_list) @mock.patch('requests.request') def test_create_target_negative( self, mock_request): """Test create target.""" mock_request.side_effect = ([ FakeLoginResponse(), FakeGetBasicInfoResponse(), FakeLoginResponse(), FakeNoAuthPassedResponse()] + [ FakeLoginResponse(), FakeNoAuthPassedResponse()] * 4) self.driver = qnap.QnapISCSIDriver( configuration=create_configuration( FAKE_USER, FAKE_PASSWORD, 'http://1.2.3.4:8080', '1.2.3.4', 'Pool1', True)) self.driver.do_setup('context') self.assertRaises(exception.VolumeBackendAPIException, self.driver.api_executor.create_target, 'fakeTargetName', 'sca') @mock.patch('requests.request') def test_create_target_negative_with_wrong_result( self, mock_request): """Test create target.""" mock_request.side_effect = ([ FakeLoginResponse(), FakeGetBasicInfoResponse(), FakeLoginResponse(), FakeCreateTargetFailResponse()]) self.driver = qnap.QnapISCSIDriver( configuration=create_configuration( FAKE_USER, FAKE_PASSWORD, 'http://1.2.3.4:8080', '1.2.3.4', 'Pool1', True)) self.driver.do_setup('context') self.assertRaises(exception.VolumeBackendAPIException, self.driver.api_executor.create_target, 'fakeTargetName', 'sca') @mock.patch('requests.request') def test_add_target_init(self, mock_request): """Test add target init.""" mock_request.side_effect = ([ FakeLoginResponse(), FakeGetBasicInfoResponse(), FakeLoginResponse(), FakeCreateLunResponse()]) self.driver = qnap.QnapISCSIDriver( configuration=create_configuration( FAKE_USER, FAKE_PASSWORD, 'http://1.2.3.4:8080', '1.2.3.4', 'Pool1', True)) self.driver.do_setup('context') self.driver.api_executor.add_target_init( 'fakeTargetIqn', 'fakeInitiatorIqn', False, '', '') fake_params = {} fake_params['func'] = 'add_init' fake_params['targetIQN'] = 'fakeTargetIqn' fake_params['initiatorIQN'] = 'fakeInitiatorIqn' fake_params['initiatorAlias'] = 'fakeInitiatorIqn' fake_params['bCHAPEnable'] = '0' fake_params['CHAPUserName'] = '' fake_params['CHAPPasswd'] = '' fake_params['bMutualCHAPEnable'] = '0' fake_params['mutualCHAPUserName'] = '' fake_params['mutualCHAPPasswd'] = '' fake_params['ha_sync'] = '1' fake_params['sid'] = 'fakeSid' fake_post_params = self.sanitize(fake_params) add_target_init_url = ( 'http://1.2.3.4:8080/cgi-bin/disk/iscsi_target_setting.cgi?' 
            + fake_post_params)
        expected_call_list = [
            mock.call('POST', fake_login_url, data=global_sanitized_params,
                      headers=header, verify=False),
            mock.call('GET', fake_get_basic_info_url, data=None,
                      headers=None, verify=False),
            mock.call('POST', fake_login_url, data=global_sanitized_params,
                      headers=header, verify=False),
            mock.call('GET', add_target_init_url, data=None,
                      headers=None, verify=False)]
        self.assertEqual(expected_call_list, mock_request.call_args_list)

    @mock.patch('requests.request')
    def test_add_target_init_negative(
            self, mock_request):
        """Test add target init."""
        mock_request.side_effect = ([
            FakeLoginResponse(), FakeGetBasicInfoResponse(),
            FakeLoginResponse(), FakeNoAuthPassedResponse()] + [
            FakeLoginResponse(), FakeNoAuthPassedResponse()] * 4)
        self.driver = qnap.QnapISCSIDriver(
            configuration=create_configuration(
                FAKE_USER, FAKE_PASSWORD, 'http://1.2.3.4:8080',
                '1.2.3.4', 'Pool1', True))
        self.driver.do_setup('context')
        self.assertRaises(exception.VolumeBackendAPIException,
                          self.driver.api_executor.add_target_init,
                          'fakeTargetIqn', 'fakeInitiatorIqn', False, '', '')

    @mock.patch('requests.request')
    def test_add_target_init_negative_with_wrong_result(
            self, mock_request):
        """Test add target init."""
        mock_request.side_effect = ([
            FakeLoginResponse(), FakeGetBasicInfoResponse(),
            FakeLoginResponse(), FakeCreateLunFailResponse()])
        self.driver = qnap.QnapISCSIDriver(
            configuration=create_configuration(
                FAKE_USER, FAKE_PASSWORD, 'http://1.2.3.4:8080',
                '1.2.3.4', 'Pool1', True))
        self.driver.do_setup('context')
        self.assertRaises(exception.VolumeBackendAPIException,
                          self.driver.api_executor.add_target_init,
                          'fakeTargetIqn', 'fakeInitiatorIqn', False, '', '')

    @mock.patch('requests.request')
    def test_remove_target_init(
            self, mock_request):
        """Test remove target init."""
        mock_request.side_effect = ([
            FakeLoginResponse(), FakeGetBasicInfoResponse(),
            FakeLoginResponse()])
        self.driver = qnap.QnapISCSIDriver(
            configuration=create_configuration(
                FAKE_USER, FAKE_PASSWORD, 'http://1.2.3.4:8080',
                '1.2.3.4', 'Pool1', True))
        self.driver.do_setup('context')
        self.driver.api_executor.remove_target_init(
            'fakeTargetIqn', 'fakeInitiatorIqn')
        expected_call_list = [
            mock.call('POST', fake_login_url, data=global_sanitized_params,
                      headers=header, verify=False),
            mock.call('GET', fake_get_basic_info_url, data=None,
                      headers=None, verify=False),
            mock.call('POST', fake_login_url, data=global_sanitized_params,
                      headers=header, verify=False)]
        self.assertEqual(expected_call_list, mock_request.call_args_list)

    @mock.patch('requests.request')
    def test_map_lun(
            self, mock_request):
        """Test map lun."""
        mock_request.side_effect = ([
            FakeLoginResponse(), FakeGetBasicInfoResponse(),
            FakeLoginResponse(), FakeCreateLunResponse()])
        self.driver = qnap.QnapISCSIDriver(
            configuration=create_configuration(
                FAKE_USER, FAKE_PASSWORD, 'http://1.2.3.4:8080',
                '1.2.3.4', 'Pool1', True))
        self.driver.do_setup('context')
        self.driver.api_executor.map_lun(
            'fakeLunIndex', 'fakeTargetIndex')
        fake_params = {}
        fake_params['func'] = 'add_lun'
        fake_params['LUNIndex'] = 'fakeLunIndex'
        fake_params['targetIndex'] = 'fakeTargetIndex'
        fake_params['sid'] = 'fakeSid'
        fake_post_params = self.sanitize(fake_params)
        map_lun_url = (
            'http://1.2.3.4:8080/cgi-bin/disk/iscsi_target_setting.cgi?'
+ fake_post_params) expected_call_list = [ mock.call('POST', fake_login_url, data=global_sanitized_params, headers=header, verify=False), mock.call('GET', fake_get_basic_info_url, data=None, headers=None, verify=False), mock.call('POST', fake_login_url, data=global_sanitized_params, headers=header, verify=False), mock.call('GET', map_lun_url, data=None, headers=None, verify=False)] self.assertEqual(expected_call_list, mock_request.call_args_list) @mock.patch('requests.request') def test_map_lun_negative( self, mock_request): """Test map lun.""" mock_request.side_effect = ([ FakeLoginResponse(), FakeGetBasicInfoResponse(), FakeLoginResponse(), FakeNoAuthPassedResponse()] + [ FakeLoginResponse(), FakeNoAuthPassedResponse()] * 4) self.driver = qnap.QnapISCSIDriver( configuration=create_configuration( FAKE_USER, FAKE_PASSWORD, 'http://1.2.3.4:8080', '1.2.3.4', 'Pool1', True)) self.driver.do_setup('context') self.assertRaises(exception.VolumeBackendAPIException, self.driver.api_executor.map_lun, 'fakeLunIndex', 'fakeTargetIndex') @mock.patch('requests.request') def test_map_lun_negative_with_wrong_result( self, mock_request): """Test map lun.""" mock_request.side_effect = ([ FakeLoginResponse(), FakeGetBasicInfoResponse(), FakeLoginResponse(), FakeCreateLunFailResponse()]) self.driver = qnap.QnapISCSIDriver( configuration=create_configuration( FAKE_USER, FAKE_PASSWORD, 'http://1.2.3.4:8080', '1.2.3.4', 'Pool1', True)) self.driver.do_setup('context') self.assertRaises(exception.VolumeBackendAPIException, self.driver.api_executor.map_lun, 'fakeLunIndex', 'fakeTargetIndex') @mock.patch('requests.request') def test_disable_lun( self, mock_request): """Test disable lun.""" mock_request.side_effect = ([ FakeLoginResponse(), FakeGetBasicInfoResponse(), FakeLoginResponse(), FakeCreateLunResponse()]) self.driver = qnap.QnapISCSIDriver( configuration=create_configuration( FAKE_USER, FAKE_PASSWORD, 'http://1.2.3.4:8080', '1.2.3.4', 'Pool1', True)) self.driver.do_setup('context') self.driver.api_executor.disable_lun( 'fakeLunIndex', 'fakeTargetIndex') fake_params = {} fake_params['func'] = 'edit_lun' fake_params['LUNIndex'] = 'fakeLunIndex' fake_params['targetIndex'] = 'fakeTargetIndex' fake_params['LUNEnable'] = 0 fake_params['sid'] = 'fakeSid' fake_post_params = self.sanitize(fake_params) unmap_lun_url = ( 'http://1.2.3.4:8080/cgi-bin/disk/iscsi_target_setting.cgi?' 
+ fake_post_params) expected_call_list = [ mock.call('POST', fake_login_url, data=global_sanitized_params, headers=header, verify=False), mock.call('GET', fake_get_basic_info_url, data=None, headers=None, verify=False), mock.call('POST', fake_login_url, data=global_sanitized_params, headers=header, verify=False), mock.call('GET', unmap_lun_url, data=None, headers=None, verify=False)] self.assertEqual(expected_call_list, mock_request.call_args_list) @mock.patch('requests.request') def test_disable_lun_negative(self, mock_request): """Test disable lun.""" mock_request.side_effect = ([ FakeLoginResponse(), FakeGetBasicInfoResponse(), FakeLoginResponse(), FakeNoAuthPassedResponse()] + [ FakeLoginResponse(), FakeNoAuthPassedResponse()] * 4) self.driver = qnap.QnapISCSIDriver( configuration=create_configuration( FAKE_USER, FAKE_PASSWORD, 'http://1.2.3.4:8080', '1.2.3.4', 'Pool1', True)) self.driver.do_setup('context') self.assertRaises(exception.VolumeBackendAPIException, self.driver.api_executor.disable_lun, 'fakeLunIndex', 'fakeTargetIndex') @mock.patch('requests.request') def test_disable_lun_negative_with_wrong_result( self, mock_request): """Test disable lun.""" mock_request.side_effect = ([ FakeLoginResponse(), FakeGetBasicInfoResponse(), FakeLoginResponse(), FakeCreateLunFailResponse()]) self.driver = qnap.QnapISCSIDriver( configuration=create_configuration( FAKE_USER, FAKE_PASSWORD, 'http://1.2.3.4:8080', '1.2.3.4', 'Pool1', True)) self.driver.do_setup('context') self.assertRaises(exception.VolumeBackendAPIException, self.driver.api_executor.disable_lun, 'fakeLunIndex', 'fakeTargetIndex') @mock.patch('requests.request') def test_unmap_lun( self, mock_request): """Test unmap lun.""" mock_request.side_effect = ([ FakeLoginResponse(), FakeGetBasicInfoResponse(), FakeLoginResponse(), FakeCreateLunResponse()]) self.driver = qnap.QnapISCSIDriver( configuration=create_configuration( FAKE_USER, FAKE_PASSWORD, 'http://1.2.3.4:8080', '1.2.3.4', 'Pool1', True)) self.driver.do_setup('context') self.driver.api_executor.unmap_lun( 'fakeLunIndex', 'fakeTargetIndex') fake_params = {} fake_params['func'] = 'remove_lun' fake_params['LUNIndex'] = 'fakeLunIndex' fake_params['targetIndex'] = 'fakeTargetIndex' fake_params['sid'] = 'fakeSid' fake_post_params = self.sanitize(fake_params) unmap_lun_url = ( 'http://1.2.3.4:8080/cgi-bin/disk/iscsi_target_setting.cgi?' 
+ fake_post_params) expected_call_list = [ mock.call('POST', fake_login_url, data=global_sanitized_params, headers=header, verify=False), mock.call('GET', fake_get_basic_info_url, data=None, headers=None, verify=False), mock.call('POST', fake_login_url, data=global_sanitized_params, headers=header, verify=False), mock.call('GET', unmap_lun_url, data=None, headers=None, verify=False)] self.assertEqual(expected_call_list, mock_request.call_args_list) @mock.patch('requests.request') def test_unmap_lun_negative( self, mock_request): """Test unmap lun.""" mock_request.side_effect = ([ FakeLoginResponse(), FakeGetBasicInfoResponse(), FakeLoginResponse(), FakeNoAuthPassedResponse()] + [ FakeLoginResponse(), FakeNoAuthPassedResponse()] * 4) self.driver = qnap.QnapISCSIDriver( configuration=create_configuration( FAKE_USER, FAKE_PASSWORD, 'http://1.2.3.4:8080', '1.2.3.4', 'Pool1', True)) self.driver.do_setup('context') self.assertRaises(exception.VolumeBackendAPIException, self.driver.api_executor.unmap_lun, 'fakeLunIndex', 'fakeTargetIndex') @mock.patch('requests.request') def test_unmap_lun_negative_with_wrong_result( self, mock_request): """Test unmap lun.""" mock_request.side_effect = ([ FakeLoginResponse(), FakeGetBasicInfoResponse(), FakeLoginResponse(), FakeCreateLunFailResponse()]) self.driver = qnap.QnapISCSIDriver( configuration=create_configuration( FAKE_USER, FAKE_PASSWORD, 'http://1.2.3.4:8080', '1.2.3.4', 'Pool1', True)) self.driver.do_setup('context') self.assertRaises(exception.VolumeBackendAPIException, self.driver.api_executor.unmap_lun, 'fakeLunIndex', 'fakeTargetIndex') @mock.patch('requests.request') def test_get_iscsi_portal_info( self, mock_request): """Test get iscsi portal info.""" mock_request.side_effect = ([ FakeLoginResponse(), FakeGetBasicInfoResponse(), FakeLoginResponse(), FakeCreateLunResponse()]) self.driver = qnap.QnapISCSIDriver( configuration=create_configuration( FAKE_USER, FAKE_PASSWORD, 'http://1.2.3.4:8080', '1.2.3.4', 'Pool1', True)) self.driver.do_setup('context') self.driver.api_executor.get_iscsi_portal_info() fake_params = {} fake_params['func'] = 'extra_get' fake_params['iSCSI_portal'] = '1' fake_params['sid'] = 'fakeSid' fake_post_params = self.sanitize(fake_params) get_iscsi_portal_info_url = ( 'http://1.2.3.4:8080/cgi-bin/disk/iscsi_portal_setting.cgi?' 
+ fake_post_params) expected_call_list = [ mock.call('POST', fake_login_url, data=global_sanitized_params, headers=header, verify=False), mock.call('GET', fake_get_basic_info_url, data=None, headers=None, verify=False), mock.call('POST', fake_login_url, data=global_sanitized_params, headers=header, verify=False), mock.call('GET', get_iscsi_portal_info_url, data=None, headers=None, verify=False)] self.assertEqual(expected_call_list, mock_request.call_args_list) @mock.patch('requests.request') def test_get_iscsi_portal_info_negative( self, mock_request): """Test get iscsi portal info.""" mock_request.side_effect = ([ FakeLoginResponse(), FakeGetBasicInfoResponse(), FakeLoginResponse(), FakeNoAuthPassedResponse()] + [ FakeLoginResponse(), FakeNoAuthPassedResponse()] * 4) self.driver = qnap.QnapISCSIDriver( configuration=create_configuration( FAKE_USER, FAKE_PASSWORD, 'http://1.2.3.4:8080', '1.2.3.4', 'Pool1', True)) self.driver.do_setup('context') self.assertRaises(exception.VolumeBackendAPIException, self.driver.api_executor.get_iscsi_portal_info) @mock.patch('requests.request') def test_get_lun_info(self, mock_request): """Test get lun info.""" mock_request.side_effect = ([ FakeLoginResponse(), FakeGetBasicInfoResponse(), FakeLoginResponse(), FakeLunInfoResponse()]) self.driver = qnap.QnapISCSIDriver( configuration=create_configuration( FAKE_USER, FAKE_PASSWORD, 'http://1.2.3.4:8080', '1.2.3.4', 'Pool1', True)) self.driver.do_setup('context') self.driver.api_executor.get_lun_info() fake_params = {} fake_params['func'] = 'extra_get' fake_params['lunList'] = '1' fake_params['sid'] = 'fakeSid' fake_post_params = self.sanitize(fake_params) get_lun_info_url = ( 'http://1.2.3.4:8080/cgi-bin/disk/iscsi_portal_setting.cgi?' + fake_post_params) expected_call_list = [ mock.call('POST', fake_login_url, data=global_sanitized_params, headers=header, verify=False), mock.call('GET', fake_get_basic_info_url, data=None, headers=None, verify=False), mock.call('POST', fake_login_url, data=global_sanitized_params, headers=header, verify=False), mock.call('GET', get_lun_info_url, data=None, headers=None, verify=False)] self.assertEqual(expected_call_list, mock_request.call_args_list) @mock.patch('requests.request') def test_get_lun_info_positive_with_lun_index( self, mock_request): """Test get lun info.""" mock_request.side_effect = ([ FakeLoginResponse(), FakeGetBasicInfoResponse(), FakeLoginResponse(), FakeLunInfoResponse()]) self.driver = qnap.QnapISCSIDriver( configuration=create_configuration( FAKE_USER, FAKE_PASSWORD, 'http://1.2.3.4:8080', '1.2.3.4', 'Pool1', True)) self.driver.do_setup('context') self.driver.api_executor.get_lun_info(LUNIndex='fakeLunIndex') fake_params = {} fake_params['func'] = 'extra_get' fake_params['lunList'] = '1' fake_params['sid'] = 'fakeSid' fake_post_params = self.sanitize(fake_params) get_lun_info_url = ( 'http://1.2.3.4:8080/cgi-bin/disk/iscsi_portal_setting.cgi?' 
+ fake_post_params) expected_call_list = [ mock.call('POST', fake_login_url, data=global_sanitized_params, headers=header, verify=False), mock.call('GET', fake_get_basic_info_url, data=None, headers=None, verify=False), mock.call('POST', fake_login_url, data=global_sanitized_params, headers=header, verify=False), mock.call('GET', get_lun_info_url, data=None, headers=None, verify=False)] self.assertEqual(expected_call_list, mock_request.call_args_list) @mock.patch('requests.request') def test_get_lun_info_positive_with_lun_name( self, mock_request): """Test get lun info.""" mock_request.side_effect = ([ FakeLoginResponse(), FakeGetBasicInfoResponse(), FakeLoginResponse(), FakeLunInfoResponse()]) self.driver = qnap.QnapISCSIDriver( configuration=create_configuration( FAKE_USER, FAKE_PASSWORD, 'http://1.2.3.4:8080', '1.2.3.4', 'Pool1', True)) self.driver.do_setup('context') self.driver.api_executor.get_lun_info(LUNName='fakeLunName') fake_params = {} fake_params['func'] = 'extra_get' fake_params['lunList'] = '1' fake_params['sid'] = 'fakeSid' fake_post_params = self.sanitize(fake_params) get_lun_info_url = ( 'http://1.2.3.4:8080/cgi-bin/disk/iscsi_portal_setting.cgi?' + fake_post_params) expected_call_list = [ mock.call('POST', fake_login_url, data=global_sanitized_params, headers=header, verify=False), mock.call('GET', fake_get_basic_info_url, data=None, headers=None, verify=False), mock.call('POST', fake_login_url, data=global_sanitized_params, headers=header, verify=False), mock.call('GET', get_lun_info_url, data=None, headers=None, verify=False)] self.assertEqual(expected_call_list, mock_request.call_args_list) @mock.patch('requests.request') def test_get_lun_info_positive_with_lun_naa( self, mock_request): """Test get lun info.""" mock_request.side_effect = ([ FakeLoginResponse(), FakeGetBasicInfoResponse(), FakeLoginResponse(), FakeLunInfoResponse()]) self.driver = qnap.QnapISCSIDriver( configuration=create_configuration( FAKE_USER, FAKE_PASSWORD, 'http://1.2.3.4:8080', '1.2.3.4', 'Pool1', True)) self.driver.do_setup('context') self.driver.api_executor.get_lun_info(LUNNAA='fakeLunNaa') fake_params = {} fake_params['func'] = 'extra_get' fake_params['lunList'] = '1' fake_params['sid'] = 'fakeSid' fake_post_params = self.sanitize(fake_params) get_lun_info_url = ( 'http://1.2.3.4:8080/cgi-bin/disk/iscsi_portal_setting.cgi?' 
+ fake_post_params) expected_call_list = [ mock.call('POST', fake_login_url, data=global_sanitized_params, headers=header, verify=False), mock.call('GET', fake_get_basic_info_url, data=None, headers=None, verify=False), mock.call('POST', fake_login_url, data=global_sanitized_params, headers=header, verify=False), mock.call('GET', get_lun_info_url, data=None, headers=None, verify=False)] self.assertEqual(expected_call_list, mock_request.call_args_list) @mock.patch('requests.request') def test_get_lun_info_negative( self, mock_request): """Test get lun info.""" mock_request.side_effect = ([ FakeLoginResponse(), FakeGetBasicInfoResponse(), FakeLoginResponse(), FakeNoAuthPassedResponse()] + [ FakeLoginResponse(), FakeNoAuthPassedResponse()] * 4) self.driver = qnap.QnapISCSIDriver( configuration=create_configuration( FAKE_USER, FAKE_PASSWORD, 'http://1.2.3.4:8080', '1.2.3.4', 'Pool1', True)) self.driver.do_setup('context') self.assertRaises(exception.VolumeBackendAPIException, self.driver.api_executor.get_lun_info) @mock.patch('requests.request') def test_get_one_lun_info( self, mock_request): """Test get one lun info.""" mock_request.side_effect = ([ FakeLoginResponse(), FakeGetBasicInfoResponse(), FakeLoginResponse(), FakeOneLunInfoResponse()]) self.driver = qnap.QnapISCSIDriver( configuration=create_configuration( FAKE_USER, FAKE_PASSWORD, 'http://1.2.3.4:8080', '1.2.3.4', 'Pool1', True)) self.driver.do_setup('context') self.driver.api_executor.get_one_lun_info('fakeLunId') fake_params = {} fake_params['func'] = 'extra_get' fake_params['lun_info'] = '1' fake_params['lunID'] = 'fakeLunId' fake_params['sid'] = 'fakeSid' fake_post_params = self.sanitize(fake_params) get_lun_info_url = ( 'http://1.2.3.4:8080/cgi-bin/disk/iscsi_portal_setting.cgi?' + fake_post_params) expected_call_list = [ mock.call('POST', fake_login_url, data=global_sanitized_params, headers=header, verify=False), mock.call('GET', fake_get_basic_info_url, data=None, headers=None, verify=False), mock.call('POST', fake_login_url, data=global_sanitized_params, headers=header, verify=False), mock.call('GET', get_lun_info_url, data=None, headers=None, verify=False)] self.assertEqual(expected_call_list, mock_request.call_args_list) @mock.patch('requests.request') def test_get_one_lun_info_negative( self, mock_request): """Test get one lun info.""" mock_request.side_effect = ([ FakeLoginResponse(), FakeGetBasicInfoResponse(), FakeLoginResponse(), FakeNoAuthPassedResponse()] + [ FakeLoginResponse(), FakeNoAuthPassedResponse()] * 4) self.driver = qnap.QnapISCSIDriver( configuration=create_configuration( FAKE_USER, FAKE_PASSWORD, 'http://1.2.3.4:8080', '1.2.3.4', 'Pool1', True)) self.driver.do_setup('context') self.assertRaises(exception.VolumeBackendAPIException, self.driver.api_executor.get_one_lun_info, 'fakeLunId') @mock.patch('requests.request') def test_get_snapshot_info( self, mock_request): """Test get snapshot info.""" mock_request.side_effect = ([ FakeLoginResponse(), FakeGetBasicInfoResponse(), FakeLoginResponse(), FakeSnapshotInfoResponse()]) self.driver = qnap.QnapISCSIDriver( configuration=create_configuration( FAKE_USER, FAKE_PASSWORD, 'http://1.2.3.4:8080', '1.2.3.4', 'Pool1', True)) self.driver.do_setup('context') self.driver.api_executor.get_snapshot_info( lun_index='fakeLunIndex', snapshot_name='fakeSnapshotName') fake_params = {} fake_params['func'] = 'extra_get' fake_params['LUNIndex'] = 'fakeLunIndex' fake_params['snapshot_list'] = '1' fake_params['snap_start'] = '0' fake_params['snap_count'] = '100' 
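        # snap_start/snap_count above are presumably paging parameters
        # (start at entry 0, fetch up to 100 snapshot records); this test
        # only checks that they appear unchanged in the snapshot.cgi query
        # string generated by the executor.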
fake_params['sid'] = 'fakeSid' fake_post_params = self.sanitize(fake_params) get_snapshot_info_url = ( 'http://1.2.3.4:8080/cgi-bin/disk/snapshot.cgi?' + fake_post_params) expected_call_list = [ mock.call('POST', fake_login_url, data=global_sanitized_params, headers=header, verify=False), mock.call('GET', fake_get_basic_info_url, data=None, headers=None, verify=False), mock.call('POST', fake_login_url, data=global_sanitized_params, headers=header, verify=False), mock.call('GET', get_snapshot_info_url, data=None, headers=None, verify=False)] self.assertEqual(expected_call_list, mock_request.call_args_list) @mock.patch('requests.request') def test_get_snapshot_info_negative( self, mock_request): """Test get snapshot info.""" mock_request.side_effect = ([ FakeLoginResponse(), FakeGetBasicInfoResponse(), FakeLoginResponse(), FakeNoAuthPassedResponse()] + [ FakeLoginResponse(), FakeNoAuthPassedResponse()] * 4) self.driver = qnap.QnapISCSIDriver( configuration=create_configuration( FAKE_USER, FAKE_PASSWORD, 'http://1.2.3.4:8080', '1.2.3.4', 'Pool1', True)) self.driver.do_setup('context') self.assertRaises(exception.VolumeBackendAPIException, self.driver.api_executor.get_snapshot_info, lun_index='fakeLunIndex', snapshot_name='fakeSnapshotName') @mock.patch('requests.request') def test_get_snapshot_info_negative_with_wrong_result( self, mock_request): """Test get snapshot info.""" mock_request.side_effect = ([ FakeLoginResponse(), FakeGetBasicInfoResponse(), FakeLoginResponse(), FakeSnapshotInfoFailResponse()]) self.driver = qnap.QnapISCSIDriver( configuration=create_configuration( FAKE_USER, FAKE_PASSWORD, 'http://1.2.3.4:8080', '1.2.3.4', 'Pool1', True)) self.driver.do_setup('context') self.assertRaises(exception.VolumeBackendAPIException, self.driver.api_executor.get_snapshot_info, lun_index='fakeLunIndex', snapshot_name='fakeSnapshotName') @mock.patch('requests.request') def test_create_snapshot_api( self, mock_request): """Test create snapshot api.""" mock_request.side_effect = ([ FakeLoginResponse(), FakeGetBasicInfoResponse(), FakeLoginResponse(), FakeCreateSnapshotResponse()]) self.driver = qnap.QnapISCSIDriver( configuration=create_configuration( FAKE_USER, FAKE_PASSWORD, 'http://1.2.3.4:8080', '1.2.3.4', 'Pool1', True)) self.driver.do_setup('context') self.driver.api_executor.create_snapshot_api( 'fakeLunIndex', 'fakeSnapshotName') fake_params = {} fake_params['func'] = 'create_snapshot' fake_params['lunID'] = 'fakeLunIndex' fake_params['snapshot_name'] = 'fakeSnapshotName' fake_params['expire_min'] = '0' fake_params['vital'] = '1' fake_params['snapshot_type'] = '0' fake_params['sid'] = 'fakeSid' fake_post_params = self.sanitize(fake_params) create_snapshot_api_url = ( 'http://1.2.3.4:8080/cgi-bin/disk/snapshot.cgi?' 
+ fake_post_params) expected_call_list = [ mock.call('POST', fake_login_url, data=global_sanitized_params, headers=header, verify=False), mock.call('GET', fake_get_basic_info_url, data=None, headers=None, verify=False), mock.call('POST', fake_login_url, data=global_sanitized_params, headers=header, verify=False), mock.call('GET', create_snapshot_api_url, data=None, headers=None, verify=False)] self.assertEqual(expected_call_list, mock_request.call_args_list) @mock.patch('requests.request') def test_create_snapshot_api_negative( self, mock_request): """Test create snapshot api.""" mock_request.side_effect = ([ FakeLoginResponse(), FakeGetBasicInfoResponse(), FakeLoginResponse(), FakeNoAuthPassedResponse()] + [ FakeLoginResponse(), FakeNoAuthPassedResponse()] * 4) self.driver = qnap.QnapISCSIDriver( configuration=create_configuration( FAKE_USER, FAKE_PASSWORD, 'http://1.2.3.4:8080', '1.2.3.4', 'Pool1', True)) self.driver.do_setup('context') self.assertRaises(exception.VolumeBackendAPIException, self.driver.api_executor.create_snapshot_api, 'fakeLunIndex', 'fakeSnapshotName') @mock.patch('requests.request') def test_create_snapshot_api_negative_with_wrong_result( self, mock_request): """Test create snapshot api.""" mock_request.side_effect = ([ FakeLoginResponse(), FakeGetBasicInfoResponse(), FakeLoginResponse(), FakeCreateSnapshotFailResponse()]) self.driver = qnap.QnapISCSIDriver( configuration=create_configuration( FAKE_USER, FAKE_PASSWORD, 'http://1.2.3.4:8080', '1.2.3.4', 'Pool1', True)) self.driver.do_setup('context') self.assertRaises(exception.VolumeBackendAPIException, self.driver.api_executor.create_snapshot_api, 'fakeLunIndex', 'fakeSnapshotName') @mock.patch('requests.request') def test_delete_snapshot_api( self, mock_request): """Test api delete snapshot.""" mock_request.side_effect = ([ FakeLoginResponse(), FakeGetBasicInfoResponse(), FakeLoginResponse(), FakeCreateSnapshotResponse()]) self.driver = qnap.QnapISCSIDriver( configuration=create_configuration( FAKE_USER, FAKE_PASSWORD, 'http://1.2.3.4:8080', '1.2.3.4', 'Pool1', True)) self.driver.do_setup('context') self.driver.api_executor.delete_snapshot_api( 'fakeSnapshotId') fake_params = {} fake_params['func'] = 'del_snapshots' fake_params['snapshotID'] = 'fakeSnapshotId' fake_params['sid'] = 'fakeSid' fake_post_params = self.sanitize(fake_params) api_delete_snapshot_url = ( 'http://1.2.3.4:8080/cgi-bin/disk/snapshot.cgi?' 
            + fake_post_params)
        expected_call_list = [
            mock.call('POST', fake_login_url, data=global_sanitized_params,
                      headers=header, verify=False),
            mock.call('GET', fake_get_basic_info_url, data=None,
                      headers=None, verify=False),
            mock.call('POST', fake_login_url, data=global_sanitized_params,
                      headers=header, verify=False),
            mock.call('GET', api_delete_snapshot_url, data=None,
                      headers=None, verify=False)]
        self.assertEqual(expected_call_list, mock_request.call_args_list)

    @mock.patch('requests.request')
    def test_delete_snapshot_api_positive_without_snapshot(
            self, mock_request):
        """Test api delete snapshot."""
        mock_request.side_effect = ([
            FakeLoginResponse(), FakeGetBasicInfoResponse(),
            FakeLoginResponse(),
            FakeCreateSnapshotWithoutSnapshotResponse()])
        self.driver = qnap.QnapISCSIDriver(
            configuration=create_configuration(
                FAKE_USER, FAKE_PASSWORD, 'http://1.2.3.4:8080',
                '1.2.3.4', 'Pool1', True))
        self.driver.do_setup('context')
        self.driver.api_executor.delete_snapshot_api(
            'fakeSnapshotId')
        fake_params = {}
        fake_params['func'] = 'del_snapshots'
        fake_params['snapshotID'] = 'fakeSnapshotId'
        fake_params['sid'] = 'fakeSid'
        fake_post_params = self.sanitize(fake_params)
        api_delete_snapshot_url = (
            'http://1.2.3.4:8080/cgi-bin/disk/snapshot.cgi?'
            + fake_post_params)
        expected_call_list = [
            mock.call('POST', fake_login_url, data=global_sanitized_params,
                      headers=header, verify=False),
            mock.call('GET', fake_get_basic_info_url, data=None,
                      headers=None, verify=False),
            mock.call('POST', fake_login_url, data=global_sanitized_params,
                      headers=header, verify=False),
            mock.call('GET', api_delete_snapshot_url, data=None,
                      headers=None, verify=False)]
        self.assertEqual(expected_call_list, mock_request.call_args_list)

    @mock.patch('requests.request')
    def test_delete_snapshot_api_positive_without_lun(
            self, mock_request):
        """Test api delete snapshot."""
        mock_request.side_effect = ([
            FakeLoginResponse(), FakeGetBasicInfoResponse(),
            FakeLoginResponse(),
            FakeCreateSnapshotWithoutLunResponse()])
        self.driver = qnap.QnapISCSIDriver(
            configuration=create_configuration(
                FAKE_USER, FAKE_PASSWORD, 'http://1.2.3.4:8080',
                '1.2.3.4', 'Pool1', True))
        self.driver.do_setup('context')
        self.driver.api_executor.delete_snapshot_api(
            'fakeSnapshotId')
        fake_params = {}
        fake_params['func'] = 'del_snapshots'
        fake_params['snapshotID'] = 'fakeSnapshotId'
        fake_params['sid'] = 'fakeSid'
        fake_post_params = self.sanitize(fake_params)
        api_delete_snapshot_url = (
            'http://1.2.3.4:8080/cgi-bin/disk/snapshot.cgi?'
            + fake_post_params)
        expected_call_list = [
            mock.call('POST', fake_login_url, data=global_sanitized_params,
                      headers=header, verify=False),
            mock.call('GET', fake_get_basic_info_url, data=None,
                      headers=None, verify=False),
            mock.call('POST', fake_login_url, data=global_sanitized_params,
                      headers=header, verify=False),
            mock.call('GET', api_delete_snapshot_url, data=None,
                      headers=None, verify=False)]
        self.assertEqual(expected_call_list, mock_request.call_args_list)

    @mock.patch('requests.request')
    def test_delete_snapshot_api_negative(
            self, mock_request):
        """Test api delete snapshot."""
        mock_request.side_effect = ([
            FakeLoginResponse(), FakeGetBasicInfoResponse(),
            FakeLoginResponse(), FakeNoAuthPassedResponse()] + [
            FakeLoginResponse(), FakeNoAuthPassedResponse()] * 4)
        self.driver = qnap.QnapISCSIDriver(
            configuration=create_configuration(
                FAKE_USER, FAKE_PASSWORD, 'http://1.2.3.4:8080',
                '1.2.3.4', 'Pool1', True))
        self.driver.do_setup('context')
        self.assertRaises(exception.VolumeBackendAPIException,
                          self.driver.api_executor.delete_snapshot_api,
                          'fakeSnapshotId')

    @mock.patch('requests.request')
    def test_delete_snapshot_api_negative_with_wrong_result(
            self, mock_request):
        """Test api delete snapshot."""
        mock_request.side_effect = ([
            FakeLoginResponse(), FakeGetBasicInfoResponse(),
            FakeLoginResponse(), FakeCreateSnapshotFailResponse()])
        self.driver = qnap.QnapISCSIDriver(
            configuration=create_configuration(
                FAKE_USER, FAKE_PASSWORD, 'http://1.2.3.4:8080',
                '1.2.3.4', 'Pool1', True))
        self.driver.do_setup('context')
        self.assertRaises(exception.VolumeBackendAPIException,
                          self.driver.api_executor.delete_snapshot_api,
                          'fakeSnapshotId')

    @mock.patch('requests.request')
    def test_clone_snapshot(
            self, mock_request):
        """Test clone snapshot."""
        mock_request.side_effect = ([
            FakeLoginResponse(), FakeGetBasicInfoResponse(),
            FakeLoginResponse(), FakeCreateSnapshotResponse()])
        self.driver = qnap.QnapISCSIDriver(
            configuration=create_configuration(
                FAKE_USER, FAKE_PASSWORD, 'http://1.2.3.4:8080',
                '1.2.3.4', 'Pool1', True))
        self.driver.do_setup('context')
        self.driver.api_executor.clone_snapshot(
            'fakeSnapshotId', 'fakeLunName')
        fake_params = {}
        fake_params['func'] = 'clone_qsnapshot'
        fake_params['by_lun'] = '1'
        fake_params['snapshotID'] = 'fakeSnapshotId'
        fake_params['new_name'] = 'fakeLunName'
        fake_params['sid'] = 'fakeSid'
        fake_post_params = self.sanitize(fake_params)
        clone_snapshot_url = (
            'http://1.2.3.4:8080/cgi-bin/disk/snapshot.cgi?'
+ fake_post_params) expected_call_list = [ mock.call('POST', fake_login_url, data=global_sanitized_params, headers=header, verify=False), mock.call('GET', fake_get_basic_info_url, data=None, headers=None, verify=False), mock.call('POST', fake_login_url, data=global_sanitized_params, headers=header, verify=False), mock.call('GET', clone_snapshot_url, data=None, headers=None, verify=False)] self.assertEqual(expected_call_list, mock_request.call_args_list) @mock.patch('requests.request') def test_clone_snapshot_negative( self, mock_request): """Test clone snapshot.""" mock_request.side_effect = ([ FakeLoginResponse(), FakeGetBasicInfoResponse(), FakeLoginResponse(), FakeNoAuthPassedResponse()] + [ FakeLoginResponse(), FakeNoAuthPassedResponse()] * 4) self.driver = qnap.QnapISCSIDriver( configuration=create_configuration( FAKE_USER, FAKE_PASSWORD, 'http://1.2.3.4:8080', '1.2.3.4', 'Pool1', True)) self.driver.do_setup('context') self.assertRaises(exception.VolumeBackendAPIException, self.driver.api_executor.clone_snapshot, 'fakeSnapshotId', 'fakeLunName') @mock.patch('requests.request') def test_clone_snapshot_negative_with_wrong_result( self, mock_request): """Test clone snapshot.""" mock_request.side_effect = ([ FakeLoginResponse(), FakeGetBasicInfoResponse(), FakeLoginResponse(), FakeCreateSnapshotFailResponse()]) self.driver = qnap.QnapISCSIDriver( configuration=create_configuration( FAKE_USER, FAKE_PASSWORD, 'http://1.2.3.4:8080', '1.2.3.4', 'Pool1', True)) self.driver.do_setup('context') self.assertRaises(exception.VolumeBackendAPIException, self.driver.api_executor.clone_snapshot, 'fakeSnapshotId', 'fakeLunName') @mock.patch('requests.request') def test_edit_lun( self, mock_request): """Test edit lun.""" mock_request.side_effect = ([ FakeLoginResponse(), FakeGetBasicInfoResponse(), FakeLoginResponse(), FakeLunInfoResponse()]) self.driver = qnap.QnapISCSIDriver( configuration=create_configuration( FAKE_USER, FAKE_PASSWORD, 'http://1.2.3.4:8080', '1.2.3.4', 'Pool1', True)) self.driver.do_setup('context') fake_lun = {'LUNName': 'fakeLunName', 'LUNCapacity': 100, 'LUNIndex': 'fakeLunIndex', 'LUNThinAllocate': False, 'LUNPath': 'fakeLunPath', 'LUNStatus': 'fakeLunStatus'} self.driver.api_executor.edit_lun(fake_lun) fake_params = {} fake_params['func'] = 'edit_lun' fake_params['LUNName'] = 'fakeLunName' fake_params['LUNCapacity'] = 100 fake_params['LUNIndex'] = 'fakeLunIndex' fake_params['LUNThinAllocate'] = False fake_params['LUNPath'] = 'fakeLunPath' fake_params['LUNStatus'] = 'fakeLunStatus' fake_params['sid'] = 'fakeSid' fake_post_params = self.sanitize(fake_params) edit_lun_url = ( 'http://1.2.3.4:8080/cgi-bin/disk/iscsi_lun_setting.cgi?' 
+ fake_post_params) expected_call_list = [ mock.call('POST', fake_login_url, data=global_sanitized_params, headers=header, verify=False), mock.call('GET', fake_get_basic_info_url, data=None, headers=None, verify=False), mock.call('POST', fake_login_url, data=global_sanitized_params, headers=header, verify=False), mock.call('GET', edit_lun_url, data=None, headers=None, verify=False)] self.assertEqual(expected_call_list, mock_request.call_args_list) @mock.patch('requests.request') def test_edit_lun_negative( self, mock_request): """Test edit lun.""" mock_request.side_effect = ([ FakeLoginResponse(), FakeGetBasicInfoResponse(), FakeLoginResponse(), FakeNoAuthPassedResponse()] + [ FakeLoginResponse(), FakeNoAuthPassedResponse()] * 4) self.driver = qnap.QnapISCSIDriver( configuration=create_configuration( FAKE_USER, FAKE_PASSWORD, 'http://1.2.3.4:8080', '1.2.3.4', 'Pool1', True)) self.driver.do_setup('context') fake_lun = {'LUNName': 'fakeLunName', 'LUNCapacity': 100, 'LUNIndex': 'fakeLunIndex', 'LUNThinAllocate': False, 'LUNPath': 'fakeLunPath', 'LUNStatus': 'fakeLunStatus'} self.assertRaises(exception.VolumeBackendAPIException, self.driver.api_executor.edit_lun, fake_lun) @mock.patch('requests.request') def test_edit_lun_negative_with_wrong_result( self, mock_request): """Test edit lun.""" mock_request.side_effect = ([ FakeLoginResponse(), FakeGetBasicInfoResponse(), FakeLoginResponse(), FakeLunInfoFailResponse()]) self.driver = qnap.QnapISCSIDriver( configuration=create_configuration( FAKE_USER, FAKE_PASSWORD, 'http://1.2.3.4:8080', '1.2.3.4', 'Pool1', True)) self.driver.do_setup('context') fake_lun = {'LUNName': 'fakeLunName', 'LUNCapacity': 100, 'LUNIndex': 'fakeLunIndex', 'LUNThinAllocate': False, 'LUNPath': 'fakeLunPath', 'LUNStatus': 'fakeLunStatus'} self.assertRaises(exception.VolumeBackendAPIException, self.driver.api_executor.edit_lun, fake_lun) @mock.patch('requests.request') def test_get_all_iscsi_portal_setting( self, mock_request): """Test get all iscsi portal setting.""" mock_request.side_effect = ([ FakeLoginResponse(), FakeGetBasicInfoResponse(), FakeLoginResponse(), FakeLunInfoResponse()]) self.driver = qnap.QnapISCSIDriver( configuration=create_configuration( FAKE_USER, FAKE_PASSWORD, 'http://1.2.3.4:8080', '1.2.3.4', 'Pool1', True)) self.driver.do_setup('context') self.driver.api_executor.get_all_iscsi_portal_setting() fake_params = {} fake_params['func'] = 'get_all' fake_params['sid'] = 'fakeSid' fake_post_params = self.sanitize(fake_params) get_all_iscsi_portal_setting_url = ( 'http://1.2.3.4:8080/cgi-bin/disk/iscsi_portal_setting.cgi?' 
+ fake_post_params) expected_call_list = [ mock.call('POST', fake_login_url, data=global_sanitized_params, headers=header, verify=False), mock.call('GET', fake_get_basic_info_url, data=None, headers=None, verify=False), mock.call('POST', fake_login_url, data=global_sanitized_params, headers=header, verify=False), mock.call('GET', get_all_iscsi_portal_setting_url, data=None, headers=None, verify=False)] self.assertEqual(expected_call_list, mock_request.call_args_list) @mock.patch('requests.request') def test_get_ethernet_ip_with_type_data( self, mock_request): """Test get ethernet ip.""" mock_request.side_effect = ([ FakeLoginResponse(), FakeGetBasicInfoResponse(), FakeLoginResponse(), FakeGetAllEthernetIp()]) self.driver = qnap.QnapISCSIDriver( configuration=create_configuration( FAKE_USER, FAKE_PASSWORD, 'http://1.2.3.4:8080', '1.2.3.4', 'Pool1', True)) self.driver.do_setup('context') self.driver.api_executor.get_ethernet_ip(type='data') fake_params = {} fake_params['subfunc'] = 'net_setting' fake_params['sid'] = 'fakeSid' fake_post_params = self.sanitize(fake_params) get_ethernet_ip_url = ( 'http://1.2.3.4:8080/cgi-bin/sys/sysRequest.cgi?' + fake_post_params) expected_call_list = [ mock.call('POST', fake_login_url, data=global_sanitized_params, headers=header, verify=False), mock.call('GET', fake_get_basic_info_url, data=None, headers=None, verify=False), mock.call('POST', fake_login_url, data=global_sanitized_params, headers=header, verify=False), mock.call('GET', get_ethernet_ip_url, data=None, headers=None, verify=False)] self.assertEqual(expected_call_list, mock_request.call_args_list) @mock.patch('requests.request') def test_get_ethernet_ip_with_type_manage( self, mock_request): """Test get ethernet ip.""" mock_request.side_effect = ([ FakeLoginResponse(), FakeGetBasicInfoResponse(), FakeLoginResponse(), FakeGetAllEthernetIp()]) self.driver = qnap.QnapISCSIDriver( configuration=create_configuration( FAKE_USER, FAKE_PASSWORD, 'http://1.2.3.4:8080', '1.2.3.4', 'Pool1', True)) self.driver.do_setup('context') self.driver.api_executor.get_ethernet_ip(type='manage') fake_params = {} fake_params['subfunc'] = 'net_setting' fake_params['sid'] = 'fakeSid' fake_post_params = self.sanitize(fake_params) get_ethernet_ip_url = ( 'http://1.2.3.4:8080/cgi-bin/sys/sysRequest.cgi?' + fake_post_params) expected_call_list = [ mock.call('POST', fake_login_url, data=global_sanitized_params, headers=header, verify=False), mock.call('GET', fake_get_basic_info_url, data=None, headers=None, verify=False), mock.call('POST', fake_login_url, data=global_sanitized_params, headers=header, verify=False), mock.call('GET', get_ethernet_ip_url, data=None, headers=None, verify=False)] self.assertEqual(expected_call_list, mock_request.call_args_list) @mock.patch('requests.request') def test_get_ethernet_ip_with_type_all(self, mock_request): """Test get ethernet ip.""" mock_request.side_effect = ([ FakeLoginResponse(), FakeGetBasicInfoResponse(), FakeLoginResponse(), FakeGetAllEthernetIp()]) self.driver = qnap.QnapISCSIDriver( configuration=create_configuration( FAKE_USER, FAKE_PASSWORD, 'http://1.2.3.4:8080', '1.2.3.4', 'Pool1', True)) self.driver.do_setup('context') self.driver.api_executor.get_ethernet_ip(type='all') fake_params = {} fake_params['subfunc'] = 'net_setting' fake_params['sid'] = 'fakeSid' fake_post_params = self.sanitize(fake_params) get_ethernet_ip_url = ( 'http://1.2.3.4:8080/cgi-bin/sys/sysRequest.cgi?' 
+ fake_post_params) expected_call_list = [ mock.call('POST', fake_login_url, data=global_sanitized_params, headers=header, verify=False), mock.call('GET', fake_get_basic_info_url, data=None, headers=None, verify=False), mock.call('POST', fake_login_url, data=global_sanitized_params, headers=header, verify=False), mock.call('GET', get_ethernet_ip_url, data=None, headers=None, verify=False)] self.assertEqual(expected_call_list, mock_request.call_args_list) @mock.patch('requests.request') def test_get_ethernet_ip_negative( self, mock_request): """Test get ethernet ip.""" mock_request.side_effect = ([ FakeLoginResponse(), FakeGetBasicInfoResponse(), FakeLoginResponse(), FakeNoAuthPassedResponse()] + [ FakeLoginResponse(), FakeNoAuthPassedResponse()] * 4) self.driver = qnap.QnapISCSIDriver( configuration=create_configuration( FAKE_USER, FAKE_PASSWORD, 'http://1.2.3.4:8080', '1.2.3.4', 'Pool1', True)) self.driver.do_setup('context') self.assertRaises(exception.VolumeBackendAPIException, self.driver.api_executor.get_ethernet_ip, type='data') @mock.patch('requests.request') def test_get_target_info( self, mock_request): """Test get target info.""" mock_request.side_effect = ([ FakeLoginResponse(), FakeGetBasicInfoResponse(), FakeLoginResponse(), FakeTargetInfo()]) self.driver = qnap.QnapISCSIDriver( configuration=create_configuration( FAKE_USER, FAKE_PASSWORD, 'http://1.2.3.4:8080', '1.2.3.4', 'Pool1', True)) self.driver.do_setup('context') self.driver.api_executor.get_target_info('fakeTargetIndex') fake_params = {} fake_params['func'] = 'extra_get' fake_params['targetInfo'] = 1 fake_params['targetIndex'] = 'fakeTargetIndex' fake_params['sid'] = 'fakeSid' fake_post_params = self.sanitize(fake_params) get_target_info_url = ( 'http://1.2.3.4:8080/cgi-bin/disk/iscsi_portal_setting.cgi?' 
+ fake_post_params) expected_call_list = [ mock.call('POST', fake_login_url, data=global_sanitized_params, headers=header, verify=False), mock.call('GET', fake_get_basic_info_url, data=None, headers=None, verify=False), mock.call('POST', fake_login_url, data=global_sanitized_params, headers=header, verify=False), mock.call('GET', get_target_info_url, data=None, headers=None, verify=False)] self.assertEqual(expected_call_list, mock_request.call_args_list) @mock.patch('requests.request') def test_get_target_info_negative( self, mock_request): """Test get target info.""" mock_request.side_effect = ([ FakeLoginResponse(), FakeGetBasicInfoResponse(), FakeLoginResponse(), FakeNoAuthPassedResponse()] + [ FakeLoginResponse(), FakeNoAuthPassedResponse()] * 4) self.driver = qnap.QnapISCSIDriver( configuration=create_configuration( FAKE_USER, FAKE_PASSWORD, 'http://1.2.3.4:8080', '1.2.3.4', 'Pool1', True)) self.driver.do_setup('context') self.assertRaises(exception.VolumeBackendAPIException, self.driver.api_executor.get_target_info, 'fakeTargetIndex') @mock.patch('requests.request') def test_get_target_info_negative_with_wrong_result( self, mock_request): """Test get target info.""" mock_request.side_effect = ([ FakeLoginResponse(), FakeGetBasicInfoResponse(), FakeLoginResponse(), FakeTargetInfoFail()]) self.driver = qnap.QnapISCSIDriver( configuration=create_configuration( FAKE_USER, FAKE_PASSWORD, 'http://1.2.3.4:8080', '1.2.3.4', 'Pool1', True)) self.driver.do_setup('context') self.assertRaises(exception.VolumeBackendAPIException, self.driver.api_executor.get_target_info, 'fakeTargetIndex') @mock.patch('requests.request') def test_get_target_info_by_initiator( self, mock_request): """Test get target info by initiator.""" mock_request.side_effect = ([ FakeLoginResponse(), FakeGetBasicInfo114Response(), FakeLoginResponse(), FakeTargetInfoByInitiator()]) self.driver = qnap.QnapISCSIDriver( configuration=create_configuration( FAKE_USER, FAKE_PASSWORD, 'http://1.2.3.4:8080', '1.2.3.4', 'Pool1', True)) self.driver.do_setup('context') self.driver.api_executor.get_target_info_by_initiator( 'fakeInitiatorIQN') fake_params = {} fake_params['func'] = 'extra_get' fake_params['initiatorIQN'] = 'fakeInitiatorIQN' fake_params['sid'] = 'fakeSid' fake_post_params = self.sanitize(fake_params) get_target_info_url = ( 'http://1.2.3.4:8080/cgi-bin/disk/iscsi_portal_setting.cgi?' + fake_post_params) expected_call_list = [ mock.call('POST', fake_login_url, data=global_sanitized_params, headers=header, verify=False), mock.call('GET', fake_get_basic_info_url, data=None, headers=None, verify=False), mock.call('POST', fake_login_url, data=global_sanitized_params, headers=header, verify=False), mock.call('GET', get_target_info_url, data=None, headers=None, verify=False)] self.assertEqual(expected_call_list, mock_request.call_args_list) @mock.patch('requests.request') def test_get_target_info_by_initiator_negative( self, mock_request): """Test get target info by initiator.""" mock_request.side_effect = ([ FakeLoginResponse(), FakeGetBasicInfo114Response(), FakeLoginResponse(), FakeNoAuthPassedResponse()] + [ FakeLoginResponse(), FakeNoAuthPassedResponse()] * 4) self.driver = qnap.QnapISCSIDriver( configuration=create_configuration( FAKE_USER, FAKE_PASSWORD, 'http://1.2.3.4:8080', '1.2.3.4', 'Pool1', True)) self.driver.do_setup('context') self.assertRaises(exception.VolumeBackendAPIException, self.driver.api_executor. 
get_target_info_by_initiator, 'fakeInitiatorIQN') @mock.patch('requests.request') def test_get_target_info_by_initiator_with_wrong_result( self, mock_request): """Test get target info by initiator.""" mock_request.side_effect = ([ FakeLoginResponse(), FakeGetBasicInfo114Response(), FakeLoginResponse(), FakeTargetInfoByInitiatorFail()]) self.driver = qnap.QnapISCSIDriver( configuration=create_configuration( FAKE_USER, FAKE_PASSWORD, 'http://1.2.3.4:8080', '1.2.3.4', 'Pool1', True)) self.driver.do_setup('context') self.driver.api_executor.get_target_info_by_initiator( 'fakeInitiatorIQN') fake_params = {} fake_params['func'] = 'extra_get' fake_params['initiatorIQN'] = 'fakeInitiatorIQN' fake_params['sid'] = 'fakeSid' fake_post_params = self.sanitize(fake_params) get_target_info_url = ( 'http://1.2.3.4:8080/cgi-bin/disk/iscsi_portal_setting.cgi?' + fake_post_params) expected_call_list = [ mock.call('POST', fake_login_url, data=global_sanitized_params, headers=header, verify=False), mock.call('GET', fake_get_basic_info_url, data=None, headers=None, verify=False), mock.call('POST', fake_login_url, data=global_sanitized_params, headers=header, verify=False), mock.call('GET', get_target_info_url, data=None, headers=None, verify=False)] self.assertEqual(expected_call_list, mock_request.call_args_list) class QnapAPIExecutorTsTestCase(QnapDriverBaseTestCase): """Tests QnapAPIExecutorTS.""" @mock.patch('requests.request') def test_create_lun_positive_with_thin_allocate( self, mock_request): """Test create lun.""" fake_volume = VolumeClass( 'fakeDisplayName', 'fakeId', 100, 'fakeLunName') mock_request.side_effect = ([ FakeLoginResponse(), FakeGetBasicInfoTsResponse(), FakeLoginResponse(), FakeCreateLunResponse()]) self.driver = qnap.QnapISCSIDriver( configuration=create_configuration( FAKE_USER, FAKE_PASSWORD, 'http://1.2.3.4:8080', '1.2.3.4', 'Storage Pool 1', True)) self.driver.do_setup('context') self.assertEqual( 'fakeLunIndex', self.driver.api_executor.create_lun( fake_volume, 'fakepool', 'fakeLun', True, False, True, False)) fake_params = {} fake_params['func'] = 'add_lun' fake_params['FileIO'] = 'no' fake_params['LUNThinAllocate'] = '1' fake_params['LUNName'] = 'fakeLun' fake_params['LUNPath'] = 'fakeLun' fake_params['poolID'] = 'fakepool' fake_params['lv_ifssd'] = 'no' fake_params['LUNCapacity'] = 100 fake_params['LUNSectorSize'] = '512' fake_params['lv_threshold'] = '80' fake_params['sid'] = 'fakeSid' fake_post_params = self.sanitize(fake_params) create_lun_url = ( 'http://1.2.3.4:8080/cgi-bin/disk/iscsi_lun_setting.cgi?' 
+ fake_post_params) expected_call_list = [ mock.call('POST', fake_login_url, data=global_sanitized_params, headers=header, verify=False), mock.call('GET', fake_get_basic_info_url, data=None, headers=None, verify=False), mock.call('POST', fake_login_url, data=global_sanitized_params, headers=header, verify=False), mock.call('GET', create_lun_url, data=None, headers=None, verify=False)] self.assertEqual(expected_call_list, mock_request.call_args_list) @mock.patch('requests.request') def test_create_lun_positive_without_thin_allocate( self, mock_request): """Test create lun.""" fake_volume = VolumeClass( 'fakeDisplayName', 'fakeId', 100, 'fakeLunName') mock_request.side_effect = ([ FakeLoginResponse(), FakeGetBasicInfoTsResponse(), FakeLoginResponse(), FakeCreateLunResponse()]) self.driver = qnap.QnapISCSIDriver( configuration=create_configuration( FAKE_USER, FAKE_PASSWORD, 'http://1.2.3.4:8080', '1.2.3.4', 'Storage Pool 1', True)) self.driver.do_setup('context') self.assertEqual( 'fakeLunIndex', self.driver.api_executor.create_lun( fake_volume, 'fakepool', 'fakeLun', False, False, True, False)) fake_params = {} fake_params['func'] = 'add_lun' fake_params['FileIO'] = 'no' fake_params['LUNThinAllocate'] = '0' fake_params['LUNName'] = 'fakeLun' fake_params['LUNPath'] = 'fakeLun' fake_params['poolID'] = 'fakepool' fake_params['lv_ifssd'] = 'no' fake_params['LUNCapacity'] = 100 fake_params['LUNSectorSize'] = '512' fake_params['lv_threshold'] = '80' fake_params['sid'] = 'fakeSid' fake_post_params = self.sanitize(fake_params) create_lun_url = ( 'http://1.2.3.4:8080/cgi-bin/disk/iscsi_lun_setting.cgi?' + fake_post_params) expected_call_list = [ mock.call('POST', fake_login_url, data=global_sanitized_params, headers=header, verify=False), mock.call('GET', fake_get_basic_info_url, data=None, headers=None, verify=False), mock.call('POST', fake_login_url, data=global_sanitized_params, headers=header, verify=False), mock.call('GET', create_lun_url, data=None, headers=None, verify=False)] self.assertEqual(expected_call_list, mock_request.call_args_list) @mock.patch('requests.request') def test_create_lun_negative( self, mock_request): """Test create lun.""" fake_volume = VolumeClass( 'fakeDisplayName', 'fakeId', 100, 'fakeLunName') mock_request.side_effect = ([ FakeLoginResponse(), FakeGetBasicInfoTsResponse(), FakeLoginResponse(), FakeNoAuthPassedResponse()] + [ FakeLoginResponse(), FakeNoAuthPassedResponse()] * 4) self.driver = qnap.QnapISCSIDriver( configuration=create_configuration( FAKE_USER, FAKE_PASSWORD, 'http://1.2.3.4:8080', '1.2.3.4', 'Storage Pool 1', True)) self.driver.do_setup('context') self.assertRaises(exception.VolumeBackendAPIException, self.driver.api_executor.create_lun, fake_volume, 'fakepool', 'fakeLun', 'False', 'False', 'True', 'False') @mock.patch('requests.request') def test_create_lun_negative_with_wrong_result( self, mock_request): """Test create lun.""" fake_volume = VolumeClass( 'fakeDisplayName', 'fakeId', 100, 'fakeLunName') mock_request.side_effect = ([ FakeLoginResponse(), FakeGetBasicInfoTsResponse(), FakeLoginResponse(), FakeCreateLunFailResponse()]) self.driver = qnap.QnapISCSIDriver( configuration=create_configuration( FAKE_USER, FAKE_PASSWORD, 'http://1.2.3.4:8080', '1.2.3.4', 'Storage Pool 1', True)) self.driver.do_setup('context') self.assertRaises(exception.VolumeBackendAPIException, self.driver.api_executor.create_lun, fake_volume, 'fakepool', 'fakeLun', 'False', 'False', 'True', 'False') @mock.patch('requests.request') def test_delete_lun( self, mock_request): 
"""Test delete lun.""" mock_request.side_effect = ([ FakeLoginResponse(), FakeGetBasicInfoTsResponse(), FakeLoginResponse(), FakeCreateLunResponse()]) self.driver = qnap.QnapISCSIDriver( configuration=create_configuration( FAKE_USER, FAKE_PASSWORD, 'http://1.2.3.4:8080', '1.2.3.4', 'Storage Pool 1', True)) self.driver.do_setup('context') self.driver.api_executor.delete_lun('fakeLunIndex') fake_params = {} fake_params['func'] = 'remove_lun' fake_params['run_background'] = '1' fake_params['ha_sync'] = '1' fake_params['LUNIndex'] = 'fakeLunIndex' fake_params['sid'] = 'fakeSid' fake_post_params = self.sanitize(fake_params) delete_lun_url = ( 'http://1.2.3.4:8080/cgi-bin/disk/iscsi_lun_setting.cgi?' + fake_post_params) expected_call_list = [ mock.call('POST', fake_login_url, data=global_sanitized_params, headers=header, verify=False), mock.call('GET', fake_get_basic_info_url, data=None, headers=None, verify=False), mock.call('POST', fake_login_url, data=global_sanitized_params, headers=header, verify=False), mock.call('GET', delete_lun_url, data=None, headers=None, verify=False)] self.assertEqual(expected_call_list, mock_request.call_args_list) @mock.patch('requests.request') def test_delete_lun_negative( self, mock_request): """Test delete lun.""" mock_request.side_effect = ([ FakeLoginResponse(), FakeGetBasicInfoTsResponse(), FakeLoginResponse(), FakeNoAuthPassedResponse()] + [ FakeLoginResponse(), FakeNoAuthPassedResponse()] * 4) self.driver = qnap.QnapISCSIDriver( configuration=create_configuration( FAKE_USER, FAKE_PASSWORD, 'http://1.2.3.4:8080', '1.2.3.4', 'Storage Pool 1', True)) self.driver.do_setup('context') self.assertRaises(exception.VolumeBackendAPIException, self.driver.api_executor.delete_lun, 'fakeLunIndex') @mock.patch('requests.request') def test_delete_lun_negative_with_wrong_result( self, mock_request): """Test delete lun.""" mock_request.side_effect = ([ FakeLoginResponse(), FakeGetBasicInfoTsResponse(), FakeLoginResponse(), FakeCreateLunFailResponse()]) self.driver = qnap.QnapISCSIDriver( configuration=create_configuration( FAKE_USER, FAKE_PASSWORD, 'http://1.2.3.4:8080', '1.2.3.4', 'Storage Pool 1', True)) self.driver.do_setup('context') self.assertRaises(exception.VolumeBackendAPIException, self.driver.api_executor.delete_lun, 'fakeLunIndex') @mock.patch('requests.request') def test_delete_lun_positive_with_busy_result( self, mock_request): """Test delete lun.""" mock_request.side_effect = ([ FakeLoginResponse(), FakeGetBasicInfoTsResponse(), FakeLoginResponse(), FakeCreateLunBusyResponse()]) self.driver = qnap.QnapISCSIDriver( configuration=create_configuration( FAKE_USER, FAKE_PASSWORD, 'http://1.2.3.4:8080', '1.2.3.4', 'Storage Pool 1', True)) self.driver.do_setup('context') self.driver.api_executor.delete_lun('fakeLunIndex') fake_params = {} fake_params['func'] = 'remove_lun' fake_params['run_background'] = '1' fake_params['ha_sync'] = '1' fake_params['LUNIndex'] = 'fakeLunIndex' fake_params['sid'] = 'fakeSid' fake_post_params = self.sanitize(fake_params) delete_lun_url = ( 'http://1.2.3.4:8080/cgi-bin/disk/iscsi_lun_setting.cgi?' 
+ fake_post_params) expected_call_list = [ mock.call('POST', fake_login_url, data=global_sanitized_params, headers=header, verify=False), mock.call('GET', fake_get_basic_info_url, data=None, headers=None, verify=False), mock.call('POST', fake_login_url, data=global_sanitized_params, headers=header, verify=False), mock.call('GET', delete_lun_url, data=None, headers=None, verify=False)] self.assertEqual(expected_call_list, mock_request.call_args_list) @mock.patch('requests.request') def test_map_lun( self, mock_request): """Test map lun.""" mock_request.side_effect = ([ FakeLoginResponse(), FakeGetBasicInfoTsResponse(), FakeLoginResponse(), FakeCreateLunResponse()]) self.driver = qnap.QnapISCSIDriver( configuration=create_configuration( FAKE_USER, FAKE_PASSWORD, 'http://1.2.3.4:8080', '1.2.3.4', 'Storage Pool 1', True)) self.driver.do_setup('context') self.driver.api_executor.map_lun( 'fakeLunIndex', 'fakeTargetIndex') fake_params = {} fake_params['func'] = 'add_lun' fake_params['LUNIndex'] = 'fakeLunIndex' fake_params['targetIndex'] = 'fakeTargetIndex' fake_params['sid'] = 'fakeSid' fake_post_params = self.sanitize(fake_params) map_lun_url = ( 'http://1.2.3.4:8080/cgi-bin/disk/iscsi_target_setting.cgi?' + fake_post_params) expected_call_list = [ mock.call('POST', fake_login_url, data=global_sanitized_params, headers=header, verify=False), mock.call('GET', fake_get_basic_info_url, data=None, headers=None, verify=False), mock.call('POST', fake_login_url, data=global_sanitized_params, headers=header, verify=False), mock.call('GET', map_lun_url, data=None, headers=None, verify=False)] self.assertEqual(expected_call_list, mock_request.call_args_list) @mock.patch('requests.request') def test_map_lun_negative( self, mock_request): """Test map lun.""" mock_request.side_effect = ([ FakeLoginResponse(), FakeGetBasicInfoTsResponse(), FakeLoginResponse(), FakeNoAuthPassedResponse()] + [ FakeLoginResponse(), FakeNoAuthPassedResponse()] * 4) self.driver = qnap.QnapISCSIDriver( configuration=create_configuration( FAKE_USER, FAKE_PASSWORD, 'http://1.2.3.4:8080', '1.2.3.4', 'Storage Pool 1', True)) self.driver.do_setup('context') self.assertRaises(exception.VolumeBackendAPIException, self.driver.api_executor.map_lun, 'fakeLunIndex', 'fakeTargetIndex') @mock.patch('requests.request') def test_map_lun_negative_with_wrong_result( self, mock_request): """Test map lun.""" mock_request.side_effect = ([ FakeLoginResponse(), FakeGetBasicInfoTsResponse(), FakeLoginResponse(), FakeCreateLunFailResponse()]) self.driver = qnap.QnapISCSIDriver( configuration=create_configuration( FAKE_USER, FAKE_PASSWORD, 'http://1.2.3.4:8080', '1.2.3.4', 'Storage Pool 1', True)) self.driver.do_setup('context') self.assertRaises(exception.VolumeBackendAPIException, self.driver.api_executor.map_lun, 'fakeLunIndex', 'fakeTargetIndex') @mock.patch('requests.request') def test_disable_lun( self, mock_request): """Test disable lun.""" mock_request.side_effect = ([ FakeLoginResponse(), FakeGetBasicInfoTsResponse(), FakeLoginResponse(), FakeCreateLunResponse()]) self.driver = qnap.QnapISCSIDriver( configuration=create_configuration( FAKE_USER, FAKE_PASSWORD, 'http://1.2.3.4:8080', '1.2.3.4', 'Storage Pool 1', True)) self.driver.do_setup('context') self.driver.api_executor.disable_lun( 'fakeLunIndex', 'fakeTargetIndex') fake_params = {} fake_params['func'] = 'edit_lun' fake_params['LUNIndex'] = 'fakeLunIndex' fake_params['targetIndex'] = 'fakeTargetIndex' fake_params['LUNEnable'] = 0 fake_params['sid'] = 'fakeSid' fake_post_params = 
self.sanitize(fake_params) unmap_lun_url = ( 'http://1.2.3.4:8080/cgi-bin/disk/iscsi_target_setting.cgi?' + fake_post_params) expected_call_list = [ mock.call('POST', fake_login_url, data=global_sanitized_params, headers=header, verify=False), mock.call('GET', fake_get_basic_info_url, data=None, headers=None, verify=False), mock.call('POST', fake_login_url, data=global_sanitized_params, headers=header, verify=False), mock.call('GET', unmap_lun_url, data=None, headers=None, verify=False)] self.assertEqual(expected_call_list, mock_request.call_args_list) @mock.patch('requests.request') def test_disable_lun_negative( self, mock_request): """Test disable lun.""" mock_request.side_effect = ([ FakeLoginResponse(), FakeGetBasicInfoTsResponse(), FakeLoginResponse(), FakeNoAuthPassedResponse()] + [ FakeLoginResponse(), FakeNoAuthPassedResponse()] * 4) self.driver = qnap.QnapISCSIDriver( configuration=create_configuration( FAKE_USER, FAKE_PASSWORD, 'http://1.2.3.4:8080', '1.2.3.4', 'Storage Pool 1', True)) self.driver.do_setup('context') self.assertRaises(exception.VolumeBackendAPIException, self.driver.api_executor.disable_lun, 'fakeLunIndex', 'fakeTargetIndex') @mock.patch('requests.request') def test_disable_lun_negative_with_wrong_result( self, mock_request): """Test disable lun.""" mock_request.side_effect = ([ FakeLoginResponse(), FakeGetBasicInfoTsResponse(), FakeLoginResponse(), FakeCreateLunFailResponse()]) self.driver = qnap.QnapISCSIDriver( configuration=create_configuration( FAKE_USER, FAKE_PASSWORD, 'http://1.2.3.4:8080', '1.2.3.4', 'Storage Pool 1', True)) self.driver.do_setup('context') self.assertRaises(exception.VolumeBackendAPIException, self.driver.api_executor.disable_lun, 'fakeLunIndex', 'fakeTargetIndex') @mock.patch('requests.request') def test_unmap_lun( self, mock_request): """Test unmap lun.""" mock_request.side_effect = ([ FakeLoginResponse(), FakeGetBasicInfoTsResponse(), FakeLoginResponse(), FakeCreateLunResponse()]) self.driver = qnap.QnapISCSIDriver( configuration=create_configuration( FAKE_USER, FAKE_PASSWORD, 'http://1.2.3.4:8080', '1.2.3.4', 'Storage Pool 1', True)) self.driver.do_setup('context') self.driver.api_executor.unmap_lun( 'fakeLunIndex', 'fakeTargetIndex') fake_params = {} fake_params['func'] = 'remove_lun' fake_params['LUNIndex'] = 'fakeLunIndex' fake_params['targetIndex'] = 'fakeTargetIndex' fake_params['sid'] = 'fakeSid' fake_post_params = self.sanitize(fake_params) unmap_lun_url = ( 'http://1.2.3.4:8080/cgi-bin/disk/iscsi_target_setting.cgi?' 
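# ---------------------------------------------------------------------------
# Illustrative sketch: the tests build fake_params as a plain dict and run it
# through self.sanitize() before appending it to the *.cgi URL, so that the
# expected GET URL is reproducible regardless of dict ordering.  The helper
# below is only a plausible, hypothetical equivalent (sorted keys, empty
# values dropped, '&'-joined key=value pairs); the real sanitize() is defined
# elsewhere in this test module and may differ in detail.
from urllib import parse


def sanitize_params(params):
    """Hypothetical stand-in for the test helper that flattens params."""
    pairs = []
    for key in sorted(params):
        value = params[key]
        if value in (None, ''):
            continue  # drop empty entries so they never reach the URL
        pairs.append('%s=%s' % (key, parse.quote(str(value), safe='')))
    return '&'.join(pairs)


# Example: the map-LUN parameters used above would flatten, under this
# hypothetical helper, to a stable query string such as
#   LUNIndex=fakeLunIndex&func=add_lun&sid=fakeSid&targetIndex=fakeTargetIndex
_example = sanitize_params({'func': 'add_lun',
                            'LUNIndex': 'fakeLunIndex',
                            'targetIndex': 'fakeTargetIndex',
                            'sid': 'fakeSid'})
# ---------------------------------------------------------------------------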
+ fake_post_params) expected_call_list = [ mock.call('POST', fake_login_url, data=global_sanitized_params, headers=header, verify=False), mock.call('GET', fake_get_basic_info_url, data=None, headers=None, verify=False), mock.call('POST', fake_login_url, data=global_sanitized_params, headers=header, verify=False), mock.call('GET', unmap_lun_url, data=None, headers=None, verify=False)] self.assertEqual(expected_call_list, mock_request.call_args_list) @mock.patch('requests.request') def test_unmap_lun_negative( self, mock_request): """Test unmap lun.""" mock_request.side_effect = ([ FakeLoginResponse(), FakeGetBasicInfoTsResponse(), FakeLoginResponse(), FakeNoAuthPassedResponse()] + [ FakeLoginResponse(), FakeNoAuthPassedResponse()] * 4) self.driver = qnap.QnapISCSIDriver( configuration=create_configuration( FAKE_USER, FAKE_PASSWORD, 'http://1.2.3.4:8080', '1.2.3.4', 'Storage Pool 1', True)) self.driver.do_setup('context') self.assertRaises(exception.VolumeBackendAPIException, self.driver.api_executor.unmap_lun, 'fakeLunIndex', 'fakeTargetIndex') @mock.patch('requests.request') def test_unmap_lun_negative_with_wrong_result( self, mock_request): """Test unmap lun.""" mock_request.side_effect = ([ FakeLoginResponse(), FakeGetBasicInfoTsResponse(), FakeLoginResponse(), FakeCreateLunFailResponse()]) self.driver = qnap.QnapISCSIDriver( configuration=create_configuration( FAKE_USER, FAKE_PASSWORD, 'http://1.2.3.4:8080', '1.2.3.4', 'Storage Pool 1', True)) self.driver.do_setup('context') self.assertRaises(exception.VolumeBackendAPIException, self.driver.api_executor.unmap_lun, 'fakeLunIndex', 'fakeTargetIndex') @mock.patch('requests.request') def test_remove_target_init( self, mock_request): """Test remove target init.""" mock_request.side_effect = ([ FakeLoginResponse(), FakeGetBasicInfoTsResponse(), FakeLoginResponse(), FakeTargetInfo()]) self.driver = qnap.QnapISCSIDriver( configuration=create_configuration( FAKE_USER, FAKE_PASSWORD, 'http://1.2.3.4:8080', '1.2.3.4', 'Storage Pool 1', True)) self.driver.do_setup('context') self.driver.api_executor.remove_target_init( 'fakeTargetIqn', 'fakeDefaultAcl') fake_params = {} fake_params['func'] = 'remove_init' fake_params['targetIQN'] = 'fakeTargetIqn' fake_params['initiatorIQN'] = 'fakeDefaultAcl' fake_params['ha_sync'] = '1' fake_params['sid'] = 'fakeSid' fake_post_params = self.sanitize(fake_params) remove_target_init_url = ( 'http://1.2.3.4:8080/cgi-bin/disk/iscsi_target_setting.cgi?' 
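# ---------------------------------------------------------------------------
# Illustrative sketch: the *_negative tests above queue a side_effect ending
# in FakeNoAuthPassedResponse(), followed by [FakeLoginResponse(),
# FakeNoAuthPassedResponse()] * 4, so every re-login attempt keeps failing
# authentication.  They then use the callable form of assertRaises
# (exception class, callable, *args) to check the executor eventually gives
# up with VolumeBackendAPIException.  The snippet below demonstrates only
# that shape with a toy retry loop and a hypothetical ToyAuthError; it is not
# the driver's retry logic.
import unittest
from unittest import mock


class ToyAuthError(Exception):
    pass


def call_with_retries(send, retries=5):
    """Retry a callable a fixed number of times, then give up."""
    for _ in range(retries):
        if send() == 'ok':
            return 'ok'
    raise ToyAuthError('authentication kept failing')


class NegativePathDemo(unittest.TestCase):
    def test_gives_up_after_retries(self):
        send = mock.Mock()
        # Five queued failures: one initial attempt plus four retries.
        send.side_effect = ['denied'] * 5
        # Callable-style assertRaises, as used throughout the tests above.
        self.assertRaises(ToyAuthError, call_with_retries, send, 5)
        self.assertEqual(5, send.call_count)
# ---------------------------------------------------------------------------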
+ fake_post_params) expected_call_list = [ mock.call('POST', fake_login_url, data=global_sanitized_params, headers=header, verify=False), mock.call('GET', fake_get_basic_info_url, data=None, headers=None, verify=False), mock.call('POST', fake_login_url, data=global_sanitized_params, headers=header, verify=False), mock.call('GET', remove_target_init_url, data=None, headers=None, verify=False)] self.assertEqual(expected_call_list, mock_request.call_args_list) @mock.patch('requests.request') def test_remove_target_init_negative( self, mock_request): """Test remove target init.""" mock_request.side_effect = ([ FakeLoginResponse(), FakeGetBasicInfoTsResponse(), FakeLoginResponse(), FakeNoAuthPassedResponse()] + [ FakeLoginResponse(), FakeNoAuthPassedResponse()] * 4) self.driver = qnap.QnapISCSIDriver( configuration=create_configuration( FAKE_USER, FAKE_PASSWORD, 'http://1.2.3.4:8080', '1.2.3.4', 'Storage Pool 1', True)) self.driver.do_setup('context') self.assertRaises(exception.VolumeBackendAPIException, self.driver.api_executor.remove_target_init, 'fakeTargetIqn', 'fakeDefaultAcl') @mock.patch('requests.request') def test_remove_target_init_negative_with_wrong_result( self, mock_request): """Test remove target init.""" mock_request.side_effect = ([ FakeLoginResponse(), FakeGetBasicInfoTsResponse(), FakeLoginResponse(), FakeTargetInfoFail()]) self.driver = qnap.QnapISCSIDriver( configuration=create_configuration( FAKE_USER, FAKE_PASSWORD, 'http://1.2.3.4:8080', '1.2.3.4', 'Storage Pool 1', True)) self.driver.do_setup('context') self.assertRaises(exception.VolumeBackendAPIException, self.driver.api_executor.remove_target_init, 'fakeTargetIqn', 'fakeDefaultAcl') @mock.patch('requests.request') def test_get_target_info( self, mock_request): """Test get get target info.""" mock_request.side_effect = ([ FakeLoginResponse(), FakeGetBasicInfoTsResponse(), FakeLoginResponse(), FakeTargetInfo()]) self.driver = qnap.QnapISCSIDriver( configuration=create_configuration( FAKE_USER, FAKE_PASSWORD, 'http://1.2.3.4:8080', '1.2.3.4', 'Storage Pool 1', True)) self.driver.do_setup('context') self.driver.api_executor.get_target_info( 'fakeTargetIndex') fake_params = {} fake_params['func'] = 'extra_get' fake_params['targetInfo'] = 1 fake_params['targetIndex'] = 'fakeTargetIndex' fake_params['ha_sync'] = '1' fake_params['sid'] = 'fakeSid' fake_post_params = self.sanitize(fake_params) get_target_info_url = ( 'http://1.2.3.4:8080/cgi-bin/disk/iscsi_portal_setting.cgi?' 
+ fake_post_params) expected_call_list = [ mock.call('POST', fake_login_url, data=global_sanitized_params, headers=header, verify=False), mock.call('GET', fake_get_basic_info_url, data=None, headers=None, verify=False), mock.call('POST', fake_login_url, data=global_sanitized_params, headers=header, verify=False), mock.call('GET', get_target_info_url, data=None, headers=None, verify=False)] self.assertEqual(expected_call_list, mock_request.call_args_list) @mock.patch('requests.request') def test_get_target_info_negative( self, mock_request): """Test get get target info.""" mock_request.side_effect = ([ FakeLoginResponse(), FakeGetBasicInfoTsResponse(), FakeLoginResponse(), FakeNoAuthPassedResponse()] + [ FakeLoginResponse(), FakeNoAuthPassedResponse()] * 4) self.driver = qnap.QnapISCSIDriver( configuration=create_configuration( FAKE_USER, FAKE_PASSWORD, 'http://1.2.3.4:8080', '1.2.3.4', 'Storage Pool 1', True)) self.driver.do_setup('context') self.assertRaises(exception.VolumeBackendAPIException, self.driver.api_executor.get_target_info, 'fakeTargetIndex') @mock.patch('requests.request') def test_get_target_info_negative_with_wrong_result( self, mock_request): """Test get get target info.""" mock_request.side_effect = ([ FakeLoginResponse(), FakeGetBasicInfoTsResponse(), FakeLoginResponse(), FakeTargetInfoFail()]) self.driver = qnap.QnapISCSIDriver( configuration=create_configuration( FAKE_USER, FAKE_PASSWORD, 'http://1.2.3.4:8080', '1.2.3.4', 'Storage Pool 1', True)) self.driver.do_setup('context') self.assertRaises(exception.VolumeBackendAPIException, self.driver.api_executor.get_target_info, 'fakeTargetIndex') @mock.patch('requests.request') def test_get_ethernet_ip_with_type( self, mock_request): """Test get ethernet ip.""" mock_request.side_effect = ([ FakeLoginResponse(), FakeGetBasicInfoTsResponse(), FakeLoginResponse(), FakeGetAllEthernetIp()]) self.driver = qnap.QnapISCSIDriver( configuration=create_configuration( FAKE_USER, FAKE_PASSWORD, 'http://1.2.3.4:8080', '1.2.3.4', 'Storage Pool 1', True)) self.driver.do_setup('context') self.driver.api_executor.get_ethernet_ip( type='data') fake_post_parm = 'sid=fakeSid&subfunc=net_setting' get_ethernet_ip_url = ( 'http://1.2.3.4:8080/cgi-bin/sys/sysRequest.cgi?' 
+ fake_post_parm) expected_call_list = [ mock.call('POST', fake_login_url, data=global_sanitized_params, headers=header, verify=False), mock.call('GET', fake_get_basic_info_url, data=None, headers=None, verify=False), mock.call('POST', fake_login_url, data=global_sanitized_params, headers=header, verify=False), mock.call('GET', get_ethernet_ip_url, data=None, headers=None, verify=False)] self.assertEqual(expected_call_list, mock_request.call_args_list) @mock.patch('requests.request') def test_get_ethernet_ip_negative(self, mock_request): """Test get ethernet ip.""" mock_request.side_effect = ([ FakeLoginResponse(), FakeGetBasicInfoTsResponse(), FakeLoginResponse(), FakeNoAuthPassedResponse()] + [ FakeLoginResponse(), FakeNoAuthPassedResponse()] * 4) self.driver = qnap.QnapISCSIDriver( configuration=create_configuration( FAKE_USER, FAKE_PASSWORD, 'http://1.2.3.4:8080', '1.2.3.4', 'Storage Pool 1', True)) self.driver.do_setup('context') self.assertRaises(exception.VolumeBackendAPIException, self.driver.api_executor.get_ethernet_ip, type='data') @mock.patch('requests.request') def test_get_snapshot_info( self, mock_request): """Test get snapshot info.""" mock_request.side_effect = ([ FakeLoginResponse(), FakeGetBasicInfoTsResponse(), FakeLoginResponse(), FakeSnapshotInfoResponse()]) self.driver = qnap.QnapISCSIDriver( configuration=create_configuration( FAKE_USER, FAKE_PASSWORD, 'http://1.2.3.4:8080', '1.2.3.4', 'Storage Pool 1', True)) self.driver.do_setup('context') self.driver.api_executor.get_snapshot_info( lun_index='fakeLunIndex', snapshot_name='fakeSnapshotName') fake_params = {} fake_params['func'] = 'extra_get' fake_params['LUNIndex'] = 'fakeLunIndex' fake_params['smb_snapshot_list'] = '1' fake_params['smb_snapshot'] = '1' fake_params['snapshot_list'] = '1' fake_params['sid'] = 'fakeSid' fake_post_params = self.sanitize(fake_params) get_snapshot_info_url = ( 'http://1.2.3.4:8080/cgi-bin/disk/snapshot.cgi?' 
+ fake_post_params) expected_call_list = [ mock.call('POST', fake_login_url, data=global_sanitized_params, headers=header, verify=False), mock.call('GET', fake_get_basic_info_url, data=None, headers=None, verify=False), mock.call('POST', fake_login_url, data=global_sanitized_params, headers=header, verify=False), mock.call('GET', get_snapshot_info_url, data=None, headers=None, verify=False)] self.assertEqual(expected_call_list, mock_request.call_args_list) @mock.patch('requests.request') def test_get_snapshot_info_negative( self, mock_request): """Test get snapshot info.""" mock_request.side_effect = ([ FakeLoginResponse(), FakeGetBasicInfoTsResponse(), FakeLoginResponse(), FakeNoAuthPassedResponse()] + [ FakeLoginResponse(), FakeNoAuthPassedResponse()] * 4) self.driver = qnap.QnapISCSIDriver( configuration=create_configuration( FAKE_USER, FAKE_PASSWORD, 'http://1.2.3.4:8080', '1.2.3.4', 'Storage Pool 1', True)) self.driver.do_setup('context') self.assertRaises(exception.VolumeBackendAPIException, self.driver.api_executor.get_snapshot_info, lun_index='fakeLunIndex', snapshot_name='fakeSnapshotName') @mock.patch('requests.request') def test_get_snapshot_info_negative_with_wrong_result( self, mock_request): """Test get snapshot info.""" mock_request.side_effect = ([ FakeLoginResponse(), FakeGetBasicInfoTsResponse(), FakeLoginResponse(), FakeSnapshotInfoFailResponse()]) self.driver = qnap.QnapISCSIDriver( configuration=create_configuration( FAKE_USER, FAKE_PASSWORD, 'http://1.2.3.4:8080', '1.2.3.4', 'Storage Pool 1', True)) self.driver.do_setup('context') self.assertRaises(exception.VolumeBackendAPIException, self.driver.api_executor.get_snapshot_info, lun_index='fakeLunIndex', snapshot_name='fakeSnapshotName') @mock.patch('requests.request') def test_create_target( self, mock_request): """Test create target.""" mock_request.side_effect = ([ FakeLoginResponse(), FakeGetBasicInfoTsResponse(), FakeLoginResponse(), FakeCreateTargetResponse()]) self.driver = qnap.QnapISCSIDriver( configuration=create_configuration( FAKE_USER, FAKE_PASSWORD, 'http://1.2.3.4:8080', '1.2.3.4', 'Storage Pool 1', True)) self.driver.do_setup('context') self.driver.api_executor.create_target('fakeTargetName', 'sca') fake_params = {} fake_params['func'] = 'add_target' fake_params['targetName'] = 'fakeTargetName' fake_params['targetAlias'] = 'fakeTargetName' fake_params['bTargetDataDigest'] = '0' fake_params['bTargetHeaderDigest'] = '0' fake_params['bTargetClusterEnable'] = '1' fake_params['sid'] = 'fakeSid' fake_post_params = self.sanitize(fake_params) create_target_url = ( 'http://1.2.3.4:8080/cgi-bin/disk/iscsi_target_setting.cgi?' 
+ fake_post_params) expected_call_list = [ mock.call('POST', fake_login_url, data=global_sanitized_params, headers=header, verify=False), mock.call('GET', fake_get_basic_info_url, data=None, headers=None, verify=False), mock.call('POST', fake_login_url, data=global_sanitized_params, headers=header, verify=False), mock.call('GET', create_target_url, data=None, headers=None, verify=False)] self.assertEqual(expected_call_list, mock_request.call_args_list) @mock.patch('requests.request') def test_create_target_negative( self, mock_request): """Test create target.""" mock_request.side_effect = ([ FakeLoginResponse(), FakeGetBasicInfoTsResponse(), FakeLoginResponse(), FakeNoAuthPassedResponse()] + [ FakeLoginResponse(), FakeNoAuthPassedResponse()] * 4) self.driver = qnap.QnapISCSIDriver( configuration=create_configuration( FAKE_USER, FAKE_PASSWORD, 'http://1.2.3.4:8080', '1.2.3.4', 'Storage Pool 1', True)) self.driver.do_setup('context') self.assertRaises(exception.VolumeBackendAPIException, self.driver.api_executor.create_target, 'fakeTargetName', 'sca') @mock.patch('requests.request') def test_create_target_negative_with_wrong_result( self, mock_request): """Test create target.""" mock_request.side_effect = ([ FakeLoginResponse(), FakeGetBasicInfoTsResponse(), FakeLoginResponse(), FakeCreateTargetFailResponse()]) self.driver = qnap.QnapISCSIDriver( configuration=create_configuration( FAKE_USER, FAKE_PASSWORD, 'http://1.2.3.4:8080', '1.2.3.4', 'Storage Pool 1', True)) self.driver.do_setup('context') self.assertRaises(exception.VolumeBackendAPIException, self.driver.api_executor.create_target, 'fakeTargetName', 'sca') class QnapAPIExecutorTesTestCase(QnapDriverBaseTestCase): """Tests QnapAPIExecutorTES.""" @mock.patch('requests.request') def test_create_lun_positive_with_thin_allocate( self, mock_request): """Test create lun.""" fake_volume = VolumeClass( 'fakeDisplayName', 'fakeId', 100, 'fakeLunName') mock_request.side_effect = ([ FakeLoginResponse(), FakeGetBasicInfoTesResponse(), FakeLoginResponse(), FakeCreateLunResponse()]) self.driver = qnap.QnapISCSIDriver( configuration=create_configuration( FAKE_USER, FAKE_PASSWORD, 'http://1.2.3.4:8080', '1.2.3.4', 'Pool1', True)) self.driver.do_setup('context') self.assertEqual( 'fakeLunIndex', self.driver.api_executor.create_lun( fake_volume, 'fakepool', 'fakeLun', True, False, True, False)) fake_params = {} fake_params['func'] = 'add_lun' fake_params['FileIO'] = 'no' fake_params['LUNThinAllocate'] = '1' fake_params['LUNName'] = 'fakeLun' fake_params['LUNPath'] = 'fakeLun' fake_params['poolID'] = 'fakepool' fake_params['lv_ifssd'] = 'no' fake_params['compression'] = '1' fake_params['dedup'] = 'off' fake_params['sync'] = 'disabled' fake_params['LUNCapacity'] = 100 fake_params['lv_threshold'] = '80' fake_params['sid'] = 'fakeSid' fake_post_params = self.sanitize(fake_params) create_lun_url = ( 'http://1.2.3.4:8080/cgi-bin/disk/iscsi_lun_setting.cgi?' 
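# ---------------------------------------------------------------------------
# Illustrative sketch: the TS executor tests earlier in this module and the
# QnapAPIExecutorTesTestCase tests here exercise the same create_lun call but
# expect slightly different request parameters -- the TS expectation carries
# LUNSectorSize, while the TES expectation adds compression/dedup/sync.  The
# dicts below simply restate the two expected parameter sets from the
# without-thin-allocate variants and compute the keys that differ, as a quick
# way to see the model-specific delta.
_TS_EXPECTED = {'func': 'add_lun', 'FileIO': 'no', 'LUNThinAllocate': '0',
                'LUNName': 'fakeLun', 'LUNPath': 'fakeLun',
                'poolID': 'fakepool', 'lv_ifssd': 'no', 'LUNCapacity': 100,
                'LUNSectorSize': '512', 'lv_threshold': '80',
                'sid': 'fakeSid'}
_TES_EXPECTED = {'func': 'add_lun', 'FileIO': 'no', 'LUNThinAllocate': '0',
                 'LUNName': 'fakeLun', 'LUNPath': 'fakeLun',
                 'poolID': 'fakepool', 'lv_ifssd': 'no', 'compression': '1',
                 'dedup': 'off', 'sync': 'disabled', 'LUNCapacity': 100,
                 'lv_threshold': '80', 'sid': 'fakeSid'}

# Keys only the TS expectation has: {'LUNSectorSize'}
# Keys only the TES expectation has: {'compression', 'dedup', 'sync'}
_TS_ONLY = set(_TS_EXPECTED) - set(_TES_EXPECTED)
_TES_ONLY = set(_TES_EXPECTED) - set(_TS_EXPECTED)
# ---------------------------------------------------------------------------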
+ fake_post_params) expected_call_list = [ mock.call('POST', fake_login_url, data=global_sanitized_params, headers=header, verify=False), mock.call('GET', fake_get_basic_info_url, data=None, headers=None, verify=False), mock.call('POST', fake_login_url, data=global_sanitized_params, headers=header, verify=False), mock.call('GET', create_lun_url, data=None, headers=None, verify=False)] self.assertEqual(expected_call_list, mock_request.call_args_list) @mock.patch('requests.request') def test_create_lun_positive_without_thin_allocate( self, mock_request): """Test create lun.""" fake_volume = VolumeClass( 'fakeDisplayName', 'fakeId', 100, 'fakeLunName') mock_request.side_effect = ([ FakeLoginResponse(), FakeGetBasicInfoTesResponse(), FakeLoginResponse(), FakeCreateLunResponse()]) self.driver = qnap.QnapISCSIDriver( configuration=create_configuration( FAKE_USER, FAKE_PASSWORD, 'http://1.2.3.4:8080', '1.2.3.4', 'Pool1', True)) self.driver.do_setup('context') self.assertEqual( 'fakeLunIndex', self.driver.api_executor.create_lun( fake_volume, 'fakepool', 'fakeLun', False, False, True, False)) fake_params = {} fake_params['func'] = 'add_lun' fake_params['FileIO'] = 'no' fake_params['LUNThinAllocate'] = '0' fake_params['LUNName'] = 'fakeLun' fake_params['LUNPath'] = 'fakeLun' fake_params['poolID'] = 'fakepool' fake_params['lv_ifssd'] = 'no' fake_params['compression'] = '1' fake_params['dedup'] = 'off' fake_params['sync'] = 'disabled' fake_params['LUNCapacity'] = 100 fake_params['lv_threshold'] = '80' fake_params['sid'] = 'fakeSid' fake_post_params = self.sanitize(fake_params) create_lun_url = ( 'http://1.2.3.4:8080/cgi-bin/disk/iscsi_lun_setting.cgi?' + fake_post_params) expected_call_list = [ mock.call('POST', fake_login_url, data=global_sanitized_params, headers=header, verify=False), mock.call('GET', fake_get_basic_info_url, data=None, headers=None, verify=False), mock.call('POST', fake_login_url, data=global_sanitized_params, headers=header, verify=False), mock.call('GET', create_lun_url, data=None, headers=None, verify=False)] self.assertEqual(expected_call_list, mock_request.call_args_list) @mock.patch('requests.request') def test_create_lun_negative( self, mock_request): """Test create lun.""" fake_volume = VolumeClass( 'fakeDisplayName', 'fakeId', 100, 'fakeLunName') mock_request.side_effect = ([ FakeLoginResponse(), FakeGetBasicInfoTesResponse(), FakeLoginResponse(), FakeNoAuthPassedResponse()] + [ FakeLoginResponse(), FakeNoAuthPassedResponse()] * 4) self.driver = qnap.QnapISCSIDriver( configuration=create_configuration( FAKE_USER, FAKE_PASSWORD, 'http://1.2.3.4:8080', '1.2.3.4', 'Pool1', True)) self.driver.do_setup('context') self.assertRaises(exception.VolumeBackendAPIException, self.driver.api_executor.create_lun, fake_volume, 'fakepool', 'fakeLun', 'False', 'False', 'True', 'False') @mock.patch('requests.request') def test_create_lun_negative_with_wrong_result( self, mock_request): """Test create lun.""" fake_volume = VolumeClass( 'fakeDisplayName', 'fakeId', 100, 'fakeLunName') mock_request.side_effect = ([ FakeLoginResponse(), FakeGetBasicInfoTesResponse(), FakeLoginResponse(), FakeCreateLunFailResponse()]) self.driver = qnap.QnapISCSIDriver( configuration=create_configuration( FAKE_USER, FAKE_PASSWORD, 'http://1.2.3.4:8080', '1.2.3.4', 'Pool1', True)) self.driver.do_setup('context') self.assertRaises(exception.VolumeBackendAPIException, self.driver.api_executor.create_lun, fake_volume, 'fakepool', 'fakeLun', 'False', 'False', 'True', 'False') @mock.patch('requests.request') def 
test_get_ethernet_ip_with_type( self, mock_request): """Test get ehternet ip.""" mock_request.side_effect = ([ FakeLoginResponse(), FakeGetBasicInfoTesResponse(), FakeLoginResponse(), FakeGetAllEthernetIp()]) self.driver = qnap.QnapISCSIDriver( configuration=create_configuration( FAKE_USER, FAKE_PASSWORD, 'http://1.2.3.4:8080', '1.2.3.4', 'Pool1', True)) self.driver.do_setup('context') self.driver.api_executor.get_ethernet_ip( type='data') fake_post_parm = 'sid=fakeSid&subfunc=net_setting' get_ethernet_ip_url = ( 'http://1.2.3.4:8080/cgi-bin/sys/sysRequest.cgi?' + fake_post_parm) expected_call_list = [ mock.call('POST', fake_login_url, data=global_sanitized_params, headers=header, verify=False), mock.call('GET', fake_get_basic_info_url, data=None, headers=None, verify=False), mock.call('POST', fake_login_url, data=global_sanitized_params, headers=header, verify=False), mock.call('GET', get_ethernet_ip_url, data=None, headers=None, verify=False)] self.assertEqual(expected_call_list, mock_request.call_args_list) @mock.patch('requests.request') def test_get_ethernet_ip_negative( self, mock_request): """Test get ethernet ip.""" mock_request.side_effect = ([ FakeLoginResponse(), FakeGetBasicInfoTesResponse(), FakeLoginResponse(), FakeNoAuthPassedResponse()] + [ FakeLoginResponse(), FakeNoAuthPassedResponse()] * 4) self.driver = qnap.QnapISCSIDriver( configuration=create_configuration( FAKE_USER, FAKE_PASSWORD, 'http://1.2.3.4:8080', '1.2.3.4', 'Pool1', True)) self.driver.do_setup('context') self.assertRaises(exception.VolumeBackendAPIException, self.driver.api_executor.get_ethernet_ip, type='data') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/volume/drivers/test_quobyte.py0000664000175000017500000021054500000000000024551 0ustar00zuulzuul00000000000000# Copyright (c) 2014 Quobyte Inc. # Copyright (c) 2013 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Unit tests for the Quobyte driver module.""" import errno from io import StringIO import os import shutil import traceback from unittest import mock import ddt from oslo_concurrency import processutils as putils from oslo_utils import fileutils from oslo_utils import imageutils from oslo_utils import units from cinder import context from cinder import db from cinder import exception from cinder.image import image_utils from cinder.tests.unit import fake_snapshot from cinder.tests.unit import fake_volume from cinder.tests.unit import test from cinder.volume import configuration as conf from cinder.volume.drivers import quobyte from cinder.volume.drivers import remotefs class FakeDb(object): msg = "Tests are broken: mock this out." 
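# ---------------------------------------------------------------------------
# Illustrative sketch: FakeDb is a "fail loudly" stub -- calls the Quobyte
# tests do not expect (volume_get) raise immediately with a hint to mock them
# out, while calls that are expected but irrelevant return empty lists.  The
# tiny example below only shows why that pattern is useful: an unstubbed
# lookup surfaces as an obvious test error instead of silently returning
# None.  The StrictStub name and its methods are hypothetical.
class StrictStub(object):
    msg = "unexpected call: mock this out in your test"

    def lookup(self, *args, **kwargs):
        raise RuntimeError(self.msg)

    def list_all(self, *args, **kwargs):
        # Harmless read-only call: an empty result is a safe default.
        return []


def _demo_strict_stub():
    stub = StrictStub()
    assert stub.list_all() == []
    try:
        stub.lookup('some-id')
    except RuntimeError as exc:
        assert 'mock this out' in str(exc)
# ---------------------------------------------------------------------------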
def volume_get(self, *a, **kw): raise Exception(self.msg) def snapshot_get_all_for_volume(self, *a, **kw): """Mock this if you want results from it.""" return [] def volume_get_all(self, *a, **kw): return [] @ddt.ddt class QuobyteDriverTestCase(test.TestCase): """Test case for Quobyte driver.""" TEST_QUOBYTE_VOLUME = 'quobyte://quobyte-host/openstack-volumes' TEST_QUOBYTE_VOLUME_WITHOUT_PROTOCOL = 'quobyte-host/openstack-volumes' TEST_SIZE_IN_GB = 1 TEST_MNT_HASH = "1331538734b757ed52d0e18c0a7210cd" TEST_MNT_POINT_BASE = '/fake-mnt' TEST_MNT_POINT = os.path.join(TEST_MNT_POINT_BASE, TEST_MNT_HASH) TEST_FILE_NAME = 'test.txt' TEST_SHARES_CONFIG_FILE = '/etc/cinder/test-shares.conf' TEST_TMP_FILE = '/tmp/tempfile' VOLUME_UUID = 'abcdefab-cdef-abcd-efab-cdefabcdefab' SNAP_UUID = 'bacadaca-baca-daca-baca-dacadacadaca' SNAP_UUID_2 = 'bebedede-bebe-dede-bebe-dedebebedede' CACHE_NAME = quobyte.QuobyteDriver.QUOBYTE_VOLUME_SNAP_CACHE_DIR_NAME def _get_fake_snapshot(self, src_volume): snapshot = fake_snapshot.fake_snapshot_obj( self.context, volume_name=src_volume.name, display_name='clone-snap-%s' % src_volume.id, size=src_volume.size, volume_size=src_volume.size, volume_id=src_volume.id, id=self.SNAP_UUID) snapshot.volume = src_volume return snapshot def setUp(self): super(QuobyteDriverTestCase, self).setUp() self._configuration = mock.Mock(conf.Configuration) self._configuration.append_config_values(mock.ANY) self._configuration.quobyte_volume_url = \ self.TEST_QUOBYTE_VOLUME self._configuration.quobyte_client_cfg = None self._configuration.quobyte_sparsed_volumes = True self._configuration.quobyte_qcow2_volumes = False self._configuration.quobyte_mount_point_base = \ self.TEST_MNT_POINT_BASE self._configuration.nas_secure_file_operations = "true" self._configuration.nas_secure_file_permissions = "true" self._configuration.quobyte_volume_from_snapshot_cache = False self._configuration.quobyte_overlay_volumes = False self._driver = quobyte.QuobyteDriver(configuration=self._configuration) self._driver.shares = {} self._driver.set_nas_security_options(is_new_cinder_install=False) self._driver.base = self._configuration.quobyte_mount_point_base self.context = context.get_admin_context() def assertRaisesAndMessageMatches( self, excClass, msg, callableObj, *args, **kwargs): """Ensure that the specified exception was raised. 
""" caught = False try: callableObj(*args, **kwargs) except Exception as exc: caught = True self.assertIsInstance(exc, excClass, 'Wrong exception caught: %s Stacktrace: %s' % (exc, traceback.format_exc())) self.assertIn(msg, str(exc)) if not caught: self.fail('Expected raised exception but nothing caught.') def get_mock_partitions(self): mypart = mock.Mock() mypart.device = "quobyte@" mypart.mountpoint = self.TEST_MNT_POINT return [mypart] @mock.patch.object(os, "symlink") def test__create_overlay_volume_from_snapshot(self, os_sl_mock): drv = self._driver drv._execute = mock.Mock() vol = self._simple_volume() snap = self._get_fake_snapshot(vol) r_path = os.path.join(drv.QUOBYTE_VOLUME_SNAP_CACHE_DIR_NAME, snap.id) vol_path = drv._local_path_volume(vol) drv._create_overlay_volume_from_snapshot(vol, snap, 1, "qcow2") drv._execute.assert_called_once_with( 'qemu-img', 'create', '-f', 'qcow2', '-o', 'backing_file=%s,backing_fmt=qcow2' % (r_path), vol_path, "1G", run_as_root=drv._execute_as_root) os_sl_mock.assert_called_once_with( drv.local_path(vol), drv._local_volume_from_snap_cache_path(snap) + '.child-' + vol.id) def test__create_regular_file(self): with mock.patch.object(self._driver, "_execute") as qb_exec_mock: tmp_path = "/path/for/test" test_size = 1 self._driver._create_regular_file(tmp_path, test_size) qb_exec_mock.assert_called_once_with( 'fallocate', '-l', '%sGiB' % test_size, tmp_path, run_as_root=self._driver._execute_as_root) @mock.patch.object(os, "makedirs") @mock.patch.object(os.path, "join", return_value="dummy_path") @mock.patch.object(os, "access", return_value=True) def test__ensure_volume_cache_ok(self, os_access_mock, os_join_mock, os_makedirs_mock): tmp_path = "/some/random/path" self._driver._ensure_volume_from_snap_cache(tmp_path) calls = [mock.call("dummy_path", os.F_OK), mock.call("dummy_path", os.R_OK), mock.call("dummy_path", os.W_OK), mock.call("dummy_path", os.X_OK)] os_access_mock.assert_has_calls(calls) os_join_mock.assert_called_once_with( tmp_path, self._driver.QUOBYTE_VOLUME_SNAP_CACHE_DIR_NAME) self.assertFalse(os_makedirs_mock.called) @mock.patch.object(fileutils, "ensure_tree") @mock.patch.object(os.path, "join", return_value="dummy_path") @mock.patch.object(os, "access", return_value=True) def test__ensure_volume_cache_create(self, os_access_mock, os_join_mock, os_makedirs_mock): tmp_path = "/some/random/path" os_access_mock.side_effect = [False, True, True, True] self._driver._ensure_volume_from_snap_cache(tmp_path) calls = [mock.call("dummy_path", os.F_OK), mock.call("dummy_path", os.R_OK), mock.call("dummy_path", os.W_OK), mock.call("dummy_path", os.X_OK)] os_access_mock.assert_has_calls(calls) os_join_mock.assert_called_once_with( tmp_path, self._driver.QUOBYTE_VOLUME_SNAP_CACHE_DIR_NAME) os_makedirs_mock.assert_called_once_with("dummy_path") @mock.patch.object(os, "makedirs") @mock.patch.object(os.path, "join", return_value="dummy_path") @mock.patch.object(os, "access", return_value=True) def test__ensure_volume_cache_error(self, os_access_mock, os_join_mock, os_makedirs_mock): tmp_path = "/some/random/path" os_access_mock.side_effect = [True, False, False, False] self.assertRaises( exception.VolumeDriverException, self._driver._ensure_volume_from_snap_cache, tmp_path) calls = [mock.call("dummy_path", os.F_OK), mock.call("dummy_path", os.R_OK)] os_access_mock.assert_has_calls(calls) os_join_mock.assert_called_once_with( tmp_path, self._driver.QUOBYTE_VOLUME_SNAP_CACHE_DIR_NAME) self.assertFalse(os_makedirs_mock.called) 
@mock.patch.object(remotefs.RemoteFSSnapDriverDistributed, "_get_backing_chain_for_path") @ddt.data( [[], []], [[{'filename': "A"}, {'filename': CACHE_NAME}], [{'filename': "A"}]], [[{'filename': "A"}, {'filename': "B"}], [{'filename': "A"}, {'filename': "B"}]] ) @ddt.unpack def test__get_backing_chain_for_path(self, test_chain, result_chain, rfs_chain_mock): drv = self._driver rfs_chain_mock.return_value = test_chain result = drv._get_backing_chain_for_path("foo", "bar") self.assertEqual(result_chain, result) @mock.patch.object(image_utils, 'qemu_img_info') @mock.patch('os.path.basename') def _test__qemu_img_info(self, mock_basename, mock_qemu_img_info, backing_file, base_dir, valid_backing_file=True): drv = self._driver drv._execute_as_root = True fake_vol_name = "volume-" + self.VOLUME_UUID mock_info = mock_qemu_img_info.return_value mock_info.image = mock.sentinel.image_path mock_info.backing_file = backing_file drv._VALID_IMAGE_EXTENSIONS = ['raw', 'qcow2'] mock_basename.side_effect = [mock.sentinel.image_basename, mock.sentinel.backing_file_basename] if valid_backing_file: img_info = drv._qemu_img_info_base( mock.sentinel.image_path, fake_vol_name, base_dir) self.assertEqual(mock_info, img_info) self.assertEqual(mock.sentinel.image_basename, mock_info.image) expected_basename_calls = [mock.call(mock.sentinel.image_path)] if backing_file: self.assertEqual(mock.sentinel.backing_file_basename, mock_info.backing_file) expected_basename_calls.append(mock.call(backing_file)) mock_basename.assert_has_calls(expected_basename_calls) else: self.assertRaises(exception.RemoteFSInvalidBackingFile, drv._qemu_img_info_base, mock.sentinel.image_path, fake_vol_name, base_dir) mock_qemu_img_info.assert_called_with(mock.sentinel.image_path, force_share=True, run_as_root=True, allow_qcow2_backing_file=True) @ddt.data(['/other_random_path', '/mnt'], ['/other_basedir/' + TEST_MNT_HASH + '/volume-' + VOLUME_UUID, '/fake_basedir'], ['/mnt/invalid_hash/volume-' + VOLUME_UUID, '/mnt'], ['/mnt/' + TEST_MNT_HASH + '/invalid_vol_name', '/mnt'], ['/mnt/' + TEST_MNT_HASH + '/volume-' + VOLUME_UUID + '.info', '/fake_basedir'], ['/mnt/' + TEST_MNT_HASH + '/volume-' + VOLUME_UUID + '.random-suffix', '/mnt'], ['/mnt/' + TEST_MNT_HASH + '/volume-' + VOLUME_UUID + '.invalidext', '/mnt']) @ddt.unpack def test__qemu_img_info_invalid_backing_file(self, backing_file, basedir): self._test__qemu_img_info(backing_file=backing_file, base_dir=basedir, valid_backing_file=False) @ddt.data([None, '/mnt'], ['/mnt/' + TEST_MNT_HASH + '/volume-' + VOLUME_UUID, '/mnt'], ['/mnt/' + TEST_MNT_HASH + '/volume-' + VOLUME_UUID + '.qcow2', '/mnt'], ['/mnt/' + TEST_MNT_HASH + '/volume-' + VOLUME_UUID + '.404f-404', '/mnt'], ['/mnt/' + TEST_MNT_HASH + '/volume-' + VOLUME_UUID + '.tmp-snap-404f-404', '/mnt']) @ddt.unpack def test__qemu_img_info_valid_backing_file(self, backing_file, basedir): self._test__qemu_img_info(backing_file=backing_file, base_dir=basedir) @ddt.data(['/mnt/' + TEST_MNT_HASH + '/' + CACHE_NAME + '/' + VOLUME_UUID, '/mnt'], ['/mnt/' + TEST_MNT_HASH + '/' + CACHE_NAME + '/' + VOLUME_UUID + '.child-aaaaa', '/mnt'], ['/mnt/' + TEST_MNT_HASH + '/' + CACHE_NAME + '/' + VOLUME_UUID + '.parent-bbbbbb', '/mnt'], ['/mnt/' + TEST_MNT_HASH + '/' + CACHE_NAME + '/tmp-snap-' + VOLUME_UUID, '/mnt']) @ddt.unpack def test__qemu_img_info_valid_cache_backing_file(self, backing_file, basedir): self._test__qemu_img_info(backing_file=backing_file, base_dir=basedir) @mock.patch.object(os, "listdir", return_value=["fake_vol"]) 
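# ---------------------------------------------------------------------------
# Illustrative sketch: several tests in this class are parametrized with ddt,
# where @ddt.data supplies one argument list per generated test and
# @ddt.unpack splats it into the test signature (and can be combined with
# mock.patch decorators, as in test__get_backing_chain_for_path above).  The
# minimal, self-contained example below shows only that mechanism; the
# filter_cache_entries helper and CACHE_MARKER name are hypothetical.
import unittest

import ddt

CACHE_MARKER = 'volume_from_snapshot_cache'


def filter_cache_entries(chain):
    """Drop cache bookkeeping entries from a backing chain."""
    return [entry for entry in chain
            if entry.get('filename') != CACHE_MARKER]


@ddt.ddt
class FilterCacheEntriesDemo(unittest.TestCase):
    @ddt.data(
        [[], []],
        [[{'filename': 'A'}, {'filename': CACHE_MARKER}],
         [{'filename': 'A'}]],
        [[{'filename': 'A'}, {'filename': 'B'}],
         [{'filename': 'A'}, {'filename': 'B'}]])
    @ddt.unpack
    def test_filtering(self, chain, expected):
        # One generated test per @ddt.data entry, with chain/expected unpacked.
        self.assertEqual(expected, filter_cache_entries(chain))
# ---------------------------------------------------------------------------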
@mock.patch.object(fileutils, "delete_if_exists") def test__remove_from_vol_cache_no_refs(self, fu_die_mock, os_list_mock): drv = self._driver volume = self._simple_volume() cache_path = drv.QUOBYTE_VOLUME_SNAP_CACHE_DIR_NAME + "/fake_vol" suf = ".test_suffix" drv._remove_from_vol_cache(cache_path, suf, volume) fu_die_mock.assert_has_calls([ mock.call(os.path.join(drv._local_volume_dir(volume), drv.QUOBYTE_VOLUME_SNAP_CACHE_DIR_NAME, "fake_vol.test_suffix")), mock.call(os.path.join(drv._local_volume_dir(volume), drv.QUOBYTE_VOLUME_SNAP_CACHE_DIR_NAME, "fake_vol"))]) os_list_mock.assert_called_once_with(os.path.join( drv._local_volume_dir(volume), drv.QUOBYTE_VOLUME_SNAP_CACHE_DIR_NAME)) @mock.patch.object(os, "listdir", return_value=["fake_vol", "fake_vol.more_ref"]) @mock.patch.object(fileutils, "delete_if_exists") def test__remove_from_vol_cache_with_refs(self, fu_die_mock, os_list_mock): drv = self._driver volume = self._simple_volume() cache_path = drv.QUOBYTE_VOLUME_SNAP_CACHE_DIR_NAME + "/fake_vol" suf = ".test_suffix" drv._remove_from_vol_cache(cache_path, suf, volume) fu_die_mock.assert_called_once_with( os.path.join(drv._local_volume_dir(volume), drv.QUOBYTE_VOLUME_SNAP_CACHE_DIR_NAME, "fake_vol.test_suffix")) os_list_mock.assert_called_once_with(os.path.join( drv._local_volume_dir(volume), drv.QUOBYTE_VOLUME_SNAP_CACHE_DIR_NAME)) def test_local_path(self): """local_path common use case.""" drv = self._driver vol_id = self.VOLUME_UUID volume = self._simple_volume(_name_id=vol_id) self.assertEqual( os.path.join(self.TEST_MNT_POINT, 'volume-%s' % vol_id), drv.local_path(volume)) def test_mount_quobyte_should_mount_correctly(self): with mock.patch.object(self._driver, '_execute') as mock_execute, \ mock.patch('cinder.volume.drivers.quobyte.QuobyteDriver' '.read_proc_mount') as mock_open, \ mock.patch('oslo_utils.fileutils.ensure_tree') as mock_mkdir, \ mock.patch('cinder.volume.drivers.quobyte.QuobyteDriver' '._validate_volume') as mock_validate: # Content of /proc/mount (not mounted yet). mock_open.return_value = StringIO( "/dev/sda5 / ext4 rw,relatime,data=ordered 0 0") self._driver._mount_quobyte(self.TEST_QUOBYTE_VOLUME, self.TEST_MNT_POINT) mock_mkdir.assert_called_once_with(self.TEST_MNT_POINT) mount_call = mock.call( 'mount.quobyte', '--disable-xattrs', self.TEST_QUOBYTE_VOLUME, self.TEST_MNT_POINT, run_as_root=False) mock_execute.assert_has_calls( [mount_call], any_order=False) mock_validate.assert_called_once_with(self.TEST_MNT_POINT) def test_mount_quobyte_already_mounted_detected_seen_in_proc_mount(self): with mock.patch('cinder.volume.drivers.quobyte.QuobyteDriver' '.read_proc_mount') as mock_open, \ mock.patch('cinder.volume.drivers.quobyte.QuobyteDriver' '._validate_volume') as mock_validate: # Content of /proc/mount (already mounted). mock_open.return_value = StringIO( "quobyte@%s %s fuse rw,nosuid,nodev,noatime,user_id=1000" ",group_id=100,default_permissions,allow_other 0 0" % (self.TEST_QUOBYTE_VOLUME, self.TEST_MNT_POINT)) self._driver._mount_quobyte(self.TEST_QUOBYTE_VOLUME, self.TEST_MNT_POINT) mock_validate.assert_called_once_with(self.TEST_MNT_POINT) def test_mount_quobyte_should_suppress_already_mounted_error(self): """test_mount_quobyte_should_suppress_already_mounted_error Based on /proc/mount, the file system is not mounted yet. However, mount.quobyte returns with an 'already mounted' error. This is a last-resort safe-guard in case /proc/mount parsing was not successful. 
Because _mount_quobyte gets called with ensure=True, the error will be suppressed instead. """ with mock.patch.object(self._driver, '_execute') as mock_execute, \ mock.patch('cinder.volume.drivers.quobyte.QuobyteDriver' '.read_proc_mount') as mock_open, \ mock.patch('oslo_utils.fileutils.ensure_tree') as mock_mkdir, \ mock.patch('cinder.volume.drivers.quobyte.QuobyteDriver' '._validate_volume') as mock_validate: # Content of /proc/mount (empty). mock_open.return_value = StringIO() mock_execute.side_effect = [None, putils.ProcessExecutionError( stderr='is busy or already mounted')] self._driver._mount_quobyte(self.TEST_QUOBYTE_VOLUME, self.TEST_MNT_POINT, ensure=True) mock_mkdir.assert_called_once_with(self.TEST_MNT_POINT) mount_call = mock.call( 'mount.quobyte', '--disable-xattrs', self.TEST_QUOBYTE_VOLUME, self.TEST_MNT_POINT, run_as_root=False) mock_execute.assert_has_calls([mount_call], any_order=False) mock_validate.assert_called_once_with(self.TEST_MNT_POINT) @mock.patch.object(quobyte, 'psutil') def test_mount_quobyte_should_reraise_already_mounted_error(self, ps_mock): """test_mount_quobyte_should_reraise_already_mounted_error Like test_mount_quobyte_should_suppress_already_mounted_error but with ensure=False. """ part_mock = ps_mock.disk_partitions part_mock.return_value = [] # no quobyte@ devices with mock.patch.object(self._driver, '_execute') as mock_execute, \ mock.patch('oslo_utils.fileutils.ensure_tree') as mock_mkdir, \ mock.patch('cinder.volume.drivers.quobyte.QuobyteDriver' '.read_proc_mount') as mock_open: mock_open.return_value = StringIO() mock_execute.side_effect = [ None, # mkdir putils.ProcessExecutionError( # mount stderr='is busy or already mounted')] self.assertRaises(exception.VolumeDriverException, self._driver._mount_quobyte, self.TEST_QUOBYTE_VOLUME, self.TEST_MNT_POINT, ensure=False) mock_mkdir.assert_called_once_with(self.TEST_MNT_POINT) mount_call = mock.call( 'mount.quobyte', '--disable-xattrs', self.TEST_QUOBYTE_VOLUME, self.TEST_MNT_POINT, run_as_root=False) mock_execute.assert_has_calls([mount_call], any_order=False) def test_get_hash_str(self): """_get_hash_str should calculation correct value.""" drv = self._driver self.assertEqual(self.TEST_MNT_HASH, drv._get_hash_str(self.TEST_QUOBYTE_VOLUME)) def test_get_available_capacity_with_df(self): """_get_available_capacity should calculate correct value.""" drv = self._driver df_total_size = 2620544 df_avail = 1490560 df_head = 'Filesystem 1K-blocks Used Available Use% Mounted on\n' df_data = 'quobyte@%s %d 996864 %d 41%% %s' % \ (self.TEST_QUOBYTE_VOLUME, df_total_size, df_avail, self.TEST_MNT_POINT) df_output = df_head + df_data drv._get_mount_point_for_share = mock.Mock(return_value=self. TEST_MNT_POINT) drv._execute = mock.Mock(return_value=(df_output, None)) self.assertEqual((df_avail, df_total_size), drv._get_available_capacity(self.TEST_QUOBYTE_VOLUME)) (drv._get_mount_point_for_share. assert_called_once_with(self.TEST_QUOBYTE_VOLUME)) (drv._execute. 
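# ---------------------------------------------------------------------------
# Illustrative sketch: test_get_available_capacity_with_df feeds the driver a
# canned two-line `df --portability --block-size 1` output and expects the
# (available, total) tuple back.  The parser below is only a plausible,
# hypothetical reading of that output format -- split the data row and pick
# the total/Available columns -- not the driver's actual implementation.
def parse_df_output(df_output):
    """Return (available, total) from portable df output."""
    # Row 0 is the header; row 1 is "<fs> <total> <used> <avail> <use%> <mnt>".
    fields = df_output.splitlines()[1].split()
    total = int(fields[1])
    available = int(fields[3])
    return available, total


_DF_SAMPLE = (
    'Filesystem 1K-blocks Used Available Use% Mounted on\n'
    'quobyte@quobyte-host/openstack-volumes 2620544 996864 1490560 41% '
    '/fake-mnt/1331538734b757ed52d0e18c0a7210cd')
assert parse_df_output(_DF_SAMPLE) == (1490560, 2620544)
# ---------------------------------------------------------------------------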
assert_called_once_with('df', '--portability', '--block-size', '1', self.TEST_MNT_POINT, run_as_root=self._driver._execute_as_root)) def test_get_capacity_info(self): with mock.patch.object(self._driver, '_get_available_capacity') \ as mock_get_available_capacity: drv = self._driver df_size = 2620544 df_avail = 1490560 mock_get_available_capacity.return_value = (df_avail, df_size) size, available, used = drv._get_capacity_info(mock.ANY) mock_get_available_capacity.assert_called_once_with(mock.ANY) self.assertEqual(df_size, size) self.assertEqual(df_avail, available) self.assertEqual(size - available, used) def test_load_shares_config(self): """_load_shares_config takes the Volume URL and strips quobyte://.""" drv = self._driver drv._load_shares_config() self.assertIn(self.TEST_QUOBYTE_VOLUME_WITHOUT_PROTOCOL, drv.shares) def test_load_shares_config_without_protocol(self): """Same as test_load_shares_config, but URL is without quobyte://.""" drv = self._driver drv.configuration.quobyte_volume_url = \ self.TEST_QUOBYTE_VOLUME_WITHOUT_PROTOCOL drv._load_shares_config() self.assertIn(self.TEST_QUOBYTE_VOLUME_WITHOUT_PROTOCOL, drv.shares) def test_ensure_share_mounted(self): """_ensure_share_mounted simple use case.""" with mock.patch.object(self._driver, '_get_mount_point_for_share') as \ mock_get_mount_point, \ mock.patch.object(self._driver, '_mount_quobyte') as \ mock_mount: drv = self._driver drv._ensure_share_mounted(self.TEST_QUOBYTE_VOLUME) mock_get_mount_point.assert_called_once_with( self.TEST_QUOBYTE_VOLUME) mock_mount.assert_called_once_with( self.TEST_QUOBYTE_VOLUME_WITHOUT_PROTOCOL, mock_get_mount_point.return_value, ensure=True) def test_ensure_shares_mounted_should_save_mounting_successfully(self): """_ensure_shares_mounted should save share if mounted with success.""" with mock.patch.object(self._driver, '_ensure_share_mounted') \ as mock_ensure_share_mounted: drv = self._driver drv._ensure_shares_mounted() mock_ensure_share_mounted.assert_called_once_with( self.TEST_QUOBYTE_VOLUME_WITHOUT_PROTOCOL) self.assertIn(self.TEST_QUOBYTE_VOLUME_WITHOUT_PROTOCOL, drv._mounted_shares) def test_ensure_shares_mounted_should_not_save_mounting_with_error(self): """_ensure_shares_mounted should not save if mount raised an error.""" with mock.patch.object(self._driver, '_ensure_share_mounted') \ as mock_ensure_share_mounted: drv = self._driver mock_ensure_share_mounted.side_effect = Exception() drv._ensure_shares_mounted() mock_ensure_share_mounted.assert_called_once_with( self.TEST_QUOBYTE_VOLUME_WITHOUT_PROTOCOL) self.assertEqual(1, len(drv.shares)) self.assertEqual(0, len(drv._mounted_shares)) @mock.patch.object(quobyte.QuobyteDriver, "set_nas_security_options") def test_do_setup(self, qb_snso_mock): """do_setup runs successfully.""" drv = self._driver drv.do_setup(mock.create_autospec(context.RequestContext)) qb_snso_mock.assert_called_once_with(is_new_cinder_install=mock.ANY) @mock.patch.object(quobyte.QuobyteDriver, "set_nas_security_options") def test_do_setup_overlay(self, qb_snso_mock): """do_setup runs successfully.""" drv = self._driver drv.configuration.quobyte_qcow2_volumes = True drv.configuration.quobyte_overlay_volumes = True drv.configuration.quobyte_volume_from_snapshot_cache = True drv.do_setup(mock.create_autospec(context.RequestContext)) qb_snso_mock.assert_called_once_with(is_new_cinder_install=mock.ANY) self.assertTrue(drv.configuration.quobyte_overlay_volumes) @mock.patch.object(quobyte.QuobyteDriver, "set_nas_security_options") def test_do_setup_no_overlay(self, 
qb_snso_mock): """do_setup runs successfully.""" drv = self._driver drv.configuration.quobyte_overlay_volumes = True drv.configuration.quobyte_volume_from_snapshot_cache = True drv.configuration.quobyte_qcow2_volumes = False drv.do_setup(mock.create_autospec(context.RequestContext)) qb_snso_mock.assert_called_once_with(is_new_cinder_install=mock.ANY) self.assertFalse(drv.configuration.quobyte_overlay_volumes) @mock.patch.object(quobyte, 'psutil', new=None) def test_check_for_setup_error_throws_psutil_missing(self): """check_for_setup_error raises if psutil not installed.""" drv = self._driver e = self.assertRaises(exception.VolumeDriverException, drv.check_for_setup_error) self.assertIn("psutil", str(e)) @mock.patch.object(quobyte, 'psutil') def test_check_for_setup_error_throws_quobyte_volume_url_not_set( self, mock_psutil): """check_for_setup_error throws if 'quobyte_volume_url' is not set.""" drv = self._driver drv.configuration.quobyte_volume_url = None self.assertRaisesAndMessageMatches(exception.VolumeDriverException, 'no Quobyte volume configured', drv.check_for_setup_error) @mock.patch.object(quobyte, 'psutil') def test_check_for_setup_error_throws_client_not_installed( self, mock_psutil): """check_for_setup_error throws if client is not installed.""" drv = self._driver drv._execute = mock.Mock(side_effect=OSError (errno.ENOENT, 'No such file or directory')) self.assertRaisesAndMessageMatches(exception.VolumeDriverException, 'mount.quobyte is not installed', drv.check_for_setup_error) drv._execute.assert_called_once_with('mount.quobyte', check_exit_code=False, run_as_root=False) @mock.patch.object(quobyte, 'psutil') def test_check_for_setup_error_throws_client_not_executable( self, mock_psutil): """check_for_setup_error throws if client cannot be executed.""" drv = self._driver drv._execute = mock.Mock(side_effect=OSError (errno.EPERM, 'Operation not permitted')) self.assertRaisesAndMessageMatches(OSError, 'Operation not permitted', drv.check_for_setup_error) drv._execute.assert_called_once_with('mount.quobyte', check_exit_code=False, run_as_root=False) def test_find_share_should_throw_error_if_there_is_no_mounted_shares(self): """_find_share should throw error if there is no mounted share.""" drv = self._driver drv._mounted_shares = [] self.assertRaises(exception.NotFound, drv._find_share, self._simple_volume()) def test_find_share(self): """_find_share simple use case.""" drv = self._driver drv._mounted_shares = [self.TEST_QUOBYTE_VOLUME] self.assertEqual(self.TEST_QUOBYTE_VOLUME, drv._find_share(self._simple_volume())) def test_find_share_does_not_throw_error_if_there_isnt_enough_space(self): """_find_share intentionally does not throw when no space is left.""" with mock.patch.object(self._driver, '_get_available_capacity') \ as mock_get_available_capacity: drv = self._driver df_size = 2620544 df_avail = 0 mock_get_available_capacity.return_value = (df_avail, df_size) drv._mounted_shares = [self.TEST_QUOBYTE_VOLUME] self.assertEqual(self.TEST_QUOBYTE_VOLUME, drv._find_share(self._simple_volume())) # The current implementation does not call _get_available_capacity. # Future ones might do and therefore we mocked it. 
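# ---------------------------------------------------------------------------
# Illustrative sketch: the two check_for_setup_error client tests above drive
# the same probe ('mount.quobyte' executed with check_exit_code=False) into
# different OSError branches -- ENOENT is translated into a driver-level
# "not installed" error, while anything else (e.g. EPERM) is re-raised as-is.
# probe_client below is a hypothetical reduction of that branching, shown
# only to make the two negative tests easier to read side by side.
import errno


class ClientMissingError(Exception):
    pass


def probe_client(execute):
    """Run the client binary once; translate only the 'missing' case."""
    try:
        execute('mount.quobyte', check_exit_code=False, run_as_root=False)
    except OSError as exc:
        if exc.errno == errno.ENOENT:
            raise ClientMissingError('mount.quobyte is not installed')
        raise  # permission problems and the like propagate unchanged


def _demo_probe():
    def missing(*args, **kwargs):
        raise OSError(errno.ENOENT, 'No such file or directory')

    def forbidden(*args, **kwargs):
        raise OSError(errno.EPERM, 'Operation not permitted')

    try:
        probe_client(missing)
    except ClientMissingError:
        pass
    try:
        probe_client(forbidden)
    except OSError as exc:
        assert exc.errno == errno.EPERM
# ---------------------------------------------------------------------------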
self.assertGreaterEqual(mock_get_available_capacity.call_count, 0) def _simple_volume(self, **kwargs): updates = {'id': self.VOLUME_UUID, 'provider_location': self.TEST_QUOBYTE_VOLUME, 'display_name': 'volume-%s' % self.VOLUME_UUID, 'name': 'volume-%s' % self.VOLUME_UUID, 'size': 10, 'status': 'available'} updates.update(kwargs) if 'display_name' not in updates: updates['display_name'] = 'volume-%s' % updates['id'] return fake_volume.fake_volume_obj(self.context, **updates) def test_create_sparsed_volume(self): drv = self._driver volume = self._simple_volume() drv._create_sparsed_file = mock.Mock() drv._set_rw_permissions_for_all = mock.Mock() drv._do_create_volume(volume) drv._create_sparsed_file.assert_called_once_with(mock.ANY, mock.ANY) drv._set_rw_permissions_for_all.assert_called_once_with(mock.ANY) def test_create_nonsparsed_volume(self): drv = self._driver volume = self._simple_volume() old_value = self._configuration.quobyte_sparsed_volumes self._configuration.quobyte_sparsed_volumes = False drv._create_regular_file = mock.Mock() drv._set_rw_permissions_for_all = mock.Mock() drv._do_create_volume(volume) drv._create_regular_file.assert_called_once_with(mock.ANY, mock.ANY) drv._set_rw_permissions_for_all.assert_called_once_with(mock.ANY) self._configuration.quobyte_sparsed_volumes = old_value def test_create_qcow2_volume(self): drv = self._driver volume = self._simple_volume() old_value = self._configuration.quobyte_qcow2_volumes self._configuration.quobyte_qcow2_volumes = True drv._execute = mock.Mock() hashed = drv._get_hash_str(volume['provider_location']) path = '%s/%s/volume-%s' % (self.TEST_MNT_POINT_BASE, hashed, self.VOLUME_UUID) drv._do_create_volume(volume) assert_calls = [mock.call('qemu-img', 'create', '-f', 'qcow2', '-o', 'preallocation=metadata', path, str(volume['size'] * units.Gi), run_as_root=self._driver._execute_as_root), mock.call('chmod', 'ugo+rw', path, run_as_root=self._driver._execute_as_root)] drv._execute.assert_has_calls(assert_calls) self._configuration.quobyte_qcow2_volumes = old_value def test_create_volume_should_ensure_quobyte_mounted(self): """create_volume ensures shares provided in config are mounted.""" drv = self._driver drv.LOG = mock.Mock() drv._find_share = mock.Mock() drv._find_share.return_value = self.TEST_QUOBYTE_VOLUME drv._do_create_volume = mock.Mock() drv._ensure_shares_mounted = mock.Mock() volume = self._simple_volume(size=self.TEST_SIZE_IN_GB) drv.create_volume(volume) drv._find_share.assert_called_once_with(mock.ANY) drv._do_create_volume.assert_called_once_with(volume) drv._ensure_shares_mounted.assert_called_once_with() def test_create_volume_should_return_provider_location(self): """create_volume should return provider_location with found share.""" drv = self._driver drv.LOG = mock.Mock() drv._ensure_shares_mounted = mock.Mock() drv._do_create_volume = mock.Mock() drv._find_share = mock.Mock(return_value=self.TEST_QUOBYTE_VOLUME) volume = self._simple_volume(size=self.TEST_SIZE_IN_GB) result = drv.create_volume(volume) self.assertEqual(self.TEST_QUOBYTE_VOLUME, result['provider_location']) drv._do_create_volume.assert_called_once_with(volume) drv._ensure_shares_mounted.assert_called_once_with() drv._find_share.assert_called_once_with(volume) @mock.patch('oslo_utils.fileutils.delete_if_exists') def test_delete_volume(self, mock_delete_if_exists): volume = self._simple_volume() volume_filename = 'volume-%s' % self.VOLUME_UUID volume_path = '%s/%s' % (self.TEST_MNT_POINT, volume_filename) info_file = volume_path + '.info' with 
mock.patch.object(self._driver, '_ensure_share_mounted') as \ mock_ensure_share_mounted, \ mock.patch.object(self._driver, '_local_volume_dir') as \ mock_local_volume_dir, \ mock.patch.object(self._driver, 'get_active_image_from_info') as \ mock_active_image_from_info, \ mock.patch.object(self._driver, '_execute') as \ mock_execute, \ mock.patch.object(self._driver, '_local_path_volume') as \ mock_local_path_volume, \ mock.patch.object(self._driver, '_local_path_volume_info') as \ mock_local_path_volume_info: self._driver._qemu_img_info = mock.Mock() self._driver._qemu_img_info.return_value = mock.Mock() self._driver._qemu_img_info.return_value.backing_file = None mock_local_volume_dir.return_value = self.TEST_MNT_POINT mock_active_image_from_info.return_value = volume_filename mock_local_path_volume.return_value = volume_path mock_local_path_volume_info.return_value = info_file self._driver.delete_volume(volume) mock_ensure_share_mounted.assert_called_once_with( volume['provider_location']) mock_local_volume_dir.assert_called_once_with(volume) mock_active_image_from_info.assert_called_once_with(volume) mock_execute.assert_called_once_with('rm', '-f', volume_path, run_as_root= self._driver._execute_as_root) mock_local_path_volume_info.assert_called_once_with(volume) mock_local_path_volume.assert_called_once_with(volume) mock_delete_if_exists.assert_any_call(volume_path) mock_delete_if_exists.assert_any_call(info_file) @mock.patch.object(os, 'access', return_value=True) @mock.patch('oslo_utils.fileutils.delete_if_exists') def test_delete_volume_backing_file(self, mock_delete_if_exists, os_acc_mock): drv = self._driver volume = self._simple_volume() volume_filename = 'volume-%s' % self.VOLUME_UUID volume_path = '%s/%s' % (self.TEST_MNT_POINT, volume_filename) info_file = volume_path + '.info' drv._ensure_share_mounted = mock.Mock() drv._local_volume_dir = mock.Mock() drv._local_volume_dir.return_value = self.TEST_MNT_POINT drv.get_active_image_from_info = mock.Mock() drv.get_active_image_from_info.return_value = volume_filename drv._qemu_img_info = mock.Mock() drv._qemu_img_info.return_value = mock.Mock() drv._qemu_img_info.return_value.backing_file = os.path.join( drv.QUOBYTE_VOLUME_SNAP_CACHE_DIR_NAME, "cached_volume_file") drv._remove_from_vol_cache = mock.Mock() drv._execute = mock.Mock() drv._local_path_volume = mock.Mock() drv._local_path_volume.return_value = volume_path drv._local_path_volume_info = mock.Mock() drv._local_path_volume_info.return_value = info_file drv.delete_volume(volume) drv._ensure_share_mounted.assert_called_once_with( volume['provider_location']) drv._local_volume_dir.assert_called_once_with(volume) drv.get_active_image_from_info.assert_called_once_with(volume) drv._qemu_img_info.assert_called_once_with( drv.local_path(volume), drv.get_active_image_from_info()) drv._remove_from_vol_cache.assert_called_once_with( drv._qemu_img_info().backing_file, ".child-" + volume.id, volume) drv._execute.assert_called_once_with('rm', '-f', volume_path, run_as_root= self._driver._execute_as_root) drv._local_path_volume.assert_called_once_with(volume) drv._local_path_volume_info.assert_called_once_with(volume) mock_delete_if_exists.assert_any_call(volume_path) mock_delete_if_exists.assert_any_call(info_file) os_acc_mock.assert_called_once_with(drv._local_path_volume(volume), os.F_OK) @mock.patch.object(os, 'access', return_value=True) def test_delete_should_ensure_share_mounted(self, os_acc_mock): """delete_volume should ensure that corresponding share is mounted.""" drv = 
self._driver drv._execute = mock.Mock() drv._qemu_img_info = mock.Mock() drv._qemu_img_info.return_value = mock.Mock() drv._qemu_img_info.return_value.backing_file = "/virtual/test/file" volume = self._simple_volume(display_name='volume-123') drv._ensure_share_mounted = mock.Mock() drv._remove_from_vol_cache = mock.Mock() drv.delete_volume(volume) (drv._ensure_share_mounted. assert_called_once_with(self.TEST_QUOBYTE_VOLUME)) drv._qemu_img_info.assert_called_once_with( drv._local_path_volume(volume), drv.get_active_image_from_info(volume)) # backing file is not in cache, no cache cleanup: self.assertFalse(drv._remove_from_vol_cache.called) drv._execute.assert_called_once_with('rm', '-f', drv.local_path(volume), run_as_root=False) os_acc_mock.assert_called_once_with(drv._local_path_volume(volume), os.F_OK) def test_delete_should_not_delete_if_provider_location_not_provided(self): """delete_volume shouldn't delete if provider_location missed.""" drv = self._driver drv._ensure_share_mounted = mock.Mock() drv._execute = mock.Mock() volume = self._simple_volume(display_name='volume-123', provider_location=None) drv.delete_volume(volume) drv._ensure_share_mounted.assert_not_called() drv._execute.assert_not_called() @ddt.data(True, False) @mock.patch.object(remotefs.RemoteFSSnapDriverDistributed, "_is_volume_attached") def test_extend_volume(self, is_attached, mock_remote_attached): drv = self._driver volume = self._simple_volume() volume_path = '%s/%s/volume-%s' % (self.TEST_MNT_POINT_BASE, drv._get_hash_str( self.TEST_QUOBYTE_VOLUME), self.VOLUME_UUID) qemu_img_info_output = """{ "filename": "volume-%s", "format": "qcow2", "virtual-size": 1073741824, "actual-size": 473000 }""" % self.VOLUME_UUID img_info = imageutils.QemuImgInfo(qemu_img_info_output, format='json') self.mock_object(image_utils, 'qemu_img_info', return_value=img_info) self.mock_object(image_utils, 'resize_image') mock_remote_attached.return_value = is_attached if is_attached: self.assertRaises(exception.ExtendVolumeError, drv.extend_volume, volume, 3) else: drv.extend_volume(volume, 3) image_utils.qemu_img_info.assert_called_once_with( volume_path, force_share=True, run_as_root=False, allow_qcow2_backing_file=True) image_utils.resize_image.assert_called_once_with(volume_path, 3) def test_copy_volume_from_snapshot(self): drv = self._driver # lots of test vars to be prepared at first dest_volume = self._simple_volume( id='c1073000-0000-0000-0000-0000000c1073') src_volume = self._simple_volume() vol_dir = os.path.join(self.TEST_MNT_POINT_BASE, drv._get_hash_str(self.TEST_QUOBYTE_VOLUME)) src_vol_path = os.path.join(vol_dir, src_volume['name']) dest_vol_path = os.path.join(vol_dir, dest_volume['name']) info_path = os.path.join(vol_dir, src_volume['name']) + '.info' snapshot = self._get_fake_snapshot(src_volume) snap_file = dest_volume['name'] + '.' 
+ snapshot['id'] snap_path = os.path.join(vol_dir, snap_file) size = dest_volume['size'] qemu_img_output = """{ "filename": "%s", "format": "raw", "virtual-size": 1073741824, "actual-size": 173000, "backing-filename": "%s" }""" % (snap_file, src_volume['name']) img_info = imageutils.QemuImgInfo(qemu_img_output, format='json') # mocking and testing starts here mock_convert = self.mock_object(image_utils, 'convert_image') drv._read_info_file = mock.Mock(return_value= {'active': snap_file, snapshot['id']: snap_file}) self.mock_object(image_utils, 'qemu_img_info', return_value=img_info) drv._set_rw_permissions = mock.Mock() drv._copy_volume_from_snapshot(snapshot, dest_volume, size) drv._read_info_file.assert_called_once_with(info_path) image_utils.qemu_img_info.assert_called_once_with( snap_path, force_share=True, run_as_root=False, allow_qcow2_backing_file=True) (mock_convert. assert_called_once_with(src_vol_path, dest_vol_path, 'raw', run_as_root=self._driver._execute_as_root)) drv._set_rw_permissions.assert_called_once_with(dest_vol_path) @mock.patch.object(quobyte.QuobyteDriver, "_fallocate_file") @mock.patch.object(os, "access", return_value=True) def test_copy_volume_from_snapshot_cached(self, os_ac_mock, qb_falloc_mock): drv = self._driver drv.configuration.quobyte_volume_from_snapshot_cache = True # lots of test vars to be prepared at first dest_volume = self._simple_volume( id='c1073000-0000-0000-0000-0000000c1073') src_volume = self._simple_volume() vol_dir = os.path.join(self.TEST_MNT_POINT_BASE, drv._get_hash_str(self.TEST_QUOBYTE_VOLUME)) dest_vol_path = os.path.join(vol_dir, dest_volume['name']) info_path = os.path.join(vol_dir, src_volume['name']) + '.info' snapshot = self._get_fake_snapshot(src_volume) snap_file = dest_volume['name'] + '.' 
+ snapshot['id'] snap_path = os.path.join(vol_dir, snap_file) cache_path = os.path.join(vol_dir, drv.QUOBYTE_VOLUME_SNAP_CACHE_DIR_NAME, snapshot['id']) size = dest_volume['size'] qemu_img_output = """{ "filename": "%s", "format": "raw", "virtual-size": 1073741824, "actual-size": 173000, "backing-filename": "%s" }""" % (snap_file, src_volume['name']) img_info = imageutils.QemuImgInfo(qemu_img_output, format='json') # mocking and testing starts here mock_convert = self.mock_object(image_utils, 'convert_image') drv._read_info_file = mock.Mock(return_value= {'active': snap_file, snapshot['id']: snap_file}) self.mock_object(image_utils, 'qemu_img_info', return_value=img_info) drv._set_rw_permissions = mock.Mock() shutil.copyfile = mock.Mock() drv._copy_volume_from_snapshot(snapshot, dest_volume, size) drv._read_info_file.assert_called_once_with(info_path) image_utils.qemu_img_info.assert_called_once_with( snap_path, force_share=True, run_as_root=False, allow_qcow2_backing_file=True) self.assertFalse(mock_convert.called, ("_convert_image was called but should not have been") ) os_ac_mock.assert_called_once_with( drv._local_volume_from_snap_cache_path(snapshot), os.F_OK) qb_falloc_mock.assert_called_once_with(dest_vol_path, size) shutil.copyfile.assert_called_once_with(cache_path, dest_vol_path) drv._set_rw_permissions.assert_called_once_with(dest_vol_path) @mock.patch.object(os, "symlink") @mock.patch.object(os, "access", return_value=False) def test_copy_volume_from_snapshot_not_cached_overlay(self, os_ac_mock, os_sl_mock): drv = self._driver drv.configuration.quobyte_qcow2_volumes = True drv.configuration.quobyte_volume_from_snapshot_cache = True drv.configuration.quobyte_overlay_volumes = True # lots of test vars to be prepared at first dest_volume = self._simple_volume( id='c1073000-0000-0000-0000-0000000c1073') src_volume = self._simple_volume() vol_dir = os.path.join(self.TEST_MNT_POINT_BASE, drv._get_hash_str(self.TEST_QUOBYTE_VOLUME)) src_vol_path = os.path.join(vol_dir, src_volume['name']) vol_dir = os.path.join(self.TEST_MNT_POINT_BASE, drv._get_hash_str(self.TEST_QUOBYTE_VOLUME)) dest_vol_path = os.path.join(vol_dir, dest_volume['name']) info_path = os.path.join(vol_dir, src_volume['name']) + '.info' snapshot = self._get_fake_snapshot(src_volume) snap_file = dest_volume['name'] + '.' + snapshot['id'] snap_path = os.path.join(vol_dir, snap_file) size = dest_volume['size'] qemu_img_output = """{ "filename": "%s", "format": "raw", "virtual-size": 1073741824, "actual-size": 173000, "backing-filename": "%s" }""" % (snap_file, src_volume['name']) img_info = imageutils.QemuImgInfo(qemu_img_output, format='json') # mocking and testing starts here mock_convert = self.mock_object(image_utils, 'convert_image') drv._read_info_file = mock.Mock(return_value= {'active': snap_file, snapshot['id']: snap_file}) self.mock_object(image_utils, 'qemu_img_info', return_value=img_info) drv._set_rw_permissions = mock.Mock() drv._create_overlay_volume_from_snapshot = mock.Mock() drv._copy_volume_from_snapshot(snapshot, dest_volume, size) drv._read_info_file.assert_called_once_with(info_path) os_ac_mock.assert_called_once_with( drv._local_volume_from_snap_cache_path(snapshot), os.F_OK) image_utils.qemu_img_info.assert_called_once_with( snap_path, force_share=True, run_as_root=False, allow_qcow2_backing_file=True) (mock_convert. 
assert_called_once_with( src_vol_path, drv._local_volume_from_snap_cache_path(snapshot), 'qcow2', run_as_root=self._driver._execute_as_root)) os_sl_mock.assert_called_once_with( src_vol_path, drv._local_volume_from_snap_cache_path(snapshot) + '.parent-' + snapshot.id) drv._create_overlay_volume_from_snapshot.assert_called_once_with( dest_volume, snapshot, size, 'qcow2') drv._set_rw_permissions.assert_called_once_with(dest_vol_path) @mock.patch.object(quobyte.QuobyteDriver, "_fallocate_file") def test_copy_volume_from_snapshot_not_cached(self, qb_falloc_mock): drv = self._driver drv.configuration.quobyte_volume_from_snapshot_cache = True # lots of test vars to be prepared at first dest_volume = self._simple_volume( id='c1073000-0000-0000-0000-0000000c1073') src_volume = self._simple_volume() vol_dir = os.path.join(self.TEST_MNT_POINT_BASE, drv._get_hash_str(self.TEST_QUOBYTE_VOLUME)) src_vol_path = os.path.join(vol_dir, src_volume['name']) dest_vol_path = os.path.join(vol_dir, dest_volume['name']) info_path = os.path.join(vol_dir, src_volume['name']) + '.info' snapshot = self._get_fake_snapshot(src_volume) snap_file = dest_volume['name'] + '.' + snapshot['id'] snap_path = os.path.join(vol_dir, snap_file) cache_path = os.path.join(vol_dir, drv.QUOBYTE_VOLUME_SNAP_CACHE_DIR_NAME, snapshot['id']) size = dest_volume['size'] qemu_img_output = """{ "filename": "%s", "format": "raw", "virtual-size": 1073741824, "actual-size": 173000, "backing-filename": "%s" }""" % (snap_file, src_volume['name']) img_info = imageutils.QemuImgInfo(qemu_img_output, format='json') # mocking and testing starts here mock_convert = self.mock_object(image_utils, 'convert_image') drv._read_info_file = mock.Mock(return_value= {'active': snap_file, snapshot['id']: snap_file}) self.mock_object(image_utils, 'qemu_img_info', return_value=img_info) drv._set_rw_permissions = mock.Mock() self.mock_object(shutil, 'copyfile') drv._copy_volume_from_snapshot(snapshot, dest_volume, size) drv._read_info_file.assert_called_once_with(info_path) image_utils.qemu_img_info.assert_called_once_with( snap_path, force_share=True, run_as_root=False, allow_qcow2_backing_file=True) (mock_convert. assert_called_once_with( src_vol_path, drv._local_volume_from_snap_cache_path(snapshot), 'raw', run_as_root=self._driver._execute_as_root)) qb_falloc_mock.assert_called_once_with(dest_vol_path, size) shutil.copyfile.assert_called_once_with(cache_path, dest_vol_path) drv._set_rw_permissions.assert_called_once_with(dest_vol_path) @ddt.data(['available', True], ['backing-up', True], ['creating', False], ['deleting', False]) @ddt.unpack def test_create_volume_from_snapshot(self, state, should_work): drv = self._driver src_volume = self._simple_volume() snap_ref = fake_snapshot.fake_snapshot_obj( self.context, volume_name=src_volume.name, display_name='clone-snap-%s' % src_volume.id, volume_size=src_volume.size, volume_id=src_volume.id, id=self.SNAP_UUID, status=state) snap_ref.volume = src_volume new_volume = self._simple_volume(size=snap_ref.volume_size) drv._ensure_shares_mounted = mock.Mock() drv._find_share = mock.Mock(return_value=self.TEST_QUOBYTE_VOLUME) drv._copy_volume_from_snapshot = mock.Mock() if should_work: drv.create_volume_from_snapshot(new_volume, snap_ref) drv._ensure_shares_mounted.assert_called_once_with() drv._find_share.assert_called_once_with(new_volume) (drv._copy_volume_from_snapshot. 
assert_called_once_with(snap_ref, new_volume, new_volume['size'])) else: self.assertRaises(exception.InvalidSnapshot, drv.create_volume_from_snapshot, new_volume, snap_ref) def test_initialize_connection(self): drv = self._driver volume = self._simple_volume() vol_dir = os.path.join(self.TEST_MNT_POINT_BASE, drv._get_hash_str(self.TEST_QUOBYTE_VOLUME)) vol_path = os.path.join(vol_dir, volume['name']) qemu_img_output = """{ "filename": "%s", "format": "raw", "virtual-size": 1073741824, "actual-size": 173000 }""" % volume['name'] img_info = imageutils.QemuImgInfo(qemu_img_output, format='json') drv.get_active_image_from_info = mock.Mock(return_value=volume['name']) self.mock_object(image_utils, 'qemu_img_info', return_value=img_info) conn_info = drv.initialize_connection(volume, None) drv.get_active_image_from_info.assert_called_once_with(volume) image_utils.qemu_img_info.assert_called_once_with( vol_path, force_share=True, run_as_root=False, allow_qcow2_backing_file=True) self.assertEqual('raw', conn_info['data']['format']) self.assertEqual('quobyte', conn_info['driver_volume_type']) self.assertEqual(volume['name'], conn_info['data']['name']) self.assertEqual(self.TEST_MNT_POINT_BASE, conn_info['mount_point_base']) @mock.patch('cinder.db.volume_glance_metadata_get', return_value={}) def test_copy_volume_to_image_raw_image(self, vol_glance_metadata): drv = self._driver volume_type_id = db.volume_type_create( self.context, {'name': 'quo_type', 'extra_specs': {}}).get('id') volume = self._simple_volume(volume_type_id=volume_type_id) volume_path = '%s/%s' % (self.TEST_MNT_POINT, volume['name']) image_meta = {'id': '10958016-e196-42e3-9e7f-5d8927ae3099'} with mock.patch.object(drv, 'get_active_image_from_info') as \ mock_get_active_image_from_info, \ mock.patch.object(drv, '_local_volume_dir') as \ mock_local_volume_dir, \ mock.patch.object(image_utils, 'qemu_img_info') as \ mock_qemu_img_info, \ mock.patch.object(image_utils, 'upload_volume') as \ mock_upload_volume, \ mock.patch.object(image_utils, 'create_temporary_file') as \ mock_create_temporary_file: mock_get_active_image_from_info.return_value = volume['name'] mock_local_volume_dir.return_value = self.TEST_MNT_POINT mock_create_temporary_file.return_value = self.TEST_TMP_FILE qemu_img_output = """{ "filename": "%s", "format": "raw", "virtual-size": 1073741824, "actual-size": 173000 }""" % volume['name'] img_info = imageutils.QemuImgInfo(qemu_img_output, format='json') mock_qemu_img_info.return_value = img_info upload_path = volume_path drv.copy_volume_to_image(mock.ANY, volume, mock.ANY, image_meta) mock_get_active_image_from_info.assert_called_once_with(volume) mock_local_volume_dir.assert_called_once_with(volume) mock_qemu_img_info.assert_called_once_with( volume_path, force_share=True, run_as_root=False, allow_qcow2_backing_file=True) mock_upload_volume.assert_called_once_with( mock.ANY, mock.ANY, mock.ANY, upload_path, run_as_root=False, store_id=None, base_image_ref=None, compress=True, volume_format='raw', volume_fd=None) self.assertTrue(mock_create_temporary_file.called) @mock.patch('cinder.db.volume_glance_metadata_get', return_value={}) def test_copy_volume_to_image_qcow2_image(self, vol_glance_metadata): """Upload a qcow2 image file which has to be converted to raw first.""" drv = self._driver volume_type_id = db.volume_type_create( self.context, {'name': 'quo_type', 'extra_specs': {}}).get('id') volume = self._simple_volume(volume_type_id=volume_type_id) volume_path = '%s/%s' % (self.TEST_MNT_POINT, volume['name']) image_meta 
= {'id': '10958016-e196-42e3-9e7f-5d8927ae3099'} with mock.patch.object(drv, 'get_active_image_from_info') as \ mock_get_active_image_from_info, \ mock.patch.object(drv, '_local_volume_dir') as \ mock_local_volume_dir, \ mock.patch.object(image_utils, 'qemu_img_info') as \ mock_qemu_img_info, \ mock.patch.object(image_utils, 'convert_image') as \ mock_convert_image, \ mock.patch.object(image_utils, 'upload_volume') as \ mock_upload_volume, \ mock.patch.object(image_utils, 'create_temporary_file') as \ mock_create_temporary_file: mock_get_active_image_from_info.return_value = volume['name'] mock_local_volume_dir.return_value = self.TEST_MNT_POINT mock_create_temporary_file.return_value = self.TEST_TMP_FILE qemu_img_output = """{ "filename": "%s", "format": "qcow2", "virtual-size": 1073741824, "actual-size": 173000 }""" % volume['name'] img_info = imageutils.QemuImgInfo(qemu_img_output, format='json') mock_qemu_img_info.return_value = img_info upload_path = self.TEST_TMP_FILE drv.copy_volume_to_image(mock.ANY, volume, mock.ANY, image_meta) mock_get_active_image_from_info.assert_called_once_with(volume) mock_local_volume_dir.assert_called_with(volume) mock_qemu_img_info.assert_called_once_with( volume_path, force_share=True, run_as_root=False, allow_qcow2_backing_file=True) mock_convert_image.assert_called_once_with( volume_path, upload_path, 'raw', run_as_root=False) mock_upload_volume.assert_called_once_with( mock.ANY, mock.ANY, mock.ANY, upload_path, run_as_root=False, store_id=None, base_image_ref=None, compress=True, volume_format='raw', volume_fd=None) self.assertTrue(mock_create_temporary_file.called) @mock.patch('cinder.db.volume_glance_metadata_get', return_value={}) def test_copy_volume_to_image_snapshot_exists(self, vol_glance_metadata): """Upload an active snapshot which has to be converted to raw first.""" drv = self._driver volume_type_id = db.volume_type_create( self.context, {'name': 'quo_type', 'extra_specs': {}}).get('id') volume = self._simple_volume(volume_type_id=volume_type_id) volume_path = '%s/volume-%s' % (self.TEST_MNT_POINT, self.VOLUME_UUID) volume_filename = 'volume-%s' % self.VOLUME_UUID image_meta = {'id': '10958016-e196-42e3-9e7f-5d8927ae3099'} with mock.patch.object(drv, 'get_active_image_from_info') as \ mock_get_active_image_from_info, \ mock.patch.object(drv, '_local_volume_dir') as \ mock_local_volume_dir, \ mock.patch.object(image_utils, 'qemu_img_info') as \ mock_qemu_img_info, \ mock.patch.object(image_utils, 'convert_image') as \ mock_convert_image, \ mock.patch.object(image_utils, 'upload_volume') as \ mock_upload_volume, \ mock.patch.object(image_utils, 'create_temporary_file') as \ mock_create_temporary_file: mock_get_active_image_from_info.return_value = volume['name'] mock_local_volume_dir.return_value = self.TEST_MNT_POINT mock_create_temporary_file.return_value = self.TEST_TMP_FILE qemu_img_output = """{ "filename": "volume-%s.%s", "format": "qcow2", "virtual-size": 1073741824, "actual-size": 173000, "backing-filename": "%s" }""" % (self.VOLUME_UUID, self.SNAP_UUID, volume_filename) img_info = imageutils.QemuImgInfo(qemu_img_output, format='json') mock_qemu_img_info.return_value = img_info upload_path = self.TEST_TMP_FILE drv.copy_volume_to_image(mock.ANY, volume, mock.ANY, image_meta) mock_get_active_image_from_info.assert_called_once_with(volume) mock_local_volume_dir.assert_called_with(volume) mock_qemu_img_info.assert_called_once_with( volume_path, force_share=True, run_as_root=False, allow_qcow2_backing_file=True) 
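# The active image reports qcow2 with a backing file, so the driver is
# expected to convert the volume to a raw temporary file and upload that
# file instead of the original volume path.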
mock_convert_image.assert_called_once_with( volume_path, upload_path, 'raw', run_as_root=False) mock_upload_volume.assert_called_once_with( mock.ANY, mock.ANY, mock.ANY, upload_path, volume_fd=None, run_as_root=False, store_id=None, base_image_ref=None, compress=True, volume_format='raw') self.assertTrue(mock_create_temporary_file.called) def test_set_nas_security_options_default(self): drv = self._driver self.assertEqual("true", drv.configuration.nas_secure_file_operations) self.assertEqual("true", drv.configuration.nas_secure_file_permissions) self.assertFalse(drv._execute_as_root) def test_set_nas_security_options_insecure(self): drv = self._driver drv.configuration.nas_secure_file_operations = "false" drv.configuration.nas_secure_file_permissions = "false" drv.set_nas_security_options(is_new_cinder_install=True) self.assertEqual("false", drv.configuration.nas_secure_file_operations) self.assertEqual("false", drv.configuration.nas_secure_file_permissions) self.assertTrue(drv._execute_as_root) def test_set_nas_security_options_explicitly_secure(self): drv = self._driver drv.configuration.nas_secure_file_operations = "true" drv.configuration.nas_secure_file_permissions = "true" drv.set_nas_security_options(is_new_cinder_install=True) self.assertEqual("true", drv.configuration.nas_secure_file_operations) self.assertEqual("true", drv.configuration.nas_secure_file_permissions) self.assertFalse(drv._execute_as_root) @mock.patch.object(quobyte, 'psutil') @mock.patch.object(os, "stat") def test_validate_volume_all_good_prefix_val(self, stat_mock, ps_mock): part_mock = ps_mock.disk_partitions part_mock.return_value = self.get_mock_partitions() drv = self._driver def statMockCall(*args): if args[0] == self.TEST_MNT_POINT: stat_result = mock.Mock() stat_result.st_size = 0 return stat_result return os.stat(args) stat_mock.side_effect = statMockCall drv._validate_volume(self.TEST_MNT_POINT) stat_mock.assert_called_once_with(self.TEST_MNT_POINT) part_mock.assert_called_once_with(all=True) @mock.patch.object(quobyte, 'psutil') @mock.patch.object(os, "stat") def test_validate_volume_all_good_subtype_val(self, stat_mock, ps_mock): part_mock = ps_mock.disk_partitions part_mock.return_value = self.get_mock_partitions() part_mock.return_value[0].device = "not_quobyte" part_mock.return_value[0].fstype = "fuse.quobyte" drv = self._driver def statMockCall(*args): if args[0] == self.TEST_MNT_POINT: stat_result = mock.Mock() stat_result.st_size = 0 return stat_result return os.stat(args) stat_mock.side_effect = statMockCall drv._validate_volume(self.TEST_MNT_POINT) stat_mock.assert_called_once_with(self.TEST_MNT_POINT) part_mock.assert_called_once_with(all=True) @mock.patch.object(quobyte, 'psutil') @mock.patch.object(os, "stat") def test_validate_volume_mount_not_working(self, stat_mock, ps_mock): part_mock = ps_mock.disk_partitions part_mock.return_value = self.get_mock_partitions() drv = self._driver def statMockCall(*args): if args[0] == self.TEST_MNT_POINT: raise exception.VolumeDriverException() stat_mock.side_effect = [statMockCall, os.stat] self.assertRaises( exception.VolumeDriverException, drv._validate_volume, self.TEST_MNT_POINT) stat_mock.assert_called_once_with(self.TEST_MNT_POINT) part_mock.assert_called_once_with(all=True) @mock.patch.object(quobyte, 'psutil') def test_validate_volume_no_mtab_entry(self, ps_mock): part_mock = ps_mock.disk_partitions part_mock.return_value = [] # no quobyte@ devices msg = ("Volume driver reported an error: " "No matching Quobyte mount entry for %(mpt)s" " could 
be found for validation in partition list." % {'mpt': self.TEST_MNT_POINT}) self.assertRaisesAndMessageMatches( exception.VolumeDriverException, msg, self._driver._validate_volume, self.TEST_MNT_POINT) @mock.patch.object(quobyte, 'psutil') def test_validate_volume_wrong_mount_type(self, ps_mock): part_mock = ps_mock.disk_partitions mypart = mock.Mock() mypart.device = "not-quobyte" mypart.mountpoint = self.TEST_MNT_POINT part_mock.return_value = [mypart] msg = ("Volume driver reported an error: " "The mount %(mpt)s is not a valid" " Quobyte volume according to partition list." % {'mpt': self.TEST_MNT_POINT}) drv = self._driver self.assertRaisesAndMessageMatches( exception.VolumeDriverException, msg, drv._validate_volume, self.TEST_MNT_POINT) part_mock.assert_called_once_with(all=True) @mock.patch.object(quobyte, 'psutil') def test_validate_volume_stale_mount(self, ps_mock): part_mock = ps_mock.disk_partitions part_mock.return_value = self.get_mock_partitions() drv = self._driver # As this uses a local fs the dir size is >0, raising an exception self.assertRaises( exception.VolumeDriverException, drv._validate_volume, self.TEST_MNT_POINT) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/volume/drivers/test_rbd.py0000664000175000017500000051502000000000000023624 0ustar00zuulzuul00000000000000# Copyright 2012 Josh Durgin # Copyright 2013 Canonical Ltd. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import errno import math import os import tempfile import time import types from unittest import mock from unittest.mock import call import uuid import castellan import ddt from oslo_utils import fileutils from oslo_utils import imageutils from oslo_utils import units from cinder import context from cinder import db from cinder import exception import cinder.image.glance from cinder.image import image_utils from cinder import objects from cinder.objects import fields from cinder.tests.unit import fake_constants as fake from cinder.tests.unit import fake_snapshot from cinder.tests.unit import fake_volume from cinder.tests.unit.keymgr import fake as fake_keymgr from cinder.tests.unit import test from cinder.tests.unit import utils from cinder.tests.unit.volume import test_driver from cinder.volume import configuration as conf import cinder.volume.drivers.rbd as driver from cinder.volume import qos_specs from cinder.volume import volume_utils # This is used to collect raised exceptions so that tests may check what was # raised. # NOTE: this must be initialised in test setUp(). 
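# Each Mock*Exception subclass defined below appends its own class to this
# list when instantiated, so tests can assert exactly which mocked rbd/rados
# error was raised, e.g.:
#     self.assertEqual([self.mock_rbd.ImageNotFound], RAISED_EXCEPTIONS)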
RAISED_EXCEPTIONS = [] class MockException(Exception): def __init__(self, *args, **kwargs): RAISED_EXCEPTIONS.append(self.__class__) class MockImageNotFoundException(MockException): """Used as mock for rbd.ImageNotFound.""" class MockImageBusyException(MockException): """Used as mock for rbd.ImageBusy.""" class MockImageExistsException(MockException): """Used as mock for rbd.ImageExists.""" class MockOSErrorException(MockException): """Used as mock for rbd.OSError.""" class MockPermissionError(MockException): """Used as mock for PermissionError.""" errno = errno.EPERM class MockTimeOutException(MockException): """Used as mock for TimeOut.""" class MockImageHasSnapshotsException(MockException): """Used as mock for rbd.ImageHasSnapshots.""" class MockInvalidArgument(MockException): """Used as mock for rbd.InvalidArgument.""" class KeyObject(object): def get_encoded(arg): return "asdf".encode('utf-8') def common_mocks(f): """Decorator to set mocks common to all tests. The point of doing these mocks here is so that we don't accidentally set mocks that can't/don't get unset. """ def _common_inner_inner1(inst, *args, **kwargs): @mock.patch.object(driver.RBDDriver, '_get_usage_info') @mock.patch('cinder.volume.drivers.rbd.RBDVolumeProxy') @mock.patch('cinder.volume.drivers.rbd.RADOSClient') @mock.patch('cinder.backup.drivers.ceph.rbd') @mock.patch('cinder.backup.drivers.ceph.rados') def _common_inner_inner2(mock_rados, mock_rbd, mock_client, mock_proxy, mock_usage_info): inst.mock_rbd = mock_rbd inst.mock_rados = mock_rados inst.mock_client = mock_client inst.mock_proxy = mock_proxy inst.mock_rbd.RBD.Error = Exception inst.mock_rados.Error = Exception inst.mock_rbd.Error = Exception inst.mock_rbd.ImageBusy = MockImageBusyException inst.mock_rbd.ImageNotFound = MockImageNotFoundException inst.mock_rbd.ImageExists = MockImageExistsException inst.mock_rbd.ImageHasSnapshots = MockImageHasSnapshotsException inst.mock_rbd.InvalidArgument = MockInvalidArgument inst.mock_rbd.PermissionError = MockPermissionError inst.mock_rbd.TimeOut = MockTimeOutException inst.driver.rbd = inst.mock_rbd aux = inst.driver.rbd aux.Image.return_value.stripe_unit.return_value = 4194304 inst.driver.rados = inst.mock_rados return f(inst, *args, **kwargs) return _common_inner_inner2() return _common_inner_inner1 CEPH_MON_DUMP = r"""dumped monmap epoch 1 { "epoch": 1, "fsid": "33630410-6d93-4d66-8e42-3b953cf194aa", "modified": "2013-05-22 17:44:56.343618", "created": "2013-05-22 17:44:56.343618", "mons": [ { "rank": 0, "name": "a", "addr": "[::1]:6789\/0"}, { "rank": 1, "name": "b", "addr": "[::1]:6790\/0"}, { "rank": 2, "name": "c", "addr": "[::1]:6791\/0"}, { "rank": 3, "name": "d", "addr": "127.0.0.1:6792\/0"}, { "rank": 4, "name": "e", "addr": "example.com:6791\/0"}], "quorum": [ 0, 1, 2]} """ class MockDriverConfig(object): def __init__(self, **kwargs): my_dict = vars(self) my_dict.update(kwargs) my_dict.setdefault('max_over_subscription_ratio', 1.0) my_dict.setdefault('reserved_percentage', 0) my_dict.setdefault('volume_backend_name', 'RBD') my_dict.setdefault('_default', None) def __call__(self, value): return getattr(self, value, self._default) @ddt.ddt class RBDTestCase(test.TestCase): @classmethod def _make_configuration(cls, conf_in=None): cfg = mock.Mock(spec=conf.Configuration) cfg.image_conversion_dir = None cfg.rbd_cluster_name = 'nondefault' cfg.rbd_pool = 'rbd' cfg.rbd_ceph_conf = '/etc/ceph/my_ceph.conf' cfg.rbd_secret_uuid = '5fe62cc7-0392-4a32-8466-081ce0ea970f' cfg.rbd_user = 'cinder' 
cfg.volume_backend_name = None cfg.volume_dd_blocksize = '1M' cfg.rbd_store_chunk_size = 4 cfg.rados_connect_timeout = -1 cfg.rados_connection_retries = 3 cfg.rados_connection_interval = 5 cfg.backup_use_temp_snapshot = False cfg.enable_deferred_deletion = False cfg.rbd_concurrent_flatten_operations = 3 # Because the mocked conf doesn't actually have an underlying oslo conf # it doesn't have the set_default method, so we use a fake one. cfg.set_default = types.MethodType(cls._set_default, cfg) if conf_in is not None: for k in conf_in: setattr(cfg, k, conf_in[k]) return cfg @staticmethod def _set_default(cfg, name, value, group=None): # Ignore the group for now if not getattr(cfg, name): setattr(cfg, name, value) @staticmethod def _make_drv(conf_in): cfg = RBDTestCase._make_configuration(conf_in) mock_exec = mock.Mock(return_value=('', '')) drv = driver.RBDDriver(execute=mock_exec, configuration=cfg, rbd=mock.MagicMock()) drv.set_initialized() return drv def setUp(self): global RAISED_EXCEPTIONS RAISED_EXCEPTIONS = [] super(RBDTestCase, self).setUp() self.cfg = self._make_configuration() mock_exec = mock.Mock() mock_exec.return_value = ('', '') self.driver = driver.RBDDriver(execute=mock_exec, configuration=self.cfg) self.driver.set_initialized() self.context = context.get_admin_context() self.volume_a = fake_volume.fake_volume_obj( self.context, **{'name': u'volume-0000000a', 'id': '4c39c3c7-168f-4b32-b585-77f1b3bf0a38', 'use_quota': True, 'size': 10}) self.temp_volume = fake_volume.fake_volume_obj( self.context, **{'name': u'volume-0000000t', 'id': '4c39c3c7-168f-4b32-b585-77f1b3bf0a44', 'use_quota': False, 'size': 10}) self.volume_b = fake_volume.fake_volume_obj( self.context, **{'name': u'volume-0000000b', 'id': '0c7d1f44-5a06-403f-bb82-ae7ad0d693a6', 'use_quota': True, 'size': 10}) self.volume_c = fake_volume.fake_volume_obj( self.context, **{'name': u'volume-0000000a', 'id': '55555555-222f-4b32-b585-9991b3bf0a99', 'size': 12, 'use_quota': True, 'encryption_key_id': fake.ENCRYPTION_KEY_ID}) self.snapshot = fake_snapshot.fake_snapshot_obj( self.context, name='snapshot-0000000a', use_quota=True) self.snapshot_b = fake_snapshot.fake_snapshot_obj( self.context, **{'name': u'snapshot-0000000n', 'expected_attrs': ['volume'], 'use_quota': True, 'volume': {'id': fake.VOLUME_ID, 'name': 'cinder-volume', 'size': 128, 'host': 'host@fakebackend#fakepool'} }) self.qos_policy_a = {"total_iops_sec": "100", "total_bytes_sec": "1024"} self.qos_policy_b = {"read_iops_sec": "500", "write_iops_sec": "200"} # For tests involving multiattach volume type MULTIATTACH_FULL_FEATURES = ( driver.RBDDriver.RBD_FEATURE_LAYERING | driver.RBDDriver.RBD_FEATURE_EXCLUSIVE_LOCK | driver.RBDDriver.RBD_FEATURE_OBJECT_MAP | driver.RBDDriver.RBD_FEATURE_FAST_DIFF | driver.RBDDriver.RBD_FEATURE_JOURNALING) MULTIATTACH_REDUCED_FEATURES = ( driver.RBDDriver.RBD_FEATURE_LAYERING | driver.RBDDriver.RBD_FEATURE_EXCLUSIVE_LOCK) @ddt.data({'cluster_name': None, 'pool_name': 'rbd'}, {'cluster_name': 'volumes', 'pool_name': None}) @ddt.unpack def test_min_config(self, cluster_name, pool_name): self.cfg.rbd_cluster_name = cluster_name self.cfg.rbd_pool = pool_name with mock.patch('cinder.volume.drivers.rbd.rados'): self.assertRaises(exception.InvalidConfigurationValue, self.driver.check_for_setup_error) @mock.patch.object(driver, 'rados', mock.Mock()) @mock.patch.object(driver, 'RADOSClient') def test_check_for_setup_error_missing_keyring_data(self, mock_client): self.driver.keyring_file = '/etc/ceph/ceph.client.admin.keyring' 
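# A keyring path is configured but its data could not be read;
# check_for_setup_error must report this as an invalid configuration.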
self.driver.keyring_data = None self.assertRaises(exception.InvalidConfigurationValue, self.driver.check_for_setup_error) mock_client.assert_called_once_with(self.driver) def test_parse_replication_config_empty(self): self.driver._parse_replication_configs([]) self.assertEqual([], self.driver._replication_targets) def test_parse_replication_config_missing(self): """Parsing replication_device without required backend_id.""" cfg = [{'conf': '/etc/ceph/secondary.conf'}] self.assertRaises(exception.InvalidConfigurationValue, self.driver._parse_replication_configs, cfg) def test_parse_replication_config_defaults(self): """Parsing replication_device with default conf and user.""" cfg = [{'backend_id': 'secondary-backend'}] expected = [{'name': 'secondary-backend', 'conf': '/etc/ceph/secondary-backend.conf', 'user': 'cinder', 'secret_uuid': self.cfg.rbd_secret_uuid}] self.driver._parse_replication_configs(cfg) self.assertEqual(expected, self.driver._replication_targets) @ddt.data(1, 2) def test_parse_replication_config(self, num_targets): cfg = [{'backend_id': 'secondary-backend', 'conf': 'foo', 'user': 'bar'}, {'backend_id': 'tertiary-backend'}] expected = [{'name': 'secondary-backend', 'conf': 'foo', 'user': 'bar', 'secret_uuid': self.cfg.rbd_secret_uuid}, {'name': 'tertiary-backend', 'conf': '/etc/ceph/tertiary-backend.conf', 'user': 'cinder', 'secret_uuid': self.cfg.rbd_secret_uuid}] self.driver._parse_replication_configs(cfg[:num_targets]) self.assertEqual(expected[:num_targets], self.driver._replication_targets) def test_do_setup_replication_disabled(self): with mock.patch.object(self.driver.configuration, 'safe_get', return_value=None), \ mock.patch.object(self.driver, '_set_default_secret_uuid') as mock_secret: self.driver.do_setup(self.context) mock_secret.assert_called_once_with() self.assertFalse(self.driver._is_replication_enabled) self.assertEqual([], self.driver._replication_targets) self.assertEqual([], self.driver._target_names) self.assertEqual({'name': self.cfg.rbd_cluster_name, 'conf': self.cfg.rbd_ceph_conf, 'user': self.cfg.rbd_user, 'secret_uuid': self.cfg.rbd_secret_uuid}, self.driver._active_config) @ddt.data('', None) @mock.patch.object(driver.RBDDriver, '_get_fsid') def test__set_default_secret_uuid_missing(self, secret_uuid, mock_fsid): # Clear the current values self.cfg.rbd_secret_uuid = secret_uuid self.driver._active_config['secret_uuid'] = secret_uuid # Fake fsid value returned by the cluster fsid = str(uuid.uuid4()) mock_fsid.return_value = fsid self.driver._set_default_secret_uuid() mock_fsid.assert_called_once_with() self.assertEqual(fsid, self.driver._active_config['secret_uuid']) self.assertEqual(fsid, self.cfg.rbd_secret_uuid) @mock.patch.object(driver.RBDDriver, '_get_fsid') def test__set_default_secret_uuid_present(self, mock_fsid): # Set secret_uuid like _get_target_config does on do_setup secret_uuid = self.cfg.rbd_secret_uuid self.driver._active_config['secret_uuid'] = secret_uuid # Fake fsid value returned by the cluster (should not be callled) mock_fsid.return_value = str(uuid.uuid4()) self.driver._set_default_secret_uuid() mock_fsid.assert_not_called() # Values must not have changed self.assertEqual(secret_uuid, self.driver._active_config['secret_uuid']) self.assertEqual(secret_uuid, self.cfg.rbd_secret_uuid) def test_do_setup_replication(self): cfg = [{'backend_id': 'secondary-backend', 'conf': 'foo', 'user': 'bar', 'secret_uuid': 'secondary_secret_uuid'}] expected = [{'name': 'secondary-backend', 'conf': 'foo', 'user': 'bar', 'secret_uuid': 
'secondary_secret_uuid'}] with mock.patch.object(self.driver.configuration, 'safe_get', return_value=cfg): self.driver.do_setup(self.context) self.assertTrue(self.driver._is_replication_enabled) self.assertEqual(expected, self.driver._replication_targets) self.assertEqual({'name': self.cfg.rbd_cluster_name, 'conf': self.cfg.rbd_ceph_conf, 'user': self.cfg.rbd_user, 'secret_uuid': self.cfg.rbd_secret_uuid}, self.driver._active_config) def test_do_setup_replication_failed_over(self): cfg = [{'backend_id': 'secondary-backend', 'conf': 'foo', 'user': 'bar', 'secret_uuid': 'secondary_secret_uuid'}] expected = [{'name': 'secondary-backend', 'conf': 'foo', 'user': 'bar', 'secret_uuid': 'secondary_secret_uuid'}] self.driver._active_backend_id = 'secondary-backend' with mock.patch.object(self.driver.configuration, 'safe_get', return_value=cfg): self.driver.do_setup(self.context) self.assertTrue(self.driver._is_replication_enabled) self.assertEqual(expected, self.driver._replication_targets) self.assertEqual(expected[0], self.driver._active_config) def test_do_setup_replication_failed_over_unknown(self): cfg = [{'backend_id': 'secondary-backend', 'conf': 'foo', 'user': 'bar'}] self.driver._active_backend_id = 'unknown-backend' with mock.patch.object(self.driver.configuration, 'safe_get', return_value=cfg): self.assertRaises(exception.InvalidReplicationTarget, self.driver.do_setup, self.context) @mock.patch.object(driver.RBDDriver, '_enable_replication', return_value={'replication': 'enabled'}) def test_setup_volume_with_replication(self, mock_enable): self.volume_a.volume_type = fake_volume.fake_volume_type_obj( self.context, id=fake.VOLUME_TYPE_ID, extra_specs={'replication_enabled': ' True'}) res = self.driver._setup_volume(self.volume_a) self.assertEqual('enabled', res['replication']) mock_enable.assert_called_once_with(self.volume_a) @ddt.data(False, True) @mock.patch.object(driver.RBDDriver, '_enable_replication') def test_setup_volume_without_replication(self, enabled, mock_enable): self.driver._is_replication_enabled = enabled res = self.driver._setup_volume(self.volume_a) if enabled: expect = {'replication_status': fields.ReplicationStatus.DISABLED} else: expect = {} self.assertEqual(expect, res) mock_enable.assert_not_called() @ddt.data([True, False], [False, False], [True, True]) @ddt.unpack @common_mocks def test_enable_replication(self, exclusive_lock_enabled, journaling_enabled): """Test _enable_replication method. We want to confirm that if the Ceph backend has globally enabled 'exclusive_lock' and 'journaling', we don't try to enable them again and we properly indicate with our return value that they were already enabled. 'journaling' depends on 'exclusive_lock', so if 'exclusive-lock' is disabled, 'journaling' can't be enabled, so the '[False, True]' case is impossible. In this test case, there are three test scenarios: 1. 'exclusive_lock' and 'journaling' both enabled, 'image.update_features()' will not be called. 2. 'exclusive_lock' enabled, 'journaling' disabled, 'image.update_features()' will only be called for 'journaling'. 3. 'exclusive_lock' and 'journaling' are both disabled, 'image.update_features()' will be called for both 'exclusive-lock' and 'journaling', in this order. 
""" image = self.mock_proxy.return_value.__enter__.return_value image_features = 0 if exclusive_lock_enabled: image_features |= self.driver.RBD_FEATURE_EXCLUSIVE_LOCK if journaling_enabled: image_features |= self.driver.RBD_FEATURE_JOURNALING image.features.return_value = image_features journaling_status = str(journaling_enabled).lower() exclusive_lock_status = str(exclusive_lock_enabled).lower() expected = { 'replication_driver_data': ('{"had_exclusive_lock":%s,' '"had_journaling":%s}' % (exclusive_lock_status, journaling_status)), 'replication_status': 'enabled', } res = self.driver._enable_replication(self.volume_a) self.assertEqual(expected, res) if exclusive_lock_enabled and journaling_enabled: image.update_features.assert_not_called() elif exclusive_lock_enabled and not journaling_enabled: image.update_features.assert_called_once_with( self.driver.RBD_FEATURE_JOURNALING, True) else: calls = [call(self.driver.RBD_FEATURE_EXCLUSIVE_LOCK, True), call(self.driver.RBD_FEATURE_JOURNALING, True)] image.update_features.assert_has_calls(calls, any_order=False) image.mirror_image_enable.assert_called_once_with() @ddt.data(['false', 'true'], ['true', 'true'], ['false', 'false']) @ddt.unpack @common_mocks def test_disable_replication(self, had_journaling, had_exclusive_lock): driver_data = ('{"had_journaling": %s,"had_exclusive_lock": %s}' % (had_journaling, had_exclusive_lock)) self.volume_a.replication_driver_data = driver_data image = self.mock_proxy.return_value.__enter__.return_value res = self.driver._disable_replication(self.volume_a) expected = {'replication_status': fields.ReplicationStatus.DISABLED, 'replication_driver_data': None} self.assertEqual(expected, res) image.mirror_image_disable.assert_called_once_with(False) if had_journaling == 'true' and had_exclusive_lock == 'true': image.update_features.assert_not_called() elif had_journaling == 'false' and had_exclusive_lock == 'true': image.update_features.assert_called_once_with( self.driver.RBD_FEATURE_JOURNALING, False) else: calls = [call(self.driver.RBD_FEATURE_JOURNALING, False), call(self.driver.RBD_FEATURE_EXCLUSIVE_LOCK, False)] image.update_features.assert_has_calls(calls, any_order=False) @common_mocks @mock.patch.object(driver.RBDDriver, '_qos_specs_from_volume_type') @mock.patch.object(driver.RBDDriver, '_supports_qos') @mock.patch.object(driver.RBDDriver, '_enable_replication') def test_create_volume(self, mock_enable_repl, mock_qos_vers, mock_get_qos_specs): client = self.mock_client.return_value client.__enter__.return_value = client mock_qos_vers.return_value = True mock_get_qos_specs.return_value = None res = self.driver.create_volume(self.volume_a) self.assertEqual({}, res) chunk_size = self.cfg.rbd_store_chunk_size * units.Mi order = int(math.log(chunk_size, 2)) args = [client.ioctx, str(self.volume_a.name), self.volume_a.size * units.Gi, order] kwargs = {'old_format': False, 'features': client.features} self.mock_rbd.RBD.return_value.create.assert_called_once_with( *args, **kwargs) client.__enter__.assert_called_once_with() client.__exit__.assert_called_once_with(None, None, None) mock_enable_repl.assert_not_called() mock_qos_vers.assert_not_called() @common_mocks @mock.patch.object(driver.RBDDriver, '_enable_replication') def test_create_volume_replicated(self, mock_enable_repl): self.volume_a.volume_type = fake_volume.fake_volume_type_obj( self.context, id=fake.VOLUME_TYPE_ID, extra_specs={'replication_enabled': ' True'}) client = self.mock_client.return_value client.__enter__.return_value = client 
expected_update = { 'replication_status': 'enabled', 'replication_driver_data': '{"had_journaling": false}' } mock_enable_repl.return_value = expected_update res = self.driver.create_volume(self.volume_a) self.assertEqual(expected_update, res) mock_enable_repl.assert_called_once_with(self.volume_a) chunk_size = self.cfg.rbd_store_chunk_size * units.Mi order = int(math.log(chunk_size, 2)) self.mock_rbd.RBD.return_value.create.assert_called_once_with( client.ioctx, self.volume_a.name, self.volume_a.size * units.Gi, order, old_format=False, features=client.features) client.__enter__.assert_called_once_with() client.__exit__.assert_called_once_with(None, None, None) @common_mocks @mock.patch.object(driver.RBDDriver, '_supports_qos') @mock.patch.object(driver.RBDDriver, 'update_rbd_image_qos') def test_create_volume_with_qos(self, mock_update_qos, mock_qos_supported): ctxt = context.get_admin_context() qos = qos_specs.create(ctxt, "qos-iops-bws", self.qos_policy_a) self.volume_a.volume_type = fake_volume.fake_volume_type_obj( ctxt, id=fake.VOLUME_TYPE_ID, qos_specs_id = qos.id) client = self.mock_client.return_value client.__enter__.return_value = client mock_qos_supported.return_value = True res = self.driver.create_volume(self.volume_a) self.assertEqual({}, res) chunk_size = self.cfg.rbd_store_chunk_size * units.Mi order = int(math.log(chunk_size, 2)) args = [client.ioctx, str(self.volume_a.name), self.volume_a.size * units.Gi, order] kwargs = {'old_format': False, 'features': client.features} self.mock_rbd.RBD.return_value.create.assert_called_once_with( *args, **kwargs) mock_update_qos.assert_called_once_with(self.volume_a, qos.specs) client.__enter__.assert_called_once_with() client.__exit__.assert_called_once_with(None, None, None) @common_mocks def test_manage_existing_get_size(self): with mock.patch.object(self.driver.rbd.Image(), 'size') as \ mock_rbd_image_size: with mock.patch.object(self.driver.rbd.Image(), 'close') \ as mock_rbd_image_close: mock_rbd_image_size.return_value = 2 * units.Gi existing_ref = {'source-name': self.volume_a.name} return_size = self.driver.manage_existing_get_size( self.volume_a, existing_ref) self.assertEqual(2, return_size) mock_rbd_image_size.assert_called_once_with() mock_rbd_image_close.assert_called_once_with() @common_mocks def test_manage_existing_get_non_integer_size(self): rbd_image = self.driver.rbd.Image.return_value rbd_image.size.return_value = int(1.75 * units.Gi) existing_ref = {'source-name': self.volume_a.name} return_size = self.driver.manage_existing_get_size(self.volume_a, existing_ref) self.assertEqual(2, return_size) rbd_image.size.assert_called_once_with() rbd_image.close.assert_called_once_with() @common_mocks def test_manage_existing_get_invalid_size(self): with mock.patch.object(self.driver.rbd.Image(), 'size') as \ mock_rbd_image_size: with mock.patch.object(self.driver.rbd.Image(), 'close') \ as mock_rbd_image_close: mock_rbd_image_size.return_value = 'abcd' existing_ref = {'source-name': self.volume_a.name} self.assertRaises(exception.VolumeBackendAPIException, self.driver.manage_existing_get_size, self.volume_a, existing_ref) mock_rbd_image_size.assert_called_once_with() mock_rbd_image_close.assert_called_once_with() @common_mocks def test_manage_existing(self): client = self.mock_client.return_value client.__enter__.return_value = client with mock.patch.object(self.driver.rbd.RBD(), 'rename') as \ mock_rbd_image_rename: exist_volume = 'vol-exist' existing_ref = {'source-name': exist_volume} mock_rbd_image_rename.return_value 
= 0 self.driver.manage_existing(self.volume_a, existing_ref) mock_rbd_image_rename.assert_called_with( client.ioctx, exist_volume, self.volume_a.name) @common_mocks def test_manage_existing_with_exist_rbd_image(self): client = self.mock_client.return_value client.__enter__.return_value = client self.mock_rbd.RBD.return_value.rename.side_effect = ( MockImageExistsException) exist_volume = 'vol-exist' existing_ref = {'source-name': exist_volume} self.assertRaises(self.mock_rbd.ImageExists, self.driver.manage_existing, self.volume_a, existing_ref) # Make sure the exception was raised self.assertEqual([self.mock_rbd.ImageExists], RAISED_EXCEPTIONS) @common_mocks def test_manage_existing_with_invalid_rbd_image(self): self.mock_rbd.Image.side_effect = self.mock_rbd.ImageNotFound invalid_volume = 'vol-invalid' invalid_ref = {'source-name': invalid_volume} self.assertRaises(exception.ManageExistingInvalidReference, self.driver.manage_existing_get_size, self.volume_a, invalid_ref) # Make sure the exception was raised self.assertEqual([self.mock_rbd.ImageNotFound], RAISED_EXCEPTIONS) @common_mocks def test_manage_existing_replicated_type(self): client = self.mock_client.return_value client.__enter__.return_value = client self.volume_a.volume_type = fake_volume.fake_volume_type_obj( self.context, id=fake.VOLUME_TYPE_ID, extra_specs={'replication_enabled': ' True'}) with mock.patch.object(self.driver.rbd.RBD(), 'rename') as \ mock_rbd_image_rename: exist_volume = 'vol-exist' existing_ref = {'source-name': exist_volume} mock_rbd_image_rename.return_value = 0 res = self.driver.manage_existing(self.volume_a, existing_ref) mock_rbd_image_rename.assert_called_with( client.ioctx, exist_volume, self.volume_a.name) self.assertEqual('enabled', res['replication_status']) @common_mocks def test_manage_existing_multiattach_type(self): client = self.mock_client.return_value client.__enter__.return_value = client image = self.mock_proxy.return_value.__enter__.return_value image_features = self.MULTIATTACH_FULL_FEATURES image.features.return_value = image_features expected_res = { 'provider_location': "{\"saved_features\":%s}" % image_features} self.volume_a.volume_type = fake_volume.fake_volume_type_obj( self.context, id=fake.VOLUME_TYPE_ID, extra_specs={'multiattach': ' True'}) with mock.patch.object(self.driver.rbd.RBD(), 'rename') as \ mock_rbd_image_rename: exist_volume = 'vol-exist' existing_ref = {'source-name': exist_volume} mock_rbd_image_rename.return_value = 0 res = self.driver.manage_existing(self.volume_a, existing_ref) mock_rbd_image_rename.assert_called_with( client.ioctx, exist_volume, self.volume_a.name) self.assertEqual(expected_res, res) @common_mocks def test_manage_existing_invalid_type(self): client = self.mock_client.return_value client.__enter__.return_value = client # Replication and multiattach are mutually exclusive extra_specs = { 'replication_enabled': ' True', 'multiattach': ' True' } self.volume_a.volume_type = fake_volume.fake_volume_type_obj( self.context, id=fake.VOLUME_TYPE_ID, extra_specs=extra_specs) with mock.patch.object(self.driver.rbd.RBD(), 'rename') as \ mock_rbd_image_rename: exist_volume = 'vol-exist' existing_ref = {'source-name': exist_volume} mock_rbd_image_rename.return_value = 0 res = self.assertRaises( exception.ManageExistingVolumeTypeMismatch, self.driver.manage_existing, self.volume_a, existing_ref) self.assertIn( "Manage existing volume failed due to volume type mismatch", str(res)) self.assertIn( "Replication and Multiattach are mutually exclusive.", str(res)) 
# Ensure rename is not called mock_rbd_image_rename.assert_not_called() @common_mocks @mock.patch.object(driver.RBDDriver, '_get_image_status') def test_get_manageable_volumes(self, mock_get_image_status): cinder_vols = [{'id': '00000000-0000-0000-0000-000000000000'}] vols = ['volume-00000000-0000-0000-0000-000000000000', 'vol1', 'vol2', 'volume-11111111-1111-1111-1111-111111111111.deleted'] self.mock_rbd.RBD.return_value.list.return_value = vols image = self.mock_proxy.return_value.__enter__.return_value image.size.side_effect = [2 * units.Gi, 4 * units.Gi, 6 * units.Gi, 8 * units.Gi] mock_get_image_status.side_effect = [ {'watchers': []}, {'watchers': [{"address": "192.168.120.61:0/3012034728", "client": 44431941, "cookie": 94077162321152}]}, {'watchers': []}] res = self.driver.get_manageable_volumes( cinder_vols, None, 1000, 0, ['size'], ['asc']) exp = [{'size': 2, 'reason_not_safe': 'already managed', 'extra_info': None, 'safe_to_manage': False, 'reference': {'source-name': 'volume-00000000-0000-0000-0000-000000000000'}, 'cinder_id': '00000000-0000-0000-0000-000000000000'}, {'size': 4, 'reason_not_safe': None, 'safe_to_manage': True, 'reference': {'source-name': 'vol1'}, 'cinder_id': None, 'extra_info': None}, {'size': 6, 'reason_not_safe': 'volume in use', 'safe_to_manage': False, 'reference': {'source-name': 'vol2'}, 'cinder_id': None, 'extra_info': None}, {'size': 8, 'reason_not_safe': 'volume marked as deleted', 'safe_to_manage': False, 'cinder_id': None, 'extra_info': None, 'reference': { 'source-name': 'volume-11111111-1111-1111-1111-111111111111.deleted'}} ] self.assertEqual(exp, res) @common_mocks @mock.patch.object(driver.RBDDriver, '_get_image_status') def test_get_manageable_volumes_exc(self, mock_get_image_status): cinder_vols = [{'id': '00000000-0000-0000-0000-000000000000'}] vols = ['volume-00000000-0000-0000-0000-000000000000', 'vol1', 'vol2', 'volume-11111111-1111-1111-1111-111111111111.deleted'] self.mock_rbd.RBD.return_value.list.return_value = vols image = self.mock_proxy.return_value.__enter__.return_value # Four images are present, but the third image can't be opened image.size.side_effect = [2 * units.Gi, self.mock_rbd.ImageNotFound, self.mock_rbd.TimeOut, self.mock_rbd.PermissionError] mock_get_image_status.side_effect = [ {'watchers': []}, {'watchers': []}, {'watchers': []}] res = self.driver.get_manageable_volumes( cinder_vols, None, 1000, 0, ['size'], ['asc']) exp = [{'size': 2, 'reason_not_safe': 'already managed', 'extra_info': None, 'safe_to_manage': False, 'reference': {'source-name': 'volume-00000000-0000-0000-0000-000000000000'}, 'cinder_id': '00000000-0000-0000-0000-000000000000'}] self.assertEqual(exp, res) @common_mocks def test_delete_backup_snaps(self): self.driver.rbd.Image.remove_snap = mock.Mock() with mock.patch.object(self.driver, '_get_backup_snaps') as \ mock_get_backup_snaps: mock_get_backup_snaps.return_value = [{'name': 'snap1'}] rbd_image = self.driver.rbd.Image() self.driver._delete_backup_snaps(rbd_image) mock_get_backup_snaps.assert_called_once_with(rbd_image) self.assertTrue( self.driver.rbd.Image.return_value.remove_snap.called) @common_mocks def test_delete_volume(self): client = self.mock_client.return_value self.driver.rbd.Image.return_value.list_snaps.return_value = [] with mock.patch.object(self.driver, '_get_clone_info') as \ mock_get_clone_info: with mock.patch.object(self.driver, '_delete_backup_snaps') as \ mock_delete_backup_snaps: mock_get_clone_info.return_value = (None, None, None) 
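# With no clone parent reported, delete_volume is expected to remove the
# RBD image directly: exactly one RBD.remove() call and no snapshot
# unprotect.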
self.driver.delete_volume(self.volume_a) mock_get_clone_info.assert_called_once_with( self.mock_proxy.return_value.__enter__.return_value, self.volume_a.name, None) self.mock_proxy.return_value.__enter__.return_value.\ list_snaps.assert_called_once_with() client.__enter__.assert_called_once_with() client.__exit__.assert_called_once_with(None, None, None) mock_delete_backup_snaps.assert_called_once_with( self.mock_proxy.return_value.__enter__.return_value) self.assertFalse( self.driver.rbd.Image.return_value.unprotect_snap.called) self.assertEqual( 1, self.driver.rbd.RBD.return_value.remove.call_count) @common_mocks def test_delete_volume_clone_info_return_parent(self): client = self.mock_client.return_value self.driver.rbd.Image.return_value.list_snaps.return_value = [] pool = 'volumes' parent = True parent_snap = self.snapshot_b mock_get_clone_info = self.mock_object(self.driver, '_get_clone_info', return_value=(pool, parent, parent_snap)) m_del_clone_parent_refs = self.mock_object(self.driver, '_delete_clone_parent_refs') m_del_back_snaps = self.mock_object(self.driver, '_delete_backup_snaps') self.driver.delete_volume(self.volume_a) mock_get_clone_info.assert_called_once_with( self.mock_proxy.return_value.__enter__.return_value, self.volume_a.name, None) m_del_clone_parent_refs.assert_not_called() self.mock_proxy.return_value.__enter__.return_value.list_snaps.\ assert_called_once_with() client.__enter__.assert_called_once_with() client.__exit__.assert_called_once_with(None, None, None) m_del_back_snaps.assert_called_once_with( self.mock_proxy.return_value.__enter__.return_value) self.assertFalse( self.driver.rbd.Image.return_value.unprotect_snap.called) self.assertEqual( 1, self.driver.rbd.RBD.return_value.remove.call_count) self.driver.rbd.RBD.return_value.trash_move.assert_not_called() @common_mocks def test_deferred_deletion(self): drv = self._make_drv({'enable_deferred_deletion': True, 'deferred_deletion_delay': 0}) client = self.mock_client.return_value with mock.patch.object(drv, '_get_clone_info') as \ mock_get_clone_info: with mock.patch.object(drv, '_delete_backup_snaps') as \ mock_delete_backup_snaps: mock_get_clone_info.return_value = (None, None, None) drv.delete_volume(self.volume_a) mock_get_clone_info.assert_called_once_with( self.mock_proxy.return_value.__enter__.return_value, self.volume_a.name, None) client.__enter__.assert_called_once_with() client.__exit__.assert_called_once_with(None, None, None) mock_delete_backup_snaps.assert_called_once_with( self.mock_proxy.return_value.__enter__.return_value) self.assertFalse( drv.rbd.Image.return_value.unprotect_snap.called) self.assertEqual( 0, drv.rbd.RBD.return_value.trash_move.call_count) self.driver.rbd.RBD.return_value.remove.assert_not_called() @common_mocks def test_deferred_deletion_periodic_task(self): drv = self._make_drv({'rados_connect_timeout': -1, 'enable_deferred_deletion': True, 'deferred_deletion_purge_interval': 1}) drv._start_periodic_tasks() time.sleep(1.2) self.assertTrue(drv.rbd.RBD.return_value.trash_list.called) self.assertFalse(drv.rbd.RBD.return_value.trash_remove.called) @common_mocks def test_deferred_deletion_trash_purge(self): drv = self._make_drv({'enable_deferred_deletion': True}) with mock.patch.object(drv.rbd.RBD(), 'trash_list') as mock_trash_list: mock_trash_list.return_value = [self.volume_a] drv._trash_purge() self.assertEqual( 1, drv.rbd.RBD.return_value.trash_list.call_count) self.assertEqual( 1, drv.rbd.RBD.return_value.trash_remove.call_count) @common_mocks def 
test_deferred_deletion_trash_purge_not_expired(self): drv = self._make_drv({'enable_deferred_deletion': True}) with mock.patch.object(drv.rbd.RBD(), 'trash_list') as mock_trash_list: mock_trash_list.return_value = [self.volume_a] drv.rbd.RBD.return_value.trash_remove.side_effect = ( self.mock_rbd.PermissionError) drv._trash_purge() self.assertEqual( 1, drv.rbd.RBD.return_value.trash_list.call_count) self.assertEqual( 1, drv.rbd.RBD.return_value.trash_remove.call_count) # Make sure the exception was raised self.assertEqual(1, len(RAISED_EXCEPTIONS)) self.assertIn(self.mock_rbd.PermissionError, RAISED_EXCEPTIONS) @common_mocks def test_deferred_deletion_w_parent(self): drv = self._make_drv({'enable_deferred_deletion': True, 'deferred_deletion_delay': 0}) _get_clone_info_return_values = [ (None, self.volume_b.name, None), (None, None, None)] with mock.patch.object(drv, '_get_clone_info', side_effect = _get_clone_info_return_values): drv.delete_volume(self.volume_a) self.assertEqual( 0, drv.rbd.RBD.return_value.trash_move.call_count) @common_mocks def test_deferred_deletion_w_deleted_parent(self): drv = self._make_drv({'enable_deferred_deletion': True, 'deferred_deletion_delay': 0}) _get_clone_info_return_values = [ (None, "%s.deleted" % self.volume_b.name, None), (None, None, None)] with mock.patch.object(drv, '_get_clone_info', side_effect = _get_clone_info_return_values): drv.delete_volume(self.volume_a) self.assertEqual( 0, drv.rbd.RBD.return_value.trash_move.call_count) @common_mocks def test_delete_volume_not_found_at_open(self): self.mock_rbd.Image.side_effect = self.mock_rbd.ImageNotFound self.mock_proxy.side_effect = self.mock_rbd.ImageNotFound self.assertIsNone(self.driver.delete_volume(self.volume_a)) with mock.patch.object(driver, 'RADOSClient') as client: client = self.mock_client.return_value.__enter__.return_value self.mock_proxy.assert_called_once_with(self.driver, self.volume_a.name, ioctx=client.ioctx) # Make sure the exception was raised self.assertEqual([self.mock_rbd.ImageNotFound], RAISED_EXCEPTIONS) @common_mocks def test_delete_busy_volume(self): self.mock_rbd.Image.return_value.list_snaps.return_value = [] self.mock_rbd.RBD.return_value.remove.side_effect = ( self.mock_rbd.ImageBusy, None) mock_delete_backup_snaps = self.mock_object(self.driver, '_delete_backup_snaps') mock_rados_client = self.mock_object(driver, 'RADOSClient') mock_flatten = self.mock_object(self.driver, '_flatten') with mock.patch.object(self.driver, '_get_clone_info') as \ mock_get_clone_info: mock_get_clone_info.return_value = (None, None, None) self.driver.rbd.Image.return_value.list_children.\ return_value = [('pool1', 'child1'), ('pool1', 'child2')] self.mock_proxy.return_value.__enter__.return_value.list_children.\ return_value = [('pool1', 'child1'), ('pool1', 'child2')] self.driver.delete_volume(self.volume_a) mock_flatten.assert_has_calls( [mock.call('pool1', 'child1'), mock.call('pool1', 'child2')]) mock_get_clone_info.assert_called_once_with( self.mock_proxy.return_value.__enter__.return_value, self.volume_a.name, None) self.mock_proxy.return_value.__enter__.return_value.list_snaps.\ assert_called_once_with() mock_rados_client.assert_called_once_with(self.driver) mock_delete_backup_snaps.assert_called_once_with( self.mock_proxy.return_value.__enter__.return_value) self.assertFalse( self.mock_rbd.Image.return_value.unprotect_snap. 
called) self.assertEqual( 2, self.mock_rbd.RBD.return_value.remove.call_count) self.assertEqual(1, len(RAISED_EXCEPTIONS)) # Make sure the exception was raised self.assertIn(self.mock_rbd.ImageBusy, RAISED_EXCEPTIONS) self.mock_rbd.RBD.return_value.trash_move.assert_not_called() @common_mocks def test_delete_volume_has_snapshots(self): self.mock_rbd.Image.return_value.list_snaps.return_value = [] self.mock_rbd.RBD.return_value.remove.side_effect = ( self.mock_rbd.ImageHasSnapshots, # initial vol remove attempt None # removal of child image ) mock_get_clone_info = self.mock_object(self.driver, '_get_clone_info', return_value=(None, None, None)) m_del_backup_snaps = self.mock_object(self.driver, '_delete_backup_snaps') mock_try_remove_volume = self.mock_object(self.driver, '_try_remove_volume', return_value=True) mock_rados_client = self.mock_object(driver, 'RADOSClient') self.driver.delete_volume(self.volume_a) mock_get_clone_info.assert_called_once_with( self.mock_proxy.return_value.__enter__.return_value, self.volume_a.name, None) mock_rados_client.assert_called_once_with(self.driver) m_del_backup_snaps.assert_called_once_with( self.mock_proxy.return_value.__enter__.return_value) self.assertFalse( self.mock_rbd.Image.return_value.unprotect_snap.called) self.assertEqual( 1, self.mock_rbd.RBD.return_value.remove.call_count) self.assertEqual(1, len(RAISED_EXCEPTIONS)) # Make sure the exception was raised self.assertIn(self.mock_rbd.ImageHasSnapshots, RAISED_EXCEPTIONS) self.mock_rbd.RBD.return_value.trash_move.assert_not_called() mock_try_remove_volume.assert_called_once_with(mock.ANY, self.volume_a.name) @common_mocks def test_delete_volume_has_snapshots_trash(self): self.mock_rbd.Image.return_value.list_snaps.return_value = [] self.mock_rbd.RBD.return_value.remove.side_effect = ( self.mock_rbd.ImageHasSnapshots, # initial vol remove attempt None # removal of child image ) mock_get_clone_info = self.mock_object(self.driver, '_get_clone_info', return_value=(None, None, None)) m_del_backup_snaps = self.mock_object(self.driver, '_delete_backup_snaps') mock_try_remove_volume = self.mock_object(self.driver, '_try_remove_volume', return_value=False) mock_trash_volume = self.mock_object(self.driver, '_move_volume_to_trash') with mock.patch.object(driver, 'RADOSClient') as mock_rados_client: self.driver.delete_volume(self.volume_a) mock_get_clone_info.assert_called_once_with( self.mock_proxy.return_value.__enter__.return_value, self.volume_a.name, None) self.mock_proxy.return_value.__enter__.return_value.list_snaps.\ assert_called_once_with() mock_rados_client.assert_called_once_with(self.driver) m_del_backup_snaps.assert_called_once_with( self.mock_proxy.return_value.__enter__.return_value) self.assertFalse( self.mock_rbd.Image.return_value.unprotect_snap.called) self.assertEqual( 1, self.mock_rbd.RBD.return_value.remove.call_count) self.assertEqual(1, len(RAISED_EXCEPTIONS)) # Make sure the exception was raised self.assertIn(self.mock_rbd.ImageHasSnapshots, RAISED_EXCEPTIONS) self.mock_rbd.RBD.return_value.trash_move.\ assert_not_called() mock_trash_volume.assert_called_once_with(mock.ANY, self.volume_a.name, 0) mock_try_remove_volume.assert_called_once_with(mock.ANY, self.volume_a.name) @common_mocks def test_delete_volume_not_found(self): self.mock_rbd.Image.return_value.list_snaps.return_value = [] self.mock_rbd.RBD.return_value.remove.side_effect = ( self.mock_rbd.ImageNotFound) mock_delete_backup_snaps = self.mock_object(self.driver, '_delete_backup_snaps') mock_rados_client = 
self.mock_object(driver, 'RADOSClient') mock_get_clone_info = self.mock_object(self.driver, '_get_clone_info') mock_get_clone_info.return_value = (None, None, None) mock_find_clone_snap = self.mock_object(self.driver, '_find_clone_snap', return_value=None) self.assertIsNone(self.driver.delete_volume(self.volume_a)) image = self.mock_proxy.return_value.__enter__.return_value mock_get_clone_info.assert_called_once_with( image, self.volume_a.name, None) mock_find_clone_snap.assert_called_once_with(image) mock_rados_client.assert_called_once_with(self.driver) mock_delete_backup_snaps.assert_called_once_with(image) self.assertFalse( self.mock_rbd.Image.return_value.unprotect_snap.called) self.assertEqual( 1, self.mock_rbd.RBD.return_value.remove.call_count) # Make sure the exception was raised self.assertEqual([self.mock_rbd.ImageNotFound], RAISED_EXCEPTIONS) @common_mocks def test_delete_volume_w_clone_snaps(self): client = self.mock_client.return_value snapshots = [ {'id': 1, 'name': 'snapshot-00000000-0000-0000-0000-000000000000', 'size': 2147483648}, {'id': 2, 'name': 'snap1', 'size': 6442450944}, {'id': 3, 'size': 8589934592, 'name': 'volume-22222222-2222-2222-2222-222222222222.clone_snap'}, {'id': 4, 'size': 5368709120, 'name': 'backup.33333333-3333-3333-3333-333333333333.snap.123'}] self.mock_rbd.Image.return_value.list_snaps.return_value = snapshots mock_get_clone_info = self.mock_object(self.driver, '_get_clone_info', return_value=(None, None, None)) self.mock_object(self.driver, '_find_clone_snap', return_value=snapshots[2]['name']) with mock.patch.object(self.driver, '_delete_backup_snaps') as \ mock_delete_backup_snaps: self.driver.delete_volume(self.volume_a) mock_get_clone_info.assert_called_once_with( self.mock_proxy.return_value.__enter__.return_value, self.volume_a.name, snapshots[2]['name']) client.__enter__.assert_called_once_with() client.__exit__.assert_called_once_with(None, None, None) mock_delete_backup_snaps.assert_called_once_with( self.mock_proxy.return_value.__enter__.return_value) self.assertFalse( self.driver.rbd.Image.return_value.unprotect_snap.called) self.assertEqual( 1, self.driver.rbd.RBD.return_value.rename.call_count) @common_mocks @mock.patch('cinder.objects.Volume.get_by_id') def test_create_snapshot(self, volume_get_by_id): volume_get_by_id.return_value = self.volume_a proxy = self.mock_proxy.return_value proxy.__enter__.return_value = proxy self.driver.create_snapshot(self.snapshot) args = [str(self.snapshot.name)] proxy.create_snap.assert_called_with(*args) proxy.protect_snap.assert_called_with(*args) @common_mocks @mock.patch('cinder.objects.Volume.get_by_id') @mock.patch.object(driver.RBDDriver, '_resize', mock.Mock()) def test_log_create_vol_from_snap_w_v2_clone_api(self, volume_get_by_id): volume_get_by_id.return_value = self.volume_a self.mock_proxy().__enter__().volume.op_features.return_value = 1 self.mock_rbd.RBD_OPERATION_FEATURE_CLONE_PARENT = 1 self.cfg.rbd_flatten_volume_from_snapshot = False with mock.patch.object(driver, 'LOG') as mock_log: with mock.patch.object(self.driver.rbd.Image(), 'stripe_unit') as \ mock_rbd_image_stripe_unit: mock_rbd_image_stripe_unit.return_value = 4194304 self.driver.create_volume_from_snapshot(self.volume_a, self.snapshot) mock_log.info.assert_called_with('Using v2 Clone API') self.assertTrue(self.driver._clone_v2_api_checked) @common_mocks @mock.patch('cinder.objects.Volume.get_by_id') @mock.patch.object(driver.RBDDriver, '_resize', mock.Mock()) def test_log_create_vol_from_snap_without_v2_clone_api(self, 
volume_get_by_id): volume_get_by_id.return_value = self.volume_a self.mock_proxy().__enter__().volume.op_features.return_value = 0 self.mock_rbd.RBD_OPERATION_FEATURE_CLONE_PARENT = 1 self.cfg.rbd_flatten_volume_from_snapshot = False with mock.patch.object(driver, 'LOG') as mock_log: with mock.patch.object(self.driver.rbd.Image(), 'stripe_unit') as \ mock_rbd_image_stripe_unit: mock_rbd_image_stripe_unit.return_value = 4194304 self.driver.create_volume_from_snapshot(self.volume_a, self.snapshot) self.assertTrue(any(m for m in mock_log.warning.call_args_list if 'Not using v2 clone API' in m[0][0])) self.assertTrue(self.driver._clone_v2_api_checked) @common_mocks @mock.patch('cinder.objects.Volume.get_by_id') @mock.patch.object(driver.RBDDriver, '_get_stripe_unit', mock.Mock(return_value=4194304)) @mock.patch.object(driver.RBDDriver, '_resize', mock.Mock()) @mock.patch.object(driver.RBDDriver, '_flatten') def test_create_temp_vol_from_snap(self, flatten_mock, volume_get_by_id): volume_get_by_id.return_value = self.temp_volume snapshot = mock.Mock(volume_name='volume-name', volume_size=self.temp_volume.size) # This is a temp vol, so this option will be ignored and no flatten occurs self.cfg.rbd_flatten_volume_from_snapshot = True self.driver.create_volume_from_snapshot(self.temp_volume, snapshot) flatten_mock.assert_not_called() @common_mocks @mock.patch('cinder.objects.Volume.get_by_id') @mock.patch.object(driver.RBDDriver, '_get_stripe_unit', mock.Mock(return_value=4194304)) @mock.patch.object(driver.RBDDriver, '_resize', mock.Mock()) @mock.patch.object(driver.RBDDriver, '_flatten') def test_create_vol_from_snap(self, flatten_mock, volume_get_by_id): volume_get_by_id.return_value = self.volume_a snapshot = mock.Mock(volume_name='volume-name', volume_size=self.volume_a.size) self.cfg.rbd_flatten_volume_from_snapshot = True self.driver.create_volume_from_snapshot(self.volume_a, snapshot) flatten_mock.assert_called_once_with(self.cfg.rbd_pool, self.volume_a.name) @common_mocks @mock.patch('cinder.objects.Volume.get_by_id') @mock.patch.object(driver.RBDDriver, '_resize', mock.Mock()) def test_log_create_vol_from_snap_raise(self, volume_get_by_id): volume_get_by_id.return_value = self.volume_a self.mock_proxy().__enter__().volume.op_features.side_effect = \ Exception self.mock_rbd.RBD_OPERATION_FEATURE_CLONE_PARENT = 1 snapshot = self.snapshot self.cfg.rbd_flatten_volume_from_snapshot = False with mock.patch.object(driver, 'LOG') as mock_log: # First call self.driver.create_volume_from_snapshot(self.volume_a, snapshot) self.assertTrue(self.driver._clone_v2_api_checked) # Second call self.driver.create_volume_from_snapshot(self.volume_a, snapshot) # Check that the second call to create_volume_from_snapshot # doesn't log an additional warning mock_log.warning.assert_called_once_with(mock.ANY) @common_mocks @mock.patch('cinder.objects.Volume.get_by_id') def test_delete_snapshot(self, volume_get_by_id): volume_get_by_id.return_value = self.volume_a proxy = self.mock_proxy.return_value proxy.__enter__.return_value = proxy self.driver.delete_snapshot(self.snapshot) proxy.remove_snap.assert_called_with(self.snapshot.name) proxy.unprotect_snap.assert_called_with(self.snapshot.name) @common_mocks @mock.patch('cinder.objects.Volume.get_by_id') def test_delete_notfound_snapshot(self, volume_get_by_id): volume_get_by_id.return_value = self.volume_a proxy = self.mock_proxy.return_value proxy.__enter__.return_value = proxy proxy.unprotect_snap.side_effect = ( self.mock_rbd.ImageNotFound)
self.driver.delete_snapshot(self.snapshot) proxy.remove_snap.assert_not_called() proxy.unprotect_snap.assert_called_with(self.snapshot.name) @common_mocks @mock.patch('cinder.objects.Volume.get_by_id') def test_delete_notfound_on_remove_snapshot(self, volume_get_by_id): volume_get_by_id.return_value = self.volume_a proxy = self.mock_proxy.return_value proxy.__enter__.return_value = proxy proxy.remove_snap.side_effect = ( self.mock_rbd.ImageNotFound) self.driver.delete_snapshot(self.snapshot) proxy.remove_snap.assert_called_with(self.snapshot.name) proxy.unprotect_snap.assert_called_with(self.snapshot.name) @common_mocks @mock.patch('cinder.objects.Volume.get_by_id') def test_delete_unprotected_snapshot(self, volume_get_by_id): volume_get_by_id.return_value = self.volume_a proxy = self.mock_proxy.return_value proxy.__enter__.return_value = proxy proxy.unprotect_snap.side_effect = self.mock_rbd.InvalidArgument self.driver.delete_snapshot(self.snapshot) self.assertTrue(proxy.unprotect_snap.called) self.assertTrue(proxy.remove_snap.called) @common_mocks @mock.patch('cinder.objects.Volume.get_by_id') def test_delete_busy_snapshot(self, volume_get_by_id): volume_get_by_id.return_value = self.volume_a proxy = self.mock_proxy.return_value proxy.__enter__.return_value = proxy proxy.unprotect_snap.side_effect = ( self.mock_rbd.ImageBusy, None) with mock.patch.object(self.driver, '_flatten_children') as \ mock_flatten_children: self.driver.delete_snapshot(self.snapshot) mock_flatten_children.assert_called_once_with(mock.ANY, self.volume_a.name, self.snapshot.name) self.assertTrue(proxy.unprotect_snap.called) self.assertTrue(proxy.remove_snap.called) @common_mocks @mock.patch.object(driver.RBDDriver, '_flatten') @mock.patch('cinder.objects.Volume.get_by_id') def test_delete_busy_snapshot_fail(self, volume_get_by_id, flatten_mock): volume_get_by_id.return_value = self.volume_a proxy = self.mock_proxy.return_value proxy.__enter__.return_value = proxy proxy.unprotect_snap.side_effect = ( self.mock_rbd.ImageBusy, self.mock_rbd.ImageBusy, self.mock_rbd.ImageBusy) flatten_mock.side_effect = exception.SnapshotIsBusy(self.snapshot.name) self.assertRaises(exception.SnapshotIsBusy, self.driver.delete_snapshot, self.snapshot) self.assertTrue(proxy.unprotect_snap.called) self.assertFalse(proxy.remove_snap.called) @common_mocks @mock.patch('cinder.objects.Volume.get_by_id') def test_delete_snapshot_volume_not_found(self, volume_get_by_id): volume_get_by_id.return_value = self.volume_a proxy = self.mock_proxy.return_value proxy.__enter__.side_effect = self.mock_rbd.ImageNotFound self.driver.delete_snapshot(self.snapshot) proxy.remove_snap.assert_not_called() proxy.unprotect_snap.assert_not_called() @common_mocks def test_snapshot_revert_use_temp_snapshot(self): self.assertFalse(self.driver.snapshot_revert_use_temp_snapshot()) @common_mocks def test_revert_to_snapshot(self): image = self.mock_proxy.return_value.__enter__.return_value self.driver.revert_to_snapshot(self.context, self.volume_a, self.snapshot) image.rollback_to_snap.assert_called_once_with(self.snapshot.name) @common_mocks def test_get_clone_info(self): volume = self.mock_rbd.Image() volume.set_snap = mock.Mock() volume.parent_info = mock.Mock() parent_info = ('a', 'b', '%s.clone_snap' % (self.volume_a.name)) volume.parent_info.return_value = parent_info info = self.driver._get_clone_info(volume, self.volume_a.name) self.assertEqual(parent_info, info) self.assertFalse(volume.set_snap.called) volume.parent_info.assert_called_once_with() @common_mocks 
def test_get_clone_info_w_snap(self): volume = self.mock_rbd.Image() volume.set_snap = mock.Mock() volume.parent_info = mock.Mock() parent_info = ('a', 'b', '%s.clone_snap' % (self.volume_a.name)) volume.parent_info.return_value = parent_info snapshot = self.mock_rbd.ImageSnapshot() info = self.driver._get_clone_info(volume, self.volume_a.name, snap=snapshot) self.assertEqual(parent_info, info) self.assertEqual(2, volume.set_snap.call_count) volume.parent_info.assert_called_once_with() @common_mocks def test_get_clone_info_w_exception(self): volume = self.mock_rbd.Image() volume.set_snap = mock.Mock() volume.parent_info = mock.Mock() volume.parent_info.side_effect = self.mock_rbd.ImageNotFound snapshot = self.mock_rbd.ImageSnapshot() info = self.driver._get_clone_info(volume, self.volume_a.name, snap=snapshot) self.assertEqual((None, None, None), info) self.assertEqual(2, volume.set_snap.call_count) volume.parent_info.assert_called_once_with() # Make sure the exception was raised self.assertEqual([self.mock_rbd.ImageNotFound], RAISED_EXCEPTIONS) @common_mocks def test_get_clone_info_deleted_volume(self): volume = self.mock_rbd.Image() volume.set_snap = mock.Mock() volume.parent_info = mock.Mock() parent_info = ('a', 'b', '%s.clone_snap' % (self.volume_a.name)) volume.parent_info.return_value = parent_info info = self.driver._get_clone_info(volume, "%s.deleted" % (self.volume_a.name)) self.assertEqual(parent_info, info) self.assertFalse(volume.set_snap.called) volume.parent_info.assert_called_once_with() @ddt.data(3, 2, 1, 0) @common_mocks def test_get_clone_depth(self, expected_depth): # set the max_clone_depth option to check for Bug #1901241, where # lowering the configured rbd_max_clone_depth prevented cloning of # volumes that had already (legally) exceeded the new value because # _get_clone_depth would raise an uncaught exception self.cfg.rbd_max_clone_depth = 1 # create a list of fake parents for the expected depth vols = [self.volume_a, self.volume_b, self.volume_c] volume_list = vols[:expected_depth] def fake_clone_info(volume, volume_name): parent = volume_list.pop() if volume_list else None return (None, parent, None) with mock.patch.object( self.driver, '_get_clone_info') as mock_get_clone_info: mock_get_clone_info.side_effect = fake_clone_info with mock.patch.object( self.driver.rbd.Image(), 'close') as mock_rbd_image_close: depth = self.driver._get_clone_depth(self.mock_client, "volume-00000000d") self.assertEqual(expected_depth, depth) # each parent must be closed plus the original volume self.assertEqual(expected_depth + 1, mock_rbd_image_close.call_count) @common_mocks @mock.patch.object(driver.RBDDriver, '_enable_replication') def test_create_cloned_volume_same_size(self, mock_enable_repl): self.cfg.rbd_max_clone_depth = 2 with mock.patch.object(self.driver, '_get_clone_depth') as \ mock_get_clone_depth: # Try with no flatten required with mock.patch.object(self.driver, '_resize') as mock_resize: mock_get_clone_depth.return_value = 1 res = self.driver.create_cloned_volume(self.volume_b, self.volume_a) self.assertEqual({}, res) (self.mock_rbd.Image.return_value.create_snap .assert_called_once_with('.'.join( (self.volume_b.name, 'clone_snap')))) (self.mock_rbd.Image.return_value.protect_snap .assert_called_once_with('.'.join( (self.volume_b.name, 'clone_snap')))) # We expect clone() to be called exactly once. 
self.assertEqual( 1, self.mock_rbd.RBD.return_value.clone.call_count) # Without flattening, only the source volume is opened, # so only one call to close() should occur. self.assertEqual( 1, self.mock_rbd.Image.return_value.close.call_count) self.assertTrue(mock_get_clone_depth.called) mock_resize.assert_not_called() mock_enable_repl.assert_not_called() @common_mocks @mock.patch.object(driver.RBDDriver, '_get_clone_depth', return_value=1) @mock.patch.object(driver.RBDDriver, '_resize') @mock.patch.object(driver.RBDDriver, '_enable_replication') def test_create_cloned_volume_replicated(self, mock_enable_repl, mock_resize, mock_get_clone_depth): self.cfg.rbd_max_clone_depth = 2 self.volume_b.volume_type = fake_volume.fake_volume_type_obj( self.context, id=fake.VOLUME_TYPE_ID, extra_specs={'replication_enabled': ' True'}) expected_update = { 'replication_status': 'enabled', 'replication_driver_data': '{"had_journaling": false}' } mock_enable_repl.return_value = expected_update res = self.driver.create_cloned_volume(self.volume_b, self.volume_a) self.assertEqual(expected_update, res) mock_enable_repl.assert_called_once_with(self.volume_b) name = self.volume_b.name image = self.mock_rbd.Image.return_value image.create_snap.assert_called_once_with(name + '.clone_snap') image.protect_snap.assert_called_once_with(name + '.clone_snap') self.assertEqual(1, self.mock_rbd.RBD.return_value.clone.call_count) self.assertEqual( 1, self.mock_rbd.Image.return_value.close.call_count) mock_get_clone_depth.assert_called_once_with( self.mock_client().__enter__(), self.volume_a.name) mock_resize.assert_not_called() @common_mocks @mock.patch.object(driver.RBDDriver, '_enable_replication') def test_create_cloned_volume_different_size(self, mock_enable_repl): self.cfg.rbd_max_clone_depth = 2 with mock.patch.object(self.driver, '_get_clone_depth') as \ mock_get_clone_depth: # Try with no flatten required with mock.patch.object(self.driver, '_resize') as mock_resize: mock_get_clone_depth.return_value = 1 self.volume_b.size = 20 res = self.driver.create_cloned_volume(self.volume_b, self.volume_a) self.assertEqual({}, res) (self.mock_rbd.Image.return_value.create_snap .assert_called_once_with('.'.join( (self.volume_b.name, 'clone_snap')))) (self.mock_rbd.Image.return_value.protect_snap .assert_called_once_with('.'.join( (self.volume_b.name, 'clone_snap')))) self.assertEqual( 1, self.mock_rbd.RBD.return_value.clone.call_count) self.assertEqual( 1, self.mock_rbd.Image.return_value.close.call_count) self.assertTrue(mock_get_clone_depth.called) self.assertEqual( 1, mock_resize.call_count) mock_enable_repl.assert_not_called() @common_mocks def test_create_cloned_volume_different_size_copy_only(self): self.cfg.rbd_max_clone_depth = 0 with mock.patch.object(self.driver, '_get_clone_depth') as \ mock_get_clone_depth: # Try with no flatten required with mock.patch.object(self.driver, '_resize') as mock_resize: mock_get_clone_depth.return_value = 1 self.volume_b.size = 20 self.driver.create_cloned_volume(self.volume_b, self.volume_a) self.assertEqual(1, mock_resize.call_count) @ddt.data(True, False) @common_mocks @mock.patch.object(driver.RBDDriver, '_enable_replication') def test_create_cloned_volume_max_depth(self, use_quota, mock_enable_repl): """Test clone when we reach max depth. It will flatten for normal volumes and skip flattening for temporary volumes. 
""" self.cfg.rbd_max_clone_depth = 1 dest_vol = self.volume_b if use_quota else self.temp_volume client = self.mock_client.return_value client.__enter__.return_value = client with mock.patch.object(self.driver, '_get_clone_info') as \ mock_get_clone_info: mock_get_clone_info.return_value = ( ('fake_pool', dest_vol.name, '.'.join((dest_vol.name, 'clone_snap')))) with mock.patch.object(self.driver, '_get_clone_depth') as \ mock_get_clone_depth: # Force flatten mock_get_clone_depth.return_value = 1 res = self.driver.create_cloned_volume(dest_vol, self.volume_a) self.assertEqual({}, res) (self.mock_rbd.Image.return_value.create_snap .assert_called_once_with('.'.join( (dest_vol.name, 'clone_snap')))) (self.mock_rbd.Image.return_value.protect_snap .assert_called_once_with('.'.join( (dest_vol.name, 'clone_snap')))) self.assertEqual( 1, self.mock_rbd.RBD.return_value.clone.call_count) proxy = self.mock_proxy.return_value.__enter__.return_value if dest_vol.use_quota: clone_snap_name = '.'.join((dest_vol.name, 'clone_snap')) self.mock_rbd.Image.return_value.unprotect_snap.\ assert_called_once_with(clone_snap_name) self.mock_rbd.Image.return_value.remove_snap.\ assert_called_once_with(clone_snap_name) self.mock_proxy.assert_called_once_with( self.driver, dest_vol.name, client=client, ioctx=client.ioctx) proxy.flatten.assert_called_once_with() else: self.mock_rbd.Image.return_value.unprotect_snap.\ assert_not_called() self.mock_rbd.Image.return_value.remove_snap.\ assert_not_called() self.mock_proxy.assert_not_called() proxy.flatten.assert_not_called() # Source volume is closed by direct call of close() self.assertEqual( 1, self.mock_rbd.Image.return_value.close.call_count) self.assertTrue(mock_get_clone_depth.called) mock_enable_repl.assert_not_called() @common_mocks @mock.patch.object(driver.RBDDriver, '_enable_replication') def test_create_cloned_volume_w_clone_exception(self, mock_enable_repl): self.cfg.rbd_max_clone_depth = 2 self.mock_rbd.RBD.return_value.clone.side_effect = ( self.mock_rbd.RBD.Error) with mock.patch.object(self.driver, '_get_clone_depth') as \ mock_get_clone_depth: # Try with no flatten required mock_get_clone_depth.return_value = 1 self.assertRaises(self.mock_rbd.RBD.Error, self.driver.create_cloned_volume, self.volume_b, self.volume_a) (self.mock_rbd.Image.return_value.create_snap .assert_called_once_with('.'.join( (self.volume_b.name, 'clone_snap')))) (self.mock_rbd.Image.return_value.protect_snap .assert_called_once_with('.'.join( (self.volume_b.name, 'clone_snap')))) self.assertEqual( 1, self.mock_rbd.RBD.return_value.clone.call_count) (self.mock_rbd.Image.return_value.unprotect_snap .assert_called_once_with('.'.join( (self.volume_b.name, 'clone_snap')))) (self.mock_rbd.Image.return_value.remove_snap .assert_called_once_with('.'.join( (self.volume_b.name, 'clone_snap')))) self.assertEqual( 1, self.mock_rbd.Image.return_value.close.call_count) mock_enable_repl.assert_not_called() @common_mocks def test_good_locations(self): locations = ['rbd://fsid/pool/image/snap', 'rbd://%2F/%2F/%2F/%2F', ] map(self.driver._parse_location, locations) @common_mocks def test_bad_locations(self): locations = ['rbd://image', 'http://path/to/somewhere/else', 'rbd://image/extra', 'rbd://image/', 'rbd://fsid/pool/image/', 'rbd://fsid/pool/image/snap/', 'rbd://///', ] for loc in locations: self.assertRaises(exception.ImageUnacceptable, self.driver._parse_location, loc) self.assertFalse( self.driver._is_cloneable(loc, {'disk_format': 'raw'})) @common_mocks def test_cloneable(self): with 
mock.patch.object(self.driver, '_get_fsid') as mock_get_fsid: mock_get_fsid.return_value = 'abc' location = 'rbd://abc/pool/image/snap' info = {'disk_format': 'raw'} self.assertTrue(self.driver._is_cloneable(location, info)) self.assertTrue(mock_get_fsid.called) @common_mocks def test_uncloneable_different_fsid(self): with mock.patch.object(self.driver, '_get_fsid') as mock_get_fsid: mock_get_fsid.return_value = 'abc' location = 'rbd://def/pool/image/snap' self.assertFalse( self.driver._is_cloneable(location, {'disk_format': 'raw'})) self.assertTrue(mock_get_fsid.called) @common_mocks def test_uncloneable_unreadable(self): with mock.patch.object(self.driver, '_get_fsid') as mock_get_fsid: mock_get_fsid.return_value = 'abc' location = 'rbd://abc/pool/image/snap' self.driver.rbd.Error = Exception self.mock_proxy.side_effect = Exception args = [location, {'disk_format': 'raw'}] self.assertFalse(self.driver._is_cloneable(*args)) self.assertEqual(1, self.mock_proxy.call_count) self.assertTrue(mock_get_fsid.called) @common_mocks def test_uncloneable_bad_format(self): with mock.patch.object(self.driver, '_get_fsid') as mock_get_fsid: mock_get_fsid.return_value = 'abc' location = 'rbd://abc/pool/image/snap' formats = ['qcow2', 'vmdk', 'vdi'] for f in formats: self.assertFalse( self.driver._is_cloneable(location, {'disk_format': f})) self.assertTrue(mock_get_fsid.called) def _copy_image(self, volume_busy=False): self.mock_object(tempfile, 'NamedTemporaryFile') self.mock_object(os.path, 'exists', return_value=True) self.mock_object(image_utils, 'fetch_to_raw') self.mock_object(self.driver, '_resize') with mock.patch.object(self.driver, 'delete_volume') as mock_dv: mock_image_service = mock.MagicMock() args = [None, self.volume_a, mock_image_service, None] if volume_busy: mock_dv.side_effect = exception.VolumeIsBusy("doh") self.assertRaises( exception.VolumeIsBusy, self.driver.copy_image_to_volume, *args) self.assertEqual( self.cfg.rados_connection_retries, mock_dv.call_count) else: self.driver.copy_image_to_volume(*args) @mock.patch('cinder.volume.drivers.rbd.fileutils.delete_if_exists') @mock.patch('cinder.image.image_utils.convert_image') def _copy_image_encrypted(self, mock_convert, mock_temp_delete): key_mgr = fake_keymgr.fake_api() self.mock_object(castellan.key_manager, 'API', return_value=key_mgr) key_id = key_mgr.store(self.context, KeyObject()) self.volume_a.encryption_key_id = key_id enc_info = {'encryption_key_id': key_id, 'cipher': 'aes-xts-essiv', 'key_size': 256} self.mock_object(cinder.volume.volume_utils, 'check_encryption_provider', return_value=enc_info) self.mock_object(cinder.volume.drivers.rbd, 'open') self.mock_object(os, 'rename') self.mock_object(tempfile, 'NamedTemporaryFile') self.mock_object(os.path, 'exists', return_value=True) self.mock_object(image_utils, 'fetch_to_raw') self.mock_object(self.driver, 'delete_volume') self.mock_object(self.driver, '_resize') mock_image_service = mock.MagicMock() args = [self.context, self.volume_a, mock_image_service, None] self.driver.copy_image_to_encrypted_volume(*args) mock_temp_delete.assert_called() self.assertEqual(1, mock_temp_delete.call_count) @common_mocks def test_copy_image_no_volume_tmp(self): self.cfg.image_conversion_dir = None self._copy_image() @common_mocks def test_copy_image_volume_tmp(self): self.cfg.image_conversion_dir = '/var/run/cinder/tmp' self._copy_image() @common_mocks def test_copy_image_volume_tmp_encrypted(self): self.cfg.image_conversion_dir = '/var/run/cinder/tmp' self._copy_image_encrypted() 
@common_mocks def test_copy_image_busy_volume(self): self.cfg.image_conversion_dir = '/var/run/cinder/tmp' self._copy_image(volume_busy=True) @ddt.data(True, False) @common_mocks @mock.patch('cinder.volume.drivers.rbd.RBDDriver._supports_qos') @mock.patch('cinder.volume.drivers.rbd.RBDDriver._get_usage_info') @mock.patch('cinder.volume.drivers.rbd.RBDDriver._get_pool_stats') def test_update_volume_stats(self, replication_enabled, stats_mock, usage_mock, mock_qos_supported): stats_mock.return_value = (mock.sentinel.free_capacity_gb, mock.sentinel.total_capacity_gb) usage_mock.return_value = mock.sentinel.provisioned_capacity_gb mock_qos_supported.return_value = True expected_fsid = 'abc' expected_location_info = ('nondefault:%s:%s:%s:rbd' % (self.cfg.rbd_ceph_conf, expected_fsid, self.cfg.rbd_user)) expected = dict( volume_backend_name='RBD', replication_enabled=replication_enabled, vendor_name='Open Source', driver_version=self.driver.VERSION, storage_protocol='ceph', total_capacity_gb=mock.sentinel.total_capacity_gb, free_capacity_gb=mock.sentinel.free_capacity_gb, reserved_percentage=0, thin_provisioning_support=True, provisioned_capacity_gb=mock.sentinel.provisioned_capacity_gb, max_over_subscription_ratio=1.0, multiattach=True, location_info=expected_location_info, backend_state='up', qos_support=True) if replication_enabled: targets = [{'backend_id': 'secondary-backend'}, {'backend_id': 'tertiary-backend'}] with mock.patch.object(self.driver.configuration, 'safe_get', return_value=targets): self.driver._do_setup_replication() expected['replication_targets'] = [t['backend_id']for t in targets] expected['replication_targets'].append('default') my_safe_get = MockDriverConfig(rbd_exclusive_cinder_pool=False) self.mock_object(self.driver.configuration, 'safe_get', my_safe_get) with mock.patch.object(self.driver, '_get_fsid') as mock_get_fsid: mock_get_fsid.return_value = expected_fsid actual = self.driver.get_volume_stats(True) self.assertDictEqual(expected, actual) mock_qos_supported.assert_called_once_with() @common_mocks @mock.patch('cinder.volume.drivers.rbd.RBDDriver._supports_qos') @mock.patch('cinder.volume.drivers.rbd.RBDDriver._get_usage_info') @mock.patch('cinder.volume.drivers.rbd.RBDDriver._get_pool_stats') def test_update_volume_stats_exclusive_pool(self, stats_mock, usage_mock, mock_qos_supported): stats_mock.return_value = (mock.sentinel.free_capacity_gb, mock.sentinel.total_capacity_gb) # Set the version to unsupported, leading to the qos_support parameter # in the actual output differing to the one set below in expected. 
mock_qos_supported.return_value = False expected_fsid = 'abc' expected_location_info = ('nondefault:%s:%s:%s:rbd' % (self.cfg.rbd_ceph_conf, expected_fsid, self.cfg.rbd_user)) expected = dict( volume_backend_name='RBD', replication_enabled=False, vendor_name='Open Source', driver_version=self.driver.VERSION, storage_protocol='ceph', total_capacity_gb=mock.sentinel.total_capacity_gb, free_capacity_gb=mock.sentinel.free_capacity_gb, reserved_percentage=0, thin_provisioning_support=True, max_over_subscription_ratio=1.0, multiattach=True, location_info=expected_location_info, backend_state='up', qos_support=False) my_safe_get = MockDriverConfig(rbd_exclusive_cinder_pool=True) self.mock_object(self.driver.configuration, 'safe_get', my_safe_get) with mock.patch.object(self.driver, '_get_fsid', return_value=expected_fsid): actual = self.driver.get_volume_stats(True) self.assertDictEqual(expected, actual) usage_mock.assert_not_called() mock_qos_supported.assert_called_once_with() @common_mocks @mock.patch('cinder.volume.drivers.rbd.RBDDriver._supports_qos') @mock.patch('cinder.volume.drivers.rbd.RBDDriver._get_usage_info') @mock.patch('cinder.volume.drivers.rbd.RBDDriver._get_pool_stats') def test_update_volume_stats_error(self, stats_mock, usage_mock, mock_qos_supported): my_safe_get = MockDriverConfig(rbd_exclusive_cinder_pool=False) self.mock_object(self.driver.configuration, 'safe_get', my_safe_get) mock_qos_supported.return_value = True expected_fsid = 'abc' expected_location_info = ('nondefault:%s:%s:%s:rbd' % (self.cfg.rbd_ceph_conf, expected_fsid, self.cfg.rbd_user)) expected = dict(volume_backend_name='RBD', replication_enabled=False, vendor_name='Open Source', driver_version=self.driver.VERSION, storage_protocol='ceph', total_capacity_gb='unknown', free_capacity_gb='unknown', reserved_percentage=0, multiattach=True, max_over_subscription_ratio=1.0, thin_provisioning_support=True, location_info=expected_location_info, backend_state='down', qos_support=True) with mock.patch.object(self.driver, '_get_fsid') as mock_get_fsid: mock_get_fsid.return_value = expected_fsid actual = self.driver.get_volume_stats(True) self.assertDictEqual(expected, actual) @ddt.data( # Normal case, no quota and dynamic total {'free_capacity': 27.0, 'total_capacity': 28.44}, # No quota and static total {'dynamic_total': False, 'free_capacity': 27.0, 'total_capacity': 59.96}, # Quota and dynamic total {'quota_max_bytes': 3221225472, 'max_avail': 1073741824, 'free_capacity': 1, 'total_capacity': 2.44}, # Quota and static total {'quota_max_bytes': 3221225472, 'max_avail': 1073741824, 'dynamic_total': False, 'free_capacity': 1, 'total_capacity': 3.00}, # Quota and dynamic total when free would be negative {'quota_max_bytes': 1073741824, 'free_capacity': 0, 'total_capacity': 1.44}, ) @ddt.unpack @common_mocks def test_get_pool(self, free_capacity, total_capacity, max_avail=28987613184, quota_max_bytes=0, dynamic_total=True): client = self.mock_client.return_value client.__enter__.return_value = client client.cluster.mon_command.side_effect = [ (0, '{"stats":{"total_bytes":64385286144,' '"total_used_bytes":3289628672,"total_avail_bytes":61095657472},' '"pools":[{"name":"rbd","id":2,"stats":{"kb_used":1510197,' '"bytes_used":1546440971,"max_avail":%s,"objects":412}},' '{"name":"volumes","id":3,"stats":{"kb_used":0,"bytes_used":0,' '"max_avail":28987613184,"objects":0}}]}\n' % max_avail, ''), (0, '{"pool_name":"volumes","pool_id":4,"quota_max_objects":0,' '"quota_max_bytes":%s}\n' % quota_max_bytes, ''), ] with 
mock.patch.object(self.driver.configuration, 'safe_get', return_value=dynamic_total): result = self.driver._get_pool_stats() client.cluster.mon_command.assert_has_calls([ mock.call('{"prefix":"df", "format":"json"}', b''), mock.call('{"prefix":"osd pool get-quota", "pool": "rbd",' ' "format":"json"}', b''), ]) self.assertEqual((free_capacity, total_capacity), result) @ddt.data( # Normal case, no quota and dynamic total {'free_capacity': 27.0, 'total_capacity': 28.44}, # No quota and static total {'dynamic_total': False, 'free_capacity': 27.0, 'total_capacity': 59.96}, # Quota and dynamic total {'quota_max_bytes': 3221225472, 'max_avail': 1073741824, 'free_capacity': 1, 'total_capacity': 2.44}, # Quota and static total {'quota_max_bytes': 3221225472, 'max_avail': 1073741824, 'dynamic_total': False, 'free_capacity': 1, 'total_capacity': 3.00}, # Quota and dynamic total when free would be negative {'quota_max_bytes': 1073741824, 'free_capacity': 0, 'total_capacity': 1.44}, ) @ddt.unpack @common_mocks def test_get_pool_nautilus(self, free_capacity, total_capacity, max_avail=28987613184, quota_max_bytes=0, dynamic_total=True): client = self.mock_client.return_value client.__enter__.return_value = client client.cluster.mon_command.side_effect = [ (0, '{"stats":{"total_bytes":64385286144,' '"total_used_bytes":3289628672,"total_avail_bytes":61095657472},' '"pools":[{"name":"rbd","id":2,"stats":{"kb_used":1510197,' '"stored":1546440971,"bytes_used":4639322913,"max_avail":%s,' '"objects":412}},{"name":"volumes","id":3,"stats":{"kb_used":0,' '"bytes_used":0,"max_avail":28987613184,"objects":0}}]}\n' % max_avail, ''), (0, '{"pool_name":"volumes","pool_id":4,"quota_max_objects":0,' '"quota_max_bytes":%s}\n' % quota_max_bytes, ''), ] with mock.patch.object(self.driver.configuration, 'safe_get', return_value=dynamic_total): result = self.driver._get_pool_stats() client.cluster.mon_command.assert_has_calls([ mock.call('{"prefix":"df", "format":"json"}', b''), mock.call('{"prefix":"osd pool get-quota", "pool": "rbd",' ' "format":"json"}', b''), ]) self.assertEqual((free_capacity, total_capacity), result) @common_mocks def test_get_pool_bytes(self): """Test for mon_commands returning bytes instead of strings.""" client = self.mock_client.return_value client.__enter__.return_value = client client.cluster.mon_command.side_effect = [ (0, b'{"stats":{"total_bytes":64385286144,' b'"total_used_bytes":3289628672,"total_avail_bytes":61095657472},' b'"pools":[{"name":"rbd","id":2,"stats":{"kb_used":1510197,' b'"bytes_used":1546440971,"max_avail":2897613184,"objects":412}},' b'{"name":"volumes","id":3,"stats":{"kb_used":0,"bytes_used":0,' b'"max_avail":28987613184,"objects":0}}]}\n', ''), (0, b'{"pool_name":"volumes","pool_id":4,"quota_max_objects":0,' b'"quota_max_bytes":3221225472}\n', ''), ] result = self.driver._get_pool_stats() client.cluster.mon_command.assert_has_calls([ mock.call('{"prefix":"df", "format":"json"}', b''), mock.call('{"prefix":"osd pool get-quota", "pool": "rbd",' ' "format":"json"}', b''), ]) free_capacity = 1.56 total_capacity = 3.0 self.assertEqual((free_capacity, total_capacity), result) @common_mocks def test_get_pool_stats_failure(self): client = self.mock_client.return_value client.__enter__.return_value = client client.cluster.mon_command.return_value = (-1, '', '') result = self.driver._get_pool_stats() self.assertEqual(('unknown', 'unknown'), result) @common_mocks def test_get_mon_addrs(self): with mock.patch.object(self.driver, '_execute') as mock_execute: mock_execute.return_value = 
(CEPH_MON_DUMP, '') hosts = ['::1', '::1', '::1', '127.0.0.1', 'example.com'] ports = ['6789', '6790', '6791', '6792', '6791'] self.assertEqual((hosts, ports), self.driver._get_mon_addrs()) @common_mocks def _initialize_connection_helper(self, expected, hosts, ports): with mock.patch.object(self.driver, '_get_mon_addrs') as \ mock_get_mon_addrs: mock_get_mon_addrs.return_value = (hosts, ports) actual = self.driver.initialize_connection(self.volume_a, None) self.assertDictEqual(expected, actual) self.assertTrue(mock_get_mon_addrs.called) def test_initialize_connection(self): hosts = ['::1', '::1', '::1', '127.0.0.1', 'example.com'] ports = ['6789', '6790', '6791', '6792', '6791'] self.driver._active_config = {'name': 'secondary_id', 'user': 'foo', 'conf': 'bar', 'secret_uuid': self.cfg.rbd_secret_uuid} expected = { 'driver_volume_type': 'rbd', 'data': { 'name': '%s/%s' % (self.cfg.rbd_pool, self.volume_a.name), 'hosts': hosts, 'ports': ports, 'cluster_name': 'secondary_id', 'auth_enabled': True, 'auth_username': 'foo', 'secret_type': 'ceph', 'secret_uuid': self.cfg.rbd_secret_uuid, 'volume_id': self.volume_a.id, 'discard': True, } } self._initialize_connection_helper(expected, hosts, ports) # Check how it will work with keyring data (for cinderlib) keyring_data = "[client.cinder]\n key = test\n" self.driver.keyring_data = keyring_data expected['data']['keyring'] = keyring_data self._initialize_connection_helper(expected, hosts, ports) self.driver._active_config = {'name': 'secondary_id', 'user': 'foo', 'conf': 'bar', 'secret_uuid': 'secondary_secret_uuid'} expected['data']['secret_uuid'] = 'secondary_secret_uuid' self._initialize_connection_helper(expected, hosts, ports) def test__set_keyring_attributes_openstack(self): # OpenStack usage doesn't have the rbd_keyring_conf Oslo Config option self.assertFalse(hasattr(self.driver.configuration, 'rbd_keyring_conf')) # Set initial values so we can confirm that we set them to None self.driver.keyring_file = mock.sentinel.keyring_file self.driver.keyring_data = mock.sentinel.keyring_data self.driver._set_keyring_attributes() self.assertIsNone(self.driver.keyring_file) self.assertIsNone(self.driver.keyring_data) def test__set_keyring_attributes_cinderlib(self): # OpenStack usage doesn't have the rbd_keyring_conf Oslo Config option cfg_file = '/etc/ceph/ceph.client.admin.keyring' self.driver.configuration.rbd_keyring_conf = cfg_file with mock.patch('os.path.isfile', return_value=False): self.driver._set_keyring_attributes() self.assertEqual(cfg_file, self.driver.keyring_file) self.assertIsNone(self.driver.keyring_data) @mock.patch('os.path.isfile') @mock.patch.object(driver, 'open') def test__set_keyring_attributes_cinderlib_read_file(self, mock_open, mock_isfile): cfg_file = '/etc/ceph/ceph.client.admin.keyring' # This is how cinderlib sets the config option setattr(self.driver.configuration, 'rbd_keyring_conf', cfg_file) keyring_data = "[client.cinder]\n key = test\n" mock_read = mock_open.return_value.__enter__.return_value.read mock_read.return_value = keyring_data self.assertIsNone(self.driver.keyring_file) self.assertIsNone(self.driver.keyring_data) self.driver._set_keyring_attributes() mock_isfile.assert_called_once_with(cfg_file) mock_open.assert_called_once_with(cfg_file, 'r') mock_read.assert_called_once_with() self.assertEqual(cfg_file, self.driver.keyring_file) self.assertEqual(keyring_data, self.driver.keyring_data) @mock.patch('os.path.isfile') @mock.patch.object(driver, 'open', side_effect=IOError) def 
test__set_keyring_attributes_cinderlib_error(self, mock_open, mock_isfile): cfg_file = '/etc/ceph/ceph.client.admin.keyring' # This is how cinderlib sets the config option setattr(self.driver.configuration, 'rbd_keyring_conf', cfg_file) self.assertIsNone(self.driver.keyring_file) self.driver.keyring_data = mock.sentinel.keyring_data self.driver._set_keyring_attributes() mock_isfile.assert_called_once_with(cfg_file) mock_open.assert_called_once_with(cfg_file, 'r') self.assertEqual(cfg_file, self.driver.keyring_file) self.assertIsNone(self.driver.keyring_data) @ddt.data({'rbd_chunk_size': 1}, {'rbd_chunk_size': 8}, {'rbd_chunk_size': 32}) @ddt.unpack @common_mocks @mock.patch.object(driver.RBDDriver, '_enable_replication') def test_clone(self, mock_enable_repl, rbd_chunk_size): self.cfg.rbd_store_chunk_size = rbd_chunk_size src_pool = u'images' src_image = u'image-name' src_snap = u'snapshot-name' client_stack = [] def mock__enter__(inst): def _inner(): client_stack.append(inst) return inst return _inner client = self.mock_client.return_value # capture both rados client used to perform the clone client.__enter__.side_effect = mock__enter__(client) with mock.patch.object(self.driver.rbd.Image(), 'stripe_unit') as \ mock_rbd_image_stripe_unit: mock_rbd_image_stripe_unit.return_value = 4194304 res = self.driver._clone(self.volume_a, src_pool, src_image, src_snap) self.assertEqual({}, res) args = [client_stack[0].ioctx, str(src_image), str(src_snap), client_stack[1].ioctx, str(self.volume_a.name)] stripe_unit = max(4194304, rbd_chunk_size * 1048576) expected_order = int(math.log(stripe_unit, 2)) kwargs = {'features': client.features, 'order': expected_order} self.mock_rbd.RBD.return_value.clone.assert_called_once_with( *args, **kwargs) self.assertEqual(2, client.__enter__.call_count) mock_enable_repl.assert_not_called() @common_mocks @mock.patch.object(driver.RBDDriver, '_enable_replication') def test_clone_replicated(self, mock_enable_repl): order = 20 rbd_chunk_size = 1 stripe_unit = 1048576 self.volume_a.volume_type = fake_volume.fake_volume_type_obj( self.context, id=fake.VOLUME_TYPE_ID, extra_specs={'replication_enabled': ' True'}) expected_update = { 'replication_status': 'enabled', 'replication_driver_data': '{"had_journaling": false}' } mock_enable_repl.return_value = expected_update self.cfg.rbd_store_chunk_size = rbd_chunk_size src_pool = u'images' src_image = u'image-name' src_snap = u'snapshot-name' client_stack = [] def mock__enter__(inst): def _inner(): client_stack.append(inst) return inst return _inner client = self.mock_client.return_value # capture both rados client used to perform the clone client.__enter__.side_effect = mock__enter__(client) with mock.patch.object(self.driver.rbd.Image(), 'stripe_unit') as \ mock_rbd_image_stripe_unit: mock_rbd_image_stripe_unit.return_value = stripe_unit res = self.driver._clone(self.volume_a, src_pool, src_image, src_snap) self.assertEqual(expected_update, res) mock_enable_repl.assert_called_once_with(self.volume_a) args = [client_stack[0].ioctx, str(src_image), str(src_snap), client_stack[1].ioctx, str(self.volume_a.name)] kwargs = {'features': client.features, 'order': order} self.mock_rbd.RBD.return_value.clone.assert_called_once_with( *args, **kwargs) self.assertEqual(2, client.__enter__.call_count) @ddt.data({}, {'replication_status': 'enabled', 'replication_driver_data': '{"had_journaling": false}'}) @common_mocks @mock.patch.object(driver.RBDDriver, '_is_cloneable', return_value=True) def test_clone_image_replication(self, 
return_value, mock_cloneable): mock_clone = self.mock_object(self.driver, '_clone', return_value=return_value) image_loc = ('rbd://fee/fi/fo/fum', None) image_meta = {'disk_format': 'raw', 'id': 'id.foo'} res = self.driver.clone_image(self.context, self.volume_a, image_loc, image_meta, mock.Mock()) expected = return_value.copy() expected['provider_location'] = None self.assertEqual((expected, True), res) mock_clone.assert_called_once_with(self.volume_a, 'fi', 'fo', 'fum') @common_mocks @mock.patch.object(driver.RBDDriver, '_clone', return_value=mock.sentinel.volume_update) @mock.patch.object(driver.RBDDriver, '_resize', mock.Mock()) def test_create_vol_from_snap_replication(self, mock_clone): self.cfg.rbd_flatten_volume_from_snapshot = False snapshot = self.snapshot_b res = self.driver.create_volume_from_snapshot(self.volume_a, snapshot) self.assertEqual(mock.sentinel.volume_update, res) mock_clone.assert_called_once_with(self.volume_a, self.cfg.rbd_pool, snapshot.volume_name, snapshot.name) @common_mocks @mock.patch.object(driver.RBDDriver, '_clone', return_value=mock.sentinel.volume_update) def test_create_encrypted_vol_from_snap_same_size(self, mock_clone): """Test create encrypted volume from encrypted snapshot. When creating an encrypted volume from an encrypted snapshot, the new volume is the same size as the snapshot. """ self.cfg.rbd_flatten_volume_from_snapshot = False volume_size = self.volume_c.size self.snapshot_b.volume_size = volume_size mock_resize = self.mock_object(self.driver, '_resize') mock_new_size = self.mock_object(self.driver, '_calculate_new_size') res = self.driver.create_volume_from_snapshot(self.volume_c, self.snapshot_b) self.assertEqual(mock.sentinel.volume_update, res) mock_resize.assert_not_called() mock_new_size.assert_not_called() @common_mocks @mock.patch.object(driver.RBDDriver, '_clone', return_value=mock.sentinel.volume_update) def test_create_encrypted_vol_from_snap(self, mock_clone): """Test create encrypted volume from encrypted snapshot. When creating an encrypted volume from an encrypted snapshot, the new volume is larger than the snapshot (12GB vs 11GB).
""" self.cfg.rbd_flatten_volume_from_snapshot = False new_size_bytes = 12288 diff_size = 1 volume_size = 11 self.snapshot_b.volume_size = volume_size mock_resize = self.mock_object(self.driver, '_resize') mock_new_size = self.mock_object(self.driver, '_calculate_new_size') mock_new_size.return_value = new_size_bytes res = self.driver.create_volume_from_snapshot(self.volume_c, self.snapshot_b) self.assertEqual(mock.sentinel.volume_update, res) mock_resize.assert_called_once_with(self.volume_c, size=new_size_bytes) volume_name = self.volume_c.name mock_new_size.assert_called_once_with(diff_size, volume_name) @common_mocks @mock.patch.object(driver.RBDDriver, '_clone', return_value=mock.sentinel.volume_update) def test_create_unencrypted_vol_from_snap(self, mock_clone): """Test create regular volume from regular snapshot""" self.cfg.rbd_flatten_volume_from_snapshot = False self.snapshot_b.volume.size = 9 mock_resize = self.mock_object(self.driver, '_resize') mock_new_size = self.mock_object(self.driver, '_calculate_new_size') res = self.driver.create_volume_from_snapshot(self.volume_b, self.snapshot_b) self.assertEqual(mock.sentinel.volume_update, res) mock_resize.assert_called_once_with(self.volume_b, size=None) mock_new_size.assert_not_called() @common_mocks def test_extend_volume(self): fake_size = '20' size = int(fake_size) * units.Gi with mock.patch.object(self.driver, '_resize') as mock_resize: self.driver.extend_volume(self.volume_a, fake_size) mock_resize.assert_called_once_with(self.volume_a, size=size) @mock.patch.object(driver.RBDDriver, '_qos_specs_from_volume_type') @mock.patch.object(driver.RBDDriver, '_supports_qos') @ddt.data(False, True) @common_mocks def test_retype(self, enabled, mock_qos_vers, mock_get_qos_specs): """Test retyping a non replicated volume. We will test on a system that doesn't have replication enabled and on one that hast it enabled. """ self.driver._is_replication_enabled = enabled mock_qos_vers.return_value = False if enabled: expect = {'replication_status': fields.ReplicationStatus.DISABLED} else: expect = {} context = {} diff = {'encryption': {}, 'extra_specs': {}} updates = {'name': 'testvolume', 'host': 'currenthost', 'id': fake.VOLUME_ID} fake_type = fake_volume.fake_volume_type_obj(context) volume = fake_volume.fake_volume_obj(context, **updates) volume.volume_type = None # The hosts have been checked same before rbd.retype # is called. # RBD doesn't support multiple pools in a driver. host = {'host': 'currenthost'} self.assertEqual((True, expect), self.driver.retype(context, volume, fake_type, diff, host)) # The encryptions have been checked as same before rbd.retype # is called. diff['encryption'] = {} self.assertEqual((True, expect), self.driver.retype(context, volume, fake_type, diff, host)) # extra_specs changes are supported. 
diff['extra_specs'] = {'non-empty': 'non-empty'} self.assertEqual((True, expect), self.driver.retype(context, volume, fake_type, diff, host)) diff['extra_specs'] = {} self.assertEqual((True, expect), self.driver.retype(context, volume, fake_type, diff, host)) @ddt.data({'old_replicated': False, 'new_replicated': False}, {'old_replicated': False, 'new_replicated': True}, {'old_replicated': True, 'new_replicated': False}, {'old_replicated': True, 'new_replicated': True}) @ddt.unpack @common_mocks @mock.patch.object(driver.RBDDriver, '_qos_specs_from_volume_type') @mock.patch.object(driver.RBDDriver, '_supports_qos') @mock.patch.object(driver.RBDDriver, '_disable_replication', return_value={'replication': 'disabled'}) @mock.patch.object(driver.RBDDriver, '_enable_replication', return_value={'replication': 'enabled'}) def test_retype_replicated(self, mock_disable, mock_enable, mock_qos_vers, mock_get_qos_specs, old_replicated, new_replicated): """Test retyping between replicated and non-replicated volume types. We test all four combinations of replicated and non-replicated source and destination types on a system that has replication enabled. """ self.driver._is_replication_enabled = True replicated_type = fake_volume.fake_volume_type_obj( self.context, id=fake.VOLUME_TYPE_ID, extra_specs={'replication_enabled': ' True'}) self.volume_a.volume_type = replicated_type if old_replicated else None mock_qos_vers.return_value = False mock_get_qos_specs.return_value = False if new_replicated: new_type = replicated_type if old_replicated: update = {} else: update = {'replication': 'enabled'} else: new_type = fake_volume.fake_volume_type_obj( self.context, id=fake.VOLUME_TYPE2_ID) if old_replicated: update = {'replication': 'disabled'} else: update = {'replication_status': fields.ReplicationStatus.DISABLED} res = self.driver.retype(self.context, self.volume_a, new_type, None, None) self.assertEqual((True, update), res) @common_mocks @mock.patch.object(driver.RBDDriver, 'delete_rbd_image_qos_keys') @mock.patch.object(driver.RBDDriver, 'get_rbd_image_qos') @mock.patch.object(driver.RBDDriver, '_supports_qos') @mock.patch.object(driver.RBDDriver, 'update_rbd_image_qos') def test_retype_qos(self, mock_update_qos, mock_qos_supported, mock_get_vol_qos, mock_del_vol_qos): ctxt = context.get_admin_context() qos_a = qos_specs.create(ctxt, "qos-vers-a", self.qos_policy_a) qos_b = qos_specs.create(ctxt, "qos-vers-b", self.qos_policy_b) # The vol_config dictionary contains supported as well as currently # unsupported values (CNA). The latter will be marked accordingly to # indicate the current support status.
vol_config = { "rbd_qos_bps_burst": "0", "rbd_qos_bps_burst_seconds": "1", # CNA "rbd_qos_bps_limit": "1024", "rbd_qos_iops_burst": "0", "rbd_qos_iops_burst_seconds": "1", # CNA "rbd_qos_iops_limit": "100", "rbd_qos_read_bps_burst": "0", "rbd_qos_read_bps_burst_seconds": "1", # CNA "rbd_qos_read_bps_limit": "0", "rbd_qos_read_iops_burst": "0", "rbd_qos_read_iops_burst_seconds": "1", # CNA "rbd_qos_read_iops_limit": "0", "rbd_qos_schedule_tick_min": "50", # CNA "rbd_qos_write_bps_burst": "0", "rbd_qos_write_bps_burst_seconds": "1", # CNA "rbd_qos_write_bps_limit": "0", "rbd_qos_write_iops_burst": "0", "rbd_qos_write_iops_burst_seconds": "1", # CNA "rbd_qos_write_iops_limit": "0", } mock_get_vol_qos.return_value = vol_config diff = {'encryption': {}, 'extra_specs': {}, 'qos_specs': {'consumer': (u'front-end', u'back-end'), 'created_at': (123, 456), u'total_bytes_sec': (u'1024', None), u'total_iops_sec': (u'200', None)}} delete_qos = ['total_iops_sec', 'total_bytes_sec'] self.volume_a.volume_type = fake_volume.fake_volume_type_obj( ctxt, id=fake.VOLUME_TYPE_ID, qos_specs_id = qos_a.id) new_type = fake_volume.fake_volume_type_obj( ctxt, id=fake.VOLUME_TYPE2_ID, qos_specs_id = qos_b.id) mock_qos_supported.return_value = True res = self.driver.retype(ctxt, self.volume_a, new_type, diff, None) self.assertEqual((True, {}), res) assert delete_qos == [key for key in delete_qos if key in driver.QOS_KEY_MAP] mock_update_qos.assert_called_once_with(self.volume_a, qos_b.specs) mock_del_vol_qos.assert_called_once_with(self.volume_a, delete_qos) @common_mocks @mock.patch('cinder.volume.drivers.rbd.RBDDriver.RBDProxy') def test__supports_qos(self, rbdproxy_mock): rbdproxy_ver = 20 rbdproxy_mock.return_value.version.return_value = (0, rbdproxy_ver) self.assertTrue(self.driver._supports_qos()) @common_mocks def test__qos_specs_from_volume_type(self): ctxt = context.get_admin_context() qos = qos_specs.create(ctxt, "qos-vers-a", self.qos_policy_a) self.volume_a.volume_type = fake_volume.fake_volume_type_obj( ctxt, id=fake.VOLUME_TYPE_ID, qos_specs_id = qos.id) self.assertEqual( {'total_iops_sec': '100', 'total_bytes_sec': '1024'}, self.driver._qos_specs_from_volume_type(self.volume_a.volume_type)) @common_mocks def test_get_rbd_image_qos(self): ctxt = context.get_admin_context() qos = qos_specs.create(ctxt, "qos-vers-a", self.qos_policy_a) self.volume_a.volume_type = fake_volume.fake_volume_type_obj( ctxt, id=fake.VOLUME_TYPE_ID, qos_specs_id = qos.id) rbd_image_conf = [] for qos_key, qos_val in ( self.volume_a.volume_type.qos_specs.specs.items()): rbd_image_conf.append( {'name': driver.QOS_KEY_MAP[qos_key]['ceph_key'], 'value': int(qos_val)}) rbd_image = self.mock_proxy.return_value.__enter__.return_value rbd_image.config_list.return_value = rbd_image_conf self.assertEqual( {'rbd_qos_bps_limit': 1024, 'rbd_qos_iops_limit': 100}, self.driver.get_rbd_image_qos(self.volume_a)) @common_mocks def test_update_rbd_image_qos(self): ctxt = context.get_admin_context() qos = qos_specs.create(ctxt, "qos-vers-a", self.qos_policy_a) self.volume_a.volume_type = fake_volume.fake_volume_type_obj( ctxt, id=fake.VOLUME_TYPE_ID, qos_specs_id = qos.id) rbd_image = self.mock_proxy.return_value.__enter__.return_value updated_specs = {"total_iops_sec": '50'} rbd_image.config_set.return_value = qos_specs.update(ctxt, qos.id, updated_specs) self.driver.update_rbd_image_qos(self.volume_a, updated_specs) self.assertEqual( {'total_bytes_sec': '1024', 'total_iops_sec': '50'}, self.volume_a.volume_type.qos_specs.specs) @common_mocks def 
test_delete_rbd_image_qos_key(self): ctxt = context.get_admin_context() qos = qos_specs.create(ctxt, 'qos-vers-a', self.qos_policy_a) self.volume_a.volume_type = fake_volume.fake_volume_type_obj( ctxt, id=fake.VOLUME_TYPE_ID, qos_specs_id = qos.id) rbd_image = self.mock_proxy.return_value.__enter__.return_value keys = ['total_iops_sec'] rbd_image.config_remove.return_value = qos_specs.delete_keys(ctxt, qos.id, keys) self.driver.delete_rbd_image_qos_keys(self.volume_a, keys) self.assertEqual( {'total_bytes_sec': '1024'}, self.volume_a.volume_type.qos_specs.specs) @common_mocks def test_update_migrated_volume(self): client = self.mock_client.return_value client.__enter__.return_value = client with mock.patch.object(self.driver.rbd.RBD(), 'rename') as mock_rename: context = {} mock_rename.return_value = 0 model_update = self.driver.update_migrated_volume(context, self.volume_a, self.volume_b, 'available') mock_rename.assert_called_with(client.ioctx, 'volume-%s' % self.volume_b.id, 'volume-%s' % self.volume_a.id) self.assertEqual({'_name_id': None, 'provider_location': None}, model_update) @common_mocks def test_update_migrated_volume_in_use(self): client = self.mock_client.return_value client.__enter__.return_value = client with mock.patch.object(self.driver.rbd.RBD(), 'rename') as mock_rename: context = {} mock_rename.return_value = 0 model_update = self.driver.update_migrated_volume(context, self.volume_a, self.volume_b, 'in-use') mock_rename.assert_not_called() self.assertEqual({'_name_id': self.volume_b.id, 'provider_location': self.volume_b['provider_location']}, model_update) @common_mocks def test_update_migrated_volume_image_exists(self): client = self.mock_client.return_value client.__enter__.return_value = client with mock.patch.object(self.driver.rbd.RBD(), 'rename') as mock_rename: context = {} mock_rename.return_value = 1 mock_rename.side_effect = MockImageExistsException model_update = self.driver.update_migrated_volume(context, self.volume_a, self.volume_b, 'available') mock_rename.assert_called_with(client.ioctx, 'volume-%s' % self.volume_b.id, 'volume-%s' % self.volume_a.id) self.assertEqual({'_name_id': self.volume_b.id, 'provider_location': None}, model_update) def test_rbd_volume_proxy_init(self): mock_driver = mock.Mock(name='driver') mock_driver._connect_to_rados.return_value = (None, None) with driver.RBDVolumeProxy(mock_driver, self.volume_a.name): self.assertEqual(1, mock_driver._connect_to_rados.call_count) self.assertFalse(mock_driver._disconnect_from_rados.called) self.assertEqual(1, mock_driver._disconnect_from_rados.call_count) mock_driver.reset_mock() snap = u'snapshot-name' with driver.RBDVolumeProxy(mock_driver, self.volume_a.name, snapshot=snap): self.assertEqual(1, mock_driver._connect_to_rados.call_count) self.assertFalse(mock_driver._disconnect_from_rados.called) self.assertEqual(1, mock_driver._disconnect_from_rados.call_count) def test_rbd_volume_proxy_external_conn(self): mock_driver = mock.Mock(name='driver') mock_driver._connect_to_rados.return_value = (None, None) with driver.RBDVolumeProxy(mock_driver, self.volume_a.name, client='fake_cl', ioctx='fake_io'): mock_driver._connect_to_rados.assert_not_called() mock_driver._disconnect_from_rados.assert_not_called() def test_rbd_volume_proxy_external_conn_no_iocxt(self): mock_driver = mock.Mock(name='driver') mock_driver._connect_to_rados.return_value = ('fake_cl', 'fake_io') with driver.RBDVolumeProxy(mock_driver, self.volume_a.name, client='fake_cl', pool='vol_pool'): 
mock_driver._connect_to_rados.assert_called_once_with( 'vol_pool', None, None) mock_driver._disconnect_from_rados.assert_called_once_with( 'fake_cl', 'fake_io') def test_rbd_volume_proxy_external_conn_error(self): mock_driver = mock.Mock(name='driver') mock_driver._connect_to_rados.return_value = (None, None) class RBDError(Exception): pass mock_driver.rbd.Error = RBDError mock_driver.rbd.Image.side_effect = RBDError() self.assertRaises(RBDError, driver.RBDVolumeProxy, mock_driver, self.volume_a.name, client='fake_cl', ioctx='fake_io') mock_driver._connect_to_rados.assert_not_called() mock_driver._disconnect_from_rados.assert_not_called() def test_rbd_volume_proxy_conn_error(self): mock_driver = mock.Mock(name='driver') mock_driver._connect_to_rados.return_value = ( 'fake_client', 'fake_ioctx') class RBDError(Exception): pass mock_driver.rbd.Error = RBDError mock_driver.rbd.Image.side_effect = RBDError() self.assertRaises(RBDError, driver.RBDVolumeProxy, mock_driver, self.volume_a.name, pool='fake-volumes') mock_driver._connect_to_rados.assert_called_once_with( 'fake-volumes', None, None) mock_driver._disconnect_from_rados.assert_called_once_with( 'fake_client', 'fake_ioctx') @common_mocks def test_connect_to_rados(self): # Default self.cfg.rados_connect_timeout = -1 self.mock_rados.Rados.return_value.open_ioctx.return_value = \ self.mock_rados.Rados.return_value.ioctx # default configured pool ret = self.driver._connect_to_rados() self.assertTrue(self.mock_rados.Rados.return_value.connect.called) # Expect no timeout if default is used self.mock_rados.Rados.return_value.connect.assert_called_once_with() self.assertTrue(self.mock_rados.Rados.return_value.open_ioctx.called) self.assertEqual(self.mock_rados.Rados.return_value.ioctx, ret[1]) self.mock_rados.Rados.return_value.open_ioctx.assert_called_with( self.cfg.rbd_pool) conf_set = self.mock_rados.Rados.return_value.conf_set conf_set.assert_not_called() # different pool ret = self.driver._connect_to_rados('alt_pool') self.assertTrue(self.mock_rados.Rados.return_value.connect.called) self.assertTrue(self.mock_rados.Rados.return_value.open_ioctx.called) self.assertEqual(self.mock_rados.Rados.return_value.ioctx, ret[1]) self.mock_rados.Rados.return_value.open_ioctx.assert_called_with( 'alt_pool') # With timeout self.cfg.rados_connect_timeout = 1 self.mock_rados.Rados.return_value.connect.reset_mock() self.driver._connect_to_rados() conf_set.assert_has_calls((mock.call('rados_osd_op_timeout', '1'), mock.call('rados_mon_op_timeout', '1'), mock.call('client_mount_timeout', '1'))) self.mock_rados.Rados.return_value.connect.assert_called_once_with() # error self.mock_rados.Rados.return_value.open_ioctx.reset_mock() self.mock_rados.Rados.return_value.shutdown.reset_mock() self.mock_rados.Rados.return_value.open_ioctx.side_effect = ( self.mock_rados.Error) self.assertRaises(exception.VolumeBackendAPIException, self.driver._connect_to_rados) self.assertTrue(self.mock_rados.Rados.return_value.open_ioctx.called) self.assertEqual( 3, self.mock_rados.Rados.return_value.shutdown.call_count) @common_mocks def test_failover_host_no_replication(self): self.driver._is_replication_enabled = False self.assertRaises(exception.UnableToFailOver, self.driver.failover_host, self.context, [self.volume_a], []) @ddt.data(None, 'tertiary-backend') @common_mocks @mock.patch.object(driver.RBDDriver, '_get_failover_target_config') @mock.patch.object(driver.RBDDriver, '_failover_volume', autospec=True) def test_failover_host(self, secondary_id, mock_failover_vol, 
mock_get_cfg): mock_failover_vol.side_effect = lambda self, v, r, d, s: v self.mock_object(self.driver.configuration, 'safe_get', return_value=[{'backend_id': 'secondary-backend'}, {'backend_id': 'tertiary-backend'}]) self.driver._do_setup_replication() volumes = [self.volume_a, self.volume_b] remote = self.driver._replication_targets[1 if secondary_id else 0] mock_get_cfg.return_value = (remote['name'], remote) res = self.driver.failover_host(self.context, volumes, secondary_id, []) self.assertEqual((remote['name'], volumes, []), res) self.assertEqual(remote, self.driver._active_config) mock_failover_vol.assert_has_calls( [mock.call(mock.ANY, v, remote, False, fields.ReplicationStatus.FAILED_OVER) for v in volumes]) mock_get_cfg.assert_called_with(secondary_id) @mock.patch.object(driver.RBDDriver, '_failover_volume', autospec=True) def test_failover_host_failback(self, mock_failover_vol): mock_failover_vol.side_effect = lambda self, v, r, d, s: v self.driver._active_backend_id = 'secondary-backend' self.mock_object(self.driver.configuration, 'safe_get', return_value=[{'backend_id': 'secondary-backend'}, {'backend_id': 'tertiary-backend'}]) self.driver._do_setup_replication() remote = self.driver._get_target_config('default') volumes = [self.volume_a, self.volume_b] res = self.driver.failover_host(self.context, volumes, 'default', []) self.assertEqual(('default', volumes, []), res) self.assertEqual(remote, self.driver._active_config) mock_failover_vol.assert_has_calls( [mock.call(mock.ANY, v, remote, False, fields.ReplicationStatus.ENABLED) for v in volumes]) @mock.patch.object(driver.RBDDriver, '_failover_volume') def test_failover_host_no_more_replica_targets(self, mock_failover_vol): mock_failover_vol.side_effect = lambda w, x, y, z: w self.driver._active_backend_id = 'secondary-backend' self.mock_object(self.driver.configuration, 'safe_get', return_value=[{'backend_id': 'secondary-backend'}]) self.driver._do_setup_replication() volumes = [self.volume_a, self.volume_b] self.assertRaises(exception.InvalidReplicationTarget, self.driver.failover_host, self.context, volumes, None, []) @ddt.data(True, False) @mock.patch.object(driver.RBDDriver, '_exec_on_volume', side_effect=Exception) def test_failover_volume_error(self, is_demoted, mock_exec): self.volume_a.replication_driver_data = '{"had_journaling": false}' self.volume_a.volume_type = fake_volume.fake_volume_type_obj( self.context, id=fake.VOLUME_TYPE_ID, extra_specs={'replication_enabled': ' True'}) remote = {'name': 'name', 'user': 'user', 'conf': 'conf', 'pool': 'pool'} repl_status = fields.ReplicationStatus.FAILOVER_ERROR expected = {'volume_id': self.volume_a.id, 'updates': {'status': 'error', 'previous_status': self.volume_a.status, 'replication_status': repl_status}} res = self.driver._failover_volume( self.volume_a, remote, is_demoted, fields.ReplicationStatus.FAILED_OVER) self.assertEqual(expected, res) mock_exec.assert_called_once_with(self.volume_a.name, remote, 'mirror_image_promote', not is_demoted) @mock.patch.object(driver.RBDDriver, '_exec_on_volume') def test_failover_volume(self, mock_exec): self.volume_a.replication_driver_data = '{"had_journaling": false}' self.volume_a.volume_type = fake_volume.fake_volume_type_obj( self.context, id=fake.VOLUME_TYPE_ID, extra_specs={'replication_enabled': ' True'}) remote = {'name': 'name', 'user': 'user', 'conf': 'conf', 'pool': 'pool'} repl_status = fields.ReplicationStatus.FAILED_OVER expected = {'volume_id': self.volume_a.id, 'updates': {'replication_status': repl_status}} res = 
self.driver._failover_volume(self.volume_a, remote, True, repl_status) self.assertEqual(expected, res) mock_exec.assert_called_once_with(self.volume_a.name, remote, 'mirror_image_promote', False) @common_mocks def test_manage_existing_snapshot_get_size(self): with mock.patch.object(self.driver.rbd.Image(), 'size') as \ mock_rbd_image_size: with mock.patch.object(self.driver.rbd.Image(), 'close') \ as mock_rbd_image_close: mock_rbd_image_size.return_value = 2 * units.Gi existing_ref = {'source-name': self.snapshot_b.name} return_size = self.driver.manage_existing_snapshot_get_size( self.snapshot_b, existing_ref) self.assertEqual(2, return_size) mock_rbd_image_size.assert_called_once_with() mock_rbd_image_close.assert_called_once_with() @common_mocks def test_manage_existing_snapshot_get_non_integer_size(self): rbd_snapshot = self.driver.rbd.Image.return_value rbd_snapshot.size.return_value = int(1.75 * units.Gi) existing_ref = {'source-name': self.snapshot_b.name} return_size = self.driver.manage_existing_snapshot_get_size( self.snapshot_b, existing_ref) self.assertEqual(2, return_size) rbd_snapshot.size.assert_called_once_with() rbd_snapshot.close.assert_called_once_with() @common_mocks def test_manage_existing_snapshot_get_invalid_size(self): with mock.patch.object(self.driver.rbd.Image(), 'size') as \ mock_rbd_image_size: with mock.patch.object(self.driver.rbd.Image(), 'close') \ as mock_rbd_image_close: mock_rbd_image_size.return_value = 'abcd' existing_ref = {'source-name': self.snapshot_b.name} self.assertRaises( exception.VolumeBackendAPIException, self.driver.manage_existing_snapshot_get_size, self.snapshot_b, existing_ref) mock_rbd_image_size.assert_called_once_with() mock_rbd_image_close.assert_called_once_with() @common_mocks def test_manage_existing_snapshot_with_invalid_rbd_image(self): self.mock_rbd.Image.side_effect = self.mock_rbd.ImageNotFound invalid_snapshot = 'snapshot-invalid' invalid_ref = {'source-name': invalid_snapshot} self.assertRaises(exception.ManageExistingInvalidReference, self.driver.manage_existing_snapshot_get_size, self.snapshot_b, invalid_ref) # Make sure the exception was raised self.assertEqual([self.mock_rbd.ImageNotFound], RAISED_EXCEPTIONS) @common_mocks def test_manage_existing_snapshot(self): proxy = self.mock_proxy.return_value proxy.__enter__.return_value = proxy exist_snapshot = 'snapshot-exist' existing_ref = {'source-name': exist_snapshot} proxy.rename_snap.return_value = 0 proxy.is_protected_snap.return_value = False self.driver.manage_existing_snapshot(self.snapshot_b, existing_ref) proxy.rename_snap.assert_called_with(exist_snapshot, self.snapshot_b.name) proxy.protect_snap.assert_called_with(self.snapshot_b.name) @common_mocks def test_manage_existing_snapshot_with_exist_rbd_image(self): proxy = self.mock_proxy.return_value proxy.__enter__.return_value = proxy proxy.rename_snap.side_effect = MockImageExistsException exist_snapshot = 'snapshot-exist' existing_ref = {'source-name': exist_snapshot} self.assertRaises(self.mock_rbd.ImageExists, self.driver.manage_existing_snapshot, self.snapshot_b, existing_ref) # Make sure the exception was raised self.assertEqual([self.mock_rbd.ImageExists], RAISED_EXCEPTIONS) @common_mocks def test_get_manageable_snapshots(self): cinder_snaps = [{'id': '00000000-0000-0000-0000-000000000000', 'volume_id': '11111111-1111-1111-1111-111111111111'}] vols = ['volume-11111111-1111-1111-1111-111111111111', 'vol1'] self.mock_rbd.RBD.return_value.list.return_value = vols image = 
self.mock_proxy.return_value.__enter__.return_value image.list_snaps.side_effect = [ [{'id': 1, 'name': 'snapshot-00000000-0000-0000-0000-000000000000', 'size': 2 * units.Gi}, {'id': 2, 'name': 'snap1', 'size': 6 * units.Gi}, {'id': 3, 'size': 8 * units.Gi, 'name': 'volume-22222222-2222-2222-2222-222222222222.clone_snap' }, {'id': 4, 'size': 5 * units.Gi, 'name': 'backup.33333333-3333-3333-3333-333333333333.snap.123'}], [{'id': 1, 'name': 'snap2', 'size': 4 * units.Gi}]] res = self.driver.get_manageable_snapshots( cinder_snaps, None, 1000, 0, ['size'], ['desc']) exp = [ {'size': 8, 'safe_to_manage': False, 'extra_info': None, 'reason_not_safe': 'used for clone snap', 'cinder_id': None, 'reference': { 'source-name': 'volume-22222222-2222-2222-2222-222222222222.clone_snap'}, 'source_reference': { 'source-name': 'volume-11111111-1111-1111-1111-111111111111'} }, {'size': 6, 'safe_to_manage': True, 'extra_info': None, 'reason_not_safe': None, 'cinder_id': None, 'reference': {'source-name': 'snap1'}, 'source_reference': { 'source-name': 'volume-11111111-1111-1111-1111-111111111111'} }, {'size': 5, 'safe_to_manage': False, 'extra_info': None, 'reason_not_safe': 'used for volume backup', 'cinder_id': None, 'reference': { 'source-name': 'backup.33333333-3333-3333-3333-333333333333.snap.123'}, 'source_reference': { 'source-name': 'volume-11111111-1111-1111-1111-111111111111'} }, {'size': 4, 'safe_to_manage': True, 'extra_info': None, 'reason_not_safe': None, 'cinder_id': None, 'reference': {'source-name': 'snap2'}, 'source_reference': {'source-name': 'vol1'} }, {'size': 2, 'safe_to_manage': False, 'extra_info': None, 'reason_not_safe': 'already managed', 'cinder_id': '00000000-0000-0000-0000-000000000000', 'reference': {'source-name': 'snapshot-00000000-0000-0000-0000-000000000000'}, 'source_reference': { 'source-name': 'volume-11111111-1111-1111-1111-111111111111'} }] self.assertEqual(exp, res) @common_mocks def test_unmanage_snapshot(self): proxy = self.mock_proxy.return_value proxy.__enter__.return_value = proxy proxy.list_children.return_value = [] proxy.is_protected_snap.return_value = True self.driver.unmanage_snapshot(self.snapshot_b) proxy.unprotect_snap.assert_called_with(self.snapshot_b.name) @mock.patch('cinder.volume.drivers.rbd.RBDVolumeProxy') @mock.patch('cinder.volume.drivers.rbd.RADOSClient') @mock.patch('cinder.volume.drivers.rbd.RBDDriver.RBDProxy') def test__get_usage_info(self, rbdproxy_mock, client_mock, volproxy_mock): def FakeVolProxy(size_or_exc): return mock.Mock(return_value=mock.Mock( size=mock.Mock(side_effect=(size_or_exc,)))) volumes = [ 'volume-1', 'non-existent', 'non-existent', 'non-cinder-volume' ] client = client_mock.return_value.__enter__.return_value rbdproxy_mock.return_value.list.return_value = volumes with mock.patch.object(self.driver, 'rbd', ImageNotFound=MockImageNotFoundException, OSError=MockOSErrorException): volproxy_mock.side_effect = [ mock.MagicMock(**{'__enter__': FakeVolProxy(s)}) for s in (1.0 * units.Gi, self.driver.rbd.ImageNotFound, self.driver.rbd.OSError, 2.0 * units.Gi) ] total_provision = self.driver._get_usage_info() rbdproxy_mock.return_value.list.assert_called_once_with(client.ioctx) expected_volproxy_calls = [ mock.call(self.driver, v, read_only=True, client=client.cluster, ioctx=client.ioctx) for v in volumes] self.assertEqual(expected_volproxy_calls, volproxy_mock.mock_calls) self.assertEqual(3.00, total_provision) def test_migrate_volume_bad_volume_status(self): self.volume_a.status = 'backingup' ret = 
        self.driver.migrate_volume(context, self.volume_a, None)
        self.assertEqual((False, None), ret)

    def test_migrate_volume_bad_host(self):
        host = {
            'capabilities': {
                'storage_protocol': 'not-ceph'}}
        ret = self.driver.migrate_volume(context, self.volume_a, host)
        self.assertEqual((False, None), ret)

    def test_migrate_volume_missing_location_info(self):
        host = {
            'capabilities': {
                'storage_protocol': 'ceph'}}
        ret = self.driver.migrate_volume(context, self.volume_a, host)
        self.assertEqual((False, None), ret)

    def test_migrate_volume_invalid_location_info(self):
        host = {
            'capabilities': {
                'storage_protocol': 'ceph',
                'location_info': 'foo:bar:baz'}}
        ret = self.driver.migrate_volume(context, self.volume_a, host)
        self.assertEqual((False, None), ret)

    @mock.patch('os_brick.initiator.linuxrbd.rbd')
    @mock.patch('os_brick.initiator.linuxrbd.RBDClient')
    def test_migrate_volume_mismatch_fsid(self, mock_client, mock_rbd):
        host = {
            'capabilities': {
                'storage_protocol': 'ceph',
                'location_info': 'nondefault:None:abc:None:rbd'}}

        mock_client().__enter__().client.get_fsid.return_value = 'abc'
        with mock.patch.object(self.driver, '_get_fsid') as mock_get_fsid:
            mock_get_fsid.return_value = 'not-abc'
            ret = self.driver.migrate_volume(context, self.volume_a, host)
            self.assertEqual((False, None), ret)

        mock_client().__enter__().client.get_fsid.return_value = 'not-abc'
        with mock.patch.object(self.driver, '_get_fsid') as mock_get_fsid:
            mock_get_fsid.return_value = 'abc'
            ret = self.driver.migrate_volume(context, self.volume_a, host)
            self.assertEqual((False, None), ret)

        host = {
            'capabilities': {
                'storage_protocol': 'ceph',
                'location_info': 'nondefault:None:not-abc:None:rbd'}}
        mock_client().__enter__().client.get_fsid.return_value = 'abc'
        with mock.patch.object(self.driver, '_get_fsid') as mock_get_fsid:
            mock_get_fsid.return_value = 'abc'
            ret = self.driver.migrate_volume(context, self.volume_a, host)
            self.assertEqual((False, None), ret)

    @mock.patch('os_brick.initiator.linuxrbd.rbd')
    @mock.patch('os_brick.initiator.linuxrbd.RBDClient')
    def test_migrate_volume_same_pool(self, mock_client, mock_rbd):
        host = {
            'capabilities': {
                'storage_protocol': 'ceph',
                'location_info': 'nondefault:None:abc:None:rbd'}}

        mock_client().__enter__().client.get_fsid.return_value = 'abc'
        with mock.patch.object(self.driver, '_get_fsid') as mock_get_fsid:
            mock_get_fsid.return_value = 'abc'
            ret = self.driver.migrate_volume(context, self.volume_a, host)
            self.assertEqual((True, None), ret)

    @mock.patch('os_brick.initiator.linuxrbd.rbd')
    @mock.patch('os_brick.initiator.linuxrbd.RBDClient')
    def test_migrate_volume_in_use_different_pool(self, mock_client,
                                                  mock_rbd):
        self.volume_a.status = 'in-use'
        host = {
            'capabilities': {
                'storage_protocol': 'ceph',
                'location_info': 'nondefault:None:abc:None:rbd2'}}

        mock_client().__enter__().client.get_fsid.return_value = 'abc'
        with mock.patch.object(self.driver, '_get_fsid') as mock_get_fsid:
            mock_get_fsid.return_value = 'abc'
            ret = self.driver.migrate_volume(context, self.volume_a, host)
            self.assertEqual((False, None), ret)

    @mock.patch('os_brick.initiator.linuxrbd.rbd')
    @mock.patch('os_brick.initiator.linuxrbd.RBDClient')
    @mock.patch('cinder.volume.drivers.rbd.RBDVolumeProxy')
    def test_migrate_volume_different_pool(self, mock_proxy, mock_client,
                                           mock_rbd):
        host = {
            'capabilities': {
                'storage_protocol': 'ceph',
                'location_info': 'nondefault:None:abc:None:rbd2'}}

        mock_client().__enter__().client.get_fsid.return_value = 'abc'
        with mock.patch.object(self.driver, '_get_fsid') as mock_get_fsid, \
                mock.patch.object(self.driver,
'delete_volume') as mock_delete: mock_get_fsid.return_value = 'abc' proxy = mock_proxy.return_value proxy.__enter__.return_value = proxy ret = self.driver.migrate_volume(context, self.volume_a, host) proxy.copy.assert_called_once_with( mock_client.return_value.__enter__.return_value.ioctx, self.volume_a.name) mock_delete.assert_called_once_with(self.volume_a) self.assertEqual((True, None), ret) @mock.patch('tempfile.NamedTemporaryFile') @mock.patch('cinder.volume.volume_utils.check_encryption_provider', return_value={'encryption_key_id': fake.ENCRYPTION_KEY_ID}) def test_create_encrypted_volume(self, mock_check_enc_prov, mock_temp_file): class DictObj(object): # convert a dict to object w/ attributes def __init__(self, d): self.__dict__ = d mock_temp_file.return_value.__enter__.side_effect = [ DictObj({'name': '/imgfile'}), DictObj({'name': '/passfile'})] key_mgr = fake_keymgr.fake_api() self.mock_object(castellan.key_manager, 'API', return_value=key_mgr) key_id = key_mgr.store(self.context, KeyObject()) self.volume_c.encryption_key_id = key_id enc_info = {'encryption_key_id': key_id, 'cipher': 'aes-xts-essiv', 'key_size': 256} with mock.patch('cinder.volume.volume_utils.check_encryption_provider', return_value=enc_info), \ mock.patch('cinder.volume.drivers.rbd.open') as mock_open, \ mock.patch.object(self.driver, '_execute') as mock_exec: self.driver._create_encrypted_volume(self.volume_c, self.context) mock_open.assert_called_with('/passfile', 'w') mock_exec.assert_any_call( 'qemu-img', 'create', '-f', 'luks', '-o', 'cipher-alg=aes-256,cipher-mode=xts,ivgen-alg=essiv', '--object', 'secret,id=luks_sec,format=raw,file=/passfile', '-o', 'key-secret=luks_sec', '/imgfile', '12288M') mock_exec.assert_any_call( 'rbd', 'import', '--dest-pool', 'rbd', '--order', 22, '/imgfile', self.volume_c.name) @mock.patch('cinder.objects.Volume.get_by_id') @mock.patch('cinder.db.volume_glance_metadata_get', return_value={}) @common_mocks def test_get_backup_device_ceph(self, mock_gm_get, volume_get_by_id): # Use the same volume for backup (volume_a) volume_get_by_id.return_value = self.volume_a driver = self.driver self._create_backup_db_entry(fake.BACKUP_ID, self.volume_a['id'], 1) backup = objects.Backup.get_by_id(self.context, fake.BACKUP_ID) backup.service = 'cinder.backup.drivers.ceph' ret = driver.get_backup_device(self.context, backup) self.assertEqual(ret, (self.volume_a, False)) def _create_backup_db_entry(self, backupid, volid, size, userid=str(uuid.uuid4()), projectid=str(uuid.uuid4())): backup = {'id': backupid, 'size': size, 'volume_id': volid, 'user_id': userid, 'project_id': projectid} return db.backup_create(self.context, backup)['id'] @mock.patch('cinder.volume.driver.BaseVD._get_backup_volume_temp_snapshot') @mock.patch('cinder.volume.driver.BaseVD._get_backup_volume_temp_volume') @mock.patch('cinder.objects.Volume.get_by_id') @mock.patch('cinder.db.volume_glance_metadata_get', return_value={}) @common_mocks def test_get_backup_device_other(self, mock_gm_get, volume_get_by_id, mock_get_temp_volume, mock_get_temp_snapshot): # Use a cloned volume for backup (volume_b) self.volume_a.previous_status = 'in-use' mock_get_temp_volume.return_value = self.volume_b mock_get_temp_snapshot.return_value = (self.volume_b, False) volume_get_by_id.return_value = self.volume_a driver = self.driver self._create_backup_db_entry(fake.BACKUP_ID, self.volume_a['id'], 1) backup = objects.Backup.get_by_id(self.context, fake.BACKUP_ID) backup.service = 'asdf' ret = driver.get_backup_device(self.context, backup) 
self.assertEqual(ret, (self.volume_b, False)) @common_mocks def test_multiattach_exclusions(self): self.assertEqual( self.driver.RBD_FEATURE_JOURNALING | self.driver.RBD_FEATURE_FAST_DIFF | self.driver.RBD_FEATURE_OBJECT_MAP | self.driver.RBD_FEATURE_EXCLUSIVE_LOCK, self.driver.MULTIATTACH_EXCLUSIONS) @ddt.data(MULTIATTACH_FULL_FEATURES, MULTIATTACH_REDUCED_FEATURES) @common_mocks def test_enable_multiattach(self, features): image = self.mock_proxy.return_value.__enter__.return_value image_features = features image.features.return_value = image_features ret = self.driver._enable_multiattach(self.volume_a) image.update_features.assert_called_once_with( self.driver.MULTIATTACH_EXCLUSIONS & image_features, False) self.assertEqual( {'provider_location': "{\"saved_features\":%s}" % image_features}, ret) @common_mocks def test_enable_multiattach_no_features(self): image = self.mock_proxy.return_value.__enter__.return_value image.features.return_value = 0 ret = self.driver._enable_multiattach(self.volume_a) image.update_features.assert_not_called() self.assertEqual({'provider_location': '{"saved_features":0}'}, ret) @ddt.data(MULTIATTACH_FULL_FEATURES, MULTIATTACH_REDUCED_FEATURES) @common_mocks def test_disable_multiattach(self, features): image = self.mock_proxy.return_value.__enter__.return_value self.volume_a.provider_location = '{"saved_features": %s}' % features ret = self.driver._disable_multiattach(self.volume_a) image.update_features.assert_called_once_with( self.driver.MULTIATTACH_EXCLUSIONS & features, True) self.assertEqual({'provider_location': None}, ret) @common_mocks def test_disable_multiattach_no_features(self): image = self.mock_proxy.return_value.__enter__.return_value self.volume_a.provider_location = '{"saved_features": 0}' image.features.return_value = 0 ret = self.driver._disable_multiattach(self.volume_a) image.update_features.assert_not_called() self.assertEqual({'provider_location': None}, ret) @ddt.data(('bare', 'raw'), ('bare', 'qcow2'), ('compressed', 'raw'), ('compressed', 'qcow2')) @ddt.unpack @common_mocks def test_copy_volume_to_image(self, container_format, disk_format): fake_image_meta = { 'id': 'e105244f-4cb8-447b-8452-6f1da459e3ab', 'container_format': container_format, 'disk_format': disk_format, } mock_uv = self.mock_object(cinder.volume.volume_utils, 'upload_volume') mock_get_rbd_handle = self.mock_object( self.driver, '_get_rbd_handle', return_value=mock.sentinel.rbd_handle) if container_format != 'compressed' and disk_format == 'raw': self.driver.copy_volume_to_image(mock.sentinel.context, mock.sentinel.volume, mock.sentinel.image_service, fake_image_meta) mock_get_rbd_handle.assert_called_once_with(mock.sentinel.volume) mock_uv.assert_called_once_with(mock.sentinel.context, mock.sentinel.image_service, fake_image_meta, None, mock.sentinel.volume, volume_fd= mock.sentinel.rbd_handle) else: with mock.patch.object(self.driver, '_execute'), \ mock.patch.object(fileutils, 'remove_path_on_error'), \ mock.patch.object(os, 'unlink'), \ mock.patch.object( volume_utils, 'image_conversion_dir') as fake_dir: fake_path = 'fake_path' fake_vol = 'volume-' + fake_image_meta['id'] fake_dir.return_value = fake_path fake_vol_path = os.path.join(fake_path, fake_vol) self.driver.copy_volume_to_image(mock.sentinel.context, mock.sentinel.volume, mock.sentinel.image_service, fake_image_meta) mock_get_rbd_handle.assert_not_called() mock_uv.assert_called_once_with(mock.sentinel.context, mock.sentinel.image_service, fake_image_meta, fake_vol_path, mock.sentinel.volume) class 
ManagedRBDTestCase(test_driver.BaseDriverTestCase): driver_name = "cinder.volume.drivers.rbd.RBDDriver" def setUp(self): super(ManagedRBDTestCase, self).setUp() self.volume.driver.set_initialized() self.volume.stats = {'allocated_capacity_gb': 0, 'pools': {}} self.called = [] def _create_volume_from_image(self, expected_status, raw=False, clone_error=False): """Try to clone a volume from an image, and check status afterwards. NOTE: if clone_error is True we force the image type to raw otherwise clone_image is not called """ # See tests.image.fake for image types. if raw: image_id = '155d900f-4e14-4e4c-a73d-069cbf4541e6' else: image_id = 'c905cedb-7281-47e4-8a62-f26bc5fc4c77' # creating volume testdata db_volume = {'display_description': 'Test Desc', 'size': 20, 'status': 'creating', 'availability_zone': 'fake_zone', 'attach_status': fields.VolumeAttachStatus.DETACHED, 'host': 'dummy'} volume = objects.Volume(context=self.context, **db_volume) volume.create() try: if not clone_error: self.volume.create_volume(self.context, volume, request_spec={'image_id': image_id}) else: self.assertRaises(exception.CinderException, self.volume.create_volume, self.context, volume, request_spec={'image_id': image_id}) volume = objects.Volume.get_by_id(self.context, volume.id) self.assertEqual(expected_status, volume.status) finally: # cleanup volume.destroy() @mock.patch('cinder.image.image_utils.check_available_space') @mock.patch.object(cinder.image.glance, 'get_default_image_service') def test_create_vol_from_image_status_available(self, mock_gdis, mock_check_space): """Clone raw image then verify volume is in available state.""" def _mock_clone_image(context, volume, image_location, image_meta, image_service): return {'provider_location': None}, True with mock.patch.object(self.volume.driver, 'clone_image') as \ mock_clone_image: mock_clone_image.side_effect = _mock_clone_image with mock.patch.object(self.volume.driver, 'create_volume') as \ mock_create: with mock.patch.object(volume_utils, 'copy_image_to_volume') as mock_copy: self._create_volume_from_image('available', raw=True) self.assertFalse(mock_copy.called) self.assertTrue(mock_clone_image.called) self.assertFalse(mock_create.called) self.assertTrue(mock_gdis.called) @mock.patch('cinder.image.image_utils.check_available_space') @mock.patch.object(cinder.image.glance, 'get_default_image_service') @mock.patch('cinder.image.image_utils.TemporaryImages.fetch') @mock.patch('cinder.image.image_utils.qemu_img_info') @mock.patch('cinder.image.image_utils.verify_glance_image_signature') def test_create_vol_from_non_raw_image_status_available( self, mock_verify, mock_qemu_info, mock_fetch, mock_gdis, mock_check_space): """Clone non-raw image then verify volume is in available state.""" def _mock_clone_image(context, volume, image_location, image_meta, image_service): return {'provider_location': None}, False image_info = imageutils.QemuImgInfo() image_info.virtual_size = '1073741824' mock_qemu_info.return_value = image_info self.flags(verify_glance_signatures='disabled') mock_fetch.return_value = mock.MagicMock(spec=utils.get_file_spec()) with mock.patch.object(self.volume.driver, 'clone_image') as \ mock_clone_image: mock_clone_image.side_effect = _mock_clone_image with mock.patch.object(self.volume.driver, 'create_volume') as \ mock_create: with mock.patch.object(volume_utils, 'copy_image_to_volume') as mock_copy: self._create_volume_from_image('available', raw=False) self.assertTrue(mock_copy.called) self.assertTrue(mock_clone_image.called) 
self.assertTrue(mock_create.called) self.assertTrue(mock_gdis.called) @mock.patch('cinder.image.image_utils.check_available_space') @mock.patch.object(cinder.image.glance, 'get_default_image_service') def test_create_vol_from_image_status_error(self, mock_gdis, mock_check_space): """Fail to clone raw image then verify volume is in error state.""" with mock.patch.object(self.volume.driver, 'clone_image') as \ mock_clone_image: mock_clone_image.side_effect = exception.CinderException with mock.patch.object(self.volume.driver, 'create_volume'): with mock.patch.object(volume_utils, 'copy_image_to_volume') as mock_copy: self._create_volume_from_image('error', raw=True, clone_error=True) self.assertFalse(mock_copy.called) self.assertTrue(mock_clone_image.called) self.assertFalse(self.volume.driver.create_volume.called) self.assertTrue(mock_gdis.called) def test_clone_failure(self): driver = self.volume.driver with mock.patch.object(driver, '_is_cloneable', lambda *args: False): image_loc = (mock.Mock(), None) actual = driver.clone_image(mock.Mock(), mock.Mock(), image_loc, {}, mock.Mock()) self.assertEqual(({}, False), actual) self.assertEqual(({}, False), driver.clone_image('', object(), None, {}, '')) def test_clone_success(self): expected = ({'provider_location': None}, True) driver = self.volume.driver with mock.patch.object(self.volume.driver, '_is_cloneable') as \ mock_is_cloneable: mock_is_cloneable.return_value = True with mock.patch.object(self.volume.driver, '_clone') as \ mock_clone: with mock.patch.object(self.volume.driver, '_resize') as \ mock_resize: mock_clone.return_value = {} image_loc = ('rbd://fee/fi/fo/fum', None) volume = {'name': 'vol1'} actual = driver.clone_image(mock.Mock(), volume, image_loc, {'disk_format': 'raw', 'id': 'id.foo'}, mock.Mock()) self.assertEqual(expected, actual) mock_clone.assert_called_once_with(volume, 'fi', 'fo', 'fum') mock_resize.assert_called_once_with(volume) def test_clone_multilocation_success(self): expected = ({'provider_location': None}, True) driver = self.volume.driver def cloneable_side_effect(url_location, image_meta): return url_location == 'rbd://fee/fi/fo/fum' with mock.patch.object(self.volume.driver, '_is_cloneable') \ as mock_is_cloneable, \ mock.patch.object(self.volume.driver, '_clone') as mock_clone, \ mock.patch.object(self.volume.driver, '_resize') \ as mock_resize: mock_is_cloneable.side_effect = cloneable_side_effect mock_clone.return_value = {} image_loc = ('rbd://bee/bi/bo/bum', [{'url': 'rbd://bee/bi/bo/bum'}, {'url': 'rbd://fee/fi/fo/fum'}]) volume = {'name': 'vol1'} image_meta = mock.sentinel.image_meta image_service = mock.sentinel.image_service actual = driver.clone_image(self.context, volume, image_loc, image_meta, image_service) self.assertEqual(expected, actual) self.assertEqual(2, mock_is_cloneable.call_count) mock_clone.assert_called_once_with(volume, 'fi', 'fo', 'fum') mock_is_cloneable.assert_called_with('rbd://fee/fi/fo/fum', image_meta) mock_resize.assert_called_once_with(volume) def test_clone_multilocation_failure(self): expected = ({}, False) driver = self.volume.driver with mock.patch.object(driver, '_is_cloneable', return_value=False) \ as mock_is_cloneable, \ mock.patch.object(self.volume.driver, '_clone') as mock_clone, \ mock.patch.object(self.volume.driver, '_resize') \ as mock_resize: image_loc = ('rbd://bee/bi/bo/bum', [{'url': 'rbd://bee/bi/bo/bum'}, {'url': 'rbd://fee/fi/fo/fum'}]) volume = {'name': 'vol1'} image_meta = mock.sentinel.image_meta image_service = mock.sentinel.image_service actual 
            = driver.clone_image(self.context, volume, image_loc,
                                 image_meta, image_service)
            self.assertEqual(expected, actual)
            self.assertEqual(2, mock_is_cloneable.call_count)
            mock_is_cloneable.assert_any_call('rbd://bee/bi/bo/bum',
                                              image_meta)
            mock_is_cloneable.assert_any_call('rbd://fee/fi/fo/fum',
                                              image_meta)
            self.assertFalse(mock_clone.called)
            self.assertFalse(mock_resize.called)

cinder-27.0.0/cinder/tests/unit/volume/drivers/test_remotefs.py

# Copyright 2014 Cloudbase Solutions Srl
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import collections
import copy
import os
import re
import sys
from unittest import mock

import castellan
import ddt

from cinder import context
from cinder import exception
from cinder.image import image_utils
from cinder.objects import fields
from cinder.tests.unit import fake_constants as fake
from cinder.tests.unit import fake_snapshot
from cinder.tests.unit import fake_volume
from cinder.tests.unit.keymgr import fake as fake_keymgr
from cinder.tests.unit import test
from cinder import utils
from cinder.volume.drivers import remotefs
from cinder.volume import volume_utils


class KeyObject(object):
    def get_encoded(arg):
        return "asdf".encode('utf-8')


@ddt.ddt
class RemoteFsSnapDriverTestCase(test.TestCase):

    _FAKE_MNT_POINT = '/mnt/fake_hash'

    def setUp(self):
        super(RemoteFsSnapDriverTestCase, self).setUp()
        self._driver = remotefs.RemoteFSSnapDriver()
        self._driver._remotefsclient = mock.Mock()
        self._driver._execute = mock.Mock()
        self._driver._delete = mock.Mock()

        self.context = context.get_admin_context()

        self._fake_volume = fake_volume.fake_volume_obj(
            self.context, provider_location='fake_share')
        self._fake_volume_path = os.path.join(self._FAKE_MNT_POINT,
                                              self._fake_volume.name)
        self._fake_snapshot = fake_snapshot.fake_snapshot_obj(self.context)
        self._fake_snapshot_path = (self._fake_volume_path + '.' +
                                    self._fake_snapshot.id)
        self._fake_snapshot.volume = self._fake_volume

        # Encrypted volume and snapshot
        self.volume_c = fake_volume.fake_volume_obj(
            self.context,
            **{'name': u'volume-0000000a',
               'id': '55555555-222f-4b32-b585-9991b3bf0a99',
               'size': 12,
               'encryption_key_id': fake.ENCRYPTION_KEY_ID})
        self._fake_snap_c = fake_snapshot.fake_snapshot_obj(self.context)
        self._fake_snap_c.volume = self.volume_c
        self.volume_c_path = os.path.join(self._FAKE_MNT_POINT,
                                          self.volume_c.name)
        self._fake_snap_c_path = (self.volume_c_path + '.'
+ self._fake_snap_c.id) @ddt.data({'current_state': 'in-use', 'acceptable_states': ['available', 'in-use']}, {'current_state': 'in-use', 'acceptable_states': ['available'], 'expected_exception': exception.InvalidVolume}) @ddt.unpack def test_validate_state(self, current_state, acceptable_states, expected_exception=None): if expected_exception: self.assertRaises(expected_exception, self._driver._validate_state, current_state, acceptable_states) else: self._driver._validate_state(current_state, acceptable_states) def _test_delete_snapshot(self, volume_in_use=False, stale_snapshot=False, is_active_image=True, is_tmp_snap=False, encryption=False): # If the snapshot is not the active image, it is guaranteed that # another snapshot exists having it as backing file. fake_upper_snap_id = 'fake_upper_snap_id' if encryption: fake_snapshot_name = os.path.basename(self._fake_snap_c_path) fake_info = {'active': fake_snapshot_name, self._fake_snap_c.id: fake_snapshot_name} expected_info = fake_info fake_upper_snap_path = ( self.volume_c_path + '-snapshot' + fake_upper_snap_id) snapshot = self._fake_snap_c snapshot_path = self._fake_snap_c_path volume_name = self.volume_c.name else: fake_snapshot_name = os.path.basename(self._fake_snapshot_path) fake_info = {'active': fake_snapshot_name, self._fake_snapshot.id: fake_snapshot_name} expected_info = fake_info fake_upper_snap_path = ( self._fake_volume_path + '-snapshot' + fake_upper_snap_id) snapshot = self._fake_snapshot snapshot_path = self._fake_snapshot_path volume_name = self._fake_volume.name fake_snap_img_info = mock.Mock() fake_base_img_info = mock.Mock() if stale_snapshot: fake_snap_img_info.backing_file = None else: fake_snap_img_info.backing_file = volume_name fake_snap_img_info.file_format = 'qcow2' fake_base_img_info.backing_file = None fake_base_img_info.file_format = 'raw' self._driver._local_path_volume_info = mock.Mock( return_value=mock.sentinel.fake_info_path) self._driver._qemu_img_info = mock.Mock( side_effect=[fake_snap_img_info, fake_base_img_info]) self._driver._local_volume_dir = mock.Mock( return_value=self._FAKE_MNT_POINT) self._driver._validate_state = mock.Mock() self._driver._read_info_file = mock.Mock() self._driver._write_info_file = mock.Mock() self._driver._img_commit = mock.Mock() self._driver._rebase_img = mock.Mock() self._driver._delete_stale_snapshot = mock.Mock() self._driver._delete_snapshot_online = mock.Mock() exp_acceptable_states = ['available', 'in-use', 'backing-up', 'deleting', 'downloading'] if volume_in_use: snapshot.volume.status = 'backing-up' snapshot.volume.attach_status = 'attached' self._driver._read_info_file.return_value = fake_info self._driver._delete_snapshot(snapshot) self._driver._validate_state.assert_called_once_with( snapshot.volume.status, exp_acceptable_states) if stale_snapshot: self._driver._delete_stale_snapshot.assert_called_once_with( snapshot) else: expected_online_delete_info = { 'active_file': fake_snapshot_name, 'snapshot_file': fake_snapshot_name, 'base_file': volume_name, 'base_id': None, 'new_base_file': None } self._driver._delete_snapshot_online.assert_called_once_with( self.context, snapshot, expected_online_delete_info) elif is_active_image: self._driver._read_info_file.return_value = fake_info self._driver._delete_snapshot(snapshot) self._driver._img_commit.assert_called_once_with( snapshot_path) self.assertNotIn(snapshot.id, fake_info) self._driver._write_info_file.assert_called_once_with( mock.sentinel.fake_info_path, fake_info) else: fake_upper_snap_name = 
os.path.basename(fake_upper_snap_path) fake_backing_chain = [ {'filename': fake_upper_snap_name, 'backing-filename': fake_snapshot_name}, {'filename': fake_snapshot_name, 'backing-filename': volume_name}, {'filename': volume_name, 'backing-filename': None}] fake_info[fake_upper_snap_id] = fake_upper_snap_name fake_info[self._fake_snapshot.id] = fake_snapshot_name fake_info['active'] = fake_upper_snap_name expected_info = copy.deepcopy(fake_info) del expected_info[snapshot.id] self._driver._read_info_file.return_value = fake_info self._driver._get_backing_chain_for_path = mock.Mock( return_value=fake_backing_chain) self._driver._delete_snapshot(snapshot) self._driver._img_commit.assert_called_once_with( snapshot_path) self._driver._rebase_img.assert_called_once_with( fake_upper_snap_path, volume_name, fake_base_img_info.file_format) self._driver._write_info_file.assert_called_once_with( mock.sentinel.fake_info_path, expected_info) @ddt.data({'encryption': True}, {'encryption': False}) def test_delete_snapshot_when_active_file(self, encryption): self._test_delete_snapshot(encryption=encryption) @ddt.data({'encryption': True}, {'encryption': False}) def test_delete_snapshot_in_use(self, encryption): self._test_delete_snapshot(volume_in_use=True, encryption=encryption) @ddt.data({'encryption': True}, {'encryption': False}) def test_delete_snapshot_in_use_stale_snapshot(self, encryption): self._test_delete_snapshot(volume_in_use=True, stale_snapshot=True, encryption=encryption) @ddt.data({'encryption': True}, {'encryption': False}) def test_delete_snapshot_with_one_upper_file(self, encryption): self._test_delete_snapshot(is_active_image=False, encryption=encryption) @ddt.data({'encryption': True}, {'encryption': False}) def test_delete_stale_snapshot(self, encryption): if encryption: fake_snapshot_name = os.path.basename(self._fake_snap_c_path) volume_name = self.volume_c.name snapshot = self._fake_snap_c snapshot_path = self._fake_snap_c_path else: fake_snapshot_name = os.path.basename(self._fake_snapshot_path) volume_name = self._fake_volume.name snapshot = self._fake_snapshot snapshot_path = self._fake_snapshot_path fake_snap_info = { 'active': volume_name, snapshot.id: fake_snapshot_name } expected_info = {'active': volume_name} self._driver._local_path_volume_info = mock.Mock( return_value=mock.sentinel.fake_info_path) self._driver._read_info_file = mock.Mock( return_value=fake_snap_info) self._driver._local_volume_dir = mock.Mock( return_value=self._FAKE_MNT_POINT) self._driver._write_info_file = mock.Mock() self._driver._delete_stale_snapshot(snapshot) self._driver._delete.assert_called_once_with(snapshot_path) self._driver._write_info_file.assert_called_once_with( mock.sentinel.fake_info_path, expected_info) @mock.patch.object(remotefs.RemoteFSDriver, 'secure_file_operations_enabled', return_value=True) @mock.patch.object(os, 'stat') def test_do_create_snapshot(self, _mock_stat, _mock_sec_enabled): self._driver._local_volume_dir = mock.Mock( return_value=self._fake_volume_path) fake_backing_path = os.path.join( self._driver._local_volume_dir(), self._fake_volume.name) self._driver._execute = mock.Mock() self._driver._set_rw_permissions = mock.Mock() self._driver._qemu_img_info = mock.Mock( return_value=mock.Mock(file_format=mock.sentinel.backing_fmt)) self._driver._do_create_snapshot(self._fake_snapshot, self._fake_volume.name, self._fake_snapshot_path) command1 = ['qemu-img', 'create', '-f', 'qcow2', '-o', 'backing_file=%s,backing_fmt=%s' % (fake_backing_path, 
mock.sentinel.backing_fmt), self._fake_snapshot_path, "%dG" % self._fake_volume.size] command2 = ['qemu-img', 'rebase', '-u', '-b', self._fake_volume.name, '-F', mock.sentinel.backing_fmt, self._fake_snapshot_path] command3 = ['chown', '--reference=%s' % fake_backing_path, self._fake_snapshot_path] calls = [mock.call(*command1, run_as_root=True), mock.call(*command2, run_as_root=True), mock.call(*command3, run_as_root=True)] self._driver._execute.assert_has_calls(calls) def _test_create_snapshot(self, display_name=None, volume_in_use=False, encryption=False): fake_snapshot_info = {} if encryption: fake_snapshot_file_name = os.path.basename(self._fake_snap_c_path) volume_name = self.volume_c.name snapshot = self._fake_snap_c snapshot_path = self._fake_snap_c_path else: fake_snapshot_file_name = os.path.basename( self._fake_snapshot_path) volume_name = self._fake_volume.name snapshot = self._fake_snapshot snapshot_path = self._fake_snapshot_path snapshot.display_name = display_name self._driver._local_path_volume_info = mock.Mock( return_value=mock.sentinel.fake_info_path) self._driver._read_info_file = mock.Mock( return_value=fake_snapshot_info) self._driver._do_create_snapshot = mock.Mock() self._driver._create_snapshot_online = mock.Mock() self._driver._write_info_file = mock.Mock() self._driver.get_active_image_from_info = mock.Mock( return_value=volume_name) self._driver._get_new_snap_path = mock.Mock( return_value=snapshot_path) self._driver._validate_state = mock.Mock() expected_snapshot_info = { 'active': fake_snapshot_file_name, snapshot.id: fake_snapshot_file_name } exp_acceptable_states = ['available', 'in-use', 'backing-up'] if display_name and display_name.startswith('tmp-snap-'): exp_acceptable_states.append('downloading') self._fake_snapshot.volume.status = 'downloading' if volume_in_use: snapshot.volume.status = 'backing-up' snapshot.volume.attach_status = 'attached' expected_method_called = '_create_snapshot_online' conn_info = ('{"driver_volume_type": "nfs",' '"export": "localhost:/srv/nfs1",' '"name": "old_name"}') attachment = fake_volume.volume_attachment_ovo( self.context, connection_info=conn_info) snapshot.volume.volume_attachment.objects.append(attachment) mock_save = self.mock_object(attachment, 'save') mock_vol_save = self.mock_object(snapshot.volume, 'save') # After the snapshot the connection info should change the name of # the file expected = copy.deepcopy(attachment.connection_info) expected['name'] = snapshot.volume.name + '.' 
+ snapshot.id expected['format'] = 'qcow2' else: expected_method_called = '_do_create_snapshot' self._driver._create_snapshot(snapshot) self._driver._validate_state.assert_called_once_with( snapshot.volume.status, exp_acceptable_states) fake_method = getattr(self._driver, expected_method_called) fake_method.assert_called_with( snapshot, volume_name, snapshot_path) self._driver._write_info_file.assert_called_with( mock.sentinel.fake_info_path, expected_snapshot_info) if volume_in_use: mock_save.assert_called_once() # We should have updated the volume format after the snapshot mock_vol_save.assert_called_once() changed_fields = attachment.cinder_obj_get_changes() self.assertEqual(expected, changed_fields['connection_info']) @ddt.data({'encryption': True}, {'encryption': False}) def test_create_snapshot_volume_available(self, encryption): self._test_create_snapshot(encryption=encryption) @ddt.data({'encryption': True}, {'encryption': False}) def test_create_snapshot_volume_in_use(self, encryption): self._test_create_snapshot(volume_in_use=True, encryption=encryption) def test_create_snapshot_invalid_volume(self): self._fake_snapshot.volume.status = 'error' self.assertRaises(exception.InvalidVolume, self._driver._create_snapshot, self._fake_snapshot) @ddt.data(None, 'test', 'tmp-snap-404f-404') def test_create_snapshot_names(self, display_name): self._test_create_snapshot(display_name=display_name) @mock.patch('cinder.db.snapshot_get') @mock.patch('time.sleep') def test_create_snapshot_online_with_concurrent_delete( self, mock_sleep, mock_snapshot_get): self._driver._nova = mock.Mock() # Test what happens when progress is so slow that someone # decides to delete the snapshot while the last known status is # "creating". mock_snapshot_get.side_effect = [ {'status': 'creating', 'progress': '42%'}, {'status': 'creating', 'progress': '45%'}, {'status': 'deleting'}, ] fake_snapshot = self._fake_snapshot fake_snapshot.context = self.context with mock.patch.object(self._driver, '_do_create_snapshot') as \ mock_do_create_snapshot: self.assertRaises(exception.RemoteFSConcurrentRequest, self._driver._create_snapshot_online, fake_snapshot, self._fake_volume.name, self._fake_snapshot_path) mock_do_create_snapshot.assert_called_once_with( fake_snapshot, self._fake_volume.name, self._fake_snapshot_path) self.assertEqual([mock.call(1), mock.call(1)], mock_sleep.call_args_list) self.assertEqual(3, mock_snapshot_get.call_count) mock_snapshot_get.assert_called_with(self._fake_snapshot._context, self._fake_snapshot.id) @mock.patch.object(utils, 'synchronized') def _locked_volume_operation_test_helper(self, mock_synchronized, func, expected_exception=False, *args, **kwargs): def mock_decorator(*args, **kwargs): def mock_inner(f): return f return mock_inner mock_synchronized.side_effect = mock_decorator expected_lock = '%s-%s' % (self._driver.driver_prefix, self._fake_volume.id) if expected_exception: self.assertRaises(expected_exception, func, self._driver, *args, **kwargs) else: ret_val = func(self._driver, *args, **kwargs) mock_synchronized.assert_called_with(expected_lock, external=False) self.assertEqual(mock.sentinel.ret_val, ret_val) def test_locked_volume_id_operation(self): mock_volume = mock.Mock() mock_volume.id = self._fake_volume.id @remotefs.locked_volume_id_operation def synchronized_func(inst, volume): return mock.sentinel.ret_val self._locked_volume_operation_test_helper(func=synchronized_func, volume=mock_volume) def test_locked_volume_id_snapshot_operation(self): mock_snapshot = mock.Mock() 
mock_snapshot.volume.id = self._fake_volume.id @remotefs.locked_volume_id_operation def synchronized_func(inst, snapshot): return mock.sentinel.ret_val self._locked_volume_operation_test_helper(func=synchronized_func, snapshot=mock_snapshot) def test_locked_volume_id_operation_exception(self): @remotefs.locked_volume_id_operation def synchronized_func(inst): return mock.sentinel.ret_val self._locked_volume_operation_test_helper( func=synchronized_func, expected_exception=exception.VolumeBackendAPIException) @mock.patch.object(image_utils, 'qemu_img_info') @mock.patch('os.path.basename') def _test_qemu_img_info(self, mock_basename, mock_qemu_img_info, backing_file, basedir, template=None, valid_backing_file=True): fake_vol_name = 'fake_vol_name' mock_info = mock_qemu_img_info.return_value mock_info.image = mock.sentinel.image_path mock_info.backing_file = backing_file self._driver._VALID_IMAGE_EXTENSIONS = ['vhd', 'vhdx', 'raw', 'qcow2'] mock_basename.side_effect = [mock.sentinel.image_basename, mock.sentinel.backing_file_basename] if valid_backing_file: img_info = self._driver._qemu_img_info_base( mock.sentinel.image_path, fake_vol_name, basedir, ext_bf_template=template) self.assertEqual(mock_info, img_info) self.assertEqual(mock.sentinel.image_basename, mock_info.image) expected_basename_calls = [mock.call(mock.sentinel.image_path)] if backing_file: self.assertEqual(mock.sentinel.backing_file_basename, mock_info.backing_file) expected_basename_calls.append(mock.call(backing_file)) mock_basename.assert_has_calls(expected_basename_calls) else: self.assertRaises(exception.RemoteFSInvalidBackingFile, self._driver._qemu_img_info_base, mock.sentinel.image_path, fake_vol_name, basedir) mock_qemu_img_info.assert_called_with(mock.sentinel.image_path, force_share=False, run_as_root=True, allow_qcow2_backing_file=True) @ddt.data([None, '/fake_basedir'], ['/fake_basedir/cb2016/fake_vol_name', '/fake_basedir'], ['/fake_basedir/cb2016/fake_vol_name.VHD', '/fake_basedir'], ['/fake_basedir/cb2016/fake_vol_name.404f-404', '/fake_basedir'], ['/fake_basedir/cb2016/fake_vol_name.tmp-snap-404f-404', '/fake_basedir']) @ddt.unpack def test_qemu_img_info_valid_backing_file(self, backing_file, basedir): self._test_qemu_img_info(backing_file=backing_file, basedir=basedir) @ddt.data(['/other_random_path', '/fake_basedir'], ['/other_basedir/cb2016/fake_vol_name', '/fake_basedir'], ['/fake_basedir/invalid_hash/fake_vol_name', '/fake_basedir'], ['/fake_basedir/cb2016/invalid_vol_name', '/fake_basedir'], ['/fake_basedir/cb2016/fake_vol_name.info', '/fake_basedir'], ['/fake_basedir/cb2016/fake_vol_name-random-suffix', '/fake_basedir'], ['/fake_basedir/cb2016/fake_vol_name.invalidext', '/fake_basedir']) @ddt.unpack def test_qemu_img_info_invalid_backing_file(self, backing_file, basedir): self._test_qemu_img_info(backing_file=backing_file, basedir=basedir, valid_backing_file=False) @ddt.data([None, '/fake_basedir'], ['/fake_basedir/cb2016/fake_vol_name', '/fake_basedir'], ['/fake_basedir/cb2016/fake_vol_name.VHD', '/fake_basedir'], ['/fake_basedir/cb2016/fake_vol_name.404f-404', '/fake_basedir'], ['/fake_basedir/cb2016/fake_vol_name.tmp-snap-404f-404', '/fake_basedir'], ['/fake_basedir/cb2016/other_dir/404f-404', '/fake_basedir'], ['/fake_basedir/cb2016/other_dir/tmp-snap-404f-404', '/fake_basedir'], ['/fake_basedir/cb2016/other_dir/404f-404.mod1-404f-404', '/fake_basedir'], ['/fake_basedir/cb2016/other_dir/404f-404.mod2-404f-404', '/fake_basedir']) @ddt.unpack def test_qemu_img_info_extended_backing_file(self, 
backing_file, basedir): """Tests using a special backing file template The special backing file template used in here allows backing files in a subdirectory and with special extended names (.mod1-[], .mod2-[], ...). """ ext_template = ("(#basedir/[0-9a-f]+/)?(#volname(.(tmp-snap-)" "?[0-9a-f-]+)?#valid_ext|other_dir/(tmp-snap-)?" "[0-9a-f-]+(.(mod1-|mod2-)[0-9a-f-]+)?)$") self._test_qemu_img_info(backing_file=backing_file, basedir=basedir, template=remotefs.BackingFileTemplate( ext_template), valid_backing_file=True) @ddt.data(['/other_random_path', '/fake_basedir'], ['/other_basedir/cb2016/fake_vol_name', '/fake_basedir'], ['/fake_basedir/invalid_hash/fake_vol_name', '/fake_basedir'], ['/fake_basedir/cb2016/invalid_vol_name', '/fake_basedir'], ['/fake_basedir/cb2016/fake_vol_name.info', '/fake_basedir'], ['/fake_basedir/cb2016/fake_vol_name-random-suffix', '/fake_basedir'], ['/fake_basedir/cb2016/fake_vol_name.invalidext', '/fake_basedir'], ['/fake_basedir/cb2016/invalid_dir/404f-404', '/fake_basedir'], ['/fake_basedir/cb2016/other_dir/invalid-prefix-404f-404', '/fake_basedir'], ['/fake_basedir/cb2016/other_dir/404f-404.mod3-404f-404', '/fake_basedir'], ['/fake_basedir/cb2016/other_dir/404f-404.mod2-404f-404.invalid', '/fake_basedir']) @ddt.unpack def test_qemu_img_info_extended_backing_file_invalid(self, backing_file, basedir): """Tests using a special backing file template with invalid files The special backing file template used in here allows backing files in a subdirectory and with special extended names (.mod1-[], .mod2-[], ...). """ ext_template = ("(#basedir/[0-9a-f]+/)?(#volname(.(tmp-snap-)" "?[0-9a-f-]+)?#valid_ext|other_dir/(tmp-snap-)?" "[0-9a-f-]+(.(mod1-|mod2-)[0-9a-f-]+)?)$") self._test_qemu_img_info(backing_file=backing_file, basedir=basedir, template=remotefs.BackingFileTemplate( ext_template), valid_backing_file=False) @mock.patch.object(remotefs.RemoteFSSnapDriver, '_local_volume_dir') @mock.patch.object(remotefs.RemoteFSSnapDriver, 'get_active_image_from_info') def test_local_path_active_image(self, mock_get_active_img, mock_local_vol_dir): fake_vol_dir = 'fake_vol_dir' fake_active_img = 'fake_active_img_fname' mock_get_active_img.return_value = fake_active_img mock_local_vol_dir.return_value = fake_vol_dir active_img_path = self._driver._local_path_active_image( mock.sentinel.volume) exp_act_img_path = os.path.join(fake_vol_dir, fake_active_img) self.assertEqual(exp_act_img_path, active_img_path) mock_get_active_img.assert_called_once_with(mock.sentinel.volume) mock_local_vol_dir.assert_called_once_with(mock.sentinel.volume) @ddt.data({}, {'provider_location': None}, {'active_fpath': 'last_snap_img', 'expect_snaps': True}) @ddt.unpack @mock.patch.object(remotefs.RemoteFSSnapDriver, '_local_path_active_image') @mock.patch.object(remotefs.RemoteFSSnapDriver, 'local_path') def test_snapshots_exist(self, mock_local_path, mock_local_path_active_img, provider_location='fake_share', active_fpath='base_img_path', base_vol_path='base_img_path', expect_snaps=False): self._fake_volume.provider_location = provider_location mock_local_path.return_value = base_vol_path mock_local_path_active_img.return_value = active_fpath snaps_exist = self._driver._snapshots_exist(self._fake_volume) self.assertEqual(expect_snaps, snaps_exist) if provider_location: mock_local_path.assert_called_once_with(self._fake_volume) mock_local_path_active_img.assert_called_once_with( self._fake_volume) else: self.assertFalse(mock_local_path.called) @ddt.data({}, {'snapshots_exist': True}, 
{'force_temp_snap': True}) @ddt.unpack @mock.patch.object(sys.modules['cinder.objects'], "Snapshot") @mock.patch.object(remotefs.RemoteFSSnapDriver, 'local_path') @mock.patch.object(remotefs.RemoteFSSnapDriver, '_snapshots_exist') @mock.patch.object(remotefs.RemoteFSSnapDriver, '_copy_volume_image') @mock.patch.object(remotefs.RemoteFSSnapDriver, '_extend_volume') @mock.patch.object(remotefs.RemoteFSSnapDriver, '_validate_state') @mock.patch.object(remotefs.RemoteFSSnapDriver, '_create_snapshot') @mock.patch.object(remotefs.RemoteFSSnapDriver, '_delete_snapshot') @mock.patch.object(remotefs.RemoteFSSnapDriver, '_copy_volume_from_snapshot') def test_create_cloned_volume(self, mock_copy_volume_from_snapshot, mock_delete_snapshot, mock_create_snapshot, mock_validate_state, mock_extend_volume, mock_copy_volume_image, mock_snapshots_exist, mock_local_path, mock_obj_snap, snapshots_exist=False, force_temp_snap=False): drv = self._driver # prepare test volume = fake_volume.fake_volume_obj(self.context) src_vref_id = '375e32b2-804a-49f2-b282-85d1d5a5b9e1' src_vref = fake_volume.fake_volume_obj( self.context, id=src_vref_id, name='volume-%s' % src_vref_id, obj_context=self.context) src_vref.context = self.context mock_snapshots_exist.return_value = snapshots_exist drv._always_use_temp_snap_when_cloning = force_temp_snap vol_attrs = ['provider_location', 'size', 'id', 'name', 'status', 'volume_type', 'metadata', 'obj_context'] Volume = collections.namedtuple('Volume', vol_attrs) volume_ref = Volume(id=volume.id, metadata=volume.metadata, name=volume.name, provider_location=volume.provider_location, status=volume.status, size=volume.size, volume_type=volume.volume_type, obj_context=self.context,) snap_args_creation = { 'volume_id': src_vref.id, 'user_id': None, 'project_id': None, 'status': fields.SnapshotStatus.CREATING, 'progress': '0%', 'volume_size': src_vref.size, 'display_name': 'tmp-snap-%s' % volume.id, 'display_description': None, 'volume_type_id': src_vref.volume_type_id, 'encryption_key_id': None, } mock_obj_snap.return_value = mock.Mock() mock_obj_snap.return_value.create = mock.Mock() # end of prepare test # run test drv.create_cloned_volume(volume, src_vref) # evaluate test exp_acceptable_states = ['available', 'backing-up', 'downloading'] mock_validate_state.assert_called_once_with( src_vref.status, exp_acceptable_states, obj_description='source volume') if snapshots_exist or force_temp_snap: mock_obj_snap.return_value.create.assert_called_once_with() mock_obj_snap.assert_called_once_with( context=self.context, **snap_args_creation) mock_create_snapshot.assert_called_once_with( mock_obj_snap.return_value) mock_copy_volume_from_snapshot.assert_called_once_with( mock_obj_snap.return_value, volume_ref, volume['size'], src_encryption_key_id=None, new_encryption_key_id=None) mock_delete_snapshot.assert_called_once_with( mock_obj_snap.return_value) mock_obj_snap.return_value.destroy.assert_called_once_with() else: self.assertFalse(mock_create_snapshot.called) mock_snapshots_exist.assert_called_once_with(src_vref) mock_copy_volume_image.assert_called_once_with( mock_local_path.return_value, mock_local_path.return_value) mock_local_path.assert_has_calls( [mock.call(src_vref), mock.call(volume_ref)]) mock_extend_volume.assert_called_once_with(volume_ref, volume.size) @ddt.data(None, 'raw', 'qcow2') @mock.patch('cinder.objects.volume.Volume.save') @mock.patch.object(sys.modules['cinder.objects'], "Snapshot") @mock.patch.object(remotefs.RemoteFSSnapDriver, 'local_path') 
@mock.patch.object(remotefs.RemoteFSSnapDriver, '_snapshots_exist') @mock.patch.object(remotefs.RemoteFSSnapDriver, '_copy_volume_image') @mock.patch.object(remotefs.RemoteFSSnapDriver, '_extend_volume') @mock.patch.object(remotefs.RemoteFSSnapDriver, '_validate_state') @mock.patch.object(remotefs.RemoteFSSnapDriver, '_create_snapshot') @mock.patch.object(remotefs.RemoteFSSnapDriver, '_delete_snapshot') @mock.patch.object(remotefs.RemoteFSSnapDriver, '_copy_volume_from_snapshot') def test_create_cloned_volume_with_format( self, file_format, mock_copy_volume_from_snapshot, mock_delete_snapshot, mock_create_snapshot, mock_validate_state, mock_extend_volume, mock_copy_volume_image, mock_snapshots_exist, mock_local_path, mock_obj_snap, mock_save): drv = self._driver # prepare test volume = fake_volume.fake_volume_obj(self.context) src_vref_id = '375e32b2-804a-49f2-b282-85d1d5a5b9e1' src_vref = fake_volume.fake_volume_obj( self.context, id=src_vref_id, name='volume-%s' % src_vref_id, obj_context=self.context) src_vref.context = self.context if file_format: src_vref.admin_metadata = {'format': file_format} mock_snapshots_exist.return_value = False drv._always_use_temp_snap_when_cloning = False vol_attrs = ['provider_location', 'size', 'id', 'name', 'status', 'volume_type', 'metadata', 'obj_context'] Volume = collections.namedtuple('Volume', vol_attrs) volume_ref = Volume(id=volume.id, metadata=volume.metadata, name=volume.name, provider_location=volume.provider_location, status=volume.status, size=volume.size, volume_type=volume.volume_type, obj_context=self.context,) snap_args_creation = { 'volume_id': src_vref.id, 'user_id': None, 'project_id': None, 'status': fields.SnapshotStatus.CREATING, 'progress': '0%', 'volume_size': src_vref.size, 'display_name': 'tmp-snap-%s' % volume.id, 'display_description': None, 'volume_type_id': src_vref.volume_type_id, 'encryption_key_id': None, } snap_args_deletion = snap_args_creation.copy() snap_args_deletion["status"] = fields.SnapshotStatus.DELETED snap_args_deletion["deleted"] = True mock_obj_snap.return_value = mock.Mock() mock_obj_snap.return_value.create = mock.Mock() # end of prepare test # run test drv.create_cloned_volume(volume, src_vref) # evaluate test exp_acceptable_states = ['available', 'backing-up', 'downloading'] mock_validate_state.assert_called_once_with( src_vref.status, exp_acceptable_states, obj_description='source volume') self.assertFalse(mock_create_snapshot.called) mock_snapshots_exist.assert_called_once_with(src_vref) mock_copy_volume_image.assert_called_once_with( mock_local_path.return_value, mock_local_path.return_value) mock_local_path.assert_has_calls( [mock.call(src_vref), mock.call(volume_ref)]) mock_extend_volume.assert_called_once_with(volume_ref, volume.size) if file_format: self.assertEqual(file_format, volume.admin_metadata['format']) @mock.patch('tempfile.NamedTemporaryFile') @mock.patch('cinder.volume.volume_utils.check_encryption_provider', return_value={'encryption_key_id': fake.ENCRYPTION_KEY_ID}) def test_create_encrypted_volume(self, mock_check_enc_prov, mock_temp_file): class DictObj(object): # convert a dict to object w/ attributes def __init__(self, d): self.__dict__ = d drv = self._driver mock_temp_file.return_value.__enter__.side_effect = [ DictObj({'name': '/imgfile'}), DictObj({'name': '/passfile'})] key_mgr = fake_keymgr.fake_api() self.mock_object(castellan.key_manager, 'API', return_value=key_mgr) key_id = key_mgr.store(self.context, KeyObject()) self.volume_c.encryption_key_id = key_id enc_info = 
{'encryption_key_id': key_id, 'cipher': 'aes-xts-essiv', 'key_size': 256} remotefs_path = 'cinder.volume.drivers.remotefs.open' with mock.patch('cinder.volume.volume_utils.check_encryption_provider', return_value=enc_info), \ mock.patch(remotefs_path) as mock_open, \ mock.patch.object(drv, '_execute') as mock_exec: drv._create_encrypted_volume_file("/passfile", self.volume_c.size, enc_info, self.context) mock_open.assert_called_with('/imgfile', 'w') mock_exec.assert_called() @mock.patch('shutil.copyfile') @mock.patch.object(remotefs.RemoteFSSnapDriver, '_set_rw_permissions') def test_copy_volume_image(self, mock_set_perm, mock_copyfile): self._driver._copy_volume_image(mock.sentinel.src, mock.sentinel.dest) mock_copyfile.assert_called_once_with(mock.sentinel.src, mock.sentinel.dest) mock_set_perm.assert_called_once_with(mock.sentinel.dest) def test_create_regular_file(self): self._driver._create_regular_file('/path', 1) self._driver._execute.assert_called_once_with('dd', 'if=/dev/zero', 'of=/path', 'bs=1M', 'count=1024', run_as_root=True) @mock.patch.object(remotefs.RemoteFSSnapDriver, '_local_path_volume_info') @mock.patch.object(remotefs.RemoteFSSnapDriver, '_read_info_file') @mock.patch.object(remotefs.RemoteFSSnapDriver, '_local_volume_dir') @mock.patch.object(remotefs.RemoteFSSnapDriver, '_qemu_img_info') def test_get_snapshot_backing_file( self, mock_qemu_img_info, mock_local_vol_dir, mock_read_info_file, mock_local_path_vol_info): fake_snapshot_file_name = os.path.basename(self._fake_snapshot_path) fake_snapshot_info = {self._fake_snapshot.id: fake_snapshot_file_name} fake_snap_img_info = mock.Mock() fake_snap_img_info.backing_file = self._fake_volume.name mock_read_info_file.return_value = fake_snapshot_info mock_qemu_img_info.return_value = fake_snap_img_info mock_local_vol_dir.return_value = self._FAKE_MNT_POINT snap_backing_file = self._driver._get_snapshot_backing_file( self._fake_snapshot) self.assertEqual(os.path.basename(self._fake_volume_path), snap_backing_file) mock_local_path_vol_info.assert_called_once_with(self._fake_volume) mock_read_info_file.assert_called_once_with( mock_local_path_vol_info.return_value) mock_local_vol_dir.assert_called_once_with(self._fake_volume) mock_qemu_img_info.assert_called_once_with(self._fake_snapshot_path) @ddt.data({}, {'info_file_exists': True}, {'os_name': 'nt'}) @ddt.unpack @mock.patch('json.dump') @mock.patch('cinder.volume.drivers.remotefs.open') @mock.patch('os.path.exists') def test_write_info_file(self, mock_os_path_exists, mock_open, mock_json_dump, info_file_exists=False, os_name='posix'): mock_os_path_exists.return_value = info_file_exists fake_info_path = '/path/to/info' fake_snapshot_info = {'active': self._fake_snapshot_path} self._driver._execute = mock.Mock() self._driver._set_rw_permissions = mock.Mock() self._driver._write_info_file(fake_info_path, fake_snapshot_info) mock_open.assert_called_once_with(fake_info_path, 'w') mock_json_dump.assert_called_once_with( fake_snapshot_info, mock.ANY, indent=1, sort_keys=True) if info_file_exists or os.name == 'nt': self._driver._execute.assert_not_called() self._driver._set_rw_permissions.assert_not_called() else: self._driver._execute.assert_called_once_with( 'truncate', "-s0", fake_info_path, run_as_root=self._driver._execute_as_root) self._driver._set_rw_permissions.assert_called_once_with( fake_info_path) fake_snapshot_info.pop('active') self.assertRaises(exception.RemoteFSException, self._driver._write_info_file, fake_info_path, fake_snapshot_info) class 
RemoteFSPoolMixinTestCase(test.TestCase): def setUp(self): super(RemoteFSPoolMixinTestCase, self).setUp() # We'll instantiate this directly for now. self._driver = remotefs.RemoteFSPoolMixin() self.context = context.get_admin_context() @mock.patch.object(remotefs.RemoteFSPoolMixin, '_get_pool_name_from_volume') @mock.patch.object(remotefs.RemoteFSPoolMixin, '_get_share_from_pool_name') def test_find_share(self, mock_get_share_from_pool, mock_get_pool_from_volume): share = self._driver._find_share(mock.sentinel.volume) self.assertEqual(mock_get_share_from_pool.return_value, share) mock_get_pool_from_volume.assert_called_once_with( mock.sentinel.volume) mock_get_share_from_pool.assert_called_once_with( mock_get_pool_from_volume.return_value) def test_get_pool_name_from_volume(self): fake_pool = 'fake_pool' fake_host = 'fake_host@fake_backend#%s' % fake_pool fake_vol = fake_volume.fake_volume_obj( self.context, provider_location='fake_share', host=fake_host) pool_name = self._driver._get_pool_name_from_volume(fake_vol) self.assertEqual(fake_pool, pool_name) def test_update_volume_stats(self): share_total_gb = 3 share_free_gb = 2 share_used_gb = 4 # provisioned space self._driver._mounted_shares = [mock.sentinel.share] self._driver.configuration = mock.Mock() self._driver.configuration.safe_get.return_value = ( mock.sentinel.backend_name) self._driver.vendor_name = mock.sentinel.vendor_name self._driver.driver_volume_type = mock.sentinel.driver_volume_type self._driver._thin_provisioning_support = ( mock.sentinel.thin_prov_support) self._driver._thick_provisioning_support = ( mock.sentinel.thick_prov_support) self._driver.get_version = mock.Mock( return_value=mock.sentinel.driver_version) self._driver._ensure_shares_mounted = mock.Mock() self._driver._get_capacity_info = mock.Mock( return_value=(share_total_gb << 30, share_free_gb << 30, share_used_gb << 30)) self._driver._get_pool_name_from_share = mock.Mock( return_value=mock.sentinel.pool_name) expected_pool = { 'pool_name': mock.sentinel.pool_name, 'total_capacity_gb': float(share_total_gb), 'free_capacity_gb': float(share_free_gb), 'provisioned_capacity_gb': float(share_used_gb), 'reserved_percentage': ( self._driver.configuration.reserved_percentage), 'max_over_subscription_ratio': ( self._driver.configuration.max_over_subscription_ratio), 'thin_provisioning_support': ( mock.sentinel.thin_prov_support), 'thick_provisioning_support': ( mock.sentinel.thick_prov_support), 'QoS_support': False, } expected_stats = { 'volume_backend_name': mock.sentinel.backend_name, 'vendor_name': mock.sentinel.vendor_name, 'driver_version': mock.sentinel.driver_version, 'storage_protocol': mock.sentinel.driver_volume_type, 'total_capacity_gb': 0, 'free_capacity_gb': 0, 'pools': [expected_pool], } self._driver._update_volume_stats() self.assertDictEqual(expected_stats, self._driver._stats) self._driver._get_capacity_info.assert_called_once_with( mock.sentinel.share) self._driver.configuration.safe_get.assert_called_once_with( 'volume_backend_name') @ddt.ddt class RevertToSnapshotMixinTestCase(test.TestCase): _FAKE_MNT_POINT = '/mnt/fake_hash' def setUp(self): super(RevertToSnapshotMixinTestCase, self).setUp() self._driver = remotefs.RevertToSnapshotMixin() self._driver._remotefsclient = mock.Mock() self._driver._execute = mock.Mock() self._driver._delete = mock.Mock() self.context = context.get_admin_context() self._fake_volume = fake_volume.fake_volume_obj( self.context, provider_location='fake_share') self._fake_volume_path = 
os.path.join(self._FAKE_MNT_POINT, self._fake_volume.name) self._fake_snapshot = fake_snapshot.fake_snapshot_obj(self.context) self._fake_snapshot_path = (self._fake_volume_path + '.' + self._fake_snapshot.id) self._fake_snapshot_name = os.path.basename( self._fake_snapshot_path) self._fake_snapshot.volume = self._fake_volume @ddt.data(True, False) @mock.patch.object(remotefs.RevertToSnapshotMixin, '_validate_state', create=True) @mock.patch.object(remotefs.RevertToSnapshotMixin, '_read_info_file', create=True) @mock.patch.object(remotefs.RevertToSnapshotMixin, '_local_path_volume_info', create=True) @mock.patch.object(remotefs.RevertToSnapshotMixin, '_qemu_img_info', create=True) @mock.patch.object(remotefs.RevertToSnapshotMixin, '_do_create_snapshot', create=True) @mock.patch.object(remotefs.RevertToSnapshotMixin, '_local_volume_dir', create=True) def test_revert_to_snapshot(self, is_latest_snapshot, mock_local_vol_dir, mock_do_create_snapshot, mock_qemu_img_info, mock_local_path_vol_info, mock_read_info_file, mock_validate_state): active_file = (self._fake_snapshot_name if is_latest_snapshot else 'fake_latest_snap') fake_snapshot_info = { 'active': active_file, self._fake_snapshot.id: self._fake_snapshot_name } mock_read_info_file.return_value = fake_snapshot_info fake_snap_img_info = mock.Mock() fake_snap_img_info.backing_file = self._fake_volume.name mock_qemu_img_info.return_value = fake_snap_img_info mock_local_vol_dir.return_value = self._FAKE_MNT_POINT if is_latest_snapshot: self._driver._revert_to_snapshot(self.context, self._fake_volume, self._fake_snapshot) self._driver._delete.assert_called_once_with( self._fake_snapshot_path) mock_do_create_snapshot.assert_called_once_with( self._fake_snapshot, fake_snap_img_info.backing_file, self._fake_snapshot_path) mock_qemu_img_info.assert_called_once_with( self._fake_snapshot_path, self._fake_volume.name) elif not is_latest_snapshot: self.assertRaises(exception.InvalidSnapshot, self._driver._revert_to_snapshot, self.context, self._fake_volume, self._fake_snapshot) self._driver._delete.assert_not_called() exp_acceptable_states = ['available', 'reverting'] mock_validate_state.assert_called_once_with( self._fake_snapshot.volume.status, exp_acceptable_states) mock_local_path_vol_info.assert_called_once_with( self._fake_snapshot.volume) mock_read_info_file.assert_called_once_with( mock_local_path_vol_info.return_value) @ddt.ddt class RemoteFSManageableVolumesTestCase(test.TestCase): def setUp(self): super(RemoteFSManageableVolumesTestCase, self).setUp() # We'll instantiate this directly for now. 
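        # NOTE: the mixin is used stand-alone here, without a concrete
        # RemoteFS driver behind it, so helpers that normally come from the
        # driver (e.g. _get_mount_point_for_share, _qemu_img_info) are patched
        # onto it in the tests below with create=True.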
self._driver = remotefs.RemoteFSManageableVolumesMixin() @mock.patch.object(remotefs.RemoteFSManageableVolumesMixin, '_get_mount_point_for_share', create=True) @mock.patch.object(os.path, 'isfile') def test_get_manageable_vol_location_invalid(self, mock_is_file, mock_get_mount_point): self.assertRaises(exception.ManageExistingInvalidReference, self._driver._get_manageable_vol_location, {}) self._driver._mounted_shares = [] self.assertRaises(exception.ManageExistingInvalidReference, self._driver._get_manageable_vol_location, {'source-name': '//hots/share/img'}) self._driver._mounted_shares = ['//host/share'] mock_get_mount_point.return_value = '/fake_mountpoint' mock_is_file.return_value = False self.assertRaises(exception.ManageExistingInvalidReference, self._driver._get_manageable_vol_location, {'source-name': '//host/share/subdir/img'}) mock_is_file.assert_any_call( os.path.normpath('/fake_mountpoint/subdir/img')) @mock.patch.object(remotefs.RemoteFSManageableVolumesMixin, '_get_mount_point_for_share', create=True) @mock.patch.object(os.path, 'isfile') def test_get_manageable_vol_location(self, mock_is_file, mock_get_mount_point): self._driver._mounted_shares = [ '//host/share2/subdir', '//host/share/subdir', 'host:/dir/subdir' ] mock_get_mount_point.return_value = '/fake_mountpoint' mock_is_file.return_value = True location_info = self._driver._get_manageable_vol_location( {'source-name': 'host:/dir/subdir/import/img'}) exp_location_info = { 'share': 'host:/dir/subdir', 'mountpoint': mock_get_mount_point.return_value, 'vol_local_path': '/fake_mountpoint/import/img', 'vol_remote_path': 'host:/dir/subdir/import/img' } self.assertEqual(exp_location_info, location_info) @mock.patch.object(remotefs.RemoteFSManageableVolumesMixin, '_get_mount_point_for_share', create=True) @mock.patch.object(os.path, 'isfile') @mock.patch.object(os.path, 'normpath', lambda x: x.replace('/', '\\')) @mock.patch.object(os.path, 'normcase', lambda x: x.lower()) @mock.patch.object(os.path, 'join', lambda *args: '\\'.join(args)) @mock.patch.object(os.path, 'sep', '\\') def test_get_manageable_vol_location_win32(self, mock_is_file, mock_get_mount_point): self._driver._mounted_shares = [ '//host/share2/subdir', '//host/share/subdir', 'host:/dir/subdir' ] mock_get_mount_point.return_value = r'c:\fake_mountpoint' mock_is_file.return_value = True location_info = self._driver._get_manageable_vol_location( {'source-name': '//Host/share/Subdir/import/img'}) exp_location_info = { 'share': '//host/share/subdir', 'mountpoint': mock_get_mount_point.return_value, 'vol_local_path': r'c:\fake_mountpoint\import\img', 'vol_remote_path': r'\\host\share\subdir\import\img' } self.assertEqual(exp_location_info, location_info) def test_get_managed_vol_exp_path(self): fake_vol = fake_volume.fake_volume_obj(mock.sentinel.context) vol_location = dict(mountpoint='fake-mountpoint') exp_path = os.path.join(vol_location['mountpoint'], fake_vol.name) ret_val = self._driver._get_managed_vol_expected_path( fake_vol, vol_location) self.assertEqual(exp_path, ret_val) @ddt.data( {'already_managed': True}, {'qemu_side_eff': exception.RemoteFSInvalidBackingFile}, {'qemu_side_eff': Exception}, {'qemu_side_eff': [mock.Mock(backing_file=None, file_format='fakefmt')]}, {'qemu_side_eff': [mock.Mock(backing_file='backing_file', file_format='raw')]} ) @ddt.unpack @mock.patch.object(remotefs.RemoteFSManageableVolumesMixin, '_qemu_img_info', create=True) def test_check_unmanageable_volume(self, mock_qemu_info, qemu_side_eff=None, already_managed=False): 
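        # Each ddt case above should make the volume unmanageable: it is
        # already managed, qemu-img info raises (RemoteFSInvalidBackingFile or
        # a generic Exception), the image format is unrecognized, or the image
        # has a backing file.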
mock_qemu_info.side_effect = qemu_side_eff manageable = self._driver._is_volume_manageable( mock.sentinel.volume_path, already_managed=already_managed)[0] self.assertFalse(manageable) @mock.patch.object(remotefs.RemoteFSManageableVolumesMixin, '_qemu_img_info', create=True) def test_check_manageable_volume(self, mock_qemu_info, qemu_side_eff=None, already_managed=False): mock_qemu_info.return_value = mock.Mock( backing_file=None, file_format='raw') manageable = self._driver._is_volume_manageable( mock.sentinel.volume_path)[0] self.assertTrue(manageable) @mock.patch.object(remotefs.RemoteFSManageableVolumesMixin, '_get_manageable_vol_location') @mock.patch.object(remotefs.RemoteFSManageableVolumesMixin, '_is_volume_manageable') def test_manage_existing_unmanageable(self, mock_check_manageable, mock_get_location): fake_vol = fake_volume.fake_volume_obj(mock.sentinel.context) mock_get_location.return_value = dict( vol_local_path=mock.sentinel.local_path) mock_check_manageable.return_value = False, mock.sentinel.resason self.assertRaises(exception.ManageExistingInvalidReference, self._driver.manage_existing, fake_vol, mock.sentinel.existing_ref) mock_get_location.assert_called_once_with(mock.sentinel.existing_ref) mock_check_manageable.assert_called_once_with( mock.sentinel.local_path) @mock.patch.object(remotefs.RemoteFSManageableVolumesMixin, '_get_manageable_vol_location') @mock.patch.object(remotefs.RemoteFSManageableVolumesMixin, '_is_volume_manageable') @mock.patch.object(remotefs.RemoteFSManageableVolumesMixin, '_set_rw_permissions', create=True) @mock.patch.object(remotefs.RemoteFSManageableVolumesMixin, '_get_managed_vol_expected_path') @mock.patch.object(os, 'rename') def test_manage_existing_manageable(self, mock_rename, mock_get_exp_path, mock_set_perm, mock_check_manageable, mock_get_location): fake_vol = fake_volume.fake_volume_obj(mock.sentinel.context) mock_get_location.return_value = dict( vol_local_path=mock.sentinel.local_path, share=mock.sentinel.share) mock_check_manageable.return_value = True, None exp_ret_val = {'provider_location': mock.sentinel.share} ret_val = self._driver.manage_existing(fake_vol, mock.sentinel.existing_ref) self.assertEqual(exp_ret_val, ret_val) mock_get_exp_path.assert_called_once_with( fake_vol, mock_get_location.return_value) mock_set_perm.assert_called_once_with(mock.sentinel.local_path) mock_rename.assert_called_once_with(mock.sentinel.local_path, mock_get_exp_path.return_value) @mock.patch.object(image_utils, 'qemu_img_info') def _get_rounded_manageable_image_size(self, mock_qemu_info): mock_qemu_info.return_value.virtual_size = 1 << 30 + 1 exp_rounded_size_gb = 2 size = self._driver._get_rounded_manageable_image_size( mock.sentinel.image_path) self.assertEqual(exp_rounded_size_gb, size) mock_qemu_info.assert_called_once_with(mock.sentinel.image_path) @mock.patch.object(remotefs.RemoteFSManageableVolumesMixin, '_get_manageable_vol_location') @mock.patch.object(remotefs.RemoteFSManageableVolumesMixin, '_get_rounded_manageable_image_size') def test_manage_existing_get_size(self, mock_get_size, mock_get_location): mock_get_location.return_value = dict( vol_local_path=mock.sentinel.image_path) size = self._driver.manage_existing_get_size( mock.sentinel.volume, mock.sentinel.existing_ref) self.assertEqual(mock_get_size.return_value, size) mock_get_location.assert_called_once_with(mock.sentinel.existing_ref) mock_get_size.assert_called_once_with(mock.sentinel.image_path) @ddt.data( {}, {'managed_volume': mock.Mock(size=mock.sentinel.sz), 'exp_size': 
mock.sentinel.sz, 'manageable_check_ret_val': False, 'exp_manageable': False}, {'exp_size': None, 'get_size_side_effect': Exception, 'exp_manageable': False}) @ddt.unpack @mock.patch.object(remotefs.RemoteFSManageableVolumesMixin, '_is_volume_manageable') @mock.patch.object(remotefs.RemoteFSManageableVolumesMixin, '_get_rounded_manageable_image_size') @mock.patch.object(remotefs.RemoteFSManageableVolumesMixin, '_get_mount_point_for_share', create=True) def test_get_manageable_volume( self, mock_get_mount_point, mock_get_size, mock_check_manageable, managed_volume=None, get_size_side_effect=(mock.sentinel.size_gb, ), manageable_check_ret_val=True, exp_size=mock.sentinel.size_gb, exp_manageable=True): share = '//host/share' mountpoint = '/fake-mountpoint' volume_path = '/fake-mountpoint/subdir/vol' exp_ret_val = { 'reference': {'source-name': '//host/share/subdir/vol'}, 'size': exp_size, 'safe_to_manage': exp_manageable, 'reason_not_safe': mock.ANY, 'cinder_id': managed_volume.id if managed_volume else None, 'extra_info': None, } mock_get_size.side_effect = get_size_side_effect mock_check_manageable.return_value = (manageable_check_ret_val, mock.sentinel.reason) mock_get_mount_point.return_value = mountpoint ret_val = self._driver._get_manageable_volume( share, volume_path, managed_volume) self.assertEqual(exp_ret_val, ret_val) mock_check_manageable.assert_called_once_with( volume_path, already_managed=managed_volume is not None) mock_get_mount_point.assert_called_once_with(share) if managed_volume: mock_get_size.assert_not_called() else: mock_get_size.assert_called_once_with(volume_path) @mock.patch.object(remotefs.RemoteFSManageableVolumesMixin, '_get_mount_point_for_share', create=True) @mock.patch.object(remotefs.RemoteFSManageableVolumesMixin, '_get_manageable_volume') @mock.patch.object(os, 'walk') @mock.patch.object(os.path, 'join', lambda *args: '/'.join(args)) def test_get_share_manageable_volumes( self, mock_walk, mock_get_manageable_volume, mock_get_mount_point): mount_path = '/fake-mountpoint' mock_walk.return_value = [ [mount_path, ['subdir'], ['volume-1.vhdx']], ['/fake-mountpoint/subdir', [], ['volume-0', 'volume-3.vhdx']]] mock_get_manageable_volume.side_effect = [ Exception, mock.sentinel.managed_volume] self._driver._MANAGEABLE_IMAGE_RE = re.compile(r'.*\.(?:vhdx)$') managed_volumes = {'volume-1': mock.sentinel.vol1} exp_manageable = [mock.sentinel.managed_volume] manageable_volumes = self._driver._get_share_manageable_volumes( mock.sentinel.share, managed_volumes) self.assertEqual(exp_manageable, manageable_volumes) mock_get_manageable_volume.assert_has_calls( [mock.call(mock.sentinel.share, '/fake-mountpoint/volume-1.vhdx', mock.sentinel.vol1), mock.call(mock.sentinel.share, '/fake-mountpoint/subdir/volume-3.vhdx', None)]) @mock.patch.object(remotefs.RemoteFSManageableVolumesMixin, '_get_share_manageable_volumes') @mock.patch.object(volume_utils, 'paginate_entries_list') def test_get_manageable_volumes(self, mock_paginate, mock_get_share_vols): fake_vol = fake_volume.fake_volume_obj(mock.sentinel.context) self._driver._mounted_shares = [mock.sentinel.share0, mock.sentinel.share1] mock_get_share_vols.side_effect = [ Exception, [mock.sentinel.manageable_vol]] pagination_args = [ mock.sentinel.marker, mock.sentinel.limit, mock.sentinel.offset, mock.sentinel.sort_keys, mock.sentinel.sort_dirs] ret_val = self._driver.get_manageable_volumes( [fake_vol], *pagination_args) self.assertEqual(mock_paginate.return_value, ret_val) mock_paginate.assert_called_once_with( 
[mock.sentinel.manageable_vol], *pagination_args) exp_managed_vols_dict = {fake_vol.name: fake_vol} mock_get_share_vols.assert_has_calls( [mock.call(share, exp_managed_vols_dict) for share in self._driver._mounted_shares]) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/volume/drivers/test_rsd.py0000664000175000017500000015545500000000000023661 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import sys from unittest import mock import fixtures from cinder import exception from cinder.i18n import _ from cinder.tests.unit import test from cinder.tests.unit.volume import test_driver MOCK_URL = "http://www.mock.url.com:4242" MOCK_USER = "mock_user" MOCK_PASSWORD = "mock_password" class MockHTTPError(Exception): def __init__(self, msg): super(MockHTTPError, self).__init__(msg) class MockConnectionError(Exception): def __init__(self, msg): super(MockConnectionError, self).__init__(msg) class MockResourceNotFoundError(Exception): def __init__(self, msg): super(MockResourceNotFoundError, self).__init__(msg) class MockBadRequestError(Exception): def __init__(self, msg): super(MockBadRequestError, self).__init__(msg) self.body = { "@Message.ExtendedInfo": [{"Message": "Cannot delete source snapshot volume when " "other clone volumes are based on this snapshot."}]} class MockInvalidParameterValueError(Exception): def __init__(self, msg): super(MockInvalidParameterValueError, self).__init__(msg) fake_RSDLib = mock.Mock() fake_rsd_lib = mock.Mock() fake_rsd_lib.RSDLib = mock.MagicMock(return_value=fake_RSDLib) fake_sushy = mock.Mock() fake_sushy.exceptions = mock.Mock() fake_sushy.exceptions.HTTPError = MockHTTPError fake_sushy.exceptions.ConnectionError = MockConnectionError fake_sushy.exceptions.ResourceNotFoundError = MockResourceNotFoundError fake_sushy.exceptions.BadRequestError = MockBadRequestError fake_sushy.exceptions.InvalidParameterValueError = ( MockInvalidParameterValueError) sys.modules['rsd_lib'] = fake_rsd_lib sys.modules['sushy'] = fake_sushy from cinder.volume.drivers import rsd as rsd_driver # noqa class RSDClientTestCase(test.TestCase): def setUp(self): super(RSDClientTestCase, self).setUp() self.mock_rsd_lib = mock.Mock() self.mock_rsd_lib._rsd_api_version = "2.4.0" self.mock_rsd_lib._redfish_version = "1.1.0" self.mock_rsd_lib_factory = mock.MagicMock( return_value=self.mock_rsd_lib) fake_RSDLib.factory = self.mock_rsd_lib_factory self.rsd_client = rsd_driver.RSDClient(self.mock_rsd_lib) self.uuid = "84cff9ea-de0f-4841-8645-58620adf49b2" self.url = "/redfish/v1/Resource/Type" self.resource_url = self.url + "/" + self.uuid def _generate_rsd_storage_objects(self): self._mock_stor_obj_1 = mock.Mock() self._mock_stor_obj_2 = mock.Mock() self._mock_stor_obj_3 = mock.Mock() self._mock_drive_obj_1 = mock.Mock() self._mock_drive_obj_2 = mock.Mock() self._mock_drive_obj_3 = mock.Mock() self._mock_drive_obj_1.protocol = "NVMe" self._mock_drive_obj_2.protocol = "Blank" 
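        # NOTE: only the drive reporting the "NVMe" protocol should mark its
        # storage service as usable by default; _get_storages() is expected to
        # filter out the others (see test_get_storages/_non_nvme below).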
self._mock_drive_obj_3.protocol = "" self._mock_stor_obj_1.drives.get_members = mock.MagicMock( return_value=[self._mock_drive_obj_1]) self._mock_stor_obj_2.drives.get_members = mock.MagicMock( return_value=[self._mock_drive_obj_2]) self._mock_stor_obj_3.drives.get_members = mock.MagicMock( return_value=[self._mock_drive_obj_3]) self._mock_stor_collection = [self._mock_stor_obj_1, self._mock_stor_obj_2, self._mock_stor_obj_3] def test_initialize(self): rsd_client = rsd_driver.RSDClient.initialize(MOCK_URL, MOCK_USER, MOCK_PASSWORD, verify=True) self.assertIsInstance(rsd_client, rsd_driver.RSDClient) def test_initialize_rsd_api_incorrect_version(self): self.mock_rsd_lib._rsd_api_version = "2.3.0" rsd_client_init = rsd_driver.RSDClient.initialize self.assertRaises(exception.VolumeBackendAPIException, rsd_client_init, MOCK_URL, MOCK_USER, MOCK_PASSWORD, False) def test_initialize_rsd_api_higher_version(self): self.mock_rsd_lib._rsd_api_version = "2.5.0" rsd_client = rsd_driver.RSDClient.initialize(MOCK_URL, MOCK_USER, MOCK_PASSWORD, verify=True) self.assertIsInstance(rsd_client, rsd_driver.RSDClient) def test_initialize_rsd_lib_incorrect_version(self): self.mock_rsd_lib._redfish_version = "1.0.0" rsd_client_init = rsd_driver.RSDClient.initialize self.assertRaises(exception.VolumeBackendAPIException, rsd_client_init, MOCK_URL, MOCK_USER, MOCK_PASSWORD, False) def test_initialize_rsd_lib_higher_version(self): self.mock_rsd_lib._redfish_version = "1.5.0" rsd_client = rsd_driver.RSDClient.initialize(MOCK_URL, MOCK_USER, MOCK_PASSWORD, verify=True) self.assertIsInstance(rsd_client, rsd_driver.RSDClient) def test_initialize_invalid_credentials(self): self.mock_rsd_lib_factory.side_effect = ( fixtures._fixtures.timeout.TimeoutException) rsd_client_init = rsd_driver.RSDClient.initialize self.assertRaises(exception.VolumeBackendAPIException, rsd_client_init, MOCK_URL, MOCK_USER, MOCK_PASSWORD, False) def test_get_storage(self): mock_stor_serv = mock.Mock() self.mock_rsd_lib.get_storage_service = mock.MagicMock( return_value=mock_stor_serv) stor_serv = self.rsd_client._get_storage(self.resource_url) self.assertEqual(mock_stor_serv, stor_serv) self.mock_rsd_lib.get_storage_service.assert_called_with(self.url) def test_get_storages(self): self._generate_rsd_storage_objects() get_mem = self.mock_rsd_lib.get_storage_service_collection.return_value get_mem.get_members.return_value = self._mock_stor_collection storages = self.rsd_client._get_storages() self.assertEqual([self._mock_stor_obj_1], storages) def test_get_storages_non_nvme(self): self._generate_rsd_storage_objects() get_mem = self.mock_rsd_lib.get_storage_service_collection.return_value get_mem.get_members.return_value = self._mock_stor_collection storages = self.rsd_client._get_storages(False) self.assertEqual([self._mock_stor_obj_1, self._mock_stor_obj_2, self._mock_stor_obj_3], storages) def test_get_storages_empty_storage(self): self._generate_rsd_storage_objects() get_mem = self.mock_rsd_lib.get_storage_service_collection.return_value get_mem.get_members.return_value = [] storages = self.rsd_client._get_storages() self.assertEqual([], storages) def test_get_storages_empty_drive(self): self._generate_rsd_storage_objects() get_mem = self.mock_rsd_lib.get_storage_service_collection.return_value get_mem.get_members.return_value = self._mock_stor_collection self._mock_stor_obj_1.drives.get_members = mock.MagicMock( return_value=[]) storages = self.rsd_client._get_storages() self.assertEqual([], storages) def test_get_volume(self): mock_stor_serv = 
mock.Mock() mock_vol_serv = mock.Mock() self.mock_rsd_lib.get_storage_service = mock.MagicMock( return_value=mock_stor_serv) mock_stor_serv.volumes.get_member = mock.MagicMock( return_value=mock_vol_serv) vol_serv = self.rsd_client._get_volume(self.resource_url) self.assertEqual(mock_vol_serv, vol_serv) self.mock_rsd_lib.get_storage_service.assert_called_with(self.url) mock_stor_serv.volumes.get_member.assert_called_with(self.resource_url) def test_get_providing_pool(self): mock_providing_pool_collection = mock.Mock() mock_providing_pool_collection.path = mock.Mock() mock_providing_pool = mock.Mock() mock_providing_pool.get_members = mock.Mock( return_value=[mock_providing_pool_collection]) mock_volume = mock.Mock() mock_volume.capacity_sources = [mock.Mock()] mock_volume.capacity_sources[0].providing_pools = [mock_providing_pool] provider_pool = self.rsd_client._get_providing_pool(mock_volume) self.assertEqual(mock_providing_pool_collection.path, provider_pool) def test_get_providing_pool_no_capacity(self): mock_volume = mock.Mock() mock_volume.capacity_sources = [] self.assertRaises(exception.ValidationError, self.rsd_client._get_providing_pool, mock_volume) def test_get_providing_pool_no_pools(self): mock_volume = mock.Mock() mock_volume.capacity_sources = [mock.Mock()] mock_volume.capacity_sources[0].providing_pools = [] self.assertRaises(exception.ValidationError, self.rsd_client._get_providing_pool, mock_volume) def test_get_providing_pool_too_many_pools(self): mock_volume = mock.Mock() mock_volume.capacity_sources = [mock.Mock()] mock_volume.capacity_sources[0].providing_pools = [mock.Mock(), mock.Mock()] self.assertRaises(exception.ValidationError, self.rsd_client._get_providing_pool, mock_volume) def test_create_vol_or_snap(self): mock_stor = mock.Mock() size_in_bytes = 10737418240 mock_stor.volumes.create_volume = mock.Mock( return_value=self.resource_url) stor_url = self.rsd_client._create_vol_or_snap(mock_stor, size_in_bytes) self.assertEqual(self.resource_url, stor_url) mock_stor.volumes.create_volume.assert_called_with( size_in_bytes, capacity_sources=None, replica_infos=None) def test_create_vol_or_snap_stor_pool(self): mock_stor = mock.Mock() size_in_bytes = 10737418240 stor_uuid = "/redfish/v1/StorageService/NvMeoE1/StoragePools/2" expected_capacity = [{ "ProvidingPools": [{ "@odata.id": stor_uuid }] }] mock_stor.volumes.create_volume = mock.Mock( return_value=self.resource_url) stor_url = self.rsd_client._create_vol_or_snap(mock_stor, size_in_bytes, pool_url=stor_uuid) self.assertEqual(self.resource_url, stor_url) mock_stor.volumes.create_volume.assert_called_with( size_in_bytes, capacity_sources=expected_capacity, replica_infos=None) def test_create_vol_or_snap_source_snap(self): mock_stor = mock.Mock() size_in_bytes = 10737418240 stor_uuid = "/redfish/v1/StorageService/NvMeoE1/StoragePools/2" expected_replica = [{ "ReplicaType": "Clone", "Replica": {"@odata.id": stor_uuid} }] mock_stor.volumes.create_volume = mock.Mock( return_value=self.resource_url) stor_url = self.rsd_client._create_vol_or_snap(mock_stor, size_in_bytes, source_snap=stor_uuid) self.assertEqual(self.resource_url, stor_url) mock_stor.volumes.create_volume.assert_called_with( size_in_bytes, capacity_sources=None, replica_infos=expected_replica) def test_create_vol_or_snap_source_vol(self): mock_stor = mock.Mock() size_in_bytes = 10737418240 stor_uuid = "/redfish/v1/StorageService/NvMeoE1/StoragePools/2" expected_replica = [{ "ReplicaType": "Snapshot", "Replica": {"@odata.id": stor_uuid} }] 
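        # A source volume is expected to produce a "Snapshot" ReplicaType,
        # while a source snapshot (previous test) produces a "Clone" one.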
mock_stor.volumes.create_volume = mock.Mock( return_value=self.resource_url) stor_url = self.rsd_client._create_vol_or_snap(mock_stor, size_in_bytes, source_vol=stor_uuid) self.assertEqual(self.resource_url, stor_url) mock_stor.volumes.create_volume.assert_called_with( size_in_bytes, capacity_sources=None, replica_infos=expected_replica) def test_create_vol_or_snap_source_snap_vol(self): mock_stor = mock.Mock() size_in_bytes = 10737418240 stor_uuid = "/redfish/v1/StorageService/NvMeoE1/StoragePools/2" mock_stor.volumes.create_volume = mock.Mock( return_value=self.resource_url) self.assertRaises(exception.InvalidInput, self.rsd_client._create_vol_or_snap, mock_stor, size_in_bytes, source_snap=stor_uuid, source_vol=stor_uuid) def test_create_volume(self): self._generate_rsd_storage_objects() size_in_Gb = 10 expected_size_in_bytes = 10737418240 self._mock_stor_obj_1.volumes.create_volume = mock.Mock( return_value=self.resource_url) self.rsd_client._get_storages = mock.Mock( return_value=[self._mock_stor_obj_1]) stor_url = self.rsd_client.create_volume(size_in_Gb) self._mock_stor_obj_1.volumes.create_volume.assert_called_with( expected_size_in_bytes, capacity_sources=None, replica_infos=None) self.assertEqual(self.resource_url, stor_url) def test_create_volume_no_storage(self): self._generate_rsd_storage_objects() size_in_Gb = 10 self.rsd_client._get_storages = mock.Mock( return_value=[]) self.assertRaises(exception.VolumeBackendAPIException, self.rsd_client.create_volume, size_in_Gb) def test_create_volume_multiple_storages(self): self._generate_rsd_storage_objects() size_in_Gb = 10 expected_size_in_bytes = 10737418240 mock_resp = mock.Mock() mock_resp.status = "404" self._mock_stor_obj_1.volumes.create_volume = mock.Mock( return_value=self.resource_url) self._mock_stor_obj_2.volumes.create_volume = mock.Mock( side_effect=MockHTTPError("HTTP Error")) self._mock_stor_obj_3.volumes.create_volume = mock.Mock( side_effect=MockConnectionError("Connection Error")) self.rsd_client._get_storages = mock.Mock( return_value=[self._mock_stor_obj_3, self._mock_stor_obj_2, self._mock_stor_obj_1]) stor_url = self.rsd_client.create_volume(size_in_Gb) self._mock_stor_obj_1.volumes.create_volume.assert_called_with( expected_size_in_bytes, capacity_sources=None, replica_infos=None) self.assertEqual(self.resource_url, stor_url) def test_clone_volume(self): mock_volume = mock.Mock() mock_volume.capacity_bytes = 10737418240 mock_volume.capacity_sources = [mock.Mock()] mock_volume.capacity_sources[0].providing_pools = [mock.Mock()] mock_storage = mock.Mock() self.rsd_client._get_volume = mock.Mock(return_value=mock_volume) self.rsd_client._get_storage = mock.Mock(return_value=mock_storage) self.rsd_client._create_vol_or_snap = mock.Mock( return_value=self.resource_url) self.rsd_client._get_providing_pool = mock.Mock( return_value=self.resource_url) vol_url, snap_url = self.rsd_client.clone_volume(self.resource_url) self.assertEqual(self.resource_url, vol_url) self.assertEqual(self.resource_url, snap_url) self.rsd_client._create_vol_or_snap.assert_called_with( mock.ANY, 10737418240, pool_url=self.resource_url, source_snap=self.resource_url) def test_clone_volume_size_increase(self): mock_volume = mock.Mock() mock_volume.capacity_bytes = 10737418240 new_size = 20 mock_volume.capacity_sources = [mock.Mock()] mock_volume.capacity_sources[0].providing_pools = [mock.Mock()] mock_storage = mock.Mock() self.rsd_client._get_volume = mock.Mock(return_value=mock_volume) self.rsd_client._get_storage = 
mock.Mock(return_value=mock_storage) self.rsd_client._create_vol_or_snap = mock.Mock( return_value=self.resource_url) self.rsd_client._get_providing_pool = mock.Mock( return_value=self.resource_url) vol_url, snap_url = self.rsd_client.clone_volume(self.resource_url, new_size) self.assertEqual(self.resource_url, vol_url) self.assertEqual(self.resource_url, snap_url) self.rsd_client._create_vol_or_snap.assert_called_with( mock.ANY, 21474836480, pool_url=self.resource_url, source_snap=self.resource_url) def test_clone_volume_fail(self): mock_volume = mock.Mock() mock_volume.capacity_bytes = 10737418240 mock_volume.capacity_sources = [mock.Mock()] mock_volume.capacity_sources[0].providing_pools = [mock.Mock()] mock_storage = mock.Mock() self.rsd_client._get_volume = mock.Mock(return_value=mock_volume) self.rsd_client._get_storage = mock.Mock(return_value=mock_storage) self.rsd_client.delete_vol_or_snap = mock.Mock() self.rsd_client._create_vol_or_snap = mock.Mock( return_value=self.resource_url, side_effect=[None, exception.InvalidInput( reason=(_("_create_vol_or_snap failed")))]) self.rsd_client._get_providing_pool = mock.Mock( return_value=self.resource_url) self.assertRaises(exception.VolumeBackendAPIException, self.rsd_client.clone_volume, self.resource_url) self.rsd_client.delete_vol_or_snap.assert_called_once() def test_create_volume_from_snap(self): mock_snap = mock.Mock() mock_storage = mock.Mock() mock_snap.capacity_bytes = 10737418240 self.rsd_client._get_storage = mock.Mock(return_value=mock_storage) self.rsd_client._get_volume = mock.Mock(return_value=mock_snap) self.rsd_client._get_providing_pool = mock.Mock( return_value=self.resource_url) self.rsd_client._create_vol_or_snap = mock.Mock( return_value=self.resource_url) volume_url = self.rsd_client.create_volume_from_snap(self.resource_url) self.assertEqual(self.resource_url, volume_url) self.rsd_client._create_vol_or_snap.assert_called_with( mock.ANY, 10737418240, pool_url=self.resource_url, source_snap=self.resource_url) def test_create_volume_from_snap_with_size(self): mock_snap = mock.Mock() mock_storage = mock.Mock() mock_snap.capacity_bytes = 10737418240 expected_capacity_bytes = 21474836480 self.rsd_client._get_storage = mock.Mock(return_value=mock_storage) self.rsd_client._get_volume = mock.Mock(return_value=mock_snap) self.rsd_client._get_providing_pool = mock.Mock( return_value=self.resource_url) self.rsd_client._create_vol_or_snap = mock.Mock( return_value=self.resource_url) volume_url = self.rsd_client.create_volume_from_snap( self.resource_url, 20) self.assertEqual(self.resource_url, volume_url) self.rsd_client._create_vol_or_snap.assert_called_with( mock.ANY, expected_capacity_bytes, pool_url=self.resource_url, source_snap=self.resource_url) def test_create_volume_from_snap_create_failed(self): mock_snap = mock.Mock() mock_storage = mock.Mock() mock_snap.capacity_bytes = 10737418240 self.rsd_client._get_storage = mock.Mock(return_value=mock_storage) self.rsd_client._get_volume = mock.Mock(return_value=mock_snap) self.rsd_client._get_providing_pool = mock.Mock( return_value=self.resource_url) self.rsd_client._create_vol_or_snap = mock.Mock( return_value=self.resource_url, side_effect=[exception.InvalidInput( reason=_("_create_vol_or_snap failed."))]) self.assertRaises( exception.VolumeBackendAPIException, self.rsd_client.create_volume_from_snap, self.resource_url) def test_delete_vol_or_snap(self): mock_volume = mock.Mock() mock_volume.links.endpoints = [] mock_volume.delete = mock.Mock() self.rsd_client._get_volume = 
mock.Mock(return_value=mock_volume) self.rsd_client.delete_vol_or_snap(self.resource_url) mock_volume.delete.assert_called_once() def test_delete_vol_or_snap_failed_delete(self): mock_volume = mock.Mock() mock_volume.links.endpoints = [] mock_volume.delete = mock.Mock(side_effect=[ RuntimeError("delete error")]) self.rsd_client._get_volume = mock.Mock(return_value=mock_volume) self.assertRaises( exception.VolumeBackendAPIException, self.rsd_client.delete_vol_or_snap, self.resource_url) def test_delete_vol_or_snap_non_exist(self): mock_volume = mock.Mock() mock_volume.links.endpoints = [] mock_volume.delete = mock.Mock() self.rsd_client._get_volume = mock.Mock( side_effect=MockResourceNotFoundError("volume doesn't exist!")) self.assertRaises(exception.VolumeBackendAPIException, self.rsd_client.delete_vol_or_snap, self.resource_url, ignore_non_exist=True) mock_volume.delete.assert_not_called() def test_delete_vol_or_snap_has_endpoints(self): mock_volume = mock.Mock() mock_volume.links.endpoints = [mock.Mock()] mock_volume.delete = mock.Mock() self.rsd_client._get_volume = mock.Mock(return_value=mock_volume) self.assertRaises(exception.VolumeBackendAPIException, self.rsd_client.delete_vol_or_snap, self.resource_url) mock_volume.delete.assert_not_called() def test_delete_vol_or_snap_has_deps(self): mock_volume = mock.Mock() mock_volume.links.endpoints = [mock.Mock()] mock_volume.delete = mock.Mock( side_effect=MockBadRequestError("busy!")) self.rsd_client._get_volume = mock.Mock(return_value=mock_volume) self.rsd_client.delete_vol_or_snap = mock.Mock( side_effect=[None, exception.VolumeBackendAPIException( data="error")]) self.rsd_client.delete_vol_or_snap(self.resource_url) self.rsd_client.delete_vol_or_snap.assert_called_once() def test_attach_volume_to_node_invalid_vol_url(self): self.rsd_client._get_volume = mock.Mock(side_effect=[ RuntimeError("_get_volume failed")]) self.rsd_client._get_node = mock.Mock() self.assertRaises( exception.VolumeBackendAPIException, self.rsd_client.attach_volume_to_node, self.resource_url, self.resource_url) self.rsd_client._get_volume.assert_called_once() self.rsd_client._get_node.assert_not_called() def test_attach_volume_to_node_invalid_node_url(self): mock_volume = mock.Mock() self.rsd_client._get_volume = mock.Mock(return_value=mock_volume) self.rsd_client._get_node = mock.Mock(side_effect=[ RuntimeError("_get_node failed")]) self.assertRaises( exception.VolumeBackendAPIException, self.rsd_client.attach_volume_to_node, self.resource_url, self.resource_url) self.rsd_client._get_volume.assert_called_once() self.rsd_client._get_node.assert_called_once() def test_attach_volume_to_node_already_attached(self): mock_volume = mock.Mock() mock_node = mock.Mock() mock_volume.links.endpoints = [mock.Mock()] self.rsd_client._get_volume = mock.Mock(return_value=mock_volume) self.rsd_client._get_node = mock.Mock(return_value=mock_node) self.assertRaises( exception.VolumeBackendAPIException, self.rsd_client.attach_volume_to_node, self.resource_url, self.resource_url) self.rsd_client._get_volume.assert_called_once() self.rsd_client._get_node.assert_called_once() @mock.patch('time.sleep') def test_attach_volume_to_node_too_few_endpoints(self, mock_sleep): mock_volume = mock.Mock() mock_node = mock.Mock() mock_volume.links.endpoints = [] mock_node.detach_endpoint = mock.Mock() self.rsd_client._get_volume = mock.Mock(return_value=mock_volume) self.rsd_client._get_node = mock.Mock(return_value=mock_node) self.rsd_client._get_nqn_endpoints = mock.Mock(return_value=[]) 
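        # With no NQN endpoints reported for the volume the attach cannot be
        # verified, so the assertions below expect five attach/detach attempts
        # before RSDRetryableException is raised.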
self.assertRaises( rsd_driver.RSDRetryableException, self.rsd_client.attach_volume_to_node, self.resource_url, self.resource_url) self.assertEqual(5, mock_node.attach_endpoint.call_count) self.assertEqual(5, mock_node.detach_endpoint.call_count) @mock.patch('time.sleep') def test_attach_volume_to_node_too_many_endpoints(self, mock_sleep): mock_volume = mock.Mock() mock_node = mock.Mock() mock_volume.links.endpoints = [] self.rsd_client._get_volume = mock.Mock(return_value=mock_volume) self.rsd_client._get_node = mock.Mock(return_value=mock_node) self.rsd_client._get_nqn_endpoints = mock.Mock(return_value=[ mock.Mock(), mock.Mock()]) self.assertRaises( rsd_driver.RSDRetryableException, self.rsd_client.attach_volume_to_node, self.resource_url, self.resource_url) self.assertEqual(5, mock_node.attach_endpoint.call_count) self.assertEqual(5, mock_node.detach_endpoint.call_count) @mock.patch('time.sleep') def test_attach_volume_to_node_too_few_ip_transport(self, mock_sleep): mock_volume = mock.Mock() mock_node = mock.Mock() mock_target_nqn = mock.Mock() v_endpoints = {"IPTransportDetails": []} mock_v_endpoints = [(mock_target_nqn, v_endpoints)] mock_volume.links.endpoints = [] self.rsd_client._get_volume = mock.Mock(return_value=mock_volume) self.rsd_client._get_node = mock.Mock(return_value=mock_node) self.rsd_client._get_nqn_endpoints = mock.Mock( return_value=mock_v_endpoints) self.assertRaises( rsd_driver.RSDRetryableException, self.rsd_client.attach_volume_to_node, self.resource_url, self.resource_url) self.assertEqual(5, mock_node.attach_endpoint.call_count) self.assertEqual(5, mock_node.detach_endpoint.call_count) @mock.patch('time.sleep') def test_attach_volume_to_node_too_many_ip_transport(self, mock_sleep): mock_volume = mock.Mock() mock_node = mock.Mock() mock_target_nqn = mock.Mock() v_endpoints = {"IPTransportDetails": [mock.Mock(), mock.Mock()]} mock_v_endpoints = [(mock_target_nqn, v_endpoints)] mock_volume.links.endpoints = [] self.rsd_client._get_volume = mock.Mock(return_value=mock_volume) self.rsd_client._get_node = mock.Mock(return_value=mock_node) self.rsd_client._get_nqn_endpoints = mock.Mock( return_value=mock_v_endpoints) self.assertRaises( rsd_driver.RSDRetryableException, self.rsd_client.attach_volume_to_node, self.resource_url, self.resource_url) self.assertEqual(5, mock_node.attach_endpoint.call_count) self.assertEqual(5, mock_node.detach_endpoint.call_count) @mock.patch('time.sleep') def test_attach_volume_to_node_no_n_endpoints(self, mock_sleep): mock_volume = mock.Mock() mock_node = mock.Mock() mock_target_nqn = mock.Mock() mock_ip = '0.0.0.0' mock_port = 5446 target_ip = {"Address": mock_ip} ip_transport = {"IPv4Address": target_ip, "Port": mock_port} v_endpoints = {"IPTransportDetails": [ip_transport]} mock_v_endpoints = [(mock_target_nqn, v_endpoints)] mock_volume.links.endpoints = [] mock_node_system = mock.Mock() mock_node_system.json = {"Links": {"Endpoints": []}} self.mock_rsd_lib.get_system = mock.MagicMock( return_value=mock_node_system) self.rsd_client._get_volume = mock.Mock(return_value=mock_volume) self.rsd_client._get_node = mock.Mock(return_value=mock_node) self.rsd_client._get_nqn_endpoints = mock.Mock(side_effect=[ mock_v_endpoints, [], mock_v_endpoints, [], mock_v_endpoints, [], mock_v_endpoints, [], mock_v_endpoints, []]) self.assertRaises( rsd_driver.RSDRetryableException, self.rsd_client.attach_volume_to_node, self.resource_url, self.resource_url) self.assertEqual(5, mock_node.attach_endpoint.call_count) self.assertEqual(5, 
mock_node.detach_endpoint.call_count) @mock.patch('time.sleep') def test_attach_volume_to_node_retry_attach(self, mock_sleep): mock_volume = mock.Mock() mock_node = mock.Mock() mock_target_nqn = mock.Mock() mock_ip = '0.0.0.0' mock_port = 5446 mock_host_nqn = 'host_nqn' target_ip = {"Address": mock_ip} ip_transport = {"IPv4Address": target_ip, "Port": mock_port} v_endpoints = {"IPTransportDetails": [ip_transport]} mock_v_endpoints = [(mock_target_nqn, v_endpoints)] mock_n_endpoints = [(mock_host_nqn, v_endpoints)] mock_volume.links.endpoints = [] mock_node_system = mock.Mock() mock_node_system.json = {"Links": {"Endpoints": []}} self.mock_rsd_lib.get_system = mock.MagicMock( return_value=mock_node_system) self.rsd_client._get_volume = mock.Mock(return_value=mock_volume) self.rsd_client._get_node = mock.Mock(return_value=mock_node) self.rsd_client._get_nqn_endpoints = mock.Mock(side_effect=[ mock_v_endpoints, mock_n_endpoints]) mock_node.attach_endpoint = mock.Mock(side_effect=[ MockInvalidParameterValueError("invalid resource"), None]) ret_tuple = self.rsd_client.attach_volume_to_node(self.resource_url, self.resource_url) self.assertEqual((mock_ip, mock_port, mock_target_nqn, mock_host_nqn), ret_tuple) self.assertEqual(2, mock_node.attach_endpoint.call_count) mock_node.detach_endpoint.assert_not_called() @mock.patch('time.sleep') def test_attach_volume_to_node_retry_post_attach(self, mock_sleep): mock_volume = mock.Mock() mock_node = mock.Mock() mock_target_nqn = mock.Mock() mock_ip = '0.0.0.0' mock_port = 5446 mock_host_nqn = 'host_nqn' target_ip = {"Address": mock_ip} ip_transport = {"IPv4Address": target_ip, "Port": mock_port} v_endpoints = {"IPTransportDetails": [ip_transport]} mock_v_endpoints = [(mock_target_nqn, v_endpoints)] mock_n_endpoints = [(mock_host_nqn, v_endpoints)] mock_volume.links.endpoints = [] mock_node_system = mock.Mock() mock_node_system.json = {"Links": {"Endpoints": []}} self.mock_rsd_lib.get_system = mock.MagicMock( return_value=mock_node_system) self.rsd_client._get_volume = mock.Mock(return_value=mock_volume) self.rsd_client._get_node = mock.Mock(return_value=mock_node) self.rsd_client._get_nqn_endpoints = mock.Mock(side_effect=[ mock_v_endpoints, [], mock_v_endpoints, mock_n_endpoints]) ret_tuple = self.rsd_client.attach_volume_to_node(self.resource_url, self.resource_url) self.assertEqual((mock_ip, mock_port, mock_target_nqn, mock_host_nqn), ret_tuple) self.assertEqual(2, mock_node.attach_endpoint.call_count) mock_node.detach_endpoint.assert_called_once() def test_attach_volume_to_node(self): mock_volume = mock.Mock() mock_node = mock.Mock() mock_target_nqn = mock.Mock() mock_ip = '0.0.0.0' mock_port = 5446 mock_host_nqn = 'host_nqn' target_ip = {"Address": mock_ip} ip_transport = {"IPv4Address": target_ip, "Port": mock_port} v_endpoints = {"IPTransportDetails": [ip_transport]} mock_v_endpoints = [(mock_target_nqn, v_endpoints)] mock_n_endpoints = [(mock_host_nqn, v_endpoints)] mock_volume.links.endpoints = [] mock_node_system = mock.Mock() mock_node_system.json = {"Links": {"Endpoints": []}} self.mock_rsd_lib.get_system = mock.MagicMock( return_value=mock_node_system) self.rsd_client._get_volume = mock.Mock(return_value=mock_volume) self.rsd_client._get_node = mock.Mock(return_value=mock_node) self.rsd_client._get_nqn_endpoints = mock.Mock(side_effect=[ mock_v_endpoints, mock_n_endpoints]) ret_tuple = self.rsd_client.attach_volume_to_node(self.resource_url, self.resource_url) self.assertEqual((mock_ip, mock_port, mock_target_nqn, mock_host_nqn), ret_tuple) 
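        # Happy path: a single attach_endpoint call is expected and no
        # detach/cleanup should occur.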
mock_node.attach_endpoint.assert_called_once() mock_node.detach_endpoint.assert_not_called() def test_get_node_url_by_uuid(self): mock_node = mock.Mock() mock_node.path = self.resource_url mock_node_system = mock.Mock() mock_node_system.uuid = self.uuid self.mock_rsd_lib.get_system = mock.MagicMock( return_value=mock_node_system) get_mem = self.mock_rsd_lib.get_node_collection.return_value get_mem.get_members.return_value = [mock_node] node_url = self.rsd_client.get_node_url_by_uuid(self.uuid.lower()) self.assertEqual(self.resource_url, node_url) def test_get_node_url_by_uuid_uuid_not_present(self): mock_node = mock.Mock() mock_node.path = self.resource_url mock_node_system = mock.Mock() mock_node_system.uuid = self.uuid self.mock_rsd_lib.get_system = mock.MagicMock( return_value=mock_node_system) get_mem = self.mock_rsd_lib.get_node_collection.return_value get_mem.get_members.return_value = [] node_url = self.rsd_client.get_node_url_by_uuid(self.uuid.lower()) self.assertEqual("", node_url) def test_get_node_url_by_uuid_multiple_uuids(self): mock_node = mock.Mock() mock_node.path = self.resource_url mock_node_system = mock.Mock() mock_node_system.uuid = self.uuid second_uuid = "9f9244dd-59a1-4532-b548-df784c7" mock_node_dummy = mock.Mock() mock_node_dummy.path = self.url + "/" + second_uuid mock_node_dummy_system = mock.Mock() mock_node_dummy_system.uuid = second_uuid self.mock_rsd_lib.get_system = mock.MagicMock( side_effect=[mock_node_dummy_system, mock_node_system]) get_mem = self.mock_rsd_lib.get_node_collection.return_value get_mem.get_members.return_value = [mock_node_dummy, mock_node] node_url = self.rsd_client.get_node_url_by_uuid(self.uuid.lower()) self.assertEqual(self.resource_url, node_url) def test_get_node_url_by_uuid_exception(self): mock_node = mock.Mock() mock_node.path = self.resource_url mock_node_system = mock.Mock() mock_node_system.uuid = self.uuid self.mock_rsd_lib.get_system = mock.MagicMock( return_value=mock_node_system) get_mem = self.mock_rsd_lib.get_node_collection.return_value get_mem.get_members.side_effect = [RuntimeError("Mock Exception")] node_url = self.rsd_client.get_node_url_by_uuid(self.uuid.lower()) self.assertEqual("", node_url) def test_get_stats(self): mock_str_pool_1 = mock.Mock() mock_str_pool_2 = mock.Mock() mock_str_pool_3 = mock.Mock() mock_str_pool_1.capacity.allocated_bytes = 10737418240 mock_str_pool_2.capacity.allocated_bytes = 21474836480 mock_str_pool_3.capacity.allocated_bytes = 32212254720 mock_str_pool_1.capacity.consumed_bytes = 5368709120 mock_str_pool_2.capacity.consumed_bytes = 10737418240 mock_str_pool_3.capacity.consumed_bytes = 21474836480 self._generate_rsd_storage_objects() self._mock_stor_obj_1.storage_pools.get_members = mock.Mock( return_value=[mock_str_pool_1]) self._mock_stor_obj_2.storage_pools.get_members = mock.Mock( return_value=[mock_str_pool_2]) self._mock_stor_obj_3.storage_pools.get_members = mock.Mock( return_value=[mock_str_pool_3]) self._mock_stor_obj_1.volumes.members_identities = [mock.Mock()] self._mock_stor_obj_2.volumes.members_identities = [mock.Mock(), mock.Mock()] self._mock_stor_obj_3.volumes.members_identities = [mock.Mock(), mock.Mock(), mock.Mock()] self.rsd_client._get_storages = mock.Mock( return_value=self._mock_stor_collection) stat_tuple = self.rsd_client.get_stats() self.assertEqual((25.0, 60.0, 35.0, 6), stat_tuple) def test_get_stats_fail(self): self.rsd_client._get_storages = mock.Mock() self.rsd_client._get_storages.side_effect = [ RuntimeError("Connection Error")] stat_tuple = 
self.rsd_client.get_stats() self.assertEqual((0, 0, 0, 0), stat_tuple) class RSDDriverTestCase(test_driver.BaseDriverTestCase): driver_name = "cinder.volume.drivers.rsd.RSDDriver" def setUp(self): super(RSDDriverTestCase, self).setUp() self.mock_volume = mock.MagicMock() self.mock_dict = {'size': 10} self.volume.driver.rsdClient = mock.MagicMock() self.rsd_client = self.volume.driver.rsdClient self.uuid = "84cff9ea-de0f-4841-8645-58620adf49b2" self.url = "/redfish/v1/Storage/StorageService" self.resource_url = self.url + "/" + self.uuid def test_create_volume(self): self.rsd_client.create_volume = mock.Mock( return_value=self.resource_url) vol_update = self.volume.driver.create_volume(self.mock_dict) self.assertEqual({'provider_location': self.resource_url}, vol_update) def test_delete_volume(self): self.rsd_client.delete_vol_or_snap = mock.Mock( return_value=True) self.mock_dict['provider_location'] = self.resource_url self.mock_volume.__getitem__.side_effect = self.mock_dict.__getitem__ self.mock_volume.metadata.get = mock.Mock( return_value=self.resource_url) self.assertIsNone(self.volume.driver.delete_volume(self.mock_volume)) self.rsd_client.delete_vol_or_snap.assert_called() self.assertEqual(2, self.rsd_client.delete_vol_or_snap.call_count) def test_delete_volume_no_snapshot(self): self.rsd_client.delete_vol_or_snap = mock.Mock( return_value=True) self.mock_dict['provider_location'] = self.resource_url self.mock_volume.__getitem__.side_effect = self.mock_dict.__getitem__ self.mock_volume.metadata.get = mock.Mock(return_value=None) self.assertIsNone(self.volume.driver.delete_volume(self.mock_volume)) self.rsd_client.delete_vol_or_snap.assert_called_once() def test_delete_volume_no_volume_url(self): self.rsd_client.delete_vol_or_snap = mock.Mock( return_value=True) self.mock_dict['provider_location'] = None self.mock_volume.__getitem__.side_effect = self.mock_dict.__getitem__ self.assertIsNone(self.volume.driver.delete_volume(self.mock_volume)) self.rsd_client.delete_vol_or_snap.assert_not_called() def test_delete_volume_busy_volume(self): self.rsd_client.delete_vol_or_snap = mock.Mock( side_effect=[exception.VolumeIsBusy( volume_name=self.mock_volume.name)]) self.mock_dict['provider_location'] = self.resource_url self.mock_volume.__getitem__.side_effect = self.mock_dict.__getitem__ self.assertRaises(exception.VolumeIsBusy, self.volume.driver.delete_volume, self.mock_volume) self.rsd_client.delete_vol_or_snap.assert_called_once() def test_delete_volume_snap_deletion_error(self): self.rsd_client.delete_vol_or_snap = mock.Mock( side_effect=[None, exception.VolumeBackendAPIException( data="error")]) self.mock_dict['provider_location'] = self.resource_url self.mock_volume.__getitem__.side_effect = self.mock_dict.__getitem__ self.assertRaises(exception.VolumeBackendAPIException, self.volume.driver.delete_volume, self.mock_volume) self.rsd_client.delete_vol_or_snap.assert_called() self.assertEqual(2, self.rsd_client.delete_vol_or_snap.call_count) def test_get_volume_stats_refresh(self): ret_tuple = (25.0, 60.0, 35.0, 6) self.rsd_client.get_stats = mock.Mock(return_value=ret_tuple) expected_stats = {'driver_version': '1.0.0', 'pools': [{ 'allocated_capacity_gb': 35.0, 'free_capacity_gb': 25.0, 'multiattach': False, 'pool_name': 'RSD', 'thick_provisioning_support': True, 'thin_provisioning_support': True, 'total_capacity_gb': 60.0}], 'storage_protocol': 'nvmeof', 'vendor_name': 'Intel', 'volume_backend_name': 'RSD'} stats = self.volume.driver.get_volume_stats(refresh=True) 
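        # The (free, total, allocated, volume_count) tuple from
        # RSDClient.get_stats() should be mapped onto the single 'RSD' pool
        # asserted below.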
self.assertEqual(expected_stats, stats) def test_initialize_connection(self): mock_connector = {'system uuid': "281bbc50-e76f-40e7-a757-06b916a83d6f"} self.mock_dict['provider_location'] = self.resource_url self.mock_volume.__getitem__.side_effect = self.mock_dict.__getitem__ self.rsd_client.get_node_url_by_uuid = mock.Mock( return_value=self.resource_url) ret_tuple = ("0.0.0.0", 5467, "target.mock.nqn", "initiator.mock.nqn") self.rsd_client.attach_volume_to_node = mock.Mock( return_value=ret_tuple) expected_conn_info = { 'driver_volume_type': 'nvmeof', 'data': { 'transport_type': 'rdma', 'host_nqn': "initiator.mock.nqn", 'nqn': "target.mock.nqn", 'target_port': 5467, 'target_portal': "0.0.0.0", } } conn_info = self.volume.driver.initialize_connection(self.mock_volume, mock_connector) self.assertEqual(expected_conn_info, conn_info) def test_initialize_connection_node_not_found(self): mock_connector = {'system uuid': "281bbc50-e76f-40e7-a757-06b916a83d6f"} self.mock_dict['provider_location'] = self.resource_url self.mock_volume.__getitem__.side_effect = self.mock_dict.__getitem__ self.rsd_client.get_node_url_by_uuid = mock.Mock( return_value="") ret_tuple = ("0.0.0.0", 5467, "target.mock.nqn", "initiator.mock.nqn") self.rsd_client.attach_volume_to_node = mock.Mock( return_value=ret_tuple) self.assertRaises(exception.VolumeBackendAPIException, self.volume.driver.initialize_connection, self.mock_volume, mock_connector) self.rsd_client.attach_volume_to_node.assert_not_called() self.rsd_client.get_node_url_by_uuid.assert_called_once() def test_initialize_connection_no_system_uuid(self): mock_connector = {} self.mock_dict['provider_location'] = self.resource_url self.mock_volume.__getitem__.side_effect = self.mock_dict.__getitem__ self.rsd_client.get_node_url_by_uuid = mock.Mock( return_value=self.resource_url) ret_tuple = ("0.0.0.0", 5467, "target.mock.nqn", "initiator.mock.nqn") self.rsd_client.attach_volume_to_node = mock.Mock( return_value=ret_tuple) self.assertRaises(exception.VolumeBackendAPIException, self.volume.driver.initialize_connection, self.mock_volume, mock_connector) self.rsd_client.attach_volume_to_node.assert_not_called() self.rsd_client.get_node_url_by_uuid.assert_not_called() def test_terminate_connection(self): mock_connector = {'system uuid': "281bbc50-e76f-40e7-a757-06b916a83d6f"} self.mock_dict['provider_location'] = self.resource_url self.mock_volume.__getitem__.side_effect = self.mock_dict.__getitem__ self.rsd_client.get_node_url_by_uuid = mock.Mock( return_value=self.resource_url) self.volume.driver.terminate_connection(self.mock_volume, mock_connector) self.rsd_client.get_node_url_by_uuid.assert_called_once() self.rsd_client.detach_volume_from_node.assert_called_once() def test_terminate_connection_no_node(self): mock_connector = {'system uuid': "281bbc50-e76f-40e7-a757-06b916a83d6f"} self.mock_dict['provider_location'] = self.resource_url self.mock_volume.__getitem__.side_effect = self.mock_dict.__getitem__ self.rsd_client.get_node_url_by_uuid = mock.Mock( return_value="") self.assertRaises(exception.VolumeBackendAPIException, self.volume.driver.terminate_connection, self.mock_volume, mock_connector) self.rsd_client.get_node_url_by_uuid.assert_called_once() self.rsd_client.detach_volume_from_node.assert_not_called() def test_terminate_connection_no_connector(self): mock_connector = None self.mock_dict['provider_location'] = self.resource_url self.mock_volume.__getitem__.side_effect = self.mock_dict.__getitem__ self.rsd_client.get_node_url_by_uuid = mock.Mock( 
return_value=self.resource_url) self.volume.driver.terminate_connection( self.mock_volume, mock_connector) self.rsd_client.detach_all_node_connections_for_volume. \ assert_called_once() self.rsd_client.get_node_url_by_uuid.assert_not_called() self.rsd_client.detach_volume_from_node.assert_not_called() def test_terminate_connection_no_system_uuid(self): mock_connector = {} self.mock_dict['provider_location'] = self.resource_url self.mock_volume.__getitem__.side_effect = self.mock_dict.__getitem__ self.rsd_client.get_node_url_by_uuid = mock.Mock( return_value=self.resource_url) self.assertRaises(exception.VolumeBackendAPIException, self.volume.driver.terminate_connection, self.mock_volume, mock_connector) self.rsd_client.get_node_url_by_uuid.assert_not_called() self.rsd_client.detach_volume_from_node.assert_not_called() def test_create_volume_from_snapshot(self): mock_snap = mock.Mock() mock_snap.provider_location = self.resource_url mock_snap.volume_size = 10 self.mock_volume.size = 10 self.rsd_client.create_volume_from_snap = mock.Mock( return_value=self.resource_url) self.rsd_client.delete_vol_or_snap = mock.Mock() ret_dict = self.volume.driver.create_volume_from_snapshot( self.mock_volume, mock_snap) self.assertEqual({'provider_location': self.resource_url}, ret_dict) self.rsd_client.create_volume_from_snap.assert_called_once() self.rsd_client.extend_volume.assert_not_called() self.rsd_client.delete_vol_or_snap.assert_not_called() def test_create_volume_from_snapshot_diff_size(self): mock_snap = mock.Mock() mock_snap.provider_location = self.resource_url mock_snap.volume_size = 10 self.mock_volume.size = 20 self.rsd_client.create_volume_from_snap = mock.Mock( return_value=self.resource_url) self.rsd_client.extend_volume = mock.Mock() self.rsd_client.delete_vol_or_snap = mock.Mock() ret_dict = self.volume.driver.create_volume_from_snapshot( self.mock_volume, mock_snap) self.assertEqual({'provider_location': self.resource_url}, ret_dict) self.rsd_client.create_volume_from_snap.assert_called_once() self.rsd_client.extend_volume.assert_called_once() self.rsd_client.delete_vol_or_snap.assert_not_called() def test_create_volume_from_snapshot_diff_size_fail(self): mock_snap = mock.Mock() mock_snap.provider_location = self.resource_url mock_snap.volume_size = 10 self.mock_volume.size = 20 self.rsd_client.create_volume_from_snap = mock.Mock( return_value=self.resource_url) self.rsd_client.extend_volume = mock.Mock( side_effect=[exception.VolumeBackendAPIException( data="extend fail")]) self.rsd_client.delete_vol_or_snap = mock.Mock() self.assertRaises(exception.VolumeBackendAPIException, self.volume.driver.create_volume_from_snapshot, self.mock_volume, mock_snap) self.rsd_client.create_volume_from_snap.assert_called_once() self.rsd_client.extend_volume.assert_called_once() self.rsd_client.delete_vol_or_snap.assert_called_once() def test_delete_snapshot(self): mock_snap = mock.Mock() mock_snap.provider_location = self.resource_url mock_snap.name = "mock_snapshot" self.rsd_client.delete_vol_or_snap = mock.Mock(return_value=True) self.volume.driver.delete_snapshot(mock_snap) self.rsd_client.delete_vol_or_snap.assert_called_once() def test_delete_snapshot_no_url(self): mock_snap = mock.Mock() mock_snap.provider_location = "" mock_snap.name = "mock_snapshot" self.rsd_client.delete_vol_or_snap = mock.Mock(return_value=True) self.volume.driver.delete_snapshot(mock_snap) self.rsd_client.delete_vol_or_snap.assert_not_called() def test_delete_snapshot_unable_to_delete(self): mock_snap = mock.Mock() 
mock_snap.provider_location = self.resource_url mock_snap.name = "mock_snapshot" self.rsd_client.delete_vol_or_snap = mock.Mock( side_effect=[exception.SnapshotIsBusy( snapshot_name=mock_snap.name)]) self.assertRaises(exception.SnapshotIsBusy, self.volume.driver.delete_snapshot, mock_snap) self.rsd_client.delete_vol_or_snap.assert_called_once() def test_create_cloned_volume(self): mock_vref = mock.Mock() mock_vref.provider_location = self.resource_url mock_vref.size = 10 self.mock_volume.size = 10 self.rsd_client.clone_volume = mock.Mock( return_value=(self.resource_url, self.resource_url)) self.rsd_client.extend_volume = mock.Mock() self.rsd_client.delete_vol_or_snap = mock.Mock() self.volume.driver.create_cloned_volume(self.mock_volume, mock_vref) self.rsd_client.clone_volume.assert_called_once() self.rsd_client.extend_volume.assert_not_called() self.rsd_client.delete_vol_or_snap.assert_not_called() def test_create_cloned_volume_extend_vol(self): mock_vref = mock.Mock() mock_vref.provider_location = self.resource_url mock_vref.size = 20 self.mock_volume.size = 10 self.rsd_client.clone_volume = mock.Mock( return_value=(self.resource_url, self.resource_url)) self.rsd_client.extend_volume = mock.Mock() self.rsd_client.delete_vol_or_snap = mock.Mock() self.volume.driver.create_cloned_volume(self.mock_volume, mock_vref) self.rsd_client.clone_volume.assert_called_once() self.rsd_client.extend_volume.assert_called_once() self.rsd_client.delete_vol_or_snap.assert_not_called() def test_create_cloned_volume_extend_vol_fail(self): mock_vref = mock.Mock() mock_vref.provider_location = self.resource_url mock_vref.size = 20 self.mock_volume.size = 10 self.rsd_client.clone_volume = mock.Mock( return_value=(self.resource_url, self.resource_url)) self.rsd_client.extend_volume = mock.Mock( side_effect=exception.VolumeBackendAPIException( data="extend fail")) self.rsd_client.delete_vol_or_snap = mock.Mock() self.assertRaises(exception.VolumeBackendAPIException, self.volume.driver.create_cloned_volume, self.mock_volume, mock_vref) self.rsd_client.clone_volume.assert_called_once() self.rsd_client.extend_volume.assert_called_once() self.assertEqual(2, self.rsd_client.delete_vol_or_snap.call_count) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/volume/drivers/test_san.py0000664000175000017500000000476600000000000023650 0ustar00zuulzuul00000000000000# Copyright 2015 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
# from unittest import mock from cinder.tests.unit import test from cinder.volume import configuration from cinder.volume.drivers.san import san class SanDriverTestCase(test.TestCase): """Tests for SAN driver""" def __init__(self, *args, **kwargs): super(SanDriverTestCase, self).__init__(*args, **kwargs) def setUp(self): super(SanDriverTestCase, self).setUp() self.configuration = mock.Mock(spec=configuration.Configuration) self.configuration.san_is_local = False self.configuration.san_ip = "10.0.0.1" self.configuration.san_login = "admin" self.configuration.san_password = "password" self.configuration.san_ssh_port = 22 self.configuration.san_thin_provision = True self.configuration.san_private_key = 'private_key' self.configuration.ssh_min_pool_conn = 1 self.configuration.ssh_max_pool_conn = 5 self.configuration.ssh_conn_timeout = 30 class fake_san_driver(san.SanDriver): def initialize_connection(): pass def create_volume(): pass def delete_volume(): pass def terminate_connection(): pass @mock.patch.object(san.processutils, 'ssh_execute') @mock.patch.object(san.ssh_utils, 'SSHPool') @mock.patch.object(san.utils, 'check_ssh_injection') def test_ssh_formatted_command(self, mock_check_ssh_injection, mock_ssh_pool, mock_ssh_execute): driver = self.fake_san_driver(configuration=self.configuration) cmd_list = ['uname', '-s'] expected_cmd = 'uname -s' driver.san_execute(*cmd_list) # get the same used mocked item from the pool with driver.sshpool.item() as ssh_item: mock_ssh_execute.assert_called_with(ssh_item, expected_cmd, check_exit_code=None) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/volume/drivers/test_seagate.py0000664000175000017500000013406300000000000024472 0ustar00zuulzuul00000000000000# Copyright 2014 Objectif Libre # Copyright 2015 DotHill Systems # Copyright 2016-19 Seagate Technology or one of its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
# """Unit tests for OpenStack Cinder Seagate driver.""" from unittest import mock from lxml import etree import requests from cinder import exception from cinder.objects import fields from cinder.tests.unit import test import cinder.volume.drivers.stx.client import cinder.volume.drivers.stx.common import cinder.volume.drivers.stx.exception as stx_exception import cinder.volume.drivers.stx.fc import cinder.volume.drivers.stx.iscsi from cinder.zonemanager import utils as fczm_utils STXClient = cinder.volume.drivers.stx.client.STXClient STXCommon = cinder.volume.drivers.stx.common.STXCommon STXFCDriver = cinder.volume.drivers.stx.fc.STXFCDriver STXISCSIDriver = cinder.volume.drivers.stx.iscsi.STXISCSIDriver session_key = '12a1626754554a21d85040760c81b' resp_login = ''' success 0 12a1626754554a21d85040760c81b 1''' resp_fw_ti = '''T252R07 0''' resp_fw = '''GLS220R001 0''' resp_fw_nomatch = '''Z 0''' resp_system = ''' 00C0FFEEEEEE 0 ''' resp_badlogin = ''' error 1 Authentication failure 1''' response_ok = ''' some data 0 ''' response_not_ok = ''' Error Message 1 ''' response_stats_linear = ''' 3863830528 3863830528 ''' response_stats_virtual = ''' 3863830528 3863830528 ''' response_no_lun = '''''' response_lun = ''' 1 4''' response_ports = ''' FC id1 Disconnected FC id2 Up iSCSI id3 10.0.0.10 Disconnected iSCSI id4 10.0.0.11 Up iSCSI id5 10.0.0.12 Up ''' # mccli -x array show-volumes | egrep \ # '(RESPONSE|OBJECT|volume-name|volume-type|serial-number|wwn| \ # storage-pool-name|size-numeric|volume-parent)' response_vols = ''' A bar 2097152 2097152 0 00c0ff53a30500000323416101000000 -1 base 15 600C0FF00053A3050323416101000000 A foo 8388608 8388608 0 00c0ff53a3050000df513e6101000000 -1 base 15 600C0FF00053A305DF513E6101000000 A snap 2097152 2097152 0 00c0ff53a3050000fbc5416101000000 -1 snapshot 13 00c0ff53a30500000323416101000000 600C0FF00053A305FBC5416101000000 A vqoINx4UbS-Cno3gDq1V 2097152 2097152 0 00c0ff53a305000024c6416101000000 -1 base 15 600C0FF00053A30524C6416101000000 ''' response_maps = ''' 00c0ff53a30500000323416101000000 bar ''' # The two XML samples above will produce the following result from # get_manageable_volumes(): # # [{'cinder_id': None, # 'extra_info': None, # 'reason_not_safe': 'volume in use', # 'reference': {'source-name': 'bar'}, # 'safe_to_manage': False, # 'size': 1}, # {'cinder_id': 'aa820dc7-851b-4be0-a7a3-7803ab555495', # 'extra_info': None, # 'reason_not_safe': 'already managed', # 'reference': {'source-name': 'vqoINx4UbS-Cno3gDq1V'}, # 'safe_to_manage': False, # 'size': 1}, # {'cinder_id': None, # 'extra_info': None, # 'reason_not_safe': None, # 'reference': {'source-name': 'foo'}, # 'safe_to_manage': True, # 'size': 4}] response_ports_linear = response_ports % {'ip': 'primary-ip-address'} response_ports_virtual = response_ports % {'ip': 'ip-address'} invalid_xml = '''''' malformed_xml = '''''' fake_xml = '''''' stats_low_space = {'free_capacity_gb': 10, 'total_capacity_gb': 100} stats_large_space = {'free_capacity_gb': 90, 'total_capacity_gb': 100} vol_id = 'fceec30e-98bc-4ce5-85ff-d7309cc17cc2' test_volume = {'id': vol_id, 'name_id': None, 'display_name': 'test volume', 'name': 'volume', 'size': 10} test_retype_volume = {'attach_status': fields.VolumeAttachStatus.DETACHED, 'id': vol_id, 'name_id': None, 'display_name': 'test volume', 'name': 'volume', 'size': 10} test_host = {'capabilities': {'location_info': 'SeagateVolumeDriver:xxxxx:dg02:A'}} test_snap = {'id': 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa', 'volume': {'name_id': None}, 'volume_id': vol_id, 
'display_name': 'test volume', 'name': 'volume', 'volume_size': 10} encoded_volid = 'v_O7DDpi8TOWF_9cwnMF' encoded_snapid = 's_O7DDpi8TOWF_9cwnMF' dest_volume = {'id': 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa', 'source_volid': vol_id, 'display_name': 'test volume', 'name': 'volume', 'size': 10} dest_volume_larger = {'id': 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa', 'name_id': None, 'source_volid': vol_id, 'display_name': 'test volume', 'name': 'volume', 'size': 20} attached_volume = {'id': vol_id, 'display_name': 'test volume', 'name': 'volume', 'size': 10, 'status': 'in-use', 'attach_status': fields.VolumeAttachStatus.ATTACHED} attaching_volume = {'id': vol_id, 'display_name': 'test volume', 'name': 'volume', 'size': 10, 'status': 'attaching', 'attach_status': fields.VolumeAttachStatus.ATTACHED} detached_volume = {'id': vol_id, 'name_id': None, 'display_name': 'test volume', 'name': 'volume', 'size': 10, 'status': 'available', 'attach_status': 'detached'} connector = {'ip': '10.0.0.2', 'initiator': 'iqn.1993-08.org.debian:01:222', 'wwpns': ["111111111111111", "111111111111112"], 'wwnns': ["211111111111111", "211111111111112"], 'host': 'fakehost'} invalid_connector = {'ip': '10.0.0.2', 'initiator': '', 'wwpns': [], 'wwnns': [], 'host': 'fakehost'} class TestSeagateClient(test.TestCase): def setUp(self): super(TestSeagateClient, self).setUp() self.login = 'manage' self.passwd = '!manage' self.ip = '10.0.0.1' self.protocol = 'http' self.ssl_verify = False self.client = STXClient(self.ip, self.login, self.passwd, self.protocol, self.ssl_verify) @mock.patch('requests.get') def test_login(self, mock_requests_get): m = mock.Mock() mock_requests_get.return_value = m m.text.encode.side_effect = [resp_badlogin, resp_badlogin] self.assertRaises(stx_exception.AuthenticationError, self.client.login) m.text.encode.side_effect = [resp_login, resp_fw_nomatch, resp_system] self.client.login() self.assertEqual('Z', self.client._fw_type) self.assertEqual(0, self.client._fw_rev) self.assertEqual(False, self.client.is_g5_fw()) m.text.encode.side_effect = [resp_login, resp_fw, resp_system] self.client.login() self.assertEqual(session_key, self.client._session_key) def test_build_request_url(self): url = self.client._build_request_url('/path') self.assertEqual('http://10.0.0.1/api/path', url) url = self.client._build_request_url('/path', arg1='val1') self.assertEqual('http://10.0.0.1/api/path/arg1/val1', url) url = self.client._build_request_url('/path', arg_1='val1') self.assertEqual('http://10.0.0.1/api/path/arg-1/val1', url) url = self.client._build_request_url('/path', 'arg1') self.assertEqual('http://10.0.0.1/api/path/arg1', url) url = self.client._build_request_url('/path', 'arg1', arg2='val2') self.assertEqual('http://10.0.0.1/api/path/arg2/val2/arg1', url) url = self.client._build_request_url('/path', 'arg1', 'arg3', arg2='val2') self.assertEqual('http://10.0.0.1/api/path/arg2/val2/arg1/arg3', url) @mock.patch('requests.get') def test_request(self, mock_requests_get): self.client._session_key = session_key m = mock.Mock() m.text.encode.side_effect = [response_ok, malformed_xml, requests.exceptions. 
RequestException("error")] mock_requests_get.return_value = m ret = self.client._api_request('/path') self.assertTrue(type(ret) is etree._Element) self.assertRaises(stx_exception.ConnectionError, self.client._api_request, '/path') self.assertRaises(stx_exception.ConnectionError, self.client._api_request, '/path') def test_assert_response_ok(self): ok_tree = etree.XML(response_ok) not_ok_tree = etree.XML(response_not_ok) invalid_tree = etree.XML(invalid_xml) ret = self.client._assert_response_ok(ok_tree) self.assertIsNone(ret) self.assertRaises(stx_exception.RequestError, self.client._assert_response_ok, not_ok_tree) self.assertRaises(stx_exception.RequestError, self.client._assert_response_ok, invalid_tree) @mock.patch.object(STXClient, '_request') def test_backend_exists(self, mock_request): mock_request.side_effect = [stx_exception.RequestError, fake_xml] self.assertFalse(self.client.backend_exists('backend_name', 'linear')) self.assertTrue(self.client.backend_exists('backend_name', 'linear')) @mock.patch.object(STXClient, '_request') def test_backend_stats(self, mock_request): stats = {'free_capacity_gb': 1843, 'total_capacity_gb': 1843} linear = etree.XML(response_stats_linear) virtual = etree.XML(response_stats_virtual) mock_request.side_effect = [linear, virtual] self.assertEqual(stats, self.client.backend_stats('OpenStack', 'linear')) self.assertEqual(stats, self.client.backend_stats('A', 'virtual')) @mock.patch.object(STXClient, '_request') def test_get_lun(self, mock_request): mock_request.side_effect = [etree.XML(response_no_lun), etree.XML(response_lun)] ret = self.client._get_first_available_lun_for_host("fakehost") self.assertEqual(1, ret) ret = self.client._get_first_available_lun_for_host("fakehost") self.assertEqual(2, ret) @mock.patch.object(STXClient, '_request') def test_get_ports(self, mock_request): mock_request.side_effect = [etree.XML(response_ports)] ret = self.client.get_active_target_ports() self.assertEqual([{'port-type': 'FC', 'target-id': 'id2', 'status': 'Up'}, {'port-type': 'iSCSI', 'target-id': 'id4', 'status': 'Up'}, {'port-type': 'iSCSI', 'target-id': 'id5', 'status': 'Up'}], ret) @mock.patch.object(STXClient, '_request') def test_get_fc_ports(self, mock_request): mock_request.side_effect = [etree.XML(response_ports)] ret = self.client.get_active_fc_target_ports() self.assertEqual(['id2'], ret) @mock.patch.object(STXClient, '_request') def test_get_iscsi_iqns(self, mock_request): mock_request.side_effect = [etree.XML(response_ports)] ret = self.client.get_active_iscsi_target_iqns() self.assertEqual(['id4', 'id5'], ret) @mock.patch.object(STXClient, '_request') def test_get_iscsi_portals(self, mock_request): portals = {'10.0.0.12': 'Up', '10.0.0.11': 'Up'} mock_request.side_effect = [etree.XML(response_ports_linear), etree.XML(response_ports_virtual)] ret = self.client.get_active_iscsi_target_portals() self.assertEqual(portals, ret) ret = self.client.get_active_iscsi_target_portals() self.assertEqual(portals, ret) @mock.patch.object(STXClient, '_request') def test_delete_snapshot(self, mock_request): mock_request.side_effect = [None, None] self.client.delete_snapshot('dummy', 'linear') mock_request.assert_called_with('/delete/snapshot', 'cleanup', 'dummy') self.client.delete_snapshot('dummy', 'paged') mock_request.assert_called_with('/delete/snapshot', 'dummy') @mock.patch.object(STXClient, '_request') def test_list_luns_for_host(self, mock_request): mock_request.side_effect = [etree.XML(response_no_lun), etree.XML(response_lun), etree.XML(response_lun)] 
self.client._fw_type = 'T' self.client.list_luns_for_host('dummy') mock_request.assert_called_with('/show/host-maps', 'dummy') self.client._fw_type = 'G' self.client.list_luns_for_host('dummy') mock_request.assert_called_with('/show/maps/initiator', 'dummy') self.client._fw_type = 'I' self.client.list_luns_for_host('dummy') mock_request.assert_called_with('/show/maps/initiator', 'dummy') class FakeConfiguration1(object): seagate_pool_name = 'OpenStack' seagate_pool_type = 'linear' san_ip = '10.0.0.1' san_login = 'manage' san_password = '!manage' seagate_api_protocol = 'http' driver_use_ssl = True driver_ssl_cert_verify = False def safe_get(self, key): return 'fakevalue' class FakeConfiguration2(FakeConfiguration1): seagate_iscsi_ips = ['10.0.0.11'] use_chap_auth = None class fake(dict): def __init__(self, *args, **kwargs): for d in args: self.update(d) self.update(kwargs) def __getattr__(self, attr): return self[attr] class TestFCSeagateCommon(test.TestCase): def setUp(self): super(TestFCSeagateCommon, self).setUp() self.config = FakeConfiguration1() self.common = STXCommon(self.config) self.common.client_login = mock.MagicMock() self.common.client_logout = mock.MagicMock() self.common.serialNumber = "xxxxx" self.common.owner = "A" self.connector_element = "wwpns" @mock.patch.object(STXClient, 'get_serial_number') @mock.patch.object(STXClient, 'get_owner_info') @mock.patch.object(STXClient, 'backend_exists') def test_do_setup(self, mock_backend_exists, mock_owner_info, mock_serial_number): mock_backend_exists.side_effect = [False, True] mock_owner_info.return_value = "A" mock_serial_number.return_value = "xxxxx" self.assertRaises(stx_exception.InvalidBackend, self.common.do_setup, None) self.assertIsNone(self.common.do_setup(None)) mock_backend_exists.assert_called_with(self.common.backend_name, self.common.backend_type) mock_owner_info.assert_called_with(self.common.backend_name, self.common.backend_type) def test_vol_name(self): self.assertEqual(encoded_volid, self.common._get_vol_name(vol_id)) self.assertEqual(encoded_snapid, self.common._get_snap_name(vol_id)) def test_check_flags(self): class FakeOptions(object): def __init__(self, d): for k, v in d.items(): self.__dict__[k] = v options = FakeOptions({'opt1': 'val1', 'opt2': 'val2'}) required_flags = ['opt1', 'opt2'] ret = self.common.check_flags(options, required_flags) self.assertIsNone(ret) options = FakeOptions({'opt1': 'val1', 'opt2': 'val2'}) required_flags = ['opt1', 'opt2', 'opt3'] self.assertRaises(exception.Invalid, self.common.check_flags, options, required_flags) def test_assert_connector_ok(self): self.assertRaises(exception.InvalidInput, self.common._assert_connector_ok, invalid_connector, self.connector_element) self.assertIsNone(self.common._assert_connector_ok( connector, self.connector_element)) @mock.patch.object(STXClient, 'backend_stats') def test_update_volume_stats(self, mock_stats): mock_stats.side_effect = [stx_exception.RequestError, stats_large_space] self.assertRaises(exception.Invalid, self.common._update_volume_stats) mock_stats.assert_called_with(self.common.backend_name, self.common.backend_type) ret = self.common._update_volume_stats() self.assertIsNone(ret) self.assertEqual({'driver_version': self.common.VERSION, 'pools': [{'QoS_support': False, 'multiattach': True, 'free_capacity_gb': 90, 'location_info': 'SeagateVolumeDriver:xxxxx:OpenStack:A', 'pool_name': 'OpenStack', 'total_capacity_gb': 100}], 'storage_protocol': None, 'vendor_name': 'Seagate', 'volume_backend_name': None}, self.common.stats) 
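# The 90/100 GiB pool figures asserted above come straight from the
# stats_large_space fixture defined near the top of this module.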
@mock.patch.object(STXClient, 'create_volume') def test_create_volume(self, mock_create): mock_create.side_effect = [stx_exception.RequestError, None] self.assertRaises(exception.Invalid, self.common.create_volume, test_volume) ret = self.common.create_volume(test_volume) self.assertIsNone(ret) mock_create.assert_called_with(encoded_volid, "%sGiB" % test_volume['size'], self.common.backend_name, self.common.backend_type) @mock.patch.object(STXClient, 'delete_volume') def test_delete_volume(self, mock_delete): not_found_e = stx_exception.RequestError( 'The volume was not found on this system.') mock_delete.side_effect = [not_found_e, stx_exception.RequestError, None] self.assertIsNone(self.common.delete_volume(test_volume)) self.assertRaises(exception.Invalid, self.common.delete_volume, test_volume) self.assertIsNone(self.common.delete_volume(test_volume)) mock_delete.assert_called_with(encoded_volid) @mock.patch.object(STXClient, 'copy_volume') @mock.patch.object(STXClient, 'backend_stats') def test_create_cloned_volume(self, mock_stats, mock_copy): mock_stats.side_effect = [stats_low_space, stats_large_space, stats_large_space] self.assertRaises( stx_exception.NotEnoughSpace, self.common.create_cloned_volume, dest_volume, detached_volume) self.assertFalse(mock_copy.called) mock_copy.side_effect = [stx_exception.RequestError, None] self.assertRaises(exception.Invalid, self.common.create_cloned_volume, dest_volume, detached_volume) ret = self.common.create_cloned_volume(dest_volume, detached_volume) self.assertIsNone(ret) mock_copy.assert_called_with(encoded_volid, 'vqqqqqqqqqqqqqqqqqqq', self.common.backend_name, self.common.backend_type) @mock.patch.object(STXClient, 'copy_volume') @mock.patch.object(STXClient, 'backend_stats') @mock.patch.object(STXCommon, 'extend_volume') def test_create_cloned_volume_larger(self, mock_extend, mock_stats, mock_copy): mock_stats.side_effect = [stats_low_space, stats_large_space, stats_large_space] self.assertRaises(stx_exception.NotEnoughSpace, self.common.create_cloned_volume, dest_volume_larger, detached_volume) self.assertFalse(mock_copy.called) mock_copy.side_effect = [stx_exception.RequestError, None] self.assertRaises(exception.Invalid, self.common.create_cloned_volume, dest_volume_larger, detached_volume) ret = self.common.create_cloned_volume(dest_volume_larger, detached_volume) self.assertIsNone(ret) mock_copy.assert_called_with(encoded_volid, 'vqqqqqqqqqqqqqqqqqqq', self.common.backend_name, self.common.backend_type) mock_extend.assert_called_once_with(dest_volume_larger, dest_volume_larger['size']) @mock.patch.object(STXClient, 'get_volume_size') @mock.patch.object(STXClient, 'extend_volume') @mock.patch.object(STXClient, 'copy_volume') @mock.patch.object(STXClient, 'backend_stats') def test_create_volume_from_snapshot(self, mock_stats, mock_copy, mock_extend, mock_get_size): mock_stats.side_effect = [stats_low_space, stats_large_space, stats_large_space] self.assertRaises(stx_exception.NotEnoughSpace, self.common.create_volume_from_snapshot, dest_volume, test_snap) mock_copy.side_effect = [stx_exception.RequestError, None] mock_get_size.return_value = test_snap['volume_size'] self.assertRaises(exception.Invalid, self.common.create_volume_from_snapshot, dest_volume, test_snap) ret = self.common.create_volume_from_snapshot(dest_volume_larger, test_snap) self.assertIsNone(ret) mock_copy.assert_called_with('sqqqqqqqqqqqqqqqqqqq', 'vqqqqqqqqqqqqqqqqqqq', self.common.backend_name, self.common.backend_type) 
mock_extend.assert_called_with('vqqqqqqqqqqqqqqqqqqq', '10GiB') @mock.patch.object(STXClient, 'get_volume_size') @mock.patch.object(STXClient, 'extend_volume') def test_extend_volume(self, mock_extend, mock_size): mock_extend.side_effect = [stx_exception.RequestError, None] mock_size.side_effect = [10, 10] self.assertRaises(exception.Invalid, self.common.extend_volume, test_volume, 20) ret = self.common.extend_volume(test_volume, 20) self.assertIsNone(ret) mock_extend.assert_called_with(encoded_volid, '10GiB') @mock.patch.object(STXClient, 'create_snapshot') def test_create_snapshot(self, mock_create): mock_create.side_effect = [stx_exception.RequestError, None] self.assertRaises(exception.Invalid, self.common.create_snapshot, test_snap) ret = self.common.create_snapshot(test_snap) self.assertIsNone(ret) mock_create.assert_called_with(encoded_volid, 'sqqqqqqqqqqqqqqqqqqq') @mock.patch.object(STXClient, 'delete_snapshot') def test_delete_snapshot(self, mock_delete): not_found_e = stx_exception.RequestError( 'The volume was not found on this system.') mock_delete.side_effect = [not_found_e, stx_exception.RequestError, None] self.assertIsNone(self.common.delete_snapshot(test_snap)) self.assertRaises(exception.Invalid, self.common.delete_snapshot, test_snap) self.assertIsNone(self.common.delete_snapshot(test_snap)) mock_delete.assert_called_with('sqqqqqqqqqqqqqqqqqqq', self.common.backend_type) @mock.patch.object(STXClient, 'map_volume') def test_map_volume(self, mock_map): mock_map.side_effect = [stx_exception.RequestError, 10] self.assertRaises(exception.Invalid, self.common.map_volume, test_volume, connector, self.connector_element) lun = self.common.map_volume(test_volume, connector, self.connector_element) self.assertEqual(10, lun) mock_map.assert_called_with(encoded_volid, connector, self.connector_element) @mock.patch.object(STXClient, 'unmap_volume') def test_unmap_volume(self, mock_unmap): mock_unmap.side_effect = [stx_exception.RequestError, None] self.assertRaises(exception.Invalid, self.common.unmap_volume, test_volume, connector, self.connector_element) ret = self.common.unmap_volume(test_volume, connector, self.connector_element) self.assertIsNone(ret) mock_unmap.assert_called_with(encoded_volid, connector, self.connector_element) @mock.patch.object(STXClient, 'copy_volume') @mock.patch.object(STXClient, 'delete_volume') @mock.patch.object(STXClient, 'modify_volume_name') def test_retype(self, mock_modify, mock_delete, mock_copy): mock_copy.side_effect = [stx_exception.RequestError, None] self.assertRaises(exception.Invalid, self.common.migrate_volume, test_retype_volume, test_host) ret = self.common.migrate_volume(test_retype_volume, test_host) self.assertEqual((True, None), ret) ret = self.common.migrate_volume(test_retype_volume, {'capabilities': {}}) self.assertEqual((False, None), ret) @mock.patch.object(STXClient, 'modify_volume_name') def test_manage_existing(self, mock_modify): existing_ref = {'source-name': 'xxxx'} mock_modify.side_effect = [stx_exception.RequestError, None] self.assertRaises(exception.Invalid, self.common.manage_existing, test_volume, existing_ref) ret = self.common.manage_existing(test_volume, existing_ref) self.assertIsNone(ret) @mock.patch.object(STXClient, 'get_volume_size') def test_manage_existing_get_size(self, mock_volume): existing_ref = {'source-name': 'xxxx'} mock_volume.side_effect = [stx_exception.RequestError, 1] self.assertRaises(exception.Invalid, self.common.manage_existing_get_size, None, existing_ref) ret = 
self.common.manage_existing_get_size(None, existing_ref) self.assertEqual(1, ret) @mock.patch.object(STXClient, 'modify_volume_name') @mock.patch.object(STXClient, '_request') def test_manage_existing_snapshot(self, mock_response, mock_modify): fake_snap = fake(test_snap) mock_response.side_effect = [etree.XML(response_maps), etree.XML(response_vols)] snap_ref = {'source-name': 'snap'} ret = self.common.manage_existing_snapshot(fake_snap, snap_ref) self.assertIsNone(ret) newname = self.common._get_snap_name(test_snap['id']) mock_modify.assert_called_with('snap', newname) @mock.patch.object(STXClient, 'get_volume_size') def test_manage_existing_snapshot_get_size(self, mock_volume): existing_ref = {'source-name': 'xxxx'} mock_volume.side_effect = [stx_exception.RequestError, 1] self.assertRaises(exception.Invalid, self.common.manage_existing_get_size, None, existing_ref) ret = self.common.manage_existing_snapshot_get_size(None, existing_ref) self.assertEqual(1, ret) @mock.patch.object(STXClient, '_request') def test_get_manageable_volumes(self, mock_response): mock_response.side_effect = [etree.XML(response_maps), etree.XML(response_vols)] cinder_volumes = [fake(id='aa820dc7-851b-4be0-a7a3-7803ab555495')] marker = None limit = 1000 offset = 0 sort_keys = ['size'] sort_dirs = ['asc'] ret = self.common.get_manageable_volumes(cinder_volumes, marker, limit, offset, sort_keys, sort_dirs) # We expect to get back 3 volumes: one manageable, # one already managed by Cinder, and one mapped (hence unmanageable) self.assertEqual(len(ret), 3) reasons_not_seen = {'volume in use', 'already managed'} manageable_vols = 0 for vol in ret: if vol['reason_not_safe']: reasons_not_seen.discard(vol['reason_not_safe']) self.assertGreaterEqual(len(vol['reference']['source-name']), 3) if vol['safe_to_manage']: manageable_vols += 1 self.assertIsNone(vol.get('cinder-id')) else: self.assertIsNotNone(vol['reason_not_safe']) self.assertEqual(0, len(reasons_not_seen)) self.assertEqual(1, manageable_vols) @mock.patch.object(STXClient, '_request') def test_get_manageable_snapshots(self, mock_response): mock_response.side_effect = [etree.XML(response_maps), etree.XML(response_vols)] cinder_volumes = [fake(id='aa820dc7-851b-4be0-a7a3-7803ab555495')] marker = None limit = 1000 offset = 0 sort_keys = ['size'] sort_dirs = ['asc'] ret = self.common.get_manageable_snapshots(cinder_volumes, marker, limit, offset, sort_keys, sort_dirs) self.assertEqual(ret, [{ 'cinder_id': None, 'extra_info': None, 'reason_not_safe': None, 'reference': {'source-name': 'snap'}, 'safe_to_manage': True, 'size': 1, 'source_reference': {'source-name': 'bar'} }]) class TestISCSISeagateCommon(TestFCSeagateCommon): def setUp(self): super(TestISCSISeagateCommon, self).setUp() self.connector_element = 'initiator' class TestSeagateFC(test.TestCase): @mock.patch.object(STXCommon, 'do_setup') def setUp(self, mock_setup): super(TestSeagateFC, self).setUp() self.vendor_name = 'Seagate' mock_setup.return_value = True def fake_init(self, *args, **kwargs): super(STXFCDriver, self).__init__() self.common = None self.configuration = FakeConfiguration1() self.lookup_service = fczm_utils.create_lookup_service() STXFCDriver.__init__ = fake_init self.driver = STXFCDriver() self.driver.do_setup(None) def _test_with_mock(self, mock, method, args, expected=None): func = getattr(self.driver, method) mock.side_effect = [exception.Invalid(), None] self.assertRaises(exception.Invalid, func, *args) self.assertEqual(expected, func(*args)) @mock.patch.object(STXCommon, 
'create_volume') def test_create_volume(self, mock_create): self._test_with_mock(mock_create, 'create_volume', [None]) @mock.patch.object(STXCommon, 'create_cloned_volume') def test_create_cloned_volume(self, mock_create): self._test_with_mock(mock_create, 'create_cloned_volume', [None, None]) @mock.patch.object(STXCommon, 'create_volume_from_snapshot') def test_create_volume_from_snapshot(self, mock_create): self._test_with_mock(mock_create, 'create_volume_from_snapshot', [None, None]) @mock.patch.object(STXCommon, 'delete_volume') def test_delete_volume(self, mock_delete): self._test_with_mock(mock_delete, 'delete_volume', [None]) @mock.patch.object(STXCommon, 'create_snapshot') def test_create_snapshot(self, mock_create): self._test_with_mock(mock_create, 'create_snapshot', [None]) @mock.patch.object(STXCommon, 'delete_snapshot') def test_delete_snapshot(self, mock_delete): self._test_with_mock(mock_delete, 'delete_snapshot', [None]) @mock.patch.object(STXCommon, 'extend_volume') def test_extend_volume(self, mock_extend): self._test_with_mock(mock_extend, 'extend_volume', [None, 10]) @mock.patch.object(STXCommon, 'client_logout') @mock.patch.object(STXCommon, 'get_active_fc_target_ports') @mock.patch.object(STXCommon, 'map_volume') @mock.patch.object(STXCommon, 'client_login') def test_initialize_connection(self, mock_login, mock_map, mock_ports, mock_logout): mock_login.return_value = None mock_logout.return_value = None mock_map.side_effect = [exception.Invalid, 1] mock_ports.side_effect = [['id1']] self.assertRaises(exception.Invalid, self.driver.initialize_connection, test_volume, connector) mock_map.assert_called_with(test_volume, connector, 'wwpns') ret = self.driver.initialize_connection(test_volume, connector) self.assertEqual({'driver_volume_type': 'fibre_channel', 'data': {'initiator_target_map': { '111111111111111': ['id1'], '111111111111112': ['id1']}, 'target_wwn': ['id1'], 'target_lun': 1, 'target_discovered': True}}, ret) @mock.patch.object(STXCommon, 'unmap_volume') @mock.patch.object(STXClient, 'list_luns_for_host') def test_terminate_connection(self, mock_list, mock_unmap): mock_unmap.side_effect = [1] mock_list.side_effect = ['yes'] actual = {'driver_volume_type': 'fibre_channel', 'data': {}} ret = self.driver.terminate_connection(test_volume, connector) self.assertEqual(actual, ret) mock_unmap.assert_called_with(test_volume, connector, 'wwpns') ret = self.driver.terminate_connection(test_volume, connector) self.assertEqual(actual, ret) @mock.patch.object(STXCommon, 'get_volume_stats') def test_get_volume_stats(self, mock_stats): stats = {'storage_protocol': None, 'driver_version': self.driver.VERSION, 'volume_backend_name': None, 'vendor_name': self.vendor_name, 'pools': [{'free_capacity_gb': 90, 'reserved_percentage': 0, 'total_capacity_gb': 100, 'QoS_support': False, 'multiattach': True, 'location_info': 'xx:xx:xx:xx', 'pool_name': 'x'}]} mock_stats.side_effect = [exception.Invalid, stats, stats] self.assertRaises(exception.Invalid, self.driver.get_volume_stats, False) ret = self.driver.get_volume_stats(False) self.assertEqual(stats, ret) ret = self.driver.get_volume_stats(True) self.assertEqual(stats, ret) mock_stats.assert_called_with(True) @mock.patch.object(STXCommon, 'retype') def test_retype(self, mock_retype): mock_retype.side_effect = [exception.Invalid, True, False] args = [None, None, None, None, None] self.assertRaises(exception.Invalid, self.driver.retype, *args) self.assertTrue(self.driver.retype(*args)) self.assertFalse(self.driver.retype(*args)) 
@mock.patch.object(STXCommon, 'manage_existing') def test_manage_existing(self, mock_manage_existing): self._test_with_mock(mock_manage_existing, 'manage_existing', [None, None]) @mock.patch.object(STXCommon, 'manage_existing_get_size') def test_manage_size(self, mock_manage_size): mock_manage_size.side_effect = [exception.Invalid, 1] self.assertRaises(exception.Invalid, self.driver.manage_existing_get_size, None, None) self.assertEqual(1, self.driver.manage_existing_get_size(None, None)) class TestSeagateISCSI(TestSeagateFC): @mock.patch.object(STXCommon, 'do_setup') def setUp(self, mock_setup): super(TestSeagateISCSI, self).setUp() self.vendor_name = 'Seagate' mock_setup.return_value = True def fake_init(self, *args, **kwargs): super(STXISCSIDriver, self).__init__() self.common = None self.configuration = FakeConfiguration2() self.iscsi_ips = ['10.0.0.11'] STXISCSIDriver.__init__ = fake_init self.driver = STXISCSIDriver() self.driver.do_setup(None) @mock.patch.object(STXCommon, 'client_logout') @mock.patch.object(STXCommon, 'get_active_iscsi_target_portals') @mock.patch.object(STXCommon, 'get_active_iscsi_target_iqns') @mock.patch.object(STXCommon, 'map_volume') @mock.patch.object(STXCommon, 'client_login') def test_initialize_connection(self, mock_login, mock_map, mock_iqns, mock_portals, mock_logout): mock_login.return_value = None mock_logout.return_value = None mock_map.side_effect = [exception.Invalid, 1] self.driver.iscsi_ips = ['10.0.0.11'] self.driver.initialize_iscsi_ports() mock_iqns.side_effect = [['id2']] mock_portals.return_value = {'10.0.0.11': 'Up', '10.0.0.12': 'Up'} self.assertRaises(exception.Invalid, self.driver.initialize_connection, test_volume, connector) mock_map.assert_called_with(test_volume, connector, 'initiator') ret = self.driver.initialize_connection(test_volume, connector) self.assertEqual({'driver_volume_type': 'iscsi', 'data': {'target_iqn': 'id2', 'target_lun': 1, 'target_discovered': True, 'target_portal': '10.0.0.11:3260'}}, ret) @mock.patch.object(STXCommon, 'unmap_volume') def test_terminate_connection(self, mock_unmap): mock_unmap.side_effect = [exception.Invalid, 1] self.assertRaises(exception.Invalid, self.driver.terminate_connection, test_volume, connector) mock_unmap.assert_called_with(test_volume, connector, 'initiator') ret = self.driver.terminate_connection(test_volume, connector) self.assertIsNone(ret) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/volume/drivers/test_spdk.py0000664000175000017500000010030400000000000024011 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import copy import json from unittest import mock from os_brick import initiator from os_brick.initiator import connector from oslo_utils import timeutils from oslo_utils import units from cinder import context from cinder import objects from cinder.tests.unit import fake_constants as fake from cinder.tests.unit import fake_volume from cinder.tests.unit import test from cinder.tests.unit import utils as test_utils from cinder import utils from cinder.volume import configuration as conf from cinder.volume.drivers import spdk as spdk_driver BDEVS = [{ "num_blocks": 4096000, "name": "Nvme0n1", "driver_specific": { "nvme": { "trid": { "trtype": "PCIe", "traddr": "0000:00:04.0" }, "ns_data": { "id": 1 }, "pci_address": "0000:00:04.0", "vs": { "nvme_version": "1.1" }, "ctrlr_data": { "firmware_revision": "1.0", "serial_number": "deadbeef", "oacs": { "ns_manage": 0, "security": 0, "firmware": 0, "format": 0 }, "vendor_id": "0x8086", "model_number": "QEMU NVMe Ctrl" }, "csts": { "rdy": 1, "cfs": 0 } } }, "supported_io_types": { "reset": True, "nvme_admin": True, "unmap": False, "read": True, "write_zeroes": False, "write": True, "flush": True, "nvme_io": True }, "claimed": False, "block_size": 512, "product_name": "NVMe disk", "aliases": ["Nvme0n1"] }, { "num_blocks": 8192, "uuid": "70efd305-4e66-49bd-99ff-faeda5c3052d", "aliases": [ "Nvme0n1p0" ], "driver_specific": { "lvol": { "base_bdev": "Nvme0n1", "lvol_store_uuid": "58b17014-d4a1-4f85-9761-093643ed18f1", "thin_provision": False } }, "supported_io_types": { "reset": True, "nvme_admin": False, "unmap": True, "read": True, "write_zeroes": True, "write": True, "flush": False, "nvme_io": False }, "claimed": False, "block_size": 4096, "product_name": "Split Disk", "name": "Nvme0n1p0" }, { "num_blocks": 8192, "uuid": "70efd305-4e66-49bd-99ff-faeda5c3052d", "aliases": [ "Nvme0n1p1" ], "driver_specific": { "lvol": { "base_bdev": "Nvme0n1", "lvol_store_uuid": "58b17014-d4a1-4f85-9761-093643ed18f1", "thin_provision": False } }, "supported_io_types": { "reset": True, "nvme_admin": False, "unmap": True, "read": True, "write_zeroes": True, "write": True, "flush": False, "nvme_io": False }, "claimed": False, "block_size": 4096, "product_name": "Split Disk", "name": "Nvme0n1p1" }, { "num_blocks": 8192, "uuid": "70efd305-4e66-49bd-99ff-faeda5c3052d", "aliases": [ "lvs_test/lvol0" ], "driver_specific": { "lvol": { "base_bdev": "Malloc0", "lvol_store_uuid": "58b17014-d4a1-4f85-9761-093643ed18f1", "thin_provision": False } }, "supported_io_types": { "reset": True, "nvme_admin": False, "unmap": True, "read": True, "write_zeroes": True, "write": True, "flush": False, "nvme_io": False }, "claimed": False, "block_size": 4096, "product_name": "Logical Volume", "name": "58b17014-d4a1-4f85-9761-093643ed18f1_4294967297" }, { "num_blocks": 8192, "uuid": "8dec1964-d533-41df-bea7-40520efdb416", "aliases": [ "lvs_test/lvol1" ], "driver_specific": { "lvol": { "base_bdev": "Malloc0", "lvol_store_uuid": "58b17014-d4a1-4f85-9761-093643ed18f1", "thin_provision": True } }, "supported_io_types": { "reset": True, "nvme_admin": False, "unmap": True, "read": True, "write_zeroes": True, "write": True, "flush": False, "nvme_io": False }, "claimed": False, "block_size": 4096, "product_name": "Logical Volume", "name": "58b17014-d4a1-4f85-9761-093643ed18f1_4294967298" }] LVOL_STORES = [{ "uuid": "58b17014-d4a1-4f85-9761-093643ed18f1", "base_bdev": "Nvme0n1", "free_clusters": 5976, "cluster_size": 1048576, "total_data_clusters": 5976, "block_size": 4096, "name": "lvs_test" }] 
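# For reference: test__get_spdk_lvs_free_space below expects free space to be
# free_clusters * cluster_size / units.Gi, so the lvol store fixture above
# corresponds to about 5976 * 1048576 / 1024**3 ~= 5.8 GiB of free space.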
NVMF_SUBSYSTEMS = [{ "listen_addresses": [], "subtype": "Discovery", "nqn": "nqn.2014-08.org.nvmexpress.discovery", "hosts": [], "allow_any_host": True }, { "listen_addresses": [], "subtype": "NVMe", "hosts": [{ "nqn": "nqn.2016-06.io.spdk:init" }], "namespaces": [{ "bdev_name": "Nvme0n1p0", "nsid": 1, "name": "Nvme0n1p0" }], "allow_any_host": False, "serial_number": "SPDK00000000000001", "nqn": "nqn.2016-06.io.spdk:cnode1" }, { "listen_addresses": [], "subtype": "NVMe", "hosts": [], "namespaces": [{ "bdev_name": "Nvme1n1p0", "nsid": 1, "name": "Nvme1n1p0" }], "allow_any_host": True, "serial_number": "SPDK00000000000002", "nqn": "nqn.2016-06.io.spdk:cnode2" }] class Volume(object): def __init__(self): self.size = 1 self.name = "lvol2" class Snapshot(object): def __init__(self): self.name = "snapshot0" self.volume_size = 1 class JSONRPCException(Exception): def __init__(self, message): self.message = message class JSONRPCClient(object): def __init__(self, addr=None, port=None): self.methods = {"bdev_get_bdevs": self.get_bdevs, "bdev_lvol_get_lvstores": self.get_lvol_stores, "bdev_lvol_delete": self.destroy_lvol_bdev, "bdev_lvol_snapshot": self.snapshot_lvol_bdev, "bdev_lvol_clone": self.clone_lvol_bdev, "bdev_lvol_create": self.construct_lvol_bdev, "bdev_lvol_resize": self.resize_lvol_bdev, "nvmf_get_subsystems": self.get_nvmf_subsystems, "construct_nvmf_subsystem": self.construct_nvmf_subsystem, "nvmf_create_subsystem": self.nvmf_subsystem_create, "nvmf_subsystem_add_listener": self.nvmf_subsystem_add_listener, "nvmf_subsystem_add_ns": self.nvmf_subsystem_add_ns, "bdev_lvol_inflate": self.inflate_lvol_bdev} self.bdevs = copy.deepcopy(BDEVS) self.nvmf_subsystems = copy.deepcopy(NVMF_SUBSYSTEMS) self.lvol_stores = copy.deepcopy(LVOL_STORES) def get_bdevs(self, params=None): if params and 'name' in params: for bdev in self.bdevs: for alias in bdev['aliases']: if params['name'] in alias: return json.dumps({"result": [bdev]}) if bdev['name'] == params['name']: return json.dumps({"result": [bdev]}) return json.dumps({"error": "Not found"}) return json.dumps({"result": self.bdevs}) def destroy_lvol_bdev(self, params=None): if 'name' not in params: return json.dumps({}) i = 0 found_bdev = -1 for bdev in self.bdevs: if bdev['name'] == params['name']: found_bdev = i break i += 1 if found_bdev != -1: del self.bdevs[found_bdev] return json.dumps({"result": {}}) def get_lvol_stores(self, params=None): return json.dumps({"result": self.lvol_stores}) def snapshot_lvol_bdev(self, params=None): snapshot = { 'num_blocks': 5376, 'name': '58b17014-d4a1-4f85-9761-093643ed18f2', 'aliases': ['lvs_test/%s' % params['snapshot_name']], 'driver_specific': { 'lvol': { 'base_bdev': u'Malloc0', 'lvol_store_uuid': u'58b17014-d4a1-4f85-9761-093643ed18f1', 'thin_provision': False, 'clones': ['clone0', 'clone1'] } }, 'claimed': False, 'block_size': 4096, 'product_name': 'Logical Volume', 'supported_io_types': { 'reset': True, 'nvme_admin': False, 'unmap': True, 'read': True, 'write_zeroes': True, 'write': True, 'flush': False, 'nvme_io': False } } self.bdevs.append(snapshot) return json.dumps({"result": [snapshot]}) def clone_lvol_bdev(self, params=None): clone = { 'num_blocks': 7936, 'supported_io_types': { 'reset': True, 'nvme_admin': False, 'unmap': True, 'read': True, 'write_zeroes': True, 'write': True, 'flush': False, 'nvme_io': False }, 'name': '3735a554-0dce-4d13-ba67-597d41186104', 'driver_specific': { 'lvol': { 'base_bdev': 'Malloc0', 'lvol_store_uuid': '58b17014-d4a1-4f85-9761-093643ed18f1', 'thin_provision': 
False } }, 'block_size': 4096, 'claimed': False, 'aliases': [u'lvs_test/%s' % params['clone_name']], 'product_name': 'Logical Volume', 'uuid': '3735a554-0dce-4d13-ba67-597d41186104' } self.bdevs.append(clone) return json.dumps({"result": [clone]}) def construct_lvol_bdev(self, params=None): lvol_bdev = { "num_blocks": 8192, "uuid": "8dec1964-d533-41df-bea7-40520efdb416", "aliases": [ "lvs_test/%s" % params['lvol_name'] ], "driver_specific": { "lvol": { "base_bdev": "Malloc0", "lvol_store_uuid": "58b17014-d4a1-4f85-9761-093643ed18f1", "thin_provision": True } }, "supported_io_types": { "reset": True, "nvme_admin": False, "unmap": True, "read": True, "write_zeroes": True, "write": True, "flush": False, "nvme_io": False }, "claimed": False, "block_size": 4096, "product_name": "Logical Volume", "name": "58b17014-d4a1-4f85-9761-093643ed18f1_4294967299" } self.bdevs.append(lvol_bdev) return json.dumps({"result": [{}]}) def get_nvmf_subsystems(self, params=None): return json.dumps({"result": self.nvmf_subsystems}) def resize_lvol_bdev(self, params=None): if params: if "name" in params: tmp_bdev = json.loads( self.get_bdevs(params={"name": params['name']}))['result'] if "size" in params: for bdev in self.bdevs: if bdev['name'] == tmp_bdev[0]['name']: bdev['num_blocks'] = params['size'] \ / bdev['block_size'] return json.dumps({"result": {}}) return json.dumps({"error": {}}) def inflate_lvol_bdev(self, params=None): return json.dumps({'result': {}}) def construct_nvmf_subsystem(self, params=None): nvmf_subsystem = { "listen_addresses": [], "subtype": "NVMe", "hosts": [], "namespaces": [{ "bdev_name": "Nvme1n1p0", "nsid": 1, "name": "Nvme1n1p0" }], "allow_any_host": True, "serial_number": params['serial_number'], "nqn": params['nqn'] } self.nvmf_subsystems.append(nvmf_subsystem) return json.dumps({"result": nvmf_subsystem}) def nvmf_subsystem_create(self, params=None): nvmf_subsystem = { "namespaces": [], "nqn": params['nqn'], "serial_number": "S0000000000000000001", "allow_any_host": False, "subtype": "NVMe", "hosts": [], "listen_addresses": [] } self.nvmf_subsystems.append(nvmf_subsystem) return json.dumps({"result": nvmf_subsystem}) def nvmf_subsystem_add_listener(self, params=None): for nvmf_subsystem in self.nvmf_subsystems: if nvmf_subsystem['nqn'] == params['nqn']: nvmf_subsystem['listen_addresses'].append( params['listen_address'] ) return json.dumps({"result": ""}) def nvmf_subsystem_add_ns(self, params=None): for nvmf_subsystem in self.nvmf_subsystems: if nvmf_subsystem['nqn'] == params['nqn']: nvmf_subsystem['namespaces'].append( params['namespace'] ) return json.dumps({"result": ""}) def call(self, method, params=None): req = {} req['jsonrpc'] = '2.0' req['method'] = method req['id'] = 1 if (params): req['params'] = params response = json.loads(self.methods[method](params)) if not response: if method == "kill_instance": return {} msg = "Timeout while waiting for response:" raise JSONRPCException(msg) if 'error' in response: msg = "\n".join(["Got JSON-RPC error response", "request:", json.dumps(req, indent=2), "response:", json.dumps(response['error'], indent=2)]) raise JSONRPCException(msg) return response['result'] class SpdkDriverTestCase(test.TestCase): def setUp(self): super(SpdkDriverTestCase, self).setUp() self.configuration = mock.Mock(conf.Configuration) self.configuration.target_helper = "" self.configuration.target_ip_address = "192.168.0.1" self.configuration.target_secondary_ip_addresses = [] self.configuration.target_port = 4420 self.configuration.target_prefix = 
"nqn.2014-08.io.spdk" self.configuration.nvmeof_conn_info_version = 1 self.configuration.nvmet_port_id = "1" self.configuration.nvmet_ns_id = "fake_id" self.configuration.nvmet_subsystem_name = "2014-08.io.spdk" self.configuration.target_protocol = "nvmet_rdma" self.configuration.spdk_rpc_ip = "127.0.0.1" self.configuration.spdk_rpc_port = 8000 self.configuration.spdk_rpc_protocol = "https" mock_safe_get = mock.Mock() mock_safe_get.return_value = 'spdk-nvmeof' self.configuration.safe_get = mock_safe_get self.configuration.lvm_share_target = False self.jsonrpcclient = JSONRPCClient() self.driver = spdk_driver.SPDKDriver(configuration= self.configuration) self._context = context.get_admin_context() self.updated_at = timeutils.utcnow() def test__update_volume_stats(self): with mock.patch.object(self.driver, "_rpc_call", self.jsonrpcclient.call): self.driver._update_volume_stats() self.assertEqual(1, len(self.driver._stats['pools'])) self.assertEqual("lvs_test", self.driver._stats['pools'][0]['pool_name']) self.assertEqual('SPDK', self.driver._stats['volume_backend_name']) self.assertEqual('Open Source', self.driver._stats['vendor_name']) self.assertEqual('NVMe-oF', self.driver._stats['storage_protocol']) self.assertIsNotNone(self.driver._stats['driver_version']) def test__get_spdk_volume_name(self): with mock.patch.object(self.driver, "_rpc_call", self.jsonrpcclient.call): bdev = self.driver._get_spdk_volume_name("lvs_test/lvol0") self.assertEqual('58b17014-d4a1-4f85-9761' '-093643ed18f1_4294967297', bdev) bdev = self.driver._get_spdk_volume_name("Nvme1n1") self.assertIsNone(bdev) def test__get_spdk_lvs_uuid(self): with mock.patch.object(self.driver, "_rpc_call", self.jsonrpcclient.call): bdev = self.driver._rpc_call( "bdev_get_bdevs", params={"name": "lvs_test/lvol0"}) self.assertEqual( bdev[0]['driver_specific']['lvol']['lvol_store_uuid'], self.driver._get_spdk_lvs_uuid( "58b17014-d4a1-4f85-9761-093643ed18f1_4294967297")) self.assertIsNone( self.driver._get_spdk_lvs_uuid("lvs_test/fake")) def test__get_spdk_lvs_free_space(self): with mock.patch.object(self.driver, "_rpc_call", self.jsonrpcclient.call): lvs = self.driver._rpc_call("bdev_lvol_get_lvstores") lvol_store = None for lvol in lvs: if lvol['name'] == "lvs_test": lvol_store = lvol self.assertIsNotNone(lvol_store) free_size = (lvol_store['free_clusters'] * lvol_store['cluster_size'] / units.Gi) self.assertEqual(free_size, self.driver._get_spdk_lvs_free_space( "58b17014-d4a1-4f85-9761-093643ed18f1")) self.assertEqual(0, self.driver._get_spdk_lvs_free_space("fake")) def test__delete_bdev(self): with mock.patch.object(self.driver, "_rpc_call", self.jsonrpcclient.call): self.driver._delete_bdev("lvs_test/lvol1") bdev = self.driver._get_spdk_volume_name("lvs_test/lvol1") self.assertIsNone(bdev) self.driver._delete_bdev("lvs_test/lvol1") bdev = self.driver._get_spdk_volume_name("lvs_test/lvol1") self.assertIsNone(bdev) def test__create_volume(self): with mock.patch.object(self.driver, "_rpc_call", self.jsonrpcclient.call): self.driver._create_volume(Volume()) bdev = self.driver._get_spdk_volume_name("lvs_test/lvol2") self.assertEqual("58b17014-d4a1-4f85-9761" "-093643ed18f1_4294967299", bdev) volume_clone = Volume() volume_clone.name = "clone0" self.driver._rpc_call("bdev_lvol_snapshot", params={'snapshot_name': "snapshot0", 'lvol_name': "lvs_test/lvol2"}) bdev = self.driver._get_spdk_volume_name("lvs_test/snapshot0") self.assertEqual("58b17014-d4a1-4f85-9761-093643ed18f2", bdev) snapshot = Snapshot() 
self.driver._create_volume(volume_clone, snapshot) bdev = self.driver._get_spdk_volume_name("lvs_test/clone0") self.assertEqual("3735a554-0dce-4d13-ba67-597d41186104", bdev) def test_check_for_setup_error(self): with mock.patch.object(self.driver, "_rpc_call", self.jsonrpcclient.call): self.driver.check_for_setup_error() @mock.patch('cinder.db.sqlalchemy.api.volume_get') def test_create_volume(self, volume_get): with mock.patch.object(self.driver, "_rpc_call", self.jsonrpcclient.call): db_volume = fake_volume.fake_db_volume() ctxt = context.get_admin_context() db_volume = objects.Volume._from_db_object(ctxt, objects.Volume(), db_volume) volume_get.return_value = db_volume self.driver.create_volume(db_volume) bdev = self.driver._get_spdk_volume_name("lvs_test/%s" % db_volume.name) self.assertEqual("58b17014-d4a1-4f85-9761" "-093643ed18f1_4294967299", bdev) @mock.patch('cinder.db.sqlalchemy.api.volume_get') def test_delete_volume(self, volume_get): with mock.patch.object(self.driver, "_rpc_call", self.jsonrpcclient.call): db_volume = fake_volume.fake_db_volume() with mock.patch.object(self.driver.target_driver, "_rpc_call", self.jsonrpcclient.call): nqn = "nqn.2016-06.io.spdk:cnode%s" \ % self.driver.target_driver._get_first_free_node() db_volume['provider_id'] = nqn ctxt = context.get_admin_context() db_volume = objects.Volume._from_db_object(ctxt, objects.Volume(), db_volume) volume_get.return_value = db_volume start_bdevs_len = len(self.driver._rpc_call('bdev_get_bdevs')) self.driver.create_volume(db_volume) tmp_bdevs = self.driver._rpc_call('bdev_get_bdevs') self.assertEqual(start_bdevs_len + 1, len(tmp_bdevs)) volume = Volume() volume.name = "lvs_test/%s" % db_volume.name volume_name = self.driver._get_spdk_volume_name(volume.name) self.driver._rpc_call('bdev_lvol_delete', {"name": volume_name}) self.driver.delete_volume(volume) bdev = self.driver._get_spdk_volume_name("lvs_test/%s" % db_volume.name) self.assertIsNone(bdev) tmp_bdevs = self.driver._rpc_call('bdev_get_bdevs') self.assertEqual(start_bdevs_len, len(tmp_bdevs)) def get_volume_stats(self): with mock.patch.object(self.driver, "_rpc_call", self.jsonrpcclient.call): self.driver.get_volume_stats(True) self.driver.get_volume_stats(False) def test_create_volume_from_snapshot(self): with mock.patch.object(self.driver, "_rpc_call", self.jsonrpcclient.call): volume_clone = Volume() volume_clone.name = "clone0" self.driver._rpc_call("bdev_lvol_snapshot", params={'snapshot_name': "snapshot0", 'lvol_name': "lvs_test/lvol2"}) snapshot = Snapshot() self.driver.create_volume_from_snapshot(volume_clone, snapshot) bdev = self.driver._get_spdk_volume_name("lvs_test/clone0") self.assertEqual("3735a554-0dce-4d13-ba67-597d41186104", bdev) @mock.patch('cinder.db.sqlalchemy.api.volume_get') def test_create_snapshot(self, volume_get): with mock.patch.object(self.driver, "_rpc_call", self.jsonrpcclient.call): db_volume = fake_volume.fake_db_volume() db_volume['name'] = "lvs_test/lvol0" ctxt = context.get_admin_context() db_volume = objects.Volume._from_db_object(ctxt, objects.Volume(), db_volume) volume_get.return_value = db_volume snapshot = {} snapshot['volume_id'] = db_volume['id'] snapshot['name'] = "snapshot0" snapshot['volume'] = db_volume for bdev in self.jsonrpcclient.bdevs: if bdev['aliases'][-1] == "lvs_test/lvol0": bdev['aliases'].append(db_volume.name) self.driver.create_snapshot(snapshot) bdev = self.driver._get_spdk_volume_name("lvs_test/snapshot0") self.assertEqual("58b17014-d4a1-4f85-9761-093643ed18f2", bdev) def 
test_delete_snapshot(self): with mock.patch.object(self.driver, "_rpc_call", self.jsonrpcclient.call): snapshot = Snapshot() snapshot.name = "snapshot0" self.driver._rpc_call("bdev_lvol_snapshot", params = {'snapshot_name': snapshot.name}) self.driver.delete_snapshot(snapshot) snapshot = self.driver._get_spdk_volume_name("lvs_test/" + snapshot.name) self.assertIsNone(snapshot) @mock.patch('cinder.db.sqlalchemy.api.volume_get') def test_create_cloned_volume(self, volume_get): with mock.patch.object(self.driver, "_rpc_call", self.jsonrpcclient.call): db_volume = fake_volume.fake_db_volume() db_volume['name'] = "lvs_test/lvol0" db_volume['size'] = 1 ctxt = context.get_admin_context() db_volume = objects.Volume._from_db_object(ctxt, objects.Volume(), db_volume) cloned_volume = Volume() cloned_volume.name = 'lvs_test/cloned_volume' for bdev in self.jsonrpcclient.bdevs: if bdev['aliases'][-1] == "lvs_test/lvol0": bdev['aliases'].append(db_volume.name) self.driver.create_cloned_volume(cloned_volume, db_volume) bdev = self.driver._get_spdk_volume_name("lvs_test/cloned_volume") self.assertEqual("3735a554-0dce-4d13-ba67-597d41186104", bdev) @mock.patch('cinder.db.sqlalchemy.api.volume_get') def test_copy_image_to_volume(self, volume_get): with mock.patch.object(self.driver, "_rpc_call", self.jsonrpcclient.call): db_volume = fake_volume.fake_db_volume() db_volume['provider_location'] = "127.0.0.1:3262 RDMA " \ "2016-06.io.spdk:cnode2" ctxt = context.get_admin_context() db_volume = objects.Volume._from_db_object(ctxt, objects.Volume(), db_volume) volume_get.return_value = db_volume with mock.patch.object(self.driver.target_driver, "_rpc_call", self.jsonrpcclient.call): self.driver.copy_image_to_volume(ctxt, db_volume, None, None) @mock.patch('cinder.db.sqlalchemy.api.volume_get') def test_copy_volume_to_image(self, volume_get): with mock.patch.object(self.driver, "_rpc_call", self.jsonrpcclient.call): provider_location = "127.0.0.1:3262 RDMA 2016-06.io.spdk:cnode2" volume = test_utils.create_volume( self._context, volume_type_id=fake.VOLUME_TYPE_ID, updated_at=self.updated_at, provider_location=provider_location) extra_specs = { 'image_service:store_id': 'fake-store' } test_utils.create_volume_type(self._context.elevated(), id=fake.VOLUME_TYPE_ID, name="test_type", extra_specs=extra_specs) ctxt = context.get_admin_context() volume_get.return_value = volume with mock.patch.object(self.driver.target_driver, "_rpc_call", self.jsonrpcclient.call): self.driver.copy_volume_to_image(ctxt, volume, None, None) def test_extend_volume(self): with mock.patch.object(self.driver, "_rpc_call", self.jsonrpcclient.call): volume = Volume() volume.name = "lvs_test/lvol0" self.driver.extend_volume(volume, 2) bdev = self.driver._rpc_call("bdev_get_bdevs", params={"name": "lvs_test/lvol0"}) self.assertEqual(2 * units.Gi, bdev[0]['num_blocks'] * bdev[0]['block_size']) def test_ensure_export(self): pass def test_create_export(self): with mock.patch.object(self.driver, "_rpc_call", self.jsonrpcclient.call): db_volume = fake_volume.fake_db_volume() db_volume['provider_location'] = "192.168.0.1:4420 rdma " \ "2014-08.io.spdk:cnode2" ctxt = context.get_admin_context() db_volume = objects.Volume._from_db_object(ctxt, objects.Volume(), db_volume) with mock.patch.object(self.driver.target_driver, "_rpc_call", self.jsonrpcclient.call): expected_return = { 'provider_location': self.driver.target_driver.get_nvmeof_location( "nqn.%s:cnode%s" % ( self.configuration.nvmet_subsystem_name, self.driver.target_driver._get_first_free_node() 
), [self.configuration.target_ip_address], self.configuration.target_port, "rdma", self.configuration.nvmet_ns_id ), 'provider_auth': '' } export = self.driver.create_export(ctxt, db_volume, None) self.assertEqual(expected_return, export) def test_remove_export(self): with mock.patch.object(self.driver, "_rpc_call", self.jsonrpcclient.call): db_volume = fake_volume.fake_db_volume() db_volume['provider_location'] = "127.0.0.1:4420 rdma " \ "2016-06.io.spdk:cnode2" ctxt = context.get_admin_context() db_volume = objects.Volume._from_db_object(ctxt, objects.Volume(), db_volume) with mock.patch.object(self.driver.target_driver, "_rpc_call", self.jsonrpcclient.call): self.driver.create_export(ctxt, db_volume, None) self.assertIsNone(self.driver.remove_export(ctxt, db_volume)) def test_initialize_connection(self): with mock.patch.object(self.driver, "_rpc_call", self.jsonrpcclient.call): db_volume = fake_volume.fake_db_volume() db_volume['provider_location'] = "127.0.0.1:3262 RDMA " \ "2016-06.io.spdk:cnode2 1" ctxt = context.get_admin_context() db_volume = objects.Volume._from_db_object(ctxt, objects.Volume(), db_volume) target_connector = \ connector.InitiatorConnector.factory(initiator.NVME, utils.get_root_helper()) self.driver.initialize_connection(db_volume, target_connector) def test_validate_connector(self): mock_connector = {'nqn': 'fake-nqn'} self.assertTrue(self.driver.validate_connector(mock_connector)) def test_terminate_connection(self): pass ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/volume/drivers/test_storpool.py0000664000175000017500000006200400000000000024735 0ustar00zuulzuul00000000000000# Copyright 2014 - 2017, 2019 StorPool # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
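# The StorPool tests below run entirely in memory: MockAPI stands in for the
# StorPool management API, the module-level ``volumes`` and ``snapshots``
# dictionaries hold the simulated cluster state, MockVolumeDB fakes
# ``db.volume_get()``, and the ``mock_volume_types`` decorator substitutes
# volume-type extra specs, so no real StorPool cluster or Cinder database is
# required.  Cinder IDs are mapped to StorPool object names by the helpers
# defined below, e.g. volumeName('1') == 'os--volume-1' and
# snapshotName('clone', '2') == 'os--clone--none--snapshot-2'.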
import itertools import re from unittest import mock import ddt from os_brick.initiator import storpool_utils from os_brick.tests.initiator import test_storpool_utils from oslo_utils import units from cinder import exception from cinder.tests.unit import fake_constants from cinder.tests.unit import test from cinder.volume import configuration as conf from cinder.volume.drivers import storpool as driver volume_types = { fake_constants.VOLUME_TYPE_ID: {}, fake_constants.VOLUME_TYPE2_ID: {'storpool_template': 'ssd'}, fake_constants.VOLUME_TYPE3_ID: {'storpool_template': 'hdd'} } volumes = {} snapshots = {} def MockExtraSpecs(vtype): return volume_types[vtype] def mock_volume_types(f): def _types_inner_inner1(inst, *args, **kwargs): @mock.patch('cinder.volume.volume_types.get_volume_type_extra_specs', new=MockExtraSpecs) def _types_inner_inner2(): return f(inst, *args, **kwargs) return _types_inner_inner2() return _types_inner_inner1 def volumeName(vid): return 'os--volume-{id}'.format(id=vid) def snapshotName(vtype, vid, more=None): return 'os--{t}--{m}--snapshot-{id}'.format( t=vtype, m="none" if more is None else more, id=vid ) class MockAPI(object): def __init__(self, *args): self._disks = {} for disk_id in [1, 2, 3, 4]: self._disks[disk_id] = { 'id': disk_id, 'generationLeft': -1, 'agCount': 14, 'agFree': 12, 'agAllocated': 1 } self._disks[3]['generationLeft'] = 42 self._templates = [{'name': name} for name in ('ssd', 'hdd')] def disks_list(self): return self._disks def snapshot_create(self, vname, snap): snapshots[snap['name']] = dict(volumes[vname]) def snapshot_update(self, snap, data): sdata = snapshots[snap] sdata.update(data) def snapshot_delete(self, name): del snapshots[name] def volume_create(self, vol): name = vol['name'] if name in volumes: raise storpool_utils.StorPoolAPIError( 'none', {'error': { 'descr': 'volume already exists'}}) data = dict(vol) if 'parent' in vol and 'template' not in vol: sdata = snapshots[vol['parent']] if 'template' in sdata: data['template'] = sdata['template'] if 'baseOn' in vol and 'template' not in vol: vdata = volumes[vol['baseOn']] if 'template' in vdata: data['template'] = vdata['template'] if 'template' not in data: data['template'] = None volumes[name] = data def volume_delete(self, name): del volumes[name] def volumes_list(self): the_volumes = [] for volume in volumes: the_volumes.append({'name': volume}) return the_volumes def volume_templates_list(self): return self._templates def volumes_reassign(self, json): pass def volume_update(self, name, data): if 'size' in data: volumes[name]['size'] = data['size'] if 'rename' in data and data['rename'] != name: new_name = data['rename'] volumes[new_name] = volumes[name] if volumes[new_name]['name'] == name: volumes[new_name]['name'] = new_name del volumes[name] def volume_revert(self, name, data): if name not in volumes: raise storpool_utils.StorPoolAPIError( 'none', {'error': { 'descr': 'No such volume {name}'.format(name=name)}}) snapname = data['toSnapshot'] if snapname not in snapshots: raise storpool_utils.StorPoolAPIError( 'none', {'error': { 'descr': 'No such snapshot {name}'.format(name=snapname)}}) volumes[name] = dict(snapshots[snapname]) class MockVolumeDB(object): """Simulate a Cinder database with a volume_get() method.""" def __init__(self, vol_types=None): """Store the specified volume types mapping if necessary.""" self.vol_types = vol_types if vol_types is not None else {} def volume_get(self, _context, vid): """Get a volume-like structure, only the fields we care about.""" # 
Still, try to at least make sure we know about that volume return { 'id': vid, 'size': volumes[volumeName(vid)]['size'], 'volume_type': self.vol_types.get(vid), } def MockSPConfig(section = 's01'): res = {} m = re.match('^s0*([A-Za-z0-9]+)$', section) if m: res['SP_OURID'] = m.group(1) return res @ddt.ddt @mock.patch('os_brick.initiator.storpool_utils.get_conf', MockSPConfig) class StorPoolTestCase(test.TestCase): def setUp(self): super(StorPoolTestCase, self).setUp() self.cfg = mock.Mock(spec=conf.Configuration) self.cfg.volume_backend_name = 'storpool_test' self.cfg.storpool_template = None self.cfg.storpool_replication = 3 mock_exec = mock.Mock() mock_exec.return_value = ('', '') self.driver = driver.StorPoolDriver(execute=mock_exec, configuration=self.cfg) with ( mock.patch( 'os_brick.initiator.storpool_utils.get_conf' ) as get_conf, mock.patch( 'os_brick.initiator.storpool_utils.StorPoolAPI', MockAPI) ): get_conf.return_value = test_storpool_utils.SP_CONF self.driver.check_for_setup_error() @ddt.data( (5, TypeError), ({'no-host': None}, KeyError), ({'host': 'sbad'}, driver.StorPoolConfigurationInvalid), ({'host': 's01'}, None), ({'host': 'none'}, None), ) @ddt.unpack def test_validate_connector(self, conn, exc): if exc is None: self.assertTrue(self.driver.validate_connector(conn)) else: self.assertRaises(exc, self.driver.validate_connector, conn) @ddt.data( (5, TypeError), ({'no-host': None}, KeyError), ({'host': 'sbad'}, driver.StorPoolConfigurationInvalid), ) @ddt.unpack def test_initialize_connection_bad(self, conn, exc): self.assertRaises(exc, self.driver.initialize_connection, None, conn) @ddt.data( (1, '42', 's01'), (2, '616', 's02'), (65, '1610', 'none'), ) @ddt.unpack def test_initialize_connection_good(self, cid, hid, name): c = self.driver.initialize_connection({'id': hid}, {'host': name}) self.assertEqual('storpool', c['driver_volume_type']) self.assertDictEqual({'client_id': cid, 'volume': hid, 'access_mode': 'rw'}, c['data']) def test_noop_functions(self): self.driver.terminate_connection(None, None) self.driver.create_export(None, None, {}) self.driver.remove_export(None, None) def test_stats(self): stats = self.driver.get_volume_stats(refresh=True) self.assertEqual('StorPool', stats['vendor_name']) self.assertEqual('storpool', stats['storage_protocol']) self.assertListEqual(['default', 'template_hdd', 'template_ssd'], sorted([p['pool_name'] for p in stats['pools']])) r = re.compile(r'^template_([A-Za-z0-9_]+)$') for pool in stats['pools']: self.assertEqual(21, pool['total_capacity_gb']) self.assertEqual(5, int(pool['free_capacity_gb'])) self.assertTrue(pool['multiattach']) self.assertFalse(pool['QoS_support']) self.assertFalse(pool['thick_provisioning_support']) self.assertTrue(pool['thin_provisioning_support']) if pool['pool_name'] != 'default': m = r.match(pool['pool_name']) self.assertIsNotNone(m) self.assertIsNotNone(m.group(1)) self.assertEqual(m.group(1), pool['storpool_template']) def assertVolumeNames(self, names): self.assertListEqual(sorted([volumeName(n) for n in names]), sorted(volumes.keys())) self.assertListEqual(sorted([volumeName(n) for n in names]), sorted(data['name'] for data in volumes.values())) def assertSnapshotNames(self, specs): self.assertListEqual( sorted(snapshotName(spec[0], spec[1]) for spec in specs), sorted(snapshots.keys())) @mock_volume_types def test_create_delete_volume(self): self.assertVolumeNames([]) self.assertDictEqual({}, volumes) self.assertDictEqual({}, snapshots) self.driver.create_volume({'id': '1', 'name': 'v1', 'size': 1, 
'volume_type': {'id': fake_constants.VOLUME_TYPE_ID}}) self.assertCountEqual([volumeName('1')], volumes.keys()) self.assertVolumeNames(('1',)) v = volumes[volumeName('1')] self.assertEqual(1 * units.Gi, v['size']) self.assertIsNone(v['template']) self.assertEqual(3, v['replication']) caught = False try: self.driver.create_volume( {'id': '1', 'name': 'v1', 'size': 0, 'volume_type': {'id': fake_constants.VOLUME_TYPE_ID}}) except exception.VolumeBackendAPIException: caught = True self.assertTrue(caught) self.driver.delete_volume({'id': '1'}) self.assertVolumeNames([]) self.assertDictEqual({}, volumes) self.driver.create_volume( {'id': '1', 'name': 'v1', 'size': 2, 'volume_type': {'id': fake_constants.VOLUME_TYPE_ID}}) self.assertVolumeNames(('1',)) v = volumes[volumeName('1')] self.assertEqual(2 * units.Gi, v['size']) self.assertIsNone(v['template']) self.assertEqual(3, v['replication']) self.driver.create_volume({'id': '2', 'name': 'v2', 'size': 3, 'volume_type': {'id': fake_constants.VOLUME_TYPE_ID}}) self.assertVolumeNames(('1', '2')) v = volumes[volumeName('2')] self.assertEqual(3 * units.Gi, v['size']) self.assertIsNone(v['template']) self.assertEqual(3, v['replication']) self.driver.create_volume( {'id': '3', 'name': 'v2', 'size': 4, 'volume_type': {'id': fake_constants.VOLUME_TYPE2_ID}}) self.assertVolumeNames(('1', '2', '3')) v = volumes[volumeName('3')] self.assertEqual(4 * units.Gi, v['size']) self.assertEqual('ssd', v['template']) self.assertNotIn('replication', v.keys()) self.driver.create_volume( {'id': '4', 'name': 'v2', 'size': 5, 'volume_type': {'id': fake_constants.VOLUME_TYPE3_ID}}) self.assertVolumeNames(('1', '2', '3', '4')) v = volumes[volumeName('4')] self.assertEqual(5 * units.Gi, v['size']) self.assertEqual('hdd', v['template']) self.assertNotIn('replication', v.keys()) # Make sure the dictionary is not corrupted somehow... 
v = volumes[volumeName('1')] self.assertEqual(2 * units.Gi, v['size']) self.assertIsNone(v['template']) self.assertEqual(3, v['replication']) for vid in ('1', '2', '3', '4'): self.driver.delete_volume({'id': vid}) self.assertVolumeNames([]) self.assertDictEqual({}, volumes) self.assertDictEqual({}, snapshots) @mock_volume_types def test_update_migrated_volume(self): self.assertVolumeNames([]) self.assertDictEqual({}, volumes) self.assertDictEqual({}, snapshots) # Create two volumes self.driver.create_volume( {'id': '1', 'name': 'v1', 'size': 1, 'volume_type': {'id': fake_constants.VOLUME_TYPE_ID}}) self.driver.create_volume( {'id': '2', 'name': 'v2', 'size': 1, 'volume_type': {'id': fake_constants.VOLUME_TYPE_ID}}) self.assertCountEqual([volumeName('1'), volumeName('2')], volumes.keys()) self.assertVolumeNames(('1', '2',)) # Failure: the "migrated" volume does not even exist res = self.driver.update_migrated_volume(None, {'id': '1'}, {'id': '3', '_name_id': '1'}, 'available') self.assertDictEqual({'_name_id': '1'}, res) # Success: rename the migrated volume to match the original res = self.driver.update_migrated_volume(None, {'id': '3'}, {'id': '2', '_name_id': '3'}, 'available') self.assertDictEqual({'_name_id': None}, res) self.assertCountEqual([volumeName('1'), volumeName('3')], volumes.keys()) self.assertVolumeNames(('1', '3',)) # Success: swap volume names with an existing volume res = self.driver.update_migrated_volume(None, {'id': '1'}, {'id': '3', '_name_id': '1'}, 'available') self.assertDictEqual({'_name_id': None}, res) self.assertCountEqual([volumeName('1'), volumeName('3')], volumes.keys()) self.assertVolumeNames(('1', '3',)) for vid in ('1', '3'): self.driver.delete_volume({'id': vid}) self.assertVolumeNames([]) self.assertDictEqual({}, volumes) self.assertDictEqual({}, snapshots) @mock_volume_types def test_clone_extend_volume(self): self.assertVolumeNames([]) self.assertDictEqual({}, volumes) self.assertDictEqual({}, snapshots) self.driver.create_volume( {'id': '1', 'name': 'v1', 'size': 1, 'volume_type': {'id': fake_constants.VOLUME_TYPE_ID}}) self.assertVolumeNames(('1',)) self.driver.extend_volume({'id': '1'}, 2) self.assertEqual(2 * units.Gi, volumes[volumeName('1')]['size']) with mock.patch.object(self.driver, 'db', new=MockVolumeDB()): self.driver.create_cloned_volume( { 'id': '2', 'name': 'clo', 'size': 3, 'volume_type': {'id': fake_constants.VOLUME_TYPE_ID} }, {'id': 1}) self.assertVolumeNames(('1', '2')) self.assertDictEqual({}, snapshots) # We do not provide a StorPool template name in either of the volumes' # types, so create_cloned_volume() should take the baseOn shortcut. 
vol2 = volumes[volumeName('2')] self.assertEqual(vol2['baseOn'], volumeName('1')) self.assertNotIn('parent', vol2) self.driver.delete_volume({'id': 1}) self.driver.delete_volume({'id': 2}) self.assertDictEqual({}, volumes) self.assertDictEqual({}, snapshots) @ddt.data(*itertools.product( [{'id': key} for key in sorted(volume_types.keys())], [{'id': key} for key in sorted(volume_types.keys())])) @ddt.unpack @mock_volume_types def test_create_cloned_volume(self, src_type, dst_type): self.assertDictEqual({}, volumes) self.assertDictEqual({}, snapshots) src_template = volume_types[src_type['id']].get('storpool_template') dst_template = volume_types[dst_type['id']].get('storpool_template') src_name = 's-none' if src_template is None else 's-' + src_template dst_name = 'd-none' if dst_template is None else 'd-' + dst_template snap_name = snapshotName('clone', '2') vdata1 = { 'id': '1', 'name': src_name, 'size': 1, 'volume_type': src_type, } self.assertEqual( self.driver._template_from_volume(vdata1), src_template) self.driver.create_volume(vdata1) self.assertVolumeNames(('1',)) vdata2 = { 'id': 2, 'name': dst_name, 'size': 1, 'volume_type': dst_type, } self.assertEqual( self.driver._template_from_volume(vdata2), dst_template) with mock.patch.object(self.driver, 'db', new=MockVolumeDB(vol_types={'1': src_type})): self.driver.create_cloned_volume(vdata2, {'id': '1'}) self.assertVolumeNames(('1', '2')) vol2 = volumes[volumeName('2')] self.assertEqual(vol2['template'], dst_template) if src_template == dst_template: self.assertEqual(vol2['baseOn'], volumeName('1')) self.assertNotIn('parent', vol2) self.assertDictEqual({}, snapshots) else: self.assertNotIn('baseOn', vol2) self.assertEqual(vol2['parent'], snap_name) self.assertSnapshotNames((('clone', '2'),)) self.assertEqual(snapshots[snap_name]['template'], dst_template) self.driver.delete_volume({'id': '1'}) self.driver.delete_volume({'id': '2'}) if src_template != dst_template: del snapshots[snap_name] self.assertDictEqual({}, volumes) self.assertDictEqual({}, snapshots) @mock_volume_types def test_config_replication(self): self.assertVolumeNames([]) self.assertDictEqual({}, volumes) self.assertDictEqual({}, snapshots) save_repl = self.driver.configuration.storpool_replication self.driver.configuration.storpool_replication = 3 stats = self.driver.get_volume_stats(refresh=True) pool = stats['pools'][0] self.assertEqual(21, pool['total_capacity_gb']) self.assertEqual(5, int(pool['free_capacity_gb'])) self.driver.create_volume( {'id': 'cfgrepl1', 'name': 'v1', 'size': 1, 'volume_type': {'id': fake_constants.VOLUME_TYPE_ID}}) self.assertVolumeNames(('cfgrepl1',)) v = volumes[volumeName('cfgrepl1')] self.assertEqual(3, v['replication']) self.assertIsNone(v['template']) self.driver.delete_volume({'id': 'cfgrepl1'}) self.driver.configuration.storpool_replication = 2 stats = self.driver.get_volume_stats(refresh=True) pool = stats['pools'][0] self.assertEqual(21, pool['total_capacity_gb']) self.assertEqual(8, int(pool['free_capacity_gb'])) self.driver.create_volume( {'id': 'cfgrepl2', 'name': 'v1', 'size': 1, 'volume_type': {'id': fake_constants.VOLUME_TYPE_ID}}) self.assertVolumeNames(('cfgrepl2',)) v = volumes[volumeName('cfgrepl2')] self.assertEqual(2, v['replication']) self.assertIsNone(v['template']) self.driver.delete_volume({'id': 'cfgrepl2'}) self.driver.create_volume( {'id': 'cfgrepl3', 'name': 'v1', 'size': 1, 'volume_type': {'id': fake_constants.VOLUME_TYPE2_ID}}) self.assertVolumeNames(('cfgrepl3',)) v = volumes[volumeName('cfgrepl3')] 
self.assertNotIn('replication', v) self.assertEqual('ssd', v['template']) self.driver.delete_volume({'id': 'cfgrepl3'}) self.driver.configuration.storpool_replication = save_repl self.assertVolumeNames([]) self.assertDictEqual({}, volumes) self.assertDictEqual({}, snapshots) @mock_volume_types def test_config_template(self): self.assertVolumeNames([]) self.assertDictEqual({}, volumes) self.assertDictEqual({}, snapshots) save_template = self.driver.configuration.storpool_template self.driver.configuration.storpool_template = None self.driver.create_volume( {'id': 'cfgtempl1', 'name': 'v1', 'size': 1, 'volume_type': {'id': fake_constants.VOLUME_TYPE_ID}}) self.assertVolumeNames(('cfgtempl1',)) v = volumes[volumeName('cfgtempl1')] self.assertEqual(3, v['replication']) self.assertIsNone(v['template']) self.driver.delete_volume({'id': 'cfgtempl1'}) self.driver.create_volume( {'id': 'cfgtempl2', 'name': 'v1', 'size': 1, 'volume_type': {'id': fake_constants.VOLUME_TYPE2_ID}}) self.assertVolumeNames(('cfgtempl2',)) v = volumes[volumeName('cfgtempl2')] self.assertNotIn('replication', v) self.assertEqual('ssd', v['template']) self.driver.delete_volume({'id': 'cfgtempl2'}) self.driver.configuration.storpool_template = 'hdd' self.driver.create_volume( {'id': 'cfgtempl3', 'name': 'v1', 'size': 1, 'volume_type': {'id': fake_constants.VOLUME_TYPE_ID}}) self.assertVolumeNames(('cfgtempl3',)) v = volumes[volumeName('cfgtempl3')] self.assertNotIn('replication', v) self.assertEqual('hdd', v['template']) self.driver.delete_volume({'id': 'cfgtempl3'}) self.driver.create_volume( {'id': 'cfgtempl4', 'name': 'v1', 'size': 1, 'volume_type': {'id': fake_constants.VOLUME_TYPE2_ID}}) self.assertVolumeNames(('cfgtempl4',)) v = volumes[volumeName('cfgtempl4')] self.assertNotIn('replication', v) self.assertEqual('ssd', v['template']) self.driver.delete_volume({'id': 'cfgtempl4'}) self.driver.configuration.storpool_template = save_template self.assertVolumeNames([]) self.assertDictEqual({}, volumes) self.assertDictEqual({}, snapshots) @ddt.data( # No volume type at all: 'default' ('default', None), # No storpool_template in the type extra specs: 'default' ('default', {'id': fake_constants.VOLUME_TYPE_ID}), # An actual template specified: 'template_*' ('template_ssd', {'id': fake_constants.VOLUME_TYPE2_ID}), ('template_hdd', {'id': fake_constants.VOLUME_TYPE3_ID}), ) @ddt.unpack @mock_volume_types def test_get_pool(self, pool, volume_type): self.assertEqual(pool, self.driver.get_pool({ 'volume_type': volume_type })) @mock_volume_types def test_volume_revert(self): vol_id = 'rev1' vol_name = volumeName(vol_id) snap_id = 'rev-s1' snap_name = snapshotName('snap', snap_id) self.assertVolumeNames([]) self.assertDictEqual({}, volumes) self.assertDictEqual({}, snapshots) self.driver.create_volume( {'id': vol_id, 'name': 'v1', 'size': 1, 'volume_type': {'id': fake_constants.VOLUME_TYPE_ID}}) self.assertVolumeNames((vol_id,)) self.assertDictEqual({}, snapshots) self.driver.create_snapshot({'id': snap_id, 'volume_id': vol_id}) self.assertVolumeNames((vol_id,)) self.assertListEqual([snap_name], sorted(snapshots.keys())) self.assertDictEqual(volumes[vol_name], snapshots[snap_name]) self.assertIsNot(volumes[vol_name], snapshots[snap_name]) self.driver.extend_volume({'id': vol_id}, 2) self.assertVolumeNames((vol_id,)) self.assertNotEqual(volumes[vol_name], snapshots[snap_name]) self.driver.revert_to_snapshot(None, {'id': vol_id}, {'id': snap_id}) self.assertVolumeNames((vol_id,)) self.assertDictEqual(volumes[vol_name], 
snapshots[snap_name]) self.assertIsNot(volumes[vol_name], snapshots[snap_name]) self.driver.delete_snapshot({'id': snap_id}) self.assertVolumeNames((vol_id,)) self.assertDictEqual({}, snapshots) self.assertRaisesRegex(exception.VolumeBackendAPIException, 'No such snapshot', self.driver.revert_to_snapshot, None, {'id': vol_id}, {'id': snap_id}) self.driver.delete_volume({'id': vol_id}) self.assertDictEqual({}, volumes) self.assertDictEqual({}, snapshots) self.assertRaisesRegex(exception.VolumeBackendAPIException, 'No such volume', self.driver.revert_to_snapshot, None, {'id': vol_id}, {'id': snap_id}) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/volume/drivers/test_veritas_cnfs.py0000664000175000017500000002045500000000000025546 0ustar00zuulzuul00000000000000# Copyright (c) 2017 Veritas Technologies LLC # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import os from unittest import mock from cinder import context from cinder import exception from cinder.tests.unit import fake_snapshot from cinder.tests.unit import fake_volume from cinder.tests.unit import test from cinder.volume import configuration as conf from cinder.volume.drivers import veritas_cnfs as cnfs class VeritasCNFSDriverTestCase(test.TestCase): """Test case for VeritasCNFS driver.""" TEST_CNFS_SHARE = 'cnfs-host1:/share' TEST_VOL_NM = 'volume-a6707cd3-348c-45cd-9524-255be0939b60' TEST_SNAP_NM = 'snapshot-73368c68-1c0b-4027-ba8a-14629918945e' TEST_VOL_SIZE = 1 TEST_MNT_BASE = '/cnfs/share' TEST_LOCAL_PATH = '/cnfs/share/mnt' TEST_VOL_LOCAL_PATH = TEST_LOCAL_PATH + '/' + TEST_VOL_NM TEST_SNAP_LOCAL_PATH = TEST_LOCAL_PATH + '/' + TEST_SNAP_NM TEST_SPL_SNAP_LOCAL_PATH = TEST_SNAP_LOCAL_PATH + "::snap:vxfs:" TEST_NFS_SHARES_CONFIG = '/etc/cinder/access_nfs_share' TEST_NFS_MOUNT_OPTIONS_FAIL_NONE = '' TEST_NFS_MOUNT_OPTIONS_FAIL_V4 = 'nfsvers=4' TEST_NFS_MOUNT_OPTIONS_FAIL_V2 = 'nfsvers=2' TEST_NFS_MOUNT_OPTIONS_PASS_V3 = 'nfsvers=3' TEST_VOL_ID = 'a6707cd3-348c-45cd-9524-255be0939b60' SNAPSHOT_ID = '73368c68-1c0b-4027-ba8a-14629918945e' def setUp(self): super(VeritasCNFSDriverTestCase, self).setUp() self.configuration = mock.Mock(conf.Configuration) self.configuration.nfs_shares_config = self.TEST_NFS_SHARES_CONFIG self.configuration.nfs_sparsed_volumes = True self.configuration.nfs_mount_point_base = self.TEST_MNT_BASE self.configuration.nfs_mount_options = (self. 
TEST_NFS_MOUNT_OPTIONS_PASS_V3) self.configuration.reserved_percentage = 0 self.configuration.max_over_subscription_ratio = 20.0 self.configuration.nas_secure_file_permissions = 'false' self.configuration.nas_secure_file_operations = 'false' self._loc = 'localhost:/share' self.context = context.get_admin_context() self.driver = cnfs.VeritasCNFSDriver(configuration=self.configuration) def test_throw_error_if_nfs_mount_options_not_configured(self): """Fail if no nfs mount options are configured""" drv = self.driver none_opts = self.TEST_NFS_MOUNT_OPTIONS_FAIL_NONE self.configuration.nfs_mount_options = none_opts self.assertRaises( exception.NfsException, drv.do_setup, context.RequestContext) def test_throw_error_if_nfs_mount_options_configured_with_NFSV2(self): """Fail if nfs mount options is not nfsv4 """ drv = self.driver nfs_v2_opts = self.TEST_NFS_MOUNT_OPTIONS_FAIL_V2 self.configuration.nfs_mount_options = nfs_v2_opts self.assertRaises( exception.NfsException, drv.do_setup, context.RequestContext) def test_throw_error_if_nfs_mount_options_configured_with_NFSV4(self): """Fail if nfs mount options is not nfsv4 """ drv = self.driver nfs_v4_opts = self.TEST_NFS_MOUNT_OPTIONS_FAIL_V4 self.configuration.nfs_mount_options = nfs_v4_opts self.assertRaises( exception.NfsException, drv.do_setup, context.RequestContext) @mock.patch.object(cnfs.VeritasCNFSDriver, '_get_local_volume_path') @mock.patch.object(os.path, 'exists') def test_do_clone_volume_success(self, m_exists, m_get_local_volume_path): """test _do_clone_volume() when filesnap over nfs is supported""" drv = self.driver volume = fake_volume.fake_volume_obj(self.context, provider_location=self._loc) snapshot = fake_volume.fake_volume_obj(self.context) with mock.patch('cinder.privsep.path.symlink'): m_exists.return_value = True drv._do_clone_volume(volume, volume.name, snapshot) @mock.patch.object(cnfs.VeritasCNFSDriver, '_get_local_volume_path') @mock.patch.object(os.path, 'exists') @mock.patch('cinder.privsep.path.symlink') def test_do_clone_volume_fail( self, m_symlink, m_exists, m_get_local_volume_path): """test _do_clone_volume() when filesnap over nfs is supported""" drv = self.driver volume = fake_volume.fake_volume_obj(self.context) snapshot = fake_volume.fake_volume_obj(self.context) with mock.patch.object(drv, '_execute'): m_exists.return_value = False self.assertRaises(exception.NfsException, drv._do_clone_volume, volume, volume.name, snapshot) def assign_provider_loc(self, src_vol, tgt_vol): tgt_vol.provider_location = src_vol.provider_location @mock.patch.object(cnfs.VeritasCNFSDriver, '_do_clone_volume') def test_create_volume_from_snapshot(self, m_do_clone_volume): """test create volume from snapshot""" drv = self.driver volume = fake_volume.fake_volume_obj(self.context) snapshot = fake_volume.fake_volume_obj(self.context, provider_location=self._loc) volume.size = 10 snapshot.volume_size = 10 m_do_clone_volume(snapshot, snapshot.name, volume).return_value = True drv.create_volume_from_snapshot(volume, snapshot) self.assertEqual(volume.provider_location, snapshot.provider_location) @mock.patch.object(cnfs.VeritasCNFSDriver, '_get_vol_by_id') @mock.patch.object(cnfs.VeritasCNFSDriver, '_do_clone_volume') def test_create_snapshot(self, m_do_clone_volume, m_get_vol_by_id): """test create snapshot""" drv = self.driver volume = fake_volume.fake_volume_obj(context.get_admin_context(), provider_location=self._loc) snapshot = fake_snapshot.fake_snapshot_obj(context.get_admin_context()) snapshot.volume = volume 
m_get_vol_by_id.return_value = volume m_do_clone_volume(snapshot, snapshot.name, volume).return_value = True drv.create_snapshot(snapshot) self.assertEqual(volume.provider_location, snapshot.provider_location) @mock.patch.object(cnfs.VeritasCNFSDriver, '_ensure_share_mounted') @mock.patch.object(cnfs.VeritasCNFSDriver, 'local_path') def test_delete_snapshot(self, m_local_path, m_ensure_share_mounted): """test delete snapshot""" drv = self.driver snapshot = fake_snapshot.fake_snapshot_obj(context.get_admin_context(), provider_location=self._loc) m_ensure_share_mounted(self._loc).AndReturn(None) m_local_path(snapshot).AndReturn(self.TEST_SNAP_LOCAL_PATH) with mock.patch.object(drv, '_execute'): drv.delete_snapshot(snapshot) @mock.patch.object(cnfs.VeritasCNFSDriver, '_do_clone_volume') @mock.patch.object(cnfs.VeritasCNFSDriver, 'local_path') def test_create_volume_from_snapshot_greater_size(self, m_local_path, m_do_clone_volume): """test create volume from snapshot with greater volume size""" drv = self.driver volume = fake_volume.fake_volume_obj(self.context) snapshot = fake_volume.fake_volume_obj(self.context, provider_location=self._loc) volume.size = 20 snapshot.volume_size = 10 m_do_clone_volume(snapshot, snapshot.name, volume).return_value = True m_local_path(volume).AndReturn(self.TEST_VOL_LOCAL_PATH) with mock.patch.object(drv, '_execute'): drv.create_volume_from_snapshot(volume, snapshot) self.assertEqual(volume.provider_location, snapshot.provider_location) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/volume/drivers/test_vzstorage.py0000664000175000017500000004757100000000000025114 0ustar00zuulzuul00000000000000# Copyright 2015 Odin # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
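# The VZStorage driver tests below never touch a real storage cluster: the
# driver is built around a MagicMock configuration (fake
# vzstorage_shares_config, vzstorage_used_ratio, vzstorage_mount_point_base,
# and a default volume format of 'raw'), its _execute and _local_volume_dir
# methods are mocked out, os.path.exists is patched to simulate the presence
# or absence of the shares config file, and the volume and snapshot objects
# come from cinder.tests.unit.fake_volume / fake_snapshot, so every volume
# path under the fake mount point /mnt/fake_hash exists only as a string.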
import collections import copy import errno import os from unittest import mock import ddt from os_brick.remotefs import remotefs from oslo_utils import units from cinder import context from cinder import exception from cinder.image import image_utils from cinder.tests.unit import fake_snapshot from cinder.tests.unit import fake_volume from cinder.tests.unit import test from cinder.volume.drivers import vzstorage _orig_path_exists = os.path.exists @ddt.ddt class VZStorageTestCase(test.TestCase): _FAKE_SHARE = "10.0.0.1,10.0.0.2:/cluster123:123123" _FAKE_MNT_BASE = '/mnt' _FAKE_MNT_POINT = os.path.join(_FAKE_MNT_BASE, 'fake_hash') _FAKE_VOLUME_NAME = 'volume-4f711859-4928-4cb7-801a-a50c37ceaccc' _FAKE_VOLUME_PATH = os.path.join(_FAKE_MNT_POINT, _FAKE_VOLUME_NAME) _FAKE_SNAPSHOT_ID = '50811859-4928-4cb7-801a-a50c37ceacba' _FAKE_SNAPSHOT_PATH = ( _FAKE_VOLUME_PATH + '-snapshot' + _FAKE_SNAPSHOT_ID) def setUp(self): super(VZStorageTestCase, self).setUp() self._cfg = mock.MagicMock() self._cfg.vzstorage_shares_config = '/fake/config/path' self._cfg.vzstorage_sparsed_volumes = False self._cfg.vzstorage_used_ratio = 0.7 self._cfg.vzstorage_mount_point_base = self._FAKE_MNT_BASE self._cfg.vzstorage_default_volume_format = 'raw' self._cfg.nas_secure_file_operations = 'auto' self._cfg.nas_secure_file_permissions = 'auto' self._vz_driver = vzstorage.VZStorageDriver(configuration=self._cfg) self._vz_driver._local_volume_dir = mock.Mock( return_value=self._FAKE_MNT_POINT) self._vz_driver._execute = mock.Mock() self._vz_driver.base = self._FAKE_MNT_BASE self.context = context.get_admin_context() vol_type = fake_volume.fake_volume_type_obj(self.context) vol_type.extra_specs = {} _FAKE_VOLUME = {'id': '4f711859-4928-4cb7-801a-a50c37ceaccc', 'size': 1, 'provider_location': self._FAKE_SHARE, 'name': self._FAKE_VOLUME_NAME, 'status': 'available'} self.vol = fake_volume.fake_volume_obj(self.context, volume_type_id=vol_type.id, **_FAKE_VOLUME) self.vol.volume_type = vol_type _FAKE_SNAPSHOT = {'id': self._FAKE_SNAPSHOT_ID, 'status': 'available', 'volume_size': 1} self.snap = fake_snapshot.fake_snapshot_obj(self.context, **_FAKE_SNAPSHOT) self.snap.volume = self.vol def _path_exists(self, path): if path.startswith(self._cfg.vzstorage_shares_config): return True return _orig_path_exists(path) def _path_dont_exists(self, path): if path.startswith('/fake'): return False return _orig_path_exists(path) @mock.patch('os.path.exists') def test_setup_ok(self, mock_exists): mock_exists.side_effect = self._path_exists self._vz_driver.do_setup(mock.sentinel.context) @mock.patch('os.path.exists') def test_setup_missing_shares_conf(self, mock_exists): mock_exists.side_effect = self._path_dont_exists self.assertRaises(vzstorage.VzStorageException, self._vz_driver.do_setup, mock.sentinel.context) @mock.patch('os.path.exists') def test_setup_invalid_usage_ratio(self, mock_exists): mock_exists.side_effect = self._path_exists self._vz_driver.configuration.vzstorage_used_ratio = 1.2 self.assertRaises(vzstorage.VzStorageException, self._vz_driver.do_setup, mock.sentinel.context) @mock.patch('os.path.exists') def test_setup_invalid_usage_ratio2(self, mock_exists): mock_exists.side_effect = self._path_exists self._vz_driver.configuration.vzstorage_used_ratio = 0 self.assertRaises(vzstorage.VzStorageException, self._vz_driver.do_setup, mock.sentinel.context) @mock.patch('os.path.exists') def test_setup_invalid_mount_point_base(self, mock_exists): mock_exists.side_effect = self._path_exists self._cfg.vzstorage_mount_point_base = './tmp' 
vz_driver = vzstorage.VZStorageDriver(configuration=self._cfg) self.assertRaises(vzstorage.VzStorageException, vz_driver.do_setup, mock.sentinel.context) @mock.patch('os.path.exists') def test_setup_no_vzstorage(self, mock_exists): mock_exists.side_effect = self._path_exists exc = OSError() exc.errno = errno.ENOENT self._vz_driver._execute.side_effect = exc self.assertRaises(vzstorage.VzStorageException, self._vz_driver.do_setup, mock.sentinel.context) @ddt.data({'qemu_fmt': 'parallels', 'glance_fmt': 'ploop'}, {'qemu_fmt': 'qcow2', 'glance_fmt': 'qcow2'}) @ddt.unpack def test_initialize_connection(self, qemu_fmt, glance_fmt): drv = self._vz_driver info = mock.Mock() info.file_format = qemu_fmt snap_info = """{"volume_format": "%s", "active": "%s"}""" % (qemu_fmt, self.vol.id) with mock.patch.object(drv, '_qemu_img_info', return_value=info): with mock.patch.object(drv, '_read_file', return_value=snap_info): ret = drv.initialize_connection(self.vol, None) name = drv.get_active_image_from_info(self.vol) expected = {'driver_volume_type': 'vzstorage', 'data': {'export': self._FAKE_SHARE, 'format': glance_fmt, 'name': name}, 'mount_point_base': self._FAKE_MNT_BASE} self.assertEqual(expected, ret) def test_ensure_share_mounted_invalid_share(self): self.assertRaises(vzstorage.VzStorageException, self._vz_driver._ensure_share_mounted, ':') @mock.patch.object(remotefs.RemoteFsClient, 'mount') def test_ensure_share_mounted(self, mock_mount): drv = self._vz_driver share = 'test' expected_calls = [ mock.call(share, ['-u', 'cinder', '-g', 'root', '-l', '/var/log/vstorage/%s/cinder.log.gz' % share]), mock.call(share, ['-l', '/var/log/dummy.log']) ] share_flags = '["-u", "cinder", "-g", "root"]' drv.shares[share] = share_flags drv._ensure_share_mounted(share) share_flags = '["-l", "/var/log/dummy.log"]' drv.shares[share] = share_flags drv._ensure_share_mounted(share) mock_mount.assert_has_calls(expected_calls) def test_find_share(self): drv = self._vz_driver drv._mounted_shares = [self._FAKE_SHARE] with mock.patch.object(drv, '_is_share_eligible', return_value=True): ret = drv._find_share(self.vol) self.assertEqual(self._FAKE_SHARE, ret) def test_find_share_no_shares_mounted(self): drv = self._vz_driver with mock.patch.object(drv, '_is_share_eligible', return_value=True): self.assertRaises(vzstorage.VzStorageNoSharesMounted, drv._find_share, self.vol) def test_find_share_no_shares_suitable(self): drv = self._vz_driver drv._mounted_shares = [self._FAKE_SHARE] with mock.patch.object(drv, '_is_share_eligible', return_value=False): self.assertRaises(vzstorage.VzStorageNoSuitableShareFound, drv._find_share, self.vol) def test_is_share_eligible_false(self): drv = self._vz_driver cap_info = (100 * units.Gi, 40 * units.Gi, 60 * units.Gi) with mock.patch.object(drv, '_get_capacity_info', return_value=cap_info): ret = drv._is_share_eligible(self._FAKE_SHARE, 50) self.assertFalse(ret) def test_is_share_eligible_true(self): drv = self._vz_driver cap_info = (100 * units.Gi, 40 * units.Gi, 60 * units.Gi) with mock.patch.object(drv, '_get_capacity_info', return_value=cap_info): ret = drv._is_share_eligible(self._FAKE_SHARE, 30) self.assertTrue(ret) @mock.patch.object(image_utils, 'resize_image') def test_extend_volume(self, mock_resize_image): drv = self._vz_driver drv._check_extend_volume_support = mock.Mock(return_value=True) drv._is_file_size_equal = mock.Mock(return_value=True) snap_info = '{"active": "%s"}' % self.vol.id with mock.patch.object(drv, 'get_volume_format', return_value="raw"): with 
mock.patch.object(drv, 'get_active_image_from_info', return_value=self._FAKE_VOLUME_PATH): with mock.patch.object(drv, '_read_file', return_value=snap_info): drv.extend_volume(self.vol, 10) mock_resize_image.assert_called_once_with(self._FAKE_VOLUME_PATH, 10) def _test_check_extend_support(self, is_eligible=True): drv = self._vz_driver drv.local_path = mock.Mock(return_value=self._FAKE_VOLUME_PATH) drv._is_share_eligible = mock.Mock(return_value=is_eligible) active = self._FAKE_VOLUME_PATH drv.get_active_image_from_info = mock.Mock(return_value=active) if not is_eligible: self.assertRaises(exception.ExtendVolumeError, drv._check_extend_volume_support, self.vol, 2) else: drv._check_extend_volume_support(self.vol, 2) drv._is_share_eligible.assert_called_once_with(self._FAKE_SHARE, 1) def test_check_extend_support(self): self._test_check_extend_support() def test_check_extend_volume_uneligible_share(self): self._test_check_extend_support(is_eligible=False) @mock.patch.object(image_utils, 'convert_image') def test_copy_volume_from_snapshot(self, mock_convert_image): drv = self._vz_driver fake_volume_info = {self._FAKE_SNAPSHOT_ID: 'fake_snapshot_file_name', 'backing-files': {self._FAKE_SNAPSHOT_ID: self._FAKE_VOLUME_NAME}} fake_img_info = mock.MagicMock() fake_img_info.backing_file = self._FAKE_VOLUME_NAME drv.get_volume_format = mock.Mock(return_value='raw') drv._local_path_volume_info = mock.Mock( return_value=self._FAKE_VOLUME_PATH + '.info') drv._local_volume_dir = mock.Mock( return_value=self._FAKE_MNT_POINT) drv._read_info_file = mock.Mock( return_value=fake_volume_info) drv._qemu_img_info = mock.Mock( return_value=fake_img_info) drv.local_path = mock.Mock( return_value=self._FAKE_VOLUME_PATH[:-1]) drv._extend_volume = mock.Mock() drv._copy_volume_from_snapshot( self.snap, self.vol, self.vol['size']) drv._extend_volume.assert_called_once_with( self.vol, self.vol['size'], 'raw') mock_convert_image.assert_called_once_with( self._FAKE_VOLUME_PATH, self._FAKE_VOLUME_PATH[:-1], 'raw') def test_delete_volume(self): drv = self._vz_driver fake_vol_info = self._FAKE_VOLUME_PATH + '.info' drv._ensure_share_mounted = mock.MagicMock() fake_ensure_mounted = drv._ensure_share_mounted drv._local_volume_dir = mock.Mock( return_value=self._FAKE_MNT_POINT) drv.get_active_image_from_info = mock.Mock( return_value=self._FAKE_VOLUME_NAME) drv._delete = mock.Mock() drv._local_path_volume_info = mock.Mock( return_value=fake_vol_info) with mock.patch('os.path.exists', lambda x: True): drv.delete_volume(self.vol) fake_ensure_mounted.assert_called_once_with(self._FAKE_SHARE) drv._delete.assert_any_call( self._FAKE_VOLUME_PATH) drv._delete.assert_any_call(fake_vol_info) @mock.patch('cinder.volume.drivers.remotefs.RemoteFSSnapDriverBase.' '_write_info_file') def test_delete_snapshot_ploop(self, _mock_write_info_file): fake_snap_info = { 'active': self._FAKE_VOLUME_NAME, self._FAKE_SNAPSHOT_ID: self._FAKE_SNAPSHOT_PATH, } self._vz_driver.get_volume_format = mock.Mock( return_value=vzstorage.DISK_FORMAT_PLOOP) self._vz_driver._read_info_file = mock.Mock( return_value=fake_snap_info ) self._vz_driver._get_desc_path = mock.Mock( return_value='%s/DiskDescriptor.xml' % self._FAKE_VOLUME_PATH ) self._vz_driver.delete_snapshot(self.snap) self._vz_driver._execute.assert_called_once_with( 'ploop', 'snapshot-delete', '-u', '{%s}' % self._FAKE_SNAPSHOT_ID, '%s/DiskDescriptor.xml' % self._FAKE_VOLUME_PATH, run_as_root=True ) @mock.patch('cinder.volume.drivers.remotefs.RemoteFSSnapDriverBase.' 
'_delete_snapshot') def test_delete_snapshot_qcow2_invalid_snap_info(self, mock_delete_snapshot): fake_snap_info = { 'active': self._FAKE_VOLUME_NAME, } self._vz_driver.get_volume_format = mock.Mock( return_value=vzstorage.DISK_FORMAT_QCOW2) self._vz_driver._read_info_file = mock.Mock( return_value=fake_snap_info ) self._vz_driver.delete_snapshot(self.snap) self.assertFalse(mock_delete_snapshot.called) def test_extend_volume_ploop(self): drv = self._vz_driver drv.get_active_image_from_info = mock.Mock( return_value=self._FAKE_VOLUME_PATH) drv.get_volume_format = mock.Mock( return_value=vzstorage.DISK_FORMAT_PLOOP) drv._is_share_eligible = mock.Mock( return_value=True) drv.extend_volume(self.vol, 100) drv._execute.assert_called_once_with( 'ploop', 'resize', '-s', '100G', '%s/DiskDescriptor.xml' % self._FAKE_VOLUME_PATH, run_as_root=True) @mock.patch.object(os.path, 'exists', return_value=False) def test_do_create_volume_with_volume_type(self, mock_exists): drv = self._vz_driver drv.local_path = mock.Mock( return_value=self._FAKE_VOLUME_PATH) drv._write_info_file = mock.Mock() drv._qemu_img_info = mock.Mock() drv._create_qcow2_file = mock.Mock() drv._create_ploop = mock.Mock() volume_type = fake_volume.fake_volume_type_obj(self.context) volume_type.extra_specs = { 'vz:volume_format': 'qcow2' } volume1 = fake_volume.fake_volume_obj(self.context) volume1.size = 1024 volume1.volume_type = volume_type volume2 = copy.deepcopy(volume1) volume2.metadata = { 'volume_format': 'ploop' } drv._do_create_volume(volume1) drv._create_qcow2_file.assert_called_once_with( self._FAKE_VOLUME_PATH, 1024) drv._do_create_volume(volume2) drv._create_ploop.assert_called_once_with( self._FAKE_VOLUME_PATH, 1024) @mock.patch('cinder.volume.drivers.remotefs.RemoteFSSnapDriver.' '_create_cloned_volume') @mock.patch.object(vzstorage.VZStorageDriver, 'get_volume_format', return_value='qcow2') def test_create_cloned_volume_qcow2(self, mock_get_volume_format, mock_remotefs_create_cloned_volume, ): drv = self._vz_driver volume = fake_volume.fake_volume_obj(self.context) src_vref_id = '375e32b2-804a-49f2-b282-85d1d5a5b9e1' src_vref = fake_volume.fake_volume_obj( self.context, id=src_vref_id, name='volume-%s' % src_vref_id, provider_location=self._FAKE_SHARE) src_vref.context = self.context mock_remotefs_create_cloned_volume.return_value = { 'provider_location': self._FAKE_SHARE} ret = drv.create_cloned_volume(volume, src_vref) # Bug #1875953: code should also be passing context here mock_remotefs_create_cloned_volume.assert_called_once_with( volume, src_vref, self.context) self.assertEqual(ret, {'provider_location': self._FAKE_SHARE}) @mock.patch.object(vzstorage.VZStorageDriver, '_local_path_volume_info') @mock.patch.object(vzstorage.VZStorageDriver, '_create_snapshot_ploop') @mock.patch.object(vzstorage.VZStorageDriver, 'delete_snapshot') @mock.patch.object(vzstorage.VZStorageDriver, '_write_info_file') @mock.patch.object(vzstorage.VZStorageDriver, '_copy_volume_from_snapshot') @mock.patch.object(vzstorage.VZStorageDriver, 'get_volume_format', return_value='ploop') def test_create_cloned_volume_ploop(self, mock_get_volume_format, mock_copy_volume_from_snapshot, mock_write_info_file, mock_delete_snapshot, mock_create_snapshot_ploop, mock_local_path_volume_info, ): drv = self._vz_driver volume = fake_volume.fake_volume_obj(self.context) src_vref_id = '375e32b2-804a-49f2-b282-85d1d5a5b9e1' src_vref = fake_volume.fake_volume_obj( self.context, id=src_vref_id, name='volume-%s' % src_vref_id, provider_location=self._FAKE_SHARE) 
src_vref.context = self.context snap_attrs = ['volume_name', 'size', 'volume_size', 'name', 'volume_id', 'id', 'volume'] Snapshot = collections.namedtuple('Snapshot', snap_attrs) snap_ref = Snapshot(volume_name=volume.name, name='clone-snap-%s' % src_vref.id, size=src_vref.size, volume_size=src_vref.size, volume_id=src_vref.id, id=src_vref.id, volume=src_vref) def _check_provider_location(volume): self.assertEqual(volume.provider_location, self._FAKE_SHARE) return mock.sentinel.fake_info_path mock_local_path_volume_info.side_effect = _check_provider_location ret = drv.create_cloned_volume(volume, src_vref) self.assertEqual(ret, {'provider_location': self._FAKE_SHARE}) mock_write_info_file.assert_called_once_with( mock.sentinel.fake_info_path, {'active': 'volume-%s' % volume.id}) mock_create_snapshot_ploop.assert_called_once_with(snap_ref) mock_copy_volume_from_snapshot.assert_called_once_with( snap_ref, volume, volume.size) mock_delete_snapshot.assert_called_once_with(snap_ref) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/volume/drivers/test_zadara.py0000664000175000017500000016145300000000000024326 0ustar00zuulzuul00000000000000# Copyright (c) 2019 Zadara Storage, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Tests for Zadara VPSA volume driver """ import copy from unittest import mock from urllib import parse import requests from cinder import exception from cinder.tests.unit import fake_constants as fake from cinder.tests.unit import fake_snapshot from cinder.tests.unit import fake_volume from cinder.tests.unit import test from cinder.volume import configuration as conf from cinder.volume.drivers.zadara import common from cinder.volume.drivers.zadara import exception as zadara_exception from cinder.volume.drivers.zadara import zadara def check_access_key(func): """A decorator for all operations that needed an API before executing""" def wrap(self, *args, **kwargs): if not self._is_correct_access_key(): return RUNTIME_VARS['bad_login'] return func(self, *args, **kwargs) return wrap DEFAULT_RUNTIME_VARS = { 'status': 200, 'user': 'test', 'password': 'test_password', 'access_key': '0123456789ABCDEF', 'volumes': [], 'servers': [], 'controllers': [('active_ctrl', {'display-name': 'test_ctrl'})], 'counter': 1000, "login": """ { "response": { "user": { "updated-at": "2021-01-22", "access-key": "%s", "id": 1, "created-at": "2021-01-22", "email": "jsmith@example.com", "username": "jsmith" }, "status": 0 } }""", "good": """ { "response": { "status": 0 } }""", "bad_login": """ { "response": { "status": 5, "status-msg": "Some message..." 
} }""", "bad_volume": """ { "response": { "status": 10081, "status-msg": "Virtual volume xxx should be found" } }""", "fake_volume": """ { "response": { "volumes": [], "status": 0, "status-msg": "Virtual volume xxx doesn't exist" } }""", "bad_server": """ { "response": { "status": 10086, "status-msg": "Server xxx not found" } }""", "server_created": """ { "response": { "server_name": "%s", "status": 0 } }""", } RUNTIME_VARS = None class FakeResponse(object): def __init__(self, method, url, params, body, headers, **kwargs): # kwargs include: verify, timeout self.method = method self.url = url self.body = body self.params = params self.headers = headers self.status = RUNTIME_VARS['status'] @property def access_key(self): """Returns Response Access Key""" return self.headers["X-Access-Key"] def read(self): ops = {'POST': [('/api/users/login.json', self._login), ('/api/volumes.json', self._create_volume), ('/api/servers.json', self._create_server), ('/api/servers/*/volumes.json', self._attach), ('/api/volumes/*/detach.json', self._detach), ('/api/volumes/*/expand.json', self._expand), ('/api/volumes/*/rename.json', self._rename), ('/api/consistency_groups/*/snapshots.json', self._create_snapshot), ('/api/snapshots/*/rename.json', self._rename_snapshot), ('/api/consistency_groups/*/clone.json', self._create_clone)], 'DELETE': [('/api/volumes/*', self._delete), ('/api/snapshots/*', self._delete_snapshot)], 'GET': [('/api/volumes.json?showonlyblock=YES', self._list_volumes), ('/api/volumes.json?display_name=*', self._get_volume_by_name), ('/api/pools/*.json', self._get_pool), ('/api/vcontrollers.json', self._list_controllers), ('/api/servers.json', self._list_servers), ('/api/consistency_groups/*/snapshots.json', self._list_vol_snapshots), ('/api/snapshots.json', self._list_snapshots), ('/api/volumes/*/servers.json', self._list_vol_attachments)] } ops_list = ops[self.method] for (templ_url, func) in ops_list: if self._compare_url(self.url, templ_url): result = func() return result @staticmethod def _compare_url(url, template_url): items = url.split('/') titems = template_url.split('/') for (i, titem) in enumerate(titems): if '*' not in titem and titem != items[i]: return False if '?' 
in titem and titem.split('=')[0] != items[i].split('=')[0]: return False return True @staticmethod def _get_counter(): cnt = RUNTIME_VARS['counter'] RUNTIME_VARS['counter'] += 1 return cnt def _login(self): params = self.body if (params['user'] == RUNTIME_VARS['user'] and params['password'] == RUNTIME_VARS['password']): return RUNTIME_VARS['login'] % RUNTIME_VARS['access_key'] else: return RUNTIME_VARS['bad_login'] def _is_correct_access_key(self): return self.access_key == RUNTIME_VARS['access_key'] @check_access_key def _create_volume(self): params = self.body params['display-name'] = params['name'] params['cg_name'] = params['name'] params['snapshots'] = [] params['server_ext_names'] = '' params['pool'] = 'pool-0001' params['provider_location'] = params['name'] vpsa_vol = 'volume-%07d' % self._get_counter() RUNTIME_VARS['volumes'].append((vpsa_vol, params)) return RUNTIME_VARS['good'] @check_access_key def _create_server(self): params = self.body params['display-name'] = params['display_name'] vpsa_srv = 'srv-%07d' % self._get_counter() RUNTIME_VARS['servers'].append((vpsa_srv, params)) return RUNTIME_VARS['server_created'] % vpsa_srv @check_access_key def _attach(self): srv = self.url.split('/')[3] params = self.body vol = params['volume_name[]'] for (vol_name, params) in RUNTIME_VARS['volumes']: if params['name'] == vol: attachments = params['server_ext_names'].split(',') if srv in attachments: # already attached - ok return RUNTIME_VARS['good'] else: if not attachments[0]: params['server_ext_names'] = srv else: params['server_ext_names'] += ',' + srv return RUNTIME_VARS['good'] return RUNTIME_VARS['bad_volume'] @check_access_key def _detach(self): params = self.body vol = self.url.split('/')[3] srv = params['server_name[]'] for (vol_name, params) in RUNTIME_VARS['volumes']: if params['name'] == vol: attachments = params['server_ext_names'].split(',') if srv not in attachments: return RUNTIME_VARS['bad_server'] else: attachments.remove(srv) params['server_ext_names'] = (','.join([str(elem) for elem in attachments])) return RUNTIME_VARS['good'] return RUNTIME_VARS['bad_volume'] @check_access_key def _expand(self): params = self.body vol = self.url.split('/')[3] capacity = params['capacity'] for (vol_name, params) in RUNTIME_VARS['volumes']: if params['name'] == vol: params['capacity'] = capacity return RUNTIME_VARS['good'] return RUNTIME_VARS['bad_volume'] @check_access_key def _rename(self): params = self.body vol = self.url.split('/')[3] for (vol_name, vol_params) in RUNTIME_VARS['volumes']: if vol_params['name'] == vol: vol_params['name'] = params['new_name'] vol_params['display-name'] = params['new_name'] vol_params['cg_name'] = params['new_name'] return RUNTIME_VARS['good'] return RUNTIME_VARS['bad_volume'] @check_access_key def _rename_snapshot(self): params = self.body vpsa_snapshot = self.url.split('/')[3] for (vol_name, vol_params) in RUNTIME_VARS['volumes']: for snapshot in vol_params['snapshots']: if vpsa_snapshot == snapshot: vol_params['snapshots'].remove(snapshot) vol_params['snapshots'].append(params['newname']) return RUNTIME_VARS['good'] return RUNTIME_VARS['bad_volume'] @check_access_key def _create_snapshot(self): params = self.body cg_name = self.url.split('/')[3] snap_name = params['display_name'] for (vol_name, vol_params) in RUNTIME_VARS['volumes']: if vol_params['cg_name'] == cg_name: snapshots = vol_params['snapshots'] if snap_name in snapshots: # already attached return RUNTIME_VARS['bad_volume'] else: snapshots.append(snap_name) vol_params['has_snapshots'] = 
'YES' return RUNTIME_VARS['good'] return RUNTIME_VARS['bad_volume'] @check_access_key def _delete_snapshot(self): snap = self.url.split('/')[3].split('.')[0] for (vol_name, params) in RUNTIME_VARS['volumes']: if snap in params['snapshots']: params['snapshots'].remove(snap) return RUNTIME_VARS['good'] return RUNTIME_VARS['bad_volume'] @check_access_key def _create_clone(self): params = self.body params['display-name'] = params['name'] params['cg_name'] = params['name'] params['capacity'] = 1 params['snapshots'] = [] params['server_ext_names'] = '' params['pool'] = 'pool-0001' vpsa_vol = 'volume-%07d' % self._get_counter() RUNTIME_VARS['volumes'].append((vpsa_vol, params)) return RUNTIME_VARS['good'] def _delete(self): vol = self.url.split('/')[3].split('.')[0] for (vol_name, params) in RUNTIME_VARS['volumes']: if params['name'] == vol: if params['server_ext_names']: # there are attachments - should be volume busy error return RUNTIME_VARS['bad_volume'] else: RUNTIME_VARS['volumes'].remove((vol_name, params)) return RUNTIME_VARS['good'] return RUNTIME_VARS['bad_volume'] def _generate_list_resp(self, null_body, body, lst, vol): resp = '' for (obj, params) in lst: if vol: resp += body % (obj, params['display-name'], params['cg_name'], params['capacity'], params['pool']) else: resp += body % (obj, params['display-name']) if resp: return resp else: return null_body def _list_volumes(self): null_body = """ { "response": { "volumes": [ ], "status": 0 } }""" body = """ { "response": { "volumes": %s, "status": 0 } }""" volume_obj = """ { "name": "%s", "display_name": "%s", "cg_name": "%s", "status": "%s", "virtual_capacity": %d, "pool_name": "%s", "allocated-capacity": 1, "raid-group-name": "r5", "cache": "write-through", "created-at": "2021-01-22", "modified-at": "2021-01-22", "has_snapshots": "%s" } """ if len(RUNTIME_VARS['volumes']) == 0: return null_body resp = '' volume_list = '' count = 0 for (vol_name, params) in RUNTIME_VARS['volumes']: vol_status = (params.get('status') if params.get('status') else 'Available') has_snapshots = 'YES' if params.get('has_snapshots') else 'NO' volume_dict = volume_obj % (params['name'], params['display-name'], params['cg_name'], vol_status, params['capacity'], params['pool'], has_snapshots) if count == 0: volume_list += volume_dict count += 1 elif count != len(RUNTIME_VARS['volumes']): volume_list = volume_list + ',' + volume_dict count += 1 if volume_list: volume_list = '[' + volume_list + ']' resp = body % volume_list return resp return RUNTIME_VARS['bad_volume'] def _get_volume_by_name(self): volume_name = self.url.split('=')[1] body = """ { "response": { "volumes": [ { "name": "%s", "display_name": "%s", "cg_name": "%s", "provider_location": "%s", "status": "%s", "virtual_capacity": %d, "pool_name": "%s", "allocated-capacity": 1, "raid-group-name": "r5", "cache": "write-through", "created-at": "2021-01-22", "modified-at": "2021-01-22", "has_snapshots": "%s", "server_ext_names": "%s" } ], "status": 0 } }""" for (vol_name, params) in RUNTIME_VARS['volumes']: if params['name'] == volume_name: vol_status = (params.get('status') if params.get('status') else 'Available') has_snapshots = 'YES' if params.get('has_snapshots') else 'NO' resp = body % (volume_name, params['display-name'], params['cg_name'], params['cg_name'], vol_status, params['capacity'], params['pool'], has_snapshots, params['server_ext_names']) return resp return RUNTIME_VARS['fake_volume'] def _list_controllers(self): null_body = """ { "response": { "vcontrollers": [ ], "status": 0 } }""" body = 
""" { "response": { "vcontrollers": [ { "name": "%s", "display-name": "%s", "state": "active", "target": "iqn.2011-04.zadarastorage:vsa-xxx:1", "iscsi_ip": "1.1.1.1", "iscsi_ipv6": "", "mgmt-ip": "1.1.1.1", "software-ver": "0.0.09-05.1--77.7", "heartbeat1": "ok", "heartbeat2": "ok", "vpsa_chap_user": "test_chap_user", "vpsa_chap_secret": "test_chap_secret" } ], "status": 0 } }""" return self._generate_list_resp(null_body, body, RUNTIME_VARS['controllers'], False) def _get_pool(self): response = """ { "response": { "pool": { "name": "pool-0001", "capacity": 100, "available_capacity": 99, "provisioned_capacity": 1 }, "status": 0 } }""" return response def _list_servers(self): null_body = """ { "response": { "servers": [ ], "status": 0 } }""" body = """ { "response": { "servers": %s, "status": 0 } }""" server_obj = """ { "name": "%s", "display_name": "%s", "iqn": "%s", "target": "iqn.2011-04.zadarastorage:vsa-xxx:1", "lun": 0 } """ resp = '' server_list = '' count = 0 for (obj, params) in RUNTIME_VARS['servers']: server_dict = server_obj % (obj, params['display_name'], params['iqn']) if count == 0: server_list += server_dict count += 1 elif count != len(RUNTIME_VARS['servers']): server_list = server_list + ',' + server_dict count += 1 server_list = '[' + server_list + ']' resp = body % server_list if resp: return resp else: return null_body def _list_snapshots(self): null_body = """ { "response": { "snapshots": [ ], "status": 0 } }""" body = """ { "response": { "snapshots": %s, "status": 0 } }""" snapshot_obj = """ { "name": "%s", "display_name": "%s", "volume_display_name": "%s", "volume_capacity_mb": %d, "volume_ext_name": "%s", "cg_name": "%s", "pool_name": "pool-0001" } """ resp = '' snapshot_list = '' count = 0 for (obj, params) in RUNTIME_VARS['volumes']: snapshots = params['snapshots'] if len(snapshots) == 0: continue for snapshot in snapshots: snapshot_dict = snapshot_obj % (snapshot, snapshot, params['provider_location'], params['capacity'] * 1024, params['display-name'], params['cg_name']) if count == 0: snapshot_list += snapshot_dict count += 1 else: snapshot_list = snapshot_list + ',' + snapshot_dict count += 1 snapshot_list = '[' + snapshot_list + ']' resp = body % snapshot_list if resp: return resp else: return null_body def _get_server_obj(self, name): for (srv_name, params) in RUNTIME_VARS['servers']: if srv_name == name: return params def _list_vol_attachments(self): vol = self.url.split('/')[3] null_body = """ { "response": { "servers": [ ], "status": 0 } }""" body = """ { "response": { "servers": %s, "status": 0 } }""" server_obj = """ { "name": "%s", "display_name": "%s", "iqn": "%s", "target": "iqn.2011-04.zadarastorage:vsa-xxx:1", "lun": 0 } """ for (vol_name, params) in RUNTIME_VARS['volumes']: if params['name'] == vol: attachments = params['server_ext_names'].split(',') if not attachments[0]: return null_body resp = '' server_list = '' count = 0 for server in attachments: srv_params = self._get_server_obj(server) server_dict = (server_obj % (server, srv_params['display_name'], srv_params['iqn'])) if count == 0: server_list += server_dict count += 1 elif count != len(attachments): server_list = server_list + ',' + server_dict count += 1 server_list = '[' + server_list + ']' resp = body % server_list return resp return RUNTIME_VARS['bad_volume'] def _list_vol_snapshots(self): cg_name = self.url.split('/')[3] null_body = """ { "response": { "snapshots": [ ], "status": 0 } }""" body = """ { "response": { "snapshots": %s, "status": 0 } }""" snapshot_obj = """ { "name": 
"%s", "display_name": "%s", "cg_name": "%s", "pool_name": "pool-0001" } """ for (vol_name, params) in RUNTIME_VARS['volumes']: if params['cg_name'] == cg_name: snapshots = params['snapshots'] if len(snapshots) == 0: return null_body resp = '' snapshot_list = '' count = 0 for snapshot in snapshots: snapshot_dict = snapshot_obj % (snapshot, snapshot, cg_name) if count == 0: snapshot_list += snapshot_dict count += 1 elif count != len(snapshots): snapshot_list = snapshot_list + ',' + snapshot_dict count += 1 snapshot_list = '[' + snapshot_list + ']' resp = body % snapshot_list return resp return RUNTIME_VARS['bad_volume'] class FakeRequests(object): """A fake requests for zadara volume driver tests.""" def __init__(self, method, api_url, params=None, data=None, headers=None, **kwargs): apiurl_items = parse.urlparse(api_url) if apiurl_items.query: url = apiurl_items.path + '?' + apiurl_items.query else: url = apiurl_items.path res = FakeResponse(method, url, params, data, headers, **kwargs) self.content = res.read() self.status_code = res.status class ZadaraVPSADriverTestCase(test.TestCase): def __init__(self, *args, **kwargs): super(ZadaraVPSADriverTestCase, self).__init__(*args, **kwargs) self.configuration = None self.driver = None """Test case for Zadara VPSA volume driver.""" @mock.patch.object(requests.Session, 'request', FakeRequests) def setUp(self): super(ZadaraVPSADriverTestCase, self).setUp() global RUNTIME_VARS RUNTIME_VARS = copy.deepcopy(DEFAULT_RUNTIME_VARS) self.configuration = mock.Mock(conf.Configuration(None)) self.configuration.append_config_values(common.zadara_opts) self.configuration.reserved_percentage = 10 self.configuration.zadara_use_iser = True self.configuration.zadara_vpsa_host = '192.168.5.5' self.configuration.zadara_vpsa_port = '80' self.configuration.zadara_user = 'test' self.configuration.zadara_password = 'test_password' self.configuration.zadara_access_key = '0123456789ABCDEF' self.configuration.zadara_vpsa_poolname = 'pool-0001' self.configuration.zadara_vol_encrypt = False self.configuration.zadara_vol_name_template = 'OS_%s' self.configuration.zadara_vpsa_use_ssl = False self.configuration.zadara_ssl_cert_verify = False self.configuration.driver_ssl_cert_path = '/path/to/cert' self.configuration.zadara_default_snap_policy = False self.configuration.zadara_gen3_vol_compress = False self.configuration.zadara_gen3_vol_dedupe = False self.driver = (zadara.ZadaraVPSAISCSIDriver( configuration=self.configuration)) self.driver.do_setup(None) @mock.patch.object(requests.Session, 'request', FakeRequests) def test_create_destroy(self): """Create/Delete volume.""" vol_args = {'display_name': 'test_volume_01', 'size': 1, 'id': fake.VOLUME_ID} volume = fake_volume.fake_volume_obj(None, **vol_args) self.driver.create_volume(volume) self.driver.delete_volume(volume) @mock.patch.object(requests.Session, 'request', FakeRequests) def test_create_destroy_multiple(self): """Create/Delete multiple volumes.""" vol1_args = {'display_name': 'test_volume_01', 'size': 1, 'id': fake.VOLUME_ID} vol2_args = {'display_name': 'test_volume_02', 'size': 2, 'id': fake.VOLUME2_ID} vol3_args = {'display_name': 'test_volume_03', 'size': 3, 'id': fake.VOLUME3_ID} vol4_args = {'display_name': 'test_volume_04', 'size': 4, 'id': fake.VOLUME4_ID} volume1 = fake_volume.fake_volume_obj(None, **vol1_args) volume2 = fake_volume.fake_volume_obj(None, **vol2_args) volume3 = fake_volume.fake_volume_obj(None, **vol3_args) volume4 = fake_volume.fake_volume_obj(None, **vol4_args) 
self.driver.create_volume(volume1) self.driver.create_volume(volume2) self.driver.create_volume(volume3) self.driver.delete_volume(volume1) self.driver.delete_volume(volume2) self.driver.delete_volume(volume3) self.driver.delete_volume(volume4) @mock.patch.object(requests.Session, 'request', FakeRequests) def test_destroy_non_existent(self): """Delete non-existent volume.""" vol_args = {'display_name': 'test_volume_01', 'size': 1, 'id': fake.VOLUME_ID} volume = fake_volume.fake_volume_obj(None, **vol_args) self.driver.delete_volume(volume) @mock.patch.object(requests.Session, 'request', FakeRequests) def test_empty_apis(self): """Test empty func (for coverage only).""" context = None vol_args = {'display_name': 'test_volume_01', 'size': 1, 'id': fake.VOLUME_ID} volume = fake_volume.fake_volume_obj(None, **vol_args) self.driver.create_export(context, volume) self.driver.ensure_export(context, volume) self.driver.remove_export(context, volume) self.assertRaises(NotImplementedError, self.driver.local_path, None) self.driver.check_for_setup_error() @mock.patch.object(requests.Session, 'request', FakeRequests) def test_volume_attach_detach(self): """Test volume attachment and detach.""" vol_args = {'display_name': 'test_volume_01', 'size': 1, 'id': fake.VOLUME_ID} volume = fake_volume.fake_volume_obj(None, **vol_args) connector = dict(initiator='test_iqn.1') self.driver.create_volume(volume) props = self.driver.initialize_connection(volume, connector) self.assertEqual('iser', props['driver_volume_type']) data = props['data'] self.assertEqual('1.1.1.1:3260', data['target_portal']) self.assertEqual('iqn.2011-04.zadarastorage:vsa-xxx:1', data['target_iqn']) self.assertEqual(int('0'), data['target_lun']) self.assertEqual(volume['id'], data['volume_id']) self.assertEqual('CHAP', data['auth_method']) self.assertEqual('test_chap_user', data['auth_username']) self.assertEqual('test_chap_secret', data['auth_password']) self.driver.terminate_connection(volume, connector) self.driver.delete_volume(volume) @mock.patch.object(requests.Session, 'request', FakeRequests) def test_wrong_attach_params(self): """Test different wrong attach scenarios.""" vol1_args = {'display_name': 'test_volume_01', 'size': 1, 'id': fake.VOLUME_ID} volume1 = fake_volume.fake_volume_obj(None, **vol1_args) connector1 = dict(initiator='test_iqn.1') self.assertRaises(exception.VolumeDriverException, self.driver.initialize_connection, volume1, connector1) @mock.patch.object(requests.Session, 'request', FakeRequests) def test_wrong_detach_params(self): """Test different wrong detachment scenarios.""" vol1_args = {'display_name': 'test_volume_01', 'size': 1, 'id': fake.VOLUME_ID} volume1 = fake_volume.fake_volume_obj(None, **vol1_args) # Volume is not created. self.assertRaises(exception.VolumeDriverException, self.driver.terminate_connection, volume1, None) self.driver.create_volume(volume1) connector1 = dict(initiator='test_iqn.1') # Server is not found. 
Volume is found self.assertRaises(zadara_exception.ZadaraServerNotFound, self.driver.terminate_connection, volume1, connector1) vol2_args = {'display_name': 'test_volume_02', 'size': 1, 'id': fake.VOLUME2_ID} vol3_args = {'display_name': 'test_volume_03', 'size': 1, 'id': fake.VOLUME3_ID} volume2 = fake_volume.fake_volume_obj(None, **vol2_args) volume3 = fake_volume.fake_volume_obj(None, **vol3_args) connector2 = dict(initiator='test_iqn.2') connector3 = dict(initiator='test_iqn.3') self.driver.create_volume(volume2) self.driver.initialize_connection(volume1, connector1) self.driver.initialize_connection(volume2, connector2) # volume is found. Server not found self.assertRaises(zadara_exception.ZadaraServerNotFound, self.driver.terminate_connection, volume1, connector3) # Server is found. volume not found self.assertRaises(exception.VolumeDriverException, self.driver.terminate_connection, volume3, connector1) # Server and volume exits but not attached self.assertRaises(common.exception.FailedCmdWithDump, self.driver.terminate_connection, volume1, connector2) self.driver.terminate_connection(volume1, connector1) self.driver.terminate_connection(volume2, connector2) @mock.patch.object(requests.Session, 'request') def test_ssl_use(self, request): """Coverage test for SSL connection.""" self.configuration.zadara_ssl_cert_verify = True self.configuration.zadara_vpsa_use_ssl = True self.configuration.driver_ssl_cert_path = '/path/to/cert' fake_request_ctrls = FakeRequests("GET", "/api/vcontrollers.json") raw_controllers = fake_request_ctrls.content good_response = mock.MagicMock() good_response.status_code = RUNTIME_VARS['status'] good_response.content = raw_controllers def request_verify_cert(*args, **kwargs): self.assertEqual(kwargs['verify'], '/path/to/cert') return good_response request.side_effect = request_verify_cert self.driver.do_setup(None) @mock.patch.object(requests.Session, 'request') def test_wrong_access_key(self, request): """Wrong Access Key""" fake_ak = 'FAKEACCESSKEY' self.configuration.zadara_access_key = fake_ak bad_response = mock.MagicMock() bad_response.status_code = RUNTIME_VARS['status'] bad_response.content = RUNTIME_VARS['bad_login'] def request_verify_access_key(*args, **kwargs): # Checks if the fake access_key was sent to driver token = kwargs['headers']['X-Access-Key'] self.assertEqual(token, fake_ak, "access_key wasn't delivered") return bad_response request.side_effect = request_verify_access_key # when access key is invalid, driver will raise # ZadaraInvalidAccessKey exception self.assertRaises(zadara_exception.ZadaraCinderInvalidAccessKey, self.driver.do_setup, None) @mock.patch.object(requests.Session, 'request', FakeRequests) def test_bad_http_response(self): """Coverage test for non-good HTTP response.""" RUNTIME_VARS['status'] = 400 vol_args = {'display_name': 'test_volume_03', 'size': 1, 'id': fake.VOLUME_ID} volume = fake_volume.fake_volume_obj(None, **vol_args) self.assertRaises(exception.BadHTTPResponseStatus, self.driver.create_volume, volume) @mock.patch.object(requests.Session, 'request', FakeRequests) def test_terminate_connection_force_detach(self): """Test terminate connection for os-force_detach """ vol_args = {'display_name': 'test_volume_01', 'size': 1, 'id': fake.VOLUME_ID} volume = fake_volume.fake_volume_obj(None, **vol_args) connector = dict(initiator='test_iqn.1') self.driver.create_volume(volume) self.driver.initialize_connection(volume, connector) # connector is None - force detach - detach all mappings 
self.driver.terminate_connection(volume, None) self.assertRaises(common.exception.FailedCmdWithDump, self.driver.terminate_connection, volume, connector) self.driver.delete_volume(volume) @mock.patch.object(requests.Session, 'request', FakeRequests) def test_delete_without_detach(self): """Test volume deletion without detach.""" vol1_args = {'display_name': 'test_volume_01', 'size': 1, 'id': fake.VOLUME_ID} volume1 = fake_volume.fake_volume_obj(None, **vol1_args) connector1 = dict(initiator='test_iqn.1') connector2 = dict(initiator='test_iqn.2') connector3 = dict(initiator='test_iqn.3') self.driver.create_volume(volume1) self.driver.initialize_connection(volume1, connector1) self.driver.initialize_connection(volume1, connector2) self.driver.initialize_connection(volume1, connector3) self.driver.delete_volume(volume1) @mock.patch.object(requests.Session, 'request', FakeRequests) def test_no_active_ctrl(self): vol_args = {'display_name': 'test_volume_01', 'size': 1, 'id': fake.VOLUME_ID} volume = fake_volume.fake_volume_obj(None, **vol_args) connector = dict(initiator='test_iqn.1') self.driver.create_volume(volume) RUNTIME_VARS['controllers'] = [] self.assertRaises(zadara_exception.ZadaraVPSANoActiveController, self.driver.initialize_connection, volume, connector) @mock.patch.object(requests.Session, 'request', FakeRequests) def test_create_destroy_snapshot(self): """Create/Delete snapshot test.""" wrong_vol_args = {'display_name': 'wrong_vol_01', 'size': 1, 'id': fake.VOLUME2_ID} wrong_volume = fake_volume.fake_volume_obj(None, **wrong_vol_args) wrong_snap_args = {'display_name': 'snap_01', 'volume': wrong_volume} wrong_snapshot = fake_snapshot.fake_snapshot_obj(None, **wrong_snap_args) self.assertRaises(exception.VolumeDriverException, self.driver.create_snapshot, wrong_snapshot) # Create cinder volume and snapshot vol_args = {'display_name': 'test_volume_01', 'size': 1, 'id': fake.VOLUME_ID} volume = fake_volume.fake_volume_obj(None, **vol_args) snap_args = {'display_name': 'test_snap_01', 'id': fake.SNAPSHOT_ID, 'volume': volume} snapshot = fake_snapshot.fake_snapshot_obj(None, **snap_args) self.driver.create_volume(volume) self.driver.create_snapshot(snapshot) # Deleted should succeed for missing volume self.driver.delete_snapshot(wrong_snapshot) # Deleted should succeed for missing snap fake_snap_args = {'display_name': 'test_snap_02', 'id': fake.SNAPSHOT2_ID, 'volume': volume} fake_snap = fake_snapshot.fake_snapshot_obj(None, **fake_snap_args) self.driver.delete_snapshot(fake_snap) self.driver.delete_snapshot(snapshot) self.driver.delete_volume(volume) @mock.patch.object(requests.Session, 'request', FakeRequests) def test_expand_volume(self): """Expand volume test.""" vol_args = {'display_name': 'test_volume_01', 'id': fake.VOLUME_ID, 'size': 10} vol2_args = {'display_name': 'test_volume_02', 'id': fake.VOLUME2_ID, 'size': 10} volume = fake_volume.fake_volume_obj(None, **vol_args) volume2 = fake_volume.fake_volume_obj(None, **vol2_args) self.driver.create_volume(volume) self.assertRaises(exception.VolumeDriverException, self.driver.extend_volume, volume2, 15) self.assertRaises(exception.InvalidInput, self.driver.extend_volume, volume, 5) self.driver.extend_volume(volume, 15) self.driver.delete_volume(volume) @mock.patch.object(requests.Session, 'request', FakeRequests) def test_create_destroy_clones(self): """Create/Delete clones test.""" vol1_args = {'display_name': 'test_volume_01', 'id': fake.VOLUME_ID, 'size': 1} vol2_args = {'display_name': 'test_volume_02', 'id': 
fake.VOLUME2_ID, 'size': 2} vol3_args = {'display_name': 'test_volume_03', 'id': fake.VOLUME3_ID, 'size': 1} volume1 = fake_volume.fake_volume_obj(None, **vol1_args) volume2 = fake_volume.fake_volume_obj(None, **vol2_args) volume3 = fake_volume.fake_volume_obj(None, **vol3_args) snap_args = {'display_name': 'test_snap_01', 'id': fake.SNAPSHOT_ID, 'volume': volume1} snapshot = fake_snapshot.fake_snapshot_obj(None, **snap_args) self.driver.create_volume(volume1) self.driver.create_snapshot(snapshot) # Test invalid vol reference wrong_vol_args = {'display_name': 'wrong_volume_01', 'id': fake.VOLUME4_ID, 'size': 1} wrong_volume = fake_volume.fake_volume_obj(None, **wrong_vol_args) wrong_snap_args = {'display_name': 'test_wrong_snap', 'id': fake.SNAPSHOT2_ID, 'volume': wrong_volume} wrong_snapshot = fake_snapshot.fake_snapshot_obj(None, **wrong_snap_args) self.assertRaises(exception.SnapshotNotFound, self.driver.create_volume_from_snapshot, wrong_volume, wrong_snapshot) wrong_snap_args = {'display_name': 'test_wrong_snap', 'id': fake.SNAPSHOT3_ID, 'volume': volume1} wrong_snapshot = fake_snapshot.fake_snapshot_obj(None, **wrong_snap_args) # Test invalid snap reference self.assertRaises(exception.SnapshotNotFound, self.driver.create_volume_from_snapshot, volume1, wrong_snapshot) # Test invalid src_vref for volume clone self.assertRaises(exception.VolumeDriverException, self.driver.create_cloned_volume, volume3, volume2) self.driver.create_volume_from_snapshot(volume2, snapshot) self.driver.create_cloned_volume(volume3, volume1) self.driver.delete_volume(volume3) self.driver.delete_volume(volume2) self.driver.delete_snapshot(snapshot) self.driver.delete_volume(volume1) @mock.patch.object(requests.Session, 'request', FakeRequests) def test_get_volume_stats(self): """Get stats test.""" self.configuration.safe_get.return_value = 'ZadaraVPSAISCSIDriver' data = self.driver.get_volume_stats(True) self.assertEqual('Zadara Storage', data['vendor_name']) self.assertEqual(100, data['total_capacity_gb']) self.assertEqual(99, data['free_capacity_gb']) self.assertEqual({'total_capacity_gb': 100, 'free_capacity_gb': 99, 'multiattach': True, 'reserved_percentage': self.configuration.reserved_percentage, 'QoS_support': False, 'vendor_name': 'Zadara Storage', 'driver_version': self.driver.VERSION, 'storage_protocol': 'iSER', 'volume_backend_name': 'ZadaraVPSAISCSIDriver'}, data) def create_vpsa_backend_volume(self, vol_id, vol_name, vol_size, vol_status, has_snapshots): vol_params = {} vol_params['id'] = vol_id vol_params['name'] = vol_name vol_params['display-name'] = vol_name vol_params['cg_name'] = vol_name vol_params['provider_location'] = vol_name vol_params['status'] = vol_status vol_params['capacity'] = vol_size vol_params['pool'] = 'pool-0001' vol_params['has_snapshots'] = has_snapshots vol_params['server_ext_names'] = '' vol_params['snapshots'] = [] volname = 'fake-volume' vpsa_volume = (volname, vol_params) RUNTIME_VARS['volumes'].append(vpsa_volume) return vpsa_volume @mock.patch.object(requests.Session, 'request', FakeRequests) def test_manage_existing_volume(self): vol_args = {'id': fake.VOLUME_ID, 'display_name': 'manage-name', 'size': 1} volume = fake_volume.fake_volume_obj(None, **vol_args) vpsa_volume = self.create_vpsa_backend_volume('fake_id', 'fake_name', 1, 'Available', 'NO') # Check the failure with an empty reference for volume identifier = {} self.assertRaises(exception.ManageExistingInvalidReference, self.driver.manage_existing, volume, identifier) # Check the failure with an invalid 
reference for volume identifier['name'] = 'fake_identifier' self.assertRaises(exception.ManageExistingInvalidReference, self.driver.manage_existing, volume, identifier) identifier['name'] = 'fake_name' self.driver.manage_existing(volume, identifier) # Check the new volume renamed accordingly self.assertEqual(vpsa_volume[1]['display-name'], 'OS_%s' % volume['name']) self.driver.delete_volume(volume) @mock.patch.object(requests.Session, 'request', FakeRequests) def test_manage_existing_snapshot(self): vol_args = {'display_name': 'fake_name', 'size': 1, 'id': fake.VOLUME_ID} volume = fake_volume.fake_volume_obj(None, **vol_args) self.driver.create_volume(volume) # Create a backend snapshot that will be managed by cinder volume (vol_name, vol_params) = RUNTIME_VARS['volumes'][0] vol_params['snapshots'].append('fakesnapname') # Check the failure with wrong volume for snapshot wrong_vol_args = {'display_name': 'wrong_volume_01', 'size': 1, 'id': fake.VOLUME2_ID} wrong_volume = fake_volume.fake_volume_obj(None, **wrong_vol_args) wrong_snap_args = {'display_name': 'snap_01', 'volume': wrong_volume} wrong_snapshot = fake_snapshot.fake_snapshot_obj(None, **wrong_snap_args) identifier = {} self.assertRaises(exception.ManageExistingInvalidReference, self.driver.manage_existing_snapshot, wrong_snapshot, identifier) identifier['name'] = 'fake_identifier' self.assertRaises(exception.ManageExistingInvalidReference, self.driver.manage_existing_snapshot, wrong_snapshot, identifier) # Check the failure with wrong identifier for the snapshot snap_args = {'display_name': 'manage_snapname', 'id': fake.SNAPSHOT_ID, 'volume': volume} snapshot = fake_snapshot.fake_snapshot_obj(None, **snap_args) self.assertRaises(exception.ManageExistingInvalidReference, self.driver.manage_existing_snapshot, snapshot, identifier) identifier['name'] = 'fakesnapname' self.driver.manage_existing_snapshot(snapshot, identifier) # Check that the backend snapshot has been renamed (vol_name, vol_params) = RUNTIME_VARS['volumes'][0] self.assertEqual(vol_params['snapshots'][0], snapshot['name']) self.driver.delete_snapshot(snapshot) self.driver.delete_volume(volume) @mock.patch.object(requests.Session, 'request', FakeRequests) def test_get_manageable_volumes(self): vpsa_volume1 = self.create_vpsa_backend_volume('manage_vol_id1', 'manage_vol1', 1, 'Available', 'NO') vpsa_volume2 = self.create_vpsa_backend_volume('manage_vol_id2', 'manage_vol2', 2, 'Available', 'NO') cinder_vol1_args = {'display_name': 'fake-volume1', 'size': 3, 'id': fake.VOLUME_ID} cinder_vol2_args = {'display_name': 'fake-volume2', 'size': 4, 'id': fake.VOLUME2_ID} cinder_vol1 = fake_volume.fake_volume_obj(None, **cinder_vol1_args) cinder_vol2 = fake_volume.fake_volume_obj(None, **cinder_vol2_args) self.driver.create_volume(cinder_vol1) self.driver.create_volume(cinder_vol2) cinder_vols = [cinder_vol1, cinder_vol2] manageable_vols = (self.driver.get_manageable_volumes( cinder_vols, None, 10, 0, ['size'], ['asc'])) # Check the volumes are returned in the sorted order self.assertEqual(len(manageable_vols), 4) self.assertGreater(manageable_vols[1]['size'], manageable_vols[0]['size']) self.assertGreater(manageable_vols[3]['size'], manageable_vols[2]['size']) self.driver.delete_volume(cinder_vol1) self.driver.delete_volume(cinder_vol2) # Try to manage the volume and delete it vol1_args = {'display_name': 'manage-name1', 'size': 1, 'id': fake.VOLUME3_ID} volume1 = fake_volume.fake_volume_obj(None, **vol1_args) identifier = {'name': 'manage_vol1'} 
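# Note (descriptive comment, not in the upstream file): manage_existing() renames
# the backend volume to the driver's name template ('OS_<cinder volume name>'),
# which the assertion immediately below verifies.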
self.driver.manage_existing(volume1, identifier) self.assertEqual(vpsa_volume1[1]['display-name'], 'OS_%s' % volume1['name']) self.driver.delete_volume(volume1) # Manage and delete the volume vol2_args = {'display_name': 'manage-name2', 'size': 1, 'id': fake.VOLUME4_ID} volume2 = fake_volume.fake_volume_obj(None, **vol2_args) identifier = {'name': 'manage_vol2'} self.driver.manage_existing(volume2, identifier) self.assertEqual(vpsa_volume2[1]['display-name'], 'OS_%s' % volume2['name']) self.driver.delete_volume(volume2) @mock.patch.object(requests.Session, 'request', FakeRequests) def test_get_manageable_snapshots(self): # Create a cinder volume and a snapshot vol_args = {'display_name': 'test_volume_01', 'size': 1, 'id': fake.VOLUME_ID} volume = fake_volume.fake_volume_obj(None, **vol_args) snap_args = {'display_name': 'test_snap_01', 'id': fake.SNAPSHOT_ID, 'volume': volume} snapshot = fake_snapshot.fake_snapshot_obj(None, **snap_args) self.driver.create_volume(volume) self.driver.create_snapshot(snapshot) # Create backend snapshots for the volume vpsa_volume = self.create_vpsa_backend_volume('manage_vol_id', 'manage_vol', 1, 'Available', 'YES') snapshot1 = {'name': 'manage_snap_01', 'volume_name': vpsa_volume[1]['name'], 'provider_location': 'manage_snap_01'} snapshot2 = {'name': 'manage_snap_02', 'volume_name': vpsa_volume[1]['name'], 'provider_location': 'manage_snap_02'} vpsa_volume[1]['snapshots'].append(snapshot1['name']) vpsa_volume[1]['snapshots'].append(snapshot2['name']) cinder_snapshots = [snapshot] manageable_snapshots = (self.driver.get_manageable_snapshots( cinder_snapshots, None, 10, 0, ['reference'], ['asc'])) # Check the returned manageable snapshot names self.assertEqual(snapshot1['name'], manageable_snapshots[0]['reference']['name']) self.assertEqual(snapshot2['name'], manageable_snapshots[1]['reference']['name']) # Verify the safety of the snapshots to manage self.assertEqual(manageable_snapshots[0]['safe_to_manage'], True) self.assertEqual(manageable_snapshots[1]['safe_to_manage'], True) # Verify the refernce of the source volume of the snapshots source_vol = manageable_snapshots[0]['source_reference'] self.assertEqual(vpsa_volume[1]['name'], source_vol['name']) source_vol = manageable_snapshots[1]['source_reference'] self.assertEqual(vpsa_volume[1]['name'], source_vol['name']) self.driver.delete_volume(volume) @mock.patch.object(requests.Session, 'request', FakeRequests) def test_manage_existing_volume_get_size(self): vol_args = {'display_name': 'fake_name', 'id': fake.VOLUME_ID, 'size': 1} volume = fake_volume.fake_volume_obj(None, **vol_args) self.driver.create_volume(volume) # Check the failure with empty reference of the volume identifier = {} self.assertRaises(exception.ManageExistingInvalidReference, self.driver.manage_existing_get_size, volume, identifier) # Check the failure with invalid volume reference identifier = {'name': 'fake_identifiter'} self.assertRaises(exception.ManageExistingInvalidReference, self.driver.manage_existing_get_size, volume, identifier) # Verify the volume size identifier = {'name': 'OS_volume-%s' % volume['id']} vol_size = self.driver.manage_existing_get_size(volume, identifier) self.assertEqual(vol_size, volume.size) self.driver.delete_volume(volume) @mock.patch.object(requests.Session, 'request', FakeRequests) def test_manage_existing_snapshot_get_size(self): # Create a cinder volume and a snapshot vol_args = {'display_name': 'fake_name', 'id': fake.VOLUME_ID, 'size': 1} volume = fake_volume.fake_volume_obj(None, **vol_args) 
self.driver.create_volume(volume) snap_args = {'display_name': 'fake_snap', 'id': fake.SNAPSHOT_ID, 'volume': volume} snapshot = fake_snapshot.fake_snapshot_obj(None, **snap_args) self.driver.create_snapshot(snapshot) # Check with the wrong volume of the snapshot wrong_vol_args = {'display_name': 'wrong_volume_01', 'size': 1, 'id': fake.VOLUME2_ID} wrong_volume = fake_volume.fake_volume_obj(None, **wrong_vol_args) wrong_snap_args = {'display_name': 'wrong_snap', 'volume': wrong_volume} wrong_snapshot = fake_snapshot.fake_snapshot_obj(None, **wrong_snap_args) identifier = {} self.assertRaises(exception.ManageExistingInvalidReference, self.driver.manage_existing_snapshot_get_size, wrong_snapshot, identifier) identifier = {'name': 'fake_identifiter'} self.assertRaises(exception.ManageExistingInvalidReference, self.driver.manage_existing_snapshot_get_size, wrong_snapshot, identifier) # Check with the invalid reference of the snapshot self.assertRaises(exception.ManageExistingInvalidReference, self.driver.manage_existing_snapshot_get_size, snapshot, identifier) # Verify the snapshot size same as the volume identifier = {'name': 'snapshot-%s' % snapshot['id']} snap_size = (self.driver.manage_existing_snapshot_get_size( snapshot, identifier)) self.assertEqual(snap_size, volume['size']) self.driver.delete_volume(volume) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315577.3071203 cinder-27.0.0/cinder/tests/unit/volume/drivers/toyou/0000775000175000017500000000000000000000000022620 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/volume/drivers/toyou/__init__.py0000664000175000017500000000000000000000000024717 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/volume/drivers/toyou/test_acs5000.py0000664000175000017500000024024500000000000025313 0ustar00zuulzuul00000000000000# Copyright 2020 toyou Corp. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
""" Testing for acs5000 san storage driver """ import copy import json import random import time from unittest import mock from eventlet import greenthread from oslo_concurrency import processutils from oslo_config import cfg from oslo_utils import excutils from oslo_utils import units import paramiko from cinder import context import cinder.db from cinder import exception from cinder.objects import volume_attachment from cinder import ssh_utils from cinder.tests.unit import fake_constants as fake from cinder.tests.unit import test from cinder.tests.unit import utils as testutils from cinder import utils as cinder_utils from cinder.volume import configuration as conf from cinder.volume.drivers.toyou.acs5000 import acs5000_common from cinder.volume.drivers.toyou.acs5000 import acs5000_fc from cinder.volume.drivers.toyou.acs5000 import acs5000_iscsi POOLS_NAME = ['pool01', 'pool02'] VOLUME_PRE = acs5000_common.VOLUME_PREFIX # luns number only for test LUN_NUMS_AVAILABLE = range(0, 5) # snapshot count on a volume, only for test SNAPSHOTS_A_VOLUME = 3 # snapshot count on a system, only for test SNAPSHOTS_ON_SYSTEM = 10 # volume count on a pool, only for test VOLUME_LIMIT_ON_POOL = 10 # volume count on a pool, only for test VOLUME_LIMIT_ON_SYSTEM = 16 # volume count on a system, only for test CONF = cfg.CONF class CommandSimulator(object): def __init__(self, pool_name): self._all_pools_name = {'acs5000_volpool_name': pool_name} self._pools_list = { 'pool01': { 'name': 'pool01', 'capacity': '799090409472', 'free_capacity': '795869184000', 'used_capacity': '3221225472', 'total_volumes': 0}, 'pool02': { 'name': 'pool02', 'capacity': '193273528320', 'free_capacity': '190052302848', 'used_capacity': '3221225472', 'total_volumes': 0 }} self._volumes_list = {} self._lun_maps_list = [] self._snapshots_list = [] self._controllers_list = [ {'id': '0', 'name': 'node1', 'iscsi_name': 'iqn.2020-12.cn.com.toyou:' 'disk-array-000f12345:dev0.ctr1', 'WWNN': '200008CA45D33768', 'status': 'online'}, {'id': '1', 'name': 'node2', 'iscsi_name': 'iqn.2020-04.cn.com.toyou:' 'disk-array-000f12345:dev0.ctr2', 'WWNN': '200008CA45D33768', 'status': 'online'}] self._iscsi_list = [ {'ctrl_idx': 0, 'id': 0, 'name': 'lan1', 'ip': '10.23.45.67', 'mask': '255.255.255.0', 'gw': '', 'link': '1 Gb/s'}, {'ctrl_idx': 0, 'id': 1, 'name': 'lan2', 'ip': '10.23.45.68', 'mask': '255.255.255.0', 'gw': '', 'link': '1 Gb/s'}, {'ctrl_idx': 0, 'id': 2, 'name': 'lan3', 'ip': '10.23.45.69', 'mask': '255.255.255.0', 'gw': '', 'link': '1 Gb/s'}, {'ctrl_idx': 1, 'id': 0, 'name': 'lan1', 'ip': '10.23.45.70', 'mask': '255.255.255.0', 'gw': '', 'link': '1 Gb/s'}, {'ctrl_idx': 1, 'id': 1, 'name': 'lan2', 'ip': '10.23.45.71', 'mask': '255.255.255.0', 'gw': '', 'link': 'Down'}, {'ctrl_idx': 1, 'id': 2, 'name': 'lan3', 'ip': '10.23.45.72', 'mask': '255.255.255.0', 'gw': '', 'link': 'Down'}] self._fc_list = [ {'ctrl_idx': 0, 'WWPN': str(random.randint(0, 9999999999999999)).zfill(16), 'link': 'Up'}, {'ctrl_idx': 0, 'wwpn': str(random.randint(0, 9999999999999999)).zfill(16), 'link': 'Up'}, {'ctrl_idx': 0, 'link': 'Up'}, {'ctrl_idx': 1, 'WWPN': str(random.randint(0, 9999999999999999)).zfill(16), 'link': 'Up'}, {'ctrl_idx': 1, 'link': 'Up'}, {'ctrl_idx': 1, 'wwpn': str(random.randint(0, 9999999999999999)).zfill(16), 'link': 'Down'}] self._system_info = {'version': '3.1.2.345678', 'vendor': 'TOYOU', 'system_name': 'Disk-Array', 'system_id': 'TY123456789ABCDEF', 'code_level': '1', 'ip': '10.0.0.1'} self._error = { 'success': (0, 'Success'), 'unknown': (1, 
'unknown error'), 'pool_not_exist': (101, 'The pool does not exist ' 'on the system.'), 'pool_exceeds_size': (102, 'The pool cannot provide ' 'more storage space'), 'volume_not_exist': (303, 'The volume does not exist ' 'on the system.'), 'source_volume_not_exist': (304, 'A clone relation needs ' 'a source volume.'), 'target_volume_not_exist': (305, 'A clone relation needs ' 'a target volume.'), 'source_size_larger_target': (306, 'The source volume ' 'must not be larger ' 'than the target volume' ' in a clone relation '), 'volume_limit_pool': (307, 'A pool only supports 96 volumes'), 'volume_limit_system': (308, 'A system only supports 96 volumes'), 'volume_name_exist': (310, 'A volume with same name ' 'already exists on the system.'), 'volume_extend_min': (321, 'A volume capacity shall not be' ' less than the current size'), 'volume_extend_size_equal': (322, 'A volume capacity shall not ' 'be equal to than the ' 'current size'), 'lun_not_exist': (401, 'The volume does not exist ' 'on the system.'), 'not_available_lun': (402, 'The system have no available lun.'), 'host_empty': (403, 'The host is empty.'), 'snap_over_system': (503, 'The system snapshots maximum quantity ' 'has been reached.'), 'snap_over_volume': (504, 'A volume snapshots maximum quantity ' 'has been reached.'), 'snap_not_exist': (505, 'The snapshot does not exist ' 'on the system.'), 'snap_not_latest': (506, 'The snapshot is not the latest one.'), 'snapshot_not_belong_volume': (507, 'The snapshot does not ' 'belong to the volume.'), 'snap_name_existed': (508, 'A snapshot with same name ' 'already exists on the system.') } self._command_function = { 'set_volume': 'set_volume_property', 'error_ssh': 'error_ssh' } self._volume_type = { '0': 'RAID Volume', '10': 'BACKUP' } @staticmethod def _json_return(rows=None, msg='', key=0): json_data = {'key': key, 'msg': msg, 'arr': rows} return (json.dumps(json_data), '') @staticmethod def _create_id(lists, key='id'): ids = [] if isinstance(lists, list): for v in lists: ids.append(int(v[key])) elif isinstance(lists, dict): for v in lists.values(): ids.append(int(v[key])) new_id = 'ffffffffff' while True: new_id = str(random.randint(1000000000, 9999999999)) if new_id not in ids: break return new_id def _clone_thread(self, vol_name, setting=None): intval = 0.1 loop_times = int(self._volumes_list[vol_name]['size_gb']) chunk = int(100 / loop_times) if setting: for k, value in setting.items(): for v in value: self._volumes_list[k][v[0]] = v[1] time.sleep(v[2]) self._volumes_list[vol_name]['status'] = 'Cloning' while loop_times > 0: # volumes may be deleted if vol_name in self._volumes_list: src_vol = self._volumes_list[vol_name] else: return if src_vol['clone'] not in self._volumes_list: self._volumes_list[vol_name]['status'] = 'Online' self._volumes_list[vol_name]['r'] = '' return progress = src_vol['r'] if not progress: progress = 0 src_vol['r'] = str(int(progress) + chunk) loop_times -= 1 self._volumes_list[vol_name] = src_vol time.sleep(intval) self._volumes_list[vol_name]['status'] = 'Online' self._volumes_list[vol_name]['r'] = '' def execute_command(self, cmd_list, check_exit_code=True): command = cmd_list[1] if command in self._command_function: command = self._command_function[command] func = getattr(self, '_sim_' + command) kwargs = {} for i in range(2, len(cmd_list)): if cmd_list[i].startswith('--'): key = cmd_list[i][2:] value = '' if cmd_list[i + 1]: value = cmd_list[i + 1] i += 1 if key in kwargs.keys(): if not isinstance(kwargs[key], list): kwargs[key] = [kwargs[key]] 
kwargs[key].append(value) else: kwargs[key] = value try: out, err = func(**kwargs) return (out, err) except Exception as e: with excutils.save_and_reraise_exception(): if check_exit_code: raise processutils.ProcessExecutionError( exit_code=1, stdout='out', stderr=e, cmd=' '.join(cmd_list)) def _sim_get_system(self, **kwargs): return self._json_return(self._system_info) def _sim_ls_iscsi(self, **kwargs): return self._json_return(self._iscsi_list) def _sim_ls_fc(self, **kwargs): return self._json_return(self._fc_list) def _sim_get_pool(self, **kwargs): pool_name = kwargs['pool'].strip('\'\"') if pool_name in self._all_pools_name['acs5000_volpool_name']: vol_len = 0 for vol in self._volumes_list.values(): if vol['poolname'] == pool_name: vol_len += 1 if pool_name in self._pools_list: pool_data = self._pools_list[pool_name] else: pool_data = self._pools_list['pool01'] pool_data['name'] = pool_name pool_data['total_volumes'] = str(vol_len) return self._json_return(pool_data) else: return self._json_return({}) def _sim_get_volume(self, **kwargs): rows = [] if 'volume' not in kwargs: volume_name = [] elif isinstance(kwargs['volume'], list): volume_name = kwargs['volume'] elif isinstance(kwargs['volume'], str): volume_name = [kwargs['volume']] for vol_name in volume_name: if vol_name in self._volumes_list.keys(): rows.append(self._volumes_list[vol_name]) return self._json_return(rows) def _sim_ls_controller(self, **kwargs): return self._json_return(self._controllers_list) def _sim_create_volume(self, **kwargs): volume_name = kwargs['volume'] pool_name = kwargs['pool'] size = kwargs['size'] type = kwargs['type'] if pool_name not in self._pools_list: return self._json_return( msg=self._error['pool_not_exist'][1], key=self._error['pool_not_exist'][0]) if volume_name in self._volumes_list: return self._json_return( msg=self._error['volume_name_exist'][1], key=self._error['volume_name_exist'][0]) elif len(self._volumes_list) >= VOLUME_LIMIT_ON_SYSTEM: return self._json_return( msg=self._error['volume_limit_system'][1], key=self._error['volume_limit_system'][0]) volume_count_on_pool = 0 for v in self._volumes_list.values(): if v['poolname'] == pool_name: volume_count_on_pool += 1 if volume_count_on_pool >= VOLUME_LIMIT_ON_POOL: return self._json_return( msg=self._error['volume_limit_pool'][1], key=self._error['volume_limit_pool'][0]) avail_size = (int(self._pools_list[pool_name]['free_capacity']) / units.Gi) if float(size) > avail_size: return self._json_return( msg=self._error['pool_exceeds_size'][1], key=self._error['pool_exceeds_size'][0]) volume_info = {} volume_info['id'] = self._create_id(self._volumes_list) volume_info['name'] = volume_name volume_info['size_gb'] = size volume_info['size_mb'] = str(int(float(size) * 1024)) volume_info['status'] = 'Online' volume_info['health'] = 'Optimal' volume_info['r'] = '' volume_info['poolname'] = pool_name volume_info['has_clone'] = 0 volume_info['clone'] = 'N/A' volume_info['clone_snap'] = 'N/A' if type not in ('0', '10'): type = '0' volume_info['type'] = self._volume_type[type] self._volumes_list[volume_info['name']] = volume_info return self._json_return() def _sim_delete_volume(self, **kwargs): vol_name = kwargs['volume'] if vol_name in self._volumes_list: del self._volumes_list[vol_name] return self._json_return() def _sim_extend_volume(self, **kwargs): vol_name = kwargs['volume'] size = int(kwargs['size']) if vol_name not in self._volumes_list: return self._json_return( msg=self._error['volume_not_exist'][1], key=self._error['volume_not_exist'][0]) 
volume = self._volumes_list[vol_name] curr_size = int(volume['size_mb']) / 1024 pool = self._pools_list[volume['poolname']] avail_size = int(pool['free_capacity']) / units.Gi if curr_size > size: return self._json_return( msg=self._error['volume_extend_min'][1], key=self._error['volume_extend_min'][0]) elif curr_size == size: return self._json_return( msg=self._error['volume_extend_size_equal'][1], key=self._error['volume_extend_size_equal'][0]) elif (size - curr_size) > avail_size: return self._json_return( msg=self._error['pool_exceeds_size'][1], key=self._error['pool_exceeds_size'][0]) self._volumes_list[vol_name]['size_gb'] = str(size) return self._json_return() def _sim_create_clone(self, **kwargs): src_name = kwargs['volume'] tgt_name = kwargs['clone'] src_exist = False tgt_exist = False for vol in self._volumes_list.values(): if (vol['name'] == src_name and vol['type'] == self._volume_type['0']): src_exist = True elif (vol['name'] == tgt_name and vol['type'] == self._volume_type['10']): tgt_exist = True if src_exist and tgt_exist: break if not src_exist: return self._json_return( msg=self._error['source_volume_not_exist'][1], key=self._error['source_volume_not_exist'][0]) elif not tgt_exist: return self._json_return( msg=self._error['target_volume_not_exist'][1], key=self._error['target_volume_not_exist'][0]) src_size = int(self._volumes_list[src_name]['size_gb']) tgt_size = int(self._volumes_list[tgt_name]['size_gb']) if src_size > tgt_size: return self._json_return( msg=self._error['source_size_larger_target'][1], key=self._error['source_size_larger_target'][0]) tgt_volume = self._volumes_list[tgt_name] self._volumes_list[src_name]['has_clone'] = 1 self._volumes_list[src_name]['clone'] = tgt_volume['name'] return self._json_return() def _sim_start_clone(self, **kwargs): vol_name = kwargs['volume'] snapshot = kwargs['snapshot'] if len(snapshot) > 0: snap_found = False for snap in self._snapshots_list: if snap['name'] == snapshot: snap_found = True break if not snap_found: return self._json_return( msg=self._error['snap_not_exist'][1], key=self._error['snap_not_exist'][0]) else: snapshot = ('clone-' + str(random.randint(100, 999))) tmp_snap = {'volume': vol_name, 'snapshot': snapshot} self._sim_create_snapshot(**tmp_snap) self._volumes_list[vol_name]['status'] = 'Queued' self._volumes_list[vol_name]['clone_snap'] = snapshot greenthread.spawn_n(self._clone_thread, vol_name) return self._json_return() def _sim_delete_clone(self, **kwargs): vol_name = kwargs['volume'] snapshot = kwargs['snapshot'] if vol_name not in self._volumes_list: return self._json_return( msg=self._error['volume_not_exist'][1], key=self._error['volume_not_exist'][0]) self._volumes_list[vol_name]['has_clone'] = 0 clone_volume = self._volumes_list[vol_name]['clone'] self._volumes_list[vol_name]['clone'] = 'N/A' clone_snap = self._volumes_list[vol_name]['clone_snap'] self._volumes_list[vol_name]['clone_snap'] = 'N/A' self._volumes_list[clone_volume]['type'] = self._volume_type['0'] if len(snapshot) == 0: for snap in self._snapshots_list: if clone_snap == snap['name']: self._snapshots_list.remove(snap) break return self._json_return() def _sim_create_lun_map(self, **kwargs): volume_name = kwargs.get('volume', None) protocol = kwargs.get('protocol', None) hosts = kwargs.get('host', None) if volume_name is None or protocol is None or hosts is None: return self._json_return( msg=self._error['unknown'][1], key=self._error['unknown'][0]) if volume_name not in self._volumes_list: return self._json_return( 
msg=self._error['volume_not_exist'][1], key=self._error['volume_not_exist'][0]) if isinstance(hosts, str): if hosts == '': return self._json_return( msg=self._error['host_empty'][1], key=self._error['host_empty'][0]) hosts = [hosts] volume = self._volumes_list[volume_name] available_luns = LUN_NUMS_AVAILABLE existed_lun = -1 for lun_row in self._lun_maps_list: if lun_row['vd_id'] == volume['id']: if lun_row['host'] in hosts: existed_lun = lun_row['lun'] hosts = [h for h in hosts if h != lun_row['host']] else: if lun_row['protocol'] == protocol: available_luns = [lun for lun in available_luns if lun != lun_row['lun']] if hosts and existed_lun > -1: return self._json_return({'lun': existed_lun}) lun_info = {} lun_info['vd_id'] = volume['id'] lun_info['vd_name'] = volume['name'] lun_info['protocol'] = protocol if existed_lun > -1: lun_info['lun'] = existed_lun elif available_luns: lun_info['lun'] = available_luns[0] else: return self._json_return( msg=self._error['not_available_lun'][1], key=self._error['not_available_lun'][0]) for host in hosts: lun_info['id'] = self._create_id(self._lun_maps_list) lun_info['host'] = host self._lun_maps_list.append(copy.deepcopy(lun_info)) ret = {} if protocol == 'FC': ret = {'lun': lun_info['lun']} elif protocol == 'iSCSI': ret = {'lun': [], 'iscsi_name': [], 'portal': []} for iscsi in self._iscsi_list: if iscsi['link'] == 'Down': continue ret['lun'].append(lun_info['lun']) ret['portal'].append('%s:3260' % iscsi['ip']) for control in self._controllers_list: if iscsi['ctrl_idx'] == int(control['id']): ret['iscsi_name'].append(control['iscsi_name']) break return self._json_return(ret) def _sim_delete_lun_map(self, **kwargs): map_exist = False volume_name = kwargs['volume'] protocol = kwargs['protocol'] hosts = kwargs['host'] all_host = False if hosts == '-1': all_host = True elif isinstance(hosts, str): hosts = [hosts] if volume_name not in self._volumes_list: return self._json_return( msg=self._error['volume_not_exist'][1], key=self._error['volume_not_exist'][0]) volume = self._volumes_list[volume_name] lun_maps_list = self._lun_maps_list self._lun_maps_list = [] for row in lun_maps_list: if (row['vd_id'] == volume['id'] and row['protocol'] == protocol and (all_host or row['host'] in hosts)): map_exist = True else: map_exist = False self._lun_maps_list.append(row) if not map_exist: return self._json_return( msg=self._error['lun_not_exist'][1], key=self._error['lun_not_exist'][0]) else: return self._json_return() def _sim_create_snapshot(self, **kwargs): volume_name = kwargs['volume'] snapshot_name = kwargs['snapshot'] if volume_name not in self._volumes_list: return self._json_return( msg=self._error['volume_not_exist'][1], key=self._error['volume_not_exist'][0]) if len(self._snapshots_list) >= SNAPSHOTS_ON_SYSTEM: return self._json_return( msg=self._error['snap_over_system'][1], key=self._error['snap_over_system'][0]) tag = -1 volume_snap_count = 0 for snap in self._snapshots_list: if snap['vd_name'] == volume_name: volume_snap_count += 1 if int(snap['tag']) > tag: tag = int(snap['tag']) if snap['name'] == snapshot_name: return self._json_return( msg=self._error['snap_name_existed'][1], key=self._error['snap_name_existed'][0]) if volume_snap_count >= SNAPSHOTS_A_VOLUME: return self._json_return( msg=self._error['snap_over_volume'][1], key=self._error['snap_over_volume'][0]) volume = self._volumes_list[volume_name] snapshot = {} snapshot['id'] = self._create_id(self._snapshots_list) snapshot['name'] = snapshot_name snapshot['vd_id'] = volume['id'] 
snapshot['vd_name'] = volume['name'] snapshot['tag'] = tag + 1 snapshot['create_time'] = '' self._snapshots_list.append(snapshot) return self._json_return() def _sim_delete_snapshot(self, **kwargs): volume_name = kwargs['volume'] snapshot_name = kwargs['snapshot'] if volume_name not in self._volumes_list: return self._json_return( msg=self._error['volume_not_exist'][1], key=self._error['volume_not_exist'][0]) snap_exist = False for snap in self._snapshots_list: if (snap['vd_name'] == volume_name and snap['name'] == snapshot_name): snap_exist = True self._snapshots_list.remove(snap) break if not snap_exist: return self._json_return( msg=self._error['snap_not_exist'][1], key=self._error['snap_not_exist'][0]) return self._json_return() def _sim_rollback_snapshot(self, **kwargs): volume_name = kwargs['volume'] snapshot_name = kwargs['snapshot'] if volume_name and volume_name not in self._volumes_list: return self._json_return( msg=self._error['volume_not_exist'][1], key=self._error['volume_not_exist'][0]) snapshot = [] for snap in self._snapshots_list: if snap['name'] == snapshot_name: snapshot = snap break if not snapshot: return self._json_return( msg=self._error['snap_not_exist'][1], key=self._error['snap_not_exist'][0]) if volume_name and volume_name != snapshot['vd_name']: return self._json_return( msg=self._error['snapshot_not_belong_volume'][1], key=self._error['snapshot_not_belong_volume'][0]) elif not volume_name: volume_name = snapshot['vd_name'] for snap in self._snapshots_list: if (snap['vd_name'] == volume_name and snapshot_name != snap['name'] and snap['tag'] > snapshot['tag']): return self._json_return( msg=self._error['snap_not_latest'][1], key=self._error['snap_not_latest'][0]) return self._json_return() def _sim_set_volume_property(self, **kwargs): volume_name = kwargs['volume'] kwargs.pop('volume') if len(kwargs) == 0: raise exception.InvalidInput( reason=self._error['unknown'][1]) new_name = volume_name if 'new_name' in kwargs: new_name = kwargs['new_name'] kwargs.pop('new_name') if volume_name not in self._volumes_list: return self._json_return( msg=self._error['volume_not_exist'][1], key=self._error['volume_not_exist'][0]) volume = self._volumes_list[volume_name] volume['name'] = new_name for k, v in kwargs.items(): if k in volume: volume[k] = v else: return ('', self._error['unknown'][1]) if volume_name != new_name: del self._volumes_list[volume_name] self._volumes_list[new_name] = volume else: self._volumes_list[volume_name] = volume return self._json_return() def _sim_error_ssh(self, **kwargs): error = kwargs['error'] if error == 'json_error': return ('This text is used for json errors.', '') elif error == 'dict_error': return (json.dumps('This text is used for dict errors.'), '') elif error == 'keys_error': keys = {'msg': 'This text is used for keys errors'} return (json.dumps(keys), '') elif error == 'key_false': keys = {'msg': 'This text is used for key non-0 error', 'key': 1, 'arr': {}} return (json.dumps(keys), '') class Acs5000ISCSIFakeDriver(acs5000_iscsi.Acs5000ISCSIDriver): def __init__(self, *args, **kwargs): super(Acs5000ISCSIFakeDriver, self).__init__(*args, **kwargs) def set_fake_storage(self, fake): self.fake_storage = fake def _run_ssh(self, cmd_list, check_exit_code=True): cinder_utils.check_ssh_injection(cmd_list) ret = self.fake_storage.execute_command(cmd_list, check_exit_code) return ret class Acs5000FCFakeDriver(acs5000_fc.Acs5000FCDriver): def __init__(self, *args, **kwargs): super(Acs5000FCFakeDriver, self).__init__(*args, **kwargs) def 
set_fake_storage(self, fake): self.fake_storage = fake def _run_ssh(self, cmd_list, check_exit_code=True): cinder_utils.check_ssh_injection(cmd_list) ret = self.fake_storage.execute_command(cmd_list, check_exit_code) return ret class Acs5000ISCSIDriverTestCase(test.TestCase): @mock.patch.object(time, 'sleep') def setUp(self, mock_sleep): super(Acs5000ISCSIDriverTestCase, self).setUp() self.configuration = mock.Mock(conf.Configuration) self.configuration.san_is_local = False self.configuration.san_ip = '23.44.56.78' self.configuration.san_login = 'cliuser' self.configuration.san_password = 'clipassword' self.configuration.acs5000_volpool_name = ['pool01'] self.configuration.acs5000_multiattach = True self.iscsi_driver = Acs5000ISCSIFakeDriver( configuration=self.configuration) initiator = 'test.iqn.%s' % str(random.randint(10000, 99999)) self._connector = {'ip': '1.234.56.78', 'host': 'stack', 'wwpns': [], 'initiator': initiator} self.sim = CommandSimulator(POOLS_NAME) self.iscsi_driver.set_fake_storage(self.sim) self.ctxt = context.get_admin_context() self.db = cinder.db self.iscsi_driver.db = self.db self.iscsi_driver.get_driver_options() self.iscsi_driver.do_setup(None) self.iscsi_driver.check_for_setup_error() def _create_volume(self, **kwargs): prop = {'host': 'stack@ty1#%s' % POOLS_NAME[0], 'size': 1, 'volume_type_id': self.vt['id']} for p in prop.keys(): if p not in kwargs: kwargs[p] = prop[p] vol = testutils.create_volume(self.ctxt, **kwargs) self.iscsi_driver.create_volume(vol) return vol def _delete_volume(self, volume): self.iscsi_driver.delete_volume(volume) self.db.volume_destroy(self.ctxt, volume['id']) def _assert_lun_exists(self, vol_id, exists): lun_maps = self.sim._lun_maps_list is_lun_defined = False luns = [] volume_name = VOLUME_PRE + vol_id[-12:] for lun in lun_maps: if volume_name == lun['vd_name']: luns.append(lun) if len(luns): is_lun_defined = True self.assertEqual(exists, is_lun_defined) return luns def test_validate_connector(self): conn_neither = {'host': 'host'} conn_iscsi = {'host': 'host', 'initiator': 'iqn.123'} conn_fc = {'host': 'host', 'wwpns': 'fff123'} conn_both = {'host': 'host', 'initiator': 'iqn.123', 'wwpns': 'fff123'} self.iscsi_driver._state['enabled_protocols'] = set(['iSCSI']) self.iscsi_driver.validate_connector(conn_iscsi) self.iscsi_driver.validate_connector(conn_both) self.assertRaises(exception.InvalidConnectorException, self.iscsi_driver.validate_connector, conn_fc) self.assertRaises(exception.InvalidConnectorException, self.iscsi_driver.validate_connector, conn_neither) def test_initialize_connection(self): volume = self._create_volume() result = self.iscsi_driver.initialize_connection(volume, self._connector) ip_connect = self.sim._iscsi_list ip_count = 0 for iscsi in ip_connect: if iscsi['link'] != 'Down': ip_count += 1 self.assertEqual('iscsi', result['driver_volume_type']) self.assertEqual(ip_count, len(result['data']['target_iqns'])) self.assertEqual(ip_count, len(result['data']['target_portals'])) self.assertEqual(volume['id'], result['data']['volume_id']) self.assertEqual(ip_count, len(result['data']['target_portals'])) self._delete_volume(volume) def test_initialize_connection_not_found(self): prop = {'host': 'stack@ty1#%s' % POOLS_NAME[0], 'size': 1, 'volume_type_id': self.vt['id']} vol = testutils.create_volume(self.ctxt, **prop) self.assertRaises(exception.VolumeNotFound, self.iscsi_driver.initialize_connection, vol, self._connector) self.db.volume_destroy(self.ctxt, vol['id']) def 
test_initialize_connection_available_lun(self): volume_list = [] for i in LUN_NUMS_AVAILABLE: vol = self._create_volume() self.iscsi_driver.initialize_connection( vol, self._connector) volume_list.append(vol) vol = self._create_volume() self.assertRaises(exception.ISCSITargetAttachFailed, self.iscsi_driver.initialize_connection, vol, self._connector) self._delete_volume(vol) for v in volume_list: self.iscsi_driver.terminate_connection( v, self._connector) self._delete_volume(v) def test_initialize_connection_exception(self): vol = self._create_volume() connector = self._connector connector['initiator'] = '' self.assertRaises(exception.VolumeBackendAPIException, self.iscsi_driver.initialize_connection, vol, connector) self._delete_volume(vol) def test_initialize_connection_multi_host(self): connector = self._connector initiator1 = ('test.iqn.%s' % str(random.randint(10000, 99999))) initiator2 = ('test.iqn.%s' % str(random.randint(10000, 99999))) connector['initiator'] = [initiator1, initiator2] volume = self._create_volume() self.iscsi_driver.initialize_connection( volume, connector) lun_maps = self._assert_lun_exists(volume['id'], True) hosts = [] for lun in lun_maps: hosts.append(lun['host']) self.assertIn(initiator1, hosts) self.assertIn(initiator2, hosts) self.iscsi_driver.terminate_connection( volume, connector) self._assert_lun_exists(volume['id'], False) self._delete_volume(volume) def test_initialize_connection_protocol(self): volume = self._create_volume() protocol = self.iscsi_driver.protocol self.iscsi_driver.protocol = 'error_protocol' self.assertRaises(exception.VolumeBackendAPIException, self.iscsi_driver.initialize_connection, volume, self._connector) self.iscsi_driver.protocol = protocol self._delete_volume(volume) def test_terminate_connection(self): volume = self._create_volume() self.iscsi_driver.initialize_connection(volume, self._connector) self.iscsi_driver.terminate_connection(volume, self._connector) self._assert_lun_exists(volume['id'], False) self._delete_volume(volume) def test_terminate_connection_multi_attached(self): vol = self._create_volume() connector = self._connector connector['uuid'] = fake.UUID1 self.iscsi_driver.initialize_connection(vol, connector) self._assert_lun_exists(vol.id, True) attachment1 = volume_attachment.VolumeAttachment() attachment2 = volume_attachment.VolumeAttachment() attachment1.connector = connector attachment2.connector = connector vol.volume_attachment.objects.append(attachment1) vol.volume_attachment.objects.append(attachment2) self.iscsi_driver.terminate_connection(vol, connector) self._assert_lun_exists(vol.id, True) vol.volume_attachment.objects = [attachment1] self.iscsi_driver.terminate_connection(vol, connector) self._assert_lun_exists(vol.id, False) self.iscsi_driver.initialize_connection(vol, connector) self._assert_lun_exists(vol.id, True) self.iscsi_driver.terminate_connection(vol, None) self._assert_lun_exists(vol.id, False) self._delete_volume(vol) class Acs5000FCDriverTestCase(test.TestCase): @mock.patch.object(time, 'sleep') def setUp(self, mock_sleep): super(Acs5000FCDriverTestCase, self).setUp() self.configuration = mock.Mock(conf.Configuration) self.configuration.san_is_local = False self.configuration.san_ip = '23.44.56.78' self.configuration.san_login = 'cliuser' self.configuration.san_password = 'clipassword' self.configuration.acs5000_volpool_name = ['pool01'] self.configuration.acs5000_multiattach = True self.fc_driver = Acs5000FCFakeDriver( configuration=self.configuration) wwpns = [ str(random.randint(0, 
9999999999999999)).zfill(16), str(random.randint(0, 9999999999999999)).zfill(16)] initiator = 'test.iqn.%s' % str(random.randint(10000, 99999)) self._connector = {'ip': '1.234.56.78', 'host': 'stack', 'wwpns': wwpns, 'initiator': initiator} self.sim = CommandSimulator(POOLS_NAME) self.fc_driver.set_fake_storage(self.sim) self.ctxt = context.get_admin_context() self.db = cinder.db self.fc_driver.db = self.db self.fc_driver.get_driver_options() self.fc_driver.do_setup(None) self.fc_driver.check_for_setup_error() def _create_volume(self, **kwargs): prop = {'host': 'stack@ty1#%s' % POOLS_NAME[0], 'size': 1, 'volume_type_id': self.vt['id']} for p in prop.keys(): if p not in kwargs: kwargs[p] = prop[p] vol = testutils.create_volume(self.ctxt, **kwargs) self.fc_driver.create_volume(vol) return vol def _delete_volume(self, volume): self.fc_driver.delete_volume(volume) self.db.volume_destroy(self.ctxt, volume['id']) def _assert_lun_exists(self, vol_id, exists): lun_maps = self.sim._lun_maps_list is_lun_defined = False luns = [] volume_name = VOLUME_PRE + vol_id[-12:] for lun in lun_maps: if volume_name == lun['vd_name']: luns.append(lun) if len(luns): is_lun_defined = True self.assertEqual(exists, is_lun_defined) return luns def test_validate_connector(self): conn_neither = {'host': 'host'} conn_iscsi = {'host': 'host', 'initiator': 'iqn.123'} conn_fc = {'host': 'host', 'wwpns': 'fff123'} conn_both = {'host': 'host', 'initiator': 'iqn.123', 'wwpns': 'fff123'} self.fc_driver.validate_connector(conn_fc) self.fc_driver.validate_connector(conn_both) self.assertRaises(exception.InvalidConnectorException, self.fc_driver.validate_connector, conn_iscsi) self.assertRaises(exception.InvalidConnectorException, self.fc_driver.validate_connector, conn_neither) def test_initialize_connection(self): volume = self._create_volume() result = self.fc_driver.initialize_connection(volume, self._connector) fc_list = self.sim._fc_list up_wwpns = [] for port in fc_list: if port['link'] == 'Up': if 'WWPN' in port: up_wwpns.append(port['WWPN']) elif 'wwpn' in port: up_wwpns.append(port['wwpn']) self.assertEqual('fibre_channel', result['driver_volume_type']) self.assertEqual(sorted(up_wwpns), sorted(result['data']['target_wwn'])) self.assertEqual(volume['id'], result['data']['volume_id']) self.assertIsNotNone(result['data']['target_lun']) self._delete_volume(volume) def test_initialize_connection_not_found(self): prop = {'host': 'stack@ty1#%s' % POOLS_NAME[0], 'size': 1, 'volume_type_id': self.vt['id']} vol = testutils.create_volume(self.ctxt, **prop) self.assertRaises(exception.VolumeNotFound, self.fc_driver.initialize_connection, vol, self._connector) self.db.volume_destroy(self.ctxt, vol['id']) def test_initialize_connection_failure(self): volume_list = [] for i in LUN_NUMS_AVAILABLE: vol = self._create_volume() self.fc_driver.initialize_connection( vol, self._connector) volume_list.append(vol) vol = self._create_volume() self.assertRaises(exception.VolumeBackendAPIException, self.fc_driver.initialize_connection, vol, self._connector) self._delete_volume(vol) for v in volume_list: self.fc_driver.terminate_connection( v, self._connector) self._delete_volume(v) def test_initialize_connection_exception(self): vol = self._create_volume() connector = self._connector self.fc_driver.protocol = 'error_protocol' self.assertRaises(exception.VolumeBackendAPIException, self.fc_driver.initialize_connection, vol, connector) self.fc_driver.protocol = acs5000_fc.Acs5000FCDriver.PROTOCOL fc_list = self.sim._fc_list self.sim._fc_list = []
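# With the simulator reporting no FC ports, initialize_connection is expected to fail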
self.assertRaises(exception.VolumeBackendAPIException, self.fc_driver.initialize_connection, vol, connector) # _check_multi_attached then delete_lun_map connector['uuid'] = fake.UUID1 attachment1 = volume_attachment.VolumeAttachment() attachment2 = volume_attachment.VolumeAttachment() attachment1.connector = connector attachment2.connector = connector vol.volume_attachment.objects.append(attachment1) vol.volume_attachment.objects.append(attachment2) self.assertRaises(exception.VolumeBackendAPIException, self.fc_driver.initialize_connection, vol, connector) self.sim._fc_list = fc_list self._delete_volume(vol) def test_terminate_connection(self): volume = self._create_volume() self.fc_driver.initialize_connection(volume, self._connector) self.fc_driver.terminate_connection(volume, self._connector) self._assert_lun_exists(volume['id'], False) self._delete_volume(volume) def test_terminate_connection_warn(self): volume = self._create_volume() connector = self._connector connector['uuid'] = fake.UUID1 self.fc_driver.initialize_connection(volume, connector) self.fc_driver.terminate_connection(volume, None) self._assert_lun_exists(volume.id, False) self.fc_driver.initialize_connection(volume, connector) attachment1 = volume_attachment.VolumeAttachment() attachment2 = volume_attachment.VolumeAttachment() attachment1.connector = connector attachment2.connector = connector volume.volume_attachment.objects.append(attachment1) volume.volume_attachment.objects.append(attachment2) self.fc_driver.terminate_connection(volume, connector) self._assert_lun_exists(volume.id, True) fc_list = self.sim._fc_list self.sim._fc_list = [] volume.volume_attachment.objects = [attachment1] self.fc_driver.terminate_connection(volume, connector) self.sim._fc_list = fc_list self._assert_lun_exists(volume.id, False) self._delete_volume(volume) class Acs5000CommonDriverTestCase(test.TestCase): @mock.patch.object(time, 'sleep') def setUp(self, mock_sleep): super(Acs5000CommonDriverTestCase, self).setUp() self.configuration = mock.Mock(conf.Configuration) self.configuration.san_is_local = False self.configuration.san_ip = '23.44.56.78' self.configuration.san_ssh_port = '22' self.configuration.san_login = 'cliuser' self.configuration.san_password = 'clipassword' self.configuration.acs5000_volpool_name = POOLS_NAME self.configuration.acs5000_copy_interval = 0.01 self.configuration.acs5000_multiattach = True self.configuration.reserved_percentage = 0 options = acs5000_iscsi.Acs5000ISCSIDriver.get_driver_options() config = conf.Configuration(options, conf.SHARED_CONF_GROUP) self._driver = Acs5000ISCSIFakeDriver( configuration=self.configuration) self.override_config('san_ip', '23.44.56.78', conf.SHARED_CONF_GROUP) self.override_config('san_ssh_port', '22', conf.SHARED_CONF_GROUP) self.override_config('san_login', 'cliuser', conf.SHARED_CONF_GROUP) self.override_config('san_password', 'clipassword', conf.SHARED_CONF_GROUP) self.override_config('acs5000_volpool_name', POOLS_NAME, conf.SHARED_CONF_GROUP) self._driver.configuration.safe_get = self._safe_get self._iscsi_driver = acs5000_iscsi.Acs5000ISCSIDriver( configuration=config) initiator = 'test.iqn.%s' % str(random.randint(10000, 99999)) self._connector = {'ip': '1.234.56.78', 'host': 'stack', 'wwpns': [], 'initiator': initiator} self.sim = CommandSimulator(POOLS_NAME) self._driver.set_fake_storage(self.sim) self.ctxt = context.get_admin_context() self.db = cinder.db self._driver.db = self.db self._driver.do_setup(None) self._driver.check_for_setup_error() def _safe_get(self, 
key): try: return getattr(self._driver.configuration, key) except AttributeError: return None def _assert_vol_exists(self, name, exists): volume = self._driver._cmd.get_volume(VOLUME_PRE + name[-12:]) is_vol_defined = False if volume: is_vol_defined = True self.assertEqual(exists, is_vol_defined) return volume def _assert_snap_exists(self, name, exists): snap_name = VOLUME_PRE + name[-12:] snapshot_list = self.sim._snapshots_list is_snap_defined = False snapshot = {} for snap in snapshot_list: if snap['name'] == snap_name: is_snap_defined = True snapshot = snap break self.assertEqual(exists, is_snap_defined) return snapshot def _create_volume(self, **kwargs): prop = {'host': 'stack@ty1#%s' % POOLS_NAME[0], 'size': 1, 'volume_type_id': self.vt['id']} driver = True if 'driver' in kwargs: if not kwargs['driver']: driver = False kwargs.pop('driver') for p in prop.keys(): if p not in kwargs: kwargs[p] = prop[p] vol = testutils.create_volume(self.ctxt, **kwargs) if driver: self._driver.create_volume(vol) return vol def _delete_volume(self, volume, driver=True): if driver: self._driver.delete_volume(volume) self.db.volume_destroy(self.ctxt, volume['id']) def _create_snapshot(self, vol_id, driver=True): snap = testutils.create_snapshot(self.ctxt, vol_id) if driver: self._driver.create_snapshot(snap) return snap def _delete_snapshot(self, snap, driver=True): if driver: self._driver.delete_snapshot(snap) self.db.snapshot_destroy(self.ctxt, snap['id']) def test_run_ssh_failure(self): self.assertRaises(exception.VolumeBackendAPIException, self._driver._build_pool_stats, 'error_pool') ssh_cmd = ['error_ssh', '--error', 'json_error'] self.assertRaises(exception.VolumeBackendAPIException, self._driver._cmd.run_ssh_info, ssh_cmd) ssh_cmd = ['error_ssh', '--error', 'dict_error'] self.assertRaises(exception.VolumeBackendAPIException, self._driver._cmd.run_ssh_info, ssh_cmd) ssh_cmd = ['error_ssh', '--error', 'keys_error'] self.assertRaises(exception.VolumeBackendAPIException, self._driver._cmd.run_ssh_info, ssh_cmd) ssh_cmd = ['error_ssh', '--error', 'key_false'] self.assertRaises(exception.VolumeBackendAPIException, self._driver._cmd.run_ssh_info, ssh_cmd) @mock.patch.object(ssh_utils, 'SSHPool') @mock.patch.object(processutils, 'ssh_execute') def test_run_ssh_with_ip(self, mock_ssh_execute, mock_ssh_pool): ssh_cmd = ['run_ssh'] self._iscsi_driver._run_ssh(ssh_cmd) mock_ssh_pool.assert_called_once_with( self._iscsi_driver.configuration.san_ip, self._iscsi_driver.configuration.san_ssh_port, self._iscsi_driver.configuration.ssh_conn_timeout, self._iscsi_driver.configuration.san_login, password=self._iscsi_driver.configuration.san_password, min_size=self._iscsi_driver.configuration.ssh_min_pool_conn, max_size=self._iscsi_driver.configuration.ssh_max_pool_conn) mock_ssh_pool.side_effect = [paramiko.SSHException, mock.MagicMock()] self._iscsi_driver._run_ssh(ssh_cmd) mock_ssh_pool.assert_called_once_with( self._iscsi_driver.configuration.san_ip, self._iscsi_driver.configuration.san_ssh_port, self._iscsi_driver.configuration.ssh_conn_timeout, self._iscsi_driver.configuration.san_login, password=self._iscsi_driver.configuration.san_password, min_size=self._iscsi_driver.configuration.ssh_min_pool_conn, max_size=self._iscsi_driver.configuration.ssh_max_pool_conn) @mock.patch.object(ssh_utils, 'SSHPool') @mock.patch.object(processutils, 'ssh_execute') def test_run_ssh_with_exception(self, mock_ssh_execute, mock_ssh_pool): mock_ssh_execute.side_effect = [processutils.ProcessExecutionError, mock.MagicMock()] 
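# ssh_execute is mocked to fail on the first call; _run_ssh is expected to raise ProcessExecutionError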
self.override_config('acs5000_volpool_name', None, self._iscsi_driver.configuration.config_group) ssh_cmd = ['run_ssh'] self.assertRaises(processutils.ProcessExecutionError, self._iscsi_driver._run_ssh, ssh_cmd) def test_do_setup(self): system_info = self.sim._system_info self.assertEqual(system_info['vendor'], self._driver._state['vendor']) self.assertIn('iSCSI', self._driver._state['enabled_protocols']) self.assertEqual(2, len(self._driver._state['controller'])) iscsi_list = self.sim._iscsi_list self.sim._iscsi_list = [] self._driver._state['enabled_protocols'] = set() self._driver.do_setup(None) self.assertEqual(set(), self._driver._state['enabled_protocols']) self.sim._iscsi_list = iscsi_list self._driver.do_setup(None) self.assertEqual({'iSCSI'}, self._driver._state['enabled_protocols']) def test_do_setup_no_pools(self): self._driver.pools = ['pool_error'] self.assertRaises(exception.InvalidInput, self._driver.do_setup, None) def test_create_volume(self): vol = self._create_volume() self._assert_vol_exists(vol['id'], True) self._delete_volume(vol) def test_create_volume_same_name(self): vol = self._create_volume() self._assert_vol_exists(vol['id'], True) self.assertRaises(exception.VolumeBackendAPIException, self._driver.create_volume, vol) self._delete_volume(vol) def test_create_volume_size_exceeds_limit(self): prop = { 'host': 'stack@ty2#%s' % POOLS_NAME[1], 'size': 200, 'driver': False } self._driver.get_volume_stats() vol = self._create_volume(**prop) self._assert_vol_exists(vol['id'], False) self.assertRaises(exception.VolumeSizeExceedsLimit, self._driver.create_volume, vol) self._delete_volume(vol, False) def test_create_volume_number_exceeds_pool_limit(self): volume_list = [] for i in range(VOLUME_LIMIT_ON_POOL): vol = self._create_volume() self._assert_vol_exists(vol['id'], True) volume_list.append(vol) vol = self._create_volume(driver=False) self.assertRaises(exception.VolumeLimitExceeded, self._driver.create_volume, vol) self._delete_volume(vol, False) for v in volume_list: self._delete_volume(v) def test_create_volume_number_exceeds_system_limit(self): volume_list = [] volume_count_on_pool = int(VOLUME_LIMIT_ON_SYSTEM / len(POOLS_NAME)) for i in range(volume_count_on_pool): for x in range(len(POOLS_NAME)): vol = self._create_volume( host='stack@ty1#%s' % POOLS_NAME[x]) self._assert_vol_exists(vol['id'], True) volume_list.append(vol) vol = self._create_volume(driver=False) self.assertRaises(exception.VolumeLimitExceeded, self._driver.create_volume, vol) self._delete_volume(vol, False) for v in volume_list: self._delete_volume(v) def test_create_volume_pool_not_existed(self): prop = { 'host': 'stack@ty2#no_pool', 'driver': False } self._driver.get_volume_stats() vol = self._create_volume(**prop) self._assert_vol_exists(vol['id'], False) self.assertRaises(exception.VolumeBackendAPIException, self._driver.create_volume, vol) self._delete_volume(vol, False) def test_delete_volume(self): vol = self._create_volume() self._assert_vol_exists(vol['id'], True) self._delete_volume(vol) self._assert_vol_exists(vol['id'], False) def test_create_snapshot(self): vol = self._create_volume() self._assert_vol_exists(vol['id'], True) snap = self._create_snapshot(vol['id']) self._assert_snap_exists(snap['id'], True) self._delete_snapshot(snap) self._delete_volume(vol) def test_create_snapshot_exceed_limit(self): vol = self._create_volume() self._assert_vol_exists(vol['id'], True) snapshot_list = [] for i in range(SNAPSHOTS_A_VOLUME): snap = self._create_snapshot(vol['id']) 
self._assert_snap_exists(snap['id'], True) snapshot_list.append(snap) snap = self._create_snapshot(vol['id'], False) self.assertRaises(exception.SnapshotLimitExceeded, self._driver.create_snapshot, snap) self._delete_snapshot(snap, False) vol_list = [vol] snap_count = SNAPSHOTS_A_VOLUME while snap_count < SNAPSHOTS_ON_SYSTEM: vol = self._create_volume() vol_list.append(vol) for x in range(SNAPSHOTS_A_VOLUME): snap = self._create_snapshot(vol['id']) self._assert_snap_exists(snap['id'], True) snapshot_list.append(snap) snap_count += 1 if snap_count >= SNAPSHOTS_ON_SYSTEM: break vol = self._create_volume() vol_list.append(vol) snap = self._create_snapshot(vol['id'], False) self.assertRaises(exception.SnapshotLimitExceeded, self._driver.create_snapshot, snap) for sp in snapshot_list: self._delete_snapshot(sp) for vol in vol_list: self._delete_volume(vol) def test_create_snapshot_name_existed(self): vol = self._create_volume() snap = self._create_snapshot(vol['id']) self._assert_vol_exists(vol['id'], True) self._assert_snap_exists(snap['id'], True) self.assertRaises(exception.VolumeBackendAPIException, self._driver.create_snapshot, snap) self._delete_snapshot(snap) self._delete_volume(vol) def test_delete_snapshot(self): vol = self._create_volume() self._assert_vol_exists(vol['id'], True) snap = self._create_snapshot(vol['id']) self._assert_snap_exists(snap['id'], True) self._delete_snapshot(snap) self._assert_snap_exists(snap['id'], False) self._delete_volume(vol) def test_delete_snapshot_not_found(self): vol = self._create_volume() self._assert_vol_exists(vol['id'], True) snap = self._create_snapshot(vol['id'], False) self._assert_snap_exists(snap['id'], False) self.assertRaises(exception.SnapshotNotFound, self._driver.delete_snapshot, snap) self._delete_snapshot(snap, False) self._delete_volume(vol) def test_delete_snapshot_volume_not_found(self): vol = self._create_volume() snap = self._create_snapshot(vol['id']) self._delete_volume(vol) self._assert_vol_exists(vol['id'], False) self._assert_snap_exists(snap['id'], True) self.assertRaises(exception.VolumeBackendAPIException, self._driver.delete_snapshot, snap) self._driver.create_volume(vol) self._delete_snapshot(snap) self._driver.delete_volume(vol) def test_create_volume_from_snapshot(self): prop = {'size': 2} vol = self._create_volume(**prop) self._assert_vol_exists(vol['id'], True) snap = self._create_snapshot(vol['id']) self._assert_snap_exists(snap['id'], True) prop['driver'] = False new_vol = self._create_volume(**prop) self._driver.create_volume_from_snapshot(new_vol, snap) new_volume = self._assert_vol_exists(new_vol['id'], True) self.assertEqual(1, len(new_volume)) self.assertEqual('2', new_volume[0]['size_gb']) self.assertEqual('RAID Volume', new_volume[0]['type']) self._delete_volume(new_vol) self._delete_snapshot(snap) self._delete_volume(vol) def test_create_volume_from_snapshot_not_found(self): vol = self._create_volume() self._assert_vol_exists(vol['id'], True) snap = self._create_snapshot(vol['id'], False) self._assert_snap_exists(snap['id'], False) new_vol = self._create_volume(driver=False) self._assert_vol_exists(new_vol['id'], False) self.assertRaises(exception.SnapshotNotFound, self._driver.create_volume_from_snapshot, new_vol, snap) self._delete_volume(new_vol, False) self._delete_snapshot(snap, False) self._delete_volume(vol) def test_create_snapshot_volume_not_found(self): vol = self._create_volume(driver=False) self._assert_vol_exists(vol['id'], False) self.assertRaises(exception.VolumeNotFound, 
self._create_snapshot, vol['id']) self._delete_volume(vol, driver=False) def test_create_cloned_volume(self): src_volume = self._create_volume() self._assert_vol_exists(src_volume['id'], True) tgt_volume = self._create_volume(driver=False) self._driver.create_cloned_volume(tgt_volume, src_volume) volume = self._assert_vol_exists(tgt_volume['id'], True) self.assertEqual(1, len(volume)) self.assertEqual('RAID Volume', volume[0]['type']) self._delete_volume(src_volume) self._delete_volume(tgt_volume) def test_create_cloned_volume_with_size(self): prop = {'size': 2} src_volume = self._create_volume(**prop) volume = self._assert_vol_exists(src_volume['id'], True) prop['driver'] = False tgt_volume = self._create_volume(**prop) self._driver.create_cloned_volume(tgt_volume, src_volume) clone_volume = self._assert_vol_exists(tgt_volume['id'], True) self.assertEqual(1, len(volume)) self.assertEqual(1, len(clone_volume)) self.assertEqual('RAID Volume', volume[0]['type']) self.assertEqual('RAID Volume', clone_volume[0]['type']) self.assertEqual('2', volume[0]['size_gb']) self.assertEqual('2', clone_volume[0]['size_gb']) self._delete_volume(src_volume) self._delete_volume(tgt_volume) def test_create_cloned_volume_size_failure(self): prop = {'size': 10} src_volume = self._create_volume(**prop) self._assert_vol_exists(src_volume['id'], True) prop = {'size': 5, 'driver': False} tgt_volume = self._create_volume(**prop) self.assertRaises(exception.VolumeBackendAPIException, self._driver.create_cloned_volume, tgt_volume, src_volume) self._assert_vol_exists(tgt_volume['id'], False) self._delete_volume(src_volume) self._delete_volume(tgt_volume, False) def test_create_cloned_volume_failure(self): self.assertRaises(exception.VolumeBackendAPIException, self._driver._local_clone_copy, None, None) self.assertRaises(exception.VolumeBackendAPIException, self._driver._local_clone_copy, 'src_test', 'tgt_test') src_volume = self._create_volume() src_name = VOLUME_PRE + src_volume['id'][-12:] self.assertRaises(exception.VolumeBackendAPIException, self._driver._local_clone_copy, src_name, 'tgt_test') self._delete_volume(src_volume) def test_wait_volume_copy(self): src_volume = self._create_volume(size=2) src_info = self._assert_vol_exists(src_volume['id'], True)[0] tgt_volume = self._create_volume(size=2) tgt_info = self._assert_vol_exists(tgt_volume['id'], True)[0] self._driver._cmd.set_volume_property( src_info['name'], {'status': 'Queued', 'clone_snap': tgt_info['name']}) self._driver._cmd.set_volume_property(tgt_info['name'], {'type': 'BACKUP'}) src_name = VOLUME_PRE + src_volume['id'][-12:] tgt_name = VOLUME_PRE + tgt_volume['id'][-12:] self._driver._cmd.create_clone(src_name, tgt_name) tgt_set = { tgt_name: [('status', 'Erasing', 0.2)], src_name: [('status', 'Erasing', 0.2)], } greenthread.spawn_n(self.sim._clone_thread, src_name, tgt_set) ret = self._driver._wait_volume_copy(src_name, tgt_name, 'test_func', 'test_action') self.assertTrue(ret) self._driver._cmd.set_volume_property( src_info['name'], {'status': 'error', 'clone_snap': tgt_info['name']}) ret = self._driver._wait_volume_copy(src_name, tgt_name, 'test_func', 'test_action') self.assertFalse(ret) self._driver._cmd.set_volume_property( src_info['name'], {'status': 'Online', 'clone_snap': tgt_info['name']}) self._delete_volume(tgt_volume) self._assert_vol_exists(tgt_volume['id'], False) ret = self._driver._wait_volume_copy(src_name, tgt_name, 'test_func', 'test_action') self.assertFalse(ret) self._driver._cmd.set_volume_property(src_info['name'], {'type': 
'BACKUP'}) ret = self._driver._wait_volume_copy(tgt_name, 'backup_test', 'test_func', 'test_action') self.assertFalse(ret) self._delete_volume(src_volume) def test_extend_volume(self): volume = self._create_volume(size=10) vol_info = self._assert_vol_exists(volume['id'], True) self.assertEqual('10', vol_info[0]['size_gb']) self._driver.extend_volume(volume, '100') extend_vol = self._assert_vol_exists(volume['id'], True) self.assertEqual('100', extend_vol[0]['size_gb']) self._delete_volume(volume) def test_extend_volume_not_found(self): volume = self._create_volume(driver=False) self.assertRaises(exception.VolumeNotFound, self._driver.extend_volume, volume, 10) self._delete_volume(volume, False) def test_extend_volume_size_less(self): volume = self._create_volume(size=100) vol_info = self._assert_vol_exists(volume['id'], True) self.assertEqual('100', vol_info[0]['size_gb']) self.assertRaises(exception.VolumeBackendAPIException, self._driver.extend_volume, volume, '10') self._delete_volume(volume) def test_extend_volume_size_exceeds_limit(self): host = 'stack@ty2#%s' % POOLS_NAME[1] self._driver.get_volume_stats() volume = self._create_volume(size=10, host=host) vol_info = self._assert_vol_exists(volume['id'], True) self.assertEqual('10', vol_info[0]['size_gb']) self.assertEqual(POOLS_NAME[1], vol_info[0]['poolname']) self.assertRaises(exception.VolumeSizeExceedsLimit, self._driver.extend_volume, volume, '200') self._delete_volume(volume) def test_extend_volume_size_equal(self): volume = self._create_volume(size=10) vol_info = self._assert_vol_exists(volume['id'], True) self.assertEqual('10', vol_info[0]['size_gb']) self.assertRaises(exception.VolumeBackendAPIException, self._driver.extend_volume, volume, '10') self._delete_volume(volume) def test_migrate_volume_same_pool(self): host = 'stack@ty1#%s' % POOLS_NAME[0] volume = self._create_volume(host=host) target_host = { 'host': 'stack_new@ty1#%s' % POOLS_NAME[0], 'capabilities': { 'system_id': self.sim._system_info['system_id'], 'pool_name': POOLS_NAME[0] } } ret = self._driver.migrate_volume(self.ctxt, volume, target_host) self.assertEqual((True, None), ret) def test_migrate_volume_different_system(self): host = 'stack@ty1#%s' % POOLS_NAME[0] volume = self._create_volume(host=host) target_host = { 'host': 'stack_new@ty1#%s' % POOLS_NAME[0], 'capabilities': { 'system_id': 'test_system_id', 'pool_name': POOLS_NAME[0] } } ret = self._driver.migrate_volume(self.ctxt, volume, target_host) self.assertEqual((False, None), ret) target_host = { 'host': 'stack_new@ty1#%s' % POOLS_NAME[0], 'capabilities': { 'pool_name': POOLS_NAME[0] } } ret = self._driver.migrate_volume(self.ctxt, volume, target_host) self.assertEqual((False, None), ret) def test_migrate_volume_same_system_different_pool(self): host = 'stack@ty1#%s' % POOLS_NAME[0] volume = self._create_volume(host=host, size=2) target_host = { 'host': 'stack_new@ty1#%s' % POOLS_NAME[1], 'capabilities': { 'system_id': self.sim._system_info['system_id'], 'pool_name': POOLS_NAME[1] } } ret = self._driver.migrate_volume(self.ctxt, volume, target_host) self.assertEqual((True, None), ret) vol_info = self._assert_vol_exists(volume['id'], True) self.assertEqual(POOLS_NAME[1], vol_info[0]['poolname']) self.assertEqual('2', vol_info[0]['size_gb']) def test_get_volume_stats(self): self.assertEqual({}, self._driver._stats) self._driver.get_volume_stats() stats = self._driver._stats system_info = self.sim._system_info self.assertEqual(system_info['vendor'], stats['vendor_name']) def test_get_volume_none(self): 
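# get_volume should return an empty list for empty, unknown or unsupported identifiers, and all matching entries when given a list of names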
ret = self._driver._cmd.get_volume('') self.assertEqual([], ret) ret = self._driver._cmd.get_volume('test_volume') self.assertEqual([], ret) vol1 = self._create_volume() vol2 = self._create_volume() vol1_name = VOLUME_PRE + vol1['id'][-12:] vol2_name = VOLUME_PRE + vol2['id'][-12:] ret = self._driver._cmd.get_volume([vol1_name, vol2_name]) self.assertEqual(2, len(ret)) vol_name = [] for vol in ret: vol_name.append(vol['name']) self.assertEqual(sorted([vol1_name, vol2_name]), sorted(vol_name)) ret = self._driver._cmd.get_volume({'test_key': 'test_value'}) self.assertEqual([], ret) self._delete_volume(vol1) self._delete_volume(vol2) def test_check_for_setup_error_failure(self): self._driver._state['system_name'] = None self.assertRaises(exception.VolumeBackendAPIException, self._driver.check_for_setup_error) self._driver.do_setup(None) self._driver._state['system_id'] = None self.assertRaises(exception.VolumeBackendAPIException, self._driver.check_for_setup_error) self._driver.do_setup(None) self._driver._state['controller'] = [] self.assertRaises(exception.VolumeDriverException, self._driver.check_for_setup_error) self._driver.do_setup(None) self._driver._state['enabled_protocols'] = set() self.assertRaises(exception.InvalidInput, self._driver.check_for_setup_error) self._driver.do_setup(None) password = self._driver.configuration.san_password self._driver.configuration.san_password = None self.assertRaises(exception.InvalidInput, self._driver.check_for_setup_error) self._driver.configuration.san_password = password san_ip = self._driver.configuration.san_ip self._driver.configuration.san_ip = None self.assertRaises(exception.InvalidInput, self._driver.check_for_setup_error) self._driver.configuration.san_ip = san_ip def test_build_pool_stats_no_pool(self): self.assertRaises(exception.VolumeBackendAPIException, self._driver._build_pool_stats, 'pool_test') def test_set_volume_property_failure(self): volume = self._create_volume() self._assert_vol_exists(volume['id'], True) volume_name = VOLUME_PRE + volume['id'][-12:] self.assertRaises(exception.VolumeBackendAPIException, self._driver._cmd.set_volume_property, volume_name, {'error_key': 'error'}) self.assertRaises(exception.VolumeBackendAPIException, self._driver._cmd.set_volume_property, volume_name, {}) self._delete_volume(volume) def test_snapshot_revert_use_temp_snapshot(self): ret = self._driver.snapshot_revert_use_temp_snapshot() self.assertFalse(ret) def test_revert_to_snapshot(self): volume = self._create_volume() snapshot = self._create_snapshot(volume.id) self._driver.revert_to_snapshot(self.ctxt, volume, snapshot) self._delete_snapshot(snapshot) self._delete_volume(volume) def test_revert_to_snapshot_volume_not_found(self): volume = self._create_volume(driver=False) snapshot = self._create_snapshot(volume.id, driver=False) self.assertRaises(exception.VolumeNotFound, self._driver.revert_to_snapshot, self.ctxt, volume, snapshot) self._delete_snapshot(snapshot, driver=False) self._delete_volume(volume, driver=False) def test_revert_to_snapshot_snapshot_not_found(self): volume = self._create_volume() snapshot = self._create_snapshot(volume.id, driver=False) self.assertRaises(exception.SnapshotNotFound, self._driver.revert_to_snapshot, self.ctxt, volume, snapshot) self._delete_snapshot(snapshot, driver=False) self._delete_volume(volume) def test_revert_to_snapshot_not_latest_one(self): volume = self._create_volume() snapshot1 = self._create_snapshot(volume.id) snapshot2 = self._create_snapshot(volume.id) 
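# Reverting to snapshot1 must fail because snapshot2 is the more recent snapshot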
self.assertRaises(exception.InvalidSnapshot, self._driver.revert_to_snapshot, self.ctxt, volume, snapshot1) self._delete_snapshot(snapshot2) self._delete_snapshot(snapshot1) self._delete_volume(volume) def test_revert_to_snapshot_not_belong(self): volume1 = self._create_volume() volume2 = self._create_volume() snapshot = self._create_snapshot(volume2.id) self.assertRaises(exception.VolumeBackendAPIException, self._driver.revert_to_snapshot, self.ctxt, volume1, snapshot) self._delete_snapshot(snapshot) self._delete_volume(volume1) self._delete_volume(volume2) def test_convert_name(self): name = self._driver._convert_name('') self.assertEqual(len(acs5000_common.VOLUME_PREFIX) + 12, len(name)) name = self._driver._convert_name('test_name') self.assertEqual(len(acs5000_common.VOLUME_PREFIX) + 12, len(name)) name = self._driver._convert_name(fake.UUID1) self.assertEqual(len(acs5000_common.VOLUME_PREFIX) + 12, len(name)) def test_check_multi_attached(self): connector = self._connector volume = self._create_volume(driver=False) connector['uuid'] = fake.UUID1 attachment1 = volume_attachment.VolumeAttachment() attachment2 = volume_attachment.VolumeAttachment() volume.volume_attachment.objects.append(attachment1) volume.volume_attachment.objects.append(attachment2) count = self._driver._check_multi_attached(volume, connector) self.assertEqual(0, count) attachment1.connector = connector attachment2.connector = connector volume.volume_attachment.objects.append(attachment1) volume.volume_attachment.objects.append(attachment2) count = self._driver._check_multi_attached(volume, connector) self.assertEqual(4, count) self._delete_volume(volume, driver=False) def test_update_migrated_volume(self): old_vol = self._create_volume(driver=False) self._assert_vol_exists(old_vol.id, False) new_vol = self._create_volume() self._assert_vol_exists(new_vol.id, True) ret = self._driver.update_migrated_volume( self.ctxt, old_vol, new_vol, None) self.assertEqual({'_name_id': None}, ret) self._assert_vol_exists(old_vol.id, True) self._assert_vol_exists(new_vol.id, False) self._delete_volume(old_vol) self._delete_volume(new_vol, False) def test_update_migrated_volume_existed(self): old_vol = self._create_volume() self._assert_vol_exists(old_vol.id, True) new_vol = self._create_volume() self._assert_vol_exists(new_vol.id, True) ret = self._driver.update_migrated_volume( self.ctxt, old_vol, new_vol, None) self.assertEqual({'_name_id': new_vol.id}, ret) self._delete_volume(old_vol) self._delete_volume(new_vol) def test_manage_existing(self): volume_name = fake.UUID1 self._driver._cmd.create_volume(volume_name, '1', POOLS_NAME[0]) volume = self._driver._cmd.get_volume(volume_name) self.assertEqual(1, len(volume)) self.assertEqual(volume_name, volume[0]['name']) new_volume = self._create_volume(driver=False) self._assert_vol_exists(new_volume.id, False) self.assertRaises(exception.ManageExistingInvalidReference, self._driver.manage_existing, new_volume, {}) self._driver.manage_existing(new_volume, {'source-name': volume_name}) self._assert_vol_exists(new_volume.id, True) self._delete_volume(new_volume) def test_manage_existing_get_size(self): volume_name = fake.UUID1 self._driver._cmd.create_volume(volume_name, '1', POOLS_NAME[0]) vol = self._create_volume(size=1, driver=False) self._assert_vol_exists(vol.id, False) ret = self._driver.manage_existing_get_size( vol, {'source-name': volume_name}) self.assertEqual(1, ret) self._driver._cmd.delete_volume(volume_name) self._delete_volume(vol, False) def 
test_manage_existing_get_size_extend(self): volume_name = fake.UUID1 size_str = '1.2' size_gb = 2 self._driver._cmd.create_volume(volume_name, size_str, POOLS_NAME[0]) volume = self._driver._cmd.get_volume(volume_name) self.assertEqual(1, len(volume)) self.assertEqual(volume_name, volume[0]['name']) vol = self._create_volume(driver=False) self._assert_vol_exists(vol.id, False) ret = self._driver.manage_existing_get_size( vol, {'source-name': volume_name}) self.assertEqual(size_gb, ret) self._driver._cmd.delete_volume(volume_name) self._delete_volume(vol, driver=False) def test_manage_get_volume(self): vol = self._create_volume() vol_backend = self._assert_vol_exists(vol.id, True) vol_name = vol_backend[0]['name'] ret = self._driver._manage_get_volume({'source-name': vol_name}) self.assertEqual(vol_backend[0], ret) self.assertRaises(exception.ManageExistingInvalidReference, self._driver._manage_get_volume, {}) self.assertRaises(exception.ManageExistingInvalidReference, self._driver._manage_get_volume, {'source-name': 'error_volume'}) self.assertRaises(exception.ManageExistingInvalidReference, self._driver._manage_get_volume, {'source-name': vol_name}, 'error_pool') self._delete_volume(vol) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/volume/drivers/toyou/test_tyds.py0000664000175000017500000007137000000000000025224 0ustar00zuulzuul00000000000000# Copyright 2023 toyou Corp. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import unittest from unittest import mock from cinder import exception from cinder.tests.unit import fake_snapshot from cinder.tests.unit import fake_volume from cinder.volume import configuration as conf from cinder.volume.drivers.toyou.tyds import tyds as driver POOLS_NAME = ['pool1', 'pool2'] class TestTydsDriver(unittest.TestCase): @mock.patch('cinder.volume.drivers.toyou.tyds.tyds_client.TydsClient', autospec=True) def setUp(self, mock_tyds_client): """Set up the test case. - Creates a driver instance. - Mocks the TydsClient and its methods. - Initializes volumes and snapshots for testing. 
""" super().setUp() self.mock_client = mock_tyds_client.return_value self.mock_do_request = mock.MagicMock( side_effect=self.mock_client.do_request) self.mock_client.do_request = self.mock_do_request self.configuration = mock.Mock(spec=conf.Configuration) self.configuration.tyds_pools = POOLS_NAME self.configuration.san_ip = "23.44.56.78" self.configuration.tyds_http_port = 80 self.configuration.san_login = 'admin' self.configuration.san_password = 'admin' self.configuration.tyds_stripe_size = '4M' self.configuration.tyds_clone_progress_interval = 3 self.configuration.tyds_copy_progress_interval = 3 self.driver = driver.TYDSDriver(configuration=self.configuration) self.driver.do_setup(context=None) self.driver.check_for_setup_error() self.volume = fake_volume.fake_volume_obj(None) self.volume.host = 'host@backend#pool1' self.snapshot = fake_snapshot.fake_snapshot_obj(None) self.snapshot.volume = self.volume self.snapshot.volume_id = self.volume.id self.target_volume = fake_volume.fake_volume_obj(None) self.target_volume.host = 'host@backend#pool2' self.src_vref = self.volume def test_create_volume_success(self): """Test case for successful volume creation. - Sets mock return value. - Calls create_volume method. - Verifies if the create_volume method is called with correct arguments. """ self.mock_client.create_volume.return_value = self.volume self.driver.create_volume(self.volume) self.mock_client.create_volume.assert_called_once_with( self.volume.name, self.volume.size * 1024, 'pool1', '4M') def test_create_volume_failure(self): """Test case for volume creation failure. - Sets the mock return value to simulate a failure. - Calls the create_volume method. - Verifies if the create_volume method raises the expected exception. """ # Set the mock return value to simulate a failure self.mock_client.create_volume.side_effect = \ exception.VolumeBackendAPIException('API error') # Call the create_volume method and check the result self.assertRaises( exception.VolumeBackendAPIException, self.driver.create_volume, self.volume ) def test_delete_volume_success(self): """Test case for successful volume deletion. - Mocks the _get_volume_by_name method to return a volume. - Calls the delete_volume method. - Verifies if the delete_volume method is called with the correct volume ID. """ # Mock the _get_volume_by_name method to return a volume self.driver._get_volume_by_name = mock.Mock(return_value={'id': '13'}) # Call the delete_volume method self.driver.delete_volume(self.volume) # Verify if the delete_volume method is called with the correct # volume ID self.mock_client.delete_volume.assert_called_once_with('13') def test_delete_volume_failure(self): """Test case for volume deletion failure. - Mocks the _get_volume_by_name method to return a volume. - Sets the mock return value for delete_volume method to raise an exception. - Calls the delete_volume method. - Verifies if the delete_volume method raises the expected exception. """ # Mock the _get_volume_by_name method to return a volume self.driver._get_volume_by_name = mock.Mock(return_value={'id': '13'}) # Set the mock return value for delete_volume method to raise an # exception self.mock_client.delete_volume.side_effect = \ exception.VolumeBackendAPIException('API error') # Call the delete_volume method and verify if it raises the expected # exception self.assertRaises(exception.VolumeBackendAPIException, self.driver.delete_volume, self.volume) def test_create_snapshot_success(self): """Test case for successful snapshot creation. 
- Sets the mock return value for create_snapshot method. - Mocks the _get_volume_by_name method to return a volume. - Calls the create_snapshot method. - Verifies if the create_snapshot method is called with the correct arguments. """ # Set the mock return value for create_snapshot method self.mock_client.create_snapshot.return_value = self.snapshot # Mock the _get_volume_by_name method to return a volume self.driver._get_volume_by_name = mock.Mock(return_value={'id': '13'}) # Call the create_snapshot method self.driver.create_snapshot(self.snapshot) # Verify if the create_snapshot method is called with the correct # arguments self.mock_client.create_snapshot.assert_called_once_with( self.snapshot.name, '13', '%s/%s' % (self.volume.name, self.snapshot.name) ) def test_create_snapshot_failure_with_no_volume(self): """Test case for snapshot creation failure when volume is not found. - Mocks the _get_volume_by_name method to return None. - Calls the create_snapshot method. - Verifies if the create_snapshot method is not called. """ # Mock the _get_volume_by_name method to return None self.driver._get_volume_by_name = mock.Mock(return_value=None) # Call the create_snapshot method and check for exception self.assertRaises(driver.TYDSDriverException, self.driver.create_snapshot, self.snapshot) # Verify if the create_snapshot method is not called self.mock_client.create_snapshot.assert_not_called() def test_create_snapshot_failure(self): """Test case for snapshot creation failure. - Mocks the _get_volume_by_name method to return a volume. - Sets the mock return value for create_snapshot to raise an exception. - Calls the create_snapshot method. - Verifies if the create_snapshot method is called with the correct arguments. """ # Mock the _get_volume_by_name method to return a volume self.driver._get_volume_by_name = mock.Mock(return_value={'id': '13'}) # Set the mock return value for create_snapshot to raise an exception self.mock_client.create_snapshot.side_effect = \ exception.VolumeBackendAPIException('API error') # Call the create_snapshot method and check for exception self.assertRaises(exception.VolumeBackendAPIException, self.driver.create_snapshot, self.snapshot) # Verify if the create_snapshot method is called with the correct # arguments self.mock_client.create_snapshot.assert_called_once_with( self.snapshot.name, '13', '%s/%s' % (self.volume.name, self.snapshot.name)) def test_delete_snapshot_success(self): """Test case for successful snapshot deletion. - Mocks the _get_snapshot_by_name method to return a snapshot. - Calls the delete_snapshot method. - Verifies if the delete_snapshot method is called with the correct arguments. """ # Mock the _get_snapshot_by_name method to return a snapshot self.driver._get_snapshot_by_name = mock.Mock( return_value={'id': 'volume_id'}) # Call the delete_snapshot method self.driver.delete_snapshot(self.snapshot) # Verify if the delete_snapshot method is called with the correct # arguments self.mock_client.delete_snapshot.assert_called_once_with('volume_id') def test_delete_snapshot_failure(self): """Test case for snapshot deletion failure. - Mocks the _get_snapshot_by_name method to return a snapshot. - Sets the mock return value for delete_snapshot to raise an exception. - Calls the delete_snapshot method. - Verifies if the delete_snapshot method is called with the correct arguments. 
""" # Mock the _get_snapshot_by_name method to return a snapshot self.driver._get_snapshot_by_name = mock.Mock( return_value={'id': 'volume_id'}) # Set the mock return value for delete_snapshot to raise an exception self.mock_client.delete_snapshot.side_effect = \ exception.VolumeBackendAPIException('API error') # Call the delete_snapshot method and check for exception self.assertRaises(exception.VolumeBackendAPIException, self.driver.delete_snapshot, self.snapshot) # Verify if the delete_snapshot method is called once self.mock_client.delete_snapshot.assert_called_once() @mock.patch('time.sleep') @mock.patch('cinder.coordination.synchronized', new=mock.MagicMock()) def test_create_volume_from_snapshot_success(self, mock_sleep): """Test case for successful volume creation from snapshot. - Mocks the sleep function. - Sets the mock return values for create_volume_from_snapshot, _get_volume_by_name, and get_clone_progress. - Calls the create_volume_from_snapshot method. - Verifies if the create_volume_from_snapshot method is called with the correct arguments. - Verifies if the _get_volume_by_name method is called once. """ # Mock the sleep function mock_sleep.return_value = None # Set the mock return values for create_volume_from_snapshot, # _get_volume_by_name, and get_clone_progress self.mock_client.create_volume_from_snapshot.return_value = self.volume self.driver._get_volume_by_name = mock.Mock( return_value={'poolName': 'pool1', 'sizeMB': self.volume.size * 1024}) self.mock_client.get_clone_progress.return_value = {'progress': '100%'} # Call the create_volume_from_snapshot method self.driver.create_volume_from_snapshot(self.target_volume, self.snapshot) # Verify if the create_volume_from_snapshot method is called with the # correct arguments self.mock_client.create_volume_from_snapshot.assert_called_once_with( self.target_volume.name, 'pool2', self.snapshot.name, self.volume.name, 'pool1') # Verify if the _get_volume_by_name method is called once self.driver._get_volume_by_name.assert_called_once() def test_create_volume_from_snapshot_failure(self): """Test case for volume creation from snapshot failure. - Sets the mock return value for _get_volume_by_name to return None. - Calls the create_volume_from_snapshot method. - Verifies if the create_volume_from_snapshot method raises a driver.TYDSDriverException. """ # Set the mock return value for _get_volume_by_name to return None self.driver._get_volume_by_name = mock.Mock(return_value=None) # Call the create_volume_from_snapshot method and check for exception self.assertRaises(driver.TYDSDriverException, self.driver.create_volume_from_snapshot, self.volume, self.snapshot) @mock.patch('cinder.coordination.synchronized', new=mock.MagicMock()) def test_create_cloned_volume_success(self): """Test case for successful cloned volume creation. - Sets the mock return values for get_copy_progress, get_pools, get_volumes, and _get_volume_by_name. - Calls the create_cloned_volume method. - Verifies if the create_clone_volume method is called with the correct arguments. 
""" # Set the mock return values for get_copy_progress, get_pools, # get_volumes, and _get_volume_by_name self.mock_client.get_copy_progress.return_value = {'progress': '100%'} self.driver.client.get_pools.return_value = [ {'name': 'pool1', 'id': 'pool1_id'}, {'name': 'pool2', 'id': 'pool2_id'} ] self.driver.client.get_volumes.return_value = [ {'blockName': self.volume.name, 'poolName': 'pool1', 'id': 'source_volume_id'} ] self.driver._get_volume_by_name = mock.Mock( return_value={'name': self.volume.name, 'id': '13'}) # Call the create_cloned_volume method self.driver.create_cloned_volume(self.target_volume, self.src_vref) # Verify if the create_clone_volume method is called with the correct # arguments self.driver.client.create_clone_volume.assert_called_once_with( 'pool1', self.volume.name, 'source_volume_id', 'pool2', 'pool2_id', self.target_volume.name ) @mock.patch('cinder.coordination.synchronized', new=mock.MagicMock()) def test_create_cloned_volume_failure(self): """Test case for cloned volume creation failure. - Sets the mock return values for get_pools and get_volumes. - Calls the create_cloned_volume method. - Verifies if the create_cloned_volume method raises a driver.TYDSDriverException. """ # Set the mock return values for get_pools and get_volumes self.driver.client.get_pools.return_value = [ {'name': 'pool1', 'id': 'pool1_id'}, {'name': 'pool2', 'id': 'pool2_id'} ] self.driver.client.get_volumes.return_value = [ {'blockName': self.volume.name, 'poolName': None, 'id': '14'} ] # Call the create_cloned_volume method and check for exception self.assertRaises(driver.TYDSDriverException, self.driver.create_cloned_volume, self.target_volume, self.src_vref) def test_extend_volume_success(self): """Test case for successful volume extension. - Sets the new size. - Calls the extend_volume method. - Verifies if the extend_volume method is called with the correct arguments. """ new_size = 10 # Call the extend_volume method self.driver.extend_volume(self.volume, new_size) # Verify if the extend_volume method is called with the correct # arguments self.mock_client.extend_volume.assert_called_once_with( self.volume.name, 'pool1', new_size * 1024) def test_extend_volume_failure(self): """Test case for volume extension failure. - Sets the new size and error message. - Sets the mock side effect for extend_volume to raise an Exception. - Calls the extend_volume method. - Verifies if the extend_volume method raises the expected exception and the error message matches. - Verifies if the extend_volume method is called with the correct arguments. """ new_size = 10 # Set the mock side effect for extend_volume to raise an Exception self.mock_client.extend_volume.side_effect = \ exception.VolumeBackendAPIException('API Error: Volume extend') # Call the extend_volume method and check for exception self.assertRaises(exception.VolumeBackendAPIException, self.driver.extend_volume, self.volume, new_size) # Verify if the extend_volume method is called with the correct # arguments self.mock_client.extend_volume.assert_called_once_with( self.volume.name, 'pool1', new_size * 1024) def test_get_volume_stats(self): """Test case for retrieving volume statistics. - Sets the mock side effect for safe_get to return the appropriate values. - Sets the mock return values for get_pools and get_volumes. - Calls the get_volume_stats method. - Verifies if the get_pools and get_volumes methods are called once. - Verifies if the retrieved statistics match the expected statistics. 
""" def safe_get_side_effect(param_name): if param_name == 'volume_backend_name': return 'toyou_backend' # Set the mock side effect for safe_get to return the appropriate # values self.configuration.safe_get.side_effect = safe_get_side_effect # Set the mock return values for get_pools and get_volumes self.mock_client.get_pools.return_value = [ {'name': 'pool1', 'stats': {'max_avail': '107374182400', 'stored': '53687091200'}}, {'name': 'pool2', 'stats': {'max_avail': '214748364800', 'stored': '107374182400'}} ] self.mock_client.get_volumes.return_value = [ {'poolName': 'pool1', 'sizeMB': '1024'}, {'poolName': 'pool1', 'sizeMB': '2048'}, {'poolName': 'pool2', 'sizeMB': '3072'} ] # Call the get_volume_stats method stats = self.driver.get_volume_stats() # Verify if the get_pools and get_volumes methods are called once self.mock_client.get_pools.assert_called_once() self.mock_client.get_volumes.assert_called_once() # Define the expected statistics expected_stats = { 'vendor_name': 'TOYOU', 'driver_version': '1.0.0', 'volume_backend_name': 'toyou_backend', 'pools': [ { 'pool_name': 'pool1', 'total_capacity_gb': 100.0, 'free_capacity_gb': 50.0, 'provisioned_capacity_gb': 3.0, 'thin_provisioning_support': True, 'QoS_support': False, 'consistencygroup_support': False, 'total_volumes': 2, 'multiattach': False }, { 'pool_name': 'pool2', 'total_capacity_gb': 200.0, 'free_capacity_gb': 100.0, 'provisioned_capacity_gb': 3.0, 'thin_provisioning_support': True, 'QoS_support': False, 'consistencygroup_support': False, 'total_volumes': 1, 'multiattach': False } ], 'storage_protocol': 'iSCSI', } # Verify if the retrieved statistics match the expected statistics self.assertEqual(stats, expected_stats) def test_get_volume_stats_pool_not_found(self): """Test case for retrieving volume statistics when pool not found. - Sets the mock return value for get_pools to an empty list. - Calls the get_volume_stats method. - Verifies if the get_pools method is called once. - Verifies if the get_volume_stats method raises a driver.TYDSDriverException. """ # Set the mock return value for get_pools to an empty list self.mock_client.get_pools.return_value = [] # Call the get_volume_stats method and check for exception self.assertRaises(driver.TYDSDriverException, self.driver.get_volume_stats) # Verify if the get_pools method is called once self.mock_client.get_pools.assert_called_once() def test_initialize_connection_success(self): """Test case for successful volume initialization. - Sets the connector information. - Sets the mock return values for get_initiator_list and get_target. - Sets the mock return values and assertions for create_initiator_group , create_target, modify_target, and generate_config. - Calls the initialize_connection method. - Verifies the expected return value and method calls. 
""" # Set the connector information connector = { 'host': 'host1', 'initiator': 'iqn.1234', 'ip': '192.168.0.1', 'uuid': 'uuid1' } # Set the mock return values for get_initiator_list and get_target self.mock_client.get_initiator_list.return_value = [] self.mock_client.get_target.return_value = [ {'name': 'iqn.2023-06.com.toyou:uuid1', 'ipAddr': '192.168.0.2'}] # Set the mock return values and assertions for create_initiator_group, # create_target, modify_target, and generate_config self.mock_client.create_initiator_group.return_value = None self.mock_client.create_target.return_value = None self.mock_client.modify_target.return_value = None self.mock_client.generate_config.return_value = None self.mock_client.get_initiator_target_connections.side_effect = [ [], # First call returns an empty list [{'target_name': 'iqn.2023-06.com.toyou:initiator-group-uuid1', 'target_iqn': 'iqn1', 'block': [{'name': 'volume1', 'lunid': 0}]}] # Second call returns a non-empty dictionary ] # Call the initialize_connection method result = self.driver.initialize_connection(self.volume, connector) # Define the expected return value expected_return = { 'driver_volume_type': 'iscsi', 'data': { 'target_discovered': False, 'target_portal': '192.168.0.2:3260', 'target_lun': 0, 'target_iqns': ['iqn.2023-06.com.toyou:initiator-group-uuid1'], 'target_portals': ['192.168.0.2:3260'], 'target_luns': [0] } } # Verify the method calls and return value self.mock_client.get_initiator_list.assert_called_once() self.mock_client.create_initiator_group.assert_called_once() self.assertEqual( self.mock_client.get_initiator_target_connections.call_count, 2) self.assertEqual(self.mock_client.get_target.call_count, 2) self.mock_client.modify_target.assert_not_called() self.mock_client.create_target.assert_called_once() self.mock_client.generate_config.assert_called_once() self.assertEqual(result, expected_return) def test_initialize_connection_failure(self): """Test case for failed volume initialization. - Sets the connector information. - Sets the mock return values for get_initiator_list and get_it. - Calls the initialize_connection method. - Verifies if the get_initiator_list method is called once. - Verifies if the create_initiator_group method is not called. - Verifies if the initialize_connection method raises an exception to type exception.VolumeBackendAPIException. """ # Set the connector information connector = { 'host': 'host1', 'initiator': 'iqn.1234', 'ip': '192.168.0.1', 'uuid': 'uuid1' } # Set the mock return values for get_initiator_list and get_it self.mock_client.get_initiator_list.return_value = [ {'group_name': 'initiator-group-uuid1'}] self.mock_client.get_initiator_target_connections.return_value = [] # Call the initialize_connection method and check for exception self.assertRaises(exception.VolumeBackendAPIException, self.driver.initialize_connection, self.volume, connector) # Verify if the get_initiator_list method is called once self.mock_client.get_initiator_list.assert_called_once() # Verify if the create_initiator_group method is not called self.mock_client.create_initiator_group.assert_not_called() def test_terminate_connection_success(self): """Test case for successful termination of volume connection. - Sets the connector information. - Sets the mock return values for get_it and get_initiator_list. - Calls the terminate_connection method with the required mock methods. - Verifies the method calls using assertions. 
""" # Set the connector information connector = { 'host': 'host1', 'initiator': 'iqn.1234', 'ip': '192.168.0.1', 'uuid': 'uuid1' } # Set the mock return values for get_it and get_initiator_list self.mock_client.get_initiator_target_connections.return_value = [ {'target_iqn': 'target_iqn1', 'target_name': 'target1', 'hostName': ['host1'], 'block': [{'name': 'volume1', 'lunid': 1}, {'name': 'volume2', 'lunid': 2}]} ] self.mock_client.get_initiator_list.return_value = [ {'group_name': 'initiator-group-uuid1', 'group_id': 'group_id1'} ] # Call the terminate_connection method with the required mock methods self.driver.terminate_connection( self.volume, connector, mock_get_it=self.mock_client.get_initiator_target_connections, mock_delete_target=self.mock_client.delete_target, mock_get_initiator_list=self.mock_client.get_initiator_list, mock_delete_initiator_group=self.mock_client .delete_initiator_group, mock_restart_service=self.mock_client.restart_service, ) # Verify the method calls using assertions self.mock_client.get_initiator_target_connections.assert_called_once() self.mock_client.get_initiator_list.assert_not_called() self.mock_client.delete_target.assert_not_called() self.mock_client.delete_initiator_group.assert_not_called() self.mock_client.restart_service.assert_not_called() def test_terminate_connection_failure(self): """Test case for failed termination of volume connection. - Sets the connector information. - Sets the mock return values for get_it and get_initiator_list. - Sets the delete_target method to raise an exception. - Calls the terminate_connection method. - Verifies the method calls and assertions. """ # Set the connector information connector = { 'host': 'host1', 'initiator': 'iqn.1234', 'ip': '192.168.0.1', 'uuid': 'uuid1' } # Set the mock return values for get_it and get_initiator_list self.mock_client.get_initiator_target_connections.return_value = [ { 'target_iqn': 'target_iqn1', 'target_name': 'iqn.2023-06.com.toyou:initiator-group-uuid1', 'hostName': ['host1'], 'block': [{'name': self.volume.name, 'lunid': 1}] } ] self.mock_client.get_initiator_list.return_value = [ {'group_name': 'initiator-group-uuid1', 'group_id': 'group_id1'} ] # Set the delete_target method to raise an exception self.mock_client.delete_target.side_effect = \ exception.VolumeBackendAPIException('API error') # Assert that an exception to type exception.VolumeBackendAPIException # is raised self.assertRaises(exception.VolumeBackendAPIException, self.driver.terminate_connection, self.volume, connector) # Verify method calls and assertions self.mock_client.get_initiator_target_connections.assert_called_once() self.mock_client.get_initiator_list.assert_not_called() self.mock_client.delete_target.assert_called_once_with('target_iqn1') self.mock_client.delete_initiator_group.assert_not_called() self.mock_client.restart_service.assert_not_called() ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315577.3071203 cinder-27.0.0/cinder/tests/unit/volume/drivers/veritas_access/0000775000175000017500000000000000000000000024437 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/volume/drivers/veritas_access/__init__.py0000664000175000017500000000000000000000000026536 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 
cinder-27.0.0/cinder/tests/unit/volume/drivers/veritas_access/test_veritas_iscsi.py0000664000175000017500000005774300000000000030737 0ustar00zuulzuul00000000000000# Copyright 2017 Veritas Technologies LLC. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Unit tests for Veritas Access cinder driver. """ import hashlib import json import tempfile from unittest import mock from xml.dom.minidom import Document import requests from cinder import context from cinder import exception from cinder.tests.unit import test from cinder.tests.unit import utils as test_utils from cinder.volume import configuration as conf from cinder.volume.drivers.veritas_access import veritas_iscsi FAKE_BACKEND = 'fake_backend' class MockResponse(object): def __init__(self): self.status_code = 200 def json(self): data = {'fake_key': 'fake_val'} return json.dumps(data) class FakeXML(object): def __init__(self): self.tempdir = tempfile.mkdtemp() def create_vrts_fake_config_file(self): target = 'iqn.2017-02.com.veritas:faketarget' portal = '1.1.1.1' auth_detail = '0' doc = Document() vrts_node = doc.createElement("VRTS") doc.appendChild(vrts_node) vrts_target_node = doc.createElement("VrtsTargets") vrts_node.appendChild(vrts_target_node) target_node = doc.createElement("Target") vrts_target_node.appendChild(target_node) name_ele = doc.createElement("Name") portal_ele = doc.createElement("PortalIP") auth_ele = doc.createElement("Authentication") name_ele.appendChild(doc.createTextNode(target)) portal_ele.appendChild(doc.createTextNode(portal)) auth_ele.appendChild(doc.createTextNode(auth_detail)) target_node.appendChild(name_ele) target_node.appendChild(portal_ele) target_node.appendChild(auth_ele) filename = 'vrts_config.xml' config_file_path = self.tempdir + '/' + filename f = open(config_file_path, 'w') doc.writexml(f) f.close() return config_file_path class fake_volume(object): def __init__(self): self.id = 'fakeid' self.name = 'fakename' self.size = 1 self.snapshot_id = False self.metadata = {'dense': True} class fake_volume2(object): def __init__(self): self.id = 'fakeid2' self.name = 'fakename2' self.size = 2 self.snapshot_id = False self.metadata = {'dense': True} class fake_clone_volume(object): def __init__(self): self.id = 'fakecloneid' self.name = 'fakeclonename' self.size = 1 self.snapshot_id = False class fake_clone_volume2(object): def __init__(self): self.id = 'fakecloneid2' self.name = 'fakeclonename' self.size = 2 self.snapshot_id = False class fake_snapshot(object): def __init__(self): self.id = 'fakeid' self.volume_id = 'fakevolumeid' self.volume_size = 1 class ACCESSIscsiDriverTestCase(test.TestCase): """Tests ACCESSShareDriver.""" volume = fake_volume() volume2 = fake_volume2() snapshot = fake_snapshot() clone_volume = fake_clone_volume() clone_volume2 = fake_clone_volume2() connector = { 'initiator': 'iqn.1994-05.com.fakeinitiator' } def setUp(self): super(ACCESSIscsiDriverTestCase, self).setUp() self._create_fake_config() lcfg = self.configuration self._context = context.get_admin_context() self._driver = 
veritas_iscsi.ACCESSIscsiDriver(configuration=lcfg) self._driver.do_setup(self._context) def _create_fake_config(self): self.mock_object(veritas_iscsi.ACCESSIscsiDriver, '_authenticate_access') self.configuration = mock.Mock(spec=conf.Configuration) self.configuration.safe_get = self.fake_safe_get self.configuration.san_ip = '1.1.1.1' self.configuration.san_login = 'user' self.configuration.san_password = 'passwd' self.configuration.san_api_port = 14161 self.configuration.vrts_lun_sparse = True self.configuration.vrts_target_config = ( FakeXML().create_vrts_fake_config_file()) self.configuration.target_port = 3260 self.configuration.volume_backend_name = FAKE_BACKEND def fake_safe_get(self, value): try: val = getattr(self.configuration, value) except AttributeError: val = None return val def test_create_volume(self): self.mock_object(self._driver, '_vrts_get_suitable_target') self.mock_object(self._driver, '_vrts_get_targets_store') self.mock_object(self._driver, '_access_api') mylist = [] target = {} target['name'] = 'iqn.2017-02.com.veritas:faketarget' target['portal_ip'] = '1.1.1.1' target['auth'] = '0' mylist.append(target) self._driver._vrts_get_suitable_target.return_value = ( 'iqn.2017-02.com.veritas:faketarget') self._driver._access_api.return_value = True return_list = self._driver._vrts_parse_xml_file( self.configuration.vrts_target_config) self._driver.create_volume(self.volume) self.assertEqual(mylist, return_list) self.assertEqual(1, self._driver._access_api.call_count) def test_create_volume_negative(self): self.mock_object(self._driver, '_vrts_get_suitable_target') self.mock_object(self._driver, '_vrts_get_targets_store') self.mock_object(self._driver, '_access_api') self._driver._vrts_get_suitable_target.return_value = ( 'iqn.2017-02.com.veritas:faketarget') self._driver._access_api.return_value = False self.assertRaises(exception.VolumeBackendAPIException, self._driver.create_volume, self.volume) def test_create_volume_negative_no_suitable_target_found(self): self.mock_object(self._driver, '_vrts_get_suitable_target') self.mock_object(self._driver, '_access_api') self._driver._vrts_get_suitable_target.return_value = False self.assertRaises(exception.VolumeBackendAPIException, self._driver.create_volume, self.volume) self.assertEqual(0, self._driver._access_api.call_count) def test_delete_volume(self): self.mock_object(self._driver, '_get_vrts_lun_list') self.mock_object(self._driver, '_access_api') va_lun_name = self._driver._get_va_lun_name(self.volume.id) length = len(self.volume.id) index = int(length / 2) name1 = self.volume.id[:index] name2 = self.volume.id[index:] crc1 = hashlib.md5(name1.encode('utf-8'), usedforsecurity=False).hexdigest()[:5] crc2 = hashlib.md5(name2.encode('utf-8'), usedforsecurity=False).hexdigest()[:5] volume_name_to_ret = 'cinder' + '-' + crc1 + '-' + crc2 lun = {} lun['lun_name'] = va_lun_name lun['target_name'] = 'iqn.2017-02.com.veritas:faketarget' lun_list = {'output': {'output': {'luns': [lun]}}} self._driver._get_vrts_lun_list.return_value = lun_list self._driver._access_api.return_value = True self._driver.delete_volume(self.volume) self.assertEqual(volume_name_to_ret, va_lun_name) self.assertEqual(1, self._driver._access_api.call_count) def test_delete_volume_negative(self): self.mock_object(self._driver, '_get_vrts_lun_list') self.mock_object(self._driver, '_access_api') self._driver._access_api.return_value = False self.assertRaises(exception.VolumeBackendAPIException, self._driver.delete_volume, self.volume) def 
test_create_snapshot(self): self.mock_object(self._driver, '_access_api') self._driver._access_api.return_value = True self._driver.create_snapshot(self.snapshot) self.assertEqual(1, self._driver._access_api.call_count) def test_create_snapshot_negative(self): self.mock_object(self._driver, '_access_api') self._driver._access_api.return_value = False self.assertRaises(exception.VolumeBackendAPIException, self._driver.create_snapshot, self.snapshot) self.assertEqual(1, self._driver._access_api.call_count) def test_delete_snapshot(self): self.mock_object(self._driver, '_access_api') self._driver._access_api.return_value = True self._driver.delete_snapshot(self.snapshot) self.assertEqual(1, self._driver._access_api.call_count) def test_delete_snapshot_negative(self): self.mock_object(self._driver, '_access_api') self._driver._access_api.return_value = False self.assertRaises(exception.VolumeBackendAPIException, self._driver.delete_snapshot, self.snapshot) self.assertEqual(1, self._driver._access_api.call_count) def test_create_cloned_volume(self): self.mock_object(self._driver, '_access_api') self.mock_object(self._driver, '_vrts_extend_lun') self.mock_object(self._driver, '_get_vrts_lun_list') self.mock_object(self._driver, '_vrts_get_fs_list') self.mock_object(self._driver, '_vrts_is_space_available_in_store') va_lun_name = self._driver._get_va_lun_name(self.volume.id) lun = {} lun['lun_name'] = va_lun_name lun['fs_name'] = 'fake_fs' lun['target_name'] = 'iqn.2017-02.com.veritas:faketarget' lun_list = {'output': {'output': {'luns': [lun]}}} self._driver._get_vrts_lun_list.return_value = lun_list self._driver._vrts_is_space_available_in_store.return_value = True self._driver._access_api.return_value = True self._driver.create_cloned_volume(self.clone_volume, self.volume) self.assertEqual(1, self._driver._access_api.call_count) self.assertEqual(0, self._driver._vrts_extend_lun.call_count) def test_create_cloned_volume_of_greater_size(self): self.mock_object(self._driver, '_access_api') self.mock_object(self._driver, '_vrts_extend_lun') self.mock_object(self._driver, '_get_vrts_lun_list') self.mock_object(self._driver, '_vrts_get_fs_list') self.mock_object(self._driver, '_vrts_is_space_available_in_store') va_lun_name = self._driver._get_va_lun_name(self.volume.id) lun = {} lun['lun_name'] = va_lun_name lun['fs_name'] = 'fake_fs' lun['target_name'] = 'iqn.2017-02.com.veritas:faketarget' lun_list = {'output': {'output': {'luns': [lun]}}} self._driver._get_vrts_lun_list.return_value = lun_list self._driver._vrts_is_space_available_in_store.return_value = True self._driver._access_api.return_value = True self._driver.create_cloned_volume(self.clone_volume2, self.volume) self.assertEqual(1, self._driver._access_api.call_count) self.assertEqual(1, self._driver._vrts_extend_lun.call_count) def test_create_cloned_volume_negative(self): self.mock_object(self._driver, '_access_api') self.mock_object(self._driver, '_get_vrts_lun_list') self.mock_object(self._driver, '_vrts_get_fs_list') self.mock_object(self._driver, '_vrts_is_space_available_in_store') va_lun_name = self._driver._get_va_lun_name(self.volume.id) lun = {} lun['lun_name'] = va_lun_name lun['fs_name'] = 'fake_fs' lun['target_name'] = 'iqn.2017-02.com.veritas:faketarget' lun_list = {'output': {'output': {'luns': [lun]}}} self._driver._get_vrts_lun_list.return_value = lun_list self._driver._vrts_is_space_available_in_store.return_value = True self._driver._access_api.return_value = False self.assertRaises(exception.VolumeBackendAPIException, 
self._driver.create_cloned_volume, self.clone_volume, self.volume) self.assertEqual(1, self._driver._access_api.call_count) def test_create_volume_from_snapshot(self): self.mock_object(self._driver, '_access_api') self.mock_object(self._driver, '_vrts_extend_lun') self.mock_object(self._driver, '_vrts_get_targets_store') self.mock_object(self._driver, '_vrts_get_assigned_store') self.mock_object(self._driver, '_vrts_get_fs_list') self.mock_object(self._driver, '_vrts_is_space_available_in_store') snap_name = self._driver._get_va_lun_name(self.snapshot.id) snap = {} snap['snapshot_name'] = snap_name snap['target_name'] = 'fake_target' snapshots = [] snapshots.append(snap) snap_info = {} snap_info['output'] = {'output': {'snapshots': snapshots}} self._driver._access_api.return_value = snap_info self._driver._vrts_is_space_available_in_store.return_value = True self._driver.create_volume_from_snapshot(self.volume, self.snapshot) self.assertEqual(2, self._driver._access_api.call_count) self.assertEqual(0, self._driver._vrts_extend_lun.call_count) def test_create_volume_from_snapshot_of_greater_size(self): self.mock_object(self._driver, '_access_api') self.mock_object(self._driver, '_vrts_extend_lun') self.mock_object(self._driver, '_vrts_get_targets_store') self.mock_object(self._driver, '_vrts_get_assigned_store') self.mock_object(self._driver, '_vrts_get_fs_list') self.mock_object(self._driver, '_vrts_is_space_available_in_store') snap_name = self._driver._get_va_lun_name(self.snapshot.id) snap = {} snap['snapshot_name'] = snap_name snap['target_name'] = 'fake_target' snapshots = [] snapshots.append(snap) snap_info = {} snap_info['output'] = {'output': {'snapshots': snapshots}} self._driver._access_api.return_value = snap_info self._driver._vrts_is_space_available_in_store.return_value = True self._driver.create_volume_from_snapshot(self.volume2, self.snapshot) self.assertEqual(2, self._driver._access_api.call_count) self.assertEqual(1, self._driver._vrts_extend_lun.call_count) def test_create_volume_from_snapshot_negative(self): self.mock_object(self._driver, '_access_api') self.mock_object(self._driver, '_vrts_get_targets_store') snap = {} snap['snapshot_name'] = 'fake_snap_name' snap['target_name'] = 'fake_target' snapshots = [] snapshots.append(snap) snap_info = {} snap_info['output'] = {'output': {'snapshots': snapshots}} self._driver._access_api.return_value = snap_info self.assertRaises(exception.VolumeBackendAPIException, self._driver.create_volume_from_snapshot, self.volume, self.snapshot) self.assertEqual(1, self._driver._access_api.call_count) self.assertEqual(0, self._driver._vrts_get_targets_store.call_count) def test_extend_volume(self): self.mock_object(self._driver, '_access_api') self.mock_object(self._driver, '_get_vrts_lun_list') self.mock_object(self._driver, '_vrts_get_fs_list') self.mock_object(self._driver, '_vrts_is_space_available_in_store') va_lun_name = self._driver._get_va_lun_name(self.volume.id) lun = {} lun['lun_name'] = va_lun_name lun['fs_name'] = 'fake_fs' lun['target_name'] = 'iqn.2017-02.com.veritas:faketarget' lun_list = {'output': {'output': {'luns': [lun]}}} self._driver._get_vrts_lun_list.return_value = lun_list self._driver._vrts_is_space_available_in_store.return_value = True self._driver._access_api.return_value = True self._driver.extend_volume(self.volume, 2) self.assertEqual(1, self._driver._access_api.call_count) def test_extend_volume_negative(self): self.mock_object(self._driver, '_access_api') self.mock_object(self._driver, 
'_get_vrts_lun_list') self.mock_object(self._driver, '_vrts_get_fs_list') self.mock_object(self._driver, '_vrts_is_space_available_in_store') va_lun_name = self._driver._get_va_lun_name(self.volume.id) lun = {} lun['lun_name'] = va_lun_name lun['fs_name'] = 'fake_fs' lun['target_name'] = 'iqn.2017-02.com.veritas:faketarget' lun_list = {'output': {'output': {'luns': [lun]}}} self._driver._get_vrts_lun_list.return_value = lun_list self._driver._vrts_is_space_available_in_store.return_value = True self._driver._access_api.return_value = False self.assertRaises(exception.VolumeBackendAPIException, self._driver.extend_volume, self.volume, 2) self.assertEqual(1, self._driver._vrts_get_fs_list.call_count) self.assertEqual(1, self._driver._access_api.call_count) def test_extend_volume_negative_not_volume_found(self): self.mock_object(self._driver, '_access_api') self.mock_object(self._driver, '_get_vrts_lun_list') self.mock_object(self._driver, '_vrts_get_fs_list') lun = {} lun['lun_name'] = 'fake_lun' lun['fs_name'] = 'fake_fs' lun['target_name'] = 'iqn.2017-02.com.veritas:faketarget' lun_list = {'output': {'output': {'luns': [lun]}}} self._driver._get_vrts_lun_list.return_value = lun_list self.assertRaises(exception.VolumeBackendAPIException, self._driver.extend_volume, self.volume, 2) self.assertEqual(0, self._driver._vrts_get_fs_list.call_count) self.assertEqual(0, self._driver._access_api.call_count) def test_initialize_connection(self): self.mock_object(self._driver, '_access_api') self.mock_object(self._driver, '_get_vrts_lun_list') self.mock_object(self._driver, '_vrts_target_initiator_mapping') self.mock_object(self._driver, '_vrts_get_iscsi_properties') va_lun_name = self._driver._get_va_lun_name(self.volume.id) lun = {} lun['lun_name'] = va_lun_name lun['target_name'] = 'iqn.2017-02.com.veritas:faketarget' lun_list = {'output': {'output': {'luns': [lun]}}} self._driver._get_vrts_lun_list.return_value = lun_list self._driver._access_api.return_value = True self._driver.initialize_connection(self.volume, self.connector) self.assertEqual(1, self._driver._vrts_get_iscsi_properties.call_count) @mock.patch('oslo_service.loopingcall.FixedIntervalWithTimeoutLoopingCall', new=test_utils.ZeroIntervalWithTimeoutLoopingCall) def test_initialize_connection_negative(self): self.mock_object(self._driver, '_access_api') self.mock_object(self._driver, '_get_vrts_lun_list') self.mock_object(self._driver, '_vrts_target_initiator_mapping') self.mock_object(self._driver, '_vrts_get_iscsi_properties') lun = {} lun['lun_name'] = 'fakelun' lun['target_name'] = 'iqn.2017-02.com.veritas:faketarget' lun_list = {'output': {'output': {'luns': [lun]}}} self._driver._get_vrts_lun_list.return_value = lun_list self._driver._access_api.return_value = True self.assertRaises(exception.VolumeBackendAPIException, self._driver.initialize_connection, self.volume, self.connector) self.assertEqual( 0, self._driver._vrts_target_initiator_mapping.call_count) self.assertEqual(0, self._driver._vrts_get_iscsi_properties.call_count) def test___vrts_get_iscsi_properties(self): self.mock_object(self._driver, '_access_api') va_lun_name = self._driver._get_va_lun_name(self.volume.id) storage_object = "'/fakestores/fakeio/" + va_lun_name + "'" lun_id_list = {} lun_id_list['output'] = ("[{'storage_object': " + storage_object + ", 'index': '1'}]") target_name = 'iqn.2017-02.com.veritas:faketarget' self._driver._access_api.return_value = lun_id_list iscsi_properties_ret_value = {} iscsi_properties_ret_value['target_discovered'] = True 
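# Expected result built below: the driver should report the fake target IQN,
# the configured SAN portal (1.1.1.1:3260), the LUN index parsed from the API
# reply above, and the volume id.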
iscsi_properties_ret_value['target_iqn'] = target_name iscsi_properties_ret_value['target_portal'] = '1.1.1.1:3260' iscsi_properties_ret_value['target_lun'] = 1 iscsi_properties_ret_value['volume_id'] = 'fakeid' iscsi_properties = self._driver._vrts_get_iscsi_properties(self.volume, target_name) self.assertEqual(iscsi_properties_ret_value, iscsi_properties) def test__access_api(self): self.mock_object(requests, 'session') provider = '%s:%s' % (self._driver._va_ip, self._driver._port) path = '/fake/path' input_data = {} mock_response = MockResponse() session = requests.session data = {'fake_key': 'fake_val'} json_data = json.dumps(data) session.request.return_value = mock_response ret_value = self._driver._access_api(session, provider, path, json.dumps(input_data), 'GET') self.assertEqual(json_data, ret_value) def test__access_api_negative(self): session = self._driver.session provider = '%s:%s' % (self._driver._va_ip, self._driver._port) path = '/fake/path' input_data = {} ret_value = self._driver._access_api(session, provider, path, json.dumps(input_data), 'GET') self.assertEqual(False, ret_value) def test__get_api(self): provider = '%s:%s' % (self._driver._va_ip, self._driver._port) tail = '/fake/path' ret = self._driver._get_api(provider, tail) api_root = 'https://%s/api/access' % (provider) to_be_ret = api_root + tail self.assertEqual(to_be_ret, ret) def test__vrts_target_initiator_mapping_negative(self): self.mock_object(self._driver, '_access_api') target_name = 'fake_target' initiator_name = 'fake_initiator' self._driver._access_api.return_value = False self.assertRaises(exception.VolumeBackendAPIException, self._driver._vrts_target_initiator_mapping, target_name, initiator_name) def test_get_volume_stats(self): self.mock_object(self._driver, '_authenticate_access') self.mock_object(self._driver, '_vrts_get_targets_store') self.mock_object(self._driver, '_vrts_get_fs_list') target_list = [] target_details = {} target_details['fs_list'] = ['fs1'] target_details['wwn'] = 'iqn.2017-02.com.veritas:faketarget' target_list.append(target_details) self._driver._vrts_get_targets_store.return_value = target_list fs_list = [] fs_dict = {} fs_dict['name'] = 'fs1' fs_dict['file_storage_capacity'] = 10737418240 fs_dict['file_storage_used'] = 1073741824 fs_list.append(fs_dict) self._driver._vrts_get_fs_list.return_value = fs_list self._driver.get_volume_stats() data = { 'volume_backend_name': FAKE_BACKEND, 'vendor_name': 'Veritas', 'driver_version': '1.0', 'storage_protocol': 'iSCSI', 'total_capacity_gb': 10, 'free_capacity_gb': 9, 'reserved_percentage': 0, 'thin_provisioning_support': True } self.assertEqual(data, self._driver._stats) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315577.3071203 cinder-27.0.0/cinder/tests/unit/volume/drivers/vmware/0000775000175000017500000000000000000000000022742 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/volume/drivers/vmware/__init__.py0000664000175000017500000000000000000000000025041 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/volume/drivers/vmware/fake.py0000664000175000017500000000224300000000000024223 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the 
License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. class ManagedObjectReference(object): """A managed object reference is a remote identifier.""" def __init__(self, name="ManagedObject", value=None): super(ManagedObjectReference, self) # Managed Object Reference value attributes # typically have values like vm-123 or # host-232 and not UUID. self.value = value self._value_1 = value # Managed Object Reference type # attributes hold the name of the type # of the vCenter object the value # attribute is the identifier for self.type = name self._type = name ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/volume/drivers/vmware/test_fcd.py0000664000175000017500000007443500000000000025124 0ustar00zuulzuul00000000000000# Copyright (c) 2017 VMware, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Test suite for VMware vCenter FCD driver.""" from unittest import mock import ddt from oslo_utils import timeutils from oslo_utils import units from oslo_vmware import image_transfer from oslo_vmware.objects import datastore from oslo_vmware import vim_util from cinder import context from cinder import exception as cinder_exceptions from cinder.tests.unit import fake_constants as fake from cinder.tests.unit import fake_snapshot from cinder.tests.unit import fake_volume from cinder.tests.unit import test from cinder.tests.unit import utils as test_utils from cinder.volume import configuration from cinder.volume.drivers.vmware import datastore as hub from cinder.volume.drivers.vmware import fcd from cinder.volume.drivers.vmware import vmdk from cinder.volume.drivers.vmware import volumeops @ddt.ddt class VMwareVStorageObjectDriverTestCase(test.TestCase): IP = 'localhost' PORT = 2321 IMG_TX_TIMEOUT = 10 RESERVED_PERCENTAGE = 0 VMDK_DRIVER = vmdk.VMwareVcVmdkDriver FCD_DRIVER = fcd.VMwareVStorageObjectDriver VC_VERSION = "6.7.0" VOL_ID = 'abcdefab-cdef-abcd-efab-cdefabcdefab' SRC_VOL_ID = '9b3f6f1b-03a9-4f1e-aaff-ae15122b6ccf' DISPLAY_NAME = 'foo' VOL_TYPE_ID = 'd61b8cb3-aa1b-4c9b-b79e-abcdbda8b58a' VOL_SIZE = 2 PROJECT_ID = 'd45beabe-f5de-47b7-b462-0d9ea02889bc' IMAGE_ID = 'eb87f4b0-d625-47f8-bb45-71c43b486d3a' IMAGE_NAME = 'image-1' def setUp(self): super(VMwareVStorageObjectDriverTestCase, self).setUp() self._config = mock.Mock(spec=configuration.Configuration) self._config.vmware_host_ip = self.IP self._config.vmware_host_port = self.PORT self._config.vmware_image_transfer_timeout_secs = self.IMG_TX_TIMEOUT self._config.reserved_percentage = self.RESERVED_PERCENTAGE self._driver = fcd.VMwareVStorageObjectDriver( configuration=self._config) 
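# Pin a known vCenter version and enable storage-policy support up front so
# the FCD tests below exercise the policy-aware code paths.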
self._driver._vc_version = self.VC_VERSION self._driver._storage_policy_enabled = True self._context = context.get_admin_context() self.updated_at = timeutils.utcnow() @mock.patch.object(VMDK_DRIVER, 'do_setup') @mock.patch.object(FCD_DRIVER, 'volumeops') def test_do_setup(self, vops, vmdk_do_setup): self._driver._storage_policy_enabled = False self._driver.do_setup(self._context) vmdk_do_setup.assert_called_once_with(self._context) vops.set_vmx_version.assert_called_once_with('vmx-13') self.assertTrue(self._driver._use_fcd_snapshot) self.assertTrue(self._driver._storage_policy_enabled) @mock.patch.object(VMDK_DRIVER, 'volumeops') @mock.patch.object(VMDK_DRIVER, '_get_datastore_summaries') def test_get_volume_stats(self, _get_datastore_summaries, vops): FREE_GB = 7 TOTAL_GB = 11 class ObjMock(object): def __init__(self, **kwargs): self.__dict__.update(kwargs) _get_datastore_summaries.return_value = \ ObjMock(objects= [ ObjMock(propSet = [ ObjMock(name = "host", val = ObjMock(DatastoreHostMount = [])), ObjMock(name = "summary", val = ObjMock(freeSpace = FREE_GB * units.Gi, capacity = TOTAL_GB * units.Gi, accessible = True)) ]) ]) vops._in_maintenance.return_value = False stats = self._driver.get_volume_stats() self.assertEqual('VMware', stats['vendor_name']) self.assertEqual(self._driver.VERSION, stats['driver_version']) self.assertEqual(self._driver.STORAGE_TYPE, stats['storage_protocol']) self.assertEqual(self.RESERVED_PERCENTAGE, stats['reserved_percentage']) self.assertEqual(TOTAL_GB, stats['total_capacity_gb']) self.assertEqual(FREE_GB, stats['free_capacity_gb']) def _create_volume_dict(self, vol_id=VOL_ID, display_name=DISPLAY_NAME, volume_type_id=VOL_TYPE_ID, status='available', size=VOL_SIZE, attachment=None, project_id=PROJECT_ID): return {'id': vol_id, 'display_name': display_name, 'name': 'volume-%s' % vol_id, 'volume_type_id': volume_type_id, 'status': status, 'size': size, 'volume_attachment': attachment, 'project_id': project_id, } def _create_volume_obj(self, vol_id=VOL_ID, display_name=DISPLAY_NAME, volume_type_id=VOL_TYPE_ID, status='available', size=VOL_SIZE, attachment=None, project_id=PROJECT_ID): vol = self._create_volume_dict( vol_id, display_name, volume_type_id, status, size, attachment, project_id) return fake_volume.fake_volume_obj(self._context, **vol) @mock.patch.object(FCD_DRIVER, '_get_storage_profile') @mock.patch.object(FCD_DRIVER, '_select_datastore') def test_select_ds_fcd(self, select_datastore, get_storage_profile): profile = mock.sentinel.profile get_storage_profile.return_value = profile datastore = mock.sentinel.datastore summary = mock.Mock(datastore=datastore) select_datastore.return_value = (mock.ANY, mock.ANY, summary) volume = self._create_volume_obj() ret = self._driver._select_ds_fcd(volume) self.assertEqual(datastore, ret) exp_req = {hub.DatastoreSelector.SIZE_BYTES: volume.size * units.Gi, hub.DatastoreSelector.PROFILE_NAME: profile} select_datastore.assert_called_once_with(exp_req) @mock.patch.object(FCD_DRIVER, '_select_datastore') @mock.patch.object(FCD_DRIVER, 'volumeops') def _test_get_temp_image_folder( self, vops, select_datastore, preallocated=False): host = mock.sentinel.host summary = mock.Mock() summary.name = 'ds-1' select_datastore.return_value = (host, mock.ANY, summary) dc_ref = mock.sentinel.dc_ref vops.get_dc.return_value = dc_ref size_bytes = units.Gi ret = self._driver._get_temp_image_folder(size_bytes, preallocated) self.assertEqual( (dc_ref, summary, vmdk.TMP_IMAGES_DATASTORE_FOLDER_PATH), ret) exp_req = 
{hub.DatastoreSelector.SIZE_BYTES: size_bytes} if preallocated: exp_req[hub.DatastoreSelector.HARD_AFFINITY_DS_TYPE] = ( {hub.DatastoreType.NFS, hub.DatastoreType.VMFS, hub.DatastoreType.NFS41}) select_datastore.assert_called_once_with(exp_req) vops.get_dc.assert_called_once_with(host) vops.create_datastore_folder.assert_called_once_with( summary.name, vmdk.TMP_IMAGES_DATASTORE_FOLDER_PATH, dc_ref) def test_get_temp_image_folder(self): self._test_get_temp_image_folder() def test_get_temp_image_folder_preallocated(self): self._test_get_temp_image_folder(preallocated=True) @mock.patch.object(VMDK_DRIVER, '_get_disk_type') @ddt.data(('eagerZeroedThick', 'eagerZeroedThick'), ('thick', 'preallocated'), ('thin', 'thin')) @ddt.unpack def test_get_disk_type( self, extra_spec_disk_type, exp_ret_val, vmdk_get_disk_type): vmdk_get_disk_type.return_value = extra_spec_disk_type volume = mock.sentinel.volume ret = self._driver._get_disk_type(volume) self.assertEqual(exp_ret_val, ret) @mock.patch.object(FCD_DRIVER, '_select_ds_fcd') @mock.patch.object(FCD_DRIVER, '_get_disk_type') @mock.patch.object(FCD_DRIVER, '_get_storage_profile_id') @mock.patch.object(FCD_DRIVER, 'volumeops') def test_create_volume(self, vops, get_storage_profile_id, get_disk_type, select_ds_fcd): ds_ref = mock.sentinel.ds_ref select_ds_fcd.return_value = ds_ref disk_type = mock.sentinel.disk_type get_disk_type.return_value = disk_type profile_id = mock.sentinel.profile_id get_storage_profile_id.return_value = profile_id fcd_loc = mock.Mock() provider_loc = mock.sentinel.provider_loc fcd_loc.provider_location.return_value = provider_loc vops.create_fcd.return_value = fcd_loc volume = self._create_volume_obj() ret = self._driver.create_volume(volume) self.assertEqual({'provider_location': provider_loc}, ret) select_ds_fcd.assert_called_once_with(volume) get_disk_type.assert_called_once_with(volume) vops.create_fcd.assert_called_once_with( volume.name, volume.size * units.Ki, ds_ref, disk_type, profile_id=profile_id) @mock.patch.object(volumeops.FcdLocation, 'from_provider_location') @mock.patch.object(FCD_DRIVER, 'volumeops') def test_delete_fcd(self, vops, from_provider_loc): fcd_loc = mock.sentinel.fcd_loc from_provider_loc.return_value = fcd_loc provider_loc = mock.sentinel.provider_loc self._driver._delete_fcd(provider_loc) from_provider_loc.test_assert_called_once_with(provider_loc) vops.delete_fcd.assert_called_once_with(fcd_loc) @mock.patch.object(FCD_DRIVER, '_delete_fcd') def test_delete_volume(self, delete_fcd): volume = self._create_volume_obj() volume.provider_location = 'foo@ds1' self._driver.delete_volume(volume) delete_fcd.assert_called_once_with(volume.provider_location) @mock.patch.object(FCD_DRIVER, '_delete_fcd') def test_delete_volume_empty_provider_location(self, delete_fcd): volume = self._create_volume_obj() self._driver.delete_volume(volume) delete_fcd.assert_not_called() @mock.patch.object(volumeops.FcdLocation, 'from_provider_location') @mock.patch.object(FCD_DRIVER, '_get_adapter_type') def test_initialize_connection( self, get_adapter_type, from_provider_location): fcd_loc = mock.Mock( fcd_id=mock.sentinel.fcd_id, ds_ref_val=mock.sentinel.ds_ref_val) from_provider_location.return_value = fcd_loc adapter_type = mock.sentinel.adapter_type get_adapter_type.return_value = adapter_type volume = self._create_volume_obj() connector = mock.sentinel.connector ret = self._driver.initialize_connection(volume, connector) self.assertEqual(self._driver.STORAGE_TYPE, ret['driver_volume_type']) 
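# The connection info returned for an FCD volume is expected to carry the
# driver's storage type plus the disk id, datastore reference value and
# adapter type, as checked below.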
self.assertEqual(fcd_loc.fcd_id, ret['data']['id']) self.assertEqual(fcd_loc.ds_ref_val, ret['data']['ds_ref_val']) self.assertEqual(adapter_type, ret['data']['adapter_type']) def test_container_format(self): self._driver._validate_container_format('bare', mock.sentinel.image_id) def test_container_format_invalid(self): self.assertRaises(cinder_exceptions.ImageUnacceptable, self._driver._validate_container_format, 'ova', mock.sentinel.image_id) def _create_image_meta(self, _id=IMAGE_ID, name=IMAGE_NAME, disk_format='vmdk', size=1 * units.Gi, container_format='bare', vmware_disktype='streamOptimized', vmware_adaptertype='lsiLogic', is_public=True): return {'id': _id, 'name': name, 'disk_format': disk_format, 'size': size, 'container_format': container_format, 'properties': {'vmware_disktype': vmware_disktype, 'vmware_adaptertype': vmware_adaptertype, }, 'is_public': is_public, } @mock.patch.object(FCD_DRIVER, '_get_temp_image_folder') @mock.patch.object(FCD_DRIVER, '_create_virtual_disk_from_sparse_image') @mock.patch.object(FCD_DRIVER, '_create_virtual_disk_from_preallocated_image') @mock.patch.object(FCD_DRIVER, 'volumeops') @mock.patch.object(datastore, 'DatastoreURL') @mock.patch.object(FCD_DRIVER, '_get_storage_profile_id') @ddt.data(vmdk.ImageDiskType.PREALLOCATED, vmdk.ImageDiskType.SPARSE, vmdk.ImageDiskType.STREAM_OPTIMIZED) def test_copy_image_to_volume(self, disk_type, get_storage_profile_id, datastore_url_cls, vops, create_disk_from_preallocated_image, create_disk_from_sparse_image, get_temp_image_folder): image_meta = self._create_image_meta(vmware_disktype=disk_type) image_service = mock.Mock() image_service.show.return_value = image_meta dc_ref = mock.sentinel.dc_ref datastore = mock.sentinel.datastore summary = mock.Mock(datastore=datastore) summary.name = 'ds1' folder_path = mock.sentinel.folder_path get_temp_image_folder.return_value = (dc_ref, summary, folder_path) vmdk_path = mock.Mock() vmdk_path.get_descriptor_ds_file_path.return_value = ( "[ds1] cinder_vol/foo.vmdk") if disk_type == vmdk.ImageDiskType.PREALLOCATED: create_disk_from_preallocated_image.return_value = vmdk_path else: create_disk_from_sparse_image.return_value = vmdk_path dc_path = '/test-dc' vops.get_inventory_path.return_value = dc_path ds_url = mock.sentinel.ds_url datastore_url_cls.return_value = ds_url profile_id = mock.sentinel.profile_id get_storage_profile_id.return_value = profile_id fcd_loc = mock.Mock() provider_location = mock.sentinel.provider_location fcd_loc.provider_location.return_value = provider_location vops.register_disk.return_value = fcd_loc volume = self._create_volume_obj() image_id = self.IMAGE_ID ret = self._driver.copy_image_to_volume( self._context, volume, image_service, image_id) self.assertEqual({'provider_location': provider_location}, ret) get_temp_image_folder.assert_called_once_with(volume.size * units.Gi) if disk_type == vmdk.ImageDiskType.PREALLOCATED: create_disk_from_preallocated_image.assert_called_once_with( self._context, image_service, image_id, image_meta['size'], dc_ref, summary.name, folder_path, volume.id, volumeops.VirtualDiskAdapterType.LSI_LOGIC) else: create_disk_from_sparse_image.assert_called_once_with( self._context, image_service, image_id, image_meta['size'], dc_ref, summary.name, folder_path, volume.id) datastore_url_cls.assert_called_once_with( 'https', self._driver.configuration.vmware_host_ip, 'cinder_vol/foo.vmdk', '/test-dc', 'ds1') vops.register_disk.assert_called_once_with( str(ds_url), volume.name, summary.datastore) 
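# After the temporary VMDK is registered as a first-class disk, the test also
# expects the storage profile to be applied to the new FCD.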
vops.update_fcd_policy.assert_called_once_with(fcd_loc, profile_id) @mock.patch.object(volumeops.FcdLocation, 'from_provider_location') @mock.patch.object(FCD_DRIVER, 'volumeops') @mock.patch.object(vim_util, 'get_moref') @mock.patch.object(FCD_DRIVER, '_create_backing') @mock.patch.object(image_transfer, 'upload_image') @mock.patch.object(VMDK_DRIVER, 'session') @mock.patch.object(FCD_DRIVER, '_delete_temp_backing') def test_copy_volume_to_image( self, delete_temp_backing, session, upload_image, create_backing, get_moref, vops, from_provider_loc): fcd_loc = mock.Mock() ds_ref = mock.sentinel.ds_ref fcd_loc.ds_ref.return_value = ds_ref from_provider_loc.return_value = fcd_loc host_ref_val = mock.sentinel.host_ref_val vops.get_connected_hosts.return_value = [host_ref_val] host = mock.sentinel.host get_moref.return_value = host backing = mock.sentinel.backing create_backing.return_value = backing vmdk_file_path = mock.sentinel.vmdk_file_path vops.get_vmdk_path.return_value = vmdk_file_path vops.get_backing_by_uuid.return_value = backing volume = test_utils.create_volume( self._context, volume_type_id=fake.VOLUME_TYPE_ID, updated_at=self.updated_at) extra_specs = { 'image_service:store_id': 'fake-store' } test_utils.create_volume_type( self._context.elevated(), id=fake.VOLUME_TYPE_ID, name="test_type", extra_specs=extra_specs) image_service = mock.sentinel.image_service image_meta = self._create_image_meta() self._driver.copy_volume_to_image( self._context, volume, image_service, image_meta) from_provider_loc.assert_called_once_with(volume.provider_location) vops.get_connected_hosts.assert_called_once_with(ds_ref) create_backing.assert_called_once_with( volume, host, {vmdk.CREATE_PARAM_DISK_LESS: True}) vops.attach_fcd.assert_called_once_with(backing, fcd_loc) vops.get_vmdk_path.assert_called_once_with(backing) conf = self._driver.configuration upload_image.assert_called_once_with( self._context, conf.vmware_image_transfer_timeout_secs, image_service, image_meta['id'], volume.project_id, session=session, host=conf.vmware_host_ip, port=conf.vmware_host_port, vm=backing, vmdk_file_path=vmdk_file_path, vmdk_size=volume.size * units.Gi, image_name=image_meta['name'], store_id='fake-store', base_image_ref=None) vops.detach_fcd.assert_called_once_with(backing, fcd_loc) delete_temp_backing.assert_called_once_with(backing) @mock.patch.object(volumeops.FcdLocation, 'from_provider_location') @mock.patch.object(FCD_DRIVER, 'volumeops') def test_extend_volume(self, vops, from_provider_loc): fcd_loc = mock.sentinel.fcd_loc from_provider_loc.return_value = fcd_loc volume = self._create_volume_obj() new_size = 3 self._driver.extend_volume(volume, new_size) from_provider_loc.assert_called_once_with(volume.provider_location) vops.extend_fcd.assert_called_once_with( fcd_loc, new_size * units.Ki) @mock.patch.object(volumeops.FcdLocation, 'from_provider_location') @mock.patch.object(FCD_DRIVER, 'volumeops') def test_clone_fcd(self, vops, from_provider_loc): fcd_loc = mock.sentinel.fcd_loc from_provider_loc.return_value = fcd_loc dest_fcd_loc = mock.sentinel.dest_fcd_loc vops.clone_fcd.return_value = dest_fcd_loc provider_loc = mock.sentinel.provider_loc name = mock.sentinel.name dest_ds_ref = mock.sentinel.dest_ds_ref disk_type = mock.sentinel.disk_type ret = self._driver._clone_fcd( provider_loc, name, dest_ds_ref, disk_type) self.assertEqual(dest_fcd_loc, ret) from_provider_loc.assert_called_once_with(provider_loc) vops.clone_fcd.assert_called_once_with( name, fcd_loc, dest_ds_ref, disk_type, profile_id=None) 
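# A rough sketch, inferred only from the assertions in test_clone_fcd above,
# of the _clone_fcd behaviour being pinned down (illustrative, not the
# driver's actual implementation):
#
#     def _clone_fcd(self, provider_loc, name, dest_ds_ref,
#                    disk_type=None, profile_id=None):
#         fcd_loc = volumeops.FcdLocation.from_provider_location(provider_loc)
#         return self.volumeops.clone_fcd(name, fcd_loc, dest_ds_ref,
#                                         disk_type, profile_id=profile_id)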
@mock.patch.object(FCD_DRIVER, '_select_ds_fcd') @mock.patch.object(FCD_DRIVER, '_clone_fcd') @mock.patch.object(FCD_DRIVER, 'volumeops') @mock.patch.object(volumeops.FcdLocation, 'from_provider_location') def _test_create_snapshot( self, from_provider_loc, vops, clone_fcd, select_ds_fcd, use_fcd_snapshot=False): self._driver._use_fcd_snapshot = use_fcd_snapshot provider_location = mock.sentinel.provider_location if use_fcd_snapshot: fcd_loc = mock.sentinel.fcd_loc from_provider_loc.return_value = fcd_loc fcd_snap_loc = mock.Mock() fcd_snap_loc.provider_location.return_value = provider_location vops.create_fcd_snapshot.return_value = fcd_snap_loc else: ds_ref = mock.sentinel.ds_ref select_ds_fcd.return_value = ds_ref dest_fcd_loc = mock.Mock() dest_fcd_loc.provider_location.return_value = provider_location clone_fcd.return_value = dest_fcd_loc volume = self._create_volume_obj() snapshot = fake_snapshot.fake_snapshot_obj( self._context, volume=volume) ret = self._driver.create_snapshot(snapshot) self.assertEqual({'provider_location': provider_location}, ret) if use_fcd_snapshot: vops.create_fcd_snapshot.assert_called_once_with( fcd_loc, description="snapshot-%s" % snapshot.id) else: select_ds_fcd.assert_called_once_with(snapshot.volume) clone_fcd.assert_called_once_with( volume.provider_location, snapshot.name, ds_ref) def test_create_snapshot_legacy(self): self._test_create_snapshot() def test_create_snapshot(self): self._test_create_snapshot(use_fcd_snapshot=True) @mock.patch.object(FCD_DRIVER, '_delete_fcd') @mock.patch.object(volumeops.FcdSnapshotLocation, 'from_provider_location') @mock.patch.object(FCD_DRIVER, 'volumeops') def _test_delete_snapshot( self, vops, from_provider_loc, delete_fcd, empty_provider_loc=False, use_fcd_snapshot=False): volume = self._create_volume_obj() snapshot = fake_snapshot.fake_snapshot_obj( self._context, volume=volume) if empty_provider_loc: snapshot.provider_location = None else: snapshot.provider_location = "test" if use_fcd_snapshot: fcd_snap_loc = mock.sentinel.fcd_snap_loc from_provider_loc.return_value = fcd_snap_loc else: from_provider_loc.return_value = None self._driver.delete_snapshot(snapshot) if empty_provider_loc: delete_fcd.assert_not_called() vops.delete_fcd_snapshot.assert_not_called() elif use_fcd_snapshot: vops.delete_fcd_snapshot.assert_called_once_with(fcd_snap_loc) else: delete_fcd.assert_called_once_with(snapshot.provider_location) def test_delete_snapshot_legacy(self): self._test_delete_snapshot() def test_delete_snapshot_with_empty_provider_loc(self): self._test_delete_snapshot(empty_provider_loc=True) def test_delete_snapshot(self): self._test_delete_snapshot(use_fcd_snapshot=True) @mock.patch.object(FCD_DRIVER, 'volumeops') @ddt.data((1, 1), (1, 2)) @ddt.unpack def test_extend_if_needed(self, cur_size, new_size, vops): fcd_loc = mock.sentinel.fcd_loc self._driver._extend_if_needed(fcd_loc, cur_size, new_size) if new_size > cur_size: vops.extend_fcd.assert_called_once_with( fcd_loc, new_size * units.Ki) else: vops.extend_fcd.assert_not_called() @mock.patch.object(FCD_DRIVER, '_select_ds_fcd') @mock.patch.object(FCD_DRIVER, '_get_disk_type') @mock.patch.object(FCD_DRIVER, '_get_storage_profile_id') @mock.patch.object(FCD_DRIVER, '_clone_fcd') @mock.patch.object(FCD_DRIVER, '_extend_if_needed') def test_create_volume_from_fcd( self, extend_if_needed, clone_fcd, get_storage_profile_id, get_disk_type, select_ds_fcd): ds_ref = mock.sentinel.ds_ref select_ds_fcd.return_value = ds_ref disk_type = mock.sentinel.disk_type 
get_disk_type.return_value = disk_type profile_id = mock.sentinel.profile_id get_storage_profile_id.return_value = profile_id cloned_fcd_loc = mock.Mock() dest_provider_loc = mock.sentinel.dest_provider_loc cloned_fcd_loc.provider_location.return_value = dest_provider_loc clone_fcd.return_value = cloned_fcd_loc provider_loc = mock.sentinel.provider_loc cur_size = 1 volume = self._create_volume_obj() ret = self._driver._create_volume_from_fcd( provider_loc, cur_size, volume) self.assertEqual({'provider_location': dest_provider_loc}, ret) select_ds_fcd.test_assert_called_once_with(volume) get_disk_type.test_assert_called_once_with(volume) clone_fcd.assert_called_once_with( provider_loc, volume.name, ds_ref, disk_type=disk_type, profile_id=profile_id) extend_if_needed.assert_called_once_with( cloned_fcd_loc, cur_size, volume.size) @mock.patch.object(FCD_DRIVER, '_create_volume_from_fcd') @mock.patch.object(volumeops.FcdSnapshotLocation, 'from_provider_location') @mock.patch.object(FCD_DRIVER, '_get_storage_profile_id') @mock.patch.object(FCD_DRIVER, 'volumeops') @mock.patch.object(FCD_DRIVER, '_extend_if_needed') def _test_create_volume_from_snapshot( self, extend_if_needed, vops, get_storage_profile_id, from_provider_loc, create_volume_from_fcd, use_fcd_snapshot=False): src_volume = self._create_volume_obj(vol_id=self.SRC_VOL_ID) snapshot = fake_snapshot.fake_snapshot_obj( self._context, volume=src_volume) volume = self._create_volume_obj(size=self.VOL_SIZE + 1) if use_fcd_snapshot: fcd_snap_loc = mock.sentinel.fcd_snap_loc from_provider_loc.return_value = fcd_snap_loc profile_id = mock.sentinel.profile_id get_storage_profile_id.return_value = profile_id fcd_loc = mock.Mock() provider_loc = mock.sentinel.provider_loc fcd_loc.provider_location.return_value = provider_loc vops.create_fcd_from_snapshot.return_value = fcd_loc else: from_provider_loc.return_value = None ret = self._driver.create_volume_from_snapshot(volume, snapshot) if use_fcd_snapshot: self.assertEqual({'provider_location': provider_loc}, ret) vops.create_fcd_from_snapshot.assert_called_once_with( fcd_snap_loc, volume.name, profile_id=profile_id) extend_if_needed.assert_called_once_with( fcd_loc, snapshot.volume_size, volume.size) else: create_volume_from_fcd.assert_called_once_with( snapshot.provider_location, snapshot.volume.size, volume) def test_create_volume_from_snapshot_legacy(self): self._test_create_volume_from_snapshot() def test_create_volume_from_snapshot(self): self._test_create_volume_from_snapshot(use_fcd_snapshot=True) @mock.patch.object(FCD_DRIVER, '_create_volume_from_fcd') def test_create_cloned_volume(self, create_volume_from_fcd): src_volume = self._create_volume_obj() volume = mock.sentinel.volume self._driver.create_cloned_volume(volume, src_volume) create_volume_from_fcd.assert_called_once_with( src_volume.provider_location, src_volume.size, volume) @mock.patch.object(FCD_DRIVER, '_get_storage_profile') @mock.patch.object(FCD_DRIVER, '_get_extra_spec_storage_profile') @mock.patch.object(FCD_DRIVER, '_in_use') @mock.patch.object(FCD_DRIVER, 'volumeops') @mock.patch.object(volumeops.FcdLocation, 'from_provider_location') @mock.patch.object(FCD_DRIVER, 'ds_sel') @ddt.data({}, {'storage_policy_enabled': False}, {'same_profile': True}, {'vol_in_use': True} ) @ddt.unpack def test_retype( self, ds_sel, from_provider_location, vops, in_use, get_extra_spec_storage_profile, get_storage_profile, storage_policy_enabled=True, same_profile=False, vol_in_use=False): self._driver._storage_policy_enabled = 
storage_policy_enabled if storage_policy_enabled: profile = mock.sentinel.profile get_storage_profile.return_value = profile if same_profile: new_profile = profile else: new_profile = mock.sentinel.new_profile get_extra_spec_storage_profile.return_value = new_profile in_use.return_value = vol_in_use if not vol_in_use: fcd_loc = mock.sentinel.fcd_loc from_provider_location.return_value = fcd_loc new_profile_id = mock.Mock() ds_sel.get_profile_id.return_value = new_profile_id ctxt = mock.sentinel.ctxt volume = self._create_volume_obj() new_type = {'id': mock.sentinel.new_type_id} diff = mock.sentinel.diff host = mock.sentinel.host ret = self._driver.retype(ctxt, volume, new_type, diff, host) if not storage_policy_enabled: self.assertTrue(ret) else: get_storage_profile.assert_called_once_with(volume) get_extra_spec_storage_profile.assert_called_once_with( new_type['id']) if same_profile: self.assertTrue(ret) else: in_use.assert_called_once_with(volume) if vol_in_use: self.assertFalse(ret) else: ds_sel.get_profile_id.assert_called_once_with(new_profile) vops.update_fcd_policy.assert_called_once_with( fcd_loc, new_profile_id.uniqueId) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/volume/drivers/vmware/test_vmware_datastore.py0000664000175000017500000004100400000000000027721 0ustar00zuulzuul00000000000000# Copyright (c) 2014 VMware, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""Unit tests for datastore module.""" import re from unittest import mock from oslo_utils import units from cinder.tests.unit import test from cinder.tests.unit.volume.drivers.vmware import fake as vmware_fake from cinder.volume.drivers.vmware import datastore as ds_sel from cinder.volume.drivers.vmware import exceptions as vmdk_exceptions class DatastoreTest(test.TestCase): """Unit tests for Datastore.""" def setUp(self): super(DatastoreTest, self).setUp() self._session = mock.Mock() self._vops = mock.Mock() self._ds_sel = ds_sel.DatastoreSelector( self._vops, self._session, 1024) @mock.patch('oslo_vmware.pbm.get_profile_id_by_name') def test_get_profile_id(self, get_profile_id_by_name): profile_id = mock.sentinel.profile_id get_profile_id_by_name.return_value = profile_id profile_name = mock.sentinel.profile_name self.assertEqual(profile_id, self._ds_sel.get_profile_id(profile_name)) get_profile_id_by_name.assert_called_once_with(self._session, profile_name) self.assertEqual(profile_id, self._ds_sel._profile_id_cache[profile_name]) @mock.patch('oslo_vmware.pbm.get_profile_id_by_name') def test_get_profile_id_cache_hit(self, get_profile_id_by_name): profile_id = mock.sentinel.profile_id profile_name = mock.sentinel.profile_name self._ds_sel._profile_id_cache[profile_name] = profile_id self.assertEqual(profile_id, self._ds_sel.get_profile_id(profile_name)) self.assertFalse(get_profile_id_by_name.called) @mock.patch('oslo_vmware.pbm.get_profile_id_by_name') def test_get_profile_id_with_invalid_profile(self, get_profile_id_by_name): get_profile_id_by_name.return_value = None profile_name = mock.sentinel.profile_name self.assertRaises(vmdk_exceptions.ProfileNotFoundException, self._ds_sel.get_profile_id, profile_name) get_profile_id_by_name.assert_called_once_with(self._session, profile_name) def _create_datastore(self, value): return vmware_fake.ManagedObjectReference('Datastore', value) def _create_summary( self, ds, free_space=units.Mi, _type=ds_sel.DatastoreType.VMFS, capacity=2 * units.Mi, accessible=True): summary = mock.Mock(datastore=ds, freeSpace=free_space, type=_type, capacity=capacity, accessible=accessible) summary.name = ds.value return summary def _create_host(self, value): return vmware_fake.ManagedObjectReference('HostSystem', value) @mock.patch('cinder.volume.drivers.vmware.datastore.DatastoreSelector.' 
'_filter_by_profile') def test_filter_datastores(self, filter_by_profile): host1 = self._create_host('host-1') host2 = self._create_host('host-2') host3 = self._create_host('host-3') host_mounts1 = [mock.Mock(key=host1)] host_mounts2 = [mock.Mock(key=host2)] host_mounts3 = [mock.Mock(key=host3)] # empty summary ds1 = self._create_datastore('ds-1') ds1_props = {'host': host_mounts1} # hard anti-affinity datastore ds2 = self._create_datastore('ds-2') ds2_props = {'summary': self._create_summary(ds2), 'host': host_mounts2} # not enough free space ds3 = self._create_datastore('ds-3') ds3_props = {'summary': self._create_summary(ds3, free_space=128), 'host': host_mounts1} # not connected to a valid host ds4 = self._create_datastore('ds-4') ds4_props = {'summary': self._create_summary(ds4), 'host': host_mounts3} # invalid datastore type ds5 = self._create_datastore('ds-5') ds5_props = {'summary': self._create_summary(ds5, _type='foo'), 'host': host_mounts1} # hard affinity datastore type ds6 = self._create_datastore('ds-6') ds6_props = { 'summary': self._create_summary( ds6, _type=ds_sel.DatastoreType.VSAN), 'host': host_mounts2} # inaccessible datastore ds7 = self._create_datastore('ds-7') ds7_props = {'summary': self._create_summary(ds7, accessible=False), 'host': host_mounts1} def mock_in_maintenace(summary): return summary.datastore.value == 'ds-8' self._vops._in_maintenance.side_effect = mock_in_maintenace # in-maintenance datastore ds8 = self._create_datastore('ds-8') ds8_props = {'summary': self._create_summary(ds8), 'host': host_mounts2} # not compliant with profile ds9 = self._create_datastore('ds-9') ds9_props = {'summary': self._create_summary(ds9), 'host': host_mounts1} # valid datastore ds1a = self._create_datastore('ds-1a') ds1a_props = {'summary': self._create_summary(ds1a), 'host': host_mounts1} filter_by_profile.return_value = {ds1a: ds1a_props} # datastore name not matching the regex filter ds1b = self._create_datastore('ds-1b') ds1b_props = {'summary': self._create_summary(ds1b), 'host': host_mounts1} # datastore with zero capacity ds1c = self._create_datastore('ds-1c') ds1c_props = {'summary': self._create_summary(ds1c, capacity=0), 'host': host_mounts1} datastores = {ds1: ds1_props, ds2: ds2_props, ds3: ds3_props, ds4: ds4_props, ds5: ds5_props, ds6: ds6_props, ds7: ds7_props, ds8: ds8_props, ds9: ds9_props, ds1a: ds1a_props, ds1b: ds1b_props, ds1c: ds1c_props} profile_id = mock.sentinel.profile_id self._ds_sel._ds_regex = re.compile(r"ds-[1-9ac]{1,2}$") datastores = self._ds_sel._filter_datastores( datastores, 512, profile_id, ['ds-2'], {ds_sel.DatastoreType.VMFS, ds_sel.DatastoreType.NFS}, valid_host_refs=[host1, host2]) self.assertEqual({ds1a: ds1a_props}, datastores) filter_by_profile.assert_called_once_with( {ds9: ds9_props, ds1a: ds1a_props}, profile_id) def test_filter_datastores_with_empty_datastores(self): self.assertIsNone(self._ds_sel._filter_datastores( {}, 1024, None, None, None)) def _create_host_properties( self, parent, connection_state='connected', in_maintenace=False): return mock.Mock(connectionState=connection_state, inMaintenanceMode=in_maintenace, parent=parent) @mock.patch('cinder.volume.drivers.vmware.datastore.DatastoreSelector.' '_get_host_properties') @mock.patch('cinder.volume.drivers.vmware.datastore.DatastoreSelector.' 
'_get_resource_pool') def test_select_best_datastore(self, get_resource_pool, get_host_props): host1 = self._create_host('host-1') host2 = self._create_host('host-2') host3 = self._create_host('host-3') host_mounts1 = [mock.Mock(key=host1, mountInfo=mock.sentinel.ds1_mount_info1), mock.Mock(key=host2, mountInfo=mock.sentinel.ds1_mount_info2), mock.Mock(key=host3, mountInfo=mock.sentinel.ds1_mount_info3)] host_mounts2 = [mock.Mock(key=host2, mountInfo=mock.sentinel.ds2_mount_info2), mock.Mock(key=host3, mountInfo=mock.sentinel.ds2_mount_info3)] host_mounts3 = [mock.Mock(key=host1, mountInfo=mock.sentinel.ds3_mount_info1), mock.Mock(key=host2, mountInfo=mock.sentinel.ds3_mount_info2)] host_mounts4 = [mock.Mock(key=host1, mountInfo=mock.sentinel.ds4_mount_info1)] ds1 = self._create_datastore('ds-1') ds1_props = {'summary': self._create_summary(ds1), 'host': host_mounts1} ds2 = self._create_datastore('ds-2') ds2_props = { 'summary': self._create_summary( ds2, free_space=1024, capacity=2048), 'host': host_mounts2} ds3 = self._create_datastore('ds-3') ds3_props = { 'summary': self._create_summary( ds3, free_space=512, capacity=2048), 'host': host_mounts3} ds4 = self._create_datastore('ds-3') ds4_props = {'summary': self._create_summary(ds4), 'host': host_mounts4} cluster_ref = mock.sentinel.cluster_ref def mock_get_host_properties(host_ref): self.assertIsNot(host1, host_ref) if host_ref == host2: in_maintenance = False else: in_maintenance = True runtime = mock.Mock(spec=['connectionState', 'inMaintenanceMode']) runtime.connectionState = 'connected' runtime.inMaintenanceMode = in_maintenance return {'parent': cluster_ref, 'runtime': runtime} get_host_props.side_effect = mock_get_host_properties def mock_is_usable(mount_info): if (mount_info == mock.sentinel.ds1_mount_info2 or mount_info == mock.sentinel.ds2_mount_info2): return False else: return True self._vops._is_usable.side_effect = mock_is_usable rp = mock.sentinel.resource_pool get_resource_pool.return_value = rp # ds1 is mounted to 3 hosts: host1, host2 and host3; host1 is # not a valid host, ds1 is not usable in host1, and host3 is # in maintenance mode. # ds2 and ds3 are mounted to same hosts, and ds2 has a low space # utilization. But ds2 is not usable in host2, and host3 is in # maintenance mode. Therefore, ds3 and host2 will be selected. datastores = {ds1: ds1_props, ds2: ds2_props, ds3: ds3_props, ds4: ds4_props} ret = self._ds_sel._select_best_datastore( datastores, valid_host_refs=[host2, host3]) self.assertEqual((host2, rp, ds3_props['summary']), ret) self.assertCountEqual([mock.call(mock.sentinel.ds1_mount_info2), mock.call(mock.sentinel.ds1_mount_info3), mock.call(mock.sentinel.ds2_mount_info2), mock.call(mock.sentinel.ds2_mount_info3), mock.call(mock.sentinel.ds3_mount_info2)], self._vops._is_usable.call_args_list) self.assertEqual([mock.call(host3), mock.call(host2)], get_host_props.call_args_list) get_resource_pool.assert_called_once_with(cluster_ref) def test_select_best_datastore_with_empty_datastores(self): self.assertIsNone(self._ds_sel._select_best_datastore({})) @mock.patch('cinder.volume.drivers.vmware.datastore.DatastoreSelector.' 'get_profile_id') @mock.patch('cinder.volume.drivers.vmware.datastore.DatastoreSelector.' '_get_datastores') @mock.patch('cinder.volume.drivers.vmware.datastore.DatastoreSelector.' '_filter_datastores') @mock.patch('cinder.volume.drivers.vmware.datastore.DatastoreSelector.' 
'_select_best_datastore') def test_select_datastore( self, select_best_datastore, filter_datastores, get_datastores, get_profile_id): profile_id = mock.sentinel.profile_id get_profile_id.return_value = profile_id datastores = mock.sentinel.datastores get_datastores.return_value = datastores filtered_datastores = mock.sentinel.filtered_datastores filter_datastores.return_value = filtered_datastores best_datastore = mock.sentinel.best_datastore select_best_datastore.return_value = best_datastore size_bytes = 1024 req = {self._ds_sel.SIZE_BYTES: size_bytes} aff_ds_types = [ds_sel.DatastoreType.VMFS] req[ds_sel.DatastoreSelector.HARD_AFFINITY_DS_TYPE] = aff_ds_types anti_affinity_ds = [mock.sentinel.ds] req[ds_sel.DatastoreSelector.HARD_ANTI_AFFINITY_DS] = anti_affinity_ds profile_name = mock.sentinel.profile_name req[ds_sel.DatastoreSelector.PROFILE_NAME] = profile_name hosts = mock.sentinel.hosts self.assertEqual(best_datastore, self._ds_sel.select_datastore(req, hosts)) get_datastores.assert_called_once_with() filter_datastores.assert_called_once_with( datastores, size_bytes, profile_id, anti_affinity_ds, aff_ds_types, valid_host_refs=hosts) select_best_datastore.assert_called_once_with(filtered_datastores, valid_host_refs=hosts) @mock.patch('cinder.volume.drivers.vmware.datastore.DatastoreSelector.' 'get_profile_id') @mock.patch('cinder.volume.drivers.vmware.datastore.DatastoreSelector.' '_filter_by_profile') def test_is_datastore_compliant_true( self, filter_by_profile, get_profile_id): profile_name = mock.sentinel.profile_name datastore = mock.sentinel.datastore profile_id = mock.sentinel.profile_id get_profile_id.return_value = profile_id filter_by_profile.return_value = {datastore: None} self.assertTrue(self._ds_sel.is_datastore_compliant(datastore, profile_name)) get_profile_id.assert_called_once_with(profile_name) filter_by_profile.assert_called_once_with({datastore: None}, profile_id) @mock.patch('cinder.volume.drivers.vmware.datastore.DatastoreSelector.' 'get_profile_id') @mock.patch('cinder.volume.drivers.vmware.datastore.DatastoreSelector.' '_filter_by_profile') def test_is_datastore_compliant_false( self, filter_by_profile, get_profile_id): profile_name = mock.sentinel.profile_name datastore = mock.sentinel.datastore profile_id = mock.sentinel.profile_id get_profile_id.return_value = profile_id filter_by_profile.return_value = {} self.assertFalse(self._ds_sel.is_datastore_compliant(datastore, profile_name)) get_profile_id.assert_called_once_with(profile_name) filter_by_profile.assert_called_once_with({datastore: None}, profile_id) def test_is_datastore_compliant_with_empty_profile(self): self.assertTrue(self._ds_sel.is_datastore_compliant( mock.sentinel.datastore, None)) @mock.patch('cinder.volume.drivers.vmware.datastore.DatastoreSelector.' 'get_profile_id') def test_is_datastore_compliant_with_invalid_profile(self, get_profile_id): profile_name = mock.sentinel.profile_name get_profile_id.side_effect = vmdk_exceptions.ProfileNotFoundException self.assertRaises(vmdk_exceptions.ProfileNotFoundException, self._ds_sel.is_datastore_compliant, mock.sentinel.datastore, profile_name) get_profile_id.assert_called_once_with(profile_name) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/volume/drivers/vmware/test_vmware_vmdk.py0000664000175000017500000044051700000000000026710 0ustar00zuulzuul00000000000000# Copyright (c) 2013 VMware, Inc. # All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Test suite for VMware vCenter VMDK driver.""" import re from unittest import mock import ddt from oslo_utils import timeutils from oslo_utils import units from oslo_utils import versionutils from oslo_vmware import exceptions from oslo_vmware import image_transfer from oslo_vmware import vim_util from cinder import context from cinder import exception as cinder_exceptions from cinder.tests.unit import fake_constants from cinder.tests.unit import fake_snapshot from cinder.tests.unit import fake_volume from cinder.tests.unit import test from cinder.tests.unit import utils as test_utils from cinder.tests.unit.volume.drivers.vmware import fake as vmware_fake from cinder.volume.drivers.vmware import datastore as hub from cinder.volume.drivers.vmware import exceptions as vmdk_exceptions from cinder.volume.drivers.vmware import vmdk from cinder.volume.drivers.vmware import volumeops class MockConfiguration(object): def __init__(self, **kwargs): for kw in kwargs: setattr(self, kw, kwargs[kw]) def safe_get(self, name): return getattr(self, name) if hasattr(self, name) else None def append_config_values(self, opts): for opt in opts: if not hasattr(self, opt.name): setattr(self, opt.name, opt.default or None) # TODO(vbala) Split test methods handling multiple cases into multiple methods, # each handling a specific case. 
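# NOTE: The following is an illustrative sketch added by the editor (not part of
# the original test module) showing the intended behaviour of the MockConfiguration
# helper defined above: attributes passed as kwargs are returned by safe_get(),
# unknown names fall back to None, and append_config_values() only fills in defaults
# for options that were not explicitly set. The option names and the FakeOpt tuple
# below are hypothetical stand-ins for oslo.config opts.
if __name__ == '__main__':
    import collections

    FakeOpt = collections.namedtuple('FakeOpt', ['name', 'default'])

    conf = MockConfiguration(vmware_host_ip='localhost')
    assert conf.safe_get('vmware_host_ip') == 'localhost'
    assert conf.safe_get('vmware_host_port') is None

    conf.append_config_values([FakeOpt('vmware_host_port', 443),
                               FakeOpt('vmware_host_ip', '10.0.0.1')])
    assert conf.vmware_host_port == 443          # default applied for unset option
    assert conf.vmware_host_ip == 'localhost'    # explicit value preserved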
@ddt.ddt class VMwareVcVmdkDriverTestCase(test.TestCase): """Unit tests for VMwareVcVmdkDriver.""" IP = 'localhost' PORT = 2321 USERNAME = 'username' PASSWORD = 'password' VOLUME_FOLDER = 'cinder-volumes' API_RETRY_COUNT = 3 TASK_POLL_INTERVAL = 5.0 IMG_TX_TIMEOUT = 10 MAX_OBJECTS = 100 TMP_DIR = "/vmware-tmp" CA_FILE = "/etc/ssl/rui-ca-cert.pem" VMDK_DRIVER = vmdk.VMwareVcVmdkDriver CLUSTERS = ["cls-1", "cls-2"] DEFAULT_VC_VERSION = '5.5' POOL_SIZE = 20 SNAPSHOT_FORMAT = 'COW' VOL_ID = 'abcdefab-cdef-abcd-efab-cdefabcdefab' SRC_VOL_ID = '9b3f6f1b-03a9-4f1e-aaff-ae15122b6ccf' DISPLAY_NAME = 'foo' VOL_TYPE_ID = 'd61b8cb3-aa1b-4c9b-b79e-abcdbda8b58a' VOL_SIZE = 2 PROJECT_ID = 'd45beabe-f5de-47b7-b462-0d9ea02889bc' SNAPSHOT_ID = '2f59670a-0355-4790-834c-563b65bba740' SNAPSHOT_NAME = 'snap-foo' SNAPSHOT_DESCRIPTION = 'test snapshot' IMAGE_ID = 'eb87f4b0-d625-47f8-bb45-71c43b486d3a' IMAGE_NAME = 'image-1' ADAPTER_TYPE = volumeops.VirtualDiskAdapterType.BUS_LOGIC def setUp(self): super(VMwareVcVmdkDriverTestCase, self).setUp() self._config = MockConfiguration( vmware_host_ip=self.IP, vmware_host_port=self.PORT, vmware_host_username=self.USERNAME, vmware_host_password=self.PASSWORD, vmware_wsdl_location=None, vmware_volume_folder=self.VOLUME_FOLDER, vmware_api_retry_count=self.API_RETRY_COUNT, vmware_task_poll_interval=self.TASK_POLL_INTERVAL, vmware_image_transfer_timeout_secs=self.IMG_TX_TIMEOUT, vmware_max_objects_retrieval=self.MAX_OBJECTS, vmware_tmp_dir=self.TMP_DIR, vmware_ca_file=self.CA_FILE, vmware_insecure=False, vmware_cluster_name=self.CLUSTERS, vmware_host_version=self.DEFAULT_VC_VERSION, vmware_connection_pool_size=self.POOL_SIZE, vmware_adapter_type=self.ADAPTER_TYPE, vmware_snapshot_format=self.SNAPSHOT_FORMAT, vmware_lazy_create=True, vmware_datastore_regex=None, reserved_percentage=0 ) self._driver = vmdk.VMwareVcVmdkDriver(configuration=self._config) self._context = context.get_admin_context() self.updated_at = timeutils.utcnow() @mock.patch.object(VMDK_DRIVER, 'volumeops') @mock.patch.object(VMDK_DRIVER, '_get_datastore_summaries') def test_get_volume_stats(self, _get_datastore_summaries, vops): FREE_GB = 7 TOTAL_GB = 11 class ObjMock(object): def __init__(self, **kwargs): self.__dict__.update(kwargs) _get_datastore_summaries.return_value = (ObjMock(objects= [ ObjMock(propSet = [ ObjMock(name = "host", val = ObjMock(DatastoreHostMount = [])), ObjMock(name = "summary", val = ObjMock(freeSpace = FREE_GB * units.Gi, capacity = TOTAL_GB * units.Gi, accessible = True)) ]) ])) vops._in_maintenance.return_value = False # Enable volume stats collection from backend self._driver.configuration.vmware_enable_volume_stats = True stats = self._driver.get_volume_stats() self.assertEqual('VMware', stats['vendor_name']) self.assertEqual(self._driver.VERSION, stats['driver_version']) self.assertEqual('vmdk', stats['storage_protocol']) self.assertEqual(self._config.reserved_percentage, stats['reserved_percentage']) self.assertEqual(TOTAL_GB, stats['total_capacity_gb']) self.assertEqual(FREE_GB, stats['free_capacity_gb']) self.assertFalse(stats['shared_targets']) def test_test_volume_stats_disabled(self): RESERVED_PERCENTAGE = 0 TOTAL_GB = 'unknown' FREE_GB = 'unknown' self._driver.configuration.vmware_enable_volume_stats = False stats = self._driver.get_volume_stats() self.assertEqual('VMware', stats['vendor_name']) self.assertEqual(self._driver.VERSION, stats['driver_version']) self.assertEqual('vmdk', stats['storage_protocol']) self.assertEqual(RESERVED_PERCENTAGE, 
stats['reserved_percentage']) self.assertEqual(TOTAL_GB, stats['total_capacity_gb']) self.assertEqual(FREE_GB, stats['free_capacity_gb']) self.assertFalse(stats['shared_targets']) def _create_volume_dict(self, vol_id=VOL_ID, display_name=DISPLAY_NAME, volume_type_id=VOL_TYPE_ID, status='available', size=VOL_SIZE, attachment=None, project_id=PROJECT_ID): return {'id': vol_id, 'display_name': display_name, 'name': 'volume-%s' % vol_id, 'volume_type_id': volume_type_id, 'status': status, 'size': size, 'volume_attachment': attachment, 'project_id': project_id, } def _create_volume_obj(self, vol_id=VOL_ID, display_name=DISPLAY_NAME, volume_type_id=VOL_TYPE_ID, status='available', size=VOL_SIZE, attachment=None, project_id=PROJECT_ID): vol = self._create_volume_dict( vol_id, display_name, volume_type_id, status, size, attachment, project_id) return fake_volume.fake_volume_obj(self._context, **vol) @mock.patch.object(VMDK_DRIVER, '_get_disk_type') @mock.patch.object(VMDK_DRIVER, '_get_storage_profile') @mock.patch.object(VMDK_DRIVER, 'ds_sel') @mock.patch.object(VMDK_DRIVER, '_get_adapter_type') def test_verify_volume_creation(self, get_adapter_type, ds_sel, get_storage_profile, get_disk_type): profile_name = mock.sentinel.profile_name get_storage_profile.return_value = profile_name volume = self._create_volume_obj() self._driver._verify_volume_creation(volume) get_disk_type.assert_called_once_with(volume) get_storage_profile.assert_called_once_with(volume) ds_sel.get_profile_id.assert_called_once_with(profile_name) get_adapter_type.assert_called_once_with(volume) @mock.patch.object(VMDK_DRIVER, '_verify_volume_creation') def test_create_volume(self, verify_volume_creation): volume = self._create_volume_dict() self._driver.create_volume(volume) verify_volume_creation.assert_called_once_with(volume) @mock.patch.object(VMDK_DRIVER, '_create_backing') def test_create_volume_with_lazy_create_disabled(self, create_backing): self._config.vmware_lazy_create = False volume = self._create_volume_dict() self._driver.create_volume(volume) create_backing.assert_called_once_with(volume) @mock.patch.object(VMDK_DRIVER, 'volumeops') def test_delete_volume_without_backing(self, vops): vops.get_backing.return_value = None volume = self._create_volume_dict() self._driver.delete_volume(volume) vops.get_backing.assert_called_once_with(volume['name'], volume['id']) self.assertFalse(vops.delete_backing.called) @mock.patch.object(VMDK_DRIVER, 'volumeops') def test_delete_volume(self, vops): backing = mock.sentinel.backing vops.get_backing.return_value = backing volume = self._create_volume_dict() self._driver.delete_volume(volume) vops.get_backing.assert_called_once_with(volume['name'], volume['id']) vops.delete_backing.assert_called_once_with(backing) @mock.patch('cinder.volume.drivers.vmware.vmdk.' '_get_volume_type_extra_spec') @mock.patch('cinder.volume.drivers.vmware.volumeops.' 
'VirtualDiskType.validate') def test_get_extra_spec_disk_type(self, validate, get_volume_type_extra_spec): vmdk_type = mock.sentinel.vmdk_type get_volume_type_extra_spec.return_value = vmdk_type type_id = mock.sentinel.type_id self.assertEqual(vmdk_type, self._driver._get_extra_spec_disk_type(type_id)) get_volume_type_extra_spec.assert_called_once_with( type_id, 'vmdk_type', default_value=vmdk.THIN_VMDK_TYPE) validate.assert_called_once_with(vmdk_type) @mock.patch.object(VMDK_DRIVER, '_get_extra_spec_disk_type') def test_get_disk_type(self, get_extra_spec_disk_type): vmdk_type = mock.sentinel.vmdk_type get_extra_spec_disk_type.return_value = vmdk_type volume = self._create_volume_dict() self.assertEqual(vmdk_type, self._driver._get_disk_type(volume)) get_extra_spec_disk_type.assert_called_once_with( volume['volume_type_id']) @mock.patch('cinder.volume.drivers.vmware.vmdk.' '_get_volume_type_extra_spec') @mock.patch('cinder.volume.drivers.vmware.volumeops.' 'VirtualDiskAdapterType.validate') def test_get_extra_spec_adapter_type( self, validate, get_volume_type_extra_spec): adapter_type = mock.sentinel.adapter_type get_volume_type_extra_spec.return_value = adapter_type type_id = mock.sentinel.type_id self.assertEqual(adapter_type, self._driver._get_extra_spec_adapter_type(type_id)) get_volume_type_extra_spec.assert_called_once_with( type_id, 'adapter_type', default_value=self._driver.configuration.vmware_adapter_type) validate.assert_called_once_with(adapter_type) @mock.patch.object(VMDK_DRIVER, '_get_extra_spec_adapter_type') def test_get_adapter_type(self, get_extra_spec_adapter_type): adapter_type = mock.sentinel.adapter_type get_extra_spec_adapter_type.return_value = adapter_type volume = self._create_volume_dict() self.assertEqual(adapter_type, self._driver._get_adapter_type(volume)) get_extra_spec_adapter_type.assert_called_once_with( volume['volume_type_id']) def _create_snapshot_dict(self, volume, snap_id=SNAPSHOT_ID, name=SNAPSHOT_NAME, description=SNAPSHOT_DESCRIPTION, provider_location=None): return {'id': snap_id, 'volume': volume, 'volume_name': volume['name'], 'name': name, 'display_description': description, 'volume_size': volume['size'], 'provider_location': provider_location } @mock.patch.object(VMDK_DRIVER, 'volumeops') @mock.patch.object(VMDK_DRIVER, '_get_volume_group_folder') def test_get_snapshot_group_folder(self, get_volume_group_folder, vops): dc = mock.sentinel.dc vops.get_dc.return_value = dc folder = mock.sentinel.folder get_volume_group_folder.return_value = folder volume = self._create_volume_obj() backing = mock.sentinel.backing self.assertEqual(folder, self._driver._get_snapshot_group_folder( volume, backing)) vops.get_dc.assert_called_once_with(backing) get_volume_group_folder.assert_called_once_with( dc, volume.project_id, snapshot=True) @mock.patch.object(VMDK_DRIVER, '_get_snapshot_group_folder') @mock.patch.object(VMDK_DRIVER, 'volumeops') @mock.patch.object(VMDK_DRIVER, '_in_use') @mock.patch.object(VMDK_DRIVER, '_create_temp_backing_from_attached_vmdk') @mock.patch.object(VMDK_DRIVER, '_delete_temp_backing') def _test_create_snapshot_template_format( self, delete_temp_backing, create_temp_backing_from_attached_vmdk, in_use, vops, get_snapshot_group_folder, attached=False, mark_as_template_error=False): folder = mock.sentinel.folder get_snapshot_group_folder.return_value = folder datastore = mock.sentinel.datastore vops.get_datastore.return_value = datastore tmp_backing = mock.sentinel.tmp_backing if attached: in_use.return_value = True 
create_temp_backing_from_attached_vmdk.return_value = tmp_backing else: in_use.return_value = False vops.clone_backing.return_value = tmp_backing if mark_as_template_error: vops.mark_backing_as_template.side_effect = ( exceptions.VimException()) else: inv_path = mock.sentinel.inv_path vops.get_inventory_path.return_value = inv_path volume = self._create_volume_obj() snapshot = fake_snapshot.fake_snapshot_obj( self._context, volume=volume) backing = mock.sentinel.backing if mark_as_template_error: self.assertRaises( exceptions.VimException, self._driver._create_snapshot_template_format, snapshot, backing) delete_temp_backing.assert_called_once_with(tmp_backing) else: exp_result = {'provider_location': inv_path} self.assertEqual(exp_result, self._driver._create_snapshot_template_format( snapshot, backing)) delete_temp_backing.assert_not_called() get_snapshot_group_folder.test_assert_called_once_with(volume, backing) vops.get_datastore.assert_called_once_with(backing) in_use.assert_called_once_with(snapshot.volume) if attached: create_temp_backing_from_attached_vmdk.assert_called_once_with( snapshot.volume, None, None, folder, datastore, tmp_name=snapshot.name) else: vops.clone_backing.assert_called_once_with( snapshot.name, backing, None, volumeops.FULL_CLONE_TYPE, datastore, folder=folder) vops.mark_backing_as_template.assert_called_once_with(tmp_backing) def test_create_snapshot_template_format(self): self._test_create_snapshot_template_format() def test_create_snapshot_template_format_force(self): self._test_create_snapshot_template_format(attached=True) def test_create_snapshot_template_format_mark_template_error(self): self._test_create_snapshot_template_format(mark_as_template_error=True) @mock.patch.object(VMDK_DRIVER, '_in_use', return_value=False) @mock.patch.object(VMDK_DRIVER, 'volumeops') def test_create_snapshot_without_backing(self, vops, in_use): vops.get_backing.return_value = None volume = self._create_volume_dict() snapshot = self._create_snapshot_dict(volume) ret = self._driver.create_snapshot(snapshot) self.assertIsNone(ret) vops.get_backing.assert_called_once_with(snapshot['volume_name'], snapshot['volume']['id']) self.assertFalse(vops.create_snapshot.called) @mock.patch.object(VMDK_DRIVER, '_in_use', return_value=False) @mock.patch.object(VMDK_DRIVER, 'volumeops') def test_create_snapshot_with_backing(self, vops, in_use): backing = mock.sentinel.backing vops.get_backing.return_value = backing volume = self._create_volume_dict() snapshot = self._create_snapshot_dict(volume) ret = self._driver.create_snapshot(snapshot) self.assertIsNone(ret) vops.get_backing.assert_called_once_with(snapshot['volume_name'], snapshot['volume']['id']) vops.create_snapshot.assert_called_once_with( backing, snapshot['name'], snapshot['display_description']) @mock.patch.object(VMDK_DRIVER, '_in_use', return_value=True) def test_create_snapshot_when_attached(self, in_use): volume = self._create_volume_dict(status='in-use') snapshot = self._create_snapshot_dict(volume) self.assertRaises(cinder_exceptions.InvalidVolume, self._driver.create_snapshot, snapshot) @mock.patch.object(VMDK_DRIVER, '_in_use', return_value=True) @mock.patch.object(VMDK_DRIVER, 'volumeops') @mock.patch.object(VMDK_DRIVER, '_create_snapshot_template_format') def test_create_snapshot_template( self, create_snapshot_template_format, vops, in_use): self._driver.configuration.vmware_snapshot_format = 'template' backing = mock.sentinel.backing vops.get_backing.return_value = backing model_update = mock.sentinel.model_update 
create_snapshot_template_format.return_value = model_update volume = self._create_volume_dict() snapshot = self._create_snapshot_dict(volume) ret = self._driver.create_snapshot(snapshot) self.assertEqual(model_update, ret) vops.get_backing.assert_called_once_with(snapshot['volume_name'], snapshot['volume']['id']) create_snapshot_template_format.assert_called_once_with( snapshot, backing) @mock.patch.object(VMDK_DRIVER, 'volumeops') def test_get_template_by_inv_path(self, vops): template = mock.sentinel.template vops.get_entity_by_inventory_path.return_value = template inv_path = mock.sentinel.inv_path self.assertEqual(template, self._driver._get_template_by_inv_path(inv_path)) vops.get_entity_by_inventory_path.assert_called_once_with(inv_path) @mock.patch.object(VMDK_DRIVER, 'volumeops') def test_get_template_by_inv_path_invalid_path(self, vops): vops.get_entity_by_inventory_path.return_value = None inv_path = mock.sentinel.inv_path self.assertRaises(vmdk_exceptions.TemplateNotFoundException, self._driver._get_template_by_inv_path, inv_path) vops.get_entity_by_inventory_path.assert_called_once_with(inv_path) @mock.patch.object(VMDK_DRIVER, '_get_template_by_inv_path') @mock.patch.object(VMDK_DRIVER, 'volumeops') def test_delete_snapshot_template_format( self, vops, get_template_by_inv_path): template = mock.sentinel.template get_template_by_inv_path.return_value = template inv_path = '/dc-1/vm/foo' volume = self._create_volume_dict() snapshot = fake_snapshot.fake_snapshot_obj(self._context, volume=volume, provider_location=inv_path) self._driver._delete_snapshot_template_format(snapshot) get_template_by_inv_path.assert_called_once_with(inv_path) vops.delete_backing.assert_called_once_with(template) @mock.patch.object(VMDK_DRIVER, 'volumeops') def test_delete_snapshot_without_backing(self, vops): vops.get_backing.return_value = None volume = self._create_volume_dict() snapshot = fake_snapshot.fake_snapshot_obj(self._context, volume=volume) self._driver.delete_snapshot(snapshot) vops.get_backing.assert_called_once_with(snapshot.volume_name, snapshot.volume.id) vops.get_snapshot.assert_not_called() vops.delete_snapshot.assert_not_called() @mock.patch.object(VMDK_DRIVER, 'volumeops') @mock.patch.object(VMDK_DRIVER, '_in_use', return_value=False) def test_delete_snapshot_with_backing(self, in_use, vops): backing = mock.sentinel.backing vops.get_backing.return_value = backing volume = self._create_volume_dict(status='deleting') snapshot = fake_snapshot.fake_snapshot_obj(self._context, volume=volume) self._driver.delete_snapshot(snapshot) vops.get_backing.assert_called_once_with(snapshot.volume_name, snapshot.volume.id) vops.get_snapshot.assert_called_once_with(backing, snapshot.name) in_use.assert_called_once_with(snapshot.volume) vops.delete_snapshot.assert_called_once_with( backing, snapshot.name) @mock.patch.object(VMDK_DRIVER, 'volumeops') @mock.patch.object(VMDK_DRIVER, '_in_use', return_value=True) def test_delete_snapshot_when_attached(self, in_use, vops): volume = self._create_volume_dict(status='in-use') snapshot = fake_snapshot.fake_snapshot_obj(self._context, volume=volume) self.assertRaises(cinder_exceptions.InvalidSnapshot, self._driver.delete_snapshot, snapshot) in_use.assert_called_once_with(snapshot.volume) @mock.patch.object(VMDK_DRIVER, 'volumeops') def test_delete_snapshot_without_backend_snapshot(self, vops): backing = mock.sentinel.backing vops.get_backing.return_value = backing vops.get_snapshot.return_value = None volume = self._create_volume_dict(status='in-use') snapshot 
= fake_snapshot.fake_snapshot_obj(self._context, volume=volume) self._driver.delete_snapshot(snapshot) vops.get_backing.assert_called_once_with(snapshot.volume_name, snapshot.volume.id) vops.get_snapshot.assert_called_once_with(backing, snapshot.name) vops.delete_snapshot.assert_not_called() @mock.patch.object(VMDK_DRIVER, 'volumeops') @mock.patch.object(VMDK_DRIVER, '_in_use', return_value=True) @mock.patch.object(VMDK_DRIVER, '_delete_snapshot_template_format') def test_delete_snapshot_template( self, delete_snapshot_template_format, in_use, vops): backing = mock.sentinel.backing vops.get_backing.return_value = backing inv_path = '/dc-1/vm/foo' volume = self._create_volume_dict(status='deleting') snapshot = fake_snapshot.fake_snapshot_obj(self._context, volume=volume, provider_location=inv_path) self._driver.delete_snapshot(snapshot) vops.get_backing.assert_called_once_with(snapshot.volume_name, snapshot.volume.id) vops.get_snapshot.assert_not_called() in_use.assert_called_once_with(snapshot.volume) delete_snapshot_template_format.assert_called_once_with(snapshot) @ddt.data('vmdk', 'VMDK', None) def test_validate_disk_format(self, disk_format): self._driver._validate_disk_format(disk_format) def test_validate_disk_format_with_invalid_format(self): self.assertRaises(cinder_exceptions.ImageUnacceptable, self._driver._validate_disk_format, 'img') def _create_image_meta(self, _id=IMAGE_ID, name=IMAGE_NAME, disk_format='vmdk', size=1 * units.Gi, container_format='bare', vmware_disktype='streamOptimized', vmware_adaptertype='lsiLogic', is_public=True): return {'id': _id, 'name': name, 'disk_format': disk_format, 'size': size, 'container_format': container_format, 'properties': {'vmware_disktype': vmware_disktype, 'vmware_adaptertype': vmware_adaptertype, }, 'is_public': is_public, } @mock.patch.object(VMDK_DRIVER, 'copy_image_to_volume') def test_clone_image(self, copy_image_to_volume): model_update = mock.sentinel.model_update copy_image_to_volume.return_value = model_update context = mock.sentinel.context volume = mock.sentinel.volume image_meta = self._create_image_meta() image_service = mock.sentinel.image_service ret = self._driver.clone_image( context, volume, mock.sentinel.image_location, image_meta, image_service) self.assertEqual((model_update, True), ret) copy_image_to_volume.assert_called_once_with( context, volume, image_service, image_meta['id']) @mock.patch('cinder.volume.drivers.vmware.vmdk.VMwareVcVmdkDriver.' '_validate_disk_format') def test_copy_image_to_volume_with_invalid_container(self, validate_disk_format): image_service = mock.Mock() image_meta = self._create_image_meta(container_format='ami') image_service.show.return_value = image_meta context = mock.sentinel.context volume = self._create_volume_dict() image_id = mock.sentinel.image_id self.assertRaises( cinder_exceptions.ImageUnacceptable, self._driver.copy_image_to_volume, context, volume, image_service, image_id) validate_disk_format.assert_called_once_with(image_meta['disk_format']) @mock.patch.object(VMDK_DRIVER, '_get_disk_type') @mock.patch('cinder.volume.drivers.vmware.vmdk.VMwareVcVmdkDriver.' '_validate_disk_format') @mock.patch.object(VMDK_DRIVER, '_get_adapter_type', return_value=volumeops.VirtualDiskAdapterType.BUS_LOGIC) @mock.patch('cinder.volume.drivers.vmware.volumeops.' 'VirtualDiskAdapterType.validate') @mock.patch('cinder.volume.drivers.vmware.vmdk.ImageDiskType.' 
'validate') @mock.patch.object(VMDK_DRIVER, '_create_volume_from_non_stream_optimized_image') @mock.patch.object(VMDK_DRIVER, '_fetch_stream_optimized_image') @mock.patch.object(VMDK_DRIVER, 'volumeops') @mock.patch.object(VMDK_DRIVER, '_extend_backing') def _test_copy_image_to_volume(self, extend_backing, vops, fetch_stream_optimized_image, create_volume_from_non_stream_opt_image, validate_image_disk_type, validate_image_adapter_type, get_adapter_type, validate_disk_format, get_disk_type, vmware_disk_type='streamOptimized', backing_disk_size=VOL_SIZE, call_extend_backing=False, container_format='bare'): image_service = mock.Mock() image_meta = self._create_image_meta(vmware_disktype=vmware_disk_type, container_format=container_format) image_service.show.return_value = image_meta backing = mock.sentinel.backing vops.get_backing.return_value = backing vops.get_disk_size.return_value = backing_disk_size * units.Gi disk_type = mock.sentinel.disk_type get_disk_type.return_value = disk_type context = mock.sentinel.context volume = self._create_volume_dict() image_id = mock.sentinel.image_id self._driver.copy_image_to_volume( context, volume, image_service, image_id) validate_disk_format.assert_called_once_with(image_meta['disk_format']) validate_image_disk_type.assert_called_once_with( image_meta['properties']['vmware_disktype']) validate_image_adapter_type.assert_called_once_with( image_meta['properties']['vmware_adaptertype']) if vmware_disk_type == 'streamOptimized': fetch_stream_optimized_image.assert_called_once_with( context, volume, image_service, image_id, image_meta['size'], image_meta['properties']['vmware_adaptertype']) else: create_volume_from_non_stream_opt_image.assert_called_once_with( context, volume, image_service, image_id, image_meta['size'], image_meta['properties']['vmware_adaptertype'], image_meta['properties']['vmware_disktype']) vops.get_disk_size.assert_called_once_with(backing) if call_extend_backing: extend_backing.assert_called_once_with(backing, volume['size'], disk_type) else: self.assertFalse(extend_backing.called) @ddt.data('sparse', 'preallocated', 'streamOptimized') def test_copy_image_to_volume(self, vmware_disk_type): self._test_copy_image_to_volume(vmware_disk_type=vmware_disk_type) @ddt.data('sparse', 'preallocated', 'streamOptimized') def test_copy_image_to_volume_with_extend_backing(self, vmware_disk_type): self._test_copy_image_to_volume(vmware_disk_type=vmware_disk_type, backing_disk_size=1, call_extend_backing=True) def test_copy_image_to_volume_with_ova_container(self): self._test_copy_image_to_volume(container_format='ova') @mock.patch('cinder.volume.drivers.vmware.vmdk.VMwareVcVmdkDriver.' 
'_get_disk_type') @mock.patch.object(VMDK_DRIVER, '_check_disk_conversion') @mock.patch('oslo_utils.uuidutils.generate_uuid') @mock.patch.object(VMDK_DRIVER, '_create_backing') @mock.patch.object(VMDK_DRIVER, '_get_ds_name_folder_path') @mock.patch.object(VMDK_DRIVER, 'volumeops') @mock.patch.object(VMDK_DRIVER, '_create_virtual_disk_from_sparse_image') @mock.patch.object(VMDK_DRIVER, '_create_virtual_disk_from_preallocated_image') @mock.patch.object(VMDK_DRIVER, '_get_storage_profile_id') @mock.patch.object(VMDK_DRIVER, '_select_ds_for_volume') @mock.patch.object(VMDK_DRIVER, '_delete_temp_backing') def _test_create_volume_from_non_stream_optimized_image( self, delete_tmp_backing, select_ds_for_volume, get_storage_profile_id, create_disk_from_preallocated_image, create_disk_from_sparse_image, vops, get_ds_name_folder_path, create_backing, generate_uuid, check_disk_conversion, get_disk_type, image_disk_type='sparse', disk_conversion=False): disk_type = mock.sentinel.disk_type get_disk_type.return_value = disk_type check_disk_conversion.return_value = disk_conversion volume = self._create_volume_dict() if disk_conversion: disk_name = "6b77b25a-9136-470e-899e-3c930e570d8e" generate_uuid.return_value = disk_name else: disk_name = volume['name'] backing = mock.sentinel.backing create_backing.return_value = backing ds_name = mock.sentinel.ds_name folder_path = mock.sentinel.folder_path get_ds_name_folder_path.return_value = (ds_name, folder_path) host = mock.sentinel.host dc_ref = mock.sentinel.dc_ref vops.get_host.return_value = host vops.get_dc.return_value = dc_ref vmdk_path = mock.Mock(spec=volumeops.FlatExtentVirtualDiskPath) create_disk_from_sparse_image.return_value = vmdk_path create_disk_from_preallocated_image.return_value = vmdk_path profile_id = mock.sentinel.profile_id get_storage_profile_id.return_value = profile_id if disk_conversion: rp = mock.sentinel.rp folder = mock.sentinel.folder datastore = mock.sentinel.datastore summary = mock.Mock(datastore=datastore) select_ds_for_volume.return_value = (host, rp, folder, summary) clone = mock.sentinel.clone vops.clone_backing.return_value = clone context = mock.sentinel.context image_service = mock.sentinel.image_service image_id = mock.sentinel.image_id image_size_in_bytes = units.Gi adapter_type = mock.sentinel.adapter_type self._driver._create_volume_from_non_stream_optimized_image( context, volume, image_service, image_id, image_size_in_bytes, adapter_type, image_disk_type) check_disk_conversion.assert_called_once_with(image_disk_type, mock.sentinel.disk_type) if disk_conversion: create_backing.assert_called_once_with( volume, create_params={vmdk.CREATE_PARAM_DISK_LESS: True, vmdk.CREATE_PARAM_BACKING_NAME: disk_name, vmdk.CREATE_PARAM_TEMP_BACKING: True}) else: create_backing.assert_called_once_with( volume, create_params={vmdk.CREATE_PARAM_DISK_LESS: True}) if image_disk_type == 'sparse': create_disk_from_sparse_image.assert_called_once_with( context, image_service, image_id, image_size_in_bytes, dc_ref, ds_name, folder_path, disk_name) else: create_disk_from_preallocated_image.assert_called_once_with( context, image_service, image_id, image_size_in_bytes, dc_ref, ds_name, folder_path, disk_name, adapter_type) get_storage_profile_id.assert_called_once_with(volume) vops.attach_disk_to_backing.assert_called_once_with( backing, image_size_in_bytes / units.Ki, disk_type, adapter_type, profile_id, vmdk_path.get_descriptor_ds_file_path()) if disk_conversion: select_ds_for_volume.assert_called_once_with(volume) extra_config = 
{vmdk.EXTRA_CONFIG_VOLUME_ID_KEY: volume['id'], volumeops.BACKING_UUID_KEY: volume['id']} vops.clone_backing.assert_called_once_with( volume['name'], backing, None, volumeops.FULL_CLONE_TYPE, datastore, disk_type=disk_type, host=host, resource_pool=rp, extra_config=extra_config, folder=folder) delete_tmp_backing.assert_called_once_with(backing) vops.update_backing_disk_uuid(clone, volume['id']) else: vops.update_backing_disk_uuid(backing, volume['id']) @ddt.data('sparse', 'preallocated') def test_create_volume_from_non_stream_optimized_image(self, image_disk_type): self._test_create_volume_from_non_stream_optimized_image( image_disk_type=image_disk_type) @ddt.data('sparse', 'preallocated') def test_create_volume_from_non_stream_opt_image_with_disk_conversion( self, image_disk_type): self._test_create_volume_from_non_stream_optimized_image( image_disk_type=image_disk_type, disk_conversion=True) def _test_get_vsphere_url(self, direct_url, exp_vsphere_url=None): image_service = mock.Mock() image_service.get_location.return_value = (direct_url, []) context = mock.sentinel.context image_id = mock.sentinel.image_id ret = self._driver._get_vsphere_url(context, image_service, image_id) self.assertEqual(exp_vsphere_url, ret) image_service.get_location.assert_called_once_with(context, image_id) def test_get_vsphere_url(self): url = "vsphere://foo/folder/glance/img_uuid?dcPath=dc1&dsName=ds1" self._test_get_vsphere_url(url, exp_vsphere_url=url) def test_get_vsphere_url_(self): url = "http://foo/folder/glance/img_uuid?dcPath=dc1&dsName=ds1" self._test_get_vsphere_url(url) @mock.patch.object(VMDK_DRIVER, '_copy_temp_virtual_disk') @mock.patch.object(VMDK_DRIVER, '_get_temp_image_folder') @mock.patch('oslo_utils.uuidutils.generate_uuid') @mock.patch( 'cinder.volume.drivers.vmware.volumeops.FlatExtentVirtualDiskPath') @mock.patch.object(VMDK_DRIVER, '_get_vsphere_url') @mock.patch.object(VMDK_DRIVER, '_copy_image') @mock.patch.object(VMDK_DRIVER, 'volumeops') def _test_create_virtual_disk_from_preallocated_image( self, vops, copy_image, get_vsphere_url, flat_extent_path, generate_uuid, get_temp_image_folder, copy_temp_virtual_disk, vsphere_url=None): dc_ref = vmware_fake.ManagedObjectReference(value=mock.sentinel.dc_ref) ds_name = mock.sentinel.ds_name folder_path = mock.sentinel.folder_path get_temp_image_folder.return_value = (dc_ref, ds_name, folder_path) uuid = mock.sentinel.uuid generate_uuid.return_value = uuid path = mock.Mock() dest_path = mock.Mock() flat_extent_path.side_effect = [path, dest_path] get_vsphere_url.return_value = vsphere_url context = mock.sentinel.context image_service = mock.sentinel.image_service image_id = mock.sentinel.image_id image_size_in_bytes = 2 * units.Gi dest_dc_ref = \ vmware_fake.ManagedObjectReference(value=mock.sentinel.dest_dc_ref) dest_ds_name = mock.sentinel.dest_ds_name dest_folder_path = mock.sentinel.dest_folder_path dest_disk_name = mock.sentinel.dest_disk_name adapter_type = mock.sentinel.adapter_type ret = self._driver._create_virtual_disk_from_preallocated_image( context, image_service, image_id, image_size_in_bytes, dest_dc_ref, dest_ds_name, dest_folder_path, dest_disk_name, adapter_type) exp_flat_extent_path_calls = [ mock.call(ds_name, folder_path, uuid), mock.call(dest_ds_name, dest_folder_path, dest_disk_name)] self.assertEqual(exp_flat_extent_path_calls, flat_extent_path.call_args_list) create_descriptor = vops.create_flat_extent_virtual_disk_descriptor create_descriptor.assert_called_once_with( dc_ref, path, image_size_in_bytes / units.Ki, 
adapter_type, vmdk.EAGER_ZEROED_THICK_VMDK_TYPE) get_vsphere_url.assert_called_once_with( context, image_service, image_id) if vsphere_url: vops.copy_datastore_file.assert_called_once_with( vsphere_url, dc_ref, path.get_flat_extent_ds_file_path()) else: copy_image.assert_called_once_with( context, dc_ref, image_service, image_id, image_size_in_bytes, ds_name, path.get_flat_extent_file_path()) copy_temp_virtual_disk.assert_called_once_with(dc_ref, path, dest_dc_ref, dest_path) self.assertEqual(dest_path, ret) def test_create_virtual_disk_from_preallocated_image(self): self._test_create_virtual_disk_from_preallocated_image() def test_create_virtual_disk_from_preallocated_image_on_vsphere(self): self._test_create_virtual_disk_from_preallocated_image( vsphere_url=mock.sentinel.vsphere_url) @mock.patch.object(VMDK_DRIVER, '_copy_temp_virtual_disk') @mock.patch.object(VMDK_DRIVER, '_get_temp_image_folder') @mock.patch( 'cinder.volume.drivers.vmware.volumeops.FlatExtentVirtualDiskPath') @mock.patch.object(VMDK_DRIVER, '_get_vsphere_url', return_value=None) @mock.patch.object(VMDK_DRIVER, '_copy_image') @mock.patch.object(VMDK_DRIVER, 'volumeops') def test_create_virtual_disk_from_preallocated_image_with_no_disk_copy( self, vops, copy_image, get_vsphere_url, flat_extent_path, get_temp_image_folder, copy_temp_virtual_disk): dc_ref = vmware_fake.ManagedObjectReference(value=mock.sentinel.dc_ref) ds_name = mock.sentinel.ds_name folder_path = mock.sentinel.folder_path get_temp_image_folder.return_value = (dc_ref, ds_name, folder_path) path = mock.Mock() flat_extent_path.return_value = path context = mock.sentinel.context image_service = mock.sentinel.image_service image_id = mock.sentinel.image_id image_size_in_bytes = 2 * units.Gi dc_ref_value = mock.sentinel.dc_ref dest_dc_ref = vmware_fake.ManagedObjectReference(value=dc_ref_value) dest_ds_name = ds_name dest_folder_path = mock.sentinel.dest_folder_path dest_disk_name = mock.sentinel.dest_disk_name adapter_type = mock.sentinel.adapter_type ret = self._driver._create_virtual_disk_from_preallocated_image( context, image_service, image_id, image_size_in_bytes, dest_dc_ref, dest_ds_name, dest_folder_path, dest_disk_name, adapter_type) flat_extent_path.assert_called_once_with( dest_ds_name, dest_folder_path, dest_disk_name) create_descriptor = vops.create_flat_extent_virtual_disk_descriptor create_descriptor.assert_called_once_with( dc_ref, path, image_size_in_bytes / units.Ki, adapter_type, vmdk.EAGER_ZEROED_THICK_VMDK_TYPE) copy_image.assert_called_once_with( context, dc_ref, image_service, image_id, image_size_in_bytes, ds_name, path.get_flat_extent_file_path()) self.assertFalse(copy_temp_virtual_disk.called) self.assertEqual(path, ret) @mock.patch.object(VMDK_DRIVER, '_copy_temp_virtual_disk') @mock.patch.object(VMDK_DRIVER, '_get_temp_image_folder') @mock.patch('oslo_utils.uuidutils.generate_uuid') @mock.patch( 'cinder.volume.drivers.vmware.volumeops.FlatExtentVirtualDiskPath') @mock.patch.object(VMDK_DRIVER, '_get_vsphere_url', return_value=None) @mock.patch.object(VMDK_DRIVER, '_copy_image') @mock.patch.object(VMDK_DRIVER, 'volumeops') def test_create_virtual_disk_from_preallocated_image_with_copy_error( self, vops, copy_image, get_vsphere_url, flat_extent_path, generate_uuid, get_temp_image_folder, copy_temp_virtual_disk): dc_ref = vmware_fake.ManagedObjectReference(value=mock.sentinel.dc_ref) ds_name = mock.sentinel.ds_name folder_path = mock.sentinel.folder_path get_temp_image_folder.return_value = (dc_ref, ds_name, folder_path) uuid = 
mock.sentinel.uuid generate_uuid.return_value = uuid path = mock.Mock() dest_path = mock.Mock() flat_extent_path.side_effect = [path, dest_path] copy_image.side_effect = exceptions.VimException("error") context = mock.sentinel.context image_service = mock.sentinel.image_service image_id = mock.sentinel.image_id image_size_in_bytes = 2 * units.Gi dest_dc_ref = \ vmware_fake.ManagedObjectReference(value=mock.sentinel.dest_dc_ref) dest_ds_name = mock.sentinel.dest_ds_name dest_folder_path = mock.sentinel.dest_folder_path dest_disk_name = mock.sentinel.dest_disk_name adapter_type = mock.sentinel.adapter_type self.assertRaises( exceptions.VimException, self._driver._create_virtual_disk_from_preallocated_image, context, image_service, image_id, image_size_in_bytes, dest_dc_ref, dest_ds_name, dest_folder_path, dest_disk_name, adapter_type) vops.delete_file.assert_called_once_with( path.get_descriptor_ds_file_path(), dc_ref) self.assertFalse(copy_temp_virtual_disk.called) @mock.patch('oslo_utils.uuidutils.generate_uuid') @mock.patch( 'cinder.volume.drivers.vmware.volumeops.' 'MonolithicSparseVirtualDiskPath') @mock.patch( 'cinder.volume.drivers.vmware.volumeops.FlatExtentVirtualDiskPath') @mock.patch.object(VMDK_DRIVER, '_copy_temp_virtual_disk') @mock.patch.object(VMDK_DRIVER, '_get_vsphere_url') @mock.patch.object(VMDK_DRIVER, '_copy_image') @mock.patch.object(VMDK_DRIVER, 'volumeops') def _test_create_virtual_disk_from_sparse_image( self, vops, copy_image, get_vsphere_url, copy_temp_virtual_disk, flat_extent_path, sparse_path, generate_uuid, vsphere_url=None): uuid = mock.sentinel.uuid generate_uuid.return_value = uuid src_path = mock.Mock() sparse_path.return_value = src_path dest_path = mock.Mock() flat_extent_path.return_value = dest_path get_vsphere_url.return_value = vsphere_url context = mock.sentinel.context image_service = mock.sentinel.image_service image_id = mock.sentinel.image_id image_size_in_bytes = 2 * units.Gi dc_ref = mock.sentinel.dc_ref ds_name = mock.sentinel.ds_name folder_path = mock.sentinel.folder_path disk_name = mock.sentinel.disk_name ret = self._driver._create_virtual_disk_from_sparse_image( context, image_service, image_id, image_size_in_bytes, dc_ref, ds_name, folder_path, disk_name) sparse_path.assert_called_once_with(ds_name, folder_path, uuid) get_vsphere_url.assert_called_once_with( context, image_service, image_id) if vsphere_url: vops.copy_datastore_file.assert_called_once_with( vsphere_url, dc_ref, src_path.get_descriptor_ds_file_path()) else: copy_image.assert_called_once_with( context, dc_ref, image_service, image_id, image_size_in_bytes, ds_name, src_path.get_descriptor_file_path()) flat_extent_path.assert_called_once_with( ds_name, folder_path, disk_name) copy_temp_virtual_disk.assert_called_once_with( dc_ref, src_path, dc_ref, dest_path) self.assertEqual(dest_path, ret) def test_create_virtual_disk_from_sparse_image(self): self._test_create_virtual_disk_from_sparse_image() def test_create_virtual_disk_from_sparse_image_on_vsphere(self): self._test_create_virtual_disk_from_sparse_image( vsphere_url=mock.sentinel.vsphere_url) @mock.patch.object(VMDK_DRIVER, '_select_datastore') @mock.patch.object(VMDK_DRIVER, 'volumeops') def test_get_temp_image_folder(self, vops, select_datastore): host = mock.sentinel.host resource_pool = mock.sentinel.rp summary = mock.Mock() ds_name = mock.sentinel.ds_name summary.name = ds_name select_datastore.return_value = (host, resource_pool, summary) dc = mock.sentinel.dc vops.get_dc.return_value = dc image_size = 2 * units.Gi 
ret = self._driver._get_temp_image_folder(image_size) self.assertEqual((dc, ds_name, vmdk.TMP_IMAGES_DATASTORE_FOLDER_PATH), ret) exp_req = { hub.DatastoreSelector.SIZE_BYTES: image_size, hub.DatastoreSelector.HARD_AFFINITY_DS_TYPE: {hub.DatastoreType.VMFS, hub.DatastoreType.NFS, hub.DatastoreType.NFS41}} select_datastore.assert_called_once_with(exp_req) vops.create_datastore_folder.assert_called_once_with( ds_name, vmdk.TMP_IMAGES_DATASTORE_FOLDER_PATH, dc) @mock.patch.object(VMDK_DRIVER, '_select_ds_for_volume') @mock.patch.object(VMDK_DRIVER, '_get_storage_profile_id') @mock.patch('cinder.volume.drivers.vmware.vmdk.VMwareVcVmdkDriver.' '_get_disk_type') @mock.patch.object(VMDK_DRIVER, '_get_extra_config') @mock.patch.object(VMDK_DRIVER, 'volumeops') @mock.patch.object(VMDK_DRIVER, 'session') @mock.patch.object(image_transfer, 'download_stream_optimized_image') def _test_copy_image_to_volume_stream_optimized(self, download_image, session, vops, get_extra_config, get_disk_type, get_profile_id, select_ds_for_volume, download_error=False): host = mock.sentinel.host rp = mock.sentinel.rp folder = mock.sentinel.folder # NOTE(mriedem): The summary.name gets logged so it has to be a string summary = mock.Mock(name=str(mock.sentinel.ds_name)) select_ds_for_volume.return_value = (host, rp, folder, summary) profile_id = mock.sentinel.profile_id get_profile_id.return_value = profile_id disk_type = mock.sentinel.disk_type get_disk_type.return_value = disk_type extra_config = mock.sentinel.extra_config get_extra_config.return_value = extra_config vm_create_spec = mock.sentinel.vm_create_spec vops.get_create_spec.return_value = vm_create_spec import_spec = mock.Mock() session.vim.client.factory.create.return_value = import_spec backing = mock.sentinel.backing if download_error: download_image.side_effect = exceptions.VimException vops.get_backing.return_value = backing else: download_image.return_value = backing context = mock.sentinel.context volume = self._create_volume_dict(size=3) image_service = mock.sentinel.image_service image_id = mock.sentinel.image_id image_size = 2 * units.Gi adapter_type = mock.sentinel.adapter_type if download_error: self.assertRaises( exceptions.VimException, self._driver._fetch_stream_optimized_image, context, volume, image_service, image_id, image_size, adapter_type) else: self._driver._fetch_stream_optimized_image( context, volume, image_service, image_id, image_size, adapter_type) select_ds_for_volume.assert_called_once_with(volume) vops.get_create_spec.assert_called_once_with( volume['name'], 0, disk_type, summary.name, profile_id=profile_id, adapter_type=adapter_type, extra_config=extra_config) self.assertEqual(vm_create_spec, import_spec.configSpec) download_image.assert_called_with( context, self._config.vmware_image_transfer_timeout_secs, image_service, image_id, session=session, host=self._config.vmware_host_ip, port=self._config.vmware_host_port, resource_pool=rp, vm_folder=folder, vm_import_spec=import_spec, image_size=image_size, http_method='POST') if download_error: self.assertFalse(vops.update_backing_disk_uuid.called) vops.delete_backing.assert_called_once_with(backing) else: vops.update_backing_disk_uuid.assert_called_once_with( backing, volume['id']) def test_copy_image_to_volume_stream_optimized(self): self._test_copy_image_to_volume_stream_optimized() def test_copy_image_to_volume_stream_optimized_with_download_error(self): self._test_copy_image_to_volume_stream_optimized(download_error=True) @mock.patch.object(VMDK_DRIVER, '_in_use', 
return_value=True) def test_copy_volume_to_image_when_attached(self, in_use): volume = self._create_volume_dict( status="uploading", attachment=[mock.sentinel.attachment_1]) self.assertRaises( cinder_exceptions.InvalidVolume, self._driver.copy_volume_to_image, mock.sentinel.context, volume, mock.sentinel.image_service, mock.sentinel.image_meta) in_use.assert_called_once_with(volume) @mock.patch.object(VMDK_DRIVER, '_validate_disk_format') @mock.patch.object(VMDK_DRIVER, 'volumeops') @mock.patch.object(VMDK_DRIVER, '_create_backing') @mock.patch('oslo_vmware.image_transfer.upload_image') @mock.patch.object(VMDK_DRIVER, 'session') def _test_copy_volume_to_image( self, session, upload_image, create_backing, vops, validate_disk_format, backing_exists=True): backing = mock.sentinel.backing if backing_exists: vops.get_backing.return_value = backing else: vops.get_backing.return_value = None create_backing.return_value = backing vmdk_file_path = mock.sentinel.vmdk_file_path vops.get_vmdk_path.return_value = vmdk_file_path context = mock.sentinel.context volume = test_utils.create_volume( self._context, volume_type_id = fake_constants.VOLUME_TYPE_ID, updated_at = self.updated_at) extra_specs = { 'image_service:store_id': 'fake-store' } test_utils.create_volume_type(self._context.elevated(), id=fake_constants.VOLUME_TYPE_ID, name="test_type", extra_specs=extra_specs) image_service = mock.sentinel.image_service image_meta = self._create_image_meta() self._driver.copy_volume_to_image( context, volume, image_service, image_meta) validate_disk_format.assert_called_once_with(image_meta['disk_format']) vops.get_backing.assert_called_once_with(volume['name'], volume['id']) if not backing_exists: create_backing.assert_called_once_with(volume) vops.get_vmdk_path.assert_called_once_with(backing) upload_image.assert_called_once_with( context, self._config.vmware_image_transfer_timeout_secs, image_service, image_meta['id'], volume['project_id'], session=session, host=self._config.vmware_host_ip, port=self._config.vmware_host_port, store_id='fake-store', base_image_ref=None, vm=backing, vmdk_file_path=vmdk_file_path, vmdk_size=volume['size'] * units.Gi, image_name=image_meta['name'], image_version=1) def test_copy_volume_to_image(self): self._test_copy_volume_to_image() def test_copy_volume_to_image_with_no_backing(self): self._test_copy_volume_to_image(backing_exists=False) def test_in_use(self): volume = self._create_volume_dict( attachment=[mock.sentinel.attachment_1]) self.assertTrue(self._driver._in_use(volume)) def test_in_use_with_available_volume(self): volume = self._create_volume_dict() self.assertIsNone(self._driver._in_use(volume)) @mock.patch.object(VMDK_DRIVER, '_in_use', return_value=True) def test_retype_with_in_use_volume(self, in_use): context = mock.sentinel.context volume = self._create_volume_dict( status='retyping', attachment=[mock.sentinel.attachment_1]) new_type = mock.sentinel.new_type diff = mock.sentinel.diff host = mock.sentinel.host self.assertFalse(self._driver.retype(context, volume, new_type, diff, host)) in_use.assert_called_once_with(volume) @mock.patch.object(VMDK_DRIVER, '_in_use', return_value=False) @mock.patch.object(VMDK_DRIVER, 'volumeops') def test_retype_with_no_volume_backing(self, vops, in_use): vops.get_backing.return_value = None context = mock.sentinel.context volume = self._create_volume_dict(status='retyping') new_type = mock.sentinel.new_type diff = mock.sentinel.diff host = mock.sentinel.host self.assertTrue(self._driver.retype(context, volume, new_type, 
diff, host)) @mock.patch.object(VMDK_DRIVER, '_in_use', return_value=False) @mock.patch.object(VMDK_DRIVER, 'volumeops') @mock.patch('cinder.volume.drivers.vmware.vmdk.VMwareVcVmdkDriver.' '_get_disk_type') @mock.patch('cinder.volume.drivers.vmware.vmdk.VMwareVcVmdkDriver.' '_get_extra_spec_disk_type') @mock.patch.object(VMDK_DRIVER, '_get_storage_profile') @mock.patch.object(VMDK_DRIVER, '_get_extra_spec_storage_profile') @mock.patch.object(VMDK_DRIVER, 'ds_sel') @mock.patch.object(VMDK_DRIVER, '_select_datastore') @mock.patch.object( VMDK_DRIVER, '_get_adapter_type', return_value='lsiLogic') @mock.patch.object( VMDK_DRIVER, '_get_extra_spec_adapter_type', return_value='lsiLogic') def test_retype_with_diff_profile_and_ds_compliance( self, _get_extra_spec_adapter_type, _get_adapter_type, select_datastore, ds_sel, get_extra_spec_storage_profile, get_storage_profile, get_extra_spec_disk_type, get_disk_type, vops, in_use): backing = mock.sentinel.backing vops.get_backing.return_value = backing datastore = vmware_fake.ManagedObjectReference(value='ds1') vops.get_datastore.return_value = datastore disk_type = mock.sentinel.disk_type get_disk_type.return_value = disk_type get_extra_spec_disk_type.return_value = disk_type self._driver._storage_policy_enabled = True profile = 'gold' get_storage_profile.return_value = profile new_profile = 'silver' get_extra_spec_storage_profile.return_value = new_profile ds_sel.is_datastore_compliant.return_value = True new_profile_id = mock.sentinel.new_profile_id ds_sel.get_profile_id.return_value = new_profile_id context = mock.sentinel.context volume = self._create_volume_dict(status='retyping') new_type = {'id': 'f04a65e0-d10c-4db7-b4a5-f933d57aa2b5'} diff = mock.sentinel.diff host = mock.sentinel.host self.assertTrue(self._driver.retype(context, volume, new_type, diff, host)) ds_sel.is_datastore_compliant.assert_called_once_with(datastore, new_profile) select_datastore.assert_not_called() vops.change_backing_profile.assert_called_once_with(backing, new_profile_id) @mock.patch.object(VMDK_DRIVER, '_in_use', return_value=False) @mock.patch.object(VMDK_DRIVER, 'volumeops') @mock.patch('cinder.volume.drivers.vmware.vmdk.VMwareVcVmdkDriver.' '_get_disk_type') @mock.patch('cinder.volume.drivers.vmware.vmdk.VMwareVcVmdkDriver.' 
'_get_extra_spec_disk_type') @mock.patch.object(VMDK_DRIVER, '_get_storage_profile') @mock.patch.object(VMDK_DRIVER, '_get_extra_spec_storage_profile') @mock.patch.object(VMDK_DRIVER, 'ds_sel') @mock.patch.object(VMDK_DRIVER, '_select_datastore') def test_retype_with_diff_profile_and_ds_sel_no_candidate( self, select_datastore, ds_sel, get_extra_spec_storage_profile, get_storage_profile, get_extra_spec_disk_type, get_disk_type, vops, in_use): backing = mock.sentinel.backing vops.get_backing.return_value = backing datastore = vmware_fake.ManagedObjectReference(value='ds1') vops.get_datastore.return_value = datastore disk_type = mock.sentinel.disk_type get_disk_type.return_value = disk_type get_extra_spec_disk_type.return_value = disk_type vops.snapshot_exists.return_value = False self._driver._storage_policy_enabled = True profile = 'gold' get_storage_profile.return_value = profile new_profile = 'silver' get_extra_spec_storage_profile.return_value = new_profile ds_sel.is_datastore_compliant.return_value = False select_datastore.side_effect = ( vmdk_exceptions.NoValidDatastoreException) context = mock.sentinel.context volume = self._create_volume_dict(status='retyping') new_type = {'id': 'f04a65e0-d10c-4db7-b4a5-f933d57aa2b5'} diff = mock.sentinel.diff host = mock.sentinel.host self.assertFalse(self._driver.retype(context, volume, new_type, diff, host)) ds_sel.is_datastore_compliant.assert_called_once_with(datastore, new_profile) select_datastore.assert_called_once_with( {hub.DatastoreSelector.SIZE_BYTES: volume['size'] * units.Gi, hub.DatastoreSelector.PROFILE_NAME: new_profile}) @mock.patch.object(VMDK_DRIVER, '_in_use', return_value=False) @mock.patch.object(VMDK_DRIVER, 'volumeops') @mock.patch('cinder.volume.drivers.vmware.vmdk.VMwareVcVmdkDriver.' '_get_disk_type') @mock.patch('cinder.volume.drivers.vmware.vmdk.VMwareVcVmdkDriver.' 
'_get_extra_spec_disk_type') @mock.patch.object(VMDK_DRIVER, '_get_storage_profile') @mock.patch.object(VMDK_DRIVER, '_get_extra_spec_storage_profile') @mock.patch.object(VMDK_DRIVER, 'ds_sel') @mock.patch.object(VMDK_DRIVER, '_select_datastore') @mock.patch.object(VMDK_DRIVER, '_get_dc') @mock.patch.object(VMDK_DRIVER, '_get_volume_group_folder') @mock.patch.object( VMDK_DRIVER, '_get_adapter_type', return_value='lsiLogic') @mock.patch.object( VMDK_DRIVER, '_get_extra_spec_adapter_type', return_value='lsiLogic') def test_retype_with_diff_extra_spec_and_vol_snapshot( self, get_extra_spec_adapter_type, get_adapter_type, get_volume_group_folder, get_dc, select_datastore, ds_sel, get_extra_spec_storage_profile, get_storage_profile, get_extra_spec_disk_type, get_disk_type, vops, in_use): backing = mock.sentinel.backing vops.get_backing.return_value = backing datastore = vmware_fake.ManagedObjectReference(value='ds1') vops.get_datastore.return_value = datastore get_disk_type.return_value = 'thin' new_disk_type = 'thick' get_extra_spec_disk_type.return_value = new_disk_type vops.snapshot_exists.return_value = True self._driver._storage_policy_enabled = True profile = 'gold' get_storage_profile.return_value = profile new_profile = 'silver' get_extra_spec_storage_profile.return_value = new_profile ds_sel.is_datastore_compliant.return_value = False host = mock.sentinel.host rp = mock.sentinel.rp new_datastore = mock.Mock(value='ds2') summary = mock.Mock(datastore=new_datastore) select_datastore.return_value = (host, rp, summary) dc = mock.sentinel.dc get_dc.return_value = dc folder = mock.sentinel.folder get_volume_group_folder.return_value = folder new_profile_id = mock.sentinel.new_profile_id ds_sel.get_profile_id.return_value = new_profile_id context = mock.sentinel.context volume = self._create_volume_dict(status='retyping') new_type = {'id': 'f04a65e0-d10c-4db7-b4a5-f933d57aa2b5'} diff = mock.sentinel.diff host = mock.sentinel.host self.assertTrue(self._driver.retype(context, volume, new_type, diff, host)) ds_sel.is_datastore_compliant.assert_called_once_with(datastore, new_profile) select_datastore.assert_called_once_with( {hub.DatastoreSelector.SIZE_BYTES: volume['size'] * units.Gi, hub.DatastoreSelector.HARD_ANTI_AFFINITY_DS: ['ds1'], hub.DatastoreSelector.PROFILE_NAME: new_profile}) get_dc.assert_called_once_with(rp) get_volume_group_folder.assert_called_once_with(dc, volume['project_id']) vops.relocate_backing.assert_called_once_with( backing, new_datastore, rp, host, new_disk_type) vops.move_backing_to_folder.assert_called_once_with(backing, folder) vops.change_backing_profile.assert_called_once_with(backing, new_profile_id) @mock.patch.object(VMDK_DRIVER, '_in_use', return_value=False) @mock.patch.object(VMDK_DRIVER, 'volumeops') @mock.patch('cinder.volume.drivers.vmware.vmdk.VMwareVcVmdkDriver.' '_get_disk_type') @mock.patch('cinder.volume.drivers.vmware.vmdk.VMwareVcVmdkDriver.' 
'_get_extra_spec_disk_type') @mock.patch.object(VMDK_DRIVER, '_get_storage_profile') @mock.patch.object(VMDK_DRIVER, '_get_extra_spec_storage_profile') @mock.patch.object(VMDK_DRIVER, 'ds_sel') @mock.patch.object(VMDK_DRIVER, '_select_datastore') @mock.patch.object(VMDK_DRIVER, '_get_dc') @mock.patch.object(VMDK_DRIVER, '_get_volume_group_folder') @mock.patch('oslo_utils.uuidutils.generate_uuid') @mock.patch.object(VMDK_DRIVER, '_delete_temp_backing') @mock.patch.object(VMDK_DRIVER, '_get_adapter_type') @mock.patch.object(VMDK_DRIVER, '_get_extra_spec_adapter_type') def _test_retype_with_diff_extra_spec_and_ds_compliance( self, get_extra_spec_adapter_type, get_adapter_type, delete_temp_backing, generate_uuid, get_volume_group_folder, get_dc, select_datastore, ds_sel, get_extra_spec_storage_profile, get_storage_profile, get_extra_spec_disk_type, get_disk_type, vops, in_use, clone_error=False): backing = mock.sentinel.backing vops.get_backing.return_value = backing datastore = vmware_fake.ManagedObjectReference(value='ds1') vops.get_datastore.return_value = datastore get_disk_type.return_value = 'thin' new_disk_type = 'thick' get_extra_spec_disk_type.return_value = new_disk_type vops.snapshot_exists.return_value = False self._driver._storage_policy_enabled = True profile = 'gold' get_storage_profile.return_value = profile new_profile = 'silver' get_extra_spec_storage_profile.return_value = new_profile ds_sel.is_datastore_compliant.return_value = True host = mock.sentinel.host rp = mock.sentinel.rp summary = mock.Mock(datastore=datastore) select_datastore.return_value = (host, rp, summary) dc = mock.sentinel.dc get_dc.return_value = dc folder = mock.sentinel.folder get_volume_group_folder.return_value = folder new_profile_id = mock.sentinel.new_profile_id ds_sel.get_profile_id.return_value = new_profile_id uuid = '025b654b-d4ed-47f9-8014-b71a7744eafc' generate_uuid.return_value = uuid if clone_error: vops.clone_backing.side_effect = exceptions.VimException else: new_backing = mock.sentinel.new_backing vops.clone_backing.return_value = new_backing adapter_type = 'lsiLogic' get_adapter_type.return_value = adapter_type new_adapter_type = 'paraVirtual' get_extra_spec_adapter_type.return_value = new_adapter_type capacity = self.VOL_SIZE * units.Mi filename = mock.sentinel.filename disk_backing = mock.Mock(filename=filename) disk_device = mock.Mock(capacityInKB=capacity, backing=disk_backing) vops._get_disk_device.return_value = disk_device context = mock.sentinel.context volume = self._create_volume_dict(status='retyping') new_type = {'id': 'f04a65e0-d10c-4db7-b4a5-f933d57aa2b5'} diff = mock.sentinel.diff host = mock.sentinel.host if clone_error: self.assertRaises(exceptions.VimException, self._driver.retype, context, volume, new_type, diff, host) else: self.assertTrue(self._driver.retype(context, volume, new_type, diff, host)) ds_sel.is_datastore_compliant.assert_called_once_with(datastore, new_profile) select_datastore.assert_called_once_with( {hub.DatastoreSelector.SIZE_BYTES: volume['size'] * units.Gi, hub.DatastoreSelector.PROFILE_NAME: new_profile}) get_dc.assert_called_once_with(rp) get_volume_group_folder.assert_called_once_with(dc, volume['project_id']) vops.clone_backing.assert_called_once_with( volume['name'], backing, None, volumeops.FULL_CLONE_TYPE, datastore, disk_type=new_disk_type, host=host, resource_pool=rp, folder=folder) if clone_error: exp_rename_calls = [mock.call(backing, uuid), mock.call(backing, volume['name'])] self.assertEqual(exp_rename_calls, 
                             vops.rename_backing.call_args_list)
        else:
            vops.rename_backing.assert_called_once_with(backing, uuid)
            vops.update_backing_uuid.assert_called_once_with(
                new_backing, volume['id'])
            vops.update_backing_disk_uuid.assert_called_once_with(
                new_backing, volume['id'])
            delete_temp_backing.assert_called_once_with(backing)
            vops.detach_disk_from_backing.assert_called_once_with(
                new_backing, disk_device)
            vops.attach_disk_to_backing.assert_called_once_with(
                new_backing, disk_device.capacityInKB, new_disk_type,
                new_adapter_type, None, disk_device.backing.fileName)
            vops.change_backing_profile.assert_called_once_with(
                new_backing, new_profile_id)

    def test_retype_with_diff_extra_spec_and_ds_compliance(self):
        self._test_retype_with_diff_extra_spec_and_ds_compliance()

    def test_retype_with_diff_extra_spec_ds_compliance_and_clone_error(self):
        self._test_retype_with_diff_extra_spec_and_ds_compliance(
            clone_error=True)

    @mock.patch.object(VMDK_DRIVER, 'volumeops')
    def test_extend_backing(self, vops):
        vmdk_path = mock.sentinel.vmdk_path
        vops.get_vmdk_path.return_value = vmdk_path
        dc = mock.sentinel.datacenter
        vops.get_dc.return_value = dc
        disk_type = mock.sentinel.disk_type
        eager_zero = (disk_type == "eagerZeroedThick")

        backing = mock.sentinel.backing
        new_size = 1
        self._driver._extend_backing(backing, new_size, disk_type)

        vops.get_vmdk_path.assert_called_once_with(backing)
        vops.get_dc.assert_called_once_with(backing)
        vops.extend_virtual_disk.assert_called_once_with(new_size,
                                                         vmdk_path,
                                                         dc,
                                                         eager_zero)

    @mock.patch.object(VMDK_DRIVER, 'session')
    @mock.patch('oslo_vmware.vim_util.get_vc_version')
    def test_get_vc_version(self, get_vc_version, session):
        self._driver.configuration.vmware_host_version = None

        version_str = '6.0.0'
        get_vc_version.return_value = version_str

        version = self._driver._get_vc_version()

        self.assertEqual(version_str, version)
        get_vc_version.assert_called_once_with(session)

    @mock.patch('oslo_vmware.vim_util.get_vc_version')
    def test_get_vc_version_override(self, get_vc_version):
        version = self._driver._get_vc_version()

        self.assertEqual(
            self._driver.configuration.vmware_host_version, version)
        get_vc_version.assert_not_called()

    @mock.patch('cinder.volume.drivers.vmware.vmdk.LOG')
    @ddt.data('5.5', '6.0')
    def test_validate_vcenter_version(self, version, log):
        # vCenter versions 5.5 and above should pass validation.
        self._driver._validate_vcenter_version(version)
        # A deprecation warning should be logged for vCenter versions which
        # are incompatible with the next minimum supported version.
        if not versionutils.is_compatible(
                self._driver.NEXT_MIN_SUPPORTED_VC_VERSION, version,
                same_major=False):
            log.warning.assert_called_once()
        else:
            log.warning.assert_not_called()

    def test_validate_vcenter_version_with_less_than_min_supported_version(
            self):
        # Validation should fail for a vCenter version below the minimum
        # supported version.
self.assertRaises(exceptions.VMwareDriverException, self._driver._validate_vcenter_version, '5.1') @mock.patch('oslo_vmware.vim_util.find_extension') @mock.patch('oslo_vmware.vim_util.register_extension') @mock.patch.object(VMDK_DRIVER, 'session') def _test_register_extension( self, session, register_extension, find_extension, ext_exists=False): if not ext_exists: find_extension.return_value = None self._driver._register_extension() find_extension.assert_called_once_with(session.vim, vmdk.EXTENSION_KEY) if not ext_exists: register_extension.assert_called_once_with( session.vim, vmdk.EXTENSION_KEY, vmdk.EXTENSION_TYPE, label='OpenStack Cinder') def test_register_extension(self): self._test_register_extension() def test_register_extension_with_existing_extension(self): self._test_register_extension(ext_exists=True) @mock.patch('oslo_vmware.vim_util.find_extension', return_value=None) @mock.patch('oslo_vmware.vim_util.register_extension') @mock.patch.object(VMDK_DRIVER, 'session') def test_concurrent_register_extension( self, session, register_extension, find_extension): register_extension.side_effect = exceptions.VimFaultException( ['InvalidArgument'], 'error') self._driver._register_extension() find_extension.assert_called_once_with(session.vim, vmdk.EXTENSION_KEY) register_extension.assert_called_once_with( session.vim, vmdk.EXTENSION_KEY, vmdk.EXTENSION_TYPE, label='OpenStack Cinder') @mock.patch('oslo_vmware.vim_util.find_extension', return_value=None) @mock.patch('oslo_vmware.vim_util.register_extension') @mock.patch.object(VMDK_DRIVER, 'session') def test_register_extension_failure( self, session, register_extension, find_extension): register_extension.side_effect = exceptions.VimFaultException( ['RuntimeFault'], 'error') self.assertRaises(exceptions.VimFaultException, self._driver._register_extension) find_extension.assert_called_once_with(session.vim, vmdk.EXTENSION_KEY) register_extension.assert_called_once_with( session.vim, vmdk.EXTENSION_KEY, vmdk.EXTENSION_TYPE, label='OpenStack Cinder') @mock.patch.object(VMDK_DRIVER, '_validate_params') @mock.patch('re.compile') @mock.patch.object(VMDK_DRIVER, '_create_session') @mock.patch.object(VMDK_DRIVER, '_get_vc_version') @mock.patch.object(VMDK_DRIVER, '_validate_vcenter_version') @mock.patch('oslo_vmware.pbm.get_pbm_wsdl_location') @mock.patch.object(VMDK_DRIVER, '_register_extension') @mock.patch('cinder.volume.drivers.vmware.volumeops.VMwareVolumeOps') @mock.patch('cinder.volume.drivers.vmware.datastore.DatastoreSelector') @mock.patch.object(VMDK_DRIVER, 'volumeops') @mock.patch.object(VMDK_DRIVER, 'session') def _test_do_setup( self, session, vops, ds_sel_cls, vops_cls, register_extension, get_pbm_wsdl_loc, validate_vc_version, get_vc_version, create_session, re_compile, validate_params, enable_pbm=True, ds_regex_pat=None, invalid_regex=False): mock_session = mock.Mock() create_session.return_value = mock_session if enable_pbm: ver_str = '5.5' pbm_wsdl = mock.sentinel.pbm_wsdl get_pbm_wsdl_loc.return_value = pbm_wsdl else: ver_str = '5.1' get_vc_version.return_value = ver_str cls_1 = mock.sentinel.cls_1 cls_2 = mock.sentinel.cls_2 cluster_refs = {'cls-1': cls_1, 'cls-2': cls_2} vops.get_cluster_refs.return_value = cluster_refs self._driver.configuration.vmware_datastore_regex = ds_regex_pat ds_regex = None if ds_regex_pat: if invalid_regex: re_compile.side_effect = re.error("error") else: ds_regex = mock.sentinel.ds_regex re_compile.return_value = ds_regex if ds_regex_pat and invalid_regex: 
self.assertRaises(cinder_exceptions.InvalidInput, self._driver.do_setup, mock.ANY) validate_params.assert_called_once_with() else: self._driver.do_setup(mock.ANY) validate_params.assert_called_once_with() create_session.assert_called_once_with() get_vc_version.assert_called_once_with() validate_vc_version.assert_called_once_with(ver_str) if enable_pbm: get_pbm_wsdl_loc.assert_called_once_with(ver_str) mock_session.pbm_wsdl_loc_set.assert_called_once_with(pbm_wsdl) self.assertEqual(enable_pbm, self._driver._storage_policy_enabled) register_extension.assert_called_once() vops_cls.assert_called_once_with( session, self._driver.configuration.vmware_max_objects_retrieval, vmdk.EXTENSION_KEY, vmdk.EXTENSION_TYPE) self.assertEqual(vops_cls.return_value, self._driver._volumeops) ds_sel_cls.assert_called_once_with( vops, session, self._driver.configuration.vmware_max_objects_retrieval, ds_regex=ds_regex) self.assertEqual(ds_sel_cls.return_value, self._driver._ds_sel) vops.get_cluster_refs.assert_called_once_with( self._driver.configuration.vmware_cluster_name) vops.build_backing_ref_cache.assert_called_once_with() self.assertEqual(list(cluster_refs.values()), list(self._driver._clusters)) if ds_regex_pat: re_compile.assert_called_once_with(ds_regex_pat) def test_do_setup(self): self._test_do_setup() def test_do_setup_with_pbm_disabled(self): self._test_do_setup(enable_pbm=False) @mock.patch.object(VMDK_DRIVER, '_validate_params') @mock.patch.object(VMDK_DRIVER, '_create_session') @mock.patch.object(VMDK_DRIVER, '_get_vc_version') @mock.patch.object(VMDK_DRIVER, '_validate_vcenter_version') @mock.patch('oslo_vmware.pbm.get_pbm_wsdl_location') def test_do_setup_with_invalid_pbm_wsdl( self, get_pbm_wsdl_loc, validate_vc_version, get_vc_version, create_session, validate_params): ver_str = '5.5' get_vc_version.return_value = ver_str get_pbm_wsdl_loc.return_value = None self.assertRaises(exceptions.VMwareDriverException, self._driver.do_setup, mock.ANY) validate_params.assert_called_once_with() create_session.assert_called_once_with() get_vc_version.assert_called_once_with() validate_vc_version.assert_called_once_with(ver_str) get_pbm_wsdl_loc.assert_called_once_with(ver_str) def test_do_setup_with_ds_regex(self): self._test_do_setup(ds_regex_pat='foo') def test_do_setup_with_invalid_ds_regex(self): self._test_do_setup(ds_regex_pat='(foo', invalid_regex=True) @mock.patch.object(VMDK_DRIVER, 'volumeops') def test_get_dc(self, vops): dc_1 = mock.sentinel.dc_1 dc_2 = mock.sentinel.dc_2 vops.get_dc.side_effect = [dc_1, dc_2] # cache miss rp_1 = vmware_fake.ManagedObjectReference(value='rp-1') rp_2 = vmware_fake.ManagedObjectReference(value='rp-2') self.assertEqual(dc_1, self._driver._get_dc(rp_1)) self.assertEqual(dc_2, self._driver._get_dc(rp_2)) self.assertDictEqual({'rp-1': dc_1, 'rp-2': dc_2}, self._driver._dc_cache) # cache hit self.assertEqual(dc_1, self._driver._get_dc(rp_1)) self.assertEqual(dc_2, self._driver._get_dc(rp_2)) vops.get_dc.assert_has_calls([mock.call(rp_1), mock.call(rp_2)]) @mock.patch.object(VMDK_DRIVER, '_get_storage_profile') @mock.patch.object(VMDK_DRIVER, '_select_datastore') @mock.patch.object(VMDK_DRIVER, '_get_dc') @mock.patch.object(VMDK_DRIVER, 'volumeops') @mock.patch.object(VMDK_DRIVER, '_get_volume_group_folder') @ddt.data(None, {vmdk.CREATE_PARAM_DISK_SIZE: 2 * VOL_SIZE}) def test_select_ds_for_volume( self, create_params, get_volume_group_folder, vops, get_dc, select_datastore, get_storage_profile): profile = mock.sentinel.profile get_storage_profile.return_value = 
profile host = mock.sentinel.host rp = mock.sentinel.rp summary = mock.sentinel.summary select_datastore.return_value = (host, rp, summary) dc = mock.sentinel.dc get_dc.return_value = dc folder = mock.sentinel.folder get_volume_group_folder.return_value = folder vol = self._create_volume_dict() ret = self._driver._select_ds_for_volume( vol, host=host, create_params=create_params) self.assertEqual((host, rp, folder, summary), ret) if create_params: exp_size = create_params[vmdk.CREATE_PARAM_DISK_SIZE] * units.Gi else: exp_size = vol['size'] * units.Gi exp_req = {hub.DatastoreSelector.SIZE_BYTES: exp_size, hub.DatastoreSelector.PROFILE_NAME: profile} select_datastore.assert_called_once_with(exp_req, host) get_dc.assert_called_once_with(rp) get_volume_group_folder.assert_called_once_with(dc, vol['project_id']) @mock.patch.object(VMDK_DRIVER, 'volumeops') @mock.patch.object(VMDK_DRIVER, '_get_storage_profile_id') def _test_get_connection_info( self, get_storage_profile_id, vops, vmdk_connector=False): volume = self._create_volume_obj() backing = vmware_fake.ManagedObjectReference(value='ref-1') profile_id = mock.sentinel.profile_id get_storage_profile_id.return_value = profile_id if vmdk_connector: vmdk_path = mock.sentinel.vmdk_path vops.get_vmdk_path.return_value = vmdk_path datastore = vmware_fake.ManagedObjectReference(value='ds-1') vops.get_datastore.return_value = datastore datacenter = vmware_fake.ManagedObjectReference(value='dc-1') vops.get_dc.return_value = datacenter connector = {'platform': mock.sentinel.platform, 'os_type': mock.sentinel.os_type} else: connector = {'instance': 'vm-1'} ret = self._driver._get_connection_info(volume, backing, connector) self.assertEqual('vmdk', ret['driver_volume_type']) self.assertEqual('ref-1', ret['data']['volume']) self.assertEqual(volume.id, ret['data']['volume_id']) self.assertEqual(volume.name, ret['data']['name']) self.assertEqual(profile_id, ret['data']['profile_id']) if vmdk_connector: self.assertEqual(volume.size * units.Gi, ret['data']['vmdk_size']) self.assertEqual(vmdk_path, ret['data']['vmdk_path']) self.assertEqual('ds-1', ret['data']['datastore']) self.assertEqual('dc-1', ret['data']['datacenter']) config = self._driver.configuration exp_config = { 'vmware_host_ip': config.vmware_host_ip, 'vmware_host_port': config.vmware_host_port, 'vmware_host_username': config.vmware_host_username, 'vmware_host_password': config.vmware_host_password, 'vmware_api_retry_count': config.vmware_api_retry_count, 'vmware_task_poll_interval': config.vmware_task_poll_interval, 'vmware_ca_file': config.vmware_ca_file, 'vmware_insecure': config.vmware_insecure, 'vmware_tmp_dir': config.vmware_tmp_dir, 'vmware_image_transfer_timeout_secs': config.vmware_image_transfer_timeout_secs, } self.assertEqual(exp_config, ret['data']['config']) def test_get_connection_info(self): self._test_get_connection_info() def test_get_connection_info_vmdk_connector(self): self._test_get_connection_info(vmdk_connector=True) @mock.patch.object(VMDK_DRIVER, 'volumeops') @mock.patch('oslo_vmware.vim_util.get_moref') @mock.patch.object(VMDK_DRIVER, '_create_backing') @mock.patch.object(VMDK_DRIVER, '_relocate_backing') @mock.patch.object(VMDK_DRIVER, '_get_connection_info') def _test_initialize_connection( self, get_connection_info, relocate_backing, create_backing, get_moref, vops, backing_exists=True, instance_exists=True): backing_val = mock.sentinel.backing_val backing = vmware_fake.ManagedObjectReference(value=backing_val) if backing_exists: vops.get_backing.return_value = 
backing else: vops.get_backing.return_value = None create_backing.return_value = backing if instance_exists: instance_val = mock.sentinel.instance_val connector = {'instance': instance_val} instance_moref = mock.sentinel.instance_moref get_moref.return_value = instance_moref host = mock.sentinel.host vops.get_host.return_value = host else: connector = {} conn_info = mock.sentinel.conn_info get_connection_info.return_value = conn_info volume = self._create_volume_obj() ret = self._driver.initialize_connection(volume, connector) self.assertEqual(conn_info, ret) if instance_exists: vops.get_host.assert_called_once_with(instance_moref) if backing_exists: relocate_backing.assert_called_once_with(volume, backing, host) create_backing.assert_not_called() else: create_backing.assert_called_once_with(volume, host) relocate_backing.assert_not_called() elif not backing_exists: create_backing.assert_called_once_with(volume) relocate_backing.assert_not_called() else: create_backing.assert_not_called() relocate_backing.assert_not_called() get_connection_info.assert_called_once_with(volume, backing, connector) def test_initialize_connection_with_instance_and_backing(self): self._test_initialize_connection() def test_initialize_connection_with_instance_and_no_backing(self): self._test_initialize_connection(backing_exists=False) def test_initialize_connection_with_no_instance_and_no_backing(self): self._test_initialize_connection( backing_exists=False, instance_exists=False) def test_initialize_connection_with_no_instance_and_backing(self): self._test_initialize_connection(instance_exists=False) @mock.patch.object(VMDK_DRIVER, 'volumeops') def _test_get_volume_group_folder(self, vops, snapshot=False): folder = mock.sentinel.folder vops.create_vm_inventory_folder.return_value = folder datacenter = mock.sentinel.dc project_id = '63c19a12292549818c09946a5e59ddaf' self.assertEqual(folder, self._driver._get_volume_group_folder( datacenter, project_id, snapshot=snapshot)) project_folder_name = 'Project (%s)' % project_id exp_folder_names = ['OpenStack', project_folder_name, self.VOLUME_FOLDER] if snapshot: exp_folder_names.append('Snapshots') vops.create_vm_inventory_folder.assert_called_once_with( datacenter, exp_folder_names) def test_get_volume_group_folder(self): self._test_get_volume_group_folder() def test_get_volume_group_folder_for_snapshot(self): self._test_get_volume_group_folder(snapshot=True) @mock.patch('cinder.volume.drivers.vmware.vmdk.' '_get_volume_type_extra_spec') @ddt.data('full', 'linked') def test_get_clone_type(self, clone_type, get_volume_type_extra_spec): get_volume_type_extra_spec.return_value = clone_type volume = self._create_volume_dict() self.assertEqual(clone_type, self._driver._get_clone_type(volume)) get_volume_type_extra_spec.assert_called_once_with( volume['volume_type_id'], 'clone_type', default_value=volumeops.FULL_CLONE_TYPE) @mock.patch('cinder.volume.drivers.vmware.vmdk.' 
'_get_volume_type_extra_spec') def test_get_clone_type_invalid( self, get_volume_type_extra_spec): get_volume_type_extra_spec.return_value = 'foo' volume = self._create_volume_dict() self.assertRaises( cinder_exceptions.Invalid, self._driver._get_clone_type, volume) get_volume_type_extra_spec.assert_called_once_with( volume['volume_type_id'], 'clone_type', default_value=volumeops.FULL_CLONE_TYPE) @mock.patch.object(VMDK_DRIVER, '_get_disk_type') @mock.patch.object(VMDK_DRIVER, 'volumeops') @mock.patch.object(VMDK_DRIVER, '_select_ds_for_volume') @mock.patch.object(VMDK_DRIVER, '_extend_backing') def _test_clone_backing( self, extend_backing, select_ds_for_volume, vops, get_disk_type, clone_type=volumeops.FULL_CLONE_TYPE, extend_needed=False, vc60=False): host = mock.sentinel.host rp = mock.sentinel.rp folder = mock.sentinel.folder datastore = mock.sentinel.datastore summary = mock.Mock(datastore=datastore) select_ds_for_volume.return_value = (host, rp, folder, summary) clone = mock.sentinel.clone vops.clone_backing.return_value = clone disk_type = mock.sentinel.disk_type get_disk_type.return_value = disk_type if vc60: self._driver._vc_version = '6.0' else: self._driver._vc_version = '5.5' src_vsize = 1 if extend_needed: size = 2 else: size = 1 volume = self._create_volume_obj(size=size) backing = mock.sentinel.backing snapshot = mock.sentinel.snapshot self._driver._clone_backing( volume, backing, snapshot, clone_type, src_vsize) extra_config = {vmdk.EXTRA_CONFIG_VOLUME_ID_KEY: volume['id'], volumeops.BACKING_UUID_KEY: volume['id']} if volume.size > src_vsize or clone_type == volumeops.FULL_CLONE_TYPE: vops.clone_backing.assert_called_once_with( volume.name, backing, snapshot, volumeops.FULL_CLONE_TYPE, datastore, host=host, resource_pool=rp, extra_config=extra_config, folder=folder) vops.update_backing_disk_uuid.assert_called_once_with(clone, volume.id) else: vops.clone_backing.assert_called_once_with( volume.name, backing, snapshot, volumeops.LINKED_CLONE_TYPE, None, host=None, resource_pool=None, extra_config=extra_config, folder=None) if not vc60: vops.update_backing_disk_uuid.assert_called_once_with( clone, volume.id) else: vops.update_backing_disk_uuid.assert_not_called() if volume.size > src_vsize: extend_backing.assert_called_once_with(clone, volume.size, disk_type) else: extend_backing.assert_not_called() @ddt.data(volumeops.FULL_CLONE_TYPE, volumeops.LINKED_CLONE_TYPE) def test_clone_backing(self, clone_type): self._test_clone_backing(clone_type=clone_type) @ddt.data(volumeops.FULL_CLONE_TYPE, volumeops.LINKED_CLONE_TYPE) def test_clone_backing_with_extend(self, clone_type): self._test_clone_backing(clone_type=clone_type, extend_needed=True) def test_clone_backing_linked_vc_60(self): self._test_clone_backing( clone_type=volumeops.LINKED_CLONE_TYPE, vc60=True) @mock.patch.object(VMDK_DRIVER, '_get_template_by_inv_path') @mock.patch('oslo_utils.uuidutils.generate_uuid') @mock.patch.object(VMDK_DRIVER, '_select_ds_for_volume') @mock.patch.object(VMDK_DRIVER, '_get_disk_type') @mock.patch.object(VMDK_DRIVER, 'volumeops') @mock.patch.object(VMDK_DRIVER, '_create_volume_from_temp_backing') def test_create_volume_from_template( self, create_volume_from_temp_backing, vops, get_disk_type, select_ds_for_volume, generate_uuid, get_template_by_inv_path): template = mock.sentinel.template get_template_by_inv_path.return_value = template tmp_name = 'de4c648c-8403-4dcc-b14a-d2541b7cba2b' generate_uuid.return_value = tmp_name host = mock.sentinel.host rp = mock.sentinel.rp folder = 
mock.sentinel.folder datastore = mock.sentinel.datastore summary = mock.Mock(datastore=datastore) select_ds_for_volume.return_value = (host, rp, folder, summary) disk_type = mock.sentinel.disk_type get_disk_type.return_value = disk_type tmp_backing = mock.sentinel.tmp_backing vops.clone_backing.return_value = tmp_backing volume = self._create_volume_obj() inv_path = mock.sentinel.inv_path self._driver._create_volume_from_template(volume, inv_path) get_template_by_inv_path.assert_called_once_with(inv_path) select_ds_for_volume.assert_called_once_with(volume) get_disk_type.assert_called_once_with(volume) vops.clone_backing.assert_called_once_with(tmp_name, template, None, volumeops.FULL_CLONE_TYPE, datastore, disk_type=disk_type, host=host, resource_pool=rp, folder=folder) create_volume_from_temp_backing.assert_called_once_with(volume, tmp_backing) @mock.patch.object(VMDK_DRIVER, 'volumeops') @mock.patch.object(VMDK_DRIVER, '_clone_backing') def test_create_volume_from_snapshot_without_backing(self, clone_backing, vops): vops.get_backing.return_value = None volume = self._create_volume_dict() src_vref = self._create_volume_dict(vol_id=self.SRC_VOL_ID) snapshot = self._create_snapshot_dict(src_vref) self._driver.create_volume_from_snapshot(volume, snapshot) vops.get_backing.assert_called_once_with(snapshot['volume_name'], snapshot['volume']['id']) clone_backing.assert_not_called() @mock.patch.object(VMDK_DRIVER, 'volumeops') @mock.patch.object(VMDK_DRIVER, '_clone_backing') def test_create_volume_from_snapshot_without_backing_snapshot( self, clone_backing, vops): backing = mock.sentinel.backing vops.get_backing.return_value = backing vops.get_snapshot.return_value = None volume = self._create_volume_dict() src_vref = self._create_volume_dict(vol_id=self.SRC_VOL_ID) snapshot = self._create_snapshot_dict(src_vref) self._driver.create_volume_from_snapshot(volume, snapshot) vops.get_backing.assert_called_once_with(snapshot['volume_name'], snapshot['volume']['id']) vops.get_snapshot.assert_called_once_with(backing, snapshot['name']) clone_backing.assert_not_called() @mock.patch.object(VMDK_DRIVER, 'volumeops') @mock.patch.object(VMDK_DRIVER, '_get_clone_type') @mock.patch.object(VMDK_DRIVER, '_create_volume_from_template') @mock.patch.object(VMDK_DRIVER, '_clone_backing') def _test_create_volume_from_snapshot( self, clone_backing, create_volume_from_template, get_clone_type, vops, template=False): backing = mock.sentinel.backing vops.get_backing.return_value = backing snapshot_moref = mock.sentinel.snap_moref vops.get_snapshot.return_value = snapshot_moref get_clone_type.return_value = volumeops.FULL_CLONE_TYPE volume = self._create_volume_dict() src_vref = self._create_volume_dict(vol_id=self.SRC_VOL_ID) if template: provider_location = mock.sentinel.inv_path else: provider_location = None snapshot = self._create_snapshot_dict( src_vref, provider_location=provider_location) self._driver.create_volume_from_snapshot(volume, snapshot) vops.get_backing.assert_called_once_with(snapshot['volume_name'], snapshot['volume']['id']) if template: create_volume_from_template.assert_called_once_with( volume, mock.sentinel.inv_path) else: vops.get_snapshot.assert_called_once_with(backing, snapshot['name']) get_clone_type.assert_called_once_with(volume) clone_backing.assert_called_once_with( volume, backing, snapshot_moref, volumeops.FULL_CLONE_TYPE, snapshot['volume_size']) def test_create_volume_from_snapshot(self): self._test_create_volume_from_snapshot() def test_create_volume_from_snapshot_template(self): 
self._test_create_volume_from_snapshot(template=True) @mock.patch.object(VMDK_DRIVER, 'session') def test_get_volume_device_uuid(self, session): dev_uuid = mock.sentinel.dev_uuid opt_val = mock.Mock(value=dev_uuid) session.invoke_api.return_value = opt_val instance = mock.sentinel.instance ret = self._driver._get_volume_device_uuid(instance, self.VOL_ID) self.assertEqual(dev_uuid, ret) exp_prop = 'config.extraConfig["volume-%s"]' % self.VOL_ID session.invoke_api.assert_called_once_with( vim_util, 'get_object_property', session.vim, instance, exp_prop) @mock.patch.object(VMDK_DRIVER, 'volumeops') @mock.patch.object(VMDK_DRIVER, '_get_volume_device_uuid') @mock.patch('oslo_utils.uuidutils.generate_uuid') def test_create_temp_backing_from_attached_vmdk( self, generate_uuid, get_volume_device_uuid, vops): instance = mock.sentinel.instance vops.get_backing_by_uuid.return_value = instance vol_dev_uuid = mock.sentinel.vol_dev_uuid get_volume_device_uuid.return_value = vol_dev_uuid tmp_name = mock.sentinel.tmp_name generate_uuid.return_value = tmp_name tmp_backing = mock.sentinel.tmp_backing vops.clone_backing.return_value = tmp_backing instance_uuid = fake_constants.INSTANCE_ID attachment = fake_volume.fake_db_volume_attachment( instance_uuid=instance_uuid) src_vref = self._create_volume_dict(vol_id=fake_constants.VOLUME_ID, attachment=[attachment]) host = mock.sentinel.host rp = mock.sentinel.rp folder = mock.sentinel.folder datastore = mock.sentinel.datastore ret = self._driver._create_temp_backing_from_attached_vmdk( src_vref, host, rp, folder, datastore) self.assertEqual(tmp_backing, ret) vops.get_backing_by_uuid.assert_called_once_with(instance_uuid) get_volume_device_uuid.assert_called_once_with(instance, src_vref['id']) vops.clone_backing.assert_called_once_with( tmp_name, instance, None, volumeops.FULL_CLONE_TYPE, datastore, host=host, resource_pool=rp, folder=folder, disks_to_clone=[vol_dev_uuid]) @mock.patch.object(VMDK_DRIVER, '_get_disk_type') @mock.patch.object(VMDK_DRIVER, 'volumeops') @mock.patch.object(VMDK_DRIVER, '_extend_backing') def _test_extend_backing_if_needed( self, extend_backing, vops, get_disk_type, extend=True): disk_type = mock.sentinel.disk_type get_disk_type.return_value = disk_type if extend: vol_size = 2 else: vol_size = 1 vops.get_disk_size.return_value = units.Gi volume = self._create_volume_obj(size=vol_size) backing = mock.sentinel.backing self._driver._extend_if_needed(volume, backing) vops.get_disk_size.assert_called_once_with(backing) if extend: extend_backing.assert_called_once_with(backing, vol_size, disk_type) else: extend_backing.assert_not_called() def test_extend_backing_if_needed(self): self._test_extend_backing_if_needed() def test_extend_backing_if_needed_no_extend(self): self._test_extend_backing_if_needed(extend=False) @mock.patch.object(VMDK_DRIVER, 'volumeops') @mock.patch.object(VMDK_DRIVER, '_manage_existing_int') @mock.patch.object(VMDK_DRIVER, '_extend_if_needed') @mock.patch.object(VMDK_DRIVER, '_delete_temp_backing') def test_create_volume_from_temp_backing( self, delete_temp_backing, extend_if_needed, manage_existing_int, vops): disk_device = mock.sentinel.disk_device vops._get_disk_device.return_value = disk_device backing = mock.sentinel.backing manage_existing_int.return_value = backing volume = self._create_volume_dict() tmp_backing = mock.sentinel.tmp_backing self._driver._create_volume_from_temp_backing(volume, tmp_backing) vops._get_disk_device.assert_called_once_with(tmp_backing) manage_existing_int.assert_called_once_with( 
volume, tmp_backing, disk_device) extend_if_needed.assert_called_once_with(volume, backing) delete_temp_backing.assert_called_once_with(tmp_backing) @mock.patch.object(VMDK_DRIVER, '_select_ds_for_volume') @mock.patch.object(VMDK_DRIVER, '_create_temp_backing_from_attached_vmdk') @mock.patch.object(VMDK_DRIVER, '_create_volume_from_temp_backing') def test_clone_attached_volume( self, create_volume_from_temp_backing, create_temp_backing_from_attached_vmdk, select_ds_for_volume): host = mock.sentinel.host rp = mock.sentinel.rp folder = mock.sentinel.folder datastore = mock.sentinel.datastore summary = mock.Mock(datastore=datastore) select_ds_for_volume.return_value = (host, rp, folder, summary) tmp_backing = mock.sentinel.tmp_backing create_temp_backing_from_attached_vmdk.return_value = tmp_backing src_vref = mock.sentinel.src_vref volume = mock.sentinel.volume self._driver._clone_attached_volume(src_vref, volume) select_ds_for_volume.assert_called_once_with(volume) create_temp_backing_from_attached_vmdk.assert_called_once_with( src_vref, host, rp, folder, datastore) create_volume_from_temp_backing.assert_called_once_with( volume, tmp_backing) @mock.patch.object(VMDK_DRIVER, 'volumeops') @mock.patch.object(VMDK_DRIVER, '_clone_backing') def test_create_cloned_volume_without_backing(self, clone_backing, vops): vops.get_backing.return_value = None volume = self._create_volume_dict() src_vref = self._create_volume_dict(vol_id=self.SRC_VOL_ID) self._driver.create_cloned_volume(volume, src_vref) vops.get_backing.assert_called_once_with(src_vref['name'], src_vref['id']) clone_backing.assert_not_called() @mock.patch.object(VMDK_DRIVER, 'volumeops') @mock.patch.object(VMDK_DRIVER, '_get_clone_type') @mock.patch.object(VMDK_DRIVER, '_in_use', return_value=False) @mock.patch.object(VMDK_DRIVER, '_clone_backing') def test_create_cloned_volume( self, clone_backing, in_use, get_clone_type, vops): backing = mock.sentinel.backing vops.get_backing.return_value = backing get_clone_type.return_value = volumeops.FULL_CLONE_TYPE volume = self._create_volume_dict() src_vref = self._create_volume_dict(vol_id=self.SRC_VOL_ID) self._driver.create_cloned_volume(volume, src_vref) vops.get_backing.assert_called_once_with(src_vref['name'], src_vref['id']) get_clone_type.assert_called_once_with(volume) clone_backing.assert_called_once_with( volume, backing, None, volumeops.FULL_CLONE_TYPE, src_vref['size']) @mock.patch.object(VMDK_DRIVER, 'volumeops') @mock.patch.object(VMDK_DRIVER, '_get_clone_type') @mock.patch.object(VMDK_DRIVER, '_in_use', return_value=False) @mock.patch.object(VMDK_DRIVER, '_clone_backing') def test_create_cloned_volume_linked( self, clone_backing, in_use, get_clone_type, vops): backing = mock.sentinel.backing vops.get_backing.return_value = backing get_clone_type.return_value = volumeops.LINKED_CLONE_TYPE temp_snapshot = mock.sentinel.temp_snapshot vops.create_snapshot.return_value = temp_snapshot volume = self._create_volume_dict() src_vref = self._create_volume_dict(vol_id=self.SRC_VOL_ID) self._driver.create_cloned_volume(volume, src_vref) vops.get_backing.assert_called_once_with(src_vref['name'], src_vref['id']) get_clone_type.assert_called_once_with(volume) temp_snap_name = 'temp-snapshot-%s' % volume['id'] vops.create_snapshot.assert_called_once_with( backing, temp_snap_name, None) clone_backing.assert_called_once_with( volume, backing, temp_snapshot, volumeops.LINKED_CLONE_TYPE, src_vref['size']) vops.delete_snapshot.assert_called_once_with(backing, temp_snap_name) 
@mock.patch.object(VMDK_DRIVER, 'volumeops') @mock.patch.object(VMDK_DRIVER, '_get_clone_type') @mock.patch.object(VMDK_DRIVER, '_clone_backing') def test_create_cloned_volume_linked_when_attached( self, clone_backing, get_clone_type, vops): backing = mock.sentinel.backing vops.get_backing.return_value = backing get_clone_type.return_value = volumeops.LINKED_CLONE_TYPE volume = self._create_volume_dict() src_vref = self._create_volume_dict(vol_id=self.SRC_VOL_ID, status='in-use') self.assertRaises(cinder_exceptions.InvalidVolume, self._driver.create_cloned_volume, volume, src_vref) vops.get_backing.assert_called_once_with(src_vref['name'], src_vref['id']) get_clone_type.assert_called_once_with(volume) @mock.patch.object(VMDK_DRIVER, 'volumeops') @mock.patch.object(VMDK_DRIVER, '_get_clone_type') @mock.patch.object(VMDK_DRIVER, '_in_use', return_value=True) @mock.patch.object(VMDK_DRIVER, '_clone_attached_volume') def test_create_cloned_volume_when_attached( self, clone_attached_volume, in_use, get_clone_type, vops): backing = mock.sentinel.backing vops.get_backing.return_value = backing get_clone_type.return_value = volumeops.FULL_CLONE_TYPE volume = self._create_volume_dict(status='in-use') src_vref = self._create_volume_dict(vol_id=self.SRC_VOL_ID) self._driver.create_cloned_volume(volume, src_vref) vops.get_backing.assert_called_once_with(src_vref['name'], src_vref['id']) get_clone_type.assert_called_once_with(volume) clone_attached_volume.assert_called_once_with(src_vref, volume) @mock.patch('cinder.volume.drivers.vmware.vmdk.' '_get_volume_type_extra_spec') def test_get_extra_spec_storage_profile(self, get_volume_type_extra_spec): vol_type_id = mock.sentinel.vol_type_id self._driver._get_extra_spec_storage_profile(vol_type_id) get_volume_type_extra_spec.assert_called_once_with(vol_type_id, 'storage_profile') @mock.patch.object(VMDK_DRIVER, '_get_extra_spec_storage_profile') def test_get_storage_profile(self, get_extra_spec_storage_profile): volume = self._create_volume_dict() self._driver._get_storage_profile(volume) get_extra_spec_storage_profile.assert_called_once_with( volume['volume_type_id']) @mock.patch.object(VMDK_DRIVER, '_get_storage_profile') @mock.patch.object(VMDK_DRIVER, 'session') @mock.patch('oslo_vmware.pbm.get_profile_id_by_name') def test_get_storage_profile_id( self, get_profile_id_by_name, session, get_storage_profile): get_storage_profile.return_value = 'gold' profile_id = mock.sentinel.profile_id get_profile_id_by_name.return_value = mock.Mock(uniqueId=profile_id) self._driver._storage_policy_enabled = True volume = self._create_volume_dict() self.assertEqual(profile_id, self._driver._get_storage_profile_id(volume)) get_storage_profile.assert_called_once_with(volume) get_profile_id_by_name.assert_called_once_with(session, 'gold') @mock.patch.object(VMDK_DRIVER, '_get_storage_profile') @mock.patch.object(VMDK_DRIVER, 'session') @mock.patch('oslo_vmware.pbm.get_profile_id_by_name') def test_get_storage_profile_id_with_missing_extra_spec( self, get_profile_id_by_name, session, get_storage_profile): get_storage_profile.return_value = None self._driver._storage_policy_enabled = True volume = self._create_volume_dict() self.assertIsNone(self._driver._get_storage_profile_id(volume)) get_storage_profile.assert_called_once_with(volume) self.assertFalse(get_profile_id_by_name.called) @mock.patch.object(VMDK_DRIVER, '_get_storage_profile') @mock.patch.object(VMDK_DRIVER, 'session') @mock.patch('oslo_vmware.pbm.get_profile_id_by_name') def 
test_get_storage_profile_id_with_pbm_disabled(
            self, get_profile_id_by_name, session, get_storage_profile):
        get_storage_profile.return_value = 'gold'

        volume = self._create_volume_dict()
        self.assertIsNone(self._driver._get_storage_profile_id(volume))
        get_storage_profile.assert_called_once_with(volume)
        self.assertFalse(get_profile_id_by_name.called)

    @mock.patch.object(VMDK_DRIVER, '_get_storage_profile')
    @mock.patch.object(VMDK_DRIVER, 'session')
    @mock.patch('oslo_vmware.pbm.get_profile_id_by_name')
    def test_get_storage_profile_id_with_missing_profile(
            self, get_profile_id_by_name, session, get_storage_profile):
        get_storage_profile.return_value = 'gold'
        get_profile_id_by_name.return_value = None
        self._driver._storage_policy_enabled = True

        volume = self._create_volume_dict()
        self.assertIsNone(self._driver._get_storage_profile_id(volume))
        get_storage_profile.assert_called_once_with(volume)
        get_profile_id_by_name.assert_called_once_with(session, 'gold')

    @mock.patch.object(VMDK_DRIVER, 'volumeops')
    @mock.patch.object(VMDK_DRIVER, 'session')
    @mock.patch('cinder.image.image_utils.TemporaryImages.for_image_service')
    @mock.patch('cinder.volume.drivers.vmware.vmdk.open', create=True)
    @mock.patch('oslo_vmware.image_transfer.download_file')
    @mock.patch('oslo_vmware.image_transfer.download_flat_image')
    def _test_copy_image(self, download_flat_image, download_file, mock_open,
                         temp_images_img_service, session, vops,
                         expected_cacerts=False, use_temp_image=False):
        dc_name = mock.sentinel.dc_name
        vops.get_entity_name.return_value = dc_name
        mock_get = mock.Mock(return_value=None)
        tmp_images = mock.Mock(get=mock_get)
        temp_images_img_service.return_value = tmp_images
        if use_temp_image:
            mock_get.return_value = '/tmp/foo'
            mock_open_ret = mock.Mock()
            mock_open_ret.__enter__ = mock.Mock(
                return_value=mock.sentinel.read_handle)
            mock_open_ret.__exit__ = mock.Mock()
            mock_open.return_value = mock_open_ret

        context = mock.sentinel.context
        dc_ref = mock.sentinel.dc_ref
        image_service = mock.sentinel.image_service
        image_id = mock.sentinel.image_id
        image_size_in_bytes = 102400
        ds_name = mock.sentinel.ds_name
        upload_file_path = mock.sentinel.upload_file_path
        self._driver._copy_image(
            context, dc_ref, image_service, image_id, image_size_in_bytes,
            ds_name, upload_file_path)

        vops.get_entity_name.assert_called_once_with(dc_ref)
        cookies = session.vim.client.cookiejar
        if use_temp_image:
            mock_open.assert_called_once_with('/tmp/foo', 'rb')
            download_file.assert_called_once_with(
                mock.sentinel.read_handle,
                self._config.vmware_host_ip,
                self._config.vmware_host_port,
                dc_name,
                ds_name,
                cookies,
                upload_file_path,
                image_size_in_bytes,
                expected_cacerts,
                self._config.vmware_image_transfer_timeout_secs)
        else:
            download_flat_image.assert_called_once_with(
                context,
                self._config.vmware_image_transfer_timeout_secs,
                image_service,
                image_id,
                image_size=image_size_in_bytes,
                host=self._config.vmware_host_ip,
                port=self._config.vmware_host_port,
                data_center_name=dc_name,
                datastore_name=ds_name,
                cookies=cookies,
                file_path=upload_file_path,
                cacerts=expected_cacerts)

    def test_copy_image(self):
        # Default value of vmware_ca_file is not None; it should be passed
        # to download_flat_image as cacerts.
        self._test_copy_image(expected_cacerts=self._config.vmware_ca_file)

    def test_copy_image_insecure(self):
        # Set config options to allow insecure connections.
        self._config.vmware_ca_file = None
        self._config.vmware_insecure = True
        # Since vmware_ca_file is unset and vmware_insecure is True,
        # download_flat_image should be called with cacerts=False.
self._test_copy_image() def test_copy_temp_image(self): self._test_copy_image(expected_cacerts=self._config.vmware_ca_file, use_temp_image=True) @mock.patch.object(VMDK_DRIVER, '_select_ds_for_volume') @mock.patch.object(VMDK_DRIVER, '_get_storage_profile_id') @mock.patch.object(VMDK_DRIVER, 'volumeops') @mock.patch.object(VMDK_DRIVER, '_get_disk_type') @mock.patch.object(VMDK_DRIVER, '_get_adapter_type') def _test_create_backing( self, get_adapter_type, get_disk_type, vops, get_storage_profile_id, select_ds_for_volume, create_params=None): create_params = create_params or {} host = mock.sentinel.host resource_pool = mock.sentinel.resource_pool folder = mock.sentinel.folder summary = mock.sentinel.summary select_ds_for_volume.return_value = (host, resource_pool, folder, summary) profile_id = mock.sentinel.profile_id get_storage_profile_id.return_value = profile_id backing = mock.sentinel.backing vops.create_backing_disk_less.return_value = backing vops.create_backing.return_value = backing disk_type = mock.sentinel.disk_type get_disk_type.return_value = disk_type adapter_type = mock.sentinel.adapter_type get_adapter_type.return_value = adapter_type volume = self._create_volume_dict() ret = self._driver._create_backing(volume, host, create_params) self.assertEqual(backing, ret) select_ds_for_volume.assert_called_once_with(volume, host) get_storage_profile_id.assert_called_once_with(volume) exp_extra_config = {vmdk.EXTRA_CONFIG_VOLUME_ID_KEY: volume['id'], volumeops.BACKING_UUID_KEY: volume['id']} if create_params.get(vmdk.CREATE_PARAM_DISK_LESS): vops.create_backing_disk_less.assert_called_once_with( volume['name'], folder, resource_pool, host, summary.name, profileId=profile_id, extra_config=exp_extra_config) vops.update_backing_disk_uuid.assert_not_called() else: get_disk_type.assert_called_once_with(volume) get_adapter_type.assert_called_once_with(volume) exp_backing_name = ( create_params.get(vmdk.CREATE_PARAM_BACKING_NAME) or volume['name']) exp_adapter_type = ( create_params.get(vmdk.CREATE_PARAM_ADAPTER_TYPE) or adapter_type) vops.create_backing.assert_called_once_with( exp_backing_name, volume['size'] * units.Mi, disk_type, folder, resource_pool, host, summary.name, profileId=profile_id, adapter_type=exp_adapter_type, extra_config=exp_extra_config) vops.update_backing_disk_uuid.assert_called_once_with(backing, volume['id']) def test_create_backing_disk_less(self): create_params = {vmdk.CREATE_PARAM_DISK_LESS: True} self._test_create_backing(create_params=create_params) def test_create_backing_with_adapter_type_override(self): create_params = {vmdk.CREATE_PARAM_ADAPTER_TYPE: 'ide'} self._test_create_backing(create_params=create_params) def test_create_backing_with_backing_name_override(self): create_params = {vmdk.CREATE_PARAM_BACKING_NAME: 'foo'} self._test_create_backing(create_params=create_params) def test_create_backing(self): self._test_create_backing() @mock.patch.object(VMDK_DRIVER, 'volumeops') def test_get_hosts(self, vops): host_1 = mock.sentinel.host_1 host_2 = mock.sentinel.host_2 host_3 = mock.sentinel.host_3 vops.get_cluster_hosts.side_effect = [[host_1, host_2], [host_3]] cls_1 = mock.sentinel.cls_1 cls_2 = mock.sentinel.cls_2 self.assertEqual([host_1, host_2, host_3], self._driver._get_hosts([cls_1, cls_2])) exp_calls = [mock.call(cls_1), mock.call(cls_2)] self.assertEqual(exp_calls, vops.get_cluster_hosts.call_args_list) @mock.patch.object(VMDK_DRIVER, '_get_hosts') @mock.patch.object(VMDK_DRIVER, 'ds_sel') def test_select_datastore(self, ds_sel, get_hosts): cls_1 = 
mock.sentinel.cls_1 cls_2 = mock.sentinel.cls_2 self._driver._clusters = [cls_1, cls_2] host_1 = mock.sentinel.host_1 host_2 = mock.sentinel.host_2 host_3 = mock.sentinel.host_3 get_hosts.return_value = [host_1, host_2, host_3] best_candidate = mock.sentinel.best_candidate ds_sel.select_datastore.return_value = best_candidate req = mock.sentinel.req self.assertEqual(best_candidate, self._driver._select_datastore(req)) get_hosts.assert_called_once_with(self._driver._clusters) ds_sel.select_datastore.assert_called_once_with( req, hosts=[host_1, host_2, host_3]) @mock.patch.object(VMDK_DRIVER, '_get_hosts') @mock.patch.object(VMDK_DRIVER, 'ds_sel') def test_select_datastore_with_no_best_candidate(self, ds_sel, get_hosts): cls_1 = mock.sentinel.cls_1 cls_2 = mock.sentinel.cls_2 self._driver._clusters = [cls_1, cls_2] host_1 = mock.sentinel.host_1 host_2 = mock.sentinel.host_2 host_3 = mock.sentinel.host_3 get_hosts.return_value = [host_1, host_2, host_3] ds_sel.select_datastore.return_value = () req = mock.sentinel.req self.assertRaises(vmdk_exceptions.NoValidDatastoreException, self._driver._select_datastore, req) get_hosts.assert_called_once_with(self._driver._clusters) ds_sel.select_datastore.assert_called_once_with( req, hosts=[host_1, host_2, host_3]) @mock.patch.object(VMDK_DRIVER, '_get_hosts') @mock.patch.object(VMDK_DRIVER, 'ds_sel') def test_select_datastore_with_single_host(self, ds_sel, get_hosts): best_candidate = mock.sentinel.best_candidate ds_sel.select_datastore.return_value = best_candidate req = mock.sentinel.req host_1 = mock.sentinel.host_1 self.assertEqual(best_candidate, self._driver._select_datastore(req, host_1)) ds_sel.select_datastore.assert_called_once_with(req, hosts=[host_1]) self.assertFalse(get_hosts.called) @mock.patch.object(VMDK_DRIVER, '_get_hosts') @mock.patch.object(VMDK_DRIVER, 'ds_sel') def test_select_datastore_with_empty_clusters(self, ds_sel, get_hosts): self._driver._clusters = None best_candidate = mock.sentinel.best_candidate ds_sel.select_datastore.return_value = best_candidate req = mock.sentinel.req self.assertEqual(best_candidate, self._driver._select_datastore(req)) ds_sel.select_datastore.assert_called_once_with(req, hosts=None) self.assertFalse(get_hosts.called) @mock.patch.object(VMDK_DRIVER, '_get_hosts') @mock.patch.object(VMDK_DRIVER, 'ds_sel') def test_select_datastore_with_no_valid_host(self, ds_sel, get_hosts): cls_1 = mock.sentinel.cls_1 cls_2 = mock.sentinel.cls_2 self._driver._clusters = [cls_1, cls_2] get_hosts.return_value = [] req = mock.sentinel.req self.assertRaises(vmdk_exceptions.NoValidHostException, self._driver._select_datastore, req) get_hosts.assert_called_once_with(self._driver._clusters) self.assertFalse(ds_sel.called) @mock.patch.object(VMDK_DRIVER, 'volumeops') @mock.patch.object(VMDK_DRIVER, '_get_storage_profile') @mock.patch.object(VMDK_DRIVER, 'ds_sel') def test_relocate_backing_nop(self, ds_sel, get_profile, vops): self._driver._storage_policy_enabled = True volume = self._create_volume_dict() datastore = mock.sentinel.datastore vops.get_datastore.return_value = datastore profile = mock.sentinel.profile get_profile.return_value = profile vops.is_datastore_accessible.return_value = True ds_sel.is_datastore_compliant.return_value = True backing = mock.sentinel.backing host = mock.sentinel.host self._driver._relocate_backing(volume, backing, host) get_profile.assert_called_once_with(volume) vops.is_datastore_accessible.assert_called_once_with(datastore, host) 
ds_sel.is_datastore_compliant.assert_called_once_with(datastore, profile) self.assertFalse(vops.relocate_backing.called) @mock.patch.object(VMDK_DRIVER, 'volumeops') @mock.patch.object(VMDK_DRIVER, '_get_storage_profile') @mock.patch.object(VMDK_DRIVER, 'ds_sel') def test_relocate_backing_with_no_datastore( self, ds_sel, get_profile, vops): self._driver._storage_policy_enabled = True volume = self._create_volume_dict() profile = mock.sentinel.profile get_profile.return_value = profile vops.is_datastore_accessible.return_value = True ds_sel.is_datastore_compliant.return_value = False ds_sel.select_datastore.return_value = [] backing = mock.sentinel.backing host = mock.sentinel.host self.assertRaises(vmdk_exceptions.NoValidDatastoreException, self._driver._relocate_backing, volume, backing, host) get_profile.assert_called_once_with(volume) ds_sel.select_datastore.assert_called_once_with( {hub.DatastoreSelector.SIZE_BYTES: volume['size'] * units.Gi, hub.DatastoreSelector.PROFILE_NAME: profile}, hosts=[host]) self.assertFalse(vops.relocate_backing.called) @mock.patch.object(VMDK_DRIVER, 'volumeops') @mock.patch.object(VMDK_DRIVER, '_get_dc') @mock.patch.object(VMDK_DRIVER, '_get_volume_group_folder') @mock.patch.object(VMDK_DRIVER, 'ds_sel') def test_relocate_backing( self, ds_sel, get_volume_group_folder, get_dc, vops): volume = self._create_volume_dict() vops.is_datastore_accessible.return_value = False ds_sel.is_datastore_compliant.return_value = True backing = mock.sentinel.backing host = mock.sentinel.host rp = mock.sentinel.rp datastore = mock.sentinel.datastore summary = mock.Mock(datastore=datastore) ds_sel.select_datastore.return_value = (host, rp, summary) dc = mock.sentinel.dc get_dc.return_value = dc folder = mock.sentinel.folder get_volume_group_folder.return_value = folder self._driver._relocate_backing(volume, backing, host) get_dc.assert_called_once_with(rp) get_volume_group_folder.assert_called_once_with( dc, volume['project_id']) vops.relocate_backing.assert_called_once_with(backing, datastore, rp, host) vops.move_backing_to_folder.assert_called_once_with(backing, folder) @mock.patch.object(VMDK_DRIVER, 'volumeops') @mock.patch.object(VMDK_DRIVER, '_get_dc') @mock.patch.object(VMDK_DRIVER, '_get_volume_group_folder') @mock.patch.object(VMDK_DRIVER, 'ds_sel') def test_relocate_backing_with_pbm_disabled( self, ds_sel, get_volume_group_folder, get_dc, vops): self._driver._storage_policy_enabled = False volume = self._create_volume_dict() vops.is_datastore_accessible.return_value = False backing = mock.sentinel.backing host = mock.sentinel.host rp = mock.sentinel.rp datastore = mock.sentinel.datastore summary = mock.Mock(datastore=datastore) ds_sel.select_datastore.return_value = (host, rp, summary) dc = mock.sentinel.dc get_dc.return_value = dc folder = mock.sentinel.folder get_volume_group_folder.return_value = folder self._driver._relocate_backing(volume, backing, host) self.assertFalse(vops.get_profile.called) get_dc.assert_called_once_with(rp) get_volume_group_folder.assert_called_once_with( dc, volume['project_id']) vops.relocate_backing.assert_called_once_with(backing, datastore, rp, host) vops.move_backing_to_folder.assert_called_once_with(backing, folder) ds_sel.select_datastore.assert_called_once_with( {hub.DatastoreSelector.SIZE_BYTES: volume['size'] * units.Gi, hub.DatastoreSelector.PROFILE_NAME: None}, hosts=[host]) @mock.patch.object(VMDK_DRIVER, 'volumeops') def test_get_disk_device(self, vops): vm = mock.sentinel.vm vops.get_entity_by_inventory_path.return_value = 
vm dev = mock.sentinel.dev vops.get_disk_device.return_value = dev vm_inv_path = mock.sentinel.vm_inv_path vmdk_path = mock.sentinel.vmdk_path ret = self._driver._get_disk_device(vmdk_path, vm_inv_path) self.assertEqual((vm, dev), ret) vops.get_entity_by_inventory_path.assert_called_once_with(vm_inv_path) vops.get_disk_device.assert_called_once_with(vm, vmdk_path) def test_get_existing_with_empty_source_name(self): self.assertRaises(cinder_exceptions.InvalidInput, self._driver._get_existing, {}) def test_get_existing_with_invalid_source_name(self): self.assertRaises(cinder_exceptions.InvalidInput, self._driver._get_existing, {'source-name': 'foo'}) @mock.patch.object(VMDK_DRIVER, '_get_disk_device', return_value=None) def test_get_existing_with_invalid_existing_ref(self, get_disk_device): self.assertRaises(cinder_exceptions.ManageExistingInvalidReference, self._driver._get_existing, {'source-name': '[ds1] foo/foo.vmdk@/dc-1/vm/foo'}) get_disk_device.assert_called_once_with('[ds1] foo/foo.vmdk', '/dc-1/vm/foo') @mock.patch.object(VMDK_DRIVER, '_get_disk_device') def test_get_existing(self, get_disk_device): vm = mock.sentinel.vm disk_device = mock.sentinel.disk_device get_disk_device.return_value = (vm, disk_device) self.assertEqual( (vm, disk_device), self._driver._get_existing({'source-name': '[ds1] foo/foo.vmdk@/dc-1/vm/foo'})) get_disk_device.assert_called_once_with('[ds1] foo/foo.vmdk', '/dc-1/vm/foo') @mock.patch.object(VMDK_DRIVER, '_get_existing') @ddt.data((16384, 1), (1048576, 1), (1572864, 2)) def test_manage_existing_get_size(self, test_data, get_existing): (capacity_kb, exp_size) = test_data disk_device = mock.Mock(capacityInKB=capacity_kb) get_existing.return_value = (mock.sentinel.vm, disk_device) volume = mock.sentinel.volume existing_ref = mock.sentinel.existing_ref self.assertEqual(exp_size, self._driver.manage_existing_get_size(volume, existing_ref)) get_existing.assert_called_once_with(existing_ref) @mock.patch.object(VMDK_DRIVER, '_create_backing') @mock.patch.object(VMDK_DRIVER, 'volumeops') @mock.patch.object(VMDK_DRIVER, '_get_ds_name_folder_path') @mock.patch.object(VMDK_DRIVER, '_get_storage_profile_id') @mock.patch('cinder.volume.drivers.vmware.vmdk.VMwareVcVmdkDriver.' 
'_get_disk_type') @mock.patch.object(VMDK_DRIVER, '_get_adapter_type') def test_manage_existing_int( self, get_adapter_type, get_disk_type, get_storage_profile_id, get_ds_name_folder_path, vops, create_backing): backing = mock.sentinel.backing create_backing.return_value = backing src_dc = mock.sentinel.src_dc dest_dc = mock.sentinel.dest_dc vops.get_dc.side_effect = [src_dc, dest_dc] volume = self._create_volume_dict() ds_name = "ds1" folder_path = "%s/" % volume['name'] get_ds_name_folder_path.return_value = (ds_name, folder_path) profile_id = mock.sentinel.profile_id get_storage_profile_id.return_value = profile_id disk_type = mock.sentinel.disk_type get_disk_type.return_value = disk_type adapter_type = mock.sentinel.adapter_type get_adapter_type.return_value = adapter_type vm = mock.sentinel.vm src_path = mock.sentinel.src_path disk_backing = mock.Mock(fileName=src_path) disk_device = mock.Mock(backing=disk_backing, capacityInKB=1048576) ret = self._driver._manage_existing_int(volume, vm, disk_device) self.assertEqual(backing, ret) create_backing.assert_called_once_with( volume, create_params={vmdk.CREATE_PARAM_DISK_LESS: True}) vops.detach_disk_from_backing.assert_called_once_with(vm, disk_device) dest_path = "[%s] %s%s.vmdk" % (ds_name, folder_path, volume['name']) vops.move_vmdk_file.assert_called_once_with( src_dc, src_path, dest_path, dest_dc_ref=dest_dc) get_storage_profile_id.assert_called_once_with(volume) get_adapter_type.assert_called_once_with(volume) vops.attach_disk_to_backing.assert_called_once_with( backing, disk_device.capacityInKB, disk_type, adapter_type, profile_id, dest_path) vops.update_backing_disk_uuid.assert_called_once_with(backing, volume['id']) @mock.patch.object(VMDK_DRIVER, '_get_existing') @mock.patch.object(VMDK_DRIVER, '_manage_existing_int') def test_manage_existing(self, manage_existing_int, get_existing): vm = mock.sentinel.vm disk_device = mock.sentinel.disk_device get_existing.return_value = (vm, disk_device) volume = mock.sentinel.volume existing_ref = mock.sentinel.existing_ref self._driver.manage_existing(volume, existing_ref) get_existing.assert_called_once_with(existing_ref) manage_existing_int.assert_called_once_with(volume, vm, disk_device) @mock.patch.object(VMDK_DRIVER, 'volumeops') def test_unmanage(self, vops): backing = mock.sentinel.backing vops.get_backing.return_value = backing volume = self._create_volume_dict() self._driver.unmanage(volume) vops.get_backing.assert_called_once_with(volume['name'], volume['id']) vops.update_backing_extra_config.assert_called_once_with( backing, {vmdk.EXTRA_CONFIG_VOLUME_ID_KEY: '', volumeops.BACKING_UUID_KEY: ''}) @mock.patch('oslo_vmware.api.VMwareAPISession') def test_create_session(self, apiSession): session = mock.sentinel.session apiSession.return_value = session ret = self._driver._create_session() self.assertEqual(session, ret) config = self._driver.configuration apiSession.assert_called_once_with( config.vmware_host_ip, config.vmware_host_username, config.vmware_host_password, config.vmware_api_retry_count, config.vmware_task_poll_interval, wsdl_loc=config.safe_get('vmware_wsdl_location'), port=config.vmware_host_port, cacert=config.vmware_ca_file, insecure=config.vmware_insecure, pool_size=config.vmware_connection_pool_size, op_id_prefix='c-vol') @mock.patch.object(VMDK_DRIVER, 'volumeops') @mock.patch.object(VMDK_DRIVER, '_extend_backing') def test_extend_volume_with_no_backing(self, extend_backing, vops): vops.get_backing.return_value = None volume = self._create_volume_dict() 
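        # With vops.get_backing mocked to return None, the driver should
        # treat the volume as having no backing to extend; the assertion
        # below verifies that _extend_backing is never invoked in that case.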
self._driver.extend_volume(volume, 2) self.assertFalse(extend_backing.called) @mock.patch.object(VMDK_DRIVER, '_get_disk_type') @mock.patch.object(VMDK_DRIVER, 'volumeops') @mock.patch.object(VMDK_DRIVER, '_extend_backing') def test_extend_volume(self, extend_backing, vops, get_disk_type): backing = mock.sentinel.backing vops.get_backing.return_value = backing disk_type = mock.sentinel.disk_type get_disk_type.return_value = disk_type volume = self._create_volume_dict() new_size = 2 self._driver.extend_volume(volume, new_size) extend_backing.assert_called_once_with(backing, new_size, disk_type) @mock.patch.object(VMDK_DRIVER, '_get_disk_type') @mock.patch.object(VMDK_DRIVER, 'volumeops') @mock.patch.object(VMDK_DRIVER, '_extend_backing') @mock.patch.object(VMDK_DRIVER, '_select_ds_for_volume') def test_extend_volume_with_no_disk_space(self, select_ds_for_volume, extend_backing, vops, get_disk_type): backing = mock.sentinel.backing vops.get_backing.return_value = backing disk_type = mock.sentinel.disk_type get_disk_type.return_value = disk_type extend_backing.side_effect = [exceptions.NoDiskSpaceException, None] host = mock.sentinel.host rp = mock.sentinel.rp folder = mock.sentinel.folder datastore = mock.sentinel.datastore summary = mock.Mock(datastore=datastore) select_ds_for_volume.return_value = (host, rp, folder, summary) volume = self._create_volume_dict() new_size = 2 self._driver.extend_volume(volume, new_size) create_params = {vmdk.CREATE_PARAM_DISK_SIZE: new_size} select_ds_for_volume.assert_called_once_with( volume, create_params=create_params) vops.relocate_backing.assert_called_once_with(backing, datastore, rp, host) vops.move_backing_to_folder(backing, folder) extend_backing_calls = [mock.call(backing, new_size, disk_type), mock.call(backing, new_size, disk_type)] self.assertEqual(extend_backing_calls, extend_backing.call_args_list) @mock.patch.object(VMDK_DRIVER, '_get_disk_type') @mock.patch.object(VMDK_DRIVER, 'volumeops') @mock.patch.object(VMDK_DRIVER, '_extend_backing') def test_extend_volume_with_extend_backing_error( self, extend_backing, vops, get_disk_type): backing = mock.sentinel.backing vops.get_backing.return_value = backing disk_type = mock.sentinel.disk_type get_disk_type.return_value = disk_type extend_backing.side_effect = exceptions.VimException("Error") volume = self._create_volume_dict() new_size = 2 self.assertRaises(exceptions.VimException, self._driver.extend_volume, volume, new_size) extend_backing.assert_called_once_with(backing, new_size, disk_type) @mock.patch.object(VMDK_DRIVER, 'volumeops') @mock.patch.object(VMDK_DRIVER, '_get_volume_group_folder') def test_accept_transfer(self, get_volume_group_folder, vops): backing = mock.sentinel.backing vops.get_backing.return_value = backing dc = mock.sentinel.dc vops.get_dc.return_value = dc new_folder = mock.sentinel.new_folder get_volume_group_folder.return_value = new_folder context = mock.sentinel.context volume = self._create_volume_obj() new_project = mock.sentinel.new_project self._driver.accept_transfer(context, volume, mock.sentinel.new_user, new_project) vops.get_backing.assert_called_once_with(volume.name, volume.id) vops.get_dc.assert_called_once_with(backing) get_volume_group_folder.assert_called_once_with(dc, new_project) vops.move_backing_to_folder.assert_called_once_with(backing, new_folder) @mock.patch.object(VMDK_DRIVER, 'volumeops') def test_revert_to_snapshot_with_no_backing(self, vops): vops.get_backing.return_value = None volume = self._create_volume_obj() snapshot = 
fake_snapshot.fake_snapshot_obj(self._context, volume=volume)
        self._driver.revert_to_snapshot(
            mock.sentinel.context, volume, snapshot)

        vops.get_backing.assert_called_once_with(volume.name, volume.id)
        vops.revert_to_snapshot.assert_not_called()

    @mock.patch.object(VMDK_DRIVER, 'volumeops')
    def test_revert_to_snapshot_template_format(self, vops):
        volume = self._create_volume_obj()
        loc = '/test-dc/foo'
        snapshot = fake_snapshot.fake_snapshot_obj(self._context,
                                                   volume=volume,
                                                   provider_location=loc)

        self.assertRaises(cinder_exceptions.InvalidSnapshot,
                          self._driver.revert_to_snapshot,
                          mock.sentinel.context, volume, snapshot)

        vops.revert_to_snapshot.assert_not_called()

    @mock.patch.object(VMDK_DRIVER, 'volumeops')
    def test_revert_to_snapshot(self, vops):
        backing = mock.sentinel.backing
        vops.get_backing.return_value = backing
        volume = self._create_volume_obj()
        snapshot = fake_snapshot.fake_snapshot_obj(self._context,
                                                   volume=volume)

        self._driver.revert_to_snapshot(
            mock.sentinel.context, volume, snapshot)

        vops.get_backing.assert_called_once_with(volume.name, volume.id)
        vops.revert_to_snapshot.assert_called_once_with(backing,
                                                        snapshot.name)


@ddt.ddt
class ImageDiskTypeTest(test.TestCase):
    """Unit tests for ImageDiskType."""

    @ddt.data('thin', 'preallocated', 'streamOptimized', 'sparse')
    def test_is_valid(self, image_disk_type):
        self.assertTrue(vmdk.ImageDiskType.is_valid(image_disk_type))

    def test_is_valid_with_invalid_type(self):
        self.assertFalse(vmdk.ImageDiskType.is_valid('thick'))

    @ddt.data('thin', 'preallocated', 'streamOptimized', 'sparse')
    def test_validate(self, image_disk_type):
        vmdk.ImageDiskType.validate(image_disk_type)

    def test_validate_with_invalid_type(self):
        self.assertRaises(cinder_exceptions.ImageUnacceptable,
                          vmdk.ImageDiskType.validate,
                          "thick")

././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0
cinder-27.0.0/cinder/tests/unit/volume/drivers/vmware/test_vmware_volumeops.py0000664000175000017500000032315200000000000027773 0ustar00zuulzuul00000000000000
# Copyright (c) 2014 VMware, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
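# A quick sketch of the datastore path convention exercised by
# test_split_datastore_path below: a path such as
#     '[datastore1] myfolder/mysubfolder/myvm.vmx'
# is split by volumeops.split_datastore_path() into the tuple
#     ('datastore1', 'myfolder/mysubfolder/', 'myvm.vmx')
# i.e. (datastore name, folder path with trailing slash, file name), and a
# path without the leading '[datastore]' component raises IndexError.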
"""Test suite for VMware VMDK driver volumeops module.""" from unittest import mock import ddt from oslo_utils import units from oslo_vmware import exceptions from oslo_vmware import vim_util from cinder.tests.unit import test from cinder.tests.unit.volume.drivers.vmware import fake as vmware_fake from cinder.volume.drivers.vmware import exceptions as vmdk_exceptions from cinder.volume.drivers.vmware import volumeops @ddt.ddt class VolumeOpsTestCase(test.TestCase): """Unit tests for volumeops module.""" MAX_OBJECTS = 100 def setUp(self): super(VolumeOpsTestCase, self).setUp() self.session = mock.MagicMock() self.vops = volumeops.VMwareVolumeOps( self.session, self.MAX_OBJECTS, mock.sentinel.extension_key, mock.sentinel.extension_type) def test_split_datastore_path(self): test1 = '[datastore1] myfolder/mysubfolder/myvm.vmx' (datastore, folder, file_name) = volumeops.split_datastore_path(test1) self.assertEqual('datastore1', datastore) self.assertEqual('myfolder/mysubfolder/', folder) self.assertEqual('myvm.vmx', file_name) test2 = '[datastore2 ] myfolder/myvm.vmdk' (datastore, folder, file_name) = volumeops.split_datastore_path(test2) self.assertEqual('datastore2', datastore) self.assertEqual('myfolder/', folder) self.assertEqual('myvm.vmdk', file_name) test3 = 'myfolder/myvm.vmdk' self.assertRaises(IndexError, volumeops.split_datastore_path, test3) def vm(self, val): """Create a mock vm in retrieve result format.""" vm = mock.MagicMock() prop = mock.Mock(spec=object) prop.val = val vm.propSet = [prop] return vm @mock.patch('cinder.volume.drivers.vmware.volumeops.VMwareVolumeOps.' 'get_backing_by_uuid') def test_get_backing(self, get_backing_by_uuid): ref = mock.sentinel.ref get_backing_by_uuid.return_value = ref name = mock.sentinel.name backing_uuid = mock.sentinel.backing_uuid ret = self.vops.get_backing(name, backing_uuid) self.assertEqual(ref, ret) get_backing_by_uuid.assert_called_once_with(backing_uuid) @mock.patch('cinder.volume.drivers.vmware.volumeops.VMwareVolumeOps.' 'get_backing_by_uuid') def test_get_backing_legacy(self, get_backing_by_uuid): ref = mock.sentinel.ref get_backing_by_uuid.return_value = None name = mock.sentinel.name self.vops._backing_ref_cache[name] = ref backing_uuid = mock.sentinel.backing_uuid ret = self.vops.get_backing(name, backing_uuid) self.assertEqual(ref, ret) get_backing_by_uuid.assert_called_once_with(backing_uuid) def test_get_backing_by_uuid(self): backing = mock.sentinel.backing self.session.invoke_api.return_value = [backing] uuid = mock.sentinel.uuid self.assertEqual(backing, self.vops.get_backing_by_uuid(uuid)) self.session.invoke_api.assert_called_once_with( self.session.vim, 'FindAllByUuid', self.session.vim.service_content.searchIndex, uuid=uuid, vmSearch=True, instanceUuid=True) def _create_property(self, name, val): prop = mock.Mock() prop.name = name prop.val = val return prop def _create_backing_obj(self, name, ref, instance_uuid=None, vol_id=None): name_prop = self._create_property('name', name) instance_uuid_prop = self._create_property('config.instanceUuid', instance_uuid) vol_id_val = mock.Mock(value=vol_id) vol_id_prop = self._create_property( 'config.extraConfig["cinder.volume.id"]', vol_id_val) backing = mock.Mock() backing.obj = ref backing.propSet = [name_prop, instance_uuid_prop, vol_id_prop] return backing @mock.patch('cinder.volume.drivers.vmware.volumeops.VMwareVolumeOps.' 
'continue_retrieval', return_value=None) def test_build_backing_ref_cache(self, continue_retrieval): uuid1 = 'd68cbee0-c1f7-4886-98a4-cf2201461c6e' ref1 = mock.sentinel.ref1 non_vol_backing = self._create_backing_obj( 'foo', ref1, instance_uuid=uuid1) uuid2 = 'f36f0e87-97e0-4a1c-b788-2f84f1376960' ref2 = mock.sentinel.ref2 legacy_vol_backing = self._create_backing_obj( 'volume-f36f0e87-97e0-4a1c-b788-2f84f1376960', ref2, instance_uuid=uuid2) uuid3 = '405d6afd-43be-4ce0-9e5f-fd49559e2763' ref3 = mock.sentinel.ref3 vol_backing = self._create_backing_obj( 'volume-405d6afd-43be-4ce0-9e5f-fd49559e2763', ref3, instance_uuid=uuid3, vol_id=uuid3) result = mock.Mock(objects=[ non_vol_backing, legacy_vol_backing, vol_backing]) self.session.invoke_api.return_value = result self.vops.build_backing_ref_cache() exp_cache = {'foo': ref1, 'volume-f36f0e87-97e0-4a1c-b788-2f84f1376960': ref2} self.assertEqual(exp_cache, self.vops._backing_ref_cache) self.session.invoke_api.assert_called_once_with( vim_util, 'get_objects', self.session.vim, 'VirtualMachine', self.MAX_OBJECTS, properties_to_collect=[ 'name', 'config.instanceUuid', 'config.extraConfig["cinder.volume.id"]']) continue_retrieval.assert_called_once_with(result) def test_delete_backing(self): backing = mock.sentinel.backing task = mock.sentinel.task self.session.invoke_api.return_value = task self.vops.delete_backing(backing) self.session.invoke_api.assert_called_once_with(self.session.vim, "Destroy_Task", backing) self.session.wait_for_task(task) def test_get_host(self): instance = mock.sentinel.instance host = mock.sentinel.host self.session.invoke_api.return_value = host result = self.vops.get_host(instance) self.assertEqual(host, result) self.session.invoke_api.assert_called_once_with(vim_util, 'get_object_property', self.session.vim, instance, 'runtime.host') def _host_runtime_info( self, connection_state='connected', in_maintenance=False): return mock.Mock(connectionState=connection_state, inMaintenanceMode=in_maintenance) def test_get_hosts(self): hosts = mock.sentinel.hosts self.session.invoke_api.return_value = hosts result = self.vops.get_hosts() self.assertEqual(hosts, result) self.session.invoke_api.assert_called_once_with(vim_util, 'get_objects', self.session.vim, 'HostSystem', self.MAX_OBJECTS) def test_continue_retrieval(self): retrieve_result = mock.sentinel.retrieve_result self.session.invoke_api.return_value = retrieve_result result = self.vops.continue_retrieval(retrieve_result) self.assertEqual(retrieve_result, result) self.session.invoke_api.assert_called_once_with(vim_util, 'continue_retrieval', self.session.vim, retrieve_result) def test_cancel_retrieval(self): retrieve_result = mock.sentinel.retrieve_result self.session.invoke_api.return_value = retrieve_result result = self.vops.cancel_retrieval(retrieve_result) self.assertIsNone(result) self.session.invoke_api.assert_called_once_with(vim_util, 'cancel_retrieval', self.session.vim, retrieve_result) def test_is_usable(self): mount_info = mock.Mock(spec=object) mount_info.accessMode = "readWrite" mount_info.mounted = True mount_info.accessible = True self.assertTrue(self.vops._is_usable(mount_info)) del mount_info.mounted self.assertTrue(self.vops._is_usable(mount_info)) mount_info.accessMode = "readonly" self.assertFalse(self.vops._is_usable(mount_info)) mount_info.accessMode = "readWrite" mount_info.mounted = False self.assertFalse(self.vops._is_usable(mount_info)) mount_info.mounted = True mount_info.accessible = False self.assertFalse(self.vops._is_usable(mount_info)) del 
mount_info.accessible self.assertFalse(self.vops._is_usable(mount_info)) def _create_host_mounts(self, access_mode, host, set_accessible=True, is_accessible=True, mounted=True): """Create host mount value of datastore with single mount info. :param access_mode: string specifying the read/write permission :param set_accessible: specify whether accessible property should be set :param is_accessible: boolean specifying whether the datastore is accessible to host :param host: managed object reference of the connected host :return: list of host mount info """ mntInfo = mock.Mock(spec=object) mntInfo.accessMode = access_mode if set_accessible: mntInfo.accessible = is_accessible else: del mntInfo.accessible mntInfo.mounted = mounted host_mount = mock.Mock(spec=object) host_mount.key = host host_mount.mountInfo = mntInfo host_mounts = mock.Mock(spec=object) host_mounts.DatastoreHostMount = [host_mount] return host_mounts def test_get_connected_hosts(self): with mock.patch.object(self.vops, 'get_summary') as get_summary: datastore = mock.sentinel.datastore summary = mock.Mock(spec=object) get_summary.return_value = summary summary.accessible = False hosts = self.vops.get_connected_hosts(datastore) self.assertEqual([], hosts) summary.accessible = True host = vmware_fake.ManagedObjectReference(value=mock.sentinel.host) host_mounts = self._create_host_mounts("readWrite", host) self.session.invoke_api.return_value = host_mounts hosts = self.vops.get_connected_hosts(datastore) self.assertEqual([mock.sentinel.host], hosts) self.session.invoke_api.assert_called_once_with( vim_util, 'get_object_property', self.session.vim, datastore, 'host') del host_mounts.DatastoreHostMount hosts = self.vops.get_connected_hosts(datastore) self.assertEqual([], hosts) @mock.patch('cinder.volume.drivers.vmware.volumeops.VMwareVolumeOps.' 'get_connected_hosts') def test_is_datastore_accessible(self, get_connected_hosts): host_1 = mock.sentinel.host_1 host_2 = mock.sentinel.host_2 get_connected_hosts.return_value = [host_1, host_2] ds = mock.sentinel.datastore host = vmware_fake.ManagedObjectReference(value=mock.sentinel.host_1) self.assertTrue(self.vops.is_datastore_accessible(ds, host)) get_connected_hosts.assert_called_once_with(ds) @mock.patch('cinder.volume.drivers.vmware.volumeops.VMwareVolumeOps.' 
'get_connected_hosts') def test_is_datastore_accessible_with_inaccessible(self, get_connected_hosts): host_1 = mock.sentinel.host_1 get_connected_hosts.return_value = [host_1] ds = mock.sentinel.datastore host = vmware_fake.ManagedObjectReference(value=mock.sentinel.host_2) self.assertFalse(self.vops.is_datastore_accessible(ds, host)) get_connected_hosts.assert_called_once_with(ds) def test_get_parent(self): # Not recursive child = vmware_fake.ManagedObjectReference('Parent') ret = self.vops._get_parent(child, 'Parent') self.assertEqual(child, ret) # Recursive parent = vmware_fake.ManagedObjectReference('Parent') child = vmware_fake.ManagedObjectReference('Child') self.session.invoke_api.return_value = parent ret = self.vops._get_parent(child, 'Parent') self.assertEqual(parent, ret) self.session.invoke_api.assert_called_with(vim_util, 'get_object_property', self.session.vim, child, 'parent') def test_get_dc(self): # set up hierarchy of objects dc = vmware_fake.ManagedObjectReference('Datacenter') o1 = vmware_fake.ManagedObjectReference('mockType1') o1.parent = dc o2 = vmware_fake.ManagedObjectReference('mockType2') o2.parent = o1 # mock out invoke_api behaviour to fetch parent def mock_invoke_api(vim_util, method, vim, the_object, arg): return the_object.parent self.session.invoke_api.side_effect = mock_invoke_api ret = self.vops.get_dc(o2) self.assertEqual(dc, ret) # Clear side effects. self.session.invoke_api.side_effect = None def test_get_vmfolder(self): self.session.invoke_api.return_value = mock.sentinel.ret ret = self.vops.get_vmfolder(mock.sentinel.dc) self.assertEqual(mock.sentinel.ret, ret) self.session.invoke_api.assert_called_once_with(vim_util, 'get_object_property', self.session.vim, mock.sentinel.dc, 'vmFolder') @mock.patch('cinder.volume.drivers.vmware.volumeops.VMwareVolumeOps.' 'get_entity_name') def test_get_child_folder(self, get_entity_name): child_entity_1 = vmware_fake.ManagedObjectReference('Folder') child_entity_2 = vmware_fake.ManagedObjectReference('foo') child_entity_3 = vmware_fake.ManagedObjectReference('Folder') prop_val = mock.Mock(ManagedObjectReference=[child_entity_1, child_entity_2, child_entity_3]) self.session.invoke_api.return_value = prop_val get_entity_name.side_effect = ['bar', '%2fcinder-volumes'] parent_folder = mock.sentinel.parent_folder child_name = '/cinder-volumes' ret = self.vops._get_child_folder(parent_folder, child_name) self.assertEqual(child_entity_3, ret) self.session.invoke_api.assert_called_once_with( vim_util, 'get_object_property', self.session.vim, parent_folder, 'childEntity') get_entity_name.assert_has_calls([mock.call(child_entity_1), mock.call(child_entity_3)]) def test_create_folder(self): folder = mock.sentinel.folder self.session.invoke_api.return_value = folder parent_folder = mock.sentinel.parent_folder child_folder_name = mock.sentinel.child_folder_name ret = self.vops.create_folder(parent_folder, child_folder_name) self.assertEqual(folder, ret) self.session.invoke_api.assert_called_once_with( self.session.vim, 'CreateFolder', parent_folder, name=child_folder_name) @mock.patch('cinder.volume.drivers.vmware.volumeops.VMwareVolumeOps.' 
'_get_child_folder') def test_create_folder_with_duplicate_name(self, get_child_folder): self.session.invoke_api.side_effect = exceptions.DuplicateName folder = mock.sentinel.folder get_child_folder.return_value = folder parent_folder = mock.sentinel.parent_folder child_folder_name = mock.sentinel.child_folder_name ret = self.vops.create_folder(parent_folder, child_folder_name) self.assertEqual(folder, ret) self.session.invoke_api.assert_called_once_with( self.session.vim, 'CreateFolder', parent_folder, name=child_folder_name) get_child_folder.assert_called_once_with(parent_folder, child_folder_name) @mock.patch('cinder.volume.drivers.vmware.volumeops.VMwareVolumeOps.' 'get_vmfolder') @mock.patch('cinder.volume.drivers.vmware.volumeops.VMwareVolumeOps.' 'create_folder') def test_create_vm_inventory_folder(self, create_folder, get_vmfolder): vm_folder_1 = mock.sentinel.vm_folder_1 get_vmfolder.return_value = vm_folder_1 folder_1a = mock.sentinel.folder_1a folder_1b = mock.sentinel.folder_1b create_folder.side_effect = [folder_1a, folder_1b] datacenter_1 = vmware_fake.ManagedObjectReference(value='dc-1') path_comp = ['a', 'b'] ret = self.vops.create_vm_inventory_folder(datacenter_1, path_comp) self.assertEqual(folder_1b, ret) get_vmfolder.assert_called_once_with(datacenter_1) exp_calls = [mock.call(vm_folder_1, 'a'), mock.call(folder_1a, 'b')] self.assertEqual(exp_calls, create_folder.call_args_list) exp_cache = {'/dc-1': vm_folder_1, '/dc-1/a': folder_1a, '/dc-1/a/b': folder_1b} self.assertEqual(exp_cache, self.vops._folder_cache) # Test cache get_vmfolder.reset_mock() create_folder.reset_mock() folder_1c = mock.sentinel.folder_1c create_folder.side_effect = [folder_1c] path_comp = ['a', 'c'] ret = self.vops.create_vm_inventory_folder(datacenter_1, path_comp) self.assertEqual(folder_1c, ret) self.assertFalse(get_vmfolder.called) exp_calls = [mock.call(folder_1a, 'c')] self.assertEqual(exp_calls, create_folder.call_args_list) exp_cache = {'/dc-1': vm_folder_1, '/dc-1/a': folder_1a, '/dc-1/a/b': folder_1b, '/dc-1/a/c': folder_1c} self.assertEqual(exp_cache, self.vops._folder_cache) # Test cache with different datacenter get_vmfolder.reset_mock() create_folder.reset_mock() vm_folder_2 = mock.sentinel.vm_folder_2 get_vmfolder.return_value = vm_folder_2 folder_2a = mock.sentinel.folder_2a folder_2b = mock.sentinel.folder_2b create_folder.side_effect = [folder_2a, folder_2b] datacenter_2 = vmware_fake.ManagedObjectReference(value='dc-2') path_comp = ['a', 'b'] ret = self.vops.create_vm_inventory_folder(datacenter_2, path_comp) self.assertEqual(folder_2b, ret) get_vmfolder.assert_called_once_with(datacenter_2) exp_calls = [mock.call(vm_folder_2, 'a'), mock.call(folder_2a, 'b')] self.assertEqual(exp_calls, create_folder.call_args_list) exp_cache = {'/dc-1': vm_folder_1, '/dc-1/a': folder_1a, '/dc-1/a/b': folder_1b, '/dc-1/a/c': folder_1c, '/dc-2': vm_folder_2, '/dc-2/a': folder_2a, '/dc-2/a/b': folder_2b } self.assertEqual(exp_cache, self.vops._folder_cache) def test_create_disk_backing_thin(self): backing = mock.Mock() del backing.eagerlyScrub cf = self.session.vim.client.factory cf.create.return_value = backing disk_type = 'thin' ret = self.vops._create_disk_backing(disk_type, None) self.assertEqual(backing, ret) self.assertIsInstance(ret.thinProvisioned, bool) self.assertTrue(ret.thinProvisioned) self.assertEqual('', ret.fileName) self.assertEqual('persistent', ret.diskMode) def test_create_disk_backing_thick(self): backing = mock.Mock() del backing.eagerlyScrub del backing.thinProvisioned cf = 
self.session.vim.client.factory cf.create.return_value = backing disk_type = 'thick' ret = self.vops._create_disk_backing(disk_type, None) self.assertEqual(backing, ret) self.assertEqual('', ret.fileName) self.assertEqual('persistent', ret.diskMode) def test_create_disk_backing_eager_zeroed_thick(self): backing = mock.Mock() del backing.thinProvisioned cf = self.session.vim.client.factory cf.create.return_value = backing disk_type = 'eagerZeroedThick' ret = self.vops._create_disk_backing(disk_type, None) self.assertEqual(backing, ret) self.assertIsInstance(ret.eagerlyScrub, bool) self.assertTrue(ret.eagerlyScrub) self.assertEqual('', ret.fileName) self.assertEqual('persistent', ret.diskMode) def test_create_virtual_disk_config_spec(self): cf = self.session.vim.client.factory cf.create.side_effect = lambda *args: mock.Mock() size_kb = units.Ki controller_key = 200 disk_type = 'thick' profile_id = mock.sentinel.profile_id spec = self.vops._create_virtual_disk_config_spec(size_kb, disk_type, controller_key, profile_id, None) cf.create.side_effect = None self.assertEqual('add', spec.operation) self.assertEqual('create', spec.fileOperation) device = spec.device self.assertEqual(size_kb, device.capacityInKB) self.assertEqual(-101, device.key) self.assertEqual(0, device.unitNumber) self.assertEqual(controller_key, device.controllerKey) backing = device.backing self.assertEqual('', backing.fileName) self.assertEqual('persistent', backing.diskMode) disk_profiles = spec.profile self.assertEqual(1, len(disk_profiles)) self.assertEqual(profile_id, disk_profiles[0].profileId) def test_create_specs_for_ide_disk_add(self): factory = self.session.vim.client.factory factory.create.side_effect = lambda *args: mock.Mock() size_kb = 1 disk_type = 'thin' adapter_type = 'ide' profile_id = mock.sentinel.profile_id ret = self.vops._create_specs_for_disk_add(size_kb, disk_type, adapter_type, profile_id) factory.create.side_effect = None self.assertEqual(1, len(ret)) self.assertEqual(units.Ki, ret[0].device.capacityInKB) self.assertEqual(200, ret[0].device.controllerKey) expected = [mock.call.create('ns0:VirtualDeviceConfigSpec'), mock.call.create('ns0:VirtualDisk'), mock.call.create('ns0:VirtualDiskFlatVer2BackingInfo')] factory.create.assert_has_calls(expected, any_order=True) def test_create_specs_for_scsi_disk_add(self): factory = self.session.vim.client.factory factory.create.side_effect = lambda *args: mock.Mock() size_kb = 2 * units.Ki disk_type = 'thin' adapter_type = 'lsiLogicsas' profile_id = mock.sentinel.profile_id ret = self.vops._create_specs_for_disk_add(size_kb, disk_type, adapter_type, profile_id) factory.create.side_effect = None self.assertEqual(2, len(ret)) self.assertEqual('noSharing', ret[1].device.sharedBus) self.assertEqual(size_kb, ret[0].device.capacityInKB) expected = [mock.call.create('ns0:VirtualLsiLogicSASController'), mock.call.create('ns0:VirtualDeviceConfigSpec'), mock.call.create('ns0:VirtualDisk'), mock.call.create('ns0:VirtualDiskFlatVer2BackingInfo'), mock.call.create('ns0:VirtualDeviceConfigSpec')] factory.create.assert_has_calls(expected, any_order=True) def test_get_create_spec_disk_less(self): factory = self.session.vim.client.factory factory.create.side_effect = lambda *args: mock.Mock() name = mock.sentinel.name ds_name = mock.sentinel.ds_name profile_id = mock.sentinel.profile_id option_key = mock.sentinel.key option_value = mock.sentinel.value extra_config = {option_key: option_value, volumeops.BACKING_UUID_KEY: mock.sentinel.uuid} ret = 
self.vops._get_create_spec_disk_less(name, ds_name, profile_id, extra_config) factory.create.side_effect = None self.assertEqual(name, ret.name) self.assertEqual(mock.sentinel.uuid, ret.instanceUuid) self.assertEqual('[%s]' % ds_name, ret.files.vmPathName) self.assertEqual("vmx-08", ret.version) self.assertEqual(profile_id, ret.vmProfile[0].profileId) self.assertEqual(1, len(ret.extraConfig)) self.assertEqual(option_key, ret.extraConfig[0].key) self.assertEqual(option_value, ret.extraConfig[0].value) self.assertEqual(mock.sentinel.extension_key, ret.managedBy.extensionKey) self.assertEqual(mock.sentinel.extension_type, ret.managedBy.type) expected = [mock.call.create('ns0:VirtualMachineFileInfo'), mock.call.create('ns0:VirtualMachineConfigSpec'), mock.call.create('ns0:VirtualMachineDefinedProfileSpec'), mock.call.create('ns0:OptionValue'), mock.call.create('ns0:ManagedByInfo')] factory.create.assert_has_calls(expected, any_order=True) @mock.patch('cinder.volume.drivers.vmware.volumeops.VMwareVolumeOps.' '_get_create_spec_disk_less') @mock.patch('cinder.volume.drivers.vmware.volumeops.VMwareVolumeOps.' '_create_specs_for_disk_add') def test_get_create_spec(self, create_specs_for_disk_add, get_create_spec_disk_less): name = 'vol-1' size_kb = 1024 disk_type = 'thin' ds_name = 'nfs-1' profile_id = mock.sentinel.profile_id adapter_type = 'busLogic' extra_config = mock.sentinel.extra_config self.vops.get_create_spec(name, size_kb, disk_type, ds_name, profile_id, adapter_type, extra_config) get_create_spec_disk_less.assert_called_once_with( name, ds_name, profileId=profile_id, extra_config=extra_config) create_specs_for_disk_add.assert_called_once_with( size_kb, disk_type, adapter_type, profile_id) @mock.patch('cinder.volume.drivers.vmware.volumeops.VMwareVolumeOps.' 'get_create_spec') def test_create_backing(self, get_create_spec): create_spec = mock.sentinel.create_spec get_create_spec.return_value = create_spec task = mock.sentinel.task self.session.invoke_api.return_value = task task_info = mock.Mock(spec=object) task_info.result = mock.sentinel.result self.session.wait_for_task.return_value = task_info name = 'backing_name' size_kb = mock.sentinel.size_kb disk_type = mock.sentinel.disk_type adapter_type = mock.sentinel.adapter_type folder = mock.sentinel.folder resource_pool = mock.sentinel.resource_pool host = mock.sentinel.host ds_name = mock.sentinel.ds_name profile_id = mock.sentinel.profile_id extra_config = mock.sentinel.extra_config ret = self.vops.create_backing(name, size_kb, disk_type, folder, resource_pool, host, ds_name, profile_id, adapter_type, extra_config) self.assertEqual(mock.sentinel.result, ret) get_create_spec.assert_called_once_with( name, size_kb, disk_type, ds_name, profile_id=profile_id, adapter_type=adapter_type, extra_config=extra_config) self.session.invoke_api.assert_called_once_with(self.session.vim, 'CreateVM_Task', folder, config=create_spec, pool=resource_pool, host=host) self.session.wait_for_task.assert_called_once_with(task) @mock.patch('cinder.volume.drivers.vmware.volumeops.VMwareVolumeOps.' 
'_get_create_spec_disk_less') def test_create_backing_disk_less(self, get_create_spec_disk_less): create_spec = mock.sentinel.create_spec get_create_spec_disk_less.return_value = create_spec task = mock.sentinel.task self.session.invoke_api.return_value = task task_info = mock.Mock(spec=object) task_info.result = mock.sentinel.result self.session.wait_for_task.return_value = task_info name = 'backing_name' folder = mock.sentinel.folder resource_pool = mock.sentinel.resource_pool host = mock.sentinel.host ds_name = mock.sentinel.ds_name profile_id = mock.sentinel.profile_id extra_config = mock.sentinel.extra_config ret = self.vops.create_backing_disk_less(name, folder, resource_pool, host, ds_name, profile_id, extra_config) self.assertEqual(mock.sentinel.result, ret) get_create_spec_disk_less.assert_called_once_with( name, ds_name, profileId=profile_id, extra_config=extra_config) self.session.invoke_api.assert_called_once_with(self.session.vim, 'CreateVM_Task', folder, config=create_spec, pool=resource_pool, host=host) self.session.wait_for_task.assert_called_once_with(task) def test_get_datastore(self): backing = mock.sentinel.backing datastore = mock.Mock(spec=object) datastore.ManagedObjectReference = [mock.sentinel.ds] self.session.invoke_api.return_value = datastore ret = self.vops.get_datastore(backing) self.assertEqual(mock.sentinel.ds, ret) self.session.invoke_api.assert_called_once_with(vim_util, 'get_object_property', self.session.vim, backing, 'datastore') def test_get_summary(self): datastore = mock.sentinel.datastore summary = mock.sentinel.summary self.session.invoke_api.return_value = summary ret = self.vops.get_summary(datastore) self.assertEqual(summary, ret) self.session.invoke_api.assert_called_once_with(vim_util, 'get_object_property', self.session.vim, datastore, 'summary') def test_get_relocate_spec(self): delete_disk_attribute = True def _create_side_effect(type): obj = mock.Mock() if type == "ns0:VirtualDiskFlatVer2BackingInfo": del obj.eagerlyScrub elif (type == "ns0:VirtualMachineRelocateSpec" and delete_disk_attribute): del obj.disk else: pass return obj factory = self.session.vim.client.factory factory.create.side_effect = _create_side_effect datastore = mock.sentinel.datastore resource_pool = mock.sentinel.resource_pool host = mock.sentinel.host disk_move_type = mock.sentinel.disk_move_type ret = self.vops._get_relocate_spec(datastore, resource_pool, host, disk_move_type) self.assertEqual(datastore, ret.datastore) self.assertEqual(resource_pool, ret.pool) self.assertEqual(host, ret.host) self.assertEqual(disk_move_type, ret.diskMoveType) # Test with disk locator. delete_disk_attribute = False disk_type = 'thin' disk_device = mock.Mock() ret = self.vops._get_relocate_spec(datastore, resource_pool, host, disk_move_type, disk_type, disk_device) factory.create.side_effect = None self.assertEqual(datastore, ret.datastore) self.assertEqual(resource_pool, ret.pool) self.assertEqual(host, ret.host) self.assertEqual(disk_move_type, ret.diskMoveType) self.assertIsInstance(ret.disk, list) self.assertEqual(1, len(ret.disk)) disk_locator = ret.disk[0] self.assertEqual(datastore, disk_locator.datastore) self.assertEqual(disk_device.key, disk_locator.diskId) backing = disk_locator.diskBackingInfo self.assertIsInstance(backing.thinProvisioned, bool) self.assertTrue(backing.thinProvisioned) self.assertEqual('', backing.fileName) self.assertEqual('persistent', backing.diskMode) @mock.patch('cinder.volume.drivers.vmware.volumeops.VMwareVolumeOps.' 
'_get_disk_device') @mock.patch('cinder.volume.drivers.vmware.volumeops.VMwareVolumeOps.' '_get_relocate_spec') def test_relocate_backing(self, get_relocate_spec, get_disk_device): disk_device = mock.sentinel.disk_device get_disk_device.return_value = disk_device spec = mock.sentinel.relocate_spec get_relocate_spec.return_value = spec task = mock.sentinel.task self.session.invoke_api.return_value = task backing = mock.sentinel.backing datastore = mock.sentinel.datastore resource_pool = mock.sentinel.resource_pool host = mock.sentinel.host disk_type = mock.sentinel.disk_type self.vops.relocate_backing(backing, datastore, resource_pool, host, disk_type) # Verify calls disk_move_type = 'moveAllDiskBackingsAndAllowSharing' get_disk_device.assert_called_once_with(backing) get_relocate_spec.assert_called_once_with(datastore, resource_pool, host, disk_move_type, disk_type, disk_device) self.session.invoke_api.assert_called_once_with(self.session.vim, 'RelocateVM_Task', backing, spec=spec) self.session.wait_for_task.assert_called_once_with(task) def test_move_backing_to_folder(self): task = mock.sentinel.task self.session.invoke_api.return_value = task backing = mock.sentinel.backing folder = mock.sentinel.folder self.vops.move_backing_to_folder(backing, folder) # Verify calls self.session.invoke_api.assert_called_once_with(self.session.vim, 'MoveIntoFolder_Task', folder, list=[backing]) self.session.wait_for_task.assert_called_once_with(task) def test_create_snapshot_operation(self): task = mock.sentinel.task self.session.invoke_api.return_value = task task_info = mock.Mock(spec=object) task_info.result = mock.sentinel.result self.session.wait_for_task.return_value = task_info backing = mock.sentinel.backing name = mock.sentinel.name desc = mock.sentinel.description quiesce = True ret = self.vops.create_snapshot(backing, name, desc, quiesce) self.assertEqual(mock.sentinel.result, ret) self.session.invoke_api.assert_called_once_with(self.session.vim, 'CreateSnapshot_Task', backing, name=name, description=desc, memory=False, quiesce=quiesce) self.session.wait_for_task.assert_called_once_with(task) def test_get_snapshot_from_tree(self): volops = volumeops.VMwareVolumeOps name = mock.sentinel.name # Test snapshot == 'None' ret = volops._get_snapshot_from_tree(name, None) self.assertIsNone(ret) # Test root == snapshot snapshot = mock.sentinel.snapshot node = mock.Mock(spec=object) node.name = name node.snapshot = snapshot ret = volops._get_snapshot_from_tree(name, node) self.assertEqual(snapshot, ret) # Test root.childSnapshotList == None root = mock.Mock(spec=object) root.name = 'root' del root.childSnapshotList ret = volops._get_snapshot_from_tree(name, root) self.assertIsNone(ret) # Test root.child == snapshot root.childSnapshotList = [node] ret = volops._get_snapshot_from_tree(name, root) self.assertEqual(snapshot, ret) def test_get_snapshot(self): # build out the root snapshot tree snapshot_name = mock.sentinel.snapshot_name snapshot = mock.sentinel.snapshot root = mock.Mock(spec=object) root.name = 'root' node = mock.Mock(spec=object) node.name = snapshot_name node.snapshot = snapshot root.childSnapshotList = [node] # Test rootSnapshotList is not None snapshot_tree = mock.Mock(spec=object) snapshot_tree.rootSnapshotList = [root] self.session.invoke_api.return_value = snapshot_tree backing = mock.sentinel.backing ret = self.vops.get_snapshot(backing, snapshot_name) self.assertEqual(snapshot, ret) self.session.invoke_api.assert_called_with(vim_util, 'get_object_property', self.session.vim, 
backing, 'snapshot') # Test rootSnapshotList == None snapshot_tree.rootSnapshotList = None ret = self.vops.get_snapshot(backing, snapshot_name) self.assertIsNone(ret) self.session.invoke_api.assert_called_with(vim_util, 'get_object_property', self.session.vim, backing, 'snapshot') def test_snapshot_exists(self): backing = mock.sentinel.backing invoke_api = self.session.invoke_api invoke_api.return_value = None self.assertFalse(self.vops.snapshot_exists(backing)) invoke_api.assert_called_once_with(vim_util, 'get_object_property', self.session.vim, backing, 'snapshot') snapshot = mock.Mock() invoke_api.return_value = snapshot snapshot.rootSnapshotList = None self.assertFalse(self.vops.snapshot_exists(backing)) snapshot.rootSnapshotList = [mock.Mock()] self.assertTrue(self.vops.snapshot_exists(backing)) def test_delete_snapshot(self): backing = mock.sentinel.backing snapshot_name = mock.sentinel.snapshot_name # Test snapshot is None with mock.patch.object(self.vops, 'get_snapshot') as get_snapshot: get_snapshot.return_value = None self.vops.delete_snapshot(backing, snapshot_name) get_snapshot.assert_called_once_with(backing, snapshot_name) # Test snapshot is not None snapshot = mock.sentinel.snapshot task = mock.sentinel.task invoke_api = self.session.invoke_api invoke_api.return_value = task with mock.patch.object(self.vops, 'get_snapshot') as get_snapshot: get_snapshot.return_value = snapshot self.vops.delete_snapshot(backing, snapshot_name) get_snapshot.assert_called_with(backing, snapshot_name) invoke_api.assert_called_once_with(self.session.vim, 'RemoveSnapshot_Task', snapshot, removeChildren=False) self.session.wait_for_task.assert_called_once_with(task) @mock.patch('cinder.volume.drivers.vmware.volumeops.VMwareVolumeOps.' 'get_snapshot') def test_revert_to_snapshot_with_missing_snapshot(self, get_snapshot): get_snapshot.return_value = None backing = mock.sentinel.backing self.assertRaises(vmdk_exceptions.SnapshotNotFoundException, self.vops.revert_to_snapshot, backing, 'foo') get_snapshot.assert_called_once_with(backing, 'foo') @mock.patch('cinder.volume.drivers.vmware.volumeops.VMwareVolumeOps.' 'get_snapshot') def test_revert_to_snapshot(self, get_snapshot): snapshot = mock.sentinel.snapshot get_snapshot.return_value = snapshot task = mock.sentinel.task self.session.invoke_api.return_value = task backing = mock.sentinel.backing self.vops.revert_to_snapshot(backing, 'foo') get_snapshot.assert_called_once_with(backing, 'foo') self.session.invoke_api.assert_called_once_with( self.session.vim, 'RevertToSnapshot_Task', snapshot) self.session.wait_for_task.assert_called_once_with(task) def test_get_folder(self): folder = mock.sentinel.folder backing = mock.sentinel.backing with mock.patch.object(self.vops, '_get_parent') as get_parent: get_parent.return_value = folder ret = self.vops._get_folder(backing) self.assertEqual(folder, ret) get_parent.assert_called_once_with(backing, 'Folder') def _verify_extra_config(self, option_values, key, value): self.assertEqual(1, len(option_values)) self.assertEqual(key, option_values[0].key) self.assertEqual(value, option_values[0].value) @mock.patch('cinder.volume.drivers.vmware.volumeops.VMwareVolumeOps.' '_get_relocate_spec') @mock.patch('cinder.volume.drivers.vmware.volumeops.VMwareVolumeOps.' '_get_disk_device') @mock.patch('cinder.volume.drivers.vmware.volumeops.VMwareVolumeOps.' 
'_create_device_change_for_disk_removal') def _test_get_clone_spec( self, create_device_change_for_disk_removal, get_disk_device, get_relocate_spec, disk_type=None): factory = self.session.vim.client.factory factory.create.side_effect = lambda *args: mock.Mock() relocate_spec = mock.sentinel.relocate_spec get_relocate_spec.return_value = relocate_spec if disk_type: disk_device = mock.sentinel.disk_device get_disk_device.return_value = disk_device else: disk_device = None dev_change = mock.sentinel.dev_change create_device_change_for_disk_removal.return_value = dev_change datastore = mock.sentinel.datastore disk_move_type = mock.sentinel.disk_move_type snapshot = mock.sentinel.snapshot backing = mock.sentinel.backing host = mock.sentinel.host rp = mock.sentinel.rp key = mock.sentinel.key value = mock.sentinel.value extra_config = {key: value, volumeops.BACKING_UUID_KEY: mock.sentinel.uuid} disks_to_clone = [mock.sentinel.disk_uuid] ret = self.vops._get_clone_spec(datastore, disk_move_type, snapshot, backing, disk_type, host=host, resource_pool=rp, extra_config=extra_config, disks_to_clone=disks_to_clone) self.assertEqual(relocate_spec, ret.location) self.assertFalse(ret.powerOn) self.assertFalse(ret.template) self.assertEqual(snapshot, ret.snapshot) self.assertEqual(mock.sentinel.uuid, ret.config.instanceUuid) self.assertEqual(mock.sentinel.extension_key, ret.config.managedBy.extensionKey) self.assertEqual(mock.sentinel.extension_type, ret.config.managedBy.type) get_relocate_spec.assert_called_once_with(datastore, rp, host, disk_move_type, disk_type, disk_device) self._verify_extra_config(ret.config.extraConfig, key, value) create_device_change_for_disk_removal.assert_called_once_with( backing, disks_to_clone) self.assertEqual(dev_change, ret.config.deviceChange) def test_get_clone_spec(self): self._test_get_clone_spec() def test_get_clone_spec_with_thin_disk_type(self): self._test_get_clone_spec(disk_type='thin') @mock.patch('cinder.volume.drivers.vmware.volumeops.VMwareVolumeOps.' '_get_disk_devices') @mock.patch('cinder.volume.drivers.vmware.volumeops.VMwareVolumeOps.' '_create_spec_for_disk_remove') def test_create_device_change_for_disk_removal( self, create_spec_for_disk_remove, get_disk_devices): uuid_1 = mock.sentinel.uuid_1 disk_dev_1 = self._create_disk_device('foo', uuid_1) uuid_2 = mock.sentinel.uuid_2 disk_dev_2 = self._create_disk_device('bar', uuid_2) get_disk_devices.return_value = [disk_dev_1, disk_dev_2] spec = mock.sentinel.spec create_spec_for_disk_remove.return_value = spec backing = mock.sentinel.backing disks_to_clone = [uuid_2] ret = self.vops._create_device_change_for_disk_removal( backing, disks_to_clone) get_disk_devices.assert_called_once_with(backing) create_spec_for_disk_remove.assert_called_once_with(disk_dev_1) self.assertEqual([spec], ret) @mock.patch('cinder.volume.drivers.vmware.volumeops.VMwareVolumeOps.' '_get_folder') @mock.patch('cinder.volume.drivers.vmware.volumeops.VMwareVolumeOps.' 
'_get_clone_spec') def _test_clone_backing( self, clone_type, folder, get_clone_spec, get_folder): backing_folder = mock.sentinel.backing_folder get_folder.return_value = backing_folder clone_spec = mock.sentinel.clone_spec get_clone_spec.return_value = clone_spec task = mock.sentinel.task self.session.invoke_api.return_value = task clone = mock.sentinel.clone self.session.wait_for_task.return_value = mock.Mock(result=clone) name = mock.sentinel.name backing = mock.sentinel.backing snapshot = mock.sentinel.snapshot datastore = mock.sentinel.datastore disk_type = mock.sentinel.disk_type host = mock.sentinel.host resource_pool = mock.sentinel.resource_pool extra_config = mock.sentinel.extra_config ret = self.vops.clone_backing( name, backing, snapshot, clone_type, datastore, disk_type=disk_type, host=host, resource_pool=resource_pool, extra_config=extra_config, folder=folder) if folder: self.assertFalse(get_folder.called) else: get_folder.assert_called_once_with(backing) if clone_type == 'linked': exp_disk_move_type = 'createNewChildDiskBacking' else: exp_disk_move_type = 'moveAllDiskBackingsAndDisallowSharing' get_clone_spec.assert_called_once_with( datastore, exp_disk_move_type, snapshot, backing, disk_type, host=host, resource_pool=resource_pool, extra_config=extra_config, disks_to_clone=None) exp_folder = folder if folder else backing_folder self.session.invoke_api.assert_called_once_with( self.session.vim, 'CloneVM_Task', backing, folder=exp_folder, name=name, spec=clone_spec) self.session.wait_for_task.assert_called_once_with(task) self.assertEqual(clone, ret) @ddt.data('linked', 'full') def test_clone_backing(self, clone_type): self._test_clone_backing(clone_type, mock.sentinel.folder) def test_clone_backing_with_empty_folder(self): self._test_clone_backing('linked', None) def _create_controller_device(self, controller_type): dev = mock.Mock() dev.__class__.__name__ = controller_type return dev def test_get_controller(self): disk = self._create_disk_device('foo.vmdk') controller1 = self._create_controller_device( volumeops.ControllerType.LSI_LOGIC) controller2 = self._create_controller_device( volumeops.ControllerType.PARA_VIRTUAL) self.session.invoke_api.return_value = [disk, controller1, controller2] backing = mock.sentinel.backing ret = self.vops._get_controller( backing, volumeops.VirtualDiskAdapterType.PARA_VIRTUAL) self.assertEqual(controller2, ret) self.session.invoke_api.assert_called_once_with( vim_util, 'get_object_property', self.session.vim, backing, 'config.hardware.device') @mock.patch('cinder.volume.drivers.vmware.volumeops.VMwareVolumeOps.' '_get_controller', return_value=None) @mock.patch('cinder.volume.drivers.vmware.volumeops.VMwareVolumeOps.' 
'_create_specs_for_disk_add') def test_attach_disk_to_backing(self, create_spec, get_controller): reconfig_spec = mock.Mock() self.session.vim.client.factory.create.return_value = reconfig_spec disk_add_config_specs = mock.Mock() create_spec.return_value = disk_add_config_specs task = mock.Mock() self.session.invoke_api.return_value = task backing = mock.Mock() size_in_kb = units.Ki disk_type = "thin" adapter_type = "ide" profile_id = mock.sentinel.profile_id vmdk_ds_file_path = mock.sentinel.vmdk_ds_file_path self.vops.attach_disk_to_backing(backing, size_in_kb, disk_type, adapter_type, profile_id, vmdk_ds_file_path) get_controller.assert_called_once_with(backing, adapter_type) self.assertEqual(disk_add_config_specs, reconfig_spec.deviceChange) create_spec.assert_called_once_with( size_in_kb, disk_type, adapter_type, profile_id, vmdk_ds_file_path=vmdk_ds_file_path) self.session.invoke_api.assert_called_once_with(self.session.vim, "ReconfigVM_Task", backing, spec=reconfig_spec) self.session.wait_for_task.assert_called_once_with(task) @mock.patch('cinder.volume.drivers.vmware.volumeops.VMwareVolumeOps.' '_get_controller') @mock.patch('cinder.volume.drivers.vmware.volumeops.VMwareVolumeOps.' '_create_virtual_disk_config_spec') def test_attach_disk_to_backing_existing_controller( self, create_disk_spec, get_controller): key = mock.sentinel.key controller = mock.Mock(key=key) get_controller.return_value = controller reconfig_spec = mock.Mock() self.session.vim.client.factory.create.return_value = reconfig_spec disk_spec = mock.Mock() create_disk_spec.return_value = disk_spec task = mock.Mock() self.session.invoke_api.return_value = task backing = mock.Mock() size_in_kb = units.Ki disk_type = "thin" adapter_type = "ide" profile_id = mock.sentinel.profile_id vmdk_ds_file_path = mock.sentinel.vmdk_ds_file_path self.vops.attach_disk_to_backing(backing, size_in_kb, disk_type, adapter_type, profile_id, vmdk_ds_file_path) get_controller.assert_called_once_with(backing, adapter_type) self.assertEqual([disk_spec], reconfig_spec.deviceChange) create_disk_spec.assert_called_once_with( size_in_kb, disk_type, key, profile_id, vmdk_ds_file_path) self.session.invoke_api.assert_called_once_with(self.session.vim, "ReconfigVM_Task", backing, spec=reconfig_spec) self.session.wait_for_task.assert_called_once_with(task) def test_create_spec_for_disk_remove(self): disk_spec = mock.Mock() self.session.vim.client.factory.create.return_value = disk_spec disk_device = mock.sentinel.disk_device self.vops._create_spec_for_disk_remove(disk_device) self.session.vim.client.factory.create.assert_called_once_with( 'ns0:VirtualDeviceConfigSpec') self.assertEqual('remove', disk_spec.operation) self.assertEqual(disk_device, disk_spec.device) @mock.patch('cinder.volume.drivers.vmware.volumeops.VMwareVolumeOps.' '_create_spec_for_disk_remove') @mock.patch('cinder.volume.drivers.vmware.volumeops.VMwareVolumeOps.' 
'_reconfigure_backing') def test_detach_disk_from_backing(self, reconfigure_backing, create_spec): disk_spec = mock.sentinel.disk_spec create_spec.return_value = disk_spec reconfig_spec = mock.Mock() self.session.vim.client.factory.create.return_value = reconfig_spec backing = mock.sentinel.backing disk_device = mock.sentinel.disk_device self.vops.detach_disk_from_backing(backing, disk_device) create_spec.assert_called_once_with(disk_device) self.session.vim.client.factory.create.assert_called_once_with( 'ns0:VirtualMachineConfigSpec') self.assertEqual([disk_spec], reconfig_spec.deviceChange) reconfigure_backing.assert_called_once_with(backing, reconfig_spec) def test_rename_backing(self): task = mock.sentinel.task self.session.invoke_api.return_value = task backing = mock.sentinel.backing new_name = mock.sentinel.new_name self.vops.rename_backing(backing, new_name) self.session.invoke_api.assert_called_once_with(self.session.vim, "Rename_Task", backing, newName=new_name) self.session.wait_for_task.assert_called_once_with(task) @mock.patch('cinder.volume.drivers.vmware.volumeops.VMwareVolumeOps.' '_get_disk_device') @mock.patch('cinder.volume.drivers.vmware.volumeops.VMwareVolumeOps.' '_reconfigure_backing') def test_update_backing_disk_uuid(self, reconfigure_backing, get_disk_device): disk_spec = mock.Mock() reconfig_spec = mock.Mock() self.session.vim.client.factory.create.side_effect = [disk_spec, reconfig_spec] disk_device = mock.Mock() get_disk_device.return_value = disk_device self.vops.update_backing_disk_uuid(mock.sentinel.backing, mock.sentinel.disk_uuid) get_disk_device.assert_called_once_with(mock.sentinel.backing) self.assertEqual(mock.sentinel.disk_uuid, disk_device.backing.uuid) self.assertEqual('edit', disk_spec.operation) self.assertEqual(disk_device, disk_spec.device) self.assertEqual([disk_spec], reconfig_spec.deviceChange) reconfigure_backing.assert_called_once_with(mock.sentinel.backing, reconfig_spec) exp_factory_create_calls = [mock.call('ns0:VirtualDeviceConfigSpec'), mock.call('ns0:VirtualMachineConfigSpec')] self.assertEqual(exp_factory_create_calls, self.session.vim.client.factory.create.call_args_list) @mock.patch('cinder.volume.drivers.vmware.volumeops.VMwareVolumeOps.' '_get_extra_config_option_values') @mock.patch('cinder.volume.drivers.vmware.volumeops.VMwareVolumeOps.' '_reconfigure_backing') def test_update_backing_extra_config(self, reconfigure_backing, get_extra_config_option_values): reconfig_spec = mock.Mock() self.session.vim.client.factory.create.return_value = reconfig_spec option_values = mock.sentinel.option_values get_extra_config_option_values.return_value = option_values backing = mock.sentinel.backing option_key = mock.sentinel.key option_value = mock.sentinel.value extra_config = {option_key: option_value, volumeops.BACKING_UUID_KEY: mock.sentinel.uuid} self.vops.update_backing_extra_config(backing, extra_config) get_extra_config_option_values.assert_called_once_with( {option_key: option_value}) self.assertEqual(mock.sentinel.uuid, reconfig_spec.instanceUuid) self.assertEqual(option_values, reconfig_spec.extraConfig) reconfigure_backing.assert_called_once_with(backing, reconfig_spec) @mock.patch('cinder.volume.drivers.vmware.volumeops.VMwareVolumeOps.' 
'_reconfigure_backing') def test_update_backing_uuid(self, reconfigure_backing): reconfig_spec = mock.Mock() self.session.vim.client.factory.create.return_value = reconfig_spec backing = mock.sentinel.backing uuid = mock.sentinel.uuid self.vops.update_backing_uuid(backing, uuid) self.assertEqual(mock.sentinel.uuid, reconfig_spec.instanceUuid) reconfigure_backing.assert_called_once_with(backing, reconfig_spec) @mock.patch('cinder.volume.drivers.vmware.volumeops.VMwareVolumeOps.' '_get_disk_device') @mock.patch('cinder.volume.drivers.vmware.volumeops.VMwareVolumeOps.' '_reconfigure_backing') def test_change_backing_profile_to_empty_profile( self, reconfigure_backing, get_disk_device): reconfig_spec = mock.Mock() empty_profile_spec = mock.sentinel.empty_profile_spec disk_spec = mock.Mock() self.session.vim.client.factory.create.side_effect = [ empty_profile_spec, reconfig_spec, disk_spec] disk_device = mock.sentinel.disk_device get_disk_device.return_value = disk_device backing = mock.sentinel.backing self.vops.change_backing_profile(backing, None) self.assertEqual([empty_profile_spec], reconfig_spec.vmProfile) get_disk_device.assert_called_once_with(backing) self.assertEqual(disk_device, disk_spec.device) self.assertEqual('edit', disk_spec.operation) self.assertEqual([empty_profile_spec], disk_spec.profile) self.assertEqual([disk_spec], reconfig_spec.deviceChange) reconfigure_backing.assert_called_once_with(backing, reconfig_spec) @mock.patch('cinder.volume.drivers.vmware.volumeops.VMwareVolumeOps.' '_get_disk_device') @mock.patch('cinder.volume.drivers.vmware.volumeops.VMwareVolumeOps.' '_reconfigure_backing') def test_change_backing_profile( self, reconfigure_backing, get_disk_device): reconfig_spec = mock.Mock() profile_spec = mock.Mock() disk_spec = mock.Mock() self.session.vim.client.factory.create.side_effect = [ profile_spec, reconfig_spec, disk_spec] disk_device = mock.sentinel.disk_device get_disk_device.return_value = disk_device backing = mock.sentinel.backing unique_id = mock.sentinel.unique_id profile_id = mock.Mock(uniqueId=unique_id) self.vops.change_backing_profile(backing, profile_id) self.assertEqual(unique_id, profile_spec.profileId) self.assertEqual([profile_spec], reconfig_spec.vmProfile) get_disk_device.assert_called_once_with(backing) self.assertEqual(disk_device, disk_spec.device) self.assertEqual('edit', disk_spec.operation) self.assertEqual([profile_spec], disk_spec.profile) self.assertEqual([disk_spec], reconfig_spec.deviceChange) reconfigure_backing.assert_called_once_with(backing, reconfig_spec) def test_delete_file(self): file_mgr = mock.sentinel.file_manager self.session.vim.service_content.fileManager = file_mgr task = mock.sentinel.task invoke_api = self.session.invoke_api invoke_api.return_value = task # Test delete file file_path = mock.sentinel.file_path datacenter = mock.sentinel.datacenter self.vops.delete_file(file_path, datacenter) # verify calls invoke_api.assert_called_once_with(self.session.vim, 'DeleteDatastoreFile_Task', file_mgr, name=file_path, datacenter=datacenter) self.session.wait_for_task.assert_called_once_with(task) def test_create_datastore_folder(self): file_manager = mock.sentinel.file_manager self.session.vim.service_content.fileManager = file_manager invoke_api = self.session.invoke_api ds_name = "nfs" folder_path = "test/" datacenter = mock.sentinel.datacenter self.vops.create_datastore_folder(ds_name, folder_path, datacenter) invoke_api.assert_called_once_with(self.session.vim, 'MakeDirectory', file_manager, name="[nfs] test/", 
datacenter=datacenter) def test_create_datastore_folder_with_existing_folder(self): file_manager = mock.sentinel.file_manager self.session.vim.service_content.fileManager = file_manager invoke_api = self.session.invoke_api invoke_api.side_effect = exceptions.FileAlreadyExistsException ds_name = "nfs" folder_path = "test/" datacenter = mock.sentinel.datacenter self.vops.create_datastore_folder(ds_name, folder_path, datacenter) invoke_api.assert_called_once_with(self.session.vim, 'MakeDirectory', file_manager, name="[nfs] test/", datacenter=datacenter) invoke_api.side_effect = None def test_create_datastore_folder_with_invoke_api_error(self): file_manager = mock.sentinel.file_manager self.session.vim.service_content.fileManager = file_manager invoke_api = self.session.invoke_api invoke_api.side_effect = exceptions.VimFaultException( ["FileFault"], "error") ds_name = "nfs" folder_path = "test/" datacenter = mock.sentinel.datacenter self.assertRaises(exceptions.VimFaultException, self.vops.create_datastore_folder, ds_name, folder_path, datacenter) invoke_api.assert_called_once_with(self.session.vim, 'MakeDirectory', file_manager, name="[nfs] test/", datacenter=datacenter) invoke_api.side_effect = None def test_get_path_name(self): path = mock.Mock(spec=object) path_name = mock.sentinel.vm_path_name path.vmPathName = path_name invoke_api = self.session.invoke_api invoke_api.return_value = path backing = mock.sentinel.backing ret = self.vops.get_path_name(backing) self.assertEqual(path_name, ret) invoke_api.assert_called_once_with(vim_util, 'get_object_property', self.session.vim, backing, 'config.files') def test_get_entity_name(self): entity_name = mock.sentinel.entity_name invoke_api = self.session.invoke_api invoke_api.return_value = entity_name entity = mock.sentinel.entity ret = self.vops.get_entity_name(entity) self.assertEqual(entity_name, ret) invoke_api.assert_called_once_with(vim_util, 'get_object_property', self.session.vim, entity, 'name') def test_get_vmdk_path(self): # Setup hardware_devices for test device = mock.Mock() device.__class__.__name__ = 'VirtualDisk' backing = mock.Mock() backing.__class__.__name__ = 'VirtualDiskFlatVer2BackingInfo' backing.fileName = mock.sentinel.vmdk_path device.backing = backing invoke_api = self.session.invoke_api invoke_api.return_value = [device] # Test get_vmdk_path ret = self.vops.get_vmdk_path(backing) self.assertEqual(mock.sentinel.vmdk_path, ret) invoke_api.assert_called_once_with(vim_util, 'get_object_property', self.session.vim, backing, 'config.hardware.device') backing.__class__.__name__ = ' VirtualDiskSparseVer2BackingInfo' self.assertRaises(AssertionError, self.vops.get_vmdk_path, backing) # Test with no disk device. invoke_api.return_value = [] self.assertRaises(vmdk_exceptions.VirtualDiskNotFoundException, self.vops.get_vmdk_path, backing) def test_get_disk_size(self): # Test with valid disk device. device = mock.Mock() device.__class__.__name__ = 'VirtualDisk' disk_size_bytes = 1024 device.capacityInKB = disk_size_bytes / units.Ki invoke_api = self.session.invoke_api invoke_api.return_value = [device] self.assertEqual(disk_size_bytes, self.vops.get_disk_size(mock.sentinel.backing)) # Test with no disk device. 
invoke_api.return_value = [] self.assertRaises(vmdk_exceptions.VirtualDiskNotFoundException, self.vops.get_disk_size, mock.sentinel.backing) def test_create_virtual_disk(self): task = mock.Mock() invoke_api = self.session.invoke_api invoke_api.return_value = task spec = mock.Mock() factory = self.session.vim.client.factory factory.create.return_value = spec disk_mgr = self.session.vim.service_content.virtualDiskManager dc_ref = mock.Mock() vmdk_ds_file_path = mock.Mock() size_in_kb = 1024 adapter_type = 'ide' disk_type = 'thick' self.vops.create_virtual_disk(dc_ref, vmdk_ds_file_path, size_in_kb, adapter_type, disk_type) self.assertEqual(volumeops.VirtualDiskAdapterType.IDE, spec.adapterType) self.assertEqual(volumeops.VirtualDiskType.PREALLOCATED, spec.diskType) self.assertEqual(size_in_kb, spec.capacityKb) invoke_api.assert_called_once_with(self.session.vim, 'CreateVirtualDisk_Task', disk_mgr, name=vmdk_ds_file_path, datacenter=dc_ref, spec=spec) self.session.wait_for_task.assert_called_once_with(task) @mock.patch('cinder.volume.drivers.vmware.volumeops.VMwareVolumeOps.' 'create_virtual_disk') @mock.patch('cinder.volume.drivers.vmware.volumeops.VMwareVolumeOps.' 'delete_file') def test_create_flat_extent_virtual_disk_descriptor(self, delete_file, create_virtual_disk): dc_ref = mock.Mock() path = mock.Mock() size_in_kb = 1024 adapter_type = 'ide' disk_type = 'thick' self.vops.create_flat_extent_virtual_disk_descriptor(dc_ref, path, size_in_kb, adapter_type, disk_type) create_virtual_disk.assert_called_once_with( dc_ref, path.get_descriptor_ds_file_path(), size_in_kb, adapter_type, disk_type) delete_file.assert_called_once_with( path.get_flat_extent_ds_file_path(), dc_ref) def test_copy_vmdk_file(self): task = mock.sentinel.task invoke_api = self.session.invoke_api invoke_api.return_value = task disk_mgr = self.session.vim.service_content.virtualDiskManager src_dc_ref = mock.sentinel.src_dc_ref src_vmdk_file_path = mock.sentinel.src_vmdk_file_path dest_dc_ref = mock.sentinel.dest_dc_ref dest_vmdk_file_path = mock.sentinel.dest_vmdk_file_path self.vops.copy_vmdk_file(src_dc_ref, src_vmdk_file_path, dest_vmdk_file_path, dest_dc_ref) invoke_api.assert_called_once_with(self.session.vim, 'CopyVirtualDisk_Task', disk_mgr, sourceName=src_vmdk_file_path, sourceDatacenter=src_dc_ref, destName=dest_vmdk_file_path, destDatacenter=dest_dc_ref, force=True) self.session.wait_for_task.assert_called_once_with(task) def test_copy_vmdk_file_with_default_dest_datacenter(self): task = mock.sentinel.task invoke_api = self.session.invoke_api invoke_api.return_value = task disk_mgr = self.session.vim.service_content.virtualDiskManager src_dc_ref = mock.sentinel.src_dc_ref src_vmdk_file_path = mock.sentinel.src_vmdk_file_path dest_vmdk_file_path = mock.sentinel.dest_vmdk_file_path self.vops.copy_vmdk_file(src_dc_ref, src_vmdk_file_path, dest_vmdk_file_path) invoke_api.assert_called_once_with(self.session.vim, 'CopyVirtualDisk_Task', disk_mgr, sourceName=src_vmdk_file_path, sourceDatacenter=src_dc_ref, destName=dest_vmdk_file_path, destDatacenter=src_dc_ref, force=True) self.session.wait_for_task.assert_called_once_with(task) def test_move_vmdk_file(self): task = mock.sentinel.task invoke_api = self.session.invoke_api invoke_api.return_value = task disk_mgr = self.session.vim.service_content.virtualDiskManager src_dc_ref = mock.sentinel.src_dc_ref src_vmdk_file_path = mock.sentinel.src_vmdk_file_path dest_dc_ref = mock.sentinel.dest_dc_ref dest_vmdk_file_path = mock.sentinel.dest_vmdk_file_path 
self.vops.move_vmdk_file(src_dc_ref, src_vmdk_file_path, dest_vmdk_file_path, dest_dc_ref=dest_dc_ref) invoke_api.assert_called_once_with(self.session.vim, 'MoveVirtualDisk_Task', disk_mgr, sourceName=src_vmdk_file_path, sourceDatacenter=src_dc_ref, destName=dest_vmdk_file_path, destDatacenter=dest_dc_ref, force=True) self.session.wait_for_task.assert_called_once_with(task) def test_delete_vmdk_file(self): task = mock.sentinel.task invoke_api = self.session.invoke_api invoke_api.return_value = task disk_mgr = self.session.vim.service_content.virtualDiskManager dc_ref = self.session.dc_ref vmdk_file_path = self.session.vmdk_file self.vops.delete_vmdk_file(vmdk_file_path, dc_ref) invoke_api.assert_called_once_with(self.session.vim, 'DeleteVirtualDisk_Task', disk_mgr, name=vmdk_file_path, datacenter=dc_ref) self.session.wait_for_task.assert_called_once_with(task) def test_extend_virtual_disk(self): """Test volumeops.extend_virtual_disk.""" task = mock.sentinel.task invoke_api = self.session.invoke_api invoke_api.return_value = task disk_mgr = self.session.vim.service_content.virtualDiskManager fake_size = 5 fake_size_in_kb = fake_size * units.Mi fake_name = 'fake_volume_0000000001' fake_dc = mock.sentinel.datacenter self.vops.extend_virtual_disk(fake_size, fake_name, fake_dc) invoke_api.assert_called_once_with(self.session.vim, "ExtendVirtualDisk_Task", disk_mgr, name=fake_name, datacenter=fake_dc, newCapacityKb=fake_size_in_kb, eagerZero=False) self.session.wait_for_task.assert_called_once_with(task) @mock.patch('cinder.volume.drivers.vmware.volumeops.VMwareVolumeOps.' '_get_all_clusters') def test_get_cluster_refs(self, get_all_clusters): cls_1 = mock.sentinel.cls_1 cls_2 = mock.sentinel.cls_2 clusters = {"cls_1": cls_1, "cls_2": cls_2} get_all_clusters.return_value = clusters self.assertEqual({"cls_2": cls_2}, self.vops.get_cluster_refs(["cls_2"])) @mock.patch('cinder.volume.drivers.vmware.volumeops.VMwareVolumeOps.' '_get_all_clusters') def test_get_cluster_refs_with_invalid_cluster(self, get_all_clusters): cls_1 = mock.sentinel.cls_1 cls_2 = mock.sentinel.cls_2 clusters = {"cls_1": cls_1, "cls_2": cls_2} get_all_clusters.return_value = clusters self.assertRaises(vmdk_exceptions.ClusterNotFoundException, self.vops.get_cluster_refs, ["cls_1", "cls_3"]) def test_get_cluster_hosts(self): host_1 = mock.sentinel.host_1 host_2 = mock.sentinel.host_2 hosts = mock.Mock(ManagedObjectReference=[host_1, host_2]) self.session.invoke_api.return_value = hosts cluster = mock.sentinel.cluster ret = self.vops.get_cluster_hosts(cluster) self.assertEqual([host_1, host_2], ret) self.session.invoke_api.assert_called_once_with(vim_util, 'get_object_property', self.session.vim, cluster, 'host') def test_get_cluster_hosts_with_no_host(self): self.session.invoke_api.return_value = None cluster = mock.sentinel.cluster ret = self.vops.get_cluster_hosts(cluster) self.assertEqual([], ret) self.session.invoke_api.assert_called_once_with(vim_util, 'get_object_property', self.session.vim, cluster, 'host') @mock.patch('cinder.volume.drivers.vmware.volumeops.VMwareVolumeOps.' 
'continue_retrieval', return_value=None) def test_get_all_clusters(self, continue_retrieval): prop_1 = mock.Mock(val='test_cluster_1') cls_1 = mock.Mock(propSet=[prop_1], obj=mock.sentinel.mor_1) prop_2 = mock.Mock(val='/test_cluster_2') cls_2 = mock.Mock(propSet=[prop_2], obj=mock.sentinel.mor_2) retrieve_result = mock.Mock(objects=[cls_1, cls_2]) self.session.invoke_api.return_value = retrieve_result ret = self.vops._get_all_clusters() exp = {'test_cluster_1': mock.sentinel.mor_1, '/test_cluster_2': mock.sentinel.mor_2} self.assertEqual(exp, ret) self.session.invoke_api.assert_called_once_with( vim_util, 'get_objects', self.session.vim, 'ClusterComputeResource', self.MAX_OBJECTS) continue_retrieval.assert_called_once_with(retrieve_result) def test_get_entity_by_inventory_path(self): self.session.invoke_api.return_value = mock.sentinel.ref path = mock.sentinel.path ret = self.vops.get_entity_by_inventory_path(path) self.assertEqual(mock.sentinel.ref, ret) self.session.invoke_api.assert_called_once_with( self.session.vim, "FindByInventoryPath", self.session.vim.service_content.searchIndex, inventoryPath=path) def test_get_inventory_path(self): path = mock.sentinel.path self.session.invoke_api.return_value = path entity = mock.sentinel.entity self.assertEqual(path, self.vops.get_inventory_path(entity)) self.session.invoke_api.assert_called_once_with( vim_util, 'get_inventory_path', self.session.vim, entity) def test_get_disk_devices(self): disk_device = mock.Mock() disk_device.__class__.__name__ = 'VirtualDisk' controller_device = mock.Mock() controller_device.__class__.__name__ = 'VirtualLSILogicController' devices = mock.Mock() devices.__class__.__name__ = "ArrayOfVirtualDevice" devices.VirtualDevice = [disk_device, controller_device] self.session.invoke_api.return_value = devices vm = mock.sentinel.vm self.assertEqual([disk_device], self.vops._get_disk_devices(vm)) self.session.invoke_api.assert_called_once_with( vim_util, 'get_object_property', self.session.vim, vm, 'config.hardware.device') def _create_disk_device(self, file_name, uuid=None): backing = mock.Mock(fileName=file_name) backing.__class__.__name__ = 'VirtualDiskFlatVer2BackingInfo' backing.uuid = uuid return mock.Mock(backing=backing) def test_mark_backing_as_template(self): backing = mock.Mock() self.vops.mark_backing_as_template(backing) self.session.invoke_api.assert_called_once_with( self.session.vim, 'MarkAsTemplate', backing) @mock.patch('cinder.volume.drivers.vmware.volumeops.VMwareVolumeOps.' '_get_disk_devices') def test_get_disk_device(self, get_disk_devices): dev_1 = self._create_disk_device('[ds1] foo/foo.vmdk') dev_2 = self._create_disk_device('[ds1] foo/foo_1.vmdk') get_disk_devices.return_value = [dev_1, dev_2] vm = mock.sentinel.vm self.assertEqual(dev_2, self.vops.get_disk_device(vm, '[ds1] foo/foo_1.vmdk')) get_disk_devices.assert_called_once_with(vm) @mock.patch('cinder.volume.drivers.vmware.volumeops.VMwareVolumeOps.' 'get_entity_by_inventory_path') def test_copy_datastore_file(self, get_entity_by_inventory_path): get_entity_by_inventory_path.return_value = mock.sentinel.src_dc_ref self.session.invoke_api.return_value = mock.sentinel.task vsphere_url = "vsphere://hostname/folder/openstack_glance/img_uuid?" 
\ "dcPath=dc1&dsName=ds1" self.vops.copy_datastore_file(vsphere_url, mock.sentinel.dest_dc_ref, mock.sentinel.dest_ds_file_path) get_entity_by_inventory_path.assert_called_once_with('dc1') self.session.invoke_api.assert_called_once_with( self.session.vim, 'CopyDatastoreFile_Task', self.session.vim.service_content.fileManager, sourceName='[ds1] openstack_glance/img_uuid', sourceDatacenter=mock.sentinel.src_dc_ref, destinationName=mock.sentinel.dest_ds_file_path, destinationDatacenter=mock.sentinel.dest_dc_ref) self.session.wait_for_task.assert_called_once_with(mock.sentinel.task) @ddt.data(volumeops.VirtualDiskType.EAGER_ZEROED_THICK, volumeops.VirtualDiskType.PREALLOCATED, volumeops.VirtualDiskType.THIN) def test_create_fcd_backing_spec(self, disk_type): spec = mock.Mock() self.session.vim.client.factory.create.return_value = spec ds_ref = mock.sentinel.ds_ref ret = self.vops._create_fcd_backing_spec(disk_type, ds_ref) if disk_type == volumeops.VirtualDiskType.PREALLOCATED: prov_type = 'lazyZeroedThick' else: prov_type = disk_type self.assertEqual(prov_type, ret.provisioningType) self.assertEqual(ds_ref, ret.datastore) self.session.vim.client.factory.create.assert_called_once_with( 'ns0:VslmCreateSpecDiskFileBackingSpec') @mock.patch('cinder.volume.drivers.vmware.volumeops.VMwareVolumeOps.' '_create_fcd_backing_spec') @mock.patch('cinder.volume.drivers.vmware.volumeops.VMwareVolumeOps.' '_create_profile_spec') def test_create_fcd(self, create_profile_spec, create_fcd_backing_spec): spec = mock.Mock() self.session.vim.client.factory.create.return_value = spec backing_spec = mock.sentinel.backing_spec create_fcd_backing_spec.return_value = backing_spec profile_spec = mock.sentinel.profile_spec create_profile_spec.return_value = profile_spec task = mock.sentinel.task self.session.invoke_api.return_value = task task_info = mock.Mock() fcd_id = mock.sentinel.fcd_id task_info.result.config.id.id = fcd_id self.session.wait_for_task.return_value = task_info name = mock.sentinel.name size_mb = 1024 ds_ref_val = mock.sentinel.ds_ref_val ds_ref = vmware_fake.ManagedObjectReference(value=ds_ref_val) disk_type = mock.sentinel.disk_type profile_id = mock.sentinel.profile_id ret = self.vops.create_fcd( name, size_mb, ds_ref, disk_type, profile_id=profile_id) self.assertEqual(fcd_id, ret.fcd_id) self.assertEqual(ds_ref_val, ret.ds_ref_val) self.session.vim.client.factory.create.assert_called_once_with( 'ns0:VslmCreateSpec') create_fcd_backing_spec.assert_called_once_with(disk_type, ds_ref) self.assertEqual(1024, spec.capacityInMB) self.assertEqual(name, spec.name) self.assertEqual(backing_spec, spec.backingSpec) self.assertEqual([profile_spec], spec.profile) create_profile_spec.assert_called_once_with( self.session.vim.client.factory, profile_id) self.session.invoke_api.assert_called_once_with( self.session.vim, 'CreateDisk_Task', self.session.vim.service_content.vStorageObjectManager, spec=spec) self.session.wait_for_task.assert_called_once_with(task) def test_delete_fcd(self): task = mock.sentinel.task self.session.invoke_api.return_value = task fcd_location = mock.Mock() fcd_id = mock.sentinel.fcd_id fcd_location.id.return_value = fcd_id ds_ref = mock.sentinel.ds_ref fcd_location.ds_ref.return_value = ds_ref self.vops.delete_fcd(fcd_location) self.session.invoke_api.assert_called_once_with( self.session.vim, 'DeleteVStorageObject_Task', self.session.vim.service_content.vStorageObjectManager, id=fcd_id, datastore=ds_ref) self.session.wait_for_task(task) 
@mock.patch('cinder.volume.drivers.vmware.volumeops.VMwareVolumeOps.' '_create_fcd_backing_spec') @mock.patch('cinder.volume.drivers.vmware.volumeops.VMwareVolumeOps.' '_create_profile_spec') def test_clone_fcd(self, create_profile_spec, create_fcd_backing_spec): spec = mock.Mock() self.session.vim.client.factory.create.return_value = spec backing_spec = mock.sentinel.backing_spec create_fcd_backing_spec.return_value = backing_spec profile_spec = mock.sentinel.profile_spec create_profile_spec.return_value = profile_spec task = mock.sentinel.task self.session.invoke_api.return_value = task task_info = mock.Mock() fcd_id = mock.sentinel.fcd_id task_info.result.config.id.id = fcd_id self.session.wait_for_task.return_value = task_info fcd_location = mock.Mock() fcd_id = mock.sentinel.fcd_id fcd_location.id.return_value = fcd_id ds_ref = mock.sentinel.ds_ref fcd_location.ds_ref.return_value = ds_ref name = mock.sentinel.name dest_ds_ref_val = mock.sentinel.dest_ds_ref_val dest_ds_ref = vmware_fake.ManagedObjectReference(value=dest_ds_ref_val) disk_type = mock.sentinel.disk_type profile_id = mock.sentinel.profile_id ret = self.vops.clone_fcd( name, fcd_location, dest_ds_ref, disk_type, profile_id=profile_id) self.assertEqual(fcd_id, ret.fcd_id) self.assertEqual(dest_ds_ref_val, ret.ds_ref_val) self.session.vim.client.factory.create.assert_called_once_with( 'ns0:VslmCloneSpec') create_fcd_backing_spec.assert_called_once_with(disk_type, dest_ds_ref) self.assertEqual(name, spec.name) self.assertEqual(backing_spec, spec.backingSpec) self.assertEqual([profile_spec], spec.profile) create_profile_spec.assert_called_once_with( self.session.vim.client.factory, profile_id) self.session.invoke_api.assert_called_once_with( self.session.vim, 'CloneVStorageObject_Task', self.session.vim.service_content.vStorageObjectManager, id=fcd_id, datastore=ds_ref, spec=spec) self.session.wait_for_task.assert_called_once_with(task) def test_extend_fcd(self): task = mock.sentinel.task self.session.invoke_api.return_value = task fcd_location = mock.Mock() fcd_id = mock.sentinel.fcd_id fcd_location.id.return_value = fcd_id ds_ref = mock.sentinel.ds_ref fcd_location.ds_ref.return_value = ds_ref new_size_mb = 1024 self.vops.extend_fcd(fcd_location, new_size_mb) self.session.invoke_api.assert_called_once_with( self.session.vim, 'ExtendDisk_Task', self.session.vim.service_content.vStorageObjectManager, id=fcd_id, datastore=ds_ref, newCapacityInMB=new_size_mb) self.session.wait_for_task(task) def test_register_disk(self): fcd = mock.Mock() fcd_id = mock.sentinel.fcd_id fcd.config.id = mock.Mock(id=fcd_id) self.session.invoke_api.return_value = fcd vmdk_url = mock.sentinel.vmdk_url name = mock.sentinel.name ds_ref_val = mock.sentinel.ds_ref_val ds_ref = vmware_fake.ManagedObjectReference(value=ds_ref_val) ret = self.vops.register_disk(vmdk_url, name, ds_ref) self.assertEqual(fcd_id, ret.fcd_id) self.assertEqual(ds_ref_val, ret.ds_ref_val) self.session.invoke_api.assert_called_once_with( self.session.vim, 'RegisterDisk', self.session.vim.service_content.vStorageObjectManager, path=vmdk_url, name=name) @mock.patch('cinder.volume.drivers.vmware.volumeops.VMwareVolumeOps.' '_create_controller_config_spec') @mock.patch('cinder.volume.drivers.vmware.volumeops.VMwareVolumeOps.' 
'_reconfigure_backing') def test_attach_fcd(self, reconfigure_backing, create_controller_spec): reconfig_spec = mock.Mock() self.session.vim.client.factory.create.return_value = reconfig_spec spec = mock.Mock() create_controller_spec.return_value = spec task = mock.sentinel.task self.session.invoke_api.return_value = task backing = mock.sentinel.backing fcd_location = mock.Mock() fcd_id = mock.sentinel.fcd_id fcd_location.id.return_value = fcd_id ds_ref = mock.Mock() fcd_location.ds_ref.return_value = ds_ref self.vops.attach_fcd(backing, fcd_location) self.session.vim.client.factory.create.assert_called_once_with( 'ns0:VirtualMachineConfigSpec') create_controller_spec.assert_called_once_with( volumeops.VirtualDiskAdapterType.LSI_LOGIC) self.assertEqual([spec], reconfig_spec.deviceChange) reconfigure_backing.assert_called_once_with(backing, reconfig_spec) self.session.invoke_api.assert_called_once_with( self.session.vim, 'AttachDisk_Task', backing, diskId=fcd_id, datastore=ds_ref) self.session.wait_for_task.assert_called_once_with(task) def test_detach_fcd(self): task = mock.sentinel.task self.session.invoke_api.return_value = task backing = mock.sentinel.backing fcd_location = mock.Mock() fcd_id = mock.sentinel.fcd_id fcd_location.id.return_value = fcd_id self.vops.detach_fcd(backing, fcd_location) self.session.invoke_api.assert_called_once_with( self.session.vim, 'DetachDisk_Task', backing, diskId=fcd_id) self.session.wait_for_task.assert_called_once_with(task) def test_create_fcd_snapshot(self): task = mock.sentinel.task self.session.invoke_api.return_value = task task_info = mock.Mock() fcd_snap_id = mock.sentinel.fcd_snap_id task_info.result.id = fcd_snap_id self.session.wait_for_task.return_value = task_info fcd_location = mock.Mock() fcd_id = mock.sentinel.fcd_id fcd_location.id.return_value = fcd_id ds_ref = mock.Mock() fcd_location.ds_ref.return_value = ds_ref description = mock.sentinel.description ret = self.vops.create_fcd_snapshot(fcd_location, description) self.assertEqual(fcd_snap_id, ret.snap_id) self.assertEqual(fcd_location, ret.fcd_loc) self.session.invoke_api.assert_called_once_with( self.session.vim, 'VStorageObjectCreateSnapshot_Task', self.session.vim.service_content.vStorageObjectManager, id=fcd_id, datastore=ds_ref, description=description) self.session.wait_for_task.assert_called_once_with(task) def test_delete_fcd_snapshot(self): task = mock.sentinel.task self.session.invoke_api.return_value = task fcd_location = mock.Mock() fcd_id = mock.sentinel.fcd_id fcd_location.id.return_value = fcd_id ds_ref = mock.Mock() fcd_location.ds_ref.return_value = ds_ref fcd_snap_id = mock.sentinel.fcd_snap_id fcd_snap_loc = mock.Mock(fcd_loc=fcd_location) fcd_snap_loc.id.return_value = fcd_snap_id self.vops.delete_fcd_snapshot(fcd_snap_loc) self.session.invoke_api.assert_called_once_with( self.session.vim, 'DeleteSnapshot_Task', self.session.vim.service_content.vStorageObjectManager, id=fcd_id, datastore=ds_ref, snapshotId=fcd_snap_id) self.session.wait_for_task(task) @mock.patch('cinder.volume.drivers.vmware.volumeops.VMwareVolumeOps.' 
'_create_profile_spec') def test_create_fcd_from_snapshot(self, create_profile_spec): task = mock.sentinel.task self.session.invoke_api.return_value = task profile_spec = mock.sentinel.profile_spec create_profile_spec.return_value = profile_spec task_info = mock.Mock() fcd_id = mock.sentinel.fcd_id task_info.result.config.id.id = fcd_id self.session.wait_for_task.return_value = task_info fcd_location = mock.Mock() fcd_id = mock.sentinel.fcd_id fcd_location.id.return_value = fcd_id ds_ref_val = mock.sentinel.ds_ref_val ds_ref = vmware_fake.ManagedObjectReference(value=ds_ref_val) fcd_location.ds_ref.return_value = ds_ref fcd_snap_id = mock.sentinel.fcd_snap_id fcd_snap_loc = mock.Mock(fcd_loc=fcd_location) fcd_snap_loc.id.return_value = fcd_snap_id name = mock.sentinel.name profile_id = mock.sentinel.profile_id ret = self.vops.create_fcd_from_snapshot( fcd_snap_loc, name, profile_id=profile_id) self.assertEqual(fcd_id, ret.fcd_id) self.assertEqual(ds_ref_val, ret.ds_ref_val) create_profile_spec.assert_called_once_with( self.session.vim.client.factory, profile_id) self.session.invoke_api.assert_called_once_with( self.session.vim, 'CreateDiskFromSnapshot_Task', self.session.vim.service_content.vStorageObjectManager, id=fcd_id, datastore=ds_ref, snapshotId=fcd_snap_id, name=name, profile=[profile_spec]) self.session.wait_for_task.assert_called_once_with(task) @mock.patch('cinder.volume.drivers.vmware.volumeops.VMwareVolumeOps.' '_create_profile_spec') @ddt.data(mock.sentinel.profile_id, None) def test_update_fcd_policy(self, profile_id, create_profile_spec): cf = self.session.vim.client.factory if profile_id: profile_spec = mock.sentinel.profile_spec create_profile_spec.return_value = profile_spec else: empty_profile_spec = mock.sentinel.empty_profile_spec cf.create.return_value = empty_profile_spec task = mock.sentinel.task self.session.invoke_api.return_value = task fcd_location = mock.Mock() fcd_id = mock.sentinel.fcd_id fcd_location.id.return_value = fcd_id ds_ref = mock.Mock() fcd_location.ds_ref.return_value = ds_ref self.vops.update_fcd_policy(fcd_location, profile_id) if profile_id: create_profile_spec.assert_called_once_with(cf, profile_id) exp_profile_spec = profile_spec else: cf.create.assert_called_once_with( 'ns0:VirtualMachineEmptyProfileSpec') exp_profile_spec = empty_profile_spec self.session.invoke_api.assert_called_once_with( self.session.vim, 'UpdateVStorageObjectPolicy_Task', self.session.vim.service_content.vStorageObjectManager, id=fcd_id, datastore=ds_ref, profile=[exp_profile_spec]) self.session.wait_for_task(task) class VirtualDiskPathTest(test.TestCase): """Unit tests for VirtualDiskPath.""" def setUp(self): super(VirtualDiskPathTest, self).setUp() self._path = volumeops.VirtualDiskPath("nfs", "A/B/", "disk") def test_get_datastore_file_path(self): self.assertEqual("[nfs] A/B/disk.vmdk", self._path.get_datastore_file_path("nfs", "A/B/disk.vmdk")) def test_get_descriptor_file_path(self): self.assertEqual("A/B/disk.vmdk", self._path.get_descriptor_file_path()) def test_get_descriptor_ds_file_path(self): self.assertEqual("[nfs] A/B/disk.vmdk", self._path.get_descriptor_ds_file_path()) class FlatExtentVirtualDiskPathTest(test.TestCase): """Unit tests for FlatExtentVirtualDiskPath.""" def setUp(self): super(FlatExtentVirtualDiskPathTest, self).setUp() self._path = volumeops.FlatExtentVirtualDiskPath("nfs", "A/B/", "disk") def test_get_flat_extent_file_path(self): self.assertEqual("A/B/disk-flat.vmdk", self._path.get_flat_extent_file_path()) def 
test_get_flat_extent_ds_file_path(self): self.assertEqual("[nfs] A/B/disk-flat.vmdk", self._path.get_flat_extent_ds_file_path()) class VirtualDiskTypeTest(test.TestCase): """Unit tests for VirtualDiskType.""" def test_is_valid(self): self.assertTrue(volumeops.VirtualDiskType.is_valid("thick")) self.assertTrue(volumeops.VirtualDiskType.is_valid("thin")) self.assertTrue(volumeops.VirtualDiskType.is_valid("eagerZeroedThick")) self.assertFalse(volumeops.VirtualDiskType.is_valid("preallocated")) def test_validate(self): volumeops.VirtualDiskType.validate("thick") volumeops.VirtualDiskType.validate("thin") volumeops.VirtualDiskType.validate("eagerZeroedThick") self.assertRaises(vmdk_exceptions.InvalidDiskTypeException, volumeops.VirtualDiskType.validate, "preallocated") def test_get_virtual_disk_type(self): self.assertEqual("preallocated", volumeops.VirtualDiskType.get_virtual_disk_type( "thick")) self.assertEqual("thin", volumeops.VirtualDiskType.get_virtual_disk_type( "thin")) self.assertEqual("eagerZeroedThick", volumeops.VirtualDiskType.get_virtual_disk_type( "eagerZeroedThick")) self.assertRaises(vmdk_exceptions.InvalidDiskTypeException, volumeops.VirtualDiskType.get_virtual_disk_type, "preallocated") class VirtualDiskAdapterTypeTest(test.TestCase): """Unit tests for VirtualDiskAdapterType.""" def test_is_valid(self): self.assertTrue(volumeops.VirtualDiskAdapterType.is_valid("lsiLogic")) self.assertTrue(volumeops.VirtualDiskAdapterType.is_valid("busLogic")) self.assertTrue(volumeops.VirtualDiskAdapterType.is_valid( "lsiLogicsas")) self.assertTrue( volumeops.VirtualDiskAdapterType.is_valid("paraVirtual")) self.assertTrue(volumeops.VirtualDiskAdapterType.is_valid("ide")) self.assertFalse(volumeops.VirtualDiskAdapterType.is_valid("pvscsi")) def test_validate(self): volumeops.VirtualDiskAdapterType.validate("lsiLogic") volumeops.VirtualDiskAdapterType.validate("busLogic") volumeops.VirtualDiskAdapterType.validate("lsiLogicsas") volumeops.VirtualDiskAdapterType.validate("paraVirtual") volumeops.VirtualDiskAdapterType.validate("ide") self.assertRaises(vmdk_exceptions.InvalidAdapterTypeException, volumeops.VirtualDiskAdapterType.validate, "pvscsi") def test_get_adapter_type(self): self.assertEqual("lsiLogic", volumeops.VirtualDiskAdapterType.get_adapter_type( "lsiLogic")) self.assertEqual("busLogic", volumeops.VirtualDiskAdapterType.get_adapter_type( "busLogic")) self.assertEqual("lsiLogic", volumeops.VirtualDiskAdapterType.get_adapter_type( "lsiLogicsas")) self.assertEqual("lsiLogic", volumeops.VirtualDiskAdapterType.get_adapter_type( "paraVirtual")) self.assertEqual("ide", volumeops.VirtualDiskAdapterType.get_adapter_type( "ide")) self.assertRaises(vmdk_exceptions.InvalidAdapterTypeException, volumeops.VirtualDiskAdapterType.get_adapter_type, "pvscsi") class ControllerTypeTest(test.TestCase): """Unit tests for ControllerType.""" def test_get_controller_type(self): self.assertEqual(volumeops.ControllerType.LSI_LOGIC, volumeops.ControllerType.get_controller_type( 'lsiLogic')) self.assertEqual(volumeops.ControllerType.BUS_LOGIC, volumeops.ControllerType.get_controller_type( 'busLogic')) self.assertEqual(volumeops.ControllerType.LSI_LOGIC_SAS, volumeops.ControllerType.get_controller_type( 'lsiLogicsas')) self.assertEqual(volumeops.ControllerType.PARA_VIRTUAL, volumeops.ControllerType.get_controller_type( 'paraVirtual')) self.assertEqual(volumeops.ControllerType.IDE, volumeops.ControllerType.get_controller_type( 'ide')) self.assertRaises(vmdk_exceptions.InvalidAdapterTypeException, 
volumeops.ControllerType.get_controller_type, 'invalid_type') def test_is_scsi_controller(self): self.assertTrue(volumeops.ControllerType.is_scsi_controller( volumeops.ControllerType.LSI_LOGIC)) self.assertTrue(volumeops.ControllerType.is_scsi_controller( volumeops.ControllerType.BUS_LOGIC)) self.assertTrue(volumeops.ControllerType.is_scsi_controller( volumeops.ControllerType.LSI_LOGIC_SAS)) self.assertTrue(volumeops.ControllerType.is_scsi_controller( volumeops.ControllerType.PARA_VIRTUAL)) self.assertFalse(volumeops.ControllerType.is_scsi_controller( volumeops.ControllerType.IDE)) class FcdLocationTest(test.TestCase): """Unit tests for FcdLocation.""" def test_create(self): fcd_id = mock.sentinel.fcd_id fcd_id_obj = mock.Mock(id=fcd_id) ds_ref_val = mock.sentinel.ds_ref_val ds_ref = vmware_fake.ManagedObjectReference(value=ds_ref_val) fcd_loc = volumeops.FcdLocation.create(fcd_id_obj, ds_ref) self.assertEqual(fcd_id, fcd_loc.fcd_id) self.assertEqual(ds_ref_val, fcd_loc.ds_ref_val) def test_provider_location(self): fcd_loc = volumeops.FcdLocation('123', 'ds1') self.assertEqual('123@ds1', fcd_loc.provider_location()) def test_ds_ref(self): fcd_loc = volumeops.FcdLocation('123', 'ds1') ds_ref = fcd_loc.ds_ref() self.assertEqual('ds1', ds_ref.value) def test_id(self): id_obj = mock.Mock() cf = mock.Mock() cf.create.return_value = id_obj fcd_loc = volumeops.FcdLocation('123', 'ds1') fcd_id = fcd_loc.id(cf) self.assertEqual('123', fcd_id.id) cf.create.assert_called_once_with('ns0:ID') def test_from_provider_location(self): fcd_loc = volumeops.FcdLocation.from_provider_location('123@ds1') self.assertEqual('123', fcd_loc.fcd_id) self.assertEqual('ds1', fcd_loc.ds_ref_val) def test_str(self): fcd_loc = volumeops.FcdLocation('123', 'ds1') self.assertEqual('123@ds1', str(fcd_loc)) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315577.3111205 cinder-27.0.0/cinder/tests/unit/volume/drivers/yadro/0000775000175000017500000000000000000000000022557 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/volume/drivers/yadro/__init__.py0000664000175000017500000000000000000000000024656 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/volume/drivers/yadro/test_tatlin_client.py0000664000175000017500000004607700000000000027037 0ustar00zuulzuul00000000000000# Copyright (C) 2021-2022 YADRO. # All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
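# --- Editorial note (not part of the original test module) ----------------
# The tests below exercise the Tatlin client purely through a patched
# TatlinAccessAPI.send_request: each MockResponse (imported from
# test_tatlin_common) stands in for one REST round trip, pairing a JSON body
# with an HTTP status code. A hedged sketch of the recurring pattern, reusing
# names defined in this module (POOL_LIST_RESPONCE, POOL_NAME, POOL_ID); the
# test name itself is illustrative only:
#
#     @mock.patch.object(TatlinAccessAPI, 'send_request')
#     def test_pool_lookup_sketch(self, send_request):
#         # One MockResponse per expected API call, consumed in order.
#         send_request.side_effect = [MockResponse(POOL_LIST_RESPONCE,
#                                                  codes.ok)]
#         self.assertEqual(POOL_ID,
#                          self.client.get_pool_id_by_name(POOL_NAME))
# ---------------------------------------------------------------------------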
from unittest import mock from unittest import TestCase import requests from requests import codes from cinder.exception import NotAuthorized from cinder.exception import VolumeBackendAPIException from cinder.tests.unit.fake_constants import VOLUME_NAME from cinder.tests.unit.volume.drivers.yadro.test_tatlin_common import \ DummyVolume from cinder.tests.unit.volume.drivers.yadro.test_tatlin_common import \ MockResponse from cinder.volume.drivers.yadro import tatlin_api from cinder.volume.drivers.yadro.tatlin_client import InitTatlinClient from cinder.volume.drivers.yadro.tatlin_client import TatlinAccessAPI from cinder.volume.drivers.yadro.tatlin_client import TatlinClientCommon from cinder.volume.drivers.yadro.tatlin_client import TatlinClientV23 from cinder.volume.drivers.yadro.tatlin_client import TatlinClientV25 from cinder.volume.drivers.yadro.tatlin_exception import TatlinAPIException VOL_ID = 'cinder-volume-id' LUN_ID = 75 HOST_ID = 'host-id' HOST_ID_2 = 'host-id-2' HOST_GROUP_ID = 'group-id' HOST_GROUP_NAME = 'cinder-group' HOST_IQN = 'iqn.1994-05.com.redhat:12345' POOL_NAME = 'cinder-pool-name' POOL_ID = 'cinder-pool-id' ALL_HOSTS_RESP = [ { "version": "c7216b2e14c8edc718e1664178f75777", "id": HOST_ID_2, "name": "cinder-host-2", "port_type": "fc", "initiators": ["21:00:34:80:0d:74:17:30", "21:00:34:80:0d:74:17:31"], }, { "version": "216d08e98f8d4a695b6632fc3c79b1cc", "id": HOST_ID, "name": "cinder-host-1", "port_type": "fc", "initiators": ['21:00:00:24:ff:7f:35:b7', '21:00:00:24:ff:7f:35:b6'], }, { "version": "301fc82d355a691248b1e1dd8164f5e5", "id": HOST_ID, "name": "cinder-host-1", "port_type": "iscsi", "initiators": [HOST_IQN], "auth": {"auth_type": "none"}, }, { "version": "401fc82d355a691248b1e1dd8164f5e5", "id": HOST_ID_2, "name": "cinder-host-2", "port_type": "iscsi", "initiators": ["iqn.1994-05.com.redhat:5daf702e9655"], "auth": {"auth_type": "none"}, }, ] RES_MAPPING_RESP = [ { "resource_id": "62bbb941-ba4a-4101-927d-e527ce5ee011", "host_id": "5e37d335-8fff-4aee-840a-34749301a16a", "mapped_lun_id": 1 }, { "resource_id": VOL_ID, "host_id": HOST_ID, "mapped_lun_id": LUN_ID }, { "resource_id": "62bbb941-ba4a-4101-927d-e527ce5ee011", "host_id": "5e37d335-8fff-4aee-840a-34749301a16a", "mapped_lun_id": 1 }, ] RES_MAPPING_RESP2 = [ { "resource_id": "62bbb941-ba4a-4101-927d-e527ce5ee011", "host_id": "5e37d335-8fff-4aee-840a-34749301a16a", "mapped_lun_id": 1 }, { "resource_id": "62bbb941-ba4a-4101-927d-e527ce5ee011", "host_id": "5e37d335-8fff-4aee-840a-34749301a16a", "mapped_lun_id": 1 }, ] POOL_LIST_RESPONCE = [ { "id": POOL_ID, "name": POOL_NAME, "status": "ready" }, { "id": "123", "name": "some-name", "status": "ready" } ] ERROR_VOLUME = [ { "ptyId": "f28ee814-22ed-4bb0-8b6a-f7ce9075034a", "id": "f28ee814-22ed-4bb0-8b6a-f7ce9075034a", "name": "cinder-volume-f28ee814-22ed-4bb0-8b6a-f7ce9075034a", "type": "block", "poolId": "92c05782-7529-479f-8db7-b9435e1e9a3d", "size": 16106127360, "maxModifySize": 95330557231104, "status": "error", } ] READY_VOLUME = [ { "ptyId": "f28ee814-22ed-4bb0-8b6a-f7ce9075034a", "id": "f28ee814-22ed-4bb0-8b6a-f7ce9075034a", "name": "cinder-volume-f28ee814-22ed-4bb0-8b6a-f7ce9075034a", "type": "block", "poolId": "92c05782-7529-479f-8db7-b9435e1e9a3d", "size": 16106127360, "maxModifySize": 95330557231104, "status": "ready", } ] RESOURCE_INFORMATION = { "ptyId": "62bbb941-ba4a-4101-927d-e527ce5ee011", "id": "62bbb941-ba4a-4101-927d-e527ce5ee011", "name": "res1", "type": "block", "poolId": "c46584c5-3113-4cc7-8a72-f9262f32c508", "size": 1073741824, 
"maxModifySize": 5761094647808, "status": "ready", "stat": { "used_capacity": 1073741824, "mapped_blocks": 0, "dedup_count": 0, "reduction_ratio": 0 }, "lbaFormat": "4kn", "volume_id": "pty-vol-62bbb941-ba4a-4101-927d-e527ce5ee011", "wwid": "naa.614529011650000c4000800000000004", "lun_id": LUN_ID, "cached": "true", "rCacheMode": "enabled", "wCacheMode": "enabled", "ports": [ { "port": "fc21", "port_status": "healthy", "port_status_desc": "resource is available on all storage controllers", "running": [ "sp-1", "sp-0" ], "wwn": [ "10:00:14:52:90:00:03:91", "10:00:14:52:90:00:03:11" ], "lun": "scsi-lun-fc21-4", "volume": "pty-vol-62bbb941-ba4a-4101-927d-e527ce5ee011", "lun_index": LUN_ID }, { "port": "fc20", "port_status": "healthy", "port_status_desc": "resource is available on all storage controllers", "running": [ "sp-1", "sp-0" ], "wwn": [ "10:00:14:52:90:00:03:10", "10:00:14:52:90:00:03:90" ], "lun": "scsi-lun-fc20-4", "volume": "pty-vol-62bbb941-ba4a-4101-927d-e527ce5ee011", "lun_index": LUN_ID } ], "volume_path": "/dev/mapper/dmc-89382c6c-7cf9-4ff8-bdbb-f438d20c960a", "blockSize": "4kn", "replication": { "is_enabled": False } } VOL_PORTS_RESP = [ { "port": "p01", "port_status": "healthy", "running": ["sp-0", "sp-1"], "wwn": ["iqn.2017-01.com.yadro:tatlin:sn.09082200a51002"], "lun_index": LUN_ID, }, { "port": "p11", "port_status": "healthy", "running": ["sp-0", "sp-1"], "wwn": ["iqn.2017-01.com.yadro:tatlin:sn.09082200a51002"], "lun_index": LUN_ID, }, { "port": "p10", "port_status": "healthy", "running": ["sp-0", "sp-1"], "wwn": ["iqn.2017-01.com.yadro:tatlin:sn.09082200a51002"], "lun_index": LUN_ID, }, { "port": "p00", "port_status": "healthy", "running": ["sp-0", "sp-1"], "wwn": ["iqn.2017-01.com.yadro:tatlin:sn.09082200a51002"], "lun_index": LUN_ID }, ] ALL_HOST_GROUP_RESP = [ { "version": "20c28d21549fb7ec5777637f72f50043", "id": HOST_GROUP_ID, "name": HOST_GROUP_NAME, "host_ids": [ HOST_ID, ], "tags": None, "comment": "" } ] class TatlinClientTest(TestCase): @mock.patch.object(TatlinAccessAPI, '_authenticate_access') def setUp(self, auth_access): self.access_api = TatlinAccessAPI('127.0.0.1', 443, 'user', 'passwd', False) self.client = TatlinClientV25(self.access_api, api_retry_count=1, wait_interval=1, wait_retry_count=1) @mock.patch.object(TatlinAccessAPI, '_authenticate_access') @mock.patch.object(TatlinAccessAPI, 'get_tatlin_version') def test_different_client_versions(self, version, auth): version.side_effect = [(2, 2), (2, 3), (2, 4), (2, 5), (3, 0)] args = ['1.2.3.4', 443, 'username', 'password', True, 1, 1, 1] self.assertIsInstance(InitTatlinClient(*args), TatlinClientV23) self.assertIsInstance(InitTatlinClient(*args), TatlinClientV23) self.assertIsInstance(InitTatlinClient(*args), TatlinClientV25) self.assertIsInstance(InitTatlinClient(*args), TatlinClientV25) self.assertIsInstance(InitTatlinClient(*args), TatlinClientV25) @mock.patch.object(requests, 'packages') @mock.patch.object(requests, 'session') def test_authenticate_success(self, session, packages): session().post.return_value = MockResponse({'token': 'ABC'}, codes.ok) TatlinAccessAPI('127.0.0.1', 443, 'user', 'passwd', False) session().post.assert_called_once_with( 'https://127.0.0.1:443/auth/login', data={'user': 'user', 'secret': 'passwd'}, verify=False ) session().headers.update.assert_any_call({'X-Auth-Token': 'ABC'}) TatlinAccessAPI('127.0.0.1', 443, 'user', 'passwd', True) session().headers.update.assert_any_call({'X-Auth-Token': 'ABC'}) @mock.patch.object(requests, 'session') def 
test_authenticate_fail(self, session): session().post.return_value = MockResponse( {}, codes.unauthorized) self.assertRaises(NotAuthorized, TatlinAccessAPI, '127.0.0.1', 443, 'user', 'passwd', False) @mock.patch.object(TatlinAccessAPI, '_authenticate_access') @mock.patch.object(requests, 'session') def test_send_request(self, session, auth): session().request.side_effect = [ MockResponse({}, codes.ok), MockResponse({}, codes.unauthorized), MockResponse({}, codes.ok)] access_api = TatlinAccessAPI('127.0.0.1', 443, 'user', 'passwd', True) access_api.session = session() access_api.send_request(tatlin_api.ALL_RESOURCES, {}, 'GET') access_api.session.request.assert_called_once_with( 'GET', 'https://127.0.0.1:443/' + tatlin_api.ALL_RESOURCES, json={}, verify=True ) access_api.send_request(tatlin_api.ALL_RESOURCES, {}, 'GET') self.assertEqual(auth.call_count, 2) access_api.session.request.assert_called_with( 'GET', 'https://127.0.0.1:443/' + tatlin_api.ALL_RESOURCES, json={}, verify=True ) @mock.patch.object(TatlinAccessAPI, '_authenticate_access') @mock.patch.object(TatlinAccessAPI, 'send_request') def test_get_tatlin_version(self, send_request, auth): send_request.return_value = MockResponse({'build-version': '2.3.0-44'}, codes.ok) access_api = TatlinAccessAPI('127.0.0.1', 443, 'user', 'passwd', True) self.assertEqual(access_api.get_tatlin_version(), (2, 3)) send_request.assert_called_once() self.assertEqual(access_api.get_tatlin_version(), (2, 3)) send_request.assert_called_once() @mock.patch.object(TatlinClientCommon, '_is_vol_on_host') @mock.patch.object(TatlinAccessAPI, 'send_request') def test_add_volume_to_host(self, send_request, is_on_host): vol = DummyVolume('62bbb941-ba4a-4101-927d-e527ce5ee011') # Success volume already on host is_on_host.side_effect = [True] self.client.add_vol_to_host(vol.name_id, 10) send_request.assert_not_called() # Success volume added is_on_host.side_effect = [False, True] send_request.side_effect = [(MockResponse({}, codes.ok)), ] self.client.add_vol_to_host(vol.name_id, 10) # Error adding volume to host is_on_host.side_effect = [False] send_request.side_effect = [ TatlinAPIException(codes.internal_server_error, ''), ] with self.assertRaises(TatlinAPIException): self.client.add_vol_to_host(vol.name_id, 10) # Added successfull but not on host is_on_host.side_effect = [False, False] send_request.side_effect = [(MockResponse({}, codes.ok)), ] with self.assertRaises(VolumeBackendAPIException): self.client.add_vol_to_host(vol.name_id, 10) @mock.patch.object(TatlinClientCommon, '_is_vol_on_host') @mock.patch.object(TatlinAccessAPI, 'send_request') def test_remove_volume_to_host(self, send_request, is_on_host): vol = DummyVolume('62bbb941-ba4a-4101-927d-e527ce5ee011') # Success volume not on host is_on_host.side_effect = [False] self.client.remove_vol_from_host(vol.name_id, 10) send_request.assert_not_called() # Success volume removed is_on_host.side_effect = [True, False] send_request.side_effect = [(MockResponse({}, codes.ok)), ] self.client.remove_vol_from_host(vol.name_id, 10) # Remove from host rise an error is_on_host.side_effect = [True, False] send_request.side_effect = [ TatlinAPIException(codes.internal_server_error, ''), ] with self.assertRaises(TatlinAPIException): self.client.remove_vol_from_host(vol.name_id, 10) # Removed successfull but still on host is_on_host.side_effect = [True, True] send_request.side_effect = [(MockResponse({}, codes.ok)), ] with self.assertRaises(VolumeBackendAPIException): self.client.remove_vol_from_host(vol.name_id, 10) 
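    # --- Editorial note (not part of the original test module) ----------------
    # The add/remove-host tests above also rely on another unittest.mock
    # detail: when an item in a side_effect list is an exception instance
    # (e.g. TatlinAPIException(...)), that call raises it instead of
    # returning a value. A tiny standard-library sketch, runnable if the
    # comment markers are removed:
    #
    #     from unittest import mock
    #
    #     m = mock.Mock()
    #     m.side_effect = [ValueError("boom"), 42]
    #     try:
    #         m()                  # first call raises ValueError
    #     except ValueError:
    #         pass
    #     assert m() == 42         # second call returns the next item
    # ---------------------------------------------------------------------------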
@mock.patch.object(TatlinAccessAPI, 'send_request') def test_is_volume_exist_success(self, send_request): send_request.side_effect = [ (MockResponse(RESOURCE_INFORMATION, codes.ok)), ] vol = DummyVolume('62bbb941-ba4a-4101-927d-e527ce5ee011') result = self.client.is_volume_exists(vol.name_id) self.assertTrue(result) @mock.patch.object(TatlinAccessAPI, 'send_request') def test_is_volume_exist_not_found(self, send_request): send_request.return_value = MockResponse( RESOURCE_INFORMATION, codes.not_found) vol = DummyVolume('62bbb941-ba4a-4101-927d-e527ce5ee011') result = self.client.is_volume_exists(vol.name_id) self.assertFalse(result) @mock.patch.object(TatlinAccessAPI, 'send_request') def test_is_volume_exist_unknown_error(self, send_request): send_request.return_value = MockResponse( {}, codes.internal_server_error) vol = DummyVolume('62bbb941-ba4a-4101-927d-e527ce5ee011') with self.assertRaises(VolumeBackendAPIException): self.client.is_volume_exists(vol.name_id) @mock.patch.object(TatlinAccessAPI, 'send_request') def test_delete_volume(self, send_request): vol = DummyVolume('62bbb941-ba4a-4101-927d-e527ce5ee011') # Success delete send_request.side_effect = [(MockResponse({}, codes.ok)), ] self.client.delete_volume(vol.name_id) # Volume does't exist send_request.side_effect = [(MockResponse({}, 404)), ] self.client.delete_volume(vol.name_id) # Volume delete error send_request.side_effect = [ (MockResponse({}, codes.internal_server_error)), ] with self.assertRaises(TatlinAPIException): self.client.delete_volume(vol.name_id) @mock.patch.object(TatlinAccessAPI, 'send_request') def test_extend_volume(self, send_request): vol = DummyVolume('62bbb941-ba4a-4101-927d-e527ce5ee011') # Success delete send_request.side_effect = [(MockResponse({}, codes.ok)), ] self.client.extend_volume(vol.name_id, 20000) # Error send_request.side_effect = [ (MockResponse({}, codes.internal_server_error)), ] with self.assertRaises(VolumeBackendAPIException): self.client.extend_volume(vol.name_id, 20000) @mock.patch.object(TatlinAccessAPI, 'send_request') def test_is_volume_ready(self, send_request): send_request.side_effect = [(MockResponse(READY_VOLUME, codes.ok)), ] self.assertTrue(self.client.is_volume_ready(VOLUME_NAME)) send_request.side_effect = [ (MockResponse(ERROR_VOLUME, codes.ok)) ] self.assertFalse(self.client.is_volume_ready(VOLUME_NAME)) @mock.patch.object(TatlinAccessAPI, 'send_request') def test_get_host_group_id_success(self, send_request): send_request.return_value = MockResponse( ALL_HOST_GROUP_RESP, codes.ok) self.assertEqual(self.client.get_host_group_id(HOST_GROUP_NAME), HOST_GROUP_ID) @mock.patch.object(TatlinAccessAPI, 'send_request') def test_get_volume_ports(self, send_request): send_request.return_value = MockResponse( VOL_PORTS_RESP, requests.codes.ok) self.assertEqual(VOL_PORTS_RESP, self.client.get_volume_ports(VOL_ID)) @mock.patch.object(TatlinAccessAPI, 'send_request') def test_get_volume_ports_negative(self, send_request): send_request.return_value = MockResponse( {}, requests.codes.internal_server_error) self.assertRaises(VolumeBackendAPIException, self.client.get_volume_ports, VOL_ID) @mock.patch.object(TatlinClientCommon, 'get_volume_ports') def test_get_resource_ports_array_empty(self, vol_ports): vol_ports.return_value = [] self.assertListEqual([], self.client.get_resource_ports_array(VOL_ID)) @mock.patch.object(TatlinClientCommon, 'get_volume_ports') def test_get_resource_ports_array(self, vol_ports): vol_ports.return_value = VOL_PORTS_RESP self.assertListEqual( ['p00', 'p01', 
'p10', 'p11'], sorted(self.client.get_resource_ports_array(VOL_ID))) @mock.patch.object(TatlinAccessAPI, 'send_request') def test_get_resource_mapping_negative(self, send_request): send_request.return_value = MockResponse( {}, codes.internal_server_error) self.assertRaises(VolumeBackendAPIException, self.client.get_resource_mapping) @mock.patch.object(TatlinAccessAPI, 'send_request') def test_get_pool_id_by_name(self, send_request, *args): send_request.return_value = MockResponse(POOL_LIST_RESPONCE, codes.ok) self.assertEqual(self.client.get_pool_id_by_name(POOL_NAME), POOL_ID) @mock.patch.object(TatlinAccessAPI, 'send_request') def test_get_all_hosts(self, send_request): send_request.return_value = MockResponse({}, codes.ok) self.client.get_all_hosts() @mock.patch.object(TatlinAccessAPI, 'send_request') def test_get_all_hosts_negative(self, send_request): send_request.return_value = MockResponse( {}, codes.internal_server_error) self.assertRaises(VolumeBackendAPIException, self.client.get_all_hosts) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/volume/drivers/yadro/test_tatlin_common.py0000664000175000017500000004427400000000000027046 0ustar00zuulzuul00000000000000# Copyright (C) 2021-2022 YADRO. # All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
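# --- Editorial note (not part of the original test module) ----------------
# The driver tests below build a TatlinCommonVolumeDriver against a fake
# oslo.config Configuration (see get_fake_tatlin_config() in this module) and
# a TatlinClientCommon whose HTTP layer is mocked. Several tests also override
# oslo_concurrency's lock_path, presumably because the driver paths they hit
# take file-based locks. A hedged sketch of the configuration pattern only:
#
#     config = configuration.Configuration(tatlin_opts,
#                                          configuration.SHARED_CONF_GROUP)
#     config.san_ip = '127.0.0.1'      # plain attribute assignment overrides
#     config.pool_name = 'cinder-pool' # the option value for the test run
# ---------------------------------------------------------------------------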
from unittest import mock from unittest.mock import MagicMock from unittest.mock import Mock from unittest import TestCase from cinder.cmd import manage as cinder_manage from cinder.exception import ManageExistingInvalidReference from cinder.exception import VolumeBackendAPIException from cinder.tests.unit.fake_constants import VOLUME_NAME from cinder.volume import configuration from cinder.volume.drivers.yadro.tatlin_client import TatlinAccessAPI from cinder.volume.drivers.yadro.tatlin_client import TatlinClientCommon from cinder.volume.drivers.yadro.tatlin_common import tatlin_opts from cinder.volume.drivers.yadro.tatlin_common import TatlinCommonVolumeDriver from cinder.volume.drivers.yadro.tatlin_exception import TatlinAPIException from cinder.volume.drivers.yadro.tatlin_utils import TatlinVolumeConnections OSMGR_ISCSI_PORTS = [ { "id": "ip-sp-1-98039b04091a", "meta": { "tatlin-node": "sp-1", "type": "ip", "port-type": "active" }, "params": { "dhcp": False, "ifname": "p30", "physical-port": "p30", "ipaddress": "172.20.101.65", "netmask": "24", "mtu": "1500", "gateway": "172.20.101.1", "roles": "", "iflabel": "", "wwpn": "" } }, { "id": "ip-sp-0-b8599f1caf1b", "meta": { "tatlin-node": "sp-0", "type": "ip", "port-type": "active" }, "params": { "dhcp": False, "ifname": "p31", "physical-port": "p31", "ipaddress": "172.20.101.66", "netmask": "24", "mtu": "1500", "gateway": "172.20.101.1", "roles": "", "iflabel": "", "wwpn": "" } }, { "id": "ip-sp-1-98039b04091b", "meta": { "tatlin-node": "sp-1", "type": "ip", "port-type": "active" }, "params": { "dhcp": False, "ifname": "p31", "physical-port": "p31", "ipaddress": "172.20.101.67", "netmask": "24", "mtu": "1500", "gateway": "172.20.101.1", "roles": "", "iflabel": "", "wwpn": "" } }, { "id": "ip-sp-0-b8599f1caf1a", "meta": { "tatlin-node": "sp-0", "type": "ip", "port-type": "active" }, "params": { "dhcp": False, "ifname": "p30", "physical-port": "p30", "ipaddress": "172.20.101.64", "netmask": "24", "mtu": "1500", "gateway": "172.20.101.1", "roles": "", "iflabel": "", "wwpn": "" } }, ] ISCSI_PORT_PORTALS = { 'p30': ['172.20.101.65:3260', '172.20.101.64:3260'], 'p31': ['172.20.101.66:3260', '172.20.101.67:3260'] } RES_MAPPING_RESP = [ { "resource_id": "62bbb941-ba4a-4101-927d-e527ce5ee011", "host_id": "5e37d335-8fff-4aee-840a-34749301a16a", "mapped_lun_id": 1 } ] POOL_LIST_RESPONCE = [ { "id": "7e259486-deb8-4d11-8cb0-e2c5874aaa5e", "name": "cinder-pool", "status": "ready" } ] OK_POOL_ID = '7e259486-deb8-4d11-8cb0-e2c5874aaa5e' WRONG_POOL_ID = 'wrong-id' ERROR_VOLUME = [ { "ptyId": "f28ee814-22ed-4bb0-8b6a-f7ce9075034a", "id": "f28ee814-22ed-4bb0-8b6a-f7ce9075034a", "name": "cinder-volume-f28ee814-22ed-4bb0-8b6a-f7ce9075034a", "type": "block", "poolId": "92c05782-7529-479f-8db7-b9435e1e9a3d", "size": 16106127360, "maxModifySize": 95330557231104, "status": "error", } ] READY_VOLUME = [ { "ptyId": "f28ee814-22ed-4bb0-8b6a-f7ce9075034a", "id": "f28ee814-22ed-4bb0-8b6a-f7ce9075034a", "name": "cinder-volume-f28ee814-22ed-4bb0-8b6a-f7ce9075034a", "type": "block", "poolId": "92c05782-7529-479f-8db7-b9435e1e9a3d", "size": 16106127360, "maxModifySize": 95330557231104, "status": "ready", } ] ONLINE_VOLUME = [ { "ptyId": "f28ee814-22ed-4bb0-8b6a-f7ce9075034a", "id": "f28ee814-22ed-4bb0-8b6a-f7ce9075034a", "name": "cinder-volume-f28ee814-22ed-4bb0-8b6a-f7ce9075034a", "type": "block", "poolId": "92c05782-7529-479f-8db7-b9435e1e9a3d", "size": 16106127360, "maxModifySize": 95330557231104, "status": "online", } ] RESOURCE_INFORMATION = { "ptyId": 
"62bbb941-ba4a-4101-927d-e527ce5ee011", "id": "62bbb941-ba4a-4101-927d-e527ce5ee011", "name": "res1", "type": "block", "poolId": "c46584c5-3113-4cc7-8a72-f9262f32c508", "size": 1073741824, "maxModifySize": 5761094647808, "status": "ready", "stat": { "used_capacity": 1073741824, "mapped_blocks": 0, "dedup_count": 0, "reduction_ratio": 0 }, "lbaFormat": "4kn", "volume_id": "pty-vol-62bbb941-ba4a-4101-927d-e527ce5ee011", "wwid": "naa.614529011650000c4000800000000004", "lun_id": "4", "cached": "true", "rCacheMode": "enabled", "wCacheMode": "enabled", "ports": [ { "port": "fc21", "port_status": "healthy", "port_status_desc": "resource is available on all storage controllers", "running": [ "sp-1", "sp-0" ], "wwn": [ "10:00:14:52:90:00:03:91", "10:00:14:52:90:00:03:11" ], "lun": "scsi-lun-fc21-4", "volume": "pty-vol-62bbb941-ba4a-4101-927d-e527ce5ee011", "lun_index": "4" }, { "port": "fc20", "port_status": "healthy", "port_status_desc": "resource is available on all storage controllers", "running": [ "sp-1", "sp-0" ], "wwn": [ "10:00:14:52:90:00:03:10", "10:00:14:52:90:00:03:90" ], "lun": "scsi-lun-fc20-4", "volume": "pty-vol-62bbb941-ba4a-4101-927d-e527ce5ee011", "lun_index": "4" } ], "volume_path": "/dev/mapper/dmc-89382c6c-7cf9-4ff8-bdbb-f438d20c960a", "blockSize": "4kn", "replication": { "is_enabled": False } } POOL_NAME = 'cinder-pool' class MockResponse: def __init__(self, json_data, status_code): self.json_data = json_data self.status_code = status_code def json(self): return self.json_data class DummyVolume(object): def __init__(self, volid, volsize=1): self.id = volid self._name_id = None self.size = volsize self.status = None self.__volume_type_id = 1 self.attach_status = None self.volume_attachment = None self.provider_location = None self.name = None self.metadata = {} @property def name_id(self): return self.id if not self._name_id else self._name_id @property def name(self): return self.name_id @property def volume_type_id(self): return self.__volume_type_id @name_id.setter def name_id(self, value): self._name_id = value @name.setter def name(self, value): self._name_id = value @volume_type_id.setter def volume_type_id(self, value): self.__volume_type_id = value def get_fake_tatlin_config(): config = configuration.Configuration( tatlin_opts, configuration.SHARED_CONF_GROUP) config.san_ip = '127.0.0.1' config.san_password = 'pwd' config.san_login = 'admin' config.pool_name = POOL_NAME config.host_group = 'cinder-group' config.tat_api_retry_count = 1 config.wait_interval = 1 config.wait_retry_count = 3 config.chap_username = 'chap_user' config.chap_password = 'chap_passwd' config.state_path = '/tmp' return config class TatlinCommonVolumeDriverTest(TestCase): @mock.patch.object(TatlinVolumeConnections, 'create_store') @mock.patch.object(TatlinAccessAPI, '_authenticate_access') def setUp(self, auth_access, create_store): access_api = TatlinAccessAPI('127.0.0.1', '443', 'user', 'passwd', False) access_api._authenticate_access = MagicMock() self.client = TatlinClientCommon(access_api, api_retry_count=1, wait_interval=1, wait_retry_count=3) self.driver = TatlinCommonVolumeDriver( configuration=get_fake_tatlin_config()) self.driver._get_tatlin_client = MagicMock() self.driver._get_tatlin_client.return_value = self.client self.driver.do_setup(None) @mock.patch.object(TatlinClientCommon, 'delete_volume') @mock.patch.object(TatlinClientCommon, 'is_volume_exists') def test_delete_volume_ok(self, is_volume_exist, delete_volume): cinder_manage.cfg.CONF.set_override('lock_path', '/tmp/locks', 
group='oslo_concurrency') is_volume_exist.side_effect = [True, False, False] self.driver.delete_volume(DummyVolume(VOLUME_NAME)) @mock.patch.object(TatlinClientCommon, 'delete_volume') @mock.patch.object(TatlinClientCommon, 'is_volume_exists') def test_delete_volume_ok_404(self, is_volume_exist, delete_volume): cinder_manage.cfg.CONF.set_override('lock_path', '/tmp/locks', group='oslo_concurrency') is_volume_exist.side_effect = [False] self.driver.delete_volume(DummyVolume(VOLUME_NAME)) @mock.patch.object(TatlinClientCommon, 'delete_volume') @mock.patch.object(TatlinClientCommon, 'is_volume_exists') def test_delete_volume_error_500(self, is_volume_exist, delete_volume): cinder_manage.cfg.CONF.set_override('lock_path', '/tmp/locks', group='oslo_concurrency') is_volume_exist.return_value = True delete_volume.side_effect = TatlinAPIException(500, 'ERROR') with self.assertRaises(VolumeBackendAPIException): self.driver.delete_volume(DummyVolume(VOLUME_NAME)) @mock.patch.object(TatlinCommonVolumeDriver, '_update_qos') @mock.patch.object(TatlinClientCommon, 'is_volume_ready') @mock.patch.object(TatlinClientCommon, 'extend_volume') @mock.patch.object(TatlinClientCommon, 'is_volume_exists') def test_extend_volume_ok(self, is_volume_exist, extend_volume, is_volume_ready, update_qos): cinder_manage.cfg.CONF.set_override('lock_path', '/tmp/locks', group='oslo_concurrency') is_volume_ready.return_value = True is_volume_exist.return_value = True self.driver.extend_volume(DummyVolume(VOLUME_NAME), 10) @mock.patch('time.sleep') @mock.patch.object(TatlinCommonVolumeDriver, '_update_qos') @mock.patch.object(TatlinClientCommon, 'is_volume_ready') @mock.patch.object(TatlinClientCommon, 'extend_volume') @mock.patch.object(TatlinClientCommon, 'is_volume_exists') def test_extend_volume_error_not_ready(self, is_volume_exist, extend_volume, is_volume_ready, update_qos, sleeper): cinder_manage.cfg.CONF.set_override('lock_path', '/tmp/locks', group='oslo_concurrency') is_volume_ready.return_value = False is_volume_exist.return_value = True with self.assertRaises(VolumeBackendAPIException): self.driver.extend_volume(DummyVolume(VOLUME_NAME), 10) @mock.patch.object(TatlinClientCommon, 'is_volume_ready', return_value=True) def test_wait_volume_ready_success(self, is_ready): self.driver.wait_volume_ready(DummyVolume('cinder_volume')) @mock.patch.object(TatlinCommonVolumeDriver, '_update_qos') @mock.patch.object(TatlinAccessAPI, 'send_request') def test_succeess_manage_existing(self, sendMock, qosMock): sendMock.side_effect = [ (MockResponse([{'id': '1', 'poolId': OK_POOL_ID}], 200)), (MockResponse(POOL_LIST_RESPONCE, 200)) ] self.driver.manage_existing(DummyVolume(VOLUME_NAME), { 'source-name': 'existing-resource' }) @mock.patch.object(TatlinAccessAPI, 'send_request') def test_fail_manage_existing_volume_not_found(self, sendMock): self.driver.tatlin_api._send_request = Mock() sendMock.side_effect = [ (MockResponse([{}], 404)), ] with self.assertRaises(ManageExistingInvalidReference): self.driver.manage_existing(DummyVolume('new-vol-id'), { 'source-name': 'existing-resource' }) self.driver.tatlin_api.get_volume_info.assert_called_once() self.driver.tatlin_api.get_pool_id_by_name.assert_not_called() @mock.patch.object(TatlinCommonVolumeDriver, '_update_qos') @mock.patch.object(TatlinAccessAPI, 'send_request') def test_fail_manage_existing_wrong_pool(self, sendMock, qosMock): sendMock.side_effect = [ (MockResponse([{'id': '1', 'poolId': WRONG_POOL_ID}], 200)), (MockResponse(POOL_LIST_RESPONCE, 200)) ] with 
self.assertRaises(ManageExistingInvalidReference): self.driver.manage_existing(DummyVolume('new-vol-id'), { 'source-name': 'existing-resource' }) self.driver.tatlin_api.get_volume_info.assert_called_once() self.driver.tatlin_api.get_pool_id_by_name.assert_called_once() @mock.patch.object(TatlinClientCommon, 'get_resource_count') @mock.patch.object(TatlinAccessAPI, 'send_request') def test_success_create_volume(self, send_requst, object_count): self.driver._stats['overall_resource_count'] = 1 object_count.side_effect = [(1, 1)] send_requst.side_effect = [ (MockResponse(POOL_LIST_RESPONCE, 200)), # Get pool id (MockResponse({}, 200)), # Create volume (MockResponse(READY_VOLUME, 200)), # Is volume ready (MockResponse(READY_VOLUME, 200)) # Is volume ready ] self.driver._update_qos = Mock() self.driver.create_volume(DummyVolume(VOLUME_NAME)) @mock.patch.object(TatlinClientCommon, 'get_resource_count') @mock.patch.object(TatlinAccessAPI, 'send_request') def test_fail_create_volume_400(self, send_request, object_count): self.driver._stats['overall_resource_count'] = 1 object_count.side_effect = [(1, 1)] send_request.side_effect = [ (MockResponse(POOL_LIST_RESPONCE, 200)), (MockResponse({}, 500)), (MockResponse({}, 400)) ] with self.assertRaises(VolumeBackendAPIException): self.driver.create_volume(DummyVolume(VOLUME_NAME)) self.driver.tatlin_api.create_volume.assert_called_once() @mock.patch('time.sleep') @mock.patch.object(TatlinClientCommon, 'get_resource_count') @mock.patch.object(TatlinAccessAPI, 'send_request') def test_fail_volume_not_ready_create_volume(self, sendMock, volume_count, sleeper): self.driver._stats['overall_resource_count'] = 1 volume_count.side_effect = [(1, 1)] sendMock.side_effect = [ (MockResponse(POOL_LIST_RESPONCE, 200)), (MockResponse({}, 200)), (MockResponse(ERROR_VOLUME, 200)), (MockResponse(ERROR_VOLUME, 200)), (MockResponse(ERROR_VOLUME, 200)), ] with self.assertRaises(VolumeBackendAPIException): self.driver.create_volume(DummyVolume(VOLUME_NAME)) @mock.patch.object(TatlinCommonVolumeDriver, '_get_ports_portals') @mock.patch.object(TatlinAccessAPI, 'send_request') def test_fail_create_export(self, sendMock, portsMock): sendMock.side_effect = [ (MockResponse(OSMGR_ISCSI_PORTS, 200)), ] portsMock.side_effect = [ ISCSI_PORT_PORTALS ] self.driver._is_all_ports_assigned = Mock(return_value=True) with self.assertRaises(NotImplementedError): self.driver.create_export(None, DummyVolume(VOLUME_NAME), None) @mock.patch.object(TatlinAccessAPI, 'send_request') def test_find_mapped_lun(self, sendMock): sendMock.side_effect = [ (MockResponse(RES_MAPPING_RESP, 200)), ] self.driver.find_current_host = Mock( return_value='5e37d335-8fff-4aee-840a-34749301a16a') self.driver._find_mapped_lun( '62bbb941-ba4a-4101-927d-e527ce5ee011', '') @mock.patch.object(TatlinCommonVolumeDriver, '_update_qos') @mock.patch.object(TatlinClientCommon, 'add_vol_to_host') @mock.patch.object(TatlinClientCommon, 'is_volume_exists', return_value=True) @mock.patch.object(TatlinAccessAPI, 'send_request') def test_add_volume_to_host(self, *args): vol = DummyVolume('62bbb941-ba4a-4101-927d-e527ce5ee011') self.driver.add_volume_to_host( vol, '5e37d335-8fff-4aee-840a-34749301a16a' ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/volume/drivers/yadro/test_tatlin_fc.py0000664000175000017500000003564200000000000026145 0ustar00zuulzuul00000000000000# Copyright (C) 2021-2022 YADRO. # All rights reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import unittest from unittest import mock from cinder import exception from cinder.tests.unit.volume.drivers.yadro import test_tatlin_client as tc from cinder.volume import configuration from cinder.volume.drivers.yadro import tatlin_client from cinder.volume.drivers.yadro import tatlin_common from cinder.volume.drivers.yadro import tatlin_fc from cinder.volume.drivers.yadro import tatlin_utils FC_PORTS_RESP = [ { "id": "fc-sp-0-1000145290000320", "meta": {"tatlin-node": "sp-0", "type": "fc", "port-type": "active"}, "params": {"ifname": "fc40", "wwpn": "10:00:14:52:90:00:03:20"} }, { "id": "fc-sp-0-1000145290000321", "meta": {"tatlin-node": "sp-0", "type": "fc", "port-type": "active"}, "params": {"ifname": "fc41", "wwpn": "10:00:14:52:90:00:03:21"} }, { "id": "fc-sp-0-1000145290000310", "meta": {"tatlin-node": "sp-0", "type": "fc", "port-type": "active"}, "params": {"ifname": "fc20", "wwpn": "10:00:14:52:90:00:03:10"} }, { "id": "fc-sp-0-1000145290000311", "meta": {"tatlin-node": "sp-0", "type": "fc", "port-type": "active"}, "params": {"ifname": "fc21", "wwpn": "10:00:14:52:90:00:03:11"} }, { "id": "fc-sp-1-1000145290000390", "meta": {"tatlin-node": "sp-1", "type": "fc", "port-type": "active"}, "params": {"ifname": "fc20", "wwpn": "10:00:14:52:90:00:03:90"} }, { "id": "fc-sp-1-1000145290000391", "meta": {"tatlin-node": "sp-1", "type": "fc", "port-type": "active"}, "params": {"ifname": "fc21", "wwpn": "10:00:14:52:90:00:03:91"} }, { "id": "fc-sp-1-10001452900003a0", "meta": {"tatlin-node": "sp-1", "type": "fc", "port-type": "active"}, "params": {"ifname": "fc40", "wwpn": "10:00:14:52:90:00:03:a0"} }, { "id": "fc-sp-1-10001452900003a1", "meta": {"tatlin-node": "sp-1", "type": "fc", "port-type": "active"}, "params": {"ifname": "fc41", "wwpn": "10:00:14:52:90:00:03:a1"} }, ] FC_PORTS_PORTALS = { 'fc21': ['10:00:14:52:90:00:03:11', '10:00:14:52:90:00:03:91'], 'fc20': ['10:00:14:52:90:00:03:10', '10:00:14:52:90:00:03:90'], } FC_TARGET_WWNS = [ '1000145290000390', '1000145290000311', '1000145290000310', '1000145290000391', ] FC_VOL_PORTS_RESP = [ { "port": "fc21", "port_status": "healthy", "running": ["sp-0", "sp-1"], "wwn": ["10:00:14:52:90:00:03:11", "10:00:14:52:90:00:03:91"], "lun_index": tc.LUN_ID, }, { "port": "fc20", "port_status": "healthy", "running": ["sp-0", "sp-1"], "wwn": ["10:00:14:52:90:00:03:10", "10:00:14:52:90:00:03:90"], "lun_index": tc.LUN_ID, }, { "port": "fc40", "port_status": "healthy", "running": ["sp-0", "sp-1"], "wwn": ["10:00:14:52:90:00:03:09", "10:00:14:52:90:00:03:89"], "lun_index": tc.LUN_ID, }, ] HOST_WWNS = [ '21000024ff7f35b7', '21000024ff7f35b6', ] INITIATOR_TARGET_MAP = { '21000024ff7f35b7': FC_TARGET_WWNS, '21000024ff7f35b6': FC_TARGET_WWNS, } FC_CONNECTOR = {'wwpns': HOST_WWNS, 'host': 'myhost'} FC_CONNECTOR_2 = {'wwpns': ['123', '456'], 'host': 'myhost'} VOLUME_DATA = { 'discard': False, 'target_discovered': True, 'target_lun': tc.LUN_ID, 'target_wwn': [ '10:00:14:52:90:00:03:11', '10:00:14:52:90:00:03:91', 
'10:00:14:52:90:00:03:10', '10:00:14:52:90:00:03:90', ], 'initiator_target_map': INITIATOR_TARGET_MAP, } def get_fake_tatlin_config(): config = configuration.Configuration( tatlin_common.tatlin_opts, configuration.SHARED_CONF_GROUP) config.san_ip = '127.0.0.1' config.san_password = 'pwd' config.san_login = 'admin' config.pool_name = tc.POOL_NAME config.host_group = 'cinder-group' config.tat_api_retry_count = 1 config.wait_interval = 1 config.wait_retry_count = 3 config.chap_username = 'chap_user' config.chap_password = 'chap_passwd' config.state_path = '/tmp' config.export_ports = 'fc20,fc21' return config class TatlinFCVolumeDriverTest(unittest.TestCase): @mock.patch.object(tatlin_utils.TatlinVolumeConnections, 'create_store') @mock.patch.object(tatlin_client.TatlinAccessAPI, '_authenticate_access') def setUp(self, auth_access, create_store): access_api = tatlin_client.TatlinAccessAPI( '127.0.0.1', '443', 'user', 'passwd', False) access_api._authenticate_access = mock.MagicMock() self.client = tatlin_client.TatlinClientCommon( access_api, api_retry_count=1, wait_interval=1, wait_retry_count=1) mock.patch.object(tatlin_client.TatlinAccessAPI, '_authenticate_access') self.driver = tatlin_fc.TatlinFCVolumeDriver( configuration=get_fake_tatlin_config()) self.driver._get_tatlin_client = mock.MagicMock() self.driver._get_tatlin_client.return_value = self.client self.driver.do_setup(None) @mock.patch.object(tatlin_fc.fczm_utils, 'add_fc_zone') @mock.patch.object(tatlin_common.TatlinCommonVolumeDriver, '_is_cinder_host_connection') @mock.patch.object(tatlin_fc.TatlinFCVolumeDriver, '_create_volume_data') @mock.patch.object(tatlin_common.TatlinCommonVolumeDriver, '_find_mapped_lun') @mock.patch.object(tatlin_common.TatlinCommonVolumeDriver, 'add_volume_to_host') @mock.patch.object(tatlin_fc.TatlinFCVolumeDriver, 'find_current_host') def test_initialize_connection(self, find_current_host, add_volume_to_host, find_mapped_lun, create_volume_data, is_cinder_connection, add_fc_zone): find_current_host.return_value = tc.HOST_ID find_mapped_lun.return_value = tc.LUN_ID is_cinder_connection.return_value = False create_volume_data.return_value = VOLUME_DATA volume = tc.DummyVolume(tc.VOL_ID) connector = FC_CONNECTOR data = self.driver.initialize_connection(volume, FC_CONNECTOR) self.assertDictEqual( data, {'driver_volume_type': 'fibre_channel', 'data': VOLUME_DATA} ) find_current_host.assert_called_once() add_volume_to_host.assert_called_once_with(volume, tc.HOST_ID) is_cinder_connection.assert_called_once_with(connector) create_volume_data.assert_called_once_with(volume, connector) add_fc_zone.assert_called_once_with(data) @mock.patch.object(tatlin_fc.TatlinFCVolumeDriver, '_create_volume_data') @mock.patch.object(tatlin_common.TatlinCommonVolumeDriver, 'add_volume_to_host') @mock.patch.object(tatlin_common.TatlinCommonVolumeDriver, '_find_mapped_lun') @mock.patch.object(tatlin_fc.TatlinFCVolumeDriver, 'find_current_host') @mock.patch.object(tatlin_utils.TatlinVolumeConnections, 'increment') @mock.patch.object(tatlin_common.TatlinCommonVolumeDriver, '_is_cinder_host_connection') def test_initialize_connection_cinder_attachement(self, is_cinder_connection, increment, *args): is_cinder_connection.return_value = True volume = tc.DummyVolume(tc.VOL_ID) self.driver.initialize_connection(volume, FC_CONNECTOR) is_cinder_connection.assert_called_once_with(FC_CONNECTOR) increment.assert_called_once_with(tc.VOL_ID) @mock.patch.object(tatlin_client.TatlinClientCommon, 'get_port_portal') def 
test_get_ports_portals(self, get_port_portal): get_port_portal.return_value = FC_PORTS_RESP pp = self.driver._get_ports_portals() self.assertDictEqual(pp, FC_PORTS_PORTALS) @mock.patch.object(tatlin_client.TatlinClientCommon, 'get_all_hosts') def test_find_current_host(self, get_all_hosts): get_all_hosts.return_value = tc.ALL_HOSTS_RESP host_id = self.driver.find_current_host(FC_CONNECTOR) self.assertEqual(host_id, tc.HOST_ID) @mock.patch.object(tatlin_client.TatlinClientCommon, 'get_all_hosts') def test_find_current_host_not_found(self, get_all_hosts): get_all_hosts.return_value = tc.ALL_HOSTS_RESP self.assertRaises(exception.VolumeBackendAPIException, self.driver.find_current_host, FC_CONNECTOR_2) @mock.patch.object(tatlin_fc.TatlinFCVolumeDriver, '_build_initiator_target_map') @mock.patch.object(tatlin_fc.TatlinFCVolumeDriver, '_get_ports_portals') @mock.patch.object(tatlin_client.TatlinClientCommon, 'get_volume_ports') @mock.patch.object(tatlin_common.TatlinCommonVolumeDriver, '_find_mapped_lun') def test_create_volume_data(self, find_lun, volume_ports, ports_portals, build_map): find_lun.return_value = tc.LUN_ID volume_ports.return_value = FC_VOL_PORTS_RESP ports_portals.return_value = FC_PORTS_PORTALS build_map.return_value = INITIATOR_TARGET_MAP volume = tc.DummyVolume(tc.VOL_ID) connector = FC_CONNECTOR data = self.driver._create_volume_data(volume, connector) self.assertEqual(data['target_lun'], tc.LUN_ID) self.assertEqual(sorted(data['target_wwn']), sorted(FC_TARGET_WWNS)) self.assertDictEqual(data['initiator_target_map'], INITIATOR_TARGET_MAP) @mock.patch.object(tatlin_fc.fczm_utils, 'remove_fc_zone') @mock.patch.object(tatlin_client.TatlinClientCommon, 'get_resource_mapping') @mock.patch.object(tatlin_fc.TatlinFCVolumeDriver, '_create_volume_data') @mock.patch.object(tatlin_fc.TatlinFCVolumeDriver, 'find_current_host') @mock.patch.object(tatlin_common.TatlinCommonVolumeDriver, '_is_cinder_host_connection') @mock.patch.object(tatlin_common.TatlinCommonVolumeDriver, 'remove_volume_from_host') def test_terminate_connection(self, remove_host, is_cinder, find_host, create_data, resource_mapping, remove_fc_zone): is_cinder.return_value = True find_host.return_value = tc.HOST_ID resource_mapping.return_value = tc.RES_MAPPING_RESP volume = tc.DummyVolume(tc.VOL_ID) connector = FC_CONNECTOR self.driver.terminate_connection(volume, connector) remove_host.assert_called_once_with(volume, tc.HOST_ID) remove_fc_zone.assert_not_called() @mock.patch.object(tatlin_fc.fczm_utils, 'remove_fc_zone') @mock.patch.object(tatlin_client.TatlinClientCommon, 'get_resource_mapping') @mock.patch.object(tatlin_fc.TatlinFCVolumeDriver, '_create_volume_data') @mock.patch.object(tatlin_fc.TatlinFCVolumeDriver, 'find_current_host') @mock.patch.object(tatlin_common.TatlinCommonVolumeDriver, '_is_cinder_host_connection') @mock.patch.object(tatlin_common.TatlinCommonVolumeDriver, 'remove_volume_from_host') def test_terminate_connection_with_zone_removal(self, remove_host, is_cinder, find_host, create_data, resource_mapping, remove_fc_zone): is_cinder.return_value = True find_host.return_value = tc.HOST_ID_2 resource_mapping.side_effect = [ tc.RES_MAPPING_RESP, tc.RES_MAPPING_RESP2, ] create_data.return_value = VOLUME_DATA volume = tc.DummyVolume(tc.VOL_ID) connector = FC_CONNECTOR self.driver.terminate_connection(volume, connector) remove_host.assert_called_once_with(volume, tc.HOST_ID_2) remove_fc_zone.assert_called_once_with({ 'driver_volume_type': 'fibre_channel', 'data': VOLUME_DATA, }) def 
test_build_initiator_target_map(self): self.driver._lookup_service = None connector = FC_CONNECTOR targets = FC_TARGET_WWNS itmap = self.driver._build_initiator_target_map(targets, connector) self.assertListEqual(sorted(itmap.keys()), sorted(INITIATOR_TARGET_MAP.keys())) for initiator in itmap: self.assertListEqual(sorted(itmap[initiator]), sorted(INITIATOR_TARGET_MAP[initiator])) def test_build_initiator_target_map_with_lookup(self): lookup_service = mock.MagicMock() lookup_service.get_device_mapping_from_network.return_value = { 'san-1': { 'initiator_port_wwn_list': HOST_WWNS, 'target_port_wwn_list': FC_TARGET_WWNS, }, } self.driver._lookup_service = lookup_service connector = FC_CONNECTOR targets = FC_TARGET_WWNS itmap = self.driver._build_initiator_target_map(targets, connector) self.assertListEqual(sorted(itmap.keys()), sorted(INITIATOR_TARGET_MAP.keys())) for initiator in itmap: self.assertListEqual(sorted(itmap[initiator]), sorted(INITIATOR_TARGET_MAP[initiator])) lookup_service.get_device_mapping_from_network.assert_called_once_with( connector['wwpns'], targets) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/volume/drivers/yadro/test_tatlin_iscsi.py0000664000175000017500000002423000000000000026656 0ustar00zuulzuul00000000000000# Copyright (C) 2021-2022 YADRO. # All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
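# Illustrative sketch (an assumption, not the driver's actual helper): the
# OSMGR_ISCSI_PORTS fixture defined below reduces to ISCSI_PORT_PORTALS by
# grouping each port's IP address, with the default iSCSI port 3260, under
# its interface name. A minimal stand-alone version could look like this:
def _group_iscsi_portals_sketch(ports):
    portals = {}
    for port in ports:
        ifname = port['params']['ifname']
        portals.setdefault(ifname, []).append(
            '%s:3260' % port['params']['ipaddress'])
    return portals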
from unittest import mock from unittest.mock import MagicMock from unittest.mock import Mock from unittest import TestCase from cinder.tests.unit.volume.drivers.yadro.test_tatlin_common import \ MockResponse from cinder.volume import configuration from cinder.volume.drivers.yadro.tatlin_client import TatlinAccessAPI from cinder.volume.drivers.yadro.tatlin_client import TatlinClientCommon from cinder.volume.drivers.yadro.tatlin_common import tatlin_opts from cinder.volume.drivers.yadro.tatlin_common import TatlinCommonVolumeDriver from cinder.volume.drivers.yadro.tatlin_iscsi import TatlinISCSIVolumeDriver from cinder.volume.drivers.yadro.tatlin_utils import TatlinVolumeConnections OSMGR_ISCSI_PORTS = [ { "id": "ip-sp-1-98039b04091a", "meta": { "tatlin-node": "sp-1", "type": "ip", "port-type": "active" }, "params": { "dhcp": False, "ifname": "p30", "physical-port": "p30", "ipaddress": "172.20.101.65", "netmask": "24", "mtu": "1500", "gateway": "172.20.101.1", "roles": "", "iflabel": "", "wwpn": "" } }, { "id": "ip-sp-0-b8599f1caf1b", "meta": { "tatlin-node": "sp-0", "type": "ip", "port-type": "active" }, "params": { "dhcp": False, "ifname": "p31", "physical-port": "p31", "ipaddress": "172.20.101.66", "netmask": "24", "mtu": "1500", "gateway": "172.20.101.1", "roles": "", "iflabel": "", "wwpn": "" } }, { "id": "ip-sp-1-98039b04091b", "meta": { "tatlin-node": "sp-1", "type": "ip", "port-type": "active" }, "params": { "dhcp": False, "ifname": "p31", "physical-port": "p31", "ipaddress": "172.20.101.67", "netmask": "24", "mtu": "1500", "gateway": "172.20.101.1", "roles": "", "iflabel": "", "wwpn": "" } }, { "id": "ip-sp-0-b8599f1caf1a", "meta": { "tatlin-node": "sp-0", "type": "ip", "port-type": "active" }, "params": { "dhcp": False, "ifname": "p30", "physical-port": "p30", "ipaddress": "172.20.101.64", "netmask": "24", "mtu": "1500", "gateway": "172.20.101.1", "roles": "", "iflabel": "", "wwpn": "" } }, ] ISCSI_PORT_PORTALS = { 'p30': ['172.20.101.65:3260', '172.20.101.64:3260'], 'p31': ['172.20.101.66:3260', '172.20.101.67:3260'] } RES_PORTS_RESP = [ { "port": "fc20", "port_status": "healthy", "port_status_desc": "resource is available", "running": [ "sp-0", "sp-1" ], "wwn": [ "10:00:14:52:90:00:03:10", "10:00:14:52:90:00:03:90" ], "lun": "scsi-lun-fc20-5", "volume": "pty-vol-0d9627cb-c52e-49f1-878c-57c9bc3010c9", "lun_index": "5" } ] ALL_HOSTS_RESP = [ { "version": "d6a2d310d9adb16f0d24d5352b5c4837", "id": "5e37d335-8fff-4aee-840a-34749301a16a", "name": "victoria-fc", "port_type": "fc", "initiators": [ "21:00:34:80:0d:6b:aa:e3", "21:00:34:80:0d:6b:aa:e2" ], "tags": [], "comment": "", "auth": {} } ] RES_MAPPING_RESP = [ { "resource_id": "62bbb941-ba4a-4101-927d-e527ce5ee011", "host_id": "5e37d335-8fff-4aee-840a-34749301a16a", "mapped_lun_id": 1 } ] RESOURCE_INFORMATION = { "ptyId": "62bbb941-ba4a-4101-927d-e527ce5ee011", "id": "62bbb941-ba4a-4101-927d-e527ce5ee011", "name": "res1", "type": "block", "poolId": "c46584c5-3113-4cc7-8a72-f9262f32c508", "size": 1073741824, "maxModifySize": 5761094647808, "status": "ready", "stat": { "used_capacity": 1073741824, "mapped_blocks": 0, "dedup_count": 0, "reduction_ratio": 0 }, "lbaFormat": "4kn", "volume_id": "pty-vol-62bbb941-ba4a-4101-927d-e527ce5ee011", "wwid": "naa.614529011650000c4000800000000004", "lun_id": "4", "cached": "true", "rCacheMode": "enabled", "wCacheMode": "enabled", "ports": [ { "port": "fc21", "port_status": "healthy", "port_status_desc": "resource is available on all storage controllers", "running": [ "sp-1", "sp-0" ], "wwn": [ 
"10:00:14:52:90:00:03:91", "10:00:14:52:90:00:03:11" ], "lun": "scsi-lun-fc21-4", "volume": "pty-vol-62bbb941-ba4a-4101-927d-e527ce5ee011", "lun_index": "4" }, { "port": "fc20", "port_status": "healthy", "port_status_desc": "resource is available on all storage controllers", "running": [ "sp-1", "sp-0" ], "wwn": [ "10:00:14:52:90:00:03:10", "10:00:14:52:90:00:03:90" ], "lun": "scsi-lun-fc20-4", "volume": "pty-vol-62bbb941-ba4a-4101-927d-e527ce5ee011", "lun_index": "4" } ], "volume_path": "/dev/mapper/dmc-89382c6c-7cf9-4ff8-bdbb-f438d20c960a", "blockSize": "4kn", "replication": { "is_enabled": False } } ALL_HOST_GROUP_RESP = [ { "version": "20c28d21549fb7ec5777637f72f50043", "id": "314b5546-45da-4c8f-a24c-b615265fbc32", "name": "cinder-group", "host_ids": [ "5e37d335-8fff-4aee-840a-34749301a16a" ], "tags": None, "comment": "" } ] HOST_GROUP_RESP = { "version": "20c28d21549fb7ec5777637f72f50043", "id": "314b5546-45da-4c8f-a24c-b615265fbc32", "name": "cinder-group", "host_ids": [ "5e37d335-8fff-4aee-840a-34749301a16a" ], "tags": None, "comment": "" } ISCSI_HOST_INFO = { "version": "8c516c292055283e8ec3b7676d42f149", "id": "5e37d335-8fff-4aee-840a-34749301a16a", "name": "iscsi-host", "port_type": "iscsi", "initiators": [ "iqn.1994-05.com.redhat:4e5d7ab85a4c", ], "tags": None, "comment": "", "auth": { "auth_type": "none" } } POOL_NAME = 'cinder-pool' def get_fake_tatlin_config(): config = configuration.Configuration( tatlin_opts, configuration.SHARED_CONF_GROUP) config.san_ip = '127.0.0.1' config.san_password = 'pwd' config.san_login = 'admin' config.pool_name = POOL_NAME config.host_group = 'cinder-group' config.tat_api_retry_count = 1 config.wait_interval = 1 config.wait_retry_count = 3 config.chap_username = 'chap_user' config.chap_password = 'chap_passwd' config.state_path = '/tmp' return config class TatlinISCSIVolumeDriverTest(TestCase): @mock.patch.object(TatlinVolumeConnections, 'create_store') @mock.patch.object(TatlinAccessAPI, '_authenticate_access') def setUp(self, auth_access, create_store): access_api = TatlinAccessAPI('127.0.0.1', '443', 'user', 'passwd', False) access_api._authenticate_access = MagicMock() self.client = TatlinClientCommon(access_api, api_retry_count=1, wait_interval=1, wait_retry_count=1) mock.patch.object(TatlinAccessAPI, '_authenticate_access') self.driver = TatlinISCSIVolumeDriver( configuration=get_fake_tatlin_config()) self.driver._get_tatlin_client = MagicMock() self.driver._get_tatlin_client.return_value = self.client self.driver.do_setup(None) @mock.patch.object(TatlinAccessAPI, 'send_request') def test_success_find_current_host(self, sr_mock): sr_mock.side_effect = [ (MockResponse(ALL_HOST_GROUP_RESP, 200)), (MockResponse(HOST_GROUP_RESP, 200)), (MockResponse(ISCSI_HOST_INFO, 200)), ] self.assertEqual(self.driver.find_current_host( {'initiator': 'iqn.1994-05.com.redhat:4e5d7ab85a4c'}), '5e37d335-8fff-4aee-840a-34749301a16a') @mock.patch.object(TatlinAccessAPI, 'send_request') def test_success_get_ports_portals(self, sr_mock): sr_mock.side_effect = [ (MockResponse(OSMGR_ISCSI_PORTS, 200)), ] portals = self.driver._get_ports_portals() self.assertEqual(portals, ISCSI_PORT_PORTALS) @mock.patch.object(TatlinCommonVolumeDriver, '_update_qos') @mock.patch.object(TatlinAccessAPI, 'send_request') def test_success_initialize_connection(self, sr_mock, qos_mock): self.driver._get_ports_portals = Mock(return_value=OSMGR_ISCSI_PORTS) self.driver.find_current_host = Mock( return_value='5e37d335-8fff-4aee-840a-34749301a16a') self.driver.add_volume_to_host = Mock() 
sr_mock.side_effect = [ (MockResponse(RESOURCE_INFORMATION, 200)), # Get volume (MockResponse(RES_MAPPING_RESP, 200)), # In vol on host (MockResponse(RES_PORTS_RESP, 200)), # Get ports (MockResponse(ALL_HOSTS_RESP, 200)), # Find mapped LUN ] ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/volume/drivers/yadro/test_tatlin_utils.py0000664000175000017500000000630700000000000026711 0ustar00zuulzuul00000000000000# Copyright (C) 2021-2022 YADRO. # All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import os from unittest.mock import mock_open from unittest.mock import patch from unittest import TestCase from cinder.volume.drivers.yadro.tatlin_utils import TatlinVolumeConnections VOL_ID = 'cinder-volume-id' class TatlinVolumeConnectionsTest(TestCase): @patch('oslo_concurrency.lockutils.lock', autospec=True) @patch('os.mkdir') @patch('os.path.isdir') def setUp(self, isdir, mkdir, lock): self.path = 'fake_path' isdir.return_value = False self.connections = TatlinVolumeConnections(self.path) isdir.assert_called_once_with(self.path) mkdir.assert_called_once_with(self.path) isdir.reset_mock() mkdir.reset_mock() isdir.return_value = True self.connections = TatlinVolumeConnections(self.path) isdir.assert_called_once_with(self.path) mkdir.assert_not_called() @patch('oslo_concurrency.lockutils.lock', autospec=True) @patch('builtins.open', mock_open(read_data='1')) @patch('os.path.exists') def test_get(self, exists, lock): exists.side_effect = [False, True] self.assertEqual(self.connections.get(VOL_ID), 0) self.assertEqual(self.connections.get(VOL_ID), 1) @patch('oslo_concurrency.lockutils.lock', autospec=True) @patch('builtins.open', callable=mock_open(read_data='1')) @patch('os.path.exists') def test_increment(self, exists, open, lock): exists.side_effect = [False, True] self.assertEqual(self.connections.increment(VOL_ID), 1) open.assert_called_once_with(os.path.join(self.path, VOL_ID), 'w') with open() as f: f.write.assert_called_once_with('1') self.assertEqual(self.connections.increment(VOL_ID), 2) open.assert_called_with(os.path.join(self.path, VOL_ID), 'w') with open() as f: f.write.assert_called_with('2') @patch('oslo_concurrency.lockutils.lock', autospec=True) @patch('builtins.open', callable=mock_open()) @patch('os.remove') @patch('os.path.exists') def test_decrement(self, exists, remove, open, lock): exists.side_effect = [False, True, True] with open() as f: f.read.side_effect = [2, 1] self.assertEqual(self.connections.decrement(VOL_ID), 0) remove.assert_not_called() self.assertEqual(self.connections.decrement(VOL_ID), 1) open.assert_called_with(os.path.join(self.path, VOL_ID), 'w') f.write.assert_called_with('1') self.assertEqual(self.connections.decrement(VOL_ID), 0) remove.assert_called_with(os.path.join(self.path, VOL_ID)) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315577.3111205 
cinder-27.0.0/cinder/tests/unit/volume/flows/0000775000175000017500000000000000000000000021115 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/volume/flows/__init__.py0000664000175000017500000000000000000000000023214 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315577.3111205 cinder-27.0.0/cinder/tests/unit/volume/flows/api/0000775000175000017500000000000000000000000021666 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/volume/flows/api/__init__.py0000664000175000017500000000000000000000000023765 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/volume/flows/api/test_create_volume.py0000664000175000017500000002063500000000000026137 0ustar00zuulzuul00000000000000# Copyright 2020 Red Hat Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Tests for create_volume in the TaskFlow volume.flow.api""" from unittest import mock import ddt from cinder import context from cinder import exception from cinder.tests.unit import test from cinder.volume.flows.api import create_volume from cinder.volume import volume_types @ddt.ddt class ExtractVolumeRequestTaskValidationsTestCase(test.TestCase): """Test validation code. The ExtractVolumeRequestTask takes a set of inputs that will form a volume-create request and validates them, inferring values for "missing" inputs. This class tests the validation code, not the Task itself. 
""" def setUp(self): super(ExtractVolumeRequestTaskValidationsTestCase, self).setUp() self.context = context.get_admin_context() fake_vol_type = 'vt-from-volume_type' fake_source_vol = {'volume_type_id': 'vt-from-source_vol'} fake_snapshot = {'volume_type_id': 'vt-from-snapshot'} fake_img_vol_type_id = 'vt-from-image_volume_type_id' fake_config_value = 'vt-from-config-value' big_ass_data_tuple = ( # case 0: null params and no configured default should # result in the system default volume type {'param_vol_type': None, 'param_source_vol': None, 'param_snap': None, 'param_img_vol_type_id': None, 'config_value': volume_types.DEFAULT_VOLUME_TYPE, 'expected_vol_type': volume_types.DEFAULT_VOLUME_TYPE}, # case set 1: if a volume_type is passed, should always be selected {'param_vol_type': fake_vol_type, 'param_source_vol': None, 'param_snap': None, 'param_img_vol_type_id': None, 'config_value': volume_types.DEFAULT_VOLUME_TYPE, 'expected_vol_type': 'vt-from-volume_type'}, {'param_vol_type': fake_vol_type, 'param_source_vol': fake_source_vol, 'param_snap': fake_snapshot, 'param_img_vol_type_id': fake_img_vol_type_id, 'config_value': fake_config_value, 'expected_vol_type': 'vt-from-volume_type'}, # case set 2: if no volume_type is passed, the vt from the # source_volume should be selected {'param_vol_type': None, 'param_source_vol': fake_source_vol, 'param_snap': None, 'param_img_vol_type_id': None, 'config_value': volume_types.DEFAULT_VOLUME_TYPE, 'expected_vol_type': 'vt-from-source_vol'}, {'param_vol_type': None, 'param_source_vol': fake_source_vol, 'param_snap': fake_snapshot, 'param_img_vol_type_id': fake_img_vol_type_id, 'config_value': fake_config_value, 'expected_vol_type': 'vt-from-source_vol'}, # case set 3: no volume_type, no source_volume, so snapshot's type # should be selected {'param_vol_type': None, 'param_source_vol': None, 'param_snap': fake_snapshot, 'param_img_vol_type_id': None, 'config_value': volume_types.DEFAULT_VOLUME_TYPE, 'expected_vol_type': 'vt-from-snapshot'}, {'param_vol_type': None, 'param_source_vol': None, 'param_snap': fake_snapshot, 'param_img_vol_type_id': fake_img_vol_type_id, 'config_value': fake_config_value, 'expected_vol_type': 'vt-from-snapshot'}, # case set 4: no volume_type, no source_volume, no snapshot -- # use the volume_type from the image metadata {'param_vol_type': None, 'param_source_vol': None, 'param_snap': None, 'param_img_vol_type_id': fake_img_vol_type_id, 'config_value': volume_types.DEFAULT_VOLUME_TYPE, 'expected_vol_type': 'vt-from-image_volume_type_id'}, {'param_vol_type': None, 'param_source_vol': None, 'param_snap': None, 'param_img_vol_type_id': fake_img_vol_type_id, 'config_value': fake_config_value, 'expected_vol_type': 'vt-from-image_volume_type_id'}, # case 5: params all null, should use configured volume_type {'param_vol_type': None, 'param_source_vol': None, 'param_snap': None, 'param_img_vol_type_id': None, 'config_value': fake_config_value, 'expected_vol_type': 'vt-from-config-value'}) def reflect_second(a, b): return b @ddt.data(*big_ass_data_tuple) @mock.patch('cinder.objects.VolumeType.get_by_name_or_id', side_effect = reflect_second) @mock.patch('cinder.volume.volume_types.get_volume_type_by_name', side_effect = reflect_second) @ddt.unpack def test__get_volume_type(self, mock_get_volume_type_by_name, mock_get_by_name_or_id, param_vol_type, param_source_vol, param_snap, param_img_vol_type_id, config_value, expected_vol_type): self.flags(default_volume_type=config_value) test_fn = 
create_volume.ExtractVolumeRequestTask._get_volume_type self.assertEqual(expected_vol_type, test_fn(self.context, param_vol_type, param_source_vol, param_snap, param_img_vol_type_id)) # Before the Train release, an invalid volume type specifier # would not raise an exception; it would log an error and you'd # get a volume with volume_type == None. We want to verify that # specifying a non-existent volume_type always raises an exception smaller_data_tuple = ( {'param_source_vol': fake_source_vol, 'param_snap': None, 'param_img_vol_type_id': None, 'config_value': None}, {'param_source_vol': None, 'param_snap': fake_snapshot, 'param_img_vol_type_id': None, 'config_value': None}, {'param_source_vol': None, 'param_snap': None, 'param_img_vol_type_id': fake_img_vol_type_id, 'config_value': None}, {'param_source_vol': None, 'param_snap': None, 'param_img_vol_type_id': None, 'config_value': fake_config_value}) @ddt.data(*smaller_data_tuple) @mock.patch('cinder.objects.VolumeType.get_by_name_or_id', side_effect = exception.VolumeTypeNotFoundByName( volume_type_name="get_by_name_or_id")) @mock.patch('cinder.volume.volume_types.get_volume_type_by_name', side_effect = exception.VolumeTypeNotFoundByName( volume_type_name="get_by_name")) @ddt.unpack def test_neg_get_volume_type(self, mock_get_volume_type_by_name, mock_get_by_name_or_id, param_source_vol, param_snap, param_img_vol_type_id, config_value): self.flags(default_volume_type=config_value) test_fn = create_volume.ExtractVolumeRequestTask._get_volume_type if config_value: self.assertRaises(exception.VolumeTypeDefaultMisconfiguredError, test_fn, self.context, None, param_source_vol, param_snap, param_img_vol_type_id) else: self.assertRaises(exception.VolumeTypeNotFoundByName, test_fn, self.context, None, param_source_vol, param_snap, param_img_vol_type_id) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/volume/flows/fake_volume_api.py0000664000175000017500000000427600000000000024626 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
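# Illustrative sketch (a simplified stand-in, not cinder's actual
# ExtractVolumeRequestTask._get_volume_type): the big_ass_data_tuple cases
# above exercise this selection order -- an explicit volume_type wins, then
# the source volume's type, then the snapshot's, then the image's
# cinder_img_volume_type, and finally the configured default.
def _pick_volume_type_sketch(vol_type, source_vol, snap, img_vol_type_id,
                             configured_default):
    if vol_type:
        return vol_type
    if source_vol:
        return source_vol['volume_type_id']
    if snap:
        return snap['volume_type_id']
    if img_vol_type_id:
        return img_vol_type_id
    return configured_default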
class FakeVolumeAPI(object): def __init__(self, expected_spec, test_inst): self.expected_spec = expected_spec.copy() self.test_inst = test_inst def create_volume(self, ctxt, volume, host, request_spec, filter_properties, allow_reschedule=True, snapshot_id=None, image_id=None, source_volid=None, source_replicaid=None): self.test_inst.assertEqual(self.expected_spec, request_spec) self.test_inst.assertEqual(request_spec['source_volid'], source_volid) self.test_inst.assertEqual(request_spec['snapshot_id'], snapshot_id) self.test_inst.assertEqual(request_spec['image_id'], image_id) class FakeSchedulerRpcAPI(object): def __init__(self, expected_spec, test_inst): self.expected_spec = expected_spec.copy() self.test_inst = test_inst def create_volume(self, ctxt, volume, snapshot_id=None, image_id=None, request_spec=None, filter_properties=None, backup_id=None): self.test_inst.assertEqual(self.expected_spec, request_spec) def manage_existing(self, context, volume, request_spec=None): self.test_inst.assertEqual(self.expected_spec, request_spec) class FakeDb(object): def volume_get(self, *args, **kwargs): return {'host': 'barf'} def volume_update(self, *args, **kwargs): return {'host': 'farb'} def snapshot_get(self, *args, **kwargs): return {'volume_id': 1} def consistencygroup_get(self, *args, **kwargs): return {'consistencygroup_id': 1} ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315528.0 cinder-27.0.0/cinder/tests/unit/volume/flows/test_create_volume_flow.py0000664000175000017500000033006100000000000026412 0ustar00zuulzuul00000000000000# Copyright 2013 Canonical Ltd. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Tests for create_volume TaskFlow """ import sys from unittest import mock import uuid from castellan.common import exception as castellan_exc from castellan.tests.unit.key_manager import mock_key_manager import ddt from oslo_utils import imageutils from cinder import context from cinder import exception from cinder.message import message_field from cinder.tests.unit.backup import fake_backup from cinder.tests.unit.consistencygroup import fake_consistencygroup from cinder.tests.unit import fake_constants as fakes from cinder.tests.unit import fake_snapshot from cinder.tests.unit import fake_volume from cinder.tests.unit.image import fake as fake_image from cinder.tests.unit import test from cinder.tests.unit import utils from cinder.tests.unit.volume.flows import fake_volume_api from cinder.volume.flows.api import create_volume from cinder.volume.flows.manager import create_volume as create_volume_manager @ddt.ddt class CreateVolumeFlowTestCase(test.TestCase): def time_inc(self): self.counter += 1 return self.counter def setUp(self): super(CreateVolumeFlowTestCase, self).setUp() self.ctxt = context.get_admin_context() # Ensure that time.time() always returns more than the last time it was # called to avoid div by zero errors. 
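# Illustrative note (an aside, not part of the original fixture): with
# time.time patched to side_effect=self.time_inc, successive calls return
# 1.0, 2.0, 3.0, ..., so any elapsed-time computation in the flow sees an
# interval of at least 1.0 and can never divide by zero.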
self.counter = float(0) self.get_extra_specs = self.patch( 'cinder.volume.volume_types.get_volume_type_extra_specs', return_value={}) @mock.patch('cinder.objects.Volume.get_by_id') @mock.patch('cinder.volume.volume_utils.extract_host') @mock.patch('time.time') @mock.patch('cinder.objects.Snapshot.get_by_id') def test_cast_create_volume_from_resource(self, mock_snapshot_get, mock_time, mock_extract_host, volume_get_by_id): mock_time.side_effect = self.time_inc volume = fake_volume.fake_volume_obj( self.ctxt, host='host@backend#pool', cluster_name='cluster@backend#pool') volume_get_by_id.return_value = volume # This is the spec for a volume created from another resource. It # includes the 'resource_backend'. When the volume is associated # with a cluster the 'resource_backend' should use the cluster name. spec = {'volume_id': volume.id, 'volume': volume, 'resource_backend': 'cluster@backend#pool', 'source_volid': volume.id, 'snapshot_id': None, 'image_id': 4, 'consistencygroup_id': None, 'cgsnapshot_id': None, 'group_id': None, 'backup_id': None, } # Fake objects assert specs task = create_volume.VolumeCastTask( fake_volume_api.FakeSchedulerRpcAPI(spec, self), fake_volume_api.FakeVolumeAPI(spec, self), fake_volume_api.FakeDb()) # Remove 'resource_backend' prior to calling task._cast_create_volume # (the point of the test is to confirm that it adds it to the spec # sent to the scheduler). spec.pop('resource_backend') task._cast_create_volume(self.ctxt, spec, {}) mock_snapshot_get.assert_not_called() mock_extract_host.assert_not_called() snapshot = fake_snapshot.fake_snapshot_obj(self.ctxt, volume=volume) mock_snapshot_get.return_value = snapshot spec = {'volume_id': volume.id, 'volume': volume, 'resource_backend': 'cluster@backend#pool', 'source_volid': None, 'snapshot_id': snapshot.id, 'image_id': 4, 'consistencygroup_id': None, 'cgsnapshot_id': None, 'group_id': None, 'backup_id': None, } # Fake objects assert specs task = create_volume.VolumeCastTask( fake_volume_api.FakeSchedulerRpcAPI(spec, self), fake_volume_api.FakeVolumeAPI(spec, self), fake_volume_api.FakeDb()) spec.pop('resource_backend') task._cast_create_volume(self.ctxt, spec, {}) mock_snapshot_get.assert_called_once_with(self.ctxt, snapshot.id) mock_extract_host.assert_not_called() @mock.patch('cinder.objects.Volume.get_by_id') @mock.patch('cinder.volume.volume_utils.extract_host') @mock.patch('time.time') @mock.patch('cinder.objects.ConsistencyGroup.get_by_id') def test_cast_create_volume(self, consistencygroup_get_by_id, mock_time, mock_extract_host, volume_get_by_id): mock_time.side_effect = self.time_inc volume = fake_volume.fake_volume_obj(self.ctxt) volume_get_by_id.return_value = volume props = {} cg_obj = fake_consistencygroup.fake_consistencyobject_obj( self.ctxt, consistencygroup_id=1, host='host@backend#pool', cluster_name='cluster@backend#pool') consistencygroup_get_by_id.return_value = cg_obj mock_extract_host.return_value = 'cluster@backend' spec = {'volume_id': None, 'volume': None, 'source_volid': None, 'snapshot_id': None, 'image_id': None, 'consistencygroup_id': None, 'cgsnapshot_id': None, 'group_id': None, 'backup_id': None, } # Fake objects assert specs task = create_volume.VolumeCastTask( fake_volume_api.FakeSchedulerRpcAPI(spec, self), fake_volume_api.FakeVolumeAPI(spec, self), fake_volume_api.FakeDb()) task._cast_create_volume(self.ctxt, spec, props) consistencygroup_get_by_id.assert_not_called() mock_extract_host.assert_not_called() # This is the spec for a volume created from a consistency group. 
It # includes the 'resource_backend'. spec = {'volume_id': volume.id, 'volume': volume, 'resource_backend': 'cluster@backend', 'source_volid': 2, 'snapshot_id': 3, 'image_id': 4, 'consistencygroup_id': 5, 'cgsnapshot_id': None, 'group_id': None, 'backup_id': None, } # Fake objects assert specs task = create_volume.VolumeCastTask( fake_volume_api.FakeSchedulerRpcAPI(spec, self), fake_volume_api.FakeVolumeAPI(spec, self), fake_volume_api.FakeDb()) # Remove 'resource_backend' prior to calling task._cast_create_volume # (the point of the test is to confirm that it adds it to the spec # sent to the scheduler). spec.pop('resource_backend') task._cast_create_volume(self.ctxt, spec, props) consistencygroup_get_by_id.assert_called_once_with(self.ctxt, 5) mock_extract_host.assert_called_once_with('cluster@backend#pool') @mock.patch('cinder.db.volume_create') @mock.patch('cinder.objects.Volume.get_by_id') @mock.patch('cinder.objects.Snapshot.get_by_id') def test_create_volume_from_snapshot(self, snapshot_get_by_id, volume_get_by_id, volume_create): volume_db = {'bootable': True} volume_obj = fake_volume.fake_volume_obj(self.ctxt, **volume_db) snapshot_obj = fake_snapshot.fake_snapshot_obj(self.ctxt) snapshot_get_by_id.return_value = snapshot_obj volume_get_by_id.return_value = volume_obj volume_create.return_value = {'id': fakes.VOLUME_ID, 'volume_attachment': []} task = create_volume.EntryCreateTask() result = task.execute(self.ctxt, optional_args=None, source_volid=None, snapshot_id=snapshot_obj.id, availability_zones=['nova'], size=1, encryption_key_id=None, description='123', name='123', multiattach=None) self.assertTrue(result['volume_properties']['bootable']) volume_db = {'bootable': False} volume_obj = fake_volume.fake_volume_obj(self.ctxt, **volume_db) snapshot_obj = fake_snapshot.fake_snapshot_obj(self.ctxt) snapshot_get_by_id.return_value = snapshot_obj volume_get_by_id.return_value = volume_obj task = create_volume.EntryCreateTask() result = task.execute(self.ctxt, optional_args=None, source_volid=None, snapshot_id=snapshot_obj.id, availability_zones=['nova'], size=1, encryption_key_id=None, description='123', name='123', multiattach=None) self.assertFalse(result['volume_properties']['bootable']) @ddt.data({'bootable': True}, {'bootable': False}) @mock.patch('cinder.db.volume_create') @mock.patch('cinder.objects.Volume.get_by_id') @ddt.unpack def test_create_from_source_volid_bootable(self, volume_get_by_id, volume_create, bootable): volume_db = {'bootable': bootable} volume_obj = fake_volume.fake_volume_obj(self.ctxt, **volume_db) volume_get_by_id.return_value = volume_obj volume_create.return_value = {'id': fakes.VOLUME_ID, 'volume_attachment': []} task = create_volume.EntryCreateTask() result = task.execute(self.ctxt, optional_args=None, source_volid=volume_obj.id, snapshot_id=None, availability_zones=['nova'], size=1, encryption_key_id=None, description='123', name='123', multiattach=None) self.assertEqual(bootable, result['volume_properties']['bootable']) @mock.patch('cinder.db.volume_create') @mock.patch('cinder.objects.Volume.get_by_id') @ddt.unpack def test_create_from_source_volid_encrypted(self, volume_get_by_id, volume_create): volume_db = {'encryption_key_id': fakes.ENCRYPTION_KEY_ID, 'id': fakes.VOLUME2_ID} volume_obj = fake_volume.fake_volume_obj(self.ctxt, **volume_db) volume_get_by_id.return_value = volume_obj volume_create.return_value = volume_obj task = create_volume.EntryCreateTask() result = task.execute(self.ctxt, optional_args=None, source_volid=volume_obj.id, 
snapshot_id=None, availability_zones=['nova'], size=1, encryption_key_id=volume_obj.encryption_key_id, description='123', name='123', multiattach=None) self.assertEqual( fakes.ENCRYPTION_KEY_ID, result['volume_properties']['encryption_key_id']) @ddt.data(('enabled', {'replication_enabled': ' True'}), ('disabled', {'replication_enabled': ' False'}), ('disabled', {})) @ddt.unpack @mock.patch('cinder.volume.flows.api.create_volume.' 'ExtractVolumeRequestTask.' '_get_encryption_key_id', mock.Mock()) @mock.patch('cinder.volume.volume_types.get_volume_type_qos_specs') @mock.patch( 'cinder.objects.volume_type.VolumeType.get_by_name_or_id', mock.Mock(return_value={})) def test_extract_volume_request_replication_status(self, replication_status, extra_specs, fake_get_qos): volume_type = {'id': fakes.VOLUME_TYPE_ID, 'size': 1, 'extra_specs': extra_specs} self.get_extra_specs.return_value = extra_specs fake_image_service = fake_image.FakeImageService() fake_key_manager = mock_key_manager.MockKeyManager() task = create_volume.ExtractVolumeRequestTask(fake_image_service, {'nova'}) result = task.execute(self.ctxt, size=1, snapshot=None, image_id=None, source_volume=None, availability_zone='nova', volume_type=volume_type, metadata=None, key_manager=fake_key_manager, consistencygroup=None, cgsnapshot=None, group=None, group_snapshot=None, backup=None) self.assertEqual(replication_status, result['replication_status']) @mock.patch('cinder.volume.volume_types.is_encrypted') @mock.patch('cinder.volume.flows.api.create_volume.' 'ExtractVolumeRequestTask.' '_get_encryption_key_id') @mock.patch('cinder.volume.volume_types.get_volume_type_qos_specs') @mock.patch( 'cinder.objects.volume_type.VolumeType.get_by_name_or_id', mock.Mock(return_value={})) def test_extract_volume_request_from_image_encrypted( self, fake_get_qos, fake_get_encryption_key, fake_is_encrypted): fake_image_service = fake_image.FakeImageService() image_id = 1 image_meta = {} image_meta['id'] = image_id image_meta['status'] = 'active' image_meta['size'] = 1 fake_image_service.create(self.ctxt, image_meta) fake_key_manager = mock_key_manager.MockKeyManager() task = create_volume.ExtractVolumeRequestTask( fake_image_service, {'nova'}) fake_is_encrypted.return_value = True task.execute(self.ctxt, size=1, snapshot=None, image_id=image_id, source_volume=None, availability_zone='nova', volume_type={'name': 'fake_type', 'id': 1}, metadata=None, key_manager=fake_key_manager, consistencygroup=None, cgsnapshot=None, group=None, group_snapshot=None, backup=None) fake_get_encryption_key.assert_called_once_with( fake_key_manager, self.ctxt, 1, None, None, image_meta) @mock.patch('cinder.volume.volume_types.is_encrypted') @mock.patch('cinder.volume.volume_types.get_volume_type_qos_specs') @mock.patch('cinder.objects.volume_type.VolumeType.get_by_name_or_id') def test_extract_volume_request_from_image( self, fake_get_type, fake_get_qos, fake_is_encrypted): fake_image_service = fake_image.FakeImageService() image_id = 2 image_meta = {} image_meta['id'] = image_id image_meta['status'] = 'active' image_meta['size'] = 1 fake_image_service.create(self.ctxt, image_meta) fake_key_manager = mock_key_manager.MockKeyManager() volume_type = {'name': 'type1', 'id': 1} fake_get_type.return_value = volume_type task = create_volume.ExtractVolumeRequestTask( fake_image_service, {'nova'}) fake_is_encrypted.return_value = False fake_get_qos.return_value = {'qos_specs': None} result = task.execute(self.ctxt, size=1, snapshot=None, image_id=image_id, source_volume=None, 
availability_zone='nova', volume_type=volume_type, metadata=None, key_manager=fake_key_manager, consistencygroup=None, cgsnapshot=None, group=None, group_snapshot=None, backup=None) expected_result = {'size': 1, 'snapshot_id': None, 'source_volid': None, 'availability_zones': ['nova'], 'volume_type': volume_type, 'volume_type_id': 1, 'encryption_key_id': None, 'qos_specs': None, 'consistencygroup_id': None, 'cgsnapshot_id': None, 'group_id': None, 'refresh_az': False, 'replication_status': 'disabled', 'backup_id': None, 'multiattach': False} self.assertEqual(expected_result, result) @mock.patch('cinder.volume.volume_types.is_encrypted') @mock.patch('cinder.volume.volume_types.get_volume_type_qos_specs') def test_extract_availability_zones_without_fallback( self, fake_get_qos, fake_is_encrypted): fake_image_service = fake_image.FakeImageService() image_id = 3 image_meta = {} image_meta['id'] = image_id image_meta['status'] = 'active' image_meta['size'] = 1 fake_image_service.create(self.ctxt, image_meta) fake_key_manager = mock_key_manager.MockKeyManager() volume_type = {'name': 'type1', 'id': 1} task = create_volume.ExtractVolumeRequestTask( fake_image_service, {'nova'}) fake_is_encrypted.return_value = False fake_get_qos.return_value = {'qos_specs': None} self.assertRaises(exception.InvalidAvailabilityZone, task.execute, self.ctxt, size=1, snapshot=None, image_id=image_id, source_volume=None, availability_zone='notnova', volume_type=volume_type, metadata=None, key_manager=fake_key_manager, consistencygroup=None, cgsnapshot=None, group=None, group_snapshot=None, backup=None) @mock.patch('cinder.volume.volume_types.is_encrypted') @mock.patch('cinder.volume.volume_types.get_volume_type_qos_specs') def test_extract_availability_zones_with_azs_not_matched( self, fake_get_qos, fake_is_encrypted): fake_image_service = fake_image.FakeImageService() image_id = str(uuid.uuid4()) image_meta = {} image_meta['id'] = image_id image_meta['status'] = 'active' image_meta['size'] = 1 fake_image_service.create(self.ctxt, image_meta) fake_key_manager = mock_key_manager.MockKeyManager() volume_type = {'name': 'type1', 'id': 1, 'extra_specs': {'RESKEY:availability_zones': 'nova3'}} task = create_volume.ExtractVolumeRequestTask( fake_image_service, {'nova1', 'nova2'}) fake_is_encrypted.return_value = False fake_get_qos.return_value = {'qos_specs': None} self.assertRaises(exception.InvalidTypeAvailabilityZones, task.execute, self.ctxt, size=1, snapshot=None, image_id=image_id, source_volume=None, availability_zone='notnova', volume_type=volume_type, metadata=None, key_manager=fake_key_manager, consistencygroup=None, cgsnapshot=None, group=None, group_snapshot=None, backup=None) @ddt.data({'type_azs': 'nova3', 'self_azs': ['nova3'], 'expected': ['nova3']}, {'type_azs': 'nova3, nova2', 'self_azs': ['nova3'], 'expected': ['nova3']}, {'type_azs': 'nova3,,,', 'self_azs': ['nova3'], 'expected': ['nova3']}, {'type_azs': 'nova3', 'self_azs': ['nova2'], 'expected': exception.InvalidTypeAvailabilityZones}, {'type_azs': ',,', 'self_azs': ['nova2'], 'expected': exception.InvalidTypeAvailabilityZones} ) @ddt.unpack def test__extract_availability_zones_az_not_specified(self, type_azs, self_azs, expected): fake_image_service = fake_image.FakeImageService() image_id = str(uuid.uuid4()) image_meta = {} image_meta['id'] = image_id image_meta['status'] = 'active' image_meta['size'] = 1 fake_image_service.create(self.ctxt, image_meta) volume_type = {'name': 'type1', 'extra_specs': {'RESKEY:availability_zones': type_azs}} task = 
create_volume.ExtractVolumeRequestTask( fake_image_service, {'nova'}) task.availability_zones = self_azs if isinstance(expected, list): result = task._extract_availability_zones( None, {}, {}, {}, volume_type=volume_type) self.assertEqual(expected, result[0]) else: self.assertRaises( expected, task._extract_availability_zones, None, {}, {}, {}, volume_type=volume_type) def test__extract_availability_zones_az_not_in_type_azs(self): self.override_config('allow_availability_zone_fallback', False) fake_image_service = fake_image.FakeImageService() image_id = str(uuid.uuid4()) image_meta = {} image_meta['id'] = image_id image_meta['status'] = 'active' image_meta['size'] = 1 fake_image_service.create(self.ctxt, image_meta) volume_type = {'name': 'type1', 'extra_specs': {'RESKEY:availability_zones': 'nova1, nova2'}} task = create_volume.ExtractVolumeRequestTask( fake_image_service, {'nova'}) task.availability_zones = ['nova1'] self.assertRaises(exception.InvalidAvailabilityZone, task._extract_availability_zones, 'nova2', {}, {}, {}, volume_type=volume_type) @mock.patch('cinder.volume.volume_types.is_encrypted') @mock.patch('cinder.volume.volume_types.get_volume_type_qos_specs') @mock.patch('cinder.objects.volume_type.VolumeType.get_by_name_or_id') def test_extract_availability_zones_with_fallback( self, fake_get_type, fake_get_qos, fake_is_encrypted): self.override_config('allow_availability_zone_fallback', True) fake_image_service = fake_image.FakeImageService() image_id = 4 image_meta = {} image_meta['id'] = image_id image_meta['status'] = 'active' image_meta['size'] = 1 fake_image_service.create(self.ctxt, image_meta) fake_key_manager = mock_key_manager.MockKeyManager() volume_type = {'name': 'type1', 'id': 1} fake_get_type.return_value = volume_type task = create_volume.ExtractVolumeRequestTask( fake_image_service, {'nova'}) fake_is_encrypted.return_value = False fake_get_qos.return_value = {'qos_specs': None} result = task.execute(self.ctxt, size=1, snapshot=None, image_id=image_id, source_volume=None, availability_zone='does_not_exist', volume_type=volume_type, metadata=None, key_manager=fake_key_manager, consistencygroup=None, cgsnapshot=None, group=None, group_snapshot=None, backup=None) expected_result = {'size': 1, 'snapshot_id': None, 'source_volid': None, 'availability_zones': ['nova'], 'volume_type': volume_type, 'volume_type_id': 1, 'encryption_key_id': None, 'qos_specs': None, 'consistencygroup_id': None, 'cgsnapshot_id': None, 'group_id': None, 'refresh_az': True, 'multiattach': False, 'replication_status': 'disabled', 'backup_id': None} self.assertEqual(expected_result, result) @mock.patch('cinder.volume.volume_types.is_encrypted', return_value=True) @mock.patch('cinder.volume.volume_types.get_volume_type_encryption', return_value=mock.Mock(cipher='my-cipher-2000')) @mock.patch('cinder.volume.volume_types.get_volume_type_qos_specs', return_value={'qos_specs': None}) def test_get_encryption_key_id_castellan_error( self, mock_get_qos, mock_get_volume_type_encryption, mock_is_encrypted): fake_image_service = fake_image.FakeImageService() image_id = 99 image_meta = {'id': image_id, 'status': 'active', 'size': 1} fake_image_service.create(self.ctxt, image_meta) fake_key_manager = mock_key_manager.MockKeyManager() volume_type = {'name': 'type1', 'id': 1} with mock.patch.object( fake_key_manager, 'create_key', side_effect=castellan_exc.KeyManagerError('foo') ): with mock.patch.object(fake_key_manager, 'get', return_value=fakes.ENCRYPTION_KEY_ID): task = 
create_volume.ExtractVolumeRequestTask( fake_image_service, {'nova'}) self.assertRaises(exception.Invalid, task.execute, self.ctxt, size=1, snapshot=None, image_id=image_id, source_volume=None, availability_zone='nova', volume_type=volume_type, metadata=None, key_manager=fake_key_manager, consistencygroup=None, cgsnapshot=None, group=None, group_snapshot=None, backup=None) mock_is_encrypted.assert_called_with(self.ctxt, 1) mock_get_volume_type_encryption.assert_called_once_with(self.ctxt, 1) @mock.patch('cinder.volume.volume_types.is_encrypted') @mock.patch('cinder.volume.volume_types.get_volume_type_qos_specs') @mock.patch('cinder.objects.volume_type.VolumeType.get_by_name_or_id') def test_extract_volume_request_task_with_large_volume_size( self, fake_get_type, fake_get_qos, fake_is_encrypted): fake_image_service = fake_image.FakeImageService() image_id = 11 image_meta = {} image_meta['id'] = image_id image_meta['status'] = 'active' image_meta['size'] = 1 fake_image_service.create(self.ctxt, image_meta) fake_key_manager = mock_key_manager.MockKeyManager() volume_type = {'name': 'type1', 'id': 1} fake_get_type.return_value = volume_type task = create_volume.ExtractVolumeRequestTask( fake_image_service, {'nova'}) fake_is_encrypted.return_value = False fake_get_qos.return_value = {'qos_specs': None} result = task.execute(self.ctxt, size=(sys.maxsize + 1), snapshot=None, image_id=image_id, source_volume=None, availability_zone=None, volume_type=volume_type, metadata=None, key_manager=fake_key_manager, consistencygroup=None, cgsnapshot=None, group=None, group_snapshot=None, backup=None) expected_result = {'size': (sys.maxsize + 1), 'snapshot_id': None, 'source_volid': None, 'availability_zones': ['nova'], 'volume_type': volume_type, 'volume_type_id': 1, 'encryption_key_id': None, 'qos_specs': None, 'replication_status': 'disabled', 'consistencygroup_id': None, 'cgsnapshot_id': None, 'refresh_az': False, 'group_id': None, 'multiattach': False, 'backup_id': None} self.assertEqual(expected_result, result) @mock.patch('cinder.volume.volume_types.is_encrypted') @mock.patch('cinder.volume.volume_types.get_volume_type_qos_specs') @mock.patch('cinder.objects.volume_type.VolumeType.get_by_name_or_id') def test_extract_volume_request_from_image_with_qos_specs( self, fake_get_type, fake_get_qos, fake_is_encrypted): fake_image_service = fake_image.FakeImageService() image_id = 5 image_meta = {} image_meta['id'] = image_id image_meta['status'] = 'active' image_meta['size'] = 1 fake_image_service.create(self.ctxt, image_meta) fake_key_manager = mock_key_manager.MockKeyManager() volume_type = {'name': 'type1', 'id': 1} fake_get_type.return_value = volume_type task = create_volume.ExtractVolumeRequestTask( fake_image_service, {'nova'}) fake_is_encrypted.return_value = False fake_qos_spec = {'specs': {'fake_key': 'fake'}} fake_get_qos.return_value = {'qos_specs': fake_qos_spec} result = task.execute(self.ctxt, size=1, snapshot=None, image_id=image_id, source_volume=None, availability_zone='nova', volume_type=volume_type, metadata=None, key_manager=fake_key_manager, consistencygroup=None, cgsnapshot=None, group=None, group_snapshot=None, backup=None) expected_result = {'size': 1, 'snapshot_id': None, 'source_volid': None, 'availability_zones': ['nova'], 'volume_type': volume_type, 'volume_type_id': 1, 'encryption_key_id': None, 'qos_specs': {'fake_key': 'fake'}, 'consistencygroup_id': None, 'cgsnapshot_id': None, 'group_id': None, 'refresh_az': False, 'multiattach': False, 'replication_status': 'disabled', 
'backup_id': None} self.assertEqual(expected_result, result) @mock.patch('cinder.volume.volume_types.is_encrypted') @mock.patch('cinder.volume.volume_types.get_volume_type_qos_specs') @mock.patch('cinder.volume.volume_types.get_default_volume_type') @mock.patch('cinder.volume.volume_types.get_volume_type_by_name') @mock.patch('cinder.objects.volume_type.VolumeType.get_by_name_or_id') def test_extract_image_volume_type_from_image( self, fake_get_type, fake_get_vol_type, fake_get_def_vol_type, fake_get_qos, fake_is_encrypted): image_volume_type = {'name': 'type_from_image', 'id': 1} fake_get_type.return_value = image_volume_type fake_image_service = fake_image.FakeImageService() image_id = 6 image_meta = {} image_meta['id'] = image_id image_meta['status'] = 'active' image_meta['size'] = 1 image_meta['properties'] = {} image_meta['properties']['cinder_img_volume_type'] = 'fake_volume_type' fake_image_service.create(self.ctxt, image_meta) fake_key_manager = mock_key_manager.MockKeyManager() task = create_volume.ExtractVolumeRequestTask( fake_image_service, {'nova'}) fake_is_encrypted.return_value = False fake_get_vol_type.return_value = image_volume_type fake_get_qos.return_value = {'qos_specs': None} result = task.execute(self.ctxt, size=1, snapshot=None, image_id=image_id, source_volume=None, availability_zone='nova', volume_type=None, metadata=None, key_manager=fake_key_manager, consistencygroup=None, cgsnapshot=None, group=None, group_snapshot=None, backup=None) expected_result = {'size': 1, 'snapshot_id': None, 'source_volid': None, 'availability_zones': ['nova'], 'volume_type': image_volume_type, 'volume_type_id': 1, 'encryption_key_id': None, 'qos_specs': None, 'consistencygroup_id': None, 'cgsnapshot_id': None, 'group_id': None, 'refresh_az': False, 'multiattach': False, 'replication_status': 'disabled', 'backup_id': None} self.assertEqual(expected_result, result) @mock.patch('cinder.objects.volume_type.VolumeType.get_by_name_or_id') def test_extract_image_volume_type_from_image_invalid_type( self, fake_get_type): # Expected behavior: if the cinder_img_volume_type image property # specifies an invalid type, it should raise an exception image_volume_type = 'an_invalid_type' fake_image_service = fake_image.FakeImageService() image_id = 7 image_meta = {} image_meta['id'] = image_id image_meta['status'] = 'active' image_meta['size'] = 1 image_meta['properties'] = {} image_meta['properties']['cinder_img_volume_type'] = image_volume_type fake_image_service.create(self.ctxt, image_meta) fake_key_manager = mock_key_manager.MockKeyManager() task = create_volume.ExtractVolumeRequestTask( fake_image_service, {'nova'}) def raise_with_id(stuff, id): raise exception.VolumeTypeNotFoundByName(volume_type_name=id) fake_get_type.side_effect = raise_with_id e = self.assertRaises(exception.VolumeTypeNotFoundByName, task.execute, self.ctxt, size=1, snapshot=None, image_id=image_id, source_volume=None, availability_zone='nova', volume_type=None, metadata=None, key_manager=fake_key_manager, consistencygroup=None, cgsnapshot=None, group=None, group_snapshot=None, backup=None) self.assertIn(image_volume_type, str(e)) @mock.patch('cinder.volume.volume_types.is_encrypted') @mock.patch('cinder.volume.volume_types.get_volume_type_qos_specs') @mock.patch('cinder.objects.volume_type.VolumeType.get_by_name_or_id') @mock.patch('cinder.volume.volume_types.get_default_volume_type') @ddt.data((8, None), (9, {'cinder_img_volume_type': None})) @ddt.unpack def test_extract_image_volume_type_from_image_properties_error( 
self, image_id, fake_img_properties, fake_get_default_vol_type, fake_get_by_name_or_id, fake_get_qos, fake_is_encrypted): # Expected behavior: if the image has no properties # or the cinder_img_volume_type is present but has no # value, the default volume type should be used self.flags(default_volume_type='fake_default_volume_type') fake_image_service = fake_image.FakeImageService() image_meta = {} image_meta['id'] = image_id image_meta['status'] = 'active' image_meta['size'] = 1 image_meta['properties'] = fake_img_properties fake_image_service.create(self.ctxt, image_meta) fake_key_manager = mock_key_manager.MockKeyManager() task = create_volume.ExtractVolumeRequestTask( fake_image_service, {'nova'}) fake_is_encrypted.return_value = False fake_get_qos.return_value = {'qos_specs': None} fake_volume_type = {'name': 'fake_default_volume_type', 'id': fakes.VOLUME_TYPE_ID} fake_get_default_vol_type.return_value = fake_volume_type # yeah, I don't like this either, but until someone figures # out why we re-get the volume_type object in the execute # function, we have to do this. At least I will check later # and make sure we called it with the correct vol_type_id, so # I'm not completely cheating fake_get_by_name_or_id.return_value = fake_volume_type result = task.execute(self.ctxt, size=1, snapshot=None, image_id=image_id, source_volume=None, availability_zone='nova', volume_type=None, metadata=None, key_manager=fake_key_manager, consistencygroup=None, cgsnapshot=None, group=None, group_snapshot=None, backup=None) fake_get_default_vol_type.assert_called_once() fake_get_by_name_or_id.assert_called_once_with( self.ctxt, fakes.VOLUME_TYPE_ID) expected_result = {'size': 1, 'snapshot_id': None, 'source_volid': None, 'availability_zones': ['nova'], 'volume_type': fake_volume_type, 'volume_type_id': fakes.VOLUME_TYPE_ID, 'encryption_key_id': None, 'qos_specs': None, 'consistencygroup_id': None, 'cgsnapshot_id': None, 'group_id': None, 'refresh_az': False, 'multiattach': False, 'replication_status': 'disabled', 'backup_id': None} self.assertEqual(expected_result, result) glance_nonactive_statuses = ('queued', 'saving', 'deleted', 'deactivated', 'uploading', 'importing', 'not_a_vaild_state', 'error') @ddt.data(*glance_nonactive_statuses) def test_extract_image_volume_type_from_image_invalid_input( self, status): # Expected behavior: an image must be in 'active' status # or we should not create an image from it fake_image_service = fake_image.FakeImageService() image_meta = {'status': status} image_id = fake_image_service.create(self.ctxt, image_meta)['id'] fake_key_manager = mock_key_manager.MockKeyManager() task = create_volume.ExtractVolumeRequestTask( fake_image_service, {'nova'}) e = self.assertRaises(exception.InvalidInput, task.execute, self.ctxt, size=1, snapshot=None, image_id=image_id, source_volume=None, availability_zone='nova', volume_type=None, metadata=None, key_manager=fake_key_manager, consistencygroup=None, cgsnapshot=None, group=None, group_snapshot=None, backup=None) self.assertIn("Invalid input received", str(e)) self.assertIn("Image {} is not active".format(image_id), str(e)) fake_image_service.delete(self.ctxt, image_id) @ddt.ddt class CreateVolumeFlowManagerTestCase(test.TestCase): def setUp(self): super(CreateVolumeFlowManagerTestCase, self).setUp() self.ctxt = context.get_admin_context() @mock.patch('cinder.volume.flows.manager.create_volume.' 'CreateVolumeFromSpecTask.' '_cleanup_cg_in_volume') @mock.patch('cinder.volume.flows.manager.create_volume.' 'CreateVolumeFromSpecTask.' 
'_rekey_volume') @mock.patch('cinder.objects.Volume.update') @mock.patch('cinder.objects.Volume.get_by_id') def test_create_from_source_volume_encrypted_update_volume( self, volume_get_by_id, vol_update, rekey_vol, cleanup_cg): fake_db = mock.MagicMock() fake_driver = mock.MagicMock() fake_driver.capabilities = {} fake_volume_manager = mock.MagicMock() fake_manager = create_volume_manager.CreateVolumeFromSpecTask( fake_volume_manager, fake_db, fake_driver) volume_db = {'encryption_key_id': fakes.ENCRYPTION_KEY_ID} volume_obj = fake_volume.fake_volume_obj(self.ctxt, **volume_db) volume_get_by_id.return_value = volume_obj source_volume_id = fakes.VOLUME2_ID fake_manager._create_from_source_volume( self.ctxt, volume_obj, source_volume_id) # Check if volume object is updated. self.assertTrue(vol_update.called) @mock.patch('cinder.volume.flows.manager.create_volume.' 'CreateVolumeFromSpecTask.' '_cleanup_cg_in_volume') @mock.patch('cinder.volume.flows.manager.create_volume.' 'CreateVolumeFromSpecTask.' '_handle_bootable_volume_glance_meta') @mock.patch('cinder.objects.Volume.get_by_id') @mock.patch('cinder.objects.Snapshot.get_by_id') def test_create_from_snapshot(self, snapshot_get_by_id, volume_get_by_id, handle_bootable, cleanup_cg): fake_db = mock.MagicMock() fake_driver = mock.MagicMock() fake_driver.capabilities = {} fake_volume_manager = mock.MagicMock() fake_manager = create_volume_manager.CreateVolumeFromSpecTask( fake_volume_manager, fake_db, fake_driver) volume_db = {'bootable': True} volume_obj = fake_volume.fake_volume_obj(self.ctxt, **volume_db) snapshot_obj = fake_snapshot.fake_snapshot_obj(self.ctxt) snapshot_get_by_id.return_value = snapshot_obj volume_get_by_id.return_value = volume_obj fake_manager._create_from_snapshot(self.ctxt, volume_obj, snapshot_obj.id) fake_driver.create_volume_from_snapshot.assert_called_once_with( volume_obj, snapshot_obj) handle_bootable.assert_called_once_with(self.ctxt, volume_obj, snapshot_id=snapshot_obj.id) cleanup_cg.assert_called_once_with(volume_obj) @mock.patch('cinder.volume.flows.manager.create_volume.' 'CreateVolumeFromSpecTask.' '_cleanup_cg_in_volume') @mock.patch('cinder.objects.Snapshot.get_by_id') def test_create_from_snapshot_update_failure(self, snapshot_get_by_id, mock_cleanup_cg): fake_db = mock.MagicMock() fake_driver = mock.MagicMock() fake_driver.capabilities = {} fake_volume_manager = mock.MagicMock() fake_manager = create_volume_manager.CreateVolumeFromSpecTask( fake_volume_manager, fake_db, fake_driver) volume_obj = fake_volume.fake_volume_obj(self.ctxt) snapshot_obj = fake_snapshot.fake_snapshot_obj(self.ctxt) snapshot_get_by_id.return_value = snapshot_obj fake_db.volume_get.side_effect = exception.CinderException self.assertRaises(exception.MetadataUpdateFailure, fake_manager._create_from_snapshot, self.ctxt, volume_obj, snapshot_obj.id) fake_driver.create_volume_from_snapshot.assert_called_once_with( volume_obj, snapshot_obj) mock_cleanup_cg.assert_called_once_with(volume_obj) @mock.patch('cinder.volume.flows.manager.create_volume.' 'CreateVolumeFromSpecTask.' '_cleanup_cg_in_volume') @mock.patch('cinder.volume.flows.manager.create_volume.' 'CreateVolumeFromSpecTask.' '_prepare_image_cache_entry') @mock.patch('cinder.volume.flows.manager.create_volume.' 'CreateVolumeFromSpecTask.' 
'_handle_bootable_volume_glance_meta') @mock.patch('cinder.image.image_utils.TemporaryImages.fetch') @mock.patch('cinder.image.image_utils.qemu_img_info') @mock.patch('cinder.image.image_utils.check_virtual_size') def test_create_encrypted_volume_from_image(self, mock_check_size, mock_qemu_img, mock_fetch_img, mock_handle_bootable, mock_prepare_image_cache, mock_cleanup_cg): fake_db = mock.MagicMock() fake_driver = mock.MagicMock() fake_driver.capabilities = {} fake_volume_manager = mock.MagicMock() fake_cache = mock.MagicMock() fake_manager = create_volume_manager.CreateVolumeFromSpecTask( fake_volume_manager, fake_db, fake_driver, fake_cache) volume = fake_volume.fake_volume_obj( self.ctxt, encryption_key_id=fakes.ENCRYPTION_KEY_ID, host='host@backend#pool') fake_image_service = fake_image.FakeImageService() image_meta = {} image_id = fakes.IMAGE_ID image_meta['id'] = image_id image_meta['status'] = 'active' image_meta['size'] = 1 image_location = 'abc' fake_db.volume_update.return_value = volume fake_manager._create_from_image(self.ctxt, volume, image_location, image_id, image_meta, fake_image_service) fake_driver.create_volume.assert_called_once_with(volume) fake_driver.copy_image_to_encrypted_volume.assert_called_once_with( self.ctxt, volume, fake_image_service, image_id, disable_sparse=False) mock_prepare_image_cache.assert_not_called() mock_handle_bootable.assert_called_once_with(self.ctxt, volume, image_id=image_id, image_meta=image_meta) mock_cleanup_cg.assert_called_once_with(volume) @mock.patch('cinder.volume.flows.manager.create_volume.' 'CreateVolumeFromSpecTask.' '_cleanup_cg_in_volume') @mock.patch('cinder.volume.flows.manager.create_volume.' 'CreateVolumeFromSpecTask.' '_handle_bootable_volume_glance_meta') @mock.patch('cinder.image.image_utils.TemporaryImages.fetch') @mock.patch('cinder.image.image_utils.qemu_img_info') @mock.patch('cinder.image.image_utils.check_virtual_size') def test_create_encrypted_volume_from_enc_image(self, mock_check_size, mock_qemu_img, mock_fetch_img, mock_handle_bootable, mock_cleanup_cg): fake_db = mock.MagicMock() fake_driver = mock.MagicMock() fake_driver.capabilities = {} fake_volume_manager = mock.MagicMock() fake_manager = create_volume_manager.CreateVolumeFromSpecTask( fake_volume_manager, fake_db, fake_driver) volume = fake_volume.fake_volume_obj( self.ctxt, encryption_key_id=fakes.ENCRYPTION_KEY_ID, host='host@backend#pool') fake_image_service = fake_image.FakeImageService() image_meta = {} image_id = fakes.IMAGE_ID image_meta['id'] = image_id image_meta['status'] = 'active' image_meta['size'] = 1 image_meta['cinder_encryption_key_id'] = \ '00000000-0000-0000-0000-000000000000' image_location = 'abc' fake_db.volume_update.return_value = volume fake_manager._create_from_image(self.ctxt, volume, image_location, image_id, image_meta, fake_image_service) fake_driver.create_volume.assert_called_once_with(volume) fake_driver.copy_image_to_encrypted_volume.assert_not_called() fake_driver.copy_image_to_volume.assert_called_once_with( self.ctxt, volume, fake_image_service, image_id, disable_sparse=False) mock_handle_bootable.assert_called_once_with(self.ctxt, volume, image_id=image_id, image_meta=image_meta) mock_cleanup_cg.assert_called_once_with(volume) @ddt.data({'driver_error': True}, {'driver_error': False}) @mock.patch('cinder.backup.api.API.get_available_backup_service_host') @mock.patch('cinder.backup.rpcapi.BackupAPI.restore_backup') @mock.patch('oslo_service.loopingcall.' 
'FixedIntervalWithTimeoutLoopingCall') @mock.patch('cinder.volume.flows.manager.create_volume.' 'CreateVolumeFromSpecTask.' '_create_raw_volume') @mock.patch('cinder.db.volume_update') @mock.patch('cinder.db.backup_update') @mock.patch('cinder.objects.Volume.get_by_id') @mock.patch('cinder.objects.Backup.get_by_id') @ddt.unpack def test_create_from_backup(self, backup_get_by_id, volume_get_by_id, mock_backup_update, mock_volume_update, mock_create_volume, mock_fixed_looping_call, mock_restore_backup, mock_get_backup_host, driver_error): fake_db = mock.MagicMock() fake_driver = mock.MagicMock() fake_driver.capabilities = {} fake_volume_manager = mock.MagicMock() backup_host = 'host@backend#pool' test_manager = create_volume_manager.CreateVolumeFromSpecTask( fake_volume_manager, fake_db, fake_driver) volume_obj = fake_volume.fake_volume_obj(self.ctxt) backup_obj = fake_backup.fake_backup_obj(self.ctxt, **{'status': 'available', 'host': backup_host}) backup_get_by_id.return_value = backup_obj volume_get_by_id.return_value = volume_obj mock_create_volume.return_value = {} mock_get_backup_host.return_value = backup_host mock_fixed_looping_call.return_value = mock.MagicMock() if driver_error: fake_driver.create_volume_from_backup.side_effect = [ NotImplementedError] test_manager._create_from_backup(self.ctxt, volume_obj, backup_obj.id) fake_driver.create_volume_from_backup.assert_called_once_with( volume_obj, backup_obj) if driver_error: mock_create_volume.assert_called_once_with(self.ctxt, volume_obj) mock_get_backup_host.assert_called_once_with( backup_obj.host, backup_obj.availability_zone) mock_restore_backup.assert_called_once_with(self.ctxt, backup_host, backup_obj, volume_obj['id'], volume_is_new=True) else: fake_driver.create_volume_from_backup.assert_called_once_with( volume_obj, backup_obj) @mock.patch('cinder.message.api.API.create') def test_create_drive_error(self, mock_message_create): fake_db = mock.MagicMock() fake_driver = mock.MagicMock() fake_driver.capabilities = {} fake_volume_manager = mock.MagicMock() fake_manager = create_volume_manager.CreateVolumeFromSpecTask( fake_volume_manager, fake_db, fake_driver) volume_obj = fake_volume.fake_volume_obj(self.ctxt) err = NotImplementedError() fake_driver.create_volume.side_effect = [err] self.assertRaises( NotImplementedError, fake_manager._create_raw_volume, self.ctxt, volume_obj) mock_message_create.assert_called_once_with( self.ctxt, message_field.Action.CREATE_VOLUME_FROM_BACKEND, resource_uuid=volume_obj.id, detail=message_field.Detail.DRIVER_FAILED_CREATE, exception=err) @mock.patch('cinder.volume.volume_utils.notify_about_volume_usage') def test_notify_volume_action_do_nothing(self, notify_mock): task = create_volume_manager.NotifyVolumeActionTask(mock.sentinel.db, None) task.execute(mock.sentinel.context, mock.sentinel.volume) notify_mock.assert_not_called() @mock.patch('cinder.volume.volume_utils.notify_about_volume_usage') def test_notify_volume_action_send_notification(self, notify_mock): event_suffix = 'create.start' volume = mock.Mock() task = create_volume_manager.NotifyVolumeActionTask(mock.sentinel.db, event_suffix) task.execute(mock.sentinel.context, volume) notify_mock.assert_called_once_with(mock.sentinel.context, volume, event_suffix, host=volume.host) # Test possible combinations to confirm volumes from W, X, Y releases work @ddt.data((False, True), (True, None), (True, False)) @ddt.unpack @mock.patch('taskflow.engines.load') @mock.patch.object(create_volume_manager, 'CreateVolumeOnFinishTask') 
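# NOTE: the stacked mock.patch decorators on this test are applied
# bottom-up, so their mocks reach the test signature in reverse order
# (flow_mock first, load_mock last), after the two values unpacked by
# @ddt.data/@ddt.unpack.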
@mock.patch.object(create_volume_manager, 'CreateVolumeFromSpecTask') @mock.patch.object(create_volume_manager, 'NotifyVolumeActionTask') @mock.patch.object(create_volume_manager, 'ExtractVolumeSpecTask') @mock.patch.object(create_volume_manager, 'OnFailureRescheduleTask') @mock.patch.object(create_volume_manager, 'ExtractVolumeRefTask') @mock.patch.object(create_volume_manager.linear_flow, 'Flow') def test_get_flow(self, is_migration_target, use_quota, flow_mock, extract_ref_mock, onfailure_mock, extract_spec_mock, notify_mock, create_mock, onfinish_mock, load_mock): self.assertIsInstance(is_migration_target, bool) filter_properties = {'retry': mock.sentinel.retry} tasks = [mock.call(extract_ref_mock.return_value), mock.call(onfailure_mock.return_value), mock.call(extract_spec_mock.return_value), mock.call(notify_mock.return_value), mock.call(create_mock.return_value, onfinish_mock.return_value)] volume = mock.Mock( **{'is_migration_target.return_value': is_migration_target, 'use_quota': use_quota}) result = create_volume_manager.get_flow( mock.sentinel.context, mock.sentinel.manager, mock.sentinel.db, mock.sentinel.driver, mock.sentinel.scheduler_rpcapi, mock.sentinel.host, volume, mock.sentinel.allow_reschedule, mock.sentinel.reschedule_context, mock.sentinel.request_spec, filter_properties, mock.sentinel.image_volume_cache) if not volume.quota_use: volume.is_migration_target.assert_called_once_with() if is_migration_target or not use_quota: tasks.pop(3) notify_mock.assert_not_called() end_notify_suffix = None else: notify_mock.assert_called_once_with(mock.sentinel.db, 'create.start') end_notify_suffix = 'create.end' flow_mock.assert_called_once_with('volume_create_manager') extract_ref_mock.assert_called_once_with(mock.sentinel.db, mock.sentinel.host, set_error=False) onfailure_mock.assert_called_once_with( mock.sentinel.reschedule_context, mock.sentinel.db, mock.sentinel.manager, mock.sentinel.scheduler_rpcapi, mock.ANY) extract_spec_mock.assert_called_once_with(mock.sentinel.db) create_mock.assert_called_once_with(mock.sentinel.manager, mock.sentinel.db, mock.sentinel.driver, mock.sentinel.image_volume_cache) onfinish_mock.assert_called_once_with(mock.sentinel.db, end_notify_suffix) volume_flow = flow_mock.return_value self.assertEqual(len(tasks), volume_flow.add.call_count) volume_flow.add.assert_has_calls(tasks) load_mock.assert_called_once_with( volume_flow, store={'context': mock.sentinel.context, 'filter_properties': filter_properties, 'request_spec': mock.sentinel.request_spec, 'volume': volume}) self.assertEqual(result, load_mock.return_value) @ddt.ddt(testNameFormat=ddt.TestNameFormat.INDEX_ONLY) class CreateVolumeFlowManagerGlanceCinderBackendCase(test.TestCase): def setUp(self): super(CreateVolumeFlowManagerGlanceCinderBackendCase, self).setUp() self.ctxt = context.get_admin_context() # data for test__extract_cinder_ids # legacy glance cinder URI: cinder:// # new-style glance cinder URI: cinder:/// LEGACY_VOL2 = 'cinder://%s' % fakes.VOLUME2_ID NEW_VOL3 = 'cinder://glance-store-name/%s' % fakes.VOLUME3_ID # these *may* be illegal names in glance, but check anyway NEW_VOL4 = 'cinder://glance/store/name/%s' % fakes.VOLUME4_ID NEW_VOL5 = 'cinder://glance:store:name/%s' % fakes.VOLUME5_ID NEW_VOL6 = 'cinder://glance:store,name/%s' % fakes.VOLUME6_ID NOT_CINDER1 = 'rbd://%s' % fakes.UUID1 NOT_CINDER2 = 'http://%s' % fakes.UUID2 NOGOOD3 = 'cinder://glance:store,name/%s/garbage' % fakes.UUID3 NOGOOD4 = 'cinder://glance:store,name/%s-garbage' % fakes.UUID4 NOGOOD5 = fakes.UUID5 
NOGOOD6 = 'cinder://store-name/12345678' NOGOOD7 = 'cinder://' NOGOOD8 = 'some-random-crap' NOGOOD9 = None TEST_CASE_DATA = ( # the format of these is: (input, expected output) ([LEGACY_VOL2], [fakes.VOLUME2_ID]), ([NEW_VOL3], [fakes.VOLUME3_ID]), ([NEW_VOL4], [fakes.VOLUME4_ID]), ([NEW_VOL5], [fakes.VOLUME5_ID]), ([NEW_VOL6], [fakes.VOLUME6_ID]), ([], []), ([''], []), ([NOT_CINDER1], []), ([NOT_CINDER2], []), ([NOGOOD3], []), ([NOGOOD4], []), ([NOGOOD5], []), ([NOGOOD6], []), ([NOGOOD7], []), ([NOGOOD8], []), ([NOGOOD9], []), ([NOT_CINDER1, NOGOOD4], []), # mix of URIs should only get the cinder IDs ([LEGACY_VOL2, NOT_CINDER1, NEW_VOL3, NOT_CINDER2], [fakes.VOLUME2_ID, fakes.VOLUME3_ID]), # a bad cinder URI early in the list shouldn't prevent us from # processing a good one later in the list ([NOGOOD6, NEW_VOL3, NOGOOD7, LEGACY_VOL2], [fakes.VOLUME3_ID, fakes.VOLUME2_ID]), ) @ddt.data(*TEST_CASE_DATA) @ddt.unpack def test__extract_cinder_ids(self, url_list, id_list): """Test utility function that gets IDs from Glance location URIs""" klass = create_volume_manager.CreateVolumeFromSpecTask actual = klass._extract_cinder_ids(url_list) self.assertEqual(id_list, actual) @mock.patch('cinder.objects.Volume.get_by_id') @mock.patch('cinder.volume.flows.manager.create_volume.' 'CreateVolumeFromSpecTask.' '_cleanup_cg_in_volume') @mock.patch('cinder.image.image_utils.TemporaryImages.fetch') @mock.patch('cinder.volume.flows.manager.create_volume.' 'CreateVolumeFromSpecTask.' '_handle_bootable_volume_glance_meta') @mock.patch('cinder.image.image_utils.qemu_img_info') def test_create_from_image_volume(self, mock_qemu_info, handle_bootable, mock_fetch_img, mock_cleanup_cg, mock_get, format='raw', owner=None, location=True): self.flags(allowed_direct_url_schemes=['cinder']) mock_fetch_img.return_value = mock.MagicMock( spec=utils.get_file_spec()) fake_db = mock.MagicMock() fake_driver = mock.MagicMock() fake_driver.capabilities = {} fake_manager = create_volume_manager.CreateVolumeFromSpecTask( mock.MagicMock(), fake_db, fake_driver) fake_image_service = fake_image.FakeImageService() volume = fake_volume.fake_volume_obj(self.ctxt, host='host@backend#pool') image_volume = fake_volume.fake_volume_obj(self.ctxt, volume_metadata={}) image_id = fakes.IMAGE_ID image_info = imageutils.QemuImgInfo() image_info.virtual_size = '1073741824' mock_qemu_info.return_value = image_info url = 'cinder://%s' % image_volume['id'] image_location = None if location: image_location = (url, [{'url': url, 'metadata': {}}]) image_meta = {'id': image_id, 'container_format': 'bare', 'disk_format': format, 'size': 1024, 'owner': owner or self.ctxt.project_id, 'virtual_size': None, 'cinder_encryption_key_id': None} fake_driver.clone_image.return_value = (None, False) fake_db.volume_get_all.return_value = [] fake_db.volume_get_all_by_host.return_value = [image_volume] mock_get.return_value = image_volume fake_manager._create_from_image(self.ctxt, volume, image_location, image_id, image_meta, fake_image_service) if format == 'raw' and not owner and location: fake_driver.create_cloned_volume.assert_called_once_with( volume, image_volume) handle_bootable.assert_called_once_with(self.ctxt, volume, image_id=image_id, image_meta=image_meta) mock_get.assert_called_once_with(self.ctxt, image_volume.id) else: self.assertFalse(fake_driver.create_cloned_volume.called) mock_cleanup_cg.assert_called_once_with(volume) @mock.patch('cinder.objects.Volume.get_by_id') @mock.patch('cinder.volume.flows.manager.create_volume.' 'CreateVolumeFromSpecTask.' 
'_cleanup_cg_in_volume') @mock.patch('cinder.image.image_utils.TemporaryImages.fetch') @mock.patch('cinder.volume.flows.manager.create_volume.' 'CreateVolumeFromSpecTask.' '_handle_bootable_volume_glance_meta') @mock.patch('cinder.image.image_utils.qemu_img_info') def test_create_from_image_across(self, mock_qemu_info, handle_bootable, mock_fetch_img, mock_cleanup_cg, mock_get, format='raw', owner=None, location=True): self.flags(allowed_direct_url_schemes=['cinder']) mock_fetch_img.return_value = mock.MagicMock( spec=utils.get_file_spec()) fake_db = mock.MagicMock() fake_driver = mock.MagicMock() fake_driver.capabilities = {'clone_across_pools': True} fake_manager = create_volume_manager.CreateVolumeFromSpecTask( mock.MagicMock(), fake_db, fake_driver) fake_image_service = fake_image.FakeImageService() volume = fake_volume.fake_volume_obj(self.ctxt, host='host@backend#pool') image_volume = fake_volume.fake_volume_obj(self.ctxt, volume_metadata={}) image_id = fakes.IMAGE_ID image_info = imageutils.QemuImgInfo() image_info.virtual_size = '1073741824' mock_qemu_info.return_value = image_info url = 'cinder://%s' % image_volume['id'] image_location = None if location: image_location = (url, [{'url': url, 'metadata': {}}]) image_meta = {'id': image_id, 'container_format': 'bare', 'disk_format': format, 'size': 1024, 'owner': owner or self.ctxt.project_id, 'virtual_size': None, 'cinder_encryption_key_id': None} fake_driver.clone_image.return_value = (None, False) fake_db.volume_get_all.return_value = [image_volume] fake_db.volume_get_all_by_host.return_value = [] mock_get.return_value = image_volume fake_manager._create_from_image(self.ctxt, volume, image_location, image_id, image_meta, fake_image_service) if format == 'raw' and not owner and location: fake_driver.create_cloned_volume.assert_called_once_with( volume, image_volume) handle_bootable.assert_called_once_with(self.ctxt, volume, image_id=image_id, image_meta=image_meta) mock_get.assert_called_once_with(self.ctxt, image_volume.id) else: self.assertFalse(fake_driver.create_cloned_volume.called) mock_cleanup_cg.assert_called_once_with(volume) LEGACY_URI = 'cinder://%s' % fakes.VOLUME_ID MULTISTORE_URI = 'cinder://fake-store/%s' % fakes.VOLUME_ID @ddt.data(LEGACY_URI, MULTISTORE_URI) @mock.patch('cinder.objects.Volume.get_by_id') @mock.patch('cinder.volume.flows.manager.create_volume.' 'CreateVolumeFromSpecTask.' '_cleanup_cg_in_volume') @mock.patch('cinder.image.image_utils.TemporaryImages.fetch') @mock.patch('cinder.volume.flows.manager.create_volume.' 'CreateVolumeFromSpecTask.' '_handle_bootable_volume_glance_meta') @mock.patch('cinder.image.image_utils.qemu_img_info') def test_create_from_image_volume_ignore_size(self, location_uri, mock_qemu_info, handle_bootable, mock_fetch_img, mock_cleanup_cg, mock_get, format='raw', owner=None, location=True): self.flags(allowed_direct_url_schemes=['cinder']) self.override_config('allowed_direct_url_schemes', 'cinder') mock_fetch_img.return_value = mock.MagicMock( spec=utils.get_file_spec()) fake_db = mock.MagicMock() fake_driver = mock.MagicMock() fake_driver.capabilities = {} fake_manager = create_volume_manager.CreateVolumeFromSpecTask( mock.MagicMock(), fake_db, fake_driver) fake_image_service = fake_image.FakeImageService() volume = fake_volume.fake_volume_obj(self.ctxt, host='host@backend#pool') image_volume = fake_volume.fake_volume_obj(self.ctxt, volume_metadata={}) image_id = fakes.IMAGE_ID image_info = imageutils.QemuImgInfo() # Making huge image. 
If cinder will try to convert it, it # will fail because of free space being too low. image_info.virtual_size = '1073741824000000000000' mock_qemu_info.return_value = image_info url = location_uri image_location = None if location: image_location = (url, [{'url': url, 'metadata': {}}]) image_meta = {'id': image_id, 'container_format': 'bare', 'disk_format': format, 'size': 1024, 'owner': owner or self.ctxt.project_id, 'virtual_size': None, 'cinder_encryption_key_id': None} fake_driver.clone_image.return_value = (None, False) fake_db.volume_get_all_by_host.return_value = [image_volume] mock_get.return_value = image_volume fake_manager._create_from_image(self.ctxt, volume, image_location, image_id, image_meta, fake_image_service) if format == 'raw' and not owner and location: fake_driver.create_cloned_volume.assert_called_once_with( volume, image_volume) handle_bootable.assert_called_once_with(self.ctxt, volume, image_id=image_id, image_meta=image_meta) mock_get.assert_called_once_with(self.ctxt, image_volume.id) else: self.assertFalse(fake_driver.create_cloned_volume.called) mock_cleanup_cg.assert_called_once_with(volume) def test_create_from_image_volume_in_qcow2_format(self): self.test_create_from_image_volume(format='qcow2') def test_create_from_image_volume_of_other_owner(self): self.test_create_from_image_volume(owner='fake-owner') def test_create_from_image_volume_without_location(self): self.test_create_from_image_volume(location=False) @ddt.ddt @mock.patch('cinder.image.image_utils.TemporaryImages.fetch') @mock.patch('cinder.volume.flows.manager.create_volume.' 'CreateVolumeFromSpecTask.' '_handle_bootable_volume_glance_meta') @mock.patch('cinder.volume.flows.manager.create_volume.' 'CreateVolumeFromSpecTask.' '_create_from_source_volume') @mock.patch('cinder.volume.flows.manager.create_volume.' 'CreateVolumeFromSpecTask.' '_create_from_image_download') @mock.patch('cinder.context.get_internal_tenant_context') class CreateVolumeFlowManagerImageCacheTestCase(test.TestCase): def setUp(self): super(CreateVolumeFlowManagerImageCacheTestCase, self).setUp() self.ctxt = context.get_admin_context() self.mock_db = mock.MagicMock() self.mock_driver = mock.MagicMock() self.mock_cache = mock.MagicMock() self.mock_image_service = mock.MagicMock() self.mock_volume_manager = mock.MagicMock() self.internal_context = self.ctxt self.internal_context.user_id = 'abc123' self.internal_context.project_id = 'def456' @mock.patch('cinder.image.image_utils.check_available_space') def test_create_from_image_clone_image_and_skip_cache( self, mock_check_space, mock_get_internal_context, mock_create_from_img_dl, mock_create_from_src, mock_handle_bootable, mock_fetch_img): self.mock_driver.clone_image.return_value = (None, True) volume = fake_volume.fake_volume_obj(self.ctxt, host='host@backend#pool') image_location = 'someImageLocationStr' image_id = fakes.IMAGE_ID image_meta = {'virtual_size': '1073741824', 'size': 1073741824} manager = create_volume_manager.CreateVolumeFromSpecTask( self.mock_volume_manager, self.mock_db, self.mock_driver, image_volume_cache=self.mock_cache ) manager._create_from_image(self.ctxt, volume, image_location, image_id, image_meta, self.mock_image_service) # Make sure check_available_space is not called because the driver # will clone things for us. 
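# NOTE: drivers report cloning success through the second element of the
# clone_image() return value; (None, True) above means "cloned", so the
# flow is expected to skip the space check, the image-volume cache and the
# download path entirely, as the assertions below verify.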
self.assertFalse(mock_check_space.called) # Make sure clone_image is always called even if the cache is enabled self.assertTrue(self.mock_driver.clone_image.called) # Create from source shouldn't happen if clone_image succeeds self.assertFalse(mock_create_from_src.called) # The image download should not happen if clone_image succeeds self.assertFalse(mock_create_from_img_dl.called) mock_handle_bootable.assert_called_once_with( self.ctxt, volume, image_id=image_id, image_meta=image_meta ) @mock.patch('cinder.image.image_utils.qemu_img_info') @mock.patch('cinder.image.image_utils.check_available_space') @mock.patch('cinder.image.image_utils.verify_glance_image_signature') def test_create_from_image_cannot_use_cache( self, mock_verify, mock_qemu_info, mock_check_space, mock_get_internal_context, mock_create_from_img_dl, mock_create_from_src, mock_handle_bootable, mock_fetch_img): mock_get_internal_context.return_value = None self.mock_driver.clone_image.return_value = (None, False) self.flags(verify_glance_signatures='disabled') volume = fake_volume.fake_volume_obj(self.ctxt, host='host@backend#pool') image_info = imageutils.QemuImgInfo() image_info.virtual_size = '1073741824' mock_qemu_info.return_value = image_info image_location = 'someImageLocationStr' image_id = fakes.IMAGE_ID image_meta = {'id': image_id, 'virtual_size': '1073741824', 'size': 1073741824} manager = create_volume_manager.CreateVolumeFromSpecTask( self.mock_volume_manager, self.mock_db, self.mock_driver, image_volume_cache=self.mock_cache ) manager._create_from_image(self.ctxt, volume, image_location, image_id, image_meta, self.mock_image_service) # Make sure check_available_space is always called self.assertTrue(mock_check_space.called) # Make sure clone_image is always called self.assertTrue(self.mock_driver.clone_image.called) # Create from source shouldn't happen if cache cannot be used. self.assertFalse(mock_create_from_src.called) # The image download should happen if clone fails and we can't use the # image-volume cache. 
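# NOTE: the cache is unusable here because get_internal_tenant_context()
# was mocked to return None above; without an internal tenant context the
# image-volume cache has no project to own cached volumes, so the flow is
# expected to fall back to a plain image download.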
mock_create_from_img_dl.assert_called_once_with( self.ctxt, volume, image_location, image_meta, self.mock_image_service ) # This should not attempt to use a minimal size volume self.assertFalse(self.mock_db.volume_update.called) # Make sure we didn't try and create a cache entry self.assertFalse(self.mock_cache.ensure_space.called) self.assertFalse(self.mock_cache.create_cache_entry.called) mock_handle_bootable.assert_called_once_with( self.ctxt, volume, image_id=image_id, image_meta=image_meta ) @ddt.data(False, True) @mock.patch('cinder.image.image_utils.check_available_space') @mock.patch('cinder.image.image_utils.qemu_img_info') def test_create_from_image_clone_failure( self, cloning_supported, mock_qemu_info, mock_check_space, mock_get_internal_context, mock_create_from_img_dl, mock_create_from_src, mock_handle_bootable, mock_fetch_img): image_location = 'someImageLocationStr' image_id = fakes.IMAGE_ID image_meta = mock.MagicMock() volume_id = str(uuid.uuid4()) self.mock_cache.get_entry.return_value = {'volume_id': volume_id} volume = fake_volume.fake_volume_obj(self.ctxt, size=1, host='foo@bar#pool') self.mock_driver.clone_image.return_value = (None, False) self.flags(verify_glance_signatures='disabled') manager = create_volume_manager.CreateVolumeFromSpecTask( self.mock_volume_manager, self.mock_db, self.mock_driver, image_volume_cache=self.mock_cache ) if cloning_supported: mock_create_from_src.side_effect = exception.SnapshotLimitReached( 'Error during cloning') self.assertRaises( exception.SnapshotLimitReached, manager._create_from_image, self.ctxt, volume, image_location, image_id, image_meta, self.mock_image_service) mock_handle_bootable.assert_not_called() self.mock_cache.delete_cached_volume.assert_called_once_with( self.ctxt, self.mock_cache.get_entry.return_value, mock.ANY) else: mock_create_from_src.side_effect = NotImplementedError( 'Driver does not support clone') model_update = manager._create_from_image( self.ctxt, volume, image_location, image_id, image_meta, self.mock_image_service) mock_create_from_img_dl.assert_called_once() self.assertEqual(mock_create_from_img_dl.return_value, model_update) mock_handle_bootable.assert_called_once_with(self.ctxt, volume, image_id=image_id, image_meta=image_meta) # Ensure cloning was attempted and that it failed mock_create_from_src.assert_called_once_with(self.ctxt, volume, volume_id) @mock.patch('cinder.volume.flows.manager.create_volume.' 'CreateVolumeFromSpecTask.' 
'_cleanup_cg_in_volume') @mock.patch('cinder.image.image_utils.check_available_space') @mock.patch('cinder.image.image_utils.qemu_img_info') @mock.patch('cinder.db.volume_update') @mock.patch('cinder.image.image_utils.verify_glance_image_signature') def test_create_from_image_extend_failure( self, mock_verify, mock_volume_update, mock_qemu_info, mock_check_size, mock_get_internal_context, mock_create_from_img_dl, mock_create_from_src, mock_handle_bootable, mock_fetch_img, mock_cleanup_cg): self.mock_driver.clone_image.return_value = (None, False) self.mock_cache.get_entry.return_value = None self.mock_driver.extend_volume.side_effect = ( exception.CinderException('Error during extending')) self.flags(verify_glance_signatures='disabled') volume_size = 2 volume = fake_volume.fake_volume_obj(self.ctxt, host='host@backend#pool', size=volume_size) image_info = imageutils.QemuImgInfo() image_info.virtual_size = '1073741824' mock_qemu_info.return_value = image_info image_location = 'someImageLocationStr' image_id = fakes.IMAGE_ID image_meta = {'virtual_size': '1073741824', 'size': '1073741824'} manager = create_volume_manager.CreateVolumeFromSpecTask( self.mock_volume_manager, self.mock_db, self.mock_driver, image_volume_cache=self.mock_cache ) self.assertRaises(exception.CinderException, manager._create_from_image, self.ctxt, volume, image_location, image_id, image_meta, self.mock_image_service) self.assertTrue(mock_cleanup_cg.called) mock_volume_update.assert_any_call(self.ctxt, volume.id, {'size': 1}) self.assertEqual(volume_size, volume.size) @mock.patch('cinder.image.image_utils.check_available_space') def test_create_from_image_bigger_size( self, mock_check_space, mock_get_internal_context, mock_create_from_img_dl, mock_create_from_src, mock_handle_bootable, mock_fetch_img): volume = fake_volume.fake_volume_obj(self.ctxt) image_location = 'someImageLocationStr' image_id = fakes.IMAGE_ID image_meta = {'virtual_size': '2147483648', 'size': 2147483648} manager = create_volume_manager.CreateVolumeFromSpecTask( self.mock_volume_manager, self.mock_db, self.mock_driver, image_volume_cache=self.mock_cache ) self.assertRaises( exception.ImageUnacceptable, manager._create_from_image, self.ctxt, volume, image_location, image_id, image_meta, self.mock_image_service) def test_create_from_image_cache_hit( self, mock_get_internal_context, mock_create_from_img_dl, mock_create_from_src, mock_handle_bootable, mock_fetch_img): self.mock_driver.clone_image.return_value = (None, False) image_volume_id = '70a599e0-31e7-49b7-b260-868f441e862b' self.mock_cache.get_entry.return_value = { 'volume_id': image_volume_id } volume = fake_volume.fake_volume_obj(self.ctxt, host='host@backend#pool') image_location = 'someImageLocationStr' image_id = fakes.IMAGE_ID image_meta = {'virtual_size': None, 'size': 1024} manager = create_volume_manager.CreateVolumeFromSpecTask( self.mock_volume_manager, self.mock_db, self.mock_driver, image_volume_cache=self.mock_cache ) manager._create_from_image(self.ctxt, volume, image_location, image_id, image_meta, self.mock_image_service) # Make sure clone_image is always called even if the cache is enabled self.assertTrue(self.mock_driver.clone_image.called) # For a cache hit it should only clone from the image-volume mock_create_from_src.assert_called_once_with(self.ctxt, volume, image_volume_id) # The image download should not happen when we get a cache hit self.assertFalse(mock_create_from_img_dl.called) mock_handle_bootable.assert_called_once_with( self.ctxt, volume, image_id=image_id, 
image_meta=image_meta ) @mock.patch('cinder.db.volume_update') @mock.patch('cinder.objects.Volume.get_by_id') @mock.patch('cinder.image.image_utils.qemu_img_info') @mock.patch('cinder.image.image_utils.check_available_space') @mock.patch('cinder.image.image_utils.verify_glance_image_signature') def test_create_from_image_cache_miss( self, mock_verify, mock_check_size, mock_qemu_info, mock_volume_get, mock_volume_update, mock_get_internal_context, mock_create_from_img_dl, mock_create_from_src, mock_handle_bootable, mock_fetch_img): mock_get_internal_context.return_value = self.ctxt mock_fetch_img.return_value = mock.MagicMock( spec=utils.get_file_spec()) self.flags(verify_glance_signatures='disabled') image_info = imageutils.QemuImgInfo() image_info.virtual_size = '2147483648' mock_qemu_info.return_value = image_info self.mock_driver.clone_image.return_value = (None, False) self.mock_cache.get_entry.return_value = None volume = fake_volume.fake_volume_obj(self.ctxt, size=10, host='foo@bar#pool') mock_volume_get.return_value = volume image_location = 'someImageLocationStr' image_id = fakes.IMAGE_ID image_meta = {'id': image_id, 'size': 2000000} manager = create_volume_manager.CreateVolumeFromSpecTask( self.mock_volume_manager, self.mock_db, self.mock_driver, image_volume_cache=self.mock_cache ) with mock.patch('os.path.exists', return_value=True): manager._create_from_image(self.ctxt, volume, image_location, image_id, image_meta, self.mock_image_service) # Make sure clone_image is always called self.assertTrue(self.mock_driver.clone_image.called) # The image download should happen if clone fails and # we get a cache miss mock_create_from_img_dl.assert_called_once_with( self.ctxt, mock.ANY, image_location, image_meta, self.mock_image_service ) # The volume size should be reduced to virtual_size and then put back mock_volume_update.assert_any_call(self.ctxt, volume.id, {'size': 2}) mock_volume_update.assert_any_call(self.ctxt, volume.id, {'size': 10}) # Make sure a new cache entry was created (self.mock_volume_manager. 
_create_image_cache_volume_entry.assert_called_once_with( self.ctxt, volume, image_id, image_meta)) mock_handle_bootable.assert_called_once_with( self.ctxt, volume, image_id=image_id, image_meta=image_meta ) @mock.patch('cinder.db.volume_update') @mock.patch('cinder.objects.Volume.get_by_id') @mock.patch('cinder.image.image_utils.qemu_img_info') @mock.patch('cinder.image.image_utils.check_available_space') @mock.patch('cinder.image.image_utils.verify_glance_image_signature') def test_create_from_image_cache_miss_error_downloading( self, mock_verify, mock_check_size, mock_qemu_info, mock_volume_get, mock_volume_update, mock_get_internal_context, mock_create_from_img_dl, mock_create_from_src, mock_handle_bootable, mock_fetch_img): mock_fetch_img.return_value = mock.MagicMock() image_info = imageutils.QemuImgInfo() image_info.virtual_size = '2147483648' mock_qemu_info.return_value = image_info self.mock_driver.clone_image.return_value = (None, False) self.mock_cache.get_entry.return_value = None self.flags(verify_glance_signatures='disabled') volume = fake_volume.fake_volume_obj(self.ctxt, size=10, host='foo@bar#pool') mock_volume_get.return_value = volume mock_create_from_img_dl.side_effect = exception.CinderException() image_location = 'someImageLocationStr' image_id = fakes.IMAGE_ID image_meta = mock.MagicMock() manager = create_volume_manager.CreateVolumeFromSpecTask( self.mock_volume_manager, self.mock_db, self.mock_driver, image_volume_cache=self.mock_cache ) with mock.patch('os.path.exists', return_value=True): self.assertRaises( exception.CinderException, manager._create_from_image, self.ctxt, volume, image_location, image_id, image_meta, self.mock_image_service ) # Make sure clone_image is always called self.assertTrue(self.mock_driver.clone_image.called) # The image download should happen if clone fails and # we get a cache miss mock_create_from_img_dl.assert_called_once_with( self.ctxt, mock.ANY, image_location, image_meta, self.mock_image_service ) # The volume size should be reduced to virtual_size and then put back, # especially if there is an exception while creating the volume. 
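# NOTE: exactly two updates are expected: the volume record is shrunk to
# the 2 GiB virtual size for the copy and then restored to the requested
# 10 GiB, and the restore must still happen even though the download
# raised CinderException.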
self.assertEqual(2, mock_volume_update.call_count) mock_volume_update.assert_any_call(self.ctxt, volume.id, {'size': 2}) mock_volume_update.assert_any_call(self.ctxt, volume.id, {'size': 10}) # Make sure we didn't try and create a cache entry self.assertFalse(self.mock_cache.ensure_space.called) self.assertFalse(self.mock_cache.create_cache_entry.called) @mock.patch('cinder.image.image_utils.qemu_img_info') @mock.patch('cinder.image.image_utils.check_available_space') @mock.patch('cinder.image.image_utils.verify_glance_image_signature') def test_create_from_image_no_internal_context( self, mock_verify, mock_chk_space, mock_qemu_info, mock_get_internal_context, mock_create_from_img_dl, mock_create_from_src, mock_handle_bootable, mock_fetch_img): self.mock_driver.clone_image.return_value = (None, False) mock_get_internal_context.return_value = None self.flags(verify_glance_signatures='disabled') volume = fake_volume.fake_volume_obj(self.ctxt, host='host@backend#pool') image_info = imageutils.QemuImgInfo() image_info.virtual_size = '1073741824' mock_qemu_info.return_value = image_info image_location = 'someImageLocationStr' image_id = fakes.IMAGE_ID image_meta = {'virtual_size': '1073741824', 'size': 1073741824} manager = create_volume_manager.CreateVolumeFromSpecTask( self.mock_volume_manager, self.mock_db, self.mock_driver, image_volume_cache=self.mock_cache ) manager._create_from_image(self.ctxt, volume, image_location, image_id, image_meta, self.mock_image_service) # Make sure check_available_space is always called self.assertTrue(mock_chk_space.called) # Make sure clone_image is always called self.assertTrue(self.mock_driver.clone_image.called) # Create from source shouldn't happen if cache cannot be used. self.assertFalse(mock_create_from_src.called) # The image download should happen if clone fails and we can't use the # image-volume cache due to not having an internal context available. mock_create_from_img_dl.assert_called_once_with( self.ctxt, volume, image_location, image_meta, self.mock_image_service ) # This should not attempt to use a minimal size volume self.assertFalse(self.mock_db.volume_update.called) # Make sure we didn't try and create a cache entry self.assertFalse(self.mock_cache.ensure_space.called) self.assertFalse(self.mock_cache.create_cache_entry.called) mock_handle_bootable.assert_called_once_with( self.ctxt, volume, image_id=image_id, image_meta=image_meta ) @mock.patch('cinder.volume.flows.manager.create_volume.' 'CreateVolumeFromSpecTask.' 
'_cleanup_cg_in_volume') @mock.patch('cinder.image.image_utils.check_available_space') @mock.patch('cinder.image.image_utils.qemu_img_info') @mock.patch('cinder.image.image_utils.verify_glance_image_signature') def test_create_from_image_cache_miss_error_size_invalid( self, mock_verify, mock_qemu_info, mock_check_space, mock_get_internal_context, mock_create_from_img_dl, mock_create_from_src, mock_handle_bootable, mock_fetch_img, mock_cleanup_cg): mock_fetch_img.return_value = mock.MagicMock() image_info = imageutils.QemuImgInfo() image_info.virtual_size = '2147483648' mock_qemu_info.return_value = image_info self.mock_driver.clone_image.return_value = (None, False) self.mock_cache.get_entry.return_value = None self.flags(verify_glance_signatures='disabled') volume = fake_volume.fake_volume_obj(self.ctxt, size=1, host='foo@bar#pool') image_volume = fake_volume.fake_db_volume(size=2) self.mock_db.volume_create.return_value = image_volume image_location = 'someImageLocationStr' image_id = fakes.IMAGE_ID image_meta = mock.MagicMock() manager = create_volume_manager.CreateVolumeFromSpecTask( self.mock_volume_manager, self.mock_db, self.mock_driver, image_volume_cache=self.mock_cache ) with mock.patch('os.path.exists', return_value=True): self.assertRaises( exception.ImageUnacceptable, manager._create_from_image, self.ctxt, volume, image_location, image_id, image_meta, self.mock_image_service ) self.assertTrue(mock_cleanup_cg.called) # The volume size should NOT be changed when in this case self.assertFalse(self.mock_db.volume_update.called) # Make sure we didn't try and create a cache entry self.assertFalse(self.mock_cache.ensure_space.called) self.assertFalse(self.mock_cache.create_cache_entry.called) @mock.patch('cinder.image.image_utils.check_available_space') @mock.patch('cinder.image.image_utils.qemu_img_info') @mock.patch('cinder.message.api.API.create') def test_create_from_image_insufficient_space( self, mock_message_create, mock_qemu_info, mock_check_space, mock_get_internal_context, mock_create_from_img_dl, mock_create_from_src, mock_handle_bootable, mock_fetch_img): image_info = imageutils.QemuImgInfo() image_info.virtual_size = '2147483648' mock_qemu_info.return_value = image_info self.mock_driver.clone_image.return_value = (None, False) self.mock_cache.get_entry.return_value = None volume = fake_volume.fake_volume_obj(self.ctxt, size=1, host='foo@bar#pool') image_volume = fake_volume.fake_db_volume(size=2) self.mock_db.volume_create.return_value = image_volume image_location = 'someImageLocationStr' image_id = fakes.IMAGE_ID image_meta = mock.MagicMock() mock_check_space.side_effect = exception.ImageTooBig( image_id=image_id, reason="fake") manager = create_volume_manager.CreateVolumeFromSpecTask( self.mock_volume_manager, self.mock_db, self.mock_driver, image_volume_cache=self.mock_cache ) self.assertRaises( exception.ImageTooBig, manager._create_from_image, self.ctxt, volume, image_location, image_id, image_meta, self.mock_image_service ) mock_message_create.assert_called_once_with( self.ctxt, message_field.Action.COPY_IMAGE_TO_VOLUME, resource_uuid=volume.id, detail=message_field.Detail.NOT_ENOUGH_SPACE_FOR_IMAGE, exception=mock.ANY) # The volume size should NOT be changed when in this case self.assertFalse(self.mock_db.volume_update.called) # Make sure we didn't try and create a cache entry self.assertFalse(self.mock_cache.ensure_space.called) self.assertFalse(self.mock_cache.create_cache_entry.called) @mock.patch('cinder.image.image_utils.check_available_space') 
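# The next test covers ImageTooBig raised while populating the cache via
# _create_from_image_cache_or_download: a NOT_ENOUGH_SPACE_FOR_IMAGE user
# message should be created, and neither the volume size nor the cache may
# be touched.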
@mock.patch('cinder.image.image_utils.qemu_img_info') @mock.patch('cinder.message.api.API.create') @mock.patch('cinder.image.image_utils.verify_glance_image_signature') def test_create_from_image_cache_insufficient_size( self, mock_verify, mock_message_create, mock_qemu_info, mock_check_space, mock_get_internal_context, mock_create_from_img_dl, mock_create_from_src, mock_handle_bootable, mock_fetch_img): image_info = imageutils.QemuImgInfo() image_info.virtual_size = '1073741824' mock_qemu_info.return_value = image_info self.mock_driver.clone_image.return_value = (None, False) self.mock_cache.get_entry.return_value = None volume = fake_volume.fake_volume_obj(self.ctxt, size=1, host='foo@bar#pool') image_volume = fake_volume.fake_db_volume(size=2) self.mock_db.volume_create.return_value = image_volume image_id = fakes.IMAGE_ID mock_create_from_img_dl.side_effect = exception.ImageTooBig( image_id=image_id, reason="fake") self.flags(verify_glance_signatures='disabled') image_location = 'someImageLocationStr' image_meta = mock.MagicMock() manager = create_volume_manager.CreateVolumeFromSpecTask( self.mock_volume_manager, self.mock_db, self.mock_driver, image_volume_cache=self.mock_cache ) self.assertRaises( exception.ImageTooBig, manager._create_from_image_cache_or_download, self.ctxt, volume, image_location, image_id, image_meta, self.mock_image_service ) mock_message_create.assert_called_once_with( self.ctxt, message_field.Action.COPY_IMAGE_TO_VOLUME, resource_uuid=volume.id, detail=message_field.Detail.NOT_ENOUGH_SPACE_FOR_IMAGE, exception=mock.ANY) # The volume size should NOT be changed when in this case self.assertFalse(self.mock_db.volume_update.called) # Make sure we didn't try and create a cache entry self.assertFalse(self.mock_cache.ensure_space.called) self.assertFalse(self.mock_cache.create_cache_entry.called) @mock.patch('cinder.image.image_utils.check_available_space') @mock.patch('cinder.image.image_utils.qemu_img_info') @mock.patch('cinder.message.api.API.create') @mock.patch('cinder.image.image_utils.verify_glance_image_signature') def test_create_from_image_cache_unacceptable_image_message( self, mock_verify, mock_message_create, mock_qemu_info, mock_check_space, mock_get_internal_context, mock_create_from_img_dl, mock_create_from_src, mock_handle_bootable, mock_fetch_img): image_info = imageutils.QemuImgInfo() image_info.virtual_size = '1073741824' mock_qemu_info.return_value = image_info self.mock_driver.clone_image.return_value = (None, False) self.mock_cache.get_entry.return_value = None volume = fake_volume.fake_volume_obj(self.ctxt, size=1, host='foo@bar#pool') image_volume = fake_volume.fake_db_volume(size=2) self.mock_db.volume_create.return_value = image_volume image_id = fakes.IMAGE_ID mock_create_from_img_dl.side_effect = ( exception.ImageConversionNotAllowed(image_id=image_id, reason='')) self.flags(verify_glance_signatures='disabled') image_location = 'someImageLocationStr' image_meta = mock.MagicMock() manager = create_volume_manager.CreateVolumeFromSpecTask( self.mock_volume_manager, self.mock_db, self.mock_driver, image_volume_cache=self.mock_cache ) self.assertRaises( exception.ImageConversionNotAllowed, manager._create_from_image_cache_or_download, self.ctxt, volume, image_location, image_id, image_meta, self.mock_image_service ) mock_message_create.assert_called_once_with( self.ctxt, message_field.Action.COPY_IMAGE_TO_VOLUME, resource_uuid=volume.id, detail=message_field.Detail.IMAGE_FORMAT_UNACCEPTABLE) # The volume size should NOT be changed when in this 
case self.assertFalse(self.mock_db.volume_update.called) # Make sure we didn't try and create a cache entry self.assertFalse(self.mock_cache.ensure_space.called) self.assertFalse(self.mock_cache.create_cache_entry.called) @ddt.data(None, {'volume_id': fakes.VOLUME_ID}) @mock.patch('cinder.volume.flows.manager.create_volume.' 'CreateVolumeFromSpecTask.' '_create_from_image_cache_or_download') def test_prepare_image_cache_entry( self, mock_cache_entry, mock_create_from_image_cache_or_download, mock_get_internal_context, mock_create_from_img_dl, mock_create_from_src, mock_handle_bootable, mock_fetch_img): self.mock_cache.get_entry.return_value = mock_cache_entry volume = fake_volume.fake_volume_obj(self.ctxt, id=fakes.VOLUME_ID, host='host@backend#pool') image_location = 'someImageLocationStr' image_id = fakes.IMAGE_ID image_meta = {'virtual_size': '1073741824', 'size': 1073741824} manager = create_volume_manager.CreateVolumeFromSpecTask( self.mock_volume_manager, self.mock_db, self.mock_driver, image_volume_cache=self.mock_cache ) model_update, cloned = manager._prepare_image_cache_entry( self.ctxt, volume, image_location, image_id, image_meta, self.mock_image_service) if mock_cache_entry: # Entry is in cache, so basically don't do anything. self.assertFalse(cloned) self.assertIsNone(model_update) mock_create_from_image_cache_or_download.assert_not_called() else: # Entry is not in cache, so do the work that will add it. self.assertTrue(cloned) self.assertEqual( mock_create_from_image_cache_or_download.return_value, model_update) mock_create_from_image_cache_or_download.assert_called_once_with( self.ctxt, volume, image_location, image_id, image_meta, self.mock_image_service, update_cache=True) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/volume/flows/test_manage_snapshot_flow.py0000664000175000017500000002701200000000000026726 0ustar00zuulzuul00000000000000# Copyright (c) 2017 Mirantis Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
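# The tests below drive individual TaskFlow tasks directly rather than a
# full flow; a rough sketch of the pattern (using the same kinds of mocks
# as the tests themselves) is:
#
#     task = manager.ManageExistingTask(mock_db, mock_driver)
#     result = task.execute(ctxt, snapshot, manage_existing_ref, size)
#
# where result['snapshot'] carries the managed snapshot with its refreshed
# volume_size.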
""" Tests for manage_existing_snapshot TaskFlow.""" # TODO(mdovgal): add tests for other TaskFlow cases from unittest import mock import ddt from cinder import context from cinder import exception from cinder.tests.unit import fake_constants as fake from cinder.tests.unit import fake_snapshot from cinder.tests.unit import fake_volume from cinder.tests.unit import test from cinder.volume.flows.manager import manage_existing_snapshot as manager @ddt.ddt class ManageSnapshotFlowTestCase(test.TestCase): def setUp(self): super(ManageSnapshotFlowTestCase, self).setUp() self.ctxt = context.get_admin_context() @mock.patch('cinder.objects.snapshot.Snapshot.get_by_id') def test_manage_snapshot_after_volume_extending(self, _get_by_id): """Test checks snapshot's volume_size value after it is managed.""" fake_size = 3 fake_snap = fake_snapshot.fake_snapshot_obj(self.ctxt, volume_size=fake_size) fake_snap.save = mock.MagicMock() _get_by_id.return_value = fake_snap real_size = 1 mock_db = mock.MagicMock() mock_driver = mock.MagicMock() mock_manage_existing_ref = mock.MagicMock() mock_driver.manage_existing_snapshot.return_value = {} task = manager.ManageExistingTask(mock_db, mock_driver) result = task.execute(self.ctxt, fake_snap, mock_manage_existing_ref, real_size) snap_after_manage = result['snapshot'] # assure value is equal that size, that we want self.assertEqual(real_size, snap_after_manage['volume_size']) def test_manage_existing_snapshot_with_wrong_volume(self): """Test that raise an error when get_by_id fail.""" mock_db = mock.MagicMock() mock_driver = mock.MagicMock() real_size = 1 manage_existing_ref = None fake_snap = fake_snapshot.fake_snapshot_obj(self.ctxt, volume_size=real_size) task = manager.ManageExistingTask(mock_db, mock_driver) self.assertRaises(exception.SnapshotNotFound, task.execute, self.ctxt, fake_snap, manage_existing_ref, real_size) @mock.patch('cinder.quota.QuotaEngine.reserve') @mock.patch('cinder.db.sqlalchemy.api.volume_type_get') @mock.patch('cinder.objects.volume.Volume.get_by_id') def test_quota_reservation_task(self, mock_get_vol_by_id, mock_type_get, mock_quota_reserve): volume_size = 1 fake_size = '2' fake_snap = fake_snapshot.fake_snapshot_obj(self.ctxt, volume_size=volume_size) fake_snap.save = mock.MagicMock() fake_vol = fake_volume.fake_volume_obj( self.ctxt, id=fake.VOLUME_ID, volume_type_id=fake.VOLUME_TYPE_ID) mock_get_vol_by_id.return_value = fake_vol mock_type_get.return_value = {'name': 'fake_type_name'} task = manager.QuotaReserveTask() task.execute(self.ctxt, fake_size, fake_snap, {}) reserve_opts = {'gigabytes': 1, 'snapshots': 1, 'gigabytes_fake_type_name': 1, 'snapshots_fake_type_name': 1} mock_quota_reserve.assert_called_once_with(self.ctxt, **reserve_opts) @ddt.data(True, False) @mock.patch('cinder.quota.QuotaEngine.reserve') @mock.patch('cinder.db.sqlalchemy.api.volume_type_get') @mock.patch('cinder.objects.volume.Volume.get_by_id') def test_quota_reservation_task_with_update_flag( self, need_update, mock_get_vol_by_id, mock_type_get, mock_quota_reserve): volume_size = 1 fake_size = '2' fake_snap = fake_snapshot.fake_snapshot_obj(self.ctxt, volume_size=volume_size) fake_snap.save = mock.MagicMock() fake_vol = fake_volume.fake_volume_obj( self.ctxt, id=fake.VOLUME_ID, volume_type_id=fake.VOLUME_TYPE_ID) mock_get_vol_by_id.return_value = fake_vol mock_type_get.return_value = {'name': 'fake_type_name'} task = manager.QuotaReserveTask() task.execute(self.ctxt, fake_size, fake_snap, {'update_size': need_update}) reserve_opts = {'gigabytes': 1, 
'gigabytes_fake_type_name': 1} if not need_update: reserve_opts.update({'snapshots': 1, 'snapshots_fake_type_name': 1}) mock_quota_reserve.assert_called_once_with(self.ctxt, **reserve_opts) def test_prepare_for_quota_reserveration_task_execute(self): mock_db = mock.MagicMock() mock_driver = mock.MagicMock() mock_manage_existing_ref = mock.MagicMock() mock_get_snapshot_size = self.mock_object( mock_driver, 'manage_existing_snapshot_get_size') mock_get_snapshot_size.return_value = '5' fake_snap = fake_snapshot.fake_snapshot_obj(self.ctxt, volume_size=1) task = manager.PrepareForQuotaReservationTask(mock_db, mock_driver) result = task.execute(self.ctxt, fake_snap, mock_manage_existing_ref) self.assertEqual(fake_snap, result['snapshot_properties']) self.assertEqual('5', result['size']) mock_get_snapshot_size.assert_called_once_with( snapshot=fake_snap, existing_ref=mock_manage_existing_ref ) @mock.patch('cinder.quota.QuotaEngine.rollback') def test_quota_reservation_revert_task(self, mock_quota_rollback): """Test checks that we can rollback the snapshot.""" mock_result = mock.MagicMock() optional_args = {} optional_args['is_quota_committed'] = False task = manager.QuotaReserveTask() task.revert(self.ctxt, mock_result, optional_args) mock_quota_rollback.assert_called_once_with(self.ctxt, mock_result['reservations'] ) @mock.patch('cinder.volume.flows.manager.manage_existing_snapshot.' 'QuotaReserveTask.revert') def test_quota_reservation_revert_already_been_committed(self, mock_quota_revert ): """Test reservations can not be rolled back.""" mock_result = mock.MagicMock() optional_args = {} optional_args['is_quota_committed'] = True task = manager.QuotaReserveTask() task.revert(self.ctxt, mock_result, optional_args) mock_quota_revert.assert_called_once_with(self.ctxt, mock_result, optional_args) @mock.patch('cinder.quota.QuotaEngine.commit') def test_quota_commit_task(self, mock_quota_commit): """Test checks commits the reservation.""" mock_reservations = mock.MagicMock() mock_snapshot_properties = mock.MagicMock() mock_optional_args = mock.MagicMock() task = manager.QuotaCommitTask() task.execute(self.ctxt, mock_reservations, mock_snapshot_properties, mock_optional_args) mock_quota_commit.assert_called_once_with(self.ctxt, mock_reservations) @mock.patch('cinder.quota.QuotaEngine.reserve') def test_quota_commit_revert_task(self, mock_quota_reserve): """Test checks commits the reservation.""" mock_result = mock.MagicMock() expected_snapshot = mock_result['snapshot_properties'] expected_gigabyte = -expected_snapshot['volume_size'] task = manager.QuotaCommitTask() task.revert(self.ctxt, mock_result) mock_quota_reserve.assert_called_once_with(self.ctxt, gigabytes=expected_gigabyte, project_id=None, snapshots=-1) @mock.patch('cinder.volume.flows.manager.manage_existing_snapshot.' 
'CreateSnapshotOnFinishTask.execute') def test_create_snap_on_finish_task(self, mock_snap_create): """Test to create snapshot on finish.""" mock_status = mock.MagicMock() mock_db = mock.MagicMock() mock_event_suffix = mock.MagicMock() mock_host = mock.MagicMock() task = manager.CreateSnapshotOnFinishTask(mock_db, mock_event_suffix, mock_host) task.execute(self.ctxt, fake_snapshot, mock_status) mock_snap_create.assert_called_once_with(self.ctxt, fake_snapshot, mock_status) @mock.patch('cinder.objects.snapshot.Snapshot.get_by_id') @mock.patch('cinder.volume.volume_utils.notify_about_snapshot_usage') def test_create_snap_on_finish_task_notify(self, mock_notify_about_usage, _mock_get_by_id): mock_status = mock.MagicMock() mock_db = mock.MagicMock() mock_event_suffix = mock.MagicMock() mock_host = mock.MagicMock() fake_snap = fake_snapshot.fake_snapshot_obj(self.ctxt, volume_size=1) task = manager.CreateSnapshotOnFinishTask(mock_db, mock_event_suffix, mock_host) task.execute(self.ctxt, fake_snap, mock_status) mock_notify_about_usage.assert_called_once_with( self.ctxt, fake_snap, mock_event_suffix, host=mock_host) @mock.patch('cinder.volume.flows.manager.manage_existing_snapshot.' 'taskflow.engines.load') @mock.patch('cinder.volume.flows.manager.manage_existing_snapshot.' 'linear_flow.Flow') def test_get_flow(self, mock_linear_flow, mock_taskflow_engine): mock_db = mock.MagicMock() mock_driver = mock.MagicMock() mock_host = mock.MagicMock() mock_snapshot_id = mock.MagicMock() mock_ref = mock.MagicMock() ctxt = context.get_admin_context() mock_snapshot_flow = mock.Mock() mock_linear_flow.return_value = mock_snapshot_flow expected_store = { 'context': ctxt, 'snapshot_id': mock_snapshot_id, 'manage_existing_ref': mock_ref, 'optional_args': { 'is_quota_committed': False, 'update_size': True }, } manager.get_flow(ctxt, mock_db, mock_driver, mock_host, mock_snapshot_id, mock_ref) mock_linear_flow.assert_called_once_with( 'snapshot_manage_existing_manager') mock_taskflow_engine.assert_called_once_with(mock_snapshot_flow, store=expected_store) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/volume/flows/test_manage_volume_flow.py0000664000175000017500000002506100000000000026400 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
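# NOTE: test_get_flow below and the one in the snapshot flow tests above
# both pin down the "store" contract that get_flow() hands to
# taskflow.engines.load().  A hedged summary of what those tests assert
# (key names copied from the expected_store dicts):
#
#     store = {
#         'context': ctxt,
#         'volume': volume,                 # 'snapshot_id' in the snapshot flow
#         'manage_existing_ref': ref,
#         'group_snapshot': None,           # volume flow only
#         'optional_args': {'is_quota_committed': False,
#                           'update_size': True},
#     }
#     taskflow.engines.load(volume_flow, store=store)
#
# Every task's execute()/revert() parameters must resolve either from this
# store or from a previous task's default_provides, which is what
# test_get_flow_volume_flow_tasks verifies below.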
""" Tests for manage_existing TaskFlow """ import inspect from unittest import mock import taskflow.engines from taskflow.types import failure as ft from cinder import context from cinder import exception from cinder.tests.unit import fake_constants as fakes from cinder.tests.unit import fake_volume from cinder.tests.unit import test from cinder.tests.unit.volume.flows import fake_volume_api from cinder.volume.flows.api import manage_existing from cinder.volume.flows import common as flow_common from cinder.volume.flows.manager import manage_existing as manager class ManageVolumeFlowTestCase(test.TestCase): def setUp(self): super(ManageVolumeFlowTestCase, self).setUp() self.ctxt = context.get_admin_context() self.counter = float(0) def test_cast_manage_existing(self): volume = fake_volume.fake_volume_obj(self.ctxt) spec = { 'name': 'name', 'description': 'description', 'host': 'host', 'ref': 'ref', 'volume_type': 'volume_type', 'metadata': 'metadata', 'availability_zone': 'availability_zone', 'bootable': 'bootable', 'volume_id': volume.id, } # Fake objects assert specs task = manage_existing.ManageCastTask( fake_volume_api.FakeSchedulerRpcAPI(spec, self), fake_volume_api.FakeDb()) create_what = spec.copy() create_what.update({'volume': volume}) create_what.pop('volume_id') task.execute(self.ctxt, **create_what) def test_cast_manage_existing_revert(self): volume = fake_volume.fake_volume_obj(self.ctxt) volume.save = mock.MagicMock() # Fake objects assert specs task = manage_existing.ManageCastTask( fake_volume_api.FakeSchedulerRpcAPI({}, self), fake_volume_api.FakeDb()) flow_failures = [mock.MagicMock()] task.revert(self.ctxt, {}, flow_failures, volume) # Check that volume status is updated and saved self.assertEqual('error_managing', volume.status) volume.save.assert_called_once() def test_create_db_entry_task_with_multiattach(self): fake_volume_type = fake_volume.fake_volume_type_obj( self.ctxt, extra_specs={'multiattach': ' True'}) spec = { 'name': 'name', 'description': 'description', 'host': 'host', 'ref': 'ref', 'volume_type': fake_volume_type, 'metadata': {}, 'availability_zone': 'availability_zone', 'bootable': 'bootable', 'volume_type_id': fake_volume_type.id, 'cluster_name': 'fake_cluster' } task = manage_existing.EntryCreateTask(fake_volume_api.FakeDb()) result = task.execute(self.ctxt, **spec) self.assertTrue(result['volume_properties']['multiattach']) def test_revert_manage_existing(self): fake_db = fake_volume_api.FakeDb() fake_db.volume_destroy = mock.MagicMock() self.ctxt.elevated = mock.Mock() task = manage_existing.EntryCreateTask(fake_db) task.revert(self.ctxt, {'volume_id': fakes.VOLUME_ID}) # Check DB entry task is destroyed fake_db.volume_destroy.assert_called_once_with( self.ctxt.elevated.return_value, fakes.VOLUME_ID ) def test_revert_manage_existing_with_ft_failure(self): fake_db = fake_volume_api.FakeDb() fake_db.volume_destroy = mock.MagicMock() mock_failure = mock.Mock(spec=ft.Failure) task = manage_existing.EntryCreateTask(fake_db) task.revert(self.ctxt, mock_failure) # Check DB entry task is not destroyed fake_db.volume_destroy.assert_not_called() @staticmethod def _stub_volume_object_get(self): volume = { 'id': fakes.VOLUME_ID, 'volume_type_id': fakes.VOLUME_TYPE_ID, 'status': 'creating', 'name': fakes.VOLUME_NAME, } return fake_volume.fake_volume_obj(self.ctxt, **volume) def test_prepare_for_quota_reserveration_task_execute(self): mock_db = mock.MagicMock() mock_driver = mock.MagicMock() mock_manage_existing_ref = mock.MagicMock() mock_get_size = 
self.mock_object( mock_driver, 'manage_existing_get_size') mock_get_size.return_value = '5' volume_ref = self._stub_volume_object_get(self) task = manager.PrepareForQuotaReservationTask(mock_db, mock_driver) result = task.execute(self.ctxt, volume_ref, mock_manage_existing_ref) self.assertEqual(volume_ref, result['volume_properties']) self.assertEqual('5', result['size']) self.assertEqual(volume_ref.id, result['volume_spec']['volume_id']) mock_get_size.assert_called_once_with( volume_ref, mock_manage_existing_ref) def test_prepare_for_quota_reservation_task_revert(self): mock_db = mock.MagicMock() mock_driver = mock.MagicMock() mock_result = mock.MagicMock() mock_flow_failures = mock.MagicMock() mock_error_out = self.mock_object(flow_common, 'error_out') volume_ref = self._stub_volume_object_get(self) task = manager.PrepareForQuotaReservationTask(mock_db, mock_driver) task.revert(self.ctxt, mock_result, mock_flow_failures, volume_ref) mock_error_out.assert_called_once_with(volume_ref, reason='Volume manage failed.', status='error_managing') def test_prepare_for_quota_reservation_with_wrong_volume(self): """Test the class PrepareForQuotaReservationTas with wrong vol.""" mock_db = mock.MagicMock() mock_driver = mock.MagicMock() wrong_volume = mock.MagicMock() mock_manage_existing_ref = mock.MagicMock() mock_except = exception.CinderException mock_driver.manage_existing_get_size.side_effect = mock_except task = manager.PrepareForQuotaReservationTask(mock_db, mock_driver) self.assertRaises(exception.CinderException, task.execute, self.ctxt, wrong_volume, mock_manage_existing_ref) def test_manage_existing_task(self): """Test the class ManageExistingTask.""" mock_db = mock.MagicMock() mock_driver = mock.MagicMock() mock_volume = mock.MagicMock() mock_manage_existing_ref = mock.MagicMock() mock_size = mock.MagicMock() task = manager.ManageExistingTask(mock_db, mock_driver) rv = task.execute(self.ctxt, mock_volume, mock_manage_existing_ref, mock_size) expected_output = {'volume': mock_volume} self.assertDictEqual(rv, expected_output) def test_manage_existing_task_with_wrong_volume(self): """Test the class ManageExistingTask with wrong volume.""" mock_db = mock.MagicMock() mock_driver = mock.MagicMock() mock_volume = mock.MagicMock() mock_volume.update.side_effect = exception.CinderException mock_manage_existing_ref = mock.MagicMock() mock_size = mock.MagicMock() task = manager.ManageExistingTask(mock_db, mock_driver) self.assertRaises(exception.CinderException, task.execute, self.ctxt, mock_volume, mock_manage_existing_ref, mock_size) def test_get_flow(self): mock_volume_flow = mock.Mock() mock_linear_flow = self.mock_object(manager.linear_flow, 'Flow') mock_linear_flow.return_value = mock_volume_flow mock_taskflow_engine = self.mock_object(taskflow.engines, 'load') expected_store = { 'context': mock.sentinel.context, 'volume': mock.sentinel.volume, 'manage_existing_ref': mock.sentinel.ref, 'group_snapshot': None, 'optional_args': {'is_quota_committed': False, 'update_size': True} } manager.get_flow( mock.sentinel.context, mock.sentinel.db, mock.sentinel.driver, mock.sentinel.host, mock.sentinel.volume, mock.sentinel.ref) mock_linear_flow.assert_called_once_with( 'volume_manage_existing_manager') mock_taskflow_engine.assert_called_once_with( mock_volume_flow, store=expected_store) def test_get_flow_volume_flow_tasks(self): """Test that all expected parameter names exist for added tasks.""" mock_taskflow_engine = self.mock_object(taskflow.engines, 'load') mock_taskflow_engine.side_effect = 
self._verify_volume_flow_tasks manager.get_flow( mock.sentinel.context, mock.sentinel.db, mock.sentinel.driver, mock.sentinel.host, mock.sentinel.volume, mock.sentinel.ref) def _verify_volume_flow_tasks(self, volume_flow, store=None): param_names = [ 'context', 'volume', 'manage_existing_ref', 'group_snapshot', 'optional_args', ] provides = {'self'} revert_provides = ['self', 'result', 'flow_failures'] for node in volume_flow.iter_nodes(): task = node[0] # Subsequent tasks may use parameters defined in a previous task's # default_provides list. Add these names to the provides set. if task.default_provides: for p in task.default_provides: provides.add(p) execute_args = inspect.getfullargspec(task.execute)[0] execute_args = [x for x in execute_args if x not in provides] [self.assertIn(arg, param_names) for arg in execute_args] revert_args = inspect.getfullargspec(task.revert)[0] revert_args = [x for x in revert_args if x not in revert_provides] [self.assertIn(arg, param_names) for arg in revert_args] ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/volume/test_availability_zone.py0000664000175000017500000001301300000000000025077 0ustar00zuulzuul00000000000000# Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
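# NOTE: a short, hedged summary of the availability-zone caching behaviour
# exercised below (inferred from the assertions, not from the
# implementation itself):
#
#     api = cinder.volume.api.API()
#     api.list_availability_zones(enable_cache=True)     # hits service_get_all
#     api.list_availability_zones(enable_cache=True)     # served from cache
#     api.list_availability_zones(enable_cache=True,
#                                 refresh_cache=True)    # forces a refetch
#     api.list_availability_zones(enable_cache=False)    # never touches the cache
#
# The cached result is timestamped via availability_zones_last_fetched and
# is refetched once it is older than the cache window (the tests push
# utcnow() past 3600 seconds to trigger this).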
"""Test for volume availability zone.""" import datetime from unittest import mock from oslo_utils import timeutils from cinder.tests.unit import volume as base import cinder.volume class AvailabilityZoneTestCase(base.BaseVolumeTestCase): def setUp(self): super(AvailabilityZoneTestCase, self).setUp() self.get_all = self.patch( 'cinder.db.service_get_all', autospec=True, return_value=[{'availability_zone': 'a', 'disabled': False, 'uuid': 'f838f35c-4035-464f-9792-ce60e390c13d'}]) def test_list_availability_zones_cached(self): azs = self.volume_api.list_availability_zones(enable_cache=True) self.assertEqual([{"name": 'a', 'available': True}], list(azs)) self.assertIsNotNone(self.volume_api.availability_zones_last_fetched) self.assertTrue(self.get_all.called) self.volume_api.list_availability_zones(enable_cache=True) self.assertEqual(1, self.get_all.call_count) def test_list_availability_zones_cached_and_refresh_on(self): azs = self.volume_api.list_availability_zones(enable_cache=True, refresh_cache=True) self.assertEqual([{"name": 'a', 'available': True}], list(azs)) time_before = self.volume_api.availability_zones_last_fetched self.assertIsNotNone(time_before) self.assertEqual(1, self.get_all.call_count) self.volume_api.list_availability_zones(enable_cache=True, refresh_cache=True) self.assertTrue(time_before != self.volume_api.availability_zones_last_fetched) self.assertEqual(2, self.get_all.call_count) def test_list_availability_zones_no_cached(self): azs = self.volume_api.list_availability_zones(enable_cache=False) self.assertEqual([{"name": 'a', 'available': True}], list(azs)) self.assertIsNone(self.volume_api.availability_zones_last_fetched) self.get_all.return_value[0]['disabled'] = True azs = self.volume_api.list_availability_zones(enable_cache=False) self.assertEqual([{"name": 'a', 'available': False}], list(azs)) self.assertIsNone(self.volume_api.availability_zones_last_fetched) @mock.patch('oslo_utils.timeutils.utcnow') def test_list_availability_zones_refetched(self, mock_utcnow): mock_utcnow.return_value = datetime.datetime.utcnow() azs = self.volume_api.list_availability_zones(enable_cache=True) self.assertEqual([{"name": 'a', 'available': True}], list(azs)) self.assertIsNotNone(self.volume_api.availability_zones_last_fetched) last_fetched = self.volume_api.availability_zones_last_fetched self.assertTrue(self.get_all.called) self.volume_api.list_availability_zones(enable_cache=True) self.assertEqual(1, self.get_all.call_count) # The default cache time is 3600, push past that... 
mock_utcnow.return_value = (timeutils.utcnow() + datetime.timedelta(0, 3800)) self.get_all.return_value = [ { 'availability_zone': 'a', 'disabled': False, 'uuid': 'a3a593da-7f8d-4bb7-8b4c-f2bc1e0b4824', }, { 'availability_zone': 'b', 'disabled': False, 'uuid': '4200b32b-0bf9-436c-86b2-0675f6ac218e', }, ] azs = self.volume_api.list_availability_zones(enable_cache=True) azs = sorted([n['name'] for n in azs]) self.assertEqual(['a', 'b'], azs) self.assertEqual(2, self.get_all.call_count) self.assertGreater(self.volume_api.availability_zones_last_fetched, last_fetched) mock_utcnow.assert_called_with() def test_list_availability_zones_enabled_service(self): def sort_func(obj): return obj['name'] self.get_all.return_value = [ {'availability_zone': 'ping', 'disabled': 0, 'uuid': 'a3a593da-7f8d-4bb7-8b4c-f2bc1e0b4824'}, {'availability_zone': 'ping', 'disabled': 1, 'uuid': '4200b32b-0bf9-436c-86b2-0675f6ac218e'}, {'availability_zone': 'pong', 'disabled': 0, 'uuid': '6d91e7f5-ca17-4e3b-bf4f-19ca77166dd7'}, {'availability_zone': 'pung', 'disabled': 1, 'uuid': '18417850-2ca9-43d1-9619-ae16bfb0f655'}, ] volume_api = cinder.volume.api.API() azs = volume_api.list_availability_zones() azs = sorted(azs, key=sort_func) expected = sorted([ {'name': 'pung', 'available': False}, {'name': 'pong', 'available': True}, {'name': 'ping', 'available': True}, ], key=sort_func) self.assertEqual(expected, azs) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/volume/test_capabilities.py0000664000175000017500000002051500000000000024030 0ustar00zuulzuul00000000000000# Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
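# NOTE: a hedged sketch of the capability-reporting behaviour the tests
# below rely on.  get_capabilities(context, discover=True) refreshes the
# driver stats (get_volume_stats(True)) and merges two property sets:
#
#   * standard properties (compression, qos, replication_enabled,
#     thin_provisioning), and
#   * vendor properties returned by the driver's _init_vendor_properties(),
#     which registers keys through _set_property() and returns the vendor
#     prefix, e.g.:
#
#         self._set_property(properties, "abcd:minIOPS", "Minimum IOPS QoS",
#                            "Sets minimum IOPS if QoS is enabled.",
#                            "integer", minimum=10, default=100)
#         return properties, 'abcd'
#
# Keys that do not start with the returned prefix are dropped, keys that
# collide with a standard capability are ignored, and extra colons in the
# prefix are normalised to underscores ('ab::cd' -> 'ab__cd'), which is
# what the prefix-error and fail-override tests check.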
from unittest import mock from oslo_serialization import jsonutils from cinder import exception from cinder.tests import fake_driver from cinder.tests.unit import volume as base from cinder.volume import driver from cinder.volume import manager as vol_manager # import cinder.volume.targets.tgt """Tests for volume capabilities test cases.""" class VolumeCapabilitiesTestCase(base.BaseVolumeTestCase): @mock.patch.object(fake_driver.FakeLoggingVolumeDriver, 'get_volume_stats') @mock.patch.object(driver.BaseVD, '_init_vendor_properties') def test_get_capabilities(self, mock_init_vendor, mock_get_volume_stats): stats = { 'volume_backend_name': 'lvm', 'vendor_name': 'Open Source', 'storage_protocol': 'iSCSI', 'vendor_prefix': 'abcd' } expected = stats.copy() expected['properties'] = { 'compression': { 'title': 'Compression', 'description': 'Enables compression.', 'type': 'boolean'}, 'qos': { 'title': 'QoS', 'description': 'Enables QoS.', 'type': 'boolean'}, 'replication_enabled': { 'title': 'Replication', 'description': 'Enables replication.', 'type': 'boolean'}, 'thin_provisioning': { 'title': 'Thin Provisioning', 'description': 'Sets thin provisioning.', 'type': 'boolean'}, } # Test to get updated capabilities discover = True mock_get_volume_stats.return_value = stats mock_init_vendor.return_value = ({}, None) capabilities = self.volume.get_capabilities(self.context, discover) self.assertEqual(expected, capabilities) mock_get_volume_stats.assert_called_once_with(True) # Test to get existing original capabilities mock_get_volume_stats.reset_mock() discover = False capabilities = self.volume.get_capabilities(self.context, discover) self.assertEqual(expected, capabilities) self.assertFalse(mock_get_volume_stats.called) # Normal test case to get vendor unique capabilities def init_vendor_properties(self): properties = {} self._set_property( properties, "abcd:minIOPS", "Minimum IOPS QoS", "Sets minimum IOPS if QoS is enabled.", "integer", minimum=10, default=100) return properties, 'abcd' expected['properties'].update( {'abcd:minIOPS': { 'title': 'Minimum IOPS QoS', 'description': 'Sets minimum IOPS if QoS is enabled.', 'type': 'integer', 'minimum': 10, 'default': 100}}) mock_get_volume_stats.reset_mock() mock_init_vendor.reset_mock() discover = True mock_init_vendor.return_value = ( init_vendor_properties(self.volume.driver)) capabilities = self.volume.get_capabilities(self.context, discover) self.assertEqual(expected, capabilities) self.assertTrue(mock_get_volume_stats.called) @mock.patch.object(fake_driver.FakeLoggingVolumeDriver, 'get_volume_stats') @mock.patch.object(driver.BaseVD, '_init_vendor_properties') @mock.patch.object(driver.BaseVD, '_init_standard_capabilities') def test_get_capabilities_prefix_error(self, mock_init_standard, mock_init_vendor, mock_get_volume_stats): # Error test case: property does not match vendor prefix def init_vendor_properties(self): properties = {} self._set_property( properties, "aaa:minIOPS", "Minimum IOPS QoS", "Sets minimum IOPS if QoS is enabled.", "integer") self._set_property( properties, "abcd:compression_type", "Compression type", "Specifies compression type.", "string") return properties, 'abcd' expected = { 'abcd:compression_type': { 'title': 'Compression type', 'description': 'Specifies compression type.', 'type': 'string'}} discover = True mock_get_volume_stats.return_value = {} mock_init_standard.return_value = {} mock_init_vendor.return_value = ( init_vendor_properties(self.volume.driver)) capabilities = self.volume.get_capabilities(self.context, 
discover) self.assertEqual(expected, capabilities['properties']) @mock.patch.object(fake_driver.FakeLoggingVolumeDriver, 'get_volume_stats') @mock.patch.object(driver.BaseVD, '_init_vendor_properties') @mock.patch.object(driver.BaseVD, '_init_standard_capabilities') def test_get_capabilities_fail_override(self, mock_init_standard, mock_init_vendor, mock_get_volume_stats): # Error test case: property cannot override any standard capabilities def init_vendor_properties(self): properties = {} self._set_property( properties, "qos", "Minimum IOPS QoS", "Sets minimum IOPS if QoS is enabled.", "integer") self._set_property( properties, "ab::cd:compression_type", "Compression type", "Specifies compression type.", "string") return properties, 'ab::cd' expected = { 'ab__cd:compression_type': { 'title': 'Compression type', 'description': 'Specifies compression type.', 'type': 'string'}} discover = True mock_get_volume_stats.return_value = {} mock_init_standard.return_value = {} mock_init_vendor.return_value = ( init_vendor_properties(self.volume.driver)) capabilities = self.volume.get_capabilities(self.context, discover) self.assertEqual(expected, capabilities['properties']) def test_extra_capabilities(self): # Test valid extra_capabilities. fake_capabilities = {'key1': 1, 'key2': 2} with mock.patch.object(jsonutils, 'loads') as mock_loads: mock_loads.return_value = fake_capabilities manager = vol_manager.VolumeManager() manager.stats = {'pools': {}} manager.driver.set_initialized() manager.publish_service_capabilities(self.context) self.assertTrue(mock_loads.called) volume_stats = manager.last_capabilities self.assertEqual(fake_capabilities['key1'], volume_stats["pools"][0]['key1']) self.assertEqual(fake_capabilities['key2'], volume_stats["pools"][0]['key2']) def test_extra_capabilities_fail(self): with mock.patch.object(jsonutils, 'loads') as mock_loads: mock_loads.side_effect = exception.CinderException('test') self.assertRaises(exception.CinderException, vol_manager.VolumeManager) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/volume/test_connection.py0000664000175000017500000020326000000000000023536 0ustar00zuulzuul00000000000000# Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
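# NOTE: an illustrative, self-contained sketch (not used by these tests and
# not the production implementation) of the per-GB QoS scaling that the
# *_qos_per_gb connection tests below encode in their expected values:
# each '<name>_per_gb' spec is multiplied by the volume size, clamped below
# by '<name>_per_gb_min' and above by '<name>_max' when those keys exist.
def _scale_per_gb_qos_sketch(specs, size_gb):
    scaled = {}
    for key, value in specs.items():
        if not key.endswith('_per_gb'):
            continue
        name = key[:-len('_per_gb')]           # e.g. 'write_iops_sec'
        result = int(value) * size_gb
        minimum = specs.get(key + '_min')      # e.g. 'write_iops_sec_per_gb_min'
        maximum = specs.get(name + '_max')     # e.g. 'total_iops_sec_max'
        if minimum is not None:
            result = max(result, int(minimum))
        if maximum is not None:
            result = min(result, int(maximum))
        scaled[name] = result
    return scaled
# For a 3 GB volume, {'write_iops_sec_per_gb': 30} scales to
# {'write_iops_sec': 90}, matching test_initialize_connection_qos_per_gb;
# the *_per_gb_min and *_max variants reproduce the "min_small" and
# "min_large" expectations in the same way.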
"""Tests for Volume connection test cases.""" from unittest import mock import ddt from cinder import context from cinder import db from cinder import exception from cinder.message import message_field from cinder import objects from cinder.objects import fields from cinder.tests import fake_driver from cinder.tests.unit.api.v2 import fakes as v2_fakes from cinder.tests.unit import fake_constants as fake from cinder.tests.unit import fake_volume from cinder.tests.unit import utils as tests_utils from cinder.tests.unit import volume as base import cinder.volume import cinder.volume.targets import cinder.volume.targets.iscsi @ddt.ddt class DiscardFlagTestCase(base.BaseVolumeTestCase): def setUp(self): super(DiscardFlagTestCase, self).setUp() self.volume.driver = mock.MagicMock() db.volume_type_create(self.context, v2_fakes.fake_default_type_get( fake.VOLUME_TYPE2_ID)) self.vol_type = db.volume_type_get_by_name(self.context, 'vol_type_name') @ddt.data(dict(config_discard_flag=True, driver_discard_flag=None, expected_flag=True), dict(config_discard_flag=False, driver_discard_flag=None, expected_flag=None), dict(config_discard_flag=True, driver_discard_flag=True, expected_flag=True), dict(config_discard_flag=False, driver_discard_flag=True, expected_flag=True), dict(config_discard_flag=False, driver_discard_flag=False, expected_flag=False), dict(config_discard_flag=None, driver_discard_flag=True, expected_flag=True), dict(config_discard_flag=None, driver_discard_flag=False, expected_flag=False)) @ddt.unpack def test_initialize_connection_discard_flag(self, config_discard_flag, driver_discard_flag, expected_flag): self.volume.driver.create_export.return_value = None connector = {'ip': 'IP', 'initiator': 'INITIATOR'} conn_info = { 'driver_volume_type': 'iscsi', 'data': {'access_mode': 'rw', 'encrypted': False} } if driver_discard_flag is not None: conn_info['data']['discard'] = driver_discard_flag self.volume.driver.initialize_connection.return_value = conn_info def _safe_get(key): if key == 'report_discard_supported': return config_discard_flag else: return None self.volume.driver.configuration.safe_get.side_effect = _safe_get with mock.patch.object(objects, 'Volume') as mock_vol: volume = tests_utils.create_volume(self.context) volume.volume_type_id = None mock_vol.get_by_id.return_value = volume conn_info = self.volume.initialize_connection(self.context, volume, connector) self.assertEqual(expected_flag, conn_info['data'].get('discard')) class VolumeConnectionTestCase(base.BaseVolumeTestCase): def setUp(self, *args, **kwargs): super(VolumeConnectionTestCase, self).setUp() db.volume_type_create(self.context, v2_fakes.fake_default_type_get( fake.VOLUME_TYPE2_ID)) self.vol_type = db.volume_type_get_by_name(self.context, 'vol_type_name') @mock.patch.object(cinder.volume.targets.iscsi.ISCSITarget, '_get_target_chap_auth') @mock.patch.object(db, 'volume_admin_metadata_get') @mock.patch.object(db.sqlalchemy.api, 'volume_get') @mock.patch.object(db, 'volume_update') def test_initialize_connection_fetchqos(self, _mock_volume_update, _mock_volume_get, _mock_volume_admin_metadata_get, mock_get_target): """Make sure initialize_connection returns correct information.""" _fake_admin_meta = [{'key': 'fake-key', 'value': 'fake-value'}] _fake_volume = {'volume_type_id': fake.VOLUME_TYPE_ID, 'name': 'fake_name', 'host': 'fake_host', 'id': fake.VOLUME_ID, 'volume_admin_metadata': _fake_admin_meta} fake_volume_obj = fake_volume.fake_volume_obj(self.context, **_fake_volume) _mock_volume_get.return_value = 
_fake_volume _mock_volume_update.return_value = _fake_volume _mock_volume_admin_metadata_get.return_value = { 'fake-key': 'fake-value'} connector = {'ip': 'IP', 'initiator': 'INITIATOR'} qos_values = {'consumer': 'front-end', 'specs': { 'key1': 'value1', 'key2': 'value2'} } with mock.patch.object(cinder.volume.volume_types, 'get_volume_type_qos_specs') as type_qos, \ mock.patch.object(cinder.volume.volume_types, 'get_volume_type_extra_specs' ) as type_extra_specs, \ mock.patch.object(cinder.tests.fake_driver.FakeLoggingVolumeDriver, 'initialize_connection') as driver_init: type_qos.return_value = dict(qos_specs=qos_values) type_extra_specs.return_value = {} driver_init.return_value = {'data': {}} mock_get_target.return_value = None qos_specs_expected = {'key1': 'value1', 'key2': 'value2'} # initialize_connection() passes qos_specs that is designated to # be consumed by front-end or both front-end and back-end conn_info = self.volume.initialize_connection( self.context, fake_volume_obj, connector,) self.assertDictEqual(qos_specs_expected, conn_info['data']['qos_specs']) qos_values.update({'consumer': 'both'}) conn_info = self.volume.initialize_connection( self.context, fake_volume_obj, connector) self.assertDictEqual(qos_specs_expected, conn_info['data']['qos_specs']) # initialize_connection() skips qos_specs that is designated to be # consumed by back-end only qos_values.update({'consumer': 'back-end'}) type_qos.return_value = dict(qos_specs=qos_values) conn_info = self.volume.initialize_connection( self.context, fake_volume_obj, connector) self.assertIsNone(conn_info['data']['qos_specs']) @mock.patch.object(cinder.volume.targets.iscsi.ISCSITarget, '_get_target_chap_auth') @mock.patch.object(db, 'volume_admin_metadata_get') @mock.patch.object(db.sqlalchemy.api, 'volume_get') @mock.patch.object(db, 'volume_update') def test_initialize_connection_qos_per_gb(self, _mock_volume_update, _mock_volume_get, _mock_volume_admin_metadata_get, mock_get_target): """QoS test with no minimum value.""" _fake_admin_meta = [{'key': 'fake-key', 'value': 'fake-value'}] _fake_volume = {'size': 3, 'volume_type_id': fake.VOLUME_TYPE_ID, 'name': 'fake_name', 'host': 'fake_host', 'id': fake.VOLUME_ID, 'volume_admin_metadata': _fake_admin_meta} fake_volume_obj = fake_volume.fake_volume_obj(self.context, **_fake_volume) _mock_volume_get.return_value = _fake_volume _mock_volume_update.return_value = _fake_volume _mock_volume_admin_metadata_get.return_value = { 'fake-key': 'fake-value'} connector = {'ip': 'IP', 'initiator': 'INITIATOR'} qos_values = {'consumer': 'front-end', 'specs': { 'write_iops_sec_per_gb': 30, 'read_iops_sec_per_gb': 7700, 'total_iops_sec_per_gb': 300000, 'read_bytes_sec_per_gb': 10, 'write_bytes_sec_per_gb': 40, 'total_bytes_sec_per_gb': 1048576} } with mock.patch.object(cinder.volume.volume_types, 'get_volume_type_qos_specs') as type_qos, \ mock.patch.object(cinder.volume.volume_types, 'get_volume_type_extra_specs' ) as type_extra_specs, \ mock.patch.object(cinder.tests.fake_driver.FakeLoggingVolumeDriver, 'initialize_connection') as driver_init: type_qos.return_value = dict(qos_specs=qos_values) type_extra_specs.return_value = {} driver_init.return_value = {'data': {}} mock_get_target.return_value = None qos_specs_expected = {'write_iops_sec': 90, 'read_iops_sec': 23100, 'total_iops_sec': 900000, 'read_bytes_sec': 30, 'write_bytes_sec': 120, 'total_bytes_sec': 3145728} # initialize_connection() passes qos_specs that is designated to # be consumed by front-end or both front-end and back-end 
conn_info = self.volume.initialize_connection( self.context, fake_volume_obj, connector,) self.assertDictEqual(qos_specs_expected, conn_info['data']['qos_specs']) qos_values.update({'consumer': 'both'}) conn_info = self.volume.initialize_connection( self.context, fake_volume_obj, connector) self.assertDictEqual(qos_specs_expected, conn_info['data']['qos_specs']) @mock.patch.object(cinder.volume.targets.iscsi.ISCSITarget, '_get_target_chap_auth') @mock.patch.object(db, 'volume_admin_metadata_get') @mock.patch.object(db.sqlalchemy.api, 'volume_get') @mock.patch.object(db, 'volume_update') def test_initialize_connection_qos_per_gb_with_min_small( self, _mock_volume_update, _mock_volume_get, _mock_volume_admin_metadata_get, mock_get_target): """QoS test when volume size results in using minimum.""" _fake_admin_meta = [{'key': 'fake-key', 'value': 'fake-value'}] _fake_volume = {'size': 1, 'volume_type_id': fake.VOLUME_TYPE_ID, 'name': 'fake_name', 'host': 'fake_host', 'id': fake.VOLUME_ID, 'volume_admin_metadata': _fake_admin_meta} fake_volume_obj = fake_volume.fake_volume_obj(self.context, **_fake_volume) _mock_volume_get.return_value = _fake_volume _mock_volume_update.return_value = _fake_volume _mock_volume_admin_metadata_get.return_value = { 'fake-key': 'fake-value'} connector = {'ip': 'IP', 'initiator': 'INITIATOR'} qos_values = {'consumer': 'front-end', 'specs': { 'write_iops_sec_per_gb_min': 800, 'write_iops_sec_per_gb': 30, 'read_iops_sec_per_gb_min': 23100, 'read_iops_sec_per_gb': 7700, 'total_iops_sec_per_gb_min': 900000, 'total_iops_sec_per_gb': 300000, 'total_iops_sec_max': 15000000, 'read_bytes_sec_per_gb_min': 30, 'read_bytes_sec_per_gb': 10, 'write_bytes_sec_per_gb_min': 120, 'write_bytes_sec_per_gb': 40, 'total_bytes_sec_per_gb_min': 3145728, 'total_bytes_sec_per_gb': 1048576} } with mock.patch.object(cinder.volume.volume_types, 'get_volume_type_qos_specs') as type_qos, \ mock.patch.object(cinder.volume.volume_types, 'get_volume_type_extra_specs' ) as type_extra_specs, \ mock.patch.object(cinder.tests.fake_driver.FakeLoggingVolumeDriver, 'initialize_connection') as driver_init: type_qos.return_value = dict(qos_specs=qos_values) type_extra_specs.return_value = {} driver_init.return_value = {'data': {}} mock_get_target.return_value = None qos_specs_expected = {'write_iops_sec': 800, 'read_iops_sec': 23100, 'total_iops_sec': 900000, 'read_bytes_sec': 30, 'write_bytes_sec': 120, 'total_bytes_sec': 3145728} # initialize_connection() passes qos_specs that is designated to # be consumed by front-end or both front-end and back-end conn_info = self.volume.initialize_connection( self.context, fake_volume_obj, connector,) self.assertDictEqual(qos_specs_expected, conn_info['data']['qos_specs']) qos_values.update({'consumer': 'both'}) conn_info = self.volume.initialize_connection( self.context, fake_volume_obj, connector) self.assertDictEqual(qos_specs_expected, conn_info['data']['qos_specs']) @mock.patch.object(cinder.volume.targets.iscsi.ISCSITarget, '_get_target_chap_auth') @mock.patch.object(db, 'volume_admin_metadata_get') @mock.patch.object(db.sqlalchemy.api, 'volume_get') @mock.patch.object(db, 'volume_update') def test_initialize_connection_qos_per_gb_with_min_large( self, _mock_volume_update, _mock_volume_get, _mock_volume_admin_metadata_get, mock_get_target): """QoS test when volume size results in using per-gb values.""" _fake_admin_meta = [{'key': 'fake-key', 'value': 'fake-value'}] _fake_volume = {'size': 100, 'volume_type_id': fake.VOLUME_TYPE_ID, 'name': 'fake_name', 'host': 
'fake_host', 'id': fake.VOLUME_ID, 'volume_admin_metadata': _fake_admin_meta} fake_volume_obj = fake_volume.fake_volume_obj(self.context, **_fake_volume) _mock_volume_get.return_value = _fake_volume _mock_volume_update.return_value = _fake_volume _mock_volume_admin_metadata_get.return_value = { 'fake-key': 'fake-value'} connector = {'ip': 'IP', 'initiator': 'INITIATOR'} qos_values = {'consumer': 'front-end', 'specs': { 'write_iops_sec_per_gb_min': 800, 'write_iops_sec_per_gb': 30, 'read_iops_sec_per_gb_min': 23100, 'read_iops_sec_per_gb': 7700, 'total_iops_sec_per_gb_min': 900000, 'total_iops_sec_per_gb': 300000, 'total_iops_sec_max': 15000000, 'read_bytes_sec_per_gb_min': 30, 'read_bytes_sec_per_gb': 10, 'write_bytes_sec_per_gb_min': 120, 'write_bytes_sec_per_gb': 40, 'total_bytes_sec_per_gb_min': 3145728, 'total_bytes_sec_per_gb': 1048576} } with mock.patch.object(cinder.volume.volume_types, 'get_volume_type_qos_specs') as type_qos, \ mock.patch.object(cinder.volume.volume_types, 'get_volume_type_extra_specs' ) as type_extra_specs, \ mock.patch.object(cinder.tests.fake_driver.FakeLoggingVolumeDriver, 'initialize_connection') as driver_init: type_qos.return_value = dict(qos_specs=qos_values) type_extra_specs.return_value = {} driver_init.return_value = {'data': {}} mock_get_target.return_value = None qos_specs_expected = {'write_iops_sec': 3000, 'read_iops_sec': 770000, 'total_iops_sec': 15000000, 'read_bytes_sec': 1000, 'write_bytes_sec': 4000, 'total_bytes_sec': 104857600} # initialize_connection() passes qos_specs that is designated to # be consumed by front-end or both front-end and back-end conn_info = self.volume.initialize_connection( self.context, fake_volume_obj, connector,) self.assertDictEqual(qos_specs_expected, conn_info['data']['qos_specs']) qos_values.update({'consumer': 'both'}) conn_info = self.volume.initialize_connection( self.context, fake_volume_obj, connector) self.assertDictEqual(qos_specs_expected, conn_info['data']['qos_specs']) @mock.patch.object(fake_driver.FakeLoggingVolumeDriver, 'create_export') def test_initialize_connection_export_failure(self, _mock_create_export): """Test exception path for create_export failure.""" volume = tests_utils.create_volume( self.context, admin_metadata={'fake-key': 'fake-value'}, **self.volume_params) _mock_create_export.side_effect = exception.CinderException connector = {'ip': 'IP', 'initiator': 'INITIATOR'} self.assertRaises(exception.VolumeBackendAPIException, self.volume.initialize_connection, self.context, volume, connector) def test_initialize_connection_maintenance(self): """Test initialize connection in maintenance.""" test_meta1 = {'fake_key1': 'fake_value1', 'fake_key2': 'fake_value2'} volume = tests_utils.create_volume(self.context, metadata=test_meta1, **self.volume_params) volume['status'] = 'maintenance' volume_api = cinder.volume.api.API() self.assertRaises(exception.InvalidVolume, volume_api.initialize_connection, self.context, volume, None) @ddt.ddt class VolumeAttachDetachTestCase(base.BaseVolumeTestCase): def setUp(self): super(VolumeAttachDetachTestCase, self).setUp() self.patch('cinder.volume.volume_utils.clear_volume', autospec=True) self.user_context = context.RequestContext(user_id=fake.USER_ID, project_id=fake.PROJECT_ID) db.volume_type_create(self.context, v2_fakes.fake_default_type_get( fake.VOLUME_TYPE2_ID)) self.vol_type = db.volume_type_get_by_name(self.context, 'vol_type_name') @ddt.data(False, True) def test_run_attach_detach_volume_for_instance(self, volume_object): """Make sure volume can be 
attached and detached from instance.""" mountpoint = "/dev/sdf" # attach volume to the instance then to detach instance_uuid = '12345678-1234-5678-1234-567812345678' volume = tests_utils.create_volume(self.user_context, **self.volume_params) with volume.obj_as_admin(): volume.admin_metadata['readonly'] = True volume.save() volume_id = volume.id self.volume.create_volume(self.user_context, volume=volume) volume_passed = volume if volume_object else None attachment = self.volume.attach_volume(self.user_context, volume_id, instance_uuid, None, mountpoint, 'ro', volume=volume_passed) attachment2 = self.volume.attach_volume(self.user_context, volume_id, instance_uuid, None, mountpoint, 'ro', volume=volume_passed) self.assertEqual(attachment.id, attachment2.id) vol = objects.Volume.get_by_id(self.context, volume_id) self.assertEqual("in-use", vol.status) self.assertEqual(fields.VolumeAttachStatus.ATTACHED, attachment.attach_status) self.assertEqual(mountpoint, attachment.mountpoint) self.assertEqual(instance_uuid, attachment.instance_uuid) self.assertIsNone(attachment.attached_host) admin_metadata = vol.volume_admin_metadata self.assertEqual(2, len(admin_metadata)) expected = dict(readonly='True', attached_mode='ro') ret = {} for item in admin_metadata: ret.update({item['key']: item['value']}) self.assertDictEqual(expected, ret) connector = {'initiator': 'iqn.2012-07.org.fake:01'} volume = volume if volume_object else vol conn_info = self.volume.initialize_connection(self.context, volume, connector) self.assertEqual('ro', conn_info['data']['access_mode']) self.assertRaises(exception.VolumeAttached, self.volume.delete_volume, self.context, volume=volume) self.volume.detach_volume(self.context, volume_id, attachment.id, volume=volume_passed) vol = objects.Volume.get_by_id(self.context, volume_id) self.assertEqual('available', vol.status) self.volume.delete_volume(self.context, volume) self.assertRaises(exception.VolumeNotFound, db.volume_get, self.context, volume_id) @mock.patch('cinder.volume.manager.LOG', mock.Mock()) def test_initialize_connection(self): volume = mock.Mock(save=mock.Mock(side_effect=Exception)) with mock.patch.object(self.volume, 'driver') as driver_mock: self.assertRaises(exception.ExportFailure, self.volume.initialize_connection, self.context, volume, mock.Mock()) driver_mock.remove_export.assert_called_once_with(mock.ANY, volume) def test_run_attach_detach_2volumes_for_instance(self): """Make sure volume can be attached and detached from instance.""" # attach first volume to the instance mountpoint1 = "/dev/vdc" instance_uuid = '12345678-1234-5678-1234-567812345678' volume1 = tests_utils.create_volume( self.context, admin_metadata={'readonly': 'True'}, **self.volume_params) volume1_id = volume1['id'] self.volume.create_volume(self.context, volume1) attachment = self.volume.attach_volume(self.context, volume1_id, instance_uuid, None, mountpoint1, 'ro') vol1 = db.volume_get(context.get_admin_context(), volume1_id) self.assertEqual("in-use", vol1['status']) self.assertEqual('attached', attachment['attach_status']) self.assertEqual(mountpoint1, attachment['mountpoint']) self.assertEqual(instance_uuid, attachment['instance_uuid']) self.assertIsNone(attachment['attached_host']) admin_metadata = vol1['volume_admin_metadata'] self.assertEqual(2, len(admin_metadata)) expected = dict(readonly='True', attached_mode='ro') ret = {} for item in admin_metadata: ret.update({item['key']: item['value']}) self.assertDictEqual(expected, ret) connector = {'initiator': 'iqn.2012-07.org.fake:01'} 
conn_info = self.volume.initialize_connection(self.context, volume1, connector) self.assertEqual('ro', conn_info['data']['access_mode']) self.assertRaises(exception.VolumeAttached, self.volume.delete_volume, self.context, volume1) # attach 2nd volume to the instance mountpoint2 = "/dev/vdd" volume2 = tests_utils.create_volume( self.context, admin_metadata={'readonly': 'False'}, **self.volume_params) volume2_id = volume2['id'] self.volume.create_volume(self.context, volume2) attachment2 = self.volume.attach_volume(self.context, volume2_id, instance_uuid, None, mountpoint2, 'rw') vol2 = db.volume_get(context.get_admin_context(), volume2_id) self.assertEqual("in-use", vol2['status']) self.assertEqual('attached', attachment2['attach_status']) self.assertEqual(mountpoint2, attachment2['mountpoint']) self.assertEqual(instance_uuid, attachment2['instance_uuid']) self.assertIsNone(attachment2['attached_host']) admin_metadata = vol2['volume_admin_metadata'] self.assertEqual(2, len(admin_metadata)) expected = dict(readonly='False', attached_mode='rw') ret = {} for item in admin_metadata: ret.update({item['key']: item['value']}) self.assertDictEqual(expected, ret) connector = {'initiator': 'iqn.2012-07.org.fake:02'} conn_info = self.volume.initialize_connection(self.context, volume2, connector) self.assertEqual('rw', conn_info['data']['access_mode']) self.assertRaises(exception.VolumeAttached, self.volume.delete_volume, self.context, volume2) # detach first volume and then 2nd volume self.volume.detach_volume(self.context, volume1_id, attachment['id']) vol1 = db.volume_get(self.context, volume1_id) self.assertEqual('available', vol1['status']) self.volume.delete_volume(self.context, volume1) self.assertRaises(exception.VolumeNotFound, db.volume_get, self.context, volume1_id) self.volume.detach_volume(self.context, volume2_id, attachment2['id']) vol2 = db.volume_get(self.context, volume2_id) self.assertEqual('available', vol2['status']) self.volume.delete_volume(self.context, volume2) self.assertRaises(exception.VolumeNotFound, db.volume_get, self.context, volume2_id) def test_detach_invalid_attachment_id(self): """Make sure if the attachment id isn't found we raise.""" attachment_id = "notfoundid" volume = tests_utils.create_volume(self.context, admin_metadata={'readonly': 'True'}, multiattach=False, **self.volume_params) self.volume.detach_volume(self.context, volume['id'], attachment_id) volume = db.volume_get(self.context, volume['id']) self.assertEqual('available', volume['status']) instance_uuid = '12345678-1234-5678-1234-567812345678' attached_host = 'fake_host' mountpoint = '/dev/fake' tests_utils.attach_volume(self.context, volume['id'], instance_uuid, attached_host, mountpoint) self.volume.detach_volume(self.context, volume['id'], attachment_id) volume = db.volume_get(self.context, volume['id']) self.assertEqual('in-use', volume['status']) def test_detach_no_attachments(self): self.volume_params['status'] = 'detaching' volume = tests_utils.create_volume(self.context, admin_metadata={'readonly': 'True'}, multiattach=False, **self.volume_params) self.volume.detach_volume(self.context, volume['id']) volume = db.volume_get(self.context, volume['id']) self.assertEqual('available', volume['status']) def test_run_attach_detach_volume_for_instance_no_attachment_id(self): """Make sure volume can be attached and detached from instance.""" mountpoint = "/dev/sdf" # attach volume to the instance then to detach instance_uuid = '12345678-1234-5678-1234-567812345678' instance_uuid_2 = 
'12345678-4321-8765-4321-567812345678' volume = tests_utils.create_volume(self.context, admin_metadata={'readonly': 'True'}, multiattach=True, **self.volume_params) volume_id = volume['id'] self.volume.create_volume(self.context, volume) attachment = self.volume.attach_volume(self.context, volume_id, instance_uuid, None, mountpoint, 'ro') vol = db.volume_get(context.get_admin_context(), volume_id) self.assertEqual('in-use', vol['status']) self.assertEqual(fields.VolumeAttachStatus.ATTACHED, attachment['attach_status']) self.assertEqual(mountpoint, attachment['mountpoint']) self.assertEqual(instance_uuid, attachment['instance_uuid']) self.assertIsNone(attachment['attached_host']) admin_metadata = vol['volume_admin_metadata'] self.assertEqual(2, len(admin_metadata)) expected = dict(readonly='True', attached_mode='ro') ret = {} for item in admin_metadata: ret.update({item['key']: item['value']}) self.assertDictEqual(expected, ret) attachment2 = self.volume.attach_volume(self.context, volume_id, instance_uuid_2, None, mountpoint, 'ro') connector = {'initiator': 'iqn.2012-07.org.fake:01'} conn_info = self.volume.initialize_connection(self.context, volume, connector) self.assertEqual('ro', conn_info['data']['access_mode']) self.assertRaises(exception.VolumeAttached, self.volume.delete_volume, self.context, volume) self.assertRaises(exception.InvalidVolume, self.volume.detach_volume, self.context, volume_id) self.volume.detach_volume(self.context, volume_id, attachment['id']) vol = db.volume_get(self.context, volume_id) self.assertEqual('in-use', vol['status']) self.volume.detach_volume(self.context, volume_id, attachment2['id']) vol = db.volume_get(self.context, volume_id) self.assertEqual('available', vol['status']) attachment = self.volume.attach_volume(self.context, volume_id, instance_uuid, None, mountpoint, 'ro') vol = db.volume_get(self.context, volume_id) self.assertEqual('in-use', vol['status']) self.volume.detach_volume(self.context, volume_id) vol = db.volume_get(self.context, volume_id) self.assertEqual('available', vol['status']) self.volume.delete_volume(self.context, volume) self.assertRaises(exception.VolumeNotFound, db.volume_get, self.context, volume_id) def test_run_attach_detach_multiattach_volume_for_instances(self): """Make sure volume can be attached to multiple instances.""" mountpoint = "/dev/sdf" # attach volume to the instance then to detach instance_uuid = '12345678-1234-5678-1234-567812345678' volume = tests_utils.create_volume(self.context, admin_metadata={'readonly': 'True'}, multiattach=True, **self.volume_params) volume_id = volume['id'] self.volume.create_volume(self.context, volume) attachment = self.volume.attach_volume(self.context, volume_id, instance_uuid, None, mountpoint, 'ro') vol = db.volume_get(context.get_admin_context(), volume_id) self.assertEqual('in-use', vol['status']) self.assertTrue(vol['multiattach']) self.assertEqual(fields.VolumeAttachStatus.ATTACHED, attachment['attach_status']) self.assertEqual(mountpoint, attachment['mountpoint']) self.assertEqual(instance_uuid, attachment['instance_uuid']) self.assertIsNone(attachment['attached_host']) admin_metadata = vol['volume_admin_metadata'] self.assertEqual(2, len(admin_metadata)) expected = dict(readonly='True', attached_mode='ro') ret = {} for item in admin_metadata: ret.update({item['key']: item['value']}) self.assertDictEqual(expected, ret) connector = {'initiator': 'iqn.2012-07.org.fake:01'} conn_info = self.volume.initialize_connection(self.context, volume, connector) self.assertEqual('ro', 
conn_info['data']['access_mode']) instance2_uuid = '12345678-1234-5678-1234-567812345000' mountpoint2 = "/dev/sdx" attachment2 = self.volume.attach_volume(self.context, volume_id, instance2_uuid, None, mountpoint2, 'ro') vol = db.volume_get(context.get_admin_context(), volume_id) self.assertEqual('in-use', vol['status']) self.assertTrue(vol['multiattach']) self.assertEqual(fields.VolumeAttachStatus.ATTACHED, attachment2['attach_status']) self.assertEqual(mountpoint2, attachment2['mountpoint']) self.assertEqual(instance2_uuid, attachment2['instance_uuid']) self.assertIsNone(attachment2['attached_host']) self.assertNotEqual(attachment, attachment2) self.assertRaises(exception.VolumeAttached, self.volume.delete_volume, self.context, volume) self.volume.detach_volume(self.context, volume_id, attachment['id']) vol = db.volume_get(self.context, volume_id) self.assertEqual('in-use', vol['status']) self.assertRaises(exception.VolumeAttached, self.volume.delete_volume, self.context, volume) self.volume.detach_volume(self.context, volume_id, attachment2['id']) vol = db.volume_get(self.context, volume_id) self.assertEqual('available', vol['status']) self.volume.delete_volume(self.context, volume) self.assertRaises(exception.VolumeNotFound, db.volume_get, self.context, volume_id) def test_run_attach_twice_multiattach_volume_for_instances(self): """Make sure volume can be attached to multiple instances.""" mountpoint = "/dev/sdf" # attach volume to the instance then to detach instance_uuid = '12345678-1234-5678-1234-567812345699' volume = tests_utils.create_volume(self.context, admin_metadata={'readonly': 'True'}, multiattach=True, **self.volume_params) volume_id = volume['id'] self.volume.create_volume(self.context, volume) attachment = self.volume.attach_volume(self.context, volume_id, instance_uuid, None, mountpoint, 'ro') vol = db.volume_get(context.get_admin_context(), volume_id) self.assertEqual('in-use', vol['status']) self.assertTrue(vol['multiattach']) self.assertEqual(fields.VolumeAttachStatus.ATTACHED, attachment['attach_status']) self.assertEqual(mountpoint, attachment['mountpoint']) self.assertEqual(instance_uuid, attachment['instance_uuid']) self.assertIsNone(attachment['attached_host']) admin_metadata = vol['volume_admin_metadata'] self.assertEqual(2, len(admin_metadata)) expected = dict(readonly='True', attached_mode='ro') ret = {} for item in admin_metadata: ret.update({item['key']: item['value']}) self.assertDictEqual(expected, ret) connector = {'initiator': 'iqn.2012-07.org.fake:01'} conn_info = self.volume.initialize_connection(self.context, volume, connector) self.assertEqual('ro', conn_info['data']['access_mode']) mountpoint2 = "/dev/sdx" attachment2 = self.volume.attach_volume(self.context, volume_id, instance_uuid, None, mountpoint2, 'ro') vol = db.volume_get(context.get_admin_context(), volume_id) self.assertEqual('in-use', vol['status']) self.assertTrue(vol['multiattach']) self.assertEqual('attached', attachment2['attach_status']) self.assertEqual(mountpoint, attachment2['mountpoint']) self.assertEqual(instance_uuid, attachment2['instance_uuid']) self.assertIsNone(attachment2['attached_host']) self.assertRaises(exception.VolumeAttached, self.volume.delete_volume, self.context, volume) def test_attach_detach_not_multiattach_volume_for_instances(self): """Make sure volume can't be attached to more than one instance.""" mountpoint = "/dev/sdf" # attach volume to the instance then to detach instance_uuid = '12345678-1234-5678-1234-567812345678' volume = 
tests_utils.create_volume(self.context, admin_metadata={'readonly': 'True'}, multiattach=False, **self.volume_params) volume_id = volume['id'] self.volume.create_volume(self.context, volume) attachment = self.volume.attach_volume(self.context, volume_id, instance_uuid, None, mountpoint, 'ro') vol = db.volume_get(context.get_admin_context(), volume_id) self.assertEqual('in-use', vol['status']) self.assertFalse(vol['multiattach']) self.assertEqual(fields.VolumeAttachStatus.ATTACHED, attachment['attach_status']) self.assertEqual(mountpoint, attachment['mountpoint']) self.assertEqual(instance_uuid, attachment['instance_uuid']) self.assertIsNone(attachment['attached_host']) admin_metadata = vol['volume_admin_metadata'] self.assertEqual(2, len(admin_metadata)) expected = dict(readonly='True', attached_mode='ro') ret = {} for item in admin_metadata: ret.update({item['key']: item['value']}) self.assertDictEqual(expected, ret) connector = {'initiator': 'iqn.2012-07.org.fake:01'} conn_info = self.volume.initialize_connection(self.context, volume, connector) self.assertEqual('ro', conn_info['data']['access_mode']) instance2_uuid = '12345678-1234-5678-1234-567812345000' mountpoint2 = "/dev/sdx" self.assertRaises(exception.InvalidVolume, self.volume.attach_volume, self.context, volume_id, instance2_uuid, None, mountpoint2, 'ro') self.assertRaises(exception.VolumeAttached, self.volume.delete_volume, self.context, volume) self.volume.detach_volume(self.context, volume_id, attachment['id']) vol = db.volume_get(self.context, volume_id) self.assertEqual('available', vol['status']) self.volume.delete_volume(self.context, volume) self.assertRaises(exception.VolumeNotFound, db.volume_get, self.context, volume_id) def test_run_attach_detach_volume_for_host(self): """Make sure volume can be attached and detached from host.""" mountpoint = "/dev/sdf" volume = tests_utils.create_volume( self.context, admin_metadata={'readonly': 'False'}, **self.volume_params) volume_id = volume['id'] self.volume.create_volume(self.context, volume) attachment = self.volume.attach_volume(self.context, volume_id, None, 'fake_host', mountpoint, 'rw') vol = db.volume_get(context.get_admin_context(), volume_id) self.assertEqual('in-use', vol['status']) self.assertEqual(fields.VolumeAttachStatus.ATTACHED, attachment['attach_status']) self.assertEqual(mountpoint, attachment['mountpoint']) self.assertIsNone(attachment['instance_uuid']) # sanitized, conforms to RFC-952 and RFC-1123 specs. 
self.assertEqual('fake-host', attachment['attached_host']) admin_metadata = vol['volume_admin_metadata'] self.assertEqual(2, len(admin_metadata)) expected = dict(readonly='False', attached_mode='rw') ret = {} for item in admin_metadata: ret.update({item['key']: item['value']}) self.assertDictEqual(expected, ret) connector = {'initiator': 'iqn.2012-07.org.fake:01'} conn_info = self.volume.initialize_connection(self.context, volume, connector) self.assertEqual('rw', conn_info['data']['access_mode']) self.assertRaises(exception.VolumeAttached, self.volume.delete_volume, self.context, volume) self.volume.detach_volume(self.context, volume_id, attachment['id']) vol = db.volume_get(self.context, volume_id) self.assertEqual("available", vol['status']) self.volume.delete_volume(self.context, volume) self.assertRaises(exception.VolumeNotFound, db.volume_get, self.context, volume_id) def test_run_attach_detach_multiattach_volume_for_hosts(self): """Make sure volume can be attached and detached from hosts.""" mountpoint = "/dev/sdf" volume = tests_utils.create_volume( self.context, admin_metadata={'readonly': 'False'}, multiattach=True, **self.volume_params) volume_id = volume['id'] self.volume.create_volume(self.context, volume) attachment = self.volume.attach_volume(self.context, volume_id, None, 'fake_host', mountpoint, 'rw') vol = db.volume_get(context.get_admin_context(), volume_id) self.assertEqual('in-use', vol['status']) self.assertTrue(vol['multiattach']) self.assertEqual(fields.VolumeAttachStatus.ATTACHED, attachment['attach_status']) self.assertEqual(mountpoint, attachment['mountpoint']) self.assertIsNone(attachment['instance_uuid']) # sanitized, conforms to RFC-952 and RFC-1123 specs. self.assertEqual('fake-host', attachment['attached_host']) admin_metadata = vol['volume_admin_metadata'] self.assertEqual(2, len(admin_metadata)) expected = dict(readonly='False', attached_mode='rw') ret = {} for item in admin_metadata: ret.update({item['key']: item['value']}) self.assertDictEqual(expected, ret) connector = {'initiator': 'iqn.2012-07.org.fake:01'} conn_info = self.volume.initialize_connection(self.context, volume, connector) self.assertEqual('rw', conn_info['data']['access_mode']) mountpoint2 = "/dev/sdx" attachment2 = self.volume.attach_volume(self.context, volume_id, None, 'fake_host2', mountpoint2, 'rw') vol = db.volume_get(context.get_admin_context(), volume_id) self.assertEqual('in-use', vol['status']) self.assertEqual(fields.VolumeAttachStatus.ATTACHED, attachment2['attach_status']) self.assertEqual(mountpoint2, attachment2['mountpoint']) self.assertIsNone(attachment2['instance_uuid']) # sanitized, conforms to RFC-952 and RFC-1123 specs. 
self.assertEqual('fake-host2', attachment2['attached_host']) self.assertRaises(exception.VolumeAttached, self.volume.delete_volume, self.context, volume) self.volume.detach_volume(self.context, volume_id, attachment['id']) vol = db.volume_get(self.context, volume_id) self.assertEqual("in-use", vol['status']) self.volume.detach_volume(self.context, volume_id, attachment2['id']) vol = db.volume_get(self.context, volume_id) self.assertEqual("available", vol['status']) self.volume.delete_volume(self.context, volume) self.assertRaises(exception.VolumeNotFound, db.volume_get, self.context, volume_id) def test_run_attach_twice_multiattach_volume_for_hosts(self): """Make sure volume can be attached and detached from hosts.""" mountpoint = "/dev/sdf" volume = tests_utils.create_volume( self.context, admin_metadata={'readonly': 'False'}, multiattach=True, **self.volume_params) volume_id = volume['id'] self.volume.create_volume(self.context, volume) attachment = self.volume.attach_volume(self.context, volume_id, None, 'fake_host', mountpoint, 'rw') vol = db.volume_get(context.get_admin_context(), volume_id) self.assertEqual('in-use', vol['status']) self.assertTrue(vol['multiattach']) self.assertEqual(fields.VolumeAttachStatus.ATTACHED, attachment['attach_status']) self.assertEqual(mountpoint, attachment['mountpoint']) self.assertIsNone(attachment['instance_uuid']) # sanitized, conforms to RFC-952 and RFC-1123 specs. self.assertEqual('fake-host', attachment['attached_host']) admin_metadata = vol['volume_admin_metadata'] self.assertEqual(2, len(admin_metadata)) expected = dict(readonly='False', attached_mode='rw') ret = {} for item in admin_metadata: ret.update({item['key']: item['value']}) self.assertDictEqual(expected, ret) connector = {'initiator': 'iqn.2012-07.org.fake:01'} conn_info = self.volume.initialize_connection(self.context, volume, connector) self.assertEqual('rw', conn_info['data']['access_mode']) mountpoint2 = "/dev/sdx" attachment2 = self.volume.attach_volume(self.context, volume_id, None, 'fake_host', mountpoint2, 'rw') vol = db.volume_get(context.get_admin_context(), volume_id) self.assertEqual('in-use', vol['status']) self.assertEqual('attached', attachment2['attach_status']) self.assertEqual(mountpoint, attachment2['mountpoint']) self.assertIsNone(attachment2['instance_uuid']) self.assertRaises(exception.VolumeAttached, self.volume.delete_volume, self.context, volume) def test_run_attach_detach_not_multiattach_volume_for_hosts(self): """Make sure volume can't be attached to more than one host.""" mountpoint = "/dev/sdf" volume = tests_utils.create_volume( self.context, admin_metadata={'readonly': 'False'}, multiattach=False, **self.volume_params) volume_id = volume['id'] self.volume.create_volume(self.context, volume) attachment = self.volume.attach_volume(self.context, volume_id, None, 'fake_host', mountpoint, 'rw') vol = db.volume_get(context.get_admin_context(), volume_id) self.assertEqual('in-use', vol['status']) self.assertFalse(vol['multiattach']) self.assertEqual(fields.VolumeAttachStatus.ATTACHED, attachment['attach_status']) self.assertEqual(mountpoint, attachment['mountpoint']) self.assertIsNone(attachment['instance_uuid']) # sanitized, conforms to RFC-952 and RFC-1123 specs. 
self.assertEqual('fake-host', attachment['attached_host']) admin_metadata = vol['volume_admin_metadata'] self.assertEqual(2, len(admin_metadata)) expected = dict(readonly='False', attached_mode='rw') ret = {} for item in admin_metadata: ret.update({item['key']: item['value']}) self.assertDictEqual(expected, ret) connector = {'initiator': 'iqn.2012-07.org.fake:01'} conn_info = self.volume.initialize_connection(self.context, volume, connector) self.assertEqual('rw', conn_info['data']['access_mode']) mountpoint2 = "/dev/sdx" self.assertRaises(exception.InvalidVolume, self.volume.attach_volume, self.context, volume_id, None, 'fake_host2', mountpoint2, 'rw') vol = db.volume_get(context.get_admin_context(), volume_id) self.assertEqual('in-use', vol['status']) self.assertEqual(fields.VolumeAttachStatus.ATTACHED, attachment['attach_status']) self.assertEqual(mountpoint, attachment['mountpoint']) self.assertIsNone(attachment['instance_uuid']) # sanitized, conforms to RFC-952 and RFC-1123 specs. self.assertEqual('fake-host', attachment['attached_host']) self.assertRaises(exception.VolumeAttached, self.volume.delete_volume, self.context, volume) self.volume.detach_volume(self.context, volume_id, attachment['id']) vol = db.volume_get(self.context, volume_id) self.assertEqual('available', vol['status']) self.volume.delete_volume(self.context, volume) self.assertRaises(exception.VolumeNotFound, db.volume_get, self.context, volume_id) def test_run_attach_detach_volume_with_attach_mode(self): instance_uuid = '12345678-1234-5678-1234-567812345678' mountpoint = "/dev/sdf" volume = tests_utils.create_volume(self.context, admin_metadata={'readonly': 'True'}, **self.volume_params) volume_id = volume['id'] db.volume_update(self.context, volume_id, {'status': 'available', }) self.volume.attach_volume(self.context, volume_id, instance_uuid, None, mountpoint, 'ro') vol = db.volume_get(context.get_admin_context(), volume_id) attachment = vol['volume_attachment'][0] self.assertEqual('in-use', vol['status']) self.assertEqual(fields.VolumeAttachStatus.ATTACHED, vol['attach_status']) self.assertEqual(mountpoint, attachment['mountpoint']) self.assertEqual(instance_uuid, attachment['instance_uuid']) self.assertIsNone(attachment['attached_host']) admin_metadata = vol['volume_admin_metadata'] self.assertEqual(2, len(admin_metadata)) expected = dict(readonly='True', attached_mode='ro') ret = {} for item in admin_metadata: ret.update({item['key']: item['value']}) self.assertDictEqual(expected, ret) connector = {'initiator': 'iqn.2012-07.org.fake:01'} conn_info = self.volume.initialize_connection(self.context, volume, connector) self.assertEqual('ro', conn_info['data']['access_mode']) self.volume.detach_volume(self.context, volume_id, attachment['id']) vol = db.volume_get(self.context, volume_id) attachment = vol['volume_attachment'] self.assertEqual('available', vol['status']) self.assertEqual(fields.VolumeAttachStatus.DETACHED, vol['attach_status']) self.assertEqual([], attachment) admin_metadata = vol['volume_admin_metadata'] self.assertEqual(1, len(admin_metadata)) self.assertEqual('readonly', admin_metadata[0]['key']) self.assertEqual('True', admin_metadata[0]['value']) self.volume.attach_volume(self.context, volume_id, None, 'fake_host', mountpoint, 'ro') vol = db.volume_get(context.get_admin_context(), volume_id) attachment = vol['volume_attachment'][0] self.assertEqual('in-use', vol['status']) self.assertEqual(fields.VolumeAttachStatus.ATTACHED, vol['attach_status']) self.assertEqual(mountpoint, 
attachment['mountpoint']) self.assertIsNone(attachment['instance_uuid']) self.assertEqual('fake-host', attachment['attached_host']) admin_metadata = vol['volume_admin_metadata'] self.assertEqual(2, len(admin_metadata)) expected = dict(readonly='True', attached_mode='ro') ret = {} for item in admin_metadata: ret.update({item['key']: item['value']}) self.assertDictEqual(expected, ret) connector = {'initiator': 'iqn.2012-07.org.fake:01'} conn_info = self.volume.initialize_connection(self.context, volume, connector) self.assertEqual('ro', conn_info['data']['access_mode']) self.volume.detach_volume(self.context, volume_id, attachment['id']) vol = db.volume_get(self.context, volume_id) attachment = vol['volume_attachment'] self.assertEqual('available', vol['status']) self.assertEqual(fields.VolumeAttachStatus.DETACHED, vol['attach_status']) self.assertEqual([], attachment) admin_metadata = vol['volume_admin_metadata'] self.assertEqual(1, len(admin_metadata)) self.assertEqual('readonly', admin_metadata[0]['key']) self.assertEqual('True', admin_metadata[0]['value']) self.volume.delete_volume(self.context, volume) self.assertRaises(exception.VolumeNotFound, db.volume_get, self.context, volume_id) def test_run_manager_attach_detach_volume_with_wrong_attach_mode(self): # Not allow using 'read-write' mode attach readonly volume instance_uuid = '12345678-1234-5678-1234-567812345678' mountpoint = "/dev/sdf" volume = tests_utils.create_volume(self.context, admin_metadata={'readonly': 'True'}, **self.volume_params) volume_id = volume['id'] self.volume.create_volume(self.context, volume) self.assertRaises(exception.InvalidVolumeAttachMode, self.volume.attach_volume, self.context, volume_id, instance_uuid, None, mountpoint, 'rw') # Assert a user message was created self.volume.message_api.create.assert_called_once_with( self.context, message_field.Action.ATTACH_VOLUME, resource_uuid=volume['id'], exception=mock.ANY) attachment = objects.VolumeAttachmentList.get_all_by_volume_id( context.get_admin_context(), volume_id)[0] self.assertEqual(fields.VolumeAttachStatus.ERROR_ATTACHING, attachment.attach_status) vol = db.volume_get(context.get_admin_context(), volume_id) self.assertEqual(fields.VolumeAttachStatus.DETACHED, vol['attach_status']) admin_metadata = vol['volume_admin_metadata'] self.assertEqual(2, len(admin_metadata)) expected = dict(readonly='True', attached_mode='rw') ret = {} for item in admin_metadata: ret.update({item['key']: item['value']}) self.assertDictEqual(expected, ret) db.volume_update(self.context, volume_id, {'status': 'available'}) self.assertRaises(exception.InvalidVolumeAttachMode, self.volume.attach_volume, self.context, volume_id, None, 'fake_host', mountpoint, 'rw') attachment = objects.VolumeAttachmentList.get_all_by_volume_id( context.get_admin_context(), volume_id)[0] self.assertEqual(fields.VolumeAttachStatus.ERROR_ATTACHING, attachment.attach_status) vol = db.volume_get(context.get_admin_context(), volume_id) self.assertEqual(fields.VolumeAttachStatus.DETACHED, vol['attach_status']) admin_metadata = vol['volume_admin_metadata'] self.assertEqual(2, len(admin_metadata)) expected = dict(readonly='True', attached_mode='rw') ret = {} for item in admin_metadata: ret.update({item['key']: item['value']}) self.assertDictEqual(expected, ret) def test_run_api_attach_detach_volume_with_wrong_attach_mode(self): # Not allow using 'read-write' mode attach readonly volume instance_uuid = '12345678-1234-5678-1234-567812345678' mountpoint = "/dev/sdf" volume = 
tests_utils.create_volume(self.context, admin_metadata={'readonly': 'True'}, **self.volume_params) volume_id = volume['id'] self.volume.create_volume(self.context, volume) volume_api = cinder.volume.api.API() self.assertRaises(exception.InvalidVolumeAttachMode, volume_api.attach, self.context, volume, instance_uuid, None, mountpoint, 'rw') vol = db.volume_get(context.get_admin_context(), volume_id) self.assertEqual(fields.VolumeAttachStatus.DETACHED, vol['attach_status']) admin_metadata = vol['volume_admin_metadata'] self.assertEqual(1, len(admin_metadata)) self.assertEqual('readonly', admin_metadata[0]['key']) self.assertEqual('True', admin_metadata[0]['value']) db.volume_update(self.context, volume_id, {'status': 'available'}) self.assertRaises(exception.InvalidVolumeAttachMode, volume_api.attach, self.context, volume, None, 'fake_host', mountpoint, 'rw') vol = db.volume_get(context.get_admin_context(), volume_id) self.assertEqual(fields.VolumeAttachStatus.DETACHED, vol['attach_status']) admin_metadata = vol['volume_admin_metadata'] self.assertEqual(1, len(admin_metadata)) self.assertEqual('readonly', admin_metadata[0]['key']) self.assertEqual('True', admin_metadata[0]['value']) def test_detach_volume_while_uploading_to_image_is_in_progress(self): # If instance is booted from volume with 'Terminate on Delete' flag # set, and when we delete instance then it tries to delete volume # even it is in 'uploading' state. # It is happening because detach call is setting volume status to # 'available'. mountpoint = "/dev/sdf" # Attach volume to the instance instance_uuid = '12345678-1234-5678-1234-567812345678' volume = tests_utils.create_volume(self.context, admin_metadata={'readonly': 'True'}, **self.volume_params) volume_id = volume['id'] self.volume.create_volume(self.context, volume) self.volume.attach_volume(self.context, volume_id, instance_uuid, None, mountpoint, 'ro') # Change volume status to 'uploading' db.volume_update(self.context, volume_id, {'status': 'uploading'}) # Call detach api self.volume.detach_volume(self.context, volume_id) vol = db.volume_get(self.context, volume_id) # Check that volume status is 'uploading' self.assertEqual("uploading", vol['status']) self.assertEqual(fields.VolumeAttachStatus.DETACHED, vol['attach_status']) def test_volume_attach_in_maintenance(self): """Test attach the volume in maintenance.""" test_meta1 = {'fake_key1': 'fake_value1', 'fake_key2': 'fake_value2'} volume = tests_utils.create_volume(self.context, metadata=test_meta1, **self.volume_params) volume['status'] = 'maintenance' self.assertRaises(exception.InvalidVolume, self.volume_api.attach, self.context, volume, None, None, None, None) @mock.patch('cinder.volume.api.API.attachment_deletion_allowed') def test_volume_detach_in_maintenance(self, mock_attachment_deletion): """Test detach the volume in maintenance.""" test_meta1 = {'fake_key1': 'fake_value1', 'fake_key2': 'fake_value2'} volume = tests_utils.create_volume(self.context, metadata=test_meta1, **self.volume_params) volume['status'] = 'maintenance' volume_api = cinder.volume.api.API() self.assertRaises(exception.InvalidVolume, volume_api.detach, self.context, volume, None) mock_attachment_deletion.assert_called_once_with(self.context, None, volume) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/volume/test_driver.py0000664000175000017500000007753500000000000022710 0ustar00zuulzuul00000000000000# Copyright (c) 2016 Red Hat Inc. 
# All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Tests for Volume Code.""" import shutil import tempfile from unittest import mock import ddt import os_brick from oslo_config import cfg from oslo_utils import importutils from cinder.brick.local_dev import lvm as brick_lvm from cinder import context from cinder import db from cinder import exception from cinder.image import image_utils from cinder import objects from cinder.objects import fields import cinder.policy from cinder.tests.unit import fake_constants as fake from cinder.tests.unit import fake_snapshot from cinder.tests.unit import fake_volume from cinder.tests.unit.image import fake as fake_image from cinder.tests.unit import test from cinder.tests.unit import utils as tests_utils import cinder.volume from cinder.volume import configuration as conf from cinder.volume import driver from cinder.volume import manager from cinder.volume import rpcapi as volume_rpcapi import cinder.volume.targets.tgt from cinder.volume import volume_utils CONF = cfg.CONF def my_safe_get(self, value): if value == 'replication_device': return ['replication'] return None @ddt.ddt class DriverTestCase(test.TestCase): @staticmethod def _get_driver(relicated, version): class NonReplicatedDriver(driver.VolumeDriver): pass class V21Driver(driver.VolumeDriver): def failover_host(*args, **kwargs): pass class AADriver(V21Driver): def failover_completed(*args, **kwargs): pass if not relicated: return NonReplicatedDriver if version == 'v2.1': return V21Driver return AADriver @ddt.data('v2.1', 'a/a', 'newfeature') def test_supports_replication_feature_none(self, rep_version): my_driver = self._get_driver(False, None) self.assertFalse(my_driver.supports_replication_feature(rep_version)) @ddt.data('v2.1', 'a/a', 'newfeature') def test_supports_replication_feature_only_21(self, rep_version): version = 'v2.1' my_driver = self._get_driver(True, version) self.assertEqual(rep_version == version, my_driver.supports_replication_feature(rep_version)) @ddt.data('v2.1', 'a/a', 'newfeature') def test_supports_replication_feature_aa(self, rep_version): my_driver = self._get_driver(True, 'a/a') self.assertEqual(rep_version in ('v2.1', 'a/a'), my_driver.supports_replication_feature(rep_version)) def test_init_non_replicated(self): config = manager.config.Configuration(manager.volume_manager_opts, config_group='volume') # No exception raised self._get_driver(False, None)(configuration=config) @ddt.data('v2.1', 'a/a') @mock.patch('cinder.volume.configuration.Configuration.safe_get', my_safe_get) def test_init_replicated_non_clustered(self, version): def append_config_values(self, volume_opts): pass config = manager.config.Configuration(manager.volume_manager_opts, config_group='volume') # No exception raised self._get_driver(True, version)(configuration=config) @mock.patch('cinder.volume.configuration.Configuration.safe_get', my_safe_get) def test_init_replicated_clustered_not_supported(self): config = manager.config.Configuration(manager.volume_manager_opts, 
config_group='volume') # Raises exception because we are trying to run a replicated service # in clustered mode but the driver doesn't support it. self.assertRaises(exception.Invalid, self._get_driver(True, 'v2.1'), configuration=config, cluster_name='mycluster') @mock.patch('cinder.volume.configuration.Configuration.safe_get', my_safe_get) def test_init_replicated_clustered_supported(self): config = manager.config.Configuration(manager.volume_manager_opts, config_group='volume') # No exception raised self._get_driver(True, 'a/a')(configuration=config, cluster_name='mycluster') def test_failover(self): """Test default failover behavior of calling failover_host.""" my_driver = self._get_driver(True, 'a/a')() with mock.patch.object(my_driver, 'failover_host') as failover_mock: res = my_driver.failover(mock.sentinel.context, mock.sentinel.volumes, secondary_id=mock.sentinel.secondary_id, groups=[]) self.assertEqual(failover_mock.return_value, res) failover_mock.assert_called_once_with(mock.sentinel.context, mock.sentinel.volumes, mock.sentinel.secondary_id, []) class BaseDriverTestCase(test.TestCase): """Base Test class for Drivers.""" driver_name = "cinder.volume.driver.FakeBaseDriver" def setUp(self): super(BaseDriverTestCase, self).setUp() vol_tmpdir = tempfile.mkdtemp() self.override_config('volume_driver', self.driver_name, conf.SHARED_CONF_GROUP) self.override_config('volumes_dir', vol_tmpdir, conf.SHARED_CONF_GROUP) self.volume = importutils.import_object(CONF.volume_manager) self.mock_object(self.volume, '_driver_shares_targets', return_value=False) self.context = context.get_admin_context() self.output = "" self.configuration = conf.Configuration(None) self.mock_object(brick_lvm.LVM, '_vg_exists', lambda x: True) def _fake_execute(_command, *_args, **_kwargs): """Fake _execute.""" return self.output, None exec_patcher = mock.patch.object(self.volume.driver, '_execute', _fake_execute) exec_patcher.start() self.addCleanup(exec_patcher.stop) self.volume.driver.set_initialized() self.addCleanup(self._cleanup) def _cleanup(self): try: shutil.rmtree(CONF.volumes_dir) except OSError: pass def _attach_volume(self): """Attach volumes to an instance.""" return [] @ddt.ddt class GenericVolumeDriverTestCase(BaseDriverTestCase): """Test case for VolumeDriver.""" driver_name = "cinder.tests.fake_driver.FakeLoggingVolumeDriver" def test_create_temp_cloned_volume(self): with mock.patch.object( self.volume.driver, 'create_cloned_volume') as mock_create_cloned_volume: model_update = {'provider_location': 'dummy'} mock_create_cloned_volume.return_value = model_update vol = tests_utils.create_volume(self.context, status='backing-up') cloned_vol = self.volume.driver._create_temp_cloned_volume( self.context, vol) self.assertEqual('dummy', cloned_vol.provider_location) self.assertEqual('available', cloned_vol.status) mock_create_cloned_volume.return_value = None vol = tests_utils.create_volume(self.context, status='backing-up') cloned_vol = self.volume.driver._create_temp_cloned_volume( self.context, vol, status=fields.VolumeStatus.BACKING_UP) self.assertEqual(fields.VolumeStatus.BACKING_UP, cloned_vol.status) def test_get_backup_device_available(self): vol = tests_utils.create_volume(self.context) self.context.user_id = fake.USER_ID self.context.project_id = fake.PROJECT_ID backup_obj = tests_utils.create_backup(self.context, vol['id']) (backup_device, is_snapshot) = self.volume.driver.get_backup_device( self.context, backup_obj) volume = objects.Volume.get_by_id(self.context, vol.id) 
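# For an 'available' volume the driver returns the volume itself as the
# backup device: no temporary clone is created, so the admin metadata
# carries no 'temporary' marker and the backup records no temp_volume_id.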
self.assertNotIn('temporary', backup_device.admin_metadata.keys()) self.assertEqual(volume, backup_device) self.assertFalse(is_snapshot) backup_obj.refresh() self.assertIsNone(backup_obj.temp_volume_id) def test_get_backup_device_in_use(self): vol = tests_utils.create_volume(self.context, status='backing-up', previous_status='in-use') admin_meta = {'temporary': 'True'} temp_vol = tests_utils.create_volume( self.context, status=fields.VolumeStatus.BACKING_UP, admin_metadata=admin_meta) self.context.user_id = fake.USER_ID self.context.project_id = fake.PROJECT_ID backup_obj = tests_utils.create_backup(self.context, vol['id']) with mock.patch.object( self.volume.driver, '_create_temp_cloned_volume') as mock_create_temp: mock_create_temp.return_value = temp_vol (backup_device, is_snapshot) = ( self.volume.driver.get_backup_device(self.context, backup_obj)) self.assertEqual(temp_vol, backup_device) self.assertFalse(is_snapshot) backup_obj.refresh() self.assertEqual(temp_vol.id, backup_obj.temp_volume_id) mock_create_temp.assert_called_once_with( self.context, vol, status=fields.VolumeStatus.BACKING_UP) def test_create_temp_volume_from_snapshot(self): volume_dict = {'id': fake.SNAPSHOT_ID, 'host': 'fakehost', 'cluster_name': 'fakecluster', 'availability_zone': 'fakezone', 'size': 1, 'volume_type_id': fake.VOLUME_TYPE_ID} vol = fake_volume.fake_volume_obj(self.context, **volume_dict) snapshot = fake_snapshot.fake_snapshot_obj(self.context) with mock.patch.object( self.volume.driver, 'create_volume_from_snapshot'): temp_vol = self.volume.driver._create_temp_volume_from_snapshot( self.context, vol, snapshot) self.assertEqual(fields.VolumeAttachStatus.DETACHED, temp_vol.attach_status) self.assertEqual('fakezone', temp_vol.availability_zone) self.assertEqual('fakecluster', temp_vol.cluster_name) self.assertFalse(temp_vol.use_quota) def test__create_temp_snapshot(self): volume_dict = {'id': fake.SNAPSHOT_ID, 'host': 'fakehost', 'cluster_name': 'fakecluster', 'availability_zone': 'fakezone', 'size': 1, 'volume_type_id': fake.VOLUME_TYPE_ID} volume = fake_volume.fake_volume_obj(self.context, **volume_dict) # We want to confirm that the driver properly updates fields with the # value returned by the create_snapshot method driver_updates = {'provider_location': 'driver_provider_location'} with mock.patch.object(self.volume.driver, 'create_snapshot', return_value=driver_updates) as create_mock: res = self.volume.driver._create_temp_snapshot(self.context, volume) create_mock.assert_called_once_with(res) expected = {'volume_id': volume.id, 'progress': '100%', 'status': fields.SnapshotStatus.AVAILABLE, 'use_quota': False, # Temporary snapshots don't use quota 'project_id': self.context.project_id, 'user_id': self.context.user_id, 'volume_size': volume.size, 'volume_type_id': volume.volume_type_id, 'provider_location': 'driver_provider_location'} for key, value in expected.items(): self.assertEqual(value, res[key]) @mock.patch.object(volume_utils, 'brick_get_connector_properties') @mock.patch.object(cinder.volume.manager.VolumeManager, '_attach_volume') @mock.patch.object(cinder.volume.manager.VolumeManager, '_detach_volume') @mock.patch.object(volume_utils, 'copy_volume') @mock.patch.object(volume_rpcapi.VolumeAPI, 'get_capabilities') @mock.patch.object(cinder.volume.volume_types, 'volume_types_encryption_changed') @ddt.data(False, True) def test_copy_volume_data_mgr(self, encryption_changed, mock_encryption_changed, mock_get_capabilities, mock_copy, mock_detach, mock_attach, mock_get_connector): """Test 
function of _copy_volume_data.""" src_vol = tests_utils.create_volume(self.context, size=1, host=CONF.host) dest_vol = tests_utils.create_volume(self.context, size=1, host=CONF.host) mock_get_connector.return_value = {} mock_encryption_changed.return_value = encryption_changed self.volume.driver._throttle = mock.MagicMock() attach_expected = [ mock.call(self.context, dest_vol, {}, remote=False, attach_encryptor=encryption_changed), mock.call(self.context, src_vol, {}, remote=False, attach_encryptor=encryption_changed)] detach_expected = [ mock.call(self.context, {'device': {'path': 'bar'}}, dest_vol, {}, force=True, remote=False, attach_encryptor=encryption_changed), mock.call(self.context, {'device': {'path': 'foo'}}, src_vol, {}, force=True, remote=False, attach_encryptor=encryption_changed)] attach_volume_returns = [ {'device': {'path': 'bar'}}, {'device': {'path': 'foo'}} ] # Test case for sparse_copy_volume = False mock_attach.side_effect = attach_volume_returns mock_get_capabilities.return_value = {} self.volume._copy_volume_data(self.context, src_vol, dest_vol) self.assertEqual(attach_expected, mock_attach.mock_calls) mock_copy.assert_called_with('foo', 'bar', 1024, '1M', sparse=False) self.assertEqual(detach_expected, mock_detach.mock_calls) # Test case for sparse_copy_volume = True mock_attach.reset_mock() mock_detach.reset_mock() mock_attach.side_effect = attach_volume_returns mock_get_capabilities.return_value = {'sparse_copy_volume': True} self.volume._copy_volume_data(self.context, src_vol, dest_vol) self.assertEqual(attach_expected, mock_attach.mock_calls) mock_copy.assert_called_with('foo', 'bar', 1024, '1M', sparse=True) self.assertEqual(detach_expected, mock_detach.mock_calls) # cleanup resource db.volume_destroy(self.context, src_vol['id']) db.volume_destroy(self.context, dest_vol['id']) @ddt.data(False, True) @mock.patch(driver_name + '.initialize_connection') @mock.patch(driver_name + '.create_export', return_value=None) @mock.patch(driver_name + '._connect_device') def test_attach_volume_enforce_mpath(self, enforce_mpath, connect_mock, export_mock, initialize_mock): properties = {'host': 'myhost', 'ip': '192.168.1.43', 'initiator': u'iqn.1994-05.com.redhat:d9be887375', 'multipath': False, 'os_type': 'linux2', 'platform': 'x86_64', 'enforce_multipath': enforce_mpath} data = {'target_discovered': True, 'target_iqn': 'iqn.2010-10.org.openstack:volume-00000001', 'target_portal': '127.0.0.0.1:3260', 'volume_id': 1, 'discard': False, 'encrypted': False, 'enforce_multipath': enforce_mpath} passed_conn = {'driver_volume_type': 'iscsi', 'data': data.copy()} initialize_mock.return_value = passed_conn expected_conn = {'driver_volume_type': 'iscsi', 'data': data.copy()} volume = tests_utils.create_volume( self.context, status='available', size=2) attach_info, vol = self.volume.driver._attach_volume(self.context, volume, properties) export_mock.assert_called_once_with(self.context, volume, properties) initialize_mock.assert_called_once_with(volume, properties) connect_mock.assert_called_once_with(expected_conn) self.assertEqual(connect_mock.return_value, attach_info) self.assertEqual(volume, vol) @mock.patch(driver_name + '.initialize_connection') @mock.patch(driver_name + '.create_export', return_value=None) @mock.patch(driver_name + '._connect_device') def test_attach_volume_encrypted(self, connect_mock, export_mock, initialize_mock): properties = {'host': 'myhost', 'ip': '192.168.1.43', 'initiator': u'iqn.1994-05.com.redhat:d9be887375', 'multipath': False, 'os_type': 'linux2', 
'platform': 'x86_64'} data = {'target_discovered': True, 'target_iqn': 'iqn.2010-10.org.openstack:volume-00000001', 'target_portal': '127.0.0.0.1:3260', 'volume_id': 1, 'discard': False, 'enforce_multipath': False} passed_conn = {'driver_volume_type': 'iscsi', 'data': data.copy()} initialize_mock.return_value = passed_conn # _attach_volume adds the encrypted value based on the volume expected_conn = {'driver_volume_type': 'iscsi', 'data': data.copy()} expected_conn['data']['encrypted'] = True volume = tests_utils.create_volume( self.context, status='available', size=2, encryption_key_id=fake.ENCRYPTION_KEY_ID) attach_info, vol = self.volume.driver._attach_volume(self.context, volume, properties) export_mock.assert_called_once_with(self.context, volume, properties) initialize_mock.assert_called_once_with(volume, properties) connect_mock.assert_called_once_with(expected_conn) self.assertEqual(connect_mock.return_value, attach_info) self.assertEqual(volume, vol) @mock.patch.object(os_brick.initiator.connector, 'get_connector_properties') @mock.patch.object(image_utils, 'fetch_to_raw') @mock.patch.object(cinder.volume.driver.VolumeDriver, '_attach_volume') @mock.patch.object(cinder.volume.driver.VolumeDriver, '_detach_volume') @mock.patch.object(cinder.volume.volume_utils, 'brick_attach_volume_encryptor') @mock.patch.object(cinder.volume.volume_utils, 'brick_detach_volume_encryptor') def test_copy_image_to_encrypted_volume(self, mock_detach_encryptor, mock_attach_encryptor, mock_detach_volume, mock_attach_volume, mock_fetch_to_raw, mock_get_connector_properties): properties = {} volume = tests_utils.create_volume( self.context, status='available', size=2, encryption_key_id=fake.ENCRYPTION_KEY_ID) volume_id = volume['id'] volume = db.volume_get(context.get_admin_context(), volume_id) image_service = fake_image.FakeImageService() local_path = 'dev/sda' attach_info = {'device': {'path': local_path}, 'conn': {'driver_volume_type': 'iscsi', 'data': {}, }} mock_get_connector_properties.return_value = properties mock_attach_volume.return_value = [attach_info, volume] self.volume.driver.copy_image_to_encrypted_volume( self.context, volume, image_service, fake.IMAGE_ID) encryption = {'encryption_key_id': fake.ENCRYPTION_KEY_ID} mock_attach_volume.assert_called_once_with( self.context, volume, properties) mock_attach_encryptor.assert_called_once_with( self.context, attach_info, encryption) mock_fetch_to_raw.assert_called_once_with( self.context, image_service, fake.IMAGE_ID, local_path, '1M', size=2, disable_sparse=False) mock_detach_encryptor.assert_called_once_with( attach_info, encryption) mock_detach_volume.assert_called_once_with( self.context, attach_info, volume, properties, force=True) @mock.patch.object(os_brick.initiator.connector, 'get_connector_properties') @mock.patch.object(image_utils, 'fetch_to_raw') @mock.patch.object(cinder.volume.driver.VolumeDriver, '_attach_volume') @mock.patch.object(cinder.volume.driver.VolumeDriver, '_detach_volume') @mock.patch.object(cinder.volume.volume_utils, 'brick_attach_volume_encryptor') @mock.patch.object(cinder.volume.volume_utils, 'brick_detach_volume_encryptor') def test_copy_image_to_encrypted_volume_failed_attach_encryptor( self, mock_detach_encryptor, mock_attach_encryptor, mock_detach_volume, mock_attach_volume, mock_fetch_to_raw, mock_get_connector_properties): properties = {} volume = tests_utils.create_volume( self.context, status='available', size=2, encryption_key_id=fake.ENCRYPTION_KEY_ID) volume_id = volume['id'] volume = 
db.volume_get(context.get_admin_context(), volume_id) image_service = fake_image.FakeImageService() attach_info = {'device': {'path': 'dev/sda'}, 'conn': {'driver_volume_type': 'iscsi', 'data': {}, }} mock_get_connector_properties.return_value = properties mock_attach_volume.return_value = [attach_info, volume] raised_exception = os_brick.exception.VolumeEncryptionNotSupported( volume_id = "123", volume_type = "abc") mock_attach_encryptor.side_effect = raised_exception self.assertRaises(os_brick.exception.VolumeEncryptionNotSupported, self.volume.driver.copy_image_to_encrypted_volume, self.context, volume, image_service, fake.IMAGE_ID) encryption = {'encryption_key_id': fake.ENCRYPTION_KEY_ID} mock_attach_volume.assert_called_once_with( self.context, volume, properties) mock_attach_encryptor.assert_called_once_with( self.context, attach_info, encryption) self.assertFalse(mock_fetch_to_raw.called) self.assertFalse(mock_detach_encryptor.called) mock_detach_volume.assert_called_once_with( self.context, attach_info, volume, properties, force=True) @mock.patch.object(os_brick.initiator.connector, 'get_connector_properties') @mock.patch.object(image_utils, 'fetch_to_raw') @mock.patch.object(cinder.volume.driver.VolumeDriver, '_attach_volume') @mock.patch.object(cinder.volume.driver.VolumeDriver, '_detach_volume') @mock.patch.object(cinder.volume.volume_utils, 'brick_attach_volume_encryptor') @mock.patch.object(cinder.volume.volume_utils, 'brick_detach_volume_encryptor') @ddt.data(exception.ImageUnacceptable( reason='fake', image_id=fake.IMAGE_ID), exception.ImageTooBig( reason='fake image size exceeded', image_id=fake.IMAGE_ID)) def test_copy_image_to_encrypted_volume_failed_fetch( self, excep, mock_detach_encryptor, mock_attach_encryptor, mock_detach_volume, mock_attach_volume, mock_fetch_to_raw, mock_get_connector_properties): properties = {} volume = tests_utils.create_volume( self.context, status='available', size=2, encryption_key_id=fake.ENCRYPTION_KEY_ID) volume_id = volume['id'] volume = db.volume_get(context.get_admin_context(), volume_id) image_service = fake_image.FakeImageService() local_path = 'dev/sda' attach_info = {'device': {'path': local_path}, 'conn': {'driver_volume_type': 'iscsi', 'data': {}, }} mock_get_connector_properties.return_value = properties mock_attach_volume.return_value = [attach_info, volume] mock_fetch_to_raw.side_effect = excep encryption = {'encryption_key_id': fake.ENCRYPTION_KEY_ID} self.assertRaises(type(excep), self.volume.driver.copy_image_to_encrypted_volume, self.context, volume, image_service, fake.IMAGE_ID) mock_attach_volume.assert_called_once_with( self.context, volume, properties) mock_attach_encryptor.assert_called_once_with( self.context, attach_info, encryption) mock_fetch_to_raw.assert_called_once_with( self.context, image_service, fake.IMAGE_ID, local_path, '1M', size=2, disable_sparse=False) mock_detach_encryptor.assert_called_once_with( attach_info, encryption) mock_detach_volume.assert_called_once_with( self.context, attach_info, volume, properties, force=True) @mock.patch('cinder.volume.driver.brick_exception') @mock.patch('cinder.tests.fake_driver.FakeLoggingVolumeDriver.' 'terminate_connection', side_effect=Exception) @mock.patch('cinder.tests.fake_driver.FakeLoggingVolumeDriver.' 'remove_export', side_effect=Exception) def test_detach_volume_force(self, remove_mock, terminate_mock, exc_mock): """Test force parameter on _detach_volume. 
On the driver if we receive the force parameter we will do everything even with Exceptions on disconnect, terminate, and remove export. """ connector = mock.Mock() connector.disconnect_volume.side_effect = Exception # TODO(geguileo): Remove this ExceptionChainer simulation once we # release OS-Brick version with it and bump min version. exc = exc_mock.ExceptionChainer.return_value exc.context.return_value.__enter__.return_value = exc exc.context.return_value.__exit__.return_value = True volume = {'id': fake.VOLUME_ID} attach_info = {'device': {}, 'connector': connector, 'conn': {'data': {}, }} # TODO(geguileo): Change TypeError to ExceptionChainer once we release # OS-Brick version with it and bump min version. self.assertRaises(TypeError, self.volume.driver._detach_volume, self.context, attach_info, volume, {}, force=True) self.assertTrue(connector.disconnect_volume.called) self.assertTrue(remove_mock.called) self.assertTrue(terminate_mock.called) self.assertEqual(3, exc.context.call_count) @ddt.data({'cfg_value': '10', 'valid': True}, {'cfg_value': 'auto', 'valid': True}, {'cfg_value': '1', 'valid': True}, {'cfg_value': '1.2', 'valid': True}, {'cfg_value': '100', 'valid': True}, {'cfg_value': '20.15', 'valid': True}, {'cfg_value': 'True', 'valid': False}, {'cfg_value': 'False', 'valid': False}, {'cfg_value': '10.0.0', 'valid': False}, {'cfg_value': '0.00', 'valid': True}, {'cfg_value': 'anything', 'valid': False},) @ddt.unpack def test_auto_max_subscription_ratio_options(self, cfg_value, valid): # This tests the max_over_subscription_ratio option as it is now # checked by a regex def _set_conf(config, value): config.set_override('max_over_subscription_ratio', value) config = conf.Configuration(None) config.append_config_values(driver.volume_opts) if valid: _set_conf(config, cfg_value) self.assertEqual(cfg_value, config.safe_get( 'max_over_subscription_ratio')) else: self.assertRaises(ValueError, _set_conf, config, cfg_value) class FibreChannelTestCase(BaseDriverTestCase): """Test Case for FibreChannelDriver.""" driver_name = "cinder.volume.driver.FibreChannelDriver" def test_initialize_connection(self): self.assertRaises(NotImplementedError, self.volume.driver.initialize_connection, {}, {}) def test_validate_connector(self): """validate_connector() successful use case. validate_connector() does not throw an exception when wwpns and wwnns are both set and both are not empty. 
""" connector = {'wwpns': ["not empty"], 'wwnns': ["not empty"]} self.volume.driver.validate_connector(connector) def test_validate_connector_no_wwpns(self): """validate_connector() throws exception when it has no wwpns.""" connector = {'wwnns': ["not empty"]} self.assertRaises(exception.InvalidConnectorException, self.volume.driver.validate_connector, connector) def test_validate_connector_empty_wwpns(self): """validate_connector() throws exception when it has empty wwpns.""" connector = {'wwpns': [], 'wwnns': ["not empty"]} self.assertRaises(exception.InvalidConnectorException, self.volume.driver.validate_connector, connector) def test_validate_connector_no_wwnns(self): """validate_connector() throws exception when it has no wwnns.""" connector = {'wwpns': ["not empty"]} self.assertRaises(exception.InvalidConnectorException, self.volume.driver.validate_connector, connector) def test_validate_connector_empty_wwnns(self): """validate_connector() throws exception when it has empty wwnns.""" connector = {'wwnns': [], 'wwpns': ["not empty"]} self.assertRaises(exception.InvalidConnectorException, self.volume.driver.validate_connector, connector) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/volume/test_image.py0000664000175000017500000011036200000000000022461 0ustar00zuulzuul00000000000000# Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""Tests for volume and images.""" import datetime import os import tempfile from unittest import mock from oslo_utils import imageutils from oslo_utils import units from cinder import db from cinder import exception from cinder.message import message_field from cinder import objects from cinder.objects import fields from cinder import quota from cinder.tests import fake_driver from cinder.tests.unit.api.v2 import fakes as v2_fakes from cinder.tests.unit import fake_constants as fake from cinder.tests.unit.image import fake as fake_image from cinder.tests.unit import utils as tests_utils from cinder.tests.unit import volume as base import cinder.volume from cinder.volume import manager as vol_manager QUOTAS = quota.QUOTAS NON_EXISTENT_IMAGE_ID = '003f540f-ec6b-4293-a3f9-7c68646b0f5c' class FakeImageService(object): def __init__(self, image_service=None): pass def show(self, context, image_id): return {'size': 2 * units.Gi, 'disk_format': 'raw', 'container_format': 'bare', 'status': 'active'} class CopyVolumeToImageTestCase(base.BaseVolumeTestCase): def fake_local_path(self, volume): return self.dst_path def setUp(self): super(CopyVolumeToImageTestCase, self).setUp() self.dst_fd, self.dst_path = tempfile.mkstemp() self.addCleanup(os.unlink, self.dst_path) os.close(self.dst_fd) self.mock_object(self.volume.driver, 'local_path', self.fake_local_path) self.mock_cache = mock.MagicMock() self.image_id = '70a599e0-31e7-49b7-b260-868f441e862b' self.image_meta = { 'id': self.image_id, 'container_format': 'bare', 'disk_format': 'raw' } self.volume_id = fake.VOLUME_ID self.addCleanup(db.volume_destroy, self.context, self.volume_id) self.volume_attrs = { 'id': self.volume_id, 'updated_at': datetime.datetime(1, 1, 1, 1, 1, 1), 'display_description': 'Test Desc', 'size': 20, 'status': 'uploading', 'host': 'dummy', 'volume_type_id': fake.VOLUME_TYPE_ID } self.mock_object(db.sqlalchemy.api, 'volume_type_get', v2_fakes.fake_volume_type_get) def test_copy_volume_to_image_status_available(self): # creating volume testdata self.volume_attrs['instance_uuid'] = None volume_type_id = db.volume_type_create( self.context, {'name': 'test', 'extra_specs': { 'image_service:store_id': 'fake_store' }}).get('id') self.volume_attrs['volume_type_id'] = volume_type_id db.volume_create(self.context, self.volume_attrs) # start test self.volume.copy_volume_to_image(self.context, self.volume_id, self.image_meta) volume = db.volume_get(self.context, self.volume_id) self.assertEqual('available', volume['status']) def test_copy_volume_to_image_with_conversion_disabled(self): self.flags(image_conversion_disable=True) self.volume_attrs['instance_uuid'] = None volume_type_id = db.volume_type_create( self.context, {'name': 'test', 'extra_specs': { 'image_service:store_id': 'fake_store' }}).get('id') self.volume_attrs['volume_type_id'] = volume_type_id db.volume_create(self.context, self.volume_attrs) image_meta = { 'id': self.image_id, 'container_format': 'ova', 'disk_format': 'vhdx' } self.assertRaises(exception.ImageConversionNotAllowed, self.volume.copy_volume_to_image, self.context, self.volume_id, image_meta) def test_copy_volume_to_image_over_image_quota(self): # creating volume testdata self.volume_attrs['instance_uuid'] = None volume = db.volume_create(self.context, self.volume_attrs) with mock.patch.object(self.volume.driver, 'copy_volume_to_image') as driver_copy_mock: driver_copy_mock.side_effect = exception.ImageLimitExceeded # test with image not in queued state self.assertRaises(exception.ImageLimitExceeded, 
self.volume.copy_volume_to_image, self.context, self.volume_id, self.image_meta) # Assert a user message was created self.volume.message_api.create.assert_called_once_with( self.context, message_field.Action.COPY_VOLUME_TO_IMAGE, resource_uuid=volume['id'], exception=mock.ANY, detail=message_field.Detail.FAILED_TO_UPLOAD_VOLUME) def test_copy_volume_to_image_instance_deleted(self): # During uploading volume to image if instance is deleted, # volume should be in available status. self.image_meta['id'] = 'a440c04b-79fa-479c-bed1-0b816eaec379' # Creating volume testdata self.volume_attrs['instance_uuid'] = 'b21f957d-a72f-4b93-b5a5-' \ '45b1161abb02' volume_type_id = db.volume_type_create( self.context, {'name': 'test', 'extra_specs': { 'image_service:store_id': 'fake_store' }}).get('id') self.volume_attrs['volume_type_id'] = volume_type_id db.volume_create(self.context, self.volume_attrs) method = 'volume_update_status_based_on_attachment' with mock.patch.object(db, method, wraps=getattr(db, method)) as mock_update: # Start test self.volume.copy_volume_to_image(self.context, self.volume_id, self.image_meta) # Check 'volume_update_status_after_copy_volume_to_image' # is called 1 time self.assertEqual(1, mock_update.call_count) # Check volume status has changed to available because # instance is deleted volume = db.volume_get(self.context, self.volume_id) self.assertEqual('available', volume['status']) def test_copy_volume_to_image_status_use(self): self.image_meta['id'] = 'a440c04b-79fa-479c-bed1-0b816eaec379' # creating volume testdata volume_type_id = db.volume_type_create( self.context, {'name': 'test', 'extra_specs': { 'image_service:store_id': 'fake_store' }}).get('id') self.volume_attrs['volume_type_id'] = volume_type_id db.volume_create(self.context, self.volume_attrs) # start test self.volume.copy_volume_to_image(self.context, self.volume_id, self.image_meta) volume = db.volume_get(self.context, self.volume_id) self.assertEqual('available', volume['status']) def test_copy_volume_to_image_exception(self): self.image_meta['id'] = NON_EXISTENT_IMAGE_ID # creating volume testdata volume_type_id = db.volume_type_create( self.context, {'name': 'test', 'extra_specs': { 'image_service:store_id': 'fake_store' }}).get('id') self.volume_attrs['volume_type_id'] = volume_type_id self.volume_attrs['status'] = 'in-use' db.volume_create(self.context, self.volume_attrs) # start test self.assertRaises(exception.ImageNotFound, self.volume.copy_volume_to_image, self.context, self.volume_id, self.image_meta) volume = db.volume_get(self.context, self.volume_id) self.assertEqual('available', volume['status']) def test_copy_volume_to_image_driver_not_initialized(self): # creating volume testdata db.volume_create(self.context, self.volume_attrs) # set initialized to False self.volume.driver._initialized = False # start test self.assertRaises(exception.DriverNotInitialized, self.volume.copy_volume_to_image, self.context, self.volume_id, self.image_meta) volume = db.volume_get(self.context, self.volume_id) self.assertEqual('available', volume.status) def test_copy_volume_to_image_driver_exception(self): self.image_meta['id'] = self.image_id image_service = fake_image.FakeImageService() # create new image in queued state queued_image_id = 'd5133f15-f753-41bd-920a-06b8c49275d9' queued_image_meta = image_service.show(self.context, self.image_id) queued_image_meta['id'] = queued_image_id queued_image_meta['status'] = 'queued' image_service.create(self.context, queued_image_meta) # create new image in saving state 
saving_image_id = '5c6eec33-bab4-4e7d-b2c9-88e2d0a5f6f2' saving_image_meta = image_service.show(self.context, self.image_id) saving_image_meta['id'] = saving_image_id saving_image_meta['status'] = 'saving' image_service.create(self.context, saving_image_meta) # create volume self.volume_attrs['status'] = 'available' self.volume_attrs['instance_uuid'] = None db.volume_create(self.context, self.volume_attrs) with mock.patch.object(self.volume.driver, 'copy_volume_to_image') as driver_copy_mock: driver_copy_mock.side_effect = exception.VolumeDriverException( "Error") # test with image not in queued state self.assertRaises(exception.VolumeDriverException, self.volume.copy_volume_to_image, self.context, self.volume_id, self.image_meta) # Make sure we are passing an OVO instance and not an ORM instance # to the driver self.assertIsInstance(driver_copy_mock.call_args[0][1], objects.Volume) volume = db.volume_get(self.context, self.volume_id) self.assertEqual('available', volume['status']) # image shouldn't be deleted if it is not in queued state image_service.show(self.context, self.image_id) # test with image in queued state self.assertRaises(exception.VolumeDriverException, self.volume.copy_volume_to_image, self.context, self.volume_id, queued_image_meta) volume = db.volume_get(self.context, self.volume_id) self.assertEqual('available', volume['status']) # queued image should be deleted self.assertRaises(exception.ImageNotFound, image_service.show, self.context, queued_image_id) # test with image in saving state self.assertRaises(exception.VolumeDriverException, self.volume.copy_volume_to_image, self.context, self.volume_id, saving_image_meta) volume = db.volume_get(self.context, self.volume_id) self.assertEqual('available', volume['status']) # image in saving state should be deleted self.assertRaises(exception.ImageNotFound, image_service.show, self.context, saving_image_id) @mock.patch.object(QUOTAS, 'reserve') @mock.patch.object(QUOTAS, 'commit') @mock.patch.object(vol_manager.VolumeManager, 'create_volume') @mock.patch.object(fake_driver.FakeLoggingVolumeDriver, 'copy_volume_to_image') def _test_copy_volume_to_image_with_image_volume( self, mock_copy, mock_create, mock_quota_commit, mock_quota_reserve): self.volume.driver.configuration.image_upload_use_cinder_backend = True self.addCleanup(fake_image.FakeImageService_reset) image_service = fake_image.FakeImageService() def add_location_wrapper(ctx, id, uri, metadata): try: volume = db.volume_get(ctx, id) self.assertEqual(ctx.project_id, volume['metadata']['image_owner']) except exception.VolumeNotFound: pass return image_service.add_location_orig(ctx, id, uri, metadata) image_service.add_location_orig = image_service.add_location image_service.add_location = add_location_wrapper image_id = '5c6eec33-bab4-4e7d-b2c9-88e2d0a5f6f2' self.image_meta['id'] = image_id self.image_meta['status'] = 'queued' image_service.create(self.context, self.image_meta) # creating volume testdata self.volume_attrs['instance_uuid'] = None self.volume_attrs['snapshot_id'] = fake.SNAPSHOT_ID volume_type_id = db.volume_type_create( self.context, {'name': 'test', 'extra_specs': { 'image_service:store_id': 'fake_store' }}).get('id') self.volume_attrs['volume_type_id'] = volume_type_id db.volume_create(self.context, self.volume_attrs) def fake_create(context, volume, **kwargs): db.volume_update(context, volume.id, {'status': 'available'}) mock_create.side_effect = fake_create # start test self.volume.copy_volume_to_image(self.context, self.volume_id, self.image_meta) 
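# The upload should leave the source volume 'available'; because
# image_upload_use_cinder_backend is enabled above, the backend is
# expected to register the cloned image volume as an image location.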
volume = db.volume_get(self.context, self.volume_id) self.assertEqual('available', volume['status']) # return create image image = image_service.show(self.context, image_id) image_service.delete(self.context, image_id) return image def test_copy_volume_to_image_with_image_volume(self): image = self._test_copy_volume_to_image_with_image_volume() self.assertTrue(image['locations'][0]['url'].startswith('cinder://')) image_volume_id = image['locations'][0]['url'].split('/')[-1] # The image volume does NOT include the snapshot_id, and include the # source_volid which is the uploaded-volume id. vol_ref = db.volume_get(self.context, image_volume_id) self.assertIsNone(vol_ref['snapshot_id']) self.assertEqual(vol_ref['source_volid'], self.volume_id) def test_copy_volume_to_image_with_image_volume_qcow2(self): self.image_meta['disk_format'] = 'qcow2' image = self._test_copy_volume_to_image_with_image_volume() self.assertNotIn('locations', image) @mock.patch.object(vol_manager.VolumeManager, 'delete_volume') @mock.patch.object(fake_image._FakeImageService, 'add_location', side_effect=exception.Invalid) def test_copy_volume_to_image_with_image_volume_failure( self, mock_add_location, mock_delete): image = self._test_copy_volume_to_image_with_image_volume() self.assertNotIn('locations', image) self.assertTrue(mock_delete.called) @mock.patch('cinder.volume.manager.' 'VolumeManager._clone_image_volume') @mock.patch('cinder.volume.manager.' 'VolumeManager._create_image_cache_volume_entry') def test_create_image_cache_volume_entry(self, mock_cache_entry, mock_clone_image_volume): image_id = self.image_id image_meta = self.image_meta self.mock_cache.get_entry.return_value = mock_cache_entry if mock_cache_entry: # Entry is in cache, so basically don't do anything. # Make sure we didn't try and create a cache entry self.assertFalse(self.mock_cache.ensure_space.called) self.assertFalse(self.mock_cache.create_cache_entry.called) else: result = self.volume._create_image_cache_volume_entry( self.context, mock_clone_image_volume, image_id, image_meta) self.assertNotEqual(False, result) cache_entry = self.image_volume_cache.get_entry( self.context, mock_clone_image_volume, image_id, image_meta) self.assertIsNotNone(cache_entry) class ImageVolumeCacheTestCase(base.BaseVolumeTestCase): def setUp(self): super(ImageVolumeCacheTestCase, self).setUp() self.volume.driver.set_initialized() @mock.patch('oslo_utils.importutils.import_object') def test_cache_configs(self, mock_import_object): opts = { 'image_volume_cache_enabled': True, 'image_volume_cache_max_size_gb': 100, 'image_volume_cache_max_count': 20 } def conf_get(option): if option in opts: return opts[option] else: return None mock_driver = mock.Mock() mock_driver.configuration.safe_get.side_effect = conf_get mock_driver.configuration.extra_capabilities = 'null' def import_obj(*args, **kwargs): return mock_driver mock_import_object.side_effect = import_obj manager = vol_manager.VolumeManager(volume_driver=mock_driver) self.assertIsNotNone(manager) self.assertIsNotNone(manager.image_volume_cache) self.assertEqual(100, manager.image_volume_cache.max_cache_size_gb) self.assertEqual(20, manager.image_volume_cache.max_cache_size_count) def test_delete_image_volume(self): volume_params = { 'status': 'creating', 'host': 'some_host', 'cluster_name': 'some_cluster', 'size': 1 } volume_api = cinder.volume.api.API() volume = tests_utils.create_volume(self.context, **volume_params) volume.status = 'available' volume.save() image_id = '70a599e0-31e7-49b7-b260-868f441e862b' 
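# Seed an image-volume cache entry pointing at this volume so the
# delete below can be verified to purge the cache entry as well.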
db.image_volume_cache_create(self.context, volume['host'], volume_params['cluster_name'], image_id, datetime.datetime.utcnow(), volume['id'], volume['size']) volume_api.delete(self.context, volume) entry = db.image_volume_cache_get_by_volume_id(self.context, volume['id']) self.assertIsNone(entry) def test_delete_volume_with_keymanager_exception(self): volume_params = { 'host': 'some_host', 'size': 1 } volume_api = cinder.volume.api.API() volume = tests_utils.create_volume(self.context, **volume_params) with mock.patch.object( volume_api.key_manager, 'delete') as key_del_mock: key_del_mock.side_effect = Exception("Key not found") volume_api.delete(self.context, volume) class ImageVolumeTestCases(base.BaseVolumeTestCase): def setUp(self): super(ImageVolumeTestCases, self).setUp() db.volume_type_create(self.context, v2_fakes.fake_default_type_get( fake.VOLUME_TYPE2_ID)) self.vol_type = db.volume_type_get_by_name(self.context, 'vol_type_name') @mock.patch('cinder.volume.drivers.lvm.LVMVolumeDriver.' 'create_cloned_volume') @mock.patch('cinder.quota.QUOTAS.rollback') @mock.patch('cinder.quota.QUOTAS.commit') @mock.patch('cinder.quota.QUOTAS.reserve', return_value=["RESERVATION"]) def test_clone_image_volume(self, mock_reserve, mock_commit, mock_rollback, mock_cloned_volume): # Confirm cloning does not copy quota use field vol = tests_utils.create_volume(self.context, use_quota=False, **self.volume_params) # unnecessary attributes should be removed from image volume vol.consistencygroup = None result = self.volume._clone_image_volume(self.context, vol, {'id': fake.VOLUME_ID}) self.assertNotEqual(False, result) self.assertTrue(result.use_quota) # Original was False mock_reserve.assert_called_once_with(self.context, volumes=1, volumes_vol_type_name=1, gigabytes=vol.size, gigabytes_vol_type_name=vol.size) mock_commit.assert_called_once_with(self.context, ["RESERVATION"], project_id=vol.project_id) @mock.patch('cinder.quota.QUOTAS.rollback') @mock.patch('cinder.quota.QUOTAS.commit') @mock.patch('cinder.quota.QUOTAS.reserve', return_value=["RESERVATION"]) def test_clone_image_volume_creation_failure(self, mock_reserve, mock_commit, mock_rollback): vol = tests_utils.create_volume(self.context, **self.volume_params) with mock.patch.object(objects, 'Volume', side_effect=ValueError): self.assertIsNone(self.volume._clone_image_volume( self.context, vol, {'id': fake.VOLUME_ID})) mock_reserve.assert_called_once_with(self.context, volumes=1, volumes_vol_type_name=1, gigabytes=vol.size, gigabytes_vol_type_name=vol.size) mock_rollback.assert_called_once_with(self.context, ["RESERVATION"]) @mock.patch('cinder.image.image_utils.qemu_img_info') def test_create_volume_from_image_cloned_status_available( self, mock_qemu_info): """Test create volume from image via cloning. Verify that after cloning image to volume, it is in available state and is bootable. """ image_info = imageutils.QemuImgInfo() image_info.virtual_size = '1073741824' mock_qemu_info.return_value = image_info volume = self._create_volume_from_image() self.assertEqual('available', volume['status']) self.assertTrue(volume['bootable']) self.volume.delete_volume(self.context, volume) @mock.patch('cinder.image.image_utils.qemu_img_info') def test_create_volume_from_image_not_cloned_status_available( self, mock_qemu_info): """Test create volume from image via full copy. Verify that after copying image to volume, it is in available state and is bootable. 
""" image_info = imageutils.QemuImgInfo() image_info.virtual_size = '1073741824' mock_qemu_info.return_value = image_info volume = self._create_volume_from_image(fakeout_clone_image=True) self.assertEqual('available', volume['status']) self.assertTrue(volume['bootable']) self.volume.delete_volume(self.context, volume) def test_create_volume_from_image_exception(self): """Test create volume from a non-existing image. Verify that create volume from a non-existing image, the volume status is 'error' and is not bootable. """ dst_fd, dst_path = tempfile.mkstemp() os.close(dst_fd) self.mock_object(self.volume.driver, 'local_path', lambda x: dst_path) # creating volume testdata kwargs = {'display_description': 'Test Desc', 'size': 20, 'availability_zone': 'fake_availability_zone', 'status': 'creating', 'attach_status': fields.VolumeAttachStatus.DETACHED, 'host': 'dummy'} volume = objects.Volume(context=self.context, **kwargs) volume.create() self.assertRaises(exception.ImageNotFound, self.volume.create_volume, self.context, volume, {'image_id': NON_EXISTENT_IMAGE_ID}) volume = objects.Volume.get_by_id(self.context, volume.id) self.assertEqual("error", volume['status']) self.assertFalse(volume['bootable']) # cleanup volume.destroy() os.unlink(dst_path) @mock.patch('cinder.image.image_utils.qemu_img_info') def test_create_volume_from_image_copy_exception_rescheduling( self, mock_qemu_info): """Test create volume with ImageCopyFailure This exception should not trigger rescheduling and allocated_capacity should be incremented so we're having assert for that here. """ image_info = imageutils.QemuImgInfo() image_info.virtual_size = '1073741824' mock_qemu_info.return_value = image_info def fake_copy_image_to_volume(context, volume, image_service, image_id): raise exception.ImageCopyFailure() self.mock_object(self.volume.driver, 'copy_image_to_volume', fake_copy_image_to_volume) mock_delete = self.mock_object(self.volume.driver, 'delete_volume') self.assertRaises(exception.ImageCopyFailure, self._create_volume_from_image) # NOTE(dulek): Rescheduling should not occur, so lets assert that # allocated_capacity is incremented. self.assertDictEqual(self.volume.stats['pools'], {'_pool0': {'allocated_capacity_gb': 1}}) # NOTE(dulek): As we haven't rescheduled, make sure no delete_volume # was called. self.assertFalse(mock_delete.called) @mock.patch('cinder.image.image_utils.qemu_img_info') def test_create_volume_from_image_with_img_too_big( self, mock_qemu_info): """Test create volume with ImageCopyFailure This exception should not trigger rescheduling and allocated_capacity should be incremented so we're having assert for that here. 
""" image_info = imageutils.QemuImgInfo() image_info.virtual_size = '1073741824' mock_qemu_info.return_value = image_info def fake_copy_image_to_volume(context, volume, image_service, image_id, disable_sparse=False): raise exception.ImageTooBig(image_id=image_id, reason='') self.mock_object(self.volume.driver, 'copy_image_to_volume', fake_copy_image_to_volume) self.assertRaises(exception.ImageTooBig, self._create_volume_from_image) @mock.patch('cinder.volume.volume_utils.brick_get_connector_properties') @mock.patch('cinder.volume.volume_utils.brick_get_connector') @mock.patch('cinder.volume.driver.BaseVD.secure_file_operations_enabled') @mock.patch('cinder.volume.driver.BaseVD._detach_volume') @mock.patch('cinder.image.image_utils.qemu_img_info') def test_create_volume_from_image_unavailable( self, mock_qemu_info, mock_detach, mock_secure, *args): """Test create volume with ImageCopyFailure We'll raise an exception inside _connect_device after volume has already been attached to confirm that it detaches the volume. """ mock_secure.side_effect = NameError image_info = imageutils.QemuImgInfo() image_info.virtual_size = '1073741824' mock_qemu_info.return_value = image_info unbound_copy_method = cinder.volume.driver.BaseVD.copy_image_to_volume bound_copy_method = unbound_copy_method.__get__(self.volume.driver) with mock.patch.object(self.volume.driver, 'copy_image_to_volume', side_effect=bound_copy_method): self.assertRaises(exception.ImageCopyFailure, self._create_volume_from_image, fakeout_copy_image_to_volume=False) # We must have called detach method. self.assertEqual(1, mock_detach.call_count) @mock.patch('cinder.volume.volume_utils.brick_get_connector_properties') @mock.patch('cinder.volume.volume_utils.brick_get_connector') @mock.patch('cinder.volume.driver.BaseVD._connect_device') @mock.patch('cinder.volume.driver.BaseVD._detach_volume') @mock.patch('cinder.image.image_utils.qemu_img_info') def test_create_volume_from_image_unavailable_no_attach_info( self, mock_qemu_info, mock_detach, mock_connect, *args): """Test create volume with ImageCopyFailure We'll raise an exception on _connect_device call to confirm that it detaches the volume even if the exception doesn't have attach_info. """ mock_connect.side_effect = NameError image_info = imageutils.QemuImgInfo() image_info.virtual_size = '1073741824' mock_qemu_info.return_value = image_info unbound_copy_method = cinder.volume.driver.BaseVD.copy_image_to_volume bound_copy_method = unbound_copy_method.__get__(self.volume.driver) with mock.patch.object(self.volume.driver, 'copy_image_to_volume', side_effect=bound_copy_method): self.assertRaises(exception.ImageCopyFailure, self._create_volume_from_image, fakeout_copy_image_to_volume=False) # We must have called detach method. self.assertEqual(1, mock_detach.call_count) @mock.patch('cinder.image.image_utils.qemu_img_info') def test_create_volume_from_image_clone_image_volume(self, mock_qemu_info): """Test create volume from image via image volume. Verify that after cloning image to volume, it is in available state and is bootable. """ image_info = imageutils.QemuImgInfo() image_info.virtual_size = '1073741824' mock_qemu_info.return_value = image_info volume = self._create_volume_from_image(clone_image_volume=True) self.assertEqual('available', volume['status']) self.assertTrue(volume['bootable']) self.volume.delete_volume(self.context, volume) def test_create_volume_from_exact_sized_image(self): """Test create volume from an image of the same size. 
Verify that an image which is exactly the same size as the volume, will work correctly. """ try: volume_id = None volume_api = cinder.volume.api.API( image_service=FakeImageService()) volume = volume_api.create(self.context, 2, 'name', 'description', image_id=self.FAKE_UUID, volume_type=self.vol_type) volume_id = volume['id'] self.assertEqual('creating', volume['status']) finally: # cleanup db.volume_destroy(self.context, volume_id) def test_create_volume_from_oversized_image(self): """Verify that an image which is too big will fail correctly.""" class _ModifiedFakeImageService(FakeImageService): def show(self, context, image_id): return {'size': 2 * units.Gi + 1, 'disk_format': 'raw', 'container_format': 'bare', 'status': 'active'} volume_api = cinder.volume.api.API( image_service=_ModifiedFakeImageService()) self.assertRaises(exception.InvalidInput, volume_api.create, self.context, 2, 'name', 'description', image_id=1) def test_create_volume_with_mindisk_error(self): """Verify volumes smaller than image minDisk will cause an error.""" class _ModifiedFakeImageService(FakeImageService): def show(self, context, image_id): return {'size': 2 * units.Gi, 'disk_format': 'raw', 'container_format': 'bare', 'min_disk': 5, 'status': 'active'} volume_api = cinder.volume.api.API( image_service=_ModifiedFakeImageService()) self.assertRaises(exception.InvalidInput, volume_api.create, self.context, 2, 'name', 'description', image_id=1) def test_create_volume_with_deleted_imaged(self): """Verify create volume from image will cause an error.""" class _ModifiedFakeImageService(FakeImageService): def show(self, context, image_id): return {'size': 2 * units.Gi, 'disk_format': 'raw', 'container_format': 'bare', 'min_disk': 5, 'status': 'deleted', 'id': image_id} volume_api = cinder.volume.api.API( image_service=_ModifiedFakeImageService()) self.assertRaises(exception.InvalidInput, volume_api.create, self.context, 2, 'name', 'description', image_id=1) def test_copy_volume_to_image_maintenance(self): """Test copy volume to image in maintenance.""" test_meta1 = {'fake_key1': 'fake_value1', 'fake_key2': 'fake_value2'} volume = tests_utils.create_volume(self.context, metadata=test_meta1, **self.volume_params) volume['status'] = 'maintenance' volume_api = cinder.volume.api.API() self.assertRaises(exception.InvalidVolume, volume_api.copy_volume_to_image, self.context, volume, test_meta1, force=True) class CopyVolumeToImagePrivateFunctionsTestCase( cinder.tests.unit.test.TestCase): @mock.patch('cinder.volume.api.API.get_volume_image_metadata', return_value={'some_key': 'some_value', 'cinder_encryption_key_id': 'stale_value'}) def test_merge_volume_image_meta(self, mock_get_img_meta): # this is what we're passing to copy_volume_to_image image_meta = { 'container_format': 'bare', 'disk_format': 'raw', 'cinder_encryption_key_id': 'correct_value' } self.assertNotIn('properties', image_meta) volume_api = cinder.volume.api.API() volume_api._merge_volume_image_meta(None, None, image_meta) # we've got 'properties' now self.assertIn('properties', image_meta) # verify the key_id is what we expect self.assertEqual(image_meta['cinder_encryption_key_id'], 'correct_value') translate = cinder.image.glance.GlanceImageService._translate_to_glance sent_to_glance = translate(image_meta) # this is correct, glance gets a "flat" dict of properties self.assertNotIn('properties', sent_to_glance) # make sure the image would be created in Glance with the # correct key_id self.assertEqual(image_meta['cinder_encryption_key_id'], 
sent_to_glance['cinder_encryption_key_id']) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/volume/test_init_host.py0000664000175000017500000003747400000000000023413 0ustar00zuulzuul00000000000000# Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Tests for volume init host method cases.""" from unittest import mock from oslo_config import cfg from oslo_utils import importutils from cinder import context from cinder import exception from cinder import objects from cinder.tests.unit import utils as tests_utils from cinder.tests.unit import volume as base from cinder.volume import driver from cinder.volume import volume_migration as volume_migration from cinder.volume import volume_utils CONF = cfg.CONF class VolumeInitHostTestCase(base.BaseVolumeTestCase): def setUp(self): super(VolumeInitHostTestCase, self).setUp() self.service_id = 1 @mock.patch('cinder.manager.CleanableManager.init_host') def test_init_host_count_allocated_capacity(self, init_host_mock): vol0 = tests_utils.create_volume( self.context, size=100, host=CONF.host) vol1 = tests_utils.create_volume( self.context, size=128, host=volume_utils.append_host(CONF.host, 'pool0')) vol2 = tests_utils.create_volume( self.context, size=256, host=volume_utils.append_host(CONF.host, 'pool0')) vol3 = tests_utils.create_volume( self.context, size=512, host=volume_utils.append_host(CONF.host, 'pool1')) vol4 = tests_utils.create_volume( self.context, size=1024, host=volume_utils.append_host(CONF.host, 'pool2')) self.volume.init_host(service_id=self.service_id) init_host_mock.assert_called_once_with( service_id=self.service_id, added_to_cluster=None) stats = self.volume.stats self.assertEqual(2020, stats['allocated_capacity_gb']) self.assertEqual( 384, stats['pools']['pool0']['allocated_capacity_gb']) self.assertEqual( 512, stats['pools']['pool1']['allocated_capacity_gb']) self.assertEqual( 1024, stats['pools']['pool2']['allocated_capacity_gb']) # NOTE(jdg): On the create we have host='xyz', BUT # here we do a db.volume_get, and now the host has # been updated to xyz#pool-name. 
Note this is # done via the managers init, which calls the drivers # get_pool method, which in the legacy case is going # to be volume_backend_name or None vol0.refresh() expected_host = volume_utils.append_host(CONF.host, 'fake') self.assertEqual(expected_host, vol0.host) self.volume.delete_volume(self.context, vol0) self.volume.delete_volume(self.context, vol1) self.volume.delete_volume(self.context, vol2) self.volume.delete_volume(self.context, vol3) self.volume.delete_volume(self.context, vol4) def test_init_host_count_allocated_capacity_batch_retrieval(self): old_val = CONF.init_host_max_objects_retrieval CONF.init_host_max_objects_retrieval = 1 try: self.test_init_host_count_allocated_capacity() finally: CONF.init_host_max_objects_retrieval = old_val @mock.patch('cinder.manager.CleanableManager.init_host') def test_init_host_count_allocated_capacity_cluster(self, init_host_mock): cluster_name = 'mycluster' self.volume.cluster = cluster_name # All these volumes belong to the same cluster, so we will calculate # the capacity of them all because we query the DB by cluster_name. tests_utils.create_volume(self.context, size=100, host=CONF.host, cluster_name=cluster_name) tests_utils.create_volume( self.context, size=128, cluster_name=cluster_name, host=volume_utils.append_host(CONF.host, 'pool0')) tests_utils.create_volume( self.context, size=256, cluster_name=cluster_name, host=volume_utils.append_host(CONF.host + '2', 'pool0')) tests_utils.create_volume( self.context, size=512, cluster_name=cluster_name, host=volume_utils.append_host(CONF.host + '2', 'pool1')) tests_utils.create_volume( self.context, size=1024, cluster_name=cluster_name, host=volume_utils.append_host(CONF.host + '3', 'pool2')) # These don't belong to the cluster so they will be ignored tests_utils.create_volume( self.context, size=1024, host=volume_utils.append_host(CONF.host, 'pool2')) tests_utils.create_volume( self.context, size=1024, cluster_name=cluster_name + '1', host=volume_utils.append_host(CONF.host + '3', 'pool2')) self.volume.init_host(service_id=self.service_id) init_host_mock.assert_called_once_with( service_id=self.service_id, added_to_cluster=None) stats = self.volume.stats self.assertEqual(2020, stats['allocated_capacity_gb']) self.assertEqual( 384, stats['pools']['pool0']['allocated_capacity_gb']) self.assertEqual( 512, stats['pools']['pool1']['allocated_capacity_gb']) self.assertEqual( 1024, stats['pools']['pool2']['allocated_capacity_gb']) @mock.patch.object(driver.BaseVD, "update_provider_info") def test_init_host_sync_provider_info(self, mock_update): vol0 = tests_utils.create_volume( self.context, size=1, host=CONF.host) vol1 = tests_utils.create_volume( self.context, size=1, host=CONF.host) vol2 = tests_utils.create_volume( self.context, size=1, host=CONF.host, status='creating') snap0 = tests_utils.create_snapshot(self.context, vol0.id) snap1 = tests_utils.create_snapshot(self.context, vol1.id) # Return values for update_provider_info volumes = [{'id': vol0.id, 'provider_id': '1 2 xxxx'}, {'id': vol1.id, 'provider_id': '3 4 yyyy'}] snapshots = [{'id': snap0.id, 'provider_id': '5 6 xxxx'}, {'id': snap1.id, 'provider_id': '7 8 yyyy'}] mock_update.return_value = (volumes, snapshots) # initialize self.volume.init_host(service_id=self.service_id) # Grab volume and snapshot objects vol0_obj = objects.Volume.get_by_id(context.get_admin_context(), vol0.id) vol1_obj = objects.Volume.get_by_id(context.get_admin_context(), vol1.id) vol2_obj = objects.Volume.get_by_id(context.get_admin_context(), vol2.id) 
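        # Snapshots have their provider_id synced by init_host in the same
        # pass as volumes, so refresh them from the DB before asserting.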
snap0_obj = objects.Snapshot.get_by_id(self.context, snap0.id) snap1_obj = objects.Snapshot.get_by_id(self.context, snap1.id) # Check updated provider ids self.assertEqual('1 2 xxxx', vol0_obj.provider_id) self.assertEqual('3 4 yyyy', vol1_obj.provider_id) self.assertIsNone(vol2_obj.provider_id) self.assertEqual('5 6 xxxx', snap0_obj.provider_id) self.assertEqual('7 8 yyyy', snap1_obj.provider_id) # Clean up self.volume.delete_snapshot(self.context, snap0_obj) self.volume.delete_snapshot(self.context, snap1_obj) self.volume.delete_volume(self.context, vol0) self.volume.delete_volume(self.context, vol1) def test_init_host_sync_provider_info_batch_retrieval(self): old_val = CONF.init_host_max_objects_retrieval CONF.init_host_max_objects_retrieval = 1 try: self.test_init_host_sync_provider_info() finally: CONF.init_host_max_objects_retrieval = old_val @mock.patch.object(driver.BaseVD, "update_provider_info") def test_init_host_sync_provider_info_no_update(self, mock_update): vol0 = tests_utils.create_volume( self.context, size=1, host=CONF.host) vol1 = tests_utils.create_volume( self.context, size=1, host=CONF.host) snap0 = tests_utils.create_snapshot(self.context, vol0.id) snap1 = tests_utils.create_snapshot(self.context, vol1.id) mock_update.return_value = ([], []) # initialize self.volume.init_host(service_id=self.service_id) # Grab volume and snapshot objects vol0_obj = objects.Volume.get_by_id(context.get_admin_context(), vol0.id) vol1_obj = objects.Volume.get_by_id(context.get_admin_context(), vol1.id) snap0_obj = objects.Snapshot.get_by_id(self.context, snap0.id) snap1_obj = objects.Snapshot.get_by_id(self.context, snap1.id) # Check provider ids are not changed self.assertIsNone(vol0_obj.provider_id) self.assertIsNone(vol1_obj.provider_id) self.assertIsNone(snap0_obj.provider_id) self.assertIsNone(snap1_obj.provider_id) # Clean up self.volume.delete_snapshot(self.context, snap0_obj) self.volume.delete_snapshot(self.context, snap1_obj) self.volume.delete_volume(self.context, vol0) self.volume.delete_volume(self.context, vol1) @mock.patch.object(driver.BaseVD, "update_provider_info") def test_init_host_sync_provider_info_no_update_cluster(self, mock_update): cluster_name = 'mycluster' self.volume.cluster = cluster_name vol0 = tests_utils.create_volume( self.context, size=1, host=CONF.host, cluster_name=cluster_name) vol1 = tests_utils.create_volume( self.context, size=1, host=CONF.host + '2', cluster_name=cluster_name) vol2 = tests_utils.create_volume( self.context, size=1, host=CONF.host) vol3 = tests_utils.create_volume( self.context, size=1, host=CONF.host, cluster_name=cluster_name + '2') snap0 = tests_utils.create_snapshot(self.context, vol0.id) snap1 = tests_utils.create_snapshot(self.context, vol1.id) tests_utils.create_snapshot(self.context, vol2.id) tests_utils.create_snapshot(self.context, vol3.id) mock_update.return_value = ([], []) # initialize self.volume.init_host(service_id=self.service_id) # Grab volume and snapshot objects vol0_obj = objects.Volume.get_by_id(context.get_admin_context(), vol0.id) vol1_obj = objects.Volume.get_by_id(context.get_admin_context(), vol1.id) snap0_obj = objects.Snapshot.get_by_id(self.context, snap0.id) snap1_obj = objects.Snapshot.get_by_id(self.context, snap1.id) self.assertSetEqual({vol0.id, vol1.id}, {vol.id for vol in mock_update.call_args[0][0]}) self.assertSetEqual({snap0.id, snap1.id}, {snap.id for snap in mock_update.call_args[0][1]}) # Check provider ids are not changed self.assertIsNone(vol0_obj.provider_id) 
self.assertIsNone(vol1_obj.provider_id) self.assertIsNone(snap0_obj.provider_id) self.assertIsNone(snap1_obj.provider_id) # Clean up self.volume.delete_snapshot(self.context, snap0_obj) self.volume.delete_snapshot(self.context, snap1_obj) self.volume.delete_volume(self.context, vol0) self.volume.delete_volume(self.context, vol1) @mock.patch('cinder.volume.manager.VolumeManager.' '_include_resources_in_cluster') def test_init_host_cluster_not_changed(self, include_in_cluster_mock): self.volume.init_host(added_to_cluster=False, service_id=self.service_id) include_in_cluster_mock.assert_not_called() @mock.patch('cinder.objects.group.GroupList.include_in_cluster') @mock.patch('cinder.objects.snapshot.SnapshotList.get_all', return_value=[]) @mock.patch('cinder.objects.volume.VolumeList.get_all', return_value=[]) @mock.patch('cinder.objects.volume.VolumeList.include_in_cluster') @mock.patch('cinder.objects.consistencygroup.ConsistencyGroupList.' 'include_in_cluster') @mock.patch('cinder.db.image_volume_cache_include_in_cluster') def test_init_host_added_to_cluster(self, image_cache_include_mock, cg_include_mock, vol_include_mock, vol_get_all_mock, snap_get_all_mock, group_include_mock): cluster = str(mock.sentinel.cluster) self.mock_object(self.volume, 'cluster', cluster) self.volume.init_host(added_to_cluster=True, service_id=self.service_id) vol_include_mock.assert_called_once_with(mock.ANY, cluster, host=self.volume.host) cg_include_mock.assert_called_once_with(mock.ANY, cluster, host=self.volume.host) image_cache_include_mock.assert_called_once_with(mock.ANY, cluster, host=self.volume.host) group_include_mock.assert_called_once_with(mock.ANY, cluster, host=self.volume.host) vol_get_all_mock.assert_called_once_with( mock.ANY, filters={'cluster_name': cluster}, limit=None, offset=None) snap_get_all_mock.assert_called_once_with( mock.ANY, filters={'cluster_name': cluster}, limit=None, offset=None) @mock.patch('cinder.keymgr.migration.migrate_fixed_key') @mock.patch('cinder.volume.manager.VolumeManager._get_my_volumes') @mock.patch('cinder.manager.ThreadPoolManager._add_to_threadpool') def test_init_host_key_migration(self, mock_add_threadpool, mock_get_my_volumes, mock_migrate_fixed_key): self.volume.init_host(service_id=self.service_id) volumes = mock_get_my_volumes() volumes_to_migrate = volume_migration.VolumeMigrationList() volumes_to_migrate.append(volumes, self.context) mock_add_threadpool.assert_called_once_with( mock_migrate_fixed_key, volumes=volumes_to_migrate) @mock.patch('time.sleep') def test_init_host_retry(self, mock_sleep): kwargs = {'service_id': 2} self.volume = importutils.import_object(CONF.volume_manager) self.volume.driver.do_setup = mock.MagicMock() self.volume.driver.do_setup.side_effect = [ exception.CinderException("Test driver error."), exception.InvalidConfigurationValue('Test config error.'), ImportError] self.volume.init_host(added_to_cluster=False, **kwargs) self.assertEqual(4, self.volume.driver.do_setup.call_count) self.assertFalse(self.volume.is_working()) @mock.patch('time.sleep') def test_init_host_retry_once(self, mock_sleep): kwargs = {'service_id': 2} self.volume = importutils.import_object(CONF.volume_manager) self.volume.driver.do_setup = mock.MagicMock() self.volume.driver.do_setup.side_effect = [ImportError, None] self.volume.init_host(added_to_cluster=False, **kwargs) self.assertEqual(2, self.volume.driver.do_setup.call_count) self.assertTrue(self.volume.is_working()) ././@PaxHeader0000000000000000000000000000002600000000000011453 
xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/volume/test_manage_volume.py0000664000175000017500000003242100000000000024215 0ustar00zuulzuul00000000000000# Copyright (c) 2016 Chuck Fouts. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from unittest import mock from cinder import context from cinder import db from cinder import exception from cinder import objects from cinder import quota from cinder.tests.unit import fake_constants as fake from cinder.tests.unit import fake_volume from cinder.tests.unit import utils as tests_utils from cinder.tests.unit import volume as base from cinder.volume.flows.manager import manage_existing from cinder.volume import manager from cinder.volume import volume_utils FAKE_HOST_POOL = 'volPool' FAKE_HOST = 'hostname@backend' QUOTAS = quota.QUOTAS class ManageVolumeTestCase(base.BaseVolumeTestCase): def setUp(self): super(ManageVolumeTestCase, self).setUp() self.context = context.RequestContext(fake.USER_ID, fake.PROJECT_ID, True) self.manager = manager.VolumeManager() self.manager.stats = {'allocated_capacity_gb': 0, 'pools': {}} @staticmethod def _stub_volume_object_get(cls, host=FAKE_HOST): volume = { 'id': fake.VOLUME_ID, 'size': 1, 'name': fake.VOLUME_NAME, 'host': host, } return fake_volume.fake_volume_obj(cls.context, **volume) def test_manage_existing(self): volume_object = self._stub_volume_object_get(self) mock_run_flow_engine = self.mock_object( self.manager, '_run_manage_existing_flow_engine', return_value=volume_object) mock_update_volume_stats = self.mock_object( self.manager, '_update_stats_for_managed') result = self.manager.manage_existing(self.context, volume_object) self.assertEqual(fake.VOLUME_ID, result) mock_run_flow_engine.assert_called_once_with(self.context, volume_object, None) mock_update_volume_stats.assert_called_once_with(volume_object) def test_manage_existing_with_volume_object(self): volume_object = self._stub_volume_object_get(self) mock_object_volume = self.mock_object(objects.Volume, 'get_by_id') mock_run_flow_engine = self.mock_object( self.manager, '_run_manage_existing_flow_engine', return_value=volume_object) mock_update_volume_stats = self.mock_object( self.manager, '_update_stats_for_managed') result = self.manager.manage_existing( self.context, volume_object) self.assertEqual(fake.VOLUME_ID, result) mock_object_volume.assert_not_called() mock_run_flow_engine.assert_called_once_with(self.context, volume_object, None) mock_update_volume_stats.assert_called_once_with(volume_object) def test_run_manage_existing_flow_engine(self): mock_volume = mock.Mock() volume_object = self._stub_volume_object_get(self) mock_flow_engine = mock.Mock() mock_flow_engine_run = self.mock_object(mock_flow_engine, 'run') mock_flow_engine_fetch = self.mock_object( mock_flow_engine.storage, 'fetch', return_value=volume_object) mock_get_flow = self.mock_object( manage_existing, 'get_flow', return_value=mock_flow_engine) result = self.manager._run_manage_existing_flow_engine(self.context, mock_volume, None) 
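        # The manager should build the flow via manage_existing.get_flow, run
        # it, and return the volume object fetched from the engine's storage;
        # the assertions below pin down each of those interactions.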
self.assertEqual(volume_object, result) mock_get_flow.assert_called_once_with(self.context, self.manager.db, self.manager.driver, self.manager.host, mock_volume, None) mock_flow_engine_run.assert_called_once_with() mock_flow_engine_fetch.assert_called_once_with('volume') def test_run_manage_existing_flow_engine_exception(self): mock_get_flow = self.mock_object( manage_existing, 'get_flow', side_effect=Exception) volume_object = self._stub_volume_object_get(self) self.assertRaises(exception.CinderException, self.manager._run_manage_existing_flow_engine, self.context, volume_object, None) mock_get_flow.assert_called_once_with(self.context, self.manager.db, self.manager.driver, self.manager.host, volume_object, None) def test_update_stats_for_managed(self): volume_object = self._stub_volume_object_get(self, host=FAKE_HOST + '#volPool') self.manager._update_stats_for_managed(volume_object) backend_stats = self.manager.stats['pools'][FAKE_HOST_POOL] self.assertEqual( 1, backend_stats['allocated_capacity_gb']) def test_update_stats_for_managed_no_pool(self): safe_get_backend = 'safe_get_backend' volume_obj = self._stub_volume_object_get(self) mock_safe_get = self.mock_object( self.manager.driver.configuration, 'safe_get', return_value=safe_get_backend) self.manager._update_stats_for_managed(volume_obj) mock_safe_get.assert_called_once_with('volume_backend_name') backend_stats = self.manager.stats['pools'][safe_get_backend] self.assertEqual(1, backend_stats['allocated_capacity_gb']) def test_update_stats_for_managed_default_backend(self): volume_obj = self._stub_volume_object_get(self) mock_safe_get = self.mock_object( self.manager.driver.configuration, 'safe_get', return_value=None) self.manager._update_stats_for_managed(volume_obj) mock_safe_get.assert_called_once_with('volume_backend_name') pool_stats = self.manager.stats['pools'] backend_stats = pool_stats[volume_utils.DEFAULT_POOL_NAME] self.assertEqual(1, backend_stats['allocated_capacity_gb']) def test_update_stats_key_error(self): self.manager.stats = {} self.assertRaises( KeyError, self.manager._update_stats_for_managed, self._stub_volume_object_get(self)) @mock.patch('cinder.volume.drivers.lvm.LVMVolumeDriver.' 'manage_existing') @mock.patch('cinder.volume.drivers.lvm.LVMVolumeDriver.' 'manage_existing_get_size') @mock.patch('cinder.volume.volume_utils.notify_about_volume_usage') def test_manage_volume_with_notify(self, mock_notify, mock_size, mock_manage): elevated = context.get_admin_context() vol_type = db.volume_type_create( elevated, {'name': 'type1', 'extra_specs': {}}) # create source volume volume_params = {'volume_type_id': vol_type.id, 'status': 'managing'} test_vol = tests_utils.create_volume(self.context, **volume_params) mock_size.return_value = 1 mock_manage.return_value = None self.volume.manage_existing(self.context, test_vol, 'volume_ref') mock_notify.assert_called_with(self.context, test_vol, 'manage_existing.end', host=test_vol.host) @mock.patch('cinder.volume.drivers.lvm.LVMVolumeDriver.' 'manage_existing_get_size') @mock.patch('cinder.volume.flows.manager.manage_existing.' 
'ManageExistingTask.execute') def test_manage_volume_raise_driver_exception(self, mock_execute, mock_driver_get_size): elevated = context.get_admin_context() project_id = self.context.project_id db.volume_type_create(elevated, {'name': 'type1', 'extra_specs': {}}) vol_type = db.volume_type_get_by_name(elevated, 'type1') # create source volume self.volume_params['volume_type_id'] = vol_type['id'] self.volume_params['status'] = 'managing' test_vol = tests_utils.create_volume(self.context, **self.volume_params) mock_execute.side_effect = exception.VolumeBackendAPIException( data="volume driver got exception") mock_driver_get_size.return_value = 1 # Set quota usage reserve_opts = {'volumes': 1, 'gigabytes': 1} reservations = QUOTAS.reserve(self.context, project_id=project_id, **reserve_opts) QUOTAS.commit(self.context, reservations) usage = db.quota_usage_get(self.context, project_id, 'volumes') volumes_in_use = usage.in_use usage = db.quota_usage_get(self.context, project_id, 'gigabytes') gigabytes_in_use = usage.in_use self.assertRaises(exception.VolumeBackendAPIException, self.volume.manage_existing, self.context, test_vol, 'volume_ref') # check volume status volume = objects.Volume.get_by_id(context.get_admin_context(), test_vol.id) self.assertEqual('error_managing', volume.status) # Delete this volume with 'error_managing_deleting' status in c-vol. test_vol.status = 'error_managing_deleting' test_vol.save() self.volume.delete_volume(self.context, test_vol) ctxt = context.get_admin_context(read_deleted='yes') volume = objects.Volume.get_by_id(ctxt, test_vol.id) self.assertEqual('deleted', volume.status) # Get in_use number after deleting error_managing volume usage = db.quota_usage_get(self.context, project_id, 'volumes') volumes_in_use_new = usage.in_use self.assertEqual(volumes_in_use, volumes_in_use_new) usage = db.quota_usage_get(self.context, project_id, 'gigabytes') gigabytes_in_use_new = usage.in_use self.assertEqual(gigabytes_in_use, gigabytes_in_use_new) @mock.patch('cinder.volume.drivers.lvm.LVMVolumeDriver.' 'manage_existing_get_size') def test_manage_volume_raise_driver_size_exception(self, mock_driver_get_size): elevated = context.get_admin_context() project_id = self.context.project_id db.volume_type_create(elevated, {'name': 'type1', 'extra_specs': {}}) # create source volume test_vol = tests_utils.create_volume(self.context, **self.volume_params) mock_driver_get_size.side_effect = exception.VolumeBackendAPIException( data="volume driver got exception") # Set quota usage reserve_opts = {'volumes': 1, 'gigabytes': 1} reservations = QUOTAS.reserve(self.context, project_id=project_id, **reserve_opts) QUOTAS.commit(self.context, reservations) usage = db.quota_usage_get(self.context, project_id, 'volumes') volumes_in_use = usage.in_use usage = db.quota_usage_get(self.context, project_id, 'gigabytes') gigabytes_in_use = usage.in_use self.assertRaises(exception.VolumeBackendAPIException, self.volume.manage_existing, self.context, test_vol, 'volume_ref') # check volume status volume = objects.Volume.get_by_id(context.get_admin_context(), test_vol.id) self.assertEqual('error_managing', volume.status) # Delete this volume with 'error_managing_deleting' status in c-vol. 
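        # The delete must leave the quota in_use counters recorded above
        # unchanged, which the final assertions in this test verify.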
test_vol.status = 'error_managing_deleting' test_vol.save() self.volume.delete_volume(self.context, test_vol) ctxt = context.get_admin_context(read_deleted='yes') volume = objects.Volume.get_by_id(ctxt, test_vol.id) self.assertEqual('deleted', volume.status) # Get in_use number after raising exception usage = db.quota_usage_get(self.context, project_id, 'volumes') volumes_in_use_new = usage.in_use self.assertEqual(volumes_in_use, volumes_in_use_new) usage = db.quota_usage_get(self.context, project_id, 'gigabytes') gigabytes_in_use_new = usage.in_use self.assertEqual(gigabytes_in_use, gigabytes_in_use_new) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/volume/test_replication_manager.py0000664000175000017500000010070200000000000025377 0ustar00zuulzuul00000000000000# Copyright (c) 2016 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from unittest import mock import uuid import ddt from oslo_config import cfg from oslo_utils import timeutils from cinder.common import constants from cinder import exception from cinder import objects from cinder.objects import fields from cinder.tests.unit import fake_constants as fake from cinder.tests.unit import fake_service from cinder.tests.unit import utils from cinder.tests.unit import volume as base import cinder.volume from cinder.volume import manager from cinder.volume import rpcapi as volume_rpcapi from cinder.volume import volume_utils CONF = cfg.CONF @ddt.ddt class ReplicationTestCase(base.BaseVolumeTestCase): def setUp(self): super(ReplicationTestCase, self).setUp() self.host = 'host@backend#pool' self.manager = manager.VolumeManager(host=self.host) @mock.patch('cinder.objects.VolumeList.get_all') @mock.patch('cinder.volume.driver.BaseVD.failover_host', side_effect=exception.InvalidReplicationTarget('')) @ddt.data(('backend2', 'default', fields.ReplicationStatus.FAILED_OVER), ('backend2', 'backend3', fields.ReplicationStatus.FAILED_OVER), (None, 'backend2', fields.ReplicationStatus.ENABLED), ('', 'backend2', fields.ReplicationStatus.ENABLED)) @ddt.unpack def test_failover_host_invalid_target(self, svc_backend, new_backend, expected, mock_failover, mock_getall): """Test replication failover_host with invalid_target. When failingover fails due to an invalid target exception we return replication_status to its previous status, and we decide what that is depending on the currect active backend. 
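        For example, a service that already has an active_backend_id is
        restored to FAILED_OVER, while one without it goes back to ENABLED.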
""" svc = utils.create_service( self.context, {'host': self.host, 'binary': constants.VOLUME_BINARY, 'active_backend_id': svc_backend, 'replication_status': fields.ReplicationStatus.FAILING_OVER}) self.manager.failover_host(self.context, new_backend) mock_getall.assert_called_once_with(self.context, filters={'host': self.host}, limit=None, offset=None) mock_failover.assert_called_once_with(self.context, [], secondary_id=new_backend, groups=[]) db_svc = objects.Service.get_by_id(self.context, svc.id) self.assertEqual(expected, db_svc.replication_status) @mock.patch('cinder.volume.driver.BaseVD.failover_host', mock.Mock(side_effect=exception.VolumeDriverException(''))) def test_failover_host_driver_exception(self): svc = utils.create_service( self.context, {'host': self.host, 'binary': constants.VOLUME_BINARY, 'active_backend_id': None, 'replication_status': fields.ReplicationStatus.FAILING_OVER}) self.manager.failover_host(self.context, mock.sentinel.backend_id) db_svc = objects.Service.get_by_id(self.context, svc.id) self.assertEqual(fields.ReplicationStatus.FAILOVER_ERROR, db_svc.replication_status) @mock.patch('cinder.objects.Service.is_up', True) @mock.patch.object(volume_rpcapi.VolumeAPI, 'failover') @mock.patch.object(cinder.db, 'conditional_update') @mock.patch.object(objects.ServiceList, 'get_all') def test_failover(self, mock_get_all, mock_db_update, mock_failover): """Test replication failover.""" service = fake_service.fake_service_obj(self.context, binary='cinder-volume') mock_get_all.return_value = [service] mock_db_update.return_value = {'replication_status': 'enabled'} volume_api = cinder.volume.api.API() volume_api.failover(self.context, host=CONF.host, cluster_name=None) mock_failover.assert_called_once_with(self.context, service, None) @mock.patch.object(volume_rpcapi.VolumeAPI, 'failover') @mock.patch.object(cinder.db, 'conditional_update') @mock.patch.object(cinder.db, 'service_get_all') def test_failover_unexpected_status(self, mock_db_get_all, mock_db_update, mock_failover): """Test replication failover unexpected status.""" mock_db_get_all.return_value = [fake_service.fake_service_obj( self.context, binary=constants.VOLUME_BINARY)] mock_db_update.return_value = None volume_api = cinder.volume.api.API() self.assertRaises(exception.InvalidInput, volume_api.failover, self.context, host=CONF.host, cluster_name=None) @mock.patch.object(volume_rpcapi.VolumeAPI, 'freeze_host') @mock.patch.object(cinder.db, 'conditional_update', return_value=1) @mock.patch.object(cinder.objects.ServiceList, 'get_all') def test_freeze_host(self, mock_get_all, mock_db_update, mock_freeze): """Test replication freeze_host.""" service = fake_service.fake_service_obj(self.context, binary=constants.VOLUME_BINARY) mock_get_all.return_value = [service] mock_freeze.return_value = True volume_api = cinder.volume.api.API() volume_api.freeze_host(self.context, host=CONF.host, cluster_name=None) mock_freeze.assert_called_once_with(self.context, service) @mock.patch.object(volume_rpcapi.VolumeAPI, 'freeze_host') @mock.patch.object(cinder.db, 'conditional_update') @mock.patch.object(cinder.db, 'service_get_all') def test_freeze_host_unexpected_status(self, mock_get_all, mock_db_update, mock_freeze): """Test replication freeze_host unexpected status.""" mock_get_all.return_value = [fake_service.fake_service_obj( self.context, binary=constants.VOLUME_BINARY)] mock_db_update.return_value = None volume_api = cinder.volume.api.API() self.assertRaises(exception.InvalidInput, volume_api.freeze_host, self.context, 
host=CONF.host, cluster_name=None) @mock.patch.object(volume_rpcapi.VolumeAPI, 'thaw_host') @mock.patch.object(cinder.db, 'conditional_update', return_value=1) @mock.patch.object(cinder.objects.ServiceList, 'get_all') def test_thaw_host(self, mock_get_all, mock_db_update, mock_thaw): """Test replication thaw_host.""" service = fake_service.fake_service_obj(self.context, binary=constants.VOLUME_BINARY) mock_get_all.return_value = [service] mock_thaw.return_value = True volume_api = cinder.volume.api.API() volume_api.thaw_host(self.context, host=CONF.host, cluster_name=None) mock_thaw.assert_called_once_with(self.context, service) @mock.patch.object(volume_rpcapi.VolumeAPI, 'thaw_host') @mock.patch.object(cinder.db, 'conditional_update') @mock.patch.object(cinder.db, 'service_get_all') def test_thaw_host_unexpected_status(self, mock_get_all, mock_db_update, mock_thaw): """Test replication thaw_host unexpected status.""" mock_get_all.return_value = [fake_service.fake_service_obj( self.context, binary=constants.VOLUME_BINARY)] mock_db_update.return_value = None volume_api = cinder.volume.api.API() self.assertRaises(exception.InvalidInput, volume_api.thaw_host, self.context, host=CONF.host, cluster_name=None) @mock.patch('cinder.volume.driver.BaseVD.failover_completed') def test_failover_completed(self, completed_mock): rep_field = fields.ReplicationStatus svc = objects.Service(self.context, host=self.volume.host, binary=constants.VOLUME_BINARY, replication_status=rep_field.ENABLED) svc.create() self.volume.failover_completed( self.context, {'active_backend_id': 'secondary', 'replication_status': rep_field.FAILED_OVER}) service = objects.Service.get_by_id(self.context, svc.id) self.assertEqual('secondary', service.active_backend_id) self.assertEqual('failed-over', service.replication_status) completed_mock.assert_called_once_with(self.context, 'secondary') @mock.patch('cinder.volume.driver.BaseVD.failover_completed', wraps=True) def test_failover_completed_driver_failure(self, completed_mock): rep_field = fields.ReplicationStatus svc = objects.Service(self.context, host=self.volume.host, binary=constants.VOLUME_BINARY, replication_status=rep_field.ENABLED) svc.create() self.volume.failover_completed( self.context, {'active_backend_id': 'secondary', 'replication_status': rep_field.FAILED_OVER}) service = objects.Service.get_by_id(self.context, svc.id) self.assertEqual('secondary', service.active_backend_id) self.assertEqual(rep_field.ERROR, service.replication_status) self.assertTrue(service.disabled) self.assertIsNotNone(service.disabled_reason) completed_mock.assert_called_once_with(self.context, 'secondary') @mock.patch('cinder.volume.rpcapi.VolumeAPI.failover_completed') def test_finish_failover_non_clustered(self, completed_mock): svc = mock.Mock(is_clustered=None) self.volume.finish_failover(self.context, svc, mock.sentinel.updates) svc.update.assert_called_once_with(mock.sentinel.updates) svc.save.assert_called_once_with() completed_mock.assert_not_called() @mock.patch('cinder.volume.rpcapi.VolumeAPI.failover_completed') def test_finish_failover_clustered(self, completed_mock): svc = mock.Mock(cluster_name='cluster_name') updates = {'status': 'error'} self.volume.finish_failover(self.context, svc, updates) completed_mock.assert_called_once_with(self.context, svc, updates) svc.cluster.status = 'error' svc.cluster.save.assert_called_once() @ddt.data(None, 'cluster_name') @mock.patch('cinder.volume.manager.VolumeManager.finish_failover') 
@mock.patch('cinder.volume.manager.VolumeManager._get_my_volumes') def test_failover_manager(self, cluster, get_vols_mock, finish_mock): """Test manager's failover method for clustered and not clustered.""" rep_field = fields.ReplicationStatus svc = objects.Service(self.context, host=self.volume.host, binary=constants.VOLUME_BINARY, cluster_name=cluster, replication_status=rep_field.ENABLED) svc.create() vol = objects.Volume(self.context, host=self.volume.host) vol.create() get_vols_mock.return_value = [vol] with mock.patch.object(self.volume, 'driver') as driver: called, not_called = driver.failover_host, driver.failover if cluster: called, not_called = not_called, called called.return_value = ('secondary', [{'volume_id': vol.id, 'updates': {'status': 'error'}}], []) self.volume.failover(self.context, secondary_backend_id='secondary') not_called.assert_not_called() called.assert_called_once_with(self.context, [vol], secondary_id='secondary', groups=[]) expected_update = {'replication_status': rep_field.FAILED_OVER, 'active_backend_id': 'secondary', 'disabled': True, 'disabled_reason': 'failed-over'} finish_mock.assert_called_once_with(self.context, svc, expected_update) volume = objects.Volume.get_by_id(self.context, vol.id) self.assertEqual('error', volume.status) @ddt.data(('host1', None), (None, 'mycluster')) @ddt.unpack def test_failover_api_fail_multiple_results(self, host, cluster): """Fail if we try to failover multiple backends in the same request.""" rep_field = fields.ReplicationStatus clusters = [ objects.Cluster(self.context, name='mycluster@backend1', replication_status=rep_field.ENABLED, binary=constants.VOLUME_BINARY), objects.Cluster(self.context, name='mycluster@backend2', replication_status=rep_field.ENABLED, binary=constants.VOLUME_BINARY) ] clusters[0].create() clusters[1].create() services = [ objects.Service(self.context, host='host1@backend1', cluster_name=clusters[0].name, replication_status=rep_field.ENABLED, binary=constants.VOLUME_BINARY), objects.Service(self.context, host='host1@backend2', cluster_name=clusters[1].name, replication_status=rep_field.ENABLED, binary=constants.VOLUME_BINARY), ] services[0].create() services[1].create() self.assertRaises(exception.Invalid, self.volume_api.failover, self.context, host, cluster) def test_failover_api_not_found(self): self.assertRaises(exception.ServiceNotFound, self.volume_api.failover, self.context, 'host1', None) @mock.patch('cinder.volume.rpcapi.VolumeAPI.failover') def test_failover_api_success_multiple_results(self, failover_mock): """Succeed to failover multiple services for the same backend.""" rep_field = fields.ReplicationStatus cluster_name = 'mycluster@backend1' cluster = objects.Cluster(self.context, name=cluster_name, replication_status=rep_field.ENABLED, binary=constants.VOLUME_BINARY) cluster.create() services = [ objects.Service(self.context, host='host1@backend1', cluster_name=cluster_name, replication_status=rep_field.ENABLED, binary=constants.VOLUME_BINARY), objects.Service(self.context, host='host2@backend1', cluster_name=cluster_name, replication_status=rep_field.ENABLED, binary=constants.VOLUME_BINARY), ] services[0].create() services[1].create() self.volume_api.failover(self.context, None, cluster_name, mock.sentinel.secondary_id) for service in services + [cluster]: self.assertEqual(rep_field.ENABLED, service.replication_status) service.refresh() self.assertEqual(rep_field.FAILING_OVER, service.replication_status) failover_mock.assert_called_once_with(self.context, mock.ANY, 
mock.sentinel.secondary_id) self.assertEqual(services[0].id, failover_mock.call_args[0][1].id) @mock.patch('cinder.volume.rpcapi.VolumeAPI.failover') def test_failover_api_success_multiple_results_not_updated(self, failover_mock): """Succeed to failover even if a service is not updated.""" rep_field = fields.ReplicationStatus cluster_name = 'mycluster@backend1' cluster = objects.Cluster(self.context, name=cluster_name, replication_status=rep_field.ENABLED, binary=constants.VOLUME_BINARY) cluster.create() services = [ objects.Service(self.context, host='host1@backend1', cluster_name=cluster_name, replication_status=rep_field.ENABLED, binary=constants.VOLUME_BINARY), objects.Service(self.context, host='host2@backend1', cluster_name=cluster_name, replication_status=rep_field.ERROR, binary=constants.VOLUME_BINARY), ] services[0].create() services[1].create() self.volume_api.failover(self.context, None, cluster_name, mock.sentinel.secondary_id) for service in services[:1] + [cluster]: service.refresh() self.assertEqual(rep_field.FAILING_OVER, service.replication_status) services[1].refresh() self.assertEqual(rep_field.ERROR, services[1].replication_status) failover_mock.assert_called_once_with(self.context, mock.ANY, mock.sentinel.secondary_id) self.assertEqual(services[0].id, failover_mock.call_args[0][1].id) @mock.patch('cinder.volume.rpcapi.VolumeAPI.failover') def test_failover_api_fail_multiple_results_not_updated(self, failover_mock): """Fail if none of the services could be updated.""" rep_field = fields.ReplicationStatus cluster_name = 'mycluster@backend1' cluster = objects.Cluster(self.context, name=cluster_name, replication_status=rep_field.ENABLED, binary=constants.VOLUME_BINARY) cluster.create() down_time = timeutils.datetime.datetime(1970, 1, 1) services = [ # This service is down objects.Service(self.context, host='host1@backend1', cluster_name=cluster_name, replication_status=rep_field.ENABLED, created_at=down_time, updated_at=down_time, modified_at=down_time, binary=constants.VOLUME_BINARY), # This service is not with the right replication status objects.Service(self.context, host='host2@backend1', cluster_name=cluster_name, replication_status=rep_field.ERROR, binary=constants.VOLUME_BINARY), ] services[0].create() services[1].create() self.assertRaises(exception.InvalidInput, self.volume_api.failover, self.context, None, cluster_name, mock.sentinel.secondary_id) for service in services: svc = objects.Service.get_by_id(self.context, service.id) self.assertEqual(service.replication_status, svc.replication_status) cluster.refresh() self.assertEqual(rep_field.ENABLED, cluster.replication_status) failover_mock.assert_not_called() def _check_failover_db(self, get_method, expected_results): db_data = get_method.get_all(self.context, None) db_data = {e.id: e for e in db_data} for expected in expected_results: id_ = expected['id'] for key, value in expected.items(): self.assertEqual(value, getattr(db_data[id_], key), '(%s) ref=%s != act=%s' % ( key, expected, dict(db_data[id_]))) def _test_failover_model_updates(self, in_volumes, in_snapshots, driver_volumes, driver_result, out_volumes, out_snapshots, in_groups=None, out_groups=None, driver_group_result=None, secondary_id=None): host = volume_utils.extract_host(self.manager.host) utils.create_service(self.context, {'host': host, 'binary': constants.VOLUME_BINARY}) for volume in in_volumes: utils.create_volume(self.context, self.manager.host, **volume) for snapshot in in_snapshots: utils.create_snapshot(self.context, **snapshot) for 
group in in_groups: utils.create_group(self.context, self.manager.host, **group) with mock.patch.object( self.manager.driver, 'failover_host', return_value=(secondary_id, driver_result, driver_group_result)) as driver_mock: self.manager.failover_host(self.context, secondary_id) self.assertSetEqual(driver_volumes, {v.id for v in driver_mock.call_args[0][1]}) self._check_failover_db(objects.VolumeList, out_volumes) self._check_failover_db(objects.SnapshotList, out_snapshots) self._check_failover_db(objects.GroupList, out_groups) @mock.patch('cinder.volume.volume_utils.is_group_a_type') def test_failover_host_model_updates(self, mock_group_type): status = fields.ReplicationStatus mock_group_type.return_value = True in_groups = [ {'id': str(uuid.uuid4()), 'status': 'available', 'group_type_id': fake.GROUP_TYPE_ID, 'volume_type_ids': [fake.VOLUME_TYPE_ID], 'replication_status': status.FAILOVER_ERROR}, {'id': str(uuid.uuid4()), 'status': 'available', 'group_type_id': fake.GROUP_TYPE_ID, 'volume_type_ids': [fake.VOLUME_TYPE_ID], 'replication_status': status.ENABLED}, ] driver_group_result = [ {'group_id': in_groups[0]['id'], 'updates': {'replication_status': status.FAILOVER_ERROR}}, {'group_id': in_groups[1]['id'], 'updates': {'replication_status': status.FAILED_OVER}}, ] out_groups = [ {'id': in_groups[0]['id'], 'status': 'error', 'replication_status': status.FAILOVER_ERROR}, {'id': in_groups[1]['id'], 'status': in_groups[1]['status'], 'replication_status': status.FAILED_OVER}, ] # test volumes in_volumes = [ {'id': str(uuid.uuid4()), 'status': 'available', 'replication_status': status.DISABLED}, {'id': str(uuid.uuid4()), 'status': 'in-use', 'replication_status': status.NOT_CAPABLE}, {'id': str(uuid.uuid4()), 'status': 'available', 'replication_status': status.FAILOVER_ERROR}, {'id': str(uuid.uuid4()), 'status': 'in-use', 'replication_status': status.ENABLED}, {'id': str(uuid.uuid4()), 'status': 'available', 'replication_status': status.FAILOVER_ERROR}, {'id': str(uuid.uuid4()), 'status': 'in-use', 'replication_status': status.ENABLED}, {'id': str(uuid.uuid4()), 'status': 'available', 'group_id': in_groups[0]['id'], 'replication_status': status.FAILOVER_ERROR}, {'id': str(uuid.uuid4()), 'status': 'available', 'group_id': in_groups[1]['id'], 'replication_status': status.ENABLED}, ] in_snapshots = [ {'id': v['id'], 'volume_id': v['id'], 'status': 'available'} for v in in_volumes ] driver_volumes = { v['id'] for v in in_volumes if v['replication_status'] not in (status.DISABLED, status.NOT_CAPABLE)} driver_result = [ {'volume_id': in_volumes[3]['id'], 'updates': {'status': 'error'}}, {'volume_id': in_volumes[4]['id'], 'updates': {'replication_status': status.FAILOVER_ERROR}}, {'volume_id': in_volumes[5]['id'], 'updates': {'replication_status': status.FAILED_OVER}}, {'volume_id': in_volumes[6]['id'], 'updates': {'replication_status': status.FAILOVER_ERROR}}, {'volume_id': in_volumes[7]['id'], 'updates': {'replication_status': status.FAILED_OVER}}, ] out_volumes = [ {'id': in_volumes[0]['id'], 'status': 'error', 'replication_status': status.NOT_CAPABLE, 'previous_status': in_volumes[0]['status']}, {'id': in_volumes[1]['id'], 'status': 'error', 'replication_status': status.NOT_CAPABLE, 'previous_status': in_volumes[1]['status']}, {'id': in_volumes[2]['id'], 'status': in_volumes[2]['status'], 'replication_status': status.FAILED_OVER}, {'id': in_volumes[3]['id'], 'status': 'error', 'previous_status': in_volumes[3]['status'], 'replication_status': status.FAILOVER_ERROR}, {'id': in_volumes[4]['id'], 
'status': 'error', 'previous_status': in_volumes[4]['status'], 'replication_status': status.FAILOVER_ERROR}, {'id': in_volumes[5]['id'], 'status': in_volumes[5]['status'], 'replication_status': status.FAILED_OVER}, {'id': in_volumes[6]['id'], 'status': 'error', 'previous_status': in_volumes[6]['status'], 'replication_status': status.FAILOVER_ERROR}, {'id': in_volumes[7]['id'], 'status': in_volumes[7]['status'], 'replication_status': status.FAILED_OVER}, ] out_snapshots = [ {'id': ov['id'], 'status': 'error' if ov['status'] == 'error' else 'available'} for ov in out_volumes ] self._test_failover_model_updates(in_volumes, in_snapshots, driver_volumes, driver_result, out_volumes, out_snapshots, in_groups, out_groups, driver_group_result) def test_failback_host_model_updates(self): status = fields.ReplicationStatus # IDs will be overwritten with UUIDs, but they help follow the code in_volumes = [ {'id': 0, 'status': 'available', 'replication_status': status.DISABLED}, {'id': 1, 'status': 'in-use', 'replication_status': status.NOT_CAPABLE}, {'id': 2, 'status': 'available', 'replication_status': status.FAILOVER_ERROR}, {'id': 3, 'status': 'in-use', 'replication_status': status.ENABLED}, {'id': 4, 'status': 'available', 'replication_status': status.FAILOVER_ERROR}, {'id': 5, 'status': 'in-use', 'replication_status': status.FAILED_OVER}, ] # Generate real volume IDs for volume in in_volumes: volume['id'] = str(uuid.uuid4()) in_snapshots = [ {'id': in_volumes[0]['id'], 'volume_id': in_volumes[0]['id'], 'status': fields.SnapshotStatus.ERROR_DELETING}, {'id': in_volumes[1]['id'], 'volume_id': in_volumes[1]['id'], 'status': fields.SnapshotStatus.AVAILABLE}, {'id': in_volumes[2]['id'], 'volume_id': in_volumes[2]['id'], 'status': fields.SnapshotStatus.CREATING}, {'id': in_volumes[3]['id'], 'volume_id': in_volumes[3]['id'], 'status': fields.SnapshotStatus.DELETING}, {'id': in_volumes[4]['id'], 'volume_id': in_volumes[4]['id'], 'status': fields.SnapshotStatus.CREATING}, {'id': in_volumes[5]['id'], 'volume_id': in_volumes[5]['id'], 'status': fields.SnapshotStatus.CREATING}, ] driver_volumes = { v['id'] for v in in_volumes if v['replication_status'] not in (status.DISABLED, status.NOT_CAPABLE)} driver_result = [ {'volume_id': in_volumes[3]['id'], 'updates': {'status': 'error'}}, {'volume_id': in_volumes[4]['id'], 'updates': {'replication_status': status.FAILOVER_ERROR}}, {'volume_id': in_volumes[5]['id'], 'updates': {'replication_status': status.FAILED_OVER}}, ] out_volumes = [ {'id': in_volumes[0]['id'], 'status': in_volumes[0]['status'], 'replication_status': in_volumes[0]['replication_status'], 'previous_status': None}, {'id': in_volumes[1]['id'], 'status': in_volumes[1]['status'], 'replication_status': in_volumes[1]['replication_status'], 'previous_status': None}, {'id': in_volumes[2]['id'], 'status': in_volumes[2]['status'], 'replication_status': status.ENABLED}, {'id': in_volumes[3]['id'], 'status': 'error', 'previous_status': in_volumes[3]['status'], 'replication_status': status.FAILOVER_ERROR}, {'id': in_volumes[4]['id'], 'status': 'error', 'previous_status': in_volumes[4]['status'], 'replication_status': status.FAILOVER_ERROR}, {'id': in_volumes[5]['id'], 'status': in_volumes[5]['status'], 'replication_status': status.ENABLED}, ] # Snapshot status is preserved except for those that error the failback out_snapshots = in_snapshots[:] out_snapshots[3]['status'] = fields.SnapshotStatus.ERROR out_snapshots[4]['status'] = fields.SnapshotStatus.ERROR self._test_failover_model_updates(in_volumes, 
in_snapshots, driver_volumes, driver_result, out_volumes, out_snapshots, [], [], [], self.manager.FAILBACK_SENTINEL) @mock.patch('cinder.volume.volume_utils.log_unsupported_driver_warning', mock.Mock()) @mock.patch('cinder.volume.volume_utils.require_driver_initialized', mock.Mock()) def test_init_host_with_rpc_clustered_replication(self): # These are not OVOs but ORM instances cluster = utils.create_cluster(self.context) service = utils.create_service(self.context, {'cluster_name': cluster.name, 'binary': cluster.binary}) self.assertNotEqual(fields.ReplicationStatus.ENABLED, cluster.replication_status) self.assertNotEqual(fields.ReplicationStatus.ENABLED, service.replication_status) vol_manager = manager.VolumeManager( 'cinder.tests.fake_driver.FakeHAReplicatedLoggingVolumeDriver', host=service.host, cluster=cluster.name) vol_manager.driver = mock.Mock() vol_manager.driver.get_volume_stats.return_value = { 'replication_enabled': True } vol_manager.init_host_with_rpc() cluster_ovo = objects.Cluster.get_by_id(self.context, cluster.id) service_ovo = objects.Service.get_by_id(self.context, service.id) self.assertEqual(fields.ReplicationStatus.ENABLED, cluster_ovo.replication_status) self.assertEqual(fields.ReplicationStatus.ENABLED, service_ovo.replication_status) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/volume/test_rpcapi.py0000664000175000017500000007636600000000000022674 0ustar00zuulzuul00000000000000# Copyright 2012, Intel, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""Unit Tests for cinder.volume.rpcapi.""" from unittest import mock import ddt from oslo_config import cfg from oslo_serialization import jsonutils from cinder.common import constants from cinder import db from cinder import exception from cinder import objects from cinder.objects import fields from cinder.tests.unit.backup import fake_backup from cinder.tests.unit import fake_constants as fake from cinder.tests.unit import fake_service from cinder.tests.unit import fake_volume from cinder.tests.unit import test from cinder.tests.unit import utils as tests_utils from cinder.volume import rpcapi as volume_rpcapi CONF = cfg.CONF @ddt.ddt class VolumeRPCAPITestCase(test.RPCAPITestCase): def setUp(self): super(VolumeRPCAPITestCase, self).setUp() self.rpcapi = volume_rpcapi.VolumeAPI self.base_version = '3.0' vol = {} vol['host'] = 'fake_host' vol['availability_zone'] = CONF.storage_availability_zone vol['status'] = "available" vol['attach_status'] = "detached" vol['metadata'] = {"test_key": "test_val"} vol['size'] = 1 vol['volume_type_id'] = fake.VOLUME_TYPE_ID volume = db.volume_create(self.context, vol) kwargs = { 'status': fields.SnapshotStatus.CREATING, 'progress': '0%', 'display_name': 'fake_name', 'display_description': 'fake_description'} snapshot = tests_utils.create_snapshot(self.context, vol['id'], **kwargs) generic_group = tests_utils.create_group( self.context, availability_zone=CONF.storage_availability_zone, group_type_id='group_type1', host='fakehost@fakedrv#fakepool') group_snapshot = tests_utils.create_group_snapshot( self.context, group_id=generic_group.id, group_type_id=fake.GROUP_TYPE_ID) self.fake_volume = jsonutils.to_primitive(volume) self.fake_volume_obj = fake_volume.fake_volume_obj(self.context, **vol) self.fake_snapshot = snapshot self.fake_reservations = ["RESERVATION"] self.fake_backup_obj = fake_backup.fake_backup_obj(self.context) self.fake_group = generic_group self.fake_group_snapshot = group_snapshot self.can_send_version_mock = self.patch( 'oslo_messaging.RPCClient.can_send_version', return_value=True) def tearDown(self): super(VolumeRPCAPITestCase, self).tearDown() self.fake_snapshot.destroy() self.fake_volume_obj.destroy() self.fake_group_snapshot.destroy() self.fake_group.destroy() self.fake_backup_obj.destroy() def _change_cluster_name(self, resource, cluster_name): resource.cluster_name = cluster_name resource.obj_reset_changes() def test_create_volume(self): self._test_rpc_api('create_volume', rpc_method='cast', server='fake_host', volume=self.fake_volume_obj, request_spec=objects.RequestSpec.from_primitives( {}), filter_properties={'availability_zone': 'fake_az'}, allow_reschedule=True) @ddt.data(None, 'my_cluster') def test_delete_volume(self, cluster_name): self._change_cluster_name(self.fake_volume_obj, cluster_name) self._test_rpc_api('delete_volume', rpc_method='cast', server=cluster_name or self.fake_volume_obj.host, volume=self.fake_volume_obj, unmanage_only=False, cascade=False) def test_delete_volume_cascade(self): self._test_rpc_api('delete_volume', rpc_method='cast', server=self.fake_volume_obj.host, volume=self.fake_volume_obj, unmanage_only=False, cascade=True) @ddt.data(None, 'mycluster') def test_create_snapshot(self, cluster_name): self._change_cluster_name(self.fake_volume_obj, cluster_name) self._test_rpc_api('create_snapshot', rpc_method='cast', server=cluster_name or self.fake_volume_obj.host, volume=self.fake_volume_obj, snapshot=self.fake_snapshot) @ddt.data(None, 'mycluster') def test_delete_snapshot(self, cluster_name): 
self._change_cluster_name(self.fake_snapshot.volume, cluster_name) self._test_rpc_api( 'delete_snapshot', rpc_method='cast', server=cluster_name or self.fake_snapshot.volume.host, snapshot=self.fake_snapshot, unmanage_only=False) def test_delete_snapshot_with_unmanage_only(self): self._test_rpc_api('delete_snapshot', rpc_method='cast', server=self.fake_snapshot.volume.host, snapshot=self.fake_snapshot, unmanage_only=True) @ddt.data('3.0', '3.3') def test_attach_volume_to_instance(self, version): self.can_send_version_mock.return_value = (version == '3.3') self._test_rpc_api('attach_volume', rpc_method='call', server=self.fake_volume_obj.host, volume=self.fake_volume_obj, instance_uuid=fake.INSTANCE_ID, host_name=None, mountpoint='fake_mountpoint', mode='ro', expected_kwargs_diff={ 'volume_id': self.fake_volume_obj.id}, retval=fake_volume.fake_db_volume_attachment(), version=version) @ddt.data('3.0', '3.3') def test_attach_volume_to_host(self, version): self.can_send_version_mock.return_value = (version == '3.3') self._test_rpc_api('attach_volume', rpc_method='call', server=self.fake_volume_obj.host, volume=self.fake_volume_obj, instance_uuid=None, host_name='fake_host', mountpoint='fake_mountpoint', mode='rw', expected_kwargs_diff={ 'volume_id': self.fake_volume_obj.id}, retval=fake_volume.fake_db_volume_attachment(), version=version) @ddt.data('3.0', '3.3') def test_attach_volume_cluster(self, version): self.can_send_version_mock.return_value = (version == '3.3') self._change_cluster_name(self.fake_volume_obj, 'mycluster') self._test_rpc_api('attach_volume', rpc_method='call', server=self.fake_volume_obj.cluster_name, volume=self.fake_volume_obj, instance_uuid=None, host_name='fake_host', mountpoint='fake_mountpoint', mode='rw', expected_kwargs_diff={ 'volume_id': self.fake_volume_obj.id}, retval=fake_volume.fake_db_volume_attachment(), version=version) @ddt.data('3.0', '3.4') def test_detach_volume(self, version): self.can_send_version_mock.return_value = (version == '3.4') self._test_rpc_api('detach_volume', rpc_method='call', server=self.fake_volume_obj.host, volume=self.fake_volume_obj, attachment_id=fake.ATTACHMENT_ID, expected_kwargs_diff={ 'volume_id': self.fake_volume_obj.id}, # NOTE(dulek): Detach isn't returning anything, but # it's a call and it is synchronous. retval=None, version=version) @ddt.data('3.0', '3.4') def test_detach_volume_cluster(self, version): self.can_send_version_mock.return_value = (version == '3.4') self._change_cluster_name(self.fake_volume_obj, 'mycluster') self._test_rpc_api('detach_volume', rpc_method='call', server=self.fake_volume_obj.cluster_name, volume=self.fake_volume_obj, attachment_id='fake_uuid', expected_kwargs_diff={ 'volume_id': self.fake_volume_obj.id}, # NOTE(dulek): Detach isn't returning anything, but # it's a call and it is synchronous. 
retval=None, version=version) @ddt.data(None, 'mycluster') def test_copy_volume_to_image(self, cluster_name): self._change_cluster_name(self.fake_volume_obj, cluster_name) self._test_rpc_api('copy_volume_to_image', rpc_method='cast', server=cluster_name or self.fake_volume_obj.host, volume=self.fake_volume_obj, expected_kwargs_diff={ 'volume_id': self.fake_volume_obj.id}, image_meta={'id': fake.IMAGE_ID, 'container_format': 'fake_type', 'disk_format': 'fake_format'}) @ddt.data(None, 'mycluster') def test_initialize_connection(self, cluster_name): self._change_cluster_name(self.fake_volume_obj, cluster_name) self._test_rpc_api('initialize_connection', rpc_method='call', server=cluster_name or self.fake_volume_obj.host, connector='fake_connector', volume=self.fake_volume_obj) @ddt.data(None, 'mycluster') def test_terminate_connection(self, cluster_name): self._change_cluster_name(self.fake_volume_obj, cluster_name) self._test_rpc_api('terminate_connection', rpc_method='call', server=cluster_name or self.fake_volume_obj.host, volume=self.fake_volume_obj, connector='fake_connector', force=False, # NOTE(dulek): Terminate isn't returning anything, # but it's a call and it is synchronous. retval=None, expected_kwargs_diff={ 'volume_id': self.fake_volume_obj.id}) @ddt.data(None, 'mycluster') def test_accept_transfer(self, cluster_name): self._change_cluster_name(self.fake_volume_obj, cluster_name) self._test_rpc_api('accept_transfer', rpc_method='call', server=cluster_name or self.fake_volume_obj.host, volume=self.fake_volume_obj, new_user=fake.USER_ID, new_project=fake.PROJECT_ID, no_snapshots=True, expected_kwargs_diff={ 'volume_id': self.fake_volume_obj.id}, version='3.16') @ddt.data(None, 'mycluster') def test_extend_volume(self, cluster_name): self._change_cluster_name(self.fake_volume_obj, cluster_name) self._test_rpc_api('extend_volume', rpc_method='cast', server=cluster_name or self.fake_volume_obj.host, volume=self.fake_volume_obj, new_size=1, reservations=self.fake_reservations) def test_migrate_volume(self): class FakeBackend(object): def __init__(self): self.host = 'fake_host' self.cluster_name = 'cluster_name' self.capabilities = {} dest_backend = FakeBackend() self._test_rpc_api('migrate_volume', rpc_method='cast', server=self.fake_volume_obj.host, volume=self.fake_volume_obj, dest_backend=dest_backend, force_host_copy=True, expected_kwargs_diff={ 'host': {'host': 'fake_host', 'cluster_name': 'cluster_name', 'capabilities': {}}}, version='3.5') def test_migrate_volume_completion(self): self._test_rpc_api('migrate_volume_completion', rpc_method='call', server=self.fake_volume_obj.host, volume=self.fake_volume_obj, new_volume=self.fake_volume_obj, error=False, retval=fake.VOLUME_ID) def test_retype(self): class FakeBackend(object): def __init__(self): self.host = 'fake_host' self.cluster_name = 'cluster_name' self.capabilities = {} dest_backend = FakeBackend() self._test_rpc_api('retype', rpc_method='cast', server=self.fake_volume_obj.host, volume=self.fake_volume_obj, new_type_id=fake.VOLUME_TYPE_ID, dest_backend=dest_backend, migration_policy='never', reservations=self.fake_reservations, old_reservations=self.fake_reservations, expected_kwargs_diff={ 'host': {'host': 'fake_host', 'cluster_name': 'cluster_name', 'capabilities': {}}}, version='3.5') def test_manage_existing(self): self._test_rpc_api('manage_existing', rpc_method='cast', server=self.fake_volume_obj.host, volume=self.fake_volume_obj, ref={'lv_name': 'foo'}) def test_manage_existing_snapshot(self): 
self._test_rpc_api('manage_existing_snapshot', rpc_method='cast', server=self.fake_snapshot.volume.host, snapshot=self.fake_snapshot, ref='foo', backend='fake_host') def test_freeze_host(self): service = fake_service.fake_service_obj(self.context, host='fake_host', binary=constants.VOLUME_BINARY) self._test_rpc_api('freeze_host', rpc_method='call', server='fake_host', service=service, retval=True) def test_thaw_host(self): service = fake_service.fake_service_obj(self.context, host='fake_host', binary=constants.VOLUME_BINARY) self._test_rpc_api('thaw_host', rpc_method='call', server='fake_host', service=service, retval=True) @ddt.data('3.0', '3.8') def test_failover(self, version): self.can_send_version_mock.side_effect = lambda x: x == version service = objects.Service(self.context, host='fake_host', cluster_name=None) expected_method = 'failover' if version == '3.8' else 'failover_host' self._test_rpc_api('failover', rpc_method='cast', expected_method=expected_method, server='fake_host', service=service, secondary_backend_id='fake_backend', version=version) @mock.patch('cinder.volume.rpcapi.VolumeAPI._get_cctxt') def test_failover_completed(self, cctxt_mock): service = objects.Service(self.context, host='fake_host', cluster_name='cluster_name') self._test_rpc_api('failover_completed', rpc_method='cast', fanout=True, server='fake_host', service=service, updates=mock.sentinel.updates) def test_get_capabilities(self): self._test_rpc_api('get_capabilities', rpc_method='call', server='fake_host', backend_id='fake_host', discover=True, retval={'foo': 'bar'}) def test_remove_export(self): self._test_rpc_api('remove_export', rpc_method='cast', server=self.fake_volume_obj.host, volume=self.fake_volume_obj, expected_kwargs_diff={ 'volume_id': self.fake_volume_obj.id}) @ddt.data(None, 'mycluster') def test_get_backup_device_cast(self, cluster_name): self._change_cluster_name(self.fake_volume_obj, cluster_name) self._test_rpc_api('get_backup_device', rpc_method='cast', server=cluster_name or self.fake_volume_obj.host, backup=self.fake_backup_obj, volume=self.fake_volume_obj, expected_kwargs_diff={ 'want_objects': True, 'async_call': True, }, retval=None, version='3.17') @ddt.data(None, 'mycluster') def test_get_backup_device_call(self, cluster_name): self.can_send_version_mock.side_effect = (False, False, True, False, True) self._change_cluster_name(self.fake_volume_obj, cluster_name) backup_device_dict = {'backup_device': self.fake_volume, 'is_snapshot': False, 'secure_enabled': True} backup_device_obj = objects.BackupDeviceInfo.from_primitive( backup_device_dict, self.context) self._test_rpc_api('get_backup_device', rpc_method='call', server=cluster_name or self.fake_volume_obj.host, backup=self.fake_backup_obj, volume=self.fake_volume_obj, expected_kwargs_diff={ 'want_objects': True, }, retval=backup_device_obj, version='3.2') @ddt.data(None, 'mycluster') def test_get_backup_device_old(self, cluster_name): self.can_send_version_mock.side_effect = (False, False, False, False, False) self._change_cluster_name(self.fake_volume_obj, cluster_name) backup_device_dict = {'backup_device': self.fake_volume, 'is_snapshot': False, 'secure_enabled': True} backup_device_obj = objects.BackupDeviceInfo.from_primitive( backup_device_dict, self.context) self._test_rpc_api('get_backup_device', rpc_method='call', server=cluster_name or self.fake_volume_obj.host, backup=self.fake_backup_obj, volume=self.fake_volume_obj, retval=backup_device_dict, expected_retval=backup_device_obj, version='3.0') @ddt.data(None, 
'mycluster') def test_secure_file_operations_enabled(self, cluster_name): self._change_cluster_name(self.fake_volume_obj, cluster_name) self._test_rpc_api('secure_file_operations_enabled', rpc_method='call', server=cluster_name or self.fake_volume_obj.host, volume=self.fake_volume_obj, retval=True) def test_create_group(self): self._test_rpc_api('create_group', rpc_method='cast', server='fakehost@fakedrv', group=self.fake_group) @ddt.data(None, 'mycluster') def test_delete_group(self, cluster_name): self._change_cluster_name(self.fake_group, cluster_name) self._test_rpc_api('delete_group', rpc_method='cast', server=cluster_name or self.fake_group.host, group=self.fake_group) @ddt.data(None, 'mycluster') def test_update_group(self, cluster_name): self._change_cluster_name(self.fake_group, cluster_name) self._test_rpc_api('update_group', rpc_method='cast', server=cluster_name or self.fake_group.host, group=self.fake_group, add_volumes=[fake.VOLUME2_ID], remove_volumes=[fake.VOLUME3_ID]) def test_create_group_from_src(self): self._test_rpc_api('create_group_from_src', rpc_method='cast', server=self.fake_group.host, group=self.fake_group, group_snapshot=self.fake_group_snapshot, source_group=None) def test_create_group_snapshot(self): self._test_rpc_api('create_group_snapshot', rpc_method='cast', server=self.fake_group_snapshot.group.host, group_snapshot=self.fake_group_snapshot) def test_delete_group_snapshot(self): self._test_rpc_api('delete_group_snapshot', rpc_method='cast', server=self.fake_group_snapshot.group.host, group_snapshot=self.fake_group_snapshot) @ddt.data(('myhost', None), ('myhost', 'mycluster')) @ddt.unpack @mock.patch('cinder.volume.rpcapi.VolumeAPI._get_cctxt') def test_do_cleanup(self, host, cluster, get_cctxt_mock): cleanup_request = objects.CleanupRequest(self.context, host=host, cluster_name=cluster) rpcapi = volume_rpcapi.VolumeAPI() rpcapi.do_cleanup(self.context, cleanup_request) get_cctxt_mock.assert_called_once_with( cleanup_request.service_topic_queue, '3.7') get_cctxt_mock.return_value.cast.assert_called_once_with( self.context, 'do_cleanup', cleanup_request=cleanup_request) def test_do_cleanup_too_old(self): cleanup_request = objects.CleanupRequest(self.context) rpcapi = volume_rpcapi.VolumeAPI() with mock.patch.object(rpcapi.client, 'can_send_version', return_value=False) as can_send_mock: self.assertRaises(exception.ServiceTooOld, rpcapi.do_cleanup, self.context, cleanup_request) can_send_mock.assert_called_once_with('3.7') @ddt.data(('myhost', None, '3.10'), ('myhost', 'mycluster', '3.10'), ('myhost', None, '3.0')) @ddt.unpack @mock.patch('oslo_messaging.RPCClient.can_send_version') def test_get_manageable_volumes( self, host, cluster_name, version, can_send_version): can_send_version.side_effect = lambda x: x == version service = objects.Service(self.context, host=host, cluster_name=cluster_name) expected_kwargs_diff = { 'want_objects': True} if version == '3.10' else {} self._test_rpc_api('get_manageable_volumes', rpc_method='call', service=service, server=cluster_name or host, marker=5, limit=20, offset=5, sort_keys='fake_keys', sort_dirs='fake_dirs', expected_kwargs_diff=expected_kwargs_diff, version=version) can_send_version.assert_has_calls([mock.call('3.10')]) @ddt.data(('myhost', None, '3.10'), ('myhost', 'mycluster', '3.10'), ('myhost', None, '3.0')) @ddt.unpack @mock.patch('oslo_messaging.RPCClient.can_send_version') def test_get_manageable_snapshots( self, host, cluster_name, version, can_send_version): can_send_version.side_effect = lambda x: x 
== version service = objects.Service(self.context, host=host, cluster_name=cluster_name) expected_kwargs_diff = { 'want_objects': True} if version == '3.10' else {} self._test_rpc_api('get_manageable_snapshots', rpc_method='call', service=service, server=cluster_name or host, marker=5, limit=20, offset=5, sort_keys='fake_keys', sort_dirs='fake_dirs', expected_kwargs_diff=expected_kwargs_diff, version=version) can_send_version.assert_has_calls([mock.call('3.10')]) @mock.patch('oslo_messaging.RPCClient.can_send_version', mock.Mock()) def test_set_log_levels(self): service = objects.Service(self.context, host='host1') self._test_rpc_api('set_log_levels', rpc_method='cast', server=service.host, service=service, log_request='log_request', version='3.12') @mock.patch('oslo_messaging.RPCClient.can_send_version', mock.Mock()) def test_get_log_levels(self): service = objects.Service(self.context, host='host1') self._test_rpc_api('get_log_levels', rpc_method='call', server=service.host, service=service, log_request='log_request', version='3.12') @ddt.data(None, 'mycluster') def test_initialize_connection_snapshot(self, cluster_name): self._change_cluster_name(self.fake_snapshot.volume, cluster_name) self._test_rpc_api('initialize_connection_snapshot', rpc_method='call', server=(cluster_name or self.fake_snapshot.volume.host), connector='fake_connector', snapshot=self.fake_snapshot, expected_kwargs_diff={ 'snapshot_id': self.fake_snapshot.id}, version='3.13') @ddt.data(None, 'mycluster') def test_terminate_connection_snapshot(self, cluster_name): self._change_cluster_name(self.fake_snapshot.volume, cluster_name) self._test_rpc_api('terminate_connection_snapshot', rpc_method='call', server=(cluster_name or self.fake_snapshot.volume.host), snapshot=self.fake_snapshot, connector='fake_connector', force=False, retval=None, expected_kwargs_diff={ 'snapshot_id': self.fake_snapshot.id}, version='3.13') def test_remove_export_snapshot(self): self._test_rpc_api('remove_export_snapshot', rpc_method='cast', server=self.fake_volume_obj.host, snapshot=self.fake_snapshot, expected_kwargs_diff={ 'snapshot_id': self.fake_snapshot.id}, version='3.13') def test_enable_replication(self): self._test_rpc_api('enable_replication', rpc_method='cast', server=self.fake_group.host, group=self.fake_group, version='3.14') def test_disable_replication(self): self._test_rpc_api('disable_replication', rpc_method='cast', server=self.fake_group.host, group=self.fake_group, version='3.14') def test_failover_replication(self): self._test_rpc_api('failover_replication', rpc_method='cast', server=self.fake_group.host, group=self.fake_group, allow_attached_volume=False, secondary_backend_id=None, version='3.14') def test_list_replication_targets(self): self._test_rpc_api('list_replication_targets', rpc_method='call', server=self.fake_group.host, group=self.fake_group, version='3.14') @ddt.data('3.18', '3.20') def test_reimage(self, version): if version == '3.18': self.can_send_version_mock.side_effect = ( True, True, False, False) self._test_rpc_api('reimage', rpc_method='cast', server=self.fake_volume_obj.host, volume=self.fake_volume_obj, image_meta={'id': fake.IMAGE_ID, 'container_format': 'fake_type', 'disk_format': 'fake_format'}, image_snap='fake_snap', version=version) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/volume/test_snapshot.py0000664000175000017500000010051600000000000023236 0ustar00zuulzuul00000000000000# Copyright 2010 United 
States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Tests for global snapshot cases.""" import os import sys from unittest import mock import ddt from oslo_config import cfg from oslo_utils import imageutils from cinder import context from cinder import db from cinder import exception from cinder import objects from cinder.objects import fields from cinder import quota from cinder.tests.unit.api.v2 import fakes as v2_fakes from cinder.tests.unit.brick import fake_lvm from cinder.tests.unit import fake_constants as fake from cinder.tests.unit import test from cinder.tests.unit import utils as tests_utils from cinder.tests.unit import volume as base import cinder.volume QUOTAS = quota.QUOTAS CONF = cfg.CONF OVER_SNAPSHOT_QUOTA_EXCEPTION = exception.OverQuota( overs=['snapshots'], usages = {'snapshots': {'reserved': 1, 'in_use': 9}}, quotas = {'gigabytes': 10, 'snapshots': 10}) def create_snapshot(volume_id, size=1, metadata=None, ctxt=None, **kwargs): """Create a snapshot object.""" metadata = metadata or {} snap = objects.Snapshot(ctxt or context.get_admin_context()) snap.volume_size = size snap.user_id = fake.USER_ID snap.project_id = fake.PROJECT_ID snap.volume_id = volume_id snap.status = fields.SnapshotStatus.CREATING if metadata is not None: snap.metadata = metadata snap.update(kwargs) snap.create() return snap @ddt.ddt class SnapshotTestCase(base.BaseVolumeTestCase): def setUp(self, *args, **kwargs): super(SnapshotTestCase, self).setUp() db.volume_type_create(self.context, v2_fakes.fake_default_type_get( fake.VOLUME_TYPE2_ID)) self.vol_type = db.volume_type_get_by_name(self.context, 'vol_type_name') @mock.patch('cinder.utils.clean_snapshot_file_locks') def test_delete_snapshot_frozen(self, mock_clean): service = tests_utils.create_service(self.context, {'frozen': True}) volume = tests_utils.create_volume(self.context, host=service.host) snapshot = tests_utils.create_snapshot(self.context, volume.id) self.assertRaises(exception.InvalidInput, self.volume_api.delete_snapshot, self.context, snapshot) mock_clean.assert_not_called() @ddt.data('create_snapshot', 'create_snapshot_force') def test_create_snapshot_frozen(self, method): service = tests_utils.create_service(self.context, {'frozen': True}) volume = tests_utils.create_volume(self.context, host=service.host) method = getattr(self.volume_api, method) self.assertRaises(exception.InvalidInput, method, self.context, volume, 'name', 'desc') def test_create_snapshot_driver_not_initialized(self): volume_src = tests_utils.create_volume(self.context, **self.volume_params) self.volume.create_volume(self.context, volume_src) snapshot_id = create_snapshot(volume_src['id'], size=volume_src['size'])['id'] snapshot_obj = objects.Snapshot.get_by_id(self.context, snapshot_id) self.volume.driver._initialized = False self.assertRaises(exception.DriverNotInitialized, self.volume.create_snapshot, self.context, snapshot_obj) # 
NOTE(flaper87): The volume status should be error. self.assertEqual(fields.SnapshotStatus.ERROR, snapshot_obj.status) # lets cleanup the mess self.volume.driver._initialized = True self.volume.delete_snapshot(self.context, snapshot_obj) self.volume.delete_volume(self.context, volume_src) @mock.patch('cinder.utils.clean_snapshot_file_locks') @mock.patch('cinder.tests.unit.fake_notifier.FakeNotifier._notify') def test_create_delete_snapshot(self, mock_notify, mock_clean): """Test snapshot can be created and deleted.""" volume = tests_utils.create_volume( self.context, availability_zone=CONF.storage_availability_zone, **self.volume_params) mock_notify.assert_not_called() self.volume.create_volume(self.context, volume) self.assert_notify_called(mock_notify, (['INFO', 'volume.create.start'], ['INFO', 'volume.create.end']), any_order=True) snapshot = create_snapshot(volume['id'], size=volume['size']) snapshot_id = snapshot.id self.volume.create_snapshot(self.context, snapshot) self.assertEqual( snapshot_id, objects.Snapshot.get_by_id(self.context, snapshot_id).id) self.assert_notify_called(mock_notify, (['INFO', 'volume.create.start'], ['INFO', 'volume.create.end'], ['INFO', 'snapshot.create.start'], ['INFO', 'snapshot.create.end']), any_order=True) self.volume.delete_snapshot(self.context, snapshot) mock_clean.assert_called_once_with(snapshot.id, self.volume.driver) self.assert_notify_called(mock_notify, (['INFO', 'volume.create.start'], ['INFO', 'volume.create.end'], ['INFO', 'snapshot.create.start'], ['INFO', 'snapshot.create.end'], ['INFO', 'snapshot.delete.start'], ['INFO', 'snapshot.delete.end']), any_order=True) snap = objects.Snapshot.get_by_id(context.get_admin_context( read_deleted='yes'), snapshot_id) self.assertEqual(fields.SnapshotStatus.DELETED, snap.status) self.assertRaises(exception.NotFound, db.snapshot_get, self.context, snapshot_id) self.volume.delete_volume(self.context, volume) def test_create_delete_snapshot_with_metadata(self): """Test snapshot can be created with metadata and deleted.""" test_meta = {'fake_key': 'fake_value'} volume = tests_utils.create_volume(self.context, **self.volume_params) snapshot = create_snapshot(volume['id'], size=volume['size'], metadata=test_meta) snapshot_id = snapshot.id result_dict = snapshot.metadata self.assertEqual(test_meta, result_dict) self.volume.delete_snapshot(self.context, snapshot) self.assertRaises(exception.NotFound, db.snapshot_get, self.context, snapshot_id) def test_delete_snapshot_another_cluster_fails(self): """Test delete of snapshot from another cluster fails.""" self.volume.cluster = 'mycluster' volume = tests_utils.create_volume(self.context, status='available', size=1, host=CONF.host + 'fake', cluster_name=self.volume.cluster) snapshot = create_snapshot(volume.id, size=volume.size) self.volume.delete_snapshot(self.context, snapshot) self.assertRaises(exception.NotFound, db.snapshot_get, self.context, snapshot.id) @mock.patch.object(db, 'snapshot_create', side_effect=exception.InvalidSnapshot( 'Create snapshot in db failed!')) def test_create_snapshot_failed_db_snapshot(self, mock_snapshot): """Test exception handling when create snapshot in db failed.""" test_volume = tests_utils.create_volume( self.context, status='available', host=CONF.host) volume_api = cinder.volume.api.API() self.assertRaises(exception.InvalidSnapshot, volume_api.create_snapshot, self.context, test_volume, 'fake_name', 'fake_description') @mock.patch('cinder.objects.volume.Volume.get_by_id') def 
test_create_snapshot_in_db_invalid_volume_status(self, mock_get): test_volume1 = tests_utils.create_volume( self.context, status='available', host=CONF.host) test_volume2 = tests_utils.create_volume( self.context, status='deleting', host=CONF.host) mock_get.return_value = test_volume2 volume_api = cinder.volume.api.API() self.assertRaises(exception.InvalidVolume, volume_api.create_snapshot_in_db, self.context, test_volume1, "fake_snapshot_name", "fake_description", False, {}, None, commit_quota=False) @mock.patch('cinder.objects.volume.Volume.get_by_id') def test_create_snapshot_in_db_invalid_metadata(self, mock_get): test_volume = tests_utils.create_volume( self.context, status='available', host=CONF.host) mock_get.return_value = test_volume volume_api = cinder.volume.api.API() with mock.patch.object(QUOTAS, 'add_volume_type_opts'), \ mock.patch.object(QUOTAS, 'reserve') as mock_reserve, \ mock.patch.object(QUOTAS, 'commit') as mock_commit: self.assertRaises(exception.InvalidInput, volume_api.create_snapshot_in_db, self.context, test_volume, "fake_snapshot_name", "fake_description", False, "fake_metadata", None, commit_quota=True) mock_reserve.assert_not_called() mock_commit.assert_not_called() def test_create_snapshot_failed_maintenance(self): """Test exception handling when create snapshot in maintenance.""" test_volume = tests_utils.create_volume( self.context, status='maintenance', host=CONF.host) volume_api = cinder.volume.api.API() self.assertRaises(exception.InvalidVolume, volume_api.create_snapshot, self.context, test_volume, 'fake_name', 'fake_description') @mock.patch.object(QUOTAS, 'commit', side_effect=exception.QuotaError( 'Snapshot quota commit failed!')) def test_create_snapshot_failed_quota_commit(self, mock_snapshot): """Test exception handling when snapshot quota commit failed.""" test_volume = tests_utils.create_volume( self.context, status='available', host=CONF.host) volume_api = cinder.volume.api.API() self.assertRaises(exception.QuotaError, volume_api.create_snapshot, self.context, test_volume, 'fake_name', 'fake_description') @mock.patch.object(QUOTAS, 'reserve', side_effect = OVER_SNAPSHOT_QUOTA_EXCEPTION) def test_create_snapshot_failed_quota_reserve(self, mock_reserve): """Test exception handling when snapshot quota reserve failed.""" test_volume = tests_utils.create_volume( self.context, status='available', host=CONF.host) volume_api = cinder.volume.api.API() self.assertRaises(exception.SnapshotLimitExceeded, volume_api.create_snapshot, self.context, test_volume, 'fake_name', 'fake_description') @mock.patch.object(QUOTAS, 'reserve', side_effect = OVER_SNAPSHOT_QUOTA_EXCEPTION) def test_create_snapshots_in_db_failed_quota_reserve(self, mock_reserve): """Test exception handling when snapshot quota reserve failed.""" test_volume = tests_utils.create_volume( self.context, status='available', host=CONF.host) volume_api = cinder.volume.api.API() self.assertRaises(exception.SnapshotLimitExceeded, volume_api.create_snapshots_in_db, self.context, [test_volume], 'fake_name', 'fake_description', fake.CONSISTENCY_GROUP_ID) def test_create_snapshot_failed_host_is_None(self): """Test exception handling when create snapshot and host is None.""" test_volume = tests_utils.create_volume( self.context, host=None) volume_api = cinder.volume.api.API() self.assertRaises(exception.InvalidVolume, volume_api.create_snapshot, self.context, test_volume, 'fake_name', 'fake_description') def test_create_snapshot_force(self): """Test snapshot in use can be created forcibly.""" instance_uuid 
= '12345678-1234-4678-1234-567812345678' # create volume and attach to the instance volume = tests_utils.create_volume(self.context, **self.volume_params) self.volume.create_volume(self.context, volume) values = {'volume_id': volume['id'], 'instance_uuid': instance_uuid, 'attach_status': fields.VolumeAttachStatus.ATTACHING, } attachment = db.volume_attach(self.context, values) db.volume_attached(self.context, attachment['id'], instance_uuid, None, '/dev/sda1') volume_api = cinder.volume.api.API() volume = volume_api.get(self.context, volume['id']) self.assertRaises(exception.InvalidVolume, volume_api.create_snapshot, self.context, volume, 'fake_name', 'fake_description') snapshot_ref = volume_api.create_snapshot_force(self.context, volume, 'fake_name', 'fake_description') snapshot_ref.destroy() db.volume_destroy(self.context, volume['id']) def test_create_snapshot_force_host(self): # create volume and attach to the host volume = tests_utils.create_volume(self.context, **self.volume_params) self.volume.create_volume(self.context, volume) values = {'volume_id': volume['id'], 'attached_host': 'fake_host', 'attach_status': fields.VolumeAttachStatus.ATTACHING, } attachment = db.volume_attach(self.context, values) db.volume_attached(self.context, attachment['id'], None, 'fake_host', '/dev/sda1') volume_api = cinder.volume.api.API() volume = volume_api.get(self.context, volume['id']) self.assertRaises(exception.InvalidVolume, volume_api.create_snapshot, self.context, volume, 'fake_name', 'fake_description') snapshot_ref = volume_api.create_snapshot_force(self.context, volume, 'fake_name', 'fake_description') snapshot_ref.destroy() db.volume_destroy(self.context, volume['id']) def test_create_snapshot_in_use(self): """Test snapshot in use can be created forcibly.""" instance_uuid = 'a14dc210-d43b-4792-a608-09fe0824de54' # create volume and attach to the instance volume = tests_utils.create_volume(self.context, **self.volume_params) self.volume.create_volume(self.context, volume) values = {'volume_id': volume['id'], 'instance_uuid': instance_uuid, 'attach_status': fields.VolumeAttachStatus.ATTACHING, } attachment = db.volume_attach(self.context, values) db.volume_attached(self.context, attachment['id'], instance_uuid, None, '/dev/sda1') volume_api = cinder.volume.api.API() volume = volume_api.get(self.context, volume['id']) self.assertRaises(exception.InvalidVolume, volume_api.create_snapshot, self.context, volume, 'fake_name', 'fake_description') snapshot_ref = volume_api.create_snapshot(self.context, volume, 'fake_name', 'fake_description', allow_in_use=True) snapshot_ref.destroy() db.volume_destroy(self.context, volume['id']) # create volume and attach to the host volume = tests_utils.create_volume(self.context, **self.volume_params) self.volume.create_volume(self.context, volume) values = {'volume_id': volume['id'], 'attached_host': 'fake_host', 'attach_status': fields.VolumeAttachStatus.ATTACHING, } attachment = db.volume_attach(self.context, values) db.volume_attached(self.context, attachment['id'], None, 'fake_host', '/dev/sda1') volume_api = cinder.volume.api.API() volume = volume_api.get(self.context, volume['id']) self.assertRaises(exception.InvalidVolume, volume_api.create_snapshot, self.context, volume, 'fake_name', 'fake_description') snapshot_ref = volume_api.create_snapshot(self.context, volume, 'fake_name', 'fake_description', allow_in_use=True) snapshot_ref.destroy() db.volume_destroy(self.context, volume['id']) @mock.patch('cinder.image.image_utils.qemu_img_info') def 
test_create_snapshot_from_bootable_volume(self, mock_qemu_info): """Test create snapshot from bootable volume.""" # create bootable volume from image volume = self._create_volume_from_image() volume_id = volume['id'] self.assertEqual('available', volume['status']) self.assertTrue(volume['bootable']) image_info = imageutils.QemuImgInfo() image_info.virtual_size = '1073741824' mock_qemu_info.return_value = image_info # get volume's volume_glance_metadata ctxt = context.get_admin_context() vol_glance_meta = db.volume_glance_metadata_get(ctxt, volume_id) self.assertTrue(bool(vol_glance_meta)) # create snapshot from bootable volume snap = create_snapshot(volume_id) self.volume.create_snapshot(ctxt, snap) # get snapshot's volume_glance_metadata snap_glance_meta = db.volume_snapshot_glance_metadata_get( ctxt, snap.id) self.assertTrue(bool(snap_glance_meta)) # ensure that volume's glance metadata is copied # to snapshot's glance metadata self.assertEqual(len(vol_glance_meta), len(snap_glance_meta)) vol_glance_dict = {x.key: x.value for x in vol_glance_meta} snap_glance_dict = {x.key: x.value for x in snap_glance_meta} self.assertDictEqual(vol_glance_dict, snap_glance_dict) # ensure that snapshot's status is changed to 'available' self.assertEqual(fields.SnapshotStatus.AVAILABLE, snap.status) # cleanup resource snap.destroy() db.volume_destroy(ctxt, volume_id) @mock.patch('cinder.image.image_utils.qemu_img_info') def test_create_snapshot_from_bootable_volume_fail(self, mock_qemu_info): """Test create snapshot from bootable volume. But it fails to volume_glance_metadata_copy_to_snapshot. As a result, status of snapshot is changed to ERROR. """ # create bootable volume from image volume = self._create_volume_from_image() volume_id = volume['id'] self.assertEqual('available', volume['status']) self.assertTrue(volume['bootable']) image_info = imageutils.QemuImgInfo() image_info.virtual_size = '1073741824' mock_qemu_info.return_value = image_info # get volume's volume_glance_metadata ctxt = context.get_admin_context() vol_glance_meta = db.volume_glance_metadata_get(ctxt, volume_id) self.assertTrue(bool(vol_glance_meta)) snap = create_snapshot(volume_id) self.assertEqual(36, len(snap.id)) # dynamically-generated UUID self.assertEqual('creating', snap.status) # set to return DB exception with mock.patch.object(db, 'volume_glance_metadata_copy_to_snapshot')\ as mock_db: mock_db.side_effect = exception.MetadataCopyFailure( reason="Because of DB service down.") # create snapshot from bootable volume self.assertRaises(exception.MetadataCopyFailure, self.volume.create_snapshot, ctxt, snap) # get snapshot's volume_glance_metadata self.assertRaises(exception.GlanceMetadataNotFound, db.volume_snapshot_glance_metadata_get, ctxt, snap.id) # ensure that status of snapshot is 'error' self.assertEqual(fields.SnapshotStatus.ERROR, snap.status) # cleanup resource snap.destroy() db.volume_destroy(ctxt, volume_id) def test_create_snapshot_from_bootable_volume_with_volume_metadata_none( self): volume = tests_utils.create_volume(self.context, **self.volume_params) volume_id = volume['id'] self.volume.create_volume(self.context, volume) # set bootable flag of volume to True db.volume_update(self.context, volume_id, {'bootable': True}) snapshot = create_snapshot(volume['id']) self.volume.create_snapshot(self.context, snapshot) self.assertRaises(exception.GlanceMetadataNotFound, db.volume_snapshot_glance_metadata_get, self.context, snapshot.id) # ensure that status of snapshot is 'available' 
self.assertEqual(fields.SnapshotStatus.AVAILABLE, snapshot.status) # cleanup resource snapshot.destroy() db.volume_destroy(self.context, volume_id) def test_create_snapshot_during_encryption_key_migration(self): fixed_key_id = '00000000-0000-0000-0000-000000000000' volume = tests_utils.create_volume(self.context, **self.volume_params) volume['encryption_key_id'] = fixed_key_id volume_id = volume['id'] self.volume.create_volume(self.context, volume) kwargs = {'encryption_key_id': fixed_key_id} snapshot = create_snapshot(volume['id'], **kwargs) self.assertEqual(fixed_key_id, snapshot.encryption_key_id) db.volume_update(self.context, volume_id, {'encryption_key_id': fake.ENCRYPTION_KEY_ID}) self.volume.create_snapshot(self.context, snapshot) snap_db = db.snapshot_get(self.context, snapshot.id) self.assertEqual(fake.ENCRYPTION_KEY_ID, snap_db.encryption_key_id) # cleanup resource snapshot.destroy() db.volume_destroy(self.context, volume_id) @mock.patch('cinder.utils.clean_snapshot_file_locks') def test_delete_busy_snapshot(self, mock_clean): """Test snapshot can be created and deleted.""" self.volume.driver.vg = fake_lvm.FakeBrickLVM('cinder-volumes', False, None, 'default') volume = tests_utils.create_volume(self.context, **self.volume_params) volume_id = volume['id'] self.volume.create_volume(self.context, volume) snapshot = create_snapshot(volume_id, size=volume['size']) self.volume.create_snapshot(self.context, snapshot) with mock.patch.object(self.volume.driver, 'delete_snapshot', side_effect=exception.SnapshotIsBusy( snapshot_name='fake') ) as mock_del_snap: snapshot_id = snapshot.id self.volume.delete_snapshot(self.context, snapshot) snapshot_ref = objects.Snapshot.get_by_id(self.context, snapshot_id) self.assertEqual(snapshot_id, snapshot_ref.id) self.assertEqual(fields.SnapshotStatus.AVAILABLE, snapshot_ref.status) mock_del_snap.assert_called_once_with(snapshot) mock_clean.assert_not_called() @mock.patch('cinder.utils.clean_snapshot_file_locks') @test.testtools.skipIf(sys.platform == "darwin", "SKIP on OSX") def test_delete_no_dev_fails(self, mock_clean): """Test delete snapshot with no dev file fails.""" self.mock_object(os.path, 'exists', lambda x: False) self.volume.driver.vg = fake_lvm.FakeBrickLVM('cinder-volumes', False, None, 'default') volume = tests_utils.create_volume(self.context, **self.volume_params) volume_id = volume['id'] self.volume.create_volume(self.context, volume) snapshot = create_snapshot(volume_id) snapshot_id = snapshot.id self.volume.create_snapshot(self.context, snapshot) with mock.patch.object(self.volume.driver, 'delete_snapshot', side_effect=exception.SnapshotIsBusy( snapshot_name='fake')) as mock_del_snap: self.volume.delete_snapshot(self.context, snapshot) snapshot_ref = objects.Snapshot.get_by_id(self.context, snapshot_id) self.assertEqual(snapshot_id, snapshot_ref.id) self.assertEqual(fields.SnapshotStatus.AVAILABLE, snapshot_ref.status) mock_del_snap.assert_called_once_with(snapshot) mock_clean.assert_not_called() def test_force_delete_snapshot(self): """Test snapshot can be forced to delete.""" fake_volume = tests_utils.create_volume(self.context) fake_snapshot = tests_utils.create_snapshot(self.context, fake_volume.id, status='error_deleting') # 'error_deleting' snapshot can't be deleted self.assertRaises(exception.InvalidSnapshot, self.volume_api.delete_snapshot, self.context, fake_snapshot) # delete with force self.volume_api.delete_snapshot(self.context, fake_snapshot, force=True) # status is deleting fake_snapshot.refresh() 
self.assertEqual(fields.SnapshotStatus.DELETING, fake_snapshot.status) def test_volume_api_update_snapshot(self): # create raw snapshot volume = tests_utils.create_volume(self.context, **self.volume_params) snapshot = create_snapshot(volume['id']) snapshot_id = snapshot.id self.assertIsNone(snapshot.display_name) # use volume.api to update name volume_api = cinder.volume.api.API() update_dict = {'display_name': 'test update name'} volume_api.update_snapshot(self.context, snapshot, update_dict) # read changes from db snap = objects.Snapshot.get_by_id(context.get_admin_context(), snapshot_id) self.assertEqual('test update name', snap.display_name) @mock.patch.object(QUOTAS, 'reserve', side_effect = OVER_SNAPSHOT_QUOTA_EXCEPTION) def test_existing_snapshot_failed_quota_reserve(self, mock_reserve): vol = tests_utils.create_volume(self.context) snap = tests_utils.create_snapshot(self.context, vol.id) with mock.patch.object( self.volume.driver, 'manage_existing_snapshot_get_size') as mock_get_size: mock_get_size.return_value = 1 self.assertRaises(exception.SnapshotLimitExceeded, self.volume.manage_existing_snapshot, self.context, snap) @mock.patch('cinder.utils.clean_snapshot_file_locks') def test_delete_snapshot_driver_not_initialized(self, mock_clean): volume = tests_utils.create_volume(self.context, **self.volume_params) snapshot = tests_utils.create_snapshot(self.context, volume.id) self.volume.driver._initialized = False self.assertRaises(exception.DriverNotInitialized, self.volume.delete_snapshot, self.context, snapshot) snapshot.refresh() self.assertEqual(fields.SnapshotStatus.ERROR_DELETING, snapshot.status) mock_clean.assert_not_called() @ddt.data({'all_tenants': '1', 'name': 'snap1'}, {'all_tenants': 'true', 'name': 'snap1'}, {'all_tenants': 'false', 'name': 'snap1'}, {'all_tenants': '0', 'name': 'snap1'}, {'name': 'snap1'}) @mock.patch.object(objects, 'SnapshotList') @mock.patch.object(context.RequestContext, 'authorize') def test_get_all_snapshots_non_admin(self, search_opts, auth_mock, snaplist_mock): ctxt = context.RequestContext(user_id=None, is_admin=False, project_id=mock.sentinel.project_id, read_deleted='no', overwrite=False) volume_api = cinder.volume.api.API() res = volume_api.get_all_snapshots(ctxt, search_opts, mock.sentinel.marker, mock.sentinel.limit, mock.sentinel.sort_keys, mock.sentinel.sort_dirs, mock.sentinel.offset) auth_mock.assert_called_once_with( cinder.volume.api.snapshot_policy.GET_ALL_POLICY) snaplist_mock.get_all.assert_not_called() snaplist_mock.get_all_by_project.assert_called_once_with( ctxt, mock.sentinel.project_id, {'name': 'snap1'}, mock.sentinel.marker, mock.sentinel.limit, mock.sentinel.sort_keys, mock.sentinel.sort_dirs, mock.sentinel.offset) self.assertEqual(snaplist_mock.get_all_by_project.return_value, res) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/volume/test_volume.py0000664000175000017500000050344600000000000022717 0ustar00zuulzuul00000000000000# Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Tests for Volume Code.""" import datetime import enum import io import time from unittest import mock import castellan from castellan.common import exception as castellan_exception from castellan import key_manager import ddt import eventlet import os_brick.initiator.connectors.iscsi from oslo_concurrency import processutils from oslo_config import cfg from oslo_utils.fixture import uuidsentinel as uuids from oslo_utils import imageutils from taskflow.engines.action_engine import engine from cinder.api import common from cinder import context from cinder import coordination from cinder import db from cinder import exception from cinder.message import message_field from cinder import objects from cinder.objects import fields from cinder.policies import volumes as vol_policy from cinder import quota from cinder.tests import fake_driver from cinder.tests.unit.api.v2 import fakes as v2_fakes from cinder.tests.unit import conf_fixture from cinder.tests.unit import fake_constants as fake from cinder.tests.unit import fake_snapshot from cinder.tests.unit import fake_volume from cinder.tests.unit.keymgr import fake as fake_keymgr from cinder.tests.unit import utils as tests_utils from cinder.tests.unit import volume as base from cinder import utils import cinder.volume from cinder.volume import driver from cinder.volume import manager as vol_manager from cinder.volume import rpcapi as volume_rpcapi import cinder.volume.targets.tgt from cinder.volume import volume_types QUOTAS = quota.QUOTAS CONF = cfg.CONF ENCRYPTION_PROVIDER = 'nova.volume.encryptors.cryptsetup.CryptsetupEncryptor' fake_opt = [ cfg.StrOpt('fake_opt1', default='fake', help='fake opts') ] def create_snapshot(volume_id, size=1, metadata=None, ctxt=None, **kwargs): """Create a snapshot object.""" metadata = metadata or {} snap = objects.Snapshot(ctxt or context.get_admin_context()) snap.volume_size = size snap.user_id = fake.USER_ID snap.project_id = fake.PROJECT_ID snap.volume_id = volume_id snap.status = fields.SnapshotStatus.CREATING if metadata is not None: snap.metadata = metadata snap.update(kwargs) snap.create() return snap class KeyObject(object): def get_encoded(self): return "asdf".encode('utf-8') class KeyObject2(object): def get_encoded(self): return "qwert".encode('utf-8') @ddt.ddt class VolumeTestCase(base.BaseVolumeTestCase): def setUp(self): super(VolumeTestCase, self).setUp() self.patch('cinder.volume.volume_utils.clear_volume', autospec=True) self.expected_status = 'available' self.service_id = 1 self.user_context = context.RequestContext(user_id=fake.USER_ID, project_id=fake.PROJECT_ID) elevated = context.get_admin_context() db.volume_type_create(elevated, v2_fakes.fake_default_type_get( id=fake.VOLUME_TYPE2_ID)) self.vol_type = db.volume_type_get_by_name(elevated, '__DEFAULT__') self._setup_volume_types() def _create_volume(self, context, **kwargs): return tests_utils.create_volume( context, volume_type_id=volume_types.get_default_volume_type()['id'], **kwargs) @mock.patch('cinder.objects.service.Service.get_minimum_rpc_version') @mock.patch('cinder.objects.service.Service.get_minimum_obj_version') 
@mock.patch('cinder.rpc.LAST_RPC_VERSIONS', {'cinder-scheduler': '1.3'}) def test_reset(self, get_min_obj, get_min_rpc): old_version = objects.base.OBJ_VERSIONS.versions[-2] with mock.patch('cinder.rpc.LAST_OBJ_VERSIONS', {'cinder-scheduler': old_version}): vol_mgr = vol_manager.VolumeManager() scheduler_rpcapi = vol_mgr.scheduler_rpcapi self.assertEqual('1.3', scheduler_rpcapi.client.version_cap) self.assertEqual(old_version, scheduler_rpcapi.client.serializer._base.version_cap) get_min_obj.return_value = self.latest_ovo_version vol_mgr.reset() scheduler_rpcapi = vol_mgr.scheduler_rpcapi self.assertEqual(get_min_rpc.return_value, scheduler_rpcapi.client.version_cap) self.assertEqual(get_min_obj.return_value, scheduler_rpcapi.client.serializer._base.version_cap) self.assertIsNone(scheduler_rpcapi.client.serializer._base.manifest) @mock.patch('oslo_utils.importutils.import_object') def test_backend_availability_zone(self, mock_import_object): # NOTE(smcginnis): This isn't really the best place for this test, # but we don't currently have a pure VolumeManager test class. So # until we create a good suite for that class, putting here with # other tests that use VolumeManager. opts = { 'backend_availability_zone': 'caerbannog' } def conf_get(option): if option in opts: return opts[option] return None mock_driver = mock.Mock() mock_driver.configuration.safe_get.side_effect = conf_get mock_driver.configuration.extra_capabilities = 'null' def import_obj(*args, **kwargs): return mock_driver mock_import_object.side_effect = import_obj manager = vol_manager.VolumeManager(volume_driver=mock_driver) self.assertIsNotNone(manager) self.assertEqual(opts['backend_availability_zone'], manager.availability_zone) @mock.patch('cinder.volume.manager.VolumeManager._append_volume_stats', mock.Mock()) @mock.patch.object(vol_manager.VolumeManager, 'update_service_capabilities') def test_report_filter_goodness_function(self, mock_update): manager = vol_manager.VolumeManager() manager.driver.set_initialized() myfilterfunction = "myFilterFunction" mygoodnessfunction = "myGoodnessFunction" expected = {'name': 'cinder-volumes', 'storage_protocol': 'iSCSI', 'cacheable': True, 'filter_function': myfilterfunction, 'goodness_function': mygoodnessfunction, } with mock.patch.object(manager.driver, 'get_volume_stats') as m_get_stats: with mock.patch.object(manager.driver, 'get_goodness_function') as m_get_goodness: with mock.patch.object(manager.driver, 'get_filter_function') as m_get_filter: m_get_stats.return_value = {'name': 'cinder-volumes', 'storage_protocol': 'iSCSI', } m_get_filter.return_value = myfilterfunction m_get_goodness.return_value = mygoodnessfunction manager._report_driver_status(context.get_admin_context()) self.assertTrue(m_get_stats.called) mock_update.assert_called_once_with(expected) def test_is_working(self): # By default we have driver mocked to be initialized... self.assertTrue(self.volume.is_working()) # ...lets switch it and check again! 
self.volume.driver._initialized = False self.assertFalse(self.volume.is_working()) def _create_min_max_size_dict(self, min_size, max_size): return {volume_types.MIN_SIZE_KEY: min_size, volume_types.MAX_SIZE_KEY: max_size} def _setup_volume_types(self): """Creates 2 types, one with size limits, one without.""" spec_dict = self._create_min_max_size_dict(2, 4) sized_vol_type_dict = {'name': 'limit', 'extra_specs': spec_dict} db.volume_type_create(self.context, sized_vol_type_dict) self.sized_vol_type = db.volume_type_get_by_name( self.context, sized_vol_type_dict['name']) unsized_vol_type_dict = {'name': 'unsized', 'extra_specs': {}} db.volume_type_create(context.get_admin_context(), unsized_vol_type_dict) self.unsized_vol_type = db.volume_type_get_by_name( self.context, unsized_vol_type_dict['name']) @mock.patch('cinder.tests.unit.fake_notifier.FakeNotifier._notify') @mock.patch.object(QUOTAS, 'reserve') @mock.patch.object(QUOTAS, 'commit') @mock.patch.object(QUOTAS, 'rollback') def test_create_driver_not_initialized(self, reserve, commit, rollback, mock_notify): self.volume.driver._initialized = False def fake_reserve(context, expire=None, project_id=None, **deltas): return ["RESERVATION"] def fake_commit_and_rollback(context, reservations, project_id=None): pass reserve.return_value = fake_reserve commit.return_value = fake_commit_and_rollback rollback.return_value = fake_commit_and_rollback volume = tests_utils.create_volume( self.context, availability_zone=CONF.storage_availability_zone, **self.volume_params) volume_id = volume['id'] self.assertIsNone(volume['encryption_key_id']) self.assertRaises(exception.DriverNotInitialized, self.volume.create_volume, self.context, volume) volume = db.volume_get(context.get_admin_context(), volume_id) self.assertEqual("error", volume.status) db.volume_destroy(context.get_admin_context(), volume_id) def test_create_driver_not_initialized_rescheduling(self): self.volume.driver._initialized = False mock_delete = self.mock_object(self.volume.driver, 'delete_volume') volume = tests_utils.create_volume( self.context, availability_zone=CONF.storage_availability_zone, **self.volume_params) volume_id = volume['id'] self.assertRaises(exception.DriverNotInitialized, self.volume.create_volume, self.context, volume, {'volume_properties': self.volume_params}, {'retry': {'num_attempts': 1, 'host': []}}) # NOTE(dulek): Volume should be rescheduled as we passed request_spec # and filter_properties, assert that it wasn't counted in # allocated_capacity tracking. self.assertEqual({'_pool0': {'allocated_capacity_gb': 0}}, self.volume.stats['pools']) # NOTE(dulek): As we've rescheduled, make sure delete_volume was # called. self.assertTrue(mock_delete.called) db.volume_destroy(context.get_admin_context(), volume_id) def test_create_non_cinder_exception_rescheduling(self): params = self.volume_params del params['host'] volume = tests_utils.create_volume( self.context, availability_zone=CONF.storage_availability_zone, **params) volume_id = volume['id'] with mock.patch.object(self.volume.driver, 'create_volume', side_effect=processutils.ProcessExecutionError): self.assertRaises(processutils.ProcessExecutionError, self.volume.create_volume, self.context, volume, {'volume_properties': params}, {'retry': {'num_attempts': 1, 'host': []}}) # NOTE(dulek): Volume should be rescheduled as we passed request_spec # and filter_properties, assert that it wasn't counted in # allocated_capacity tracking. 
self.assertEqual({'_pool0': {'allocated_capacity_gb': 0}}, self.volume.stats['pools']) db.volume_destroy(context.get_admin_context(), volume_id) @mock.patch('cinder.tests.unit.fake_notifier.FakeNotifier._notify') @mock.patch.object(QUOTAS, 'rollback') @mock.patch.object(QUOTAS, 'commit') @mock.patch.object(QUOTAS, 'reserve') def test_delete_driver_not_initialized(self, reserve, commit, rollback, mock_notify): self.volume.driver._initialized = False def fake_reserve(context, expire=None, project_id=None, **deltas): return ["RESERVATION"] def fake_commit_and_rollback(context, reservations, project_id=None): pass reserve.return_value = fake_reserve commit.return_value = fake_commit_and_rollback rollback.return_value = fake_commit_and_rollback volume = tests_utils.create_volume( self.context, availability_zone=CONF.storage_availability_zone, **self.volume_params) self.assertIsNone(volume['encryption_key_id']) self.assertRaises(exception.DriverNotInitialized, self.volume.delete_volume, self.context, volume) volume = objects.Volume.get_by_id(self.context, volume.id) self.assertEqual("error_deleting", volume.status) volume.destroy() @ddt.data(True, False) @mock.patch('cinder.utils.clean_volume_file_locks') @mock.patch('cinder.tests.unit.fake_notifier.FakeNotifier._notify') @mock.patch('cinder.quota.QUOTAS.rollback', new=mock.Mock()) @mock.patch('cinder.quota.QUOTAS.commit') @mock.patch('cinder.quota.QUOTAS.reserve', return_value=['RESERVATION']) def test_create_delete_volume(self, boolean, _mock_reserve, commit_mock, mock_notify, mock_clean): """Test volume can be created and deleted.""" mock_shares = self.mock_object(self.volume, '_driver_shares_targets', return_value=boolean) volume = tests_utils.create_volume( self.context, availability_zone=CONF.storage_availability_zone, **self.volume_params) volume_id = volume['id'] self.assertIsNone(volume['encryption_key_id']) self.volume.create_volume(self.context, volume) mock_shares.assert_called_once_with() self.assertIs(boolean, volume.shared_targets) self.assert_notify_called(mock_notify, (['INFO', 'volume.create.start'], ['INFO', 'volume.create.end']), any_order=True) self.assertEqual({'_pool0': {'allocated_capacity_gb': 1}}, self.volume.stats['pools']) # Confirm delete_volume handles use_quota field volume.use_quota = boolean volume.save() # Need to save to DB because of the refresh call commit_mock.reset_mock() _mock_reserve.reset_mock() mock_notify.reset_mock() self.volume.delete_volume(self.context, volume) vol = db.volume_get(context.get_admin_context(read_deleted='yes'), volume_id) self.assertEqual(vol['status'], 'deleted') if boolean: expected_capacity = 0 self.assert_notify_called(mock_notify, (['INFO', 'volume.delete.start'], ['INFO', 'volume.delete.end']), any_order=True) self.assertEqual(1, _mock_reserve.call_count) self.assertEqual(1, commit_mock.call_count) else: expected_capacity = 1 mock_notify.assert_not_called() _mock_reserve.assert_not_called() commit_mock.assert_not_called() self.assertEqual( {'_pool0': {'allocated_capacity_gb': expected_capacity}}, self.volume.stats['pools']) self.assertRaises(exception.NotFound, db.volume_get, self.context, volume_id) mock_clean.assert_called_once_with(volume_id, self.volume.driver) @mock.patch('cinder.tests.unit.fake_notifier.FakeNotifier._notify') @mock.patch('cinder.quota.QUOTAS.rollback') @mock.patch('cinder.quota.QUOTAS.commit') @mock.patch('cinder.quota.QUOTAS.reserve', return_value=['RESERVATION']) def test_delete_migrating_volume(self, reserve_mock, commit_mock, rollback_mock, 
notify_mock): """Test volume can be created and deleted.""" volume = tests_utils.create_volume( self.context, availability_zone=CONF.storage_availability_zone, migration_status='target:123', use_quota=False, **self.volume_params) volume_id = volume['id'] self.volume.delete_volume(self.context, volume) vol = db.volume_get(context.get_admin_context(read_deleted='yes'), volume_id) self.assertEqual(vol['status'], 'deleted') # For migration's temp volume we don't notify or do any quota notify_mock.assert_not_called() rollback_mock.assert_not_called() commit_mock.assert_not_called() reserve_mock.assert_not_called() def test_create_delete_volume_with_metadata(self): """Test volume can be created with metadata and deleted.""" test_meta = {'fake_key': 'fake_value'} volume = tests_utils.create_volume(self.context, metadata=test_meta, **self.volume_params) volume_id = volume['id'] self.volume.create_volume(self.context, volume) self.assertEqual(test_meta, volume.metadata) self.volume.delete_volume(self.context, volume) self.assertRaises(exception.NotFound, db.volume_get, self.context, volume_id) @mock.patch('cinder.utils.clean_volume_file_locks') def test_delete_volume_frozen(self, mock_clean): service = tests_utils.create_service(self.context, {'frozen': True}) volume = tests_utils.create_volume(self.context, host=service.host) self.assertRaises(exception.InvalidInput, self.volume_api.delete, self.context, volume) mock_clean.assert_not_called() def test_delete_volume_another_cluster_fails(self): """Test delete of volume from another cluster fails.""" self.volume.cluster = 'mycluster' volume = tests_utils.create_volume(self.context, status='available', size=1, host=CONF.host + 'fake', cluster_name=self.volume.cluster) self.volume.delete_volume(self.context, volume) self.assertRaises(exception.NotFound, db.volume_get, self.context, volume.id) @mock.patch('cinder.db.volume_metadata_update') def test_create_volume_metadata(self, metadata_update): metadata = {'fake_key': 'fake_value'} metadata_update.return_value = metadata volume = tests_utils.create_volume(self.context, **self.volume_params) res = self.volume_api.create_volume_metadata(self.context, volume, metadata) metadata_update.assert_called_once_with(self.context, volume.id, metadata, False, common.METADATA_TYPES.user) self.assertEqual(metadata, res) @ddt.data('maintenance', 'uploading') def test_create_volume_metadata_maintenance(self, status): metadata = {'fake_key': 'fake_value'} volume = tests_utils.create_volume(self.context, **self.volume_params) volume['status'] = status self.assertRaises(exception.InvalidVolume, self.volume_api.create_volume_metadata, self.context, volume, metadata) def test_update_volume_metadata_with_metatype(self): """Test update volume metadata with different metadata type.""" test_meta1 = {'fake_key1': 'fake_value1'} test_meta2 = {'fake_key1': 'fake_value2'} FAKE_METADATA_TYPE = enum.Enum('METADATA_TYPES', 'fake_type') volume = tests_utils.create_volume(self.context, metadata=test_meta1, **self.volume_params) self.volume.create_volume(self.context, volume) # update user metadata associated with the volume. result_meta = self.volume_api.update_volume_metadata( self.context, volume, test_meta2, False, common.METADATA_TYPES.user) self.assertEqual(test_meta2, result_meta) # create image metadata associated with the volume. 
result_meta = self.volume_api.update_volume_metadata( self.context, volume, test_meta1, False, common.METADATA_TYPES.image) self.assertEqual(test_meta1, result_meta) # update image metadata associated with the volume. result_meta = self.volume_api.update_volume_metadata( self.context, volume, test_meta2, False, common.METADATA_TYPES.image) self.assertEqual(test_meta2, result_meta) # update volume metadata with invalid metadata type. self.assertRaises(exception.InvalidMetadataType, self.volume_api.update_volume_metadata, self.context, volume, test_meta1, False, FAKE_METADATA_TYPE.fake_type) def test_update_volume_metadata_maintenance(self): """Test update volume metadata in maintenance.""" test_meta1 = {'fake_key1': 'fake_value1'} FAKE_METADATA_TYPE = enum.Enum('METADATA_TYPES', 'fake_type') volume = tests_utils.create_volume(self.context, metadata=test_meta1, **self.volume_params) volume['status'] = 'maintenance' self.assertRaises(exception.InvalidVolume, self.volume_api.update_volume_metadata, self.context, volume, test_meta1, False, FAKE_METADATA_TYPE.fake_type) @mock.patch('cinder.db.volume_update') def test_update_with_ovo(self, volume_update): """Test update volume using oslo_versionedobject.""" volume = tests_utils.create_volume(self.context, **self.volume_params) updates = {'display_name': 'foobbar'} self.volume_api.update(self.context, volume, updates) volume_update.assert_called_once_with(self.context, volume.id, updates) self.assertEqual('foobbar', volume.display_name) def test_delete_volume_metadata_with_metatype(self): """Test delete volume metadata with different metadata type.""" test_meta1 = {'fake_key1': 'fake_value1', 'fake_key2': 'fake_value2'} test_meta2 = {'fake_key1': 'fake_value1'} FAKE_METADATA_TYPE = enum.Enum('METADATA_TYPES', 'fake_type') volume = tests_utils.create_volume(self.context, metadata=test_meta1, **self.volume_params) volume_id = volume['id'] self.volume.create_volume(self.context, volume) # delete user metadata associated with the volume. self.volume_api.delete_volume_metadata( self.context, volume, 'fake_key2', common.METADATA_TYPES.user) self.assertEqual(test_meta2, db.volume_metadata_get(self.context, volume_id)) # create image metadata associated with the volume. result_meta = self.volume_api.update_volume_metadata( self.context, volume, test_meta1, False, common.METADATA_TYPES.image) self.assertEqual(test_meta1, result_meta) # delete image metadata associated with the volume. self.volume_api.delete_volume_metadata( self.context, volume, 'fake_key2', common.METADATA_TYPES.image) # parse the result to build the dict. rows = db.volume_glance_metadata_get(self.context, volume_id) result = {} for row in rows: result[row['key']] = row['value'] self.assertEqual(test_meta2, result) # delete volume metadata with invalid metadata type.
self.assertRaises(exception.InvalidMetadataType, self.volume_api.delete_volume_metadata, self.context, volume, 'fake_key1', FAKE_METADATA_TYPE.fake_type) @mock.patch('cinder.utils.clean_volume_file_locks') def test_delete_volume_metadata_maintenance(self, mock_clean): """Test delete volume metadata in maintenance.""" FAKE_METADATA_TYPE = enum.Enum('METADATA_TYPES', 'fake_type') test_meta1 = {'fake_key1': 'fake_value1', 'fake_key2': 'fake_value2'} volume = tests_utils.create_volume(self.context, metadata=test_meta1, **self.volume_params) volume['status'] = 'maintenance' self.assertRaises(exception.InvalidVolume, self.volume_api.delete_volume_metadata, self.context, volume, 'fake_key1', FAKE_METADATA_TYPE.fake_type) mock_clean.assert_not_called() def test_accept_transfer_maintenance(self): """Test accept transfer in maintenance.""" test_meta1 = {'fake_key1': 'fake_value1', 'fake_key2': 'fake_value2'} volume = tests_utils.create_volume(self.context, metadata=test_meta1, **self.volume_params) volume['status'] = 'maintenance' volume_api = cinder.volume.api.API() self.assertRaises(exception.InvalidVolume, volume_api.accept_transfer, self.context, volume, None, None) @mock.patch.object(cinder.volume.api.API, 'list_availability_zones') def test_create_volume_uses_default_availability_zone(self, mock_list_az): """Test setting availability_zone correctly during volume create.""" mock_list_az.return_value = ({'name': 'az1', 'available': True}, {'name': 'az2', 'available': True}, {'name': 'default-az', 'available': True}) volume_api = cinder.volume.api.API() # Test backwards compatibility, default_availability_zone not set self.override_config('storage_availability_zone', 'az2') volume = volume_api.create(self.context, 1, 'name', 'description', volume_type=self.vol_type) self.assertEqual('az2', volume['availability_zone']) self.override_config('default_availability_zone', 'default-az') volume = volume_api.create(self.context, 1, 'name', 'description', volume_type=self.vol_type) self.assertEqual('default-az', volume['availability_zone']) def test_create_volume_with_default_type_misconfigured(self): """Test volume creation with non-existent default volume type.""" volume_api = cinder.volume.api.API() self.flags(default_volume_type='fake_type') # Create volume with default volume type while default # volume type doesn't exist self.assertRaises(exception.VolumeTypeDefaultMisconfiguredError, volume_api.create, self.context, 1, 'name', 'description') @mock.patch('cinder.quota.QUOTAS.rollback', new=mock.MagicMock()) @mock.patch('cinder.quota.QUOTAS.commit', new=mock.MagicMock()) @mock.patch('cinder.quota.QUOTAS.reserve', return_value=["RESERVATION"]) def test_create_volume_with_volume_type(self, _mock_reserve): """Test volume creation with default volume type.""" volume_api = cinder.volume.api.API() # Create volume with default volume type while default # volume type doesn't exist, volume_type_id should be NULL volume = volume_api.create(self.context, 1, 'name', 'description', volume_type=self.vol_type) self.assertIsNone(volume['encryption_key_id']) # Create default volume type vol_type = conf_fixture.def_vol_type db_vol_type = db.volume_type_get_by_name(context.get_admin_context(), vol_type) # Create volume with default volume type volume = volume_api.create(self.context, 1, 'name', 'description') self.assertEqual(db_vol_type.get('id'), volume['volume_type_id']) self.assertIsNone(volume['encryption_key_id']) # Create volume with specific volume type vol_type = 'test' 
db.volume_type_create(context.get_admin_context(), {'name': vol_type, 'extra_specs': {}}) db_vol_type = db.volume_type_get_by_name(context.get_admin_context(), vol_type) volume = volume_api.create(self.context, 1, 'name', 'description', volume_type=db_vol_type) self.assertEqual(db_vol_type.get('id'), volume['volume_type_id']) @mock.patch('cinder.quota.QUOTAS.rollback', new=mock.MagicMock()) @mock.patch('cinder.quota.QUOTAS.commit', new=mock.MagicMock()) @mock.patch('cinder.quota.QUOTAS.reserve', return_value=["RESERVATION"]) def test_create_volume_with_volume_type_size_limits(self, _mock_reserve): """Test that volume type size limits are enforced.""" volume_api = cinder.volume.api.API() volume = volume_api.create(self.context, 2, 'name', 'description', volume_type=self.sized_vol_type) self.assertEqual(self.sized_vol_type['id'], volume['volume_type_id']) self.assertRaises(exception.InvalidInput, volume_api.create, self.context, 1, 'name', 'description', volume_type=self.sized_vol_type) self.assertRaises(exception.InvalidInput, volume_api.create, self.context, 5, 'name', 'description', volume_type=self.sized_vol_type) def test_create_volume_with_multiattach_volume_type(self): """Test volume creation with multiattach volume type.""" elevated = context.get_admin_context() volume_api = cinder.volume.api.API() especs = dict(multiattach=" True") volume_types.create(elevated, "multiattach-type", especs, description="test-multiattach") foo = objects.VolumeType.get_by_name_or_id(elevated, "multiattach-type") vol = volume_api.create(self.context, 1, 'admin-vol', 'description', volume_type=foo) self.assertEqual(foo['id'], vol['volume_type_id']) self.assertTrue(vol['multiattach']) def _fail_multiattach_policy_authorize(self, policy): if policy == vol_policy.MULTIATTACH_POLICY: raise exception.PolicyNotAuthorized(action='Test') def test_create_volume_with_multiattach_volume_type_not_authorized(self): """Test policy unauthorized create with multiattach volume type.""" elevated = context.get_admin_context() volume_api = cinder.volume.api.API() especs = dict(multiattach=" True") volume_types.create(elevated, "multiattach-type", especs, description="test-multiattach") foo = objects.VolumeType.get_by_name_or_id(elevated, "multiattach-type") with mock.patch.object(self.context, 'authorize') as mock_auth: mock_auth.side_effect = self._fail_multiattach_policy_authorize self.assertRaises(exception.PolicyNotAuthorized, volume_api.create, self.context, 1, 'admin-vol', 'description', volume_type=foo) @mock.patch.object(key_manager, 'API', fake_keymgr.fake_api) def test_create_volume_with_encrypted_volume_type_multiattach(self): ctxt = context.get_admin_context() cipher = 'aes-xts-plain64' key_size = 256 control_location = 'front-end' db.volume_type_create(ctxt, {'id': '61298380-0c12-11e3-bfd6-4b48424183be', 'name': 'LUKS', 'extra_specs': {'multiattach': ' True'}}) db.volume_type_encryption_create( ctxt, '61298380-0c12-11e3-bfd6-4b48424183be', {'control_location': control_location, 'provider': ENCRYPTION_PROVIDER, 'cipher': cipher, 'key_size': key_size}) volume_api = cinder.volume.api.API() db_vol_type = db.volume_type_get_by_name(ctxt, 'LUKS') self.assertRaises(exception.InvalidVolume, volume_api.create, self.context, 1, 'name', 'description', volume_type=db_vol_type) @ddt.data({'cipher': 'blowfish-cbc', 'algo': 'blowfish', 'length': 32}, {'cipher': 'aes-xts-plain64', 'algo': 'aes', 'length': 256}) @ddt.unpack @mock.patch.object(key_manager, 'API', fake_keymgr.fake_api) def 
test_create_volume_with_encrypted_volume_types( self, cipher, algo, length): ctxt = context.get_admin_context() key_size = length control_location = 'front-end' db.volume_type_create(ctxt, {'id': '61298380-0c12-11e3-bfd6-4b48424183be', 'name': 'LUKS'}) db.volume_type_encryption_create( ctxt, '61298380-0c12-11e3-bfd6-4b48424183be', {'control_location': control_location, 'provider': ENCRYPTION_PROVIDER, 'cipher': cipher, 'key_size': key_size}) volume_api = cinder.volume.api.API() db_vol_type = db.volume_type_get_by_name(ctxt, 'LUKS') volume = volume_api.create(self.context, 1, 'name', 'description', volume_type=db_vol_type) key_manager = volume_api.key_manager key = key_manager.get(self.context, volume['encryption_key_id']) self.assertEqual(key_size, len(key.get_encoded()) * 8) self.assertEqual(algo, key.algorithm) metadata = db.volume_encryption_metadata_get(self.context, volume.id) self.assertEqual(db_vol_type.get('id'), volume['volume_type_id']) self.assertEqual(cipher, metadata.get('cipher')) self.assertEqual(key_size, metadata.get('key_size')) self.assertIsNotNone(volume['encryption_key_id']) def test_create_volume_with_provider_id(self): volume_params_with_provider_id = dict(provider_id=fake.PROVIDER_ID, **self.volume_params) volume = tests_utils.create_volume(self.context, **volume_params_with_provider_id) self.volume.create_volume(self.context, volume) self.assertEqual(fake.PROVIDER_ID, volume['provider_id']) def test_create_volume_with_admin_metadata(self): with mock.patch.object( self.volume.driver, 'create_volume', return_value={'admin_metadata': {'foo': 'bar'}}): volume = tests_utils.create_volume(self.user_context) self.volume.create_volume(self.user_context, volume) self.assertEqual({'foo': 'bar'}, volume['admin_metadata']) @mock.patch.object(key_manager, 'API', new=fake_keymgr.fake_api) def test_create_delete_volume_with_encrypted_volume_type(self): cipher = 'aes-xts-plain64' key_size = 256 db.volume_type_create(self.context, {'id': fake.VOLUME_TYPE_ID, 'name': 'LUKS'}) db.volume_type_encryption_create( self.context, fake.VOLUME_TYPE_ID, {'control_location': 'front-end', 'provider': ENCRYPTION_PROVIDER, 'cipher': cipher, 'key_size': key_size}) db_vol_type = db.volume_type_get_by_name(self.context, 'LUKS') volume = self.volume_api.create(self.context, 1, 'name', 'description', volume_type=db_vol_type) self.assertIsNotNone(volume.get('encryption_key_id', None)) self.assertEqual(db_vol_type.get('id'), volume['volume_type_id']) volume['host'] = 'fake_host' volume['status'] = 'available' db.volume_update(self.context, volume['id'], {'status': 'available'}) self.volume_api.delete(self.context, volume) volume = objects.Volume.get_by_id(self.context, volume.id) while volume.status == 'available': # Must wait for volume_api delete request to process enough to # change the volume status. 
time.sleep(0.5) volume.refresh() self.assertEqual('deleting', volume['status']) db.volume_destroy(self.context, volume['id']) self.assertRaises(exception.NotFound, db.volume_get, self.context, volume['id']) @mock.patch.object(key_manager, 'API', fake_keymgr.fake_api) def test_delete_encrypted_volume_fail_deleting_key(self): cipher = 'aes-xts-plain64' key_size = 256 db.volume_type_create(self.context, {'id': fake.VOLUME_TYPE_ID, 'name': 'LUKS'}) db.volume_type_encryption_create( self.context, fake.VOLUME_TYPE_ID, {'control_location': 'front-end', 'provider': ENCRYPTION_PROVIDER, 'cipher': cipher, 'key_size': key_size}) db_vol_type = db.volume_type_get_by_name(self.context, 'LUKS') volume = self.volume_api.create(self.context, 1, 'name', 'description', volume_type=db_vol_type) volume_id = volume['id'] volume['host'] = 'fake_host' volume['status'] = 'available' db.volume_update(self.context, volume_id, {'status': 'available'}) with mock.patch.object( self.volume_api.key_manager, 'delete', side_effect=Exception): self.assertRaises(exception.InvalidVolume, self.volume_api.delete, self.context, volume) volume = objects.Volume.get_by_id(self.context, volume_id) self.assertEqual("error_deleting", volume.status) volume.destroy() @mock.patch.object(key_manager, 'API', fake_keymgr.fake_api) def test_delete_encrypted_volume_key_not_found(self): cipher = 'aes-xts-plain64' key_size = 256 db.volume_type_create(self.context, {'id': fake.VOLUME_TYPE_ID, 'name': 'LUKS'}) db.volume_type_encryption_create( self.context, fake.VOLUME_TYPE_ID, {'control_location': 'front-end', 'provider': ENCRYPTION_PROVIDER, 'cipher': cipher, 'key_size': key_size}) db_vol_type = db.volume_type_get_by_name(self.context, 'LUKS') volume = self.volume_api.create(self.context, 1, 'name', 'description', volume_type=db_vol_type) volume_id = volume['id'] volume['host'] = 'fake_host' volume['status'] = 'available' db.volume_update(self.context, volume_id, {'status': 'available'}) with mock.patch.object( self.volume_api.key_manager, 'delete', side_effect=castellan_exception.ManagedObjectNotFoundError( uuid=fake.ENCRYPTION_KEY_ID)): self.volume_api.delete(self.context, volume) volume = objects.Volume.get_by_id(self.context, volume_id) self.assertEqual("deleting", volume.status) volume.destroy() @mock.patch('cinder.utils.clean_volume_file_locks') def test_delete_busy_volume(self, mock_clean): """Test volume survives deletion if driver reports it as busy.""" volume = tests_utils.create_volume(self.context, **self.volume_params) volume_id = volume['id'] self.volume.create_volume(self.context, volume) with mock.patch.object(self.volume.driver, 'delete_volume', side_effect=exception.VolumeIsBusy( volume_name='fake') ) as mock_del_vol: self.volume.delete_volume(self.context, volume) volume_ref = db.volume_get(context.get_admin_context(), volume_id) self.assertEqual(volume_id, volume_ref.id) self.assertEqual("available", volume_ref.status) mock_del_vol.assert_called_once_with(volume) mock_clean.assert_not_called() @mock.patch('cinder.utils.clean_volume_file_locks') def test_unmanage_encrypted_volume_fails(self, mock_clean): volume = tests_utils.create_volume( self.context, encryption_key_id=fake.ENCRYPTION_KEY_ID, **self.volume_params) self.volume.create_volume(self.context, volume) manager = vol_manager.VolumeManager() self.assertRaises(exception.Invalid, manager.delete_volume, self.context, volume, unmanage_only=True) mock_clean.assert_not_called() self.volume.delete_volume(self.context, volume) def test_unmanage_cascade_delete_fails(self): 
volume = tests_utils.create_volume( self.context, **self.volume_params) self.volume.create_volume(self.context, volume) manager = vol_manager.VolumeManager() self.assertRaises(exception.Invalid, manager.delete_volume, self.context, volume, unmanage_only=True, cascade=True) self.volume.delete_volume(self.context, volume) def test_get_volume_different_tenant(self): """Test can't get volume of another tenant when viewable_admin_meta.""" volume = tests_utils.create_volume(self.context, **self.volume_params) volume_id = volume['id'] self.volume.create_volume(self.context, volume) another_context = context.RequestContext('another_user_id', 'another_project_id', is_admin=False) self.assertNotEqual(another_context.project_id, self.context.project_id) volume_api = cinder.volume.api.API() self.assertRaises(exception.VolumeNotFound, volume_api.get, another_context, volume_id, viewable_admin_meta=True) self.assertEqual(volume_id, volume_api.get(self.context, volume_id)['id']) self.volume.delete_volume(self.context, volume) def test_get_all_limit_bad_value(self): """Test value of 'limit' is numeric and >= 0""" volume_api = cinder.volume.api.API() self.assertRaises(exception.InvalidInput, volume_api.get_all, self.context, limit="A") self.assertRaises(exception.InvalidInput, volume_api.get_all, self.context, limit="-1") def test_get_all_tenants_volume_list(self): """Validate when the volume list for all tenants is returned""" volume_api = cinder.volume.api.API() with mock.patch.object(volume_api.db, 'volume_get_all_by_project') as by_project: with mock.patch.object(volume_api.db, 'volume_get_all') as get_all: db_volume = {'volume_type_id': fake.VOLUME_TYPE_ID, 'name': 'fake_name', 'host': 'fake_host', 'id': fake.VOLUME_ID} volume = fake_volume.fake_db_volume(**db_volume) by_project.return_value = [volume] get_all.return_value = [volume] volume_api.get_all(self.context, filters={'all_tenants': '0'}) self.assertTrue(by_project.called) by_project.called = False self.context.is_admin = False volume_api.get_all(self.context, filters={'all_tenants': '1'}) self.assertTrue(by_project.called) # check for volume list of all tenants self.context.is_admin = True volume_api.get_all(self.context, filters={'all_tenants': '1'}) self.assertTrue(get_all.called) @mock.patch('cinder.utils.clean_volume_file_locks') def test_delete_volume_in_error_extending(self, mock_clean): """Test volume can be deleted in error_extending status.""" # create a volume volume = tests_utils.create_volume(self.context, **self.volume_params) self.volume.create_volume(self.context, volume) # delete 'error_extending' volume db.volume_update(self.context, volume['id'], {'status': 'error_extending'}) self.volume.delete_volume(self.context, volume) self.assertRaises(exception.NotFound, db.volume_get, self.context, volume['id']) mock_clean.assert_called_once_with(volume.id, self.volume.driver) @mock.patch('cinder.utils.clean_volume_file_locks') @mock.patch.object(db.sqlalchemy.api, 'volume_get', side_effect=exception.VolumeNotFound( volume_id='12345678-1234-5678-1234-567812345678')) def test_delete_volume_not_found(self, mock_get_volume, mock_clean): """Test delete volume moves on if the volume does not exist.""" volume_id = '12345678-1234-5678-1234-567812345678' volume = objects.Volume(self.context, status='available', id=volume_id) self.volume.delete_volume(self.context, volume) self.assertTrue(mock_get_volume.called) mock_clean.assert_called_once_with(volume_id, self.volume.driver) @mock.patch('cinder.volume.drivers.lvm.LVMVolumeDriver.'
'create_volume_from_snapshot') def test_create_volume_from_snapshot(self, mock_create_from_snap): """Test volume can be created from a snapshot.""" volume_src = tests_utils.create_volume(self.context, **self.volume_params) self.volume.create_volume(self.context, volume_src) snapshot_id = create_snapshot(volume_src['id'], size=volume_src['size'])['id'] snapshot_obj = objects.Snapshot.get_by_id(self.context, snapshot_id) self.volume.create_snapshot(self.context, snapshot_obj) volume_dst = tests_utils.create_volume(self.context, snapshot_id=snapshot_id, **self.volume_params) self.volume.create_volume(self.context, volume_dst) self.assertEqual(volume_dst['id'], db.volume_get( context.get_admin_context(), volume_dst['id']).id) self.assertEqual(snapshot_id, db.volume_get(context.get_admin_context(), volume_dst['id']).snapshot_id) self.volume.delete_volume(self.context, volume_dst) self.volume.delete_snapshot(self.context, snapshot_obj) self.volume.delete_volume(self.context, volume_src) @mock.patch('cinder.volume.flows.api.create_volume.get_flow') @mock.patch('cinder.objects.volume.Volume.get_by_id') def test_create_volume_from_snapshot_with_types( self, _get_by_id, _get_flow): """Test volume create from snapshot with types including mismatch.""" volume_api = cinder.volume.api.API() foo_type = fake_volume.fake_volume_type_obj( self.context, id=fake.VOLUME_TYPE_ID, name='foo', extra_specs={'volume_backend_name': 'dev_1'}) biz_type = fake_volume.fake_volume_type_obj( self.context, id=fake.VOLUME_TYPE2_ID, name='foo', extra_specs={'volume_backend_name': 'dev_2'}) source_vol = fake_volume.fake_volume_obj( self.context, id=fake.VOLUME_ID, status='available', volume_size=10, volume_type_id=biz_type.id) source_vol.volume_type = biz_type snapshot = {'id': fake.SNAPSHOT_ID, 'status': fields.SnapshotStatus.AVAILABLE, 'volume_size': 10, 'volume_type_id': biz_type.id} snapshot_obj = fake_snapshot.fake_snapshot_obj(self.context, **snapshot) snapshot_obj.volume = source_vol # Make sure the case of specifying a type that # doesn't match the snapshot's type fails self.assertRaises(exception.InvalidInput, volume_api.create, self.context, size=1, name='fake_name', description='fake_desc', volume_type=foo_type, snapshot=snapshot_obj) # Make sure that trying to specify a type # when the snapshot's type is None fails snapshot_obj.volume_type_id = None snapshot_obj.volume.volume_type_id = None snapshot_obj.volume.volume_type = None self.assertRaises(exception.InvalidInput, volume_api.create, self.context, size=1, name='fake_name', description='fake_desc', volume_type=foo_type, snapshot=snapshot_obj) snapshot_obj.volume_type_id = foo_type.id snapshot_obj.volume.volume_type_id = foo_type.id snapshot_obj.volume.volume_type = foo_type volume_api.create(self.context, size=1, name='fake_name', description='fake_desc', volume_type=foo_type, snapshot=snapshot_obj) @mock.patch('cinder.volume.flows.api.create_volume.get_flow') @mock.patch('cinder.objects.volume.Volume.get_by_id') def test_create_volume_from_source_with_types( self, _get_by_id, _get_flow): """Test volume create from source with types including mismatch.""" volume_api = cinder.volume.api.API() foo_type = fake_volume.fake_volume_type_obj( self.context, id=fake.VOLUME_TYPE_ID, name='foo', extra_specs={'volume_backend_name': 'dev_1'}) biz_type = fake_volume.fake_volume_type_obj( self.context, id=fake.VOLUME_TYPE2_ID, name='biz', extra_specs={'volume_backend_name': 'dev_2'}) source_vol = fake_volume.fake_volume_obj( self.context, id=fake.VOLUME_ID,
status='available', volume_size=0, volume_type_id=biz_type.id) source_vol.volume_type = biz_type self.assertRaises(exception.InvalidInput, volume_api.create, self.context, size=1, name='fake_name', description='fake_desc', volume_type=foo_type, source_volume=source_vol) # Make sure that trying to specify a type # when the source type is None fails source_vol.volume_type_id = None source_vol.volume_type = None self.assertRaises(exception.InvalidInput, volume_api.create, self.context, size=1, name='fake_name', description='fake_desc', volume_type=foo_type, source_volume=source_vol) source_vol.volume_type_id = biz_type.id source_vol.volume_type = biz_type volume_api.create(self.context, size=1, name='fake_name', description='fake_desc', volume_type=biz_type, source_volume=source_vol) @mock.patch('cinder.volume.flows.api.create_volume.get_flow') @mock.patch('cinder.objects.volume.Volume.get_by_id') def test_create_volume_from_source_with_same_backend( self, _get_by_id, _get_flow): """Test volume create from source with type mismatch same backend.""" volume_api = cinder.volume.api.API() foo_type = fake_volume.fake_volume_type_obj( self.context, id=fake.VOLUME_TYPE_ID, name='foo', qos_specs_id=None, deleted=False, created_at=datetime.datetime(2015, 5, 8, 0, 40, 5, 408232), updated_at=None, extra_specs={'volume_backend_name': 'dev_1'}, is_public=True, deleted_at=None, description=None) biz_type = fake_volume.fake_volume_type_obj( self.context, id=fake.VOLUME_TYPE2_ID, name='biz', qos_specs_id=None, deleted=False, created_at=datetime.datetime(2015, 5, 8, 0, 20, 5, 408232), updated_at=None, extra_specs={'volume_backend_name': 'dev_1'}, is_public=True, deleted_at=None, description=None) source_vol = fake_volume.fake_volume_obj( self.context, id=fake.VOLUME_ID, status='available', volume_size=10, volume_type_id=biz_type.id) source_vol.volume_type = biz_type volume_api.create(self.context, size=1, name='fake_name', description='fake_desc', volume_type=foo_type, source_volume=source_vol) @mock.patch('cinder.volume.flows.api.create_volume.get_flow') @mock.patch('cinder.objects.volume.Volume.get_by_id') def test_create_from_source_and_snap_only_one_backend( self, _get_by_id, _get_flow): """Test create from source and snap with type mismatch one backend.""" volume_api = cinder.volume.api.API() foo_type = fake_volume.fake_volume_type_obj( self.context, id=fake.VOLUME_TYPE_ID, name='foo', qos_specs_id=None, deleted=False, created_at=datetime.datetime(2015, 5, 8, 0, 40, 5, 408232), updated_at=None, extra_specs={'some_key': 3}, is_public=True, deleted_at=None, description=None) biz_type = fake_volume.fake_volume_type_obj( self.context, id=fake.VOLUME_TYPE2_ID, name='biz', qos_specs_id=None, deleted=False, created_at=datetime.datetime(2015, 5, 8, 0, 20, 5, 408232), updated_at=None, extra_specs={'some_other_key': 4}, is_public=True, deleted_at=None, description=None) source_vol = fake_volume.fake_volume_obj( self.context, id=fake.VOLUME_ID, status='available', volume_size=10, volume_type_id=biz_type.id) source_vol.volume_type = biz_type snapshot = {'id': fake.SNAPSHOT_ID, 'status': fields.SnapshotStatus.AVAILABLE, 'volume_size': 10, 'volume_type_id': biz_type['id']} snapshot_obj = fake_snapshot.fake_snapshot_obj(self.context, **snapshot) snapshot_obj.volume = source_vol with mock.patch('cinder.db.service_get_all') as mock_get_service, \ mock.patch.object(volume_api, 'list_availability_zones') as mock_get_azs: mock_get_service.return_value = [ {'host': 'foo', 'uuid': 'a3a593da-7f8d-4bb7-8b4c-f2bc1e0b4824'}] 
mock_get_azs.return_value = {} volume_api.create(self.context, size=1, name='fake_name', description='fake_desc', volume_type=foo_type, source_volume=source_vol) volume_api.create(self.context, size=1, name='fake_name', description='fake_desc', volume_type=foo_type, snapshot=snapshot_obj) def _test_create_from_source_snapshot_encryptions( self, is_snapshot=False): volume_api = cinder.volume.api.API() foo_type = fake_volume.fake_volume_type_obj( self.context, id=fake.VOLUME_TYPE_ID, name='foo', extra_specs={'volume_backend_name': 'dev_1'}) biz_type = fake_volume.fake_volume_type_obj( self.context, id=fake.VOLUME_TYPE2_ID, name='biz', extra_specs={'volume_backend_name': 'dev_1'}) source_vol = fake_volume.fake_volume_obj( self.context, id=fake.VOLUME_ID, status='available', volume_size=1, volume_type_id=biz_type.id) source_vol.volume_type = biz_type snapshot = {'id': fake.SNAPSHOT_ID, 'status': fields.SnapshotStatus.AVAILABLE, 'volume_size': 1, 'volume_type_id': biz_type['id']} snapshot_obj = fake_snapshot.fake_snapshot_obj(self.context, **snapshot) snapshot_obj.volume = source_vol with mock.patch.object( cinder.volume.volume_types, 'volume_types_encryption_changed') as mock_encryption_changed: mock_encryption_changed.return_value = True self.assertRaises(exception.InvalidInput, volume_api.create, self.context, size=1, name='fake_name', description='fake_desc', volume_type=foo_type, source_volume=( source_vol if not is_snapshot else None), snapshot=snapshot_obj if is_snapshot else None) def test_create_from_source_encryption_changed(self): self._test_create_from_source_snapshot_encryptions() def test_create_from_snapshot_encryption_changed(self): self._test_create_from_source_snapshot_encryptions(is_snapshot=True) def _mock_synchronized(self, name, *s_args, **s_kwargs): def inner_sync1(f): def inner_sync2(*args, **kwargs): self.called.append('lock-%s' % (name)) ret = f(*args, **kwargs) self.called.append('unlock-%s' % (name)) return ret return inner_sync2 return inner_sync1 def _fake_execute(self, *cmd, **kwargs): pass @mock.patch.object(coordination.Coordinator, 'get_lock') @mock.patch.object(fake_driver.FakeLoggingVolumeDriver, 'create_volume_from_snapshot') def test_create_volume_from_snapshot_check_locks( self, mock_lvm_create, mock_lock): orig_flow = engine.ActionEngine.run def mock_flow_run(*args, **kwargs): # ensure the lock has been taken mock_lock.assert_called_with('%s-delete_snapshot' % snap_id) # now proceed with the flow. 
ret = orig_flow(*args, **kwargs) return ret # create source volume src_vol = tests_utils.create_volume(self.context, **self.volume_params) # no lock self.volume.create_volume(self.context, src_vol) snap_id = create_snapshot(src_vol.id, size=src_vol['size'])['id'] snapshot_obj = objects.Snapshot.get_by_id(self.context, snap_id) # no lock self.volume.create_snapshot(self.context, snapshot_obj) dst_vol = tests_utils.create_volume(self.context, snapshot_id=snap_id, **self.volume_params) admin_ctxt = context.get_admin_context() # mock the flow runner so we can do some checks self.mock_object(engine.ActionEngine, 'run', mock_flow_run) # locked self.volume.create_volume(self.context, dst_vol, request_spec={'snapshot_id': snap_id}) mock_lock.assert_called_with('%s-delete_snapshot' % snap_id) self.assertEqual(dst_vol.id, db.volume_get(admin_ctxt, dst_vol.id).id) self.assertEqual(snap_id, db.volume_get(admin_ctxt, dst_vol.id).snapshot_id) # locked self.volume.delete_volume(self.context, dst_vol) mock_lock.assert_called_with('%s-delete_volume' % dst_vol.id) # locked self.volume.delete_snapshot(self.context, snapshot_obj) mock_lock.assert_called_with('%s-delete_snapshot' % snap_id) # locked self.volume.delete_volume(self.context, src_vol) mock_lock.assert_called_with('%s-delete_volume' % src_vol.id) self.assertTrue(mock_lvm_create.called) @mock.patch.object(coordination.Coordinator, 'get_lock') def test_create_volume_from_volume_check_locks(self, mock_lock): # mock the synchroniser so we can record events self.mock_object(utils, 'execute', self._fake_execute) orig_flow = engine.ActionEngine.run def mock_flow_run(*args, **kwargs): # ensure the lock has been taken mock_lock.assert_called_with('%s-delete_volume' % src_vol_id) # now proceed with the flow. ret = orig_flow(*args, **kwargs) return ret # create source volume src_vol = tests_utils.create_volume(self.context, **self.volume_params) src_vol_id = src_vol['id'] # no lock self.volume.create_volume(self.context, src_vol) self.assertEqual(0, mock_lock.call_count) dst_vol = tests_utils.create_volume(self.context, source_volid=src_vol_id, **self.volume_params) dst_vol_id = dst_vol['id'] admin_ctxt = context.get_admin_context() # mock the flow runner so we can do some checks self.mock_object(engine.ActionEngine, 'run', mock_flow_run) # locked self.volume.create_volume(self.context, dst_vol, request_spec={'source_volid': src_vol_id}) mock_lock.assert_called_with('%s-delete_volume' % src_vol_id) self.assertEqual(dst_vol_id, db.volume_get(admin_ctxt, dst_vol_id).id) self.assertEqual(src_vol_id, db.volume_get(admin_ctxt, dst_vol_id).source_volid) # locked self.volume.delete_volume(self.context, dst_vol) mock_lock.assert_called_with('%s-delete_volume' % dst_vol_id) # locked self.volume.delete_volume(self.context, src_vol) mock_lock.assert_called_with('%s-delete_volume' % src_vol_id) def _raise_metadata_copy_failure(self, method, dst_vol): # MetadataCopyFailure exception will be raised if DB service is Down # while copying the volume glance metadata with mock.patch.object(db, method) as mock_db: mock_db.side_effect = exception.MetadataCopyFailure( reason="Because of DB service down.") self.assertRaises(exception.MetadataCopyFailure, self.volume.create_volume, self.context, dst_vol) # ensure that status of volume is 'error' vol = db.volume_get(self.context, dst_vol.id) self.assertEqual('error', vol['status']) # cleanup resource db.volume_destroy(self.context, dst_vol.id) @mock.patch('cinder.utils.execute') def 
test_create_volume_from_volume_with_glance_volume_metadata_none( self, mock_execute): # create source volume mock_execute.return_value = None src_vol = tests_utils.create_volume(self.context, **self.volume_params) src_vol_id = src_vol['id'] self.volume.create_volume(self.context, src_vol) # set bootable flag of volume to True db.volume_update(self.context, src_vol['id'], {'bootable': True}) # create volume from source volume dst_vol = tests_utils.create_volume(self.context, source_volid=src_vol_id, **self.volume_params) self.volume.create_volume(self.context, dst_vol) self.assertRaises(exception.GlanceMetadataNotFound, db.volume_glance_metadata_copy_from_volume_to_volume, self.context, src_vol_id, dst_vol['id']) # ensure that status of volume is 'available' vol = db.volume_get(self.context, dst_vol['id']) self.assertEqual('available', vol['status']) # cleanup resource db.volume_destroy(self.context, src_vol_id) db.volume_destroy(self.context, dst_vol['id']) @mock.patch('cinder.utils.execute') def test_create_volume_from_volume_raise_metadata_copy_failure( self, mock_execute): # create source volume mock_execute.return_value = None src_vol = tests_utils.create_volume(self.context, **self.volume_params) src_vol_id = src_vol['id'] self.volume.create_volume(self.context, src_vol) # set bootable flag of volume to True db.volume_update(self.context, src_vol['id'], {'bootable': True}) # create volume from source volume dst_vol = tests_utils.create_volume(self.context, source_volid=src_vol_id, **self.volume_params) self._raise_metadata_copy_failure( 'volume_glance_metadata_copy_from_volume_to_volume', dst_vol) # cleanup resource db.volume_destroy(self.context, src_vol_id) @mock.patch('cinder.utils.execute') def test_create_volume_from_snapshot_raise_metadata_copy_failure( self, mock_execute): # create source volume mock_execute.return_value = None src_vol = tests_utils.create_volume(self.context, **self.volume_params) src_vol_id = src_vol['id'] self.volume.create_volume(self.context, src_vol) # set bootable flag of volume to True db.volume_update(self.context, src_vol['id'], {'bootable': True}) # create volume from snapshot snapshot_id = create_snapshot(src_vol['id'])['id'] snapshot_obj = objects.Snapshot.get_by_id(self.context, snapshot_id) self.volume.create_snapshot(self.context, snapshot_obj) # ensure that status of snapshot is 'available' self.assertEqual(fields.SnapshotStatus.AVAILABLE, snapshot_obj.status) dst_vol = tests_utils.create_volume(self.context, snapshot_id=snapshot_id, **self.volume_params) self._raise_metadata_copy_failure( 'volume_glance_metadata_copy_to_volume', dst_vol) # cleanup resource snapshot_obj.destroy() db.volume_destroy(self.context, src_vol_id) @mock.patch('cinder.utils.execute') def test_create_volume_from_snapshot_with_glance_volume_metadata_none( self, mock_execute): # create source volume mock_execute.return_value = None src_vol = tests_utils.create_volume(self.context, **self.volume_params) src_vol_id = src_vol['id'] self.volume.create_volume(self.context, src_vol) # set bootable flag of volume to True db.volume_update(self.context, src_vol['id'], {'bootable': True}) volume = db.volume_get(self.context, src_vol_id) # create snapshot of volume snapshot_id = create_snapshot(volume['id'])['id'] snapshot_obj = objects.Snapshot.get_by_id(self.context, snapshot_id) self.volume.create_snapshot(self.context, snapshot_obj) # ensure that status of snapshot is 'available' self.assertEqual(fields.SnapshotStatus.AVAILABLE, snapshot_obj.status) # create volume from snapshot 
dst_vol = tests_utils.create_volume(self.context, snapshot_id=snapshot_id, **self.volume_params) self.volume.create_volume(self.context, dst_vol) self.assertRaises(exception.GlanceMetadataNotFound, db.volume_glance_metadata_copy_to_volume, self.context, dst_vol['id'], snapshot_id) # ensure that status of volume is 'available' vol = db.volume_get(self.context, dst_vol['id']) self.assertEqual('available', vol['status']) # cleanup resource snapshot_obj.destroy() db.volume_destroy(self.context, src_vol_id) db.volume_destroy(self.context, dst_vol['id']) @ddt.data({'connector_class': os_brick.initiator.connectors.iscsi.ISCSIConnector, 'rekey_supported': True, 'already_encrypted': True}, {'connector_class': os_brick.initiator.connectors.iscsi.ISCSIConnector, 'rekey_supported': True, 'already_encrypted': False}, {'connector_class': os_brick.initiator.connectors.rbd.RBDConnector, 'rekey_supported': False, 'already_encrypted': False}) @ddt.unpack @mock.patch('cinder.volume.volume_utils.delete_encryption_key') @mock.patch('cinder.volume.flows.manager.create_volume.' 'CreateVolumeFromSpecTask._setup_encryption_keys') @mock.patch('cinder.db.sqlalchemy.api.volume_encryption_metadata_get') @mock.patch('cinder.image.image_utils.qemu_img_info') @mock.patch('cinder.volume.driver.VolumeDriver._detach_volume') @mock.patch('cinder.volume.driver.VolumeDriver._attach_volume') @mock.patch('cinder.volume.volume_utils.brick_get_connector_properties') @mock.patch('cinder.utils.execute') def test_create_volume_from_volume_with_enc( self, mock_execute, mock_brick_gcp, mock_at, mock_det, mock_qemu_img_info, mock_enc_metadata_get, mock_setup_enc_keys, mock_del_enc_key, connector_class=None, rekey_supported=None, already_encrypted=None): # create source volume mock_execute.return_value = ('', '') mock_enc_metadata_get.return_value = { 'cipher': 'aes-xts-plain64', 'key_size': 256, 'provider': 'luks', 'encryption_key_id': fake.ENCRYPTION_KEY_ID} mock_setup_enc_keys.return_value = ( 'qwert', 'asdfg', fake.ENCRYPTION_KEY2_ID) params = {'status': 'creating', 'size': 1, 'host': CONF.host, 'encryption_key_id': fake.ENCRYPTION_KEY_ID} src_vol = tests_utils.create_volume(self.context, **params) src_vol_id = src_vol['id'] self.volume.create_volume(self.context, src_vol) db.volume_update(self.context, src_vol['id'], {'encryption_key_id': fake.ENCRYPTION_KEY_ID}) # create volume from source volume params['encryption_key_id'] = fake.ENCRYPTION_KEY2_ID attach_info = { 'connector': connector_class(None), 'device': {'path': '/some/device/thing'}} mock_at.return_value = (attach_info, src_vol) img_info = imageutils.QemuImgInfo() if already_encrypted: # defaults to None when not encrypted img_info.encrypted = 'yes' img_info.file_format = 'raw' mock_qemu_img_info.return_value = img_info dst_vol = tests_utils.create_volume(self.context, source_volid=src_vol_id, **params) self.volume.create_volume(self.context, dst_vol) # ensure that status of volume is 'available' vol = db.volume_get(self.context, dst_vol['id']) self.assertEqual('available', vol['status']) # cleanup resource db.volume_destroy(self.context, src_vol_id) db.volume_destroy(self.context, dst_vol['id']) if rekey_supported: mock_setup_enc_keys.assert_called_once_with( mock.ANY, src_vol, {'key_size': 256, 'provider': 'luks', 'cipher': 'aes-xts-plain64', 'encryption_key_id': fake.ENCRYPTION_KEY_ID} ) if already_encrypted: mock_execute.assert_called_once_with( 'cryptsetup', 'luksChangeKey', '/some/device/thing', '--force-password', log_errors=processutils.LOG_ALL_ERRORS, 
process_input='qwert\nasdfg\n', run_as_root=True) else: mock_execute.assert_called_once_with( 'cryptsetup', '--batch-mode', 'luksFormat', '--force-password', '--type', 'luks1', '--cipher', 'aes-xts-plain64', '--key-size', '256', '--key-file=-', '/some/device/thing', process_input='asdfg', run_as_root=True) mock_del_enc_key.assert_called_once_with(mock.ANY, # context mock.ANY, # keymgr fake.ENCRYPTION_KEY2_ID) else: mock_setup_enc_keys.assert_not_called() mock_execute.assert_not_called() mock_del_enc_key.assert_not_called() mock_at.assert_called() mock_det.assert_called() @mock.patch('cinder.db.sqlalchemy.api.volume_encryption_metadata_get') def test_setup_encryption_keys(self, mock_enc_metadata_get): key_mgr = fake_keymgr.fake_api() self.mock_object(castellan.key_manager, 'API', return_value=key_mgr) key_id = key_mgr.store(self.context, KeyObject()) key2_id = key_mgr.store(self.context, KeyObject2()) params = {'status': 'creating', 'size': 1, 'host': CONF.host, 'encryption_key_id': key_id} vol = tests_utils.create_volume(self.context, **params) self.volume.create_volume(self.context, vol) db.volume_update(self.context, vol['id'], {'encryption_key_id': key_id}) mock_enc_metadata_get.return_value = {'cipher': 'aes-xts-plain64', 'key_size': 256, 'provider': 'luks'} ctxt = context.get_admin_context() enc_info = {'encryption_key_id': key_id} with mock.patch('cinder.volume.volume_utils.create_encryption_key', return_value=key2_id): r = cinder.volume.flows.manager.create_volume.\ CreateVolumeFromSpecTask._setup_encryption_keys(ctxt, vol, enc_info) (source_pass, new_pass, new_key_id) = r self.assertNotEqual(source_pass, new_pass) self.assertEqual(new_key_id, key2_id) @mock.patch.object(key_manager, 'API', fake_keymgr.fake_api) def test_create_volume_from_snapshot_with_encryption(self): """Test volume can be created from a snapshot of an encrypted volume""" ctxt = context.get_admin_context() cipher = 'aes-xts-plain64' key_size = 256 db.volume_type_create(ctxt, {'id': '61298380-0c12-11e3-bfd6-4b48424183be', 'name': 'LUKS'}) db.volume_type_encryption_create( ctxt, '61298380-0c12-11e3-bfd6-4b48424183be', {'control_location': 'front-end', 'provider': ENCRYPTION_PROVIDER, 'cipher': cipher, 'key_size': key_size}) volume_api = cinder.volume.api.API() db_vol_type = db.volume_type_get_by_name(context.get_admin_context(), 'LUKS') volume_src = volume_api.create(self.context, 1, 'name', 'description', volume_type=db_vol_type) db.volume_update(self.context, volume_src['id'], {'host': 'fake_host@fake_backend', 'status': 'available'}) volume_src = objects.Volume.get_by_id(self.context, volume_src['id']) snapshot_ref = volume_api.create_snapshot_force(self.context, volume_src, 'name', 'description') snapshot_ref['status'] = fields.SnapshotStatus.AVAILABLE # status must be available volume_dst = volume_api.create(self.context, 1, 'name', 'description', snapshot=snapshot_ref) self.assertEqual(volume_dst['id'], db.volume_get( context.get_admin_context(), volume_dst['id']).id) self.assertEqual(snapshot_ref['id'], db.volume_get(context.get_admin_context(), volume_dst['id']).snapshot_id) # ensure encryption keys match self.assertIsNotNone(volume_src['encryption_key_id']) self.assertIsNotNone(volume_dst['encryption_key_id']) key_manager = volume_api.key_manager # must use *same* key manager volume_src_key = key_manager.get(self.context, volume_src['encryption_key_id']) volume_dst_key = key_manager.get(self.context, volume_dst['encryption_key_id']) self.assertEqual(volume_src_key, volume_dst_key) def 
test_create_volume_from_encrypted_volume(self): """Test volume can be created from an encrypted volume.""" self.mock_object(key_manager, 'API', fake_keymgr.fake_api) cipher = 'aes-xts-plain64' key_size = 256 volume_api = cinder.volume.api.API() ctxt = context.get_admin_context() db.volume_type_create(ctxt, {'id': '61298380-0c12-11e3-bfd6-4b48424183be', 'name': 'LUKS'}) db.volume_type_encryption_create( ctxt, '61298380-0c12-11e3-bfd6-4b48424183be', {'control_location': 'front-end', 'provider': ENCRYPTION_PROVIDER, 'cipher': cipher, 'key_size': key_size}) db_vol_type = db.volume_type_get_by_name(context.get_admin_context(), 'LUKS') volume_src = volume_api.create(self.context, 1, 'name', 'description', volume_type=db_vol_type) db.volume_update(self.context, volume_src['id'], {'host': 'fake_host@fake_backend', 'status': 'available'}) volume_src = objects.Volume.get_by_id(self.context, volume_src['id']) volume_dst = volume_api.create(self.context, 1, 'name', 'description', source_volume=volume_src) self.assertEqual(volume_dst['id'], db.volume_get(context.get_admin_context(), volume_dst['id']).id) self.assertEqual(volume_src['id'], db.volume_get(context.get_admin_context(), volume_dst['id']).source_volid) # ensure encryption keys match self.assertIsNotNone(volume_src['encryption_key_id']) self.assertIsNotNone(volume_dst['encryption_key_id']) km = volume_api.key_manager # must use *same* key manager volume_src_key = km.get(self.context, volume_src['encryption_key_id']) volume_dst_key = km.get(self.context, volume_dst['encryption_key_id']) self.assertEqual(volume_src_key, volume_dst_key) def test_delete_invalid_status_fails(self): self.volume_params['status'] = 'invalid1234' volume = tests_utils.create_volume(self.context, **self.volume_params) vol_api = cinder.volume.api.API() self.assertRaises(exception.InvalidVolume, vol_api.delete, self.context, volume) def test_create_volume_from_snapshot_fail_bad_size(self): """Test volume can't be created from snapshot with bad volume size.""" volume_api = cinder.volume.api.API() snapshot = {'id': fake.SNAPSHOT_ID, 'status': fields.SnapshotStatus.AVAILABLE, 'volume_size': 10} snapshot_obj = fake_snapshot.fake_snapshot_obj(self.context, **snapshot) self.assertRaises(exception.InvalidInput, volume_api.create, self.context, size=1, name='fake_name', description='fake_desc', snapshot=snapshot_obj) def test_create_volume_from_snapshot_fail_wrong_az(self): """Test volume can't be created from snapshot in a different az.""" volume_api = cinder.volume.api.API() def fake_list_availability_zones(enable_cache=False): return ({'name': 'nova', 'available': True}, {'name': 'az2', 'available': True}) self.mock_object(volume_api, 'list_availability_zones', fake_list_availability_zones) volume_src = tests_utils.create_volume(self.context, availability_zone='az2', **self.volume_params) self.volume.create_volume(self.context, volume_src) snapshot = create_snapshot(volume_src['id']) self.volume.create_snapshot(self.context, snapshot) volume_dst = volume_api.create(self.context, size=1, name='fake_name', description='fake_desc', snapshot=snapshot) self.assertEqual('az2', volume_dst['availability_zone']) self.assertRaises(exception.InvalidInput, volume_api.create, self.context, size=1, name='fake_name', description='fake_desc', snapshot=snapshot, availability_zone='nova') def test_create_volume_with_invalid_exclusive_options(self): """Test volume create with multiple exclusive options fails.""" volume_api = cinder.volume.api.API() self.assertRaises(exception.InvalidInput, 
volume_api.create, self.context, 1, 'name', 'description', snapshot=fake.SNAPSHOT_ID, image_id=fake.IMAGE_ID, source_volume=fake.VOLUME_ID) def test_reserve_volume_success(self): volume = tests_utils.create_volume(self.context, status='available') cinder.volume.api.API().reserve_volume(self.context, volume) volume_db = db.volume_get(self.context, volume.id) self.assertEqual('attaching', volume_db.status) db.volume_destroy(self.context, volume.id) def test_reserve_volume_in_attaching(self): self._test_reserve_volume_bad_status('attaching') def test_reserve_volume_in_maintenance(self): self._test_reserve_volume_bad_status('maintenance') def _test_reserve_volume_bad_status(self, status): volume = tests_utils.create_volume(self.context, status=status) self.assertRaises(exception.InvalidVolume, cinder.volume.api.API().reserve_volume, self.context, volume) db.volume_destroy(self.context, volume.id) def test_attachment_reserve_with_bootable_volume(self): # test the private _attachment_reserve method with a bootable, # in-use, multiattach volume. instance_uuid = fake.UUID1 volume = tests_utils.create_volume(self.context, status='in-use') tests_utils.attach_volume(self.context, volume.id, instance_uuid, 'attached_host', 'mountpoint', mode='rw') volume.multiattach = True volume.bootable = True attachment = self.volume_api._attachment_reserve( self.context, volume, instance_uuid) self.assertEqual(attachment.attach_status, 'reserved') def test_attachment_reserve_conditional_update_attach_race(self): # Tests a scenario where two instances are racing to attach the # same multiattach=False volume. One updates the volume status to # "reserved" but the other fails the conditional update which is # then validated to not be the same instance that is already attached # to the multiattach=False volume which triggers a failure. volume = tests_utils.create_volume(self.context) # Assert that we're not dealing with a multiattach volume and that # it does not have any existing attachments. self.assertFalse(volume.multiattach) self.assertEqual(0, len(volume.volume_attachment)) # Attach the first instance which is OK and should update the volume # status to 'reserved'. self.volume_api._attachment_reserve(self.context, volume, fake.UUID1) # Try attaching a different instance to the same volume which should # fail. ex = self.assertRaises(exception.InvalidVolume, self.volume_api._attachment_reserve, self.context, volume, fake.UUID2) self.assertIn("status must be available or downloading", str(ex)) def test_attachment_reserve_with_instance_uuid_error_volume(self): # Tests that trying to create an attachment (with an instance_uuid # provided) on a volume that's not 'available' or 'downloading' status # will fail if the volume does not have any attachments, similar to how # the volume reserve action works. volume = tests_utils.create_volume(self.context, status='error') # Assert that we're not dealing with a multiattach volume and that # it does not have any existing attachments. self.assertFalse(volume.multiattach) self.assertEqual(0, len(volume.volume_attachment)) # Try attaching an instance to the volume which should fail based on # the volume status. 
ex = self.assertRaises(exception.InvalidVolume, self.volume_api._attachment_reserve, self.context, volume, fake.UUID1) self.assertIn("status must be available or downloading", str(ex)) def test_unreserve_volume_success_in_use(self): volume = tests_utils.create_volume(self.context, status='attaching') tests_utils.attach_volume(self.context, volume.id, fake.INSTANCE_ID, 'attached_host', 'mountpoint', mode='rw') cinder.volume.api.API().unreserve_volume(self.context, volume) db_volume = db.volume_get(self.context, volume.id) self.assertEqual('in-use', db_volume.status) def test_unreserve_volume_success_available(self): volume = tests_utils.create_volume(self.context, status='attaching') cinder.volume.api.API().unreserve_volume(self.context, volume) db_volume = db.volume_get(self.context, volume.id) self.assertEqual('available', db_volume.status) def test_multi_node(self): # TODO(termie): Figure out how to test with two nodes, # each of them having a different FLAG for storage_node # This will allow us to test cross-node interactions pass def test_cannot_delete_volume_in_use(self): """Test volume can't be deleted in in-use status.""" self._test_cannot_delete_volume('in-use') def test_cannot_delete_volume_maintenance(self): """Test volume can't be deleted in maintenance status.""" self._test_cannot_delete_volume('maintenance') @mock.patch('cinder.utils.clean_volume_file_locks') def _test_cannot_delete_volume(self, status, mock_clean): """Test volume can't be deleted in an invalid status.""" # create a volume and assign to host volume = tests_utils.create_volume(self.context, CONF.host, status=status) # an invalid status raises InvalidVolume self.assertRaises(exception.InvalidVolume, self.volume_api.delete, self.context, volume) mock_clean.assert_not_called() # clean up self.volume.delete_volume(self.context, volume) def test_force_delete_volume(self): """Test volume can be force deleted.""" # create a volume and assign to host self.volume_params['status'] = 'error_deleting' volume = tests_utils.create_volume(self.context, **self.volume_params) # 'error_deleting' volumes can't be deleted self.assertRaises(exception.InvalidVolume, self.volume_api.delete, self.context, volume) # delete with force self.volume_api.delete(self.context, volume, force=True) # status is deleting volume = objects.Volume.get_by_id(context.get_admin_context(), volume.id) self.assertEqual('deleting', volume.status) # clean up self.volume.delete_volume(self.context, volume) def test_cannot_force_delete_attached_volume(self): """Test volume can't be force deleted in attached state.""" volume = tests_utils.create_volume(self.context, CONF.host, status='in-use', attach_status= fields.VolumeAttachStatus.ATTACHED) self.assertRaises(exception.InvalidVolume, self.volume_api.delete, self.context, volume, force=True) db.volume_destroy(self.context, volume.id) @mock.patch('cinder.utils.clean_volume_file_locks') def test__revert_to_snapshot_generic_failed(self, mock_clean): fake_volume = tests_utils.create_volume(self.context, status='available') fake_snapshot = tests_utils.create_snapshot(self.context, fake_volume.id) with mock.patch.object( self.volume.driver, '_create_temp_volume_from_snapshot') as mock_temp, \ mock.patch.object( self.volume.driver, 'delete_volume') as mock_driver_delete, \ mock.patch.object( self.volume, '_copy_volume_data') as mock_copy: temp_volume = tests_utils.create_volume( self.context, status=fields.VolumeStatus.IN_USE) mock_copy.side_effect = [exception.VolumeDriverException('error')] mock_temp.return_value = 
temp_volume self.assertRaises(exception.VolumeDriverException, self.volume._revert_to_snapshot_generic, self.context, fake_volume, fake_snapshot) mock_copy.assert_called_once_with( self.context, temp_volume, fake_volume) mock_driver_delete.assert_called_once_with(temp_volume) mock_clean.assert_called_once_with(temp_volume.id, self.volume.driver) @mock.patch('cinder.utils.clean_volume_file_locks') def test__revert_to_snapshot_generic(self, mock_clean): fake_volume = tests_utils.create_volume(self.context, status='available') fake_snapshot = tests_utils.create_snapshot(self.context, fake_volume.id) with mock.patch.object( self.volume.driver, '_create_temp_volume_from_snapshot') as mock_temp, \ mock.patch.object( self.volume.driver, 'delete_volume') as mock_driver_delete, \ mock.patch.object( self.volume, '_copy_volume_data') as mock_copy: temp_volume = tests_utils.create_volume(self.context, status='available') mock_temp.return_value = temp_volume self.volume._revert_to_snapshot_generic( self.context, fake_volume, fake_snapshot) mock_copy.assert_called_once_with( self.context, temp_volume, fake_volume) mock_driver_delete.assert_called_once_with(temp_volume) mock_clean.assert_called_once_with(temp_volume.id, self.volume.driver) @ddt.data({'driver_error': True}, {'driver_error': False}) @ddt.unpack def test__revert_to_snapshot(self, driver_error): mock.patch.object(self.volume, '_notify_about_snapshot_usage') with mock.patch.object(self.volume.driver, 'revert_to_snapshot') as driver_revert, \ mock.patch.object(self.volume, '_notify_about_volume_usage'), \ mock.patch.object(self.volume, '_notify_about_snapshot_usage'), \ mock.patch.object(self.volume, '_revert_to_snapshot_generic') as generic_revert: if driver_error: driver_revert.side_effect = [NotImplementedError] else: driver_revert.return_value = None self.volume._revert_to_snapshot(self.context, {}, {}) driver_revert.assert_called_once_with(self.context, {}, {}) if driver_error: generic_revert.assert_called_once_with(self.context, {}, {}) @ddt.data({}, {'has_snapshot': True}, {'use_temp_snapshot': True}, {'use_temp_snapshot': True, 'has_snapshot': True}) @ddt.unpack def test_revert_to_snapshot(self, has_snapshot=False, use_temp_snapshot=False): fake_volume = tests_utils.create_volume(self.context, status='reverting', project_id='123', size=2) fake_snapshot = tests_utils.create_snapshot(self.context, fake_volume['id'], status='restoring', volume_size=1) with mock.patch.object(self.volume, '_revert_to_snapshot') as _revert, \ mock.patch.object(self.volume, '_create_backup_snapshot') as _create_snapshot, \ mock.patch.object(self.volume, 'delete_snapshot') as _delete_snapshot, \ mock.patch.object(self.volume.driver, 'snapshot_revert_use_temp_snapshot') as \ _use_temp_snap: _revert.return_value = None _use_temp_snap.return_value = use_temp_snapshot if has_snapshot: _create_snapshot.return_value = {'id': 'fake_snapshot'} else: _create_snapshot.return_value = None self.volume.revert_to_snapshot(self.context, fake_volume, fake_snapshot) _revert.assert_called_once_with(self.context, fake_volume, fake_snapshot) if not use_temp_snapshot: _create_snapshot.assert_not_called() else: _create_snapshot.assert_called_once_with(self.context, fake_volume) if use_temp_snapshot and has_snapshot: _delete_snapshot.assert_called_once_with( self.context, {'id': 'fake_snapshot'}) else: _delete_snapshot.assert_not_called() fake_volume.refresh() fake_snapshot.refresh() self.assertEqual('available', fake_volume['status']) self.assertEqual('available', 
fake_snapshot['status']) self.assertEqual(2, fake_volume['size']) def test_revert_to_snapshot_failed(self): fake_volume = tests_utils.create_volume(self.context, status='reverting', project_id='123', size=2) fake_snapshot = tests_utils.create_snapshot(self.context, fake_volume['id'], status='restoring', volume_size=1) with mock.patch.object(self.volume, '_revert_to_snapshot') as _revert, \ mock.patch.object(self.volume, '_create_backup_snapshot'), \ mock.patch.object(self.volume, 'delete_snapshot') as _delete_snapshot: _revert.side_effect = [exception.VolumeDriverException( message='fake_message')] self.assertRaises(exception.VolumeDriverException, self.volume.revert_to_snapshot, self.context, fake_volume, fake_snapshot) _revert.assert_called_once_with(self.context, fake_volume, fake_snapshot) _delete_snapshot.assert_not_called() fake_volume.refresh() fake_snapshot.refresh() self.assertEqual('error', fake_volume['status']) self.assertEqual('available', fake_snapshot['status']) self.assertEqual(2, fake_volume['size']) def test_cannot_revert_to_snapshot_in_use(self): """Test volume can't be reverted to snapshot in in-use status.""" fake_volume = tests_utils.create_volume(self.context, status='in-use') fake_snapshot = tests_utils.create_snapshot(self.context, fake_volume.id, status='available') self.assertRaises(exception.InvalidVolume, self.volume_api.revert_to_snapshot, self.context, fake_volume, fake_snapshot) @ddt.data(True, False) @mock.patch('cinder.quota.QUOTAS.commit') @mock.patch('cinder.quota.QUOTAS.reserve') @mock.patch.object(vol_manager.VolumeManager, '_notify_about_snapshot_usage') @mock.patch.object(fake_driver.FakeLoggingVolumeDriver, 'delete_snapshot') def test_delete_snapshot(self, use_quota, delete_mock, notify_mock, reserve_mock, commit_mock): """Test delete snapshot.""" volume = tests_utils.create_volume(self.context, CONF.host) snapshot = create_snapshot(volume.id, size=volume.size, ctxt=self.context, use_quota=use_quota, status=fields.SnapshotStatus.AVAILABLE) self.volume.delete_snapshot(self.context, snapshot) delete_mock.assert_called_once_with(snapshot) self.assertEqual(2, notify_mock.call_count) notify_mock.assert_has_calls(( mock.call(mock.ANY, snapshot, 'delete.start'), mock.call(mock.ANY, snapshot, 'delete.end'), )) if use_quota: reserve_mock.assert_called_once_with( mock.ANY, project_id=snapshot.project_id, gigabytes=-snapshot.volume_size, gigabytes_vol_type_name=-snapshot.volume_size, snapshots=-1, snapshots_vol_type_name=-1) commit_mock.assert_called_once_with(mock.ANY, reserve_mock.return_value, project_id=snapshot.project_id) else: reserve_mock.assert_not_called() commit_mock.assert_not_called() self.assertEqual(fields.SnapshotStatus.DELETED, snapshot.status) self.assertTrue(snapshot.deleted) def test_cannot_delete_volume_with_snapshots(self): """Test volume can't be deleted with dependent snapshots.""" volume = tests_utils.create_volume(self.context, **self.volume_params) self.volume.create_volume(self.context, volume) snapshot = create_snapshot(volume['id'], size=volume['size']) self.volume.create_snapshot(self.context, snapshot) self.assertEqual( snapshot.id, objects.Snapshot.get_by_id(self.context, snapshot.id).id) volume['status'] = 'available' volume['host'] = 'fakehost' volume_api = cinder.volume.api.API() self.assertRaises(exception.InvalidVolume, volume_api.delete, self.context, volume) self.volume.delete_snapshot(self.context, snapshot) self.volume.delete_volume(self.context, volume) def test_can_delete_errored_snapshot(self): """Test snapshot can 
be deleted when it is in error status.""" volume = tests_utils.create_volume(self.context, CONF.host) snapshot = create_snapshot(volume.id, size=volume['size'], ctxt=self.context, status=fields.SnapshotStatus.ERROR) self.volume_api.delete_snapshot(self.context, snapshot) self.assertEqual(fields.SnapshotStatus.DELETING, snapshot.status) self.volume.delete_volume(self.context, volume) def test_create_snapshot_set_worker(self): volume = tests_utils.create_volume(self.context) snapshot = create_snapshot(volume.id, size=volume['size'], ctxt=self.context, status=fields.SnapshotStatus.CREATING) self.volume.create_snapshot(self.context, snapshot) volume.set_worker.assert_called_once_with() def test_cannot_delete_snapshot_with_bad_status(self): volume = tests_utils.create_volume(self.context, CONF.host) snapshot = create_snapshot(volume.id, size=volume['size'], ctxt=self.context, status=fields.SnapshotStatus.CREATING) self.assertRaises(exception.InvalidSnapshot, self.volume_api.delete_snapshot, self.context, snapshot) snapshot.status = fields.SnapshotStatus.ERROR snapshot.save() self.volume_api.delete_snapshot(self.context, snapshot) self.assertEqual(fields.SnapshotStatus.DELETING, snapshot.status) self.volume.delete_volume(self.context, volume) @mock.patch.object(QUOTAS, "rollback") @mock.patch.object(QUOTAS, "commit") @mock.patch.object(QUOTAS, "reserve", return_value=["RESERVATION"]) def _do_test_create_volume_with_size(self, size, *_unused_quota_mocks): volume_api = cinder.volume.api.API() volume = volume_api.create(self.context, size, 'name', 'description', volume_type=self.vol_type) self.assertEqual(int(size), volume['size']) def test_create_volume_int_size(self): """Test volume creation with int size.""" self._do_test_create_volume_with_size(2) def test_create_volume_string_size(self): """Test volume creation with string size.""" self._do_test_create_volume_with_size('2') @mock.patch.object(QUOTAS, "rollback") @mock.patch.object(QUOTAS, "commit") @mock.patch.object(QUOTAS, "reserve", return_value=["RESERVATION"]) def test_create_volume_with_bad_size(self, *_unused_quota_mocks): volume_api = cinder.volume.api.API() self.assertRaises(exception.InvalidInput, volume_api.create, self.context, '2Gb', 'name', 'description') def test_create_volume_with_float_fails(self): """Test volume creation with invalid float size.""" volume_api = cinder.volume.api.API() self.assertRaises(exception.InvalidInput, volume_api.create, self.context, '1.5', 'name', 'description') def test_create_volume_with_zero_size_fails(self): """Test volume creation with zero size fails.""" volume_api = cinder.volume.api.API() self.assertRaises(exception.InvalidInput, volume_api.create, self.context, '0', 'name', 'description') def test_begin_detaching_fails_available(self): volume_api = cinder.volume.api.API() volume = tests_utils.create_volume(self.context, status='available') # Volume status is 'available'. 
self.assertRaises(exception.InvalidVolume, volume_api.begin_detaching, self.context, volume) db.volume_update(self.context, volume.id, {'status': 'in-use', 'attach_status': fields.VolumeAttachStatus.DETACHED}) # Should raise an error since not attached self.assertRaises(exception.InvalidVolume, volume_api.begin_detaching, self.context, volume) db.volume_update(self.context, volume.id, {'attach_status': fields.VolumeAttachStatus.ATTACHED}) # Ensure when attached no exception raised volume_api.begin_detaching(self.context, volume) volume_api.update(self.context, volume, {'status': 'maintenance'}) self.assertRaises(exception.InvalidVolume, volume_api.begin_detaching, self.context, volume) db.volume_destroy(self.context, volume.id) def test_begin_roll_detaching_volume(self): """Test begin_detaching and roll_detaching functions.""" instance_uuid = '12345678-1234-5678-1234-567812345678' volume = tests_utils.create_volume(self.context, **self.volume_params) attachment = db.volume_attach(self.context, {'volume_id': volume['id'], 'attached_host': 'fake-host'}) db.volume_attached(self.context, attachment['id'], instance_uuid, 'fake-host', 'vdb') volume_api = cinder.volume.api.API() volume_api.begin_detaching(self.context, volume) volume = volume_api.get(self.context, volume['id']) self.assertEqual("detaching", volume['status']) volume_api.roll_detaching(self.context, volume) volume = volume_api.get(self.context, volume['id']) self.assertEqual("in-use", volume['status']) def test_volume_api_update(self): # create a raw vol volume = tests_utils.create_volume(self.context, **self.volume_params) # use volume.api to update name volume_api = cinder.volume.api.API() update_dict = {'display_name': 'test update name'} volume_api.update(self.context, volume, update_dict) # read changes from db vol = db.volume_get(context.get_admin_context(), volume['id']) self.assertEqual('test update name', vol['display_name']) def test_volume_api_update_maintenance(self): # create a raw vol volume = tests_utils.create_volume(self.context, **self.volume_params) volume['status'] = 'maintenance' # use volume.api to update name volume_api = cinder.volume.api.API() update_dict = {'display_name': 'test update name'} self.assertRaises(exception.InvalidVolume, volume_api.update, self.context, volume, update_dict) def test_volume_api_get_list_volumes_image_metadata(self): """Test get_list_volumes_image_metadata in volume API.""" ctxt = context.get_admin_context() db.volume_create(ctxt, {'id': 'fake1', 'status': 'available', 'host': 'test', 'provider_location': '', 'size': 1, 'volume_type_id': fake.VOLUME_TYPE_ID}) db.volume_glance_metadata_create(ctxt, 'fake1', 'key1', 'value1') db.volume_glance_metadata_create(ctxt, 'fake1', 'key2', 'value2') db.volume_create(ctxt, {'id': 'fake2', 'status': 'available', 'host': 'test', 'provider_location': '', 'size': 1, 'volume_type_id': fake.VOLUME_TYPE_ID}) db.volume_glance_metadata_create(ctxt, 'fake2', 'key3', 'value3') db.volume_glance_metadata_create(ctxt, 'fake2', 'key4', 'value4') volume_api = cinder.volume.api.API() results = volume_api.get_list_volumes_image_metadata(ctxt, ['fake1', 'fake2']) expect_results = {'fake1': {'key1': 'value1', 'key2': 'value2'}, 'fake2': {'key3': 'value3', 'key4': 'value4'}} self.assertEqual(expect_results, results) @mock.patch.object(QUOTAS, 'limit_check') @mock.patch.object(QUOTAS, 'reserve') def test_extend_attached_volume(self, reserve, limit_check): volume = self._create_volume(self.context, size=2, status='available', host=CONF.host) volume_api = 
cinder.volume.api.API() self.assertRaises(exception.InvalidVolume, volume_api._extend, self.context, volume, 3, attached=True) db.volume_update(self.context, volume.id, {'status': 'in-use'}) volume.refresh() reserve.return_value = ["RESERVATION"] volume_api._extend(self.context, volume, 3, attached=True) volume.refresh() self.assertEqual('extending', volume.status) self.assertEqual('in-use', volume.previous_status) reserve.assert_called_once_with(self.context, gigabytes=1, gigabytes___DEFAULT__=1, project_id=volume.project_id) limit_check.side_effect = None reserve.side_effect = None db.volume_update(self.context, volume.id, {'status': 'in-use'}) volume_api.scheduler_rpcapi = mock.MagicMock() volume_api.scheduler_rpcapi.extend_volume = mock.MagicMock() volume_api._extend(self.context, volume, 3, attached=True) request_spec = { 'volume_properties': volume, 'volume_type': self.vol_type, 'volume_id': volume.id } volume_api.scheduler_rpcapi.extend_volume.assert_called_once_with( self.context, volume, 3, ["RESERVATION"], request_spec) # clean up self.volume.delete_volume(self.context, volume) @mock.patch.object(QUOTAS, 'limit_check') @mock.patch.object(QUOTAS, 'reserve') def test_extend_volume(self, reserve, limit_check): """Test volume can be extended at API level.""" # create a volume and assign to host volume = self._create_volume(self.context, size=2, status='in-use', host=CONF.host) volume_api = cinder.volume.api.API() # Extend fails when status != available self.assertRaises(exception.InvalidVolume, volume_api._extend, self.context, volume, 3) db.volume_update(self.context, volume.id, {'status': 'available'}) volume.refresh() # Extend fails when new_size < orig_size self.assertRaises(exception.InvalidInput, volume_api._extend, self.context, volume, 1) # Extend fails when new_size == orig_size self.assertRaises(exception.InvalidInput, volume_api._extend, self.context, volume, 2) # works when new_size > orig_size reserve.return_value = ["RESERVATION"] volume_api._extend(self.context, volume, 3) volume.refresh() self.assertEqual('extending', volume.status) self.assertEqual('available', volume.previous_status) reserve.assert_called_once_with(self.context, gigabytes=1, gigabytes___DEFAULT__=1, project_id=volume.project_id) # Test the quota exceeded db.volume_update(self.context, volume.id, {'status': 'available'}) reserve.side_effect = exception.OverQuota(overs=['gigabytes'], quotas={'gigabytes': 20}, usages={'gigabytes': {'reserved': 5, 'in_use': 15}}) self.assertRaises(exception.VolumeSizeExceedsAvailableQuota, volume_api._extend, self.context, volume, 3) db.volume_update(self.context, volume.id, {'status': 'available'}) limit_check.side_effect = exception.OverQuota( overs=['per_volume_gigabytes'], quotas={'per_volume_gigabytes': 2}) self.assertRaises(exception.VolumeSizeExceedsLimit, volume_api._extend, self.context, volume, 3) # Test scheduler path limit_check.side_effect = None reserve.side_effect = None db.volume_update(self.context, volume.id, {'status': 'available'}) volume_api.scheduler_rpcapi = mock.MagicMock() volume_api.scheduler_rpcapi.extend_volume = mock.MagicMock() volume_api._extend(self.context, volume, 3) request_spec = { 'volume_properties': volume, 'volume_type': self.vol_type, 'volume_id': volume.id } volume_api.scheduler_rpcapi.extend_volume.assert_called_once_with( self.context, volume, 3, ["RESERVATION"], request_spec) # clean up self.volume.delete_volume(self.context, volume) @mock.patch.object(QUOTAS, 'limit_check') @mock.patch.object(QUOTAS, 'reserve') def 
test_extend_volume_with_volume_type_limit(self, reserve, limit_check): """Test volume extend is limited by the volume type size limit.""" volume_api = cinder.volume.api.API() volume = tests_utils.create_volume( self.context, size=2, volume_type_id=self.sized_vol_type['id']) volume_api.scheduler_rpcapi = mock.MagicMock() volume_api.scheduler_rpcapi.extend_volume = mock.MagicMock() volume_api._extend(self.context, volume, 3) self.assertRaises(exception.InvalidInput, volume_api._extend, self.context, volume, 5) def test_extend_volume_driver_not_initialized(self): """Test extend fails at the manager level when driver is not ready.""" # create a volume and assign to host fake_reservations = ['RESERVATION'] volume = tests_utils.create_volume(self.context, size=2, status='available', host=CONF.host) self.volume.create_volume(self.context, volume) self.volume.driver._initialized = False self.assertRaises(exception.DriverNotInitialized, self.volume.extend_volume, self.context, volume, 3, fake_reservations) volume.refresh() self.assertEqual('error_extending', volume.status) # let's clean up the mess. self.volume.driver._initialized = True self.volume.delete_volume(self.context, volume) def _test_extend_volume_manager_fails_with_exception(self, volume): fake_reservations = ['RESERVATION'] # Test driver exception with mock.patch.object( self.volume.driver, 'extend_volume', side_effect=exception.CinderException('fake exception')): with mock.patch.object( self.volume.message_api, 'create') as mock_create: volume['status'] = 'extending' self.volume.extend_volume(self.context, volume, 4, fake_reservations) volume.refresh() self.assertEqual(2, volume.size) self.assertEqual('error_extending', volume.status) mock_create.assert_called_once_with( self.context, message_field.Action.EXTEND_VOLUME, resource_uuid=volume.id, detail=message_field.Detail.DRIVER_FAILED_EXTEND) @mock.patch('cinder.compute.API') def _test_extend_volume_manager_successful(self, volume, nova_api, attached_to_glance=False): """Test volume can be extended at the manager level.""" def fake_extend(volume, new_size): volume['size'] = new_size nova_extend_volume = nova_api.return_value.extend_volume fake_reservations = ['RESERVATION'] orig_status = volume.status # Test driver success with mock.patch.object(self.volume.driver, 'extend_volume') as extend_volume: with mock.patch.object(QUOTAS, 'commit') as quotas_commit: extend_volume.return_value = fake_extend volume.status = 'extending' self.volume.extend_volume(self.context, volume, 4, fake_reservations) volume.refresh() self.assertEqual(4, volume.size) self.assertEqual(orig_status, volume.status) quotas_commit.assert_called_with( self.context, ['RESERVATION'], project_id=volume.project_id) if orig_status == 'in-use': instance_uuids = [ attachment.instance_uuid for attachment in volume.volume_attachment] if attached_to_glance: nova_extend_volume.assert_not_called() else: nova_extend_volume.assert_called_with( self.context, instance_uuids, volume.id) def test_extend_volume_manager_available_fails_with_exception(self): volume = tests_utils.create_volume(self.context, size=2, status='creating', host=CONF.host) self.volume.create_volume(self.context, volume) self._test_extend_volume_manager_fails_with_exception(volume) self.volume.delete_volume(self.context, volume) def test_extend_volume_manager_available_successful(self): volume = tests_utils.create_volume(self.context, size=2, status='creating', host=CONF.host) self.volume.create_volume(self.context, volume) self._test_extend_volume_manager_successful(volume) 
self.volume.delete_volume(self.context, volume) def test_extend_volume_manager_in_use_fails_with_exception(self): volume = tests_utils.create_volume(self.context, size=2, status='creating', host=CONF.host) self.volume.create_volume(self.context, volume) instance_uuid = '12345678-1234-5678-1234-567812345678' attachment = db.volume_attach(self.context, {'volume_id': volume.id, 'attached_host': 'fake-host'}) db.volume_attached(self.context, attachment.id, instance_uuid, 'fake-host', 'vdb') volume.refresh() self._test_extend_volume_manager_fails_with_exception(volume) self.volume.detach_volume(self.context, volume.id, attachment.id) self.volume.delete_volume(self.context, volume) def test_extend_volume_manager_in_use_successful(self): volume = tests_utils.create_volume(self.context, size=2, status='creating', host=CONF.host) self.volume.create_volume(self.context, volume) instance_uuid = '12345678-1234-5678-1234-567812345678' attachment = db.volume_attach(self.context, {'volume_id': volume.id, 'attached_host': 'fake-host'}) db.volume_attached(self.context, attachment.id, instance_uuid, 'fake-host', 'vdb') volume.refresh() self._test_extend_volume_manager_successful(volume) self.volume.detach_volume(self.context, volume.id, attachment.id) self.volume.delete_volume(self.context, volume) def test_extend_volume_manager_in_use_glance_store(self): volume = tests_utils.create_volume(self.context, size=2, status='creating', host=CONF.host) self.volume.create_volume(self.context, volume) instance_uuid = None attachment = db.volume_attach(self.context, {'volume_id': volume.id, 'attached_host': 'fake-host'}) db.volume_attached(self.context, attachment.id, instance_uuid, 'fake-host', 'vdb') volume.refresh() self._test_extend_volume_manager_successful(volume, attached_to_glance=True) self.volume.detach_volume(self.context, volume.id, attachment.id) self.volume.delete_volume(self.context, volume) @mock.patch('cinder.scheduler.rpcapi.SchedulerAPI.extend_volume') def test_extend_volume_with_volume_type(self, mock_scheduler_extend): elevated = context.get_admin_context() project_id = self.context.project_id db.volume_type_create(elevated, {'name': 'type', 'extra_specs': {}}) vol_type = db.volume_type_get_by_name(elevated, 'type') volume_api = cinder.volume.api.API() volume = volume_api.create(self.context, 100, 'name', 'description', volume_type=vol_type) try: usage = db.quota_usage_get(elevated, project_id, 'gigabytes_type') volumes_in_use = usage.in_use except exception.QuotaUsageNotFound: volumes_in_use = 0 self.assertEqual(100, volumes_in_use) db.volume_update(self.context, volume.id, {'status': 'available'}) volume_api._extend(self.context, volume, 200) mock_scheduler_extend.assert_called_once_with( self.context, volume, 200, mock.ANY, { 'volume_properties': volume, 'volume_type': vol_type, 'volume_id': volume.id }) try: usage = db.quota_usage_get(elevated, project_id, 'gigabytes_type') volumes_reserved = usage.reserved except exception.QuotaUsageNotFound: volumes_reserved = 0 self.assertEqual(100, volumes_reserved) @mock.patch('cinder.compute.nova.API.extend_volume') @mock.patch('cinder.volume.manager.VolumeManager.' 
'extend_volume_completion') def test_extend_volume_no_wait_for_nova_available(self, extend_completion, nova_extend): volume = tests_utils.create_volume(self.context, size=2, status='extending') with mock.patch.object(self.volume.driver, 'extend_volume'): self.volume.extend_volume(self.context, volume, 4, [uuids.reservation]) extend_completion.assert_called_once_with(self.context, volume, 4, [uuids.reservation], error=False) nova_extend.assert_not_called() self.assertNotIn('extend_new_size', volume.admin_metadata) self.assertNotIn('extend_reservations', volume.admin_metadata) @mock.patch('cinder.compute.nova.API.extend_volume') @mock.patch('cinder.volume.manager.VolumeManager.' 'extend_volume_completion') def test_extend_volume_no_wait_for_nova_attached(self, extend_completion, nova_extend): volume = tests_utils.create_volume(self.context, size=2) tests_utils.attach_volume(self.context, volume.id, uuids.instance, 'fake-host', '/dev/vda') db.volume_update(self.context, volume.id, {'status': 'extending'}) volume.refresh() with mock.patch.object(self.volume.driver, 'extend_volume'): self.volume.extend_volume(self.context, volume, 4, [uuids.reservation]) extend_completion.assert_called_once_with(self.context, volume, 4, [uuids.reservation], error=False) nova_extend.assert_called_once_with(self.context, [uuids.instance], volume.id) self.assertNotIn('extend_new_size', volume.admin_metadata) self.assertNotIn('extend_reservations', volume.admin_metadata) @mock.patch('cinder.compute.nova.API.extend_volume', return_value=False) @mock.patch('cinder.volume.manager.VolumeManager.' 'extend_volume_completion') def test_extend_volume_no_wait_for_nova_fail_to_send(self, extend_completion, nova_extend): volume = tests_utils.create_volume(self.context, size=2) tests_utils.attach_volume(self.context, volume.id, uuids.instance, 'fake-host', '/dev/vda') db.volume_update(self.context, volume.id, {'status': 'extending'}) volume.refresh() with mock.patch.object(self.volume.driver, 'extend_volume'): self.volume.extend_volume(self.context, volume, 4, [uuids.reservation]) extend_completion.assert_called_once_with(self.context, volume, 4, [uuids.reservation], error=False) def test_create_volume_from_sourcevol(self): """Test volume can be created from a source volume.""" def fake_create_cloned_volume(volume, src_vref): pass self.mock_object(self.volume.driver, 'create_cloned_volume', fake_create_cloned_volume) volume_src = tests_utils.create_volume(self.context, **self.volume_params) self.volume.create_volume(self.context, volume_src) volume_dst = tests_utils.create_volume(self.context, source_volid=volume_src['id'], **self.volume_params) self.volume.create_volume(self.context, volume_dst) volume_dst.refresh() self.assertEqual('available', volume_dst.status) self.volume.delete_volume(self.context, volume_dst) self.volume.delete_volume(self.context, volume_src) def test_create_volume_from_sourcevol_fail_bad_size(self): """Test cannot clone volume with bad volume size.""" volume_src = tests_utils.create_volume(self.context, size=3, status='available', host=CONF.host) self.assertRaises(exception.InvalidInput, self.volume_api.create, self.context, size=1, name='fake_name', description='fake_desc', source_volume=volume_src) @mock.patch('cinder.volume.api.API.list_availability_zones', return_value=({'name': 'nova', 'available': True}, {'name': 'az2', 'available': True})) def test_create_volume_from_sourcevol_fail_wrong_az(self, _mock_laz): """Test volume can't be cloned from another volume in a different az.""" volume_api = 
cinder.volume.api.API() volume_src = self._create_volume(self.context, availability_zone='az2', **self.volume_params) self.volume.create_volume(self.context, volume_src) volume_src = db.volume_get(self.context, volume_src['id']) volume_dst = volume_api.create(self.context, size=1, name='fake_name', description='fake_desc', source_volume=volume_src, volume_type= objects.VolumeType.get_by_name_or_id( self.context, self.vol_type['id'])) self.assertEqual('az2', volume_dst['availability_zone']) self.assertRaises(exception.InvalidInput, volume_api.create, self.context, size=1, name='fake_name', description='fake_desc', source_volume=volume_src, availability_zone='nova') @mock.patch('cinder.image.image_utils.qemu_img_info') def test_create_volume_from_sourcevol_with_glance_metadata( self, mock_qemu_info): """Test glance metadata can be correctly copied to new volume.""" def fake_create_cloned_volume(volume, src_vref): pass self.mock_object(self.volume.driver, 'create_cloned_volume', fake_create_cloned_volume) image_info = imageutils.QemuImgInfo() image_info.virtual_size = '1073741824' mock_qemu_info.return_value = image_info volume_src = self._create_volume_from_image() self.volume.create_volume(self.context, volume_src) volume_dst = tests_utils.create_volume(self.context, source_volid=volume_src['id'], **self.volume_params) self.volume.create_volume(self.context, volume_dst) self.assertEqual('available', db.volume_get(context.get_admin_context(), volume_dst['id']).status) # TODO: review all tests in this file to make sure they are # using the defined db.api to access stuff rather than taking # shortcuts like the following (see LP Bug #1860817): # src_glancemeta = db.volume_get(context.get_admin_context(), # volume_src['id']).volume_glance_metadata src_glancemeta = db.volume_glance_metadata_get( context.get_admin_context(), volume_src['id']) dst_glancemeta = db.volume_glance_metadata_get( context.get_admin_context(), volume_dst['id']) for meta_src in src_glancemeta: for meta_dst in dst_glancemeta: if meta_dst.key == meta_src.key: self.assertEqual(meta_src.value, meta_dst.value) self.volume.delete_volume(self.context, volume_src) self.volume.delete_volume(self.context, volume_dst) def test_create_volume_from_sourcevol_failed_clone(self): """Test src vol status will be restore by error handling code.""" def fake_error_create_cloned_volume(volume, src_vref): db.volume_update(self.context, src_vref['id'], {'status': 'error'}) raise exception.CinderException('fake exception') self.mock_object(self.volume.driver, 'create_cloned_volume', fake_error_create_cloned_volume) volume_src = tests_utils.create_volume(self.context, **self.volume_params) self.assertEqual('creating', volume_src.status) self.volume.create_volume(self.context, volume_src) self.assertEqual('available', volume_src.status) volume_dst = tests_utils.create_volume(self.context, source_volid=volume_src['id'], **self.volume_params) self.assertEqual('creating', volume_dst.status) self.assertRaises(exception.CinderException, self.volume.create_volume, self.context, volume_dst) # Source volume's status is still available and dst is set to error self.assertEqual('available', volume_src.status) self.assertEqual('error', volume_dst.status) self.volume.delete_volume(self.context, volume_dst) self.volume.delete_volume(self.context, volume_src) def test_clean_temporary_volume(self): def fake_delete_volume(ctxt, volume): volume.destroy() fake_volume = tests_utils.create_volume(self.context, size=1, host=CONF.host, migration_status='migrating') 
fake_new_volume = tests_utils.create_volume(self.context, size=1, host=CONF.host) # 1. Only clean the db self.volume._clean_temporary_volume(self.context, fake_volume, fake_new_volume, clean_db_only=True) self.assertRaises(exception.VolumeNotFound, db.volume_get, self.context, fake_new_volume.id) # 2. Delete the backend storage fake_new_volume = tests_utils.create_volume(self.context, size=1, host=CONF.host) with mock.patch.object(volume_rpcapi.VolumeAPI, 'delete_volume') as \ mock_delete_volume: mock_delete_volume.side_effect = fake_delete_volume self.volume._clean_temporary_volume(self.context, fake_volume, fake_new_volume, clean_db_only=False) self.assertRaises(exception.VolumeNotFound, db.volume_get, self.context, fake_new_volume.id) # Check when the migrated volume is not in migration fake_new_volume = tests_utils.create_volume(self.context, size=1, host=CONF.host) fake_volume.migration_status = 'non-migrating' fake_volume.save() self.volume._clean_temporary_volume(self.context, fake_volume, fake_new_volume) volume = db.volume_get(context.get_admin_context(), fake_new_volume.id) self.assertIsNone(volume.migration_status) def test_check_volume_filters_true(self): """Test bootable as filter for true""" volume_api = cinder.volume.api.API() filters = {'bootable': 'TRUE'} # To convert filter value to True or False volume_api.check_volume_filters(filters) # Confirming converted filter value against True self.assertTrue(filters['bootable']) def test_check_volume_filters_false(self): """Test bootable as filter for false""" volume_api = cinder.volume.api.API() filters = {'bootable': 'false'} # To convert filter value to True or False volume_api.check_volume_filters(filters) # Confirming converted filter value against False self.assertEqual(False, filters['bootable']) def test_check_volume_filters_invalid(self): """Test bootable as filter""" volume_api = cinder.volume.api.API() filters = {'bootable': 'invalid'} # To convert filter value to True or False volume_api.check_volume_filters(filters) # Confirming converted filter value against invalid value self.assertTrue(filters['bootable']) def test_update_volume_readonly_flag(self): """Test volume readonly flag can be updated at API level.""" # create a volume and assign to host volume = tests_utils.create_volume(self.context, admin_metadata={'readonly': 'True'}, **self.volume_params) self.volume.create_volume(self.context, volume) volume.status = 'in-use' def sort_func(obj): return obj['name'] volume_api = cinder.volume.api.API() # Update fails when status != available self.assertRaises(exception.InvalidVolume, volume_api.update_readonly_flag, self.context, volume, False) volume.status = 'available' # works when volume in 'available' status volume_api.update_readonly_flag(self.context, volume, False) volume.refresh() self.assertEqual('available', volume.status) admin_metadata = volume.volume_admin_metadata self.assertEqual(1, len(admin_metadata)) self.assertEqual('readonly', admin_metadata[0]['key']) self.assertEqual('False', admin_metadata[0]['value']) # clean up self.volume.delete_volume(self.context, volume) def test_secure_file_operations_enabled(self): """Test secure file operations setting for base driver. General, non network file system based drivers do not have anything to do with "secure_file_operations". This test verifies that calling the method always returns False. 
""" ret_flag = self.volume.driver.secure_file_operations_enabled() self.assertFalse(ret_flag) @mock.patch.object(driver.BaseVD, 'secure_file_operations_enabled') def test_secure_file_operations_enabled_2(self, mock_secure): mock_secure.return_value = True vol = tests_utils.create_volume(self.context) result = self.volume.secure_file_operations_enabled(self.context, vol) mock_secure.assert_called_once_with() self.assertTrue(result) @mock.patch('cinder.volume.flows.common.make_pretty_name', new=mock.MagicMock()) @mock.patch('cinder.scheduler.rpcapi.SchedulerAPI.create_volume', return_value=None) @mock.patch('cinder.volume.flows.manager.create_volume.' 'CreateVolumeFromSpecTask.execute', side_effect=exception.DriverNotInitialized()) def test_create_volume_raise_rescheduled_exception(self, mock_execute, mock_reschedule): # Create source volume test_vol = tests_utils.create_volume(self.context, **self.volume_params) test_vol_id = test_vol['id'] self.assertRaises(exception.DriverNotInitialized, self.volume.create_volume, self.context, test_vol, {'volume_properties': self.volume_params}, {'retry': {'num_attempts': 1, 'host': []}}) self.assertTrue(mock_reschedule.called) volume = db.volume_get(context.get_admin_context(), test_vol_id) self.assertEqual('creating', volume['status']) # We increase the stats on entering the create method, but we must # have cleared them on reschedule. self.assertEqual({'_pool0': {'allocated_capacity_gb': 0}}, self.volume.stats['pools']) @mock.patch('cinder.volume.flows.manager.create_volume.' 'CreateVolumeFromSpecTask.execute') def test_create_volume_raise_unrescheduled_exception(self, mock_execute): # create source volume test_vol = tests_utils.create_volume(self.context, **self.volume_params) test_vol_id = test_vol['id'] mock_execute.side_effect = exception.VolumeNotFound( volume_id=test_vol_id) self.assertRaises(exception.VolumeNotFound, self.volume.create_volume, self.context, test_vol, {'volume_properties': self.volume_params, 'source_volid': fake.VOLUME_ID}, {'retry': {'num_attempts': 1, 'host': []}}) volume = db.volume_get(context.get_admin_context(), test_vol_id) self.assertEqual('error', volume['status']) self.assertEqual({'_pool0': {'allocated_capacity_gb': 1}}, self.volume.stats['pools']) @mock.patch('cinder.utils.api_clean_volume_file_locks') def test_cascade_delete_volume_with_snapshots(self, mock_api_clean): """Test volume deletion with dependent snapshots.""" volume = tests_utils.create_volume(self.context, **self.volume_params) self.volume.create_volume(self.context, volume) snapshot = create_snapshot(volume['id'], size=volume['size']) self.volume.create_snapshot(self.context, snapshot) self.assertEqual( snapshot.id, objects.Snapshot.get_by_id(self.context, snapshot.id).id) volume['status'] = 'available' volume['host'] = 'fakehost' volume_api = cinder.volume.api.API() volume_api.delete(self.context, volume, cascade=True) mock_api_clean.assert_called_once_with(volume.id) @mock.patch('cinder.utils.api_clean_volume_file_locks') def test_cascade_delete_volume_with_snapshots_error(self, mock_api_clean): """Test volume deletion with dependent snapshots.""" volume = tests_utils.create_volume(self.context, **self.volume_params) self.volume.create_volume(self.context, volume) snapshot = create_snapshot(volume['id'], size=volume['size']) self.volume.create_snapshot(self.context, snapshot) self.assertEqual( snapshot.id, objects.Snapshot.get_by_id(self.context, snapshot.id).id) snapshot.update({'status': fields.SnapshotStatus.CREATING}) snapshot.save() 
volume['status'] = 'available' volume['host'] = 'fakehost' volume_api = cinder.volume.api.API() self.assertRaises(exception.InvalidVolume, volume_api.delete, self.context, volume, cascade=True) mock_api_clean.assert_not_called() @mock.patch('cinder.utils.api_clean_volume_file_locks') def test_cascade_force_delete_volume_with_snapshots_error(self, mock_api_clean): """Test volume force deletion with errored dependent snapshots.""" volume = tests_utils.create_volume(self.context, host='fakehost') snapshot = create_snapshot(volume.id, size=volume.size, status=fields.SnapshotStatus.ERROR_DELETING) self.volume.create_snapshot(self.context, snapshot) volume_api = cinder.volume.api.API() volume_api.delete(self.context, volume, cascade=True, force=True) snapshot = objects.Snapshot.get_by_id(self.context, snapshot.id) self.assertEqual('deleting', snapshot.status) volume = objects.Volume.get_by_id(self.context, volume.id) self.assertEqual('deleting', volume.status) mock_api_clean.assert_called_once_with(volume.id) def test_cascade_delete_volume_with_snapshots_in_other_project(self): """Test volume deletion with dependent snapshots in other project.""" volume = tests_utils.create_volume(self.user_context, **self.volume_params) snapshot = create_snapshot(volume['id'], size=volume['size'], project_id=fake.PROJECT2_ID) self.volume.create_snapshot(self.context, snapshot) self.assertEqual( snapshot.id, objects.Snapshot.get_by_id(self.context, snapshot.id).id) volume['status'] = 'available' volume['host'] = 'fakehost' volume_api = cinder.volume.api.API() self.assertRaises(exception.InvalidVolume, volume_api.delete, self.user_context, volume, cascade=True) @mock.patch.object(driver.BaseVD, 'get_backup_device') @mock.patch.object(driver.BaseVD, 'secure_file_operations_enabled') def test_get_backup_device(self, mock_secure, mock_get_backup): vol = tests_utils.create_volume(self.context) backup = tests_utils.create_backup(self.context, vol['id']) mock_secure.return_value = False mock_get_backup.return_value = (vol, False) result = self.volume.get_backup_device(self.context, backup) mock_get_backup.assert_called_once_with(self.context, backup) mock_secure.assert_called_once_with() expected_result = {'backup_device': vol, 'secure_enabled': False, 'is_snapshot': False} self.assertEqual(expected_result, result) @mock.patch.object(driver.BaseVD, 'get_backup_device') @mock.patch.object(driver.BaseVD, 'secure_file_operations_enabled') def test_get_backup_device_want_objects(self, mock_secure, mock_get_backup): vol = tests_utils.create_volume(self.context) backup = tests_utils.create_backup(self.context, vol['id']) mock_secure.return_value = False mock_get_backup.return_value = (vol, False) result = self.volume.get_backup_device(self.context, backup, want_objects=True) mock_get_backup.assert_called_once_with(self.context, backup) mock_secure.assert_called_once_with() expected_result = objects.BackupDeviceInfo.from_primitive( {'backup_device': vol, 'secure_enabled': False, 'is_snapshot': False}, self.context) self.assertEqual(expected_result, result) @mock.patch('cinder.tests.fake_driver.FakeLoggingVolumeDriver.' 
'SUPPORTS_ACTIVE_ACTIVE', True) def test_set_resource_host_different(self): manager = vol_manager.VolumeManager(host='localhost-1@ceph', cluster='mycluster@ceph') volume = tests_utils.create_volume(self.user_context, host='localhost-2@ceph#ceph', cluster_name='mycluster@ceph') manager._set_resource_host(volume) volume.refresh() self.assertEqual('localhost-1@ceph#ceph', volume.host) @mock.patch('cinder.tests.fake_driver.FakeLoggingVolumeDriver.' 'SUPPORTS_ACTIVE_ACTIVE', True) def test_set_resource_host_equal(self): manager = vol_manager.VolumeManager(host='localhost-1@ceph', cluster='mycluster@ceph') volume = tests_utils.create_volume(self.user_context, host='localhost-1@ceph#ceph', cluster_name='mycluster@ceph') with mock.patch.object(volume, 'save') as save_mock: manager._set_resource_host(volume) save_mock.assert_not_called() def test_volume_attach_attaching(self): """Test volume_attach.""" instance_uuid = '12345678-1234-5678-1234-567812345678' volume = tests_utils.create_volume(self.context, **self.volume_params) attachment = db.volume_attach(self.context, {'volume_id': volume['id'], 'attached_host': 'fake-host'}) db.volume_attached(self.context, attachment['id'], instance_uuid, 'fake-host', 'vdb', mark_attached=False) volume_api = cinder.volume.api.API() volume = volume_api.get(self.context, volume['id']) self.assertEqual("attaching", volume['status']) self.assertEqual("attaching", volume['attach_status']) def test__append_volume_stats_with_pools(self): manager = vol_manager.VolumeManager() manager.stats = {'pools': {'pool1': {'allocated_capacity_gb': 20}, 'pool2': {'allocated_capacity_gb': 10}}} vol_stats = {'vendor_name': 'Open Source', 'pools': [ {'pool_name': 'pool1', 'provisioned_capacity_gb': 31}, {'pool_name': 'pool2', 'provisioned_capacity_gb': 21}]} manager._append_volume_stats(vol_stats) expected = {'vendor_name': 'Open Source', 'pools': [ {'pool_name': 'pool1', 'provisioned_capacity_gb': 31, 'allocated_capacity_gb': 20}, {'pool_name': 'pool2', 'provisioned_capacity_gb': 21, 'allocated_capacity_gb': 10}]} self.assertDictEqual(expected, vol_stats) def test__append_volume_stats_no_pools(self): manager = vol_manager.VolumeManager() manager.stats = {'pools': {'backend': {'allocated_capacity_gb': 20}}} vol_stats = {'provisioned_capacity_gb': 30} manager._append_volume_stats(vol_stats) expected = {'provisioned_capacity_gb': 30, 'allocated_capacity_gb': 20} self.assertDictEqual(expected, vol_stats) def test__append_volume_stats_no_pools_no_volumes(self): manager = vol_manager.VolumeManager() # This is what gets set on c-vol manager's init_host method manager.stats = {'pools': {}, 'allocated_capacity_gb': 0} vol_stats = {'provisioned_capacity_gb': 30} manager._append_volume_stats(vol_stats) expected = {'provisioned_capacity_gb': 30, 'allocated_capacity_gb': 0} self.assertDictEqual(expected, vol_stats) def test__append_volume_stats_driver_error(self): manager = vol_manager.VolumeManager() self.assertRaises(exception.ProgrammingError, manager._append_volume_stats, {'pools': 'bad_data'}) def test_default_tpool_size(self): self.skipTest("Bug 1811663") """Test we can set custom tpool size.""" eventlet.tpool._nthreads = 10 self.assertListEqual([], eventlet.tpool._threads) vol_manager.VolumeManager() self.assertEqual(20, eventlet.tpool._nthreads) self.assertListEqual([], eventlet.tpool._threads) def test_tpool_size(self): self.skipTest("Bug 1811663") """Test we can set custom tpool size.""" self.assertNotEqual(100, eventlet.tpool._nthreads) self.assertListEqual([], eventlet.tpool._threads) 
self.override_config('backend_native_threads_pool_size', 100, group='backend_defaults') vol_manager.VolumeManager() self.assertEqual(100, eventlet.tpool._nthreads) self.assertListEqual([], eventlet.tpool._threads) eventlet.tpool._nthreads = 20 class VolumeTestCaseLocks(base.BaseVolumeTestCase): MOCK_TOOZ = False def test_create_volume_from_volume_delete_lock_taken(self): # create source volume src_vol = tests_utils.create_volume(self.context, **self.volume_params) src_vol_id = src_vol['id'] # no lock self.volume.create_volume(self.context, src_vol) dst_vol = tests_utils.create_volume(self.context, source_volid=src_vol_id, **self.volume_params) orig_elevated = self.context.elevated gthreads = [] def mock_elevated(*args, **kwargs): # unset mock so it is only called once self.mock_object(self.context, 'elevated', orig_elevated) # we expect this to block and then fail t = eventlet.spawn(self.volume.create_volume, self.context, volume=dst_vol, request_spec={'source_volid': src_vol_id}) gthreads.append(t) return orig_elevated(*args, **kwargs) # mock something from early on in the delete operation and within the # lock so that when we do the create we expect it to block. self.mock_object(self.context, 'elevated', mock_elevated) # locked self.volume.delete_volume(self.context, src_vol) # we expect the volume create to fail with the following err since the # source volume was deleted while the create was locked. Note that the # volume is still in the db since it was created by the test prior to # calling manager.create_volume. with mock.patch('sys.stderr', new=io.StringIO()): self.assertRaises(exception.VolumeNotFound, gthreads[0].wait) def test_create_volume_from_snapshot_delete_lock_taken(self): # create source volume src_vol = tests_utils.create_volume(self.context, **self.volume_params) # no lock self.volume.create_volume(self.context, src_vol) # create snapshot snap_id = create_snapshot(src_vol.id, size=src_vol['size'])['id'] snapshot_obj = objects.Snapshot.get_by_id(self.context, snap_id) # no lock self.volume.create_snapshot(self.context, snapshot_obj) # create vol from snapshot... dst_vol = tests_utils.create_volume(self.context, snapshot_id=snap_id, source_volid=src_vol.id, **self.volume_params) orig_elevated = self.context.elevated gthreads = [] def mock_elevated(*args, **kwargs): # unset mock so it is only called once self.mock_object(self.context, 'elevated', orig_elevated) # We expect this to block and then fail t = eventlet.spawn(self.volume.create_volume, self.context, volume=dst_vol, request_spec={'snapshot_id': snap_id}) gthreads.append(t) return orig_elevated(*args, **kwargs) # mock something from early on in the delete operation and within the # lock so that when we do the create we expect it to block. self.mock_object(self.context, 'elevated', mock_elevated) # locked self.volume.delete_snapshot(self.context, snapshot_obj) # we expect the volume create to fail with the following err since the # snapshot was deleted while the create was locked. Note that the # volume is still in the db since it was created by the test prior to # calling manager.create_volume. 
with mock.patch('sys.stderr', new=io.StringIO()): self.assertRaises(exception.SnapshotNotFound, gthreads[0].wait) # locked self.volume.delete_volume(self.context, src_vol) # make sure it is gone self.assertRaises(exception.VolumeNotFound, db.volume_get, self.context, src_vol.id)
cinder-27.0.0/cinder/tests/unit/volume/test_volume_manager.py
# Copyright 2019, Red Hat Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Tests for Volume Manager Code.""" from unittest import mock import ddt from cinder.common import constants from cinder import exception from cinder.message import message_field from cinder.tests.unit import fake_constants as fake from cinder.tests.unit import fake_volume from cinder.tests.unit import volume as base from cinder.volume import manager as vol_manager @ddt.ddt class VolumeManagerTestCase(base.BaseVolumeTestCase): @mock.patch('cinder.message.api.API.create') @mock.patch('cinder.volume.volume_utils.require_driver_initialized') @mock.patch('cinder.volume.manager.VolumeManager.' '_notify_about_snapshot_usage') def test_create_snapshot_driver_not_initialized_generates_user_message( self, fake_notify, fake_init, fake_msg_create): manager = vol_manager.VolumeManager() fake_init.side_effect = exception.CinderException() fake_snapshot = mock.MagicMock(id='22') fake_context = mock.MagicMock() fake_context.elevated.return_value = fake_context ex = self.assertRaises(exception.CinderException, manager.create_snapshot, fake_context, fake_snapshot) # make sure a user message was generated fake_msg_create.assert_called_once_with( fake_context, action=message_field.Action.SNAPSHOT_CREATE, resource_type=message_field.Resource.VOLUME_SNAPSHOT, resource_uuid=fake_snapshot['id'], exception=ex, detail=message_field.Detail.SNAPSHOT_CREATE_ERROR) @mock.patch('cinder.message.api.API.create') @mock.patch('cinder.volume.volume_utils.require_driver_initialized') @mock.patch('cinder.volume.manager.VolumeManager.' 
'_notify_about_snapshot_usage') def test_create_snapshot_metadata_update_failure_generates_user_message( self, fake_notify, fake_init, fake_msg_create): manager = vol_manager.VolumeManager() fake_driver = mock.MagicMock() fake_driver.create_snapshot.return_value = False manager.driver = fake_driver fake_vol_ref = mock.MagicMock() fake_vol_ref.bootable.return_value = True fake_db = mock.MagicMock() fake_db.volume_get.return_value = fake_vol_ref fake_exp = exception.CinderException() fake_db.volume_glance_metadata_copy_to_snapshot.side_effect = fake_exp manager.db = fake_db fake_snapshot = mock.MagicMock(id='86') fake_context = mock.MagicMock() fake_context.elevated.return_value = fake_context self.assertRaises(exception.CinderException, manager.create_snapshot, fake_context, fake_snapshot) # make sure a user message was generated fake_msg_create.assert_called_once_with( fake_context, action=message_field.Action.SNAPSHOT_CREATE, resource_type=message_field.Resource.VOLUME_SNAPSHOT, resource_uuid=fake_snapshot['id'], exception=fake_exp, detail=message_field.Detail.SNAPSHOT_UPDATE_METADATA_FAILED) @mock.patch('cinder.message.api.API.create') @mock.patch('cinder.volume.volume_utils.require_driver_initialized') @mock.patch('cinder.volume.manager.VolumeManager.' '_notify_about_snapshot_usage') def test_delete_snapshot_when_busy_generates_user_message( self, fake_notify, fake_init, fake_msg_create): manager = vol_manager.VolumeManager() fake_snapshot = mock.MagicMock(id='0', project_id='1') fake_context = mock.MagicMock() fake_context.elevated.return_value = fake_context fake_exp = exception.SnapshotIsBusy(snapshot_name='Fred') fake_init.side_effect = fake_exp manager.delete_snapshot(fake_context, fake_snapshot) # make sure a user message was generated fake_msg_create.assert_called_once_with( fake_context, action=message_field.Action.SNAPSHOT_DELETE, resource_type=message_field.Resource.VOLUME_SNAPSHOT, resource_uuid=fake_snapshot['id'], exception=fake_exp) @mock.patch('cinder.message.api.API.create') @mock.patch('cinder.volume.volume_utils.require_driver_initialized') @mock.patch('cinder.volume.manager.VolumeManager.' 
'_notify_about_snapshot_usage') def test_delete_snapshot_general_exception_generates_user_message( self, fake_notify, fake_init, fake_msg_create): manager = vol_manager.VolumeManager() fake_snapshot = mock.MagicMock(id='0', project_id='1') fake_context = mock.MagicMock() fake_context.elevated.return_value = fake_context class LocalException(Exception): pass fake_exp = LocalException() # yeah, this isn't where it would be coming from in real life, # but it saves mocking out a bunch more stuff fake_init.side_effect = fake_exp self.assertRaises(LocalException, manager.delete_snapshot, fake_context, fake_snapshot) # make sure a user message was generated fake_msg_create.assert_called_once_with( fake_context, action=message_field.Action.SNAPSHOT_DELETE, resource_type=message_field.Resource.VOLUME_SNAPSHOT, resource_uuid=fake_snapshot['id'], exception=fake_exp, detail=message_field.Detail.SNAPSHOT_DELETE_ERROR) @mock.patch('cinder.volume.rpcapi.VolumeAPI') def test_attach_volume_local(self, mock_api): manager = vol_manager.VolumeManager() mock_initialize = self.mock_object(manager, 'initialize_connection') mock_connect = self.mock_object(manager, '_connect_device') ctxt = mock.sentinel.context vol = fake_volume.fake_volume_obj(ctxt) result = manager._attach_volume(ctxt, vol, mock.sentinel.properties, remote=False) mock_api.assert_not_called() mock_initialize.assert_called_once_with(ctxt, vol, mock.sentinel.properties) mock_connect.assert_called_once_with(mock_initialize.return_value) self.assertEqual(mock_connect.return_value, result) @mock.patch('cinder.volume.rpcapi.VolumeAPI') def test_attach_volume_remote(self, mock_api): mock_rpc = mock_api.return_value manager = vol_manager.VolumeManager() mock_connect = self.mock_object(manager, '_connect_device') mock_initialize = self.mock_object(manager, 'initialize_connection') ctxt = mock.sentinel.context vol = fake_volume.fake_volume_obj(ctxt) result = manager._attach_volume(ctxt, vol, mock.sentinel.properties, remote=True) mock_api.assert_called_once_with() mock_initialize.assert_not_called() mock_rpc.initialize_connection.assert_called_once_with( ctxt, vol, mock.sentinel.properties) mock_connect.assert_called_once_with( mock_rpc.initialize_connection.return_value) self.assertEqual(mock_connect.return_value, result) @mock.patch('cinder.volume.rpcapi.VolumeAPI') def test_attach_volume_fail_connect(self, mock_api): mock_initialize = mock_api.return_value.initialize_connection manager = vol_manager.VolumeManager() mock_detach = self.mock_object(manager, '_detach_volume') mock_connect = self.mock_object(manager, '_connect_device', side_effect=ValueError) ctxt = mock.sentinel.context vol = fake_volume.fake_volume_obj(ctxt) self.assertRaises(ValueError, manager._attach_volume, ctxt, vol, mock.sentinel.properties, mock.sentinel.remote) mock_initialize.assert_called_once_with(ctxt, vol, mock.sentinel.properties) mock_connect.assert_called_once_with(mock_initialize.return_value) mock_detach.assert_called_once_with( ctxt, None, vol, mock.sentinel.properties, force=True, remote=mock.sentinel.remote) @mock.patch('cinder.volume.volume_utils.brick_attach_volume_encryptor') @mock.patch('cinder.volume.volume_types.is_encrypted') @mock.patch('cinder.volume.rpcapi.VolumeAPI') def test_attach_volume_fail_decrypt(self, mock_api, mock_is_encrypted, mock_attach_encryptor): mock_initialize = mock_api.return_value.initialize_connection manager = vol_manager.VolumeManager() mock_detach = self.mock_object(manager, '_detach_volume') mock_connect = self.mock_object(manager, 
'_connect_device') mock_db = self.mock_object(manager.db, 'volume_encryption_metadata_get') mock_attach_encryptor.side_effect = ValueError ctxt = mock.Mock() vol = fake_volume.fake_volume_obj(ctxt) self.assertRaises(ValueError, manager._attach_volume, ctxt, vol, mock.sentinel.properties, mock.sentinel.remote, attach_encryptor=True) mock_initialize.assert_called_once_with(ctxt, vol, mock.sentinel.properties) mock_connect.assert_called_once_with(mock_initialize.return_value) mock_is_encrypted.assert_called_once_with(ctxt, vol.volume_type_id) mock_db.assert_called_once_with(ctxt.elevated.return_value, vol.id) mock_attach_encryptor.assert_called_once_with( ctxt, mock_connect.return_value, mock_db.return_value) mock_detach.assert_called_once_with( ctxt, mock_connect.return_value, vol, mock.sentinel.properties, force=True, remote=mock.sentinel.remote) @mock.patch('cinder.volume.volume_types.get_volume_type_extra_specs') @mock.patch('cinder.volume.volume_types.get_volume_type_qos_specs', return_value={'qos_specs': None}) def test_parse_connection_options_cacheable(self, mock_get_qos, mock_get_extra_specs): ctxt = mock.Mock() manager = vol_manager.VolumeManager() vol = fake_volume.fake_volume_obj(ctxt) vol.volume_type_id = fake.VOLUME_TYPE_ID # no 'cacheable' set by driver, should be extra spec conn_info = {"data": {}} mock_get_extra_specs.return_value = {'cacheable': ' True'} manager._parse_connection_options(ctxt, vol, conn_info) self.assertIn('cacheable', conn_info['data']) self.assertIs(conn_info['data']['cacheable'], True) # driver sets 'cacheable' False, should override extra spec conn_info = {"data": {"cacheable": False}} mock_get_extra_specs.return_value = {'cacheable': ' True'} manager._parse_connection_options(ctxt, vol, conn_info) self.assertIn('cacheable', conn_info['data']) self.assertIs(conn_info['data']['cacheable'], False) # driver sets 'cacheable' True, nothing in extra spec, # extra spec should override driver conn_info = {"data": {"cacheable": True}} mock_get_extra_specs.return_value = {} manager._parse_connection_options(ctxt, vol, conn_info) self.assertIn('cacheable', conn_info['data']) self.assertIs(conn_info['data']['cacheable'], False) # driver sets 'cacheable' True, extra spec has False, # extra spec should override driver conn_info = {"data": {"cacheable": True}} mock_get_extra_specs.return_value = {'cacheable': ' False'} manager._parse_connection_options(ctxt, vol, conn_info) self.assertIn('cacheable', conn_info['data']) self.assertIs(conn_info['data']['cacheable'], False) @ddt.data(*(constants.ISCSI_VARIANTS + constants.NVMEOF_VARIANTS)) def test__driver_shares_targets_reported_shared(self, protocol): """Shared targets must be reported for iSCSI and NVMe-oF.""" manager = vol_manager.VolumeManager() fake_driver = mock.MagicMock() fake_driver.capabilities = {'shared_targets': True, 'storage_protocol': protocol} manager.driver = fake_driver res = manager._driver_shares_targets() expected = True if protocol in constants.ISCSI_VARIANTS else None self.assertIs(expected, res) @ddt.data(*(constants.ISCSI_VARIANTS + constants.NVMEOF_VARIANTS)) def test__driver_shares_targets_reported_nonshared(self, protocol): """Protocol is irrelevant for drivers that don't share targets.""" manager = vol_manager.VolumeManager() fake_driver = mock.MagicMock() fake_driver.capabilities = {'shared_targets': False, 'storage_protocol': protocol} manager.driver = fake_driver res = manager._driver_shares_targets() self.assertFalse(res) @ddt.data(*(constants.ISCSI_VARIANTS + constants.NVMEOF_VARIANTS)) 
def test__driver_shares_targets_not_reported(self, protocol): """When driver doesn't report, assume it's shared.""" manager = vol_manager.VolumeManager() fake_driver = mock.MagicMock() fake_driver.capabilities = {'storage_protocol': protocol} manager.driver = fake_driver res = manager._driver_shares_targets() expected = True if protocol in constants.ISCSI_VARIANTS else None self.assertIs(expected, res) @ddt.data({'storage_protocol': 'NFS'}, {'shared_targets': True, 'storage_protocol': 'NFS'}, {'storage_protocol': 'ceph'}, {'shared_targets': True, 'storage_protocol': 'ceph'}) def test__driver_shares_targets_other_protocols(self, capabilities): """Sharing is irrelevant for other protocols.""" manager = vol_manager.VolumeManager() fake_driver = mock.MagicMock() fake_driver.capabilities = capabilities manager.driver = fake_driver res = manager._driver_shares_targets() self.assertFalse(res) @mock.patch('cinder.message.api.API.create') @mock.patch('cinder.volume.volume_utils.require_driver_initialized') @mock.patch('cinder.volume.manager.VolumeManager._clone_image_volume') @mock.patch('cinder.db.volume_metadata_update') def test_clone_image_no_volume(self, fake_update, fake_clone, fake_msg_create, fake_init): """Make sure nothing happens if no volume was created.""" manager = vol_manager.VolumeManager() ctx = mock.sentinel.context volume = fake_volume.fake_volume_obj(ctx) image_service = mock.MagicMock(spec=[]) fake_clone.return_value = None image_meta = {'disk_format': 'raw', 'container_format': 'ova'} manager._clone_image_volume_and_add_location(ctx, volume, image_service, image_meta) fake_clone.assert_not_called() fake_update.assert_not_called() image_meta = {'disk_format': 'qcow2', 'container_format': 'bare'} manager._clone_image_volume_and_add_location(ctx, volume, image_service, image_meta) fake_clone.assert_not_called() fake_update.assert_not_called() image_meta = {'disk_format': 'raw', 'container_format': 'bare'} manager._clone_image_volume_and_add_location(ctx, volume, image_service, image_meta) fake_clone.assert_called_once_with(ctx, volume, image_meta) fake_update.assert_not_called() @mock.patch('cinder.message.api.API.create') @mock.patch('cinder.objects.VolumeType.get_by_id') @mock.patch('cinder.volume.volume_utils.require_driver_initialized') @mock.patch('cinder.volume.manager.VolumeManager._clone_image_volume') @mock.patch('cinder.db.volume_metadata_update') def test_clone_image_no_store_id(self, fake_update, fake_clone, fake_msg_create, fake_volume_type_get, fake_init): """Send a cinder:// URL if no store ID in extra specs.""" manager = vol_manager.VolumeManager() project_id = fake.PROJECT_ID ctx = mock.MagicMock() ctx.elevated.return_value = ctx ctx.project_id = project_id vol_type = fake_volume.fake_volume_type_obj( ctx, id=fake.VOLUME_TYPE_ID, name=fake.VOLUME_TYPE_NAME, extra_specs={'volume_type_backend': 'unknown'}) fake_volume_type_get.return_value = vol_type volume = fake_volume.fake_volume_obj(ctx, id=fake.VOLUME_ID, volume_type_id=vol_type.id) image_volume_id = fake.VOLUME2_ID image_volume = fake_volume.fake_volume_obj(ctx, id=image_volume_id) url = 'cinder://%(vol)s' % {'vol': image_volume_id} image_service = mock.MagicMock(spec=['add_location']) image_meta_id = fake.IMAGE_ID image_meta = { 'id': image_meta_id, 'disk_format': 'raw', 'container_format': 'bare', } image_volume_meta = { 'image_owner': project_id, 'glance_image_id': image_meta_id, } fake_clone.return_value = image_volume manager._clone_image_volume_and_add_location(ctx, volume, image_service, image_meta) 
fake_clone.assert_called_once_with(ctx, volume, image_meta) fake_update.assert_called_with(ctx, image_volume_id, image_volume_meta, False) image_service.add_location.assert_called_once_with(ctx, image_meta_id, url, {}) @mock.patch('cinder.message.api.API.create') @mock.patch('cinder.objects.VolumeType.get_by_id') @mock.patch('cinder.volume.volume_utils.require_driver_initialized') @mock.patch('cinder.volume.manager.VolumeManager._clone_image_volume') @mock.patch('cinder.db.volume_metadata_update') def test_clone_image_with_store_id(self, fake_update, fake_clone, fake_msg_create, fake_volume_type_get, fake_init): """Send a cinder:/// URL.""" manager = vol_manager.VolumeManager() project_id = fake.PROJECT_ID ctx = mock.MagicMock() ctx.elevated.return_value = ctx ctx.project_id = project_id store_id = 'muninn' vol_type = fake_volume.fake_volume_type_obj( ctx, id=fake.VOLUME_TYPE_ID, name=fake.VOLUME_TYPE_NAME, extra_specs={ 'volume_type_backend': 'unknown', 'image_service:store_id': store_id, }) fake_volume_type_get.return_value = vol_type volume = fake_volume.fake_volume_obj(ctx, id=fake.VOLUME_ID, volume_type_id=vol_type.id) image_volume_id = '42' image_volume = mock.MagicMock(spec=['id']) image_volume.id = image_volume_id url = 'cinder://%(store)s/%(vol)s' % { 'store': store_id, 'vol': image_volume_id, } image_service = mock.MagicMock(spec=['add_location']) image_meta_id = fake.IMAGE_ID image_meta = { 'id': image_meta_id, 'disk_format': 'raw', 'container_format': 'bare', } image_volume_meta = { 'image_owner': project_id, 'glance_image_id': image_meta_id, } fake_clone.return_value = image_volume manager._clone_image_volume_and_add_location(ctx, volume, image_service, image_meta) fake_clone.assert_called_once_with(ctx, volume, image_meta) fake_update.assert_called_with(ctx, image_volume_id, image_volume_meta, False) image_service.add_location.assert_called_once_with(ctx, image_meta_id, url, {'store': store_id}) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/volume/test_volume_migration.py0000664000175000017500000015517200000000000024767 0ustar00zuulzuul00000000000000# Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""Tests for Volume Code.""" import time from unittest import mock import ddt import os_brick from oslo_concurrency import processutils from oslo_config import cfg from oslo_utils import imageutils from cinder.common import constants from cinder import context from cinder import db from cinder import exception from cinder import objects from cinder.objects import fields from cinder import quota from cinder import quota_utils from cinder.tests.unit.api import fakes from cinder.tests.unit import fake_constants as fake from cinder.tests.unit import fake_volume from cinder.tests.unit import utils as tests_utils from cinder.tests.unit import volume as base import cinder.volume from cinder.volume import api as volume_api from cinder.volume.flows.manager import create_volume as create_volume_manager from cinder.volume import rpcapi as volume_rpcapi from cinder.volume import volume_types from cinder.volume import volume_utils QUOTAS = quota.QUOTAS CONF = cfg.CONF def create_snapshot(volume_id, size=1, metadata=None, ctxt=None, **kwargs): """Create a snapshot object.""" metadata = metadata or {} snap = objects.Snapshot(ctxt or context.get_admin_context()) snap.volume_size = size snap.user_id = kwargs.get('user_id', fake.USER_ID) snap.project_id = kwargs.get('project_id', fake.PROJECT_ID) snap.volume_id = volume_id snap.status = fields.SnapshotStatus.CREATING if metadata is not None: snap.metadata = metadata snap.update(kwargs) snap.create() return snap @ddt.ddt class VolumeMigrationTestCase(base.BaseVolumeTestCase): def setUp(self): super(VolumeMigrationTestCase, self).setUp() self._clear_patch = mock.patch( 'cinder.volume.volume_utils.clear_volume', autospec=True) self._clear_patch.start() self.expected_status = 'available' self._service = tests_utils.create_service( self.context, values={'host': 'newhost', 'binary': constants.VOLUME_BINARY}) def tearDown(self): super(VolumeMigrationTestCase, self).tearDown() self._clear_patch.stop() def test_migrate_volume_driver(self): """Test volume migration done by driver.""" # Mock driver and rpc functions self.mock_object(self.volume.driver, 'migrate_volume', lambda x, y, z, new_type_id=None: ( True, {'user_id': fake.USER_ID})) volume = tests_utils.create_volume(ctxt=self.context, size=0, host=CONF.host, migration_status='migrating') host_obj = {'host': 'newhost', 'capabilities': {}} self.volume.migrate_volume(self.context, volume, host_obj, False) # check volume properties volume = objects.Volume.get_by_id(context.get_admin_context(), volume.id) self.assertEqual('newhost', volume.host) self.assertEqual('success', volume.migration_status) @mock.patch('cinder.volume.manager.VolumeManager.' 
'_can_use_driver_migration') def test_migrate_volume_driver_for_retype(self, mock_can_use): """Test volume migration done by driver on a retype.""" # Mock driver and rpc functions mock_driver = self.mock_object(self.volume.driver, 'migrate_volume', return_value=(True, {})) volume = tests_utils.create_volume(self.context, size=0, host=CONF.host, migration_status='migrating') host_obj = {'host': 'newhost', 'capabilities': {}} self.volume.migrate_volume(self.context, volume, host_obj, False, fake.VOLUME_TYPE2_ID, mock.sentinel.diff) mock_can_use.assert_called_once_with(mock.sentinel.diff) mock_driver.assert_called_once_with(self.context, volume, host_obj) # check volume properties volume = objects.Volume.get_by_id(context.get_admin_context(), volume.id) self.assertEqual('newhost', volume.host) self.assertEqual('success', volume.migration_status) self.assertEqual(fake.VOLUME_TYPE2_ID, volume.volume_type_id) @mock.patch('cinder.volume.manager.VolumeManager._migrate_volume_generic') @mock.patch('cinder.volume.manager.VolumeManager.' '_can_use_driver_migration') def test_migrate_volume_driver_for_retype_generic(self, mock_can_use, mock_generic): """Test generic volume migration on a retype after driver can't.""" # Mock driver and rpc functions mock_driver = self.mock_object(self.volume.driver, 'migrate_volume', return_value=(False, None)) volume = tests_utils.create_volume(self.context, size=0, host=CONF.host, migration_status='migrating') host_obj = {'host': 'newhost', 'capabilities': {}} self.volume.migrate_volume(self.context, volume, host_obj, False, fake.VOLUME_TYPE2_ID, mock.sentinel.diff) mock_can_use.assert_called_once_with(mock.sentinel.diff) mock_driver.assert_called_once_with(self.context, volume, host_obj) mock_generic.assert_called_once_with(self.context, volume, host_obj, fake.VOLUME_TYPE2_ID) @mock.patch('cinder.volume.manager.VolumeManager._migrate_volume_generic') def test_migrate_volume_driver_attached_volume(self, mock_generic): """Test driver volume migration with an attachment.""" mock_driver = self.mock_object(self.volume.driver, 'migrate_volume', return_value=(False, None)) volume = tests_utils.create_volume(self.context, size=0, host=CONF.host, migration_status='migrating') volume = tests_utils.attach_volume( self.context, volume, fake.INSTANCE_ID, 'host', '/dev/vda') host_obj = {'host': 'newhost', 'capabilities': {}} self.volume.migrate_volume(self.context, volume, host_obj, False, fake.VOLUME_TYPE2_ID) # Driver assisted migration should not be attempted when the volume # has attachments. 
mock_driver.assert_not_called() mock_generic.assert_called_once_with(self.context, volume, host_obj, fake.VOLUME_TYPE2_ID) def test_migrate_volume_driver_cross_az(self): """Test volume migration done by driver.""" # Mock driver and rpc functions self.mock_object(self.volume.driver, 'migrate_volume', lambda x, y, z, new_type_id=None: ( True, {'user_id': fake.USER_ID})) dst_az = 'AZ2' db.service_update(self.context, self._service.id, {'availability_zone': dst_az}) volume = tests_utils.create_volume(self.context, size=0, host=CONF.host, migration_status='migrating') host_obj = {'host': 'newhost', 'capabilities': {}} self.volume.migrate_volume(self.context, volume, host_obj, False) # check volume properties volume.refresh() self.assertEqual('newhost', volume.host) self.assertEqual('success', volume.migration_status) self.assertEqual(dst_az, volume.availability_zone) def _fake_create_volume(self, ctxt, volume, req_spec, filters, allow_reschedule=True): return db.volume_update(ctxt, volume['id'], {'status': self.expected_status}) def test_migrate_volume_error(self): with mock.patch.object(self.volume.driver, 'migrate_volume') as \ mock_migrate, \ mock.patch.object(self.volume.driver, 'create_export') as \ mock_create_export: # Exception case at self.driver.migrate_volume and create_export mock_migrate.side_effect = processutils.ProcessExecutionError mock_create_export.side_effect = processutils.ProcessExecutionError volume = tests_utils.create_volume(self.context, size=0, host=CONF.host) host_obj = {'host': 'newhost', 'capabilities': {}} self.assertRaises(processutils.ProcessExecutionError, self.volume.migrate_volume, self.context, volume, host_obj, False) volume = objects.Volume.get_by_id(context.get_admin_context(), volume.id) self.assertEqual('error', volume.migration_status) self.assertEqual('available', volume.status) @mock.patch('cinder.compute.API') @mock.patch('cinder.volume.manager.VolumeManager.' 'migrate_volume_completion') @mock.patch('cinder.db.sqlalchemy.api.volume_get') def test_migrate_volume_generic(self, volume_get, migrate_volume_completion, nova_api): def Volume(original=objects.Volume, **kwargs): return original(**kwargs) fake_db_new_volume = {'status': 'available', 'id': fake.VOLUME_ID} fake_new_volume = fake_volume.fake_db_volume(**fake_db_new_volume) new_volume_obj = fake_volume.fake_volume_obj(self.context, **fake_new_volume) host_obj = {'host': 'newhost', 'capabilities': {}} volume_get.return_value = fake_new_volume update_server_volume = nova_api.return_value.update_server_volume volume = tests_utils.create_volume(self.context, size=1, host=CONF.host) volume_mock = self.mock_object(objects, 'Volume', side_effect=Volume) with mock.patch.object(self.volume, '_copy_volume_data') as \ mock_copy_volume: self.volume._migrate_volume_generic(self.context, volume, host_obj, None) # Temporary created volume must not use quota self.assertFalse(volume_mock.call_args[1]['use_quota']) mock_copy_volume.assert_called_with(self.context, volume, new_volume_obj, remote='dest') migrate_volume_completion.assert_called_with( self.context, volume, new_volume_obj, error=False) self.assertFalse(update_server_volume.called) @mock.patch('cinder.compute.API') @mock.patch('cinder.volume.manager.VolumeManager.' 
'migrate_volume_completion') def test_migrate_volume_generic_cross_az(self, migrate_volume_completion, nova_api): """Test that we set the right AZ in cross AZ migrations.""" original_create = objects.Volume.create dst_az = 'AZ2' db.service_update(self.context, self._service.id, {'availability_zone': dst_az}) def my_create(self, *args, **kwargs): self.status = 'available' original_create(self, *args, **kwargs) volume = tests_utils.create_volume(self.context, size=1, host=CONF.host) host_obj = {'host': 'newhost', 'capabilities': {}} create_vol = self.patch('cinder.objects.Volume.create', side_effect=my_create, autospec=True) with mock.patch.object(self.volume, '_copy_volume_data') as copy_mock: self.volume._migrate_volume_generic(self.context, volume, host_obj, None) copy_mock.assert_called_with(self.context, volume, mock.ANY, remote='dest') migrate_volume_completion.assert_called_with( self.context, volume, mock.ANY, error=False) nova_api.return_value.update_server_volume.assert_not_called() self.assertEqual(dst_az, create_vol.call_args[0][0]['availability_zone']) @mock.patch('cinder.compute.API') @mock.patch('cinder.volume.manager.VolumeManager.' 'migrate_volume_completion') @mock.patch('cinder.db.sqlalchemy.api.volume_get') def test_migrate_volume_generic_attached_volume(self, volume_get, migrate_volume_completion, nova_api): attached_host = 'some-host' fake_volume_id = fake.VOLUME_ID fake_db_new_volume = {'status': 'available', 'id': fake_volume_id} fake_new_volume = fake_volume.fake_db_volume(**fake_db_new_volume) host_obj = {'host': 'newhost', 'capabilities': {}} fake_uuid = fakes.get_fake_uuid() update_server_volume = nova_api.return_value.update_server_volume volume_get.return_value = fake_new_volume volume = tests_utils.create_volume(self.context, size=1, host=CONF.host) volume = tests_utils.attach_volume( self.context, volume, fake_uuid, attached_host, '/dev/vda') self.assertIsNotNone(volume.volume_attachment[0].id) self.assertEqual(fake_uuid, volume.volume_attachment[0].instance_uuid) self.assertEqual('in-use', volume.status) self.volume._migrate_volume_generic(self.context, volume, host_obj, None) self.assertFalse(migrate_volume_completion.called) update_server_volume.assert_called_with(self.context, fake_uuid, volume['id'], fake_volume_id) @mock.patch('cinder.objects.volume.Volume.save') @mock.patch('cinder.volume.rpcapi.VolumeAPI.create_volume') @mock.patch('cinder.compute.API') @mock.patch('cinder.volume.manager.VolumeManager.' 
'migrate_volume_completion') @mock.patch('cinder.db.sqlalchemy.api.volume_get') def test_migrate_volume_generic_volume_from_snap(self, volume_get, migrate_volume_completion, nova_api, create_volume, save): def fake_create_volume(*args, **kwargs): context, volume, request_spec, filter_properties = args fake_db = mock.Mock() task = create_volume_manager.ExtractVolumeSpecTask(fake_db) specs = task.execute(context, volume, {}) self.assertEqual('raw', specs['type']) def fake_copy_volume_data_with_chk_param(*args, **kwargs): context, src, dest = args self.assertEqual(src['snapshot_id'], dest['snapshot_id']) fake_db_new_volume = {'status': 'available', 'id': fake.VOLUME_ID} fake_new_volume = fake_volume.fake_db_volume(**fake_db_new_volume) host_obj = {'host': 'newhost', 'capabilities': {}} volume_get.return_value = fake_new_volume volume_from_snap = tests_utils.create_volume(self.context, size=1, host=CONF.host) volume_from_snap['snapshot_id'] = fake.SNAPSHOT_ID create_volume.side_effect = fake_create_volume with mock.patch.object(self.volume, '_copy_volume_data') as \ mock_copy_volume: mock_copy_volume.side_effect = fake_copy_volume_data_with_chk_param self.volume._migrate_volume_generic(self.context, volume_from_snap, host_obj, None) @mock.patch('cinder.objects.volume.Volume.save') @mock.patch('cinder.volume.rpcapi.VolumeAPI.create_volume') @mock.patch('cinder.compute.API') @mock.patch('cinder.volume.manager.VolumeManager.' 'migrate_volume_completion') @mock.patch('cinder.db.sqlalchemy.api.volume_get') def test_migrate_volume_generic_for_clone(self, volume_get, migrate_volume_completion, nova_api, create_volume, save): def fake_create_volume(*args, **kwargs): context, volume, request_spec, filter_properties = args fake_db = mock.Mock() task = create_volume_manager.ExtractVolumeSpecTask(fake_db) specs = task.execute(context, volume, {}) self.assertEqual('raw', specs['type']) def fake_copy_volume_data_with_chk_param(*args, **kwargs): context, src, dest = args self.assertEqual(src['source_volid'], dest['source_volid']) fake_db_new_volume = {'status': 'available', 'id': fake.VOLUME_ID} fake_new_volume = fake_volume.fake_db_volume(**fake_db_new_volume) host_obj = {'host': 'newhost', 'capabilities': {}} volume_get.return_value = fake_new_volume clone = tests_utils.create_volume(self.context, size=1, host=CONF.host) clone['source_volid'] = fake.VOLUME2_ID create_volume.side_effect = fake_create_volume with mock.patch.object(self.volume, '_copy_volume_data') as \ mock_copy_volume: mock_copy_volume.side_effect = fake_copy_volume_data_with_chk_param self.volume._migrate_volume_generic(self.context, clone, host_obj, None) @mock.patch.object(volume_rpcapi.VolumeAPI, 'update_migrated_volume') @mock.patch.object(volume_rpcapi.VolumeAPI, 'delete_volume') @mock.patch.object(volume_rpcapi.VolumeAPI, 'create_volume') def test_migrate_volume_for_volume_generic(self, create_volume, rpc_delete_volume, update_migrated_volume): fake_volume = tests_utils.create_volume(self.context, size=1, previous_status='available', host=CONF.host) host_obj = {'host': 'newhost', 'capabilities': {}} with mock.patch.object(self.volume.driver, 'migrate_volume') as \ mock_migrate_volume, \ mock.patch.object(self.volume, '_copy_volume_data'), \ mock.patch.object(self.volume.driver, 'delete_volume') as \ delete_volume: create_volume.side_effect = self._fake_create_volume self.volume.migrate_volume(self.context, fake_volume, host_obj, True) volume = objects.Volume.get_by_id(context.get_admin_context(), fake_volume.id) 
self.assertEqual('newhost', volume.host) self.assertEqual('success', volume.migration_status) self.assertFalse(mock_migrate_volume.called) self.assertFalse(delete_volume.called) self.assertTrue(rpc_delete_volume.called) self.assertTrue(update_migrated_volume.called) def test_migrate_volume_generic_copy_error(self): with mock.patch.object(self.volume.driver, 'migrate_volume'), \ mock.patch.object(volume_rpcapi.VolumeAPI, 'create_volume') \ as mock_create_volume, \ mock.patch.object(self.volume, '_copy_volume_data') as \ mock_copy_volume, \ mock.patch.object(volume_rpcapi.VolumeAPI, 'delete_volume'), \ mock.patch.object(self.volume, 'migrate_volume_completion'), \ mock.patch.object(self.volume.driver, 'create_export'): # Exception case at migrate_volume_generic # source_volume['migration_status'] is 'migrating' mock_create_volume.side_effect = self._fake_create_volume mock_copy_volume.side_effect = processutils.ProcessExecutionError volume = tests_utils.create_volume(self.context, size=0, host=CONF.host) host_obj = {'host': 'newhost', 'capabilities': {}} self.assertRaises(processutils.ProcessExecutionError, self.volume.migrate_volume, self.context, volume, host_obj, True) volume = objects.Volume.get_by_id(context.get_admin_context(), volume.id) self.assertEqual('error', volume.migration_status) self.assertEqual('available', volume.status) @mock.patch('cinder.image.image_utils.qemu_img_info') def test_migrate_volume_with_glance_metadata(self, mock_qemu_info): volume = self._create_volume_from_image(clone_image_volume=True) glance_metadata = volume.glance_metadata # We imitate the behavior of rpcapi, by serializing and then # deserializing the volume object we created earlier. serializer = objects.base.CinderObjectSerializer() serialized_volume = serializer.serialize_entity(self.context, volume) volume = serializer.deserialize_entity(self.context, serialized_volume) image_info = imageutils.QemuImgInfo() image_info.virtual_size = '1073741824' mock_qemu_info.return_value = image_info host_obj = {'host': 'newhost', 'capabilities': {}} with mock.patch.object(self.volume.driver, 'migrate_volume') as mock_migrate_volume: mock_migrate_volume.side_effect = ( lambda x, y, z, new_type_id=None: ( True, {'user_id': fake.USER_ID})) self.volume.migrate_volume(self.context, volume, host_obj, False) self.assertEqual('newhost', volume.host) self.assertEqual('success', volume.migration_status) self.assertEqual(glance_metadata, volume.glance_metadata) @mock.patch('cinder.db.volume_update') def test_update_migrated_volume(self, volume_update): fake_host = 'fake_host' fake_new_host = 'fake_new_host' fake_update = {'_name_id': fake.VOLUME2_NAME_ID, 'provider_location': 'updated_location'} fake_elevated = context.RequestContext(fake.USER_ID, self.project_id, is_admin=True) volume = tests_utils.create_volume(self.context, size=1, status='available', host=fake_host) new_volume = tests_utils.create_volume( self.context, size=1, status='available', provider_location='fake_provider_location', _name_id=fake.VOLUME_NAME_ID, host=fake_new_host) new_volume._name_id = fake.VOLUME_NAME_ID new_volume.provider_location = 'fake_provider_location' fake_update_error = {'_name_id': new_volume._name_id, 'provider_location': new_volume.provider_location} expected_update = {'_name_id': volume._name_id, 'provider_location': volume.provider_location} with mock.patch.object(self.volume.driver, 'update_migrated_volume') as migrate_update, \ mock.patch.object(self.context, 'elevated') as elevated: migrate_update.return_value = fake_update 
elevated.return_value = fake_elevated self.volume.update_migrated_volume(self.context, volume, new_volume, 'available') volume_update.assert_has_calls(( mock.call(fake_elevated, new_volume.id, expected_update), mock.call(fake_elevated, volume.id, fake_update))) # Test the case for update_migrated_volume not implemented # for the driver. migrate_update.reset_mock() volume_update.reset_mock() # Reset the volume objects to their original value, since they # were changed in the last call. new_volume._name_id = fake.VOLUME_NAME_ID new_volume.provider_location = 'fake_provider_location' migrate_update.side_effect = NotImplementedError self.volume.update_migrated_volume(self.context, volume, new_volume, 'available') volume_update.assert_has_calls(( mock.call(fake_elevated, new_volume.id, fake_update), mock.call(fake_elevated, volume.id, fake_update_error))) def test_migrate_volume_generic_create_volume_error(self): self.expected_status = 'error' with mock.patch.object(self.volume.driver, 'migrate_volume'), \ mock.patch.object(volume_rpcapi.VolumeAPI, 'create_volume') as mock_create_volume, \ mock.patch.object(self.volume, '_clean_temporary_volume') as \ clean_temporary_volume: # Exception case at the creation of the new temporary volume mock_create_volume.side_effect = self._fake_create_volume volume = tests_utils.create_volume(self.context, size=0, host=CONF.host) host_obj = {'host': 'newhost', 'capabilities': {}} self.assertRaises(exception.VolumeMigrationFailed, self.volume.migrate_volume, self.context, volume, host_obj, True) volume = objects.Volume.get_by_id(context.get_admin_context(), volume.id) self.assertEqual('error', volume['migration_status']) self.assertEqual('available', volume['status']) self.assertTrue(clean_temporary_volume.called) self.expected_status = 'available' def test_migrate_volume_generic_timeout_error(self): CONF.set_override("migration_create_volume_timeout_secs", 2) with mock.patch.object(self.volume.driver, 'migrate_volume'), \ mock.patch.object(volume_rpcapi.VolumeAPI, 'create_volume') as mock_create_volume, \ mock.patch.object(self.volume, '_clean_temporary_volume') as \ clean_temporary_volume, \ mock.patch.object(time, 'sleep'): # Exception case at the timeout of the volume creation self.expected_status = 'creating' mock_create_volume.side_effect = self._fake_create_volume volume = tests_utils.create_volume(self.context, size=0, host=CONF.host) host_obj = {'host': 'newhost', 'capabilities': {}} self.assertRaises(exception.VolumeMigrationFailed, self.volume.migrate_volume, self.context, volume, host_obj, True) volume = objects.Volume.get_by_id(context.get_admin_context(), volume.id) self.assertEqual('error', volume['migration_status']) self.assertEqual('available', volume['status']) self.assertTrue(clean_temporary_volume.called) self.expected_status = 'available' def test_migrate_volume_generic_create_export_error(self): with mock.patch.object(self.volume.driver, 'migrate_volume'), \ mock.patch.object(volume_rpcapi.VolumeAPI, 'create_volume') \ as mock_create_volume, \ mock.patch.object(self.volume, '_copy_volume_data') as \ mock_copy_volume, \ mock.patch.object(volume_rpcapi.VolumeAPI, 'delete_volume'), \ mock.patch.object(self.volume, 'migrate_volume_completion'), \ mock.patch.object(self.volume.driver, 'create_export') as \ mock_create_export: # Exception case at create_export mock_create_volume.side_effect = self._fake_create_volume mock_copy_volume.side_effect = processutils.ProcessExecutionError mock_create_export.side_effect = 
processutils.ProcessExecutionError volume = tests_utils.create_volume(self.context, size=0, host=CONF.host) host_obj = {'host': 'newhost', 'capabilities': {}} self.assertRaises(processutils.ProcessExecutionError, self.volume.migrate_volume, self.context, volume, host_obj, True) volume = objects.Volume.get_by_id(context.get_admin_context(), volume.id) self.assertEqual('error', volume['migration_status']) self.assertEqual('available', volume['status']) def test_migrate_volume_generic_migrate_volume_completion_error(self): def fake_migrate_volume_completion(ctxt, volume, new_volume, error=False): db.volume_update(ctxt, volume['id'], {'migration_status': 'completing'}) raise processutils.ProcessExecutionError with mock.patch.object(self.volume.driver, 'migrate_volume'), \ mock.patch.object(volume_rpcapi.VolumeAPI, 'create_volume')\ as mock_create_volume, \ mock.patch.object(volume_rpcapi.VolumeAPI, 'delete_volume'), \ mock.patch.object(self.volume, 'migrate_volume_completion')\ as mock_migrate_compl, \ mock.patch.object(self.volume.driver, 'create_export'), \ mock.patch.object(self.volume, '_attach_volume') \ as mock_attach, \ mock.patch.object(self.volume, '_detach_volume'), \ mock.patch.object(os_brick.initiator.connector, 'get_connector_properties') \ as mock_get_connector_properties, \ mock.patch.object(volume_utils, 'copy_volume') as mock_copy, \ mock.patch.object(volume_rpcapi.VolumeAPI, 'get_capabilities') \ as mock_get_capabilities: # Exception case at delete_volume # source_volume['migration_status'] is 'completing' mock_create_volume.side_effect = self._fake_create_volume mock_migrate_compl.side_effect = fake_migrate_volume_completion mock_get_connector_properties.return_value = {} mock_attach.side_effect = [{'device': {'path': 'bar'}}, {'device': {'path': 'foo'}}] mock_get_capabilities.return_value = {'sparse_copy_volume': True} volume = tests_utils.create_volume(self.context, size=0, host=CONF.host) host_obj = {'host': 'newhost', 'capabilities': {}} self.assertRaises(processutils.ProcessExecutionError, self.volume.migrate_volume, self.context, volume, host_obj, True) volume = db.volume_get(context.get_admin_context(), volume['id']) self.assertEqual('error', volume['migration_status']) self.assertEqual('available', volume['status']) mock_copy.assert_called_once_with('foo', 'bar', 0, '1M', sparse=True) def fake_attach_volume(self, ctxt, volume, instance_uuid, host_name, mountpoint, mode): tests_utils.attach_volume(ctxt, volume.id, instance_uuid, host_name, '/dev/vda') def _test_migrate_volume_completion(self, status='available', instance_uuid=None, attached_host=None, retyping=False, previous_status='available'): initial_status = retyping and 'retyping' or status old_volume = tests_utils.create_volume(self.context, size=0, host=CONF.host, status=initial_status, migration_status='migrating', previous_status=previous_status) attachment = None if status == 'in-use': old_volume = tests_utils.attach_volume(self.context, old_volume, instance_uuid, attached_host, '/dev/vda') self.assertEqual('in-use', old_volume.status) attachment = old_volume.volume_attachment[0] target_status = 'target:%s' % old_volume.id new_host = CONF.host + 'new' new_volume = tests_utils.create_volume(self.context, size=0, host=new_host, migration_status=target_status) with mock.patch.object(self.volume, 'detach_volume') as \ mock_detach_volume, \ mock.patch.object(volume_rpcapi.VolumeAPI, 'delete_volume') as mock_delete_volume, \ mock.patch.object(volume_rpcapi.VolumeAPI, 'attach_volume') as mock_attach_volume, \ 
mock.patch.object(volume_rpcapi.VolumeAPI, 'update_migrated_volume'): mock_attach_volume.side_effect = self.fake_attach_volume old_volume_host = old_volume.host new_volume_host = new_volume.host self.volume.migrate_volume_completion(self.context, old_volume, new_volume) after_new_volume = objects.Volume.get_by_id(self.context, new_volume.id) after_old_volume = objects.Volume.get_by_id(self.context, old_volume.id) if status == 'in-use': mock_detach_volume.assert_called_with(self.context, old_volume.id, attachment['id']) attachments = db.volume_attachment_get_all_by_instance_uuid( self.context, instance_uuid) mock_attach_volume.assert_called_once_with( self.context, old_volume, attachment['instance_uuid'], attachment['attached_host'], attachment['mountpoint'], attachment.get('attach_mode', 'rw'), ) self.assertIsNotNone(attachments) self.assertEqual(attached_host, attachments[0]['attached_host']) self.assertEqual(instance_uuid, attachments[0]['instance_uuid']) else: self.assertFalse(mock_detach_volume.called) self.assertTrue(mock_delete_volume.called) # NOTE(sborkows): the migrate_volume_completion method alters # old and new volume objects, so we need to check the equality # between the former host value and the actual one. self.assertEqual(old_volume_host, after_new_volume.host) self.assertEqual(new_volume_host, after_old_volume.host) def test_migrate_volume_completion_retype_available(self): self._test_migrate_volume_completion('available', retyping=True) def test_migrate_volume_completion_retype_in_use(self): self._test_migrate_volume_completion( 'in-use', '83c969d5-065e-4c9c-907d-5394bc2e98e2', 'some-host', retyping=True, previous_status='in-use') def test_migrate_volume_completion_migrate_available(self): self._test_migrate_volume_completion() def test_migrate_volume_completion_migrate_in_use(self): self._test_migrate_volume_completion( 'in-use', '83c969d5-065e-4c9c-907d-5394bc2e98e2', 'some-host', retyping=False, previous_status='in-use') @ddt.data(False, True) def test_api_migrate_volume_completion_from_swap_with_no_migration( self, swap_error): # This test validates that Cinder properly finishes the swap volume # status updates for the case that no migration has occurred instance_uuid = '83c969d5-065e-4c9c-907d-5394bc2e98e2' attached_host = 'attached-host' orig_attached_vol = tests_utils.create_volume(self.context, size=0) orig_attached_vol = tests_utils.attach_volume( self.context, orig_attached_vol['id'], instance_uuid, attached_host, '/dev/vda') new_volume = tests_utils.create_volume(self.context, size=0) @mock.patch.object(volume_rpcapi.VolumeAPI, 'detach_volume') @mock.patch.object(volume_rpcapi.VolumeAPI, 'attach_volume') def _run_migration_completion(rpc_attach_volume, rpc_detach_volume): attachment = orig_attached_vol['volume_attachment'][0] attachment_id = attachment['id'] rpc_attach_volume.side_effect = self.fake_attach_volume vol_id = volume_api.API().migrate_volume_completion( self.context, orig_attached_vol, new_volume, swap_error) if swap_error: # When swap failed, we don't want to finish attachment self.assertFalse(rpc_detach_volume.called) self.assertFalse(rpc_attach_volume.called) else: # When no error, we should be finishing the attachment rpc_detach_volume.assert_called_with(self.context, orig_attached_vol, attachment_id) rpc_attach_volume.assert_called_with( self.context, new_volume, attachment['instance_uuid'], attachment['attached_host'], attachment['mountpoint'], 'rw') self.assertEqual(new_volume['id'], vol_id) _run_migration_completion() 
@mock.patch('cinder.tests.unit.fake_notifier.FakeNotifier._notify') def test_retype_setup_fail_volume_is_available(self, mock_notify): """Verify volume is still available if retype prepare failed.""" elevated = context.get_admin_context() project_id = self.context.project_id db.volume_type_create(elevated, {'name': 'old', 'extra_specs': {}}) old_vol_type = db.volume_type_get_by_name(elevated, 'old') db.volume_type_create(elevated, {'name': 'new', 'extra_specs': {}}) new_vol_type = db.volume_type_get_by_name(elevated, 'new') db.quota_create(elevated, project_id, 'volumes_new', 0) volume = tests_utils.create_volume(self.context, size=1, host=CONF.host, status='available', volume_type_id=old_vol_type['id']) api = cinder.volume.api.API() self.assertRaises(exception.VolumeLimitExceeded, api.retype, self.context, volume, new_vol_type['id']) volume = db.volume_get(elevated, volume.id) # FIXME: restore when Bug #1803648 is figured out # mock_notify.assert_not_called() self.assertEqual('available', volume['status']) @mock.patch('cinder.tests.unit.fake_notifier.FakeNotifier._notify') def _retype_volume_exec(self, driver, mock_notify, snap=False, policy='on-demand', migrate_exc=False, exc=None, diff_equal=False, replica=False, reserve_vol_type_only=False, encryption_changed=False, replica_new=None): elevated = context.get_admin_context() project_id = self.context.project_id if replica: rep_status = 'enabled' extra_specs = {'replication_enabled': ' True'} else: rep_status = 'disabled' extra_specs = {} if replica_new is None: replica_new = replica new_specs = {'replication_enabled': ' True'} if replica_new else {} db.volume_type_create(elevated, {'name': 'old', 'extra_specs': extra_specs}) old_vol_type = db.volume_type_get_by_name(elevated, 'old') db.volume_type_create(elevated, {'name': 'new', 'extra_specs': new_specs}) vol_type = db.volume_type_get_by_name(elevated, 'new') new_type = objects.VolumeType.get_by_id(elevated, vol_type['id']) db.quota_create(elevated, project_id, 'volumes_new', 10) volume = tests_utils.create_volume(self.context, size=1, host=CONF.host, status='retyping', volume_type_id=old_vol_type['id'], replication_status=rep_status) volume.previous_status = 'available' volume.save() if snap: snapshot = create_snapshot(volume.id, size=volume.size, user_id=self.user_context.user_id, project_id=self.user_context.project_id, volume_type_id=volume.volume_type_id, ctxt=self.user_context) if driver or diff_equal: host_obj = {'host': CONF.host, 'capabilities': {}} else: host_obj = {'host': 'newhost', 'capabilities': {}} reservations = quota_utils.get_volume_type_reservation( self.context, volume, new_type.id, reserve_vol_type_only) old_reservations = quota_utils.get_volume_type_reservation( self.context, volume, old_vol_type['id'], reserve_vol_type_only, negative=True) old_usage = db.quota_usage_get_all_by_project(elevated, project_id) with mock.patch.object(self.volume.driver, 'retype') as _retype, \ mock.patch.object(volume_types, 'volume_types_diff') as _diff, \ mock.patch.object(self.volume, 'migrate_volume') as _mig, \ mock.patch.object(db.sqlalchemy.api, 'volume_get') as _vget, \ mock.patch.object(context.RequestContext, 'elevated') as _ctx, \ mock.patch.object(objects.VolumeType, 'get_by_id') as _vtget: _vget.return_value = volume _retype.return_value = driver _ctx.return_value = self.context _vtget.return_value = new_type returned_diff = { 'encryption': {}, 'qos_specs': {}, 'extra_specs': {}, } if replica != replica_new: returned_diff['extra_specs']['replication_enabled'] = ( 
extra_specs.get('replication_enabled'), new_specs.get('replication_enabled')) expected_replica_status = 'enabled' if replica_new else 'disabled' if encryption_changed: returned_diff['encryption'] = 'fake' _diff.return_value = (returned_diff, diff_equal) if migrate_exc: _mig.side_effect = KeyError else: _mig.return_value = True if not exc: self.volume.retype(self.context, volume, vol_type['id'], host_obj, migration_policy=policy, reservations=reservations, old_reservations=old_reservations) else: self.assertRaises(exc, self.volume.retype, self.context, volume, vol_type['id'], host_obj, migration_policy=policy, reservations=reservations, old_reservations=old_reservations) if host_obj['host'] != CONF.host: _retype.assert_not_called() # get volume/quota properties volume = objects.Volume.get_by_id(elevated, volume.id) try: usage = db.quota_usage_get(elevated, project_id, 'volumes_new') volumes_in_use = usage.in_use except exception.QuotaUsageNotFound: volumes_in_use = 0 # Get new in_use after retype, it should not be changed. if reserve_vol_type_only: new_usage = db.quota_usage_get_all_by_project(elevated, project_id) for resource in ('volumes', 'gigabytes', 'snapshots'): empty = {'in_use': 0, 'reserved': 0} # Global resource hasn't changed self.assertEqual(old_usage.get(resource, empty)['in_use'], new_usage.get(resource, empty)['in_use']) # The new type was empty before self.assertEqual( 0, old_usage.get(resource + '_new', empty)['in_use']) # Old type resources have been moved to the new one self.assertEqual( old_usage.get(resource + '_old', empty)['in_use'], new_usage.get(resource + '_new', empty)['in_use']) # The old type is empty now self.assertEqual( 0, new_usage.get(resource + '_old', empty)['in_use']) # check properties if driver or diff_equal: self.assertEqual(vol_type['id'], volume.volume_type_id) self.assertEqual('available', volume.status) self.assertEqual(CONF.host, volume.host) self.assertEqual(1, volumes_in_use) self.assert_notify_called(mock_notify, (['INFO', 'volume.retype'],), any_order=True) if driver: _vtget.assert_called_once_with(self.context, new_type.id) _retype.assert_called_once_with(self.context, mock.ANY, new_type, returned_diff, host_obj) # When retyping a volume with snapshots the snapshots should be # retyped as well if snap: snapshot.refresh() self.assertEqual(new_type.id, snapshot.volume_type_id) elif not exc: self.assertEqual(old_vol_type['id'], volume.volume_type_id) self.assertEqual('retyping', volume.status) self.assertEqual(CONF.host, volume.host) self.assertEqual(1, volumes_in_use) self.assert_notify_called(mock_notify, (['INFO', 'volume.retype'],), any_order=True) else: self.assertEqual(old_vol_type['id'], volume.volume_type_id) self.assertEqual('available', volume.status) self.assertEqual(CONF.host, volume.host) self.assertEqual(0, volumes_in_use) if encryption_changed: self.assertTrue(_mig.called) self.assertEqual(expected_replica_status, volume.replication_status) def test_retype_volume_driver_success(self): self._retype_volume_exec(True) @ddt.data((False, False), (False, True), (True, False), (True, True)) @ddt.unpack def test_retype_volume_replica(self, replica, replica_new): self._retype_volume_exec(True, replica=replica, replica_new=replica_new) def test_retype_volume_migration_bad_policy(self): # Test volume retype that requires migration by not allowed self._retype_volume_exec(False, policy='never', exc=exception.VolumeMigrationFailed) def test_retype_volume_migration_with_replica(self): self._retype_volume_exec(False, replica=True, 
exc=exception.InvalidVolume) def test_retype_volume_migration_with_snaps(self): self._retype_volume_exec(False, snap=True, exc=exception.InvalidVolume) def test_retype_volume_migration_failed(self): self._retype_volume_exec(False, migrate_exc=True, exc=KeyError) def test_retype_volume_migration_success(self): self._retype_volume_exec(False, migrate_exc=False, exc=None) def test_retype_volume_migration_equal_types(self): self._retype_volume_exec(False, diff_equal=True) def test_retype_volume_migration_equal_types_snaps(self): self._retype_volume_exec(False, snap=True, diff_equal=True, reserve_vol_type_only=True) def test_retype_volume_with_type_only(self): self._retype_volume_exec(True, reserve_vol_type_only=True) def test_retype_volume_migration_encryption(self): self._retype_volume_exec(False, encryption_changed=True) def test_migrate_driver_not_initialized(self): volume = tests_utils.create_volume(self.context, size=0, host=CONF.host) host_obj = {'host': 'newhost', 'capabilities': {}} self.volume.driver._initialized = False self.assertRaises(exception.DriverNotInitialized, self.volume.migrate_volume, self.context, volume, host_obj, True) volume = objects.Volume.get_by_id(context.get_admin_context(), volume.id) self.assertEqual('error', volume.migration_status) # lets cleanup the mess. self.volume.driver._initialized = True self.volume.delete_volume(self.context, volume) def test_delete_source_volume_in_migration(self): """Test deleting a source volume that is in migration.""" self._test_delete_volume_in_migration('migrating') def test_delete_destination_volume_in_migration(self): """Test deleting a destination volume that is in migration.""" self._test_delete_volume_in_migration('target:vol-id') def _test_delete_volume_in_migration(self, migration_status): """Test deleting a volume that is in migration.""" volume = tests_utils.create_volume(self.context, host=CONF.host, migration_status=migration_status) self.volume.delete_volume(self.context, volume=volume) # The volume is successfully removed during the volume delete # and won't exist in the database any more. 
self.assertRaises(exception.VolumeNotFound, volume.refresh) def test_retype_volume_not_capable_to_replica(self): elevated = context.get_admin_context() db.volume_type_create(elevated, {'name': 'old', 'extra_specs': {}}) old_vol_type = db.volume_type_get_by_name(elevated, 'old') new_extra_specs = {'replication_enabled': ' True'} new_type = db.volume_type_create(elevated, {'name': 'new', 'extra_specs': new_extra_specs}) new_vol_type = objects.VolumeType.get_by_id(self.context, new_type['id']) volume = tests_utils.create_volume(self.context, size=1, host=CONF.host, status='available', volume_type_id=old_vol_type['id'], replication_status='not-capable') host_obj = {'host': 'newhost', 'capabilities': {}} with mock.patch.object(self.volume, 'migrate_volume') as migrate_volume, \ mock.patch.object(objects.VolumeType, 'get_by_id') as vt_get: migrate_volume.return_value = True vt_get.return_value = new_vol_type self.volume.retype(self.context, volume, new_vol_type.id, host_obj, migration_policy='on-demand') vt_get.assert_not_called() @ddt.data( (None, True), ({'encryption': {'cipher': ('v1', 'v2')}}, False), ({'qos_specs': {'key1': ('v1', 'v2')}}, False), ({'encryption': {}, 'qos_specs': {}, 'extra_specs': {}}, True), ({'encryption': {}, 'qos_specs': {}, 'extra_specs': {'volume_backend_name': ('ceph1', 'ceph2'), 'RESKEY:availability_zones': ('nova', 'nova2')}}, True), ({'encryption': {}, 'qos_specs': {}, 'extra_specs': {'thin_provisioning_support': (' True', None)}}, False), ) @ddt.unpack def test__can_use_driver_migration(self, diff, expected): res = self.volume._can_use_driver_migration(diff) self.assertEqual(expected, res) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/volume/test_volume_reimage.py0000664000175000017500000002334400000000000024402 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""Tests for Volume reimage Code.""" from unittest import mock import ddt from oslo_concurrency import processutils from cinder import exception from cinder.message import message_field from cinder.tests.unit import fake_constants from cinder.tests.unit.image import fake as fake_image from cinder.tests.unit import utils as tests_utils from cinder.tests.unit import volume as base @ddt.ddt class VolumeReimageTestCase(base.BaseVolumeTestCase): def setUp(self): super(VolumeReimageTestCase, self).setUp() self.patch('cinder.volume.volume_utils.clear_volume', autospec=True) fake_image.mock_image_service(self) self.image_meta = fake_image.FakeImageService().show( self.context, fake_constants.IMAGE_ID) def test_volume_reimage(self): volume = tests_utils.create_volume(self.context, status='downloading', previous_status='available') self.assertEqual(volume.status, 'downloading') self.assertEqual(volume.previous_status, 'available') self.volume.create_volume(self.context, volume) with mock.patch.object(self.volume.driver, 'copy_image_to_volume' ) as mock_cp_img: self.volume.reimage(self.context, volume, self.image_meta) mock_cp_img.assert_called_once_with(self.context, volume, fake_image.FakeImageService(), self.image_meta['id'], disable_sparse=True) self.assertEqual(volume.status, 'available') def test_volume_reimage_image_snapshot(self): volume = tests_utils.create_volume(self.context, status='downloading', previous_status='available') self.assertEqual(volume.status, 'downloading') self.assertEqual(volume.previous_status, 'available') self.volume.create_volume(self.context, volume) with mock.patch.object(self.volume.driver, 'copy_image_to_volume' ) as mock_cp_img, \ mock.patch.object(self.volume, '_revert_to_snapshot_generic' ) as generic_revert: fake_snap = mock.MagicMock( id='08f850d7-8b43-4656-a71c-647c864a3599') self.volume.reimage( self.context, volume, self.image_meta, image_snap=fake_snap) mock_cp_img.assert_not_called() generic_revert.assert_called_once_with( self.context, volume, fake_snap) self.assertEqual(volume.status, 'available') def test_volume_reimage_raise_exception(self): volume = tests_utils.create_volume(self.context) self.volume.create_volume(self.context, volume) with mock.patch.object(self.volume.driver, 'copy_image_to_volume' ) as mock_cp_img: mock_cp_img.side_effect = processutils.ProcessExecutionError self.assertRaises(exception.ImageCopyFailure, self.volume.reimage, self.context, volume, self.image_meta) self.assertEqual(volume.previous_status, 'available') self.assertEqual(volume.status, 'error') mock_cp_img.side_effect = exception.ImageUnacceptable( image_id=self.image_meta['id'], reason='') self.assertRaises(exception.ImageUnacceptable, self.volume.reimage, self.context, volume, self.image_meta) mock_cp_img.side_effect = exception.ImageConversionNotAllowed( image_id=self.image_meta['id'], reason='') with mock.patch.object( self.volume.message_api, 'create' ) as mock_msg_create: self.assertRaises( exception.ImageConversionNotAllowed, self.volume.reimage, self.context, volume, self.image_meta) mock_msg_create.assert_called_with( self.context, message_field.Action.REIMAGE_VOLUME, resource_uuid=volume.id, detail=message_field.Detail.IMAGE_FORMAT_UNACCEPTABLE) mock_cp_img.side_effect = exception.ImageTooBig( image_id=self.image_meta['id'], reason='') self.assertRaises(exception.ImageTooBig, self.volume.reimage, self.context, volume, self.image_meta) mock_cp_img.side_effect = Exception self.assertRaises(exception.ImageCopyFailure, self.volume.reimage, self.context, volume, 
self.image_meta) mock_cp_img.side_effect = exception.ImageCopyFailure(reason='') self.assertRaises(exception.ImageCopyFailure, self.volume.reimage, self.context, volume, self.image_meta) @mock.patch('cinder.volume.volume_utils.check_image_metadata') @mock.patch('cinder.volume.rpcapi.VolumeAPI.reimage') @ddt.data('available', 'error') def test_volume_reimage_api(self, status, mock_reimage, mock_check): volume = tests_utils.create_volume(self.context) volume.status = status volume.save() self.assertEqual(volume.status, status) # The available or error volume can be reimaged directly self.volume_api.reimage(self.context, volume, self.image_meta['id']) mock_check.assert_called_once_with(self.image_meta, volume.size) mock_reimage.assert_called_once_with(self.context, volume, self.image_meta, image_snap=None) @mock.patch('cinder.volume.volume_utils.check_image_metadata') @mock.patch('cinder.volume.rpcapi.VolumeAPI.reimage') @ddt.data('available', 'error') def test_volume_reimage_check_meta_exception(self, status, mock_reimage, mock_check): volume = tests_utils.create_volume(self.context) volume.status = status volume.save() self.assertEqual(volume.status, status) mock_check.side_effect = exception.ImageUnacceptable( image_id=self.image_meta['id'], reason='') # The available or error volume can be reimaged directly self.assertRaises(exception.ImageUnacceptable, self.volume_api.reimage, self.context, volume, self.image_meta['id']) self.assertEqual(status, volume.status) @mock.patch('cinder.volume.volume_utils.check_image_metadata') @mock.patch('cinder.volume.rpcapi.VolumeAPI.reimage') def test_volume_reimage_api_with_reimage_reserved(self, mock_reimage, mock_check): volume = tests_utils.create_volume(self.context) # A reserved volume cannot be reimaged directly; it can only # be reimaged with the reimage_reserved flag volume.status = 'reserved' volume.save() self.assertEqual(volume.status, 'reserved') self.volume_api.reimage(self.context, volume, self.image_meta['id'], reimage_reserved=True) mock_check.assert_called_once_with(self.image_meta, volume.size) mock_reimage.assert_called_once_with(self.context, volume, self.image_meta, image_snap=None) def test_volume_reimage_api_with_invalid_status(self): volume = tests_utils.create_volume(self.context) # A reserved volume cannot be reimaged directly; it can only # be reimaged with the reimage_reserved flag volume.status = 'reserved' volume.save() self.assertEqual(volume.status, 'reserved') ex = self.assertRaises(exception.InvalidVolume, self.volume_api.reimage, self.context, volume, self.image_meta['id'], reimage_reserved=False) self.assertIn("status must be available or error", str(ex)) # A volume in any other status cannot be reimaged volume.status = 'in-use' volume.save() self.assertEqual(volume.status, 'in-use') ex = self.assertRaises(exception.InvalidVolume, self.volume_api.reimage, self.context, volume, self.image_meta['id'], reimage_reserved=True) self.assertIn("status must be " "available or error or reserved", str(ex)) @mock.patch('cinder.volume.volume_utils.check_image_metadata') @mock.patch('cinder.volume.rpcapi.VolumeAPI.reimage') def test_volume_reimage_api_image_snapshot( self, mock_reimage, mock_check): volume = tests_utils.create_volume(self.context) self.volume_api.reimage( self.context, volume, self.image_meta['id'], image_snap='fake_snap') mock_check.assert_called_once_with(self.image_meta, volume['size']) mock_reimage.assert_called_once_with(self.context, volume, self.image_meta, image_snap='fake_snap') 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/volume/test_volume_retype.py0000664000175000017500000002445000000000000024300 0ustar00zuulzuul00000000000000
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""Tests for Volume retype Code."""

from unittest import mock

from oslo_config import cfg

from cinder import context
from cinder import db
from cinder import exception
from cinder import objects
from cinder.policies import volume_actions as vol_action_policies
from cinder import quota
from cinder.tests.unit import fake_constants as fake
from cinder.tests.unit import utils as tests_utils
from cinder.tests.unit import volume as base
from cinder.volume import volume_types

QUOTAS = quota.QUOTAS

CONF = cfg.CONF


class VolumeRetypeTestCase(base.BaseVolumeTestCase):
    """Verify multiattach retype restrictions."""

    def setUp(self):
        super(VolumeRetypeTestCase, self).setUp()
        self.patch('cinder.volume.volume_utils.clear_volume', autospec=True)
        self.expected_status = 'available'
        self.service_id = 1
        self.user_context = context.RequestContext(user_id=fake.USER_ID,
                                                   project_id=fake.PROJECT_ID)

        volume_types.create(self.context,
                            "fake_vol_type",
                            {},
                            description="fake_type")
        volume_types.create(self.context,
                            "fake_vol_type2",
                            {},
                            description="fake_type2")
        volume_types.create(self.context,
                            "multiattach-type",
                            {'multiattach': "<is> True"},
                            description="test-multiattach")
        volume_types.create(self.context,
                            "multiattach-type2",
                            {'multiattach': "<is> True"},
                            description="test-multiattach")
        self.default_vol_type = objects.VolumeType.get_by_name_or_id(
            self.context, 'fake_vol_type')
        self.fake_vol_type2 = objects.VolumeType.get_by_name_or_id(
            self.context, 'fake_vol_type2')
        self.multiattach_type = objects.VolumeType.get_by_name_or_id(
            self.context, 'multiattach-type')
        self.multiattach_type2 = objects.VolumeType.get_by_name_or_id(
            self.context, 'multiattach-type2')

    def fake_get_vtype(self, context, identifier):
        if identifier == "multiattach-type":
            return self.multiattach_type
        elif identifier == 'multiattach-type2':
            return self.multiattach_type2
        elif identifier == 'fake_vol_type2':
            return self.fake_vol_type2
        else:
            return self.default_vol_type

    @mock.patch('cinder.scheduler.rpcapi.SchedulerAPI.retype')
    @mock.patch('cinder.context.RequestContext.authorize')
    @mock.patch.object(volume_types, 'get_by_name_or_id')
    def test_retype_has_az(self, _mock_get_types, mock_authorize, mock_rpc):
        """Verify retype has az in request spec."""
        _mock_get_types.side_effect = self.fake_get_vtype

        vol = tests_utils.create_volume(
            self.context,
            volume_type_id=self.default_vol_type.id,
            status='available',
            availability_zone='nova')
        self.volume_api.retype(self.user_context, vol, 'fake_vol_type2')
        mock_authorize.assert_called_once_with(
            vol_action_policies.RETYPE_POLICY, target_obj=mock.ANY)
        fake_spec = {
            'volume_properties': mock.ANY,
            'volume_id': mock.ANY,
            'volume_type': mock.ANY,
            'migration_policy': mock.ANY,
            'quota_reservations': mock.ANY,
            'old_reservations': mock.ANY,
'availability_zones': ['nova'], } mock_rpc.assert_called_once_with( self.user_context, mock.ANY, request_spec=fake_spec, filter_properties=mock.ANY ) @mock.patch('cinder.context.RequestContext.authorize') def test_non_multi_to_multi_retype(self, mock_authorize): """Test going from non-multiattach type to multiattach""" vol = tests_utils.create_volume(self.context, volume_type_id= self.default_vol_type.id) self.assertFalse(vol.multiattach) self.volume_api.retype(self.user_context, vol, 'multiattach-type') vol.refresh() self.assertTrue(vol.multiattach) mock_authorize.assert_has_calls( [mock.call(vol_action_policies.RETYPE_POLICY, target_obj=mock.ANY) ]) @mock.patch('cinder.context.RequestContext.authorize') def test_multi_to_non_multi_retype(self, mock_authorize): """Test going from multiattach to a non-multiattach type""" vol = tests_utils.create_volume(self.context, multiattach=True, volume_type_id= self.multiattach_type.id) self.assertTrue(vol.multiattach) self.volume_api.retype(self.user_context, vol, 'fake_vol_type') vol.refresh() self.assertFalse(vol.multiattach) mock_authorize.assert_has_calls( [mock.call(vol_action_policies.RETYPE_POLICY, target_obj=mock.ANY) ]) @mock.patch('cinder.context.RequestContext.authorize') def test_in_use_volume_retype(self, mock_authorize): """Test trying to retype an in-use volume""" vol = tests_utils.create_volume(self.context, volume_type_id= self.multiattach_type.id) vol.update({'status': 'in-use'}) vol.save() self.assertRaises(exception.InvalidInput, self.volume_api.retype, self.context, vol, 'multiattach-type') mock_authorize.assert_has_calls( [mock.call(vol_action_policies.RETYPE_POLICY, target_obj=mock.ANY), ]) @mock.patch('cinder.context.RequestContext.authorize') def test_multiattach_to_multiattach_retype(self, mock_authorize): """Test going from multiattach to multiattach""" vol = tests_utils.create_volume(self.context, multiattach=True, volume_type_id= self.multiattach_type.id) self.assertTrue(vol.multiattach) self.volume_api.retype(self.user_context, vol, 'multiattach-type2') vol.refresh() self.assertTrue(vol.multiattach) mock_authorize.assert_has_calls( [mock.call(vol_action_policies.RETYPE_POLICY, target_obj=mock.ANY) ]) def test_retype_driver_not_initialized(self): volume = tests_utils.create_volume( self.context, host=CONF.host, status='available', volume_type_id=self.default_vol_type.id) host_obj = {'host': CONF.host, 'capabilities': {}} self.volume.driver._initialized = False self.assertRaises(exception.DriverNotInitialized, self.volume.retype, self.context, volume, self.multiattach_type.id, host_obj, migration_policy='on-demand') volume.refresh() self.assertEqual('available', volume.status) def test_retype_with_volume_type_resize_limits(self): def _create_min_max_size_dict(min_size, max_size): return {volume_types.MIN_SIZE_KEY: min_size, volume_types.MAX_SIZE_KEY: max_size} def _setup_volume_types(): spec_dict = _create_min_max_size_dict(2, 4) sized_vol_type_dict = {'name': 'limit_type', 'extra_specs': spec_dict} db.volume_type_create(self.context, sized_vol_type_dict) self.sized_vol_type = db.volume_type_get_by_name( self.context, sized_vol_type_dict['name']) unsized_vol_type_dict = {'name': 'unsized_type', 'extra_specs': {}} db.volume_type_create(context.get_admin_context(), unsized_vol_type_dict) self.unsized_vol_type = db.volume_type_get_by_name( self.context, unsized_vol_type_dict['name']) _setup_volume_types() volume_1 = tests_utils.create_volume( self.context, host=CONF.host, status='available', 
volume_type_id=self.default_vol_type.id, size=1) volume_3 = tests_utils.create_volume( self.context, host=CONF.host, status='available', volume_type_id=self.default_vol_type.id, size=3) volume_9 = tests_utils.create_volume( self.context, host=CONF.host, status='available', volume_type_id=self.default_vol_type.id, size=9) self.assertRaises(exception.InvalidInput, self.volume_api.retype, self.context, volume_1, 'limit_type', migration_policy='on-demand') self.assertRaises(exception.InvalidInput, self.volume_api.retype, self.context, volume_9, 'limit_type', migration_policy='on-demand') self.volume_api.retype(self.context, volume_3, 'limit_type', migration_policy='on-demand') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/volume/test_volume_usage_audit.py0000664000175000017500000002376700000000000025274 0ustar00zuulzuul00000000000000# Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Tests for Volume usage audit feature.""" import datetime from cinder import context from cinder import db from cinder import objects from cinder.objects import fields from cinder.tests.unit import fake_constants as fake from cinder.tests.unit import volume as base class GetActiveByWindowTestCase(base.BaseVolumeTestCase): def setUp(self): super(GetActiveByWindowTestCase, self).setUp() self.ctx = context.get_admin_context(read_deleted="yes") self.db_vol_attrs = [ { 'id': fake.VOLUME_ID, 'host': 'devstack', 'project_id': fake.PROJECT_ID, 'created_at': datetime.datetime(1, 1, 1, 1, 1, 1), 'deleted': True, 'status': 'deleted', 'deleted_at': datetime.datetime(1, 2, 1, 1, 1, 1), 'volume_type_id': fake.VOLUME_TYPE_ID, }, { 'id': fake.VOLUME2_ID, 'host': 'devstack', 'project_id': fake.PROJECT_ID, 'created_at': datetime.datetime(1, 1, 1, 1, 1, 1), 'deleted': True, 'status': 'deleted', 'deleted_at': datetime.datetime(1, 3, 10, 1, 1, 1), 'volume_type_id': fake.VOLUME_TYPE_ID, }, { 'id': fake.VOLUME3_ID, 'host': 'devstack', 'project_id': fake.PROJECT_ID, 'created_at': datetime.datetime(1, 1, 1, 1, 1, 1), 'deleted': True, 'status': 'deleted', 'deleted_at': datetime.datetime(1, 5, 1, 1, 1, 1), 'volume_type_id': fake.VOLUME_TYPE_ID, }, { 'id': fake.VOLUME4_ID, 'host': 'devstack', 'project_id': fake.PROJECT_ID, 'created_at': datetime.datetime(1, 3, 10, 1, 1, 1), 'volume_type_id': fake.VOLUME_TYPE_ID, }, { 'id': fake.VOLUME5_ID, 'host': 'devstack', 'project_id': fake.PROJECT_ID, 'created_at': datetime.datetime(1, 5, 1, 1, 1, 1), 'volume_type_id': fake.VOLUME_TYPE_ID, } ] self.db_snap_attrs = [ { 'id': fake.SNAPSHOT_ID, 'project_id': 'p1', 'created_at': datetime.datetime(1, 1, 1, 1, 1, 1), 'deleted': True, 'status': fields.SnapshotStatus.DELETED, 'deleted_at': datetime.datetime(1, 2, 1, 1, 1, 1), 'volume_id': fake.VOLUME_ID, 'volume_type_id': fake.VOLUME_TYPE_ID, }, { 'id': fake.SNAPSHOT2_ID, 
'project_id': 'p1', 'created_at': datetime.datetime(1, 1, 1, 1, 1, 1), 'deleted': True, 'status': fields.SnapshotStatus.DELETED, 'deleted_at': datetime.datetime(1, 3, 10, 1, 1, 1), 'volume_id': fake.VOLUME_ID, 'volume_type_id': fake.VOLUME_TYPE_ID, }, { 'id': fake.SNAPSHOT3_ID, 'project_id': 'p1', 'created_at': datetime.datetime(1, 1, 1, 1, 1, 1), 'deleted': True, 'status': fields.SnapshotStatus.DELETED, 'deleted_at': datetime.datetime(1, 5, 1, 1, 1, 1), 'volume_id': fake.VOLUME_ID, 'volume_type_id': fake.VOLUME_TYPE_ID, }, { 'id': fake.SNAPSHOT_ID, 'project_id': 'p1', 'created_at': datetime.datetime(1, 3, 10, 1, 1, 1), 'volume_id': fake.VOLUME_ID, 'volume_type_id': fake.VOLUME_TYPE_ID, }, { 'id': fake.SNAPSHOT2_ID, 'project_id': 'p1', 'created_at': datetime.datetime(1, 5, 1, 1, 1, 1), 'volume_id': fake.VOLUME_ID, 'volume_type_id': fake.VOLUME_TYPE_ID } ] self.db_back_attrs = [ { 'id': fake.BACKUP_ID, 'host': 'devstack', 'project_id': fake.PROJECT_ID, 'created_at': datetime.datetime(1, 1, 1, 1, 1, 1), 'deleted': 1, 'status': 'deleted', 'deleted_at': datetime.datetime(1, 2, 1, 1, 1, 1) }, { 'id': fake.BACKUP2_ID, 'host': 'devstack', 'project_id': fake.PROJECT_ID, 'created_at': datetime.datetime(1, 1, 1, 1, 1, 1), 'deleted': 1, 'status': 'deleted', 'deleted_at': datetime.datetime(1, 3, 10, 1, 1, 1) }, { 'id': fake.BACKUP3_ID, 'host': 'devstack', 'project_id': fake.PROJECT_ID, 'created_at': datetime.datetime(1, 1, 1, 1, 1, 1), 'deleted': 1, 'status': 'deleted', 'deleted_at': datetime.datetime(1, 5, 1, 1, 1, 1) }, { 'id': fake.BACKUP4_ID, 'host': 'devstack', 'project_id': fake.PROJECT_ID, 'created_at': datetime.datetime(1, 3, 10, 1, 1, 1), }, { 'id': fake.BACKUP5_ID, 'host': 'devstack', 'project_id': fake.PROJECT_ID, 'created_at': datetime.datetime(1, 5, 1, 1, 1, 1), }, ] def test_volume_get_all_active_by_window(self): # Find all all volumes valid within a timeframe window. # Not in window db.volume_create(self.ctx, self.db_vol_attrs[0]) # In - deleted in window db.volume_create(self.ctx, self.db_vol_attrs[1]) # In - deleted after window db.volume_create(self.ctx, self.db_vol_attrs[2]) # In - created in window db.volume_create(self.context, self.db_vol_attrs[3]) # Not of window. db.volume_create(self.context, self.db_vol_attrs[4]) volumes = db.volume_get_all_active_by_window( self.context, datetime.datetime(1, 3, 1, 1, 1, 1), datetime.datetime(1, 4, 1, 1, 1, 1), project_id=fake.PROJECT_ID) self.assertEqual(3, len(volumes)) self.assertEqual({fake.VOLUME2_ID, fake.VOLUME3_ID, fake.VOLUME4_ID}, {v.id for v in volumes}) def test_snapshot_get_all_active_by_window(self): # Find all all snapshots valid within a timeframe window. db.volume_create(self.context, {'id': fake.VOLUME_ID, 'volume_type_id': fake.VOLUME_TYPE_ID}) for i in range(5): self.db_vol_attrs[i]['volume_id'] = fake.VOLUME_ID # Not in window del self.db_snap_attrs[0]['id'] snap1 = objects.Snapshot(self.ctx, **self.db_snap_attrs[0]) snap1.create() # In - deleted in window del self.db_snap_attrs[1]['id'] snap2 = objects.Snapshot(self.ctx, **self.db_snap_attrs[1]) snap2.create() # In - deleted after window del self.db_snap_attrs[2]['id'] snap3 = objects.Snapshot(self.ctx, **self.db_snap_attrs[2]) snap3.create() # In - created in window del self.db_snap_attrs[3]['id'] snap4 = objects.Snapshot(self.ctx, **self.db_snap_attrs[3]) snap4.create() # Not of window. 
del self.db_snap_attrs[4]['id'] snap5 = objects.Snapshot(self.ctx, **self.db_snap_attrs[4]) snap5.create() snapshots = objects.SnapshotList.get_all_active_by_window( self.context, datetime.datetime(1, 3, 1, 1, 1, 1), datetime.datetime(1, 4, 1, 1, 1, 1)).objects self.assertEqual(3, len(snapshots)) self.assertEqual({snap2.id, snap3.id, snap4.id}, {s.id for s in snapshots}) self.assertEqual({fake.VOLUME_ID}, {s.volume_id for s in snapshots}) def test_backup_get_all_active_by_window(self): # Find all backups valid within a timeframe window. db.volume_create(self.context, {'id': fake.VOLUME_ID, 'volume_type_id': fake.VOLUME_TYPE_ID}) for i in range(5): self.db_back_attrs[i]['volume_id'] = fake.VOLUME_ID # Not in window db.backup_create(self.ctx, self.db_back_attrs[0]) # In - deleted in window db.backup_create(self.ctx, self.db_back_attrs[1]) # In - deleted after window db.backup_create(self.ctx, self.db_back_attrs[2]) # In - created in window db.backup_create(self.ctx, self.db_back_attrs[3]) # Not of window db.backup_create(self.ctx, self.db_back_attrs[4]) backups = db.backup_get_all_active_by_window( self.context, datetime.datetime(1, 3, 1, 1, 1, 1), datetime.datetime(1, 4, 1, 1, 1, 1), project_id=fake.PROJECT_ID ) self.assertEqual(3, len(backups)) self.assertEqual({fake.BACKUP2_ID, fake.BACKUP3_ID, fake.BACKUP4_ID}, {b.id for b in backups}) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315577.3111205 cinder-27.0.0/cinder/tests/unit/windows/0000775000175000017500000000000000000000000020146 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/windows/__init__.py0000664000175000017500000000000000000000000022245 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/windows/db_fakes.py0000664000175000017500000000273300000000000022263 0ustar00zuulzuul00000000000000# Copyright 2012 Pedro Navarro Perez # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
""" Stubouts, mocks and fixtures for windows volume test suite """ def get_fake_volume_info(): return {'name': 'volume_name', 'size': 1, 'provider_location': 'iqn.2010-10.org.openstack:' + 'volume_name', 'id': 1, 'provider_auth': None} def get_fake_volume_info_cloned(): return {'name': 'volume_name_cloned', 'size': 1, 'provider_location': 'iqn.2010-10.org.openstack:' + 'volume_name_cloned', 'id': 1, 'provider_auth': None} def get_fake_image_meta(): return {'id': '10958016-e196-42e3-9e7f-5d8927ae3099' } def get_fake_snapshot_info(): return {'name': 'snapshot_name', 'volume_name': 'volume_name', } def get_fake_connector_info(): return {'initiator': 'iqn.2010-10.org.openstack:' + 'volume_name', } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/windows/test_iscsi.py0000664000175000017500000005215700000000000022703 0ustar00zuulzuul00000000000000# Copyright 2012 Pedro Navarro Perez # Copyright 2015 Cloudbase Solutions SRL # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Unit tests for Windows Server 2012 OpenStack Cinder volume driver """ import os from unittest import mock import ddt from oslo_utils import fileutils from oslo_utils import timeutils from oslo_utils import units from cinder import context from cinder import exception from cinder.image import image_utils from cinder.tests.unit import fake_constants as fake from cinder.tests.unit import fake_snapshot from cinder.tests.unit import fake_volume from cinder.tests.unit import test from cinder.tests.unit import utils as test_utils from cinder.tests.unit.windows import db_fakes from cinder.volume import configuration as conf from cinder.volume.drivers.windows import iscsi as windows_iscsi @ddt.ddt class TestWindowsISCSIDriver(test.TestCase): @mock.patch.object(windows_iscsi, 'utilsfactory') def setUp(self, mock_utilsfactory): super(TestWindowsISCSIDriver, self).setUp() self.configuration = conf.Configuration(None) self.configuration.append_config_values(windows_iscsi.windows_opts) self.flags(windows_iscsi_lun_path='fake_iscsi_lun_path') self.flags(image_conversion_dir='fake_image_conversion_dir') self._driver = windows_iscsi.WindowsISCSIDriver( configuration=self.configuration) self._context = context.get_admin_context() self.updated_at = timeutils.utcnow() @mock.patch.object(fileutils, 'ensure_tree') def test_do_setup(self, mock_ensure_tree): self._driver.do_setup(mock.sentinel.context) mock_ensure_tree.assert_has_calls( [mock.call('fake_iscsi_lun_path'), mock.call('fake_image_conversion_dir')]) @mock.patch.object(windows_iscsi.WindowsISCSIDriver, '_get_portals') def test_check_for_setup_error(self, mock_get_portals): self._driver.check_for_setup_error() mock_get_portals.assert_called_once_with() @ddt.data(True, False) def test_get_portals(self, portals_available=True): iscsi_port = mock.sentinel.iscsi_port available_ips = ['fake_ip0', 'fake_ip1', 'fake_unrequested_ip'] requested_ips = available_ips[:-1] + ['fake_inexistent_ips'] 
available_portals = ([":".join([ip_addr, str(iscsi_port)]) for ip_addr in available_ips] if portals_available else []) self._driver.configuration = mock.Mock() self._driver.configuration.target_port = iscsi_port self._driver.configuration.target_ip_address = requested_ips[0] self._driver.configuration.target_secondary_ip_addresses = ( requested_ips[1:]) self._driver._tgt_utils.get_portal_locations.return_value = ( available_portals) if portals_available: portals = self._driver._get_portals() self.assertEqual(set(available_portals[:-1]), set(portals)) else: self.assertRaises(exception.VolumeDriverException, self._driver._get_portals) self._driver._tgt_utils.get_portal_locations.assert_called_once_with( available_only=True, fail_if_none_found=True) @ddt.data(True, False) @mock.patch.object(windows_iscsi.WindowsISCSIDriver, '_get_portals') @mock.patch.object(windows_iscsi.WindowsISCSIDriver, '_get_target_name') def test_get_host_information(self, multipath, mock_get_target_name, mock_get_portals): tgt_utils = self._driver._tgt_utils fake_auth_meth = 'CHAP' fake_chap_username = 'fake_chap_username' fake_chap_password = 'fake_chap_password' fake_target_iqn = 'fake_target_iqn' fake_host_info = {'target_iqn': 'fake_target_iqn', 'fake_prop': 'fake_value'} fake_provider_auth = "%s %s %s" % (fake_auth_meth, fake_chap_username, fake_chap_password) fake_portals = [mock.sentinel.portal_location0, mock.sentinel.portal_location1] volume = fake_volume.fake_volume_obj(mock.sentinel.context, provider_auth=fake_provider_auth) mock_get_target_name.return_value = mock.sentinel.target_name mock_get_portals.return_value = fake_portals tgt_utils.get_target_information.return_value = fake_host_info expected_host_info = dict(fake_host_info, auth_method=fake_auth_meth, auth_username=fake_chap_username, auth_password=fake_chap_password, target_discovered=False, target_portal=fake_portals[0], target_lun=0, volume_id=volume.id) if multipath: expected_host_info['target_portals'] = fake_portals expected_host_info['target_iqns'] = [fake_target_iqn] * 2 expected_host_info['target_luns'] = [0] * 2 host_info = self._driver._get_host_information(volume, multipath) self.assertEqual(expected_host_info, host_info) mock_get_target_name.assert_called_once_with(volume) mock_get_portals.assert_called_once_with() tgt_utils.get_target_information.assert_called_once_with( mock.sentinel.target_name) @mock.patch.object(windows_iscsi.WindowsISCSIDriver, '_get_host_information') def test_initialize_connection(self, mock_get_host_info): tgt_utils = self._driver._tgt_utils volume = fake_volume.fake_volume_obj(mock.sentinel.fake_context) fake_initiator = db_fakes.get_fake_connector_info() fake_initiator['multipath'] = mock.sentinel.multipath fake_host_info = {'fake_host_prop': 'fake_value'} mock_get_host_info.return_value = fake_host_info expected_conn_info = {'driver_volume_type': 'iscsi', 'data': fake_host_info} conn_info = self._driver.initialize_connection(volume, fake_initiator) self.assertEqual(expected_conn_info, conn_info) mock_get_host_info.assert_called_once_with( volume, mock.sentinel.multipath) mock_associate = tgt_utils.associate_initiator_with_iscsi_target mock_associate.assert_called_once_with( fake_initiator['initiator'], volume.provider_location) def test_terminate_connection(self): volume = fake_volume.fake_volume_obj(mock.sentinel.fake_context) fake_initiator = db_fakes.get_fake_connector_info() self._driver.terminate_connection(volume, fake_initiator) self._driver._tgt_utils.deassociate_initiator.assert_called_once_with( 
fake_initiator['initiator'], volume.provider_location) @mock.patch.object(windows_iscsi.WindowsISCSIDriver, 'local_path') def test_create_volume(self, mock_local_path): volume = fake_volume.fake_volume_obj(mock.sentinel.fake_context) self._driver.create_volume(volume) mock_local_path.assert_called_once_with(volume) self._driver._tgt_utils.create_wt_disk.assert_called_once_with( mock_local_path.return_value, volume.name, size_mb=volume.size * 1024) def test_local_path(self): volume = fake_volume.fake_volume_obj(mock.sentinel.fake_context) fake_lun_path = 'fake_lun_path' self.flags(windows_iscsi_lun_path=fake_lun_path) disk_format = 'vhd' mock_get_fmt = self._driver._tgt_utils.get_supported_disk_format mock_get_fmt.return_value = disk_format disk_path = self._driver.local_path(volume) expected_fname = "%s.%s" % (volume.name, disk_format) expected_disk_path = os.path.join(fake_lun_path, expected_fname) self.assertEqual(expected_disk_path, disk_path) mock_get_fmt.assert_called_once_with() @mock.patch.object(windows_iscsi.WindowsISCSIDriver, 'local_path') @mock.patch.object(fileutils, 'delete_if_exists') def test_delete_volume(self, mock_delete_if_exists, mock_local_path): volume = fake_volume.fake_volume_obj(mock.sentinel.fake_context) self._driver.delete_volume(volume) mock_local_path.assert_called_once_with(volume) self._driver._tgt_utils.remove_wt_disk.assert_called_once_with( volume.name) mock_delete_if_exists.assert_called_once_with( mock_local_path.return_value) def test_create_snapshot(self): volume = fake_volume.fake_volume_obj(context.get_admin_context()) snapshot = fake_snapshot.fake_snapshot_obj(context.get_admin_context(), volume_id=volume.id) snapshot.volume = volume self._driver.create_snapshot(snapshot) self._driver._tgt_utils.create_snapshot.assert_called_once_with( snapshot.volume_name, snapshot.name) @mock.patch.object(windows_iscsi.WindowsISCSIDriver, 'local_path') def test_create_volume_from_snapshot(self, mock_local_path): volume = fake_volume.fake_volume_obj(context.get_admin_context()) snapshot = fake_snapshot.fake_snapshot_obj(context.get_admin_context()) snapshot.volume = volume self._driver.create_volume_from_snapshot(volume, snapshot) self._driver._tgt_utils.export_snapshot.assert_called_once_with( snapshot.name, mock_local_path.return_value) self._driver._tgt_utils.import_wt_disk.assert_called_once_with( mock_local_path.return_value, volume.name) def test_delete_snapshot(self): snapshot = fake_snapshot.fake_snapshot_obj(context.get_admin_context()) self._driver.delete_snapshot(snapshot) self._driver._tgt_utils.delete_snapshot.assert_called_once_with( snapshot.name) def test_get_target_name(self): volume = fake_volume.fake_volume_obj(mock.sentinel.fake_context) expected_target_name = "%s%s" % ( self._driver.configuration.target_prefix, volume.name) target_name = self._driver._get_target_name(volume) self.assertEqual(expected_target_name, target_name) @mock.patch.object(windows_iscsi.WindowsISCSIDriver, '_get_target_name') @mock.patch.object(windows_iscsi.volume_utils, 'generate_username') @mock.patch.object(windows_iscsi.volume_utils, 'generate_password') def test_create_export(self, mock_generate_password, mock_generate_username, mock_get_target_name): tgt_utils = self._driver._tgt_utils volume = fake_volume.fake_volume_obj(mock.sentinel.fake_context) self._driver.configuration.chap_username = None self._driver.configuration.chap_password = None self._driver.configuration.use_chap_auth = True fake_chap_username = 'fake_chap_username' fake_chap_password = 
'fake_chap_password' mock_get_target_name.return_value = mock.sentinel.target_name mock_generate_username.return_value = fake_chap_username mock_generate_password.return_value = fake_chap_password tgt_utils.iscsi_target_exists.return_value = False vol_updates = self._driver.create_export(mock.sentinel.context, volume, mock.sentinel.connector) mock_get_target_name.assert_called_once_with(volume) tgt_utils.iscsi_target_exists.assert_called_once_with( mock.sentinel.target_name) tgt_utils.set_chap_credentials.assert_called_once_with( mock.sentinel.target_name, fake_chap_username, fake_chap_password) tgt_utils.add_disk_to_target.assert_called_once_with( volume.name, mock.sentinel.target_name) expected_provider_auth = ' '.join(('CHAP', fake_chap_username, fake_chap_password)) expected_vol_updates = dict( provider_location=mock.sentinel.target_name, provider_auth=expected_provider_auth) self.assertEqual(expected_vol_updates, vol_updates) @mock.patch.object(windows_iscsi.WindowsISCSIDriver, '_get_target_name') def test_remove_export(self, mock_get_target_name): volume = fake_volume.fake_volume_obj(mock.sentinel.fake_context) self._driver.remove_export(mock.sentinel.context, volume) mock_get_target_name.assert_called_once_with(volume) self._driver._tgt_utils.delete_iscsi_target.assert_called_once_with( mock_get_target_name.return_value) @mock.patch.object(windows_iscsi.WindowsISCSIDriver, 'local_path') @mock.patch.object(image_utils, 'temporary_file') @mock.patch.object(image_utils, 'fetch_to_vhd') @mock.patch('os.unlink') def test_copy_image_to_volume(self, mock_unlink, mock_fetch_to_vhd, mock_tmp_file, mock_local_path): tgt_utils = self._driver._tgt_utils volume = fake_volume.fake_volume_obj(mock.sentinel.fake_context) mock_tmp_file.return_value.__enter__.return_value = ( mock.sentinel.tmp_vhd_path) mock_local_path.return_value = mock.sentinel.vol_vhd_path self._driver.copy_image_to_volume(mock.sentinel.context, volume, mock.sentinel.image_service, mock.sentinel.image_id) mock_local_path.assert_called_once_with(volume) mock_tmp_file.assert_called_once_with(suffix='.vhd') image_utils.fetch_to_vhd.assert_called_once_with( mock.sentinel.context, mock.sentinel.image_service, mock.sentinel.image_id, mock.sentinel.tmp_vhd_path, self._driver.configuration.volume_dd_blocksize, disable_sparse=False) mock_unlink.assert_called_once_with(mock.sentinel.vol_vhd_path) self._driver._vhdutils.convert_vhd.assert_called_once_with( mock.sentinel.tmp_vhd_path, mock.sentinel.vol_vhd_path, tgt_utils.get_supported_vhd_type.return_value) self._driver._vhdutils.resize_vhd.assert_called_once_with( mock.sentinel.vol_vhd_path, volume.size * units.Gi, is_file_max_size=False) tgt_utils.change_wt_disk_status.assert_has_calls( [mock.call(volume.name, enabled=False), mock.call(volume.name, enabled=True)]) @mock.patch.object(windows_iscsi.uuidutils, 'generate_uuid') def test_temporary_snapshot(self, mock_generate_uuid): tgt_utils = self._driver._tgt_utils mock_generate_uuid.return_value = mock.sentinel.snap_uuid expected_snap_name = '%s-tmp-snapshot-%s' % ( mock.sentinel.volume_name, mock.sentinel.snap_uuid) with self._driver._temporary_snapshot( mock.sentinel.volume_name) as snap_name: self.assertEqual(expected_snap_name, snap_name) tgt_utils.create_snapshot.assert_called_once_with( mock.sentinel.volume_name, expected_snap_name) tgt_utils.delete_snapshot.assert_called_once_with( expected_snap_name) @mock.patch.object(windows_iscsi.WindowsISCSIDriver, '_temporary_snapshot') @mock.patch.object(image_utils, 'upload_volume') 
@mock.patch.object(fileutils, 'delete_if_exists') def test_copy_volume_to_image(self, mock_delete_if_exists, mock_upload_volume, mock_tmp_snap): tgt_utils = self._driver._tgt_utils disk_format = 'vhd' fake_image_meta = db_fakes.get_fake_image_meta() fake_volume = test_utils.create_volume( self._context, volume_type_id=fake.VOLUME_TYPE_ID, updated_at=self.updated_at) extra_specs = { 'image_service:store_id': 'fake-store' } test_utils.create_volume_type(self._context.elevated(), id=fake.VOLUME_TYPE_ID, name="test_type", extra_specs=extra_specs) fake_img_conv_dir = 'fake_img_conv_dir' self.flags(image_conversion_dir=fake_img_conv_dir) tgt_utils.get_supported_disk_format.return_value = disk_format mock_tmp_snap.return_value.__enter__.return_value = ( mock.sentinel.tmp_snap_name) expected_tmp_vhd_path = os.path.join( fake_img_conv_dir, fake_image_meta['id'] + '.' + disk_format) self._driver.copy_volume_to_image( mock.sentinel.context, fake_volume, mock.sentinel.image_service, fake_image_meta) mock_tmp_snap.assert_called_once_with(fake_volume.name) tgt_utils.export_snapshot.assert_called_once_with( mock.sentinel.tmp_snap_name, expected_tmp_vhd_path) mock_upload_volume.assert_called_once_with( mock.sentinel.context, mock.sentinel.image_service, fake_image_meta, expected_tmp_vhd_path, volume_fd=None, volume_format='vhd', store_id='fake-store', base_image_ref=None, compress=True, run_as_root=True) mock_delete_if_exists.assert_called_once_with( expected_tmp_vhd_path) @mock.patch.object(windows_iscsi.WindowsISCSIDriver, '_temporary_snapshot') @mock.patch.object(windows_iscsi.WindowsISCSIDriver, 'local_path') def test_create_cloned_volume(self, mock_local_path, mock_tmp_snap): tgt_utils = self._driver._tgt_utils volume = fake_volume.fake_volume_obj(mock.sentinel.fake_context) src_volume = fake_volume.fake_volume_obj(mock.sentinel.fake_context) mock_tmp_snap.return_value.__enter__.return_value = ( mock.sentinel.tmp_snap_name) mock_local_path.return_value = mock.sentinel.vol_vhd_path self._driver.create_cloned_volume(volume, src_volume) mock_tmp_snap.assert_called_once_with(src_volume.name) tgt_utils.export_snapshot.assert_called_once_with( mock.sentinel.tmp_snap_name, mock.sentinel.vol_vhd_path) self._driver._vhdutils.resize_vhd.assert_called_once_with( mock.sentinel.vol_vhd_path, volume.size * units.Gi, is_file_max_size=False) tgt_utils.import_wt_disk.assert_called_once_with( mock.sentinel.vol_vhd_path, volume.name) @mock.patch('os.path.splitdrive') def test_get_capacity_info(self, mock_splitdrive): mock_splitdrive.return_value = (mock.sentinel.drive, mock.sentinel.path_tail) fake_size_gb = 2 fake_free_space_gb = 1 self._driver._hostutils.get_volume_info.return_value = ( fake_size_gb * units.Gi, fake_free_space_gb * units.Gi) total_gb, free_gb = self._driver._get_capacity_info() self.assertEqual(fake_size_gb, total_gb) self.assertEqual(fake_free_space_gb, free_gb) self._driver._hostutils.get_volume_info.assert_called_once_with( mock.sentinel.drive) mock_splitdrive.assert_called_once_with('fake_iscsi_lun_path') @mock.patch.object(windows_iscsi.WindowsISCSIDriver, '_get_capacity_info') def test_update_volume_stats(self, mock_get_capacity_info): mock_get_capacity_info.return_value = ( mock.sentinel.size_gb, mock.sentinel.free_space_gb) self.flags(volume_backend_name='volume_backend_name') self.flags(reserved_percentage=10) expected_volume_stats = dict( volume_backend_name='volume_backend_name', vendor_name='Microsoft', driver_version=self._driver.VERSION, storage_protocol='iSCSI', 
total_capacity_gb=mock.sentinel.size_gb, free_capacity_gb=mock.sentinel.free_space_gb, reserved_percentage=10, QoS_support=False) self._driver._update_volume_stats() self.assertEqual(expected_volume_stats, self._driver._stats) def test_extend_volume(self): volume = fake_volume.fake_volume_obj(mock.sentinel.fake_context) new_size_gb = 2 expected_additional_sz_mb = 1024 self._driver.extend_volume(volume, new_size_gb) self._driver._tgt_utils.extend_wt_disk.assert_called_once_with( volume.name, expected_additional_sz_mb) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/windows/test_smbfs.py0000664000175000017500000011663100000000000022701 0ustar00zuulzuul00000000000000# Copyright 2014 Cloudbase Solutions Srl # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import copy import os from unittest import mock import ddt from oslo_utils import timeutils from oslo_utils import units from cinder import context from cinder import exception from cinder.image import image_utils from cinder.objects import fields from cinder.tests.unit import fake_constants as fake from cinder.tests.unit import fake_volume from cinder.tests.unit import test from cinder.tests.unit import utils as test_utils from cinder.volume.drivers import remotefs from cinder.volume.drivers.windows import smbfs @ddt.ddt class WindowsSmbFsTestCase(test.TestCase): _FAKE_SHARE = '//1.2.3.4/share1' _FAKE_SHARE_HASH = 'db0bf952c1734092b83e8990bd321131' _FAKE_MNT_BASE = r'c:\openstack\mnt' _FAKE_MNT_POINT = os.path.join(_FAKE_MNT_BASE, _FAKE_SHARE_HASH) _FAKE_VOLUME_ID = '4f711859-4928-4cb7-801a-a50c37ceaccc' _FAKE_VOLUME_NAME = 'volume-%s.vhdx' % _FAKE_VOLUME_ID _FAKE_SNAPSHOT_ID = '50811859-4928-4cb7-801a-a50c37ceacba' _FAKE_SNAPSHOT_NAME = 'volume-%s-%s.vhdx' % (_FAKE_VOLUME_ID, _FAKE_SNAPSHOT_ID) _FAKE_SNAPSHOT_PATH = os.path.join(_FAKE_MNT_POINT, _FAKE_SNAPSHOT_NAME) _FAKE_VOLUME_SIZE = 1 _FAKE_TOTAL_SIZE = 2048 _FAKE_TOTAL_AVAILABLE = 1024 _FAKE_TOTAL_ALLOCATED = 1024 _FAKE_SHARE_OPTS = '-o username=Administrator,password=12345' _FAKE_VOLUME_PATH = os.path.join(_FAKE_MNT_POINT, _FAKE_VOLUME_NAME) _FAKE_SHARE_OPTS = '-o username=Administrator,password=12345' @mock.patch.object(smbfs, 'utilsfactory') @mock.patch.object(smbfs, 'remotefs_brick') def setUp(self, mock_remotefs, mock_utilsfactory): super(WindowsSmbFsTestCase, self).setUp() self.context = context.get_admin_context() self._FAKE_SMBFS_CONFIG = mock.MagicMock( smbfs_shares_config=mock.sentinel.share_config_file, smbfs_default_volume_format='vhdx', nas_volume_prov_type='thin') self._smbfs_driver = smbfs.WindowsSmbfsDriver( configuration=self._FAKE_SMBFS_CONFIG) self._smbfs_driver._delete = mock.Mock() self._smbfs_driver._local_volume_dir = mock.Mock( return_value=self._FAKE_MNT_POINT) self._smbfs_driver.base = self._FAKE_MNT_BASE self._diskutils = self._smbfs_driver._diskutils self._vhdutils = self._smbfs_driver._vhdutils self.volume = self._simple_volume() self.snapshot = 
self._simple_snapshot(volume=self.volume) self._context = context.get_admin_context() self.updated_at = timeutils.utcnow() def _simple_volume(self, **kwargs): updates = {'id': self._FAKE_VOLUME_ID, 'size': self._FAKE_VOLUME_SIZE, 'provider_location': self._FAKE_SHARE} updates.update(kwargs) ctxt = context.get_admin_context() volume = test_utils.create_volume(ctxt, **updates) return volume def _simple_snapshot(self, **kwargs): volume = kwargs.pop('volume', None) or self._simple_volume() ctxt = context.get_admin_context() updates = {'id': self._FAKE_SNAPSHOT_ID, 'volume_id': volume.id} updates.update(kwargs) snapshot = test_utils.create_snapshot(ctxt, **updates) return snapshot @mock.patch.object(smbfs.WindowsSmbfsDriver, '_check_os_platform') @mock.patch.object(remotefs.RemoteFSSnapDriverDistributed, 'do_setup') @mock.patch('os.path.exists') @mock.patch('os.path.isabs') @mock.patch.object(image_utils, 'check_qemu_img_version') def _test_setup(self, mock_check_qemu_img_version, mock_is_abs, mock_exists, mock_remotefs_do_setup, mock_check_os_platform, config, share_config_exists=True): mock_exists.return_value = share_config_exists fake_ensure_mounted = mock.MagicMock() self._smbfs_driver._ensure_shares_mounted = fake_ensure_mounted self._smbfs_driver._setup_pool_mappings = mock.Mock() self._smbfs_driver.configuration = config if not (config.smbfs_shares_config and share_config_exists): self.assertRaises(smbfs.SmbfsException, self._smbfs_driver.do_setup, mock.sentinel.context) else: self._smbfs_driver.do_setup(mock.sentinel.context) mock_check_qemu_img_version.assert_called_once_with( self._smbfs_driver._MINIMUM_QEMU_IMG_VERSION) mock_is_abs.assert_called_once_with(self._smbfs_driver.base) self.assertEqual({}, self._smbfs_driver.shares) fake_ensure_mounted.assert_called_once_with() self._smbfs_driver._setup_pool_mappings.assert_called_once_with() self.assertTrue(self._smbfs_driver._thin_provisioning_support) mock_check_os_platform.assert_called_once_with() def test_setup_pools(self): pool_mappings = { '//ip/share0': 'pool0', '//ip/share1': 'pool1', } self._smbfs_driver.configuration.smbfs_pool_mappings = pool_mappings self._smbfs_driver.shares = { '//ip/share0': None, '//ip/share1': None, '//ip/share2': None } expected_pool_mappings = pool_mappings.copy() expected_pool_mappings['//ip/share2'] = 'share2' self._smbfs_driver._setup_pool_mappings() self.assertEqual(expected_pool_mappings, self._smbfs_driver._pool_mappings) def test_setup_pool_duplicates(self): self._smbfs_driver.configuration.smbfs_pool_mappings = { 'share0': 'pool0', 'share1': 'pool0' } self.assertRaises(smbfs.SmbfsException, self._smbfs_driver._setup_pool_mappings) def test_initialize_connection(self): self._smbfs_driver.get_active_image_from_info = mock.Mock( return_value=self._FAKE_VOLUME_NAME) self._smbfs_driver._get_mount_point_base = mock.Mock( return_value=self._FAKE_MNT_BASE) self._smbfs_driver.shares = {self._FAKE_SHARE: self._FAKE_SHARE_OPTS} self._smbfs_driver.get_volume_format = mock.Mock( return_value=mock.sentinel.format) fake_data = {'export': self._FAKE_SHARE, 'format': mock.sentinel.format, 'name': self._FAKE_VOLUME_NAME, 'options': self._FAKE_SHARE_OPTS} expected = { 'driver_volume_type': 'smbfs', 'data': fake_data, 'mount_point_base': self._FAKE_MNT_BASE} ret_val = self._smbfs_driver.initialize_connection( self.volume, None) self.assertEqual(expected, ret_val) @mock.patch.object(smbfs.WindowsSmbfsDriver, '_get_snapshot_backing_file') @mock.patch.object(smbfs.WindowsSmbfsDriver, 'get_volume_format') 
@mock.patch.object(smbfs.WindowsSmbfsDriver, '_get_mount_point_base') def test_initialize_connection_snapshot(self, mock_get_mount_base, mock_get_volume_format, mock_get_snap_by_backing_file): self._smbfs_driver.shares = {self._FAKE_SHARE: self._FAKE_SHARE_OPTS} mock_get_snap_by_backing_file.return_value = self._FAKE_VOLUME_NAME mock_get_volume_format.return_value = 'vhdx' mock_get_mount_base.return_value = self._FAKE_MNT_BASE exp_data = {'export': self._FAKE_SHARE, 'format': 'vhdx', 'name': self._FAKE_VOLUME_NAME, 'options': self._FAKE_SHARE_OPTS, 'access_mode': 'ro'} expected = { 'driver_volume_type': 'smbfs', 'data': exp_data, 'mount_point_base': self._FAKE_MNT_BASE} ret_val = self._smbfs_driver.initialize_connection_snapshot( self.snapshot, mock.sentinel.connector) self.assertEqual(expected, ret_val) mock_get_snap_by_backing_file.assert_called_once_with(self.snapshot) mock_get_volume_format.assert_called_once_with(self.snapshot.volume) mock_get_mount_base.assert_called_once_with() def test_setup(self): self._test_setup(config=self._FAKE_SMBFS_CONFIG) def test_setup_missing_shares_config_option(self): fake_config = copy.copy(self._FAKE_SMBFS_CONFIG) fake_config.smbfs_shares_config = None self._test_setup(config=fake_config, share_config_exists=False) def test_setup_missing_shares_config_file(self): self._test_setup(config=self._FAKE_SMBFS_CONFIG, share_config_exists=False) @mock.patch.object(smbfs, 'context') @mock.patch.object(smbfs.WindowsSmbfsDriver, '_get_pool_name_from_share') def test_get_total_allocated(self, mock_get_pool_name, mock_ctxt): fake_pool_name = 'pool0' fake_host_name = 'fake_host@fake_backend' fake_vol_sz_sum = 5 mock_db = mock.Mock() mock_db.volume_data_get_for_host.return_value = [ mock.sentinel.vol_count, fake_vol_sz_sum] self._smbfs_driver.host = fake_host_name self._smbfs_driver.db = mock_db mock_get_pool_name.return_value = fake_pool_name allocated = self._smbfs_driver._get_total_allocated( mock.sentinel.share) self.assertEqual(fake_vol_sz_sum << 30, allocated) mock_get_pool_name.assert_called_once_with(mock.sentinel.share) mock_db.volume_data_get_for_host.assert_called_once_with( context=mock_ctxt.get_admin_context.return_value, host='fake_host@fake_backend#pool0') @mock.patch.object(smbfs.WindowsSmbfsDriver, '_get_local_volume_path_template') @mock.patch.object(smbfs.WindowsSmbfsDriver, '_lookup_local_volume_path') @mock.patch.object(smbfs.WindowsSmbfsDriver, 'get_volume_format') def _test_get_volume_path(self, mock_get_volume_format, mock_lookup_volume, mock_get_path_template, volume_exists=True): drv = self._smbfs_driver (mock_get_path_template.return_value, ext) = os.path.splitext(self._FAKE_VOLUME_PATH) volume_format = ext.strip('.') mock_lookup_volume.return_value = ( self._FAKE_VOLUME_PATH if volume_exists else None) mock_get_volume_format.return_value = volume_format ret_val = drv.local_path(self.volume) if volume_exists: self.assertFalse(mock_get_volume_format.called) else: mock_get_volume_format.assert_called_once_with(self.volume) self.assertEqual(self._FAKE_VOLUME_PATH, ret_val) def test_get_existing_volume_path(self): self._test_get_volume_path() def test_get_new_volume_path(self): self._test_get_volume_path(volume_exists=False) @mock.patch.object(smbfs.WindowsSmbfsDriver, '_local_volume_dir') def test_get_local_volume_path_template(self, mock_get_local_dir): mock_get_local_dir.return_value = self._FAKE_MNT_POINT ret_val = self._smbfs_driver._get_local_volume_path_template( self.volume) exp_template = os.path.splitext(self._FAKE_VOLUME_PATH)[0] 
self.assertEqual(exp_template, ret_val) @mock.patch('os.path.exists') def test_lookup_local_volume_path(self, mock_exists): expected_path = self._FAKE_VOLUME_PATH + '.vhdx' mock_exists.side_effect = lambda x: x == expected_path ret_val = self._smbfs_driver._lookup_local_volume_path( self._FAKE_VOLUME_PATH) extensions = [ ".%s" % ext for ext in self._smbfs_driver._VALID_IMAGE_EXTENSIONS] possible_paths = [self._FAKE_VOLUME_PATH + ext for ext in extensions] mock_exists.assert_has_calls( [mock.call(path) for path in possible_paths]) self.assertEqual(expected_path, ret_val) @mock.patch.object(smbfs.WindowsSmbfsDriver, '_get_local_volume_path_template') @mock.patch.object(smbfs.WindowsSmbfsDriver, '_lookup_local_volume_path') @mock.patch.object(smbfs.WindowsSmbfsDriver, '_get_volume_format_spec') def _test_get_volume_format(self, mock_get_format_spec, mock_lookup_volume, mock_get_path_template, qemu_format=False, volume_format='vhdx', expected_vol_fmt=None, volume_exists=True): expected_vol_fmt = expected_vol_fmt or volume_format vol_path = '%s.%s' % (os.path.splitext(self._FAKE_VOLUME_PATH)[0], volume_format) mock_get_path_template.return_value = vol_path mock_lookup_volume.return_value = ( vol_path if volume_exists else None) mock_get_format_spec.return_value = volume_format supported_fmts = self._smbfs_driver._SUPPORTED_IMAGE_FORMATS if volume_format.lower() not in supported_fmts: self.assertRaises(smbfs.SmbfsException, self._smbfs_driver.get_volume_format, self.volume, qemu_format) else: ret_val = self._smbfs_driver.get_volume_format(self.volume, qemu_format) if volume_exists: self.assertFalse(mock_get_format_spec.called) else: mock_get_format_spec.assert_called_once_with(self.volume) self.assertEqual(expected_vol_fmt, ret_val) def test_get_volume_format_invalid_extension(self): self._test_get_volume_format(volume_format='fake') def test_get_existing_vhdx_volume_format(self): self._test_get_volume_format() def test_get_new_vhd_volume_format(self): fmt = 'vhd' self._test_get_volume_format(volume_format=fmt, volume_exists=False, expected_vol_fmt=fmt) def test_get_new_vhd_legacy_volume_format(self): img_fmt = 'vhd' expected_fmt = 'vpc' self._test_get_volume_format(volume_format=img_fmt, volume_exists=False, qemu_format=True, expected_vol_fmt=expected_fmt) @ddt.data([False, False], [True, True], [False, True]) @ddt.unpack def test_get_volume_format_spec(self, volume_meta_contains_fmt, volume_type_contains_fmt): self._smbfs_driver.configuration = copy.copy(self._FAKE_SMBFS_CONFIG) fake_vol_meta_fmt = 'vhd' fake_vol_type_fmt = 'vhdx' volume_metadata = {} volume_type_extra_specs = {} if volume_meta_contains_fmt: volume_metadata['volume_format'] = fake_vol_meta_fmt elif volume_type_contains_fmt: volume_type_extra_specs['smbfs:volume_format'] = fake_vol_type_fmt volume_type = fake_volume.fake_volume_type_obj(self.context) volume = fake_volume.fake_volume_obj(self.context) # Optional arguments are not set in _from_db_object, # so have to set explicitly here volume.volume_type = volume_type volume.metadata = volume_metadata # Same for extra_specs and VolumeType volume_type.extra_specs = volume_type_extra_specs resulted_fmt = self._smbfs_driver._get_volume_format_spec(volume) if volume_meta_contains_fmt: expected_fmt = fake_vol_meta_fmt elif volume_type_contains_fmt: expected_fmt = fake_vol_type_fmt else: expected_fmt = self._FAKE_SMBFS_CONFIG.smbfs_default_volume_format self.assertEqual(expected_fmt, resulted_fmt) @mock.patch.object(remotefs.RemoteFSSnapDriverDistributed, 'create_volume') def 
test_create_volume_base(self, mock_create_volume): self._smbfs_driver.create_volume(self.volume) mock_create_volume.assert_called_once_with(self.volume) @mock.patch('os.path.exists') @mock.patch.object(smbfs.WindowsSmbfsDriver, '_get_vhd_type') def _test_create_volume(self, mock_get_vhd_type, mock_exists, volume_exists=False, volume_format='vhdx'): mock_exists.return_value = volume_exists self._smbfs_driver.create_vhd = mock.MagicMock() fake_create = self._smbfs_driver._vhdutils.create_vhd self._smbfs_driver.get_volume_format = mock.Mock( return_value=volume_format) if volume_exists or volume_format not in ('vhd', 'vhdx'): self.assertRaises(exception.InvalidVolume, self._smbfs_driver._do_create_volume, self.volume) else: fake_vol_path = self._FAKE_VOLUME_PATH self._smbfs_driver._do_create_volume(self.volume) fake_create.assert_called_once_with( fake_vol_path, mock_get_vhd_type.return_value, max_internal_size=self.volume.size << 30, guid=self.volume.id) def test_create_volume(self): self._test_create_volume() def test_create_existing_volume(self): self._test_create_volume(True) def test_create_volume_invalid_volume(self): self._test_create_volume(volume_format="qcow") def test_delete_volume(self): drv = self._smbfs_driver fake_vol_info = self._FAKE_VOLUME_PATH + '.info' drv._ensure_share_mounted = mock.MagicMock() fake_ensure_mounted = drv._ensure_share_mounted drv._local_volume_dir = mock.Mock( return_value=self._FAKE_MNT_POINT) drv.get_active_image_from_info = mock.Mock( return_value=self._FAKE_VOLUME_NAME) drv._delete = mock.Mock() drv._local_path_volume_info = mock.Mock( return_value=fake_vol_info) with mock.patch('os.path.exists', lambda x: True): drv.delete_volume(self.volume) fake_ensure_mounted.assert_called_once_with(self._FAKE_SHARE) drv._delete.assert_any_call( self._FAKE_VOLUME_PATH) drv._delete.assert_any_call(fake_vol_info) def test_ensure_mounted(self): self._smbfs_driver.shares = {self._FAKE_SHARE: self._FAKE_SHARE_OPTS} self._smbfs_driver._ensure_share_mounted(self._FAKE_SHARE) self._smbfs_driver._remotefsclient.mount.assert_called_once_with( self._FAKE_SHARE, self._FAKE_SHARE_OPTS) def test_get_capacity_info(self): self._diskutils.get_disk_capacity.return_value = ( self._FAKE_TOTAL_SIZE, self._FAKE_TOTAL_AVAILABLE) self._smbfs_driver._get_mount_point_for_share = mock.Mock( return_value=mock.sentinel.mnt_point) self._smbfs_driver._get_total_allocated = mock.Mock( return_value=self._FAKE_TOTAL_ALLOCATED) ret_val = self._smbfs_driver._get_capacity_info(self._FAKE_SHARE) expected_ret_val = [int(x) for x in [self._FAKE_TOTAL_SIZE, self._FAKE_TOTAL_AVAILABLE, self._FAKE_TOTAL_ALLOCATED]] self.assertEqual(expected_ret_val, ret_val) self._smbfs_driver._get_mount_point_for_share.assert_called_once_with( self._FAKE_SHARE) self._diskutils.get_disk_capacity.assert_called_once_with( mock.sentinel.mnt_point) self._smbfs_driver._get_total_allocated.assert_called_once_with( self._FAKE_SHARE) def _test_get_img_info(self, backing_file=None): self._smbfs_driver._vhdutils.get_vhd_parent_path.return_value = ( backing_file) image_info = self._smbfs_driver._qemu_img_info(self._FAKE_VOLUME_PATH) self.assertEqual(self._FAKE_VOLUME_NAME, image_info.image) backing_file_name = backing_file and os.path.basename(backing_file) self.assertEqual(backing_file_name, image_info.backing_file) def test_get_img_info_without_backing_file(self): self._test_get_img_info() def test_get_snapshot_info(self): self._test_get_img_info(self._FAKE_VOLUME_PATH) @ddt.data('attached', 'detached') def 
test_create_snapshot(self, attach_status): self.snapshot.volume.attach_status = attach_status self.snapshot.volume.save() self._smbfs_driver._vhdutils.create_differencing_vhd = ( mock.Mock()) self._smbfs_driver._local_volume_dir = mock.Mock( return_value=self._FAKE_MNT_POINT) fake_create_diff = ( self._smbfs_driver._vhdutils.create_differencing_vhd) self._smbfs_driver._do_create_snapshot( self.snapshot, os.path.basename(self._FAKE_VOLUME_PATH), self._FAKE_SNAPSHOT_PATH) if attach_status != 'attached': fake_create_diff.assert_called_once_with(self._FAKE_SNAPSHOT_PATH, self._FAKE_VOLUME_PATH) else: fake_create_diff.assert_not_called() self.assertEqual(os.path.basename(self._FAKE_VOLUME_PATH), self.snapshot.metadata['backing_file']) # Ensure that the changes have been saved. self.assertFalse(bool(self.snapshot.obj_what_changed())) @mock.patch.object(smbfs.WindowsSmbfsDriver, '_check_extend_volume_support') @mock.patch.object(smbfs.WindowsSmbfsDriver, '_local_path_active_image') def test_extend_volume(self, mock_get_active_img, mock_check_ext_support): volume = fake_volume.fake_volume_obj(self.context) new_size = volume.size + 1 self._smbfs_driver.extend_volume(volume, new_size) mock_check_ext_support.assert_called_once_with(volume, new_size) mock_get_active_img.assert_called_once_with(volume) self._vhdutils.resize_vhd.assert_called_once_with( mock_get_active_img.return_value, new_size * units.Gi, is_file_max_size=False) @ddt.data({'snapshots_exist': True}, {'vol_fmt': smbfs.WindowsSmbfsDriver._DISK_FORMAT_VHD, 'snapshots_exist': True, 'expected_exc': exception.InvalidVolume}) @ddt.unpack @mock.patch.object(smbfs.WindowsSmbfsDriver, 'get_volume_format') @mock.patch.object(smbfs.WindowsSmbfsDriver, '_snapshots_exist') def test_check_extend_support(self, mock_snapshots_exist, mock_get_volume_format, vol_fmt=None, snapshots_exist=False, share_eligible=True, expected_exc=None): vol_fmt = vol_fmt or self._smbfs_driver._DISK_FORMAT_VHDX volume = fake_volume.fake_volume_obj( self.context, provider_location='fake_provider_location') new_size = volume.size + 1 mock_snapshots_exist.return_value = snapshots_exist mock_get_volume_format.return_value = vol_fmt if expected_exc: self.assertRaises(expected_exc, self._smbfs_driver._check_extend_volume_support, volume, new_size) else: self._smbfs_driver._check_extend_volume_support(volume, new_size) mock_get_volume_format.assert_called_once_with(volume) mock_snapshots_exist.assert_called_once_with(volume) @ddt.data({}, {'delete_latest': True}, {'attach_status': 'detached'}, {'snap_info_contains_snap_id': False}) @ddt.unpack @mock.patch.object(remotefs.RemoteFSSnapDriverDistributed, '_delete_snapshot') @mock.patch.object(smbfs.WindowsSmbfsDriver, '_local_volume_dir') @mock.patch.object(smbfs.WindowsSmbfsDriver, '_local_path_volume_info') @mock.patch.object(smbfs.WindowsSmbfsDriver, '_write_info_file') @mock.patch.object(smbfs.WindowsSmbfsDriver, '_read_info_file') @mock.patch.object(smbfs.WindowsSmbfsDriver, '_nova_assisted_vol_snap_delete') @mock.patch.object(smbfs.WindowsSmbfsDriver, '_get_snapshot_by_backing_file') def test_delete_snapshot(self, mock_get_snap_by_backing_file, mock_nova_assisted_snap_del, mock_read_info_file, mock_write_info_file, mock_local_path_volume_info, mock_get_local_dir, mock_remotefs_snap_delete, attach_status='attached', snap_info_contains_snap_id=True, delete_latest=False): self.snapshot.volume.attach_status = attach_status self.snapshot.metadata['backing_file'] = os.path.basename( self._FAKE_VOLUME_PATH) higher_snapshot = 
self._simple_snapshot(id=None, volume=self.volume) fake_snap_file = 'snap_file' fake_snap_parent_path = os.path.join(self._FAKE_MNT_POINT, 'snap_file_parent') active_img = 'active_img' if not delete_latest else fake_snap_file snap_info = dict(active=active_img) if snap_info_contains_snap_id: snap_info[self.snapshot.id] = fake_snap_file mock_get_snap_by_backing_file.return_value = ( higher_snapshot if not delete_latest else None) mock_info_path = mock_local_path_volume_info.return_value mock_read_info_file.return_value = snap_info mock_get_local_dir.return_value = self._FAKE_MNT_POINT self._vhdutils.get_vhd_parent_path.return_value = ( fake_snap_parent_path) expected_delete_info = {'file_to_merge': fake_snap_file, 'volume_id': self.snapshot.volume.id} self._smbfs_driver._delete_snapshot(self.snapshot) if attach_status != 'attached': mock_remotefs_snap_delete.assert_called_once_with(self.snapshot) elif snap_info_contains_snap_id: mock_local_path_volume_info.assert_called_once_with( self.snapshot.volume) mock_read_info_file.assert_called_once_with( mock_info_path, empty_if_missing=True) mock_nova_assisted_snap_del.assert_called_once_with( self.snapshot._context, self.snapshot, expected_delete_info) exp_merged_img_path = os.path.join(self._FAKE_MNT_POINT, fake_snap_file) self._smbfs_driver._delete.assert_called_once_with( exp_merged_img_path) if delete_latest: self._vhdutils.get_vhd_parent_path.assert_called_once_with( exp_merged_img_path) exp_active = os.path.basename(fake_snap_parent_path) else: exp_active = active_img self.assertEqual(exp_active, snap_info['active']) self.assertNotIn(snap_info, self.snapshot.id) mock_write_info_file.assert_called_once_with(mock_info_path, snap_info) if attach_status != 'attached' or not snap_info_contains_snap_id: mock_nova_assisted_snap_del.assert_not_called() mock_write_info_file.assert_not_called() if not delete_latest and snap_info_contains_snap_id: self.assertEqual(os.path.basename(self._FAKE_VOLUME_PATH), higher_snapshot.metadata['backing_file']) self.assertFalse(bool(higher_snapshot.obj_what_changed())) @ddt.data(True, False) def test_get_snapshot_by_backing_file(self, metadata_set): backing_file = 'fake_backing_file' if metadata_set: self.snapshot.metadata['backing_file'] = backing_file self.snapshot.save() for idx in range(2): # We're adding a few other snapshots. self._simple_snapshot(id=None, volume=self.volume) snapshot = self._smbfs_driver._get_snapshot_by_backing_file( self.volume, backing_file) if metadata_set: self.assertEqual(self.snapshot.id, snapshot.id) else: self.assertIsNone(snapshot) @ddt.data(True, False) @mock.patch.object(remotefs.RemoteFSSnapDriverDistributed, '_get_snapshot_backing_file') def test_get_snapshot_backing_file_md_set(self, md_set, remotefs_get_backing_file): backing_file = 'fake_backing_file' if md_set: self.snapshot.metadata['backing_file'] = backing_file ret_val = self._smbfs_driver._get_snapshot_backing_file( self.snapshot) # If the metadata is not set, we expect the super class method to # be used, which is supposed to query the image. 
if md_set: self.assertEqual(backing_file, ret_val) else: self.assertEqual(remotefs_get_backing_file.return_value, ret_val) remotefs_get_backing_file.assert_called_once_with( self.snapshot) def test_create_volume_from_unavailable_snapshot(self): self.snapshot.status = fields.SnapshotStatus.ERROR self.assertRaises( exception.InvalidSnapshot, self._smbfs_driver.create_volume_from_snapshot, self.volume, self.snapshot) @ddt.data(True, False) def test_copy_volume_to_image(self, has_parent=False): drv = self._smbfs_driver volume = test_utils.create_volume( self._context, volume_type_id=fake.VOLUME_TYPE_ID, updated_at=self.updated_at) extra_specs = { 'image_service:store_id': 'fake-store' } test_utils.create_volume_type(self._context.elevated(), id=fake.VOLUME_TYPE_ID, name="test_type", extra_specs=extra_specs) fake_image_meta = {'id': 'fake-image-id'} fake_img_format = self._smbfs_driver._DISK_FORMAT_VHDX if has_parent: fake_volume_path = self._FAKE_SNAPSHOT_PATH fake_parent_path = self._FAKE_VOLUME_PATH else: fake_volume_path = self._FAKE_VOLUME_PATH fake_parent_path = None fake_active_image = os.path.basename(fake_volume_path) drv.get_active_image_from_info = mock.Mock( return_value=fake_active_image) drv._local_volume_dir = mock.Mock( return_value=self._FAKE_MNT_POINT) drv.get_volume_format = mock.Mock( return_value=fake_img_format) drv._vhdutils.get_vhd_parent_path.return_value = ( fake_parent_path) with mock.patch.object(image_utils, 'upload_volume') as ( fake_upload_volume): drv.copy_volume_to_image( mock.sentinel.context, volume, mock.sentinel.image_service, fake_image_meta) if has_parent: fake_temp_image_name = '%s.temp_image.%s.%s' % ( volume.id, fake_image_meta['id'], fake_img_format) fake_temp_image_path = os.path.join( self._FAKE_MNT_POINT, fake_temp_image_name) fake_active_image_path = os.path.join( self._FAKE_MNT_POINT, fake_active_image) upload_path = fake_temp_image_path drv._vhdutils.convert_vhd.assert_called_once_with( fake_active_image_path, fake_temp_image_path) drv._delete.assert_called_once_with( fake_temp_image_path) else: upload_path = fake_volume_path fake_upload_volume.assert_called_once_with( mock.sentinel.context, mock.sentinel.image_service, fake_image_meta, upload_path, volume_fd=None, volume_format=fake_img_format, store_id='fake-store', base_image_ref=None, compress=True, run_as_root=True) @mock.patch.object(smbfs.WindowsSmbfsDriver, '_get_vhd_type') def test_copy_image_to_volume(self, mock_get_vhd_type): drv = self._smbfs_driver drv.get_volume_format = mock.Mock( return_value=mock.sentinel.volume_format) drv.local_path = mock.Mock( return_value=self._FAKE_VOLUME_PATH) drv.configuration = mock.MagicMock() drv.configuration.volume_dd_blocksize = mock.sentinel.block_size with mock.patch.object(image_utils, 'fetch_to_volume_format') as fake_fetch: drv.copy_image_to_volume( mock.sentinel.context, self.volume, mock.sentinel.image_service, mock.sentinel.image_id) fake_fetch.assert_called_once_with( mock.sentinel.context, mock.sentinel.image_service, mock.sentinel.image_id, self._FAKE_VOLUME_PATH, mock.sentinel.volume_format, mock.sentinel.block_size, mock_get_vhd_type.return_value, disable_sparse=False) drv._vhdutils.resize_vhd.assert_called_once_with( self._FAKE_VOLUME_PATH, self.volume.size * units.Gi, is_file_max_size=False) drv._vhdutils.set_vhd_guid.assert_called_once_with( self._FAKE_VOLUME_PATH, self.volume.id) @mock.patch.object(smbfs.WindowsSmbfsDriver, '_get_vhd_type') def test_copy_volume_from_snapshot(self, mock_get_vhd_type): drv = self._smbfs_driver 
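        # The assertions below cover the copy-from-snapshot flow: look up the
        # snapshot's backing file, drop any pre-existing target image, convert
        # the backing VHD/VHDX into the new volume, then re-stamp its GUID and
        # resize it to the requested size.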
drv._get_snapshot_backing_file = mock.Mock( return_value=self._FAKE_VOLUME_NAME) drv._local_volume_dir = mock.Mock( return_value=self._FAKE_MNT_POINT) drv.local_path = mock.Mock( return_value=mock.sentinel.new_volume_path) drv._copy_volume_from_snapshot(self.snapshot, self.volume, self.volume.size) drv._get_snapshot_backing_file.assert_called_once_with( self.snapshot) drv._delete.assert_called_once_with(mock.sentinel.new_volume_path) drv._vhdutils.convert_vhd.assert_called_once_with( self._FAKE_VOLUME_PATH, mock.sentinel.new_volume_path, vhd_type=mock_get_vhd_type.return_value) drv._vhdutils.set_vhd_guid.assert_called_once_with( mock.sentinel.new_volume_path, self.volume.id) drv._vhdutils.resize_vhd.assert_called_once_with( mock.sentinel.new_volume_path, self.volume.size * units.Gi, is_file_max_size=False) def test_copy_encrypted_volume_from_snapshot(self): # We expect an exception to be raised if an encryption # key is provided since we don't support encrypted volumes # for the time being. self.assertRaises(exception.NotSupportedOperation, self._smbfs_driver._copy_volume_from_snapshot, self.snapshot, self.volume, self.volume.size, mock.sentinel.src_key, mock.sentinel.dest_key) def test_rebase_img(self): drv = self._smbfs_driver drv._rebase_img( self._FAKE_SNAPSHOT_PATH, self._FAKE_VOLUME_NAME, 'vhdx') drv._vhdutils.reconnect_parent_vhd.assert_called_once_with( self._FAKE_SNAPSHOT_PATH, self._FAKE_VOLUME_PATH) def test_copy_volume_image(self): self._smbfs_driver._copy_volume_image(mock.sentinel.src, mock.sentinel.dest) self._smbfs_driver._pathutils.copy.assert_called_once_with( mock.sentinel.src, mock.sentinel.dest) def test_get_pool_name_from_share(self): self._smbfs_driver._pool_mappings = { mock.sentinel.share: mock.sentinel.pool} pool = self._smbfs_driver._get_pool_name_from_share( mock.sentinel.share) self.assertEqual(mock.sentinel.pool, pool) def test_get_share_from_pool_name(self): self._smbfs_driver._pool_mappings = { mock.sentinel.share: mock.sentinel.pool} share = self._smbfs_driver._get_share_from_pool_name( mock.sentinel.pool) self.assertEqual(mock.sentinel.share, share) def test_get_pool_name_from_share_exception(self): self._smbfs_driver._pool_mappings = {} self.assertRaises(smbfs.SmbfsException, self._smbfs_driver._get_share_from_pool_name, mock.sentinel.pool) def test_get_vhd_type(self): drv = self._smbfs_driver mock_type = drv._get_vhd_type(qemu_subformat=True) self.assertEqual(mock_type, 'dynamic') mock_type = drv._get_vhd_type(qemu_subformat=False) self.assertEqual(mock_type, 3) self._smbfs_driver.configuration.nas_volume_prov_type = ( 'thick') mock_type = drv._get_vhd_type(qemu_subformat=True) self.assertEqual(mock_type, 'fixed') def test_get_managed_vol_expected_path(self): self._vhdutils.get_vhd_format.return_value = 'vhdx' vol_location = dict(vol_local_path=mock.sentinel.image_path, mountpoint=self._FAKE_MNT_POINT) path = self._smbfs_driver._get_managed_vol_expected_path( self.volume, vol_location) self.assertEqual(self._FAKE_VOLUME_PATH, path) self._vhdutils.get_vhd_format.assert_called_once_with( mock.sentinel.image_path) @mock.patch.object(remotefs.RemoteFSManageableVolumesMixin, 'manage_existing') def test_manage_existing(self, remotefs_manage): model_update = dict(provider_location=self._FAKE_SHARE) remotefs_manage.return_value = model_update self._smbfs_driver.local_path = mock.Mock( return_value=mock.sentinel.vol_path) # Let's make sure that the provider location gets set. # It's needed by self.local_path. 
self.volume.provider_location = None ret_val = self._smbfs_driver.manage_existing( self.volume, mock.sentinel.ref) self.assertEqual(model_update, ret_val) self.assertEqual(self._FAKE_SHARE, self.volume.provider_location) self._vhdutils.set_vhd_guid.assert_called_once_with( mock.sentinel.vol_path, self.volume.id) self._smbfs_driver.local_path.assert_called_once_with(self.volume) remotefs_manage.assert_called_once_with(self.volume, mock.sentinel.ref) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315577.3151205 cinder-27.0.0/cinder/tests/unit/zonemanager/0000775000175000017500000000000000000000000020762 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/zonemanager/__init__.py0000664000175000017500000000000000000000000023061 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/zonemanager/test_brcd_fc_san_lookup_service.py0000664000175000017500000001433500000000000027735 0ustar00zuulzuul00000000000000# (c) Copyright 2016 Brocade Communications Systems Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
# """Unit tests for brcd fc san lookup service.""" from unittest import mock from oslo_config import cfg from oslo_utils import importutils from cinder.tests.unit import test from cinder.volume import configuration as conf import cinder.zonemanager.drivers.brocade.brcd_fc_san_lookup_service \ as brcd_lookup parsed_switch_port_wwns = ['20:1a:00:05:1e:e8:e3:29', '10:00:00:90:fa:34:40:f6'] switch_data = (""" Type Pid COS PortName NodeName TTL(sec) N 011a00; 2,3; %(port_1)s; 20:1a:00:05:1e:e8:e3:29; na FC4s: FCP PortSymb: [26] "222222 - 1:1:1 - LPe12442" NodeSymb: [32] "SomeSym 7211" Fabric Port Name: 20:1a:00:05:1e:e8:e3:29 Permanent Port Name: 22:22:00:22:ac:00:bc:b0 Port Index: 0 Share Area: No Device Shared in Other AD: No Redirect: No Partial: No LSAN: No N 010100; 2,3; %(port_2)s; 20:00:00:00:af:00:00:af; na FC4s: FCP PortSymb: [26] "333333 - 1:1:1 - LPe12442" NodeSymb: [32] "SomeSym 2222" Fabric Port Name: 10:00:00:90:fa:34:40:f6 Permanent Port Name: 22:22:00:22:ac:00:bc:b0 Port Index: 0 Share Area: No Device Shared in Other AD: No Redirect: No Partial: No LSAN: No""" % {'port_1': parsed_switch_port_wwns[0], 'port_2': parsed_switch_port_wwns[1]}) _device_map_to_verify = { 'BRCD_FAB_2': { 'initiator_port_wwn_list': [parsed_switch_port_wwns[1].replace(':', '')], 'target_port_wwn_list': [parsed_switch_port_wwns[0].replace(':', '')]}} class TestBrcdFCSanLookupService(brcd_lookup.BrcdFCSanLookupService, test.TestCase): def setUp(self): super(TestBrcdFCSanLookupService, self).setUp() self.configuration = conf.Configuration(None) self.configuration.set_default('fc_fabric_names', 'BRCD_FAB_2', 'fc-zone-manager') self.configuration.fc_fabric_names = 'BRCD_FAB_2' self.configuration.brcd_sb_connector = ('cinder.tests.unit.zonemanager' '.test_brcd_fc_san_lookup_' 'service' '.FakeBrcdFCZoneClientCLI') self.create_configuration() # override some of the functions def __init__(self, *args, **kwargs): test.TestCase.__init__(self, *args, **kwargs) def create_configuration(self): fc_fabric_opts = [] fc_fabric_opts.append(cfg.StrOpt('fc_fabric_address', default='10.24.49.100', help='')) fc_fabric_opts.append(cfg.StrOpt('fc_fabric_user', default='admin', help='')) fc_fabric_opts.append(cfg.StrOpt('fc_fabric_password', default='password', help='', secret=True)) fc_fabric_opts.append(cfg.PortOpt('fc_fabric_port', default=22, help='')) config = conf.Configuration(fc_fabric_opts, 'BRCD_FAB_2') self.fabric_configs = {'BRCD_FAB_2': config} def get_client(self, protocol='HTTPS'): conn = ('cinder.tests.unit.zonemanager.' 'test_brcd_fc_san_lookup_service.' 
+ ('FakeBrcdFCZoneClientCLI' if protocol == "CLI" else 'FakeBrcdHttpFCZoneClient')) client = importutils.import_object( conn, ipaddress="10.24.48.213", username="admin", password="password", key="/home/stack/.ssh/id_rsa", port=22, vfid="2", protocol=protocol ) return client @mock.patch.object(brcd_lookup.BrcdFCSanLookupService, '_get_southbound_client') def test_get_device_mapping_from_network(self, get_southbound_client_mock): initiator_list = [parsed_switch_port_wwns[1]] target_list = [parsed_switch_port_wwns[0], '20240002ac000a40'] get_southbound_client_mock.return_value = self.get_client("HTTPS") device_map = self.get_device_mapping_from_network( initiator_list, target_list) self.assertDictEqual(_device_map_to_verify, device_map) @mock.patch.object(brcd_lookup.BrcdFCSanLookupService, '_get_southbound_client', side_effect=ValueError) def test_get_device_mapping_from_network_fail(self, get_southbound_client_mock): initiator_list = [parsed_switch_port_wwns[1]] target_list = [parsed_switch_port_wwns[0], '20240002ac000a40'] self.assertRaises(brcd_lookup.exception.FCSanLookupServiceException, self.get_device_mapping_from_network, initiator_list, target_list) class FakeClient(object): def is_supported_firmware(self): return True def get_nameserver_info(self): ns_info_list_expected = (parsed_switch_port_wwns) return ns_info_list_expected def close_connection(self): pass def cleanup(self): pass class FakeBrcdFCZoneClientCLI(FakeClient): def __init__(self, ipaddress, username, password, port, key, vfid, protocol): self.firmware_supported = True class FakeBrcdHttpFCZoneClient(FakeClient): def __init__(self, ipaddress, username, password, port, key, vfid, protocol): self.firmware_supported = True ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/zonemanager/test_brcd_fc_zone_client_cli.py0000664000175000017500000003526500000000000027210 0ustar00zuulzuul00000000000000# (c) Copyright 2016 Brocade Communications Systems Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
# """Unit tests for brcd fc zone client cli.""" from unittest import mock from oslo_concurrency import processutils from cinder import exception from cinder.tests.unit import test from cinder.zonemanager.drivers.brocade import (brcd_fc_zone_client_cli as client_cli) from cinder.zonemanager.drivers.brocade import exception as b_exception import cinder.zonemanager.drivers.brocade.fc_zone_constants as zone_constant nsshow = '20:1a:00:05:1e:e8:e3:29' switch_data = [' N 011a00;2,3;20:1a:00:05:1e:e8:e3:29;\ 20:1a:00:05:1e:e8:e3:29;na', ' Fabric Port Name: 20:1a:00:05:1e:e8:e3:29'] cfgactvshow = ['Effective configuration:\n', ' cfg:\tOpenStack_Cfg\t\n', ' zone:\topenstack50060b0000c26604201900051ee8e329\t\n', '\t\t50:06:0b:00:00:c2:66:04\n', '\t\t20:19:00:05:1e:e8:e3:29\n'] active_zoneset = { 'zones': { 'openstack50060b0000c26604201900051ee8e329': ['50:06:0b:00:00:c2:66:04', '20:19:00:05:1e:e8:e3:29']}, 'active_zone_config': 'OpenStack_Cfg'} active_zoneset_multiple_zones = { 'zones': { 'openstack50060b0000c26604201900051ee8e329': ['50:06:0b:00:00:c2:66:04', '20:19:00:05:1e:e8:e3:29'], 'openstack50060b0000c26602201900051ee8e327': ['50:06:0b:00:00:c2:66:02', '20:19:00:05:1e:e8:e3:27']}, 'active_zone_config': 'OpenStack_Cfg'} new_zone_memb_same = { 'openstack50060b0000c26604201900051ee8e329': ['50:06:0b:00:00:c2:66:04', '20:19:00:05:1e:e8:e3:29']} new_zone_memb_not_same = { 'openstack50060b0000c26604201900051ee8e330': ['50:06:0b:00:00:c2:66:04', '20:19:00:05:1e:e8:e3:30']} new_zone = {'openstack10000012345678902001009876543210': ['10:00:00:12:34:56:78:90', '20:01:00:98:76:54:32:10']} new_zones = {'openstack10000012345678902001009876543210': ['10:00:00:12:34:56:78:90', '20:01:00:98:76:54:32:10'], 'openstack10000011111111112001001111111111': ['10:00:00:11:11:11:11:11', '20:01:00:11:11:11:11:11']} zone_names_to_delete = 'openstack50060b0000c26604201900051ee8e329' supported_firmware = ['Kernel: 2.6', 'Fabric OS: v7.0.1'] unsupported_firmware = ['Fabric OS: v6.2.1'] class TestBrcdFCZoneClientCLI(client_cli.BrcdFCZoneClientCLI, test.TestCase): # override some of the functions def __init__(self, *args, **kwargs): test.TestCase.__init__(self, *args, **kwargs) @mock.patch.object(client_cli.BrcdFCZoneClientCLI, '_get_switch_info') def test_get_active_zone_set(self, get_switch_info_mock): cmd_list = [zone_constant.GET_ACTIVE_ZONE_CFG] get_switch_info_mock.return_value = cfgactvshow active_zoneset_returned = self.get_active_zone_set() get_switch_info_mock.assert_called_once_with(cmd_list) self.assertDictEqual(active_zoneset, active_zoneset_returned) @mock.patch.object(client_cli.BrcdFCZoneClientCLI, '_run_ssh') def test_get_active_zone_set_ssh_error(self, run_ssh_mock): run_ssh_mock.side_effect = processutils.ProcessExecutionError self.assertRaises(b_exception.BrocadeZoningCliException, self.get_active_zone_set) @mock.patch.object(client_cli.BrcdFCZoneClientCLI, 'get_active_zone_set') @mock.patch.object(client_cli.BrcdFCZoneClientCLI, 'apply_zone_change') @mock.patch.object(client_cli.BrcdFCZoneClientCLI, '_cfg_save') def test_add_zones_new_zone_no_activate(self, cfg_save_mock, apply_zone_change_mock, get_active_zs_mock): get_active_zs_mock.return_value = active_zoneset self.add_zones(new_zones, False, None) self.assertEqual(1, get_active_zs_mock.call_count) self.assertEqual(3, apply_zone_change_mock.call_count) cfg_save_mock.assert_called_once_with() @mock.patch.object(client_cli.BrcdFCZoneClientCLI, 'get_active_zone_set') @mock.patch.object(client_cli.BrcdFCZoneClientCLI, 'apply_zone_change') 
@mock.patch.object(client_cli.BrcdFCZoneClientCLI, 'activate_zoneset') def test_add_zones_new_zone_activate(self, activate_zoneset_mock, apply_zone_change_mock, get_active_zs_mock): get_active_zs_mock.return_value = active_zoneset self.add_zones(new_zone, True, active_zoneset) self.assertEqual(2, apply_zone_change_mock.call_count) activate_zoneset_mock.assert_called_once_with( active_zoneset['active_zone_config']) @mock.patch.object(client_cli.BrcdFCZoneClientCLI, 'get_active_zone_set') @mock.patch.object(client_cli.BrcdFCZoneClientCLI, 'activate_zoneset') @mock.patch.object(client_cli.BrcdFCZoneClientCLI, 'apply_zone_change') def test_update_zone_exists_memb_same(self, apply_zone_change_mock, activate_zoneset_mock, get_active_zs_mock): get_active_zs_mock.return_value = active_zoneset self.update_zones(new_zone_memb_same, True, zone_constant.ZONE_ADD, active_zoneset) self.assertEqual(1, apply_zone_change_mock.call_count) @mock.patch.object(client_cli.BrcdFCZoneClientCLI, 'get_active_zone_set') @mock.patch.object(client_cli.BrcdFCZoneClientCLI, 'activate_zoneset') @mock.patch.object(client_cli.BrcdFCZoneClientCLI, 'apply_zone_change') def test_update_zone_exists_memb_not_same(self, apply_zone_change_mock, activate_zoneset_mock, get_active_zs_mock): get_active_zs_mock.return_value = active_zoneset self.update_zones(new_zone_memb_not_same, True, zone_constant.ZONE_ADD, active_zoneset) self.assertEqual(1, apply_zone_change_mock.call_count) @mock.patch.object(client_cli.BrcdFCZoneClientCLI, 'get_active_zone_set') @mock.patch.object(client_cli.BrcdFCZoneClientCLI, 'activate_zoneset') @mock.patch.object(client_cli.BrcdFCZoneClientCLI, 'apply_zone_change') def test_add_zone_all_exists_memb_not_same(self, apply_zone_change_mock, activate_zoneset_mock, get_active_zs_mock): self.add_zones(new_zone_memb_not_same, True, active_zoneset) call_args = apply_zone_change_mock.call_args[0][0] self.assertEqual(0, get_active_zs_mock.call_count) self.assertEqual(2, apply_zone_change_mock.call_count) self.assertIn(zone_constant.CFG_ADD.strip(), call_args) @mock.patch.object(client_cli.BrcdFCZoneClientCLI, '_ssh_execute') def test_activate_zoneset(self, ssh_execute_mock): ssh_execute_mock.return_value = True return_value = self.activate_zoneset('zoneset1') self.assertTrue(return_value) @mock.patch.object(client_cli.BrcdFCZoneClientCLI, '_ssh_execute') def test_deactivate_zoneset(self, ssh_execute_mock): ssh_execute_mock.return_value = True return_value = self.deactivate_zoneset() self.assertTrue(return_value) @mock.patch.object(client_cli.BrcdFCZoneClientCLI, 'apply_zone_change') @mock.patch.object(client_cli.BrcdFCZoneClientCLI, '_cfg_save') def test_delete_zones_activate_false(self, cfg_save_mock, apply_zone_change_mock): with mock.patch.object(self, '_zone_delete') as zone_delete_mock: self.delete_zones(zone_names_to_delete, False, active_zoneset_multiple_zones) self.assertEqual(1, apply_zone_change_mock.call_count) zone_delete_mock.assert_called_once_with(zone_names_to_delete) cfg_save_mock.assert_called_once_with() @mock.patch.object(client_cli.BrcdFCZoneClientCLI, 'apply_zone_change') @mock.patch.object(client_cli.BrcdFCZoneClientCLI, 'activate_zoneset') def test_delete_zones_activate_true(self, activate_zs_mock, apply_zone_change_mock): with mock.patch.object(self, '_zone_delete') \ as zone_delete_mock: self.delete_zones(zone_names_to_delete, True, active_zoneset_multiple_zones) self.assertEqual(1, apply_zone_change_mock.call_count) zone_delete_mock.assert_called_once_with(zone_names_to_delete) 
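        # With activate=True, the driver is also expected to re-activate the
        # active zone configuration after the zones have been removed.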
activate_zs_mock.assert_called_once_with( active_zoneset['active_zone_config']) @mock.patch.object(client_cli.BrcdFCZoneClientCLI, '_get_switch_info') def test_get_nameserver_info(self, get_switch_info_mock): ns_info_list_expected = ['20:1a:00:05:1e:e8:e3:29'] get_switch_info_mock.return_value = (switch_data) ns_info_list = self.get_nameserver_info() self.assertEqual(ns_info_list_expected, ns_info_list) @mock.patch.object(client_cli.BrcdFCZoneClientCLI, '_run_ssh') def test_get_nameserver_info_ssh_error(self, run_ssh_mock): run_ssh_mock.side_effect = processutils.ProcessExecutionError self.assertRaises(b_exception.BrocadeZoningCliException, self.get_nameserver_info) @mock.patch.object(client_cli.BrcdFCZoneClientCLI, '_ssh_execute') def test__cfg_save(self, ssh_execute_mock): cmd_list = [zone_constant.CFG_SAVE] self._cfg_save() ssh_execute_mock.assert_called_once_with(cmd_list, True, 1) @mock.patch.object(client_cli.BrcdFCZoneClientCLI, 'apply_zone_change') def test__zone_delete(self, apply_zone_change_mock): zone_name = 'testzone' cmd_list = ['zonedelete', '"testzone"'] self._zone_delete(zone_name) apply_zone_change_mock.assert_called_once_with(cmd_list) @mock.patch.object(client_cli.BrcdFCZoneClientCLI, 'apply_zone_change') def test__cfg_trans_abort(self, apply_zone_change_mock): cmd_list = [zone_constant.CFG_ZONE_TRANS_ABORT] with mock.patch.object(self, '_is_trans_abortable') \ as is_trans_abortable_mock: is_trans_abortable_mock.return_value = True self._cfg_trans_abort() is_trans_abortable_mock.assert_called_once_with() apply_zone_change_mock.assert_called_once_with(cmd_list) @mock.patch.object(client_cli.BrcdFCZoneClientCLI, '_run_ssh') def test__is_trans_abortable_true(self, run_ssh_mock): cmd_list = [zone_constant.CFG_SHOW_TRANS] run_ssh_mock.return_value = (Stream(zone_constant.TRANS_ABORTABLE), None) data = self._is_trans_abortable() self.assertTrue(data) run_ssh_mock.assert_called_once_with(cmd_list, True, 1) @mock.patch.object(client_cli.BrcdFCZoneClientCLI, '_run_ssh') def test__is_trans_abortable_ssh_error(self, run_ssh_mock): run_ssh_mock.return_value = (Stream(), Stream()) self.assertRaises(b_exception.BrocadeZoningCliException, self._is_trans_abortable) @mock.patch.object(client_cli.BrcdFCZoneClientCLI, '_run_ssh') def test__is_trans_abortable_false(self, run_ssh_mock): cmd_list = [zone_constant.CFG_SHOW_TRANS] cfgtransshow = 'There is no outstanding zoning transaction' run_ssh_mock.return_value = (Stream(cfgtransshow), None) data = self._is_trans_abortable() self.assertFalse(data) run_ssh_mock.assert_called_once_with(cmd_list, True, 1) @mock.patch.object(client_cli.BrcdFCZoneClientCLI, '_run_ssh') def test_apply_zone_change(self, run_ssh_mock): cmd_list = [zone_constant.CFG_SAVE] run_ssh_mock.return_value = (None, None) self.apply_zone_change(cmd_list) run_ssh_mock.assert_called_once_with(cmd_list, True, 1) @mock.patch.object(client_cli.BrcdFCZoneClientCLI, '_run_ssh') def test__get_switch_info(self, run_ssh_mock): cmd_list = [zone_constant.NS_SHOW] nsshow_list = [nsshow] run_ssh_mock.return_value = (Stream(nsshow), Stream()) switch_data = self._get_switch_info(cmd_list) self.assertEqual(nsshow_list, switch_data) run_ssh_mock.assert_called_once_with(cmd_list, True, 1) def test__parse_ns_output(self): invalid_switch_data = [' N 011a00;20:1a:00:05:1e:e8:e3:29'] expected_wwn_list = ['20:1a:00:05:1e:e8:e3:29'] return_wwn_list = self._parse_ns_output(switch_data) self.assertEqual(expected_wwn_list, return_wwn_list) self.assertRaises(exception.InvalidParameterValue, 
self._parse_ns_output, invalid_switch_data) @mock.patch.object(client_cli.BrcdFCZoneClientCLI, '_execute_shell_cmd') def test_is_supported_firmware(self, exec_shell_cmd_mock): exec_shell_cmd_mock.return_value = (supported_firmware, None) self.assertTrue(self.is_supported_firmware()) @mock.patch.object(client_cli.BrcdFCZoneClientCLI, '_execute_shell_cmd') def test_is_supported_firmware_invalid(self, exec_shell_cmd_mock): exec_shell_cmd_mock.return_value = (unsupported_firmware, None) self.assertFalse(self.is_supported_firmware()) @mock.patch.object(client_cli.BrcdFCZoneClientCLI, '_execute_shell_cmd') def test_is_supported_firmware_no_ssh_response(self, exec_shell_cmd_mock): exec_shell_cmd_mock.return_value = (None, Stream()) self.assertFalse(self.is_supported_firmware()) @mock.patch.object(client_cli.BrcdFCZoneClientCLI, '_execute_shell_cmd') def test_is_supported_firmware_ssh_error(self, exec_shell_cmd_mock): exec_shell_cmd_mock.side_effect = processutils.ProcessExecutionError self.assertRaises(b_exception.BrocadeZoningCliException, self.is_supported_firmware) class Channel(object): def recv_exit_status(self): return 0 class Stream(object): def __init__(self, buffer=''): self.buffer = buffer self.channel = Channel() def readlines(self): return self.buffer def splitlines(self): return self.buffer.splitlines() def close(self): pass def flush(self): self.buffer = '' ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/zonemanager/test_brcd_fc_zone_driver.py0000664000175000017500000002564000000000000026372 0ustar00zuulzuul00000000000000# (c) Copyright 2014 Brocade Communications Systems Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
# """Unit tests for Brocade fc zone driver.""" from unittest import mock from oslo_utils import importutils import paramiko import requests from cinder import exception from cinder.tests.unit import test from cinder.volume import configuration as conf from cinder.zonemanager.drivers.brocade import brcd_fabric_opts as fabric_opts from cinder.zonemanager.drivers.brocade import brcd_fc_zone_driver as driver from cinder.zonemanager import fc_zone_manager as zmanager _zone_name = 'openstack_fab1_10008c7cff523b0120240002ac000a50' _zone_name_initiator_mode = 'openstack_fab1_10008c7cff523b01' WWNS = ['10:00:8c:7c:ff:52:3b:01', '20:24:00:02:ac:00:0a:50'] _active_cfg_before_add = {} _activate = True _target_ns_map = {'100000051e55a100': ['20240002ac000a50']} _initiator_ns_map = {'100000051e55a100': ['10008c7cff523b01']} _zone_map_to_add = {_zone_name: WWNS} _initiator_target_map = {'10008c7cff523b01': ['20240002ac000a50']} _device_map_to_verify = { '100000051e55a100': { 'initiator_port_wwn_list': [ '10008c7cff523b01'], 'target_port_wwn_list': ['20240002ac000a50']}} _fabric_wwn = '100000051e55a100' class BrcdFcZoneDriverBaseTest(object): def _set_conf_overrides(self, group, **kwargs): for name, value in kwargs.items(): self.override_config(name, value, group) def setup_config(self, is_normal, mode): self.override_config('zoning_mode', 'fabric') fabric_group_name = 'BRCD_FAB_1' self._set_conf_overrides( 'fc-zone-manager', zone_driver=('cinder.tests.unit.zonemanager.' 'test_brcd_fc_zone_driver.FakeBrcdFCZoneDriver'), brcd_sb_connector=('cinder.tests.unit.zonemanager.' 'test_brcd_fc_zone_driver.' 'FakeBrcdFCZoneClientCLI'), zoning_policy='initiator-target', fc_san_lookup_service=('cinder.tests.unit.zonemanager.' 'test_brcd_fc_zone_driver.' 'FakeBrcdFCSanLookupService'), fc_fabric_names=fabric_group_name, ) # Ensure that we have the fabric_name group conf.Configuration(fabric_opts.brcd_zone_opts, fabric_group_name) self._set_conf_overrides( fabric_group_name, fc_fabric_address='10.24.48.213', fc_fabric_password='password', fc_fabric_user='admin' if is_normal else 'invaliduser', zoning_policy='initiator' if mode == 2 else 'initiator-target', zone_activate=True, zone_name_prefix='openstack_fab1_', fc_southbound_protocol='SSH', ) configuration = conf.Configuration(zmanager.zone_manager_opts, 'fc-zone-manager') return configuration class TestBrcdFcZoneDriver(BrcdFcZoneDriverBaseTest, test.TestCase): def setUp(self): global GlobalVars GlobalVars = GlobalVarsClass() super(TestBrcdFcZoneDriver, self).setUp() # setup config for normal flow self.setup_driver(self.setup_config(True, 1)) def setup_driver(self, config): self.driver = importutils.import_object( 'cinder.zonemanager.drivers.brocade.brcd_fc_zone_driver' '.BrcdFCZoneDriver', configuration=config) def fake__get_active_zone_set(self, brcd_sb_connector, fabric_ip): return GlobalVars._active_cfg def get_client(self, protocol='HTTPS'): conn = ('cinder.tests.unit.zonemanager.test_brcd_fc_zone_driver.' 
+ ('FakeBrcdFCZoneClientCLI' if protocol == "CLI" else 'FakeBrcdHttpFCZoneClient')) client = importutils.import_object( conn, ipaddress="10.24.48.213", username="admin", password="password", key="/home/stack/.ssh/id_rsa", port=22, vfid="2", protocol=protocol ) return client @mock.patch.object(driver.BrcdFCZoneDriver, '_get_southbound_client') def test_add_connection(self, get_southbound_client_mock): """Normal flow for i-t mode.""" GlobalVars._is_normal_test = True GlobalVars._zone_state = [] GlobalVars._active_cfg = _active_cfg_before_add get_southbound_client_mock.return_value = self.get_client("HTTPS") self.driver.add_connection('BRCD_FAB_1', _initiator_target_map) self.assertIn(_zone_name, GlobalVars._zone_state) def _active_cfg_before_delete(self, mode): zone_name = _zone_name if mode == 1 else _zone_name_initiator_mode return {'zones': {zone_name: WWNS, 't_zone': ['1,0']}, 'active_zone_config': 'cfg1'} @mock.patch.object(driver.BrcdFCZoneDriver, '_get_southbound_client') def test_delete_connection(self, get_southbound_client_mock): GlobalVars._is_normal_test = True GlobalVars._zone_state.append(_zone_name) get_southbound_client_mock.return_value = self.get_client("CLI") GlobalVars._active_cfg = self._active_cfg_before_delete(mode=1) self.driver.delete_connection( 'BRCD_FAB_1', _initiator_target_map) self.assertNotIn(_zone_name, GlobalVars._zone_state) @mock.patch.object(driver.BrcdFCZoneDriver, '_get_southbound_client') def test_add_connection_for_initiator_mode(self, get_southbound_client_mk): """Normal flow for i mode.""" GlobalVars._is_normal_test = True get_southbound_client_mk.return_value = self.get_client("CLI") GlobalVars._active_cfg = _active_cfg_before_add self.setup_driver(self.setup_config(True, 2)) self.driver.add_connection('BRCD_FAB_1', _initiator_target_map) self.assertIn(_zone_name_initiator_mode, GlobalVars._zone_state) @mock.patch.object(driver.BrcdFCZoneDriver, '_get_southbound_client') def test_delete_connection_for_initiator_mode(self, get_southbound_client_mk): GlobalVars._is_normal_test = True GlobalVars._zone_state.append(_zone_name_initiator_mode) get_southbound_client_mk.return_value = self.get_client("HTTPS") GlobalVars._active_cfg = self._active_cfg_before_delete(mode=2) self.setup_driver(self.setup_config(True, 2)) self.driver.delete_connection( 'BRCD_FAB_1', _initiator_target_map) self.assertNotIn(_zone_name_initiator_mode, GlobalVars._zone_state) @mock.patch('cinder.zonemanager.drivers.brocade.brcd_fc_zone_client_cli.' 'BrcdFCZoneClientCLI.__init__', side_effect=Exception) def test_add_connection_for_invalid_fabric(self, create_client_mock): """Test abnormal flows.""" GlobalVars._active_cfg = _active_cfg_before_add self.setup_driver(self.setup_config(False, 1)) self.assertRaises(exception.FCZoneDriverException, self.driver.add_connection, 'BRCD_FAB_1', _initiator_target_map) @mock.patch('cinder.zonemanager.drivers.brocade.brcd_fc_zone_client_cli.' 
'BrcdFCZoneClientCLI.__init__', side_effect=Exception) def test_delete_connection_for_invalid_fabric(self, create_client_mock): GlobalVars._active_cfg = self._active_cfg_before_delete(mode=1) GlobalVars._is_normal_test = False self.setup_driver(self.setup_config(False, 1)) self.assertRaises(exception.FCZoneDriverException, self.driver.delete_connection, 'BRCD_FAB_1', _initiator_target_map) @mock.patch.object(driver.BrcdFCZoneDriver, '_get_southbound_client') def test_get_san_context(self, client_mock): GlobalVars._is_normal_test = True self.setup_driver(self.setup_config(True, 1)) get_ns_mock = client_mock.return_value.get_nameserver_info wwn = '20:24:00:02:ac:00:0a:50' get_ns_mock.return_value = [wwn] expected = {'BRCD_FAB_1': ['20240002ac000a50']} res = self.driver.get_san_context([WWNS[0], wwn.upper()]) client_mock.assert_called_once_with('BRCD_FAB_1') client_mock.return_value.cleanup.assert_called_once_with() get_ns_mock.assert_called_once_with() self.assertEqual(expected, res) class FakeClient(object): def get_active_zone_set(self): return GlobalVars._active_cfg def add_zones(self, zones, isActivate, active_zone_set): GlobalVars._zone_state.extend(zones.keys()) def delete_zones(self, zone_names, isActivate, active_zone_set): zone_list = zone_names.split(';') GlobalVars._zone_state = [ x for x in GlobalVars._zone_state if x not in zone_list] def is_supported_firmware(self): return True def get_nameserver_info(self): return _target_ns_map def close_connection(self): pass def cleanup(self): pass class FakeBrcdFCZoneClientCLI(FakeClient): def __init__(self, ipaddress, username, password, port, key, vfid, protocol): self.firmware_supported = True if not GlobalVars._is_normal_test: raise paramiko.SSHException("Unable to connect to fabric.") class FakeBrcdHttpFCZoneClient(FakeClient): def __init__(self, ipaddress, username, password, port, key, vfid, protocol): self.firmware_supported = True if not GlobalVars._is_normal_test: raise requests.exception.HTTPError("Unable to connect to fabric") class FakeBrcdFCSanLookupService(object): def get_device_mapping_from_network(self, initiator_wwn_list, target_wwn_list): device_map = {} initiators = [] targets = [] for i in initiator_wwn_list: if i in _initiator_ns_map[_fabric_wwn]: initiators.append(i) for t in target_wwn_list: if t in _target_ns_map[_fabric_wwn]: targets.append(t) device_map[_fabric_wwn] = { 'initiator_port_wwn_list': initiators, 'target_port_wwn_list': targets} return device_map class GlobalVarsClass(object): def __init__(self): self._active_cfg = {} self._zone_state = list() self._is_normal_test = True ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/zonemanager/test_brcd_http_fc_zone_client.py0000664000175000017500000006665300000000000027425 0ustar00zuulzuul00000000000000# (c) Copyright 2016 Brocade Communications Systems Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
# """Unit tests for brcd fc zone client http(s).""" import time from unittest import mock from unittest.mock import patch from oslo_utils import encodeutils from cinder.tests.unit import test from cinder.zonemanager.drivers.brocade import (brcd_http_fc_zone_client as client) from cinder.zonemanager.drivers.brocade import exception as b_exception import cinder.zonemanager.drivers.brocade.fc_zone_constants as zone_constant cfgs = {'openstack_cfg': 'zone1;zone2'} cfgs_to_delete = { 'openstack_cfg': 'zone1;zone2;openstack50060b0000c26604201900051ee8e329'} zones = {'zone1': '20:01:00:05:33:0e:96:15;20:00:00:05:33:0e:93:11', 'zone2': '20:01:00:05:33:0e:96:14;20:00:00:05:33:0e:93:11'} zones_to_delete = { 'zone1': '20:01:00:05:33:0e:96:15;20:00:00:05:33:0e:93:11', 'zone2': '20:01:00:05:33:0e:96:14;20:00:00:05:33:0e:93:11', 'openstack50060b0000c26604201900051ee8e329': '50:06:0b:00:00:c2:66:04;20:19:00:05:1e:e8:e3:29'} alias = {} qlps = {} ifas = {} parsed_raw_zoneinfo = "" random_no = '' auth_version = '' session = None active_cfg = 'openstack_cfg' activate = True no_activate = False vf_enable = True ns_info = ['10:00:00:05:1e:7c:64:96'] nameserver_info = """ NSInfo Page
--BEGIN DEVICEPORT 10:00:00:05:1e:7c:64:96
node.wwn=20:00:00:05:1e:7c:64:96
deviceport.portnum=9
deviceport.portid=300900
deviceport.portIndex=9
deviceport.porttype=N
deviceport.portwwn=10:00:00:05:1e:7c:64:96
--END DEVICEPORT 10:00:00:05:1e:7c:64:96
""" mocked_zone_string = 'zonecfginfo=openstack_cfg zone1;zone2 '\ 'zone1 20:01:00:05:33:0e:96:15;20:00:00:05:33:0e:93:11 '\ 'zone2 20:01:00:05:33:0e:96:14;20:00:00:05:33:0e:93:11 '\ 'alia1 10:00:00:05:1e:7c:64:96;10:21:10:05:33:0e:96:12 '\ 'qlp 10:11:f4:ce:46:ae:68:6c;20:11:f4:ce:46:ae:68:6c '\ 'fa1 20:15:f4:ce:96:ae:68:6c;20:11:f4:ce:46:ae:68:6c '\ 'openstack_cfg null &saveonly=false' mocked_zone_string_no_activate = 'zonecfginfo=openstack_cfg zone1;zone2 '\ 'zone1 20:01:00:05:33:0e:96:15;20:00:00:05:33:0e:93:11 '\ 'zone2 20:01:00:05:33:0e:96:14;20:00:00:05:33:0e:93:11 '\ 'alia1 10:00:00:05:1e:7c:64:96;10:21:10:05:33:0e:96:12 '\ 'qlp 10:11:f4:ce:46:ae:68:6c;20:11:f4:ce:46:ae:68:6c '\ 'fa1 20:15:f4:ce:96:ae:68:6c;20:11:f4:ce:46:ae:68:6c &saveonly=true' zone_string_to_post = "zonecfginfo=openstack_cfg "\ "openstack50060b0000c26604201900051ee8e329;zone1;zone2 "\ "openstack50060b0000c26604201900051ee8e329 "\ "50:06:0b:00:00:c2:66:04;20:19:00:05:1e:e8:e3:29 "\ "zone1 20:01:00:05:33:0e:96:15;20:00:00:05:33:0e:93:11 "\ "zone2 20:01:00:05:33:0e:96:14;20:00:00:05:33:0e:93:11 "\ "openstack_cfg null &saveonly=false" zone_string_to_post_no_activate = "zonecfginfo=openstack_cfg "\ "openstack50060b0000c26604201900051ee8e329;zone1;zone2 "\ "openstack50060b0000c26604201900051ee8e329 "\ "50:06:0b:00:00:c2:66:04;20:19:00:05:1e:e8:e3:29 " \ "zone1 20:01:00:05:33:0e:96:15;20:00:00:05:33:0e:93:11 "\ "zone2 20:01:00:05:33:0e:96:14;20:00:00:05:33:0e:93:11 "\ "&saveonly=true" zone_string_to_post_invalid_request = "zonecfginfo=openstack_cfg "\ "openstack50060b0000c26604201900051ee8e32900000000000000000000000000;"\ "zone1;zone2 "\ "openstack50060b0000c26604201900051ee8e329000000000000000000000"\ "00000 50:06:0b:00:00:c2:66:04;20:19:00:05:1e:e8:e3:29 "\ "zone1 20:01:00:05:33:0e:96:15;20:00:00:05:33:0e:93:11 "\ "zone2 20:01:00:05:33:0e:96:14;20:00:00:05:33:0e:93:11 &saveonly=true" zone_string_del_to_post = "zonecfginfo=openstack_cfg zone1;zone2"\ " zone1 20:01:00:05:33:0e:96:15;20:00:00:05:33:0e:93:11 "\ "zone2 20:01:00:05:33:0e:96:14;20:00:00:05:33:0e:93:11 "\ "openstack_cfg null &saveonly=false" zone_string_del_to_post_no_active = "zonecfginfo=openstack_cfg zone1;zone2"\ " zone1 20:01:00:05:33:0e:96:15;20:00:00:05:33:0e:93:11 " \ "zone2 20:01:00:05:33:0e:96:14;20:00:00:05:33:0e:93:11 "\ "&saveonly=true" zone_post_page = """
--BEGIN ZONE_TXN_INFO
txnId=34666
adId=0
user=admin
roleUser=admin
openTxnOwner=
openTxnId=0
openTxnAbortable=0
txnStarttime=1421916354
txnEndtime=1421916355
currStateInt=4
prevStateInt=3
actionInt=5
currState=done
prevState=progress
action=error
sessionId=5892021
selfAborted=false
status=done
errorCode=-1
errorMessage=Name too long
--END ZONE_TXN_INFO
""" zone_post_page_no_error = """
--BEGIN ZONE_TXN_INFO
txnId=34666
adId=0
user=admin
roleUser=admin
openTxnOwner=
openTxnId=0
openTxnAbortable=0
txnStarttime=1421916354
txnEndtime=1421916355
currStateInt=4
prevStateInt=3
actionInt=5
currState=done
prevState=progress
action=error
sessionId=5892021
selfAborted=false
status=done
errorCode=0
errorMessage=
--END ZONE_TXN_INFO
""" secinfo_resp = """
--BEGIN SECINFO
SECURITY = OFF
RANDOM = 6281590
DefaultPasswdBitmap = 0
primaryFCS = no
switchType = 66
resource = 10.24.48.210
REALM = FC Switch Administration
AUTHMETHOD = Custom_Basic
hasUpfrontLogin=yes
AUTHVERSION = 1
vfEnabled=false
vfSupported=true
--END SECINFO
""" authenticate_resp = """
--BEGIN AUTHENTICATE
authenticated = yes
username=admin
userrole=admin
adCapable=1
currentAD=AD0
trueADEnvironment=0
adId=0
adList=ALL
contextType=0
--END AUTHENTICATE
""" un_authenticate_resp = """ Authentication
--BEGIN AUTHENTICATE
authenticated = no
errCode = -3
authType = Custom_Basic
realm = FC Switch Administration
--END AUTHENTICATE
""" switch_page_resp = """
--BEGIN SWITCH INFORMATION
didOffset=96
swFWVersion=v7.3.0b_rc1_bld06
swDomain=2
--END SWITCH INFORMATION
""" switch_page_invalid_firm = """
--BEGIN SWITCH INFORMATION
didOffset=96
swFWVersion=v6.1.1
swDomain=2
--END SWITCH INFORMATION
""" parsed_value = """ didOffset=96 swFWVersion=v7.3.0b_rc1_bld06 swDomain=2 """ parsed_session_info_vf = """ sessionId=524461483 user=admin userRole=admin isAdminRole=Yes authSource=0 sessionIp=172.26.1.146 valid=yes adName= adId=128 adCapable=1 currentAD=AD0 currentADId=0 homeAD=AD0 trueADEnvironment=0 adList= adIdList= pfAdmin=0 switchIsMember=0 definedADList=AD0,Physical Fabric definedADIdList=0,255, effectiveADList=AD0,Physical Fabric rc=0 err= contextType=1 vfEnabled=true vfSupported=true HomeVF=128 sessionLFId=2 isContextManageable=1 manageableLFList=2,128, activeLFList=128,2, """ session_info_vf = """
--BEGIN SESSION
sessionId=524461483
user=admin
userRole=admin
isAdminRole=Yes
authSource=0
sessionIp=172.26.1.146
valid=yes
adName=
adId=128
adCapable=1
currentAD=AD0
currentADId=0
homeAD=AD0
trueADEnvironment=0
adList=
adIdList=
pfAdmin=0
switchIsMember=0
definedADList=AD0,Physical Fabric
definedADIdList=0,255,
effectiveADList=AD0,Physical Fabric
rc=0
err=
contextType=1
vfEnabled=true
vfSupported=true
HomeVF=128
sessionLFId=2
isContextManageable=1
manageableLFList=2,128,
activeLFList=128,2,
--END SESSION
""" session_info_vf_not_changed = """
--BEGIN SESSION
sessionId=524461483
user=admin
userRole=admin
isAdminRole=Yes
authSource=0
sessionIp=172.26.1.146
User-Agent=Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML,
valid=yes
adName=
adId=128
adCapable=1
currentAD=AD0
currentADId=0
homeAD=AD0
trueADEnvironment=0
adList=
adIdList=
pfAdmin=0
switchIsMember=0
definedADList=AD0,Physical Fabric
definedADIdList=0,255,
effectiveADList=AD0,Physical Fabric
rc=0
err=
contextType=1
vfEnabled=true
vfSupported=true
HomeVF=128
sessionLFId=128
isContextManageable=1
manageableLFList=2,128,
activeLFList=128,2,
--END SESSION
""" session_info_AD = """ Webtools Session Info
--BEGIN SESSION
sessionId=-2096740776
user=
userRole=root
isAdminRole=No
authSource=0
sessionIp=
User-Agent=
valid=no
adName=
adId=0
adCapable=1
currentAD=AD0
currentADId=0
homeAD=AD0
trueADEnvironment=0
adList=
adIdList=
pfAdmin=0
switchIsMember=1
definedADList=AD0,Physical Fabric
definedADIdList=0,255,
effectiveADList=AD0,Physical Fabric
rc=-2
err=Could not obtain session data from store
contextType=0
--END SESSION
""" zone_info = """ Zone Configuration Information
--BEGIN ZONE CHANGE
LastZoneChangeTime=1421926251
--END ZONE CHANGE
isZoneTxnSupported=true
ZoneLicense=true
QuickLoopLicense=true
DefZoneStatus=noaccess
McDataDefaultZone=false
McDataSafeZone=false
AvailableZoneSize=1043890
--BEGIN ZONE INFO
openstack_cfg zone1;zone2 """\
"""zone1 20:01:00:05:33:0e:96:15;20:00:00:05:33:0e:93:11 """\
    """zone2 20:01:00:05:33:0e:96:14;20:00:00:05:33:0e:93:11 """\
    """alia1 10:00:00:05:1e:7c:64:96;10:21:10:05:33:0e:96:12 """\
    """qlp 10:11:f4:ce:46:ae:68:6c;20:11:f4:ce:46:ae:68:6c """\
    """fa1 20:15:f4:ce:96:ae:68:6c;20:11:f4:ce:46:ae:68:6c """\
    """openstack_cfg null 1045274"""\
    """--END ZONE INFO
""" active_zone_set = { 'zones': {'zone1': ['20:01:00:05:33:0e:96:15', '20:00:00:05:33:0e:93:11'], 'zone2': ['20:01:00:05:33:0e:96:14', '20:00:00:05:33:0e:93:11']}, 'active_zone_config': 'openstack_cfg'} updated_zones = {'zone1': '20:01:00:05:33:0e:96:15;20:00:00:05:33:0e:93:11', 'zone2': '20:01:00:05:33:0e:96:14;20:00:00:05:33:0e:93:11', 'test_updated_zone': '20:01:00:05:33:0e:96:10;20:00:00:05:33:0e:93:11'} updated_cfgs = {'openstack_cfg': 'test_updated_zone;zone1;zone2'} valid_zone_name = "openstack50060b0000c26604201900051ee8e329" class TestBrcdHttpFCZoneClient(client.BrcdHTTPFCZoneClient, test.TestCase): def setUp(self): self.auth_header = "YWRtaW46cGFzc3dvcmQ6NDM4ODEyNTIw" self.switch_user = "admin" self.switch_pwd = "password" self.protocol = "HTTPS" self.conn = None self.alias = {} self.qlps = {} self.ifas = {} self.parsed_raw_zoneinfo = "" self.random_no = '' self.auth_version = '' self.session = None super(TestBrcdHttpFCZoneClient, self).setUp() # override some of the functions def __init__(self, *args, **kwargs): test.TestCase.__init__(self, *args, **kwargs) @patch.object(client.BrcdHTTPFCZoneClient, 'connect') def test_create_auth_token(self, connect_mock): connect_mock.return_value = secinfo_resp self.assertEqual("Custom_Basic YWRtaW46cGFzc3dvcmQ6NjI4MTU5MA==", self.create_auth_token()) @patch.object(client.BrcdHTTPFCZoneClient, 'connect') def test_authenticate(self, connect_mock): connect_mock.return_value = authenticate_resp self.assertEqual( (True, "Custom_Basic YWRtaW46eHh4Og=="), self.authenticate()) @patch.object(client.BrcdHTTPFCZoneClient, 'connect') def test_authenticate_failed(self, connect_mock): connect_mock.return_value = un_authenticate_resp self.assertRaises( b_exception.BrocadeZoningHttpException, self.authenticate) def test_get_parsed_data(self): valid_delimiter1 = zone_constant.SWITCHINFO_BEGIN valid_delimiter2 = zone_constant.SWITCHINFO_END invalid_delimiter = "--END SWITCH INFORMATION1" self.assertEqual(parsed_value, self.get_parsed_data( switch_page_resp, valid_delimiter1, valid_delimiter2)) self.assertRaises(b_exception.BrocadeZoningHttpException, self.get_parsed_data, switch_page_resp, valid_delimiter1, invalid_delimiter) self.assertRaises(b_exception.BrocadeZoningHttpException, self.get_parsed_data, switch_page_resp, invalid_delimiter, valid_delimiter2) def test_get_nvp_value(self): valid_keyname = zone_constant.FIRMWARE_VERSION invalid_keyname = "swFWVersion1" self.assertEqual( "v7.3.0b_rc1_bld06", self.get_nvp_value(parsed_value, valid_keyname)) self.assertRaises(b_exception.BrocadeZoningHttpException, self.get_nvp_value, parsed_value, invalid_keyname) def test_get_managable_vf_list(self): manageable_list = ['2', '128'] self.assertEqual( manageable_list, self.get_managable_vf_list(session_info_vf)) self.assertRaises(b_exception.BrocadeZoningHttpException, self.get_managable_vf_list, session_info_AD) @mock.patch.object(client.BrcdHTTPFCZoneClient, 'is_vf_enabled') def test_check_change_vf_context_vf_enabled(self, is_vf_enabled_mock): is_vf_enabled_mock.return_value = (True, session_info_vf) self.vfid = None self.assertRaises( b_exception.BrocadeZoningHttpException, self.check_change_vf_context) self.vfid = "2" with mock.patch.object(self, 'change_vf_context') \ as change_vf_context_mock: self.check_change_vf_context() change_vf_context_mock.assert_called_once_with( self.vfid, session_info_vf) @mock.patch.object(client.BrcdHTTPFCZoneClient, 'is_vf_enabled') def test_check_change_vf_context_vf_disabled(self, is_vf_enabled_mock): 
is_vf_enabled_mock.return_value = (False, session_info_AD) self.vfid = "128" self.assertRaises( b_exception.BrocadeZoningHttpException, self.check_change_vf_context) @mock.patch.object(client.BrcdHTTPFCZoneClient, 'get_managable_vf_list') @mock.patch.object(client.BrcdHTTPFCZoneClient, 'connect') def test_change_vf_context_valid(self, connect_mock, get_managable_vf_list_mock): get_managable_vf_list_mock.return_value = ['2', '128'] connect_mock.return_value = session_info_vf self.assertIsNone(self.change_vf_context("2", session_info_vf)) data = zone_constant.CHANGE_VF.format(vfid="2") headers = {zone_constant.AUTH_HEADER: self.auth_header} connect_mock.assert_called_once_with( zone_constant.POST_METHOD, zone_constant.SESSION_PAGE, data, headers) @mock.patch.object(client.BrcdHTTPFCZoneClient, 'get_managable_vf_list') @mock.patch.object(client.BrcdHTTPFCZoneClient, 'connect') def test_change_vf_context_vf_not_changed(self, connect_mock, get_managable_vf_list_mock): get_managable_vf_list_mock.return_value = ['2', '128'] connect_mock.return_value = session_info_vf_not_changed self.assertRaises(b_exception.BrocadeZoningHttpException, self.change_vf_context, "2", session_info_vf) data = zone_constant.CHANGE_VF.format(vfid="2") headers = {zone_constant.AUTH_HEADER: self.auth_header} connect_mock.assert_called_once_with( zone_constant.POST_METHOD, zone_constant.SESSION_PAGE, data, headers) @mock.patch.object(client.BrcdHTTPFCZoneClient, 'get_managable_vf_list') def test_change_vf_context_vfid_not_managaed(self, get_managable_vf_list_mock): get_managable_vf_list_mock.return_value = ['2', '128'] self.assertRaises(b_exception.BrocadeZoningHttpException, self.change_vf_context, "12", session_info_vf) @patch.object(client.BrcdHTTPFCZoneClient, 'connect') def test_is_supported_firmware(self, connect_mock): connect_mock.return_value = switch_page_resp self.assertTrue(self.is_supported_firmware()) @patch.object(client.BrcdHTTPFCZoneClient, 'connect') def test_is_supported_firmware_invalid(self, connect_mock): connect_mock.return_value = switch_page_invalid_firm self.assertFalse(self.is_supported_firmware()) @patch.object(client.BrcdHTTPFCZoneClient, 'connect') def test_get_active_zone_set(self, connect_mock): connect_mock.return_value = zone_info returned_zone_map = self.get_active_zone_set() self.assertDictEqual(active_zone_set, returned_zone_map) def test_form_zone_string(self): new_alias = { 'alia1': u'10:00:00:05:1e:7c:64:96;10:21:10:05:33:0e:96:12'} new_qlps = {'qlp': u'10:11:f4:ce:46:ae:68:6c;20:11:f4:ce:46:ae:68:6c'} new_ifas = {'fa1': u'20:15:f4:ce:96:ae:68:6c;20:11:f4:ce:46:ae:68:6c'} self.assertEqual(type(self.form_zone_string( cfgs, active_cfg, zones, new_alias, new_qlps, new_ifas, True)), bytes) self.assertEqual( encodeutils.safe_encode(mocked_zone_string), self.form_zone_string( cfgs, active_cfg, zones, new_alias, new_qlps, new_ifas, True)) self.assertEqual( encodeutils.safe_encode(mocked_zone_string_no_activate), self.form_zone_string( cfgs, active_cfg, zones, new_alias, new_qlps, new_ifas, False)) @patch.object(client.BrcdHTTPFCZoneClient, 'post_zone_data') def test_add_zones_activate(self, post_zone_data_mock): post_zone_data_mock.return_value = ("0", "") self.cfgs = cfgs.copy() self.zones = zones.copy() self.alias = alias.copy() self.qlps = qlps.copy() self.ifas = ifas.copy() self.active_cfg = active_cfg add_zones_info = {valid_zone_name: ['50:06:0b:00:00:c2:66:04', '20:19:00:05:1e:e8:e3:29'] } self.add_zones(add_zones_info, True) post_zone_data_mock.assert_called_once_with( 
encodeutils.safe_encode(zone_string_to_post)) @patch.object(client.BrcdHTTPFCZoneClient, 'post_zone_data') def test_add_zones_invalid_zone_name(self, post_zone_data_mock): post_zone_data_mock.return_value = ("-1", "Name Too Long") self.cfgs = cfgs.copy() self.zones = zones.copy() self.alias = alias.copy() self.qlps = qlps.copy() self.ifas = ifas.copy() self.active_cfg = active_cfg invalid_zone_name = valid_zone_name + "00000000000000000000000000" add_zones_info = {invalid_zone_name: ['50:06:0b:00:00:c2:66:04', '20:19:00:05:1e:e8:e3:29'] } self.assertRaises( b_exception.BrocadeZoningHttpException, self.add_zones, add_zones_info, False) @patch.object(client.BrcdHTTPFCZoneClient, 'post_zone_data') def test_add_zones_no_activate(self, post_zone_data_mock): post_zone_data_mock.return_value = ("0", "") self.cfgs = cfgs.copy() self.zones = zones.copy() self.alias = alias.copy() self.qlps = qlps.copy() self.ifas = ifas.copy() self.active_cfg = active_cfg add_zones_info = {valid_zone_name: ['50:06:0b:00:00:c2:66:04', '20:19:00:05:1e:e8:e3:29'] } self.add_zones(add_zones_info, False) post_zone_data_mock.assert_called_once_with( encodeutils.safe_encode(zone_string_to_post_no_activate)) @patch.object(client.BrcdHTTPFCZoneClient, 'post_zone_data') def test_delete_zones_activate(self, post_zone_data_mock): post_zone_data_mock.return_value = ("0", "") self.cfgs = cfgs_to_delete.copy() self.zones = zones_to_delete.copy() self.alias = alias.copy() self.qlps = qlps.copy() self.ifas = ifas.copy() self.active_cfg = active_cfg delete_zones_info = valid_zone_name self.delete_zones(delete_zones_info, True) post_zone_data_mock.assert_called_once_with( encodeutils.safe_encode(zone_string_del_to_post)) @patch.object(client.BrcdHTTPFCZoneClient, 'post_zone_data') def test_delete_zones_no_activate(self, post_zone_data_mock): post_zone_data_mock.return_value = ("0", "") self.cfgs = cfgs_to_delete.copy() self.zones = zones_to_delete.copy() self.alias = alias.copy() self.qlps = qlps.copy() self.ifas = ifas.copy() self.active_cfg = active_cfg delete_zones_info = valid_zone_name self.delete_zones(delete_zones_info, False) post_zone_data_mock.assert_called_once_with( encodeutils.safe_encode(zone_string_del_to_post_no_active)) @patch.object(client.BrcdHTTPFCZoneClient, 'post_zone_data') def test_delete_zones_invalid_zone_name(self, post_zone_data_mock): post_zone_data_mock.return_value = ("0", "") self.cfgs = cfgs_to_delete.copy() self.zones = zones_to_delete.copy() self.alias = alias.copy() self.qlps = qlps.copy() self.ifas = ifas.copy() self.active_cfg = active_cfg delete_zones_info = 'openstack50060b0000c26604201900051ee8e32' self.assertRaises(b_exception.BrocadeZoningHttpException, self.delete_zones, delete_zones_info, False) @patch.object(time, 'sleep') @patch.object(client.BrcdHTTPFCZoneClient, 'connect') def test_post_zone_data(self, connect_mock, sleep_mock): connect_mock.return_value = zone_post_page self.assertEqual( ("-1", "Name too long"), self.post_zone_data(zone_string_to_post)) connect_mock.return_value = zone_post_page_no_error self.assertEqual(("0", ""), self.post_zone_data(zone_string_to_post)) @patch.object(client.BrcdHTTPFCZoneClient, 'connect') def test_get_nameserver_info(self, connect_mock): connect_mock.return_value = nameserver_info self.assertEqual(ns_info, self.get_nameserver_info()) @patch.object(client.BrcdHTTPFCZoneClient, 'get_session_info') def test_is_vf_enabled(self, get_session_info_mock): get_session_info_mock.return_value = session_info_vf self.assertEqual((True, parsed_session_info_vf), 
self.is_vf_enabled()) def test_delete_zones_cfgs(self): cfgs = {'openstack_cfg': 'zone1;zone2'} zones = {'zone1': '20:01:00:05:33:0e:96:15;20:00:00:05:33:0e:93:11', 'zone2': '20:01:00:05:33:0e:96:14;20:00:00:05:33:0e:93:11'} delete_zones_info = valid_zone_name self.assertEqual( (zones, cfgs, active_cfg), self.delete_zones_cfgs( cfgs_to_delete.copy(), zones_to_delete.copy(), delete_zones_info, active_cfg)) cfgs = {'openstack_cfg': 'openstack50060b0000c26604201900051ee8e329'} res = self.delete_zones_cfgs(cfgs, zones_to_delete.copy(), delete_zones_info, active_cfg) self.assertEqual((zones, {}, ''), res) cfgs = {'openstack_cfg': 'zone2'} zones = {'zone2': '20:01:00:05:33:0e:96:14;20:00:00:05:33:0e:93:11'} delete_zones_info = valid_zone_name + ";zone1" self.assertEqual( (zones, cfgs, active_cfg), self.delete_zones_cfgs( cfgs_to_delete.copy(), zones_to_delete.copy(), delete_zones_info, active_cfg)) def test_add_zones_cfgs(self): add_zones_info = {valid_zone_name: ['50:06:0b:00:00:c2:66:04', '20:19:00:05:1e:e8:e3:29'] } updated_cfgs = { 'openstack_cfg': valid_zone_name + ';zone1;zone2'} updated_zones = { 'zone1': '20:01:00:05:33:0e:96:15;20:00:00:05:33:0e:93:11', 'zone2': '20:01:00:05:33:0e:96:14;20:00:00:05:33:0e:93:11', valid_zone_name: '50:06:0b:00:00:c2:66:04;20:19:00:05:1e:e8:e3:29'} self.assertEqual((updated_zones, updated_cfgs, active_cfg), self.add_zones_cfgs( cfgs.copy(), zones.copy(), add_zones_info, active_cfg, "openstack_cfg")) add_zones_info = {valid_zone_name: ['50:06:0b:00:00:c2:66:04', '20:19:00:05:1e:e8:e3:29'], 'test4': ['20:06:0b:00:00:b2:66:07', '20:10:00:05:1e:b8:c3:19'] } updated_cfgs = { 'openstack_cfg': 'test4;openstack50060b0000c26604201900051ee8e329;zone1;zone2'} updated_zones = { 'zone1': '20:01:00:05:33:0e:96:15;20:00:00:05:33:0e:93:11', 'zone2': '20:01:00:05:33:0e:96:14;20:00:00:05:33:0e:93:11', valid_zone_name: '50:06:0b:00:00:c2:66:04;20:19:00:05:1e:e8:e3:29', 'test4': '20:06:0b:00:00:b2:66:07;20:10:00:05:1e:b8:c3:19'} result = self.add_zones_cfgs(cfgs.copy(), zones.copy(), add_zones_info, active_cfg, "openstack_cfg") self.assertEqual(updated_zones, result[0]) self.assertEqual(active_cfg, result[2]) result_cfg = result[1]['openstack_cfg'] self.assertIn('test4', result_cfg) self.assertIn('openstack50060b0000c26604201900051ee8e329', result_cfg) self.assertIn('zone1', result_cfg) self.assertIn('zone2', result_cfg) @patch.object(client.BrcdHTTPFCZoneClient, 'connect') def test_get_zone_info(self, connect_mock): connect_mock.return_value = zone_info self.get_zone_info() self.assertEqual({'openstack_cfg': 'zone1;zone2'}, self.cfgs) self.assertEqual( {'zone1': '20:01:00:05:33:0e:96:15;20:00:00:05:33:0e:93:11', 'zone2': '20:01:00:05:33:0e:96:14;20:00:00:05:33:0e:93:11'}, self.zones) self.assertEqual('openstack_cfg', self.active_cfg) self.assertEqual( {'alia1': '10:00:00:05:1e:7c:64:96;10:21:10:05:33:0e:96:12'}, self.alias) self.assertEqual( {'fa1': '20:15:f4:ce:96:ae:68:6c;20:11:f4:ce:46:ae:68:6c'}, self.ifas) self.assertEqual( {'qlp': '10:11:f4:ce:46:ae:68:6c;20:11:f4:ce:46:ae:68:6c'}, self.qlps) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/zonemanager/test_brcd_lookup_service.py0000664000175000017500000000673200000000000026426 0ustar00zuulzuul00000000000000# (c) Copyright 2013 Brocade Communications Systems Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # """Unit tests for fc san lookup service.""" from cinder import exception from cinder.tests.unit import test from cinder.volume import configuration as conf from cinder.zonemanager import fc_san_lookup_service as san_service _target_ns_map = {'100000051e55a100': ['20240002ac000a50']} _initiator_ns_map = {'100000051e55a100': ['10008c7cff523b01']} _device_map_to_verify = { '100000051e55a100': { 'initiator_port_wwn_list': [ '10008c7cff523b01'], 'target_port_wwn_list': ['20240002ac000a50']}} _fabric_wwn = '100000051e55a100' class TestFCSanLookupService(san_service.FCSanLookupService, test.TestCase): def setUp(self): super(TestFCSanLookupService, self).setUp() self.configuration = self.setup_config() # override some of the functions def __init__(self, *args, **kwargs): test.TestCase.__init__(self, *args, **kwargs) def setup_config(self): configuration = conf.Configuration(None) # fill up config configuration.fc_san_lookup_service = ( 'cinder.tests.unit.zonemanager.test_brcd_lookup_service.' 'FakeBrcdFCSanLookupService') return configuration def test_get_device_mapping_from_network(self): GlobalParams._is_normal_test = True initiator_list = ['10008c7cff523b01'] target_list = ['20240002ac000a50', '20240002ac000a40'] device_map = self.get_device_mapping_from_network( initiator_list, target_list) self.assertDictEqual(_device_map_to_verify, device_map) def test_get_device_mapping_from_network_for_invalid_config(self): GlobalParams._is_normal_test = False initiator_list = ['10008c7cff523b01'] target_list = ['20240002ac000a50', '20240002ac000a40'] self.assertRaises(exception.FCSanLookupServiceException, self.get_device_mapping_from_network, initiator_list, target_list) class FakeBrcdFCSanLookupService(object): def __init__(self, **kwargs): pass def get_device_mapping_from_network(self, initiator_wwn_list, target_wwn_list): if not GlobalParams._is_normal_test: raise exception.FCSanLookupServiceException("Error") device_map = {} initiators = [] targets = [] for i in initiator_wwn_list: if (i in _initiator_ns_map[_fabric_wwn]): initiators.append(i) for t in target_wwn_list: if (t in _target_ns_map[_fabric_wwn]): targets.append(t) device_map[_fabric_wwn] = { 'initiator_port_wwn_list': initiators, 'target_port_wwn_list': targets} return device_map class GlobalParams(object): global _is_normal_test _is_normal_test = True ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/zonemanager/test_brcd_rest_fc_zone_client.py0000664000175000017500000003175300000000000027414 0ustar00zuulzuul00000000000000# Copyright 2020 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. from unittest import mock import ddt from cinder.tests.unit import test from cinder.zonemanager.drivers.brocade import brcd_rest_fc_zone_client as \ rest_client from cinder.zonemanager.drivers.brocade import fc_zone_constants @ddt.ddt class TestBrcdRestClient(test.TestCase): def setUp(self): super(TestBrcdRestClient, self).setUp() self.client = self._get_client() @mock.patch.object(rest_client.BrcdRestFCZoneClient, '_login', mock.Mock()) def _get_client(self, ip=mock.sentinel.ipaddress, user=mock.sentinel.username, password=mock.sentinel.password, port=mock.sentinel.port, vfid=mock.sentinel.vfid, protocol=mock.sentinel.protocol): return rest_client.BrcdRestFCZoneClient(ip, user, password, port, vfid, protocol) @mock.patch.object(rest_client.BrcdRestFCZoneClient, '_login') def test_init(self, login_mock): res = rest_client.BrcdRestFCZoneClient(mock.sentinel.ipaddress, mock.sentinel.username, mock.sentinel.password, mock.sentinel.port, mock.sentinel.vfid, mock.sentinel.protocol) self.assertEqual(mock.sentinel.ipaddress, res.sw_ip) self.assertEqual(mock.sentinel.username, res.sw_user) self.assertEqual(mock.sentinel.password, res.sw_pwd) self.assertEqual(mock.sentinel.vfid, res.vfid) self.assertEqual(mock.sentinel.protocol, res.protocol) # Port parameter is not used by the class self.assertEqual('', res.status_code) self.assertIsNone(res.session) login_mock.assert_called_once_with() @ddt.data((False, '7.4.0'), (False, '8.2.0'), (True, '8.2.1'), (True, '9.0.0')) @ddt.unpack @mock.patch.object(rest_client.BrcdRestFCZoneClient, '_get_firmware_version') def test_is_supported_firmware(self, expected, version, mock_get_fw): mock_get_fw.return_value = version res = self.client.is_supported_firmware() self.assertIs(expected, res) mock_get_fw.assert_called_once_with() @mock.patch.object(rest_client.BrcdRestFCZoneClient, '_get_effective_zone_set') def test_get_active_zone_set(self, get_effective_mock): get_effective_mock.return_value = (mock.sentinel.active_zone_set, mock.sentinel.checksum) res = self.client.get_active_zone_set() self.assertEqual(mock.sentinel.active_zone_set, res) get_effective_mock.assert_called_once_with() @mock.patch.object(rest_client.BrcdRestFCZoneClient, '_get_name_server') def test_get_nameserver_info(self, get_ns_mock): res = self.client.get_nameserver_info() self.assertEqual(get_ns_mock.return_value, res) get_ns_mock.assert_called_once_with() @mock.patch.object(rest_client.BrcdRestFCZoneClient, '_add_zones') def test_add_zones(self, add_mock): self.client.add_zones(mock.sentinel.add_zone_map, mock.sentinel.activate, mock.sentinel.active_zone_set__not_used) add_mock.assert_called_once_with(mock.sentinel.add_zone_map, mock.sentinel.activate) @mock.patch.object(rest_client.BrcdRestFCZoneClient, '_update_zones') def test_update_zones(self, update_mock): self.client.update_zones(mock.sentinel.zone_map, mock.sentinel.activate, mock.sentinel.operation, mock.sentinel.active_zone_set__not_used) update_mock.assert_called_once_with(mock.sentinel.zone_map, mock.sentinel.activate, mock.sentinel.operation) @mock.patch.object(rest_client.BrcdRestFCZoneClient, '_delete_zones') def test_delete_zones(self, delete_mock): self.client.delete_zones(mock.sentinel.zone_names, mock.sentinel.activate, mock.sentinel.active_zone_set__not_used) delete_mock.assert_called_once_with(mock.sentinel.zone_names, mock.sentinel.activate) @mock.patch.object(rest_client.BrcdRestFCZoneClient, '_logout') def 
test_cleanup(self, logout_mock): self.client.cleanup() logout_mock.assert_called_once_with() @ddt.data(200, 400) @mock.patch.object(rest_client.BrcdRestFCZoneClient, '_build_url') @mock.patch.object(rest_client, 'requests') def test__login(self, status_code, requests_mock, url_mock): session_mock = requests_mock.Session post_mock = session_mock.return_value.post post_mock.return_value.status_code = status_code post_mock.return_value.headers = {'Authorization': mock.sentinel.auth} adapter_mock = requests_mock.adapters.HTTPAdapter adapter_mock.return_value = 'adapter' client = self._get_client(protocol=fc_zone_constants.REST_HTTPS, user='username', password='password') expected_headers = {'User-Agent': 'OpenStack Zone Driver', 'Accept': 'application/yang-data+json', 'Content-Type': 'application/yang-data+json', 'Authorization': mock.sentinel.auth} try: res = client._login() self.assertEqual(200, res) except rest_client.exception.BrocadeZoningRestException: self.assertNotEqual(200, status_code) expected_headers['Authorization'] = ('Basic ' 'dXNlcm5hbWU6cGFzc3dvcmQ=') del expected_headers['Content-Type'] self.assertEqual(fc_zone_constants.HTTPS, client.protocol) session_mock.assert_called_once_with() self.assertEqual(requests_mock.Session.return_value, client.session) adapter_mock.assert_called_once_with(pool_connections=1, pool_maxsize=1) session_mock.return_value.mount.assert_called_once_with('https://', 'adapter') url_mock.assert_called_once_with('/rest/login') post_mock.assert_called_once_with(url_mock.return_value) self.assertEqual(expected_headers, session_mock.return_value.headers) @mock.patch.object(rest_client.BrcdRestFCZoneClient, '_build_url') def test_logout(self, url_mock): session = mock.Mock() session.post.return_value.status_code = 204 self.client.session = session self.client._logout() url_mock.assert_called_once_with('/rest/logout') session.post.assert_called_once_with(url_mock.return_value) @mock.patch.object(rest_client.BrcdRestFCZoneClient, '_build_url') def test_logout_fail(self, url_mock): session = mock.Mock() session.post.return_value.status_code = 400 self.client.session = session self.assertRaises(rest_client.exception.BrocadeZoningRestException, self.client._logout) url_mock.assert_called_once_with('/rest/logout') session.post.assert_called_once_with(url_mock.return_value) @mock.patch.object(rest_client.BrcdRestFCZoneClient, '_build_url') def test_get_firmware_version(self, url_mock): session = mock.Mock() session.get.return_value.status_code = 200 session.get.return_value.json.return_value = { 'Response': { 'fibrechannel-switch': { 'firmware-version': mock.sentinel.fw_version}}} self.client.session = session res = self.client._get_firmware_version() self.assertEqual(mock.sentinel.fw_version, res) url_mock.assert_called_once_with( '/rest/running/switch/fibrechannel-switch') session.get.assert_called_once_with(url_mock.return_value) session.get.return_value.json.assert_called_once_with() @mock.patch.object(rest_client.BrcdRestFCZoneClient, '_build_url') def test_get_firmware_version_fail(self, url_mock): session = mock.Mock() session.get.return_value.status_code = 400 self.client.session = session self.assertRaises(rest_client.exception.BrocadeZoningRestException, self.client._get_firmware_version) url_mock.assert_called_once_with( '/rest/running/switch/fibrechannel-switch') session.get.assert_called_once_with(url_mock.return_value) session.get.return_value.json.assert_not_called() @mock.patch.object(rest_client.BrcdRestFCZoneClient, '_build_url') def 
test__get_name_server(self, url_mock): session = mock.Mock() session.get.return_value.status_code = 200 session.get.return_value.json.return_value = { 'Response': { 'fibrechannel-name-server': [ {'port-name': mock.sentinel.port1}, {'port-name': mock.sentinel.port2}]}} self.client.session = session res = self.client._get_name_server() self.assertEqual([mock.sentinel.port1, mock.sentinel.port2], res) url_mock.assert_called_once_with( '/rest/running/brocade-name-server/fibrechannel-name-server') session.get.assert_called_once_with(url_mock.return_value) session.get.return_value.json.assert_called_once_with() @mock.patch.object(rest_client.BrcdRestFCZoneClient, '_build_url') def test__get_name_server_fail(self, url_mock): session = mock.Mock() session.get.return_value.status_code = 400 self.client.session = session self.assertRaises(rest_client.exception.BrocadeZoningRestException, self.client._get_name_server) url_mock.assert_called_once_with( '/rest/running/brocade-name-server/fibrechannel-name-server') session.get.assert_called_once_with(url_mock.return_value) session.get.return_value.json.assert_not_called() @ddt.data(([{'zone-name': 'zone1', 'member-entry': {'entry-name': 'entry1'}}, {'zone-name': 'zone2', 'member-entry': {'entry-name': 'entry2'}}], {'zone1': 'entry1', 'zone2': 'entry2'}), ({'zone-name': 'zone1', 'member-entry': {'entry-name': 'entry1'}}, {'zone1': 'entry1'}), ({}, {})) @ddt.unpack @mock.patch.object(rest_client.BrcdRestFCZoneClient, '_build_url') def test_get_effective_zone_set(self, enabled_zone, expected_zones, url_mock): session = mock.Mock() session.get.return_value.status_code = 200 session.get.return_value.json.return_value = { 'Response': { 'effective-configuration': { 'checksum': mock.sentinel.checksum, 'cfg-name': 'my-cfg', 'enabled-zone': enabled_zone}}} self.client.session = session res = self.client._get_effective_zone_set() expected = ({'active_zone_config': 'my-cfg' if expected_zones else '', 'zones': expected_zones}, mock.sentinel.checksum) self.assertEqual(expected, res) url_mock.assert_called_once_with( '/rest/running/zoning/effective-configuration') session.get.assert_called_once_with(url_mock.return_value) session.get.return_value.json.assert_called_once_with() @mock.patch.object(rest_client.BrcdRestFCZoneClient, '_build_url') def test_get_effective_zone_set_fail(self, url_mock): session = mock.Mock() session.get.return_value.status_code = 400 self.client.session = session self.assertRaises(rest_client.exception.BrocadeZoningRestException, self.client._get_effective_zone_set) url_mock.assert_called_once_with( '/rest/running/zoning/effective-configuration') session.get.assert_called_once_with(url_mock.return_value) session.get.return_value.json.assert_not_called() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/zonemanager/test_cisco_fc_san_lookup_service.py0000664000175000017500000001470100000000000030120 0ustar00zuulzuul00000000000000# (c) Copyright 2014 Cisco Systems Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. # """Unit tests for Cisco fc san lookup service.""" from unittest import mock from oslo_config import cfg from cinder import exception from cinder.tests.unit import test from cinder.volume import configuration as conf import cinder.zonemanager.drivers.cisco.cisco_fc_san_lookup_service \ as cisco_lookup import cinder.zonemanager.drivers.cisco.fc_zone_constants as ZoneConstant from cinder.zonemanager import utils as zm_utils nsshow = '20:1a:00:05:1e:e8:e3:29' switch_data = ['VSAN 304\n', '------------------------------------------------------\n', 'FCID TYPE PWWN (VENDOR) \n', '------------------------------------------------------\n', '0x030001 N 20:1a:00:05:1e:e8:e3:29 (Cisco) ipfc\n', '0x030101 NL 10:00:00:00:77:99:60:2c (Interphase)\n', '0x030200 N 10:00:00:49:c9:28:c7:01\n'] nsshow_data = ['10:00:8c:7c:ff:52:3b:01', '20:24:00:02:ac:00:0a:50'] _device_map_to_verify = { 'CISCO_FAB_2': { 'initiator_port_wwn_list': ['10008c7cff523b01'], 'target_port_wwn_list': ['20240002ac000a50']}} class TestCiscoFCSanLookupService(cisco_lookup.CiscoFCSanLookupService, test.TestCase): def setUp(self): super(TestCiscoFCSanLookupService, self).setUp() self.configuration = conf.Configuration(None) self.configuration.set_default('fc_fabric_names', 'CISCO_FAB_2', 'fc-zone-manager') self.configuration.fc_fabric_names = 'CISCO_FAB_2' self.create_configuration() self.fabric_vsan = '304' # override some of the functions def __init__(self, *args, **kwargs): test.TestCase.__init__(self, *args, **kwargs) @mock.patch.object(cisco_lookup.CiscoFCSanLookupService, '_get_switch_info') def test_get_nameserver_info_exception_handling(self, mock_get_switch_info): mock_get_switch_info.side_effect = \ exception.FCSanLookupServiceException(reason='some reason') self.assertRaises( exception.FCSanLookupServiceException, self.get_nameserver_info, self.fabric_vsan ) def create_configuration(self): fc_fabric_opts = [] fc_fabric_opts.append(cfg.StrOpt('cisco_fc_fabric_address', default='172.24.173.142', help='')) fc_fabric_opts.append(cfg.StrOpt('cisco_fc_fabric_user', default='admin', help='')) fc_fabric_opts.append(cfg.StrOpt('cisco_fc_fabric_password', default='admin1234', help='', secret=True)) fc_fabric_opts.append(cfg.PortOpt('cisco_fc_fabric_port', default=22, help='')) fc_fabric_opts.append(cfg.StrOpt('cisco_zoning_vsan', default='304', help='')) config = conf.Configuration(fc_fabric_opts, 'CISCO_FAB_2') self.fabric_configs = {'CISCO_FAB_2': config} @mock.patch.object(cisco_lookup.CiscoFCSanLookupService, 'get_nameserver_info') def test_get_device_mapping_from_network(self, get_nameserver_info_mock): initiator_list = ['10008c7cff523b01'] target_list = ['20240002ac000a50', '20240002ac000a40'] get_nameserver_info_mock.return_value = (nsshow_data) device_map = self.get_device_mapping_from_network( initiator_list, target_list) self.assertDictEqual(_device_map_to_verify, device_map) @mock.patch.object(cisco_lookup.CiscoFCSanLookupService, '_get_switch_info') def test_get_nameserver_info(self, get_switch_data_mock): ns_info_list = [] ns_info_list_expected = ['20:1a:00:05:1e:e8:e3:29', '10:00:00:49:c9:28:c7:01'] get_switch_data_mock.return_value = (switch_data) ns_info_list = self.get_nameserver_info('304') self.assertEqual(ns_info_list_expected, ns_info_list) def test_parse_ns_output(self): invalid_switch_data = [' N 011a00;20:1a:00:05:1e:e8:e3:29'] return_wwn_list = [] expected_wwn_list = ['20:1a:00:05:1e:e8:e3:29', 
'10:00:00:49:c9:28:c7:01'] return_wwn_list = self._parse_ns_output(switch_data) self.assertEqual(expected_wwn_list, return_wwn_list) self.assertRaises(exception.InvalidParameterValue, self._parse_ns_output, invalid_switch_data) def test_get_formatted_wwn(self): wwn_list = ['10008c7cff523b01'] return_wwn_list = [] expected_wwn_list = ['10:00:8c:7c:ff:52:3b:01'] return_wwn_list.append(zm_utils.get_formatted_wwn(wwn_list[0])) self.assertEqual(expected_wwn_list, return_wwn_list) @mock.patch.object(cisco_lookup.CiscoFCSanLookupService, '_run_ssh') def test__get_switch_info(self, run_ssh_mock): cmd_list = [ZoneConstant.FCNS_SHOW, self.fabric_vsan, ' | no-more'] nsshow_list = [nsshow] run_ssh_mock.return_value = (Stream(nsshow), Stream()) switch_data = self._get_switch_info(cmd_list) self.assertEqual(nsshow_list, switch_data) run_ssh_mock.assert_called_once_with(cmd_list, True, 1) class Channel(object): def recv_exit_status(self): return 0 class Stream(object): def __init__(self, buffer=''): self.buffer = buffer self.channel = Channel() def readlines(self): return self.buffer def splitlines(self): return self.buffer.splitlines() def close(self): pass def flush(self): self.buffer = '' ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/zonemanager/test_cisco_fc_zone_client_cli.py0000664000175000017500000003274000000000000027371 0ustar00zuulzuul00000000000000# (c) Copyright 2014 Cisco Systems Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
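# Illustrative sketch, not part of the test modules in this archive: the
# test_get_formatted_wwn case above expects zm_utils.get_formatted_wwn to turn
# a bare 16-hex-digit WWN into its colon-separated form. A minimal standalone
# equivalent of that formatting, assuming well-formed hex input (the helper
# name below is hypothetical):
def _format_wwn_example(wwn):
    """Return '10:00:8c:7c:ff:52:3b:01' for input '10008c7cff523b01'."""
    wwn = wwn.lower()
    return ':'.join(wwn[i:i + 2] for i in range(0, len(wwn), 2))


assert _format_wwn_example('10008c7cff523b01') == '10:00:8c:7c:ff:52:3b:01'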
# """Unit tests for Cisco fc zone client cli.""" import time from unittest import mock from oslo_concurrency import processutils from cinder.tests.unit import test from cinder.zonemanager.drivers.cisco \ import cisco_fc_zone_client_cli as cli from cinder.zonemanager.drivers.cisco import exception as c_exception import cinder.zonemanager.drivers.cisco.fc_zone_constants as ZoneConstant nsshow = '20:1a:00:05:1e:e8:e3:29' switch_data = ['VSAN 303\n', '----------------------------------------------------------\n', 'FCID TYPE PWWN (VENDOR) FC4-TYPE:FEATURE\n', '----------------------------------------------------------\n', '0x030001 N 20:1a:00:05:1e:e8:e3:29 (Cisco) ipfc\n', '0x030101 NL 10:00:00:00:77:99:60:2c (Interphase)\n', '0x030200 NL 10:00:00:49:c9:28:c7:01\n'] cfgactv = ['zoneset name OpenStack_Cfg vsan 303\n', 'zone name openstack50060b0000c26604201900051ee8e329 vsan 303\n', 'pwwn 50:06:0b:00:00:c2:66:04\n', 'pwwn 20:19:00:05:1e:e8:e3:29\n'] active_zoneset = { 'zones': { 'openstack50060b0000c26604201900051ee8e329': ['50:06:0b:00:00:c2:66:04', '20:19:00:05:1e:e8:e3:29']}, 'active_zone_config': 'OpenStack_Cfg'} zoning_status_data_basic = [ 'VSAN: 303 default-zone: deny distribute: active only Interop: default\n', ' mode: basic merge-control: allow\n', ' session: none\n', ' hard-zoning: enabled broadcast: unsupported\n', ' smart-zoning: disabled\n', ' rscn-format: fabric-address\n', 'Default zone:\n', ' qos: none broadcast: unsupported ronly: unsupported\n', 'Full Zoning Database :\n', ' DB size: 220 bytes\n', ' Zonesets:2 Zones:2 Aliases: 0\n', 'Active Zoning Database :\n', ' DB size: 80 bytes\n', ' Name: test-zs-test Zonesets:1 Zones:1\n', 'Status:\n'] zoning_status_basic = {'mode': 'basic', 'session': 'none'} zoning_status_data_enhanced_nosess = [ 'VSAN: 303 default-zone: deny distribute: active only Interop: default\n', ' mode: enhanced merge-control: allow\n', ' session: none\n', ' hard-zoning: enabled broadcast: unsupported\n', ' smart-zoning: disabled\n', ' rscn-format: fabric-address\n', 'Default zone:\n', ' qos: none broadcast: unsupported ronly: unsupported\n', 'Full Zoning Database :\n', ' DB size: 220 bytes\n', ' Zonesets:2 Zones:2 Aliases: 0\n', 'Active Zoning Database :\n', ' DB size: 80 bytes\n', ' Name: test-zs-test Zonesets:1 Zones:1\n', 'Status:\n'] zoning_status_enhanced_nosess = {'mode': 'enhanced', 'session': 'none'} zoning_status_data_enhanced_sess = [ 'VSAN: 303 default-zone: deny distribute: active only Interop: default\n', ' mode: enhanced merge-control: allow\n', ' session: otherthannone\n', ' hard-zoning: enabled broadcast: unsupported\n', ' smart-zoning: disabled\n', ' rscn-format: fabric-address\n', 'Default zone:\n', ' qos: none broadcast: unsupported ronly: unsupported\n', 'Full Zoning Database :\n', ' DB size: 220 bytes\n', ' Zonesets:2 Zones:2 Aliases: 0\n', 'Active Zoning Database :\n', ' DB size: 80 bytes\n', ' Name: test-zs-test Zonesets:1 Zones:1\n', 'Status:\n'] zoning_status_enhanced_sess = {'mode': 'enhanced', 'session': 'otherthannone'} active_zoneset_multiple_zones = { 'zones': { 'openstack50060b0000c26604201900051ee8e329': ['50:06:0b:00:00:c2:66:04', '20:19:00:05:1e:e8:e3:29'], 'openstack10000012345678902001009876543210': ['50:06:0b:00:00:c2:66:02', '20:19:00:05:1e:e8:e3:27']}, 'active_zone_config': 'OpenStack_Cfg'} new_zone = {'openstack10000012345678902001009876543210': ['10:00:00:12:34:56:78:90', '20:01:00:98:76:54:32:10']} new_zones = {'openstack10000012345678902001009876543210': ['10:00:00:12:34:56:78:90', '20:01:00:98:76:54:32:10'], 
'openstack10000011111111112001001111111111': ['10:00:00:11:11:11:11:11', '20:01:00:11:11:11:11:11']} zone_names_to_delete = 'openstack50060b0000c26604201900051ee8e329' class TestCiscoFCZoneClientCLI(cli.CiscoFCZoneClientCLI, test.TestCase): def setUp(self): super(TestCiscoFCZoneClientCLI, self).setUp() self.fabric_vsan = '303' # override some of the functions def __init__(self, *args, **kwargs): test.TestCase.__init__(self, *args, **kwargs) @mock.patch.object(cli.CiscoFCZoneClientCLI, '_get_switch_info') def test_get_active_zone_set(self, get_switch_info_mock): cmd_list = [ZoneConstant.GET_ACTIVE_ZONE_CFG, self.fabric_vsan, ' | no-more'] get_switch_info_mock.return_value = cfgactv active_zoneset_returned = self.get_active_zone_set() get_switch_info_mock.assert_called_once_with(cmd_list) self.assertDictEqual(active_zoneset, active_zoneset_returned) @mock.patch.object(cli.CiscoFCZoneClientCLI, '_run_ssh') def test_get_active_zone_set_ssh_error(self, run_ssh_mock): run_ssh_mock.side_effect = processutils.ProcessExecutionError self.assertRaises(c_exception.CiscoZoningCliException, self.get_active_zone_set) @mock.patch.object(cli.CiscoFCZoneClientCLI, '_get_switch_info') def test_get_zoning_status_basic(self, get_zoning_status_mock): cmd_list = [ZoneConstant.GET_ZONE_STATUS, self.fabric_vsan] get_zoning_status_mock.return_value = zoning_status_data_basic zoning_status_returned = self.get_zoning_status() get_zoning_status_mock.assert_called_once_with(cmd_list) self.assertDictEqual(zoning_status_basic, zoning_status_returned) @mock.patch.object(cli.CiscoFCZoneClientCLI, '_get_switch_info') def test_get_zoning_status_enhanced_nosess(self, get_zoning_status_mock): cmd_list = [ZoneConstant.GET_ZONE_STATUS, self.fabric_vsan] get_zoning_status_mock.return_value =\ zoning_status_data_enhanced_nosess zoning_status_returned = self.get_zoning_status() get_zoning_status_mock.assert_called_once_with(cmd_list) self.assertDictEqual(zoning_status_enhanced_nosess, zoning_status_returned) @mock.patch.object(cli.CiscoFCZoneClientCLI, '_get_switch_info') def test_get_zoning_status_enhanced_sess(self, get_zoning_status_mock): cmd_list = [ZoneConstant.GET_ZONE_STATUS, self.fabric_vsan] get_zoning_status_mock.return_value = zoning_status_data_enhanced_sess zoning_status_returned = self.get_zoning_status() get_zoning_status_mock.assert_called_once_with(cmd_list) self.assertDictEqual(zoning_status_enhanced_sess, zoning_status_returned) @mock.patch.object(cli.CiscoFCZoneClientCLI, '_get_switch_info') def test_get_nameserver_info(self, get_switch_info_mock): ns_info_list = [] ns_info_list_expected = ['20:1a:00:05:1e:e8:e3:29'] get_switch_info_mock.return_value = (switch_data) ns_info_list = self.get_nameserver_info() self.assertEqual(ns_info_list_expected, ns_info_list) @mock.patch.object(cli.CiscoFCZoneClientCLI, '_run_ssh') def test_get_nameserver_info_ssh_error(self, run_ssh_mock): run_ssh_mock.side_effect = processutils.ProcessExecutionError self.assertRaises(c_exception.CiscoZoningCliException, self.get_nameserver_info) @mock.patch.object(cli.CiscoFCZoneClientCLI, '_run_ssh') def test__cfg_save(self, run_ssh_mock): cmd_list = ['copy', 'running-config', 'startup-config'] self._cfg_save() run_ssh_mock.assert_called_once_with(cmd_list, True) @mock.patch.object(cli.CiscoFCZoneClientCLI, '_run_ssh') @mock.patch.object(time, 'sleep') def test__cfg_save_with_retry(self, mock_sleep, run_ssh_mock): cmd_list = ['copy', 'running-config', 'startup-config'] run_ssh_mock.side_effect = [ processutils.ProcessExecutionError, ('', 
None) ] self._cfg_save() self.assertEqual(2, run_ssh_mock.call_count) run_ssh_mock.assert_has_calls([ mock.call(cmd_list, True), mock.call(cmd_list, True) ]) @mock.patch.object(cli.CiscoFCZoneClientCLI, '_run_ssh') @mock.patch.object(time, 'sleep') def test__cfg_save_with_error(self, mock_sleep, run_ssh_mock): cmd_list = ['copy', 'running-config', 'startup-config'] run_ssh_mock.side_effect = processutils.ProcessExecutionError self.assertRaises(processutils.ProcessExecutionError, self._cfg_save) expected_num_calls = 5 expected_calls = [] for i in range(expected_num_calls): expected_calls.append(mock.call(cmd_list, True)) self.assertEqual(expected_num_calls, run_ssh_mock.call_count) run_ssh_mock.assert_has_calls(expected_calls) @mock.patch.object(cli.CiscoFCZoneClientCLI, '_run_ssh') def test__get_switch_info(self, run_ssh_mock): cmd_list = [ZoneConstant.FCNS_SHOW, self.fabric_vsan] nsshow_list = [nsshow] run_ssh_mock.return_value = (Stream(nsshow), Stream()) switch_data = self._get_switch_info(cmd_list) self.assertEqual(nsshow_list, switch_data) run_ssh_mock.assert_called_once_with(cmd_list, True) @mock.patch.object(cli.CiscoFCZoneClientCLI, '_ssh_execute') @mock.patch.object(cli.CiscoFCZoneClientCLI, '_cfg_save') def test__update_zones_add(self, cfg_save_mock, ssh_execute_mock): self.update_zones(new_zone, False, self.fabric_vsan, ZoneConstant.ZONE_ADD, active_zoneset_multiple_zones, zoning_status_basic) ssh_cmd = [['conf'], ['zoneset', 'name', 'OpenStack_Cfg', 'vsan', self.fabric_vsan], ['zone', 'name', 'openstack10000012345678902001009876543210'], ['member', 'pwwn', '10:00:00:12:34:56:78:90'], ['member', 'pwwn', '20:01:00:98:76:54:32:10'], ['end']] self.assertEqual(1, cfg_save_mock.call_count) ssh_execute_mock.assert_called_once_with(ssh_cmd, True, 1) @mock.patch.object(cli.CiscoFCZoneClientCLI, '_ssh_execute') @mock.patch.object(cli.CiscoFCZoneClientCLI, '_cfg_save') def test__update_zones_remove(self, cfg_save_mock, ssh_execute_mock): self.update_zones(new_zone, False, self.fabric_vsan, ZoneConstant.ZONE_REMOVE, active_zoneset_multiple_zones, zoning_status_basic) ssh_cmd = [['conf'], ['zoneset', 'name', 'OpenStack_Cfg', 'vsan', self.fabric_vsan], ['zone', 'name', 'openstack10000012345678902001009876543210'], ['no', 'member', 'pwwn', '10:00:00:12:34:56:78:90'], ['no', 'member', 'pwwn', '20:01:00:98:76:54:32:10'], ['end']] self.assertEqual(1, cfg_save_mock.call_count) ssh_execute_mock.assert_called_once_with(ssh_cmd, True, 1) def test__parse_ns_output(self): return_wwn_list = [] expected_wwn_list = ['20:1a:00:05:1e:e8:e3:29'] return_wwn_list = self._parse_ns_output(switch_data) self.assertEqual(expected_wwn_list, return_wwn_list) class TestCiscoFCZoneClientCLISSH(test.TestCase): def setUp(self): super(TestCiscoFCZoneClientCLISSH, self).setUp() self.client = cli.CiscoFCZoneClientCLI(None, None, None, None, None) self.client.sshpool = mock.MagicMock() self.mock_ssh = self.client.sshpool.item().__enter__() @mock.patch('oslo_concurrency.processutils.ssh_execute') def test__run_ssh(self, mock_execute): mock_execute.return_value = 'ssh output' ret = self.client._run_ssh(['cat', 'foo']) self.assertEqual('ssh output', ret) mock_execute.assert_called_once_with(self.mock_ssh, 'cat foo', check_exit_code=True) @mock.patch('oslo_concurrency.processutils.ssh_execute') def test__run_ssh_with_error(self, mock_execute): mock_execute.side_effect = processutils.ProcessExecutionError() self.assertRaises(processutils.ProcessExecutionError, self.client._run_ssh, ['cat', 'foo']) class Channel(object): def 
recv_exit_status(self): return 0 class Stream(object): def __init__(self, buffer=''): self.buffer = buffer self.channel = Channel() def readlines(self): return self.buffer def splitlines(self): return self.buffer.splitlines() def close(self): pass def flush(self): self.buffer = '' ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/zonemanager/test_cisco_fc_zone_driver.py0000664000175000017500000002275000000000000026557 0ustar00zuulzuul00000000000000# (c) Copyright 2014 Cisco Systems Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # """Unit tests for Cisco FC zone driver.""" from unittest import mock from oslo_concurrency import processutils from oslo_config import cfg from oslo_utils import importutils from cinder import exception from cinder.tests.unit import test from cinder.volume import configuration as conf from cinder.zonemanager.drivers.cisco import cisco_fc_zone_driver as driver _active_cfg_before_add = {} _active_cfg_before_delete = { 'zones': { 'openstack10008c7cff523b0120240002ac000a50': ( ['10:00:8c:7c:ff:52:3b:01', '20:24:00:02:ac:00:0a:50'])}, 'active_zone_config': 'cfg1'} _active_cfg_default = { 'zones': { 'openstack10008c7cff523b0120240002ac000b90': ( ['10:00:8c:7c:ff:52:3b:01', '20:24:00:02:ac:00:0a:50'])}, 'active_zone_config': 'cfg1'} _activate = True _zone_name = 'openstack10008c7cff523b0120240002ac000a50' _target_ns_map = {'100000051e55a100': ['20240002ac000a50']} _initiator_ns_map = {'100000051e55a100': ['10008c7cff523b01']} _zone_map_to_add = {'openstack10008c7cff523b0120240002ac000a50': ( ['10:00:8c:7c:ff:52:3b:01', '20:24:00:02:ac:00:0a:50'])} _initiator_target_map = {'10008c7cff523b01': ['20240002ac000a50']} _device_map_to_verify = { '304': { 'initiator_port_wwn_list': [ '10008c7cff523b01'], 'target_port_wwn_list': ['20240002ac000a50']}} _fabric_wwn = '304' class CiscoFcZoneDriverBaseTest(object): def setup_config(self, is_normal, mode): fc_test_opts = [ cfg.StrOpt('fc_fabric_address_CISCO_FAB_1', default='10.24.48.213', help='FC Fabric names'), ] configuration = conf.Configuration(fc_test_opts) # fill up config configuration.zoning_mode = 'fabric' configuration.zone_driver = ('cinder.tests.unit.zonemanager.' 'test_cisco_fc_zone_driver.' 'FakeCiscoFCZoneDriver') configuration.cisco_sb_connector = ('cinder.tests.unit.zonemanager.' 'test_cisco_fc_zone_driver' '.FakeCiscoFCZoneClientCLI') configuration.zoning_policy = 'initiator-target' configuration.zone_activate = True configuration.zone_name_prefix = 'openstack' configuration.fc_san_lookup_service = ('cinder.tests.unit.zonemanager.' 'test_cisco_fc_zone_driver.' 
'FakeCiscoFCSanLookupService') configuration.fc_fabric_names = 'CISCO_FAB_1' configuration.fc_fabric_address_CISCO_FAB_1 = '172.21.60.220' if (is_normal): configuration.fc_fabric_user_CISCO_FAB_1 = 'admin' else: configuration.fc_fabric_user_CISCO_FAB_1 = 'invaliduser' configuration.fc_fabric_password_CISCO_FAB_1 = 'admin1234' if (mode == 1): configuration.zoning_policy_CISCO_FAB_1 = 'initiator-target' elif (mode == 2): configuration.zoning_policy_CISCO_FAB_1 = 'initiator' else: configuration.zoning_policy_CISCO_FAB_1 = 'initiator-target' configuration.zone_activate_CISCO_FAB_1 = True configuration.zone_name_prefix_CISCO_FAB_1 = 'openstack' configuration.zoning_vsan_CISCO_FAB_1 = '304' return configuration class TestCiscoFcZoneDriver(CiscoFcZoneDriverBaseTest, test.TestCase): def setUp(self): global GlobalVars GlobalVars = GlobalVarsClass() super(TestCiscoFcZoneDriver, self).setUp() # setup config for normal flow self.setup_driver(self.setup_config(True, 1)) def setup_driver(self, config): self.driver = importutils.import_object( 'cinder.zonemanager.drivers.cisco.cisco_fc_zone_driver' '.CiscoFCZoneDriver', configuration=config) def fake_get_active_zone_set(self, fabric_ip, fabric_user, fabric_pwd, zoning_vsan): return GlobalVars._active_cfg def fake_get_san_context(self, target_wwn_list): fabric_map = {} return fabric_map def test_delete_connection(self): GlobalVars._is_normal_test = True GlobalVars._active_cfg = _active_cfg_before_delete self.driver.delete_connection( 'CISCO_FAB_1', _initiator_target_map) self.assertNotIn(_zone_name, GlobalVars._zone_state) def test_delete_connection_for_initiator_mode(self): GlobalVars._is_normal_test = True GlobalVars._active_cfg = _active_cfg_before_delete self.setup_driver(self.setup_config(True, 2)) self.driver.delete_connection( 'CISCO_FAB_1', _initiator_target_map) self.assertNotIn(_zone_name, GlobalVars._zone_state) @mock.patch.object(driver.CiscoFCZoneDriver, 'get_zoning_status') @mock.patch.object(driver.CiscoFCZoneDriver, 'get_active_zone_set') def test_add_connection(self, get_active_zone_set_mock, get_zoning_status_mock): """Test normal flows.""" GlobalVars._is_normal_test = True GlobalVars._zone_state = [] self.setup_driver(self.setup_config(True, 1)) get_zoning_status_mock.return_value = {'mode': 'basis', 'session': 'none'} get_active_zone_set_mock.return_value = _active_cfg_default self.driver.add_connection('CISCO_FAB_1', _initiator_target_map) self.assertIn(_zone_name, GlobalVars._zone_state) @mock.patch.object(driver.CiscoFCZoneDriver, 'get_zoning_status') @mock.patch.object(driver.CiscoFCZoneDriver, 'get_active_zone_set') def test_add_connection_with_no_cfg(self, get_active_zone_set_mock, get_zoning_status_mock): """Test normal flows.""" GlobalVars._is_normal_test = True GlobalVars._zone_state = [] self.setup_driver(self.setup_config(True, 1)) get_zoning_status_mock.return_value = {'mode': 'basis', 'session': 'none'} get_active_zone_set_mock.return_value = {} self.driver.add_connection('CISCO_FAB_1', _initiator_target_map) self.assertIn(_zone_name, GlobalVars._zone_state) def test_add_connection_for_invalid_fabric(self): """Test abnormal flows.""" GlobalVars._is_normal_test = True GlobalVars._active_cfg = _active_cfg_before_add GlobalVars._is_normal_test = False self.setup_driver(self.setup_config(False, 1)) self.assertRaises(exception.FCZoneDriverException, self.driver.add_connection, 'CISCO_FAB_1', _initiator_target_map) def test_delete_connection_for_invalid_fabric(self): GlobalVars._active_cfg = _active_cfg_before_delete 
GlobalVars._is_normal_test = False self.setup_driver(self.setup_config(False, 1)) self.assertRaises(exception.FCZoneDriverException, self.driver.delete_connection, 'CISCO_FAB_1', _initiator_target_map) class FakeCiscoFCZoneClientCLI(object): def __init__(self, ipaddress, username, password, port, vsan): if not GlobalVars._is_normal_test: raise processutils.ProcessExecutionError( "Unable to connect to fabric") def get_active_zone_set(self): return GlobalVars._active_cfg def add_zones(self, zones, activate, fabric_vsan, active_zone_set, zone_status): GlobalVars._zone_state.extend(zones.keys()) def delete_zones(self, zone_names, isActivate): zone_list = zone_names.split(';') GlobalVars._zone_state = [ x for x in GlobalVars._zone_state if x not in zone_list] def get_nameserver_info(self): return _target_ns_map def get_zoning_status(self): return GlobalVars._zoning_status def close_connection(self): pass def cleanup(self): pass class FakeCiscoFCSanLookupService(object): def get_device_mapping_from_network(self, initiator_wwn_list, target_wwn_list): device_map = {} initiators = [] targets = [] for i in initiator_wwn_list: if (i in _initiator_ns_map[_fabric_wwn]): initiators.append(i) for t in target_wwn_list: if (t in _target_ns_map[_fabric_wwn]): targets.append(t) device_map[_fabric_wwn] = { 'initiator_port_wwn_list': initiators, 'target_port_wwn_list': targets} return device_map class GlobalVarsClass(object): def __init__(self): self. _active_cfg = {} self._zone_state = list() self._is_normal_test = True self._zoning_status = {} ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/zonemanager/test_cisco_lookup_service.py0000664000175000017500000000703300000000000026607 0ustar00zuulzuul00000000000000# (c) Copyright 2014 Cisco Systems Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
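# Illustrative sketch, not part of the test modules in this archive: the fake
# lookup services used by these zonemanager tests all build the device map the
# same way -- keep only the initiator and target WWNs that the fabric's name
# server reports, keyed by the fabric identifier. A standalone version of that
# filtering, assuming the single-fabric name-server maps used in these tests
# (all names below are hypothetical):
_example_fabric = '100000051e55a100'
_example_initiator_ns = {_example_fabric: ['10008c7cff523b01']}
_example_target_ns = {_example_fabric: ['20240002ac000a50']}


def _example_device_map(initiator_wwns, target_wwns):
    logged_in_initiators = [i for i in initiator_wwns
                            if i in _example_initiator_ns[_example_fabric]]
    logged_in_targets = [t for t in target_wwns
                         if t in _example_target_ns[_example_fabric]]
    return {_example_fabric: {'initiator_port_wwn_list': logged_in_initiators,
                              'target_port_wwn_list': logged_in_targets}}


# '20240002ac000a40' is not logged in to the fabric, so it is filtered out:
assert _example_device_map(['10008c7cff523b01'],
                           ['20240002ac000a50', '20240002ac000a40']) == {
    _example_fabric: {'initiator_port_wwn_list': ['10008c7cff523b01'],
                      'target_port_wwn_list': ['20240002ac000a50']}}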
# """Unit tests for Cisco FC san lookup service.""" from cinder import exception from cinder.tests.unit import test from cinder.volume import configuration as conf from cinder.zonemanager import fc_san_lookup_service as san_service _target_ns_map = {'100000051e55a100': ['20240002ac000a50']} _initiator_ns_map = {'100000051e55a100': ['10008c7cff523b01']} _device_map_to_verify = { '100000051e55a100': { 'initiator_port_wwn_list': [ '10008c7cff523b01'], 'target_port_wwn_list': ['20240002ac000a50']}} _fabric_wwn = '100000051e55a100' class TestFCSanLookupService(san_service.FCSanLookupService, test.TestCase): def setUp(self): super(TestFCSanLookupService, self).setUp() self.configuration = self.setup_config() # override some of the functions def __init__(self, *args, **kwargs): test.TestCase.__init__(self, *args, **kwargs) def setup_config(self): configuration = conf.Configuration(None) # fill up config configuration.fc_san_lookup_service = ('cinder.tests.unit.zonemanager' '.test_cisco_lookup_service' '.FakeCiscoFCSanLookupService') return configuration def test_get_device_mapping_from_network(self): GlobalParams._is_normal_test = True initiator_list = ['10008c7cff523b01'] target_list = ['20240002ac000a50', '20240002ac000a40'] device_map = self.get_device_mapping_from_network( initiator_list, target_list) self.assertDictEqual(_device_map_to_verify, device_map) def test_get_device_mapping_from_network_for_invalid_config(self): GlobalParams._is_normal_test = False initiator_list = ['10008c7cff523b01'] target_list = ['20240002ac000a50', '20240002ac000a40'] self.assertRaises(exception.FCSanLookupServiceException, self.get_device_mapping_from_network, initiator_list, target_list) class FakeCiscoFCSanLookupService(object): def __init__(self, **kwargs): pass def get_device_mapping_from_network(self, initiator_wwn_list, target_wwn_list): if not GlobalParams._is_normal_test: raise exception.FCSanLookupServiceException("Error") device_map = {} initiators = [] targets = [] for i in initiator_wwn_list: if (i in _initiator_ns_map[_fabric_wwn]): initiators.append(i) for t in target_wwn_list: if (t in _target_ns_map[_fabric_wwn]): targets.append(t) device_map[_fabric_wwn] = { 'initiator_port_wwn_list': initiators, 'target_port_wwn_list': targets} return device_map class GlobalParams(object): global _is_normal_test _is_normal_test = True ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/zonemanager/test_driverutils.py0000664000175000017500000001262300000000000024753 0ustar00zuulzuul00000000000000# (c) Copyright 2015 Brocade Communications Systems Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
# """Unit tests for friendly zone name.""" import string import ddt from cinder.tests.unit import test from cinder.zonemanager.drivers import driver_utils TEST_CHAR_SET = string.ascii_letters + string.digits @ddt.ddt class TestDriverUtils(test.TestCase): @ddt.data('OSHost10010008c7cff523b01AMCEArray20240002ac000a50') def test_get_friendly_zone_name_valid_hostname_storagesystem(self, value): self.assertEqual(value, driver_utils.get_friendly_zone_name( 'initiator-target', "10:00:8c:7c:ff:52:3b:01", "20:24:00:02:ac:00:0a:50", "OS_Host100", 'AMCE' '_Array', "openstack", TEST_CHAR_SET)) @ddt.data('openstack10008c7cff523b0120240002ac000a50') def test_get_friendly_zone_name_hostname_storagesystem_none(self, value): self.assertEqual(value, driver_utils.get_friendly_zone_name( 'initiator-target', "10:00:8c:7c:ff:52:3b:01", "20:24:00:02:ac:00:0a:50", None, None, "openstack", TEST_CHAR_SET)) @ddt.data('openstack10008c7cff523b0120240002ac000a50') def test_get_friendly_zone_name_storagesystem_none(self, value): self.assertEqual(value, driver_utils.get_friendly_zone_name( 'initiator-target', "10:00:8c:7c:ff:52:3b:01", "20:24:00:02:ac:00:0a:50", "OS_Host100", None, "openstack", TEST_CHAR_SET)) @ddt.data('openstack10008c7cff523b0120240002ac000a50') def test_get_friendly_zone_name_hostname_none(self, value): self.assertEqual(value, driver_utils.get_friendly_zone_name( 'initiator-target', "10:00:8c:7c:ff:52:3b:01", "20:24:00:02:ac:00:0a:50", None, "AMCE_Array", "openstack", TEST_CHAR_SET)) @ddt.data('OSHost10010008c7cff523b01') def test_get_friendly_zone_name_initiator_mode(self, value): self.assertEqual(value, driver_utils.get_friendly_zone_name( 'initiator', "10:00:8c:7c:ff:52:3b:01", None, "OS_Host100", None, "openstack", TEST_CHAR_SET)) @ddt.data('openstack10008c7cff523b01') def test_get_friendly_zone_name_initiator_mode_hostname_none(self, value): self.assertEqual(value, driver_utils.get_friendly_zone_name( 'initiator', "10:00:8c:7c:ff:52:3b:01", None, None, None, "openstack", TEST_CHAR_SET)) @ddt.data('OSHost100XXXX10008c7cff523b01AMCEArrayYYYY20240002ac000a50') def test_get_friendly_zone_name_storagename_length_too_long(self, value): self.assertEqual(value, driver_utils.get_friendly_zone_name( 'initiator-target', "10:00:8c:7c:ff:52:3b:01", "20:24:00:02:ac:00:0a:50", "OS_Host100XXXXXXXXXX", "AMCE_ArrayYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYY" "YYYY", "openstack", TEST_CHAR_SET)) @ddt.data('OSHost100XXXX10008c7cff523b01AMCEArrayYYYY20240002ac000a50') def test_get_friendly_zone_name_max_length(self, value): self.assertEqual(value, driver_utils.get_friendly_zone_name( 'initiator-target', "10:00:8c:7c:ff:52:3b:01", "20:24:00:02:ac:00:0a:50", "OS_Host100XXXXXXXXXX", "AMCE_ArrayYYYYYYYYYY", "openstack", TEST_CHAR_SET)) @ddt.data('OSHost100XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX10008c7cff523b01') def test_get_friendly_zone_name_initiator_mode_hostname_max_length(self, value): self.assertEqual(value, driver_utils.get_friendly_zone_name( 'initiator', "10:00:8c:7c:ff:52:3b:01", None, 'OS_Host100XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX' 'XXXXX', None, "openstack", TEST_CHAR_SET)) @ddt.data('openstack110008c7cff523b0120240002ac000a50') def test_get_friendly_zone_name_invalid_characters(self, value): self.assertEqual(value, driver_utils.get_friendly_zone_name( 'initiator-target', "10:00:8c:7c:ff:52:3b:01", "20:24:00:02:ac:00:0a:50", None, "AMCE_Array", "open-stack*1_", TEST_CHAR_SET)) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 
cinder-27.0.0/cinder/tests/unit/zonemanager/test_fc_zone_manager.py0000664000175000017500000001366000000000000025516 0ustar00zuulzuul00000000000000# (c) Copyright 2014 Brocade Communications Systems Inc. # All Rights Reserved. # # Copyright 2014 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # """Unit tests for FC Zone Manager.""" from unittest import mock from cinder import exception from cinder.tests.unit import test from cinder.volume import configuration as conf from cinder.zonemanager.drivers import fc_zone_driver from cinder.zonemanager import fc_zone_manager fabric_name = 'BRCD_FAB_3' init_target_map = {'10008c7cff523b01': ['20240002ac000a50']} conn_info = { 'driver_volume_type': 'fibre_channel', 'data': { 'target_discovered': True, 'target_lun': 1, 'target_wwn': '20240002ac000a50', 'initiator_target_map': { '10008c7cff523b01': ['20240002ac000a50'] } } } fabric_map = {'BRCD_FAB_3': ['20240002ac000a50']} target_list = ['20240002ac000a50'] class TestFCZoneManager(test.TestCase): def __init__(self, *args, **kwargs): super(TestFCZoneManager, self).__init__(*args, **kwargs) def setup_fake_driver(self): config = conf.Configuration(None) config.fc_fabric_names = fabric_name def fake_build_driver(self): self.driver = mock.Mock(fc_zone_driver.FCZoneDriver) self.set_initialized(True) self.mock_object(fc_zone_manager.ZoneManager, '_build_driver', fake_build_driver) self.zm = fc_zone_manager.ZoneManager(configuration=config) self.configuration = conf.Configuration(None) self.configuration.fc_fabric_names = fabric_name def test_unsupported_driver_disabled(self): config = conf.Configuration(fc_zone_manager.zone_manager_opts, 'fc-zone-manager') config.fc_fabric_names = fabric_name config.enable_unsupported_driver = False def fake_import(self, *args, **kwargs): fake_driver = mock.Mock(fc_zone_driver.FCZoneDriver) fake_driver.supported = False return fake_driver self.patch('oslo_utils.importutils.import_object', fake_import) zm = fc_zone_manager.ZoneManager(configuration=config) self.assertFalse(zm.driver.supported) self.assertFalse(zm.initialized) def test_unsupported_driver_enabled(self): config = conf.Configuration(None) config.fc_fabric_names = fabric_name def fake_import(self, *args, **kwargs): fake_driver = mock.Mock(fc_zone_driver.FCZoneDriver) fake_driver.supported = False return fake_driver self.patch('oslo_utils.importutils.import_object', fake_import) with mock.patch( 'cinder.volume.configuration.Configuration') as mock_config: mock_config.return_value.zone_driver = 'test' mock_config.return_value.enable_unsupported_driver = True zm = fc_zone_manager.ZoneManager(configuration=config) self.assertFalse(zm.driver.supported) self.assertTrue(zm.initialized) @mock.patch('oslo_config.cfg._is_opt_registered', return_value=False) def test_add_connection(self, opt_mock): self.setup_fake_driver() with mock.patch.object(self.zm.driver, 'add_connection')\ as add_connection_mock: self.zm.driver.get_san_context.return_value = fabric_map self.zm.add_connection(conn_info) 
self.zm.driver.get_san_context.assert_called_once_with(target_list) add_connection_mock.assert_called_once_with(fabric_name, init_target_map, None, None) @mock.patch('oslo_config.cfg._is_opt_registered', return_value=False) def test_add_connection_error(self, opt_mock): self.setup_fake_driver() with mock.patch.object(self.zm.driver, 'add_connection')\ as add_connection_mock: add_connection_mock.side_effect = exception.FCZoneDriverException self.assertRaises(exception.ZoneManagerException, self.zm.add_connection, conn_info) @mock.patch('oslo_config.cfg._is_opt_registered', return_value=False) def test_delete_connection(self, opt_mock): self.setup_fake_driver() with mock.patch.object(self.zm.driver, 'delete_connection')\ as delete_connection_mock: self.zm.driver.get_san_context.return_value = fabric_map self.zm.delete_connection(conn_info) self.zm.driver.get_san_context.assert_called_once_with(target_list) delete_connection_mock.assert_called_once_with(fabric_name, init_target_map, None, None) @mock.patch('oslo_config.cfg._is_opt_registered', return_value=False) def test_delete_connection_error(self, opt_mock): self.setup_fake_driver() with mock.patch.object(self.zm.driver, 'delete_connection')\ as del_connection_mock: del_connection_mock.side_effect = exception.FCZoneDriverException self.assertRaises(exception.ZoneManagerException, self.zm.delete_connection, conn_info) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/tests/unit/zonemanager/test_volume_driver.py0000664000175000017500000001231000000000000025252 0ustar00zuulzuul00000000000000# (c) Copyright 2012-2014 Hewlett-Packard Development Company, L.P. # All Rights Reserved. # # Copyright 2014 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
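# Illustrative sketch, not part of the test modules in this archive: the
# ZoneManager tests above feed add_connection/delete_connection a conn_info
# dict and then assert that get_san_context received the flat target list.
# A minimal standalone extraction of that data from conn_info, assuming the
# conn_info layout used in these tests (names below are hypothetical):
def _example_targets_from_conn_info(conn_info):
    init_target_map = conn_info['data']['initiator_target_map']
    targets = set()
    for target_wwns in init_target_map.values():
        targets.update(target_wwns)
    return init_target_map, sorted(targets)


_example_conn_info = {
    'driver_volume_type': 'fibre_channel',
    'data': {'initiator_target_map': {
        '10008c7cff523b01': ['20240002ac000a50']}}}

assert _example_targets_from_conn_info(_example_conn_info) == (
    {'10008c7cff523b01': ['20240002ac000a50']}, ['20240002ac000a50'])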
# """Unit tests for Volume Manager.""" from unittest import mock from cinder.tests import fake_driver from cinder.tests.unit import test from cinder.volume import configuration as conf from cinder.volume import volume_utils from cinder.zonemanager.drivers.brocade import brcd_fc_zone_driver from cinder.zonemanager import fc_zone_manager class TestVolumeDriver(test.TestCase): def setUp(self): super(TestVolumeDriver, self).setUp() self.driver = fake_driver.FakeFibreChannelDriver() self.mock_object(brcd_fc_zone_driver, 'BrcdFCZoneDriver') self.addCleanup(self._cleanup) def _cleanup(self): self.driver = None def __init__(self, *args, **kwargs): super(TestVolumeDriver, self).__init__(*args, **kwargs) @mock.patch('oslo_config.cfg._is_opt_registered', return_value=False) @mock.patch.object(volume_utils, 'require_driver_initialized') def test_initialize_connection_with_decorator(self, utils_mock, opt_mock): utils_mock.return_value = True with mock.patch.object(fc_zone_manager.ZoneManager, 'add_connection')\ as add_zone_mock: with mock.patch.object(conf.Configuration, 'safe_get')\ as mock_safe_get: mock_safe_get.return_value = 'fabric' conn_info = self.driver.initialize_connection(None, None) add_zone_mock.assert_called_once_with(conn_info) @mock.patch('cinder.zonemanager.utils.create_zone_manager') @mock.patch('oslo_config.cfg._is_opt_registered', return_value=False) @mock.patch.object(volume_utils, 'require_driver_initialized') def test_initialize_connection_with_decorator_and_empty_map( self, utils_mock, opt_mock, zm_create_mock): utils_mock.return_value = True with mock.patch.object(fc_zone_manager.ZoneManager, 'add_connection')\ as add_zone_mock: self.driver.initialize_connection_with_empty_map(None, None) zm_create_mock.assert_not_called() add_zone_mock.assert_not_called() @mock.patch.object(volume_utils, 'require_driver_initialized') def test_initialize_connection_no_decorator(self, utils_mock): utils_mock.return_value = True with mock.patch.object(fc_zone_manager.ZoneManager, 'add_connection')\ as add_zone_mock: with mock.patch.object(conf.Configuration, 'safe_get')\ as mock_safe_get: mock_safe_get.return_value = 'fabric' self.driver.no_zone_initialize_connection(None, None) add_zone_mock.assert_not_called() @mock.patch('oslo_config.cfg._is_opt_registered', return_value=False) @mock.patch.object(volume_utils, 'require_driver_initialized') def test_terminate_connection_with_decorator(self, utils_mock, opt_mock): utils_mock.return_value = True with mock.patch.object(fc_zone_manager.ZoneManager, 'delete_connection') as remove_zone_mock: with mock.patch.object(conf.Configuration, 'safe_get')\ as mock_safe_get: mock_safe_get.return_value = 'fabric' conn_info = self.driver.terminate_connection(None, None) remove_zone_mock.assert_called_once_with(conn_info) @mock.patch('cinder.zonemanager.utils.create_zone_manager') @mock.patch('oslo_config.cfg._is_opt_registered', return_value=False) @mock.patch.object(volume_utils, 'require_driver_initialized') def test_terminate_connection_with_decorator_and_empty_map( self, utils_mock, opt_mock, zm_create_mock): utils_mock.return_value = True with mock.patch.object(fc_zone_manager.ZoneManager, 'delete_connection') as remove_zone_mock: self.driver.terminate_connection_with_empty_map(None, None) zm_create_mock.assert_not_called() remove_zone_mock.assert_not_called() @mock.patch.object(volume_utils, 'require_driver_initialized') def test_terminate_connection_no_decorator(self, utils_mock): utils_mock.return_value = True with 
mock.patch.object(fc_zone_manager.ZoneManager, 'delete_connection') as remove_zone_mock: with mock.patch.object(conf.Configuration, 'safe_get')\ as mock_safe_get: mock_safe_get.return_value = 'fabric' self.driver.no_zone_terminate_connection(None, None) remove_zone_mock.assert_not_called() ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315577.3151205 cinder-27.0.0/cinder/transfer/0000775000175000017500000000000000000000000016157 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/transfer/__init__.py0000664000175000017500000000166000000000000020273 0ustar00zuulzuul00000000000000# Copyright (C) 2013 Hewlett-Packard Development Company, L.P. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # Importing full names to not pollute the namespace and cause possible # collisions with use of 'from cinder.transfer import ' elsewhere. from oslo_config import cfg from oslo_utils import importutils CONF = cfg.CONF API = importutils.import_class(CONF.transfer_api_class) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/transfer/api.py0000664000175000017500000003663500000000000017317 0ustar00zuulzuul00000000000000# Copyright (C) 2013 Hewlett-Packard Development Company, L.P. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Handles all requests relating to transferring ownership of volumes. 
""" import hashlib import hmac import os from oslo_config import cfg from oslo_log import log as logging from oslo_utils import excutils from oslo_utils import strutils from cinder.db import base from cinder import exception from cinder.i18n import _ from cinder.keymgr import transfer as key_transfer from cinder import objects from cinder.policies import volume_transfer as policy from cinder import quota from cinder import quota_utils from cinder.volume import api as volume_api from cinder.volume import volume_utils volume_transfer_opts = [ cfg.IntOpt('volume_transfer_salt_length', default=8, help='The number of characters in the salt.'), cfg.IntOpt('volume_transfer_key_length', default=16, help='The number of characters in the ' 'autogenerated auth key.'), ] CONF = cfg.CONF CONF.register_opts(volume_transfer_opts) LOG = logging.getLogger(__name__) QUOTAS = quota.QUOTAS class API(base.Base): """API for interacting volume transfers.""" def __init__(self): self.volume_api = volume_api.API() super().__init__() def get(self, context, transfer_id): context.authorize(policy.GET_POLICY) rv = self.db.transfer_get(context, transfer_id) return dict(rv) def delete(self, context, transfer_id): """Make the RPC call to delete a volume transfer.""" transfer = self.db.transfer_get(context, transfer_id) volume_ref = objects.Volume.get_by_id(context, transfer.volume_id) context.authorize(policy.DELETE_POLICY, target_obj=volume_ref) volume_utils.notify_about_volume_usage(context, volume_ref, "transfer.delete.start") if volume_ref['status'] != 'awaiting-transfer': LOG.error("Volume in unexpected state") if volume_ref.encryption_key_id is not None: key_transfer.transfer_delete(context, volume_ref, conf=CONF) self.db.transfer_destroy(context, transfer_id) volume_utils.notify_about_volume_usage(context, volume_ref, "transfer.delete.end") def get_all(self, context, marker=None, limit=None, sort_keys=None, sort_dirs=None, filters=None, offset=None): filters = filters or {} context.authorize(policy.GET_ALL_POLICY) all_tenants = strutils.bool_from_string(filters.pop('all_tenants', 'false')) if context.is_admin and all_tenants: transfers = self.db.transfer_get_all(context, marker=marker, limit=limit, sort_keys=sort_keys, sort_dirs=sort_dirs, filters=filters, offset=offset) else: transfers = self.db.transfer_get_all_by_project( context, context.project_id, marker=marker, limit=limit, sort_keys=sort_keys, sort_dirs=sort_dirs, filters=filters, offset=offset) return transfers def _get_random_string(self, length): """Get a random hex string of the specified length.""" rndstr = "" # Note that the string returned by this function must contain only # characters that the recipient can enter on their keyboard. The # function ssh224().hexdigit() achieves this by generating a hash # which will only contain hexadecimal digits. 
while len(rndstr) < length: rndstr += hashlib.sha224(os.urandom(255)).hexdigest() return rndstr[0:length] def _get_crypt_hash(self, salt, auth_key): """Generate a random hash based on the salt and the auth key.""" if not isinstance(salt, (bytes, str)): salt = str(salt) if isinstance(salt, str): salt = salt.encode('utf-8') if not isinstance(auth_key, (bytes, str)): auth_key = str(auth_key) if isinstance(auth_key, str): auth_key = auth_key.encode('utf-8') return hmac.new(salt, auth_key, hashlib.sha1).hexdigest() def create(self, context, volume_id, display_name, no_snapshots=False, allow_encrypted=False): """Creates an entry in the transfers table.""" LOG.info("Generating transfer record for volume %s", volume_id) volume_ref = objects.Volume.get_by_id(context, volume_id) context.authorize(policy.CREATE_POLICY, target_obj=volume_ref) if volume_ref['status'] != "available": raise exception.InvalidVolume(reason=_("status must be available")) if volume_ref.encryption_key_id is not None: if not allow_encrypted: raise exception.InvalidVolume( reason=_("transferring encrypted volume is not supported")) if no_snapshots: raise exception.InvalidVolume( reason=_("transferring an encrypted volume without its " "snapshots is not supported")) if not no_snapshots: snapshots = self.db.snapshot_get_all_for_volume(context, volume_id) for snapshot in snapshots: if snapshot['status'] != "available": msg = _("snapshot: %s status must be " "available") % snapshot['id'] raise exception.InvalidSnapshot(reason=msg) volume_utils.notify_about_volume_usage(context, volume_ref, "transfer.create.start") # The salt is just a short random string. salt = self._get_random_string(CONF.volume_transfer_salt_length) auth_key = self._get_random_string(CONF.volume_transfer_key_length) crypt_hash = self._get_crypt_hash(salt, auth_key) # TODO(ollie): Transfer expiry needs to be implemented. 
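# NOTE: hedged sketch (illustrative only, not used by this module): the
# auth_key returned to the caller is never stored; accept() later verifies
# it by recomputing the HMAC with the stored salt and comparing it against
# the stored crypt_hash, roughly:
#
#     import hashlib
#     import hmac
#
#     def verify_auth_key(salt, auth_key, stored_crypt_hash):
#         candidate = hmac.new(salt.encode('utf-8'),
#                              auth_key.encode('utf-8'),
#                              hashlib.sha1).hexdigest()
#         return candidate == stored_crypt_hash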
transfer_rec = {'volume_id': volume_id, 'display_name': display_name, 'salt': salt, 'crypt_hash': crypt_hash, 'expires_at': None, 'no_snapshots': no_snapshots, 'source_project_id': volume_ref['project_id']} try: transfer = self.db.transfer_create(context, transfer_rec) except Exception: LOG.error("Failed to create transfer record for %s", volume_id) raise if volume_ref.encryption_key_id is not None: try: key_transfer.transfer_create(context, volume_ref, conf=CONF) except Exception: LOG.error("Failed to transfer keys for %s", volume_id) self.db.transfer_destroy(context, transfer.id) raise volume_utils.notify_about_volume_usage(context, volume_ref, "transfer.create.end") return {'id': transfer['id'], 'volume_id': transfer['volume_id'], 'display_name': transfer['display_name'], 'auth_key': auth_key, 'created_at': transfer['created_at'], 'no_snapshots': transfer['no_snapshots'], 'source_project_id': transfer['source_project_id'], 'destination_project_id': transfer['destination_project_id'], 'accepted': transfer['accepted']} def _handle_snapshot_quota(self, context, snapshots, volume_type_id, donor_id): snapshots_num = len(snapshots) volume_sizes = 0 if not CONF.no_snapshot_gb_quota: for snapshot in snapshots: volume_sizes += snapshot.volume_size try: reserve_opts = {'snapshots': snapshots_num, 'gigabytes': volume_sizes} QUOTAS.add_volume_type_opts(context, reserve_opts, volume_type_id) reservations = QUOTAS.reserve(context, **reserve_opts) except exception.OverQuota as e: quota_utils.process_reserve_over_quota( context, e, resource='snapshots', size=volume_sizes) try: reserve_opts = {'snapshots': -snapshots_num, 'gigabytes': -volume_sizes} QUOTAS.add_volume_type_opts(context.elevated(), reserve_opts, volume_type_id) donor_reservations = QUOTAS.reserve(context, project_id=donor_id, **reserve_opts) except exception.OverQuota: donor_reservations = None LOG.exception("Failed to update volume providing snapshots quota:" " Over quota.") return reservations, donor_reservations def accept(self, context, transfer_id, auth_key): """Accept a volume that has been offered for transfer.""" # We must use an elevated context to see the volume that is still # owned by the donor. 
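# NOTE: a minimal usage sketch of the transfer workflow implemented by this
# class (illustrative only; ctxt_owner and ctxt_recipient are hypothetical
# request contexts for the donor project and the receiving project):
#
#     xfer_api = API()
#     rec = xfer_api.create(ctxt_owner, volume_id, 'hand-off to other team')
#     # rec['auth_key'] is only returned here; pass it to the recipient
#     # out of band.
#     xfer_api.accept(ctxt_recipient, rec['id'], rec['auth_key'])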
context.authorize(policy.ACCEPT_POLICY) transfer = self.db.transfer_get(context.elevated(), transfer_id) crypt_hash = self._get_crypt_hash(transfer['salt'], auth_key) if crypt_hash != transfer['crypt_hash']: msg = (_("Attempt to transfer %s with invalid auth key.") % transfer_id) LOG.error(msg) raise exception.InvalidAuthKey(reason=msg) volume_id = transfer['volume_id'] vol_ref = objects.Volume.get_by_id(context.elevated(), volume_id) if vol_ref['consistencygroup_id']: msg = _("Volume %s must not be part of a consistency " "group.") % vol_ref['id'] LOG.error(msg) raise exception.InvalidVolume(reason=msg) try: values = {'per_volume_gigabytes': vol_ref.size} QUOTAS.limit_check(context, project_id=context.project_id, **values) except exception.OverQuota as e: quotas = e.kwargs['quotas'] raise exception.VolumeSizeExceedsLimit( size=vol_ref.size, limit=quotas['per_volume_gigabytes']) try: reserve_opts = {'volumes': 1, 'gigabytes': vol_ref.size} QUOTAS.add_volume_type_opts(context, reserve_opts, vol_ref.volume_type_id) reservations = QUOTAS.reserve(context, **reserve_opts) except exception.OverQuota as e: quota_utils.process_reserve_over_quota(context, e, resource='volumes', size=vol_ref.size) try: donor_id = vol_ref['project_id'] reserve_opts = {'volumes': -1, 'gigabytes': -vol_ref.size} QUOTAS.add_volume_type_opts(context, reserve_opts, vol_ref.volume_type_id) donor_reservations = QUOTAS.reserve(context.elevated(), project_id=donor_id, **reserve_opts) except Exception: donor_reservations = None LOG.exception("Failed to update quota donating volume" " transfer id %s", transfer_id) snap_res = None snap_donor_res = None if transfer['no_snapshots'] is False: snapshots = objects.SnapshotList.get_all_for_volume( context.elevated(), volume_id) volume_type_id = vol_ref.volume_type_id snap_res, snap_donor_res = self._handle_snapshot_quota( context, snapshots, volume_type_id, vol_ref['project_id']) volume_utils.notify_about_volume_usage(context, vol_ref, "transfer.accept.start") encryption_key_transferred = False try: # Transfer ownership of the volume now, must use an elevated # context. self.volume_api.accept_transfer(context, vol_ref, context.user_id, context.project_id, transfer['no_snapshots']) if vol_ref.encryption_key_id is not None: key_transfer.transfer_accept(context, vol_ref, conf=CONF) encryption_key_transferred = True self.db.transfer_accept(context.elevated(), transfer_id, context.user_id, context.project_id, transfer['no_snapshots']) QUOTAS.commit(context, reservations) if snap_res: QUOTAS.commit(context, snap_res) if donor_reservations: QUOTAS.commit(context, donor_reservations, project_id=donor_id) if snap_donor_res: QUOTAS.commit(context, snap_donor_res, project_id=donor_id) LOG.info("Volume %s has been transferred.", volume_id) except Exception: # If an exception occurs after the encryption key was transferred # then we need to transfer the key *back* to the service project. # This is done by making another key transfer request. 
if encryption_key_transferred: key_transfer.transfer_create(context, vol_ref, conf=CONF) with excutils.save_and_reraise_exception(): QUOTAS.rollback(context, reservations) if snap_res: QUOTAS.rollback(context, snap_res) if donor_reservations: QUOTAS.rollback(context, donor_reservations, project_id=donor_id) if snap_donor_res: QUOTAS.rollback(context, snap_donor_res, project_id=donor_id) vol_ref = objects.Volume.get_by_id(context.elevated(), volume_id) volume_utils.notify_about_volume_usage(context, vol_ref, "transfer.accept.end") return {'id': transfer_id, 'display_name': transfer['display_name'], 'volume_id': vol_ref['id']} ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/utils.py0000664000175000017500000011277000000000000016055 0ustar00zuulzuul00000000000000# Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # Copyright 2011 Justin Santa Barbara # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Utilities and helper functions for all Cinder code. This file is for utilities useful in all of Cinder, including cinder-manage, the api service, the scheduler, etc. Code related to volume drivers and connecting to volumes should be placed in volume_utils instead. """ from collections import OrderedDict import contextlib import datetime import functools import inspect import logging as py_logging import math import multiprocessing import operator import os import pyclbr import re import shutil import stat import sys import tempfile import typing from typing import Callable, Iterable, Iterator from typing import Optional, Type, Union import eventlet from eventlet import tpool from oslo_concurrency import lockutils from oslo_concurrency import processutils from oslo_config import cfg from oslo_log import log as logging from oslo_utils import excutils from oslo_utils import importutils from oslo_utils import strutils from oslo_utils import timeutils import tenacity from cinder import coordination from cinder import exception from cinder.i18n import _ CONF = cfg.CONF LOG = logging.getLogger(__name__) ISO_TIME_FORMAT = "%Y-%m-%dT%H:%M:%S" PERFECT_TIME_FORMAT = "%Y-%m-%dT%H:%M:%S.%f" INITIAL_AUTO_MOSR = 20 INFINITE_UNKNOWN_VALUES = ('infinite', 'unknown') synchronized = lockutils.synchronized_with_prefix('cinder-') synchronized_remove = lockutils.remove_external_lock_file_with_prefix( 'cinder-') def clean_volume_file_locks(volume_id, driver): """Remove file locks used by Cinder. This doesn't take care of driver locks, those should be handled in driver's delete_volume method. 
""" for name in (volume_id + '-delete_volume', volume_id, volume_id + '-detach_volume'): try: synchronized_remove(name) except Exception as exc: LOG.warning('Failed to cleanup volume lock %(name)s: %(exc)s', {'name': name, 'exc': exc}) try: driver.clean_volume_file_locks(volume_id) except Exception as exc: LOG.warning('Failed to cleanup driver locks for volume %(id)s: ' '%(exc)s', {'id': volume_id, 'exc': exc}) def api_clean_volume_file_locks(volume_id): coordination.synchronized_remove('attachment_update-' + volume_id + '-*') def clean_snapshot_file_locks(snapshot_id, driver): try: name = snapshot_id + '-delete_snapshot' synchronized_remove(name) except Exception as exc: LOG.warning('Failed to cleanup snapshot lock %(name)s: %(exc)s', {'name': name, 'exc': exc}) try: driver.clean_snapshot_file_locks(snapshot_id) except Exception as exc: LOG.warning('Failed to cleanup driver locks for snapshot %(id)s: ' '%(exc)s', {'id': snapshot_id, 'exc': exc}) def as_int(obj: Union[int, float, str], quiet: bool = True) -> int: # Try "2" -> 2 try: return int(obj) except (ValueError, TypeError): pass # Try "2.5" -> 2 try: return int(float(obj)) except (ValueError, TypeError): pass # Eck, not sure what this is then. if not quiet: raise TypeError(_("Can not translate %s to integer.") % (obj)) obj = typing.cast(int, obj) return obj def check_exclusive_options( **kwargs: Optional[Union[dict, str, bool]]) -> None: """Checks that only one of the provided options is actually not-none. Iterates over all the kwargs passed in and checks that only one of said arguments is not-none, if more than one is not-none then an exception will be raised with the names of those arguments who were not-none. """ if not kwargs: return pretty_keys = kwargs.pop("pretty_keys", True) exclusive_options = {} for (k, v) in kwargs.items(): if v is not None: exclusive_options[k] = True if len(exclusive_options) > 1: # Change the format of the names from pythonic to # something that is more readable. # # Ex: 'the_key' -> 'the key' if pretty_keys: tnames = [k.replace('_', ' ') for k in kwargs] else: tnames = list(kwargs.keys()) names = ", ".join(sorted(tnames)) msg = (_("May specify only one of %s") % (names)) raise exception.InvalidInput(reason=msg) def execute(*cmd: str, **kwargs: Union[bool, str]) -> tuple[str, str]: """Convenience wrapper around oslo's execute() method.""" if 'run_as_root' in kwargs and 'root_helper' not in kwargs: kwargs['root_helper'] = get_root_helper() return processutils.execute(*cmd, **kwargs) def check_ssh_injection(cmd_list: list[str]) -> None: ssh_injection_pattern: tuple[str, ...] = ('`', '$', '|', '||', ';', '&', '&&', '>', '>>', '<') # Check whether injection attacks exist for arg in cmd_list: arg = arg.strip() # Check for matching quotes on the ends is_quoted = re.match('^(?P[\'"])(?P.*)(?P=quote)$', arg) if is_quoted: # Check for unescaped quotes within the quoted argument quoted = is_quoted.group('quoted') if quoted: if (re.match('[\'"]', quoted) or re.search('[^\\\\][\'"]', quoted)): raise exception.SSHInjectionThreat(command=cmd_list) else: # We only allow spaces within quoted arguments, and that # is the only special character allowed within quotes if len(arg.split()) > 1: raise exception.SSHInjectionThreat(command=cmd_list) # Second, check whether danger character in command. So the shell # special operator must be a single argument. 
for c in ssh_injection_pattern: if c not in arg: continue result = arg.find(c) if not result == -1: if result == 0 or not arg[result - 1] == '\\': raise exception.SSHInjectionThreat(command=cmd_list) def check_metadata_properties( metadata: Optional[dict[str, str]]) -> None: """Checks that the volume metadata properties are valid.""" if not metadata: metadata = {} if not isinstance(metadata, dict): msg = _("Metadata should be a dict.") raise exception.InvalidInput(msg) for k, v in metadata.items(): try: check_string_length(k, "Metadata key: %s" % k, min_length=1) check_string_length(v, "Value for metadata key: %s" % k) except exception.InvalidInput as exc: raise exception.InvalidVolumeMetadata(reason=exc) # for backward compatibility if len(k) > 255: msg = _("Metadata property key %s greater than 255 " "characters.") % k raise exception.InvalidVolumeMetadataSize(reason=msg) if len(v) > 255: msg = _("Metadata property key %s value greater than " "255 characters.") % k raise exception.InvalidVolumeMetadataSize(reason=msg) def last_completed_audit_period(unit: Optional[str] = None) -> \ tuple[Union[datetime.datetime, datetime.timedelta], Union[datetime.datetime, datetime.timedelta]]: """This method gives you the most recently *completed* audit period. arguments: units: string, one of 'hour', 'day', 'month', 'year' Periods normally begin at the beginning (UTC) of the period unit (So a 'day' period begins at midnight UTC, a 'month' unit on the 1st, a 'year' on Jan, 1) unit string may be appended with an optional offset like so: 'day@18' This will begin the period at 18:00 UTC. 'month@15' starts a monthly period on the 15th, and year@3 begins a yearly one on March 1st. returns: 2 tuple of datetimes (begin, end) The begin timestamp of this audit period is the same as the end of the previous. """ if not unit: unit = CONF.volume_usage_audit_period unit = typing.cast(str, unit) offset: Union[str, int] = 0 if '@' in unit: unit, offset = unit.split("@", 1) offset = int(offset) offset = typing.cast(int, offset) rightnow = timeutils.utcnow() if unit not in ('month', 'day', 'year', 'hour'): raise ValueError('Time period must be hour, day, month or year') if unit == 'month': if offset == 0: offset = 1 end = datetime.datetime(day=offset, month=rightnow.month, year=rightnow.year) if end >= rightnow: year = rightnow.year if 1 >= rightnow.month: year -= 1 month = 12 + (rightnow.month - 1) else: month = rightnow.month - 1 end = datetime.datetime(day=offset, month=month, year=year) year = end.year if 1 >= end.month: year -= 1 month = 12 + (end.month - 1) else: month = end.month - 1 begin = datetime.datetime(day=offset, month=month, year=year) elif unit == 'year': if offset == 0: offset = 1 end = datetime.datetime(day=1, month=offset, year=rightnow.year) if end >= rightnow: end = datetime.datetime(day=1, month=offset, year=rightnow.year - 1) begin = datetime.datetime(day=1, month=offset, year=rightnow.year - 2) else: begin = datetime.datetime(day=1, month=offset, year=rightnow.year - 1) elif unit == 'day': end = datetime.datetime(hour=offset, day=rightnow.day, month=rightnow.month, year=rightnow.year) if end >= rightnow: end = end - datetime.timedelta(days=1) begin = end - datetime.timedelta(days=1) elif unit == 'hour': end = rightnow.replace(minute=offset, second=0, microsecond=0) if end >= rightnow: end = end - datetime.timedelta(hours=1) begin = end - datetime.timedelta(hours=1) return (begin, end) def monkey_patch() -> None: """Patches decorators for all functions in a specified module. 
If the CONF.monkey_patch set as True, this function patches a decorator for all functions in specified modules. You can set decorators for each modules using CONF.monkey_patch_modules. The format is "Module path:Decorator function". Example: 'cinder.api.ec2.cloud:' \ cinder.openstack.common.notifier.api.notify_decorator' Parameters of the decorator are as follows. (See cinder.openstack.common.notifier.api.notify_decorator) :param name: name of the function :param function: object of the function """ # If CONF.monkey_patch is not True, this function do nothing. if not CONF.monkey_patch: return # Get list of modules and decorators for module_and_decorator in CONF.monkey_patch_modules: module, decorator_name = module_and_decorator.split(':') # import decorator function decorator = importutils.import_class(decorator_name) __import__(module) # Retrieve module information using pyclbr module_data = pyclbr.readmodule_ex(module) for key in module_data.keys(): # set the decorator for the class methods if isinstance(module_data[key], pyclbr.Class): clz = importutils.import_class("%s.%s" % (module, key)) # On Python 3, unbound methods are regular functions predicate = inspect.isfunction for method, func in inspect.getmembers(clz, predicate): setattr( clz, method, decorator("%s.%s.%s" % (module, key, method), func)) # set the decorator for the function elif isinstance(module_data[key], pyclbr.Function): func = importutils.import_class("%s.%s" % (module, key)) setattr(sys.modules[module], key, decorator("%s.%s" % (module, key), func)) def make_dev_path(dev: str, partition: Optional[str] = None, base: str = '/dev') -> str: """Return a path to a particular device. >>> make_dev_path('xvdc') /dev/xvdc >>> make_dev_path('xvdc', 1) /dev/xvdc1 """ path = os.path.join(base, dev) if partition: path += str(partition) return path def robust_file_write(directory: str, filename: str, data: str) -> None: """Robust file write. Use "write to temp file and rename" model for writing the persistence file. :param directory: Target directory to create a file. :param filename: File name to store specified data. :param data: String data. """ tempname = None dirfd = None try: dirfd = os.open(directory, os.O_DIRECTORY) # write data to temporary file with tempfile.NamedTemporaryFile(prefix=filename, dir=directory, delete=False) as tf: tempname = tf.name tf.write(data.encode('utf-8')) tf.flush() os.fdatasync(tf.fileno()) tf.close() # Fsync the directory to ensure the fact of the existence of # the temp file hits the disk. os.fsync(dirfd) # If destination file exists, it will be replaced silently. os.rename(tempname, os.path.join(directory, filename)) # Fsync the directory to ensure the rename hits the disk. os.fsync(dirfd) except OSError: with excutils.save_and_reraise_exception(): LOG.error("Failed to write persistence file: %(path)s.", {'path': os.path.join(directory, filename)}) if tempname is not None: if os.path.isfile(tempname): os.unlink(tempname) finally: if dirfd is not None: os.close(dirfd) @contextlib.contextmanager def temporary_chown(path: str, owner_uid: Optional[int] = None) -> Iterator[None]: """Temporarily chown a path. 
:params owner_uid: UID of temporary owner (defaults to current user) """ if os.name == 'nt': LOG.debug("Skipping chown for %s as this operation is " "not available on Windows.", path) yield return if owner_uid is None: owner_uid = os.getuid() orig_uid = os.stat(path).st_uid if orig_uid != owner_uid: execute('chown', str(owner_uid), path, run_as_root=True) try: yield finally: if orig_uid != owner_uid: execute('chown', str(orig_uid), path, run_as_root=True) @contextlib.contextmanager def tempdir(**kwargs) -> Iterator[str]: tmpdir = tempfile.mkdtemp(**kwargs) try: yield tmpdir finally: try: shutil.rmtree(tmpdir) except OSError as e: LOG.debug('Could not remove tmpdir: %s', str(e)) def get_root_helper() -> str: return 'sudo cinder-rootwrap %s' % CONF.rootwrap_config def get_file_mode(path: str) -> int: """This primarily exists to make unit testing easier.""" return stat.S_IMODE(os.stat(path).st_mode) def get_file_gid(path: str) -> int: """This primarily exists to make unit testing easier.""" return os.stat(path).st_gid def get_file_size(path: str) -> int: """Returns the file size.""" return os.stat(path).st_size def _get_disk_of_partition( devpath: str, st: Optional[os.stat_result] = None) -> tuple[str, os.stat_result]: """Gets a disk device path and status from partition path. Returns a disk device path from a partition device path, and stat for the device. If devpath is not a partition, devpath is returned as it is. For example, '/dev/sda' is returned for '/dev/sda1', and '/dev/disk1' is for '/dev/disk1p1' ('p' is prepended to the partition number if the disk name ends with numbers). """ diskpath = re.sub(r'(?:(?<=\d)p)?\d+$', '', devpath) if diskpath != devpath: try: st_disk = os.stat(diskpath) if stat.S_ISBLK(st_disk.st_mode): return (diskpath, st_disk) except OSError: pass # devpath is not a partition if st is None: st = os.stat(devpath) return (devpath, st) def get_bool_param(param_string: str, params: dict, default: bool = False) -> bool: param = params.get(param_string, default) if not strutils.is_valid_boolstr(param): msg = _("Value '%(param)s' for '%(param_string)s' is not " "a boolean.") % {'param': param, 'param_string': param_string} raise exception.InvalidParameterValue(err=msg) return strutils.bool_from_string(param, strict=True) def get_blkdev_major_minor(path: str, lookup_for_file: bool = True) -> Optional[str]: """Get 'major:minor' number of block device. Get the device's 'major:minor' number of a block device to control I/O ratelimit of the specified path. If lookup_for_file is True and the path is a regular file, lookup a disk device which the file lies on and returns the result for the device. """ st = os.stat(path) if stat.S_ISBLK(st.st_mode): path, st = _get_disk_of_partition(path, st) return '%d:%d' % (os.major(st.st_rdev), os.minor(st.st_rdev)) elif stat.S_ISCHR(st.st_mode): # No I/O ratelimit control is provided for character devices return None elif lookup_for_file: # lookup the mounted disk which the file lies on out, _err = execute('df', path) devpath = out.split("\n")[1].split()[0] if devpath[0] != '/': # the file is on a network file system return None return get_blkdev_major_minor(devpath, False) else: msg = _("Unable to get a block device for file \'%s\'") % path raise exception.CinderException(msg) def check_string_length(value: str, name: str, min_length: int = 0, max_length: Optional[int] = None, allow_all_spaces: bool = True) -> None: """Check the length of specified string. 
:param value: the value of the string :param name: the name of the string :param min_length: the min_length of the string :param max_length: the max_length of the string """ try: strutils.check_string_length(value, name=name, min_length=min_length, max_length=max_length) except (ValueError, TypeError) as exc: raise exception.InvalidInput(reason=exc) if not allow_all_spaces and value.isspace(): msg = _('%(name)s cannot be all spaces.') raise exception.InvalidInput(reason=msg) def is_blk_device(dev: str) -> bool: try: if stat.S_ISBLK(os.stat(dev).st_mode): return True return False except Exception: LOG.debug('Path %s not found in is_blk_device check', dev) return False class ComparableMixin(object): def _compare(self, other: object, method: Callable): try: return method(self._cmpkey(), other._cmpkey()) # type: ignore except (AttributeError, TypeError): # _cmpkey not implemented, or return different type, # so I can't compare with "other". return NotImplemented def __lt__(self, other: object): return self._compare(other, lambda s, o: s < o) def __le__(self, other: object): return self._compare(other, lambda s, o: s <= o) def __eq__(self, other: object): return self._compare(other, lambda s, o: s == o) def __ge__(self, other: object): return self._compare(other, lambda s, o: s >= o) def __gt__(self, other: object): return self._compare(other, lambda s, o: s > o) def __ne__(self, other: object): return self._compare(other, lambda s, o: s != o) class retry_if_exit_code(tenacity.retry_if_exception): """Retry on ProcessExecutionError specific exit codes.""" def __init__(self, codes): self.codes = (codes,) if isinstance(codes, int) else codes super(retry_if_exit_code, self).__init__(self._check_exit_code) def _check_exit_code(self, exc): return (exc and isinstance(exc, processutils.ProcessExecutionError) and exc.exit_code in self.codes) def retry(retry_param: Union[None, Type[Exception], tuple[Type[Exception], ...], int, tuple[int, ...]], interval: float = 1, retries: int = 3, backoff_rate: float = 2, wait_random: bool = False, retry=tenacity.retry_if_exception_type) -> Callable: if retries < 1: raise ValueError('Retries must be greater than or ' 'equal to 1 (received: %s). ' % retries) if wait_random: wait = tenacity.wait_random_exponential(multiplier=interval) else: wait = tenacity.wait_exponential( multiplier=interval, min=0, exp_base=backoff_rate) def _decorator(f: Callable) -> Callable: @functools.wraps(f) def _wrapper(*args, **kwargs): r = tenacity.Retrying( sleep=tenacity.nap.sleep, before_sleep=tenacity.before_sleep_log(LOG, logging.DEBUG), after=tenacity.after_log(LOG, logging.DEBUG), stop=tenacity.stop_after_attempt(retries), reraise=True, retry=retry(retry_param), wait=wait) return r(f, *args, **kwargs) return _wrapper return _decorator def convert_str(text: Union[str, bytes]) -> str: """Convert to native string. Convert bytes and Unicode strings to native strings: * convert to bytes on Python 2: encode Unicode using encodeutils.safe_encode() * convert to Unicode on Python 3: decode bytes from UTF-8 """ if isinstance(text, bytes): return text.decode('utf-8') else: return text def build_or_str(elements: Union[None, str, Iterable[str]], str_format: Optional[str] = None) -> str: """Builds a string of elements joined by 'or'. Will join strings with the 'or' word and if a str_format is provided it will be used to format the resulted joined string. If there are no elements an empty string will be returned. :param elements: Elements we want to join. 
:type elements: String or iterable of strings. :param str_format: String to use to format the response. :type str_format: String. """ if not elements: return '' if not isinstance(elements, str): elements = _(' or ').join(elements) elements = typing.cast(str, elements) if str_format: return str_format % elements return elements def calculate_capacity_factors(total_capacity: float, free_capacity: float, provisioned_capacity: float, thin_provisioning_support: bool, max_over_subscription_ratio: float, reserved_percentage: int, thin: bool) -> dict: """Create the various capacity factors of the a particular backend. Based off of definition of terms cinder-specs/specs/queens/provisioning-improvements.html Description of factors calculated where units of gb are Gibibytes. reserved_capacity - The amount of space reserved from the total_capacity as reported by the backend. total_reserved_available_capacity - The total capacity minus reserved capacity total_available_capacity - The total capacity available to cinder calculated from total_reserved_available_capacity (for thick) OR for thin total_reserved_available_capacity max_over_subscription_ratio calculated_free_capacity - total_available_capacity - provisioned_capacity virtual_free_capacity - The calculated free capacity available to cinder to allocate new storage. For thin: calculated_free_capacity For thick: the reported free_capacity can be less than the calculated capacity, so we use free_capacity - reserved_capacity. free_percent - the percentage of the virtual_free_capacity and total_available_capacity is left over provisioned_ratio - The ratio of provisioned storage to total_available_capacity :param total_capacity: The reported total capacity in the backend. :type total_capacity: float :param free_capacity: The free space/capacity as reported by the backend. :type free_capacity: float :param provisioned_capacity: as reported by backend or volume manager from allocated_capacity_gb :type provisioned_capacity: float :param thin_provisioning_support: Is thin provisioning supported? :type thin_provisioning_support: bool :param max_over_subscription_ratio: as reported by the backend :type max_over_subscription_ratio: float :param reserved_percentage: the % amount to reserve as unavailable. 0-100 :type reserved_percentage: int, 0-100 :param thin: calculate based on thin provisioning if enabled by thin_provisioning_support :type thin: bool :return: A dictionary of all of the capacity factors. :rtype: dict """ total = float(total_capacity) reserved = float(reserved_percentage) / 100 reserved_capacity = math.floor(total * reserved) total_reserved_available = total - reserved_capacity if thin and thin_provisioning_support: total_available_capacity = ( total_reserved_available * max_over_subscription_ratio ) calculated_free = total_available_capacity - provisioned_capacity virtual_free = calculated_free provisioned_type = 'thin' else: # Calculate how much free space is left after taking into # account the reserved space. 
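# NOTE: worked example with made-up numbers (illustrative only): for
# total_capacity=500, free_capacity=200, provisioned_capacity=400,
# reserved_percentage=10 and max_over_subscription_ratio=2.0:
#
#     reserved_capacity        = floor(500 * 0.10) = 50
#     total_reserved_available = 500 - 50          = 450
#
#   thin branch (above):
#     total_available_capacity = 450 * 2.0         = 900
#     calculated_free = virtual_free = 900 - 400   = 500
#
#   thick branch (below):
#     total_available_capacity = 450
#     calculated_free          = 450 - 400         = 50
#     virtual_free             = min(200, 50)      = 50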
total_available_capacity = total_reserved_available calculated_free = total_available_capacity - provisioned_capacity virtual_free = calculated_free if free_capacity < calculated_free: virtual_free = free_capacity provisioned_type = 'thick' if total_available_capacity: provisioned_ratio = provisioned_capacity / total_available_capacity free_percent = (virtual_free / total_available_capacity) * 100 else: provisioned_ratio = 0 free_percent = 0 def _limit(x): """Limit our floating points to 2 decimal places.""" return round(x, 2) return { "total_capacity": total, "free_capacity": free_capacity, "reserved_capacity": reserved_capacity, "total_reserved_available_capacity": _limit(total_reserved_available), "max_over_subscription_ratio": ( max_over_subscription_ratio if provisioned_type == 'thin' else None ), "total_available_capacity": _limit(total_available_capacity), "provisioned_capacity": provisioned_capacity, "calculated_free_capacity": _limit(calculated_free), "virtual_free_capacity": _limit(virtual_free), "free_percent": _limit(free_percent), "provisioned_ratio": _limit(provisioned_ratio), "provisioned_type": provisioned_type } def calculate_virtual_free_capacity(total_capacity: float, free_capacity: float, provisioned_capacity: float, thin_provisioning_support: bool, max_over_subscription_ratio: float, reserved_percentage: int, thin: bool) -> float: """Calculate the virtual free capacity based on multiple factors. :param total_capacity: total_capacity_gb of a host_state or pool. :param free_capacity: free_capacity_gb of a host_state or pool. :param provisioned_capacity: provisioned_capacity_gb of a host_state or pool. :param thin_provisioning_support: thin_provisioning_support of a host_state or a pool. :param max_over_subscription_ratio: max_over_subscription_ratio of a host_state or a pool :param reserved_percentage: reserved_percentage of a host_state or a pool. :param thin: whether volume to be provisioned is thin :returns: the calculated virtual free capacity. """ factors = calculate_capacity_factors( total_capacity, free_capacity, provisioned_capacity, thin_provisioning_support, max_over_subscription_ratio, reserved_percentage, thin ) return factors["virtual_free_capacity"] def calculate_max_over_subscription_ratio( capability: dict, global_max_over_subscription_ratio: float) -> float: # provisioned_capacity_gb is the apparent total capacity of # all the volumes created on a backend, which is greater than # or equal to allocated_capacity_gb, which is the apparent # total capacity of all the volumes created on a backend # in Cinder. Using allocated_capacity_gb as the default of # provisioned_capacity_gb if it is not set. allocated_capacity_gb = capability.get('allocated_capacity_gb', 0) provisioned_capacity_gb = capability.get('provisioned_capacity_gb', allocated_capacity_gb) thin_provisioning_support = capability.get('thin_provisioning_support', False) total_capacity_gb = capability.get('total_capacity_gb', 0) free_capacity_gb = capability.get('free_capacity_gb', 0) pool_name = capability.get('pool_name', capability.get('volume_backend_name')) # If thin provisioning is not supported the capacity filter will not use # the value we return, no matter what it is. if not thin_provisioning_support: LOG.debug("Trying to retrieve max_over_subscription_ratio from a " "service that does not support thin provisioning") return 1.0 # Again, if total or free capacity is infinite or unknown, the capacity # filter will not use the max_over_subscription_ratio at all. 
So, does # not matter what we return here. if ((total_capacity_gb in INFINITE_UNKNOWN_VALUES) or (free_capacity_gb in INFINITE_UNKNOWN_VALUES)): return 1.0 max_over_subscription_ratio = (capability.get( 'max_over_subscription_ratio') or global_max_over_subscription_ratio) # We only calculate the automatic max_over_subscription_ratio (mosr) # when the global or driver conf is set auto and while # provisioned_capacity_gb is not 0. When auto is set and # provisioned_capacity_gb is 0, we use the default value 20.0. if max_over_subscription_ratio == 'auto': if provisioned_capacity_gb != 0: used_capacity = total_capacity_gb - free_capacity_gb LOG.debug("Calculating max_over_subscription_ratio for " "pool %s: provisioned_capacity_gb=%s, " "used_capacity=%s", pool_name, provisioned_capacity_gb, used_capacity) max_over_subscription_ratio = 1 + ( float(provisioned_capacity_gb) / (used_capacity + 1)) else: max_over_subscription_ratio = INITIAL_AUTO_MOSR LOG.info("Auto max_over_subscription_ratio for pool %s is " "%s", pool_name, max_over_subscription_ratio) else: max_over_subscription_ratio = float(max_over_subscription_ratio) return max_over_subscription_ratio def validate_dictionary_string_length(specs: dict) -> None: """Check the length of each key and value of dictionary.""" if not isinstance(specs, dict): msg = _('specs must be a dictionary.') raise exception.InvalidInput(reason=msg) for key, value in specs.items(): if key is not None: check_string_length(key, 'Key "%s"' % key, min_length=1, max_length=255) if value is not None: check_string_length(value, 'Value for key "%s"' % key, min_length=0, max_length=255) def service_expired_time( with_timezone: Optional[bool] = False) -> datetime.datetime: return (timeutils.utcnow(with_timezone=with_timezone) - datetime.timedelta(seconds=CONF.service_down_time)) class DoNothing(str): """Class that literrally does nothing. We inherit from str in case it's called with json.dumps. 
""" def __call__(self, *args, **kwargs): return self def __getattr__(self, name): return self DO_NOTHING = DoNothing() def notifications_enabled(conf): """Check if oslo notifications are enabled.""" notifications_driver = set(conf.oslo_messaging_notifications.driver) return notifications_driver and notifications_driver != {'noop'} def if_notifications_enabled(f: Callable) -> Callable: """Calls decorated method only if notifications are enabled.""" @functools.wraps(f) def wrapped(*args, **kwargs): if notifications_enabled(CONF): return f(*args, **kwargs) return DO_NOTHING return wrapped LOG_LEVELS = ('INFO', 'WARNING', 'ERROR', 'DEBUG') def get_log_method(level_string: str) -> int: level_string = level_string or '' upper_level_string = level_string.upper() if upper_level_string not in LOG_LEVELS: raise exception.InvalidInput( reason=_('%s is not a valid log level.') % level_string) return getattr(logging, upper_level_string) def set_log_levels(prefix: str, level_string: str) -> None: level = get_log_method(level_string) prefix = prefix or '' for k, v in logging.get_loggers().items(): if k and k.startswith(prefix): v.logger.setLevel(level) def get_log_levels(prefix: str) -> dict: prefix = prefix or '' return {k: py_logging.getLevelName(v.logger.getEffectiveLevel()) for k, v in logging.get_loggers().items() if k and k.startswith(prefix)} def paths_normcase_equal(path_a: str, path_b: str) -> bool: return os.path.normcase(path_a) == os.path.normcase(path_b) def create_ordereddict(adict: dict) -> OrderedDict: """Given a dict, return a sorted OrderedDict.""" return OrderedDict(sorted(adict.items(), key=operator.itemgetter(0))) class Semaphore(object): """Custom semaphore to workaround eventlet issues with multiprocessing.""" def __init__(self, limit): self.limit = limit self.semaphore = multiprocessing.Semaphore(limit) def __enter__(self): # Eventlet does not work with multiprocessing's Semaphore, so we have # to execute it in a native thread to avoid getting blocked when trying # to acquire the semaphore. return tpool.execute(self.semaphore.__enter__) def __exit__(self, *args): # Don't use native thread for exit, as it will only add overhead return self.semaphore.__exit__(*args) def semaphore_factory(limit: int, concurrent_processes: int) -> Union[eventlet.Semaphore, Semaphore]: """Get a semaphore to limit concurrent operations. The semaphore depends on the limit we want to set and the concurrent processes that need to be limited. """ # Limit of 0 is no limit, so we won't use a semaphore if limit: # If we only have 1 process we can use eventlet's Semaphore if concurrent_processes == 1: return eventlet.Semaphore(limit) # Use our own Sempahore for interprocess because eventlet blocks with # the standard one return Semaphore(limit) return contextlib.suppress() def limit_operations(func: Callable) -> Callable: """Decorator to limit the number of concurrent operations. This method decorator expects to have a _semaphore attribute holding an initialized semaphore in the self instance object. We can get the appropriate semaphore with the semaphore_factory method. 
""" @functools.wraps(func) def wrapper(self, *args, **kwargs): with self._semaphore: return func(self, *args, **kwargs) return wrapper ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/version.py0000664000175000017500000000160300000000000016372 0ustar00zuulzuul00000000000000# Copyright 2011 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from pbr import version as pbr_version CINDER_VENDOR = "OpenStack Foundation" CINDER_PRODUCT = "OpenStack Cinder" CINDER_PACKAGE = None # OS distro package version suffix loaded = False version_info = pbr_version.VersionInfo('cinder') version_string = version_info.version_string ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315577.3191204 cinder-27.0.0/cinder/volume/0000775000175000017500000000000000000000000015642 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/__init__.py0000664000175000017500000000211100000000000017746 0ustar00zuulzuul00000000000000# Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # Importing full names to not pollute the namespace and cause possible # collisions with use of 'from cinder.volume import ' elsewhere. from oslo_utils import importutils from cinder.common import config CONF = config.CONF def API(*args, **kwargs): class_name = CONF.volume_api_class return importutils.import_object(class_name, *args, **kwargs) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/api.py0000664000175000017500000036607400000000000017005 0ustar00zuulzuul00000000000000# Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. """Handles all requests relating to volumes.""" import ast import collections import datetime from typing import (Any, DefaultDict, Iterable, Optional, Union) from castellan import key_manager from oslo_config import cfg from oslo_log import log as logging from oslo_serialization import jsonutils from oslo_utils import excutils from oslo_utils import strutils from oslo_utils import timeutils from oslo_utils import versionutils import webob from cinder.api import common from cinder.common import constants from cinder import compute from cinder import context from cinder import coordination from cinder import db from cinder.db import base from cinder import exception from cinder import flow_utils from cinder.i18n import _ from cinder.image import cache as image_cache from cinder.image import glance from cinder.image import image_utils from cinder.message import api as message_api from cinder.message import message_field from cinder import objects from cinder.objects import base as objects_base from cinder.objects import fields from cinder.objects import volume_type from cinder.policies import attachments as attachment_policy from cinder.policies import services as svr_policy from cinder.policies import snapshot_metadata as s_meta_policy from cinder.policies import snapshots as snapshot_policy from cinder.policies import volume_actions as vol_action_policy from cinder.policies import volume_metadata as vol_meta_policy from cinder.policies import volumes as vol_policy from cinder import quota from cinder import quota_utils from cinder.scheduler import rpcapi as scheduler_rpcapi from cinder import utils from cinder.volume.flows.api import create_volume from cinder.volume.flows.api import manage_existing from cinder.volume import rpcapi as volume_rpcapi from cinder.volume import volume_types from cinder.volume import volume_utils allow_force_upload_opt = cfg.BoolOpt('enable_force_upload', default=False, help='Enables the Force option on ' 'upload_to_image. 
This enables ' 'running upload_volume on in-use ' 'volumes for backends that ' 'support it.') volume_host_opt = cfg.BoolOpt('snapshot_same_host', default=True, help='Create volume from snapshot at the host ' 'where snapshot resides') volume_same_az_opt = cfg.BoolOpt('cloned_volume_same_az', default=True, help='Ensure that the new volumes are the ' 'same AZ as snapshot or source volume') az_cache_time_opt = cfg.IntOpt('az_cache_duration', default=3600, help='Cache volume availability zones in ' 'memory for the provided duration in ' 'seconds') CONF = cfg.CONF CONF.register_opt(allow_force_upload_opt) CONF.register_opt(volume_host_opt) CONF.register_opt(volume_same_az_opt) CONF.register_opt(az_cache_time_opt) CONF.import_opt('glance_core_properties', 'cinder.image.glance') LOG = logging.getLogger(__name__) QUOTAS = quota.QUOTAS AO_LIST = objects.VolumeAttachmentList class API(base.Base): """API for interacting with the volume manager.""" AVAILABLE_MIGRATION_STATUS = (None, 'deleting', 'error', 'success') def __init__(self, image_service=None): self.image_service = (image_service or glance.get_default_image_service()) self.scheduler_rpcapi = scheduler_rpcapi.SchedulerAPI() self.volume_rpcapi = volume_rpcapi.VolumeAPI() self.availability_zones = [] self.availability_zones_last_fetched = None self.key_manager = key_manager.API(CONF) self.message = message_api.API() super().__init__() def list_availability_zones(self, enable_cache: bool = False, refresh_cache: bool = False) -> tuple: """Describe the known availability zones :param enable_cache: Enable az cache :param refresh_cache: Refresh cache immediately :return: tuple of dicts, each with a 'name' and 'available' key """ if enable_cache: if self.availability_zones_last_fetched is None: refresh_cache = True else: cache_age = timeutils.delta_seconds( self.availability_zones_last_fetched, timeutils.utcnow()) if cache_age >= CONF.az_cache_duration: refresh_cache = True if refresh_cache or not enable_cache: topic = constants.VOLUME_TOPIC ctxt = context.get_admin_context() services = objects.ServiceList.get_all_by_topic(ctxt, topic) az_data = [(s.availability_zone, s.disabled) for s in services] disabled_map: dict[str, bool] = {} for (az_name, disabled) in az_data: tracked_disabled = disabled_map.get(az_name, True) disabled_map[az_name] = tracked_disabled and disabled azs = [{'name': name, 'available': not disabled} for (name, disabled) in disabled_map.items()] if refresh_cache: now = timeutils.utcnow() self.availability_zones = azs self.availability_zones_last_fetched = now LOG.debug("Availability zone cache updated, next update will" " occur around %s.", now + datetime.timedelta( seconds=CONF.az_cache_duration)) else: azs = self.availability_zones LOG.info("Availability Zones retrieved successfully.") return tuple(azs) def _retype_is_possible(self, context: context.RequestContext, source_type: objects.VolumeType, target_type: objects.VolumeType) -> bool: elevated = context.elevated() # If encryptions are different, it is not allowed # to create volume from source volume or snapshot. 
if volume_types.volume_types_encryption_changed( elevated, source_type.id if source_type else None, target_type.id if target_type else None): return False services = objects.ServiceList.get_all_by_topic( elevated, constants.VOLUME_TOPIC, disabled=True) if len(services.objects) == 1: return True source_extra_specs = {} if source_type: with source_type.obj_as_admin(): source_extra_specs = source_type.extra_specs target_extra_specs = {} if target_type: with target_type.obj_as_admin(): target_extra_specs = target_type.extra_specs if (volume_utils.matching_backend_name( source_extra_specs, target_extra_specs)): return True return False def _is_volume_migrating(self, volume: objects.Volume) -> bool: # The migration status 'none' means no migration has ever been done # before. The migration status 'error' means the previous migration # failed. The migration status 'success' means the previous migration # succeeded. The migration status 'deleting' means the source volume # fails to delete after a migration. # All of the statuses above means the volume is not in the process # of a migration. return (volume['migration_status'] not in self.AVAILABLE_MIGRATION_STATUS) def _is_multiattach(self, volume_type: objects.VolumeType) -> bool: # TODO: getattr not needed if using obj? specs = getattr(volume_type, 'extra_specs', {}) return specs.get('multiattach', 'False') == ' True' def _is_encrypted(self, volume_type: objects.volume_type.VolumeType) -> bool: specs = volume_type.get('extra_specs', {}) if 'encryption' not in specs: return False return specs.get('encryption', {}) is not {} def create(self, context: context.RequestContext, size: Union[str, int], name: Optional[str], description: Optional[str], snapshot: Optional[objects.Snapshot] = None, image_id: Optional[str] = None, volume_type: Optional[objects.VolumeType] = None, metadata: Optional[dict] = None, availability_zone: Optional[str] = None, source_volume: Optional[objects.Volume] = None, scheduler_hints=None, source_replica=None, consistencygroup: Optional[objects.ConsistencyGroup] = None, cgsnapshot: Optional[objects.CGSnapshot] = None, source_cg=None, group: Optional[objects.Group] = None, group_snapshot=None, source_group=None, backup: Optional[objects.Backup] = None): if image_id: context.authorize(vol_policy.CREATE_FROM_IMAGE_POLICY) else: context.authorize(vol_policy.CREATE_POLICY) # Check up front for legacy replication parameters to quick fail if source_replica: msg = _("Creating a volume from a replica source was part of the " "replication v1 implementation which is no longer " "available.") raise exception.InvalidInput(reason=msg) # NOTE(jdg): we can have a create without size if we're # doing a create from snap or volume. Currently # the taskflow api will handle this and pull in the # size from the source. # NOTE(jdg): cinderclient sends in a string representation # of the size value. BUT there is a possibility that somebody # could call the API directly so the is_int_like check # handles both cases (string representation of true float or int). 
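# NOTE: illustrative aside on the size check below (behaviour of
# oslo_utils.strutils.is_int_like as I understand it; treat the exact
# return values as an assumption rather than a guarantee):
#
#     strutils.is_int_like(10)     # True
#     strutils.is_int_like('10')   # True
#     strutils.is_int_like('2.5')  # False -> rejected by the check below
#     strutils.is_int_like('abc')  # False -> rejected by the check below
#
# so both integer and string sizes sent by cinderclient are accepted, while
# non-integer values fail validation.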
if size and (not strutils.is_int_like(size) or int(size) <= 0): msg = _('Invalid volume size provided for create request: %s ' '(size argument must be an integer (or string ' 'representation of an integer) and greater ' 'than zero).') % size raise exception.InvalidInput(reason=msg) # ensure we pass the volume_type provisioning filter on size volume_types.provision_filter_on_size(context, volume_type, size) if consistencygroup and (not cgsnapshot and not source_cg): if not volume_type: msg = _("volume_type must be provided when creating " "a volume in a consistency group.") raise exception.InvalidInput(reason=msg) cg_voltypeids = consistencygroup.volume_type_id if volume_type.id not in cg_voltypeids: msg = _("Invalid volume_type provided: %s (requested " "type must be supported by this consistency " "group).") % volume_type raise exception.InvalidInput(reason=msg) if group and (not group_snapshot and not source_group): if not volume_type: msg = _("volume_type must be provided when creating " "a volume in a group.") raise exception.InvalidInput(reason=msg) vol_type_ids = [v_type.id for v_type in group.volume_types] if volume_type.id not in vol_type_ids: msg = _("Invalid volume_type provided: %s (requested " "type must be supported by this " "group).") % volume_type raise exception.InvalidInput(reason=msg) if source_volume and volume_type: if volume_type.id != source_volume.volume_type_id: if not self._retype_is_possible( context, source_volume.volume_type, volume_type): msg = _("Invalid volume_type provided: %s (requested type " "is not compatible; either match source volume, " "or omit type argument).") % volume_type.id raise exception.InvalidInput(reason=msg) if snapshot and volume_type: if volume_type.id != snapshot.volume_type_id: if not self._retype_is_possible(context, snapshot.volume.volume_type, volume_type): msg = _("Invalid volume_type provided: %s (requested " "type is not compatible; recommend omitting " "the type argument).") % volume_type.id raise exception.InvalidInput(reason=msg) # Determine the valid availability zones that the volume could be # created in (a task in the flow will/can use this information to # ensure that the availability zone requested is valid). 
raw_zones = self.list_availability_zones(enable_cache=True) availability_zones = set([az['name'] for az in raw_zones]) if CONF.storage_availability_zone: availability_zones.add(CONF.storage_availability_zone) utils.check_metadata_properties(metadata) create_what = { 'context': context, 'raw_size': size, 'name': name, 'description': description, 'snapshot': snapshot, 'image_id': image_id, 'raw_volume_type': volume_type, 'metadata': metadata or {}, 'raw_availability_zone': availability_zone, 'source_volume': source_volume, 'scheduler_hints': scheduler_hints, 'key_manager': self.key_manager, 'optional_args': {'is_quota_committed': False}, 'consistencygroup': consistencygroup, 'cgsnapshot': cgsnapshot, 'group': group, 'group_snapshot': group_snapshot, 'source_group': source_group, 'backup': backup, } try: sched_rpcapi = (self.scheduler_rpcapi if ( not cgsnapshot and not source_cg and not group_snapshot and not source_group) else None) volume_rpcapi = (self.volume_rpcapi if ( not cgsnapshot and not source_cg and not group_snapshot and not source_group) else None) flow_engine = create_volume.get_flow(self.db, self.image_service, availability_zones, create_what, sched_rpcapi, volume_rpcapi) except Exception: msg = _('Failed to create api volume flow.') LOG.exception(msg) raise exception.CinderException(msg) # Attaching this listener will capture all of the notifications that # taskflow sends out and redirect them to a more useful log for # cinders debugging (or error reporting) usage. with flow_utils.DynamicLogListener(flow_engine, logger=LOG): try: flow_engine.run() vref = flow_engine.storage.fetch('volume') # NOTE(tommylikehu): If the target az is not hit, # refresh the az cache immediately. if flow_engine.storage.fetch('refresh_az'): self.list_availability_zones(enable_cache=True, refresh_cache=True) LOG.info("Create volume request issued successfully.", resource=vref) return vref except exception.InvalidAvailabilityZone: with excutils.save_and_reraise_exception(): self.list_availability_zones(enable_cache=True, refresh_cache=True) def revert_to_snapshot(self, context: context.RequestContext, volume: objects.Volume, snapshot: objects.Snapshot) -> None: """revert a volume to a snapshot""" context.authorize(vol_action_policy.REVERT_POLICY, target_obj=volume) v_res = volume.update_single_status_where( 'reverting', 'available') if not v_res: msg = (_("Can't revert volume %(vol_id)s to its latest snapshot " "%(snap_id)s. Volume's status must be 'available'.") % {"vol_id": volume.id, "snap_id": snapshot.id}) raise exception.InvalidVolume(reason=msg) s_res = snapshot.update_single_status_where( fields.SnapshotStatus.RESTORING, fields.SnapshotStatus.AVAILABLE) if not s_res: msg = (_("Can't revert volume %(vol_id)s to its latest snapshot " "%(snap_id)s. 
Snapshot's status must be 'available'.") % {"vol_id": volume.id, "snap_id": snapshot.id}) raise exception.InvalidSnapshot(reason=msg) self.volume_rpcapi.revert_to_snapshot(context, volume, snapshot) def delete(self, context: context.RequestContext, volume: objects.Volume, force: bool = False, unmanage_only: bool = False, cascade: bool = False) -> None: context.authorize(vol_policy.DELETE_POLICY, target_obj=volume) if context.is_admin and context.project_id != volume.project_id: project_id = volume.project_id else: project_id = context.project_id if not volume.host: volume_utils.notify_about_volume_usage(context, volume, "delete.start") # NOTE(vish): scheduling failed, so delete it # Note(zhiteng): update volume quota reservation try: reservations = None if volume.status != 'error_managing' and volume.use_quota: LOG.debug("Decrease volume quotas for non temporary volume" " in non error_managing status.") reserve_opts = {'volumes': -1, 'gigabytes': -volume.size} QUOTAS.add_volume_type_opts(context, reserve_opts, volume.volume_type_id) reservations = QUOTAS.reserve(context, project_id=project_id, **reserve_opts) except Exception: LOG.exception("Failed to update quota while " "deleting volume.") volume.destroy() if reservations: QUOTAS.commit(context, reservations, project_id=project_id) volume_utils.notify_about_volume_usage(context, volume, "delete.end") LOG.info("Delete volume request issued successfully.", resource={'type': 'volume', 'id': volume.id}) utils.api_clean_volume_file_locks(volume.id) return if not unmanage_only: volume.assert_not_frozen() if unmanage_only and volume.encryption_key_id is not None: msg = _("Unmanaging encrypted volumes is not supported.") e = exception.Invalid(reason=msg) self.message.create( context, message_field.Action.UNMANAGE_VOLUME, resource_uuid=volume.id, detail=message_field.Detail.UNMANAGE_ENC_NOT_SUPPORTED, exception=e) raise e # Build required conditions for conditional update expected = { 'attach_status': db.Not(fields.VolumeAttachStatus.ATTACHED), 'migration_status': self.AVAILABLE_MIGRATION_STATUS, 'consistencygroup_id': None, 'group_id': None} # If not force deleting we have status conditions if not force: expected['status'] = ('available', 'error', 'error_restoring', 'error_extending', 'error_managing') if cascade: if force: # Ignore status checks, but ensure snapshots are not part # of a cgsnapshot. filters = [~db.volume_has_snapshots_in_a_cgsnapshot_filter()] else: # Allow deletion if all snapshots are in an expected state filters = [~db.volume_has_undeletable_snapshots_filter()] # Check if the volume has snapshots which are existing in # other project now. 
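The delete path above reserves negative quota deltas and only commits them once the database record is gone. A toy in-memory model of that reserve-then-commit ordering, assuming nothing about the real QUOTAS engine beyond its three-call interface:

class ToyQuotas:
    # Hypothetical stand-in for the quota engine, used only to illustrate the
    # two-phase pattern; it is not the Cinder implementation.
    def __init__(self, **in_use):
        self.in_use = dict(in_use)
        self._reservations = {}
        self._next_id = 0

    def reserve(self, **deltas):
        self._next_id += 1
        self._reservations[self._next_id] = deltas
        return self._next_id

    def commit(self, reservation_id):
        for key, delta in self._reservations.pop(reservation_id).items():
            self.in_use[key] = self.in_use.get(key, 0) + delta

    def rollback(self, reservation_id):
        self._reservations.pop(reservation_id, None)

quotas = ToyQuotas(volumes=3, gigabytes=30)
# Deleting a 10 GiB volume: reserve the negative deltas first, commit after
# the volume row has been destroyed, roll back if anything fails in between.
res = quotas.reserve(volumes=-1, gigabytes=-10)
quotas.commit(res)
assert quotas.in_use == {'volumes': 2, 'gigabytes': 20}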
if not context.is_admin: filters.append(~db.volume_has_other_project_snp_filter()) else: # Don't allow deletion of volume with snapshots filters = [~db.volume_has_snapshots_filter()] values = {'status': 'deleting', 'terminated_at': timeutils.utcnow()} if unmanage_only is True: values['status'] = 'unmanaging' if volume.status == 'error_managing': values['status'] = 'error_managing_deleting' result = volume.conditional_update(values, expected, filters) if not result: status = utils.build_or_str(expected.get('status'), _('status must be %s and')) msg = _('Volume %s must not be migrating, attached, belong to a ' 'group, have snapshots, awaiting a transfer, ' 'or be disassociated from ' 'snapshots after volume transfer.') % status LOG.info(msg) raise exception.InvalidVolume(reason=msg) if cascade: values = {'status': 'deleting'} expected = {'cgsnapshot_id': None, 'group_snapshot_id': None} if not force: expected['status'] = ('available', 'error', 'deleting') snapshots = objects.snapshot.SnapshotList.get_all_for_volume( context, volume.id) for s in snapshots: result = s.conditional_update(values, expected, filters) if not result: volume.update({'status': 'error_deleting'}) volume.save() msg = _('Failed to update snapshot.') raise exception.InvalidVolume(reason=msg) cache = image_cache.ImageVolumeCache(self.db, self) entry = cache.get_by_image_volume(context, volume.id) if entry: cache.evict(context, entry) # If the volume is encrypted, delete its encryption key from the key # manager. This operation makes volume deletion an irreversible process # because the volume cannot be decrypted without its key. encryption_key_id = volume.get('encryption_key_id', None) if encryption_key_id is not None: try: volume_utils.delete_encryption_key(context, self.key_manager, encryption_key_id) except Exception as e: volume.update({'status': 'error_deleting'}) volume.save() if hasattr(e, 'msg'): # ignore type (Exception has no attr "msg") error msg = _("Unable to delete encryption key for " "volume: %s") % (e.msg) else: msg = _("Unable to delete encryption key for volume.") LOG.error(msg) raise exception.InvalidVolume(reason=msg) self.volume_rpcapi.delete_volume(context, volume, unmanage_only, cascade) utils.api_clean_volume_file_locks(volume.id) LOG.info("Delete volume request issued successfully.", resource=volume) def update(self, context: context.RequestContext, volume: objects.Volume, fields: dict) -> None: context.authorize(vol_policy.UPDATE_POLICY, target_obj=volume) # TODO(karthikp): Making sure volume is always oslo-versioned # If not we convert it at the start of update method. This check # needs to be removed once we have moved to ovo. 
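conditional_update drives most of the state transitions in this module. A simplified sketch of its contract on a plain dict, assuming only that the real call applies the new values when every expected condition holds and returns a falsy result otherwise (filters, db.Not and db.Case are left out here):

def toy_conditional_update(record, values, expected):
    # Illustrative model only; the real method issues a single guarded SQL
    # UPDATE rather than a read-modify-write in Python.
    for key, want in expected.items():
        allowed = want if isinstance(want, (tuple, list, set)) else (want,)
        if record.get(key) not in allowed:
            return False
    record.update(values)
    return True

volume = {'status': 'available', 'migration_status': None, 'group_id': None}
ok = toy_conditional_update(
    volume,
    {'status': 'deleting'},
    {'status': ('available', 'error'), 'group_id': (None,)})
assert ok and volume['status'] == 'deleting'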
if not isinstance(volume, objects_base.CinderObject): vol_obj = objects.Volume() volume = objects.Volume._from_db_object(context, vol_obj, volume) if volume.status == 'maintenance': LOG.info("Unable to update volume, " "because it is in maintenance.", resource=volume) msg = _("The volume cannot be updated during maintenance.") raise exception.InvalidVolume(reason=msg) utils.check_metadata_properties(fields.get('metadata', None)) volume.update(fields) volume.save() LOG.info("Volume updated successfully.", resource=volume) def get(self, context: context.RequestContext, volume_id: str, viewable_admin_meta: bool = False) -> objects.Volume: volume = objects.Volume.get_by_id(context, volume_id) try: context.authorize(vol_policy.GET_POLICY, target_obj=volume) except exception.PolicyNotAuthorized: # raise VolumeNotFound to avoid providing info about # the existence of an unauthorized volume id raise exception.VolumeNotFound(volume_id=volume_id) if viewable_admin_meta: ctxt = context.elevated() admin_metadata = self.db.volume_admin_metadata_get(ctxt, volume_id) volume.admin_metadata = admin_metadata volume.obj_reset_changes() LOG.info("Volume info retrieved successfully.", resource=volume) return volume def calculate_resource_count(self, context: context.RequestContext, resource_type: str, filters: Optional[dict]) -> int: filters = filters if filters else {} allTenants = utils.get_bool_param('all_tenants', filters) if context.is_admin and allTenants: del filters['all_tenants'] else: filters['project_id'] = context.project_id return db.calculate_resource_count(context, resource_type, filters) def get_all(self, context: context.RequestContext, marker: Optional[str] = None, limit: Optional[int] = None, sort_keys: Optional[Iterable[str]] = None, sort_dirs: Optional[Iterable[str]] = None, filters: Optional[dict] = None, viewable_admin_meta: bool = False, offset: Optional[int] = None) -> objects.VolumeList: context.authorize(vol_policy.GET_ALL_POLICY) if filters is None: filters = {} allTenants = utils.get_bool_param('all_tenants', filters) try: if limit is not None: limit = int(limit) if limit < 0: msg = _('limit param must be positive') raise exception.InvalidInput(reason=msg) except ValueError: msg = _('limit param must be an integer') raise exception.InvalidInput(reason=msg) # Non-admin shouldn't see temporary target of a volume migration, add # unique filter data to reflect that only volumes with a NULL # 'migration_status' or a 'migration_status' that does not start with # 'target:' should be returned (processed in db/sqlalchemy/api.py) if not context.is_admin: filters['no_migration_targets'] = True if filters: LOG.debug("Searching by: %s.", filters) if context.is_admin and allTenants: # Need to remove all_tenants to pass the filtering below. 
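The limit handling in get_all above can be read as a small parsing routine; this standalone version keeps the same rules (None means unlimited, anything else must parse as a non-negative integer) and uses ValueError as a stand-in for exception.InvalidInput:

def parse_limit(limit):
    # Illustrative helper mirroring the validation above.
    if limit is None:
        return None
    try:
        limit = int(limit)
    except (TypeError, ValueError):
        raise ValueError('limit param must be an integer')
    if limit < 0:
        raise ValueError('limit param must be positive')
    return limit

# parse_limit('25') -> 25; parse_limit('-1') and parse_limit('abc') raise ValueError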
del filters['all_tenants'] volumes = objects.VolumeList.get_all(context, marker, limit, sort_keys=sort_keys, sort_dirs=sort_dirs, filters=filters, offset=offset) else: if viewable_admin_meta: context = context.elevated() volumes = objects.VolumeList.get_all_by_project( context, context.project_id, marker, limit, sort_keys=sort_keys, sort_dirs=sort_dirs, filters=filters, offset=offset) LOG.info("Get all volumes completed successfully.") return volumes def get_volume_summary( self, context: context.RequestContext, filters: Optional[dict] = None) -> objects.VolumeList: context.authorize(vol_policy.GET_ALL_POLICY) if filters is None: filters = {} all_tenants = utils.get_bool_param('all_tenants', filters) filters.pop('all_tenants', None) project_only = not (all_tenants and context.is_admin) volumes = objects.VolumeList.get_volume_summary(context, project_only) LOG.info("Get summary completed successfully.") return volumes def get_snapshot(self, context: context.RequestContext, snapshot_id: str) -> objects.Snapshot: snapshot = objects.Snapshot.get_by_id(context, snapshot_id) context.authorize(snapshot_policy.GET_POLICY, target_obj=snapshot) # FIXME(jdg): The objects don't have the db name entries # so build the resource tag manually for now. LOG.info("Snapshot retrieved successfully.", resource={'type': 'snapshot', 'id': snapshot.id}) return snapshot def get_volume(self, context: context.RequestContext, volume_id: str) -> objects.Volume: volume = objects.Volume.get_by_id(context, volume_id) context.authorize(vol_policy.GET_POLICY, target_obj=volume) LOG.info("Volume retrieved successfully.", resource=volume) return volume def get_all_snapshots( self, context: context.RequestContext, search_opts: Optional[dict] = None, marker: Optional[str] = None, limit: Optional[int] = None, sort_keys: Optional[list[str]] = None, sort_dirs: Optional[list[str]] = None, offset: Optional[int] = None) -> objects.SnapshotList: context.authorize(snapshot_policy.GET_ALL_POLICY) search_opts = search_opts or {} # Need to remove all_tenants to pass the filtering below. 
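Both get_all and get_volume_summary scope the query the same way: the all_tenants flag only widens results for administrators. A compact sketch of that decision using oslo.utils for the boolean parsing; the helper name is illustrative:

from oslo_utils import strutils

def project_only_scope(is_admin, filters):
    # Pop the flag so it does not leak into the remaining DB filters, then
    # scope to the caller's project unless an admin asked for all tenants.
    all_tenants = strutils.bool_from_string(filters.pop('all_tenants', 'false'))
    return not (all_tenants and is_admin)

assert project_only_scope(True, {'all_tenants': 'true'}) is False
assert project_only_scope(False, {'all_tenants': 'true'}) is True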
all_tenants = strutils.bool_from_string(search_opts.pop('all_tenants', 'false')) if context.is_admin and all_tenants: snapshots = objects.SnapshotList.get_all( context, search_opts, marker, limit, sort_keys, sort_dirs, offset) else: snapshots = objects.SnapshotList.get_all_by_project( context, context.project_id, search_opts, marker, limit, sort_keys, sort_dirs, offset) LOG.info("Get all snapshots completed successfully.") return snapshots def reserve_volume(self, context: context.RequestContext, volume: objects.Volume) -> None: context.authorize(vol_action_policy.RESERVE_POLICY, target_obj=volume) expected = {'multiattach': volume.multiattach, 'status': (('available', 'in-use') if volume.multiattach else 'available')} result = volume.conditional_update({'status': 'attaching'}, expected) if not result: expected_status = utils.build_or_str(expected['status']) msg = _('Volume status must be %(expected)s to reserve, but the ' 'status is %(current)s.') % {'expected': expected_status, 'current': volume.status} LOG.error(msg) raise exception.InvalidVolume(reason=msg) LOG.info("Reserve volume completed successfully.", resource=volume) def unreserve_volume(self, context: context.RequestContext, volume: objects.Volume) -> None: context.authorize(vol_action_policy.UNRESERVE_POLICY, target_obj=volume) expected = {'status': 'attaching'} # Status change depends on whether it has attachments (in-use) or not # (available) value = {'status': db.Case([(db.volume_has_attachments_filter(), 'in-use')], else_='available')} result = volume.conditional_update(value, expected) if not result: LOG.debug("Attempted to unreserve volume that was not " "reserved, nothing to do.", resource=volume) return LOG.info("Unreserve volume completed successfully.", resource=volume) def begin_detaching(self, context: context.RequestContext, volume: objects.Volume) -> None: context.authorize(vol_action_policy.BEGIN_DETACHING_POLICY, target_obj=volume) # If we are in the middle of a volume migration, we don't want the # user to see that the volume is 'detaching'. Having # 'migration_status' set will have the same effect internally. expected = {'status': 'in-use', 'attach_status': fields.VolumeAttachStatus.ATTACHED, 'migration_status': self.AVAILABLE_MIGRATION_STATUS} result = volume.conditional_update({'status': 'detaching'}, expected) if not (result or self._is_volume_migrating(volume)): msg = _("Unable to detach volume. 
Volume status must be 'in-use' " "and attach_status must be 'attached' to detach.") LOG.error(msg) raise exception.InvalidVolume(reason=msg) LOG.info("Begin detaching volume completed successfully.", resource=volume) def roll_detaching(self, context: context.RequestContext, volume: objects.Volume) -> None: context.authorize(vol_action_policy.ROLL_DETACHING_POLICY, target_obj=volume) volume.conditional_update({'status': 'in-use'}, {'status': 'detaching'}) LOG.info("Roll detaching of volume completed successfully.", resource=volume) def attach(self, context: context.RequestContext, volume: objects.Volume, instance_uuid: str, host_name: str, mountpoint: str, mode: str) -> objects.VolumeAttachment: context.authorize(vol_action_policy.ATTACH_POLICY, target_obj=volume) if volume.status == 'maintenance': LOG.info('Unable to attach volume, ' 'because it is in maintenance.', resource=volume) msg = _("The volume cannot be attached in maintenance mode.") raise exception.InvalidVolume(reason=msg) # We add readonly metadata if it doesn't already exist readonly = self.update_volume_admin_metadata(context.elevated(), volume, {'readonly': 'False'}, update=False)['readonly'] if readonly == 'True' and mode != 'ro': raise exception.InvalidVolumeAttachMode(mode=mode, volume_id=volume.id) attach_results = self.volume_rpcapi.attach_volume(context, volume, instance_uuid, host_name, mountpoint, mode) LOG.info("Attach volume completed successfully.", resource=volume) return attach_results def detach(self, context: context.RequestContext, volume: objects.Volume, attachment_id: str) -> None: context.authorize(vol_action_policy.DETACH_POLICY, target_obj=volume) self.attachment_deletion_allowed(context, attachment_id, volume) if volume['status'] == 'maintenance': LOG.info('Unable to detach volume, ' 'because it is in maintenance.', resource=volume) msg = _("The volume cannot be detached in maintenance mode.") raise exception.InvalidVolume(reason=msg) detach_results = self.volume_rpcapi.detach_volume(context, volume, attachment_id) LOG.info("Detach volume completed successfully.", resource=volume) return detach_results def initialize_connection(self, context: context.RequestContext, volume: objects.Volume, connector: dict) -> dict: context.authorize(vol_action_policy.INITIALIZE_POLICY, target_obj=volume) if volume.status == 'maintenance': LOG.info('Unable to initialize the connection for ' 'volume, because it is in ' 'maintenance.', resource=volume) msg = _("The volume connection cannot be initialized in " "maintenance mode.") raise exception.InvalidVolume(reason=msg) init_results = self.volume_rpcapi.initialize_connection(context, volume, connector) LOG.info("Initialize volume connection completed successfully.", resource=volume) return init_results @staticmethod def is_service_request(ctxt: 'context.RequestContext') -> bool: """Check if a request is coming from a service A request is coming from a service if it has a service token and the service user has one of the roles configured in the `service_token_roles` configuration option in the `[keystone_authtoken]` section (defaults to `service`). 
""" roles = ctxt.service_roles service_roles = set(CONF.keystone_authtoken.service_token_roles) return bool(roles and service_roles.intersection(roles)) def terminate_connection(self, context: context.RequestContext, volume: objects.Volume, connector: dict, force: bool = False) -> None: context.authorize(vol_action_policy.TERMINATE_POLICY, target_obj=volume) self.attachment_deletion_allowed(context, None, volume) self.volume_rpcapi.terminate_connection(context, volume, connector, force) LOG.info("Terminate volume connection completed successfully.", resource=volume) self.unreserve_volume(context, volume) def accept_transfer(self, context: context.RequestContext, volume: objects.Volume, new_user: str, new_project: str, no_snapshots: bool = False) -> dict: if volume['status'] == 'maintenance': LOG.info('Unable to accept transfer for volume, ' 'because it is in maintenance.', resource=volume) msg = _("The volume cannot accept transfer in maintenance mode.") raise exception.InvalidVolume(reason=msg) results = self.volume_rpcapi.accept_transfer(context, volume, new_user, new_project, no_snapshots=no_snapshots) LOG.info("Transfer volume completed successfully.", resource=volume) return results def _create_snapshot( self, context: context.RequestContext, volume: objects.Volume, name: str, description: str, force: bool = False, metadata: Optional[dict] = None, cgsnapshot_id: Optional[str] = None, group_snapshot_id: Optional[str] = None, allow_in_use: bool = False) -> objects.Snapshot: volume.assert_not_frozen() snapshot = self.create_snapshot_in_db( context, volume, name, description, force, metadata, cgsnapshot_id, True, group_snapshot_id, allow_in_use) # NOTE(tommylikehu): We only wrap the 'size' attribute here # because only the volume's host is passed and only capacity is # validated in the scheduler now. kwargs = {'snapshot_id': snapshot.id, 'volume_properties': objects.VolumeProperties( size=volume.size)} self.scheduler_rpcapi.create_snapshot(context, volume, snapshot, volume.service_topic_queue, objects.RequestSpec(**kwargs)) return snapshot def create_snapshot_in_db( self, context: context.RequestContext, volume: objects.Volume, name: Optional[str], description: Optional[str], force: bool, metadata: Optional[dict], cgsnapshot_id: Optional[str], commit_quota: bool = True, group_snapshot_id: Optional[str] = None, allow_in_use: bool = False) -> objects.Snapshot: self._create_snapshot_in_db_validate(context, volume) utils.check_metadata_properties(metadata) valid_status: tuple[str, ...] 
valid_status = ('available',) if force or allow_in_use: valid_status = ('available', 'in-use') if volume['status'] not in valid_status: msg = _("Volume %(vol_id)s status must be %(status)s, " "but current status is: " "%(vol_status)s.") % {'vol_id': volume['id'], 'status': ', '.join(valid_status), 'vol_status': volume['status']} raise exception.InvalidVolume(reason=msg) if commit_quota: try: if CONF.no_snapshot_gb_quota: reserve_opts = {'snapshots': 1} else: reserve_opts = {'snapshots': 1, 'gigabytes': volume['size']} QUOTAS.add_volume_type_opts(context, reserve_opts, volume.get('volume_type_id')) reservations = QUOTAS.reserve(context, **reserve_opts) except exception.OverQuota as e: quota_utils.process_reserve_over_quota( context, e, resource='snapshots', size=volume.size) snapshot = None try: kwargs = { 'volume_id': volume['id'], 'cgsnapshot_id': cgsnapshot_id, 'group_snapshot_id': group_snapshot_id, 'user_id': context.user_id, 'project_id': context.project_id, 'status': fields.SnapshotStatus.CREATING, 'progress': '0%', 'volume_size': volume['size'], 'display_name': name, 'display_description': description, 'volume_type_id': volume['volume_type_id'], 'encryption_key_id': volume['encryption_key_id'], 'metadata': metadata or {} } snapshot = objects.Snapshot(context=context, **kwargs) snapshot.create() volume.refresh() if volume['status'] not in valid_status: msg = _("Volume %(vol_id)s status must be %(status)s , " "but current status is: " "%(vol_status)s.") % {'vol_id': volume['id'], 'status': ', '.join(valid_status), 'vol_status': volume['status']} raise exception.InvalidVolume(reason=msg) if commit_quota: QUOTAS.commit(context, reservations) except Exception: with excutils.save_and_reraise_exception(): try: assert snapshot is not None if snapshot.obj_attr_is_set('id'): snapshot.destroy() finally: if commit_quota: QUOTAS.rollback(context, reservations) return snapshot def create_snapshots_in_db( self, context: context.RequestContext, volume_list: list, name: str, description: str, cgsnapshot_id: str, group_snapshot_id: Optional[str] = None) -> list: snapshot_list = [] for volume in volume_list: self._create_snapshot_in_db_validate(context, volume) reservations = self._create_snapshots_in_db_reserve( context, volume_list) options_list = [] for volume in volume_list: options = self._create_snapshot_in_db_options( context, volume, name, description, cgsnapshot_id, group_snapshot_id) options_list.append(options) try: for options in options_list: snapshot = objects.Snapshot(context=context, **options) snapshot.create() snapshot_list.append(snapshot) QUOTAS.commit(context, reservations) except Exception: with excutils.save_and_reraise_exception(): try: for snap in snapshot_list: snap.destroy() finally: QUOTAS.rollback(context, reservations) return snapshot_list def _create_snapshot_in_db_validate(self, context: context.RequestContext, volume: objects.Volume) -> None: context.authorize(snapshot_policy.CREATE_POLICY, target_obj=volume) if not volume.host: msg = _("The snapshot cannot be created because volume has " "not been scheduled to any host.") raise exception.InvalidVolume(reason=msg) if volume['status'] == 'maintenance': LOG.info('Unable to create the snapshot for volume, ' 'because it is in maintenance.', resource=volume) msg = _("The snapshot cannot be created when the volume is in " "maintenance mode.") raise exception.InvalidVolume(reason=msg) if self._is_volume_migrating(volume): # Volume is migrating, wait until done msg = _("Snapshot cannot be created while volume is migrating.") 
raise exception.InvalidVolume(reason=msg) if volume['status'] == 'error': msg = _("The snapshot cannot be created when the volume is " "in error status.") LOG.error(msg) raise exception.InvalidVolume(reason=msg) if volume['status'].startswith('replica_'): # Can't snapshot secondary replica msg = _("Snapshot of secondary replica is not allowed.") raise exception.InvalidVolume(reason=msg) def _create_snapshots_in_db_reserve( self, context: context.RequestContext, volume_list: objects.VolumeList) -> list: reserve_opts_list = [] total_reserve_opts: dict[str, int] = {} try: for volume in volume_list: if CONF.no_snapshot_gb_quota: reserve_opts = {'snapshots': 1} else: reserve_opts = {'snapshots': 1, 'gigabytes': volume['size']} QUOTAS.add_volume_type_opts(context, reserve_opts, volume.get('volume_type_id')) reserve_opts_list.append(reserve_opts) for reserve_opts in reserve_opts_list: for (key, value) in reserve_opts.items(): if key not in total_reserve_opts.keys(): total_reserve_opts[key] = value else: total_reserve_opts[key] = \ total_reserve_opts[key] + value reservations = QUOTAS.reserve(context, **total_reserve_opts) except exception.OverQuota as e: quota_utils.process_reserve_over_quota( context, e, resource='snapshots', size=total_reserve_opts.get('gigabytes', volume.size)) return reservations def _create_snapshot_in_db_options( self, context: context.RequestContext, volume: objects.Volume, name: str, description: str, cgsnapshot_id: str, group_snapshot_id: Optional[str] = None) -> dict[str, Any]: options = {'volume_id': volume['id'], 'cgsnapshot_id': cgsnapshot_id, 'group_snapshot_id': group_snapshot_id, 'user_id': context.user_id, 'project_id': context.project_id, 'status': fields.SnapshotStatus.CREATING, 'progress': '0%', 'volume_size': volume['size'], 'display_name': name, 'display_description': description, 'volume_type_id': volume['volume_type_id'], 'encryption_key_id': volume['encryption_key_id']} return options def create_snapshot( self, context: context.RequestContext, volume: objects.Volume, name: str, description: str, metadata: Optional[dict[str, Any]] = None, cgsnapshot_id: Optional[str] = None, group_snapshot_id: Optional[str] = None, allow_in_use: bool = False) -> objects.Snapshot: result = self._create_snapshot(context, volume, name, description, False, metadata, cgsnapshot_id, group_snapshot_id, allow_in_use) LOG.info("Snapshot create request issued successfully.", resource=result) return result def create_snapshot_force( self, context: context.RequestContext, volume: objects.Volume, name: str, description: str, metadata: Optional[dict[str, Any]] = None) -> objects.Snapshot: result = self._create_snapshot(context, volume, name, description, True, metadata) LOG.info("Snapshot force create request issued successfully.", resource=result) return result def delete_snapshot(self, context: context.RequestContext, snapshot: objects.Snapshot, force: bool = False, unmanage_only: bool = False) -> None: context.authorize(snapshot_policy.DELETE_POLICY, target_obj=snapshot) if not unmanage_only: snapshot.assert_not_frozen() # Build required conditions for conditional update expected: dict[str, Any] = {'cgsnapshot_id': None, 'group_snapshot_id': None} # If not force deleting we have status conditions if not force: expected['status'] = (fields.SnapshotStatus.AVAILABLE, fields.SnapshotStatus.ERROR) values = {'status': fields.SnapshotStatus.DELETING} if unmanage_only is True: values['status'] = fields.SnapshotStatus.UNMANAGING result = snapshot.conditional_update(values, expected) if not 
result: status = utils.build_or_str(expected.get('status'), _('status must be %s and')) msg = (_('Snapshot %s must not be part of a group.') % status) LOG.error(msg) raise exception.InvalidSnapshot(reason=msg) self.volume_rpcapi.delete_snapshot(context, snapshot, unmanage_only) LOG.info("Snapshot delete request issued successfully.", resource=snapshot) def update_snapshot(self, context: context.RequestContext, snapshot: objects.Snapshot, fields: dict[str, Any]) -> None: context.authorize(snapshot_policy.UPDATE_POLICY, target_obj=snapshot) snapshot.update(fields) snapshot.save() def get_volume_metadata(self, context: context.RequestContext, volume: objects.Volume) -> dict: """Get all metadata associated with a volume.""" context.authorize(vol_meta_policy.GET_POLICY, target_obj=volume) rv = self.db.volume_metadata_get(context, volume['id']) LOG.info("Get volume metadata completed successfully.", resource=volume) return dict(rv) def create_volume_metadata(self, context: context.RequestContext, volume: objects.Volume, metadata: dict[str, Any]) -> dict: """Creates volume metadata.""" context.authorize(vol_meta_policy.CREATE_POLICY, target_obj=volume) db_meta = self._update_volume_metadata(context, volume, metadata) LOG.info("Create volume metadata completed successfully.", resource=volume) return db_meta def delete_volume_metadata(self, context: context.RequestContext, volume: objects.Volume, key: str, meta_type=common.METADATA_TYPES.user) -> None: """Delete the given metadata item from a volume.""" context.authorize(vol_meta_policy.DELETE_POLICY, target_obj=volume) if volume.status in ('maintenance', 'uploading'): msg = _('Deleting volume metadata is not allowed for volumes in ' '%s status.') % volume.status LOG.info(msg, resource=volume) raise exception.InvalidVolume(reason=msg) self.db.volume_metadata_delete(context, volume.id, key, meta_type) LOG.info("Delete volume metadata completed successfully.", resource=volume) def _update_volume_metadata(self, context: context.RequestContext, volume: objects.Volume, metadata: dict[str, Any], delete: bool = False, meta_type=common.METADATA_TYPES.user) -> dict: if volume['status'] in ('maintenance', 'uploading'): msg = _('Updating volume metadata is not allowed for volumes in ' '%s status.') % volume['status'] LOG.info(msg, resource=volume) raise exception.InvalidVolume(reason=msg) return self.db.volume_metadata_update(context, volume['id'], metadata, delete, meta_type) def update_volume_metadata(self, context: context.RequestContext, volume: objects.Volume, metadata: dict[str, Any], delete: bool = False, meta_type=common.METADATA_TYPES.user) -> dict: """Updates volume metadata. If delete is True, metadata items that are not specified in the `metadata` argument will be deleted. """ context.authorize(vol_meta_policy.UPDATE_POLICY, target_obj=volume) db_meta = self._update_volume_metadata(context, volume, metadata, delete, meta_type) # TODO(jdg): Implement an RPC call for drivers that may use this info LOG.info("Update volume metadata completed successfully.", resource=volume) return db_meta def update_volume_admin_metadata(self, context: context.RequestContext, volume: objects.Volume, metadata: dict[str, Any], delete: Optional[bool] = False, add: Optional[bool] = True, update: Optional[bool] = True) -> dict: """Updates or creates volume administration metadata. If delete is True, metadata items that are not specified in the `metadata` argument will be deleted. 
""" context.authorize(vol_meta_policy.UPDATE_ADMIN_METADATA_POLICY, target_obj=volume) utils.check_metadata_properties(metadata) # Policy could allow non admin users to update admin metadata, but # underlying DB methods require admin privileges, so we elevate the # context. with volume.obj_as_admin(): volume.admin_metadata_update(metadata, delete, add, update) # TODO(jdg): Implement an RPC call for drivers that may use this info LOG.info("Update volume admin metadata completed successfully.", resource=volume) return volume.admin_metadata def get_snapshot_metadata(self, context: context.RequestContext, snapshot: objects.Snapshot) -> dict: """Get all metadata associated with a snapshot.""" context.authorize(s_meta_policy.GET_POLICY, target_obj=snapshot) LOG.info("Get snapshot metadata completed successfully.", resource=snapshot) return snapshot.metadata def delete_snapshot_metadata(self, context: context.RequestContext, snapshot: objects.Snapshot, key: str) -> None: """Delete the given metadata item from a snapshot.""" context.authorize(s_meta_policy.DELETE_POLICY, target_obj=snapshot) snapshot.delete_metadata_key(context, key) LOG.info("Delete snapshot metadata completed successfully.", resource=snapshot) def update_snapshot_metadata(self, context: context.RequestContext, snapshot: objects.Snapshot, metadata: dict[str, Any], delete: bool = False) -> dict: """Updates or creates snapshot metadata. If delete is True, metadata items that are not specified in the `metadata` argument will be deleted. """ context.authorize(s_meta_policy.UPDATE_POLICY, target_obj=snapshot) if delete: _metadata = metadata else: orig_meta = snapshot.metadata _metadata = orig_meta.copy() _metadata.update(metadata) utils.check_metadata_properties(_metadata) snapshot.metadata = _metadata snapshot.save() # TODO(jdg): Implement an RPC call for drivers that may use this info LOG.info("Update snapshot metadata completed successfully.", resource=snapshot) return snapshot.metadata def get_volumes_image_metadata( self, context: context.RequestContext) -> collections.defaultdict: context.authorize(vol_meta_policy.GET_POLICY) db_data = self.db.volume_glance_metadata_get_all(context) results: collections.defaultdict = collections.defaultdict(dict) for meta_entry in db_data: results[meta_entry['volume_id']].update({meta_entry['key']: meta_entry['value']}) return results def get_volume_image_metadata(self, context: context.RequestContext, volume: objects.Volume) -> dict[str, str]: context.authorize(vol_meta_policy.GET_POLICY, target_obj=volume) db_data = self.db.volume_glance_metadata_get(context, volume['id']) LOG.info("Get volume image-metadata completed successfully.", resource=volume) return {meta_entry.key: meta_entry.value for meta_entry in db_data} def get_list_volumes_image_metadata( self, context: context.RequestContext, volume_id_list: list[str]) -> DefaultDict[str, str]: db_data = self.db.volume_glance_metadata_list_get(context, volume_id_list) results: collections.defaultdict = collections.defaultdict(dict) for meta_entry in db_data: results[meta_entry['volume_id']].update({meta_entry['key']: meta_entry['value']}) return results def _merge_volume_image_meta(self, context: context.RequestContext, volume: objects.Volume, image_meta: dict) -> None: """Merges the image metadata from volume into image_meta""" glance_core_props = CONF.glance_core_properties if glance_core_props: try: vol_img_metadata = self.get_volume_image_metadata( context, volume) custom_property_set = ( set(vol_img_metadata).difference(glance_core_props)) 
if custom_property_set: # only include elements that haven't already been # assigned values filtered_property_set = custom_property_set.difference( image_meta) image_meta['properties'] = { custom_prop: vol_img_metadata[custom_prop] for custom_prop in filtered_property_set} except exception.GlanceMetadataNotFound: # If volume is not created from image, No glance metadata # would be available for that volume in # volume glance metadata table pass def copy_volume_to_image(self, context: context.RequestContext, volume: objects.Volume, metadata: dict[str, str], force: bool) -> dict[str, Optional[str]]: """Create a new image from the specified volume.""" if not CONF.enable_force_upload and force: LOG.info("Force upload to image is disabled, " "Force option will be ignored.", resource={'type': 'volume', 'id': volume['id']}) force = False # Build required conditions for conditional update expected = {'status': ('available', 'in-use') if force else 'available'} values = {'status': 'uploading', 'previous_status': volume.model.status} result = volume.conditional_update(values, expected) if not result: msg = (_('Volume %(vol_id)s status must be %(statuses)s') % {'vol_id': volume.id, 'statuses': utils.build_or_str(expected['status'])}) raise exception.InvalidVolume(reason=msg) try: self._merge_volume_image_meta(context, volume, metadata) metadata = image_utils.filter_out_reserved_namespaces_metadata( metadata) recv_metadata = self.image_service.create(context, metadata) # NOTE(ZhengMa): Check if allow image compression before image # uploading if recv_metadata.get('container_format') == 'compressed': allow_compression = CONF.allow_compression_on_image_upload if allow_compression is False: raise exception.ImageCompressionNotAllowed() except Exception: # NOTE(geguileo): To mimic behavior before conditional_update we # will rollback status if image create fails with excutils.save_and_reraise_exception(): volume.conditional_update( {'status': volume.model.previous_status, 'previous_status': None}, {'status': 'uploading'}) self.volume_rpcapi.copy_volume_to_image(context, volume, recv_metadata) response = {"id": volume['id'], "updated_at": volume['updated_at'], "status": 'uploading', "display_description": volume['display_description'], "size": volume['size'], "volume_type": volume['volume_type'], "image_id": recv_metadata['id'], "container_format": recv_metadata['container_format'], "disk_format": recv_metadata['disk_format'], "image_name": recv_metadata.get('name', None)} if 'protected' in recv_metadata: response['protected'] = recv_metadata.get('protected') if 'is_public' in recv_metadata: response['is_public'] = recv_metadata.get('is_public') elif 'visibility' in recv_metadata: response['visibility'] = recv_metadata.get('visibility') LOG.info("Copy volume to image completed successfully.", resource=volume) return response def _extend(self, context: context.RequestContext, volume: objects.Volume, new_size: int, attached: bool = False) -> None: value = {'status': 'extending', 'previous_status': volume.status} expected: dict if attached: expected = {'status': 'in-use'} else: expected = {'status': 'available'} orig_status = {'status': volume.status} def _roll_back_status() -> None: status = orig_status['status'] msg = _('Could not return volume %(id)s to %(status)s.') try: if not volume.conditional_update(orig_status, value): LOG.error(msg, {'id': volume.id, 'status': status}) except Exception: LOG.exception(msg, {'id': volume.id, 'status': status}) size_increase = (int(new_size)) - volume.size if 
size_increase <= 0: msg = (_("New size for extend must be greater " "than current size. (current: %(size)s, " "extended: %(new_size)s).") % {'new_size': new_size, 'size': volume.size}) raise exception.InvalidInput(reason=msg) # Make sure we pass the potential size limitations in the volume type try: volume_type = volume_types.get_volume_type(context, volume.volume_type_id) except (exception.InvalidVolumeType, exception.VolumeTypeNotFound): volume_type = None volume_types.provision_filter_on_size(context, volume_type, new_size) result = volume.conditional_update(value, expected) if not result: msg = (_("Volume %(vol_id)s status must be '%(expected)s' " "to extend, currently %(status)s.") % {'vol_id': volume.id, 'status': volume.status, 'expected': str(expected)}) raise exception.InvalidVolume(reason=msg) rollback = True try: values = {'per_volume_gigabytes': new_size} QUOTAS.limit_check(context, project_id=context.project_id, **values) rollback = False except exception.OverQuota as e: quotas = e.kwargs['quotas'] raise exception.VolumeSizeExceedsLimit( size=new_size, limit=quotas['per_volume_gigabytes']) finally: # NOTE(geguileo): To mimic behavior before conditional_update we # will rollback status on quota reservation failure regardless of # the exception that caused the failure. if rollback: _roll_back_status() reservations = None try: reserve_opts = {'gigabytes': size_increase} QUOTAS.add_volume_type_opts(context, reserve_opts, volume.volume_type_id) reservations = QUOTAS.reserve(context, project_id=volume.project_id, **reserve_opts) except exception.OverQuota as exc: gigabytes = exc.kwargs['usages']['gigabytes'] gb_quotas = exc.kwargs['quotas']['gigabytes'] consumed = gigabytes['reserved'] + gigabytes['in_use'] LOG.error("Quota exceeded for %(s_pid)s, tried to extend volume " "by %(s_size)sG, (%(d_consumed)dG of %(d_quota)dG " "already consumed).", {'s_pid': context.project_id, 's_size': size_increase, 'd_consumed': consumed, 'd_quota': gb_quotas}) raise exception.VolumeSizeExceedsAvailableQuota( requested=size_increase, consumed=consumed, quota=gb_quotas) finally: # NOTE(geguileo): To mimic behavior before conditional_update we # will rollback status on quota reservation failure regardless of # the exception that caused the failure. if reservations is None: _roll_back_status() volume_type = {} if volume.volume_type_id: volume_type = volume_types.get_volume_type(context.elevated(), volume.volume_type_id) request_spec = { 'volume_properties': volume, 'volume_type': volume_type, 'volume_id': volume.id } self.scheduler_rpcapi.extend_volume(context, volume, new_size, reservations, request_spec) LOG.info("Extend volume request issued successfully.", resource=volume) def extend(self, context: context.RequestContext, volume: objects.Volume, new_size: int) -> None: context.authorize(vol_action_policy.EXTEND_POLICY, target_obj=volume) self._extend(context, volume, new_size, attached=False) # NOTE(tommylikehu): New method is added here so that administrator # can enable/disable this ability by editing the policy file if the # cloud environment doesn't allow this operation. 
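The extend path above boils down to two size rules before any reservation is made: the request must actually grow the volume, and the resulting size must stay under the per-volume gigabyte limit. A standalone sketch of that arithmetic; plan_extend and the ValueError are illustrative, not Cinder APIs:

def plan_extend(current_size, new_size, per_volume_limit=None):
    # Returns the gigabyte delta that would need to be reserved.
    new_size = int(new_size)
    size_increase = new_size - current_size
    if size_increase <= 0:
        raise ValueError('new size must be greater than current size '
                         '(current: %s, requested: %s)' % (current_size, new_size))
    if per_volume_limit is not None and new_size > per_volume_limit:
        raise ValueError('per-volume limit of %s GiB exceeded' % per_volume_limit)
    return {'gigabytes': size_increase}

# plan_extend(10, 15, per_volume_limit=100) -> {'gigabytes': 5}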
def extend_attached_volume(self, context: context.RequestContext, volume: objects.Volume, new_size: int) -> None: context.authorize(vol_action_policy.EXTEND_ATTACHED_POLICY, target_obj=volume) self._extend(context, volume, new_size, attached=True) def extend_volume_completion(self, context: context.RequestContext, volume: objects.Volume, error: bool): context.authorize(vol_action_policy.EXTEND_COMPLETE_POLICY, target_obj=volume) if volume.status != 'extending': msg = _('Volume is not being extended.') raise exception.InvalidVolume(reason=msg) try: with volume.obj_as_admin(): new_size = int(volume.admin_metadata['extend_new_size']) reservations = jsonutils.loads( volume.admin_metadata['extend_reservations']) except (KeyError, ValueError, jsonutils.json.decoder.JSONDecodeError): msg = _('Required volume admin metadata is malformed or missing.') raise exception.InvalidVolume(reason=msg) if new_size <= volume.size: msg = _('The target volume size provided in volume admin metadata ' '%(size)s is smaller or equal to the current volume size.' % volume.admin_metadata["extend_new_size"]) raise exception.InvalidVolume(reason=msg) if type(reservations) is not list: msg = _('The stored quota reservations for extending the volume ' 'must be in a list format.') raise exception.InvalidVolume(reason=msg) with volume.obj_as_admin(): del volume.admin_metadata['extend_new_size'] del volume.admin_metadata['extend_reservations'] volume.save() self.volume_rpcapi.extend_volume_completion(context, volume, new_size, reservations, error) LOG.info("Extend volume completion issued successfully.", resource=volume) def migrate_volume(self, context: context.RequestContext, volume: objects.Volume, host: str, cluster_name: str, force_copy: bool, lock_volume: bool) -> None: """Migrate the volume to the specified host or cluster.""" elevated = context.elevated() context.authorize(vol_action_policy.MIGRATE_POLICY, target_obj=volume) # If we received a request to migrate to a host # Look for the service - must be up and enabled svc_host = host and volume_utils.extract_host(host, 'backend') svc_cluster = cluster_name and volume_utils.extract_host(cluster_name, 'backend') # NOTE(geguileo): Only svc_host or svc_cluster is set, so when we get # a service from the DB we are getting either one specific service from # a host or any service from a cluster that is up, which means that the # cluster itself is also up. try: svc = objects.Service.get_by_id(elevated, None, is_up=True, topic=constants.VOLUME_TOPIC, host=svc_host, disabled=False, cluster_name=svc_cluster, backend_match_level='pool') except exception.ServiceNotFound: msg = _("No available service named '%s'") % (cluster_name or host) LOG.error(msg) raise exception.InvalidHost(reason=msg) # Even if we were requested to do a migration to a host, if the host is # in a cluster we will do a cluster migration. cluster_name = svc.cluster_name # Build required conditions for conditional update expected = {'status': ('available', 'in-use'), 'migration_status': self.AVAILABLE_MIGRATION_STATUS, 'replication_status': ( None, fields.ReplicationStatus.DISABLED, fields.ReplicationStatus.NOT_CAPABLE), 'consistencygroup_id': (None, ''), 'group_id': (None, '')} # We want to make sure that the migration is to another host or # another cluster. 
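extend_volume_completion above recovers the pending extend parameters from the volume's admin metadata and validates them before handing off to the backend. A self-contained sketch of that parsing, assuming only oslo.serialization; the helper name and ValueError are illustrative:

from oslo_serialization import jsonutils

def read_extend_metadata(admin_metadata, current_size):
    # Both keys must exist, the size must be a larger integer, and the stored
    # reservations must decode to a JSON list, as in the checks above.
    try:
        new_size = int(admin_metadata['extend_new_size'])
        reservations = jsonutils.loads(admin_metadata['extend_reservations'])
    except (KeyError, ValueError):
        raise ValueError('required volume admin metadata is malformed or missing')
    if new_size <= current_size:
        raise ValueError('target size must be larger than the current size')
    if not isinstance(reservations, list):
        raise ValueError('stored quota reservations must be a list')
    return new_size, reservations

# read_extend_metadata({'extend_new_size': '20',
#                       'extend_reservations': '["res-1"]'}, current_size=10)
# -> (20, ['res-1'])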
if cluster_name: expected['cluster_name'] = db.Not(cluster_name) else: expected['host'] = db.Not(host) filters = [~db.volume_has_snapshots_filter()] updates = {'migration_status': 'starting', 'previous_status': volume.model.status} # When the migration of an available volume starts, both the status # and the migration status of the volume will be changed. # If the admin sets lock_volume flag to True, the volume # status is changed to 'maintenance', telling users # that this volume is in maintenance mode, and no action is allowed # on this volume, e.g. attach, detach, retype, migrate, etc. if lock_volume: updates['status'] = db.Case( [(volume.model.status == 'available', 'maintenance')], else_=volume.model.status) result = volume.conditional_update(updates, expected, filters) if not result: msg = _('Volume %s status must be available or in-use, must not ' 'be migrating, have snapshots, be replicated, be part of ' 'a group and destination host/cluster must be different ' 'than the current one') % volume.id LOG.error(msg) raise exception.InvalidVolume(reason=msg) # Call the scheduler to ensure that the host exists and that it can # accept the volume volume_type = {} if volume.volume_type_id: volume_type = volume_types.get_volume_type(context.elevated(), volume.volume_type_id) request_spec = {'volume_properties': volume, 'volume_type': volume_type, 'volume_id': volume.id} self.scheduler_rpcapi.migrate_volume(context, volume, cluster_name or host, force_copy, request_spec) LOG.info("Migrate volume request issued successfully.", resource=volume) def migrate_volume_completion(self, context: context.RequestContext, volume: objects.Volume, new_volume: objects.Volume, error: bool) -> str: context.authorize(vol_action_policy.MIGRATE_COMPLETE_POLICY, target_obj=volume) if not (volume.migration_status or new_volume.migration_status): # When we're not migrating and haven't hit any errors, we issue # volume attach and detach requests so the volumes don't end in # 'attaching' and 'detaching' state if not error: attachments = volume.volume_attachment for attachment in attachments: self.detach(context, volume, attachment.id) self.attach(context, new_volume, attachment.instance_uuid, attachment.attached_host, attachment.mountpoint, 'rw') return new_volume.id if not volume.migration_status: msg = _('Source volume not mid-migration.') raise exception.InvalidVolume(reason=msg) if not new_volume.migration_status: msg = _('Destination volume not mid-migration.') raise exception.InvalidVolume(reason=msg) expected_status = 'target:%s' % volume.id if not new_volume.migration_status == expected_status: msg = (_('Destination has migration_status %(stat)s, expected ' '%(exp)s.') % {'stat': new_volume.migration_status, 'exp': expected_status}) raise exception.InvalidVolume(reason=msg) LOG.info("Migrate volume completion issued successfully.", resource=volume) return self.volume_rpcapi.migrate_volume_completion(context, volume, new_volume, error) def update_readonly_flag(self, context: context.RequestContext, volume: objects.Volume, flag) -> None: context.authorize(vol_action_policy.UPDATE_READONLY_POLICY, target_obj=volume) if volume['status'] != 'available': msg = _('Volume %(vol_id)s status must be available ' 'to update readonly flag, but current status is: ' '%(vol_status)s.') % {'vol_id': volume['id'], 'vol_status': volume['status']} raise exception.InvalidVolume(reason=msg) self.update_volume_admin_metadata(context.elevated(), volume, {'readonly': str(flag)}) LOG.info("Update readonly setting on volume " 
"completed successfully.", resource=volume) def retype(self, context: context.RequestContext, volume: objects.Volume, new_type: Union[str, objects.VolumeType], migration_policy: Optional[str] = None) -> None: """Attempt to modify the type associated with an existing volume.""" context.authorize(vol_action_policy.RETYPE_POLICY, target_obj=volume) # Support specifying volume type by ID or name try: new_type = ( volume_type.VolumeType.get_by_name_or_id(context.elevated(), new_type)) except exception.InvalidVolumeType: msg = _('Invalid volume_type passed: %s.') % new_type LOG.error(msg) raise exception.InvalidInput(reason=msg) new_type_id = new_type['id'] # Make sure we pass the potential size limitations in the volume type volume_types.provision_filter_on_size(context, new_type, volume.size) # NOTE(jdg): We check here if multiattach is involved in either side # of the retype, we can't change multiattach on an in-use volume # because there's things the hypervisor needs when attaching, so # we just disallow retype of in-use volumes in this case. You still # have to get through scheduling if all the conditions are met, we # should consider an up front capabilities check to give fast feedback # rather than "No hosts found" and error status src_is_multiattach = volume.multiattach tgt_is_multiattach = False if new_type: tgt_is_multiattach = self._is_multiattach(new_type) if src_is_multiattach != tgt_is_multiattach: if volume.status != "available": msg = _('Invalid volume_type passed, retypes affecting ' 'multiattach are only allowed on available volumes, ' 'the specified volume however currently has a status ' 'of: %s.') % volume.status LOG.info(msg) raise exception.InvalidInput(reason=msg) # If they are retyping to a multiattach capable, make sure they # are allowed to do so. if tgt_is_multiattach: context.authorize(vol_policy.MULTIATTACH_POLICY, target_obj=volume) if tgt_is_multiattach and self._is_encrypted(new_type): msg = ('Retype requested both encryption and multi-attach, ' 'which is not supported.') raise exception.InvalidInput(reason=msg) # We're checking here in so that we can report any quota issues as # early as possible, but won't commit until we change the type. We # pass the reservations onward in case we need to roll back. reservations = quota_utils.get_volume_type_reservation( context, volume, new_type_id, reserve_vol_type_only=True) # Get old reservations try: old_reservations = quota_utils.get_volume_type_reservation( context, volume, volume.volume_type_id, reserve_vol_type_only=True, negative=True) except Exception: volume.status = volume.previous_status volume.save() msg = _("Failed to update quota usage while retyping volume.") LOG.exception(msg, resource=volume) raise exception.CinderException(msg) # Build required conditions for conditional update expected = {'status': ('available', 'in-use'), 'migration_status': self.AVAILABLE_MIGRATION_STATUS, 'consistencygroup_id': (None, ''), 'group_id': (None, ''), 'volume_type_id': db.Not(new_type_id)} # We don't support changing QoS at the front-end yet for in-use volumes # TODO(avishay): Call Nova to change QoS setting (libvirt has support # - virDomainSetBlockIoTune() - Nova does not have support yet). 
filters = [db.volume_qos_allows_retype(new_type_id)] updates = {'status': 'retyping', 'previous_status': objects.Volume.model.status} if not volume.conditional_update(updates, expected, filters): msg = _('Retype needs volume to be in available or in-use state, ' 'not be part of an active migration or a consistency ' 'group, requested type has to be different that the ' 'one from the volume, and for in-use volumes front-end ' 'qos specs cannot change.') LOG.error(msg) QUOTAS.rollback(context, reservations + old_reservations, project_id=volume.project_id) raise exception.InvalidVolume(reason=msg) request_spec = {'volume_properties': volume, 'volume_id': volume.id, 'volume_type': new_type, 'migration_policy': migration_policy, 'quota_reservations': reservations, 'old_reservations': old_reservations} type_azs = volume_utils.extract_availability_zones_from_volume_type( new_type) if type_azs is not None: request_spec['availability_zones'] = type_azs else: request_spec['availability_zones'] = [volume.availability_zone] self.scheduler_rpcapi.retype(context, volume, request_spec=request_spec, filter_properties={}) volume.multiattach = tgt_is_multiattach volume.save() LOG.info("Retype volume request issued successfully.", resource=volume) def _get_service_by_host_cluster( self, context: context.RequestContext, host: str, cluster_name: Optional[str], resource: str = 'volume') -> objects.Service: elevated = context.elevated() svc_cluster = cluster_name and volume_utils.extract_host(cluster_name, 'backend') svc_host = host and volume_utils.extract_host(host, 'backend') # NOTE(geguileo): Only svc_host or svc_cluster is set, so when we get # a service from the DB we are getting either one specific service from # a host or any service that is up from a cluster, which means that the # cluster itself is also up. try: service = objects.Service.get_by_id(elevated, None, host=svc_host, binary=constants.VOLUME_BINARY, cluster_name=svc_cluster) except exception.ServiceNotFound: with excutils.save_and_reraise_exception(): LOG.error('Unable to find service: %(service)s for ' 'given host: %(host)s and cluster %(cluster)s.', {'service': constants.VOLUME_BINARY, 'host': host, 'cluster': cluster_name}) if service.disabled and (not service.cluster_name or service.cluster.disabled): LOG.error('Unable to manage existing %s on a disabled ' 'service.', resource) raise exception.ServiceUnavailable() if not service.is_up: LOG.error('Unable to manage existing %s on a service that is ' 'down.', resource) raise exception.ServiceUnavailable() return service def manage_existing(self, context: context.RequestContext, host: str, cluster_name: Optional[str], ref: dict, name: Optional[str] = None, description: Optional[str] = None, volume_type: Optional[objects.VolumeType] = None, metadata: Optional[dict] = None, availability_zone: Optional[str] = None, bootable: Optional[bool] = False) -> objects.Volume: if 'source-name' in ref: vol_id = volume_utils.extract_id_from_volume_name( ref['source-name']) if vol_id and volume_utils.check_already_managed_volume(vol_id): raise exception.InvalidVolume( _("Unable to manage existing volume." " The volume is already managed")) if not volume_type: try: volume_type = volume_types.get_default_volume_type(context) except exception.VolumeTypeDefaultMisconfiguredError: LOG.error('Default volume type not found. 
This must be ' 'corrected immediately or all volume-create ' 'requests that do not specify a volume type ' 'will fail.') raise is_encrypted = False if volume_type: is_encrypted = self._is_encrypted(volume_type) if is_encrypted: msg = _("Managing to an encrypted volume type is not supported.") LOG.error(msg) raise exception.InvalidVolumeType(msg) if volume_type and 'extra_specs' not in volume_type: extra_specs = volume_types.get_volume_type_extra_specs( volume_type['id']) volume_type['extra_specs'] = extra_specs service = self._get_service_by_host_cluster(context, host, cluster_name) if availability_zone is None: availability_zone = service.availability_zone if not cluster_name and bool(volume_utils.extract_host(host, 'pool')): manage_host = host else: manage_host = service.host manage_what = { 'context': context, 'name': name, 'description': description, 'host': manage_host, 'cluster_name': service.cluster_name, 'ref': ref, 'volume_type': volume_type, 'metadata': metadata, 'availability_zone': availability_zone, 'bootable': bootable, 'size': 0, 'group_snapshot': None, 'optional_args': {'is_quota_committed': False}, 'volume_type_id': None if not volume_type else volume_type['id'], } try: flow_engine = manage_existing.get_flow(self.scheduler_rpcapi, self.db, manage_what) except Exception: msg = _('Failed to manage api volume flow.') LOG.exception(msg) raise exception.CinderException(msg) # Attaching this listener will capture all of the notifications that # taskflow sends out and redirect them to a more useful log for # cinder's debugging (or error reporting) usage. with flow_utils.DynamicLogListener(flow_engine, logger=LOG): flow_engine.run() vol_ref = flow_engine.storage.fetch('volume') LOG.info("Manage volume request issued successfully.", resource=vol_ref) return vol_ref def get_manageable_volumes(self, context: context.RequestContext, host: str, cluster_name, marker: Optional[str] = None, limit: Optional[int] = None, offset: Optional[int] = None, sort_keys: Optional[list[str]] = None, sort_dirs: Optional[list[str]] = None): svc = self._get_service_by_host_cluster(context, host, cluster_name) return self.volume_rpcapi.get_manageable_volumes(context, svc, marker, limit, offset, sort_keys, sort_dirs) def manage_existing_snapshot( self, context: context.RequestContext, ref: dict, volume: objects.Volume, name: Optional[str] = None, description: Optional[str] = None, metadata: Optional[dict] = None) -> objects.Snapshot: # Ensure the service is up and not disabled. 
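manage_existing above keeps the caller's host only when it already names a pool and no cluster is involved; otherwise the service's own host is used. A simplified sketch of that choice, assuming the conventional 'host@backend#pool' formatting of Cinder host strings (the helper itself is hypothetical):

def choose_manage_host(requested_host, service_host, cluster_name=None):
    # '#' separates the pool component in the assumed host string format.
    has_pool = bool(requested_host) and '#' in requested_host
    if not cluster_name and has_pool:
        return requested_host
    return service_host

# choose_manage_host('node1@lvm#pool0', 'node1@lvm') -> 'node1@lvm#pool0'
# choose_manage_host('node1@lvm', 'node1@lvm')       -> 'node1@lvm'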
self._get_service_by_host_cluster(context, volume.host, volume.cluster_name, 'snapshot') snapshot_object = self.create_snapshot_in_db(context, volume, name, description, True, metadata, None, commit_quota=True) kwargs = {'snapshot_id': snapshot_object.id, 'volume_properties': objects.VolumeProperties(size=volume.size)} self.scheduler_rpcapi.manage_existing_snapshot( context, volume, snapshot_object, ref, request_spec=objects.RequestSpec(**kwargs)) return snapshot_object def get_manageable_snapshots( self, context: context.RequestContext, host: str, cluster_name: Optional[str], marker: Optional[str] = None, limit: Optional[int] = None, offset: Optional[int] = None, sort_keys: Optional[list[str]] = None, sort_dirs: Optional[list[str]] = None) -> list[dict]: svc = self._get_service_by_host_cluster(context, host, cluster_name, 'snapshot') return self.volume_rpcapi.get_manageable_snapshots(context, svc, marker, limit, offset, sort_keys, sort_dirs) def _get_cluster_and_services_for_replication( self, ctxt: context.RequestContext, host: str, cluster_name: str) -> tuple: services = objects.ServiceList.get_all( ctxt, filters={'host': host, 'cluster_name': cluster_name, 'binary': constants.VOLUME_BINARY}) if not services: if host: msg = _("No service found with host=%s") % host else: msg = _("No service found with cluster=%s") % cluster_name raise exception.ServiceNotFound(msg) cluster = services[0].cluster # Check that the host or cluster we received only results in 1 host or # hosts from the same cluster. if cluster_name: check_attribute = 'cluster_name' expected = cluster.name else: check_attribute = 'host' expected = services[0].host if any(getattr(s, check_attribute) != expected for s in services): msg = _('Services from different clusters found.') raise exception.InvalidParameterValue(msg) # If we received host parameter but host belongs to a cluster we have # to change all the services in the cluster, not just one host if host and cluster: services = cluster.services return cluster, services def _replication_db_change(self, ctxt: context.RequestContext, field: str, expected_value: Union[bool, list], new_value: Union[bool, str], host: str, cluster_name: str, check_up: bool = False) -> tuple: def _error_msg(service) -> str: expected = utils.build_or_str(str(expected_value)) up_msg = 'and must be up ' if check_up else '' msg = (_('%(field)s in %(service)s must be %(expected)s ' '%(up_msg)sto failover.') % {'field': field, 'service': service, 'expected': expected, 'up_msg': up_msg}) LOG.error(msg) return msg cluster, services = self._get_cluster_and_services_for_replication( ctxt, host, cluster_name) expect = {field: expected_value} change = {field: new_value} if cluster: old_value = getattr(cluster, field) if ((check_up and not cluster.is_up) or not cluster.conditional_update(change, expect)): msg = _error_msg(cluster.name) raise exception.InvalidInput(reason=msg) changed = [] not_changed = [] for service in services: if ((not check_up or service.is_up) and service.conditional_update(change, expect)): changed.append(service) else: not_changed.append(service) # If there were some services that couldn't be changed we should at # least log the error. 
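_replication_db_change above applies the same guarded field change to every service and keeps track of which ones actually flipped, so partial failures can be logged and a total failure rolled back. A toy version of that loop over plain dicts; flip_field is illustrative only:

def flip_field(services, field, expected, new_value):
    changed, not_changed = [], []
    for svc in services:
        if svc.get(field) == expected:
            svc[field] = new_value
            changed.append(svc)
        else:
            not_changed.append(svc)
    return changed, not_changed

services = [{'host': 'a', 'frozen': False}, {'host': 'b', 'frozen': True}]
changed, not_changed = flip_field(services, 'frozen', expected=False, new_value=True)
# changed -> the 'a' service; not_changed -> the 'b' service, which the real
# method would report in a warning.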
if not_changed: msg = _error_msg([s.host for s in not_changed]) # If we couldn't change any of the services if not changed: # Undo the cluster change if cluster: setattr(cluster, field, old_value) cluster.save() raise exception.InvalidInput( reason=_('No service could be changed: %s') % msg) LOG.warning('Some services could not be changed: %s', msg) return cluster, services def failover(self, ctxt: context.RequestContext, host: str, cluster_name: str, secondary_id: Optional[str] = None) -> None: ctxt.authorize(svr_policy.FAILOVER_POLICY) ctxt = ctxt if ctxt.is_admin else ctxt.elevated() # TODO(geguileo): In P - Remove this version check rpc_version = self.volume_rpcapi.determine_rpc_version_cap() rpc_version = versionutils.convert_version_to_tuple(rpc_version) if cluster_name and rpc_version < (3, 5): msg = _('replication operations with cluster field') raise exception.UnavailableDuringUpgrade(action=msg) rep_fields = fields.ReplicationStatus expected_values = [rep_fields.ENABLED, rep_fields.FAILED_OVER] new_value = rep_fields.FAILING_OVER cluster, services = self._replication_db_change( ctxt, 'replication_status', expected_values, new_value, host, cluster_name, check_up=True) self.volume_rpcapi.failover(ctxt, services[0], secondary_id) def freeze_host(self, ctxt: context.RequestContext, host: str, cluster_name: str) -> None: ctxt.authorize(svr_policy.FREEZE_POLICY) ctxt = ctxt if ctxt.is_admin else ctxt.elevated() expected = False new_value = True cluster, services = self._replication_db_change( ctxt, 'frozen', expected, new_value, host, cluster_name, check_up=False) # Should we set service status to disabled to keep # scheduler calls from being sent? Just use existing # `cinder service-disable reason=freeze` self.volume_rpcapi.freeze_host(ctxt, services[0]) def thaw_host(self, ctxt: context.RequestContext, host: str, cluster_name: str) -> Optional[str]: ctxt.authorize(svr_policy.THAW_POLICY) ctxt = ctxt if ctxt.is_admin else ctxt.elevated() expected = True new_value = False cluster, services = self._replication_db_change( ctxt, 'frozen', expected, new_value, host, cluster_name, check_up=False) if not self.volume_rpcapi.thaw_host(ctxt, services[0]): return "Backend reported error during thaw_host operation." return None def check_volume_filters(self, filters: dict, strict: bool = False) -> None: """Sets the user filter value to accepted format""" booleans = self.db.get_booleans_for_table('volume') # To translate any true/false equivalent to True/False # which is only acceptable format in database queries. temp_dict = filters.copy() for key, val in temp_dict.items(): try: if key in booleans: filters[key] = self._check_boolean_filter_value( key, val, strict) elif key == 'display_name': # Use the raw value of display name as is for the filter # without passing it through ast.literal_eval(). If the # display name is a properly quoted string (e.g. '"foo"') # then literal_eval() strips the quotes (i.e. 'foo'), so # the filter becomes different from the user input. continue else: # this is required as ast.literal_eval(/) # raises exception. Eg: ast.literal_eval(5) generates # ValueError: malformed node or string: 5 if not isinstance(val, str): val = str(val) filters[key] = ast.literal_eval(val) except (ValueError, SyntaxError): LOG.debug('Could not evaluate value %s, assuming string', val) def _check_boolean_filter_value(self, key: str, val: str, strict: bool = False) -> bool: """Boolean filter values in Volume GET. 
Before VOLUME_LIST_BOOTABLE, all values other than 'False', 'false', 'FALSE' were trated as True for specific boolean filter parameters in Volume GET request. But VOLUME_LIST_BOOTABLE onwards, only true/True/0/1/False/false parameters are supported. All other input values to specific boolean filter parameter will lead to raising exception. This changes API behavior. So, micro version introduced for VOLUME_LIST_BOOTABLE onwards. """ if strict: # for updated behavior, from VOLUME_LIST_BOOTABLE onwards. # To translate any true/false/t/f/0/1 to True/False # which is only acceptable format in database queries. try: return strutils.bool_from_string(val, strict=True) except ValueError: msg = _('\'%(key)s = %(value)s\'') % {'key': key, 'value': val} raise exception.InvalidInput(reason=msg) else: # For existing behavior(before version VOLUME_LIST_BOOTABLE) accepted_true = ['True', 'true', 'TRUE'] accepted_false = ['False', 'false', 'FALSE'] if val in accepted_false: return False elif val in accepted_true: return True else: return bool(val) def _attachment_reserve( self, ctxt: context.RequestContext, vref: objects.Volume, instance_uuid: Optional[str] = None) -> objects.VolumeAttachment: # NOTE(jdg): Reserved is a special case, we're avoiding allowing # creation of other new reserves/attachments while in this state # so we avoid contention issues with shared connections # Multiattach of bootable volumes is a special case with it's own # policy, check that here right off the bat if (vref.get('multiattach', False) and vref.status == 'in-use' and vref.bootable): ctxt.authorize( attachment_policy.MULTIATTACH_BOOTABLE_VOLUME_POLICY, target_obj=vref) # FIXME(JDG): We want to be able to do things here like reserve a # volume for Nova to do BFV WHILE the volume may be in the process of # downloading image, we add downloading here; that's easy enough but # we've got a race between with the attaching/detaching that we do # locally on the Cinder node. Just come up with an easy way to # determine if we're attaching to the Cinder host for some work or if # we're being used by the outside world. expected = {'multiattach': vref.multiattach, 'status': (('available', 'in-use', 'downloading') if vref.multiattach else ('available', 'downloading'))} result = vref.conditional_update({'status': 'reserved'}, expected) if not result: override = False if instance_uuid and vref.status in ('in-use', 'reserved'): # Refresh the volume reference in case multiple instances were # being concurrently attached to the same non-multiattach # volume. vref = objects.Volume.get_by_id(ctxt, vref.id) for attachment in vref.volume_attachment: # If we're attaching the same volume to the same instance, # we could be migrating the instance to another host in # which case we want to allow the reservation. 
# (LP BUG: 1694530) if attachment.instance_uuid == instance_uuid: override = True break if not override: msg = (_('Volume %(vol_id)s status must be %(statuses)s to ' 'reserve, but the current status is %(current)s.') % {'vol_id': vref.id, 'statuses': utils.build_or_str(expected['status']), 'current': vref.status}) raise exception.InvalidVolume(reason=msg) values = {'volume_id': vref.id, 'volume_host': vref.host, 'attach_status': 'reserved', 'instance_uuid': instance_uuid} db_ref = self.db.volume_attach(ctxt.elevated(), values) return objects.VolumeAttachment.get_by_id(ctxt, db_ref['id']) def attachment_create( self, ctxt: context.RequestContext, volume_ref: objects.Volume, instance_uuid: str, connector: Optional[dict] = None, attach_mode: Optional[str] = 'null') -> objects.VolumeAttachment: """Create an attachment record for the specified volume.""" ctxt.authorize(attachment_policy.CREATE_POLICY, target_obj=volume_ref) connection_info = {} if "error" in volume_ref.status: msg = ('Volume attachments can not be created if the volume ' 'is in an error state. ' 'The Volume %(volume_id)s currently has a status of: ' '%(volume_status)s ') % { 'volume_id': volume_ref.id, 'volume_status': volume_ref.status} LOG.error(msg) raise exception.InvalidVolume(reason=msg) attachment_ref = self._attachment_reserve(ctxt, volume_ref, instance_uuid) if connector: connection_info = ( self.volume_rpcapi.attachment_update(ctxt, volume_ref, connector, attachment_ref.id)) attachment_ref.connection_info = connection_info # Use of admin_metadata for RO settings is deprecated # switch to using mode argument to attachment-create if self.db.volume_admin_metadata_get( ctxt.elevated(), volume_ref['id']).get('readonly', False): LOG.warning("Using volume_admin_metadata to set " "Read Only mode is deprecated! Please " "use the mode argument in attachment-create.") attachment_ref.attach_mode = 'ro' # for now we have to let the admin_metadata override # so we're using an else in the next step here, in # other words, using volume_admin_metadata and mode params # are NOT compatible else: attachment_ref.attach_mode = attach_mode attachment_ref.save() return attachment_ref @coordination.synchronized( '{f_name}-{attachment_ref.volume_id}-{connector[host]}') def attachment_update(self, ctxt: context.RequestContext, attachment_ref: objects.VolumeAttachment, connector) -> objects.VolumeAttachment: """Update an existing attachment record.""" # Valid items to update (connector includes mode and mountpoint): # 1. connector (required) # a. mode (if None use value from attachment_ref) # b. mountpoint (if None use value from attachment_ref) # c. instance_uuid(if None use value from attachment_ref) # This method has a synchronized() lock on the volume id # because we have to prevent race conditions around checking # for duplicate attachment requests to the same host. # We fetch the volume object and pass it to the rpc call because we # need to direct this to the correct host/backend ctxt.authorize(attachment_policy.UPDATE_POLICY, target_obj=attachment_ref) volume_ref = objects.Volume.get_by_id(ctxt, attachment_ref.volume_id) if "error" in volume_ref.status: msg = ('Volume attachments can not be updated if the volume ' 'is in an error state. 
The Volume %(volume_id)s ' 'currently has a status of: %(volume_status)s ') % { 'volume_id': volume_ref.id, 'volume_status': volume_ref.status} LOG.error(msg) raise exception.ResourceConflict(reason=msg) if (len(volume_ref.volume_attachment) > 1 and not (volume_ref.multiattach or self._is_multiattach(volume_ref.volume_type))): # Check whether all connection hosts are unique # Multiple attachments to different hosts is permitted to # support Nova instance migration. # This particular check also does not prevent multiple attachments # for a multiattach volume to the same instance. connection_hosts = set(a.connector['host'] for a in volume_ref.volume_attachment if a.connection_info) if len(connection_hosts) > 0: # We raced, and have more than one connection msg = _('duplicate connectors detected on volume ' '%(vol)s') % {'vol': volume_ref.id} raise exception.ResourceConflict(reason=msg) connection_info = ( self.volume_rpcapi.attachment_update(ctxt, volume_ref, connector, attachment_ref.id)) attachment_ref.connection_info = connection_info attachment_ref.save() return attachment_ref def attachment_deletion_allowed(self, ctxt: context.RequestContext, attachment_or_attachment_id, volume=None): """Check if deleting an attachment is allowed (Bug #2004555) Allowed is based on the REST API policy, the status of the attachment, where it is used, and who is making the request. Deleting an attachment on the Cinder side while leaving the volume connected to the nova host results in leftover devices that can lead to data leaks/corruption. OS-Brick may have code to detect it, but in some cases it is detected after it has already been exposed, so it's better to prevent users from being able to intentionally triggering the issue. """ # It's ok to delete an attachment if the request comes from a service if self.is_service_request(ctxt): return if not attachment_or_attachment_id and volume: if not volume.volume_attachment: return if len(volume.volume_attachment) == 1: attachment_or_attachment_id = volume.volume_attachment[0] if isinstance(attachment_or_attachment_id, str): try: attachment = objects.VolumeAttachment.get_by_id( ctxt, attachment_or_attachment_id) except exception.VolumeAttachmentNotFound: attachment = None else: attachment = attachment_or_attachment_id if attachment: if volume: if volume.id != attachment.volume_id: raise exception.InvalidInput( reason='Mismatched volume and attachment') server_id = attachment.instance_uuid # It's ok to delete if it's not connected to a vm. if not server_id or not attachment.connection_info: return volume = volume or attachment.volume nova = compute.API() LOG.info('Attachment connected to vm %s, checking data on nova', server_id) # If nova is down the client raises 503 and we report that try: nova_volume = nova.get_server_volume(ctxt, server_id, volume.id) except nova.NotFound: LOG.warning('Instance or volume not found on Nova, deleting ' 'attachment locally, which may leave leftover ' 'devices on Nova compute') return if nova_volume.attachment_id != attachment.id: LOG.warning('Mismatch! Nova has different attachment id (%s) ' 'for the volume, deleting attachment locally. ' 'May leave leftover devices in a compute node', nova_volume.attachment_id) return else: server_id = '' LOG.error('Detected user call to delete in-use attachment. Call must ' 'come from the nova service and nova must be configured to ' 'send the service token. 
Bug #2004555') raise exception.ConflictNovaUsingAttachment(instance_id=server_id) def attachment_delete(self, ctxt: context.RequestContext, attachment) -> objects.VolumeAttachmentList: # Check if policy allows user to delete attachment ctxt.authorize(attachment_policy.DELETE_POLICY, target_obj=attachment) self.attachment_deletion_allowed(ctxt, attachment) volume = attachment.volume if attachment.attach_status == fields.VolumeAttachStatus.RESERVED: volume_utils.notify_about_volume_usage(ctxt, volume, "detach.start") else: # Generate the detach.start notification on the volume service to # include the host doing the operation. self.volume_rpcapi.attachment_delete(ctxt, attachment.id, volume) # Trigger attachments lazy load (missing since volume was loaded in the # attachment without joined tables). With them loaded the finish_detach # call removes the detached one from the list, and the notification and # return have the right attachment list. volume.volume_attachment volume.finish_detach(attachment.id) # Do end notification on the API so it comes after finish_detach. # Doing it on the volume service leads to race condition from bug # #1937084, and doing the notification there with the finish here leads # to bug #1916980. volume_utils.notify_about_volume_usage(ctxt, volume, "detach.end") return volume.volume_attachment def reimage(self, context, volume, image_id, reimage_reserved=False, image_snap=None): if volume.status in ['reserved']: context.authorize(vol_action_policy.REIMAGE_RESERVED_POLICY, target_obj=volume) else: context.authorize(vol_action_policy.REIMAGE_POLICY, target_obj=volume) if len(volume.volume_attachment) > 1: msg = _("Cannot re-image a volume which is attached to more than " "one server.") raise webob.exc.HTTPConflict(explanation=msg) # Build required conditions for conditional update expected = {'status': ('available', 'error', 'reserved' ) if reimage_reserved else ('available', 'error')} values = {'status': 'downloading', 'previous_status': volume.model.status} result = volume.conditional_update(values, expected) if not result: msg = (_('Volume %(vol_id)s status must be %(statuses)s, but ' 'current status is %(status)s.') % {'vol_id': volume.id, 'statuses': utils.build_or_str(expected['status']), 'status': volume.status}) raise exception.InvalidVolume(reason=msg) image_meta = self.image_service.show(context, image_id) try: # If the source of the image is a volume snapshot # (image_snap is not None), we will get image 'size' as 0 and # 'virtual_size' as None but at least we will verify the image # 'status' and 'min_disk' properties. volume_utils.check_image_metadata(image_meta, volume['size']) # Currently we only raise InvalidInput and ImageUnacceptable # exceptions in the check_image_metadata call but having Exception # here makes it more generic since we want to roll back to original # state in any case and we re-raise anyway. # Also this helps makes adding new exceptions easier in the future. 
except Exception: with excutils.save_and_reraise_exception(): LOG.exception("Failed to reimage volume %(volume_id)s " "with image %(image_id)s", {'volume_id': volume.id, 'image_id': image_id}) volume.conditional_update( {'status': volume.model.previous_status, 'previous_status': None}, {'status': 'downloading'}) self.volume_rpcapi.reimage(context, volume, image_meta, image_snap=image_snap) class HostAPI(base.Base): """Sub-set of the Volume Manager API for managing host operations.""" def set_host_enabled(self, context, host, enabled): """Sets the specified host's ability to accept new volumes.""" raise NotImplementedError() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/configuration.py0000664000175000017500000001321600000000000021066 0ustar00zuulzuul00000000000000# Copyright (c) 2012 Rackspace Hosting # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Configuration support for all drivers. This module allows support for setting configurations either from default or from a particular FLAGS group, to be able to set multiple configurations for a given set of values. For instance, two lvm configurations can be set by naming them in groups as [lvm1] volume_group=lvm-group-1 ... [lvm2] volume_group=lvm-group-2 ... And the configuration group name will be passed in so that all calls to configuration.volume_group within that instance will be mapped to the proper named group. This class also ensures the implementation's configuration is grafted into the option group. This is due to the way cfg works. All cfg options must be defined and registered in the group in which they are used. """ from oslo_config import cfg CONF = cfg.CONF SHARED_CONF_GROUP = 'backend_defaults' class DefaultGroupConfiguration(object): """Get config options from only DEFAULT.""" def __init__(self): # set the local conf so that __call__'s know what to use self.local_conf = CONF def _ensure_config_values(self, volume_opts): CONF.register_opts(volume_opts, group=None) def append_config_values(self, volume_opts): self._ensure_config_values(volume_opts) def safe_get(self, value): try: return self.__getattr__(value) except cfg.NoSuchOptError: return None def __getattr__(self, value): # Don't use self.local_conf to avoid reentrant call to __getattr__() local_conf = object.__getattribute__(self, 'local_conf') return getattr(local_conf, value) class BackendGroupConfiguration(object): def __init__(self, volume_opts, config_group=None): """Initialize configuration. This takes care of grafting the implementation's config values into the config group and shared defaults. We will try to pull values from the specified 'config_group', but fall back to defaults from the SHARED_CONF_GROUP. 
""" self.config_group = config_group # set the local conf so that __call__'s know what to use self._ensure_config_values(volume_opts) self.backend_conf = CONF._get(self.config_group) self.shared_backend_conf = CONF._get(SHARED_CONF_GROUP) def _safe_register(self, opt, group): try: CONF.register_opt(opt, group=group) except cfg.DuplicateOptError: pass # If it's already registered ignore it def _ensure_config_values(self, volume_opts): """Register the options in the shared group. When we go to get a config option we will try the backend specific group first and fall back to the shared group. We override the default from all the config options for the backend group so we can know if it was set or not. """ for opt in volume_opts: self._safe_register(opt, SHARED_CONF_GROUP) # Assuming they aren't the same groups, graft on the options into # the backend group and override its default value. if self.config_group != SHARED_CONF_GROUP: self._safe_register(opt, self.config_group) CONF.set_default(opt.name, None, group=self.config_group) def append_config_values(self, volume_opts): self._ensure_config_values(volume_opts) def set_default(self, opt_name, default): CONF.set_default(opt_name, default, group=SHARED_CONF_GROUP) def get(self, key, default=None): return getattr(self, key, default) def safe_get(self, value): try: return self.__getattr__(value) except cfg.NoSuchOptError: return None def __getattr__(self, opt_name): # Don't use self.X to avoid reentrant call to __getattr__() backend_conf = object.__getattribute__(self, 'backend_conf') opt_value = getattr(backend_conf, opt_name) if opt_value is None: shared_conf = object.__getattribute__(self, 'shared_backend_conf') opt_value = getattr(shared_conf, opt_name) return opt_value class Configuration(object): def __init__(self, volume_opts, config_group=None): """Initialize configuration. This shim will allow for compatibility with the DEFAULT style of backend configuration which is used by some of the users of this configuration helper, or by the volume drivers that have all been forced over to the config_group style. """ self.config_group = config_group if config_group: self.conf = BackendGroupConfiguration(volume_opts, config_group) else: self.conf = DefaultGroupConfiguration() def append_config_values(self, volume_opts): self.conf.append_config_values(volume_opts) def safe_get(self, value): return self.conf.safe_get(value) def __getattr__(self, value): # Don't use self.conf to avoid reentrant call to __getattr__() conf = object.__getattribute__(self, 'conf') return getattr(conf, value) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/driver.py0000664000175000017500000037347100000000000017526 0ustar00zuulzuul00000000000000# Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""Drivers for volumes.""" import abc import time from os_brick import exception as brick_exception from oslo_concurrency import processutils from oslo_config import cfg from oslo_config import types from oslo_log import log as logging from oslo_utils import excutils from cinder.common import constants from cinder import db from cinder import exception from cinder.i18n import _ from cinder.image import image_utils from cinder import objects from cinder.objects import fields from cinder import utils from cinder.volume import configuration from cinder.volume import driver_utils from cinder.volume import rpcapi as volume_rpcapi from cinder.volume import throttling from cinder.volume import volume_utils LOG = logging.getLogger(__name__) volume_opts = [ cfg.IntOpt('num_shell_tries', default=3, help='Number of times to attempt to run flakey shell commands'), cfg.IntOpt('reserved_percentage', default=0, min=0, max=100, help='The percentage of backend capacity is reserved'), cfg.StrOpt('target_prefix', default='iqn.2010-10.org.openstack:', help='Prefix for iSCSI/NVMEoF volumes'), cfg.StrOpt('target_ip_address', default='$my_ip', help='The IP address that the iSCSI/NVMEoF daemon is ' 'listening on'), cfg.ListOpt('target_secondary_ip_addresses', deprecated_name='iscsi_secondary_ip_addresses', default=[], help='The list of secondary IP addresses of the ' 'iSCSI/NVMEoF daemon'), cfg.PortOpt('target_port', default=3260, help='The port that the iSCSI/NVMEoF daemon is listening ' 'on'), cfg.IntOpt('num_volume_device_scan_tries', default=3, help='The maximum number of times to rescan targets' ' to find volume'), cfg.StrOpt('volume_backend_name', help='The backend name for a given driver implementation'), cfg.StrOpt('volume_clear', default='zero', choices=[('none', 'Do not wipe volumes on deletion'), ('zero', '(default) Zero out volumes on deletion')], help= "This option is applicable *only* to the LVM driver when thick " "volumes are being used. See " "https://cinder.openstack.org/admin/security.html#data-leakage " "for more information. " "Method used to wipe old volumes (LVM only)"), cfg.IntOpt('volume_clear_size', default=0, max=1024, help= "This option is applicable *only* to the LVM driver when thick " "volumes are being used. " "Size in MiB to wipe at start of old volumes (LVM only). 0 " "means to wipe all"), cfg.StrOpt('volume_clear_ionice', help= "This option is applicable *only* to the LVM driver when thick " "volumes are being used. " "The flag to pass to ionice to alter the i/o priority of the " "process used to zero a volume after deletion (LVM only). 
" "Example: pass \"-c3\" for idle only priority"), cfg.StrOpt('target_helper', default='tgtadm', choices=[('tgtadm', ('(default) ' 'Linux SCSI Target Administration Utility')), ('lioadm', 'LIO iSCSI support'), ('scstadmin', 'SCST target support'), ('iscsictl', 'Chelsio iSCSI Target support'), ('nvmet', 'for NVMEoF support'), ('spdk-nvmeof', 'for SPDK NVMe-oF'), ('fake', 'for testing')], help='Target user-land tool to use.'), cfg.StrOpt('volumes_dir', default='$state_path/volumes', help='Volume configuration file storage ' 'directory'), cfg.StrOpt('chiscsi_conf', default='/etc/chelsio-iscsi/chiscsi.conf', help='Chiscsi (CXT) global defaults configuration file'), cfg.StrOpt('iscsi_iotype', default='fileio', choices=[('blockio', 'perform Block IO'), ('fileio', '(default) perform File IO'), ('auto', 'Cinder will autodetect the type of backing device')], help='For ietadm: sets the IO behavior of the iSCSI target', deprecated_for_removal=True, deprecated_since='2024.2', deprecated_reason='No longer used (was for ietadm).' ), cfg.StrOpt('volume_dd_blocksize', default='1M', help='The default block size used when copying/clearing ' 'volumes'), cfg.StrOpt('volume_copy_blkio_cgroup_name', default='cinder-volume-copy', help='The blkio cgroup name to be used to limit bandwidth ' 'of volume copy'), cfg.IntOpt('volume_copy_bps_limit', default=0, help='The upper limit of bandwidth of volume copy. ' '0 => unlimited'), cfg.StrOpt('iscsi_write_cache', default='on', choices=[('on', '(default) Perform write-back'), ('off', 'Perform write-through')], help='For tgtadm: Sets the behavior of the iSCSI target to ' 'either perform write-back or write-through. ' 'This parameter is valid if target_helper is set ' 'to tgtadm.'), cfg.StrOpt('iscsi_target_flags', default='', help='For tgtadm: ' 'Sets the target-specific flags for the iSCSI target. ' 'Only used for tgtadm to specify backing device flags ' 'using bsoflags option. The specified string is passed ' 'as is to the underlying tool.'), cfg.StrOpt('target_protocol', default='iscsi', choices=[('iscsi', '(default) Use iSCSI target protocol'), ('iser', 'Use iSCSI Extensions for RDMA'), ('nvmet_rdma', 'Use RDMA with an nvmet target'), ('nvmet_tcp', 'Use TCP with an nvmet target')], help='Determines the target protocol for new volumes ' 'created with tgtadm, lioadm and nvmet target helpers.'), cfg.StrOpt('driver_client_cert_key', help='The path to the client certificate key for verification, ' 'if the driver supports it.'), cfg.StrOpt('driver_client_cert', help='The path to the client certificate for verification, ' 'if the driver supports it.'), cfg.BoolOpt('driver_use_ssl', default=False, help='Tell driver to use SSL for connection to backend ' 'storage if the driver supports it.'), cfg.StrOpt('max_over_subscription_ratio', default='20.0', regex=r'^(auto|\d*\.\d+|\d+)$', help='Representation of the over subscription ratio ' 'when thin provisioning is enabled. Default ratio is ' '20.0, meaning provisioned capacity can be 20 times of ' 'the total physical capacity. If the ratio is 10.5, it ' 'means provisioned capacity can be 10.5 times of the ' 'total physical capacity. A ratio of 1.0 means ' 'provisioned capacity cannot exceed the total physical ' 'capacity. If ratio is \'auto\', Cinder will ' 'automatically calculate the ratio based on the ' 'provisioned capacity and the used space. 
If not set to ' 'auto, the ratio has to be a minimum of 1.0.'), cfg.BoolOpt('use_chap_auth', default=False, help='Option to enable/disable CHAP authentication for ' 'targets.'), cfg.StrOpt('chap_username', default='', help='CHAP user name.'), cfg.StrOpt('chap_password', default='', help='Password for specified CHAP account name.', secret=True), cfg.StrOpt('driver_data_namespace', help='Namespace for driver private data values to be ' 'saved in.'), cfg.StrOpt('filter_function', help='String representation for an equation that will be ' 'used to filter hosts. Only used when the driver ' 'filter is set to be used by the Cinder scheduler.'), cfg.StrOpt('goodness_function', help='String representation for an equation that will be ' 'used to determine the goodness of a host. Only used ' 'when using the goodness weigher is set to be used by ' 'the Cinder scheduler.'), cfg.BoolOpt('driver_ssl_cert_verify', default=False, help='If set to True the http client will validate the SSL ' 'certificate of the backend endpoint.'), cfg.StrOpt('driver_ssl_cert_path', help='Can be used to specify a non default path to a ' 'CA_BUNDLE file or directory with certificates of ' 'trusted CAs, which will be used to validate the backend'), cfg.ListOpt('trace_flags', help='List of options that control which trace info ' 'is written to the DEBUG log level to assist ' 'developers. Valid values are method and api.'), cfg.MultiOpt('replication_device', item_type=types.Dict(), secret=True, help="Multi opt of dictionaries to represent a replication " "target device. This option may be specified multiple " "times in a single config section to specify multiple " "replication target devices. Each entry takes the " "standard dict config form: replication_device = " "target_device_id:," "key1:value1,key2:value2..."), cfg.BoolOpt('report_discard_supported', default=False, help='Report to clients of Cinder that the backend supports ' 'discard (aka. trim/unmap). This will not actually ' 'change the behavior of the backend or the client ' 'directly, it will only notify that it can be used.'), cfg.StrOpt('storage_protocol', ignore_case=True, default=constants.ISCSI, choices=[(constants.ISCSI, '(default) iSCSI'), (constants.FC, 'Fibre Channel')], help='Protocol for transferring data between host and ' 'storage back-end.'), cfg.BoolOpt('enable_unsupported_driver', default=False, help="Set this to True when you want to allow an unsupported " "driver to start. Drivers that haven't maintained a " "working CI system and testing are marked as unsupported " "until CI is working again. This also marks a driver as " "deprecated and may be removed in the next release."), cfg.StrOpt('backend_availability_zone', default=None, help='Availability zone for this volume backend. 
If not set, ' 'the storage_availability_zone option value is used as ' 'the default for all backends.'), ] # for backward compatibility iser_opts = [ cfg.IntOpt('num_iser_scan_tries', default=3, help='The maximum number of times to rescan iSER target ' 'to find volume', deprecated_for_removal=True, deprecated_since='2025.1', deprecated_reason='Has been unused since the Mitaka release.'), cfg.StrOpt('iser_target_prefix', default='iqn.2010-10.org.openstack:', help='Prefix for iSER volumes', deprecated_for_removal=True, deprecated_since='2025.1', deprecated_reason='Has been unused since the Mitaka release.'), cfg.StrOpt('iser_ip_address', default='$my_ip', help='The IP address that the iSER daemon is listening on', deprecated_for_removal=True, deprecated_since='2025.1', deprecated_reason='Has been unused since the Mitaka release.'), cfg.PortOpt('iser_port', default=3260, help='The port that the iSER daemon is listening on', deprecated_for_removal=True, deprecated_since='2025.1', deprecated_reason='Has been unused since the Mitaka release.'), cfg.StrOpt('iser_helper', default='tgtadm', help='The name of the iSER target user-land tool to use', deprecated_for_removal=True, deprecated_since='2025.1', deprecated_reason='Has been unused since the Havana release.'), ] nvmeof_opts = [ cfg.IntOpt('nvmeof_conn_info_version', default=1, min=1, max=2, help='NVMe os-brick connector has 2 different connection info ' 'formats, this allows some NVMe-oF drivers that use the ' 'original format (version 1), such as spdk and LVM-nvmet, ' 'to send the newer format.'), ] nvmet_opts = [ cfg.PortOpt('nvmet_port_id', default=1, help='The id of the NVMe target port definition when not ' 'sharing targets. The starting port id value when ' 'sharing, incremented for each secondary ip address.'), cfg.IntOpt('nvmet_ns_id', default=10, help='Namespace id for the subsystem for the LVM volume when ' 'not sharing targets. The minimum id value when sharing.' 'Maximum supported value in Linux is 8192') ] scst_opts = [ cfg.StrOpt('scst_target_iqn_name', help='Certain ISCSI targets have predefined target names, ' 'SCST target driver uses this name.'), cfg.StrOpt('scst_target_driver', default='iscsi', help='SCST target implementation can choose from multiple ' 'SCST target drivers.'), ] backup_opts = [ cfg.BoolOpt('backup_use_temp_snapshot', default=False, help='If this is set to True, a temporary snapshot will ' 'be created for performing non-disruptive backups. ' 'Otherwise a temporary volume will be cloned ' 'in order to perform a backup.'), ] image_opts = [ cfg.BoolOpt('image_upload_use_cinder_backend', default=False, help='If set to True, upload-to-image in raw format will ' 'create a cloned volume and register its location to ' 'the image service, instead of uploading the volume ' 'content. The cinder backend and locations support ' 'must be enabled in the image service.'), cfg.BoolOpt('image_upload_use_internal_tenant', default=False, help='If set to True, the image volume created by ' 'upload-to-image will be placed in the internal tenant. ' 'Otherwise, the image volume is created in the current ' 'context\'s tenant.'), cfg.BoolOpt('image_volume_cache_enabled', default=False, help='Enable the image volume cache for this backend.'), cfg.IntOpt('image_volume_cache_max_size_gb', default=0, help='Max size of the image volume cache for this backend in ' 'GB. 0 => unlimited.'), cfg.IntOpt('image_volume_cache_max_count', default=0, help='Max number of entries allowed in the image volume cache. 
' '0 => unlimited.'), cfg.BoolOpt('use_multipath_for_image_xfer', default=False, help='Do we attach/detach volumes in cinder using multipath ' 'for volume to image and image to volume transfers? ' 'This parameter needs to be configured for each backend ' 'section or in [backend_defaults] section as a common ' 'configuration for all backends.'), cfg.BoolOpt('enforce_multipath_for_image_xfer', default=False, help='If this is set to True, attachment of volumes for ' 'image transfer will be aborted when multipathd is not ' 'running. Otherwise, it will fallback to single path. ' 'This parameter needs to be configured for each backend ' 'section or in [backend_defaults] section as a common ' 'configuration for all backends.'), ] fqdn_opts = [ cfg.BoolOpt('unique_fqdn_network', default=True, help="Whether or not our private network has unique FQDN on " "each initiator or not. For example networks with QA " "systems usually have multiple servers/VMs with the same " "FQDN. When true this will create host entries on 3PAR " "using the FQDN, when false it will use the reversed " "IQN/WWNN."), ] CONF = cfg.CONF CONF.register_opts(volume_opts, group=configuration.SHARED_CONF_GROUP) CONF.register_opts(iser_opts, group=configuration.SHARED_CONF_GROUP) CONF.register_opts(nvmeof_opts, group=configuration.SHARED_CONF_GROUP) CONF.register_opts(nvmet_opts, group=configuration.SHARED_CONF_GROUP) CONF.register_opts(scst_opts, group=configuration.SHARED_CONF_GROUP) CONF.register_opts(image_opts, group=configuration.SHARED_CONF_GROUP) CONF.register_opts(volume_opts) CONF.register_opts(iser_opts) CONF.register_opts(nvmeof_opts) CONF.register_opts(nvmet_opts) CONF.register_opts(scst_opts) CONF.register_opts(backup_opts) CONF.register_opts(image_opts) CONF.register_opts(fqdn_opts, group=configuration.SHARED_CONF_GROUP) CONF.import_opt('backup_use_same_host', 'cinder.backup.api') class BaseVD(object, metaclass=abc.ABCMeta): """Executes commands relating to Volumes. Base Driver for Cinder Volume Control Path, This includes supported/required implementation for API calls. Also provides *generic* implementation of core features like cloning, copy_image_to_volume etc, this way drivers that inherit from this base class and don't offer their own impl can fall back on a general solution here. Key thing to keep in mind with this driver is that it's intended that these drivers ONLY implement Control Path details (create, delete, extend...), while transport or data path related implementation should be a *member object* that we call a connector. The point here is that for example don't allow the LVM driver to implement iSCSI methods, instead call whatever connector it has configured via conf file (iSCSI{LIO, TGT, ET}, FC, etc). In the base class and for example the LVM driver we do this via a has-a relationship and just provide an interface to the specific connector methods. How you do this in your own driver is of course up to you. """ VERSION = "N/A" # NOTE(geguileo): By default we assume drivers don't support Active-Active # configurations. If driver supports it then they can set this class # attribute on the driver, and if support depends on configuration options # then they can set it at the instance level on the driver's __init__ # method since the manager will do the check after that. SUPPORTS_ACTIVE_ACTIVE = False # If a driver hasn't maintained their CI system, this will get # set to False, which prevents the driver from starting. 
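    # In that case the operator must explicitly opt in before the backend
    # will initialize; an illustrative cinder.conf snippet (the backend
    # section name here is hypothetical):
    #
    #     [my_backend]
    #     enable_unsupported_driver = True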
# Add enable_unsupported_driver = True in cinder.conf to get # the unsupported driver started. SUPPORTED = True # Methods checked to detect a driver implements a replication feature REPLICATION_FEATURE_CHECKERS = {'v2.1': 'failover_host', 'a/a': 'failover_completed'} def __init__(self, execute=utils.execute, *args, **kwargs): # TODO(stephenfin): Drop this in favour of using 'db' directly self.db = db self.host = kwargs.get('host') self.cluster_name = kwargs.get('cluster_name') self.configuration = kwargs.get('configuration', None) if self.configuration: self.configuration.append_config_values(volume_opts) self.configuration.append_config_values(iser_opts) self.configuration.append_config_values(nvmeof_opts) self.configuration.append_config_values(nvmet_opts) self.configuration.append_config_values(scst_opts) self.configuration.append_config_values(backup_opts) self.configuration.append_config_values(image_opts) self.configuration.append_config_values(fqdn_opts) volume_utils.setup_tracing( self.configuration.safe_get('trace_flags')) # NOTE(geguileo): Don't allow to start if we are enabling # replication on a cluster service with a backend that doesn't # support the required mechanism for Active-Active. replication_devices = self.configuration.safe_get( 'replication_device') if (self.cluster_name and replication_devices and not self.supports_replication_feature('a/a')): raise exception.Invalid(_("Driver doesn't support clustered " "replication.")) self.driver_utils = driver_utils.VolumeDriverUtils( self._driver_data_namespace(), self.db) self._execute = execute self._stats = {} self._throttle = None self.pools = [] self.capabilities = {} # We set these mappings up in the base driver so they # can be used by children # (intended for LVM, but others could use as well) self.target_mapping = { 'fake': 'cinder.volume.targets.fake.FakeTarget', 'lioadm': 'cinder.volume.targets.lio.LioAdm', 'tgtadm': 'cinder.volume.targets.tgt.TgtAdm', 'scstadmin': 'cinder.volume.targets.scst.SCSTAdm', 'iscsictl': 'cinder.volume.targets.cxt.CxtAdm', 'nvmet': 'cinder.volume.targets.nvmet.NVMET', 'spdk-nvmeof': 'cinder.volume.targets.spdknvmf.SpdkNvmf'} # set True by manager after successful check_for_setup self._initialized = False def _driver_data_namespace(self): namespace = self.__class__.__name__ if self.configuration: namespace = self.configuration.safe_get('driver_data_namespace') if not namespace: namespace = self.configuration.safe_get('volume_backend_name') return namespace def _is_non_recoverable(self, err, non_recoverable_list): for item in non_recoverable_list: if item in err: return True return False def _try_execute(self, *command, **kwargs): # NOTE(vish): Volume commands can partially fail due to timing, but # running them a second time on failure will usually # recover nicely. non_recoverable = kwargs.pop('no_retry_list', []) tries = 0 while True: try: self._execute(*command, **kwargs) return True except processutils.ProcessExecutionError as ex: tries = tries + 1 if tries >= self.configuration.num_shell_tries or\ self._is_non_recoverable(ex.stderr, non_recoverable): raise LOG.exception("Recovering from a failed execute. " "Try number %s", tries) time.sleep(tries ** 2) def _detach_volume(self, context, attach_info, volume, properties, force=False, remote=False, ignore_errors=False): """Disconnect the volume from the host. With the force parameter we can indicate if we give more importance to cleaning up as much as possible or if data integrity has higher priority. 
This requires the latests OS-Brick code that adds this feature. We can also force errors to be ignored using ignore_errors. """ # Use Brick's code to do attach/detach exc = brick_exception.ExceptionChainer() if attach_info: connector = attach_info['connector'] with exc.context(force, 'Disconnect failed'): connector.disconnect_volume(attach_info['conn']['data'], attach_info['device'], force=force, ignore_errors=ignore_errors) if remote: # Call remote manager's terminate_connection which includes # driver's terminate_connection and remove export rpcapi = volume_rpcapi.VolumeAPI() with exc.context(force, 'Remote terminate connection failed'): rpcapi.terminate_connection(context, volume, properties, force=force) else: # Call local driver's terminate_connection and remove export. # NOTE(avishay) This is copied from the manager's code - need to # clean this up in the future. with exc.context(force, _('Unable to terminate volume connection')): try: self.terminate_connection(volume, properties, force=force) except Exception as err: err_msg = ( _('Unable to terminate volume connection: %(err)s') % {'err': err}) LOG.error(err_msg) raise exception.VolumeBackendAPIException(data=err_msg) with exc.context(force, _('Unable to remove export')): try: LOG.debug("volume %s: removing export", volume['id']) self.remove_export(context, volume) except Exception as ex: LOG.exception("Error detaching volume %(volume)s, " "due to remove export failure.", {"volume": volume['id']}) raise exception.RemoveExportException(volume=volume['id'], reason=ex) if exc and not ignore_errors: raise exc def set_initialized(self): self._initialized = True @property def initialized(self): return self._initialized @property def supported(self): return self.SUPPORTED def set_throttle(self): bps_limit = ((self.configuration and self.configuration.safe_get('volume_copy_bps_limit')) or CONF.volume_copy_bps_limit) cgroup_name = ((self.configuration and self.configuration.safe_get( 'volume_copy_blkio_cgroup_name')) or CONF.volume_copy_blkio_cgroup_name) self._throttle = None if bps_limit: try: self._throttle = throttling.BlkioCgroup(int(bps_limit), cgroup_name) except processutils.ProcessExecutionError as err: LOG.warning('Failed to activate volume copy throttling: ' '%(err)s', {'err': err}) throttling.Throttle.set_default(self._throttle) def get_version(self): """Get the current version of this driver.""" return self.VERSION @abc.abstractmethod def check_for_setup_error(self): return @staticmethod def get_driver_options(): """Return the oslo_config options specific to the driver.""" return volume_opts @abc.abstractmethod def create_volume(self, volume): """Creates a volume. Can optionally return a Dictionary of changes to the volume object to be persisted. If volume_type extra specs includes 'capabilities:replication True' the driver needs to create a volume replica (secondary), and setup replication between the newly created volume and the secondary volume. Returned dictionary should include: .. code-block:: python volume['replication_status'] = 'copying' volume['replication_extended_status'] = volume['driver_data'] = """ return @abc.abstractmethod def delete_volume(self, volume): """Deletes a volume. If volume_type extra specs includes 'replication: True' then the driver needs to delete the volume replica too. It is imperative that this operation ensures that the data from the deleted volume cannot leak into new volumes when they are created, as new volumes are likely to belong to a different tenant/project. 
If the driver uses custom file locks they should be cleaned on success using cinder.utils.synchronized_remove """ return def secure_file_operations_enabled(self): """Determine if driver is running in Secure File Operations mode. The Cinder Volume driver needs to query if this driver is running in a secure file operations mode. By default, it is False: any driver that does support secure file operations should override this method. """ return False def get_volume_stats(self, refresh=False): """Get volume stats. If 'refresh' is True, run update the stats first. """ if not self._stats or refresh: self._update_volume_stats() return self._stats def _set_property(self, properties, entry, title, description, type, **kwargs): prop = dict(title=title, description=description, type=type) allowed_keys = ('enum', 'default', 'minimum', 'maximum') for key in kwargs: if key in allowed_keys: prop[key] = kwargs[key] properties[entry] = prop def _init_standard_capabilities(self): """Create a dictionary of Cinder standard capabilities. This method creates a dictionary of Cinder standard capabilities and returns the created dictionary. The keys of this dictionary don't contain prefix and separator(:). """ properties = {} self._set_property( properties, "thin_provisioning", "Thin Provisioning", _("Sets thin provisioning."), "boolean") self._set_property( properties, "compression", "Compression", _("Enables compression."), "boolean") self._set_property( properties, "qos", "QoS", _("Enables QoS."), "boolean") self._set_property( properties, "replication_enabled", "Replication", _("Enables replication."), "boolean") return properties def _init_vendor_properties(self): """Create a dictionary of vendor unique properties. This method creates a dictionary of vendor unique properties and returns both created dictionary and vendor name. Returned vendor name is used to check for name of vendor unique properties. - Vendor name shouldn't include colon(:) because of the separator and it is automatically replaced by underscore(_). ex. abc:d -> abc_d - Vendor prefix is equal to vendor name. ex. abcd - Vendor unique properties must start with vendor prefix + ':'. ex. abcd:maxIOPS Each backend driver needs to override this method to expose its own properties using _set_property() like this: self._set_property( properties, "vendorPrefix:specific_property", "Title of property", _("Description of property"), "type") : return dictionary of vendor unique properties : return vendor name Example of implementation:: properties = {} self._set_property( properties, "abcd:compression_type", "Compression type", _("Specifies compression type."), "string", enum=["lossy", "lossless", "special"]) self._set_property( properties, "abcd:minIOPS", "Minimum IOPS QoS", _("Sets minimum IOPS if QoS is enabled."), "integer", minimum=10, default=100) return properties, 'abcd' """ LOG.info("Driver hasn't implemented _init_vendor_properties()") return {}, None def init_capabilities(self): """Obtain backend volume stats and capabilities list. This stores a dictionary which is consisted of two parts. First part includes static backend capabilities which are obtained by get_volume_stats(). Second part is properties, which includes parameters correspond to extra specs. This properties part is consisted of cinder standard capabilities and vendor unique properties. Using this capabilities list, operator can manage/configure backend using key/value from capabilities without specific knowledge of backend. 
""" # Set static backend capabilities from get_volume_stats() stats = self.get_volume_stats(True) if stats: self.capabilities = stats.copy() # Set cinder standard capabilities self.capabilities['properties'] = self._init_standard_capabilities() # Set Vendor unique properties vendor_prop, vendor_name = self._init_vendor_properties() if vendor_name and vendor_prop: updated_vendor_prop = {} old_name = None # Replace colon in vendor name to underscore. if ':' in vendor_name: old_name = vendor_name vendor_name = vendor_name.replace(':', '_') LOG.warning('The colon in vendor name was replaced ' 'by underscore. Updated vendor name is ' '%(name)s".', {'name': vendor_name}) for key in vendor_prop: # If key has colon in vendor name field, we replace it to # underscore. # ex. abc:d:storagetype:provisioning # -> abc_d:storagetype:provisioning if old_name and key.startswith(old_name + ':'): new_key = key.replace(old_name, vendor_name, 1) updated_vendor_prop[new_key] = vendor_prop[key] continue if not key.startswith(vendor_name + ':'): LOG.warning('Vendor unique property "%(property)s" ' 'must start with vendor prefix with colon ' '"%(prefix)s". The property was ' 'not registered on capabilities list.', {'prefix': vendor_name + ':', 'property': key}) continue updated_vendor_prop[key] = vendor_prop[key] # Update vendor unique properties to the dictionary self.capabilities['vendor_prefix'] = vendor_name self.capabilities['properties'].update(updated_vendor_prop) LOG.debug("Initialized capabilities list: %s.", self.capabilities) def _update_pools_and_stats(self, data): """Updates data for pools and volume stats based on provided data.""" # provisioned_capacity_gb is set to None by default below, but # None won't be used in calculation. It will be overridden by # driver's provisioned_capacity_gb if reported, otherwise it # defaults to allocated_capacity_gb in host_manager.py. if self.pools: for pool in self.pools: new_pool = {} new_pool.update(dict( pool_name=pool, total_capacity_gb=0, free_capacity_gb=0, provisioned_capacity_gb=None, reserved_percentage=100, QoS_support=False, filter_function=self.get_filter_function(), goodness_function=self.get_goodness_function() )) data["pools"].append(new_pool) else: # No pool configured, the whole backend will be treated as a pool single_pool = {} single_pool.update(dict( pool_name=data["volume_backend_name"], total_capacity_gb=0, free_capacity_gb=0, provisioned_capacity_gb=None, reserved_percentage=100, QoS_support=False, filter_function=self.get_filter_function(), goodness_function=self.get_goodness_function() )) data["pools"].append(single_pool) self._stats = data def copy_image_to_volume(self, context, volume, image_service, image_id, disable_sparse=False): """Fetch image from image_service and write to unencrypted volume. This does not attach an encryptor layer when connecting to the volume. """ self._copy_image_data_to_volume( context, volume, image_service, image_id, encrypted=False, disable_sparse=disable_sparse) def copy_image_to_encrypted_volume( self, context, volume, image_service, image_id, disable_sparse=False): """Fetch image from image_service and write to encrypted volume. This attaches the encryptor layer when connecting to the volume. 
""" self._copy_image_data_to_volume( context, volume, image_service, image_id, encrypted=True, disable_sparse=disable_sparse) def _copy_image_data_to_volume(self, context, volume, image_service, image_id, encrypted=False, disable_sparse=False): """Fetch the image from image_service and write it to the volume.""" LOG.debug('copy_image_to_volume %s.', volume['name']) use_multipath = self.configuration.use_multipath_for_image_xfer enforce_multipath = self.configuration.enforce_multipath_for_image_xfer properties = volume_utils.brick_get_connector_properties( use_multipath, enforce_multipath) attach_info, volume = self._attach_volume(context, volume, properties) try: if encrypted: encryption = self.db.volume_encryption_metadata_get(context, volume.id) volume_utils.brick_attach_volume_encryptor(context, attach_info, encryption) try: image_utils.fetch_to_raw( context, image_service, image_id, attach_info['device']['path'], self.configuration.volume_dd_blocksize, size=volume['size'], disable_sparse=disable_sparse) except exception.ImageTooBig: with excutils.save_and_reraise_exception(): LOG.exception("Copying image %(image_id)s " "to volume failed due to " "insufficient available space.", {'image_id': image_id}) finally: if encrypted: volume_utils.brick_detach_volume_encryptor(attach_info, encryption) finally: self._detach_volume(context, attach_info, volume, properties, force=True) def copy_volume_to_image(self, context, volume, image_service, image_meta): """Copy the volume to the specified image.""" LOG.debug('copy_volume_to_image %s.', volume['name']) use_multipath = self.configuration.use_multipath_for_image_xfer enforce_multipath = self.configuration.enforce_multipath_for_image_xfer properties = volume_utils.brick_get_connector_properties( use_multipath, enforce_multipath) attach_info, volume = self._attach_volume(context, volume, properties) try: volume_utils.upload_volume(context, image_service, image_meta, attach_info['device']['path'], volume, compress=True) finally: # Since attached volume was not used for writing we can force # detach it self._detach_volume(context, attach_info, volume, properties, force=True, ignore_errors=True) def before_volume_copy(self, context, src_vol, dest_vol, remote=None): """Driver-specific actions executed before copying a volume. Refer to :obj:`cinder.interface.volume_driver.VolumeDriverCore.before_volume_copy` for additional information. """ pass def after_volume_copy(self, context, src_vol, dest_vol, remote=None): """Driver-specific actions executed after copying a volume. Refer to :obj:`cinder.interface.volume_driver.VolumeDriverCore.after_volume_copy` for additional information. """ pass def get_filter_function(self): """Get filter_function string. Returns either the string from the driver instance or global section in cinder.conf. If nothing is specified in cinder.conf, then try to find the default filter_function. When None is returned the scheduler will always pass the driver instance. :returns: a filter_function string or None """ ret_function = self.configuration.filter_function if not ret_function: ret_function = CONF.filter_function if not ret_function: ret_function = self.get_default_filter_function() return ret_function def get_goodness_function(self): """Get good_function string. Returns either the string from the driver instance or global section in cinder.conf. If nothing is specified in cinder.conf, then try to find the default goodness_function. When None is returned the scheduler will give the lowest score to the driver instance. 
:returns: a goodness_function string or None """ ret_function = self.configuration.goodness_function if not ret_function: ret_function = CONF.goodness_function if not ret_function: ret_function = self.get_default_goodness_function() return ret_function def get_default_filter_function(self): """Get the default filter_function string. Each driver could overwrite the method to return a well-known default string if it is available. :returns: None """ return None def get_default_goodness_function(self): """Get the default goodness_function string. Each driver could overwrite the method to return a well-known default string if it is available. :returns: None """ return None def _attach_volume(self, context, volume, properties, remote=False): """Attach the volume.""" if remote: # Call remote manager's initialize_connection which includes # driver's create_export and initialize_connection rpcapi = volume_rpcapi.VolumeAPI() try: conn = rpcapi.initialize_connection(context, volume, properties) except Exception: with excutils.save_and_reraise_exception(): # It is possible that initialize_connection fails due to # timeout. In fact, the volume is already attached after # the timeout error is raised, so the connection worths # a try of terminating. try: rpcapi.terminate_connection(context, volume, properties, force=True) except Exception: LOG.warning("Failed terminating the connection " "of volume %(volume_id)s, but it is " "acceptable.", {'volume_id': volume['id']}) else: # Call local driver's create_export and initialize_connection. # NOTE(avishay) This is copied from the manager's code - need to # clean this up in the future. model_update = None try: LOG.debug("Volume %s: creating export", volume['id']) model_update = self.create_export(context, volume, properties) if model_update: volume.update(model_update) volume.save() except exception.CinderException as ex: if model_update: LOG.exception("Failed updating model of volume " "%(volume_id)s with driver provided " "model %(model)s", {'volume_id': volume['id'], 'model': model_update}) raise exception.ExportFailure(reason=ex) try: conn = self.initialize_connection(volume, properties) except Exception as err: try: err_msg = (_('Unable to fetch connection information from ' 'backend: %(err)s') % {'err': err}) LOG.error(err_msg) LOG.debug("Cleaning up failed connect initialization.") self.remove_export(context, volume) except Exception as ex: ex_msg = (_('Error encountered during cleanup ' 'of a failed attach: %(ex)s') % {'ex': ex}) LOG.error(err_msg) raise exception.VolumeBackendAPIException(data=ex_msg) raise exception.VolumeBackendAPIException(data=err_msg) # Add encrypted flag to connection_info if not set in the driver. if conn['data'].get('encrypted') is None: encrypted = bool(volume.encryption_key_id) conn['data']['encrypted'] = encrypted # Append the enforce_multipath value if the connector has it conn['data']['enforce_multipath'] = properties.get( 'enforce_multipath', False) try: attach_info = self._connect_device(conn) except Exception as exc: # We may have reached a point where we have attached the volume, # so we have to detach it (do the cleanup). 
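            # Cinder exceptions keep their keyword arguments in exc.kwargs,
            # so a DeviceUnavailable raised by _connect_device() (see below)
            # can carry the partially built attach_info, which is recovered
            # here so the cleanup detach can still run.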
attach_info = getattr(exc, 'kwargs', {}).get('attach_info', None) try: LOG.debug('Device for volume %s is unavailable but did ' 'attach, detaching it.', volume['id']) self._detach_volume(context, attach_info, volume, properties, force=True, remote=remote) except Exception: LOG.exception('Error detaching volume %s', volume['id']) raise return (attach_info, volume) def _attach_snapshot(self, ctxt, snapshot, properties): """Attach the snapshot.""" model_update = None try: LOG.debug("Snapshot %s: creating export.", snapshot.id) model_update = self.create_export_snapshot(ctxt, snapshot, properties) if model_update: snapshot.provider_location = model_update.get( 'provider_location', None) snapshot.provider_auth = model_update.get( 'provider_auth', None) snapshot.save() except exception.CinderException as ex: if model_update: LOG.exception("Failed updating model of snapshot " "%(snapshot_id)s with driver provided " "model %(model)s.", {'snapshot_id': snapshot.id, 'model': model_update}) raise exception.ExportFailure(reason=ex) try: conn = self.initialize_connection_snapshot( snapshot, properties) except Exception as err: try: err_msg = (_('Unable to fetch connection information from ' 'backend: %(err)s') % {'err': err}) LOG.error(err_msg) LOG.debug("Cleaning up failed connect initialization.") self.remove_export_snapshot(ctxt, snapshot) except Exception as ex: ex_msg = (_('Error encountered during cleanup ' 'of a failed attach: %(ex)s') % {'ex': ex}) LOG.error(err_msg) raise exception.VolumeBackendAPIException(data=ex_msg) raise exception.VolumeBackendAPIException(data=err_msg) return conn def _connect_device(self, conn): # Use Brick's code to do attach/detach use_multipath = self.configuration.use_multipath_for_image_xfer device_scan_attempts = self.configuration.num_volume_device_scan_tries protocol = conn['driver_volume_type'] connector = volume_utils.brick_get_connector( protocol, use_multipath=use_multipath, device_scan_attempts=device_scan_attempts, conn=conn) device = connector.connect_volume(conn['data']) host_device = device['path'] attach_info = {'conn': conn, 'device': device, 'connector': connector} unavailable = True try: # Secure network file systems will NOT run as root. root_access = not self.secure_file_operations_enabled() unavailable = not connector.check_valid_device(host_device, root_access) except Exception: LOG.exception('Could not validate device %s', host_device) if unavailable: raise exception.DeviceUnavailable(path=host_device, attach_info=attach_info, reason=(_("Unable to access " "the backend storage " "via the path " "%(path)s.") % {'path': host_device})) return attach_info def clone_image(self, context, volume, image_location, image_meta, image_service): """Create a volume efficiently from an existing image. Refer to :obj:`cinder.interface.volume_driver.VolumeDriverCore.clone_image` for additional information. """ return None, False def backup_use_temp_snapshot(self): """Get the configured setting for backup from snapshot. If an inheriting driver does not support this operation, the driver should override this method to return false and log a warning letting the administrator know they have configured something that cannot be done. """ return self.configuration.safe_get("backup_use_temp_snapshot") def snapshot_revert_use_temp_snapshot(self): # Specify whether a temporary backup snapshot should be used when # reverting a snapshot. For some backends, this operation is not # needed or not supported, in which case the driver should override # this method. 
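        # Illustrative only: a backend that reverts snapshots in place and
        # needs no temporary snapshot would typically override this method
        # in its driver and simply return False.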
return True def get_backup_device(self, context, backup): """Get a backup device from an existing volume. The function returns a volume or snapshot to backup service, and then backup service attaches the device and does backup. """ backup_device = None is_snapshot = False if self.backup_use_temp_snapshot(): (backup_device, is_snapshot) = ( self._get_backup_volume_temp_snapshot(context, backup)) else: backup_device = self._get_backup_volume_temp_volume( context, backup) is_snapshot = False return (backup_device, is_snapshot) def _get_backup_volume_temp_volume(self, context, backup): """Return a volume to do backup. To backup a snapshot, create a temp volume from the snapshot and back it up. Otherwise to backup an in-use volume, create a temp volume and back it up. """ volume = objects.Volume.get_by_id(context, backup.volume_id) snapshot = None if backup.snapshot_id: snapshot = objects.Snapshot.get_by_id(context, backup.snapshot_id) LOG.debug('Creating a new backup for volume %s.', volume['name']) temp_vol_ref = None device_to_backup = volume # NOTE(xyang): If it is to backup from snapshot, create a temp # volume from the source snapshot, backup the temp volume, and # then clean up the temp volume. if snapshot: temp_vol_ref = self._create_temp_volume_from_snapshot( context, volume, snapshot, status=fields.VolumeStatus.BACKING_UP) backup.temp_volume_id = temp_vol_ref.id backup.save() device_to_backup = temp_vol_ref else: # NOTE(xyang): Check volume status if it is not to backup from # snapshot; if 'in-use', create a temp volume from the source # volume, backup the temp volume, and then clean up the temp # volume; if 'available', just backup the volume. previous_status = volume.get('previous_status') if previous_status == "in-use": temp_vol_ref = self._create_temp_cloned_volume( context, volume, status=fields.VolumeStatus.BACKING_UP) backup.temp_volume_id = temp_vol_ref.id backup.save() device_to_backup = temp_vol_ref return device_to_backup def _get_backup_volume_temp_snapshot(self, context, backup): """Return a device to backup. If it is to backup from snapshot, back it up directly. Otherwise for in-use volume, create a temp snapshot and back it up. """ volume = objects.Volume.get_by_id(context, backup.volume_id) snapshot = None if backup.snapshot_id: snapshot = objects.Snapshot.get_by_id(context, backup.snapshot_id) LOG.debug('Creating a new backup for volume %s.', volume['name']) device_to_backup = volume is_snapshot = False temp_snapshot = None # NOTE(xyang): If it is to backup from snapshot, back it up # directly. No need to clean it up. if snapshot: device_to_backup = snapshot is_snapshot = True else: # NOTE(xyang): If it is not to backup from snapshot, check volume # status. If the volume status is 'in-use', create a temp snapshot # from the source volume, backup the temp snapshot, and then clean # up the temp snapshot; if the volume status is 'available', just # backup the volume. 
previous_status = volume.get('previous_status') if previous_status == "in-use": temp_snapshot = self._create_temp_snapshot(context, volume) backup.temp_snapshot_id = temp_snapshot.id backup.save() device_to_backup = temp_snapshot is_snapshot = True return (device_to_backup, is_snapshot) def _create_temp_snapshot(self, context, volume): kwargs = { 'volume_id': volume['id'], 'cgsnapshot_id': None, 'user_id': context.user_id, 'project_id': context.project_id, 'status': fields.SnapshotStatus.CREATING, 'progress': '0%', 'volume_size': volume['size'], 'display_name': 'backup-snap-%s' % volume['id'], 'display_description': None, 'volume_type_id': volume['volume_type_id'], 'encryption_key_id': volume['encryption_key_id'], 'use_quota': False, # Don't count for quota 'metadata': {}, } temp_snap_ref = objects.Snapshot(context=context, **kwargs) temp_snap_ref.create() try: model_update = self.create_snapshot(temp_snap_ref) if model_update: temp_snap_ref.update(model_update) except Exception: with excutils.save_and_reraise_exception(): with temp_snap_ref.obj_as_admin(): self.db.volume_glance_metadata_delete_by_snapshot( context, temp_snap_ref.id) temp_snap_ref.destroy() temp_snap_ref.status = fields.SnapshotStatus.AVAILABLE temp_snap_ref.progress = '100%' temp_snap_ref.save() return temp_snap_ref def _create_temp_volume(self, context, volume, volume_options=None): kwargs = { 'size': volume.size, 'display_name': 'backup-vol-%s' % volume.id, 'host': volume.host, 'cluster_name': volume.cluster_name, 'user_id': context.user_id, 'project_id': context.project_id, 'status': 'creating', 'attach_status': fields.VolumeAttachStatus.DETACHED, 'availability_zone': volume.availability_zone, 'volume_type_id': volume.volume_type_id, 'use_quota': False, # Don't count for quota } kwargs.update(volume_options or {}) temp_vol_ref = objects.Volume(context=context.elevated(), **kwargs) temp_vol_ref.create() return temp_vol_ref def _create_temp_cloned_volume(self, context, volume, status=fields.VolumeStatus.AVAILABLE): temp_vol_ref = self._create_temp_volume(context, volume) try: model_update = self.create_cloned_volume(temp_vol_ref, volume) if model_update: temp_vol_ref.update(model_update) except Exception: with excutils.save_and_reraise_exception(): temp_vol_ref.destroy() temp_vol_ref.status = status temp_vol_ref.save() return temp_vol_ref def _create_temp_volume_from_snapshot( self, context, volume, snapshot, volume_options=None, status=fields.VolumeStatus.AVAILABLE): temp_vol_ref = self._create_temp_volume(context, volume, volume_options=volume_options) try: model_update = self.create_volume_from_snapshot(temp_vol_ref, snapshot) if model_update: temp_vol_ref.update(model_update) except Exception: with excutils.save_and_reraise_exception(): temp_vol_ref.destroy() temp_vol_ref.status = status temp_vol_ref.save() return temp_vol_ref def clear_download(self, context, volume): """Clean up after an interrupted image copy.""" pass def do_setup(self, context): """Any initialization the volume driver does while starting.""" pass def validate_connector(self, connector): """Fail if connector doesn't contain all the data needed by driver.""" pass def update_migrated_volume(self, ctxt, volume, new_volume, original_volume_status): """Return model update for migrated volume. Refer to :obj:`cinder.interface.volume_driver.VolumeDriverCore.update_migrated_volume` for additional information. 
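        As a purely illustrative sketch (not a required contract; the field
        names follow the common driver pattern), an override usually returns
        the fields that should be swapped onto the original volume record:

        .. code:: python

            {'_name_id': new_volume.name_id,
             'provider_location': new_volume.provider_location}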
""" msg = _("The method update_migrated_volume is not implemented.") raise NotImplementedError(msg) @staticmethod def validate_connector_has_setting(connector, setting): pass def retype(self, context, volume, new_type, diff, host): """Change the type of a volume. Refer to :obj:`cinder.interface.volume_driver.VolumeDriverCore.retype` for additional information. """ return False, None def create_cloned_volume(self, volume, src_vref): """Creates a clone of the specified volume. If volume_type extra specs includes 'replication: True' the driver needs to create a volume replica (secondary) and setup replication between the newly created volume and the secondary volume. """ raise NotImplementedError() # ####### Interface methods for DataPath (Connector) ######## @abc.abstractmethod def ensure_export(self, context, volume): """Synchronously recreates an export for a volume.""" return @abc.abstractmethod def create_export(self, context, volume, connector): """Exports the volume. Can optionally return a Dictionary of changes to the volume object to be persisted. """ return def create_export_snapshot(self, context, snapshot, connector): """Exports the snapshot. Can optionally return a Dictionary of changes to the snapshot object to be persisted. """ return @abc.abstractmethod def remove_export(self, context, volume): """Removes an export for a volume.""" return def remove_export_snapshot(self, context, snapshot): """Removes an export for a snapshot.""" return @abc.abstractmethod def initialize_connection(self, volume, connector): """Allow connection to connector and return connection info. ..note:: Whether or not a volume is 'cacheable' for volume local cache on the hypervisor is normally configured in the volume-type extra-specs. Support may be disabled at the driver level, however, by returning "cacheable": False in the conn_info. This will override any setting in the volume-type extra-specs. :param volume: The volume to be attached :param connector: Dictionary containing information about what is being connected to. :returns conn_info: A dictionary of connection information. """ return def initialize_connection_snapshot(self, snapshot, connector, **kwargs): """Allow connection to connector and return connection info. :param snapshot: The snapshot to be attached :param connector: Dictionary containing information about what is being connected to. :returns conn_info: A dictionary of connection information. This can optionally include a "initiator_updates" field. The "initiator_updates" field must be a dictionary containing a "set_values" and/or "remove_values" field. The "set_values" field must be a dictionary of key-value pairs to be set/updated in the db. The "remove_values" field must be a list of keys, previously set with "set_values", that will be deleted from the db. """ return @abc.abstractmethod def terminate_connection(self, volume, connector, **kwargs): """Disallow connection from connector. :param volume: The volume to be disconnected. :param connector: A dictionary describing the connection with details about the initiator. Can be None. """ return def terminate_connection_snapshot(self, snapshot, connector, **kwargs): """Disallow connection from connector.""" return def get_pool(self, volume): """Return pool name where volume reside on. :param volume: The volume hosted by the driver. :returns: name of the pool where given volume is in. """ return None def update_provider_info(self, volumes, snapshots): """Get provider info updates from driver. 
        :param volumes: List of Cinder volumes to check for updates
        :param snapshots: List of Cinder snapshots to check for updates
        :returns: tuple (volume_updates, snapshot_updates)

        where volume updates {'id': uuid, provider_id: <provider id>}
        and snapshot updates {'id': uuid, provider_id: <provider id>}
        """
        return None, None

    def migrate_volume(self, context, volume, host):
        """Migrate the volume to the specified host.

        This is a stub for drivers that don't implement an enhanced
        version of this operation.

        Refer to
        :obj:`cinder.interface.volume_driver.VolumeDriverCore.migrate_volume`
        for additional information.
        """
        return (False, None)

    def manage_existing(self, volume, existing_ref):
        """Manage existing stub.

        This is for drivers that don't implement manage_existing().
        """
        msg = _("Manage existing volume not implemented.")
        raise NotImplementedError(msg)

    def unmanage(self, volume):
        """Unmanage stub.

        This is for drivers that don't implement unmanage().
        """
        msg = _("Unmanage volume not implemented.")
        raise NotImplementedError(msg)

    def freeze_backend(self, context):
        """Notify the backend that it's frozen.

        We use this to prohibit the creation of any new resources on the
        backend, or any modifications to existing items on a backend. We
        set/enforce this by not allowing scheduling of new volumes to the
        specified backend, and checking at the api for modifications to
        resources and failing.

        In most cases the driver may not need to do anything, but this
        provides a handle if they need it.

        :param context: security context
        :response: True|False
        """
        return True

    def thaw_backend(self, context):
        """Notify the backend that it's unfrozen/thawed.

        Returns the backend to a normal state after a freeze operation.

        In most cases the driver may not need to do anything, but this
        provides a handle if they need it.

        :param context: security context
        :response: True|False
        """
        return True

    def failover_host(self, context, volumes, secondary_id=None, groups=None):
        """Failover a backend to a secondary replication target.

        Instructs a replication capable/configured backend to failover
        to one of its secondary replication targets. secondary_id=None is
        an acceptable input, and leaves it to the driver to failover
        to the only configured target, or to choose a target on its
        own. All of the host's volumes will be passed on to the driver
        in order for it to determine the replicated volumes on the host,
        if needed.

        Response is a tuple, including the new target backend_id
        AND a list of dictionaries with volume_id and updates.

        Key things to consider (attaching failed-over volumes):

        - provider_location
        - provider_auth
        - provider_id
        - replication_status

        :param context: security context
        :param volumes: list of volume objects, in case the driver
                        needs to take action on them in some way
        :param secondary_id: Specifies rep target backend to fail over to
        :param groups: replication groups
        :returns: ID of the backend that was failed-over to,
                  model update for volumes, and model update for groups
        """
        # Example volume_updates data structure:
        # [{'volume_id': <volume id>,
        #   'updates': {'provider_id': 8,
        #               'replication_status': 'failed-over',
        #               'replication_extended_status': 'whatever',...}},]
        # Example group_updates data structure:
        # [{'group_id': <group id>,
        #   'updates': {'replication_status': 'failed-over',...}},]
        raise NotImplementedError()

    def failover(self, context, volumes, secondary_id=None, groups=None):
        """Like failover but for a host that is clustered.

        Most of the time this will be the exact same behavior as
        failover_host, so if it's not overwritten, it is assumed to be
        the case.
""" return self.failover_host(context, volumes, secondary_id, groups) def failover_completed(self, context, active_backend_id=None): """This method is called after failover for clustered backends.""" raise NotImplementedError() @classmethod def _is_base_method(cls, method_name): method = getattr(cls, method_name) return method.__module__ == getattr(BaseVD, method_name).__module__ # Replication Group (Tiramisu) def enable_replication(self, context, group, volumes): """Enables replication for a group and volumes in the group. :param group: group object :param volumes: list of volume objects in the group :returns: model_update - dict of group updates :returns: volume_model_updates - list of dicts of volume updates """ raise NotImplementedError() # Replication Group (Tiramisu) def disable_replication(self, context, group, volumes): """Disables replication for a group and volumes in the group. :param group: group object :param volumes: list of volume objects in the group :returns: model_update - dict of group updates :returns: volume_model_updates - list of dicts of volume updates """ raise NotImplementedError() # Replication Group (Tiramisu) def failover_replication(self, context, group, volumes, secondary_backend_id=None): """Fails over replication for a group and volumes in the group. :param group: group object :param volumes: list of volume objects in the group :param secondary_backend_id: backend_id of the secondary site :returns: model_update - dict of group updates :returns: volume_model_updates - list of dicts of volume updates """ raise NotImplementedError() def get_replication_error_status(self, context, groups): """Returns error info for replicated groups and its volumes. :returns: group_model_updates - list of dicts of group updates if error happens. For example, a dict of a group can be as follows: .. code:: python {'group_id': xxxx, 'replication_status': fields.ReplicationStatus.ERROR} :returns: volume_model_updates - list of dicts of volume updates if error happens. For example, a dict of a volume can be as follows: .. code:: python {'volume_id': xxxx, 'replication_status': fields.ReplicationStatus.ERROR} """ return [], [] @classmethod def supports_replication_feature(cls, feature): """Check if driver class supports replication features. Feature is a string that must be one of: - v2.1 - a/a """ if feature not in cls.REPLICATION_FEATURE_CHECKERS: return False # Check if method is being implemented/overwritten by the driver method_name = cls.REPLICATION_FEATURE_CHECKERS[feature] return not cls._is_base_method(method_name) def create_group(self, context, group): """Creates a group. :param context: the context of the caller. :param group: the Group object of the group to be created. :returns: model_update model_update will be in this format: {'status': xxx, ......}. If the status in model_update is 'error', the manager will throw an exception and it will be caught in the try-except block in the manager. If the driver throws an exception, the manager will also catch it in the try-except block. The group status in the db will be changed to 'error'. For a successful operation, the driver can either build the model_update and return it or return None. The group status will be set to 'available'. """ raise NotImplementedError() def delete_group(self, context, group, volumes): """Deletes a group. :param context: the context of the caller. :param group: the Group object of the group to be deleted. :param volumes: a list of Volume objects in the group. 
:returns: model_update, volumes_model_update param volumes is a list of objects retrieved from the db. It cannot be assigned to volumes_model_update. volumes_model_update is a list of dictionaries. It has to be built by the driver. An entry will be in this format: {'id': xxx, 'status': xxx, ......}. model_update will be in this format: {'status': xxx, ......}. The driver should populate volumes_model_update and model_update and return them. The manager will check volumes_model_update and update db accordingly for each volume. If the driver successfully deleted some volumes but failed to delete others, it should set statuses of the volumes accordingly so that the manager can update db correctly. If the status in any entry of volumes_model_update is 'error_deleting' or 'error', the status in model_update will be set to the same if it is not already 'error_deleting' or 'error'. If the status in model_update is 'error_deleting' or 'error', the manager will raise an exception and the status of the group will be set to 'error' in the db. If volumes_model_update is not returned by the driver, the manager will set the status of every volume in the group to 'error' in the except block. If the driver raises an exception during the operation, it will be caught by the try-except block in the manager. The statuses of the group and all volumes in it will be set to 'error'. For a successful operation, the driver can either build the model_update and volumes_model_update and return them or return None, None. The statuses of the group and all volumes will be set to 'deleted' after the manager deletes them from db. """ raise NotImplementedError() def update_group(self, context, group, add_volumes=None, remove_volumes=None): """Updates a group. :param context: the context of the caller. :param group: the Group object of the group to be updated. :param add_volumes: a list of Volume objects to be added. :param remove_volumes: a list of Volume objects to be removed. :returns: model_update, add_volumes_update, remove_volumes_update model_update is a dictionary that the driver wants the manager to update upon a successful return. If None is returned, the manager will set the status to 'available'. add_volumes_update and remove_volumes_update are lists of dictionaries that the driver wants the manager to update upon a successful return. Note that each entry requires a {'id': xxx} so that the correct volume entry can be updated. If None is returned, the volume will remain its original status. Also note that you cannot directly assign add_volumes to add_volumes_update as add_volumes is a list of volume objects and cannot be used for db update directly. Same with remove_volumes. If the driver throws an exception, the status of the group as well as those of the volumes to be added/removed will be set to 'error'. """ raise NotImplementedError() def create_group_from_src(self, context, group, volumes, group_snapshot=None, snapshots=None, source_group=None, source_vols=None): """Creates a group from source. :param context: the context of the caller. :param group: the Group object to be created. :param volumes: a list of Volume objects in the group. :param group_snapshot: the GroupSnapshot object as source. :param snapshots: a list of Snapshot objects in group_snapshot. :param source_group: the Group object as source. :param source_vols: a list of Volume objects in the source_group. :returns: model_update, volumes_model_update The source can be group_snapshot or a source_group. 
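        For illustration only (values are hypothetical and the exact fields
        are backend specific), a successful return could look like:

        .. code:: python

            model_update = {'status': 'available'}
            volumes_model_update = [{'id': vol['id'], 'status': 'available'}
                                    for vol in volumes]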
param volumes is a list of objects retrieved from the db. It cannot be assigned to volumes_model_update. volumes_model_update is a list of dictionaries. It has to be built by the driver. An entry will be in this format: {'id': xxx, 'status': xxx, ......}. model_update will be in this format: {'status': xxx, ......}. To be consistent with other volume operations, the manager will assume the operation is successful if no exception is thrown by the driver. For a successful operation, the driver can either build the model_update and volumes_model_update and return them or return None, None. """ raise NotImplementedError() def create_group_snapshot(self, context, group_snapshot, snapshots): """Creates a group_snapshot. :param context: the context of the caller. :param group_snapshot: the GroupSnapshot object to be created. :param snapshots: a list of Snapshot objects in the group_snapshot. :returns: model_update, snapshots_model_update param snapshots is a list of Snapshot objects. It cannot be assigned to snapshots_model_update. snapshots_model_update is a list of dictionaries. It has to be built by the driver. An entry will be in this format: {'id': xxx, 'status': xxx, ......}. model_update will be in this format: {'status': xxx, ......}. The driver should populate snapshots_model_update and model_update and return them. The manager will check snapshots_model_update and update db accordingly for each snapshot. If the driver successfully deleted some snapshots but failed to delete others, it should set statuses of the snapshots accordingly so that the manager can update db correctly. If the status in any entry of snapshots_model_update is 'error', the status in model_update will be set to the same if it is not already 'error'. If the status in model_update is 'error', the manager will raise an exception and the status of group_snapshot will be set to 'error' in the db. If snapshots_model_update is not returned by the driver, the manager will set the status of every snapshot to 'error' in the except block. If the driver raises an exception during the operation, it will be caught by the try-except block in the manager and the statuses of group_snapshot and all snapshots will be set to 'error'. For a successful operation, the driver can either build the model_update and snapshots_model_update and return them or return None, None. The statuses of group_snapshot and all snapshots will be set to 'available' at the end of the manager function. """ raise NotImplementedError() def delete_group_snapshot(self, context, group_snapshot, snapshots): """Deletes a group_snapshot. :param context: the context of the caller. :param group_snapshot: the GroupSnapshot object to be deleted. :param snapshots: a list of Snapshot objects in the group_snapshot. :returns: model_update, snapshots_model_update param snapshots is a list of objects. It cannot be assigned to snapshots_model_update. snapshots_model_update is a list of of dictionaries. It has to be built by the driver. An entry will be in this format: {'id': xxx, 'status': xxx, ......}. model_update will be in this format: {'status': xxx, ......}. The driver should populate snapshots_model_update and model_update and return them. The manager will check snapshots_model_update and update db accordingly for each snapshot. If the driver successfully deleted some snapshots but failed to delete others, it should set statuses of the snapshots accordingly so that the manager can update db correctly. 
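        For example (illustrative values only), a partial failure could be
        reported as:

        .. code:: python

            snapshots_model_update = [
                {'id': snapshots[0].id, 'status': 'deleted'},
                {'id': snapshots[1].id, 'status': 'error_deleting'},
            ]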
If the status in any entry of snapshots_model_update is 'error_deleting' or 'error', the status in model_update will be set to the same if it is not already 'error_deleting' or 'error'. If the status in model_update is 'error_deleting' or 'error', the manager will raise an exception and the status of group_snapshot will be set to 'error' in the db. If snapshots_model_update is not returned by the driver, the manager will set the status of every snapshot to 'error' in the except block. If the driver raises an exception during the operation, it will be caught by the try-except block in the manager and the statuses of group_snapshot and all snapshots will be set to 'error'. For a successful operation, the driver can either build the model_update and snapshots_model_update and return them or return None, None. The statuses of group_snapshot and all snapshots will be set to 'deleted' after the manager deletes them from db. """ raise NotImplementedError() def extend_volume(self, volume, new_size): msg = _("Extend volume not implemented") raise NotImplementedError(msg) def accept_transfer(self, context, volume, new_user, new_project): pass def create_volume_from_backup(self, volume, backup): """Creates a volume from a backup. Can optionally return a Dictionary of changes to the volume object to be persisted. :param volume: the volume object to be created. :param backup: the backup object as source. :returns: volume_model_update """ raise NotImplementedError() @staticmethod def _get_oslo_driver_opts(*cfg_names): """Return an oslo driver options list from argument string (names).""" return [CONF.backend_defaults._group._opts[cfg_name]['opt'] for cfg_name in cfg_names] @classmethod def clean_volume_file_locks(cls, volume_id): """Clean up driver specific volume locks. This method will be called when a volume has been removed from Cinder or when we detect that the volume doesn't exist. There are 3 types of locks in Cinder: - Process locks: Don't need cleanup - Node locks: Must use cinder.utils.synchronized_remove - Global locks: Must use cinder.coordination.synchronized_remove When using method cinder.utils.synchronized_remove we must pass the exact lock name, whereas method cinder.coordination.synchronized_remove accepts a glob. Refer to clean_volume_file_locks, api_clean_volume_file_locks, and clean_snapshot_file_locks in cinder.utils for examples. """ pass @classmethod def clean_snapshot_file_locks(self, snapshot_id): """Clean up driver specific snapshot locks. This method will be called when a snapshot has been removed from cinder or when we detect that the snapshot doesn't exist. There are 3 types of locks in Cinder: - Process locks: Don't need cleanup - Node locks: Must use cinder.utils.synchronized_remove - Global locks: Must use cinder.coordination.synchronized_remove When using method cinder.utils.synchronized_remove we must pass the exact lock name, whereas method cinder.coordination.synchronized_remove accepts a glob. Refer to clean_volume_file_locks, api_clean_volume_file_locks, and clean_snapshot_file_locks in cinder.utils for examples. """ pass class CloneableImageVD(object, metaclass=abc.ABCMeta): @abc.abstractmethod def clone_image(self, context, volume, image_location, image_meta, image_service): """Create a volume efficiently from an existing image. Refer to :obj:`cinder.interface.volume_driver.VolumeDriverCore.clone_image` for additional information. 
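        As a hedged illustration of the return shape only: a driver that
        cannot clone the image returns (None, False), while a successful
        clone returns a model update (or None) together with True, e.g.
        ({'provider_location': 'backend-specific-location'}, True).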
""" return None, False class MigrateVD(object, metaclass=abc.ABCMeta): @abc.abstractmethod def migrate_volume(self, context, volume, host): """Migrate the volume to the specified host. Refer to :obj:`cinder.interface.volume_driver.VolumeDriverCore.migrate_volume` for additional information. """ return (False, None) class ManageableVD(object, metaclass=abc.ABCMeta): @abc.abstractmethod def manage_existing(self, volume, existing_ref): """Brings an existing backend storage object under Cinder management. existing_ref is passed straight through from the API request's manage_existing_ref value, and it is up to the driver how this should be interpreted. It should be sufficient to identify a storage object that the driver should somehow associate with the newly-created cinder volume structure. There are two ways to do this: 1. Rename the backend storage object so that it matches the, volume['name'] which is how drivers traditionally map between a cinder volume and the associated backend storage object. 2. Place some metadata on the volume, or somewhere in the backend, that allows other driver requests (e.g. delete, clone, attach, detach...) to locate the backend storage object when required. If the existing_ref doesn't make sense, or doesn't refer to an existing backend storage object, raise a ManageExistingInvalidReference exception. The volume may have a volume_type, and the driver can inspect that and compare against the properties of the referenced backend storage object. If they are incompatible, raise a ManageExistingVolumeTypeMismatch, specifying a reason for the failure. :param volume: Cinder volume to manage :param existing_ref: Driver-specific information used to identify a volume """ return @abc.abstractmethod def manage_existing_get_size(self, volume, existing_ref): """Return size of volume to be managed by manage_existing. When calculating the size, round up to the next GB. :param volume: Cinder volume to manage :param existing_ref: Driver-specific information used to identify a volume :returns size: Volume size in GiB (integer) """ return def get_manageable_volumes(self, cinder_volumes, marker, limit, offset, sort_keys, sort_dirs): """List volumes on the backend available for management by Cinder. Returns a list of dictionaries, each specifying a volume in the host, with the following keys: - reference (dictionary): The reference for a volume, which can be passed to "manage_existing". - size (int): The size of the volume according to the storage backend, rounded up to the nearest GB. - safe_to_manage (boolean): Whether or not this volume is safe to manage according to the storage backend. For example, is the volume in use or invalid for any reason. - reason_not_safe (string): If safe_to_manage is False, the reason why. - cinder_id (string): If already managed, provide the Cinder ID. - extra_info (string): Any extra information to return to the user :param cinder_volumes: A list of volumes in this host that Cinder currently manages, used to determine if a volume is manageable or not. 
:param marker: The last item of the previous page; we return the next results after this value (after sorting) :param limit: Maximum number of items to return :param offset: Number of items to skip after marker :param sort_keys: List of keys to sort results by (valid keys are 'identifier' and 'size') :param sort_dirs: List of directions to sort by, corresponding to sort_keys (valid directions are 'asc' and 'desc') """ return [] @abc.abstractmethod def unmanage(self, volume): """Removes the specified volume from Cinder management. Does not delete the underlying backend storage object. For most drivers, this will not need to do anything. However, some drivers might use this call as an opportunity to clean up any Cinder-specific configuration that they have associated with the backend storage object. :param volume: Cinder volume to unmanage """ pass class ManageableSnapshotsVD(object, metaclass=abc.ABCMeta): # NOTE: Can't use abstractmethod before all drivers implement it def manage_existing_snapshot(self, snapshot, existing_ref): """Brings an existing backend storage object under Cinder management. existing_ref is passed straight through from the API request's manage_existing_ref value, and it is up to the driver how this should be interpreted. It should be sufficient to identify a storage object that the driver should somehow associate with the newly-created cinder snapshot structure. There are two ways to do this: 1. Rename the backend storage object so that it matches the snapshot['name'] which is how drivers traditionally map between a cinder snapshot and the associated backend storage object. 2. Place some metadata on the snapshot, or somewhere in the backend, that allows other driver requests (e.g. delete) to locate the backend storage object when required. If the existing_ref doesn't make sense, or doesn't refer to an existing backend storage object, raise a ManageExistingInvalidReference exception. :param snapshot: Cinder volume snapshot to manage :param existing_ref: Driver-specific information used to identify a volume snapshot """ return # NOTE: Can't use abstractmethod before all drivers implement it def manage_existing_snapshot_get_size(self, snapshot, existing_ref): """Return size of snapshot to be managed by manage_existing. When calculating the size, round up to the next GB. :param snapshot: Cinder volume snapshot to manage :param existing_ref: Driver-specific information used to identify a volume snapshot :returns size: Volume snapshot size in GiB (integer) """ return def get_manageable_snapshots(self, cinder_snapshots, marker, limit, offset, sort_keys, sort_dirs): """List snapshots on the backend available for management by Cinder. Returns a list of dictionaries, each specifying a snapshot in the host, with the following keys: - reference (dictionary): The reference for a snapshot, which can be passed to "manage_existing_snapshot". - size (int): The size of the snapshot according to the storage backend, rounded up to the nearest GB. - safe_to_manage (boolean): Whether or not this snapshot is safe to manage according to the storage backend. For example, is the snapshot in use or invalid for any reason. - reason_not_safe (string): If safe_to_manage is False, the reason why. - cinder_id (string): If already managed, provide the Cinder ID. - extra_info (string): Any extra information to return to the user - source_reference (string): Similar to "reference", but for the snapshot's source volume. 
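        For illustration only (all values are hypothetical), one returned
        entry might look like:

        .. code:: python

            {'reference': {'source-name': 'backend-snap-001'},
             'size': 2,
             'safe_to_manage': True,
             'reason_not_safe': None,
             'cinder_id': None,
             'extra_info': None,
             'source_reference': {'source-name': 'backend-vol-001'}}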
:param cinder_snapshots: A list of snapshots in this host that Cinder currently manages, used to determine if a snapshot is manageable or not. :param marker: The last item of the previous page; we return the next results after this value (after sorting) :param limit: Maximum number of items to return :param offset: Number of items to skip after marker :param sort_keys: List of keys to sort results by (valid keys are 'identifier' and 'size') :param sort_dirs: List of directions to sort by, corresponding to sort_keys (valid directions are 'asc' and 'desc') """ return [] # NOTE: Can't use abstractmethod before all drivers implement it def unmanage_snapshot(self, snapshot): """Removes the specified snapshot from Cinder management. Does not delete the underlying backend storage object. For most drivers, this will not need to do anything. However, some drivers might use this call as an opportunity to clean up any Cinder-specific configuration that they have associated with the backend storage object. :param snapshot: Cinder volume snapshot to unmanage """ pass class VolumeDriver(ManageableVD, CloneableImageVD, ManageableSnapshotsVD, MigrateVD, BaseVD): def check_for_setup_error(self): raise NotImplementedError() def create_volume(self, volume): raise NotImplementedError() def create_volume_from_snapshot(self, volume, snapshot): """Creates a volume from a snapshot. If volume_type extra specs includes 'replication: True' the driver needs to create a volume replica (secondary), and setup replication between the newly created volume and the secondary volume. """ raise NotImplementedError() def delete_volume(self, volume): raise NotImplementedError() def create_snapshot(self, snapshot): """Creates a snapshot.""" raise NotImplementedError() def delete_snapshot(self, snapshot): """Deletes a snapshot. If the driver uses custom file locks they should be cleaned on success using cinder.utils.synchronized_remove """ raise NotImplementedError() def local_path(self, volume): raise NotImplementedError() def clear_download(self, context, volume): pass def extend_volume(self, volume, new_size): msg = _("Extend volume not implemented") raise NotImplementedError(msg) def manage_existing(self, volume, existing_ref): msg = _("Manage existing volume not implemented.") raise NotImplementedError(msg) def revert_to_snapshot(self, context, volume, snapshot): """Revert volume to snapshot. Note: the revert process should not change the volume's current size, that means if the driver shrank the volume during the process, it should extend the volume internally. 
""" msg = _("Revert volume to snapshot not implemented.") raise NotImplementedError(msg) def manage_existing_get_size(self, volume, existing_ref): msg = _("Manage existing volume not implemented.") raise NotImplementedError(msg) def get_manageable_volumes(self, cinder_volumes, marker, limit, offset, sort_keys, sort_dirs): msg = _("Get manageable volumes not implemented.") raise NotImplementedError(msg) def unmanage(self, volume): pass def manage_existing_snapshot(self, snapshot, existing_ref): msg = _("Manage existing snapshot not implemented.") raise NotImplementedError(msg) def manage_existing_snapshot_get_size(self, snapshot, existing_ref): msg = _("Manage existing snapshot not implemented.") raise NotImplementedError(msg) def get_manageable_snapshots(self, cinder_snapshots, marker, limit, offset, sort_keys, sort_dirs): msg = _("Get manageable snapshots not implemented.") raise NotImplementedError(msg) def unmanage_snapshot(self, snapshot): """Unmanage the specified snapshot from Cinder management.""" def retype(self, context, volume, new_type, diff, host): """Change the type of a volume. Refer to :obj:`cinder.interface.volume_driver.VolumeDriverCore.retype` for additional information. """ return False, None # ####### Interface methods for DataPath (Connector) ######## def ensure_export(self, context, volume): raise NotImplementedError() def create_export(self, context, volume, connector): raise NotImplementedError() def create_export_snapshot(self, context, snapshot, connector): raise NotImplementedError() def remove_export(self, context, volume): raise NotImplementedError() def remove_export_snapshot(self, context, snapshot): raise NotImplementedError() def initialize_connection(self, volume, connector, **kwargs): raise NotImplementedError() def initialize_connection_snapshot(self, snapshot, connector, **kwargs): """Allow connection from connector for a snapshot.""" def terminate_connection(self, volume, connector, **kwargs): """Disallow connection from connector :param volume: The volume to be disconnected. :param connector: A dictionary describing the connection with details about the initiator. Can be None. """ def terminate_connection_snapshot(self, snapshot, connector, **kwargs): """Disallow connection from connector for a snapshot.""" def create_consistencygroup(self, context, group): """Creates a consistencygroup. :param context: the context of the caller. :param group: the dictionary of the consistency group to be created. :returns: model_update model_update will be in this format: {'status': xxx, ......}. If the status in model_update is 'error', the manager will throw an exception and it will be caught in the try-except block in the manager. If the driver throws an exception, the manager will also catch it in the try-except block. The group status in the db will be changed to 'error'. For a successful operation, the driver can either build the model_update and return it or return None. The group status will be set to 'available'. """ raise NotImplementedError() def create_consistencygroup_from_src(self, context, group, volumes, cgsnapshot=None, snapshots=None, source_cg=None, source_vols=None): """Creates a consistencygroup from source. :param context: the context of the caller. :param group: the dictionary of the consistency group to be created. :param volumes: a list of volume dictionaries in the group. :param cgsnapshot: the dictionary of the cgsnapshot as source. :param snapshots: a list of snapshot dictionaries in the cgsnapshot. 
:param source_cg: the dictionary of a consistency group as source. :param source_vols: a list of volume dictionaries in the source_cg. :returns: model_update, volumes_model_update The source can be cgsnapshot or a source cg. param volumes is retrieved directly from the db. It is a list of cinder.db.sqlalchemy.models.Volume to be precise. It cannot be assigned to volumes_model_update. volumes_model_update is a list of dictionaries. It has to be built by the driver. An entry will be in this format: {'id': xxx, 'status': xxx, ......}. model_update will be in this format: {'status': xxx, ......}. To be consistent with other volume operations, the manager will assume the operation is successful if no exception is thrown by the driver. For a successful operation, the driver can either build the model_update and volumes_model_update and return them or return None, None. """ raise NotImplementedError() def delete_consistencygroup(self, context, group, volumes): """Deletes a consistency group. :param context: the context of the caller. :param group: the dictionary of the consistency group to be deleted. :param volumes: a list of volume dictionaries in the group. :returns: model_update, volumes_model_update param volumes is retrieved directly from the db. It is a list of cinder.db.sqlalchemy.models.Volume to be precise. It cannot be assigned to volumes_model_update. volumes_model_update is a list of dictionaries. It has to be built by the driver. An entry will be in this format: {'id': xxx, 'status': xxx, ......}. model_update will be in this format: {'status': xxx, ......}. The driver should populate volumes_model_update and model_update and return them. The manager will check volumes_model_update and update db accordingly for each volume. If the driver successfully deleted some volumes but failed to delete others, it should set statuses of the volumes accordingly so that the manager can update db correctly. If the status in any entry of volumes_model_update is 'error_deleting' or 'error', the status in model_update will be set to the same if it is not already 'error_deleting' or 'error'. If the status in model_update is 'error_deleting' or 'error', the manager will raise an exception and the status of the group will be set to 'error' in the db. If volumes_model_update is not returned by the driver, the manager will set the status of every volume in the group to 'error' in the except block. If the driver raises an exception during the operation, it will be caught by the try-except block in the manager. The statuses of the group and all volumes in it will be set to 'error'. For a successful operation, the driver can either build the model_update and volumes_model_update and return them or return None, None. The statuses of the group and all volumes will be set to 'deleted' after the manager deletes them from db. """ raise NotImplementedError() def update_consistencygroup(self, context, group, add_volumes=None, remove_volumes=None): """Updates a consistency group. :param context: the context of the caller. :param group: the dictionary of the consistency group to be updated. :param add_volumes: a list of volume dictionaries to be added. :param remove_volumes: a list of volume dictionaries to be removed. :returns: model_update, add_volumes_update, remove_volumes_update model_update is a dictionary that the driver wants the manager to update upon a successful return. If None is returned, the manager will set the status to 'available'. 
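        As an illustrative sketch only (real drivers return backend-specific
        fields), a return could be:

        .. code:: python

            model_update = {'status': 'available'}
            add_volumes_update = [{'id': vol['id'], 'status': 'available'}
                                  for vol in add_volumes]
            remove_volumes_update = [{'id': vol['id'], 'status': 'available'}
                                     for vol in remove_volumes]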
add_volumes_update and remove_volumes_update are lists of dictionaries that the driver wants the manager to update upon a successful return. Note that each entry requires a {'id': xxx} so that the correct volume entry can be updated. If None is returned, the volume will remain its original status. Also note that you cannot directly assign add_volumes to add_volumes_update as add_volumes is a list of cinder.db.sqlalchemy.models.Volume objects and cannot be used for db update directly. Same with remove_volumes. If the driver throws an exception, the status of the group as well as those of the volumes to be added/removed will be set to 'error'. """ raise NotImplementedError() def create_cgsnapshot(self, context, cgsnapshot, snapshots): """Creates a cgsnapshot. :param context: the context of the caller. :param cgsnapshot: the dictionary of the cgsnapshot to be created. :param snapshots: a list of snapshot dictionaries in the cgsnapshot. :returns: model_update, snapshots_model_update param snapshots is retrieved directly from the db. It is a list of cinder.db.sqlalchemy.models.Snapshot to be precise. It cannot be assigned to snapshots_model_update. snapshots_model_update is a list of dictionaries. It has to be built by the driver. An entry will be in this format: {'id': xxx, 'status': xxx, ......}. model_update will be in this format: {'status': xxx, ......}. The driver should populate snapshots_model_update and model_update and return them. The manager will check snapshots_model_update and update db accordingly for each snapshot. If the driver successfully deleted some snapshots but failed to delete others, it should set statuses of the snapshots accordingly so that the manager can update db correctly. If the status in any entry of snapshots_model_update is 'error', the status in model_update will be set to the same if it is not already 'error'. If the status in model_update is 'error', the manager will raise an exception and the status of cgsnapshot will be set to 'error' in the db. If snapshots_model_update is not returned by the driver, the manager will set the status of every snapshot to 'error' in the except block. If the driver raises an exception during the operation, it will be caught by the try-except block in the manager and the statuses of cgsnapshot and all snapshots will be set to 'error'. For a successful operation, the driver can either build the model_update and snapshots_model_update and return them or return None, None. The statuses of cgsnapshot and all snapshots will be set to 'available' at the end of the manager function. """ raise NotImplementedError() def delete_cgsnapshot(self, context, cgsnapshot, snapshots): """Deletes a cgsnapshot. :param context: the context of the caller. :param cgsnapshot: the dictionary of the cgsnapshot to be deleted. :param snapshots: a list of snapshot dictionaries in the cgsnapshot. :returns: model_update, snapshots_model_update param snapshots is retrieved directly from the db. It is a list of cinder.db.sqlalchemy.models.Snapshot to be precise. It cannot be assigned to snapshots_model_update. snapshots_model_update is a list of dictionaries. It has to be built by the driver. An entry will be in this format: {'id': xxx, 'status': xxx, ......}. model_update will be in this format: {'status': xxx, ......}. The driver should populate snapshots_model_update and model_update and return them. The manager will check snapshots_model_update and update db accordingly for each snapshot. 
If the driver successfully deleted some snapshots but failed to delete others, it should set statuses of the snapshots accordingly so that the manager can update db correctly. If the status in any entry of snapshots_model_update is 'error_deleting' or 'error', the status in model_update will be set to the same if it is not already 'error_deleting' or 'error'. If the status in model_update is 'error_deleting' or 'error', the manager will raise an exception and the status of cgsnapshot will be set to 'error' in the db. If snapshots_model_update is not returned by the driver, the manager will set the status of every snapshot to 'error' in the except block. If the driver raises an exception during the operation, it will be caught by the try-except block in the manager and the statuses of cgsnapshot and all snapshots will be set to 'error'. For a successful operation, the driver can either build the model_update and snapshots_model_update and return them or return None, None. The statuses of cgsnapshot and all snapshots will be set to 'deleted' after the manager deletes them from db. """ raise NotImplementedError() def clone_image(self, context, volume, image_location, image_meta, image_service): """Create a volume efficiently from an existing image. Refer to :obj:`cinder.interface.volume_driver.VolumeDriverCore.clone_image` for additional information. """ return None, False def get_pool(self, volume): """Return pool name where volume reside on. :param volume: The volume hosted by the driver. :returns: name of the pool where given volume is in. """ return None def migrate_volume(self, context, volume, host): """Migrate the volume to the specified host. Refer to :obj:`cinder.interface.volume_driver.VolumeDriverCore.migrate_volume` for additional information. """ return (False, None) def accept_transfer(self, context, volume, new_user, new_project): pass class ProxyVD(object): """Proxy Volume Driver to mark proxy drivers If a driver uses a proxy class (e.g. by using __setattr__ and __getattr__) without directly inheriting from base volume driver this class can help marking them and retrieve the actual used driver object. """ def _get_driver(self): """Returns the actual driver object. Can be overloaded by the proxy. """ return getattr(self, "driver", None) class ISCSIDriver(VolumeDriver): """Executes commands relating to ISCSI volumes. We make use of model provider properties as follows: ``provider_location`` if present, contains the iSCSI target information in the same format as an ietadm discovery i.e. ':, ' ``provider_auth`` if present, contains a space-separated triple: ' '. `CHAP` is the only auth_method in use at the moment. """ def __init__(self, *args, **kwargs): super(ISCSIDriver, self).__init__(*args, **kwargs) def _do_iscsi_discovery(self, volume): # TODO(justinsb): Deprecate discovery and use stored info # NOTE(justinsb): Discovery won't work with CHAP-secured targets (?) 
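        # Each line of 'iscsiadm -m discovery -t sendtargets' output has the
        # form '<ip>:<port>,<portal group tag> <target iqn>', for example
        # '10.0.0.2:3260,1 iqn.2010-10.org.openstack:volume-00000001'
        # (example values only); the loop below returns the first line that
        # mentions both the configured target IP and the volume name.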
LOG.warning("ISCSI provider_location not stored, using discovery") volume_name = volume['name'] try: # NOTE(griff) We're doing the split straight away which should be # safe since using '@' in hostname is considered invalid (out, _err) = self._execute('iscsiadm', '-m', 'discovery', '-t', 'sendtargets', '-p', volume['host'].split('@')[0], run_as_root=True) except processutils.ProcessExecutionError as ex: LOG.error("ISCSI discovery attempt failed for:%s", volume['host'].split('@')[0]) LOG.debug("Error from iscsiadm -m discovery: %s", ex.stderr) return None for target in out.splitlines(): if (self.configuration.target_ip_address in target and volume_name in target): return target return None def _get_iscsi_properties(self, volume, multipath=False): """Gets iscsi configuration We ideally get saved information in the volume entity, but fall back to discovery if need be. Discovery may be completely removed in future The properties are: :target_discovered: boolean indicating whether discovery was used :target_iqn: the IQN of the iSCSI target :target_portal: the portal of the iSCSI target :target_lun: the lun of the iSCSI target :volume_id: the id of the volume (currently used by xen) :auth_method:, :auth_username:, :auth_password: the authentication details. Right now, either auth_method is not present meaning no authentication, or auth_method == `CHAP` meaning use CHAP with the specified credentials. :discard: boolean indicating if discard is supported In some of drivers that support multiple connections (for multipath and for single path with failover on connection failure), it returns :target_iqns, :target_portals, :target_luns, which contain lists of multiple values. The main portal information is also returned in :target_iqn, :target_portal, :target_lun for backward compatibility. Note that some of drivers don't return :target_portals even if they support multipath. Then the connector should use sendtargets discovery to find the other portals if it supports multipath. 
""" properties = {} location = volume['provider_location'] if location: # provider_location is the same format as iSCSI discovery output properties['target_discovered'] = False else: location = self._do_iscsi_discovery(volume) if not location: msg = (_("Could not find iSCSI export for volume %s") % (volume['name'])) raise exception.InvalidVolume(reason=msg) LOG.debug("ISCSI Discovery: Found %s", location) properties['target_discovered'] = True results = location.split(" ") portals = results[0].split(",")[0].split(";") iqn = results[1] nr_portals = len(portals) try: lun = int(results[2]) except (IndexError, ValueError): if self.configuration.target_helper == 'tgtadm': lun = 1 else: lun = 0 if nr_portals > 1: properties['target_portals'] = portals properties['target_iqns'] = [iqn] * nr_portals properties['target_luns'] = [lun] * nr_portals properties['target_portal'] = portals[0] properties['target_iqn'] = iqn properties['target_lun'] = lun properties['volume_id'] = volume['id'] auth = volume['provider_auth'] if auth: (auth_method, auth_username, auth_secret) = auth.split() properties['auth_method'] = auth_method properties['auth_username'] = auth_username properties['auth_password'] = auth_secret geometry = volume.get('provider_geometry', None) if geometry: (physical_block_size, logical_block_size) = geometry.split() properties['physical_block_size'] = physical_block_size properties['logical_block_size'] = logical_block_size encryption_key_id = volume.get('encryption_key_id', None) properties['encrypted'] = encryption_key_id is not None return properties def _run_iscsiadm(self, iscsi_properties, iscsi_command, **kwargs): check_exit_code = kwargs.pop('check_exit_code', 0) (out, err) = self._execute('iscsiadm', '-m', 'node', '-T', iscsi_properties['target_iqn'], '-p', iscsi_properties['target_portal'], *iscsi_command, run_as_root=True, check_exit_code=check_exit_code) LOG.debug("iscsiadm %(command)s: stdout=%(out)s stderr=%(err)s", {'command': iscsi_command, 'out': out, 'err': err}) return (out, err) def initialize_connection(self, volume, connector): """Initializes the connection and returns connection info. The iscsi driver returns a driver_volume_type of 'iscsi'. The format of the driver data is defined in _get_iscsi_properties. Example return value:: { 'driver_volume_type': 'iscsi', 'data': { 'target_discovered': True, 'target_iqn': 'iqn.2010-10.org.openstack:volume-00000001', 'target_portal': '127.0.0.0.1:3260', 'volume_id': 1, 'discard': False, } } If the backend driver supports multiple connections for multipath and for single path with failover, "target_portals", "target_iqns", "target_luns" are also populated. 
In this example, LUN values greater than 255 also use flat addressing mode:: { 'driver_volume_type': 'iscsi', 'data': { 'target_discovered': False, 'target_iqn': 'iqn.2010-10.org.openstack:volume1', 'target_iqns': ['iqn.2010-10.org.openstack:volume1', 'iqn.2010-10.org.openstack:volume1-2'], 'target_portal': '10.0.0.1:3260', 'target_portals': ['10.0.0.1:3260', '10.0.1.1:3260'], 'target_lun': 1, 'target_luns': [1, 1], 'volume_id': 1, 'discard': False, 'addressing_mode': os_brick.constants.SCSI_ADDRESSING_SAM2, } } """ # NOTE(jdg): Yes, this is duplicated in the volume/target # drivers, for now leaving it as there are third-party # drivers that don't use target drivers, but inherit from # this base class and use this init data iscsi_properties = self._get_iscsi_properties(volume) return { 'driver_volume_type': self.configuration.safe_get('target_protocol'), 'data': iscsi_properties } def validate_connector(self, connector): # iSCSI drivers require the initiator information required = 'initiator' if required not in connector: LOG.error('The volume driver requires %(data)s ' 'in the connector.', {'data': required}) raise exception.InvalidConnectorException(missing=required) def terminate_connection(self, volume, connector, **kwargs): pass def _update_volume_stats(self): """Retrieve stats info from volume group.""" LOG.debug("Updating volume stats...") data = {} backend_name = self.configuration.safe_get('volume_backend_name') data["volume_backend_name"] = backend_name or 'Generic_iSCSI' data["vendor_name"] = 'Open Source' data["driver_version"] = '1.0' data["storage_protocol"] = constants.ISCSI data["pools"] = [] data["replication_enabled"] = False self._update_pools_and_stats(data) class ISERDriver(ISCSIDriver): """Executes commands relating to ISER volumes. We make use of model provider properties as follows: ``provider_location`` if present, contains the iSER target information in the same format as an ietadm discovery i.e. '<ip>:<port>,<portal> <target IQN>' ``provider_auth`` if present, contains a space-separated triple: '<auth method> <auth username> <auth password>'. `CHAP` is the only auth_method in use at the moment. """ def __init__(self, *args, **kwargs): super(ISERDriver, self).__init__(*args, **kwargs) # for backward compatibility self.configuration.num_volume_device_scan_tries = \ self.configuration.num_iser_scan_tries self.configuration.target_prefix = \ self.configuration.iser_target_prefix self.configuration.target_ip_address = \ self.configuration.iser_ip_address self.configuration.target_port = self.configuration.iser_port def initialize_connection(self, volume, connector): """Initializes the connection and returns connection info. The iser driver returns a driver_volume_type of 'iser'. The format of the driver data is defined in _get_iscsi_properties. Example return value: ..
code-block:: default { 'driver_volume_type': 'iser', 'data': { 'target_discovered': True, 'target_iqn': 'iqn.2010-10.org.iser.openstack:volume-00000001', 'target_portal': '127.0.0.0.1:3260', 'volume_id': 1, } } """ iser_properties = self._get_iscsi_properties(volume) return { 'driver_volume_type': 'iser', 'data': iser_properties } def _update_volume_stats(self): """Retrieve stats info from volume group.""" LOG.debug("Updating volume stats...") data = {} backend_name = self.configuration.safe_get('volume_backend_name') data["volume_backend_name"] = backend_name or 'Generic_iSER' data["vendor_name"] = 'Open Source' data["driver_version"] = '1.0' data["storage_protocol"] = constants.ISER data["pools"] = [] self._update_pools_and_stats(data) class FibreChannelDriver(VolumeDriver): """Executes commands relating to Fibre Channel volumes.""" def __init__(self, *args, **kwargs): super(FibreChannelDriver, self).__init__(*args, **kwargs) def initialize_connection(self, volume, connector): """Initializes the connection and returns connection info. The driver returns a driver_volume_type of 'fibre_channel'. The target_wwn can be a single entry or a list of wwns that correspond to the list of remote wwn(s) that will export the volume. Example return values: .. code-block:: default { 'driver_volume_type': 'fibre_channel', 'data': { 'target_discovered': True, 'target_lun': 1, 'target_wwn': '1234567890123', 'discard': False, } } or .. code-block:: default { 'driver_volume_type': 'fibre_channel', 'data': { 'target_discovered': True, 'target_lun': 1, 'target_wwn': ['1234567890123', '0987654321321'], 'discard': False, 'addressing_mode': os_brick.constants.SCSI_ADDRESSING_SAM2, } } """ msg = _("Driver must implement initialize_connection") raise NotImplementedError(msg) def validate_connector(self, connector): """Fail if connector doesn't contain all the data needed by driver. Do a check on the connector and ensure that it has wwnns, wwpns. """ self.validate_connector_has_setting(connector, 'wwpns') self.validate_connector_has_setting(connector, 'wwnns') @staticmethod def validate_connector_has_setting(connector, setting): """Test for non-empty setting in connector.""" if setting not in connector or not connector[setting]: LOG.error( "FibreChannelDriver validate_connector failed. " "No '%(setting)s'. Make sure HBA state is Online.", {'setting': setting}) raise exception.InvalidConnectorException(missing=setting) def _update_volume_stats(self): """Retrieve stats info from volume group.""" LOG.debug("Updating volume stats...") data = {} backend_name = self.configuration.safe_get('volume_backend_name') data["volume_backend_name"] = backend_name or 'Generic_FC' data["vendor_name"] = 'Open Source' data["driver_version"] = '1.0' data["storage_protocol"] = constants.FC data["pools"] = [] self._update_pools_and_stats(data) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/driver_utils.py0000664000175000017500000000512400000000000020731 0ustar00zuulzuul00000000000000# Copyright (c) 2014 Pure Storage, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_log import log as logging from cinder import context from cinder import exception LOG = logging.getLogger(__name__) class VolumeDriverUtils(object): def __init__(self, namespace, db): self._data_namespace = namespace self._db = db @staticmethod def _get_context(ctxt): if not ctxt: return context.get_admin_context() return ctxt def get_driver_initiator_data(self, initiator, ctxt=None): try: return self._db.driver_initiator_data_get( self._get_context(ctxt), initiator, self._data_namespace ) except exception.CinderException: LOG.exception("Failed to get driver initiator data for" " initiator %(initiator)s and namespace" " %(namespace)s", {'initiator': initiator, 'namespace': self._data_namespace}) raise def insert_driver_initiator_data(self, initiator, key, value, ctxt=None): """Update the initiator data at key with value. If the key has already been set to something return False, otherwise if saved successfully return True. """ try: self._db.driver_initiator_data_insert_by_key( self._get_context(ctxt), initiator, self._data_namespace, key, value ) return True except exception.DriverInitiatorDataExists: return False except exception.CinderException: LOG.exception("Failed to insert initiator data for" " initiator %(initiator)s and backend" " %(backend)s for key %(key)s.", {'initiator': initiator, 'backend': self._data_namespace, 'key': key}) raise ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315577.3271205 cinder-27.0.0/cinder/volume/drivers/0000775000175000017500000000000000000000000017320 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/drivers/__init__.py0000664000175000017500000000000000000000000021417 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315577.3271205 cinder-27.0.0/cinder/volume/drivers/ceph/0000775000175000017500000000000000000000000020237 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/drivers/ceph/__init__.py0000664000175000017500000000000000000000000022336 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/drivers/ceph/rbd_iscsi.py0000664000175000017500000004623700000000000022566 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""RADOS Block Device iSCSI Driver""" from oslo_config import cfg from oslo_log import log as logging from oslo_utils import netutils from packaging import version from cinder.common import constants from cinder import exception from cinder.i18n import _ from cinder import interface from cinder.volume import configuration from cinder.volume.drivers import rbd from cinder.volume import volume_utils try: import rbd_iscsi_client from rbd_iscsi_client import client from rbd_iscsi_client import exceptions as client_exceptions except ImportError: rbd_iscsi_client = None client = None client_exceptions = None LOG = logging.getLogger(__name__) RBD_ISCSI_OPTS = [ cfg.StrOpt('rbd_iscsi_api_user', default='', help='The username for the rbd_target_api service'), cfg.StrOpt('rbd_iscsi_api_password', default='', secret=True, help='The username for the rbd_target_api service'), cfg.StrOpt('rbd_iscsi_api_url', default='', help='The url to the rbd_target_api service'), cfg.BoolOpt('rbd_iscsi_api_debug', default=False, help='Enable client request debugging.'), cfg.StrOpt('rbd_iscsi_target_iqn', default=None, help='The preconfigured target_iqn on the iscsi gateway.'), ] CONF = cfg.CONF CONF.register_opts(RBD_ISCSI_OPTS, group=configuration.SHARED_CONF_GROUP) MIN_CLIENT_VERSION = "0.1.8" @interface.volumedriver class RBDISCSIDriver(rbd.RBDDriver): """Implements RADOS block device (RBD) iSCSI volume commands.""" VERSION = '1.0.0' # ThirdPartySystems wiki page CI_WIKI_NAME = "Cinder_Jenkins" SUPPORTS_ACTIVE_ACTIVE = True STORAGE_PROTOCOL = constants.ISCSI CHAP_LENGTH = 16 # The target IQN to use for creating all exports # we map all the targets for OpenStack attaches to this. target_iqn = None def __init__(self, active_backend_id=None, *args, **kwargs): super().__init__(*args, **kwargs) self.configuration.append_config_values(RBD_ISCSI_OPTS) @classmethod def get_driver_options(cls): additional_opts = cls._get_oslo_driver_opts( 'replication_device', 'reserved_percentage', 'max_over_subscription_ratio', 'volume_dd_blocksize', 'driver_ssl_cert_verify', 'suppress_requests_ssl_warnings') return rbd.RBD_OPTS + RBD_ISCSI_OPTS + additional_opts def _create_client(self): client_version = rbd_iscsi_client.version if (version.parse(client_version) < version.parse(MIN_CLIENT_VERSION)): ex_msg = (_('Invalid rbd_iscsi_client version found (%(found)s). ' 'Version %(min)s or greater required. 
Run "pip' ' install --upgrade rbd-iscsi-client" to upgrade' ' the client.') % {'found': client_version, 'min': MIN_CLIENT_VERSION}) LOG.error(ex_msg) raise exception.InvalidInput(reason=ex_msg) config = self.configuration ssl_warn = config.safe_get('suppress_requests_ssl_warnings') cl = client.RBDISCSIClient( config.safe_get('rbd_iscsi_api_user'), config.safe_get('rbd_iscsi_api_password'), config.safe_get('rbd_iscsi_api_url'), secure=config.safe_get('driver_ssl_cert_verify'), suppress_ssl_warnings=ssl_warn ) return cl def _is_status_200(self, response): return (response and 'status' in response and response['status'] == '200') def do_setup(self, context): """Perform initialization steps that could raise exceptions.""" super(RBDISCSIDriver, self).do_setup(context) if client is None: msg = _("You must install rbd-iscsi-client python package " "before using this driver.") raise exception.VolumeDriverException(data=msg) # Make sure we have the basic settings we need to talk to the # iscsi api service config = self.configuration self.client = self._create_client() self.client.set_debug_flag(config.safe_get('rbd_iscsi_api_debug')) resp, body = self.client.get_api() if not self._is_status_200(resp): # failed to fetch the open api url raise exception.InvalidConfigurationValue( option='rbd_iscsi_api_url', value='Could not talk to the rbd-target-api') # The admin had to have setup a target_iqn in the iscsi gateway # already in order for the gateways to work properly self.target_iqn = self.configuration.safe_get('rbd_iscsi_target_iqn') LOG.info("Using target_iqn '%s'", self.target_iqn) def check_for_setup_error(self): """Return an error if prerequisites aren't met.""" super(RBDISCSIDriver, self).check_for_setup_error() required_options = ['rbd_iscsi_api_user', 'rbd_iscsi_api_password', 'rbd_iscsi_api_url', 'rbd_iscsi_target_iqn'] for attr in required_options: val = getattr(self.configuration, attr) if not val: raise exception.InvalidConfigurationValue(option=attr, value=val) def _get_clients(self): # make sure we have resp, body = self.client.get_clients(self.target_iqn) if not self._is_status_200(resp): msg = _("Failed to get_clients() from rbd-target-api") raise exception.VolumeBackendAPIException(data=msg) return body def _get_config(self): resp, body = self.client.get_config() if not self._is_status_200(resp): msg = _("Failed to get_config() from rbd-target-api") raise exception.VolumeBackendAPIException(data=msg) return body def _get_disks(self): resp, disks = self.client.get_disks() if not self._is_status_200(resp): msg = _("Failed to get_disks() from rbd-target-api") raise exception.VolumeBackendAPIException(data=msg) return disks def create_client(self, initiator_iqn): """Create a client iqn on the gateway if it doesn't exist.""" client = self._get_target_client(initiator_iqn) if not client: try: self.client.create_client(self.target_iqn, initiator_iqn) except client_exceptions.ClientException as ex: raise exception.VolumeBackendAPIException( data=ex.get_description()) def _get_target_client(self, initiator_iqn): """Get the config information for a client defined to a target.""" config = self._get_config() target_config = config['targets'][self.target_iqn] if initiator_iqn in target_config['clients']: return target_config['clients'][initiator_iqn] def _get_auth_for_client(self, initiator_iqn): initiator_config = self._get_target_client(initiator_iqn) if initiator_config: auth = initiator_config['auth'] return auth def _set_chap_for_client(self, initiator_iqn, username, password): """Save the 
CHAP creds in the client on the gateway.""" # username is 8-64 chars # Password has to be 12-16 chars LOG.debug("Setting chap creds to %(user)s : %(pass)s", {'user': username, 'pass': password}) try: self.client.set_client_auth(self.target_iqn, initiator_iqn, username, password) except client_exceptions.ClientException as ex: raise exception.VolumeBackendAPIException( data=ex.get_description()) def _get_lun(self, iscsi_config, lun_name, initiator_iqn): lun = None target_info = iscsi_config['targets'][self.target_iqn] luns = target_info['clients'][initiator_iqn]['luns'] if lun_name in luns: lun = {'name': lun_name, 'id': luns[lun_name]['lun_id']} return lun def _lun_name(self, volume_name): """Build the iscsi gateway lun name.""" return ("%(pool)s/%(volume_name)s" % {'pool': self.configuration.rbd_pool, 'volume_name': volume_name}) @volume_utils.trace def create_disk(self, volume_name): """Register the volume with the iscsi gateways. We have to register the volume with the iscsi gateway. Exporting the volume won't work unless the gateway knows about it. """ try: self.client.find_disk(self.configuration.rbd_pool, volume_name) except client_exceptions.HTTPNotFound: try: # disk isn't known by the gateways, so lets add it. self.client.create_disk(self.configuration.rbd_pool, volume_name) except client_exceptions.ClientException as ex: LOG.exception("Couldn't create the disk entry to " "export the volume.") raise exception.VolumeBackendAPIException( data=ex.get_description()) @volume_utils.trace def register_disk(self, target_iqn, volume_name): """Register the disk with the target_iqn.""" lun_name = self._lun_name(volume_name) try: self.client.register_disk(target_iqn, lun_name) except client_exceptions.HTTPBadRequest as ex: desc = ex.get_description() search_str = ('is already mapped on target %(target_iqn)s' % {'target_iqn': self.target_iqn}) if desc.find(search_str): # The volume is already registered return else: LOG.error("Couldn't register the volume to the target_iqn") raise exception.VolumeBackendAPIException( data=ex.get_description()) except client_exceptions.ClientException as ex: LOG.exception("Couldn't register the volume to the target_iqn", ex) raise exception.VolumeBackendAPIException( data=ex.get_description()) @volume_utils.trace def unregister_disk(self, target_iqn, volume_name): """Unregister the volume from the gateway.""" lun_name = self._lun_name(volume_name) try: self.client.unregister_disk(target_iqn, lun_name) except client_exceptions.ClientException as ex: LOG.exception("Couldn't unregister the volume to the target_iqn", ex) raise exception.VolumeBackendAPIException( data=ex.get_description()) @volume_utils.trace def export_disk(self, initiator_iqn, volume_name, iscsi_config): """Export a volume to an initiator.""" lun_name = self._lun_name(volume_name) LOG.debug("Export lun %(lun)s", {'lun': lun_name}) lun = self._get_lun(iscsi_config, lun_name, initiator_iqn) if lun: LOG.debug("Found existing lun export.") return lun try: LOG.debug("Creating new lun export for %(lun)s", {'lun': lun_name}) self.client.export_disk(self.target_iqn, initiator_iqn, self.configuration.rbd_pool, volume_name) resp, iscsi_config = self.client.get_config() return self._get_lun(iscsi_config, lun_name, initiator_iqn) except client_exceptions.ClientException as ex: raise exception.VolumeBackendAPIException( data=ex.get_description()) @volume_utils.trace def unexport_disk(self, initiator_iqn, volume_name, iscsi_config): """Remove a volume from an initiator.""" lun_name = self._lun_name(volume_name) 
LOG.debug("unexport lun %(lun)s", {'lun': lun_name}) lun = self._get_lun(iscsi_config, lun_name, initiator_iqn) if not lun: LOG.debug("Didn't find LUN on gateway.") return try: LOG.debug("unexporting %(lun)s", {'lun': lun_name}) self.client.unexport_disk(self.target_iqn, initiator_iqn, self.configuration.rbd_pool, volume_name) except client_exceptions.ClientException as ex: LOG.exception(ex) raise exception.VolumeBackendAPIException( data=ex.get_description()) def find_client_luns(self, target_iqn, client_iqn, iscsi_config): """Find luns already exported to an initiator.""" if 'targets' in iscsi_config: if target_iqn in iscsi_config['targets']: target_info = iscsi_config['targets'][target_iqn] if 'clients' in target_info: clients = target_info['clients'] client = clients[client_iqn] luns = client['luns'] return luns @volume_utils.trace def initialize_connection(self, volume, connector): """Export a volume to a host.""" # create client initiator_iqn = connector['initiator'] self.create_client(initiator_iqn) auth = self._get_auth_for_client(initiator_iqn) username = initiator_iqn if not auth['password']: password = volume_utils.generate_password(length=self.CHAP_LENGTH) self._set_chap_for_client(initiator_iqn, username, password) else: LOG.debug("using existing CHAP password") password = auth['password'] # add disk for export iscsi_config = self._get_config() # First have to ensure that the disk is registered with # the gateways. self.create_disk(volume.name) self.register_disk(self.target_iqn, volume.name) iscsi_config = self._get_config() # Now export the disk to the initiator lun = self.export_disk(initiator_iqn, volume.name, iscsi_config) # fetch the updated config so we can get the lun id iscsi_config = self._get_config() target_info = iscsi_config['targets'][self.target_iqn] ips = target_info['ip_list'] target_portal = ips[0] if netutils.is_valid_ipv6(target_portal): target_portal = "[%s]:3260" % target_portal else: target_portal = "%s:3260" % target_portal data = { 'driver_volume_type': 'iscsi', 'data': { 'target_iqn': self.target_iqn, 'target_portal': target_portal, 'target_lun': lun['id'], 'auth_method': 'CHAP', 'auth_username': username, 'auth_password': password, } } return data def _delete_disk(self, volume): """Remove the defined disk from the gateway.""" # We only do this when we know it's not exported # anywhere in the gateway lun_name = self._lun_name(volume.name) config = self._get_config() # Now look for the disk on any exported target found = False for target_iqn in config['targets']: # Do we have the volume we are looking for? target = config['targets'][target_iqn] for client_iqn in target['clients'].keys(): if lun_name in target['clients'][client_iqn]['luns']: found = True if not found: # we can delete the disk definition LOG.info("Deleting volume definition in iscsi gateway for %s", lun_name) self.client.delete_disk(self.configuration.rbd_pool, volume.name, preserve_image=True) def _terminate_connection(self, volume, initiator_iqn, target_iqn, iscsi_config): # remove the disk from the client. self.unexport_disk(initiator_iqn, volume.name, iscsi_config) # Try to unregister the disk, since nobody is using it. self.unregister_disk(self.target_iqn, volume.name) config = self._get_config() # If there are no more luns exported to this initiator # then delete the initiator luns = self.find_client_luns(target_iqn, initiator_iqn, config) if not luns: LOG.debug("There aren't any more LUNs attached to %(iqn)s." 
"So we unregister the volume and delete " "the client entry", {'iqn': initiator_iqn}) try: self.client.delete_client(target_iqn, initiator_iqn) except client_exceptions.ClientException: LOG.warning("Tried to delete initiator %(iqn)s, but delete " "failed.", {'iqns': initiator_iqn}) def _terminate_all(self, volume, iscsi_config): """Find all exports of this volume for our target_iqn and detach.""" disks = self._get_disks() lun_name = self._lun_name(volume.name) if lun_name not in disks['disks']: LOG.debug("Volume %s not attached anywhere.", lun_name) return for target_iqn_tmp in iscsi_config['targets']: if self.target_iqn != target_iqn_tmp: # We don't touch exports for targets # we aren't configured to manage. continue target = iscsi_config['targets'][self.target_iqn] for client_iqn in target['clients'].keys(): if lun_name in target['clients'][client_iqn]['luns']: self._terminate_connection(volume, client_iqn, self.target_iqn, iscsi_config) self._delete_disk(volume) @volume_utils.trace def terminate_connection(self, volume, connector, **kwargs): """Unexport the volume from the gateway.""" iscsi_config = self._get_config() if not connector: # No connector was passed in, so this is a force detach # we need to detach the volume from the configured target_iqn. self._terminate_all(volume, iscsi_config) initiator_iqn = connector['initiator'] self._terminate_connection(volume, initiator_iqn, self.target_iqn, iscsi_config) self._delete_disk(volume) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315577.3311205 cinder-27.0.0/cinder/volume/drivers/datacore/0000775000175000017500000000000000000000000021102 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/drivers/datacore/api.py0000664000175000017500000012347500000000000022241 0ustar00zuulzuul00000000000000# Copyright (c) 2022 DataCore Software Corp. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Classes to invoke DataCore SANsymphony API.""" import copy import socket import sys import uuid from oslo_log import log as logging from oslo_utils import excutils from oslo_utils import importutils import suds from suds import client as suds_client from suds import plugin from suds.sax import attribute from suds.sax import element from suds import wsdl from suds import wsse from suds import xsd from suds.xsd.doctor import Import from suds.xsd.doctor import ImportDoctor from cinder.i18n import _ from cinder import utils as cinder_utils from cinder.volume.drivers.datacore import exception as datacore_exceptions from cinder.volume.drivers.datacore import utils as datacore_utils websocket = importutils.try_import('websocket') LOG = logging.getLogger(__name__) class FaultDefinitionsFilter(plugin.DocumentPlugin): """Plugin to process the DataCore API WSDL document. The document plugin removes fault definitions for callback operations from the DataCore API WSDL. 
""" def parsed(self, context): document = context.document tns = self._get_tns(document) message_qrefs = set() for message in self._get_wsdl_messages(document): message_qrefs.add((message.get('name'), tns[1])) bindings = self._get_wsdl_operation_bindings(document) for port_type in self._get_wsdl_port_types(document): for operation in self._get_wsdl_operations(port_type): self._filter_faults( document, operation, bindings, message_qrefs, tns) @staticmethod def _get_tns(document): target_namespace = document.get('targetNamespace') prefix = document.findPrefix(target_namespace) or 'tns' return prefix, target_namespace @staticmethod def _get_wsdl_port_types(document): return document.getChildren('portType', wsdl.wsdlns) @staticmethod def _get_wsdl_operations(port_type): return port_type.getChildren('operation', wsdl.wsdlns) @staticmethod def _get_wsdl_messages(document): return document.getChildren('message', wsdl.wsdlns) @staticmethod def _get_wsdl_operation_bindings(document): bindings = [] for binding in document.getChildren('binding', wsdl.wsdlns): operations = {} for operation in binding.getChildren('operation', wsdl.wsdlns): operations[operation.get('name')] = operation bindings.append(operations) return bindings @staticmethod def _filter_faults(document, operation, operation_bindings, message_qrefs, tns): filtered_faults = {} for fault in operation.getChildren('fault', wsdl.wsdlns): fault_message = fault.get('message') qref = xsd.qualify(fault_message, document, tns) if qref not in message_qrefs: filtered_faults[fault.get('name')] = fault for fault in filtered_faults.values(): operation.remove(fault) if filtered_faults: for binding in operation_bindings: filtered_binding_faults = [] faults = binding[operation.get('name')].getChildren( 'fault', wsdl.wsdlns) for binding_fault in faults: if binding_fault.get('name') in filtered_faults: filtered_binding_faults.append(binding_fault) for binding_fault in filtered_binding_faults: binding[operation.get('name')].remove(binding_fault) class DataCoreClient(object): """DataCore SANsymphony client.""" API_RETRY_INTERVAL = 10 DATACORE_EXECUTIVE_PORT = '3794' STORAGE_SERVICES = 'IStorageServices' STORAGE_SERVICES_BINDING = 'CustomBinding_IStorageServices' EXECUTIVE_SERVICE = 'IExecutiveServiceEx' EXECUTIVE_SERVICE_BINDING = 'CustomBinding_IExecutiveServiceEx' NS_WSA = ('wsa', 'http://www.w3.org/2005/08/addressing') WSA_ANONYMOUS = 'http://www.w3.org/2005/08/addressing/anonymous' MUST_UNDERSTAND = attribute.Attribute('SOAP-ENV:mustUnderstand', '1') # Namespaces that are defined within DataCore API WSDL NS_DATACORE_EXECUTIVE = ('http://schemas.datacontract.org/2004/07/' 'DataCore.Executive') NS_DATACORE_EXECUTIVE_SCSI = ('http://schemas.datacontract.org/2004/07/' 'DataCore.Executive.Scsi') NS_DATACORE_EXECUTIVE_ISCSI = ('http://schemas.datacontract.org/2004/07/' 'DataCore.Executive.iSCSI') NS_SERIALIZATION_ARRAYS = ('http://schemas.microsoft.com/2003/10/' 'Serialization/Arrays') # Fully qualified names of objects that are defined within # DataCore API WSDL O_ACCESS_TOKEN = '{%s}AccessToken' % NS_DATACORE_EXECUTIVE_ISCSI O_ARRAY_OF_PERFORMANCE_TYPE = ('{%s}ArrayOfPerformanceType' % NS_DATACORE_EXECUTIVE) O_ARRAY_OF_STRING = '{%s}ArrayOfstring' % NS_SERIALIZATION_ARRAYS O_CLIENT_MACHINE_TYPE = '{%s}ClientMachineType' % NS_DATACORE_EXECUTIVE O_DATA_SIZE = '{%s}DataSize' % NS_DATACORE_EXECUTIVE O_LOGICAL_DISK_ROLE = '{%s}LogicalDiskRole' % NS_DATACORE_EXECUTIVE O_LOGICAL_UNIT_TYPE = '{%s}LogicalUnitType' % NS_DATACORE_EXECUTIVE O_MIRROR_RECOVERY_PRIORITY = 
('{%s}MirrorRecoveryPriority' % NS_DATACORE_EXECUTIVE) O_PATH_POLICY = '{%s}PathPolicy' % NS_DATACORE_EXECUTIVE O_PERFORMANCE_TYPE = '{%s}PerformanceType' % NS_DATACORE_EXECUTIVE O_POOL_VOLUME_TYPE = '{%s}PoolVolumeType' % NS_DATACORE_EXECUTIVE O_SNAPSHOT_TYPE = '{%s}SnapshotType' % NS_DATACORE_EXECUTIVE O_SCSI_MODE = '{%s}ScsiMode' % NS_DATACORE_EXECUTIVE_SCSI O_SCSI_PORT_DATA = '{%s}ScsiPortData' % NS_DATACORE_EXECUTIVE O_SCSI_PORT_NEXUS_DATA = '{%s}ScsiPortNexusData' % NS_DATACORE_EXECUTIVE O_SCSI_PORT_TYPE = '{%s}ScsiPortType' % NS_DATACORE_EXECUTIVE_SCSI O_VIRTUAL_DISK_DATA = '{%s}VirtualDiskData' % NS_DATACORE_EXECUTIVE O_VIRTUAL_DISK_STATUS = '{%s}VirtualDiskStatus' % NS_DATACORE_EXECUTIVE O_VIRTUAL_DISK_SUB_TYPE = '{%s}VirtualDiskSubType' % NS_DATACORE_EXECUTIVE O_VIRTUAL_DISK_TYPE = '{%s}VirtualDiskType' % NS_DATACORE_EXECUTIVE def __init__(self, host, username, password, timeout): if websocket is None: msg = _("Failed to import websocket-client python module." " Please, ensure the module is installed.") raise datacore_exceptions.DataCoreException(msg) self.timeout = timeout executive_service_net_addr = datacore_utils.build_network_address( host, self.DATACORE_EXECUTIVE_PORT) executive_service_endpoint = self._build_service_endpoint( executive_service_net_addr, self.EXECUTIVE_SERVICE) security_options = wsse.Security() username_token = wsse.UsernameToken(username, password) security_options.tokens.append(username_token) imp = Import('http://www.w3.org/2001/XMLSchema', location='http://www.w3.org/2001/XMLSchema.xsd') imp.filter.add( 'http://schemas.microsoft.com/2003/10/Serialization/Arrays') imp.filter.add('http://schemas.microsoft.com/2003/10/Serialization/') imp.filter.add( 'http://schemas.datacontract.org/2004/07/DataCore.Executive') self._executive_service_client = suds_client.Client( executive_service_endpoint['http_endpoint'] + '?singlewsdl', nosend=True, timeout=self.timeout, wsse=security_options, plugins=[FaultDefinitionsFilter()], doctor=ImportDoctor(imp)) self._update_storage_services_endpoint(executive_service_endpoint) storage_services_endpoint = self._get_storage_services_endpoint() self._storage_services_client = suds_client.Client( storage_services_endpoint['http_endpoint'] + '?singlewsdl', nosend=True, timeout=self.timeout, wsse=security_options, plugins=[FaultDefinitionsFilter()], doctor=ImportDoctor(imp)) self._update_executive_service_endpoints(storage_services_endpoint) @staticmethod def _get_list_data(obj, attribute_name): return getattr(obj, attribute_name, []) @staticmethod def _build_service_endpoint(network_address, path): return { 'network_address': network_address, 'http_endpoint': '%s://%s/%s' % ('http', network_address, path), 'ws_endpoint': '%s://%s/%s' % ('ws', network_address, path), } @cinder_utils.synchronized('datacore-api-request_context') def _get_soap_context(self, service_client, service_binding, method, message_id, *args, **kwargs): soap_action = (service_client.wsdl.services[0].port(service_binding) .methods[method].soap.action) soap_headers = self._get_soap_headers(soap_action, message_id) service_client.set_options(soapheaders=soap_headers) context = service_client.service[service_binding][method]( *args, **kwargs) return context def _get_soap_headers(self, soap_action, message_id): headers = [ element.Element('Action', ns=self.NS_WSA).setText( soap_action.replace('"', '')).append(self.MUST_UNDERSTAND), element.Element('To', ns=self.NS_WSA).setText( self.WSA_ANONYMOUS).append(self.MUST_UNDERSTAND), element.Element('MessageID', 
ns=self.NS_WSA).setText(message_id), element.Element('ReplyTo', ns=self.NS_WSA).insert( element.Element('Address', ns=self.NS_WSA).setText( self.WSA_ANONYMOUS)), ] return headers def _process_request(self, service_client, service_binding, service_endpoint, method, *args, **kwargs): max_date = '9999-12-31T23:59:59.9999999' r_date = '9999-12-31T23:59:59.9' message_id = uuid.uuid4().urn context = self._get_soap_context( service_client, service_binding, method, message_id, *args, **kwargs) channel = None try: channel = websocket.create_connection( service_endpoint, timeout=self.timeout, subprotocols=['soap'], header=['soap-content-type: text/xml']) channel.send(context.envelope) response = channel.recv() if not isinstance(response, str): response = response.decode('utf-8') response = response.replace(max_date, r_date) response = response.encode('utf-8') return context.process_reply(response) except (socket.error, websocket.WebSocketException) as e: error = datacore_exceptions.DataCoreConnectionException(reason=e) raise error.with_traceback(sys.exc_info()[2]) except suds.WebFault as e: fault = datacore_exceptions.DataCoreFaultException(reason=e) raise fault.with_traceback(sys.exc_info()[2]) finally: if channel and channel.connected: try: channel.close() except (socket.error, websocket.WebSocketException) as e: LOG.debug("Closing a connection to " "DataCore server failed. %s", e) def _invoke_storage_services(self, method, *args, **kwargs): @cinder_utils.retry( datacore_exceptions.DataCoreConnectionException, interval=self.API_RETRY_INTERVAL, retries=10, wait_random=True) def retry_call(): storage_services_endpoint = self._get_storage_services_endpoint() try: result = self._process_request( self._storage_services_client, self.STORAGE_SERVICES_BINDING, storage_services_endpoint['ws_endpoint'], method, *args, **kwargs) return result except datacore_exceptions.DataCoreConnectionException: with excutils.save_and_reraise_exception(): self._update_api_endpoints() return retry_call() def _update_api_endpoints(self): executive_service_endpoints = self._get_executive_service_endpoints() for endpoint in executive_service_endpoints: try: self._update_storage_services_endpoint(endpoint) break except datacore_exceptions.DataCoreConnectionException as e: LOG.warning("Failed to update DataCore Server Group " "endpoints. %s.", e) storage_services_endpoint = self._get_storage_services_endpoint() try: self._update_executive_service_endpoints( storage_services_endpoint) except datacore_exceptions.DataCoreConnectionException as e: LOG.warning("Failed to update DataCore Server Group " "endpoints. 
%s.", e) @cinder_utils.synchronized('datacore-api-storage_services_endpoint') def _get_storage_services_endpoint(self): if self._storage_services_endpoint: return copy.copy(self._storage_services_endpoint) return None @cinder_utils.synchronized('datacore-api-storage_services_endpoint') def _update_storage_services_endpoint(self, executive_service_endpoint): controller_address = self._process_request( self._executive_service_client, self.EXECUTIVE_SERVICE_BINDING, executive_service_endpoint['ws_endpoint'], 'GetControllerAddress') if not controller_address: msg = _("Could not determine controller node.") raise datacore_exceptions.DataCoreConnectionException(reason=msg) controller_host = controller_address.rsplit(':', 1)[0].strip('[]') controller_net_addr = datacore_utils.build_network_address( controller_host, self.DATACORE_EXECUTIVE_PORT) self._storage_services_endpoint = self._build_service_endpoint( controller_net_addr, self.STORAGE_SERVICES) @cinder_utils.synchronized('datacore-api-executive_service_endpoints') def _get_executive_service_endpoints(self): if self._executive_service_endpoints: return self._executive_service_endpoints[:] return [] @cinder_utils.synchronized('datacore-api-executive_service_endpoints') def _update_executive_service_endpoints(self, storage_services_endpoint): endpoints = [] nodes = self._get_list_data( self._process_request(self._storage_services_client, self.STORAGE_SERVICES_BINDING, storage_services_endpoint['ws_endpoint'], 'GetNodes'), 'RegionNodeData') if not nodes: msg = _("Could not determine executive nodes.") raise datacore_exceptions.DataCoreConnectionException(reason=msg) for node in nodes: host = node.HostAddress.rsplit(':', 1)[0].strip('[]') endpoint = self._build_service_endpoint( datacore_utils.build_network_address( host, self.DATACORE_EXECUTIVE_PORT), self.EXECUTIVE_SERVICE) endpoints.append(endpoint) self._executive_service_endpoints = endpoints def get_server_groups(self): """Get all the server groups in the configuration. :return: A list of server group data. """ return self._get_list_data( self._invoke_storage_services('GetServerGroups'), 'ServerHostGroupData') def get_servers(self): """Get all the server hosts in the configuration. :return: A list of server host data """ return self._get_list_data( self._invoke_storage_services('GetServers'), 'ServerHostData') def get_disk_pools(self): """Get all the pools in the server group. :return: A list of disk pool data """ return self._get_list_data( self._invoke_storage_services('GetDiskPools'), 'DiskPoolData') def get_logical_disks(self): """Get all the logical disks defined in the system. :return: A list of logical disks """ return self._get_list_data( self._invoke_storage_services('GetLogicalDisks'), 'LogicalDiskData') def create_pool_logical_disk(self, pool_id, pool_volume_type, size, min_quota=None, max_quota=None): """Create the pool logical disk. 
:param pool_id: Pool id :param pool_volume_type: Type, either striped or spanned :param size: Size :param min_quota: Min quota :param max_quota: Max quota :return: New logical disk data """ volume_type = getattr(self._storage_services_client.factory .create(self.O_POOL_VOLUME_TYPE), pool_volume_type) data_size = (self._storage_services_client.factory .create(self.O_DATA_SIZE)) data_size.Value = size data_size_min_quota = None if min_quota: data_size_min_quota = (self._storage_services_client.factory .create(self.O_DATA_SIZE)) data_size_min_quota.Value = min_quota data_size_max_quota = None if max_quota: data_size_max_quota = (self._storage_services_client.factory .create(self.O_DATA_SIZE)) data_size_max_quota.Value = max_quota return self._invoke_storage_services('CreatePoolLogicalDisk', poolId=pool_id, type=volume_type, size=data_size, minQuota=data_size_min_quota, maxQuota=data_size_max_quota) def delete_logical_disk(self, logical_disk_id): """Delete the logical disk. :param logical_disk_id: Logical disk id """ self._invoke_storage_services('DeleteLogicalDisk', logicalDiskId=logical_disk_id) def get_logical_disk_chunk_allocation_map(self, logical_disk_id): """Get the logical disk chunk allocation map. The logical disk allocation map details all the physical disk chunks that are currently allocated to this logical disk. :param logical_disk_id: Logical disk id :return: A list of member allocation maps, restricted to chunks allocated on to this logical disk """ return self._get_list_data( self._invoke_storage_services('GetLogicalDiskChunkAllocationMap', logicalDiskId=logical_disk_id), 'MemberAllocationInfoData') def get_next_virtual_disk_alias(self, base_alias): """Get the next available (unused) virtual disk alias. :param base_alias: Base string of the new alias :return: New alias """ return self._invoke_storage_services('GetNextVirtualDiskAlias', baseAlias=base_alias) def get_virtual_disks(self): """Get all the virtual disks in the configuration. :return: A list of virtual disk's data """ return self._get_list_data( self._invoke_storage_services('GetVirtualDisks'), 'VirtualDiskData') def build_virtual_disk_data(self, virtual_disk_alias, virtual_disk_type, size, description, storage_profile_id): """Create VirtualDiskData object. 
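For illustration only (the alias, size and logical disk ids are hypothetical, and the storage profile is left unset), the returned object is typically passed on to create_virtual_disk_ex2, mirroring what the volume driver does::

    vd_data = client.build_virtual_disk_data(
        'volume-00000001', 'MultiPathMirrored', 1024 ** 3,
        'example volume', None)
    virtual_disk = client.create_virtual_disk_ex2(
        vd_data, first_logical_disk.Id, second_logical_disk.Id, True)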
:param virtual_disk_alias: User-visible alias of the virtual disk, which must be unique :param virtual_disk_type: Virtual disk type :param size: Virtual disk size :param description: A user-readable description of the virtual disk :param storage_profile_id: Virtual disk storage profile :return: VirtualDiskData object """ vd_data = (self._storage_services_client.factory .create(self.O_VIRTUAL_DISK_DATA)) vd_data.Size = (self._storage_services_client.factory .create(self.O_DATA_SIZE)) vd_data.Size.Value = size vd_data.Alias = virtual_disk_alias vd_data.Description = description vd_data.Type = getattr(self._storage_services_client.factory .create(self.O_VIRTUAL_DISK_TYPE), virtual_disk_type) vd_data.SubType = getattr(self._storage_services_client.factory .create(self.O_VIRTUAL_DISK_SUB_TYPE), 'Standard') vd_data.DiskStatus = getattr(self._storage_services_client.factory .create(self.O_VIRTUAL_DISK_STATUS), 'Online') vd_data.RecoveryPriority = getattr( self._storage_services_client.factory .create(self.O_MIRROR_RECOVERY_PRIORITY), 'Unset') vd_data.StorageProfileId = storage_profile_id return vd_data def create_virtual_disk_ex2(self, virtual_disk_data, first_logical_disk_id, second_logical_disk_id, add_redundancy): """Create a virtual disk specifying both the logical disks. :param virtual_disk_data: Virtual disk's properties :param first_logical_disk_id: Id of the logical disk to use :param second_logical_disk_id: Id of the second logical disk to use :param add_redundancy: If True, the mirror has redundant mirror paths :return: New virtual disk's data """ return self._invoke_storage_services( 'CreateVirtualDiskEx2', virtualDisk=virtual_disk_data, firstLogicalDiskId=first_logical_disk_id, secondLogicalDiskId=second_logical_disk_id, addRedundancy=add_redundancy) def set_virtual_disk_size(self, virtual_disk_id, size): """Change the size of a virtual disk. :param virtual_disk_id: Id of the virtual disk :param size: New size :return: Virtual disk's data """ data_size = (self._storage_services_client.factory .create(self.O_DATA_SIZE)) data_size.Value = size return self._invoke_storage_services('SetVirtualDiskSize', virtualDiskId=virtual_disk_id, size=data_size) def delete_virtual_disk(self, virtual_disk_id, delete_logical_disks): """Delete a virtual disk. :param virtual_disk_id: Id of the virtual disk :param delete_logical_disks: If True, delete the associated logical disks """ self._invoke_storage_services('DeleteVirtualDisk', virtualDiskId=virtual_disk_id, deleteLogicalDisks=delete_logical_disks) def serve_virtual_disks_to_host(self, host_id, virtual_disks): """Serve multiple virtual disks to a specified host. :param host_id: Id of the host machine :param virtual_disks: A list of virtual disks to serve :return: A list of the virtual disks actually served to the host """ virtual_disk_array = (self._storage_services_client.factory .create(self.O_ARRAY_OF_STRING)) virtual_disk_array.string = virtual_disks return self._get_list_data( self._invoke_storage_services('ServeVirtualDisksToHost', hostId=host_id, virtualDisks=virtual_disk_array), 'VirtualLogicalUnitData') def unserve_virtual_disks_from_host(self, host_id, virtual_disks): """Unserve multiple virtual disks from a specified host. 
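For illustration only (the ids are hypothetical), several virtual disks can be unserved with a single call; a plain Python list is accepted and converted to the SOAP array type internally::

    client.unserve_virtual_disks_from_host('host-id',
                                           ['vdisk-id-1', 'vdisk-id-2'])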
:param host_id: Id of the host machine :param virtual_disks: A list of virtual disks to unserve """ virtual_disk_array = (self._storage_services_client.factory .create(self.O_ARRAY_OF_STRING)) virtual_disk_array.string = virtual_disks self._invoke_storage_services('UnserveVirtualDisksFromHost', hostId=host_id, virtualDisks=virtual_disk_array) def unserve_virtual_disks_from_port(self, port_id, virtual_disks): """Unserve multiple virtual disks from a specified initiator port. :param port_id: Id of the initiator port :param virtual_disks: A list of virtual disks to unserve """ virtual_disk_array = (self._storage_services_client.factory .create(self.O_ARRAY_OF_STRING)) virtual_disk_array.string = virtual_disks self._invoke_storage_services('UnserveVirtualDisksFromPort', portId=port_id, virtualDisks=virtual_disk_array) def bind_logical_disk(self, virtual_disk_id, logical_disk_id, role, create_mirror_mappings, create_client_mappings, add_redundancy): """Bind (add) a logical disk to a virtual disk. :param virtual_disk_id: Id of the virtual disk to bind to :param logical_disk_id: Id of the logical disk being bound :param role: logical disk's role :param create_mirror_mappings: If True, automatically create the mirror mappings to this disk, assuming there is already another logical disk bound :param create_client_mappings: If True, automatically create mappings from mapped hosts to the new disk :param add_redundancy: If True, the mirror has redundant mirror paths :return: Updated virtual disk data """ logical_disk_role = getattr(self._storage_services_client.factory .create(self.O_LOGICAL_DISK_ROLE), role) return self._invoke_storage_services( 'BindLogicalDisk', virtualDiskId=virtual_disk_id, logicalDiskId=logical_disk_id, role=logical_disk_role, createMirrorMappings=create_mirror_mappings, createClientMappings=create_client_mappings, addRedundancy=add_redundancy) def get_snapshots(self): """Get all the snapshots on all the servers in the region. :return: A list of snapshot data. """ return self._get_list_data( self._invoke_storage_services('GetSnapshots'), 'SnapshotData') def create_snapshot(self, virtual_disk_id, name, description, destination_pool_id, snapshot_type, duplicate_disk_id, storage_profile_id): """Create a snapshot relationship. :param virtual_disk_id: Virtual disk id :param name: Name of snapshot :param description: Description :param destination_pool_id: Destination pool id :param snapshot_type: Type of snapshot :param duplicate_disk_id: If set to True then the destination virtual disk's SCSI id will be a duplicate of the source's :param storage_profile_id: Specifies the destination virtual disk's storage profile :return: New snapshot data """ st_type = getattr(self._storage_services_client.factory .create(self.O_SNAPSHOT_TYPE), snapshot_type) return self._invoke_storage_services( 'CreateSnapshot', virtualDiskId=virtual_disk_id, name=name, description=description, destinationPoolId=destination_pool_id, type=st_type, duplicateDiskId=duplicate_disk_id, storageProfileId=storage_profile_id) def delete_snapshot(self, snapshot_id): """Delete the snapshot. :param snapshot_id: Snapshot id """ self._invoke_storage_services('DeleteSnapshot', snapshotId=snapshot_id) def get_storage_profiles(self): """Get all the all the defined storage profiles. :return: A list of storage profiles """ return self._get_list_data( self._invoke_storage_services('GetStorageProfiles'), 'StorageProfileData') def designate_map_store(self, pool_id): """Designate which pool the snapshot mapstore will be allocated from. 
:param pool_id: Pool id :return: Updated server host data, which includes the mapstore pool id """ return self._invoke_storage_services('DesignateMapStore', poolId=pool_id) def get_performance_by_type(self, performance_types): """Get performance data for specific types of performance counters. :param performance_types: A list of performance counter types :return: A list of performance data points """ prfm_type_array = (self._storage_services_client.factory .create(self.O_ARRAY_OF_PERFORMANCE_TYPE)) prfm_type_array.PerformanceType = list( getattr(self._storage_services_client.factory .create(self.O_PERFORMANCE_TYPE), performance_type) for performance_type in performance_types) return self._get_list_data( self._invoke_storage_services('GetPerformanceByType', types=prfm_type_array), 'CollectionPointData') def get_ports(self): """Get all ports in the configuration. :return: A list of SCSI ports """ return self._get_list_data( self._invoke_storage_services('GetPorts'), 'ScsiPortData') def build_scsi_port_data(self, host_id, port_name, port_mode, port_type): """Create ScsiPortData object that represents SCSI port, of any type. :param host_id: Id of the port's host computer :param port_name: Unique name of the port. :param port_mode: Mode of port: initiator or target :param port_type: Type of port, Fc, iSCSI or loopback :return: ScsiPortData object """ scsi_port_data = (self._storage_services_client.factory .create(self.O_SCSI_PORT_DATA)) scsi_port_data.HostId = host_id scsi_port_data.PortName = port_name scsi_port_data.PortMode = getattr(self._storage_services_client.factory .create(self.O_SCSI_MODE), port_mode) scsi_port_data.PortType = getattr(self._storage_services_client.factory .create(self.O_SCSI_PORT_TYPE), port_type) return scsi_port_data def register_port(self, scsi_port_data): """Register a port in the configuration. :param scsi_port_data: Port data :return: Updated port data """ return self._invoke_storage_services('RegisterPort', port=scsi_port_data) def assign_port(self, client_id, port_id): """Assign a port to a client. :param client_id: Client id :param port_id: Port id :return: Updated port data, which will now have its host id set to the client id """ return self._invoke_storage_services('AssignPort', clientId=client_id, portId=port_id) def set_server_port_properties(self, port_id, properties): """Set a server port's properties. :param port_id: Port id :param properties: New properties :return: Updated port data """ return self._invoke_storage_services('SetServerPortProperties', portId=port_id, properties=properties) def build_access_token(self, initiator_node_name, initiator_username, initiator_password, mutual_authentication, target_username, target_password): """Create an AccessToken object. :param initiator_node_name: Initiator node name :param initiator_username: Initiator user name :param initiator_password: Initiator password :param mutual_authentication: If True the target and the initiator authenticate each other. A separate secret is set for each target and for each initiator in the storage area network (SAN). 
:param target_username: Target user name :param target_password: Target password :return: AccessToken object """ access_token = (self._storage_services_client.factory .create(self.O_ACCESS_TOKEN)) access_token.InitiatorNodeName = initiator_node_name access_token.InitiatorUsername = initiator_username access_token.InitiatorPassword = initiator_password access_token.MutualAuthentication = mutual_authentication access_token.TargetUsername = target_username access_token.TargetPassword = target_password return access_token def set_access_token(self, iscsi_port_id, access_token): """Set the access token. The access token allows access to a specific network node from a specific iSCSI port. :param iscsi_port_id: Id of the initiator iSCSI port :param access_token: Access token to be validated :return: Port data """ return self._invoke_storage_services('SetAccessToken', iScsiPortId=iscsi_port_id, inputToken=access_token) def get_clients(self): """Get all the clients in the configuration. :return: A list of client data """ return self._get_list_data( self._invoke_storage_services('GetClients'), 'ClientHostData') def register_client(self, host_name, description, machine_type, mode, preferred_server_ids): """Register the client, creating a client object in the configuration. :param host_name: Name of the client :param description: Description :param machine_type: Type of client :param mode: Path policy mode of the client :param preferred_server_ids: Preferred server ids :return: New client data """ client_machine_type = getattr(self._storage_services_client.factory .create(self.O_CLIENT_MACHINE_TYPE), machine_type) client_mode = getattr(self._storage_services_client.factory .create(self.O_PATH_POLICY), mode) return self._invoke_storage_services( 'RegisterClient', hostName=host_name, description=description, type=client_machine_type, mode=client_mode, preferredServerIds=preferred_server_ids) def set_client_capabilities(self, client_id, mpio, alua): """Set the client capabilities for MPIO and ALUA. :param client_id: Client id :param mpio: If set to True then MPIO-capable :param alua: If set to True then ALUA-capable :return: Updated client data """ return self._invoke_storage_services('SetClientCapabilities', clientId=client_id, mpio=mpio, alua=alua) def get_target_domains(self): """Get all the target domains in the configuration. :return: A list of target domains """ return self._get_list_data( self._invoke_storage_services('GetTargetDomains'), 'VirtualTargetDomainData') def create_target_domain(self, initiator_host_id, target_host_id): """Create a target domain given a pair of hosts, target and initiator. :param initiator_host_id: Id of the initiator host machine :param target_host_id: Id of the target host server :return: New target domain """ return self._invoke_storage_services('CreateTargetDomain', initiatorHostId=initiator_host_id, targetHostId=target_host_id) def delete_target_domain(self, target_domain_id): """Delete a target domain. :param target_domain_id: Target domain id """ self._invoke_storage_services('DeleteTargetDomain', targetDomainId=target_domain_id) def get_target_devices(self): """Get all the target devices in the configuration. :return: A list of target devices """ return self._get_list_data( self._invoke_storage_services('GetTargetDevices'), 'VirtualTargetDeviceData') def build_scsi_port_nexus_data(self, initiator_port_id, target_port_id): """Create a ScsiPortNexusData object. 
Nexus is a pair of ports that can communicate, one being the initiator, the other the target :param initiator_port_id: Id of the initiator port :param target_port_id: Id of the target port :return: ScsiPortNexusData object """ scsi_port_nexus_data = (self._storage_services_client.factory .create(self.O_SCSI_PORT_NEXUS_DATA)) scsi_port_nexus_data.InitiatorPortId = initiator_port_id scsi_port_nexus_data.TargetPortId = target_port_id return scsi_port_nexus_data def create_target_device(self, target_domain_id, nexus): """Create a target device, given a target domain and a nexus. :param target_domain_id: Target domain id :param nexus: Nexus, or pair of ports :return: New target device """ return self._invoke_storage_services('CreateTargetDevice', targetDomainId=target_domain_id, nexus=nexus) def delete_target_device(self, target_device_id): """Delete a target device. :param target_device_id: Target device id """ self._invoke_storage_services('DeleteTargetDevice', targetDeviceId=target_device_id) def get_next_free_lun(self, target_device_id): """Find the next unused LUN number for a specified target device. :param target_device_id: Target device id :return: LUN number """ return self._invoke_storage_services('GetNextFreeLun', targetDeviceId=target_device_id) def get_logical_units(self): """Get all the mappings configured in the system. :return: A list of mappings """ return self._get_list_data( self._invoke_storage_services('GetLogicalUnits'), 'VirtualLogicalUnitData') def map_logical_disk(self, logical_disk_id, nexus, lun, initiator_host_id, mapping_type): """Map a logical disk to a host. :param logical_disk_id: Id of the logical disk :param nexus: Nexus, or pair of ports :param lun: Logical Unit Number :param initiator_host_id: Id of the initiator host machine :param mapping_type: Type of mapping :return: New mapping """ logical_unit_type = getattr(self._storage_services_client.factory .create(self.O_LOGICAL_UNIT_TYPE), mapping_type) return self._invoke_storage_services('MapLogicalDisk', logicalDiskId=logical_disk_id, nexus=nexus, lun=lun, initiatorHostId=initiator_host_id, mappingType=logical_unit_type) def unmap_logical_disk(self, logical_disk_id, nexus): """Unmap a logical disk mapped with a specified nexus. :param logical_disk_id: Id of the logical disk :param nexus: Nexus, or pair of ports """ self._invoke_storage_services('UnmapLogicalDisk', logicalDiskId=logical_disk_id, nexusData=nexus) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/drivers/datacore/driver.py0000664000175000017500000010565100000000000022757 0ustar00zuulzuul00000000000000# Copyright (c) 2022 DataCore Software Corp. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""Base Driver for DataCore SANsymphony storage array.""" import math import time from oslo_config import cfg from oslo_log import log as logging from oslo_service import loopingcall from oslo_utils import excutils from oslo_utils import units from cinder import context as cinder_context from cinder import exception as cinder_exception from cinder.i18n import _ from cinder import interface from cinder import utils as cinder_utils from cinder.volume import configuration from cinder.volume import driver from cinder.volume.drivers.datacore import api from cinder.volume.drivers.datacore import exception as datacore_exception from cinder.volume.drivers.datacore import utils as datacore_utils from cinder.volume.drivers.san import san from cinder.volume import volume_types LOG = logging.getLogger(__name__) datacore_opts = [ cfg.StrOpt('datacore_disk_type', default='single', choices=['single', 'mirrored'], help='DataCore virtual disk type (single/mirrored). ' 'Mirrored virtual disks require two storage servers in ' 'the server group.'), cfg.StrOpt('datacore_storage_profile', default=None, help='DataCore virtual disk storage profile.'), cfg.ListOpt('datacore_disk_pools', default=[], help='List of DataCore disk pools that can be used ' 'by volume driver.'), cfg.IntOpt('datacore_api_timeout', default=300, min=1, help='Seconds to wait for a response from a ' 'DataCore API call.'), cfg.IntOpt('datacore_disk_failed_delay', default=300, min=0, help='Seconds to wait for DataCore virtual ' 'disk to come out of the "Failed" state.'), ] CONF = cfg.CONF CONF.register_opts(datacore_opts, group=configuration.SHARED_CONF_GROUP) @interface.volumedriver class DataCoreVolumeDriver(driver.VolumeDriver): """DataCore SANsymphony base volume driver.""" STORAGE_PROTOCOL = 'N/A' AWAIT_DISK_ONLINE_INTERVAL = 10 AWAIT_SNAPSHOT_ONLINE_INTERVAL = 10 AWAIT_SNAPSHOT_ONLINE_INITIAL_DELAY = 5 DATACORE_SINGLE_DISK = 'single' DATACORE_MIRRORED_DISK = 'mirrored' VOLUME_TYPE_STRIPED = 'Striped' VOLUME_TYPE_SPANNED = 'Spanned' DATACORE_DISK_TYPE_KEY = 'datacore:disk_type' DATACORE_STORAGE_PROFILE_KEY = 'datacore:storage_profile' DATACORE_DISK_POOLS_KEY = 'datacore:disk_pools' VALID_VOLUME_TYPE_KEYS = (DATACORE_DISK_TYPE_KEY, DATACORE_STORAGE_PROFILE_KEY, DATACORE_DISK_POOLS_KEY,) def __init__(self, *args, **kwargs): super(DataCoreVolumeDriver, self).__init__(*args, **kwargs) self.configuration.append_config_values(san.san_opts) self.configuration.append_config_values(datacore_opts) self._api = None self._default_volume_options = None def do_setup(self, context): """Perform validations and establish connection to server. 
:param context: Context information """ required_params = [ 'san_ip', 'san_login', 'san_password', ] for param in required_params: if not getattr(self.configuration, param, None): raise cinder_exception.InvalidInput(_("%s not set.") % param) self._api = api.DataCoreClient( self.configuration.san_ip, self.configuration.san_login, self.configuration.san_password, self.configuration.datacore_api_timeout) disk_type = self.configuration.datacore_disk_type if disk_type: disk_type = disk_type.lower() storage_profile = self.configuration.datacore_storage_profile if storage_profile: storage_profile = storage_profile.lower() disk_pools = self.configuration.datacore_disk_pools if disk_pools: disk_pools = [pool.lower() for pool in disk_pools] self._default_volume_options = { self.DATACORE_DISK_TYPE_KEY: disk_type, self.DATACORE_STORAGE_PROFILE_KEY: storage_profile, self.DATACORE_DISK_POOLS_KEY: disk_pools, } def check_for_setup_error(self): pass def get_volume_backend_name(self): """Get volume backend name of the volume service. :return: Volume backend name """ backend_name = self.configuration.safe_get('volume_backend_name') return (backend_name or 'DataCore' + self.__class__.__name__) def create_volume(self, volume): """Creates a volume. :param volume: Volume object :return: Dictionary of changes to the volume object to be persisted """ volume_options = self._get_volume_options(volume) disk_type = volume_options[self.DATACORE_DISK_TYPE_KEY] if disk_type == self.DATACORE_MIRRORED_DISK: logical_disk_count = 2 virtual_disk_type = 'MultiPathMirrored' elif disk_type == self.DATACORE_SINGLE_DISK: logical_disk_count = 1 virtual_disk_type = 'NonMirrored' else: msg = _("Virtual disk type '%s' is not valid.") % disk_type LOG.error(msg) raise cinder_exception.VolumeDriverException(message=msg) profile_id = self._get_storage_profile_id( volume_options[self.DATACORE_STORAGE_PROFILE_KEY]) pools = datacore_utils.get_distinct_by( lambda pool: pool.ServerId, self._get_available_disk_pools( volume_options[self.DATACORE_DISK_POOLS_KEY])) if len(pools) < logical_disk_count: msg = _("Suitable disk pools were not found for " "creating virtual disk.") LOG.error(msg) raise cinder_exception.VolumeDriverException(message=msg) disk_size = self._get_size_in_bytes(volume.size) logical_disks = [] virtual_disk = None try: for logical_disk_pool in pools[:logical_disk_count]: logical_disks.append( self._api.create_pool_logical_disk( logical_disk_pool.Id, self.VOLUME_TYPE_STRIPED, disk_size)) virtual_disk_data = self._api.build_virtual_disk_data( volume.id, virtual_disk_type, disk_size, volume.display_name, profile_id) virtual_disk = self._api.create_virtual_disk_ex2( virtual_disk_data, logical_disks[0].Id, logical_disks[1].Id if logical_disk_count == 2 else None, True) virtual_disk = self._await_virtual_disk_online(virtual_disk.Id) except Exception: with excutils.save_and_reraise_exception(): LOG.exception("Creation of volume %(volume)s failed.", {'volume': volume.id}) try: if virtual_disk: self._api.delete_virtual_disk(virtual_disk.Id, True) else: for logical_disk in logical_disks: self._api.delete_logical_disk(logical_disk.Id) except datacore_exception.DataCoreException as e: LOG.warning("An error occurred on a cleanup after failed " "creation of volume %(volume)s: %(error)s.", {'volume': volume.id, 'error': e}) return {'provider_location': virtual_disk.Id} def create_volume_from_snapshot(self, volume, snapshot): """Creates a volume from a snapshot. 
:param volume: Volume object :param snapshot: Snapshot object :return: Dictionary of changes to the volume object to be persisted """ return self._create_volume_from(volume, snapshot) def create_cloned_volume(self, volume, src_vref): """Creates volume clone. :param volume: New Volume object :param src_vref: Volume object that must be cloned :return: Dictionary of changes to the volume object to be persisted """ return self._create_volume_from(volume, src_vref) def extend_volume(self, volume, new_size): """Extend an existing volume's size. :param volume: Volume object :param new_size: new size in GB to extend this volume to """ virtual_disk = self._get_virtual_disk_for(volume, raise_not_found=True) self._set_virtual_disk_size(virtual_disk, self._get_size_in_bytes(new_size)) virtual_disk = self._await_virtual_disk_online(virtual_disk.Id) def delete_volume(self, volume): """Deletes a volume. :param volume: Volume object """ virtual_disk = self._get_virtual_disk_for(volume) if virtual_disk: if virtual_disk.IsServed: logical_disks = self._api.get_logical_disks() logical_units = self._api.get_logical_units() target_devices = self._api.get_target_devices() logical_disks = [disk.Id for disk in logical_disks if disk.VirtualDiskId == virtual_disk.Id] logical_unit_devices = [unit.VirtualTargetDeviceId for unit in logical_units if unit.LogicalDiskId in logical_disks] initiator_ports = set(device.InitiatorPortId for device in target_devices if device.Id in logical_unit_devices) for port in initiator_ports: self._api.unserve_virtual_disks_from_port( port, [virtual_disk.Id]) self._api.delete_virtual_disk(virtual_disk.Id, True) def create_snapshot(self, snapshot): """Creates a snapshot. :param snapshot: Snapshot object :return: Dictionary of changes to the snapshot object to be persisted """ src_virtual_disk = self._get_virtual_disk_for(snapshot.volume, raise_not_found=True) volume_options = self._get_volume_options(snapshot.volume) profile_name = volume_options[self.DATACORE_STORAGE_PROFILE_KEY] profile_id = self._get_storage_profile_id(profile_name) pool_names = volume_options[self.DATACORE_DISK_POOLS_KEY] if src_virtual_disk.DiskStatus != 'Online': LOG.warning("Attempting to make a snapshot from virtual disk " "%(disk)s that is in %(state)s state.", {'disk': src_virtual_disk.Id, 'state': src_virtual_disk.DiskStatus}) snapshot_virtual_disk = self._create_virtual_disk_copy( src_virtual_disk, snapshot.id, snapshot.display_name, profile_id=profile_id, pool_names=pool_names) snapshot_virtual_disk = self._await_virtual_disk_online( snapshot_virtual_disk.Id) return {'provider_location': snapshot_virtual_disk.Id} def delete_snapshot(self, snapshot): """Deletes a snapshot. 
:param snapshot: Snapshot object """ snapshot_virtual_disk = self._get_virtual_disk_for(snapshot) if snapshot_virtual_disk: self._api.delete_virtual_disk(snapshot_virtual_disk.Id, True) def ensure_export(self, context, volume): pass def create_export(self, context, volume, connector): pass def remove_export(self, context, volume): pass def unserve_virtual_disks_from_host(self, volume, connector): virtual_disk = self._get_virtual_disk_for(volume) if virtual_disk: if connector is None: clients = self._api.get_clients() else: clients = [self._get_client(connector['host'], create_new=False)] server_group = self._get_our_server_group() @cinder_utils.synchronized( 'datacore-backend-%s' % server_group.Id, external=True) def unserve_virtual_disk(client_id): self._api.unserve_virtual_disks_from_host( client_id, [virtual_disk.Id]) for client in clients: unserve_virtual_disk(client.Id) def terminate_connection(self, volume, connector, **kwargs): """Disallow connection from connector. :param volume: Volume object :param connector: Connector information """ self.unserve_virtual_disks_from_host(volume, connector) def manage_existing(self, volume, existing_ref): return self.manage_existing_object(volume, existing_ref, "volume") def manage_existing_get_size(self, volume, existing_ref): return self.manage_existing_object_get_size(volume, existing_ref, "volume") def manage_existing_snapshot(self, snapshot, existing_ref): return self.manage_existing_object(snapshot, existing_ref, "snapshot") def manage_existing_snapshot_get_size(self, snapshot, existing_ref): return self.manage_existing_object_get_size(snapshot, existing_ref, "snapshot") def manage_existing_object(self, existing_object, existing_ref, object_type): if 'source-name' not in existing_ref: reason = _('Reference must contain source-name element.') raise cinder_exception.ManageExistingInvalidReference( existing_ref=existing_ref, reason=reason) vd_alias = existing_ref['source-name'] virtual_disk = datacore_utils.get_first_or_default( lambda disk: disk.Alias == vd_alias, self._api.get_virtual_disks(), None) if not virtual_disk: kwargs = {'existing_ref': vd_alias, 'reason': 'Specified Virtual disk does not exist.'} raise cinder_exception.ManageExistingInvalidReference(**kwargs) return {'provider_location': virtual_disk.Id} def manage_existing_object_get_size(self, existing_object, existing_ref, object_type): if 'source-name' not in existing_ref: reason = _('Reference must contain source-name element.') raise cinder_exception.ManageExistingInvalidReference( existing_ref=existing_ref, reason=reason) vd_alias = existing_ref['source-name'] virtual_disk = datacore_utils.get_first_or_default( lambda disk: disk.Alias == vd_alias, self._api.get_virtual_disks(), None) if not virtual_disk: kwargs = {'existing_ref': vd_alias, 'reason': 'Specified Virtual disk does not exist.'} raise cinder_exception.ManageExistingInvalidReference(**kwargs) return self._get_size_in_gigabytes(virtual_disk.Size.Value) def _update_volume_stats(self): performance_data = self._api.get_performance_by_type( ['DiskPoolPerformance']) total = 0 available = 0 reserved = 0 for performance in performance_data: missing_perf_data = [] if hasattr(performance.PerformanceData, 'BytesTotal'): total += performance.PerformanceData.BytesTotal else: missing_perf_data.append('BytesTotal') if hasattr(performance.PerformanceData, 'BytesAvailable'): available += performance.PerformanceData.BytesAvailable else: missing_perf_data.append('BytesAvailable') if hasattr(performance.PerformanceData, 
'BytesReserved'): reserved += performance.PerformanceData.BytesReserved else: missing_perf_data.append('BytesReserved') if missing_perf_data: LOG.warning("Performance data %(data)s is missing for " "disk pool %(pool)s", {'data': missing_perf_data, 'pool': performance.ObjectId}) provisioned = 0 logical_disks = self._api.get_logical_disks() for disk in logical_disks: if getattr(disk, 'PoolId', None): provisioned += disk.Size.Value total_capacity_gb = self._get_size_in_gigabytes(total) free = available + reserved free_capacity_gb = self._get_size_in_gigabytes(free) provisioned_capacity_gb = self._get_size_in_gigabytes(provisioned) reserved_percentage = 100.0 * reserved / total if total else 0.0 reserved_percentage = math.ceil(reserved_percentage) ratio = self.configuration.max_over_subscription_ratio stats_data = { 'vendor_name': 'DataCore', 'QoS_support': False, 'volume_backend_name': self.get_volume_backend_name(), 'driver_version': self.get_version(), 'storage_protocol': self.STORAGE_PROTOCOL, 'total_capacity_gb': total_capacity_gb, 'free_capacity_gb': free_capacity_gb, 'provisioned_capacity_gb': provisioned_capacity_gb, 'reserved_percentage': reserved_percentage, 'max_over_subscription_ratio': ratio, 'thin_provisioning_support': True, 'thick_provisioning_support': False, 'online_extend_support': False, } self._stats = stats_data def _get_our_server_group(self): server_group = datacore_utils.get_first(lambda group: group.OurGroup, self._api.get_server_groups()) return server_group def _get_volume_options_from_type(self, type_id, default_options): options = dict(default_options.items()) if type_id: admin_context = cinder_context.get_admin_context() volume_type = volume_types.get_volume_type(admin_context, type_id) specs = dict(volume_type).get('extra_specs') for key, value in specs.items(): if key in self.VALID_VOLUME_TYPE_KEYS: if key == self.DATACORE_DISK_POOLS_KEY: options[key] = [v.strip().lower() for v in value.split(',')] else: options[key] = value.lower() return options def _get_volume_options(self, volume): type_id = volume.volume_type_id volume_options = self._get_volume_options_from_type( type_id, self._default_volume_options) return volume_options def _get_online_servers(self): servers = self._api.get_servers() online_servers = [server for server in servers if server.State == 'Online'] return online_servers def _get_available_disk_pools(self, disk_pool_names=None): online_servers = [server.Id for server in self._get_online_servers()] pool_performance = { performance.ObjectId: performance.PerformanceData for performance in self._api.get_performance_by_type(['DiskPoolPerformance'])} disk_pools = self._api.get_disk_pools() lower_disk_pool_names = ([name.lower() for name in disk_pool_names] if disk_pool_names else []) available_disk_pools = [ pool for pool in disk_pools if (self._is_pool_healthy(pool, pool_performance, online_servers) and (not lower_disk_pool_names or pool.Caption.lower() in lower_disk_pool_names))] available_disk_pools.sort( key=lambda p: pool_performance[p.Id].BytesAvailable, reverse=True) return available_disk_pools def _get_virtual_disk_for(self, obj, raise_not_found=False): disk_id = obj.get('provider_location') virtual_disk = datacore_utils.get_first_or_default( lambda disk: disk.Id == disk_id, self._api.get_virtual_disks(), None) if not virtual_disk: msg = (_("Virtual disk not found for %(object)s %(object_id)s.") % {'object': obj.__class__.__name__.lower(), 'object_id': obj['id']}) if raise_not_found: LOG.error(msg) raise 
cinder_exception.VolumeDriverException(message=msg) else: LOG.warning(msg) return virtual_disk def _set_virtual_disk_size(self, virtual_disk, new_size): return self._api.set_virtual_disk_size(virtual_disk.Id, new_size) def _get_storage_profile(self, profile_name, raise_not_found=False): profiles = self._api.get_storage_profiles() profile = datacore_utils.get_first_or_default( lambda p: p.Caption.lower() == profile_name.lower(), profiles, None) if not profile and raise_not_found: msg = (_("Specified storage profile %s not found.") % profile_name) LOG.error(msg) raise cinder_exception.VolumeDriverException(message=msg) return profile def _get_storage_profile_id(self, profile_name): profile_id = None if profile_name: profile = self._get_storage_profile(profile_name, raise_not_found=True) profile_id = profile.Id return profile_id def _await_virtual_disk_online(self, virtual_disk_id): def inner(start_time): disk_failed_delay = self.configuration.datacore_disk_failed_delay virtual_disk = datacore_utils.get_first( lambda disk: disk.Id == virtual_disk_id, self._api.get_virtual_disks()) if virtual_disk.DiskStatus == 'Online': raise loopingcall.LoopingCallDone(virtual_disk) elif ( virtual_disk.DiskStatus != 'FailedRedundancy' and time.time() - start_time >= disk_failed_delay): msg = (_("Virtual disk %(disk)s did not come out of the " "%(state)s state after %(timeout)s seconds.") % {'disk': virtual_disk.Id, 'state': virtual_disk.DiskStatus, 'timeout': disk_failed_delay}) LOG.error(msg) raise cinder_exception.VolumeDriverException(message=msg) inner_loop = loopingcall.FixedIntervalLoopingCall(inner, time.time()) return inner_loop.start(self.AWAIT_DISK_ONLINE_INTERVAL).wait() def _create_volume_from(self, volume, src_obj): src_virtual_disk = self._get_virtual_disk_for(src_obj, raise_not_found=True) if src_virtual_disk.DiskStatus != 'Online': LOG.warning("Attempting to create a volume from virtual disk " "%(disk)s that is in %(state)s state.", {'disk': src_virtual_disk.Id, 'state': src_virtual_disk.DiskStatus}) volume_options = self._get_volume_options(volume) profile_id = self._get_storage_profile_id( volume_options[self.DATACORE_STORAGE_PROFILE_KEY]) pool_names = volume_options[self.DATACORE_DISK_POOLS_KEY] volume_virtual_disk = self._create_virtual_disk_copy( src_virtual_disk, volume.id, volume.display_name, profile_id=profile_id, pool_names=pool_names) volume_logical_disk = datacore_utils.get_first( lambda disk: disk.VirtualDiskId == volume_virtual_disk.Id, self._api.get_logical_disks()) try: disk_type = volume_options[self.DATACORE_DISK_TYPE_KEY] if disk_type == self.DATACORE_MIRRORED_DISK: pools = self._get_available_disk_pools(pool_names) selected_pool = datacore_utils.get_first_or_default( lambda pool: (pool.ServerId != volume_logical_disk.ServerHostId and pool.Id != volume_logical_disk.PoolId), pools, None) if selected_pool: logical_disk = self._api.create_pool_logical_disk( selected_pool.Id, self.VOLUME_TYPE_STRIPED, volume_virtual_disk.Size.Value) self._api.bind_logical_disk(volume_virtual_disk.Id, logical_disk.Id, 'Second', True, False, True) else: msg = _("Can not create mirrored virtual disk. 
" "Suitable disk pools not found.") LOG.error(msg) raise cinder_exception.VolumeDriverException(message=msg) volume_virtual_disk = self._await_virtual_disk_online( volume_virtual_disk.Id) try: source_size = src_obj.size except AttributeError: source_size = src_obj.volume_size if volume.size > source_size: self._set_virtual_disk_size(volume_virtual_disk, self._get_size_in_bytes( volume.size)) volume_virtual_disk = datacore_utils.get_first( lambda disk: disk.Id == volume_virtual_disk.Id, self._api.get_virtual_disks()) volume_virtual_disk = self._await_virtual_disk_size_change( volume_virtual_disk.Id, self._get_size_in_bytes(source_size)) except Exception: with excutils.save_and_reraise_exception(): LOG.exception("Creation of volume %(volume)s failed.", {'volume': volume.id}) try: self._api.delete_virtual_disk(volume_virtual_disk.Id, True) except datacore_exception.DataCoreException as e: LOG.warning("An error occurred on a cleanup after failed " "creation of volume %(volume)s: %(error)s.", {'volume': volume.id, 'error': e}) return {'provider_location': volume_virtual_disk.Id} def _create_full_snapshot(self, description, name, pool_names, profile_id, src_virtual_disk): pools = self._get_available_disk_pools(pool_names) destination_pool = datacore_utils.get_first_or_default( lambda pool: (pool.ServerId == src_virtual_disk.FirstHostId or pool.ServerId == src_virtual_disk.SecondHostId), pools, None) if not destination_pool: msg = _("Suitable snapshot destination disk pool not found for " "virtual disk %s.") % src_virtual_disk.Id LOG.error(msg) raise cinder_exception.VolumeDriverException(message=msg) server = datacore_utils.get_first( lambda srv: srv.Id == destination_pool.ServerId, self._api.get_servers()) if not server.SnapshotMapStorePoolId: self._api.designate_map_store(destination_pool.Id) snapshot = self._api.create_snapshot(src_virtual_disk.Id, name, description, destination_pool.Id, 'Full', False, profile_id) return snapshot def _await_snapshot_migrated(self, snapshot_id): def inner(): snapshot_data = datacore_utils.get_first( lambda snapshot: snapshot.Id == snapshot_id, self._api.get_snapshots()) if snapshot_data.State == 'Migrated': raise loopingcall.LoopingCallDone(snapshot_data) elif (snapshot_data.State != 'Healthy' and snapshot_data.Failure != 'NoFailure'): msg = (_("Full migration of snapshot %(snapshot)s failed. 
" "Snapshot is in %(state)s state.") % {'snapshot': snapshot_data.Id, 'state': snapshot_data.State}) LOG.error(msg) raise cinder_exception.VolumeDriverException(message=msg) loop = loopingcall.FixedIntervalLoopingCall(inner) time.sleep(self.AWAIT_SNAPSHOT_ONLINE_INTERVAL) return loop.start(self.AWAIT_SNAPSHOT_ONLINE_INTERVAL, self.AWAIT_SNAPSHOT_ONLINE_INITIAL_DELAY).wait() def _create_virtual_disk_copy(self, src_virtual_disk, name, description, profile_id=None, pool_names=None): snapshot = self._create_full_snapshot( description, name, pool_names, profile_id, src_virtual_disk) try: snapshot = self._await_snapshot_migrated(snapshot.Id) self._api.delete_snapshot(snapshot.Id) self._await_snapshot_split_state_change(snapshot) except Exception: with excutils.save_and_reraise_exception(): LOG.exception("Split operation failed for snapshot " "%(snapshot)s.", {'snapshot': snapshot.Id}) try: logical_disk_copy = datacore_utils.get_first( lambda disk: (disk.Id == snapshot.DestinationLogicalDiskId), self._api.get_logical_disks()) virtual_disk_copy = datacore_utils.get_first( lambda disk: (disk.Id == logical_disk_copy.VirtualDiskId), self._api.get_virtual_disks()) self._api.delete_virtual_disk(virtual_disk_copy.Id, True) except datacore_exception.DataCoreException as e: LOG.warning("An error occurred on a cleanup after failed " "split of snapshot %(snapshot)s: %(error)s.", {'snapshot': snapshot.Id, 'error': e}) logical_disk_copy = datacore_utils.get_first( lambda disk: disk.Id == snapshot.DestinationLogicalDiskId, self._api.get_logical_disks()) virtual_disk_copy = datacore_utils.get_first( lambda disk: disk.Id == logical_disk_copy.VirtualDiskId, self._api.get_virtual_disks()) return virtual_disk_copy def _get_client(self, name, create_new=False): client_hosts = self._api.get_clients() client = datacore_utils.get_first_or_default( lambda host: host.HostName.split('.')[0] == name.split('.')[0], client_hosts, None) if create_new: if not client: client = self._api.register_client( name, None, 'Other', 'PreferredServer', None) self._api.set_client_capabilities(client.Id, True, True) return client @staticmethod def _is_pool_healthy(pool, pool_performance, online_servers): if (pool.PoolStatus == 'Running' and hasattr(pool_performance[pool.Id], 'BytesAvailable') and pool.ServerId in online_servers): return True return False @staticmethod def _get_size_in_bytes(size_in_gigabytes): return size_in_gigabytes * units.Gi @staticmethod def _get_size_in_gigabytes(size_in_bytes): return size_in_bytes / float(units.Gi) def _await_virtual_disk_size_change(self, virtual_disk_id, old_size): def inner(start_time): disk_failed_delay = self.configuration.datacore_disk_failed_delay virtual_disk = datacore_utils.get_first( lambda disk: disk.Id == virtual_disk_id, self._api.get_virtual_disks()) if virtual_disk.DiskStatus == 'Online' \ and virtual_disk.Size.Value > old_size: raise loopingcall.LoopingCallDone(virtual_disk) elif (virtual_disk.DiskStatus != 'FailedRedundancy' and time.time() - start_time >= disk_failed_delay): msg = (_("Virtual disk %(disk)s did not come out of the " "%(state)s state after %(timeout)s seconds.") % {'disk': virtual_disk.Id, 'state': virtual_disk.DiskStatus, 'timeout': disk_failed_delay}) LOG.error(msg) raise cinder_exception.VolumeDriverException(message=msg) inner_loop = loopingcall.FixedIntervalLoopingCall(inner, time.time()) time.sleep(self.AWAIT_DISK_ONLINE_INTERVAL) return inner_loop.start(self.AWAIT_DISK_ONLINE_INTERVAL).wait() def _await_snapshot_split_state_change(self, split_snapshot): def 
inner(start_time): disk_failed_delay = self.configuration.datacore_disk_failed_delay snapshot_found = False snapshot_list = self._api.get_snapshots() if not snapshot_list: raise loopingcall.LoopingCallDone() for entry in snapshot_list: if entry.Caption == split_snapshot.Caption: snapshot_found = True break if not snapshot_found: raise loopingcall.LoopingCallDone() if (time.time() - start_time >= disk_failed_delay): msg = (_("Split Snapshot disk %(disk)s did not happened " "after %(timeout)s seconds.") % {'disk': split_snapshot.Caption, 'timeout': disk_failed_delay}) LOG.error(msg) raise loopingcall.LoopingCallDone() inner_loop = loopingcall.FixedIntervalLoopingCall(inner, time.time()) return inner_loop.start(self.AWAIT_DISK_ONLINE_INTERVAL).wait() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/drivers/datacore/exception.py0000664000175000017500000000236400000000000023457 0ustar00zuulzuul00000000000000# Copyright (c) 2022 DataCore Software Corp. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Exception definitions.""" from cinder import exception from cinder.i18n import _ class DataCoreException(exception.VolumeBackendAPIException): """Base DataCore Exception.""" message = _('DataCore exception.') class DataCoreConnectionException(DataCoreException): """Thrown when there are connection problems during a DataCore API call.""" message = _('Failed to connect to DataCore Server Group: %(reason)s.') class DataCoreFaultException(DataCoreException): """Thrown when there are faults during a DataCore API call.""" message = _('DataCore Server Group reported an error: %(reason)s.') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/drivers/datacore/fc.py0000664000175000017500000003663500000000000022061 0ustar00zuulzuul00000000000000# Copyright (c) 2022 DataCore Software Corp. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
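# NOTE: illustrative sketch only, not part of the upstream module. It shows the
# best-effort cleanup pattern used together with the exception hierarchy defined
# in exception.py above; ``client`` is assumed to be a DataCoreClient instance
# and ``virtual_disk_id`` the id of an existing virtual disk.
def _example_cleanup_delete(client, virtual_disk_id):
    """Hypothetical helper: delete a virtual disk, tolerating backend errors."""
    from cinder.volume.drivers.datacore import exception as datacore_exception
    try:
        client.delete_virtual_disk(virtual_disk_id, True)
        return True
    except datacore_exception.DataCoreException:
        # Both DataCoreConnectionException and DataCoreFaultException derive
        # from DataCoreException; during a best-effort cleanup they are
        # swallowed here instead of being re-raised (the drivers log a warning).
        return False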
"""Fibre Channel Driver for DataCore SANsymphony storage array.""" from oslo_config import cfg from oslo_log import log as logging from oslo_utils import excutils from cinder.common import constants from cinder import exception as cinder_exception from cinder.i18n import _ from cinder import interface from cinder import utils as cinder_utils from cinder.volume import configuration from cinder.volume.drivers.datacore import driver from cinder.volume.drivers.datacore import exception as datacore_exception from cinder.volume.drivers.datacore import utils as datacore_utils from cinder.zonemanager import utils as fczm_utils LOG = logging.getLogger(__name__) datacore_fc_opts = [ cfg.ListOpt('datacore_fc_unallowed_targets', default=[], help='List of FC targets that cannot be used to attach ' 'volume. To prevent the DataCore FibreChannel ' 'volume driver from using some front-end targets ' 'in volume attachment, specify this option and list ' 'the iqn and target machine for each target as ' 'the value, such as ' ', , ' '.'), ] CONF = cfg.CONF CONF.register_opts(datacore_fc_opts, group=configuration.SHARED_CONF_GROUP) @interface.volumedriver class FibreChannelVolumeDriver(driver.DataCoreVolumeDriver): """DataCore SANsymphony Fibre Channel volume driver. Version history: .. code-block:: none 1.0.0 - Initial driver 2.0.0 - Reintroduce the driver """ VERSION = '2.0.0' STORAGE_PROTOCOL = constants.FC CI_WIKI_NAME = 'DataCore_CI' def __init__(self, *args, **kwargs): super(FibreChannelVolumeDriver, self).__init__(*args, **kwargs) self.configuration = kwargs.get('configuration', None) if self.configuration: self.configuration.append_config_values(datacore_fc_opts) @classmethod def get_driver_options(cls): additional_opts = cls._get_oslo_driver_opts( 'san_ip', 'san_login', 'san_password') return driver.datacore_opts + datacore_fc_opts + additional_opts def validate_connector(self, connector): """Fail if connector doesn't contain all the data needed by the driver. :param connector: Connector information """ required_data = ['host', 'wwpns'] for required in required_data: if required not in connector: LOG.error("The volume driver requires %(data)s " "in the connector.", {'data': required}) raise cinder_exception.InvalidConnectorException( missing=required) def _build_initiator_target_map(self, connector): target_wwns = [] init_targ_map = {} initiator_wwns = [] if connector: initiator_wwns = connector['wwpns'] fc_target_ports = self._get_frontend_fc_target_ports( self._api.get_ports()) for target_port in fc_target_ports: target_wwns.append( target_port.PortName.replace('-', '').lower()) for initiator in initiator_wwns: init_targ_map[initiator] = target_wwns return init_targ_map, target_wwns def initialize_connection(self, volume, connector): """Allow connection to connector and return connection info. 
:param volume: Volume object :param connector: Connector information :return: Connection information """ LOG.debug("Initialize connection for volume %(volume)s for " "connector %(connector)s.", {'volume': volume.id, 'connector': connector}) virtual_disk = self._get_virtual_disk_for(volume, raise_not_found=True) if virtual_disk.DiskStatus != 'Online': LOG.warning("Attempting to attach virtual disk %(disk)s " "that is in %(state)s state.", {'disk': virtual_disk.Id, 'state': virtual_disk.DiskStatus}) server_group = self._get_our_server_group() @cinder_utils.synchronized( 'datacore-backend-%s' % server_group.Id, external=True) def serve_virtual_disk(): available_ports = self._api.get_ports() connector_wwpns = list(wwpn.replace('-', '').lower() for wwpn in connector['wwpns']) fc_initiator = self._get_initiator(connector['host'], connector_wwpns, available_ports) if not fc_initiator: msg = (_("Suitable initiator not found for " "virtual disk %(disk)s for volume %(volume)s.") % {'disk': virtual_disk.Id, 'volume': volume.id}) LOG.error(msg) raise cinder_exception.VolumeDriverException(message=msg) fc_targets = self._get_targets(virtual_disk, available_ports) if not fc_targets: msg = (_("Suitable targets not found for " "virtual disk %(disk)s for volume %(volume)s.") % {'disk': virtual_disk.Id, 'volume': volume.id}) LOG.error(msg) raise cinder_exception.VolumeDriverException(message=msg) virtual_logical_units = self._map_virtual_disk( virtual_disk, fc_targets, fc_initiator) return fc_targets, virtual_logical_units targets, logical_units = serve_virtual_disk() init_targ_map, target_wwns = self._build_initiator_target_map( connector) info_backend = {'driver_volume_type': 'fibre_channel', 'data': { 'target_discovered': False, 'target_lun': logical_units[targets[0]].Lun.Quad, 'target_wwn': target_wwns, 'volume_id': volume.id, 'access_mode': 'rw', 'initiator_target_map': init_targ_map}} fczm_utils.add_fc_zone(info_backend) LOG.debug("Connection data: %s", info_backend) return info_backend def terminate_connection(self, volume, connector, **kwargs): init_targ_map, target_wwns = self._build_initiator_target_map( connector) info = {'driver_volume_type': 'fibre_channel', 'data': {}} info['data'] = {'target_wwn': target_wwns, 'initiator_target_map': init_targ_map} # First unserve the virtual disk from Host super().unserve_virtual_disks_from_host(volume, connector) fczm_utils.remove_fc_zone(info) return info def _get_initiator(self, host, connector_wwpns, available_ports): wwpn_list = [] for wwp in connector_wwpns: wwpn_list.append('-'.join( a + b for a, b in zip(*[iter(wwp.upper())] * 2))) client = self._get_client(host, create_new=True) valid_initiator = self._valid_fc_initiator(wwpn_list, available_ports) if not valid_initiator: return [] fc_initiator_ports = self._get_host_fc_initiator_ports( client, available_ports) fc_initiator = datacore_utils.get_first_or_default( lambda port: True if (port.PortName in wwpn_list) else False, fc_initiator_ports, None) if not fc_initiator: for wwn in wwpn_list: for port in available_ports: if (port.PortName == wwn and port.PortType == 'FibreChannel' and port.PortMode == 'Initiator' and port.Connected): scsi_port_data = self._api.build_scsi_port_data( client.Id, wwn, 'Initiator', 'FibreChannel') fc_initiator = self._api.register_port(scsi_port_data) return fc_initiator return fc_initiator @staticmethod def _get_host_fc_initiator_ports(host, ports): return [port for port in ports if port.PortType == 'FibreChannel' and port.PortMode == 'Initiator' and port.HostId == 
host.Id] def _get_targets(self, virtual_disk, available_ports): unallowed_targets = self.configuration.datacore_fc_unallowed_targets fc_target_ports = self._get_frontend_fc_target_ports( available_ports) server_port_map = {} for target_port in fc_target_ports: if target_port.HostId in server_port_map: server_port_map[target_port.HostId].append(target_port) else: server_port_map[target_port.HostId] = [target_port] fc_targets = [] if virtual_disk.FirstHostId in server_port_map: fc_targets += server_port_map[virtual_disk.FirstHostId] if virtual_disk.SecondHostId in server_port_map: fc_targets += server_port_map[virtual_disk.SecondHostId] return [target for target in fc_targets if target.PortName not in unallowed_targets] @staticmethod def _is_fc_frontend_port(port): if (port.PortType == 'FibreChannel' and port.PortMode == 'Target' and port.HostId): if port.PresenceStatus == 'Present': port_roles = port.ServerPortProperties.Role.split() port_state = port.StateInfo.State if 'Frontend' in port_roles and port_state == 'LoopLinkUp': return True return False def _get_frontend_fc_target_ports(self, ports): return [target_port for target_port in ports if self._is_fc_frontend_port(target_port)] def _map_virtual_disk(self, virtual_disk, targets, initiator): logical_disks = self._api.get_logical_disks() logical_units = {} created_mapping = {} created_devices = [] created_domains = [] try: for target in targets: target_domain = self._get_target_domain(target, initiator) if not target_domain: target_domain = self._api.create_target_domain( initiator.HostId, target.HostId) created_domains.append(target_domain) nexus = self._api.build_scsi_port_nexus_data( initiator.Id, target.Id) target_device = self._get_target_device( target_domain, target, initiator) if not target_device: target_device = self._api.create_target_device( target_domain.Id, nexus) created_devices.append(target_device) logical_disk = self._get_logical_disk_on_host( virtual_disk.Id, target.HostId, logical_disks) logical_unit = self._get_logical_unit( logical_disk, target_device) if not logical_unit: logical_unit = self._create_logical_unit( logical_disk, nexus, target_device) created_mapping[logical_unit] = target_device logical_units[target] = logical_unit except Exception: with excutils.save_and_reraise_exception(): LOG.exception("Mapping operation for virtual disk %(disk)s " "failed with error.", {'disk': virtual_disk.Id}) try: for logical_unit in created_mapping: nexus = self._api.build_scsi_port_nexus_data( created_mapping[logical_unit].InitiatorPortId, created_mapping[logical_unit].TargetPortId) self._api.unmap_logical_disk( logical_unit.LogicalDiskId, nexus) for target_device in created_devices: self._api.delete_target_device(target_device.Id) for target_domain in created_domains: self._api.delete_target_domain(target_domain.Id) except datacore_exception.DataCoreException as e: LOG.warning("An error occurred on a cleanup after " "failed mapping operation: %s.", e) return logical_units def _get_target_domain(self, target, initiator): target_domains = self._api.get_target_domains() target_domain = datacore_utils.get_first_or_default( lambda domain: (domain.InitiatorHostId == initiator.HostId and domain.TargetHostId == target.HostId), target_domains, None) return target_domain def _get_target_device(self, target_domain, target, initiator): target_devices = self._api.get_target_devices() target_device = datacore_utils.get_first_or_default( lambda device: (device.TargetDomainId == target_domain.Id and device.InitiatorPortId == initiator.Id 
and device.TargetPortId == target.Id), target_devices, None) return target_device def _get_logical_unit(self, logical_disk, target_device): logical_units = self._api.get_logical_units() logical_unit = datacore_utils.get_first_or_default( lambda unit: (unit.LogicalDiskId == logical_disk.Id and unit.VirtualTargetDeviceId == target_device.Id), logical_units, None) return logical_unit def _create_logical_unit(self, logical_disk, nexus, target_device): free_lun = self._api.get_next_free_lun(target_device.Id) logical_unit = self._api.map_logical_disk(logical_disk.Id, nexus, free_lun, logical_disk.ServerHostId, 'Client') return logical_unit @staticmethod def _get_logical_disk_on_host(virtual_disk_id, host_id, logical_disks): logical_disk = datacore_utils.get_first( lambda disk: (disk.ServerHostId == host_id and disk.VirtualDiskId == virtual_disk_id), logical_disks) return logical_disk @staticmethod def _valid_fc_initiator(wwpn_list, available_ports): for port in available_ports: if (port.PortType == 'FibreChannel' and port.PortMode == 'Initiator'): if (port.PortName in wwpn_list): return True return False ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/drivers/datacore/iscsi.py0000664000175000017500000004635100000000000022577 0ustar00zuulzuul00000000000000# Copyright (c) 2022 DataCore Software Corp. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """iSCSI Driver for DataCore SANsymphony storage array.""" from oslo_config import cfg from oslo_log import log as logging from oslo_utils import excutils from cinder.common import constants from cinder import exception as cinder_exception from cinder.i18n import _ from cinder import interface from cinder import utils as cinder_utils from cinder.volume import configuration from cinder.volume.drivers.datacore import driver from cinder.volume.drivers.datacore import exception as datacore_exception from cinder.volume.drivers.datacore import passwd from cinder.volume.drivers.datacore import utils as datacore_utils from cinder.volume import volume_utils as volume_utils LOG = logging.getLogger(__name__) datacore_iscsi_opts = [ cfg.ListOpt('datacore_iscsi_unallowed_targets', default=[], help='List of iSCSI targets that cannot be used to attach ' 'volume. To prevent the DataCore iSCSI volume driver ' 'from using some front-end targets in volume attachment, ' 'specify this option and list the iqn and target machine ' 'for each target as the value, such as ' ', , ' '.'), cfg.StrOpt('datacore_iscsi_chap_storage', default='$state_path/.datacore_chap', help='Fully qualified file name where dynamically generated ' 'iSCSI CHAP secrets are stored. This must be changed ' 'to a unique per-backend value if deploying multiple ' 'DataCore backends on the same host.'), ] CONF = cfg.CONF CONF.register_opts(datacore_iscsi_opts, group=configuration.SHARED_CONF_GROUP) @interface.volumedriver class ISCSIVolumeDriver(driver.DataCoreVolumeDriver): """DataCore SANsymphony iSCSI volume driver. 
Version history: .. code-block:: none 1.0.0 - Initial driver 2.0.0 - Reintroduce the driver """ VERSION = '2.0.0' STORAGE_PROTOCOL = constants.ISCSI CI_WIKI_NAME = 'DataCore_CI' def __init__(self, *args, **kwargs): super(ISCSIVolumeDriver, self).__init__(*args, **kwargs) self.configuration = kwargs.get('configuration', None) if self.configuration: self.configuration.append_config_values(datacore_iscsi_opts) self._password_storage = None @classmethod def get_driver_options(cls): additional_opts = cls._get_oslo_driver_opts( 'san_ip', 'san_login', 'san_password') return driver.datacore_opts + datacore_iscsi_opts + additional_opts def do_setup(self, context): """Perform validations and establish connection to server. :param context: Context information """ super(ISCSIVolumeDriver, self).do_setup(context) self._password_storage = passwd.PasswordFileStorage( self.configuration.datacore_iscsi_chap_storage) def validate_connector(self, connector): """Fail if connector doesn't contain all the data needed by the driver. :param connector: Connector information """ required_data = ['host', 'initiator'] for required in required_data: if required not in connector: LOG.error("The volume driver requires %(data)s " "in the connector.", {'data': required}) raise cinder_exception.InvalidConnectorException( missing=required) def initialize_connection(self, volume, connector): """Allow connection to connector and return connection info. :param volume: Volume object :param connector: Connector information :return: Connection information """ LOG.debug("Initialize connection for volume %(volume)s for " "connector %(connector)s.", {'volume': volume.id, 'connector': connector}) virtual_disk = self._get_virtual_disk_for(volume, raise_not_found=True) if virtual_disk.DiskStatus != 'Online': LOG.warning("Attempting to attach virtual disk %(disk)s " "that is in %(state)s state.", {'disk': virtual_disk.Id, 'state': virtual_disk.DiskStatus}) server_group = self._get_our_server_group() @cinder_utils.synchronized( 'datacore-backend-%s' % server_group.Id, external=True) def serve_virtual_disk(): available_ports = self._api.get_ports() iscsi_initiator = self._get_initiator(connector['host'], connector['initiator'], available_ports) iscsi_targets = self._get_targets(virtual_disk, available_ports) if not iscsi_targets: msg = (_("Suitable targets not found for " "virtual disk %(disk)s for volume %(volume)s.") % {'disk': virtual_disk.Id, 'volume': volume.id}) LOG.error(msg) raise cinder_exception.VolumeDriverException(message=msg) auth_params = self._setup_iscsi_chap_authentication( iscsi_targets, iscsi_initiator) virtual_logical_units = self._map_virtual_disk( virtual_disk, iscsi_targets, iscsi_initiator) return iscsi_targets, virtual_logical_units, auth_params targets, logical_units, chap_params = serve_virtual_disk() target_portal = datacore_utils.build_network_address( targets[0].PortConfigInfo.PortalsConfig.iScsiPortalConfigInfo[ 0].Address.Address, targets[0].PortConfigInfo.PortalsConfig.iScsiPortalConfigInfo[ 0].TcpPort) connection_data = {} if chap_params: connection_data['auth_method'] = 'CHAP' connection_data['auth_username'] = chap_params[0] connection_data['auth_password'] = chap_params[1] connection_data['target_discovered'] = False connection_data['target_iqn'] = targets[0].PortName connection_data['target_portal'] = target_portal connection_data['target_lun'] = logical_units[targets[0]].Lun.Quad connection_data['volume_id'] = volume.id connection_data['access_mode'] = 'rw' LOG.debug("Connection data: %s", 
connection_data) return { 'driver_volume_type': 'iscsi', 'data': connection_data, } def _map_virtual_disk(self, virtual_disk, targets, initiator): logical_disks = self._api.get_logical_disks() logical_units = {} created_mapping = {} created_devices = [] created_domains = [] try: for target in targets: target_domain = self._get_target_domain(target, initiator) if not target_domain: target_domain = self._api.create_target_domain( initiator.HostId, target.HostId) created_domains.append(target_domain) nexus = self._api.build_scsi_port_nexus_data( initiator.Id, target.Id) target_device = self._get_target_device( target_domain, target, initiator) if not target_device: target_device = self._api.create_target_device( target_domain.Id, nexus) created_devices.append(target_device) logical_disk = self._get_logical_disk_on_host( virtual_disk.Id, target.HostId, logical_disks) logical_unit = self._get_logical_unit( logical_disk, target_device) if not logical_unit: logical_unit = self._create_logical_unit( logical_disk, nexus, target_device) created_mapping[logical_unit] = target_device logical_units[target] = logical_unit except Exception: with excutils.save_and_reraise_exception(): LOG.exception("Mapping operation for virtual disk %(disk)s " "failed with error.", {'disk': virtual_disk.Id}) try: for logical_unit in created_mapping: nexus = self._api.build_scsi_port_nexus_data( created_mapping[logical_unit].InitiatorPortId, created_mapping[logical_unit].TargetPortId) self._api.unmap_logical_disk( logical_unit.LogicalDiskId, nexus) for target_device in created_devices: self._api.delete_target_device(target_device.Id) for target_domain in created_domains: self._api.delete_target_domain(target_domain.Id) except datacore_exception.DataCoreException as e: LOG.warning("An error occurred on a cleanup after " "failed mapping operation: %s.", e) return logical_units def terminate_connection(self, volume, connector, **kwargs): super().unserve_virtual_disks_from_host(volume, connector) def _get_target_domain(self, target, initiator): target_domains = self._api.get_target_domains() target_domain = datacore_utils.get_first_or_default( lambda domain: (domain.InitiatorHostId == initiator.HostId and domain.TargetHostId == target.HostId), target_domains, None) return target_domain def _get_target_device(self, target_domain, target, initiator): target_devices = self._api.get_target_devices() target_device = datacore_utils.get_first_or_default( lambda device: (device.TargetDomainId == target_domain.Id and device.InitiatorPortId == initiator.Id and device.TargetPortId == target.Id), target_devices, None) return target_device def _get_logical_unit(self, logical_disk, target_device): logical_units = self._api.get_logical_units() logical_unit = datacore_utils.get_first_or_default( lambda unit: (unit.LogicalDiskId == logical_disk.Id and unit.VirtualTargetDeviceId == target_device.Id), logical_units, None) return logical_unit def _create_logical_unit(self, logical_disk, nexus, target_device): free_lun = self._api.get_next_free_lun(target_device.Id) logical_unit = self._api.map_logical_disk(logical_disk.Id, nexus, free_lun, logical_disk.ServerHostId, 'Client') return logical_unit def _check_iscsi_chap_configuration(self, chap, targets): logical_units = self._api.get_logical_units() target_devices = self._api.get_target_devices() for logical_unit in logical_units: target_device_id = logical_unit.VirtualTargetDeviceId target_device = datacore_utils.get_first( lambda device, key=target_device_id: device.Id == key, target_devices) 
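# Look up the target port that serves this logical unit so its current
# authentication setting can be compared with the requested CHAP mode below.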
target_port_id = target_device.TargetPortId target = datacore_utils.get_first_or_default( lambda target_port, key=target_port_id: target_port.Id == key, targets, None) if (target and chap == (target.ServerPortProperties.Authentication == 'None') and chap == (target.ServerPortProperties.Authentication == 'Default')): msg = _("iSCSI CHAP authentication can't be configured for " "target %s. Device exists that served through " "this target.") % target.PortName LOG.error(msg) raise cinder_exception.VolumeDriverException(message=msg) def _setup_iscsi_chap_authentication(self, targets, initiator): iscsi_chap_enabled = self.configuration.use_chap_auth self._check_iscsi_chap_configuration(iscsi_chap_enabled, targets) server_group = self._get_our_server_group() update_access_token = False access_token = None chap_secret = None chap_username = initiator.PortName if iscsi_chap_enabled: authentication = 'CHAP' chap_username = self.configuration.chap_username if not chap_username: chap_username = initiator.PortName chap_secret = (self.configuration.chap_password or self._password_storage.get_password( server_group.Id, initiator.PortName)) if not chap_secret: chap_secret = volume_utils.generate_password(length=15) self._password_storage.set_password( server_group.Id, initiator.PortName, chap_secret) update_access_token = True access_token = self._api.build_access_token( initiator.PortName, None, None, False, chap_username, chap_secret) else: authentication = 'None' if self._password_storage: try: self._password_storage.delete_password(server_group.Id, initiator.PortName) except Exception: pass changed_targets = {} try: for target in targets: if iscsi_chap_enabled: target_iscsi_nodes = getattr(target.iSCSINodes, 'Node', []) iscsi_node = datacore_utils.get_first_or_default( lambda node: node.Name == initiator.PortName, target_iscsi_nodes, None) if ((not iscsi_node) or not (iscsi_node.AccessToken.TargetUsername) or (update_access_token)): self._api.set_access_token(target.Id, access_token) properties = target.ServerPortProperties if properties.Authentication != authentication: changed_targets[target] = properties.Authentication properties.Authentication = authentication self._api.set_server_port_properties( target.Id, properties) except Exception: with excutils.save_and_reraise_exception(): LOG.exception("Configuring of iSCSI CHAP authentication for " "initiator %(initiator)s failed.", {'initiator': initiator.PortName}) try: for target in changed_targets: properties = target.ServerPortProperties properties.Authentication = changed_targets[target] self._api.set_server_port_properties( target.Id, properties) except datacore_exception.DataCoreException as e: LOG.warning("An error occurred on a cleanup after failed " "configuration of iSCSI CHAP authentication " "on initiator %(initiator)s: %(error)s.", {'initiator': initiator.PortName, 'error': e}) if iscsi_chap_enabled: return chap_username, chap_secret def _get_initiator(self, host, iqn, available_ports): client = self._get_client(host, create_new=True) iscsi_initiator_ports = self._get_host_iscsi_initiator_ports( client, available_ports) iscsi_initiator = datacore_utils.get_first_or_default( lambda port: port.PortName == iqn, iscsi_initiator_ports, None) if not iscsi_initiator: scsi_port_data = self._api.build_scsi_port_data( client.Id, iqn, 'Initiator', 'iSCSI') iscsi_initiator = self._api.register_port(scsi_port_data) return iscsi_initiator def _get_targets(self, virtual_disk, available_ports): unallowed_targets = 
self.configuration.datacore_iscsi_unallowed_targets iscsi_target_ports = self._get_frontend_iscsi_target_ports( available_ports) server_port_map = {} for target_port in iscsi_target_ports: if target_port.HostId in server_port_map: server_port_map[target_port.HostId].append(target_port) else: server_port_map[target_port.HostId] = [target_port] iscsi_targets = [] if virtual_disk.FirstHostId in server_port_map: iscsi_targets += server_port_map[virtual_disk.FirstHostId] if virtual_disk.SecondHostId in server_port_map: iscsi_targets += server_port_map[virtual_disk.SecondHostId] iscsi_targets = [target for target in iscsi_targets if target.PortName not in unallowed_targets] return iscsi_targets @staticmethod def _get_logical_disk_on_host(virtual_disk_id, host_id, logical_disks): logical_disk = datacore_utils.get_first( lambda disk: (disk.ServerHostId == host_id and disk.VirtualDiskId == virtual_disk_id), logical_disks) return logical_disk @staticmethod def _is_iscsi_frontend_port(port): if (port.PortType == 'iSCSI' and port.PortMode == 'Target' and port.HostId and port.PresenceStatus == 'Present' and hasattr(port, 'IScsiPortStateInfo')): port_roles = port.ServerPortProperties.Role.split() port_state = (port.IScsiPortStateInfo.PortalsState .PortalStateInfo[0].State) if 'Frontend' in port_roles and port_state == 'Ready': return True return False @staticmethod def _get_frontend_iscsi_target_ports(ports): return [target_port for target_port in ports if ISCSIVolumeDriver._is_iscsi_frontend_port(target_port)] @staticmethod def _get_host_iscsi_initiator_ports(host, ports): return [port for port in ports if port.PortType == 'iSCSI' and port.PortMode == 'Initiator' and port.HostId == host.Id] ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/drivers/datacore/passwd.py0000664000175000017500000001255000000000000022760 0ustar00zuulzuul00000000000000# Copyright (c) 2022 DataCore Software Corp. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Password storage.""" import contextlib import json import os import stat from oslo_log import log as logging from cinder.i18n import _ from cinder import utils as cinder_utils LOG = logging.getLogger(__name__) class FileStorage(object): """Represents a file as a dictionary.""" def __init__(self, file_path): self._file_path = file_path self._file = None self._is_open = False def open(self): """Open a file for simultaneous reading and writing. If the specified file does not exist, it will be created with the 0600 access permissions for the current user, if needed the appropriate directories will be created with the 0750 access permissions for the current user. 
""" file_dir = os.path.dirname(self._file_path) if file_dir and not os.path.isdir(file_dir): os.makedirs(file_dir) os.chmod(file_dir, stat.S_IRWXU | stat.S_IRGRP | stat.S_IXGRP) if not os.path.isfile(self._file_path): open(self._file_path, 'w').close() os.chmod(self._file_path, stat.S_IRUSR | stat.S_IWUSR) if self._file: self.close() self._file = open(self._file_path, 'r+') return self def load(self): """Reads the file and returns corresponded dictionary object. :return: The dictionary that represents the file content. """ storage = {} if os.stat(self._file_path).st_size != 0: storage = json.load(self._file) if not isinstance(storage, dict): msg = _('File %s has a malformed format.') % self._file_path raise ValueError(msg) return storage def save(self, storage): """Writes the specified dictionary to the file. :param storage: Dictionary that should be written to the file. """ if not isinstance(storage, dict): msg = _('%s is not a dict.') % repr(storage) raise TypeError(msg) self._file.seek(0) self._file.truncate() json.dump(storage, self._file) def close(self): """Close the file.""" if self._file: self._file.close() self._file = None class PasswordFileStorage(object): """Password storage implementation. It stores passwords in a file in a clear text. The password file must be secured by setting up file permissions. """ def __init__(self, file_path): self._file_path = file_path self._file_storage = FileStorage(file_path) def set_password(self, resource, username, password): """Store the credential for the resource. :param resource: Resource name for which credential will be stored :param username: User name :param password: Password """ @cinder_utils.synchronized( 'datacore-password_storage-' + self._file_path, external=True) def _set_password(): with contextlib.closing(self._file_storage.open()) as storage: passwords = storage.load() if resource not in passwords: passwords[resource] = {} passwords[resource][username] = password storage.save(passwords) _set_password() def get_password(self, resource, username): """Returns the stored password for the resource. If the password does not exist, it will return None :param resource: Resource name for which credential was stored :param username: User name :return password: Password """ @cinder_utils.synchronized( 'datacore-password_storage-' + self._file_path, external=True) def _get_password(): with contextlib.closing(self._file_storage.open()) as storage: passwords = storage.load() if resource in passwords: return passwords[resource].get(username) return _get_password() def delete_password(self, resource, username): """Delete the stored credential for the resource. :param resource: Resource name for which credential was stored :param username: User name """ @cinder_utils.synchronized( 'datacore-password_storage-' + self._file_path, external=True) def _delete_password(): with contextlib.closing(self._file_storage.open()) as storage: passwords = storage.load() if resource in passwords and username in passwords[resource]: del passwords[resource][username] if not passwords[resource].keys(): del passwords[resource] storage.save(passwords) _delete_password() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/drivers/datacore/utils.py0000664000175000017500000000505500000000000022621 0ustar00zuulzuul00000000000000# Copyright (c) 2022 DataCore Software Corp. All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Utilities and helper functions.""" from oslo_utils import netutils def build_network_address(host, port): """Combines the specified host name or IP address with the specified port. :param host: Host name or IP address in presentation (string) format :param port: Port number :return: The host name or IP address and port combination; IPv6 addresses are enclosed in the square brackets """ if netutils.is_valid_ipv6(host): return '[%s]:%s' % (host, port) else: return '%s:%s' % (host, port) def get_first(predicate, source): """Searches for an item that matches the conditions. :param predicate: Defines the conditions of the item to search for :param source: Iterable collection of items :return: The first item that matches the conditions defined by the specified predicate, if found; otherwise StopIteration is raised """ return next(item for item in source if predicate(item)) def get_first_or_default(predicate, source, default): """Searches for an item that matches the conditions. :param predicate: Defines the conditions of the item to search for :param source: Iterable collection of items :param default: Value that is returned if the iterator is exhausted :return: The first item that matches the conditions defined by the specified predicate, if found; otherwise the default value """ try: return get_first(predicate, source) except StopIteration: return default def get_distinct_by(key, source): """Finds distinct items for the key and returns the result in a list. :param key: Function computing a key value for each item :param source: Iterable collection of items :return: The list of distinct by the key value items """ seen_keys = set() return [item for item in source if key(item) not in seen_keys and not seen_keys.add(key(item))] ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315577.3311205 cinder-27.0.0/cinder/volume/drivers/datera/0000775000175000017500000000000000000000000020560 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/drivers/datera/__init__.py0000664000175000017500000000000000000000000022657 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/drivers/datera/datera_api21.py0000664000175000017500000012756200000000000023403 0ustar00zuulzuul00000000000000# Copyright 2020 Datera # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
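# NOTE(illustrative): quick behavioural examples for the datacore utils
# helpers defined above; the sample port data is invented and this snippet is
# not part of the shipped module.
from cinder.volume.drivers.datacore import utils as datacore_utils

ports = [{'id': 'p1', 'role': 'Frontend'},
         {'id': 'p2', 'role': 'Backend'},
         {'id': 'p3', 'role': 'Frontend'}]

# First item matching the predicate; raises StopIteration when nothing does.
first_frontend = datacore_utils.get_first(
    lambda p: p['role'] == 'Frontend', ports)        # {'id': 'p1', ...}

# Same lookup, but a default is returned instead of an exception.
mirror_port = datacore_utils.get_first_or_default(
    lambda p: p['role'] == 'Mirror', ports, None)    # None

# One representative item per distinct key value (here: p1 and p2).
distinct_roles = datacore_utils.get_distinct_by(lambda p: p['role'], ports)

# IPv6 addresses are wrapped in brackets before the port is appended.
portal = datacore_utils.build_network_address('fd00::10', 3260)
# -> '[fd00::10]:3260'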
See the # License for the specific language governing permissions and limitations # under the License. import contextlib import ipaddress import math import random import time import uuid import eventlet from os_brick import exception as brick_exception from oslo_log import log as logging from oslo_serialization import jsonutils as json from oslo_utils import importutils from oslo_utils import units from cinder.common import constants from cinder import exception from cinder.i18n import _ from cinder.image import image_utils import cinder.volume.drivers.datera.datera_common as datc from cinder.volume import volume_types from cinder.volume import volume_utils as volutils LOG = logging.getLogger(__name__) dexceptions = importutils.try_import('dfs_sdk.exceptions') API_VERSION = "2.1" # The DateraAPI classes (2.1, 2.2) are enhanced by datera_common's lookup() # decorator which generates members run-time. Therefore on the class we disable # pylint's no-member check pylint: disable=no-member class DateraApi(object): # ================= # = Create Volume = # ================= def _create_volume_2_1(self, volume): policies = self._get_policies_for_resource(volume) num_replicas = int(policies['replica_count']) storage_name = 'storage-1' volume_name = 'volume-1' template = policies['template'] placement = policies['placement_mode'] ip_pool = policies['ip_pool'] name = datc.get_name(volume) if template: app_params = ( { 'create_mode': 'openstack', # 'uuid': str(volume['id']), 'name': name, 'app_template': {'path': '/app_templates/{}'.format( template)} }) else: app_params = ( { 'create_mode': 'openstack', 'uuid': str(volume['id']), 'name': name, 'access_control_mode': 'deny_all', 'storage_instances': [ { 'name': storage_name, 'ip_pool': {'path': ('/access_network_ip_pools/' '{}'.format(ip_pool))}, 'volumes': [ { 'name': volume_name, 'size': volume['size'], 'placement_mode': placement, 'replica_count': num_replicas, 'snapshot_policies': [ ] } ] } ] }) tenant = self.create_tenant(volume['project_id']) self.api.app_instances.create(tenant=tenant, **app_params) self._update_qos_2_1(volume, policies) self._add_vol_meta_2_1(volume) # ================= # = Extend Volume = # ================= def _extend_volume_2_1(self, volume, new_size): if volume['size'] >= new_size: LOG.warning("Volume size not extended due to original size being " "greater or equal to new size. Originial: " "%(original)s, New: %(new)s", { 'original': volume['size'], 'new': new_size}) return policies = self._get_policies_for_resource(volume) template = policies['template'] if template: LOG.warning("Volume size not extended due to template binding." 
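# NOTE(illustrative): condensed view of what the non-template branch of
# _create_volume_2_1 above sends to the backend - one app_instance per Cinder
# volume, holding a single storage instance and a single backing volume. All
# values here are invented examples; the 'name' format is an assumption about
# what datc.get_name() produces.
example_app_params = {
    'create_mode': 'openstack',
    'uuid': 'cinder-volume-uuid',
    'name': 'OS-cinder-volume-uuid',     # assumed datc.get_name() format
    'access_control_mode': 'deny_all',
    'storage_instances': [{
        'name': 'storage-1',
        'ip_pool': {'path': '/access_network_ip_pools/default'},
        'volumes': [{
            'name': 'volume-1',
            'size': 10,                  # GiB, taken from the Cinder volume
            'placement_mode': 'hybrid',
            'replica_count': 3,
            'snapshot_policies': [],
        }],
    }],
}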
" volume: %(volume)s, template: %(template)s", {'volume': volume, 'template': template}) return with self._offline_flip_2_1(volume): # Change Volume Size tenant = self.get_tenant(volume['project_id']) dvol = self.cvol_to_dvol(volume, tenant=tenant) dvol.set(tenant=tenant, size=new_size) # ================= # = Cloned Volume = # ================= def _create_cloned_volume_2_1(self, volume, src_vref): tenant = self.get_tenant(volume['project_id']) sdvol = self.cvol_to_dvol(src_vref, tenant=tenant) src = sdvol.path data = { 'create_mode': 'openstack', 'name': datc.get_name(volume), 'uuid': str(volume['id']), 'clone_volume_src': {'path': src}, } tenant = self.get_tenant(volume['project_id']) self.api.app_instances.create(tenant=tenant, **data) if volume['size'] > src_vref['size']: self._extend_volume_2_1(volume, volume['size']) self._add_vol_meta_2_1(volume) # ================= # = Delete Volume = # ================= def _delete_volume_2_1(self, volume): try: tenant = self.get_tenant(volume['project_id']) ai = self.cvol_to_ai(volume, tenant=tenant) si = ai.storage_instances.list(tenant=tenant)[0] # Clear out ACL acl = si.acl_policy.get(tenant=tenant) acl.set(tenant=tenant, initiators=[]) # Bring volume offline data = { 'admin_state': 'offline', 'force': True } ai.set(tenant=tenant, **data) ai.delete(tenant=tenant, force=True) except exception.NotFound: msg = ("Tried to delete volume %s, but it was not found in the " "Datera cluster. Continuing with delete.") LOG.info(msg, datc.get_name(volume)) # ================= # = Ensure Export = # ================= def _ensure_export_2_1(self, context, volume, connector=None): pass # ========================= # = Initialize Connection = # ========================= def _initialize_connection_2_1(self, volume, connector): # Now online the app_instance (which will online all storage_instances) multipath = connector.get('multipath', False) tenant = self.get_tenant(volume['project_id']) ai = self.cvol_to_ai(volume, tenant=tenant) data = { 'admin_state': 'online' } ai.set(tenant=tenant, **data) si = ai.storage_instances.list(tenant=tenant)[0] # randomize portal chosen choice = 0 policies = self._get_policies_for_resource(volume) if policies["round_robin"]: choice = random.randint(0, 1) portal = si.access['ips'][choice] + ':3260' iqn = si.access['iqn'] if multipath: portals = [p + ':3260' for p in si.access['ips']] iqns = [iqn for _ in si.access['ips']] lunids = [self._get_lunid() for _ in si.access['ips']] result = { 'driver_volume_type': 'iscsi', 'data': { 'target_discovered': False, 'target_iqn': iqn, 'target_iqns': iqns, 'target_portal': portal, 'target_portals': portals, 'target_lun': self._get_lunid(), 'target_luns': lunids, 'volume_id': volume['id'], 'discard': False}} else: result = { 'driver_volume_type': 'iscsi', 'data': { 'target_discovered': False, 'target_iqn': iqn, 'target_portal': portal, 'target_lun': self._get_lunid(), 'volume_id': volume['id'], 'discard': False}} if self.use_chap_auth: result['data'].update( auth_method="CHAP", auth_username=self.chap_username, auth_password=self.chap_password) return result # ================= # = Create Export = # ================= def _create_export_2_1(self, context, volume, connector): tenant = self.get_tenant(volume['project_id']) ai = self.cvol_to_ai(volume, tenant=tenant) data = { 'admin_state': 'offline', 'force': True } ai.set(tenant=tenant, **data) si = ai.storage_instances.list(tenant=tenant)[0] policies = self._get_policies_for_resource(volume) if connector and connector.get('ip'): # Case where 
volume_type has non default IP Pool info if policies['ip_pool'] != 'default': initiator_ip_pool_path = self.api.access_network_ip_pools.get( policies['ip_pool']).path # Fallback to trying reasonable IP based guess else: initiator_ip_pool_path = self._get_ip_pool_for_string_ip_2_1( connector['ip'], tenant) ip_pool_data = {'ip_pool': {'path': initiator_ip_pool_path}} si.set(tenant=tenant, **ip_pool_data) data = { 'admin_state': 'online' } ai.set(tenant=tenant, **data) # Check if we've already setup everything for this volume storage_instances = ai.storage_instances.list(tenant=tenant) # Handle adding initiator to product if necessary # Then add initiator to ACL if connector and connector.get('initiator'): initiator_name = "OpenStack-{}".format(str(uuid.uuid4())[:8]) initiator = connector['initiator'] dinit = None data = {'id': initiator, 'name': initiator_name} # Try and create the initiator # If we get a conflict, ignore it try: dinit = self.api.initiators.create(tenant=tenant, **data) except dexceptions.ApiConflictError: dinit = self.api.initiators.get(initiator, tenant=tenant) initiator_path = dinit['path'] # Create ACL with initiator group as reference for each # storage_instance in app_instance # TODO(_alastor_): We need to avoid changing the ACLs if the # template already specifies an ACL policy. for si in storage_instances: existing_acl = si.acl_policy.get(tenant=tenant) data = {} # Grabbing only the 'path' key from each existing initiator # within the existing acl. eacli --> existing acl initiator eacli = [] for acl in existing_acl['initiators']: nacl = {} nacl['path'] = acl['path'] eacli.append(nacl) data['initiators'] = eacli data['initiators'].append({"path": initiator_path}) # Grabbing only the 'path' key from each existing initiator # group within the existing acl. eaclig --> existing # acl initiator group eaclig = [] for acl in existing_acl['initiator_groups']: nacl = {} nacl['path'] = acl['path'] eaclig.append(nacl) data['initiator_groups'] = eaclig si.acl_policy.set(tenant=tenant, **data) if self.use_chap_auth: for si in storage_instances: data = {'type': 'chap', 'target_user_name': self.chap_username, 'target_pswd': self.chap_password} si.auth.set(tenant=tenant, **data) # Check to ensure we're ready for go-time self._si_poll_2_1(volume, si, tenant) self._add_vol_meta_2_1(volume, connector=connector) # ================= # = Detach Volume = # ================= def _detach_volume_2_1(self, context, volume, attachment=None): try: tenant = self.get_tenant(volume['project_id']) ai = self.cvol_to_ai(volume, tenant=tenant) # Clear out ACL for this specific attachment si = ai.storage_instances.list(tenant=tenant)[0] existing_acl = si.acl_policy.get(tenant=tenant) data = {} # Grabbing only the 'path' key from each existing initiator # within the existing acl. eacli --> existing acl initiator eacli = [] for acl in existing_acl['initiators']: if ( attachment is not None and attachment.connector is not None and acl['path'].split('/')[-1] == attachment.connector['initiator'] ): continue nacl = {} nacl['path'] = acl['path'] eacli.append(nacl) data['initiators'] = eacli data['initiator_groups'] = existing_acl['initiator_groups'] si.acl_policy.set(tenant=tenant, **data) if not eacli: # bring the application instance offline if there # are no initiators left. data = { 'admin_state': 'offline', 'force': True } ai.set(tenant=tenant, **data) except exception.NotFound: msg = ("Tried to detach volume %s, but it was not found in the " "Datera cluster. 
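# NOTE(illustrative): minimal sketch of the ACL merge performed by
# _create_export_2_1 above. Only the 'path' key of every existing entry is
# kept and the newly registered initiator path is appended, so attachments
# that already exist keep their access. Paths are invented examples.
existing_acl = {
    'initiators': [{'path': '/initiators/iqn.1993-08.org.debian:01:aaa',
                    'op_state': 'available'}],
    'initiator_groups': [],
}
new_initiator_path = '/initiators/iqn.1993-08.org.debian:01:bbb'

acl_update = {
    'initiators': [{'path': i['path']} for i in existing_acl['initiators']],
    'initiator_groups': [{'path': g['path']}
                         for g in existing_acl['initiator_groups']],
}
acl_update['initiators'].append({'path': new_initiator_path})
# acl_update is what gets pushed back via
# si.acl_policy.set(tenant=tenant, **acl_update)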
Continuing with detach.") LOG.info(msg, volume['id']) # =================== # = Create Snapshot = # =================== def _create_snapshot_2_1(self, snapshot): dummy_vol = {'id': snapshot['volume_id'], 'project_id': snapshot['project_id']} tenant = self.get_tenant(dummy_vol['project_id']) dvol = self.cvol_to_dvol(dummy_vol, tenant=tenant) snap_params = { 'uuid': snapshot['id'], } snap = dvol.snapshots.create(tenant=tenant, **snap_params) self._snap_poll_2_1(snap, tenant) # =================== # = Delete Snapshot = # =================== def _delete_snapshot_2_1(self, snapshot): # Handle case where snapshot is "managed" dummy_vol = {'id': snapshot['volume_id'], 'project_id': snapshot['project_id']} tenant = self.get_tenant(dummy_vol['project_id']) dvol = self.cvol_to_dvol(dummy_vol, tenant=tenant) snapshots = None # Shortcut if this is a managed snapshot provider_location = snapshot.get('provider_location') if provider_location: snap = dvol.snapshots.get(provider_location, tenant=tenant) snap.delete(tenant=tenant) return # Long-way. UUID identification try: snapshots = dvol.snapshots.list(tenant=tenant) except exception.NotFound: msg = ("Tried to delete snapshot %s, but parent volume %s was " "not found in Datera cluster. Continuing with delete.") LOG.info(msg, datc.get_name(snapshot), datc.get_name({'id': snapshot['volume_id']})) return try: for snap in snapshots: if snap.uuid == snapshot['id']: snap.delete(tenant=tenant) break else: raise exception.NotFound except exception.NotFound: msg = ("Tried to delete snapshot %s, but was not found in " "Datera cluster. Continuing with delete.") LOG.info(msg, datc.get_name(snapshot)) # ======================== # = Volume From Snapshot = # ======================== def _create_volume_from_snapshot_2_1(self, volume, snapshot): # Handle case where snapshot is "managed" dummy_vol = {'id': snapshot['volume_id'], 'project_id': snapshot['project_id']} tenant = self.get_tenant(dummy_vol['project_id']) dvol = self.cvol_to_dvol(dummy_vol, tenant=tenant) found_snap = None provider_location = snapshot.get('provider_location') if provider_location: found_snap = dvol.snapshots.get(provider_location, tenant=tenant) else: snapshots = dvol.snapshots.list(tenant=tenant) for snap in snapshots: if snap.uuid == snapshot['id']: found_snap = snap break else: raise exception.SnapshotNotFound(snapshot_id=snapshot['id']) self._snap_poll_2_1(found_snap, tenant) src = found_snap.path app_params = ( { 'create_mode': 'openstack', 'uuid': str(volume['id']), 'name': datc.get_name(volume), 'clone_snapshot_src': {'path': src}, }) self.api.app_instances.create(tenant=tenant, **app_params) if (volume['size'] > snapshot['volume_size']): self._extend_volume_2_1(volume, volume['size']) self._add_vol_meta_2_1(volume) # ========== # = Retype = # ========== def _retype_2_1(self, ctxt, volume, new_type, diff, host): LOG.debug("Retype called\n" "Volume: %(volume)s\n" "NewType: %(new_type)s\n" "Diff: %(diff)s\n" "Host: %(host)s\n", {'volume': volume, 'new_type': new_type, 'diff': diff, 'host': host}) # We'll take the fast route only if the types share the same backend # And that backend matches this driver old_pol = self._get_policies_for_resource(volume) new_pol = self._get_policies_for_volume_type(new_type) if (host['capabilities']['volume_backend_name'].lower() == self.backend_name.lower()): LOG.debug("Starting fast volume retype") if old_pol.get('template') or new_pol.get('template'): LOG.warning( "Fast retyping between template-backed volume-types " "unsupported. 
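# NOTE(illustrative): the snapshot lookups above lean on Python's for/else -
# the else branch only runs when the loop finishes without hitting break,
# i.e. when no snapshot with the requested UUID exists. Invented data:
wanted_uuid = 'snapshot-uuid-that-is-gone'
backend_snapshots = [{'uuid': 'some-other-uuid'}]
for snap in backend_snapshots:
    if snap['uuid'] == wanted_uuid:
        break
else:
    # Mirrors the driver behaviour: log the miss and continue with the delete.
    print('snapshot %s not found, nothing to delete' % wanted_uuid)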
Type1: %s, Type2: %s", volume['volume_type_id'], new_type) self._update_qos_2_1(volume, new_pol, clear_old=True) tenant = self.get_tenant(volume['project_id']) dvol = self.cvol_to_dvol(volume, tenant=tenant) # Only replica_count ip_pool requires offlining the app_instance if (new_pol['replica_count'] != old_pol['replica_count'] or new_pol['ip_pool'] != old_pol['ip_pool']): with self._offline_flip_2_1(volume): vol_params = ( { 'placement_mode': new_pol['placement_mode'], 'replica_count': new_pol['replica_count'], }) dvol.set(tenant=tenant, **vol_params) elif new_pol['placement_mode'] != old_pol['placement_mode']: vol_params = ( { 'placement_mode': new_pol['placement_mode'], }) dvol.set(tenant=tenant, **vol_params) self._add_vol_meta_2_1(volume) return True else: LOG.debug("Couldn't fast-retype volume between specified types") return False # ========== # = Manage = # ========== def _manage_existing_2_1(self, volume, existing_ref): # Only volumes created under the requesting tenant can be managed in # the v2.1+ API. Eg. If tenant A is the tenant for the volume to be # managed, it must also be tenant A that makes this request. # This will be fixed in a later API update existing_ref = existing_ref['source-name'] app_inst_name, __, __, __ = datc._parse_vol_ref(existing_ref) LOG.debug("Managing existing Datera volume %s " "Changing name to %s", datc.get_name(volume), existing_ref) # Rename AppInstance dummy_vol = {'id': app_inst_name, 'project_id': volume['project_id']} tenant = self.get_tenant(volume['project_id']) ai = self.cvol_to_ai(dummy_vol, tenant=tenant) data = {'name': datc.get_name(volume)} ai.set(tenant=tenant, **data) self._add_vol_meta_2_1(volume) # =================== # = Manage Get Size = # =================== def _manage_existing_get_size_2_1(self, volume, existing_ref): existing_ref = existing_ref['source-name'] app_inst_name, storage_inst_name, vol_name, __ = datc._parse_vol_ref( existing_ref) dummy_vol = {'id': app_inst_name, 'project_id': volume['project_id']} dvol = self.cvol_to_dvol(dummy_vol) return dvol.size # ========================= # = Get Manageable Volume = # ========================= def _list_manageable_2_1(self, cinder_volumes): # Use the first volume to determine the tenant we're working under if cinder_volumes: tenant = self.get_tenant(cinder_volumes[0]['project_id']) else: tenant = None app_instances = self.api.app_instances.list(tenant=tenant) results = [] if cinder_volumes and 'volume_id' in cinder_volumes[0]: cinder_volume_ids = [vol['volume_id'] for vol in cinder_volumes] else: cinder_volume_ids = [vol['id'] for vol in cinder_volumes] for ai in app_instances: ai_name = ai['name'] reference = None size = None safe_to_manage = False reason_not_safe = "" cinder_id = None extra_info = {} (safe_to_manage, reason_not_safe, cinder_id) = self._is_manageable_2_1( ai, cinder_volume_ids, tenant) si = ai.storage_instances.list(tenant=tenant)[0] si_name = si.name vol = si.volumes.list(tenant=tenant)[0] vol_name = vol.name size = vol.size snaps = [(snap.utc_ts, snap.uuid) for snap in vol.snapshots.list(tenant=tenant)] extra_info["snapshots"] = json.dumps(snaps) reference = {"source-name": "{}:{}:{}".format( ai_name, si_name, vol_name)} results.append({ 'reference': reference, 'size': size, 'safe_to_manage': safe_to_manage, 'reason_not_safe': reason_not_safe, 'cinder_id': cinder_id, 'extra_info': extra_info}) return results def _get_manageable_volumes_2_1(self, cinder_volumes, marker, limit, offset, sort_keys, sort_dirs): LOG.debug("Listing manageable Datera volumes") 
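# NOTE(illustrative): the manageable-volume reference built by
# _list_manageable_2_1 above is the colon-joined Datera names, and the same
# string is what an operator later passes back as 'source-name' to the
# manage_existing calls. The names below are invented.
reference = {'source-name': '{}:{}:{}'.format(
    'my-app-instance', 'storage-1', 'volume-1')}
# -> {'source-name': 'my-app-instance:storage-1:volume-1'}
app_inst_name = reference['source-name'].split(':')[0]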
results = self._list_manageable_2_1(cinder_volumes) page_results = volutils.paginate_entries_list( results, marker, limit, offset, sort_keys, sort_dirs) return page_results def _is_manageable_2_1(self, ai, cinder_volume_ids, tenant): cinder_id = None ai_name = ai.name match = datc.UUID4_RE.match(ai_name) if match: cinder_id = match.group(1) if cinder_id and cinder_id in cinder_volume_ids: return (False, "App Instance already managed by Cinder", cinder_id) if len(ai.storage_instances.list(tenant=tenant)) == 1: si = ai.storage_instances.list(tenant=tenant)[0] if len(si['volumes']) == 1: return (True, "", cinder_id) return (False, "App Instance has more than one storage instance or volume", cinder_id) # ============ # = Unmanage = # ============ def _unmanage_2_1(self, volume): LOG.debug("Unmanaging Cinder volume %s. Changing name to %s", volume['id'], datc.get_unmanaged(volume['id'])) data = {'name': datc.get_unmanaged(volume['id'])} tenant = self.get_tenant(volume['project_id']) ai = self.cvol_to_ai(volume, tenant=tenant) ai.set(tenant=tenant, **data) # =================== # = Manage Snapshot = # =================== def _manage_existing_snapshot_2_1(self, snapshot, existing_ref): existing_ref = existing_ref['source-name'] datc._check_snap_ref(existing_ref) LOG.debug("Managing existing Datera volume snapshot %s for volume %s", existing_ref, datc.get_name({'id': snapshot['volume_id']})) return {'provider_location': existing_ref} def _manage_existing_snapshot_get_size_2_1(self, snapshot, existing_ref): existing_ref = existing_ref['source-name'] datc._check_snap_ref(existing_ref) dummy_vol = {'id': snapshot['volume_id'], 'project_id': snapshot['project_id']} dvol = self.cvol_to_dvol(dummy_vol) return dvol.size def _get_manageable_snapshots_2_1(self, cinder_snapshots, marker, limit, offset, sort_keys, sort_dirs): LOG.debug("Listing manageable Datera snapshots") results = self._list_manageable_2_1(cinder_snapshots) snap_results = [] snapids = set((snap['id'] for snap in cinder_snapshots)) snaprefs = set((snap.get('provider_location') for snap in cinder_snapshots)) for volume in results: snaps = json.loads(volume["extra_info"]["snapshots"]) for snapshot in snaps: reference = snapshot[0] uuid = snapshot[1] size = volume["size"] safe_to_manage = True reason_not_safe = "" cinder_id = "" extra_info = {} source_reference = volume["reference"] if uuid in snapids or reference in snaprefs: safe_to_manage = False reason_not_safe = _("already managed by Cinder") elif not volume['safe_to_manage'] and not volume['cinder_id']: safe_to_manage = False reason_not_safe = _("parent volume not safe to manage") snap_results.append({ 'reference': {'source-name': reference}, 'size': size, 'safe_to_manage': safe_to_manage, 'reason_not_safe': reason_not_safe, 'cinder_id': cinder_id, 'extra_info': extra_info, 'source_reference': source_reference}) page_results = volutils.paginate_entries_list( snap_results, marker, limit, offset, sort_keys, sort_dirs) return page_results def _unmanage_snapshot_2_1(self, snapshot): return {'provider_location': None} # ==================== # = Fast Image Clone = # ==================== def _clone_image_2_1(self, context, volume, image_location, image_meta, image_service): # We're not going to fast image clone if the feature is not enabled # and/or we can't reach the image being requested if (not self.image_cache or not self._image_accessible(context, volume, image_meta)): return None, False # Check to make sure we're working with a valid volume type try: found = 
volume_types.get_volume_type(context, self.image_type) except (exception.VolumeTypeNotFound, exception.InvalidVolumeType): found = None if not found: msg = "Invalid volume type: %s" LOG.error(msg, self.image_type) raise ValueError(_("Option datera_image_cache_volume_type_id must" " be set to a valid volume_type id")) # Check image format fmt = image_meta.get('disk_format', '') if fmt.lower() != 'raw': LOG.debug("Image format is not RAW, image requires conversion " "before clone. Image format: [%s]", fmt) return None, False LOG.debug("Starting fast image clone") # TODO(_alastor_): determine if Datera is already an image backend # for this request and direct clone instead of caching # Dummy volume, untracked by Cinder src_vol = {'id': image_meta['id'], 'volume_type_id': self.image_type, 'size': volume['size'], 'project_id': volume['project_id']} # Determine if we have a cached version of the image cached = self._vol_exists_2_1(src_vol) if cached: tenant = self.get_tenant(src_vol['project_id']) ai = self.cvol_to_ai(src_vol, tenant=tenant) metadata = ai.metadata.get(tenant=tenant) # Check to see if the master image has changed since we created # The cached version ts = self._get_vol_timestamp_2_1(src_vol) mts = time.mktime(image_meta['updated_at'].timetuple()) LOG.debug("Original image timestamp: %s, cache timestamp %s", mts, ts) # If the image is created by Glance, we'll trust that even if the # timestamps don't match up, the data is ok to clone as it's not # managed by this driver if metadata.get('type') == 'image': LOG.debug("Found Glance volume-backed image for %s", src_vol['id']) # If the master image time is greater than the volume creation # time, we invalidate the cache and delete the volume. The # exception is if the cached volume was created by Glance. We # NEVER want to delete this volume. 
It's annotated with # 'type': 'image' in the metadata, so we'll check for that elif mts > ts and metadata.get('type') != 'image': LOG.debug("Cache is older than original image, deleting cache") cached = False self._delete_volume_2_1(src_vol) # If we don't have the image, we'll cache it if not cached: LOG.debug("No image cache found for: %s, caching image", image_meta['id']) self._cache_vol_2_1(context, src_vol, image_meta, image_service) # Now perform the clone of the found image or newly cached image self._create_cloned_volume_2_1(volume, src_vol) # Force volume resize vol_size = volume['size'] volume['size'] = 0 self._extend_volume_2_1(volume, vol_size) volume['size'] = vol_size # Determine if we need to retype the newly created volume vtype_id = volume.get('volume_type_id') if vtype_id and self.image_type and vtype_id != self.image_type: vtype = volume_types.get_volume_type(context, vtype_id) LOG.debug("Retyping newly cloned volume from type: %s to type: %s", self.image_type, vtype_id) diff, discard = volume_types.volume_types_diff( context, self.image_type, vtype_id) host = {'capabilities': {'vendor_name': self.backend_name}} self._retype_2_1(context, volume, vtype, diff, host) return None, True def _cache_vol_2_1(self, context, vol, image_meta, image_service): image_id = image_meta['id'] # Pull down image and determine if valid with image_utils.TemporaryImages.fetch(image_service, context, image_id) as tmp_image: data = image_utils.qemu_img_info(tmp_image) fmt = data.file_format if fmt is None: raise exception.ImageUnacceptable( reason=_("'qemu-img info' parsing failed."), image_id=image_id) backing_file = data.backing_file if backing_file is not None: raise exception.ImageUnacceptable( image_id=image_id, reason=_("fmt=%(fmt)s backed by:%(backing_file)s") % {'fmt': fmt, 'backing_file': backing_file, }) vsize = int( math.ceil(float(data.virtual_size) / units.Gi)) vol['size'] = vsize vtype = vol['volume_type_id'] LOG.info("Creating cached image with volume type: %(vtype)s and " "size %(size)s", {'vtype': vtype, 'size': vsize}) self._create_volume_2_1(vol) with self._connect_vol(context, vol) as device: LOG.debug("Moving image %s to volume %s", image_meta['id'], datc.get_name(vol)) image_utils.convert_image(tmp_image, device, 'raw', run_as_root=True) LOG.debug("Finished moving image %s to volume %s", image_meta['id'], datc.get_name(vol)) data = image_utils.qemu_img_info(device, run_as_root=True) if data.file_format != 'raw': raise exception.ImageUnacceptable( image_id=image_id, reason=_( "Converted to %(vol_format)s, but format is " "now %(file_format)s") % { 'vol_format': 'raw', 'file_format': data.file_format}) # TODO(_alastor_): Remove this snapshot creation when we fix # "created_at" attribute in the frontend # We don't actually care about the snapshot uuid, we just want # a single snapshot snapshot = {'id': str(uuid.uuid4()), 'volume_id': vol['id'], 'project_id': vol['project_id']} self._create_snapshot_2_1(snapshot) metadata = {'type': 'cached_image'} tenant = self.get_tenant(vol['project_id']) ai = self.cvol_to_ai(vol, tenant=tenant) ai.metadata.set(tenant=tenant, **metadata) # Cloning offline AI is ~4 seconds faster than cloning online AI self._detach_volume_2_1(None, vol) def _get_vol_timestamp_2_1(self, volume): tenant = self.get_tenant(volume['project_id']) dvol = self.cvol_to_dvol(volume, tenant=tenant) snapshots = dvol.snapshots.list(tenant=tenant) if len(snapshots) == 1: return float(snapshots[0].utc_ts) else: # We'll return 0 if we find no snapshots (or the incorrect 
number) # to ensure the timestamp comparison with the master copy fails # since the master copy will always have a timestamp > 0. LOG.debug("Number of snapshots found: %s", len(snapshots)) return 0 def _vol_exists_2_1(self, volume): LOG.debug("Checking if volume %s exists", volume['id']) try: ai = self.cvol_to_ai(volume) LOG.debug("Volume %s exists", volume['id']) return ai except exception.NotFound: LOG.debug("Volume %s not found", volume['id']) return None @contextlib.contextmanager def _connect_vol(self, context, vol): connector = None try: # Start connection, get the connector object and create the # export (ACL, IP-Pools, etc) conn = self._initialize_connection_2_1( vol, {'multipath': False}) connector = volutils.brick_get_connector( conn['driver_volume_type'], use_multipath=False, device_scan_attempts=10, conn=conn) connector_info = {'initiator': connector.get_initiator()} self._create_export_2_1(None, vol, connector_info) retries = 10 attach_info = conn['data'] while True: try: attach_info.update( connector.connect_volume(conn['data'])) break except brick_exception.FailedISCSITargetPortalLogin: retries -= 1 if not retries: LOG.error("Could not log into portal before end of " "polling period") raise LOG.debug("Failed to login to portal, retrying") eventlet.sleep(2) device_path = attach_info['path'] yield device_path finally: # Close target connection if connector: # Best effort disconnection try: connector.disconnect_volume(attach_info, attach_info) except Exception: pass # =========== # = Polling = # =========== def _snap_poll_2_1(self, snap, tenant): eventlet.sleep(datc.DEFAULT_SNAP_SLEEP) TIMEOUT = 20 retry = 0 poll = True while poll and not retry >= TIMEOUT: retry += 1 snap = snap.reload(tenant=tenant) if snap.op_state == 'available': poll = False else: eventlet.sleep(1) if retry >= TIMEOUT: raise exception.VolumeDriverException( message=_('Snapshot not ready.')) def _si_poll_2_1(self, volume, si, tenant): # Initial 4 second sleep required for some Datera versions eventlet.sleep(datc.DEFAULT_SI_SLEEP) TIMEOUT = 10 retry = 0 poll = True while poll and not retry >= TIMEOUT: retry += 1 si = si.reload(tenant=tenant) if si.op_state == 'available': poll = False else: eventlet.sleep(1) if retry >= TIMEOUT: raise exception.VolumeDriverException( message=_('Resource not ready.')) # ================ # = Volume Stats = # ================ def _get_volume_stats_2_1(self, refresh=False): # cluster_stats is defined by datera_iscsi # pylint: disable=access-member-before-definition if refresh or not self.cluster_stats: try: LOG.debug("Updating cluster stats info.") results = self.api.system.get() if 'uuid' not in results: LOG.error( 'Failed to get updated stats from Datera Cluster.') stats = { 'volume_backend_name': self.backend_name, 'vendor_name': 'Datera', 'driver_version': self.VERSION, 'storage_protocol': constants.ISCSI, 'total_capacity_gb': ( int(results.total_capacity) / units.Gi), 'free_capacity_gb': ( int(results.available_capacity) / units.Gi), 'reserved_percentage': 0, 'QoS_support': True, } self.cluster_stats = stats except exception.DateraAPIException: LOG.error('Failed to get updated stats from Datera cluster.') return self.cluster_stats # ======= # = QoS = # ======= def _update_qos_2_1(self, volume, policies, clear_old=False): tenant = self.get_tenant(volume['project_id']) dvol = self.cvol_to_dvol(volume, tenant=tenant) type_id = volume.get('volume_type_id', None) if type_id is not None: iops_per_gb = int(policies.get('iops_per_gb', 0)) bandwidth_per_gb = 
int(policies.get('bandwidth_per_gb', 0)) # Filter for just QOS policies in result. All of their keys # should end with "max" fpolicies = {k: int(v) for k, v in policies.items() if k.endswith("max")} # Filter all 0 values from being passed fpolicies = {k: int(v) for k, v in fpolicies.items() if v > 0} # Calculate and set iops/gb and bw/gb, but only if they don't # exceed total_iops_max and total_bw_max aren't set since they take # priority if iops_per_gb: ipg = iops_per_gb * volume['size'] # Not using zero, because zero means unlimited im = fpolicies.get('total_iops_max', 1) r = ipg if ipg > im: r = im fpolicies['total_iops_max'] = r if bandwidth_per_gb: bpg = bandwidth_per_gb * volume['size'] # Not using zero, because zero means unlimited bm = fpolicies.get('total_bandwidth_max', 1) r = bpg if bpg > bm: r = bm fpolicies['total_bandwidth_max'] = r if fpolicies or clear_old: try: pp = dvol.performance_policy.get(tenant=tenant) pp.delete(tenant=tenant) except dexceptions.ApiNotFoundError: LOG.debug("No existing performance policy found") if fpolicies: dvol.performance_policy.create(tenant=tenant, **fpolicies) # ============ # = IP Pools = # ============ def _get_ip_pool_for_string_ip_2_1(self, ip, tenant): """Takes a string ipaddress and return the ip_pool API object dict """ pool = 'default' ip_obj = ipaddress.ip_address(str(ip)) ip_pools = self.api.access_network_ip_pools.list(tenant=tenant) for ipdata in ip_pools: for adata in ipdata['network_paths']: if not adata.get('start_ip'): continue pool_if = ipaddress.ip_interface( "/".join((adata['start_ip'], str(adata['netmask'])))) if ip_obj in pool_if.network: pool = ipdata.name return self.api.access_network_ip_pools.get(pool, tenant=tenant).path # ==================== # = Volume Migration = # ==================== def _update_migrated_volume_2_1(self, context, volume, new_volume, volume_status): """Rename the newly created volume to the original volume. So we can find it correctly. """ tenant = self.get_tenant(new_volume['project_id']) ai = self.cvol_to_ai(new_volume, tenant=tenant) data = {'name': datc.get_name(volume)} ai.set(tenant=tenant, **data) return {'_name_id': None} @contextlib.contextmanager def _offline_flip_2_1(self, volume): reonline = False tenant = self.get_tenant(volume['project_id']) ai = self.cvol_to_ai(volume, tenant=tenant) if ai.admin_state == 'online': reonline = True ai.set(tenant=tenant, admin_state='offline') yield if reonline: ai.set(tenant=tenant, admin_state='online') def _add_vol_meta_2_1(self, volume, connector=None): if not self.do_metadata: return metadata = {'host': volume.get('host', ''), 'display_name': datc.filter_chars( volume.get('display_name', '')), 'bootable': str(volume.get('bootable', False)), 'availability_zone': volume.get('availability_zone', '')} if connector: metadata.update(connector) LOG.debug("Adding volume metadata: %s", metadata) tenant = self.get_tenant(volume['project_id']) ai = self.cvol_to_ai(volume, tenant=tenant) ai.metadata.set(tenant=tenant, **metadata) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/drivers/datera/datera_api22.py0000664000175000017500000013767500000000000023412 0ustar00zuulzuul00000000000000# Copyright 2020 Datera # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
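# NOTE(illustrative): standalone version of the subnet test used by
# _get_ip_pool_for_string_ip_2_1 above. The pool's start_ip/netmask pair is
# turned into an interface and the connector address is checked for
# membership in that network. Addresses are invented examples.
import ipaddress

connector_ip = ipaddress.ip_address('172.28.41.9')
network_path = {'start_ip': '172.28.41.1', 'netmask': 24}

pool_if = ipaddress.ip_interface(
    '/'.join((network_path['start_ip'], str(network_path['netmask']))))
if connector_ip in pool_if.network:
    # This pool would be selected instead of the 'default' pool.
    print('connector belongs to %s' % pool_if.network)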
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import contextlib import ipaddress import math import random import time import uuid import eventlet from os_brick import exception as brick_exception from oslo_log import log as logging from oslo_serialization import jsonutils as json from oslo_utils import importutils from oslo_utils import units from cinder.common import constants from cinder import exception from cinder.i18n import _ from cinder.image import image_utils import cinder.volume.drivers.datera.datera_common as datc from cinder.volume import volume_types from cinder.volume import volume_utils as volutils LOG = logging.getLogger(__name__) dexceptions = importutils.try_import('dfs_sdk.exceptions') API_VERSION = "2.2" # The DateraAPI classes (2.1, 2.2) are enhanced by datera_common's lookup() # decorator which generates members run-time. Therefore on the class we disable # pylint's no-member check pylint: disable=no-member class DateraApi(object): # ================= # = Create Volume = # ================= def _create_volume_2_2(self, volume): policies = self._get_policies_for_resource(volume) num_replicas = int(policies['replica_count']) storage_name = 'storage-1' volume_name = 'volume-1' template = policies['template'] placement = policies['placement_mode'] ppolicy = policies['placement_policy'] ip_pool = datc.get_ip_pool(policies) name = datc.get_name(volume) if template: app_params = ( { 'create_mode': 'openstack', # 'uuid': str(volume['id']), 'name': name, 'app_template': {'path': '/app_templates/{}'.format( template)} }) if self._support_template_override_2_2(): app_params['template_override'] = { 'storage_instances': { storage_name: { 'volumes': { volume_name: { 'size': str(volume['size'])}}}}} else: app_params = ( { 'create_mode': 'openstack', 'uuid': str(volume['id']), 'name': name, 'access_control_mode': 'deny_all', 'storage_instances': [ { 'name': storage_name, 'ip_pool': {'path': ('/access_network_ip_pools/' '{}'.format(ip_pool))}, 'volumes': [ { 'name': volume_name, 'size': volume['size'], 'replica_count': num_replicas, 'snapshot_policies': [ ] } ] } ] }) create_vol = app_params['storage_instances'][0]['volumes'][0] if datc.dat_version_gte(self.datera_version, '3.3.0.0'): create_vol['placement_policy'] = { 'path': '/placement_policies/{}'.format(ppolicy)} else: create_vol['placement_mode'] = placement tenant = self.create_tenant(volume['project_id']) self.api.app_instances.create(tenant=tenant, **app_params) self._update_qos_2_2(volume, policies) self._add_vol_meta_2_2(volume) # ================= # = Extend Volume = # ================= def _extend_volume_2_2(self, volume, new_size): if volume['size'] >= new_size: LOG.warning("Volume size not extended due to original size being " "greater or equal to new size. Original: " "%(original)s, New: %(new)s", {'original': volume['size'], 'new': new_size}) return policies = self._get_policies_for_resource(volume) template = policies['template'] if template and not self._support_template_override_2_2(): LOG.warning("Volume size not extended due to template binding. 
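# NOTE(illustrative): the placement handling in _create_volume_2_2 above is
# gated on the product version; a trimmed-down sketch with invented values:
import cinder.volume.drivers.datera.datera_common as datc

datera_version = '3.3.2.0'          # assumed product version string
policies = {'placement_policy': 'default', 'placement_mode': 'hybrid'}

create_vol = {'name': 'volume-1', 'size': 10}
if datc.dat_version_gte(datera_version, '3.3.0.0'):
    # 3.3.0.0 and newer take a placement_policy object path.
    create_vol['placement_policy'] = {
        'path': '/placement_policies/{}'.format(policies['placement_policy'])}
else:
    # Older products still use the flat placement_mode string.
    create_vol['placement_mode'] = policies['placement_mode']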
" "Template override is supported in product versions " "3.3.X+: volume: %(volume)s, template: %(template)s", {'volume': volume, 'template': template}) return with self._offline_flip_2_2(volume): # Change Volume Size tenant = self.get_tenant(volume['project_id']) dvol = self.cvol_to_dvol(volume, tenant) dvol.set(tenant=tenant, size=new_size) # ================= # = Cloned Volume = # ================= def _create_cloned_volume_2_2(self, volume, src_vref): tenant = self.get_tenant(volume['project_id']) sdvol = self.cvol_to_dvol(src_vref, tenant=tenant) src = sdvol.path data = { 'create_mode': 'openstack', 'name': datc.get_name(volume), 'uuid': str(volume['id']), 'clone_volume_src': {'path': src}, } tenant = self.get_tenant(volume['project_id']) self.api.app_instances.create(tenant=tenant, **data) if volume['size'] > src_vref['size']: self._extend_volume_2_2(volume, volume['size']) self._add_vol_meta_2_2(volume) # ================= # = Delete Volume = # ================= def _delete_volume_2_2(self, volume): try: tenant = self.get_tenant(volume['project_id']) ai = self.cvol_to_ai(volume, tenant=tenant) si = ai.storage_instances.list(tenant=tenant)[0] # Clear out ACL acl = si.acl_policy.get(tenant=tenant) acl.set(tenant=tenant, initiators=[]) # Bring volume offline data = { 'admin_state': 'offline', 'force': True } ai.set(tenant=tenant, **data) ai.delete(tenant=tenant, force=True) except exception.NotFound: msg = ("Tried to delete volume %s, but it was not found in the " "Datera cluster. Continuing with delete.") LOG.info(msg, datc.get_name(volume)) # ================= # = Ensure Export = # ================= def _ensure_export_2_2(self, context, volume, connector=None): pass # ========================= # = Initialize Connection = # ========================= def _initialize_connection_2_2(self, volume, connector): # Now online the app_instance (which will online all storage_instances) multipath = connector.get('multipath', False) tenant = self.get_tenant(volume['project_id']) ai = self.cvol_to_ai(volume, tenant=tenant) data = { 'admin_state': 'online' } ai.set(tenant=tenant, **data) si = ai.storage_instances.list(tenant=tenant)[0] # randomize portal chosen choice = 0 policies = self._get_policies_for_resource(volume) if policies["round_robin"]: choice = random.randint(0, 1) portal = si.access['ips'][choice] + ':3260' iqn = si.access['iqn'] if multipath: portals = [p + ':3260' for p in si.access['ips']] iqns = [iqn for _ in si.access['ips']] lunids = [self._get_lunid() for _ in si.access['ips']] result = { 'driver_volume_type': 'iscsi', 'data': { 'target_discovered': False, 'target_iqn': iqn, 'target_iqns': iqns, 'target_portal': portal, 'target_portals': portals, 'target_lun': self._get_lunid(), 'target_luns': lunids, 'volume_id': volume['id'], 'discard': False}} else: result = { 'driver_volume_type': 'iscsi', 'data': { 'target_discovered': False, 'target_iqn': iqn, 'target_portal': portal, 'target_lun': self._get_lunid(), 'volume_id': volume['id'], 'discard': False}} if self.use_chap_auth: result['data'].update( auth_method="CHAP", auth_username=self.chap_username, auth_password=self.chap_password) return result # ================= # = Create Export = # ================= def _create_export_2_2(self, context, volume, connector): tenant = self.get_tenant(volume['project_id']) ai = self.cvol_to_ai(volume, tenant=tenant) data = { 'admin_state': 'offline', 'force': True } ai.set(tenant=tenant, **data) si = ai.storage_instances.list(tenant=tenant)[0] policies = 
self._get_policies_for_resource(volume) if connector and connector.get('ip'): # Case where volume_type has non default IP Pool info ip_pool = datc.get_ip_pool(policies) if ip_pool != 'default': initiator_ip_pool_path = self.api.access_network_ip_pools.get( ip_pool).path # Fallback to trying reasonable IP based guess else: initiator_ip_pool_path = self._get_ip_pool_for_string_ip_2_2( connector['ip'], tenant) ip_pool_data = {'ip_pool': {'path': initiator_ip_pool_path}} if not ai.app_template["path"]: si.set(tenant=tenant, **ip_pool_data) data = { 'admin_state': 'online' } ai.set(tenant=tenant, **data) # Check if we've already setup everything for this volume storage_instances = ai.storage_instances.list(tenant=tenant) # Handle adding initiator to product if necessary # Then add initiator to ACL if connector and connector.get('initiator'): initiator_name = "OpenStack-{}".format(str(uuid.uuid4())[:8]) initiator = connector['initiator'] dinit = None try: # We want to make sure the initiator is created under the # current tenant rather than using the /root one dinit = self.api.initiators.get(initiator, tenant=tenant) if dinit.tenant != tenant: raise dexceptions.ApiNotFoundError( "Initiator {} was not found under tenant {} " "[{} != {}]".format( initiator, tenant, dinit.tenant, tenant)) except dexceptions.ApiNotFoundError: # TODO(_alastor_): Take out the 'force' flag when we fix # DAT-15931 data = {'id': initiator, 'name': initiator_name, 'force': True} # Try and create the initiator # If we get a conflict, ignore it try: dinit = self.api.initiators.create(tenant=tenant, **data) except dexceptions.ApiConflictError: pass initiator_path = dinit['path'] # Create ACL with initiator group as reference for each # storage_instance in app_instance # TODO(_alastor_): We need to avoid changing the ACLs if the # template already specifies an ACL policy. for si in storage_instances: existing_acl = si.acl_policy.get(tenant=tenant) data = {} # Grabbing only the 'path' key from each existing initiator # within the existing acl. eacli --> existing acl initiator eacli = [] for acl in existing_acl['initiators']: nacl = {} nacl['path'] = acl['path'] eacli.append(nacl) data['initiators'] = eacli data['initiators'].append({"path": initiator_path}) # Grabbing only the 'path' key from each existing initiator # group within the existing acl. eaclig --> existing # acl initiator group eaclig = [] for acl in existing_acl['initiator_groups']: nacl = {} nacl['path'] = acl['path'] eaclig.append(nacl) data['initiator_groups'] = eaclig si.acl_policy.set(tenant=tenant, **data) if self.use_chap_auth: for si in storage_instances: data = {'type': 'chap', 'target_user_name': self.chap_username, 'target_pswd': self.chap_password} si.auth.set(tenant=tenant, **data) # Check to ensure we're ready for go-time self._si_poll_2_2(volume, si, tenant) self._add_vol_meta_2_2(volume, connector=connector) # ================= # = Detach Volume = # ================= def _detach_volume_2_2(self, context, volume, attachment=None): try: tenant = self.get_tenant(volume['project_id']) ai = self.cvol_to_ai(volume, tenant=tenant) # Clear out ACL for this specific attachment si = ai.storage_instances.list(tenant=tenant)[0] existing_acl = si.acl_policy.get(tenant=tenant) data = {} # Grabbing only the 'path' key from each existing initiator # within the existing acl. 
eacli --> existing acl initiator eacli = [] for acl in existing_acl['initiators']: if ( attachment is not None and attachment.connector is not None and acl['path'].split('/')[-1] == attachment.connector['initiator'] ): continue nacl = {} nacl['path'] = acl['path'] eacli.append(nacl) data['initiators'] = eacli data['initiator_groups'] = existing_acl['initiator_groups'] si.acl_policy.set(tenant=tenant, **data) if not eacli: # bring the application instance offline if there # are no initiators left. data = { 'admin_state': 'offline', 'force': True } ai.set(tenant=tenant, **data) except exception.NotFound: msg = ("Tried to detach volume %s, but it was not found in the " "Datera cluster. Continuing with detach.") LOG.info(msg, volume['id']) # =================== # = Create Snapshot = # =================== def _create_snapshot_2_2(self, snapshot): dummy_vol = {'id': snapshot['volume_id'], 'project_id': snapshot['project_id']} tenant = self.get_tenant(dummy_vol['project_id']) dvol = self.cvol_to_dvol(dummy_vol, tenant=tenant) snap_params = { 'uuid': snapshot['id'], } snap = dvol.snapshots.create(tenant=tenant, **snap_params) self._snap_poll_2_2(snap, tenant) # =================== # = Delete Snapshot = # =================== def _delete_snapshot_2_2(self, snapshot): # Handle case where snapshot is "managed" dummy_vol = {'id': snapshot['volume_id'], 'project_id': snapshot['project_id']} tenant = self.get_tenant(dummy_vol['project_id']) dvol = self.cvol_to_dvol(dummy_vol, tenant=tenant) snapshots = None # Shortcut if this is a managed snapshot provider_location = snapshot.get('provider_location') if provider_location: snap = dvol.snapshots.get(provider_location, tenant=tenant) snap.delete(tenant=tenant) return # Long-way. UUID identification try: snapshots = dvol.snapshots.list(tenant=tenant) except exception.NotFound: msg = ("Tried to delete snapshot %s, but parent volume %s was " "not found in Datera cluster. Continuing with delete.") LOG.info(msg, datc.get_name(snapshot), datc.get_name({'id': snapshot['volume_id']})) return try: for snap in snapshots: if snap.uuid == snapshot['id']: snap.delete(tenant=tenant) break else: raise exception.NotFound except exception.NotFound: msg = ("Tried to delete snapshot %s, but was not found in " "Datera cluster. 
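# NOTE(illustrative): sketch of the detach-time ACL filtering in
# _detach_volume_2_2 above. The entry whose trailing path component matches
# the detaching connector's initiator IQN is dropped; every other attachment
# keeps its entry, and an empty result triggers the offline flip of the
# app_instance. IQNs are invented examples.
detaching_iqn = 'iqn.1993-08.org.debian:01:bbb'
existing_initiators = [
    {'path': '/initiators/iqn.1993-08.org.debian:01:aaa'},
    {'path': '/initiators/' + detaching_iqn},
]
remaining = [{'path': entry['path']} for entry in existing_initiators
             if entry['path'].split('/')[-1] != detaching_iqn]
# remaining now only holds the ':aaa' initiator.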
Continuing with delete.") LOG.info(msg, datc.get_name(snapshot)) # ======================== # = Volume From Snapshot = # ======================== def _create_volume_from_snapshot_2_2(self, volume, snapshot): # Handle case where snapshot is "managed" dummy_vol = {'id': snapshot['volume_id'], 'project_id': snapshot['project_id']} tenant = self.get_tenant(dummy_vol['project_id']) dvol = self.cvol_to_dvol(dummy_vol, tenant=tenant) found_snap = None provider_location = snapshot.get('provider_location') if provider_location: found_snap = dvol.snapshots.get(provider_location, tenant=tenant) else: snapshots = dvol.snapshots.list(tenant=tenant) for snap in snapshots: if snap.uuid == snapshot['id']: found_snap = snap break else: raise exception.SnapshotNotFound(snapshot_id=snapshot['id']) self._snap_poll_2_2(found_snap, tenant) src = found_snap.path app_params = ( { 'create_mode': 'openstack', 'uuid': str(volume['id']), 'name': datc.get_name(volume), 'clone_snapshot_src': {'path': src}, }) self.api.app_instances.create(tenant=tenant, **app_params) if (volume['size'] > snapshot['volume_size']): self._extend_volume_2_2(volume, volume['size']) self._add_vol_meta_2_2(volume) # ========== # = Retype = # ========== def _retype_2_2(self, ctxt, volume, new_type, diff, host): LOG.debug("Retype called\n" "Volume: %(volume)s\n" "NewType: %(new_type)s\n" "Diff: %(diff)s\n" "Host: %(host)s\n", {'volume': volume, 'new_type': new_type, 'diff': diff, 'host': host}) # We'll take the fast route only if the types share the same backend # And that backend matches this driver old_pol = self._get_policies_for_resource(volume) new_pol = self._get_policies_for_volume_type(new_type) if (host['capabilities']['volume_backend_name'].lower() == self.backend_name.lower()): LOG.debug("Starting fast volume retype") if old_pol.get('template') or new_pol.get('template'): LOG.warning( "Fast retyping between template-backed volume-types " "unsupported. 
Type1: %s, Type2: %s", volume['volume_type_id'], new_type) self._update_qos_2_2(volume, new_pol, clear_old=True) tenant = self.get_tenant(volume['project_id']) dvol = self.cvol_to_dvol(volume, tenant=tenant) # Only replica_count ip_pool requires offlining the app_instance if (new_pol['replica_count'] != old_pol['replica_count'] or new_pol['ip_pool'] != old_pol['ip_pool']): with self._offline_flip_2_2(volume): # ip_pool is Storage Instance level ai = self.cvol_to_ai(volume, tenant=tenant) si = ai.storage_instances.list(tenant=tenant)[0] ip_pool = datc.get_ip_pool(new_pol) si_params = ( { 'ip_pool': {'path': ('/access_network_ip_pools/' '{}'.format(ip_pool))}, }) si.set(tenant=tenant, **si_params) # placement_mode and replica_count are Volume level vol_params = ( { 'placement_mode': new_pol['placement_mode'], 'replica_count': new_pol['replica_count'], }) if datc.dat_version_gte(self.datera_version, '3.3.0.0'): ppolicy = {'path': '/placement_policies/{}'.format( new_pol.get('placement_policy'))} vol_params['placement_policy'] = ppolicy dvol.set(tenant=tenant, **vol_params) elif (new_pol['placement_mode'] != old_pol[ 'placement_mode'] or new_pol[ 'placement_policy'] != old_pol['placement_policy']): vol_params = ( { 'placement_mode': new_pol['placement_mode'], }) if datc.dat_version_gte(self.datera_version, '3.3.0.0'): ppolicy = {'path': '/placement_policies/{}'.format( new_pol.get('placement_policy'))} vol_params['placement_policy'] = ppolicy dvol.set(tenant=tenant, **vol_params) self._add_vol_meta_2_2(volume) return True else: LOG.debug("Couldn't fast-retype volume between specified types") return False # ========== # = Manage = # ========== def _manage_existing_2_2(self, volume, existing_ref): # Only volumes created under the requesting tenant can be managed in # the v2.1+ API. Eg. If tenant A is the tenant for the volume to be # managed, it must also be tenant A that makes this request. 
# This will be fixed in a later API update existing_ref = existing_ref['source-name'] app_inst_name, __, __, __ = datc._parse_vol_ref(existing_ref) LOG.debug("Managing existing Datera volume %s " "Changing name to %s", datc.get_name(volume), existing_ref) # Rename AppInstance dummy_vol = {'id': app_inst_name, 'project_id': volume['project_id']} tenant = self.get_tenant(volume['project_id']) ai = self.cvol_to_ai(dummy_vol, tenant=tenant) data = {'name': datc.get_name(volume)} ai.set(tenant=tenant, **data) self._add_vol_meta_2_2(volume) # =================== # = Manage Get Size = # =================== def _manage_existing_get_size_2_2(self, volume, existing_ref): existing_ref = existing_ref['source-name'] app_inst_name, storage_inst_name, vol_name, __ = datc._parse_vol_ref( existing_ref) dummy_vol = {'id': app_inst_name, 'project_id': volume['project_id']} dvol = self.cvol_to_dvol(dummy_vol) return dvol.size # ========================= # = Get Manageable Volume = # ========================= def _list_manageable_2_2(self, cinder_volumes): # Use the first volume to determine the tenant we're working under if cinder_volumes: tenant = self.get_tenant(cinder_volumes[0]['project_id']) else: tenant = None app_instances = self.api.app_instances.list(tenant=tenant) results = [] if cinder_volumes and 'volume_id' in cinder_volumes[0]: cinder_volume_ids = [vol['volume_id'] for vol in cinder_volumes] else: cinder_volume_ids = [vol['id'] for vol in cinder_volumes] for ai in app_instances: ai_name = ai['name'] reference = None size = None safe_to_manage = False reason_not_safe = "" cinder_id = None extra_info = {} (safe_to_manage, reason_not_safe, cinder_id) = self._is_manageable_2_2( ai, cinder_volume_ids, tenant) si = ai.storage_instances.list(tenant=tenant)[0] si_name = si.name vol = si.volumes.list(tenant=tenant)[0] vol_name = vol.name size = vol.size snaps = [(snap.utc_ts, snap.uuid) for snap in vol.snapshots.list(tenant=tenant)] extra_info["snapshots"] = json.dumps(snaps) reference = {"source-name": "{}:{}:{}".format( ai_name, si_name, vol_name)} results.append({ 'reference': reference, 'size': size, 'safe_to_manage': safe_to_manage, 'reason_not_safe': reason_not_safe, 'cinder_id': cinder_id, 'extra_info': extra_info}) return results def _get_manageable_volumes_2_2(self, cinder_volumes, marker, limit, offset, sort_keys, sort_dirs): LOG.debug("Listing manageable Datera volumes") results = self._list_manageable_2_2(cinder_volumes) page_results = volutils.paginate_entries_list( results, marker, limit, offset, sort_keys, sort_dirs) return page_results def _is_manageable_2_2(self, ai, cinder_volume_ids, tenant): cinder_id = None ai_name = ai.name match = datc.UUID4_RE.match(ai_name) if match: cinder_id = match.group(1) if cinder_id and cinder_id in cinder_volume_ids: return (False, "App Instance already managed by Cinder", cinder_id) if len(ai.storage_instances.list(tenant=tenant)) == 1: si = ai.storage_instances.list(tenant=tenant)[0] if len(si['volumes']) == 1: return (True, "", cinder_id) return (False, "App Instance has more than one storage instance or volume", cinder_id) # ============ # = Unmanage = # ============ def _unmanage_2_2(self, volume): LOG.debug("Unmanaging Cinder volume %s. 
Changing name to %s", volume['id'], datc.get_unmanaged(volume['id'])) data = {'name': datc.get_unmanaged(volume['id'])} tenant = self.get_tenant(volume['project_id']) ai = self.cvol_to_ai(volume, tenant=tenant) ai.set(tenant=tenant, **data) # =================== # = Manage Snapshot = # =================== def _manage_existing_snapshot_2_2(self, snapshot, existing_ref): existing_ref = existing_ref['source-name'] datc._check_snap_ref(existing_ref) LOG.debug("Managing existing Datera volume snapshot %s for volume %s", existing_ref, datc.get_name({'id': snapshot['volume_id']})) return {'provider_location': existing_ref} def _manage_existing_snapshot_get_size_2_2(self, snapshot, existing_ref): existing_ref = existing_ref['source-name'] datc._check_snap_ref(existing_ref) dummy_vol = {'id': snapshot['volume_id'], 'project_id': snapshot['project_id']} dvol = self.cvol_to_dvol(dummy_vol) return dvol.size def _get_manageable_snapshots_2_2(self, cinder_snapshots, marker, limit, offset, sort_keys, sort_dirs): LOG.debug("Listing manageable Datera snapshots") results = self._list_manageable_2_2(cinder_snapshots) snap_results = [] snapids = set((snap['id'] for snap in cinder_snapshots)) snaprefs = set((snap.get('provider_location') for snap in cinder_snapshots)) for volume in results: snaps = json.loads(volume["extra_info"]["snapshots"]) for snapshot in snaps: reference = snapshot[0] uuid = snapshot[1] size = volume["size"] safe_to_manage = True reason_not_safe = "" cinder_id = "" extra_info = {} source_reference = volume["reference"] if uuid in snapids or reference in snaprefs: safe_to_manage = False reason_not_safe = _("already managed by Cinder") elif not volume['safe_to_manage'] and not volume['cinder_id']: safe_to_manage = False reason_not_safe = _("parent volume not safe to manage") snap_results.append({ 'reference': {'source-name': reference}, 'size': size, 'safe_to_manage': safe_to_manage, 'reason_not_safe': reason_not_safe, 'cinder_id': cinder_id, 'extra_info': extra_info, 'source_reference': source_reference}) page_results = volutils.paginate_entries_list( snap_results, marker, limit, offset, sort_keys, sort_dirs) return page_results def _unmanage_snapshot_2_2(self, snapshot): return {'provider_location': None} # ==================== # = Fast Image Clone = # ==================== def _clone_image_2_2(self, context, volume, image_location, image_meta, image_service): # We're not going to fast image clone if the feature is not enabled # and/or we can't reach the image being requested if (not self.image_cache or not self._image_accessible(context, volume, image_meta)): return None, False # Check to make sure we're working with a valid volume type try: found = volume_types.get_volume_type(context, self.image_type) except (exception.VolumeTypeNotFound, exception.InvalidVolumeType): found = None if not found: msg = "Invalid volume type: %s" LOG.error(msg, self.image_type) raise ValueError(_("Option datera_image_cache_volume_type_id must" " be set to a valid volume_type id")) # Check image format fmt = image_meta.get('disk_format', '') if fmt.lower() != 'raw': LOG.debug("Image format is not RAW, image requires conversion " "before clone. 
Image format: [%s]", fmt) return None, False LOG.debug("Starting fast image clone") # TODO(_alastor_): determine if Datera is already an image backend # for this request and direct clone instead of caching # Dummy volume, untracked by Cinder src_vol = {'id': image_meta['id'], 'volume_type_id': self.image_type, 'size': volume['size'], 'project_id': volume['project_id']} # Determine if we have a cached version of the image cached = self._vol_exists_2_2(src_vol) if cached: tenant = self.get_tenant(src_vol['project_id']) ai = self.cvol_to_ai(src_vol, tenant=tenant) metadata = ai.metadata.get(tenant=tenant) # Check to see if the master image has changed since we created # The cached version ts = self._get_vol_timestamp_2_2(src_vol) mts = time.mktime(image_meta['updated_at'].timetuple()) LOG.debug("Original image timestamp: %s, cache timestamp %s", mts, ts) # If the image is created by Glance, we'll trust that even if the # timestamps don't match up, the data is ok to clone as it's not # managed by this driver if metadata.get('type') == 'image': LOG.debug("Found Glance volume-backed image for %s", src_vol['id']) # If the master image time is greater than the volume creation # time, we invalidate the cache and delete the volume. The # exception is if the cached volume was created by Glance. We # NEVER want to delete this volume. It's annotated with # 'type': 'image' in the metadata, so we'll check for that elif mts > ts and metadata.get('type') != 'image': LOG.debug("Cache is older than original image, deleting cache") cached = False self._delete_volume_2_2(src_vol) # If we don't have the image, we'll cache it if not cached: LOG.debug("No image cache found for: %s, caching image", image_meta['id']) self._cache_vol_2_2(context, src_vol, image_meta, image_service) # Now perform the clone of the found image or newly cached image self._create_cloned_volume_2_2(volume, src_vol) # Force volume resize vol_size = volume['size'] volume['size'] = 0 self._extend_volume_2_2(volume, vol_size) volume['size'] = vol_size # Determine if we need to retype the newly created volume vtype_id = volume.get('volume_type_id') if vtype_id and self.image_type and vtype_id != self.image_type: vtype = volume_types.get_volume_type(context, vtype_id) LOG.debug("Retyping newly cloned volume from type: %s to type: %s", self.image_type, vtype_id) diff, discard = volume_types.volume_types_diff( context, self.image_type, vtype_id) host = {'capabilities': {'vendor_name': self.backend_name}} self._retype_2_2(context, volume, vtype, diff, host) return None, True def _cache_vol_2_2(self, context, vol, image_meta, image_service): image_id = image_meta['id'] # Pull down image and determine if valid with image_utils.TemporaryImages.fetch(image_service, context, image_id) as tmp_image: data = image_utils.qemu_img_info(tmp_image) fmt = data.file_format if fmt is None: raise exception.ImageUnacceptable( reason=_("'qemu-img info' parsing failed."), image_id=image_id) backing_file = data.backing_file if backing_file is not None: raise exception.ImageUnacceptable( image_id=image_id, reason=_("fmt=%(fmt)s backed by:%(backing_file)s") % {'fmt': fmt, 'backing_file': backing_file, }) vsize = int( math.ceil(float(data.virtual_size) / units.Gi)) vol['size'] = vsize vtype = vol['volume_type_id'] LOG.info("Creating cached image with volume type: %(vtype)s and " "size %(size)s", {'vtype': vtype, 'size': vsize}) self._create_volume_2_2(vol) with self._connect_vol(context, vol) as device: LOG.debug("Moving image %s to volume %s", image_meta['id'], 
datc.get_name(vol)) image_utils.convert_image(tmp_image, device, 'raw', run_as_root=True) LOG.debug("Finished moving image %s to volume %s", image_meta['id'], datc.get_name(vol)) data = image_utils.qemu_img_info(device, run_as_root=True) if data.file_format != 'raw': raise exception.ImageUnacceptable( image_id=image_id, reason=_( "Converted to %(vol_format)s, but format is " "now %(file_format)s") % { 'vol_format': 'raw', 'file_format': data.file_format}) # TODO(_alastor_): Remove this snapshot creation when we fix # "created_at" attribute in the frontend # We don't actually care about the snapshot uuid, we just want # a single snapshot snapshot = {'id': str(uuid.uuid4()), 'volume_id': vol['id'], 'project_id': vol['project_id']} self._create_snapshot_2_2(snapshot) metadata = {'type': 'cached_image'} tenant = self.get_tenant(vol['project_id']) ai = self.cvol_to_ai(vol, tenant=tenant) ai.metadata.set(tenant=tenant, **metadata) # Cloning offline AI is ~4 seconds faster than cloning online AI self._detach_volume_2_2(None, vol) def _get_vol_timestamp_2_2(self, volume): tenant = self.get_tenant(volume['project_id']) dvol = self.cvol_to_dvol(volume, tenant=tenant) snapshots = dvol.snapshots.list(tenant=tenant) if len(snapshots) == 1: return float(snapshots[0].utc_ts) else: # We'll return 0 if we find no snapshots (or the incorrect number) # to ensure the timestamp comparison with the master copy fails # since the master copy will always have a timestamp > 0. LOG.debug("Number of snapshots found: %s", len(snapshots)) return 0 def _vol_exists_2_2(self, volume): LOG.debug("Checking if volume %s exists", volume['id']) try: ai = self.cvol_to_ai(volume) LOG.debug("Volume %s exists", volume['id']) return ai except exception.NotFound: LOG.debug("Volume %s not found", volume['id']) return None @contextlib.contextmanager def _connect_vol(self, context, vol): connector = None try: # Start connection, get the connector object and create the # export (ACL, IP-Pools, etc) conn = self._initialize_connection_2_2( vol, {'multipath': False}) connector = volutils.brick_get_connector( conn['driver_volume_type'], use_multipath=False, device_scan_attempts=10, conn=conn) connector_info = {'initiator': connector.get_initiator()} self._create_export_2_2(None, vol, connector_info) retries = 10 attach_info = conn['data'] while True: try: attach_info.update( connector.connect_volume(conn['data'])) break except brick_exception.FailedISCSITargetPortalLogin: retries -= 1 if not retries: LOG.error("Could not log into portal before end of " "polling period") raise LOG.debug("Failed to login to portal, retrying") eventlet.sleep(2) device_path = attach_info['path'] yield device_path finally: # Close target connection if connector: # Best effort disconnection try: connector.disconnect_volume(attach_info, attach_info) except Exception: pass # =========== # = Polling = # =========== def _snap_poll_2_2(self, snap, tenant): eventlet.sleep(datc.DEFAULT_SNAP_SLEEP) TIMEOUT = 20 retry = 0 poll = True while poll and not retry >= TIMEOUT: retry += 1 snap = snap.reload(tenant=tenant) if snap.op_state == 'available': poll = False else: eventlet.sleep(1) if retry >= TIMEOUT: raise exception.VolumeDriverException( message=_('Snapshot not ready.')) def _si_poll_2_2(self, volume, si, tenant): # Initial 4 second sleep required for some Datera versions eventlet.sleep(datc.DEFAULT_SI_SLEEP) TIMEOUT = 10 retry = 0 poll = True while poll and not retry >= TIMEOUT: retry += 1 si = si.reload(tenant=tenant) if si.op_state == 'available': poll = False 
else: eventlet.sleep(1) if retry >= TIMEOUT: raise exception.VolumeDriverException( message=_('Resource not ready.')) # ================ # = Volume Stats = # ================ def _get_volume_stats_2_2(self, refresh=False): # cluster_stats is defined by datera_iscsi # pylint: disable=access-member-before-definition if refresh or not self.cluster_stats: try: LOG.debug("Updating cluster stats info.") results = self.api.system.get() self.datera_version = results.sw_version if 'uuid' not in results: LOG.error( 'Failed to get updated stats from Datera Cluster.') stats = { 'volume_backend_name': self.backend_name, 'vendor_name': 'Datera', 'driver_version': self.VERSION, 'storage_protocol': constants.ISCSI, 'total_capacity_gb': ( int(results.total_capacity) / units.Gi), 'free_capacity_gb': ( int(results.available_capacity) / units.Gi), 'total_flash_capacity_gb': ( int(results.all_flash_total_capacity) / units.Gi), 'total_hybrid_capacity_gb': ( int(results.hybrid_total_capacity) / units.Gi), 'free_flash_capacity_gb': ( int(results.all_flash_available_capacity) / units.Gi), 'free_hybrid_capacity_gb': ( int(results.hybrid_available_capacity) / units.Gi), 'reserved_percentage': 0, 'QoS_support': True, 'compression': results.get('compression_enabled', False), 'compression_ratio': results.get('compression_ratio', '0'), 'l3_enabled': results.get('l3_enabled', False), 'filter_function': self.filterf, 'goodness_function': self.goodnessf } self.cluster_stats = stats except exception.DateraAPIException: LOG.error('Failed to get updated stats from Datera cluster.') return self.cluster_stats # ======= # = QoS = # ======= def _update_qos_2_2(self, volume, policies, clear_old=False): tenant = self.get_tenant(volume['project_id']) dvol = self.cvol_to_dvol(volume, tenant=tenant) type_id = volume.get('volume_type_id', None) if type_id is not None: iops_per_gb = int(policies.get('iops_per_gb', 0)) bandwidth_per_gb = int(policies.get('bandwidth_per_gb', 0)) # Filter for just QOS policies in result. 
All of their keys # should end with "max" fpolicies = {k: int(v) for k, v in policies.items() if k.endswith("max")} # Filter all 0 values from being passed fpolicies = {k: int(v) for k, v in fpolicies.items() if v > 0} # Calculate and set iops/gb and bw/gb, but only if they don't # exceed total_iops_max and total_bw_max aren't set since they take # priority if iops_per_gb: ipg = iops_per_gb * volume['size'] # Not using zero, because zero means unlimited im = fpolicies.get('total_iops_max', 1) r = ipg if ipg > im: r = im fpolicies['total_iops_max'] = r if bandwidth_per_gb: bpg = bandwidth_per_gb * volume['size'] # Not using zero, because zero means unlimited bm = fpolicies.get('total_bandwidth_max', 1) r = bpg if bpg > bm: r = bm fpolicies['total_bandwidth_max'] = r if fpolicies or clear_old: try: pp = dvol.performance_policy.get(tenant=tenant) pp.delete(tenant=tenant) except dexceptions.ApiNotFoundError: LOG.debug("No existing performance policy found") if fpolicies: dvol.performance_policy.create(tenant=tenant, **fpolicies) # ============ # = IP Pools = # ============ def _get_ip_pool_for_string_ip_2_2(self, ip, tenant): """Takes a string ipaddress and return the ip_pool API object dict """ pool = 'default' ip_obj = ipaddress.ip_address(str(ip)) ip_pools = self.api.access_network_ip_pools.list(tenant=tenant) for ipdata in ip_pools: for adata in ipdata['network_paths']: if not adata.get('start_ip'): continue pool_if = ipaddress.ip_interface( "/".join((adata['start_ip'], str(adata['netmask'])))) if ip_obj in pool_if.network: pool = ipdata.name return self.api.access_network_ip_pools.get(pool, tenant=tenant).path # ==================== # = Volume Migration = # ==================== def _update_migrated_volume_2_2(self, context, volume, new_volume, volume_status): """Rename the newly created volume to the original volume. So we can find it correctly. """ tenant = self.get_tenant(new_volume['project_id']) ai = self.cvol_to_ai(new_volume, tenant=tenant) data = {'name': datc.get_name(volume)} ai.set(tenant=tenant, **data) return {'_name_id': None} @contextlib.contextmanager def _offline_flip_2_2(self, volume): reonline = False tenant = self.get_tenant(volume['project_id']) ai = self.cvol_to_ai(volume, tenant=tenant) if ai.admin_state == 'online': reonline = True ai.set(tenant=tenant, admin_state='offline') yield if reonline: ai.set(tenant=tenant, admin_state='online') def _add_vol_meta_2_2(self, volume, connector=None): if not self.do_metadata: return metadata = {'host': volume.get('host', ''), 'display_name': datc.filter_chars( volume.get('display_name', '')), 'bootable': str(volume.get('bootable', False)), 'availability_zone': volume.get('availability_zone', '')} if connector: metadata.update(connector) LOG.debug("Adding volume metadata: %s", metadata) tenant = self.get_tenant(volume['project_id']) ai = self.cvol_to_ai(volume, tenant=tenant) ai.metadata.set(tenant=tenant, **metadata) def _support_template_override_2_2(self): # Getting the whole api schema is expensive # so we only want to do this once per driver # instantiation. 
if not self.template_override: return False if not hasattr(self, '_to_22'): api = self.api.api.get() prop = api['/app_instances']['create']['bodyParamSchema'][ 'properties'] self._to_22 = 'template_override' in prop return self._to_22 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/drivers/datera/datera_common.py0000664000175000017500000002715100000000000023750 0ustar00zuulzuul00000000000000# Copyright 2020 Datera # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import functools import random import re import string import time import types import uuid from glanceclient import exc as glance_exc from oslo_log import log as logging from oslo_utils import importutils from cinder import context from cinder import exception from cinder.i18n import _ from cinder.image import glance from cinder.volume import qos_specs from cinder.volume import volume_types LOG = logging.getLogger(__name__) dfs_sdk = importutils.try_import('dfs_sdk') OS_PREFIX = "OS" UNMANAGE_PREFIX = "UNMANAGED" # Taken from this SO post : # http://stackoverflow.com/a/18516125 # Using old-style string formatting because of the nature of the regex # conflicting with new-style curly braces UUID4_STR_RE = ("%s.*([a-f0-9]{8}-?[a-f0-9]{4}-?4[a-f0-9]{3}-?[89ab]" "[a-f0-9]{3}-?[a-f0-9]{12})") UUID4_RE = re.compile(UUID4_STR_RE % OS_PREFIX) SNAP_RE = re.compile(r"\d{10,}\.\d+") # Recursive dict to assemble basic url structure for the most common # API URL endpoints. Most others are constructed from these DEFAULT_SI_SLEEP = 1 DEFAULT_SI_SLEEP_API_2 = 5 DEFAULT_SNAP_SLEEP = 1 API_VERSIONS = ["2.1", "2.2"] API_TIMEOUT = 20 VALID_CHARS = set(string.ascii_letters + string.digits + "-_.") class DateraAPIException(exception.VolumeBackendAPIException): message = _("Bad response from Datera API") def get_name(resource): dn = resource.get('display_name') cid = resource.get('id') if dn: dn = filter_chars(dn) # Check to ensure the name is short enough to fit. 
Prioritize # the prefix and Cinder ID, strip all invalid characters nl = len(OS_PREFIX) + len(dn) + len(cid) + 2 if nl >= 64: dn = dn[:-(nl - 63)] return "-".join((OS_PREFIX, dn, cid)) return "-".join((OS_PREFIX, cid)) def get_unmanaged(name): return "-".join((UNMANAGE_PREFIX, name)) def filter_chars(s): if s: return ''.join([c for c in s if c in VALID_CHARS]) return s def lookup(func): @functools.wraps(func) def wrapper(*args, **kwargs): obj = args[0] name = "_" + func.__name__ + "_" + obj.apiv.replace(".", "_") LOG.debug("Trying method: %s", name) call_id = uuid.uuid4() if obj.do_profile: LOG.debug("Profiling method: %s, id %s", name, call_id) t1 = time.time() obj.thread_local.trace_id = call_id result = getattr(obj, name)(*args[1:], **kwargs) if obj.do_profile: t2 = time.time() timedelta = round(t2 - t1, 3) LOG.debug("Profile for method %s, id %s: %ss", name, call_id, timedelta) return result return wrapper def _parse_vol_ref(ref): if ref.count(":") not in (2, 3): raise exception.ManageExistingInvalidReference( _("existing_ref argument must be of this format: " "tenant:app_inst_name:storage_inst_name:vol_name or " "app_inst_name:storage_inst_name:vol_name")) try: (tenant, app_inst_name, storage_inst_name, vol_name) = ref.split(":") if tenant == "root": tenant = None except (TypeError, ValueError): app_inst_name, storage_inst_name, vol_name = ref.split( ":") tenant = None return app_inst_name, storage_inst_name, vol_name, tenant def _check_snap_ref(ref): if not SNAP_RE.match(ref): raise exception.ManageExistingInvalidReference( _("existing_ref argument must be of this format: " "1234567890.12345678")) return True def _get_size(app_inst): """Helper method for getting the size of a backend object If app_inst is provided, we'll just parse the dict to get the size instead of making a separate http request """ if 'data' in app_inst: app_inst = app_inst['data'] sis = app_inst['storage_instances'] found_si = sis[0] found_vol = found_si['volumes'][0] return found_vol['size'] def _get_volume_type_obj(driver, resource): type_id = resource.get('volume_type_id', None) # Handle case of volume with no type. We still want the # specified defaults from above if type_id: ctxt = context.get_admin_context() volume_type = volume_types.get_volume_type(ctxt, type_id) else: volume_type = None return volume_type def _get_policies_for_resource(driver, resource): volume_type = driver._get_volume_type_obj(resource) return driver._get_policies_for_volume_type(volume_type) def _get_policies_for_volume_type(driver, volume_type): """Get extra_specs and qos_specs of a volume_type. This fetches the scoped keys from the volume type. Anything set from qos_specs will override key/values set from extra_specs. """ # Handle case of volume with no type. 
We still want the # specified defaults from above if volume_type: specs = volume_type.get('extra_specs', {}) else: specs = {} # Set defaults: policies = {k.lstrip('DF:'): str(v['default']) for (k, v) in driver._init_vendor_properties()[0].items()} if volume_type: qos_specs_id = volume_type.get('qos_specs_id') if qos_specs_id is not None: ctxt = context.get_admin_context() qos_kvs = qos_specs.get_qos_specs(ctxt, qos_specs_id)['specs'] if qos_kvs: specs.update(qos_kvs) # Populate updated value for key, value in specs.items(): if ':' in key: fields = key.split(':') key = fields[1] policies[key] = value # Cast everything except booleans int that can be cast for k, v in policies.items(): # Handle String Boolean case if v == 'True' or v == 'False': policies[k] = policies[k] == 'True' continue # Int cast try: policies[k] = int(v) except ValueError: pass return policies def _image_accessible(driver, context, volume, image_meta): # Determine if image is accessible by current project pid = volume.get('project_id', '') public = False visibility = image_meta.get('visibility', None) LOG.debug("Image %(image)s visibility: %(vis)s", {"image": image_meta['id'], "vis": visibility}) if visibility and visibility in ['public', 'community']: public = True elif visibility and visibility in ['shared', 'private']: # Do membership check. Newton and before didn't have a 'shared' # visibility option, so we have to do this check for 'private' # as well gclient = glance.get_default_image_service() members = [] # list_members is only available in Rocky+ try: members = gclient.list_members(context, image_meta['id']) except AttributeError: # This is the fallback method for the same query try: members = gclient._client.call(context, 'list', controller='image_members', image_id=image_meta['id']) except glance_exc.HTTPForbidden as e: LOG.warning(e) except glance_exc.HTTPForbidden as e: LOG.warning(e) members = list(members) LOG.debug("Shared image %(image)s members: %(members)s", {"image": image_meta['id'], "members": members}) for member in members: if (member['member_id'] == pid and member['status'] == 'accepted'): public = True break if image_meta.get('is_public', False): public = True else: if image_meta.get('owner', '') == pid: public = True if not public: LOG.warning("Requested image is not " "accessible by current Project.") return public def _format_tenant(tenant): if tenant == "all" or (tenant and ('/root' in tenant or 'root' in tenant)): return '/root' elif tenant and ('/root' not in tenant and 'root' not in tenant): return "/" + "/".join(('root', tenant)).strip('/') return tenant def get_ip_pool(policies): ip_pool = policies['ip_pool'] if ',' in ip_pool: ip_pools = ip_pool.split(',') ip_pool = random.choice(ip_pools) return ip_pool def create_tenant(driver, project_id): if driver.tenant_id.lower() == 'map': name = get_name({'id': project_id}) elif driver.tenant_id: name = driver.tenant_id.replace('root', '').strip('/') else: name = 'root' if name: try: driver.api.tenants.create(name=name) except dfs_sdk.exceptions.ApiConflictError: LOG.debug("Tenant %s already exists", name) return _format_tenant(name) def get_tenant(driver, project_id): if driver.tenant_id.lower() == 'map': return _format_tenant(get_name({'id': project_id})) elif not driver.tenant_id: return _format_tenant('root') return _format_tenant(driver.tenant_id) def cvol_to_ai(driver, resource, tenant=None): if not tenant: tenant = get_tenant(driver, resource['project_id']) try: # api.tenants.get needs a non '/'-prefixed tenant id 
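# As an illustration only (assuming a datera_tenant_id of "project-x" and the
# _format_tenant() helper above): get_tenant() normalises that value to
# "/root/project-x", and the strip('/') below hands it to api.tenants.get()
# as "root/project-x".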
driver.api.tenants.get(tenant.strip('/')) except dfs_sdk.exceptions.ApiNotFoundError: create_tenant(driver, resource['project_id']) cid = resource.get('id', None) if not cid: raise ValueError('Unsure what id key to use for object', resource) ais = driver.api.app_instances.list( filter='match(name,.*{}.*)'.format(cid), tenant=tenant) if not ais: raise exception.VolumeNotFound(volume_id=cid) return ais[0] def cvol_to_dvol(driver, resource, tenant=None): if not tenant: tenant = get_tenant(driver, resource['project_id']) ai = cvol_to_ai(driver, resource, tenant=tenant) si = ai.storage_instances.list(tenant=tenant)[0] vol = si.volumes.list(tenant=tenant)[0] return vol def _version_to_int(ver): # Using a factor of 100 per digit so up to 100 versions are supported # per major/minor/patch/subpatch digit in this calculation # Example: # In [2]: _version_to_int("3.3.0.0") # Out[2]: 303000000 # In [3]: _version_to_int("2.2.7.1") # Out[3]: 202070100 VERSION_DIGITS = 4 factor = pow(10, VERSION_DIGITS * 2) div = pow(10, 2) val = 0 for c in ver.split("."): val += int(int(c) * factor) factor /= div return val def dat_version_gte(version_a, version_b): return _version_to_int(version_a) >= _version_to_int(version_b) def register_driver(driver): for func in [_get_volume_type_obj, _get_policies_for_resource, _get_policies_for_volume_type, _image_accessible, get_tenant, create_tenant, cvol_to_ai, cvol_to_dvol]: f = types.MethodType(func, driver) setattr(driver, func.__name__, f) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/drivers/datera/datera_iscsi.py0000664000175000017500000010006400000000000023565 0ustar00zuulzuul00000000000000# Copyright 2020 Datera # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
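# Dispatch overview (illustrative sketch, not additional driver logic): the
# public driver methods in this module are thin shims decorated with
# @datc.lookup, which resolves each call to a private, API-version-specific
# implementation named "_<method>_<apiv>" provided by the api21/api22 mixins.
# Assuming do_setup() negotiated the 2.2 API:
#
#     driver.create_volume(volume)  ->  self._create_volume_2_2(volume)
#     driver.unmanage(volume)       ->  self._unmanage_2_2(volume)
#
# which is why the public method bodies below consist only of docstrings and
# "pass".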
import time import uuid from eventlet.green import threading from oslo_config import cfg from oslo_log import log as logging from oslo_utils import importutils from cinder import exception from cinder.i18n import _ from cinder import interface import cinder.volume.drivers.datera.datera_api21 as api21 import cinder.volume.drivers.datera.datera_api22 as api22 import cinder.volume.drivers.datera.datera_common as datc from cinder.volume.drivers.san import san from cinder.volume import volume_utils LOG = logging.getLogger(__name__) dfs_sdk = importutils.try_import('dfs_sdk') d_opts = [ cfg.StrOpt('datera_api_port', default='7717', deprecated_for_removal=True, help='Datera API port.'), cfg.StrOpt('datera_api_version', default='2.2', deprecated_for_removal=True, help='Datera API version.'), cfg.StrOpt('datera_ldap_server', default=None, help='LDAP authentication server'), cfg.IntOpt('datera_503_timeout', default='120', help='Timeout for HTTP 503 retry messages'), cfg.IntOpt('datera_503_interval', default='5', help='Interval between 503 retries'), cfg.BoolOpt('datera_debug', default=False, help="True to set function arg and return logging"), cfg.BoolOpt('datera_debug_replica_count_override', default=False, help="ONLY FOR DEBUG/TESTING PURPOSES\n" "True to set replica_count to 1"), cfg.StrOpt('datera_tenant_id', default=None, help="If set to 'Map' --> OpenStack project ID will be mapped " "implicitly to Datera tenant ID\n" "If set to None --> Datera tenant ID will not be used " "during volume provisioning\n" "If set to anything else --> Datera tenant ID will be the " "provided value"), cfg.BoolOpt('datera_enable_image_cache', default=False, help="Set to True to enable Datera backend image caching"), cfg.StrOpt('datera_image_cache_volume_type_id', default=None, help="Cinder volume type id to use for cached volumes"), cfg.BoolOpt('datera_disable_profiler', default=False, help="Set to True to disable profiling in the Datera driver"), cfg.BoolOpt('datera_disable_extended_metadata', default=False, help="Set to True to disable sending additional metadata to " "the Datera backend"), cfg.BoolOpt('datera_disable_template_override', default=False, help="Set to True to disable automatic template override of " "the size attribute when creating from a template"), cfg.DictOpt('datera_volume_type_defaults', default={}, help="Settings here will be used as volume-type defaults if " "the volume-type setting is not provided. This can be " "used, for example, to set a very low total_iops_max " "value if none is specified in the volume-type to " "prevent accidental overusage. Options are specified " "via the following format, WITHOUT ANY 'DF:' PREFIX: " "'datera_volume_type_defaults=" "iops_per_gb:100,bandwidth_per_gb:200...etc'."), ] CONF = cfg.CONF CONF.import_opt('driver_use_ssl', 'cinder.volume.driver') CONF.register_opts(d_opts) @interface.volumedriver class DateraDriver(san.SanISCSIDriver, api21.DateraApi, api22.DateraApi, metaclass=volume_utils.TraceWrapperWithABCMetaclass): """The OpenStack Datera iSCSI volume driver. .. code-block:: none Version history: * 1.0 - Initial driver * 1.1 - Look for lun-0 instead of lun-1. 
* 2.0 - Update For Datera API v2 * 2.1 - Multipath, ACL and reorg * 2.2 - Capabilites List, Extended Volume-Type Support Naming convention change, Volume Manage/Unmanage support * 2.3 - Templates, Tenants, Snapshot Polling, 2.1 Api Version Support, Restructure * 2.3.1 - Scalability bugfixes * 2.3.2 - Volume Placement, ACL multi-attach bugfix * 2.4.0 - Fast Retype Support * 2.5.0 - Glance Image Caching, retyping/QoS bugfixes * 2.6.0 - Api 2.2 support * 2.6.1 - Glance interoperability fix * 2.7.0 - IOPS/GB and BW/GB settings, driver level overrides (API 2.1+ only) * 2.7.2 - Allowing DF: QoS Spec prefix, QoS type leak bugfix * 2.7.3 - Fixed bug in clone_image where size was not set correctly * 2.7.4 - Fix for create_tenant incorrect API call Temporary fix for DAT-15931 * 2.7.5 - Removed "force" parameter from /initiators v2.1 API requests * 2.8.0 - iops_per_gb and bandwidth_per_gb are now limited by total_iops_max and total_bandwidth_max (API 2.1+ only) Bugfix for cinder retype with online volume * 2.8.1 - Bugfix for missing default dict during retype * 2.8.2 - Updated most retype operations to not detach volume * 2.8.3 - Bugfix for not allowing fast clones for shared/community volumes * 2.8.4 - Fixed missing API version pinning in _offline_flip * 2.8.5 - Membership check for fast image cloning. Metadata API pinning * 2.8.6 - Added LDAP support and CHAP support * 2.8.7 - Bugfix for missing tenancy calls in offline_flip * 2.9.0 - Volumes now correctly renamed during backend migration. Implemented update_migrated_volume (API 2.1+ only), Prevent non-raw image cloning * 2.9.1 - Added extended metadata attributes during volume creation and attachment. Added datera_disable_extended_metadata option to disable it. * 2.9.2 - Made ensure_export a no-op. Removed usage of initiator-groups * 2018.4.5.0 - Switch to new date-based versioning scheme. Removed v2 API support * 2018.4.17.1 - Bugfixes to IP Pools, Templates and Initiators * 2018.4.25.0 - Snapshot Manage. List Manageable Snapshots support * 2018.4.27.0 - Major driver revamp/restructure, no functionality change * 2018.5.1.0 - Bugfix for Map tenant auto-creation * 2018.5.18.0 - Bugfix for None tenant handling * 2018.6.7.0 - Bugfix for missing project_id during image clone * 2018.7.13.0 - Massive update porting to use the Datera Python-SDK * 2018.7.20.0 - Driver now includes display_name in created backend app_instances. * 2018.9.17.0 - Requirements and doc changes * 2018.10.8.0 - Added extra_headers to Python-SDK constructor call. This allows for the SDK to send the type of driver performing each request along with the request. This functionality existed before the Python-SDK revamp, so this change adds the functionality back in. * 2018.10.8.1 - Adding thread_local to Python-SDK constructor call. This preserves trace_id in the logs * 2018.10.30.0 - Adding template_override support. Added datera_disable_template_override cfgOpt to disable this feature. Updated required requests version to >=2.20.0 because of a security vulnerability in <=2.19.X. Added support for filter_function and goodness_function. 
* 2018.11.1.0 - Adding flash and hybrid capacity info to get_volume_stats * 2018.11.8.0 - Fixing bug that broke 2.2.X support * 2018.11.14.0 - Bugfixes for v2.1 API support and unicode character support * 2019.1.24.0 - Python-SDK requirements update, README updates * 2019.2.25.0 - Scalability fixes and utility script updates * 2019.6.4.1 - Added Pypi packaging installation support * 2019.12.10.0 - Python 3.x support, tox tests, CI ready, live migration support, image cache, bugfixes. """ VERSION = '2019.12.10.0' CI_WIKI_NAME = "datera-ci" HEADER_DATA = {'Datera-Driver': 'OpenStack-Cinder-{}'.format(VERSION)} def __init__(self, *args, **kwargs): super(DateraDriver, self).__init__(*args, **kwargs) self.configuration.append_config_values(d_opts) self.username = self.configuration.san_login self.password = self.configuration.san_password self.ldap = self.configuration.datera_ldap_server self.cluster_stats = {} self.datera_api_token = None self.interval = self.configuration.datera_503_interval self.retry_attempts = (self.configuration.datera_503_timeout / self.interval) self.driver_prefix = str(uuid.uuid4())[:4] self.datera_debug = self.configuration.datera_debug if self.datera_debug: volume_utils.setup_tracing(['method']) self.tenant_id = self.configuration.datera_tenant_id if self.tenant_id is None: self.tenant_id = '' self.defaults = self.configuration.datera_volume_type_defaults if self.tenant_id and self.tenant_id.lower() == 'none': self.tenant_id = None self.template_override = ( not self.configuration.datera_disable_template_override) self.api_check = time.time() self.api_cache = [] self.api_timeout = 0 self.do_profile = not self.configuration.datera_disable_profiler self.do_metadata = ( not self.configuration.datera_disable_extended_metadata) self.image_cache = self.configuration.datera_enable_image_cache self.image_type = self.configuration.datera_image_cache_volume_type_id self.thread_local = threading.local() # pylint: disable=no-member self.datera_version = None self.apiv = None self.api = None self.filterf = self.get_filter_function() self.goodnessf = self.get_goodness_function() self.use_chap_auth = self.configuration.use_chap_auth self.chap_username = self.configuration.chap_username self.chap_password = self.configuration.chap_password backend_name = self.configuration.safe_get( 'volume_backend_name') self.backend_name = backend_name or 'Datera' datc.register_driver(self) def do_setup(self, context): # If we can't authenticate through the old and new method, just fail # now. if not all([self.username, self.password]): msg = _("san_login and/or san_password is not set for Datera " "driver in the cinder.conf. 
Set this information and " "start the cinder-volume service again.") LOG.error(msg) raise exception.InvalidInput(msg) # Try each valid api version starting with the latest until we find # one that works for apiv in reversed(datc.API_VERSIONS): try: api = dfs_sdk.get_api(self.configuration.san_ip, self.username, self.password, 'v{}'.format(apiv), disable_log=True, extra_headers=self.HEADER_DATA, thread_local=self.thread_local, ldap_server=self.ldap) system = api.system.get() LOG.debug('Connected successfully to cluster: %s', system.name) self.api = api self.apiv = apiv break except Exception as e: LOG.warning(e) # ================= # ================= # = Create Volume = # ================= @datc.lookup def create_volume(self, volume): """Create a logical volume.""" pass # ================= # = Extend Volume = # ================= @datc.lookup def extend_volume(self, volume, new_size): pass # ================= # ================= # = Cloned Volume = # ================= @datc.lookup def create_cloned_volume(self, volume, src_vref): pass # ================= # = Delete Volume = # ================= @datc.lookup def delete_volume(self, volume): pass # ================= # = Ensure Export = # ================= @datc.lookup def ensure_export(self, context, volume, connector=None): """Gets the associated account, retrieves CHAP info and updates.""" # ========================= # = Initialize Connection = # ========================= @datc.lookup def initialize_connection(self, volume, connector): pass # ================= # = Create Export = # ================= @datc.lookup def create_export(self, context, volume, connector): pass # ================= # = Detach Volume = # ================= @datc.lookup def detach_volume(self, context, volume, attachment=None): pass # =================== # = Create Snapshot = # =================== @datc.lookup def create_snapshot(self, snapshot): pass # =================== # = Delete Snapshot = # =================== @datc.lookup def delete_snapshot(self, snapshot): pass # ======================== # = Volume From Snapshot = # ======================== @datc.lookup def create_volume_from_snapshot(self, volume, snapshot): pass # ========== # = Retype = # ========== @datc.lookup def retype(self, ctxt, volume, new_type, diff, host): """Convert the volume to be of the new type. Returns a boolean indicating whether the retype occurred. :param ctxt: Context :param volume: A dictionary describing the volume to migrate :param new_type: A dictionary describing the volume type to convert to :param diff: A dictionary with the difference between the two types :param host: A dictionary describing the host to migrate to, where host['host'] is its name, and host['capabilities'] is a dictionary of its reported capabilities (Not Used). """ pass # ========== # = Manage = # ========== @datc.lookup def manage_existing(self, volume, existing_ref): """Manage an existing volume on the Datera backend The existing_ref must be either the current name or Datera UUID of an app_instance on the Datera backend in a colon separated list with the storage instance name and volume name. This means only single storage instances and single volumes are supported for managing by cinder. Eg. 
(existing_ref['source-name'] == tenant:app_inst_name:storage_inst_name:vol_name) if using Datera 2.1 API or (existing_ref['source-name'] == app_inst_name:storage_inst_name:vol_name) if using 2.0 API :param volume: Cinder volume to manage :param existing_ref: Driver-specific information used to identify a volume """ pass @datc.lookup def manage_existing_snapshot(self, snapshot, existing_ref): """Brings an existing backend storage object under Cinder management. existing_ref is passed straight through from the API request's manage_existing_ref value, and it is up to the driver how this should be interpreted. It should be sufficient to identify a storage object that the driver should somehow associate with the newly-created cinder snapshot structure. There are two ways to do this: 1. Rename the backend storage object so that it matches the snapshot['name'] which is how drivers traditionally map between a cinder snapshot and the associated backend storage object. 2. Place some metadata on the snapshot, or somewhere in the backend, that allows other driver requests (e.g. delete) to locate the backend storage object when required. If the existing_ref doesn't make sense, or doesn't refer to an existing backend storage object, raise a ManageExistingInvalidReference exception. :param snapshot: Cinder volume snapshot to manage :param existing_ref: Driver-specific information used to identify a volume snapshot """ pass # =================== # = Manage Get Size = # =================== @datc.lookup def manage_existing_get_size(self, volume, existing_ref): """Get the size of an unmanaged volume on the Datera backend The existing_ref must be either the current name or Datera UUID of an app_instance on the Datera backend in a colon separated list with the storage instance name and volume name. This means only single storage instances and single volumes are supported for managing by cinder. Eg. existing_ref == app_inst_name:storage_inst_name:vol_name :param volume: Cinder volume to manage :param existing_ref: Driver-specific information used to identify a volume on the Datera backend """ pass @datc.lookup def manage_existing_snapshot_get_size(self, snapshot, existing_ref): """Return size of snapshot to be managed by manage_existing. When calculating the size, round up to the next GB. :param snapshot: Cinder volume snapshot to manage :param existing_ref: Driver-specific information used to identify a volume snapshot :returns size: Volume snapshot size in GiB (integer) """ pass # ========================= # = Get Manageable Volume = # ========================= @datc.lookup def get_manageable_volumes(self, cinder_volumes, marker, limit, offset, sort_keys, sort_dirs): """List volumes on the backend available for management by Cinder. Returns a list of dictionaries, each specifying a volume in the host, with the following keys: - reference (dictionary): The reference for a volume, which can be passed to "manage_existing". - size (int): The size of the volume according to the storage backend, rounded up to the nearest GB. - safe_to_manage (boolean): Whether or not this volume is safe to manage according to the storage backend. For example, is the volume in use or invalid for any reason. - reason_not_safe (string): If safe_to_manage is False, the reason why. - cinder_id (string): If already managed, provide the Cinder ID. 
- extra_info (string): Any extra information to return to the user :param cinder_volumes: A list of volumes in this host that Cinder currently manages, used to determine if a volume is manageable or not. :param marker: The last item of the previous page; we return the next results after this value (after sorting) :param limit: Maximum number of items to return :param offset: Number of items to skip after marker :param sort_keys: List of keys to sort results by (valid keys are 'identifier' and 'size') :param sort_dirs: List of directions to sort by, corresponding to sort_keys (valid directions are 'asc' and 'desc') """ pass # ============================ # = Get Manageable Snapshots = # ============================ @datc.lookup def get_manageable_snapshots(self, cinder_snapshots, marker, limit, offset, sort_keys, sort_dirs): """List snapshots on the backend available for management by Cinder. Returns a list of dictionaries, each specifying a snapshot in the host, with the following keys: - reference (dictionary): The reference for a snapshot, which can be passed to "manage_existing_snapshot". - size (int): The size of the snapshot according to the storage backend, rounded up to the nearest GB. - safe_to_manage (boolean): Whether or not this snapshot is safe to manage according to the storage backend. For example, is the snapshot in use or invalid for any reason. - reason_not_safe (string): If safe_to_manage is False, the reason why. - cinder_id (string): If already managed, provide the Cinder ID. - extra_info (string): Any extra information to return to the user - source_reference (string): Similar to "reference", but for the snapshot's source volume. :param cinder_snapshots: A list of snapshots in this host that Cinder currently manages, used to determine if a snapshot is manageable or not. :param marker: The last item of the previous page; we return the next results after this value (after sorting) :param limit: Maximum number of items to return :param offset: Number of items to skip after marker :param sort_keys: List of keys to sort results by (valid keys are 'identifier' and 'size') :param sort_dirs: List of directions to sort by, corresponding to sort_keys (valid directions are 'asc' and 'desc') """ pass # ============ # = Unmanage = # ============ @datc.lookup def unmanage(self, volume): """Unmanage a currently managed volume in Cinder :param volume: Cinder volume to unmanage """ pass # ==================== # = Fast Image Clone = # ==================== @datc.lookup def clone_image(self, context, volume, image_location, image_meta, image_service): """Clone an existing image volume.""" pass # ==================== # = Volume Migration = # ==================== @datc.lookup def update_migrated_volume(self, context, volume, new_volume, volume_status): """Return model update for migrated volume. Each driver implementing this method needs to be responsible for the values of _name_id and provider_location. If None is returned or either key is not set, it means the volume table does not need to change the value(s) for the key(s). The return format is {"_name_id": value, "provider_location": value}. 
:param volume: The original volume that was migrated to this backend :param new_volume: The migration volume object that was created on this backend as part of the migration process :param original_volume_status: The status of the original volume :returns: model_update to update DB with any needed changes """ pass # ================ # = Volume Stats = # ================ @datc.lookup def get_volume_stats(self, refresh=False): """Get volume stats. If 'refresh' is True, run update first. The name is a bit misleading as the majority of the data here is cluster data. """ pass # ========= # = Login = # ========= @datc.lookup def login(self): pass def _get_lunid(self): return 0 # ============================ # = Volume-Types/Extra-Specs = # ============================ def _init_vendor_properties(self): """Create a dictionary of vendor unique properties. This method creates a dictionary of vendor unique properties and returns both created dictionary and vendor name. Returned vendor name is used to check for name of vendor unique properties. - Vendor name shouldn't include colon(:) because of the separator and it is automatically replaced by underscore(_). ex. abc:d -> abc_d - Vendor prefix is equal to vendor name. ex. abcd - Vendor unique properties must start with vendor prefix + ':'. ex. abcd:maxIOPS Each backend driver needs to override this method to expose its own properties using _set_property() like this: self._set_property( properties, "vendorPrefix:specific_property", "Title of property", _("Description of property"), "type") : return dictionary of vendor unique properties : return vendor name prefix: DF --> Datera Fabric """ LOG.debug("Using the following volume-type defaults: %s", self.defaults) properties = {} self._set_property( properties, "DF:iops_per_gb", "Datera IOPS Per GB Setting", _("Setting this value will calculate IOPS for each volume of " "this type based on their size. Eg. A setting of 100 will " "give a 1 GB volume 100 IOPS, but a 10 GB volume 1000 IOPS. " "A setting of '0' is unlimited. This value is applied to " "total_iops_max and will be overridden by total_iops_max if " "iops_per_gb is set and a large enough volume is provisioned " "which would exceed total_iops_max"), "integer", minimum=0, default=int(self.defaults.get('iops_per_gb', 0))) self._set_property( properties, "DF:bandwidth_per_gb", "Datera Bandwidth Per GB Setting", _("Setting this value will calculate bandwidth for each volume of " "this type based on their size in KiB/s. Eg. A setting of 100 " "will give a 1 GB volume 100 KiB/s bandwidth, but a 10 GB " "volume 1000 KiB/s bandwidth. A setting of '0' is unlimited. " "This value is applied to total_bandwidth_max and will be " "overridden by total_bandwidth_max if set and a large enough " "volume is provisioned which woudl exceed total_bandwidth_max"), "integer", minimum=0, default=int(self.defaults.get('bandwidth_per_gb', 0))) self._set_property( properties, "DF:placement_mode", "Datera Volume Placement Mode (deprecated)", _("'DEPRECATED: PLEASE USE 'placement_policy' on 3.3.X+ versions " " of the Datera product. 'single_flash' for " "single-flash-replica placement, " "'all_flash' for all-flash-replica placement, " "'hybrid' for hybrid placement"), "string", default=self.defaults.get('placement_mode', 'hybrid')) self._set_property( properties, "DF:placement_policy", "Datera Volume Placement Policy", _("Valid path to a media placement policy. 
Example: " "/placement_policies/all-flash"), "string", default=self.defaults.get('placement_policy', 'default')) self._set_property( properties, "DF:round_robin", "Datera Round Robin Portals", _("True to round robin the provided portals for a target"), "boolean", default="True" == self.defaults.get('round_robin', "False")) if self.configuration.get('datera_debug_replica_count_override'): replica_count = 1 else: replica_count = 3 self._set_property( properties, "DF:replica_count", "Datera Volume Replica Count", _("Specifies number of replicas for each volume. Can only be " "increased once volume is created"), "integer", minimum=1, default=int(self.defaults.get('replica_count', replica_count))) self._set_property( properties, "DF:ip_pool", "Datera IP Pool", _("Specifies IP pool to use for volume. If provided string " "contains commas, it will be split on the commas and each " "substring will be uses as a separate IP pool and the volume's " "IP pool will be chosen randomly from the list. Example: " "'my-ip-pool1,my-ip-pool2,my-ip-pool3', next attach " "my-ip-pool2 was chosen randomly as the volume IP pool"), "string", default=self.defaults.get('ip_pool', 'default')) self._set_property( properties, "DF:template", "Datera Template", _("Specifies Template to use for volume provisioning"), "string", default=self.defaults.get('template', '')) # ###### QoS Settings ###### # self._set_property( properties, "DF:read_bandwidth_max", "Datera QoS Max Bandwidth Read", _("Max read bandwidth setting for volume qos, " "use 0 for unlimited"), "integer", minimum=0, default=int(self.defaults.get('read_bandwidth_max', 0))) self._set_property( properties, "DF:write_bandwidth_max", "Datera QoS Max Bandwidth Write", _("Max write bandwidth setting for volume qos, " "use 0 for unlimited"), "integer", minimum=0, default=int(self.defaults.get('write_bandwidth_max', 0))) self._set_property( properties, "DF:total_bandwidth_max", "Datera QoS Max Bandwidth Total", _("Max total bandwidth setting for volume qos, " "use 0 for unlimited"), "integer", minimum=0, default=int(self.defaults.get('total_bandwidth_max', 0))) self._set_property( properties, "DF:read_iops_max", "Datera QoS Max iops Read", _("Max read iops setting for volume qos, " "use 0 for unlimited"), "integer", minimum=0, default=int(self.defaults.get('read_iops_max', 0))) self._set_property( properties, "DF:write_iops_max", "Datera QoS Max IOPS Write", _("Max write iops setting for volume qos, " "use 0 for unlimited"), "integer", minimum=0, default=int(self.defaults.get('write_iops_max', 0))) self._set_property( properties, "DF:total_iops_max", "Datera QoS Max IOPS Total", _("Max total iops setting for volume qos, " "use 0 for unlimited"), "integer", minimum=0, default=int(self.defaults.get('total_iops_max', 0))) # ###### End QoS Settings ###### # return properties, 'DF' ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315577.3311205 cinder-27.0.0/cinder/volume/drivers/dell_emc/0000775000175000017500000000000000000000000021064 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/drivers/dell_emc/__init__.py0000664000175000017500000000000000000000000023163 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315577.3351207 cinder-27.0.0/cinder/volume/drivers/dell_emc/powerflex/0000775000175000017500000000000000000000000023077 
5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/drivers/dell_emc/powerflex/__init__.py0000664000175000017500000000000000000000000025176 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/drivers/dell_emc/powerflex/driver.py0000664000175000017500000026073700000000000024763 0ustar00zuulzuul00000000000000# Copyright (c) 2017-2020 Dell Inc. or its subsidiaries. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Driver for Dell EMC PowerFlex (formerly named Dell EMC VxFlex OS). """ import http.client as http_client import math from operator import xor from oslo_config import cfg from oslo_log import log as logging from oslo_log import versionutils from oslo_service import loopingcall from oslo_utils import excutils from oslo_utils import units from cinder.common import constants from cinder import context from cinder import exception from cinder.i18n import _ from cinder import interface from cinder import objects from cinder.objects import fields from cinder.objects.volume import Volume from cinder import utils from cinder.volume import configuration from cinder.volume import driver from cinder.volume.drivers.dell_emc.powerflex import options from cinder.volume.drivers.dell_emc.powerflex import rest_client from cinder.volume.drivers.dell_emc.powerflex import utils as flex_utils from cinder.volume.drivers.san import san from cinder.volume import manager from cinder.volume import qos_specs from cinder.volume import volume_types from cinder.volume import volume_utils CONF = cfg.CONF powerflex_opts = options.deprecated_opts + options.actual_opts CONF.register_opts(powerflex_opts, group=configuration.SHARED_CONF_GROUP) LOG = logging.getLogger(__name__) PROVISIONING_KEY = "provisioning:type" REPLICATION_CG_KEY = "powerflex:replication_cg" QOS_IOPS_LIMIT_KEY = "maxIOPS" QOS_BANDWIDTH_LIMIT = "maxBWS" QOS_IOPS_PER_GB = "maxIOPSperGB" QOS_BANDWIDTH_PER_GB = "maxBWSperGB" BLOCK_SIZE = 8 MIN_BWS_SCALING_SIZE = 128 POWERFLEX_MAX_OVERSUBSCRIPTION_RATIO = 10.0 @interface.volumedriver class PowerFlexDriver(driver.VolumeDriver): """Cinder PowerFlex(formerly named Dell EMC VxFlex OS) Driver .. code-block:: none Version history: 2.0.1 - Added support for SIO 1.3x in addition to 2.0.x 2.0.2 - Added consistency group support to generic volume groups 2.0.3 - Added cache for storage pool and protection domains info 2.0.4 - Added compatibility with os_brick>1.15.3 2.0.5 - Change driver name, rename config file options 3.0.0 - Add support for VxFlex OS 3.0.x and for volumes compression 3.5.0 - Add support for PowerFlex 3.5.x 3.5.1 - Add volume replication v2.1 support for PowerFlex 3.5.x 3.5.2 - Add volume migration support 3.5.3 - Add revert volume to snapshot support 3.5.4 - Fix for Bug #1823200. See OSSN-0086 for details. 3.5.5 - Rebrand VxFlex OS to PowerFlex. 
3.5.6 - Fix for Bug #1897598 when volume can be migrated without conversion of its type. 3.5.7 - Report trim/discard support. 3.5.8 - Added Cinder active/active support. 3.6.0 - Improved secret handling. """ VERSION = "3.6.0" SUPPORTS_ACTIVE_ACTIVE = True # ThirdPartySystems wiki CI_WIKI_NAME = "DellEMC_PowerFlex_CI" powerflex_qos_keys = (QOS_IOPS_LIMIT_KEY, QOS_BANDWIDTH_LIMIT, QOS_IOPS_PER_GB, QOS_BANDWIDTH_PER_GB) def __init__(self, *args, **kwargs): super(PowerFlexDriver, self).__init__(*args, **kwargs) self.active_backend_id = kwargs.get("active_backend_id") self.configuration.append_config_values(san.san_opts) self.configuration.append_config_values(powerflex_opts) self.statisticProperties = None self.storage_pools = None self.provisioning_type = None self.replication_enabled = None self.replication_device = None self.failover_choices = None self.primary_client = None self.secondary_client = None def _init_vendor_properties(self): properties = {} self._set_property( properties, "powerflex:replication_cg", "PowerFlex Replication Consistency Group.", _("Specifies the PowerFlex Replication Consistency group for a " "volume type. Source and target volumes will be added to the " "specified RCG during creation."), "string") return properties, "powerflex" @staticmethod def get_driver_options(): return powerflex_opts @staticmethod def _extract_domain_and_pool_from_host(host): pd_sp = volume_utils.extract_host(host, "pool") protection_domain_name = pd_sp.split(":")[0] storage_pool_name = pd_sp.split(":")[1] return protection_domain_name, storage_pool_name @property def _available_failover_choices(self): """Available choices to failover/failback host.""" return self.failover_choices.difference({self.active_backend_id}) @property def _is_failed_over(self): """Check if storage backend is in FAILED_OVER state. :return: storage backend failover state """ return bool(self.active_backend_id and self.active_backend_id != "default") def _get_client(self, secondary=False): """Get appropriate REST client for storage backend. 
:param secondary: primary or secondary client :return: REST client for storage backend """ if xor(self._is_failed_over, secondary): return self.secondary_client else: return self.primary_client def do_setup(self, context): if not self.active_backend_id: self.active_backend_id = manager.VolumeManager.FAILBACK_SENTINEL if not self.failover_choices: self.failover_choices = {manager.VolumeManager.FAILBACK_SENTINEL} powerflex_storage_pools = ( self.configuration.safe_get("powerflex_storage_pools") ) if powerflex_storage_pools: self.storage_pools = [ e.strip() for e in powerflex_storage_pools.split(",") ] LOG.info("Storage pools names: %s.", self.storage_pools) self.provisioning_type = ( "thin" if self.configuration.san_thin_provision else "thick" ) LOG.info("Default provisioning type: %s.", self.provisioning_type) self.configuration.max_over_subscription_ratio = ( self.configuration.powerflex_max_over_subscription_ratio ) self.primary_client = rest_client.RestClient(self.configuration) self.secondary_client = rest_client.RestClient(self.configuration, is_primary=False) self.primary_client.do_setup() self.secondary_client.do_setup() def check_for_setup_error(self): client = self._get_client() # validate oversubscription ratio if (self.configuration.max_over_subscription_ratio > POWERFLEX_MAX_OVERSUBSCRIPTION_RATIO): msg = (_("Max over subscription is configured to %(ratio)1f " "while PowerFlex support up to %(powerflex_ratio)s.") % {"ratio": self.configuration.max_over_subscription_ratio, "powerflex_ratio": POWERFLEX_MAX_OVERSUBSCRIPTION_RATIO}) raise exception.InvalidInput(reason=msg) # validate that version of PowerFlex is supported if not flex_utils.version_gte(client.query_rest_api_version(), "2.0"): # we are running against a pre-2.0.0 PowerFlex(ScaleIO) instance msg = (_("Using PowerFlex versions less " "than v2.0 has been deprecated and will be " "removed in a future version.")) versionutils.report_deprecated_feature(LOG, msg) if not self.storage_pools: msg = (_("Must specify storage pools. " "Option: powerflex_storage_pools.")) raise exception.InvalidInput(reason=msg) # validate the storage pools and check if zero padding is enabled for pool in self.storage_pools: try: pd, sp = pool.split(":") except (ValueError, IndexError): msg = (_("Invalid storage pool name. The correct format is: " "protection_domain:storage_pool. " "Value supplied was: %s.") % pool) raise exception.InvalidInput(reason=msg) try: properties = client.get_storage_pool_properties(pd, sp) padded = properties["zeroPaddingEnabled"] except Exception: msg = _("Failed to query properties for pool %s.") % pool raise exception.InvalidInput(reason=msg) if not padded: LOG.warning("Zero padding is disabled for pool %s. " "This could lead to existing data being " "accessible on new provisioned volumes. " "Consult the PowerFlex product documentation " "for information on how to enable zero padding " "and prevent this from occurring.", pool) # validate replication configuration if self.secondary_client.is_configured: self.replication_device = self.configuration.replication_device[0] self.failover_choices.add(self.replication_device["backend_id"]) if self._is_failed_over: LOG.warning("Storage backend is in FAILED_OVER state. 
" "Replication is DISABLED.") self.replication_enabled = False else: primary_version = self.primary_client.query_rest_api_version() secondary_version = ( self.secondary_client.query_rest_api_version() ) if not (flex_utils.version_gte(primary_version, "3.5") and flex_utils.version_gte(secondary_version, "3.5")): LOG.info("PowerFlex versions less than v3.5 do not " "support replication.") self.replication_enabled = False else: self.replication_enabled = True else: self.replication_enabled = False @property def replication_targets(self): """Replication targets for storage backend. :return: replication targets """ if self.replication_enabled and not self._is_failed_over: return [self.replication_device] else: return [] def _get_queryable_statistics(self, sio_type, sio_id): """Get statistic properties that can be obtained from PowerFlex. :param sio_type: PowerFlex resource type :param sio_id: PowerFlex resource id :return: statistic properties """ url = "/types/%(sio_type)s/instances/action/querySelectedStatistics" client = self._get_client() if self.statisticProperties is None: # in PowerFlex 3.5 snapCapacityInUseInKb is replaced by # snapshotCapacityInKb if flex_utils.version_gte(client.query_rest_api_version(), "3.5"): self.statisticProperties = [ "snapshotCapacityInKb", "thickCapacityInUseInKb", ] else: self.statisticProperties = [ "snapCapacityInUseInKb", "thickCapacityInUseInKb", ] # PowerFlex 3.0 provide useful precomputed stats if flex_utils.version_gte(client.query_rest_api_version(), "3.0"): self.statisticProperties.extend([ "netCapacityInUseInKb", "netUnusedCapacityInKb", "thinCapacityAllocatedInKb", ]) return self.statisticProperties self.statisticProperties.extend([ "capacityLimitInKb", "spareCapacityInKb", "capacityAvailableForVolumeAllocationInKb", ]) # version 2.0 of SIO introduced thin volumes if flex_utils.version_gte(client.query_rest_api_version(), "2.0"): # check to see if thinCapacityAllocatedInKb is valid # needed due to non-backwards compatible API params = { "ids": [ sio_id, ], "properties": [ "thinCapacityAllocatedInKb", ], } r, response = client.execute_powerflex_post_request( url=url, params=params, sio_type=sio_type ) if r.status_code == http_client.OK: # is it valid, use it self.statisticProperties.append( "thinCapacityAllocatedInKb" ) else: # it is not valid, assume use of thinCapacityAllocatedInKm self.statisticProperties.append( "thinCapacityAllocatedInKm" ) return self.statisticProperties def _setup_volume_replication(self, vol_or_snap, source_provider_id): """Configure replication for volume or snapshot. Create volume on secondary PowerFlex storage backend. Pair volumes and add replication pair to replication consistency group. :param vol_or_snap: source volume/snapshot :param source_provider_id: primary PowerFlex volume id """ try: # If vol_or_snap has 'volume' attribute we are dealing # with snapshot. Necessary parameters is stored in volume object. entity = vol_or_snap.volume entity_type = "snapshot" except AttributeError: entity = vol_or_snap entity_type = "volume" LOG.info("Configure replication for %(entity_type)s %(id)s. 
", {"entity_type": entity_type, "id": vol_or_snap.id}) try: protection_domain_name, storage_pool_name = ( self._extract_domain_and_pool_from_host(entity.host) ) self._check_volume_creation_safe(protection_domain_name, storage_pool_name, secondary=True) storage_type = self._get_volumetype_extraspecs(entity) rcg_name = storage_type.get(REPLICATION_CG_KEY) LOG.info("Replication Consistency Group name: %s.", rcg_name) provisioning, compression = self._get_provisioning_and_compression( storage_type, protection_domain_name, storage_pool_name, secondary=True ) dest_provider_id = self._get_client(secondary=True).create_volume( protection_domain_name, storage_pool_name, vol_or_snap.id, entity.size, provisioning, compression) self._get_client().create_volumes_pair(rcg_name, source_provider_id, dest_provider_id) LOG.info("Successfully configured replication for %(entity_type)s " "%(id)s.", {"entity_type": entity_type, "id": vol_or_snap.id}) except exception.VolumeBackendAPIException: with excutils.save_and_reraise_exception(): LOG.error("Failed to configure replication for " "%(entity_type)s %(id)s.", {"entity_type": entity_type, "id": vol_or_snap.id}) def _teardown_volume_replication(self, provider_id): """Stop volume/snapshot replication. Unpair volumes/snapshot and remove volume/snapshot from PowerFlex secondary storage backend. """ if not provider_id: LOG.warning("Volume or snapshot does not have provider_id thus " "does not map to PowerFlex volume.") return try: pair_id, remote_pair_id, vol_id, remote_vol_id = ( self._get_client().get_volumes_pair_attrs("localVolumeId", provider_id) ) except exception.VolumeBackendAPIException: LOG.info("Replication pair for volume %s is not found. " "Replication for volume was not configured or was " "modified from storage side.", provider_id) return self._get_client().remove_volumes_pair(pair_id) if not self._is_failed_over: self._get_client(secondary=True).remove_volume(remote_vol_id) def failover_host(self, context, volumes, secondary_id=None, groups=None): active_backend_id, model_updates, group_update_list = ( self.failover(context, volumes, secondary_id, groups)) self.failover_completed(context, secondary_id) return active_backend_id, model_updates, group_update_list def failover(self, context, volumes, secondary_id=None, groups=None): """Like failover but for a host that is clustered.""" LOG.info("Invoking failover with target %s.", secondary_id) if secondary_id not in self._available_failover_choices: msg = (_("Target %(target)s is not valid choice. 
" "Valid choices: %(choices)s.") % {"target": secondary_id, "choices": ', '.join(self._available_failover_choices)}) LOG.error(msg) raise exception.InvalidReplicationTarget(reason=msg) is_failback = secondary_id == manager.VolumeManager.FAILBACK_SENTINEL failed_over_rcgs = {} model_updates = [] for volume in volumes: storage_type = self._get_volumetype_extraspecs(volume) rcg_name = storage_type.get(REPLICATION_CG_KEY) if not rcg_name: LOG.error("Replication Consistency Group is not specified in " "volume %s VolumeType.", volume.id) failover_status = fields.ReplicationStatus.FAILOVER_ERROR updates = self._generate_model_updates(volume, failover_status, is_failback) model_updates.append(updates) continue if rcg_name in failed_over_rcgs: failover_status = failed_over_rcgs[rcg_name] else: failover_status = self._failover_replication_cg( rcg_name, is_failback ) failed_over_rcgs[rcg_name] = failover_status updates = self._generate_model_updates(volume, failover_status, is_failback) model_updates.append({"volume_id": volume.id, "updates": updates}) LOG.info("Failover host completed.") return secondary_id, model_updates, [] def failover_completed(self, context, active_backend_id=None): """This method is called after failover for clustered backends.""" LOG.info("Invoking failover_completed with target %s.", active_backend_id) if (not active_backend_id or active_backend_id == manager.VolumeManager.FAILBACK_SENTINEL): # failback operation self.active_backend_id = manager.VolumeManager.FAILBACK_SENTINEL self.replication_enabled = True elif (active_backend_id == self.replication_device["backend_id"] or active_backend_id == "failed over"): # failover operation self.active_backend_id = self.replication_device["backend_id"] self.replication_enabled = False else: msg = f"Target {active_backend_id} is not valid." LOG.error(msg) raise exception.InvalidReplicationTarget(reason=msg) LOG.info("Failover completion completed: " "active_backend_id = %s, " "replication_enabled = %s.", self.active_backend_id, self.replication_enabled) def _failover_replication_cg(self, rcg_name, is_failback): """Failover/failback Replication Consistency Group on storage backend. :param rcg_name: name of PowerFlex Replication Consistency Group :param is_failback: is failover or failback :return: failover status of Replication Consistency Group """ action = "failback" if is_failback else "failover" LOG.info("Perform %(action)s of Replication Consistency Group " "%(rcg_name)s.", {"action": action, "rcg_name": rcg_name}) try: self._get_client(secondary=True).failover_failback_replication_cg( rcg_name, is_failback ) failover_status = fields.ReplicationStatus.FAILED_OVER LOG.info("Successfully performed %(action)s of Replication " "Consistency Group %(rcg_name)s.", {"action": action, "rcg_name": rcg_name}) except exception.VolumeBackendAPIException: LOG.error("Failed to perform %(action)s of Replication " "Consistency Group %(rcg_name)s.", {"action": action, "rcg_name": rcg_name}) failover_status = fields.ReplicationStatus.FAILOVER_ERROR return failover_status def _generate_model_updates(self, volume, failover_status, is_failback): """Generate volume model updates after failover/failback. Get new provider_id for volume and update volume snapshots if presented. 
""" LOG.info("Generate model updates for volume %s and its snapshots.", volume.id) error_status = (fields.ReplicationStatus.ERROR if is_failback else fields.ReplicationStatus.FAILOVER_ERROR) updates = {} if failover_status == fields.ReplicationStatus.FAILED_OVER: client = self._get_client(secondary=True) try: LOG.info("Query new provider_id for volume %s.", volume.id) pair_id, remote_pair_id, vol_id, remote_vol_id = ( client.get_volumes_pair_attrs("remoteVolumeId", volume.provider_id) ) LOG.info("New provider_id for volume %(vol_id)s: " "%(provider_id)s.", {"vol_id": volume.id, "provider_id": vol_id}) updates["provider_id"] = vol_id except exception.VolumeBackendAPIException: LOG.error("Failed to query new provider_id for volume " "%(vol_id)s. Volume status will be changed to " "%(status)s.", {"vol_id": volume.id, "status": error_status}) updates["replication_status"] = error_status for snapshot in volume.snapshots: try: LOG.info("Query new provider_id for snapshot %(snap_id)s " "of volume %(vol_id)s.", {"snap_id": snapshot.id, "vol_id": volume.id}) pair_id, remote_pair_id, snap_id, remote_snap_id = ( client.get_volumes_pair_attrs( "remoteVolumeId", snapshot.provider_id) ) LOG.info("New provider_id for snapshot %(snap_id)s " "of volume %(vol_id)s: %(provider_id)s.", { "snap_id": snapshot.id, "vol_id": volume.id, "provider_id": snap_id, }) snapshot.update({"provider_id": snap_id}) except exception.VolumeBackendAPIException: LOG.error("Failed to query new provider_id for snapshot " "%(snap_id)s of volume %(vol_id)s. " "Snapshot status will be changed to " "%(status)s.", { "vol_id": volume.id, "snap_id": snapshot.id, "status": fields.SnapshotStatus.ERROR, }) snapshot.update({"status": fields.SnapshotStatus.ERROR}) finally: snapshot.save() else: updates["replication_status"] = error_status return updates def _get_provisioning_and_compression(self, storage_type, protection_domain_name, storage_pool_name, secondary=False): """Get volume provisioning and compression from VolumeType extraspecs. :param storage_type: extraspecs :param protection_domain_name: name of PowerFlex Protection Domain :param storage_pool_name: name of PowerFlex Storage Pool :param secondary: primary or secondary client :return: volume provisioning and compression """ provisioning_type = storage_type.get(PROVISIONING_KEY) if provisioning_type is not None: if provisioning_type not in ("thick", "thin", "compressed"): msg = _("Illegal provisioning type. The supported " "provisioning types are 'thick', 'thin' " "or 'compressed'.") raise exception.VolumeBackendAPIException(data=msg) else: provisioning_type = self.provisioning_type provisioning = "ThinProvisioned" if (provisioning_type == "thick" and self._check_pool_support_thick_vols(protection_domain_name, storage_pool_name, secondary)): provisioning = "ThickProvisioned" compression = "None" if self._check_pool_support_compression(protection_domain_name, storage_pool_name, secondary): if provisioning_type == "compressed": compression = "Normal" return provisioning, compression def create_volume(self, volume): """Create volume on PowerFlex storage backend. :param volume: volume to be created :return: volume model updates """ client = self._get_client() self._check_volume_size(volume.size) protection_domain_name, storage_pool_name = ( self._extract_domain_and_pool_from_host(volume.host) ) self._check_volume_creation_safe(protection_domain_name, storage_pool_name) storage_type = self._get_volumetype_extraspecs(volume) LOG.info("Create volume %(vol_id)s. 
Volume type: %(volume_type)s, " "Storage Pool name: %(pool_name)s, Protection Domain name: " "%(domain_name)s.", { "vol_id": volume.id, "volume_type": storage_type, "pool_name": storage_pool_name, "domain_name": protection_domain_name, }) provisioning, compression = self._get_provisioning_and_compression( storage_type, protection_domain_name, storage_pool_name ) provider_id = client.create_volume(protection_domain_name, storage_pool_name, volume.id, volume.size, provisioning, compression) real_size = int(flex_utils.round_to_num_gran(volume.size)) model_updates = { "provider_id": provider_id, "size": real_size, "replication_status": fields.ReplicationStatus.DISABLED, } LOG.info("Successfully created volume %(vol_id)s. " "Volume size: %(size)s. PowerFlex volume name: %(vol_name)s, " "id: %(provider_id)s.", { "vol_id": volume.id, "size": real_size, "vol_name": flex_utils.id_to_base64(volume.id), "provider_id": provider_id, }) if volume.is_replicated(): self._setup_volume_replication(volume, provider_id) model_updates["replication_status"] = ( fields.ReplicationStatus.ENABLED ) return model_updates def _check_volume_size(self, size): """Check volume size to be multiple of 8GB. :param size: volume size in GB """ if size % 8 != 0: round_volume_capacity = ( self.configuration.powerflex_round_volume_capacity ) if not round_volume_capacity: msg = (_("Cannot create volume of size %s: " "not multiple of 8GB.") % size) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) def _check_volume_creation_safe(self, protection_domain_name, storage_pool_name, secondary=False): allowed = self._get_client(secondary).is_volume_creation_safe( protection_domain_name, storage_pool_name ) if not allowed: # Do not allow volume creation on this backend. # Volumes may leak data between tenants. LOG.error("Volume creation rejected due to " "zero padding being disabled for pool, %s:%s. " "This behaviour can be changed by setting " "the configuration option " "powerflex_allow_non_padded_volumes = True.", protection_domain_name, storage_pool_name) msg = _("Volume creation rejected due to " "unsafe backend configuration.") raise exception.VolumeBackendAPIException(data=msg) def create_snapshot(self, snapshot): """Create volume snapshot on PowerFlex storage backend. :param snapshot: volume snapshot to be created :return: snapshot model updates """ client = self._get_client() LOG.info("Create snapshot %(snap_id)s for volume %(vol_id)s.", {"snap_id": snapshot.id, "vol_id": snapshot.volume.id}) provider_id = client.snapshot_volume(snapshot.volume.provider_id, snapshot.id) model_updates = {"provider_id": provider_id} LOG.info("Successfully created snapshot %(snap_id)s " "for volume %(vol_id)s. PowerFlex volume name: %(vol_name)s, " "id: %(vol_provider_id)s, snapshot name: %(snap_name)s, " "snapshot id: %(snap_provider_id)s.", { "snap_id": snapshot.id, "vol_id": snapshot.volume.id, "vol_name": flex_utils.id_to_base64(snapshot.volume.id), "vol_provider_id": snapshot.volume.provider_id, "snap_name": flex_utils.id_to_base64(provider_id), "snap_provider_id": provider_id, }) if snapshot.volume.is_replicated(): self._setup_volume_replication(snapshot, provider_id) return model_updates def _create_volume_from_source(self, volume, source): """Create volume from volume or snapshot on PowerFlex storage backend. 
We interchange 'volume' and 'snapshot' because in PowerFlex snapshot is a volume: once a snapshot is generated it becomes a new unmapped volume in the system and the user may manipulate it in the same manner as any other volume exposed by the system. :param volume: volume to be created :param source: snapshot or volume from which volume will be created :return: volume model updates """ client = self._get_client() provider_id = client.snapshot_volume(source.provider_id, volume.id) model_updates = { "provider_id": provider_id, "replication_status": fields.ReplicationStatus.DISABLED, } LOG.info("Successfully created volume %(vol_id)s " "from source %(source_id)s. PowerFlex volume name: " "%(vol_name)s, id: %(vol_provider_id)s, source name: " "%(source_name)s, source id: %(source_provider_id)s.", { "vol_id": volume.id, "source_id": source.id, "vol_name": flex_utils.id_to_base64(volume.id), "vol_provider_id": provider_id, "source_name": flex_utils.id_to_base64(source.id), "source_provider_id": source.provider_id, }) try: # Snapshot object does not have 'size' attribute. source_size = source.volume_size except AttributeError: source_size = source.size if volume.size > source_size: real_size = flex_utils.round_to_num_gran(volume.size) client.extend_volume(provider_id, real_size) if volume.is_replicated(): self._setup_volume_replication(volume, provider_id) model_updates["replication_status"] = ( fields.ReplicationStatus.ENABLED ) return model_updates def create_volume_from_snapshot(self, volume, snapshot): """Create volume from snapshot on PowerFlex storage backend. :param volume: volume to be created :param snapshot: snapshot from which volume will be created :return: volume model updates """ LOG.info("Create volume %(vol_id)s from snapshot %(snap_id)s.", {"vol_id": volume.id, "snap_id": snapshot.id}) return self._create_volume_from_source(volume, snapshot) def extend_volume(self, volume, new_size): """Extend size of existing and available PowerFlex volume. This action will round up volume to nearest size that is granularity of 8 GBs. :param volume: volume to be extended :param new_size: volume size after extending """ LOG.info("Extend volume %(vol_id)s to size %(size)s.", {"vol_id": volume.id, "size": new_size}) volume_new_size = flex_utils.round_to_num_gran(new_size) volume_real_old_size = flex_utils.round_to_num_gran(volume.size) if volume_real_old_size == volume_new_size: return if volume.is_replicated(): pair_id, remote_pair_id, vol_id, remote_vol_id = ( self._get_client().get_volumes_pair_attrs("localVolumeId", volume.provider_id) ) self._get_client(secondary=True).extend_volume(remote_vol_id, volume_new_size) self._get_client().extend_volume(volume.provider_id, volume_new_size) def create_cloned_volume(self, volume, src_vref): """Create cloned volume on PowerFlex storage backend. :param volume: volume to be created :param src_vref: source volume from which volume will be cloned :return: volume model updates """ LOG.info("Clone volume %(vol_id)s to %(target_vol_id)s.", {"vol_id": src_vref.id, "target_vol_id": volume.id}) return self._create_volume_from_source(volume, src_vref) def delete_volume(self, volume): """Delete volume from PowerFlex storage backend. If volume is replicated, replication will be stopped first. 
:param volume: volume to be deleted """ LOG.info("Delete volume %s.", volume.id) if volume.is_replicated(): self._teardown_volume_replication(volume.provider_id) self._get_client().remove_volume(volume.provider_id) def delete_snapshot(self, snapshot): """Delete snapshot from PowerFlex storage backend. :param snapshot: snapshot to be deleted """ LOG.info("Delete snapshot %s.", snapshot.id) if snapshot.volume.is_replicated(): self._teardown_volume_replication(snapshot.provider_id) self._get_client().remove_volume(snapshot.provider_id) def initialize_connection(self, volume, connector, **kwargs): res = self._initialize_connection(volume, connector, volume.size) # TODO: Should probably be enabled for SSDs as well # It is recommended not to trim volumes that contain snapshots as the # logical capacity may not shrink. if self.provisioning_type == 'thin' and not len(volume.snapshots): res['data']['discard'] = True return res def _initialize_connection(self, vol_or_snap, connector, vol_size): """Initialize connection and return connection info. PowerFlex driver returns a driver_volume_type of 'scaleio'. """ try: sdc_guid = connector["sdc_guid"] except Exception: msg = "SDC guid is not configured." raise exception.InvalidHost(reason=msg) LOG.info("Initialize connection for %(vol_id)s to SDC %(sdc)s.", {"vol_id": vol_or_snap.id, "sdc": sdc_guid}) connection_properties = {} volume_name = flex_utils.id_to_base64(vol_or_snap.id) connection_properties["scaleIO_volname"] = volume_name connection_properties["scaleIO_volume_id"] = vol_or_snap.provider_id # map volume sdc_id = self._get_client().query_sdc_id_by_guid(sdc_guid) self._attach_volume_to_host(vol_or_snap, sdc_id) # verify volume is mapped self._check_volume_mapped(sdc_id, vol_or_snap.provider_id) if vol_size is not None: extra_specs = self._get_volumetype_extraspecs(vol_or_snap) qos_specs = self._get_volumetype_qos(vol_or_snap) storage_type = extra_specs.copy() storage_type.update(qos_specs) round_volume_size = flex_utils.round_to_num_gran(vol_size) iops_limit = self._get_iops_limit(round_volume_size, storage_type) bandwidth_limit = self._get_bandwidth_limit(round_volume_size, storage_type) LOG.info("IOPS limit: %s.", iops_limit) LOG.info("Bandwidth limit: %s.", bandwidth_limit) # Set QoS settings after map was performed if iops_limit is not None or bandwidth_limit is not None: self._get_client().set_sdc_limits(vol_or_snap.provider_id, sdc_id, bandwidth_limit, iops_limit) return { "driver_volume_type": "scaleio", "data": connection_properties, } def _attach_volume_to_host(self, volume, sdc_id): """Attach PowerFlex volume to host. :param volume: OpenStack volume object :param sdc_id: PowerFlex SDC id """ host = self._get_client().query_sdc_by_id(sdc_id) provider_id = volume.provider_id # check if volume is already attached to the host vol = self._get_client().query_volume(provider_id) if vol["mappedSdcInfo"]: ids = [sdc["sdcId"] for sdc in vol["mappedSdcInfo"]] if sdc_id in ids: LOG.debug("PowerFlex volume %(volume_name)s " "with id %(volume_id)s is already attached to " "host %(host_name)s. " "PowerFlex volume id: %(volume_provider_id)s, " "host id: %(host_provider_id)s. ", { "volume_name": volume.name, "volume_id": volume.id, "host_name": host["name"], "volume_provider_id": provider_id, "host_provider_id": sdc_id, }) return LOG.debug("Attach PowerFlex volume %(volume_name)s with id " "%(volume_id)s to host %(host_name)s. 
PowerFlex volume id: " "%(volume_provider_id)s, host id: %(host_provider_id)s.", { "volume_name": volume.name, "volume_id": volume.id, "host_name": host["name"], "volume_provider_id": provider_id, "host_provider_id": sdc_id, }) self._get_client().map_volume(provider_id, sdc_id) LOG.debug("Successfully attached PowerFlex volume %(volume_name)s " "with id %(volume_id)s to host %(host_name)s. " "PowerFlex volume id: %(volume_provider_id)s, " "host id: %(host_provider_id)s. ", { "volume_name": volume.name, "volume_id": volume.id, "host_name": host["name"], "volume_provider_id": provider_id, "host_provider_id": sdc_id, }) @utils.retry(exception.VolumeBackendAPIException, retries=3) def _check_volume_mapped(self, sdc_id, volume_id): mappedVols = self._get_client().query_sdc_volumes(sdc_id) if volume_id not in mappedVols: msg = f'Volume {volume_id} is not mapped to SDC {sdc_id}.' raise exception.VolumeBackendAPIException(msg) LOG.info("Volume %s is mapped to SDC %s.", volume_id, sdc_id) @staticmethod def _get_bandwidth_limit(size, storage_type): try: max_bandwidth = storage_type.get(QOS_BANDWIDTH_LIMIT) if max_bandwidth is not None: max_bandwidth = flex_utils.round_to_num_gran( int(max_bandwidth), units.Ki ) max_bandwidth = str(max_bandwidth) LOG.info("Max bandwidth: %s.", max_bandwidth) bw_per_gb = storage_type.get(QOS_BANDWIDTH_PER_GB) LOG.info("Bandwidth per GB: %s.", bw_per_gb) if bw_per_gb is None: return max_bandwidth # Since PowerFlex volumes size is in 8GB granularity # and BWS limitation is in 1024 KBs granularity, we need to make # sure that scaled_bw_limit is in 128 granularity. scaled_bw_limit = ( size * flex_utils.round_to_num_gran(int(bw_per_gb), MIN_BWS_SCALING_SIZE) ) if max_bandwidth is None or scaled_bw_limit < int(max_bandwidth): return str(scaled_bw_limit) else: return str(max_bandwidth) except ValueError: msg = _("None numeric BWS QoS limitation.") raise exception.InvalidInput(reason=msg) @staticmethod def _get_iops_limit(size, storage_type): max_iops = storage_type.get(QOS_IOPS_LIMIT_KEY) LOG.info("Max IOPS: %s.", max_iops) iops_per_gb = storage_type.get(QOS_IOPS_PER_GB) LOG.info("IOPS per GB: %s.", iops_per_gb) try: if iops_per_gb is None: if max_iops is not None: return str(max_iops) else: return None scaled_iops_limit = size * int(iops_per_gb) if max_iops is None or scaled_iops_limit < int(max_iops): return str(scaled_iops_limit) else: return str(max_iops) except ValueError: msg = _("None numeric IOPS QoS limitation.") raise exception.InvalidInput(reason=msg) def terminate_connection(self, volume, connector, **kwargs): self._terminate_connection(volume, connector) def _terminate_connection(self, volume_or_snap, connector): """Terminate connection to volume or snapshot. With PowerFlex, snaps and volumes are terminated identically. """ if connector is None: self._detach_volume_from_host(volume_or_snap) return try: sdc_guid = connector["sdc_guid"] except Exception: msg = "Host IP is not configured." raise exception.InvalidHost(reason=msg) LOG.info("Terminate connection for %(vol_id)s to SDC %(sdc)s.", {"vol_id": volume_or_snap.id, "sdc": sdc_guid}) if isinstance(volume_or_snap, Volume): is_multiattached = self._is_multiattached_to_host( volume_or_snap.volume_attachment, connector["host"] ) if is_multiattached: # Do not detach volume if it is attached to more than one # instance on the same host. 
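# --- Illustrative sketch (not part of the driver) -------------------------
# The multiattach guard here relies on _is_multiattached_to_host(), defined
# below, which counts ATTACHED attachments on the same Nova host. The
# namedtuple and sample data in this sketch are hypothetical stand-ins for
# Cinder's VolumeAttachment objects; only the counting logic mirrors the
# driver's behaviour.
from collections import namedtuple

_FakeAttachment = namedtuple("_FakeAttachment",
                             ["attach_status", "attached_host"])

def _still_needed_on_host(attachments, host_name):
    """Return True when more than one attachment is active on host_name."""
    active = [a for a in attachments
              if a.attach_status == "attached"
              and a.attached_host == host_name]
    return len(active) > 1

# Two instances on the same compute host share the volume, so the SDC
# mapping must be kept until the last instance detaches.
_sample = [_FakeAttachment("attached", "compute-1"),
           _FakeAttachment("attached", "compute-1")]
assert _still_needed_on_host(_sample, "compute-1")
# --------------------------------------------------------------------------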
LOG.info("Will not terminate connection for " "%(vol_id)s to initiator at %(sdc)s " "because it's multiattach.", {"vol_id": volume_or_snap.id, "sdc": sdc_guid}) return # unmap volume host_id = self._get_client().query_sdc_id_by_guid(sdc_guid) self._detach_volume_from_host(volume_or_snap, host_id) self._check_volume_unmapped(host_id, volume_or_snap.provider_id) LOG.info("Terminated connection for %(vol_id)s to SDC %(sdc)s.", {"vol_id": volume_or_snap.id, "sdc": sdc_guid}) @staticmethod def _is_multiattached_to_host(volume_attachment, host_name): """Check if volume is attached to multiple instances on one host. When multiattach is enabled, a volume could be attached to two or more instances which are hosted on one nova host. We should keep the volume attached to the nova host until the volume is detached from the last instance. :param volume_attachment: list of VolumeAttachment objects :param host_name: OpenStack host name :return: multiattach flag """ if not volume_attachment: return False attachments = [ attachment for attachment in volume_attachment if (attachment.attach_status == fields.VolumeAttachStatus.ATTACHED and attachment.attached_host == host_name) ] return len(attachments) > 1 def _detach_volume_from_host(self, volume, sdc_id=None): """Detach PowerFlex volume from nvme host. :param volume: OpenStack volume object :param sdc_id: PowerFlex SDC id """ provider_id = volume.provider_id vol = self._get_client().query_volume(provider_id) # check if volume is already detached if not vol["mappedSdcInfo"]: LOG.debug("PowerFlex volume %(volume_name)s " "with id %(volume_id)s is already detached from " "all hosts. " "PowerFlex volume id: %(volume_provider_id)s. ", { "volume_name": volume.name, "volume_id": volume.id, "volume_provider_id": provider_id, }) return if sdc_id: host = self._get_client().query_sdc_by_id(sdc_id) # check if volume is already detached from the host ids = [sdc["sdcId"] for sdc in vol["mappedSdcInfo"]] if sdc_id not in ids: LOG.debug("PowerFlex volume %(volume_name)s " "with id %(volume_id)s is already detached from " "host %(host_name)s. " "PowerFlex volume id: %(volume_provider_id)s, " "host id: %(host_provider_id)s. ", { "volume_name": volume.name, "volume_id": volume.id, "host_name": host["name"], "volume_provider_id": provider_id, "host_provider_id": sdc_id, }) return LOG.debug("Detach PowerFlex volume %(volume_name)s with id " "%(volume_id)s from host %(host_name)s. " "PowerFlex volume id: %(volume_provider_id)s, " "host id: %(host_provider_id)s.", { "volume_name": volume.name, "volume_id": volume.id, "host_name": host["name"], "volume_provider_id": provider_id, "host_provider_id": sdc_id, }) self._get_client().unmap_volume(provider_id, sdc_id) LOG.debug("Successfully detached PowerFlex volume %(volume_name)s " "with id %(volume_id)s from host %(host_name)s. " "PowerFlex volume id: %(volume_provider_id)s, " "host id: %(host_provider_id)s. ", { "volume_name": volume.name, "volume_id": volume.id, "host_name": host["name"], "volume_provider_id": provider_id, "host_provider_id": sdc_id, }) else: LOG.debug("Detach PowerFlex volume %(volume_name)s with id " "%(volume_id)s from all mapped hosts. " "PowerFlex volume id: %(volume_provider_id)s.", { "volume_name": volume.name, "volume_id": volume.id, "volume_provider_id": provider_id, }) self._get_client().unmap_volume(provider_id) LOG.debug("Successfully detached PowerFlex volume %(volume_name)s " "with id %(volume_id)s from all mapped hosts. 
" "PowerFlex volume id: %(volume_provider_id)s.", { "volume_name": volume.name, "volume_id": volume.id, "volume_provider_id": provider_id, }) @utils.retry(exception.VolumeBackendAPIException, retries=3) def _check_volume_unmapped(self, sdc_id, volume_id): mappedVols = self._get_client().query_sdc_volumes(sdc_id) if volume_id in mappedVols: msg = f'Volume {volume_id} is still mapped to SDC {sdc_id}.' raise exception.VolumeBackendAPIException(msg) LOG.info("Volume %s is unmapped from SDC %s.", volume_id, sdc_id) def _update_volume_stats(self): """Update storage backend driver statistics.""" stats = {} backend_name = self.configuration.safe_get("volume_backend_name") stats["volume_backend_name"] = backend_name or "powerflex" stats["vendor_name"] = "Dell EMC" stats["driver_version"] = self.VERSION stats["storage_protocol"] = constants.SCALEIO stats["reserved_percentage"] = 0 stats["QoS_support"] = True stats["consistent_group_snapshot_enabled"] = True stats["thick_provisioning_support"] = True stats["thin_provisioning_support"] = True stats["multiattach"] = True stats["replication_enabled"] = ( self.replication_enabled and not self._is_failed_over ) stats["replication_targets"] = self.replication_targets pools = [] backend_free_capacity = 0 backend_total_capacity = 0 backend_provisioned_capacity = 0 for sp_name in self.storage_pools: splitted_name = sp_name.split(":") domain_name = splitted_name[0] pool_name = splitted_name[1] total_capacity_gb, free_capacity_gb, provisioned_capacity = ( self._query_pool_stats(domain_name, pool_name) ) pool_support_thick_vols = self._check_pool_support_thick_vols( domain_name, pool_name ) pool_support_thin_vols = self._check_pool_support_thin_vols( domain_name, pool_name ) pool_support_compression = self._check_pool_support_compression( domain_name, pool_name ) pool = { "pool_name": sp_name, "total_capacity_gb": total_capacity_gb, "free_capacity_gb": free_capacity_gb, "QoS_support": True, "consistent_group_snapshot_enabled": True, "reserved_percentage": 0, "thin_provisioning_support": pool_support_thin_vols, "thick_provisioning_support": pool_support_thick_vols, "replication_enabled": stats["replication_enabled"], "replication_targets": stats["replication_targets"], "multiattach": True, "provisioned_capacity_gb": provisioned_capacity, "max_over_subscription_ratio": self.configuration.max_over_subscription_ratio, "compression_support": pool_support_compression, } pools.append(pool) backend_free_capacity += free_capacity_gb backend_total_capacity += total_capacity_gb backend_provisioned_capacity += provisioned_capacity stats["total_capacity_gb"] = backend_total_capacity stats["free_capacity_gb"] = backend_free_capacity stats["provisioned_capacity_gb"] = backend_provisioned_capacity LOG.info("Free capacity for backend '%(backend)s': %(free)s, " "total capacity: %(total)s, " "provisioned capacity: %(prov)s.", { "backend": stats["volume_backend_name"], "free": backend_free_capacity, "total": backend_total_capacity, "prov": backend_provisioned_capacity, }) stats["pools"] = pools # TODO: Should probably be enabled for SSDs as well stats['sparse_copy_volume'] = self.provisioning_type == 'thin' self._stats = stats def _query_pool_stats(self, domain_name, pool_name): """Get PowerFlex Storage Pool statistics. 
:param domain_name: name of PowerFlex Protection Domain :param pool_name: name of PowerFlex Storage Pool :return: total, free and provisioned capacity in GB """ client = self._get_client() url = "/types/StoragePool/instances/action/querySelectedStatistics" LOG.info("Query stats for Storage Pool %s.", pool_name) pool_id = client.get_storage_pool_id(domain_name, pool_name) props = self._get_queryable_statistics("StoragePool", pool_id) params = {"ids": [pool_id], "properties": props} r, response = client.execute_powerflex_post_request(url, params) if r.status_code != http_client.OK: msg = (_("Failed to query stats for Storage Pool %s.") % pool_name) raise exception.VolumeBackendAPIException(data=msg) # there is always exactly one value in response raw_pool_stats, = response.values() total_capacity_gb, free_capacity_gb, provisioned_capacity = ( self._compute_pool_stats(raw_pool_stats) ) LOG.info("Free capacity of Storage Pool %(domain)s:%(pool)s: " "%(free)s, total capacity: %(total)s, " "provisioned capacity: %(prov)s.", { "domain": domain_name, "pool": pool_name, "free": free_capacity_gb, "total": total_capacity_gb, "prov": provisioned_capacity, }) return total_capacity_gb, free_capacity_gb, provisioned_capacity def _compute_pool_stats(self, stats): client = self._get_client() if flex_utils.version_gte(client.query_rest_api_version(), "3.0"): return self._compute_pool_stats_v3(stats) # Divide by two because PowerFlex creates # a copy for each volume total_capacity_raw = flex_utils.convert_kb_to_gib( (stats["capacityLimitInKb"] - stats["spareCapacityInKb"]) / 2 ) total_capacity_gb = flex_utils.round_down_to_num_gran( total_capacity_raw ) # This property is already rounded # to 8 GB granularity in backend free_capacity_gb = flex_utils.convert_kb_to_gib( stats["capacityAvailableForVolumeAllocationInKb"] ) # some versions of the API had a typo in the response thin_capacity_allocated = stats.get("thinCapacityAllocatedInKm") if thin_capacity_allocated is None: thin_capacity_allocated = stats.get("thinCapacityAllocatedInKb", 0) # Divide by two because PowerFlex creates # a copy for each volume provisioned_capacity = flex_utils.convert_kb_to_gib( (stats["thickCapacityInUseInKb"] + stats["snapCapacityInUseInKb"] + thin_capacity_allocated) / 2 ) return total_capacity_gb, free_capacity_gb, provisioned_capacity @staticmethod def _compute_pool_stats_v3(stats): # in PowerFlex 3.5 snapCapacityInUseInKb is replaced by # snapshotCapacityInKb snap_capacity_allocated = stats.get("snapshotCapacityInKb") if snap_capacity_allocated is None: snap_capacity_allocated = stats.get("snapCapacityInUseInKb", 0) total_capacity_gb = flex_utils.convert_kb_to_gib( stats["netCapacityInUseInKb"] + stats["netUnusedCapacityInKb"] ) free_capacity_gb = flex_utils.convert_kb_to_gib( stats["netUnusedCapacityInKb"] ) provisioned_capacity_gb = flex_utils.convert_kb_to_gib( (stats["thickCapacityInUseInKb"] + snap_capacity_allocated + stats["thinCapacityAllocatedInKb"]) / 2 ) return total_capacity_gb, free_capacity_gb, provisioned_capacity_gb def _check_pool_support_thick_vols(self, domain_name, pool_name, secondary=False): # storage pools with fine granularity doesn't support # thick volumes return not self._is_fine_granularity_pool(domain_name, pool_name, secondary) def _check_pool_support_thin_vols(self, domain_name, pool_name, secondary=False): # thin volumes available since PowerFlex 2.x client = self._get_client(secondary) return flex_utils.version_gte(client.query_rest_api_version(), "2.0") def 
_check_pool_support_compression(self, domain_name, pool_name, secondary=False): # volume compression available only in storage pools # with fine granularity return self._is_fine_granularity_pool(domain_name, pool_name, secondary) def _is_fine_granularity_pool(self, domain_name, pool_name, secondary=False): client = self._get_client(secondary) if flex_utils.version_gte(client.query_rest_api_version(), "3.0"): r = client.get_storage_pool_properties(domain_name, pool_name) if r and "dataLayout" in r: return r["dataLayout"] == "FineGranularity" return False @staticmethod def _get_volumetype_extraspecs(volume): specs = {} ctxt = context.get_admin_context() type_id = volume["volume_type_id"] if type_id: volume_type = volume_types.get_volume_type(ctxt, type_id) specs = volume_type.get("extra_specs") for key, value in specs.items(): specs[key] = value return specs def _get_volumetype_qos(self, volume): qos = {} ctxt = context.get_admin_context() type_id = volume["volume_type_id"] if type_id: volume_type = volume_types.get_volume_type(ctxt, type_id) qos_specs_id = volume_type.get("qos_specs_id") if qos_specs_id is not None: specs = qos_specs.get_qos_specs(ctxt, qos_specs_id)["specs"] else: specs = {} for key, value in specs.items(): if key in self.powerflex_qos_keys: qos[key] = value return qos def migrate_volume(self, ctxt, volume, host): """Migrate PowerFlex volume within the same backend.""" LOG.info("Migrate volume %(vol_id)s to %(host)s.", {"vol_id": volume.id, "host": host["host"]}) client = self._get_client() def fall_back_to_host_assisted(): LOG.debug("Falling back to host-assisted migration.") return False, None if volume.is_replicated(): msg = _("Migration of replicated volumes is not allowed.") LOG.error(msg) raise exception.InvalidVolume(reason=msg) # Check migration between different backends src_backend = volume_utils.extract_host(volume.host, "backend") dst_backend = volume_utils.extract_host(host["host"], "backend") if src_backend != dst_backend: LOG.debug("Cross-backends migration is not supported " "by PowerFlex.") return fall_back_to_host_assisted() # Check migration is supported by storage API if not flex_utils.version_gte(client.query_rest_api_version(), "3.0"): LOG.debug("PowerFlex versions less than v3.0 do not " "support volume migration.") return fall_back_to_host_assisted() # Check storage pools compatibility src_pd, src_sp = self._extract_domain_and_pool_from_host(volume.host) dst_pd, dst_sp = self._extract_domain_and_pool_from_host(host["host"]) if not self._pools_compatible_for_migration(src_pd, src_sp, dst_pd, dst_sp): return fall_back_to_host_assisted() real_provisioning, vtree_id = ( self._get_real_provisioning_and_vtree(volume.provider_id) ) params = self._get_volume_migration_params(volume, dst_pd, dst_sp, real_provisioning) client.migrate_vtree(volume, params) try: self._wait_for_volume_migration_to_complete(vtree_id, volume.id) except loopingcall.LoopingCallTimeOut: # Volume migration is still in progress but timeout has expired. # Volume status is set to maintenance to prevent performing other # operations with volume. Migration status should be checked on the # storage side. If the migration successfully completed, volume # status should be manually changed to AVAILABLE. updates = { "status": fields.VolumeStatus.MAINTENANCE, } msg = (_("Migration of volume %s is still in progress " "but timeout has expired. Volume status is set to " "maintenance to prevent performing operations with this " "volume. 
Check the migration status " "on the storage side and set volume status manually if " "migration succeeded.") % volume.id) LOG.warning(msg) return True, updates return True, {} def _pools_compatible_for_migration(self, src_pd, src_sp, dst_pd, dst_sp): """Compare storage pools properties to determine migration possibility. Limitations: - For migration from Medium Granularity (MG) to Fine Granularity (FG) storage pool zero padding must be enabled on the MG pool. - For migration from MG to MG pool zero padding must be either enabled or disabled on both pools. """ client = self._get_client() src_zero_padding_enabled = client.is_volume_creation_safe(src_pd, src_sp) dst_zero_padding_enabled = client.is_volume_creation_safe(dst_pd, dst_sp) src_is_fg_pool = self._is_fine_granularity_pool(src_pd, src_sp) dst_is_fg_pool = self._is_fine_granularity_pool(dst_pd, dst_sp) if src_is_fg_pool: return True elif dst_is_fg_pool: if not src_zero_padding_enabled: LOG.debug("Migration from Medium Granularity storage pool " "with zero padding disabled to Fine Granularity one " "is not allowed.") return False return True elif not src_zero_padding_enabled == dst_zero_padding_enabled: LOG.debug("Zero padding must be either enabled or disabled on " "both storage pools.") return False return True def _get_real_provisioning_and_vtree(self, provider_id): """Get volume real provisioning type and vtree_id.""" response = self._get_client().query_volume(provider_id) try: provisioning = response["volumeType"] vtree_id = response["vtreeId"] return provisioning, vtree_id except KeyError: msg = (_("Query volume response does not contain " "required fields: volumeType and vtreeId.")) LOG.error(msg) raise exception.MalformedResponse( cmd="_get_real_provisioning_and_vtree", reason=msg ) def _get_volume_migration_params(self, volume, dst_domain_name, dst_pool_name, real_provisioning): client = self._get_client() dst_pool_id = client.get_storage_pool_id(dst_domain_name, dst_pool_name) params = { "destSPId": dst_pool_id, "volTypeConversion": "NoConversion", "compressionMethod": "None", "allowDuringRebuild": str( self.configuration.powerflex_allow_migration_during_rebuild ), } storage_type = self._get_volumetype_extraspecs(volume) provisioning, compression = self._get_provisioning_and_compression( storage_type, dst_domain_name, dst_pool_name ) pool_supports_thick_vols = self._check_pool_support_thick_vols( dst_domain_name, dst_pool_name ) if ( real_provisioning == "ThickProvisioned" and (provisioning == "ThinProvisioned" or not pool_supports_thick_vols) ): params["volTypeConversion"] = "ThickToThin" elif ( real_provisioning == "ThinProvisioned" and provisioning == "ThickProvisioned" and pool_supports_thick_vols ): params["volTypeConversion"] = "ThinToThick" params["compressionMethod"] = compression return params @utils.retry(exception.VolumeBackendAPIException, interval=5, backoff_rate=1, retries=3) def _wait_for_volume_migration_to_complete(self, vtree_id, vol_id): """Check volume migration status.""" LOG.debug("Wait for migration of volume %s to complete.", vol_id) def _inner(): response = self._get_client().query_vtree(vtree_id, vol_id) try: migration_status = ( response["vtreeMigrationInfo"]["migrationStatus"] ) migration_pause_reason = ( response["vtreeMigrationInfo"]["migrationPauseReason"] ) if ( migration_status == "NotInMigration" and not migration_pause_reason ): # Migration completed successfully. raise loopingcall.LoopingCallDone() elif migration_pause_reason: # Migration failed or paused on the storage side. 
# Volume remains on the source backend. msg = (_("Migration of volume %(vol_id)s failed or " "paused on the storage side. " "Migration status: %(status)s, " "pause reason: %(reason)s.") % {"vol_id": vol_id, "status": migration_status, "reason": migration_pause_reason}) LOG.error(msg) raise exception.VolumeMigrationFailed(msg) except KeyError: msg = (_("Check Migration status response does not contain " "required fields: migrationStatus and " "migrationPauseReason.")) LOG.error(msg) raise exception.MalformedResponse( cmd="_wait_for_volume_migration_to_complete", reason=msg ) timer = loopingcall.FixedIntervalWithTimeoutLoopingCall(_inner) timer.start(interval=30, timeout=3600).wait() def update_migrated_volume(self, ctxt, volume, new_volume, original_volume_status): """Update volume name of new PowerFlex volume to match updated ID. Original volume is renamed first since PowerFlex does not allow multiple volumes to have same name. """ client = self._get_client() name_id = None location = None if original_volume_status == fields.VolumeStatus.AVAILABLE: # During migration, a new volume is created and will replace # the original volume at the end of the migration. We need to # rename the new volume. The current_name of the new volume, # which is the id of the new volume, will be changed to the # new_name, which is the id of the original volume. current_name = new_volume.id new_name = volume.id vol_id = new_volume.id LOG.info("Rename volume %(vol_id)s from %(current_name)s to " "%(new_name)s.", { "vol_id": vol_id, "current_name": current_name, "new_name": new_name, }) # Original volume needs to be renamed first client.rename_volume(volume, "ff" + new_name) client.rename_volume(new_volume, new_name) LOG.info("Successfully renamed volume %(vol_id)s to %(new_name)s.", {"vol_id": vol_id, "new_name": new_name}) else: # The back-end will not be renamed. name_id = getattr(new_volume, "_name_id", None) or new_volume.id location = new_volume.provider_location return {"_name_id": name_id, "provider_location": location} def revert_to_snapshot(self, context, volume, snapshot): """Revert PowerFlex volume to the specified snapshot.""" LOG.info("Revert volume %(vol_id)s to snapshot %(snap_id)s.", {"vol_id": volume.id, "snap_id": snapshot.id}) client = self._get_client() if not flex_utils.version_gte(client.query_rest_api_version(), "3.0"): LOG.debug("PowerFlex versions less than v3.0 do not " "support reverting volume to snapshot. " "Falling back to generic revert to snapshot method.") raise NotImplementedError elif volume.is_replicated(): msg = _("Reverting replicated volume is not allowed.") LOG.error(msg) raise exception.InvalidVolume(reason=msg) elif snapshot.volume_size != volume.size: msg = (_("Volume %(vol_id)s size is not equal to snapshot " "%(snap_id)s size. 
Revert to snapshot operation is not " "allowed.") % {"vol_id": volume.id, "snap_id": snapshot.id}) LOG.error(msg) raise exception.InvalidVolume(reason=msg) client.overwrite_volume_content(volume, snapshot) def _query_powerflex_volume(self, volume, existing_ref): type_id = volume.get("volume_type_id") if "source-id" not in existing_ref: reason = _("Reference must contain source-id.") raise exception.ManageExistingInvalidReference( existing_ref=existing_ref, reason=reason ) if type_id is None: reason = _("Volume must have a volume type.") raise exception.ManageExistingVolumeTypeMismatch( existing_ref=existing_ref, reason=reason ) vol_id = existing_ref["source-id"] LOG.info("Query volume %(vol_id)s with PowerFlex id %(provider_id)s.", {"vol_id": volume.id, "provider_id": vol_id}) response = self._get_client().query_volume(vol_id) self._manage_existing_check_legal_response(response, existing_ref) return response def _get_all_powerflex_volumes(self): """Get all volumes in configured PowerFlex Storage Pools.""" client = self._get_client() url = ("/instances/StoragePool::%(storage_pool_id)s" "/relationships/Volume") all_volumes = [] # check for every storage pool configured for sp_name in self.storage_pools: splitted_name = sp_name.split(":") domain_name = splitted_name[0] pool_name = splitted_name[1] sp_id = client.get_storage_pool_id(domain_name, pool_name) r, volumes = client.execute_powerflex_get_request( url, storage_pool_id=sp_id ) if r.status_code != http_client.OK: msg = (_("Failed to query volumes in Storage Pool " "%(pool_name)s of Protection Domain " "%(domain_name)s.") % {"pool_name": pool_name, "domain_name": domain_name}) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) all_volumes.extend(volumes) return all_volumes def get_manageable_volumes(self, cinder_volumes, marker, limit, offset, sort_keys, sort_dirs): """List volumes on storage backend available for management by Cinder. Rule out volumes that are mapped to SDC or are already in list of cinder_volumes. Return references of volume ids for any others. """ all_sio_volumes = self._get_all_powerflex_volumes() # Put together a map of existing cinder volumes on the array # so we can lookup cinder id's to SIO id existing_vols = {} for cinder_vol in cinder_volumes: provider_id = cinder_vol.provider_id existing_vols[provider_id] = cinder_vol.name_id manageable_volumes = [] for sio_vol in all_sio_volumes: cinder_id = existing_vols.get(sio_vol["id"]) is_safe = True reason = None if sio_vol["mappedSdcInfo"]: is_safe = False hosts_connected = len(sio_vol["mappedSdcInfo"]) reason = _("Volume mapped to %d host(s).") % hosts_connected if cinder_id: is_safe = False reason = _("Volume already managed.") if sio_vol["volumeType"] != "Snapshot": manageable_volumes.append( { "reference": { "source-id": sio_vol["id"], }, "size": flex_utils.convert_kb_to_gib( sio_vol["sizeInKb"] ), "safe_to_manage": is_safe, "reason_not_safe": reason, "cinder_id": cinder_id, "extra_info": { "volumeType": sio_vol["volumeType"], "name": sio_vol["name"], }, }) return volume_utils.paginate_entries_list(manageable_volumes, marker, limit, offset, sort_keys, sort_dirs) def _is_managed(self, volume_id): lst = objects.VolumeList.get_all_by_host(context.get_admin_context(), self.host) for vol in lst: if vol.provider_id == volume_id: return True return False def manage_existing(self, volume, existing_ref): """Manage existing PowerFlex volume. 
:param volume: volume to be managed :param existing_ref: dictionary of form {'source-id': 'id of PowerFlex volume'} """ response = self._query_powerflex_volume(volume, existing_ref) return {"provider_id": response["id"]} def manage_existing_get_size(self, volume, existing_ref): return self._get_volume_size(volume, existing_ref) def manage_existing_snapshot(self, snapshot, existing_ref): """Manage existing PowerFlex snapshot. :param snapshot: snapshot to be managed :param existing_ref: dictionary of form {'source-id': 'id of PowerFlex snapshot'} """ response = self._query_powerflex_volume(snapshot, existing_ref) not_real_parent = (response.get("orig_parent_overriden") or response.get("is_source_deleted")) if not_real_parent: reason = (_("Snapshot's parent is not original parent due " "to deletion or revert action, therefore " "this snapshot cannot be managed.")) raise exception.ManageExistingInvalidReference( existing_ref=existing_ref, reason=reason ) ancestor_id = response["ancestorVolumeId"] volume_id = snapshot.volume.provider_id if ancestor_id != volume_id: reason = (_("Snapshot's parent in PowerFlex is %(ancestor_id)s " "and not %(vol_id)s.") % {"ancestor_id": ancestor_id, "vol_id": volume_id}) raise exception.ManageExistingInvalidReference( existing_ref=existing_ref, reason=reason ) return {"provider_id": response["id"]} def manage_existing_snapshot_get_size(self, snapshot, existing_ref): return self._get_volume_size(snapshot, existing_ref) def _get_volume_size(self, volume, existing_ref): response = self._query_powerflex_volume(volume, existing_ref) return int(math.ceil(float(response["sizeInKb"]) / units.Mi)) def _manage_existing_check_legal_response(self, response, existing_ref): # check if it is already managed if self._is_managed(response["id"]): reason = _("Failed to manage volume. Volume is already managed.") raise exception.ManageExistingInvalidReference( existing_ref=existing_ref, reason=reason ) if response["mappedSdcInfo"] is not None: reason = _("Failed to manage volume. " "Volume is connected to hosts. " "Please disconnect volume from existing hosts " "before importing.") raise exception.ManageExistingInvalidReference( existing_ref=existing_ref, reason=reason ) def create_group(self, context, group): """Create Consistency Group. PowerFlex won't create CG until cg-snapshot creation, db will maintain the volumes and CG relationship. """ # let generic volume group support handle non-cgsnapshots if not volume_utils.is_group_a_cg_snapshot_type(group): raise NotImplementedError() LOG.info("Create Consistency Group %s.", group.id) model_updates = {"status": fields.GroupStatus.AVAILABLE} return model_updates def delete_group(self, context, group, volumes): """Delete Consistency Group. PowerFlex will delete volumes of CG. 
""" # let generic volume group support handle non-cgsnapshots if not volume_utils.is_group_a_cg_snapshot_type(group): raise NotImplementedError() LOG.info("Delete Consistency Group %s.", group.id) model_updates = {"status": fields.GroupStatus.DELETED} error_statuses = [ fields.GroupStatus.ERROR, fields.GroupStatus.ERROR_DELETING, ] volume_model_updates = [] for volume in volumes: update_item = {"id": volume.id} try: self.delete_volume(volume) update_item["status"] = "deleted" except exception.VolumeBackendAPIException: update_item["status"] = fields.VolumeStatus.ERROR_DELETING if model_updates["status"] not in error_statuses: model_updates["status"] = fields.GroupStatus.ERROR_DELETING LOG.error("Failed to delete volume %(vol_id)s of " "group %(group_id)s.", {"vol_id": volume.id, "group_id": group.id}) volume_model_updates.append(update_item) return model_updates, volume_model_updates def create_group_snapshot(self, context, group_snapshot, snapshots): """Create Consistency Group snapshot.""" # let generic volume group support handle non-cgsnapshots if not volume_utils.is_group_a_cg_snapshot_type(group_snapshot): raise NotImplementedError() snapshot_model_updates = [] for snapshot in snapshots: update_item = self.create_snapshot(snapshot) update_item["id"] = snapshot.id update_item["status"] = fields.SnapshotStatus.AVAILABLE snapshot_model_updates.append(update_item) model_updates = {"status": fields.GroupStatus.AVAILABLE} return model_updates, snapshot_model_updates def delete_group_snapshot(self, context, group_snapshot, snapshots): """Delete Consistency Group snapshot.""" # let generic volume group support handle non-cgsnapshots if not volume_utils.is_group_a_cg_snapshot_type(group_snapshot): raise NotImplementedError() LOG.info("Delete Consistency Group Snapshot %s.", group_snapshot.id) model_updates = {"status": fields.SnapshotStatus.DELETED} error_statuses = [ fields.SnapshotStatus.ERROR, fields.SnapshotStatus.ERROR_DELETING, ] snapshot_model_updates = [] for snapshot in snapshots: update_item = {"id": snapshot.id} try: self.delete_snapshot(snapshot) update_item["status"] = fields.SnapshotStatus.DELETED except exception.VolumeBackendAPIException: update_item["status"] = fields.SnapshotStatus.ERROR_DELETING if model_updates["status"] not in error_statuses: model_updates["status"] = ( fields.SnapshotStatus.ERROR_DELETING ) LOG.error("Failed to delete snapshot %(snap_id)s " "of group snapshot %(group_snap_id)s.", { "snap_id": snapshot.id, "group_snap_id": group_snapshot.id, }) snapshot_model_updates.append(update_item) return model_updates, snapshot_model_updates def create_group_from_src(self, context, group, volumes, group_snapshot=None, snapshots=None, source_group=None, source_vols=None): """Create Consistency Group from source.""" # let generic volume group support handle non-cgsnapshots if not volume_utils.is_group_a_cg_snapshot_type(group): raise NotImplementedError() if group_snapshot and snapshots: sources = snapshots else: sources = source_vols volume_model_updates = [] for source, volume in zip(sources, volumes): update_item = self.create_cloned_volume(volume, source) update_item["id"] = volume.id update_item["status"] = fields.VolumeStatus.AVAILABLE volume_model_updates.append(update_item) model_updates = {"status": fields.GroupStatus.AVAILABLE} return model_updates, volume_model_updates def update_group(self, context, group, add_volumes=None, remove_volumes=None): """Update Consistency Group. PowerFlex does not handle volume grouping. 
Cinder maintains volumes and CG relationship. """ if volume_utils.is_group_a_cg_snapshot_type(group): return None, None, None # we'll rely on the generic group implementation if it is not a # consistency group request. raise NotImplementedError() def ensure_export(self, context, volume): """Driver entry point to get export info for existing volume.""" pass def create_export(self, context, volume, connector): """Driver entry point to get export info for new volume.""" pass def remove_export(self, context, volume): """Driver entry point to remove export for volume.""" pass def check_for_export(self, context, volume_id): """Make sure volume is exported.""" pass def initialize_connection_snapshot(self, snapshot, connector, **kwargs): """Initialize connection and return connection info.""" try: vol_size = snapshot.volume_size except Exception: vol_size = None return self._initialize_connection(snapshot, connector, vol_size) def terminate_connection_snapshot(self, snapshot, connector, **kwargs): """Terminate connection to snapshot.""" return self._terminate_connection(snapshot, connector) def create_export_snapshot(self, context, volume, connector): """Driver entry point to get export info for snapshot.""" pass def remove_export_snapshot(self, context, volume): """Driver entry point to remove export for snapshot.""" pass def backup_use_temp_snapshot(self): return True ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/drivers/dell_emc/powerflex/options.py0000664000175000017500000001731300000000000025151 0ustar00zuulzuul00000000000000# Copyright (c) 2017-2020 Dell Inc. or its subsidiaries. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Configuration options for Dell EMC PowerFlex (formerly named Dell EMC VxFlex OS). 
""" from oslo_config import cfg from cinder.volume.drivers.dell_emc.powerflex import utils as flex_utils # deprecated options VXFLEXOS_REST_SERVER_PORT = "vxflexos_rest_server_port" VXFLEXOS_ROUND_VOLUME_CAPACITY = "vxflexos_round_volume_capacity" VXFLEXOS_UNMAP_VOLUME_BEFORE_DELETION = "vxflexos_unmap_volume_before_deletion" VXFLEXOS_STORAGE_POOLS = "vxflexos_storage_pools" VXFLEXOS_SERVER_API_VERSION = "vxflexos_server_api_version" VXFLEXOS_MAX_OVER_SUBSCRIPTION_RATIO = "vxflexos_max_over_subscription_ratio" VXFLEXOS_ALLOW_NON_PADDED_VOLUMES = "vxflexos_allow_non_padded_volumes" VXFLEXOS_ALLOW_MIGRATION_DURING_REBUILD = ( "vxflexos_allow_migration_during_rebuild") # actual options POWERFLEX_REST_SERVER_PORT = "powerflex_rest_server_port" POWERFLEX_ROUND_VOLUME_CAPACITY = "powerflex_round_volume_capacity" POWERFLEX_UNMAP_VOLUME_BEFORE_DELETION = ( "powerflex_unmap_volume_before_deletion") POWERFLEX_STORAGE_POOLS = "powerflex_storage_pools" POWERFLEX_SERVER_API_VERSION = "powerflex_server_api_version" POWERFLEX_MAX_OVER_SUBSCRIPTION_RATIO = "powerflex_max_over_subscription_ratio" POWERFLEX_ALLOW_NON_PADDED_VOLUMES = "powerflex_allow_non_padded_volumes" POWERFLEX_ALLOW_MIGRATION_DURING_REBUILD = ( "powerflex_allow_migration_during_rebuild") deprecated_opts = [ cfg.PortOpt(VXFLEXOS_REST_SERVER_PORT, default=443, help='renamed to %s.' % POWERFLEX_REST_SERVER_PORT, deprecated_for_removal=True, deprecated_reason='Replaced by %s.' % POWERFLEX_REST_SERVER_PORT), cfg.BoolOpt(VXFLEXOS_ROUND_VOLUME_CAPACITY, default=True, help='renamed to %s.' % POWERFLEX_ROUND_VOLUME_CAPACITY, deprecated_for_removal=True, deprecated_reason='Replaced by %s.' % POWERFLEX_ROUND_VOLUME_CAPACITY), cfg.BoolOpt(VXFLEXOS_UNMAP_VOLUME_BEFORE_DELETION, default=False, help='renamed to %s.' % POWERFLEX_ROUND_VOLUME_CAPACITY, deprecated_for_removal=True, deprecated_reason='Replaced by %s.' % POWERFLEX_ROUND_VOLUME_CAPACITY), cfg.StrOpt(VXFLEXOS_STORAGE_POOLS, help='renamed to %s.' % POWERFLEX_STORAGE_POOLS, deprecated_for_removal=True, deprecated_reason='Replaced by %s.' % POWERFLEX_STORAGE_POOLS), cfg.StrOpt(VXFLEXOS_SERVER_API_VERSION, help='renamed to %s.' % POWERFLEX_SERVER_API_VERSION, deprecated_for_removal=True, deprecated_reason='Replaced by %s.' % POWERFLEX_SERVER_API_VERSION), cfg.FloatOpt(VXFLEXOS_MAX_OVER_SUBSCRIPTION_RATIO, # This option exists to provide a default value for the # PowerFlex driver which is different than the global default. default=10.0, help='renamed to %s.' % POWERFLEX_MAX_OVER_SUBSCRIPTION_RATIO, deprecated_for_removal=True, deprecated_reason='Replaced by %s.' % POWERFLEX_MAX_OVER_SUBSCRIPTION_RATIO), cfg.BoolOpt(VXFLEXOS_ALLOW_NON_PADDED_VOLUMES, default=False, help='renamed to %s.' % POWERFLEX_ALLOW_NON_PADDED_VOLUMES, deprecated_for_removal=True, deprecated_reason='Replaced by %s.' % POWERFLEX_ALLOW_NON_PADDED_VOLUMES), cfg.BoolOpt(VXFLEXOS_ALLOW_MIGRATION_DURING_REBUILD, default=False, help='renamed to %s.' % POWERFLEX_ALLOW_MIGRATION_DURING_REBUILD, deprecated_for_removal=True, deprecated_reason='Replaced by %s.' % POWERFLEX_ALLOW_MIGRATION_DURING_REBUILD), ] actual_opts = [ cfg.PortOpt(POWERFLEX_REST_SERVER_PORT, default=443, help='Gateway REST server port.', deprecated_name=VXFLEXOS_REST_SERVER_PORT), cfg.BoolOpt(POWERFLEX_ROUND_VOLUME_CAPACITY, default=True, help='Round volume sizes up to 8GB boundaries. ' 'PowerFlex/VxFlex OS requires volumes to be sized ' 'in multiples of 8GB. 
If set to False, volume ' 'creation will fail for volumes not sized properly', deprecated_name=VXFLEXOS_ROUND_VOLUME_CAPACITY ), cfg.BoolOpt(POWERFLEX_UNMAP_VOLUME_BEFORE_DELETION, default=False, help='Unmap volumes before deletion.', deprecated_name=VXFLEXOS_UNMAP_VOLUME_BEFORE_DELETION), cfg.StrOpt(POWERFLEX_STORAGE_POOLS, help='Storage Pools. Comma separated list of storage ' 'pools used to provide volumes. Each pool should ' 'be specified as a ' 'protection_domain_name:storage_pool_name value', deprecated_name=VXFLEXOS_STORAGE_POOLS), cfg.StrOpt(POWERFLEX_SERVER_API_VERSION, help='PowerFlex/ScaleIO API version. This value should be ' 'left as the default value unless otherwise instructed ' 'by technical support.', deprecated_name=VXFLEXOS_SERVER_API_VERSION), cfg.FloatOpt(POWERFLEX_MAX_OVER_SUBSCRIPTION_RATIO, # This option exists to provide a default value for the # PowerFlex driver which is different than the global default. default=10.0, help='max_over_subscription_ratio setting for the driver. ' 'Maximum value allowed is 10.0.', deprecated_name=VXFLEXOS_MAX_OVER_SUBSCRIPTION_RATIO), cfg.BoolOpt(POWERFLEX_ALLOW_NON_PADDED_VOLUMES, default=False, help='Allow volumes to be created in Storage Pools ' 'when zero padding is disabled. This option should ' 'not be enabled if multiple tenants will utilize ' 'volumes from a shared Storage Pool.', deprecated_name=VXFLEXOS_ALLOW_NON_PADDED_VOLUMES), cfg.BoolOpt(POWERFLEX_ALLOW_MIGRATION_DURING_REBUILD, default=False, help='Allow volume migration during rebuild.', deprecated_name=VXFLEXOS_ALLOW_MIGRATION_DURING_REBUILD), cfg.IntOpt(flex_utils.POWERFLEX_REST_CONNECT_TIMEOUT, default=30, min=1, help='Use this value to specify connect ' 'timeout value (in seconds) for rest call.'), cfg.IntOpt(flex_utils.POWERFLEX_REST_READ_TIMEOUT, default=30, min=1, help='Use this value to specify read ' 'timeout value (in seconds) for rest call.') ] ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/drivers/dell_emc/powerflex/rest_client.py0000664000175000017500000011012500000000000025764 0ustar00zuulzuul00000000000000# Copyright (c) 2020 Dell Inc. or its subsidiaries. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
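# Illustrative usage sketch only (not part of the upstream module): the
# PowerFlex driver is assumed to construct this client with its backend
# configuration and to call do_setup() before issuing any REST calls, e.g.
#
#     client = RestClient(configuration)         # configuration supplies san_ip,
#     client.do_setup()                          # san_login and san_password
#     api_version = client.query_rest_api_version()
#     volume = client.query_volume(provider_id)  # provider_id is a placeholder
#
# Error handling (exception.VolumeBackendAPIException) is omitted here for
# brevity.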
import http.client as http_client import json import re import urllib.parse from oslo_log import log as logging from oslo_utils import units import requests from requests.exceptions import Timeout from cinder import exception from cinder.i18n import _ from cinder.utils import retry from cinder.volume.drivers.dell_emc.powerflex import simplecache from cinder.volume.drivers.dell_emc.powerflex import utils as flex_utils LOG = logging.getLogger(__name__) VOLUME_MIGRATION_IN_PROGRESS_ERROR = 717 VOLUME_MIGRATION_ALREADY_ON_DESTINATION_POOL_ERROR = 718 VOLUME_NOT_FOUND_ERROR = 79 OLD_VOLUME_NOT_FOUND_ERROR = 78 TOO_MANY_SNAPS_ERROR = 182 ILLEGAL_SYNTAX = 0 MAX_SNAPS_IN_VTREE = 126 class RestClient(object): def __init__(self, configuration, is_primary=True): self.configuration = configuration self.is_primary = is_primary self.spCache = simplecache.SimpleCache("Storage Pool", age_minutes=5) self.pdCache = simplecache.SimpleCache("Protection Domain", age_minutes=5) self.rcgCache = simplecache.SimpleCache("Replication CG", age_minutes=5) self.rest_ip = None self.rest_port = None self.rest_username = None self.rest_password = None self.rest_token = None self.rest_api_version = None self.verify_certificate = None self.certificate_path = None self.base_url = None self.is_configured = False self.rest_api_connect_timeout = 30 self.rest_api_read_timeout = 30 @staticmethod def _get_headers(): return {"content-type": "application/json"} def do_setup(self): if self.is_primary: get_config_value = self.configuration.safe_get else: replication_targets = self.configuration.safe_get( "replication_device" ) if not replication_targets: return elif len(replication_targets) > 1: msg = _("PowerFlex does not support more than one " "replication backend.") raise exception.InvalidInput(reason=msg) get_config_value = replication_targets[0].get self.verify_certificate = bool( get_config_value("sio_verify_server_certificate") or get_config_value("driver_ssl_cert_verify") ) self.rest_ip = get_config_value("san_ip") self.rest_port = int( get_config_value("powerflex_rest_server_port") or get_config_value("sio_rest_server_port") or 443 ) self.rest_username = get_config_value("san_login") self.rest_password = get_config_value("san_password") self.rest_api_connect_timeout = ( get_config_value(flex_utils.POWERFLEX_REST_CONNECT_TIMEOUT) or self.rest_api_connect_timeout) self.rest_api_read_timeout = ( get_config_value(flex_utils.POWERFLEX_REST_READ_TIMEOUT) or self.rest_api_read_timeout) if self.verify_certificate: self.certificate_path = ( get_config_value("sio_server_certificate_path") or get_config_value("driver_ssl_cert_path") ) if not all([self.rest_ip, self.rest_username, self.rest_password]): msg = _("REST server IP, username and password must be specified.") raise exception.InvalidInput(reason=msg) # validate certificate settings if self.verify_certificate and not self.certificate_path: msg = _("Path to REST server's certificate must be specified.") raise exception.InvalidInput(reason=msg) # log warning if not using certificates if not self.verify_certificate: LOG.warning("Verify certificate is not set, using default of " "False.") self.base_url = ("https://%(server_ip)s:%(server_port)s/api" % { "server_ip": self.rest_ip, "server_port": self.rest_port }) LOG.info("REST server IP: %(ip)s, port: %(port)s, " "username: %(user)s, rest connect timeout: " "%(rest_api_connect_timeout)s, rest read timeout: " "%(rest_api_read_timeout)s. 
Verify server's certificate: " "%(verify_cert)s.", { "ip": self.rest_ip, "port": self.rest_port, "user": self.rest_username, "verify_cert": self.verify_certificate, "rest_api_connect_timeout": self.rest_api_connect_timeout, "rest_api_read_timeout": self.rest_api_read_timeout }) self.is_configured = True def query_rest_api_version(self, fromcache=True): url = "/version" if self.rest_api_version is None or fromcache is False: r, unused = self.execute_powerflex_get_request(url) if r.status_code == http_client.OK: self.rest_api_version = r.text.replace('\"', "") LOG.info("REST API Version: %(api_version)s.", {"api_version": self.rest_api_version}) else: msg = (_("Failed to query REST API version. " "Status code: %d.") % r.status_code) raise exception.VolumeBackendAPIException(data=msg) # make sure the response was valid pattern = re.compile(r"^\d+(\.\d+)*$") if not pattern.match(self.rest_api_version): msg = (_("Failed to query REST API version. Response: %s.") % r.text) raise exception.VolumeBackendAPIException(data=msg) return self.rest_api_version def query_volume(self, vol_id): url = "/instances/Volume::%(vol_id)s" r, response = self.execute_powerflex_get_request(url, vol_id=vol_id) if r.status_code != http_client.OK and "errorCode" in response: msg = (_("Failed to query volume: %s.") % response["message"]) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) return response def create_volume(self, protection_domain_name, storage_pool_name, volume_id, volume_size, provisioning, compression): url = "/types/Volume/instances" domain_id = self._get_protection_domain_id(protection_domain_name) LOG.info("Protection Domain id: %s.", domain_id) pool_id = self.get_storage_pool_id(protection_domain_name, storage_pool_name) LOG.info("Storage Pool id: %s.", pool_id) volume_name = flex_utils.id_to_base64(volume_id) # units.Mi = 1024 ** 2 volume_size_kb = volume_size * units.Mi params = { "protectionDomainId": domain_id, "storagePoolId": pool_id, "name": volume_name, "volumeType": provisioning, "volumeSizeInKb": str(volume_size_kb), "compressionMethod": compression, } r, response = self.execute_powerflex_post_request(url, params) if r.status_code != http_client.OK and "errorCode" in response: msg = (_("Failed to create volume: %s.") % response["message"]) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) return response["id"] def snapshot_volume(self, volume_provider_id, snapshot_id): url = "/instances/System/action/snapshotVolumes" snap_name = flex_utils.id_to_base64(snapshot_id) params = { "snapshotDefs": [ { "volumeId": volume_provider_id, "snapshotName": snap_name, }, ], } r, response = self.execute_powerflex_post_request(url, params) if r.status_code != http_client.OK and "errorCode" in response: msg = (_("Failed to create snapshot for volume %(vol_name)s: " "%(response)s.") % {"vol_name": volume_provider_id, "response": response["message"]}) LOG.error(msg) # check if the volume reached snapshot limit if ("details" in response and response["details"][0]["rc"] == TOO_MANY_SNAPS_ERROR): raise exception.SnapshotLimitReached( set_limit=MAX_SNAPS_IN_VTREE) raise exception.VolumeBackendAPIException(data=msg) return response["volumeIdList"][0] def _get_replication_cg_id_by_name(self, rcg_name): url = ("/types/ReplicationConsistencyGroup/instances" "/action/queryIdByKey") if not rcg_name: msg = _("Unable to query Replication CG id with None name.") LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) cached_val = self.rcgCache.get_value(rcg_name) if cached_val 
is not None: return cached_val encoded_rcg_name = urllib.parse.quote(rcg_name, "") params = {"name": encoded_rcg_name} r, rcg_id = self.execute_powerflex_post_request(url, params) if not rcg_id: msg = (_("Replication CG with name %s wasn't found.") % rcg_id) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) if r.status_code != http_client.OK and "errorCode" in rcg_id: msg = (_("Failed to get Replication CG id with name " "%(name)s: %(message)s.") % {"name": rcg_name, "message": rcg_id["message"]}) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) LOG.info("Replication CG id: %s.", rcg_id) self.rcgCache.update(rcg_name, rcg_id) return rcg_id def _query_volumes_pair(self, pair_id): url = "/instances/ReplicationPair::%(pair_id)s" r, response = self.execute_powerflex_get_request(url, pair_id=pair_id) if r.status_code != http_client.OK and "errorCode" in response: msg = (_("Failed to query volumes pair %(pair_id)s: %(err)s.") % {"pair_id": pair_id, "err": response["message"]}) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) return response def _query_replication_pairs(self): url = "/types/ReplicationPair/instances" r, response = self.execute_powerflex_get_request(url) if r.status_code != http_client.OK and "errorCode" in response: msg = (_("Failed to query replication pairs: %s.") % response["message"]) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) return response @staticmethod def _filter_replication_pairs(replication_pairs, filter_key, filter_value): try: return next(filter(lambda pair: pair[filter_key] == filter_value, replication_pairs)) except StopIteration: msg = (_("Volume pair for volume with id %s is not found.") % filter_value) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) def get_volumes_pair_attrs(self, filter_key, filter_value): replication_pairs = self._query_replication_pairs() founded = self._filter_replication_pairs(replication_pairs, filter_key, filter_value) pair_id = founded["id"] remote_pair_id = founded["remoteId"] vol_provider_id = founded["localVolumeId"] remote_vol_provider_id = founded["remoteVolumeId"] return pair_id, remote_pair_id, vol_provider_id, remote_vol_provider_id def create_volumes_pair(self, rcg_name, source_provider_id, dest_provider_id): url = "/types/ReplicationPair/instances" rcg_id = self._get_replication_cg_id_by_name(rcg_name) params = { "name": source_provider_id, "replicationConsistencyGroupId": rcg_id, "copyType": "OnlineCopy", "sourceVolumeId": source_provider_id, "destinationVolumeId": dest_provider_id, } r, response = self.execute_powerflex_post_request(url, params, ) if r.status_code != http_client.OK and "errorCode" in response: msg = (_("Failed to create volumes pair: %s.") % response["message"]) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) replication_pair = self._query_volumes_pair(response["id"]) LOG.info("Created volumes pair %(vol_pair_id)s. 
" "Remote pair %(remote_pair_id)s.", { "vol_pair_id": replication_pair["id"], "remote_pair_id": replication_pair["remoteId"], }) return replication_pair["id"], replication_pair["remoteId"] def remove_volumes_pair(self, vol_pair_id): url = ("/instances/ReplicationPair::%(vol_pair_id)s/action" "/removeReplicationPair") r, response = self.execute_powerflex_post_request( url, vol_pair_id=vol_pair_id ) if r.status_code != http_client.OK: msg = (_("Failed to delete volumes pair " "%(vol_pair_id)s: %(err)s.") % {"vol_pair_id": vol_pair_id, "err": response["message"]}) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) def _get_protection_domain_id_by_name(self, domain_name): url = "/types/Domain/instances/getByName::%(encoded_domain_name)s" if not domain_name: msg = _("Unable to query Protection Domain id with None name.") LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) encoded_domain_name = urllib.parse.quote(domain_name, "") r, domain_id = self.execute_powerflex_get_request( url, encoded_domain_name=encoded_domain_name ) if not domain_id: msg = (_("Prorection Domain with name %s wasn't found.") % domain_name) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) if r.status_code != http_client.OK and "errorCode" in domain_id: msg = (_("Failed to get Protection Domain id with name " "%(name)s: %(err_msg)s.") % {"name": domain_name, "err_msg": domain_id["message"]}) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) LOG.info("Protection Domain id: %s.", domain_id) return domain_id def _get_protection_domain_id(self, domain_name): response = self._get_protection_domain_properties(domain_name) if response is None: return None return response["id"] def _get_protection_domain_properties(self, domain_name): url = "/instances/ProtectionDomain::%(domain_id)s" cached_val = self.pdCache.get_value(domain_name) if cached_val is not None: return cached_val domain_id = self._get_protection_domain_id_by_name(domain_name) r, response = self.execute_powerflex_get_request( url, domain_id=domain_id ) if r.status_code != http_client.OK: msg = (_("Failed to get domain properties from id %(domain_id)s: " "%(err_msg)s.") % {"domain_id": domain_id, "err_msg": response}) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) self.pdCache.update(domain_name, response) return response def _get_storage_pool_id_by_name(self, domain_name, pool_name): url = ("/types/Pool/instances/getByName::" "%(domain_id)s,%(encoded_pool_name)s") if not domain_name or not pool_name: msg = (_("Unable to query storage pool id for " "Pool %(pool_name)s and Domain %(domain_name)s.") % {"pool_name": pool_name, "domain_name": domain_name}) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) domain_id = self._get_protection_domain_id(domain_name) encoded_pool_name = urllib.parse.quote(pool_name, "") r, pool_id = self.execute_powerflex_get_request( url, domain_id=domain_id, encoded_pool_name=encoded_pool_name ) if not pool_id: msg = (_("Pool with name %(pool_name)s wasn't found in " "domain %(domain_id)s.") % {"pool_name": pool_name, "domain_id": domain_id}) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) if r.status_code != http_client.OK and "errorCode" in pool_id: msg = (_("Failed to get pool id from name %(pool_name)s: " "%(err_msg)s.") % {"pool_name": pool_name, "err_msg": pool_id["message"]}) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) LOG.info("Pool id: %s.", pool_id) return pool_id def 
get_storage_pool_properties(self, domain_name, pool_name): url = "/instances/StoragePool::%(pool_id)s" fullname = "{}:{}".format(domain_name, pool_name) cached_val = self.spCache.get_value(fullname) if cached_val is not None: return cached_val pool_id = self._get_storage_pool_id_by_name(domain_name, pool_name) r, response = self.execute_powerflex_get_request(url, pool_id=pool_id) if r.status_code != http_client.OK: msg = (_("Failed to get pool properties from id %(pool_id)s: " "%(err_msg)s.") % {"pool_id": pool_id, "err_msg": response}) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) self.spCache.update(fullname, response) return response def get_storage_pool_id(self, domain_name, pool_name): response = self.get_storage_pool_properties(domain_name, pool_name) if response is None: return None return response["id"] def _get_verify_cert(self): verify_cert = False if self.verify_certificate: verify_cert = self.certificate_path return verify_cert def execute_powerflex_get_request(self, url, **url_params): r = requests.Response try: request = self.base_url + url % url_params timeout = (self.rest_api_connect_timeout, self.rest_api_read_timeout) r = requests.get(request, auth=(self.rest_username, self.rest_token), verify=self._get_verify_cert(), timeout=timeout) r = self._check_response(r, request) response = r.json() except Timeout as e: r.status_code = http_client.INTERNAL_SERVER_ERROR msg = _("The request to URL %s failed with timeout " "exception %s" % (url, str(e))) LOG.error(msg) response = {'errorCode': http_client.INTERNAL_SERVER_ERROR, 'message': msg} return r, response def execute_powerflex_post_request(self, url, params=None, **url_params): r = requests.Response try: if not params: params = {} request = self.base_url + url % url_params timeout = (self.rest_api_connect_timeout, self.rest_api_read_timeout) r = requests.post(request, data=json.dumps(params), headers=self._get_headers(), auth=(self.rest_username, self.rest_token), verify=self._get_verify_cert(), timeout=timeout) r = self._check_response(r, request, False, params) try: response = r.json() except ValueError: response = None except Timeout as e: r.status_code = http_client.INTERNAL_SERVER_ERROR msg = _("The request to URL %s failed with timeout " "exception %s" % (url, str(e))) LOG.error(msg) response = {'errorCode': http_client.INTERNAL_SERVER_ERROR, 'message': msg} return r, response def _check_response(self, response, request, is_get_request=True, params=None): login_url = "/login" if (response.status_code == http_client.UNAUTHORIZED or response.status_code == http_client.FORBIDDEN): LOG.info("Token is invalid, going to re-login and get " "a new one.") login_request = self.base_url + login_url verify_cert = self._get_verify_cert() timeout = (self.rest_api_connect_timeout, self.rest_api_read_timeout) r = requests.get(login_request, auth=(self.rest_username, self.rest_password), verify=verify_cert, timeout=timeout) token = r.json() self.rest_token = token # Repeat request with valid token. 
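# The gateway replies with 401/403 once the session token expires; the
# re-login above stored a fresh token in self.rest_token, so the original
# GET or POST is replayed once with the new credentials before the response
# is logged and returned.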
LOG.info("Going to perform request again %s with valid token.", request) if is_get_request: response = requests.get(request, auth=( self.rest_username, self.rest_token ), verify=verify_cert, timeout=timeout) else: response = requests.post(request, data=json.dumps(params), headers=self._get_headers(), auth=( self.rest_username, self.rest_token ), verify=verify_cert, timeout=timeout) level = logging.DEBUG # for anything other than an OK from the REST API, log an error if response.status_code != http_client.OK: level = logging.ERROR LOG.log(level, "REST Request: %s with params %s", request, json.dumps(params)) LOG.log(level, "REST Response: %s with data %s", response.status_code, response.text) return response @retry(exception.VolumeBackendAPIException) def extend_volume(self, vol_id, new_size): url = "/instances/Volume::%(vol_id)s/action/setVolumeSize" round_volume_capacity = ( self.configuration.powerflex_round_volume_capacity ) if not round_volume_capacity and not new_size % 8 == 0: LOG.warning("PowerFlex only supports volumes with a granularity " "of 8 GBs. The new volume size is: %d.", new_size) params = {"sizeInGB": str(new_size)} r, response = self.execute_powerflex_post_request(url, params, vol_id=vol_id) if r.status_code != http_client.OK: response = r.json() msg = (_("Failed to extend volume %(vol_id)s: %(err)s.") % {"vol_id": vol_id, "err": response["message"]}) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) def _unmap_volume_from_all_sdcs(self, vol_id): url = "/instances/Volume::%(vol_id)s/action/removeMappedSdc" volume_is_mapped = False try: volume = self.query_volume(vol_id) if volume.get("mappedSdcInfo") is not None: volume_is_mapped = True except exception.VolumeBackendAPIException: LOG.info("Volume %s is not found thus is not mapped to any SDC.", vol_id) if volume_is_mapped: params = {"allSdcs": ""} LOG.info("Unmap volume from all sdcs.") r, response = self.execute_powerflex_post_request(url, params, vol_id=vol_id) if r.status_code != http_client.OK: msg = (_("Failed to unmap volume %(vol_id)s from all SDCs: " "%(err)s.") % {"vol_id": vol_id, "err": response["message"]}) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) @retry(exception.VolumeBackendAPIException) def remove_volume(self, vol_id): url = "/instances/Volume::%(vol_id)s/action/removeVolume" self._unmap_volume_from_all_sdcs(vol_id) params = {"removeMode": "ONLY_ME"} r, response = self.execute_powerflex_post_request(url, params, vol_id=vol_id) if r.status_code != http_client.OK: error_code = response["errorCode"] if error_code == VOLUME_NOT_FOUND_ERROR: LOG.warning("Ignoring error in delete volume %s: " "Volume not found.", vol_id) elif vol_id is None: LOG.warning("Volume does not have provider_id thus does not " "map to PowerFlex volume. " "Allowing deletion to proceed.") else: msg = (_("Failed to delete volume %(vol_id)s: %(err)s.") % {"vol_id": vol_id, "err": response["message"]}) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) def is_volume_creation_safe(self, protection_domain, storage_pool): """Checks if volume creation is safe or not. Using volumes with zero padding disabled can lead to existing data being read off of a newly created volume. 
""" # if we have been told to allow unsafe volumes if self.configuration.powerflex_allow_non_padded_volumes: # Enabled regardless of type, so safe to proceed return True try: properties = self.get_storage_pool_properties( protection_domain, storage_pool ) padded = properties["zeroPaddingEnabled"] except Exception: msg = (_("Unable to retrieve properties for pool %s.") % storage_pool) raise exception.InvalidInput(reason=msg) # zero padded storage pools are safe if padded: return True # if we got here, it's unsafe return False def rename_volume(self, volume, name): url = "/instances/Volume::%(id)s/action/setVolumeName" new_name = flex_utils.id_to_base64(name) vol_id = volume["provider_id"] params = {"newName": new_name} r, response = self.execute_powerflex_post_request(url, params, id=vol_id) if r.status_code != http_client.OK: error_code = response["errorCode"] if ((error_code == VOLUME_NOT_FOUND_ERROR or error_code == OLD_VOLUME_NOT_FOUND_ERROR or error_code == ILLEGAL_SYNTAX)): LOG.info("Ignore renaming action because the volume " "%(vol_id)s is not a PowerFlex volume.", {"vol_id": vol_id}) else: msg = (_("Failed to rename volume %(vol_id)s: %(err)s.") % {"vol_id": vol_id, "err": response["message"]}) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) else: LOG.info("PowerFlex volume %(vol_id)s was renamed to " "%(new_name)s.", {"vol_id": vol_id, "new_name": new_name}) def failover_failback_replication_cg(self, rcg_name, is_failback): url = ("/instances/ReplicationConsistencyGroup::%(rcg_id)s" "/action/%(action)sReplicationConsistencyGroup") action = "restore" if is_failback else "failover" rcg_id = self._get_replication_cg_id_by_name(rcg_name) r, response = self.execute_powerflex_post_request(url, rcg_id=rcg_id, action=action) if r.status_code != http_client.OK: msg = (_("Failed to %(action)s rcg with id %(rcg_id)s: " "%(err_msg)s.") % {"action": action, "rcg_id": rcg_id, "err_msg": response["message"]}) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) def query_vtree(self, vtree_id, vol_id): url = "/instances/VTree::%(vtree_id)s" r, response = self.execute_powerflex_get_request(url, vtree_id=vtree_id) if r.status_code != http_client.OK: msg = (_("Failed to check migration status of volume %s.") % vol_id) LOG.error(msg) raise exception.VolumeBackendAPIException(msg) return response def migrate_vtree(self, volume, params): url = "/instances/Volume::%(vol_id)s/action/migrateVTree" r, response = self.execute_powerflex_post_request( url, params=params, vol_id=volume.provider_id ) if r.status_code != http_client.OK: error_code = response["errorCode"] if error_code not in [ VOLUME_MIGRATION_IN_PROGRESS_ERROR, VOLUME_MIGRATION_ALREADY_ON_DESTINATION_POOL_ERROR, ]: msg = (_("Failed to migrate volume %s.") % volume.id) LOG.error(msg) raise exception.VolumeBackendAPIException(msg) return response def overwrite_volume_content(self, volume, snapshot): url = "/instances/Volume::%(vol_id)s/action/overwriteVolumeContent" params = {"srcVolumeId": snapshot.provider_id} r, response = self.execute_powerflex_post_request( url, params=params, vol_id=volume.provider_id ) if r.status_code != http_client.OK: msg = (_("Failed to revert volume %(vol_id)s to snapshot " "%(snap_id)s: %(err)s.") % {"vol_id": volume.id, "snap_id": snapshot.id, "err": response["message"]}) LOG.error(msg) raise exception.VolumeBackendAPIException(msg) def query_sdc_id_by_guid(self, sdc_guid): url = "/types/Sdc/instances" r, response = self.execute_powerflex_get_request(url) if r.status_code != 
http_client.OK: msg = (_("Failed to query SDC: %(err)s.") % {"err": response["message"]}) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) for sdc in response: if (sdc["sdcGuid"] and sdc["sdcGuid"].lower() == sdc_guid.lower()): return sdc["id"] msg = (_("Failed to query SDC by guid %(sdc)s: Not Found.") % {"sdc": sdc_guid}) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) def query_sdc_by_id(self, sdc_id): url = "/instances/Sdc::%(sdc_id)s" r, response = self.execute_powerflex_get_request( url, sdc_id = sdc_id) if r.status_code != http_client.OK: msg = (_("Failed to query SDC id %(sdc_id)s: %(err)s.") % { "sdc_id": sdc_id, "err": response["message"]}) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) return response def map_volume(self, volume_id, sdc_id): params = {'sdcId': sdc_id, 'allowMultipleMappings': 'True'} url = "/instances/Volume::%(vol_id)s/action/addMappedSdc" % { 'vol_id': volume_id } r, response = self.execute_powerflex_post_request(url, params) if r.status_code != http_client.OK: msg = (_("Failed to map volume %(vol_id)s to SDC " "%(host_id)s: %(err)s.") % {"vol_id": volume_id, "host_id": sdc_id, "err": response["message"]}) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) def unmap_volume(self, volume_id, sdc_id=None): if sdc_id: params = {'sdcId': sdc_id} url = "/instances/Volume::%(vol_id)s/action/removeMappedSdc" % { 'vol_id': volume_id } r, response = self.execute_powerflex_post_request(url, params) if r.status_code != http_client.OK: msg = (_("Failed to unmap volume %(vol_id)s from SDC " "%(sdc_id)s: %(err)s.") % { "vol_id": volume_id, "sdc_id": sdc_id, "err": response["message"]}) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) else: self._unmap_volume_from_all_sdcs(volume_id) def query_sdc_volumes(self, sdc_id): url = ("/instances/Sdc::%(sdc_id)s/relationships/Volume" % {'sdc_id': sdc_id}) r, response = self.execute_powerflex_get_request(url) if r.status_code != http_client.OK: msg = (_("Failed to query SDC volumes: %s.") % response["message"]) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) return [volume["id"] for volume in response] def set_sdc_limits(self, volume_id, sdc_id, bandwidth_limit=None, iops_limit=None): params = {'sdcId': sdc_id} if bandwidth_limit is not None: params['bandwidthLimitInKbps'] = bandwidth_limit if iops_limit is not None: params['iopsLimit'] = iops_limit url = ( "/instances/Volume::%(volume_id)s/action/setMappedSdcLimits" % {'volume_id': volume_id} ) r, response = self.execute_powerflex_post_request(url, params) if r.status_code != http_client.OK: msg = (_("Failed to set SDC limits: %s.") % response["message"]) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/drivers/dell_emc/powerflex/simplecache.py0000664000175000017500000000721400000000000025732 0ustar00zuulzuul00000000000000# Copyright (c) 2017-2020 Dell Inc. or its subsidiaries. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ SimpleCache utility class for Dell EMC PowerFlex (formerly named Dell EMC VxFlex OS). """ import datetime from oslo_log import log as logging from oslo_utils import timeutils LOG = logging.getLogger(__name__) class SimpleCache(object): def __init__(self, name, age_minutes=30): self.cache = {} self.name = name self.age_minutes = age_minutes def __contains__(self, key): """Checks if a key exists in cache :param key: Key for the item being checked. :return: True if item exists, otherwise False """ return key in self.cache def _remove(self, key): """Removes item from the cache :param key: Key for the item being removed. :return: """ if self.__contains__(key): del self.cache[key] def _validate(self, key): """Validate if an item exists and has not expired. :param key: Key for the item being requested. :return: The value of the related key, or None. """ if key not in self: return None # make sure the cache has not expired entry = self.cache[key]['value'] now = timeutils.utcnow() age = now - self.cache[key]['date'] if age > datetime.timedelta(minutes=self.age_minutes): # if has expired, remove from cache LOG.debug("Removing item '%(item)s' from cache '%(name)s' " "due to age", {'item': key, 'name': self.name}) self._remove(key) return None return entry def purge(self, key): """Purge an item from the cache, regardless of age :param key: Key for the item being removed. :return: """ self._remove(key) def purge_all(self): """Purge all items from the cache, regardless of age :return: """ self.cache = {} def set_cache_period(self, age_minutes): """Define the period of time to cache values for :param age_minutes: Number of minutes to cache items for. :return: """ self.age_minutes = age_minutes def update(self, key, value): """Update/Store an item in the cache :param key: Key for the item being added. :param value: Value to store :return: """ LOG.debug("Updating item '%(item)s' in cache '%(name)s'", {'item': key, 'name': self.name}) self.cache[key] = {'date': timeutils.utcnow(), 'value': value} def get_value(self, key): """Returns an item from the cache :param key: Key for the item being requested. :return: Value of item or None if doesn't exist or expired """ value = self._validate(key) if value is None: LOG.debug("Item '%(item)s' is not in cache '%(name)s' ", {'item': key, 'name': self.name}) return value ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/drivers/dell_emc/powerflex/utils.py0000664000175000017500000000365100000000000024616 0ustar00zuulzuul00000000000000# Copyright (c) 2020 Dell Inc. or its subsidiaries. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. import base64 import binascii import math from oslo_log import log as logging from oslo_utils import units from packaging import version LOG = logging.getLogger(__name__) POWERFLEX_REST_CONNECT_TIMEOUT = "rest_api_connect_timeout" POWERFLEX_REST_READ_TIMEOUT = "rest_api_read_timeout" def version_gte(ver1, ver2): return version.parse(ver1) >= version.parse(ver2) def convert_kb_to_gib(size): return int(math.floor(float(size) / units.Mi)) def id_to_base64(_id): # Base64 encode the id to get a volume name less than 32 characters due # to PowerFlex limitation. name = str(_id).replace("-", "") try: name = base64.b16decode(name.upper()) except (TypeError, binascii.Error): pass if isinstance(name, str): name = name.encode() encoded_name = base64.b64encode(name).decode() LOG.debug("Converted id %(id)s to PowerFlex OS name %(name)s.", {"id": _id, "name": encoded_name}) return encoded_name def round_to_num_gran(size, num=8): """Round size to nearest value that is multiple of `num`.""" if size % num == 0: return size return size + num - (size % num) def round_down_to_num_gran(size, num=8): """Round size down to nearest value that is multiple of `num`.""" return size - (size % num) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315577.3391206 cinder-27.0.0/cinder/volume/drivers/dell_emc/powermax/0000775000175000017500000000000000000000000022726 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/drivers/dell_emc/powermax/__init__.py0000664000175000017500000000000000000000000025025 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/drivers/dell_emc/powermax/common.py0000664000175000017500000131257400000000000024605 0ustar00zuulzuul00000000000000# Copyright (c) 2020 Dell Inc. or its subsidiaries. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
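# Illustrative cinder.conf backend section (a sketch only; values are
# placeholders, the option names are those registered via powermax_opts below
# together with the generic san_* options this module reads, and the
# volume_driver class path shown is the usual iSCSI entry point and is an
# assumption, not something defined in this module):
#
#     [powermax_backend]
#     volume_driver = cinder.volume.drivers.dell_emc.powermax.iscsi.PowerMaxISCSIDriver
#     san_ip = unisphere.example.com
#     san_login = smc
#     san_password = <password>
#     powermax_array = 000123456789
#     powermax_srp = SRP_1
#     powermax_service_level = Diamond
#     powermax_port_groups = [OS-ISCSI-PG]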
import ast from collections import namedtuple from copy import deepcopy import json import math import random import sys import time from oslo_config import cfg from oslo_config import types from oslo_log import log as logging from cinder.common import constants as cinder_constants from cinder import coordination from cinder import exception from cinder.i18n import _ from cinder.objects import fields from cinder.utils import retry from cinder.volume import configuration from cinder.volume.drivers.dell_emc.powermax import masking from cinder.volume.drivers.dell_emc.powermax import metadata as volume_metadata from cinder.volume.drivers.dell_emc.powermax import migrate from cinder.volume.drivers.dell_emc.powermax import performance from cinder.volume.drivers.dell_emc.powermax import provision from cinder.volume.drivers.dell_emc.powermax import rest from cinder.volume.drivers.dell_emc.powermax import utils from cinder.volume import volume_types from cinder.volume import volume_utils LOG = logging.getLogger(__name__) CONF = cfg.CONF BACKENDNAME = 'volume_backend_name' PREFIXBACKENDNAME = 'capabilities:volume_backend_name' # Replication REPLICATION_DISABLED = fields.ReplicationStatus.DISABLED REPLICATION_ENABLED = fields.ReplicationStatus.ENABLED REPLICATION_FAILOVER = fields.ReplicationStatus.FAILED_OVER FAILOVER_ERROR = fields.ReplicationStatus.FAILOVER_ERROR REPLICATION_ERROR = fields.ReplicationStatus.ERROR retry_exc_tuple = (exception.VolumeBackendAPIException,) powermax_opts = [ cfg.IntOpt('interval', default=3, help='Use this value to specify ' 'length of the interval in seconds.'), cfg.IntOpt('retries', default=200, help='Use this value to specify ' 'number of retries.'), cfg.BoolOpt('initiator_check', default=False, help='Use this value to enable ' 'the initiator_check.'), cfg.StrOpt(utils.VMAX_WORKLOAD, help='Workload, setting this as an extra spec in ' 'pool_name is preferable.'), cfg.IntOpt(utils.U4P_FAILOVER_TIMEOUT, default=20.0, help='How long to wait for the server to send data before ' 'giving up.'), cfg.IntOpt(utils.U4P_FAILOVER_RETRIES, default=3, help='The maximum number of retries each connection should ' 'attempt. Note, this applies only to failed DNS lookups, ' 'socket connections and connection timeouts, never to ' 'requests where data has made it to the server.'), cfg.IntOpt(utils.U4P_FAILOVER_BACKOFF_FACTOR, default=1, help='A backoff factor to apply between attempts after the ' 'second try (most errors are resolved immediately by a ' 'second try without a delay). Retries will sleep for: ' '{backoff factor} * (2 ^ ({number of total retries} - 1)) ' 'seconds.'), cfg.BoolOpt(utils.U4P_FAILOVER_AUTOFAILBACK, default=True, help='If the driver should automatically failback to the ' 'primary instance of Unisphere when a successful ' 'connection is re-established.'), cfg.MultiOpt(utils.U4P_FAILOVER_TARGETS, item_type=types.Dict(), help='Dictionary of Unisphere failover target info.'), cfg.StrOpt(utils.POWERMAX_ARRAY, help='Serial number of the array to connect to.'), cfg.StrOpt(utils.POWERMAX_SRP, help='Storage resource pool on array to use for ' 'provisioning.'), cfg.StrOpt(utils.POWERMAX_SERVICE_LEVEL, help='Service level to use for provisioning storage. 
' 'Setting this as an extra spec in pool_name ' 'is preferable.'), cfg.ListOpt(utils.POWERMAX_PORT_GROUPS, bounds=True, help='List of port groups containing frontend ports ' 'configured prior for server connection.'), cfg.ListOpt(utils.POWERMAX_ARRAY_TAG_LIST, bounds=True, help='List of user assigned name for storage array.'), cfg.StrOpt(utils.POWERMAX_SHORT_HOST_NAME_TEMPLATE, default='shortHostName', help='User defined override for short host name.'), cfg.StrOpt(utils.POWERMAX_PORT_GROUP_NAME_TEMPLATE, default='portGroupName', help='User defined override for port group name.'), cfg.BoolOpt(utils.LOAD_BALANCE, default=False, help='Enable/disable load balancing for a PowerMax backend.'), cfg.BoolOpt(utils.LOAD_BALANCE_RT, default=False, help='Enable/disable real-time performance metrics for Port ' 'level load balancing for a PowerMax backend.'), cfg.StrOpt(utils.PERF_DATA_FORMAT, default='Avg', help='Performance data format, not applicable for real-time ' 'metrics. Available options are "avg" and "max".'), cfg.IntOpt(utils.LOAD_LOOKBACK, default=60, help='How far in minutes to look back for diagnostic ' 'performance metrics in load calculation, minimum of 0 ' 'maximum of 1440 (24 hours).'), cfg.IntOpt(utils.LOAD_LOOKBACK_RT, default=1, help='How far in minutes to look back for real-time ' 'performance metrics in load calculation, minimum of 1 ' 'maximum of 10.'), cfg.StrOpt(utils.PORT_GROUP_LOAD_METRIC, default='PercentBusy', help='Metric used for port group load calculation.'), cfg.StrOpt(utils.PORT_LOAD_METRIC, default='PercentBusy', help='Metric used for port load calculation.'), cfg.IntOpt(utils.REST_API_CONNECT_TIMEOUT, default=30, min=1, help='Use this value to specify connect ' 'timeout value (in seconds) for rest call.'), cfg.IntOpt(utils.REST_API_READ_TIMEOUT, default=30, min=1, help='Use this value to specify read ' 'timeout value (in seconds) for rest call.'), cfg.BoolOpt(utils.SNAPVX_UNLINK_SYMFORCE, default=False, help='Enable SnapVx unlink symforce, which forces ' 'the operation to execute when normally it is rejected.'), ] CONF.register_opts(powermax_opts, group=configuration.SHARED_CONF_GROUP) ReplicationFlags = namedtuple( 'ReplicationFlags', 'was_rep_enabled, is_rep_enabled, ' 'backend_ids_differ, rep_mode, target_extra_specs') RepToNonRep = namedtuple( 'RepToNonRep', 'model_update, resume_original_sg_dict, rdf_pair_broken, ' 'resume_original_sg, is_partitioned') NonRepToRep = namedtuple( 'NonRepToRep', 'model_update, rdf_pair_created, rep_status, rep_driver_data, ' 'rep_info_dict, rep_extra_specs, resume_target_sg') class PowerMaxCommon(object): """Common class for Rest based PowerMax volume drivers. This common class is for Dell EMC PowerMax volume drivers based on UniSphere Rest API. It supports VMAX 3 and VMAX All Flash and PowerMax arrays. 
""" pool_info = {'backend_name': None, 'config_file': None, 'arrays_info': {}, 'max_over_subscription_ratio': None, 'reserved_percentage': 0, 'replication_enabled': False} def __init__(self, prtcl, version, configuration=None, active_backend_id=None): self.rest = rest.PowerMaxRest() self.utils = utils.PowerMaxUtils() self.volume_metadata = volume_metadata.PowerMaxVolumeMetadata( self.rest, version, LOG.isEnabledFor(logging.DEBUG)) # Configuration/Attributes self.protocol = prtcl self.configuration = configuration self.configuration.append_config_values(powermax_opts) self.active_backend_id = active_backend_id self.version = version self.version_dict = {} self.ucode_level = None self.next_gen = False self.replication_enabled = False self.rep_devices = [] self.failedover = True if active_backend_id else False self.promotion = False self.powermax_array_tag_list = None self.powermax_short_host_name_template = None self.powermax_port_group_name_template = None if active_backend_id == utils.PMAX_FAILOVER_START_ARRAY_PROMOTION: self.promotion = True # Init provision, masking and migrate instances self.provision = provision.PowerMaxProvision( self.rest, self.configuration) self.masking = masking.PowerMaxMasking( prtcl, self.rest, self.configuration) self.migrate = migrate.PowerMaxMigrate( prtcl, self.rest, self.configuration) # Gather environment info self._get_replication_info() self._get_u4p_failover_info() self._gather_info() self._get_performance_config() def _gather_info(self): """Gather the relevant information for update_volume_stats.""" self._get_attributes_from_config() array_info = self.get_attributes_from_cinder_config() if array_info is None: LOG.error("Unable to get attributes from cinder.conf. Please " "refer to the current online documentation for correct " "configuration and note that the xml file is no " "longer supported.") self.rest.set_rest_credentials(array_info) self.rest.validate_unisphere_version() if array_info: serial_number = array_info['SerialNumber'] self.array_model, self.next_gen = ( self.rest.get_array_model_info(serial_number)) self.rest.set_residuals(serial_number) self.ucode_level = self.rest.get_array_ucode_version(serial_number) if self.replication_enabled: if serial_number in self.replication_targets: msg = (_("The same array serial number (%s) is defined " "for powermax_array and replication_device in " "cinder.conf. Please ensure your " "target_device_id points to a different " "array." 
% serial_number)) LOG.error(msg) raise exception.InvalidConfigurationValue(msg) finalarrayinfolist = self._get_slo_workload_combinations( array_info) self.pool_info['arrays_info'] = finalarrayinfolist def _get_attributes_from_config(self): """Get relevent details from configuration file.""" self.interval = self.configuration.safe_get('interval') self.retries = self.configuration.safe_get('retries') self.powermax_array_tag_list = self.configuration.safe_get( utils.POWERMAX_ARRAY_TAG_LIST) self.powermax_short_host_name_template = self.configuration.safe_get( utils.POWERMAX_SHORT_HOST_NAME_TEMPLATE) self.powermax_port_group_name_template = self.configuration.safe_get( utils.POWERMAX_PORT_GROUP_NAME_TEMPLATE) self.pool_info['backend_name'] = ( self.configuration.safe_get('volume_backend_name')) mosr = volume_utils.get_max_over_subscription_ratio( self.configuration.safe_get('max_over_subscription_ratio'), True) self.pool_info['max_over_subscription_ratio'] = mosr self.pool_info['reserved_percentage'] = ( self.configuration.safe_get('reserved_percentage')) LOG.debug( "Updating volume stats on Cinder backend %(backendName)s.", {'backendName': self.pool_info['backend_name']}) def _get_performance_config(self): """Gather performance configuration, if provided in cinder.conf.""" performance_config = {'load_balance': False} self.performance = performance.PowerMaxPerformance( self.rest, performance_config) if self.configuration.safe_get(utils.LOAD_BALANCE): LOG.info( "Updating performance config for Cinder backend %(be)s.", {'be': self.pool_info['backend_name']}) array_info = self.get_attributes_from_cinder_config() self.performance.set_performance_configuration( array_info['SerialNumber'], self.configuration) def _get_u4p_failover_info(self): """Gather Unisphere failover target information, if provided.""" key_dict = {'san_ip': 'RestServerIp', 'san_api_port': 'RestServerPort', 'san_login': 'RestUserName', 'san_password': 'RestPassword', 'driver_ssl_cert_verify': 'SSLVerify', 'driver_ssl_cert_path': 'SSLPath'} if self.configuration.safe_get('u4p_failover_target'): serial_number = self.configuration.safe_get(utils.POWERMAX_ARRAY) u4p_targets = self.configuration.safe_get('u4p_failover_target') formatted_target_list = list() for target in u4p_targets: formatted_target = {key_dict[key]: value for key, value in target.items()} formatted_target['SerialNumber'] = serial_number try: formatted_target['SSLVerify'] = formatted_target['SSLPath'] del formatted_target['SSLPath'] except KeyError: if formatted_target['SSLVerify'] == 'False': formatted_target['SSLVerify'] = False else: formatted_target['SSLVerify'] = True formatted_target_list.append(formatted_target) u4p_failover_config = dict() u4p_failover_config['u4p_failover_targets'] = formatted_target_list u4p_failover_config['u4p_failover_backoff_factor'] = ( self.configuration.safe_get('u4p_failover_backoff_factor')) u4p_failover_config['u4p_failover_retries'] = ( self.configuration.safe_get('u4p_failover_retries')) u4p_failover_config['u4p_failover_timeout'] = ( self.configuration.safe_get('u4p_failover_timeout')) u4p_failover_config['u4p_failover_autofailback'] = ( self.configuration.safe_get('u4p_failover_autofailback')) u4p_failover_config['u4p_primary'] = ( self.get_attributes_from_cinder_config()) self.rest.set_u4p_failover_config(u4p_failover_config) else: LOG.warning("There has been no failover instances of Unisphere " "configured for this instance of Cinder. 
If your " "primary instance of Unisphere goes down then your " "PowerMax/VMAX will be inaccessible until the " "Unisphere REST API is responsive again.") def retest_primary_u4p(self): """Retest connection to the primary instance of Unisphere.""" primary_array_info = self.get_attributes_from_cinder_config() temp_conn = rest.PowerMaxRest() temp_conn.set_rest_credentials(primary_array_info) LOG.debug( "Running connection check to primary instance of Unisphere " "at %(primary)s", { 'primary': primary_array_info['RestServerIp']}) sc, response = temp_conn.request(target_uri='/system/version', method='GET', u4p_check=True, request_object=None) if sc and int(sc) == 200: self._get_u4p_failover_info() self.rest.set_rest_credentials(primary_array_info) self.rest.u4p_in_failover = False LOG.info("Connection to primary instance of Unisphere at " "%(primary)s restored, available failover instances of " "Unisphere reset to default.", { 'primary': primary_array_info['RestServerIp']}) else: LOG.debug( "Connection check to primary instance of Unisphere at " "%(primary)s failed, maintaining session with backup " "instance of Unisphere at %(bu_in_use)s", { 'primary': primary_array_info['RestServerIp'], 'bu_in_use': self.rest.base_uri}) temp_conn.session.close() def _get_initiator_check_flag(self): """Reads the configuration for initator_check flag. :returns: flag """ return self.configuration.safe_get('initiator_check') def _get_replication_info(self): """Gather replication information, if provided.""" self.rep_configs = None self.replication_targets = [] if hasattr(self.configuration, 'replication_device'): self.rep_devices = self.configuration.safe_get( 'replication_device') if self.rep_devices: if len(self.rep_devices) > 1: self.utils.validate_multiple_rep_device(self.rep_devices) self.rep_configs = self.utils.get_replication_config( self.rep_devices) # use self.replication_enabled for update_volume_stats self.replication_enabled = True self.replication_targets = self.utils.get_replication_targets( self.rep_configs) LOG.debug("The replication configuration is %(rep_configs)s.", {'rep_configs': self.rep_configs}) if self.next_gen: for rc in self.rep_configs: rc[utils.RDF_CONS_EXEMPT] = True else: for rc in self.rep_configs: rc[utils.RDF_CONS_EXEMPT] = False def _get_slo_workload_combinations(self, array_info): """Method to query the array for SLO and Workloads. Takes the arrayinfolist object and generates a set which has all available SLO & Workload combinations :param array_info: the array information :returns: finalarrayinfolist :raises: VolumeBackendAPIException: """ try: upgraded_afa = False if self.array_model in utils.VMAX_HYBRID_MODELS: sls = deepcopy(utils.HYBRID_SLS) wls = deepcopy(utils.HYBRID_WLS) elif self.array_model in utils.VMAX_AFA_MODELS: wls = deepcopy(utils.AFA_WLS) if not self.next_gen: sls = deepcopy(utils.AFA_H_SLS) else: sls = deepcopy(utils.AFA_P_SLS) upgraded_afa = True elif self.array_model in utils.PMAX_MODELS: sls, wls = deepcopy(utils.PMAX_SLS), deepcopy(utils.PMAX_WLS) else: raise exception.VolumeBackendAPIException( message="Unable to determine array model.") if self.next_gen: LOG.warning( "Workloads have been deprecated for arrays running " "PowerMax OS uCode level 5978 or higher. Any supplied " "workloads will be treated as None values. 
It is " "recommended to create a new volume type without a " "workload specified.") # Add service levels: pools = sls # Array Specific SL/WL Combos pools += ( ['{}:{}'.format(x, y) for x in sls for y in wls if x.lower() not in ['optimized', 'none']]) # Add Optimized & None combinations pools += ( ['{}:{}'.format(x, y) for x in ['Optimized', 'NONE', 'None'] for y in ['NONE', 'None']]) if upgraded_afa: # Cleanup is required here for service levels that were not # present in AFA HyperMax but added for AFA PowerMax, we # do not need these SL/WL combinations for backwards # compatibility but we do for Diamond SL afa_pool = list() for p in pools: try: pl = p.split(':') if (pl[0] not in [ 'Platinum', 'Gold', 'Silver', 'Bronze']) or ( pl[1] not in [ 'OLTP', 'OLTP_REP', 'DSS', 'DSS_REP']): afa_pool.append(p) except IndexError: # Pool has no workload present afa_pool.append(p) pools = afa_pool # Build array pool of SL/WL combinations array_pool = list() for pool in pools: _array_info = array_info.copy() try: slo, workload = pool.split(':') _array_info['SLO'] = slo _array_info['Workload'] = workload except ValueError: _array_info['SLO'] = pool array_pool.append(_array_info) except Exception as e: exception_message = (_( "Unable to get the SLO/Workload combinations from the array. " "Exception received was %(e)s") % {'e': str(e)}) LOG.error(exception_message) raise exception.VolumeBackendAPIException( message=exception_message) return array_pool def create_volume(self, volume): """Creates a EMC(PowerMax/VMAX) volume from a storage group. :param volume: volume object :returns: model_update - dict """ model_update, rep_driver_data = dict(), dict() volume_id = volume.id extra_specs = self._initial_setup(volume) if 'qos' in extra_specs: del extra_specs['qos'] # Volume_name naming convention is 'OS-UUID'. volume_name = self.utils.get_volume_element_name(volume_id) volume_size = volume.size volume_dict, rep_update, rep_info_dict = self._create_volume( volume, volume_name, volume_size, extra_specs) if rep_update: rep_driver_data = rep_update['replication_driver_data'] model_update.update(rep_update) # Add volume to group group_name = self._add_to_group( volume, volume_dict['device_id'], volume_name, volume.group_id, volume.group, extra_specs, rep_driver_data) # Gather Metadata model_update.update( {'provider_location': str(volume_dict)}) model_update = self.update_metadata( model_update, volume.metadata, self.get_volume_metadata( volume_dict['array'], volume_dict['device_id'])) if rep_update: model_update['metadata']['BackendID'] = extra_specs[ utils.REP_CONFIG].get(utils.BACKEND_ID, 'None') array_tag_list = self.get_tags_of_storage_array( extra_specs[utils.ARRAY]) self.volume_metadata.capture_create_volume( volume_dict['device_id'], volume, group_name, volume.group_id, extra_specs, rep_info_dict, 'create', array_tag_list=array_tag_list) LOG.info("Leaving create_volume: %(name)s. 
Volume dict: %(dict)s.", {'name': volume_name, 'dict': volume_dict}) return model_update def _add_to_group( self, volume, device_id, volume_name, group_id, group, extra_specs, rep_driver_data=None): """Add a volume to a volume group :param volume: volume object :param device_id: the device id :param volume_name: volume name :param group_id: the group id :param group: group object :param extra_specs: extra specifications :param rep_driver_data: replication data (optional) :returns: group_id - string """ group_name = None if group_id is not None: if group and (volume_utils.is_group_a_cg_snapshot_type(group) or group.is_replicated): self._find_volume_group(extra_specs[utils.ARRAY], group) extra_specs[utils.FORCE_VOL_EDIT] = True group_name = self._add_new_volume_to_volume_group( volume, device_id, volume_name, extra_specs, rep_driver_data) return group_name def _add_new_volume_to_volume_group(self, volume, device_id, volume_name, extra_specs, rep_driver_data=None): """Add a new volume to a volume group. This may also be called after extending a replicated volume. :param volume: the volume object :param device_id: the device id :param volume_name: the volume name :param extra_specs: the extra specifications :param rep_driver_data: the replication driver data, optional :returns: group_name string """ self.utils.check_replication_matched(volume, extra_specs) group_name = self.provision.get_or_create_volume_group( extra_specs[utils.ARRAY], volume.group, extra_specs) self.masking.add_volume_to_storage_group( extra_specs[utils.ARRAY], device_id, group_name, volume_name, extra_specs) # Add remote volume to remote group, if required if volume.group.is_replicated: self.masking.add_remote_vols_to_volume_group( volume, volume.group, extra_specs, rep_driver_data) return group_name def create_volume_from_snapshot(self, volume, snapshot): """Creates a volume from a snapshot. :param volume: volume object :param snapshot: snapshot object :returns: model_update :raises: VolumeBackendAPIException: """ LOG.debug("Entering create_volume_from_snapshot.") model_update, rep_info_dict = {}, {} extra_specs = self._initial_setup(volume) # Check if legacy snapshot sourcedevice_id = self._find_device_on_array( snapshot, extra_specs) from_snapvx = False if sourcedevice_id else True clone_dict, rep_update, rep_info_dict = self._create_cloned_volume( volume, snapshot, extra_specs, from_snapvx=from_snapvx) # Update model with replication session info if applicable if rep_update: model_update.update(rep_update) model_update.update( {'provider_location': str(clone_dict)}) model_update = self.update_metadata( model_update, volume.metadata, self.get_volume_metadata( clone_dict['array'], clone_dict['device_id'])) if rep_update: model_update['metadata']['BackendID'] = extra_specs[ utils.REP_CONFIG].get(utils.BACKEND_ID, 'None') array_tag_list = self.get_tags_of_storage_array( extra_specs[utils.ARRAY]) self.volume_metadata.capture_create_volume( clone_dict['device_id'], volume, None, None, extra_specs, rep_info_dict, 'createFromSnapshot', source_snapshot_id=snapshot.id, array_tag_list=array_tag_list) return model_update def create_cloned_volume(self, clone_volume, source_volume): """Creates a clone of the specified volume. 
:param clone_volume: clone volume Object :param source_volume: volume object :returns: model_update, dict """ model_update, rep_info_dict = {}, {} rep_driver_data = None extra_specs = self._initial_setup(clone_volume) array = extra_specs[utils.ARRAY] source_device_id = self._find_device_on_array( source_volume, extra_specs) self._cleanup_device_snapvx(array, source_device_id, extra_specs) clone_dict, rep_update, rep_info_dict = self._create_cloned_volume( clone_volume, source_volume, extra_specs) # Update model with replication session info if applicable if rep_update: rep_driver_data = rep_update['replication_driver_data'] model_update.update(rep_update) # Add volume to group group_name = self._add_to_group( clone_volume, clone_dict['device_id'], clone_volume.name, clone_volume.group_id, clone_volume.group, extra_specs, rep_driver_data) model_update.update( {'provider_location': str(clone_dict)}) model_update = self.update_metadata( model_update, clone_volume.metadata, self.get_volume_metadata( clone_dict['array'], clone_dict['device_id'])) if rep_update: model_update['metadata']['BackendID'] = extra_specs[ utils.REP_CONFIG].get(utils.BACKEND_ID, 'None') array_tag_list = self.get_tags_of_storage_array( extra_specs[utils.ARRAY]) self.volume_metadata.capture_create_volume( clone_dict['device_id'], clone_volume, group_name, source_volume.group_id, extra_specs, rep_info_dict, 'createFromVolume', temporary_snapvx=clone_dict.get('snap_name'), source_device_id=clone_dict.get('source_device_id'), array_tag_list=array_tag_list) return model_update def delete_volume(self, volume): """Deletes a EMC(PowerMax/VMAX) volume. :param volume: volume object """ LOG.info("Deleting Volume: %(volume)s", {'volume': volume.name}) volume_name = self._delete_volume(volume) self.volume_metadata.capture_delete_info(volume) LOG.info("Leaving delete_volume: %(volume_name)s.", {'volume_name': volume_name}) def create_snapshot(self, snapshot, volume): """Creates a snapshot. :param snapshot: snapshot object :param volume: volume Object to create snapshot from :returns: dict -- the cloned volume dictionary """ extra_specs = self._initial_setup(volume) snapshot_dict, __, __ = self._create_cloned_volume( snapshot, volume, extra_specs, is_snapshot=True) model_update = { 'provider_location': str(snapshot_dict)} snapshot_metadata = self.get_snapshot_metadata( extra_specs.get('array'), snapshot_dict.get('source_id'), snapshot_dict.get('snap_name')) model_update = self.update_metadata( model_update, snapshot.metadata, snapshot_metadata) if snapshot.metadata: model_update['metadata'].update(snapshot.metadata) snapshot_metadata.update( {'snap_display_name': snapshot_dict.get('snap_name')}) self.volume_metadata.capture_snapshot_info( volume, extra_specs, 'createSnapshot', snapshot_metadata) return model_update def delete_snapshot(self, snapshot, volume): """Deletes a snapshot. 
:param snapshot: snapshot object :param volume: source volume """ LOG.info("Delete Snapshot: %(snapshotName)s.", {'snapshotName': snapshot.name}) extra_specs = self._initial_setup(volume) sourcedevice_id, snap_name, snap_id_list = self._parse_snap_info( extra_specs[utils.ARRAY], snapshot) if not sourcedevice_id and not snap_name: # Check if legacy snapshot sourcedevice_id = self._find_device_on_array( snapshot, extra_specs) if sourcedevice_id: self._delete_volume(snapshot) else: LOG.info("No snapshot found on the array") elif not sourcedevice_id or not snap_name: LOG.info("No snapshot found on the array") else: # Ensure snap has not been recently deleted for snap_id in snap_id_list: self.provision.delete_volume_snap_check_for_links( extra_specs[utils.ARRAY], snap_name, sourcedevice_id, extra_specs, snap_id) LOG.info("Leaving delete_snapshot: %(ssname)s.", {'ssname': snap_name}) self.volume_metadata.capture_snapshot_info( volume, extra_specs, 'deleteSnapshot', None) def _remove_members(self, array, volume, device_id, extra_specs, connector, is_multiattach, async_grp=None, host_template=None): """This method unmaps a volume from a host. Removes volume from the storage group that belongs to a masking view. :param array: the array serial number :param volume: volume object :param device_id: the PowerMax/VMAX volume device id :param extra_specs: extra specifications :param connector: the connector object :param is_multiattach: flag to indicate if this is a multiattach case :param async_grp: the name if the async group, if applicable """ volume_name = volume.name LOG.debug("Detaching volume %s.", volume_name) reset = False if is_multiattach else True if is_multiattach: storage_group_names = self.rest.get_storage_groups_from_volume( array, device_id) self.masking.remove_and_reset_members( array, volume, device_id, volume_name, extra_specs, reset, connector, async_grp=async_grp, host_template=host_template) if is_multiattach: self.masking.return_volume_to_fast_managed_group( array, device_id, extra_specs) self.migrate.cleanup_staging_objects( array, storage_group_names, extra_specs) def _unmap_lun(self, volume, connector): """Unmaps a volume from the host. :param volume: the volume Object :param connector: the connector Object """ mv_list, sg_list = None, None extra_specs = self._initial_setup(volume) rep_config = None rep_extra_specs = None current_host_occurances = 0 if 'qos' in extra_specs: del extra_specs['qos'] if self.utils.is_replication_enabled(extra_specs): backend_id = self._get_replicated_volume_backend_id(volume) rep_config = self.utils.get_rep_config( backend_id, self.rep_configs) extra_specs[utils.FORCE_VOL_EDIT] = True rep_extra_specs = self._get_replication_extra_specs( extra_specs, rep_config) if self.utils.is_volume_failed_over(volume): extra_specs = rep_extra_specs volume_name = volume.name mgmt_sg_name = None LOG.info("Unmap volume: %(volume)s.", {'volume': volume}) if connector is not None: host_name = connector.get('host') attachment_list = volume.volume_attachment LOG.debug("Volume attachment list: %(atl)s. 
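# --- Illustrative sketch (not driver code): the attachment counting done just
# below decides whether this detach is the last one for the host; only then is
# the volume removed from the masking view. A standalone equivalent, using
# plain dicts in place of Cinder attachment objects (an assumption for the
# example):
def is_last_attachment_for_host(attachments, host_name):
    """Return True if at most one attachment of this volume is on host_name."""
    count = sum(1 for att in attachments
                if att and att.get('connector')
                and att['connector'].get('host') == host_name)
    return count <= 1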
" "Attachment type: %(at)s", {'atl': attachment_list, 'at': type(attachment_list)}) try: att_list = attachment_list.objects except AttributeError: att_list = attachment_list if att_list is not None: host_list = [att.connector['host'] for att in att_list if att is not None and att.connector is not None] current_host_occurances = host_list.count(host_name) else: LOG.warning("Cannot get host name from connector object - " "assuming force-detach.") host_name = None device_info, is_multiattach = ( self.find_host_lun_id(volume, host_name, extra_specs)) if 'hostlunid' not in device_info: LOG.info("Volume %s is not mapped. No volume to unmap.", volume_name) return if current_host_occurances > 1: LOG.info("Volume is attached to multiple instances on " "this host. Not removing the volume from the " "masking view.") else: array = extra_specs[utils.ARRAY] if self.utils.does_vol_need_rdf_management_group(extra_specs): mgmt_sg_name = self.utils.get_rdf_management_group_name( rep_config) self._remove_members( array, volume, device_info['device_id'], extra_specs, connector, is_multiattach, async_grp=mgmt_sg_name, host_template=self.powermax_short_host_name_template) if (self.utils.is_metro_device(rep_config, extra_specs) and not self.promotion): # Need to remove from remote masking view device_info, __ = (self.find_host_lun_id( volume, host_name, extra_specs, rep_extra_specs)) if 'hostlunid' in device_info: self._remove_members( rep_extra_specs[utils.ARRAY], volume, device_info['device_id'], rep_extra_specs, connector, is_multiattach, async_grp=mgmt_sg_name, host_template=self.powermax_short_host_name_template) else: # Make an attempt to clean up initiator group self.masking.attempt_ig_cleanup( connector, self.protocol, rep_extra_specs[utils.ARRAY], True, host_template=self.powermax_short_host_name_template) if is_multiattach and LOG.isEnabledFor(logging.DEBUG): mv_list, sg_list = ( self._get_mvs_and_sgs_from_volume( extra_specs[utils.ARRAY], device_info.get('device_id'))) self.volume_metadata.capture_detach_info( volume, extra_specs, device_info.get('device_id'), mv_list, sg_list) def _unmap_lun_promotion(self, volume, connector): """Unmaps a volume from the host during promotion. :param volume: the volume Object :param connector: the connector Object """ extra_specs = self._initial_setup(volume) if not self.utils.is_replication_enabled(extra_specs): LOG.error('Unable to terminate connections for non-replicated ' 'volumes during promotion failover. Could not unmap ' 'volume %s', volume.id) else: mode = extra_specs[utils.REP_MODE] if mode == utils.REP_METRO: self._unmap_lun(volume, connector) else: # During a promotion scenario only Metro volumes will have # connections present on their remote volumes. loc = ast.literal_eval(volume.provider_location) device_id = loc.get('device_id') promotion_key = [utils.PMAX_FAILOVER_START_ARRAY_PROMOTION] self.volume_metadata.capture_detach_info( volume, extra_specs, device_id, promotion_key, promotion_key) def initialize_connection(self, volume, connector): """Initializes the connection and returns device and connection info. The volume may be already mapped, if this is so the deviceInfo tuple is returned. If the volume is not already mapped then we need to gather information to either 1. Create an new masking view or 2. Add the volume to an existing storage group within an already existing maskingview. The naming convention is the following: .. 
code-block:: none initiator_group_name = OS---IG e.g OS-myShortHost-I-IG storage_group_name = OS----SG e.g OS-myShortHost-SRP_1-I-SG port_group_name = OS--PG The port_group_name will come from the cinder.conf or as an extra spec on the volume type. These are precreated. If the portGroup does not exist then an error will be returned to the user maskingview_name = OS----MV e.g OS-myShortHost-SRP_1-I-MV :param volume: volume Object :param connector: the connector Object :returns: dict -- device_info_dict - device information dict """ LOG.info("Initialize connection: %(vol)s.", {'vol': volume.name}) extra_specs = self._initial_setup(volume, init_conn=True) is_multipath = connector.get('multipath', False) rep_config = extra_specs.get(utils.REP_CONFIG) rep_extra_specs = self._get_replication_extra_specs( extra_specs, rep_config) remote_port_group = None if (self.utils.is_metro_device(rep_config, extra_specs) and not is_multipath and self.protocol.lower() == 'iscsi'): exception_message = _( "Either multipathing is not correctly/currently " "enabled on your system or the volume was created " "prior to multipathing being enabled. Please refer " "to the online PowerMax Cinder driver documentation " "for this release for further details.") LOG.error(exception_message) raise exception.VolumeBackendAPIException( message=exception_message) if self.utils.is_volume_failed_over(volume): extra_specs = rep_extra_specs device_info_dict, is_multiattach = ( self.find_host_lun_id(volume, connector.get('host'), extra_specs, connector=connector)) masking_view_dict = self._populate_masking_dict( volume, connector, extra_specs) masking_view_dict[utils.IS_MULTIATTACH] = is_multiattach if self.rest.is_next_gen_array(extra_specs['array']): masking_view_dict['workload'] = 'NONE' temp_pool = masking_view_dict['storagegroup_name'] splitPool = temp_pool.split('+') if len(splitPool) == 4: splitPool[1] = 'NONE' masking_view_dict['storagegroup_name'] = '+'.join(splitPool) if ('hostlunid' in device_info_dict and device_info_dict['hostlunid'] is not None): hostlunid = device_info_dict['hostlunid'] LOG.info("Volume %(volume)s is already mapped to host %(host)s. 
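# --- Illustrative sketch (not driver code): on next-gen arrays (PowerMax OS
# 5978+) the workload slot of a '+'-separated name is forced to NONE, as done
# above for the storage group name. The layout assumed here is the
# slo+workload+srp+serial pool-name format used by update_volume_stats.
def neutralise_workload(name):
    parts = name.split('+')
    if len(parts) == 4:
        parts[1] = 'NONE'
    return '+'.join(parts)

# e.g. neutralise_workload('Diamond+OLTP+SRP_1+000197800123')
#   -> 'Diamond+NONE+SRP_1+000197800123'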
" "The hostlunid is %(hostlunid)s.", {'volume': volume.name, 'host': connector['host'], 'hostlunid': hostlunid}) port_group_name = ( self.get_port_group_from_masking_view( extra_specs[utils.ARRAY], device_info_dict['maskingview'])) if self.utils.is_metro_device(rep_config, extra_specs): remote_info_dict, is_multiattach = ( self.find_host_lun_id(volume, connector.get('host'), extra_specs, rep_extra_specs)) if remote_info_dict.get('hostlunid') is None: # Need to attach on remote side metro_host_lun, remote_port_group = ( self._attach_metro_volume( volume, connector, is_multiattach, extra_specs, rep_extra_specs)) else: metro_host_lun = remote_info_dict['hostlunid'] remote_port_group = self.get_port_group_from_masking_view( rep_extra_specs[utils.ARRAY], remote_info_dict['maskingview']) device_info_dict['metro_hostlunid'] = metro_host_lun else: if is_multiattach and extra_specs[utils.SLO]: # Need to move volume to a non-fast managed storagegroup # before attach on subsequent host(s) masking_view_dict = self.masking.pre_multiattach( extra_specs[utils.ARRAY], masking_view_dict[utils.DEVICE_ID], masking_view_dict, extra_specs) device_info_dict, port_group_name = ( self._attach_volume( volume, connector, extra_specs, masking_view_dict)) if self.utils.is_metro_device(rep_config, extra_specs): # Need to attach on remote side metro_host_lun, remote_port_group = self._attach_metro_volume( volume, connector, is_multiattach, extra_specs, rep_extra_specs) device_info_dict['metro_hostlunid'] = metro_host_lun if self.protocol.lower() == 'iscsi': device_info_dict['ip_and_iqn'] = ( self._find_ip_and_iqns( extra_specs[utils.ARRAY], port_group_name)) if self.utils.is_metro_device(rep_config, extra_specs): device_info_dict['metro_ip_and_iqn'] = ( self._find_ip_and_iqns( rep_extra_specs[utils.ARRAY], remote_port_group)) device_info_dict['is_multipath'] = is_multipath if self.protocol.lower() == cinder_constants.NVMEOF_TCP.lower(): ips = self._find_nvme_target_ips( extra_specs[utils.ARRAY], port_group_name) device_info_dict['ips'] = ips array_tag_list = self.get_tags_of_storage_array( extra_specs[utils.ARRAY]) if array_tag_list: masking_view_dict['array_tag_list'] = array_tag_list if is_multiattach and LOG.isEnabledFor(logging.DEBUG): masking_view_dict['mv_list'], masking_view_dict['sg_list'] = ( self._get_mvs_and_sgs_from_volume( extra_specs[utils.ARRAY], masking_view_dict[utils.DEVICE_ID])) elif not is_multiattach and LOG.isEnabledFor(logging.DEBUG): masking_view_dict['tag_list'] = self.get_tags_of_storage_group( extra_specs[utils.ARRAY], masking_view_dict[utils.SG_NAME]) self.volume_metadata.capture_attach_info( volume, extra_specs, masking_view_dict, connector['host'], is_multipath, is_multiattach) return device_info_dict def get_tags_of_storage_group(self, array, storage_group_name): """Get the tag information from a storage group :param array: serial number of array :param storage_group_name: storage group name :returns: tag list """ try: storage_group = self.rest.get_storage_group( array, storage_group_name) except Exception: return None return storage_group.get('tags') def get_tags_of_storage_array(self, array): """Get the tag information from an array :param array: serial number of array :returns: tag list """ tag_name_list = None try: tag_name_list = self.rest.get_array_tags(array) except Exception: pass return tag_name_list def _attach_metro_volume(self, volume, connector, is_multiattach, extra_specs, rep_extra_specs): """Helper method to attach a metro volume. 
Metro protected volumes point to two PowerMax/VMAX devices on different arrays, which are presented as a single device to the host. This method masks the remote device to the host. :param volume: the volume object :param connector: the connector dict :param is_multiattach: flag to indicate if this a multiattach case :param extra_specs: the extra specifications :param rep_extra_specs: replication extra specifications :returns: hostlunid, remote_port_group """ remote_mv_dict = self._populate_masking_dict( volume, connector, extra_specs, rep_extra_specs) remote_mv_dict[utils.IS_MULTIATTACH] = ( True if is_multiattach else False) if is_multiattach and rep_extra_specs[utils.SLO]: # Need to move volume to a non-fast managed sg # before attach on subsequent host(s) remote_mv_dict = self.masking.pre_multiattach( rep_extra_specs[utils.ARRAY], remote_mv_dict[utils.DEVICE_ID], remote_mv_dict, rep_extra_specs) remote_info_dict, remote_port_group = ( self._attach_volume( volume, connector, extra_specs, remote_mv_dict, rep_extra_specs=rep_extra_specs)) remote_port_group = self.get_port_group_from_masking_view( rep_extra_specs[utils.ARRAY], remote_info_dict['maskingview']) return remote_info_dict['hostlunid'], remote_port_group def _attach_volume(self, volume, connector, extra_specs, masking_view_dict, rep_extra_specs=None): """Attach a volume to a host. :param volume: the volume object :param connector: the connector object :param extra_specs: extra specifications :param masking_view_dict: masking view information :param rep_extra_specs: rep extra specs are passed if metro device :returns: dict -- device_info_dict String -- port group name :raises: VolumeBackendAPIException """ m_specs = extra_specs if rep_extra_specs is None else rep_extra_specs rollback_dict = self.masking.setup_masking_view( masking_view_dict[utils.ARRAY], volume, masking_view_dict, m_specs) # Find host lun id again after the volume is exported to the host. device_info_dict, __ = self.find_host_lun_id( volume, connector.get('host'), extra_specs, rep_extra_specs) if 'hostlunid' not in device_info_dict: # Did not successfully attach to host, so a rollback is required. error_message = (_("Error Attaching volume %(vol)s. Cannot " "retrieve hostlunid.") % {'vol': volume.id}) LOG.error(error_message) self.masking.check_if_rollback_action_for_masking_required( masking_view_dict[utils.ARRAY], volume, masking_view_dict[utils.DEVICE_ID], rollback_dict) raise exception.VolumeBackendAPIException( message=error_message) return device_info_dict, rollback_dict[utils.PORTGROUPNAME] def terminate_connection(self, volume, connector): """Disallow connection from connector. :param volume: the volume Object :param connector: the connector Object """ volume_name = volume.name LOG.info("Terminate connection: %(volume)s.", {'volume': volume_name}) if self.promotion: self._unmap_lun_promotion(volume, connector) else: self._unmap_lun(volume, connector) def extend_volume(self, volume, new_size): """Extends an existing volume. 
:param volume: the volume Object :param new_size: the new size to increase the volume to :raises: VolumeBackendAPIException: """ # Set specific attributes for extend operation ex_specs = self._initial_setup(volume) array = ex_specs[utils.ARRAY] device_id = self._find_device_on_array(volume, ex_specs) vol_name = volume.name orig_vol_size = volume.size rep_enabled = self.utils.is_replication_enabled(ex_specs) rdf_grp_no = None legacy_extend = False # Run validation and capabilities checks self._extend_vol_validation_checks( array, device_id, vol_name, ex_specs, orig_vol_size, new_size) # Get extend workflow dependent on array gen and replication status if rep_enabled: rep_config = ex_specs[utils.REP_CONFIG] rdf_grp_no, __ = self.get_rdf_details(array, rep_config) self._validate_rdfg_status(array, ex_specs) r1_ode, r1_ode_metro, r2_ode, r2_ode_metro = ( self._array_ode_capabilities_check(array, rep_config, True)) if self.next_gen: if self.utils.is_metro_device(rep_config, ex_specs): if not r1_ode_metro or not r2_ode or not r2_ode_metro: legacy_extend = True else: legacy_extend = True # Handle the extend process using workflow info from previous steps if legacy_extend: rep_config = ex_specs[utils.REP_CONFIG] if rep_config.get('allow_extend', False): LOG.info("Legacy extend volume %(volume)s to %(new_size)d GBs", {'volume': vol_name, 'new_size': int(new_size)}) self._extend_legacy_replicated_vol( array, volume, device_id, vol_name, new_size, ex_specs, rdf_grp_no) else: exception_message = ( "Extending a replicated volume on this backend is not " "permitted. Please set 'allow_extend:True' in your " "PowerMax replication target_backend configuration.") LOG.error(exception_message) raise exception.VolumeBackendAPIException( message=exception_message) else: LOG.info("ODE extend volume %(volume)s to %(new_size)d GBs", {'volume': vol_name, 'new_size': int(new_size)}) self.provision.extend_volume( array, device_id, new_size, ex_specs, rdf_grp_no) self.volume_metadata.capture_extend_info( volume, new_size, device_id, ex_specs, array) LOG.debug("Leaving extend_volume: %(volume_name)s. ", {'volume_name': vol_name}) def _extend_vol_validation_checks(self, array, device_id, vol_name, ex_specs, orig_vol_size, new_size): """Run validation checks on settings for extend volume operation. :param array: the array serial number :param device_id: the device id :param vol_name: the volume name :param ex_specs: extra specifications :param orig_vol_size: the original volume size :param new_size: the new size the volume should be :raises: VolumeBackendAPIException: """ # 1 - Check device exists if device_id is None: exception_message = (_( "Cannot find Volume: %(volume_name)s. Extend operation. " "Exiting....") % {'volume_name': vol_name}) LOG.error(exception_message) raise exception.VolumeBackendAPIException( message=exception_message) # 2 - Check if volume is part of an on-going clone operation or if vol # has source snapshots but not next-gen array self._cleanup_device_snapvx(array, device_id, ex_specs) __, snapvx_src, __ = self.rest.is_vol_in_rep_session(array, device_id) if snapvx_src: if not self.next_gen: exception_message = ( _("The volume: %(volume)s is a snapshot source. " "Extending a volume with snapVx snapshots is only " "supported on PowerMax/VMAX from OS version 5978 " "onwards. 
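# --- Illustrative sketch (not driver code): for replicated volumes the choice
# made above between Online Device Expansion and the legacy
# break/extend/re-protect workflow reduces to the following booleans (the
# driver derives them from ucode levels via _array_ode_capabilities_check):
def use_legacy_extend(next_gen, is_metro, r1_ode_metro, r2_ode, r2_ode_metro):
    """Return True when the legacy extend workflow must be used."""
    if not next_gen:
        return True
    if is_metro and not (r1_ode_metro and r2_ode and r2_ode_metro):
        return True
    return False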
Exiting...") % {'volume': vol_name}) LOG.error(exception_message) raise exception.VolumeBackendAPIException( message=exception_message) # 3 - Check new size is larger than old size if int(orig_vol_size) >= int(new_size): exception_message = (_( "Your original size: %(orig_vol_size)s GB is greater " "than or the same as: %(new_size)s GB. Only extend ops are " "supported. Exiting...") % {'orig_vol_size': orig_vol_size, 'new_size': new_size}) LOG.error(exception_message) raise exception.VolumeBackendAPIException( message=exception_message) def _array_ode_capabilities_check(self, array, rep_config, rep_enabled=False): """Given an array, check Online Device Expansion (ODE) support. :param array: the array serial number :param rep_config: the replication configuration :param rep_enabled: if replication is enabled for backend :returns: r1_ode: (bool) If R1 array supports ODE :returns: r1_ode_metro: (bool) If R1 array supports ODE with Metro vols :returns: r2_ode: (bool) If R2 array supports ODE :returns: r2_ode_metro: (bool) If R2 array supports ODE with Metro vols """ r1_ucode = self.ucode_level.split('.') r1_ode, r1_ode_metro = False, False r2_ode, r2_ode_metro = False, False if self.next_gen: r1_ode = True if rep_enabled: __, r2_array = self.get_rdf_details(array, rep_config) r2_ucode = self.rest.get_array_ucode_version(r2_array) if self.utils.ode_capable(r1_ucode): r1_ode_metro = True r2_ucode = r2_ucode.split('.') if self.rest.is_next_gen_array(r2_array): r2_ode = True if self.utils.ode_capable(r2_ucode): r2_ode_metro = True return r1_ode, r1_ode_metro, r2_ode, r2_ode_metro @coordination.synchronized('emc-{rdf_group_no}-rdf') def _extend_legacy_replicated_vol( self, array, volume, device_id, volume_name, new_size, extra_specs, rdf_group_no): """Extend a legacy OS volume without Online Device Expansion :param array: the array serial number :param volume: the volume objcet :param device_id: the volume device id :param volume_name: the volume name :param new_size: the new size the volume should be :param extra_specs: extra specifications :param rdf_group_no: the RDF group number """ try: # Break the RDF device pair relationship and cleanup R2 LOG.info("Breaking replication relationship...") self.break_rdf_device_pair_session( array, device_id, volume_name, extra_specs, volume) # Extend the R1 volume LOG.info("Extending source volume...") self.provision.extend_volume( array, device_id, new_size, extra_specs) # Setup volume replication again for source volume LOG.info("Recreating replication relationship...") rep_status, __, __, rep_extra_specs, resume_rdf = ( self.configure_volume_replication( array, volume, device_id, extra_specs)) # If first/only volume in SG then RDF protect SG if rep_status == 'first_vol_in_rdf_group': self._protect_storage_group( array, device_id, volume, volume_name, rep_extra_specs) # If more than one volume in SG then resume replication if resume_rdf: self.rest.srdf_resume_replication( array, rep_extra_specs['mgmt_sg_name'], rep_extra_specs['rdf_group_no'], extra_specs) except Exception as e: exception_message = (_("Error extending volume. Error received " "was %(e)s") % {'e': e}) LOG.error(exception_message) raise exception.VolumeBackendAPIException( message=exception_message) def update_volume_stats(self): """Retrieve stats info.""" if self.rest.u4p_in_failover and self.rest.u4p_failover_autofailback: self.retest_primary_u4p() pools = [] # Dictionary to hold the arrays for which the SRP details # have already been queried. 
arrays = {} total_capacity_gb = 0 free_capacity_gb = 0 provisioned_capacity_gb = 0 location_info = None backend_name = self.pool_info['backend_name'] max_oversubscription_ratio = ( self.pool_info['max_over_subscription_ratio']) reserved_percentage = self.pool_info['reserved_percentage'] array_reserve_percent = None array_info_list = self.pool_info['arrays_info'] already_queried = False for array_info in array_info_list: if self.failedover: rep_config = self.rep_configs[0] array_info = self.get_secondary_stats_info( rep_config, array_info) # Add both SLO & Workload name in the pool name # Only insert the array details in the dict once if array_info['SerialNumber'] not in arrays: (location_info, total_capacity_gb, free_capacity_gb, provisioned_capacity_gb, array_reserve_percent) = self._update_srp_stats(array_info) arrays[array_info['SerialNumber']] = ( [total_capacity_gb, free_capacity_gb, provisioned_capacity_gb, array_reserve_percent]) else: already_queried = True try: pool_name = ("%(slo)s+%(workload)s+%(srpName)s+%(array)s" % {'slo': array_info['SLO'], 'workload': array_info['Workload'], 'srpName': array_info['srpName'], 'array': array_info['SerialNumber']}) except KeyError: pool_name = ("%(slo)s+%(srpName)s+%(array)s" % {'slo': array_info['SLO'], 'srpName': array_info['srpName'], 'array': array_info['SerialNumber']}) if already_queried: # The dictionary will only have one key per PowerMax/VMAX # Construct the location info pool = self._construct_location_info_and_pool( array_info, pool_name, arrays, max_oversubscription_ratio, reserved_percentage) else: pool = {'pool_name': pool_name, 'total_capacity_gb': total_capacity_gb, 'free_capacity_gb': free_capacity_gb, 'provisioned_capacity_gb': provisioned_capacity_gb, 'QoS_support': False, 'location_info': location_info, 'consistencygroup_support': False, 'thin_provisioning_support': True, 'thick_provisioning_support': False, 'consistent_group_snapshot_enabled': True, 'max_over_subscription_ratio': max_oversubscription_ratio, 'reserved_percentage': reserved_percentage, 'replication_enabled': self.replication_enabled, 'group_replication_enabled': self.replication_enabled, 'consistent_group_replication_enabled': self.replication_enabled } if array_reserve_percent: if isinstance(reserved_percentage, int): if array_reserve_percent > reserved_percentage: pool['reserved_percentage'] = array_reserve_percent else: pool['reserved_percentage'] = array_reserve_percent pools.append(pool) pools = self.utils.add_legacy_pools(pools) if self.promotion: primary_array = self.configuration.safe_get('powermax_array') pools = self.utils.add_promotion_pools(pools, primary_array) data = {'vendor_name': "Dell EMC", 'driver_version': self.version, 'storage_protocol': 'unknown', 'volume_backend_name': backend_name or self.__class__.__name__, # Use zero capacities here so we always use a pool. 
'total_capacity_gb': 0, 'free_capacity_gb': 0, 'provisioned_capacity_gb': 0, 'reserved_percentage': 0, 'replication_enabled': self.replication_enabled, 'replication_targets': self.replication_targets, 'sparse_copy_volume': True, 'pools': pools} return data def _construct_location_info_and_pool( self, array_info, pool_name, arrays, max_oversubscription_ratio, reserved_percentage): """Construct the location info string and the pool dict :param array_info: array information dict :param pool_name: pool name :param arrays: arrays dict :param max_oversubscription_ratio: max oversubscription ratio :param reserved_percentage: reserved percentage :returns: pool - dict """ try: temp_location_info = ( ("%(arrayName)s#%(srpName)s#%(slo)s#%(workload)s" % {'arrayName': array_info['SerialNumber'], 'srpName': array_info['srpName'], 'slo': array_info['SLO'], 'workload': array_info['Workload']})) except KeyError: temp_location_info = ( ("%(arrayName)s#%(srpName)s#%(slo)s" % {'arrayName': array_info['SerialNumber'], 'srpName': array_info['srpName'], 'slo': array_info['SLO']})) pool = {'pool_name': pool_name, 'total_capacity_gb': arrays[array_info['SerialNumber']][0], 'free_capacity_gb': arrays[array_info['SerialNumber']][1], 'provisioned_capacity_gb': arrays[array_info['SerialNumber']][2], 'QoS_support': False, 'location_info': temp_location_info, 'thin_provisioning_support': True, 'thick_provisioning_support': False, 'consistent_group_snapshot_enabled': True, 'max_over_subscription_ratio': max_oversubscription_ratio, 'reserved_percentage': reserved_percentage, 'replication_enabled': self.replication_enabled, 'multiattach': True} if arrays[array_info['SerialNumber']][3]: if reserved_percentage: if (arrays[array_info['SerialNumber']][3] > reserved_percentage): pool['reserved_percentage'] = ( arrays[array_info['SerialNumber']][3]) else: pool['reserved_percentage'] = ( arrays[array_info['SerialNumber']][3]) return pool def _update_srp_stats(self, array_info): """Update SRP stats. :param array_info: array information :returns: location_info :returns: totalManagedSpaceGbs :returns: remainingManagedSpaceGbs :returns: provisionedManagedSpaceGbs :returns: array_reserve_percent :returns: wlpEnabled """ (totalManagedSpaceGbs, remainingManagedSpaceGbs, provisionedManagedSpaceGbs, array_reserve_percent) = ( self.provision.get_srp_pool_stats( array_info['SerialNumber'], array_info)) LOG.info("Capacity stats for SRP pool %(srpName)s on array " "%(arrayName)s total_capacity_gb=%(total_capacity_gb)lu, " "free_capacity_gb=%(free_capacity_gb)lu, " "provisioned_capacity_gb=%(provisioned_capacity_gb)lu", {'srpName': array_info['srpName'], 'arrayName': array_info['SerialNumber'], 'total_capacity_gb': totalManagedSpaceGbs, 'free_capacity_gb': remainingManagedSpaceGbs, 'provisioned_capacity_gb': provisionedManagedSpaceGbs}) try: location_info = ("%(arrayName)s#%(srpName)s#%(slo)s#%(workload)s" % {'arrayName': array_info['SerialNumber'], 'srpName': array_info['srpName'], 'slo': array_info['SLO'], 'workload': array_info['Workload']}) except KeyError: location_info = ("%(arrayName)s#%(srpName)s#%(slo)s" % {'arrayName': array_info['SerialNumber'], 'srpName': array_info['srpName'], 'slo': array_info['SLO']}) return (location_info, totalManagedSpaceGbs, remainingManagedSpaceGbs, provisionedManagedSpaceGbs, array_reserve_percent) def _set_config_file_and_get_extra_specs(self, volume, volume_type_id=None): """Given the volume object get the associated volumetype. 
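# --- Illustrative sketch (not driver code): both pool builders above report
# the larger of the configured reserved_percentage and the array's own
# reserved capacity, when the array reports one:
def effective_reserved_percentage(conf_reserved, array_reserved):
    if not array_reserved:
        return conf_reserved
    if isinstance(conf_reserved, int):
        return max(conf_reserved, array_reserved)
    return array_reserved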
Given the volume object get the associated volumetype and the extra specs associated with it. Based on the name of the config group, register the config file :param volume: the volume object including the volume_type_id :param volume_type_id: Optional override of volume.volume_type_id :returns: dict -- the extra specs dict :returns: dict -- QoS specs """ qos_specs = {} extra_specs = self.utils.get_volumetype_extra_specs( volume, volume_type_id) type_id = volume.volume_type_id if type_id: res = volume_types.get_volume_type_qos_specs(type_id) qos_specs = res['qos_specs'] # If there are no extra specs then the default case is assumed. if extra_specs: if extra_specs.get('replication_enabled') == ' True': extra_specs[utils.IS_RE] = True backend_id = self._get_replicated_volume_backend_id(volume) rep_config = self.utils.get_rep_config( backend_id, self.rep_configs) if rep_config is None: msg = _('Could not determine which rep_device to use ' 'from cinder.conf') raise exception.VolumeBackendAPIException(msg) extra_specs[utils.REP_CONFIG] = rep_config if rep_config.get('mode'): extra_specs[utils.REP_MODE] = rep_config['mode'] if rep_config.get(utils.METROBIAS): extra_specs[utils.METROBIAS] = ( rep_config[utils.METROBIAS]) extra_specs[utils.DISABLE_PROTECTED_SNAP] =\ self.utils.is_protected_snap_disabled(extra_specs) return extra_specs, qos_specs def _get_replicated_volume_backend_id(self, volume): """Given a volume, return its rep device backend id. :param volume: volume used to retrieve backend id -- volume :returns: backend id -- str """ backend_id = utils.BACKEND_ID_LEGACY_REP volume_extra_specs = self.utils.get_volumetype_extra_specs(volume) if volume_extra_specs: volume_backend_id = volume_extra_specs.get( utils.REPLICATION_DEVICE_BACKEND_ID) if volume_backend_id: backend_id = volume_backend_id return backend_id def _find_device_on_array(self, volume, extra_specs, remote_device=False): """Given the volume get the PowerMax/VMAX device Id. :param volume: volume object :param extra_specs: the extra Specs :param remote_device: find remote device for replicated volumes :returns: array, device_id """ founddevice_id = None volume_name = volume.id try: name_id = volume._name_id except AttributeError: name_id = None if remote_device: loc = volume.replication_driver_data else: loc = volume.provider_location if isinstance(loc, str): name = ast.literal_eval(loc) array = extra_specs[utils.ARRAY] if name.get('device_id'): device_id = name['device_id'] elif name.get('keybindings'): device_id = name['keybindings']['DeviceID'] else: device_id = None try: founddevice_id = self.rest.check_volume_device_id( array, device_id, volume_name, name_id) except exception.VolumeBackendAPIException: pass if founddevice_id is None: LOG.debug("Volume %(volume_name)s not found on the array.", {'volume_name': volume_name}) else: LOG.debug("Volume name: %(volume_name)s Volume device id: " "%(founddevice_id)s.", {'volume_name': volume_name, 'founddevice_id': founddevice_id}) return founddevice_id def find_host_lun_id(self, volume, host, extra_specs, rep_extra_specs=None, connector=None): """Given the volume dict find the host lun id for a volume. :param volume: the volume dict :param host: host from connector (can be None on a force-detach) :param extra_specs: the extra specs :param rep_extra_specs: rep extra specs, passed in if metro device :param connector: connector object can be none. 
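# --- Illustrative sketch (not driver code): provider_location and
# replication_driver_data are persisted as str() of a dict, and the device id
# is recovered as in _find_device_on_array above, including the legacy
# 'keybindings' layout:
import ast

def device_id_from_location(location):
    if not isinstance(location, str):
        return None
    try:
        name = ast.literal_eval(location)
    except (ValueError, SyntaxError):
        return None
    if not isinstance(name, dict):
        return None
    if name.get('device_id'):
        return name['device_id']
    if name.get('keybindings'):
        return name['keybindings'].get('DeviceID')
    return None

# e.g. device_id_from_location("{'device_id': '0012A', 'array': '000197800123'}")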
:returns: dict -- the data dict """ maskedvols = {} is_multiattach = False volume_name = volume.name device_id = self._find_device_on_array(volume, extra_specs) if connector: if self.migrate.do_migrate_if_candidate( extra_specs[utils.ARRAY], extra_specs[utils.SRP], device_id, volume, connector): LOG.debug("MIGRATE - Successfully migrated from device " "%(dev)s from legacy shared storage groups, " "pre Pike release.", {'dev': device_id}) if rep_extra_specs: rdf_pair_info = self.rest.get_rdf_pair_volume( extra_specs[utils.ARRAY], rep_extra_specs['rdf_group_no'], device_id) device_id = rdf_pair_info.get('remoteVolumeName', None) extra_specs = rep_extra_specs host_name = self.utils.get_host_name_label( host, self.powermax_short_host_name_template) if host else None if device_id: array = extra_specs[utils.ARRAY] # Return only masking views for this host host_maskingviews, all_masking_view_list = ( self._get_masking_views_from_volume( array, device_id, host_name)) if not host_maskingviews: # Backward compatibility if a new template was added to # an existing backend. host_name = self.utils.get_host_short_name( host) if host else None host_maskingviews, all_masking_view_list = ( self._get_masking_views_from_volume_for_host( all_masking_view_list, host_name)) for maskingview in host_maskingviews: host_lun_id = self.rest.find_mv_connections_for_vol( array, maskingview, device_id) if host_lun_id is not None: devicedict = {'hostlunid': host_lun_id, 'maskingview': maskingview, 'array': array, 'device_id': device_id} maskedvols = devicedict if not maskedvols: LOG.debug( "Host lun id not found for volume: %(volume_name)s " "with the device id: %(device_id)s on host: %(host)s.", {'volume_name': volume_name, 'device_id': device_id, 'host': host_name}) if len(all_masking_view_list) > len(host_maskingviews): other_maskedvols = [] for maskingview in all_masking_view_list: host_lun_id = self.rest.find_mv_connections_for_vol( array, maskingview, device_id) if host_lun_id is not None: devicedict = {'hostlunid': host_lun_id, 'maskingview': maskingview, 'array': array, 'device_id': device_id} other_maskedvols.append(devicedict) if len(other_maskedvols) > 0: LOG.debug("Volume is masked to a different host " "than %(host)s - Live Migration or Multi-Attach " "use case.", {'host': host}) is_multiattach = True else: exception_message = (_("Cannot retrieve volume %(vol)s " "from the array.") % {'vol': volume_name}) LOG.error(exception_message) raise exception.VolumeBackendAPIException(exception_message) return maskedvols, is_multiattach def get_masking_views_from_volume(self, array, volume, device_id, host): """Get all masking views from a volume. :param array: array serial number :param volume: the volume object :param device_id: the volume device id :param host: the host :returns: masking view list, is metro """ is_metro = False extra_specs = self._initial_setup(volume) mv_list, __ = self._get_masking_views_from_volume(array, device_id, host) if self.utils.is_metro_device( extra_specs.get(utils.REP_CONFIG), extra_specs): is_metro = True return mv_list, is_metro def _get_masking_views_from_volume(self, array, device_id, host): """Helper function to retrieve masking view list for a volume. 
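# --- Illustrative sketch (not driver code): matching masking views to a host
# is a case-insensitive substring test on the host label, as in
# _get_masking_views_from_volume_for_host below; with no host name, every
# masking view is treated as a match:
def filter_masking_views_for_host(masking_views, host_name):
    """Return (masking views for this host, all masking views)."""
    all_mvs = list(masking_views)
    if not host_name:
        return all_mvs, all_mvs
    host_mvs = [mv for mv in all_mvs if host_name.lower() in mv.lower()]
    return host_mvs, all_mvs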
:param array: array serial number :param device_id: the volume device id :param host: the host :returns: masking view list, all masking view list """ LOG.debug("Getting masking views from volume") mvs, __ = self._get_mvs_and_sgs_from_volume(array, device_id) return self._get_masking_views_from_volume_for_host(mvs, host) def _get_masking_views_from_volume_for_host( self, masking_views, host_name): """Check all masking views for host_name :param masking_views: list of masking view :param host_name: the host name for comparision :returns: masking view list, all masking view list """ LOG.debug("Getting masking views from volume for host %(host)s ", {'host': host_name}) host_masking_view_list, all_masking_view_list = [], [] for masking_view in masking_views: all_masking_view_list.append(masking_view) if host_name: if host_name.lower() in masking_view.lower(): host_masking_view_list.append(masking_view) host_masking_view_list = (host_masking_view_list if host_name else all_masking_view_list) return host_masking_view_list, all_masking_view_list def _get_mvs_and_sgs_from_volume(self, array, device_id): """Helper function to retrieve masking views and storage groups. :param array: array serial number :param device_id: the volume device id :returns: masking view list, storage group list """ final_masking_view_list = [] storage_group_list = self.rest.get_storage_groups_from_volume( array, device_id) for sg in storage_group_list: masking_view_list = self.rest.get_masking_views_from_storage_group( array, sg) final_masking_view_list.extend(masking_view_list) return final_masking_view_list, storage_group_list def _initial_setup(self, volume, volume_type_id=None, init_conn=False): """Necessary setup to accumulate the relevant information. The volume object has a host in which we can parse the config group name. The config group name is the key to our EMC configuration file. The emc configuration file contains srp name and array name which are mandatory fields. :param volume: the volume object -- obj :param volume_type_id: optional override of volume.volume_type_id -- str :param init_conn: if extra specs are for initialize connection -- bool :returns: dict -- extra spec dict :raises: VolumeBackendAPIException: """ try: array_info = self.get_attributes_from_cinder_config() if array_info: extra_specs, qos_specs = ( self._set_config_file_and_get_extra_specs( volume, volume_type_id)) else: exception_message = (_( "Unable to get corresponding record for srp. Please " "refer to the current online documentation for correct " "configuration and note that the xml file is no longer " "supported.")) raise exception.VolumeBackendAPIException( message=exception_message) extra_specs = self._set_vmax_extra_specs( extra_specs, array_info, init_conn) if qos_specs and qos_specs.get('consumer') != "front-end": extra_specs['qos'] = qos_specs.get('specs') except Exception: exception_message = (_( "Unable to get configuration information necessary to " "create a volume: %(errorMessage)s.") % {'errorMessage': sys.exc_info()[1]}) raise exception.VolumeBackendAPIException( message=exception_message) return extra_specs def _populate_masking_dict(self, volume, connector, extra_specs, rep_extra_specs=None): """Get all the names of the maskingview and sub-components. 
:param volume: the volume object :param connector: the connector object :param extra_specs: extra specifications :param rep_extra_specs: replication extra specs, if metro volume :returns: dict -- a dictionary with masking view information """ masking_view_dict = {} volume_name = volume.name device_id = self._find_device_on_array(volume, extra_specs) if rep_extra_specs is not None: rdf_pair_info = self.rest.get_rdf_pair_volume( extra_specs[utils.ARRAY], rep_extra_specs['rdf_group_no'], device_id) device_id = rdf_pair_info.get('remoteVolumeName', None) extra_specs = rep_extra_specs if not device_id: exception_message = (_("Cannot retrieve volume %(vol)s " "from the array. ") % {'vol': volume_name}) LOG.error(exception_message) raise exception.VolumeBackendAPIException(exception_message) protocol = self.utils.get_short_protocol_type(self.protocol) short_host_name = self.utils.get_host_name_label( connector['host'], self.powermax_short_host_name_template) masking_view_dict[utils.USED_HOST_NAME] = short_host_name masking_view_dict[utils.SLO] = extra_specs[utils.SLO] masking_view_dict[utils.WORKLOAD] = 'NONE' if self.next_gen else ( extra_specs[utils.WORKLOAD]) masking_view_dict[utils.ARRAY] = extra_specs[utils.ARRAY] masking_view_dict[utils.SRP] = extra_specs[utils.SRP] if not extra_specs[utils.PORTGROUPNAME]: LOG.warning("You must supply a valid pre-created port group " "in cinder.conf or as an extra spec. Port group " "cannot be left empty as creating a new masking " "view will fail.") masking_view_dict[utils.PORT_GROUP_LABEL] = ( self.utils.get_port_name_label( extra_specs[utils.PORTGROUPNAME], self.powermax_port_group_name_template)) masking_view_dict[utils.PORTGROUPNAME] = ( extra_specs[utils.PORTGROUPNAME]) masking_view_dict[utils.INITIATOR_CHECK] = ( self._get_initiator_check_flag()) child_sg_name, do_disable_compression, rep_enabled = ( self.utils.get_child_sg_name( short_host_name, extra_specs, masking_view_dict[utils.PORT_GROUP_LABEL])) masking_view_dict[utils.DISABLECOMPRESSION] = do_disable_compression masking_view_dict[utils.IS_RE] = rep_enabled mv_prefix = ( "OS-%(shortHostName)s-%(protocol)s-%(pg)s" % {'shortHostName': short_host_name, 'protocol': protocol, 'pg': masking_view_dict[utils.PORT_GROUP_LABEL]}) masking_view_dict[utils.SG_NAME] = child_sg_name masking_view_dict[utils.MV_NAME] = ("%(prefix)s-MV" % {'prefix': mv_prefix}) masking_view_dict[utils.PARENT_SG_NAME] = ("%(prefix)s-SG" % {'prefix': mv_prefix}) masking_view_dict[utils.IG_NAME] = ( ("OS-%(shortHostName)s-%(protocol)s-IG" % {'shortHostName': short_host_name, 'protocol': protocol})) masking_view_dict[utils.CONNECTOR] = connector masking_view_dict[utils.DEVICE_ID] = device_id masking_view_dict[utils.VOL_NAME] = volume_name return masking_view_dict def _create_cloned_volume( self, volume, source_volume, extra_specs, is_snapshot=False, from_snapvx=False): """Create a clone volume from the source volume. 
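# --- Illustrative sketch (not driver code): the object names assembled above
# share an "OS-<shortHost>-<protocol>-<portGroupLabel>" prefix; the initiator
# group name drops the port group label:
def build_masking_names(short_host, protocol, pg_label):
    prefix = 'OS-%s-%s-%s' % (short_host, protocol, pg_label)
    return {'masking_view': prefix + '-MV',
            'parent_storage_group': prefix + '-SG',
            'initiator_group': 'OS-%s-%s-IG' % (short_host, protocol)}

# e.g. build_masking_names('myShortHost', 'I', 'os-pg1-pg')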
:param volume: clone volume :param source_volume: source of the clone volume :param extra_specs: extra specs :param is_snapshot: boolean -- Defaults to False :param from_snapvx: bool -- Defaults to False :returns: dict -- cloneDict the cloned volume dictionary :raises: VolumeBackendAPIException: """ clone_name = volume.name snap_name = None rep_update, rep_info_dict = dict(), dict() LOG.info("Create a replica from Volume: Clone Volume: %(clone_name)s " "from Source Volume: %(source_name)s.", {'clone_name': clone_name, 'source_name': source_volume.name}) array = extra_specs[utils.ARRAY] is_clone_license = self.rest.is_snapvx_licensed(array) if not is_clone_license: exception_message = (_( "SnapVx feature is not licensed on %(array)s.") % {'array': array}) LOG.error(exception_message) raise exception.VolumeBackendAPIException( message=exception_message) if from_snapvx: source_device_id, snap_name, __ = self._parse_snap_info( array, source_volume) else: source_device_id = self._find_device_on_array( source_volume, extra_specs) if not source_device_id: exception_message = (_( "Cannot find source device on %(array)s.") % {'array': array}) LOG.error(exception_message) raise exception.VolumeBackendAPIException( message=exception_message) # Perform any snapvx cleanup if required before creating the clone if is_snapshot or from_snapvx: self._cleanup_device_snapvx(array, source_device_id, extra_specs) if not is_snapshot: clone_dict, rep_update, rep_info_dict = self._create_replica( array, volume, source_device_id, extra_specs, snap_name=snap_name) else: clone_dict = self._create_snapshot( array, volume, source_device_id, extra_specs) LOG.debug("Leaving _create_cloned_volume: Volume: " "%(clone_name)s Source Device Id: %(source_name)s ", {'clone_name': clone_name, 'source_name': source_device_id}) return clone_dict, rep_update, rep_info_dict def _parse_snap_info(self, array, snapshot): """Given a snapshot object, parse the provider_location. :param array: the array serial number :param snapshot: the snapshot object :returns: sourcedevice_id -- str foundsnap_name -- str found_snap_id_list -- list """ foundsnap_name = None sourcedevice_id = None found_snap_id_list = list() volume_name = snapshot.id loc = snapshot.provider_location if isinstance(loc, str): name = ast.literal_eval(loc) try: sourcedevice_id = name['source_id'] snap_name = name['snap_name'] except KeyError: LOG.info("Error retrieving snapshot details. Assuming " "legacy structure of snapshot...") return None, None, None try: snap_detail_list = self.rest.get_volume_snaps( array, sourcedevice_id, snap_name) for snap_details in snap_detail_list: foundsnap_name = snap_name found_snap_id_list.append(snap_details.get( 'snap_id') if self.rest.is_snap_id else ( snap_details.get('generation'))) except Exception as e: LOG.info("Exception in retrieving snapshot: %(e)s.", {'e': e}) foundsnap_name = None if not foundsnap_name or not sourcedevice_id or not found_snap_id_list: LOG.debug("Error retrieving snapshot details. " "Snapshot name: %(snap)s", {'snap': volume_name}) else: LOG.debug("Source volume: %(volume_name)s Snap name: " "%(foundsnap_name)s.", {'volume_name': sourcedevice_id, 'foundsnap_name': foundsnap_name, 'snap_ids': found_snap_id_list}) return sourcedevice_id, foundsnap_name, found_snap_id_list def _create_snapshot(self, array, snapshot, source_device_id, extra_specs): """Create a snap Vx of a volume. 
:param array: the array serial number :param snapshot: the snapshot object :param source_device_id: the source device id :param extra_specs: the extra specifications :returns: snap_dict """ clone_name = self.utils.get_volume_element_name(snapshot.id) snap_name = self.utils.truncate_string(clone_name, 19) try: self.provision.create_volume_snapvx(array, source_device_id, snap_name, extra_specs) except Exception as e: exception_message = (_("Error creating snap Vx of %(vol)s. " "Exception received: %(e)s.") % {'vol': source_device_id, 'e': str(e)}) LOG.error(exception_message) raise exception.VolumeBackendAPIException( message=exception_message) snap_dict = {'snap_name': snap_name, 'source_id': source_device_id} return snap_dict def _delete_volume(self, volume): """Helper function to delete the specified volume. Pass in host if is snapshot :param volume: volume object to be deleted :returns: volume_name (string vol name) """ volume_name = volume.name extra_specs = self._initial_setup(volume) device_id = self._find_device_on_array(volume, extra_specs) if device_id is None: LOG.warning("Volume %(name)s not found on the array. " "No volume to delete.", {'name': volume_name}) return volume_name array = extra_specs[utils.ARRAY] dps = self.utils.is_protected_snap_disabled(extra_specs) # If a volume is not replicated and has the # powermax:disable_protected_snap set to True, # then clean up the volume without replication cleanup. if dps and volume.replication_status is None: self.masking.remove_and_reset_members( array, volume, device_id, volume_name, extra_specs, False) self._cleanup_device_retry(array, device_id, extra_specs) else: if self.utils.is_replication_enabled(extra_specs): self._validate_rdfg_status(array, extra_specs) self._cleanup_device_retry(array, device_id, extra_specs) # Remove from any storage groups and cleanup replication self._remove_vol_and_cleanup_replication( array, device_id, volume_name, extra_specs, volume) self._delete_from_srp( array, device_id, volume_name, extra_specs) return volume_name @retry(retry_exc_tuple, interval=2, retries=7) def _cleanup_device_retry(self, array, device_id, extra_specs): """Cleanup snapvx on the device :param array: the serial number of the array -- str :param device_id: the device id -- str :param extra_specs: extra specs -- dict """ # Check if the volume being deleted is a # source or target for copy session self._cleanup_device_snapvx(array, device_id, extra_specs) # Confirm volume has no more snapshots associated and is not a target snapshots = self.rest.get_volume_snapshot_list(array, device_id) if snapshots: snapshot_names = ', '.join( snap.get('snapshotName') for snap in snapshots) raise exception.VolumeBackendAPIException(_( 'Cannot delete device %s as it currently has the following ' 'active snapshots: %s. Please try again once these snapshots ' 'are no longer active.') % (device_id, snapshot_names)) __, snapvx_target_details = self.rest.find_snap_vx_sessions( array, device_id, tgt_only=True) if snapvx_target_details: source_device = snapvx_target_details.get('source_vol_id') snapshot_name = snapvx_target_details.get('snap_name') if snapshot_name: raise exception.VolumeBackendAPIException(_( 'Cannot delete device %s as it is currently a linked ' 'target of snapshot %s. The source device of this link ' 'is %s. Please try again once this snapshot is no longer ' 'active.') % (device_id, snapshot_name, source_device)) def _create_volume(self, volume, volume_name, volume_size, extra_specs): """Create a volume. 
:param volume: the volume :param volume_name: the volume name :param volume_size: the volume size :param extra_specs: extra specifications :returns: volume_dict, rep_update, rep_info_dict --dict """ # Set Create Volume options is_re, rep_mode, storagegroup_name = False, None, None rep_info_dict, rep_update = dict(), dict() # Get Array details array = extra_specs[utils.ARRAY] array_model, next_gen = self.rest.get_array_model_info(array) if next_gen: extra_specs[utils.WORKLOAD] = 'NONE' # Verify valid SL/WL combination is_valid_slo, is_valid_workload = self.provision.verify_slo_workload( array, extra_specs[utils.SLO], extra_specs[utils.WORKLOAD], next_gen, array_model) if not is_valid_slo or not is_valid_workload: exception_message = (_( "Either SLO: %(slo)s or workload %(workload)s is invalid. " "Examine previous error statement for valid values.") % {'slo': extra_specs[utils.SLO], 'workload': extra_specs[utils.WORKLOAD]}) LOG.error(exception_message) raise exception.VolumeBackendAPIException( message=exception_message) LOG.debug("Create Volume: %(volume)s Srp: %(srp)s " "Array: %(array)s " "Size: %(size)lu.", {'volume': volume_name, 'srp': extra_specs[utils.SRP], 'array': array, 'size': volume_size}) do_disable_compression = self.utils.is_compression_disabled( extra_specs) if self.utils.is_replication_enabled(extra_specs): is_re, rep_mode = True, extra_specs['rep_mode'] storagegroup_name = self.masking.get_or_create_default_storage_group( array, extra_specs[utils.SRP], extra_specs[utils.SLO], extra_specs[utils.WORKLOAD], extra_specs, do_disable_compression, is_re, rep_mode) if not is_re: volume_dict = self._create_non_replicated_volume( array, volume, volume_name, storagegroup_name, volume_size, extra_specs) else: volume_dict, rep_update, rep_info_dict = ( self._create_replication_enabled_volume( array, volume, volume_name, volume_size, extra_specs, storagegroup_name, rep_mode)) device_id = self._get_device_id_from_identifier( array, volume_name, volume_dict['device_id']) if device_id: volume_dict['device_id'] = device_id return volume_dict, rep_update, rep_info_dict def _get_device_id_from_identifier( self, array, volume_name, orig_device_id): """Get the device(s) using the identifier name :param array: the serial number of the array -- str :param volume_name: the user supplied volume name -- str :param orig_device_id: the original device id -- str :returns: device id -- str """ # Compare volume ID against identifier on array. Update if needed. # This can occur in cases where multiple edits are occurring at once. 
dev_id_from_identifier = self.rest.find_volume_device_id( array, volume_name) if isinstance(dev_id_from_identifier, list): if orig_device_id in dev_id_from_identifier: return orig_device_id else: if dev_id_from_identifier != orig_device_id: LOG.warning( "The device id %(dev_ident)s associated with %(vol_name)s " "is not the same as device %(dev_orig)s.", {'dev_ident': dev_id_from_identifier, 'vol_name': volume_name, 'dev_orig': orig_device_id}) return dev_id_from_identifier return None @coordination.synchronized("emc-nonrdf-vol-{storagegroup_name}-{array}") def _create_non_replicated_volume( self, array, volume, volume_name, storagegroup_name, volume_size, extra_specs): """Create a volume without replication enabled :param array: the primary array -- string :param volume: the volume -- dict :param volume_name: the volume name -- string :param storagegroup_name: the storage group name -- string :param volume_size: the volume size -- string :param extra_specs: extra specifications -- dict :returns: volume_dict -- dict :raises: VolumeBackendAPIException: """ existing_devices = self.rest.get_volumes_in_storage_group( array, storagegroup_name) try: volume_dict = self.provision.create_volume_from_sg( array, volume_name, storagegroup_name, volume_size, extra_specs, rep_info=None) return volume_dict except Exception as e: try: self._reset_identifier_on_rollback(array, volume_name) # Attempt cleanup of storage group post exception. updated_devices = set(self.rest.get_volumes_in_storage_group( array, storagegroup_name)) devices_to_delete = [device for device in updated_devices if device not in existing_devices] if devices_to_delete: self._cleanup_non_rdf_volume_create_post_failure( volume, volume_name, extra_specs, devices_to_delete) elif not existing_devices: self.rest.delete_storage_group(array, storagegroup_name) finally: # Pass actual exception that was raised now that cleanup # attempt is finished. Mainly VolumeBackendAPIException raised # from error status codes returned from the various REST jobs. 
raise e @coordination.synchronized('emc-rdf-vol-{storagegroup_name}-{array}') def _create_replication_enabled_volume( self, array, volume, volume_name, volume_size, extra_specs, storagegroup_name, rep_mode): """Create a volume with replication enabled :param array: the primary array :param volume: the volume :param volume_name: the volume name :param volume_size: the volume size :param extra_specs: extra specifications :param storagegroup_name: the storage group name :param rep_mode: the replication mode :returns: volume_dict, rep_update, rep_info_dict --dict :raises: VolumeBackendAPIException: """ def _is_first_vol_in_replicated_sg(): vol_dict = dict() first_vol, rep_ex_specs, rep_info, rdfg_empty = ( self.prepare_replication_details(extra_specs)) if first_vol: vol_dict = self.provision.create_volume_from_sg( array, volume_name, storagegroup_name, volume_size, extra_specs, rep_info) rep_vol = deepcopy(vol_dict) rep_vol.update({'device_uuid': volume_name, 'storage_group': storagegroup_name, 'size': volume_size}) if first_vol and rdfg_empty: # First volume in SG, first volume in RDFG self.srdf_protect_storage_group( extra_specs, rep_ex_specs, rep_vol) elif not rdfg_empty and not rep_info: # First volume in SG, not first in RDFG __, __, __, rep_ex_specs, resume_rdf = ( self.configure_volume_replication( array, volume, vol_dict['device_id'], extra_specs)) if resume_rdf: self.rest.srdf_resume_replication( array, rep_ex_specs['mgmt_sg_name'], rep_ex_specs['rdf_group_no'], extra_specs) return first_vol, rep_ex_specs, vol_dict existing_devices = self.rest.get_volumes_in_storage_group( array, storagegroup_name) try: is_first_volume, rep_extra_specs, volume_info_dict = ( _is_first_vol_in_replicated_sg()) if not is_first_volume: self._validate_rdfg_status(array, extra_specs) __, rep_extra_specs, rep_info_dict, __ = ( self.prepare_replication_details(extra_specs)) volume_info_dict = self.provision.create_volume_from_sg( array, volume_name, storagegroup_name, volume_size, extra_specs, rep_info_dict) rep_vol_dict = deepcopy(volume_info_dict) rep_vol_dict.update({'device_uuid': volume_name, 'storage_group': storagegroup_name, 'size': volume_size}) remote_device_id = self.get_and_set_remote_device_uuid( extra_specs, rep_extra_specs, rep_vol_dict) rep_vol_dict.update({'remote_device_id': remote_device_id}) rep_update, rep_info_dict = self.gather_replication_updates( extra_specs, rep_extra_specs, rep_vol_dict) if rep_mode in [utils.REP_ASYNC, utils.REP_METRO]: self._add_volume_to_rdf_management_group( array, volume_info_dict['device_id'], volume_name, rep_extra_specs['array'], remote_device_id, extra_specs) return volume_info_dict, rep_update, rep_info_dict except Exception as e: try: # Attempt cleanup of rdfg & storage group post exception updated_devices = set(self.rest.get_volumes_in_storage_group( array, storagegroup_name)) devices_to_delete = [device for device in updated_devices if device not in existing_devices] if devices_to_delete: self._cleanup_rdf_volume_create_post_failure( volume, volume_name, extra_specs, devices_to_delete) elif not existing_devices: self.rest.delete_storage_group(array, storagegroup_name) finally: # Pass actual exception that was raised now that cleanup # attempt is finished. Mainly VolumeBackendAPIException raised # from error status codes returned from the various REST jobs. raise e def _set_vmax_extra_specs(self, extra_specs, pool_record, init_conn=False): """Set the PowerMax/VMAX extra specs. 
The pool_name extra spec must be set, otherwise a default slo/workload will be chosen. The portgroup can either be passed as an extra spec on the volume type (e.g. 'storagetype:portgroupname = os-pg1-pg'), or can be chosen from a list provided in the cinder.conf :param extra_specs: extra specifications -- dict :param pool_record: pool record -- dict :param: init_conn: if extra specs are for initialize connection -- bool :returns: the extra specifications -- dict """ # set extra_specs from pool_record extra_specs[utils.SRP] = pool_record['srpName'] extra_specs[utils.ARRAY] = pool_record['SerialNumber'] extra_specs[utils.PORTGROUPNAME] = ( self._select_port_group_for_extra_specs(extra_specs, pool_record, init_conn)) self._validate_storage_group_tag_list(extra_specs) extra_specs[utils.INTERVAL] = self.interval LOG.debug("The interval is set at: %(intervalInSecs)s.", {'intervalInSecs': self.interval}) extra_specs[utils.RETRIES] = self.retries LOG.debug("Retries are set at: %(retries)s.", {'retries': self.retries}) # Set pool_name slo and workload if 'pool_name' in extra_specs: pool_name = extra_specs['pool_name'] pool_details = pool_name.split('+') slo_from_extra_spec = pool_details[0] workload_from_extra_spec = pool_details[1] # Check if legacy pool chosen if (workload_from_extra_spec == pool_record['srpName'] or self.next_gen): workload_from_extra_spec = 'NONE' elif pool_record.get('ServiceLevel'): slo_from_extra_spec = pool_record['ServiceLevel'] workload_from_extra_spec = pool_record.get('Workload', 'None') # If workload is None in cinder.conf, convert to string if not workload_from_extra_spec or self.next_gen: workload_from_extra_spec = 'NONE' LOG.info("Pool_name is not present in the extra_specs " "- using slo/ workload from cinder.conf: %(slo)s/%(wl)s.", {'slo': slo_from_extra_spec, 'wl': workload_from_extra_spec}) else: slo_list = self.rest.get_slo_list( pool_record['SerialNumber'], self.next_gen, self.array_model) if 'Optimized' in slo_list: slo_from_extra_spec = 'Optimized' elif 'Diamond' in slo_list: slo_from_extra_spec = 'Diamond' else: slo_from_extra_spec = 'None' workload_from_extra_spec = 'NONE' LOG.warning("Pool_name is not present in the extra_specs " "so no slo/ workload information is present " "using default slo/ workload combination: " "%(slo)s/%(wl)s.", {'slo': slo_from_extra_spec, 'wl': workload_from_extra_spec}) # Standardize slo and workload 'NONE' naming conventions if workload_from_extra_spec.lower() == 'none': workload_from_extra_spec = 'NONE' if slo_from_extra_spec.lower() == 'none': slo_from_extra_spec = None extra_specs[utils.SLO] = slo_from_extra_spec extra_specs[utils.WORKLOAD] = workload_from_extra_spec if self.rest.is_compression_capable(extra_specs[utils.ARRAY]): if not self.utils.is_compression_disabled(extra_specs): extra_specs.pop(utils.DISABLECOMPRESSION, None) else: extra_specs.pop(utils.DISABLECOMPRESSION, None) LOG.warning( "Array %(array)s is not compression capable. 
Any attempt to " "disable compression using an extra spec on the volume type " "will be ignored.", {'array': extra_specs[utils.ARRAY]}) self._check_and_add_tags_to_storage_array( extra_specs[utils.ARRAY], self.powermax_array_tag_list, extra_specs) LOG.debug("SRP is: %(srp)s, Array is: %(array)s " "SLO is: %(slo)s, Workload is: %(workload)s.", {'srp': extra_specs[utils.SRP], 'array': extra_specs[utils.ARRAY], 'slo': extra_specs[utils.SLO], 'workload': extra_specs[utils.WORKLOAD]}) if self.version_dict: self.volume_metadata.print_pretty_table(self.version_dict) else: self.version_dict = ( self.volume_metadata.gather_version_info( extra_specs[utils.ARRAY])) return extra_specs def _select_port_group_for_extra_specs(self, extra_specs, pool_record, init_conn=False): """Determine Port Group for operation extra specs. :param extra_specs: existing extra specs -- dict :param pool_record: pool record -- dict :param init_conn: if extra specs are for initialize connection -- bool :returns: Port Group -- str :raises: exception.VolumeBackendAPIException """ port_group = None conf_port_groups = pool_record.get(utils.PORT_GROUP, []) vt_port_group = extra_specs.get(utils.PORTGROUPNAME, None) # Scenario 1: Port Group is set in volume-type extra specs, over-rides # any settings in cinder.conf if vt_port_group: port_group = vt_port_group LOG.info("Using Port Group '%(pg)s' from volume-type extra specs.", {'pg': port_group}) # Scenario 2: Port Group(s) set in cinder.conf and not in volume-type elif conf_port_groups: # Scenario 2-1: There is only one Port Group defined, no load # balance or random selection required if len(conf_port_groups) == 1: port_group = conf_port_groups[0] LOG.info( "Using Port Group '%(pg)s' from cinder.conf backend " "configuration.", {'pg': port_group}) # Scenario 2-2: Else more than one Port Group in cinder.conf else: # Scenario 2-2-1: If load balancing is enabled and the extra # specs are for initialize_connection() method then use load # balance selection if init_conn and ( self.performance.config.get('load_balance', False)): try: load, metric, port_group = ( self.performance.process_port_group_load( extra_specs[utils.ARRAY], conf_port_groups)) LOG.info( "Selecting Port Group %(pg)s with %(met)s load of " "%(load)s", {'pg': port_group, 'met': metric, 'load': load}) except exception.VolumeBackendAPIException: LOG.error( "There has been a problem calculating Port Group " "load, reverting to default random selection.") # Scenario 2-2-2: If the call is not for initialize_connection, # load balancing is not enabled, or there was an error while # calculating PG load, revert to random PG selection method if not port_group: port_group = random.choice(conf_port_groups) # Port group not extracted from volume-type or cinder.conf, raise if not port_group: error_message = (_( "Port Group name has not been provided - please configure the " "'storagetype:portgroupname' extra spec on the volume type, " "or enter a list of Port Groups in the cinder.conf associated " "with this backend.")) LOG.error(error_message) raise exception.VolumeBackendAPIException(message=error_message) return port_group def _validate_storage_group_tag_list(self, extra_specs): """Validate the storagetype:storagegrouptags list :param extra_specs: the extra specifications :raises: VolumeBackendAPIException: """ tag_list = extra_specs.get(utils.STORAGE_GROUP_TAGS) if tag_list: if not self.utils.verify_tag_list(tag_list.split(',')): exception_message = (_( "Unable to get verify " "storagetype:storagegrouptags in the Volume Type. 
" "Only alpha-numeric, dashes and underscores " "allowed. List values must be separated by commas. " "The number of values must not exceed 8")) raise exception.VolumeBackendAPIException( message=exception_message) else: LOG.info("The tag list %(tag_list)s has been verified.", {'tag_list': tag_list}) def _validate_array_tag_list(self, array_tag_list): """Validate the array tag list :param array_tag_list: the array tag list :raises: VolumeBackendAPIException: """ if array_tag_list: if not self.utils.verify_tag_list(array_tag_list): exception_message = (_( "Unable to get verify " "config option powermax_array_tag_list. " "Only alpha-numeric, dashes and underscores " "allowed. List values must be separated by commas. " "The number of values must not exceed 8")) raise exception.VolumeBackendAPIException( message=exception_message) else: LOG.info("The tag list %(tag_list)s has been verified.", {'tag_list': array_tag_list}) @retry(retry_exc_tuple, interval=3, retries=3) def _delete_from_srp(self, array, device_id, volume_name, extra_specs): """Delete from srp. :param array: the array serial number :param device_id: the device id :param volume_name: the volume name :param extra_specs: the extra specifications :raises: VolumeBackendAPIException: """ try: LOG.debug("Delete Volume: %(name)s. device_id: %(device_id)s.", {'name': volume_name, 'device_id': device_id}) self.provision.delete_volume_from_srp( array, device_id, volume_name) except Exception as e: error_message = (_( "Failed to delete volume %(volume_name)s with device id " "%(dev)s. Exception received: %(e)s.") % {'volume_name': volume_name, 'dev': device_id, 'e': str(e)}) LOG.error(error_message) LOG.warning("Attempting device cleanup after a failed delete of: " "%(name)s. device_id: %(device_id)s.", {'name': volume_name, 'device_id': device_id}) self._cleanup_device_snapvx(array, device_id, extra_specs) raise exception.VolumeBackendAPIException(message=error_message) def _remove_vol_and_cleanup_replication( self, array, device_id, volume_name, extra_specs, volume): """Remove a volume from its storage groups and cleanup replication. :param array: the array serial number :param device_id: the device id :param volume_name: the volume name :param extra_specs: the extra specifications :param volume: the volume object """ if volume and volume.migration_status == 'deleting': extra_specs = self.utils.get_migration_delete_extra_specs( volume, extra_specs, self.rep_configs) # Cleanup remote replication if self.utils.is_replication_enabled(extra_specs): rdf_group_no, __ = self.get_rdf_details( array, extra_specs[utils.REP_CONFIG]) self.cleanup_rdf_device_pair(array, rdf_group_no, device_id, extra_specs) else: self.masking.remove_and_reset_members( array, volume, device_id, volume_name, extra_specs, False) @coordination.synchronized('emc-{rdf_group_no}-rdf') def cleanup_rdf_device_pair(self, array, rdf_group_no, device_id, extra_specs): """Cleanup replication on a RDF device pair, leave only source volume. 
:param array: the array serial number :param rdf_group_no: the rdf group number :param device_id: the device id :param extra_specs: the extra specifications :raises: exception.VolumeBackendAPIException """ resume_replication, rdf_mgmt_cleanup = False, False rdf_mgmt_sg, vols_in_mgmt_sg = None, None rep_config = extra_specs[utils.REP_CONFIG] rep_mode = extra_specs['rep_mode'] if rep_mode in [utils.REP_METRO, utils.REP_ASYNC]: extra_specs[utils.FORCE_VOL_EDIT] = True rdf_group_no, remote_array = self.get_rdf_details(array, rep_config) rep_extra_specs = self._get_replication_extra_specs( extra_specs, rep_config) # 1. Get the remote device ID so it can be deleted later remote_device = self.rest.get_rdf_pair_volume( array, rdf_group_no, device_id) remote_device_id = remote_device['remoteVolumeName'] vol_sg_list = self.rest.get_storage_groups_from_volume( array, device_id) # 2. If replication mode is async or metro, get RDF mgmt group info and # suspend RDFG before proceeding to delete operation if rep_mode in [utils.REP_METRO, utils.REP_ASYNC]: # Make sure devices are in a valid state before continuing self.rest.wait_for_rdf_pair_sync( array, rdf_group_no, device_id, rep_extra_specs) rdf_mgmt_sg = self.utils.get_rdf_management_group_name(rep_config) vols_in_mgmt_sg = self.rest.get_num_vols_in_sg(array, rdf_mgmt_sg) if vols_in_mgmt_sg > 1: resume_replication = True else: rdf_mgmt_cleanup = True self.rest.srdf_suspend_replication( array, rdf_mgmt_sg, rdf_group_no, rep_extra_specs) try: # 3. Check vol doesnt live in any SGs outside OpenStack managed SGs if rdf_mgmt_sg and rdf_mgmt_sg in vol_sg_list: vol_sg_list.remove(rdf_mgmt_sg) if len(vol_sg_list) > 1: exception_message = (_( "There is more than one storage group associated with " "device %(dev)s not including RDF management groups. " "Please check device is not member of non-OpenStack " "managed storage groups") % {'dev': device_id}) LOG.error(exception_message) raise exception.VolumeBackendAPIException(exception_message) else: vol_src_sg = vol_sg_list[0] # 4. Remove device from SG and delete RDFG device pair self.rest.srdf_remove_device_pair_from_storage_group( array, vol_src_sg, rep_extra_specs['array'], device_id, rep_extra_specs) # 5. Remove the volume from any additional SGs if rdf_mgmt_sg: self.rest.remove_vol_from_sg( array, rdf_mgmt_sg, device_id, extra_specs) self.rest.remove_vol_from_sg( remote_array, rdf_mgmt_sg, remote_device_id, rep_extra_specs) # 6. Delete the r2 volume self.rest.delete_volume(remote_array, remote_device_id) # 7. Delete the SGs if there are no volumes remaining self._cleanup_rdf_storage_groups_post_r2_delete( array, remote_array, vol_src_sg, rdf_mgmt_sg, rdf_mgmt_cleanup) # 8. Resume replication if RDFG still contains volumes if resume_replication: self.rest.srdf_resume_replication( array, rdf_mgmt_sg, rep_extra_specs['rdf_group_no'], rep_extra_specs) LOG.info('Remote device %(dev)s deleted from RDF Group %(grp)s', {'dev': remote_device_id, 'grp': rep_extra_specs['rdf_group_label']}) except Exception as e: # Attempt to resume SRDF groups after exception to avoid leaving # them in a suspended state. 
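# The resume below targets the RDF management group when one exists,
# otherwise the single source storage group; failures here are only
# logged at debug level so the original exception is re-raised.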
try: if rdf_mgmt_sg: self.rest.srdf_resume_replication( array, rdf_mgmt_sg, rdf_group_no, rep_extra_specs, False) elif len(vol_sg_list) == 1: self.rest.srdf_resume_replication( array, vol_sg_list[0], rdf_group_no, rep_extra_specs, False) except Exception: LOG.debug('Could not resume SRDF group after exception ' 'during cleanup_rdf_device_pair.') raise e def _cleanup_rdf_storage_groups_post_r2_delete( self, array, remote_array, sg_name, rdf_mgmt_sg, rdf_mgmt_cleanup): """Cleanup storage groups after an RDF device pair has been deleted. :param array: the array serial number :param remote_array: the remote array serial number :param sg_name: the storage group name :param rdf_mgmt_sg: the RDF management group name :param rdf_mgmt_cleanup: is RDF management group cleanup required """ vols_in_sg = self.rest.get_num_vols_in_sg(array, sg_name) vols_in_remote_sg = self.rest.get_num_vols_in_sg(remote_array, sg_name) if not vols_in_sg: parent_sg = self.masking.get_parent_sg_from_child( array, sg_name) self.rest.delete_storage_group(array, sg_name) if not vols_in_remote_sg: self.rest.delete_storage_group(remote_array, sg_name) if parent_sg: vols_in_parent = self.rest.get_num_vols_in_sg( array, parent_sg) if not vols_in_parent: mv_name = self.rest.get_masking_views_from_storage_group( array, parent_sg) if mv_name: self.rest.delete_masking_view(array, mv_name) if sg_name != parent_sg: self.rest.delete_storage_group(array, parent_sg) self.rest.delete_storage_group(remote_array, parent_sg) if rdf_mgmt_cleanup: self.rest.delete_storage_group(array, rdf_mgmt_sg) self.rest.delete_storage_group(remote_array, rdf_mgmt_sg) def get_target_wwns_from_masking_view( self, volume, connector): """Find target WWNs via the masking view. :param volume: volume to be attached :param connector: the connector dict :returns: list -- the target WWN list """ metro_wwns = [] host = connector['host'] short_host_name = self.utils.get_host_name_label( host, self.powermax_short_host_name_template) if host else None extra_specs = self._initial_setup(volume) if self.utils.is_volume_failed_over(volume): rep_extra_specs = self._get_replication_extra_specs( extra_specs, extra_specs[utils.REP_CONFIG]) extra_specs = rep_extra_specs device_id = self._find_device_on_array(volume, extra_specs) target_wwns = self._get_target_wwns_from_masking_view( device_id, short_host_name, extra_specs) if extra_specs.get(utils.REP_CONFIG) and self.utils.is_metro_device( extra_specs[utils.REP_CONFIG], extra_specs): rdf_group_no, __ = self.get_rdf_details( extra_specs[utils.ARRAY], extra_specs[utils.REP_CONFIG]) rdf_pair_info = self.rest.get_rdf_pair_volume( extra_specs[utils.ARRAY], rdf_group_no, device_id) remote_device_id = rdf_pair_info.get('remoteVolumeName', None) rep_extra_specs = self._get_replication_extra_specs( extra_specs, extra_specs[utils.REP_CONFIG]) metro_wwns = self._get_target_wwns_from_masking_view( remote_device_id, short_host_name, rep_extra_specs) return target_wwns, metro_wwns def _get_target_wwns_from_masking_view( self, device_id, short_host_name, extra_specs): """Helper function to get wwns from a masking view.
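The first masking view found for the device and host is used to look up its port group, and the WWNs of that port group's target ports are returned; an empty list is returned if the device is not in any masking view.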
:param device_id: the device id :param short_host_name: the short host name :param extra_specs: the extra specs :returns: target wwns -- list """ target_wwns = [] array = extra_specs[utils.ARRAY] masking_view_list, __ = self._get_masking_views_from_volume( array, device_id, short_host_name) if masking_view_list: portgroup = self.get_port_group_from_masking_view( array, masking_view_list[0]) target_wwns = self.rest.get_target_wwns(array, portgroup) LOG.info("Target wwns in masking view %(maskingView)s: " "%(targetWwns)s.", {'maskingView': masking_view_list[0], 'targetWwns': target_wwns}) return target_wwns def get_port_group_from_masking_view(self, array, maskingview_name): """Get the port group in a masking view. :param array: the array serial number :param maskingview_name: masking view name :returns: port group name """ return self.rest.get_element_from_masking_view( array, maskingview_name, portgroup=True) def get_initiator_group_from_masking_view(self, array, maskingview_name): """Get the initiator group in a masking view. :param array: the array serial number :param maskingview_name: masking view name :returns: initiator group name """ return self.rest.get_element_from_masking_view( array, maskingview_name, host=True) def get_common_masking_views(self, array, portgroup_name, initiator_group_name): """Get common masking views, if any. :param array: the array serial number :param portgroup_name: port group name :param initiator_group_name: ig name :returns: list of masking views """ LOG.debug("Finding Masking Views for port group %(pg)s and %(ig)s.", {'pg': portgroup_name, 'ig': initiator_group_name}) masking_view_list = self.rest.get_common_masking_views( array, portgroup_name, initiator_group_name) return masking_view_list def _get_iscsi_ip_iqn_port(self, array, port): """Get ip and iqn from a virtual director port. :param array: the array serial number -- str :param port: the director & virtual port on the array -- str :returns: ip_iqn_list -- list of dicts """ ip_iqn_list = [] ip_addresses, iqn = self.rest.get_iscsi_ip_address_and_iqn( array, port) for ip in ip_addresses: physical_port = self.rest.get_ip_interface_physical_port( array, port.split(':')[0], ip) ip_iqn_list.append({'iqn': iqn, 'ip': ip, 'physical_port': physical_port}) return ip_iqn_list def _find_ip_and_iqns(self, array, port_group_name): """Find the list of ips and iqns for the ports in a port group. :param array: the array serial number -- str :param port_group_name: the port group name -- str :returns: ip_and_iqn -- list of dicts """ ips_and_iqns = [] LOG.debug("The portgroup name for iscsiadm is %(pg)s", {'pg': port_group_name}) ports = self.rest.get_port_ids(array, port_group_name) for port in ports: ip_and_iqn = self._get_iscsi_ip_iqn_port(array, port) ips_and_iqns.extend(ip_and_iqn) return ips_and_iqns def _find_nvme_target_ips(self, array, port_group_name): """Find the list of NVMe/TCP target IPs for the ports in a port group. :param array: the array serial number -- str :param port_group_name: the port group name -- str :returns: ips -- list """ ips = [] LOG.debug("The port-group name for nvme cli is %(pg)s", {'pg': port_group_name}) ports = self.rest.get_port_ids(array, port_group_name) for port in ports: ip = self.rest.get_nvme_tcp_ip_address(array, port) ips.extend(ip) return ips def _create_replica( self, array, clone_volume, source_device_id, extra_specs, snap_name=None): """Create a replica. Create replica for source volume, source can be volume or snapshot.
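The target volume is created up front at the requested clone size, a temporary snap name is generated when none is supplied, and when replication is enabled the RDF group is suspended (Asynchronous and Metro modes) around the SnapVX replica creation and resumed afterwards; on failure the partially created target is cleaned up.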
:param array: the array serial number :param clone_volume: the clone volume object :param source_device_id: the device ID of the volume :param extra_specs: extra specifications :param snap_name: the snapshot name - optional :returns: int -- return code :returns: dict -- cloneDict """ clone_id, target_device_id = clone_volume.id, None clone_name = self.utils.get_volume_element_name(clone_id) create_snap, copy_mode, rep_extra_specs = False, False, dict() volume_dict = self.rest.get_volume(array, source_device_id) replication_enabled = self.utils.is_replication_enabled(extra_specs) if self.utils.is_protected_snap_disabled(extra_specs): extra_specs.pop(utils.IS_RE, None) replication_enabled = False if replication_enabled: copy_mode = True __, rep_extra_specs, __, __ = ( self.prepare_replication_details(extra_specs)) # PowerMax/VMAX supports using a target volume that is bigger than # the source volume, so we create the target volume the desired # size at this point to avoid having to extend later try: clone_dict, rep_update, rep_info_dict = self._create_volume( clone_volume, clone_name, clone_volume.size, extra_specs) target_device_id = clone_dict['device_id'] if target_device_id: clone_volume_dict = self.rest.get_volume( array, target_device_id) self.utils.compare_cylinders( volume_dict['cap_cyl'], clone_volume_dict['cap_cyl']) LOG.info("The target device id is: %(device_id)s.", {'device_id': target_device_id}) if not snap_name: snap_name = self.utils.get_temp_snap_name(source_device_id) create_snap = True if replication_enabled: if rep_extra_specs[utils.REP_CONFIG]['mode'] in ( [utils.REP_ASYNC, utils.REP_METRO]): rep_extra_specs['sg_name'] = ( self.utils.get_rdf_management_group_name( rep_extra_specs[utils.REP_CONFIG])) self.rest.wait_for_rdf_pair_sync( array, rep_extra_specs['rdf_group_no'], target_device_id, rep_extra_specs) self.rest.srdf_suspend_replication( array, rep_extra_specs['sg_name'], rep_extra_specs['rdf_group_no'], rep_extra_specs) self.provision.create_volume_replica( array, source_device_id, target_device_id, snap_name, extra_specs, create_snap, copy_mode) if replication_enabled: self.rest.rdf_resume_with_retries(array, rep_extra_specs) except Exception as e: if target_device_id: LOG.warning("Create replica failed. Cleaning up the target " "volume. Clone name: %(cloneName)s, Error " "received is %(e)s.", {'cloneName': clone_name, 'e': e}) self._cleanup_target( array, target_device_id, source_device_id, clone_name, snap_name, extra_specs, target_volume=clone_volume) # Re-throw the exception. raise # add source id and snap_name to the clone dict clone_dict['source_device_id'] = source_device_id clone_dict['snap_name'] = snap_name return clone_dict, rep_update, rep_info_dict def _cleanup_target( self, array, target_device_id, source_device_id, clone_name, snap_name, extra_specs, target_volume=None): """Cleanup target volume on failed clone/ snapshot creation. 
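Any SnapVX session still linking the source and target is unlinked first; the target is then removed from its storage groups, its replication is cleaned up, and the device is deleted from the SRP.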
:param array: the array serial number :param target_device_id: the target device ID :param source_device_id: the source device ID :param clone_name: the name of the clone volume :param snap_name: the snapVX name :param extra_specs: the extra specifications :param target_volume: the target volume object """ snap_id = self.rest.get_snap_id(array, source_device_id, snap_name) snap_session = self.rest.get_sync_session( array, source_device_id, snap_name, target_device_id, snap_id) if snap_session: self.provision.unlink_snapvx_tgt_volume( array, target_device_id, source_device_id, snap_name, extra_specs, snap_id) self._remove_vol_and_cleanup_replication( array, target_device_id, clone_name, extra_specs, target_volume) self._delete_from_srp( array, target_device_id, clone_name, extra_specs) def _get_target_source_device(self, array, device_id): """Get the source device id of the target. :param array: the array serial number :param device_id: volume instance return source_device_id """ LOG.debug("Getting the source device ID for target device %(tgt)s", {'tgt': device_id}) source_device_id = None snapvx_tgt, __, __ = self.rest.is_vol_in_rep_session( array, device_id) if snapvx_tgt: __, tgt_session = self.rest.find_snap_vx_sessions( array, device_id, tgt_only=True) source_device_id = tgt_session['source_vol_id'] LOG.debug("Target %(tgt)s source device %(src)s", {'tgt': device_id, 'src': source_device_id}) return source_device_id @retry(retry_exc_tuple, interval=10, retries=3) def _cleanup_device_snapvx( self, array, device_id, extra_specs): """Perform any snapvx cleanup before creating clones or snapshots :param array: the array serial :param device_id: the device ID of the volume :param extra_specs: extra specifications """ snapvx_tgt, snapvx_src, __ = self.rest.is_vol_in_rep_session( array, device_id) if snapvx_src or snapvx_tgt: LOG.debug("Device %(dev)s is involved into a SnapVX session", {'dev': device_id}) if snapvx_src: LOG.debug("Device %(dev)s is the SnapVX source volume", {'dev': device_id}) else: LOG.debug("Device %(dev)s is the SnapVX target volume", {'dev': device_id}) @coordination.synchronized("emc-source-{src_device_id}") def do_unlink_and_delete_snap(src_device_id): src_sessions, tgt_session = self.rest.find_snap_vx_sessions( array, src_device_id) if tgt_session: self._unlink_and_delete_temporary_snapshots( tgt_session, array, extra_specs) if src_sessions: if not self.rest.is_snap_id: src_sessions.sort( key=lambda k: k['snapid'], reverse=True) for src_session in src_sessions: self._unlink_and_delete_temporary_snapshots( src_session, array, extra_specs) do_unlink_and_delete_snap(device_id) def _unlink_and_delete_temporary_snapshots( self, session, array, extra_specs): """Helper for unlinking and deleting temporary snapshot sessions :param session: snapvx session :param array: the array serial number :param extra_specs: extra specifications """ session_unlinked = self._unlink_snapshot( session, array, extra_specs) if session_unlinked: self._delete_temp_snapshot(session, array) else: LOG.warning( "Snap name %(snap)s is still linked. The delete of " "the temporary snapshot has not occurred.", {'snap': session.get('snap_name')}) def _unlink_snapshot(self, session, array, extra_specs): """Helper for unlinking temporary snapshot during cleanup. 
:param session: session that contains snapshot :param array: the array serial number :param extra_specs: extra specifications :return: """ snap_name = session.get('snap_name') source = session.get('source_vol_id') snap_id = session.get('snapid') if snap_id is None: exception_message = ( _("Unable to get snapid from session %(session)s for source " "device %(dev)s. Retrying...") % {'dev': source, 'session': session}) # Warning only as there will be a retry LOG.warning(exception_message) raise exception.VolumeBackendAPIException( message=exception_message) snap_info = self.rest.get_volume_snap( array, source, snap_name, snap_id) is_linked = snap_info.get('linkedDevices') if snap_info else None target, cm_enabled = None, False if session.get('target_vol_id'): target = session.get('target_vol_id') cm_enabled = session.get('copy_mode') if target and snap_name and is_linked: loop = True if cm_enabled else False LOG.debug( "Unlinking source from target. Source: %(vol)s, Target: " "%(tgt)s, Snap id: %(snapid)s.", {'vol': source, 'tgt': target, 'snapid': snap_id}) self.provision.unlink_snapvx_tgt_volume( array, target, source, snap_name, extra_specs, snap_id, loop) is_unlinked = True snap_info = self.rest.get_volume_snap( array, source, snap_name, snap_id) if snap_info and snap_info.get('linkedDevices'): is_unlinked = False return is_unlinked def _delete_temp_snapshot(self, session, array): """Helper for deleting temporary snapshot during cleanup. :param session: Session that contains snapshot :param array: the array serial number """ snap_name = session.get('snap_name') source = session.get('source_vol_id') snap_id = session.get('snapid') LOG.debug( "Deleting temp snapshot if it exists. Snap name is: " "%(snap_name)s, Source is: %(source)s, " "Snap id: %(snap_id)s.", {'snap_name': snap_name, 'source': source, 'snap_id': snap_id}) is_legacy = 'EMC_SMI' in snap_name if snap_name else False is_temp = ( utils.CLONE_SNAPSHOT_NAME in snap_name if snap_name else False) snap_info = self.rest.get_volume_snap( array, source, snap_name, snap_id) is_linked = snap_info.get('linkedDevices') if snap_info else False # Candidates for deletion: # 1. If legacy snapshot with 'EMC_SMI' in snapshot name # 2. If snapVX snapshot is temporary # 3. Snapshot is unlinked. Call _unlink_snapshot before delete. if (is_legacy or is_temp) and not is_linked: LOG.debug( "Deleting temporary snapshot. Source: %(vol)s, snap name: " "%(name)s, snap id: %(snapid)s.", { 'vol': source, 'name': snap_name, 'snapid': snap_id}) self.provision.delete_temp_volume_snap( array, snap_name, source, snap_id) def manage_existing(self, volume, external_ref): """Manages an existing PowerMax/VMAX Volume (import to Cinder). Renames the existing volume to match the expected name for the volume. Also need to consider things like QoS, Emulation, account/tenant. 
:param volume: the volume object including the volume_type_id :param external_ref: reference to the existing volume :returns: dict -- model_update """ LOG.info("Beginning manage existing volume process") rep_info_dict, resume_rdf, rep_status = dict(), False, None rep_model_update, rep_driver_data = dict(), dict() rep_extra_specs = dict() extra_specs = self._initial_setup(volume) try: array, device_id = self.utils.get_array_and_device_id( volume, external_ref) except exception.VolumeBackendAPIException: array, device_id = self._manage_volume_with_uuid( external_ref, volume) if not device_id: exception_message = _( "Unable to get the device id to manage volume into OpenStack.") LOG.error(exception_message) raise exception.VolumeBackendAPIException( message=exception_message) volume_id = volume.id # Check if the existing volume is valid for cinder management orig_vol_name, src_sg = self._check_lun_valid_for_cinder_management( array, device_id, volume_id, external_ref) # If volume name is not present, then assign the device id as the name if not orig_vol_name: orig_vol_name = device_id LOG.debug("Original volume name %(vol)s and source sg: %(sg_name)s.", {'vol': orig_vol_name, 'sg_name': src_sg}) # Rename the volume volume_name = self.utils.get_volume_element_name(volume_id) LOG.debug("Rename volume %(vol)s to %(element_name)s.", {'vol': orig_vol_name, 'element_name': volume_name}) self.rest.rename_volume(array, device_id, volume_name) provider_location = {'device_id': device_id, 'array': array} model_update = {'provider_location': str(provider_location)} # Set-up volume replication, if enabled if self.utils.is_replication_enabled(extra_specs): (rep_status, rep_driver_data, rep_info_dict, rep_extra_specs, resume_rdf) = ( self.configure_volume_replication( array, volume, device_id, extra_specs)) if rep_driver_data: rep_model_update = { 'replication_status': rep_status, 'replication_driver_data': str( {'device_id': rep_info_dict['target_device_id'], 'array': rep_info_dict['remote_array']})} try: # Add/move volume to default storage group self.masking.add_volume_to_default_storage_group( array, device_id, volume_name, extra_specs, src_sg=src_sg) if rep_status == 'first_vol_in_rdf_group': rep_status, rep_driver_data, rep_info_dict = ( self._protect_storage_group( array, device_id, volume, volume_name, rep_extra_specs)) except Exception as e: exception_message = (_( "Unable to move the volume to the default SG. 
" "Exception received was %(e)s") % {'e': str(e)}) LOG.error(exception_message) LOG.debug("Rename volume %(vol)s back to %(element_name)s.", {'vol': volume_id, 'element_name': orig_vol_name}) self.rest.rename_volume(array, device_id, orig_vol_name) raise exception.VolumeBackendAPIException( message=exception_message) if resume_rdf: self.rest.srdf_resume_replication( array, rep_extra_specs['mgmt_sg_name'], rep_extra_specs['rdf_group_no'], extra_specs) if rep_driver_data: rep_model_update = { 'replication_status': rep_status, 'replication_driver_data': str( {'device_id': rep_info_dict['target_device_id'], 'array': rep_info_dict['remote_array']})} model_update.update(rep_model_update) model_update = self.update_metadata( model_update, volume.metadata, self.get_volume_metadata( array, device_id)) if rep_model_update: target_backend_id = extra_specs.get( utils.REPLICATION_DEVICE_BACKEND_ID, 'None') model_update['metadata']['BackendID'] = target_backend_id self.volume_metadata.capture_manage_existing( volume, rep_info_dict, device_id, extra_specs) return model_update def _manage_volume_with_uuid(self, external_ref, volume): """Manage volume using the uuid :param external_ref: the external reference :param volume: the volume object :raises: VolumeBackendAPIException :returns: array, device_id -- str, str """ LOG.debug("External_ref: %(er)s", {'er': external_ref}) uuid_vol = external_ref.get('source-name', None) if not uuid_vol: uuid_vol = external_ref.get('source-id', None) if uuid_vol: uuid_vol = uuid_vol.replace('OS-', '').replace('volume-', '') if uuid_vol and self.utils.check_uuid_regex(uuid_vol): array = self.utils.get_array_from_host(volume) device_id = self.rest.find_volume_device_id(array, uuid_vol) return array, device_id else: exception_message = _( "Unable to verify the uuid of volume.") LOG.error(exception_message) raise exception.VolumeBackendAPIException( message=exception_message) def _protect_storage_group( self, array, device_id, volume, volume_name, rep_extra_specs): """Enable RDF on a volume after it has been managed into OpenStack. 
:param array: the array serial number :param device_id: the device id :param volume: the volume object :param volume_name: the volume name :param rep_extra_specs: replication information dictionary :returns: replication status, device pair info, replication info -- str, dict, dict """ rdf_group_no = rep_extra_specs['rdf_group_no'] remote_array = rep_extra_specs['array'] rep_mode = rep_extra_specs['rep_mode'] rep_config = rep_extra_specs[utils.REP_CONFIG] if rep_mode in [utils.REP_ASYNC, utils.REP_METRO]: rep_extra_specs['mgmt_sg_name'] = ( self.utils.get_rdf_management_group_name(rep_config)) else: rep_extra_specs['mgmt_sg_name'] = None sg_list = self.rest.get_storage_groups_from_volume(array, device_id) if len(sg_list) == 1: sg_name = sg_list[0] elif len(sg_list) > 1: exception_message = (_( "Unable to RDF protect device %(dev)s in OpenStack managed " "storage group because it currently exists in one or more " "user managed storage groups.") % {'dev': device_id}) LOG.error(exception_message) raise exception.VolumeBackendAPIException(exception_message) rep_status, pair_info, r2_device_id = ( self._post_retype_srdf_protect_storage_group( array, sg_name, device_id, volume_name, rep_extra_specs, volume)) target_name = self.utils.get_volume_element_name(volume.id) rep_info_dict = self.volume_metadata.gather_replication_info( volume.id, 'replication', False, rdf_group_no=rdf_group_no, target_name=target_name, remote_array=remote_array, target_device_id=r2_device_id, replication_status=rep_status, rep_mode=rep_mode, rdf_group_label=rep_config['rdf_group_label'], target_array_model=rep_extra_specs['target_array_model'], mgmt_sg_name=rep_extra_specs['mgmt_sg_name']) return rep_status, pair_info, rep_info_dict def _check_lun_valid_for_cinder_management( self, array, device_id, volume_id, external_ref): """Check if a volume is valid for cinder management. :param array: the array serial number :param device_id: the device id :param volume_id: the cinder volume id :param external_ref: the external reference :returns volume_identifier - name of the volume on PowerMax/VMAX :returns sg - the storage group which the LUN belongs to :raises: ManageExistingInvalidReference, ManageExistingAlreadyManaged: """ # Ensure the volume exists on the array volume_details = self.rest.get_volume(array, device_id) if not volume_details: msg = (_('Unable to retrieve volume details from array for ' 'device %(device_id)s') % {'device_id': device_id}) raise exception.ManageExistingInvalidReference( existing_ref=external_ref, reason=msg) # Check if volume is FBA emulation fba_devices = self.rest.get_volume_list(array, "emulation=FBA") if device_id not in fba_devices: msg = (_("Unable to import volume %(device_id)s to cinder as it " "is not an FBA volume. Only volumes with an emulation " "type of FBA are supported.") % {'device_id': device_id}) raise exception.ManageExistingVolumeTypeMismatch(reason=msg) volume_identifier = None # Check if volume is already cinder managed if volume_details.get('volume_identifier'): volume_identifier = volume_details.get('volume_identifier') if volume_identifier.startswith(utils.VOLUME_ELEMENT_NAME_PREFIX): raise exception.ManageExistingAlreadyManaged( volume_ref=volume_id) # Check if the volume is part of multiple SGs and # check if the volume is attached by checking if in any masking view. storagegrouplist = self.rest.get_storage_groups_from_volume( array, device_id) if storagegrouplist and len(storagegrouplist) > 1: msg = (_("Unable to import volume %(device_id)s to cinder. 
" "Volume is in multiple SGs.") % {'device_id': device_id}) raise exception.ManageExistingInvalidReference( existing_ref=external_ref, reason=msg) sg = None if storagegrouplist: sg = storagegrouplist[0] mvs = self.rest.get_masking_views_from_storage_group( array, sg) if mvs: msg = (_("Unable to import volume %(device_id)s to cinder. " "Volume is in masking view(s): %(mv)s.") % {'device_id': device_id, 'mv': mvs}) raise exception.ManageExistingInvalidReference( existing_ref=external_ref, reason=msg) # Check if there are any replication sessions associated # with the volume. snapvx_tgt, __, rdf = self.rest.is_vol_in_rep_session( array, device_id) if snapvx_tgt or rdf: msg = (_("Unable to import volume %(device_id)s to cinder. " "It is part of a replication session.") % {'device_id': device_id}) raise exception.ManageExistingInvalidReference( existing_ref=external_ref, reason=msg) return volume_identifier, sg def manage_existing_get_size(self, volume, external_ref): """Return size of an existing PowerMax/VMAX volume to manage_existing. :param self: reference to class :param volume: the volume object including the volume_type_id :param external_ref: reference to the existing volume :returns: size of the volume in GB """ LOG.debug("Volume in manage_existing_get_size: %(volume)s.", {'volume': volume}) try: array, device_id = self.utils.get_array_and_device_id( volume, external_ref) except exception.VolumeBackendAPIException: array, device_id = self._manage_volume_with_uuid( external_ref, volume) if not device_id: exception_message = _( "Unable to get the device id to manage volume into OpenStack.") LOG.error(exception_message) raise exception.VolumeBackendAPIException( message=exception_message) # Ensure the volume exists on the array volume_details = self.rest.get_volume(array, device_id) if not volume_details: msg = (_('Unable to retrieve volume details from array for ' 'device %(device_id)s') % {'device_id': device_id}) raise exception.ManageExistingInvalidReference( existing_ref=external_ref, reason=msg) size = float(self.rest.get_size_of_device_on_array(array, device_id)) if not size.is_integer(): exception_message = ( _("Cannot manage existing PowerMax/VMAX volume %(device_id)s " "- it has a size of %(vol_size)s but only whole GB " "sizes are supported. Please extend the " "volume to the nearest GB value before importing.") % {'device_id': device_id, 'vol_size': size, }) LOG.error(exception_message) raise exception.ManageExistingInvalidReference( existing_ref=external_ref, reason=exception_message) LOG.debug("Size of volume %(device_id)s is %(vol_size)s GB.", {'device_id': device_id, 'vol_size': int(size)}) return int(size) def unmanage(self, volume): """Export PowerMax/VMAX volume from Cinder. Leave the volume intact on the backend array. :param volume: the volume object """ volume_name = volume.name volume_id = volume.id LOG.info("Unmanage volume %(name)s, id=%(id)s", {'name': volume_name, 'id': volume_id}) extra_specs = self._initial_setup(volume) device_id = self._find_device_on_array(volume, extra_specs) array = extra_specs['array'] if device_id is None: LOG.error("Cannot find Volume: %(id)s for " "unmanage operation. Exiting...", {'id': volume_id}) else: # Check if volume is snap source self._cleanup_device_snapvx(array, device_id, extra_specs) snapvx_tgt, snapvx_src, __ = self.rest.is_vol_in_rep_session( array, device_id) if snapvx_src or snapvx_tgt: msg = _( 'Cannot unmanage volume %s with device id %s as it is ' 'busy. 
Please either wait until all temporary snapshot ' 'have expired or manually unlink and terminate any ' 'remaining temporary sessions when they have been ' 'fully copied to their targets. Volume is a snapvx ' 'source: %s. Volume is a snapvx target: %s' % (volume_id, device_id, snapvx_src, snapvx_tgt)) LOG.error(msg) raise exception.VolumeIsBusy(volume.id) # Remove volume from any openstack storage groups # and remove any replication self._remove_vol_and_cleanup_replication( extra_specs['array'], device_id, volume_name, extra_specs, volume) # Rename the volume to volumeId, thus remove the 'OS-' prefix. self.rest.rename_volume( extra_specs[utils.ARRAY], device_id, volume_id) # First check/create the unmanaged sg # Don't fail if we fail to create the SG try: self.provision.create_storage_group( extra_specs[utils.ARRAY], utils.UNMANAGED_SG, extra_specs[utils.SRP], None, None, extra_specs=extra_specs) except Exception as e: msg = ("Exception creating %(sg)s. " "Exception received was %(e)s." % {'sg': utils.UNMANAGED_SG, 'e': str(e)}) LOG.warning(msg) return # Try to add the volume self.masking._check_adding_volume_to_storage_group( extra_specs[utils.ARRAY], device_id, utils.UNMANAGED_SG, volume_id, extra_specs) def manage_existing_snapshot(self, snapshot, existing_ref): """Manage an existing PowerMax/VMAX Snapshot (import to Cinder). Renames the Snapshot to prefix it with OS- to indicate it is managed by Cinder :param snapshot: the snapshot object :param existing_ref: the snapshot name on the backend VMAX :raises: VolumeBackendAPIException :returns: model update """ persist_metadata = True volume = snapshot.volume extra_specs = self._initial_setup(volume) array = extra_specs[utils.ARRAY] device_id = self._find_device_on_array(volume, extra_specs) if not device_id: exception_message = ( (_("Cannot find device for volume %(name)s.") % { 'name': volume.id})) LOG.error(exception_message) raise exception.VolumeBackendAPIException( message=exception_message) try: snap_name = existing_ref['source-name'] except KeyError: snap_name = existing_ref['source-id'] if snapshot.display_name: snap_display_name = snapshot.display_name else: snap_display_name = snapshot.id if snap_name.startswith(utils.VOLUME_ELEMENT_NAME_PREFIX): exception_message = ( _("Unable to manage existing Snapshot. 
Snapshot " "%(snapshot)s is already managed by Cinder.") % {'snapshot': snap_name}) raise exception.VolumeBackendAPIException( message=exception_message) if self.utils.is_volume_failed_over(volume): exception_message = ( (_("Volume %(name)s is failed over from the source volume, " "it is not possible to manage a snapshot of a failed over " "volume.") % {'name': volume.id})) LOG.error(exception_message) raise exception.VolumeBackendAPIException( message=exception_message) try: snap_id = self.rest.get_snap_id(array, device_id, snap_name) except exception.VolumeBackendAPIException: snap_id, snap_name = self._get_snap_id_with_uuid( array, device_id, snap_name) persist_metadata = False if not snap_id: exception_message = ( (_("Cannot find snap_id of snapshot %(snap_name)s.") % { 'snap_name': snap_name})) LOG.error(exception_message) raise exception.VolumeBackendAPIException( message=exception_message) snap_backend_name = self.utils.modify_snapshot_prefix( snap_name, manage=True) try: self.rest.modify_volume_snap( array, device_id, device_id, snap_name, extra_specs, snap_id=snap_id, rename=True, new_snap_name=snap_backend_name) except Exception as e: exception_message = ( _("There was an issue managing %(snap_name)s, it was not " "possible to add the OS- prefix. Error Message: %(e)s.") % {'snap_name': snap_name, 'e': str(e)}) LOG.error(exception_message) raise exception.VolumeBackendAPIException( message=exception_message) prov_loc = {'source_id': device_id, 'snap_name': snap_backend_name} model_update = { 'display_name': snap_display_name, 'provider_location': str(prov_loc)} snapshot_metadata = self.get_snapshot_metadata( array, device_id, snap_backend_name) if persist_metadata: model_update = self.update_metadata( model_update, snapshot.metadata, snapshot_metadata) LOG.info("Managing SnapVX Snapshot %(snap_name)s of source " "volume %(device_id)s, OpenStack Snapshot display name: " "%(snap_display_name)s", { 'snap_name': snap_name, 'device_id': device_id, 'snap_display_name': snap_display_name}) snapshot_metadata.update({'snap_display_name': snap_display_name}) self.volume_metadata.capture_snapshot_info( volume, extra_specs, 'manageSnapshot', snapshot_metadata) return model_update def _get_snap_id_with_uuid(self, array, device_id, snap_name): """Get the snap_id using the uuid as input :param array: the serial number of the array :param device_id: the device id of the volume :param snap_name: the snap_name containing the uuid :returns: snap_id, snap_name -- str, str """ snap_id = None snap_uuid = snap_name.replace('_snapshot-', '') element_name = self.utils.get_volume_element_name(snap_uuid) snap_name = self.utils.truncate_string(element_name, 19) snap_name = snap_name.replace('OS-', '') snap_list = self.rest.get_volume_snaps( array, device_id, snap_name) if len(snap_list) == 1: snap_id = snap_list[0].get('snap_id') return snap_id, snap_name def manage_existing_snapshot_get_size(self, snapshot): """Return the size of the source volume for manage-existing-snapshot. :param snapshot: the snapshot object :returns: size of the source volume in GB """ volume = snapshot.volume extra_specs = self._initial_setup(volume) device_id = self._find_device_on_array(volume, extra_specs) return self.rest.get_size_of_device_on_array( extra_specs[utils.ARRAY], device_id) def unmanage_snapshot(self, snapshot): """Export PowerMax/VMAX Snapshot from Cinder. 
Leaves the snapshot intact on the backend VMAX :param snapshot: the snapshot object :raises: VolumeBackendAPIException """ volume = snapshot.volume extra_specs = self._initial_setup(volume) array = extra_specs[utils.ARRAY] device_id, snap_name, snap_id_list = self._parse_snap_info( array, snapshot) if len(snap_id_list) != 1: exception_message = (_( "It is not possible to unmanage snapshot because there " "are either multiple or no snapshots associated with " "%(snap_name)s.") % {'snap_name': snap_name}) LOG.error(exception_message) raise exception.VolumeBackendAPIException( message=exception_message) if self.utils.is_volume_failed_over(volume): exception_message = ( _("It is not possible to unmanage a snapshot where the " "source volume is failed-over, revert back to source " "PowerMax/VMAX to unmanage snapshot %(snap_name)s") % {'snap_name': snap_name}) LOG.error(exception_message) raise exception.VolumeBackendAPIException( message=exception_message) new_snap_backend_name = self.utils.modify_snapshot_prefix( snap_name, unmanage=True) try: self.rest.modify_volume_snap( array, device_id, device_id, snap_name, extra_specs, snap_id=snap_id_list[0], rename=True, new_snap_name=new_snap_backend_name) except Exception as e: exception_message = ( _("There was an issue unmanaging Snapshot, it " "was not possible to remove the OS- prefix. Error " "message is: %(e)s.") % {'snap_name': snap_name, 'e': str(e)}) LOG.error(exception_message) raise exception.VolumeBackendAPIException( message=exception_message) LOG.info("Snapshot %(snap_name)s is no longer managed in " "OpenStack but still remains on PowerMax/VMAX source " "%(array_id)s", {'snap_name': snap_name, 'array_id': array}) LOG.warning("In order to remove the snapshot source volume from " "OpenStack you will need to either delete the linked " "SnapVX snapshot on the array or un-manage the volume " "from Cinder.") def get_manageable_volumes(self, marker, limit, offset, sort_keys, sort_dirs): """Lists all manageable volumes. :param marker: Begin returning volumes that appear later in the volume list than that represented by this reference. This reference should be json like. Default=None. :param limit: Maximum number of volumes to return. Default=None. :param offset: Number of volumes to skip after marker. Default=None. :param sort_keys: Key to sort by, sort by size or reference. Valid keys: size, reference. Default=None. :param sort_dirs: Direction to sort by. Valid dirs: asc, desc. Default=None. :returns: List of dicts containing all volumes valid for management """ valid_vols = [] manageable_vols = [] array = self.pool_info['arrays_info'][0]["SerialNumber"] LOG.info("Listing manageable volumes for array %(array_id)s", { 'array_id': array}) volumes = self.rest.get_private_volume_list(array) # No volumes returned from PowerMax/VMAX if not volumes: LOG.info("There were no volumes found on the backend " "PowerMax/VMAX. 
You need to create some volumes before " "they can be managed into Cinder.") return manageable_vols volumes = volumes or list() for device in volumes: # Determine if volume is valid for management if self.utils.is_volume_manageable(device): valid_vols.append(device['volumeHeader']) # For all valid vols, extract relevant data for Cinder response for vol in valid_vols: volume_dict = {'reference': {'source-id': vol['volumeId']}, 'safe_to_manage': True, 'size': int(math.ceil(vol['capGB'])), 'reason_not_safe': None, 'cinder_id': None, 'extra_info': { 'config': vol['configuration'], 'emulation': vol['emulationType']}} manageable_vols.append(volume_dict) # If volume list is populated, perform filtering on user params if manageable_vols: # If sort keys selected, determine if by size or reference, and # direction of sort manageable_vols = self._sort_manageable_volumes( manageable_vols, marker, limit, offset, sort_keys, sort_dirs) return manageable_vols def _sort_manageable_volumes( self, manageable_vols, marker, limit, offset, sort_keys, sort_dirs): """Sort manageable volumes. :param manageable_vols: Unsort list of dicts :param marker: Begin returning volumes that appear later in the volume list than that represented by this reference. This reference should be json like. Default=None. :param limit: Maximum number of volumes to return. Default=None. :param offset: Number of volumes to skip after marker. Default=None. :param sort_keys: Key to sort by, sort by size or reference. Valid keys: size, reference. Default=None. :param sort_dirs: Direction to sort by. Valid dirs: asc, desc. Default=None. :returns: manageable_vols -Sorted list of dicts """ # If sort keys selected, determine if by size or reference, and # direction of sort if sort_keys: reverse = False if sort_dirs: if 'desc' in sort_dirs[0]: reverse = True if sort_keys[0] == 'size': manageable_vols = sorted(manageable_vols, key=lambda k: k['size'], reverse=reverse) if sort_keys[0] == 'reference': manageable_vols = sorted(manageable_vols, key=lambda k: k['reference'][ 'source-id'], reverse=reverse) # If marker provided, return only manageable volumes after marker if marker: vol_index = None for vol in manageable_vols: if vol['reference']['source-id'] == marker: vol_index = manageable_vols.index(vol) if vol_index: manageable_vols = manageable_vols[vol_index:] else: msg = _("Volume marker not found, please check supplied " "device ID and try again.") raise exception.VolumeBackendAPIException(msg) # If offset or limit provided, offset or limit result list if offset: manageable_vols = manageable_vols[offset:] if limit: manageable_vols = manageable_vols[:limit] return manageable_vols def get_manageable_snapshots(self, marker, limit, offset, sort_keys, sort_dirs): """Lists all manageable snapshots. :param marker: Begin returning volumes that appear later in the volume list than that represented by this reference. This reference should be json like. Default=None. :param limit: Maximum number of volumes to return. Default=None. :param offset: Number of volumes to skip after marker. Default=None. :param sort_keys: Key to sort by, sort by size or reference. Valid keys: size, reference. Default=None. :param sort_dirs: Direction to sort by. Valid dirs: asc, desc. Default=None. 
:returns: List of dicts containing all snapshots valid for management """ manageable_snaps = [] array = self.pool_info['arrays_info'][0]["SerialNumber"] LOG.info("Listing manageable snapshots for array %(array_id)s", { 'array_id': array}) volumes = self.rest.get_private_volume_list(array) # No volumes returned from PowerMax/VMAX if not volumes: LOG.info("There were no volumes found on the backend " "PowerMax/VMAX. You need to create some volumes " "before a snapshot can be created and managed into " "Cinder.") return manageable_snaps volumes = volumes or list() for device in volumes: # Determine if volume is valid for management manageable_snaps = self._is_snapshot_valid_for_management( manageable_snaps, device) # If snapshot list is populated, perform filtering on user params if len(manageable_snaps) > 0: # Order snapshots by source deviceID and not snapshot name manageable_snaps = self._sort_manageable_snapshots( manageable_snaps, marker, limit, offset, sort_keys, sort_dirs) return manageable_snaps def _sort_manageable_snapshots( self, manageable_snaps, marker, limit, offset, sort_keys, sort_dirs): """Sorts manageable snapshots list. :param manageable_snaps: unsorted manageable snapshot list :param marker: Begin returning volumes that appear later in the volume list than that represented by this reference. This reference should be json like. Default=None. :param limit: Maximum number of volumes to return. Default=None. :param offset: Number of volumes to skip after marker. Default=None. :param sort_keys: Key to sort by, sort by size or reference. Valid keys: size, reference. Default=None. :param sort_dirs: Direction to sort by. Valid dirs: asc, desc. Default=None. :returns: List of dicts containing all snapshots valid for management """ manageable_snaps = sorted( manageable_snaps, key=lambda k: k['source_reference']['source-id']) # If sort keys selected, determine if by size or reference, and # direction of sort if sort_keys: reverse = False if sort_dirs: if 'desc' in sort_dirs[0]: reverse = True if sort_keys[0] == 'size': manageable_snaps = sorted(manageable_snaps, key=lambda k: k['size'], reverse=reverse) if sort_keys[0] == 'reference': manageable_snaps = sorted(manageable_snaps, key=lambda k: k['reference'][ 'source-name'], reverse=reverse) # If marker provided, return only manageable volumes after marker if marker: snap_index = None for snap in manageable_snaps: if snap['reference']['source-name'] == marker: snap_index = manageable_snaps.index(snap) if snap_index: manageable_snaps = manageable_snaps[snap_index:] else: msg = (_("Snapshot marker %(marker)s not found, marker " "provided must be a valid PowerMax/VMAX " "snapshot ID") % {'marker': marker}) raise exception.VolumeBackendAPIException(msg) # If offset or limit provided, offset or limit result list if offset: manageable_snaps = manageable_snaps[offset:] if limit: manageable_snaps = manageable_snaps[:limit] return manageable_snaps def _is_snapshot_valid_for_management(self, manageable_snaps, device): """check if snapshot is valid for management :param manageable_snaps: list of manageable snapshots :param device: the source device :returns: List of dicts containing all snapshots valid for management """ if self.utils.is_snapshot_manageable(device): # Snapshot valid, extract relevant snap info snap_info = device['timeFinderInfo']['snapVXSession'][0][ 'srcSnapshotGenInfo'][0]['snapshotHeader'] # Convert timestamp to human readable format human_timestamp = time.strftime( "%Y/%m/%d, %H:%M:%S", time.localtime( float(str( 
snap_info['timestamp'])[:-3]))) # If TTL is set, convert value to human readable format if int(snap_info['timeToLive']) > 0: human_ttl_timestamp = time.strftime( "%Y/%m/%d, %H:%M:%S", time.localtime( float(str( snap_info['timeToLive'])))) else: human_ttl_timestamp = 'N/A' # For all valid snaps, extract relevant data for Cinder # response snap_dict = { 'reference': { 'source-name': snap_info['snapshotName']}, 'safe_to_manage': True, 'size': int( math.ceil(device['volumeHeader']['capGB'])), 'reason_not_safe': None, 'cinder_id': None, 'extra_info': { 'generation': snap_info.get('generation'), 'snap_id': snap_info.get('snapid'), 'secured': snap_info.get('secured'), 'timeToLive': human_ttl_timestamp, 'timestamp': human_timestamp}, 'source_reference': {'source-id': snap_info['device']}} manageable_snaps.append(snap_dict) return manageable_snaps def retype(self, volume, new_type, host): """Migrate volume to another host using retype. :param volume: the volume object including the volume_type_id :param new_type: the new volume type. :param host: The host dict holding the relevant target(destination) information :returns: boolean -- True if retype succeeded, False if error """ volume_name = volume.name LOG.info("Migrating Volume %(volume)s via retype.", {'volume': volume_name}) extra_specs = self._initial_setup(volume) if self.utils.is_replication_enabled(extra_specs) and self.promotion: rep_config = extra_specs.get('rep_config') extra_specs = self._get_replication_extra_specs( extra_specs, rep_config) if not self.utils.is_retype_supported(volume, extra_specs, new_type['extra_specs'], self.rep_configs): src_mode = extra_specs.get('rep_mode', 'non-replicated') LOG.error("It is not possible to perform storage-assisted retype " "from %(src_mode)s to Metro replication type whilst the " "volume is attached to a host. To perform this " "operation please first detach the volume.", {'src_mode': src_mode}) return False device_id = self._find_device_on_array(volume, extra_specs) if device_id is None: LOG.error("Volume %(name)s not found on the array. " "No volume to migrate using retype.", {'name': volume_name}) return False return self._slo_workload_migration(device_id, volume, host, volume_name, new_type, extra_specs) def _slo_workload_migration(self, device_id, volume, host, volume_name, new_type, extra_specs): """Migrate from SLO/Workload combination to another. :param device_id: the volume device id :param volume: the volume object :param host: the host dict :param volume_name: the name of the volume :param new_type: the type to migrate to :param extra_specs: extra specifications :returns: boolean -- True if migration succeeded, False if error. """ do_change_compression = False # Check if old type and new type have different replication types do_change_replication = self.utils.change_replication( extra_specs, new_type[utils.EXTRA_SPECS]) if self.rest.is_compression_capable(extra_specs[utils.ARRAY]): is_compression_disabled = self.utils.is_compression_disabled( extra_specs) # Check if old type and new type have different compression types do_change_compression = (self.utils.change_compression_type( is_compression_disabled, new_type)) else: LOG.warning( "Array %(array)s is not compression capable. 
Any attempt to " "disable compression using an extra spec on the volume type " "will be ignored.", {'array': extra_specs[utils.ARRAY]}) is_tgt_rep = self.utils.is_replication_enabled( new_type[utils.EXTRA_SPECS]) is_valid, target_slo, target_workload = ( self._is_valid_for_storage_assisted_migration( device_id, host, extra_specs[utils.ARRAY], extra_specs[utils.SRP], volume_name, do_change_compression, do_change_replication, extra_specs[utils.SLO], extra_specs[utils.WORKLOAD], is_tgt_rep)) if not is_valid: # Check if this is multiattach retype case do_change_multiattach = self.utils.change_multiattach( extra_specs, new_type['extra_specs']) if do_change_multiattach and not self.promotion: return True else: LOG.error( "Volume %(name)s is not suitable for storage " "assisted migration using retype.", {'name': volume_name}) return False if (volume.host != host['host'] or do_change_compression or do_change_replication): LOG.debug( "Retype Volume %(name)s from source host %(sourceHost)s " "to target host %(targetHost)s. Compression change is %(cc)r. " "Replication change is %(rc)s.", {'name': volume_name, 'sourceHost': volume.host, 'targetHost': host['host'], 'cc': do_change_compression, 'rc': do_change_replication}) return self._migrate_volume( extra_specs[utils.ARRAY], volume, device_id, extra_specs[utils.SRP], target_slo, target_workload, volume_name, new_type, extra_specs) return False def _migrate_volume( self, array, volume, device_id, srp, target_slo, target_workload, volume_name, new_type, extra_specs): """Migrate from one slo/workload combination to another. This requires moving the volume from its current SG to a new or existing SG that has the target attributes. :param array: the array serial number :param volume: the volume object :param device_id: the device number :param srp: the storage resource pool :param target_slo: the target service level :param target_workload: the target workload :param volume_name: the volume name :param new_type: the volume type to migrate to :param extra_specs: the extra specifications :returns: bool, dict """ orig_mgmt_sg_name = None target_extra_specs = dict(new_type['extra_specs']) target_extra_specs.update({ utils.SRP: srp, utils.ARRAY: array, utils.SLO: target_slo, utils.WORKLOAD: target_workload, utils.INTERVAL: extra_specs[utils.INTERVAL], utils.RETRIES: extra_specs[utils.RETRIES]}) compression_disabled = self.utils.is_compression_disabled( target_extra_specs) target_extra_specs.update( {utils.DISABLECOMPRESSION: compression_disabled}) rf = self._get_replication_flags(extra_specs, target_extra_specs) if rf.was_rep_enabled and not self.promotion: self._validate_rdfg_status(array, extra_specs) orig_mgmt_sg_name = self.utils.get_rdf_management_group_name( extra_specs[utils.REP_CONFIG]) if rf.is_rep_enabled: self._validate_rdfg_status(array, target_extra_specs) # Data to determine what we need to reset during exception cleanup initial_sg_list = self.rest.get_storage_groups_from_volume( array, device_id) if orig_mgmt_sg_name in initial_sg_list: initial_sg_list.remove(orig_mgmt_sg_name) rdf_pair_broken, rdf_pair_created, vol_retyped, remote_retyped = ( False, False, False, False) self._perform_snapshot_cleanup( array, device_id, rf.was_rep_enabled, rf.is_rep_enabled, rf.backend_ids_differ, extra_specs, target_extra_specs) try: # Scenario 1: Rep -> Non-Rep # Scenario 2: Cleanup for Rep -> Diff Rep type rnr = self._prep_rep_to_non_rep( array, device_id, volume_name, volume, rf.was_rep_enabled, rf.is_rep_enabled, rf.backend_ids_differ, extra_specs) 
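# Note: rnr is the RepToNonRep result returned by the preparation step above;
# it carries the model update and resume flags needed when the source volume
# had to be taken out of replication. The steps that follow handle the
# remaining retype scenarios in order: enable replication for the target type
# if required, retype the local device, SRDF protect the storage group when
# this is the first volume in the RDF group, and retype the remote device when
# the replication configuration is unchanged.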
model_update = rnr.model_update # Scenario 1: Non-Rep -> Rep # Scenario 2: Rep -> Diff Rep type nrr = self._prep_non_rep_to_rep( array, device_id, volume, rf.was_rep_enabled, rf.is_rep_enabled, rf.backend_ids_differ, target_extra_specs) if not model_update: model_update = nrr.model_update success, target_sg_name = self._retype_volume( array, srp, device_id, volume, volume_name, extra_specs, target_slo, target_workload, target_extra_specs) vol_retyped = True # Volume is first volume in RDFG, SG needs to be protected if nrr.rep_status == 'first_vol_in_rdf_group': volume_name = self.utils.get_volume_element_name(volume.id) rep_status, rdf_pair_info, tgt_device_id = ( self._post_retype_srdf_protect_storage_group( array, target_sg_name, device_id, volume_name, nrr.rep_extra_specs, volume)) model_update = { 'replication_status': rep_status, 'replication_driver_data': str( {'device_id': tgt_device_id, 'array': rdf_pair_info['remoteSymmetrixId']})} rdf_pair_created = True # Scenario: Rep -> Same Rep if rf.was_rep_enabled and rf.is_rep_enabled and not ( rf.backend_ids_differ): # No change in replication config, retype remote device success = self._retype_remote_volume( array, volume, device_id, volume_name, rf.rep_mode, rf.is_rep_enabled, target_extra_specs) remote_retyped = True if nrr.resume_target_sg: self.rest.srdf_resume_replication( array, nrr.rep_extra_specs['mgmt_sg_name'], nrr.rep_extra_specs['rdf_group_no'], nrr.rep_extra_specs) if (rnr.resume_original_sg and rnr.resume_original_sg_dict and not self.promotion): self.rest.srdf_resume_replication( rnr.resume_original_sg_dict[utils.ARRAY], rnr.resume_original_sg_dict[utils.SG_NAME], rnr.resume_original_sg_dict[utils.RDF_GROUP_NO], rnr.resume_original_sg_dict[utils.EXTRA_SPECS]) if success: model_update = self.update_metadata( model_update, volume.metadata, self.get_volume_metadata(array, device_id)) if self.promotion: previous_host = volume.get('host') host_details = previous_host.split('+') array_index = len(host_details) - 1 srp_index = len(host_details) - 2 host_details[array_index] = array host_details[srp_index] = srp updated_host = '+'.join(host_details) model_update['host'] = updated_host if rnr.is_partitioned: # Must set these here as offline R1 promotion does # not perform rdf cleanup. model_update[ 'metadata']['ReplicationEnabled'] = 'False' model_update['metadata']['Configuration'] = 'TDEV' target_backend_id = None if rf.is_rep_enabled: target_backend_id = target_extra_specs.get( utils.REPLICATION_DEVICE_BACKEND_ID, 'None') model_update['metadata']['BackendID'] = target_backend_id if rf.was_rep_enabled and not rf.is_rep_enabled: model_update = self.remove_stale_data(model_update) self.volume_metadata.capture_retype_info( volume, device_id, array, srp, target_slo, target_workload, target_sg_name, rf.is_rep_enabled, rf.rep_mode, self.utils.is_compression_disabled(target_extra_specs), target_backend_id) return success, model_update except Exception as e: try: self._cleanup_on_migrate_failure( rdf_pair_broken, rdf_pair_created, vol_retyped, remote_retyped, extra_specs, target_extra_specs, volume, volume_name, device_id, initial_sg_list[0]) except Exception: # Don't care what this is, just catch it to prevent exception # occurred while handling another exception type stack trace. LOG.debug( 'Volume migrate cleanup - Could not revert volume to ' 'previous state post volume migrate exception.') finally: raise e def _get_replication_flags(self, extra_specs, target_extra_specs): """Get replication flags from extra specifications. 
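The returned ReplicationFlags namedtuple records whether replication was enabled on the source volume type, whether it is enabled on the target volume type, whether the two types reference different replication_device backend ids, and the target replication mode. The retype flow uses these flags to choose between the rep to non-rep, non-rep to rep and rep to rep paths.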
:param extra_specs: extra specification -- dict :type extra_specs: dict :param target_extra_specs: target extra specification -- dict :type target_extra_specs: dict :returns: ReplicationFlags :rtype NonRepToRep: namedtuple """ rep_mode = None was_rep_enabled = self.utils.is_replication_enabled(extra_specs) if self.utils.is_replication_enabled(target_extra_specs): target_backend_id = target_extra_specs.get( utils.REPLICATION_DEVICE_BACKEND_ID, utils.BACKEND_ID_LEGACY_REP) target_rep_config = self.utils.get_rep_config( target_backend_id, self.rep_configs) rep_mode = target_rep_config['mode'] target_extra_specs[utils.REP_MODE] = rep_mode target_extra_specs[utils.REP_CONFIG] = target_rep_config is_rep_enabled = True else: is_rep_enabled = False backend_ids_differ = False if was_rep_enabled and is_rep_enabled: curr_backend_id = extra_specs.get( utils.REPLICATION_DEVICE_BACKEND_ID, utils.BACKEND_ID_LEGACY_REP) tgt_backend_id = target_extra_specs.get( utils.REPLICATION_DEVICE_BACKEND_ID, utils.BACKEND_ID_LEGACY_REP) backend_ids_differ = curr_backend_id != tgt_backend_id return ReplicationFlags( was_rep_enabled=was_rep_enabled, is_rep_enabled=is_rep_enabled, backend_ids_differ=backend_ids_differ, rep_mode=rep_mode, target_extra_specs=target_extra_specs) def _prep_non_rep_to_rep( self, array, device_id, volume, was_rep_enabled, is_rep_enabled, backend_ids_differ, target_extra_specs): """Prepare for non rep to rep retype. :param array: the array serial number :type array: str :param device_id: the device id :type device_id: str :param volume: the volume object :type volume: objects.Volume :param was_rep_enabled: flag :type was_rep_enabled: bool :param is_rep_enabled: flag :type is_rep_enabled: bool :param backend_ids_differ: flag :type backend_ids_differ: bool :param target_extra_specs: target extra specs :type target_extra_specs: dict :returns: NonRepToRep :rtype NonRepToRep: namedtuple """ model_update, rep_status = None, None resume_target_sg = False rdf_pair_created = False rep_driver_data, rep_info_dict = dict(), dict() rep_extra_specs = dict() if (not was_rep_enabled and is_rep_enabled) or backend_ids_differ: (rep_status, rep_driver_data, rep_info_dict, rep_extra_specs, resume_target_sg) = ( self.configure_volume_replication( array, volume, device_id, target_extra_specs)) if rep_status != 'first_vol_in_rdf_group': rdf_pair_created = True model_update = { 'replication_status': rep_status, 'replication_driver_data': str( {'device_id': rep_info_dict['target_device_id'], 'array': rep_info_dict['remote_array']})} return NonRepToRep( model_update=model_update, rdf_pair_created=rdf_pair_created, rep_status=rep_status, rep_driver_data=rep_driver_data, rep_info_dict=rep_info_dict, rep_extra_specs=rep_extra_specs, resume_target_sg=resume_target_sg) def _prep_rep_to_non_rep( self, array, device_id, volume_name, volume, was_rep_enabled, is_rep_enabled, backend_ids_differ, extra_specs): """Preparation for replication to non-replicated. 
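Only acts when the source type was replicated and the target type is not, or when the two types use different replication backend ids. During promotion the pair is broken with the promotion-specific helper unless the volumes are already partitioned; otherwise break_rdf_device_pair_session is used. The model update then marks replication as error (promotion) or disabled.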
:param array: the array serial number :type array: str :param device_id: the device id :type device_id: str :param volume_name: the volume name :type volume_name: str :param volume: the volume object :type volume: objects.Volume :param was_rep_enabled: flag :type was_rep_enabled: bool :param is_rep_enabled: flag :type is_rep_enabled: bool :param backend_ids_differ: flag :type backend_ids_differ: bool :param extra_specs: extra specs :type extra_specs: dict :returns: RepToNonRep :rtype RepToNonRep: namedtuple """ model_update = dict() resume_original_sg_dict = dict() rdf_pair_broken = False resume_original_sg = False rep_extra_specs = dict() is_partitioned = False if (was_rep_enabled and not is_rep_enabled) or backend_ids_differ: if self.promotion: resume_original_sg = False rdf_group = extra_specs['rdf_group_no'] is_partitioned = self._rdf_vols_partitioned( array, [volume], rdf_group) if not is_partitioned: self.break_rdf_device_pair_session_promotion( array, device_id, volume_name, extra_specs) else: rep_extra_specs, resume_original_sg = ( self.break_rdf_device_pair_session( array, device_id, volume_name, extra_specs, volume)) status = (REPLICATION_ERROR if self.promotion else REPLICATION_DISABLED) model_update = { 'replication_status': status, 'replication_driver_data': None} rdf_pair_broken = True if resume_original_sg: resume_original_sg_dict = { utils.ARRAY: array, utils.SG_NAME: rep_extra_specs['mgmt_sg_name'], utils.RDF_GROUP_NO: rep_extra_specs['rdf_group_no'], utils.EXTRA_SPECS: rep_extra_specs} return RepToNonRep( model_update=model_update, resume_original_sg_dict=resume_original_sg_dict, rdf_pair_broken=rdf_pair_broken, resume_original_sg=resume_original_sg, is_partitioned=is_partitioned) def _perform_snapshot_cleanup( self, array, device_id, was_rep_enabled, is_rep_enabled, backend_ids_differ, extra_specs, target_extra_specs): """Perform snapshot cleanup. Perform snapshot cleanup before any other changes. If retyping to either async or metro then there should be no linked snapshots on the volume. :param array: the array serial number :type array: str :param device_id: the device id :type device_id: str :param was_rep_enabled: flag :type was_rep_enabled: bool :param is_rep_enabled: flag :type is_rep_enabled: bool :param backend_ids_differ: flag :type backend_ids_differ: bool :param extra_specs: extra specs :type extra_specs: dict :param target_extra_specs: target extra specs :type target_extra_specs: dict """ if (not was_rep_enabled and is_rep_enabled) or backend_ids_differ: target_rep_mode = target_extra_specs.get(utils.REP_MODE) target_is_async = target_rep_mode == utils.REP_ASYNC target_is_metro = target_rep_mode == utils.REP_METRO if target_is_async or target_is_metro: self._cleanup_device_snapvx(array, device_id, extra_specs) snapshots = self.rest.get_volume_snapshot_list( array, device_id) __, snapvx_target_details = self.rest.find_snap_vx_sessions( array, device_id, tgt_only=True) linked_snapshots = list() for snapshot in snapshots: linked_devices = snapshot.get('linkedDevices') if linked_devices: snapshot_name = snapshot.get('snapshotName') linked_snapshots.append(snapshot_name) if linked_snapshots: snapshot_names = ', '.join(linked_snapshots) raise exception.VolumeBackendAPIException(_( 'Unable to complete retype as volume has active ' 'snapvx links. Cannot retype to Asynchronous or ' 'Metro modes while the volume has active links. ' 'Please wait until these snapvx operations have ' 'completed and try again. 
Snapshots: ' '%s') % snapshot_names) if snapvx_target_details: source_vol_id = snapvx_target_details.get('source_vol_id') snap_name = snapvx_target_details.get('snap_name') raise exception.VolumeBackendAPIException(_( 'Unable to complete retype as volume is a snapvx ' 'target. Cannot retype to Asynchronous or Metro ' 'modes in this state. Please wait until these snapvx ' 'operations complete and try again. Volume %s is ' 'currently a target of snapshot %s with source device ' '%s') % (device_id, snap_name, source_vol_id)) def _cleanup_on_migrate_failure( self, rdf_pair_broken, rdf_pair_created, vol_retyped, remote_retyped, extra_specs, target_extra_specs, volume, volume_name, device_id, source_sg): """Attempt rollback to previous volume state before migrate exception. :param rdf_pair_broken: was the rdf pair broken during migration :param rdf_pair_created: was a new rdf pair created during migration :param vol_retyped: was the local volume retyped during migration :param remote_retyped: was the remote volume retyped during migration :param extra_specs: extra specs :param target_extra_specs: target extra specs :param volume: volume :param volume_name: volume name :param device_id: local device id :param source_sg: local device pre-migrate storage group name """ array = extra_specs[utils.ARRAY] srp = extra_specs[utils.SRP] slo = extra_specs[utils.SLO] workload = extra_specs.get(utils.WORKLOAD, 'NONE') LOG.debug('Volume migrate cleanup - starting revert attempt.') if remote_retyped: LOG.debug('Volume migrate cleanup - Attempt to revert remote ' 'volume retype.') rep_mode = extra_specs[utils.REP_MODE] is_rep_enabled = self.utils.is_replication_enabled(extra_specs) self._retype_remote_volume( array, volume, device_id, volume_name, rep_mode, is_rep_enabled, extra_specs) LOG.debug('Volume migrate cleanup - Revert remote retype ' 'volume successful.') if rdf_pair_created: LOG.debug('Volume migrate cleanup - Attempt to revert rdf ' 'pair creation.') rep_extra_specs, resume_rdf = ( self.break_rdf_device_pair_session( array, device_id, volume_name, extra_specs, volume)) if resume_rdf: self.rest.srdf_resume_replication( array, rep_extra_specs['mgmt_sg_name'], rep_extra_specs['rdf_group_no'], rep_extra_specs) LOG.debug('Volume migrate cleanup - Revert rdf pair ' 'creation successful.') if vol_retyped: LOG.debug('Volume migrate cleanup - Attempt to revert local ' 'volume retype.') self._retype_volume( array, srp, device_id, volume, volume_name, target_extra_specs, slo, workload, extra_specs) LOG.debug('Volume migrate cleanup - Revert local volume ' 'retype successful.') if rdf_pair_broken: LOG.debug('Volume migrate cleanup - Attempt to revert to ' 'original rdf pair.') (rep_status, __, __, rep_extra_specs, resume_rdf) = ( self.configure_volume_replication( array, volume, device_id, extra_specs)) if rep_status == 'first_vol_in_rdf_group': volume_name = self.utils.get_volume_element_name(volume.id) __, __, __ = ( self._post_retype_srdf_protect_storage_group( array, source_sg, device_id, volume_name, rep_extra_specs, volume)) if resume_rdf: self.rest.srdf_resume_replication( array, rep_extra_specs['mgmt_sg_name'], rep_extra_specs['rdf_group_no'], rep_extra_specs) LOG.debug('Volume migrate cleanup - Revert to original rdf ' 'pair successful.') LOG.debug('Volume migrate cleanup - Reverted volume to previous ' 'state post retype exception.') def _retype_volume( self, array, srp, device_id, volume, volume_name, extra_specs, target_slo, target_workload, target_extra_specs, remote=False, metro_attach=False): 
"""Retype a volume from one volume type to another. The target storage group ID is returned so the next phase in the calling function can SRDF protect it if required. :param array: the array serial number :param srp: the storage resource pool name :param device_id: the device ID to be retyped :param volume: the volume object :param volume_name: the volume name :param extra_specs: source extra specs :param target_slo: target service level id :param target_workload: target workload id :param target_extra_specs: target extra specs :param remote: if the volume being retyped is on a remote replication target :param metro_attach: is it metro attached -- bool :returns: retype success, target storage group -- bool, str """ is_re, rep_mode, mgmt_sg_name = False, None, None parent_sg = None if self.utils.is_replication_enabled(target_extra_specs): is_re, rep_mode = True, target_extra_specs['rep_mode'] mgmt_sg_name = self.utils.get_rdf_management_group_name( target_extra_specs[utils.REP_CONFIG]) if self.promotion and self.utils.is_replication_enabled(extra_specs): # Need to check this when performing promotion while R1 is offline # as RDF cleanup is not performed. Target is not RDF enabled # in that scenario. mgmt_sg_name = self.utils.get_rdf_management_group_name( extra_specs[utils.REP_CONFIG]) device_info = self.rest.get_volume(array, device_id) target_extra_specs[utils.PORTGROUPNAME] = ( extra_specs.get(utils.PORTGROUPNAME, None)) disable_compression = self.utils.is_compression_disabled( target_extra_specs) source_sg_list = device_info['storageGroupId'] if mgmt_sg_name in source_sg_list: source_sg_list.remove(mgmt_sg_name) source_sg_name = source_sg_list[0] # Flags for exception handling (created_child_sg, add_sg_to_parent, got_default_sg, moved_between_sgs, target_sg_name) = (False, False, False, False, False) try: # If volume is attached set up the parent/child SGs if not already # present on array is_attached_vol = False if volume.attach_status == 'attached' and remote and metro_attach: is_attached_vol = True rep_config = target_extra_specs.get('rep_config') if rep_config: port_group_name = rep_config.get('portgroup') if port_group_name: port_group_label = self.utils.get_port_name_label( port_group_name, self.powermax_port_group_name_template) else: LOG.error("Unable to get the port group name from " "replication configuration.") return False, None elif volume.attach_status == 'attached' and not remote: is_attached_vol = True port_group_label = self.utils.get_port_name_label( target_extra_specs[utils.PORTGROUPNAME], self.powermax_port_group_name_template) if is_attached_vol: attached_host = self.utils.get_volume_attached_hostname( volume) if not attached_host: LOG.error( "There was an issue retrieving attached host from " "volume %(volume_name)s, aborting storage-assisted " "migration.", {'volume_name': device_id}) return False, None target_sg_name, __, __ = self.utils.get_child_sg_name( attached_host, target_extra_specs, port_group_label) target_sg = self.rest.get_storage_group(array, target_sg_name) if not target_sg: self.provision.create_storage_group( array, target_sg_name, srp, target_slo, target_workload, target_extra_specs, disable_compression) source_sg = self.rest.get_storage_group( array, source_sg_name) parent_sg = source_sg.get('parent_storage_group', None) created_child_sg = True if parent_sg: parent_sg = parent_sg[0] self.masking.add_child_sg_to_parent_sg( array, target_sg_name, parent_sg, target_extra_specs) add_sg_to_parent = True # Else volume is not attached or is remote (not 
attached) volume, # use default SGs else: target_sg_name = ( self.masking.get_or_create_default_storage_group( array, srp, target_slo, target_workload, extra_specs, disable_compression, is_re, rep_mode)) got_default_sg = True # Move the volume from the source to target storage group self.masking.move_volume_between_storage_groups( array, device_id, source_sg_name, target_sg_name, extra_specs, force=True, parent_sg=parent_sg) moved_between_sgs = True # Check if volume should be member of GVG self.masking.return_volume_to_volume_group( array, volume, device_id, volume_name, extra_specs) # Check the move was successful success = self.rest.is_volume_in_storagegroup( array, device_id, target_sg_name) if not success: LOG.error( "Volume: %(volume_name)s has not been " "added to target storage group %(storageGroup)s.", {'volume_name': device_id, 'storageGroup': target_sg_name}) return False, None else: LOG.info("Move successful: %(success)s", {'success': success}) return success, target_sg_name except Exception as e: try: self._cleanup_on_retype_volume_failure( created_child_sg, add_sg_to_parent, got_default_sg, moved_between_sgs, array, source_sg_name, parent_sg, target_sg_name, extra_specs, device_id, volume, volume_name) except Exception: # Don't care what this is, just catch it to prevent exception # occurred while handling another exception type stack trace. LOG.debug( 'Volume retype cleanup - Could not revert volume to ' 'previous state post volume retype exception.') finally: raise e def _cleanup_on_retype_volume_failure( self, created_child_sg, add_sg_to_parent, got_default_sg, moved_between_sgs, array, source_sg, parent_sg, target_sg_name, extra_specs, device_id, volume, volume_name): """Attempt to rollback to previous volume state on retype exception. 
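The rollback performed depends on how far _retype_volume got, as recorded by the boolean flags passed in: a volume that was already moved is returned to its source storage group (recreating that group and re-adding it to its parent if necessary), an empty default target group is deleted, and a newly created child group is detached from its parent and removed.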
:param created_child_sg: was a child sg created during retype :param add_sg_to_parent: was a child sg added to parent during retype :param got_default_sg: was a default sg possibly created during retype :param moved_between_sgs: was the volume moved between storage groups :param array: array :param source_sg: volumes originating storage group name :param parent_sg: parent storage group name :param target_sg_name: storage group volume was to be moved to :param extra_specs: extra specs :param device_id: device id :param volume: volume :param volume_name: volume name """ if moved_between_sgs: LOG.debug('Volume retype cleanup - Attempt to revert move between ' 'storage groups.') storage_groups = self.rest.get_storage_group_list(array) if source_sg not in storage_groups: disable_compression = self.utils.is_compression_disabled( extra_specs) self.rest.create_storage_group( array, source_sg, extra_specs['srp'], extra_specs['slo'], extra_specs['workload'], extra_specs, disable_compression) if parent_sg: self.masking.add_child_sg_to_parent_sg( array, source_sg, parent_sg, extra_specs) self.masking.move_volume_between_storage_groups( array, device_id, target_sg_name, source_sg, extra_specs, force=True, parent_sg=parent_sg) self.masking.return_volume_to_volume_group( array, volume, device_id, volume_name, extra_specs) LOG.debug('Volume retype cleanup - Revert move between storage ' 'groups successful.') elif got_default_sg: vols = self.rest.get_volumes_in_storage_group( array, target_sg_name) if len(vols) == 0: LOG.debug('Volume retype cleanup - Attempt to delete empty ' 'target sg.') self.rest.delete_storage_group(array, target_sg_name) LOG.debug('Volume retype cleanup - Delete target sg ' 'successful') elif created_child_sg: if add_sg_to_parent: LOG.debug('Volume retype cleanup - Attempt to revert add ' 'child sg to parent') self.rest.remove_child_sg_from_parent_sg( array, target_sg_name, parent_sg, extra_specs) LOG.debug('Volume retype cleanup - Revert add child sg to ' 'parent successful.') LOG.debug('Volume retype cleanup - Attempt to delete empty ' 'target sg.') self.rest.delete_storage_group(array, target_sg_name) LOG.debug('Volume retype cleanup - Delete target sg ' 'successful') def remove_stale_data(self, model_update): """Remove stale RDF data :param model_update: the model :returns: model_update -- dict """ new_metadata = model_update.get('metadata') if isinstance(new_metadata, dict): keys = ['R2-DeviceID', 'R2-ArrayID', 'R2-ArrayModel', 'ReplicationMode', 'RDFG-Label', 'R1-RDFG', 'R2-RDFG', 'BackendID'] for k in keys: new_metadata.pop(k, None) return model_update def _post_retype_srdf_protect_storage_group( self, array, local_sg_name, device_id, volume_name, rep_extra_specs, volume): """SRDF protect SG if first volume in SG after retype operation. 
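Used when the retyped device is the first volume placed in the target RDF group. The whole storage group is SRDF protected, the new R2 device is renamed to match the volume, and for Asynchronous and Metro modes the pair is also added to the RDF management group. If protection fails after the pair was created, the new pair is broken again before the exception is re-raised.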
:param array: the array serial number :param local_sg_name: the local storage group name :param device_id: the local device ID :param volume_name: the volume name :param rep_extra_specs: replication info dictionary :param volume: the volume being used :returns: replication enables status, device pair info, remote device id -- str, dict, str """ rep_mode = rep_extra_specs['rep_mode'] remote_array = rep_extra_specs['array'] rdf_group_no = rep_extra_specs['rdf_group_no'] service_level = rep_extra_specs['slo'] remote_sg_name = self.utils.derive_default_sg_from_extra_specs( rep_extra_specs, rep_mode) # Flags for exception handling rdf_pair_created = False try: self.rest.srdf_protect_storage_group( array, remote_array, rdf_group_no, rep_mode, local_sg_name, service_level, rep_extra_specs, target_sg=remote_sg_name) rdf_pair_created = True pair_info = self.rest.get_rdf_pair_volume( array, rdf_group_no, device_id) r2_device_id = pair_info['remoteVolumeName'] self.rest.rename_volume(remote_array, r2_device_id, volume_name) if rep_mode in [utils.REP_ASYNC, utils.REP_METRO]: self._add_volume_to_rdf_management_group( array, device_id, volume_name, remote_array, r2_device_id, rep_extra_specs) return REPLICATION_ENABLED, pair_info, r2_device_id except Exception as e: try: if rdf_pair_created: LOG.debug('Volume retype srdf protect cleanup - Attempt ' 'to break new rdf pair.') self.break_rdf_device_pair_session( array, device_id, volume_name, rep_extra_specs, volume) LOG.debug('Volume retype srdf protect cleanup - Break new ' 'rdf pair successful.') except Exception: # Don't care what this is, just catch it to prevent exception # occurred while handling another exception type stack trace. LOG.debug( 'Retype SRDF protect cleanup - Unable to break new RDF ' 'pair on volume post volume retype srdf protect ' 'exception.') finally: raise e def _retype_remote_volume(self, array, volume, device_id, volume_name, rep_mode, is_re, extra_specs): """Retype the remote volume. 
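Moves the R2 device into a storage group matching the target SLO and workload on the remote array: the default group for detached or Sync/Async volumes, or an attached child group when the volume is Metro and attached (metro_attach). The move is skipped when the remote device is already in the correct group.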
:param array: the array serial number :param volume: the volume object :param device_id: the device id :param volume_name: the volume name :param rep_mode: the replication mode :param is_re: replication enabled :param extra_specs: the target extra specs :returns: bool """ success = True rep_config = extra_specs[utils.REP_CONFIG] rep_extra_specs = self._get_replication_extra_specs( extra_specs, rep_config) target_device = self.rest.get_rdf_pair_volume( array, rep_extra_specs['rdf_group_no'], device_id) target_device_id = target_device['remoteVolumeName'] remote_array = rep_extra_specs['array'] rep_compr_disabled = self.utils.is_compression_disabled( rep_extra_specs) remote_sg_name = None metro_attach = False if volume.attach_status == 'detached' or ( rep_mode in [utils.REP_SYNC, utils.REP_ASYNC]): remote_sg_name = self.masking.get_or_create_default_storage_group( remote_array, rep_extra_specs[utils.SRP], rep_extra_specs[utils.SLO], rep_extra_specs[utils.WORKLOAD], rep_extra_specs, rep_compr_disabled, is_re=is_re, rep_mode=rep_mode) elif volume.attach_status == 'attached' and ( rep_mode in [utils.REP_METRO]): metro_attach = True found_storage_group_list = self.rest.get_storage_groups_from_volume( remote_array, target_device_id) move_rqd = True for found_storage_group_name in found_storage_group_list: # Check if remote volume is already in the correct sg if found_storage_group_name == remote_sg_name: move_rqd = False break if move_rqd: try: success, __ = self._retype_volume( remote_array, rep_extra_specs[utils.SRP], target_device_id, volume, volume_name, rep_extra_specs, extra_specs[utils.SLO], extra_specs[utils.WORKLOAD], extra_specs, remote=True, metro_attach=metro_attach) except Exception as e: try: volumes = self.rest.get_volumes_in_storage_group( remote_array, remote_sg_name) if len(volumes) == 0: LOG.debug('Volume retype remote cleanup - Attempt to ' 'delete target sg.') self.rest.delete_storage_group( remote_array, remote_sg_name) LOG.debug('Volume retype remote cleanup - Delete ' 'target sg successful.') except Exception: # Don't care what this is, just catch it to prevent # exception occurred while handling another exception # type messaging. LOG.debug( 'Retype remote volume cleanup - Could not delete ' 'target storage group on remote array post retype ' 'remote volume exception.') finally: raise e return success def _is_valid_for_storage_assisted_migration( self, device_id, host, source_array, source_srp, volume_name, do_change_compression, do_change_replication, source_slo, source_workload, is_tgt_rep): """Check if volume is suitable for storage assisted (pool) migration. 
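The target host string is expected to carry the pool portion as 'SLO+Workload+SRP+ArraySerial' (or 'SLO+SRP+ArraySerial' when no workload applies); this is parsed to obtain the target SLO, workload, SRP and array serial, which are then validated against the source volume and, during promotion, against the additional promotion constraints checked below.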
:param device_id: the volume device id :param host: the host dict :param source_array: the volume's current array serial number :param source_srp: the volume's current pool name :param volume_name: the name of the volume to be migrated :param do_change_compression: do change compression :param do_change_replication: flag indicating replication change :param source_slo: slo setting for source volume type :param source_workload: workload setting for source volume type :param is_tgt_rep: is the target volume type replication enabled :returns: boolean -- True/False :returns: string -- targetSlo :returns: string -- targetWorkload """ false_ret = (False, None, None) host_info = host['host'] LOG.debug("Target host is : %(info)s.", {'info': host_info}) try: info_detail = host_info.split('#') pool_details = info_detail[1].split('+') if len(pool_details) == 4: target_slo = pool_details[0] if pool_details[1].lower() == 'none': target_workload = 'NONE' else: target_workload = pool_details[1] target_srp = pool_details[2] target_array_serial = pool_details[3] elif len(pool_details) == 3: target_slo = pool_details[0] target_srp = pool_details[1] target_array_serial = pool_details[2] target_workload = 'NONE' else: raise IndexError if target_slo.lower() == 'none': target_slo = None if self.rest.is_next_gen_array(target_array_serial): target_workload = 'NONE' except IndexError: LOG.debug("Error parsing array, pool, SLO and workload.") return false_ret if self.promotion: if do_change_compression: LOG.error( "When retyping during array promotion, compression " "changes should not occur during the retype operation. " "Please ensure the same compression settings are defined " "in the source and target volume types.") return false_ret if source_slo != target_slo: LOG.error( "When retyping during array promotion, the SLO setting " "for the source and target volume types should match. " "Found %s SLO for the source volume type and %s SLO for " "the target volume type.", source_slo, target_slo) return false_ret if source_workload != target_workload: LOG.error( "When retyping during array promotion, the workload " "setting for the source and target volume types should " "match. Found %s workload for the source volume type " "and %s workload for the target volume type.", source_workload, target_workload) return false_ret if is_tgt_rep: LOG.error( "When retyping during array promotion, the target volume " "type should not have replication enabled. Please ensure " "replication is disabled on the target volume type.") return false_ret if not self.promotion: if target_array_serial not in source_array: LOG.error("The source array: %s does not match the target " "array: %s - skipping storage-assisted " "migration.", source_array, target_array_serial) return false_ret if target_srp not in source_srp: LOG.error( "Only SLO/workload migration within the same SRP Pool is " "supported in this version. The source pool: %s does not " "match the target array: %s. Skipping storage-assisted " "migration.", source_srp, target_srp) return false_ret found_storage_group_list = self.rest.get_storage_groups_from_volume( source_array, device_id) if not found_storage_group_list: LOG.warning("Volume: %(volume_name)s does not currently " "belong to any storage groups.", {'volume_name': volume_name}) else: for found_storage_group_name in found_storage_group_list: if self.utils.get_rdf_group_component_dict( found_storage_group_name): continue emc_fast_setting = ( self.provision. 
get_slo_workload_settings_from_storage_group( source_array, found_storage_group_name)) target_combination = ("%(targetSlo)s+%(targetWorkload)s" % {'targetSlo': target_slo, 'targetWorkload': target_workload}) if target_combination.lower() == emc_fast_setting.lower(): # Check if migration is to change compression # or replication types action_rqd = (True if do_change_compression or do_change_replication else False) if not action_rqd: LOG.warning( "No action required. Volume: %(volume_name)s is " "already part of slo/workload combination: " "%(targetCombination)s.", {'volume_name': volume_name, 'targetCombination': target_combination}) return false_ret return True, target_slo, target_workload def configure_volume_replication(self, array, volume, device_id, extra_specs): """Configure volume replication for a source device. :param array: the array serial number :param volume: the volume object :param device_id: the device id :param extra_specs: volume extra specifications :returns: replication status, device pair info, replication info, resume rdf -- str, dict, dict, bool """ # Set session attributes LOG.debug('Starting replication setup for volume %(vol)s', {'vol': volume.name}) resume_rdf, mgmt_sg_name = False, None disable_compression = self.utils.is_compression_disabled( extra_specs) rep_config = extra_specs[utils.REP_CONFIG] rdf_group_no, remote_array = self.get_rdf_details( array, rep_config) rep_extra_specs = self._get_replication_extra_specs( extra_specs, rep_config) rep_mode = rep_extra_specs['rep_mode'] rep_extra_specs['mgmt_sg_name'] = None group_details = self.rest.get_rdf_group(array, rdf_group_no) if group_details['numDevices'] == 0: rep_info = { 'remote_array': remote_array, 'rdf_group_no': rdf_group_no, 'rep_mode': rep_mode, 'slo': rep_extra_specs['slo'], 'extra_specs': rep_extra_specs, 'target_device_id': None} return ('first_vol_in_rdf_group', None, rep_info, rep_extra_specs, False) # Flags for exception handling (rdf_pair_created, remote_sg_get, add_to_mgmt_sg, r2_device_id, tgt_sg_name) = (False, False, False, False, False) try: if group_details['numDevices'] > 0 and ( rep_mode in [utils.REP_ASYNC, utils.REP_METRO]): mgmt_sg_name = self.utils.get_rdf_management_group_name( rep_config) self.rest.srdf_suspend_replication( array, mgmt_sg_name, rdf_group_no, rep_extra_specs) rep_extra_specs['mgmt_sg_name'] = mgmt_sg_name resume_rdf = True pair_info = self.rest.srdf_create_device_pair( array, rdf_group_no, rep_mode, device_id, rep_extra_specs, self.next_gen) rdf_pair_created = True r2_device_id = pair_info['tgt_device'] device_uuid = self.utils.get_volume_element_name(volume.id) self.rest.rename_volume(remote_array, r2_device_id, device_uuid) if rep_extra_specs['srp'].lower() != extra_specs['srp'].lower(): LOG.warning("The source %(src)s and target %(tgt)s array SRPs " "are different.", {'src': extra_specs['srp'], 'tgt': rep_extra_specs['srp']}) target_srp = extra_specs['srp'] else: target_srp = rep_extra_specs['srp'] tgt_sg_name = self.masking.get_or_create_default_storage_group( remote_array, target_srp, rep_extra_specs['slo'], rep_extra_specs['workload'], rep_extra_specs, disable_compression, is_re=True, rep_mode=rep_mode) remote_sg_get = True self.rest.add_vol_to_sg(remote_array, tgt_sg_name, r2_device_id, rep_extra_specs, force=True) if rep_mode in [utils.REP_ASYNC, utils.REP_METRO]: self._add_volume_to_rdf_management_group( array, device_id, device_uuid, remote_array, r2_device_id, extra_specs) add_to_mgmt_sg = True rep_status = REPLICATION_ENABLED target_name = 
self.utils.get_volume_element_name(volume.id) rep_info_dict = self.volume_metadata.gather_replication_info( volume.id, 'replication', False, rdf_group_no=rdf_group_no, target_name=target_name, remote_array=remote_array, target_device_id=r2_device_id, replication_status=rep_status, rep_mode=rep_mode, rdf_group_label=rep_config['rdf_group_label'], target_array_model=rep_extra_specs['target_array_model'], mgmt_sg_name=rep_extra_specs['mgmt_sg_name']) return (rep_status, pair_info, rep_info_dict, rep_extra_specs, resume_rdf) except Exception as e: try: self._cleanup_on_configure_volume_replication_failure( resume_rdf, rdf_pair_created, remote_sg_get, add_to_mgmt_sg, device_id, r2_device_id, mgmt_sg_name, array, remote_array, rdf_group_no, extra_specs, rep_extra_specs, volume, tgt_sg_name) except Exception: # Don't care what this is, just catch it to prevent exception # occurred while handling another exception type stack trace. LOG.debug( 'Configure volume replication cleanup - Could not revert ' 'volume to non-rdf state post configure volume ' 'replication exception.') raise e def _cleanup_on_configure_volume_replication_failure( self, resume_rdf, rdf_pair_created, remote_sg_get, add_to_mgmt_sg, r1_device_id, r2_device_id, mgmt_sg_name, array, remote_array, rdf_group_no, extra_specs, rep_extra_specs, volume, tgt_sg_name): """Attempt rollback to previous volume state on setup rep exception. :param resume_rdf: does the rdfg need to be resumed :param rdf_pair_created: was an rdf pair created :param remote_sg_get: was a remote storage group possibly created :param add_to_mgmt_sg: was the volume added to a management group :param r1_device_id: local device id :param r2_device_id: remote device id :param mgmt_sg_name: rdfg management storage group name :param array: array :param remote_array: remote array :param rdf_group_no: rdf group number :param extra_specs: extra specs :param rep_extra_specs: rep extra specs :param volume: volume :param tgt_sg_name: remote replication storage group name """ if resume_rdf and not rdf_pair_created: LOG.debug('Configure volume replication cleanup - Attempt to ' 'resume replication.') self.rest.srdf_resume_replication( array, mgmt_sg_name, rdf_group_no, rep_extra_specs) LOG.debug('Configure volume replication cleanup - Resume ' 'replication successful.') elif rdf_pair_created: volume_name = self.utils.get_volume_element_name(volume.id) LOG.debug('Configure volume replication cleanup - Attempt to ' 'break new rdf pair.') rep_extra_specs, resume_rdf = ( self.break_rdf_device_pair_session( array, r1_device_id, volume_name, extra_specs, volume)) if resume_rdf: self.rest.srdf_resume_replication( array, rep_extra_specs['mgmt_sg_name'], rep_extra_specs['rdf_group_no'], rep_extra_specs) LOG.debug('Configure volume replication cleanup - Break new rdf ' 'pair successful.') if add_to_mgmt_sg: LOG.debug('Configure volume replication cleanup - Attempt to ' 'remove r1 device from mgmt sg.') self.masking.remove_vol_from_storage_group( array, r1_device_id, mgmt_sg_name, '', extra_specs) LOG.debug('Configure volume replication cleanup - Remove r1 ' 'device from mgmt sg successful.') LOG.debug('Configure volume replication cleanup - Attempt to ' 'remove r2 device from mgmt sg.') self.masking.remove_vol_from_storage_group( remote_array, r2_device_id, mgmt_sg_name, '', rep_extra_specs) LOG.debug('Configure volume replication cleanup - Remove r2 ' 'device from mgmt sg successful.') if remote_sg_get: volumes = self.rest.get_volumes_in_storage_group( remote_array, tgt_sg_name) if 
len(volumes) == 0: LOG.debug('Configure volume replication cleanup - Attempt ' 'to delete empty target sg.') self.rest.delete_storage_group(remote_array, tgt_sg_name) LOG.debug('Configure volume replication cleanup - Delete ' 'empty target sg successful.') elif r2_device_id in volumes: LOG.debug('Configure volume replication cleanup - Attempt ' 'to remove r2 device and delete sg.') self.masking.remove_vol_from_storage_group( remote_array, r2_device_id, tgt_sg_name, '', rep_extra_specs) LOG.debug('Configure volume replication cleanup - Remove ' 'r2 device and delete sg successful.') def _add_volume_to_rdf_management_group( self, array, device_id, volume_name, remote_array, target_device_id, extra_specs): """Add a volume to its rdf management group. :param array: the array serial number :param device_id: the device id :param volume_name: the volume name :param remote_array: the remote array :param target_device_id: the target device id :param extra_specs: the extra specifications :raises: VolumeBackendAPIException """ grp_name = self.utils.get_rdf_management_group_name( extra_specs[utils.REP_CONFIG]) try: self.provision.get_or_create_group(array, grp_name, extra_specs) self.masking.add_volume_to_storage_group( array, device_id, grp_name, volume_name, extra_specs, force=True) # Add remote volume self.provision.get_or_create_group( remote_array, grp_name, extra_specs) self.masking.add_volume_to_storage_group( remote_array, target_device_id, grp_name, volume_name, extra_specs, force=True) except Exception as e: exception_message = ( _('Exception occurred adding volume %(vol)s to its ' 'rdf management group - the exception received was: %(e)s') % {'vol': volume_name, 'e': str(e)}) LOG.error(exception_message) raise exception.VolumeBackendAPIException( message=exception_message) def break_rdf_device_pair_session(self, array, device_id, volume_name, extra_specs, volume): """Delete RDF device pair deleting R2 volume but leaving R1 in place. 
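The RDF group (for Async/Metro) or the device pair (for Sync) is first allowed to reach a synced state, replication is suspended and the pair deleted, the volume is removed from the management and remote storage groups, and the R2 device is deleted from the remote SRP. The returned resume flag indicates whether replication must be resumed for the remaining volumes in the storage group.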
:param array: the array serial number :param device_id: the device id :param volume_name: the volume name :param extra_specs: the volume extra specifications :param volume: the volume being used :returns: replication extra specs, resume rdf -- dict, bool """ LOG.debug('Starting replication cleanup for RDF pair source device: ' '%(d_id)s.', {'d_id': device_id}) # Set session attributes resume_rdf, mgmt_sg_name = True, None rep_config = extra_specs[utils.REP_CONFIG] rep_extra_specs = self._get_replication_extra_specs( extra_specs, rep_config) remote_array = rep_extra_specs['array'] rdfg_no = rep_extra_specs['rdf_group_no'] remote_device = self.rest.get_rdf_pair_volume( array, rdfg_no, device_id) remote_device_id = remote_device['remoteVolumeName'] extra_specs[utils.FORCE_VOL_EDIT] = True rep_extra_specs[utils.FORCE_VOL_EDIT] = True # Get the names of the SGs associated with the volume on the R2 array # before any operations are carried out - this will be used later for # remove vol operations r1_sg_names = self.rest.get_storage_groups_from_volume( array, device_id) r2_sg_names = self.rest.get_storage_groups_from_volume( remote_array, remote_device_id) if rep_config['mode'] in [utils.REP_ASYNC, utils.REP_METRO]: mgmt_sg_name = self.utils.get_rdf_management_group_name(rep_config) sg_name = mgmt_sg_name rdf_group_state = self.rest.get_storage_group_rdf_group_state( array, sg_name, rdfg_no) if len(rdf_group_state) > 1 or ( rdf_group_state[0] not in utils.RDF_SYNCED_STATES): self.rest.wait_for_rdf_group_sync( array, sg_name, rdfg_no, rep_extra_specs) else: sg_name = r1_sg_names[0] rdf_pair = self.rest.get_rdf_pair_volume( array, rdfg_no, device_id) rdf_pair_state = rdf_pair[utils.RDF_PAIR_STATE] if rdf_pair_state.lower() not in utils.RDF_SYNCED_STATES: self.rest.wait_for_rdf_pair_sync( array, rdfg_no, device_id, rep_extra_specs) # Flags for exception handling rdfg_suspended, pair_deleted, r2_sg_remove = False, False, False try: self.rest.srdf_suspend_replication( array, sg_name, rdfg_no, rep_extra_specs) rdfg_suspended = True self.rest.srdf_delete_device_pair(array, rdfg_no, device_id) pair_deleted = True # Remove the volume from the R1 RDFG mgmt SG (R1) if rep_config['mode'] in [utils.REP_ASYNC, utils.REP_METRO]: self.masking.remove_volume_from_sg( array, device_id, volume_name, mgmt_sg_name, extra_specs) # Remove volume from R2 replication SGs for r2_sg_name in r2_sg_names: self.masking.remove_volume_from_sg( remote_array, remote_device_id, volume_name, r2_sg_name, rep_extra_specs) r2_sg_remove = True if mgmt_sg_name: if not self.rest.get_volumes_in_storage_group( array, mgmt_sg_name): resume_rdf = False else: if not self.rest.get_volumes_in_storage_group(array, sg_name): resume_rdf = False if resume_rdf: rep_extra_specs['mgmt_sg_name'] = sg_name self._delete_from_srp(remote_array, remote_device_id, volume_name, extra_specs) return rep_extra_specs, resume_rdf except Exception as e: try: self._cleanup_on_break_rdf_device_pair_session_failure( rdfg_suspended, pair_deleted, r2_sg_remove, array, mgmt_sg_name, rdfg_no, extra_specs, r2_sg_names, device_id, remote_array, remote_device_id, volume, volume_name, rep_extra_specs) except Exception: # Don't care what this is, just catch it to prevent exception # occurred while handling another exception type stack trace. 
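# The rollback above is best effort only; the original exception is
# re-raised from the finally clause below so the break-pair failure is still
# reported to the caller even if this cleanup also fails.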
LOG.debug( 'Break rdf pair cleanup - Could not revert ' 'volume to previous rdf enabled state post break rdf ' 'device pair exception replication exception.') finally: raise e def _cleanup_on_break_rdf_device_pair_session_failure( self, rdfg_suspended, pair_deleted, r2_sg_remove, array, management_sg, rdf_group_no, extra_specs, r2_sg_names, device_id, remote_array, remote_device_id, volume, volume_name, rep_extra_specs): """Attempt rollback to previous volume state on remove rep exception. :param rdfg_suspended: was the rdf group suspended :param pair_deleted: was the rdf pair deleted :param r2_sg_remove: was the remote volume removed from its sg :param array: array :param management_sg: rdf management storage group name :param rdf_group_no: rdf group number :param extra_specs: extra specs :param r2_sg_names: remote volume storage group names :param device_id: device id :param remote_array: remote array sid :param remote_device_id: remote device id :param volume: volume :param volume_name: volume name :param rep_extra_specs: rep extra specs """ if rdfg_suspended and not pair_deleted: LOG.debug('Break RDF pair cleanup - Attempt to resume RDFG.') self.rest.srdf_resume_replication( array, management_sg, rdf_group_no, extra_specs) LOG.debug('Break RDF pair cleanup - Resume RDFG successful.') elif pair_deleted: LOG.debug('Break RDF pair cleanup - Attempt to cleanup remote ' 'volume storage groups.') # Need to cleanup the remote SG in case of first RDFG vol scenario if not r2_sg_remove: for r2_sg_name in r2_sg_names: self.masking.remove_volume_from_sg( remote_array, remote_device_id, volume_name, r2_sg_name, rep_extra_specs) LOG.debug('Break RDF pair cleanup - Cleanup remote volume storage ' 'groups successful.') LOG.debug('Break RDF pair cleanup - Attempt to delete remote ' 'volume.') self._delete_from_srp(remote_array, remote_device_id, volume_name, extra_specs) LOG.debug('Break RDF pair cleanup - Delete remote volume ' 'successful.') LOG.debug('Break RDF pair cleanup - Attempt to revert to ' 'original rdf pair.') (rep_status, __, __, rep_extra_specs, resume_rdf) = ( self.configure_volume_replication( array, volume, device_id, extra_specs)) if rep_status == 'first_vol_in_rdf_group': volume_name = self.utils.get_volume_element_name(volume.id) self._protect_storage_group( array, device_id, volume, volume_name, rep_extra_specs) if resume_rdf: self.rest.srdf_resume_replication( array, rep_extra_specs['mgmt_sg_name'], rep_extra_specs['rdf_group_no'], rep_extra_specs) LOG.debug('Break RDF pair cleanup - Revert to original rdf ' 'pair successful.') def break_rdf_device_pair_session_promotion( self, array, device_id, volume_name, extra_specs): """Delete RDF device pair deleting R2 volume but leaving R1 in place. 
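Unlike break_rdf_device_pair_session, this promotion variant does not wait for the pair to sync or clean up the remote device; it only suspends a Metro group that is still in an active state, deletes the device pair and removes the volume from the RDF management group.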
:param array: the array serial number :param device_id: the device id :param volume_name: the volume name :param extra_specs: the volume extra specifications """ LOG.debug('Starting promotion replication cleanup for RDF pair ' 'source device: %(d_id)s.', {'d_id': device_id}) mgmt_sg_name = None rep_config = extra_specs[utils.REP_CONFIG] rdfg_no = extra_specs['rdf_group_no'] extra_specs[utils.FORCE_VOL_EDIT] = True if rep_config['mode'] in [utils.REP_ASYNC, utils.REP_METRO]: mgmt_sg_name = self.utils.get_rdf_management_group_name( rep_config) if rep_config['mode'] == utils.REP_METRO: group_states = self.rest.get_storage_group_rdf_group_state( array, mgmt_sg_name, rdfg_no) group_states = set([x.lower() for x in group_states]) metro_active_states = { utils.RDF_ACTIVE, utils.RDF_ACTIVEACTIVE, utils.RDF_ACTIVEBIAS} active_state_found = ( bool(group_states.intersection(metro_active_states))) if active_state_found: LOG.debug('Found Metro RDF in active state during promotion, ' 'attempting to suspend.') try: self.rest.srdf_suspend_replication( array, mgmt_sg_name, rdfg_no, extra_specs) except exception.VolumeBackendAPIException: LOG.error( 'Found Metro rdf pair in active state during ' 'promotion. Attempt to suspend this group using ' 'storage group %s failed. Please move the rdf pairs ' 'in this storage group to a non-active state and ' 'retry the retype operation.', mgmt_sg_name) raise self.rest.srdf_delete_device_pair(array, rdfg_no, device_id) # Remove the volume from the R1 RDFG mgmt SG (R1) if rep_config['mode'] in [utils.REP_ASYNC, utils.REP_METRO]: self.masking.remove_volume_from_sg( array, device_id, volume_name, mgmt_sg_name, extra_specs) @coordination.synchronized('emc-{rdf_group}-rdf') def _cleanup_remote_target( self, array, volume, remote_array, device_id, target_device, rdf_group, volume_name, rep_extra_specs): """Clean-up remote replication target after exception or on deletion. :param array: the array serial number :param volume: the volume object :param remote_array: the remote array serial number :param device_id: the source device id :param target_device: the target device id :param rdf_group: the RDF group :param volume_name: the volume name :param rep_extra_specs: replication extra specifications """ are_vols_paired, __, pair_state = ( self.rest.are_vols_rdf_paired( array, remote_array, device_id, target_device)) if are_vols_paired: async_grp = None rep_mode = rep_extra_specs['rep_mode'] if rep_mode in [utils.REP_ASYNC, utils.REP_METRO]: async_grp = self.utils.get_rdf_management_group_name( rep_extra_specs[utils.REP_CONFIG]) sg_name = self.rest.get_storage_groups_from_volume( array, device_id) self.provision.break_rdf_relationship( array, device_id, sg_name, rdf_group, rep_extra_specs, pair_state) self.masking.remove_and_reset_members( remote_array, volume, target_device, volume_name, rep_extra_specs, sg_name) if async_grp: self.masking.remove_and_reset_members( remote_array, volume, target_device, volume_name, rep_extra_specs, async_grp) rdfg_details = self.rest.get_rdf_group(array, rdf_group) if rdfg_details and int(rdfg_details.get('numDevices', 0)): self.rest.srdf_resume_replication( array, sg_name, rdf_group, rep_extra_specs) self._delete_from_srp( remote_array, target_device, volume_name, rep_extra_specs) def _cleanup_replication_source( self, array, volume, volume_name, volume_dict, extra_specs): """Cleanup a remote replication source volume on failure. 
If replication setup fails at any stage on a new volume create, we must clean-up the source instance as the cinder database won't be updated with the provider_location. This means the volume cannot be properly deleted from the array by cinder. :param array: the array serial number :param volume: the volume object :param volume_name: the name of the volume :param volume_dict: the source volume dictionary :param extra_specs: the extra specifications """ LOG.warning( "Replication failed. Cleaning up the source volume. " "Volume name: %(sourceName)s ", {'sourceName': volume_name}) device_id = volume_dict['device_id'] # Check if volume is snap target (e.g. if clone volume) self._cleanup_device_snapvx(array, device_id, extra_specs) # Remove from any storage groups and cleanup replication self._remove_vol_and_cleanup_replication( array, device_id, volume_name, extra_specs, volume) self._delete_from_srp( array, device_id, volume_name, extra_specs) def get_rdf_details(self, array, rep_config): """Retrieves an SRDF group instance. :param array: the array serial number :param rep_config: rep config to get details of :returns: rdf_group_no, remote_array """ if not self.rep_configs: exception_message = (_("Replication is not configured on " "backend: %(backend)s.") % {'backend': self.configuration.safe_get( 'volume_backend_name')}) LOG.error(exception_message) raise exception.VolumeBackendAPIException( message=exception_message) remote_array = rep_config['array'] rdf_group_label = rep_config['rdf_group_label'] LOG.info("Replication group: %(RDFGroup)s.", {'RDFGroup': rdf_group_label}) rdf_group_no = self.rest.get_rdf_group_number(array, rdf_group_label) if rdf_group_no is None: exception_message = (_("Cannot find replication group: " "%(RDFGroup)s. Please check the name " "and the array") % {'RDFGroup': rdf_group_label}) LOG.error(exception_message) raise exception.VolumeBackendAPIException( message=exception_message) LOG.info("Found RDF group number: %(RDFGroup)s.", {'RDFGroup': rdf_group_no}) return rdf_group_no, remote_array def failover(self, volumes, secondary_id=None, groups=None): """Fails over the volumes on a host back and forth. Driver needs to update following info for failed-over volume: 1. provider_location: update array details 2. 
replication_status: new status for replication-enabled volume :param volumes: the list of volumes to be failed over :param secondary_id: the target backend :param groups: replication groups :returns: secondary_id, volume_update_list, group_update_list :raises: InvalidReplicationTarget """ volume_update_list = list() group_update_list = list() primary_array = self.configuration.safe_get(utils.POWERMAX_ARRAY) array_list = self.rest.get_arrays_list() is_valid, msg = self.utils.validate_failover_request( self.failedover, secondary_id, self.rep_configs, primary_array, array_list, self.promotion) if not is_valid: LOG.error(msg) raise exception.InvalidReplicationTarget(msg) group_fo = None if not self.failedover: self.failedover = True if not secondary_id: secondary_id = utils.RDF_FAILEDOVER_STATE elif secondary_id == 'default': self.failedover = False group_fo = 'default' if secondary_id != utils.PMAX_FAILOVER_START_ARRAY_PROMOTION: volume_update_list, group_update_list = ( self._populate_volume_and_group_update_lists( volumes, groups, group_fo)) LOG.info("Failover host completed.") return secondary_id, volume_update_list, group_update_list def failover_completed(self, secondary_id=None, isAA=False): """This method is called after failover for clustered backends.""" if secondary_id == utils.PMAX_FAILOVER_START_ARRAY_PROMOTION: self.promotion = True LOG.info("Enabled array promotion.") else: if isAA: if secondary_id == 'failed over': self.failedover = True elif not secondary_id: self.failedover = False if self.promotion: self.promotion = False LOG.info("Disabled array promotion.") else: if not secondary_id: self.failedover = True elif secondary_id == 'default': self.failedover = False if self.promotion: self.promotion = False LOG.info("Disabled array promotion.") LOG.info('Failover completion completed.') def _populate_volume_and_group_update_lists( self, volumes, groups, group_fo): """Populate volume and group update lists :param volumes: the list of volumes to be failed over :param groups: replication groups :param group_fo: group fail over :returns: volume_update_list, group_update_list """ volume_update_list = [] group_update_list = [] # Since we are updating volumes if a volume is in a group, copy to # a new variable otherwise we will be updating the replicated_vols # variable assigned in manager.py's failover method. 
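# Descriptive note (added): each per-volume entry built below follows the update
# structure the volume manager expects on a host failover, i.e. a dict of the form
# {'volume_id': <cinder volume id>, 'updates': {'replication_status': ..., ...}}.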
vols = deepcopy(volumes) if groups: for group in groups: group_vol_list = [] for index, vol in enumerate(vols): if vol.group_id == group.id: group_vol_list.append(vols[index]) vols = [vol for vol in vols if vol not in group_vol_list] grp_update, vol_updates = ( self.failover_replication( None, group, group_vol_list, group_fo, host=True)) group_update_list.append({'group_id': group.id, 'updates': grp_update}) volume_update_list += vol_updates non_rep_vol_list, sync_vol_dict, async_vol_dict, metro_vol_list = ( [], {}, {}, []) for volume in vols: array = ast.literal_eval(volume.provider_location)['array'] extra_specs = self._initial_setup(volume) extra_specs[utils.ARRAY] = array if self.utils.is_replication_enabled(extra_specs): rep_mode = extra_specs.get(utils.REP_MODE, utils.REP_SYNC) backend_id = self._get_replicated_volume_backend_id( volume) rep_config = self.utils.get_rep_config( backend_id, self.rep_configs) if rep_mode == utils.REP_SYNC: key = rep_config['rdf_group_label'] sync_vol_dict.setdefault(key, []).append(volume) elif rep_mode == utils.REP_ASYNC: vol_grp_name = self.utils.get_rdf_management_group_name( rep_config) async_vol_dict.setdefault(vol_grp_name, []).append(volume) else: metro_vol_list.append(volume) else: non_rep_vol_list.append(volume) if len(sync_vol_dict) > 0: for key, sync_vol_list in sync_vol_dict.items(): vol_updates = ( self._update_volume_list_from_sync_vol_list( sync_vol_list, group_fo)) volume_update_list += vol_updates if len(async_vol_dict) > 0: for vol_grp_name, async_vol_list in async_vol_dict.items(): __, vol_updates = self._failover_replication( async_vol_list, None, vol_grp_name, secondary_backend_id=group_fo, host=True) volume_update_list += vol_updates if len(metro_vol_list) > 0: __, vol_updates = ( self._failover_replication( metro_vol_list, None, None, secondary_backend_id=group_fo, host=True, is_metro=True)) volume_update_list += vol_updates if len(non_rep_vol_list) > 0: if self.promotion: # Volumes that were promoted will have a replication state # of error with no other replication metadata. Use this to # determine which volumes should be updated to have a replication # state of disabled. for vol in non_rep_vol_list: volume_update_list.append({ 'volume_id': vol.id, 'updates': { 'replication_status': REPLICATION_DISABLED}}) elif self.failedover: # Since the array has been failed-over, # volumes without replication should be in error.
for vol in non_rep_vol_list: volume_update_list.append({ 'volume_id': vol.id, 'updates': {'status': 'error'}}) return volume_update_list, group_update_list def _update_volume_list_from_sync_vol_list( self, sync_vol_list, group_fo): """Update the volume update list from the synced volume list :param sync_vol_list: synced volume list :param group_fo: group fail over :returns: vol_updates """ extra_specs = self._initial_setup(sync_vol_list[0]) replication_details = ast.literal_eval( sync_vol_list[0].replication_driver_data) remote_array = replication_details.get(utils.ARRAY) extra_specs[utils.ARRAY] = remote_array temp_grp_name = self.utils.get_temp_failover_grp_name( extra_specs[utils.REP_CONFIG]) self.provision.create_volume_group( remote_array, temp_grp_name, extra_specs) device_ids = self._get_volume_device_ids( sync_vol_list, remote_array, remote_volumes=True) self.masking.add_volumes_to_storage_group( remote_array, device_ids, temp_grp_name, extra_specs) __, vol_updates = ( self._failover_replication( sync_vol_list, None, temp_grp_name, secondary_backend_id=group_fo, host=True)) self.rest.delete_storage_group(remote_array, temp_grp_name) return vol_updates def _get_replication_extra_specs(self, extra_specs, rep_config): """Get replication extra specifications. Called when target array operations are necessary - on create, extend, etc and when volume is failed over. :param extra_specs: the extra specifications :param rep_config: the replication configuration :returns: repExtraSpecs - dict """ if not self.utils.is_replication_enabled(extra_specs): # Skip this if the volume is not replicated return rep_extra_specs = deepcopy(extra_specs) rep_extra_specs[utils.ARRAY] = rep_config['array'] rep_extra_specs[utils.SRP] = rep_config['srp'] if extra_specs[utils.SRP].lower() != ( rep_extra_specs[utils.SRP].lower()): LOG.warning("The source %(src)s and target %(tgt)s array SRPs " "are different.", {'src': extra_specs[utils.SRP], 'tgt': rep_extra_specs[utils.SRP]}) rep_extra_specs[utils.PORTGROUPNAME] = rep_config['portgroup'] # Get the RDF Group label & number array = (rep_config[utils.ARRAY] if self.promotion else extra_specs[utils.ARRAY]) rep_extra_specs['rdf_group_label'] = rep_config['rdf_group_label'] rdf_group_no, __ = self.get_rdf_details( array, rep_config) rep_extra_specs['rdf_group_no'] = rdf_group_no # Get the SRDF wait/retries settings rep_extra_specs['sync_retries'] = rep_config['sync_retries'] rep_extra_specs['sync_interval'] = rep_config['sync_interval'] if rep_config['mode'] == utils.REP_METRO: exempt = True if self.next_gen else False rep_extra_specs[utils.RDF_CONS_EXEMPT] = exempt bias = True if rep_config.get(utils.METROBIAS) else False rep_extra_specs[utils.METROBIAS] = bias rep_extra_specs[utils.DISABLE_PROTECTED_SNAP] =\ self.utils.is_protected_snap_disabled(extra_specs) # If disable compression is set, check if target array is all flash do_disable_compression = self.utils.is_compression_disabled( extra_specs) if do_disable_compression: if not self.rest.is_compression_capable( rep_extra_specs[utils.ARRAY]): rep_extra_specs.pop(utils.DISABLECOMPRESSION, None) LOG.warning( "Array %(array)s is not compression capable. Any attempt " "to disable compression using an extra spec on the volume " "type will be ignored.", {'array': rep_extra_specs[utils.ARRAY]}) # Check to see if SLO and Workload are configured on the target array. 
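# Descriptive note (added): if the remote array cannot honour the requested SLO or
# Workload, the corresponding rep_extra_specs value is reset to None below so that
# provisioning on the target proceeds without that setting rather than failing.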
rep_extra_specs['target_array_model'], next_gen = ( self.rest.get_array_model_info(rep_config['array'])) if extra_specs[utils.SLO]: is_valid_slo, is_valid_workload = ( self.provision.verify_slo_workload( rep_extra_specs[utils.ARRAY], extra_specs[utils.SLO], rep_extra_specs[utils.WORKLOAD], next_gen, rep_extra_specs['target_array_model'])) if not is_valid_slo: LOG.warning("The target array does not support the " "storage pool setting for SLO %(slo)s, " "setting to NONE.", {'slo': extra_specs[utils.SLO]}) rep_extra_specs[utils.SLO] = None if not is_valid_workload: LOG.warning("The target array does not support the " "storage pool setting for workload " "%(workload)s, setting to NONE.", {'workload': extra_specs[utils.WORKLOAD]}) rep_extra_specs[utils.WORKLOAD] = None return rep_extra_specs @staticmethod def get_secondary_stats_info(rep_config, array_info): """On failover, report on secondary array statistics. :param rep_config: the replication configuration :param array_info: the array info :returns: secondary_info - dict """ secondary_info = array_info.copy() secondary_info['SerialNumber'] = str(rep_config['array']) secondary_info['srpName'] = rep_config['srp'] return secondary_info def create_group(self, context, group): """Creates a generic volume group. :param context: the context :param group: the group object to be created :returns: dict -- modelUpdate :raises: VolumeBackendAPIException, NotImplementedError, InvalidInput """ if (not volume_utils.is_group_a_cg_snapshot_type(group) and not group.is_replicated): raise NotImplementedError() # If volume types are added during creation, validate replication # extra_spec consistency across volume types. extra_specs_list = list() for volume_type_id in group.get('volume_type_ids'): vt_extra_specs = self.utils.get_volumetype_extra_specs( None, volume_type_id) extra_specs_list.append(vt_extra_specs) if group.is_replicated: self.utils.validate_replication_group_config( self.rep_configs, extra_specs_list) else: self.utils.validate_non_replication_group_config(extra_specs_list) model_update = {'status': fields.GroupStatus.AVAILABLE} LOG.info("Create generic volume group: %(group)s.", {'group': group.id}) vol_grp_name = self.utils.update_volume_group_name(group) try: array, interval_retries_dict = self._get_volume_group_info(group) self.provision.create_volume_group( array, vol_grp_name, interval_retries_dict) if group.is_replicated: LOG.debug("Group: %(group)s is a replication group.", {'group': group.id}) target_backend_id = extra_specs_list[0].get( utils.REPLICATION_DEVICE_BACKEND_ID, utils.BACKEND_ID_LEGACY_REP) target_rep_config = self.utils.get_rep_config( target_backend_id, self.rep_configs) # Create remote group __, remote_array = self.get_rdf_details( array, target_rep_config) self.provision.create_volume_group( remote_array, vol_grp_name, interval_retries_dict) model_update.update({ 'replication_status': fields.ReplicationStatus.ENABLED}) except Exception: exception_message = (_("Failed to create generic volume group:" " %(volGrpName)s.") % {'volGrpName': vol_grp_name}) LOG.error(exception_message) raise exception.VolumeBackendAPIException( message=exception_message) return model_update def delete_group(self, context, group, volumes): """Deletes a generic volume group. 
:param context: the context :param group: the group object to be deleted :param volumes: the list of volumes in the generic group to be deleted :returns: dict -- modelUpdate :returns: list -- list of volume model updates :raises: NotImplementedError """ LOG.info("Delete generic volume group: %(group)s.", {'group': group.id}) if (not volume_utils.is_group_a_cg_snapshot_type(group) and not group.is_replicated): raise NotImplementedError() model_update, volumes_model_update = self._delete_group( group, volumes) return model_update, volumes_model_update def _delete_group(self, group, volumes): """Helper function to delete a volume group. :param group: the group object :param volumes: the member volume objects :returns: model_update, volumes_model_update """ volumes_model_update = list() array, interval_retries_dict = self._get_volume_group_info(group) vol_grp_name = None volume_group = self._find_volume_group( array, group) if volume_group is None: LOG.error("Cannot find generic volume group %(volGrpName)s.", {'volGrpName': group.id}) model_update = {'status': fields.GroupStatus.DELETED} volumes_model_update = self.utils.update_volume_model_updates( volumes_model_update, volumes, group.id, status='deleted') return model_update, volumes_model_update if 'name' in volume_group: vol_grp_name = volume_group['name'] volume_device_ids = self._get_members_of_volume_group( array, vol_grp_name) deleted_volume_device_ids = list() # If volumes are being deleted along with the group, ensure snapshot # cleanup completes before doing any replication/storage group cleanup. remaining_device_snapshots = list() remaining_snapvx_targets = list() def _cleanup_snapshots(device_id): self._cleanup_device_snapvx(array, device_id, extra_specs) snapshots = self.rest.get_volume_snapshot_list(array, device_id) __, snapvx_target_details = self.rest.find_snap_vx_sessions( array, device_id, tgt_only=True) if snapshots: snapshot_names = ', '.join( snap.get('snapshotName') for snap in snapshots) snap_details = { 'device_id': device_id, 'snapshot_names': snapshot_names} remaining_device_snapshots.append(snap_details) if snapvx_target_details: source_vol_id = snapvx_target_details.get('source_vol_id') snap_name = snapvx_target_details.get('snap_name') target_details = { 'device_id': device_id, 'source_vol_id': source_vol_id, 'snapshot_name': snap_name} remaining_snapvx_targets.append(target_details) vol_not_deleted = list() for vol in volumes: extra_specs = self._initial_setup(vol) device_id = self._find_device_on_array(vol, extra_specs) if device_id: _cleanup_snapshots(device_id) else: LOG.debug('Cannot find device id for volume. It is ' 'possible this information was not persisted.') vol_not_deleted.append(vol) if len(vol_not_deleted) == len(volume_device_ids): for volume_device_id in volume_device_ids: _cleanup_snapshots(volume_device_id) # Fail out if volumes to be deleted still have snapshots. if remaining_device_snapshots: for details in remaining_device_snapshots: device_id = details.get('device_id') snapshot_names = details.get('snapshot_names') LOG.error('Cannot delete device %s, it has the ' 'following active snapshots, %s.', device_id, snapshot_names) raise exception.VolumeBackendAPIException(_( 'Group volumes have active snapshots. Cannot perform group ' 'delete. Wait for snapvx sessions to complete their ' 'processes or remove volumes from group before attempting ' 'to delete again. 
Please see previously logged error ' 'message for device and snapshot details.')) if remaining_snapvx_targets: for details in remaining_snapvx_targets: device_id = details.get('device_id') snap_name = details.get('snapshot_name') source_vol_id = details.get('source_vol_id') LOG.error('Cannot delete device %s, it is currently a target ' 'of snapshot %s with source device id %s', device_id, snap_name, source_vol_id) raise exception.VolumeBackendAPIException(_( 'Some group volumes are targets of a snapvx session. Cannot ' 'perform group delete. Wait for snapvx sessions to complete ' 'their processes or remove volumes from group before ' 'attempting to delete again. Please see previously logged ' 'error message for device and snapshot details.')) # Remove replication for group, if applicable if group.is_replicated: vt_extra_specs = self.utils.get_volumetype_extra_specs( None, group.get('volume_types')[0]['id']) target_backend_id = vt_extra_specs.get( utils.REPLICATION_DEVICE_BACKEND_ID, utils.BACKEND_ID_LEGACY_REP) target_rep_config = self.utils.get_rep_config( target_backend_id, self.rep_configs) self._cleanup_group_replication( array, vol_grp_name, volume_device_ids, interval_retries_dict, target_rep_config) try: if volume_device_ids: def _delete_vol(dev_id): if group.is_replicated: # Set flag to True if replicated. extra_specs[utils.FORCE_VOL_EDIT] = True if dev_id in volume_device_ids: self.masking.remove_and_reset_members( array, vol, dev_id, vol.name, extra_specs, False) self._delete_from_srp( array, dev_id, "group vol", extra_specs) else: LOG.debug("Volume not found on the array.") # Add the device id to the deleted list deleted_volume_device_ids.append(dev_id) # First remove all the volumes from the SG self.masking.remove_volumes_from_storage_group( array, volume_device_ids, vol_grp_name, interval_retries_dict) for vol in volumes: extra_specs = self._initial_setup(vol) device_id = self._find_device_on_array(vol, extra_specs) if device_id: _delete_vol(device_id) if volume_device_ids != deleted_volume_device_ids: delta_list = list(set(volume_device_ids).difference( deleted_volume_device_ids)) for device_id in delta_list: _delete_vol(device_id) # Once all volumes are deleted then delete the SG self.rest.delete_storage_group(array, vol_grp_name) model_update = {'status': fields.GroupStatus.DELETED} volumes_model_update = self.utils.update_volume_model_updates( volumes_model_update, volumes, group.id, status='deleted') except Exception as e: LOG.error("Error deleting volume group. "
"Error received: %(e)s", {'e': e}) model_update = {'status': fields.GroupStatus.ERROR_DELETING} volumes_model_update = self._handle_delete_group_exception( deleted_volume_device_ids, volume_device_ids, group.id, array, vol_grp_name, interval_retries_dict, volumes_model_update) return model_update, volumes_model_update def _handle_delete_group_exception( self, deleted_volume_device_ids, volume_device_ids, group_id, array, vol_grp_name, interval_retries_dict, volumes_model_update): """Handle delete group exception and update volume model :param deleted_volume_device_ids: deleted volume device ids :param volume_device_ids: volume device ids :param group_id: group id :param array: array serial number :param vol_grp_name: volume group name :param interval_retries_dict: intervals and retries dict :param volumes_model_update: volume model update dict :returns: volumes_model_update """ # Update the volumes_model_update if deleted_volume_device_ids: LOG.debug("Device ids: %(dev)s are deleted.", {'dev': deleted_volume_device_ids}) volumes_not_deleted = [] for vol in volume_device_ids: if vol not in deleted_volume_device_ids: volumes_not_deleted.append(vol) if not deleted_volume_device_ids: volumes_model_update = self.utils.update_volume_model_updates( volumes_model_update, deleted_volume_device_ids, group_id, status='deleted') if not volumes_not_deleted: volumes_model_update = self.utils.update_volume_model_updates( volumes_model_update, volumes_not_deleted, group_id, status='error_deleting') # As a best effort try to add back the undeleted volumes to sg # Don't throw any exception in case of failure try: if not volumes_not_deleted: self.masking.add_volumes_to_storage_group( array, volumes_not_deleted, vol_grp_name, interval_retries_dict) except Exception as ex: LOG.error("Error in rollback - %(ex)s. " "Failed to add back volumes to sg %(sg_name)s", {'ex': ex, 'sg_name': vol_grp_name}) return volumes_model_update def _cleanup_group_replication( self, array, vol_grp_name, volume_device_ids, extra_specs, rep_config): """Cleanup remote replication. Break and delete the rdf replication relationship and delete the remote storage group and member devices. 
:param array: the array serial number :param vol_grp_name: the volume group name :param volume_device_ids: the device ids of the local volumes :param extra_specs: the extra specifications :param rep_config: the rep config to use for rdf operations """ extra_specs[utils.FORCE_VOL_EDIT] = True rdf_group_no, remote_array = self.get_rdf_details(array, rep_config) # Delete replication for group, if applicable group_details = self.rest.get_storage_group_rep( array, vol_grp_name) if group_details and group_details.get('rdf', False): self.rest.srdf_suspend_replication( array, vol_grp_name, rdf_group_no, extra_specs) if volume_device_ids: LOG.debug("Deleting remote replication for group %(sg)s", { 'sg': vol_grp_name}) self.rest.delete_storagegroup_rdf(array, vol_grp_name, rdf_group_no) remote_device_ids = self._get_members_of_volume_group( remote_array, vol_grp_name) # Remove volumes from remote replication group if remote_device_ids: self.masking.remove_volumes_from_storage_group( remote_array, remote_device_ids, vol_grp_name, extra_specs) for device_id in remote_device_ids: # Make sure they are not members of any other storage groups self.masking.remove_and_reset_members( remote_array, None, device_id, 'target_vol', extra_specs, False) self._delete_from_srp( remote_array, device_id, "group vol", extra_specs) # Once all volumes are deleted then delete the SG if self.rest.get_storage_group(remote_array, vol_grp_name): self.rest.delete_storage_group(remote_array, vol_grp_name) def create_group_snapshot(self, context, group_snapshot, snapshots): """Creates a generic volume group snapshot. :param context: the context :param group_snapshot: the group snapshot to be created :param snapshots: snapshots :returns: dict -- modelUpdate :returns: list -- list of snapshots :raises: VolumeBackendAPIException, NotImplementedError """ grp_id = group_snapshot.group_id source_group = group_snapshot.get('group') if not volume_utils.is_group_a_cg_snapshot_type(source_group): raise NotImplementedError() snapshots_model_update = [] LOG.info( "Create snapshot for %(grpId)s " "group Snapshot ID: %(group_snapshot)s.", {'group_snapshot': group_snapshot.id, 'grpId': grp_id}) try: snap_name = self.utils.truncate_string(group_snapshot.id, 19) self._create_group_replica(source_group, snap_name) except Exception as e: exception_message = (_("Failed to create snapshot for group: " "%(volGrpName)s. Exception received: %(e)s") % {'volGrpName': grp_id, 'e': str(e)}) LOG.error(exception_message) raise exception.VolumeBackendAPIException( message=exception_message) for snapshot in snapshots: src_dev_id = self._get_src_device_id_for_group_snap(snapshot) extra_specs = self._initial_setup(snapshot.volume) array = extra_specs['array'] snapshot_model_dict = { 'id': snapshot.id, 'provider_location': str( {'source_id': src_dev_id, 'snap_name': snap_name}), 'status': fields.SnapshotStatus.AVAILABLE} snapshot_model_dict = self.update_metadata( snapshot_model_dict, snapshot.metadata, self.get_snapshot_metadata( array, src_dev_id, snap_name)) snapshots_model_update.append(snapshot_model_dict) model_update = {'status': fields.GroupStatus.AVAILABLE} return model_update, snapshots_model_update def _get_src_device_id_for_group_snap(self, snapshot): """Get the source device id for the provider_location. 
:param snapshot: the snapshot object :returns: src_device_id """ volume = snapshot.volume extra_specs = self._initial_setup(volume) return self._find_device_on_array(volume, extra_specs) def _create_group_replica( self, source_group, snap_name): """Create a group replica. This can be a group snapshot or a cloned volume group. :param source_group: the group object :param snap_name: the name of the snapshot """ array, interval_retries_dict = self._get_volume_group_info( source_group) vol_grp_name = None volume_group = ( self._find_volume_group(array, source_group)) if volume_group: if 'name' in volume_group: vol_grp_name = volume_group['name'] if vol_grp_name is None: exception_message = ( _("Cannot find generic volume group %(group_id)s.") % {'group_id': source_group.id}) raise exception.VolumeBackendAPIException( message=exception_message) self.provision.create_group_replica( array, vol_grp_name, snap_name, interval_retries_dict) def delete_group_snapshot(self, context, group_snapshot, snapshots): """Delete a volume group snapshot. :param context: the context :param group_snapshot: the volume group snapshot to be deleted :param snapshots: the snapshot objects :returns: model_update, snapshots_model_update """ model_update, snapshots_model_update = self._delete_group_snapshot( group_snapshot, snapshots) return model_update, snapshots_model_update def _delete_group_snapshot(self, group_snapshot, snapshots): """Helper function to delete a group snapshot. :param group_snapshot: the group snapshot object :param snapshots: the snapshot objects :returns: model_update, snapshots_model_update :raises: VolumeBackendAPIException, NotImplementedError """ snapshots_model_update = [] source_group = group_snapshot.get('group') grp_id = group_snapshot.group_id if not volume_utils.is_group_a_cg_snapshot_type(source_group): raise NotImplementedError() LOG.info("Delete snapshot grpSnapshotId: %(grpSnapshotId)s" " for source group %(grpId)s", {'grpSnapshotId': group_snapshot.id, 'grpId': grp_id}) snap_name = self.utils.truncate_string(group_snapshot.id, 19) vol_grp_name = None try: # Get the array serial array, extra_specs = self._get_volume_group_info( source_group) # Get the volume group dict for getting the group name volume_group = (self._find_volume_group(array, source_group)) if volume_group and volume_group.get('name'): vol_grp_name = volume_group.get('name') if vol_grp_name is None: LOG.warning("Cannot find generic volume group %(grp_ss_id)s " "on array %(array)s.", {'grp_ss_id': group_snapshot.id, 'array': array}) else: self.provision.delete_group_replica( array, snap_name, vol_grp_name) model_update = {'status': fields.GroupSnapshotStatus.DELETED} for snapshot in snapshots: snapshots_model_update.append( {'id': snapshot.id, 'status': fields.SnapshotStatus.DELETED}) except Exception as e: LOG.error("Error deleting volume group snapshot. " "Error received: %(e)s", {'e': e}) model_update = { 'status': fields.GroupSnapshotStatus.ERROR_DELETING} return model_update, snapshots_model_update def _get_snap_src_dev_list(self, array, snapshots): """Get the list of source devices for a list of snapshots. :param array: the array serial number :param snapshots: the list of snapshot objects :returns: src_dev_ids """ src_dev_ids = [] for snap in snapshots: src_dev_id, snap_name, __ = self._parse_snap_info(array, snap) if snap_name: src_dev_ids.append(src_dev_id) return src_dev_ids def _find_volume_group(self, array, group): """Finds a volume group given the group.
:param array: the array serial number :param group: the group object :returns: volume group dictionary """ __, interval_retries_dict = self._get_volume_group_info(group) group_name = self.utils.update_volume_group_name(group) sg_name_filter = utils.LIKE_FILTER + group.id volume_group = self.rest.get_or_rename_storage_group_rep( array, group_name, interval_retries_dict, sg_filter=sg_name_filter) if not volume_group: LOG.warning("Volume group %(group_id)s cannot be found", {'group_id': group_name}) return None return volume_group def _get_members_of_volume_group(self, array, group_name): """Get the members of a volume group. :param array: the array serial number :param group_name: the storage group name :returns: list -- member_device_ids """ member_device_ids = self.rest.get_volumes_in_storage_group( array, group_name) if not member_device_ids: LOG.info("No member volumes found in %(group_id)s", {'group_id': group_name}) return member_device_ids def update_group(self, group, add_volumes, remove_volumes): """Updates LUNs in generic volume group. :param group: storage configuration service instance :param add_volumes: the volumes uuids you want to add to the vol grp :param remove_volumes: the volumes uuids you want to remove from the CG :returns: model_update :raises: VolumeBackendAPIException, NotImplementedError """ LOG.info("Update generic volume Group: %(group)s. " "This adds and/or removes volumes from " "a generic volume group.", {'group': group.id}) if (not volume_utils.is_group_a_cg_snapshot_type(group) and not group.is_replicated): raise NotImplementedError() model_update = {'status': fields.GroupStatus.AVAILABLE} if self.promotion: self._update_group_promotion( group, add_volumes, remove_volumes) elif self.failedover: msg = _('Cannot perform group updates during failover, please ' 'either failback or perform a promotion operation.') raise exception.VolumeBackendAPIException(msg) else: array, interval_retries_dict = self._get_volume_group_info(group) add_vols = [vol for vol in add_volumes] if add_volumes else [] add_device_ids = self._get_volume_device_ids(add_vols, array) remove_vols = [ vol for vol in remove_volumes] if remove_volumes else [] remove_device_ids = self._get_volume_device_ids(remove_vols, array) vol_grp_name = None try: volume_group = self._find_volume_group(array, group) if volume_group: if 'name' in volume_group: vol_grp_name = volume_group['name'] if vol_grp_name is None: raise exception.GroupNotFound(group_id=group.id) if group.is_replicated: # Need force flag when manipulating RDF enabled SGs interval_retries_dict[utils.FORCE_VOL_EDIT] = True # Add volume(s) to the group if add_device_ids: self.utils.check_rep_status_enabled(group) for vol in add_vols: extra_specs = self._initial_setup(vol) self.utils.check_replication_matched(vol, extra_specs) self.masking.add_volumes_to_storage_group( array, add_device_ids, vol_grp_name, interval_retries_dict) if group.is_replicated: # Add remote volumes to remote storage group self.masking.add_remote_vols_to_volume_group( add_vols, group, interval_retries_dict) # Remove volume(s) from the group if remove_device_ids: # Check if the volumes exist in the storage group temp_list = deepcopy(remove_device_ids) for device_id in temp_list: if not self.rest.is_volume_in_storagegroup( array, device_id, vol_grp_name): remove_device_ids.remove(device_id) if remove_device_ids: self.masking.remove_volumes_from_storage_group( array, remove_device_ids, vol_grp_name, interval_retries_dict) if group.is_replicated: # Remove remote volumes from 
the remote storage group self._remove_remote_vols_from_volume_group( array, remove_vols, group, interval_retries_dict) except exception.GroupNotFound: raise except Exception as ex: exception_message = (_("Failed to update volume group:" " %(volGrpName)s. Exception: %(ex)s.") % {'volGrpName': group.id, 'ex': ex}) LOG.error(exception_message) raise exception.VolumeBackendAPIException( message=exception_message) self.volume_metadata.capture_modify_group( vol_grp_name, group.id, add_vols, remove_volumes, array) return model_update, None, None def _update_group_promotion(self, group, add_volumes, remove_volumes): """Updates LUNs in generic volume group during array promotion. :param group: storage configuration service instance :param add_volumes: the volumes uuids you want to add to the vol grp :param remove_volumes: the volumes uuids you want to remove from the CG :returns: model_update :raises: VolumeBackendAPIException """ if not group.is_replicated: msg = _('Group updates are only supported on replicated volume ' 'groups during failover promotion.') raise exception.VolumeBackendAPIException(msg) if add_volumes: msg = _('Unable to add volumes to a group, only volume ' 'removal is supported during promotion.') raise exception.VolumeBackendAPIException(msg) # Either add_volumes or remove_volumes must be provided; if add_volumes # is set an exception is raised above, so remove_volumes must be present volume = remove_volumes[0] extra_specs = self._initial_setup(volume, volume.volume_type_id) rep_extra_specs = self._get_replication_extra_specs( extra_specs, extra_specs[utils.REP_CONFIG]) remote_array = rep_extra_specs['array'] vol_grp_name = None volume_group = self._find_volume_group(remote_array, group) if volume_group: if 'name' in volume_group: vol_grp_name = volume_group['name'] if vol_grp_name is None: raise exception.GroupNotFound(group_id=group.id) interval_retries_dict = { utils.INTERVAL: self.interval, utils.RETRIES: self.retries} # Volumes have already failed over and had their provider_location # updated, do not get remote device IDs here remove_device_ids = self._get_volume_device_ids( remove_volumes, remote_array) if remove_device_ids: interval_retries_dict[utils.FORCE_VOL_EDIT] = True # Check if the volumes exist in the storage group temp_list = deepcopy(remove_device_ids) for device_id in temp_list: if not self.rest.is_volume_in_storagegroup( remote_array, device_id, vol_grp_name): remove_device_ids.remove(device_id) if remove_device_ids: self.masking.remove_volumes_from_storage_group( remote_array, remove_device_ids, vol_grp_name, interval_retries_dict) self.volume_metadata.capture_modify_group( vol_grp_name, group.id, list(), remove_volumes, remote_array) def _remove_remote_vols_from_volume_group( self, array, volumes, group, extra_specs): """Remove the remote volumes from their volume group.
:param array: the array serial number :param volumes: list of volumes :param group: the id of the group :param extra_specs: the extra specifications """ remote_device_list = [] backend_id = self._get_replicated_volume_backend_id(volumes[0]) rep_config = self.utils.get_rep_config(backend_id, self.rep_configs) __, remote_array = self.get_rdf_details(array, rep_config) for vol in volumes: remote_loc = ast.literal_eval(vol.replication_driver_data) founddevice_id = self.rest.check_volume_device_id( remote_array, remote_loc['device_id'], vol.id) if founddevice_id is not None: remote_device_list.append(founddevice_id) group_name = self.provision.get_or_create_volume_group( array, group, extra_specs) self.masking.remove_volumes_from_storage_group( remote_array, remote_device_list, group_name, extra_specs) LOG.info("Removed volumes from remote volume group.") def _get_volume_device_ids(self, volumes, array, remote_volumes=False): """Get volume device ids from volume. :param volumes: volume objects :param array: array id :param remote_volumes: get the remote ids for replicated volumes :returns: device_ids """ device_ids = [] for volume in volumes: if remote_volumes: replication_details = ast.literal_eval( volume.replication_driver_data) remote_array = replication_details.get(utils.ARRAY) specs = {utils.ARRAY: remote_array} device_id = self._find_device_on_array( volume, specs, remote_volumes) else: specs = {utils.ARRAY: array} device_id = self._find_device_on_array(volume, specs) if device_id is None: LOG.error("Volume %(name)s not found on the array.", {'name': volume['name']}) else: device_ids.append(device_id) return device_ids def create_group_from_src(self, context, group, volumes, group_snapshot, snapshots, source_group, source_vols): """Creates the volume group from source. :param context: the context :param group: the volume group object to be created :param volumes: volumes in the consistency group :param group_snapshot: the source volume group snapshot :param snapshots: snapshots of the source volumes :param source_group: the source volume group :param source_vols: the source vols :returns: model_update, volumes_model_update model_update is a dictionary of cg status volumes_model_update is a list of dictionaries of volume update :raises: VolumeBackendAPIException, NotImplementedError """ if not volume_utils.is_group_a_cg_snapshot_type(group): raise NotImplementedError() create_snapshot = False volumes_model_update = [] if group_snapshot: source_id = group_snapshot.id actual_source_grp = group_snapshot.get('group') elif source_group: source_id = source_group.id actual_source_grp = source_group create_snapshot = True else: exception_message = (_("Must supply either group snapshot or " "a source group.")) raise exception.VolumeBackendAPIException( message=exception_message) tgt_name = self.utils.update_volume_group_name(group) rollback_dict = {} array, interval_retries_dict = self._get_volume_group_info(group) source_sg = self._find_volume_group(array, actual_source_grp) if source_sg is not None: src_grp_name = (source_sg['name'] if 'name' in source_sg else None) rollback_dict['source_group_name'] = src_grp_name else: error_msg = (_("Cannot retrieve source volume group %(grp_id)s " "from the array.") % {'grp_id': actual_source_grp.id}) LOG.error(error_msg) raise exception.VolumeBackendAPIException(message=error_msg) LOG.debug("Enter PowerMax/VMAX create_volume group_from_src. 
Group " "to be created: %(grpId)s, Source : %(SourceGrpId)s.", {'grpId': group.id, 'SourceGrpId': source_id}) try: self.provision.create_volume_group( array, tgt_name, interval_retries_dict) rollback_dict.update({ 'target_group_name': tgt_name, 'volumes': [], 'device_ids': [], 'list_volume_pairs': [], 'interval_retries_dict': interval_retries_dict}) model_update = {'status': fields.GroupStatus.AVAILABLE} # Create the target devices list_volume_pairs = [] for volume in volumes: (volumes_model_update, rollback_dict, list_volume_pairs, extra_specs) = ( self._create_vol_and_add_to_group( volume, group, tgt_name, rollback_dict, source_vols, snapshots, list_volume_pairs, volumes_model_update)) snap_name, rollback_dict = ( self._create_group_replica_and_get_snap_name( group.id, actual_source_grp, source_id, source_sg, rollback_dict, create_snapshot)) # Link and break the snapshot to the source group snap_id_list = self.rest.get_storage_group_snap_id_list( array, src_grp_name, snap_name) if snap_id_list: if group.is_replicated: interval_retries_dict[utils.FORCE_VOL_EDIT] = True self.provision.link_and_break_replica( array, src_grp_name, tgt_name, snap_name, interval_retries_dict, list_volume_pairs, delete_snapshot=create_snapshot, snap_id=snap_id_list[0]) # Update the replication status if group.is_replicated: backend = self._get_replicated_volume_backend_id(volumes[0]) rep_config = self.utils.get_rep_config( backend, self.rep_configs) interval_retries_dict[utils.REP_CONFIG] = rep_config volumes_model_update = self._replicate_group( array, volumes_model_update, tgt_name, interval_retries_dict) # Add the volumes to the default storage group extra_specs[utils.FORCE_VOL_EDIT] = True self._add_replicated_volumes_to_default_storage_group( array, volumes_model_update, extra_specs) model_update.update({ 'replication_status': fields.ReplicationStatus.ENABLED}) except Exception: exception_message = (_("Failed to create vol grp %(volGrpName)s" " from source %(grpSnapshot)s.") % {'volGrpName': group.id, 'grpSnapshot': source_id}) LOG.error(exception_message) if array is not None: LOG.info("Attempting rollback for the create group from src.") self._rollback_create_group_from_src(array, rollback_dict) raise exception.VolumeBackendAPIException( message=exception_message) return model_update, volumes_model_update def _add_replicated_volumes_to_default_storage_group( self, array, volumes_model_update, extra_specs): """Add replicated volumes to the default storage group. 
:param array: the serial number of the array :param volumes_model_update: the list of volume updates :param extra_specs: the extra specifications """ is_re = False rep_mode = None if self.utils.is_replication_enabled(extra_specs): is_re, rep_mode = True, extra_specs['rep_mode'] do_disable_compression = self.utils.is_compression_disabled( extra_specs) storage_group_name = self.masking.get_or_create_default_storage_group( array, extra_specs[utils.SRP], extra_specs[utils.SLO], extra_specs[utils.WORKLOAD], extra_specs, do_disable_compression, is_re, rep_mode) local_device_list = list() remote_device_list = list() for volume_dict in volumes_model_update: if volume_dict.get('provider_location'): loc = ast.literal_eval(volume_dict.get('provider_location')) device_id = loc.get('device_id') local_array = loc.get('array') local_device_list.append(device_id) if volume_dict.get('replication_driver_data'): loc = ast.literal_eval(volume_dict.get( 'replication_driver_data')) remote_device_id = loc.get('device_id') remote_array = loc.get('array') remote_device_list.append(remote_device_id) if local_device_list: self.masking.add_volumes_to_storage_group( local_array, local_device_list, storage_group_name, extra_specs) if remote_device_list: self.masking.add_volumes_to_storage_group( remote_array, remote_device_list, storage_group_name, extra_specs) def _create_group_replica_and_get_snap_name( self, group_id, actual_source_grp, source_id, source_sg, rollback_dict, create_snapshot): """Create group replica and get snap name :param group_id: the group id :param actual_source_grp: the source group :param source_id: source id :param source_sg: source storage group :param rollback_dict: rollback dict :param create_snapshot: boolean :returns: snap_name, rollback_dict """ if create_snapshot is True: # We have to create a snapshot of the source group snap_name = self.utils.truncate_string(group_id, 19) self._create_group_replica(actual_source_grp, snap_name) rollback_dict['snap_name'] = snap_name else: # We need to check if the snapshot exists snap_name = self.utils.truncate_string(source_id, 19) if ('snapVXSnapshots' in source_sg and snap_name in source_sg['snapVXSnapshots']): LOG.info("Snapshot is present on the array") else: error_msg = (_("Cannot retrieve source snapshot %(snap_id)s " "from the array.") % {'snap_id': source_id}) LOG.error(error_msg) raise exception.VolumeBackendAPIException( message=error_msg) return snap_name, rollback_dict def _create_vol_and_add_to_group( self, volume, group, tgt_name, rollback_dict, source_vols, snapshots, list_volume_pairs, volumes_model_update): """Create a volume and add it to the volume group.
:param volume: volume object :param group: the group object :param tgt_name: target name :param rollback_dict: rollback dict :param source_vols: source volumes :param snapshots: snapshot objects :param list_volume_pairs: volume pairs list :param volumes_model_update: volume model update :returns: volumes_model_update, rollback_dict, list_volume_pairs extra_specs """ src_dev_id, extra_specs, vol_size, tgt_vol_name = ( self._get_clone_vol_info( volume, source_vols, snapshots)) array = extra_specs[utils.ARRAY] volume_name = self.utils.get_volume_element_name(volume.id) if group.is_replicated: volume_dict = self._create_non_replicated_volume( array, volume, volume_name, tgt_name, vol_size, extra_specs) device_id = volume_dict['device_id'] else: volume_dict, __, __, = self._create_volume( volume, tgt_vol_name, vol_size, extra_specs) device_id = volume_dict['device_id'] # Add the volume to the volume group SG self.masking.add_volume_to_storage_group( extra_specs[utils.ARRAY], device_id, tgt_name, tgt_vol_name, extra_specs) # Record relevant information list_volume_pairs.append((src_dev_id, device_id)) # Add details to rollback dict rollback_dict['device_ids'].append(device_id) rollback_dict['list_volume_pairs'].append( (src_dev_id, device_id)) rollback_dict['volumes'].append( (device_id, extra_specs, volume)) volumes_model_update.append( self.utils.get_grp_volume_model_update( volume, volume_dict, group.id, meta=self.get_volume_metadata(volume_dict['array'], volume_dict['device_id']))) return (volumes_model_update, rollback_dict, list_volume_pairs, extra_specs) def _get_clone_vol_info(self, volume, source_vols, snapshots): """Get the clone volume info. :param volume: the new volume object :param source_vols: the source volume list :param snapshots: the source snapshot list :returns: src_dev_id, extra_specs, vol_size, tgt_vol_name """ src_dev_id, vol_size = None, None extra_specs = self._initial_setup(volume) if not source_vols: for snap in snapshots: if snap.id == volume.snapshot_id: src_dev_id, __, __ = self._parse_snap_info( extra_specs[utils.ARRAY], snap) vol_size = snap.volume_size else: for src_vol in source_vols: if src_vol.id == volume.source_volid: src_extra_specs = self._initial_setup(src_vol) src_dev_id = self._find_device_on_array( src_vol, src_extra_specs) vol_size = src_vol.size tgt_vol_name = self.utils.get_volume_element_name(volume.id) return src_dev_id, extra_specs, vol_size, tgt_vol_name def _rollback_create_group_from_src(self, array, rollback_dict): """Performs rollback for create group from src in case of failure. :param array: the array serial number :param rollback_dict: dict containing rollback details """ try: # Delete the snapshot if required if rollback_dict.get("snap_name"): try: self.provision.delete_group_replica( array, rollback_dict["snap_name"], rollback_dict["source_group_name"]) except Exception as e: LOG.debug("Failed to delete group snapshot. Attempting " "further rollback. 
Exception received: %(e)s.", {'e': e}) if rollback_dict.get('volumes'): # Remove any devices which were added to the target SG if rollback_dict['device_ids']: self.masking.remove_volumes_from_storage_group( array, rollback_dict['device_ids'], rollback_dict['target_group_name'], rollback_dict['interval_retries_dict']) # Delete all the volumes for dev_id, extra_specs, volume in rollback_dict['volumes']: self._remove_vol_and_cleanup_replication( array, dev_id, "group vol", extra_specs, volume) self._delete_from_srp( array, dev_id, "group vol", extra_specs) # Delete the target SG if rollback_dict.get("target_group_name"): self.rest.delete_storage_group( array, rollback_dict['target_group_name']) LOG.info("Rollback completed for create group from src.") except Exception as e: LOG.error("Rollback failed for the create group from src. " "Exception received: %(e)s.", {'e': e}) def _replicate_group(self, array, volumes_model_update, group_name, extra_specs): """Replicate a cloned volume group. :param array: the array serial number :param volumes_model_update: the volumes model updates :param group_name: the group name :param extra_specs: the extra specs :returns: volumes_model_update """ ret_volumes_model_update = [] rdf_group_no, remote_array = self.get_rdf_details( array, extra_specs[utils.REP_CONFIG]) self.rest.replicate_group( array, group_name, rdf_group_no, remote_array, extra_specs) # Need to set SRP to None for remote generic volume group - Not set # automatically, and a volume can only be in one storage group # managed by FAST self.rest.set_storagegroup_srp( remote_array, group_name, "None", extra_specs) for volume_model_update in volumes_model_update: vol_id = volume_model_update['id'] loc = ast.literal_eval(volume_model_update['provider_location']) src_device_id = loc['device_id'] rdf_vol_details = self.rest.get_rdf_group_volume( array, src_device_id) tgt_device_id = rdf_vol_details['remoteDeviceID'] element_name = self.utils.get_volume_element_name(vol_id) self.rest.rename_volume(remote_array, tgt_device_id, element_name) rep_update = {'device_id': tgt_device_id, 'array': remote_array} volume_model_update.update( {'replication_driver_data': str(rep_update), 'replication_status': fields.ReplicationStatus.ENABLED}) volume_model_update = self.update_metadata( volume_model_update, None, self.get_volume_metadata( array, src_device_id)) ret_volumes_model_update.append(volume_model_update) return ret_volumes_model_update def enable_replication(self, context, group, volumes): """Enable replication for a group. Replication is enabled on replication-enabled groups by default. 
:param context: the context :param group: the group object :param volumes: the list of volumes :returns: model_update, None """ if not group.is_replicated: raise NotImplementedError() model_update = {} if not volumes: # Return if empty group return model_update, None try: vol_grp_name = None extra_specs = self._initial_setup(volumes[0]) array = extra_specs[utils.ARRAY] volume_group = self._find_volume_group(array, group) if volume_group: if 'name' in volume_group: vol_grp_name = volume_group['name'] if vol_grp_name is None: raise exception.GroupNotFound(group_id=group.id) rdf_group_no, __ = self.get_rdf_details( array, extra_specs[utils.REP_CONFIG]) self.rest.srdf_resume_replication( array, vol_grp_name, rdf_group_no, extra_specs) model_update.update({ 'replication_status': fields.ReplicationStatus.ENABLED}) except Exception as e: model_update.update({ 'replication_status': fields.ReplicationStatus.ERROR}) LOG.error("Error enabling replication on group %(group)s. " "Exception received: %(e)s.", {'group': group.id, 'e': e}) return model_update, None def disable_replication(self, context, group, volumes): """Disable replication for a group. :param context: the context :param group: the group object :param volumes: the list of volumes :returns: model_update, None """ if not group.is_replicated: raise NotImplementedError() model_update = {} if not volumes: # Return if empty group return model_update, None try: vol_grp_name = None extra_specs = self._initial_setup(volumes[0]) array = extra_specs[utils.ARRAY] volume_group = self._find_volume_group(array, group) if volume_group: if 'name' in volume_group: vol_grp_name = volume_group['name'] if vol_grp_name is None: raise exception.GroupNotFound(group_id=group.id) rdf_group_no, __ = self.get_rdf_details( array, extra_specs[utils.REP_CONFIG]) self.rest.srdf_suspend_replication( array, vol_grp_name, rdf_group_no, extra_specs) model_update.update({ 'replication_status': fields.ReplicationStatus.DISABLED}) except Exception as e: model_update.update({ 'replication_status': fields.ReplicationStatus.ERROR}) LOG.error("Error disabling replication on group %(group)s. " "Exception received: %(e)s.", {'group': group.id, 'e': e}) return model_update, None def failover_replication(self, context, group, volumes, secondary_backend_id=None, host=False): """Failover replication for a group. :param context: the context :param group: the group object :param volumes: the list of volumes :param secondary_backend_id: the secondary backend id - default None :param host: flag to indicate if whole host is being failed over :returns: model_update, vol_model_updates """ return self._failover_replication( volumes, group, None, secondary_backend_id=secondary_backend_id, host=host) def _failover_replication( self, volumes, group, vol_grp_name, secondary_backend_id=None, host=False, is_metro=False): """Failover replication for a group. 
:param volumes: the list of volumes :param group: the group object :param vol_grp_name: the group name :param secondary_backend_id: the secondary backend id - default None :param host: flag to indicate if whole host is being failed over :returns: model_update, vol_model_updates """ model_update, vol_model_updates = dict(), list() if not volumes: # Return if empty group return model_update, vol_model_updates extra_specs = self._initial_setup(volumes[0]) replication_details = ast.literal_eval( volumes[0].replication_driver_data) remote_array = replication_details.get(utils.ARRAY) extra_specs[utils.ARRAY] = remote_array failover = False if secondary_backend_id == 'default' else True try: rdf_group_no, __ = self.get_rdf_details( remote_array, extra_specs[utils.REP_CONFIG]) if group: volume_group = self._find_volume_group(remote_array, group) if volume_group: if 'name' in volume_group: vol_grp_name = volume_group['name'] if vol_grp_name is None: raise exception.GroupNotFound(group_id=group.id) is_partitioned = self._rdf_vols_partitioned( remote_array, volumes, rdf_group_no) if not is_metro and not is_partitioned: if failover: self.rest.srdf_failover_group( remote_array, vol_grp_name, rdf_group_no, extra_specs) else: self.rest.srdf_failback_group( remote_array, vol_grp_name, rdf_group_no, extra_specs) if failover: model_update.update({ 'replication_status': fields.ReplicationStatus.FAILED_OVER}) vol_rep_status = fields.ReplicationStatus.FAILED_OVER else: model_update.update({ 'replication_status': fields.ReplicationStatus.ENABLED}) vol_rep_status = fields.ReplicationStatus.ENABLED except Exception as e: model_update.update({ 'replication_status': fields.ReplicationStatus.ERROR}) vol_rep_status = fields.ReplicationStatus.ERROR LOG.error("Error failover replication on group %(group)s. 
" "Exception received: %(e)s.", {'group': vol_grp_name, 'e': e}) for vol in volumes: loc = vol.provider_location rep_data = vol.replication_driver_data if vol_rep_status != fields.ReplicationStatus.ERROR: loc = vol.replication_driver_data rep_data = vol.provider_location local = ast.literal_eval(loc) remote = ast.literal_eval(rep_data) self.volume_metadata.capture_failover_volume( vol, local['device_id'], local['array'], rdf_group_no, remote['device_id'], remote['array'], extra_specs, failover, vol_grp_name, vol_rep_status, extra_specs[utils.REP_MODE]) update = {'id': vol.id, 'replication_status': vol_rep_status, 'provider_location': loc, 'replication_driver_data': rep_data} if host: update = {'volume_id': vol.id, 'updates': update} vol_model_updates.append(update) LOG.debug("Volume model updates: %s", vol_model_updates) return model_update, vol_model_updates def _rdf_vols_partitioned(self, array, volumes, rdfg): """Check if rdf volumes have been failed over by powermax array :param array: remote array :param volumes: rdf volumes :param rdfg: rdf group :returns: devices have partitioned states """ is_partitioned = False for volume in volumes: if self.promotion: vol_data = volume.provider_location else: vol_data = volume.replication_driver_data vol_data = ast.literal_eval(vol_data) device_id = vol_data.get(utils.DEVICE_ID) vol_details = self.rest.get_rdf_pair_volume(array, rdfg, device_id) rdf_pair_state = vol_details.get(utils.RDF_PAIR_STATE, '').lower() if rdf_pair_state in utils.RDF_PARTITIONED_STATES: is_partitioned = True break return is_partitioned def get_attributes_from_cinder_config(self): """Get all attributes from the configuration file :returns: kwargs """ kwargs = None username = self.configuration.safe_get(utils.VMAX_USER_NAME) password = self.configuration.safe_get(utils.VMAX_PASSWORD) if username and password: serial_number = self.configuration.safe_get(utils.POWERMAX_ARRAY) if serial_number is None: msg = _("Powermax Array Serial must be set in cinder.conf") LOG.error(msg) raise exception.InvalidConfigurationValue(message=msg) srp_name = self.configuration.safe_get(utils.POWERMAX_SRP) if srp_name is None: msg = _("Powermax SRP must be set in cinder.conf") LOG.error(msg) raise exception.InvalidConfigurationValue(message=msg) slo = self.configuration.safe_get(utils.POWERMAX_SERVICE_LEVEL) workload = self.configuration.safe_get(utils.VMAX_WORKLOAD) port_groups = self.configuration.safe_get( utils.POWERMAX_PORT_GROUPS) rest_api_connect_timeout = ( self.configuration.safe_get(utils.REST_API_CONNECT_TIMEOUT)) rest_api_read_timeout = ( self.configuration.safe_get(utils.REST_API_READ_TIMEOUT)) kwargs = ( {'RestServerIp': self.configuration.safe_get( utils.VMAX_SERVER_IP), 'RestServerPort': self._get_unisphere_port(), 'RestUserName': username, 'RestPassword': password, 'SerialNumber': serial_number, 'srpName': srp_name, 'PortGroup': port_groups, utils.REST_API_CONNECT_TIMEOUT_KEY: rest_api_connect_timeout, utils.REST_API_READ_TIMEOUT_KEY: rest_api_read_timeout}) if self.configuration.safe_get('driver_ssl_cert_verify'): if self.configuration.safe_get('driver_ssl_cert_path'): kwargs.update({'SSLVerify': self.configuration.safe_get( 'driver_ssl_cert_path')}) else: kwargs.update({'SSLVerify': True}) else: kwargs.update({'SSLVerify': False}) if slo: kwargs.update({'ServiceLevel': slo, 'Workload': workload}) return kwargs def _get_volume_group_info(self, group): """Get the volume group array, retries and intervals :param group: the group object :returns: array -- str interval_retries_dict 
-- dict """ array, interval_retries_dict = self.utils.get_volume_group_utils( group, self.interval, self.retries) if not array: array = self.configuration.safe_get(utils.POWERMAX_ARRAY) if not array: exception_message = _( "Cannot get the array serial_number") LOG.error(exception_message) raise exception.VolumeBackendAPIException( message=exception_message) return array, interval_retries_dict def _get_unisphere_port(self): """Get unisphere port from the configuration file :returns: unisphere port """ if self.configuration.safe_get(utils.U4P_SERVER_PORT): return self.configuration.safe_get(utils.U4P_SERVER_PORT) else: LOG.debug("PowerMax/VMAX port is not set, using default port: %s", utils.DEFAULT_PORT) return utils.DEFAULT_PORT def revert_to_snapshot(self, volume, snapshot): """Revert volume to snapshot. :param volume: the volume object :param snapshot: the snapshot object """ extra_specs = self._initial_setup(volume) if self.utils.is_replication_enabled(extra_specs): exception_message = (_( "Volume is replicated - revert to snapshot feature is not " "supported for replicated volumes.")) LOG.error(exception_message) raise exception.VolumeDriverException(message=exception_message) array = extra_specs[utils.ARRAY] sourcedevice_id, snap_name, snap_id_list = self._parse_snap_info( array, snapshot) if not sourcedevice_id or not snap_name: LOG.error("No snapshot found on the array") exception_message = (_( "Failed to revert the volume to the snapshot")) raise exception.VolumeDriverException(message=exception_message) if len(snap_id_list) != 1: exception_message = (_( "It is not possible to revert snapshot because there are " "either multiple or no snapshots associated with " "%(snap_name)s.") % {'snap_name': snap_name}) LOG.error(exception_message) raise exception.VolumeBackendAPIException( message=exception_message) else: snap_id = snap_id_list[0] self._cleanup_device_snapvx(array, sourcedevice_id, extra_specs) try: LOG.info("Reverting device: %(deviceid)s " "to snapshot: %(snapname)s.", {'deviceid': sourcedevice_id, 'snapname': snap_name}) self.provision.revert_volume_snapshot( array, sourcedevice_id, snap_name, snap_id, extra_specs) # Once the restore is done, we need to check if it is complete restore_complete = self.provision.is_restore_complete( array, sourcedevice_id, snap_name, snap_id, extra_specs) if not restore_complete: LOG.debug("Restore couldn't complete in the specified " "time interval. The terminate restore may fail") LOG.debug("Terminating restore session") # This may throw an exception if restore_complete is False self.provision.delete_volume_snap( array, snap_name, sourcedevice_id, snap_id, restored=True) # Revert volume to snapshot is successful if termination was # successful - possible even if restore_complete was False # when we checked last. LOG.debug("Restored session was terminated") LOG.info("Reverted the volume to snapshot successfully") except Exception as e: exception_message = (_( "Failed to revert the volume to the snapshot. " "Exception received was %(e)s") % {'e': str(e)}) LOG.error(exception_message) raise exception.VolumeBackendAPIException( message=exception_message) def update_metadata( self, model_update, existing_metadata, new_metadata): """Update volume metadata in model_update. 
:param model_update: existing model :param existing_metadata: existing metadata :param new_metadata: new object metadata :returns: dict -- updated model """ if existing_metadata: self._is_dict(existing_metadata, 'existing metadata') else: existing_metadata = dict() if model_update: self._is_dict(model_update, 'existing model') if 'metadata' in model_update: model_update['metadata'].update(existing_metadata) else: model_update.update({'metadata': existing_metadata}) else: model_update = {} model_update.update({'metadata': existing_metadata}) if new_metadata: self._is_dict(new_metadata, 'new object metadata') model_update['metadata'].update(new_metadata) return model_update def _is_dict(self, input, description): """Check that the input is a dict :param input: object for checking :raises: VolumeBackendAPIException """ if not isinstance(input, dict): exception_message = (_( "Input %(desc)s is not a dict.") % {'desc': description}) LOG.error(exception_message) raise exception.VolumeBackendAPIException( message=exception_message) def get_volume_metadata(self, array, device_id): """Get volume metadata for model_update. :param array: the array ID :param device_id: the device ID :returns: dict -- volume metadata """ if device_id is None: return dict() vol_info = self.rest._get_private_volume(array, device_id) vol_header = vol_info['volumeHeader'] array_model, __ = self.rest.get_array_model_info(array) sl = (vol_header['serviceLevel'] if vol_header.get('serviceLevel') else 'None') wl = vol_header['workload'] if vol_header.get('workload') else 'None' cd = 'False' if vol_header.get('compressionEnabled') else 'True' metadata = {'DeviceID': device_id, 'DeviceLabel': vol_header['userDefinedIdentifier'], 'ArrayID': array, 'ArrayModel': array_model, 'ServiceLevel': sl, 'Workload': wl, 'Emulation': vol_header['emulationType'], 'Configuration': vol_header['configuration'], 'CompressionDisabled': cd} is_rep_enabled = vol_info['rdfInfo']['RDF'] if is_rep_enabled: rdf_info = vol_info['rdfInfo'] rdf_session = rdf_info['RDFSession'][0] rdf_num = rdf_session['SRDFGroupNumber'] rdfg_info = self.rest.get_rdf_group(array, str(rdf_num)) r2_array_model, __ = self.rest.get_array_model_info( rdf_session['remoteSymmetrixID']) metadata.update( {'ReplicationEnabled': 'True', 'R2-DeviceID': rdf_session['remoteDeviceID'], 'R2-ArrayID': rdf_session['remoteSymmetrixID'], 'R2-ArrayModel': r2_array_model, 'ReplicationMode': rdf_session['SRDFReplicationMode'], 'RDFG-Label': rdfg_info['label'], 'R1-RDFG': rdf_session['SRDFGroupNumber'], 'R2-RDFG': rdf_session['SRDFRemoteGroupNumber']}) if metadata.get('ReplicationMode') == utils.RDF_ACTIVE.title(): metadata['ReplicationMode'] = utils.REP_METRO else: metadata['ReplicationEnabled'] = 'False' return metadata def get_snapshot_metadata(self, array, device_id, snap_name): """Get snapshot metadata for model_update. 
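        An illustrative return value (ids, labels and snap ids below are
        placeholders):

        .. code-block:: default

            {'SnapshotLabel': 'OS-snapshot-1',
             'SourceDeviceID': '00001',
             'SourceDeviceLabel': 'OS-volume-1',
             'SnapIdList': '118749976833',
             'is_snap_id': True}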
:param array: the array ID :param device_id: the device ID :param snap_name: the snapshot name :returns: dict -- volume metadata """ snap_id_list = list() snap_info = self.rest.get_volume_snap_info(array, device_id) device_name = snap_info.get('deviceName') snapshot_src_list = snap_info.get('snapshotSrcs') for snapshot_src in snapshot_src_list: if snap_name == snapshot_src.get('snapshotName'): snap_id_list.append(snapshot_src.get( 'snap_id') if self.rest.is_snap_id else snapshot_src.get( 'generation')) try: device_label = device_name.split(':')[1] if device_name else None except IndexError: device_label = None metadata = {'SnapshotLabel': snap_name, 'SourceDeviceID': device_id, 'SnapIdList': ', '.join( str(v) for v in snap_id_list), 'is_snap_id': self.rest.is_snap_id} if device_label: metadata['SourceDeviceLabel'] = device_label return metadata def _check_and_add_tags_to_storage_array( self, serial_number, array_tag_list, extra_specs): """Add tags to a storage group. :param serial_number: the array serial number :param array_tag_list: the array tag list :param extra_specs: the extra specifications """ if array_tag_list: existing_array_tags = self.rest.get_array_tags(serial_number) new_tag_list = self.utils.get_new_tags( self.utils.convert_list_to_string(array_tag_list), self.utils.convert_list_to_string(existing_array_tags)) if not new_tag_list: LOG.warning("No new tags to add. Existing tags " "associated with %(array)s are " "%(tags)s.", {'array': serial_number, 'tags': existing_array_tags}) else: self._validate_array_tag_list(new_tag_list) LOG.info("Adding the tags %(tag_list)s to %(array)s", {'tag_list': new_tag_list, 'array': serial_number}) try: self.rest.add_storage_array_tags( serial_number, new_tag_list, extra_specs) except Exception as ex: LOG.warning("Unexpected error: %(ex)s. If you still " "want to add tags to this storage array, " "please do so on the Unisphere UI.", {'ex': ex}) def prepare_replication_details(self, extra_specs): """Prepare details required for initialising replication. 
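        When volumes already exist in the target storage group the returned
        replication info dict is populated along these lines (all values
        below are placeholders):

        .. code-block:: default

            {'local_array': '000123456789',
             'remote_array': '000197811111',
             'rdf_group_no': '70',
             'rep_mode': 'Synchronous',
             'sg_name': 'OS-SRP_1-Diamond-OLTP-RE-SG',
             'service_level': 'Diamond',
             'initial_device_list': ['00001', '00002'],
             'sync_interval': 3,
             'sync_retries': 200}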
:param extra_specs: extra sepcifications :returns: first volume in SG, replication extra specs, replication info dict -- bool, dict, dict """ rep_info_dict, rep_first_vol, rdfg_empty = dict(), True, True # Get volume type replication extra specs rep_extra_specs = self._get_replication_extra_specs( extra_specs, extra_specs[utils.REP_CONFIG]) # Get the target SG name for the current volume create op sg_name = self.utils.derive_default_sg_from_extra_specs( extra_specs, rep_mode=extra_specs['rep_mode']) rep_extra_specs['sg_name'] = sg_name # Check if the RDFG has volume in it regardless of target SG state rdf_group_details = self.rest.get_rdf_group( extra_specs['array'], rep_extra_specs['rdf_group_no']) rdfg_device_count = rdf_group_details['numDevices'] if rdfg_device_count > 0: rdfg_empty = False # Check if there are any volumes in the SG, will return 0 if the SG # does not exist if self.rest.get_num_vols_in_sg(extra_specs['array'], sg_name): # Volumes exist, not first volume in SG rep_first_vol = False # Get the list of the current devices in the SG, this will help # with determining the new device added because no device ID is # returned local_device_list = self.rest.get_volume_list( extra_specs['array'], {'storageGroupId': sg_name}) # Set replication info that we will need for creating volume in # existing SG, these are not required for new SGs as the only # additional step required is to SRDF protect the SG rep_info_dict.update({ 'local_array': extra_specs['array'], 'remote_array': rep_extra_specs['array'], 'rdf_group_no': rep_extra_specs['rdf_group_no'], 'rep_mode': extra_specs['rep_mode'], 'sg_name': sg_name, 'service_level': extra_specs['slo'], 'initial_device_list': local_device_list, 'sync_interval': rep_extra_specs['sync_interval'], 'sync_retries': rep_extra_specs['sync_retries']}) return rep_first_vol, rep_extra_specs, rep_info_dict, rdfg_empty def srdf_protect_storage_group(self, extra_specs, rep_extra_specs, volume_dict): """SRDF protect a storage group. :param extra_specs: source extra specs :param rep_extra_specs: replication extra specs :param volume_dict: volume details dict """ self.rest.srdf_protect_storage_group( extra_specs['array'], rep_extra_specs['array'], rep_extra_specs['rdf_group_no'], extra_specs['rep_mode'], volume_dict['storage_group'], rep_extra_specs['slo'], extra_specs) def get_and_set_remote_device_uuid( self, extra_specs, rep_extra_specs, volume_dict): """Get a remote device id and set device UUID. :param extra_specs: source extra specs :param rep_extra_specs: replication extra specs :param volume_dict: volume details dict :returns: remote device ID -- str """ rdf_pair = self.rest.get_rdf_pair_volume( extra_specs['array'], rep_extra_specs['rdf_group_no'], volume_dict['device_id']) self.rest.rename_volume(rep_extra_specs['array'], rdf_pair['remoteVolumeName'], volume_dict['device_uuid']) return rdf_pair['remoteVolumeName'] def gather_replication_updates(self, extra_specs, rep_extra_specs, volume_dict): """Gather replication updates for returns. 
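        An illustrative replication update (array and device ids are
        placeholders):

        .. code-block:: default

            {'replication_status': 'enabled',
             'replication_driver_data':
                 "{'array': '000197811111', 'device_id': '000A1'}"}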
:param extra_specs: extra specs :param rep_extra_specs: replication extra specs :param volume_dict: volume info dict :returns: replication status, replication info -- str, dict """ replication_update = ( {'replication_status': REPLICATION_ENABLED, 'replication_driver_data': str( {'array': rep_extra_specs['array'], 'device_id': volume_dict['remote_device_id']})}) rep_info_dict = self.volume_metadata.gather_replication_info( volume_dict['device_uuid'], 'replication', False, local_array=extra_specs['array'], remote_array=rep_extra_specs['array'], target_device_id=volume_dict['remote_device_id'], target_name=volume_dict['device_uuid'], rdf_group_no=rep_extra_specs['rdf_group_no'], rep_mode=extra_specs['rep_mode'], replication_status=REPLICATION_ENABLED, rdf_group_label=rep_extra_specs['rdf_group_label'], target_array_model=rep_extra_specs['target_array_model'], backend_id=rep_extra_specs[ utils.REP_CONFIG].get(utils.BACKEND_ID, None)) return replication_update, rep_info_dict def _cleanup_non_rdf_volume_create_post_failure( self, volume, volume_name, extra_specs, device_ids): """Delete lingering volumes that exist in an non-RDF SG post exception. :param volume: Cinder volume -- Volume :param volume_name: Volume name -- str :param extra_specs: Volume extra specs -- dict :param device_ids: Devices ids to be deleted -- list """ array = extra_specs[utils.ARRAY] for device_id in device_ids: self.masking.remove_and_reset_members( array, volume, device_id, volume_name, extra_specs, False) self._delete_from_srp( array, device_id, volume_name, extra_specs) def _cleanup_rdf_volume_create_post_failure( self, volume, volume_name, extra_specs, device_ids): """Delete lingering volumes that exist in an RDF SG post exception. :param volume: Cinder volume -- Volume :param volume_name: Volume name -- str :param extra_specs: Volume extra specs -- dict :param device_ids: Devices ids to be deleted -- list """ __, rep_extra_specs, __, __ = self.prepare_replication_details( extra_specs) array = extra_specs[utils.ARRAY] srp = extra_specs['srp'] slo = extra_specs['slo'] workload = extra_specs['workload'] do_disable_compression = self.utils.is_compression_disabled( extra_specs) rep_mode = extra_specs['rep_mode'] rdf_group = rep_extra_specs['rdf_group_no'] rep_config = extra_specs[utils.REP_CONFIG] if rep_mode is utils.REP_SYNC: storagegroup_name = self.utils.get_default_storage_group_name( srp, slo, workload, do_disable_compression, True, rep_mode) else: storagegroup_name = self.utils.get_rdf_management_group_name( rep_config) self.rest.srdf_resume_replication( array, storagegroup_name, rdf_group, rep_extra_specs) for device_id in device_ids: __, __, vol_is_rdf = self.rest.is_vol_in_rep_session( array, device_id) if vol_is_rdf: self.cleanup_rdf_device_pair(array, rdf_group, device_id, extra_specs) else: self.masking.remove_and_reset_members( array, volume, device_id, volume_name, extra_specs, False) self._delete_from_srp( array, device_id, volume_name, extra_specs) def _validate_rdfg_status(self, array, extra_specs): """Validate RDF group states before and after various operations :param array: array serial number -- str :param extra_specs: volume extra specs -- dict """ rep_extra_specs = self._get_replication_extra_specs( extra_specs, extra_specs[utils.REP_CONFIG]) rep_mode = extra_specs['rep_mode'] rdf_group_no = rep_extra_specs['rdf_group_no'] # Get default storage group for volume disable_compression = self.utils.is_compression_disabled(extra_specs) storage_group_name = self.utils.get_default_storage_group_name( 
extra_specs['srp'], extra_specs['slo'], extra_specs['workload'], disable_compression, True, extra_specs['rep_mode']) # Check for storage group. Will be unavailable for first vol create storage_group_details = self.rest.get_storage_group( array, storage_group_name) storage_group_available = storage_group_details is not None if storage_group_available: is_rep = self._validate_storage_group_is_replication_enabled( array, storage_group_name) is_exclusive = self._validate_rdf_group_storage_group_exclusivity( array, storage_group_name) is_valid_states = self._validate_storage_group_rdf_states( array, storage_group_name, rdf_group_no, rep_mode) if not (is_rep and is_exclusive and is_valid_states): msg = (_('RDF validation for storage group %s failed. Please ' 'see logged error messages for specific details.' ) % storage_group_name) raise exception.VolumeBackendAPIException(msg) # Perform checks against Async or Metro management storage groups if rep_mode is not utils.REP_SYNC: management_sg_name = self.utils.get_rdf_management_group_name( extra_specs['rep_config']) management_sg_details = self.rest.get_storage_group( array, management_sg_name) management_sg_available = management_sg_details is not None if management_sg_available: is_rep = self._validate_storage_group_is_replication_enabled( array, management_sg_name) is_excl = self._validate_rdf_group_storage_group_exclusivity( array, management_sg_name) is_valid_states = self._validate_storage_group_rdf_states( array, management_sg_name, rdf_group_no, rep_mode) is_cons = self._validate_management_group_volume_consistency( array, management_sg_name, rdf_group_no) if not (is_rep and is_excl and is_valid_states and is_cons): msg = (_( 'RDF validation for storage group %s failed. Please ' 'see logged error messages for specific details.') % management_sg_name) raise exception.VolumeBackendAPIException(msg) # Perform check to make sure we have the same number of devices remote_array = rep_extra_specs[utils.ARRAY] rdf_group = self.rest.get_rdf_group( array, rdf_group_no) remote_rdf_group_no = rdf_group.get('remoteRdfgNumber') remote_rdf_group = self.rest.get_rdf_group( remote_array, remote_rdf_group_no) local_rdfg_device_count = rdf_group.get('numDevices') remote_rdfg_device_count = remote_rdf_group.get('numDevices') if local_rdfg_device_count != remote_rdfg_device_count: msg = (_( 'RDF validation failed. Different device counts found for ' 'local and remote RDFGs. Local RDFG %s has %s devices. Remote ' 'RDFG %s has %s devices. The same number of devices is ' 'expected. Check RDFGs for broken RDF pairs and cleanup or ' 'recreate the pairs as needed.') % ( rdf_group_no, local_rdfg_device_count, remote_rdf_group_no, remote_rdfg_device_count)) raise exception.VolumeDriverException(msg) def _validate_storage_group_is_replication_enabled( self, array, storage_group_name): """Validate that a storage groups is marked as RDF enabled :param array: array serial number -- str :param storage_group_name: name of the storage group -- str :returns: consistency validation checks passed -- boolean """ is_valid = True sg_details = self.rest.get_storage_group_rep(array, storage_group_name) sg_rdf_enabled = sg_details.get('rdf', False) if not sg_rdf_enabled: LOG.error('Storage group %s is expected to be RDF enabled but ' 'is not. 
Please check that all volumes in this storage ' 'group are RDF enabled and part of the same RDFG.', storage_group_name) is_valid = False return is_valid def _validate_storage_group_rdf_states( self, array, storage_group_name, rdf_group_no, rep_mode): """Validate that the RDF states found for storage groups are valid. :param array: array serial number -- str :param storage_group_name: name of the storage group -- str :param rep_mode: replication mode being used -- str :returns: consistency validation checks passed -- boolean """ is_valid = True sg_rdf_states = self.rest.get_storage_group_rdf_group_state( array, storage_group_name, rdf_group_no) # Verify Async & Metro modes only have a single state if rep_mode is not utils.REP_SYNC: if len(sg_rdf_states) > 1: sg_states_str = (', '.join(sg_rdf_states)) LOG.error('More than one RDFG state found for storage group ' '%s. We expect a single state for all volumes when ' 'using %s replication mode. Found %s states.', storage_group_name, rep_mode, sg_states_str) is_valid = False # Determine which list of valid states to use if rep_mode is utils.REP_SYNC: valid_states = utils.RDF_VALID_STATES_SYNC elif rep_mode is utils.REP_ASYNC: valid_states = utils.RDF_VALID_STATES_ASYNC else: valid_states = utils.RDF_VALID_STATES_METRO # Validate storage group states for state in sg_rdf_states: if state.lower() not in valid_states: valid_states_str = (', '.join(valid_states)) LOG.error('Invalid RDF state found for storage group %s. ' 'Found state %s. Valid states are %s.', storage_group_name, state, valid_states_str) is_valid = False return is_valid def _validate_rdf_group_storage_group_exclusivity( self, array, storage_group_name): """Validate that a storage group only has one RDF group. :param array: array serial number -- str :param storage_group_name: name of storage group -- str :returns: consistency validation checks passed -- boolean """ is_valid = True sg_rdf_groups = self.rest.get_storage_group_rdf_groups( array, storage_group_name) if len(sg_rdf_groups) > 1: rdf_groups_str = ', '.join(sg_rdf_groups) LOG.error('Detected more than one RDF group associated with ' 'storage group %s. Only one RDFG should be associated ' 'with a storage group. Found RDF groups %s', storage_group_name, rdf_groups_str) is_valid = False return is_valid def _validate_management_group_volume_consistency( self, array, management_sg_name, rdf_group_number): """Validate volume consistency between management SG and RDF group :param array: array serial number -- str :param management_sg_name: name of storage group -- str :param rdf_group_number: rdf group number to check -- str :returns: consistency validation checks passed -- boolean """ is_valid = True rdfg_volumes = self.rest.get_rdf_group_volume_list( array, rdf_group_number) sg_volumes = self.rest.get_volumes_in_storage_group( array, management_sg_name) missing_volumes = list() for rdfg_volume in rdfg_volumes: if rdfg_volume not in sg_volumes: missing_volumes.append(rdfg_volume) if missing_volumes: missing_volumes_str = ', '.join(missing_volumes) LOG.error( 'Inconsistency found between management group %s and RDF ' 'group %s. The following volumes are not in the management ' 'storage group %s. 
All Asynchronous and Metro volumes must ' 'be managed together in their respective management storage ' 'groups.', management_sg_name, rdf_group_number, missing_volumes_str) is_valid = False return is_valid def _reset_identifier_on_rollback(self, array, volume_name): """Reset the user supplied name on a rollback :param array: the serial number -- str :param volume_name: the volume name assigned -- str """ # Find volume based on identifier name dev_id_from_identifier = self.rest.find_volume_device_id( array, volume_name) if dev_id_from_identifier and isinstance( dev_id_from_identifier, str): vol_identifier_name = self.rest.find_volume_identifier( array, dev_id_from_identifier) if vol_identifier_name and ( vol_identifier_name == volume_name): LOG.warning( "Attempting to reset name of %(vol_name)s on device " "%(dev_ident)s on a create volume rollback operation.", {'vol_name': volume_name, 'dev_ident': dev_id_from_identifier}) self.rest.rename_volume( array, dev_id_from_identifier, None) @staticmethod def get_vendor_properties(self): """Retrieves the vendor properties for the powermax driver. :param self: The object instance. :return: A tuple containing the properties dictionary and the driver name. """ properties = {} self._set_property( properties, utils.DISABLE_PROTECTED_SNAP, "Disable protected snap", _("Prevent protected snap being created on SRDF device."), "boolean") return properties, "powermax" def get_target_nqn(self, ips, nvme_connector): """Discover the NVMe Qualified Name (NQN) of NVMe targets. :param self: The object instance. :param ips: A list of tuples where each tuple contains the IP address and other connection details of the NVMe targets. :param nvme_connector: The object of nvme connector :return: The NVMe Qualified Name (NQN) of the first discovered target, or None if no matching target is found. """ for ip in ips: command = ['discover', '-t', ip[2], '-a', ip[0], '-s', utils.POWERMAX_NVME_TCP_PORT, '-o', 'json'] out, err = nvme_connector.run_nvme_cli(command) if out is not None and len(out) > 0: nvme_discover_result = json.loads(out) if "records" in nvme_discover_result: for record in nvme_discover_result["records"]: if record["traddr"] == ip[0]: return record["subnqn"] ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/drivers/dell_emc/powermax/fc.py0000664000175000017500000010232600000000000023674 0ustar00zuulzuul00000000000000# Copyright (c) 2020 Dell Inc. or its subsidiaries. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import ast from oslo_log import log as logging from cinder.common import constants from cinder import coordination from cinder import exception from cinder import interface from cinder.volume import driver from cinder.volume.drivers.dell_emc.powermax import common from cinder.volume.drivers.san import san from cinder.zonemanager import utils as fczm_utils LOG = logging.getLogger(__name__) @interface.volumedriver class PowerMaxFCDriver(san.SanDriver, driver.FibreChannelDriver): """FC Drivers for PowerMax using REST. Version history: .. code-block:: none 1.0.0 - Initial driver 1.1.0 - Multiple pools and thick/thin provisioning, performance enhancement. 2.0.0 - Add driver requirement functions 2.1.0 - Add consistency group functions 2.1.1 - Fixed issue with mismatched config (bug #1442376) 2.1.2 - Clean up failed clones (bug #1440154) 2.1.3 - Fixed a problem with FAST support (bug #1435069) 2.2.0 - Add manage/unmanage 2.2.1 - Support for SE 8.0.3 2.2.2 - Update Consistency Group 2.2.3 - Pool aware scheduler(multi-pool) support 2.2.4 - Create CG from CG snapshot 2.3.0 - Name change for MV and SG for FAST (bug #1515181) - Fix for randomly choosing port group. (bug #1501919) - get_short_host_name needs to be called in find_device_number (bug #1520635) - Proper error handling for invalid SLOs (bug #1512795) - Extend Volume for VMAX3, SE8.1.0.3 https://blueprints.launchpad.net/cinder/+spec/vmax3-extend-volume - Incorrect SG selected on an attach (#1515176) - Cleanup Zoning (bug #1501938) NOTE: FC only - Last volume in SG fix - _remove_last_vol_and_delete_sg is not being called for VMAX3 (bug #1520549) - necessary updates for CG changes (#1534616) - Changing PercentSynced to CopyState (bug #1517103) - Getting iscsi ip from port in existing masking view - Replacement of EMCGetTargetEndpoints api (bug #1512791) - VMAX3 snapvx improvements (bug #1522821) - Operations and timeout issues (bug #1538214) 2.4.0 - EMC VMAX - locking SG for concurrent threads (bug #1554634) - SnapVX licensing checks for VMAX3 (bug #1587017) - VMAX oversubscription Support (blueprint vmax-oversubscription) - QoS support (blueprint vmax-qos) 2.5.0 - Attach and detach snapshot (blueprint vmax-attach-snapshot) - MVs and SGs not reflecting correct protocol (bug #1640222) - Storage assisted volume migration via retype (bp vmax-volume-migration) - Support for compression on All Flash - Volume replication 2.1 (bp add-vmax-replication) - rename and restructure driver (bp vmax-rename-dell-emc) 3.0.0 - REST based driver - Retype (storage-assisted migration) - QoS support - Support for compression on All Flash - Support for volume replication - Support for live migration - Support for Generic Volume Group 3.1.0 - Support for replication groups (Tiramisu) - Deprecate backend xml configuration - Support for async replication (vmax-replication-enhancements) - Support for SRDF/Metro (vmax-replication-enhancements) - Support for manage/unmanage snapshots (vmax-manage-unmanage-snapshot) - Support for revert to volume snapshot 3.2.0 - Support for retyping replicated volumes (bp vmax-retype-replicated-volumes) - Support for multiattach volumes (bp vmax-allow-multi-attach) - Support for list manageable volumes and snapshots (bp/vmax-list-manage-existing) - Fix for SSL verification/cert application (bug #1772924) - Log VMAX metadata of a volume (bp vmax-metadata) - Fix for get-pools command (bug #1784856) 4.0.0 - Fix for initiator retrieval and short hostname unmapping (bugs #1783855 #1783867) - Fix for HyperMax OS Upgrade Bug (bug #1790141) - 
Support for failover to secondary Unisphere (bp/vmax-unisphere-failover) - Rebrand from VMAX to PowerMax(bp/vmax-powermax-rebrand) - Change from 84 to 90 REST endpoints (bug #1808539) - Fix for PowerMax OS replication settings (bug #1812685) - Support for storage-assisted in-use retype (bp/powermax-storage-assisted-inuse-retype) 4.1.0 - Changing from 90 to 91 rest endpoints - Support for Rapid TDEV Delete (bp powermax-tdev-deallocation) - PowerMax OS Metro formatted volumes fix (bug #1829876) - Support for Metro ODE (bp/powermax-metro-ode) - Removal of san_rest_port from PowerMax cinder.conf config - SnapVX noCopy mode enabled for all links - Volume/Snapshot backed metadata inclusion - Debug metadata compression and service level info fix 4.2.0 - Support of Unisphere storage group and array tags - User defined override for short host name and port group name (bp powermax-user-defined-hostname-portgroup) - Switch to Unisphere REST API public replication endpoints - Support for multiple replication devices - Pools bug fix allowing 'None' variants (bug #1873253) 4.3.0 - Changing from 91 to 92 REST endpoints - Support for Port Group and Port load balancing (bp powermax-port-load-balance) - Fix to enable legacy volumes to live migrate (#1867163) - Use of snap id instead of generation (bp powermax-snapset-ids) - Support for Failover Abilities (bp/powermax-failover-abilities) 4.4.0 - Early check for status of port 4.4.1 - Report trim/discard support 4.5.0 - Add PowerMax v4 support 4.5.1 - Add active/active compliance 4.5.2 - Add 'disable_protected_snap' option """ VERSION = "4.5.2" SUPPORTS_ACTIVE_ACTIVE = True # ThirdPartySystems wiki CI_WIKI_NAME = "DellEMC_PowerMAX_CI" driver_prefix = 'powermax' def __init__(self, *args, **kwargs): super(PowerMaxFCDriver, self).__init__(*args, **kwargs) self.active_backend_id = kwargs.get('active_backend_id', None) self.common = common.PowerMaxCommon( 'FC', self.VERSION, configuration=self.configuration, active_backend_id=self.active_backend_id) self.performance = self.common.performance self.rest = self.common.rest self.zonemanager_lookup_service = fczm_utils.create_lookup_service() @classmethod def get_driver_options(cls): additional_opts = cls._get_oslo_driver_opts( 'san_ip', 'san_login', 'san_password', 'driver_ssl_cert_verify', 'max_over_subscription_ratio', 'reserved_percentage', 'replication_device') return common.powermax_opts + additional_opts def check_for_setup_error(self): pass def _init_vendor_properties(self): return self.common.get_vendor_properties(self) def create_volume(self, volume): """Creates a PowerMax/VMAX volume. :param volume: the cinder volume object :returns: provider location dict """ return self.common.create_volume(volume) def create_volume_from_snapshot(self, volume, snapshot): """Creates a volume from a snapshot. :param volume: the cinder volume object :param snapshot: the cinder snapshot object :returns: provider location dict """ return self.common.create_volume_from_snapshot( volume, snapshot) def create_cloned_volume(self, volume, src_vref): """Creates a cloned volume. :param volume: the cinder volume object :param src_vref: the source volume reference :returns: provider location dict """ return self.common.create_cloned_volume(volume, src_vref) def delete_volume(self, volume): """Deletes a PowerMax/VMAX volume. :param volume: the cinder volume object """ self.common.delete_volume(volume) def create_snapshot(self, snapshot): """Creates a snapshot. 
:param snapshot: the cinder snapshot object :returns: provider location dict """ src_volume = snapshot.volume return self.common.create_snapshot(snapshot, src_volume) def delete_snapshot(self, snapshot): """Deletes a snapshot. :param snapshot: the cinder snapshot object """ src_volume = snapshot.volume self.common.delete_snapshot(snapshot, src_volume) def ensure_export(self, context, volume): """Driver entry point to get the export info for an existing volume. :param context: the context :param volume: the cinder volume object """ pass def create_export(self, context, volume, connector): """Driver entry point to get the export info for a new volume. :param context: the context :param volume: the cinder volume object :param connector: the connector object """ pass def remove_export(self, context, volume): """Driver entry point to remove an export for a volume. :param context: the context :param volume: the cinder volume object """ pass @staticmethod def check_for_export(context, volume_id): """Make sure volume is exported. :param context: the context :param volume_id: the volume id """ pass @coordination.synchronized('{self.driver_prefix}-{volume.id}') def initialize_connection(self, volume, connector): """Initializes the connection and returns connection info. Assign any created volume to a compute node/host so that it can be used from that host. The driver returns a driver_volume_type of 'fibre_channel'. The target_wwn can be a single entry or a list of wwns that correspond to the list of remote wwn(s) that will export the volume. Example return values: .. code-block:: json { 'driver_volume_type': 'fibre_channel' 'data': { 'target_discovered': True, 'target_lun': 1, 'target_wwn': '1234567890123', } } or { 'driver_volume_type': 'fibre_channel' 'data': { 'target_discovered': True, 'target_lun': 1, 'target_wwn': ['1234567890123', '0987654321321'], } } :param volume: the cinder volume object :param connector: the connector object :returns: dict -- the target_wwns and initiator_target_map """ device_info = self.common.initialize_connection( volume, connector) if device_info: conn_info = self.populate_data(device_info, volume, connector) fczm_utils.add_fc_zone(conn_info) return conn_info else: return {} def populate_data(self, device_info, volume, connector): """Populate data dict. Add relevant data to data dict, target_lun, target_wwn and initiator_target_map. :param device_info: device_info :param volume: the volume object :param connector: the connector object :returns: dict -- the target_wwns and initiator_target_map """ device_number = device_info['hostlunid'] target_wwns, init_targ_map = self._build_initiator_target_map( volume, connector, device_info) data = {'driver_volume_type': 'fibre_channel', 'data': {'target_lun': device_number, 'target_discovered': True, 'target_wwn': target_wwns, 'discard': True, 'initiator_target_map': init_targ_map}} LOG.debug("Return FC data for zone addition: %(data)s.", {'data': data}) return data @coordination.synchronized('{self.driver_prefix}-{volume.id}') def terminate_connection(self, volume, connector, **kwargs): """Disallow connection from connector. Return empty data if other volumes are in the same zone. The FibreChannel ZoneManager doesn't remove zones if there isn't an initiator_target_map in the return of terminate_connection. 
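        An illustrative return value when the zone is to be removed (the
        WWNs below are placeholders):

        .. code-block:: json

            {
                'driver_volume_type': 'fibre_channel',
                'data': {
                    'target_wwn': ['1234567890123', '0987654321321'],
                    'initiator_target_map': {
                        '1122334455667788': ['1234567890123',
                                             '0987654321321']}}}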
:param volume: the volume object :param connector: the connector object :returns: dict -- the target_wwns and initiator_target_map if the zone is to be removed, otherwise empty """ data = {'driver_volume_type': 'fibre_channel', 'data': {}} zoning_mappings = {} if connector: zoning_mappings = self._get_zoning_mappings(volume, connector) if zoning_mappings: self.common.terminate_connection(volume, connector) data = self._cleanup_zones(zoning_mappings) fczm_utils.remove_fc_zone(data) return data def _get_zoning_mappings(self, volume, connector): """Get zoning mappings by building up initiator/target map. :param volume: the volume object :param connector: the connector object :returns: dict -- the target_wwns and initiator_target_map if the zone is to be removed, otherwise empty """ loc = volume.provider_location name = ast.literal_eval(loc) host_label = self.common.utils.get_host_name_label( connector['host'], self.common.powermax_short_host_name_template) zoning_mappings = {} try: array = name['array'] device_id = name['device_id'] except KeyError: array = name['keybindings']['SystemName'].split('+')[1].strip('-') device_id = name['keybindings']['DeviceID'] LOG.debug("Start FC detach process for volume: %(volume)s.", {'volume': volume.name}) masking_views, is_metro = ( self.common.get_masking_views_from_volume( array, volume, device_id, host_label)) if not masking_views: # Backward compatibility with pre Ussuri short host name. host_label = self.common.utils.get_host_short_name( connector['host']) masking_views, is_metro = ( self.common.get_masking_views_from_volume( array, volume, device_id, host_label)) if masking_views: portgroup = ( self.common.get_port_group_from_masking_view( array, masking_views[0])) initiator_group = ( self.common.get_initiator_group_from_masking_view( array, masking_views[0])) LOG.debug("Found port group: %(portGroup)s " "in masking view %(maskingView)s.", {'portGroup': portgroup, 'maskingView': masking_views[0]}) # Map must be populated before the terminate_connection target_wwns, init_targ_map = self._build_initiator_target_map( volume, connector) zoning_mappings = {'port_group': portgroup, 'initiator_group': initiator_group, 'target_wwns': target_wwns, 'init_targ_map': init_targ_map, 'array': array} if is_metro: rep_data = volume.replication_driver_data name = ast.literal_eval(rep_data) try: metro_array = name['array'] metro_device_id = name['device_id'] except KeyError: LOG.error("Cannot get remote Metro device information " "for zone cleanup. Attempting terminate " "connection...") else: masking_views, __ = ( self.common.get_masking_views_from_volume( metro_array, volume, metro_device_id, host_label)) if masking_views: metro_portgroup = ( self.common.get_port_group_from_masking_view( metro_array, masking_views[0])) metro_ig = ( self.common.get_initiator_group_from_masking_view( metro_array, masking_views[0])) zoning_mappings.update( {'metro_port_group': metro_portgroup, 'metro_ig': metro_ig, 'metro_array': metro_array}) if not masking_views: LOG.warning("Volume %(volume)s is not in any masking view.", {'volume': volume.name}) return zoning_mappings def _cleanup_zones(self, zoning_mappings): """Cleanup zones after terminate connection. 
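        The zoning mapping dict is the structure built by
        _get_zoning_mappings, for example (placeholder values):

        .. code-block:: default

            {'port_group': 'OS-PG',
             'initiator_group': 'OS-myhost-IG',
             'target_wwns': ['1234567890123'],
             'init_targ_map': {'1122334455667788': ['1234567890123']},
             'array': '000123456789'}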
:param zoning_mappings: zoning mapping dict :returns: data - dict """ data = {'driver_volume_type': 'fibre_channel', 'data': {}} try: LOG.debug("Looking for masking views still associated with " "Port Group %s.", zoning_mappings['port_group']) masking_views = self.common.get_common_masking_views( zoning_mappings['array'], zoning_mappings['port_group'], zoning_mappings['initiator_group']) except (KeyError, ValueError, TypeError): masking_views = [] if masking_views: LOG.debug("Found %(numViews)d MaskingViews.", {'numViews': len(masking_views)}) else: # no masking views found # Check if there any Metro masking views if zoning_mappings.get('metro_array'): masking_views = self.common.get_common_masking_views( zoning_mappings['metro_array'], zoning_mappings['metro_port_group'], zoning_mappings['metro_ig']) if not masking_views: LOG.debug("No MaskingViews were found. Deleting zone.") data = {'driver_volume_type': 'fibre_channel', 'data': {'target_wwn': zoning_mappings['target_wwns'], 'initiator_target_map': zoning_mappings['init_targ_map']}} LOG.debug("Return FC data for zone removal: %(data)s.", {'data': data}) return data def _build_initiator_target_map(self, volume, connector, device_info=None): """Build the target_wwns and the initiator target map. :param volume: the cinder volume object :param connector: the connector object :param device_info: device_info :returns: target_wwns -- list, init_targ_map -- dict """ target_wwns, init_targ_map = [], {} initiator_wwns = connector['wwpns'] fc_targets, metro_fc_targets = ( self.common.get_target_wwns_from_masking_view( volume, connector)) # If load balance is enabled we want to select only the FC target that # has the lowest load of all ports in selected port group. # Note: device_info in if condition as this method is called also for # terminate connection, we only want to calculate load on initialise # connection. if device_info and self.performance.config.get('load_balance'): try: array_id = device_info.get('array') masking_view = device_info.get('maskingview') # Get PG from MV port_group = self.rest.get_element_from_masking_view( array_id, masking_view, portgroup=True) # Get port list from PG port_list = self.rest.get_port_ids(array_id, port_group) # Get lowest load port in PG load, metric, port = self.performance.process_port_load( array_id, port_list) LOG.info("Lowest %(met)s load port is %(port)s: %(load)s", {'met': metric, 'port': port, 'load': load}) # Get target WWN port_details = self.rest.get_port(array_id, port) port_info = port_details.get('symmetrixPort') port_wwn = port_info.get('identifier') LOG.info("Port %(p)s WWN: %(wwn)s", {'p': port, 'wwn': port_wwn}) # Set lowest load port WWN as FC target for connection fc_targets = [port_wwn] except exception.VolumeBackendAPIException: LOG.error("There was an error calculating port load, " "reverting to default target selection.") fc_targets, __ = ( self.common.get_target_wwns_from_masking_view( volume, connector)) if self.zonemanager_lookup_service: fc_targets.extend(metro_fc_targets) mapping = ( self.zonemanager_lookup_service. get_device_mapping_from_network(initiator_wwns, fc_targets)) for entry in mapping: map_d = mapping[entry] target_wwns.extend(map_d['target_port_wwn_list']) for initiator in map_d['initiator_port_wwn_list']: init_targ_map[initiator] = map_d['target_port_wwn_list'] else: # No lookup service, pre-zoned case. 
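            # With no lookup service the fabric is assumed pre-zoned, so each
            # initiator WWPN is mapped to the full list of FC targets
            # (including any Metro targets).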
target_wwns = fc_targets fc_targets.extend(metro_fc_targets) for initiator in initiator_wwns: init_targ_map[initiator] = fc_targets return list(set(target_wwns)), init_targ_map def extend_volume(self, volume, new_size): """Extend an existing volume. :param volume: the cinder volume object :param new_size: the required new size """ self.common.extend_volume(volume, new_size) def _update_volume_stats(self): """Retrieve stats info from volume group.""" LOG.debug("Updating volume stats") data = self.common.update_volume_stats() data['storage_protocol'] = constants.FC data['driver_version'] = self.VERSION self._stats = data def manage_existing(self, volume, external_ref): """Manages an existing PowerMax/VMAX Volume (import to Cinder). Renames the Volume to match the expected name for the volume. Also need to consider things like QoS, Emulation, account/tenant. :param volume: the volume object :param external_ref: the reference for the PowerMax/VMAX volume :returns: model_update """ return self.common.manage_existing(volume, external_ref) def manage_existing_get_size(self, volume, external_ref): """Return size of an existing PowerMax/VMAX volume to manage_existing. :param self: reference to class :param volume: the volume object including the volume_type_id :param external_ref: reference to the existing volume :returns: size of the volume in GB """ return self.common.manage_existing_get_size(volume, external_ref) def unmanage(self, volume): """Export PowerMax/VMAX volume from Cinder. Leave the volume intact on the backend array. """ return self.common.unmanage(volume) def manage_existing_snapshot(self, snapshot, existing_ref): """Manage an existing PowerMax/VMAX Snapshot (import to Cinder). Renames the Snapshot to prefix it with OS- to indicate it is managed by Cinder. :param snapshot: the snapshot object :param existing_ref: the snapshot name on the backend PowerMax/VMAX :returns: model_update """ return self.common.manage_existing_snapshot(snapshot, existing_ref) def manage_existing_snapshot_get_size(self, snapshot, existing_ref): """Return the size of the source volume for manage-existing-snapshot. :param snapshot: the snapshot object :param existing_ref: the snapshot name on the backend PowerMax/VMAX :returns: size of the source volume in GB """ return self.common.manage_existing_snapshot_get_size(snapshot) def unmanage_snapshot(self, snapshot): """Export PowerMax/VMAX Snapshot from Cinder. Leaves the snapshot intact on the backend PowerMax/VMAX. :param snapshot: the snapshot object """ self.common.unmanage_snapshot(snapshot) def get_manageable_volumes(self, cinder_volumes, marker, limit, offset, sort_keys, sort_dirs): """Lists all manageable volumes. :param cinder_volumes: List of currently managed Cinder volumes. Unused in driver. :param marker: Begin returning volumes that appear later in the volume list than that represented by this reference. :param limit: Maximum number of volumes to return. Default=1000. :param offset: Number of volumes to skip after marker. :param sort_keys: Results sort key. Valid keys: size, reference. :param sort_dirs: Results sort direction. Valid dirs: asc, desc. :returns: List of dicts containing all manageable volumes. """ return self.common.get_manageable_volumes(marker, limit, offset, sort_keys, sort_dirs) def get_manageable_snapshots(self, cinder_snapshots, marker, limit, offset, sort_keys, sort_dirs): """Lists all manageable snapshots. :param cinder_snapshots: List of currently managed Cinder snapshots. Unused in driver. 
:param marker: Begin returning volumes that appear later in the snapshot list than that represented by this reference. :param limit: Maximum number of snapshots to return. Default=1000. :param offset: Number of snapshots to skip after marker. :param sort_keys: Results sort key. Valid keys: size, reference. :param sort_dirs: Results sort direction. Valid dirs: asc, desc. :returns: List of dicts containing all manageable snapshots. """ return self.common.get_manageable_snapshots(marker, limit, offset, sort_keys, sort_dirs) def retype(self, ctxt, volume, new_type, diff, host): """Migrate volume to another host using retype. :param ctxt: context :param volume: the volume object including the volume_type_id :param new_type: the new volume type. :param diff: difference between old and new volume types. Unused in driver. :param host: the host dict holding the relevant target(destination) information :returns: boolean -- True if retype succeeded, False if error """ return self.common.retype(volume, new_type, host) def failover_host(self, context, volumes, secondary_id=None, groups=None): """Failover volumes to a secondary host/ backend. :param context: the context :param volumes: the list of volumes to be failed over :param secondary_id: the backend to be failed over to, is 'default' if fail back :param groups: replication groups :returns: secondary_id, volume_update_list, group_update_list """ active_backend_id, volume_update_list, group_update_list = ( self.common.failover(volumes, secondary_id, groups)) self.common.failover_completed(secondary_id, False) return active_backend_id, volume_update_list, group_update_list def failover(self, context, volumes, secondary_id=None, groups=None): """Like failover but for a host that is clustered.""" return self.common.failover(volumes, secondary_id, groups) def failover_completed(self, context, active_backend_id=None): """This method is called after failover for clustered backends.""" return self.common.failover_completed(active_backend_id, True) def create_group(self, context, group): """Creates a generic volume group. :param context: the context :param group: the group object :returns: model_update """ return self.common.create_group(context, group) def delete_group(self, context, group, volumes): """Deletes a generic volume group. :param context: the context :param group: the group object :param volumes: the member volumes """ return self.common.delete_group( context, group, volumes) def create_group_snapshot(self, context, group_snapshot, snapshots): """Creates a group snapshot. :param context: the context :param group_snapshot: the grouop snapshot :param snapshots: snapshots list """ return self.common.create_group_snapshot(context, group_snapshot, snapshots) def delete_group_snapshot(self, context, group_snapshot, snapshots): """Deletes a group snapshot. :param context: the context :param group_snapshot: the grouop snapshot :param snapshots: snapshots list """ return self.common.delete_group_snapshot(context, group_snapshot, snapshots) def update_group(self, context, group, add_volumes=None, remove_volumes=None): """Updates LUNs in generic volume group. :param context: the context :param group: the group object :param add_volumes: flag for adding volumes :param remove_volumes: flag for removing volumes """ return self.common.update_group(group, add_volumes, remove_volumes) def create_group_from_src( self, context, group, volumes, group_snapshot=None, snapshots=None, source_group=None, source_vols=None): """Creates the volume group from source. 
:param context: the context :param group: the group object to be created :param volumes: volumes in the group :param group_snapshot: the source volume group snapshot :param snapshots: snapshots of the source volumes :param source_group: the dictionary of a volume group as source. :param source_vols: a list of volume dictionaries in the source_group. """ return self.common.create_group_from_src( context, group, volumes, group_snapshot, snapshots, source_group, source_vols) def enable_replication(self, context, group, volumes): """Enable replication for a group. :param context: the context :param group: the group object :param volumes: the list of volumes :returns: model_update, None """ return self.common.enable_replication(context, group, volumes) def disable_replication(self, context, group, volumes): """Disable replication for a group. :param context: the context :param group: the group object :param volumes: the list of volumes :returns: model_update, None """ return self.common.disable_replication(context, group, volumes) def failover_replication(self, context, group, volumes, secondary_backend_id=None): """Failover replication for a group. :param context: the context :param group: the group object :param volumes: the list of volumes :param secondary_backend_id: the secondary backend id - default None :returns: model_update, vol_model_updates """ return self.common.failover_replication( context, group, volumes, secondary_backend_id) def revert_to_snapshot(self, context, volume, snapshot): """Revert volume to snapshot :param context: the context :param volume: the cinder volume object :param snapshot: the cinder snapshot object """ self.common.revert_to_snapshot(volume, snapshot) @classmethod def clean_volume_file_locks(cls, volume_id): coordination.synchronized_remove(f'{cls.driver_prefix}-{volume_id}') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/drivers/dell_emc/powermax/iscsi.py0000664000175000017500000007245300000000000024425 0ustar00zuulzuul00000000000000# Copyright (c) 2020 Dell Inc. or its subsidiaries. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ ISCSI Drivers for Dell EMC PowerMax/PowerMax/VMAX arrays based on REST. """ import random from oslo_log import log as logging from oslo_utils import strutils from cinder.common import constants from cinder import coordination from cinder import exception from cinder.i18n import _ from cinder import interface from cinder.volume.drivers.dell_emc.powermax import common from cinder.volume.drivers.san import san LOG = logging.getLogger(__name__) @interface.volumedriver class PowerMaxISCSIDriver(san.SanISCSIDriver): """ISCSI Drivers for PowerMax using Rest. Version history: .. code-block:: none 1.0.0 - Initial driver 1.1.0 - Multiple pools and thick/thin provisioning, performance enhancement. 
2.0.0 - Add driver requirement functions 2.1.0 - Add consistency group functions 2.1.1 - Fixed issue with mismatched config (bug #1442376) 2.1.2 - Clean up failed clones (bug #1440154) 2.1.3 - Fixed a problem with FAST support (bug #1435069) 2.2.0 - Add manage/unmanage 2.2.1 - Support for SE 8.0.3 2.2.2 - Update Consistency Group 2.2.3 - Pool aware scheduler(multi-pool) support 2.2.4 - Create CG from CG snapshot 2.3.0 - Name change for MV and SG for FAST (bug #1515181) - Fix for randomly choosing port group. (bug #1501919) - get_short_host_name needs to be called in find_device_number (bug #1520635) - Proper error handling for invalid SLOs (bug #1512795) - Extend Volume for VMAX3, SE8.1.0.3 https://blueprints.launchpad.net/cinder/+spec/vmax3-extend-volume - Incorrect SG selected on an attach (#1515176) - Cleanup Zoning (bug #1501938) NOTE: FC only - Last volume in SG fix - _remove_last_vol_and_delete_sg is not being called for VMAX3 (bug #1520549) - necessary updates for CG changes (#1534616) - Changing PercentSynced to CopyState (bug #1517103) - Getting iscsi ip from port in existing masking view - Replacement of EMCGetTargetEndpoints api (bug #1512791) - VMAX3 snapvx improvements (bug #1522821) - Operations and timeout issues (bug #1538214) 2.4.0 - EMC VMAX - locking SG for concurrent threads (bug #1554634) - SnapVX licensing checks for VMAX3 (bug #1587017) - VMAX oversubscription Support (blueprint vmax-oversubscription) - QoS support (blueprint vmax-qos) - VMAX2/VMAX3 iscsi multipath support (iscsi only) https://blueprints.launchpad.net/cinder/+spec/vmax-iscsi-multipath 2.5.0 - Attach and detach snapshot (blueprint vmax-attach-snapshot) - MVs and SGs not reflecting correct protocol (bug #1640222) - Storage assisted volume migration via retype (bp vmax-volume-migration) - Support for compression on All Flash - Volume replication 2.1 (bp add-vmax-replication) - rename and restructure driver (bp vmax-rename-dell-emc) 3.0.0 - REST based driver - Retype (storage-assisted migration) - QoS support - Support for compression on All Flash - Support for volume replication - Support for live migration - Support for Generic Volume Group 3.1.0 - Support for replication groups (Tiramisu) - Deprecate backend xml configuration - Support for async replication (vmax-replication-enhancements) - Support for SRDF/Metro (vmax-replication-enhancements) - Support for manage/unmanage snapshots (vmax-manage-unmanage-snapshot) - Support for revert to volume snapshot 3.2.0 - Support for retyping replicated volumes (bp vmax-retype-replicated-volumes) - Support for multiattach volumes (bp vmax-allow-multi-attach) - Support for list manageable volumes and snapshots (bp/vmax-list-manage-existing) - Fix for SSL verification/cert application (bug #1772924) - Log VMAX metadata of a volume (bp vmax-metadata) - Fix for get-pools command (bug #1784856) 4.0.0 - Fix for initiator retrieval and short hostname unmapping (bugs #1783855 #1783867) - Fix for HyperMax OS Upgrade Bug (bug #1790141) - Support for failover to secondary Unisphere (bp/vmax-unisphere-failover) - Rebrand from VMAX to PowerMax(bp/vmax-powermax-rebrand) - Change from 84 to 90 REST endpoints (bug #1808539) - Fix for PowerMax OS replication settings (bug #1812685) - Support for storage-assisted in-use retype (bp/powermax-storage-assisted-inuse-retype) 4.1.0 - Changing from 90 to 91 rest endpoints - Support for Rapid TDEV Delete (bp powermax-tdev-deallocation) - PowerMax OS Metro formatted volumes fix (bug #1829876) - Support for Metro ODE 
(bp/powermax-metro-ode) - Removal of san_rest_port from PowerMax cinder.conf config - SnapVX noCopy mode enabled for all links - Volume/Snapshot backed metadata inclusion - Debug metadata compression and service level info fix 4.2.0 - Support of Unisphere storage group and array tags - User defined override for short host name and port group name (bp powermax-user-defined-hostname-portgroup) - Switch to Unisphere REST API public replication endpoints - Support for multiple replication devices - Pools bug fix allowing 'None' variants (bug #1873253) 4.3.0 - Changing from 91 to 92 REST endpoints - Support for Port Group and Port load balancing (bp powermax-port-load-balance) - Fix to enable legacy volumes to live migrate (#1867163) - Use of snap id instead of generation (bp powermax-snapset-ids) - Support for Failover Abilities (bp/powermax-failover-abilities) 4.4.0 - Early check for status of port 4.4.1 - Report trim/discard support 4.5.0 - Add PowerMax v4 support 4.5.1 - Add active/active compliance 4.5.2 - Add 'disable_protected_snap' option """ VERSION = "4.5.2" SUPPORTS_ACTIVE_ACTIVE = True # ThirdPartySystems wiki CI_WIKI_NAME = "DellEMC_PowerMAX_CI" driver_prefix = 'powermax' def __init__(self, *args, **kwargs): super(PowerMaxISCSIDriver, self).__init__(*args, **kwargs) self.active_backend_id = kwargs.get('active_backend_id', None) self.common = ( common.PowerMaxCommon( 'iSCSI', self.VERSION, configuration=self.configuration, active_backend_id=self.active_backend_id)) self.performance = self.common.performance @classmethod def get_driver_options(cls): additional_opts = cls._get_oslo_driver_opts( 'san_ip', 'san_login', 'san_password', 'driver_ssl_cert_verify', 'max_over_subscription_ratio', 'reserved_percentage', 'replication_device', 'use_chap_auth', 'chap_username', 'chap_password') return common.powermax_opts + additional_opts def check_for_setup_error(self): pass def _init_vendor_properties(self): return self.common.get_vendor_properties(self) def create_volume(self, volume): """Creates a PowerMax/VMAX volume. :param volume: the cinder volume object :returns: provider location dict """ return self.common.create_volume(volume) def create_volume_from_snapshot(self, volume, snapshot): """Creates a volume from a snapshot. :param volume: the cinder volume object :param snapshot: the cinder snapshot object :returns: provider location dict """ return self.common.create_volume_from_snapshot( volume, snapshot) def create_cloned_volume(self, volume, src_vref): """Creates a cloned volume. :param volume: the cinder volume object :param src_vref: the source volume reference :returns: provider location dict """ return self.common.create_cloned_volume(volume, src_vref) def delete_volume(self, volume): """Deletes a PowerMax/VMAX volume. :param volume: the cinder volume object """ self.common.delete_volume(volume) def create_snapshot(self, snapshot): """Creates a snapshot. :param snapshot: the cinder snapshot object :returns: provider location dict """ src_volume = snapshot.volume return self.common.create_snapshot(snapshot, src_volume) def delete_snapshot(self, snapshot): """Deletes a snapshot. :param snapshot: the cinder snapshot object """ src_volume = snapshot.volume self.common.delete_snapshot(snapshot, src_volume) def ensure_export(self, context, volume): """Driver entry point to get the export info for an existing volume. 
:param context: the context :param volume: the cinder volume object """ pass def create_export(self, context, volume, connector): """Driver entry point to get the export info for a new volume. :param context: the context :param volume: the cinder volume object :param connector: the connector object """ pass def remove_export(self, context, volume): """Driver entry point to remove an export for a volume. :param context: the context :param volume: the cinder volume object """ pass @staticmethod def check_for_export(context, volume_id): """Make sure volume is exported. :param context: the context :param volume_id: the volume id """ @coordination.synchronized('{self.driver_prefix}-{volume.id}') def initialize_connection(self, volume, connector): """Initializes the connection and returns connection info. The iscsi driver returns a driver_volume_type of 'iscsi'. the format of the driver data is defined in smis_get_iscsi_properties. Example return value: .. code-block:: default { 'driver_volume_type': 'iscsi', 'data': { 'target_discovered': True, 'target_iqn': 'iqn.2010-10.org.openstack:volume-00000001', 'target_portal': '127.0.0.0.1:3260', 'volume_id': '12345678-1234-4321-1234-123456789012' } } Example return value (multipath is enabled): .. code-block:: default { 'driver_volume_type': 'iscsi', 'data': { 'target_discovered': True, 'target_iqns': ['iqn.2010-10.org.openstack:volume-00001', 'iqn.2010-10.org.openstack:volume-00002'], 'target_portals': ['127.0.0.1:3260', '127.0.1.1:3260'], 'target_luns': [1, 1] } } :param volume: the cinder volume object :param connector: the connector object :returns: dict -- the iscsi dict """ device_info = self.common.initialize_connection( volume, connector) if device_info: return self.get_iscsi_dict(device_info, volume) else: return {} def get_iscsi_dict(self, device_info, volume): """Populate iscsi dict to pass to nova. :param device_info: device info dict :param volume: volume object :returns: iscsi dict """ metro_ip_iqn, metro_host_lun = None, None try: array_id = device_info['array'] ip_and_iqn = device_info['ip_and_iqn'] is_multipath = device_info['is_multipath'] host_lun_id = device_info['hostlunid'] except KeyError as e: exception_message = (_("Cannot get iSCSI ipaddresses, multipath " "flag, or hostlunid. Exception is %(e)s.") % {'e': str(e)}) raise exception.VolumeBackendAPIException( message=exception_message) if device_info.get('metro_ip_and_iqn'): LOG.debug("Volume is Metro device...") metro_ip_iqn = device_info['metro_ip_and_iqn'] metro_host_lun = device_info['metro_hostlunid'] iscsi_properties = self.vmax_get_iscsi_properties( array_id, volume, ip_and_iqn, is_multipath, host_lun_id, metro_ip_iqn, metro_host_lun) LOG.info("iSCSI properties are: %(props)s", {'props': strutils.mask_dict_password(iscsi_properties)}) LOG.info("ISCSI volume is: %(volume)s.", {'volume': volume}) return {'driver_volume_type': 'iscsi', 'data': iscsi_properties} def vmax_get_iscsi_properties( self, array_id, volume, ip_and_iqn, is_multipath, host_lun_id, metro_ip_iqn, metro_host_lun): """Gets iscsi configuration. We ideally get saved information in the volume entity, but fall back to discovery if need be. Discovery may be completely removed in future The properties are: :target_discovered: boolean indicating whether discovery was used :target_iqn: the IQN of the iSCSI target :target_portal: the portal of the iSCSI target :target_lun: the lun of the iSCSI target :volume_id: the UUID of the volume :auth_method:, :auth_username:, :auth_password: the authentication details. 
Right now, either auth_method is not present meaning no authentication, or auth_method == `CHAP` meaning use CHAP with the specified credentials. :param array_id: the array serial number :param volume: the cinder volume object :param ip_and_iqn: list of ip and iqn dicts :param is_multipath: flag for multipath :param host_lun_id: the host lun id of the device :param metro_ip_iqn: metro remote device ip and iqn, if applicable :param metro_host_lun: metro remote host lun, if applicable :returns: properties """ properties = {} populate_plurals = False tgt_iqn, tgt_portal = None, None if len(ip_and_iqn) > 1 and is_multipath: populate_plurals = True elif len(ip_and_iqn) == 1 and is_multipath and metro_ip_iqn: populate_plurals = True if populate_plurals: properties['target_portals'] = ([t['ip'] + ":3260" for t in ip_and_iqn]) properties['target_iqns'] = ([t['iqn'].split(",")[0] for t in ip_and_iqn]) properties['target_luns'] = [host_lun_id] * len(ip_and_iqn) if metro_ip_iqn: LOG.info("Volume %(vol)s is metro-enabled - " "adding additional attachment information", {'vol': volume.name}) properties['target_portals'].extend(([t['ip'] + ":3260" for t in metro_ip_iqn])) properties['target_iqns'].extend(([t['iqn'].split(",")[0] for t in metro_ip_iqn])) properties['target_luns'].extend( [metro_host_lun] * len(metro_ip_iqn)) # If load balancing is enabled select target IQN and IP address of # lowest load physical port from all ports in selected port group if self.performance.config.get('load_balance', False): try: # Get the dir/ports and create a new mapped dict tgt_map = {} for tgt in ip_and_iqn: tgt_map.update({ tgt['physical_port']: {'ip': tgt['ip'], 'iqn': tgt['iqn']}}) # Calculate load for the ports load, metric, port = self.performance.process_port_load( array_id, tgt_map.keys()) # Get the lowest load port from mapping in step 1 low_port_map = tgt_map.get(port) LOG.info("Selecting port %(port)s with %(met)s load %(load)s.", {'port': port, 'met': metric, 'load': load}) # Set the target IQN and portal tgt_iqn = low_port_map['iqn'] tgt_portal = low_port_map['ip'] + ":3260" except (KeyError, exception.VolumeBackendAPIException): LOG.error("There was an error calculating port load, " "reverting to default port selection.") # If load balancing is not enabled or if there has been a problem # calculating the load, revert to default random IP/IQN selection if not tgt_iqn or not tgt_portal: port_idx = random.randint(0, len(ip_and_iqn) - 1) tgt_iqn = ip_and_iqn[port_idx]['iqn'] tgt_portal = ip_and_iqn[port_idx]['ip'] + ":3260" properties['target_iqn'] = tgt_iqn properties['target_portal'] = tgt_portal properties['target_discovered'] = True properties['target_lun'] = host_lun_id properties['volume_id'] = volume.id properties['discard'] = True if self.configuration.safe_get('use_chap_auth'): LOG.info("Chap authentication enabled.") properties['auth_method'] = 'CHAP' properties['auth_username'] = self.configuration.safe_get( 'chap_username') properties['auth_password'] = self.configuration.safe_get( 'chap_password') return properties @coordination.synchronized('{self.driver_prefix}-{volume.id}') def terminate_connection(self, volume, connector, **kwargs): """Disallow connection from connector. :param volume: the volume object :param connector: the connector object """ self.common.terminate_connection(volume, connector) def extend_volume(self, volume, new_size): """Extend an existing volume. 
:param volume: the cinder volume object :param new_size: the required new size """ self.common.extend_volume(volume, new_size) def _update_volume_stats(self): """Retrieve stats info from volume group.""" LOG.debug("Updating volume stats") data = self.common.update_volume_stats() data['storage_protocol'] = constants.ISCSI data['driver_version'] = self.VERSION self._stats = data def manage_existing(self, volume, existing_ref): """Manages an existing PowerMax/VMAX Volume (import to Cinder). Renames the Volume to match the expected name for the volume. Also need to consider things like QoS, Emulation, account/tenant. """ return self.common.manage_existing(volume, existing_ref) def manage_existing_get_size(self, volume, existing_ref): """Return size of an existing PowerMax/VMAX volume to manage_existing. :param self: reference to class :param volume: the volume object including the volume_type_id :param external_ref: reference to the existing volume :returns: size of the volume in GB """ return self.common.manage_existing_get_size(volume, existing_ref) def unmanage(self, volume): """Export PowerMax/VMAX volume from Cinder. Leave the volume intact on the backend array. """ return self.common.unmanage(volume) def manage_existing_snapshot(self, snapshot, existing_ref): """Manage an existing PowerMax/VMAX Snapshot (import to Cinder). Renames the Snapshot to prefix it with OS- to indicate it is managed by Cinder. :param snapshot: the snapshot object :param existing_ref: the snapshot name on the backend PowerMax/VMAX :returns: model_update """ return self.common.manage_existing_snapshot(snapshot, existing_ref) def manage_existing_snapshot_get_size(self, snapshot, existing_ref): """Return the size of the source volume for manage-existing-snapshot. :param snapshot: the snapshot object :param existing_ref: the snapshot name on the backend PowerMax/VMAX :returns: size of the source volume in GB """ return self.common.manage_existing_snapshot_get_size(snapshot) def unmanage_snapshot(self, snapshot): """Export PowerMax/VMAX Snapshot from Cinder. Leaves the snapshot intact on the backend PowerMax/VMAX. :param snapshot: the snapshot object """ self.common.unmanage_snapshot(snapshot) def get_manageable_volumes(self, cinder_volumes, marker, limit, offset, sort_keys, sort_dirs): """Lists all manageable volumes. :param cinder_volumes: List of currently managed Cinder volumes. Unused in driver. :param marker: Begin returning volumes that appear later in the volume list than that represented by this reference. :param limit: Maximum number of volumes to return. Default=1000. :param offset: Number of volumes to skip after marker. :param sort_keys: Results sort key. Valid keys: size, reference. :param sort_dirs: Results sort direction. Valid dirs: asc, desc. :returns: List of dicts containing all manageable volumes. """ return self.common.get_manageable_volumes(marker, limit, offset, sort_keys, sort_dirs) def get_manageable_snapshots(self, cinder_snapshots, marker, limit, offset, sort_keys, sort_dirs): """Lists all manageable snapshots. :param cinder_snapshots: List of currently managed Cinder snapshots. Unused in driver. :param marker: Begin returning volumes that appear later in the snapshot list than that represented by this reference. :param limit: Maximum number of snapshots to return. Default=1000. :param offset: Number of snapshots to skip after marker. :param sort_keys: Results sort key. Valid keys: size, reference. :param sort_dirs: Results sort direction. Valid dirs: asc, desc. 
:returns: List of dicts containing all manageable snapshots. """ return self.common.get_manageable_snapshots(marker, limit, offset, sort_keys, sort_dirs) def retype(self, context, volume, new_type, diff, host): """Migrate volume to another host using retype. :param context: context :param volume: the volume object including the volume_type_id :param new_type: the new volume type. :param diff: difference between old and new volume types. Unused in driver. :param host: the host dict holding the relevant target(destination) information :returns: boolean -- True if retype succeeded, False if error """ return self.common.retype(volume, new_type, host) def failover_host(self, context, volumes, secondary_id=None, groups=None): """Failover volumes to a secondary host/ backend. :param context: the context :param volumes: the list of volumes to be failed over :param secondary_id: the backend to be failed over to, is 'default' if fail back :param groups: replication groups :returns: secondary_id, volume_update_list, group_update_list """ active_backend_id, volume_update_list, group_update_list = ( self.common.failover(volumes, secondary_id, groups)) self.common.failover_completed(secondary_id, False) return active_backend_id, volume_update_list, group_update_list def failover(self, context, volumes, secondary_id=None, groups=None): """Like failover but for a host that is clustered.""" return self.common.failover(volumes, secondary_id, groups) def failover_completed(self, context, active_backend_id=None): """This method is called after failover for clustered backends.""" return self.common.failover_completed(active_backend_id, True) def create_group(self, context, group): """Creates a generic volume group. :param context: the context :param group: the group object :returns: model_update """ return self.common.create_group(context, group) def delete_group(self, context, group, volumes): """Deletes a generic volume group. :param context: the context :param group: the group object :param volumes: the member volumes """ return self.common.delete_group( context, group, volumes) def create_group_snapshot(self, context, group_snapshot, snapshots): """Creates a group snapshot. :param context: the context :param group_snapshot: the group snapshot :param snapshots: snapshots list """ return self.common.create_group_snapshot(context, group_snapshot, snapshots) def delete_group_snapshot(self, context, group_snapshot, snapshots): """Deletes a group snapshot. :param context: the context :param group_snapshot: the grouop snapshot :param snapshots: snapshots list """ return self.common.delete_group_snapshot(context, group_snapshot, snapshots) def update_group(self, context, group, add_volumes=None, remove_volumes=None): """Updates LUNs in group. :param context: the context :param group: the group object :param add_volumes: flag for adding volumes :param remove_volumes: flag for removing volumes """ return self.common.update_group(group, add_volumes, remove_volumes) def create_group_from_src( self, context, group, volumes, group_snapshot=None, snapshots=None, source_group=None, source_vols=None): """Creates the volume group from source. :param context: the context :param group: the consistency group object to be created :param volumes: volumes in the group :param group_snapshot: the source volume group snapshot :param snapshots: snapshots of the source volumes :param source_group: the dictionary of a volume group as source. :param source_vols: a list of volume dictionaries in the source_group. 
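Typically only one source form is supplied per call; an illustrative sketch of the two invocation shapes (all names here are placeholders):

.. code-block:: default

    # new group cloned from a group snapshot
    driver.create_group_from_src(ctxt, group, volumes,
                                 group_snapshot=grp_snap,
                                 snapshots=snap_list)

    # new group cloned from an existing source group
    driver.create_group_from_src(ctxt, group, volumes,
                                 source_group=src_group,
                                 source_vols=src_vol_list)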
""" return self.common.create_group_from_src( context, group, volumes, group_snapshot, snapshots, source_group, source_vols) def enable_replication(self, context, group, volumes): """Enable replication for a group. :param context: the context :param group: the group object :param volumes: the list of volumes :returns: model_update, None """ return self.common.enable_replication(context, group, volumes) def disable_replication(self, context, group, volumes): """Disable replication for a group. :param context: the context :param group: the group object :param volumes: the list of volumes :returns: model_update, None """ return self.common.disable_replication(context, group, volumes) def failover_replication(self, context, group, volumes, secondary_backend_id=None): """Failover replication for a group. :param context: the context :param group: the group object :param volumes: the list of volumes :param secondary_backend_id: the secondary backend id - default None :returns: model_update, vol_model_updates """ return self.common.failover_replication( context, group, volumes, secondary_backend_id) def revert_to_snapshot(self, context, volume, snapshot): """Revert volume to snapshot :param context: the context :param volume: the cinder volume object :param snapshot: the cinder snapshot object """ self.common.revert_to_snapshot(volume, snapshot) @classmethod def clean_volume_file_locks(cls, volume_id): coordination.synchronized_remove(f'{cls.driver_prefix}-{volume_id}') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/drivers/dell_emc/powermax/masking.py0000664000175000017500000032167300000000000024745 0ustar00zuulzuul00000000000000# Copyright (c) 2020 Dell Inc. or its subsidiaries. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import ast from copy import deepcopy import re import sys import time from oslo_log import log as logging from cinder.common import constants as cinder_constants from cinder import coordination from cinder import exception from cinder.i18n import _ from cinder.volume.drivers.dell_emc.powermax import provision from cinder.volume.drivers.dell_emc.powermax import utils from cinder.volume import volume_utils LOG = logging.getLogger(__name__) CREATE_IG_ERROR = "is already in use in another Initiator Group" class PowerMaxMasking(object): """Masking class for Dell EMC PowerMax. Masking code to dynamically create a masking view. It supports VMAX 3, All Flash and PowerMax arrays. 
""" def __init__(self, prtcl, rest, configuration): self.protocol = prtcl self.utils = utils.PowerMaxUtils() self.rest = rest self.provision = provision.PowerMaxProvision(self.rest, configuration) def setup_masking_view( self, serial_number, volume, masking_view_dict, extra_specs): @coordination.synchronized("emc-mv-{maskingview_name}-{serial_number}") def do_get_or_create_masking_view_and_map_lun( maskingview_name, serial_number): return self.get_or_create_masking_view_and_map_lun( serial_number, volume, maskingview_name, masking_view_dict, extra_specs) return do_get_or_create_masking_view_and_map_lun( masking_view_dict[utils.MV_NAME], serial_number) def get_or_create_masking_view_and_map_lun( self, serial_number, volume, maskingview_name, masking_view_dict, extra_specs): """Get or Create a masking view and add a volume to the storage group. Given a masking view dict either get or create a masking view and add the volume to the associated storage group. :param serial_number: the array serial number :param volume: the volume object :param maskingview_name: the masking view name :param masking_view_dict: the masking view dict :param extra_specs: the extra specifications :returns: rollback_dict :raises: VolumeBackendAPIException """ storagegroup_name = masking_view_dict[utils.SG_NAME] volume_name = masking_view_dict[utils.VOL_NAME] masking_view_dict[utils.EXTRA_SPECS] = extra_specs device_id = masking_view_dict[utils.DEVICE_ID] rep_mode = extra_specs.get(utils.REP_MODE, None) default_sg_name = self.utils.get_default_storage_group_name( masking_view_dict[utils.SRP], masking_view_dict[utils.SLO], masking_view_dict[utils.WORKLOAD], masking_view_dict[utils.DISABLECOMPRESSION], masking_view_dict[utils.IS_RE], rep_mode) rollback_dict = masking_view_dict try: error_message = self._get_or_create_masking_view( serial_number, masking_view_dict, default_sg_name, extra_specs) # Check that the device is in the correct storage group if not self._validate_attach( serial_number, device_id, storagegroup_name, maskingview_name): error_message = ("The attach validation for device " "%(dev)s was unsuccessful.") raise exception.VolumeBackendAPIException( message=error_message) rollback_dict['portgroup_name'] = ( self.rest.get_element_from_masking_view( serial_number, maskingview_name, portgroup=True)) except Exception as e: LOG.exception( "Masking View creation or retrieval was not successful " "for masking view %(maskingview_name)s. " "Attempting rollback.", {'maskingview_name': masking_view_dict[utils.MV_NAME]}) error_message = str(e) rollback_dict['default_sg_name'] = default_sg_name if error_message: # Rollback code if we cannot complete any of the steps above # successfully then we must roll back by adding the volume back to # the default storage group for that slo/workload combination. self.check_if_rollback_action_for_masking_required( serial_number, volume, device_id, rollback_dict) exception_message = (_( "Failed to get, create or add volume %(volumeName)s " "to masking view %(maskingview_name)s. " "The error message received was %(errorMessage)s.") % {'maskingview_name': maskingview_name, 'volumeName': volume_name, 'errorMessage': error_message}) LOG.error(exception_message) raise exception.VolumeBackendAPIException( message=exception_message) return rollback_dict def _validate_attach( self, serial_number, device_id, dest_sg_name, dest_mv_name): """Validate that the attach was successful. 
:param serial_number: the array serial number :param device_id: the device id :param dest_sg_name: the correct storage group :param dest_mv_name: the correct masking view :returns: bool """ sg_list = self.rest.get_storage_groups_from_volume( serial_number, device_id) def _validate_masking_view(): mv_list = self.rest.get_masking_views_from_storage_group( serial_number, dest_sg_name) if not mv_list: LOG.error( "The masking view list of %(sg_name)s where device " "%(dev)s exists is empty.", {'sg_name': dest_sg_name, 'dev': device_id}) return False if dest_mv_name.lower() in ( mv_name.lower() for mv_name in mv_list): LOG.debug( "The masking view in the attach operation is " "%(masking_name)s. The storage group " "in the masking view is %(storage_name)s. " "The device id is %(dev)s.", {'masking_name': dest_mv_name, 'storage_name': dest_sg_name, 'dev': device_id}) return True else: LOG.error( "The masking view %(masking_name)s is not in " "the masking view list %(mv_list)s. " "%(sg_name)s is the storage group where device " "%(dev)s exists.", {'masking_name': dest_mv_name, 'mv_list': mv_list, 'sg_name': dest_sg_name, 'dev': device_id}) return False if not sg_list: LOG.error( "Device %(dev)s does not exist in any storage group.", {'dev': device_id}) return False if dest_sg_name.lower() in ( sg_name.lower() for sg_name in sg_list): return _validate_masking_view() else: LOG.error( "The storage group %(sg_name)s is not in " "the storage group list %(sg_list)s " "where device %(dev)s exists.", {'sg_name': dest_sg_name, 'sg_list': sg_list, 'dev': device_id}) return False def _move_vol_from_default_sg( self, serial_number, device_id, volume_name, default_sg_name, dest_storagegroup, extra_specs, parent_sg_name=None): """Get the default storage group and move the volume. :param serial_number: the array serial number :param device_id: the device id :param volume_name: the volume name :param default_sg_name: the name of the default sg :param dest_storagegroup: the destination storage group :param extra_specs: the extra specifications :param parent_sg_name: optional parent storage group name :returns: msg """ msg = None check_vol = self.rest.is_volume_in_storagegroup( serial_number, device_id, default_sg_name) if check_vol: try: self.move_volume_between_storage_groups( serial_number, device_id, default_sg_name, dest_storagegroup, extra_specs, parent_sg=parent_sg_name) except Exception as e: msg = ("Exception while moving volume from the default " "storage group to %(sg)s. Exception received was " "%(e)s") LOG.error(msg, {'sg': dest_storagegroup, 'e': e}) else: LOG.warning( "Volume: %(volume_name)s does not belong " "to default storage group %(default_sg_name)s.", {'volume_name': volume_name, 'default_sg_name': default_sg_name}) msg = self._check_adding_volume_to_storage_group( serial_number, device_id, dest_storagegroup, volume_name, extra_specs) return msg def _get_or_create_masking_view(self, serial_number, masking_view_dict, default_sg_name, extra_specs): """Retrieve an existing masking view or create a new one. 
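The ``masking_view_dict`` is expected to carry at least the keys referenced in this module; an illustrative sketch with made-up values:

.. code-block:: default

    masking_view_dict = {
        utils.MV_NAME: 'OS-myhost-I-myportgroup-MV',
        utils.PARENT_SG_NAME: 'OS-myhost-I-myportgroup-SG',
        utils.SG_NAME: 'OS-myhost-SRP_1-Diamond-OLTP-I-SG',
        utils.IG_NAME: 'OS-myhost-I-IG',
        utils.PORTGROUPNAME: 'myportgroup',
        utils.DEVICE_ID: '0012A',
        utils.VOL_NAME: 'volume-1',
        utils.SRP: 'SRP_1',
        utils.SLO: 'Diamond',
        utils.WORKLOAD: 'NONE',
        utils.DISABLECOMPRESSION: False,
        utils.IS_RE: False,
        utils.CONNECTOR: connector,
    }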
:param serial_number: the array serial number :param masking_view_dict: the masking view dict :param default_sg_name: the name of the default sg :param extra_specs: the extra specifications :returns: error message """ maskingview_name = masking_view_dict[utils.MV_NAME] masking_view_details = self.rest.get_masking_view( serial_number, masking_view_name=maskingview_name) if not masking_view_details: self._sanity_port_group_check( masking_view_dict[utils.PORTGROUPNAME], serial_number) error_message = self._create_new_masking_view( serial_number, masking_view_dict, maskingview_name, default_sg_name, extra_specs) else: storagegroup_name, error_message = ( self._validate_existing_masking_view( serial_number, masking_view_dict, maskingview_name, default_sg_name, extra_specs)) return error_message def _sanity_port_group_check(self, port_group_name, serial_number): """Check if the port group exists :param port_group_name: the port group name (can be None) :param serial_number: the array serial number """ exc_msg = None if port_group_name: portgroup = self.rest.get_portgroup( serial_number, port_group_name) if not portgroup: exc_msg = ("Failed to get portgroup %(pg)s." % {'pg': port_group_name}) else: self._check_director_and_port_status( serial_number, port_group_name) else: exc_msg = "Port group cannot be left empty." if exc_msg: exception_message = (_( "%(exc_msg)s You must supply a valid pre-created " "port group in cinder.conf or as an extra spec.") % {'exc_msg': exc_msg}) LOG.error(exception_message) raise exception.VolumeBackendAPIException( message=exception_message) def _check_director_and_port_status(self, serial_number, port_group_name): """Check the status of the director and port. :param serial_number: the array serial number :param port_group_name: the port group name (can be None) """ exc_msg = None port_id_list = self.rest.get_port_ids(serial_number, port_group_name) if not port_id_list: exc_msg = ("Unable to get ports from portgroup %(pgn)s " % {'pgn': port_group_name}) for port in port_id_list: port_info = self.rest.get_port(serial_number, port) if port_info: # Check that the director and port are online port_details = port_info.get("symmetrixPort") if port_details: director_status = port_details.get('director_status') port_status = port_details.get('port_status') if not director_status or not port_status: exc_msg = ("Unable to get the director or port status " "for dir:port %(port)s." % {'port': port}) elif not (director_status.lower() == 'online' and ( port_status.lower() == 'on')): exc_msg = ("The director status is %(ds)s and the " "port status is %(ps)s for dir:port " "%(port)s." % {'ds': director_status, 'ps': port_status, 'port': port}) else: exc_msg = ("Unable to get port details for dir:port " "%(port)s." % {'port': port}) else: exc_msg = ("Unable to get port information for dir:port " "%(port)s." % {'port': port}) if exc_msg: LOG.error(exc_msg) raise exception.VolumeBackendAPIException( message=exc_msg) def _create_new_masking_view( self, serial_number, masking_view_dict, maskingview_name, default_sg_name, extra_specs): """Create a new masking view. 
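A rough outline of the order of operations in the body below; each step returns an error message and short-circuits on failure:

.. code-block:: default

    self._get_or_create_initiator_group(...)
    self._get_or_create_storage_group(..., parent=True)  # parent SG
    self._get_or_create_storage_group(...)               # child SG
    self._move_vol_from_default_sg(...)
    self._check_add_child_sg_to_parent_sg(...)
    self.create_masking_view(...)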
:param serial_number: the array serial number :param masking_view_dict: the masking view dict :param maskingview_name: the masking view name :param default_sg_name: the name of the default sg :param extra_specs: the extra specifications :returns: error_message """ init_group_name = masking_view_dict[utils.IG_NAME] parent_sg_name = masking_view_dict[utils.PARENT_SG_NAME] storagegroup_name = masking_view_dict[utils.SG_NAME] connector = masking_view_dict[utils.CONNECTOR] port_group_name = masking_view_dict[utils.PORTGROUPNAME] LOG.info("Port Group in masking view operation: %(pg_name)s. " "The port group labels is %(pg_label)s.", {'pg_name': masking_view_dict[utils.PORTGROUPNAME], 'pg_label': masking_view_dict[utils.PORT_GROUP_LABEL]}) init_group_name, error_message = (self._get_or_create_initiator_group( serial_number, init_group_name, connector, extra_specs)) if error_message: return error_message # get or create parent sg error_message = self._get_or_create_storage_group( serial_number, masking_view_dict, parent_sg_name, extra_specs, parent=True) if error_message: return error_message # get or create child sg error_message = self._get_or_create_storage_group( serial_number, masking_view_dict, storagegroup_name, extra_specs) if error_message: return error_message # Only after the components of the MV have been validated, # move the volume from the default storage group to the # masking view storage group. This is necessary before # creating a new masking view. error_message = self._move_vol_from_default_sg( serial_number, masking_view_dict[utils.DEVICE_ID], masking_view_dict[utils.VOL_NAME], default_sg_name, storagegroup_name, extra_specs, parent_sg_name=parent_sg_name) if error_message: return error_message error_message = self._check_add_child_sg_to_parent_sg( serial_number, storagegroup_name, parent_sg_name, masking_view_dict[utils.EXTRA_SPECS]) if error_message: return error_message error_message = (self.create_masking_view( serial_number, maskingview_name, parent_sg_name, port_group_name, init_group_name, extra_specs)) return error_message def _validate_existing_masking_view( self, serial_number, masking_view_dict, maskingview_name, default_sg_name, extra_specs): """Validate the components of an existing masking view. :param serial_number: the array serial number :param masking_view_dict: the masking view dict :param maskingview_name: the amsking view name :param default_sg_name: the default sg name :param extra_specs: the extra specifications :returns: storage_group_name -- string, msg -- string """ storage_group_name, msg = self._check_existing_storage_group( serial_number, maskingview_name, default_sg_name, masking_view_dict, extra_specs) if not msg: portgroup_name = self.rest.get_element_from_masking_view( serial_number, maskingview_name, portgroup=True) __, msg = self._check_port_group( serial_number, portgroup_name) if not msg: initiator_group, msg = self._check_existing_initiator_group( serial_number, maskingview_name, masking_view_dict, storage_group_name, portgroup_name, extra_specs) return storage_group_name, msg def _check_add_child_sg_to_parent_sg( self, serial_number, child_sg_name, parent_sg_name, extra_specs): """Check adding a child storage group to a parent storage group. 
:param serial_number: the array serial number :param child_sg_name: the name of the child storage group :param parent_sg_name: the name of the aprent storage group :param extra_specs: the extra specifications :returns: error_message or None """ msg = None if self.rest.is_child_sg_in_parent_sg( serial_number, child_sg_name, parent_sg_name): LOG.info("Child sg: %(child_sg)s is already part " "of parent storage group %(parent_sg)s.", {'child_sg': child_sg_name, 'parent_sg': parent_sg_name}) else: try: self.add_child_sg_to_parent_sg( serial_number, child_sg_name, parent_sg_name, extra_specs) except Exception as e: msg = ("Exception adding child sg %(child_sg)s to " "%(parent_sg)s. Exception received was %(e)s" % {'child_sg': child_sg_name, 'parent_sg': parent_sg_name, 'e': str(e)}) LOG.error(msg) return msg def add_child_sg_to_parent_sg( self, serial_number, child_sg_name, parent_sg_name, extra_specs): """Add a child storage group to a parent storage group. :param serial_number: the array serial number :param child_sg_name: the name of the child storage group :param parent_sg_name: the name of the aprent storage group :param extra_specs: the extra specifications """ start_time = time.time() @coordination.synchronized("emc-sg-{child_sg}-{serial_number}") @coordination.synchronized("emc-sg-{parent_sg}-{serial_number}") def do_add_sg_to_sg(child_sg, parent_sg, serial_number): # Check if another process has added the child to the # parent sg while this process was waiting for the lock if self.rest.is_child_sg_in_parent_sg( serial_number, child_sg_name, parent_sg_name): pass else: self.rest.add_child_sg_to_parent_sg( serial_number, child_sg, parent_sg, extra_specs) do_add_sg_to_sg(child_sg_name, parent_sg_name, serial_number) LOG.debug("Add child to storagegroup took: %(delta)s H:MM:SS.", {'delta': self.utils.get_time_delta(start_time, time.time())}) LOG.info("Added child sg: %(child_name)s to parent storage " "group %(parent_name)s.", {'child_name': child_sg_name, 'parent_name': parent_sg_name}) def _get_or_create_storage_group( self, serial_number, masking_view_dict, storagegroup_name, extra_specs, parent=False): """Get or create a storage group for a masking view. :param serial_number: the array serial number :param masking_view_dict: the masking view dict :param storagegroup_name: the storage group name :param extra_specs: the extra specifications :param parent: flag to indicate if this a parent storage group :returns: msg -- string or None """ msg = None srp = extra_specs[utils.SRP] workload = extra_specs[utils.WORKLOAD] if parent: slo = None else: slo = extra_specs[utils.SLO] do_disable_compression = ( masking_view_dict[utils.DISABLECOMPRESSION]) storagegroup = self.rest.get_storage_group( serial_number, storagegroup_name) if storagegroup is None: storagegroup_name = self.provision.create_storage_group( serial_number, storagegroup_name, srp, slo, workload, extra_specs, do_disable_compression) storagegroup = self.rest.get_storage_group( serial_number, storagegroup_name) if storagegroup is None: msg = ("Cannot get or create a storage group: " "%(storagegroup_name)s for volume %(volume_name)s." 
% {'storagegroup_name': storagegroup_name, 'volume_name': masking_view_dict[utils.VOL_NAME]}) LOG.error(msg) if not parent: # If qos exists, update storage group to reflect qos parameters if 'qos' in extra_specs: self.rest.update_storagegroup_qos( serial_number, storagegroup_name, extra_specs) # If storagetype:storagegrouptags exist update storage group # to add tags self._add_tags_to_storage_group( serial_number, storagegroup, extra_specs) return msg def _add_tags_to_storage_group( self, serial_number, storage_group, extra_specs): """Add tags to a storage group. :param serial_number: the array serial number :param storage_group: the storage group object :param extra_specs: the extra specifications """ if utils.STORAGE_GROUP_TAGS in extra_specs: # Check if the tags exist if 'tags' in storage_group: new_tag_list = self.utils.get_new_tags( extra_specs[utils.STORAGE_GROUP_TAGS], storage_group['tags']) if not new_tag_list: LOG.info("No new tags to add. Existing tags " "associated with %(sg_name)s are " "%(tags)s.", {'sg_name': storage_group['storageGroupId'], 'tags': storage_group['tags']}) else: new_tag_list = ( extra_specs[utils.STORAGE_GROUP_TAGS].split(",")) if self.utils.verify_tag_list(new_tag_list): LOG.info("Adding the tags %(tag_list)s to %(sg_name)s", {'tag_list': new_tag_list, 'sg_name': storage_group['storageGroupId']}) try: self.rest.add_storage_group_tag( serial_number, storage_group['storageGroupId'], new_tag_list, extra_specs) except Exception as ex: LOG.warning("Unexpected error: %(ex)s. If you still " "want to add tags to this storage group, " "please do so on the Unisphere UI.", {'ex': ex}) def _check_existing_storage_group( self, serial_number, maskingview_name, default_sg_name, masking_view_dict, extra_specs): """Check if the masking view has the child storage group. Get the parent storage group associated with a masking view and check if the required child storage group is already a member. If not, get or create the child storage group. :param serial_number: the array serial number :param maskingview_name: the masking view name :param default_sg_name: the default sg name :param masking_view_dict: the masking view dict :param extra_specs: the extra specifications :returns: storage group name, msg """ msg = None child_sg_name = masking_view_dict[utils.SG_NAME] sg_from_mv = self.rest.get_element_from_masking_view( serial_number, maskingview_name, storagegroup=True) storagegroup = self.rest.get_storage_group(serial_number, sg_from_mv) if not storagegroup: msg = ("Cannot get storage group: %(sg_from_mv)s " "from masking view %(masking_view)s." 
% {'sg_from_mv': sg_from_mv, 'masking_view': maskingview_name}) LOG.error(msg) else: check_child = self.rest.is_child_sg_in_parent_sg( serial_number, child_sg_name, sg_from_mv) child_sg = self.rest.get_storage_group( serial_number, child_sg_name) # Ensure the child sg can be retrieved if check_child and not child_sg: msg = ("Cannot get child storage group: %(sg_name)s " "but it is listed as child of %(parent_sg)s" % {'sg_name': child_sg_name, 'parent_sg': sg_from_mv}) LOG.error(msg) elif check_child and child_sg: LOG.info("Retrieved child sg %(sg_name)s from %(mv_name)s", {'sg_name': child_sg_name, 'mv_name': maskingview_name}) self._add_tags_to_storage_group( serial_number, child_sg, extra_specs) else: msg = self._get_or_create_storage_group( serial_number, masking_view_dict, child_sg_name, masking_view_dict[utils.EXTRA_SPECS]) if not msg: msg = self._move_vol_from_default_sg( serial_number, masking_view_dict[utils.DEVICE_ID], masking_view_dict[utils.VOL_NAME], default_sg_name, child_sg_name, masking_view_dict[utils.EXTRA_SPECS], parent_sg_name=sg_from_mv) if not msg and not check_child: msg = self._check_add_child_sg_to_parent_sg( serial_number, child_sg_name, sg_from_mv, masking_view_dict[utils.EXTRA_SPECS]) return child_sg_name, msg def move_volume_between_storage_groups( self, serial_number, device_id, source_storage_group_name, target_storage_group_name, extra_specs, force=False, parent_sg=None): """Move a volume between storage groups. :param serial_number: the array serial number :param device_id: the device id :param source_storage_group_name: the source sg :param target_storage_group_name: the target sg :param extra_specs: the extra specifications :param force: optional Force flag required for replicated vols :param parent_sg: optional Parent storage group """ if source_storage_group_name.lower() == ( target_storage_group_name.lower()): LOG.info( "Source storage group %(src)s and target storage " "group %(tgt)s are the same, no move operation " "required.", {'src': source_storage_group_name, 'tgt': target_storage_group_name}) return @coordination.synchronized( "emc-sg-{source_storage_group_name}-{serial_number}", "emc-sg-{target_storage_group_name}-{serial_number}") def do_move_volume_between_storage_groups( serial_number, source_storage_group_name, target_storage_group_name): self._check_child_storage_group_exists( device_id, serial_number, target_storage_group_name, extra_specs, parent_sg) num_vol_in_sg = self.rest.get_num_vols_in_sg( serial_number, source_storage_group_name) LOG.debug("There are %(num_vol)d volumes in the " "storage group %(sg_name)s.", {'num_vol': num_vol_in_sg, 'sg_name': source_storage_group_name}) self.rest.move_volume_between_storage_groups( serial_number, device_id, source_storage_group_name, target_storage_group_name, extra_specs, force) if num_vol_in_sg == 1: # Check if storage group is a child sg parent_sg_name = self.get_parent_sg_from_child( serial_number, source_storage_group_name) if parent_sg_name: self.rest.remove_child_sg_from_parent_sg( serial_number, source_storage_group_name, parent_sg_name, extra_specs) # Last volume in the storage group - delete sg. self.rest.delete_storage_group( serial_number, source_storage_group_name) do_move_volume_between_storage_groups( serial_number, source_storage_group_name, target_storage_group_name) def _check_port_group(self, serial_number, portgroup_name): """Check that you can get a port group. 
:param serial_number: the array serial number :param portgroup_name: the port group name :returns: string -- msg, the error message """ msg = None portgroup = self.rest.get_portgroup(serial_number, portgroup_name) if portgroup: self._check_director_and_port_status( serial_number, portgroup_name) else: msg = ("Cannot get port group: %(portgroup)s from the array " "%(array)s. Portgroups must be pre-configured - please " "check the array." % {'portgroup': portgroup_name, 'array': serial_number}) LOG.error(msg) return portgroup_name, msg def _get_or_create_initiator_group( self, serial_number, init_group_name, connector, extra_specs): """Retrieve or create an initiator group. :param serial_number: the array serial number :param init_group_name: the name of the initiator group :param connector: the connector object :param extra_specs: the extra specifications :returns: name of the initiator group -- string, msg """ msg = None initiator_names = self.find_initiator_names(connector) LOG.debug("The initiator name(s) are: %(initiatorNames)s.", {'initiatorNames': initiator_names}) found_init_group_name = self._find_initiator_group( serial_number, initiator_names) # If you cannot find an initiator group that matches the connector # info, create a new initiator group. if found_init_group_name is None: # Check if the initiator group exists even if the initiators # not found. This will happen if there is no entry for them # in the login table initiator_group = self.rest.get_initiator_group( serial_number, initiator_group=init_group_name) if not initiator_group: found_init_group_name = self._create_initiator_group( serial_number, init_group_name, initiator_names, extra_specs) LOG.info("Created new initiator group name: " "%(init_group_name)s.", {'init_group_name': init_group_name}) else: initiator_list = initiator_group.get( 'initiator', list()) if initiator_group else list() if initiator_list: if set(initiator_list) == set(initiator_names): LOG.debug( "Found initiator group %(ign)s, but could not " "find initiator_names %(ins)s in the login " "table. The contained initiator(s) are the " "same as those supplied by OpenStack, therefore " "reusing %(ign)s.", {'ign': init_group_name, 'ins': initiator_names}) else: msg = ("Found initiator group %(ign)s, but could not " "find initiator_names %(ins)s in the login " "table. The contained initiators %(ins_host)s " "do match up with those in the connector " "object. Delete initiator group %(ign)s and " "retry." % {'ign': init_group_name, 'ins': initiator_names, 'ins_host': initiator_list}) LOG.error(msg) return None, msg else: msg = ("Found initiator group %(ign)s, but could not " "find initiator_names %(ins)s in the login " "table. There are no initiators in %(ign)s. " "Delete initiator group %(ign)s and retry." % {'ign': init_group_name, 'ins': initiator_names}) LOG.error(msg) return None, msg found_init_group_name = initiator_group.get('hostId') else: LOG.info("Using existing initiator group name: " "%(init_group_name)s.", {'init_group_name': found_init_group_name}) if found_init_group_name is None: msg = ("Cannot get or create initiator group: " "%(init_group_name)s. " % {'init_group_name': init_group_name}) LOG.error(msg) return found_init_group_name, msg def _check_existing_initiator_group( self, serial_number, maskingview_name, masking_view_dict, storagegroup_name, portgroup_name, extra_specs): """Checks an existing initiator group in the masking view. Check if the initiators in the initiator group match those in the system. 
:param serial_number: the array serial number :param maskingview_name: name of the masking view :param masking_view_dict: masking view dict :param storagegroup_name: the storage group name :param portgroup_name: the port group name :param extra_specs: the extra specifications :returns: ig_from_mv, msg """ msg = None ig_from_mv = self.rest.get_element_from_masking_view( serial_number, maskingview_name, host=True) # First verify that the initiator group matches the initiators. if not self._verify_initiator_group_from_masking_view( serial_number, maskingview_name, masking_view_dict, ig_from_mv, storagegroup_name, portgroup_name, extra_specs): msg = ("Unable to verify initiator group: %(ig_name)s " "in masking view %(maskingview_name)s." % {'ig_name': ig_from_mv, 'maskingview_name': maskingview_name}) LOG.error(msg) return ig_from_mv, msg def _check_adding_volume_to_storage_group( self, serial_number, device_id, storagegroup_name, volume_name, extra_specs): """Check if a volume is part of an sg and add it if not. :param serial_number: the array serial number :param device_id: the device id :param storagegroup_name: the storage group name :param volume_name: volume name :param extra_specs: extra specifications :returns: msg """ msg = None if self.rest.is_volume_in_storagegroup( serial_number, device_id, storagegroup_name): LOG.info("Volume: %(volume_name)s is already part " "of storage group %(sg_name)s.", {'volume_name': volume_name, 'sg_name': storagegroup_name}) else: storage_group_list = self.rest.get_storage_groups_from_volume( serial_number, device_id) if storage_group_list: msg = self._check_add_volume_suitability( serial_number, device_id, volume_name, storagegroup_name) if msg: return msg try: force = True if extra_specs.get(utils.IS_RE) else False self.add_volume_to_storage_group( serial_number, device_id, storagegroup_name, volume_name, extra_specs, force) except Exception as e: msg = ("Exception adding volume %(vol)s to %(sg)s. " "Exception received was %(e)s." % {'vol': volume_name, 'sg': storagegroup_name, 'e': str(e)}) LOG.error(msg) return msg def _check_add_volume_suitability( self, serial_number, device_id, volume_name, add_sg_name): """Check if possible to add a volume to a storage group If a volume already belongs to a storage group that is associated with FAST it is not possible to add that same volume to another storage group which is also associated with FAST :param serial_number: the array serial number :param device_id: the device id :param volume_name: the client supplied volume name :param add_sg_name: storage group that the volume is to be added to :returns: msg -- str or None """ storage_group = self.rest.get_storage_group( serial_number, add_sg_name) if storage_group and storage_group.get('slo') and ( storage_group.get('slo').lower() == 'none'): return None storage_group_list = self.rest.get_storage_groups_from_volume( serial_number, device_id) if storage_group_list: msg = ("Volume %(vol)s with device id %(dev)s belongs " "to storage group(s) %(sgs)s. Cannot add volume " "to another storage group associated with FAST." 
% {'vol': volume_name, 'dev': device_id, 'sgs': storage_group_list}) for storage_group_name in storage_group_list: storage_group = self.rest.get_storage_group( serial_number, storage_group_name) if storage_group and storage_group.get('slo') and ( storage_group.get('slo').lower() != 'none'): LOG.error(msg) return msg return None def add_volume_to_storage_group( self, serial_number, device_id, storagegroup_name, volume_name, extra_specs, force=False): """Add a volume to a storage group. :param serial_number: array serial number :param device_id: volume device id :param storagegroup_name: storage group name :param volume_name: volume name :param extra_specs: extra specifications :param force: add force argument to call """ start_time = time.time() @coordination.synchronized("emc-sg-{sg_name}-{serial_number}") def do_add_volume_to_sg(sg_name, serial_number): # Check if another process has added the volume to the # sg while this process was waiting for the lock if self.rest.is_volume_in_storagegroup( serial_number, device_id, storagegroup_name): LOG.info("Volume: %(volume_name)s is already part " "of storage group %(sg_name)s.", {'volume_name': volume_name, 'sg_name': storagegroup_name}) else: self.rest.add_vol_to_sg(serial_number, sg_name, device_id, extra_specs, force=force) do_add_volume_to_sg(storagegroup_name, serial_number) LOG.debug("Add volume to storagegroup took: %(delta)s H:MM:SS.", {'delta': self.utils.get_time_delta(start_time, time.time())}) LOG.info("Added volume: %(vol_name)s to storage group %(sg_name)s.", {'vol_name': volume_name, 'sg_name': storagegroup_name}) def add_volumes_to_storage_group( self, serial_number, list_device_id, storagegroup_name, extra_specs): """Add a volume to a storage group. :param serial_number: array serial number :param list_device_id: list of volume device id :param storagegroup_name: storage group name :param extra_specs: extra specifications """ if not list_device_id: LOG.info("add_volumes_to_storage_group: No volumes to add") return start_time = time.time() temp_device_id_list = list_device_id force = extra_specs.get(utils.FORCE_VOL_EDIT, False) @coordination.synchronized("emc-sg-{sg_name}-{serial_number}") def do_add_volumes_to_sg(sg_name, serial_number): # Check if another process has added any volume to the # sg while this process was waiting for the lock volume_list = self.rest.get_volumes_in_storage_group( serial_number, storagegroup_name) for volume in volume_list: if volume in temp_device_id_list: LOG.info("Volume: %(volume_name)s is already part " "of storage group %(sg_name)s.", {'volume_name': volume, 'sg_name': storagegroup_name}) # Remove this device id from the list temp_device_id_list.remove(volume) self.rest.add_vol_to_sg(serial_number, storagegroup_name, temp_device_id_list, extra_specs, force=force) do_add_volumes_to_sg(storagegroup_name, serial_number) LOG.debug("Add volumes to storagegroup took: %(delta)s H:MM:SS.", {'delta': self.utils.get_time_delta(start_time, time.time())}) LOG.info("Added volumes to storage group %(sg_name)s.", {'sg_name': storagegroup_name}) def remove_vol_from_storage_group( self, serial_number, device_id, storagegroup_name, volume_name, extra_specs): """Remove a volume from a storage group. 
:param serial_number: the array serial number :param device_id: the volume device id :param storagegroup_name: the name of the storage group :param volume_name: the volume name :param extra_specs: the extra specifications :raises: VolumeBackendAPIException """ start_time = time.time() self.rest.remove_vol_from_sg( serial_number, storagegroup_name, device_id, extra_specs) LOG.debug("Remove volume from storagegroup took: %(delta)s H:MM:SS.", {'delta': self.utils.get_time_delta(start_time, time.time())}) check_vol = (self.rest.is_volume_in_storagegroup( serial_number, device_id, storagegroup_name)) if check_vol: exception_message = (_( "Failed to remove volume %(vol)s from SG: %(sg_name)s.") % {'vol': volume_name, 'sg_name': storagegroup_name}) LOG.error(exception_message) raise exception.VolumeBackendAPIException( message=exception_message) def remove_volumes_from_storage_group( self, serial_number, list_of_device_ids, storagegroup_name, extra_specs): """Remove multiple volumes from a storage group. :param serial_number: the array serial number :param list_of_device_ids: list of device ids :param storagegroup_name: the name of the storage group :param extra_specs: the extra specifications :raises: VolumeBackendAPIException """ start_time = time.time() @coordination.synchronized("emc-sg-{sg_name}-{serial_number}") def do_remove_volumes_from_storage_group(sg_name, serial_number): self.rest.remove_vol_from_sg( serial_number, storagegroup_name, list_of_device_ids, extra_specs) LOG.debug("Remove volumes from storagegroup " "took: %(delta)s H:MM:SS.", {'delta': self.utils.get_time_delta(start_time, time.time())}) volume_list = self.rest.get_volumes_in_storage_group( serial_number, storagegroup_name) for device_id in list_of_device_ids: if device_id in volume_list: exception_message = (_( "Failed to remove device " "with id %(dev_id)s from SG: %(sg_name)s.") % {'dev_id': device_id, 'sg_name': storagegroup_name}) LOG.error(exception_message) raise exception.VolumeBackendAPIException( message=exception_message) return do_remove_volumes_from_storage_group( storagegroup_name, serial_number) def find_initiator_names(self, connector): """Check the connector object and return the initiator names. The initiator name can be for (iSCSI), wwpns (FC), or nqn (NVMe/TCP) :param connector: the connector object :returns: list -- list of found initiator names :raises: VolumeBackendAPIException """ foundinitiatornames = [] name = 'initiator name' if self.protocol.lower() == utils.ISCSI and connector['initiator']: foundinitiatornames.append(connector['initiator']) elif self.protocol.lower() == utils.FC: if 'wwpns' in connector and connector['wwpns']: for wwn in connector['wwpns']: foundinitiatornames.append(wwn) name = 'world wide port names' else: msg = (_("FC is the protocol but wwpns are " "not supplied by OpenStack.")) LOG.error(msg) raise exception.VolumeBackendAPIException(message=msg) elif self.protocol.lower() == cinder_constants.NVMEOF_TCP.lower(): if 'nqn' in connector and connector['nqn']: host_id = connector.get('nvme_hostid') if host_id: host_id = str(host_id).replace("-", "") initiator_name = connector['nqn'] + ":" + host_id foundinitiatornames.append(initiator_name) else: msg = (_("Failed to determine NVMe HostId " "for connector NQN '%s'. 
" "Ensure the host is properly " "registered and HostId is available.") % connector.get('nqn')) LOG.error(msg) raise exception.VolumeBackendAPIException(message=msg) else: msg = (_("NVMe/TCP is the protocol but nqn is " "not supplied by OpenStack.")) LOG.error(msg) raise exception.VolumeBackendAPIException(message=msg) if not foundinitiatornames: msg = (_("Error finding %(name)s.") % {'name': name}) LOG.error(msg) raise exception.VolumeBackendAPIException(message=msg) LOG.debug("Found %(name)s: %(initiator)s.", {'name': name, 'initiator': foundinitiatornames}) return foundinitiatornames def _find_initiator_group(self, serial_number, initiator_names): """Check to see if an initiator group already exists. NOTE: An initiator/wwn can only belong to one initiator group. If we were to attempt to create one with an initiator/wwn that is already belonging to another initiator group, it would fail. :param serial_number: the array serial number :param initiator_names: the list of initiator names :returns: initiator group name -- string or None """ ig_name = None for initiator in initiator_names: if self.protocol.lower() == cinder_constants.NVMEOF_TCP.lower(): params = {'initiator_hba': initiator.rsplit(":", 1)} else: params = {'initiator_hba': initiator.lower()} found_init = self.rest.get_initiator_list(serial_number, params) if found_init: ig_name = self.rest.get_initiator_group_from_initiator( serial_number, found_init[0]) break return ig_name def create_masking_view( self, serial_number, maskingview_name, storagegroup_name, port_group_name, init_group_name, extra_specs): """Create a new masking view. :param serial_number: the array serial number :param maskingview_name: the masking view name :param storagegroup_name: the storage group name :param port_group_name: the port group :param init_group_name: the initiator group :param extra_specs: extra specifications :returns: error_message -- string or None """ error_message = None try: self.rest.create_masking_view( serial_number, maskingview_name, storagegroup_name, port_group_name, init_group_name, extra_specs) except Exception as e: error_message = ("Error creating new masking view. Exception " "received: %(e)s" % {'e': str(e)}) return error_message def check_if_rollback_action_for_masking_required( self, serial_number, volume, device_id, rollback_dict): """Rollback action for volumes with an associated service level. We need to be able to return the volume to its previous storage group if anything has gone wrong. We also may need to clean up any unused initiator groups. :param serial_number: the array serial number :param volume: the volume object :param device_id: the device id :param rollback_dict: the rollback dict :raises: VolumeBackendAPIException """ reset = False if rollback_dict[utils.IS_MULTIATTACH] else True # Check if ig has been created. If so, check for other # masking views associated with the ig. If none, delete the ig. self._check_ig_rollback( serial_number, rollback_dict[utils.IG_NAME], rollback_dict[utils.CONNECTOR]) try: # Remove it from the storage group associated with the connector, # if any. If not multiattach case, return to the default sg. 
self.remove_and_reset_members( serial_number, volume, device_id, rollback_dict[utils.VOL_NAME], rollback_dict[utils.EXTRA_SPECS], reset, rollback_dict[utils.CONNECTOR]) if rollback_dict[utils.IS_MULTIATTACH]: # Move from the nonfast storage group to the fast sg if rollback_dict[utils.SLO] is not None: self._return_volume_to_fast_managed_group( serial_number, device_id, rollback_dict[utils.OTHER_PARENT_SG], rollback_dict[utils.FAST_SG], rollback_dict[utils.NO_SLO_SG], rollback_dict[utils.EXTRA_SPECS]) except Exception as e: error_message = (_( "Rollback for Volume: %(volume_name)s has failed. " "Please contact your system administrator to manually return " "your volume to the default storage group for its slo. " "Exception received: %(e)s") % {'volume_name': rollback_dict['volume_name'], 'e': str(e)}) LOG.exception(error_message) raise exception.VolumeBackendAPIException(message=error_message) def _verify_initiator_group_from_masking_view( self, serial_number, masking_view_name, masking_view_dict, ig_from_mv, storage_group_name, port_group_name, extra_specs): """Check that the initiator group contains the correct initiators. :param serial_number: the array serial number :param masking_view_name: name of the masking view :param masking_view_dict: the masking view dict :param ig_from_mv: the initiator group name :param storage_group_name: the storage group :param port_group_name: the port group :param extra_specs: extra specifications :returns: boolean """ connector = masking_view_dict['connector'] initiator_names = self.find_initiator_names(connector) found_ig_from_connector = self._find_initiator_group( serial_number, initiator_names) if found_ig_from_connector != ig_from_mv: check_ig_flag = masking_view_dict[utils.INITIATOR_CHECK] if check_ig_flag: return self._recreate_masking_view( serial_number, found_ig_from_connector, ig_from_mv, masking_view_dict['init_group_name'], masking_view_name, initiator_names, storage_group_name, port_group_name, extra_specs) else: msg = (_( "Initiator group %(ig_conn)s contains initiators " "%(init_list)s and does not match IG %(ig_mv)s " "contained in masking view %(mv_name)s." "Please delete the masking view or set 'initiator_check' " "to True in the extra specs to let the driver do it for " "you.") % {'ig_conn': found_ig_from_connector, 'init_list': initiator_names, 'ig_mv': ig_from_mv, 'mv_name': masking_view_name}) LOG.error(msg) raise exception.VolumeBackendAPIException(message=msg) return True def _recreate_masking_view( self, serial_number, ig_from_conn, ig_from_mv, ig_name, mv_name, initiator_names, sg_name, pg_name, extra_specs): """Recreate a masking view if the initiators do not match. If using an existing masking view check that the initiator group contains the correct initiators. If it does not contain the correct initiators then we delete the initiator group from the masking view, re-create it with the correct initiators and add it to the masking view NOTE: PowerMax/VMAX does not support ModifyMaskingView so we must first delete the masking view and recreate it. 
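The delete-and-recreate sequence is roughly as follows; the initiator group is only deleted when its name collides with the expected driver name, see the body for the exact conditions:

.. code-block:: default

    self.rest.delete_masking_view(serial_number, mv_name)
    self.rest.delete_initiator_group(serial_number, ig_from_mv)
    self._create_initiator_group(serial_number, ig_from_mv,
                                 initiator_names, extra_specs)
    self.create_masking_view(serial_number, mv_name, sg_name,
                             pg_name, ig_name, extra_specs)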
:param serial_number: the array serial number :param ig_from_conn: initiator group from initiators in connector :param ig_from_mv: initiator group from masking view :param ig_name: drivers initiator group name by convention :param mv_name: masking view :param initiator_names: initiator(s) in the connector object :param sg_name: storage group name :param pg_name: port group name :param extra_specs: extra specifications :returns: boolean """ check_ig = self.rest.get_initiator_group( serial_number, initiator_group=ig_from_mv) if check_ig: if not ig_from_conn: # If the name of the current initiator group from the # masking view matches the igGroupName supplied for the # new group, the existing ig needs to be deleted before # the new one with the correct initiators can be created. if ig_name == ig_from_mv: # Masking view needs to be deleted before IG # can be deleted. self.rest.delete_masking_view( serial_number, mv_name) self.rest.delete_initiator_group( serial_number, ig_from_mv) ig_from_conn = ( self._create_initiator_group( serial_number, ig_from_mv, initiator_names, extra_specs)) if ig_from_conn and sg_name and pg_name: # Existing masking view (if still on the array) needs # to be deleted before a new one can be created. try: self.rest.delete_masking_view( serial_number, mv_name) except Exception: pass error_message = ( self.create_masking_view( serial_number, mv_name, sg_name, pg_name, ig_name, extra_specs)) if not error_message: LOG.debug( "The old masking view has been replaced: " "%(maskingview_name)s.", {'maskingview_name': mv_name}) else: LOG.error( "One of the components of the original masking view " "%(maskingview_name)s cannot be retrieved so " "please contact your system administrator to check " "that the correct initiator(s) are part of masking.", {'maskingview_name': mv_name}) return False return True def _create_initiator_group( self, serial_number, init_group_name, initiator_names, extra_specs): """Create a new initiator group. Given a list of initiators, create a new initiator group. :param serial_number: array serial number :param init_group_name: the name for the initiator group :param initiator_names: initaitor names :param extra_specs: the extra specifications :returns: the initiator group name """ try: self.rest.create_initiator_group( serial_number, init_group_name, initiator_names, extra_specs) except exception.VolumeBackendAPIException as ex: if re.search(CREATE_IG_ERROR, ex.msg): LOG.error("It is probable that initiator(s) %(initiators)s " "belong to an existing initiator group (host) " "that is neither logged into the array or part " "of a masking view and as such cannot be queried. " "Please delete this initiator group (host) and " "re-run the operation.", {'initiators': initiator_names}) raise exception.VolumeBackendAPIException(message=ex) return init_group_name def _check_ig_rollback( self, serial_number, init_group_name, connector, force=False): """Check if rollback action is required on an initiator group. If anything goes wrong on a masking view creation, we need to check if the process created a now-stale initiator group before failing, i.e. an initiator group a) matching the name used in the mv process and b) not associated with any other masking views. If a stale ig exists, delete the ig. 
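The host label used for the cleanup lookup is taken from the driver's initiator group naming convention, e.g. (illustrative name):

.. code-block:: default

    # init_group_name = 'OS-myhost-I-IG'
    host = init_group_name.split("-")[1]   # -> 'myhost'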
:param serial_number: the array serial number :param init_group_name: the initiator group name :param connector: the connector object :param force: force a delete even if no entry in login table """ initiator_names = self.find_initiator_names(connector) found_ig_name = self._find_initiator_group( serial_number, initiator_names) if found_ig_name: if found_ig_name == init_group_name: force = True if force: found_ig_name = init_group_name host = init_group_name.split("-")[1] LOG.debug("Searching for masking views associated with " "%(init_group_name)s", {'init_group_name': init_group_name}) self._last_volume_delete_initiator_group( serial_number, found_ig_name, host) @coordination.synchronized("emc-vol-{device_id}") def remove_and_reset_members( self, serial_number, volume, device_id, volume_name, extra_specs, reset=True, connector=None, async_grp=None, host_template=None): """This is called on a delete, unmap device or rollback. :param serial_number: the array serial number :param volume: the volume object :param device_id: the volume device id :param volume_name: the volume name :param extra_specs: additional info :param reset: reset, return to original SG (optional) :param connector: the connector object (optional) :param async_grp: the async rep group (optional) :param host_template: the host template (optional) """ self._cleanup_deletion( serial_number, volume, device_id, volume_name, extra_specs, connector, reset, async_grp, host_template=host_template) def _cleanup_deletion( self, serial_number, volume, device_id, volume_name, extra_specs, connector, reset, async_grp, host_template=None): """Prepare a volume for a delete operation. :param serial_number: the array serial number :param volume: the volume object :param device_id: the volume device id :param volume_name: the volume name :param extra_specs: the extra specifications :param connector: the connector object :param reset: flag to indicate if reset is required -- bool :param async_grp: the async rep group :param host_template: the host template (if it exists) """ move = False short_host_name = None storagegroup_names = (self.rest.get_storage_groups_from_volume( serial_number, device_id)) if storagegroup_names: if async_grp is not None: for index, sg in enumerate(storagegroup_names): if sg == async_grp: storagegroup_names.pop(index) if len(storagegroup_names) == 1 and reset is True: move = True elif connector is not None: short_host_name = self.utils.get_host_name_label( connector.get('host'), host_template) if connector.get('host') else None # Legacy code legacy_short_host_name = self.utils.get_host_short_name( connector.get('host')) if connector.get('host') else None move = reset if short_host_name: for sg_name in storagegroup_names: if short_host_name in sg_name: self.remove_volume_from_sg( serial_number, device_id, volume_name, sg_name, extra_specs, connector, move, host_template=host_template) break elif legacy_short_host_name and ( legacy_short_host_name in sg_name): self.remove_volume_from_sg( serial_number, device_id, volume_name, sg_name, extra_specs, connector, move, host_template=host_template) break else: for sg_name in storagegroup_names: self.remove_volume_from_sg( serial_number, device_id, volume_name, sg_name, extra_specs, connector, move, host_template=host_template) if reset is True and move is False: self.add_volume_to_default_storage_group( serial_number, device_id, volume_name, extra_specs, volume=volume) def remove_volume_from_sg( self, serial_number, device_id, vol_name, storagegroup_name, extra_specs, 
connector=None, move=False, host_template=None): """Remove a volume from a storage group. :param serial_number: the array serial number :param device_id: the volume device id :param vol_name: the volume name :param storagegroup_name: the storage group name :param extra_specs: the extra specifications :param connector: the connector object :param move: flag to indicate if move should be used instead of remove :param host_template: the host template (if it exists) """ masking_list = self.rest.get_masking_views_from_storage_group( serial_number, storagegroup_name) if not masking_list: LOG.debug("No masking views associated with storage group " "%(sg_name)s", {'sg_name': storagegroup_name}) @coordination.synchronized("emc-sg-{sg_name}-{serial_number}") def do_remove_volume_from_sg(sg_name, serial_number): # Make sure volume hasn't been recently removed from the sg if self.rest.is_volume_in_storagegroup( serial_number, device_id, sg_name): num_vol_in_sg = self.rest.get_num_vols_in_sg( serial_number, sg_name) LOG.debug("There are %(num_vol)d volumes in the " "storage group %(sg_name)s.", {'num_vol': num_vol_in_sg, 'sg_name': sg_name}) if num_vol_in_sg == 1: # Last volume in the storage group - delete sg. self._last_vol_in_sg( serial_number, device_id, vol_name, sg_name, extra_specs, move, host_template=host_template) else: # Not the last volume so remove it from storage group self._multiple_vols_in_sg( serial_number, device_id, sg_name, vol_name, extra_specs, move) else: LOG.info("Volume with device_id %(dev)s is no longer a " "member of %(sg)s.", {'dev': device_id, 'sg': sg_name}) return do_remove_volume_from_sg(storagegroup_name, serial_number) else: # Need to lock masking view when we are locking the storage # group to avoid possible deadlock situations from concurrent # processes masking_name = masking_list[0] parent_sg_name = self.rest.get_element_from_masking_view( serial_number, masking_name, storagegroup=True) @coordination.synchronized("emc-mv-{parent_name}-{serial_number}") @coordination.synchronized("emc-mv-{mv_name}-{serial_number}") @coordination.synchronized("emc-sg-{sg_name}-{serial_number}") def do_remove_volume_from_sg( mv_name, sg_name, parent_name, serial_number): # Make sure volume hasn't been recently removed from the sg is_vol = self.rest.is_volume_in_storagegroup( serial_number, device_id, sg_name) if is_vol: num_vol_in_sg = self.rest.get_num_vols_in_sg( serial_number, sg_name) LOG.debug( "There are %(num_vol)d volumes in the storage group " "%(sg_name)s associated with %(mv_name)s. Parent " "storagegroup is %(parent)s.", {'num_vol': num_vol_in_sg, 'sg_name': sg_name, 'mv_name': mv_name, 'parent': parent_name}) if num_vol_in_sg == 1: # Last volume in the storage group - delete sg. self._last_vol_in_sg( serial_number, device_id, vol_name, sg_name, extra_specs, move, connector, host_template=host_template) else: # Not the last volume so remove it from storage group self._multiple_vols_in_sg( serial_number, device_id, sg_name, vol_name, extra_specs, move) else: LOG.info("Volume with device_id %(dev)s is no longer a " "member of %(sg)s", {'dev': device_id, 'sg': sg_name}) return do_remove_volume_from_sg(masking_name, storagegroup_name, parent_sg_name, serial_number) def _last_vol_in_sg( self, serial_number, device_id, volume_name, storagegroup_name, extra_specs, move, connector=None, host_template=None): """Steps if the volume is the last in a storage group. 1. Check if the volume is in a masking view. 2. 
If it is in a masking view, check if it is the last volume in the masking view or just this child storage group. 3. If it is last in the masking view, delete the masking view, delete the initiator group if there are no other masking views associated with it, and delete the both the current storage group and its parent group. 4. Otherwise, remove the volume and delete the child storage group. 5. If it is not in a masking view, delete the storage group. :param serial_number: array serial number :param device_id: volume device id :param volume_name: volume name :param storagegroup_name: storage group name :param extra_specs: extra specifications :param move: flag to indicate a move instead of remove :param connector: the connector object :param host_template: the host template (if it exists) :returns: status -- bool """ LOG.debug("Only one volume remains in storage group " "%(sgname)s. Driver will attempt cleanup.", {'sgname': storagegroup_name}) maskingview_list = self.rest.get_masking_views_from_storage_group( serial_number, storagegroup_name) if not bool(maskingview_list): status = self._last_vol_no_masking_views( serial_number, storagegroup_name, device_id, volume_name, extra_specs, move) else: status = self._last_vol_masking_views( serial_number, storagegroup_name, maskingview_list, device_id, volume_name, extra_specs, connector, move, host_template) return status def _last_vol_no_masking_views(self, serial_number, storagegroup_name, device_id, volume_name, extra_specs, move): """Remove the last vol from an sg not associated with an mv. Helper function for removing the last vol from a storage group which is not associated with a masking view. :param serial_number: the array serial number :param storagegroup_name: the storage group name :param device_id: the device id :param volume_name: the volume name :param extra_specs: the extra specifications :param move: flag to indicate a move instead of remove :returns: status -- bool """ # Check if storage group is a child sg: parent_sg = self.get_parent_sg_from_child( serial_number, storagegroup_name) if parent_sg is None: # Move the volume back to the default storage group, if required if move: self.add_volume_to_default_storage_group( serial_number, device_id, volume_name, extra_specs, src_sg=storagegroup_name) # Remove last volume and delete the storage group. self._remove_last_vol_and_delete_sg( serial_number, device_id, volume_name, storagegroup_name, extra_specs) status = True else: num_vols_parent = self.rest.get_num_vols_in_sg( serial_number, parent_sg) if num_vols_parent == 1: self._delete_cascaded_storage_groups( serial_number, storagegroup_name, parent_sg, extra_specs, device_id, move) else: self._remove_last_vol_and_delete_sg( serial_number, device_id, volume_name, storagegroup_name, extra_specs, parent_sg, move) status = True return status def _last_vol_masking_views( self, serial_number, storagegroup_name, maskingview_list, device_id, volume_name, extra_specs, connector, move, host_template=None): """Remove the last vol from an sg associated with masking views. Helper function for removing the last vol from a storage group which is associated with one or more masking views. 
:param serial_number: the array serial number :param storagegroup_name: the storage group name :param maskingview_list: the list of masking views :param device_id: the device id :param volume_name: the volume name :param extra_specs: the extra specifications :param connector: the connector object :param move: flag to indicate a move instead of remove :param host_template: the host template (if it exists) :returns: status -- bool """ status = False for mv in maskingview_list: num_vols_in_mv, parent_sg_name = ( self._get_num_vols_from_mv(serial_number, mv)) # If the volume is the last in the masking view, full cleanup if num_vols_in_mv == 1: self._delete_mv_ig_and_sg( serial_number, device_id, mv, storagegroup_name, parent_sg_name, connector, move, extra_specs, host_template=host_template) else: self._remove_last_vol_and_delete_sg( serial_number, device_id, volume_name, storagegroup_name, extra_specs, parent_sg_name, move) status = True return status def get_parent_sg_from_child(self, serial_number, storagegroup_name): """Given a storage group name, get its parent storage group, if any. :param serial_number: the array serial number :param storagegroup_name: the name of the storage group :returns: the parent storage group name, or None """ parent_sg_name = None storagegroup = self.rest.get_storage_group( serial_number, storagegroup_name) if storagegroup and storagegroup.get('parent_storage_group'): parent_sg_name = storagegroup['parent_storage_group'][0] return parent_sg_name def _get_num_vols_from_mv(self, serial_number, maskingview_name): """Get the total number of volumes associated with a masking view. :param serial_number: the array serial number :param maskingview_name: the name of the masking view :returns: num_vols, parent_sg_name """ parent_sg_name = self.rest.get_element_from_masking_view( serial_number, maskingview_name, storagegroup=True) num_vols = self.rest.get_num_vols_in_sg(serial_number, parent_sg_name) return num_vols, parent_sg_name def _multiple_vols_in_sg(self, serial_number, device_id, storagegroup_name, volume_name, extra_specs, move): """Remove the volume from the SG. If the volume is not the last in the storage group, remove the volume from the SG and leave the sg on the array. :param serial_number: array serial number :param device_id: volume device id :param volume_name: volume name :param storagegroup_name: storage group name :param move: flag to indicate a move instead of remove :param extra_specs: extra specifications """ if move: self.add_volume_to_default_storage_group( serial_number, device_id, volume_name, extra_specs, src_sg=storagegroup_name) LOG.debug( "Volume %(volume_name)s successfully added to " "storage group %(sg)s.", {'volume_name': volume_name, 'sg': storagegroup_name}) else: self.remove_vol_from_storage_group( serial_number, device_id, storagegroup_name, volume_name, extra_specs) LOG.debug( "Volume %(volume_name)s successfully removed from " "storage group %(sg)s.", {'volume_name': volume_name, 'sg': storagegroup_name}) num_vol_in_sg = self.rest.get_num_vols_in_sg( serial_number, storagegroup_name) LOG.debug("There are %(num_vol)d volumes remaining in the storage " "group %(sg_name)s.", {'num_vol': num_vol_in_sg, 'sg_name': storagegroup_name}) def _delete_cascaded_storage_groups(self, serial_number, child_sg_name, parent_sg_name, extra_specs, device_id, move): """Delete child and parent storage groups.
:param serial_number: the array serial number :param child_sg_name: the child storage group name :param parent_sg_name: the parent storage group name :param extra_specs: the extra specifications :param device_id: the volume device id :param move: flag to indicate if volume should be moved to default sg """ if move: self.add_volume_to_default_storage_group( serial_number, device_id, "", extra_specs, src_sg=child_sg_name) if child_sg_name != parent_sg_name: self.rest.remove_child_sg_from_parent_sg( serial_number, child_sg_name, parent_sg_name, extra_specs) self.rest.delete_storage_group(serial_number, parent_sg_name) LOG.debug("Storage Group %(storagegroup_name)s " "successfully deleted.", {'storagegroup_name': parent_sg_name}) # Remove last volume and delete the storage group. if self.rest.is_volume_in_storagegroup( serial_number, device_id, child_sg_name): self._remove_last_vol_and_delete_sg( serial_number, device_id, 'last_vol', child_sg_name, extra_specs) else: self.rest.delete_storage_group(serial_number, child_sg_name) LOG.debug("Storage Group %(storagegroup_name)s successfully deleted.", {'storagegroup_name': child_sg_name}) def _delete_mv_ig_and_sg( self, serial_number, device_id, masking_view, storagegroup_name, parent_sg_name, connector, move, extra_specs, host_template=None): """Delete the masking view, storage groups and initiator group. :param serial_number: array serial number :param device_id: the device id :param masking_view: masking view name :param storagegroup_name: storage group name :param parent_sg_name: the parent storage group name :param connector: the connector object :param move: flag to indicate if the volume should be moved :param extra_specs: the extra specifications :param host_template: the host template (if it exists) """ initiatorgroup = self.rest.get_element_from_masking_view( serial_number, masking_view, host=True) self._last_volume_delete_masking_view(serial_number, masking_view) self._last_volume_delete_initiator_group( serial_number, initiatorgroup, connector.get('host') if connector else None, host_template) self._delete_cascaded_storage_groups( serial_number, storagegroup_name, parent_sg_name, extra_specs, device_id, move) def _last_volume_delete_masking_view(self, serial_number, masking_view): """Delete the masking view. Delete the masking view if the volume is the last one in the storage group. :param serial_number: the array serial number :param masking_view: masking view name """ LOG.debug("Last volume in the storage group, deleting masking view " "%(maskingview_name)s.", {'maskingview_name': masking_view}) self.rest.delete_masking_view(serial_number, masking_view) LOG.info("Masking view %(maskingview)s successfully deleted.", {'maskingview': masking_view}) def add_volume_to_default_storage_group( self, serial_number, device_id, volume_name, extra_specs, src_sg=None, volume=None): """Return volume to its default storage group. 
:param serial_number: the array serial number :param device_id: the volume device id :param volume_name: the volume name :param extra_specs: the extra specifications :param src_sg: the source storage group, if any :param volume: the volume object """ do_disable_compression = self.utils.is_compression_disabled( extra_specs) rep_enabled = self.utils.is_replication_enabled(extra_specs) rep_mode = extra_specs.get(utils.REP_MODE, None) if self.rest.is_next_gen_array(serial_number): extra_specs[utils.WORKLOAD] = 'NONE' storagegroup_name = self.get_or_create_default_storage_group( serial_number, extra_specs[utils.SRP], extra_specs[utils.SLO], extra_specs[utils.WORKLOAD], extra_specs, do_disable_compression, rep_enabled, rep_mode) if src_sg is not None: # Need to lock the default storage group @coordination.synchronized( "emc-sg-{default_sg_name}-{serial_number}") def _move_vol_to_default_sg(default_sg_name, serial_number): self.rest.move_volume_between_storage_groups( serial_number, device_id, src_sg, default_sg_name, extra_specs, force=True) _move_vol_to_default_sg(storagegroup_name, serial_number) else: self._check_adding_volume_to_storage_group( serial_number, device_id, storagegroup_name, volume_name, extra_specs) if volume: # Need to check if the volume needs to be returned to a # generic volume group. This may be necessary in a force-detach # situation. self.return_volume_to_volume_group( serial_number, volume, device_id, volume_name, extra_specs) def return_volume_to_volume_group(self, serial_number, volume, device_id, volume_name, extra_specs): """Return a volume to its volume group, if required. :param serial_number: the array serial number :param volume: the volume object :param device_id: the device id :param volume_name: the volume name :param extra_specs: the extra specifications """ if (volume.group_id is not None and (volume_utils.is_group_a_cg_snapshot_type(volume.group) or volume.group.is_replicated)): vol_grp_name = self.provision.get_or_create_volume_group( serial_number, volume.group, extra_specs) self._check_adding_volume_to_storage_group( serial_number, device_id, vol_grp_name, volume_name, extra_specs) if volume.group.is_replicated: self.add_remote_vols_to_volume_group( volume, volume.group, extra_specs) def add_remote_vols_to_volume_group( self, volumes, group, extra_specs, rep_driver_data=None): """Add the remote volumes to their volume group. :param volumes: list of volumes :param group: the id of the group :param extra_specs: the extra specifications :param rep_driver_data: replication driver data, optional """ remote_device_list = [] remote_array = None if not isinstance(volumes, list): volumes = [volumes] for vol in volumes: try: remote_loc = ast.literal_eval(vol.replication_driver_data) except (ValueError, KeyError): remote_loc = ast.literal_eval(rep_driver_data) remote_array = remote_loc['array'] founddevice_id = self.rest.check_volume_device_id( remote_array, remote_loc['device_id'], vol.id) if founddevice_id is not None: remote_device_list.append(founddevice_id) group_name = self.provision.get_or_create_volume_group( remote_array, group, extra_specs) self.add_volumes_to_storage_group( remote_array, remote_device_list, group_name, extra_specs) LOG.info("Added volumes to remote volume group.") def get_or_create_default_storage_group( self, serial_number, srp, slo, workload, extra_specs, do_disable_compression=False, is_re=False, rep_mode=None): """Get or create a default storage group. 
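For example, a call such as the following (illustrative values only):

    sg_name = self.get_or_create_default_storage_group(
        array, 'SRP_1', 'Diamond', 'OLTP', extra_specs,
        do_disable_compression=False, is_re=False)

typically resolves to (or creates) a default group named along the lines of 'OS-SRP_1-Diamond-OLTP-SG'; the exact name is determined by the REST layer and gains extra suffixes when compression is disabled or replication is enabled.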
:param serial_number: the array serial number :param srp: the SRP name :param slo: the SLO :param workload: the workload :param extra_specs: extra specifications :param do_disable_compression: flag for compression :param is_re: is replication enabled :param rep_mode: flag to indicate replication mode :returns: storagegroup_name :raises: VolumeBackendAPIException """ storagegroup, storagegroup_name = ( self.rest.get_vmax_default_storage_group( serial_number, srp, slo, workload, do_disable_compression, is_re, rep_mode)) if storagegroup is None: self.provision.create_storage_group( serial_number, storagegroup_name, srp, slo, workload, extra_specs, do_disable_compression) else: # Check that SG is not part of a masking view LOG.info("Using existing default storage group") masking_views = self.rest.get_masking_views_from_storage_group( serial_number, storagegroup_name) if masking_views: exception_message = (_( "Default storage group %(sg_name)s is part of masking " "views %(mvs)s. Please remove it from all masking views") % {'sg_name': storagegroup_name, 'mvs': masking_views}) LOG.error(exception_message) raise exception.VolumeBackendAPIException( message=exception_message) # If qos exists, update storage group to reflect qos parameters if 'qos' in extra_specs: self.rest.update_storagegroup_qos( serial_number, storagegroup_name, extra_specs) return storagegroup_name def _remove_last_vol_and_delete_sg( self, serial_number, device_id, volume_name, storagegroup_name, extra_specs, parent_sg_name=None, move=False): """Remove the last volume and delete the storage group. If the storage group is a child of another storage group, it must be removed from the parent before deletion. :param serial_number: the array serial number :param device_id: the volume device id :param volume_name: the volume name :param storagegroup_name: the sg name :param extra_specs: extra specifications :param parent_sg_name: the parent sg name """ if move: self.add_volume_to_default_storage_group( serial_number, device_id, volume_name, extra_specs, src_sg=storagegroup_name) else: self.remove_vol_from_storage_group( serial_number, device_id, storagegroup_name, volume_name, extra_specs) LOG.debug("Remove the last volume %(volumeName)s completed " "successfully.", {'volumeName': volume_name}) if parent_sg_name: self.rest.remove_child_sg_from_parent_sg( serial_number, storagegroup_name, parent_sg_name, extra_specs) self.rest.delete_storage_group(serial_number, storagegroup_name) def _last_volume_delete_initiator_group( self, serial_number, initiatorgroup_name, host, host_template=None): """Delete the initiator group. Delete the Initiator group if it has been created by the PowerMax driver, and if there are no masking views associated with it. 
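The ownership check is roughly equivalent to the following (the host name is a hypothetical example):

    host_label = self.utils.get_host_name_label('myhost', host_template)
    expected_ig = self.utils.get_possible_initiator_name(
        host_label, self.protocol)
    # delete only if initiatorgroup_name matches expected_ig (or the
    # legacy short-host-name variant) and no masking views use it

Any initiator group with a non-matching name is treated as pre-existing and is left untouched.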
:param serial_number: the array serial number :param initiatorgroup_name: initiator group name :param host: the short name of the host :param host_template: the host template (if it exists) """ def _do_delete_initiator_group(array, init_group_name): is_deleted = False maskingview_names = ( self.rest.get_masking_views_by_initiator_group( array, init_group_name)) if not maskingview_names: @coordination.synchronized( "emc-ig-{ig_name}-{array}") def _delete_ig(ig_name, array): # Check initiator group hasn't been recently deleted ig_details = self.rest.get_initiator_group( serial_number, ig_name) if ig_details: LOG.debug( "Last volume associated with the initiator " "group - deleting the associated initiator " "group %(initiatorgroup_name)s.", {'initiatorgroup_name': ig_name}) self.rest.delete_initiator_group( array, ig_name) return True else: return False is_deleted = _delete_ig(init_group_name, array) else: LOG.warning("Initiator group %(ig_name)s is associated " "with masking views and can't be deleted. " "Number of associated masking view is: " "%(nmv)d.", {'ig_name': init_group_name, 'nmv': len(maskingview_names)}) return is_deleted if host is not None: is_deleted = False host_label = (self.utils.get_host_name_label( host, host_template) if host else None) default_ig_name = self.utils.get_possible_initiator_name( host_label, self.protocol) if initiatorgroup_name == default_ig_name: is_deleted = _do_delete_initiator_group( serial_number, initiatorgroup_name) else: # Legacy cleanup legacy_short_host = (self.utils.get_host_short_name( host) if host else None) default_ig_name = self.utils.get_possible_initiator_name( legacy_short_host, self.protocol) if initiatorgroup_name == default_ig_name: is_deleted = _do_delete_initiator_group( serial_number, initiatorgroup_name) if not is_deleted: LOG.warning("Initiator group %(ig_name)s was " "not created by the PowerMax driver so will " "not be deleted by the PowerMax driver.", {'ig_name': initiatorgroup_name}) else: LOG.warning("Cannot get host name from connector object - " "initiator group %(ig_name)s will not be deleted.", {'ig_name': initiatorgroup_name}) def pre_multiattach(self, serial_number, device_id, mv_dict, extra_specs): """Run before attaching a device to multiple hosts. 
:param serial_number: the array serial number :param device_id: the device id :param mv_dict: the masking view dict :param extra_specs: extra specifications :returns: masking view dict """ no_slo_sg_name, fast_source_sg_name, parent_sg_name = None, None, None sg_list = self.rest.get_storage_group_list( serial_number, params={ 'child': 'true', 'volumeId': device_id}) # You need to put in something here for legacy if not sg_list.get('storageGroupId'): storage_group_list = self.rest.get_storage_groups_from_volume( serial_number, device_id) if storage_group_list and len(storage_group_list) == 1: if 'STG-' in storage_group_list[0]: return mv_dict if 'pool_name' in extra_specs: split_pool = extra_specs['pool_name'].split('+') src_slo = split_pool[0] src_wl = split_pool[1] if len(split_pool) == 4 else 'NONE' else: src_slo = extra_specs[utils.SLO] src_wl = extra_specs[utils.WORKLOAD] slo_wl_combo = self.utils.truncate_string(src_slo + src_wl.upper(), 10) for sg in sg_list.get('storageGroupId', []): if slo_wl_combo in sg: fast_source_sg_name = sg short_host_name, port_group_label = ( self._get_host_and_port_group_labels( serial_number, fast_source_sg_name)) no_slo_extra_specs = deepcopy(extra_specs) no_slo_extra_specs[utils.SLO] = None no_slo_sg_name, __, __ = self.utils.get_child_sg_name( short_host_name, no_slo_extra_specs, port_group_label) source_sg_details = self.rest.get_storage_group( serial_number, fast_source_sg_name) parent_sg_name = source_sg_details[ 'parent_storage_group'][0] mv_dict[utils.OTHER_PARENT_SG] = parent_sg_name mv_dict[utils.FAST_SG] = fast_source_sg_name mv_dict[utils.NO_SLO_SG] = no_slo_sg_name try: no_slo_sg = self.rest.get_storage_group( serial_number, no_slo_sg_name) if no_slo_sg is None: self.provision.create_storage_group( serial_number, no_slo_sg_name, None, None, None, extra_specs) self._check_add_child_sg_to_parent_sg( serial_number, no_slo_sg_name, parent_sg_name, extra_specs) self.move_volume_between_storage_groups( serial_number, device_id, fast_source_sg_name, no_slo_sg_name, extra_specs, parent_sg=parent_sg_name) # Clean up the fast managed group, if required self._clean_up_child_storage_group( serial_number, fast_source_sg_name, parent_sg_name, extra_specs) except Exception: # Move it back to original storage group, if required self._return_volume_to_fast_managed_group( serial_number, device_id, parent_sg_name, fast_source_sg_name, no_slo_sg_name, extra_specs) exception_message = (_("Unable to setup for multiattach because " "of the following error: %(error_msg)s.") % {'error_msg': sys.exc_info()[1]}) raise exception.VolumeBackendAPIException( message=exception_message) return mv_dict def _get_host_and_port_group_labels( self, serial_number, storage_group): """Get the host and port group labels. :param serial_number: the array serial number :param storage_group: the storage group :returns: short_host_name, port_group_label """ masking_view_name = ( self.rest.get_masking_views_from_storage_group( serial_number, storage_group))[0] object_dict = self.get_components_from_masking_view_name( masking_view_name) return object_dict['host'], object_dict['portgroup'] def get_components_from_masking_view_name(self, masking_view_name): """Get the components of a masking view name. :param masking_view_name: the masking view name :returns: object dict """ regex_str = (r'^(?P<prefix>OS)-(?P<host>.+?)(?P<protocol>I|F|NT)-' r'(?P<portgroup>(?!CD|RE|CD-RE).+)-(?P<postfix>MV)$') object_dict = self.utils.get_object_components_and_correct_host( regex_str, masking_view_name) return object_dict def
return_volume_to_fast_managed_group( self, serial_number, device_id, extra_specs): """Return a volume to a fast managed group if slo is set. On a detach on a multiattach volume, return the volume to its fast managed group, if slo is set. :param serial_number: the array serial number :param device_id: the device id :param extra_specs: the extra specifications """ if extra_specs[utils.SLO]: # Get a parent storage group of the volume sg_list = self.rest.get_storage_group_list( serial_number, params={ 'child': 'true', 'volumeId': device_id}) slo_wl_combo = '-No_SLO-' for sg in sg_list.get('storageGroupId', []): if slo_wl_combo in sg: no_slo_sg_name = sg short_host_name, port_group_label = ( self._get_host_and_port_group_labels( serial_number, no_slo_sg_name)) fast_sg_name, __, __ = self.utils.get_child_sg_name( short_host_name, extra_specs, port_group_label) source_sg_details = self.rest.get_storage_group( serial_number, no_slo_sg_name) parent_sg_name = source_sg_details[ 'parent_storage_group'][0] self._return_volume_to_fast_managed_group( serial_number, device_id, parent_sg_name, fast_sg_name, no_slo_sg_name, extra_specs) break def _return_volume_to_fast_managed_group( self, serial_number, device_id, parent_sg_name, fast_sg_name, no_slo_sg_name, extra_specs): """Return a volume to its fast managed group. On a detach, or failed attach, on a multiattach volume, return the volume to its fast managed group, if required. :param serial_number: the array serial number :param device_id: the device id :param parent_sg_name: the parent sg name :param fast_sg_name: the fast managed sg name :param no_slo_sg_name: the no slo sg name :param extra_specs: the extra specifications """ sg_list = self.rest.get_storage_groups_from_volume( serial_number, device_id) in_fast_sg = True if fast_sg_name in sg_list else False if in_fast_sg is False: disable_compr = self.utils.is_compression_disabled(extra_specs) mv_dict = {utils.DISABLECOMPRESSION: disable_compr, utils.VOL_NAME: device_id} # Get or create the fast child sg self._get_or_create_storage_group( serial_number, mv_dict, fast_sg_name, extra_specs) # Add child sg to parent sg if required self.add_child_sg_to_parent_sg( serial_number, fast_sg_name, parent_sg_name, extra_specs) # Add or move volume to fast sg self._move_vol_from_default_sg( serial_number, device_id, device_id, no_slo_sg_name, fast_sg_name, extra_specs, parent_sg_name=parent_sg_name) else: LOG.debug("Volume already a member of the FAST managed storage " "group.") # Check if non-fast storage group needs to be cleaned up self._clean_up_child_storage_group( serial_number, no_slo_sg_name, parent_sg_name, extra_specs) def _clean_up_child_storage_group(self, serial_number, child_sg_name, parent_sg_name, extra_specs): """Clean up an empty child storage group, if required. 
:param serial_number: the array serial number :param child_sg_name: the child storage group :param parent_sg_name: the parent storage group :param extra_specs: extra specifications """ child_sg = self.rest.get_storage_group(serial_number, child_sg_name) if child_sg: num_vol_in_sg = self.rest.get_num_vols_in_sg( serial_number, child_sg_name) if num_vol_in_sg == 0: if self.rest.is_child_sg_in_parent_sg( serial_number, child_sg_name, parent_sg_name): self.rest.remove_child_sg_from_parent_sg( serial_number, child_sg_name, parent_sg_name, extra_specs) self.rest.delete_storage_group( serial_number, child_sg_name) def attempt_ig_cleanup( self, connector, protocol, serial_number, force, host_template=None): """Attempt to cleanup an orphan initiator group :param connector: connector object :param protocol: iscsi or fc :param serial_number: extra the array serial number :param force: flag to indicate if operation should be forced :param host_template: the host template (if it exists) """ protocol = self.utils.get_short_protocol_type(protocol) host_name = connector.get('host') host_label = self.utils.get_host_name_label( host_name, host_template=host_template) initiator_group_name = self.utils.get_possible_initiator_name( host_label, protocol) self._check_ig_rollback( serial_number, initiator_group_name, connector, force) def _check_child_storage_group_exists( self, device_id, serial_number, child_sg_name, extra_specs, parent_sg_name): """Check if the storage group exists. If the storage group does not exist create it and add it to the parent :param device_id: device id :param serial_number: extra the array serial number :param child_sg_name: child storage group :param extra_specs: extra specifications :param parent_sg_name: parent storage group """ disable_compr = self.utils.is_compression_disabled(extra_specs) mv_dict = {utils.DISABLECOMPRESSION: disable_compr, utils.VOL_NAME: device_id} # Get or create the storage group self._get_or_create_storage_group( serial_number, mv_dict, child_sg_name, extra_specs) if parent_sg_name: if not self.rest.is_child_sg_in_parent_sg( serial_number, child_sg_name, parent_sg_name): self.rest.add_child_sg_to_parent_sg( serial_number, child_sg_name, parent_sg_name, extra_specs) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/drivers/dell_emc/powermax/metadata.py0000664000175000017500000007241100000000000025065 0ustar00zuulzuul00000000000000# Copyright (c) 2020 Dell Inc. or its subsidiaries. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
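# Illustrative usage sketch (an assumption about how the common driver wires
# this module up, not code shipped in this file): a single
# PowerMaxVolumeMetadata instance is created with the REST client and the
# capture_* helpers are called after each successful operation, e.g.:
#
#     volume_metadata = PowerMaxVolumeMetadata(rest, driver_version,
#                                              is_debug=True)
#     volume_metadata.gather_version_info(serial_number)
#     volume_metadata.capture_create_volume(
#         device_id, volume, group_name=None, group_id=None,
#         extra_specs=extra_specs, rep_info_dict=None,
#         successful_operation='create')
#
# Every capture_* method is decorated with @debug_required (defined below),
# so all of this metadata gathering is skipped unless debug is enabled.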
import datetime import platform import time import traceback import types from oslo_log import log as logging import tabulate from cinder.objects import volume from cinder import version from cinder.volume.drivers.dell_emc.powermax import utils LOG = logging.getLogger(__name__) CLEANUP_LIST = ['masking_view', 'child_storage_group', 'parent_storage_group', 'initiator_group', 'port_group', 'storage_group'] def debug_required(func): """Only execute the function if debug is enabled.""" def func_wrapper(*args, **kwargs): try: if args[0].is_debug: return func(*args, **kwargs) else: pass except Exception: LOG.warning("Volume metadata logging failure. " "Exception is %s.", traceback.format_exc()) return func_wrapper class PowerMaxVolumeMetadata(object): """Gathers PowerMax/VMAX specific volume information. Also gathers Unisphere, Microcode OS/distribution and python versions. """ def __init__(self, rest, version, is_debug): self.version_dict = {} self.rest = rest self.utils = utils.PowerMaxUtils() self.volume_trace_list = [] self.is_debug = is_debug self.powermax_driver_version = version def _update_platform(self): """Update the platform.""" try: self.version_dict['openstack_platform'] = platform.platform() except Exception as ex: LOG.warning("Unable to determine the platform. " "Exception is %s.", ex) def _get_python_version(self): """Get the python version.""" try: self.version_dict['python_version'] = platform.python_version() except Exception as ex: LOG.warning("Unable to determine the python version. " "Exception is %s.", ex) def _update_version_from_version_string(self): """Update the version from the version string.""" try: self.version_dict['openstack_version'] = ( version.version_info.version_string()) except Exception as ex: LOG.warning("Unable to determine the OS version. " "Exception is %s.", ex) def _update_release_from_release_string(self): """Update the release from the release string.""" try: self.version_dict['openstack_release'] = ( version.version_info.release_string()) except Exception as ex: LOG.warning("Unable to get release info. " "Exception is %s.", ex) @staticmethod def _get_version_info_version(): """Gets the version. :returns: string -- version """ return version.version_info.version def _update_info_from_version_info(self): """Update class variables from version info.""" try: ver = self._get_version_info_version() if ver: self.version_dict['openstack_version'] = ver except Exception as ex: LOG.warning("Unable to get version info. " "Exception is %s.", ex) def _update_openstack_info(self): """Update openstack info.""" self._update_version_from_version_string() self._update_release_from_release_string() self._update_platform() self._get_python_version() # Some distributions override with more meaningful information self._update_info_from_version_info() def _update_array_info(self, serial_number): """Update PowerMax/VMAX info. 
:param serial_number: the serial number of the array """ u4p_version_dict = ( self.rest.get_unisphere_version()) self.version_dict['unisphere_for_powermax_version'] = ( u4p_version_dict['version']) self.version_dict['serial_number'] = serial_number array_info_dict = self.rest.get_array_detail(serial_number) self.version_dict['storage_firmware_version'] = ( array_info_dict.get( 'ucode', array_info_dict.get('microcode'))) self.version_dict['storage_model'] = array_info_dict['model'] self.version_dict['powermax_cinder_driver_version'] = ( self.powermax_driver_version) @debug_required def gather_version_info(self, serial_number): """Gather info on the array :param serial_number: the serial number of the array :returns: version_dict """ try: self._update_openstack_info() self._update_array_info(serial_number) self.print_pretty_table(self.version_dict) except Exception as ex: LOG.warning("Unable to gather version info. " "Exception is %s.", ex) return self.version_dict @debug_required def gather_volume_info( self, volume_id, successful_operation, append, **kwargs): """Gather volume information. :param volume_id: the unique volume id key :param successful_operation: the operation e.g "create" :param append: append flag :param kwargs: variable length argument list :returns: datadict """ volume_trace_dict = {} volume_key_value = {} datadict = {} try: volume_trace_dict = self._fill_volume_trace_dict( volume_id, successful_operation, append, **kwargs) volume_trace_dict['volume_updated_time'] = ( datetime.datetime.fromtimestamp( int(time.time())).strftime('%Y-%m-%d %H:%M:%S')) volume_key_value[volume_id] = volume_trace_dict if not self.volume_trace_list: self.volume_trace_list.append(volume_key_value.copy()) else: self._consolidate_volume_trace_list( volume_id, volume_trace_dict, volume_key_value) for datadict in list(self.volume_trace_list): if volume_id in datadict: if not append: self.volume_trace_list.remove(datadict) return datadict except Exception as ex: LOG.warning("Exception in gather volume metadata. " "Exception is %s.", ex) return datadict def _fill_volume_trace_dict( self, volume_id, successful_operation, append, **kwargs): """Populates a dictionary with key value pairs :param volume_id: the unique volume id key :param successful_operation: the operation e.g "create" :param append: append flag :param kwargs: variable length argument list :returns: my_volume_trace_dict """ param_dict = locals() my_volume_trace_dict = {} for k, v in param_dict.items(): if self._param_condition(k, v): my_volume_trace_dict[k] = v if k == 'kwargs': for k2, v2 in v.items(): if self._param_condition(k2, v2): my_volume_trace_dict[k2] = v2 elif k2 == 'mv_list' and v2: for i, item in enumerate(v2, 1): my_volume_trace_dict["masking_view_%d" % i] = item elif k2 == 'sg_list' and v2: for i, item in enumerate(v2, 1): my_volume_trace_dict["storage_group_%d" % i] = item return my_volume_trace_dict def _param_condition(self, key, value): """Determines condition for inclusion. :param key: the key :param value: the value :returns: True or False """ exclude_list = ('self', 'append', 'mv_list', 'sg_list') return (value is not None and key not in exclude_list and not isinstance(value, (dict, types.FunctionType, type))) @debug_required def print_pretty_table(self, datadict): """Prints the data in the dict. 
:param datadict: the data dictionary """ rows = [] for k, v in datadict.items(): if v is not None: rows.append([k, v]) t = tabulate.tabulate(rows, headers=['Key', 'Value'], tablefmt='psql') LOG.debug('\n%s\n', t) def _consolidate_volume_trace_list( self, volume_id, volume_trace_dict, volume_key_value): """Consolidate data into self.volume_trace_list :param volume_id: the unique volume identifier :param volume_trace_dict: the existing dict :param volume_key_value: the volume id key and dict value """ is_merged = False for datadict in list(self.volume_trace_list): if volume_id in datadict: for key, dict_value in datadict.items(): merged_dict = ( self.utils.merge_dicts( volume_trace_dict, dict_value)) self.volume_trace_list.remove(datadict) volume_key_value[volume_id] = merged_dict self.volume_trace_list.append(volume_key_value.copy()) is_merged = True if not is_merged: self.volume_trace_list.append(volume_key_value.copy()) @debug_required def update_volume_info_metadata(self, datadict, version_dict): """Get update volume metadata with volume info :param datadict: volume info key value pairs :param version_dict: version dictionary :returns: volume_metadata """ return self.utils.merge_dicts( version_dict, *datadict.values()) @debug_required def capture_attach_info( self, volume, extra_specs, masking_view_dict, host, is_multipath, is_multiattach): """Captures attach info in volume metadata :param volume: the volume object :param extra_specs: extra specifications :param masking_view_dict: masking view dict :param host: host :param is_multipath: is mulitipath flag :param is_multiattach: is multi attach """ mv_list, sg_list = [], [] child_storage_group, parent_storage_group = None, None initiator_group, port_group = None, None child_storage_group_tag_list = None if is_multiattach: successful_operation = 'multi_attach' mv_list = masking_view_dict['mv_list'] sg_list = masking_view_dict['sg_list'] else: successful_operation = 'attach' child_storage_group = masking_view_dict[utils.SG_NAME] child_storage_group_tag_list = ( masking_view_dict.get(utils.TAG_LIST, None)) parent_storage_group = masking_view_dict[utils.PARENT_SG_NAME] initiator_group = masking_view_dict[utils.IG_NAME] port_group = masking_view_dict[utils.PORTGROUPNAME] sl, wl = self.utils.get_service_level_workload(extra_specs) datadict = self.gather_volume_info( volume.id, successful_operation, False, serial_number=extra_specs[utils.ARRAY], service_level=sl, workload=wl, srp=extra_specs[utils.SRP], masking_view=masking_view_dict[utils.MV_NAME], child_storage_group=child_storage_group, parent_storage_group=parent_storage_group, initiator_group=initiator_group, port_group=port_group, host=host, used_host_name=masking_view_dict[utils.USED_HOST_NAME], is_multipath=is_multipath, identifier_name=self.utils.get_volume_element_name(volume.id), openstack_name=volume.display_name, mv_list=mv_list, sg_list=sg_list, child_storage_group_tag_list=child_storage_group_tag_list, array_tag_list=masking_view_dict.get('array_tag_list', None)) volume_metadata = self.update_volume_info_metadata( datadict, self.version_dict) self.print_pretty_table(volume_metadata) @debug_required def capture_detach_info( self, volume, extra_specs, device_id, mv_list, sg_list): """Captures detach info in volume metadata :param volume: the volume object :param extra_specs: extra specifications :param device_id: masking view dict :param mv_list: masking view list :param sg_list: storage group list """ default_sg = self.utils.derive_default_sg_from_extra_specs(extra_specs) sl, wl = 
self.utils.get_service_level_workload(extra_specs) datadict = self.gather_volume_info( volume.id, 'detach', False, device_id=device_id, serial_number=extra_specs[utils.ARRAY], service_level=sl, workload=wl, srp=extra_specs[utils.SRP], default_sg_name=default_sg, identifier_name=self.utils.get_volume_element_name(volume.id), openstack_name=volume.display_name, mv_list=mv_list, sg_list=sg_list ) volume_metadata = self.update_volume_info_metadata( datadict, self.version_dict) self.print_pretty_table(volume_metadata) @debug_required def capture_extend_info( self, volume, new_size, device_id, extra_specs, array): """Capture extend info in volume metadata :param volume: the volume object :param new_size: new size :param device_id: device id :param extra_specs: extra specifications :param array: array serial number """ default_sg = self.utils.derive_default_sg_from_extra_specs(extra_specs) sl, wl = self.utils.get_service_level_workload(extra_specs) datadict = self.gather_volume_info( volume.id, 'extend', False, volume_size=new_size, device_id=device_id, default_sg_name=default_sg, serial_number=array, service_level=sl, workload=wl, srp=extra_specs[utils.SRP], identifier_name=self.utils.get_volume_element_name(volume.id), openstack_name=volume.display_name, is_compression_disabled=self.utils.is_compression_disabled( extra_specs)) volume_metadata = self.update_volume_info_metadata( datadict, self.version_dict) self.print_pretty_table(volume_metadata) @debug_required def capture_snapshot_info( self, source, extra_specs, successful_operation, snapshot_metadata): """Captures snapshot info in volume metadata :param source: the source volume object :param extra_specs: extra specifications :param successful_operation: snapshot operation :param snapshot_metadata: snapshot metadata """ last_ss_name, snapshot_label, source_device_id = None, None, None source_device_label, snap_ids, is_snap_id = None, None, None if isinstance(source, volume.Volume): if 'create' or 'manage' in successful_operation: snapshot_count = str(len(source.snapshots)) if snapshot_metadata: last_ss_name = snapshot_metadata.get('snap_display_name') snapshot_label = snapshot_metadata.get('SnapshotLabel') source_device_id = snapshot_metadata.get('SourceDeviceID') source_device_label = snapshot_metadata.get( 'SourceDeviceLabel') snap_ids = snapshot_metadata.get('SnapIdList') is_snap_id = snapshot_metadata.get('is_snap_id') else: snapshot_count = str(len(source.snapshots) - 1) default_sg = ( self.utils.derive_default_sg_from_extra_specs(extra_specs)) sl, wl = self.utils.get_service_level_workload(extra_specs) datadict = self.gather_volume_info( source.id, successful_operation, False, volume_size=source.size, default_sg_name=default_sg, serial_number=extra_specs[utils.ARRAY], service_level=sl, workload=wl, srp=extra_specs[utils.SRP], identifier_name=( self.utils.get_volume_element_name(source.id)), openstack_name=source.display_name, snapshot_count=snapshot_count, last_ss_name=last_ss_name, snapshot_label=snapshot_label, is_snap_id=is_snap_id, snap_ids_or_gens=snap_ids, source_device_id=source_device_id, source_device_label=source_device_label) volume_metadata = self.update_volume_info_metadata( datadict, self.version_dict) self.print_pretty_table(volume_metadata) @debug_required def capture_modify_group( self, group_name, group_id, add_vols, remove_volumes, array): """Captures group info after a modify operation :param group_name: group name :param group_id: group id :param add_vols: add volume list :param remove_volumes: remove volume list 
:param array: array serial number """ if not self.version_dict: self.version_dict = self.gather_version_info(array) for add_vol in add_vols: datadict = self.gather_volume_info( add_vol.id, 'addToGroup', True, group_name=group_name, group_id=group_id) add_volume_metadata = self.update_volume_info_metadata( datadict, self.version_dict) self.print_pretty_table(add_volume_metadata) for remove_volume in remove_volumes: datadict = self.gather_volume_info( remove_volume.id, 'removeFromGroup', True, group_name='Removed from %s' % group_name, group_id='Removed from %s' % group_id) remove_volume_metadata = self.update_volume_info_metadata( datadict, self.version_dict) self.print_pretty_table(remove_volume_metadata) @debug_required def capture_create_volume( self, device_id, volume, group_name, group_id, extra_specs, rep_info_dict, successful_operation, source_snapshot_id=None, source_device_id=None, temporary_snapvx=None, array_tag_list=None): """Captures create volume info in volume metadata :param device_id: device id :param volume: volume object :param group_name: group name :param group_id: group id :param extra_specs: additional info :param rep_info_dict: information gathered from replication :param successful_operation: the type of create operation :param source_snapshot_id: the source snapshot id :param temporary_snapvx: temporary snapVX :param array_tag_list: array tag list :returns: volume_metadata dict """ rdf_group_no, target_name, remote_array, target_device_id = ( None, None, None, None) rep_mode, replication_status, rdf_group_label, use_bias = ( None, None, None, None) target_array_model, backend_id = None, None if rep_info_dict: rdf_group_no = rep_info_dict.get('rdf_group_no') target_name = rep_info_dict.get('target_name') remote_array = rep_info_dict.get('remote_array') target_device_id = rep_info_dict.get('target_device_id') rep_mode = rep_info_dict.get('rep_mode') replication_status = rep_info_dict.get('replication_status') rdf_group_label = rep_info_dict.get('rdf_group_label') backend_id = rep_info_dict.get('backend_id') if utils.METROBIAS in extra_specs: use_bias = extra_specs[utils.METROBIAS] target_array_model = rep_info_dict.get('target_array_model') default_sg = self.utils.derive_default_sg_from_extra_specs( extra_specs, rep_mode) sl, wl = self.utils.get_service_level_workload(extra_specs) datadict = self.gather_volume_info( volume.id, successful_operation, True, volume_size=volume.size, device_id=device_id, default_sg_name=default_sg, serial_number=extra_specs[utils.ARRAY], service_level=sl, workload=wl, srp=extra_specs[utils.SRP], identifier_name=self.utils.get_volume_element_name(volume.id), openstack_name=volume.display_name, source_volid=volume.source_volid, group_name=group_name, group_id=group_id, rdf_group_no=rdf_group_no, backend_id=backend_id, target_name=target_name, remote_array=remote_array, target_device_id=target_device_id, source_snapshot_id=source_snapshot_id, rep_mode=rep_mode, replication_status=replication_status, rdf_group_label=rdf_group_label, use_bias=use_bias, is_compression_disabled=self.utils.is_compression_disabled( extra_specs), source_device_id=source_device_id, temporary_snapvx=temporary_snapvx, target_array_model=target_array_model, array_tag_list=array_tag_list) volume_metadata = self.update_volume_info_metadata( datadict, self.version_dict) self.print_pretty_table(volume_metadata) @debug_required def gather_replication_info( self, volume_id, successful_operation, append, **kwargs): """Gathers replication information :param volume_id: volume 
id :param successful_operation: the successful operation type :param append: boolean :param **kwargs: variable length of arguments :returns: rep_dict """ return self._fill_volume_trace_dict( volume_id, successful_operation, append, **kwargs) @debug_required def capture_failover_volume( self, volume, target_device, remote_array, rdf_group, device_id, array, extra_specs, failover, vol_grp_name, replication_status, rep_mode): """Captures failover info in volume metadata :param volume: volume object :param target_device: the device to failover to :param remote_array: the array to failover to :param rdf_group: the rdf group :param device_id: the device to failover from :param array: the array to failover from :param extra_specs: additional info :param failover: failover flag :param vol_grp_name: async group name :param replication_status: volume replication status :param rep_mode: replication mode """ operation = "Failover" if failover else "Failback" sl, wl = self.utils.get_service_level_workload(extra_specs) datadict = self.gather_volume_info( volume.id, operation, True, volume_size=volume.size, device_id=target_device, serial_number=remote_array, service_level=sl, workload=wl, srp=extra_specs[utils.SRP], identifier_name=self.utils.get_volume_element_name(volume.id), openstack_name=volume.display_name, source_volid=volume.source_volid, rdf_group_no=rdf_group, remote_array=array, target_device_id=device_id, vol_grp_name=vol_grp_name, replication_status=replication_status, rep_mode=rep_mode ) self.version_dict = ( self.gather_version_info(remote_array)) volume_metadata = self.update_volume_info_metadata( datadict, self.version_dict) self.print_pretty_table(volume_metadata) @debug_required def capture_manage_existing( self, volume, rep_info_dict, device_id, extra_specs): """Captures manage existing info in volume metadata :param volume: volume object :param rep_info_dict: information gathered from replication :param device_id: the PowerMax/VMAX device id :param extra_specs: the extra specs """ successful_operation = "manage_existing_volume" rdf_group_no, target_name, remote_array, target_device_id = ( None, None, None, None) rep_mode, replication_status, rdf_group_label, backend_id = ( None, None, None, None) if rep_info_dict: rdf_group_no = rep_info_dict.get('rdf_group_no') target_name = rep_info_dict.get('target_name') remote_array = rep_info_dict.get('remote_array') target_device_id = rep_info_dict.get('target_device_id') rep_mode = rep_info_dict.get('rep_mode') replication_status = rep_info_dict.get('replication_status') rdf_group_label = rep_info_dict.get('rdf_group_label') backend_id = rep_info_dict.get('backend_id') default_sg = self.utils.derive_default_sg_from_extra_specs( extra_specs, rep_mode) sl, wl = self.utils.get_service_level_workload(extra_specs) datadict = self.gather_volume_info( volume.id, successful_operation, True, volume_size=volume.size, device_id=device_id, default_sg_name=default_sg, serial_number=extra_specs[utils.ARRAY], service_level=sl, workload=wl, srp=extra_specs[utils.SRP], identifier_name=self.utils.get_volume_element_name(volume.id), openstack_name=volume.display_name, source_volid=volume.source_volid, rdf_group_no=rdf_group_no, backend_id=backend_id, target_name=target_name, remote_array=remote_array, target_device_id=target_device_id, rep_mode=rep_mode, replication_status=replication_status, rdf_group_label=rdf_group_label ) volume_metadata = self.update_volume_info_metadata( datadict, self.version_dict) self.print_pretty_table(volume_metadata) 
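    # For illustration only (the values below are hypothetical): after a
    # capture_* call, update_volume_info_metadata() merges the gathered
    # volume data with the version dictionary and print_pretty_table() logs
    # it as a psql-style tabulate table, roughly of the form:
    #
    #     +----------------------+------------------------+
    #     | Key                  | Value                  |
    #     |----------------------+------------------------|
    #     | successful_operation | manage_existing_volume |
    #     | serial_number        | 000197800123           |
    #     | service_level        | Diamond                |
    #     +----------------------+------------------------+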
@debug_required def capture_retype_info( self, volume, device_id, array, srp, target_slo, target_workload, target_sg_name, is_rep_enabled, rep_mode, is_compression_disabled, target_backend_id): """Captures manage existing info in volume metadata :param volume_id: volume identifier :param volume_size: volume size :param device_id: the PowerMax/VMAX device id :param array: the PowerMax/VMAX serialnumber :param srp: PowerMax/VMAX SRP :param target_slo: volume name :param target_workload: the PowerMax/VMAX device id :param is_rep_enabled: replication enabled flag :param rep_mode: replication mode :param is_compression_disabled: compression disabled flag :param target_backend_id: target replication backend id """ successful_operation = "retype" if not target_slo: target_slo, target_workload = 'None', 'None' datadict = self.gather_volume_info( volume.id, successful_operation, False, volume_size=volume.size, device_id=device_id, target_sg_name=target_sg_name, serial_number=array, target_service_level=target_slo, target_workload=target_workload, srp=srp, identifier_name=self.utils.get_volume_element_name(volume.id), openstack_name=volume.display_name, is_rep_enabled=('yes' if is_rep_enabled else 'no'), backend_id=target_backend_id, rep_mode=rep_mode, is_compression_disabled=( True if is_compression_disabled else False)) if not is_rep_enabled: delete_list = ['rdf_group_no', 'rep_mode', 'target_array_model', 'service_level', 'remote_array', 'target_device_id', 'replication_status', 'rdf_group_label', 'backend_id'] self.utils.delete_values_from_dict(datadict, delete_list) update_list = [('default_sg_name', 'source_sg_name'), ('service_level', 'source_service_level')] self.utils.update_values_in_dict(datadict, update_list) volume_metadata = self.update_volume_info_metadata( datadict, self.version_dict) self.print_pretty_table(volume_metadata) @debug_required def capture_delete_info(self, volume): """Captures delete info in volume metadata :param volume: the volume object """ datadict = self.gather_volume_info( volume.id, 'delete', False, identifier_name=self.utils.get_volume_element_name(volume.id), openstack_name=volume.display_name) volume_metadata = self.update_volume_info_metadata( datadict, self.version_dict) self.print_pretty_table(volume_metadata) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/drivers/dell_emc/powermax/migrate.py0000664000175000017500000004360100000000000024734 0ustar00zuulzuul00000000000000# Copyright (c) 2020 Dell Inc. or its subsidiaries. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
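# Illustrative usage sketch (an assumption about how the common driver calls
# into this module, not code shipped in this file): PowerMaxMigrate is
# instantiated alongside the masking and provision helpers, and
# do_migrate_if_candidate() is invoked during an attach so that a volume
# still laid out in the legacy (SMIS) masking view structure is first moved
# into a staging storage group and masking view, e.g.:
#
#     migrate = PowerMaxMigrate(prtcl, rest, configuration)
#     if migrate.do_migrate_if_candidate(
#             array, srp, device_id, volume, connector):
#         LOG.info("Device %s moved to the staging storage group.",
#                  device_id)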
import uuid from oslo_log import log as logging from cinder import exception from cinder.i18n import _ from cinder.volume.drivers.dell_emc.powermax import masking from cinder.volume.drivers.dell_emc.powermax import provision from cinder.volume.drivers.dell_emc.powermax import utils LOG = logging.getLogger(__name__) class PowerMaxMigrate(object): """Upgrade class for Rest based PowerMax volume drivers. This upgrade class is for Dell EMC PowerMax volume drivers based on UniSphere Rest API. It supports VMAX 3 and VMAX All Flash and PowerMax arrays. """ def __init__(self, prtcl, rest, configuration): self.rest = rest self.utils = utils.PowerMaxUtils() self.masking = masking.PowerMaxMasking(prtcl, self.rest, configuration) self.provision = provision.PowerMaxProvision(self.rest, configuration) def do_migrate_if_candidate( self, array, srp, device_id, volume, connector): """Check and migrate if the volume is a candidate If the volume is in the legacy (SMIS) masking view structure move it to staging storage group within a staging masking view. :param array: array serial number :param srp: the SRP :param device_id: the volume device id :param volume: the volume object :param connector: the connector object """ mv_detail_list = list() masking_view_list, storage_group_list = ( self._get_mvs_and_sgs_from_volume( array, device_id)) for masking_view in masking_view_list: masking_view_dict = self.get_masking_view_component_dict( masking_view, srp) if masking_view_dict: mv_detail_list.append(masking_view_dict) if not mv_detail_list: return False if len(storage_group_list) != 1: LOG.warning("MIGRATE - The volume %(dev_id)s is not in one " "storage group as is expected for migration. " "The volume is in storage groups %(sg_list)s." "Migration will not proceed.", {'dev_id': device_id, 'sg_list': storage_group_list}) return False else: source_storage_group_name = storage_group_list[0] # Get the host that OpenStack has volume exposed to (it should only # be one host). os_host_list = self.get_volume_host_list(volume, connector) if len(os_host_list) != 1: LOG.warning("MIGRATE - OpenStack has recorded that " "%(dev_id)s is attached to hosts %(os_hosts)s " "and not 1 host as is expected. " "Migration will not proceed.", {'dev_id': device_id, 'os_hosts': os_host_list}) return False else: os_host_name = os_host_list[0] LOG.info("MIGRATE - Volume %(dev_id)s is a candidate for " "migration. The OpenStack host is %(os_host_name)s." "The volume is in storage group %(sg_name)s.", {'dev_id': device_id, 'os_host_name': os_host_name, 'sg_name': source_storage_group_name}) return self._perform_migration( array, device_id, mv_detail_list, source_storage_group_name, os_host_name) def _perform_migration( self, array, device_id, mv_detail_list, source_storage_group_name, os_host_name): """Perform steps so we can get the volume in a correct state. 
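The staging objects created by the helper methods below follow the naming convention used throughout this module, for example (host name and uuid are illustrative):

.. code-block:: none

    STG-myhost-4732de9b-98a4-11ea-bb37-0242ac130002-SG
    STG-myhost-4732de9b-98a4-11ea-bb37-0242ac130002-MV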
:param array: the storage array :param device_id: the device_id :param mv_detail_list: the masking view list :param source_storage_group_name: the source storage group :param os_host_name: the host the volume is exposed to :returns: boolean """ extra_specs = {utils.INTERVAL: 3, utils.RETRIES: 200} stg_sg_name = self._create_stg_storage_group_with_vol( array, os_host_name, extra_specs) if not stg_sg_name: # Throw an exception here exception_message = _("MIGRATE - Unable to create staging " "storage group.") LOG.error(exception_message) raise exception.VolumeBackendAPIException( message=exception_message) LOG.info("MIGRATE - Staging storage group %(stg_sg_name)s has " "been successfully created.", {'stg_sg_name': stg_sg_name}) new_stg_mvs = self._create_stg_masking_views( array, mv_detail_list, stg_sg_name, extra_specs) LOG.info("MIGRATE - Staging masking views %(new_stg_mvs)s have " "been successfully created.", {'new_stg_mvs': new_stg_mvs}) if not new_stg_mvs: exception_message = _("MIGRATE - Unable to create staging " "masking views.") LOG.error(exception_message) raise exception.VolumeBackendAPIException( message=exception_message) # Move volume from old storage group to new staging storage group self.move_volume_from_legacy_to_staging( array, device_id, source_storage_group_name, stg_sg_name, extra_specs) LOG.info("MIGRATE - Device id %(device_id)s has been successfully " "moved from %(src_sg)s to %(tgt_sg)s.", {'device_id': device_id, 'src_sg': source_storage_group_name, 'tgt_sg': stg_sg_name}) new_masking_view_list, new_storage_group_list = ( self._get_mvs_and_sgs_from_volume( array, device_id)) if len(new_storage_group_list) != 1: exception_message = (_( "MIGRATE - The current storage group list has %(list_len)d " "members. The list is %(sg_list)s. Will not proceed with " "cleanup. Please contact customer representative.") % { 'list_len': len(new_storage_group_list), 'sg_list': new_storage_group_list}) LOG.error(exception_message) raise exception.VolumeBackendAPIException( message=exception_message) else: current_storage_group_name = new_storage_group_list[0] if current_storage_group_name.lower() != stg_sg_name.lower(): exception_message = (_( "MIGRATE - The current storage group %(sg_1)s " "does not match %(sg_2)s. Will not proceed with " "cleanup. Please contact customer representative.") % { 'sg_1': current_storage_group_name, 'sg_2': stg_sg_name}) LOG.error(exception_message) raise exception.VolumeBackendAPIException( message=exception_message) if not self._delete_staging_masking_views( array, new_masking_view_list, os_host_name): exception_message = _("MIGRATE - Unable to delete staging masking " "views. Please contact customer " "representative.") LOG.error(exception_message) raise exception.VolumeBackendAPIException( message=exception_message) final_masking_view_list, final_storage_group_list = ( self._get_mvs_and_sgs_from_volume( array, device_id)) if len(final_masking_view_list) != 1: exception_message = (_( "MIGRATE - The final masking view list has %(list_len)d " "entries and not 1 entry as is expected. The list is " "%(mv_list)s. 
Please contact customer representative.") % { 'list_len': len(final_masking_view_list), 'sg_list': final_masking_view_list}) LOG.error(exception_message) raise exception.VolumeBackendAPIException( message=exception_message) return True def move_volume_from_legacy_to_staging( self, array, device_id, source_storage_group_name, stg_sg_name, extra_specs): """Move the volume from legacy SG to staging SG :param array: array serial number :param device_id: the device id of the volume :param source_storage_group_name: the source storage group :param stg_sg_name: the target staging storage group :param extra_specs: the extra specs """ num_vol_in_sg = self.rest.get_num_vols_in_sg( array, source_storage_group_name) if num_vol_in_sg == 1: # Can't move last volume and leave masking view empty # so creating a holder volume temp_vol_size = '1' hold_vol_name = 'hold-' + str(uuid.uuid1()) self.provision.create_volume_from_sg( array, hold_vol_name, source_storage_group_name, temp_vol_size, extra_specs) LOG.info("MIGRATE - Volume %(vol)s has been created because " "there was only one volume remaining in storage group " "%(src_sg)s and we are attempting a move it to staging " "storage group %(tgt_sg)s.", {'vol': hold_vol_name, 'src_sg': source_storage_group_name, 'tgt_sg': stg_sg_name}) self.rest.move_volume_between_storage_groups( array, device_id, source_storage_group_name, stg_sg_name, extra_specs) def _delete_staging_masking_views( self, array, masking_view_list, os_host_name): """Delete the staging masking views Delete the staging masking views except the masking view exposed to the OpenStack compute :param array: array serial number :param masking_view_list: masking view namelist :param os_host_name: the host the volume is exposed to in OpenStack :returns: boolean """ delete_mv_list = list() safe_to_delete = False for masking_view_name in masking_view_list: if os_host_name in masking_view_name: safe_to_delete = True else: delete_mv_list.append(masking_view_name) if safe_to_delete: for delete_mv in delete_mv_list: self.rest.delete_masking_view(array, delete_mv) LOG.info("MIGRATE - Masking view %(delete_mv)s has been " "successfully deleted.", {'delete_mv': delete_mv}) return safe_to_delete def _create_stg_masking_views( self, array, mv_detail_list, stg_sg_name, extra_specs): """Create a staging masking views :param array: array serial number :param mv_detail_list: masking view detail list :param stg_sg_name: staging storage group name :param extra_specs: the extra specs :returns: masking view list """ new_masking_view_list = list() for mv_detail in mv_detail_list: host_name = mv_detail.get('host') masking_view_name = mv_detail.get('mv_name') masking_view_components = self.rest.get_masking_view( array, masking_view_name) # Create a staging masking view random_uuid = uuid.uuid1() staging_mv_name = 'STG-' + host_name + '-' + str( random_uuid) + '-MV' if masking_view_components: self.rest.create_masking_view( array, staging_mv_name, stg_sg_name, masking_view_components.get('portGroupId'), masking_view_components.get('hostId'), extra_specs) masking_view_dict = self.rest.get_masking_view( array, staging_mv_name) if masking_view_dict: new_masking_view_list.append(staging_mv_name) else: LOG.warning("Failed to create staging masking view " "%(mv_name)s. 
Migration cannot proceed.", {'mv_name': masking_view_name}) return None return new_masking_view_list def _create_stg_storage_group_with_vol(self, array, os_host_name, extra_specs): """Create a staging storage group and add volume :param array: array serial number :param os_host_name: the openstack host name :param extra_specs: the extra specs :returns: storage group name """ random_uuid = uuid.uuid1() # Create a staging SG stg_sg_name = 'STG-' + os_host_name + '-' + ( str(random_uuid) + '-SG') temp_vol_name = 'tempvol-' + str(random_uuid) temp_vol_size = '1' _stg_storage_group = self.provision.create_storage_group( array, stg_sg_name, None, None, None, extra_specs) if _stg_storage_group: self.provision.create_volume_from_sg( array, temp_vol_name, stg_sg_name, temp_vol_size, extra_specs) return stg_sg_name else: return None def _get_mvs_and_sgs_from_volume(self, array, device_id): """Given a device Id get its storage groups and masking views. :param array: array serial number :param device_id: the volume device id :returns: masking view list, storage group list """ final_masking_view_list = [] storage_group_list = self.rest.get_storage_groups_from_volume( array, device_id) for sg in storage_group_list: masking_view_list = self.rest.get_masking_views_from_storage_group( array, sg) final_masking_view_list.extend(masking_view_list) return final_masking_view_list, storage_group_list def get_masking_view_component_dict( self, masking_view_name, srp): """Get components from input string. :param masking_view_name: the masking view name -- str :param srp: the srp -- str :returns: object components -- dict """ regex_str_share = ( r'^(?POS)-(?P.+?)((?P' + srp + r')-' r'(?P.+?)-(?P.+?)|(?PNo_SLO))' r'((?P-I|-F)|)' r'(?P-CD|)(?P-RE|)' r'(?P-[0-9A-Fa-f]{8}|)' r'-(?PMV)$') object_dict = self.utils.get_object_components_and_correct_host( regex_str_share, masking_view_name) if object_dict: object_dict['mv_name'] = masking_view_name return object_dict def get_volume_host_list(self, volume, connector): """Get host list attachments from connector object :param volume: the volume object :param connector: the connector object :returns os_host_list """ os_host_list = list() if connector is not None: attachment_list = volume.volume_attachment LOG.debug("Volume attachment list: %(atl)s. 
" "Attachment type: %(at)s", {'atl': attachment_list, 'at': type(attachment_list)}) try: att_list = attachment_list.objects except AttributeError: att_list = attachment_list if att_list is not None: host_list = [att.connector['host'] for att in att_list if att is not None and att.connector is not None] for host_name in host_list: os_host_list.append(self.utils.get_host_short_name(host_name)) return os_host_list def cleanup_staging_objects( self, array, storage_group_names, extra_specs): """Delete the staging masking views and storage groups :param array: the array serial number :param storage_group_names: a list of storage group names :param extra_specs: the extra specs """ def _do_cleanup(sg_name, device_id): masking_view_list = ( self.rest.get_masking_views_from_storage_group( array, sg_name)) for masking_view in masking_view_list: if 'STG-' in masking_view: self.rest.delete_masking_view(array, masking_view) self.rest.remove_vol_from_sg( array, sg_name, device_id, extra_specs) self.rest.delete_volume(array, device_id) self.rest.delete_storage_group(array, sg_name) for storage_group_name in storage_group_names: if 'STG-' in storage_group_name: volume_list = self.rest.get_volumes_in_storage_group( array, storage_group_name) if len(volume_list) == 1: try: _do_cleanup(storage_group_name, volume_list[0]) except Exception: LOG.warning("MIGRATE - An attempt was made to " "cleanup after a legacy live migration, " "but it failed. You may choose to " "cleanup manually.") ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/drivers/dell_emc/powermax/nvme.py0000664000175000017500000003271000000000000024250 0ustar00zuulzuul00000000000000# Copyright (c) 2020 Dell Inc. or its subsidiaries. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from cinder import coordination from cinder.volume import driver from cinder.volume.drivers.san import san class PowerMaxNVMEBaseDriver(san.SanDriver, driver.BaseVD): driver_prefix = 'powermax' def ensure_export(self, context, volume): pass def create_export(self, context, volume, connector): pass def remove_export(self, context, volume): pass @staticmethod def check_for_export(context, volume_id): """Make sure volume is exported. :param context: the context :param volume_id: the volume id """ pass def check_for_setup_error(self): """Validate the Unisphere version.""" pass def create_volume(self, volume): """Creates a PowerMax volume. :param volume: the cinder volume object :returns: provider location dict """ return self.common.create_volume(volume) def create_volume_from_snapshot(self, volume, snapshot): """Creates a volume from a snapshot. :param volume: the cinder volume object :param snapshot: the cinder snapshot object :returns: provider location dict """ return self.common.create_volume_from_snapshot( volume, snapshot) def create_cloned_volume(self, volume, src_vref): """Creates a cloned volume. 
:param volume: the cinder volume object :param src_vref: the source volume reference :returns: provider location dict """ return self.common.create_cloned_volume(volume, src_vref) def delete_volume(self, volume): """Deletes a PowerMax volume. :param volume: the cinder volume object """ self.common.delete_volume(volume) def create_snapshot(self, snapshot): """Creates a snapshot. :param snapshot: the cinder snapshot object :returns: provider location dict """ src_volume = snapshot.volume return self.common.create_snapshot(snapshot, src_volume) def delete_snapshot(self, snapshot): """Deletes a snapshot. :param snapshot: the cinder snapshot object """ src_volume = snapshot.volume self.common.delete_snapshot(snapshot, src_volume) @coordination.synchronized('{self.driver_prefix}-{volume.id}') def initialize_connection(self, volume, connector): """Initializes the connection and returns connection info.""" pass @coordination.synchronized('{self.driver_prefix}-{volume.id}') def terminate_connection(self, volume, connector, **kwargs): """Disallow connection from connector. :param volume: the volume object :param connector: the connector object """ self.common.terminate_connection(volume, connector) def extend_volume(self, volume, new_size): """Extend an existing volume. :param volume: the cinder volume object :param new_size: the required new size """ self.common.extend_volume(volume, new_size) def manage_existing(self, volume, existing_ref): """Manages an existing PowerMax Volume (import to Cinder). Renames the Volume to match the expected name for the volume. Also need to consider things like QoS, Emulation, account/tenant. """ return self.common.manage_existing(volume, existing_ref) def manage_existing_get_size(self, volume, existing_ref): """Return size of an existing PowerMax volume to manage_existing. :param self: reference to class :param volume: the volume object including the volume_type_id :param external_ref: reference to the existing volume :returns: size of the volume in GB """ return self.common.manage_existing_get_size(volume, existing_ref) def unmanage(self, volume): """Export PowerMax volume from Cinder. Leave the volume intact on the backend array. """ return self.common.unmanage(volume) def manage_existing_snapshot(self, snapshot, existing_ref): """Manage an existing PowerMax Snapshot (import to Cinder). Renames the Snapshot to prefix it with OS- to indicate it is managed by Cinder. :param snapshot: the snapshot object :param existing_ref: the snapshot name on the backend PowerMax :returns: model_update """ return self.common.manage_existing_snapshot(snapshot, existing_ref) def manage_existing_snapshot_get_size(self, snapshot, existing_ref): """Return the size of the source volume for manage-existing-snapshot. :param snapshot: the snapshot object :param existing_ref: the snapshot name on the backend PowerMax :returns: size of the source volume in GB """ return self.common.manage_existing_snapshot_get_size(snapshot) def unmanage_snapshot(self, snapshot): """Export PowerMax Snapshot from Cinder. Leaves the snapshot intact on the backend PowerMax. :param snapshot: the snapshot object """ self.common.unmanage_snapshot(snapshot) def get_manageable_volumes(self, cinder_volumes, marker, limit, offset, sort_keys, sort_dirs): """Lists all manageable volumes. :param cinder_volumes: List of currently managed Cinder volumes. Unused in driver. :param marker: Begin returning volumes that appear later in the volume list than that represented by this reference. 
:param limit: Maximum number of volumes to return. Default=1000. :param offset: Number of volumes to skip after marker. :param sort_keys: Results sort key. Valid keys: size, reference. :param sort_dirs: Results sort direction. Valid dirs: asc, desc. :returns: List of dicts containing all manageable volumes. """ return self.common.get_manageable_volumes(marker, limit, offset, sort_keys, sort_dirs) def get_manageable_snapshots(self, cinder_snapshots, marker, limit, offset, sort_keys, sort_dirs): """Lists all manageable snapshots. :param cinder_snapshots: List of currently managed Cinder snapshots. Unused in driver. :param marker: Begin returning volumes that appear later in the snapshot list than that represented by this reference. :param limit: Maximum number of snapshots to return. Default=1000. :param offset: Number of snapshots to skip after marker. :param sort_keys: Results sort key. Valid keys: size, reference. :param sort_dirs: Results sort direction. Valid dirs: asc, desc. :returns: List of dicts containing all manageable snapshots. """ return self.common.get_manageable_snapshots(marker, limit, offset, sort_keys, sort_dirs) def retype(self, context, volume, new_type, diff, host): """Migrate volume to another host using retype. :param context: context :param volume: the volume object including the volume_type_id :param new_type: the new volume type. :param diff: difference between old and new volume types. Unused in driver. :param host: the host dict holding the relevant target(destination) information :returns: boolean -- True if retype succeeded, False if error """ return self.common.retype(volume, new_type, host) def failover_host(self, context, volumes, secondary_id=None, groups=None): """Failover volumes to a secondary host/ backend. :param context: the context :param volumes: the list of volumes to be failed over :param secondary_id: the backend to be failed over to, is 'default' if fail back :param groups: replication groups :returns: secondary_id, volume_update_list, group_update_list """ active_backend_id, volume_update_list, group_update_list = ( self.common.failover(volumes, secondary_id, groups)) self.common.failover_completed(secondary_id, False) return active_backend_id, volume_update_list, group_update_list def failover(self, context, volumes, secondary_id=None, groups=None): """Like failover but for a host that is clustered.""" return self.common.failover(volumes, secondary_id, groups) def failover_completed(self, context, active_backend_id=None): """This method is called after failover for clustered backends.""" return self.common.failover_completed(active_backend_id, True) def create_group(self, context, group): """Creates a generic volume group. :param context: the context :param group: the group object :returns: model_update """ return self.common.create_group(context, group) def delete_group(self, context, group, volumes): """Deletes a generic volume group. :param context: the context :param group: the group object :param volumes: the member volumes """ return self.common.delete_group( context, group, volumes) def create_group_snapshot(self, context, group_snapshot, snapshots): """Creates a group snapshot. :param context: the context :param group_snapshot: the group snapshot :param snapshots: snapshots list """ return self.common.create_group_snapshot(context, group_snapshot, snapshots) def delete_group_snapshot(self, context, group_snapshot, snapshots): """Deletes a group snapshot. 
:param context: the context :param group_snapshot: the grouop snapshot :param snapshots: snapshots list """ return self.common.delete_group_snapshot(context, group_snapshot, snapshots) def update_group(self, context, group, add_volumes=None, remove_volumes=None): """Updates LUNs in group. :param context: the context :param group: the group object :param add_volumes: flag for adding volumes :param remove_volumes: flag for removing volumes """ return self.common.update_group(group, add_volumes, remove_volumes) def create_group_from_src( self, context, group, volumes, group_snapshot=None, snapshots=None, source_group=None, source_vols=None): """Creates the volume group from source. :param context: the context :param group: the consistency group object to be created :param volumes: volumes in the group :param group_snapshot: the source volume group snapshot :param snapshots: snapshots of the source volumes :param source_group: the dictionary of a volume group as source. :param source_vols: a list of volume dictionaries in the source_group. """ return self.common.create_group_from_src( context, group, volumes, group_snapshot, snapshots, source_group, source_vols) def enable_replication(self, context, group, volumes): """Enable replication for a group. :param context: the context :param group: the group object :param volumes: the list of volumes :returns: model_update, None """ return self.common.enable_replication(context, group, volumes) def disable_replication(self, context, group, volumes): """Disable replication for a group. :param context: the context :param group: the group object :param volumes: the list of volumes :returns: model_update, None """ return self.common.disable_replication(context, group, volumes) def failover_replication(self, context, group, volumes, secondary_backend_id=None): """Failover replication for a group. :param context: the context :param group: the group object :param volumes: the list of volumes :param secondary_backend_id: the secondary backend id - default None :returns: model_update, vol_model_updates """ return self.common.failover_replication( context, group, volumes, secondary_backend_id) def revert_to_snapshot(self, context, volume, snapshot): """Revert volume to snapshot :param context: the context :param volume: the cinder volume object :param snapshot: the cinder snapshot object """ self.common.revert_to_snapshot(volume, snapshot) @classmethod def clean_volume_file_locks(cls, volume_id): coordination.synchronized_remove(f'{cls.driver_prefix}-{volume_id}') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/drivers/dell_emc/powermax/nvme_tcp.py0000664000175000017500000002227600000000000025124 0ustar00zuulzuul00000000000000# Copyright (c) 2020 Dell Inc. or its subsidiaries. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
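# A hedged example (not part of the shipped file) of a minimal cinder.conf
# backend stanza for this NVMe/TCP driver. The option names shown are the
# ones referenced by get_driver_options() and check_for_setup_error() below;
# the values are illustrative, and further PowerMax options (SRP, service
# level, port groups, etc.) are normally required as well:
#
#     [powermax_nvme_tcp]
#     volume_driver = cinder.volume.drivers.dell_emc.powermax.nvme_tcp.PowerMaxNVMETCPDriver
#     san_ip = 10.0.0.10
#     san_login = smc
#     san_password = smc
#     powermax_array = 000197800123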
from os_brick.initiator.connectors.nvmeof import NVMeOFConnector from oslo_log import log as logging from cinder.common import constants as cinder_constants from cinder import coordination from cinder import exception from cinder.utils import get_root_helper from cinder.volume.drivers.dell_emc.powermax import common from cinder.volume.drivers.dell_emc.powermax import nvme from cinder.volume.drivers.dell_emc.powermax import utils LOG = logging.getLogger(__name__) U4P_100_VERSION = 100 class PowerMaxNVMETCPDriver(nvme.PowerMaxNVMEBaseDriver): """NVMe/TCP Drivers for PowerMax using Rest. Version history: .. code-block:: none 1.0.0 - Initial driver """ VERSION = "1.0.0" SUPPORTS_ACTIVE_ACTIVE = True # ThirdPartySystems wiki CI_WIKI_NAME = "DellEMC_PowerMAX_CI" driver_prefix = 'powermax' def __init__(self, *args, **kwargs): super(PowerMaxNVMETCPDriver, self).__init__(*args, **kwargs) self.active_backend_id = kwargs.get('active_backend_id', None) self.common = common.PowerMaxCommon( cinder_constants.NVMEOF_TCP, self.VERSION, configuration=self.configuration, active_backend_id=self.active_backend_id) self.performance = self.common.performance self.rest = self.common.rest self.nvme_connector = NVMeOFConnector(root_helper=get_root_helper()) @classmethod def get_driver_options(cls): additional_opts = cls._get_oslo_driver_opts( 'san_ip', 'san_login', 'san_password', 'driver_ssl_cert_verify', 'max_over_subscription_ratio', 'reserved_percentage', 'replication_device') return common.powermax_opts + additional_opts def check_for_setup_error(self): """Validate the Unisphere version. This function checks the running and major versions of Unisphere retrieved from the REST API. If the versions are invalid or do not meet the minimum supported requirements, it logs appropriate warnings or errors and raises an exception. :raises InvalidConfigurationValue: If the Unisphere version does not meet the minimum requirements. """ running_version, major_version = self.rest.get_uni_version() array = self.configuration.safe_get('powermax_array') powermax_version = self.rest.get_vmax_model(array) LOG.info("Unisphere running version %(running_version)s and " "major version %(major_version)s", {'running_version': running_version, 'major_version': major_version}) LOG.info("PowerMax version %(version)s", {'version': powermax_version}) if not running_version or not major_version or not powermax_version: msg = ("Unable to validate Unisphere instance " "or PowerMax version.") LOG.error(msg) raise exception.InvalidConfigurationValue(message=msg) else: if (int(major_version) < int(U4P_100_VERSION) or (powermax_version.lower() != "powermax_2500" and powermax_version.lower() != "powermax_8500")): msg = (("Unisphere version %(running_version)s or " "PowerMax version %(version)s " "is not supported.") % {'running_version': running_version, 'version': powermax_version}) LOG.error(msg) raise exception.InvalidConfigurationValue(message=msg) @coordination.synchronized('{self.driver_prefix}-{volume.id}') def initialize_connection(self, volume, connector): """Initializes the connection and returns connection info. The nvme driver returns a driver_volume_type of 'nvmeof'. The target_nqn can be a single entry correspond to the one powermax array. Example return value: .. 
code-block:: default { 'driver_volume_type': 'nvmeof', 'data': { "portals": target_portals, "target_nqn": device_info['target_nqn'], "volume_nguid": device_nguid, "discard": True } } :param volume: the cinder volume object :param connector: the connector object :returns: dict -- the nvmeof dict """ device_info = self.common.initialize_connection( volume, connector) if device_info: return self._populate_data(device_info) return {} def _populate_data(self, device_info): """Populate NVMe over Fabrics (NVMe-oF) connection data for a device. This function retrieves the necessary NVMe-oF connection details for the specified device, including the target NQN, portals, and volume NGUID.If load balancing is enabled in the configuration, it attempts to select the optimal port based on performance metrics. If an error occurs during this process, it falls back to default target selection. :param device_info: Dictionary containing device information, including: - array: The storage array ID. - device_id: The device identifier. - ips: List of IP addresses. - maskingview: The masking view associated with the device. :return: A dictionary containing NVMe-oF connection details: - driver_volume_type: Always "nvmeof". - data: A dictionary with keys: - portals: List of target portals as tuples (IP, port, protocol). - target_nqn: The NVMe Qualified Name for the target. - volume_nguid: The globally unique identifier for the volume. - discard: Boolean indicating discard support. :raises VolumeBackendAPIException: If an error occurs during port performance analysis or target selection. """ device_nguid = self.rest.get_device_nguid(device_info['array'], device_info['device_id']) target_portals = [] ips = device_info['ips'] if self.performance.config.get('load_balance'): try: masking_view = device_info.get('maskingview') array_id = device_info.get('array') # Get PG from MV port_group = self.rest.get_element_from_masking_view( array_id, masking_view, portgroup=True) port_list = self.rest.get_port_ids(array_id, port_group) load, metric, port = self.performance.process_port_load( array_id, port_list) LOG.info("Lowest %(met)s load port for NVMe" " is %(port)s: %(load)s", {'met': metric, 'port': port, 'load': load}) port_details = self.rest.get_port(array_id, port) port_info = port_details.get('symmetrixPort') ips = port_info.get('ip_addresses') for ip in ips: (target_portals. append((ip, utils.POWERMAX_NVME_TCP_PORT, utils.POWERMAX_NVME_TRANSPORT_PROTOCOL_TCP))) except exception.VolumeBackendAPIException: LOG.error("There was an error calculating port load, " "reverting to default target selection.") for ip in ips: (target_portals. append((ip, utils.POWERMAX_NVME_TCP_PORT, utils.POWERMAX_NVME_TRANSPORT_PROTOCOL_TCP))) else: for ip in ips: (target_portals. 
append((ip, utils.POWERMAX_NVME_TCP_PORT, utils.POWERMAX_NVME_TRANSPORT_PROTOCOL_TCP))) target_nqn = (self.common.get_target_nqn(target_portals, self.nvme_connector)) return { "driver_volume_type": "nvmeof", "data": { "portals": target_portals, "target_nqn": target_nqn, "volume_nguid": device_nguid, "discard": True }, } def _update_volume_stats(self): """Retrieve stats info from volume group.""" LOG.debug("Updating volume stats") data = self.common.update_volume_stats() data['storage_protocol'] = cinder_constants.NVMEOF_TCP data['driver_version'] = self.VERSION self._stats = data ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/drivers/dell_emc/powermax/performance.py0000664000175000017500000004120000000000000025576 0ustar00zuulzuul00000000000000# Copyright (c) 2020 Dell Inc. or its subsidiaries. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from heapq import heappop from heapq import heappush import time from oslo_log import log as logging from cinder.volume.drivers.dell_emc.powermax import utils LOG = logging.getLogger(__name__) class PowerMaxPerformance(object): """Performance Class for Dell EMC PowerMax volume drivers. It supports VMAX 3, All Flash and PowerMax arrays. """ def __init__(self, rest, performance_config): self.rest = rest self.config = performance_config def set_performance_configuration(self, array_id, cinder_conf): """Set the performance configuration if details present in cinder.conf. :param array_id: the array serial number -- str :param cinder_conf: cinder configuration options -- dict """ # Get performance registration, real-time registration, and collection # interval information for PowerMax array p_reg, rt_reg, c_int = self.get_array_registration_details(array_id) # Get load balance settings from cinder backend configuration lb_enabled = cinder_conf.safe_get(utils.LOAD_BALANCE) rt_enabled = cinder_conf.safe_get(utils.LOAD_BALANCE_RT) # Real-time if rt_enabled and not rt_reg: LOG.warning( "Real-time load balancing is enabled but array %(arr)s is not " "registered for real-time performance metrics collection. " "Diagnostic performance metrics will be used instead.", {'arr': array_id}) rt_enabled = False # Load balancing enabled but array not registered for perf metrics if (lb_enabled or rt_enabled) and not p_reg: LOG.warning( "Load balancing is enabled but array %(arr)s is not " "registered for performance metrics collection. 
Reverting to " "default random Port and Port Group selection", {'arr': array_id}) return {'load_balance': False} data_format = cinder_conf.safe_get(utils.PERF_DATA_FORMAT) if data_format.lower() not in ['average', 'avg', 'maximum', 'max']: LOG.warning("Incorrect data format '%(df)s', reverting to " "default value 'Average'.", {'df': data_format}) data_format = 'Average' if data_format.lower() in ['average', 'avg']: data_format = 'Average' elif data_format.lower() in ['maximum', 'max']: data_format = 'Maximum' # Get diagnostic metrics look back window lb_diagnostic = cinder_conf.safe_get(utils.LOAD_LOOKBACK) if not lb_diagnostic: LOG.warning( "Diagnostic look back window not set in cinder.conf, " "reverting to default value of 60 for most recent hour of " "metrics.") lb_diagnostic = 60 elif lb_diagnostic < 0 or lb_diagnostic > 1440: LOG.warning( "Diagnostic look back window '%(lb)s' is not within the " "minimum and maximum range 0-1440, reverting to default " "value of 60 for most recent hour of metrics.", { 'lb': lb_diagnostic}) lb_diagnostic = 60 # Get real-time metrics look back window lb_real_time = cinder_conf.safe_get(utils.LOAD_LOOKBACK_RT) if rt_enabled: if not lb_real_time: LOG.warning( "Real-time look back window not set in cinder.conf, " "reverting to default value of 1 for for most recent " "minute of metrics.") lb_real_time = 1 elif lb_real_time < 1 or lb_real_time > 60: LOG.warning( "Real-time look back window '%(lb)s' is not within the " "minimum and maximum range 1-60, reverting to default " "value of 1 for for most recent minute of metrics.", { 'lb': lb_real_time}) lb_real_time = 1 # Get Port Group metric for load calculation pg_metric = cinder_conf.safe_get(utils.PORT_GROUP_LOAD_METRIC) if not pg_metric: LOG.warning( "Port Group performance metric not set in cinder.conf, " "reverting to default metric 'PercentBusy'.") pg_metric = 'PercentBusy' elif pg_metric not in utils.PG_METRICS: LOG.warning( "Port Group performance metric selected for load " "balancing '%(pg_met)s' is not valid, reverting to " "default metric 'PercentBusy'.", { 'pg_met': pg_metric}) pg_metric = 'PercentBusy' # Get Port metric for load calculation port_metric = cinder_conf.safe_get(utils.PORT_LOAD_METRIC) valid_port_metrics = ( utils.PORT_RT_METRICS if rt_enabled else utils.PORT_METRICS) if not port_metric: LOG.warning( "Port performance metric not set in cinder.conf, " "reverting to default metric 'PercentBusy'.") port_metric = 'PercentBusy' elif port_metric not in valid_port_metrics: LOG.warning( "Port performance metric selected for load balancing " "'%(port_met)s' is not valid, reverting to default metric " "'PercentBusy'.", {'port_met': port_metric}) port_metric = 'PercentBusy' self.config = { 'load_balance': lb_enabled, 'load_balance_rt': rt_enabled, 'perf_registered': p_reg, 'rt_registered': rt_reg, 'collection_interval': c_int, 'data_format': data_format, 'look_back': lb_diagnostic, 'look_back_rt': lb_real_time, 'port_group_metric': pg_metric, 'port_metric': port_metric} def get_array_registration_details(self, array_id): """Get array performance registration details. 
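A usage sketch (the array serial number is illustrative):

.. code-block:: python

    perf_reg, rt_reg, interval = self.get_array_registration_details(
        '000197800123')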
:param array_id: the array serial number -- str :returns: performance registered, real-time registered, collection interval -- bool, bool, int """ LOG.info("Retrieving array %(arr)s performance registration details.", {'arr': array_id}) array_reg_uri = self.rest.build_uri( category=utils.PERFORMANCE, resource_level=utils.ARRAY_PERF, resource_type=utils.REG_DETAILS, resource_type_id=array_id, no_version=True) reg_details = self.rest.get_request( target_uri=array_reg_uri, resource_type='Array registration details') array_reg_info = reg_details.get(utils.REG_DETAILS_INFO)[0] perf_registered = array_reg_info.get(utils.DIAGNOSTIC) real_time_registered = array_reg_info.get(utils.REAL_TIME) collection_interval = array_reg_info.get(utils.COLLECTION_INT) return perf_registered, real_time_registered, collection_interval def get_array_performance_keys(self, array_id): """Get array performance keys (first and last available timestamps). :param array_id: the array serial number :returns: first date, last date -- int, int """ LOG.debug("Retrieving array %(arr)s performance keys.", {'arr': array_id}) array_keys_uri = self.rest.build_uri( category=utils.PERFORMANCE, resource_level=utils.ARRAY_PERF, resource_type=utils.KEYS, no_version=True) array_keys = self.rest.get_request( target_uri=array_keys_uri, resource_type='Array performance keys') env_symm_info = array_keys.get(utils.ARRAY_INFO) f_date, l_date = None, None for symm in env_symm_info: if symm.get(utils.SYMM_ID) == array_id: f_date, l_date = symm.get(utils.F_DATE), symm.get(utils.L_DATE) return f_date, l_date @staticmethod def _get_look_back_window_interval_timestamp(l_date, lb_window): """Get first date value when calculated from last date and window. :param l_date: the last (most recent) timestamp -- int :param lb_window: the look back window in minutes -- int :returns: the first timestamp -- int """ return l_date - (utils.ONE_MINUTE * lb_window) @staticmethod def _process_load(performance_data, metric): """Process the load for a given performance response, return average. :param performance_data: raw performance data from REST API -- dict :param metric: performance metric in use -- str :returns: range average, range total, interval count -- float, int, int """ data = performance_data.get(utils.RESULT_LIST) result = data.get(utils.RESULT) total = 0 for timestamp in result: total += timestamp.get(metric) return total / len(result), total, len(result) def _get_port_group_performance_stats( self, array_id, port_group_id, f_date, l_date, metric, data_format): """Get performance data for a given port group and performance metric. 
:param array_id: the array serial number -- str :param port_group_id: the port group id -- str :param f_date: first date for stats -- int :param l_date: last date for stats -- int :param metric: performance metric -- str :param data_format: performance data format -- str :returns: range average, range total, interval count -- float, float, int """ request_body = { utils.SYMM_ID: array_id, utils.PORT_GROUP_ID: port_group_id, utils.S_DATE: f_date, utils.E_DATE: l_date, utils.DATA_FORMAT: data_format, utils.METRICS: [metric]} port_group_uri = self.rest.build_uri( category=utils.PERFORMANCE, resource_level=utils.PORT_GROUP, resource_type=utils.METRICS, no_version=True) result = self.rest.post_request( port_group_uri, 'Port Group performance metrics', request_body) return self._process_load(result, metric) def _get_port_performance_stats( self, array_id, director_id, port_id, f_date, l_date, metric, data_format=None, real_time=False): """Get performance data for a given port and performance metric. :param array_id: the array serial number -- str :param director_id: the director id -- str :param port_id: the port id -- str :param f_date: first date for stats -- int :param l_date: last date for stats -- int :param metric: performance metric -- str :param data_format: performance data format -- str :param real_time: if metrics are real-time -- bool :returns: range average, range total, interval count -- float, float, int """ if real_time: target_uri = self.rest.build_uri( category=utils.PERFORMANCE, resource_level=utils.REAL_TIME, resource_type=utils.METRICS, no_version=True) res_type = 'real-time' dir_port = ('%(dir)s:%(port)s' % {'dir': director_id, 'port': port_id}) request_body = { utils.SYMM_ID: array_id, utils.INST_ID: dir_port, utils.S_DATE: f_date, utils.E_DATE: l_date, utils.CAT: utils.FE_PORT_RT, utils.METRICS: [metric]} else: target_uri = self.rest.build_uri( category=utils.PERFORMANCE, resource_level=utils.FE_PORT_DIAG, resource_type=utils.METRICS, no_version=True) res_type = 'diagnostic' request_body = { utils.SYMM_ID: array_id, utils.DIR_ID: director_id, utils.PORT_ID: port_id, utils.S_DATE: f_date, utils.E_DATE: l_date, utils.DATA_FORMAT: data_format, utils.METRICS: [metric]} resource = '%(res)s Port performance metrics' % {'res': res_type} result = self.rest.post_request( target_uri, resource, request_body) return self._process_load(result, metric) def process_port_group_load( self, array_id, port_groups, max_load=False): """Calculate the load for one or more port groups. 
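A usage sketch (serial number and Port Group names are illustrative):

.. code-block:: python

    avg, metric, pg = self.process_port_group_load(
        '000197800123', ['OS-PG-A', 'OS-PG-B'])

By default the least loaded Port Group is returned; pass max_load=True to select the most heavily loaded one instead.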
:param array_id: the array serial number -- str :param port_groups: port group names -- list :param max_load: if max load port group should be returned -- bool :returns: low/max avg, metric, port group -- tuple(float, str, str) """ LOG.info("Calculating array %(arr)s load for Port Groups %(pg)s.", {'arr': array_id, 'pg': port_groups}) data_format = self.config.get('data_format') lb_window = self.config.get('look_back') pg_metric = self.config.get('port_group_metric') __, l_date = self.get_array_performance_keys(array_id) f_date = self._get_look_back_window_interval_timestamp( l_date, lb_window) heap_low, heap_high = [], [] start_time = time.time() for pg in port_groups: avg, total, cnt = self._get_port_group_performance_stats( array_id, pg, f_date, l_date, pg_metric, data_format) LOG.debug( "Port Group '%(pg)s' %(df)s %(met)s load for %(interval)s min " "interval: %(avg)s", {'pg': pg, 'df': data_format, 'met': pg_metric, 'interval': lb_window, 'avg': avg}) # Add PG average to lowest load heap heappush(heap_low, (avg, pg_metric, pg)) # Add inverse PG average to highest load heap heappush(heap_high, (-avg, pg_metric, pg)) LOG.debug("Time taken to analyse Port Group performance: %(t)ss", {'t': time.time() - start_time}) return heappop(heap_high) if max_load else heappop(heap_low) def process_port_load(self, array_id, ports, max_load=False): """Calculate the load for one or more ports. :param array_id: the array serial number -- str :param ports: physical dir:port names -- list :param max_load: if max load port should be returned -- bool :returns: low/max avg, metric, port -- tuple(float, str, str) """ LOG.info("Calculating array %(arr)s load for Ports %(port)s.", {'arr': array_id, 'port': ports}) rt_enabled = self.config.get('load_balance_rt') rt_registered = self.config.get('rt_registered') if rt_enabled and rt_registered: real_time, data_format = True, None lb_window = self.config.get('look_back_rt') else: real_time, data_format = False, self.config.get('data_format') lb_window = self.config.get('look_back') port_metric = self.config.get('port_metric') __, l_date = self.get_array_performance_keys(array_id) f_date = self._get_look_back_window_interval_timestamp( l_date, lb_window) heap_low, heap_high = [], [] start_time = time.time() for port in ports: dir_id = port.split(':')[0] port_no = port.split(':')[1] avg, total, cnt = self._get_port_performance_stats( array_id, dir_id, port_no, f_date, l_date, port_metric, data_format, real_time=real_time) LOG.debug( "Port '%(dir)s:%(port)s' %(df)s %(met)s load for %(int)s min " "interval: %(avg)s", {'dir': dir_id, 'port': port_no, 'df': data_format if data_format else '', 'met': port_metric, 'int': lb_window, 'avg': avg}) # Add PG average to lowest load heap heappush(heap_low, (avg, port_metric, port)) # Add inverse PG average to highest load heap heappush(heap_high, (-avg, port_metric, port)) LOG.debug("Time taken to analyse Port Group performance: %(t)ss", {'t': time.time() - start_time}) return heappop(heap_high) if max_load else heappop(heap_low) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/drivers/dell_emc/powermax/provision.py0000664000175000017500000010314000000000000025327 0ustar00zuulzuul00000000000000# Copyright (c) 2020 Dell Inc. or its subsidiaries. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import time from oslo_log import log as logging from oslo_service import loopingcall from oslo_utils import units from cinder import coordination from cinder import exception from cinder.i18n import _ from cinder.volume.drivers.dell_emc.powermax import utils LOG = logging.getLogger(__name__) WRITE_DISABLED = "Write Disabled" UNLINK_INTERVAL = 15 UNLINK_RETRIES = 30 class PowerMaxProvision(object): """Provisioning Class for Dell EMC PowerMax volume drivers. It supports VMAX 3, All Flash and PowerMax arrays. """ def __init__(self, rest, configuration): self.utils = utils.PowerMaxUtils() self.rest = rest self.snapvx_unlink_symforce = configuration.safe_get( utils.SNAPVX_UNLINK_SYMFORCE) or False def create_storage_group( self, array, storagegroup_name, srp, slo, workload, extra_specs, do_disable_compression=False): """Create a new storage group. :param array: the array serial number :param storagegroup_name: the group name (String) :param srp: the SRP (String) :param slo: the SLO (String) :param workload: the workload (String) :param extra_specs: additional info :param do_disable_compression: disable compression flag :returns: storagegroup - storage group object """ start_time = time.time() @coordination.synchronized("emc-sg-{storage_group}-{array}") def do_create_storage_group(storage_group, array): # Check if storage group has been recently created storagegroup = self.rest.get_storage_group( array, storagegroup_name) if storagegroup is None: storagegroup = self.rest.create_storage_group( array, storage_group, srp, slo, workload, extra_specs, do_disable_compression) LOG.debug("Create storage group took: %(delta)s H:MM:SS.", {'delta': self.utils.get_time_delta(start_time, time.time())}) LOG.info("Storage group %(sg)s created successfully.", {'sg': storagegroup_name}) else: LOG.info("Storage group %(sg)s already exists.", {'sg': storagegroup_name}) return storagegroup return do_create_storage_group(storagegroup_name, array) def create_volume_from_sg(self, array, volume_name, storagegroup_name, volume_size, extra_specs, rep_info=None): """Create a new volume in the given storage group. 
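A usage sketch mirroring the call made by the migrate flow in this package (volume name, storage group name and size are illustrative):

.. code-block:: python

    volume_dict = self.create_volume_from_sg(
        array, 'tempvol-1234', 'STG-myhost-1234-SG', '1', extra_specs)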
:param array: the array serial number :param volume_name: the volume name -- string :param storagegroup_name: the storage group name :param volume_size: volume size -- string :param extra_specs: extra specifications :param rep_info: replication session info dict -- optional :returns: volume info -- dict """ @coordination.synchronized("emc-sg-{storage_group}-{array}") def do_create_volume_from_sg(storage_group, array): start_time = time.time() if rep_info and rep_info.get('initial_device_list', False): local_device_list = self.rest.get_volume_list( extra_specs['array'], {'storageGroupId': storagegroup_name}) rep_info['initial_device_list'] = local_device_list volume_dict = self.rest.create_volume_from_sg( array, volume_name, storage_group, volume_size, extra_specs, rep_info) LOG.debug("Create volume from storage group " "took: %(delta)s H:MM:SS.", {'delta': self.utils.get_time_delta(start_time, time.time())}) return volume_dict return do_create_volume_from_sg(storagegroup_name, array) def delete_volume_from_srp(self, array, device_id, volume_name): """Delete a volume from the srp. :param array: the array serial number :param device_id: the volume device id :param volume_name: the volume name """ start_time = time.time() LOG.debug("Delete volume %(volume_name)s with device id %(dev)s " "from srp.", {'volume_name': volume_name, 'dev': device_id}) self.rest.delete_volume(array, device_id) LOG.debug("Delete volume took: %(delta)s H:MM:SS.", {'delta': self.utils.get_time_delta( start_time, time.time())}) def create_volume_snapvx(self, array, source_device_id, snap_name, extra_specs, ttl=0): """Create a snapVx of a volume. :param array: the array serial number :param source_device_id: source volume device id :param snap_name: the snapshot name :param extra_specs: the extra specifications :param ttl: time to live in hours, defaults to 0 """ @coordination.synchronized("emc-snapvx-{src_device_id}") def do_create_volume_snap(src_device_id): start_time = time.time() LOG.debug("Create Snap Vx snapshot of: %(source)s.", {'source': src_device_id}) self.rest.create_volume_snap( array, snap_name, src_device_id, extra_specs, ttl) LOG.debug("Create volume snapVx took: %(delta)s H:MM:SS.", {'delta': self.utils.get_time_delta(start_time, time.time())}) do_create_volume_snap(source_device_id) def create_volume_replica( self, array, source_device_id, target_device_id, snap_name, extra_specs, create_snap=False, copy_mode=False): """Create a snap vx of a source and copy to a target. :param array: the array serial number :param source_device_id: source volume device id :param target_device_id: target volume device id :param snap_name: the name for the snap shot :param extra_specs: extra specifications :param create_snap: Flag for create snapvx :param copy_mode: If copy mode should be used for SnapVX target links """ start_time = time.time() if create_snap: # We are creating a temporary snapshot. 
Specify a ttl of 1 hour self.create_volume_snapvx(array, source_device_id, snap_name, extra_specs, ttl=1) # Link source to target @coordination.synchronized("emc-snapvx-{src_device_id}") def do_modify_volume_snap(src_device_id): self.rest.modify_volume_snap( array, src_device_id, target_device_id, snap_name, extra_specs, link=True, copy=copy_mode) do_modify_volume_snap(source_device_id) LOG.debug("Create element replica took: %(delta)s H:MM:SS.", {'delta': self.utils.get_time_delta(start_time, time.time())}) def _is_symforce_enabled(self, defined): return self.snapvx_unlink_symforce and (not defined) def unlink_snapvx_tgt_volume( self, array, target_device_id, source_device_id, snap_name, extra_specs, snap_id, loop=True): """Unlink a snapshot from its target volume. :param array: the array serial number :param source_device_id: source volume device id :param target_device_id: target volume device id :param snap_name: the name for the snap shot :param extra_specs: extra specifications :param snap_id: the unique snap id of the SnapVX :param loop: if looping call is required for handling retries """ @coordination.synchronized("emc-snapvx-{src_device_id}") def do_unlink_volume(src_device_id, symforce=False): LOG.debug("Break snap vx link relationship between: %(src)s " "and: %(tgt)s.", {'src': src_device_id, 'tgt': target_device_id}) self._unlink_volume(array, src_device_id, target_device_id, snap_name, extra_specs, snap_id=snap_id, list_volume_pairs=None, loop=loop, symforce=symforce) # Get the link defined status defined = True linked_list = self.rest.get_snap_linked_device_list( array, source_device_id, snap_name, snap_id) for link in linked_list: if target_device_id == link['targetDevice']: if not link['defined']: defined = False break LOG.debug("The link defined status: %s", defined) do_unlink_volume( source_device_id, symforce=self._is_symforce_enabled(defined)) def _unlink_volume( self, array, source_device_id, target_device_id, snap_name, extra_specs, snap_id=None, list_volume_pairs=None, loop=True, symforce=False): """Unlink a target volume from its source volume. :param array: the array serial number :param source_device_id: the source device id :param target_device_id: the target device id :param snap_name: the snap name :param extra_specs: extra specifications :param snap_id: the unique snap id of the SnapVX :param list_volume_pairs: list of volume pairs, optional :param loop: if looping call is required for handling retries :param symforce: if symforce is enabled :returns: return code """ def _unlink_vol(): """Called at an interval until the synchronization is finished. 
:raises: loopingcall.LoopingCallDone """ retries = kwargs['retries'] try: kwargs['retries'] = retries + 1 if not kwargs['modify_vol_success']: self.rest.modify_volume_snap( array, source_device_id, target_device_id, snap_name, extra_specs, snap_id=snap_id, unlink=True, symforce=symforce, list_volume_pairs=list_volume_pairs) kwargs['modify_vol_success'] = True except exception.VolumeBackendAPIException: pass if kwargs['retries'] > UNLINK_RETRIES: LOG.error("_unlink_volume failed after %(retries)d " "tries.", {'retries': retries}) raise loopingcall.LoopingCallDone(retvalue=30) if kwargs['modify_vol_success']: raise loopingcall.LoopingCallDone() if not loop: self.rest.modify_volume_snap( array, source_device_id, target_device_id, snap_name, extra_specs, snap_id=snap_id, unlink=True, symforce=symforce, list_volume_pairs=list_volume_pairs) else: kwargs = {'retries': 0, 'modify_vol_success': False} timer = loopingcall.FixedIntervalLoopingCall(_unlink_vol) rc = timer.start(interval=UNLINK_INTERVAL).wait() return rc def delete_volume_snap(self, array, snap_name, source_device_ids, snap_id=None, restored=False): """Delete a snapVx snapshot of a volume. :param array: the array serial number :param snap_name: the snapshot name :param source_device_ids: the source device ids :param snap_id: the unique snap id of the SnapVX :param restored: Flag to indicate if restored session is being deleted """ @coordination.synchronized("emc-snapvx-{src_device_id}") def do_delete_volume_snap(src_device_id): LOG.debug("Delete SnapVx: %(snap_name)s for source %(src)s and " "devices %(devs)s.", {'snap_name': snap_name, 'src': src_device_id, 'devs': source_device_ids}) self.rest.delete_volume_snap( array, snap_name, source_device_ids, snap_id=snap_id, restored=restored) device_id = source_device_ids[0] if isinstance( source_device_ids, list) else source_device_ids if snap_id is None: snap_id = self.rest.get_snap_id(array, device_id, snap_name) do_delete_volume_snap(device_id) def is_restore_complete(self, array, source_device_id, snap_name, snap_id, extra_specs): """Check and wait for a restore to complete :param array: the array serial number :param source_device_id: source device id :param snap_name: snapshot name :param snap_id: unique snap id :param extra_specs: extra specification :returns: bool """ def _wait_for_restore(): """Called at an interval until the restore is finished. :raises: loopingcall.LoopingCallDone :raises: VolumeBackendAPIException """ retries = kwargs['retries'] try: kwargs['retries'] = retries + 1 if not kwargs['wait_for_restore_called']: if self._is_restore_complete( array, source_device_id, snap_name, snap_id): kwargs['wait_for_restore_called'] = True except Exception: exception_message = (_("Issue encountered waiting for " "restore.")) LOG.exception(exception_message) raise exception.VolumeBackendAPIException( message=exception_message) if kwargs['wait_for_restore_called']: raise loopingcall.LoopingCallDone() if kwargs['retries'] > int(extra_specs[utils.RETRIES]): LOG.error("_wait_for_restore failed after %(retries)d " "tries.", {'retries': retries}) raise loopingcall.LoopingCallDone( retvalue=int(extra_specs[utils.RETRIES])) kwargs = {'retries': 0, 'wait_for_restore_called': False} timer = loopingcall.FixedIntervalLoopingCall(_wait_for_restore) rc = timer.start(interval=int(extra_specs[utils.INTERVAL])).wait() return rc def _is_restore_complete( self, array, source_device_id, snap_name, snap_id): """Helper function to check if restore is complete. 
:param array: the array serial number :param source_device_id: source device id :param snap_name: the snapshot name :param snap_id: unique snap id :returns: restored -- bool """ restored = False snap_details = self.rest.get_volume_snap( array, source_device_id, snap_name, snap_id) if snap_details: linked_devices = snap_details.get("linkedDevices", []) for linked_device in linked_devices: if ('targetDevice' in linked_device and source_device_id == linked_device['targetDevice']): if ('state' in linked_device and linked_device['state'] == "Restored"): restored = True return restored def delete_temp_volume_snap(self, array, snap_name, source_device_id, snap_id): """Delete the temporary snapshot created for clone operations. There can be instances where the source and target both attempt to delete a temp snapshot simultaneously, so we must lock the snap and then double check it is on the array. :param array: the array serial number :param snap_name: the snapshot name :param source_device_id: the source device id :param snap_id: the unique snap id of the SnapVX """ snapvx = self.rest.get_volume_snap( array, source_device_id, snap_name, snap_id) if snapvx: self.delete_volume_snap( array, snap_name, source_device_id, snap_id=snap_id, restored=False) def delete_volume_snap_check_for_links( self, array, snap_name, source_devices, extra_specs, snap_id): """Check if a snap has any links before deletion. If a snapshot has any links, break the replication relationship before deletion. :param array: the array serial number :param snap_name: the snapshot name :param source_devices: the source device ids :param extra_specs: the extra specifications :param snap_id: the unique snap id of the SnapVX """ list_device_pairs = [] list_device_pairs_defined = True if not isinstance(source_devices, list): source_devices = [source_devices] for source_device in source_devices: LOG.debug("Check for linked devices to SnapVx: %(snap_name)s " "for volume %(vol)s.", {'vol': source_device, 'snap_name': snap_name}) linked_list = self.rest.get_snap_linked_device_list( array, source_device, snap_name, snap_id) if len(linked_list) == 1: target_device = linked_list[0]['targetDevice'] list_device_pairs.append((source_device, target_device)) if not linked_list[0]['defined']: list_device_pairs_defined = False else: for link in linked_list: # If a single source volume has multiple targets, # we must unlink each target individually target_device = link['targetDevice'] self._unlink_volume( array, source_device, target_device, snap_name, extra_specs, snap_id=snap_id, symforce=self._is_symforce_enabled(link['defined'])) if list_device_pairs: self._unlink_volume( array, "", "", snap_name, extra_specs, snap_id=snap_id, list_volume_pairs=list_device_pairs, symforce=self._is_symforce_enabled(list_device_pairs_defined)) if source_devices: self.delete_volume_snap( array, snap_name, source_devices, snap_id, restored=False) def extend_volume(self, array, device_id, new_size, extra_specs, rdf_group=None): """Extend a volume. 
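# ---------------------------------------------------------------------------
# Editorial note: delete_temp_volume_snap() above explains why the temporary
# snapshot is deleted under a per-source-device lock and then re-checked on
# the array. A minimal sketch of that cinder.coordination.synchronized
# pattern follows; it is illustrative only. The lock-name template is filled
# in from the decorated function's arguments, so calls for the same device
# serialize while different devices proceed in parallel. The function and
# helper names here are hypothetical, and the decorator only takes effect
# inside a running Cinder service where the tooz coordinator has been started.
# ---------------------------------------------------------------------------
from cinder import coordination


def _snap_still_on_array(src_device_id, snap_name):
    """Stand-in for rest.get_volume_snap(); pretend the snap still exists."""
    return True


@coordination.synchronized('emc-snapvx-{src_device_id}')
def delete_temp_snap(src_device_id, snap_name):
    # Under the per-device lock, double check the snapshot is still on the
    # array before deleting it: another caller may have removed it while
    # this one was waiting for the lock.
    if _snap_still_on_array(src_device_id, snap_name):
        print('deleting %s on %s' % (snap_name, src_device_id))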
:param array: the array serial number :param device_id: the volume device id :param new_size: the new size (GB) :param extra_specs: the extra specifications :param rdf_group: the rdf group number, if required :returns: status_code """ start_time = time.time() if rdf_group: @coordination.synchronized('emc-rg-{rdf_group}') def _extend_replicated_volume(rdf_group): self.rest.extend_volume(array, device_id, new_size, extra_specs, rdf_group) _extend_replicated_volume(rdf_group) else: self.rest.extend_volume(array, device_id, new_size, extra_specs) LOG.debug("Extend PowerMax/VMAX volume took: %(delta)s H:MM:SS.", {'delta': self.utils.get_time_delta(start_time, time.time())}) def get_srp_pool_stats(self, array, array_info): """Get the srp capacity stats. :param array: the array serial number :param array_info: the array dict :returns: total_capacity_gb :returns: remaining_capacity_gb :returns: subscribed_capacity_gb :returns: array_reserve_percent """ total_capacity_gb = 0 remaining_capacity_gb = 0 subscribed_capacity_gb = 0 array_reserve_percent = 0 srp = array_info['srpName'] LOG.debug( "Retrieving capacity for srp %(srpName)s on array %(array)s.", {'srpName': srp, 'array': array}) srp_details = self.rest.get_srp_by_name(array, srp) if not srp_details: LOG.error("Unable to retrieve srp instance of %(srpName)s on " "array %(array)s.", {'srpName': srp, 'array': array}) return 0, 0, 0, 0 try: srp_capacity = srp_details['srp_capacity'] total_capacity_gb = srp_capacity['usable_total_tb'] * units.Ki try: used_capacity_gb = srp_capacity['usable_used_tb'] * units.Ki remaining_capacity_gb = float( total_capacity_gb - used_capacity_gb) except KeyError: LOG.error("Unable to retrieve remaining_capacity_gb.") subscribed_capacity_gb = ( srp_capacity['subscribed_total_tb'] * units.Ki) array_reserve_percent = srp_details['reserved_cap_percent'] except KeyError: try: srp_capacity = srp_details['fba_srp_capacity'] effective_capacity = srp_capacity['effective'] total_capacity_gb = effective_capacity['total_tb'] * units.Ki remaining_capacity_gb = ( effective_capacity['free_tb'] * units.Ki) array_reserve_percent = srp_details['reserved_cap_percent'] subscribed_capacity_gb = ( effective_capacity['used_tb'] * units.Ki) except KeyError: pass return (total_capacity_gb, remaining_capacity_gb, subscribed_capacity_gb, array_reserve_percent) def verify_slo_workload( self, array, slo, workload, is_next_gen=None, array_model=None): """Check if SLO and workload values are valid. :param array: the array serial number :param slo: Service Level Object e.g bronze :param workload: workload e.g DSS :param is_next_gen: can be None :returns: boolean """ is_valid_slo, is_valid_workload = False, False if workload and workload.lower() == 'none': workload = None if not workload: is_valid_workload = True if slo and slo.lower() == 'none': slo = None if is_next_gen or is_next_gen is None: array_model, is_next_gen = self.rest.get_array_model_info( array) valid_slos = self.rest.get_slo_list(array, is_next_gen, array_model) valid_workloads = self.rest.get_workload_settings(array, is_next_gen) for valid_slo in valid_slos: if slo == valid_slo: is_valid_slo = True break for valid_workload in valid_workloads: if workload == valid_workload: is_valid_workload = True break if not slo: is_valid_slo = True if workload: is_valid_workload = False if not is_valid_slo: LOG.error( "SLO: %(slo)s is not valid. 
Valid values are: " "%(valid_slos)s.", {'slo': slo, 'valid_slos': valid_slos}) if not is_valid_workload: LOG.warning( "Workload: %(workload)s is not valid. Valid values are " "%(valid_workloads)s. Note you cannot " "set a workload without an SLO.", {'workload': workload, 'valid_workloads': valid_workloads}) return is_valid_slo, is_valid_workload def get_slo_workload_settings_from_storage_group( self, array, sg_name): """Get slo and workload settings from a storage group. :param array: the array serial number :param sg_name: the storage group name :returns: storage group slo settings """ slo = 'NONE' workload = 'NONE' storage_group = self.rest.get_storage_group(array, sg_name) if storage_group: try: slo = storage_group['slo'] workload = 'NONE' if self.rest.is_next_gen_array(array) else ( storage_group['workload']) except KeyError: pass else: exception_message = (_( "Could not retrieve storage group %(sg_name)s. ") % {'sg_name': sg_name}) LOG.error(exception_message) raise exception.VolumeBackendAPIException( message=exception_message) return '%(slo)s+%(workload)s' % {'slo': slo, 'workload': workload} @coordination.synchronized('emc-rg-{rdf_group}') def break_rdf_relationship(self, array, device_id, sg_name, rdf_group, rep_extra_specs, state): """Break the rdf relationship between a pair of devices. Resuming replication after suspending is necessary where this function is called from. Doing so in here will disrupt the ability to perform further actions on the RDFG without suspending again. :param array: the array serial number :param device_id: the source device id :param sg_name: storage group :param rdf_group: the rdf group number :param rep_extra_specs: replication extra specs :param state: the state of the rdf pair """ LOG.info("Suspending RDF group %(rdf)s to delete source device " "%(dev)s RDF pair.", {'rdf': rdf_group, 'dev': device_id}) if state.lower() == utils.RDF_SYNCINPROG_STATE: self.rest.wait_for_rdf_pair_sync( array, rdf_group, device_id, rep_extra_specs) if state.lower() != utils.RDF_SUSPENDED_STATE: self.rest.srdf_suspend_replication( array, sg_name, rdf_group, rep_extra_specs) self.rest.srdf_delete_device_pair(array, rdf_group, device_id) def get_or_create_volume_group(self, array, group, extra_specs): """Get or create a volume group. Sometimes it may be necessary to recreate a volume group on the backend - for example, when the last member volume has been removed from the group, but the cinder group object has not been deleted. :param array: the array serial number :param group: the group object :param extra_specs: the extra specifications :returns: group name """ vol_grp_name = self.utils.update_volume_group_name(group) return self.get_or_create_group(array, vol_grp_name, extra_specs) def get_or_create_group(self, array, group_name, extra_specs): """Get or create a generic volume group. :param array: the array serial number :param group_name: the group name :param extra_specs: the extra specifications :returns: group name """ storage_group = self.rest.get_storage_group(array, group_name) if not storage_group: self.create_volume_group(array, group_name, extra_specs) return group_name def create_volume_group(self, array, group_name, extra_specs): """Create a generic volume group. 
:param array: the array serial number :param group_name: the name of the group :param extra_specs: the extra specifications :returns: volume_group """ return self.create_storage_group(array, group_name, None, None, None, extra_specs) def create_group_replica( self, array, source_group, snap_name, extra_specs): """Create a replica (snapVx) of a volume group. :param array: the array serial number :param source_group: the source group name :param snap_name: the name for the snap shot :param extra_specs: extra specifications """ LOG.debug("Creating Snap Vx snapshot of storage group: %(srcGroup)s.", {'srcGroup': source_group}) # Create snapshot self.rest.create_storagegroup_snap( array, source_group, snap_name, extra_specs) def delete_group_replica(self, array, snap_name, source_group_name): """Delete the snapshot. :param array: the array serial number :param snap_name: the name for the snap shot :param source_group_name: the source group name """ LOG.debug("Deleting Snap Vx snapshot: source group: %(srcGroup)s " "snapshot: %(snap_name)s.", {'srcGroup': source_group_name, 'snap_name': snap_name}) snap_id_list = self.rest.get_storage_group_snap_id_list( array, source_group_name, snap_name) if snap_id_list: if not self.rest.is_snap_id: snap_id_list.sort(reverse=True) for snap_id in snap_id_list: self.rest.delete_storagegroup_snap( array, source_group_name, snap_name, snap_id, force=True) else: LOG.debug("Unable to get snap ids for: %(srcGroup)s.", {'srcGroup': source_group_name}) def link_and_break_replica(self, array, source_group_name, target_group_name, snap_name, extra_specs, list_volume_pairs, delete_snapshot=False, snap_id=None): """Links a group snap and breaks the relationship. :param array: the array serial :param source_group_name: the source group name :param target_group_name: the target group name :param snap_name: the snapshot name :param extra_specs: extra specifications :param list_volume_pairs: the list of volume pairs :param delete_snapshot: delete snapshot flag :param snap_id: the unique snapVx identifier """ LOG.debug("Linking Snap Vx snapshot: source group: %(srcGroup)s " "targetGroup: %(tgtGroup)s.", {'srcGroup': source_group_name, 'tgtGroup': target_group_name}) # Link the snapshot self.rest.modify_volume_snap( array, None, None, snap_name, extra_specs, snap_id=snap_id, link=True, list_volume_pairs=list_volume_pairs) # Get the link defined status defined = True for src, dst in list_volume_pairs: linked_list = self.rest.get_snap_linked_device_list( array, src, snap_name, snap_id) if not linked_list[0]['defined']: defined = False break # Unlink the snapshot LOG.debug("Unlinking Snap Vx snapshot: source group: %(srcGroup)s " "targetGroup: %(tgtGroup)s.", {'srcGroup': source_group_name, 'tgtGroup': target_group_name}) self._unlink_volume( array, None, None, snap_name, extra_specs, snap_id=snap_id, list_volume_pairs=list_volume_pairs, symforce=self._is_symforce_enabled(defined)) # Delete the snapshot if necessary if delete_snapshot: LOG.debug("Deleting Snap Vx snapshot: source group: %(srcGroup)s " "snapshot: %(snap_name)s.", {'srcGroup': source_group_name, 'snap_name': snap_name}) source_devices = [a for a, b in list_volume_pairs] self.delete_volume_snap(array, snap_name, source_devices) def revert_volume_snapshot(self, array, source_device_id, snap_name, snap_id, extra_specs): """Revert a volume snapshot :param array: the array serial number :param source_device_id: device id of the source :param snap_name: snapvx snapshot name :param snap_id: the unique snap identifier :param 
extra_specs: the extra specifications """ start_time = time.time() try: self.rest.modify_volume_snap( array, source_device_id, "", snap_name, extra_specs, snap_id=snap_id, restore=True) except exception.VolumeBackendAPIException as ex: if utils.REVERT_SS_EXC in ex.message: exception_message = _( "Link must be fully copied for this operation to proceed. " "Please reset the volume state from error to available " "and wait for awhile before attempting another " "revert to snapshot operation. You may want to delete " "the latest snapshot taken in this revert to snapshot " "operation, as you will only be able to revert to the " "last snapshot.") else: exception_message = (_( "Revert to snapshot failed with exception " "%(e)s.") % {'e': ex}) raise exception.VolumeBackendAPIException( message=exception_message) LOG.debug("Restore volume snapshot took: %(delta)s H:MM:SS.", {'delta': self.utils.get_time_delta(start_time, time.time())}) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/drivers/dell_emc/powermax/rest.py0000664000175000017500000046227400000000000024274 0ustar00zuulzuul00000000000000# Copyright (c) 2020 Dell Inc. or its subsidiaries. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import json import re import sys import time from oslo_log import log as logging from oslo_service import loopingcall import requests import requests.auth import requests.exceptions as r_exc # pylint: disable=E0401 import requests.packages.urllib3.util.retry as requests_retry from cinder import exception from cinder.i18n import _ from cinder.utils import retry from cinder.volume.drivers.dell_emc.powermax import utils from cinder.volume import volume_utils LOG = logging.getLogger(__name__) SLOPROVISIONING = 'sloprovisioning' REPLICATION = 'replication' SYSTEM = 'system' U4P_110_VERSION = '110' U4P_100_VERSION = '100' MIN_U4P_100_VERSION = '10.0.0.0' U4P_92_VERSION = '92' MIN_U4P_92_VERSION = '9.2.0.0' UCODE_5978 = '5978' retry_exc_tuple = (exception.VolumeBackendAPIException,) u4p_failover_max_wait = 120 # HTTP constants GET = 'GET' POST = 'POST' PUT = 'PUT' DELETE = 'DELETE' STATUS_200 = 200 STATUS_201 = 201 STATUS_202 = 202 STATUS_204 = 204 SERVER_ERROR_STATUS_CODES = [408, 501, 502, 503, 504] ITERATOR_EXPIRATION = 180 # Job constants INCOMPLETE_LIST = ['created', 'unscheduled', 'scheduled', 'running', 'validating', 'validated'] CREATED = 'created' SUCCEEDED = 'succeeded' CREATE_VOL_STRING = "Creating new Volumes" POPULATE_SG_LIST = "Populating Storage Group(s) with volumes" # Sequence of beta microcode (in order) DEV_CODE = 'x' TEST_CODE = 't' QUAL_CODE = 'v' class PowerMaxRest(object): """Rest class based on Unisphere for PowerMax Rest API.""" def __init__(self): self.utils = utils.PowerMaxUtils() self.session = None self.base_uri = None self.user = None self.passwd = None self.verify = None self.cert = None # Failover Unisphere configuration self.primary_u4p = None self.u4p_failover_enabled = False self.u4p_failover_autofailback = True self.u4p_failover_targets = list() self.u4p_failover_retries = 3 self.u4p_failover_timeout = 30 self.u4p_failover_backoff_factor = 1 self.u4p_in_failover = False self.u4p_failover_lock = False self.ucode_major_level = None self.ucode_minor_level = None self.is_snap_id = False self.u4p_version = None self.rest_api_connect_timeout = 30 self.rest_api_read_timeout = 30 def set_rest_credentials(self, array_info): """Given the array record set the rest server credentials. :param array_info: record """ ip = array_info['RestServerIp'] port = array_info['RestServerPort'] self.user = array_info['RestUserName'] self.passwd = array_info['RestPassword'] self.verify = array_info['SSLVerify'] ip_port = "%(ip)s:%(port)s" % {'ip': ip, 'port': port} self.base_uri = ("https://%(ip_port)s/univmax/restapi" % { 'ip_port': ip_port}) self.session = self._establish_rest_session() new_connect_timeout = ( int(array_info.get(utils.REST_API_CONNECT_TIMEOUT_KEY, 0))) if new_connect_timeout > 0: self.rest_api_connect_timeout = new_connect_timeout new_read_timeout = ( int(array_info.get(utils.REST_API_READ_TIMEOUT_KEY, 0))) if new_read_timeout > 0: self.rest_api_read_timeout = new_read_timeout def set_residuals(self, serial_number): """Set ucode and snapid information. :param serial_number: the array serial number """ self.ucode_major_level, self.ucode_minor_level = ( self.get_major_minor_ucode(serial_number)) self.is_snap_id = self._is_snapid_enabled() def set_u4p_failover_config(self, failover_info): """Set the environment failover Unisphere targets and configuration. 
:param failover_info: failover target record """ self.u4p_failover_enabled = True self.primary_u4p = failover_info['u4p_primary'] self.u4p_failover_targets = failover_info['u4p_failover_targets'] if failover_info['u4p_failover_retries']: self.u4p_failover_retries = failover_info['u4p_failover_retries'] if failover_info['u4p_failover_timeout']: self.u4p_failover_timeout = failover_info['u4p_failover_timeout'] if failover_info['u4p_failover_backoff_factor']: self.u4p_failover_backoff_factor = failover_info[ 'u4p_failover_backoff_factor'] if failover_info['u4p_failover_autofailback']: self.u4p_failover_autofailback = failover_info[ 'u4p_failover_autofailback'] def _establish_rest_session(self): """Establish the rest session. :returns: requests.session() -- session, the rest session """ LOG.info("Establishing REST session with %(base_uri)s", {'base_uri': self.base_uri}) if self.session: self.session.close() session = requests.session() session.headers = {'content-type': 'application/json', 'accept': 'application/json', 'Application-Type': 'openstack'} session.auth = requests.auth.HTTPBasicAuth(self.user, self.passwd) if self.verify is not None: session.verify = self.verify # SESSION FAILOVER CONFIGURATION if self.u4p_failover_enabled: timeout = self.u4p_failover_timeout class MyHTTPAdapter(requests.adapters.HTTPAdapter): def send(self, *args, **kwargs): kwargs['timeout'] = timeout return super(MyHTTPAdapter, self).send(*args, **kwargs) retry = requests_retry.Retry( total=self.u4p_failover_retries, backoff_factor=self.u4p_failover_backoff_factor, status_forcelist=SERVER_ERROR_STATUS_CODES) adapter = MyHTTPAdapter(max_retries=retry) session.mount('https://', adapter) session.mount('http://', adapter) return session def _handle_u4p_failover(self): """Handle the failover process to secondary instance of Unisphere. :raises: VolumeBackendAPIException """ if self.u4p_failover_targets: LOG.error("Unisphere failure at %(prim)s, switching to next " "backup instance of Unisphere at %(sec)s", { 'prim': self.base_uri, 'sec': self.u4p_failover_targets[0][ 'RestServerIp']}) self.set_rest_credentials(self.u4p_failover_targets[0]) self.u4p_failover_targets.pop(0) if self.u4p_in_failover: LOG.warning("PowerMax driver still in u4p failover mode. A " "periodic check will be made to see if primary " "Unisphere comes back online for seamless " "restoration.") else: LOG.warning("PowerMax driver set to u4p failover mode. A " "periodic check will be made to see if primary " "Unisphere comes back online for seamless " "restoration.") self.u4p_in_failover = True else: msg = _("A connection could not be established with the " "primary instance of Unisphere or any of the " "specified failover instances of Unisphere. Please " "check your local environment setup and restart " "Cinder Volume service to revert back to the primary " "Unisphere instance.") self.u4p_failover_lock = False raise exception.VolumeBackendAPIException(message=msg) @volume_utils.trace() def request(self, target_uri, method, params=None, request_object=None, u4p_check=False, retry=False): """Sends a request (GET, POST, PUT, DELETE) to the target api. 
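# ---------------------------------------------------------------------------
# Editorial note: _establish_rest_session() above combines HTTP basic auth, a
# urllib3 Retry policy and a per-request timeout on one requests session. A
# stripped-down, self-contained sketch of that combination follows; it is
# illustrative only, and the retry count, backoff factor and timeout values
# are hypothetical.
# ---------------------------------------------------------------------------
import requests
import requests.auth
from urllib3.util.retry import Retry


def build_session(user, passwd, retries=3, backoff=1, timeout=30):
    session = requests.session()
    session.headers = {'content-type': 'application/json',
                       'accept': 'application/json'}
    session.auth = requests.auth.HTTPBasicAuth(user, passwd)

    class TimeoutAdapter(requests.adapters.HTTPAdapter):
        def send(self, *args, **kwargs):
            # Force a timeout on every request sent through this adapter.
            kwargs['timeout'] = timeout
            return super(TimeoutAdapter, self).send(*args, **kwargs)

    retry = Retry(total=retries, backoff_factor=backoff,
                  status_forcelist=[408, 501, 502, 503, 504])
    session.mount('https://', TimeoutAdapter(max_retries=retry))
    return session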
:param target_uri: target uri (string) :param method: The method (GET, POST, PUT, or DELETE) :param params: Additional URL parameters :param request_object: request payload (dict) :param u4p_check: if request is testing connection (boolean) :param retry: if request is retry from prior failed request (boolean) :returns: server response object (dict) :raises: VolumeBackendAPIException, Timeout, ConnectionError, HTTPError, SSLError """ waiting_time = 0 while self.u4p_failover_lock and not retry and ( waiting_time < u4p_failover_max_wait): LOG.warning("Unisphere failover lock in process, holding request " "until lock is released when Unisphere connection " "re-established.") sleeptime = 10 time.sleep(sleeptime) waiting_time += sleeptime if waiting_time >= u4p_failover_max_wait: self.u4p_failover_lock = False url, message, status_code, response = None, None, None, None if not self.session: self.session = self._establish_rest_session() try: url = ("%(self.base_uri)s%(target_uri)s" % { 'self.base_uri': self.base_uri, 'target_uri': target_uri}) timeout = (self.rest_api_connect_timeout, self.rest_api_read_timeout) if request_object: response = self.session.request( method=method, url=url, data=json.dumps(request_object, sort_keys=True, indent=4), timeout=timeout) elif params: response = self.session.request( method=method, url=url, params=params, timeout=timeout) else: response = self.session.request( method=method, url=url, timeout=timeout) status_code = response.status_code if retry and status_code and status_code in [STATUS_200, STATUS_201, STATUS_202, STATUS_204]: self.u4p_failover_lock = False try: message = response.json() except ValueError: LOG.debug("No response received from API. Status code " "received is: %(status_code)s", { 'status_code': status_code}) message = None if retry: self.u4p_failover_lock = False LOG.debug("%(method)s request to %(url)s has returned with " "a status code of: %(status_code)s.", { 'method': method, 'url': url, 'status_code': status_code}) except r_exc.SSLError as e: if retry: self.u4p_failover_lock = False msg = _("The connection to %(base_uri)s has encountered an " "SSL error. Please check your SSL config or supplied " "SSL cert in Cinder configuration. SSL Exception " "message: %(e)s") raise r_exc.SSLError(msg % {'base_uri': self.base_uri, 'e': e}) except (r_exc.Timeout, r_exc.ConnectionError, r_exc.HTTPError) as e: if isinstance(e, r_exc.Timeout): msg = _("The %s request to URL %s failed with timeout " "exception %s" % (method, url, str(e))) LOG.error(msg) if self.u4p_failover_enabled or u4p_check: if not u4p_check: # Failover process LOG.warning("Running failover to backup instance " "of Unisphere") self.u4p_failover_lock = True self._handle_u4p_failover() # Failover complete, re-run failed operation LOG.info("Running request again to backup instance of " "Unisphere") status_code, message = self.request( target_uri, method, params, request_object, retry=True) elif not self.u4p_failover_enabled: exc_class, __, __ = sys.exc_info() msg = _("The %(method)s to Unisphere server %(base)s has " "experienced a %(error)s error. Please check your " "Unisphere server connection/availability. 
" "Exception message: %(exc_msg)s") raise exc_class(msg % {'method': method, 'base': self.base_uri, 'error': e.__class__.__name__, 'exc_msg': e}) except Exception as e: if retry: self.u4p_failover_lock = False msg = _("The %s request to URL %s failed with exception " "%s" % (method, url, str(e))) LOG.error(msg) raise exception.VolumeBackendAPIException(message=msg) return status_code, message def wait_for_job_complete(self, job, extra_specs): """Given the job wait for it to complete. :param job: the job dict :param extra_specs: the extra_specs dict. :returns: rc -- int, result -- string, status -- string, task -- list of dicts detailing tasks in the job :raises: VolumeBackendAPIException """ res, tasks = None, None if job['status'].lower == CREATED: try: res, tasks = job['result'], job['task'] except KeyError: pass return 0, res, job['status'], tasks def _wait_for_job_complete(): result = None # Called at an interval until the job is finished. retries = kwargs['retries'] try: kwargs['retries'] = retries + 1 if not kwargs['wait_for_job_called']: is_complete, result, rc, status, task = ( self._is_job_finished(job_id)) if is_complete is True: kwargs['wait_for_job_called'] = True kwargs['rc'], kwargs['status'] = rc, status kwargs['result'], kwargs['task'] = result, task except Exception: exception_message = (_("Issue encountered waiting for job.")) LOG.error(exception_message) raise exception.VolumeBackendAPIException( message=exception_message) if retries > int(extra_specs[utils.RETRIES]): LOG.error("_wait_for_job_complete failed after " "%(retries)d tries.", {'retries': retries}) kwargs['rc'], kwargs['result'] = -1, result raise loopingcall.LoopingCallDone() if kwargs['wait_for_job_called']: raise loopingcall.LoopingCallDone() job_id = job['jobId'] kwargs = {'retries': 0, 'wait_for_job_called': False, 'rc': 0, 'result': None} timer = loopingcall.FixedIntervalLoopingCall(_wait_for_job_complete) timer.start(interval=int(extra_specs[utils.INTERVAL])).wait() LOG.debug("Return code is: %(rc)lu. Result is %(res)s.", {'rc': kwargs['rc'], 'res': kwargs['result']}) return (kwargs['rc'], kwargs['result'], kwargs['status'], kwargs['task']) def _is_job_finished(self, job_id): """Check if the job is finished. :param job_id: the id of the job :returns: complete -- bool, result -- string, rc -- int, status -- string, task -- list of dicts """ complete, rc, status, result, task = False, 0, None, None, None job_url = "/%s/system/job/%s" % (self.u4p_version, job_id) job = self.get_request(job_url, 'job') if job: status = job['status'] try: result, task = job['result'], job['task'] except KeyError: pass if status.lower() == SUCCEEDED: complete = True elif status.lower() in INCOMPLETE_LIST: complete = False else: rc, complete = -1, True return complete, result, rc, status, task @staticmethod def check_status_code_success(operation, status_code, message): """Check if a status code indicates success. :param operation: the operation :param status_code: the status code :param message: the server response :raises: VolumeBackendAPIException """ if status_code not in [STATUS_200, STATUS_201, STATUS_202, STATUS_204]: exception_message = ( _("Error %(operation)s. The status code received is %(sc)s " "and the message is %(message)s.") % { 'operation': operation, 'sc': status_code, 'message': message}) raise exception.VolumeBackendAPIException( message=exception_message) def wait_for_job(self, operation, status_code, job, extra_specs): """Check if call is async, wait for it to complete. 
:param operation: the operation being performed :param status_code: the status code :param job: the job :param extra_specs: the extra specifications :returns: task -- list of dicts detailing tasks in the job :raises: VolumeBackendAPIException """ task = None if status_code == STATUS_202: rc, result, status, task = self.wait_for_job_complete( job, extra_specs) if rc != 0: exception_message = ( _("Error %(operation)s. Status code: %(sc)lu. Error: " "%(error)s. Status: %(status)s.") % { 'operation': operation, 'sc': rc, 'error': str(result), 'status': status}) LOG.error(exception_message) raise exception.VolumeBackendAPIException( message=exception_message) return task def build_uri(self, *args, **kwargs): """Build the target url. :param args: input args, see _build_uri_legacy_args() for input breakdown :param kwargs: input keyword args, see _build_uri_kwargs() for input breakdown :return: target uri -- str """ if args: target_uri = self._build_uri_legacy_args(*args, **kwargs) else: target_uri = self._build_uri_kwargs(**kwargs) return target_uri def _build_uri_legacy_args(self, *args, **kwargs): """Build the target URI using legacy args & kwargs. Expected format: arg[0]: the array serial number: the array serial number -- str arg[1]: the resource category e.g. 'sloprovisioning' -- str arg[2]: the resource type e.g. 'maskingview' -- str kwarg resource_name: the name of a specific resource -- str kwarg private: if endpoint is private -- bool kwarg version: U4P REST endpoint version -- int/str kwarg no_version: if endpoint should be versionless -- bool :param args: input args -- see above :param kwargs: input keyword args -- see above :return: target URI -- str """ # Extract args following legacy _build_uri() format array_id, category, resource_type = args[0], args[1], args[2] # Extract keyword args following legacy _build_uri() format resource_name = kwargs.get('resource_name') private = kwargs.get('private') version = kwargs.get('version', self.u4p_version) if kwargs.get('no_version'): version = None # Build URI target_uri = '' if private: target_uri += '/private' if version: target_uri += '/%(version)s' % {'version': version} target_uri += ( '/{cat}/symmetrix/{array_id}/{res_type}'.format( cat=category, array_id=array_id, res_type=resource_type)) if resource_name: target_uri += '/{resource_name}'.format( resource_name=kwargs.get('resource_name')) return target_uri def _build_uri_kwargs(self, **kwargs): """Build the target URI using kwargs. Expected kwargs: private: if endpoint is private (optional) -- bool version: U4P REST endpoint version (optional) -- int/None no_version: if endpoint should be versionless (optional) -- bool category: U4P REST category eg. 'common', 'replication'-- str resource_level: U4P REST resource level eg. 'symmetrix' (optional) -- str resource_level_id: U4P REST resource level id (optional) -- str resource_type: U4P REST resource type eg. 'rdf_director', 'host' (optional) -- str resource_type_id: U4P REST resource type id (optional) -- str resource: U4P REST resource eg. 'port' (optional) -- str resource_id: U4P REST resource id (optional) -- str object_type: U4P REST resource eg. 
'rdf_group' (optional) -- str object_type_id: U4P REST resource id (optional) -- str :param kwargs: input keyword args -- see above :return: target URI -- str """ version = kwargs.get('version', self.u4p_version) if kwargs.get('no_version'): version = None target_uri = '' if kwargs.get('private'): target_uri += '/private' if version: target_uri += '/%(ver)s' % {'ver': version} target_uri += '/%(cat)s' % {'cat': kwargs.get('category')} if kwargs.get('resource_level'): target_uri += '/%(res_level)s' % { 'res_level': kwargs.get('resource_level')} if kwargs.get('resource_level_id'): target_uri += '/%(res_level_id)s' % { 'res_level_id': kwargs.get('resource_level_id')} if kwargs.get('resource_type'): target_uri += '/%(res_type)s' % { 'res_type': kwargs.get('resource_type')} if kwargs.get('resource_type_id'): target_uri += '/%(res_type_id)s' % { 'res_type_id': kwargs.get('resource_type_id')} if kwargs.get('resource'): target_uri += '/%(res)s' % { 'res': kwargs.get('resource')} if kwargs.get('resource_id'): target_uri += '/%(res_id)s' % { 'res_id': kwargs.get('resource_id')} if kwargs.get('object_type'): target_uri += '/%(object_type)s' % { 'object_type': kwargs.get('object_type')} if kwargs.get('object_type_id'): target_uri += '/%(object_type_id)s' % { 'object_type_id': kwargs.get('object_type_id')} return target_uri def get_request(self, target_uri, resource_type, params=None): """Send a GET request to the array. :param target_uri: the target uri :param resource_type: the resource type, e.g. maskingview :param params: optional dict of filter params :returns: resource_object -- dict or None """ resource_object = None sc, message = self.request(target_uri, GET, params=params) operation = 'get %(res)s' % {'res': resource_type} try: self.check_status_code_success(operation, sc, message) except Exception as e: LOG.debug("Get resource failed with %(e)s", {'e': e}) if sc == STATUS_200: resource_object = message resource_object = self.list_pagination(resource_object) return resource_object def post_request(self, target_uri, resource_type, request_body): """Send a POST request to the array. :param target_uri: the target uri -- str :param resource_type: the resource type -- str :param request_body: the POST request body -- dict :return: resource object -- dict or None """ resource_object = None sc, msg = self.request(target_uri, POST, request_object=request_body) operation = 'POST %(res)s' % {'res': resource_type} try: self.check_status_code_success(operation, sc, msg) except Exception as e: LOG.debug("POST resource failed with %(e)s", {'e': e}) if sc == STATUS_200: resource_object = msg return resource_object def get_resource(self, array, category, resource_type, resource_name=None, params=None, private=False, version=None): """Get resource details from array. :param array: the array serial number :param category: the resource category e.g. sloprovisioning :param resource_type: the resource type e.g. maskingview :param resource_name: the name of a specific resource :param params: query parameters :param private: empty string or '/private' if private url :param version: None or specific version number if required :returns: resource object -- dict or None """ target_uri = self.build_uri( array, category, resource_type, resource_name=resource_name, private=private, version=self.u4p_version) return self.get_request(target_uri, resource_type, params) def create_resource(self, array, category, resource_type, payload, private=False): """Create a provisioning resource. 
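# ---------------------------------------------------------------------------
# Editorial note: a worked example of the keyword-argument URI building
# described in _build_uri_kwargs() above. The helper below mirrors the same
# concatenation order in stand-alone form (illustrative only); the serial
# number and RDF group id in the example are hypothetical.
# ---------------------------------------------------------------------------
def sketch_build_uri(**kwargs):
    uri = ''
    if kwargs.get('private'):
        uri += '/private'
    if kwargs.get('version'):
        uri += '/%s' % kwargs['version']
    uri += '/%s' % kwargs.get('category')
    for key in ('resource_level', 'resource_level_id',
                'resource_type', 'resource_type_id',
                'resource', 'resource_id',
                'object_type', 'object_type_id'):
        if kwargs.get(key):
            uri += '/%s' % kwargs[key]
    return uri


assert sketch_build_uri(
    version='100', category='replication',
    resource_level='symmetrix', resource_level_id='000123456789',
    resource_type='rdf_group', resource_type_id='10') == (
    '/100/replication/symmetrix/000123456789/rdf_group/10')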
:param array: the array serial number :param category: the category :param resource_type: the resource type :param payload: the payload :param private: empty string or '/private' if private url :returns: status_code -- int, message -- string, server response """ target_uri = self.build_uri( array, category, resource_type, private=private) status_code, message = self.request(target_uri, POST, request_object=payload) operation = 'Create %(res)s resource' % {'res': resource_type} self.check_status_code_success( operation, status_code, message) return status_code, message def modify_resource( self, array, category, resource_type, payload, version=None, resource_name=None, private=False): """Modify a resource. :param version: the uv4 version :param array: the array serial number :param category: the category :param resource_type: the resource type :param payload: the payload :param resource_name: the resource name :param private: empty string or '/private' if private url :returns: status_code -- int, message -- string (server response) """ target_uri = self.build_uri( array, category, resource_type, resource_name=resource_name, private=private, version=self.u4p_version) status_code, message = self.request(target_uri, PUT, request_object=payload) operation = 'modify %(res)s resource' % {'res': resource_type} self.check_status_code_success(operation, status_code, message) return status_code, message @retry(retry_exc_tuple, interval=2, retries=3) def delete_resource( self, array, category, resource_type, resource_name, payload=None, private=False, params=None): """Delete a provisioning resource. :param array: the array serial number :param category: the resource category e.g. sloprovisioning :param resource_type: the type of resource to be deleted :param resource_name: the name of the resource to be deleted :param payload: the payload, optional :param private: empty string or '/private' if private url :param params: dict of optional query params """ target_uri = self.build_uri( array, category, resource_type, resource_name=resource_name, private=private) status_code, message = self.request(target_uri, DELETE, request_object=payload, params=params) operation = 'delete %(res)s resource' % {'res': resource_type} self.check_status_code_success(operation, status_code, message) def get_arrays_list(self): """Get a list of all arrays on U4P instance. :returns arrays -- list """ target_uri = '/%s/sloprovisioning/symmetrix' % self.u4p_version array_details = self.get_request(target_uri, 'sloprovisioning') if not array_details: LOG.error("Could not get array details from Unisphere instance.") arrays = array_details.get('symmetrixId', list()) return arrays def get_array_detail(self, array): """Get an array from its serial number. :param array: the array serial number :returns: array_details -- dict or None """ target_uri = '/%s/system/symmetrix/%s' % (self.u4p_version, array) array_details = self.get_request(target_uri, 'system') if not array_details: LOG.error("Cannot connect to array %(array)s.", {'array': array}) return array_details def get_array_tags(self, array): """Get the tags from its serial number. :param array: the array serial number :returns: tag list -- list or empty list """ target_uri = '/%s/system/tag?array_id=%s' % ( self.u4p_version, array) array_tags = self.get_request(target_uri, 'system') return array_tags.get('tag_name') def is_next_gen_array(self, array): """Check to see if array is a next gen array(ucode 5978 or greater). 
:param array: the array serial number :returns: bool """ is_next_gen = False array_details = self.get_array_detail(array) if array_details: ucode = array_details.get('ucode', array_details.get('microcode')) ucode_version = ucode.split('.')[0] if ucode else None if ucode_version >= UCODE_5978: is_next_gen = True return is_next_gen def get_uni_version(self): """Get the unisphere version from the server. :returns: version and major_version(e.g. ("V8.4.0.16", "84")) """ version, major_version = None, None response = self.get_unisphere_version() if response and response.get('version'): regex = re.compile(r'^[a-zA-Z]\d+(.\d+){3}$') if regex.match(response['version']): version = response['version'] version_list = version.split('.') major_version = version_list[0][1:] + version_list[1] return version, major_version def get_unisphere_version(self): """Get the unisphere version from the server. :returns: version dict """ version_endpoint = '/version' status_code, version_dict = self.request(version_endpoint, GET) if not version_dict: LOG.error("Unisphere version info not found.") return version_dict def get_srp_by_name(self, array, srp=None): """Returns the details of a storage pool. :param array: the array serial number :param srp: the storage resource pool name :returns: SRP_details -- dict or None """ LOG.debug("storagePoolName: %(srp)s, array: %(array)s.", {'srp': srp, 'array': array}) srp_details = self.get_resource(array, SLOPROVISIONING, 'srp', resource_name=srp, params=None) return srp_details def get_slo_list(self, array, is_next_gen, array_model): """Retrieve the list of slo's from the array :param array: the array serial number :param is_next_gen: next generation flag :param array_model :returns: slo_list -- list of service level names """ slo_list = [] slo_dict = self.get_resource(array, SLOPROVISIONING, 'slo') if slo_dict and slo_dict.get('sloId'): if not is_next_gen and ( any(array_model in x for x in utils.VMAX_AFA_MODELS)): if 'Optimized' in slo_dict.get('sloId'): slo_dict['sloId'].remove('Optimized') for slo in slo_dict['sloId']: if slo and slo not in slo_list: slo_list.append(slo) return slo_list def get_workload_settings(self, array, is_next_gen): """Get valid workload options from array. Workloads are no longer supported from HyperMaxOS 5978 onwards. :param array: the array serial number :param is_next_gen: is next generation flag :returns: workload_setting -- list of workload names """ workload_setting = [] if is_next_gen: workload_setting.append('None') else: wl_details = self.get_resource( array, SLOPROVISIONING, 'workloadtype') if wl_details: workload_setting = wl_details['workloadId'] return workload_setting def get_vmax_model(self, array): """Get the PowerMax/VMAX model. :param array: the array serial number :returns: the PowerMax/VMAX model """ vmax_version = None system_info = self.get_array_detail(array) if system_info and system_info.get('model'): vmax_version = system_info.get('model') return vmax_version def get_array_model_info(self, array): """Get the PowerMax/VMAX model. 
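# ---------------------------------------------------------------------------
# Editorial note: a worked example of the version parsing in
# get_uni_version() above, using the same regex and split logic in
# stand-alone form. The sample version string is hypothetical but follows the
# documented "V<major>.<x>.<y>.<z>" shape; 'V10.0.0.1' yields the major
# version '100' (cf. U4P_100_VERSION).
# ---------------------------------------------------------------------------
import re


def sketch_major_version(version_string):
    regex = re.compile(r'^[a-zA-Z]\d+(.\d+){3}$')
    if not regex.match(version_string):
        return None, None
    parts = version_string.split('.')
    # e.g. ['V10', '0', '0', '1'] -> '10' + '0' -> '100'
    return version_string, parts[0][1:] + parts[1]


assert sketch_major_version('V10.0.0.1') == ('V10.0.0.1', '100')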
:param array: the array serial number :returns: the PowerMax/VMAX model """ array_model = None is_next_gen = False system_info = self.get_array_detail(array) if system_info and system_info.get('model'): array_model = system_info.get('model') if system_info: ucode = system_info.get('ucode', system_info.get('microcode')) ucode_version = ucode.split('.')[0] if ucode_version >= UCODE_5978: is_next_gen = True return array_model, is_next_gen def get_array_ucode_version(self, array): """Get the PowerMax/VMAX uCode version. :param array: the array serial number :returns: the PowerMax/VMAX uCode version """ ucode_version = None system_info = self.get_array_detail(array) if system_info: ucode_version = system_info.get( 'ucode', system_info.get('microcode')) return ucode_version def is_compression_capable(self, array): """Check if array is compression capable. :param array: array serial number :returns: bool """ is_compression_capable = False target_uri = ("/%s/sloprovisioning/symmetrix?compressionCapable=true" % self.u4p_version) status_code, message = self.request(target_uri, GET) self.check_status_code_success( "Check if compression enabled", status_code, message) if message.get('symmetrixId'): if array in message['symmetrixId']: is_compression_capable = True return is_compression_capable def get_storage_group(self, array, storage_group_name): """Given a name, return storage group details. :param array: the array serial number :param storage_group_name: the name of the storage group :returns: storage group dict or None """ return self.get_resource( array, SLOPROVISIONING, 'storagegroup', resource_name=storage_group_name) def get_storage_group_list(self, array, params=None): """Given a name, return storage group details. :param array: the array serial number :param params: dict of optional filters :returns: storage group dict or None """ return self.get_resource( array, SLOPROVISIONING, 'storagegroup', params=params) def get_num_vols_in_sg(self, array, storage_group_name): """Get the number of volumes in a storage group. :param array: the array serial number :param storage_group_name: the storage group name :returns: num_vols -- int """ num_vols = 0 storagegroup = self.get_storage_group(array, storage_group_name) try: num_vols = int(storagegroup['num_of_vols']) except (KeyError, TypeError): pass return num_vols def is_child_sg_in_parent_sg(self, array, child_name, parent_name): """Check if a child storage group is a member of a parent group. :param array: the array serial number :param child_name: the child sg name :param parent_name: the parent sg name :returns: bool """ parent_sg = self.get_storage_group(array, parent_name) if parent_sg and parent_sg.get('child_storage_group'): child_sg_list = parent_sg['child_storage_group'] if child_name.lower() in ( child.lower() for child in child_sg_list): return True return False def add_child_sg_to_parent_sg( self, array, child_sg, parent_sg, extra_specs): """Add a storage group to a parent storage group. This method adds an existing storage group to another storage group, i.e. cascaded storage groups. 
:param array: the array serial number :param child_sg: the name of the child sg :param parent_sg: the name of the parent sg :param extra_specs: the extra specifications """ payload = {"editStorageGroupActionParam": { "expandStorageGroupParam": { "addExistingStorageGroupParam": { "storageGroupId": [child_sg]}}}} sc, job = self.modify_storage_group(array, parent_sg, payload) self.wait_for_job('Add child sg to parent sg', sc, job, extra_specs) def remove_child_sg_from_parent_sg( self, array, child_sg, parent_sg, extra_specs): """Remove a storage group from its parent storage group. This method removes a child storage group from its parent group. :param array: the array serial number :param child_sg: the name of the child sg :param parent_sg: the name of the parent sg :param extra_specs: the extra specifications """ payload = {"editStorageGroupActionParam": { "removeStorageGroupParam": { "storageGroupId": [child_sg], "force": 'true'}}} status_code, job = self.modify_storage_group( array, parent_sg, payload) self.wait_for_job( 'Remove child sg from parent sg', status_code, job, extra_specs) def _create_storagegroup(self, array, payload): """Create a storage group. :param array: the array serial number :param payload: the payload -- dict :returns: status_code -- int, message -- string, server response """ return self.create_resource( array, SLOPROVISIONING, 'storagegroup', payload) def create_storage_group(self, array, storagegroup_name, srp, slo, workload, extra_specs, do_disable_compression=False): """Create the volume in the specified storage group. :param array: the array serial number :param storagegroup_name: the group name (String) :param srp: the SRP (String) :param slo: the SLO (String) :param workload: the workload (String) :param do_disable_compression: flag for disabling compression :param extra_specs: additional info :returns: storagegroup_name - string """ srp_id = srp if slo else "None" payload = ({"srpId": srp_id, "storageGroupId": storagegroup_name, "emulation": "FBA"}) if slo: if self.is_next_gen_array(array): workload = 'NONE' slo_param = {"sloId": slo, "workloadSelection": workload, "volumeAttributes": [{ "volume_size": "0", "capacityUnit": "GB", "num_of_vols": 0}]} if do_disable_compression: slo_param.update({"noCompression": "true"}) elif self.is_compression_capable(array): slo_param.update({"noCompression": "false"}) payload.update({"sloBasedStorageGroupParam": [slo_param]}) status_code, job = self._create_storagegroup(array, payload) self.wait_for_job('Create storage group', status_code, job, extra_specs) return storagegroup_name def modify_storage_group(self, array, storagegroup, payload, version=None): """Modify a storage group (PUT operation). :param version: the uv4 version :param array: the array serial number :param storagegroup: storage group name :param payload: the request payload :returns: status_code -- int, message -- string, server response """ return self.modify_resource( array, SLOPROVISIONING, 'storagegroup', payload, self.u4p_version, resource_name=storagegroup) def modify_storage_array(self, array, payload): """Modify a storage array (PUT operation). 
:param array: the array serial number :param payload: the request payload :returns: status_code -- int, message -- string, server response """ target_uri = '/%s/sloprovisioning/symmetrix/%s' % ( self.u4p_version, array) status_code, message = self.request(target_uri, PUT, request_object=payload) operation = 'modify %(res)s resource' % {'res': 'symmetrix'} self.check_status_code_success(operation, status_code, message) return status_code, message def create_volume_from_sg(self, array, volume_name, storagegroup_name, volume_size, extra_specs, rep_info=None): """Create a new volume in the given storage group. :param array: the array serial number :param volume_name: the volume name (String) :param storagegroup_name: the storage group name :param volume_size: volume size (String) :param extra_specs: the extra specifications :param rep_info: replication info dict if volume is replication enabled :returns: dict -- volume_dict - the volume dict :raises: VolumeBackendAPIException """ payload = ( {"executionOption": "ASYNCHRONOUS", "editStorageGroupActionParam": { "expandStorageGroupParam": { "addVolumeParam": { "emulation": "FBA", "create_new_volumes": "False", "volumeAttributes": [ { "num_of_vols": 1, "volumeIdentifier": { "identifier_name": volume_name, "volumeIdentifierChoice": "identifier_name" }, "volume_size": volume_size, "capacityUnit": "GB"}]}}}}) if rep_info: payload = self.utils.update_payload_for_rdf_vol_create( payload, rep_info[utils.REMOTE_ARRAY], storagegroup_name) status_code, job = self.modify_storage_group( array, storagegroup_name, payload) LOG.debug("Create Volume: %(volumename)s. Status code: %(sc)lu.", {'volumename': volume_name, 'sc': status_code}) task = self.wait_for_job('Create volume', status_code, job, extra_specs) # Find the newly created volume. device_id = None if rep_info: updated_device_list = self.get_volume_list( array, {'storageGroupId': storagegroup_name, 'rdf_group_number': rep_info['rdf_group_no']}) unique_devices = self.utils.get_unique_device_ids_from_lists( rep_info['initial_device_list'], updated_device_list) if 0 < len(unique_devices) < 2: device_id = unique_devices[0] self.rename_volume(array, device_id, volume_name) else: raise exception.VolumeBackendAPIException(_( "There has been more than one volume created in the " "SRDF protected Storage Group since the current create " "volume process begun. Not possible to discern what " "volume has been created by PowerMax Cinder driver.")) # Find the newly created volume if not located as part of replication # OPT workaround if not device_id and task: for t in task: try: desc = t["description"] if CREATE_VOL_STRING in desc: t_list = desc.split() device_id = t_list[(len(t_list) - 1)] device_id = device_id[1:-1] break elif POPULATE_SG_LIST in desc: regex_str = (r'Populating Storage Group\(s\) ' + r'with volumes : \[(.+)\]$') full_str = re.compile(regex_str) match = full_str.match(desc) device_id = match.group(1) if match else None if device_id: self.get_volume(array, device_id) except Exception as e: LOG.info("Could not retrieve device id from job. " "Exception received was %(e)s. 
Attempting " "retrieval by volume_identifier.", {'e': e}) if not device_id: device_id = self.find_volume_device_id(array, volume_name) volume_dict = {utils.ARRAY: array, utils.DEVICE_ID: device_id} return volume_dict def add_storage_group_tag(self, array, storagegroup_name, tag_list, extra_specs): """Create a new tag(s) on a storage group :param array: the array serial number :param storagegroup_name: the storage group name :param tag_list: comma delimited list :param extra_specs: the extra specifications """ payload = ( {"executionOption": "ASYNCHRONOUS", "editStorageGroupActionParam": { "tagManagementParam": { "addTagsParam": { "tag_name": tag_list }}}}) status_code, job = self.modify_storage_group( array, storagegroup_name, payload) LOG.debug("Add tag to storage group: %(sg_name)s. " "Status code: %(sc)lu.", {'sg_name': storagegroup_name, 'sc': status_code}) self.wait_for_job( 'Add tag to storage group', status_code, job, extra_specs) def add_storage_array_tags(self, array, tag_list, extra_specs): """Create a new tag(s) on a storage group :param array: the array serial number :param tag_list: comma delimited list :param extra_specs: the extra specifications """ payload = ( {"executionOption": "ASYNCHRONOUS", "editSymmetrixActionParam": { "tagManagementParam": { "addTagsParam": { "tag_name": tag_list }}}}) status_code, job = self.modify_storage_array( array, payload) LOG.debug("Add tag to storage array: %(array)s. " "Status code: %(sc)lu.", {'array': array, 'sc': status_code}) self.wait_for_job( 'Add tag to storage array', status_code, job, extra_specs) def check_volume_device_id(self, array, device_id, volume_id, name_id=None): """Check if the identifiers match for a given volume. :param array: the array serial number :param device_id: the device id :param volume_id: cinder volume id :param name_id: name id - used in host_assisted migration, optional :returns: found_device_id """ found_device_id = None if not device_id: return found_device_id element_name = self.utils.get_volume_element_name(volume_id) vol_details = self.get_volume(array, device_id) if vol_details: vol_identifier = vol_details.get('volume_identifier', None) LOG.debug('Element name = %(en)s, Vol identifier = %(vi)s, ' 'Device id = %(di)s', {'en': element_name, 'vi': vol_identifier, 'di': device_id}) if vol_identifier: if vol_identifier in element_name: found_device_id = device_id if vol_identifier != element_name: LOG.debug("Device %(di)s is a legacy volume created " "using SMI-S.", {'di': device_id}) elif name_id: # This may be host-assisted migration case element_name = self.utils.get_volume_element_name(name_id) if vol_identifier == element_name: found_device_id = device_id else: LOG.error("We cannot verify that device %(dev)s was " "created/managed by openstack by its " "identifier name.", {'dev': device_id}) return found_device_id def add_vol_to_sg(self, array, storagegroup_name, device_id, extra_specs, force=False): """Add a volume to a storage group. 
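# ---------------------------------------------------------------------------
# Editorial note: create_volume_from_sg() above recovers the new device id
# from the job task descriptions when it cannot be derived any other way. A
# small stand-alone illustration of the two parsing branches follows; the
# exact wording of the sample descriptions is hypothetical beyond the
# "Creating new Volumes" / "Populating Storage Group(s) with volumes"
# markers used by the driver.
# ---------------------------------------------------------------------------
import re

desc_create = 'Creating new Volumes for MY-SG : [00123]'
desc_populate = 'Populating Storage Group(s) with volumes : [00123]'

# CREATE_VOL_STRING branch: last whitespace token, brackets stripped.
assert desc_create.split()[-1][1:-1] == '00123'

# POPULATE_SG_LIST branch: same regex as in create_volume_from_sg().
pattern = re.compile(r'Populating Storage Group\(s\) with volumes : \[(.+)\]$')
match = pattern.match(desc_populate)
assert match and match.group(1) == '00123'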
:param array: the array serial number :param storagegroup_name: storage group name :param device_id: the device id :param extra_specs: extra specifications :param force: add force argument to call """ if not isinstance(device_id, list): device_id = [device_id] force_add = self._check_force(extra_specs, force) payload = ({"executionOption": "ASYNCHRONOUS", "editStorageGroupActionParam": { "expandStorageGroupParam": { "addSpecificVolumeParam": { "volumeId": device_id, "remoteSymmSGInfoParam": { "force": force_add}}}}}) status_code, job = self.modify_storage_group( array, storagegroup_name, payload) self.wait_for_job('Add volume to sg', status_code, job, extra_specs) @retry(retry_exc_tuple, interval=2, retries=3) def remove_vol_from_sg(self, array, storagegroup_name, device_id, extra_specs): """Remove a volume from a storage group. :param array: the array serial number :param storagegroup_name: storage group name :param device_id: the device id :param extra_specs: the extra specifications """ force_vol_edit = self._check_force(extra_specs) if not isinstance(device_id, list): device_id = [device_id] payload = ({"executionOption": "ASYNCHRONOUS", "editStorageGroupActionParam": { "removeVolumeParam": { "volumeId": device_id, "remoteSymmSGInfoParam": { "force": force_vol_edit}}}}) status_code, job = self.modify_storage_group( array, storagegroup_name, payload) self.wait_for_job('Remove vol from sg', status_code, job, extra_specs) def update_storagegroup_qos(self, array, storage_group_name, extra_specs): """Update the storagegroupinstance with qos details. If maxIOPS or maxMBPS is in extra_specs, then DistributionType can be modified in addition to maxIOPS or/and maxMBPS If maxIOPS or maxMBPS is NOT in extra_specs, we check to see if either is set in StorageGroup. If so, then DistributionType can be modified :param array: the array serial number :param storage_group_name: the storagegroup instance name :param extra_specs: extra specifications :returns: bool, True if updated, else False """ return_value = False sg_details = self.get_storage_group(array, storage_group_name) sg_qos_details = None sg_maxiops = None sg_maxmbps = None sg_distribution_type = None property_dict = {} try: sg_qos_details = sg_details['hostIOLimit'] sg_maxiops = sg_qos_details['host_io_limit_io_sec'] sg_maxmbps = sg_qos_details['host_io_limit_mb_sec'] sg_distribution_type = sg_qos_details['dynamicDistribution'] except KeyError: LOG.debug("Unable to get storage group QoS details.") if 'total_iops_sec' in extra_specs.get('qos'): property_dict = self.utils.validate_qos_input( 'total_iops_sec', sg_maxiops, extra_specs.get('qos'), property_dict) if 'total_bytes_sec' in extra_specs.get('qos'): property_dict = self.utils.validate_qos_input( 'total_bytes_sec', sg_maxmbps, extra_specs.get('qos'), property_dict) if 'DistributionType' in extra_specs.get('qos') and property_dict: property_dict = self.utils.validate_qos_distribution_type( sg_distribution_type, extra_specs.get('qos'), property_dict) if property_dict: payload = {"editStorageGroupActionParam": { "setHostIOLimitsParam": property_dict}} status_code, message = ( self.modify_storage_group(array, storage_group_name, payload)) try: self.check_status_code_success('Add qos specs', status_code, message) return_value = True except Exception as e: LOG.error("Error setting qos. Exception received was: " "%(e)s", {'e': e}) return_value = False return return_value def set_storagegroup_srp( self, array, storagegroup_name, srp_name, extra_specs): """Modify a storage group's srp value. 
:param array: the array serial number :param storagegroup_name: the storage group name :param srp_name: the srp pool name :param extra_specs: the extra specifications """ payload = {"editStorageGroupActionParam": { "editStorageGroupSRPParam": {"srpId": srp_name}}} status_code, job = self.modify_storage_group( array, storagegroup_name, payload) self.wait_for_job("Set storage group srp", status_code, job, extra_specs) def get_vmax_default_storage_group( self, array, srp, slo, workload, do_disable_compression=False, is_re=False, rep_mode=None): """Get the default storage group. :param array: the array serial number :param srp: the pool name :param slo: the SLO :param workload: the workload :param do_disable_compression: flag for disabling compression :param is_re: flag for replication :param rep_mode: flag to indicate replication mode :returns: the storage group dict (or None), the storage group name """ if self.is_next_gen_array(array): workload = 'NONE' storagegroup_name = self.utils.get_default_storage_group_name( srp, slo, workload, do_disable_compression, is_re, rep_mode) storagegroup = self.get_storage_group(array, storagegroup_name) return storagegroup, storagegroup_name def delete_storage_group(self, array, storagegroup_name): """Delete a storage group. :param array: the array serial number :param storagegroup_name: storage group name """ self.delete_resource( array, SLOPROVISIONING, 'storagegroup', storagegroup_name) LOG.debug("Storage Group successfully deleted.") def move_volume_between_storage_groups( self, array, device_id, source_storagegroup_name, target_storagegroup_name, extra_specs, force=False): """Move a volume to a different storage group. :param array: the array serial number :param source_storagegroup_name: the originating storage group name :param target_storagegroup_name: the destination storage group name :param device_id: the device id :param extra_specs: extra specifications :param force: force flag (necessary on a detach) """ force_flag = self._check_force(extra_specs, force) payload = ({"executionOption": "ASYNCHRONOUS", "editStorageGroupActionParam": { "moveVolumeToStorageGroupParam": { "volumeId": [device_id], "storageGroupId": target_storagegroup_name, "force": force_flag}}}) status_code, job = self.modify_storage_group( array, source_storagegroup_name, payload) self.wait_for_job('move volume between storage groups', status_code, job, extra_specs) def get_volume(self, array, device_id): """Get a PowerMax/VMAX volume from array. :param array: the array serial number :param device_id: the volume device id :returns: volume dict :raises: VolumeBackendAPIException """ volume_dict = self.get_resource( array, SLOPROVISIONING, 'volume', resource_name=device_id) if not volume_dict: exception_message = (_("Volume %(deviceID)s not found.") % {'deviceID': device_id}) LOG.error(exception_message) raise exception.VolumeBackendAPIException( message=exception_message) return volume_dict def _get_private_volume(self, array, device_id): """Get a more detailed list of attributes of a volume. 
:param array: the array serial number :param device_id: the volume device id :returns: volume dict :raises: VolumeBackendAPIException """ try: wwn = (self.get_volume(array, device_id))['wwn'] params = {'wwn': wwn} volume_info = self.get_resource( array, SLOPROVISIONING, 'volume', params=params, private='/private') volume_dict = volume_info[0] except (KeyError, TypeError): exception_message = (_("Volume %(deviceID)s not found.") % {'deviceID': device_id}) LOG.error(exception_message) raise exception.VolumeBackendAPIException( message=exception_message) return volume_dict def get_volume_list(self, array, params): """Get a filtered list of PowerMax/VMAX volumes from array. Filter parameters are required as the unfiltered volume list could be very large and could affect performance if called often. :param array: the array serial number :param params: filter parameters :returns: device_ids -- list """ device_ids = [] volume_dict_list = self.get_resource( array, SLOPROVISIONING, 'volume', params=params) try: for vol_dict in volume_dict_list: device_id = vol_dict['volumeId'] device_ids.append(device_id) except (KeyError, TypeError): pass return device_ids def get_private_volume_list(self, array, params=None): """Retrieve list with volume details. :param array: the array serial number :param params: filter parameters :returns: list -- dicts with volume information """ if isinstance(params, dict): params['expiration_time_mins'] = ITERATOR_EXPIRATION elif isinstance(params, str): params += '&expiration_time_mins=%(expire)s' % { 'expire': ITERATOR_EXPIRATION} else: params = {'expiration_time_mins': ITERATOR_EXPIRATION} return self.get_resource( array, SLOPROVISIONING, 'volume', params=params, private='/private') def _modify_volume(self, array, device_id, payload): """Modify a volume (PUT operation). :param array: the array serial number :param device_id: volume device id :param payload: the request payload """ return self.modify_resource(array, SLOPROVISIONING, 'volume', payload, resource_name=device_id) def extend_volume(self, array, device_id, new_size, extra_specs, rdf_grp_no=None): """Extend a PowerMax/VMAX volume. :param array: the array serial number :param device_id: volume device id :param new_size: the new required size for the device :param extra_specs: the extra specifications :param rdf_grp_no: the RDG group number """ extend_vol_payload = {'executionOption': 'ASYNCHRONOUS', 'editVolumeActionParam': { 'expandVolumeParam': { 'volumeAttribute': { 'volume_size': new_size, 'capacityUnit': 'GB'}}}} if rdf_grp_no: extend_vol_payload['editVolumeActionParam'][ 'expandVolumeParam'].update({'rdfGroupNumber': rdf_grp_no}) status_code, job = self._modify_volume( array, device_id, extend_vol_payload) LOG.debug("Extend Device: %(device_id)s. Status code: %(sc)lu.", {'device_id': device_id, 'sc': status_code}) self.wait_for_job('Extending volume', status_code, job, extra_specs) def rename_volume(self, array, device_id, new_name): """Rename a volume. 
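        A sketch of the payload sent when a new name is supplied (the
        identifier below is an example); when new_name is None the
        volumeIdentifierChoice is set to "none" instead:

        .. code:: python

            {"editVolumeActionParam": {
                "modifyVolumeIdentifierParam": {
                    "volumeIdentifier": {
                        "identifier_name": "OS-example-volume",
                        "volumeIdentifierChoice": "identifier_name"}}}}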
:param array: the array serial number :param device_id: the volume device id :param new_name: the new name for the volume, can be None """ if not device_id: LOG.warning('No device id supplied to rename operation.') return if new_name is not None: vol_identifier_dict = { "identifier_name": new_name, "volumeIdentifierChoice": "identifier_name"} else: vol_identifier_dict = {"volumeIdentifierChoice": "none"} rename_vol_payload = {"editVolumeActionParam": { "modifyVolumeIdentifierParam": { "volumeIdentifier": vol_identifier_dict}}} self._modify_volume(array, device_id, rename_vol_payload) def delete_volume(self, array, device_id): """Delete a volume. :param array: the array serial number :param device_id: volume device id """ # Check if volume is in any storage group sg_list = self.get_storage_groups_from_volume( array, device_id) if sg_list: exception_message = (_( "Device %(device_id)s is in storage group(s) " "%(sg_list)s prior to delete.") % {'device_id': device_id, 'sg_list': sg_list}) LOG.error(exception_message) raise exception.VolumeBackendAPIException(exception_message) if ((self.ucode_major_level >= utils.UCODE_5978) and (self.ucode_minor_level > utils.UCODE_5978_ELMSR) or ( self.ucode_major_level >= utils.UCODE_6079)): # Use Rapid TDEV Deallocation to delete after ELMSR try: self.delete_resource(array, SLOPROVISIONING, "volume", device_id) except Exception as e: LOG.warning('Delete volume %(dev)s failed with %(e)s.', {'dev': device_id, 'e': e}) raise else: # Pre-Foxtail, deallocation and delete are separate calls payload = {"editVolumeActionParam": { "freeVolumeParam": {"free_volume": 'true'}}} try: # Rename volume, removing the OS- self.rename_volume(array, device_id, None) self._modify_volume(array, device_id, payload) except Exception as e: LOG.warning('Deallocate volume %(dev)s failed with %(e)s. ' 'Attempting delete.', {'dev': device_id, 'e': e}) # Try to delete the volume if deallocate failed. self.delete_resource(array, SLOPROVISIONING, "volume", device_id) @retry(retry_exc_tuple, interval=2, retries=8) def find_mv_connections_for_vol(self, array, maskingview, device_id): """Find the host_lun_id for a volume in a masking view. :param array: the array serial number :param maskingview: the masking view name :param device_id: the device ID :returns: host_lun_id -- int """ host_lun_id = None resource_name = ('%(maskingview)s/connections' % {'maskingview': maskingview}) params = {'volume_id': device_id} connection_info = self.get_resource( array, SLOPROVISIONING, 'maskingview', resource_name=resource_name, params=params) if not connection_info: LOG.error('Cannot retrieve masking view connection information ' 'for %(mv)s.', {'mv': maskingview}) else: try: masking_view_conn = connection_info.get( 'maskingViewConnection') if (masking_view_conn and isinstance( masking_view_conn, list) and len(masking_view_conn) > 0): host_lun_id = masking_view_conn[0].get( 'host_lun_address') if host_lun_id: host_lun_id = int(host_lun_id, 16) else: exception_message = (_("Unable to get " "host_lun_address for device " "%(dev)s on masking view " "%(mv)s. Retrying...") % {"dev": device_id, "mv": maskingview}) LOG.warning(exception_message) raise exception.VolumeBackendAPIException( message=exception_message) else: exception_message = (_("Unable to retrieve connection " "information for volume %(vol)s " "in masking view %(mv)s.
" "Retrying...") % {"vol": device_id, "mv": maskingview}) LOG.warning(exception_message) raise exception.VolumeBackendAPIException( message=exception_message) except Exception as e: exception_message = (_("Unable to retrieve connection " "information for volume %(vol)s " "in masking view %(mv)s. " "Exception received: %(e)s. " "Retrying...") % {"vol": device_id, "mv": maskingview, "e": e}) LOG.warning(exception_message) raise exception.VolumeBackendAPIException( message=exception_message) LOG.debug("The hostlunid is %(hli)s for %(dev)s.", {'hli': host_lun_id, 'dev': device_id}) return host_lun_id def get_storage_groups_from_volume(self, array, device_id): """Returns all the storage groups for a particular volume. :param array: the array serial number :param device_id: the volume device id :returns: storagegroup_list """ sg_list = [] if not device_id: return sg_list vol = self.get_volume(array, device_id) if vol and isinstance(vol, list): LOG.warning( "Device id %(dev_id)s has brought back " "multiple volume objects.", {'vol_name': device_id}) return sg_list if vol and vol.get('storageGroupId'): sg_list = vol['storageGroupId'] num_storage_groups = len(sg_list) LOG.debug("There are %(num)d storage groups associated " "with volume %(deviceId)s.", {'num': num_storage_groups, 'deviceId': device_id}) return sg_list def is_volume_in_storagegroup(self, array, device_id, storagegroup): """See if a volume is a member of the given storage group. :param array: the array serial number :param device_id: the device id :param storagegroup: the storage group name :returns: bool """ is_vol_in_sg = False sg_list = self.get_storage_groups_from_volume(array, device_id) if storagegroup in sg_list: is_vol_in_sg = True return is_vol_in_sg def find_volume_device_id(self, array, volume_name): """Given a volume identifier, find the corresponding device_id. :param array: the array serial number :param volume_name: the volume name (OS-) :returns: device_id """ device_id = None params = {"volume_identifier": volume_name} device_list = self.get_volume_list(array, params) if not device_list: LOG.debug("Cannot find record for volume %(vol_name)s.", {'vol_name': volume_name}) else: LOG.debug("The device id list is %(dev_list)s for %(vol_name)s.", {'dev_list': device_list, 'vol_name': volume_name}) device_id = device_list[0] if len(device_list) == 1 else ( device_list) if isinstance(device_id, list): LOG.warning("More than one devices returned for %(vol_name)s.", {'vol_name': volume_name}) return device_id def find_volume_identifier(self, array, device_id): """Get the volume identifier of a PowerMax/VMAX volume. :param array: array serial number :param device_id: the device id :returns: the volume identifier -- string """ vol = self.get_volume(array, device_id) return vol.get('volume_identifier') if vol else None def get_size_of_device_on_array(self, array, device_id): """Get the size of the volume from the array. :param array: the array serial number :param device_id: the volume device id :returns: size -- or None """ cap = None try: vol = self.get_volume(array, device_id) cap = vol['cap_gb'] except Exception as e: LOG.error("Error retrieving size of volume %(vol)s. " "Exception received was %(e)s.", {'vol': device_id, 'e': e}) return cap def get_portgroup(self, array, portgroup): """Get a portgroup from the array. 
:param array: array serial number :param portgroup: the portgroup name :returns: portgroup dict or None """ return self.get_resource( array, SLOPROVISIONING, 'portgroup', resource_name=portgroup) def get_port_ids(self, array, port_group_name): """Get a list of port identifiers from a port group. :param array: the array serial number :param port_group_name: the name of the portgroup :returns: list of port ids, e.g. ['FA-3D:35', 'FA-4D:32'] """ portlist = [] portgroup_info = self.get_portgroup(array, port_group_name) if portgroup_info: port_key = portgroup_info["symmetrixPortKey"] for key in port_key: port = "%s:%s" % (key['directorId'], key['portId']) portlist.append(port) else: exception_message = (_("Cannot find port group %(pg)s.") % {'pg': port_group_name}) raise exception.VolumeBackendAPIException(exception_message) return portlist def get_port(self, array, port_id): """Get director port details. :param array: the array serial number :param port_id: the port id :returns: port dict, or None """ dir_id = port_id.split(':')[0] port_no = port_id.split(':')[1] resource_name = ('%(directorId)s/port/%(port_number)s' % {'directorId': dir_id, 'port_number': port_no}) return self.get_resource(array, SYSTEM, 'director', resource_name=resource_name) def get_iscsi_ip_address_and_iqn(self, array, port_id): """Get the IPv4Address from the director port. :param array: the array serial number :param port_id: the director port identifier :returns: (list of ip_addresses, iqn) """ ip_addresses, iqn = None, None port_details = self.get_port(array, port_id) if port_details: ip_addresses = port_details['symmetrixPort']['ip_addresses'] iqn = port_details['symmetrixPort']['identifier'] return ip_addresses, iqn def get_nvme_tcp_ip_address(self, array, port_id): """Get the IPv4Address from the director port. :param array: the array serial number :param port_id: the director port identifier :returns: (list of ip_addresses) """ ip_addresses = None port_details = self.get_port(array, port_id) if port_details: ip_addresses = port_details['symmetrixPort']['ip_addresses'] return ip_addresses def get_ip_interface_physical_port(self, array_id, virtual_port, ip_address): """Get the physical port associated with a virtual port and IP address. :param array_id: the array serial number -- str :param virtual_port: the director & virtual port identifier -- str :param ip_address: the ip address associated with the port -- str :returns: physical director:port -- str """ if self.u4p_version == U4P_100_VERSION: target_key = 'iscsi_endpoint' elif self.u4p_version == U4P_92_VERSION: target_key = 'iscsi_target' else: msg = (_( "Unable to determine the target_key for version %(ver)s." % { 'ver': self.u4p_version} )) LOG.error(msg) raise exception.VolumeBackendAPIException(message=msg) director_id = virtual_port.split(':')[0] params = {'ip_list': ip_address, target_key: False} target_uri = self.build_uri( category=SYSTEM, resource_level='symmetrix', resource_level_id=array_id, resource_type='director', resource_type_id=director_id, resource='port') port_info = self.get_request( target_uri, 'port IP interface', params) if not port_info: msg = (_( "Unable to get port IP interface from Virtual port %(vp)s " "using IP address %(ip)s. Please check iSCSI configuration " "of backend array %(arr)s." 
% { 'vp': virtual_port, 'ip': ip_address, 'arr': array_id} )) LOG.error(msg) raise exception.VolumeBackendAPIException(message=msg) port_key = port_info.get('symmetrixPortKey', []) if len(port_key) == 1: port_info = port_key[0] port_id = port_info.get('portId') dir_port = '%(d)s:%(p)s' % {'d': director_id, 'p': port_id} else: if len(port_key) == 0: msg = (_( "Virtual port %(vp)s and IP address %(ip)s are not " "associated a physical director:port. Please check " "iSCSI configuration of backend array %(arr)s." % { 'vp': virtual_port, 'ip': ip_address, 'arr': array_id} )) else: msg = (_( "Virtual port %(vp)s and IP address %(ip)s are associated " "with more than one physical director:port. Please check " "iSCSI configuration of backend array %(arr)s." % { 'vp': virtual_port, 'ip': ip_address, 'arr': array_id} )) LOG.error(msg) raise exception.VolumeBackendAPIException(message=msg) return dir_port def get_target_wwns(self, array, portgroup): """Get the director ports' wwns. :param array: the array serial number :param portgroup: portgroup :returns: target_wwns -- the list of target wwns for the masking view """ target_wwns = [] port_ids = self.get_port_ids(array, portgroup) for port in port_ids: port_info = self.get_port(array, port) if port_info: wwn = port_info['symmetrixPort']['identifier'] target_wwns.append(wwn) else: LOG.error("Error retrieving port %(port)s " "from portgroup %(portgroup)s.", {'port': port, 'portgroup': portgroup}) return target_wwns def get_initiator_group(self, array, initiator_group=None, params=None): """Retrieve initiator group details from the array. :param array: the array serial number :param initiator_group: the initaitor group name :param params: optional filter parameters :returns: initiator group dict, or None """ return self.get_resource( array, SLOPROVISIONING, 'host', resource_name=initiator_group, params=params) def get_initiator(self, array, initiator_id): """Retrieve initiator details from the array. :param array: the array serial number :param initiator_id: the initiator id :returns: initiator dict, or None """ return self.get_resource( array, SLOPROVISIONING, 'initiator', resource_name=initiator_id) def get_initiator_list(self, array, params=None): """Retrieve initiator list from the array. :param array: the array serial number :param params: dict of optional params :returns: list of initiators """ init_dict = self.get_resource(array, SLOPROVISIONING, 'initiator', params=params) try: init_list = init_dict['initiatorId'] except (KeyError, TypeError): init_list = [] return init_list def get_initiator_group_from_initiator(self, array, initiator): """Given an initiator, get its corresponding initiator group, if any. :param array: the array serial number :param initiator: the initiator id :returns: found_init_group_name -- string """ found_init_group_name = None init_details = self.get_initiator(array, initiator) if init_details: found_init_group_name = init_details.get('host') else: LOG.error("Unable to retrieve initiator details for " "%(init)s.", {'init': initiator}) return found_init_group_name def create_initiator_group(self, array, init_group_name, init_list, extra_specs): """Create a new initiator group containing the given initiators. 
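        The request body assembled below, sketched with example host and
        initiator values:

        .. code:: python

            {"executionOption": "ASYNCHRONOUS",
             "hostId": "OS-example-IG",
             "initiatorId": ["10000000c9873cae", "10000000c9873caf"]}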
:param array: the array serial number :param init_group_name: the initiator group name :param init_list: the list of initiators :param extra_specs: extra specifications """ new_ig_data = ({"executionOption": "ASYNCHRONOUS", "hostId": init_group_name, "initiatorId": init_list}) sc, job = self.create_resource(array, SLOPROVISIONING, 'host', new_ig_data) self.wait_for_job('create initiator group', sc, job, extra_specs) def delete_initiator_group(self, array, initiatorgroup_name): """Delete an initiator group. :param array: the array serial number :param initiatorgroup_name: initiator group name """ self.delete_resource( array, SLOPROVISIONING, 'host', initiatorgroup_name) LOG.debug("Initiator Group successfully deleted.") def get_masking_view(self, array, masking_view_name): """Get details of a masking view. :param array: array serial number :param masking_view_name: the masking view name :returns: masking view dict """ return self.get_resource( array, SLOPROVISIONING, 'maskingview', masking_view_name) def get_masking_view_list(self, array, params): """Get a list of masking views from the array. :param array: array serial number :param params: optional GET parameters :returns: masking view list """ masking_view_list = [] masking_view_details = self.get_resource( array, SLOPROVISIONING, 'maskingview', params=params) try: masking_view_list = masking_view_details['maskingViewId'] except (KeyError, TypeError): pass return masking_view_list def get_masking_views_from_storage_group(self, array, storagegroup): """Return any masking views associated with a storage group. :param array: the array serial number :param storagegroup: the storage group name :returns: masking view list """ maskingviewlist = [] storagegroup = self.get_storage_group(array, storagegroup) if storagegroup and storagegroup.get('maskingview'): maskingviewlist = storagegroup['maskingview'] return maskingviewlist def get_masking_views_by_initiator_group( self, array, initiatorgroup_name): """Given initiator group, retrieve the masking view instance name. Retrieve the list of masking view instances associated with the given initiator group. :param array: the array serial number :param initiatorgroup_name: the name of the initiator group :returns: list of masking view names """ masking_view_list = [] ig_details = self.get_initiator_group( array, initiatorgroup_name) if ig_details: if ig_details.get('maskingview'): masking_view_list = ig_details['maskingview'] else: LOG.error("Error retrieving initiator group %(ig_name)s", {'ig_name': initiatorgroup_name}) return masking_view_list def get_element_from_masking_view( self, array, maskingview_name, portgroup=False, host=False, storagegroup=False): """Return the name of the specified element from a masking view. 
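        Exactly one of the portgroup/host/storagegroup flags should be set.
        A usage sketch, where ``rest`` stands in for an instance of this
        class:

        .. code:: python

            # Returns the masking view's portGroupId value.
            port_group_name = rest.get_element_from_masking_view(
                array, maskingview_name, portgroup=True)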
:param array: the array serial number :param maskingview_name: the masking view name :param portgroup: the port group name - optional :param host: the host name - optional :param storagegroup: the storage group name - optional :returns: name of the specified element -- string :raises: VolumeBackendAPIException """ element = None masking_view_details = self.get_masking_view(array, maskingview_name) if masking_view_details: if portgroup: element = masking_view_details['portGroupId'] elif host: element = masking_view_details['hostId'] elif storagegroup: element = masking_view_details['storageGroupId'] else: exception_message = (_("Error retrieving masking group.")) LOG.error(exception_message) raise exception.VolumeBackendAPIException( message=exception_message) return element def get_common_masking_views(self, array, portgroup_name, ig_name): """Get common masking views for a given portgroup and initiator group. :param array: the array serial number :param portgroup_name: the port group name :param ig_name: the initiator group name :returns: masking view list """ params = {'port_group_name': portgroup_name, 'host_or_host_group_name': ig_name} masking_view_list = self.get_masking_view_list(array, params) if not masking_view_list: LOG.info("No common masking views found for %(pg_name)s " "and %(ig_name)s.", {'pg_name': portgroup_name, 'ig_name': ig_name}) return masking_view_list def create_masking_view(self, array, maskingview_name, storagegroup_name, port_group_name, init_group_name, extra_specs): """Create a new masking view. :param array: the array serial number :param maskingview_name: the masking view name :param storagegroup_name: the storage group name :param port_group_name: the port group :param init_group_name: the initiator group :param extra_specs: extra specifications """ payload = ({"executionOption": "ASYNCHRONOUS", "portGroupSelection": { "useExistingPortGroupParam": { "portGroupId": port_group_name}}, "maskingViewId": maskingview_name, "hostOrHostGroupSelection": { "useExistingHostParam": { "hostId": init_group_name}}, "storageGroupSelection": { "useExistingStorageGroupParam": { "storageGroupId": storagegroup_name}}}) status_code, job = self.create_resource( array, SLOPROVISIONING, 'maskingview', payload) self.wait_for_job('Create masking view', status_code, job, extra_specs) def delete_masking_view(self, array, maskingview_name): """Delete a masking view. :param array: the array serial number :param maskingview_name: the masking view name """ return self.delete_resource( array, SLOPROVISIONING, 'maskingview', maskingview_name) def get_replication_capabilities(self, array): """Check what replication features are licensed and enabled. Example return value for this method: .. code:: python {"symmetrixId": "000197800128", "snapVxCapable": true, "rdfCapable": true} :param array :returns: capabilities dict for the given array """ array_capabilities = None target_uri = ("/%s/replication/capabilities/symmetrix" % self.u4p_version) capabilities = self.get_request( target_uri, 'replication capabilities') if capabilities: symm_list = capabilities['symmetrixCapability'] for symm in symm_list: if symm['symmetrixId'] == array: array_capabilities = symm break return array_capabilities def is_snapvx_licensed(self, array): """Check if the snapVx feature is licensed and enabled. :param array: the array serial number :returns: True if licensed and enabled; False otherwise. 
""" snap_capability = False capabilities = self.get_replication_capabilities(array) if capabilities: snap_capability = capabilities['snapVxCapable'] else: LOG.error("Cannot access replication capabilities " "for array %(array)s", {'array': array}) return snap_capability def create_volume_snap(self, array, snap_name, device_id, extra_specs, ttl=0): """Create a snapVx snapshot of a volume. :param array: the array serial number :param snap_name: the name of the snapshot :param device_id: the source device id :param extra_specs: the extra specifications :param ttl: time to live in hours, defaults to 0 """ payload = {"deviceNameListSource": [{"name": device_id}], "bothSides": 'false', "star": 'false', "force": 'false'} if int(ttl) > 0: payload['timeToLive'] = ttl payload['timeInHours'] = 'true' resource_type = 'snapshot/%(snap)s' % {'snap': snap_name} status_code, job = self.create_resource( array, REPLICATION, resource_type, payload, private='/private') self.wait_for_job('Create volume snapVx', status_code, job, extra_specs) def modify_volume_snap(self, array, source_id, target_id, snap_name, extra_specs, snap_id=None, link=False, unlink=False, rename=False, new_snap_name=None, restore=False, list_volume_pairs=None, copy=False, symforce=False): """Modify a snapvx snapshot :param array: the array serial number :param source_id: the source device id :param target_id: the target device id :param snap_name: the snapshot name :param extra_specs: extra specifications :param snap_id: the unique snap id of the snapVX :param link: Flag to indicate action = Link :param unlink: Flag to indicate action = Unlink :param rename: Flag to indicate action = Rename :param new_snap_name: Optional new snapshot name :param restore: Flag to indicate action = Restore :param list_volume_pairs: list of volume pairs to link, optional :param copy: If copy mode should be used for SnapVX target links """ action, operation, payload = '', '', {} copy = 'true' if copy else 'false' force = self._check_force(extra_specs) if link: action = "Link" elif unlink: action = "Unlink" elif rename: action = "Rename" elif restore: action = "Restore" if action == "Restore": operation = 'Restore snapVx snapshot' payload = {"deviceNameListSource": [{"name": source_id}], "deviceNameListTarget": [{"name": source_id}], "action": action, "star": 'false', "force": 'false'} elif action in ('Link', 'Unlink'): operation = 'Modify snapVx relationship to target' src_list, tgt_list = [], [] if list_volume_pairs: for a, b in list_volume_pairs: src_list.append({'name': a}) tgt_list.append({'name': b}) else: src_list.append({'name': source_id}) tgt_list.append({'name': target_id}) payload = {"deviceNameListSource": src_list, "deviceNameListTarget": tgt_list, "copy": copy, "action": action, "star": 'false', "force": force, "exact": 'false', "remote": 'false', "symforce": str(symforce).lower()} elif action == "Rename": operation = 'Rename snapVx snapshot' payload = {"deviceNameListSource": [{"name": source_id}], "deviceNameListTarget": [{"name": source_id}], "action": action, "newsnapshotname": new_snap_name} if self.is_snap_id: payload.update({"snap_id": snap_id}) if snap_id else ( payload.update({"generation": "0"})) else: payload.update({"generation": snap_id}) if snap_id else ( payload.update({"generation": "0"})) if action: status_code, job = self.modify_resource( array, REPLICATION, 'snapshot', payload, resource_name=snap_name, private='/private') self.wait_for_job(operation, status_code, job, extra_specs) def delete_volume_snap(self, array, 
snap_name, source_device_ids, snap_id=None, restored=False): """Delete the snapshot of a volume or volumes. :param array: the array serial number :param snap_name: the name of the snapshot :param source_device_ids: the source device ids :param snap_id: the unique snap id of the snapVX :param restored: Flag to indicate terminate restore session """ device_list = [] if not isinstance(source_device_ids, list): source_device_ids = [source_device_ids] for dev in source_device_ids: device_list.append({"name": dev}) payload = {"deviceNameListSource": device_list} if self.is_snap_id: payload.update({"snap_id": snap_id}) if snap_id else ( payload.update({"generation": 0})) else: payload.update({"generation": snap_id}) if snap_id else ( payload.update({"generation": 0})) if restored: payload.update({"restore": True}) LOG.debug("The payload is %(payload)s.", {'payload': payload}) return self.delete_resource( array, REPLICATION, 'snapshot', snap_name, payload=payload, private='/private') def get_volume_snap_info(self, array, source_device_id): """Get snapVx information associated with a volume. :param array: the array serial number :param source_device_id: the source volume device ID :returns: message -- dict, or None """ resource_name = ("%(device_id)s/snapshot" % {'device_id': source_device_id}) return self.get_resource(array, REPLICATION, 'volume', resource_name, private='/private') def get_volume_snap(self, array, device_id, snap_name, snap_id): """Given a volume snap info, retrieve the snapVx object. :param array: the array serial number :param device_id: the source volume device id :param snap_name: the name of the snapshot :param snap_id: the unique snap id of the snapVX :returns: snapshot dict, or None """ snapshot = None snap_info = self.get_volume_snap_info(array, device_id) if snap_info: if (snap_info.get('snapshotSrcs', None) and bool(snap_info['snapshotSrcs'])): for snap in snap_info['snapshotSrcs']: if snap['snapshotName'] == snap_name: if self.is_snap_id: if snap['snap_id'] == snap_id: snapshot = snap break else: if snap['generation'] == snap_id: snapshot = snap break return snapshot def get_volume_snaps(self, array, device_id, snap_name): """Given a volume snap info, retrieve the snapVx object. :param array: the array serial number :param device_id: the source volume device id :param snap_name: the name of the snapshot :returns: snapshot dict, or None """ snapshots = list() snap_info = self.get_volume_snap_info(array, device_id) if snap_info: if (snap_info.get('snapshotSrcs', None) and bool(snap_info['snapshotSrcs'])): for snap in snap_info['snapshotSrcs']: if snap['snapshotName'] == snap_name: snapshots.append(snap) return snapshots def get_volume_snapshot_list(self, array, source_device_id): """Get a list of snapshot details for a particular volume. :param array: the array serial number :param source_device_id: the osurce device id :returns: snapshot list or None """ snapshot_list = [] snap_info = self.get_volume_snap_info(array, source_device_id) if snap_info: if (snap_info.get('snapshotSrcs', None) and bool(snap_info['snapshotSrcs'])): snapshot_list = snap_info['snapshotSrcs'] return snapshot_list def is_vol_in_rep_session(self, array, device_id): """Check if a volume is in a replication session. 
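        A usage sketch showing the order of the returned values (``rest``
        stands in for an instance of this class):

        .. code:: python

            snapvx_tgt, snapvx_src, rdf_grp = rest.is_vol_in_rep_session(
                array, device_id)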
:param array: the array serial number :param device_id: the device id :returns: snapvx_tgt -- bool, snapvx_src -- bool, rdf_grp -- list or None """ snapvx_src = False snapvx_tgt = False rdf_grp = None volume_details = self.get_volume(array, device_id) if volume_details and isinstance(volume_details, dict): if volume_details.get('snapvx_target'): snapvx_tgt = volume_details['snapvx_target'] if volume_details.get('snapvx_source'): snapvx_src = volume_details['snapvx_source'] if volume_details.get('rdfGroupId'): rdf_grp = volume_details['rdfGroupId'] return snapvx_tgt, snapvx_src, rdf_grp def is_sync_complete(self, array, source_device_id, target_device_id, snap_name, extra_specs, snap_id): """Check if a sync session is complete. :param array: the array serial number :param source_device_id: source device id :param target_device_id: target device id :param snap_name: snapshot name :param extra_specs: extra specifications :param snap_id: the unique snap id of the SnapVX :returns: bool """ def _wait_for_sync(): """Called at an interval until the synchronization is finished. :raises: loopingcall.LoopingCallDone :raises: VolumeBackendAPIException """ retries = kwargs['retries'] try: kwargs['retries'] = retries + 1 if not kwargs['wait_for_sync_called']: if self._is_sync_complete( array, source_device_id, snap_name, target_device_id, snap_id): kwargs['wait_for_sync_called'] = True except Exception: exception_message = (_("Issue encountered waiting for " "synchronization.")) LOG.error(exception_message) raise exception.VolumeBackendAPIException( message=exception_message) if kwargs['retries'] > int(extra_specs[utils.RETRIES]): LOG.error("_wait_for_sync failed after %(retries)d " "tries.", {'retries': retries}) raise loopingcall.LoopingCallDone( retvalue=int(extra_specs[utils.RETRIES])) if kwargs['wait_for_sync_called']: raise loopingcall.LoopingCallDone() kwargs = {'retries': 0, 'wait_for_sync_called': False} timer = loopingcall.FixedIntervalLoopingCall(_wait_for_sync) rc = timer.start(interval=int(extra_specs[utils.INTERVAL])).wait() return rc def _is_sync_complete(self, array, source_device_id, snap_name, target_device_id, snap_id): """Helper function to check if snapVx sync session is complete. :param array: the array serial number :param source_device_id: source device id :param snap_name: the snapshot name :param target_device_id: the target device id :param snap_id: the unique snap id of the SnapVX :returns: defined -- bool """ defined = True session = self.get_sync_session( array, source_device_id, snap_name, target_device_id, snap_id) if session: defined = session['defined'] return defined def get_sync_session(self, array, source_device_id, snap_name, target_device_id, snap_id): """Get a particular sync session. :param array: the array serial number :param source_device_id: source device id :param snap_name: the snapshot name :param target_device_id: the target device id :param snap_id: the unique snapid of the snapshot :returns: sync session -- dict, or None """ session = None linked_device_list = self.get_snap_linked_device_list( array, source_device_id, snap_name, snap_id) for target in linked_device_list: if target_device_id == target['targetDevice']: session = target break return session def _find_snap_vx_source_sessions(self, array, source_device_id): """Find all snap sessions for a given source volume. 
:param array: the array serial number :param source_device_id: the source device id :returns: list of snapshot dicts """ snap_dict_list = [] snapshots = self.get_volume_snapshot_list(array, source_device_id) for snapshot in snapshots: try: snap_id = snapshot['snap_id'] if self.is_snap_id else ( snapshot['generation']) if bool(snapshot['linkedDevices']): link_info = {'linked_vols': snapshot['linkedDevices'], 'snap_name': snapshot['snapshotName'], 'snapid': snap_id} snap_dict_list.append(link_info) except KeyError: pass return snap_dict_list def get_snap_linked_device_list(self, array, source_device_id, snap_name, snap_id, state=None): """Get the list of linked devices for a particular snapVx snapshot. :param array: the array serial number :param source_device_id: source device id :param snap_name: the snapshot name :param snap_id: the unique snapid of the snapshot :param state: filter for state of the link :returns: linked_device_list or empty list """ snap_dict_list = None linked_device_list = [] snap_dict_list = self._get_snap_linked_device_dict_list( array, source_device_id, snap_name, state=state) for snap_dict in snap_dict_list: if snap_id == snap_dict['snapid']: linked_device_list = snap_dict['linked_vols'] break return linked_device_list def _get_snap_linked_device_dict_list( self, array, source_device_id, snap_name, state=None): """Get list of linked devices for all snap ids for a snapVx snapshot :param array: the array serial number :param source_device_id: source device id :param snap_name: the snapshot name :param state: filter for state of the link :returns: list of dict of snapids with linked devices """ snap_dict_list = [] snap_list = self._find_snap_vx_source_sessions( array, source_device_id) for snap in snap_list: if snap['snap_name'] == snap_name: for linked_vol in snap['linked_vols']: snap_state = linked_vol.get('state', None) # If state is None or # both snap_state and state are not None and are equal if not state or (snap_state and state and snap_state == state): snap_id = snap['snapid'] found = False for snap_dict in snap_dict_list: if snap_id == snap_dict['snapid']: snap_dict['linked_vols'].append( linked_vol) found = True break if not found: snap_dict_list.append( {'snapid': snap_id, 'linked_vols': [linked_vol]}) return snap_dict_list def find_snap_vx_sessions(self, array, device_id, tgt_only=False): """Find all snapVX sessions for a device (source and target). 
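        An illustrative sketch of the dicts assembled below; every value is
        an example, but the keys match those set in the method body:

        .. code:: python

            # One entry of snap_src_dict_list / the snap_tgt_dict
            {'source_vol_id': '00123', 'target_vol_id': '00124',
             'snap_name': 'OS-example-snap', 'snapid': 4567,
             'expired': False, 'copy_mode': False, 'state': 'Linked'}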
:param array: the array serial number :param device_id: the device id :param tgt_only: Flag - return only sessions where device is target :returns: list of snapshot dicts """ snap_tgt_dict, snap_src_dict_list = dict(), list() s_in = self.get_volume_snap_info(array, device_id) snap_src = ( s_in['snapshotSrcs'] if s_in and s_in.get( 'snapshotSrcs') else list()) snap_tgt = ( s_in['snapshotLnks'][0] if s_in and s_in.get( 'snapshotLnks') else dict()) if snap_src and not tgt_only: for session in snap_src: snap_src_dict = dict() snap_src_dict['source_vol_id'] = device_id snap_src_dict['snapid'] = session.get( 'snap_id') if self.is_snap_id else session.get( 'generation') snap_src_dict['snap_name'] = session.get('snapshotName') snap_src_dict['expired'] = session.get('expired') if session.get('linkedDevices'): snap_src_link = session.get('linkedDevices')[0] snap_src_dict['target_vol_id'] = snap_src_link.get( 'targetDevice') snap_src_dict['copy_mode'] = snap_src_link.get('copy') snap_src_dict['state'] = snap_src_link.get('state') snap_src_dict_list.append(snap_src_dict) if snap_tgt: snap_tgt_dict['source_vol_id'] = snap_tgt.get('linkSourceName') snap_tgt_dict['target_vol_id'] = device_id snap_tgt_dict['state'] = snap_tgt.get('state') snap_tgt_dict['copy_mode'] = snap_tgt.get('copy') vol_info = self._get_private_volume(array, device_id) if vol_info.get('timeFinderInfo'): vol_tf_sessions = vol_info.get( 'timeFinderInfo').get('snapVXSession') if vol_tf_sessions: for session in vol_tf_sessions: if session.get('tgtSrcSnapshotGenInfo'): snap_tgt_link = session.get( 'tgtSrcSnapshotGenInfo') snap_tgt_dict['snap_name'] = snap_tgt_link.get( 'snapshotName') snap_tgt_dict['expired'] = snap_tgt_link.get( 'expired') snap_tgt_dict['snapid'] = snap_tgt_link.get( 'snapid') if self.is_snap_id else ( snap_tgt_link.get('generation')) return snap_src_dict_list, snap_tgt_dict def get_rdf_group(self, array, rdf_number): """Get specific rdf group details. :param array: the array serial number :param rdf_number: the rdf number """ return self.get_resource(array, REPLICATION, 'rdf_group', rdf_number) def get_storage_group_rdf_group_state(self, array, storage_group, rdf_group_no): """Get the RDF group state from a replication enabled Storage Group. :param array: the array serial number :param storage_group: the storage group name :param rdf_group_no: the RDF group number :returns: storage group RDF group state """ resource = ('storagegroup/%(sg)s/rdf_group/%(rdfg)s' % { 'sg': storage_group, 'rdfg': rdf_group_no}) rdf_group = self.get_resource(array, REPLICATION, resource) return rdf_group.get('states', list()) if rdf_group else dict() def get_storage_group_rdf_groups(self, array, storage_group): """Get a list of rdf group numbers used by a storage group. :param array: the array serial number -- str :param storage_group: the storage group name to check -- str :return: RDFGs associated with the storage group -- dict """ resource = ('storagegroup/%(storage_group)s/rdf_group' % { 'storage_group': storage_group}) storage_group_details = self.get_resource(array, REPLICATION, resource) return storage_group_details['rdfgs'] def get_rdf_group_list(self, array): """Get rdf group list from array. :param array: the array serial number """ return self.get_resource(array, REPLICATION, 'rdf_group') def get_rdf_group_volume_list(self, array, rdf_group_no): """Get a list of all volumes in an RDFG. 
:param array: the array serial number -- str :param rdf_group_no: the RDF group number -- str :return: RDFG volume list -- list """ resource = ('rdf_group/%(rdf_group)s/volume' % { 'rdf_group': rdf_group_no}) rdf_group_volumes = self.get_resource(array, REPLICATION, resource) return rdf_group_volumes['name'] def get_rdf_group_volume(self, array, src_device_id): """Get the RDF details for a volume. :param array: the array serial number :param src_device_id: the source device id :returns: rdf_session """ rdf_session = None volume = self._get_private_volume(array, src_device_id) try: rdf_session = volume['rdfInfo']['RDFSession'][0] except (KeyError, TypeError, IndexError): LOG.warning("Cannot locate source RDF volume %s", src_device_id) return rdf_session def get_rdf_pair_volume(self, array, rdf_group_no, device_id): """Get information on an RDF pair from the source volume. :param array: the array serial number :param rdf_group_no: the RDF group number :param device_id: the source device ID :returns: RDF pair information -- dict """ resource = ('rdf_group/%(rdf_group)s/volume/%(device)s' % { 'rdf_group': rdf_group_no, 'device': device_id}) return self.get_resource(array, REPLICATION, resource) def are_vols_rdf_paired(self, array, remote_array, device_id, target_device): """Check if a pair of volumes are RDF paired. :param array: the array serial number :param remote_array: the remote array serial number :param device_id: the device id :param target_device: the target device id :returns: paired -- bool, local_vol_state, rdf_pair_state """ paired, local_vol_state, rdf_pair_state = False, '', '' rdf_session = self.get_rdf_group_volume(array, device_id) if rdf_session: remote_volume = rdf_session['remoteDeviceID'] remote_symm = rdf_session['remoteSymmetrixID'] if (remote_volume == target_device and remote_array == remote_symm): paired = True local_vol_state = rdf_session['SRDFStatus'] rdf_pair_state = rdf_session['pairState'] else: LOG.warning("Cannot locate RDF session for volume %s", device_id) return paired, local_vol_state, rdf_pair_state def wait_for_rdf_group_sync(self, array, storage_group, rdf_group_no, rep_extra_specs): """Wait for an RDF group to reach 'Synchronised' state. :param array: the array serial number :param storage_group: the storage group name :param rdf_group_no: the RDF group number :param rep_extra_specs: replication extra specifications :raises: exception.VolumeBackendAPIException """ def _wait_for_synced_state(): try: kwargs['retries'] -= 1 if not kwargs['synced']: rdf_group_state = self.get_storage_group_rdf_group_state( array, storage_group, rdf_group_no) if rdf_group_state: kwargs['state'] = rdf_group_state[0] if kwargs['state'].lower() in utils.RDF_SYNCED_STATES: kwargs['synced'] = True kwargs['rc'] = 0 except Exception as e_msg: ex_msg = _("Issue encountered waiting for job: %(e)s" % { 'e': e_msg}) LOG.error(ex_msg) raise exception.VolumeBackendAPIException(message=ex_msg) if kwargs['retries'] == 0: ex_msg = _("Wait for RDF Sync State failed after %(r)d " "tries." % {'r': rep_extra_specs['sync_retries']}) LOG.error(ex_msg) raise exception.VolumeBackendAPIException(message=ex_msg) if kwargs['synced']: raise loopingcall.LoopingCallDone() kwargs = {'retries': rep_extra_specs['sync_retries'], 'synced': False, 'rc': 0, 'state': 'syncinprog'} timer = loopingcall.FixedIntervalLoopingCall(_wait_for_synced_state) timer.start(interval=rep_extra_specs['sync_interval']).wait() LOG.debug("Return code is: %(rc)lu. 
State is %(state)s", {'rc': kwargs['rc'], 'state': kwargs['state']}) def wait_for_rdf_pair_sync(self, array, rdf_group_no, device_id, rep_extra_specs): """Wait for an RDF device pair to reach 'Synchronised' state. :param array: the array serial number :param rdf_group_no: the RDF group number :param device_id: the source device ID :param rep_extra_specs: replication extra specifications :raises: exception.VolumeBackendAPIException """ def _wait_for_synced_state(): try: kwargs['retries'] -= 1 if not kwargs['synced']: rdf_pair = self.get_rdf_pair_volume(array, rdf_group_no, device_id) kwargs['state'] = rdf_pair['rdfpairState'] if kwargs['state'].lower() in utils.RDF_SYNCED_STATES: kwargs['synced'] = True kwargs['rc'] = 0 except Exception as e_msg: ex_msg = _("Issue encountered waiting for job: %(e)s" % { 'e': e_msg}) LOG.error(ex_msg) raise exception.VolumeBackendAPIException(message=ex_msg) if kwargs['retries'] == 0: ex_msg = _("Wait for RDF Sync State failed after %(r)d " "tries." % {'r': rep_extra_specs['sync_retries']}) LOG.error(ex_msg) raise exception.VolumeBackendAPIException(message=ex_msg) if kwargs['synced']: raise loopingcall.LoopingCallDone() kwargs = {'retries': rep_extra_specs['sync_retries'], 'synced': False, 'rc': 0, 'state': 'syncinprog'} timer = loopingcall.FixedIntervalLoopingCall(_wait_for_synced_state) timer.start(interval=rep_extra_specs['sync_interval']).wait() LOG.debug("Return code is: %(rc)lu. State is %(state)s", {'rc': kwargs['rc'], 'state': kwargs['state']}) def rdf_resume_with_retries(self, array, rep_extra_specs): """Resume RDF on a RDF group with retry operator included. The retry operator is required here because on occassion when we are waiting on a snap copy session to complete we have no way of determining if the copy is complete, operation is retried until either the copy completes or the max interval/retries has been met. :param array: the array serial number :param rep_extra_specs: replication extra specifications :raises: exception.VolumeBackendAPIException """ def wait_for_copy_complete(): kwargs['retries'] -= 1 if not kwargs['copied']: try: self.srdf_resume_replication( array, rep_extra_specs['sg_name'], rep_extra_specs['rdf_group_no'], rep_extra_specs, async_call=False) kwargs['copied'] = True kwargs['state'] = 'copy_complete' kwargs['rc'] = 0 raise loopingcall.LoopingCallDone() except exception.VolumeBackendAPIException: LOG.debug('Snapshot copy process still ongoing, Cinder ' 'will retry again in %(interval)s seconds. ' 'There are %(retries)s remaining.', { 'interval': rep_extra_specs['sync_interval'], 'retries': kwargs['retries']}) if kwargs['retries'] == 0: ex_msg = _("Wait for snapshot copy complete failed after " "%(r)d tries." % { 'r': rep_extra_specs['sync_retries']}) LOG.error(ex_msg) raise exception.VolumeBackendAPIException(message=ex_msg) kwargs = {'retries': rep_extra_specs['sync_retries'], 'copied': False, 'rc': 0, 'state': 'copy_in_progress'} timer = loopingcall.FixedIntervalLoopingCall(wait_for_copy_complete) timer.start(interval=rep_extra_specs['sync_interval']).wait() LOG.debug("Return code: %(rc)lu. State: %(state)s", {'rc': kwargs['rc'], 'state': kwargs['state']}) def get_rdf_group_number(self, array, rdf_group_label): """Given an rdf_group_label, return the associated group number. 
:param array: the array serial number :param rdf_group_label: the group label :returns: rdf_group_number """ number = None rdf_list = self.get_rdf_group_list(array) if rdf_list and rdf_list.get('rdfGroupID'): number_list = [rdf['rdfgNumber'] for rdf in rdf_list['rdfGroupID'] if rdf['label'] == rdf_group_label] number = number_list[0] if len(number_list) > 0 else None if number: rdf_group = self.get_rdf_group(array, number) if not rdf_group: number = None return number def get_metro_payload_info(self, array, payload, rdf_group_no, extra_specs, next_gen): """Get the payload details for a metro active create pair. :param array: the array serial number :param payload: the payload :param rdf_group_no: the rdf group number :param extra_specs: the replication configuration :param next_gen: if the array is next gen uCode :returns: updated payload """ num_vols = 0 payload["rdfMode"] = "Active" payload['rdfType'] = "RDF1" rdfg_details = self.get_rdf_group(array, rdf_group_no) if rdfg_details is not None and rdfg_details.get('numDevices'): num_vols = int(rdfg_details['numDevices']) if num_vols == 0: # First volume - set bias if required if extra_specs.get(utils.METROBIAS): payload.update({"metroBias": "true"}) else: if next_gen: payload["exempt"] = "true" if payload.get('establish'): payload.pop('establish') return payload @retry(retry_exc_tuple, interval=5, retries=6) def srdf_protect_storage_group( self, array_id, remote_array_id, rdf_group_no, replication_mode, sg_name, service_level, extra_specs, target_sg=None): """SRDF protect a storage group. :param array_id: local array serial number :param remote_array_id: remote array serial number :param rdf_group_no: RDF group number :param replication_mode: replication mode :param sg_name: storage group name :param service_level: service level :param extra_specs: extra specifications :param target_sg: target storage group -- optional """ remote_sg = target_sg if target_sg else sg_name payload = { "executionOption": "ASYNCHRONOUS", "replicationMode": replication_mode, "remoteSLO": service_level, "remoteSymmId": remote_array_id, "rdfgNumber": rdf_group_no, "remoteStorageGroupName": remote_sg, "establish": "true"} # Metro specific configuration if replication_mode == utils.REP_METRO: bias = "true" if extra_specs.get(utils.METROBIAS) else "false" payload.update({ "replicationMode": "Active", "metroBias": bias}) LOG.debug('SRDF Protect Payload: %(pay)s', {'pay': payload}) resource = 'storagegroup/%(sg_name)s/rdf_group' % {'sg_name': sg_name} status_code, job = self.create_resource(array_id, REPLICATION, resource, payload) self.wait_for_job('SRDF Protect Storage Group', status_code, job, extra_specs) def srdf_modify_group(self, array, rdf_group_no, storage_group, payload, extra_specs, msg, async_call=True): """Modify RDF enabled storage group replication options. :param array: array serial number :param rdf_group_no: RDF group number :param storage_group: storage group name :param payload: REST request payload dict :param extra_specs: extra specifications :param msg: message to use for logs when waiting on job to complete :param async_call: if the REST call should be run, this only comes into effect when trying to resume replication and interval/retries are a factor. 
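        For example, srdf_suspend_replication below passes a payload of
        roughly this shape (values shown are examples):

        .. code:: python

            {"action": "Suspend",
             "suspend": {"force": "true", "consExempt": "false"}}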
""" resource = ('storagegroup/%(sg_name)s/rdf_group/%(rdf_group_no)s' % { 'sg_name': storage_group, 'rdf_group_no': rdf_group_no}) if async_call: payload.update({"executionOption": "ASYNCHRONOUS"}) status_code, job = self.modify_resource(array, REPLICATION, resource, payload) self.wait_for_job(msg, status_code, job, extra_specs) else: self.modify_resource(array, REPLICATION, resource, payload) def srdf_suspend_replication(self, array_id, storage_group, rdf_group_no, rep_extra_specs): """Suspend replication on a RDF group. :param array_id: array serial number :param storage_group: storage group name :param rdf_group_no: RDF group number :param rep_extra_specs: replication extra specifications """ group_state = self.get_storage_group_rdf_group_state( array_id, storage_group, rdf_group_no) if group_state: group_state = [x.lower() for x in group_state] if len(group_state) == 1 and utils.RDF_SUSPENDED_STATE in group_state: LOG.info('SRDF Group %(grp_num)s is already in a suspended state', {'grp_num': rdf_group_no}) else: cons_exempt = self._get_cons_exempt( array_id, storage_group, rdf_group_no, rep_extra_specs['rep_mode']) payload = {"suspend": {"force": "true"}, "action": "Suspend"} payload["suspend"]["consExempt"] = cons_exempt self.srdf_modify_group( array_id, rdf_group_no, storage_group, payload, rep_extra_specs, 'Suspend SRDF Group Replication') def _get_cons_exempt(self, array_id, storage_group, rdf_group_no, rep_mode=None): """Get the consistency exempt flag for a storage group. :param array_id: array serial number :param storage_group: storage group name :param rdf_group_no: RDF group number :param rep_mode: Replication mode of the SRDF session :returns: A boolean indicating if consistency is exempt """ if not rep_mode: return "false" resource = ('storagegroup/%(sg)s/rdf_group/%(rdfg)s' % { 'sg': storage_group, 'rdfg': rdf_group_no}) rdf_group = self.get_resource(array_id, REPLICATION, resource) modes = [rep_mode] if rdf_group and rdf_group.get('modes'): modes.append(rdf_group.get('modes')) # Ensure we don't see the error message: # "A problem occurred modifying the Storage Group # SRDF Group resource: The device is not in # asynchronous mode" cons_exempt = utils.REP_ASYNC in modes LOG.debug("Consistency exempt: %s", cons_exempt) return self._bool_to_str(cons_exempt) def srdf_resume_replication(self, array_id, storage_group, rdf_group_no, rep_extra_specs, async_call=True): """Resume replication on a RDF group. :param array_id: array serial number :param storage_group: storage group name :param rdf_group_no: RDF group number :param rep_extra_specs: replication extra specifications :param async_call: if the REST call should be run, this only comes into effect when trying to resume replication and interval/retries are a factor. 
""" if self.get_storage_group(array_id, storage_group): group_state = self.get_storage_group_rdf_group_state( array_id, storage_group, rdf_group_no) if group_state: group_state = [x.lower() for x in group_state] if utils.RDF_SUSPENDED_STATE in group_state: payload = {"action": "Resume"} if rep_extra_specs['rep_mode'] == utils.REP_METRO: payload = {"action": "Establish"} if rep_extra_specs.get(utils.METROBIAS): payload.update({"establish": {"metroBias": "true"}}) self.srdf_modify_group( array_id, rdf_group_no, storage_group, payload, rep_extra_specs, 'Resume SRDF Group Replication', async_call) else: LOG.debug('SRDF Group %(grp_num)s is already in a resumed ' 'state.', {'grp_num': rdf_group_no}) else: LOG.debug('Storage Group %(sg)s not present on array ' '%(array)s, no resume required.', { 'sg': storage_group, 'array': array_id}) def srdf_establish_replication(self, array_id, storage_group, rdf_group_no, rep_extra_specs): """Establish replication on a RDF group. :param array_id: array serial number :param storage_group: storage group name :param rdf_group_no: RDF group number :param rep_extra_specs: replication extra specifications """ group_state = self.get_storage_group_rdf_group_state( array_id, storage_group, rdf_group_no) if utils.RDF_SUSPENDED_STATE not in group_state: LOG.info('Suspending SRDF Group %(grp_num)s', { 'grp_num': rdf_group_no}) self.srdf_modify_group( array_id, rdf_group_no, storage_group, {"action": "Suspend"}, rep_extra_specs, 'Suspend SRDF Group Replication') wait_msg = 'Incremental Establish SRDF Group Replication' LOG.info('Initiating incremental establish on SRDF Group %(grp_num)s', {'grp_num': rdf_group_no}) self.srdf_modify_group( array_id, rdf_group_no, storage_group, {"action": "Establish"}, rep_extra_specs, wait_msg) def srdf_failover_group(self, array_id, storage_group, rdf_group_no, rep_extra_specs): """Failover a RDFG/SG volume group to replication target. :param array_id: array serial number :param storage_group: storage group name :param rdf_group_no: RDF group number :param rep_extra_specs: replication extra specifications """ self.srdf_modify_group( array_id, rdf_group_no, storage_group, {"action": "Failover"}, rep_extra_specs, 'Failing over SRDF group replication') def srdf_failback_group(self, array_id, storage_group, rdf_group_no, rep_extra_specs): """Failback a RDFG/SG volume group from replication target. :param array_id: :param storage_group: :param rdf_group_no: :param rep_extra_specs: """ self.srdf_modify_group( array_id, rdf_group_no, storage_group, {"action": "Failback"}, rep_extra_specs, 'Failing back SRDF group replication') def srdf_remove_device_pair_from_storage_group( self, array_id, storage_group, remote_array_id, device_id, rep_extra_specs): """Remove a volume from local and remote storage groups simultaneously. :param array_id: local array serial number :param storage_group: storage group name :param remote_array_id: remote array serial number :param device_id: source device id :param rep_extra_specs: replication extra specifications """ payload = { "editStorageGroupActionParam": { "removeVolumeParam": { "volumeId": [device_id], "remoteSymmSGInfoParam": { "remote_symmetrix_1_id": remote_array_id, "remote_symmetrix_1_sgs": [storage_group]}}}} status_code, job = self.modify_storage_group(array_id, storage_group, payload) self.wait_for_job('SRDF Group remove device pair', status_code, job, rep_extra_specs) def srdf_delete_device_pair(self, array, rdf_group_no, local_device_id): """Delete a RDF device pair. 
:param array: array serial number :param rdf_group_no: RDF group number :param local_device_id: local device id """ resource = ('%(rdfg)s/volume/%(dev)s' % { 'rdfg': rdf_group_no, 'dev': local_device_id}) self.delete_resource(array, REPLICATION, 'rdf_group', resource) LOG.debug("Device Pair successfully deleted.") @retry(retry_exc_tuple, interval=5, retries=6) def srdf_create_device_pair(self, array, rdf_group_no, mode, device_id, rep_extra_specs, next_gen): """Create a RDF device pair in an existing RDF group. :param array: array serial number :param rdf_group_no: RDF group number :param mode: replication mode :param device_id: local device ID :param rep_extra_specs: replication extra specifications :param next_gen: if the array is next gen uCode :returns: replication session info -- dict """ payload = { "executionOption": "ASYNCHRONOUS", "rdfMode": mode, "localDeviceListCriteriaParam": {"localDeviceList": [device_id]}, "rdfType": "RDF1"} if mode == utils.REP_SYNC: payload.update({"establish": "true"}) elif mode == utils.REP_ASYNC: payload.update({"invalidateR2": "true", "exempt": "true"}) elif mode.lower() in [utils.REP_METRO.lower(), utils.RDF_ACTIVE.lower()]: payload = self.get_metro_payload_info( array, payload, rdf_group_no, rep_extra_specs, next_gen) LOG.debug('Create Pair Payload: %(pay)s', {'pay': payload}) resource = 'rdf_group/%(rdfg)s/volume' % {'rdfg': rdf_group_no} status_code, job = self.create_resource( array, REPLICATION, resource, payload) self.wait_for_job('SRDF Group remove device pair', status_code, job, rep_extra_specs) session_info = self.get_rdf_pair_volume(array, rdf_group_no, device_id) r2_device_id = session_info['remoteVolumeName'] return {'array': session_info['localSymmetrixId'], 'remote_array': session_info['remoteSymmetrixId'], 'src_device': device_id, 'tgt_device': r2_device_id, 'session_info': session_info} def get_or_rename_storage_group_rep( self, array, storage_group_name, extra_specs, sg_filter=None): """Get storage group rep info if it exist. If a generic volume group has been renamed we also need to rename it on the array based on the uuid component. We check for uuid if we cannot find it based on its old name. :param array: the array serial number :param storage_group_name: the name of the storage group :param extra_specs: extra specification :param sg_filter: uuid substring :returns: storage group dict or None """ rep_details = self.get_storage_group_rep(array, storage_group_name) if not rep_details: # It is possible that the group has been renamed if sg_filter: sg_dict = self.get_storage_group_list( array, params={ 'storageGroupId': sg_filter}) sg_list = sg_dict.get('storageGroupId') if sg_dict else None if sg_list and len(sg_list) == 1: rep_details = self.get_storage_group_rep( array, sg_list[0]) # Update the new storage group name if rep_details: self._rename_storage_group( array, sg_list[0], storage_group_name, extra_specs) rep_details = self.get_storage_group_rep( array, storage_group_name) LOG.warning( "Volume group %(old)s has been renamed to %(new)s " "due to a rename operation in OpenStack.", {'old': sg_list[0], 'new': storage_group_name}) return rep_details def get_storage_group_rep(self, array, storage_group_name): """Given a name, return storage group details wrt replication. 
:param array: the array serial number :param storage_group_name: the name of the storage group :returns: storage group dict or None """ return self.get_resource( array, REPLICATION, 'storagegroup', resource_name=storage_group_name) def get_volumes_in_storage_group(self, array, storagegroup_name): """Given a volume identifier, find the corresponding device_id. :param array: the array serial number :param storagegroup_name: the storage group name :returns: volume_list """ params = {"storageGroupId": storagegroup_name} volume_list = self.get_volume_list(array, params) if not volume_list: LOG.debug("Cannot find record for storage group %(storageGrpId)s", {'storageGrpId': storagegroup_name}) return volume_list def create_storagegroup_snap(self, array, source_group, snap_name, extra_specs): """Create a snapVx snapshot of a storage group. :param array: the array serial number :param source_group: the source group name :param snap_name: the name of the snapshot :param extra_specs: the extra specifications """ payload = {"snapshotName": snap_name} resource_type = ('storagegroup/%(sg_name)s/snapshot' % {'sg_name': source_group}) status_code, job = self.create_resource( array, REPLICATION, resource_type, payload) self.wait_for_job('Create storage group snapVx', status_code, job, extra_specs) def delete_storagegroup_snap(self, array, source_group, snap_name, snap_id, force=False): """Delete a snapVx snapshot of a storage group. :param array: the array serial number :param source_group: the source group name :param snap_name: the name of the snapshot :param snap_id: the unique snap id of the SnapVX :param force: optional force flag """ force_flag = "true" if force else "false" query_params = {'force': force_flag} if force else None postfix_uri = "/snapid/%s" % snap_id if self.is_snap_id else ( "/generation/%s" % snap_id) resource_name = ("%(sg_name)s/snapshot/%(snap_name)s" "%(postfix_uri)s" % {'sg_name': source_group, 'snap_name': snap_name, 'postfix_uri': postfix_uri}) self.delete_resource( array, REPLICATION, 'storagegroup', resource_name=resource_name, params=query_params) def get_storage_group_snap_id_list( self, array, source_group, snap_name): """Get a snapshot and its snapid count information for an sg. :param array: name of the array -- str :param source_group: name of the storage group -- str :param snap_name: the name of the snapshot -- str :returns: snapids -- list """ postfix_uri = "snapid" if self.is_snap_id else "generation" resource_name = ("%(sg_name)s/snapshot/%(snap_name)s/%(postfix_uri)s" % {'sg_name': source_group, 'snap_name': snap_name, 'postfix_uri': postfix_uri}) response = self.get_resource(array, REPLICATION, 'storagegroup', resource_name=resource_name) if self.is_snap_id: return response.get('snapids', list()) if response else list() else: return response.get('generations', list()) if response else list() def get_storagegroup_rdf_details(self, array, storagegroup_name, rdf_group_num): """Get the remote replication details of a storage group. :param array: the array serial number :param storagegroup_name: the storage group name :param rdf_group_num: the rdf group number """ resource_name = ("%(sg_name)s/rdf_group/%(rdf_num)s" % {'sg_name': storagegroup_name, 'rdf_num': rdf_group_num}) return self.get_resource(array, REPLICATION, 'storagegroup', resource_name=resource_name) def replicate_group(self, array, storagegroup_name, rdf_group_num, remote_array, extra_specs): """Create a target group on the remote array and enable replication. 
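The pairing is created asynchronously in Synchronous replication mode with establish set to 'true', and the remote storage group is created with the same name as the local group.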
:param array: the array serial number :param storagegroup_name: the name of the group :param rdf_group_num: the rdf group number :param remote_array: the remote array serial number :param extra_specs: the extra specifications """ resource_name = ("storagegroup/%(sg_name)s/rdf_group" % {'sg_name': storagegroup_name}) payload = {"executionOption": "ASYNCHRONOUS", "replicationMode": utils.REP_SYNC, "remoteSymmId": remote_array, "remoteStorageGroupName": storagegroup_name, "rdfgNumber": rdf_group_num, "establish": 'true'} status_code, job = self.create_resource( array, REPLICATION, resource_name, payload) self.wait_for_job('Create storage group rdf', status_code, job, extra_specs) def _verify_rdf_state(self, array, storagegroup_name, rdf_group_num, action): """Verify if a storage group requires the requested state change. :param array: the array serial number :param storagegroup_name: the storage group name :param rdf_group_num: the rdf group number :param action: the requested action :returns: bool """ mod_rqd = False sg_rdf_details = self.get_storagegroup_rdf_details( array, storagegroup_name, rdf_group_num) if sg_rdf_details: state_list = sg_rdf_details['states'] LOG.debug("RDF state: %(sl)s; Action required: %(action)s", {'sl': state_list, 'action': action}) for state in state_list: if (action.lower() in ["establish", "failback", "resume"] and state.lower() in [utils.RDF_SUSPENDED_STATE, utils.RDF_FAILEDOVER_STATE]): mod_rqd = True break elif (action.lower() in ["split", "failover", "suspend"] and state.lower() in [utils.RDF_SYNC_STATE, utils.RDF_SYNCINPROG_STATE, utils.RDF_CONSISTENT_STATE, utils.RDF_ACTIVE, utils.RDF_ACTIVEACTIVE, utils.RDF_ACTIVEBIAS]): mod_rqd = True break return mod_rqd def delete_storagegroup_rdf(self, array, storagegroup_name, rdf_group_num): """Delete the rdf pairs for a storage group. :param array: the array serial number :param storagegroup_name: the name of the storage group :param rdf_group_num: the number of the rdf group """ resource_name = ('%(sg_name)s/rdf_group/%(rdf_num)s' % {'sg_name': storagegroup_name, 'rdf_num': rdf_group_num}) query_params = {'force': 'true'} self.delete_resource( array, REPLICATION, 'storagegroup', resource_name=resource_name, params=query_params) def list_pagination(self, list_info): """Process lists under or over the maxPageSize :param list_info: the object list information :returns: the result list """ result_list = [] try: result_list = list_info['resultList']['result'] iterator_id = list_info['id'] list_count = list_info['count'] max_page_size = list_info['maxPageSize'] start_position = list_info['resultList']['from'] end_position = list_info['resultList']['to'] except (KeyError, TypeError): return list_info if list_count > max_page_size: LOG.info("More entries exist in the result list, retrieving " "remainder of results from iterator.") start_position = end_position + 1 if list_count < (end_position + max_page_size): end_position = list_count else: end_position += max_page_size iterator_response = self.get_iterator_page_list( iterator_id, list_count, start_position, end_position, max_page_size) result_list += iterator_response return result_list def get_iterator_page_list(self, iterator_id, result_count, start_position, end_position, max_page_size): """Iterate through response if more than one page available. 
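Pages of at most max_page_size results are fetched from the common Iterator resource until result_count entries have been collected, after which the server-side iterator is deleted.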
:param iterator_id: the iterator ID :param result_count: the amount of results in the iterator :param start_position: position to begin iterator from :param end_position: position to stop iterator :param max_page_size: the max page size :returns: list -- merged results from multiple pages """ LOG.debug('Iterator %(it)s contains %(cnt)s results.', { 'it': iterator_id, 'cnt': result_count}) iterator_result = [] has_more_entries = True while has_more_entries: if start_position <= result_count <= end_position: end_position = result_count has_more_entries = False params = {'to': end_position, 'from': start_position} LOG.debug('Retrieving iterator %(it)s page %(st)s to %(fn)s', { 'it': iterator_id, 'st': start_position, 'fn': end_position}) target_uri = ('/common/Iterator/%(iterator_id)s/page' % { 'iterator_id': iterator_id}) iterator_response = self.get_request(target_uri, 'iterator', params) try: iterator_result += iterator_response['result'] start_position += max_page_size end_position += max_page_size except (KeyError, TypeError): pass LOG.info('All results extracted, deleting iterator %(it)s', { 'it': iterator_id}) self._delete_iterator(iterator_id) return iterator_result def _delete_iterator(self, iterator_id): """Delete an iterator containing full request result list. Note: This should only be called once all required results have been extracted from the iterator. :param iterator_id: the iterator ID -- str """ target_uri = self.build_uri( category='common', resource_level='Iterator', resource_level_id=iterator_id, no_version=True) status_code, message = self.request(target_uri, DELETE) operation = 'delete iterator' self.check_status_code_success(operation, status_code, message) LOG.info('Successfully deleted iterator %(it)s', {'it': iterator_id}) def validate_unisphere_version(self): """Validate that the running Unisphere version meets min requirement :raises: InvalidConfigurationValue :returns: unisphere_meets_min_req -- boolean """ unisphere_meets_min_req = False self.u4p_version = U4P_92_VERSION minimum_version = MIN_U4P_92_VERSION running_version, major_version = self.get_uni_version() if not running_version or not major_version: LOG.warning("Unable to validate Unisphere instance meets minimum " "requirements.") else: if int(major_version) >= int(U4P_110_VERSION): msg = _("Unisphere version %(running_version)s " "is not supported.") % { 'running_version': running_version} LOG.error(msg) raise exception.InvalidConfigurationValue(message=msg) if int(major_version) >= int(U4P_100_VERSION): self.u4p_version = U4P_100_VERSION minimum_version = MIN_U4P_100_VERSION # remove leading letter if running_version.lower()[0] == QUAL_CODE: version = running_version[1:] unisphere_meets_min_req = ( self.utils.version_meet_req(version, minimum_version)) elif running_version.lower()[0] == TEST_CODE or ( running_version.lower()[0] == DEV_CODE): LOG.warning("%(version)s This is not a official release of " "Unisphere.", {'version': running_version}) return int(major_version) >= int(self.u4p_version) if unisphere_meets_min_req: LOG.info("Unisphere version %(running_version)s meets minimum " "requirement of version %(minimum_version)s.", {'running_version': running_version, 'minimum_version': minimum_version}) else: LOG.error("Unisphere version %(running_version)s does " "not meet minimum requirement for use with this " "release, please upgrade to Unisphere " "%(minimum_version)s at minimum.", {'running_version': running_version, 'minimum_version': minimum_version}) return unisphere_meets_min_req def 
get_snap_id(self, array, device_id, snap_name): """Get the unique snap id for a particular snap name :param array: the array serial number :param device_id: the source device ID :param snap_name: the user supplied snapVX name :raises: VolumeBackendAPIException :returns: snap_id -- str """ snapshots = self.get_volume_snaps(array, device_id, snap_name) if not snapshots: exception_message = (_( "Snapshot %(snap_name)s is not associated with " "specified volume %(device_id)s.") % { 'device_id': device_id, 'snap_name': snap_name}) LOG.error(exception_message) raise exception.VolumeBackendAPIException( message=exception_message) elif len(snapshots) > 1: exception_message = (_( "Snapshot %(snap_name)s is associated with more than " "one snap id. No information available to choose " "which one.") % { 'device_id': device_id, 'snap_name': snap_name}) LOG.error(exception_message) raise exception.VolumeBackendAPIException( message=exception_message) else: return snapshots[0].get('snap_id') if self.is_snap_id else ( self.utils.convert_to_string(snapshots[0].get('generation'))) def get_major_minor_ucode(self, array): """Get the major and minor parts of the ucode :param array: the array serial number :returns: ucode_major_level, ucode_minor_level -- str, str """ array_details = self.get_array_detail(array) ucode_major_level = 0 ucode_minor_level = 0 if array_details: ucode = array_details.get('ucode', array_details.get('microcode')) split_ucode_level = ucode.split('.') ucode_level = [int(level) for level in split_ucode_level] ucode_major_level = ucode_level[0] ucode_minor_level = ucode_level[1] return ucode_major_level, ucode_minor_level def _is_snapid_enabled(self): """Check if array is snap_id enabled :returns: boolean """ return (self.ucode_major_level >= utils.UCODE_5978 and self.ucode_minor_level >= utils.UCODE_5978_HICKORY) or ( self.ucode_major_level >= utils.UCODE_6079) def _bool_to_str(self, param): return "true" if param else "false" @staticmethod def _check_force(extra_specs, force_flag=False): """Determine whether force should be used Returns 'true' if force_flag is True or FORCE_VOL_EDIT is set in extra_specs, otherwise returns 'false'. :param extra_specs: extra specs dict :param force_flag: force flag boolean :returns: str (true or false) """ return "true" if force_flag else ( "true" if utils.FORCE_VOL_EDIT in extra_specs else "false") def get_device_nguid(self, array, device_id): """Retrieve the NGUID for a device. This function queries the specified storage array for the NGUID of a given device using its device ID. :param array: The identifier of the storage array. :param device_id: The identifier of the device. :return: The NGUID of the device as a string, or None if the device is not found. """ volume = self.get_resource( array, SLOPROVISIONING, 'volume', resource_name=device_id) if volume is not None: return volume["nguid"] ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/drivers/dell_emc/powermax/utils.py0000664000175000017500000024726400000000000024457 0ustar00zuulzuul00000000000000# Copyright (c) 2020 Dell Inc. or its subsidiaries. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from copy import deepcopy import datetime import hashlib import re from oslo_log import log as logging from oslo_utils import strutils from oslo_utils import units import packaging.version from cinder.common import constants as cinder_constants from cinder import exception from cinder.i18n import _ from cinder.objects import fields from cinder.objects.group import Group from cinder.volume import volume_types from cinder.volume import volume_utils LOG = logging.getLogger(__name__) # SHARED CONSTANTS ISCSI = 'iscsi' FC = 'fc' INTERVAL = 'interval' RETRIES = 'retries' VOLUME_ELEMENT_NAME_PREFIX = 'OS-' MAX_SRP_LENGTH = 16 TRUNCATE_5 = 5 TRUNCATE_27 = 27 UCODE_5978_ELMSR = 221 UCODE_5978_HICKORY = 660 UCODE_5978 = 5978 UCODE_6079 = 6079 UPPER_HOST_CHARS = 16 UPPER_PORT_GROUP_CHARS = 12 ARRAY = 'array' REMOTE_ARRAY = 'remote_array' SLO = 'slo' WORKLOAD = 'workload' SRP = 'srp' PORTGROUPNAME = 'storagetype:portgroupname' DEVICE_ID = 'device_id' INITIATOR_CHECK = 'initiator_check' SG_NAME = 'storagegroup_name' SG_ID = 'storageGroupId' MV_NAME = 'maskingview_name' IG_NAME = 'init_group_name' PARENT_SG_NAME = 'parent_sg_name' CONNECTOR = 'connector' VOL_NAME = 'volume_name' EXTRA_SPECS = 'extra_specs' HOST_NAME = 'short_host_name' IS_RE = 'replication_enabled' IS_RE_CAMEL = 'ReplicationEnabled' DISABLECOMPRESSION = 'storagetype:disablecompression' REP_SYNC = 'Synchronous' REP_ASYNC = 'Asynchronous' REP_METRO = 'Metro' REP_MODE = 'rep_mode' RDF_SYNC_STATE = 'synchronized' RDF_SYNCINPROG_STATE = 'syncinprog' RDF_CONSISTENT_STATE = 'consistent' RDF_SUSPENDED_STATE = 'suspended' RDF_FAILEDOVER_STATE = 'failed over' RDF_ACTIVE = 'active' RDF_ACTIVEACTIVE = 'activeactive' RDF_ACTIVEBIAS = 'activebias' RDF_PARTITIONED_STATE = 'partitioned' RDF_TRANSIDLE_STATE = 'transidle' RDF_PAIR_STATE = 'rdfpairState' RDF_VALID_STATES_SYNC = [RDF_SYNC_STATE, RDF_SUSPENDED_STATE, RDF_SYNCINPROG_STATE] RDF_VALID_STATES_ASYNC = [RDF_CONSISTENT_STATE, RDF_SUSPENDED_STATE, RDF_SYNCINPROG_STATE] RDF_VALID_STATES_METRO = [RDF_ACTIVEBIAS, RDF_ACTIVEACTIVE, RDF_SUSPENDED_STATE, RDF_SYNCINPROG_STATE] RDF_PARTITIONED_STATES = [RDF_PARTITIONED_STATE, RDF_TRANSIDLE_STATE] RDF_CONS_EXEMPT = 'exempt' RDF_ALLOW_METRO_DELETE = 'allow_delete_metro' RDF_GROUP_NO = 'rdf_group_number' METROBIAS = 'metro_bias' BACKEND_ID = 'backend_id' BACKEND_ID_LEGACY_REP = 'backend_id_legacy_rep' REPLICATION_DEVICE_BACKEND_ID = 'storagetype:replication_device_backend_id' REP_CONFIG = 'rep_config' DEFAULT_PORT = 8443 CLONE_SNAPSHOT_NAME = "snapshot_for_clone" STORAGE_GROUP_TAGS = 'storagetype:storagegrouptags' TAG_LIST = 'tag_list' USED_HOST_NAME = "used_host_name" RDF_SYNCED_STATES = [RDF_SYNC_STATE, RDF_CONSISTENT_STATE, RDF_ACTIVEACTIVE, RDF_ACTIVEBIAS] FORCE_VOL_EDIT = 'force_vol_edit' PMAX_FAILOVER_START_ARRAY_PROMOTION = 'pmax_failover_start_array_promotion' POWERMAX_NVME_TRANSPORT_PROTOCOL_TCP = 'tcp' POWERMAX_NVME_TCP_PORT = 4420 # Multiattach constants IS_MULTIATTACH = 'multiattach' OTHER_PARENT_SG = 'other_parent_sg_name' FAST_SG = 'fast_managed_sg' NO_SLO_SG = 'no_slo_sg' # SG for unmanaged volumes UNMANAGED_SG = 'OS-Unmanaged' # Cinder.conf 
vmax configuration VMAX_SERVER_IP = 'san_ip' VMAX_USER_NAME = 'san_login' VMAX_PASSWORD = 'san_password' U4P_SERVER_PORT = 'san_api_port' VMAX_WORKLOAD = 'vmax_workload' U4P_FAILOVER_TIMEOUT = 'u4p_failover_timeout' U4P_FAILOVER_RETRIES = 'u4p_failover_retries' U4P_FAILOVER_BACKOFF_FACTOR = 'u4p_failover_backoff_factor' U4P_FAILOVER_AUTOFAILBACK = 'u4p_failover_autofailback' U4P_FAILOVER_TARGETS = 'u4p_failover_target' POWERMAX_ARRAY = 'powermax_array' POWERMAX_SRP = 'powermax_srp' POWERMAX_SERVICE_LEVEL = 'powermax_service_level' POWERMAX_PORT_GROUPS = 'powermax_port_groups' POWERMAX_ARRAY_TAG_LIST = 'powermax_array_tag_list' POWERMAX_SHORT_HOST_NAME_TEMPLATE = 'powermax_short_host_name_template' POWERMAX_PORT_GROUP_NAME_TEMPLATE = 'powermax_port_group_name_template' PORT_GROUP_LABEL = 'port_group_label' REST_API_CONNECT_TIMEOUT = 'rest_api_connect_timeout' REST_API_READ_TIMEOUT = 'rest_api_read_timeout' REST_API_CONNECT_TIMEOUT_KEY = 'RestAPIConnectTimeout' REST_API_READ_TIMEOUT_KEY = 'RestAPIReadTimeout' DISABLE_PROTECTED_SNAP = 'powermax:disable_protected_snap' # Array Models, Service Levels & Workloads VMAX_HYBRID_MODELS = ['VMAX100K', 'VMAX200K', 'VMAX400K'] VMAX_AFA_MODELS = ['VMAX250F', 'VMAX450F', 'VMAX850F', 'VMAX950F'] PMAX_MODELS = ['PowerMax_2000', 'PowerMax_8000', 'PowerMax_8500', 'PowerMax_2500'] HYBRID_SLS = ['Diamond', 'Platinum', 'Gold', 'Silver', 'Bronze', 'Optimized', 'None', 'NONE'] HYBRID_WLS = ['OLTP', 'OLTP_REP', 'DSS', 'DSS_REP', 'NONE', 'None'] AFA_H_SLS = ['Diamond', 'Optimized', 'None', 'NONE'] AFA_P_SLS = ['Diamond', 'Platinum', 'Gold', 'Silver', 'Bronze', 'Optimized', 'None', 'NONE'] AFA_WLS = ['OLTP', 'OLTP_REP', 'DSS', 'DSS_REP', 'NONE', 'None'] PMAX_SLS = ['Diamond', 'Platinum', 'Gold', 'Silver', 'Bronze', 'Optimized', 'None', 'NONE'] PMAX_WLS = ['NONE', 'None'] # Performance # Metrics PG_METRICS = [ 'AvgIOSize', 'IOs', 'MBRead', 'MBWritten', 'MBs', 'PercentBusy', 'Reads', 'Writes'] PORT_METRICS = [ 'AvgIOSize', 'IOs', 'MBRead', 'MBWritten', 'MBs', 'MaxSpeedGBs', 'PercentBusy', 'ReadResponseTime', 'Reads', 'ResponseTime', 'SpeedGBs', 'WriteResponseTime', 'Writes'] PORT_RT_METRICS = [ 'AvgIOSize', 'IOs', 'MBRead', 'MBWritten', 'MBs', 'PercentBusy', 'Reads', 'ResponseTime', 'Writes'] # Cinder config options LOAD_BALANCE = 'load_balance' LOAD_BALANCE_RT = 'load_balance_real_time' PERF_DATA_FORMAT = 'load_data_format' LOAD_LOOKBACK = 'load_look_back' LOAD_LOOKBACK_RT = 'load_look_back_real_time' PORT_GROUP_LOAD_METRIC = 'port_group_load_metric' PORT_LOAD_METRIC = 'port_load_metric' SNAPVX_UNLINK_SYMFORCE = 'snapvx_unlink_symforce' # One minute in milliseconds ONE_MINUTE = 60000 # Default look back windows in minutes DEFAULT_DIAG_WINDOw = 60 DEFAULT_RT_WINDOW = 1 # REST API keys PERFORMANCE = 'performance' REG_DETAILS = 'registrationdetails' REG_DETAILS_INFO = 'registrationDetailsInfo' COLLECTION_INT = 'collectionintervalmins' DIAGNOSTIC = 'diagnostic' REAL_TIME = 'realtime' RESULT_LIST = 'resultList' RESULT = 'result' KEYS = 'keys' METRICS = 'metrics' CAT = 'category' F_DATE = 'firstAvailableDate' S_DATE = 'startDate' L_DATE = 'lastAvailableDate' E_DATE = 'endDate' SYMM_ID = 'symmetrixId' ARRAY_PERF = 'Array' ARRAY_INFO = 'arrayInfo' PORT_GROUP = 'PortGroup' PORT_GROUP_ID = 'portGroupId' FE_PORT_RT = 'FEPORT' FE_PORT_DIAG = 'FEPort' DATA_FORMAT = 'dataFormat' INST_ID = 'instanceId' DIR_ID = 'directorId' PORT_ID = 'portId' # Revert snapshot exception REVERT_SS_EXC = 'Link must be fully copied for this operation to proceed' # extra specs IS_TRUE = [' True', 
'True', 'true', True] IS_FALSE = [' False', 'False', 'false', False] # filter LIKE_FILTER = '' class PowerMaxUtils(object): """Utility class for Rest based PowerMax volume drivers. This Utility class is for PowerMax volume drivers based on Unisphere Rest API. """ def __init__(self): """Utility class for Rest based PowerMax volume drivers.""" def get_host_short_name(self, host_name): """Returns the short name for a given qualified host name. Checks the host name to see if it is the fully qualified host name and returns part before the dot. If there is no dot in the host name the full host name is returned. :param host_name: the fully qualified host name :returns: string -- the short host_name """ short_host_name = self.get_host_short_name_from_fqn(host_name) return self.generate_unique_trunc_host(short_host_name) @staticmethod def get_host_short_name_from_fqn(host_name): """Returns the short name for a given qualified host name. Checks the host name to see if it is the fully qualified host name and returns part before the dot. If there is no dot in the host name the full host name is returned. :param host_name: the fully qualified host name :returns: string -- the short host_name """ host_array = host_name.split('.') if len(host_array) > 1: short_host_name = host_array[0] else: short_host_name = host_name return short_host_name @staticmethod def get_volumetype_extra_specs(volume, volume_type_id=None): """Gets the extra specs associated with a volume type. :param volume: the volume dictionary :param volume_type_id: Optional override for volume.volume_type_id :returns: dict -- extra_specs - the extra specs :raises: VolumeBackendAPIException """ extra_specs = {} try: if volume_type_id: type_id = volume_type_id else: type_id = volume.volume_type_id if type_id is not None: extra_specs = volume_types.get_volume_type_extra_specs(type_id) except Exception as e: LOG.debug('Exception getting volume type extra specs: %(e)s', {'e': str(e)}) return extra_specs @staticmethod def get_short_protocol_type(protocol): """Get short protocol type. :param protocol: iscsi or fc or nvme :returns: string -- 'I' for iscsi or 'F' for fc 'NT' for nvme/tcp """ if protocol.lower() == ISCSI.lower(): return 'I' elif protocol.lower() == FC.lower(): return 'F' elif protocol.lower() == cinder_constants.NVMEOF_TCP.lower(): return 'NT' else: return protocol @staticmethod def truncate_string(str_to_truncate, max_num): """Truncate a string by taking first and last characters. :param str_to_truncate: the string to be truncated :param max_num: the maximum number of characters :returns: string -- truncated string or original string """ if len(str_to_truncate) > max_num: new_num = len(str_to_truncate) - max_num // 2 first_chars = str_to_truncate[:max_num // 2] last_chars = str_to_truncate[new_num:] str_to_truncate = first_chars + last_chars return str_to_truncate @staticmethod def get_time_delta(start_time, end_time): """Get the delta between start and end time. :param start_time: the start time :param end_time: the end time :returns: string -- delta in string H:MM:SS """ delta = end_time - start_time return str(datetime.timedelta(seconds=int(delta))) def get_default_storage_group_name( self, srp_name, slo, workload, is_compression_disabled=False, is_re=False, rep_mode=None): """Determine default storage group from extra_specs. 
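Names follow the pattern OS-<srp>-<slo>-<workload>[-CD][-RE|-RA|-RM]-SG, or OS-no_SLO[-RE|-RA|-RM]-SG when no service level is set. For example (illustrative values), srp_name 'SRP_1', slo 'Diamond' and workload 'DSS' with compression disabled yields 'OS-SRP_1-Diamond-DSS-CD-SG'; with replication enabled in Asynchronous mode it becomes 'OS-SRP_1-Diamond-DSS-CD-RA-SG'.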
:param srp_name: the name of the srp on the array :param slo: the service level string e.g Bronze :param workload: the workload string e.g DSS :param is_compression_disabled: flag for disabling compression :param is_re: flag for replication :param rep_mode: flag to indicate replication mode :returns: storage_group_name """ if slo and workload: prefix = ("OS-%(srpName)s-%(slo)s-%(workload)s" % {'srpName': srp_name, 'slo': slo, 'workload': workload}) if is_compression_disabled: prefix += "-CD" else: prefix = "OS-no_SLO" if is_re: prefix += self.get_replication_prefix(rep_mode) storage_group_name = ("%(prefix)s-SG" % {'prefix': prefix}) return storage_group_name @staticmethod def get_volume_element_name(volume_id): """Get volume element name follows naming convention, i.e. 'OS-UUID'. :param volume_id: Openstack volume ID containing uuid :returns: volume element name in format of OS-UUID """ element_name = volume_id uuid_regex = (re.compile( r'[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}', re.I)) match = uuid_regex.search(volume_id) if match: volume_uuid = match.group() element_name = ("%(prefix)s%(volumeUUID)s" % {'prefix': VOLUME_ELEMENT_NAME_PREFIX, 'volumeUUID': volume_uuid}) LOG.debug( "get_volume_element_name elementName: %(elementName)s.", {'elementName': element_name}) return element_name @staticmethod def check_uuid_regex(volume_id): """Check the uuid regex :param volume_id: the unique volume identifier :returns: bool """ uuid_regex = (re.compile( r'[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}', re.I)) return uuid_regex.search(volume_id) @staticmethod def modify_snapshot_prefix(snapshot_name, manage=False, unmanage=False): """Modify a Snapshot prefix on PowerMax/VMAX backend. Prepare a snapshot name for manage/unmanage snapshot process either by adding or removing 'OS-' prefix. :param snapshot_name: the old snapshot backend display name :param manage: (bool) if the operation is managing a snapshot :param unmanage: (bool) if the operation is unmanaging a snapshot :returns: snapshot name ready for backend PowerMax/VMAX assignment """ new_snap_name = None if manage: new_snap_name = ("%(prefix)s%(snapshot_name)s" % {'prefix': 'OS-', 'snapshot_name': snapshot_name}) if unmanage: snap_split = snapshot_name.split("-", 1) if snap_split[0] == 'OS': new_snap_name = snap_split[1] return new_snap_name def generate_unique_trunc_host(self, host_name): """Create a unique short host name under 16 characters. :param host_name: long host name :returns: truncated host name """ if host_name and len(host_name) > UPPER_HOST_CHARS: uuid = self.get_uuid_of_input(host_name) new_name = ("%(host)s%(uuid)s" % {'host': host_name[-6:], 'uuid': uuid}) host_name = self.truncate_string(new_name, UPPER_HOST_CHARS) return host_name def get_pg_short_name(self, portgroup_name): """Create a unique port group name under 12 characters. 
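Port group names longer than 12 characters are rebuilt from the last six characters of the original name plus an md5-derived uuid of the full name, then truncated to 12 characters; shorter names are returned unchanged.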
:param portgroup_name: long portgroup_name :returns: truncated portgroup_name """ if portgroup_name and len(portgroup_name) > UPPER_PORT_GROUP_CHARS: uuid = self.get_uuid_of_input(portgroup_name) new_name = ("%(pg)s%(uuid)s" % {'pg': portgroup_name[-6:], 'uuid': uuid}) portgroup_name = self.truncate_string( new_name, UPPER_PORT_GROUP_CHARS) return portgroup_name @staticmethod def get_uuid_of_input(input_str): """Get the uuid of the input string :param input_str: input string :returns: uuid """ input_str = input_str.lower() m = hashlib.md5(usedforsecurity=False) m.update(input_str.encode('utf-8')) return m.hexdigest() @staticmethod def get_default_oversubscription_ratio(max_over_sub_ratio): """Override ratio if necessary. The over subscription ratio will be overridden if the user supplied max oversubscription ratio is less than 1. :param max_over_sub_ratio: user supplied over subscription ratio :returns: max_over_sub_ratio """ if max_over_sub_ratio < 1.0: LOG.info("The user supplied value for max_over_subscription " "ratio is less than 1.0. Using the default value of " "20.0 instead...") max_over_sub_ratio = 20.0 return max_over_sub_ratio def get_temp_snap_name(self, source_device_id): """Construct a temporary snapshot name for clone operation :param source_device_id: the source device id :returns: snap_name """ snap_name = ("temp-%(device)s-%(snap_name)s" % {'device': source_device_id, 'snap_name': CLONE_SNAPSHOT_NAME}) return snap_name @staticmethod def get_array_and_device_id(volume, external_ref): """Helper function for manage volume to get array name and device ID. :param volume: volume object from API :param external_ref: the existing volume object to be manged :returns: string value of the array name and device ID """ device_id = external_ref.get(u'source-name', None) LOG.debug("External_ref: %(er)s", {'er': external_ref}) if not device_id: device_id = external_ref.get(u'source-id', None) host = volume.host host_list = host.split('+') array = host_list[(len(host_list) - 1)] if device_id: if len(device_id) != 5: error_message = (_("Device ID: %(device_id)s is invalid. " "Device ID should be exactly 5 digits.") % {'device_id': device_id}) LOG.error(error_message) raise exception.VolumeBackendAPIException( message=error_message) LOG.debug("Get device ID of existing volume - device ID: " "%(device_id)s, Array: %(array)s.", {'device_id': device_id, 'array': array}) else: exception_message = (_("Source volume device ID is required.")) raise exception.VolumeBackendAPIException( message=exception_message) return array, device_id.upper() @staticmethod def get_array_from_host(volume): """Get the array from the host string :param volume: volume object :returns: array -- str """ host = volume.host host_list = host.split('+') return host_list[-1] def is_protected_snap_disabled(self, extra_specs): """Check is the disable_protected_snap flag set. :param extra_specs: extra specifications :returns: boolean """ if extra_specs.get(DISABLE_PROTECTED_SNAP, False) in IS_TRUE: return True return False def is_compression_disabled(self, extra_specs): """Check is compression is to be disabled. 
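Compression is treated as disabled when the disablecompression extra spec is set to a true value, or when no service level can be derived from the extra specs or pool name.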
:param extra_specs: extra specifications :returns: boolean """ compression_disabled = False if extra_specs.get(DISABLECOMPRESSION, False): if extra_specs.get(DISABLECOMPRESSION) in IS_TRUE: compression_disabled = True else: if extra_specs.get(SLO): service_level = extra_specs.get(SLO) else: __, __, service_level, __ = self.parse_specs_from_pool_name( extra_specs.get('pool_name')) if not service_level: compression_disabled = True return compression_disabled def change_compression_type(self, is_source_compr_disabled, new_type): """Check if volume type have different compression types :param is_source_compr_disabled: from source :param new_type: from target :returns: boolean """ extra_specs = new_type['extra_specs'] is_target_compr_disabled = self.is_compression_disabled(extra_specs) if is_target_compr_disabled == is_source_compr_disabled: return False else: return True def change_replication(self, curr_type_extra_specs, tgt_type_extra_specs): """Check if volume types have different replication status. :param curr_type_extra_specs: extra specs from source volume type :param tgt_type_extra_specs: extra specs from target volume type :returns: bool """ change_replication = False # Compare non-rep & rep enabled changes is_cur_rep = self.is_replication_enabled(curr_type_extra_specs) is_tgt_rep = self.is_replication_enabled(tgt_type_extra_specs) rep_enabled_diff = is_cur_rep != is_tgt_rep if rep_enabled_diff: change_replication = True elif is_cur_rep: # Both types are rep enabled, check for backend id differences rdbid = REPLICATION_DEVICE_BACKEND_ID curr_rep_backend_id = curr_type_extra_specs.get(rdbid, None) tgt_rep_backend_id = tgt_type_extra_specs.get(rdbid, None) rdbid_diff = curr_rep_backend_id != tgt_rep_backend_id if rdbid_diff: change_replication = True return change_replication @staticmethod def is_replication_enabled(extra_specs): """Check if replication is to be enabled. :param extra_specs: extra specifications :returns: bool - true if enabled, else false """ replication_enabled = False if IS_RE in extra_specs: replication_enabled = True return replication_enabled @staticmethod def get_replication_config(rep_device_list): """Gather necessary replication configuration info. :param rep_device_list: the replication device list from cinder.conf :returns: rep_configs, replication configuration list """ rep_config = list() if not rep_device_list: return None else: for rep_device in rep_device_list: rep_config_element = {} try: rep_config_element['array'] = rep_device[ 'target_device_id'] rep_config_element['srp'] = rep_device['remote_pool'] rep_config_element['rdf_group_label'] = rep_device[ 'rdf_group_label'] rep_config_element['portgroup'] = rep_device[ 'remote_port_group'] except KeyError as ke: error_message = ( _("Failed to retrieve all necessary SRDF " "information. Error received: %(ke)s.") % {'ke': str(ke)}) LOG.exception(error_message) raise exception.VolumeBackendAPIException( message=error_message) try: rep_config_element['sync_retries'] = int( rep_device['sync_retries']) rep_config_element['sync_interval'] = int( rep_device['sync_interval']) except (KeyError, ValueError) as ke: LOG.debug( "SRDF Sync wait/retries options not set or set " "incorrectly, defaulting to 200 retries with a 3 " "second wait. 
Configuration load warning: %(ke)s.", {'ke': str(ke)}) rep_config_element['sync_retries'] = 200 rep_config_element['sync_interval'] = 3 allow_extend = rep_device.get('allow_extend', 'false') if strutils.bool_from_string(allow_extend): rep_config_element['allow_extend'] = True else: rep_config_element['allow_extend'] = False rep_mode = rep_device.get('mode', '') if rep_mode.lower() in ['async', 'asynchronous']: rep_config_element['mode'] = REP_ASYNC elif rep_mode.lower() == 'metro': rep_config_element['mode'] = REP_METRO metro_bias = rep_device.get('metro_use_bias', 'false') if strutils.bool_from_string(metro_bias): rep_config_element[METROBIAS] = True else: rep_config_element[METROBIAS] = False else: rep_config_element['mode'] = REP_SYNC backend_id = rep_device.get(BACKEND_ID, '') if backend_id: rep_config_element[BACKEND_ID] = backend_id rep_config.append(rep_config_element) return rep_config @staticmethod def is_volume_failed_over(volume): """Check if a volume has been failed over. :param volume: the volume object :returns: bool """ if volume is not None: if volume.get('replication_status') and ( volume.replication_status == fields.ReplicationStatus.FAILED_OVER): return True return False @staticmethod def update_volume_model_updates(volume_model_updates, volumes, group_id, status='available'): """Update the volume model's status and return it. :param volume_model_updates: list of volume model update dicts :param volumes: volumes object api :param group_id: consistency group id :param status: string value reflects the status of the member volume :returns: volume_model_updates - updated volumes """ LOG.info("Updating status for group: %(id)s.", {'id': group_id}) if volumes: for volume in volumes: volume_model_updates.append({'id': volume.id, 'status': status}) else: LOG.info("No volume found for group: %(cg)s.", {'cg': group_id}) return volume_model_updates @staticmethod def get_grp_volume_model_update(volume, volume_dict, group_id, meta=None): """Create and return the volume model update on creation. :param volume: volume object :param volume_dict: the volume dict :param group_id: consistency group id :param meta: the volume metadata :returns: model_update """ LOG.info("Updating status for group: %(id)s.", {'id': group_id}) model_update = ({'id': volume.id, 'status': 'available', 'provider_location': str(volume_dict)}) if meta: model_update['metadata'] = meta return model_update @staticmethod def update_extra_specs(extraspecs): """Update extra specs. :param extraspecs: the additional info :returns: extraspecs """ try: pool_details = extraspecs['pool_name'].split('+') extraspecs[SLO] = pool_details[0] if len(pool_details) == 4: extraspecs[WORKLOAD] = pool_details[1] extraspecs[SRP] = pool_details[2] extraspecs[ARRAY] = pool_details[3] else: # Assume no workload given in pool name extraspecs[SRP] = pool_details[1] extraspecs[ARRAY] = pool_details[2] extraspecs[WORKLOAD] = 'NONE' except KeyError: LOG.error("Error parsing SLO, workload from" " the provided extra_specs.") return extraspecs def get_volume_group_utils(self, group, interval, retries): """Standard utility for generic volume groups. 
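Derives the backing array from the extra specs of the group's volume types and raises if those volume types resolve to more than one array.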
:param group: the generic volume group object to be created :param interval: Interval in seconds between retries :param retries: Retry count :returns: array, intervals_retries_dict :raises: VolumeBackendAPIException """ arrays = set() # Check if it is a generic volume group instance intervals_retries_dict = {INTERVAL: interval, RETRIES: retries} if isinstance(group, Group): for volume_type in group.volume_types: extra_specs = self.update_extra_specs(volume_type.extra_specs) try: arrays.add(extra_specs[ARRAY]) except KeyError: return None, intervals_retries_dict else: msg = (_("Unable to get volume type ids.")) LOG.error(msg) raise exception.VolumeBackendAPIException(message=msg) if len(arrays) > 1: msg = (_("There are multiple arrays " "associated with volume group: %(groupid)s.") % {'groupid': group.id}) LOG.error(msg) raise exception.VolumeBackendAPIException(message=msg) array = arrays.pop() LOG.debug("Serial number %s retrieved from the volume type extra " "specs.", array) return array, intervals_retries_dict def update_volume_group_name(self, group): """Format id and name consistency group. :param group: the generic volume group object :returns: group_name -- formatted name + id """ group_name = "" if group.name is not None and group.name != group.id: group_name = ( self.truncate_string( group.name, TRUNCATE_27) + "_") group_name += group.id return group_name @staticmethod def add_legacy_pools(pools): """Add legacy pools to allow extending a volume after upgrade. :param pools: the pool list :returns: pools - the updated pool list """ extra_pools = [] for pool in pools: if 'none' in pool['pool_name'].lower(): extra_pools.append(pool) for pool in extra_pools: try: slo = pool['pool_name'].split('+')[0] srp = pool['pool_name'].split('+')[2] array = pool['pool_name'].split('+')[3] except IndexError: slo = pool['pool_name'].split('+')[0] srp = pool['pool_name'].split('+')[1] array = pool['pool_name'].split('+')[2] new_pool_name = ('%(slo)s+%(srp)s+%(array)s' % {'slo': slo, 'srp': srp, 'array': array}) new_pool = deepcopy(pool) new_pool['pool_name'] = new_pool_name pools.append(new_pool) return pools @staticmethod def add_promotion_pools(pools, primary_array): """Add duplicate pools with primary SID for operations during promotion :param pools: the pool list :param primary_array: the original primary array. :returns: pools - the updated pool list """ i_pools = deepcopy(pools) for pool in i_pools: # pool name pool_name = pool['pool_name'] split_name = pool_name.split('+') array_pos = 3 if len(split_name) == 4 else 2 array_sid = split_name[array_pos] updated_pool_name = re.sub(array_sid, primary_array, pool_name) # location info loc = pool['location_info'] split_loc = loc.split('#') split_loc[0] = primary_array # Replace the array SID updated_loc = '#'.join(split_loc) new_pool = deepcopy(pool) new_pool['pool_name'] = updated_pool_name new_pool['location_info'] = updated_loc pools.append(new_pool) return pools def check_replication_matched(self, volume, extra_specs): """Check volume type and group type. This will make sure they do not conflict with each other. :param volume: volume to be checked :param extra_specs: the extra specifications :raises: InvalidInput """ # If volume is not a member of group, skip this check anyway. if not volume.group: return vol_is_re = self.is_replication_enabled(extra_specs) group_is_re = volume.group.is_replicated if not (vol_is_re == group_is_re): msg = _('Replication should be enabled or disabled for both ' 'volume or group. 
Volume replication status: ' '%(vol_status)s, group replication status: ' '%(group_status)s') % { 'vol_status': vol_is_re, 'group_status': group_is_re} raise exception.InvalidInput(reason=msg) @staticmethod def check_rep_status_enabled(group): """Check replication status for group. Group status must be enabled before proceeding with certain operations. :param group: the group object :raises: InvalidInput """ if group.is_replicated: if group.replication_status != fields.ReplicationStatus.ENABLED: msg = (_('Replication status should be %s for ' 'replication-enabled group.') % fields.ReplicationStatus.ENABLED) LOG.error(msg) raise exception.InvalidInput(reason=msg) else: LOG.debug('Replication is not enabled on group %s, ' 'skip status check.', group.id) @staticmethod def get_replication_prefix(rep_mode): """Get the replication prefix. Replication prefix for storage group naming is based on whether it is synchronous, asynchronous, or metro replication mode. :param rep_mode: flag to indicate if replication is async :returns: prefix """ if rep_mode == REP_ASYNC: prefix = "-RA" elif rep_mode == REP_METRO: prefix = "-RM" else: prefix = "-RE" return prefix @staticmethod def get_rdf_management_group_name(rep_config): """Get the name of the group used for async replication management. :param rep_config: the replication configuration :returns: group name """ grp_name = ("OS-%(rdf)s-%(mode)s-rdf-sg" % {'rdf': rep_config['rdf_group_label'], 'mode': rep_config['mode']}) LOG.debug("The rdf managed group name is %(name)s", {'name': grp_name}) return grp_name def is_metro_device(self, rep_config, extra_specs): """Determine if a volume is a Metro enabled device. :param rep_config: the replication configuration :param extra_specs: the extra specifications :returns: bool """ is_metro = (True if self.is_replication_enabled(extra_specs) and rep_config is not None and rep_config.get('mode') == REP_METRO else False) return is_metro def does_vol_need_rdf_management_group(self, extra_specs): """Determine if a volume is a Metro or Async. :param extra_specs: the extra specifications :returns: bool """ if (self.is_replication_enabled(extra_specs) and extra_specs.get(REP_MODE, None) in [REP_ASYNC, REP_METRO]): return True return False def derive_default_sg_from_extra_specs(self, extra_specs, rep_mode=None): """Get the name of the default sg from the extra specs. :param extra_specs: extra specs :param rep_mode: replication mode :returns: default sg - string """ do_disable_compression = self.is_compression_disabled( extra_specs) rep_enabled = self.is_replication_enabled(extra_specs) return self.get_default_storage_group_name( extra_specs[SRP], extra_specs[SLO], extra_specs[WORKLOAD], is_compression_disabled=do_disable_compression, is_re=rep_enabled, rep_mode=rep_mode) @staticmethod def merge_dicts(d1, *args): """Merge dictionaries :param d1: dict 1 :param *args: one or more dicts :returns: merged dict """ d2 = {} for d in args: d2 = d.copy() d2.update(d1) d1 = d2 return d2 @staticmethod def get_temp_failover_grp_name(rep_config): """Get the temporary group name used for failover. :param rep_config: the replication config :returns: temp_grp_name """ temp_grp_name = ("OS-%(rdf)s-temp-rdf-sg" % {'rdf': rep_config['rdf_group_label']}) LOG.debug("The temp rdf managed group name is %(name)s", {'name': temp_grp_name}) return temp_grp_name def get_child_sg_name(self, host_name, extra_specs, port_group_label): """Get the child storage group name for a masking view. 
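Generated names take the form OS-<shortHostName>-<srp>-<slo+workload>-<portGroupLabel>, with '-CD' appended when compression is disabled; when no service level is set the form is OS-<shortHostName>-No_SLO-<portGroupLabel>. A replication suffix ('-RE', '-RA' or '-RM') is appended when replication is enabled.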
:param host_name: the short host name :param extra_specs: the extra specifications :param port_group_label: the port group label :returns: child sg name, compression flag, rep flag, short pg name """ do_disable_compression = False rep_enabled = self.is_replication_enabled(extra_specs) if extra_specs[SLO]: slo_wl_combo = self.truncate_string( extra_specs[SLO] + extra_specs[WORKLOAD], 10) unique_name = self.truncate_string(extra_specs[SRP], 12) child_sg_name = ( "OS-%(shortHostName)s-%(srpName)s-%(combo)s-%(pg)s" % {'shortHostName': host_name, 'srpName': unique_name, 'combo': slo_wl_combo, 'pg': port_group_label}) do_disable_compression = self.is_compression_disabled( extra_specs) if do_disable_compression: child_sg_name = ("%(child_sg_name)s-CD" % {'child_sg_name': child_sg_name}) else: child_sg_name = ( "OS-%(shortHostName)s-No_SLO-%(pg)s" % {'shortHostName': host_name, 'pg': port_group_label}) if rep_enabled: rep_mode = extra_specs.get(REP_MODE, None) child_sg_name += self.get_replication_prefix(rep_mode) return child_sg_name, do_disable_compression, rep_enabled @staticmethod def change_multiattach(extra_specs, new_type_extra_specs): """Check if a change in multiattach is required for retype. :param extra_specs: the source type extra specs :param new_type_extra_specs: the target type extra specs :returns: bool """ is_src_multiattach = volume_utils.is_boolean_str( extra_specs.get('multiattach')) is_tgt_multiattach = volume_utils.is_boolean_str( new_type_extra_specs.get('multiattach')) return is_src_multiattach != is_tgt_multiattach @staticmethod def is_volume_manageable(source_vol): """Check if a volume with verbose description is valid for management. :param source_vol: the verbose volume dict :returns: bool True/False """ vol_head = source_vol['volumeHeader'] # PowerMax/VMAX disk geometry uses cylinders, so volume sizes are # matched to the nearest full cylinder size: 1GB = 547cyl = 1026MB if vol_head['capMB'] < 1026 or not vol_head['capGB'].is_integer(): return False if (vol_head['numSymDevMaskingViews'] > 0 or vol_head['mapped'] is True or source_vol['maskingInfo']['masked'] is True): return False if (vol_head['status'] != 'Ready' or vol_head['serviceState'] != 'Normal' or vol_head['emulationType'] != 'FBA' or vol_head['configuration'] != 'TDEV' or vol_head['system_resource'] is True or vol_head['private'] is True or vol_head['encapsulated'] is True or vol_head['reservationInfo']['reserved'] is True): return False for key, value in source_vol['rdfInfo'].items(): if value is True: return False if source_vol['timeFinderInfo']['snapVXTgt'] is True: return False if vol_head['userDefinedIdentifier'][0:3] == 'OS-': return False if vol_head.get('numStorageGroups', 0) > 1: return False return True @staticmethod def is_snapshot_manageable(source_vol): """Check if a volume with snapshot description is valid for management. 
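To be manageable the source must be a SnapVX source volume of whole-GiB size, an FBA TDEV that is neither private nor a system resource, and the snapshot must be generation 0, not expired, and not named with an 'OS-' or 'temp-' prefix.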
:param source_vol: the verbose volume dict :returns: bool True/False """ vol_head = source_vol['volumeHeader'] if not source_vol['timeFinderInfo']['snapVXSrc']: return False # PowerMax/VMAX disk geometry uses cylinders, so volume sizes are # matched to the nearest full cylinder size: 1GB = 547cyl = 1026MB if (vol_head['capMB'] < 1026 or not vol_head['capGB'].is_integer()): return False if (vol_head['emulationType'] != 'FBA' or vol_head['configuration'] != 'TDEV' or vol_head['private'] is True or vol_head['system_resource'] is True): return False snap_gen_info = (source_vol['timeFinderInfo']['snapVXSession'][0][ 'srcSnapshotGenInfo'][0]['snapshotHeader']) if (snap_gen_info['snapshotName'][0:3] == 'OS-' or snap_gen_info['snapshotName'][0:5] == 'temp-'): return False if (snap_gen_info['expired'] is True or snap_gen_info['generation'] > 0): return False return True def get_volume_attached_hostname(self, volume): """Get the host name from the attached volume :param volume: the volume object :returns: str -- the attached hostname """ host_name_set = set() attachment_list = volume.volume_attachment LOG.debug("Volume attachment list: %(atl)s. " "Attachment type: %(at)s", {'atl': attachment_list, 'at': type(attachment_list)}) try: att_list = attachment_list.objects except AttributeError: att_list = attachment_list for att in att_list: host_name_set.add(att.attached_host) if host_name_set: if len(host_name_set) > 1: LOG.warning("Volume is attached to multiple instances " "on more than one compute node.") else: return host_name_set.pop() return None def get_rdf_managed_storage_group(self, device_info): """Get the RDF managed storage group :param device_info: the device info dict :returns: str -- the attached hostname dict -- storage group details """ try: sg_list = device_info.get("storageGroupId") for sg_id in sg_list: sg_details = self.get_rdf_group_component_dict(sg_id) if sg_details: return sg_id, sg_details except IndexError: return None, None return None, None def get_production_storage_group(self, device_info): """Get the production storage group :param device_info: the device info dict :returns: str -- the storage group id dict -- storage group details """ try: sg_list = device_info.get("storageGroupId") for sg_id in sg_list: sg_details = self.get_storage_group_component_dict(sg_id) if sg_details: return sg_id, sg_details except IndexError: return None, None return None, None @staticmethod def validate_qos_input(input_key, sg_value, qos_extra_spec, property_dict): max_value = 100000 qos_unit = "IO/Sec" if input_key == 'total_iops_sec': min_value = 100 input_value = int(qos_extra_spec['total_iops_sec']) sg_key = 'host_io_limit_io_sec' else: qos_unit = "MB/sec" min_value = 1 input_value = int( int(qos_extra_spec['total_bytes_sec']) / units.Mi) sg_key = 'host_io_limit_mb_sec' if min_value <= input_value <= max_value: if sg_value is None or input_value != int(sg_value): property_dict[sg_key] = input_value else: exception_message = ( _("Invalid %(ds)s with value %(dt)s entered. 
Valid values " "range from %(du)s %(dv)s to 100,000 %(dv)s") % { 'ds': input_key, 'dt': input_value, 'du': min_value, 'dv': qos_unit}) LOG.error(exception_message) raise exception.VolumeBackendAPIException( message=exception_message) return property_dict @staticmethod def validate_qos_distribution_type( sg_value, qos_extra_spec, property_dict): dynamic_list = ['never', 'onfailure', 'always'] if qos_extra_spec.get('DistributionType').lower() in dynamic_list: distribution_type = qos_extra_spec['DistributionType'] if distribution_type != sg_value: property_dict["dynamicDistribution"] = distribution_type else: exception_message = ( _("Wrong Distribution type value %(dt)s entered. Please " "enter one of: %(dl)s") % { 'dt': qos_extra_spec.get('DistributionType'), 'dl': dynamic_list}) LOG.error(exception_message) raise exception.VolumeBackendAPIException( message=exception_message) return property_dict @staticmethod def validate_multiple_rep_device(rep_devices): """Validate the validity of multiple replication devices. Validates uniqueness and presence of backend ids in rep_devices, consistency in target arrays and replication modes when multiple replication devices are present in cinder.conf. :param rep_devices: rep_devices imported from cinder.conf --list """ rdf_group_labels = set() backend_ids = set() rep_modes = set() target_arrays = set() repdev_count = len(rep_devices) if repdev_count > 3: msg = (_('Up to three replication_devices are currently ' 'supported, one for each replication mode. ' '%d replication_devices found in cinder.conf.') % repdev_count) raise exception.InvalidConfigurationValue(msg) for rep_device in rep_devices: backend_id = rep_device.get(BACKEND_ID) if backend_id: if backend_id in backend_ids: msg = (_('Backend IDs must be unique across all ' 'rep_device when multiple replication devices ' 'are defined in cinder.conf, backend_id %s is ' 'defined more than once.') % backend_id) raise exception.InvalidConfigurationValue(msg) elif backend_id == PMAX_FAILOVER_START_ARRAY_PROMOTION: msg = (_('Invalid Backend ID found. Defining a ' 'replication device with a Backend ID of %s is ' 'currently not supported. Please update ' 'the Backend ID of the related replication ' 'device in cinder.conf to use valid ' 'Backend ID value.') % backend_id) raise exception.InvalidConfigurationValue(msg) else: msg = _('Backend IDs must be assigned for each rep_device ' 'when multiple replication devices are defined in ' 'cinder.conf.') raise exception.InvalidConfigurationValue(msg) backend_ids.add(backend_id) rdf_group_label = rep_device.get('rdf_group_label') if rdf_group_label in rdf_group_labels: msg = (_('RDF Group Labels must be unique across all ' 'rep_device when multiple replication devices are ' 'defined in cinder.conf. RDF Group Label %s is ' 'defined more than once.') % rdf_group_label) raise exception.InvalidConfigurationValue(msg) rdf_group_labels.add(rdf_group_label) rep_mode = rep_device.get('mode', '') if rep_mode.lower() in ['async', 'asynchronous']: rep_mode = REP_ASYNC elif rep_mode.lower() == 'metro': rep_mode = REP_METRO else: rep_mode = REP_SYNC if rep_mode in rep_modes: msg = (_('RDF Modes must be unique across all ' 'replication_device. 
Found multiple instances of %s ' 'mode defined in cinder.conf.') % rep_mode) raise exception.InvalidConfigurationValue(msg) rep_modes.add(rep_mode) target_device_id = rep_device.get('target_device_id') target_arrays.add(target_device_id) target_arrays.discard(None) if len(target_arrays) > 1: msg = _('Found multiple target_device_id set in cinder.conf. A ' 'single target_device_id value must be used across all ' 'replication device when defining using multiple ' 'replication devices.') raise exception.InvalidConfigurationValue(msg) @staticmethod def compare_cylinders(cylinders_source, cylinder_target): """Compare number of cylinders of source and target. :param cylinders_source: number of cylinders on source :param cylinder_target: number of cylinders on target """ if float(cylinders_source) > float(cylinder_target): exception_message = ( _("The number of source cylinders %(cylinders_source)s " "cannot be greater than the number of target cylinders " "%(cylinder_target)s. Please extend your source volume by " "at least 1GiB.") % { 'cylinders_source': cylinders_source, 'cylinder_target': cylinder_target}) raise exception.VolumeBackendAPIException( message=exception_message) @staticmethod def get_service_level_workload(extra_specs): """Get the service level and workload combination from extra specs. :param extra_specs: extra specifications :returns: string, string """ service_level, workload = 'None', 'None' if extra_specs.get(SLO): service_level = extra_specs.get(SLO) if (extra_specs.get(WORKLOAD) and 'NONE' not in extra_specs.get(WORKLOAD)): workload = extra_specs.get(WORKLOAD) return service_level, workload def get_new_tags(self, list_str1, list_str2): """Get elements in list_str1 not in list_str2 :param list_str1: list one in string format :param list_str2: list two in string format :returns: list """ list_str1 = re.sub(r"\s+", "", list_str1) if not list_str1: return [] common_list = self._get_intersection( list_str1, list_str2) my_list1 = sorted(list_str1.split(",")) return [x for x in my_list1 if x.lower() not in common_list] def verify_tag_list(self, tag_list): """Verify that the tag list has allowable character :param tag_list: list of tags :returns: boolean """ if not tag_list: return False if not isinstance(tag_list, list): LOG.warning("The list of tags %(tag_list)s is not " "in list format. Tagging will not proceed.", {'tag_list': tag_list}) return False if len(tag_list) > 8: LOG.warning("The list of tags %(tag_list)s is more " "than the upper limit of 8. Tagging will not " "proceed.", {'tag_list': tag_list}) return False for tag in tag_list: tag = tag.strip() if not re.match('^[a-zA-Z0-9_\\-]+$', tag): return False return True def convert_list_to_string(self, list_input): """Convert a list to a comma separated list :param list_input: list :returns: string or None """ return ','.join(map(str, list_input)) if isinstance( list_input, list) else list_input def validate_short_host_name_from_template( self, short_host_template, short_host_name): """Validate that the short host name is in a format we can use. 
Can be one of shortHostName - where shortHostName is what the driver specifies it to be, default shortHostName[:x]uuid[:x] - where first x characters of the short host name and x uuid characters created from md5 hash of short host name shortHostName[:x]userdef - where first x characters of the short host name and a user defined name shortHostName[-x:]uuid[:x] - where last x characters of short host name and x uuid characters created from md5 hash of short host name shortHostName[-x:]suserdef - where last x characters of the short host name and a user defined name :param short_host_template: short host name template :param short_host_name: short host name :raises: VolumeBackendAPIException :returns: new short host name -- string """ new_short_host_name = None is_ok, case = self.regex_check(short_host_template, True) if is_ok: new_short_host_name = ( self.generate_entity_string( case, short_host_template, short_host_name, True)) if not new_short_host_name: error_message = (_('Unable to generate string from short ' 'host template %(template)s. Please refer to ' 'the online documentation for correct ' 'template format(s) for short host name.') % {'template': short_host_template}) LOG.error(error_message) raise exception.VolumeBackendAPIException( message=error_message) return new_short_host_name def validate_port_group_name_from_template( self, port_group_template, port_group_name): """Validate that the port group name is in a format we can use. Can be one of portGroupName - where portGroupName is what the driver specifies it to be, default portGroupName[:x]uuid[:x] - where first x characters of the short host name and x uuid characters created from md5 hash of short host name portGroupName[:x]userdef - where first x characters of the short host name and a user defined name portGroupName[-x:]uuid[:x] - where last x characters of short host name and x uuid characters created from md5 hash of short host name portGroupName[-x:]userdef - where last x characters of the short host name and a user defined name :param port_group_template: port group name template :param port_group_name: port group name :raises: VolumeBackendAPIException :returns: new port group name -- string """ new_port_group_name = None is_ok, case = self.regex_check(port_group_template, False) if is_ok: new_port_group_name = ( self.generate_entity_string( case, port_group_template, port_group_name, False)) if not new_port_group_name: error_message = (_('Unable to generate string from port group ' 'template %(template)s. 
Please refer to ' 'the online documentation for correct ' 'template format(s) for port groups.') % {'template': port_group_template}) LOG.error(error_message) raise exception.VolumeBackendAPIException( message=error_message) return new_port_group_name def generate_entity_string( self, case, entity_template, entity_name, entity_flag): """Generate the entity string if the template checks out :param case: one of five cases :param entity_template: entity template :param entity_name: entity name :param entity_flag: storage group or port group flag :returns: new entity name -- string """ new_entity_name = None override_rule_warning = False try: if case == '1': new_entity_name = self.get_name_if_default_template( entity_name, entity_flag) elif case == '2': pass_two, uuid = self.prepare_string_with_uuid( entity_template, entity_name, entity_flag) m = re.match(r'^' + entity_name + r'\[:(\d+)\]' + uuid + r'\[:(\d+)\]$', pass_two) if m: num_1 = m.group(1) num_2 = m.group(2) self.check_upper_limit( int(num_1), int(num_2), entity_flag) new_entity_name = ( entity_name[:int(num_1)] + uuid[:int(num_2)]) override_rule_warning = True elif case == '3': pass_two, uuid = self.prepare_string_with_uuid( entity_template, entity_name, entity_flag) m = re.match(r'^' + entity_name + r'\[-(\d+):\]' + uuid + r'\[:(\d+)\]$', pass_two) if m: num_1 = m.group(1) num_2 = m.group(2) self.check_upper_limit( int(num_1), int(num_2), entity_flag) new_entity_name = ( entity_name[-int(num_1):] + uuid[:int(num_2)]) override_rule_warning = True elif case == '4': pass_two = self.prepare_string_entity( entity_template, entity_name, entity_flag) m = re.match(r'^' + entity_name + r'\[:(\d+)\]' + r'([a-zA-Z0-9_\\-]+)$', pass_two) if m: num_1 = m.group(1) user_defined = m.group(2) self.check_upper_limit( int(num_1), len(user_defined), entity_flag) new_entity_name = entity_name[:int(num_1)] + user_defined override_rule_warning = True elif case == '5': pass_two = self.prepare_string_entity( entity_template, entity_name, entity_flag) m = re.match(r'^' + entity_name + r'\[-(\d+):\]' + r'([a-zA-Z0-9_\\-]+)$', pass_two) if m: num_1 = m.group(1) user_defined = m.group(2) self.check_upper_limit( int(num_1), len(user_defined), entity_flag) new_entity_name = entity_name[-int(num_1):] + user_defined override_rule_warning = True if override_rule_warning: LOG.warning( "You have opted to override the %(entity)s naming format. " "Once changed and you have attached volumes or created " "new instances, you cannot revert to default or change to " "another format.", {'entity': 'storage group' if entity_flag else 'port group'}) except Exception: new_entity_name = None return new_entity_name def get_name_if_default_template(self, entity_name, is_short_host_flag): """Get the entity name if it is the default template :param entity_name: the first number :param is_short_host_flag: the second number :returns: entity name -- string """ if is_short_host_flag: return self.get_host_short_name(entity_name) else: return self.get_pg_short_name(entity_name) @staticmethod def check_upper_limit(num_1, num_2, is_host_flag): """Check that the sum of number is less than upper limit. :param num_1: the first number :param num_2: the second number :param is_host_flag: is short host boolean :raises: VolumeBackendAPIException """ if is_host_flag: if (num_1 + num_2) > UPPER_HOST_CHARS: error_message = (_("Host name exceeds the character upper " "limit of %(upper)d. 
Please check your " "short host template.") % {'upper': UPPER_HOST_CHARS}) LOG.error(error_message) raise exception.VolumeBackendAPIException( message=error_message) else: if (num_1 + num_2) > UPPER_PORT_GROUP_CHARS: error_message = (_("Port group name exceeds the character " "upper limit of %(upper)d. Please check " "your port group template") % {'upper': UPPER_PORT_GROUP_CHARS}) LOG.error(error_message) raise exception.VolumeBackendAPIException( message=error_message) def prepare_string_with_uuid( self, template, entity_str, is_short_host_flag): """Prepare string for pass three :param template: the template :param entity_str: the entity string :param is_short_host_flag: is short host :returns: pass_two -- string uuid -- string """ pass_one = self.prepare_string_entity( template, entity_str, is_short_host_flag) uuid = self.get_uuid_of_input(entity_str) pass_two = pass_one.replace('uuid', uuid) return pass_two, uuid @staticmethod def prepare_string_entity(template, entity_str, is_host_flag): """Prepare string for pass two :param template: the template :param entity_str: the entity string :param is_host_flag: is host boolean :returns: pass_one -- string """ entity_type = 'shortHostName' if is_host_flag else 'portGroupName' # Replace entity type with variable return template.replace( entity_type, entity_str) @staticmethod def regex_check(template, is_short_host_flag): """Check the template is in a validate format. :param template: short host name template :param is_short_host_flag: short host boolean :returns: boolean, case -- string """ if is_short_host_flag: entity = 'shortHostName' else: entity = 'portGroupName' if re.match(r'^' + entity + r'$', template): return True, '1' elif re.match(r'^' + entity + r'\[:\d+\]uuid\[:\d+\]$', template): return True, '2' elif re.match(r'^' + entity + r'\[-\d+:\]uuid\[:\d+\]$', template): return True, '3' elif re.match(r'^' + entity + r'\[:\d+\][a-zA-Z0-9_\\-]+$', template): return True, '4' elif re.match(r'^' + entity + r'\[-\d+:\][a-zA-Z0-9_\\-]+$', template): return True, '5' return False, '0' def get_host_name_label(self, host_name_in, host_template): """Get the host name label that will be used in PowerMax Objects :param host_name_in: host name as portrayed in connector object :param host_template: :returns: host_name_out """ host_name_out = self.get_host_short_name( host_name_in) if host_template: short_host_name = self.get_host_short_name_from_fqn( host_name_in) host_name_out = ( self.validate_short_host_name_from_template( host_template, short_host_name)) return host_name_out def get_port_name_label(self, port_name_in, port_group_template): """Get the port name label that will be used in PowerMax Objects :rtype: object :param host_name_in: host name as portrayed in connector object :param port_group_template: port group template :returns: port_name_out """ port_name_out = self.get_pg_short_name(port_name_in) if port_group_template: port_name_out = ( self.validate_port_group_name_from_template( port_group_template, port_name_in)) return port_name_out def get_storage_group_component_dict(self, storage_group_name): """Parse the storage group string. :param storage_group_name: the storage group name -- str :returns: object components -- dict """ regex_str = (r'^(?POS)-(?P.+?)' r'((?PNo_SLO)|((?PSRP.+?)-' r'(?P.+?)))-(?P.+?)' r'(?P$|-CD|-RE|-RA|-RM)') return self.get_object_components_and_correct_host( regex_str, storage_group_name) def get_rdf_group_component_dict(self, storage_group_name): """Parse the storage group string. 
:param storage_group_name: the storage group name -- str :returns: object components -- dict """ regex_str = (r'^(?POS)-(?P.+?)-' r'(?PAsynchronous|Metro)-' r'(?Prdf-sg)$') return self.get_object_components( regex_str, storage_group_name) def get_object_components_and_correct_host(self, regex_str, input_str): """Get components from input string. :param regex_str: the regex -- str :param input_str: the input string -- str :returns: object components -- dict """ object_dict = self.get_object_components(regex_str, input_str) if object_dict and 'host' in object_dict: if object_dict['host'].endswith('-'): object_dict['host'] = object_dict['host'][:-1] return object_dict @staticmethod def get_object_components(regex_str, input_str): """Get components from input string. :param regex_str: the regex -- str :param input_str: the input string -- str :returns: dict """ full_str = re.compile(regex_str) match = full_str.match(input_str) return match.groupdict() if match else None def get_possible_initiator_name(self, host_label, protocol): """Get possible initiator name based on the host :param host_label: the host label -- str :param protocol: the protocol -- str :returns: initiator_group_name -- str """ protocol = self.get_short_protocol_type(protocol) return ("OS-%(shortHostName)s-%(protocol)s-IG" % {'shortHostName': host_label, 'protocol': protocol}) @staticmethod def delete_values_from_dict(datadict, key_list): """Delete values from a dict :param datadict: dictionary :param key_list: list of keys :returns: dict """ for key in key_list: if datadict.get(key): del datadict[key] return datadict @staticmethod def update_values_in_dict(datadict, tuple_list): """Delete values from a dict :param datadict: dictionary :param tuple_list: list of tuples :returns: dict """ for tuple in tuple_list: if datadict.get(tuple[0]): datadict.update({tuple[1]: datadict.get(tuple[0])}) del datadict[tuple[0]] return datadict @staticmethod def _get_intersection(list_str1, list_str2): """Get the common values between 2 comma separated list :param list_str1: list one :param list_str2: list two :returns: sorted list """ list_str1 = re.sub(r"\s+", "", list_str1).lower() list_str2 = re.sub(r"\s+", "", list_str2).lower() my_list1 = sorted(list_str1.split(",")) my_list2 = sorted(list_str2.split(",")) sorted_common_list = ( sorted(list(set(my_list1).intersection(set(my_list2))))) return sorted_common_list @staticmethod def get_unique_device_ids_from_lists(list_a, list_b): """Get the unique values from list B that don't appear in list A. :param list_a: list A :param list_b: list B :returns: values unique between two lists -- list """ set_a = set(list_a) return [dev_id for dev_id in list_b if dev_id not in set_a] @staticmethod def update_payload_for_rdf_vol_create(payload, remote_array_id, storage_group_name): """Construct the REST payload for creating RDF enabled volumes. 
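For illustration, the addVolumeParam section of the expandStorageGroupParam payload is extended with a 'create_new_volumes': 'True' flag and a remoteSymmSGInfoParam block naming the remote array serial number and the remote storage group, as the code below shows.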
:param payload: the existing payload -- dict :param remote_array_id: the remote array serial number -- str :param storage_group_name: the storage group name -- str :returns: updated payload -- dict """ create_new_vol = {"create_new_volumes": "True"} payload["editStorageGroupActionParam"]["expandStorageGroupParam"][ "addVolumeParam"].update(create_new_vol) remote_dict = {"remoteSymmSGInfoParam": { "remote_symmetrix_1_id": remote_array_id, "remote_symmetrix_1_sgs": [storage_group_name], "force": "true"}} payload["editStorageGroupActionParam"]["expandStorageGroupParam"][ "addVolumeParam"].update(remote_dict) return payload @staticmethod def is_retype_supported(volume, src_extra_specs, tgt_extra_specs, rep_configs): """Determine if a retype operation involving Metro is supported. :param volume: the volume object -- obj :param src_extra_specs: the source extra specs -- dict :param tgt_extra_specs: the target extra specs -- dict :param rep_configs: imported cinder.conf replication devices -- dict :returns: is supported -- bool """ if volume.attach_status == 'detached': return True src_rep_mode = src_extra_specs.get('rep_mode', None) tgt_rep_mode = None if PowerMaxUtils.is_replication_enabled(tgt_extra_specs): target_backend_id = tgt_extra_specs.get( REPLICATION_DEVICE_BACKEND_ID, BACKEND_ID_LEGACY_REP) target_rep_config = PowerMaxUtils.get_rep_config( target_backend_id, rep_configs) tgt_rep_mode = target_rep_config.get('mode', REP_SYNC) if tgt_rep_mode != REP_METRO: return True else: if src_rep_mode == REP_METRO: return True else: if not src_rep_mode or src_rep_mode in [REP_SYNC, REP_ASYNC]: return False @staticmethod def get_rep_config(backend_id, rep_configs, promotion_vol_stats=False): """Get rep_config for given backend_id. :param backend_id: rep config search key -- str :param rep_configs: backend rep_configs -- list :param promotion_vol_stats: get rep config for vol stats -- bool :returns: rep_config -- dict """ if len(rep_configs) == 1: rep_device = rep_configs[0] else: rep_device = None for rep_config in rep_configs: if rep_config[BACKEND_ID] == backend_id: rep_device = rep_config if rep_device is None: if promotion_vol_stats: # Stat collection only need remote array and srp, any of # the available replication_devices can provide this. rep_device = rep_configs[0] else: msg = (_('Could not find a replication_device with a ' 'backend_id of "%s" in cinder.conf. Please ' 'confirm that the replication_device_backend_id ' 'extra spec for this volume type matches the ' 'backend_id of the intended replication_device ' 'in cinder.conf.') % backend_id) if BACKEND_ID_LEGACY_REP in msg: msg = (_('Could not find replication_device. Legacy ' 'replication_device key found, please ensure ' 'the backend_id for the legacy ' 'replication_device in cinder.conf has been ' 'changed to "%s".') % BACKEND_ID_LEGACY_REP) LOG.error(msg) raise exception.InvalidInput(msg) return rep_device @staticmethod def get_replication_targets(rep_configs): """Set the replication targets for the backend. 
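For illustration (hypothetical serial numbers): two rep_configs whose target arrays are '000197800123' and '000197800124' yield both serials; duplicate entries and rep_configs without a target array are skipped.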
:param rep_configs: backend rep_configs -- list :returns: arrays configured for replication -- list """ replication_targets = set() if rep_configs: for rep_config in rep_configs: array = rep_config.get(ARRAY) if array: replication_targets.add(array) return list(replication_targets) def validate_failover_request(self, is_failed_over, failover_backend_id, rep_configs, primary_array, arrays_list, is_promoted): """Validate failover_host request's parameters Validate that a failover_host operation can be performed with the user entered parameters and system configuration/state :param is_failed_over: current failover state :param failover_backend_id: backend_id given during failover request :param rep_configs: backend rep_configs -- list :param primary_array: configured primary array SID -- string :param arrays_list: list of U4P symmetrix IDs -- list :param is_promoted: current promotion state -- bool :returns: (bool, str) is valid, reason on invalid """ is_valid = True msg = "" if is_failed_over: valid_backend_ids = [ 'default', PMAX_FAILOVER_START_ARRAY_PROMOTION] if failover_backend_id not in valid_backend_ids: is_valid = False msg = _('Cannot failover, the backend is already in a failed ' 'over state, if you meant to failback, please add ' '--backend_id default to the command.') elif (failover_backend_id == 'default' and primary_array not in arrays_list): is_valid = False msg = _('Cannot failback, the configured primary array is ' 'not currently available to perform failback to. ' 'Please ensure array %s is visible in ' 'Unisphere.') % primary_array elif is_promoted and failover_backend_id != 'default': is_valid = False msg = _('Failover promotion currently in progress, please ' 'finish the promotion process and issue a failover ' 'using the "default" backend_id to complete this ' 'process.') else: if failover_backend_id == 'default': is_valid = False msg = _('Cannot failback, backend is not in a failed over ' 'state. 
If you meant to failover, please either omit ' 'the --backend_id parameter or use the --backend_id ' 'parameter with a valid backend id.') return is_valid, msg def validate_replication_group_config(self, rep_configs, extra_specs_list): """Validate replication group configuration Validate the extra specs of volume types being added to a volume group against rep_config imported from cinder.conf :param rep_configs: list of replication_device dicts from cinder.conf :param extra_specs_list: extra_specs of volume types added to group :raises InvalidInput: If any of the validation check fail """ if not rep_configs: LOG.error('No replication devices set in cinder.conf please ' 'disable replication in Volume Group extra specs ' 'or add replication device to cinder.conf.') msg = _('No replication devices are defined in cinder.conf, ' 'can not enable volume group replication.') raise exception.InvalidInput(reason=msg) rep_group_backend_ids = set() for extra_specs in extra_specs_list: target_backend_id = extra_specs.get( REPLICATION_DEVICE_BACKEND_ID, BACKEND_ID_LEGACY_REP) try: target_rep_config = self.get_rep_config( target_backend_id, rep_configs) rep_group_backend_ids.add(target_backend_id) except exception.InvalidInput: target_rep_config = None if not (extra_specs.get(IS_RE) == ' True'): # Replication is disabled or not set to correct value # in the Volume Type being added msg = _('Replication is not enabled for a Volume Type, ' 'all Volume Types in a replication enabled ' 'Volume Group must have replication enabled.') raise exception.InvalidInput(reason=msg) if not target_rep_config: # Unable to determine rep_configs to use. msg = _('Unable to determine which rep_device to use from ' 'cinder.conf. Could not validate volume types being ' 'added to group.') raise exception.InvalidInput(reason=msg) # Verify that replication is Synchronous mode if not target_rep_config.get('mode'): LOG.warning('Unable to verify the replication mode ' 'of Volume Type, please ensure only ' 'Synchronous replication is used.') elif not target_rep_config['mode'] == REP_SYNC: msg = _('Replication for Volume Type is not set ' 'to Synchronous. Only Synchronous ' 'can be used with replication groups') raise exception.InvalidInput(reason=msg) if len(rep_group_backend_ids) > 1: # We should only have a single backend_id # (replication type) across all the Volume Types msg = _('Multiple replication backend ids detected ' 'please ensure only a single replication device ' '(backend_id) is used for all Volume Types in a ' 'Volume Group.') raise exception.InvalidInput(reason=msg) @staticmethod def validate_non_replication_group_config(extra_specs_list): """Validate volume group configuration Validate that none of the Volume Type extra specs are replication enabled. :param extra_specs_list: list of Volume Type extra specs :returns: bool replication enabled found in any extra specs """ for extra_specs in extra_specs_list: if extra_specs.get(IS_RE) == ' True': msg = _('Replication is enabled in one or more of the ' 'Volume Types being added to new Volume Group but ' 'the Volume Group is not replication enabled. 
Please ' 'enable replication in the Volume Group or select ' 'only non-replicated Volume Types.') raise exception.InvalidInput(reason=msg) @staticmethod def get_migration_delete_extra_specs(volume, extra_specs, rep_configs): """Get previous extra specs rep details during migration delete :param volume: volume object -- volume :param extra_specs: volumes extra specs -- dict :param rep_configs: imported cinder.conf replication devices -- dict :returns: updated extra specs -- dict """ metadata = volume.metadata replication_enabled = strutils.bool_from_string( metadata.get(IS_RE_CAMEL, 'False')) if replication_enabled: rdfg_label = metadata['RDFG-Label'] rep_config = next( (r_c for r_c in rep_configs if r_c[ 'rdf_group_label'] == rdfg_label), None) extra_specs[IS_RE] = replication_enabled extra_specs[REP_MODE] = metadata['ReplicationMode'] extra_specs[REP_CONFIG] = rep_config extra_specs[REPLICATION_DEVICE_BACKEND_ID] = rep_config[BACKEND_ID] else: extra_specs.pop(IS_RE, None) return extra_specs @staticmethod def version_meet_req(version, minimum_version): """Check if current version meets the minimum version allowed :param version: unisphere version :param minimum_version: minimum version allowed :returns: boolean """ checking = packaging.version.parse(version) minimum = packaging.version.parse(minimum_version) return checking >= minimum @staticmethod def parse_specs_from_pool_name(pool_name): """Parse basic volume type specs from pool_name. :param pool_name: the pool name -- str :returns: array_id, srp, service_level, workload -- str, str, str, str """ array_id, srp, service_level, workload = str(), str(), str(), str() pool_details = pool_name.split('+') if len(pool_details) == 4: array_id = pool_details[3] srp = pool_details[2] service_level = pool_details[0] if not pool_details[1].lower() == 'none': workload = pool_details[1] elif len(pool_details) == 3: service_level = pool_details[0] srp = pool_details[1] array_id = pool_details[2] else: if not pool_name: msg = (_('No pool_name specified in volume-type.')) else: msg = (_("There has been a problem parsing the pool " "information from pool_name '%(pool)s'." 
% { 'pool': pool_name})) raise exception.VolumeBackendAPIException(msg) if service_level.lower() == 'none': service_level = str() return array_id, srp, service_level, workload @staticmethod def convert_to_string(in_value): """Convert to string if value is an int :param in_value: the input (most likely a str or int) :returns: str """ return in_value if isinstance(in_value, str) else str(in_value) @staticmethod def ode_capable(in_value): """Check if online device expansion capable :param in_value: microcode :returns: Boolean """ return ((int(in_value[0]) == UCODE_5978 and int(in_value[2]) > UCODE_5978_ELMSR) or (int(in_value[0]) > UCODE_5978)) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315577.3391206 cinder-27.0.0/cinder/volume/drivers/dell_emc/powerstore/0000775000175000017500000000000000000000000023275 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/drivers/dell_emc/powerstore/__init__.py0000664000175000017500000000000000000000000025374 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/drivers/dell_emc/powerstore/adapter.py0000664000175000017500000015141000000000000025271 0ustar00zuulzuul00000000000000# Copyright (c) 2020 Dell Inc. or its subsidiaries. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Adapter for Dell EMC PowerStore Cinder driver.""" from oslo_log import log as logging from oslo_utils import strutils from cinder import coordination from cinder import exception from cinder.i18n import _ from cinder.objects import fields from cinder.objects.group_snapshot import GroupSnapshot from cinder.objects.snapshot import Snapshot from cinder.utils import retry from cinder.volume.drivers.dell_emc.powerstore import ( exception as powerstore_exception) from cinder.volume.drivers.dell_emc.powerstore import client from cinder.volume.drivers.dell_emc.powerstore import utils from cinder.volume import manager from cinder.volume import volume_types from cinder.volume import volume_utils LOG = logging.getLogger(__name__) CHAP_MODE_SINGLE = "Single" POWERSTORE_NVME_VERSION_SUPPORT = "3.0" POWERSTORE_QOS_VERSION_SUPPORT = "4.0" retry_exc_tuple = (powerstore_exception.DellPowerStoreQoSIORuleExists, powerstore_exception.DellPowerStoreQoSPolicyExists) class CommonAdapter(object): def __init__(self, backend_id, backend_name, ports, **client_config): if isinstance(ports, str): ports = ports.split(",") self.allowed_ports = [port.strip().lower() for port in ports] self.backend_id = backend_id self.backend_name = backend_name self.client = client.PowerStoreClient(**client_config) self.storage_protocol = None self.use_chap_auth = False @staticmethod def initiators(connector): raise NotImplementedError def _port_is_allowed(self, port): """Check if port is in allowed ports list. 
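Matching is case-insensitive. For illustration (hypothetical WWN): with allowed_ports set to ['58:cc:f0:98:49:21:07:02'], only that FC WWN would be accepted on an FC backend.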
If allowed ports are empty then all ports are allowed. :param port: iSCSI IP/FC WWN to check :return: is port allowed """ if not self.allowed_ports: return True return port.lower() in self.allowed_ports def _get_connection_properties(self, volume_lun): raise NotImplementedError def check_for_setup_error(self): self.client.check_for_setup_error() if self.storage_protocol == utils.PROTOCOL_ISCSI: chap_config = self.client.get_chap_config() if chap_config.get("mode") == CHAP_MODE_SINGLE: self.use_chap_auth = True LOG.debug("Successfully initialized PowerStore %(protocol)s adapter " "for %(backend_id)s %(backend_name)s backend. " "Allowed ports: %(allowed_ports)s. " "Use CHAP authentication: %(use_chap_auth)s.", { "protocol": self.storage_protocol, "backend_id": self.backend_id, "backend_name": self.backend_name, "allowed_ports": self.allowed_ports, "use_chap_auth": self.use_chap_auth, }) def create_volume(self, volume): group_provider_id = None if ( volume.group_id and volume_utils.is_group_a_cg_snapshot_type(volume.group) ): if volume.is_replicated(): msg = _("Volume with enabled replication can not be added to " "PowerStore volume group.") LOG.error(msg) raise exception.InvalidVolume(reason=msg) group_provider_id = self.client.get_vg_id_by_name( volume.group_id ) if volume.is_replicated(): pp_name = utils.get_protection_policy_from_volume(volume) pp_id = self.client.get_protection_policy_id_by_name(pp_name) replication_status = fields.ReplicationStatus.ENABLED else: pp_name = None pp_id = None replication_status = fields.ReplicationStatus.DISABLED LOG.debug("Create PowerStore volume %(volume_name)s of size " "%(volume_size)s GiB with id %(volume_id)s. " "Protection policy: %(pp_name)s. " "Volume group id: %(group_id)s. ", { "volume_name": volume.name, "volume_size": volume.size, "volume_id": volume.id, "pp_name": pp_name, "group_id": volume.group_id, }) size_in_bytes = utils.gib_to_bytes(volume.size) provider_id = self.client.create_volume(volume.name, size_in_bytes, pp_id, group_provider_id) LOG.debug("Successfully created PowerStore volume %(volume_name)s of " "size %(volume_size)s GiB with id %(volume_id)s on " "Protection policy: %(pp_name)s. " "Volume group id: %(group_id)s. " "PowerStore volume id: %(volume_provider_id)s.", { "volume_name": volume.name, "volume_size": volume.size, "volume_id": volume.id, "pp_name": pp_name, "group_id": volume.group_id, "volume_provider_id": provider_id, }) return { "provider_id": provider_id, "replication_status": replication_status, } def delete_volume(self, volume): try: provider_id = self._get_volume_provider_id(volume) except exception.VolumeBackendAPIException: provider_id = None if not provider_id: LOG.warning("Volume %(volume_name)s with id %(volume_id)s " "does not have provider_id thus does not " "map to PowerStore volume.", { "volume_name": volume.name, "volume_id": volume.id, }) return LOG.debug("Delete PowerStore volume %(volume_name)s with id " "%(volume_id)s. PowerStore volume id: " "%(volume_provider_id)s.", { "volume_name": volume.name, "volume_id": volume.id, "volume_provider_id": provider_id, }) self._detach_volume_from_hosts(volume) self.client.delete_volume_or_snapshot(provider_id) LOG.debug("Successfully deleted PowerStore volume %(volume_name)s " "with id %(volume_id)s. 
PowerStore volume id: " "%(volume_provider_id)s.", { "volume_name": volume.name, "volume_id": volume.id, "volume_provider_id": provider_id, }) def extend_volume(self, volume, new_size): provider_id = self._get_volume_provider_id(volume) LOG.debug("Extend PowerStore volume %(volume_name)s of size " "%(volume_size)s GiB with id %(volume_id)s to " "%(volume_new_size)s GiB. " "PowerStore volume id: %(volume_provider_id)s.", { "volume_name": volume.name, "volume_size": volume.size, "volume_id": volume.id, "volume_new_size": new_size, "volume_provider_id": provider_id, }) size_in_bytes = utils.gib_to_bytes(new_size) self.client.extend_volume(provider_id, size_in_bytes) LOG.debug("Successfully extended PowerStore volume %(volume_name)s " "of size %(volume_size)s GiB with id " "%(volume_id)s to %(volume_new_size)s GiB. " "PowerStore volume id: %(volume_provider_id)s.", { "volume_name": volume.name, "volume_size": volume.size, "volume_id": volume.id, "volume_new_size": new_size, "volume_provider_id": provider_id, }) def create_snapshot(self, snapshot): volume_provider_id = self._get_volume_provider_id(snapshot.volume) LOG.debug("Create PowerStore snapshot %(snapshot_name)s with id " "%(snapshot_id)s of volume %(volume_name)s with id " "%(volume_id)s. PowerStore volume id: " "%(volume_provider_id)s.", { "snapshot_name": snapshot.name, "snapshot_id": snapshot.id, "volume_name": snapshot.volume.name, "volume_id": snapshot.volume.id, "volume_provider_id": volume_provider_id, }) self.client.create_snapshot(volume_provider_id, snapshot.name) LOG.debug("Successfully created PowerStore snapshot %(snapshot_name)s " "with id %(snapshot_id)s of volume %(volume_name)s with " "id %(volume_id)s. PowerStore volume id: " "%(volume_provider_id)s.", { "snapshot_name": snapshot.name, "snapshot_id": snapshot.id, "volume_name": snapshot.volume.name, "volume_id": snapshot.volume.id, "volume_provider_id": volume_provider_id, }) def delete_snapshot(self, snapshot): try: volume_provider_id = self._get_volume_provider_id(snapshot.volume) except exception.VolumeBackendAPIException: return LOG.debug("Delete PowerStore snapshot %(snapshot_name)s with id " "%(snapshot_id)s of volume %(volume_name)s with " "id %(volume_id)s. PowerStore volume id: " "%(volume_provider_id)s.", { "snapshot_name": snapshot.name, "snapshot_id": snapshot.id, "volume_name": snapshot.volume.name, "volume_id": snapshot.volume.id, "volume_provider_id": volume_provider_id, }) try: snapshot_provider_id = self.client.get_snapshot_id_by_name( volume_provider_id, snapshot.name ) except exception.VolumeBackendAPIException: return self.client.delete_volume_or_snapshot(snapshot_provider_id, entity="snapshot") LOG.debug("Successfully deleted PowerStore snapshot %(snapshot_name)s " "with id %(snapshot_id)s of volume %(volume_name)s with " "id %(volume_id)s. 
PowerStore volume id: " "%(volume_provider_id)s.", { "snapshot_name": snapshot.name, "snapshot_id": snapshot.id, "volume_name": snapshot.volume.name, "volume_id": snapshot.volume.id, "volume_provider_id": volume_provider_id, }) def initialize_connection(self, volume, connector, **kwargs): connection_properties = self._connect_volume(volume, connector) LOG.debug("Connection properties for volume %(volume_name)s with id " "%(volume_id)s: %(connection_properties)s.", { "volume_name": volume.name, "volume_id": volume.id, "connection_properties": strutils.mask_password( connection_properties ), }) return connection_properties def terminate_connection(self, volume, connector, **kwargs): self._disconnect_volume(volume, connector) return {} def update_volume_stats(self): stats = { "volume_backend_name": self.backend_name, "storage_protocol": self.storage_protocol, "thick_provisioning_support": False, "thin_provisioning_support": True, "compression_support": True, "multiattach": True, "consistent_group_snapshot_enabled": True, "sparse_copy_volume": True, } backend_stats = self.client.get_metrics() backend_total_capacity = utils.bytes_to_gib( backend_stats["physical_total"] ) backend_free_capacity = ( backend_total_capacity - utils.bytes_to_gib(backend_stats["physical_used"]) ) stats["total_capacity_gb"] = backend_total_capacity stats["free_capacity_gb"] = backend_free_capacity LOG.debug("Free capacity for backend '%(backend)s': " "%(free)s GiB, total capacity: %(total)s GiB.", { "backend": self.backend_name, "free": backend_free_capacity, "total": backend_total_capacity, }) return stats def create_volume_from_source(self, volume, source): if isinstance(source, Snapshot): entity = "snapshot" source_size = source.volume_size source_volume_provider_id = self._get_volume_provider_id( source.volume ) source_provider_id = self.client.get_snapshot_id_by_name( source_volume_provider_id, source.name ) else: entity = "volume" source_size = source.size source_provider_id = self._get_volume_provider_id(source) if volume.is_replicated(): pp_name = utils.get_protection_policy_from_volume(volume) pp_id = self.client.get_protection_policy_id_by_name(pp_name) replication_status = fields.ReplicationStatus.ENABLED else: pp_name = None pp_id = None replication_status = fields.ReplicationStatus.DISABLED LOG.debug("Create PowerStore volume %(volume_name)s of size " "%(volume_size)s GiB with id %(volume_id)s from %(entity)s " "%(entity_name)s with id %(entity_id)s. " "Protection policy: %(pp_name)s.", { "volume_name": volume.name, "volume_id": volume.id, "volume_size": volume.size, "entity": entity, "entity_name": source.name, "entity_id": source.id, "pp_name": pp_name, }) volume_provider_id = self.client.clone_volume_or_snapshot( volume.name, source_provider_id, pp_id, entity ) if volume.size > source_size: size_in_bytes = utils.gib_to_bytes(volume.size) self.client.extend_volume(volume_provider_id, size_in_bytes) LOG.debug("Successfully created PowerStore volume %(volume_name)s " "of size %(volume_size)s GiB with id %(volume_id)s from " "%(entity)s %(entity_name)s with id %(entity_id)s. " "Protection policy %(pp_name)s. 
" "PowerStore volume id: %(volume_provider_id)s.", { "volume_name": volume.name, "volume_id": volume.id, "volume_size": volume.size, "entity": entity, "entity_name": source.name, "entity_id": source.id, "pp_name": pp_name, "volume_provider_id": volume_provider_id, }) return { "provider_id": volume_provider_id, "replication_status": replication_status, } def _filter_hosts_by_initiators(self, initiators): """Filter hosts by given list of initiators. If initiators are added to different hosts the exception will be raised. In this case one of the hosts should be deleted. :param initiators: list of initiators :return: PowerStore host object """ LOG.debug("Query PowerStore %(protocol)s hosts.", { "protocol": self.storage_protocol, }) hosts = self.client.get_all_hosts(self.storage_protocol) hosts_found = utils.filter_hosts_by_initiators(hosts, initiators) if hosts_found: if len(hosts_found) > 1: hosts_names_found = [host["name"] for host in hosts_found] msg = (_("Initiators are added to different PowerStore hosts: " "%(hosts_names_found)s. Remove all of the hosts " "except one to proceed. Initiators will be modified " "during the next volume attach procedure.") % {"hosts_names_found": hosts_names_found, }) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) else: return hosts_found[0] @coordination.synchronized("powerstore-create-host") def _create_host_if_not_exist(self, connector): """Create PowerStore host if it does not exist. :param connector: connection properties :return: PowerStore host object, iSCSI CHAP credentials """ initiators = self.initiators(connector) host = self._filter_hosts_by_initiators(initiators) if self.use_chap_auth: chap_credentials = utils.get_chap_credentials() else: chap_credentials = {} if host: self._modify_host_initiators(host, chap_credentials, initiators) else: host_name = utils.powerstore_host_name( connector, self.storage_protocol ) LOG.debug("Create PowerStore host %(host_name)s. " "Initiators: %(initiators)s.", { "host_name": host_name, "initiators": initiators, }) ports = [ { "port_name": initiator, "port_type": self.storage_protocol, **chap_credentials, } for initiator in initiators ] host = self.client.create_host(host_name, ports) host["name"] = host_name LOG.debug("Successfully created PowerStore host %(host_name)s. " "Initiators: %(initiators)s. PowerStore host id: " "%(host_provider_id)s.", { "host_name": host["name"], "initiators": initiators, "host_provider_id": host["id"], }) return host, chap_credentials def _modify_host_initiators(self, host, chap_credentials, initiators): """Update PowerStore host initiators if needed. :param host: PowerStore host object :param chap_credentials: iSCSI CHAP credentials :param initiators: list of initiators :return: None """ initiators_added = [ initiator["port_name"] for initiator in host["host_initiators"] ] initiators_to_add = [] initiators_to_modify = [] initiators_to_remove = [ initiator for initiator in initiators_added if initiator not in initiators ] for initiator in initiators: initiator_add_modify = { "port_name": initiator, **chap_credentials, } if initiator not in initiators_added: initiator_add_modify["port_type"] = self.storage_protocol initiators_to_add.append(initiator_add_modify) elif self.use_chap_auth: initiators_to_modify.append(initiator_add_modify) if initiators_to_remove: LOG.debug("Remove initiators from PowerStore host %(host_name)s. " "Initiators: %(initiators_to_remove)s. 
" "PowerStore host id: %(host_provider_id)s.", { "host_name": host["name"], "initiators_to_remove": initiators_to_remove, "host_provider_id": host["id"], }) self.client.modify_host_initiators( host["id"], remove_initiators=initiators_to_remove ) LOG.debug("Successfully removed initiators from PowerStore host " "%(host_name)s. Initiators: %(initiators_to_remove)s. " "PowerStore host id: %(host_provider_id)s.", { "host_name": host["name"], "initiators_to_remove": initiators_to_remove, "host_provider_id": host["id"], }) if initiators_to_add: LOG.debug("Add initiators to PowerStore host %(host_name)s. " "Initiators: %(initiators_to_add)s. PowerStore host id: " "%(host_provider_id)s.", { "host_name": host["name"], "initiators_to_add": strutils.mask_password( initiators_to_add ), "host_provider_id": host["id"], }) self.client.modify_host_initiators( host["id"], add_initiators=initiators_to_add ) LOG.debug("Successfully added initiators to PowerStore host " "%(host_name)s. Initiators: %(initiators_to_add)s. " "PowerStore host id: %(host_provider_id)s.", { "host_name": host["name"], "initiators_to_add": strutils.mask_password( initiators_to_add ), "host_provider_id": host["id"], }) if initiators_to_modify: LOG.debug("Modify initiators of PowerStore host %(host_name)s. " "Initiators: %(initiators_to_modify)s. " "PowerStore host id: %(host_provider_id)s.", { "host_name": host["name"], "initiators_to_modify": strutils.mask_password( initiators_to_modify ), "host_provider_id": host["id"], }) self.client.modify_host_initiators( host["id"], modify_initiators=initiators_to_modify ) LOG.debug("Successfully modified initiators of PowerStore host " "%(host_name)s. Initiators: %(initiators_to_modify)s. " "PowerStore host id: %(host_provider_id)s.", { "host_name": host["name"], "initiators_to_modify": strutils.mask_password( initiators_to_modify ), "host_provider_id": host["id"], }) def _attach_volume_to_host(self, host, volume): """Attach PowerStore volume to host. :param host: PowerStore host object :param volume: OpenStack volume object :return: attached volume identifier """ provider_id = self._get_volume_provider_id(volume) LOG.debug("Attach PowerStore volume %(volume_name)s with id " "%(volume_id)s to host %(host_name)s. PowerStore volume id: " "%(volume_provider_id)s, host id: %(host_provider_id)s.", { "volume_name": volume.name, "volume_id": volume.id, "host_name": host["name"], "volume_provider_id": provider_id, "host_provider_id": host["id"], }) self.client.attach_volume_to_host(host["id"], provider_id) if self.storage_protocol == utils.PROTOCOL_NVME: volume_identifier = self.client.get_volume_nguid(provider_id) else: volume_identifier = self.client.get_volume_lun(host["id"], provider_id) LOG.debug("Successfully attached PowerStore volume %(volume_name)s " "with id %(volume_id)s to host %(host_name)s. " "PowerStore volume id: %(volume_provider_id)s, " "host id: %(host_provider_id)s. Volume identifier: " "%(volume_identifier)s.", { "volume_name": volume.name, "volume_id": volume.id, "host_name": host["name"], "volume_provider_id": provider_id, "host_provider_id": host["id"], "volume_identifier": volume_identifier, }) self._create_or_update_volume_qos_policy(volume, provider_id, utils.VOLUME_ATTACH_OPERATION) return volume_identifier def _create_host_and_attach(self, connector, volume): """Create PowerStore host and attach volume. 
:param connector: connection properties :param volume: OpenStack volume object :return: iSCSI CHAP credentials, volume logical number """ host, chap_credentials = self._create_host_if_not_exist(connector) return chap_credentials, self._attach_volume_to_host(host, volume) def _connect_volume(self, volume, connector): """Attach PowerStore volume and return it's connection properties. :param volume: OpenStack volume object :param connector: connection properties :return: volume connection properties """ chap_credentials, volume_identifier = self._create_host_and_attach( connector, volume ) connection_properties = self._get_connection_properties( volume_identifier ) if self.use_chap_auth: connection_properties["data"]["auth_method"] = "CHAP" connection_properties["data"]["auth_username"] = ( chap_credentials.get("chap_single_username") ) connection_properties["data"]["auth_password"] = ( chap_credentials.get("chap_single_password") ) return connection_properties def _detach_volume_from_hosts(self, volume, hosts_to_detach=None): """Detach volume from PowerStore hosts. If hosts_to_detach is None, detach volume from all hosts. :param volume: OpenStack volume object :param hosts_to_detach: list of hosts to detach from :return: None """ provider_id = self._get_volume_provider_id(volume) if hosts_to_detach is None: # Force detach. Get all mapped hosts and detach. hosts_to_detach = self.client.get_volume_mapped_hosts(provider_id) if not hosts_to_detach: # Volume is not attached to any host. return LOG.debug("Detach PowerStore volume %(volume_name)s with id " "%(volume_id)s from hosts. PowerStore volume id: " "%(volume_provider_id)s, hosts ids: %(hosts_provider_ids)s.", { "volume_name": volume.name, "volume_id": volume.id, "volume_provider_id": provider_id, "hosts_provider_ids": hosts_to_detach, }) for host_id in hosts_to_detach: self.client.detach_volume_from_host(host_id, provider_id) LOG.debug("Successfully detached PowerStore volume " "%(volume_name)s with id %(volume_id)s from hosts. " "PowerStore volume id: %(volume_provider_id)s, " "hosts ids: %(hosts_provider_ids)s.", { "volume_name": volume.name, "volume_id": volume.id, "volume_provider_id": provider_id, "hosts_provider_ids": hosts_to_detach, }) self._create_or_update_volume_qos_policy(volume, provider_id, utils.VOLUME_DETACH_OPERATION) def _disconnect_volume(self, volume, connector): """Detach PowerStore volume. :param volume: OpenStack volume object :param connector: connection properties :return: None """ if connector is None: self._detach_volume_from_hosts(volume) else: is_multiattached = utils.is_multiattached_to_host( volume.volume_attachment, connector["host"] ) if is_multiattached: # Do not detach volume until it is attached to more than one # instance on the same host. return initiators = self.initiators(connector) host = self._filter_hosts_by_initiators(initiators) if host: self._detach_volume_from_hosts(volume, [host["id"]]) def revert_to_snapshot(self, volume, snapshot): volume_provider_id = self._get_volume_provider_id(volume) snapshot_volume_provider_id = self._get_volume_provider_id( snapshot.volume ) LOG.debug("Restore PowerStore volume %(volume_name)s with id " "%(volume_id)s from snapshot %(snapshot_name)s with id " "%(snapshot_id)s. 
PowerStore volume id: " "%(volume_provider_id)s.", { "volume_name": volume.name, "volume_id": volume.id, "snapshot_name": snapshot.name, "snapshot_id": snapshot.id, "volume_provider_id": volume_provider_id, }) snapshot_provider_id = self.client.get_snapshot_id_by_name( snapshot_volume_provider_id, snapshot.name ) self.client.restore_from_snapshot(volume_provider_id, snapshot_provider_id) LOG.debug("Successfully restored PowerStore volume %(volume_name)s " "with id %(volume_id)s from snapshot %(snapshot_name)s " "with id %(snapshot_id)s. PowerStore volume id: " "%(volume_provider_id)s.", { "volume_name": volume.name, "volume_id": volume.id, "snapshot_name": snapshot.name, "snapshot_id": snapshot.id, "volume_provider_id": volume_provider_id, }) def _get_volume_provider_id(self, volume): """Get provider_id for volume. If the secondary backend is used after failover operation try to get volume provider_id from PowerStore API. :param volume: OpenStack volume object :return: volume provider_id """ if ( self.backend_id == manager.VolumeManager.FAILBACK_SENTINEL or not volume.is_replicated() ): return volume.provider_id else: return self.client.get_volume_id_by_name(volume.name) def teardown_volume_replication(self, volume): """Teardown replication for volume so it can be deleted. :param volume: OpenStack volume object :return: None """ LOG.debug("Teardown replication for volume %(volume_name)s " "with id %(volume_id)s.", { "volume_name": volume.name, "volume_id": volume.id, }) try: provider_id = self._get_volume_provider_id(volume) rep_session_id = self.client.get_volume_replication_session_id( provider_id ) except exception.VolumeBackendAPIException: LOG.warning("Replication session for volume %(volume_name)s with " "id %(volume_id)s is not found. Replication for " "volume was not configured or was modified from " "storage side.", { "volume_name": volume.name, "volume_id": volume.id, }) return self.client.unassign_volume_protection_policy(provider_id) self.client.wait_for_replication_session_deletion(rep_session_id) def failover_host(self, volumes, groups, is_failback): volumes_updates = [] groups_updates = [] for volume in volumes: updates = self.failover_volume(volume, is_failback) if updates: volumes_updates.append(updates) return volumes_updates, groups_updates def failover_volume(self, volume, is_failback): error_status = (fields.ReplicationStatus.ERROR if is_failback else fields.ReplicationStatus.FAILOVER_ERROR) try: provider_id = self._get_volume_provider_id(volume) rep_session_id = self.client.get_volume_replication_session_id( provider_id ) failover_job_id = self.client.failover_volume_replication_session( rep_session_id, is_failback ) failover_success = self.client.wait_for_failover_completion( failover_job_id ) if is_failback: self.client.reprotect_volume_replication_session( rep_session_id ) except exception.VolumeBackendAPIException: failover_success = False if not failover_success: return { "volume_id": volume.id, "updates": { "replication_status": error_status, }, } @utils.is_group_a_cg_snapshot_type def create_group(self, group): LOG.debug("Create PowerStore volume group %(group_name)s with id " "%(group_id)s.", { "group_name": group.name, "group_id": group.id, }) self.client.create_vg(group.id) LOG.debug("Successfully created PowerStore volume group " "%(group_name)s with id %(group_id)s.", { "group_name": group.name, "group_id": group.id, }) @utils.is_group_a_cg_snapshot_type def delete_group(self, group): LOG.debug("Delete PowerStore volume group %(group_name)s with id " 
"%(group_id)s.", { "group_name": group.name, "group_id": group.id, }) try: group_provider_id = self.client.get_vg_id_by_name( group.id ) except exception.VolumeBackendAPIException: return None, None self.client.delete_volume_or_snapshot(group_provider_id, entity="volume group") LOG.debug("Successfully deleted PowerStore volume group " "%(group_name)s with id %(group_id)s.", { "group_name": group.name, "group_id": group.id, }) return None, None @utils.is_group_a_cg_snapshot_type def update_group(self, group, add_volumes, remove_volumes): volumes_to_add = [] for volume in add_volumes: if volume.is_replicated(): msg = _("Volume with enabled replication can not be added to " "PowerStore volume group.") LOG.error(msg) raise exception.InvalidVolume(reason=msg) volumes_to_add.append(self._get_volume_provider_id(volume)) volumes_to_remove = [ self._get_volume_provider_id(volume) for volume in remove_volumes ] LOG.debug("Update PowerStore volume group %(group_name)s with id " "%(group_id)s. Add PowerStore volumes with ids: " "%(volumes_to_add)s, remove PowerStore volumes with ids: " "%(volumes_to_remove)s.", { "group_name": group.name, "group_id": group.id, "volumes_to_add": volumes_to_add, "volumes_to_remove": volumes_to_remove, }) group_provider_id = self.client.get_vg_id_by_name(group.id) if volumes_to_add: self.client.add_volumes_to_vg(group_provider_id, volumes_to_add) if volumes_to_remove: self.client.remove_volumes_from_vg(group_provider_id, volumes_to_remove) LOG.debug("Successfully updated PowerStore volume group " "%(group_name)s with id %(group_id)s. " "Add PowerStore volumes with ids: %(volumes_to_add)s, " "remove PowerStore volumes with ids: %(volumes_to_remove)s.", { "group_name": group.name, "group_id": group.id, "volumes_to_add": volumes_to_add, "volumes_to_remove": volumes_to_remove, }) return None, None, None @utils.is_group_a_cg_snapshot_type def create_group_snapshot(self, group_snapshot): LOG.debug("Create PowerStore snapshot %(snapshot_name)s with id " "%(snapshot_id)s of volume group %(group_name)s with id " "%(group_id)s.", { "snapshot_name": group_snapshot.name, "snapshot_id": group_snapshot.id, "group_name": group_snapshot.group.name, "group_id": group_snapshot.group.id, }) group_provider_id = self.client.get_vg_id_by_name( group_snapshot.group.id ) self.client.create_vg_snapshot( group_provider_id, group_snapshot.id ) LOG.debug("Successfully created PowerStore snapshot %(snapshot_name)s " "with id %(snapshot_id)s of volume group %(group_name)s " "with id %(group_id)s.", { "snapshot_name": group_snapshot.name, "snapshot_id": group_snapshot.id, "group_name": group_snapshot.group.name, "group_id": group_snapshot.group.id, }) return None, None @utils.is_group_a_cg_snapshot_type def delete_group_snapshot(self, group_snapshot): LOG.debug("Delete PowerStore snapshot %(snapshot_name)s with id " "%(snapshot_id)s of volume group %(group_name)s with " "id %(group_id)s.", { "snapshot_name": group_snapshot.name, "snapshot_id": group_snapshot.id, "group_name": group_snapshot.group.name, "group_id": group_snapshot.group.id, }) try: group_provider_id = self.client.get_vg_id_by_name( group_snapshot.group.id ) group_snapshot_provider_id = ( self.client.get_vg_snapshot_id_by_name( group_provider_id, group_snapshot.id )) except exception.VolumeBackendAPIException: return None, None self.client.delete_volume_or_snapshot(group_snapshot_provider_id, entity="volume group snapshot") LOG.debug("Successfully deleted PowerStore snapshot %(snapshot_name)s " "with id %(snapshot_id)s of volume group 
%(group_name)s " "with id %(group_id)s.", { "snapshot_name": group_snapshot.name, "snapshot_id": group_snapshot.id, "group_name": group_snapshot.group.name, "group_id": group_snapshot.group.id, }) return None, None @utils.is_group_a_cg_snapshot_type def create_group_from_source(self, group, volumes, source, snapshots, source_vols): if isinstance(source, GroupSnapshot): entity = "volume group snapshot" group_provider_id = self.client.get_vg_id_by_name( source.group.id ) source_provider_id = self.client.get_vg_snapshot_id_by_name( group_provider_id, source.id ) source_vols = [snapshot.volume for snapshot in snapshots] base_clone_name = "%s.%s" % (group.id, source.id) else: entity = "volume group" source_provider_id = self.client.get_vg_id_by_name(source.id) base_clone_name = group.id LOG.debug("Create PowerStore volume group %(group_name)s with id " "%(group_id)s from %(entity)s %(entity_name)s with id " "%(entity_id)s.", { "group_name": group.name, "group_id": group.id, "entity": entity, "entity_name": source.name, "entity_id": source.id, }) self.client.clone_vg_or_vg_snapshot( group.id, source_provider_id, entity ) LOG.debug("Successfully created PowerStore volume group " "%(group_name)s with id %(group_id)s from %(entity)s " "%(entity_name)s with id %(entity_id)s.", { "group_name": group.name, "group_id": group.id, "entity": entity, "entity_name": source.name, "entity_id": source.id, }) updates = [] for volume, source_vol in zip(volumes, source_vols): volume_name = "%s.%s" % (base_clone_name, source_vol.name) volume_provider_id = self.client.get_volume_id_by_name(volume_name) self.client.rename_volume(volume_provider_id, volume.name) volume_updates = { "id": volume.id, "provider_id": volume_provider_id, "replication_status": group.replication_status, } updates.append(volume_updates) return None, updates def _create_or_update_volume_qos_policy(self, volume, provider_id, operation): """Create or update volume QoS policy @param volume: OpenStack volume object @param provider_id: Volume provider Id @param operation: QoS create or update operation """ volume_type_id = volume["volume_type_id"] specs = volume_types.get_volume_type_qos_specs(volume_type_id) qos_specs = specs['qos_specs'] if (qos_specs is not None and (qos_specs["consumer"] == "back-end" or qos_specs["consumer"] == "both") and self._check_qos_support()): if operation == utils.VOLUME_ATTACH_OPERATION: qos_policy_id = self._get_or_create_qos_policy(qos_specs) self.client.update_volume_with_qos_policy(provider_id, qos_policy_id) else: self.client.update_volume_with_qos_policy(provider_id, None) def _check_qos_support(self): """Check PowerStore array support QoS or not @return: Version is supported or not in boolean """ array_version = self.client.get_array_version() if not utils.version_gte( array_version, POWERSTORE_QOS_VERSION_SUPPORT ): msg = (_("PowerStore arrays support QoS starting from version " "%(qos_support_version)s. Current PowerStore array " "version: %(current_version)s.") % {"qos_support_version": POWERSTORE_QOS_VERSION_SUPPORT, "current_version": array_version}) LOG.error(msg) else: return True return False @retry(retry_exc_tuple, interval=1, retries=3, backoff_rate=2) def _get_or_create_qos_policy(self, qos_specs): """Get or create QoS policy 1. Create operation: It will verify if a QoS policy is created for the volume type. If not, it will create an I/O limit rule, establish a policy with this rule, and attach the policy to the volume. 2. 
Update operation: It will verify if a QoS policy is created for the volume type. If it is, it will update the existing I/O limit rule with the specified QoS values. @param qos_specs: Volume QoS specs @return: QoS policy id """ qos_id = qos_specs["id"] policy_name = "qos-policy-%s" % qos_id io_rule_name = "io-rule-%s" % qos_id specs = qos_specs["specs"] io_rule_params = { "type": (specs["bandwidth_limit_type"] if "bandwidth_limit_type" in specs else None), "max_iops": int(specs["max_iops"]) if "max_iops" in specs else None, "max_bw": int(specs["max_bw"]) if "max_bw" in specs else None, "burst_percentage": (int(specs["burst_percentage"]) if "burst_percentage" in specs else None) } policy_id = self.client.get_qos_policy_id_by_name(policy_name) if policy_id is None: io_rule_params["name"] = io_rule_name io_rule_id = self.client.create_qos_io_rule(io_rule_params) policy_params = { "name": policy_name, "io_limit_rule_id": io_rule_id } return self.client.create_qos_policy(policy_params) else: self.client.update_qos_io_rule(io_rule_name, io_rule_params) return policy_id class FibreChannelAdapter(CommonAdapter): def __init__(self, **kwargs): super(FibreChannelAdapter, self).__init__(**kwargs) self.storage_protocol = utils.PROTOCOL_FC self.driver_volume_type = "fibre_channel" @staticmethod def initiators(connector): return utils.extract_fc_wwpns(connector) def _get_fc_targets(self): """Get available FC WWNs. :return: list of FC WWNs """ wwns = [] fc_ports = self.client.get_fc_port() for port in fc_ports: if self._port_is_allowed(port["wwn"]): wwns.append(utils.fc_wwn_to_string(port["wwn"])) if not wwns: msg = _("There are no accessible Fibre Channel targets on the " "system.") raise exception.VolumeBackendAPIException(data=msg) return wwns def _get_connection_properties(self, volume_identifier): """Fill connection properties dict with data to attach volume. :param volume_identifier: attached volume logical unit number :return: connection properties """ target_wwns = self._get_fc_targets() return { "driver_volume_type": self.driver_volume_type, "data": { "target_discovered": False, "target_lun": volume_identifier, "target_wwn": target_wwns, "discard": True, } } class iSCSIAdapter(CommonAdapter): def __init__(self, **kwargs): super(iSCSIAdapter, self).__init__(**kwargs) self.storage_protocol = utils.PROTOCOL_ISCSI self.driver_volume_type = "iscsi" @staticmethod def initiators(connector): return [connector["initiator"]] def _get_iscsi_targets(self): """Get available iSCSI portals and IQNs. :return: iSCSI portals and IQNs """ iqns = [] portals = [] ip_pool_addresses = self.client.get_ip_pool_address( self.storage_protocol ) for address in ip_pool_addresses: if self._port_is_allowed(address["address"]): portals.append( utils.iscsi_portal_with_port(address["address"]) ) iqns.append(address["ip_port"]["target_iqn"]) if not portals: msg = _("There are no accessible iSCSI targets on the " "system.") raise exception.VolumeBackendAPIException(data=msg) return iqns, portals def _get_connection_properties(self, volume_identifier): """Fill connection properties dict with data to attach volume. 
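Illustrative return value (hypothetical portal and IQN): {'driver_volume_type': 'iscsi', 'data': {'target_portal': '192.0.2.1:3260', 'target_iqn': 'iqn.2015-10.com.example:target-0', 'target_lun': 1, ...}}, with target_portals, target_iqns and target_luns carrying one entry per accessible portal.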
:param volume_identifier: attached volume logical unit number :return: connection properties """ iqns, portals = self._get_iscsi_targets() return { "driver_volume_type": self.driver_volume_type, "data": { "target_discovered": False, "target_portal": portals[0], "target_iqn": iqns[0], "target_lun": volume_identifier, "target_portals": portals, "target_iqns": iqns, "target_luns": [volume_identifier] * len(portals), }, } class NVMEoFAdapter(CommonAdapter): def __init__(self, **kwargs): super(NVMEoFAdapter, self).__init__(**kwargs) self.storage_protocol = utils.PROTOCOL_NVME self.driver_volume_type = "nvmeof" @staticmethod def initiators(connector): return [connector["nqn"]] def check_for_setup_error(self): array_version = self.client.get_array_version() if not utils.version_gte( array_version, POWERSTORE_NVME_VERSION_SUPPORT ): msg = (_("PowerStore arrays support NVMe-OF starting from version " "%(nvme_support_version)s. Current PowerStore array " "version: %(current_version)s.") % {"nvme_support_version": POWERSTORE_NVME_VERSION_SUPPORT, "current_version": array_version, }) LOG.error(msg) raise exception.InvalidInput(reason=msg) super(NVMEoFAdapter, self).check_for_setup_error() def _get_nvme_targets(self): """Get available NVMe portals and subsystem NQN. :return: NVMe portals and NQN """ portals = [] ip_pool_addresses = self.client.get_ip_pool_address( self.storage_protocol ) for address in ip_pool_addresses: if self._port_is_allowed(address["address"]): portals.append(address["address"]) if not portals: msg = _("There are no accessible NVMe targets on the " "system.") raise exception.VolumeBackendAPIException(data=msg) nqn = self.client.get_subsystem_nqn() return portals, nqn def _get_connection_properties(self, volume_identifier): """Fill connection properties dict with data to attach volume. :param volume_identifier: attached volume NGUID :return: connection properties """ portals, nqn = self._get_nvme_targets() target_portals = [(portal, 4420, "tcp") for portal in portals] return { "driver_volume_type": self.driver_volume_type, "data": { "portals": target_portals, "target_nqn": nqn, "volume_nguid": volume_identifier, "discard": True, }, } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/drivers/dell_emc/powerstore/client.py0000664000175000017500000010470500000000000025134 0ustar00zuulzuul00000000000000# Copyright (c) 2020 Dell Inc. or its subsidiaries. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
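#
# Illustrative sketch (not part of the driver): the PowerStoreClient defined
# below sends REST calls with HTTP basic auth, optional certificate
# verification and a (connect, read) timeout tuple. This standalone helper
# shows the same pattern with plain ``requests``; the address, credentials
# and the "/volume" endpoint used here are placeholder values only.
def _example_rest_get(ip, username, password, verify=False,
                      connect_timeout=30, read_timeout=30):
    """Return the parsed JSON body of a GET against a PowerStore-style API."""
    import requests

    url = "https://%s/api/rest/volume" % ip
    r = requests.get(url,
                     auth=(username, password),
                     verify=verify,
                     timeout=(connect_timeout, read_timeout))
    return r.json() if r.content else None
#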
"""REST client for Dell EMC PowerStore Cinder Driver.""" import functools import json from oslo_log import log as logging from oslo_utils import strutils import requests import requests.exceptions from cinder import exception from cinder.i18n import _ from cinder import utils as cinder_utils from cinder.volume.drivers.dell_emc.powerstore import ( exception as powerstore_exception) from cinder.volume.drivers.dell_emc.powerstore import utils LOG = logging.getLogger(__name__) VOLUME_NOT_MAPPED_ERROR = "0xE0A08001000F" SESSION_ALREADY_FAILED_OVER_ERROR = "0xE0201005000C" TOO_MANY_SNAPS_ERROR = "0xE0A040010003" MAX_SNAPS_IN_VTREE = 32 QOS_IO_RULE_EXISTS_ERROR = "0xE0A0E0010009" QOS_POLICY_EXISTS_ERROR = "0xE02020010004" QOS_UNEXPECTED_RESPONSE_ERROR = "0xE0101001000C" class PowerStoreClient(object): def __init__(self, rest_ip, rest_username, rest_password, verify_certificate, certificate_path, rest_api_connect_timeout, rest_api_read_timeout): self.rest_ip = rest_ip self.rest_username = rest_username self.rest_password = rest_password self.verify_certificate = verify_certificate self.certificate_path = certificate_path self.base_url = "https://%s:/api/rest" % self.rest_ip self.ok_codes = [ requests.codes.ok, requests.codes.created, requests.codes.accepted, requests.codes.no_content, requests.codes.partial_content ] self.rest_api_connect_timeout = rest_api_connect_timeout self.rest_api_read_timeout = rest_api_read_timeout @property def _verify_cert(self): verify_cert = self.verify_certificate if self.verify_certificate and self.certificate_path: verify_cert = self.certificate_path return verify_cert def check_for_setup_error(self): if not all([self.rest_ip, self.rest_username, self.rest_password]): msg = _("REST server IP, username and password must be set.") raise exception.InvalidInput(reason=msg) # log warning if not using certificates if not self.verify_certificate: LOG.warning("Verify certificate is not set, using default of " "False.") self.verify_certificate = False LOG.debug("Successfully initialized PowerStore REST client. " "Server IP: %(ip)s, username: %(username)s. 
" "Verify server's certificate: %(verify_cert)s.", { "ip": self.rest_ip, "username": self.rest_username, "verify_cert": self._verify_cert, }) def _send_request(self, method, url, payload=None, params=None, log_response_data=True): response = None r = requests.Response try: if not params: params = {} request_params = { "auth": (self.rest_username, self.rest_password), "verify": self._verify_cert, "params": params } if payload and method != "GET": request_params["data"] = json.dumps(payload) request_url = self.base_url + url timeout = (self.rest_api_connect_timeout, self.rest_api_read_timeout) r = requests.request(method, request_url, **request_params, timeout=timeout) log_level = logging.DEBUG if r.status_code not in self.ok_codes: log_level = logging.ERROR LOG.log(log_level, "REST Request: %s %s with body %s", r.request.method, r.request.url, strutils.mask_password(r.request.body)) if (log_response_data or log_level == logging.ERROR): msg = ("REST Response: %s with data %s" % (r.status_code, r.text)) else: msg = "REST Response: %s" % r.status_code LOG.log(log_level, msg) try: response = r.json() except ValueError: response = None except requests.exceptions.Timeout as e: r.status_code = requests.codes.internal_server_error LOG.error("The request to URL %(url)s failed with timeout " "exception %(exc)s", {"url": url, "exc": e}) return r, response _send_get_request = functools.partialmethod(_send_request, "GET") _send_post_request = functools.partialmethod(_send_request, "POST") _send_patch_request = functools.partialmethod(_send_request, "PATCH") _send_delete_request = functools.partialmethod(_send_request, "DELETE") def get_chap_config(self): r, response = self._send_get_request( "/chap_config/0", params={ "select": "mode" } ) if r.status_code not in self.ok_codes: msg = _("Failed to query PowerStore CHAP configuration.") LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) return response def get_metrics(self): r, response = self._send_post_request( "/metrics/generate", payload={ "entity": "space_metrics_by_cluster", "entity_id": "0", }, log_response_data=False ) if r.status_code not in self.ok_codes: msg = _("Failed to query PowerStore metrics.") LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) try: latest_metrics = response[-1] return latest_metrics except IndexError: msg = _("Failed to query PowerStore metrics.") LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) def create_volume(self, name, size, pp_id, group_id): r, response = self._send_post_request( "/volume", payload={ "name": name, "size": size, "protection_policy_id": pp_id, "volume_group_id": group_id, } ) if r.status_code not in self.ok_codes: msg = _("Failed to create PowerStore volume %s.") % name LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) return response["id"] def delete_volume_or_snapshot(self, entity_id, entity="volume"): if entity in ["volume group", "volume group snapshot"]: r, response = self._send_delete_request( "/volume_group/%s" % entity_id, payload={ "delete_members": True, }, ) else: r, response = self._send_delete_request("/volume/%s" % entity_id) if r.status_code not in self.ok_codes: if r.status_code == requests.codes.not_found: LOG.warning("PowerStore %(entity)s with id %(entity_id)s is " "not found. 
Ignoring error.", { "entity": entity, "entity_id": entity_id, }) else: msg = (_("Failed to delete PowerStore %(entity)s with id " "%(entity_id)s.") % {"entity": entity, "entity_id": entity_id, }) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) def extend_volume(self, volume_id, size): r, response = self._send_patch_request( "/volume/%s" % volume_id, payload={ "size": size, } ) if r.status_code not in self.ok_codes: msg = (_("Failed to extend PowerStore volume with id %s.") % volume_id) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) def create_snapshot(self, volume_id, name): r, response = self._send_post_request( "/volume/%s/snapshot" % volume_id, payload={ "name": name, } ) if r.status_code not in self.ok_codes: msg = (_("Failed to create snapshot %(snapshot_name)s for " "PowerStore volume with id %(volume_id)s.") % {"snapshot_name": name, "volume_id": volume_id, }) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) return response["id"] def get_snapshot_id_by_name(self, volume_id, name): r, response = self._send_get_request( "/volume", params={ "name": "eq.%s" % name, "protection_data->>source_id": "eq.%s" % volume_id, "type": "eq.Snapshot", } ) if r.status_code not in self.ok_codes: msg = _("Failed to query PowerStore snapshots.") LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) try: snap_id = response[0].get("id") return snap_id except IndexError: msg = (_("PowerStore snapshot %(snapshot_name)s for volume " "with id %(volume_id)s is not found.") % {"snapshot_name": name, "volume_id": volume_id, }) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) def clone_volume_or_snapshot(self, name, entity_id, pp_id, entity="volume"): r, response = self._send_post_request( "/volume/%s/clone" % entity_id, payload={ "name": name, "protection_policy_id": pp_id, } ) if r.status_code not in self.ok_codes: msg = (_("Failed to create clone %(clone_name)s for " "PowerStore %(entity)s with id %(entity_id)s.") % {"clone_name": name, "entity": entity, "entity_id": entity_id, }) LOG.error(msg) if ("messages" in response and response["messages"][0]["code"] == TOO_MANY_SNAPS_ERROR): raise exception.SnapshotLimitReached( set_limit=MAX_SNAPS_IN_VTREE) raise exception.VolumeBackendAPIException(data=msg) return response["id"] def get_all_hosts(self, protocol): r, response = self._send_get_request( "/host", params={ "select": "id,name,host_initiators", "host_initiators->0->>port_type": "eq.%s" % protocol, } ) if r.status_code not in self.ok_codes: msg = _("Failed to query PowerStore hosts.") LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) return response def create_host(self, name, ports): r, response = self._send_post_request( "/host", payload={ "name": name, "os_type": "Linux", "initiators": ports } ) if r.status_code not in self.ok_codes: msg = _("Failed to create PowerStore host %s.") % name LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) return response def modify_host_initiators(self, host_id, **kwargs): r, response = self._send_patch_request( "/host/%s" % host_id, payload={ **kwargs, } ) if r.status_code not in self.ok_codes: msg = (_("Failed to modify initiators of PowerStore host " "with id %s.") % host_id) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) def attach_volume_to_host(self, host_id, volume_id): r, response = self._send_post_request( "/volume/%s/attach" % volume_id, payload={ "host_id": host_id, } ) if r.status_code not in self.ok_codes: msg = 
(_("Failed to attach PowerStore volume %(volume_id)s " "to host %(host_id)s.") % {"volume_id": volume_id, "host_id": host_id, }) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) def get_volume_mapped_hosts(self, volume_id): r, response = self._send_get_request( "/host_volume_mapping", params={ "volume_id": "eq.%s" % volume_id, "select": "host_id" } ) if r.status_code not in self.ok_codes: msg = _("Failed to query PowerStore host volume mappings.") LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) mapped_hosts = [mapped_host["host_id"] for mapped_host in response] return mapped_hosts def get_volume_lun(self, host_id, volume_id): r, response = self._send_get_request( "/host_volume_mapping", params={ "host_id": "eq.%s" % host_id, "volume_id": "eq.%s" % volume_id, "select": "logical_unit_number" } ) if r.status_code not in self.ok_codes: msg = _("Failed to query PowerStore host volume mappings.") LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) try: logical_unit_number = response[0].get("logical_unit_number") return logical_unit_number except IndexError: msg = (_("PowerStore mapping of volume with id %(volume_id)s " "to host %(host_id)s is not found.") % {"volume_id": volume_id, "host_id": host_id, }) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) def get_fc_port(self): r, response = self._send_get_request( "/fc_port", params={ "is_link_up": "eq.True", "select": "wwn" } ) if r.status_code not in self.ok_codes: msg = _("Failed to query PowerStore FC ports.") LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) return response def get_subsystem_nqn(self): r, response = self._send_get_request( "/cluster", params={ "select": "nvm_subsystem_nqn" } ) if r.status_code not in self.ok_codes: msg = _("Failed to query PowerStore NVMe subsystem NQN.") LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) try: nqn = response[0].get("nvm_subsystem_nqn") return nqn except IndexError: msg = _("PowerStore NVMe subsystem NQN is not found.") LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) def get_ip_pool_address(self, protocol): params = {} if protocol == utils.PROTOCOL_ISCSI: params = { "purposes": "cs.{Storage_Iscsi_Target}", "select": "address,ip_port(target_iqn)" } elif protocol == utils.PROTOCOL_NVME: params = { "purposes": "cs.{Storage_NVMe_TCP_Port}", "select": "address" } r, response = self._send_get_request( "/ip_pool_address", params=params ) if r.status_code not in self.ok_codes: msg = _("Failed to query PowerStore IP pool addresses.") LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) return response def detach_volume_from_host(self, host_id, volume_id): r, response = self._send_post_request( "/volume/%s/detach" % volume_id, payload={ "host_id": host_id, } ) if r.status_code not in self.ok_codes: if r.status_code == requests.codes.not_found: LOG.warning("PowerStore volume with id %(volume_id)s is " "not found. Ignoring error.", { "volume_id": volume_id, }) elif ( r.status_code == requests.codes.unprocessable and any([ message["code"] == VOLUME_NOT_MAPPED_ERROR for message in response["messages"] ]) ): LOG.warning("PowerStore volume with id %(volume_id)s is " "not mapped to host with id %(host_id)s. 
" "Ignoring error.", { "volume_id": volume_id, "host_id": host_id, }) else: msg = (_("Failed to detach PowerStore volume %(volume_id)s " "to host %(host_id)s.") % {"volume_id": volume_id, "host_id": host_id, }) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) def restore_from_snapshot(self, volume_id, snapshot_id): r, response = self._send_post_request( "/volume/%s/restore" % volume_id, payload={ "from_snap_id": snapshot_id, "create_backup_snap": False, } ) if r.status_code not in self.ok_codes: msg = (_("Failed to restore PowerStore volume with id " "%(volume_id)s from snapshot with id %(snapshot_id)s.") % {"volume_id": volume_id, "snapshot_id": snapshot_id, }) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) def get_protection_policy_id_by_name(self, name): r, response = self._send_get_request( "/policy", params={ "name": "eq.%s" % name, "type": "eq.Protection", } ) if r.status_code not in self.ok_codes: msg = _("Failed to query PowerStore Protection policies.") LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) try: pp_id = response[0].get("id") return pp_id except IndexError: msg = _("PowerStore Protection policy %s is not found.") % name LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) def get_volume_replication_session_id(self, volume_id): r, response = self._send_get_request( "/replication_session", params={ "local_resource_id": "eq.%s" % volume_id, } ) if r.status_code not in self.ok_codes: msg = _("Failed to query PowerStore Replication sessions.") LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) try: return response[0].get("id") except IndexError: msg = _("Replication session for PowerStore volume with " "id %s is not found.") % volume_id LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) def get_volume_id_by_name(self, name): r, response = self._send_get_request( "/volume", params={ "name": "eq.%s" % name, "type": "in.(Primary,Clone)", } ) if r.status_code not in self.ok_codes: msg = _("Failed to query PowerStore volumes.") LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) try: vol_id = response[0].get("id") return vol_id except IndexError: msg = _("PowerStore volume %s is not found.") % name LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) def unassign_volume_protection_policy(self, volume_id): r, response = self._send_patch_request( "/volume/%s" % volume_id, payload={ "protection_policy_id": "", } ) if r.status_code not in self.ok_codes: msg = (_("Failed to unassign Protection policy for PowerStore " "volume with id %s.") % volume_id) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) @cinder_utils.retry(exception.VolumeBackendAPIException, interval=1, backoff_rate=3, retries=5) def wait_for_replication_session_deletion(self, rep_session_id): r, response = self._send_get_request( "/job", params={ "resource_type": "eq.replication_session", "resource_action": "eq.delete", "resource_id": "eq.%s" % rep_session_id, "state": "eq.COMPLETED", } ) if r.status_code not in self.ok_codes: msg = _("Failed to query PowerStore jobs.") LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) if not response: msg = _("PowerStore Replication session with " "id %s is still exists.") % rep_session_id LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) def failover_volume_replication_session(self, rep_session_id, is_failback): r, response = self._send_post_request( "/replication_session/%s/failover" % rep_session_id, 
payload={ "is_planned": False, "force": is_failback, }, params={ "is_async": True, } ) if r.status_code not in self.ok_codes: msg = (_("Failed to failover PowerStore replication session " "with id %s.") % rep_session_id) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) return response["id"] @cinder_utils.retry(exception.VolumeBackendAPIException, interval=1, backoff_rate=3, retries=5) def wait_for_failover_completion(self, job_id): r, response = self._send_get_request( "/job/%s" % job_id, params={ "select": "resource_action,resource_type," "resource_id,state,response_body", } ) if r.status_code not in self.ok_codes: msg = _("Failed to query PowerStore job with id %s.") % job_id LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) elif ( isinstance(response["response_body"], dict) and any([ message["code"] == SESSION_ALREADY_FAILED_OVER_ERROR for message in response["response_body"].get("messages", []) ]) ): # Replication session is already in Failed-Over state. return True elif response["state"] == "COMPLETED": return True elif response["state"] in ["FAILED", "UNRECOVERABLE_FAILED"]: return False else: msg = _("Failover of PowerStore Replication session with id " "%s is still in progress.") % response["resource_id"] LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) def reprotect_volume_replication_session(self, rep_session_id): r, response = self._send_post_request( "/replication_session/%s/reprotect" % rep_session_id ) if r.status_code not in self.ok_codes: msg = (_("Failed to reprotect PowerStore replication session " "with id %s.") % rep_session_id) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) def create_vg(self, name): r, response = self._send_post_request( "/volume_group", payload={ "name": name, } ) if r.status_code not in self.ok_codes: msg = _("Failed to create PowerStore volume group %s.") % name LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) return response["id"] def get_vg_id_by_name(self, name): r, response = self._send_get_request( "/volume_group", params={ "name": "eq.%s" % name, } ) if r.status_code not in self.ok_codes: msg = _("Failed to query PowerStore volume groups.") LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) try: group_id = response[0].get("id") return group_id except IndexError: msg = _("PowerStore volume group %s is not found.") % name LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) def add_volumes_to_vg(self, group_id, volume_ids): r, response = self._send_post_request( "/volume_group/%s/add_members" % group_id, payload={ "volume_ids": volume_ids, } ) if r.status_code not in self.ok_codes: msg = (_("Failed to add volumes to PowerStore volume group " "with id %(group_id)s. Volumes: %(volume_ids)s.") % {"group_id": group_id, "volume_ids": volume_ids, }) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) def remove_volumes_from_vg(self, group_id, volume_ids): r, response = self._send_post_request( "/volume_group/%s/remove_members" % group_id, payload={ "volume_ids": volume_ids, } ) if r.status_code not in self.ok_codes: msg = (_("Failed to remove volumes from PowerStore volume group " "with id %(group_id)s. 
Volumes: %(volume_ids)s.") % {"group_id": group_id, "volume_ids": volume_ids, }) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) def create_vg_snapshot(self, group_id, name): r, response = self._send_post_request( "/volume_group/%s/snapshot" % group_id, payload={ "name": name, } ) if r.status_code not in self.ok_codes: msg = (_("Failed to create snapshot %(snapshot_name)s for " "PowerStore volume group with id %(group_id)s.") % {"snapshot_name": name, "group_id": group_id, }) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) return response["id"] def get_vg_snapshot_id_by_name(self, group_id, name): r, response = self._send_get_request( "/volume_group", params={ "name": "eq.%s" % name, "protection_data->>source_id": "eq.%s" % group_id, "type": "eq.Snapshot", } ) if r.status_code not in self.ok_codes: msg = _("Failed to query PowerStore volume groups snapshots.") LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) try: vg_snap_id = response[0].get("id") return vg_snap_id except IndexError: msg = (_("PowerStore snapshot %(snapshot_name)s for volume group" "with id %(group_id)s is not found.") % {"snapshot_name": name, "group_id": group_id, }) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) def clone_vg_or_vg_snapshot(self, name, entity_id, entity="volume group"): r, response = self._send_post_request( "/volume_group/%s/clone" % entity_id, payload={ "name": name, } ) if r.status_code not in self.ok_codes: msg = (_("Failed to create clone %(clone_name)s for " "PowerStore %(entity)s with id %(entity_id)s.") % {"clone_name": name, "entity": entity, "entity_id": entity_id, }) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) return response["id"] def rename_volume(self, volume_id, name): r, response = self._send_patch_request( "/volume/%s" % volume_id, payload={ "name": name, } ) if r.status_code not in self.ok_codes: msg = (_("Failed to rename PowerStore volume with id %s.") % volume_id) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) def get_array_version(self): r, response = self._send_get_request( "/software_installed", params={ "select": "release_version", "is_cluster": "eq.True" } ) if r.status_code not in self.ok_codes: msg = _("Failed to query PowerStore array version.") LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) return response[0].get("release_version") def get_volume_nguid(self, volume_id): r, response = self._send_get_request( "/volume/%s" % volume_id, params={ "select": "nguid", } ) if r.status_code not in self.ok_codes: msg = (_("Failed to query PowerStore volume with id %s.") % volume_id) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) nguid = response["nguid"].split('.')[1] return nguid def get_qos_policy_id_by_name(self, name): r, response = self._send_get_request( "/policy", params={ "name": "eq.%s" % name, "type": "eq.QoS", } ) if r.status_code not in self.ok_codes: msg = _("Failed to query PowerStore QoS policy " "with name %s." % name) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) if len(response) > 0: qos_policy_id = response[0].get("id") return qos_policy_id return None def create_qos_io_rule(self, io_rule_params): r, response = self._send_post_request( "/io_limit_rule", payload=io_rule_params ) if r.status_code not in self.ok_codes: msg = _("Failed to create PowerStore I/O limit " "rule %s." 
% io_rule_params["name"]) LOG.error(msg) if ("messages" in response and (response["messages"][0]["code"] == QOS_IO_RULE_EXISTS_ERROR or response["messages"][0]["code"] == QOS_UNEXPECTED_RESPONSE_ERROR)): raise ( powerstore_exception. DellPowerStoreQoSIORuleExists(name=io_rule_params["name"])) raise exception.VolumeBackendAPIException(data=msg) return response["id"] def create_qos_policy(self, policy_params): r, response = self._send_post_request( "/policy", payload=policy_params ) if r.status_code not in self.ok_codes: msg = _("Failed to create PowerStore QoS " "policy %s." % policy_params["name"]) LOG.error(msg) if ("messages" in response and (response["messages"][0]["code"] == QOS_POLICY_EXISTS_ERROR or response["messages"][0]["code"] == QOS_UNEXPECTED_RESPONSE_ERROR)): raise ( powerstore_exception. DellPowerStoreQoSPolicyExists(name=policy_params["name"])) raise exception.VolumeBackendAPIException(data=msg) return response["id"] def update_volume_with_qos_policy(self, provider_id, qos_policy_id): r, response = self._send_patch_request( "/volume/%s" % provider_id, payload={ "qos_performance_policy_id": qos_policy_id, } ) if r.status_code not in self.ok_codes: msg = _("Failed to update PowerStore volume %(volume_id)s with " "QoS policy %(policy_id)s." % {"volume_id": provider_id, "policy_id": qos_policy_id}) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) def update_qos_io_rule(self, io_rule_name, io_rule_params): r, response = self._send_patch_request( "/io_limit_rule/name:%s" % io_rule_name, payload=io_rule_params ) if r.status_code not in self.ok_codes: msg = (_("Failed to update PowerStore I/O limit rule %s.") % io_rule_name) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/drivers/dell_emc/powerstore/driver.py0000664000175000017500000003036000000000000025144 0ustar00zuulzuul00000000000000# Copyright (c) 2020 Dell Inc. or its subsidiaries. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Cinder driver for Dell EMC PowerStore.""" from oslo_config import cfg from oslo_log import log as logging from cinder import exception from cinder.i18n import _ from cinder import interface from cinder.volume import configuration from cinder.volume import driver from cinder.volume.drivers.dell_emc.powerstore import adapter from cinder.volume.drivers.dell_emc.powerstore import options from cinder.volume.drivers.dell_emc.powerstore import utils from cinder.volume.drivers.san import san from cinder.volume import manager POWERSTORE_OPTS = options.POWERSTORE_OPTS CONF = cfg.CONF CONF.register_opts(POWERSTORE_OPTS, group=configuration.SHARED_CONF_GROUP) LOG = logging.getLogger(__name__) @interface.volumedriver class PowerStoreDriver(driver.VolumeDriver): """Dell EMC PowerStore Driver. .. 
code-block:: none Version history: 1.0.0 - Initial version 1.0.1 - Add CHAP support 1.1.0 - Add volume replication v2.1 support 1.1.1 - Add Consistency Groups support 1.1.2 - Fix iSCSI targets not being returned from the REST API call if targets are used for multiple purposes (iSCSI target, Replication target, etc.) 1.2.0 - Add NVMe-OF support 1.2.1 - Report trim/discard support 1.2.2 - QoS (Quality of Service) support 1.2.3 - Added Cinder active/active support """ VERSION = "1.2.3" VENDOR = "Dell EMC" SUPPORTS_ACTIVE_ACTIVE = True # ThirdPartySystems wiki page CI_WIKI_NAME = "DellEMC_PowerStore_CI" def __init__(self, *args, **kwargs): super(PowerStoreDriver, self).__init__(*args, **kwargs) self.active_backend_id = kwargs.get("active_backend_id") self.adapters = {} self.configuration.append_config_values(san.san_opts) self.configuration.append_config_values(POWERSTORE_OPTS) self.replication_configured = False self.replication_devices = None def _init_vendor_properties(self): properties = {} self._set_property( properties, utils.POWERSTORE_PP_KEY, "PowerStore Protection Policy.", _("Specifies the PowerStore Protection Policy for a " "volume type. Protection Policy is assigned to a volume during " "creation."), "string" ) return properties, "powerstore" @staticmethod def get_driver_options(): return POWERSTORE_OPTS def do_setup(self, context): if not self.active_backend_id: self.active_backend_id = manager.VolumeManager.FAILBACK_SENTINEL storage_protocol = self.configuration.safe_get("storage_protocol") if self.configuration.safe_get(options.POWERSTORE_NVME): adapter_class = adapter.NVMEoFAdapter elif ( storage_protocol and storage_protocol.lower() == utils.PROTOCOL_FC.lower() ): adapter_class = adapter.FibreChannelAdapter else: adapter_class = adapter.iSCSIAdapter self.replication_devices = ( self.configuration.safe_get("replication_device") or [] ) self.adapters[manager.VolumeManager.FAILBACK_SENTINEL] = adapter_class( **self._get_device_configuration() ) for index, device in enumerate(self.replication_devices): self.adapters[device["backend_id"]] = adapter_class( **self._get_device_configuration(is_primary=False, device_index=index) ) def check_for_setup_error(self): if len(self.replication_devices) > 1: msg = _("PowerStore driver does not support more than one " "replication device.") raise exception.InvalidInput(reason=msg) self.replication_configured = True for adapter in self.adapters.values(): adapter.check_for_setup_error() def create_volume(self, volume): return self.adapter.create_volume(volume) def delete_volume(self, volume): if volume.is_replicated(): self.adapter.teardown_volume_replication(volume) self.adapter.delete_volume(volume) if not self.is_failed_over: for backend_id in self.failover_choices: self.adapters.get(backend_id).delete_volume(volume) else: self.adapter.delete_volume(volume) def extend_volume(self, volume, new_size): return self.adapter.extend_volume(volume, new_size) def create_snapshot(self, snapshot): return self.adapter.create_snapshot(snapshot) def delete_snapshot(self, snapshot): self.adapter.delete_snapshot(snapshot) if snapshot.volume.is_replicated() and not self.is_failed_over: for backend_id in self.failover_choices: self.adapters.get(backend_id).delete_snapshot(snapshot) def create_cloned_volume(self, volume, src_vref): return self.adapter.create_volume_from_source(volume, src_vref) def create_volume_from_snapshot(self, volume, snapshot): return self.adapter.create_volume_from_source(volume, snapshot) def initialize_connection(self, volume, 
connector, **kwargs): return self.adapter.initialize_connection(volume, connector, **kwargs) def terminate_connection(self, volume, connector, **kwargs): return self.adapter.terminate_connection(volume, connector, **kwargs) def revert_to_snapshot(self, context, volume, snapshot): return self.adapter.revert_to_snapshot(volume, snapshot) def _update_volume_stats(self): stats = self.adapter.update_volume_stats() stats["driver_version"] = self.VERSION stats["vendor_name"] = self.VENDOR stats["replication_enabled"] = self.replication_enabled stats["replication_targets"] = self.replication_targets self._stats = stats def create_export(self, context, volume, connector): pass def ensure_export(self, context, volume): pass def remove_export(self, context, volume): pass def failover_host(self, context, volumes, secondary_id=None, groups=None): active_backend_id, model_updates, group_update_list = ( self.failover(context, volumes, secondary_id, groups)) self.failover_completed(context, secondary_id) return active_backend_id, model_updates, group_update_list def failover(self, context, volumes, secondary_id=None, groups=None): """Like failover but for a host that is clustered.""" LOG.info("Invoking failover with target %s.", secondary_id) if secondary_id not in self.failover_choices: msg = (_("Target %(target)s is not a valid choice. " "Valid choices: %(choices)s.") % {"target": secondary_id, "choices": ', '.join(self.failover_choices)}) LOG.error(msg) raise exception.InvalidReplicationTarget(reason=msg) is_failback = secondary_id == manager.VolumeManager.FAILBACK_SENTINEL self.active_backend_id = secondary_id volumes_updates, groups_updates = self.adapter.failover_host( volumes, groups, is_failback ) LOG.info("Failover host completed.") return secondary_id, volumes_updates, groups_updates def failover_completed(self, context, active_backend_id=None): """This method is called after failover for clustered backends.""" LOG.info("Invoking failover_completed with target %s.", active_backend_id) target_device = self.replication_devices[0]["backend_id"] if (not active_backend_id or active_backend_id == manager.VolumeManager.FAILBACK_SENTINEL): # failback operation self.active_backend_id = manager.VolumeManager.FAILBACK_SENTINEL elif (active_backend_id == target_device or active_backend_id == "failed over"): # failover operation self.active_backend_id = target_device else: choices = ['None', manager.VolumeManager.FAILBACK_SENTINEL, 'failed over', target_device] msg = (_("Target %(target)s is not a valid choice. " "Valid choices: %(choices)s.") % {"target": active_backend_id, "choices": ', '.join(choices)})
LOG.error(msg) raise exception.InvalidReplicationTarget(reason=msg) LOG.info("Failover completion completed: " "active_backend_id = %s.", self.active_backend_id) def create_group(self, context, group): return self.adapter.create_group(group) def delete_group(self, context, group, volumes): return self.adapter.delete_group(group) def update_group(self, context, group, add_volumes=None, remove_volumes=None): return self.adapter.update_group(group, add_volumes, remove_volumes) def create_group_snapshot(self, context, group_snapshot, snapshots): return self.adapter.create_group_snapshot(group_snapshot) def delete_group_snapshot(self, context, group_snapshot, snapshots): return self.adapter.delete_group_snapshot(group_snapshot) def create_group_from_src(self, context, group, volumes, group_snapshot=None, snapshots=None, source_group=None, source_vols=None): source = group_snapshot or source_group return self.adapter.create_group_from_source(group, volumes, source, snapshots, source_vols) @property def adapter(self): return self.adapters.get(self.active_backend_id) @property def failover_choices(self): return ( set(self.adapters.keys()).difference({self.active_backend_id}) ) @property def is_failed_over(self): return ( self.active_backend_id != manager.VolumeManager.FAILBACK_SENTINEL ) @property def replication_enabled(self): return self.replication_configured and not self.is_failed_over @property def replication_targets(self): if self.replication_enabled: return list(self.adapters.keys()) return [] def _get_device_configuration(self, is_primary=True, device_index=0): conf = {} if is_primary: get_value = self.configuration.safe_get backend_id = manager.VolumeManager.FAILBACK_SENTINEL else: get_value = self.replication_devices[device_index].get backend_id = get_value("backend_id") conf["backend_id"] = backend_id conf["backend_name"] = ( self.configuration.safe_get("volume_backend_name") or "powerstore" ) conf["ports"] = get_value(options.POWERSTORE_PORTS) or [] conf["rest_ip"] = get_value("san_ip") conf["rest_username"] = get_value("san_login") conf["rest_password"] = get_value("san_password") conf["verify_certificate"] = get_value("driver_ssl_cert_verify") conf["certificate_path"] = get_value("driver_ssl_cert_path") conf["rest_api_connect_timeout"] = ( self.configuration.safe_get(utils.POWERSTORE_REST_CONNECT_TIMEOUT)) conf["rest_api_read_timeout"] = ( self.configuration.safe_get(utils.POWERSTORE_REST_READ_TIMEOUT)) return conf ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/drivers/dell_emc/powerstore/exception.py0000664000175000017500000000156300000000000025652 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
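#
# Illustrative sketch (not part of the driver): the two exceptions defined
# below carry a printf-style ``message`` template that is filled in from
# keyword arguments (e.g. ``name=...``) when they are raised. The minimal
# standalone class here mimics that behaviour without depending on Cinder;
# the class name and the sample value are hypothetical.
class _ExampleTemplateError(Exception):
    message = "QoS I/O Rule %(name)s already exists."

    def __init__(self, **kwargs):
        super().__init__(self.message % kwargs)

# raise _ExampleTemplateError(name="io-rule-1234")
#   -> "QoS I/O Rule io-rule-1234 already exists."
#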
from cinder import exception from cinder.i18n import _ class DellPowerStoreQoSIORuleExists(exception.VolumeDriverException): message = _('QoS I/O Rule %(name)s already exists.') class DellPowerStoreQoSPolicyExists(exception.VolumeDriverException): message = _('QoS policy %(name)s already exists.') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/drivers/dell_emc/powerstore/nfs.py0000664000175000017500000002201100000000000024431 0ustar00zuulzuul00000000000000# Copyright (c) 2020 Dell Inc. or its subsidiaries. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import collections import errno import os from oslo_concurrency import processutils as putils from oslo_config import cfg from oslo_log import log as logging from cinder import coordination from cinder import exception from cinder.i18n import _ from cinder.image import image_utils from cinder import interface from cinder.volume import configuration from cinder.volume.drivers.nfs import nfs_opts from cinder.volume.drivers.nfs import NfsDriver LOG = logging.getLogger(__name__) CONF = cfg.CONF CONF.register_opts(nfs_opts, group=configuration.SHARED_CONF_GROUP) class PowerStoreNFSDriverInitialization(NfsDriver): """Implementation of PowerStoreNFSDriver initialization. Added multiattach support option and checking that the required packages are installed. """ driver_volume_type = 'nfs' driver_prefix = 'nfs' volume_backend_name = 'PowerStore_NFS' VERSION = '1.0.0' VENDOR = 'Dell' # ThirdPartySystems wiki page CI_WIKI_NAME = "DellEMC_PowerStore_CI" def __init__(self, execute=putils.execute, *args, **kwargs): super(PowerStoreNFSDriverInitialization, self).__init__( execute, *args, **kwargs) self.multiattach_support = False def do_setup(self, context): super(PowerStoreNFSDriverInitialization, self).do_setup(context) self._check_multiattach_support() def _check_multiattach_support(self): """Enable multiattach support if nfs_qcow2_volumes disabled.""" self.multiattach_support = not self.configuration.nfs_qcow2_volumes if not self.multiattach_support: msg = _("Multi-attach feature won't work " "with qcow2 volumes enabled for nfs") LOG.warning(msg) def _check_package_is_installed(self, package): try: self._execute(package, check_exit_code=False, run_as_root=False) except OSError as exc: if exc.errno == errno.ENOENT: msg = _('%s is not installed') % package raise exception.VolumeDriverException(msg) else: raise def check_for_setup_error(self): self._check_package_is_installed('dellfcopy') def _update_volume_stats(self): super(PowerStoreNFSDriverInitialization, self)._update_volume_stats() self._stats["vendor_name"] = self.VENDOR self._stats['multiattach'] = self.multiattach_support @interface.volumedriver class PowerStoreNFSDriver(PowerStoreNFSDriverInitialization): """Dell PowerStore NFS Driver. .. 
code-block:: none Version history: 1.0.0 - Initial version """ @coordination.synchronized('{self.driver_prefix}-{volume[id]}') def delete_volume(self, volume): """Deletes a logical volume.""" if not volume.provider_location: LOG.warning("Volume %s does not have provider_location " "specified, skipping", volume.name) return self._ensure_share_mounted(volume.provider_location) info_path = self._local_path_volume_info(volume) info = self._read_info_file(info_path, empty_if_missing=True) if info: base_snap_path = os.path.join(self._local_volume_dir(volume), info['active']) self._delete(info_path) self._delete(base_snap_path) base_volume_path = self._local_path_volume(volume) self._delete(base_volume_path) def extend_volume(self, volume, new_size): """Extend an existing volume to the new size.""" if self._is_volume_attached(volume): msg = (_("Cannot extend volume %s while it is attached") % volume.name_id) raise exception.ExtendVolumeError(msg) LOG.info("Extending volume %s.", volume.name_id) extend_by = int(new_size) - volume.size if not self._is_share_eligible(volume.provider_location, extend_by): raise exception.ExtendVolumeError( reason="Insufficient space to extend " "volume %s to %sG" % (volume.name_id, new_size) ) path = self.local_path(volume) info = self._qemu_img_info(path, volume.name) backing_fmt = info.file_format if backing_fmt not in ['raw', 'qcow2']: msg = _("Unrecognized backing format: %s") % backing_fmt raise exception.InvalidVolume(msg) image_utils.resize_image(path, new_size) def _do_fast_clone_file(self, volume_path, new_volume_path): """Fast clone a file using a dellfcopy package.""" command = ['dellfcopy', '-o', 'fastclone', '-s', volume_path, '-d', new_volume_path, '-v', '1'] try: LOG.info('Cloning file from %s to %s', volume_path, new_volume_path) self._execute(*command, run_as_root=self._execute_as_root) LOG.info('Cloning volume: %s succeeded', volume_path) except putils.ProcessExecutionError: raise def _create_volume_from_snapshot(self, volume, snapshot): """Creates a volume from a snapshot.""" LOG.debug("Creating volume %(vol)s from snapshot %(snap)s", {'vol': volume.name_id, 'snap': snapshot.id}) volume.provider_location = self._find_share(volume) self._copy_volume_from_snapshot(snapshot, volume, volume.size) return {'provider_location': volume.provider_location} def _copy_volume_from_snapshot(self, snapshot, volume, volume_size, src_encryption_key_id=None, new_encryption_key_id=None): """Copy snapshot to destination volume.""" LOG.debug("snapshot: %(snap)s, volume: %(vol)s, ", {'snap': snapshot.id, 'vol': volume.id, 'size': volume_size}) info_path = self._local_path_volume_info(snapshot.volume) snap_info = self._read_info_file(info_path) vol_path = self._local_volume_dir(snapshot.volume) forward_file = snap_info[snapshot.id] forward_path = os.path.join(vol_path, forward_file) img_info = self._qemu_img_info(forward_path, snapshot.volume.name) path_to_snap_img = os.path.join(vol_path, img_info.backing_file) path_to_new_vol = self._local_path_volume(volume) if img_info.backing_file_format == 'raw': image_utils.convert_image(path_to_snap_img, path_to_new_vol, img_info.backing_file_format, run_as_root=self._execute_as_root) else: self._do_fast_clone_file(path_to_snap_img, path_to_new_vol) command = ['qemu-img', 'rebase', '-b', "", '-F', img_info.backing_file_format, path_to_new_vol] self._execute(*command, run_as_root=self._execute_as_root) self._set_rw_permissions(path_to_new_vol) def _create_cloned_volume(self, volume, src_vref, context): """Clone src volume to 
destination volume.""" LOG.debug('Cloning volume %(src)s to volume %(dst)s', {'src': src_vref.id, 'dst': volume.id}) volume_name = CONF.volume_name_template % volume.name_id vol_attrs = ['provider_location', 'size', 'id', 'name', 'status', 'volume_type', 'metadata', 'obj_context'] Volume = collections.namedtuple('Volume', vol_attrs) volume_info = Volume(provider_location=src_vref.provider_location, size=src_vref.size, id=volume.name_id, name=volume_name, status=src_vref.status, volume_type=src_vref.volume_type, metadata=src_vref.metadata, obj_context=volume.obj_context) src_volue_path = self._local_path_volume(src_vref) dst_volume = self._local_path_volume(volume_info) self._do_fast_clone_file(src_volue_path, dst_volume) if src_vref.admin_metadata and 'format' in src_vref.admin_metadata: volume.admin_metadata['format'] = ( src_vref.admin_metadata['format']) with volume.obj_as_admin(): volume.save() return {'provider_location': src_vref.provider_location} ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/drivers/dell_emc/powerstore/options.py0000664000175000017500000000456600000000000025355 0ustar00zuulzuul00000000000000# Copyright (c) 2020 Dell Inc. or its subsidiaries. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Configuration options for Dell EMC PowerStore Cinder driver.""" from oslo_config import cfg from cinder.volume.drivers.dell_emc.powerstore import utils as store_utils POWERSTORE_APPLIANCES = "powerstore_appliances" POWERSTORE_PORTS = "powerstore_ports" POWERSTORE_NVME = "powerstore_nvme" POWERSTORE_OPTS = [ cfg.ListOpt(POWERSTORE_APPLIANCES, default=[], help="Appliances names. Comma separated list of PowerStore " "appliances names used to provision volumes.", deprecated_for_removal=True, deprecated_reason="Is not used anymore. " "PowerStore Load Balancer is used to " "provision volumes instead.", deprecated_since="Wallaby"), cfg.ListOpt(POWERSTORE_PORTS, default=[], help="Allowed ports. Comma separated list of PowerStore " "iSCSI IPs or FC WWNs (ex. 58:cc:f0:98:49:22:07:02) " "to be used. If option is not set all ports are allowed." ), cfg.BoolOpt(POWERSTORE_NVME, default=False, help="Connect PowerStore volumes using NVMe-OF."), cfg.IntOpt(store_utils.POWERSTORE_REST_CONNECT_TIMEOUT, default=30, min=1, help='Use this value to specify the connect ' 'timeout value (in seconds) for REST API calls ' 'to the PowerStore backend.'), cfg.IntOpt(store_utils.POWERSTORE_REST_READ_TIMEOUT, default=30, min=1, help='Use this value to specify the read ' 'timeout value (in seconds) for REST API calls ' 'to the PowerStore backend.') ] ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/drivers/dell_emc/powerstore/utils.py0000664000175000017500000001267500000000000025022 0ustar00zuulzuul00000000000000# Copyright (c) 2020 Dell Inc. or its subsidiaries. # All Rights Reserved. 
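#
# Illustrative sketch (not part of the driver): the POWERSTORE_OPTS list in
# options.py above is consumed through oslo.config. The standalone snippet
# below registers two similarly named options on a private ConfigOpts
# instance and reads back their defaults; the option names mirror the real
# ones above, everything else is illustrative only.
def _example_register_opts():
    from oslo_config import cfg

    opts = [
        cfg.ListOpt("powerstore_ports", default=[],
                    help="Allowed iSCSI IPs or FC WWNs."),
        cfg.BoolOpt("powerstore_nvme", default=False,
                    help="Connect volumes using NVMe-OF."),
    ]
    conf = cfg.ConfigOpts()
    conf.register_opts(opts)
    conf([])  # parse an empty command line so defaults become readable
    return conf.powerstore_ports, conf.powerstore_nvme
#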
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Utilities for Dell EMC PowerStore Cinder driver.""" import functools import re from oslo_log import log as logging from oslo_utils import units from packaging import version from cinder.common import constants from cinder import exception from cinder.i18n import _ from cinder.objects import fields from cinder.volume import volume_utils LOG = logging.getLogger(__name__) CHAP_DEFAULT_USERNAME = "PowerStore_iSCSI_CHAP_Username" CHAP_DEFAULT_SECRET_LENGTH = 60 PROTOCOL_FC = constants.FC PROTOCOL_ISCSI = constants.ISCSI PROTOCOL_NVME = "NVMe" POWERSTORE_PP_KEY = "powerstore:protection_policy" VOLUME_ATTACH_OPERATION = 1 VOLUME_DETACH_OPERATION = 2 POWERSTORE_REST_CONNECT_TIMEOUT = "rest_api_call_connect_timeout" POWERSTORE_REST_READ_TIMEOUT = "rest_api_call_read_timeout" def bytes_to_gib(size_in_bytes): """Convert size in bytes to GiB. :param size_in_bytes: size in bytes :return: size in GiB """ return size_in_bytes // units.Gi def gib_to_bytes(size_in_gb): """Convert size in GiB to bytes. :param size_in_gb: size in GiB :return: size in bytes """ return size_in_gb * units.Gi def extract_fc_wwpns(connector): """Convert connector FC ports to appropriate format with colons. :param connector: connection properties :return: FC ports in appropriate format with colons """ if "wwnns" not in connector or "wwpns" not in connector: msg = _("Host %s does not have FC initiators.") % connector["host"] LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) return [":".join(re.findall("..", wwpn)) for wwpn in connector["wwpns"]] def fc_wwn_to_string(wwn): """Convert FC WWN to string without colons. :param wwn: FC WWN :return: FC WWN without colons """ return wwn.replace(":", "") def iscsi_portal_with_port(address): """Add default port 3260 to iSCSI portal :param address: iSCSI portal without port :return: iSCSI portal with default port 3260 """ return "%(address)s:3260" % {"address": address} def powerstore_host_name(connector, protocol): """Generate PowerStore host name for connector. :param connector: connection properties :param protocol: storage protocol (FC, iSCSI or NVMe) :return: unique host name """ return ("%(host)s-%(protocol)s" % {"host": connector["host"], "protocol": protocol, }) def filter_hosts_by_initiators(hosts, initiators): """Filter hosts by given list of initiators. :param hosts: list of PowerStore host objects :param initiators: list of initiators :return: PowerStore hosts list """ hosts_names_found = set() for host in hosts: for initiator in host["host_initiators"]: if initiator["port_name"] in initiators: hosts_names_found.add(host["name"]) return list(filter(lambda host: host["name"] in hosts_names_found, hosts)) def is_multiattached_to_host(volume_attachment, host_name): """Check if volume is attached to multiple instances on one host. When multiattach is enabled, a volume could be attached to two or more instances which are hosted on one nova host. 
Because PowerStore cannot recognize the volume is attached to two or more instances, we should keep the volume attached to the nova host until the volume is detached from the last instance. :param volume_attachment: list of VolumeAttachment objects :param host_name: OpenStack host name :return: multiattach flag """ if not volume_attachment: return False attachments = [ attachment for attachment in volume_attachment if (attachment.attach_status == fields.VolumeAttachStatus.ATTACHED and attachment.attached_host == host_name) ] return len(attachments) > 1 def get_chap_credentials(): """Generate CHAP credentials. :return: CHAP username and secret """ return { "chap_single_username": CHAP_DEFAULT_USERNAME, "chap_single_password": volume_utils.generate_password( CHAP_DEFAULT_SECRET_LENGTH ) } def get_protection_policy_from_volume(volume): """Get PowerStore Protection policy name from volume type. :param volume: OpenStack Volume object :return: Protection policy name """ return volume.volume_type.extra_specs.get(POWERSTORE_PP_KEY) def is_group_a_cg_snapshot_type(func): """Check if group is a consistent snapshot group. Fallback to generic volume group implementation if consistent group snapshot is not enabled. """ @functools.wraps(func) def inner(self, *args, **kwargs): if not volume_utils.is_group_a_cg_snapshot_type(args[0]): raise NotImplementedError return func(self, *args, **kwargs) return inner def version_gte(ver1, ver2): return version.parse(ver1) >= version.parse(ver2) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315577.3391206 cinder-27.0.0/cinder/volume/drivers/dell_emc/powervault/0000775000175000017500000000000000000000000023274 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/drivers/dell_emc/powervault/__init__.py0000664000175000017500000000000000000000000025373 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/drivers/dell_emc/powervault/client.py0000664000175000017500000000175100000000000025130 0ustar00zuulzuul00000000000000# Copyright 2014 Objectif Libre # Copyright 2015 DotHill Systems # Copyright 2016-2020 Seagate Technology or one of its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
# import cinder.volume.drivers.stx.client as client class PVMEClient(client.STXClient): def __init__(self, host, login, password, protocol, ssl_verify): super(PVMEClient, self).__init__(host, login, password, protocol, ssl_verify) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/drivers/dell_emc/powervault/common.py0000664000175000017500000000477200000000000025150 0ustar00zuulzuul00000000000000# Copyright 2014 Objectif Libre # Copyright 2015 DotHill Systems # Copyright 2016-2020 Seagate Technology or one of its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # from oslo_config import cfg from cinder.volume import configuration from cinder.volume import driver import cinder.volume.drivers.dell_emc.powervault.client as pvme_client import cinder.volume.drivers.stx.common as common common_opts = [ cfg.StrOpt('pvme_pool_name', default='A', help="Pool or Vdisk name to use for volume creation."), ] iscsi_opts = [ cfg.ListOpt('pvme_iscsi_ips', default=[], help="List of comma-separated target iSCSI IP addresses."), ] CONF = cfg.CONF CONF.register_opts(common_opts, group=configuration.SHARED_CONF_GROUP) CONF.register_opts(iscsi_opts, group=configuration.SHARED_CONF_GROUP) class PVMECommon(common.STXCommon): VERSION = "2.0" def __init__(self, config): self.config = config self.vendor_name = "PVME" self.backend_name = self.config.pvme_pool_name self.backend_type = 'virtual' self.api_protocol = 'http' if self.config.driver_use_ssl: self.api_protocol = 'https' ssl_verify = self.config.driver_ssl_cert_verify if ssl_verify and self.config.driver_ssl_cert_path: ssl_verify = self.config.driver_ssl_cert_path self.client = pvme_client.PVMEClient(self.config.san_ip, self.config.san_login, self.config.san_password, self.api_protocol, ssl_verify) @staticmethod def get_driver_options(): additional_opts = driver.BaseVD._get_oslo_driver_opts( 'san_ip', 'san_login', 'san_password', 'driver_use_ssl', 'driver_ssl_cert_verify', 'driver_ssl_cert_path') return common_opts + additional_opts ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/drivers/dell_emc/powervault/fc.py0000664000175000017500000000304500000000000024240 0ustar00zuulzuul00000000000000# Copyright 2014 Objectif Libre # Copyright 2015 Dot Hill Systems Corp. # Copyright 2016-2020 Seagate Technology or one of its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
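#
# Illustrative sketch (not part of the driver): PVMECommon above passes
# either a boolean or a CA bundle path as the ``ssl_verify`` argument, which
# is exactly what the ``requests`` library accepts for its ``verify``
# parameter. The helper below reproduces that small decision in isolation;
# the argument names and the sample path are illustrative.
def _example_resolve_ssl_verify(cert_verify, cert_path=None):
    """Return True/False or a CA bundle path suitable for requests' verify."""
    if cert_verify and cert_path:
        return cert_path
    return cert_verify

# _example_resolve_ssl_verify(True, "/etc/ssl/certs/array-ca.pem") -> path
# _example_resolve_ssl_verify(False) -> False
#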
# from cinder import interface import cinder.volume.drivers.dell_emc.powervault.common as pvme_common import cinder.volume.drivers.stx.fc as fc @interface.volumedriver class PVMEFCDriver(fc.STXFCDriver): """Cinder FC driver for Dell EMC PowerVault ME-Series arrays. .. code-block:: default Version history: 1.0 - Inheriting from Seagate Cinder driver. """ VERSION = "2.0" CI_WIKI_NAME = "DellEMC_PowerVault_ME_CI" SUPPORTED = True def __init__(self, *args, **kwargs): super(PVMEFCDriver, self).__init__(*args, **kwargs) self.configuration.append_config_values(pvme_common.common_opts) @staticmethod def get_driver_options(): return pvme_common.PVMECommon.get_driver_options() def _init_common(self): return pvme_common.PVMECommon(self.configuration) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/drivers/dell_emc/powervault/iscsi.py0000664000175000017500000000335000000000000024761 0ustar00zuulzuul00000000000000# Copyright 2014 Objectif Libre # Copyright 2015 Dot Hill Systems Corp. # Copyright 2016-2020 Seagate Technology or one of its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # from cinder import interface import cinder.volume.drivers.dell_emc.powervault.common as pvme_common import cinder.volume.drivers.stx.iscsi as iscsi @interface.volumedriver class PVMEISCSIDriver(iscsi.STXISCSIDriver): """Cinder iSCSI driver for Dell EMC PowerVault ME-Series arrays. .. code-block:: default Version history: 1.0 - Inheriting from Seagate Cinder driver. """ VERSION = "2.0" CI_WIKI_NAME = "DellEMC_PowerVault_ME_CI" SUPPORTED = True def __init__(self, *args, **kwargs): super(PVMEISCSIDriver, self).__init__(*args, **kwargs) self.configuration.append_config_values(pvme_common.common_opts) self.configuration.append_config_values(pvme_common.iscsi_opts) self.iscsi_ips = self.configuration.pvme_iscsi_ips @staticmethod def get_driver_options(): return (pvme_common.PVMECommon.get_driver_options() + pvme_common.iscsi_opts) def _init_common(self): return pvme_common.PVMECommon(self.configuration) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315577.3431208 cinder-27.0.0/cinder/volume/drivers/dell_emc/sc/0000775000175000017500000000000000000000000021471 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/drivers/dell_emc/sc/__init__.py0000664000175000017500000000000000000000000023570 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/drivers/dell_emc/sc/storagecenter_api.py0000664000175000017500000045763200000000000025562 0ustar00zuulzuul00000000000000# Copyright (c) 2015-2017 Dell Inc, or its subsidiaries. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Interface for interacting with the Dell Storage Center array.""" import http.client as http_client import json import os.path import uuid import eventlet from oslo_log import log as logging from oslo_utils import excutils import requests from cinder.common import constants from cinder import exception from cinder.i18n import _ from cinder import utils LOG = logging.getLogger(__name__) class DellDriverRetryableException(exception.VolumeBackendAPIException): message = _("Retryable Dell Exception encountered") class PayloadFilter(object): """PayloadFilter Simple class for creating filters for interacting with the Dell Storage API 15.3 and later. """ def __init__(self, filtertype='AND'): self.payload = {} self.payload['filter'] = {'filterType': filtertype, 'filters': []} def append(self, name, val, filtertype='Equals'): if val is not None: apifilter = {} apifilter['attributeName'] = name apifilter['attributeValue'] = val apifilter['filterType'] = filtertype self.payload['filter']['filters'].append(apifilter) class LegacyPayloadFilter(object): """LegacyPayloadFilter Simple class for creating filters for interacting with the Dell Storage API 15.1 and 15.2. """ def __init__(self, filter_type='AND'): self.payload = {'filterType': filter_type, 'filters': []} def append(self, name, val, filtertype='Equals'): if val is not None: apifilter = {} apifilter['attributeName'] = name apifilter['attributeValue'] = val apifilter['filterType'] = filtertype self.payload['filters'].append(apifilter) class HttpClient(object): """HttpClient Helper for making the REST calls. """ def __init__(self, host, port, user, password, verify, asynctimeout, synctimeout, apiversion): """HttpClient handles the REST requests. :param host: IP address of the Dell Data Collector. :param port: Port the Data Collector is listening on. :param user: User account to login with. :param password: Password. :param verify: Boolean indicating whether certificate verification should be turned on or not. :param asynctimeout: async REST call time out. :param synctimeout: sync REST call time out. :param apiversion: Dell API version. """ self.baseUrl = 'https://%s:%s/' % (host, port) self.session = requests.Session() self.session.auth = (user, password) self.header = {} self.header['Content-Type'] = 'application/json; charset=utf-8' self.header['Accept'] = 'application/json' self.header['x-dell-api-version'] = apiversion self.verify = verify self.asynctimeout = asynctimeout self.synctimeout = synctimeout # Verify is a configurable option. So if this is false do not # spam the c-vol log. if not verify: requests.packages.urllib3.disable_warnings() def __enter__(self): return self def __exit__(self, type, value, traceback): self.session.close() def __formatUrl(self, url): baseurl = self.baseUrl # Some url sources have api/rest and some don't. Handle. 
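# e.g. a relative url such as 'StorageCenter/ScVolume' gets the 'api/rest/'
# prefix added, while an already-qualified async url containing
# 'api/rest/ApiConnection/AsyncTask' is used as is.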
if 'api/rest' not in url: baseurl += 'api/rest/' return '%s%s' % (baseurl, url if url[0] != '/' else url[1:]) def _get_header(self, header_async): if header_async: header = self.header.copy() header['async'] = 'True' return header return self.header def _get_async_url(self, asyncTask): """Handle a bug in SC API that gives a full url.""" try: # strip off the https. url = asyncTask.get('returnValue').split( 'https://')[1].split('/', 1)[1] except IndexError: url = asyncTask.get('returnValue') except AttributeError: LOG.debug('_get_async_url: Attribute Error. (%r)', asyncTask) url = 'api/rest/ApiConnection/AsyncTask/' # Blank URL if not url: LOG.debug('_get_async_url: No URL. (%r)', asyncTask) url = 'api/rest/ApiConnection/AsyncTask/' # Check for incomplete url error case. if url.endswith('/'): # Try to fix. id = asyncTask.get('instanceId') if id: # We have an id so note the error and add the id. LOG.debug('_get_async_url: url format error. (%r)', asyncTask) url = url + id else: # No hope. LOG.error('_get_async_url: Bogus return async task %r', asyncTask) raise exception.VolumeBackendAPIException( message=_('_get_async_url: Invalid URL.')) # Check for an odd error case if url.startswith('<') and url.endswith('>'): LOG.error('_get_async_url: Malformed URL (XML returned). (%r)', asyncTask) raise exception.VolumeBackendAPIException( message=_('_get_async_url: Malformed URL.')) return url def _wait_for_async_complete(self, asyncTask): url = self._get_async_url(asyncTask) while True and url: try: r = self.get(url) # We can leave this loop for a variety of reasons. # Nothing returned. # r.content blanks. # Object returned switches to one without objectType or with # a different objectType. if not SCApi._check_result(r): LOG.debug('Async error:\n' '\tstatus_code: %(code)s\n' '\ttext: %(text)s\n', {'code': r.status_code, 'text': r.text}) else: # In theory we have a good run. if r.content: content = r.json() if content.get('objectType') == 'AsyncTask': url = self._get_async_url(content) eventlet.sleep(1) continue else: LOG.debug('Async debug: r.content is None') return r except Exception: methodname = asyncTask.get('methodName') objectTypeName = asyncTask.get('objectTypeName') msg = (_('Async error: Unable to retrieve %(obj)s ' 'method %(method)s result') % {'obj': objectTypeName, 'method': methodname}) raise exception.VolumeBackendAPIException(message=msg) # Shouldn't really be able to get here. LOG.debug('_wait_for_async_complete: Error asyncTask: %r', asyncTask) return None def _rest_ret(self, rest_response, async_call): # If we made an async call and it was accepted # we wait for our response. 
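# A 202 (ACCEPTED) status means the SC queued the task; any other status on
# an async call is logged and raised as a backend API error.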
if async_call: if rest_response.status_code == http_client.ACCEPTED: asyncTask = rest_response.json() return self._wait_for_async_complete(asyncTask) else: LOG.debug('REST Async error command not accepted:\n' '\tUrl: %(url)s\n' '\tCode: %(code)d\n' '\tReason: %(reason)s\n', {'url': rest_response.url, 'code': rest_response.status_code, 'reason': rest_response.reason}) msg = _('REST Async Error: Command not accepted.') raise exception.VolumeBackendAPIException(message=msg) return rest_response @utils.retry(retry_param=(requests.ConnectionError, DellDriverRetryableException)) def get(self, url): LOG.debug('get: %(url)s', {'url': url}) rest_response = self.session.get(self.__formatUrl(url), headers=self.header, verify=self.verify, timeout=self.synctimeout) if (rest_response and rest_response.status_code == ( http_client.BAD_REQUEST)) and ( 'Unhandled Exception' in rest_response.text): raise DellDriverRetryableException() return rest_response @utils.retry(retry_param=(requests.ConnectionError,)) def post(self, url, payload, async_call=False): LOG.debug('post: %(url)s data: %(payload)s', {'url': url, 'payload': payload}) return self._rest_ret(self.session.post( self.__formatUrl(url), data=json.dumps(payload, ensure_ascii=False).encode('utf-8'), headers=self._get_header(async_call), verify=self.verify, timeout=( self.asynctimeout if async_call else self.synctimeout)), async_call) @utils.retry(retry_param=(requests.ConnectionError,)) def put(self, url, payload, async_call=False): LOG.debug('put: %(url)s data: %(payload)s', {'url': url, 'payload': payload}) return self._rest_ret(self.session.put( self.__formatUrl(url), data=json.dumps(payload, ensure_ascii=False).encode('utf-8'), headers=self._get_header(async_call), verify=self.verify, timeout=( self.asynctimeout if async_call else self.synctimeout)), async_call) @utils.retry(retry_param=(requests.ConnectionError,)) def delete(self, url, payload=None, async_call=False): LOG.debug('delete: %(url)s data: %(payload)s', {'url': url, 'payload': payload}) named = {'headers': self._get_header(async_call), 'verify': self.verify, 'timeout': ( self.asynctimeout if async_call else self.synctimeout)} if payload: named['data'] = json.dumps( payload, ensure_ascii=False).encode('utf-8') return self._rest_ret( self.session.delete(self.__formatUrl(url), **named), async_call) class SCApiHelper(object): """SCApiHelper Helper class for API access. Handles opening and closing the connection to the Dell REST API. """ def __init__(self, config, active_backend_id, storage_protocol): self.config = config # Now that active_backend_id is set on failover. # Use that if set. Mark the backend as failed over. self.active_backend_id = active_backend_id self.primaryssn = self.config.dell_sc_ssn self.storage_protocol = storage_protocol self.san_ip = self.config.san_ip self.san_login = self.config.san_login self.san_password = self.config.san_password self.san_port = self.config.dell_sc_api_port self.apiversion = '2.0' def _swap_credentials(self): """Change out to our secondary credentials Or back to our primary creds. :return: True if swapped. False if no alt credentials supplied. """ if self.san_ip == self.config.san_ip: # Do we have a secondary IP and credentials? 
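# All three of secondary_san_ip, secondary_san_login and
# secondary_san_password must be set before we will swap.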
if (self.config.secondary_san_ip and self.config.secondary_san_login and self.config.secondary_san_password): self.san_ip = self.config.secondary_san_ip self.san_login = self.config.secondary_san_login self.san_password = self.config.secondary_san_password else: LOG.info('Swapping DSM credentials: Secondary DSM ' 'credentials are not set or are incomplete.') # Cannot swap. return False # Odds on this hasn't changed so no need to make setting this a # requirement. if self.config.secondary_sc_api_port: self.san_port = self.config.secondary_sc_api_port else: # These have to be set. self.san_ip = self.config.san_ip self.san_login = self.config.san_login self.san_password = self.config.san_password self.san_port = self.config.dell_sc_api_port LOG.info('Swapping DSM credentials: New DSM IP is %r.', self.san_ip) return True def _setup_connection(self): """Attempts to open a connection to the storage center.""" connection = SCApi(self.san_ip, self.san_port, self.san_login, self.san_password, self.config.dell_sc_verify_cert, self.config.dell_api_async_rest_timeout, self.config.dell_api_sync_rest_timeout, self.apiversion) # This instance is for a single backend. That backend has a # few items of information we should save rather than passing them # about. connection.vfname = self.config.dell_sc_volume_folder connection.sfname = self.config.dell_sc_server_folder connection.excluded_domain_ips = self.config.excluded_domain_ips connection.included_domain_ips = self.config.included_domain_ips if self.config.excluded_domain_ip: LOG.info("Using excluded_domain_ip for " "excluding domain IPs is deprecated in the " "Stein release of OpenStack. Please use the " "excluded_domain_ips configuration option.") connection.excluded_domain_ips += self.config.excluded_domain_ip # Remove duplicates connection.excluded_domain_ips = list(set( connection.excluded_domain_ips)) # Our primary SSN doesn't change connection.primaryssn = self.primaryssn if self.storage_protocol == constants.FC: connection.protocol = 'FibreChannel' # Set appropriate ssn and failover state. if self.active_backend_id: # active_backend_id is a string. Convert to int. connection.ssn = int(self.active_backend_id) else: connection.ssn = self.primaryssn # Make the actual connection to the DSM. connection.open_connection() return connection def open_connection(self): """Creates the SCApi object. :return: SCApi object. :raises VolumeBackendAPIException: """ connection = None LOG.info('open_connection to %(ssn)s at %(ip)s', {'ssn': self.primaryssn, 'ip': self.san_ip}) if self.primaryssn: try: """Open connection to REST API.""" connection = self._setup_connection() except Exception: # If we have credentials to swap to we try it here. if self._swap_credentials(): connection = self._setup_connection() else: with excutils.save_and_reraise_exception(): LOG.error('Failed to connect to the API. ' 'No backup DSM provided.') # Save our api version for next time. if self.apiversion != connection.apiversion: LOG.info('open_connection: Updating API version to %s', connection.apiversion) self.apiversion = connection.apiversion else: raise exception.VolumeBackendAPIException( data=_('Configuration error: dell_sc_ssn not set.')) return connection class SCApi(object): """SCApi Handles calls to Dell SC and EM via the REST API interface. Version history: 1.0.0 - Initial driver 1.1.0 - Added extra spec support for Storage Profile selection 1.2.0 - Added consistency group support. 2.0.0 - Switched to inheriting functional objects rather than volume driver. 
2.1.0 - Added support for ManageableVD. 2.2.0 - Added API 2.2 support. 2.3.0 - Added Legacy Port Mode Support 2.3.1 - Updated error handling. 2.4.0 - Added Replication V2 support. 2.4.1 - Updated Replication support to V2.1. 2.5.0 - ManageableSnapshotsVD implemented. 3.0.0 - ProviderID utilized. 3.1.0 - Failback supported. 3.2.0 - Live Volume support. 3.3.0 - Support for a secondary DSM. 3.4.0 - Support for excluding a domain. 3.5.0 - Support for AFO. 3.6.0 - Server type support. 3.7.0 - Support for Data Reduction, Group QOS and Volume QOS. 4.0.0 - Driver moved to dell_emc. 4.1.0 - Timeouts added to rest calls. 4.1.1 - excluded_domain_ips support. 4.1.2 - included_domain_ips support. """ APIDRIVERVERSION = '4.1.2' def __init__(self, host, port, user, password, verify, asynctimeout, synctimeout, apiversion): """This creates a connection to Dell SC or EM. :param host: IP address of the REST interface.. :param port: Port the REST interface is listening on. :param user: User account to login with. :param password: Password. :param verify: Boolean indicating whether certificate verification should be turned on or not. :param asynctimeout: async REST call time out. :param synctimeout: sync REST call time out. :param apiversion: Version used on login. """ self.notes = 'Created by Dell EMC Cinder Driver' self.repl_prefix = 'Cinder repl of ' self.ssn = None # primaryssn is the ssn of the SC we are configured to use. This # doesn't change in the case of a failover. self.primaryssn = None self.failed_over = False self.vfname = 'openstack' self.sfname = 'openstack' self.excluded_domain_ips = [] self.included_domain_ips = [] self.legacypayloadfilters = False self.consisgroups = True self.protocol = 'Iscsi' self.apiversion = apiversion self.legacyfoldernames = True # Nothing other than Replication should care if we are direct connect # or not. self.is_direct_connect = False self.client = HttpClient(host, port, user, password, verify, asynctimeout, synctimeout, apiversion) def __enter__(self): return self def __exit__(self, type, value, traceback): self.close_connection() @staticmethod def _check_result(rest_response): """Checks and logs API responses. :param rest_response: The result from a REST API call. :returns: ``True`` if success, ``False`` otherwise. """ if rest_response is not None: if http_client.OK <= rest_response.status_code < ( http_client.MULTIPLE_CHOICES): # API call was a normal success return True # Some versions return this as a dict. try: response_json = rest_response.json() response_text = response_json.text['result'] except Exception: # We do not care why that failed. Just use the text. response_text = rest_response.text LOG.debug('REST call result:\n' '\tUrl: %(url)s\n' '\tCode: %(code)d\n' '\tReason: %(reason)s\n' '\tText: %(text)s', {'url': rest_response.url, 'code': rest_response.status_code, 'reason': rest_response.reason, 'text': response_text}) else: LOG.warning('Failed to get REST call result.') return False @staticmethod def _path_to_array(path): """Breaks a path into a reversed string array. :param path: Path to a folder on the Storage Center. :return: A reversed array of each path element. """ array = [] while True: (path, tail) = os.path.split(path) if tail == '': array.reverse() return array array.append(tail) def _first_result(self, blob): """Get the first result from the JSON return value. :param blob: Full return from a REST call. :return: The JSON encoded dict or the first item in a JSON encoded list. 
""" return self._get_result(blob, None, None) def _get_result(self, blob, attribute, value): """Find the result specified by attribute and value. If the JSON blob is a list then it will be searched for the attribute and value combination. If attribute and value are not specified then the first item is returned. If the JSON blob is a dict then it will be returned so long as the dict matches the attribute and value combination or attribute is None. :param blob: The REST call's JSON response. Can be a list or dict. :param attribute: The attribute we are looking for. If it is None the first item in the list, or the dict, is returned. :param value: The attribute value we are looking for. If the attribute is None this value is ignored. :returns: The JSON content in blob, the dict specified by matching the attribute and value or None. """ rsp = None content = self._get_json(blob) if content is not None: # We can get a list or a dict or nothing if isinstance(content, list): for r in content: if attribute is None or r.get(attribute) == value: rsp = r break elif isinstance(content, dict): if attribute is None or content.get(attribute) == value: rsp = content elif attribute is None: rsp = content if rsp is None: LOG.debug('Unable to find result where %(attr)s is %(val)s', {'attr': attribute, 'val': value}) LOG.debug('Blob was %(blob)s', {'blob': blob.text}) return rsp def _get_json(self, blob): """Returns a dict from the JSON of a REST response. :param blob: The response from a REST call. :returns: JSON or None on error. """ try: return blob.json() except AttributeError: LOG.error('Error invalid json: %s', blob) except TypeError as ex: LOG.error('Error TypeError. %s', ex) except ValueError as ex: LOG.error('JSON decoding error. %s', ex) # We are here so this went poorly. Log our blob. LOG.debug('_get_json blob %s', blob) return None def _get_id(self, blob): """Returns the instanceId from a Dell REST object. :param blob: A Dell SC REST call's response. :returns: The instanceId from the Dell SC object or None on error. """ try: if isinstance(blob, dict): return blob.get('instanceId') except AttributeError: LOG.error('Invalid API object: %s', blob) except TypeError as ex: LOG.error('Error TypeError. %s', ex) except ValueError as ex: LOG.error('JSON decoding error. %s', ex) LOG.debug('_get_id failed: blob %s', blob) return None def _get_payload_filter(self, filterType='AND'): # 2.1 or earlier and we are talking LegacyPayloadFilters. if self.legacypayloadfilters: return LegacyPayloadFilter(filterType) return PayloadFilter(filterType) def _check_version_fail(self, payload, response): try: # Is it even our error? result = self._get_json(response).get('result') if result and result.startswith( 'Invalid API version specified, ' 'the version must be in the range ['): # We're looking for something very specific. The except # will catch any errors. # Update our version and update our header. self.apiversion = response.text.split('[')[1].split(',')[0] self.client.header['x-dell-api-version'] = self.apiversion LOG.debug('API version updated to %s', self.apiversion) # Give login another go. r = self.client.post('ApiConnection/Login', payload) return r except Exception: # We don't care what failed. The clues are already in the logs. # Just log a parsing error and move on. LOG.error('_check_version_fail: Parsing error.') # Just eat this if it isn't a version error. return response def open_connection(self): """Authenticate with Dell REST interface. :raises VolumeBackendAPIException.: """ # Set our fo state. 
self.failed_over = (self.primaryssn != self.ssn) # Login payload = {} payload['Application'] = 'Cinder REST Driver' payload['ApplicationVersion'] = self.APIDRIVERVERSION r = self.client.post('ApiConnection/Login', payload) if not self._check_result(r): # SC requires a specific version. See if we can get it. r = self._check_version_fail(payload, r) # Either we tried to login and have a new result or we are # just checking the same result. Either way raise on fail. if not self._check_result(r): raise exception.VolumeBackendAPIException( data=_('Failed to connect to Dell REST API')) # We should be logged in. Try to grab the api version out of the # response. try: apidict = self._get_json(r) version = apidict['apiVersion'] self.is_direct_connect = apidict['provider'] == 'StorageCenter' splitver = version.split('.') if splitver[0] == '2': if splitver[1] == '0': self.consisgroups = False self.legacypayloadfilters = True elif splitver[1] == '1': self.legacypayloadfilters = True self.legacyfoldernames = (splitver[0] < '4') except Exception: # Good return but not the login response we were expecting. # Log it and error out. LOG.error('Unrecognized Login Response: %s', r) def close_connection(self): """Logout of Dell REST API.""" r = self.client.post('ApiConnection/Logout', {}) # 204 expected. self._check_result(r) self.client = None def _use_provider_id(self, provider_id): """See if our provider_id points at our current backend. provider_id is instanceId. The instanceId contains the ssn of the StorageCenter it is hosted on. This must equal our current ssn or it isn't valid. :param provider_id: Provider_id from an volume or snapshot object. :returns: True/False """ ret = False if provider_id: try: if provider_id.split('.')[0] == str(self.ssn): ret = True else: LOG.debug('_use_provider_id: provider_id ' '%(pid)r not valid on %(ssn)r', {'pid': provider_id, 'ssn': self.ssn}) except Exception: LOG.error('_use_provider_id: provider_id %s is invalid!', provider_id) return ret def find_sc(self, ssn=-1): """Check that the SC is there and being managed by EM. :returns: The SC SSN. :raises VolumeBackendAPIException: """ # We might be looking for another ssn. If not then # look for our default. ssn = self._vet_ssn(ssn) r = self.client.get('StorageCenter/StorageCenter') result = self._get_result(r, 'scSerialNumber', ssn) if result is None: LOG.error('Failed to find %(s)s. Result %(r)s', {'s': ssn, 'r': r}) raise exception.VolumeBackendAPIException( data=_('Failed to find Storage Center')) return self._get_id(result) # Folder functions def _create_folder(self, url, parent, folder, ssn=-1): """Creates folder under parent. This can create both to server and volume folders. The REST url sent in defines the folder type being created on the Dell Storage Center backend. :param url: This is the Dell SC rest url for creating the specific (server or volume) folder type. :param parent: The instance ID of this folder's parent folder. :param folder: The folder name to be created. This is one level deep. :returns: The REST folder object. """ ssn = self._vet_ssn(ssn) scfolder = None payload = {} payload['Name'] = folder payload['StorageCenter'] = ssn if parent != '': payload['Parent'] = parent payload['Notes'] = self.notes r = self.client.post(url, payload, True) if self._check_result(r): scfolder = self._first_result(r) return scfolder def _create_folder_path(self, url, foldername, ssn=-1): """Creates a folder path from a fully qualified name. 
The REST url sent in defines the folder type being created on the Dell Storage Center backend. Thus this is generic to server and volume folders. :param url: This is the Dell SC REST url for creating the specific (server or volume) folder type. :param foldername: The full folder name with path. :returns: The REST folder object. """ ssn = self._vet_ssn(ssn) path = self._path_to_array(foldername) folderpath = '' instanceId = '' # Technically the first folder is the root so that is already created. found = True scfolder = None for folder in path: folderpath = folderpath + folder # If the last was found see if this part of the path exists too if found: listurl = url + '/GetList' scfolder = self._find_folder(listurl, folderpath, ssn) if scfolder is None: found = False # We didn't find it so create it if found is False: scfolder = self._create_folder(url, instanceId, folder, ssn) # If we haven't found a folder or created it then leave if scfolder is None: LOG.error('Unable to create folder path %s', folderpath) break # Next part of the path will need this instanceId = self._get_id(scfolder) folderpath = folderpath + '/' return scfolder def _find_folder(self, url, foldername, ssn=-1): """Find a folder on the SC using the specified url. Most of the time the folder will already have been created so we look for the end folder and check that the rest of the path is right. The REST url sent in defines the folder type being created on the Dell Storage Center backend. Thus this is generic to server and volume folders. :param url: The portion of the url after the base url (see http class) to use for this operation. (Can be for Server or Volume folders.) :param foldername: Full path to the folder we are looking for. :returns: Dell folder object. """ ssn = self._vet_ssn(ssn) pf = self._get_payload_filter() pf.append('scSerialNumber', ssn) basename = os.path.basename(foldername) pf.append('Name', basename) # save the user from themselves. folderpath = foldername.strip('/') folderpath = os.path.dirname(folderpath) # Put our path into the filters if folderpath != '': # Legacy didn't begin with a slash. if not self.legacyfoldernames: folderpath = '/' + folderpath # SC convention is to end with a '/' so make sure we do. folderpath += '/' elif not self.legacyfoldernames: folderpath = '/' pf.append('folderPath', folderpath) folder = None r = self.client.post(url, pf.payload) if self._check_result(r): folder = self._get_result(r, 'folderPath', folderpath) return folder def _find_volume_folder(self, create=False, ssn=-1): """Looks for the volume folder where backend volumes will be created. Volume folder is specified in the cindef.conf. See __init. :param create: If True will create the folder if not found. :returns: Folder object. """ folder = self._find_folder('StorageCenter/ScVolumeFolder/GetList', self.vfname, ssn) # Doesn't exist? make it if folder is None and create is True: folder = self._create_folder_path('StorageCenter/ScVolumeFolder', self.vfname, ssn) return folder def _init_volume(self, scvolume): """Initializes the volume. Maps the volume to a random server and immediately unmaps it. This initializes the volume. Don't wig out if this fails. :param scvolume: Dell Volume object. """ pf = self._get_payload_filter() pf.append('scSerialNumber', scvolume.get('scSerialNumber')) r = self.client.post('StorageCenter/ScServer/GetList', pf.payload) if self._check_result(r): scservers = self._get_json(r) # Sort through the servers looking for one with connectivity. 
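# Only a server that is not down and is of type 'physical' is used; the quick
# map/unmap below is just enough activity to activate the volume.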
for scserver in scservers: # This needs to be either a physical or virtual server. # Outside of tempest tests this should not matter as we only # "init" a volume to allow snapshotting of an empty volume. if (scserver.get('status', 'down').lower() != 'down' and scserver.get('type', '').lower() == 'physical'): # Map to actually create the volume self.map_volume(scvolume, scserver) # We have changed the volume so grab a new copy of it. scvolume = self.get_volume(self._get_id(scvolume)) # Unmap self.unmap_volume(scvolume, scserver) # Did it work? if not scvolume.get('active', False): LOG.debug('Failed to activate volume %(name)s via ' 'server %(srvr)s)', {'name': scvolume['name'], 'srvr': scserver['name']}) else: return # We didn't map/unmap the volume. So no initialization done. # Warn the user before we leave. Note that this is almost certainly # a tempest test failure we are trying to catch here. A snapshot # has likely been attempted before the volume has been instantiated # on the Storage Center. In the real world no one will snapshot # a volume without first putting some data in that volume. LOG.warning('Volume %(name)s initialization failure. ' 'Operations such as snapshot and clone may fail due ' 'to inactive volume.)', {'name': scvolume['name']}) def _find_storage_profile(self, storage_profile): """Looks for a Storage Profile on the array. Storage Profiles determine tiering settings. If not specified a volume will use the Default storage profile. :param storage_profile: The Storage Profile name to find with any spaces stripped. :returns: The Storage Profile object or None. """ if not storage_profile: return None # Since we are stripping out spaces for convenience we are not # able to just filter on name. Need to get all Storage Profiles # and look through for the one we want. Never many profiles, so # this doesn't cause as much overhead as it might seem. storage_profile = storage_profile.replace(' ', '').lower() pf = self._get_payload_filter() pf.append('scSerialNumber', self.ssn) r = self.client.post('StorageCenter/ScStorageProfile/GetList', pf.payload) if self._check_result(r): profiles = self._get_json(r) for profile in profiles: # Look for the stripped, case insensitive match name = profile.get('name', '').replace(' ', '').lower() if name == storage_profile: return profile return None def _find_user_replay_profiles(self): """Find user default profiles. Note that this only deals with standard and not cg profiles. :return: List of replay profiles. """ user_prefs = self._get_user_preferences() if user_prefs: profileids = [profile['instanceId'] for profile in user_prefs['replayProfileList']] return profileids return [] def _find_daily_replay_profile(self): """Find the system replay profile named "Daily". :return: Profile instanceId or None. """ pf = self._get_payload_filter() pf.append('scSerialNumber', self.ssn) pf.append('instanceName', 'Daily') r = self.client.post('StorageCenter/ScReplayProfile/GetList', pf.payload) if self._check_result(r): profiles = self._get_json(r) if profiles: return profiles[0]['instanceId'] return None def _find_replay_profiles(self, replay_profile_string): """Find our replay profiles. Note that if called on volume creation the removeids list can be safely ignored. :param replay_profile_string: Comma separated list of profile names. :return: List replication profiles to use, List to remove. :raises VolumeBackendAPIException: If we can't find our profiles. 
""" addids = [] removeids = [] replay_profiles = [] if replay_profile_string: replay_profiles = replay_profile_string.split(',') # Most of the time they will not specify this so don't call anything. if replay_profiles: pf = self._get_payload_filter() pf.append('scSerialNumber', self.ssn) pf.append('type', 'Standard') r = self.client.post('StorageCenter/ScReplayProfile/GetList', pf.payload) if self._check_result(r): profiles = self._get_json(r) for profile in profiles: if replay_profiles.count(profile['name']) > 0: addids.append(profile['instanceId']) else: # in the volume. removeids.append(profile['instanceId']) # Check that we've found what we are looking for if anything if len(addids) != len(replay_profiles): msg = (_('Unable to locate specified replay profiles %s ') % replay_profile_string) raise exception.VolumeBackendAPIException(data=msg) return addids, removeids def update_replay_profiles(self, scvolume, replay_profile_string): """Update our replay profiles. If the replay_profile_string is empty we look for the user's default profiles. If those aren't found we look for the Daily profile. Note that this is in addition to the CG profiles which we do not touch. :param scvolume: SC Volume object. :param replay_profile_string: Comma separated string of replay profile names. :return: True/False. """ # Find our replay_profiles. addids, removeids = self._find_replay_profiles(replay_profile_string) # We either found what we were looking for. # If we are clearing out our ids then find a default. if not addids: # if no replay profiles specified we must be clearing out. addids = self._find_user_replay_profiles() if not addids: addids = [self._find_daily_replay_profile()] # Do any removals first. for id in removeids: # We might have added to the addids list after creating removeids. # User preferences or the daily profile could have been added. # If our id is in both lists just skip it and remove it from # The add list. if addids.count(id): addids.remove(id) elif not self._update_volume_profiles( scvolume, addid=None, removeid=id): return False # Add anything new. for id in addids: if not self._update_volume_profiles( scvolume, addid=id, removeid=None): return False return True def _check_add_profile_payload(self, payload, profile, name, type): if name: if profile is None: msg = _('Profile %s not found.') % name raise exception.VolumeBackendAPIException(data=msg) else: payload[type] = self._get_id(profile) def create_volume(self, name, size, storage_profile=None, replay_profile_string=None, volume_qos=None, group_qos=None, datareductionprofile=None): """Creates a new volume on the Storage Center. It will create it in a folder called self.vfname. If self.vfname does not exist it will create it. If it cannot create it the volume will be created in the root. :param name: Name of the volume to be created on the Dell SC backend. This is the cinder volume ID. :param size: The size of the volume to be created in GB. :param storage_profile: Optional storage profile to set for the volume. :param replay_profile_string: Optional replay profile to set for the volume. :param volume_qos: Volume QOS profile name. :param group_qos: Group QOS profile name. :param datareductionprofile: Data reduction profile name :returns: Dell Volume object or None. 
""" LOG.debug('create_volume: %(name)s %(ssn)s %(folder)s %(profile)s ' '%(vqos)r %(gqos)r %(dup)r', {'name': name, 'ssn': self.ssn, 'folder': self.vfname, 'profile': storage_profile, 'replay': replay_profile_string, 'vqos': volume_qos, 'gqos': group_qos, 'dup': datareductionprofile}) # Find our folder folder = self._find_volume_folder(True) # If we actually have a place to put our volume create it if folder is None: LOG.warning('Unable to create folder %s', self.vfname) # Find our replay_profiles. addids, removeids = self._find_replay_profiles(replay_profile_string) # Init our return. scvolume = None # Create the volume payload = {} payload['Name'] = name payload['Notes'] = self.notes payload['Size'] = '%d GB' % size payload['StorageCenter'] = self.ssn if folder is not None: payload['VolumeFolder'] = self._get_id(folder) # Add our storage profile. self._check_add_profile_payload( payload, self._find_storage_profile(storage_profile), storage_profile, 'StorageProfile') # Add our Volume QOS Profile. self._check_add_profile_payload( payload, self._find_qos_profile(volume_qos), volume_qos, 'VolumeQosProfile') # Add our Group QOS Profile. self._check_add_profile_payload( payload, self._find_qos_profile(group_qos, True), group_qos, 'GroupQosProfile') # Add our Data Reduction Proflie. self._check_add_profile_payload( payload, self._find_data_reduction_profile(datareductionprofile), datareductionprofile, 'DataReductionProfile') # This is a new volume so there is nothing to remove. if addids: payload['ReplayProfileList'] = addids r = self.client.post('StorageCenter/ScVolume', payload, True) if self._check_result(r): # Our volume should be in the return. scvolume = self._get_json(r) if scvolume: LOG.info('Created volume %(instanceId)s: %(name)s', {'instanceId': scvolume['instanceId'], 'name': scvolume['name']}) else: LOG.error('ScVolume returned success with empty payload.' ' Attempting to locate volume') # In theory it is there since success was returned. # Try one last time to find it before returning. scvolume = self._search_for_volume(name) else: LOG.error('Unable to create volume on SC: %s', name) return scvolume def _get_volume_list(self, name, deviceid, filterbyvfname=True, ssn=-1): """Return the specified list of volumes. :param name: Volume name. :param deviceid: Volume device ID on the SC backend. :param filterbyvfname: If set to true then this filters by the preset folder name. :param ssn: SSN to search on. :return: Returns the scvolume list or None. """ ssn = self._vet_ssn(ssn) result = None # We need a name or a device ID to find a volume. if name or deviceid: pf = self._get_payload_filter() pf.append('scSerialNumber', ssn) if name is not None: pf.append('Name', name) if deviceid is not None: pf.append('DeviceId', deviceid) # set folderPath if filterbyvfname: vfname = (self.vfname if self.vfname.endswith('/') else self.vfname + '/') pf.append('volumeFolderPath', vfname) r = self.client.post('StorageCenter/ScVolume/GetList', pf.payload) if self._check_result(r): result = self._get_json(r) # We return None if there was an error and a list if the command # succeeded. It might be an empty list. return result def _autofailback(self, lv): # if we have a working replication state. 
ret = False LOG.debug('Attempting autofailback of %s', lv) if (lv and lv['status'] == 'Up' and lv['replicationState'] == 'Up' and lv['failoverState'] == 'Protected' and lv['secondaryStatus'] == 'Up' and lv['primarySwapRoleState'] == 'NotSwapping'): ret = self.swap_roles_live_volume(lv) return ret def _find_volume_primary(self, provider_id, name): # We look for our primary. If it doesn't exist and we have an activated # secondary then we return that. # if there is no live volume then we return our provider_id. primary_id = provider_id lv = self.get_live_volume(provider_id, name) LOG.info('Volume %(name)r, id %(provider)s at primary %(primary)s.', {'name': name, 'provider': provider_id, 'primary': primary_id}) # If we have a live volume and are swapped and are not failed over # at least give failback a shot. if lv and (self.is_swapped(provider_id, lv) and not self.failed_over and self._autofailback(lv)): lv = self.get_live_volume(provider_id) LOG.info('After failback %s', lv) # Make sure we still have a LV. if lv: # At this point if the secondaryRole is Active we have # to return that. Else return normal primary. if lv.get('secondaryRole') == 'Activated': primary_id = lv['secondaryVolume']['instanceId'] else: primary_id = lv['primaryVolume']['instanceId'] return primary_id def find_volume(self, name, provider_id, islivevol=False): """Find the volume by name or instanceId. We check if we can use provider_id before using it. If so then we expect to find it by provider_id. We also conclude our failover at this point. If we are failed over we run _import_one to rename the volume. :param name: Volume name. :param provider_id: instanceId of the volume if known. :param islivevol: Is this a live volume. :return: sc volume object or None. :raises VolumeBackendAPIException: if unable to import. """ LOG.debug('find_volume: name:%(name)r provider_id:%(id)r islv:%(lv)r', {'name': name, 'id': provider_id, 'lv': islivevol}) scvolume = None if islivevol: # Just get the primary from the sc live vol. primary_id = self._find_volume_primary(provider_id, name) scvolume = self.get_volume(primary_id) elif self._use_provider_id(provider_id): # just get our volume scvolume = self.get_volume(provider_id) # if we are failed over we need to check if we # need to import the failed over volume. if self.failed_over: if scvolume['name'] == self._repl_name(name): scvolume = self._import_one(scvolume, name) if not scvolume: msg = (_('Unable to complete failover of %s.') % name) raise exception.VolumeBackendAPIException(data=msg) LOG.info('Imported %(fail)s to %(guid)s.', {'fail': self._repl_name(name), 'guid': name}) else: # No? Then search for it. scvolume = self._search_for_volume(name) return scvolume def _search_for_volume(self, name): """Search self.ssn for volume of name. This searches the folder self.vfname (specified in the cinder.conf) for the volume first. If not found it searches the entire array for the volume. :param name: Name of the volume to search for. This is the cinder volume ID. :returns: Dell Volume object or None if not found. :raises VolumeBackendAPIException: If multiple copies are found. """ LOG.debug('Searching %(sn)s for %(name)s', {'sn': self.ssn, 'name': name}) # Cannot find a volume without the name. if name is None: return None # Look for our volume in our folder. vollist = self._get_volume_list(name, None, True) # If an empty list was returned they probably moved the volumes or # changed the folder name so try again without the folder. 
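# The folder-less retry searches the entire array, so duplicate names become
# possible and are treated as an error below.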
if not vollist: LOG.debug('Cannot find volume %(n)s in %(v)s. Searching SC.', {'n': name, 'v': self.vfname}) vollist = self._get_volume_list(name, None, False) # If multiple volumes of the same name are found we need to error. if len(vollist) > 1: # blow up msg = _('Multiple copies of volume %s found.') % name raise exception.VolumeBackendAPIException(data=msg) # We made it and should have a valid volume. return None if not vollist else vollist[0] def get_volume(self, provider_id): """Returns the scvolume associated with provider_id. :param provider_id: This is the instanceId :return: Dell SCVolume object. """ result = None if provider_id: r = self.client.get('StorageCenter/ScVolume/%s' % provider_id) if self._check_result(r): result = self._get_json(r) return result def delete_volume(self, name, provider_id=None): """Deletes the volume from the SC backend array. If the volume cannot be found we claim success. :param name: Name of the volume to search for. This is the cinder volume ID. :param provider_id: This is the instanceId :returns: Boolean indicating success or failure. """ vol = self.find_volume(name, provider_id) provider_id = None if not vol else self._get_id(vol) # If we have an id then delete the volume. if provider_id: r = self.client.delete('StorageCenter/ScVolume/%s' % provider_id, async_call=True) if not self._check_result(r): msg = _('Error deleting volume %(ssn)s: %(volume)s') % { 'ssn': self.ssn, 'volume': provider_id} raise exception.VolumeBackendAPIException(data=msg) # json return should be true or false return self._get_json(r) # If we can't find the volume then it is effectively gone. LOG.warning('delete_volume: unable to find volume ' 'provider_id: %s', provider_id) return True def _find_server_folder(self, create=False, ssn=-1): """Looks for the server folder on the Dell Storage Center. This is the folder where a server objects for mapping volumes will be created. Server folder is specified in cinder.conf. See __init. :param create: If True will create the folder if not found. :return: Folder object. """ ssn = self._vet_ssn(ssn) folder = self._find_folder('StorageCenter/ScServerFolder/GetList', self.sfname, ssn) if folder is None and create is True: folder = self._create_folder_path('StorageCenter/ScServerFolder', self.sfname, ssn) return folder def _add_hba(self, scserver, wwnoriscsiname): """This adds a server HBA to the Dell server object. The HBA is taken from the connector provided in initialize_connection. The Dell server object is largely a container object for the list of HBAs associated with a single server (or vm or cluster) for the purposes of mapping volumes. :param scserver: Dell server object. :param wwnoriscsiname: The WWN or IQN to add to this server. :returns: Boolean indicating success or failure. """ payload = {} payload['HbaPortType'] = self.protocol payload['WwnOrIscsiName'] = wwnoriscsiname payload['AllowManual'] = True r = self.client.post('StorageCenter/ScPhysicalServer/%s/AddHba' % self._get_id(scserver), payload, True) if not self._check_result(r): LOG.error('_add_hba error: %(wwn)s to %(srvname)s', {'wwn': wwnoriscsiname, 'srvname': scserver['name']}) return False return True def _find_serveros(self, osname='Red Hat Linux 6.x', ssn=-1): """Returns the serveros instance id of the specified osname. Required to create a Dell server object. We do not know that we are Red Hat Linux 6.x but that works best for Red Hat and Ubuntu. So we use that. :param osname: The name of the OS to look for. :param ssn: ssn of the backend SC to use. 
Default if -1. :returns: InstanceId of the ScServerOperatingSystem object. """ ssn = self._vet_ssn(ssn) pf = self._get_payload_filter() pf.append('scSerialNumber', ssn) r = self.client.post('StorageCenter/ScServerOperatingSystem/GetList', pf.payload) if self._check_result(r): oslist = self._get_json(r) for srvos in oslist: name = srvos.get('name', 'nope') if name.lower() == osname.lower(): # Found it return the id return self._get_id(srvos) LOG.warning('Unable to find appropriate OS %s', osname) return None def create_server(self, wwnlist, serveros, ssn=-1): """Creates a server with multiple WWNS associated with it. Same as create_server except it can take a list of HBAs. :param wwnlist: A list of FC WWNs or iSCSI IQNs associated with this server. :param serveros: Name of server OS to use when creating the server. :param ssn: ssn of the backend SC to use. Default if -1. :returns: Dell server object. """ # Find our folder or make it folder = self._find_server_folder(True, ssn) # Create our server. scserver = self._create_server('Server_' + wwnlist[0], folder, serveros, ssn) if not scserver: return None # Add our HBAs. if scserver: for wwn in wwnlist: if not self._add_hba(scserver, wwn): # We failed so log it. Delete our server and return None. LOG.error('Error adding HBA %s to server', wwn) self._delete_server(scserver) return None return scserver def _create_server(self, servername, folder, serveros, ssn): ssn = self._vet_ssn(ssn) LOG.info('Creating server %s', servername) payload = {} payload['Name'] = servername payload['StorageCenter'] = ssn payload['Notes'] = self.notes payload['AlertOnConnectivity'] = False # We pick Red Hat Linux 6.x because it supports multipath and # will attach luns to paths as they are found. scserveros = self._find_serveros(serveros, ssn) if not scserveros: scserveros = self._find_serveros(ssn=ssn) if scserveros is not None: payload['OperatingSystem'] = scserveros # At this point it doesn't matter if we have a folder or not. # Let it be in the root if the folder creation fails. if folder is not None: payload['ServerFolder'] = self._get_id(folder) # create our server r = self.client.post('StorageCenter/ScPhysicalServer', payload, True) if self._check_result(r): # Server was created scserver = self._first_result(r) LOG.info('SC server created %s', scserver) return scserver LOG.error('Unable to create SC server %s', servername) return None def _vet_ssn(self, ssn): """Returns the default if a ssn was not set. Added to support live volume as we aren't always on the primary ssn anymore :param ssn: ssn to check. :return: Current ssn or the ssn sent down. """ if ssn == -1: return self.ssn return ssn def find_server(self, instance_name, ssn=-1): """Hunts for a server on the Dell backend by instance_name. The instance_name is the same as the server's HBA. This is the IQN or WWN listed in the connector. If found, the server the HBA is attached to, if any, is returned. :param instance_name: instance_name is a FC WWN or iSCSI IQN from the connector. In cinder a server is identified by its HBA. :param ssn: Storage center to search. :returns: Dell server object or None. """ ssn = self._vet_ssn(ssn) scserver = None # We search for our server by first finding our HBA hba = self._find_serverhba(instance_name, ssn) # Once created hbas stay in the system. So it isn't enough # that we found one it actually has to be attached to a # server. 
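# hba['server'] is the link back to the owning ScServer object; without it
# the HBA is orphaned and the server lookup is skipped.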
if hba is not None and hba.get('server') is not None: pf = self._get_payload_filter() pf.append('scSerialNumber', ssn) pf.append('instanceId', self._get_id(hba['server']).upper()) r = self.client.post('StorageCenter/ScServer/GetList', pf.payload) if self._check_result(r): scserver = self._first_result(r) if scserver is None: LOG.debug('Server (%s) not found.', instance_name) return scserver def _find_serverhba(self, instance_name, ssn): """Hunts for a server HBA on the Dell backend by instance_name. Instance_name is the same as the IQN or WWN specified in the connector. :param instance_name: Instance_name is a FC WWN or iSCSI IQN from the connector. :param ssn: Storage center to search. :returns: Dell server HBA object. """ scserverhba = None # We search for our server by first finding our HBA pf = self._get_payload_filter() pf.append('scSerialNumber', ssn) pf.append('instanceName', instance_name.upper()) r = self.client.post('StorageCenter/ScServerHba/GetList', pf.payload) if self._check_result(r): scserverhba = self._first_result(r) return scserverhba def _find_domains(self, cportid): """Find the list of Dell domain objects associated with the cportid. :param cportid: The Instance ID of the Dell controller port. :returns: List of fault domains associated with this controller port. """ r = self.client.get('StorageCenter/ScControllerPort/%s/FaultDomainList' % cportid) if self._check_result(r): domains = self._get_json(r) return domains LOG.error('Error getting FaultDomainList for %s', cportid) return None def _find_initiators(self, scserver): """Returns a list of WWNs associated with the specified Dell server. :param scserver: The Dell backend server object. :returns: A list of WWNs associated with this server. """ initiators = [] r = self.client.get('StorageCenter/ScServer/%s/HbaList' % self._get_id(scserver)) if self._check_result(r): hbas = self._get_json(r) for hba in hbas: wwn = hba.get('instanceName') if (hba.get('portType') == self.protocol and wwn is not None): initiators.append(wwn.upper()) else: LOG.error('Unable to find initiators') LOG.debug('_find_initiators: %s', initiators) return initiators def get_volume_count(self, scserver): """Returns the number of volumes attached to specified Dell server. :param scserver: The Dell backend server object. :returns: Mapping count. -1 if there was an error. """ r = self.client.get('StorageCenter/ScServer/%s/MappingList' % self._get_id(scserver)) if self._check_result(r): mappings = self._get_json(r) return len(mappings) # Panic mildly but do not return 0. return -1 def _find_mappings(self, scvolume): """Find the Dell volume object mappings. :param scvolume: Dell volume object. :returns: A list of Dell mappings objects. """ mappings = [] if scvolume.get('active', False): r = self.client.get('StorageCenter/ScVolume/%s/MappingList' % self._get_id(scvolume)) if self._check_result(r): mappings = self._get_json(r) else: LOG.error('_find_mappings: volume is not active') LOG.info('Volume mappings for %(name)s: %(mappings)s', {'name': scvolume.get('name'), 'mappings': mappings}) return mappings def _find_mapping_profiles(self, scvolume): """Find the Dell volume object mapping profiles. :param scvolume: Dell volume object. :returns: A list of Dell mapping profile objects. 
""" mapping_profiles = [] r = self.client.get('StorageCenter/ScVolume/%s/MappingProfileList' % self._get_id(scvolume)) if self._check_result(r): mapping_profiles = self._get_json(r) else: LOG.error('Unable to find mapping profiles: %s', scvolume.get('name')) LOG.debug(mapping_profiles) return mapping_profiles def _find_controller_port(self, cportid): """Finds the SC controller port object for the specified cportid. :param cportid: The instanceID of the Dell backend controller port. :returns: The controller port object. """ controllerport = None r = self.client.get('StorageCenter/ScControllerPort/%s' % cportid) if self._check_result(r): controllerport = self._first_result(r) LOG.debug('_find_controller_port: %s', controllerport) return controllerport @staticmethod def _get_wwn(controllerport): """Return the WWN value of the controller port. Usually the WWN key in the controller port is wwn or WWN, but there are cases where the backend returns wWW, so we have to check all the keys. """ for key, value in controllerport.items(): if key.lower() == 'wwn': return value return None def find_wwns(self, scvolume, scserver): """Finds the lun and wwns of the mapped volume. :param scvolume: Storage Center volume object. :param scserver: Storage Center server opbject. :returns: Lun, wwns, initiator target map """ lun = None # our lun. We return the first lun. wwns = [] # list of targets itmap = {} # dict of initiators and the associated targets # Make sure we know our server's initiators. Only return # mappings that contain HBA for this server. initiators = self._find_initiators(scserver) # Get our volume mappings mappings = self._find_mappings(scvolume) # We check each of our mappings. We want to return # the mapping we have been configured to use. for mapping in mappings: # Find the controller port for this mapping cport = mapping.get('controllerPort') controllerport = self._find_controller_port(self._get_id(cport)) if controllerport is not None: # This changed case at one point or another. # Look for both keys. wwn = self._get_wwn(controllerport) if wwn: serverhba = mapping.get('serverHba') if serverhba: hbaname = serverhba.get('instanceName') if hbaname.upper() in list( map(lambda x: x.upper(), initiators) ): if itmap.get(hbaname) is None: itmap[hbaname] = [] itmap[hbaname].append(wwn) wwns.append(wwn) mappinglun = mapping.get('lun') if lun is None: lun = mappinglun elif lun != mappinglun: LOG.warning('Inconsistent Luns.') else: LOG.debug('%s not found in initiator list', hbaname) else: LOG.warning('_find_wwn: serverhba is None.') else: LOG.warning('_find_wwn: Unable to find port wwn.') else: LOG.warning('_find_wwn: controllerport is None.') LOG.info('_find_wwns-lun: %(lun)s wwns: %(wwn)s itmap: %(map)s', {'lun': lun, 'wwn': wwns, 'map': itmap}) # Return the response in lowercase wwns_lower = [w.lower() for w in wwns] itmap_lower = dict() for key in itmap: itmap_lower[key.lower()] = [v.lower() for v in itmap[key]] return lun, wwns_lower, itmap_lower def _find_active_controller(self, scvolume): """Finds the controller on which the Dell volume is active. There can be more than one Dell backend controller per Storage center but a given volume can only be active on one of them at a time. :param scvolume: Dell backend volume object. :returns: Active controller ID. 
""" actvctrl = None volconfig = self._get_volume_configuration(scvolume) if volconfig: controller = volconfig.get('controller') actvctrl = self._get_id(controller) else: LOG.error('Unable to retrieve VolumeConfiguration: %s', self._get_id(scvolume)) LOG.debug('_find_active_controller: %s', actvctrl) return actvctrl def _get_controller_id(self, mapping): # The mapping lists the associated controller. return self._get_id(mapping.get('controller')) def _get_domains(self, mapping): # Return a list of domains associated with this controller port. return self._find_domains(self._get_id(mapping.get('controllerPort'))) def _get_iqn(self, mapping): # Get our iqn from the controller port listed in our mapping. iqn = None cportid = self._get_id(mapping.get('controllerPort')) controllerport = self._find_controller_port(cportid) if controllerport: iqn = controllerport.get('iscsiName') LOG.debug('_get_iqn: %s', iqn) return iqn def _is_virtualport_mode(self, ssn=-1): ssn = self._vet_ssn(ssn) isvpmode = False r = self.client.get('StorageCenter/ScConfiguration/%s' % ssn) if self._check_result(r): scconfig = self._get_json(r) if scconfig and scconfig['iscsiTransportMode'] == 'VirtualPort': isvpmode = True return isvpmode def _find_controller_port_iscsi_config(self, cportid): """Finds the SC controller port object for the specified cportid. :param cportid: The instanceID of the Dell backend controller port. :returns: The controller port object. """ controllerport = None r = self.client.get( 'StorageCenter/ScControllerPortIscsiConfiguration/%s' % cportid) if self._check_result(r): controllerport = self._first_result(r) else: LOG.error('_find_controller_port_iscsi_config: ' 'Error finding configuration: %s', cportid) return controllerport def find_iscsi_properties(self, scvolume, scserver): """Finds target information for a given Dell scvolume object mapping. The data coming back is both the preferred path and all the paths. :param scvolume: The dell sc volume object. :param scserver: The dell sc server object. :returns: iSCSI property dictionary. :raises VolumeBackendAPIException: """ LOG.debug('find_iscsi_properties: scvolume: %s', scvolume) # Our mutable process object. pdata = {'active': -1, 'up': -1} # Our output lists. portals = [] luns = [] iqns = [] # Process just looks for the best port to return. def process(lun, iqn, address, port, status, active): """Process this mapping information. :param lun: SCSI Lun. :param iqn: iSCSI IQN address. :param address: IP address. :param port: IP Port number :param readonly: Boolean indicating mapping is readonly. :param status: String indicating mapping status. (Up is what we are looking for.) :param active: Boolean indicating whether this is on the active controller or not. :return: Nothing """ process_it = False # Check the white list # If the white list is empty, check the black list if (not self.included_domain_ips): # Check the black list if self.excluded_domain_ips.count(address) == 0: process_it = True elif (self.included_domain_ips.count(address) > 0): process_it = True if process_it: # Make sure this isn't a duplicate. newportal = address + ':' + str(port) for idx, portal in enumerate(portals): if (portal == newportal and iqns[idx] == iqn and luns[idx] == lun): LOG.debug('Skipping duplicate portal %(ptrl)s and' ' iqn %(iqn)s and lun %(lun)s.', {'ptrl': portal, 'iqn': iqn, 'lun': lun}) return # It isn't in the list so process it. portals.append(newportal) iqns.append(iqn) luns.append(lun) # We need to point to the best link. 
# So state active and status up is preferred # but we don't actually need the state to be # up at this point. if pdata['up'] == -1: if active: pdata['active'] = len(iqns) - 1 if status == 'Up': pdata['up'] = pdata['active'] # Start by getting our mappings. mappings = self._find_mappings(scvolume) # We should have mappings at the time of this call but do check. if len(mappings) > 0: # This might not be on the current controller. ssn = self._get_id(scvolume).split('.')[0] # In multipath (per Liberty) we will return all paths. But # if multipath is not set (ip and port are None) then we need # to return a mapping from the controller on which the volume # is active. So find that controller. actvctrl = self._find_active_controller(scvolume) # Two different methods are used to find our luns and portals # depending on whether we are in virtual or legacy port mode. isvpmode = self._is_virtualport_mode(ssn) # Trundle through our mappings. for mapping in mappings: msrv = mapping.get('server') if msrv: # Don't return remote sc links. if msrv.get('objectType') == 'ScRemoteStorageCenter': continue # Don't return information for other servers. But # do log it. if self._get_id(msrv) != self._get_id(scserver): LOG.debug('find_iscsi_properties: Multiple servers' ' attached to volume.') continue # The lun, ro mode and status are in the mapping. LOG.debug('find_iscsi_properties: mapping: %s', mapping) lun = mapping.get('lun') status = mapping.get('status') # Get our IQN from our mapping. iqn = self._get_iqn(mapping) # Check if our controller ID matches our active controller ID. isactive = True if (self._get_controller_id(mapping) == actvctrl) else False # If we have an IQN and are in virtual port mode. if isvpmode and iqn: domains = self._get_domains(mapping) if domains: for dom in domains: LOG.debug('find_iscsi_properties: domain: %s', dom) ipaddress = dom.get('targetIpv4Address', dom.get('wellKnownIpAddress')) portnumber = dom.get('portNumber') # We have all our information. Process this portal. process(lun, iqn, ipaddress, portnumber, status, isactive) # Else we are in legacy mode. elif iqn: # Need to get individual ports cportid = self._get_id(mapping.get('controllerPort')) # Legacy mode stuff is in the ISCSI configuration object. cpconfig = self._find_controller_port_iscsi_config(cportid) # This should really never fail. Things happen so if it # does just keep moving. Return what we can. if cpconfig: ipaddress = cpconfig.get('ipAddress') portnumber = cpconfig.get('portNumber') # We have all our information. Process this portal. process(lun, iqn, ipaddress, portnumber, status, isactive) # We've gone through all our mappings. # Make sure we found something to return. if len(luns) == 0: # Since we just mapped this and can't find that mapping the world # is wrong so we raise exception. raise exception.VolumeBackendAPIException( data=_('Unable to find iSCSI mappings.')) # Make sure we point to the best portal we can. This means it is # on the active controller and, preferably, up. If it isn't return # what we have. if pdata['up'] != -1: # We found a connection that is already up. Return that. pdata['active'] = pdata['up'] elif pdata['active'] == -1: # This shouldn't be able to happen. Maybe a controller went # down in the middle of this so just return the first one and # hope the ports are up by the time the connection is attempted. LOG.debug('find_iscsi_properties: ' 'Volume is not yet active on any controller.') pdata['active'] = 0 # Make sure we have a good item at the top of the list. 
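# (The pop/insert calls below rotate the entry at index pdata['active'] to the front of iqns, portals and luns, so the preferred portal is the first one an initiator tries.)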
iqns.insert(0, iqns.pop(pdata['active'])) portals.insert(0, portals.pop(pdata['active'])) luns.insert(0, luns.pop(pdata['active'])) data = {'target_discovered': False, 'target_iqn': iqns[0], 'target_iqns': iqns, 'target_portal': portals[0], 'target_portals': portals, 'target_lun': luns[0], 'target_luns': luns } LOG.debug('find_iscsi_properties: %s', data) return data def map_volume(self, scvolume, scserver): """Maps the Dell backend volume object to the Dell server object. The check for the Dell server object existence is elsewhere; does not create the Dell server object. :param scvolume: Storage Center volume object. :param scserver: Storage Center server object. :returns: SC mapping profile or None """ # Make sure we have what we think we have serverid = self._get_id(scserver) volumeid = self._get_id(scvolume) if serverid is not None and volumeid is not None: # If we have a mapping to our server return it here. mprofiles = self._find_mapping_profiles(scvolume) for mprofile in mprofiles: if self._get_id(mprofile.get('server')) == serverid: LOG.info('Volume %(vol)s already mapped to %(srv)s', {'vol': scvolume['name'], 'srv': scserver['name']}) return mprofile # No? Then map it up. payload = {} payload['server'] = serverid payload['Advanced'] = {'MapToDownServerHbas': True} r = self.client.post('StorageCenter/ScVolume/%s/MapToServer' % volumeid, payload, True) if self._check_result(r): # We just return our mapping LOG.info('Volume %(vol)s mapped to %(srv)s', {'vol': scvolume['name'], 'srv': scserver['name']}) return self._first_result(r) # Error out LOG.error('Unable to map %(vol)s to %(srv)s', {'vol': scvolume['name'], 'srv': scserver['name']}) return None def unmap_volume(self, scvolume, scserver): """Unmaps the Dell volume object from the Dell server object. Deletes all mappings to a Dell server object, not just the ones on the path defined in cinder.conf. :param scvolume: Storage Center volume object. :param scserver: Storage Center server object. :returns: True or False. """ rtn = True serverid = self._get_id(scserver) volumeid = self._get_id(scvolume) if serverid is not None and volumeid is not None: profiles = self._find_mapping_profiles(scvolume) for profile in profiles: prosrv = profile.get('server') if prosrv is not None and self._get_id(prosrv) == serverid: r = self.client.delete('StorageCenter/ScMappingProfile/%s' % self._get_id(profile), async_call=True) if self._check_result(r): # Check our result in the json. result = self._get_json(r) # EM 15.1 and 15.2 return a boolean directly. # 15.3 on up return it in a dict under 'result'. if result is True or (type(result) is dict and result.get('result')): LOG.info( 'Volume %(vol)s unmapped from %(srv)s', {'vol': scvolume['name'], 'srv': scserver['name']}) continue LOG.error('Unable to unmap %(vol)s from %(srv)s', {'vol': scvolume['name'], 'srv': scserver['name']}) # 1 failed unmap is as good as 100. # Fail it and leave rtn = False break # return true/false. return rtn def unmap_all(self, scvolume): """Unmaps a volume from all connections except SCs. :param scvolume: The SC Volume object. :return: Boolean """ rtn = True profiles = self._find_mapping_profiles(scvolume) for profile in profiles: # get our server scserver = None r = self.client.get('StorageCenter/ScServer/%s' % self._get_id(profile.get('server'))) if self._check_result(r): scserver = self._get_json(r) # We do not want to whack our replication or live volume # connections. So anything other than a remote storage center # is fair game. 
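# (Replication and Live Volume secondaries appear as ScServer objects whose type is 'RemoteStorageCenter'; the check below screens those out.)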
if scserver and scserver['type'].upper() != 'REMOTESTORAGECENTER': # we can whack the connection. r = self.client.delete('StorageCenter/ScMappingProfile/%s' % self._get_id(profile), async_call=True) if self._check_result(r): # Check our result in the json. result = self._get_json(r) # EM 15.1 and 15.2 return a boolean directly. # 15.3 on up return it in a dict under 'result'. if result is True or (type(result) is dict and result.get('result')): LOG.info( 'Volume %(vol)s unmapped from %(srv)s', {'vol': scvolume['name'], 'srv': scserver['instanceName']}) # yay, it is gone, carry on. continue LOG.error('Unable to unmap %(vol)s from %(srv)s', {'vol': scvolume['name'], 'srv': scserver['instanceName']}) # 1 failed unmap is as good as 100. # Fail it and leave rtn = False break return rtn def get_storage_usage(self): """Gets the storage usage object from the Dell backend. This contains capacity and usage information for the SC. :returns: The SC storageusage object. """ storageusage = None if self.ssn is not None: r = self.client.get( 'StorageCenter/StorageCenter/%s/StorageUsage' % self.ssn) if self._check_result(r): storageusage = self._get_json(r) return storageusage def _is_active(self, scvolume): if (scvolume.get('active') is not True or scvolume.get('replayAllowed') is not True): return False return True def create_replay(self, scvolume, replayid, expire): """Takes a snapshot of a volume. One could snap a volume before it has been activated, so activate by mapping and unmapping to a random server and let them. This should be a fail but the Tempest tests require it. :param scvolume: Volume to snapshot. :param replayid: Name to use for the snapshot. This is a portion of the snapshot ID as we do not have space for the entire GUID in the replay description. :param expire: Time in minutes before the replay expires. For most snapshots this will be 0 (never expire) but if we are cloning a volume we will snap it right before creating the clone. :returns: The Dell replay object or None. :raises VolumeBackendAPIException: On failure to intialize volume. """ replay = None if scvolume is not None: if not self._is_active(scvolume): self._init_volume(scvolume) scvolume = self.get_volume(self._get_id(scvolume)) if not self._is_active(scvolume): raise exception.VolumeBackendAPIException( message=( _('Unable to create snapshot from empty volume.' ' %s') % scvolume['name'])) # We have a volume and it is initialized. payload = {} payload['description'] = replayid payload['expireTime'] = expire r = self.client.post('StorageCenter/ScVolume/%s/CreateReplay' % self._get_id(scvolume), payload, True) if self._check_result(r): replay = self._first_result(r) # Quick double check. if replay is None: LOG.warning('Unable to create snapshot %s', replayid) # Return replay or None. return replay def find_replay(self, scvolume, replayid): """Searches for the replay by replayid. replayid is stored in the replay's description attribute. :param scvolume: Dell volume object. :param replayid: Name to search for. This is a portion of the snapshot ID as we do not have space for the entire GUID in the replay description. :returns: Dell replay object or None. """ r = self.client.get('StorageCenter/ScVolume/%s/ReplayList' % self._get_id(scvolume)) try: replays = self._get_json(r) # This will be a list. If it isn't bail if isinstance(replays, list): for replay in replays: # The only place to save our information with the public # api is the description field which isn't quite long # enough. 
So we check that our description is pretty much # the max length and we compare that to the start of # the snapshot id. description = replay.get('description') if (len(description) >= 30 and replayid.startswith(description) is True and replay.get('markedForExpiration') is not True): # We found our replay so return it. return replay except Exception: LOG.error('Invalid ReplayList return: %s', r) # If we are here then we didn't find the replay so warn and leave. LOG.warning('Unable to find snapshot %s', replayid) return None def manage_replay(self, screplay, replayid): """Basically renames the screplay and sets it to never expire. :param screplay: DellSC object. :param replayid: New name for replay. :return: True on success. False on fail. """ if screplay and replayid: payload = {} payload['description'] = replayid payload['expireTime'] = 0 r = self.client.put('StorageCenter/ScReplay/%s' % self._get_id(screplay), payload, True) if self._check_result(r): return True LOG.error('Error managing replay %s', screplay.get('description')) return False def unmanage_replay(self, screplay): """Basically sets the expireTime :param screplay: DellSC object. :return: True on success. False on fail. """ if screplay: payload = {} payload['expireTime'] = 1440 r = self.client.put('StorageCenter/ScReplay/%s' % self._get_id(screplay), payload, True) if self._check_result(r): return True LOG.error('Error unmanaging replay %s', screplay.get('description')) return False def delete_replay(self, scvolume, replayid): """Finds a Dell replay by replayid string and expires it. Once marked for expiration we do not return the replay as a snapshot even though it might still exist. (Backend requirements.) :param scvolume: Dell volume object. :param replayid: Name to search for. This is a portion of the snapshot ID as we do not have space for the entire GUID in the replay description. :returns: Boolean for success or failure. """ ret = True LOG.debug('Expiring replay %s', replayid) # if we do not have the instanceid then we have to find the replay. replay = self.find_replay(scvolume, replayid) if replay is not None: # expire our replay. r = self.client.post('StorageCenter/ScReplay/%s/Expire' % self._get_id(replay), {}, True) ret = self._check_result(r) # If we couldn't find it we call that a success. return ret def create_view_volume(self, volname, screplay, replay_profile_string, volume_qos, group_qos, dr_profile): """Creates a new volume named volname from the screplay. :param volname: Name of new volume. This is the cinder volume ID. :param screplay: Dell replay object from which to make a new volume. :param replay_profile_string: Profiles to be applied to the volume :param volume_qos: Volume QOS Profile to use. :param group_qos: Group QOS Profile to use. :param dr_profile: Data reduction profile to use. :returns: Dell volume object or None. """ folder = self._find_volume_folder(True) # Find our replay_profiles. addids, removeids = self._find_replay_profiles(replay_profile_string) # payload is just the volume name and folder if we have one. payload = {} payload['Name'] = volname payload['Notes'] = self.notes if folder is not None: payload['VolumeFolder'] = self._get_id(folder) if addids: payload['ReplayProfileList'] = addids # Add our Volume QOS Profile. self._check_add_profile_payload( payload, self._find_qos_profile(volume_qos), volume_qos, 'VolumeQosProfile') # Add our Group QOS Profile. 
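# (Passing True as the second argument asks _find_qos_profile for a group QOS profile rather than a volume QOS profile.)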
self._check_add_profile_payload( payload, self._find_qos_profile(group_qos, True), group_qos, 'GroupQosProfile') r = self.client.post('StorageCenter/ScReplay/%s/CreateView' % self._get_id(screplay), payload, True) volume = None if self._check_result(r): volume = self._first_result(r) # If we have a dr_profile to apply we should do so now. if dr_profile and not self.update_datareduction_profile(volume, dr_profile): LOG.error('Unable to apply %s to volume.', dr_profile) volume = None if volume is None: LOG.error('Unable to create volume %s from replay', volname) return volume def _expire_all_replays(self, scvolume): # We just try to grab the replay list and then expire them. # If this doens't work we aren't overly concerned. r = self.client.get('StorageCenter/ScVolume/%s/ReplayList' % self._get_id(scvolume)) if self._check_result(r): replays = self._get_json(r) # This will be a list. If it isn't bail if isinstance(replays, list): for replay in replays: if not replay['active']: # Send down an async expire. # We don't care if this fails. self.client.post('StorageCenter/ScReplay/%s/Expire' % self._get_id(replay), {}, True) def _wait_for_cmm(self, cmm, scvolume, replayid): # We wait for either the CMM to indicate that our copy has finished or # for our marker replay to show up. We do this because the CMM might # have been cleaned up by the system before we have a chance to check # it. Great. # Pick our max number of loops to run AFTER the CMM has gone away and # the time to wait between loops. # With a 3 second wait time this will be up to a 1 minute timeout # after the system claims to have finished. sleep = 3 waitforreplaymarkerloops = 20 while waitforreplaymarkerloops >= 0: r = self.client.get('StorageCenter/ScCopyMirrorMigrate/%s' % self._get_id(cmm)) if self._check_result(r): cmm = self._get_json(r) if cmm['state'] == 'Erred' or cmm['state'] == 'Paused': return False elif cmm['state'] == 'Finished': return True elif self.find_replay(scvolume, replayid): return True else: waitforreplaymarkerloops -= 1 eventlet.sleep(sleep) return False def create_cloned_volume(self, volumename, scvolume, storage_profile, replay_profile_list, volume_qos, group_qos, dr_profile): """Creates a volume named volumename from a copy of scvolume. :param volumename: Name of new volume. This is the cinder volume ID. :param scvolume: Dell volume object. :param storage_profile: Storage profile. :param replay_profile_list: List of snapshot profiles. :param volume_qos: Volume QOS Profile to use. :param group_qos: Group QOS Profile to use. :param dr_profile: Data reduction profile to use. :returns: The new volume's Dell volume object. :raises VolumeBackendAPIException: if error doing copy. """ LOG.info('create_cloned_volume: Creating %(dst)s from %(src)s', {'dst': volumename, 'src': scvolume['name']}) n = scvolume['configuredSize'].split(' ', 1) size = int(float(n[0]) // 1073741824) # Create our new volume. newvol = self.create_volume( volumename, size, storage_profile, replay_profile_list, volume_qos, group_qos, dr_profile) if newvol: try: replayid = str(uuid.uuid4()) screplay = self.create_replay(scvolume, replayid, 60) if not screplay: raise exception.VolumeBackendAPIException( message='Unable to create replay marker.') # Copy our source. 
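# (The copy below is driven by an ScCopyMirrorMigrate job on the backend; _wait_for_cmm above polls that job, or looks for the marker replay, until the copy finishes or errors out.)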
payload = {} payload['CopyReplays'] = True payload['DestinationVolume'] = self._get_id(newvol) payload['SourceVolume'] = self._get_id(scvolume) payload['StorageCenter'] = self.ssn payload['Priority'] = 'High' r = self.client.post('StorageCenter/ScCopyMirrorMigrate/Copy', payload, True) if self._check_result(r): cmm = self._get_json(r) if (cmm['state'] == 'Erred' or cmm['state'] == 'Paused' or not self._wait_for_cmm(cmm, newvol, replayid)): raise exception.VolumeBackendAPIException( message='ScCopyMirrorMigrate error.') LOG.debug('create_cloned_volume: Success') self._expire_all_replays(newvol) return newvol else: raise exception.VolumeBackendAPIException( message='ScCopyMirrorMigrate fail.') except exception.VolumeBackendAPIException: # It didn't. Delete the volume. self.delete_volume(volumename, self._get_id(newvol)) raise # Tell the user. LOG.error('create_cloned_volume: Unable to clone volume') return None def expand_volume(self, scvolume, newsize): """Expands scvolume to newsize GBs. :param scvolume: Dell volume object to be expanded. :param newsize: The new size of the volume object. :returns: The updated Dell volume object on success or None on failure. """ vol = None payload = {} payload['NewSize'] = '%d GB' % newsize r = self.client.post('StorageCenter/ScVolume/%s/ExpandToSize' % self._get_id(scvolume), payload, True) if self._check_result(r): vol = self._get_json(r) # More info might be good. if vol is not None: LOG.debug('Volume expanded: %(name)s %(size)s', {'name': vol['name'], 'size': vol['configuredSize']}) else: LOG.error('Error expanding volume %s.', scvolume['name']) return vol def rename_volume(self, scvolume, name): """Rename scvolume to name. This is mostly used by update_migrated_volume. :param scvolume: The Dell volume object to be renamed. :param name: The new volume name. :returns: Boolean indicating success or failure. """ payload = {} payload['Name'] = name r = self.client.put('StorageCenter/ScVolume/%s' % self._get_id(scvolume), payload, True) if self._check_result(r): return True LOG.error('Error renaming volume %(original)s to %(name)s', {'original': scvolume['name'], 'name': name}) return False def _update_profile(self, scvolume, profile, profilename, profiletype, restname, allowprefname, continuewithoutdefault=False): prefs = self._get_user_preferences() if not prefs: return False if not prefs.get(allowprefname): LOG.error('User does not have permission to change ' '%s selection.', profiletype) return False if profilename: if not profile: LOG.error('%(ptype)s %(pname)s was not found.', {'ptype': profiletype, 'pname': profilename}) return False else: # Going from specific profile to the user default profile = prefs.get(restname) if not profile and not continuewithoutdefault: LOG.error('Default %s was not found.', profiletype) return False LOG.info('Switching volume %(vol)s to profile %(prof)s.', {'vol': scvolume['name'], 'prof': profile.get('name')}) payload = {} payload[restname] = self._get_id(profile) if profile else None r = self.client.put('StorageCenter/ScVolumeConfiguration/%s' % self._get_id(scvolume), payload, True) if self._check_result(r): return True LOG.error('Error changing %(ptype)s for volume ' '%(original)s to %(name)s', {'ptype': profiletype, 'original': scvolume['name'], 'name': profilename}) return False def update_storage_profile(self, scvolume, storage_profile): """Update a volume's Storage Profile. Changes the volume setting to use a different Storage Profile. 
If storage_profile is None, will reset to the default profile for the cinder user account. :param scvolume: The Storage Center volume to be updated. :param storage_profile: The requested Storage Profile name. :returns: True if successful, False otherwise. """ profile = self._find_storage_profile(storage_profile) return self._update_profile(scvolume, profile, storage_profile, 'Storage Profile', 'storageProfile', 'allowStorageProfileSelection') def update_datareduction_profile(self, scvolume, dr_profile): """Update a volume's Data Reduction Profile Changes the volume setting to use a different data reduction profile. If dr_profile is None, will reset to the default profile for the cinder user account. :param scvolume: The Storage Center volume to be updated. :param dr_profile: The requested data reduction profile name. :returns: True if successful, False otherwise. """ profile = self._find_data_reduction_profile(dr_profile) return self._update_profile(scvolume, profile, dr_profile, 'Data Reduction Profile', 'dataReductionProfile', 'allowDataReductionSelection') def update_qos_profile(self, scvolume, qosprofile, grouptype=False): """Update a volume's QOS profile Changes the volume setting to use a different QOS Profile. :param scvolume: The Storage Center volume to be updated. :param qosprofile: The requested QOS profile name. :param grouptype: Is this a group QOS profile? :returns: True if successful, False otherwise. """ profiletype = 'groupQosProfile' if grouptype else 'volumeQosProfile' profile = self._find_qos_profile(qosprofile, grouptype) return self._update_profile(scvolume, profile, qosprofile, 'Qos Profile', profiletype, 'allowQosProfileSelection', grouptype) def _get_user_preferences(self): """Gets the preferences and defaults for this user. There are a set of preferences and defaults for each user on the Storage Center. This retrieves all settings for the current account used by Cinder. """ r = self.client.get('StorageCenter/StorageCenter/%s/UserPreferences' % self.ssn) if self._check_result(r): return self._get_json(r) return {} def _delete_server(self, scserver): """Deletes scserver from the backend. Just give it a shot. If it fails it doesn't matter to cinder. This is generally used when a create_server call fails in the middle of creation. Cinder knows nothing of the servers objects on Dell backends so success or failure is purely an internal thing. Note that we do not delete a server object in normal operation. :param scserver: Dell server object to delete. :returns: Nothing. Only logs messages. """ LOG.debug('ScServer delete %s', self._get_id(scserver)) if scserver.get('deleteAllowed') is True: r = self.client.delete('StorageCenter/ScServer/%s' % self._get_id(scserver), async_call=True) if self._check_result(r): LOG.debug('ScServer deleted.') else: LOG.debug('_delete_server: deleteAllowed is False.') def find_replay_profile(self, name): """Finds the Dell SC replay profile object name. :param name: Name of the replay profile object. This is the consistency group id. :return: Dell SC replay profile or None. 
:raises VolumeBackendAPIException: """ self.cg_except_on_no_support() pf = self._get_payload_filter() pf.append('ScSerialNumber', self.ssn) pf.append('Name', name) r = self.client.post('StorageCenter/ScReplayProfile/GetList', pf.payload) if self._check_result(r): profilelist = self._get_json(r) if profilelist: if len(profilelist) > 1: LOG.error('Multiple replay profiles under name %s', name) raise exception.VolumeBackendAPIException( data=_('Multiple profiles found.')) return profilelist[0] return None def create_replay_profile(self, name): """Creates a replay profile on the Dell SC. :param name: The ID of the consistency group. This will be matched to the name on the Dell SC. :return: SC profile or None. """ self.cg_except_on_no_support() profile = self.find_replay_profile(name) if not profile: payload = {} payload['StorageCenter'] = self.ssn payload['Name'] = name payload['Type'] = 'Consistent' payload['Notes'] = self.notes r = self.client.post('StorageCenter/ScReplayProfile', payload, True) # 201 expected. if self._check_result(r): profile = self._first_result(r) return profile def delete_replay_profile(self, profile): """Delete the replay profile from the Dell SC. :param profile: SC replay profile. :return: Nothing. :raises VolumeBackendAPIException: """ self.cg_except_on_no_support() r = self.client.delete('StorageCenter/ScReplayProfile/%s' % self._get_id(profile), async_call=True) if self._check_result(r): LOG.info('Profile %s has been deleted.', profile.get('name')) else: # We failed due to a failure to delete an existing profile. # This is reason to raise an exception. LOG.error('Unable to delete profile %s.', profile.get('name')) raise exception.VolumeBackendAPIException( data=_('Error deleting replay profile.')) def _get_volume_configuration(self, scvolume): """Get the ScVolumeConfiguration object. :param scvolume: The Dell SC volume object. :return: The SCVolumeConfiguration object or None. """ r = self.client.get('StorageCenter/ScVolume/%s/VolumeConfiguration' % self._get_id(scvolume)) if self._check_result(r): return self._first_result(r) return None def _update_volume_profiles(self, scvolume, addid=None, removeid=None): """Either Adds or removes the listed profile from the SC volume. :param scvolume: Dell SC volume object. :param addid: Profile ID to be added to the SC volume configuration. :param removeid: ID to be removed to the SC volume configuration. :return: True/False on success/failure. """ if scvolume: scvolumecfg = self._get_volume_configuration(scvolume) if scvolumecfg: profilelist = scvolumecfg.get('replayProfileList', []) newprofilelist = [] # Do we have one to add? Start the list with it. if addid: newprofilelist = [addid] # Re-add our existing profiles. for profile in profilelist: profileid = self._get_id(profile) # Make sure it isn't one we want removed and that we # haven't already added it. (IE it isn't the addid.) if (profileid != removeid and newprofilelist.count(profileid) == 0): newprofilelist.append(profileid) # Update our volume configuration. payload = {} payload['ReplayProfileList'] = newprofilelist r = self.client.put('StorageCenter/ScVolumeConfiguration/%s' % self._get_id(scvolumecfg), payload, True) # check result LOG.debug('_update_volume_profiles %s : %s : %s', self._get_id(scvolume), profilelist, r) # Good return? if self._check_result(r): return True return False def _add_cg_volumes(self, profileid, add_volumes): """Trundles through add_volumes and adds the replay profile to them. :param profileid: The ID of the replay profile. 
:param add_volumes: List of Dell SC volume objects that are getting added to the consistency group. :return: True/False on success/failure. """ for vol in add_volumes: scvolume = self.find_volume(vol['id'], vol['provider_id']) if (self._update_volume_profiles(scvolume, addid=profileid, removeid=None)): LOG.info('Added %s to cg.', vol['id']) else: LOG.error('Failed to add %s to cg.', vol['id']) return False return True def _remove_cg_volumes(self, profileid, remove_volumes): """Removes the replay profile from the remove_volumes list of vols. :param profileid: The ID of the replay profile. :param remove_volumes: List of Dell SC volume objects that are getting removed from the consistency group. :return: True/False on success/failure. """ for vol in remove_volumes: scvolume = self.find_volume(vol['id'], vol['provider_id']) if (self._update_volume_profiles(scvolume, addid=None, removeid=profileid)): LOG.info('Removed %s from cg.', vol['id']) else: LOG.error('Failed to remove %s from cg.', vol['id']) return False return True def update_cg_volumes(self, profile, add_volumes=None, remove_volumes=None): """Adds or removes the profile from the specified volumes :param profile: Dell SC replay profile object. :param add_volumes: List of volumes we are adding to the consistency group. (Which is to say we are adding the profile to this list of volumes.) :param remove_volumes: List of volumes we are removing from the consistency group. (Which is to say we are removing the profile from this list of volumes.) :return: True/False on success/failure. """ self.cg_except_on_no_support() ret = True profileid = self._get_id(profile) if add_volumes: LOG.info('Adding volumes to cg %s.', profile['name']) ret = self._add_cg_volumes(profileid, add_volumes) if ret and remove_volumes: LOG.info('Removing volumes from cg %s.', profile['name']) ret = self._remove_cg_volumes(profileid, remove_volumes) return ret def _init_cg_volumes(self, profileid): """Gets the cg volume list and maps/unmaps the non active volumes. :param profileid: Replay profile identifier. :return: Nothing """ r = self.client.get('StorageCenter/ScReplayProfile/%s/VolumeList' % profileid) if self._check_result(r): vols = self._get_json(r) for vol in vols: if (vol.get('active') is not True or vol.get('replayAllowed') is not True): self._init_volume(vol) def snap_cg_replay(self, profile, replayid, expire): """Snaps a replay of a consistency group. :param profile: The name of the consistency group profile. :param replayid: The name of the replay. :param expire: Time in mintues before a replay expires. 0 means no expiration. :returns: Dell SC replay object. """ self.cg_except_on_no_support() if profile: # We have to make sure these are snappable. self._init_cg_volumes(self._get_id(profile)) # Succeed or fail we soldier on. payload = {} payload['description'] = replayid payload['expireTime'] = expire r = self.client.post('StorageCenter/ScReplayProfile/%s/' 'CreateReplay' % self._get_id(profile), payload, True) if self._check_result(r): LOG.info('CreateReplay success %s', replayid) return True return False def _find_sc_cg(self, profile, replayid): """Finds the sc consistency group that matches replayid :param profile: Dell profile object. :param replayid: Name to search for. This is a portion of the snapshot ID as we do not have space for the entire GUID in the replay description. :return: Consistency group object or None. 
""" self.cg_except_on_no_support() r = self.client.get( 'StorageCenter/ScReplayProfile/%s/ConsistencyGroupList' % self._get_id(profile)) if self._check_result(r): cglist = self._get_json(r) if cglist and isinstance(cglist, list): for cg in cglist: desc = cg.get('description') if (len(desc) >= 30 and replayid.startswith(desc) is True): # We found our cg so return it. return cg return None def _find_cg_replays(self, profile, replayid): """Searches for the replays that match replayid for a given profile. replayid is stored in the replay's description attribute. :param profile: Dell profile object. :param replayid: Name to search for. This is a portion of the snapshot ID as we do not have space for the entire GUID in the replay description. :returns: Dell replay object array. """ self.cg_except_on_no_support() replays = [] sccg = self._find_sc_cg(profile, replayid) if sccg: r = self.client.get( 'StorageCenter/ScReplayConsistencyGroup/%s/ReplayList' % self._get_id(sccg)) replays = self._get_json(r) else: LOG.error('Unable to locate snapshot %s', replayid) return replays def delete_cg_replay(self, profile, replayid): """Finds a Dell cg replay by replayid string and expires it. Once marked for expiration we do not return the replay as a snapshot even though it might still exist. (Backend requirements.) :param cg_name: Consistency Group name. This is the ReplayProfileName. :param replayid: Name to search for. This is a portion of the snapshot ID as we do not have space for the entire GUID in the replay description. :returns: Boolean for success or failure. """ self.cg_except_on_no_support() LOG.debug('Expiring consistency group replay %s', replayid) replays = self._find_cg_replays(profile, replayid) for replay in replays: instanceid = self._get_id(replay) LOG.debug('Expiring replay %s', instanceid) r = self.client.post('StorageCenter/ScReplay/%s/Expire' % instanceid, {}, True) if not self._check_result(r): return False # We either couldn't find it or expired it. return True def cg_except_on_no_support(self): if not self.consisgroups: msg = _('Dell API 2.1 or later required' ' for Consistency Group support') raise NotImplementedError(data=msg) @staticmethod def size_to_gb(spacestring): """Splits a SC size string into GB and a remainder. Space is returned in a string like ... 7.38197504E8 Bytes Need to split that apart and convert to GB. :param spacestring: SC size string. :return: Size in GB and remainder in byte. """ try: n = spacestring.split(' ', 1) fgb = int(float(n[0]) // 1073741824) frem = int(float(n[0]) % 1073741824) return fgb, frem except Exception: # We received an invalid size string. Blow up. raise exception.VolumeBackendAPIException( data=_('Error retrieving volume size')) def _import_one(self, scvolume, newname): # Find our folder folder = self._find_volume_folder(True) # If we actually have a place to put our volume create it if folder is None: LOG.warning('Unable to create folder %s', self.vfname) # Rename and move our volume. payload = {} payload['Name'] = newname if folder: payload['VolumeFolder'] = self._get_id(folder) r = self.client.put('StorageCenter/ScVolume/%s' % self._get_id(scvolume), payload, True) if self._check_result(r): return self._get_json(r) return None def manage_existing(self, newname, existing): """Finds the volume named existing and renames it. This checks a few things. The volume has to exist. There can only be one volume by that name. Since cinder manages volumes by the GB it has to be defined on a GB boundary. This renames existing to newname. 
newname is the guid from the cinder volume['id']. The volume is moved to the defined cinder volume folder. :param newname: Name to rename the volume to. :param existing: The existing volume dict. :return: scvolume. :raises VolumeBackendAPIException, ManageExistingInvalidReference: """ vollist = self._get_volume_list(existing.get('source-name'), existing.get('source-id'), False) count = len(vollist) # If we found one volume with that name we can work with it. if count == 1: # First thing to check is if the size is something we can # work with. sz, rem = self.size_to_gb(vollist[0]['configuredSize']) if rem > 0: raise exception.VolumeBackendAPIException( data=_('Volume size must be a multiple of 1 GB.')) # We only want to grab detached volumes. mappings = self._find_mappings(vollist[0]) if len(mappings) > 0: msg = _('Volume is attached to a server. (%s)') % existing raise exception.VolumeBackendAPIException(data=msg) scvolume = self._import_one(vollist[0], newname) if scvolume: return scvolume msg = _('Unable to manage volume %s') % existing raise exception.VolumeBackendAPIException(data=msg) elif count > 1: raise exception.ManageExistingInvalidReference( existing_ref=existing, reason=_('Volume not unique.')) else: raise exception.ManageExistingInvalidReference( existing_ref=existing, reason=_('Volume not found.')) def get_unmanaged_volume_size(self, existing): """Looks up the volume named existing and returns its size string. :param existing: Existing volume dict. :return: The SC configuredSize string. :raises ManageExistingInvalidReference: """ vollist = self._get_volume_list(existing.get('source-name'), existing.get('source-id'), False) count = len(vollist) # If we found one volume with that name we can work with it. if count == 1: sz, rem = self.size_to_gb(vollist[0]['configuredSize']) if rem > 0: raise exception.VolumeBackendAPIException( data=_('Volume size must be a multiple of 1 GB.')) return sz elif count > 1: raise exception.ManageExistingInvalidReference( existing_ref=existing, reason=_('Volume not unique.')) else: raise exception.ManageExistingInvalidReference( existing_ref=existing, reason=_('Volume not found.')) def unmanage(self, scvolume): """Unmanage our volume. We simply rename it with a prefix of `Unmanaged_`. That's it. :param scvolume: The Dell SC volume object. :return: Nothing. :raises VolumeBackendAPIException: """ newname = 'Unmanaged_' + scvolume['name'] payload = {} payload['Name'] = newname r = self.client.put('StorageCenter/ScVolume/%s' % self._get_id(scvolume), payload, True) if self._check_result(r): LOG.info('Volume %s unmanaged.', scvolume['name']) else: msg = _('Unable to rename volume %(existing)s to %(newname)s') % { 'existing': scvolume['name'], 'newname': newname} raise exception.VolumeBackendAPIException(data=msg) def _find_qos(self, qosnode, ssn=-1): """Find Dell SC QOS Node entry for replication. :param qosnode: Name of qosnode. :param ssn: SSN to search on. :return: scqos node object. 
""" ssn = self._vet_ssn(ssn) pf = self._get_payload_filter() pf.append('scSerialNumber', ssn) pf.append('name', qosnode) r = self.client.post('StorageCenter/ScReplicationQosNode/GetList', pf.payload) if self._check_result(r): nodes = self._get_json(r) if len(nodes) > 0: return nodes[0] else: payload = {} payload['LinkSpeed'] = '1 Gbps' payload['Name'] = qosnode payload['StorageCenter'] = ssn payload['BandwidthLimited'] = False r = self.client.post('StorageCenter/ScReplicationQosNode', payload, True) if self._check_result(r): return self._get_json(r) LOG.error('Unable to find or create QoS Node named %s', qosnode) raise exception.VolumeBackendAPIException( data=_('Failed to find QoSnode')) def update_replicate_active_replay(self, scvolume, replactive): """Enables or disables replicating the active replay for given vol. :param scvolume: SC Volume object. :param replactive: True or False :return: True or False """ r = self.client.get('StorageCenter/ScVolume/%s/ReplicationSourceList' % self._get_id(scvolume)) if self._check_result(r): replications = self._get_json(r) for replication in replications: if replication['replicateActiveReplay'] != replactive: payload = {'ReplicateActiveReplay': replactive} r = self.client.put('StorageCenter/ScReplication/%s' % replication['instanceId'], payload, True) if not self._check_result(r): return False return True def get_screplication(self, scvolume, destssn): """Find the screplication object for the volume on the dest backend. :param scvolume: :param destssn: :return: """ LOG.debug('get_screplication') r = self.client.get('StorageCenter/ScVolume/%s/ReplicationSourceList' % self._get_id(scvolume)) if self._check_result(r): replications = self._get_json(r) for replication in replications: # So we need to find the replication we are looking for. LOG.debug(replication) LOG.debug('looking for %s', destssn) if replication.get('destinationScSerialNumber') == destssn: return replication # Unable to locate replication. LOG.warning('Unable to locate replication %(vol)s to %(ssn)s', {'vol': scvolume.get('name'), 'ssn': destssn}) return None def delete_replication(self, scvolume, destssn, deletedestvolume=True): """Deletes the SC replication object from scvolume to the destssn. :param scvolume: Dell SC Volume object. :param destssn: SC the replication is replicating to. :param deletedestvolume: Delete or keep dest volume. :return: True on success. False on fail. """ replication = self.get_screplication(scvolume, destssn) if replication: payload = {} payload['DeleteDestinationVolume'] = deletedestvolume payload['RecycleDestinationVolume'] = deletedestvolume payload['DeleteRestorePoint'] = True r = self.client.delete('StorageCenter/ScReplication/%s' % self._get_id(replication), payload=payload, async_call=True) if self._check_result(r): # check that we whacked the dest volume LOG.info('Replication %(vol)s to %(dest)s.', {'vol': scvolume.get('name'), 'dest': destssn}) return True LOG.error('Unable to delete replication for ' '%(vol)s to %(dest)s.', {'vol': scvolume.get('name'), 'dest': destssn}) return False def _repl_name(self, name): return self.repl_prefix + name def _get_disk_folder(self, ssn, foldername): diskfolder = None # If no folder name we just pass through this. if foldername: pf = self._get_payload_filter() pf.append('scSerialNumber', ssn) pf.append('name', foldername) r = self.client.post('StorageCenter/ScDiskFolder/GetList', pf.payload) if self._check_result(r): try: # Go for broke. 
diskfolder = self._get_json(r)[0] except Exception: # We just log this as an error and return nothing. LOG.error('Unable to find ' 'disk folder %(name)s on %(ssn)s', {'name': foldername, 'ssn': ssn}) return diskfolder def create_replication(self, scvolume, destssn, qosnode, synchronous, diskfolder, replicate_active): """Create repl from scvol to destssn. :param scvolume: Dell SC volume object. :param destssn: Destination SSN string. :param qosnode: Name of Dell SC QOS Node for this replication. :param synchronous: Boolean. :param diskfolder: optional disk folder name. :param replicate_active: replicate active replay. :return: Dell SC replication object. """ screpl = None ssn = self.find_sc(int(destssn)) payload = {} payload['DestinationStorageCenter'] = ssn payload['QosNode'] = self._get_id(self._find_qos(qosnode)) payload['SourceVolume'] = self._get_id(scvolume) payload['StorageCenter'] = self.find_sc() # Have to replicate the active replay. payload['ReplicateActiveReplay'] = replicate_active or synchronous if synchronous: payload['Type'] = 'Synchronous' # If our type is synchronous we prefer high availability be set. payload['SyncMode'] = 'HighAvailability' else: payload['Type'] = 'Asynchronous' destinationvolumeattributes = {} destinationvolumeattributes['CreateSourceVolumeFolderPath'] = True destinationvolumeattributes['Notes'] = self.notes destinationvolumeattributes['Name'] = self._repl_name(scvolume['name']) # Find our disk folder. If they haven't specified one this will just # drop through. If they have specified one and it can't be found the # error will be logged but this will keep going. df = self._get_disk_folder(destssn, diskfolder) if df: destinationvolumeattributes['DiskFolder'] = self._get_id(df) payload['DestinationVolumeAttributes'] = destinationvolumeattributes r = self.client.post('StorageCenter/ScReplication', payload, True) # 201 expected. if self._check_result(r): LOG.info('Replication created for %(volname)s to %(destsc)s', {'volname': scvolume.get('name'), 'destsc': destssn}) screpl = self._get_json(r) # Check we did something. if not screpl: # Failed to launch. Inform user. Throw. LOG.error('Unable to replicate %(volname)s to %(destsc)s', {'volname': scvolume.get('name'), 'destsc': destssn}) return screpl def find_repl_volume(self, name, destssn, instance_id=None, source=False, destination=True): """Find our replay destination volume on the destssn. :param name: Name to search for. :param destssn: Where to look for the volume. :param instance_id: If we know our exact volume ID use that. :param source: Replication source boolen. :param destination: Replication destination boolean. :return: SC Volume object or None """ # Do a normal volume search. pf = self._get_payload_filter() pf.append('scSerialNumber', destssn) # Are we looking for a replication destination? pf.append('ReplicationDestination', destination) # Are we looking for a replication source? pf.append('ReplicationSource', source) # There is a chance we know the exact volume. If so then use that. if instance_id: pf.append('instanceId', instance_id) else: # Try the name. pf.append('Name', name) r = self.client.post('StorageCenter/ScVolume/GetList', pf.payload) if self._check_result(r): volumes = self._get_json(r) if len(volumes) == 1: return volumes[0] return None def remove_mappings(self, scvol): """Peels all the mappings off of scvol. :param scvol: Storage Center volume object. :return: True/False on Success/Failure. 
""" if scvol: r = self.client.post('StorageCenter/ScVolume/%s/Unmap' % self._get_id(scvol), {}, True) return self._check_result(r) return False def break_replication(self, volumename, instance_id, destssn): """This just breaks the replication. If we find the source we just delete the replication. If the source is down then we find the destination and unmap it. Fail pretty much every time this goes south. :param volumename: Volume name is the guid from the cinder volume. :param instance_id: Storage Center volume object instance id. :param destssn: Destination ssn. :return: Replication SC volume object. """ replinstanceid = None scvolume = self.find_volume(volumename, instance_id) if scvolume: screplication = self.get_screplication(scvolume, destssn) # if we got our replication volume we can do this nicely. if screplication: replinstanceid = ( screplication['destinationVolume']['instanceId']) screplvol = self.find_repl_volume(self._repl_name(volumename), destssn, replinstanceid) # delete_replication fails to delete replication without also # stuffing it into the recycle bin. # Instead we try to unmap the destination volume which will break # the replication but leave the replication object on the SC. if self.remove_mappings(screplvol): # Try to kill mappings on the source. # We don't care that this succeeded or failed. Just move on. self.remove_mappings(scvolume) return screplvol def _get_replay_list(self, scvolume): r = self.client.get('StorageCenter/ScVolume/%s/ReplayList' % self._get_id(scvolume)) if self._check_result(r): return self._get_json(r) return [] def find_common_replay(self, svolume, dvolume): """Finds the common replay between two volumes. This assumes that one volume was replicated from the other. This should return the most recent replay. :param svolume: Source SC Volume. :param dvolume: Destination SC Volume. :return: Common replay or None. """ if svolume and dvolume: sreplays = self._get_replay_list(svolume) dreplays = self._get_replay_list(dvolume) for dreplay in dreplays: for sreplay in sreplays: if dreplay['globalIndex'] == sreplay['globalIndex']: return dreplay return None def start_replication(self, svolume, dvolume, replicationtype, qosnode, activereplay): """Starts a replication between volumes. Requires the dvolume to be in an appropriate state to start this. :param svolume: Source SC Volume. :param dvolume: Destiation SC Volume :param replicationtype: Asynchronous or synchronous. :param qosnode: QOS node name. :param activereplay: Boolean to replicate the active replay or not. :return: ScReplication object or None. """ if svolume and dvolume: qos = self._find_qos(qosnode, svolume['scSerialNumber']) if qos: payload = {} payload['QosNode'] = self._get_id(qos) payload['SourceVolume'] = self._get_id(svolume) payload['StorageCenter'] = svolume['scSerialNumber'] # Have to replicate the active replay. payload['ReplicateActiveReplay'] = activereplay payload['Type'] = replicationtype payload['DestinationVolume'] = self._get_id(dvolume) payload['DestinationStorageCenter'] = dvolume['scSerialNumber'] r = self.client.post('StorageCenter/ScReplication', payload, True) # 201 expected. if self._check_result(r): LOG.info('Replication created for ' '%(src)s to %(dest)s', {'src': svolume.get('name'), 'dest': dvolume.get('name')}) screpl = self._get_json(r) return screpl return None def replicate_to_common(self, svolume, dvolume, qosnode): """Reverses a replication between two volumes. :param fovolume: Failed over volume. (Current) :param ovolume: Original source volume. 
:param qosnode: QOS node name to use to create the replay. :return: ScReplication object or None. """ # find our common replay. creplay = self.find_common_replay(svolume, dvolume) # if we found one. if creplay: # create a view volume from the common replay. payload = {} # funky name. payload['Name'] = 'fback:' + dvolume['name'] payload['Notes'] = self.notes payload['VolumeFolder'] = self._get_id(dvolume['volumeFolder']) r = self.client.post('StorageCenter/ScReplay/%s/CreateView' % self._get_id(creplay), payload, True) if self._check_result(r): vvolume = self._get_json(r) if vvolume: # snap a replay and start replicating. if self.create_replay(svolume, 'failback', 600): return self.start_replication(svolume, vvolume, 'Asynchronous', qosnode, False) # No joy. Error the volume. return None def flip_replication(self, svolume, dvolume, name, replicationtype, qosnode, activereplay): """Enables replication from current destination volume to source. :param svolume: Current source. New destination. :param dvolume: Current destination. New source. :param name: Volume name. :param replicationtype: Sync or async :param qosnode: qos node for the new source ssn. :param activereplay: replicate the active replay. :return: True/False. """ # We are flipping a replication. That means there was a replication to # start with. Delete that. if self.delete_replication(svolume, dvolume['scSerialNumber'], False): # Kick off a replication going the other way. if self.start_replication(dvolume, svolume, replicationtype, qosnode, activereplay) is not None: # rename if (self.rename_volume(svolume, self._repl_name(name)) and self.rename_volume(dvolume, name)): return True LOG.warning('flip_replication: Unable to replicate ' '%(name)s from %(src)s to %(dst)s', {'name': name, 'src': dvolume['scSerialNumber'], 'dst': svolume['scSerialNumber']}) return False def replication_progress(self, screplid): """Get's the current progress of the replication. :param screplid: instanceId of the ScReplication object. :return: Boolean for synced, float of remaining bytes. (Or None, None.) """ if screplid: r = self.client.get( 'StorageCenter/ScReplication/%s/CurrentProgress' % screplid) if self._check_result(r): progress = self._get_json(r) try: remaining = float( progress['amountRemaining'].split(' ', 1)[0]) return progress['synced'], remaining except Exception: LOG.warning('replication_progress: Invalid replication' ' progress information returned: %s', progress) return None, None def is_swapped(self, provider_id, sclivevolume): if (sclivevolume.get('primaryVolume') and sclivevolume['primaryVolume']['instanceId'] != provider_id): LOG.debug('Volume %(pid)r in Live Volume %(lv)r is swapped.', {'pid': provider_id, 'lv': sclivevolume}) return True return False def is_failed_over(self, provider_id, sclivevolume): # either the secondary is active or the secondary is now our primary. if (sclivevolume.get('secondaryRole') == 'Activated' or self.is_swapped(provider_id, sclivevolume)): return True return False def _sc_live_volumes(self, ssn): if ssn: r = self.client.get('StorageCenter/StorageCenter/%s/LiveVolumeList' % ssn) if self._check_result(r): return self._get_json(r) return [] def _get_live_volumes(self): # Work around for a FW bug. Instead of grabbing the entire list at # once we have to Trundle through each SC's list. lvs = [] pf = self._get_payload_filter() pf.append('connected', True) r = self.client.post('StorageCenter/StorageCenter/GetList', pf.payload) if self._check_result(r): # Should return [] if nothing there. 
# Just in case do the or. scs = self._get_json(r) or [] for sc in scs: lvs += self._sc_live_volumes(self._get_id(sc)) return lvs def get_live_volume(self, primaryid, name=None): """Gets the live ScLiveVolume object for the volume with primaryid. :param primaryid: InstanceId of the primary volume. :param name: Volume name associated with this live volume. :return: ScLiveVolume object or None """ sclivevol = None if primaryid: # Try from our primary SSN. This would be the authority on the # Live Volume in question. lvs = self._sc_live_volumes(primaryid.split('.')[0]) # If not, grab them all and see if we are on the secondary. if not lvs: lvs = self._get_live_volumes() if lvs: # Look for our primaryid. for lv in lvs: if ((lv.get('primaryVolume') and lv['primaryVolume']['instanceId'] == primaryid) or (lv.get('secondaryVolume') and lv['secondaryVolume']['instanceId'] == primaryid)): sclivevol = lv break # We might not be able to find the LV via the primaryid. # So look for LVs that match our name. if name and sclivevol is None: # If we have a primaryVolume we will have an # instanceName. Otherwise check the secondaryVolume # if it exists. if (name in lv['instanceName'] or (lv.get('secondaryVolume') and name in lv['secondaryVolume']['instanceName'])): sclivevol = lv LOG.debug('get_live_volume: %r', sclivevol) return sclivevol def _get_hbas(self, serverid): # Helper to get the HBAs of a given server. r = self.client.get('StorageCenter/ScServer/%s/HbaList' % serverid) if self._check_result(r): return self._get_json(r) return None def map_secondary_volume(self, sclivevol, scdestsrv): """Maps the secondary volume of a LiveVolume to destsrv. :param sclivevol: ScLiveVolume object. :param scdestsrv: ScServer object for the destination. :return: ScMappingProfile object or None on failure. """ payload = {} payload['Server'] = self._get_id(scdestsrv) payload['Advanced'] = {'MapToDownServerHbas': True} r = self.client.post('StorageCenter/ScLiveVolume/%s/MapSecondaryVolume' % self._get_id(sclivevol), payload, True) if self._check_result(r): return self._get_json(r) return None def create_live_volume(self, scvolume, remotessn, active=False, sync=False, autofailover=False, primaryqos='CinderQOS', secondaryqos='CinderQOS'): """This creates a live volume instead of a replication. Servers are not created at this point so we cannot map up a remote server immediately. :param scvolume: Source SC Volume :param remotessn: Destination SSN. :param active: Replicate the active replay boolean. :param sync: Sync replication boolean. :param autofailover: enable autofailover and failback boolean. :param primaryqos: QOS node name for the primary side. :param secondaryqos: QOS node name for the remote side. :return: ScLiveVolume object or None on failure. 
""" destssn = self.find_sc(int(remotessn)) pscqos = self._find_qos(primaryqos) sscqos = self._find_qos(secondaryqos, destssn) if not destssn: LOG.error('create_live_volume: Unable to find remote %s', remotessn) elif not pscqos: LOG.error('create_live_volume: Unable to find or create ' 'qos node %s', primaryqos) elif not sscqos: LOG.error('create_live_volume: Unable to find or create remote' ' qos node %(qos)s on %(ssn)s', {'qos': secondaryqos, 'ssn': destssn}) else: payload = {} payload['PrimaryVolume'] = self._get_id(scvolume) payload['PrimaryQosNode'] = self._get_id(pscqos) payload['SecondaryQosNode'] = self._get_id(sscqos) payload['SecondaryStorageCenter'] = destssn payload['StorageCenter'] = self.ssn # payload['Dedup'] = False payload['FailoverAutomaticallyEnabled'] = autofailover payload['RestoreAutomaticallyEnabled'] = autofailover payload['SwapRolesAutomaticallyEnabled'] = False payload['ReplicateActiveReplay'] = (active or autofailover) if sync or autofailover: payload['Type'] = 'Synchronous' payload['SyncMode'] = 'HighAvailability' else: payload['Type'] = 'Asynchronous' secondaryvolumeattributes = {} secondaryvolumeattributes['CreateSourceVolumeFolderPath'] = True secondaryvolumeattributes['Notes'] = self.notes secondaryvolumeattributes['Name'] = scvolume['name'] payload[ 'SecondaryVolumeAttributes'] = secondaryvolumeattributes r = self.client.post('StorageCenter/ScLiveVolume', payload, True) if self._check_result(r): LOG.info('create_live_volume: Live Volume created from ' '%(svol)s to %(ssn)s', {'svol': self._get_id(scvolume), 'ssn': remotessn}) return self._get_json(r) LOG.error('create_live_volume: Failed to create Live Volume from ' '%(svol)s to %(ssn)s', {'svol': self._get_id(scvolume), 'ssn': remotessn}) return None def delete_live_volume(self, sclivevolume, deletesecondaryvolume): """Deletes the live volume. :param sclivevolume: ScLiveVolume object to be whacked. :return: Boolean on success/fail. """ payload = {} payload['ConvertToReplication'] = False payload['DeleteSecondaryVolume'] = deletesecondaryvolume payload['RecycleSecondaryVolume'] = deletesecondaryvolume payload['DeleteRestorePoint'] = deletesecondaryvolume r = self.client.delete('StorageCenter/ScLiveVolume/%s' % self._get_id(sclivevolume), payload, True) if self._check_result(r): return True return False def swap_roles_live_volume(self, sclivevolume): """Swap live volume roles. :param sclivevolume: Dell SC live volume object. :return: True/False on success/failure. 
""" r = self.client.post('StorageCenter/ScLiveVolume/%s/SwapRoles' % self._get_id(sclivevolume), {}, True) if self._check_result(r): return True return False def _find_qos_profile(self, qosprofile, grouptype=False): if qosprofile: pf = self._get_payload_filter() pf.append('ScSerialNumber', self.ssn) pf.append('Name', qosprofile) if grouptype: pf.append('profileType', 'GroupQosProfile') else: pf.append('profileType', 'VolumeQosProfile') r = self.client.post('StorageCenter/ScQosProfile/GetList', pf.payload) if self._check_result(r): qosprofiles = self._get_json(r) if len(qosprofiles): return qosprofiles[0] return None def _find_data_reduction_profile(self, drprofile): if drprofile: pf = self._get_payload_filter() pf.append('ScSerialNumber', self.ssn) pf.append('type', drprofile) r = self.client.post( 'StorageCenter/ScDataReductionProfile/GetList', pf.payload) if self._check_result(r): drps = self._get_json(r) if len(drps): return drps[0] return None ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/drivers/dell_emc/sc/storagecenter_common.py0000664000175000017500000027134700000000000026276 0ustar00zuulzuul00000000000000# Copyright (c) 2015-2017 Dell Inc, or its subsidiaries. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import eventlet from oslo_config import cfg from oslo_config import types from oslo_log import log as logging from oslo_utils import excutils from cinder.common import constants from cinder import exception from cinder.i18n import _ from cinder.objects import fields from cinder.volume import configuration from cinder.volume import driver from cinder.volume.drivers.dell_emc.sc import storagecenter_api from cinder.volume.drivers.san.san import san_opts from cinder.volume import volume_types from cinder.volume import volume_utils common_opts = [ cfg.IntOpt('dell_sc_ssn', default=64702, help='Storage Center System Serial Number'), cfg.PortOpt('dell_sc_api_port', default=3033, help='Dell API port'), cfg.StrOpt('dell_sc_server_folder', default='openstack', help='Name of the server folder to use on the Storage Center'), cfg.StrOpt('dell_sc_volume_folder', default='openstack', help='Name of the volume folder to use on the Storage Center'), cfg.BoolOpt('dell_sc_verify_cert', default=False, help='Enable HTTPS SC certificate verification'), cfg.StrOpt('secondary_san_ip', default='', help='IP address of secondary DSM controller'), cfg.StrOpt('secondary_san_login', default='Admin', help='Secondary DSM user name'), cfg.StrOpt('secondary_san_password', default='', help='Secondary DSM user password name', secret=True), cfg.PortOpt('secondary_sc_api_port', default=3033, help='Secondary Dell API port'), cfg.IntOpt('dell_api_async_rest_timeout', default=15, help='Dell SC API async call default timeout in seconds.'), cfg.IntOpt('dell_api_sync_rest_timeout', default=30, help='Dell SC API sync call default timeout in seconds.'), cfg.MultiOpt('excluded_domain_ip', item_type=types.IPAddress(), default=None, deprecated_for_removal=True, deprecated_reason="Replaced by excluded_domain_ips option", deprecated_since="Stein", help='DEPRECATED: Fault Domain IP to be excluded from ' 'iSCSI returns.'), cfg.ListOpt('excluded_domain_ips', item_type=types.IPAddress(), default=[], help='Comma separated Fault Domain IPs to be excluded ' 'from iSCSI returns.'), cfg.ListOpt('included_domain_ips', item_type=types.IPAddress(), default=[], help='Comma separated Fault Domain IPs to be included ' 'from iSCSI returns.'), cfg.StrOpt('dell_server_os', default='Red Hat Linux 6.x', help='Server OS type to use when creating a new server on the ' 'Storage Center.') ] LOG = logging.getLogger(__name__) CONF = cfg.CONF CONF.register_opts(common_opts, group=configuration.SHARED_CONF_GROUP) class SCCommonDriver(driver.ManageableVD, driver.ManageableSnapshotsVD, driver.BaseVD): def __init__(self, *args, **kwargs): super(SCCommonDriver, self).__init__(*args, **kwargs) self.configuration.append_config_values(common_opts) self.configuration.append_config_values(san_opts) self.backend_name =\ self.configuration.safe_get('volume_backend_name') or 'Dell' self.backends = self.configuration.safe_get('replication_device') self.replication_enabled = True if self.backends else False self.is_direct_connect = False self.active_backend_id = kwargs.get('active_backend_id', None) self.failed_over = True if self.active_backend_id else False LOG.info('Loading %(name)s: Failover state is %(state)r', {'name': self.backend_name, 'state': self.failed_over}) self.storage_protocol = constants.ISCSI self.failback_timeout = 60 @staticmethod def get_driver_options(): return common_opts def _bytes_to_gb(self, spacestring): """Space is returned in a string like ... 7.38197504E8 Bytes Need to split that apart and convert to GB. 
:returns: gbs in int form """ try: n = spacestring.split(' ', 1) fgbs = float(n[0]) / 1073741824.0 igbs = int(fgbs) return igbs except Exception: # If any of that blew up it isn't in the format we # thought so eat our error and return None return None def do_setup(self, context): """One time driver setup. Called once by the manager after the driver is loaded. Sets up clients, check licenses, sets up protocol specific helpers. """ self._client = storagecenter_api.SCApiHelper( self.configuration, self.active_backend_id, self.storage_protocol) def check_for_setup_error(self): """Validates the configuration information.""" with self._client.open_connection() as api: api.find_sc() self.is_direct_connect = api.is_direct_connect if self.is_direct_connect and self.replication_enabled: msg = _('Dell Cinder driver configuration error replication ' 'not supported with direct connect.') raise exception.InvalidHost(reason=msg) # If we are a healthy replicated system make sure our backend # is alive. if self.replication_enabled and not self.failed_over: # Check that our replication destinations are available. for backend in self.backends: replssn = backend['target_device_id'] try: # Just do a find_sc on it. If it raises we catch # that and raise with a correct exception. api.find_sc(int(replssn)) except exception.VolumeBackendAPIException: msg = _('Dell Cinder driver configuration error ' 'replication_device %s not found') % replssn raise exception.InvalidHost(reason=msg) def _get_volume_extra_specs(self, obj): """Gets extra specs for the given object.""" type_id = obj.get('volume_type_id') if type_id: return volume_types.get_volume_type_extra_specs(type_id) return {} def _add_volume_to_group(self, api, scvolume, volume): """Just a helper to add a volume to a group. :param api: Dell SC API opbject. :param scvolume: Dell SC Volume object. :param volume: Cinder Volume object. :returns: Nothing. """ if scvolume and volume.get('group_id'): profile = api.find_replay_profile( volume.get('group_id')) # If there is a profile then we need to add our # volumes to it. If there isn't then it was a normal # group. if profile: api.update_cg_volumes(profile, [volume]) def _get_replication_specs(self, specs): """Checks if we can do replication. Need the extra spec set and we have to be talking to EM. :param specs: Cinder Volume or snapshot extra specs. :return: rinfo dict. """ rinfo = {'enabled': False, 'sync': False, 'live': False, 'active': False, 'autofailover': False} # Repl does not work with direct connect. if not self.is_direct_connect: if (not self.failed_over and specs.get('replication_enabled') == ' True'): rinfo['enabled'] = True if specs.get('replication_type') == ' sync': rinfo['sync'] = True if specs.get('replication:livevolume') == ' True': rinfo['live'] = True if specs.get('replication:livevolume:autofailover') == ' True': rinfo['autofailover'] = True if specs.get('replication:activereplay') == ' True': rinfo['active'] = True # Some quick checks. if rinfo['enabled']: replication_target_count = len(self.backends) msg = None if replication_target_count == 0: msg = _( 'Replication setup failure: replication has been ' 'enabled but no replication target has been specified ' 'for this backend.') if rinfo['live'] and replication_target_count != 1: msg = _('Replication setup failure: replication:livevolume' ' has been enabled but more than one replication ' 'target has been specified for this backend.') if msg: LOG.debug(msg) raise exception.ReplicationError(message=msg) # Got this far. Life is good. 
Return our data. return rinfo def _is_live_vol(self, obj): rspecs = self._get_replication_specs(self._get_volume_extra_specs(obj)) return rspecs['enabled'] and rspecs['live'] def _create_replications(self, api, volume, scvolume, extra_specs=None): """Creates any appropriate replications for a given volume. :param api: Dell REST API object. :param volume: Cinder volume object. :param scvolume: Dell Storage Center Volume object. :param extra_specs: Extra specs if we have them otherwise gets them from the volume. :return: model_update """ # Replication V2 # for now we assume we have an array named backends. replication_driver_data = None # Replicate if we are supposed to. if not extra_specs: extra_specs = self._get_volume_extra_specs(volume) rspecs = self._get_replication_specs(extra_specs) if rspecs['enabled']: for backend in self.backends: targetdeviceid = backend['target_device_id'] primaryqos = backend.get('qosnode', 'cinderqos') secondaryqos = backend.get('remoteqos', 'cinderqos') diskfolder = backend.get('diskfolder', None) obj = None if rspecs['live']: # We are rolling with a live volume. obj = api.create_live_volume(scvolume, targetdeviceid, rspecs['active'], rspecs['sync'], rspecs['autofailover'], primaryqos, secondaryqos) else: # Else a regular replication. obj = api.create_replication(scvolume, targetdeviceid, primaryqos, rspecs['sync'], diskfolder, rspecs['active']) # This is either a ScReplication object or a ScLiveVolume # object. So long as it isn't None we are fine. if not obj: # Create replication will have printed a better error. msg = _('Replication %(name)s to %(ssn)s failed.') % { 'name': volume['id'], 'ssn': targetdeviceid} raise exception.VolumeBackendAPIException(data=msg) if not replication_driver_data: replication_driver_data = backend['target_device_id'] else: replication_driver_data += ',' replication_driver_data += backend['target_device_id'] # If we did something return model update. model_update = {} if replication_driver_data: model_update = { 'replication_status': fields.ReplicationStatus.ENABLED, 'replication_driver_data': replication_driver_data} return model_update @staticmethod def _cleanup_failed_create_volume(api, volumename): try: api.delete_volume(volumename) except exception.VolumeBackendAPIException as ex: LOG.info('Non fatal cleanup error: %s.', ex.msg) def create_volume(self, volume): """Create a volume.""" model_update = {} # We use id as our name as it is unique. volume_name = volume.get('id') # Look for our volume volume_size = volume.get('size') LOG.debug('Creating volume %(name)s of size %(size)s', {'name': volume_name, 'size': volume_size}) scvolume = None with self._client.open_connection() as api: try: # Get our extra specs. specs = self._get_volume_extra_specs(volume) scvolume = api.create_volume( volume_name, volume_size, specs.get('storagetype:storageprofile'), specs.get('storagetype:replayprofiles'), specs.get('storagetype:volumeqos'), specs.get('storagetype:groupqos'), specs.get('storagetype:datareductionprofile')) if scvolume is None: raise exception.VolumeBackendAPIException( message=_('Unable to create volume %s') % volume_name) # Update Group self._add_volume_to_group(api, scvolume, volume) # Create replications. (Or not. It checks.) model_update = self._create_replications(api, volume, scvolume) # Save our provider_id. model_update['provider_id'] = scvolume['instanceId'] except Exception: # if we actually created a volume but failed elsewhere # clean up the volume now. 
self._cleanup_failed_create_volume(api, volume_name) with excutils.save_and_reraise_exception(): LOG.error('Failed to create volume %s', volume_name) if scvolume is None: raise exception.VolumeBackendAPIException( data=_('Unable to create volume. Backend down.')) return model_update def _split_driver_data(self, replication_driver_data): """Splits the replication_driver_data into an array of ssn strings. :param replication_driver_data: A string of comma separated SSNs. :returns: SSNs in an array of strings. """ ssnstrings = [] # We have any replication_driver_data. if replication_driver_data: # Split the array and wiffle through the entries. for str in replication_driver_data.split(','): # Strip any junk from the string. ssnstring = str.strip() # Anything left? if ssnstring: # Add it to our array. ssnstrings.append(ssnstring) return ssnstrings def _delete_live_volume(self, api, volume): """Delete live volume associated with volume. :param api: Dell REST API object. :param volume: Cinder Volume object :return: True if we actually deleted something. False for everything else. """ # Live Volume was added after provider_id support. So just assume it is # there. replication_driver_data = volume.get('replication_driver_data') # Do we have any replication driver data? if replication_driver_data: # Valid replication data? ssnstrings = self._split_driver_data(replication_driver_data) if ssnstrings: ssn = int(ssnstrings[0]) sclivevolume = api.get_live_volume(volume.get('provider_id'), volume.get('id')) # Have we found the live volume? if (sclivevolume and sclivevolume.get('secondaryScSerialNumber') == ssn and api.delete_live_volume(sclivevolume, True)): LOG.info('%(vname)s\'s replication live volume has ' 'been deleted from storage Center %(sc)s,', {'vname': volume.get('id'), 'sc': ssn}) return True # If we are here either we do not have a live volume, we do not have # one on our configured SC or we were not able to delete it. # Either way, warn and leave. LOG.warning('Unable to delete %s live volume.', volume.get('id')) return False def _delete_replications(self, api, volume): """Delete replications associated with a given volume. We should be able to roll through the replication_driver_data list of SSNs and delete replication objects between them and the source volume. :param api: Dell REST API object. :param volume: Cinder Volume object :return: None """ replication_driver_data = volume.get('replication_driver_data') if replication_driver_data: ssnstrings = self._split_driver_data(replication_driver_data) volume_name = volume.get('id') provider_id = volume.get('provider_id') scvol = api.find_volume(volume_name, provider_id) # This is just a string of ssns separated by commas. # Trundle through these and delete them all. for ssnstring in ssnstrings: ssn = int(ssnstring) # Are we a replication or a live volume? if not api.delete_replication(scvol, ssn): LOG.warning('Unable to delete replication of Volume ' '%(vname)s to Storage Center %(sc)s.', {'vname': volume_name, 'sc': ssnstring}) # If none of that worked or there was nothing to do doesn't matter. # Just move on. def delete_volume(self, volume): deleted = False # We use id as our name as it is unique. volume_name = volume.get('id') provider_id = volume.get('provider_id') # Unless we are migrating. 
if volume.get('migration_status') == 'deleting': volume_name = volume.get('_name_id') provider_id = None LOG.debug('Deleting volume %s', volume_name) with self._client.open_connection() as api: try: rspecs = self._get_replication_specs( self._get_volume_extra_specs(volume)) if rspecs['enabled']: if rspecs['live']: self._delete_live_volume(api, volume) else: self._delete_replications(api, volume) deleted = api.delete_volume(volume_name, provider_id) except Exception: with excutils.save_and_reraise_exception(): LOG.error('Failed to delete volume %s', volume_name) # if there was an error we will have raised an # exception. If it failed to delete it is because # the conditions to delete a volume were not met. if deleted is False: raise exception.VolumeIsBusy(volume_name=volume_name) def create_snapshot(self, snapshot): """Create snapshot""" # our volume name is the volume id volume_name = snapshot.get('volume_id') provider_id = snapshot.volume.get('provider_id') snapshot_id = snapshot.get('id') LOG.debug('Creating snapshot %(snap)s on volume %(vol)s', {'snap': snapshot_id, 'vol': volume_name}) with self._client.open_connection() as api: scvolume = api.find_volume(volume_name, provider_id, self._is_live_vol(snapshot)) if scvolume is not None: replay = api.create_replay(scvolume, snapshot_id, 0) if replay: return {'status': fields.SnapshotStatus.AVAILABLE, 'provider_id': scvolume['instanceId']} else: LOG.warning('Unable to locate volume:%s', volume_name) snapshot['status'] = fields.SnapshotStatus.ERROR msg = _('Failed to create snapshot %s') % snapshot_id raise exception.VolumeBackendAPIException(data=msg) def create_volume_from_snapshot(self, volume, snapshot): """Create new volume from other volume's snapshot on appliance.""" model_update = {} scvolume = None volume_name = volume.get('id') src_provider_id = snapshot.get('provider_id') src_volume_name = snapshot.get('volume_id') # This snapshot could have been created on its own or as part of a # cgsnapshot. If it was a cgsnapshot it will be identified on the Dell # backend under group_snapshot_id. Given the volume ID and the # group_snapshot_id we can find the appropriate snapshot. # So first we look for group_snapshot_id. If that is blank then it # must have been a normal snapshot which will be found under # snapshot_id. snapshot_id = snapshot.get('group_snapshot_id') if not snapshot_id: snapshot_id = snapshot.get('id') LOG.debug( 'Creating new volume %(vol)s from snapshot %(snap)s ' 'from vol %(src)s', {'vol': volume_name, 'snap': snapshot_id, 'src': src_volume_name}) with self._client.open_connection() as api: try: srcvol = api.find_volume(src_volume_name, src_provider_id) if srcvol is not None: replay = api.find_replay(srcvol, snapshot_id) if replay is not None: # See if we have any extra specs. specs = self._get_volume_extra_specs(volume) scvolume = api.create_view_volume( volume_name, replay, specs.get('storagetype:replayprofiles'), specs.get('storagetype:volumeqos'), specs.get('storagetype:groupqos'), specs.get('storagetype:datareductionprofile')) # Extend Volume if scvolume and (volume['size'] > snapshot["volume_size"]): LOG.debug('Resize the new volume to %s.', volume['size']) scvolume = api.expand_volume(scvolume, volume['size']) if scvolume is None: raise exception.VolumeBackendAPIException( message=_('Unable to create volume ' '%(name)s from %(snap)s.') % {'name': volume_name, 'snap': snapshot_id}) # Update Group self._add_volume_to_group(api, scvolume, volume) # Replicate if we are supposed to. 
model_update = self._create_replications(api, volume, scvolume) # Save our instanceid. model_update['provider_id'] = ( scvolume['instanceId']) except Exception: # Clean up after ourselves. self._cleanup_failed_create_volume(api, volume_name) with excutils.save_and_reraise_exception(): LOG.error('Failed to create volume %s', volume_name) if scvolume is not None: LOG.debug('Volume %(vol)s created from %(snap)s', {'vol': volume_name, 'snap': snapshot_id}) else: msg = _('Failed to create volume %s') % volume_name raise exception.VolumeBackendAPIException(data=msg) return model_update def create_cloned_volume(self, volume, src_vref): """Creates a clone of the specified volume.""" model_update = {} scvolume = None src_volume_name = src_vref.get('id') src_provider_id = src_vref.get('provider_id') volume_name = volume.get('id') LOG.debug('Creating cloned volume %(clone)s from volume %(vol)s', {'clone': volume_name, 'vol': src_volume_name}) with self._client.open_connection() as api: try: srcvol = api.find_volume(src_volume_name, src_provider_id) if srcvol is not None: # Get our specs. specs = self._get_volume_extra_specs(volume) # Create our volume scvolume = api.create_cloned_volume( volume_name, srcvol, specs.get('storagetype:storageprofile'), specs.get('storagetype:replayprofiles'), specs.get('storagetype:volumeqos'), specs.get('storagetype:groupqos'), specs.get('storagetype:datareductionprofile')) # Extend Volume if scvolume and volume['size'] > src_vref['size']: LOG.debug('Resize the volume to %s.', volume['size']) scvolume = api.expand_volume(scvolume, volume['size']) # If either of those didn't work we bail. if scvolume is None: raise exception.VolumeBackendAPIException( message=_('Unable to create volume ' '%(name)s from %(vol)s.') % {'name': volume_name, 'vol': src_volume_name}) # Update Group self._add_volume_to_group(api, scvolume, volume) # Replicate if we are supposed to. model_update = self._create_replications(api, volume, scvolume) # Save our provider_id. model_update['provider_id'] = scvolume['instanceId'] except Exception: # Clean up after ourselves. self._cleanup_failed_create_volume(api, volume_name) with excutils.save_and_reraise_exception(): LOG.error('Failed to create volume %s', volume_name) if scvolume is not None: LOG.debug('Volume %(vol)s cloned from %(src)s', {'vol': volume_name, 'src': src_volume_name}) else: msg = _('Failed to create volume %s') % volume_name raise exception.VolumeBackendAPIException(data=msg) return model_update def delete_snapshot(self, snapshot): """delete_snapshot""" volume_name = snapshot.get('volume_id') snapshot_id = snapshot.get('id') provider_id = snapshot.get('provider_id') LOG.debug('Deleting snapshot %(snap)s from volume %(vol)s', {'snap': snapshot_id, 'vol': volume_name}) with self._client.open_connection() as api: scvolume = api.find_volume(volume_name, provider_id) if scvolume and api.delete_replay(scvolume, snapshot_id): return # if we are here things went poorly. snapshot['status'] = fields.SnapshotStatus.ERROR_DELETING msg = _('Failed to delete snapshot %s') % snapshot_id raise exception.VolumeBackendAPIException(data=msg) def create_export(self, context, volume, connector): """Create an export of a volume. The volume exists on creation and will be visible on initialize connection. So nothing to do here. """ pass def ensure_export(self, context, volume): """Ensure an export of a volume. Per the sc driver we just make sure that the volume actually exists where we think it does. 
""" scvolume = None volume_name = volume.get('id') provider_id = volume.get('provider_id') LOG.debug('Checking existence of volume %s', volume_name) with self._client.open_connection() as api: try: scvolume = api.find_volume(volume_name, provider_id, self._is_live_vol(volume)) except Exception: with excutils.save_and_reraise_exception(): LOG.error('Failed to ensure export of volume %s', volume_name) if scvolume is None: msg = _('Unable to find volume %s') % volume_name raise exception.VolumeBackendAPIException(data=msg) def remove_export(self, context, volume): """Remove an export of a volume. We do nothing here to match the nothing we do in create export. Again we do everything in initialize and terminate connection. """ pass def extend_volume(self, volume, new_size): """Extend the size of the volume.""" volume_name = volume.get('id') provider_id = volume.get('provider_id') LOG.debug('Extending volume %(vol)s to %(size)s', {'vol': volume_name, 'size': new_size}) if volume is not None: with self._client.open_connection() as api: scvolume = api.find_volume(volume_name, provider_id) if api.expand_volume(scvolume, new_size) is not None: return # If we are here nothing good happened. msg = _('Unable to extend volume %s') % volume_name raise exception.VolumeBackendAPIException(data=msg) def _update_volume_stats(self): """Retrieve stats info from volume group.""" with self._client.open_connection() as api: # Static stats. data = {} data['volume_backend_name'] = self.backend_name data['vendor_name'] = 'Dell EMC' data['driver_version'] = self.VERSION data['storage_protocol'] = self.storage_protocol data['reserved_percentage'] = 0 data['consistencygroup_support'] = True data['consistent_group_snapshot_enabled'] = True data['thin_provisioning_support'] = True data['QoS_support'] = False data['replication_enabled'] = self.replication_enabled data['multiattach'] = True if self.replication_enabled: data['replication_type'] = ['async', 'sync'] data['replication_count'] = len(self.backends) replication_targets = [] # Trundle through our backends. for backend in self.backends: target_device_id = backend.get('target_device_id') if target_device_id: replication_targets.append(target_device_id) data['replication_targets'] = replication_targets # Get our capacity. storageusage = api.get_storage_usage() if storageusage: # Get actual stats. totalcapacity = storageusage.get('availableSpace') totalcapacitygb = self._bytes_to_gb(totalcapacity) data['total_capacity_gb'] = totalcapacitygb freespace = storageusage.get('freeSpace') freespacegb = self._bytes_to_gb(freespace) data['free_capacity_gb'] = freespacegb else: # Soldier on. Just return 0 for this iteration. LOG.error('Unable to retrieve volume stats.') data['total_capacity_gb'] = 0 data['free_capacity_gb'] = 0 self._stats = data LOG.debug('Total cap %(total)s Free cap %(free)s', {'total': data['total_capacity_gb'], 'free': data['free_capacity_gb']}) # Take this opportunity to report our failover state. if self.failed_over: LOG.debug('%(source)s has been failed over to %(dest)s', {'source': self.backend_name, 'dest': self.active_backend_id}) def update_migrated_volume(self, ctxt, volume, new_volume, original_volume_status): """Return model update for migrated volume. 
:param volume: The original volume that was migrated to this backend :param new_volume: The migration volume object that was created on this backend as part of the migration process :param original_volume_status: The status of the original volume :returns: model_update to update DB with any needed changes """ # We use id as our volume name so we need to rename the backend # volume to the original volume name. original_volume_name = volume.get('id') current_name = new_volume.get('id') # We should have this. If we don't we'll set it below. provider_id = new_volume.get('provider_id') LOG.debug('update_migrated_volume: %(current)s to %(original)s', {'current': current_name, 'original': original_volume_name}) if original_volume_name: with self._client.open_connection() as api: # todo(tswanson): Delete old volume repliations/live volumes # todo(tswanson): delete old volume? scvolume = api.find_volume(current_name, provider_id) if (scvolume and api.rename_volume(scvolume, original_volume_name)): # Replicate if we are supposed to. model_update = self._create_replications(api, new_volume, scvolume) model_update['_name_id'] = None model_update['provider_id'] = scvolume['instanceId'] return model_update # The world was horrible to us so we should error and leave. LOG.error('Unable to rename the logical volume for volume: %s', original_volume_name) return {'_name_id': new_volume['_name_id'] or new_volume['id']} def create_group(self, context, group): """Creates a group. :param context: the context of the caller. :param group: the Group object of the group to be created. :returns: model_update model_update will be in this format: {'status': xxx, ......}. If the status in model_update is 'error', the manager will throw an exception and it will be caught in the try-except block in the manager. If the driver throws an exception, the manager will also catch it in the try-except block. The group status in the db will be changed to 'error'. For a successful operation, the driver can either build the model_update and return it or return None. The group status will be set to 'available'. """ if not volume_utils.is_group_a_cg_snapshot_type(group): raise NotImplementedError() model_update = {'status': fields.GroupStatus.ERROR} gid = group['id'] with self._client.open_connection() as api: cgroup = api.create_replay_profile(gid) if cgroup: LOG.info('Created group %s', gid) model_update['status'] = fields.GroupStatus.AVAILABLE return model_update def delete_group(self, context, group, volumes): """Deletes a group. :param context: the context of the caller. :param group: the Group object of the group to be deleted. :param volumes: a list of Volume objects in the group. :returns: model_update, volumes_model_update param volumes is a list of objects retrieved from the db. It cannot be assigned to volumes_model_update. volumes_model_update is a list of dictionaries. It has to be built by the driver. An entry will be in this format: {'id': xxx, 'status': xxx, ......}. model_update will be in this format: {'status': xxx, ......}. The driver should populate volumes_model_update and model_update and return them. The manager will check volumes_model_update and update db accordingly for each volume. If the driver successfully deleted some volumes but failed to delete others, it should set statuses of the volumes accordingly so that the manager can update db correctly. 
If the status in any entry of volumes_model_update is 'error_deleting' or 'error', the status in model_update will be set to the same if it is not already 'error_deleting' or 'error'. If the status in model_update is 'error_deleting' or 'error', the manager will raise an exception and the status of the group will be set to 'error' in the db. If volumes_model_update is not returned by the driver, the manager will set the status of every volume in the group to 'error' in the except block. If the driver raises an exception during the operation, it will be caught by the try-except block in the manager. The statuses of the group and all volumes in it will be set to 'error'. For a successful operation, the driver can either build the model_update and volumes_model_update and return them or return None, None. The statuses of the group and all volumes will be set to 'deleted' after the manager deletes them from db. """ if not volume_utils.is_group_a_cg_snapshot_type(group): raise NotImplementedError() model_update = {'status': fields.GroupStatus.DELETED} with self._client.open_connection() as api: gid = group['id'] profile = api.find_replay_profile(gid) if profile: try: api.delete_replay_profile(profile) except exception.VolumeBackendAPIException: LOG.error('delete_group: error deleting %s', gid) model_update['status'] = fields.GroupStatus.ERROR # Trundle through the list deleting the volumes. volumes_model_update = [] for volume in volumes: status = fields.GroupStatus.ERROR try: self.delete_volume(volume) # We throw if that fails. status = fields.GroupStatus.DELETED except (exception.VolumeBackendAPIException, exception.VolumeIsBusy): LOG.error('delete_group: error deleting volume %s', volume['id']) volumes_model_update.append({'id': volume['id'], 'status': status}) return model_update, volumes_model_update def update_group(self, context, group, add_volumes=None, remove_volumes=None): """Updates a group. :param context: the context of the caller. :param group: the Group object of the group to be updated. :param add_volumes: a list of Volume objects to be added. :param remove_volumes: a list of Volume objects to be removed. :returns: model_update, add_volumes_update, remove_volumes_update model_update is a dictionary that the driver wants the manager to update upon a successful return. If None is returned, the manager will set the status to 'available'. add_volumes_update and remove_volumes_update are lists of dictionaries that the driver wants the manager to update upon a successful return. Note that each entry requires a {'id': xxx} so that the correct volume entry can be updated. If None is returned, the volume will remain its original status. Also note that you cannot directly assign add_volumes to add_volumes_update as add_volumes is a list of volume objects and cannot be used for db update directly. Same with remove_volumes. If the driver throws an exception, the status of the group as well as those of the volumes to be added/removed will be set to 'error'. """ if not volume_utils.is_group_a_cg_snapshot_type(group): raise NotImplementedError() with self._client.open_connection() as api: gid = group['id'] profile = api.find_replay_profile(gid) if not profile: LOG.error('update_group: Cannot find volume Group %s', gid) elif api.update_cg_volumes(profile, add_volumes, remove_volumes): LOG.info('update_group: Updated volume group %s', gid) # we need nothing updated above us so just return None. return None, None, None # Things did not go well so throw. 
msg = _('Unable to update group %s') % gid raise exception.VolumeBackendAPIException(data=msg) def create_group_from_src(self, context, group, volumes, group_snapshot=None, snapshots=None, source_group=None, source_vols=None): """Creates a group from source. :param context: the context of the caller. :param group: the Group object to be created. :param volumes: a list of Volume objects in the group. :param group_snapshot: the GroupSnapshot object as source. :param snapshots: a list of Snapshot objects in group_snapshot. :param source_group: the Group object as source. :param source_vols: a list of Volume objects in the source_group. :returns: model_update, volumes_model_update The source can be group_snapshot or a source_group. param volumes is a list of objects retrieved from the db. It cannot be assigned to volumes_model_update. volumes_model_update is a list of dictionaries. It has to be built by the driver. An entry will be in this format: {'id': xxx, 'status': xxx, ......}. model_update will be in this format: {'status': xxx, ......}. To be consistent with other volume operations, the manager will assume the operation is successful if no exception is thrown by the driver. For a successful operation, the driver can either build the model_update and volumes_model_update and return them or return None, None. """ if not (group_snapshot and snapshots and not source_group or source_group and source_vols and not group_snapshot): msg = _("create_group_from_src only supports a " "group_snapshot source or a group source. " "Multiple sources cannot be used.") raise exception.InvalidInput(msg) if not volume_utils.is_group_a_cg_snapshot_type(group): raise NotImplementedError() # Mark us as working. If we are a CG then that will be settled below. model_update = {'status': fields.GroupStatus.AVAILABLE} volumes_model_update = [] if source_group: for volume, src_vol in zip(volumes, source_vols): update = self.create_cloned_volume(volume, src_vol) update['status'] = fields.GroupStatus.AVAILABLE update['id'] = volume['id'] volumes_model_update.append(update.copy()) else: for volume, src_snap in zip(volumes, snapshots): update = self.create_volume_from_snapshot(volume, src_snap) update['status'] = fields.GroupStatus.AVAILABLE update['id'] = volume['id'] volumes_model_update.append(update.copy()) # So, in theory, everything has been created. Now is the time to # add the volumes to the group. model_update = self.create_group(context, group) if model_update['status'] == fields.GroupStatus.AVAILABLE: self.update_group(context, group, volumes, None) return model_update, volumes_model_update def create_group_snapshot(self, context, group_snapshot, snapshots): """Creates a group_snapshot. :param context: the context of the caller. :param group_snapshot: the GroupSnapshot object to be created. :param snapshots: a list of Snapshot objects in the group_snapshot. :returns: model_update, snapshots_model_update param snapshots is a list of Snapshot objects. It cannot be assigned to snapshots_model_update. snapshots_model_update is a list of dictionaries. It has to be built by the driver. An entry will be in this format: {'id': xxx, 'status': xxx, ......}. model_update will be in this format: {'status': xxx, ......}. The driver should populate snapshots_model_update and model_update and return them. The manager will check snapshots_model_update and update db accordingly for each snapshot. 
If the driver successfully deleted some snapshots but failed to delete others, it should set statuses of the snapshots accordingly so that the manager can update db correctly. If the status in any entry of snapshots_model_update is 'error', the status in model_update will be set to the same if it is not already 'error'. If the status in model_update is 'error', the manager will raise an exception and the status of group_snapshot will be set to 'error' in the db. If snapshots_model_update is not returned by the driver, the manager will set the status of every snapshot to 'error' in the except block. If the driver raises an exception during the operation, it will be caught by the try-except block in the manager and the statuses of group_snapshot and all snapshots will be set to 'error'. For a successful operation, the driver can either build the model_update and snapshots_model_update and return them or return None, None. The statuses of group_snapshot and all snapshots will be set to 'available' at the end of the manager function. """ if not volume_utils.is_group_a_cg_snapshot_type(group_snapshot): raise NotImplementedError() model_update = {'status': fields.GroupSnapshotStatus.ERROR} snapshot_updates = None with self._client.open_connection() as api: gid = group_snapshot['group_id'] snapshotid = group_snapshot['id'] profile = api.find_replay_profile(gid) if not profile: LOG.error('create_group_snapshot: profile %s not found', gid) else: if not api.snap_cg_replay(profile, snapshotid, 0): LOG.error('create_group_snapshot: ' 'failed to snap %(ss)s on %(pro)s', {'ss': snapshotid, 'pro': profile}) else: LOG.info('create_group_snapshot: ' 'created %(ss)s on %(pro)s', {'ss': snapshotid, 'pro': profile}) # Set our returns model_update['status'] = ( fields.GroupSnapshotStatus.AVAILABLE) snapshot_updates = [] for snapshot in snapshots: snapshot_updates.append({ 'id': snapshot.id, 'status': fields.SnapshotStatus.AVAILABLE}) return model_update, snapshot_updates def delete_group_snapshot(self, context, group_snapshot, snapshots): """Deletes a group_snapshot. :param context: the context of the caller. :param group_snapshot: the GroupSnapshot object to be deleted. :param snapshots: a list of Snapshot objects in the group_snapshot. :returns: model_update, snapshots_model_update param snapshots is a list of objects. It cannot be assigned to snapshots_model_update. snapshots_model_update is a list of of dictionaries. It has to be built by the driver. An entry will be in this format: {'id': xxx, 'status': xxx, ......}. model_update will be in this format: {'status': xxx, ......}. The driver should populate snapshots_model_update and model_update and return them. The manager will check snapshots_model_update and update db accordingly for each snapshot. If the driver successfully deleted some snapshots but failed to delete others, it should set statuses of the snapshots accordingly so that the manager can update db correctly. If the status in any entry of snapshots_model_update is 'error_deleting' or 'error', the status in model_update will be set to the same if it is not already 'error_deleting' or 'error'. If the status in model_update is 'error_deleting' or 'error', the manager will raise an exception and the status of group_snapshot will be set to 'error' in the db. If snapshots_model_update is not returned by the driver, the manager will set the status of every snapshot to 'error' in the except block. 
If the driver raises an exception during the operation, it will be caught by the try-except block in the manager and the statuses of group_snapshot and all snapshots will be set to 'error'. For a successful operation, the driver can either build the model_update and snapshots_model_update and return them or return None, None. The statuses of group_snapshot and all snapshots will be set to 'deleted' after the manager deletes them from db. """ # Setup a generic error return. if not volume_utils.is_group_a_cg_snapshot_type(group_snapshot): raise NotImplementedError() model_update = {'status': fields.GroupSnapshotStatus.ERROR} snapshot_updates = None with self._client.open_connection() as api: snapshotid = group_snapshot['id'] profile = api.find_replay_profile(group_snapshot['group_id']) if profile: LOG.info('delete_group_snapshot: %(ss)s from %(pro)s', {'ss': snapshotid, 'pro': profile}) if not api.delete_cg_replay(profile, snapshotid): model_update['status'] = ( fields.GroupSnapshotStatus.ERROR_DELETING) else: model_update['status'] = fields.GroupSnapshotStatus.DELETED snapshot_updates = [] for snapshot in snapshots: snapshot_updates.append( {'id': snapshot['id'], 'status': fields.SnapshotStatus.DELETED}) return model_update, snapshot_updates def manage_existing(self, volume, existing_ref): """Brings an existing backend storage object under Cinder management. existing_ref is passed straight through from the API request's manage_existing_ref value, and it is up to the driver how this should be interpreted. It should be sufficient to identify a storage object that the driver should somehow associate with the newly-created cinder volume structure. There are two ways to do this: 1. Rename the backend storage object so that it matches the, volume['name'] which is how drivers traditionally map between a cinder volume and the associated backend storage object. 2. Place some metadata on the volume, or somewhere in the backend, that allows other driver requests (e.g. delete, clone, attach, detach...) to locate the backend storage object when required. If the existing_ref doesn't make sense, or doesn't refer to an existing backend storage object, raise a ManageExistingInvalidReference exception. The volume may have a volume_type, and the driver can inspect that and compare against the properties of the referenced backend storage object. If they are incompatible, raise a ManageExistingVolumeTypeMismatch, specifying a reason for the failure. :param volume: Cinder volume to manage :param existing_ref: Driver-specific information used to identify a volume """ if existing_ref.get('source-name') or existing_ref.get('source-id'): with self._client.open_connection() as api: api.manage_existing(volume['id'], existing_ref) # Replicate if we are supposed to. volume_name = volume.get('id') provider_id = volume.get('provider_id') scvolume = api.find_volume(volume_name, provider_id) model_update = self._create_replications(api, volume, scvolume) if model_update: return model_update else: msg = _('Must specify source-name or source-id.') raise exception.ManageExistingInvalidReference( existing_ref=existing_ref, reason=msg) # Only return a model_update if we have replication info to add. return None def manage_existing_get_size(self, volume, existing_ref): """Return size of volume to be managed by manage_existing. When calculating the size, round up to the next GB. 
:param volume: Cinder volume to manage :param existing_ref: Driver-specific information used to identify a volume """ if existing_ref.get('source-name') or existing_ref.get('source-id'): with self._client.open_connection() as api: return api.get_unmanaged_volume_size(existing_ref) else: msg = _('Must specify source-name or source-id.') raise exception.ManageExistingInvalidReference( existing_ref=existing_ref, reason=msg) def unmanage(self, volume): """Removes the specified volume from Cinder management. Does not delete the underlying backend storage object. For most drivers, this will not need to do anything. However, some drivers might use this call as an opportunity to clean up any Cinder-specific configuration that they have associated with the backend storage object. :param volume: Cinder volume to unmanage """ with self._client.open_connection() as api: volume_name = volume.get('id') provider_id = volume.get('provider_id') scvolume = api.find_volume(volume_name, provider_id) if scvolume: api.unmanage(scvolume) def _get_retype_spec(self, diff, volume_name, specname, spectype): """Helper function to get current and requested spec. :param diff: A difference dictionary. :param volume_name: The volume name we are working with. :param specname: The pretty name of the parameter. :param spectype: The actual spec string. :return: current, requested spec. :raises VolumeBackendAPIException: """ spec = (diff['extra_specs'].get(spectype)) if spec: if len(spec) != 2: msg = _('Unable to retype %(specname)s, expected to receive ' 'current and requested %(spectype)s values. Value ' 'received: %(spec)s') % {'specname': specname, 'spectype': spectype, 'spec': spec} LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) current = spec[0] requested = spec[1] if current != requested: LOG.debug('Retyping volume %(vol)s to use %(specname)s ' '%(spec)s.', {'vol': volume_name, 'specname': specname, 'spec': requested}) return current, requested else: LOG.info('Retype was to same Storage Profile.') return None, None def _retype_replication(self, api, volume, scvolume, new_type, diff): model_update = None ret = True # Replication. current, requested = ( self._get_retype_spec(diff, volume.get('id'), 'replication_enabled', 'replication_enabled')) # We only toggle at the repl level. if current != requested: # If we are changing to on... if requested == ' True': # We create our replication using our new type's extra specs. model_update = self._create_replications( api, volume, scvolume, new_type.get('extra_specs')) elif current == ' True': # If we are killing replication we have to see if we currently # have live volume enabled or not. if self._is_live_vol(volume): ret = self._delete_live_volume(api, volume) else: self._delete_replications(api, volume) model_update = {'replication_status': fields.ReplicationStatus.DISABLED, 'replication_driver_data': ''} # TODO(tswanson): Add support for changing replication options. return ret, model_update def retype(self, ctxt, volume, new_type, diff, host): """Convert the volume to be of the new type. Returns a boolean indicating whether the retype occurred. :param ctxt: Context :param volume: A dictionary describing the volume to migrate :param new_type: A dictionary describing the volume type to convert to :param diff: A dictionary with the difference between the two types :param host: A dictionary describing the host to migrate to, where host['host'] is its name, and host['capabilities'] is a dictionary of its reported capabilities (Not Used). 
:returns: Boolean or Boolean, model_update tuple. """ LOG.info('retype: volume_name: %(name)s new_type: %(newtype)s ' 'diff: %(diff)s host: %(host)s', {'name': volume.get('id'), 'newtype': new_type, 'diff': diff, 'host': host}) model_update = None # Any spec changes? if diff['extra_specs']: volume_name = volume.get('id') provider_id = volume.get('provider_id') with self._client.open_connection() as api: try: # Get our volume scvolume = api.find_volume(volume_name, provider_id) if scvolume is None: LOG.error('Retype unable to find volume %s.', volume_name) return False # Check our specs. # Storage profiles. current, requested = ( self._get_retype_spec(diff, volume_name, 'Storage Profile', 'storagetype:storageprofile')) # if there is a change and it didn't work fast fail. if (current != requested and not api.update_storage_profile(scvolume, requested)): LOG.error('Failed to update storage profile') return False # Replay profiles. current, requested = ( self._get_retype_spec(diff, volume_name, 'Replay Profiles', 'storagetype:replayprofiles')) # if there is a change and it didn't work fast fail. if requested and not api.update_replay_profiles(scvolume, requested): LOG.error('Failed to update replay profiles') return False # Volume QOS profiles. current, requested = ( self._get_retype_spec(diff, volume_name, 'Volume QOS Profile', 'storagetype:volumeqos')) if current != requested: if not api.update_qos_profile(scvolume, requested): LOG.error('Failed to update volume qos profile') # Group QOS profiles. current, requested = ( self._get_retype_spec(diff, volume_name, 'Group QOS Profile', 'storagetype:groupqos')) if current != requested: if not api.update_qos_profile(scvolume, requested, True): LOG.error('Failed to update group qos profile') return False # Data reduction profiles. current, requested = ( self._get_retype_spec( diff, volume_name, 'Data Reduction Profile', 'storagetype:datareductionprofile')) if current != requested: if not api.update_datareduction_profile(scvolume, requested): LOG.error('Failed to update data reduction ' 'profile') return False # Active Replay current, requested = ( self._get_retype_spec(diff, volume_name, 'Replicate Active Replay', 'replication:activereplay')) if current != requested and not ( api.update_replicate_active_replay( scvolume, requested == ' True')): LOG.error('Failed to apply ' 'replication:activereplay setting') return False # Deal with replication. ret, model_update = self._retype_replication( api, volume, scvolume, new_type, diff) if not ret: return False except exception.VolumeBackendAPIException: # We do nothing with this. We simply return failure. return False # If we have something to send down... if model_update: return True, model_update return True def _parse_secondary(self, api, secondary): """Find the replication destination associated with secondary. :param api: Dell SCApi :param secondary: String indicating the secondary to failover to. :return: Destination SSN for the given secondary. """ LOG.debug('_parse_secondary. Looking for %s.', secondary) destssn = None # Trundle through these looking for our secondary. for backend in self.backends: ssnstring = backend['target_device_id'] # If they list a secondary it has to match. # If they do not list a secondary we return the first # replication on a working system. if not secondary or secondary == ssnstring: # Is a string. Need an int. ssn = int(ssnstring) # Without the source being up we have no good # way to pick a destination to failover to. So just # look for one that is just up. 
try: # If the SC ssn exists use it. if api.find_sc(ssn): destssn = ssn break except exception.VolumeBackendAPIException: LOG.warning('SSN %s appears to be down.', ssn) LOG.info('replication failover secondary is %(ssn)s', {'ssn': destssn}) return destssn def _update_backend(self, active_backend_id): # Mark for failover or undo failover. LOG.debug('active_backend_id: %s', active_backend_id) if active_backend_id: self.active_backend_id = str(active_backend_id) self.failed_over = True else: self.active_backend_id = None self.failed_over = False self._client.active_backend_id = self.active_backend_id def _get_qos(self, targetssn): # Find our QOS. qosnode = None for backend in self.backends: if int(backend['target_device_id']) == targetssn: qosnode = backend.get('qosnode', 'cinderqos') return qosnode def _parse_extraspecs(self, volume): # Digest our extra specs for replication. extraspecs = {} specs = self._get_volume_extra_specs(volume) if specs.get('replication_type') == ' sync': extraspecs['replicationtype'] = 'Synchronous' else: extraspecs['replicationtype'] = 'Asynchronous' if specs.get('replication:activereplay') == ' True': extraspecs['activereplay'] = True else: extraspecs['activereplay'] = False extraspecs['storage_profile'] = specs.get('storagetype:storageprofile') extraspecs['replay_profile_string'] = ( specs.get('storagetype:replayprofiles')) return extraspecs def _wait_for_replication(self, api, items): # Wait for our replications to resync with their original volumes. # We wait for completion, errors or timeout. deadcount = 5 lastremain = 0.0 # The big wait loop. while True: # We run until all volumes are synced or in error. done = True currentremain = 0.0 # Run the list. for item in items: # If we have one cooking. if item['status'] == 'inprogress': # Is it done? synced, remain = api.replication_progress(item['screpl']) currentremain += remain if synced: # It is! Get our volumes. cvol = api.get_volume(item['cvol']) nvol = api.get_volume(item['nvol']) # Flip replication. if (cvol and nvol and api.flip_replication( cvol, nvol, item['volume']['id'], item['specs']['replicationtype'], item['qosnode'], item['specs']['activereplay'])): # rename the original. Doesn't matter if it # succeeded as we should have the provider_id # of the new volume. ovol = api.get_volume(item['ovol']) if not ovol or not api.rename_volume( ovol, 'org:' + ovol['name']): # Not a reason to fail but will possibly # cause confusion so warn. LOG.warning('Unable to locate and rename ' 'original volume: %s', item['ovol']) item['status'] = 'synced' else: item['status'] = 'error' elif synced is None: # Couldn't get info on this one. Call it baked. item['status'] = 'error' else: # Miles to go before we're done. done = False # done? then leave. if done: break # Confirm we are or are not still making progress. if lastremain == currentremain: # One chance down. Warn user. deadcount -= 1 LOG.warning('Waiting for replications to complete. ' 'No progress for %(timeout)d seconds. ' 'deadcount = %(cnt)d', {'timeout': self.failback_timeout, 'cnt': deadcount}) else: # Reset lastremain = currentremain deadcount = 5 # If we've used up our 5 chances we error and log.. if deadcount == 0: LOG.error('Replication progress has stopped: %f remaining.', currentremain) for item in items: if item['status'] == 'inprogress': LOG.error('Failback failed for volume: %s. 
' 'Timeout waiting for replication to ' 'sync with original volume.', item['volume']['id']) item['status'] = 'error' break # This is part of an async call so we should be good sleeping here. # Have to balance hammering the backend for no good reason with # the max timeout for the unit tests. Yeah, silly. eventlet.sleep(self.failback_timeout) def _reattach_remaining_replications(self, api, items): # Wiffle through our backends and reattach any remaining replication # targets. for item in items: if item['status'] == 'synced': svol = api.get_volume(item['nvol']) # assume it went well. Will error out if not. item['status'] = 'reattached' # wiffle through our backends and kick off replications. for backend in self.backends: rssn = int(backend['target_device_id']) if rssn != api.ssn: rvol = api.find_repl_volume(item['volume']['id'], rssn, None) # if there is an old replication whack it. api.delete_replication(svol, rssn, False) if api.start_replication( svol, rvol, item['specs']['replicationtype'], self._get_qos(rssn), item['specs']['activereplay']): # Save our replication_driver_data. item['rdd'] += ',' item['rdd'] += backend['target_device_id'] else: # No joy. Bail item['status'] = 'error' def _fixup_types(self, api, items): # Update our replay profiles. for item in items: if item['status'] == 'reattached': # Re-apply any appropriate replay profiles. item['status'] = 'available' rps = item['specs']['replay_profile_string'] if rps: svol = api.get_volume(item['nvol']) if not api.update_replay_profiles(svol, rps): item['status'] = 'error' def _volume_updates(self, items): # Update our volume updates. volume_updates = [] for item in items: # Set our status for our replicated volumes model_update = {'provider_id': item['nvol'], 'replication_driver_data': item['rdd']} # These are simple. If the volume reaches available then, # since we were replicating it, replication status must # be good. Else error/error. if item['status'] == 'available': model_update['status'] = 'available' model_update['replication_status'] = ( fields.ReplicationStatus.ENABLED) else: model_update['status'] = 'error' model_update['replication_status'] = ( fields.ReplicationStatus.ERROR) volume_updates.append({'volume_id': item['volume']['id'], 'updates': model_update}) return volume_updates def _failback_replication(self, api, volume, qosnode): """Sets up the replication failback. :param api: Dell SC API. :param volume: Cinder Volume :param qosnode: Dell QOS node object. :return: replitem dict. """ LOG.info('failback_volumes: replicated volume') # Get our current volume. cvol = api.find_volume(volume['id'], volume['provider_id']) # Original volume on the primary. ovol = api.find_repl_volume(volume['id'], api.primaryssn, None, True, False) # Delete our current mappings. api.remove_mappings(cvol) # If there is a replication to delete do so. api.delete_replication(ovol, api.ssn, False) # Replicate to a common replay. screpl = api.replicate_to_common(cvol, ovol, 'tempqos') # We made it this far. Update our status. screplid = None status = '' if screpl: screplid = screpl['instanceId'] nvolid = screpl['destinationVolume']['instanceId'] status = 'inprogress' else: LOG.error('Unable to restore %s', volume['id']) screplid = None nvolid = None status = 'error' # Save some information for the next step. # nvol is the new volume created by replicate_to_common. # We also grab our extra specs here. 
replitem = { 'volume': volume, 'specs': self._parse_extraspecs(volume), 'qosnode': qosnode, 'screpl': screplid, 'cvol': cvol['instanceId'], 'ovol': ovol['instanceId'], 'nvol': nvolid, 'rdd': str(api.ssn), 'status': status} return replitem def _failback_live_volume(self, api, id, provider_id): """failback the live volume to its original :param api: Dell SC API :param id: Volume ID :param provider_id: Dell Instance ID :return: model_update dict """ model_update = {} # We do not search by name. Only failback if we have a complete # LV object. sclivevolume = api.get_live_volume(provider_id) # TODO(tswanson): Check swapped state first. if sclivevolume and api.swap_roles_live_volume(sclivevolume): LOG.info('Success swapping sclivevolume roles %s', id) model_update = { 'status': 'available', 'replication_status': fields.ReplicationStatus.ENABLED, 'provider_id': sclivevolume['secondaryVolume']['instanceId']} else: LOG.info('Failure swapping roles %s', id) model_update = {'status': 'error'} return model_update def _finish_failback(self, api, replitems): # Wait for replication to complete. # This will also flip replication. self._wait_for_replication(api, replitems) # Replications are done. Attach to any additional replication # backends. self._reattach_remaining_replications(api, replitems) self._fixup_types(api, replitems) return self._volume_updates(replitems) def failback_volumes(self, volumes): """This is a generic volume failback. :param volumes: List of volumes that need to be failed back. :return: volume_updates for the list of volumes. """ LOG.info('failback_volumes') with self._client.open_connection() as api: # Get our qosnode. This is a good way to make sure the backend # is still setup so that we can do this. qosnode = self._get_qos(api.ssn) if not qosnode: raise exception.VolumeBackendAPIException( message=_('Unable to failback. Backend is misconfigured.')) volume_updates = [] replitems = [] # Trundle through the volumes. Update non replicated to alive again # and reverse the replications for the remaining volumes. for volume in volumes: LOG.info('failback_volumes: starting volume: %s', volume) model_update = {} if volume.get('replication_driver_data'): rspecs = self._get_replication_specs( self._get_volume_extra_specs(volume)) if rspecs['live']: model_update = self._failback_live_volume( api, volume['id'], volume['provider_id']) else: replitem = self._failback_replication(api, volume, qosnode) # Save some information for the next step. # nvol is the new volume created by # replicate_to_common. We also grab our # extra specs here. replitems.append(replitem) else: # Not replicated. Just set it to available. model_update = {'status': 'available'} # Save our update if model_update: volume_updates.append({'volume_id': volume['id'], 'updates': model_update}) # Let's do up to 5 replications at once. if len(replitems) == 5: volume_updates += self._finish_failback(api, replitems) replitems = [] # Finish any leftover items if replitems: volume_updates += self._finish_failback(api, replitems) # Set us back to a happy state. # The only way this doesn't happen is if the primary is down. 
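# _update_backend(None) clears active_backend_id and failed_over, so the
# backend reports itself as primary again once failback has finished.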
self._update_backend(None) return volume_updates def _failover_replication(self, api, id, provider_id, destssn): rvol = api.break_replication(id, provider_id, destssn) model_update = {} if rvol: LOG.info('Success failing over volume %s', id) model_update = {'replication_status': fields.ReplicationStatus.FAILED_OVER, 'provider_id': rvol['instanceId']} else: LOG.info('Failed failing over volume %s', id) model_update = {'status': 'error'} return model_update def _failover_live_volume(self, api, id, provider_id): model_update = {} # Search for volume by id if we have to. sclivevolume = api.get_live_volume(provider_id, id) if sclivevolume: swapped = api.is_swapped(provider_id, sclivevolume) # If we aren't swapped try it. If fail error out. if not swapped and not api.swap_roles_live_volume(sclivevolume): LOG.info('Failure swapping roles %s', id) model_update = {'status': 'error'} return model_update LOG.info('Success swapping sclivevolume roles %s', id) sclivevolume = api.get_live_volume(provider_id) model_update = { 'replication_status': fields.ReplicationStatus.FAILED_OVER, 'provider_id': sclivevolume['primaryVolume']['instanceId']} # Error and leave. return model_update def failover_host(self, context, volumes, secondary_id=None, groups=None): """Failover to secondary. :param context: security context :param secondary_id: Specifies rep target to fail over to :param volumes: List of volumes serviced by this backend. :returns: destssn, volume_updates data structure Example volume_updates data structure: .. code-block:: json [{'volume_id': , 'updates': {'provider_id': 8, 'replication_status': 'failed-over', 'replication_extended_status': 'whatever',...}},] """ LOG.debug('failover-host') LOG.debug(self.failed_over) LOG.debug(self.active_backend_id) LOG.debug(self.replication_enabled) if self.failed_over: if secondary_id == 'default': LOG.debug('failing back') return 'default', self.failback_volumes(volumes), [] raise exception.InvalidReplicationTarget( reason=_('Already failed over')) LOG.info('Failing backend to %s', secondary_id) # basic check if self.replication_enabled: with self._client.open_connection() as api: # Look for the specified secondary. destssn = self._parse_secondary(api, secondary_id) if destssn: # We roll through trying to break replications. # Is failing here a complete failure of failover? volume_updates = [] for volume in volumes: model_update = {} if volume.get('replication_driver_data'): rspecs = self._get_replication_specs( self._get_volume_extra_specs(volume)) if rspecs['live']: model_update = self._failover_live_volume( api, volume['id'], volume.get('provider_id')) else: model_update = self._failover_replication( api, volume['id'], volume.get('provider_id'), destssn) else: # Not a replicated volume. Try to unmap it. scvolume = api.find_volume( volume['id'], volume.get('provider_id')) api.remove_mappings(scvolume) model_update = {'status': 'error'} # Either we are failed over or our status is now error. volume_updates.append({'volume_id': volume['id'], 'updates': model_update}) # this is it. self._update_backend(destssn) LOG.debug('after update backend') LOG.debug(self.failed_over) LOG.debug(self.active_backend_id) LOG.debug(self.replication_enabled) return destssn, volume_updates, [] else: raise exception.InvalidReplicationTarget(reason=( _('replication_failover failed. %s not found.') % secondary_id)) # I don't think we should ever get here. raise exception.VolumeBackendAPIException(message=( _('replication_failover failed. 
' 'Backend not configured for failover'))) def _get_unmanaged_replay(self, api, volume_name, provider_id, existing_ref): replay_name = None if existing_ref: replay_name = existing_ref.get('source-name') if not replay_name: msg = _('_get_unmanaged_replay: Must specify source-name.') LOG.error(msg) raise exception.ManageExistingInvalidReference( existing_ref=existing_ref, reason=msg) # Find our volume. scvolume = api.find_volume(volume_name, provider_id) if not scvolume: # Didn't find it. msg = (_('_get_unmanaged_replay: Cannot find volume id %s') % volume_name) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) # Find our replay. screplay = api.find_replay(scvolume, replay_name) if not screplay: # Didn't find it. Reference must be invalid. msg = (_('_get_unmanaged_replay: Cannot ' 'find snapshot named %s') % replay_name) LOG.error(msg) raise exception.ManageExistingInvalidReference( existing_ref=existing_ref, reason=msg) return screplay def manage_existing_snapshot(self, snapshot, existing_ref): """Brings an existing backend storage object under Cinder management. existing_ref is passed straight through from the API request's manage_existing_ref value, and it is up to the driver how this should be interpreted. It should be sufficient to identify a storage object that the driver should somehow associate with the newly-created cinder snapshot structure. There are two ways to do this: 1. Rename the backend storage object so that it matches the snapshot['name'] which is how drivers traditionally map between a cinder snapshot and the associated backend storage object. 2. Place some metadata on the snapshot, or somewhere in the backend, that allows other driver requests (e.g. delete) to locate the backend storage object when required. If the existing_ref doesn't make sense, or doesn't refer to an existing backend storage object, raise a ManageExistingInvalidReference exception. """ with self._client.open_connection() as api: # Find our unmanaged snapshot. This will raise on error. volume_name = snapshot.get('volume_id') provider_id = snapshot.get('provider_id') snapshot_id = snapshot.get('id') screplay = self._get_unmanaged_replay(api, volume_name, provider_id, existing_ref) # Manage means update description and update expiration. if not api.manage_replay(screplay, snapshot_id): # That didn't work. Error. msg = (_('manage_existing_snapshot: Error managing ' 'existing replay %(ss)s on volume %(vol)s') % {'ss': screplay.get('description'), 'vol': volume_name}) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) # Life is good. Let the world know what we've done. LOG.info('manage_existing_snapshot: snapshot %(exist)s on ' 'volume %(volume)s has been renamed to %(id)s and is ' 'now managed by Cinder.', {'exist': screplay.get('description'), 'volume': volume_name, 'id': snapshot_id}) return {'provider_id': screplay['createVolume']['instanceId']} # NOTE: Can't use abstractmethod before all drivers implement it def manage_existing_snapshot_get_size(self, snapshot, existing_ref): """Return size of snapshot to be managed by manage_existing. When calculating the size, round up to the next GB. 
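:raises VolumeBackendAPIException: if the backing replay size is not an exact multiple of 1 GB.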
""" volume_name = snapshot.get('volume_id') provider_id = snapshot.get('provider_id') with self._client.open_connection() as api: screplay = self._get_unmanaged_replay(api, volume_name, provider_id, existing_ref) sz, rem = storagecenter_api.SCApi.size_to_gb( screplay['size']) if rem > 0: raise exception.VolumeBackendAPIException( data=_('Volume size must be a multiple of 1 GB.')) return sz # NOTE: Can't use abstractmethod before all drivers implement it def unmanage_snapshot(self, snapshot): """Removes the specified snapshot from Cinder management. Does not delete the underlying backend storage object. NOTE: We do set the expire countdown to 1 day. Once a snapshot is unmanaged it will expire 24 hours later. """ with self._client.open_connection() as api: snapshot_id = snapshot.get('id') # provider_id is the snapshot's parent volume's instanceId. provider_id = snapshot.get('provider_id') volume_name = snapshot.get('volume_id') # Find our volume. scvolume = api.find_volume(volume_name, provider_id) if not scvolume: # Didn't find it. msg = (_('unmanage_snapshot: Cannot find volume id %s') % volume_name) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) # Find our replay. screplay = api.find_replay(scvolume, snapshot_id) if not screplay: # Didn't find it. Reference must be invalid. msg = (_('unmanage_snapshot: Cannot find snapshot named %s') % snapshot_id) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) # Free our snapshot. api.unmanage_replay(screplay) # Do not check our result. def thaw_backend(self, context): """Notify the backend that it's unfrozen/thawed. This is a gate. We do not allow the backend to be thawed if it is still failed over. :param context: security context :response: True on success :raises Invalid: if it cannot be thawed. """ # We shouldn't be called if we are not failed over. if self.failed_over: msg = _('The Dell SC array does not support thawing a failed over' ' replication. Please migrate volumes to an operational ' 'back-end or resolve primary system issues and ' 'fail back to reenable full functionality.') LOG.error(msg) raise exception.Invalid(msg) return True def is_multiattach_to_host(self, volume_attachment, host_name): # When multiattach is enabled, a volume could be attached to two or # more instances which are hosted on one nova host. # Because the backend cannot recognize the volume is attached to two # or more instances, we should keep the volume attached to the nova # host until the volume is detached from the last instance. LOG.info('is_multiattach_to_host: volume_attachment %s.', volume_attachment) LOG.info('is_multiattach_to_host: host_name %s.', host_name) if not volume_attachment: return False for a in volume_attachment: LOG.debug('attachment %s.', a) attachment = [a for a in volume_attachment if a['attach_status'] == fields.VolumeAttachStatus.ATTACHED and a['attached_host'] == host_name] LOG.info('is_multiattach_to_host: attachment %s.', attachment) return len(attachment) > 1 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/drivers/dell_emc/sc/storagecenter_fc.py0000664000175000017500000004003600000000000025363 0ustar00zuulzuul00000000000000# Copyright (c) 2015-2017 Dell Inc, or its subsidiaries. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Volume driver for Dell Storage Center.""" from oslo_log import log as logging from oslo_utils import excutils from cinder.common import constants from cinder import exception from cinder.i18n import _ from cinder import interface from cinder import utils from cinder.volume import driver from cinder.volume.drivers.dell_emc.sc import storagecenter_common from cinder.zonemanager import utils as fczm_utils LOG = logging.getLogger(__name__) @interface.volumedriver class SCFCDriver(storagecenter_common.SCCommonDriver, driver.FibreChannelDriver): """Implements commands for Dell Storage Center FC management. To enable the driver add the following line to the cinder configuration: volume_driver=cinder.volume.drivers.dell_emc.sc.storagecenter_fc.\ SCFCDriver Version history: .. code-block:: none 1.0.0 - Initial driver 1.1.0 - Added extra spec support for Storage Profile selection 1.2.0 - Added consistency group support. 2.0.0 - Switched to inheriting functional objects rather than volume driver. 2.1.0 - Added support for ManageableVD. 2.2.0 - Driver retype support for switching volume's Storage Profile 2.3.0 - Added Legacy Port Mode Support 2.3.1 - Updated error handling. 2.4.0 - Added Replication V2 support. 2.4.1 - Updated Replication support to V2.1. 2.5.0 - ManageableSnapshotsVD implemented. 3.0.0 - ProviderID utilized. 3.1.0 - Failback supported. 3.2.0 - Live Volume support. 3.3.0 - Support for a secondary DSM. 3.4.0 - Support for excluding a domain. 3.5.0 - Support for AFO. 3.6.0 - Server type support. 3.7.0 - Support for Data Reduction, Group QOS and Volume QOS. 4.0.0 - Driver moved to dell_emc. 4.1.0 - Timeouts added to rest calls. 4.1.1 - excluded_domain_ips support. 4.1.2 - included_domain_ips IP support. """ VERSION = '4.1.2' CI_WIKI_NAME = "DellEMC_SC_CI" SUPPORTED = False def __init__(self, *args, **kwargs): super(SCFCDriver, self).__init__(*args, **kwargs) self.backend_name =\ self.configuration.safe_get('volume_backend_name') or 'Dell-FC' self.storage_protocol = constants.FC def validate_connector(self, connector): """Fail if connector doesn't contain all the data needed by driver. Do a check on the connector and ensure that it has wwnns, wwpns. """ self.validate_connector_has_setting(connector, 'wwpns') self.validate_connector_has_setting(connector, 'wwnns') def initialize_connection(self, volume, connector): """Initializes the connection and returns connection info. Assign any created volume to a compute node/host so that it can be used from that host. The driver returns a driver_volume_type of 'fibre_channel'. The target_wwn can be a single entry or a list of wwns that correspond to the list of remote wwn(s) that will export the volume. """ # We use id to name the volume name as it is a # known unique name. volume_name = volume.get('id') provider_id = volume.get('provider_id') islivevol = self._is_live_vol(volume) LOG.debug('Initialize connection: %s', volume_name) with self._client.open_connection() as api: try: wwpns = connector.get('wwpns') # Find the volume on the storage center. 
Note that if this # is live volume and we are swapped this will be the back # half of the live volume. scvolume = api.find_volume(volume_name, provider_id, islivevol) if scvolume: # Get the SSN it is on. ssn = scvolume['instanceId'].split('.')[0] # Find our server. scserver = self._find_server(api, wwpns, ssn) # No? Create it. if scserver is None: scserver = api.create_server( wwpns, self.configuration.dell_server_os, ssn) # We have a volume and a server. Map them. if scserver is not None: mapping = api.map_volume(scvolume, scserver) if mapping is not None: # Since we just mapped our volume we had # best update our sc volume object. scvolume = api.get_volume(scvolume['instanceId']) lun, targets, init_targ_map = api.find_wwns( scvolume, scserver) # Do we have extra live volume work? if islivevol: # Get our live volume. sclivevolume = api.get_live_volume(provider_id) # Do not map to a failed over volume. if (sclivevolume and not api.is_failed_over(provider_id, sclivevolume)): # Now map our secondary. lvlun, lvtargets, lvinit_targ_map = ( self.initialize_secondary(api, sclivevolume, wwpns)) # Unmapped. Add info to our list. targets += lvtargets init_targ_map.update(lvinit_targ_map) # Roll up our return data. if lun is not None and len(targets) > 0: data = {'driver_volume_type': 'fibre_channel', 'data': {'target_lun': lun, 'target_discovered': True, 'target_wwn': targets, 'initiator_target_map': init_targ_map, 'discard': True}} LOG.debug('Return FC data: %s', data) fczm_utils.add_fc_zone(data) return data LOG.error('Lun mapping returned null!') except Exception: with excutils.save_and_reraise_exception(): LOG.error('Failed to initialize connection.') # We get here because our mapping is none so blow up. raise exception.VolumeBackendAPIException( data=_('Unable to map volume.')) def _find_server(self, api, wwns, ssn=-1): for wwn in wwns: scserver = api.find_server(wwn, ssn) if scserver is not None: return scserver return None def initialize_secondary(self, api, sclivevolume, wwns): """Initialize the secondary connection of a live volume pair. :param api: Dell SC api object. :param sclivevolume: Dell SC live volume object. :param wwns: Cinder list of wwns from the connector. :return: lun, targets and initiator target map. """ # Find our server. secondary = self._find_server( api, wwns, sclivevolume['secondaryScSerialNumber']) # No? Create it. if secondary is None: secondary = api.create_server( wwns, self.configuration.dell_server_os, sclivevolume['secondaryScSerialNumber']) if secondary: if api.map_secondary_volume(sclivevolume, secondary): # Get mappings. secondaryvol = api.get_volume( sclivevolume['secondaryVolume']['instanceId']) if secondaryvol: return api.find_wwns(secondaryvol, secondary) LOG.warning('Unable to map live volume secondary volume' ' %(vol)s to secondary server wwns: %(wwns)r', {'vol': sclivevolume['secondaryVolume']['instanceName'], 'wwns': wwns}) return None, [], {} def force_detach(self, volume): """Breaks all volume server connections including to the live volume. :param volume: volume to be detached :raises VolumeBackendAPIException: On failure to sever connections. """ with self._client.open_connection() as api: volume_name = volume.get('id') provider_id = volume.get('provider_id') try: islivevol = self._is_live_vol(volume) scvolume = api.find_volume(volume_name, provider_id, islivevol) if scvolume: rtn = api.unmap_all(scvolume) # If this fails we blow up. 
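# A falsey return from unmap_all means at least one mapping is still in
# place, so surface that as a backend error rather than returning quietly.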
if not rtn: raise exception.VolumeBackendAPIException( _('Terminate connection failed')) # If there is a livevol we just take a shot at # disconnecting. if islivevol: sclivevolume = api.get_live_volume(provider_id) if sclivevolume: self.terminate_secondary(api, sclivevolume, None) except Exception: with excutils.save_and_reraise_exception(): LOG.error('Failed to terminates %(vol)s connections.', {'vol': volume_name}) # We don't know the servers that were involved so we just return # the most basic of data. info = {'driver_volume_type': 'fibre_channel', 'data': {}} return info @utils.synchronized('{self.driver_prefix}-{volume.id}') def terminate_connection(self, volume, connector, force=False, **kwargs): # Special case if connector is None: return self.force_detach(volume) # Grab some quick info. volume_name = volume.get('id') provider_id = volume.get('provider_id') LOG.debug('Terminate connection: %s', volume_name) LOG.debug('Volume details %s', volume) # None `connector` indicates force detach, then detach all even if the # volume is multi-attached. is_multiattached = (hasattr(volume, 'volume_attachment') and self.is_multiattach_to_host( volume.get('volume_attachment'), connector['host'])) if is_multiattached: LOG.info('Cannot terminate connection: ' '%(vol)s is multiattached.', {'vol': volume_name}) return True with self._client.open_connection() as api: try: wwpns = [] if not connector else connector.get('wwpns', []) # Find the volume on the storage center. islivevol = self._is_live_vol(volume) scvolume = api.find_volume(volume_name, provider_id, islivevol) if scvolume: # Get the SSN it is on. ssn = scvolume['instanceId'].split('.')[0] # Will be None if we have no wwpns. scserver = self._find_server(api, wwpns, ssn) # Get our target map so we can return it to free up a zone. lun, targets, init_targ_map = api.find_wwns(scvolume, scserver) # Do we have extra live volume work? if islivevol: # Get our live volume. sclivevolume = api.get_live_volume(provider_id) # Do not map to a failed over volume. if (sclivevolume and not api.is_failed_over(provider_id, sclivevolume)): lvlun, lvtargets, lvinit_targ_map = ( self.terminate_secondary( api, sclivevolume, wwpns)) # Add to our return. if lvlun: targets += lvtargets init_targ_map.update(lvinit_targ_map) if (wwpns and scserver and api.unmap_volume(scvolume, scserver) is True): LOG.debug('Connection terminated') elif not wwpns and api.unmap_all(scvolume): LOG.debug('All connections terminated') else: raise exception.VolumeBackendAPIException( data=_('Terminate connection failed')) info = {'driver_volume_type': 'fibre_channel', 'data': {}} # if not then we return the target map so that # the zone can be freed up. if scserver and api.get_volume_count(scserver) == 0: info['data'] = {'target_wwn': targets, 'initiator_target_map': init_targ_map} fczm_utils.remove_fc_zone(info) return info except Exception: with excutils.save_and_reraise_exception(): LOG.error('Failed to terminate connection') raise exception.VolumeBackendAPIException( data=_('Terminate connection unable to connect to backend.')) def terminate_secondary(self, api, sclivevolume, wwns): lun = None targets = [] init_targ_map = {} # Get our volume. secondaryvol = api.get_volume( sclivevolume['secondaryVolume']['instanceId']) # We have one so let's get to work. if secondaryvol: # Are we unmapping a specific server? if wwns: # Find our server. secondary = self._find_server( api, wwns, sclivevolume['secondaryScSerialNumber']) # Get our map. 
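# find_wwns returns the lun plus the target wwns and the initiator/target
# map used when freeing the secondary FC zone below.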
lun, targets, init_targ_map = api.find_wwns(secondaryvol, secondary) # If we have a server and a volume lets unmap them. ret = api.unmap_volume(secondaryvol, secondary) LOG.debug('terminate_secondary: ' 'secondary volume %(name)s unmap ' 'to secondary server %(server)s result: %(result)r', {'name': secondaryvol['name'], 'server': secondary['name'], 'result': ret}) else: # Just unmap all. ret = api.unmap_all(secondaryvol) LOG.debug('terminate_secondary: secondary volume %(name)s ' 'unmap all result: %(result)r', {'name': secondaryvol['name'], 'result': ret}) else: LOG.debug('terminate_secondary: secondary volume not found.') # return info if any return lun, targets, init_targ_map ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/drivers/dell_emc/sc/storagecenter_iscsi.py0000664000175000017500000003620000000000000026103 0ustar00zuulzuul00000000000000# Copyright (c) 2015-2017 Dell Inc, or its subsidiaries. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Volume driver for Dell Storage Center.""" from oslo_log import log as logging from oslo_utils import excutils from cinder import exception from cinder.i18n import _ from cinder import interface from cinder import utils from cinder.volume import driver from cinder.volume.drivers.dell_emc.sc import storagecenter_common LOG = logging.getLogger(__name__) @interface.volumedriver class SCISCSIDriver(storagecenter_common.SCCommonDriver, driver.ISCSIDriver): """Implements commands for Dell Storage Center ISCSI management. To enable the driver add the following line to the cinder configuration: volume_driver=cinder.volume.drivers.dell_emc.sc.\ storagecenter_iscsi.SCISCSIDriver Version history: .. code-block:: none 1.0.0 - Initial driver 1.1.0 - Added extra spec support for Storage Profile selection 1.2.0 - Added consistency group support. 2.0.0 - Switched to inheriting functional objects rather than volume driver. 2.1.0 - Added support for ManageableVD. 2.2.0 - Driver retype support for switching volume's Storage Profile. Added API 2.2 support. 2.3.0 - Added Legacy Port Mode Support 2.3.1 - Updated error handling. 2.4.0 - Added Replication V2 support. 2.4.1 - Updated Replication support to V2.1. 2.5.0 - ManageableSnapshotsVD implemented. 3.0.0 - ProviderID utilized. 3.1.0 - Failback Supported. 3.2.0 - Live Volume support. 3.3.0 - Support for a secondary DSM. 3.4.0 - Support for excluding a domain. 3.5.0 - Support for AFO. 3.6.0 - Server type support. 3.7.0 - Support for Data Reduction, Group QOS and Volume QOS. 4.0.0 - Driver moved to dell_emc. 4.1.0 - Timeouts added to rest calls. 4.1.1 - excluded_domain_ips support. 4.1.2 - included_domain_ips IP support. 
""" VERSION = '4.1.2' CI_WIKI_NAME = "DellEMC_SC_CI" SUPPORTED = False def __init__(self, *args, **kwargs): super(SCISCSIDriver, self).__init__(*args, **kwargs) self.backend_name = ( self.configuration.safe_get('volume_backend_name') or 'Dell-iSCSI') def initialize_connection(self, volume, connector): # Initialize_connection will find or create a server identified by the # connector on the Dell backend. It will then map the volume to it # and return the properties as follows.. # {'driver_volume_type': 'iscsi', # data = {'target_discovered': False, # 'target_iqn': preferred iqn, # 'target_iqns': all iqns, # 'target_portal': preferred portal, # 'target_portals': all portals, # 'target_lun': preferred lun, # 'target_luns': all luns, # } # We use id to name the volume name as it is a # known unique name. volume_name = volume.get('id') provider_id = volume.get('provider_id') islivevol = self._is_live_vol(volume) initiator_name = connector.get('initiator') multipath = connector.get('multipath', False) LOG.info('initialize_connection: %(vol)s:%(pid)s:' '%(intr)s. Multipath is %(mp)r', {'vol': volume_name, 'pid': provider_id, 'intr': initiator_name, 'mp': multipath}) with self._client.open_connection() as api: try: # Find the volume on the storage center. Note that if this # is live volume and we are swapped this will be the back # half of the live volume. scvolume = api.find_volume(volume_name, provider_id, islivevol) if scvolume: # Get the SSN it is on. ssn = scvolume['instanceId'].split('.')[0] # Find our server. scserver = api.find_server(initiator_name, ssn) # No? Create it. if scserver is None: scserver = api.create_server( [initiator_name], self.configuration.dell_server_os, ssn) # if we have a server and a volume lets bring them # together. if scserver is not None: mapping = api.map_volume(scvolume, scserver) if mapping is not None: # Since we just mapped our volume we had best # update our sc volume object. scvolume = api.get_volume(scvolume['instanceId']) # Our return. iscsiprops = {} # Three cases that should all be satisfied with the # same return of Target_Portal and Target_Portals. # 1. Nova is calling us so we need to return the # Target_Portal stuff. It should ignore the # Target_Portals stuff. # 2. OS brick is calling us in multipath mode so we # want to return Target_Portals. It will ignore # the Target_Portal stuff. # 3. OS brick is calling us in single path mode so # we want to return Target_Portal and # Target_Portals as alternates. iscsiprops = api.find_iscsi_properties(scvolume, scserver) # If this is a live volume we need to map up our # secondary volume. Note that if we have failed # over we do not wish to do this. if islivevol: sclivevolume = api.get_live_volume(provider_id) # Only map if we are not failed over. if (sclivevolume and not api.is_failed_over(provider_id, sclivevolume)): secondaryprops = self.initialize_secondary( api, sclivevolume, initiator_name) # Combine with iscsiprops iscsiprops['target_iqns'] += ( secondaryprops['target_iqns']) iscsiprops['target_portals'] += ( secondaryprops['target_portals']) iscsiprops['target_luns'] += ( secondaryprops['target_luns']) # Return our iscsi properties. iscsiprops['discard'] = True return {'driver_volume_type': 'iscsi', 'data': iscsiprops} # Re-raise any backend exception. except exception.VolumeBackendAPIException: with excutils.save_and_reraise_exception(): LOG.error('Failed to initialize connection') # If there is a data structure issue then detail the exception # and bail with a Backend Exception. 
except Exception as error: LOG.error(error) raise exception.VolumeBackendAPIException(error) # We get here because our mapping is none or we have no valid iqn to # return so blow up. raise exception.VolumeBackendAPIException( _('Unable to map volume')) def initialize_secondary(self, api, sclivevolume, initiatorname): """Initialize the secondary connection of a live volume pair. :param api: Dell SC api. :param sclivevolume: Dell SC live volume object. :param initiatorname: Cinder iscsi initiator from the connector. :return: ISCSI properties. """ # Find our server. secondary = api.find_server(initiatorname, sclivevolume['secondaryScSerialNumber']) # No? Create it. if secondary is None: secondary = api.create_server( [initiatorname], self.configuration.dell_server_os, sclivevolume['secondaryScSerialNumber']) if secondary: if api.map_secondary_volume(sclivevolume, secondary): # Get our volume and get our properties. secondaryvol = api.get_volume( sclivevolume['secondaryVolume']['instanceId']) if secondaryvol: return api.find_iscsi_properties(secondaryvol, secondary) # Dummy return on failure. data = {'target_discovered': False, 'target_iqn': None, 'target_iqns': [], 'target_portal': None, 'target_portals': [], 'target_lun': None, 'target_luns': [], } LOG.warning('Unable to map live volume secondary volume' ' %(vol)s to secondary server intiator: %(init)r', {'vol': sclivevolume['secondaryVolume']['instanceName'], 'init': initiatorname}) return data def force_detach(self, volume): """Breaks all volume server connections including to the live volume. :param volume: volume to be detached :raises VolumeBackendAPIException: On failure to sever connections. """ with self._client.open_connection() as api: volume_name = volume.get('id') provider_id = volume.get('provider_id') try: rtn = False islivevol = self._is_live_vol(volume) scvolume = api.find_volume(volume_name, provider_id, islivevol) if scvolume: rtn = api.unmap_all(scvolume) if rtn and islivevol: sclivevolume = api.get_live_volume(provider_id) if sclivevolume: rtn = self.terminate_secondary(api, sclivevolume, None) return rtn except Exception: with excutils.save_and_reraise_exception(): LOG.error('Failed to terminates %(vol)s connections.', {'vol': volume_name}) raise exception.VolumeBackendAPIException( _('Terminate connection failed')) @utils.synchronized('{self.driver_prefix}-{volume.id}') def terminate_connection(self, volume, connector, force=False, **kwargs): # Special case # None `connector` indicates force detach, then detach all even if the # volume is multi-attached. if connector is None: return self.force_detach(volume) # Normal terminate connection, then. # Grab some quick info. volume_name = volume.get('id') provider_id = volume.get('provider_id') initiator_name = None if not connector else connector.get('initiator') LOG.info('Volume in terminate connection: %(vol)s', {'vol': volume}) is_multiattached = (hasattr(volume, 'volume_attachment') and self.is_multiattach_to_host( volume.get('volume_attachment'), connector['host'])) if is_multiattached: LOG.info('Cannot terminate connection: ' '%(vol)s is multiattached.', {'vol': volume_name}) return True with self._client.open_connection() as api: try: # Find the volume on the storage center. Note that if this # is live volume and we are swapped this will be the back # half of the live volume. islivevol = self._is_live_vol(volume) scvolume = api.find_volume(volume_name, provider_id, islivevol) if scvolume: # Get the SSN it is on. 
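# The instanceId begins with the array SSN followed by a dot, so the
# first piece of the split is the SSN serving this volume.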
ssn = scvolume['instanceId'].split('.')[0] # Unmap our secondary if not failed over.. if islivevol: sclivevolume = api.get_live_volume(provider_id) if (sclivevolume and not api.is_failed_over(provider_id, sclivevolume)): self.terminate_secondary(api, sclivevolume, initiator_name) # Find our server. scserver = (None if not initiator_name else api.find_server(initiator_name, ssn)) # If we have a server and a volume lets pull them apart if ((scserver and api.unmap_volume(scvolume, scserver) is True) or (not scserver and api.unmap_all(scvolume))): LOG.debug('Connection terminated') return except Exception: with excutils.save_and_reraise_exception(): LOG.error('Failed to terminate connection ' '%(initiator)s %(vol)s', {'initiator': initiator_name, 'vol': volume_name}) raise exception.VolumeBackendAPIException( _('Terminate connection failed')) def terminate_secondary(self, api, sclivevolume, initiatorname): # Only return False if we tried something and it failed. rtn = True secondaryvol = api.get_volume( sclivevolume['secondaryVolume']['instanceId']) if secondaryvol: if initiatorname: # Find our server. secondary = api.find_server( initiatorname, sclivevolume['secondaryScSerialNumber']) rtn = api.unmap_volume(secondaryvol, secondary) else: rtn = api.unmap_all(secondaryvol) else: LOG.debug('terminate_secondary: secondary volume not found.') return rtn ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315577.3431208 cinder-27.0.0/cinder/volume/drivers/dell_emc/unity/0000775000175000017500000000000000000000000022234 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/drivers/dell_emc/unity/__init__.py0000664000175000017500000000130400000000000024343 0ustar00zuulzuul00000000000000# Copyright (c) 2016 Dell Inc. or its subsidiaries. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from cinder.volume.drivers.dell_emc.unity import driver Driver = driver.UnityDriver ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/drivers/dell_emc/unity/adapter.py0000664000175000017500000016450700000000000024243 0ustar00zuulzuul00000000000000# Copyright (c) 2016 - 2018 Dell Inc. or its subsidiaries. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import contextlib import copy import functools import os import random from oslo_config import cfg from oslo_log import log as logging from oslo_utils import excutils from oslo_utils import importutils from cinder.common import constants from cinder import exception from cinder.i18n import _ from cinder.objects import fields from cinder import utils as cinder_utils from cinder.volume.drivers.dell_emc.unity import client from cinder.volume.drivers.dell_emc.unity import utils from cinder.volume import volume_types from cinder.volume import volume_utils storops = importutils.try_import('storops') if storops: from storops import exception as storops_ex from storops.unity import enums else: # Set storops_ex to be None for unit test storops_ex = None enums = None LOG = logging.getLogger(__name__) PROTOCOL_FC = constants.FC PROTOCOL_ISCSI = constants.ISCSI class VolumeParams(object): def __init__(self, adapter, volume, group_specs=None): self._adapter = adapter self._volume = volume self._volume_id = volume.id self._name = volume.name self._size = volume.size self._description = (volume.display_description if volume.display_description else volume.display_name) self._pool = None self._io_limit_policy = None self._is_thick = None self._is_compressed = None self._is_in_cg = None self._is_replication_enabled = None self._tiering_policy = None self.group_specs = group_specs if group_specs else {} @property def volume_id(self): return self._volume_id @property def name(self): return self._name @name.setter def name(self, value): self._name = value @property def size(self): return self._size @size.setter def size(self, value): self._size = value @property def description(self): return self._description @description.setter def description(self, value): self._description = value @property def pool(self): if self._pool is None: self._pool = self._adapter._get_target_pool(self._volume) return self._pool @pool.setter def pool(self, value): self._pool = value @property def io_limit_policy(self): if self._io_limit_policy is None: qos_specs = utils.get_backend_qos_specs(self._volume) self._io_limit_policy = self._adapter.client.get_io_limit_policy( qos_specs) return self._io_limit_policy @io_limit_policy.setter def io_limit_policy(self, value): self._io_limit_policy = value @property def is_thick(self): if self._is_thick is None: provision = utils.get_extra_spec(self._volume, utils.PROVISIONING_TYPE) support = utils.get_extra_spec(self._volume, 'thick_provisioning_support') self._is_thick = (provision == 'thick' and support == ' True') return self._is_thick @property def is_compressed(self): if self._is_compressed is None: provision = utils.get_extra_spec(self._volume, utils.PROVISIONING_TYPE) compression = utils.get_extra_spec(self._volume, 'compression_support') if (provision == utils.PROVISIONING_COMPRESSED and compression == ' True'): self._is_compressed = True return self._is_compressed @is_compressed.setter def is_compressed(self, value): self._is_compressed = value @property def is_in_cg(self): if self._is_in_cg is None: self._is_in_cg = (self._volume.group and volume_utils.is_group_a_cg_snapshot_type( self._volume.group)) return self._is_in_cg @property def tiering_policy(self): tiering_policy_map = {'StartHighThenAuto': enums.TieringPolicyEnum.AUTOTIER_HIGH, 'Auto': enums.TieringPolicyEnum.AUTOTIER, 'HighestAvailable': enums.TieringPolicyEnum.HIGHEST, 'LowestAvailable': enums.TieringPolicyEnum.LOWEST} if not self._tiering_policy: tiering_value = utils.get_extra_spec(self._volume, 
'storagetype:tiering') support = utils.get_extra_spec(self._volume, 'fast_support') == ' True' if tiering_value and support: self._tiering_policy = tiering_policy_map.get(tiering_value) # if no value, unity sets StartHighThenAuto as default return self._tiering_policy @property def cg_id(self): if self.is_in_cg: return self._volume.group_id return None @property def is_replication_enabled(self): if self._is_replication_enabled is None: value = utils.get_extra_spec(self._volume, 'replication_enabled') self._is_replication_enabled = value == ' True' return self._is_replication_enabled def __eq__(self, other): return (self.volume_id == other.volume_id and self.name == other.name and self.size == other.size and self.io_limit_policy == other.io_limit_policy and self.is_thick == other.is_thick and self.is_compressed == other.is_compressed and self.is_in_cg == other.is_in_cg and self.cg_id == other.cg_id and self.tiering_policy == other.tiering_policy and self.is_replication_enabled == other.is_replication_enabled) class CommonAdapter(object): protocol = 'unknown' driver_name = 'UnityAbstractDriver' driver_volume_type = 'unknown' def __init__(self, version=None): self.is_setup = False self.version = version self.driver = None self.config = None self.configured_pool_names = None self.reserved_percentage = None self.max_over_subscription_ratio = None self.volume_backend_name = None self.ip = None self.username = None self.password = None self.array_cert_verify = None self.array_ca_cert_path = None self._serial_number = None self.storage_pools_map = None self._client = None self.allowed_ports = None self.remove_empty_host = False self.to_lock_host = False self.replication_manager = None def do_setup(self, driver, conf): """Sets up the attributes of adapter. :param driver: the unity driver. :param conf: the driver configurations. """ self.driver = driver self.config = self.normalize_config(conf) self.replication_manager = driver.replication_manager self.configured_pool_names = self.config.unity_storage_pool_names self.reserved_percentage = self.config.reserved_percentage self.max_over_subscription_ratio = ( self.config.max_over_subscription_ratio) self.volume_backend_name = (self.config.safe_get('volume_backend_name') or self.driver_name) self.ip = self.config.san_ip self.username = self.config.san_login self.password = self.config.san_password # Allow for customized CA self.array_cert_verify = self.config.driver_ssl_cert_verify self.array_ca_cert_path = self.config.driver_ssl_cert_path sys_version = self.client.system.system_version if utils.is_before_4_1(sys_version): raise exception.VolumeBackendAPIException( data=_('Unity driver does not support array OE version: %s. 
' 'Upgrade to 4.1 or later.') % sys_version) self.storage_pools_map = self.get_managed_pools() self.allowed_ports = self.validate_ports(self.config.unity_io_ports) self.remove_empty_host = self.config.remove_empty_host self.to_lock_host = self.remove_empty_host group_name = (self.config.config_group if self.config.config_group else 'DEFAULT') folder_name = '%(group)s.%(sys_name)s' % { 'group': group_name, 'sys_name': self.client.system.info.name} persist_path = os.path.join(cfg.CONF.state_path, 'unity', folder_name) storops.TCHelper.set_up(persist_path) self.is_setup = True def normalize_config(self, config): config.unity_storage_pool_names = utils.remove_empty( '%s.unity_storage_pool_names' % config.config_group, config.unity_storage_pool_names) config.unity_io_ports = utils.remove_empty( '%s.unity_io_ports' % config.config_group, config.unity_io_ports) return config def get_all_ports(self): raise NotImplementedError() def validate_ports(self, ports_whitelist): all_ports = self.get_all_ports() # After normalize_config, `ports_whitelist` could be only None or valid # list in which the items are stripped. if ports_whitelist is None: return all_ports.id # For iSCSI port, the format is 'spa_eth0', and 'spa_iom_0_fc0' for FC. # Unix style glob like 'spa_*' is supported. whitelist = set(ports_whitelist) matched, _ignored, unmatched_whitelist = utils.match_any(all_ports.id, whitelist) if not matched: LOG.error('No matched ports filtered by all patterns: %s', whitelist) raise exception.InvalidConfigurationValue( option='%s.unity_io_ports' % self.config.config_group, value=self.config.unity_io_ports) if unmatched_whitelist: LOG.error('No matched ports filtered by below patterns: %s', unmatched_whitelist) raise exception.InvalidConfigurationValue( option='%s.unity_io_ports' % self.config.config_group, value=self.config.unity_io_ports) LOG.info('These ports %(matched)s will be used based on ' 'the option unity_io_ports: %(config)s', {'matched': matched, 'config': self.config.unity_io_ports}) return matched @property def verify_cert(self): verify_cert = self.array_cert_verify if verify_cert and self.array_ca_cert_path is not None: verify_cert = self.array_ca_cert_path return verify_cert @property def client(self): if self._client is None: self._client = client.UnityClient( self.ip, self.username, self.password, verify_cert=self.verify_cert) return self._client @property def serial_number(self): if self._serial_number is None: self._serial_number = self.client.get_serial() return self._serial_number def get_managed_pools(self): names = self.configured_pool_names array_pools = self.client.get_pools() valid_names = utils.validate_pool_names(names, array_pools.name) return {p.name: p for p in array_pools if p.name in valid_names} def makeup_model(self, lun_id, is_snap_lun=False): lun_type = 'snap_lun' if is_snap_lun else 'lun' location = self._build_provider_location(lun_id=lun_id, lun_type=lun_type) return { 'provider_location': location, 'provider_id': lun_id } def setup_replications(self, lun, model_update): if not self.replication_manager.is_replication_configured: LOG.debug('Replication device not configured, ' 'skip setting up replication for lun %s', lun.name) return model_update rep_data = {} rep_devices = self.replication_manager.replication_devices for backend_id, dst in rep_devices.items(): remote_serial_number = dst.adapter.serial_number LOG.debug('Setting up replication to remote system %s', remote_serial_number) remote_system = self.client.get_remote_system(remote_serial_number) if 
remote_system is None: raise exception.VolumeBackendAPIException( data=_('Setup replication to remote system %s failed.' 'Cannot find it.') % remote_serial_number) rep_session = self.client.create_replication( lun, dst.max_time_out_of_sync, dst.destination_pool.get_id(), remote_system) rep_data[backend_id] = rep_session.name return utils.enable_replication_status(model_update, rep_data) def create_volume(self, volume): """Creates a volume. :param volume: volume information """ params = VolumeParams(self, volume) log_params = { 'name': params.name, 'size': params.size, 'description': params.description, 'pool': params.pool, 'io_limit_policy': params.io_limit_policy, 'is_thick': params.is_thick, 'is_compressed': params.is_compressed, 'cg_id': params.cg_id, 'is_replication_enabled': params.is_replication_enabled, 'tiering_policy': params.tiering_policy } LOG.info('Create Volume: %(name)s, size: %(size)s, description: ' '%(description)s, pool: %(pool)s, io limit policy: ' '%(io_limit_policy)s, thick: %(is_thick)s, ' 'compressed: %(is_compressed)s, cg_group: %(cg_id)s, ' 'replication_enabled: %(is_replication_enabled)s.', log_params) lun = self.client.create_lun( name=params.name, size=params.size, pool=params.pool, description=params.description, io_limit_policy=params.io_limit_policy, is_thin=False if params.is_thick else None, is_compressed=params.is_compressed, tiering_policy=params.tiering_policy) if params.cg_id: if self.client.is_cg_replicated(params.cg_id): msg = (_('Consistency group %(cg_id)s is in ' 'replication status, cannot add lun to it.') % {'cg_id': params.cg_id}) raise exception.InvalidGroupStatus(reason=msg) LOG.info('Adding lun %(lun)s to cg %(cg)s.', {'lun': lun.get_id(), 'cg': params.cg_id}) self.client.update_cg(params.cg_id, [lun.get_id()], ()) model_update = self.makeup_model(lun.get_id()) if params.is_replication_enabled: if not params.cg_id: model_update = self.setup_replications( lun, model_update) else: # Volume replication_status need be disabled # And be controlled by group replication model_update['replication_status'] = ( fields.ReplicationStatus.DISABLED) return model_update def delete_volume(self, volume): lun_id = self.get_lun_id(volume) if lun_id is None: LOG.info('Backend LUN not found, skipping the deletion. 
' 'Volume: %(volume_name)s.', {'volume_name': volume.name}) else: self.client.delete_lun(lun_id) def retype(self, ctxt, volume, new_type, diff, host): """Changes volume from one type to another.""" old_qos_specs = {utils.QOS_SPECS: None} old_provision = None new_specs = volume_types.get_volume_type_extra_specs( new_type.get(utils.QOS_ID)) new_qos_specs = volume_types.get_volume_type_qos_specs( new_type.get(utils.QOS_ID)) lun = self.client.get_lun(name=volume.name) volume_type_id = volume.volume_type_id if volume_type_id: old_provision = utils.get_extra_spec(volume, utils.PROVISIONING_TYPE) old_qos_specs = volume_types.get_volume_type_qos_specs( volume_type_id) need_migration = utils.retype_need_migration( volume, old_provision, new_specs.get(utils.PROVISIONING_TYPE), host) need_change_compress = utils.retype_need_change_compression( old_provision, new_specs.get(utils.PROVISIONING_TYPE)) need_change_qos = utils.retype_need_change_qos( old_qos_specs, new_qos_specs) if need_migration or need_change_compress[0] or need_change_qos: if self.client.lun_has_snapshot(lun): LOG.warning('Driver is not able to do retype because ' 'the volume %s has snapshot(s).', volume.id) return False new_qos_dict = new_qos_specs.get(utils.QOS_SPECS) if need_change_qos: new_io_policy = (self.client.get_io_limit_policy(new_qos_dict) if need_change_qos else None) # Modify lun to change qos settings if new_io_policy: lun.modify(io_limit_policy=new_io_policy) else: # remove current qos settings old_qos_dict = old_qos_specs.get(utils.QOS_SPECS) old_io_policy = self.client.get_io_limit_policy(old_qos_dict) old_io_policy.remove_from_storage(lun) if need_migration: LOG.debug('Driver needs to use storage-assisted migration ' 'to retype the volume.') return self.migrate_volume(volume, host, new_specs) if need_change_compress[0]: # Modify lun to change compression lun.modify(is_compression=need_change_compress[1]) return True def _create_host_and_attach(self, host_name, lun_or_snap): @utils.lock_if(self.to_lock_host, '{lock_name}') def _lock_helper(lock_name): if not self.to_lock_host: host = self.client.create_host(host_name) else: # Use the lock in the decorator host = self.client.create_host_wo_lock(host_name) hlu = self.client.attach(host, lun_or_snap) return host, hlu return _lock_helper('{unity}-{host}'.format(unity=self.client.host, host=host_name)) def _initialize_connection(self, lun_or_snap, connector, vol_id): host, hlu = self._create_host_and_attach(connector['host'], lun_or_snap) self.client.update_host_initiators( host, self.get_connector_uids(connector)) data = self.get_connection_info(hlu, host, connector) data['target_discovered'] = True if vol_id is not None: data['volume_id'] = vol_id conn_info = { 'driver_volume_type': self.driver_volume_type, 'data': data, } return conn_info @volume_utils.trace def initialize_connection(self, volume, connector): lun = self.client.get_lun(lun_id=self.get_lun_id(volume)) return self._initialize_connection(lun, connector, volume.id) @staticmethod def filter_targets_by_host(host): # No target info for iSCSI driver return [] def _detach_and_delete_host(self, host_name, lun_or_snap, is_multiattach_to_host=False): @utils.lock_if(self.to_lock_host, '{lock_name}') def _lock_helper(lock_name): # Only get the host from cache here host = self.client.create_host_wo_lock(host_name) if not is_multiattach_to_host: self.client.detach(host, lun_or_snap) host.update() # need update to get the latest `host_luns` targets = self.filter_targets_by_host(host) if self.remove_empty_host and not 
host.host_luns: self.client.delete_host_wo_lock(host) return targets return _lock_helper('{unity}-{host}'.format(unity=self.client.host, host=host_name)) @staticmethod def get_terminate_connection_info(connector, targets): # No return data from terminate_connection for iSCSI driver return {} def _terminate_connection(self, lun_or_snap, connector, is_multiattach_to_host=False): is_force_detach = connector is None data = {} if is_force_detach: self.client.detach_all(lun_or_snap) else: targets = self._detach_and_delete_host( connector['host'], lun_or_snap, is_multiattach_to_host=is_multiattach_to_host) data = self.get_terminate_connection_info(connector, targets) return { 'driver_volume_type': self.driver_volume_type, 'data': data, } @volume_utils.trace def terminate_connection(self, volume, connector): lun = self.client.get_lun(lun_id=self.get_lun_id(volume)) # None `connector` indicates force detach, then detach all even the # volume is multi-attached. multiattach_flag = (connector is not None and utils.is_multiattach_to_host( volume.volume_attachment, connector['host'])) return self._terminate_connection( lun, connector, is_multiattach_to_host=multiattach_flag) def get_connector_uids(self, connector): return None def get_connection_info(self, hlu, host, connector): return {} def extend_volume(self, volume, new_size): lun_id = self.get_lun_id(volume) if lun_id is None: msg = (_('Backend LUN not found for Volume: %(volume_name)s.') % {'volume_name': volume.name}) raise exception.VolumeBackendAPIException(data=msg) else: self.client.extend_lun(lun_id, new_size) def _get_target_pool(self, volume): return self.storage_pools_map[utils.get_pool_name(volume)] def _build_provider_location(self, lun_id=None, lun_type=None): return utils.build_provider_location( system=self.serial_number, lun_type=lun_type, lun_id=lun_id, version=self.version) @utils.append_capabilities def update_volume_stats(self): return { 'volume_backend_name': self.volume_backend_name, 'storage_protocol': self.protocol, 'pools': self.get_pools_stats(), 'replication_enabled': self.replication_manager.is_replication_configured, 'replication_targets': list(self.replication_manager.replication_devices), } def get_pools_stats(self): self.storage_pools_map = self.get_managed_pools() return [self._get_pool_stats(pool) for pool in self.pools] @property def pools(self): return self.storage_pools_map.values() @utils.append_capabilities def _get_pool_stats(self, pool): return { 'pool_name': pool.name, 'total_capacity_gb': utils.byte_to_gib(pool.size_total), 'provisioned_capacity_gb': utils.byte_to_gib( pool.size_subscribed), 'free_capacity_gb': utils.byte_to_gib(pool.size_free), 'reserved_percentage': self.reserved_percentage, 'location_info': ('%(pool_name)s|%(array_serial)s' % {'pool_name': pool.name, 'array_serial': self.serial_number}), 'compression_support': pool.is_all_flash, 'max_over_subscription_ratio': ( self.max_over_subscription_ratio), 'multiattach': True, 'replication_enabled': self.replication_manager.is_replication_configured, 'replication_targets': list(self.replication_manager.replication_devices), } def get_lun_id(self, volume): """Retrieves id of the volume's backing LUN. :param volume: volume information """ if volume.provider_location: return utils.extract_provider_location(volume.provider_location, 'id') else: # In some cases, cinder will not update volume info in DB with # provider_location returned by us. We need to retrieve the id # from array. 
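# Fall back to looking the LUN up by name; return None when nothing
# matches so callers can decide whether a missing LUN is an error.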
lun = self.client.get_lun(name=volume.name) return lun.get_id() if lun is not None else None def create_snapshot(self, snapshot): """Creates a snapshot. :param snapshot: snapshot information. """ src_lun_id = self.get_lun_id(snapshot.volume) snap = self.client.create_snap(src_lun_id, snapshot.name) location = self._build_provider_location(lun_type='snapshot', lun_id=snap.get_id()) return {'provider_location': location, 'provider_id': snap.get_id()} def delete_snapshot(self, snapshot): """Deletes a snapshot. :param snapshot: the snapshot to delete. """ snap = self.client.get_snap(name=snapshot.name) self.client.delete_snap(snap) def _get_referenced_lun(self, existing_ref): if 'source-id' in existing_ref: lun = self.client.get_lun(lun_id=existing_ref['source-id']) elif 'source-name' in existing_ref: lun = self.client.get_lun(name=existing_ref['source-name']) else: reason = _('Reference must contain source-id or source-name key.') raise exception.ManageExistingInvalidReference( existing_ref=existing_ref, reason=reason) if lun is None or not lun.existed: raise exception.ManageExistingInvalidReference( existing_ref=existing_ref, reason=_("LUN doesn't exist.")) return lun def manage_existing(self, volume, existing_ref): """Manages an existing LUN in the array. The LUN should be in a manageable pool backend, otherwise return error. Rename the backend storage object so that it matches the `volume['name']` which is how drivers traditionally map between a cinder volume and the associated backend storage object. LUN ID or name are supported in `existing_ref`, like: .. code-block:: none existing_ref:{ 'source-id': } or .. code-block:: none existing_ref:{ 'source-name': } """ lun = self._get_referenced_lun(existing_ref) lun.modify(name=volume.name) return { 'provider_location': self._build_provider_location(lun_id=lun.get_id(), lun_type='lun'), 'provider_id': lun.get_id() } def manage_existing_get_size(self, volume, existing_ref): """Returns size of volume to be managed by `manage_existing`. The driver does some check here: 1. The LUN `existing_ref` should be managed by the `volume.host`. """ lun = self._get_referenced_lun(existing_ref) target_pool_name = utils.get_pool_name(volume) lun_pool_name = lun.pool.name if target_pool_name and lun_pool_name != target_pool_name: reason = (_('The imported LUN is in pool %(pool_name)s ' 'which is not managed by the host %(host)s.') % {'pool_name': lun_pool_name, 'host': volume.host}) raise exception.ManageExistingInvalidReference( existing_ref=existing_ref, reason=reason) return utils.byte_to_gib(lun.size_total) def _disconnect_device(self, conn): conn['connector'].disconnect_volume(conn['conn']['data'], conn['device']) def _connect_device(self, conn): return self.driver._connect_device(conn) @contextlib.contextmanager def _connect_resource(self, lun_or_snap, connector, res_id): """Connects to LUN or snapshot, and makes sure disconnect finally. :param lun_or_snap: the LUN or snapshot to connect/disconnect. :param connector: the host connector information. :param res_id: the ID of the LUN or snapshot. 
:return: the connection information, in a dict with format like (same as the one returned by `_connect_device`): { 'conn': , 'device': , 'connector': } """ init_conn_func = functools.partial(self._initialize_connection, lun_or_snap, connector, res_id) term_conn_func = functools.partial(self._terminate_connection, lun_or_snap, connector) with utils.assure_cleanup(init_conn_func, term_conn_func, False) as conn_info: conn_device_func = functools.partial(self._connect_device, conn_info) with utils.assure_cleanup(conn_device_func, self._disconnect_device, True) as attach_info: yield attach_info def _dd_copy(self, vol_params, src_snap, src_lun=None): """Creates a volume via copying a Unity snapshot. It attaches the `volume` and `snap`, then use `dd` to copy the data from the Unity snapshot to the `volume`. """ dest_lun = self.client.create_lun( name=vol_params.name, size=vol_params.size, pool=vol_params.pool, description=vol_params.description, io_limit_policy=vol_params.io_limit_policy, is_thin=False if vol_params.is_thick else None, is_compressed=vol_params.is_compressed) src_id = src_snap.get_id() try: conn_props = volume_utils.brick_get_connector_properties( self.config.use_multipath_for_image_xfer, self.config.enforce_multipath_for_image_xfer) with self._connect_resource(dest_lun, conn_props, vol_params.volume_id) as dest_info, \ self._connect_resource(src_snap, conn_props, src_id) as src_info: if src_lun is None: # If size is not specified, need to get the size from LUN # of snapshot. size_in_m = utils.byte_to_mib(src_snap.size) else: size_in_m = utils.byte_to_mib(src_lun.size_total) volume_utils.copy_volume( src_info['device']['path'], dest_info['device']['path'], size_in_m, self.driver.configuration.volume_dd_blocksize, sparse=True) except Exception: with excutils.save_and_reraise_exception(): utils.ignore_exception(self.client.delete_lun, dest_lun.get_id()) LOG.error('Failed to create cloned volume: %(vol_id)s, ' 'from source unity snapshot: %(snap_name)s.', {'vol_id': vol_params.volume_id, 'snap_name': src_snap.name}) return dest_lun def _thin_clone(self, vol_params, src_snap, src_lun=None): tc_src = src_snap if src_lun is None else src_lun try: LOG.debug('Try to thin clone from %s.', tc_src.name) lun = self.client.thin_clone( tc_src, vol_params.name, description=vol_params.description, io_limit_policy=vol_params.io_limit_policy, new_size_gb=vol_params.size) except storops_ex.UnityThinCloneLimitExceededError: LOG.info('Number of thin clones of base LUN exceeds system ' 'limit, dd-copy a new one and thin clone from it.') # Copy via dd if thin clone meets the system limit hidden = copy.copy(vol_params) hidden.name = 'hidden-%s' % vol_params.name hidden.description = 'hidden-%s' % vol_params.description copied_lun = self._dd_copy(hidden, src_snap, src_lun=src_lun) LOG.debug('Notify storops the dd action of lun: %(src_name)s. And ' 'the newly copied lun is: %(copied)s.', {'src_name': tc_src.name, 'copied': copied_lun.name}) storops.TCHelper.notify(tc_src, storops.ThinCloneActionEnum.DD_COPY, copied_lun) lun = self.client.thin_clone( copied_lun, vol_params.name, description=vol_params.description, io_limit_policy=vol_params.io_limit_policy, new_size_gb=vol_params.size) except storops_ex.SystemAPINotSupported: # Thin clone not support on array version before Merlin lun = self._dd_copy(vol_params, src_snap, src_lun=src_lun) LOG.debug( 'Volume copied via dd because array OE is too old to support ' 'thin clone api. 
source snap: %(src_snap)s, lun: %(src_lun)s.', {'src_snap': src_snap.name, 'src_lun': 'Unknown' if src_lun is None else src_lun.name}) except storops_ex.UnityThinCloneNotAllowedError: # Thin clone not allowed on some resources, # like thick luns and their snaps lun = self._dd_copy(vol_params, src_snap, src_lun=src_lun) LOG.debug( 'Volume copied via dd because source snap/lun is not allowed ' 'to thin clone, i.e. it is thick. source snap: %(src_snap)s, ' 'lun: %(src_lun)s.', {'src_snap': src_snap.name, 'src_lun': 'Unknown' if src_lun is None else src_lun.name}) return lun def create_volume_from_snapshot(self, volume, snapshot): snap = self.client.get_snap(snapshot.name) params = VolumeParams(self, volume) lun = self._thin_clone(params, snap) model_update = self.makeup_model(lun.get_id(), is_snap_lun=True) if params.is_replication_enabled: model_update = self.setup_replications(lun, model_update) return model_update def create_cloned_volume(self, volume, src_vref): """Creates cloned volume. 1. Take an internal snapshot of source volume, and attach it. 2. Thin clone from the snapshot to a new volume. Note: there are several cases the thin clone will downgrade to `dd`, 2.1 Source volume is attached (in-use). 2.2 Array OE version doesn't support thin clone. 2.3 The current LUN family reaches the thin clone limits. 3. Delete the internal snapshot created in step 1. """ src_lun_id = self.get_lun_id(src_vref) if src_lun_id is None: raise exception.VolumeBackendAPIException( data=_( "LUN ID of source volume: %s not found.") % src_vref.name) src_lun = self.client.get_lun(lun_id=src_lun_id) src_snap_name = 'snap_clone_%s' % volume.id create_snap_func = functools.partial(self.client.create_snap, src_lun_id, src_snap_name) vol_params = VolumeParams(self, volume) with utils.assure_cleanup(create_snap_func, self.client.delete_snap, True) as src_snap: LOG.debug('Internal snapshot for clone is created, ' 'name: %(name)s, id: %(id)s.', {'name': src_snap_name, 'id': src_snap.get_id()}) if src_vref.volume_attachment: lun = self._dd_copy(vol_params, src_snap, src_lun=src_lun) LOG.debug('Volume copied using dd because source volume: ' '%(name)s is attached: %(attach)s.', {'name': src_vref.name, 'attach': src_vref.volume_attachment}) model_update = self.makeup_model(lun.get_id()) else: lun = self._thin_clone(vol_params, src_snap, src_lun=src_lun) model_update = self.makeup_model(lun.get_id(), is_snap_lun=True) if vol_params.is_replication_enabled: model_update = self.setup_replications(lun, model_update) return model_update def get_pool_name(self, volume): return self.client.get_pool_name(volume.name) def get_pool_id_by_name(self, name): return self.client.get_pool_id_by_name(name=name) @volume_utils.trace def initialize_connection_snapshot(self, snapshot, connector): snap = self.client.get_snap(snapshot.name) return self._initialize_connection(snap, connector, snapshot.id) @volume_utils.trace def terminate_connection_snapshot(self, snapshot, connector): snap = self.client.get_snap(snapshot.name) return self._terminate_connection(snap, connector) @volume_utils.trace def restore_snapshot(self, volume, snapshot): return self.client.restore_snapshot(snapshot.name) def migrate_volume(self, volume, host, extra_specs=None): """Leverage the Unity move session functionality. This method is invoked at the source backend. :param extra_specs: Instance of ExtraSpecs. The new volume will be changed to align with the new extra specs. 
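        :param host: the destination host dict from the scheduler; an
            illustrative (hypothetical) value is
            {'host': 'stack@unity_backend#pool_2', 'capabilities': {...}}.
        :return: (True, model_update) when the array-assisted migration
            succeeds, otherwise (False, None) so that cinder falls back to
            host-assisted migration.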
""" log_params = { 'name': volume.name, 'src_host': volume.host, 'dest_host': host['host'], 'extra_specs': extra_specs, } LOG.info('Migrate Volume: %(name)s, host: %(src_host)s, destination: ' '%(dest_host)s, extra_specs: %(extra_specs)s', log_params) src_backend = utils.get_backend_name_from_volume(volume) dest_backend = utils.get_backend_name_from_host(host) if src_backend != dest_backend: LOG.debug('Cross-backends migration not supported by Unity ' 'driver. Falling back to host-assisted migration.') return False, None lun_id = self.get_lun_id(volume) provision = None if extra_specs: provision = extra_specs.get(utils.PROVISIONING_TYPE) dest_pool_name = utils.get_pool_name_from_host(host) dest_pool_id = self.get_pool_id_by_name(dest_pool_name) if self.client.migrate_lun(lun_id, dest_pool_id, provision): LOG.debug('Volume migrated successfully.') model_update = {} return True, model_update LOG.debug('Volume migrated failed. Falling back to ' 'host-assisted migration.') return False, None def create_group(self, group): """Creates a generic group. :param group: group information """ cg_name = group.id description = group.description if group.description else group.name LOG.info('Create group: %(name)s, description: %(description)s', {'name': cg_name, 'description': description}) self.client.create_cg(cg_name, description=description) return {'status': fields.GroupStatus.AVAILABLE} def delete_group(self, group): """Deletes the generic group. :param group: the group to delete """ # Deleting cg will also delete all the luns in it. group_id = group.id if self.client.is_cg_replicated(group_id): self.client.delete_cg_rep_session(group_id) self.client.delete_cg(group_id) return None, None def update_group(self, group, add_volumes, remove_volumes): add_lun_ids = (set(map(self.get_lun_id, add_volumes)) if add_volumes else set()) remove_lun_ids = (set(map(self.get_lun_id, remove_volumes)) if remove_volumes else set()) self.client.update_cg(group.id, add_lun_ids, remove_lun_ids) return {'status': fields.GroupStatus.AVAILABLE}, None, None def copy_luns_in_group(self, group, volumes, src_cg_snap, src_volumes): # Use dd to copy data here. The reason why not using thinclone is: # 1. Cannot use cg thinclone due to the tight couple between source # group and cloned one. # 2. Cannot use lun thinclone due to clone lun in cg is not supported. 
lun_snaps = self.client.filter_snaps_in_cg_snap(src_cg_snap.id) # Make sure the `lun_snaps` is as order of `src_volumes` src_lun_ids = [self.get_lun_id(volume) for volume in src_volumes] lun_snaps.sort(key=lambda snap: src_lun_ids.index(snap.lun.id)) dest_luns = [self._dd_copy(VolumeParams(self, dest_volume), lun_snap) for dest_volume, lun_snap in zip(volumes, lun_snaps)] self.client.create_cg(group.id, lun_add=dest_luns) return ({'status': fields.GroupStatus.AVAILABLE}, [{'id': dest_volume.id, 'status': fields.GroupStatus.AVAILABLE} for dest_volume in volumes]) def create_group_from_snap(self, group, volumes, group_snapshot, snapshots): src_cg_snap = self.client.get_snap(group_snapshot.id) src_vols = ([snap.volume for snap in snapshots] if snapshots else []) return self.copy_luns_in_group(group, volumes, src_cg_snap, src_vols) def create_cloned_group(self, group, volumes, source_group, source_vols): src_group_snap_name = 'snap_clone_group_{}'.format(source_group.id) create_snap_func = functools.partial(self.client.create_cg_snap, source_group.id, src_group_snap_name) with utils.assure_cleanup(create_snap_func, self.client.delete_snap, True) as src_cg_snap: LOG.debug('Internal group snapshot for clone is created, ' 'name: %(name)s, id: %(id)s.', {'name': src_group_snap_name, 'id': src_cg_snap.get_id()}) source_vols = source_vols if source_vols else [] return self.copy_luns_in_group(group, volumes, src_cg_snap, source_vols) def create_group_snapshot(self, group_snapshot, snapshots): self.client.create_cg_snap(group_snapshot.group_id, snap_name=group_snapshot.id) model_update = {'status': fields.GroupStatus.AVAILABLE} snapshots_model_update = [{'id': snapshot.id, 'status': fields.SnapshotStatus.AVAILABLE} for snapshot in snapshots] return model_update, snapshots_model_update def delete_group_snapshot(self, group_snapshot): cg_snap = self.client.get_snap(group_snapshot.id) self.client.delete_snap(cg_snap) return None, None def enable_replication(self, context, group, volumes): """Enable the group replication.""" @cinder_utils.retry(exception.InvalidGroup, interval=20, retries=6) def _wait_until_cg_not_replicated(_client, _cg_id): cg = _client.get_cg(name=_cg_id) if cg.check_cg_is_replicated(): msg = _('The remote cg (%s) is still in replication status, ' 'maybe the source cg was just deleted, ' 'retrying.') % group_id LOG.info(msg) raise exception.InvalidGroup(reason=msg) return cg group_update = {} group_id = group.id if not volumes: LOG.warning('There is no Volume in group: %s, cannot enable ' 'group replication', group_id) return group_update, [] # check whether the group was created as cg in unity group_is_cg = utils.group_is_cg(group) if not group_is_cg: msg = (_('Cannot enable replication on generic group ' '%(group_id)s, need to use CG type instead ' '(need to enable consistent_group_snapshot_enabled in ' 'the group type).') % {'group_id': group_id}) raise exception.InvalidGroupType(reason=msg) cg = self.client.get_cg(name=group_id) try: if not cg.check_cg_is_replicated(): rep_devices = self.replication_manager.replication_devices for backend_id, dst in rep_devices.items(): remote_serial_number = dst.adapter.serial_number max_time = dst.max_time_out_of_sync pool_id = dst.destination_pool.get_id() _client = dst.adapter.client remote_system = self.client.get_remote_system( remote_serial_number) # check if remote cg exists and delete it # before enable replication remote_cg = _wait_until_cg_not_replicated(_client, group_id) remote_cg.delete() # create cg replication session 
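                    # create_cg_replication() (defined in client.py) provisions
                    # a destination cg with the same name on the remote system
                    # and starts the replication session using the configured
                    # max_time_out_of_sync (in minutes; 0 means a synchronous
                    # session).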
self.client.create_cg_replication( group_id, pool_id, remote_system, max_time) group_update.update({ 'replication_status': fields.ReplicationStatus.ENABLED}) else: LOG.info('group: %s is already in replication, no need to ' 'enable again.', group_id) except Exception as e: group_update.update({ 'replication_status': fields.ReplicationStatus.ERROR}) LOG.error("Error enabling replication on group %(group)s. " "Exception received: %(e)s.", {'group': group.id, 'e': e}) return group_update, None def disable_replication(self, context, group, volumes): """Disable the group replication.""" group_update = {} group_id = group.id if not volumes: # Return if empty group LOG.warning('There is no Volume in group: %s, cannot disable ' 'group replication', group_id) return group_update, [] group_is_cg = utils.group_is_cg(group) if not group_is_cg: msg = (_('Cannot disable replication on generic group ' '%(group_id)s, need use CG type instead of ' 'that (need enable ' 'consistent_group_snapshot_enabled in ' 'group type).') % {'group_id': group_id}) raise exception.InvalidGroupType(reason=msg) try: if self.client.is_cg_replicated(group_id): # delete rep session if exists self.client.delete_cg_rep_session(group_id) if not self.client.is_cg_replicated(group_id): LOG.info('Group is not in replication, ' 'not need to disable replication again.') group_update.update({ 'replication_status': fields.ReplicationStatus.DISABLED}) except Exception as e: group_update.update({ 'replication_status': fields.ReplicationStatus.ERROR}) LOG.error("Error disabling replication on group %(group)s. " "Exception received: %(e)s.", {'group': group.id, 'e': e}) return group_update, None def failover_replication(self, context, group, volumes, secondary_id): """"Fail-over the consistent group.""" group_update = {} volume_update_list = [] if not volumes: # Return if empty group return group_update, volume_update_list group_is_cg = utils.group_is_cg(group) group_id = group.id if not group_is_cg: msg = (_('Cannot failover replication on generic group ' '%(group_id)s, need use CG type instead of ' 'that (need enable ' 'consistent_group_snapshot_enabled in ' 'group type).') % {'group_id': group_id}) raise exception.InvalidGroupType(reason=msg) real_secondary_id = random.choice( list(self.replication_manager.replication_devices)) group_update = {'replication_status': group.replication_status} if self.client.is_cg_replicated(group_id): try: if secondary_id != 'default': try: # Planed failover after sync date when the source unity # is in health status self.client.failover_cg_rep_session(group_id, True) except Exception as ex: LOG.warning('ERROR happened when failover from source ' 'unity, issue details: %s. Try failover ' 'from target unity', ex) # Something wrong with the source unity, try failover # from target unity without sync date _adapter = self.replication_manager.replication_devices [real_secondary_id].adapter _client = _adapter.client _client.failover_cg_rep_session(group_id, False) rep_status = fields.ReplicationStatus.FAILED_OVER else: # start failback when secondary_id is 'default' _adapter = self.replication_manager.replication_devices[ real_secondary_id].adapter _client = _adapter.client _client.failback_cg_rep_session(group_id) rep_status = fields.ReplicationStatus.ENABLED except Exception as ex: rep_status = fields.ReplicationStatus.ERROR LOG.error("Error failover replication on group %(group)s. 
" "Exception received: %(e)s.", {'group': group_id, 'e': ex}) group_update['replication_status'] = rep_status for volume in volumes: volume_update = { 'id': volume.id, 'replication_status': rep_status} volume_update_list.append(volume_update) return group_update, volume_update_list def get_replication_error_status(self, context, groups): """The failover only happens manually, no need to update the status.""" return [], [] @volume_utils.trace def failover(self, volumes, secondary_id=None, groups=None): # TODO(ryan) support group failover after group bp merges # https://review.opendev.org/#/c/574119/ if secondary_id is None: LOG.debug('No secondary specified when failover. ' 'Randomly choose a secondary') secondary_id = random.choice( list(self.replication_manager.replication_devices)) LOG.debug('Chose %s as secondary', secondary_id) is_failback = secondary_id == 'default' def _failover_or_back(volume): LOG.debug('Failing over volume: %(vol)s to secondary id: ' '%(sec_id)s', vol=volume.name, sec_id=secondary_id) model_update = { 'volume_id': volume.id, 'updates': {} } if not volume.replication_driver_data: LOG.error('Empty replication_driver_data of volume: %s, ' 'replication session name should be in it.', volume.name) return utils.error_replication_status(model_update) rep_data = utils.load_replication_data( volume.replication_driver_data) if is_failback: # Failback executed on secondary backend which is currently # active. _adapter = self.replication_manager.default_device.adapter _client = self.replication_manager.active_adapter.client rep_name = rep_data[self.replication_manager.active_backend_id] else: # Failover executed on secondary backend because primary could # die. _adapter = self.replication_manager.replication_devices[ secondary_id].adapter _client = _adapter.client rep_name = rep_data[secondary_id] try: rep_session = _client.get_replication_session(name=rep_name) if is_failback: _client.failback_replication(rep_session) new_model = _adapter.makeup_model( rep_session.src_resource_id) else: _client.failover_replication(rep_session) new_model = _adapter.makeup_model( rep_session.dst_resource_id) model_update['updates'].update(new_model) self.replication_manager.failover_service(secondary_id) return model_update except client.ClientReplicationError as ex: LOG.error('Failover failed, volume: %(vol)s, secondary id: ' '%(sec_id)s, error: %(err)s', vol=volume.name, sec_id=secondary_id, err=ex) return utils.error_replication_status(model_update) return (secondary_id, [_failover_or_back(volume) for volume in volumes], []) class ISCSIAdapter(CommonAdapter): protocol = PROTOCOL_ISCSI driver_name = 'UnityISCSIDriver' driver_volume_type = 'iscsi' def get_all_ports(self): return self.client.get_ethernet_ports() def get_connector_uids(self, connector): return utils.extract_iscsi_uids(connector) def get_connection_info(self, hlu, host, connector): targets = self.client.get_iscsi_target_info(self.allowed_ports) if not targets: msg = _("There is no accessible iSCSI targets on the system.") raise exception.VolumeBackendAPIException(data=msg) one_target = random.choice(targets) portals = [a['portal'] for a in targets] iqns = [a['iqn'] for a in targets] data = { 'target_luns': [hlu] * len(portals), 'target_iqns': iqns, 'target_portals': portals, 'target_lun': hlu, 'target_portal': one_target['portal'], 'target_iqn': one_target['iqn'], } return data class FCAdapter(CommonAdapter): protocol = PROTOCOL_FC driver_name = 'UnityFCDriver' driver_volume_type = 'fibre_channel' def __init__(self, 
version=None): super(FCAdapter, self).__init__(version=version) self.lookup_service = None def do_setup(self, driver, config): super(FCAdapter, self).do_setup(driver, config) self.lookup_service = utils.create_lookup_service() def get_all_ports(self): return self.client.get_fc_ports() def get_connector_uids(self, connector): return utils.extract_fc_uids(connector) @property def auto_zone_enabled(self): return self.lookup_service is not None def get_connection_info(self, hlu, host, connector): targets = self.client.get_fc_target_info( host, logged_in_only=(not self.auto_zone_enabled), allowed_ports=self.allowed_ports) if not targets: msg = _("There is no accessible fibre channel targets on the " "system.") raise exception.VolumeBackendAPIException(data=msg) if self.auto_zone_enabled: data = self._get_fc_zone_info(connector['wwpns'], targets) else: data = { 'target_wwn': targets, } data['target_lun'] = hlu return data def filter_targets_by_host(self, host): if self.auto_zone_enabled and not host.host_luns: return self.client.get_fc_target_info( host=host, logged_in_only=False, allowed_ports=self.allowed_ports) return [] def get_terminate_connection_info(self, connector, targets): # For FC, terminate_connection needs to return data to zone manager # which would clean the zone based on the data. if targets: return self._get_fc_zone_info(connector['wwpns'], targets) return {} def _get_fc_zone_info(self, initiator_wwns, target_wwns): mapping = self.lookup_service.get_device_mapping_from_network( initiator_wwns, target_wwns) targets, itor_tgt_map = utils.convert_to_itor_tgt_map(mapping) return { 'target_wwn': targets, 'initiator_target_map': itor_tgt_map, } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/drivers/dell_emc/unity/client.py0000664000175000017500000005255100000000000024074 0ustar00zuulzuul00000000000000# Copyright (c) 2016 Dell Inc. or its subsidiaries. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
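# Usage sketch of the client defined below (the host address and credentials
# are hypothetical; real values come from the driver configuration):
#
#     client = UnityClient('192.168.1.50', 'admin', 'secret', verify_cert=False)
#     pool = client.system.get_pool(name='pool_1')
#     lun = client.create_lun('volume-1', size=10, pool=pool)   # size in GiB
#     snap = client.create_snap(lun.get_id(), name='snap-1')
#
# Note that `client.system` builds the storops UnitySystem lazily, so simply
# constructing UnityClient does not contact the array.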
from oslo_log import log from oslo_utils import excutils from oslo_utils import importutils storops = importutils.try_import('storops') if storops: from storops import exception as storops_ex else: # Set storops_ex to be None for unit test storops_ex = None from cinder import coordination from cinder import exception from cinder.i18n import _ from cinder.volume.drivers.dell_emc.unity import utils LOG = log.getLogger(__name__) class UnityClient(object): def __init__(self, host, username, password, verify_cert=True): if storops is None: msg = _('Python package storops is not installed which ' 'is required to run Unity driver.') raise exception.VolumeBackendAPIException(data=msg) self._system = None self.host = host self.username = username self.password = password self.verify_cert = verify_cert self.host_cache = {} @property def system(self): if self._system is None: self._system = storops.UnitySystem( host=self.host, username=self.username, password=self.password, verify=self.verify_cert) return self._system def get_serial(self): return self.system.serial_number def create_lun(self, name, size, pool, description=None, io_limit_policy=None, is_thin=None, is_compressed=None, cg_name=None, tiering_policy=None): """Creates LUN on the Unity system. :param name: lun name :param size: lun size in GiB :param pool: UnityPool object represent to pool to place the lun :param description: lun description :param io_limit_policy: io limit on the LUN :param is_thin: if False, a thick LUN will be created :param is_compressed: is compressed LUN enabled :param tiering_policy: tiering policy for the LUN :param cg_name: the name of cg to join if any :return: UnityLun object """ try: lun = pool.create_lun(lun_name=name, size_gb=size, description=description, io_limit_policy=io_limit_policy, is_thin=is_thin, is_compression=is_compressed, tiering_policy=tiering_policy) except storops_ex.UnityLunNameInUseError: LOG.debug("LUN %s already exists. Return the existing one.", name) lun = self.system.get_lun(name=name) return lun def thin_clone(self, lun_or_snap, name, io_limit_policy=None, description=None, new_size_gb=None): try: lun = lun_or_snap.thin_clone( name=name, io_limit_policy=io_limit_policy, description=description) except storops_ex.UnityLunNameInUseError: LOG.debug("LUN(thin clone) %s already exists. " "Return the existing one.", name) lun = self.system.get_lun(name=name) if new_size_gb is not None and new_size_gb > lun.total_size_gb: lun = self.extend_lun(lun.get_id(), new_size_gb) return lun def delete_lun(self, lun_id): """Deletes LUN on the Unity system. :param lun_id: id of the LUN """ try: lun = self.system.get_lun(_id=lun_id) except storops_ex.UnityResourceNotFoundError: LOG.debug("Cannot get LUN %s from unity. Do nothing.", lun_id) return def _delete_lun_if_exist(force_snap_delete=False): """Deletes LUN, skip if it doesn't exist.""" try: lun.delete(force_snap_delete=force_snap_delete) except storops_ex.UnityResourceNotFoundError: LOG.debug("LUN %s doesn't exist. Deletion is not needed.", lun_id) try: _delete_lun_if_exist() except storops_ex.UnityDeleteLunInReplicationError: LOG.info("LUN %s is participating in replication sessions. " "Delete replication sessions first", lun_id) self.delete_lun_replications(lun_id) # It could fail if not pass in force_snap_delete when # deleting the lun immediately after # deleting the replication sessions. 
_delete_lun_if_exist(force_snap_delete=True) def delete_lun_replications(self, lun_id): LOG.debug("Deleting all the replication sessions which are from " "lun %s", lun_id) try: rep_sessions = self.system.get_replication_session( src_resource_id=lun_id) except storops_ex.UnityResourceNotFoundError: LOG.debug("No replication session found from lun %s. Do nothing.", lun_id) else: for session in rep_sessions: try: session.delete() except storops_ex.UnityResourceNotFoundError: LOG.debug("Replication session %s doesn't exist. " "Skip the deletion.", session.get_id()) def get_lun(self, lun_id=None, name=None): """Gets LUN on the Unity system. :param lun_id: id of the LUN :param name: name of the LUN :return: `UnityLun` object """ lun = None if lun_id is None and name is None: LOG.warning( "Both lun_id and name are None to get LUN. Return None.") else: try: lun = self.system.get_lun(_id=lun_id, name=name) except storops_ex.UnityResourceNotFoundError: LOG.warning( "LUN id=%(id)s, name=%(name)s doesn't exist.", {'id': lun_id, 'name': name}) return lun def extend_lun(self, lun_id, size_gib): lun = self.system.get_lun(lun_id) try: lun.total_size_gb = size_gib except storops_ex.UnityNothingToModifyError: LOG.debug("LUN %s is already expanded. LUN expand is not needed.", lun_id) return lun def migrate_lun(self, lun_id, dest_pool_id, dest_provision=None): # dest_provision possible value ('thin', 'thick', 'compressed') lun = self.system.get_lun(lun_id) dest_pool = self.system.get_pool(dest_pool_id) is_thin = True if dest_provision == 'thin' else None if dest_provision == 'compressed': # compressed needs work with thin is_compressed = True is_thin = True else: is_compressed = False if dest_provision == 'thick': # thick needs work with uncompressed is_thin = False is_compressed = False return lun.migrate(dest_pool, is_compressed=is_compressed, is_thin=is_thin) def get_pools(self): """Gets all storage pools on the Unity system. :return: list of UnityPool object """ return self.system.get_pool() def create_snap(self, src_lun_id, name=None): """Creates a snapshot of LUN on the Unity system. :param src_lun_id: the source LUN ID of the snapshot. :param name: the name of the snapshot. The Unity system will give one if `name` is None. """ try: lun = self.get_lun(lun_id=src_lun_id) snap = lun.create_snap(name, is_auto_delete=False) except storops_ex.UnitySnapNameInUseError as err: LOG.debug( "Snap %(snap_name)s already exists on LUN %(lun_id)s. " "Return the existing one. Message: %(err)s", {'snap_name': name, 'lun_id': src_lun_id, 'err': err}) snap = self.get_snap(name=name) return snap @staticmethod def delete_snap(snap): if snap is None: LOG.debug("Snap to delete is None, skipping deletion.") return try: snap.delete() except storops_ex.UnityResourceNotFoundError as err: LOG.debug("Snap %(snap_name)s may be deleted already. " "Message: %(err)s", {'snap_name': snap.name, 'err': err}) except storops_ex.UnityDeleteAttachedSnapError as err: with excutils.save_and_reraise_exception(): LOG.warning("Failed to delete snapshot %(snap_name)s " "which is in use. Message: %(err)s", {'snap_name': snap.name, 'err': err}) def get_snap(self, name=None): try: return self.system.get_snap(name=name) except storops_ex.UnityResourceNotFoundError as err: LOG.warning("Snapshot %(name)s doesn't exist. 
Message: %(err)s", {'name': name, 'err': err}) return None def lun_has_snapshot(self, lun): snaps = lun.snapshots return len(snaps) != 0 @coordination.synchronized('{self.host}-{name}') def create_host(self, name): return self.create_host_wo_lock(name) def create_host_wo_lock(self, name): """Provides existing host if exists else create one.""" if name not in self.host_cache: try: host = self.system.get_host(name=name) except storops_ex.UnityResourceNotFoundError: LOG.debug('Host %s not found. Create a new one.', name) host = self.system.create_host(name=name) self.host_cache[name] = host else: host = self.host_cache[name] return host def delete_host_wo_lock(self, host): host.delete() del self.host_cache[host.name] def update_host_initiators(self, host, uids): """Updates host with the supplied uids.""" host_initiators_ids = self.get_host_initiator_ids(host) un_registered = [h for h in uids if h not in host_initiators_ids] if un_registered: for uid in un_registered: try: host.add_initiator(uid, force_create=True) except storops_ex.UnityHostInitiatorExistedError: # This make concurrent modification of # host initiators safe LOG.debug( 'The uid(%s) was already in ' '%s.', uid, host.name) host.update() # Update host cached with new initiators. self.host_cache[host.name] = host return host @staticmethod def get_host_initiator_ids(host): fc = host.fc_host_initiators fc_ids = [] if fc is None else fc.initiator_id iscsi = host.iscsi_host_initiators iscsi_ids = [] if iscsi is None else iscsi.initiator_id return fc_ids + iscsi_ids @staticmethod def attach(host, lun_or_snap): """Attaches a `UnityLun` or `UnitySnap` to a `UnityHost`. :param host: `UnityHost` object :param lun_or_snap: `UnityLun` or `UnitySnap` object :return: hlu """ try: return host.attach(lun_or_snap, skip_hlu_0=True) except storops_ex.UnityResourceAlreadyAttachedError: return host.get_hlu(lun_or_snap) @staticmethod def detach(host, lun_or_snap): """Detaches a `UnityLun` or `UnitySnap` from a `UnityHost`. :param host: `UnityHost` object :param lun_or_snap: `UnityLun` object """ lun_or_snap.update() host.detach(lun_or_snap) @staticmethod def detach_all(lun): """Detaches a `UnityLun` from all hosts. :param lun: `UnityLun` object """ lun.update() lun.detach_from(host=None) def get_ethernet_ports(self): return self.system.get_ethernet_port() def get_iscsi_target_info(self, allowed_ports=None): portals = self.system.get_iscsi_portal() portals = portals.shadow_copy(port_ids=allowed_ports) return [{'portal': utils.convert_ip_to_portal(p.ip_address), 'iqn': p.iscsi_node.name} for p in portals] def get_fc_ports(self): return self.system.get_fc_port() def get_fc_target_info(self, host=None, logged_in_only=False, allowed_ports=None): """Get the ports WWN of FC on array. :param host: the host to which the FC port is registered. :param logged_in_only: whether to retrieve only the logged-in port. :return: the WWN of FC ports. For example, the FC WWN on array is like: 50:06:01:60:89:20:09:25:50:06:01:6C:09:20:09:25. This function removes the colons and returns the last 16 bits: 5006016C09200925. """ wwns = set() if logged_in_only: for paths in filter(None, host.fc_host_initiators.paths): paths = paths.shadow_copy(is_logged_in=True) # `paths.fc_port` is just a list, not a UnityFcPortList, # so use filter instead of shadow_copy here. 
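                # Illustrative example of the format returned by this
                # function: an array-side WWN such as
                # '50:06:01:60:89:20:09:25:50:06:01:6C:09:20:09:25' has its
                # colons stripped and only the last 16 hexadecimal characters
                # (the port WWN) are kept, giving '5006016C09200925'.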
wwns.update(p.wwn.upper() for p in filter( lambda fcp: (allowed_ports is None or fcp.get_id() in allowed_ports), paths.fc_port)) else: ports = self.get_fc_ports() ports = ports.shadow_copy(port_ids=allowed_ports) wwns.update(p.wwn.upper() for p in ports) return [wwn.replace(':', '')[16:] for wwn in wwns] def create_io_limit_policy(self, name, max_iops=None, max_kbps=None): try: limit = self.system.create_io_limit_policy( name, max_iops=max_iops, max_kbps=max_kbps) except storops_ex.UnityPolicyNameInUseError: limit = self.system.get_io_limit_policy(name=name) return limit def get_io_limit_policy(self, qos_specs): limit_policy = None if qos_specs is not None: limit_policy = self.create_io_limit_policy( qos_specs['id'], qos_specs.get(utils.QOS_MAX_IOPS), qos_specs.get(utils.QOS_MAX_BWS)) return limit_policy def get_pool_id_by_name(self, name): pool = self.system.get_pool(name=name) return pool.get_id() def get_pool_name(self, lun_name): lun = self.system.get_lun(name=lun_name) return lun.pool_name def restore_snapshot(self, snap_name): snap = self.get_snap(snap_name) return snap.restore(delete_backup=True) def create_cg(self, name, description=None, lun_add=None): try: cg = self.system.create_cg(name, description=description, lun_add=lun_add) except storops_ex.UnityConsistencyGroupNameInUseError: LOG.debug('CG %s already exists. Return the existing one.', name) cg = self.system.get_cg(name=name) return cg def get_cg(self, name): try: cg = self.system.get_cg(name=name) except storops_ex.UnityResourceNotFoundError: LOG.info('CG %s not found.', name) return None else: return cg def delete_cg(self, name): cg = self.get_cg(name) if cg: cg.delete() # Deleting cg will also delete the luns in it def update_cg(self, name, add_lun_ids, remove_lun_ids): cg = self.get_cg(name) cg.update_lun(add_luns=[self.get_lun(lun_id=lun_id) for lun_id in add_lun_ids], remove_luns=[self.get_lun(lun_id=lun_id) for lun_id in remove_lun_ids]) def create_cg_snap(self, cg_name, snap_name=None): cg = self.get_cg(cg_name) # Creating snap of cg will create corresponding snaps of luns in it return cg.create_snap(name=snap_name, is_auto_delete=False) def filter_snaps_in_cg_snap(self, cg_snap_id): return self.system.get_snap(snap_group=cg_snap_id).list def create_cg_replication(self, cg_name, pool_id, remote_system, max_time_out_of_sync): # Creates a new cg on remote system and sets up replication to it. 
src_cg = self.get_cg(cg_name) src_luns = src_cg.luns return src_cg.replicate_cg_with_dst_resource_provisioning( max_time_out_of_sync, src_luns, pool_id, dst_cg_name=cg_name, remote_system=remote_system) def is_cg_replicated(self, cg_name): src_cg = self.get_cg(cg_name) return src_cg.check_cg_is_replicated() def delete_cg_rep_session(self, cg_name): src_cg = self.get_cg(cg_name) rep_sessions = self.get_replication_session(src_resource_id=src_cg.id) for rep_session in rep_sessions: rep_session.delete() def failover_cg_rep_session(self, cg_name, sync): src_cg = self.get_cg(cg_name) rep_sessions = self.get_replication_session(src_resource_id=src_cg.id) for rep_session in rep_sessions: rep_session.failover(sync=sync) def failback_cg_rep_session(self, cg_name): cg = self.get_cg(cg_name) # failback starts from remote replication session rep_sessions = self.get_replication_session(dst_resource_id=cg.id) for rep_session in rep_sessions: rep_session.failback(force_full_copy=True) @staticmethod def create_replication(src_lun, max_time_out_of_sync, dst_pool_id, remote_system): """Creates a new lun on remote system and sets up replication to it.""" return src_lun.replicate_with_dst_resource_provisioning( max_time_out_of_sync, dst_pool_id, remote_system=remote_system, dst_lun_name=src_lun.name) def get_remote_system(self, name=None): """Gets remote system on the Unity system. :param name: remote system name. :return: remote system. """ try: return self.system.get_remote_system(name=name) except storops_ex.UnityResourceNotFoundError: LOG.warning("Not found remote system with name %s. Return None.", name) return None def get_replication_session(self, name=None, src_resource_id=None, dst_resource_id=None): """Gets replication session via its name. :param name: replication session name. :param src_resource_id: replication session's src_resource_id. :param dst_resource_id: replication session's dst_resource_id. :return: replication session. """ try: return self.system.get_replication_session( name=name, src_resource_id=src_resource_id, dst_resource_id=dst_resource_id) except storops_ex.UnityResourceNotFoundError: raise ClientReplicationError( 'Replication session with name %(name)s not found.' % {'name': name}) def failover_replication(self, rep_session): """Fails over a replication session. :param rep_session: replication session to fail over. """ name = rep_session.name LOG.debug('Failing over replication: %s', name) try: # In OpenStack, only support to failover triggered from secondary # backend because the primary could be down. Then `sync=False` # is required here which means it won't sync from primary to # secondary before failover. return rep_session.failover(sync=False) except storops_ex.UnityException as ex: raise ClientReplicationError( 'Failover of replication: %(name)s failed, ' 'error: %(err)s' % {'name': name, 'err': ex} ) LOG.debug('Replication: %s failed over', name) def failback_replication(self, rep_session): """Fails back a replication session. :param rep_session: replication session to fail back. """ name = rep_session.name LOG.debug('Failing back replication: %s', name) try: # If the replication was failed-over before initial copy done, # following failback will fail without `force_full_copy` because # the primary # and secondary data have no common base. # `force_full_copy=True` has no effect if initial copy done. 
return rep_session.failback(force_full_copy=True) except storops_ex.UnityException as ex: raise ClientReplicationError( 'Failback of replication: %(name)s failed, ' 'error: %(err)s' % {'name': name, 'err': ex} ) LOG.debug('Replication: %s failed back', name) class ClientReplicationError(exception.CinderException): pass ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/drivers/dell_emc/unity/driver.py0000664000175000017500000003056400000000000024111 0ustar00zuulzuul00000000000000# Copyright (c) 2016 Dell Inc. or its subsidiaries. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Cinder Driver for Unity""" import functools from oslo_config import cfg from oslo_log import log as logging from cinder import interface from cinder.volume import configuration from cinder.volume import driver from cinder.volume.drivers.dell_emc.unity import adapter from cinder.volume.drivers.dell_emc.unity import replication from cinder.volume.drivers.san.san import san_opts from cinder.volume import volume_utils from cinder.zonemanager import utils as zm_utils LOG = logging.getLogger(__name__) CONF = cfg.CONF UNITY_OPTS = [ cfg.ListOpt('unity_storage_pool_names', default=[], help='A comma-separated list of storage pool names to be ' 'used.'), cfg.ListOpt('unity_io_ports', default=[], help='A comma-separated list of iSCSI or FC ports to be used. ' 'Each port can be Unix-style glob expressions.'), cfg.BoolOpt('remove_empty_host', default=False, help='To remove the host from Unity when the last LUN is ' 'detached from it. By default, it is False.')] CONF.register_opts(UNITY_OPTS, group=configuration.SHARED_CONF_GROUP) def skip_if_not_cg(func): @functools.wraps(func) def inner(self, *args, **kwargs): # Only used to decorating the second argument is `group` if volume_utils.is_group_a_cg_snapshot_type(args[1]): return func(self, *args, **kwargs) LOG.debug('Group is not a consistency group. Unity driver does ' 'nothing.') # This exception will let cinder handle it as a generic group raise NotImplementedError() return inner @interface.volumedriver class UnityDriver(driver.ManageableVD, driver.ManageableSnapshotsVD, driver.BaseVD): """Unity Driver. .. 
code-block:: none Version history: 1.0.0 - Initial version 2.0.0 - Add thin clone support 3.0.0 - Add IPv6 support 3.1.0 - Support revert to snapshot API 4.0.0 - Support remove empty host 4.2.0 - Support compressed volume 5.0.0 - Support storage assisted volume migration 6.0.0 - Support generic group and consistent group 6.1.0 - Support volume replication 7.0.0 - Support tiering policy 7.1.0 - Support consistency group replication 7.2.0 - Support retype volume """ VERSION = '07.02.00' VENDOR = 'Dell EMC' # ThirdPartySystems wiki page CI_WIKI_NAME = "DellEMC_Unity_CI" def __init__(self, *args, **kwargs): super(UnityDriver, self).__init__(*args, **kwargs) self.configuration.append_config_values(UNITY_OPTS) self.configuration.append_config_values(san_opts) # active_backend_id is not None if the service is failed over. self.active_backend_id = kwargs.get('active_backend_id') self.replication_manager = replication.ReplicationManager() protocol = self.configuration.storage_protocol if protocol.lower() == adapter.PROTOCOL_FC.lower(): self.protocol = adapter.PROTOCOL_FC else: self.protocol = adapter.PROTOCOL_ISCSI @staticmethod def get_driver_options(): return UNITY_OPTS def do_setup(self, context): self.replication_manager.do_setup(self) @property def adapter(self): return self.replication_manager.active_adapter def check_for_setup_error(self): pass def create_volume(self, volume): """Creates a volume.""" return self.adapter.create_volume(volume) def create_volume_from_snapshot(self, volume, snapshot): """Creates a volume from a snapshot.""" return self.adapter.create_volume_from_snapshot(volume, snapshot) def create_cloned_volume(self, volume, src_vref): """Creates a cloned volume.""" return self.adapter.create_cloned_volume(volume, src_vref) def extend_volume(self, volume, new_size): """Extend a volume.""" self.adapter.extend_volume(volume, new_size) def delete_volume(self, volume): """Deletes a volume.""" self.adapter.delete_volume(volume) def migrate_volume(self, context, volume, host): """Migrates a volume.""" return self.adapter.migrate_volume(volume, host) def retype(self, ctxt, volume, new_type, diff, host): """Convert the volume to be of the new type.""" return self.adapter.retype(ctxt, volume, new_type, diff, host) def create_snapshot(self, snapshot): """Creates a snapshot.""" self.adapter.create_snapshot(snapshot) def delete_snapshot(self, snapshot): """Deletes a snapshot.""" self.adapter.delete_snapshot(snapshot) def ensure_export(self, context, volume): """Driver entry point to get the export info for an existing volume.""" pass def create_export(self, context, volume, connector): """Driver entry point to get the export info for a new volume.""" pass def remove_export(self, context, volume): """Driver entry point to remove an export for a volume.""" pass def check_for_export(self, context, volume_id): """Make sure volume is exported.""" pass def initialize_connection(self, volume, connector): """Initializes the connection and returns connection info. Assign any created volume to a compute node/host so that it can be used from that host. The driver returns a driver_volume_type of 'fibre_channel'. The target_wwn can be a single entry or a list of wwns that correspond to the list of remote wwn(s) that will export the volume. The initiator_target_map is a map that represents the remote wwn(s) and a list of wwns which are visible to the remote wwn(s). Example return values: FC: .. 
code-block:: json { 'driver_volume_type': 'fibre_channel' 'data': { 'target_discovered': True, 'target_lun': 1, 'target_wwn': ['1234567890123', '0987654321321'], 'initiator_target_map': { '1122334455667788': ['1234567890123', '0987654321321'] } } } iSCSI: .. code-block:: json { 'driver_volume_type': 'iscsi' 'data': { 'target_discovered': True, 'target_iqns': ['iqn.2010-10.org.openstack:volume-00001', 'iqn.2010-10.org.openstack:volume-00002'], 'target_portals': ['127.0.0.1:3260', '127.0.1.1:3260'], 'target_luns': [1, 1], } } """ conn_info = self.adapter.initialize_connection(volume, connector) zm_utils.add_fc_zone(conn_info) return conn_info def terminate_connection(self, volume, connector, **kwargs): """Disallow connection from connector.""" conn_info = self.adapter.terminate_connection(volume, connector) zm_utils.remove_fc_zone(conn_info) return conn_info def _update_volume_stats(self): """Retrieve stats info from volume group.""" LOG.debug("Updating volume stats.") stats = self.adapter.update_volume_stats() stats['driver_version'] = self.VERSION stats['vendor_name'] = self.VENDOR self._stats = stats def manage_existing(self, volume, existing_ref): """Manages an existing LUN in the array. :param volume: the mapping cinder volume of the Unity LUN. :param existing_ref: the Unity LUN info. """ return self.adapter.manage_existing(volume, existing_ref) def manage_existing_get_size(self, volume, existing_ref): """Returns size of volume to be managed by manage_existing.""" return self.adapter.manage_existing_get_size(volume, existing_ref) def get_pool(self, volume): """Returns the pool name of a volume.""" return self.adapter.get_pool_name(volume) def unmanage(self, volume): """Unmanages a volume.""" pass def backup_use_temp_snapshot(self): return True def create_export_snapshot(self, context, snapshot, connector): """Creates the mount point of the snapshot for backup. Not necessary to create on Unity. """ pass def remove_export_snapshot(self, context, snapshot): """Deletes the mount point the snapshot for backup. Not necessary to create on Unity. """ pass def initialize_connection_snapshot(self, snapshot, connector, **kwargs): return self.adapter.initialize_connection_snapshot(snapshot, connector) def terminate_connection_snapshot(self, snapshot, connector, **kwargs): return self.adapter.terminate_connection_snapshot(snapshot, connector) def revert_to_snapshot(self, context, volume, snapshot): """Reverts a volume to a snapshot.""" return self.adapter.restore_snapshot(volume, snapshot) @skip_if_not_cg def create_group(self, context, group): """Creates a consistency group.""" return self.adapter.create_group(group) @skip_if_not_cg def delete_group(self, context, group, volumes): """Deletes a consistency group.""" return self.adapter.delete_group(group) @skip_if_not_cg def update_group(self, context, group, add_volumes=None, remove_volumes=None): """Updates a consistency group, i.e. 
add/remove luns to/from it.""" # TODO(Ryan L) update other information (like description) of group return self.adapter.update_group(group, add_volumes, remove_volumes) @skip_if_not_cg def create_group_from_src(self, context, group, volumes, group_snapshot=None, snapshots=None, source_group=None, source_vols=None): """Creates a consistency group from another group or group snapshot.""" if group_snapshot: return self.adapter.create_group_from_snap(group, volumes, group_snapshot, snapshots) elif source_group: return self.adapter.create_cloned_group(group, volumes, source_group, source_vols) @skip_if_not_cg def create_group_snapshot(self, context, group_snapshot, snapshots): """Creates a snapshot of consistency group.""" return self.adapter.create_group_snapshot(group_snapshot, snapshots) @skip_if_not_cg def delete_group_snapshot(self, context, group_snapshot, snapshots): """Deletes a snapshot of consistency group.""" return self.adapter.delete_group_snapshot(group_snapshot) def failover_host(self, context, volumes, secondary_id=None, groups=None): """Failovers volumes to secondary backend.""" return self.adapter.failover(volumes, secondary_id=secondary_id, groups=groups) def enable_replication(self, context, group, volumes): return self.adapter.enable_replication(context, group, volumes) def disable_replication(self, context, group, volumes): return self.adapter.disable_replication(context, group, volumes) def failover_replication(self, context, group, volumes, secondary_backend_id=None): return self.adapter.failover_replication( context, group, volumes, secondary_backend_id) def get_replication_error_status(self, context, groups): return self.adapter.get_replication_error_status(context, groups) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/drivers/dell_emc/unity/replication.py0000664000175000017500000002102600000000000025120 0ustar00zuulzuul00000000000000# Copyright (c) 2016 - 2019 Dell Inc. or its subsidiaries. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import random from oslo_log import log as logging from oslo_utils import excutils from cinder import exception from cinder.volume.drivers.dell_emc.unity import adapter as unity_adapter LOG = logging.getLogger(__name__) class ReplicationDevice(object): def __init__(self, conf_dict, driver): """Constructs a replication device from driver configuration. :param conf_dict: the conf of one replication device entry. It's a dict with content like `{backend_id: vendor-id-1, key-1: val-1, ...}` :param driver: the backend driver. 
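        An illustrative cinder.conf entry that would produce such a dict
        (all values below are hypothetical)::

            replication_device = backend_id:unity_secondary,san_ip:192.168.1.51,san_login:admin,san_password:secret,max_time_out_of_sync:60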
""" driver_conf = driver.configuration self.backend_id = conf_dict.get('backend_id') self.san_ip = conf_dict.get('san_ip', None) if (self.backend_id is None or not self.backend_id.strip() or self.san_ip is None or not self.san_ip.strip()): LOG.error('No backend_id or san_ip in %(conf)s of ' '%(group)s.replication_device', conf=conf_dict, group=driver_conf.config_group) raise exception.InvalidConfigurationValue( option='%s.replication_device' % driver_conf.config_group, value=driver_conf.replication_device) # Use the driver settings if not configured in replication_device. self.san_login = conf_dict.get('san_login', driver_conf.san_login) self.san_password = conf_dict.get('san_password', driver_conf.san_password) # Max time (in minute) out of sync is a setting for replication. # It means maximum time to wait before syncing the source and # destination. `0` means it is a sync replication. Default is `60`. try: self.max_time_out_of_sync = int( conf_dict.get('max_time_out_of_sync', 60)) except ValueError: LOG.error('max_time_out_of_sync is not a number, %(conf)s of ' '%(group)s.replication_device', conf=conf_dict, group=driver_conf.config_group) raise exception.InvalidConfigurationValue( option='%s.replication_device' % driver_conf.config_group, value=driver_conf.replication_device) if self.max_time_out_of_sync < 0: LOG.error('max_time_out_of_sync should be greater than 0, ' '%(conf)s of %(group)s.replication_device', conf=conf_dict, group=driver_conf.config_group) raise exception.InvalidConfigurationValue( option='%s.replication_device' % driver_conf.config_group, value=driver_conf.replication_device) self.driver = driver self._adapter = init_adapter(driver.get_version(), driver.protocol) self._dst_pool = None self._serial_number = None @property def device_conf(self): conf = self.driver.configuration conf.san_ip = self.san_ip conf.san_login = self.san_login conf.san_password = self.san_password return conf def setup_adapter(self): if not self._adapter.is_setup: try: self._adapter.do_setup(self.driver, self.device_conf) except exception.CinderException: with excutils.save_and_reraise_exception(): LOG.error('replication_device configured but its adapter ' 'setup failed: %s', self.backend_id) @property def adapter(self): self.setup_adapter() return self._adapter @property def destination_pool(self): if self._dst_pool is None: LOG.debug('getting destination pool for replication device: %s', self.backend_id) pools_dict = self.adapter.storage_pools_map pool_name = random.choice(list(pools_dict)) LOG.debug('got destination pool for replication device: %s, ' 'pool: %s', self.backend_id, pool_name) self._dst_pool = pools_dict[pool_name] return self._dst_pool def init_adapter(version, protocol): if protocol == unity_adapter.PROTOCOL_FC: return unity_adapter.FCAdapter(version) return unity_adapter.ISCSIAdapter(version) DEFAULT_ADAPTER_NAME = 'default' class ReplicationManager(object): def __init__(self): self.is_replication_configured = False self.default_conf = None self.default_device = None self.replication_devices = None self.active_backend_id = None def do_setup(self, driver): self.default_conf = driver.configuration self.replication_devices = self.parse_rep_device(driver) if DEFAULT_ADAPTER_NAME in self.replication_devices: LOG.error('backend_id cannot be `default`') raise exception.InvalidConfigurationValue( option=('%s.replication_device' % self.default_conf.config_group), value=self.default_conf.replication_device) # Only support one replication device currently. 
if len(self.replication_devices) > 1: LOG.error('At most one replication_device is supported') raise exception.InvalidConfigurationValue( option=('%s.replication_device' % self.default_conf.config_group), value=self.default_conf.replication_device) self.is_replication_configured = len(self.replication_devices) >= 1 self.active_backend_id = driver.active_backend_id if self.active_backend_id: if self.active_backend_id not in self.replication_devices: LOG.error('Service starts under failed-over status, ' 'active_backend_id: %s is not empty, but not in ' 'replication_device.', self.active_backend_id) raise exception.InvalidConfigurationValue( option=('%s.replication_device' % self.default_conf.config_group), value=self.default_conf.replication_device) else: self.active_backend_id = DEFAULT_ADAPTER_NAME default_device_conf = { 'backend_id': DEFAULT_ADAPTER_NAME, 'san_ip': driver.configuration.san_ip } self.default_device = ReplicationDevice(default_device_conf, driver) if not self.is_service_failed_over: # If service doesn't fail over, setup the adapter. # Otherwise, the primary backend could be down, adapter setup could # fail. self.default_device.setup_adapter() if self.is_replication_configured: # If replication_device is configured, consider the replication is # enabled and check the same configuration is valid for secondary # backend or not. self.setup_rep_adapters() @property def is_service_failed_over(self): return (self.active_backend_id is not None and self.active_backend_id != DEFAULT_ADAPTER_NAME) def setup_rep_adapters(self): for backend_id, rep_device in self.replication_devices.items(): rep_device.setup_adapter() @property def active_adapter(self): if self.is_service_failed_over: return self.replication_devices[self.active_backend_id].adapter else: self.active_backend_id = DEFAULT_ADAPTER_NAME return self.default_device.adapter @staticmethod def parse_rep_device(driver): driver_conf = driver.configuration rep_devices = {} if not driver_conf.replication_device: return rep_devices for device_conf in driver_conf.replication_device: rep_device = ReplicationDevice(device_conf, driver) rep_devices[rep_device.backend_id] = rep_device return rep_devices def failover_service(self, backend_id): self.active_backend_id = backend_id ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/drivers/dell_emc/unity/utils.py0000664000175000017500000003345300000000000023756 0ustar00zuulzuul00000000000000# Copyright (c) 2016 Dell Inc. or its subsidiaries. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import contextlib import fnmatch import functools import json from oslo_log import log as logging from oslo_utils import units from packaging import version from cinder import coordination from cinder import exception from cinder.i18n import _ from cinder.objects import fields from cinder.volume import group_types from cinder.volume import volume_types from cinder.volume import volume_utils from cinder.zonemanager import utils as zm_utils LOG = logging.getLogger(__name__) BACKEND_QOS_CONSUMERS = frozenset(['back-end', 'both']) QOS_MAX_IOPS = 'maxIOPS' QOS_MAX_BWS = 'maxBWS' PROVISIONING_TYPE = 'provisioning:type' PROVISIONING_COMPRESSED = 'compressed' QOS_SPECS = 'qos_specs' SPECS_OF_QOS = 'specs' QOS_ID = 'id' def dump_provider_location(location_dict): sorted_keys = sorted(location_dict.keys()) return '|'.join('%(k)s^%(v)s' % {'k': k, 'v': location_dict[k]} for k in sorted_keys) def build_provider_location(system, lun_type, lun_id, version): """Builds provider_location for volume or snapshot. :param system: Unity serial number :param lun_id: LUN ID in Unity :param lun_type: 'lun' :param version: driver version """ location_dict = {'system': system, 'type': lun_type, 'id': str(lun_id), 'version': version} return dump_provider_location(location_dict) def extract_provider_location(provider_location, key): """Extracts value of the specified field from provider_location string. :param provider_location: provider_location string :param key: field name of the value that to be extracted :return: value of the specified field if it exists, otherwise, None is returned """ if provider_location: for kvp in provider_location.split('|'): fields = kvp.split('^') if len(fields) == 2 and fields[0] == key: return fields[1] else: LOG.warning('"%(key)s" is not found in provider ' 'location "%(location)s."', {'key': key, 'location': provider_location}) else: LOG.warning('Empty provider location received.') def byte_to_gib(byte): return byte / units.Gi def byte_to_mib(byte): return byte / units.Mi def gib_to_mib(gib): return gib * units.Ki def validate_pool_names(conf_pools, array_pools): if not conf_pools: LOG.debug('No storage pools are specified. This host will manage ' 'all the pools on the Unity system.') return array_pools conf_pools = set(map(lambda i: i.strip(), conf_pools)) array_pools = set(map(lambda i: i.strip(), array_pools)) existed = conf_pools & array_pools if not existed: msg = (_('No storage pools to be managed exist. Please check ' 'your configuration. 
The available storage pools on the ' 'system are %s.') % array_pools) raise exception.VolumeBackendAPIException(data=msg) return existed def retype_need_migration(volume, old_provision, new_provision, host): if volume['host'] != host['host']: return True if old_provision != new_provision: if retype_need_change_compression(old_provision, new_provision)[0]: return False else: return True return False def retype_need_change_compression(old_provision, new_provision): """:return: whether need change compression and the new value""" if ((not old_provision or old_provision == 'thin') and new_provision == PROVISIONING_COMPRESSED): return True, True elif (old_provision == PROVISIONING_COMPRESSED and (not new_provision or old_provision == 'thin')): return True, False # no need change compression return False, None def retype_need_change_qos(old_qos=None, new_qos=None): old = old_qos.get(QOS_SPECS).get(QOS_ID) if old_qos.get(QOS_SPECS) else '' new = new_qos.get(QOS_SPECS).get(QOS_ID) if new_qos.get(QOS_SPECS) else '' return old != new def extract_iscsi_uids(connector): if 'initiator' not in connector: msg = _("Host %s doesn't have iSCSI initiator.") % connector['host'] raise exception.VolumeBackendAPIException(data=msg) return [connector['initiator']] def extract_fc_uids(connector): if 'wwnns' not in connector or 'wwpns' not in connector: msg = _("Host %s doesn't have FC initiators.") % connector['host'] LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) wwnns = connector['wwnns'] wwpns = connector['wwpns'] wwns = [(node + port).upper() for node, port in zip(wwnns, wwpns)] def _to_wwn(wwn): # Format the wwn to include the colon # For example, convert 1122200000051E55E100 to # 11:22:20:00:00:05:1E:55:A1:00 return ':'.join(wwn[i:i + 2] for i in range(0, len(wwn), 2)) return list(map(_to_wwn, wwns)) def convert_ip_to_portal(ip): is_ipv6_without_brackets = ':' in ip and ip[-1] != ']' if is_ipv6_without_brackets: return '[%s]:3260' % ip return '%s:3260' % ip def convert_to_itor_tgt_map(zone_mapping): """Function to process data from lookup service. :param zone_mapping: mapping is the data from the zone lookup service with below format { : { 'initiator_port_wwn_list': ('200000051e55a100', '200000051e55a121'..) 'target_port_wwn_list': ('100000051e55a100', '100000051e55a121'..) 
} } """ target_wwns = [] itor_tgt_map = {} for san_name in zone_mapping: one_map = zone_mapping[san_name] for target in one_map['target_port_wwn_list']: if target not in target_wwns: target_wwns.append(target) for initiator in one_map['initiator_port_wwn_list']: itor_tgt_map[initiator] = one_map['target_port_wwn_list'] LOG.debug("target_wwns: %(tgt_wwns)s\n init_targ_map: %(itor_tgt_map)s", {'tgt_wwns': target_wwns, 'itor_tgt_map': itor_tgt_map}) return target_wwns, itor_tgt_map def get_pool_name(volume): return volume_utils.extract_host(volume.host, 'pool') def get_pool_name_from_host(host): return volume_utils.extract_host(host['host'], 'pool') def get_backend_name_from_volume(volume): return volume_utils.extract_host(volume.host, 'backend') def get_backend_name_from_host(host): return volume_utils.extract_host(host['host'], 'backend') def get_extra_spec(volume, spec_key): spec_value = None type_id = volume.volume_type_id if type_id is not None: extra_specs = volume_types.get_volume_type_extra_specs(type_id) if spec_key in extra_specs: spec_value = extra_specs[spec_key] return spec_value def group_is_cg(group): result = get_group_specs(group, 'consistent_group_snapshot_enabled') return result == ' True' def get_group_specs(group, spec_key): spec_value = None if group.group_type_id: group_specs = group_types.get_group_type_specs(group.group_type_id) if spec_key in group_specs: spec_value = group_specs[spec_key] return spec_value def ignore_exception(func, *args, **kwargs): try: func(*args, **kwargs) except Exception as ex: LOG.warning('Error occurred but ignored. Function: %(func_name)s, ' 'args: %(args)s, kwargs: %(kwargs)s, ' 'exception: %(ex)s.', {'func_name': func, 'args': args, 'kwargs': kwargs, 'ex': ex}) @contextlib.contextmanager def assure_cleanup(enter_func, exit_func, use_enter_return): """Assures the resource is cleaned up. Used as a context. :param enter_func: the function to execute when entering the context. :param exit_func: the function to execute when leaving the context. :param use_enter_return: the flag indicates whether to pass the return value of enter_func in to the exit_func. """ enter_return = None try: if isinstance(enter_func, functools.partial): enter_func_name = enter_func.func.__name__ else: enter_func_name = enter_func.__name__ LOG.debug(('Entering context. Function: %(func_name)s, ' 'use_enter_return: %(use)s.'), {'func_name': enter_func_name, 'use': use_enter_return}) enter_return = enter_func() yield enter_return finally: if isinstance(exit_func, functools.partial): exit_func_name = exit_func.func.__name__ else: exit_func_name = exit_func.__name__ LOG.debug(('Exiting context. Function: %(func_name)s, ' 'use_enter_return: %(use)s.'), {'func_name': exit_func_name, 'use': use_enter_return}) if enter_return is not None: if use_enter_return: ignore_exception(exit_func, enter_return) else: ignore_exception(exit_func) def create_lookup_service(): return zm_utils.create_lookup_service() def get_backend_qos_specs(volume): type_id = volume.volume_type_id if type_id is None: return None qos_specs = volume_types.get_volume_type_qos_specs(type_id) if qos_specs is None: return None qos_specs = qos_specs[QOS_SPECS] if qos_specs is None: return None consumer = qos_specs['consumer'] # Front end QoS specs are handled by nova. We ignore them here. 
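    # Illustrative example (hypothetical values): a QoS spec with
    # consumer='back-end' and specs={'maxIOPS': '5000'} makes this function
    # return {'id': <qos spec id>, 'maxIOPS': '5000', 'maxBWS': None}, while
    # a spec with consumer='front-end' is filtered out by the check below and
    # None is returned.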
if consumer not in BACKEND_QOS_CONSUMERS: return None max_iops = qos_specs[SPECS_OF_QOS].get(QOS_MAX_IOPS) max_bws = qos_specs[SPECS_OF_QOS].get(QOS_MAX_BWS) if max_iops is None and max_bws is None: return None return { 'id': qos_specs['id'], QOS_MAX_IOPS: max_iops, QOS_MAX_BWS: max_bws, } def remove_empty(option, value_list): if value_list: value_list = list(filter(None, map(str.strip, value_list))) if not value_list: raise exception.InvalidConfigurationValue(option=option, value=value_list) return value_list return None def match_any(full, patterns): matched = list( filter(lambda x: any(fnmatch.fnmatchcase(x, p) for p in patterns), full)) unmatched = list( filter(lambda x: not any(fnmatch.fnmatchcase(x, p) for p in patterns), full)) unmatched_patterns = list( filter(lambda p: not any(fnmatch.fnmatchcase(x, p) for x in full), patterns)) return matched, unmatched, unmatched_patterns def is_before_4_1(ver): return version.parse(ver) < version.parse('4.1') def lock_if(condition, lock_name): if condition: return coordination.synchronized(lock_name) else: return functools.partial def append_capabilities(func): capabilities = { 'thin_provisioning_support': True, 'thick_provisioning_support': True, 'consistent_group_snapshot_enabled': True, 'fast_support': True, 'consistent_group_replication_enabled': True } @functools.wraps(func) def _inner(*args, **kwargs): output = func(*args, **kwargs) output.update(capabilities) return output return _inner def is_multiattach_to_host(volume_attachment, host_name): # When multiattach is enabled, a volume could be attached to two or more # instances which are hosted on one nova host. # Because unity cannot recognize the volume is attached to two or more # instances, we should keep the volume attached to the nova host until # the volume is detached from the last instance. if not volume_attachment: return False attachment = [a for a in volume_attachment if a.attach_status == fields.VolumeAttachStatus.ATTACHED and a.attached_host == host_name] return len(attachment) > 1 def load_replication_data(rep_data_str): # rep_data_str is string dumped from a dict like: # { # 'default': 'rep_session_name_failed_over', # 'backend_id_1': 'rep_session_name_1', # 'backend_id_2': 'rep_session_name_2' # } return json.loads(rep_data_str) def dump_replication_data(model_update, rep_data): # rep_data is a dict like: # { # 'backend_id_1': 'rep_session_name_1', # 'backend_id_2': 'rep_session_name_2' # } model_update['replication_driver_data'] = json.dumps(rep_data) return model_update def enable_replication_status(model_update, rep_data): model_update['replication_status'] = fields.ReplicationStatus.ENABLED return dump_replication_data(model_update, rep_data) def error_replication_status(model_update): # model_update is a dict like: # { # 'volume_id': volume.id, # 'updates': { # 'provider_id': new_provider_id, # 'provider_location': new_provider_location, # 'replication_status': fields.ReplicationStatus.FAILOVER_ERROR, # ... 
# } # } model_update['updates']['replication_status'] = ( fields.ReplicationStatus.FAILOVER_ERROR ) return model_update ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315577.3471208 cinder-27.0.0/cinder/volume/drivers/dell_emc/vnx/0000775000175000017500000000000000000000000021677 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/drivers/dell_emc/vnx/__init__.py0000664000175000017500000000000000000000000023776 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/drivers/dell_emc/vnx/adapter.py0000664000175000017500000017453500000000000023710 0ustar00zuulzuul00000000000000# Copyright (c) 2016 EMC Corporation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import json import math import os import random import re from oslo_config import cfg from oslo_log import log as logging from oslo_utils import importutils from cinder import exception from cinder.i18n import _ from cinder.objects import fields from cinder.volume.drivers.dell_emc.vnx import client from cinder.volume.drivers.dell_emc.vnx import common from cinder.volume.drivers.dell_emc.vnx import replication from cinder.volume.drivers.dell_emc.vnx import taskflows as emc_taskflow from cinder.volume.drivers.dell_emc.vnx import utils from cinder.volume import volume_utils from cinder.zonemanager import utils as zm_utils storops = importutils.try_import('storops') if storops: from storops import exception as storops_ex CONF = cfg.CONF LOG = logging.getLogger(__name__) class CommonAdapter(replication.ReplicationAdapter): VERSION = None def __init__(self, configuration, active_backend_id): self.config = configuration self.active_backend_id = active_backend_id self.client = None self.protocol = None self.serial_number = None self.mirror_view = None self.storage_pools = None self.max_retries = 5 self.allowed_ports = None self.force_delete_lun_in_sg = None self.max_over_subscription_ratio = None self.max_luns_per_storage_group = None self.ignore_pool_full_threshold = None self.reserved_percentage = None self.destroy_empty_sg = None self.itor_auto_dereg = None self.queue_path = None self.async_migrate = None def _build_client_from_config(self, config, queue_path=None): return client.Client( config.san_ip, config.san_login, config.san_password, config.storage_vnx_authentication_type, config.naviseccli_path, config.storage_vnx_security_file_dir, queue_path) def do_setup(self): self._normalize_config() self.client = self._build_client_from_config( self.config, self.queue_path) self.client.set_max_luns_per_sg( self.config.max_luns_per_storage_group) # Replication related if (self.active_backend_id in common.ReplicationDeviceList.get_backend_ids(self.config)): # The backend is in failed-over state self.mirror_view = self.build_mirror_view(self.config, False) self.client 
= self.mirror_view.primary_client else: self.mirror_view = self.build_mirror_view(self.config, True) self.serial_number = self.client.get_serial() self.storage_pools = self.parse_pools() self.force_delete_lun_in_sg = ( self.config.force_delete_lun_in_storagegroup) self.max_over_subscription_ratio = ( self.config.max_over_subscription_ratio) self.ignore_pool_full_threshold = ( self.config.ignore_pool_full_threshold) self.reserved_percentage = self.config.reserved_percentage self.protocol = self.config.storage_protocol self.destroy_empty_sg = self.config.destroy_empty_storage_group self.itor_auto_dereg = self.config.initiator_auto_deregistration self.async_migrate = self.config.vnx_async_migrate self.set_extra_spec_defaults() def _normalize_config(self): group_name = ( self.config.config_group if self.config.config_group else 'DEFAULT') self.queue_path = os.path.join(CONF.state_path, 'vnx', group_name) # Check option `naviseccli_path`. # Set to None (then pass to storops) if it is not set or set to an # empty string. naviseccli_path = self.config.naviseccli_path if naviseccli_path is None or len(naviseccli_path.strip()) == 0: LOG.warning('[%(group)s] naviseccli_path is not set or set to ' 'an empty string. None will be passed into ' 'storops.', {'group': self.config.config_group}) self.config.naviseccli_path = None # Check option `storage_vnx_pool_names`. # Raise error if it is set to an empty list. pool_names = self.config.storage_vnx_pool_names if pool_names is not None: # Filter out the empty string in the list. pool_names = [name.strip() for name in [x for x in pool_names if len(x.strip()) != 0]] if len(pool_names) == 0: raise exception.InvalidConfigurationValue( option='[{group}] storage_vnx_pool_names'.format( group=self.config.config_group), value=pool_names) self.config.storage_vnx_pool_names = pool_names # Check option `io_port_list`. # Raise error if it is set to an empty list. io_port_list = self.config.io_port_list if io_port_list is not None: io_port_list = [port.strip().upper() for port in [x for x in io_port_list if len(x.strip()) != 0]] if len(io_port_list) == 0: # io_port_list is allowed to be an empty list, which means # none of the ports will be registered. raise exception.InvalidConfigurationValue( option='[{group}] io_port_list'.format( group=self.config.config_group), value=io_port_list) self.config.io_port_list = io_port_list if self.config.ignore_pool_full_threshold: LOG.warning('[%(group)s] ignore_pool_full_threshold: True. ' 'LUN creation will still be forced even if the ' 'pool full threshold is exceeded.', {'group': self.config.config_group}) if self.config.destroy_empty_storage_group: LOG.warning('[%(group)s] destroy_empty_storage_group: True. ' 'Empty storage group will be deleted after volume ' 'is detached.', {'group': self.config.config_group}) if not self.config.initiator_auto_registration: LOG.info('[%(group)s] initiator_auto_registration: False. ' 'Initiator auto registration is not enabled. ' 'Please register initiator manually.', {'group': self.config.config_group}) if self.config.force_delete_lun_in_storagegroup: LOG.warning( '[%(group)s] force_delete_lun_in_storagegroup=True', {'group': self.config.config_group}) if self.config.ignore_pool_full_threshold: LOG.warning('[%(group)s] ignore_pool_full_threshold: True. 
' 'LUN creation will still be forced even if the ' 'pool full threshold is exceeded.', {'group': self.config.config_group}) def _build_port_str(self, port): raise NotImplementedError() def validate_ports(self, all_ports, ports_whitelist): # `ports_whitelist` passed the _normalize_config, then it could be only # None or valid list in which the items are stripped and converted to # upper case. result_ports = None if ports_whitelist is None: result_ports = all_ports else: # Split the whitelist, remove spaces around the comma, # and remove the empty item. port_strs_configed = set(ports_whitelist) # For iSCSI port, the format is 'A-1-1', # while for FC, it is 'A-2'. valid_port_map = {self._build_port_str(port): port for port in all_ports} invalid_port_strs = port_strs_configed - set(valid_port_map.keys()) if invalid_port_strs: msg = (_('[%(group)s] Invalid %(protocol)s ports %(port)s ' 'specified for io_port_list.') % { 'group': self.config.config_group, 'protocol': self.config.storage_protocol, 'port': ','.join(invalid_port_strs)}) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) result_ports = [valid_port_map[port_str] for port_str in port_strs_configed] if not result_ports: raise exception.VolumeBackendAPIException( data=_('No valid ports.')) return result_ports def set_extra_spec_defaults(self): provision_default = storops.VNXProvisionEnum.THICK tier_default = None if self.client.is_fast_enabled(): tier_default = storops.VNXTieringEnum.HIGH_AUTO common.ExtraSpecs.set_defaults(provision_default, tier_default) def create_volume(self, volume): """Creates a EMC volume.""" volume_size = volume['size'] volume_name = volume['name'] utils.check_type_matched(volume) volume_metadata = utils.get_metadata(volume) pool = utils.get_pool_from_host(volume.host) specs = common.ExtraSpecs.from_volume(volume) provision = specs.provision tier = specs.tier volume_metadata['snapcopy'] = 'False' LOG.info('Create Volume: %(volume)s Size: %(size)s ' 'pool: %(pool)s ' 'provision: %(provision)s ' 'tier: %(tier)s ', {'volume': volume_name, 'size': volume_size, 'pool': pool, 'provision': provision, 'tier': tier}) qos_specs = utils.get_backend_qos_specs(volume) if (volume.group and volume_utils.is_group_a_cg_snapshot_type(volume.group)): cg_id = volume.group_id else: cg_id = None lun = self.client.create_lun( pool, volume_name, volume_size, provision, tier, cg_id, ignore_thresholds=self.config.ignore_pool_full_threshold, qos_specs=qos_specs) location = self._build_provider_location( lun_type='lun', lun_id=lun.lun_id, base_lun_name=volume.name) # Setup LUN Replication/MirrorView between devices. # Secondary LUN will inherit properties from primary LUN. rep_update = self.setup_lun_replication( volume, lun.lun_id) model_update = {'provider_location': location, 'metadata': volume_metadata} model_update.update(rep_update) return model_update def retype(self, ctxt, volume, new_type, diff, host): """Changes volume from one type to another.""" new_specs = common.ExtraSpecs.from_volume_type(new_type) new_specs.validate(self.client.get_vnx_enabler_status()) lun = self.client.get_lun(name=volume.name) if volume.volume_type_id: old_specs = common.ExtraSpecs.from_volume(volume) else: # Get extra specs from the LUN properties when the lun # has no volume type. 
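            # Refresh the cached LUN object first so that the provision and
            # tier values read by ExtraSpecs.from_lun() reflect the LUN's
            # current state on the array.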
utils.update_res_without_poll(lun) old_specs = common.ExtraSpecs.from_lun(lun) old_provision = old_specs.provision old_tier = old_specs.tier need_migration = utils.retype_need_migration( volume, old_provision, new_specs.provision, host) turn_on_compress = utils.retype_need_turn_on_compression( old_provision, new_specs.provision) change_tier = utils.retype_need_change_tier( old_tier, new_specs.tier) if need_migration or turn_on_compress: if self.client.lun_has_snapshot(lun): LOG.debug('Driver is not able to do retype because the volume ' '%s has a snapshot.', volume.id) return False if need_migration: LOG.debug('Driver needs to use storage-assisted migration ' 'to retype the volume.') return self._migrate_volume(volume, host, new_specs) if turn_on_compress: # Turn on compression feature on the volume self.client.enable_compression(lun) if change_tier: # Modify lun to change tiering policy lun.tier = new_specs.tier return True def create_volume_from_snapshot(self, volume, snapshot): """Constructs a work flow to create a volume from snapshot. :param volume: new volume :param snapshot: base snapshot This flow will do the following: #. Create a snap mount point (SMP) for the snapshot. #. Attach the snapshot to the SMP created in the first step. #. Create a temporary lun prepare for migration. (Skipped if snapcopy='true') #. Start a migration between the SMP and the temp lun. (Skipped if snapcopy='true') """ volume_metadata = utils.get_metadata(volume) pool = utils.get_pool_from_host(volume.host) specs = common.ExtraSpecs.from_volume(volume) tier = specs.tier base_lun_name = utils.get_base_lun_name(snapshot.volume) rep_update = dict() if utils.is_snapcopy_enabled(volume): new_lun_id = emc_taskflow.fast_create_volume_from_snapshot( client=self.client, snap_name=snapshot.name, new_snap_name=utils.construct_snap_name(volume), lun_name=volume.name, base_lun_name=base_lun_name, pool_name=pool) location = self._build_provider_location( lun_type='smp', lun_id=new_lun_id, base_lun_name=base_lun_name) volume_metadata['snapcopy'] = 'True' volume_metadata['async_migrate'] = 'False' else: async_migrate, provision = utils.calc_migrate_and_provision( volume, default_async_migrate=self.async_migrate) new_snap_name = ( utils.construct_snap_name(volume) if async_migrate else None) new_lun_id = emc_taskflow.create_volume_from_snapshot( client=self.client, src_snap_name=snapshot.name, lun_name=volume.name, lun_size=volume.size, base_lun_name=base_lun_name, pool_name=pool, provision=provision, tier=tier, new_snap_name=new_snap_name) location = self._build_provider_location( lun_type='lun', lun_id=new_lun_id, base_lun_name=volume.name) volume_metadata['snapcopy'] = 'False' volume_metadata['async_migrate'] = str(async_migrate) rep_update = self.setup_lun_replication(volume, new_lun_id) model_update = {'provider_location': location, 'metadata': volume_metadata} model_update.update(rep_update) return model_update def create_cloned_volume(self, volume, src_vref): """Creates a clone of the specified volume.""" volume_metadata = utils.get_metadata(volume) pool = utils.get_pool_from_host(volume.host) specs = common.ExtraSpecs.from_volume(volume) tier = specs.tier base_lun_name = utils.get_base_lun_name(src_vref) source_lun_id = self.client.get_lun_id(src_vref) snap_name = utils.construct_snap_name(volume) rep_update = dict() if utils.is_snapcopy_enabled(volume): # snapcopy feature enabled new_lun_id = emc_taskflow.fast_create_cloned_volume( client=self.client, snap_name=snap_name, lun_id=source_lun_id, 
lun_name=volume.name, base_lun_name=base_lun_name ) location = self._build_provider_location( lun_type='smp', lun_id=new_lun_id, base_lun_name=base_lun_name) volume_metadata['snapcopy'] = 'True' volume_metadata['async_migrate'] = 'False' else: async_migrate, provision = utils.calc_migrate_and_provision( volume, default_async_migrate=self.async_migrate) new_lun_id = emc_taskflow.create_cloned_volume( client=self.client, snap_name=snap_name, lun_id=source_lun_id, lun_name=volume.name, lun_size=volume.size, base_lun_name=base_lun_name, pool_name=pool, provision=provision, tier=tier, async_migrate=async_migrate) # After migration, volume's base lun is itself location = self._build_provider_location( lun_type='lun', lun_id=new_lun_id, base_lun_name=volume.name) volume_metadata['snapcopy'] = 'False' volume_metadata['async_migrate'] = str(async_migrate) rep_update = self.setup_lun_replication(volume, new_lun_id) model_update = {'provider_location': location, 'metadata': volume_metadata} model_update.update(rep_update) return model_update def migrate_volume(self, context, volume, host): """Leverage the VNX on-array migration functionality. This method is invoked at the source backend. """ specs = common.ExtraSpecs.from_volume(volume) return self._migrate_volume(volume, host, specs) def _migrate_volume(self, volume, host, extra_specs): """Migrates volume. :param extra_specs: Instance of ExtraSpecs. The new volume will be changed to align with the new extra specs. """ r = utils.validate_storage_migration( volume, host, self.serial_number, self.protocol) if not r: return r, None rate = utils.get_migration_rate(volume) new_pool = utils.get_pool_from_host(host['host']) lun_id = self.client.get_lun_id(volume) lun_name = volume.name provision = extra_specs.provision tier = extra_specs.tier emc_taskflow.run_migration_taskflow( self.client, lun_id, lun_name, volume.size, new_pool, provision, tier, rate) # A smp will become a LUN after migration if utils.is_volume_smp(volume): self.client.delete_snapshot( utils.construct_snap_name(volume)) volume_metadata = utils.get_metadata(volume) pl = self._build_provider_location( lun_type='lun', lun_id=lun_id, base_lun_name=volume.name) volume_metadata['snapcopy'] = 'False' model_update = {'provider_location': pl, 'metadata': volume_metadata} return True, model_update def create_consistencygroup(self, context, group): cg_name = group.id model_update = {'status': fields.ConsistencyGroupStatus.AVAILABLE} self.client.create_consistency_group(cg_name=cg_name) return model_update def delete_consistencygroup(self, context, group, volumes): """Deletes a consistency group.""" cg_name = group.id model_update = {} volumes_model_update = [] model_update['status'] = group.status LOG.info('Start to delete consistency group: %(cg_name)s', {'cg_name': cg_name}) self.client.delete_consistency_group(cg_name) for volume in volumes: try: self.client.delete_lun(volume.name) volumes_model_update.append( {'id': volume.id, 'status': fields.ConsistencyGroupStatus.DELETED}) except storops_ex.VNXDeleteLunError: volumes_model_update.append( {'id': volume.id, 'status': fields.ConsistencyGroupStatus.ERROR_DELETING}) return model_update, volumes_model_update def create_cgsnapshot(self, context, cgsnapshot, snapshots): """Creates a CG snapshot(snap group).""" return self.do_create_cgsnap(cgsnapshot.consistencygroup_id, cgsnapshot.id, snapshots) def do_create_cgsnap(self, group_name, snap_name, snapshots): model_update = {} snapshots_model_update = [] LOG.info('Creating consistency snapshot for 
group' ': %(group_name)s', {'group_name': group_name}) self.client.create_cg_snapshot(snap_name, group_name) for snapshot in snapshots: snapshots_model_update.append( {'id': snapshot.id, 'status': 'available'}) model_update['status'] = 'available' return model_update, snapshots_model_update def delete_cgsnapshot(self, context, cgsnapshot, snapshots): """Deletes a CG snapshot(snap group).""" return self.do_delete_cgsnap(cgsnapshot.consistencygroup_id, cgsnapshot.id, cgsnapshot.status, snapshots) def do_delete_cgsnap(self, group_name, snap_name, snap_status, snapshots): model_update = {} snapshots_model_update = [] model_update['status'] = snap_status LOG.info('Deleting consistency snapshot %(snap_name)s for ' 'group: %(group_name)s', {'snap_name': snap_name, 'group_name': group_name}) self.client.delete_cg_snapshot(snap_name) for snapshot in snapshots: snapshots_model_update.append( {'id': snapshot.id, 'status': 'deleted'}) model_update['status'] = 'deleted' return model_update, snapshots_model_update def create_cg_from_cgsnapshot(self, context, group, volumes, cgsnapshot, snapshots): return self.do_create_cg_from_cgsnap( group.id, group.host, volumes, cgsnapshot.id, snapshots) def do_create_cg_from_cgsnap(self, cg_id, cg_host, volumes, cgsnap_id, snapshots): # 1. Copy a temp CG snapshot from CG snapshot # and allow RW for it # 2. Create SMPs from source volumes # 3. Attach SMPs to the CG snapshot # 4. Create migration target LUNs # 5. Migrate from SMPs to LUNs one by one # 6. Wait completion of migration # 7. Create a new CG, add all LUNs to it # 8. Delete the temp CG snapshot cg_name = cg_id src_cg_snap_name = cgsnap_id pool_name = utils.get_pool_from_host(cg_host) lun_sizes = [] lun_names = [] src_lun_names = [] specs_list = [] for volume, snapshot in zip(volumes, snapshots): lun_sizes.append(volume.size) lun_names.append(volume.name) src_lun_names.append(snapshot.volume.name) specs_list.append(common.ExtraSpecs.from_volume(volume)) lun_id_list = emc_taskflow.create_cg_from_cg_snapshot( client=self.client, cg_name=cg_name, src_cg_name=None, cg_snap_name=None, src_cg_snap_name=src_cg_snap_name, pool_name=pool_name, lun_sizes=lun_sizes, lun_names=lun_names, src_lun_names=src_lun_names, specs_list=specs_list) volume_model_updates = [] for volume, lun_id in zip(volumes, lun_id_list): model_update = { 'id': volume.id, 'provider_location': self._build_provider_location( lun_id=lun_id, lun_type='lun', base_lun_name=volume.name )} volume_model_updates.append(model_update) return None, volume_model_updates def create_cloned_cg(self, context, group, volumes, source_cg, source_vols): self.do_clone_cg(group.id, group.host, volumes, source_cg.id, source_vols) def do_clone_cg(self, cg_id, cg_host, volumes, source_cg_id, source_vols): # 1. 
Create temp CG snapshot from source_cg # Same with steps 2-8 of create_cg_from_cgsnapshot pool_name = utils.get_pool_from_host(cg_host) lun_sizes = [] lun_names = [] src_lun_names = [] specs_list = [] for volume, source_vol in zip(volumes, source_vols): lun_sizes.append(volume.size) lun_names.append(volume.name) src_lun_names.append(source_vol.name) specs_list.append(common.ExtraSpecs.from_volume(volume)) lun_id_list = emc_taskflow.create_cloned_cg( client=self.client, cg_name=cg_id, src_cg_name=source_cg_id, pool_name=pool_name, lun_sizes=lun_sizes, lun_names=lun_names, src_lun_names=src_lun_names, specs_list=specs_list) volume_model_updates = [] for volume, lun_id in zip(volumes, lun_id_list): model_update = { 'id': volume.id, 'provider_location': self._build_provider_location( lun_id=lun_id, lun_type='lun', base_lun_name=volume.name )} volume_model_updates.append(model_update) return None, volume_model_updates def parse_pools(self): pool_names = self.config.storage_vnx_pool_names array_pools = self.client.get_pools() if pool_names: pool_names = set([po.strip() for po in pool_names]) array_pool_names = set([po.name for po in array_pools]) nonexistent_pools = pool_names.difference(array_pool_names) pool_names.difference_update(nonexistent_pools) if not pool_names: msg = _('All the specified storage pools to be managed ' 'do not exist. Please check your configuration. ' 'Non-existent pools: %s') % ','.join(nonexistent_pools) raise exception.VolumeBackendAPIException(data=msg) if nonexistent_pools: LOG.warning('The following specified storage pools ' 'do not exist: %(nonexistent)s. ' 'This host will only manage the storage ' 'pools: %(exist)s', {'nonexistent': ','.join(nonexistent_pools), 'exist': ','.join(pool_names)}) else: LOG.debug('This host will manage the storage pools: %s.', ','.join(pool_names)) else: pool_names = [p.name for p in array_pools] LOG.info('No storage pool is configured. This host will ' 'manage all the pools on the VNX system.') return [pool for pool in array_pools if pool.name in pool_names] def get_enabler_stats(self): stats = dict() stats['compression_support'] = self.client.is_compression_enabled() stats['fast_support'] = self.client.is_fast_enabled() stats['deduplication_support'] = self.client.is_dedup_enabled() stats['thin_provisioning_support'] = self.client.is_thin_enabled() stats['consistencygroup_support'] = self.client.is_snap_enabled() stats['replication_enabled'] = True if self.mirror_view else False stats['consistent_group_snapshot_enabled'] = ( self.client.is_snap_enabled()) return stats def get_pool_stats(self, enabler_stats=None): stats = enabler_stats if enabler_stats else self.get_enabler_stats() self.storage_pools = self.parse_pools() pool_feature = self.client.get_pool_feature() pools_stats = list() for pool in self.storage_pools: pool_stats = { 'pool_name': pool.name, 'total_capacity_gb': pool.user_capacity_gbs, 'provisioned_capacity_gb': pool.total_subscribed_capacity_gbs } # Handle pool state Initializing, Ready, Faulted, Offline # or Deleting. if pool.state in common.PoolState.VALID_CREATE_LUN_STATE: pool_stats['free_capacity_gb'] = 0 LOG.warning('Storage Pool [%(pool)s] is [%(state)s].', {'pool': pool.name, 'state': pool.state}) else: pool_stats['free_capacity_gb'] = pool.available_capacity_gbs if (pool_feature.max_pool_luns <= pool_feature.total_pool_luns): LOG.warning('Maximum number of Pool LUNs %(max_luns)s ' 'have been created for %(pool_name)s. 
' 'No more LUN creation can be done.', {'max_luns': pool_feature.max_pool_luns, 'pool_name': pool.name}) pool_stats['free_capacity_gb'] = 0 if not self.reserved_percentage: # Since the admin is not sure of what value is proper, # the driver will calculate the recommended value. # Some extra capacity will be used by meta data of pool LUNs. # The overhead is about LUN_Capacity * 0.02 + 3 GB # reserved_percentage will be used to make sure the scheduler # takes the overhead into consideration. # Assume that all the remaining capacity is to be used to # create a thick LUN, reserved_percentage is estimated as # follows: reserved = (((0.02 * pool.available_capacity_gbs + 3) / (1.02 * pool.user_capacity_gbs)) * 100) # Take pool full threshold into consideration if not self.ignore_pool_full_threshold: reserved += 100 - pool.percent_full_threshold pool_stats['reserved_percentage'] = int(math.ceil(min(reserved, 100))) else: pool_stats['reserved_percentage'] = self.reserved_percentage array_serial = self.serial_number pool_stats['location_info'] = ('%(pool_name)s|%(array_serial)s' % {'pool_name': pool.name, 'array_serial': array_serial}) pool_stats['fast_cache_enabled'] = pool.fast_cache # Copy advanced feature stats from backend stats pool_stats['compression_support'] = stats['compression_support'] pool_stats['fast_support'] = stats['fast_support'] pool_stats['deduplication_support'] = ( stats['deduplication_support']) pool_stats['thin_provisioning_support'] = ( stats['thin_provisioning_support']) pool_stats['thick_provisioning_support'] = True pool_stats['consistencygroup_support'] = ( stats['consistencygroup_support']) pool_stats['consistent_group_snapshot_enabled'] = ( stats['consistent_group_snapshot_enabled']) pool_stats['max_over_subscription_ratio'] = ( self.max_over_subscription_ratio) pool_stats['QoS_support'] = True # Add replication v2.1 support self.append_replication_stats(pool_stats) pools_stats.append(pool_stats) return pools_stats def update_volume_stats(self): stats = self.get_enabler_stats() stats['pools'] = self.get_pool_stats(stats) stats['storage_protocol'] = self.config.storage_protocol self.append_replication_stats(stats) return stats def delete_volume(self, volume): """Deletes an EMC volume.""" async_migrate = utils.is_async_migrate_enabled( volume, default=self.async_migrate) snap_copy = (utils.construct_snap_name(volume) if utils.is_snapcopy_enabled(volume) else None) self.cleanup_lun_replication(volume) try: self.client.delete_lun(volume.name, force=self.force_delete_lun_in_sg, snap_copy=snap_copy) except storops_ex.VNXLunUsedByFeatureError: # Case 1. Migration not finished, cleanup related stuff. if async_migrate: self.client.cleanup_async_lun( name=volume.name, force=self.force_delete_lun_in_sg) else: raise except (storops_ex.VNXLunHasSnapError, storops_ex.VNXLunHasSnapMountPointError): # Here, we assume no Cinder managed snaps, and add it to queue # for later deletion self.client.delay_delete_lun(volume.name) # Case 2. Migration already finished, try to delete the temp snap # when it's a cloned volume or created from snapshot. 
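        # (Both create_cloned_volume and create_volume_from_snapshot name the
        # temp snapshot via construct_snap_name(volume), which is why the same
        # helper is used here to locate it for deletion.)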
if async_migrate and (volume.source_volid or volume.snapshot_id): self.client.delete_snapshot(utils.construct_snap_name(volume)) def extend_volume(self, volume, new_size): """Extends an EMC volume.""" self.client.expand_lun(volume.name, new_size, poll=False) def create_snapshot(self, snapshot): """Creates a snapshot.""" src_lun_id = self.client.get_lun_id(snapshot.volume) self.client.create_snapshot(src_lun_id, snapshot.name) def delete_snapshot(self, snapshot): """Deletes a snapshot.""" self.client.delete_snapshot(snapshot.name) def restore_snapshot(self, volume, snapshot): """Restores a snapshot.""" lun_id = self.client.get_lun_id(volume) self.client.restore_snapshot(lun_id, snapshot.name) def _get_referenced_lun(self, existing_ref): lun = None if 'source-id' in existing_ref: lun = self.client.get_lun(lun_id=existing_ref['source-id']) elif 'source-name' in existing_ref: lun = self.client.get_lun(name=existing_ref['source-name']) else: reason = _('Reference must contain source-id or source-name key.') raise exception.ManageExistingInvalidReference( existing_ref=existing_ref, reason=reason) if not lun.existed: raise exception.ManageExistingInvalidReference( existing_ref=existing_ref, reason=_("LUN doesn't exist.")) return lun def manage_existing_get_size(self, volume, existing_ref): """Returns size of volume to be managed by manage_existing.""" lun = self._get_referenced_lun(existing_ref) target_pool = utils.get_pool_from_host(volume.host) if target_pool and lun.pool_name != target_pool: reason = (_('The imported lun is in pool %(lun_pool)s ' 'which is not managed by the host %(host)s.') % {'lun_pool': lun.pool_name, 'host': volume['host']}) raise exception.ManageExistingInvalidReference( existing_ref=existing_ref, reason=reason) return lun.total_capacity_gb def manage_existing(self, volume, existing_ref): """Imports the existing backend storage object as a volume. .. code-block:: python manage_existing_ref:{ 'source-id': } or .. code-block:: python manage_existing_ref:{ 'source-name': } When the volume has a volume_type, the driver inspects that and compare against the properties of the referenced backend storage object. If they are incompatible, raise a ManageExistingVolumeTypeMismatch exception. """ lun = self._get_referenced_lun(existing_ref) if volume.volume_type_id: type_specs = common.ExtraSpecs.from_volume(volume) if not type_specs.match_with_lun(lun): raise exception.ManageExistingVolumeTypeMismatch( reason=_("The volume to be managed is a %(provision)s LUN " "and the tiering setting is %(tier)s. This " "doesn't match with the type %(type)s.") % {'provision': lun.provision, 'tier': lun.tier, 'type': volume.volume_type_id}) lun.rename(volume.name) if lun.is_snap_mount_point: lun_type = 'smp' base_lun_name = lun.primary_lun else: lun_type = 'lun' base_lun_name = volume.name pl = self._build_provider_location( lun_id=lun.lun_id, lun_type=lun_type, base_lun_name=base_lun_name) return {'provider_location': pl} def unmanage(self, volume): """Unmanages a volume.""" pass def build_host(self, connector): raise NotImplementedError def assure_storage_group(self, host): """Assures that the storage group with name of `host` exists. If the storage group doesn't exist, create a one. """ sg = self.client.get_storage_group(host.name) is_new_sg = False if not sg.existed: sg = self.client.create_storage_group(host.name) is_new_sg = True return (sg, is_new_sg) def assure_host_access(self, storage_group, host, volume, is_new_sg): """Assures that `host` is connected to the Array. 
It first registers initiators to `storage_group` then add `volume` to `storage_group`. :param storage_group: object of storops storage group to which the host access is registered. :param host: `common.Host` object with initiator information. :param volume: `common.Volume` object with volume information. :param is_new_sg: flag indicating whether the `storage_group` is newly created or not. """ if not self.config.initiator_auto_registration: if is_new_sg: # Invoke connect_host on storage group to register all # host information. # Call connect_host only once when sg is newly created. storage_group.connect_host(host.name) else: self.auto_register_initiator(storage_group, host) return self.client.add_lun_to_sg( storage_group, self.client.get_lun(lun_id=volume.vnx_lun_id), self.max_retries) def auto_register_initiator(self, storage_group, host): """Registers the initiators to storage group. :param storage_group: storage group object to which the initiator is registered. :param host: information of initiator, etc. The behavior depends on the combination of the registered initiators of SG and the configured white list of the ports (that is `self.config.io_port_list`). #. Register all non-registered initiators to `self.allowed_ports`. #. For registered initiators, if the white list is configured, register them to `self.allowed_ports` except the ones which are already registered. Note that `self.allowed_ports` comprises of all iSCSI/FC ports on array or the valid ports of the white list if `self.config.io_port_list` is configured. """ host_initiators = set(host.initiators) sg_initiators = set(storage_group.initiator_uid_list) unreg_initiators = host_initiators - sg_initiators initiator_port_map = {unreg_id: set(self.allowed_ports) for unreg_id in unreg_initiators} if self.config.io_port_list is not None: reg_initiators = host_initiators & sg_initiators for reg_id in reg_initiators: ports_to_reg = (set(self.allowed_ports) - set(storage_group.get_ports(reg_id))) if ports_to_reg: initiator_port_map[reg_id] = ports_to_reg LOG.debug('Ports [%(ports)s] in white list will be bound ' 'to the registered initiator: %(reg_id)s', {'ports': ports_to_reg, 'reg_id': reg_id}) self.client.register_initiator(storage_group, host, initiator_port_map) def prepare_target_data(self, storage_group, host, volume, hlu): raise NotImplementedError() def initialize_connection(self, cinder_volume, connector): """Initializes the connection to `cinder_volume`.""" volume = common.Volume( cinder_volume.name, cinder_volume.id, vnx_lun_id=self.client.get_lun_id(cinder_volume)) return self._initialize_connection(volume, connector) def _initialize_connection(self, volume, connector): """Helps to initialize the connection. To share common codes with initialize_connection_snapshot. :param volume: `common.Volume` object with volume information. :param connector: connector information from Nova. """ host = self.build_host(connector) sg, is_new_sg = self.assure_storage_group(host) hlu = self.assure_host_access(sg, host, volume, is_new_sg) return self.prepare_target_data(sg, host, volume, hlu) def terminate_connection(self, cinder_volume, connector): """Terminates the connection to `cinder_volume`.""" volume = common.Volume( cinder_volume.name, cinder_volume.id, vnx_lun_id=self.client.get_lun_id(cinder_volume)) return self._terminate_connection(volume, connector) def _terminate_connection(self, volume, connector): """Helps to terminate the connection. To share common codes with terminate_connection_snapshot. 
:param volume: `common.Volume` object with volume information. :param connector: connector information from Nova. """ # None `connector` means force detach the volume from all hosts. is_force_detach = False if connector is None: LOG.info('Force detaching volume %s from all hosts.', volume.name) is_force_detach = True host = None if is_force_detach else self.build_host(connector) sg_list = (self.client.filter_sg(volume.vnx_lun_id) if is_force_detach else [self.client.get_storage_group(host.name)]) return_data = None for sg in sg_list: self.remove_host_access(volume, host, sg) # build_terminate_connection return data should go before # terminate_connection_cleanup. The storage group may be deleted in # the terminate_connection_cleanup which is needed during getting # return data self.update_storage_group_if_required(sg) if not is_force_detach: # force detach will return None return_data = self.build_terminate_connection_return_data( host, sg) self.terminate_connection_cleanup(host, sg) return return_data def update_storage_group_if_required(self, sg): if sg.existed and self.destroy_empty_sg: utils.update_res_with_poll(sg) def remove_host_access(self, volume, host, sg): """Removes the host access from `volume`. :param volume: `common.Volume` object with volume information. :param host: `common.Host` object with host information. :param sg: object of `storops` storage group. """ lun = self.client.get_lun(lun_id=volume.vnx_lun_id) if not sg.existed: # `host` is None when force-detach if host is not None: # Only print this warning message when normal detach LOG.warning("Storage Group %s is not found. " "Nothing can be done in terminate_connection().", host.name) else: try: sg.detach_alu(lun) except storops_ex.VNXDetachAluNotFoundError: LOG.warning("Volume %(vol)s is not in Storage Group %(sg)s.", {'vol': volume.name, 'sg': sg.name}) def build_terminate_connection_return_data(self, host, sg): raise NotImplementedError() def terminate_connection_cleanup(self, host, sg): if not sg.existed: return if self.destroy_empty_sg: if not self.client.sg_has_lun_attached(sg): self._destroy_empty_sg(host, sg) def _destroy_empty_sg(self, host, sg): try: LOG.info("Storage Group %s is empty.", sg.name) sg.disconnect_host(sg.name) sg.delete() # Update sg with poll makes the one in client.sg_cache is with # latest status which is un-exist. This makes sure the sg is # created during next attaching. 
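            # In other words: refresh the storage group with polling so the
            # cached entry in client.sg_cache shows it as no longer existing,
            # ensuring a fresh storage group is created on the next attach.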
self.update_storage_group_if_required(sg) if host is not None and self.itor_auto_dereg: # `host` is None when force-detach self._deregister_initiator(host) except storops_ex.StoropsException: LOG.warning("Failed to destroy Storage Group %s.", sg.name) try: sg.connect_host(sg.name) except storops_ex.StoropsException: LOG.warning("Failed to connect host %(host)s " "back to storage group %(sg)s.", {'host': sg.name, 'sg': sg.name}) def _deregister_initiator(self, host): initiators = host.initiators try: self.client.deregister_initiators(initiators) except storops_ex: LOG.warning("Failed to deregister the initiators %s", initiators) def _is_allowed_port(self, port): return port in self.allowed_ports def _build_provider_location( self, lun_id=None, lun_type=None, base_lun_name=None): return utils.build_provider_location( system=self.serial_number, lun_type=lun_type, lun_id=lun_id, base_lun_name=base_lun_name, version=self.VERSION) def update_consistencygroup(self, context, group, add_volumes, remove_volumes): return self.do_update_cg(group.id, add_volumes, remove_volumes) def do_update_cg(self, cg_name, add_volumes, remove_volumes): cg = self.client.get_cg(name=cg_name) lun_ids_to_add = [self.client.get_lun_id(volume) for volume in add_volumes] lun_ids_to_remove = [self.client.get_lun_id(volume) for volume in remove_volumes] self.client.update_consistencygroup(cg, lun_ids_to_add, lun_ids_to_remove) return ({'status': fields.ConsistencyGroupStatus.AVAILABLE}, None, None) def create_export_snapshot(self, context, snapshot, connector): self.client.create_mount_point(snapshot.volume_name, utils.construct_smp_name(snapshot.id)) def remove_export_snapshot(self, context, snapshot): self.client.delete_lun(utils.construct_smp_name(snapshot.id)) def initialize_connection_snapshot(self, snapshot, connector, **kwargs): """Initializes connection for snapshot mount point.""" smp_name = utils.construct_smp_name(snapshot.id) self.client.attach_snapshot(smp_name, snapshot.name) lun = self.client.get_lun(name=smp_name) volume = common.Volume(smp_name, snapshot.id, vnx_lun_id=lun.lun_id) return self._initialize_connection(volume, connector) def terminate_connection_snapshot(self, snapshot, connector, **kwargs): """Terminates connection for snapshot mount point.""" smp_name = utils.construct_smp_name(snapshot.id) lun = self.client.get_lun(name=smp_name) volume = common.Volume(smp_name, snapshot.id, vnx_lun_id=lun.lun_id) connection_info = self._terminate_connection(volume, connector) self.client.detach_snapshot(smp_name) return connection_info def get_pool_name(self, volume): return self.client.get_pool_name(volume.name) def update_migrated_volume(self, context, volume, new_volume, original_volume_status=None): """Updates metadata after host-assisted migration.""" metadata = utils.get_metadata(volume) metadata['snapcopy'] = ('True' if utils.is_volume_smp(new_volume) else 'False') return {'provider_location': new_volume.provider_location, 'metadata': metadata} def create_group(self, context, group): rep_update = self.create_group_replication(group) model_update = self.create_consistencygroup(context, group) model_update.update(rep_update) return model_update def delete_group(self, context, group, volumes): self.delete_group_replication(group) return self.delete_consistencygroup(context, group, volumes) def create_group_snapshot(self, context, group_snapshot, snapshots): """Creates a group_snapshot.""" return self.do_create_cgsnap(group_snapshot.group_id, group_snapshot.id, snapshots) def delete_group_snapshot(self, 
context, group_snapshot, snapshots): """Deletes a group snapshot.""" return self.do_delete_cgsnap( group_snapshot.group_id, group_snapshot.id, group_snapshot.status, snapshots) def create_group_from_group_snapshot(self, context, group, volumes, group_snapshot, snapshots): """Creates a group from a group snapshot.""" return self.do_create_cg_from_cgsnap(group.id, group.host, volumes, group_snapshot.id, snapshots) def update_group(self, context, group, add_volumes=None, remove_volumes=None): """Updates a group.""" # 1. First make sure group and volumes have same # replication extra-specs and replications status. for volume in (add_volumes + remove_volumes): utils.check_type_matched(volume) # 2. Secondly, make sure replication status must be enabled for # replication-enabled group, utils.check_rep_status_matched(group) self.add_volumes_to_group_replication(group, add_volumes) self.remove_volumes_from_group_replication(group, remove_volumes) return self.do_update_cg(group.id, add_volumes, remove_volumes) def create_cloned_group(self, context, group, volumes, source_group, source_vols): """Clones a group""" return self.do_clone_cg(group.id, group.host, volumes, source_group.id, source_vols) class ISCSIAdapter(CommonAdapter): def __init__(self, configuration, active_backend_id): super(ISCSIAdapter, self).__init__(configuration, active_backend_id) self.iscsi_initiator_map = None def do_setup(self): super(ISCSIAdapter, self).do_setup() self.iscsi_initiator_map = self.config.iscsi_initiators self.allowed_ports = self.validate_ports( self.client.get_iscsi_targets(), self.config.io_port_list) LOG.debug('[%(group)s] allowed_ports are: [%(ports)s].', {'group': self.config.config_group, 'ports': ','.join( [port.display_name for port in self.allowed_ports])}) def _normalize_config(self): super(ISCSIAdapter, self)._normalize_config() # Check option `iscsi_initiators`. # Set to None if it is not set or set to an empty string. # Raise error if it is set to an empty string. iscsi_initiators = self.config.iscsi_initiators option = '[{group}] iscsi_initiators'.format( group=self.config.config_group) if iscsi_initiators is None: return elif len(iscsi_initiators.strip()) == 0: raise exception.InvalidConfigurationValue(option=option, value=iscsi_initiators) else: try: self.config.iscsi_initiators = json.loads(iscsi_initiators) except ValueError: raise exception.InvalidConfigurationValue( option=option, value=iscsi_initiators) if not isinstance(self.config.iscsi_initiators, dict): raise exception.InvalidConfigurationValue( option=option, value=iscsi_initiators) LOG.info("[%(group)s] iscsi_initiators is configured: %(value)s", {'group': self.config.config_group, 'value': self.config.iscsi_initiators}) def update_volume_stats(self): """Retrieves stats info.""" stats = super(ISCSIAdapter, self).update_volume_stats() self.allowed_ports = self.validate_ports( self.client.get_iscsi_targets(), self.config.io_port_list) backend_name = self.config.safe_get('volume_backend_name') stats['volume_backend_name'] = backend_name or 'VNXISCSIDriver' return stats def _build_port_str(self, port): return '%(sp)s-%(pid)s-%(vpid)s' % { 'sp': 'A' if port.sp == storops.VNXSPEnum.SP_A else 'B', 'pid': port.port_id, 'vpid': port.vport_id} def build_host(self, connector): return common.Host(connector['host'], [connector['initiator']], ip=connector['ip']) def arrange_io_ports(self, reg_port_white_list, iscsi_initiator_ips): """Arranges IO ports. Arranges the registered IO ports and puts a pingable port in the first place as the main portal. 
""" random.shuffle(reg_port_white_list) random.shuffle(iscsi_initiator_ips) main_portal_index = None for index, port in enumerate(reg_port_white_list): for initiator_ip in iscsi_initiator_ips: if self.client.ping_node(port, initiator_ip): main_portal_index = index break else: # For loop fell through without finding a pingable initiator. continue break if main_portal_index is not None: reg_port_white_list.insert( 0, reg_port_white_list.pop(main_portal_index)) return reg_port_white_list def prepare_target_data(self, storage_group, host, volume, hlu): """Prepares the target data for Nova. :param storage_group: object of `storops` storage group. :param host: `common.Host` object with initiator information. :param volume: `common.Volume` object with volume information. :param hlu: the HLU number assigned to volume. """ target_io_ports = utils.sift_port_white_list( self.allowed_ports, storage_group.get_ports(host.initiators[0])) if not target_io_ports: msg = (_('Failed to find available iSCSI targets for %s.') % storage_group.name) raise exception.VolumeBackendAPIException(data=msg) if self.iscsi_initiator_map and host.name in self.iscsi_initiator_map: iscsi_initiator_ips = list(self.iscsi_initiator_map[host.name]) target_io_ports = self.arrange_io_ports(target_io_ports, iscsi_initiator_ips) iscsi_target_data = common.ISCSITargetData(volume.id, False) iqns = [port.wwn for port in target_io_ports] portals = ["%s:3260" % port.ip_address for port in target_io_ports] iscsi_target_data = common.ISCSITargetData( volume.id, True, iqn=iqns[0], iqns=iqns, portal=portals[0], portals=portals, lun=hlu, luns=[hlu] * len(target_io_ports)) LOG.debug('Prepared iSCSI targets for %(host)s: %(target_data)s.', {'host': host.name, 'target_data': iscsi_target_data}) return iscsi_target_data.to_dict() def build_terminate_connection_return_data(self, host, sg): return None class FCAdapter(CommonAdapter): def __init__(self, configuration, active_backend_id): super(FCAdapter, self).__init__(configuration, active_backend_id) self.lookup_service = None def do_setup(self): super(FCAdapter, self).do_setup() self.lookup_service = zm_utils.create_lookup_service() self.allowed_ports = self.validate_ports( self.client.get_fc_targets(), self.config.io_port_list) LOG.debug('[%(group)s] allowed_ports are: [%(ports)s].', {'group': self.config.config_group, 'ports': ','.join( [port.display_name for port in self.allowed_ports])}) def update_volume_stats(self): """Retrieves stats info.""" stats = super(FCAdapter, self).update_volume_stats() backend_name = self.config.safe_get('volume_backend_name') stats['volume_backend_name'] = backend_name or 'VNXFCDriver' return stats def _build_port_str(self, port): return '%(sp)s-%(pid)s' % { 'sp': 'A' if port.sp == storops.VNXSPEnum.SP_A else 'B', 'pid': port.port_id} def build_host(self, connector): if 'wwnns' not in connector or 'wwpns' not in connector: msg = _('Host %s has no FC initiators') % connector['host'] LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) wwnns = connector['wwnns'] wwpns = connector['wwpns'] wwns = [(node + port).upper() for (node, port) in zip(wwnns, wwpns)] # WWNS is like '20000090FA534CD110000090FA534CD1', convert it to # '20:00:00:90:FA:53:4C:D1:10:00:00:90:FA:53:4C:D1' # Note that use // division operator due to the change behavior of # / division operator in Python 3. 
wwns = [re.sub(r'\S\S', lambda m: m.group(0) + ':', wwn, len(wwn) // 2 - 1) for wwn in wwns] return common.Host(connector['host'], wwns, wwpns=wwpns) def prepare_target_data(self, storage_group, host, volume, hlu): """Prepares the target data for Nova. :param storage_group: object of `storops` storage group. :param host: `common.Host` object with initiator information. :param volume: `common.Volume` object with volume information. :param hlu: the HLU number assigned to volume. """ if self.lookup_service is None: registed_ports = [] for wwn in host.initiators: registed_ports.extend(storage_group.get_ports(wwn)) reg_port_white_list = utils.sift_port_white_list( self.allowed_ports, registed_ports) if not reg_port_white_list: msg = (_('Failed to find available FC targets for %s.') % storage_group.name) raise exception.VolumeBackendAPIException(data=msg) target_wwns = [utils.truncate_fc_port_wwn(port.wwn) for port in reg_port_white_list] return common.FCTargetData(volume.id, True, wwn=target_wwns, lun=hlu).to_dict() else: target_wwns, initiator_target_map = ( self._get_tgt_list_and_initiator_tgt_map( storage_group, host, True)) return common.FCTargetData( volume.id, True, wwn=target_wwns, lun=hlu, initiator_target_map=initiator_target_map).to_dict() def update_storage_group_if_required(self, sg): if sg.existed and (self.destroy_empty_sg or self.lookup_service): utils.update_res_with_poll(sg) def build_terminate_connection_return_data(self, host, sg): conn_info = {'driver_volume_type': 'fibre_channel', 'data': {}} if self.lookup_service is None: return conn_info if not sg.existed or self.client.sg_has_lun_attached(sg): return conn_info itor_tgt_map = self._get_initiator_tgt_map(sg, host, False) conn_info['data']['initiator_target_map'] = itor_tgt_map return conn_info def _get_initiator_tgt_map( self, sg, host, allowed_port_only=False): return self._get_tgt_list_and_initiator_tgt_map( sg, host, allowed_port_only)[1] def _get_tgt_list_and_initiator_tgt_map( self, sg, host, allowed_port_only=False): fc_initiators = host.wwpns fc_ports_wwns = list(map(utils.truncate_fc_port_wwn, self._get_wwns_of_online_fc_ports( sg, allowed_port_only=allowed_port_only))) mapping = ( self.lookup_service. get_device_mapping_from_network(fc_initiators, fc_ports_wwns)) return utils.convert_to_tgt_list_and_itor_tgt_map(mapping) def _get_wwns_of_online_fc_ports(self, sg, allowed_port_only=False): ports = sg.fc_ports if allowed_port_only: ports = [po for po in ports if self._is_allowed_port(po)] fc_port_wwns = self.client.get_wwn_of_online_fc_ports(ports) return fc_port_wwns ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/drivers/dell_emc/vnx/client.py0000664000175000017500000007041300000000000023534 0ustar00zuulzuul00000000000000# Copyright (c) 2016 EMC Corporation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
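# Illustrative sketch (hypothetical WWNs and LUN number) of the
# 'fibre_channel' connection info that FCAdapter above assembles when a
# lookup service is available: 'initiator_target_map' maps each host
# initiator WWPN to the array target WWNs visible to it.
#
# conn_info = {
#     'driver_volume_type': 'fibre_channel',
#     'data': {
#         'target_discovered': True,
#         'target_lun': 1,
#         'target_wwn': ['5006016008600195', '5006016108600195'],
#         'initiator_target_map': {
#             '1122334455667788': ['5006016008600195',
#                                  '5006016108600195'],
#         },
#     },
# }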
import time from oslo_log import log as logging from oslo_utils import excutils from oslo_utils import importutils from cinder import exception from cinder.i18n import _ from cinder import utils as cinder_utils from cinder.volume.drivers.dell_emc.vnx import common from cinder.volume.drivers.dell_emc.vnx import const from cinder.volume.drivers.dell_emc.vnx import utils storops = importutils.try_import('storops') if storops: from storops import exception as storops_ex from storops.lib import tasks as storops_tasks LOG = logging.getLogger(__name__) class Condition(object): """Defines some condition checker which are used in wait_until, .etc.""" @staticmethod def is_lun_io_ready(lun): utils.update_res_without_poll(lun) if not lun.existed: return False lun_state = lun.state if lun_state == common.LUNState.INITIALIZING: return False elif lun_state in [common.LUNState.READY, common.LUNState.FAULTED]: return lun.operation == 'None' else: # Quick exit wait_until when the lun is other state to avoid # long-time timeout. msg = (_('Volume %(name)s was created in VNX, ' 'but in %(state)s state.') % { 'name': lun.name, 'state': lun_state}) raise exception.VolumeBackendAPIException(data=msg) @staticmethod def is_object_existed(vnx_obj): utils.update_res_without_poll(vnx_obj) return vnx_obj.existed @staticmethod def is_lun_ops_ready(lun): utils.update_res_without_poll(lun) return 'None' == lun.operation @staticmethod def is_lun_expanded(lun, new_size): utils.update_res_without_poll(lun) return new_size == lun.total_capacity_gb @staticmethod def is_mirror_synced(mirror): utils.update_res_without_poll(mirror) return ( mirror.secondary_image.state == storops.VNXMirrorImageState.SYNCHRONIZED) class Client(object): def __init__(self, ip, username, password, scope, naviseccli, sec_file, queue_path=None): self.naviseccli = naviseccli if not storops: msg = _('storops Python library is not installed.') raise exception.VolumeBackendAPIException(message=msg) self.vnx = storops.VNXSystem(ip=ip, username=username, password=password, scope=scope, naviseccli=naviseccli, sec_file=sec_file) self.sg_cache = {} if queue_path: self.queue = storops_tasks.PQueue(path=queue_path) self.queue.start() LOG.info('PQueue[%s] starts now.', queue_path) def create_lun(self, pool, name, size, provision, tier, cg_id=None, ignore_thresholds=False, qos_specs=None): pool = self.vnx.get_pool(name=pool) try: with pool.with_no_poll(): lun = pool.create_lun(lun_name=name, size_gb=size, provision=provision, tier=tier, ignore_thresholds=ignore_thresholds) except storops_ex.VNXLunNameInUseError: lun = self.vnx.get_lun(name=name) utils.wait_until(condition=Condition.is_lun_io_ready, lun=lun) if cg_id: cg = self.vnx.get_cg(name=cg_id) cg.add_member(lun) ioclasses = self.get_ioclass(qos_specs) if ioclasses: policy, is_new = self.get_running_policy() for one in ioclasses: one.add_lun(lun) policy.add_class(one) if is_new: policy.run_policy() return lun def get_lun(self, name=None, lun_id=None): return self.vnx.get_lun(name=name, lun_id=lun_id) def get_lun_id(self, volume): """Retrieves the LUN ID of volume.""" if volume.provider_location: return int(utils.extract_provider_location( volume.provider_location, 'id')) else: # In some cases, cinder will not update volume info in DB with # provider_location returned by us. We need to retrieve the id # from array. For example, cinder backup-create doesn't use the # provider_location returned from create_cloned_volume. 
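# Hypothetical example of the fast path above, assuming the '|'-separated
# 'key^value' provider_location layout used by this driver (values are
# invented for illustration):
#   provider_location = 'system^FNM00000000001|type^lun|id^19|version^07.33'
#   utils.extract_provider_location(provider_location, 'id')  # -> '19'
# The returned string is then cast to int before being handed back.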
lun = self.get_lun(name=volume.name) return lun.lun_id def delete_lun(self, name, force=False, snap_copy=False): """Deletes a LUN or mount point.""" lun = self.get_lun(name=name) try: # Do not delete the snapshots of the lun. lun.delete(force_detach=True, detach_from_sg=force) if snap_copy: snap = self.vnx.get_snap(name=snap_copy) snap.delete() except storops_ex.VNXLunNotFoundError as ex: LOG.info("LUN %(name)s is already deleted. This message can " "be safely ignored. Message: %(msg)s", {'name': name, 'msg': ex.message}) def cleanup_async_lun(self, name, force=False): """Helper method to cleanup stuff for async migration. .. note:: Only call it when VNXLunUsedByFeatureError occurs """ lun = self.get_lun(name=name) self.cleanup_migration(src_id=lun.lun_id) lun.delete(force_detach=True, detach_from_sg=force) def delay_delete_lun(self, name): """Delay the deletion by putting it in a storops queue.""" self.queue.put(self.vnx.delete_lun, name=name) LOG.info("VNX object has been added to queue for later" " deletion: %s", name) @cinder_utils.retry(const.VNXLunPreparingError, retries=1, backoff_rate=1) def expand_lun(self, name, new_size, poll=True): lun = self.get_lun(name=name) try: lun.poll = poll lun.expand(new_size, ignore_thresholds=True) except storops_ex.VNXLunExpandSizeError as ex: LOG.warning("LUN %(name)s is already expanded. " "Message: %(msg)s.", {'name': name, 'msg': ex.message}) except storops_ex.VNXLunPreparingError as ex: # The error means the operation cannot be performed because the LUN # is 'Preparing'. Wait for a while so that the LUN may get out of # the transitioning state. with excutils.save_and_reraise_exception(): LOG.warning("LUN %(name)s is not ready for extension: %(msg)s", {'name': name, 'msg': ex.message}) utils.wait_until(Condition.is_lun_ops_ready, lun=lun) utils.wait_until(Condition.is_lun_expanded, lun=lun, new_size=new_size) def modify_lun(self): pass @cinder_utils.retry(retry_param=const.VNXTargetNotReadyError, interval=15, retries=5, backoff_rate=1) def migrate_lun(self, src_id, dst_id, rate=const.MIGRATION_RATE_HIGH): src = self.vnx.get_lun(lun_id=src_id) src.migrate(dst_id, rate) def session_finished(self, src_lun): session = self.vnx.get_migration_session(src_lun) if not session.existed: return True elif session.current_state in ('FAULTED', 'STOPPED'): LOG.warning('Session is %s, need to handled then.', session.current_state) return True else: return False def verify_migration(self, src_id, dst_id, dst_wwn): """Verify whether migration session finished successfully. :param src_id: source LUN id :param dst_id: destination LUN id :param dst_wwn: destination LUN WWN :returns Boolean: True or False """ src_lun = self.vnx.get_lun(lun_id=src_id) # Sleep 30 seconds to make sure the session starts on the VNX. time.sleep(common.INTERVAL_30_SEC) utils.wait_until(condition=self.session_finished, interval=common.INTERVAL_30_SEC, src_lun=src_lun) new_lun = self.vnx.get_lun(lun_id=dst_id) new_wwn = new_lun.wwn if not new_wwn or new_wwn != dst_wwn: return True else: return False def cleanup_migration(self, src_id, dst_id=None): """Invoke when migration meets error. 
:param src_id: source LUN id :param dst_id: destination LUN id """ # if migration session is still there # we need to cancel the session session = self.vnx.get_migration_session(src_id) src_lun = self.vnx.get_lun(lun_id=src_id) if session.existed: LOG.warning('Cancelling migration session: ' '%(src_id)s -> %(dst_id)s.', {'src_id': src_id, 'dst_id': dst_id}) try: src_lun.cancel_migrate() except storops_ex.VNXLunNotMigratingError: LOG.info('The LUN is not migrating or completed, ' 'this message can be safely ignored') except (storops_ex.VNXLunSyncCompletedError, storops_ex.VNXMigrationError): # Wait until session finishes self.verify_migration(src_id, session.dest_lu_id, None) def create_snapshot(self, lun_id, snap_name, keep_for=None): """Creates a snapshot.""" lun = self.get_lun(lun_id=lun_id) try: lun.create_snap( snap_name, allow_rw=True, auto_delete=False, keep_for=keep_for) except storops_ex.VNXSnapNameInUseError as ex: LOG.warning('Snapshot %(name)s already exists. ' 'Message: %(msg)s', {'name': snap_name, 'msg': ex.message}) def delete_snapshot(self, snapshot_name): """Deletes a snapshot.""" snap = self.vnx.get_snap(name=snapshot_name) try: snap.delete() except storops_ex.VNXSnapNotExistsError as ex: LOG.warning("Snapshot %(name)s may be deleted already. " "Message: %(msg)s", {'name': snapshot_name, 'msg': ex.message}) except storops_ex.VNXDeleteAttachedSnapError as ex: with excutils.save_and_reraise_exception(): LOG.warning("Failed to delete snapshot %(name)s " "which is in use. Message: %(msg)s", {'name': snapshot_name, 'msg': ex.message}) def copy_snapshot(self, snap_name, new_snap_name): snap = self.vnx.get_snap(name=snap_name) snap.copy(new_name=new_snap_name) def create_mount_point(self, lun_name, smp_name): lun = self.vnx.get_lun(name=lun_name) try: return lun.create_mount_point(name=smp_name) except storops_ex.VNXLunNameInUseError as ex: LOG.warning('Mount point %(name)s already exists. ' 'Message: %(msg)s', {'name': smp_name, 'msg': ex.message}) # Ignore the failure that due to retry. return self.vnx.get_lun(name=smp_name) def attach_snapshot(self, smp_name, snap_name): lun = self.vnx.get_lun(name=smp_name) try: lun.attach_snap(snap=snap_name) except storops_ex.VNXSnapAlreadyMountedError as ex: LOG.warning("Snapshot %(snap_name)s is attached to " "snapshot mount point %(smp_name)s already. " "Message: %(msg)s", {'snap_name': snap_name, 'smp_name': smp_name, 'msg': ex.message}) def detach_snapshot(self, smp_name): lun = self.vnx.get_lun(name=smp_name) try: lun.detach_snap() except storops_ex.VNXSnapNotAttachedError as ex: LOG.warning("Snapshot mount point %(smp_name)s is not " "currently attached. 
Message: %(msg)s", {'smp_name': smp_name, 'msg': ex.message}) def modify_snapshot(self, snap_name, allow_rw=None, auto_delete=None, keep_for=None): snap = self.vnx.get_snap(name=snap_name) snap.modify(allow_rw=allow_rw, auto_delete=auto_delete, keep_for=None) def restore_snapshot(self, lun_id, snap_name): lun = self.get_lun(lun_id=lun_id) lun.restore_snap(snap_name) def create_consistency_group(self, cg_name, lun_id_list=None): try: cg = self.vnx.create_cg(name=cg_name, members=lun_id_list) except storops_ex.VNXConsistencyGroupNameInUseError: cg = self.vnx.get_cg(name=cg_name) # Wait until cg is found on VNX, or deletion will fail afterwards utils.wait_until(Condition.is_object_existed, vnx_obj=cg) return cg def delete_consistency_group(self, cg_name): cg = self.vnx.get_cg(cg_name) try: cg.delete() except storops_ex.VNXConsistencyGroupNotFoundError: pass def create_cg_snapshot(self, cg_snap_name, cg_name): cg = self.vnx.get_cg(cg_name) try: snap = cg.create_snap(cg_snap_name, allow_rw=True) except storops_ex.VNXSnapNameInUseError: snap = self.vnx.get_snap(cg_snap_name) utils.wait_until(Condition.is_object_existed, vnx_obj=snap) return snap def delete_cg_snapshot(self, cg_snap_name): self.delete_snapshot(cg_snap_name) def get_serial(self): return self.vnx.serial def get_pools(self): return self.vnx.get_pool() def get_pool(self, name): return self.vnx.get_pool(name=name) def get_iscsi_targets(self, sp=None, port_id=None, vport_id=None): return self.vnx.get_iscsi_port(sp=sp, port_id=port_id, vport_id=vport_id, has_ip=True) def get_fc_targets(self, sp=None, port_id=None): return self.vnx.get_fc_port(sp=sp, port_id=port_id) def get_enablers(self): return self.vnx.get_ndu() def is_fast_enabled(self): return self.vnx.is_auto_tiering_enabled() def is_compression_enabled(self): return self.vnx.is_compression_enabled() def is_dedup_enabled(self): return self.vnx.is_dedup_enabled() def is_fast_cache_enabled(self): return self.vnx.is_fast_cache_enabled() def is_thin_enabled(self): return self.vnx.is_thin_enabled() def is_snap_enabled(self): return self.vnx.is_snap_enabled() def is_mirror_view_enabled(self): return self.vnx.is_mirror_view_sync_enabled() def get_pool_feature(self): return self.vnx.get_pool_feature() def lun_has_snapshot(self, lun): """Checks lun has snapshot. :param lun: instance of VNXLun """ snaps = lun.get_snap() return len(snaps) != 0 def enable_compression(self, lun): """Enables compression on lun. :param lun: instance of VNXLun """ try: lun.enable_compression(ignore_thresholds=True) except storops_ex.VNXCompressionAlreadyEnabledError: LOG.warning("Compression has already been enabled on %s.", lun.name) def get_vnx_enabler_status(self): return common.VNXEnablerStatus( dedup=self.is_dedup_enabled(), compression=self.is_compression_enabled(), thin=self.is_thin_enabled(), fast=self.is_fast_enabled(), snap=self.is_snap_enabled()) def create_storage_group(self, name): try: self.sg_cache[name] = self.vnx.create_sg(name) except storops_ex.VNXStorageGroupNameInUseError as ex: # Ignore the failure due to retry LOG.warning('Storage group %(name)s already exists. ' 'Message: %(msg)s', {'name': name, 'msg': ex.message}) self.sg_cache[name] = self.vnx.get_sg(name=name) return self.sg_cache[name] def get_storage_group(self, name): """Retrieve the storage group by name. Check the storage group instance cache first to save CLI call. If the specified storage group doesn't exist in the cache, try to grab it from CLI. 
:param name: name of the storage group :return: storage group instance """ if name not in self.sg_cache: self.sg_cache[name] = self.vnx.get_sg(name) return self.sg_cache[name] def register_initiator(self, storage_group, host, initiator_port_map): """Registers the initiators of `host` to the `storage_group`. :param storage_group: the storage group object. :param host: the ip and name information of the initiator. :param initiator_port_map: the dict specifying which initiators are bound to which ports. """ for (initiator_id, ports_to_bind) in initiator_port_map.items(): for port in ports_to_bind: try: storage_group.connect_hba(port, initiator_id, host.name, host_ip=host.ip) except storops_ex.VNXStorageGroupError as ex: LOG.warning('Failed to set path to port %(port)s for ' 'initiator %(hba_id)s. Message: %(msg)s', {'port': port, 'hba_id': initiator_id, 'msg': ex.message}) if any(initiator_port_map.values()): LOG.debug('New path set for initiator %(hba_id)s, so update ' 'storage group with poll.', {'hba_id': initiator_id}) utils.update_res_with_poll(storage_group) def ping_node(self, port, ip_address): iscsi_port = self.get_iscsi_targets(sp=port.sp, port_id=port.port_id, vport_id=port.vport_id) try: iscsi_port.ping_node(ip_address, count=1) return True except storops_ex.VNXPingNodeError: return False def add_lun_to_sg(self, storage_group, lun, max_retries): """Adds the `lun` to `storage_group`.""" try: return storage_group.attach_alu(lun, max_retries) except storops_ex.VNXAluAlreadyAttachedError: # Ignore the failure due to retry. return storage_group.get_hlu(lun) except storops_ex.VNXNoHluAvailableError as ex: with excutils.save_and_reraise_exception(): # Reach the max times of retry, fail the attach action. LOG.error('Failed to add %(lun)s into %(sg)s after ' '%(tried)s tries. Reach the max retry times. ' 'Message: %(msg)s', {'lun': lun.lun_id, 'sg': storage_group.name, 'tried': max_retries, 'msg': ex.message}) def get_wwn_of_online_fc_ports(self, ports): """Returns wwns of online fc ports. wwn of a certain port will not be included in the return list when it is not present or down. """ wwns = set() ports_with_all_info = self.vnx.get_fc_port() for po in ports: online_list = [p for p in ports_with_all_info if p == po and p.link_status == 'Up' and p.port_status == 'Online'] wwns.update([p.wwn for p in online_list]) return list(wwns) def sg_has_lun_attached(self, sg): return bool(sg.get_alu_hlu_map()) def deregister_initiators(self, initiators): if not isinstance(initiators, list): initiators = [initiators] for initiator_uid in initiators: try: self.vnx.delete_hba(initiator_uid) except AttributeError: self.vnx.remove_hba(initiator_uid) def update_consistencygroup(self, cg, lun_ids_to_add, lun_ids_to_remove): lun_ids_in_cg = (set([lu.lun_id for lu in cg.lun_list]) if cg.lun_list else set()) # lun_ids_to_add and lun_ids_to_remove never overlap. lun_ids_updated = ((lun_ids_in_cg | set(lun_ids_to_add)) - set(lun_ids_to_remove)) if lun_ids_updated: cg.replace_member(*[self.get_lun(lun_id=lun_id) for lun_id in lun_ids_updated]) else: # Need to remove all LUNs from cg. However, replace_member cannot # handle empty list. So use delete_member. 
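# Worked example of the membership arithmetic above (hypothetical LUN ids):
#   lun_ids_in_cg = {1, 2, 3}, lun_ids_to_add = {4}, lun_ids_to_remove = {2, 3}
#   ({1, 2, 3} | {4}) - {2, 3} == {1, 4}      -> replace_member(lun 1, lun 4)
#   ({1, 2, 3} | set()) - {1, 2, 3} == set()  -> empty, so fall through to
#                                                delete_member below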
cg.delete_member(*[self.get_lun(lun_id=lun_id) for lun_id in lun_ids_in_cg]) def get_cg(self, name): return self.vnx.get_cg(name=name) def get_available_ip(self): return self.vnx.alive_sp_ip def get_mirror(self, mirror_name): return self.vnx.get_mirror_view(mirror_name) def create_mirror(self, mirror_name, primary_lun_id): src_lun = self.vnx.get_lun(lun_id=primary_lun_id) try: mv = self.vnx.create_mirror_view(mirror_name, src_lun) except storops_ex.VNXMirrorNameInUseError: mv = self.vnx.get_mirror_view(mirror_name) return mv def delete_mirror(self, mirror_name): mv = self.vnx.get_mirror_view(mirror_name) try: mv.delete() except storops_ex.VNXMirrorNotFoundError: pass def add_image(self, mirror_name, sp_ip, secondary_lun_id): mv = self.vnx.get_mirror_view(mirror_name) mv.add_image(sp_ip, secondary_lun_id) # Secondary image info usually did not appear, so # here add a poll to update. utils.update_res_with_poll(mv) utils.wait_until(Condition.is_mirror_synced, mirror=mv) def remove_image(self, mirror_name): mv = self.vnx.get_mirror_view(mirror_name) mv.remove_image() def fracture_image(self, mirror_name): mv = self.vnx.get_mirror_view(mirror_name) mv.fracture_image() def sync_image(self, mirror_name): mv = self.vnx.get_mirror_view(mirror_name) mv.sync_image() utils.wait_until(Condition.is_mirror_synced, mirror=mv) def promote_image(self, mirror_name): mv = self.vnx.get_mirror_view(mirror_name) mv.promote_image() def create_mirror_group(self, group_name): try: mg = self.vnx.create_mirror_group(group_name) except storops_ex.VNXMirrorGroupNameInUseError: mg = self.vnx.get_mirror_group(group_name) return mg def delete_mirror_group(self, group_name): mg = self.vnx.get_mirror_group(group_name) try: mg.delete() except storops_ex.VNXMirrorGroupNotFoundError: LOG.info('Mirror group %s was already deleted.', group_name) def add_mirror(self, group_name, mirror_name): mg = self.vnx.get_mirror_group(group_name) mv = self.vnx.get_mirror_view(mirror_name) try: mg.add_mirror(mv) except storops_ex.VNXMirrorGroupAlreadyMemberError: LOG.info('Mirror %(mirror)s is already a member of %(group)s', {'mirror': mirror_name, 'group': group_name}) return mg def remove_mirror(self, group_name, mirror_name): mg = self.vnx.get_mirror_group(group_name) mv = self.vnx.get_mirror_view(mirror_name) try: mg.remove_mirror(mv) except storops_ex.VNXMirrorGroupMirrorNotMemberError: LOG.info('Mirror %(mirror)s is not a member of %(group)s', {'mirror': mirror_name, 'group': group_name}) def promote_mirror_group(self, group_name): mg = self.vnx.get_mirror_group(group_name) try: mg.promote_group() except storops_ex.VNXMirrorGroupAlreadyPromotedError: LOG.info('Mirror group %s was already promoted.', group_name) return mg def sync_mirror_group(self, group_name): mg = self.vnx.get_mirror_group(group_name) mg.sync_group() def fracture_mirror_group(self, group_name): mg = self.vnx.get_mirror_group(group_name) mg.fracture_group() def get_pool_name(self, lun_name): lun = self.get_lun(name=lun_name) utils.update_res_without_poll(lun) return lun.pool_name def get_ioclass(self, qos_specs): ioclasses = [] if qos_specs is not None: prefix = qos_specs['id'] max_bws = qos_specs[common.QOS_MAX_BWS] max_iops = qos_specs[common.QOS_MAX_IOPS] if max_bws: name = '%(prefix)s-bws-%(max)s' % { 'prefix': prefix, 'max': max_bws} class_bws = self.vnx.get_ioclass(name=name) if not class_bws.existed: class_bws = self.create_ioclass_bws(name, max_bws) ioclasses.append(class_bws) if max_iops: name = '%(prefix)s-iops-%(max)s' % { 'prefix': prefix, 'max': max_iops} 
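# Illustrative example of the IO class naming above, assuming a QoS spec
# with id 'qos01', maxBWS of 100 and maxIOPS of 5000 (hypothetical values):
# the bandwidth class is looked up or created as 'qos01-bws-100' and the
# IOPS class below as 'qos01-iops-5000'.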
class_iops = self.vnx.get_ioclass(name=name) if not class_iops.existed: class_iops = self.create_ioclass_iops(name, max_iops) ioclasses.append(class_iops) return ioclasses def create_ioclass_iops(self, name, max_iops): """Creates a ioclass by IOPS.""" max_iops = int(max_iops) ctrl_method = storops.VNXCtrlMethod( method=storops.VNXCtrlMethod.LIMIT_CTRL, metric='tt', value=max_iops) ioclass = self.vnx.create_ioclass(name=name, iotype='rw', ctrlmethod=ctrl_method) return ioclass def create_ioclass_bws(self, name, max_bws): """Creates a ioclass by bandwidth in MiB.""" max_bws = int(max_bws) ctrl_method = storops.VNXCtrlMethod( method=storops.VNXCtrlMethod.LIMIT_CTRL, metric='bw', value=max_bws) ioclass = self.vnx.create_ioclass(name=name, iotype='rw', ctrlmethod=ctrl_method) return ioclass def create_policy(self, policy_name): """Creates the policy and starts it.""" policy = self.vnx.get_policy(name=policy_name) if not policy.existed: LOG.info('Creating the policy: %s', policy_name) policy = self.vnx.create_policy(name=policy_name) return policy def get_running_policy(self): """Returns the only running/measuring policy on VNX. .. note: VNX only allows one running policy. """ policies = self.vnx.get_policy() policies = list(filter(lambda p: p.state == "Running" or p.state == "Measuring", policies)) if len(policies) >= 1: return policies[0], False else: return self.create_policy("vnx_policy"), True def add_lun_to_ioclass(self, ioclass_name, lun_id): ioclass = self.vnx.get_ioclass(name=ioclass_name) ioclass.add_lun(lun_id) def filter_sg(self, attached_lun_id): return self.vnx.get_sg().shadow_copy(attached_lun=attached_lun_id) def set_max_luns_per_sg(self, max_luns): """Sets max LUNs per storage group.""" storops.vnx.resource.sg.VNXStorageGroup.set_max_luns_per_sg(max_luns) LOG.info('Set max LUNs per storage group to %s.', max_luns) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/drivers/dell_emc/vnx/common.py0000664000175000017500000004664500000000000023560 0ustar00zuulzuul00000000000000# Copyright (c) 2016 EMC Corporation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ VNX Common Utils """ from oslo_config import cfg from oslo_log import log as logging from oslo_utils import importutils from cinder import exception from cinder.i18n import _ from cinder.volume import configuration from cinder.volume.drivers.dell_emc.vnx import const from cinder.volume import group_types from cinder.volume import volume_types storops = importutils.try_import('storops') CONF = cfg.CONF LOG = logging.getLogger(__name__) DEFAULT_TIMEOUT = 60 * 60 * 24 * 365 INTERVAL_5_SEC = 5 INTERVAL_20_SEC = 20 INTERVAL_30_SEC = 30 INTERVAL_60_SEC = 60 SNAP_EXPIRATION_HOUR = '1h' BACKEND_QOS_CONSUMERS = frozenset(['back-end', 'both']) QOS_MAX_IOPS = 'maxIOPS' QOS_MAX_BWS = 'maxBWS' VNX_OPTS = [ cfg.StrOpt('storage_vnx_authentication_type', default='global', help='VNX authentication scope type. 
' 'By default, the value is global.'), cfg.StrOpt('storage_vnx_security_file_dir', help='Directory path that contains the VNX security file. ' 'Make sure the security file is generated first.'), cfg.StrOpt('naviseccli_path', help='Naviseccli Path.'), cfg.ListOpt('storage_vnx_pool_names', help='Comma-separated list of storage pool names to be used.'), cfg.IntOpt('default_timeout', default=DEFAULT_TIMEOUT, help='Default timeout for CLI operations in minutes. ' 'For example, LUN migration is a typical long ' 'running operation, which depends on the LUN size and ' 'the load of the array. ' 'An upper bound in the specific deployment can be set to ' 'avoid unnecessary long wait. ' 'By default, it is 365 days long.'), cfg.IntOpt('max_luns_per_storage_group', default=255, help='Default max number of LUNs in a storage group.' ' By default, the value is 255.'), cfg.BoolOpt('destroy_empty_storage_group', default=False, help='To destroy storage group ' 'when the last LUN is removed from it. ' 'By default, the value is False.'), # iscsi_initiators is a dict which key is string and value is a list. # This could be a DictOpt. Unfortunately DictOpt doesn't support the value # of list type. cfg.StrOpt('iscsi_initiators', help='Mapping between hostname and ' 'its iSCSI initiator IP addresses.'), cfg.ListOpt('io_port_list', help='Comma separated iSCSI or FC ports ' 'to be used in Nova or Cinder.'), cfg.BoolOpt('initiator_auto_registration', default=False, help='Automatically register initiators. ' 'By default, the value is False.'), cfg.BoolOpt('initiator_auto_deregistration', default=False, help='Automatically deregister initiators after the related ' 'storage group is destroyed. ' 'By default, the value is False.'), cfg.BoolOpt('check_max_pool_luns_threshold', default=False, deprecated_for_removal=True, help='DEPRECATED: Report free_capacity_gb as 0 when the limit ' 'to maximum number of pool LUNs is reached. ' 'By default, the value is False.'), cfg.BoolOpt('force_delete_lun_in_storagegroup', default=True, help='Delete a LUN even if it is in Storage Groups.'), cfg.BoolOpt('ignore_pool_full_threshold', default=False, help='Force LUN creation even if ' 'the full threshold of pool is reached. ' 'By default, the value is False.'), cfg.BoolOpt('vnx_async_migrate', default=True, help='Always use asynchronous migration during volume cloning ' 'and creating from snapshot. As described in ' 'configuration doc, async migration has some ' 'constraints. Besides using metadata, customers could ' 'use this option to disable async migration. Be aware ' 'that `async_migrate` in metadata overrides this ' 'option when both are set. By default, the value is True.' ) ] CONF.register_opts(VNX_OPTS, group=configuration.SHARED_CONF_GROUP) PROTOCOL_FC = 'fc' PROTOCOL_ISCSI = 'iscsi' class ExtraSpecs(object): _provision_key = 'provisioning:type' _tier_key = 'storagetype:tiering' _replication_key = 'replication_enabled' PROVISION_DEFAULT = const.PROVISION_THICK TIER_DEFAULT = None def __init__(self, extra_specs, group_specs=None): self.specs = extra_specs self._provision = self._get_provision() self.provision = self._provision self._tier = self._get_tier() self.tier = self._tier self.apply_default_values() self.group_specs = group_specs if group_specs else {} def apply_default_values(self): self.provision = (ExtraSpecs.PROVISION_DEFAULT if self.provision is None else self.provision) # Can not set Tier when provision is set to deduped. So don't set the # tier default when provision is deduped. 
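# Illustrative cinder.conf snippet for the VNX_OPTS defined above; the
# backend section name, host names, addresses and port ids are all
# hypothetical:
#
#   [vnx_backend_1]
#   storage_vnx_pool_names = pool_1,pool_2
#   iscsi_initiators = {"host1": ["10.0.0.1", "10.0.0.2"], "host2": ["10.0.0.3"]}
#   io_port_list = a-1-0,b-1-0
#   initiator_auto_registration = True
#   destroy_empty_storage_group = False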
if self.provision != storops.VNXProvisionEnum.DEDUPED: self.tier = (ExtraSpecs.TIER_DEFAULT if self.tier is None else self.tier) @classmethod def set_defaults(cls, provision_default, tier_default): cls.PROVISION_DEFAULT = provision_default cls.TIER_DEFAULT = tier_default def _get_provision(self): value = self._parse_to_enum(self._provision_key, storops.VNXProvisionEnum) return value def _get_tier(self): return self._parse_to_enum(self._tier_key, storops.VNXTieringEnum) @property def is_replication_enabled(self): return self.specs.get('replication_enabled', '').lower() == ' true' @property def is_group_replication_enabled(self): return self.group_specs.get( 'consistent_group_replication_enabled', '').lower() == ' true' def _parse_to_enum(self, key, enum_class): value = (self.specs[key] if key in self.specs else None) if value is not None: try: value = enum_class.parse(value) except ValueError: reason = (_("The value %(value)s for key %(key)s in extra " "specs is invalid.") % {'key': key, 'value': value}) raise exception.InvalidVolumeType(reason=reason) return value @classmethod def from_volume(cls, volume): specs = {} type_id = volume['volume_type_id'] if type_id is not None: specs = volume_types.get_volume_type_extra_specs(type_id) return cls(specs) @classmethod def from_group(cls, group): group_specs = {} if group and group.group_type_id: group_specs = group_types.get_group_type_specs( group.group_type_id) return cls(extra_specs={}, group_specs=group_specs) @classmethod def from_volume_type(cls, type): return cls(type['extra_specs']) @classmethod def from_lun(cls, lun): ex = cls({}) ex.provision = lun.provision ex.tier = (lun.tier if lun.provision != storops.VNXProvisionEnum.DEDUPED else None) return ex def match_with_lun(self, lun): ex = ExtraSpecs.from_lun(lun) return (self.provision == ex.provision and self.tier == ex.tier) def validate(self, enabler_status): """Checks whether the extra specs are valid. :param enabler_status: Instance of VNXEnablerStatus """ if "storagetype:pool" in self.specs: LOG.warning("Extra spec key 'storagetype:pool' is obsoleted " "since driver version 5.1.0. This key will be " "ignored.") if (self._provision == storops.VNXProvisionEnum.DEDUPED and self._tier is not None): msg = _("Can not set tiering policy for a deduplicated volume. " "Set the tiering policy on the pool where the " "deduplicated volume locates.") raise exception.InvalidVolumeType(reason=msg) if (self._provision == storops.VNXProvisionEnum.COMPRESSED and not enabler_status.compression_enabled): msg = _("Compression Enabler is not installed. " "Can not create compressed volume.") raise exception.InvalidVolumeType(reason=msg) if (self._provision == storops.VNXProvisionEnum.DEDUPED and not enabler_status.dedup_enabled): msg = _("Deduplication Enabler is not installed. " "Can not create deduplicated volume.") raise exception.InvalidVolumeType(reason=msg) if (self._provision in [storops.VNXProvisionEnum.THIN, storops.VNXProvisionEnum.COMPRESSED, storops.VNXProvisionEnum.DEDUPED] and not enabler_status.thin_enabled): msg = _("ThinProvisioning Enabler is not installed. " "Can not create thin volume.") raise exception.InvalidVolumeType(reason=msg) if (self._tier is not None and not enabler_status.fast_enabled): msg = _("FAST VP Enabler is not installed. 
" "Can not set tiering policy for the volume.") raise exception.InvalidVolumeType(reason=msg) return True def __len__(self): return len(self.specs) def __getitem__(self, key): return self.specs[key] def __iter__(self): return iter(self.specs) def __contains__(self, item): return item in self.specs def __eq__(self, other): if isinstance(other, ExtraSpecs): return self.specs == other.specs elif isinstance(other, dict): return self.specs == other else: return False def __hash__(self): return self.specs.__hash__() class LUNState(object): INITIALIZING = 'Initializing' READY = 'Ready' FAULTED = 'Faulted' class PoolState(object): INITIALIZING = 'Initializing' OFFLINE = 'Offline' DELETING = 'Deleting' VALID_CREATE_LUN_STATE = (INITIALIZING, OFFLINE, DELETING) class VNXEnablerStatus(object): def __init__(self, dedup=False, compression=False, fast=False, thin=False, snap=False): self.dedup_enabled = dedup self.compression_enabled = compression self.fast_enabled = fast self.thin_enabled = thin self.snap_enabled = snap class WaitUtilTimeoutException(exception.VolumeDriverException): """Raised when timeout occurs in wait_until.""" # TODO(Ryan) put this exception under Cinder shared module. pass class Host(object): """The model of a host which acts as an initiator to access the storage.""" def __init__(self, name, initiators, ip=None, wwpns=None): # ip and wwpns are optional. self.name = name if not self.name: raise ValueError(('Name of host cannot be empty.')) self.initiators = initiators if not self.initiators: raise ValueError(_('Initiators of host cannot be empty.')) self.ip = ip self.wwpns = wwpns class Volume(object): """The internal volume which is used to pass in method call.""" def __init__(self, name, id, vnx_lun_id=None): self.name = name self.id = id self.vnx_lun_id = vnx_lun_id class ISCSITargetData(dict): def __init__(self, volume_id, is_discovered, iqn='unknown', iqns=None, portal='unknown', portals=None, lun='unknown', luns=None): data = {'volume_id': volume_id, 'target_discovered': is_discovered, 'target_iqn': iqn, 'target_iqns': iqns, 'target_portal': portal, 'target_portals': portals, 'target_lun': lun, 'target_luns': luns} self['driver_volume_type'] = 'iscsi' self['data'] = data def to_dict(self): """Converts to the dict. It helps serialize and deserialize the data before returning to nova. """ return {key: value for (key, value) in self.items()} class FCTargetData(dict): def __init__(self, volume_id, is_discovered, wwn=None, lun=None, initiator_target_map=None): data = {'volume_id': volume_id, 'target_discovered': is_discovered, 'target_lun': lun, 'target_wwn': wwn, 'initiator_target_map': initiator_target_map} self['driver_volume_type'] = 'fibre_channel' self['data'] = {key: value for key, value in data.items() if value is not None} def to_dict(self): """Converts to the dict. It helps serialize and deserialize the data before returning to nova. 
""" return {key: value for (key, value) in self.items()} class ReplicationDevice(object): def __init__(self, replication_device): self.replication_device = replication_device @property def backend_id(self): return self.replication_device.get('backend_id') @property def san_ip(self): return self.replication_device.get('san_ip') @property def san_login(self): return self.replication_device.get('san_login') @property def san_password(self): return self.replication_device.get('san_password') @property def storage_vnx_authentication_type(self): return self.replication_device.get( 'storage_vnx_authentication_type', 'global') @property def storage_vnx_security_file_dir(self): return self.replication_device.get('storage_vnx_security_file_dir') @property def pool_name(self): return self.replication_device.get('pool_name', None) class ReplicationDeviceList(list): """Replication devices configured in cinder.conf Cinder supports multiple replication_device, while VNX driver only support one replication_device for now. """ def __init__(self, configuration): self.list = [] self.configuration = configuration self._device_map = dict() self.parse_configuration() def parse_configuration(self): if self.configuration.replication_device: for replication_device in self.configuration.replication_device: rd = ReplicationDevice(replication_device) if not rd.backend_id or not rd.san_ip: msg = _('backend_id or san_ip cannot be empty for ' 'replication_device.') raise exception.InvalidInput(reason=msg) self._device_map[rd.backend_id] = rd self.list.append(rd) return self._device_map def get_device(self, backend_id): try: device = self._device_map[backend_id] except KeyError: device = None LOG.warning('Unable to find secondary device named: %s', backend_id) return device @property def devices(self): return self._device_map.values() def __len__(self): return len(self.list) def __iter__(self): self._iter = self.list.__iter__() return self def next(self): return next(self._iter) def __next__(self): return self.next() def __getitem__(self, item): return self.list[item] @classmethod def get_backend_ids(cls, config): """Returns all configured device_id.""" rep_list = cls(config) backend_ids = [] for item in rep_list.devices: backend_ids.append(item.backend_id) return backend_ids class VNXMirrorView(object): def __init__(self, primary_client, secondary_client): self.primary_client = primary_client self.secondary_client = secondary_client def create_mirror(self, name, primary_lun_id): self.primary_client.create_mirror(name, primary_lun_id) def create_secondary_lun(self, pool_name, lun_name, size, provision, tier): return self.secondary_client.create_lun( pool_name, lun_name, size, provision, tier) def delete_secondary_lun(self, lun_name): self.secondary_client.delete_lun(lun_name) def delete_mirror(self, mirror_name): self.primary_client.delete_mirror(mirror_name) def add_image(self, mirror_name, secondary_lun_id): sp_ip = self.secondary_client.get_available_ip() self.primary_client.add_image(mirror_name, sp_ip, secondary_lun_id) def remove_image(self, mirror_name): self.primary_client.remove_image(mirror_name) def fracture_image(self, mirror_name): self.primary_client.fracture_image(mirror_name) def promote_image(self, mirror_name): """Promote the image on the secondary array.""" self.secondary_client.promote_image(mirror_name) def destroy_mirror(self, mirror_name, secondary_lun_name): """Destroy the mirror view's related VNX objects. NOTE: primary lun will not be deleted here. 
:param mirror_name: name of mirror to be destroyed :param secondary_lun_name: name of LUN name """ mv = self.primary_client.get_mirror(mirror_name) if not mv.existed: # We will skip the mirror operations if not existed LOG.warning('Mirror view %s was deleted already.', mirror_name) return self.fracture_image(mirror_name) self.remove_image(mirror_name) self.delete_mirror(mirror_name) self.delete_secondary_lun(lun_name=secondary_lun_name) def create_mirror_group(self, group_name): return self.primary_client.create_mirror_group(group_name) def delete_mirror_group(self, group_name): return self.primary_client.delete_mirror_group(group_name) def add_mirror(self, group_name, mirror_name): return self.primary_client.add_mirror(group_name, mirror_name) def remove_mirror(self, group_name, mirror_name): return self.primary_client.remove_mirror(group_name, mirror_name) def sync_mirror_group(self, group_name): return self.primary_client.sync_mirror_group(group_name) def promote_mirror_group(self, group_name): """Promote the mirror group on the secondary array.""" return self.secondary_client.promote_mirror_group(group_name) def fracture_mirror_group(self, group_name): return self.primary_client.fracture_mirror_group(group_name) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/drivers/dell_emc/vnx/const.py0000664000175000017500000000263300000000000023403 0ustar00zuulzuul00000000000000# Copyright (c) 2016 EMC Corporation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ VNX Constants This module includes re-declaration from storops which directly used by driver in module scope. That's to say: If a constant from storops is used in class level, function signature, module level, a re-declaration is needed in this file to avoid some static import error when storops is not installed. """ from oslo_utils import importutils storops = importutils.try_import('storops') if storops: from storops import exception as storops_ex VNXLunPreparingError = storops_ex.VNXLunPreparingError VNXTargetNotReadyError = storops_ex.VNXTargetNotReadyError MIGRATION_RATE_HIGH = storops.VNXMigrationRate.HIGH PROVISION_THICK = storops.VNXProvisionEnum.THICK else: VNXLunPreparingError = None MIGRATION_RATE_HIGH = None PROVISION_THICK = None VNXTargetNotReadyError = None ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/drivers/dell_emc/vnx/driver.py0000664000175000017500000003646000000000000023555 0ustar00zuulzuul00000000000000# Copyright (c) 2016 EMC Corporation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Cinder Driver for EMC VNX based on CLI.""" from oslo_log import log as logging from cinder import interface from cinder.volume import driver from cinder.volume.drivers.dell_emc.vnx import adapter from cinder.volume.drivers.dell_emc.vnx import common from cinder.volume.drivers.dell_emc.vnx import utils from cinder.zonemanager import utils as zm_utils LOG = logging.getLogger(__name__) @interface.volumedriver class VNXDriver(driver.ManageableVD, driver.ManageableSnapshotsVD, driver.MigrateVD, driver.BaseVD): """Dell EMC Cinder Driver for VNX using CLI. .. code-block:: default Version history: 1.0.0 - Initial driver 2.0.0 - Thick/thin provisioning, robust enhancement 3.0.0 - Array-based Backend Support, FC Basic Support, Target Port Selection for MPIO, Initiator Auto Registration, Storage Group Auto Deletion, Multiple Authentication Type Support, Storage-Assisted Volume Migration, SP Toggle for HA 3.0.1 - Security File Support 4.0.0 - Advance LUN Features (Compression Support, Deduplication Support, FAST VP Support, FAST Cache Support), Storage-assisted Retype, External Volume Management, Read-only Volume, FC Auto Zoning 4.1.0 - Consistency group support 5.0.0 - Performance enhancement, LUN Number Threshold Support, Initiator Auto Deregistration, Force Deleting LUN in Storage Groups, robust enhancement 5.1.0 - iSCSI multipath enhancement 5.2.0 - Pool-aware scheduler support 5.3.0 - Consistency group modification support 6.0.0 - Over subscription support Create consistency group from cgsnapshot support Multiple pools support enhancement Manage/unmanage volume revise White list target ports support Snap copy support Support efficient non-disruptive backup 7.0.0 - Clone consistency group support Replication v2 support(managed) Configurable migration rate support 8.0.0 - New VNX Cinder driver 9.0.0 - Use asynchronous migration for cloning 10.0.0 - Extend SMP size before async migration when cloning from an image cache volume 10.1.0 - Add QoS support 10.2.0 - Add replication group support 11.0.0 - Fix failure of migration during cloning 12.0.0 - Add `volume revert to snapshot` support 12.1.0 - Adjust max_luns_per_storage_group and check_max_pool_luns_threshold 12.1.1 - Fix perf issue when create/delete volume 13.0.0 - Fix bug https://bugs.launchpad.net/cinder/+bug/1817385 to make sure sg can be created again after it was destroyed under `destroy_empty_stroage_group` setting to `True` 14.0.0 - Fix bug 1794646: failed to delete LUNs from backend due to the temporary snapshots on them wasn't deleted. 14.0.1 - Fix bug 1796825, add an option to set default value for `async_migrate`. 
""" VERSION = '14.00.01' VENDOR = 'Dell EMC' # ThirdPartySystems wiki page CI_WIKI_NAME = "DellEMC_VNX_CI" SUPPORTED = False def __init__(self, *args, **kwargs): super(VNXDriver, self).__init__(*args, **kwargs) utils.init_ops(self.configuration) self.protocol = self.configuration.storage_protocol.lower() self.active_backend_id = kwargs.get('active_backend_id', None) self.adapter = None self._stats = {} @staticmethod def get_driver_options(): return common.VNX_OPTS def do_setup(self, context): if self.protocol == common.PROTOCOL_FC: self.adapter = adapter.FCAdapter(self.configuration, self.active_backend_id) else: self.adapter = adapter.ISCSIAdapter(self.configuration, self.active_backend_id) self.adapter.VERSION = self.VERSION self.adapter.do_setup() def check_for_setup_error(self): pass def create_volume(self, volume): """Creates a volume.""" return self.adapter.create_volume(volume) def create_volume_from_snapshot(self, volume, snapshot): """Creates a volume from a snapshot.""" return self.adapter.create_volume_from_snapshot(volume, snapshot) def create_cloned_volume(self, volume, src_vref): """Creates a cloned volume.""" return self.adapter.create_cloned_volume(volume, src_vref) def extend_volume(self, volume, new_size): """Extend a volume.""" self.adapter.extend_volume(volume, new_size) def delete_volume(self, volume): """Deletes a volume.""" self.adapter.delete_volume(volume) def migrate_volume(self, ctxt, volume, host): """Migrate volume via EMC migration functionality.""" return self.adapter.migrate_volume(ctxt, volume, host) def retype(self, ctxt, volume, new_type, diff, host): """Convert the volume to be of the new type.""" return self.adapter.retype(ctxt, volume, new_type, diff, host) def create_snapshot(self, snapshot): """Creates a snapshot.""" self.adapter.create_snapshot(snapshot) def delete_snapshot(self, snapshot): """Deletes a snapshot.""" self.adapter.delete_snapshot(snapshot) def revert_to_snapshot(self, context, volume, snapshot): """Reverts a volume to a snapshot""" self.adapter.restore_snapshot(volume, snapshot) def ensure_export(self, context, volume): """Driver entry point to get the export info for an existing volume.""" pass def create_export(self, context, volume, connector): """Driver entry point to get the export info for a new volume.""" pass def remove_export(self, context, volume): """Driver entry point to remove an export for a volume.""" pass def check_for_export(self, context, volume_id): """Make sure volume is exported.""" pass def initialize_connection(self, volume, connector): """Initializes the connection and returns connection info. Assign any created volume to a compute node/host so that it can be used from that host. The driver returns a driver_volume_type of 'fibre_channel'. The target_wwn can be a single entry or a list of wwns that correspond to the list of remote wwn(s) that will export the volume. The initiator_target_map is a map that represents the remote wwn(s) and a list of wwns which are visible to the remote wwn(s). Example return values: FC: .. code-block:: json { 'driver_volume_type': 'fibre_channel' 'data': { 'target_discovered': True, 'target_lun': 1, 'target_wwn': ['1234567890123', '0987654321321'], 'initiator_target_map': { '1122334455667788': ['1234567890123', '0987654321321'] } } } iSCSI: .. 
code-block:: json { 'driver_volume_type': 'iscsi' 'data': { 'target_discovered': True, 'target_iqns': ['iqn.2010-10.org.openstack:volume-00001', 'iqn.2010-10.org.openstack:volume-00002'], 'target_portals': ['127.0.0.1:3260', '127.0.1.1:3260'], 'target_luns': [1, 1], } } """ LOG.debug("Entering initialize_connection" " - connector: %(connector)s.", {'connector': connector}) conn_info = self.adapter.initialize_connection(volume, connector) LOG.debug("Exit initialize_connection" " - Returning connection info: %(conn_info)s.", {'conn_info': conn_info}) zm_utils.add_fc_zone(conn_info) return conn_info def terminate_connection(self, volume, connector, **kwargs): """Disallow connection from connector.""" LOG.debug("Entering terminate_connection" " - connector: %(connector)s.", {'connector': connector}) conn_info = self.adapter.terminate_connection(volume, connector) LOG.debug("Exit terminate_connection" " - Returning connection info: %(conn_info)s.", {'conn_info': conn_info}) zm_utils.remove_fc_zone(conn_info) return conn_info def _update_volume_stats(self): """Retrieve stats info from volume group.""" LOG.debug("Updating volume stats.") self._stats = self.adapter.update_volume_stats() self._stats['driver_version'] = self.VERSION self._stats['vendor_name'] = self.VENDOR def manage_existing(self, volume, existing_ref): """Manage an existing lun in the array. The lun should be in a manageable pool backend, otherwise error would return. Rename the backend storage object so that it matches the, volume['name'] which is how drivers traditionally map between a cinder volume and the associated backend storage object. .. code-block:: python manage_existing_ref:{ 'source-id': } or .. code-block:: python manage_existing_ref:{ 'source-name': } """ return self.adapter.manage_existing(volume, existing_ref) def manage_existing_get_size(self, volume, existing_ref): """Return size of volume to be managed by manage_existing.""" return self.adapter.manage_existing_get_size(volume, existing_ref) def get_pool(self, volume): """Returns the pool name of a volume.""" return self.adapter.get_pool_name(volume) def unmanage(self, volume): """Unmanages a volume.""" return self.adapter.unmanage(volume) def update_migrated_volume(self, context, volume, new_volume, original_volume_status=None): """Returns model update for migrated volume.""" return self.adapter.update_migrated_volume(context, volume, new_volume, original_volume_status) def create_export_snapshot(self, context, snapshot, connector): """Creates a snapshot mount point for snapshot.""" return self.adapter.create_export_snapshot( context, snapshot, connector) def remove_export_snapshot(self, context, snapshot): """Removes snapshot mount point for snapshot.""" return self.adapter.remove_export_snapshot(context, snapshot) def initialize_connection_snapshot(self, snapshot, connector, **kwargs): """Allows connection to snapshot.""" return self.adapter.initialize_connection_snapshot(snapshot, connector, **kwargs) def terminate_connection_snapshot(self, snapshot, connector, **kwargs): """Disallows connection to snapshot.""" return self.adapter.terminate_connection_snapshot(snapshot, connector, **kwargs) def backup_use_temp_snapshot(self): return True def failover_host(self, context, volumes, secondary_id=None, groups=None): """Fail-overs volumes from primary device to secondary.""" return self.adapter.failover_host(context, volumes, secondary_id, groups) @utils.require_consistent_group_snapshot_enabled def create_group(self, context, group): """Creates a group.""" return 
self.adapter.create_group(context, group) @utils.require_consistent_group_snapshot_enabled def delete_group(self, context, group, volumes): """Deletes a group.""" return self.adapter.delete_group( context, group, volumes) @utils.require_consistent_group_snapshot_enabled def update_group(self, context, group, add_volumes=None, remove_volumes=None): """Updates a group.""" return self.adapter.update_group(context, group, add_volumes, remove_volumes) @utils.require_consistent_group_snapshot_enabled def create_group_from_src(self, context, group, volumes, group_snapshot=None, snapshots=None, source_group=None, source_vols=None): """Creates a group from source.""" if group_snapshot: return self.adapter.create_group_from_group_snapshot( context, group, volumes, group_snapshot, snapshots) elif source_group: return self.adapter.create_cloned_group( context, group, volumes, source_group, source_vols) @utils.require_consistent_group_snapshot_enabled def create_group_snapshot(self, context, group_snapshot, snapshots): """Creates a group_snapshot.""" return self.adapter.create_group_snapshot( context, group_snapshot, snapshots) @utils.require_consistent_group_snapshot_enabled def delete_group_snapshot(self, context, group_snapshot, snapshots): """Deletes a group_snapshot.""" return self.adapter.delete_group_snapshot( context, group_snapshot, snapshots) def is_consistent_group_snapshot_enabled(self): return self._stats.get('consistent_group_snapshot_enabled') def enable_replication(self, context, group, volumes): return self.adapter.enable_replication(context, group, volumes) def disable_replication(self, context, group, volumes): return self.adapter.disable_replication(context, group, volumes) def failover_replication(self, context, group, volumes, secondary_backend_id): return self.adapter.failover_replication( context, group, volumes, secondary_backend_id) def get_replication_error_status(self, context, groups): return self.adapter.get_replication_error_status(context, groups) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/drivers/dell_emc/vnx/replication.py0000664000175000017500000003702100000000000024565 0ustar00zuulzuul00000000000000# Copyright (c) 2017 Dell Inc. or its subsidiaries. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
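# Illustrative cinder.conf entry (all values hypothetical) for the single
# replication_device this module consumes; the keys mirror the properties
# exposed by common.ReplicationDevice:
#
#   replication_device = backend_id:vnx_secondary,san_ip:192.168.1.2,san_login:sysadmin,san_password:sysadmin,storage_vnx_authentication_type:global,storage_vnx_security_file_dir:/etc/secfile/array2,pool_name:remote_pool_1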
from oslo_log import log as logging from oslo_utils import importutils from cinder import exception from cinder.i18n import _ from cinder.objects import fields from cinder.volume.drivers.dell_emc.vnx import client from cinder.volume.drivers.dell_emc.vnx import common from cinder.volume.drivers.dell_emc.vnx import taskflows as emc_taskflow from cinder.volume.drivers.dell_emc.vnx import utils storops = importutils.try_import('storops') if storops: from storops import exception as storops_ex LOG = logging.getLogger(__name__) class ReplicationAdapter(object): def __init__(self, client=None, config=None): self.client = client self.config = config self.mirror_view = None def do_setup(self): pass def setup_lun_replication(self, volume, primary_lun_id): """Setup replication for LUN, this only happens in primary system.""" specs = common.ExtraSpecs.from_volume(volume) provision = specs.provision tier = specs.tier rep_update = {'replication_driver_data': None, 'replication_status': fields.ReplicationStatus.DISABLED} mirror_name = utils.construct_mirror_name(volume) if specs.is_replication_enabled: LOG.debug('Starting setup replication ' 'for volume: %s.', volume.id) lun_size = volume.size pool_name = utils.get_remote_pool(self.config, volume) emc_taskflow.create_mirror_view( self.mirror_view, mirror_name, primary_lun_id, pool_name, volume.name, lun_size, provision, tier) LOG.info('Successfully setup replication for %s.', volume.id) rep_update.update({'replication_status': fields.ReplicationStatus.ENABLED}) group_specs = common.ExtraSpecs.from_group(volume.group) if volume.group and group_specs.is_group_replication_enabled: # If in a group, add it to group then. LOG.debug('Starting add volume %(volume)s to group %(group)s', {'volume': volume.id, 'group': volume.group.id}) group_name = utils.construct_group_name(volume.group) self.client.add_mirror(group_name, mirror_name) return rep_update def create_group_replication(self, group): rep_update = {'replication_status': group.replication_status} group_specs = common.ExtraSpecs.from_group(group) if group_specs.is_group_replication_enabled: group_name = utils.construct_group_name(group) self.client.create_mirror_group(group_name) rep_update['replication_status'] = ( fields.ReplicationStatus.ENABLED) return rep_update def add_volumes_to_group_replication(self, group, volumes): group_specs = common.ExtraSpecs.from_group(group) if group_specs.is_group_replication_enabled: group_name = utils.construct_group_name(group) for volume in volumes: mirror_name = utils.construct_mirror_name(volume) self.client.add_mirror(group_name, mirror_name) def delete_group_replication(self, group): group_specs = common.ExtraSpecs.from_group(group) if group_specs.is_group_replication_enabled: group_name = utils.construct_group_name(group) self.client.delete_mirror_group(group_name) def remove_volumes_from_group_replication(self, group, volumes): group_name = utils.construct_group_name(group) group_specs = common.ExtraSpecs.from_group(group) if group_specs.is_group_replication_enabled: for volume in volumes: mirror_name = utils.construct_mirror_name(volume) self.client.remove_mirror(group_name, mirror_name) def cleanup_lun_replication(self, volume): specs = common.ExtraSpecs.from_volume(volume) group_specs = common.ExtraSpecs.from_group(volume.group) if group_specs.is_group_replication_enabled: # If in a group, remove from group first. 
group_name = utils.construct_group_name(volume.group) mirror_name = utils.construct_mirror_name(volume) self.client.remove_mirror(group_name, mirror_name) if specs.is_replication_enabled: LOG.debug('Starting cleanup replication for volume: ' '%s.', volume.id) mirror_name = utils.construct_mirror_name(volume) mirror_view = self.build_mirror_view(self.config, True) mirror_view.destroy_mirror(mirror_name, volume.name) LOG.info( 'Successfully destroyed replication for volume: %s', volume.id) def append_replication_stats(self, stats): if self.mirror_view: stats['replication_enabled'] = True stats['group_replication_enabled'] = False stats['consistent_group_replication_enabled'] = True stats['replication_count'] = 1 stats['replication_type'] = ['sync'] else: stats['replication_enabled'] = False stats['replication_targets'] = [ device.backend_id for device in common.ReplicationDeviceList( self.config)] def build_mirror_view(self, configuration, failover=True): """Builds a mirror view operation class. :param configuration: driver configuration :param failover: True if from primary to configured array, False if from configured array to primary. """ rep_devices = configuration.replication_device if not rep_devices: LOG.info('Replication is not configured on backend: %s.', configuration.config_group) return None elif len(rep_devices) == 1: if not self.client.is_mirror_view_enabled(): error_msg = _('Replication is configured, ' 'but no MirrorView/S enabler installed on VNX.') raise exception.InvalidInput(reason=error_msg) rep_list = common.ReplicationDeviceList(configuration) device = rep_list[0] # primary_client always points to the configed VNX. primary_client = self._build_client_from_config(self.config) # secondary_client always points to the VNX in replication_device. secondary_client = client.Client( ip=device.san_ip, username=device.san_login, password=device.san_password, scope=device.storage_vnx_authentication_type, naviseccli=self.client.naviseccli, sec_file=device.storage_vnx_security_file_dir) if failover: mirror_view = common.VNXMirrorView( primary_client, secondary_client) else: # For fail-back, we need to take care of reversed ownership. mirror_view = common.VNXMirrorView( secondary_client, primary_client) return mirror_view else: error_msg = _('VNX Cinder driver does not support ' 'multiple replication targets.') raise exception.InvalidInput(reason=error_msg) def validate_backend_id(self, backend_id): # Currently, VNX driver only supports 1 remote device. if self.active_backend_id: if backend_id != 'default': raise exception.InvalidReplicationTarget( reason=_('Invalid backend_id specified.')) elif backend_id not in ( common.ReplicationDeviceList.get_backend_ids(self.config)): raise exception.InvalidReplicationTarget( reason=_('Invalid backend_id specified.')) def failover_host(self, context, volumes, secondary_backend_id, groups): """Fails over the volume back and forth. Driver needs to update following info for failed-over volume: 1. provider_location: update serial number and lun id 2. 
replication_status: new status for replication-enabled volume """ volume_update_list = [] group_update_list = [] self.validate_backend_id(secondary_backend_id) if secondary_backend_id != 'default': rep_status = fields.ReplicationStatus.FAILED_OVER mirror_view = self.build_mirror_view(self.config, True) else: rep_status = fields.ReplicationStatus.ENABLED mirror_view = self.build_mirror_view(self.config, False) def failover_volume(volume, new_status): mirror_name = utils.construct_mirror_name(volume) provider_location = volume.provider_location try: mirror_view.promote_image(mirror_name) except storops_ex.VNXMirrorException as ex: LOG.error( 'Failed to failover volume %(volume_id)s ' 'to %(target)s: %(error)s.', {'volume_id': volume.id, 'target': secondary_backend_id, 'error': ex}) new_status = fields.ReplicationStatus.FAILOVER_ERROR else: # Transfer ownership to secondary_backend_id and # update provider_location field secondary_client = mirror_view.secondary_client provider_location = utils.update_remote_provider_location( volume, secondary_client) model_update = {'volume_id': volume.id, 'updates': {'replication_status': new_status, 'provider_location': provider_location}} volume_update_list.append(model_update) # Fail over groups if needed. def failover_group(group): is_failover_needed = False if (secondary_backend_id != 'default' and group.replication_status == fields.ReplicationStatus.ENABLED): # Group is on the primary VNX, failover is needed. LOG.info('%(group_id)s will be failed over to secondary ' '%(secondary_backend_id)s.', {'group_id': group.id, 'secondary_backend_id': secondary_backend_id}) is_failover_needed = True if (secondary_backend_id == 'default' and group.replication_status == fields.ReplicationStatus.FAILED_OVER): # Group is on the secondary VNX, failover is needed. LOG.info('%(group_id)s will be failed over to primary ' '%(secondary_backend_id)s.', {'group_id': group.id, 'secondary_backend_id': secondary_backend_id}) is_failover_needed = True if is_failover_needed: group_update, volume_update_list = self.failover_replication( context, group, group.volumes, secondary_backend_id) return ({'group_id': group.id, 'updates': group_update}, [{'volume_id': vol_update['id'], 'updates': vol_update} for vol_update in volume_update_list]) return [], [] for group in groups: specs = common.ExtraSpecs.from_group(group) if specs.is_group_replication_enabled: group_update, vols_in_group_update = failover_group(group) if group_update: group_update_list.append(group_update) volume_update_list.extend(vols_in_group_update) # Filter out the volumes in passed-in groups. group_ids = [group.id for group in groups] for volume in [volume for volume in volumes if volume.group_id not in group_ids]: specs = common.ExtraSpecs.from_volume(volume) if specs.is_replication_enabled: failover_volume(volume, rep_status) # After failover, the secondary is now the primary, # any subsequent request will be redirected to it. self.client = mirror_view.secondary_client # Remember the current backend id. self.active_backend_id = (None if secondary_backend_id == 'default' else secondary_backend_id) return secondary_backend_id, volume_update_list, group_update_list def enable_replication(self, context, group, volumes): """Enable the group replication. Note: this will not interfere with the replication on individual LUNs. 
""" self.create_group_replication(group) self.add_volumes_to_group_replication(group, volumes) return {}, [] def disable_replication(self, context, group, volumes): """Disable the group replication. Note: This will not disable the replication on individual LUNs. """ self.remove_volumes_from_group_replication(group, volumes) self.delete_group_replication(group) return {}, [] def failover_replication(self, context, group, volumes, secondary_backend_id): """"Fail-over the consistent mirror group. Note: VNX supports fail over all the mirrors in a group as a whole, no need to handle each mirror one by one. """ volume_update_list = [] group_update = {'replication_status': group.replication_status} if secondary_backend_id != 'default': mirror_view = self.build_mirror_view(self.config, True) rep_status = fields.ReplicationStatus.FAILED_OVER else: mirror_view = self.build_mirror_view(self.config, False) rep_status = fields.ReplicationStatus.ENABLED # Update volume provider_location secondary_client = mirror_view.secondary_client group_name = utils.construct_group_name(group) try: mirror_view.promote_mirror_group(group_name) except storops_ex.VNXMirrorException as ex: LOG.error( 'Failed to failover group %(group_id)s ' 'to %(target)s: %(error)s.', {'group_id': group.id, 'target': secondary_backend_id, 'error': ex}) rep_status = fields.ReplicationStatus.FAILOVER_ERROR for volume in volumes: volume_update = { 'id': volume.id, 'provider_location': utils.update_remote_provider_location( volume, secondary_client), 'replication_status': rep_status} volume_update_list.append(volume_update) group_update['replication_status'] = rep_status return group_update, volume_update_list def get_replication_error_status(self, context, groups): """The failover only happens manually, no need to update the status.""" return [], [] ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/drivers/dell_emc/vnx/taskflows.py0000664000175000017500000006023400000000000024273 0ustar00zuulzuul00000000000000# Copyright (c) 2016 EMC Corporation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_log import log as logging from oslo_utils import importutils import taskflow.engines from taskflow.patterns import linear_flow from taskflow import task from taskflow.types import failure from cinder import exception from cinder.i18n import _ from cinder.volume.drivers.dell_emc.vnx import common from cinder.volume.drivers.dell_emc.vnx import const from cinder.volume.drivers.dell_emc.vnx import utils storops = importutils.try_import('storops') LOG = logging.getLogger(__name__) class MigrateLunTask(task.Task): """Starts a migration between two LUNs/SMPs. 
Reversion strategy: Cleanup the migration session """ def __init__(self, name=None, provides=None, inject=None, rebind=None): super(MigrateLunTask, self).__init__(name=name, provides=provides, inject=inject, rebind=rebind) def execute(self, client, src_id, dst_id, async_migrate, *args, **kwargs): LOG.debug('%s.execute', self.__class__.__name__) dst_lun = client.get_lun(lun_id=dst_id) dst_wwn = dst_lun.wwn client.migrate_lun(src_id, dst_id) if not async_migrate: migrated = client.verify_migration(src_id, dst_id, dst_wwn) if not migrated: msg = _("Failed to migrate volume between source vol %(src)s" " and dest vol %(dst)s.") % { 'src': src_id, 'dst': dst_id} LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) def revert(self, result, client, src_id, dst_id, *args, **kwargs): method_name = '%s.revert' % self.__class__.__name__ LOG.warning('%(method)s: cleanup migration session: ' '%(src_id)s -> %(dst_id)s.', {'method': method_name, 'src_id': src_id, 'dst_id': dst_id}) client.cleanup_migration(src_id, dst_id) class CreateLunTask(task.Task): """Creates a new lun task. Reversion strategy: Delete the lun. """ def __init__(self, name=None, provides=('new_lun_id', 'new_lun_wwn'), inject=None): super(CreateLunTask, self).__init__(name=name, provides=provides, inject=inject) if provides and not isinstance(provides, tuple): raise ValueError('Only tuple is allowed for [provides].') def execute(self, client, pool_name, lun_name, lun_size, provision, tier, ignore_thresholds=False, *args, **kwargs): LOG.debug('%s.execute', self.__class__.__name__) lun = client.create_lun(pool=pool_name, name=lun_name, size=lun_size, provision=provision, tier=tier, ignore_thresholds=ignore_thresholds) return lun.lun_id, lun.wwn def revert(self, result, client, lun_name, *args, **kwargs): method_name = '%s.revert' % self.__class__.__name__ if isinstance(result, failure.Failure): return else: LOG.warning('%(method_name)s: delete lun %(lun_name)s', {'method_name': method_name, 'lun_name': lun_name}) client.delete_lun(lun_name) class CopySnapshotTask(task.Task): """Task to copy a volume snapshot/consistency group snapshot. Reversion Strategy: Delete the copied snapshot/cgsnapshot """ def execute(self, client, snap_name, new_snap_name, *args, **kwargs): LOG.debug('%s.execute', self.__class__.__name__) client.copy_snapshot(snap_name, new_snap_name) def revert(self, result, client, snap_name, new_snap_name, *args, **kwargs): method_name = '%s.revert' % self.__class__.__name__ LOG.warning('%(method_name)s: delete the ' 'copied snapshot %(new_name)s of ' '%(source_name)s.', {'method_name': method_name, 'new_name': new_snap_name, 'source_name': snap_name}) client.delete_snapshot(new_snap_name) class CreateSMPTask(task.Task): """Creates a snap mount point (SMP) for the source snapshot. Reversion strategy: Delete the SMP. """ def __init__(self, name=None, provides='smp_id', inject=None): super(CreateSMPTask, self).__init__(name=name, provides=provides, inject=inject) def execute(self, client, smp_name, base_lun_name, *args, **kwargs): LOG.debug('%s.execute', self.__class__.__name__) client.create_mount_point(base_lun_name, smp_name) lun = client.get_lun(name=smp_name) return lun.lun_id def revert(self, result, client, smp_name, *args, **kwargs): method_name = '%s.revert' % self.__class__.__name__ LOG.warning('%(method_name)s: delete mount point %(name)s', {'method_name': method_name, 'name': smp_name}) client.delete_lun(smp_name) class AttachSnapTask(task.Task): """Attaches the snapshot to the SMP created before. 
Reversion strategy: Detach the SMP. """ def execute(self, client, smp_name, snap_name, *args, **kwargs): LOG.debug('%s.execute', self.__class__.__name__) client.attach_snapshot(smp_name, snap_name) def revert(self, result, client, smp_name, *args, **kwargs): method_name = '%s.revert' % self.__class__.__name__ LOG.warning('%(method_name)s: detach mount point %(smp_name)s', {'method_name': method_name, 'smp_name': smp_name}) client.detach_snapshot(smp_name) class CreateSnapshotTask(task.Task): """Creates a snapshot of a volume. Reversion Strategy: Delete the created snapshot. """ def execute(self, client, snap_name, lun_id, keep_for=None, *args, **kwargs): LOG.debug('%s.execute', self.__class__.__name__) LOG.info('Create snapshot: %(snapshot)s: lun: %(lun)s', {'snapshot': snap_name, 'lun': lun_id}) client.create_snapshot(lun_id, snap_name, keep_for=keep_for) def revert(self, result, client, snap_name, *args, **kwargs): method_name = '%s.revert' % self.__class__.__name__ LOG.warning('%(method_name)s: ' 'delete temp snapshot %(snap_name)s', {'method_name': method_name, 'snap_name': snap_name}) client.delete_snapshot(snap_name) class ModifySnapshotTask(task.Task): """Task to modify a Snapshot to allow ReadWrite on it.""" def execute(self, client, snap_name, keep_for=None, *args, **kwargs): LOG.debug('%s.execute', self.__class__.__name__) client.modify_snapshot(snap_name, allow_rw=True, keep_for=keep_for) def revert(self, result, client, snap_name, *args, **kwargs): method_name = '%s.revert' % self.__class__.__name__ LOG.warning('%(method_name)s: ' 'setting snapshot %(snap_name)s to read-only.', {'method_name': method_name, 'snap_name': snap_name}) client.modify_snapshot(snap_name, allow_rw=False) class WaitMigrationsTask(task.Task): """Task to wait migrations to be completed.""" def __init__(self, src_id_template, dst_id_template, dst_wwn_template, num_of_members, *args, **kwargs): self.migrate_tuples = [ (src_id_template % x, dst_id_template % x, dst_wwn_template % x) for x in range(num_of_members)] src_id_keys = sorted(set( [src_id_template % i for i in range(num_of_members)])) dst_id_keys = sorted(set( [dst_id_template % i for i in range(num_of_members)])) dst_wwn_keys = sorted(set( [dst_wwn_template % i for i in range(num_of_members)])) super(WaitMigrationsTask, self).__init__( requires=(src_id_keys + dst_id_keys + dst_wwn_keys), *args, **kwargs) def execute(self, client, *args, **kwargs): LOG.debug('%s.execute', self.__class__.__name__) for src_id_key, dst_id_key, dst_wwn_key in self.migrate_tuples: src_id = kwargs[src_id_key] dst_id = kwargs[dst_id_key] dst_wwn = kwargs[dst_wwn_key] migrated = client.verify_migration(src_id, dst_id, dst_wwn) if not migrated: msg = _("Failed to migrate volume %(src)s.") % {'src': src_id} raise exception.VolumeBackendAPIException(data=msg) class CreateConsistencyGroupTask(task.Task): """Task to create a consistency group.""" def __init__(self, lun_id_key_template, num_of_members, *args, **kwargs): self.lun_id_keys = sorted(set( [lun_id_key_template % i for i in range(num_of_members)])) super(CreateConsistencyGroupTask, self).__init__( requires=self.lun_id_keys, *args, **kwargs) def execute(self, client, new_cg_name, *args, **kwargs): LOG.debug('%s.execute', self.__class__.__name__) lun_ids = [kwargs[key] for key in self.lun_id_keys] client.create_consistency_group(new_cg_name, lun_ids) class CreateCGSnapshotTask(task.Task): """Task to create a CG snapshot.""" def __init__(self, provides='new_cg_snap_name', *args, **kwargs): super(CreateCGSnapshotTask, 
self).__init__( provides=provides, *args, **kwargs) def execute(self, client, cg_snap_name, cg_name, *args, **kwargs): LOG.debug('%s.execute', self.__class__.__name__) return client.create_cg_snapshot(cg_snap_name, cg_name) def revert(self, client, cg_snap_name, cg_name, *args, **kwargs): method_name = '%s.revert' % self.__class__.__name__ LOG.warning('%(method_name)s: ' 'deleting CG snapshot %(snap_name)s.', {'method_name': method_name, 'snap_name': cg_snap_name}) client.delete_cg_snapshot(cg_snap_name) class CreateMirrorTask(task.Task): """Creates a MirrorView with primary lun for replication. Reversion strategy: Destroy the created MirrorView. """ def execute(self, mirror, mirror_name, primary_lun_id, *args, **kwargs): LOG.debug('%s.execute', self.__class__.__name__) mirror.create_mirror(mirror_name, primary_lun_id) def revert(self, result, mirror, mirror_name, *args, **kwargs): method_name = '%s.revert' % self.__class__.__name__ LOG.warning('%(method)s: removing mirror ' 'view %(name)s.', {'method': method_name, 'name': mirror_name}) mirror.delete_mirror(mirror_name) class AddMirrorImageTask(task.Task): """Add the secondary image to MirrorView. Reversion strategy: Remove the secondary image. """ def execute(self, mirror, mirror_name, secondary_lun_id, *args, **kwargs): LOG.debug('%s.execute', self.__class__.__name__) mirror.add_image(mirror_name, secondary_lun_id) def revert(self, result, mirror, mirror_name, *args, **kwargs): method_name = '%s.revert' % self.__class__.__name__ LOG.warning('%(method)s: removing secondary image ' 'from %(name)s.', {'method': method_name, 'name': mirror_name}) mirror.remove_image(mirror_name) class ExtendSMPTask(task.Task): """Extend the SMP if needed. If the SMP is thin and the new size is larger than the old one, then extend it. 
""" def execute(self, client, smp_name, lun_size, *args, **kwargs): LOG.debug('%s.execute', self.__class__.__name__) smp = client.get_lun(name=smp_name) if lun_size > smp.total_capacity_gb: if smp.primary_lun.is_thin_lun: client.expand_lun(smp_name, lun_size) else: LOG.warning('Not extending the SMP: %s, because its base lun ' 'is not thin.', smp_name) else: LOG.info('Not extending the SMP: %(smp)s, size: %(size)s, because ' 'the new size: %(new_size)s is smaller.', {'smp': smp_name, 'size': smp.total_capacity_gb, 'new_size': lun_size}) def run_migration_taskflow(client, lun_id, lun_name, lun_size, pool_name, provision, tier, rate=const.MIGRATION_RATE_HIGH): # Step 1: create target LUN # Step 2: start and migrate migration session tmp_lun_name = utils.construct_tmp_lun_name(lun_name) flow_name = 'migrate_lun' store_spec = {'client': client, 'pool_name': pool_name, 'lun_name': tmp_lun_name, 'lun_size': lun_size, 'provision': provision, 'tier': tier, 'ignore_thresholds': True, 'src_id': lun_id, 'async_migrate': False, } work_flow = linear_flow.Flow(flow_name) work_flow.add(CreateLunTask(), MigrateLunTask(rebind={'dst_id': 'new_lun_id'})) engine = taskflow.engines.load( work_flow, store=store_spec) engine.run() def fast_create_volume_from_snapshot(client, snap_name, new_snap_name, lun_name, base_lun_name, pool_name): # Step 1: copy snapshot # Step 2: allow read/write for snapshot # Step 3: create smp LUN # Step 4: attach the snapshot flow_name = 'create_snapcopy_volume_from_snapshot' store_spec = {'client': client, 'snap_name': snap_name, 'new_snap_name': new_snap_name, 'pool_name': pool_name, 'smp_name': lun_name, 'base_lun_name': base_lun_name, 'ignore_thresholds': True, } work_flow = linear_flow.Flow(flow_name) work_flow.add(CopySnapshotTask(), ModifySnapshotTask(rebind={'snap_name': 'new_snap_name'}), CreateSMPTask(), AttachSnapTask(rebind={'snap_name': 'new_snap_name'})) engine = taskflow.engines.load( work_flow, store=store_spec) engine.run() lun_id = engine.storage.fetch('smp_id') return lun_id def create_volume_from_snapshot(client, src_snap_name, lun_name, lun_size, base_lun_name, pool_name, provision, tier, new_snap_name=None): # Step 1: Copy and modify snap(only for async migrate) # Step 2: Create smp from base lun # Step 3: Attach snapshot to smp # Step 4: Create new LUN # Step 5: migrate the smp to new LUN tmp_lun_name = '%s_dest' % lun_name flow_name = 'create_volume_from_snapshot' store_spec = {'client': client, 'snap_name': src_snap_name, 'new_snap_name': new_snap_name, 'smp_name': lun_name, 'lun_name': tmp_lun_name, 'lun_size': lun_size, 'base_lun_name': base_lun_name, 'pool_name': pool_name, 'provision': provision, 'tier': tier, 'keep_for': (common.SNAP_EXPIRATION_HOUR if new_snap_name else None), 'async_migrate': True if new_snap_name else False, } work_flow = linear_flow.Flow(flow_name) if new_snap_name: work_flow.add(CopySnapshotTask(), ModifySnapshotTask( rebind={'snap_name': 'new_snap_name'})) work_flow.add(CreateSMPTask(), AttachSnapTask(rebind={'snap_name': 'new_snap_name'}) if new_snap_name else AttachSnapTask(), ExtendSMPTask(), CreateLunTask(), MigrateLunTask( rebind={'src_id': 'smp_id', 'dst_id': 'new_lun_id'})) engine = taskflow.engines.load( work_flow, store=store_spec) engine.run() lun_id = engine.storage.fetch('smp_id') return lun_id def fast_create_cloned_volume(client, snap_name, lun_id, lun_name, base_lun_name): flow_name = 'create_cloned_snapcopy_volume' store_spec = { 'client': client, 'snap_name': snap_name, 'lun_id': lun_id, 'smp_name': lun_name, 
'base_lun_name': base_lun_name} work_flow = linear_flow.Flow(flow_name) work_flow.add(CreateSnapshotTask(), CreateSMPTask(), AttachSnapTask()) engine = taskflow.engines.load(work_flow, store=store_spec) engine.run() lun_id = engine.storage.fetch('smp_id') return lun_id def create_cloned_volume(client, snap_name, lun_id, lun_name, lun_size, base_lun_name, pool_name, provision, tier, async_migrate=False): tmp_lun_name = '%s_dest' % lun_name flow_name = 'create_cloned_volume' store_spec = {'client': client, 'snap_name': snap_name, 'lun_id': lun_id, 'smp_name': lun_name, 'lun_name': tmp_lun_name, 'lun_size': lun_size, 'base_lun_name': base_lun_name, 'pool_name': pool_name, 'provision': provision, 'tier': tier, 'keep_for': (common.SNAP_EXPIRATION_HOUR if async_migrate else None), 'async_migrate': async_migrate, } work_flow = linear_flow.Flow(flow_name) work_flow.add( CreateSnapshotTask(), CreateSMPTask(), AttachSnapTask(), ExtendSMPTask(), CreateLunTask(), MigrateLunTask( rebind={'src_id': 'smp_id', 'dst_id': 'new_lun_id'})) engine = taskflow.engines.load( work_flow, store=store_spec) engine.run() if not async_migrate: client.delete_snapshot(snap_name) lun_id = engine.storage.fetch('smp_id') return lun_id def create_cg_from_cg_snapshot(client, cg_name, src_cg_name, cg_snap_name, src_cg_snap_name, pool_name, lun_sizes, lun_names, src_lun_names, specs_list, copy_snap=True): prepare_tasks = [] store_spec = {} if copy_snap: flow_name = 'create_cg_from_cg_snapshot' temp_cg_snap = utils.construct_tmp_cg_snap_name(cg_name) snap_name = temp_cg_snap store_spec.update({'snap_name': src_cg_snap_name, 'new_snap_name': snap_name}) prepare_tasks.append( CopySnapshotTask()) prepare_tasks.append( ModifySnapshotTask(rebind={'snap_name': 'new_snap_name'})) else: flow_name = 'create_cg_from_cg' snap_name = cg_snap_name store_spec.update({'cg_name': src_cg_name, 'cg_snap_name': snap_name}) prepare_tasks.append(CreateCGSnapshotTask()) work_flow = linear_flow.Flow(flow_name) work_flow.add(*prepare_tasks) new_src_id_template = 'new_src_id_%s' new_dst_id_template = 'new_dst_id_%s' new_dst_wwn_template = 'new_dst_wwn_%s' common_store_spec = { 'client': client, 'pool_name': pool_name, 'ignore_thresholds': True, 'new_cg_name': cg_name } store_spec.update(common_store_spec) # Create LUNs for CG for i, lun_name in enumerate(lun_names): sub_store_spec = { 'lun_name': utils.construct_tmp_lun_name(lun_name), 'lun_size': lun_sizes[i], 'provision': specs_list[i].provision, 'tier': specs_list[i].tier, 'base_lun_name': src_lun_names[i], 'smp_name': lun_name, 'snap_name': snap_name, 'async_migrate': True, } work_flow.add(CreateSMPTask(name="CreateSMPTask_%s" % i, inject=sub_store_spec, provides=new_src_id_template % i), AttachSnapTask(name="AttachSnapTask_%s" % i, inject=sub_store_spec), CreateLunTask(name="CreateLunTask_%s" % i, inject=sub_store_spec, provides=(new_dst_id_template % i, new_dst_wwn_template % i)), MigrateLunTask( name="MigrateLunTask_%s" % i, inject=sub_store_spec, rebind={'src_id': new_src_id_template % i, 'dst_id': new_dst_id_template % i})) # Wait all migration session finished work_flow.add(WaitMigrationsTask(new_src_id_template, new_dst_id_template, new_dst_wwn_template, len(lun_names)), CreateConsistencyGroupTask(new_src_id_template, len(lun_names))) engine = taskflow.engines.load(work_flow, store=store_spec) engine.run() # Fetch all created LUNs and add them into CG lun_id_list = [] for i, lun_name in enumerate(lun_names): lun_id = engine.storage.fetch(new_src_id_template % i) lun_id_list.append(lun_id) 
client.delete_cg_snapshot(snap_name) return lun_id_list def create_cloned_cg(client, cg_name, src_cg_name, pool_name, lun_sizes, lun_names, src_lun_names, specs_list): cg_snap_name = utils.construct_tmp_cg_snap_name(cg_name) return create_cg_from_cg_snapshot( client, cg_name, src_cg_name, cg_snap_name, None, pool_name, lun_sizes, lun_names, src_lun_names, specs_list, copy_snap=False) def create_mirror_view(mirror_view, mirror_name, primary_lun_id, pool_name, lun_name, lun_size, provision, tier): flow_name = 'create_mirror_view' store_specs = { 'mirror': mirror_view, 'mirror_name': mirror_name, 'primary_lun_id': primary_lun_id, 'pool_name': pool_name, 'lun_name': lun_name, 'lun_size': lun_size, 'provision': provision, 'tier': tier, 'ignore_thresholds': True } # NOTE: should create LUN on secondary device/array work_flow = linear_flow.Flow(flow_name) work_flow.add(CreateMirrorTask(), CreateLunTask( name='CreateSecondaryLunTask', provides=('secondary_lun_id', 'secondary_lun_wwn'), inject={'client': mirror_view.secondary_client}), AddMirrorImageTask()) engine = taskflow.engines.load(work_flow, store=store_specs) engine.run() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/drivers/dell_emc/vnx/utils.py0000664000175000017500000003745700000000000023431 0ustar00zuulzuul00000000000000# Copyright (c) 2016 EMC Corporation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import functools import time from oslo_log import log as logging from oslo_service import loopingcall from oslo_utils import excutils from oslo_utils import importutils from oslo_utils import uuidutils from cinder import exception from cinder.i18n import _ from cinder.objects import fields from cinder.volume.drivers.dell_emc.vnx import common from cinder.volume.drivers.san.san import san_opts from cinder.volume import volume_types from cinder.volume import volume_utils storops = importutils.try_import('storops') storops = importutils.try_import('storops') LOG = logging.getLogger(__name__) def init_ops(configuration): configuration.append_config_values(common.VNX_OPTS) configuration.append_config_values(san_opts) def get_metadata(volume): # Since versionedobjects is partially merged, metadata # may come from 'volume_metadata' or 'metadata', here # we need to take care both of them. volume_metadata = {} if 'volume_metadata' in volume: for metadata in volume['volume_metadata']: volume_metadata[metadata['key']] = metadata['value'] return volume_metadata return volume['metadata'] if 'metadata' in volume else {} def dump_provider_location(location_dict): return '|'.join([k + '^' + v for k, v in location_dict.items()]) def build_provider_location(system, lun_type, lun_id, base_lun_name, version): """Builds provider_location for volume or snapshot. 
:param system: VNX serial number :param lun_id: LUN ID in VNX :param lun_type: 'lun' or 'smp' :param base_lun_name: primary LUN name, it will be used when creating snap lun :param version: driver version """ location_dict = {'system': system, 'type': lun_type, 'id': str(lun_id), 'base_lun_name': str(base_lun_name), 'version': version} return dump_provider_location(location_dict) def extract_provider_location(provider_location, key): """Extracts value of the specified field from provider_location string. :param provider_location: provider_location string :param key: field name of the value that to be extracted :return: value of the specified field if it exists, otherwise, None is returned """ if not provider_location: return None kvps = provider_location.split('|') for kvp in kvps: fields = kvp.split('^') if len(fields) == 2 and fields[0] == key: return fields[1] def update_provider_location(provider_location, items): """Updates provider_location with new dict items. :param provider_location: volume's provider_location. :param items: dict items for updating. """ location_dict = {tp.split('^')[0]: tp.split('^')[1] for tp in provider_location.split('|')} for key, value in items.items(): location_dict[key] = value return dump_provider_location(location_dict) def update_remote_provider_location(volume, client): """Update volume provider_location after volume failed-over.""" provider_location = volume.provider_location updated = {} updated['system'] = client.get_serial() updated['id'] = str( client.get_lun(name=volume.name).lun_id) provider_location = update_provider_location( provider_location, updated) return provider_location def get_pool_from_host(host): return volume_utils.extract_host(host, 'pool') def wait_until(condition, timeout=None, interval=common.INTERVAL_5_SEC, reraise_arbiter=lambda ex: True, *args, **kwargs): start_time = time.time() if not timeout: timeout = common.DEFAULT_TIMEOUT def _inner(): try: test_value = condition(*args, **kwargs) except Exception as ex: test_value = False with excutils.save_and_reraise_exception( reraise=reraise_arbiter(ex)): LOG.debug('Exception raised when executing %(condition_name)s ' 'in wait_until. Message: %(msg)s', {'condition_name': condition.__name__, 'msg': ex.message}) if test_value: raise loopingcall.LoopingCallDone() if int(time.time()) - start_time > timeout: msg = (_('Timeout waiting for %(condition_name)s in wait_until.') % {'condition_name': condition.__name__}) LOG.error(msg) raise common.WaitUtilTimeoutException(msg) timer = loopingcall.FixedIntervalLoopingCall(_inner) timer.start(interval=interval).wait() def validate_storage_migration(volume, target_host, src_serial, src_protocol): if 'location_info' not in target_host['capabilities']: LOG.warning("Failed to get pool name and " "serial number. 
'location_info' " "from %s.", target_host['host']) return False info = target_host['capabilities']['location_info'] LOG.debug("Host for migration is %s.", info) try: serial_number = info.split('|')[1] except AttributeError: LOG.warning('Error on getting serial number ' 'from %s.', target_host['host']) return False if serial_number != src_serial: LOG.debug('Skip storage-assisted migration because ' 'target and source backend are not managing ' 'the same array.') return False if (target_host['capabilities']['storage_protocol'] != src_protocol and get_original_status(volume) == 'in-use'): LOG.debug('Skip storage-assisted migration because ' 'in-use volume can not be ' 'migrate between different protocols.') return False return True def retype_need_migration(volume, old_provision, new_provision, host): if volume['host'] != host['host']: return True lun_type = extract_provider_location(volume['provider_location'], 'type') if lun_type == 'smp': return True if old_provision != new_provision: if retype_need_turn_on_compression(old_provision, new_provision): return False else: return True return False def retype_need_turn_on_compression(old_provision, new_provision): return (old_provision in [storops.VNXProvisionEnum.THIN, storops.VNXProvisionEnum.THICK] and new_provision == storops.VNXProvisionEnum.COMPRESSED) def retype_need_change_tier(old_tier, new_tier): return new_tier is not None and old_tier != new_tier def get_original_status(volume): if not volume['volume_attachment']: return 'available' else: return 'in-use' def construct_snap_name(volume): """Return snapshot name.""" if is_snapcopy_enabled(volume): return 'snap-as-vol-' + str(volume.name_id) else: return 'tmp-snap-' + str(volume.name_id) def construct_mirror_name(volume): """Constructs MirrorView name for volume.""" return 'mirror_' + str(volume.id) def construct_group_name(group): """Constructs MirrorGroup name for volumes. VNX only allows for 32-character group name, so trim the dash(-) from group id. """ return group.id.replace('-', '') def construct_tmp_cg_snap_name(cg_name): """Return CG snapshot name.""" return 'tmp-snap-' + str(cg_name) def construct_tmp_lun_name(lun_name): """Constructs a time-based temporary LUN name.""" return '%(src)s-%(ts)s' % {'src': lun_name, 'ts': int(time.time())} def construct_smp_name(snap_id): return 'tmp-smp-' + str(snap_id) def is_snapcopy_enabled(volume): meta = get_metadata(volume) return 'snapcopy' in meta and meta['snapcopy'].lower() == 'true' def is_async_migrate_enabled(volume, default=True): extra_specs = common.ExtraSpecs.from_volume(volume) if extra_specs.is_replication_enabled: # For replication-enabled volume, we should not use the async-cloned # volume, or setup replication would fail with # VNXMirrorLunNotAvailableError return False meta = get_metadata(volume) if 'async_migrate' not in meta: return default return 'async_migrate' in meta and meta['async_migrate'].lower() == 'true' def get_migration_rate(volume): metadata = get_metadata(volume) rate = metadata.get('migrate_rate', None) if rate: if rate.lower() in storops.VNXMigrationRate.values(): return storops.VNXMigrationRate.parse(rate.lower()) else: LOG.warning('Unknown migration rate specified, ' 'using [high] as migration rate.') return storops.VNXMigrationRate.HIGH def check_type_matched(volume): """Check volume type and group type This will make sure they do not conflict with each other. :param volume: volume to be checked :returns: None :raises: InvalidInput """ # If volume is not a member of group, skip this check anyway. 
if not volume.group: return extra_specs = common.ExtraSpecs.from_volume(volume) group_specs = common.ExtraSpecs.from_group(volume.group) if not (group_specs.is_group_replication_enabled == extra_specs.is_replication_enabled): msg = _('Replication should be enabled or disabled for both ' 'volume or group. volume replication status: %(vol_status)s, ' 'group replication status: %(group_status)s') % { 'vol_status': extra_specs.is_replication_enabled, 'group_status': group_specs.is_group_replication_enabled} raise exception.InvalidInput(reason=msg) def check_rep_status_matched(group): """Check replication status for group. Group status must be enabled before proceeding. """ group_specs = common.ExtraSpecs.from_group(group) if group_specs.is_group_replication_enabled: if group.replication_status != fields.ReplicationStatus.ENABLED: msg = _('Replication status should be %s for replication-enabled ' 'group.') % fields.ReplicationStatus.ENABLED raise exception.InvalidInput(reason=msg) else: LOG.info('Replication is not enabled on group %s, skip status check.', group.id) def update_res_without_poll(res): with res.with_no_poll(): res.update() def update_res_with_poll(res): with res.with_poll(): res.update() def get_base_lun_name(volume): """Returns base LUN name for LUN/snapcopy LUN.""" base_name = extract_provider_location( volume.provider_location, 'base_lun_name') if base_name is None or base_name == 'None': return volume.name return base_name def sift_port_white_list(port_white_list, registered_io_ports): """Filters out the unregistered ports. Goes through the `port_white_list`, and filters out the ones not registered (that is not in `registered_io_ports`). """ valid_port_list = [] LOG.debug('Filter ports in [%(white)s}] but not in [%(reg_ports)s].', {'white': ','.join( [port.display_name for port in port_white_list]), 'reg_ports': ','.join( [port.display_name for port in registered_io_ports])}) for io_port in port_white_list: if io_port not in registered_io_ports: LOG.debug('Skipped SP port %(port)s due to it is not registered. ' 'The registered IO ports: %(reg_ports)s.', {'port': io_port, 'reg_ports': registered_io_ports}) else: valid_port_list.append(io_port) return valid_port_list def convert_to_tgt_list_and_itor_tgt_map(zone_mapping): """Function to process data from lookup service. :param zone_mapping: mapping is the data from the zone lookup service with below format .. code:: python { : { 'initiator_port_wwn_list': ('200000051e55a100', '200000051e55a121'..) 'target_port_wwn_list': ('100000051e55a100', '100000051e55a121'..) } } """ target_wwns = [] itor_tgt_map = {} for san_name in zone_mapping: one_map = zone_mapping[san_name] for target in one_map['target_port_wwn_list']: if target not in target_wwns: target_wwns.append(target) for initiator in one_map['initiator_port_wwn_list']: itor_tgt_map[initiator] = one_map['target_port_wwn_list'] LOG.debug("target_wwns: %(tgt_wwns)s\n init_targ_map: %(itor_tgt_map)s", {'tgt_wwns': target_wwns, 'itor_tgt_map': itor_tgt_map}) return target_wwns, itor_tgt_map def truncate_fc_port_wwn(wwn): return wwn.replace(':', '')[16:] def is_volume_smp(volume): return 'smp' == extract_provider_location(volume.provider_location, 'type') def require_consistent_group_snapshot_enabled(func): @functools.wraps(func) def inner(self, *args, **kwargs): if not volume_utils.is_group_a_cg_snapshot_type(args[1]): raise NotImplementedError return func(self, *args, **kwargs) return inner def get_remote_pool(config, volume): """Select remote pool name for replication. 
Prefer configured remote pool name, or same pool name as the source volume. """ pool_name = get_pool_from_host(volume.host) rep_list = common.ReplicationDeviceList(config) remote_pool_name = rep_list[0].pool_name return remote_pool_name if remote_pool_name else pool_name def is_image_cache_volume(volume): display_name = volume.display_name if (display_name.startswith('image-') and uuidutils.is_uuid_like(display_name[6:])): LOG.debug('Volume: %s is for image cache. Use sync migration and ' 'thin provisioning.', volume.name) return True return False def calc_migrate_and_provision(volume, default_async_migrate=True): """Returns a tuple of async migrate and provision type. The first element is the flag whether to enable async migrate, the second is the provision type (thin or thick). """ if is_image_cache_volume(volume): return False, storops.VNXProvisionEnum.THIN else: specs = common.ExtraSpecs.from_volume(volume) return (is_async_migrate_enabled(volume, default_async_migrate), specs.provision) def get_backend_qos_specs(volume): type_id = volume.volume_type_id if type_id is None: return None # Use the provided interface to avoid permission issue qos_specs = volume_types.get_volume_type_qos_specs(type_id) if qos_specs is None: return None qos_specs = qos_specs['qos_specs'] if qos_specs is None: return None consumer = qos_specs['consumer'] # Front end QoS specs are handled by nova. We ignore them here. if consumer not in common.BACKEND_QOS_CONSUMERS: return None max_iops = qos_specs['specs'].get(common.QOS_MAX_IOPS) max_bws = qos_specs['specs'].get(common.QOS_MAX_BWS) if max_iops is None and max_bws is None: return None return { 'id': qos_specs['id'], common.QOS_MAX_IOPS: max_iops, common.QOS_MAX_BWS: max_bws, } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/drivers/dell_emc/xtremio.py0000664000175000017500000015577600000000000023152 0ustar00zuulzuul00000000000000# Copyright (c) 2018 Dell Inc. or its subsidiaries. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Driver for Dell EMC XtremIO Storage. Supports XtremIO version 2.4 and up. .. 
code-block:: none 1.0.0 - initial release 1.0.1 - enable volume extend 1.0.2 - added FC support, improved error handling 1.0.3 - update logging level, add translation 1.0.4 - support for FC zones 1.0.5 - add support for XtremIO 4.0 1.0.6 - add support for iSCSI multipath, CA validation, consistency groups, R/O snapshots, CHAP discovery authentication 1.0.7 - cache glance images on the array 1.0.8 - support for volume retype, CG fixes 1.0.9 - performance improvements, support force detach, support for X2 1.0.10 - option to clean unused IGs 1.0.11 - add support for multiattach 1.0.12 - add support for ports filtering 1.0.13 - add support for iSCSI IPv6 """ import http.client as http_client import json import math import random import string from oslo_config import cfg from oslo_log import log as logging from oslo_utils import netutils from oslo_utils import strutils from oslo_utils import units import requests from cinder.common import constants from cinder import context from cinder import exception from cinder.i18n import _ from cinder import interface from cinder.objects import fields from cinder import utils from cinder.volume import configuration from cinder.volume import driver from cinder.volume.drivers.san import san from cinder.volume import volume_utils from cinder.zonemanager import utils as fczm_utils LOG = logging.getLogger(__name__) CONF = cfg.CONF XTREMIO_OPTS = [ cfg.StrOpt('xtremio_cluster_name', default='', help='XMS cluster id in multi-cluster environment'), cfg.IntOpt('xtremio_array_busy_retry_count', default=5, help='Number of retries in case array is busy'), cfg.IntOpt('xtremio_array_busy_retry_interval', default=5, help='Interval between retries in case array is busy'), cfg.IntOpt('xtremio_volumes_per_glance_cache', default=100, help='Number of volumes created from each cached glance image'), cfg.BoolOpt('xtremio_clean_unused_ig', default=False, help='Should the driver remove initiator groups with no ' 'volumes after the last connection was terminated. ' 'Since the behavior till now was to leave ' 'the IG be, we default to False (not deleting IGs ' 'without connected volumes); setting this parameter ' 'to True will remove any IG after terminating its ' 'connection to the last volume.'), cfg.ListOpt('xtremio_ports', default=[], help='Allowed ports. Comma separated list of XtremIO ' 'iSCSI IPs or FC WWNs (ex. 58:cc:f0:98:49:22:07:02) ' 'to be used. If option is not set all ports are allowed.') ] CONF.register_opts(XTREMIO_OPTS, group=configuration.SHARED_CONF_GROUP) RANDOM = random.Random() OBJ_NOT_FOUND_ERR = 'obj_not_found' VOL_NOT_UNIQUE_ERR = 'vol_obj_name_not_unique' VOL_OBJ_NOT_FOUND_ERR = 'vol_obj_not_found' ALREADY_MAPPED_ERR = 'already_mapped' SYSTEM_BUSY = 'system_is_busy' TOO_MANY_OBJECTS = 'too_many_objs' TOO_MANY_SNAPSHOTS_PER_VOL = 'too_many_snapshots_per_vol' XTREMIO_OID_NAME = 1 XTREMIO_OID_INDEX = 2 class XtremIOAlreadyMappedError(exception.VolumeDriverException): message = _("Volume to Initiator Group mapping already exists") class XtremIOArrayBusy(exception.VolumeDriverException): message = _("System is busy, retry operation.") class XtremIOSnapshotsLimitExceeded(exception.VolumeDriverException): message = _("Exceeded the limit of snapshots per volume") class XtremIOClient(object): def __init__(self, configuration, cluster_id): self.configuration = configuration self.cluster_id = cluster_id self.verify = (self.configuration. safe_get('driver_ssl_cert_verify') or False) if self.verify: verify_path = (self.configuration. 
safe_get('driver_ssl_cert_path') or None) if verify_path: self.verify = verify_path def get_base_url(self, ver): if ver == 'v1': return 'https://%s/api/json/types' % self.configuration.san_ip elif ver == 'v2': return 'https://%s/api/json/v2/types' % self.configuration.san_ip def req(self, object_type='volumes', method='GET', data=None, name=None, idx=None, ver='v1'): @utils.retry(XtremIOArrayBusy, self.configuration.xtremio_array_busy_retry_count, self.configuration.xtremio_array_busy_retry_interval, 1) def _do_req(object_type, method, data, name, idx, ver): if not data: data = {} if name and idx: msg = _("can't handle both name and index in req") LOG.error(msg) raise exception.VolumeDriverException(message=msg) url = '%s/%s' % (self.get_base_url(ver), object_type) params = {} key = None if name: params['name'] = name key = name elif idx: url = '%s/%d' % (url, idx) key = str(idx) if method in ('GET', 'DELETE'): params.update(data) self.update_url(params, self.cluster_id) if method != 'GET': self.update_data(data, self.cluster_id) # data may include chap password LOG.debug('data: %s', strutils.mask_password(data)) LOG.debug('%(type)s %(url)s', {'type': method, 'url': url}) try: response = requests.request( method, url, params=params, data=json.dumps(data), verify=self.verify, auth=(self.configuration.san_login, self.configuration.san_password)) except requests.exceptions.RequestException as exc: msg = (_('Exception: %s') % str(exc)) raise exception.VolumeDriverException(message=msg) if (http_client.OK <= response.status_code < http_client.MULTIPLE_CHOICES): if method in ('GET', 'POST'): return response.json() else: return '' self.handle_errors(response, key, object_type) return _do_req(object_type, method, data, name, idx, ver) def handle_errors(self, response, key, object_type): if response.status_code == http_client.BAD_REQUEST: error = response.json() err_msg = error.get('message') if err_msg.endswith(OBJ_NOT_FOUND_ERR): LOG.warning("object %(key)s of " "type %(typ)s not found, %(err_msg)s", {'key': key, 'typ': object_type, 'err_msg': err_msg, }) raise exception.NotFound() elif err_msg == VOL_NOT_UNIQUE_ERR: LOG.error("can't create 2 volumes with the same name, %s", err_msg) msg = _('Volume by this name already exists') raise exception.VolumeBackendAPIException(data=msg) elif err_msg == VOL_OBJ_NOT_FOUND_ERR: LOG.error("Can't find volume to map %(key)s, %(msg)s", {'key': key, 'msg': err_msg, }) raise exception.VolumeNotFound(volume_id=key) elif ALREADY_MAPPED_ERR in err_msg: raise XtremIOAlreadyMappedError() elif err_msg == SYSTEM_BUSY: raise XtremIOArrayBusy() elif err_msg in (TOO_MANY_OBJECTS, TOO_MANY_SNAPSHOTS_PER_VOL): raise XtremIOSnapshotsLimitExceeded() msg = _('Bad response from XMS, %s') % response.text LOG.error(msg) raise exception.VolumeBackendAPIException(message=msg) def update_url(self, data, cluster_id): return def update_data(self, data, cluster_id): return def get_cluster(self): return self.req('clusters', idx=1)['content'] def create_snapshot(self, src, dest, ro=False): """Create a snapshot of a volume on the array. XtreamIO array snapshots are also volumes. :src: name of the source volume to be cloned :dest: name for the new snapshot :ro: new snapshot type ro/regular. 
only applicable to Client4 """ raise NotImplementedError() def get_extra_capabilities(self): return {} def get_initiator(self, port_address): raise NotImplementedError() def add_vol_to_cg(self, vol_id, cg_id): pass def get_initiators_igs(self, port_addresses): ig_indexes = set() for port_address in port_addresses: initiator = self.get_initiator(port_address) ig_indexes.add(initiator['ig-id'][XTREMIO_OID_INDEX]) return list(ig_indexes) def get_fc_up_ports(self): targets = [self.req('targets', name=target['name'])['content'] for target in self.req('targets')['targets']] return [target for target in targets if target['port-type'] == 'fc' and target["port-state"] == 'up'] class XtremIOClient3(XtremIOClient): def __init__(self, configuration, cluster_id): super(XtremIOClient3, self).__init__(configuration, cluster_id) self._portals = [] def find_lunmap(self, ig_name, vol_name): try: lun_mappings = self.req('lun-maps')['lun-maps'] except exception.NotFound: raise (exception.VolumeDriverException (_("can't find lun-map, ig:%(ig)s vol:%(vol)s") % {'ig': ig_name, 'vol': vol_name})) for lm_link in lun_mappings: idx = lm_link['href'].split('/')[-1] # NOTE(geguileo): There can be races so mapped elements retrieved # in the listing may no longer exist. try: lm = self.req('lun-maps', idx=int(idx))['content'] except exception.NotFound: continue if lm['ig-name'] == ig_name and lm['vol-name'] == vol_name: return lm return None def num_of_mapped_volumes(self, initiator): cnt = 0 for lm_link in self.req('lun-maps')['lun-maps']: idx = lm_link['href'].split('/')[-1] # NOTE(geguileo): There can be races so mapped elements retrieved # in the listing may no longer exist. try: lm = self.req('lun-maps', idx=int(idx))['content'] except exception.NotFound: continue if lm['ig-name'] == initiator: cnt += 1 return cnt def get_iscsi_portals(self): if self._portals: return self._portals iscsi_portals = [t['name'] for t in self.req('iscsi-portals') ['iscsi-portals']] for portal_name in iscsi_portals: try: self._portals.append(self.req('iscsi-portals', name=portal_name)['content']) except exception.NotFound: raise (exception.VolumeBackendAPIException (data=_("iscsi portal, %s, not found") % portal_name)) return self._portals def create_snapshot(self, src, dest, ro=False): data = {'snap-vol-name': dest, 'ancestor-vol-id': src} self.req('snapshots', 'POST', data) def get_initiator(self, port_address): try: return self.req('initiators', 'GET', name=port_address)['content'] except exception.NotFound: pass class XtremIOClient4(XtremIOClient): def __init__(self, configuration, cluster_id): super(XtremIOClient4, self).__init__(configuration, cluster_id) self._cluster_name = None def req(self, object_type='volumes', method='GET', data=None, name=None, idx=None, ver='v2'): return super(XtremIOClient4, self).req(object_type, method, data, name, idx, ver) def get_extra_capabilities(self): return {'consistencygroup_support': True} def find_lunmap(self, ig_name, vol_name): try: return (self.req('lun-maps', data={'full': 1, 'filter': ['vol-name:eq:%s' % vol_name, 'ig-name:eq:%s' % ig_name]}) ['lun-maps'][0]) except (KeyError, IndexError): raise exception.VolumeNotFound(volume_id=vol_name) def num_of_mapped_volumes(self, initiator): return len(self.req('lun-maps', data={'filter': 'ig-name:eq:%s' % initiator}) ['lun-maps']) def update_url(self, data, cluster_id): if cluster_id: data['cluster-name'] = cluster_id def update_data(self, data, cluster_id): if cluster_id: data['cluster-id'] = cluster_id def get_iscsi_portals(self): return 
self.req('iscsi-portals', data={'full': 1})['iscsi-portals'] def get_cluster(self): if not self.cluster_id: self.cluster_id = self.req('clusters')['clusters'][0]['name'] return self.req('clusters', name=self.cluster_id)['content'] def create_snapshot(self, src, dest, ro=False): data = {'snapshot-set-name': dest, 'snap-suffix': dest, 'volume-list': [src], 'snapshot-type': 'readonly' if ro else 'regular'} res = self.req('snapshots', 'POST', data, ver='v2') typ, idx = res['links'][0]['href'].split('/')[-2:] # rename the snapshot data = {'name': dest} try: self.req(typ, 'PUT', data, idx=int(idx)) except exception.VolumeBackendAPIException: # reverting LOG.error('Failed to rename the created snapshot, reverting.') self.req(typ, 'DELETE', idx=int(idx)) raise def add_vol_to_cg(self, vol_id, cg_id): add_data = {'vol-id': vol_id, 'cg-id': cg_id} self.req('consistency-group-volumes', 'POST', add_data, ver='v2') def get_initiator(self, port_address): inits = self.req('initiators', data={'filter': 'port-address:eq:' + port_address, 'full': 1})['initiators'] if len(inits) == 1: return inits[0] else: pass def get_fc_up_ports(self): return self.req('targets', data={'full': 1, 'filter': ['port-type:eq:fc', 'port-state:eq:up'], 'prop': 'port-address'})["targets"] class XtremIOClient42(XtremIOClient4): def get_initiators_igs(self, port_addresses): init_filter = ','.join('port-address:eq:{}'.format(port_address) for port_address in port_addresses) initiators = self.req('initiators', data={'filter': init_filter, 'full': 1, 'prop': 'ig-id'})['initiators'] return list(set(ig_id['ig-id'][XTREMIO_OID_INDEX] for ig_id in initiators)) class XtremIOVolumeDriver(san.SanDriver): """Executes commands relating to Volumes.""" VERSION = '1.0.13' # ThirdPartySystems wiki CI_WIKI_NAME = "DellEMC_XtremIO_CI" driver_name = 'XtremIO' MIN_XMS_VERSION = [3, 0, 0] SUPPORTED = False def __init__(self, *args, **kwargs): super(XtremIOVolumeDriver, self).__init__(*args, **kwargs) self.configuration.append_config_values(XTREMIO_OPTS) self.protocol = None self.backend_name = (self.configuration.safe_get('volume_backend_name') or self.driver_name) self.cluster_id = (self.configuration.safe_get('xtremio_cluster_name') or '') self.provisioning_factor = \ volume_utils.get_max_over_subscription_ratio( self.configuration.max_over_subscription_ratio, supports_auto=False) self.clean_ig = (self.configuration.safe_get('xtremio_clean_unused_ig') or False) self._stats = {} self.allowed_ports = [ port.strip().lower() for port in self.configuration.safe_get('xtremio_ports') ] self.client = XtremIOClient3(self.configuration, self.cluster_id) @classmethod def get_driver_options(cls): additional_opts = cls._get_oslo_driver_opts( 'san_ip', 'san_login', 'san_password', 'driver_ssl_cert_verify', 'driver_ssl_cert_path', 'max_over_subscription_ratio', 'reserved_percentage') return XTREMIO_OPTS + additional_opts def _obj_from_result(self, res): typ, idx = res['links'][0]['href'].split('/')[-2:] return self.client.req(typ, idx=int(idx))['content'] def check_for_setup_error(self): try: name = self.client.req('clusters')['clusters'][0]['name'] cluster = self.client.req('clusters', name=name)['content'] version_text = cluster['sys-sw-version'] except exception.NotFound: msg = _("XtremIO not initialized correctly, no clusters found") raise (exception.VolumeBackendAPIException (data=msg)) ver = [int(n) for n in version_text.split('-')[0].split('.')] if ver < self.MIN_XMS_VERSION: msg = (_('Invalid XtremIO version %(cur)s,' ' version %(min)s or up is required') % 
{'min': self.MIN_XMS_VERSION, 'cur': ver}) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) else: LOG.info('XtremIO Cluster version %s', version_text) client_ver = '3' if ver[0] >= 4: # get XMS version xms = self.client.req('xms', idx=1)['content'] xms_version = tuple([int(i) for i in xms['sw-version'].split('-')[0].split('.')]) LOG.info('XtremIO XMS version %s', version_text) if xms_version >= (4, 2): self.client = XtremIOClient42(self.configuration, self.cluster_id) client_ver = '4.2' else: self.client = XtremIOClient4(self.configuration, self.cluster_id) client_ver = '4' LOG.info('Using XtremIO Client %s', client_ver) def create_volume(self, volume): """Creates a volume.""" data = {'vol-name': volume['id'], 'vol-size': str(volume['size']) + 'g' } self.client.req('volumes', 'POST', data) # Add the volume to a cg in case volume requested a cgid or group_id. # If both cg_id and group_id exists in a volume. group_id will take # place. consistency_group = volume.get('consistencygroup_id') # if cg_id and group_id are both exists, we gives priority to group_id. if volume.get('group_id'): consistency_group = volume.get('group_id') if consistency_group: self.client.add_vol_to_cg(volume['id'], consistency_group) def create_volume_from_snapshot(self, volume, snapshot): """Creates a volume from a snapshot.""" if snapshot.get('cgsnapshot_id'): # get array snapshot id from CG snapshot snap_by_anc = self._get_snapset_ancestors(snapshot.cgsnapshot) snapshot_id = snap_by_anc[snapshot['volume_id']] else: snapshot_id = snapshot['id'] try: self.client.create_snapshot(snapshot_id, volume['id']) except XtremIOSnapshotsLimitExceeded as e: raise exception.CinderException(e.message) # extend the snapped volume if requested size is larger then original if volume['size'] > snapshot['volume_size']: try: self.extend_volume(volume, volume['size']) except Exception: LOG.error('failed to extend volume %s, ' 'reverting volume from snapshot operation', volume['id']) # remove the volume in case resize failed self.delete_volume(volume) raise # add new volume to consistency group if (volume.get('consistencygroup_id') and self.client is XtremIOClient4): self.client.add_vol_to_cg(volume['id'], snapshot['consistencygroup_id']) def create_cloned_volume(self, volume, src_vref): """Creates a clone of the specified volume.""" vol = self.client.req('volumes', name=src_vref['id'])['content'] ctxt = context.get_admin_context() cache = self.db.image_volume_cache_get_by_volume_id(ctxt, src_vref['id']) limit = self.configuration.safe_get('xtremio_volumes_per_glance_cache') if cache and limit and limit > 0 and limit <= vol['num-of-dest-snaps']: raise exception.SnapshotLimitReached(set_limit=limit) try: self.client.create_snapshot(src_vref['id'], volume['id']) except XtremIOSnapshotsLimitExceeded as e: raise exception.CinderException(e.message) # extend the snapped volume if requested size is larger then original if volume['size'] > src_vref['size']: try: self.extend_volume(volume, volume['size']) except Exception: LOG.error('failed to extend volume %s, ' 'reverting clone operation', volume['id']) # remove the volume in case resize failed self.delete_volume(volume) raise if volume.get('consistencygroup_id') and self.client is XtremIOClient4: self.client.add_vol_to_cg(volume['id'], volume['consistencygroup_id']) def delete_volume(self, volume): """Deletes a volume.""" try: self.client.req('volumes', 'DELETE', name=volume.name_id) except exception.NotFound: LOG.info("volume %s doesn't exist", volume.name_id) def 
create_snapshot(self, snapshot): """Creates a snapshot.""" self.client.create_snapshot(snapshot.volume_id, snapshot.id, True) def delete_snapshot(self, snapshot): """Deletes a snapshot.""" try: self.client.req('volumes', 'DELETE', name=snapshot.id) except exception.NotFound: LOG.info("snapshot %s doesn't exist", snapshot.id) def update_migrated_volume(self, ctxt, volume, new_volume, original_volume_status): # as the volume name is used to id the volume we need to rename it name_id = None provider_location = None current_name = new_volume['id'] original_name = volume['id'] try: data = {'name': original_name} self.client.req('volumes', 'PUT', data, name=current_name) except exception.VolumeBackendAPIException: LOG.error('Unable to rename the logical volume ' 'for volume: %s', original_name) # If the rename fails, _name_id should be set to the new # volume id and provider_location should be set to the # one from the new volume as well. name_id = new_volume['_name_id'] or new_volume['id'] provider_location = new_volume['provider_location'] return {'_name_id': name_id, 'provider_location': provider_location} def _update_volume_stats(self): sys = self.client.get_cluster() physical_space = int(sys["ud-ssd-space"]) / units.Mi used_physical_space = int(sys["ud-ssd-space-in-use"]) / units.Mi free_physical = physical_space - used_physical_space actual_prov = int(sys["vol-size"]) / units.Mi self._stats = {'volume_backend_name': self.backend_name, 'vendor_name': 'Dell EMC', 'driver_version': self.VERSION, 'storage_protocol': self.protocol, 'total_capacity_gb': physical_space, 'free_capacity_gb': free_physical, 'provisioned_capacity_gb': actual_prov, 'max_over_subscription_ratio': self.provisioning_factor, 'thin_provisioning_support': True, 'thick_provisioning_support': False, 'reserved_percentage': self.configuration.reserved_percentage, 'QoS_support': False, 'multiattach': True, } self._stats.update(self.client.get_extra_capabilities()) def manage_existing(self, volume, existing_ref, is_snapshot=False): """Manages an existing LV.""" lv_name = existing_ref['source-name'] # Attempt to locate the volume. try: vol_obj = self.client.req('volumes', name=lv_name)['content'] if ( is_snapshot and (not vol_obj['ancestor-vol-id'] or vol_obj['ancestor-vol-id'][XTREMIO_OID_NAME] != volume.volume_id)): kwargs = {'existing_ref': lv_name, 'reason': 'Not a snapshot of vol %s' % volume.volume_id} raise exception.ManageExistingInvalidReference(**kwargs) except exception.NotFound: kwargs = {'existing_ref': lv_name, 'reason': 'Specified logical %s does not exist.' % 'snapshot' if is_snapshot else 'volume'} raise exception.ManageExistingInvalidReference(**kwargs) # Attempt to rename the LV to match the OpenStack internal name. self.client.req('volumes', 'PUT', data={'vol-name': volume['id']}, idx=vol_obj['index']) def manage_existing_get_size(self, volume, existing_ref, is_snapshot=False): """Return size of an existing LV for manage_existing.""" # Check that the reference is valid if 'source-name' not in existing_ref: reason = _('Reference must contain source-name element.') raise exception.ManageExistingInvalidReference( existing_ref=existing_ref, reason=reason) lv_name = existing_ref['source-name'] # Attempt to locate the volume. try: vol_obj = self.client.req('volumes', name=lv_name)['content'] except exception.NotFound: kwargs = {'existing_ref': lv_name, 'reason': 'Specified logical %s does not exist.' 
% 'snapshot' if is_snapshot else 'volume'} raise exception.ManageExistingInvalidReference(**kwargs) # LV size is returned in gigabytes. Attempt to parse size as a float # and round up to the next integer. lv_size = int(math.ceil(float(vol_obj['vol-size']) / units.Mi)) return lv_size def unmanage(self, volume, is_snapshot=False): """Removes the specified volume from Cinder management.""" # trying to rename the volume to [cinder name]-unmanged try: self.client.req('volumes', 'PUT', name=volume['id'], data={'vol-name': volume['name'] + '-unmanged'}) except exception.NotFound: LOG.info("%(typ)s with the name %(name)s wasn't found, " "can't unmanage", {'typ': 'Snapshot' if is_snapshot else 'Volume', 'name': volume['id']}) raise exception.VolumeNotFound(volume_id=volume['id']) def manage_existing_snapshot(self, snapshot, existing_ref): self.manage_existing(snapshot, existing_ref, True) def manage_existing_snapshot_get_size(self, snapshot, existing_ref): return self.manage_existing_get_size(snapshot, existing_ref, True) def unmanage_snapshot(self, snapshot): self.unmanage(snapshot, True) def extend_volume(self, volume, new_size): """Extend an existing volume's size.""" data = {'vol-size': str(new_size) + 'g'} try: self.client.req('volumes', 'PUT', data, name=volume['id']) except exception.NotFound: msg = _("can't find the volume to extend") raise exception.VolumeDriverException(message=msg) def check_for_export(self, context, volume_id): """Make sure volume is exported.""" pass def terminate_connection(self, volume, connector, **kwargs): """Disallow connection from connector""" tg_index = '1' if not connector: vol = self.client.req('volumes', name=volume.id)['content'] # force detach, unmap all IGs from volume IG_OID = 0 ig_indexes = [lun_map[IG_OID][XTREMIO_OID_INDEX] for lun_map in vol['lun-mapping-list']] LOG.info('Force detach volume %(vol)s from luns %(luns)s.', {'vol': vol['name'], 'luns': ig_indexes}) else: host = connector['host'] attachment_list = volume.volume_attachment LOG.debug("Volume attachment list: %(atl)s. " "Attachment type: %(at)s", {'atl': attachment_list, 'at': type(attachment_list)}) try: att_list = attachment_list.objects except AttributeError: att_list = attachment_list if att_list is not None: host_list = [att.connector['host'] for att in att_list if att is not None and att.connector is not None] current_host_occurrences = host_list.count(host) if current_host_occurrences > 1: LOG.info("Volume is attached to multiple instances on " "this host. 
Not removing the lun map.") return vol = self.client.req('volumes', name=volume.id, data={'prop': 'index'})['content'] ig_indexes = self._get_ig_indexes_from_initiators(connector) for ig_idx in ig_indexes: lm_name = '%s_%s_%s' % (str(vol['index']), str(ig_idx), tg_index) LOG.debug('Removing lun map %s.', lm_name) try: self.client.req('lun-maps', 'DELETE', name=lm_name) except exception.NotFound: LOG.warning("terminate_connection: lun map not found") if self.clean_ig: for idx in ig_indexes: try: ig = self.client.req('initiator-groups', 'GET', {'prop': 'num-of-vols'}, idx=idx)['content'] if ig['num-of-vols'] == 0: self.client.req('initiator-groups', 'DELETE', idx=idx) except (exception.NotFound, exception.VolumeBackendAPIException): LOG.warning('Failed to clean IG %d without mappings', idx) def _get_password(self): return volume_utils.generate_password( length=12, symbolgroups=(string.ascii_uppercase + string.digits)) def create_lun_map(self, volume, ig, lun_num=None): try: data = {'ig-id': ig, 'vol-id': volume['id']} if lun_num: data['lun'] = lun_num res = self.client.req('lun-maps', 'POST', data) lunmap = self._obj_from_result(res) LOG.info('Created lun-map:\n%s', lunmap) except XtremIOAlreadyMappedError: LOG.info('Volume already mapped, retrieving %(ig)s, %(vol)s', {'ig': ig, 'vol': volume['id']}) lunmap = self.client.find_lunmap(ig, volume['id']) return lunmap def _get_ig_name(self, connector): raise NotImplementedError() def _get_ig_indexes_from_initiators(self, connector): initiator_names = self._get_initiator_names(connector) return self.client.get_initiators_igs(initiator_names) def _get_initiator_names(self, connector): raise NotImplementedError() def create_consistencygroup(self, context, group): """Creates a consistency group. :param context: the context :param group: the group object to be created :returns: dict -- modelUpdate = {'status': 'available'} :raises: VolumeBackendAPIException """ create_data = {'consistency-group-name': group['id']} self.client.req('consistency-groups', 'POST', data=create_data, ver='v2') return {'status': fields.ConsistencyGroupStatus.AVAILABLE} def delete_consistencygroup(self, context, group, volumes): """Deletes a consistency group.""" self.client.req('consistency-groups', 'DELETE', name=group['id'], ver='v2') volumes_model_update = [] for volume in volumes: self.delete_volume(volume) update_item = {'id': volume['id'], 'status': 'deleted'} volumes_model_update.append(update_item) model_update = {'status': group['status']} return model_update, volumes_model_update def _get_snapset_ancestors(self, snapset_name): snapset = self.client.req('snapshot-sets', name=snapset_name)['content'] volume_ids = [s[XTREMIO_OID_INDEX] for s in snapset['vol-list']] return {v['ancestor-vol-id'][XTREMIO_OID_NAME]: v['name'] for v in self.client.req('volumes', data={'full': 1, 'props': 'ancestor-vol-id'})['volumes'] if v['index'] in volume_ids} def create_consistencygroup_from_src(self, context, group, volumes, cgsnapshot=None, snapshots=None, source_cg=None, source_vols=None): """Creates a consistencygroup from source. :param context: the context of the caller. :param group: the dictionary of the consistency group to be created. :param volumes: a list of volume dictionaries in the group. :param cgsnapshot: the dictionary of the cgsnapshot as source. :param snapshots: a list of snapshot dictionaries in the cgsnapshot. :param source_cg: the dictionary of a consistency group as source. :param source_vols: a list of volume dictionaries in the source_cg. 
:returns: model_update, volumes_model_update """ if not (cgsnapshot and snapshots and not source_cg or source_cg and source_vols and not cgsnapshot): msg = _("create_consistencygroup_from_src only supports a " "cgsnapshot source or a consistency group source. " "Multiple sources cannot be used.") raise exception.InvalidInput(msg) if cgsnapshot: snap_name = self._get_cgsnap_name(cgsnapshot) snap_by_anc = self._get_snapset_ancestors(snap_name) for volume, snapshot in zip(volumes, snapshots): real_snap = snap_by_anc[snapshot['volume_id']] self.create_volume_from_snapshot( volume, {'id': real_snap, 'volume_size': snapshot['volume_size']}) elif source_cg: data = {'consistency-group-id': source_cg['id'], 'snapshot-set-name': group['id']} self.client.req('snapshots', 'POST', data, ver='v2') snap_by_anc = self._get_snapset_ancestors(group['id']) for volume, src_vol in zip(volumes, source_vols): snap_vol_name = snap_by_anc[src_vol['id']] self.client.req('volumes', 'PUT', {'name': volume['id']}, name=snap_vol_name) create_data = {'consistency-group-name': group['id'], 'vol-list': [v['id'] for v in volumes]} self.client.req('consistency-groups', 'POST', data=create_data, ver='v2') return None, None def update_consistencygroup(self, context, group, add_volumes=None, remove_volumes=None): """Updates a consistency group. :param context: the context of the caller. :param group: the dictionary of the consistency group to be updated. :param add_volumes: a list of volume dictionaries to be added. :param remove_volumes: a list of volume dictionaries to be removed. :returns: model_update, add_volumes_update, remove_volumes_update """ add_volumes = add_volumes if add_volumes else [] remove_volumes = remove_volumes if remove_volumes else [] for vol in add_volumes: add_data = {'vol-id': vol['id'], 'cg-id': group['id']} self.client.req('consistency-group-volumes', 'POST', add_data, ver='v2') for vol in remove_volumes: remove_data = {'vol-id': vol['id'], 'cg-id': group['id']} self.client.req('consistency-group-volumes', 'DELETE', remove_data, name=group['id'], ver='v2') return None, None, None def _get_cgsnap_name(self, cgsnapshot): group_id = cgsnapshot.get('group_id') if group_id is None: group_id = cgsnapshot.get('consistencygroup_id') return '%(cg)s%(snap)s' % {'cg': group_id .replace('-', ''), 'snap': cgsnapshot['id'].replace('-', '')} def create_cgsnapshot(self, context, cgsnapshot, snapshots): """Creates a cgsnapshot.""" group_id = cgsnapshot.get('group_id') if group_id is None: group_id = cgsnapshot.get('consistencygroup_id') data = {'consistency-group-id': group_id, 'snapshot-set-name': self._get_cgsnap_name(cgsnapshot)} self.client.req('snapshots', 'POST', data, ver='v2') return None, None def delete_cgsnapshot(self, context, cgsnapshot, snapshots): """Deletes a cgsnapshot.""" self.client.req('snapshot-sets', 'DELETE', name=self._get_cgsnap_name(cgsnapshot), ver='v2') return None, None def create_group(self, context, group): """Creates a group. :param context: the context of the caller. :param group: the group object. :returns: model_update """ # the driver treats a group as a CG internally. # We proxy the calls to the CG api. return self.create_consistencygroup(context, group) def delete_group(self, context, group, volumes): """Deletes a group. :param context: the context of the caller. :param group: the group object. :param volumes: a list of volume objects in the group. :returns: model_update, volumes_model_update """ # the driver treats a group as a CG internally. # We proxy the calls to the CG api. 
return self.delete_consistencygroup(context, group, volumes) def update_group(self, context, group, add_volumes=None, remove_volumes=None): """Updates a group. :param context: the context of the caller. :param group: the group object. :param add_volumes: a list of volume objects to be added. :param remove_volumes: a list of volume objects to be removed. :returns: model_update, add_volumes_update, remove_volumes_update """ # the driver treats a group as a CG internally. # We proxy the calls to the CG api. return self.update_consistencygroup(context, group, add_volumes, remove_volumes) def create_group_from_src(self, context, group, volumes, group_snapshot=None, snapshots=None, source_group=None, source_vols=None): """Creates a group from source. :param context: the context of the caller. :param group: the Group object to be created. :param volumes: a list of Volume objects in the group. :param group_snapshot: the GroupSnapshot object as source. :param snapshots: a list of snapshot objects in group_snapshot. :param source_group: the Group object as source. :param source_vols: a list of volume objects in the source_group. :returns: model_update, volumes_model_update """ # the driver treats a group as a CG internally. # We proxy the calls to the CG api. return self.create_consistencygroup_from_src(context, group, volumes, group_snapshot, snapshots, source_group, source_vols) def create_group_snapshot(self, context, group_snapshot, snapshots): """Creates a group_snapshot. :param context: the context of the caller. :param group_snapshot: the GroupSnapshot object to be created. :param snapshots: a list of Snapshot objects in the group_snapshot. :returns: model_update, snapshots_model_update """ # the driver treats a group as a CG internally. # We proxy the calls to the CG api. return self.create_cgsnapshot(context, group_snapshot, snapshots) def delete_group_snapshot(self, context, group_snapshot, snapshots): """Deletes a group_snapshot. :param context: the context of the caller. :param group_snapshot: the GroupSnapshot object to be deleted. :param snapshots: a list of snapshot objects in the group_snapshot. :returns: model_update, snapshots_model_update """ # the driver treats a group as a CG internally. # We proxy the calls to the CG api. return self.delete_cgsnapshot(context, group_snapshot, snapshots) def _get_ig(self, name): try: return self.client.req('initiator-groups', 'GET', name=name)['content'] except exception.NotFound: pass def _create_ig(self, name): # create an initiator group to hold the initiator data = {'ig-name': name} self.client.req('initiator-groups', 'POST', data) try: return self.client.req('initiator-groups', name=name)['content'] except exception.NotFound: raise (exception.VolumeBackendAPIException (data=_("Failed to create IG, %s") % name)) def _port_is_allowed(self, port): """Check if port is in allowed ports list. If allowed ports are empty then all ports are allowed. :param port: iSCSI IP/FC WWN to check :return: is port allowed """ if not self.allowed_ports: return True return port.lower() in self.allowed_ports @interface.volumedriver class XtremIOISCSIDriver(XtremIOVolumeDriver, driver.ISCSIDriver): """Executes commands relating to ISCSI volumes. We make use of model provider properties as follows: ``provider_location`` if present, contains the iSCSI target information in the same format as an ietadm discovery i.e. ':, ' ``provider_auth`` if present, contains a space-separated triple: ' '. `CHAP` is the only auth_method in use at the moment. 
""" driver_name = 'XtremIO_ISCSI' def __init__(self, *args, **kwargs): super(XtremIOISCSIDriver, self).__init__(*args, **kwargs) self.protocol = constants.ISCSI def _add_auth(self, data, login_chap, discovery_chap): login_passwd, discovery_passwd = None, None if login_chap: data['initiator-authentication-user-name'] = 'chap_user' login_passwd = self._get_password() data['initiator-authentication-password'] = login_passwd if discovery_chap: data['initiator-discovery-user-name'] = 'chap_user' discovery_passwd = self._get_password() data['initiator-discovery-password'] = discovery_passwd return login_passwd, discovery_passwd def _create_initiator(self, connector, login_chap, discovery_chap): initiator = self._get_initiator_names(connector)[0] # create an initiator data = {'initiator-name': initiator, 'ig-id': initiator, 'port-address': initiator} l, d = self._add_auth(data, login_chap, discovery_chap) self.client.req('initiators', 'POST', data) return l, d def initialize_connection(self, volume, connector): try: sys = self.client.get_cluster() except exception.NotFound: msg = _("XtremIO not initialized correctly, no clusters found") raise exception.VolumeBackendAPIException(data=msg) login_chap = (sys.get('chap-authentication-mode', 'disabled') != 'disabled') discovery_chap = (sys.get('chap-discovery-mode', 'disabled') != 'disabled') initiator_name = self._get_initiator_names(connector)[0] initiator = self.client.get_initiator(initiator_name) if initiator: login_passwd = initiator['chap-authentication-initiator-password'] discovery_passwd = initiator['chap-discovery-initiator-password'] ig = self._get_ig(initiator['ig-id'][XTREMIO_OID_NAME]) else: ig = self._get_ig(self._get_ig_name(connector)) if not ig: ig = self._create_ig(self._get_ig_name(connector)) (login_passwd, discovery_passwd) = self._create_initiator(connector, login_chap, discovery_chap) # if CHAP was enabled after the initiator was created if login_chap and not login_passwd: LOG.info('Initiator has no password while using chap, adding it.') data = {} (login_passwd, d_passwd) = self._add_auth(data, login_chap, discovery_chap and not discovery_passwd) discovery_passwd = (discovery_passwd if discovery_passwd else d_passwd) self.client.req('initiators', 'PUT', data, idx=initiator['index']) # lun mappping lunmap = self.create_lun_map(volume, ig['ig-id'][XTREMIO_OID_NAME]) properties = self._get_iscsi_properties(lunmap) if login_chap: properties['auth_method'] = 'CHAP' properties['auth_username'] = 'chap_user' properties['auth_password'] = login_passwd if discovery_chap: properties['discovery_auth_method'] = 'CHAP' properties['discovery_auth_username'] = 'chap_user' properties['discovery_auth_password'] = discovery_passwd LOG.debug('init conn params:\n%s', strutils.mask_dict_password(properties)) return { 'driver_volume_type': 'iscsi', 'data': properties } def _get_iscsi_properties(self, lunmap): """Gets iscsi configuration. :target_discovered: boolean indicating whether discovery was used :target_iqn: the IQN of the iSCSI target :target_portal: the portal of the iSCSI target :target_lun: the lun of the iSCSI target :volume_id: the id of the volume (currently used by xen) :auth_method:, :auth_username:, :auth_password: the authentication details. Right now, either auth_method is not present meaning no authentication, or auth_method == `CHAP` meaning use CHAP with the specified credentials. multiple connection return :target_iqns, :target_portals, :target_luns, which contain lists of multiple values. 
The main portal information is also returned in :target_iqn, :target_portal, :target_lun for backward compatibility. """ iscsi_portals = self.client.get_iscsi_portals() allowed_portals = [] for iscsi_portal in iscsi_portals: iscsi_portal['ip-addr'] = iscsi_portal['ip-addr'].split('/')[0] if self._port_is_allowed(iscsi_portal['ip-addr']): allowed_portals.append(iscsi_portal) if not allowed_portals: msg = _("There are no accessible iSCSI targets on the " "system.") raise exception.VolumeBackendAPIException(data=msg) portal = RANDOM.choice(allowed_portals) portal_addr = ('%(ip)s:%(port)d' % {'ip': netutils.escape_ipv6(portal['ip-addr']), 'port': portal['ip-port']}) tg_portals = ['%(ip)s:%(port)d' % {'ip': netutils.escape_ipv6(p['ip-addr']), 'port': p['ip-port']} for p in allowed_portals] properties = {'target_discovered': False, 'target_iqn': portal['port-address'], 'target_lun': lunmap['lun'], 'target_portal': portal_addr, 'target_iqns': [ p['port-address'] for p in allowed_portals ], 'target_portals': tg_portals, 'target_luns': [lunmap['lun']] * len(allowed_portals)} return properties def _get_initiator_names(self, connector): return [connector['initiator']] def _get_ig_name(self, connector): return connector['initiator'] @interface.volumedriver class XtremIOFCDriver(XtremIOVolumeDriver, driver.FibreChannelDriver): def __init__(self, *args, **kwargs): super(XtremIOFCDriver, self).__init__(*args, **kwargs) self.protocol = constants.FC self._targets = None def get_targets(self): if not self._targets: try: targets = self.client.get_fc_up_ports() allowed_targets = [] for target in targets: if self._port_is_allowed(target['port-address']): allowed_targets.append( target['port-address'].replace(':', '') ) if not allowed_targets: msg = _("There are no accessible Fibre Channel targets " "on the system.") raise exception.VolumeBackendAPIException(data=msg) self._targets = allowed_targets except exception.NotFound: raise (exception.VolumeBackendAPIException (data=_("Failed to get targets"))) return self._targets def _get_free_lun(self, igs): luns = [] for ig in igs: luns.extend(lm['lun'] for lm in self.client.req('lun-maps', data={'full': 1, 'prop': 'lun', 'filter': 'ig-name:eq:%s' % ig}) ['lun-maps']) uniq_luns = set(luns + [0]) seq = range(len(uniq_luns) + 1) return min(set(seq) - uniq_luns) def initialize_connection(self, volume, connector): wwpns = self._get_initiator_names(connector) ig_name = self._get_ig_name(connector) i_t_map = {} found = [] new = [] for wwpn in wwpns: init = self.client.get_initiator(wwpn) if init: found.append(init) else: new.append(wwpn) i_t_map[wwpn.replace(':', '')] = self.get_targets() # get or create initiator group if new: ig = self._get_ig(ig_name) if not ig: ig = self._create_ig(ig_name) for wwpn in new: data = {'initiator-name': wwpn, 'ig-id': ig_name, 'port-address': wwpn} self.client.req('initiators', 'POST', data) igs = list(set([i['ig-id'][XTREMIO_OID_NAME] for i in found])) if new and ig['ig-id'][XTREMIO_OID_NAME] not in igs: igs.append(ig['ig-id'][XTREMIO_OID_NAME]) if len(igs) > 1: lun_num = self._get_free_lun(igs) else: lun_num = None for ig in igs: lunmap = self.create_lun_map(volume, ig, lun_num) lun_num = lunmap['lun'] conn_info = {'driver_volume_type': 'fibre_channel', 'data': { 'target_discovered': False, 'target_lun': lun_num, 'target_wwn': self.get_targets(), 'initiator_target_map': i_t_map}} fczm_utils.add_fc_zone(conn_info) return conn_info def terminate_connection(self, volume, connector, **kwargs): (super(XtremIOFCDriver, self) 
.terminate_connection(volume, connector, **kwargs)) has_volumes = (not connector or self.client. num_of_mapped_volumes(self._get_ig_name(connector)) > 0) if has_volumes: data = {} else: i_t_map = {} for initiator in self._get_initiator_names(connector): i_t_map[initiator.replace(':', '')] = self.get_targets() data = {'target_wwn': self.get_targets(), 'initiator_target_map': i_t_map} conn_info = {'driver_volume_type': 'fibre_channel', 'data': data} fczm_utils.remove_fc_zone(conn_info) return conn_info def _get_initiator_names(self, connector): return [wwpn if ':' in wwpn else ':'.join(wwpn[i:i + 2] for i in range(0, len(wwpn), 2)) for wwpn in connector['wwpns']] def _get_ig_name(self, connector): return connector['host'] ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315577.3471208 cinder-27.0.0/cinder/volume/drivers/fujitsu/0000775000175000017500000000000000000000000021011 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/drivers/fujitsu/__init__.py0000664000175000017500000000000000000000000023110 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315577.3471208 cinder-27.0.0/cinder/volume/drivers/fujitsu/eternus_dx/0000775000175000017500000000000000000000000023171 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/drivers/fujitsu/eternus_dx/__init__.py0000664000175000017500000000000000000000000025270 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/drivers/fujitsu/eternus_dx/constants.py0000664000175000017500000032350100000000000025563 0ustar00zuulzuul00000000000000# Copyright (c) 2019 FUJITSU LIMITED # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # RAIDGROUP = 2 TPPOOL = 5 SNAPOPC = 4 OPC = 5 RETURN_TO_RESOURCEPOOL = 19 DETACH = 8 BROKEN = 5 DX_S2 = 2 DX_S3 = 3 JOB_RETRIES = 60 JOB_INTERVAL_SEC = 10 TIMES_MIN = 3 EC_REC = 3 RETRY_INTERVAL = 5 # Error code keyword. RG_VOLNUM_MAX = 32769 VOLUME_IS_BUSY = 32786 DEVICE_IS_BUSY = 32787 VOLUMENAME_IN_USE = 32788 COPYSESSION_NOT_EXIST = 32793 LUNAME_IN_USE = 4102 LUNAME_NOT_EXIST = 4097 # Only for InvokeMethod(HidePaths). VOL_PREFIX = "FJosv_" REPL = "FUJITSU_ReplicationService" STOR_CONF = "FUJITSU_StorageConfigurationService" CTRL_CONF = "FUJITSU_ControllerConfigurationService" UNDEF_MSG = 'Undefined Error!!' MAX_IOPS = 4294967295 MAX_THROUGHPUT = 2097151 MIN_IOPS = 1 MIN_THROUGHPUT = 1 RC_OK = 0 RC_FAILED = 4 QOS_VERSION = 'V11L30-0000' # Here is a misspelling, and the right value should be "Thinprovisioning_POOL". # It would not be compatible with the metadata of the legacy volumes, # so this spelling mistake needs to be retained. 
POOL_TYPE_dic = { RAIDGROUP: 'RAID_GROUP', TPPOOL: 'Thinporvisioning_POOL', } POOL_TYPE_list = [ 'RAID', 'TPP' ] OPERATION_dic = { SNAPOPC: RETURN_TO_RESOURCEPOOL, OPC: DETACH, EC_REC: DETACH, } FJ_QOS_KEY_list = [ 'maxBWS' ] FJ_QOS_KEY_BYTES_list = [ 'read_bytes_sec', 'write_bytes_sec', 'total_bytes_sec' ] FJ_QOS_KEY_IOPS_list = [ 'read_iops_sec', 'write_iops_sec', 'total_iops_sec' ] RETCODE_dic = { '0': 'Success', '1': 'Method Not Supported', '4': 'Failed', '5': 'Invalid Parameter', '4096': 'Method Parameters Checked - Job Started', '4097': 'Size Not Supported', '4101': 'Target/initiator combination already exposed', '4102': 'Requested logical unit number in use', '32769': 'Maximum number of Logical Volume in a RAID group ' 'has been reached', '32770': 'Maximum number of Logical Volume in the storage device ' 'has been reached', '32771': 'Maximum number of registered Host WWN ' 'has been reached', '32772': 'Maximum number of affinity group has been reached', '32773': 'Maximum number of host affinity has been reached', '32781': 'Not available under current system configuration', '32782': 'Controller firmware update in process', '32785': 'The RAID group is in busy state', '32786': 'The Logical Volume is in busy state', '32787': 'The device is in busy state', '32788': 'Element Name is in use', '32791': 'Maximum number of copy session has been reached', '32792': 'No Copy License', '32793': 'Session does not exist', '32794': 'Phase is not correct', '32796': 'Quick Format Error', '32801': 'The CA port is in invalid setting', '32802': 'The Logical Volume is Mainframe volume', '32803': 'The RAID group is not operative', '32804': 'The Logical Volume is not operative', '32805': 'The Logical Element is Thin provisioning Pool Volume', '32806': 'The Logical Volume is pool for copy volume', '32807': 'The Logical Volume is unknown volume', '32808': 'No Thin Provisioning License', '32809': 'The Logical Element is ODX volume', '32810': 'The specified volume is under use as NAS volume', '32811': 'This operation cannot be performed to the NAS resources', '32812': 'This operation cannot be performed to the ' 'Transparent Failover resources', '32813': 'This operation cannot be performed to the ' 'VVOL resources', '32816': 'Generic fatal error', '32817': 'Inconsistent State with 1Step Restore ' 'operation', '32818': 'REC Path failure during copy', '32819': 'RAID failure during EC/REC copy', '32820': 'Previous command in process', '32821': 'Cascade local copy session exist', '32822': 'Cascade EC/REC session is not suspended', '35302': 'Invalid LogicalElement', '35304': 'LogicalElement state error', '35316': 'Multi-hop error', '35318': 'Maximum number of multi-hop has been reached', '35324': 'RAID is broken', '35331': 'Maximum number of session has been reached(per device)', '35333': 'Maximum number of session has been reached(per SourceElement)', '35334': 'Maximum number of session has been reached(per TargetElement)', '35335': 'Maximum number of Snapshot generation has been reached ' '(per SourceElement)', '35346': 'Copy table size is not setup', '35347': 'Copy table size is not enough', } CLIRETCODE_dic = { 'E0001': 'Bad value', 'E0002': 'Value out of range', 'E0003': 'Too many parameters', 'E0004': 'Missing parameter', 'E0005': 'Incorrect parameter combination', 'E0006': 'Inconsistent status', 'E0007': 'Inconsistent usage', 'E0008': 'Inconsistent size', 'E0009': 'Inconsistent RAID level', 'E0010': 'Inconsistent model type of device', 'E0011': 'Inconsistent network setup', 'E0012': 'Inconsistent e-mail 
setup', 'E0014': 'Inconsistent disk status', 'E0015': 'Inconsistent enclosure status', 'E0019': 'Inconsistent parameter', 'E0020': 'Internal error', 'E0021': 'The requested operation has failed', 'E0030': 'Command not supported', 'E0031': 'Reserved keyword is used', 'E0032': 'Controller firmware cannot be downgraded', 'E0033': 'Not applicable to this target', 'E0034': 'Mainframe resources', 'E0035': 'Disk firmware can only be upgraded', 'E0041': 'Incorrect password syntax', 'E0042': 'Incorrect password', 'E0050': 'Incorrect file', 'E0051': 'Incorrect license key', 'E0052': 'File access failure', 'E0053': 'Remote server access failure', 'E0060': 'Resource locked', 'E0061': 'Lock was relinquished to another user', 'E0070': 'Resource busy', 'E0071': 'Resource is linked to the other resource', 'E0072': 'Resource is temporarily insufficient', 'E0073': 'Drive is currently busy. Wait a while, and then retry', 'E0080': 'Resource limited', 'E0081': 'Number of active disks has reached the system limit', 'E0089': 'Not available under current Advanced Copy usable' ' mode conditions', 'E0090': 'Not available under current system status conditions', 'E0091': 'Not available under current SNMP settings', 'E0092': 'Not available under current operation mode conditions', 'E0093': 'Not available under current host affinity mode conditions', 'E0094': 'Not available under current encryption status conditions', 'E0095': 'Not available under current e-mailing conditions', 'E0097': 'Not available under master controller module', 'E0098': 'Not available under slave controller module', 'E0099': 'Not available under current system configuration', 'E0100': 'No space', 'E0101': 'No memory', 'E0102': 'Not available under system disk status', 'E0110': 'Resource does not exist', 'E0111': 'Resource is not reserved', 'E0113': 'No SNMP trap information', 'E0114': 'No volumes in the RAID group / Thin Provisioning Pool', 'E0115': 'Performance monitor has not started', 'E0116': 'The system disks are included in the RAID group', 'E0117': 'No target disks', 'E0118': 'Remote Copy target is not supported model', 'E0120': 'Already registered', 'E0122': 'Closure of all CLI and GUI ports requires confirmation', 'E0123': 'Closure of all CLI ports requires confirmation', 'E0131': 'Already unmapped', 'E0132': 'Already stopped', 'E0133': 'Already running for expanding others', 'E0140': 'One or more components have failed', 'E0141': 'At least one resource is required', 'E0142': 'One or more encrypted volumes exist', 'E0143': 'Unexpected error occurred during operator intervention', 'E0145': 'Advanced Copy table exists', 'E0146': 'RAID group contains a temporary volume', 'E0150': 'Collecting performance data', 'E0151': 'Power-off or power-on in process', 'E0152': 'Volumes formatting in process', 'E0153': 'Encryption or decryption of volumes in process', 'E0154': 'Advanced Copy session active', 'E0155': 'Volumes migration in process', 'E0156': 'RAID group expansion in process', 'E0157': 'Remote Copy session active', 'E0158': 'Controller firmware update in process', 'E0159': 'Remote maintenance in process', 'E0160': 'Competing with background process', 'E0161': 'Competing with disk diagnosis running in background process', 'E0162': 'Competing with RAID group diagnosis running in ' 'background process', 'E0163': 'Competing with hot update of firmware in background process', 'E0164': 'Competing with cold update of firmware in background process', 'E0165': 'Competing with update of disk firmware in background process', 'E0166': 'Competing with 
quick formatting of volume in ' 'background process', 'E0167': 'Competing with changing Advanced Copy parameters in ' 'background process', 'E0168': 'Competing with allocating remote copy buffer in ' 'background process', 'E0169': 'Competing with preparing firmware update in background process', 'E0170': 'Competing with setting cache control in background process', 'E0171': 'Competing with reassigning RAID group controller in ' 'background process', 'E0172': 'Competing with initializing volume in background process', 'E0173': 'Competing with encrypting or decrypting volume in ' 'background process', 'E0174': 'Competing with registering RAID group in background process', 'E0175': 'Competing with deleting RAID group in background process', 'E0176': 'Competing with registering volume in background process', 'E0177': 'Competing with deleting volume in background process', 'E0178': 'Competing with registering global hot spare in ' 'background process', 'E0179': 'Competing with changing maintenance mode in background process', 'E0180': 'Competing with moving volume in background process', 'E0181': 'Competing with expanding RAID group in background process', 'E0182': 'Competing with collecting G-List information in ' 'background process', 'E0183': 'Competing with setting Eco-mode in background process', 'E0184': 'Competing with assigning Eco-mode schedule in ' 'background process', 'E0185': 'Competing with setting Eco-mode schedule in background process', 'E0186': 'Competing with setting date and time in background process', 'E0187': 'Competing with expanding volume in background process', 'E0188': 'Competing with deleting Advanced Copy session in ' 'background process', 'E0190': 'Competing with registering dedicated hot spare in ' 'background process', 'E0191': 'Competing with releasing dedicated hot spare in ' 'background process', 'E0192': 'Competing with collecting event information in ' 'background process', 'E0193': 'Competing with deleting snap data volume in ' 'background process', 'E0194': 'Reclamation of Thin Provisioning Volume is in progress', 'E0195': 'Rebuild or Copyback in process', 'E0196': 'Competing with storage migration in background process', 'E0197': 'Quick UNMAP in process', 'E0198': 'Flexible tier migration in process', 'E0200': 'Competing with setting Flexible tier mode in background process', 'E0201': 'Competing with deleting Flexible tier pool in ' 'background process', 'E0202': 'Competing with formatting Flexible tier pool in ' 'background process', 'E0203': 'Competing with registering Flexible tier volume in ' 'background process', 'E0204': 'Competing with setting Flexible tier sub pool priority in ' 'background process', 'E0205': 'Competing with setting Flexible tier pool parameters in ' 'background process', 'E0206': 'Competing with Flexible tier migration in background process', 'E0207': 'Competing with registering Thin Provisioning Pool in ' 'background process', 'E0208': 'Competing with deleting Thin Provisioning Volume in ' 'background process', 'E0209': 'Competing with formatting Thin Provisioning Volume in ' 'background process', 'E0210': 'Competing with setting Thin Provisioning Volume parameters in ' 'background process', 'E0211': 'Competing with registering REC Disk Buffer Volume in ' 'background process', 'E0212': 'Competing with deleting REC Disk Buffer Volume in ' 'background process', 'E0213': 'Competing with inhibiting copy destination volume in ' 'background process', 'E0214': 'Competing with Thin Provisioning Pool migration in ' 'background process', 
'E0215': 'Competing with setting cache size limit to volume in ' 'background process', 'E0216': 'Competing with setting Offloaded Data Transfer Mode in ' 'background process', 'E0217': 'Competing with setting Key management group ID in ' 'background process', 'E0218': 'Competing with changing Key in background process', 'E0300': 'Syntax error in REC path information. (Incorrect file header)', 'E0301': 'Syntax error in REC path information. (Version mismatch)', 'E0302': 'Syntax error in REC path information. (Incorrect label)', 'E0303': 'Syntax error in REC path information. (Incorrect operand)', 'E0304': 'Syntax error in REC path information. (Duplicate definition)', 'E0305': 'Syntax error in REC path information. (Missing label)', 'E0306': 'Syntax error in REC path information. (Too many labels)', 'E0307': 'Syntax error in REC path information. (Missing double quotes)', 'E0308': 'Syntax error in REC path information. (Unexpected label)', 'E0309': 'Syntax error in REC path information. (Undefined information)', 'E0311': 'Syntax error in REC path information. (Too many lines)', 'E0312': 'Syntax error in REC path information. (Overlong line)', 'E0313': 'Syntax error in REC path information. ' '(WWN does not match actual)', 'E0314': 'Syntax error in REC path information. ' '(Host port mode does not match actual)', 'E0315': 'Syntax error in REC path information. (Number of storage-links ' 'for one storage system over upper limit)', 'E0316': 'Syntax error in REC path information. (Number of storage-links ' 'between one pair of storage systems over upper limit)', 'E0317': 'Syntax error in REC path information. (Number of port-links for ' 'one host interface port over upper limit)', 'E0318': 'Syntax error in REC path information. (Number of host interface ' 'ports for one storage system over upper limit)', 'E0319': 'Syntax error in REC path information. (Total number of ' 'storage systems over upper limit)', 'E0320': 'Syntax error in REC path information. (Total number of ' 'links over upper limit)', 'E0321': 'Syntax error in REC path information. 
(CA type or IP version ' 'do not match)', 'E0330': 'Flexible tier mode is valid', 'E0331': 'Flexible tier mode is not valid', 'E0332': 'One or more Flexible Tier Pools exist', 'E0333': 'Cannot format Flexible Tier Pool', 'E0334': 'RAID Migration cannot be set to the specified volume', 'E0335': 'RAID Migration cannot be set to the specified ' 'Flexible Tier Pool', 'E0336': 'Migration failed because of insufficient free space of ' 'the destination pool', 'E0337': 'The specified Flexible Tier Pool does not have a ' 'Flexible Tier Sub Pool', 'E0342': 'The time out occurred', 'E0343': 'The network is not normal', 'E0344': 'The time out occurred in the network', 'E0345': 'The network of IDM server is unreachable', 'E0346': 'The IDM server is unreachable', 'E0347': 'The IDM server refused the connection', 'E0348': 'The IDM server reset the connection', 'E0349': 'The SSL communication fault occurred', 'E0350': 'The name resolution of the host name failed', 'E0351': 'It failed in the HTTP authentication', 'E0352': 'The HTTP authentic method does not correspond', 'E0353': 'It failed in the SOCKS authentication', 'E0354': 'The SOCKS authentic method does not correspond', 'E0355': 'Export log in process', 'E0356': 'AIS Connect or AIS Connect server authentication is enabled', 'E0357': 'AIS Connect is disabled', 'E0358': 'REMCS is enabled', 'E0359': 'Log Transmission of E-Mail notification is enabled', 'E0360': 'AIS SSL certificate is not registered', 'E0361': 'AIS SSL certificate is invalid', 'E0362': 'Log transmission of E-Mail notification and AIS connect ' 'cannot be enabled simultaneously', 'E0390': 'Backup REC path information does not exist', 'E0391': 'Round trip time measurement has failed', 'E0392': 'Unsupported path type', 'E0393': 'Syntax error in REC path information. 
(iSCSI parameter(s) ' 'do not match actual)', 'E0394': 'Failed to access the server', 'E0395': 'The object cannot be operated', 'E0396': 'A part of SpinUp/Down failed', 'E0397': 'All SpinUp/Down failed', 'E0399': 'Syntax error in REC path information', 'E5000': 'Parameter not supported', 'E5001': 'User authority to use the parameter is improper', 'E5002': 'Authority of security is necessary for data decryption', 'E5003': 'The user authority to use the command is improper', 'E5010': 'The volume encryption is specified for SED disk', 'E5081': 'Abnormal pinned CBE error', 'E5084': 'System not ready', 'E5100': 'Thin Provisioning mode is invalid', 'E5033': 'Cannot Warm Boot CFL', 'E5034': 'Cannot Hard Boot CFL', 'E5101': 'Check thin-pro-pool Status', 'E5102': 'Migration session count is limit', 'E5104': 'Thin Provisioning Pool capacity is limit', 'E5105': 'Existing unused disks are not enough', 'E5106': 'RAID or Volume is insufficient', 'E5107': 'RAID type is temporary', 'E5108': 'Volume type is not Thin Provisioning Volume', 'E5109': 'RAID group belong to thin-provisioning-pool/flexible-tier-pool', 'E5110': 'Thin Provisioning Volume count is limit', 'E5200': 'No copy license', 'E5201': 'Invalid copy phase', 'E5202': 'Exist SDV / SDPV', 'E5203': 'Exist REC disk buffer', 'E5204': 'Exist REC buffer', 'E5205': 'Exist REC path setting', 'E5206': 'Exist any of copy session(s)', 'E5207': 'Exist volume(s) of protection from copy destination', 'E5208': 'Copy license information updating due to trial license expired', 'E5209': 'Not support E6K to target of REC', 'E5210': 'Data in disk buffer', 'E5211': 'The RAID group is for REC disk buffer', 'E5212': 'Source and destination RA type is not match', 'E5213': 'The times registering trial license has been reached ' 'the system limit', 'E5214': 'Exist RA', 'E5215': 'Result string is too long', 'E5216': 'Compete for the affinity path', 'E5217': 'The specified multiplicity or priority level mismatch connect ' 'mode (Direct/Switched) of the REC path', 'E5300': 'An error occurred in the copy path connection', 'E5301': 'An unsupported command was issued by the remote storage', 'E5302': 'The specified volume number is not correct (exceeding ' 'the maximum volume number)', 'E5303': 'The specified volume is not supported', 'E5304': 'Advanced copy cannot be set to the specified volume', 'E5305': 'There is "Bad Sector" in the copy source volume', 'E5306': 'Encryption settings of copy source volume and copy ' 'destination volume are different', 'E5307': 'The copy source volume and copy destination volume don\'t ' 'belong to the same resource domain', 'E5308': 'The specified volume is a "Temporary"', 'E5309': 'Disk failure occurred while the relevant copy session is in ' '"Suspend" state. 
The copy session turns into "Error" state', 'E5310': 'Parameter error occurred', 'E5311': 'Source volume whose capacity is larger than destination ' 'volume\'s cannot be specified', 'E5312': 'It failed to reverse the copy session', 'E5313': 'Copy range conflicts with the existing RAID migration session', 'E5314': 'The specified copy range of the copy source volume is ' 'overlap with the copy range in an existing session ' '(excluding cascade and restore)', 'E5315': 'The specified copy range of the copy destination volume is ' 'overlap with the copy range in an existing session ' '(excluding cascade and restore copy)', 'E5316': 'The specified cascade copy cannot be done', 'E5317': 'The copy session which is in progress of restoring was ' 'specified', 'E5318': 'The number of cascades exceeds the maximum', 'E5319': 'An "Error Suspend" session was specified', 'E5320': 'Multiple copy sessions in REC Consistency mode cannot operate ' 'in a single storage', 'E5321': 'The state of the specified session is not correct', 'E5322': 'A command was issued while processing ' 'CONCURRENT SUSPEND command', 'E5323': 'The specified operation is not a "Force specify"', 'E5324': 'There is no path to access to the copy source volume or ' 'copy destination volume', 'E5325': 'The specified volume is an Advanced Copy read-only volume. ' 'It cannot be set as copy destination volume', 'E5326': 'The STOP command was issued to a SnapOPC/SnapOPC+ session ' 'which is in progress of restoring', 'E5327': 'REC buffer transfer is not complete in time or ' 'buffer recovery is processing under SUSPEND command process. ' 'SUSPEND command cannot be done', 'E5328': 'REC buffer data transfer is under monitoring. The specified ' 'session cannot be reversed', 'E5329': 'It will lead to EC/REC cascade copy session that is not in ' '"Suspend" state but has cascade source volume', 'E5330': 'The copy session has already been reversed', 'E5331': 'The number of copy sessions exceeds the allowable maximum ' 'copy sessions for this storage', 'E5332': 'The copy license is not valid', 'E5333': 'The number of copy sessions exceeds the allowable maximum ' 'copy sessions for each copy source volume', 'E5334': 'The number of copy sessions exceeds the allowable maximum ' 'copy sessions for each copy destination volume', 'E5335': 'The number of SnapOPC+ copy session generations exceeds ' 'the maximum for a copy source volume', 'E5336': 'Copy area of copy source volumes in monitoring copy sessions is ' 'overlap', 'E5337': 'The new copy session settings are the same with an existing ' 'one\'s. The new copy session cannot be started', 'E5338': 'Copy destination volume and cascade copy destination volume ' 'in the copy session is overlap', 'E5339': 'It will lead to copy destination volumes overlap. 
EC/REC ' 'cascade copy session cannot be reversed', 'E5340': 'SDV is being initialized', 'E5341': 'There is already a copy session where the specified SDV ' 'was set as copy destination', 'E5342': 'The copy session has already been set', 'E5343': 'The copy session has already been deleted', 'E5344': 'The copy session is in progress of transition to "Suspend" ' 'state asynchronously or has already been in "Suspend" state', 'E5345': 'The state of the session is already Active', 'E5346': 'The copy table has not been set yet', 'E5347': 'Copy table size is not sufficient', 'E5348': 'REC buffer is not in "Active" state', 'E5349': 'Copy source and copy destination, usage (sending or receiving) ' 'of REC buffer settings after resuming copy sessions don\'t ' 'match the original settings', 'E5350': 'REC buffer setting is being changed or REC buffer related ' 'functions are in progress', 'E5351': 'Copy source and copy destination, usage (sending or receiving) ' 'of REC buffer settings after reversing copy sessions don\'t ' 'match the original settings', 'E5352': 'The disk configured the RAID group of the specified volume is ' 'in motor OFF state due to ECO-mode', 'E5353': 'The specified BoxID cannot be found', 'E5354': 'The copy path is not in "Normal" state. Copy sessions in ' 'this storage were deleted but copy sessions in the remote ' 'storage still exist', 'E5355': 'Firmware update is in progress. The specified operation ' 'cannot be done', 'E5356': 'Advanced copy resolution settings of the local storage and ' 'remote storage are different', 'E5357': 'SDV was specified as a copy destination volume where ' 'the copy session is not SnapOPC+', 'E5358': 'SDV was specified as a copy source volume in SnapOPC+', 'E5359': 'A standard volume was specified as copy destination volume ' 'in SnapOPC+', 'E5360': 'An error, which can be recovered by retry, occurred', 'E5361': 'The storage is in "Not Ready" or internal error state', 'E5362': 'The specified volume is currently configured with ' 'Bind-in-Cache extent. RAID Migration cannot apply ' 'to this volume', 'E5363': 'The previous generation session is Readying', 'E5364': 'The restore OPC cannot start by using concurrent OPC', 'E5365': 'The restore OPC of readying session cannot start', 'E5366': 'The specified copy range is overlap with the copy range in ' 'an existing xcopy session', 'E5367': 'The specified copy range is overlap with the copy range in ' 'an existing Readying or Copying OPC session', 'E5368': 'The specified session cannot restart because it is ' 'under restore', 'E5369': 'The specified remote Box ID is not support the out of band copy', 'E5370': 'In the remote old model storage, the specified volume ' 'is invalid', 'E5371': 'In the remote old model storage, parameter error occurred', 'E5372': 'In the remote old model storage, the specified copy range ' 'is overlap with the copy range in an existing session', 'E5373': 'In the remote old model storage, status of session or status of ' 'volume is error', 'E5374': 'In the remote old model storage, the number of copy sessions ' 'exceeds the allowable maximum copy sessions', 'E5375': 'In the remote old model storage, the new copy session overlap ' 'with the existing one\'s. 
The new copy session cannot ' 'be started', 'E5376': 'In the remote old model storage, error occurred about setting ' 'of the copy table or status of REC Buffer', 'E5377': 'In the remote old model storage, the specified copy volume is ' 'a "SDV"', 'E5378': 'In the remote old model storage, an error occurred in the copy ' 'path connection', 'E5379': 'An unsupported command was issued by the remote old ' 'model storage', 'E5380': 'In the remote old model storage, copy session has been ' 'already set', 'E5381': 'In the remote old model storage, copy session has been ' 'already deleted', 'E5382': 'In the remote old model storage, copy session is already ' 'in "Suspend" status or changing to be "Suspend" status', 'E5383': 'In the remote old model storage, copy session status is ' 'already in "Active" status', 'E5384': 'In the remote old model storage, no copy license', 'E5385': 'In the remote old model storage configuration, ' 'the specified BoxID cannot be found', 'E5386': 'The copy path is not in "Normal" state. Copy sessions in ' 'this storage were deleted but copy sessions in the remote ' 'old model storage still exist', 'E5387': 'In the remote old model storage, firmware update is in ' 'progress. The specified operation cannot be done', 'E5388': 'Copy resolution settings of the local storage and remote ' 'old model storage are different', 'E5389': 'In the remote old model storage, an error, which can be ' 'recovered by retry, occurred', 'E5390': 'The remote old model storage is in "Not Ready" or ' 'internal error state', 'E5391': 'There is not the certification of consistency', 'E5392': 'Multiple copy source storage exists', 'E5393': 'The certification of consistency is unknown', 'E5394': 'The copy source storage is not support this command', 'E5395': 'Controller Module failed', 'E5396': 'The remote storage is not support this function', 'E5400': 'The same command that was issued by specifying by ' 'start has already been processed', 'E5401': 'The same command that was issued by specifying by ' 'restart has already been processed', 'E5402': 'REC transfer mode which specified by Start or ' 'Resume command is invalid at all RA ports which configure path', 'E5501': 'iSNS server cannot be connected from the specified ' 'iSCSI CA port', 'E5502': 'CLI cannot change the host or port parameter ' 'setting created by GUI', 'E5503': 'The Multiple VLAN setting of a specified port is invalid', 'E5504': 'The specified Additional IP Information setting is invalid', 'E5601': 'The automatic setup of IPv6 address cannot be performed', 'E5701': 'The factory setup is not done', 'E5900': 'Command error', 'E6000': 'Advanced Copy session that covers the entire volume is active', 'E6001': 'The specified volume is ODX Buffer Volume', 'E6002': 'The specified volume is volume during Zero Reclamation ' 'execution', 'E6003': 'Offloaded Data Transfer Mode is valid', 'E6004': 'Offloaded Data Transfer Mode is not valid', 'E6005': 'ODX Buffer Volume exist', 'E6006': 'The specified volume is not ODX Buffer Volume', 'E6007': 'Offloaded Data Transfer in process', 'E6008': 'The specified volume is not volume during Zero Reclamation ' 'execution', 'E6009': 'Not available under operating Bind-in-Cache', 'E6010': 'Current cache page size is over specified cache limit size', 'E6011': 'Not available under cache limit settings', 'E6012': 'The RAID migration from which a security level differs requires ' 'security authority', 'E6201': 'The specified RAID group does not consist of SED', 'E7001': 'SED authentication key is not 
registered', 'E7002': 'The master server is not registered in the key management group', 'E7003': 'Rejected by the server. Please try again to be ' 'accepted on the server', 'E7004': 'The key which can be changed is not in the server', 'E7005': 'Abnormal state of the key', 'E7006': 'The key is not acquired', 'E7007': 'The key management group is not registered', 'E7100': 'The specified Flexible Tier Pool has Flexible Tier Volume(s) ' 'which is balancing', 'E7101': 'There is no free OLU or SLU to create destination LUN', 'E7102': 'It is in the process of deleting source Thin Provisioning ' 'Volume internally which is done after migration', 'E7103': 'Number of migration sessions has reached the system limit', 'E7104': 'The source LUN has already using for migrated', 'E7105': 'The source LUN has already been used at other session', 'E7106': 'The resource in the internal is depleted', 'E7107': 'State of source volume or destination volume is error', 'E7108': 'The specified volume doesn\'t have migration session ' 'during startup', 'E7109': 'The specified volume is currently configured with ' 'Bind-in-Cache extent', 'E7110': 'Logical capacity which can be migrate is over', 'E7111': 'Physical capacity of destination pool is error', 'E7112': 'There is not enough free space to create the pool in the device', 'E7113': 'Balancing cannot be executed because there is not ' 'enough free space in the pool', 'E7114': 'Balancing cannot be executed because the device is in ' 'error state', 'E8000': 'Undefined command', 'E8001': 'Undefined parameter', 'E8002': 'Another user is performing an operation', 'E8003': 'The lock session ID cannot be obtained', 'E8004': 'The value cannot be specified under current user authority', 'E8005': 'The specified user account does not exist', 'E8006': 'Because there will be no user account that can configure ' 'user account or role, the specified operation cannot be done', 'E8007': 'Your password has expired. You must change your password and ' 'log in again', 'E8008': 'Password policy and Lockout policy cannot be enforced on ' 'a user account with the Software role', 'E8100': 'The syntax is incorrect', 'E8101': 'An unusable character is specified', 'E8102': 'The parameter is out of the allowed range', 'E8103': 'An unnecessary parameter is specified', 'E8104': 'The required parameter is not specified', 'E8105': 'The number of specified values is too many', 'E8106': 'The number of specified values is not enough', 'E8107': 'The number of specified characters is too many', 'E8108': 'The number of specified characters is not enough', 'E8109': 'The combination of the parameters or values is incorrect', 'E810A': 'A value that is not a multiple of 100GB is specified for ' 'the Extreme Cache capacity', 'E810B': 'The specified value does not match the current setting value', 'E810C': 'The specified value is not supported by the device model', 'E810D': 'No values are specified', 'E810E': 'The format of the value is incorrect', 'E810F': 'The password is incorrect', 'E8110': 'The file is incorrect', 'E8111': 'The update interval needs to be a multiple of 30 seconds', 'E8800': 'Unable to resolve destination address', 'E8801': 'The route addition failed. Check the network address of ' 'the destination and the source port', 'E8802': 'Cannot connect to the server', 'E8803': 'Login incorrect', 'E8804': 'The processing status of packet capture is invalid', 'E8805': 'Detected an error during FTP command establishment. 
Maybe an ' 'incorrect file path is the cause of the error', 'E8806': 'Detected an error during FTP command execution. Maybe ' 'the incorrect file name or permission settings are ' 'the cause of the error', 'E8807': 'Detect FTP Connection Failure', 'E8808': 'Reading data from the FTP server failed', 'E8809': 'Writing data to the FTP server failed', 'E8900': 'IP setting is required for at least one port', 'E8901': 'The master IP address is not configured', 'E8902': 'The specified allow IP or allow netmask is 0', 'E8903': 'Netmask is not configured', 'E8904': 'The same IP address cannot be specified', 'E8905': 'Master IP and slave IP addresses must be in ' 'the same network address', 'E8906': 'The master connect IP is not configured', 'E8907': 'The slave link local IP is not configured', 'E8908': 'The specified IPv6 prefix length is out of range', 'E8909': 'Allow IP address is in the same network address ' 'with the master IP address', 'E890A': 'Allow IP address is in the same network address ' 'with the master connect IP address', 'E890B': 'Same network address with other port\'s master IP address', 'E890C': 'Same network address with other port\'s allow IP address', 'E890D': 'Same network address with other port\'s connect IP address', 'E890E': 'Bad subnet mask for IP address', 'E890F': 'Bad prefix length for IP address', 'E8910': 'Invalid IP address', 'E8911': 'The specified IPv6 link local address is incorrect', 'E8912': 'The specified IPv6 global address is incorrect', 'E8913': 'The subnet mask setting is incorrect', 'E8914': 'The primary DNS IP address is not configured', 'E8915': 'The gateway setting is incorrect', 'E8916': 'The master link local IP is not configured', 'E8917': 'Gateway and master IP addresses must be in ' 'the same network address', 'E8918': 'Master and slave connect IP addresses must be in ' 'the same network address', 'E8919': 'Gateway and connect IP addresses must be in ' 'the same network address', 'E891A': 'Same network address with other port\'s DNS IP address', 'E891B': 'The specified address is broadcast address', 'E9000': 'The device model does not support the command', 'E9001': 'The command cannot be executed because the device is in ' '"Not Ready" status', 'E9002': 'The storage cluster license is not registered', 'E9003': 'The storage cluster license is already registered', 'E9004': 'The copy license and storage cluster license is not registered', 'E9005': 'The dedup license is not registered', 'E9006': 'The command cannot be executed because the device is not in ' '"Normal" status', 'E9007': 'The GS license is registered', 'E9008': 'The Advanced Copy license is not registered', 'E9009': 'The Non-disruptive Storage Migration license is not registered', 'E900A': 'The Non-disruptive Storage Migration license is ' 'already registered', 'E9200': 'The Extreme Cache function is not enabled', 'E9201': 'The Flexible Tier mode is enabled', 'E9202': 'The Thin Provisioning allocation mode is TPV balancing', 'E9203': 'Disk Patrol is disabled', 'E9204': 'The device contains pinned data', 'E9205': 'The command cannot be executed because the network setting is ' 'the factory default setting', 'E9206': 'The Extreme Cache function is enabled', 'E9207': 'The operation mode is not "Maintenance Mode"', 'E9208': 'SMI-S server is enabled', 'E9209': 'SMI-S server is disabled', 'E920A': 'SMI-S server startup or shutdown is in progress', 'E920B': 'The VVOL function is not enabled', 'E920C': 'The Extreme Cache Pool function is not enabled', 'E920D': 'The Extreme Cache function and 
Extreme Cache Pool function is ' 'not enabled', 'E920E': 'The encryption mode is disabled', 'E920F': 'It is necessary to disable the EXC or EXC Pool function before ' 'enable the EXC or EXC Pool function', 'E9210': 'Collecting performance data is already running', 'E9211': 'Collecting performance data has been started by Storage Cruiser', 'E9212': 'The Deduplication/Compression mode is not enabled', 'E9213': 'The NAS audit log is enabled', 'E9214': 'The NAS audit log is disabled', 'E9215': 'The Deduplication/Compression mode is enabled', 'E9217': 'Collecting performance data is not started', 'E9218': 'Performance data is being collected', 'E9219': 'The current default chunk size of the device is different from ' 'one or more existing Flexible Tier Pools', 'E9220': 'SSL certificate used for SMI-S HTTPS connection can be changed ' 'only when enabling SMI-S function', 'E9221': 'SSL certificate for Web GUI is not registered', 'E9222': 'The Veeam B&R storage integration function is not enabled', 'E9223': 'Thin provisioning is not enabled', 'E9224': 'One or more specified objects are used for Veeam B&R', 'E9225': 'One or more specified objects are not used for Veeam B&R', 'E9300': 'Competing with cold update of firmware in background process', 'E9301': 'Competing with hot update of firmware in background process', 'E9302': 'Competing with update of disk firmware in background process', 'E9303': 'Competing with diagnosing RAID groups', 'E9304': 'Competing with diagnosing Disks', 'E9305': 'Competing with quick formatting of volume in background process', 'E9306': 'Competing with changing Advanced Copy parameters in ' 'background process', 'E9307': 'Competing with allocating remote copy buffer in ' 'background process', 'E9308': 'Competing with preparing firmware update in background process', 'E9309': 'Competing with setting cache control in background process', 'E930A': 'Competing with reassigning RAID group controller in ' 'background process', 'E930B': 'Competing with initializing volume in background process', 'E930C': 'Competing with encrypting or decrypting volume in ' 'background process', 'E930D': 'Competing with registering RAID group in background process', 'E930E': 'Competing with deleting RAID group in background process', 'E930F': 'Competing with registering volume in background process', 'E9310': 'Competing with deleting volume in background process', 'E9311': 'Competing with registering global hot spare in ' 'background process', 'E9312': 'Competing with changing maintenance mode in background process', 'E9313': 'Competing with expanding RAID group in background process', 'E9314': 'Competing with collecting G-List information in ' 'background process', 'E9315': 'Competing with setting Eco-mode in background process', 'E9316': 'Competing with assigning Eco-mode schedule in ' 'background process', 'E9317': 'Competing with setting Eco-mode schedule in background process', 'E9318': 'Competing with setting date and time in background process', 'E9319': 'Competing with expanding volume in background process', 'E931A': 'Competing with deleting Advanced Copy session in ' 'background process', 'E931B': 'Competing with deleting Advanced Copy session in ' 'background process', 'E931C': 'Competing with storage migration in background process', 'E931D': 'Competing with storage migration in background process', 'E931E': 'Competing with deleting snap data volume in background process', 'E931F': 'Competing with changing Advanced Copy parameters in ' 'background process', 'E9320': 'Competing with 
searching target WWNs', 'E9321': 'Competing with collecting disk performance information', 'E9322': 'Competing with checking file of storage migration ' 'path information', 'E9323': 'Competing with checking file of storage migration ' 'path information', 'E9324': 'Competing with registering Thin Provisioning Pool in ' 'background process', 'E9325': 'Competing with deleting Thin Provisioning Pool in ' 'background process', 'E9326': 'Competing with formatting Thin Provisioning Pool in ' 'background process', 'E9327': 'Competing with registering Thin Provisioning Volume in ' 'background process', 'E9328': 'Competing with deleting Thin Provisioning Volume in ' 'background process', 'E9329': 'Competing with formatting Thin Provisioning Volume in ' 'background process', 'E932A': 'Competing with setting Thin Provisioning Pool parameters in ' 'background process', 'E932B': 'Competing with setting Thin Provisioning Volume parameters in ' 'background process', 'E932C': 'Competing with setting Thin Provisioning mode in ' 'background process', 'E932D': 'Competing with assigning Eco-mode schedule in ' 'background process', 'E932E': 'Competing with registering REC Disk Buffer Volume in ' 'background process', 'E932F': 'Competing with deleting REC Disk Buffer Volume in ' 'background process', 'E9330': 'Competing with inhibiting copy destination volume in ' 'background process', 'E9331': 'Competing with moving volume in background process', 'E9332': 'Competing with balancing Thin Provisioning Pool or ' 'Flexible Tier Pool data in background process', 'E9333': 'Competing with registering dedicated hot spare in ' 'background process', 'E9334': 'Competing with releasing dedicated hot spare in ' 'background process', 'E9335': 'Competing with collecting event information in ' 'background process', 'E9336': 'Competing with controlling advanced copy session', 'E9337': 'Competing with controlling advanced copy session', 'E9338': 'Competing with controlling advanced copy session', 'E9339': 'Competing with controlling advanced copy session', 'E933A': 'Competing with setting Flexible tier mode in background process', 'E933B': 'Competing with deleting Flexible Tier Pool in ' 'background process', 'E933C': 'Competing with formatting Flexible Tier Pool in ' 'background process', 'E933D': 'Competing with registering Flexible Tier Volume in ' 'background process', 'E933E': 'Competing with setting Flexible Tier Sub Pool priority in ' 'background process', 'E933F': 'Competing with setting Flexible Tier Pool parameters in ' 'background process', 'E9340': 'Flexible Tier Migration in process', 'E9341': 'Competing with setting cache size limit to volume in ' 'background process', 'E9342': 'Competing with setting Offloaded Data Transfer Mode in ' 'background process', 'E9343': 'Competing with setting Key management group ID in ' 'background process', 'E9344': 'Competing with changing Key in background process', 'E9345': 'NAS configuration process is in progress', 'E9346': 'Storage cluster license configuration process is in progress', 'E9347': 'TFO group configuration process is in progress', 'E9348': 'TFOV configuration process is in progress', 'E9349': 'TFO group activate process is in progress', 'E934A': 'TFO pair configuration process is in progress', 'E934B': 'VVOL mode setting process is in progress', 'E934D': 'System cache function setting process is in progress', 'E934E': 'Starting SSD sanitization process is in progress', 'E9380': 'The Storage migration is in progress', 'E9400': 'No memory', 'E9401': 'No message 
queue', 'E9402': 'No semaphore', 'E9403': 'CLI session limit reached', 'EA000': 'The CM status is not normal', 'EA001': 'The specified CE does not exist', 'EA002': 'The specified CM does not exist', 'EA003': 'One or more CMs are not normal', 'EA004': 'One or more CEs are not normal', 'EA200': 'The CA port type is incorrect', 'EA201': 'The specified CA port does not exist', 'EA202': 'The relevant operation cannot be executed because all of ' 'the CAs are NAS CAs', 'EA203': 'Host port mode of the CA port is incorrect', 'EA204': 'The WWPN/WWNN has not been changed', 'EA205': 'The CA Port status is not normal', 'EA400': 'The number of maximum disk slots is exceeded', 'EA401': 'Cannot add Drive Enclosure any more', 'EA402': 'The Drive Enclosure type is not supported', 'EA600': 'No PFM is installed in the device', 'EA601': 'A PFM is not installed in some of the CMs in the device', 'EA602': 'The PFM status is not normal', 'EA603': 'The number of PFMs is different between the CMs', 'EA604': 'The specified disk does not exist', 'EA605': 'The disk type is incorrect', 'EA606': 'The capacity of the specified disk is insufficient', 'EA607': 'The specified disk is not available as a member disk', 'EA608': 'One or more specified disks are installed in ' 'the CE different from the specified assigned CM', 'EA609': 'SED and non-SED cannot be specified at the same time', 'EA60A': 'The drive is being used', 'EA60B': 'The drive status is incorrect', 'EA60C': 'The specified PFM does not exist', 'EA60D': 'The specified PFMs are not available as Extreme Cache', 'EA800': 'The maintenance target is inconsistent status', 'EA801': 'Not available under current system status conditions', 'EB000': 'The specified Flexible Tier Sub Pool does not exist', 'EB001': 'One or more TPP or FTSP exists in the device', 'EB002': 'The Fast Recovery RAID group cannot be specified', 'EB003': 'The specified RAID group does not exist', 'EB004': 'The specified RAID group status is not normal', 'EB005': 'The specified RAID group is already used', 'EB006': 'The number of volumes exceeds the maximum number of ' 'registrations in the RAID group', 'EB007': 'The free capacity of the RAID group is insufficient', 'EB008': 'One or more VVOLs exist in the specified Flexible Tier Pool', 'EB009': 'The RAID group used for the Extreme Cache Pool ' 'cannot be specified', 'EB00A': 'The Extreme Cache Pool already exists for the specified CM', 'EB00B': 'The Extreme Cache Pool does not exist for the specified CM', 'EB00C': 'The specified disk is already used', 'EB00D': 'One or more Deduplication/Compression volumes exist ' 'in the specified pool', 'EB00E': 'Deduplication and/or Compression is not enabled on ' 'the specified pool', 'EB00F': 'Extreme Cache Pool exists', 'EB010': 'Not allowed to configure this RAID Level with the ' 'specified disks', 'EB011': 'The free capacity of the pool is insufficient', 'EB012': 'The pool status is not normal', 'EB013': 'The Flexible Tier Pool status is not normal', 'EB014': 'A volume for VVOL metadata exists in the specified pool', 'EB015': 'There are one or more Thin provisioning pools with ' 'Compression enabled', 'EB016': 'Encryption option cannot be used for Extreme Cache Pool ' 'composed of SED-SSDs', 'EB017': 'Eco-mode schedule is assigned to the specified pool', 'EB018': 'Deduplication and/or Compression is enabled on ' 'the specified pool', 'EB019': 'The specified RAID Group is in use for Thin Provisioning Pool ' 'or Flexible Tier Pool', 'EB01A': 'The specified RAID Group is in use for REC disk buffer', 'EB01B': 
'The specified RAID Group is in use for Mainframe system ' '(DVCF mode is on)', 'EB01C': 'The specified RAID Group is in use for Mainframe system ' '(Mainframe volume exists)', 'EB01D': 'The type of volume that configures the specified RAID Group ' 'is inapplicable', 'EB01E': 'The disk kind of the RAID Groups must be the same', 'EB01F': 'The RAID level must be the same', 'EB020': 'The number of member disks must be the same', 'EB021': 'The stripe depth must be the same', 'EB022': 'Physical free capacity of the destination pool is insufficient', 'EB023': 'RAID group expansion is running', 'EB024': 'The total logical capacity of the pool volumes exceeds ' 'the maximum value', 'EB025': 'The specified Thin provisioning pool does not exist', 'EB026': 'The specified Flexible tier pool does not exist', 'EB027': 'Raid group\'s stripe depth is expanded', 'EB028': 'Encryption option cannot be used for RAID Group or ' 'Thin Provisioning Pool composed of SED', 'EB029': 'The specified Thin Provisioning Pool is in use for ' 'Flexible Tier Pool', 'EB02A': 'The specified RAID group is not in use for Flexible Tier Pool', 'EB02B': 'The specified Thin Provisioning Pool is not applicable for ' 'Deduplication or Compression', 'EB02C': 'The specified chunk size exceeds the current default chunk ' 'size of the device', 'EB02D': 'The specified PFM has already been used as Extreme Cache', 'EB02E': 'There are no PFMs which can be used as Extreme Cache', 'EB02F': 'One or more PFMs being currently used as Extreme Cache exist', 'EB030': 'The specified CE is not using Extreme Cache', 'EB031': 'There are no PFMs being used as Extreme Cache', 'EB032': 'There are one or more PFMs which cannot be used as ' 'Extreme Cache', 'EB033': 'Chunk size cannot be specified under the current ' 'maximum pool capacity', 'EB034': 'Neither Thin provisioning pool nor Flexible Tier Pool exists', 'EB300': 'The volume type is incorrect', 'EB301': 'The volume status is not normal', 'EB302': 'The type of drive that configures the RLU or the TPP that ' 'the volume belongs to is incorrect', 'EB303': 'The specified volume does not exist', 'EB304': 'An incorrect UID is specified', 'EB305': 'The cache size limit is set', 'EB306': 'The volume in Fast Recovery RAID group cannot be specified', 'EB307': 'One or more VVOLs exist in the device', 'EB308': 'The number of volumes exceeds the maximum number of ' 'registrations', 'EB309': 'The specified volume is being used as a VVOL', 'EB30A': 'The specified volume\'s data integrity is T10-DIF', 'EB30B': 'The specified volume is thick provisioning volume', 'EB30C': 'One or more Deduplication/Compression volumes exist', 'EB30D': 'Zero Reclamation is running', 'EB30E': 'The VVOL cannot be specified with the other resources', 'EB30F': 'Data migration is running', 'EB31A': 'Balancing process is running', 'EB31B': 'The specified volume has no error data', 'EB31C': 'The specified volume has too many error data', 'EB31D': 'The volume for VVOL metadata already exists', 'EB31E': 'The specified volume is being used as a volume for ' 'VVOL metadata', 'EB31F': 'The Deduplication/Compression System volume status is ' 'not normal', 'EB320': 'One or more NAS volumes exist', 'EB321': 'An encryption process is running at the specified volume', 'EB322': 'Volume formatting is running', 'EB323': 'New volume size must be equal or greater than the original one', 'EB324': 'The number of migration sessions exceeds the maximum value', 'EB325': 'Total migration capacity exceeds the maximum value', 'EB326': 'The destination RAID Group 
must be different from the one of ' 'the specified volume', 'EB327': 'The total capacity of Deduplication/Compression volumes in ' 'the specified pool must be equal or less than ten times ' 'the capacity of the Deduplication/Compression System volume', 'EB328': 'Since the specified volume is configured by SED, encryption ' 'option is not applicable', 'EB329': 'The size of the specified volume is not enough', 'EB32A': 'The specified size exceeds the maximum size under ' 'the current NAS configuration', 'EB32B': 'The concatenation count of the specified volume exceeds ' 'the maximum value', 'EB32D': 'The specified volume name is already registered', 'EB32E': 'The specified volume name is reserved keyword', 'EB32F': 'The specified volume is already encrypted', 'EB330': 'The specified volume is already decrypted', 'EB331': 'The specified Snap Data Pool Volume capacity is not ' 'a multiple of Snap Data Pool Volume Resolution', 'EB332': 'The specified volume is already registered', 'EB333': 'The total capacity of the Snap Data Pool Volume ' 'exceeds the maximum value', 'EB334': 'The resource, which can be used only in ' 'expand volume mode, exists', 'EB335': 'Advanced Copy session is active', 'EB336': 'Advanced Copy (ODX) session is active', 'EB337': 'Volume(s) is used in LUN mapping', 'EB338': 'Volume(s) is used in Storage Cluster', 'EB339': 'The Deduplication/Compression volume is being used', 'EB33A': 'Non-disruptive Storage Migration is in process', 'EB33B': 'External LU information cannot be deleted or does not ' 'exist for the specified volumes', 'EB33C': 'Data migration is not running for the specified volume(s)', 'EB33D': 'The migration status of the specified volume(s) is not normal', 'EB33E': 'Data synchronization cannot be stopped manually for ' 'the specified volume(s) because it is not running in ' 'manual-stop mode', 'EB33F': 'No target volumes to stop data synchronization', 'EB340': 'Compression is not applicable for the specified volume. ' 'For migration to the compression enabled pool, ' '"-data-reduction-disable yes" needs to be specified', 'EB341': 'The specified volume is used for Snapshot. 
TPP or FTRP needs to ' 'be specified for the destination', 'EB342': 'The specified operation is not applicable because compression ' 'is enabled for the specified Thin provisioning pool or volume', 'EB343': 'The specified volume is used for Data Container', 'EB344': 'No target volumes exist', 'EB345': 'One or more Data Container Volumes are not normal', 'EB346': 'The specified volume is not enabled for Compression', 'EB347': 'Migrating within the same pool is not applicable except for ' 'changing compression function of the specified volume', 'EB348': 'Only Data Container Volume can be specified', 'EB349': 'One or more target volumes are being used for LUN mapping', 'EB34A': 'Snapshot Volume for Veeam B&R cannot be created for ' 'the specified volume', 'EB34B': 'Snapshot cannot be created due to internal resource shortage', 'EB500': 'Number of iSNS server has reached the iSCSI CA port limit', 'EB501': 'The specified port belongs to a port group', 'EB502': 'The LUN group, which is set in specified host affinity and ' 'port, specify volume does not exist', 'EB503': 'Host Response resource does not exist', 'EB504': 'Host I/F resource limited', 'EB505': 'Host affinity mode is inconsistent', 'EB506': 'The host specified does not exist', 'EB507': 'There is no host affinity setting including the specified port ' 'and host', 'EB508': 'The specified port is affinity setting', 'EB509': 'The specified LUN group has already been used in ' 'the host affinity', 'EB50A': 'The specified LUN group has already been used in the TFO group', 'EB50B': 'The specified volume has already been used in the host affinity', 'EB50C': 'The specified volume has already been used in the TFO group', 'EB50D': 'The specified volume is already used in other TFO group', 'EB50E': 'Host I/F already registered', 'EB50F': 'Host Group resource does not exist', 'EB510': 'TFO pair does not exist in the volume of all of ' 'the LUN group that has been set affinity in the specified host ' 'and port specified', 'EB511': 'The specified host is already used in the host affinity that ' 'includes the LUN mask group', 'EB512': 'The specified host belongs to a host group', 'EB513': 'The specified LUN mask group has already been used in ' 'the host affinity', 'EB514': 'The LUN mask group which can be affinity setting does not exist', 'EB515': 'The source port is already used in the host affinity ' 'that includes the LUN mask group', 'EB516': 'The destination port is already used in the host affinity that ' 'includes the LUN mask group', 'EB517': 'Host number or LUN group number, which can be used only in ' 'expand host mode, exists', 'EB518': 'The number of hosts exceeds the maximum number of hosts which ' 'can be registered if expand host mode is disabled', 'EB519': 'The iSCSI hosts, which have the same iSCSI name but one of ' 'them has no IP address configuration, cannot be used for ' 'the same CA port in host affinity setting', 'EB51A': 'The iSCSI hosts, which have the same iSCSI name but one of ' 'them has no IP address configuration, cannot be used for ' 'the same host group', 'EB51B': 'The specified iSCSI Name cannot be used because it causes a ' 'conflict in host affinity setting at a CA port in which a ' 'host with the same iSCSI Name has already been used', 'EB51C': 'The specified iSCSI Name cannot be used because it causes a ' 'conflict in host group setting in which a host with the same ' 'iSCSI Name has already been used', 'EB51D': 'The LUN group cannot be used for Veeam B&R', 'EB900': 'REC path is not set', 'EB901': 'REC path is 
not normal', 'EB902': 'REC Buffer is mirror recovery status', 'EB903': 'CFL is canceled because REC session is not continuable state', 'EB904': 'REC path is set in this device', 'EB905': 'REC path using iSCSI interface exists', 'EB906': 'There is CA port whose port mode is CA/RA or RA', 'EB907': 'The resource, which can be used only in expand volume mode, ' 'exists', 'EB908': 'There is no REC path information connected to the specified ' 'remote storage', 'EB909': 'The specified RA path does not exist', 'EB90A': 'REC Line Speed cannot be changed since the Connection Type of ' 'the REC path connected to the specified remote storage is ' '"Direct"', 'EBD00': 'Does not meet a requirement for downgrading', 'EBD01': 'The specified firmware version is older than the ' 'current firmware version. If firmware downgrade is required, ' 'specify the "-cm-downgrade" option', 'EBD02': 'The controller firmware is being received from the REMCS center', 'EBD03': 'The specified generation is not in valid status', 'EBD04': 'The specified controller firmware is already registered', 'EBD05': 'The specified generation is already registered on the ' 'Flash memory', 'EBD06': 'Not available under current system status conditions', 'EBD07': 'The "hot-auto" application type cannot be executed in ' 'current configuration', 'EBD08': 'The "hot-manual" application type cannot be executed in ' 'current configuration', 'EBD09': 'One or more components have failed when applying the firmware', 'EBD0A': 'An internal process failed', 'EBD0B': 'The rebooting process has finished, but an error has been ' 'detected in the Master CM', 'EBD0C': 'The status of the Master CM is not normal. The rebooting ' 'process cannot be executed', 'EBD0D': 'The hot firmware application failed because the system is ' 'under heavy I/O load', 'EBD0E': 'The hot firmware application failed because the pinned data ' 'exists', 'EBD0F': 'The Data migration is in progress', 'EBD10': 'The hot firmware application cannot be executed under ' 'the current condition of the Advanced Copy function', 'EBD11': 'There is no redundant path available for accessing ' 'the external storage device(s)', 'EBD12': 'External storage access path redundancy error', 'EBD13': 'The specified firmware version is newer than or equal to ' 'the current firmware version', 'EBD14': 'An error has been detected. The controller firmware ' 'application has failed', 'EBD15': 'The controller firmware application has finished, ' 'but has failed for one or more components', 'EBD16': 'The status of the Master CM is not normal. The firmware ' 'application cannot be executed', 'EBD17': 'An error has been detected. The rebooting process has failed', 'EBD18': 'The rebooting process has finished, but has failed for ' 'one or more components', 'EBD19': 'The hot firmware application has been cancelled', 'EBD1A': 'The rebooting process has finished, but one or ' 'more access paths, by which external LUs are not accessible, ' 'have been detected. There is a possibility that ' 'an error occurs on the access paths or the external LUs', 'EBD1B': 'An error has been detected. 
The firmware application for ' 'the PFMs has failed', 'EBD1C': 'The firmware application for the PFMs has finished, but ' 'has failed for one or more PFMs', 'EBD1D': 'Switching firmware has failed', 'EBE00': 'The specified volume is being used as a NAS volume', 'EBE01': 'This operation is not applicable to the specified object', 'EBE02': 'The NAS function is not available', 'EBE03': 'The number of NAS-TPVs exceeds the maximum number of ' 'registrations', 'EBE04': 'The number of NAS-TPVs (Backup) exceeds the maximum number of ' 'registrations', 'EBE05': 'Number of NAS System Volume has reached the system limit', 'EBE06': 'Capacity of NAS System Volume has reached the system limit', 'EBE07': 'An error was detected in NAS Engine', 'EBE08': 'NAS system volume does not exist', 'EBE09': 'NAS system volume is not writable', 'EBE0A': 'The firmware does not support NAS', 'EBE0B': 'This operation is not applicable to the Unified Storage', 'EBE20': 'Specified NAS share does not exist', 'EBE21': 'The number of NAS share exceeds the maximum number of ' 'registrations', 'EBE22': 'Specified NAS share name already exists', 'EBE23': 'Insufficient NAS share resources', 'EBE24': '[-force] option is only used for the NAS Volume whose ' 'status is "Readying"', 'EBE25': 'R and RW cannot be set to the same user or group', 'EBE26': 'Specified host is not registered in the Allow NFS Hosts', 'EBE27': 'Specified NAS share does not support CIFS service', 'EBE28': 'Home directory function is already enabled', 'EBE29': 'The specified NAS share is used for home directory function', 'EBE30': 'Specified NAS interface does not exist', 'EBE31': 'The number of NAS interfaces exceeds the maximum ' 'number of registrations', 'EBE32': 'Another non-VLAN IP address has been registered with this port', 'EBE33': 'The specified IPv4 address is already registered', 'EBE34': 'The specified IPv6 link local address is already registered', 'EBE35': 'The specified IPv6 address is already registered', 'EBE36': 'No valid IP address exists', 'EBE37': 'The VLAN ID setting is incorrect', 'EBE38': 'The IPv4 address is incorrect', 'EBE39': 'The subnet mask setting is incorrect', 'EBE3A': 'The gateway address is incorrect', 'EBE3B': 'The IPv4 host address bits should be non-all-0 and non-all-1', 'EBE3C': 'The specified IPv6 link local address is incorrect', 'EBE3D': 'The specified IPv6 global address is incorrect', 'EBE3E': 'The IPv6 prefix length should be 3-128', 'EBE3F': 'The IPv6 gateway address should be same subnet or ' 'have other interface ID', 'EBE40': 'The specified IPv6 address is not a link local address or ' 'a global address', 'EBE41': 'The specified IP address already exists', 'EBE42': 'The primary DNS server information is not set', 'EBE43': 'Specified NAS interface is not assigned to port', 'EBE44': 'The same VLAN ID has been registered to this port', 'EBE45': 'The specified NAS interface is used by multi-path', 'EBE70': 'The specified port is the master port for the bonding', 'EBE71': 'The specified port is the member port for the bonding', 'EBE72': 'The specified port is not the master port for the bonding', 'EBE73': 'The specified port is not the member port for the bonding', 'EBE74': 'The specified port is on a different CM', 'EBE75': 'The number of member ports exceeds the maximum number of ' 'registrations', 'EBE76': 'Cannot delete the bond because the multi-path is enabled', 'EBE80': 'The specified port belong to the multi-path', 'EBE81': 'The specified port is not multi-path pair', 'EBE82': 'The specified port is 
installed in the same CM', 'EBE83': 'The IP address of the NAS interface under the multi-path ports ' 'has to have the same network address', 'EBE90': 'The server settings conflicted', 'EBE91': 'NAS AD/LDAP server setting is not complete. Some more ' 'parameters need to be specified', 'EBE92': 'Available NAS interface not exist', 'EBE93': 'One or more NAS AD/LDAP servers are registered', 'EBE94': 'The same local group cannot be set to both Primary and ' 'Secondary groups', 'EBE95': 'The specified local user name or ID is already registered', 'EBE96': 'The specified local group does not exist', 'EBE97': 'The specified local user does not exist', 'EBE98': 'The specified local group name or ID is already registered', 'EBE99': 'The specified local group is used as Primary group', 'EBE9A': 'One or more local users or groups are registered', 'EBE9B': 'The number of local users exceeds the maximum number of ' 'registrations', 'EBE9C': 'The number of local groups exceeds the maximum number of ' 'registrations', 'EBE9D': 'BUILTIN group can be used only for Secondary group', 'EBE9E': 'Specified group name is incorrect', 'EBE9F': 'LDAP server is not configured', 'EBEA0': 'The specified route is already registered', 'EBEA1': 'The specified route is not registered', 'EBEA2': 'The specified gateway cannot be accessed', 'EBEA3': 'The host address or the interface ID portion of the IP address ' 'should be zero', 'EBEA4': 'The destination address is the same as the interface address', 'EBEA5': 'The gateway address is the same as the interface address', 'EBEA6': 'The specified destination address is incorrect', 'EBEB0': 'The number of NAS snapshot volumes exceeds ' 'the maximum number of registrations', 'EBEB1': 'The specified NAS snapshot configurations not exist', 'EBEB2': 'The NAS snapshot configurations is set to specified volume', 'EBEB3': 'The Snap Data Pool Volume which is match ' 'the encryption status of the specified volume, does not exist', 'EBEB4': 'The NAS snapshot configurations is the manual collecting mode', 'EBEC0': 'The number of NAS quota settings exceeds the maximum number of ' 'registrations', 'EBEC1': 'A NAS quota setting already exists', 'EBEC2': 'Warning value larger than limit value is specified', 'EBEC3': 'Specified NAS quota setting does not exist', 'EBEC4': 'Deletion of the quota setting associated with ' 'specified volume failed', 'EBEC5': 'All of quota setting failed.', 'EBEC6': 'Deletion of the quota setting associated with ' 'specified NAS share failed', 'EBEE0': 'The specified NAS share has already been configured for ' 'FTP service', 'EBEE1': 'The number of NAS share folders for FTP service exceeds ' 'the allowable maximum', 'EBEE2': 'The specified NAS share has not been configured for FTP service', 'EBF00': 'The number of registered TFO group has exceeded maximum ' 'in this device', 'EBF01': 'TFO group is exist', 'EBF02': 'TFO group does not exist', 'EBF03': 'The specified TFO group name is already registered', 'EBF04': 'The specified CA Port is not in the specified TFO group', 'EBF05': 'The specified TFO group is primary', 'EBF06': 'The specified port is not TFO port', 'EBF07': 'The specified port is already TFO pair port configured', 'EBF08': 'Different types of CA ports cannot be used in TFO group', 'EBF09': 'The maximum TFO capacity cannot be decreased when ' 'TFO pair exists', 'EBF0A': 'The specified volume is not in process of TFO pair', 'EBF0B': 'The TFO group is primary', 'EBF0C': 'The specified volume is in process of TFO pair', 'EBF0D': 'There is a volume what is 
in process of TFO pair', 'EBF0E': 'The specified volume is configured TFOV', 'EBF10': 'Change of size was specified volume is TFOV', 'EBF11': 'The parameter needs storage cluster license', 'EBF12': 'The destination port belongs to TFO group', 'EBF13': 'The source port belongs to TFO group', 'EBF15': 'The specified port has been changed WWPN/WWNN', 'EBF16': 'TFO group is set to manual failover', 'EC000': 'VVOL Fault : ActivateProviderFailed', 'EC001': 'VVOL Fault : InactiveProvider', 'EC002': 'VVOL Fault : IncompatibleVolume', 'EC003': 'VVOL Fault : IncorrectSite', 'EC004': 'VVOL Fault : InvalidArgument', 'EC005': 'VVOL Fault : InvalidCertificate', 'EC006': 'VVOL Fault : InvalidLogin', 'EC007': 'VVOL Fault : InvalidProfile', 'EC008': 'VVOL Fault : InvalidSession', 'EC009': 'VVOL Fault : InvalidStatisticsContext', 'EC00A': 'The specified VVOL copy session does not exist', 'EC010': 'VVOL Fault : LostAlarm', 'EC011': 'VVOL Fault : LostEvent', 'EC012': 'VVOL Fault : NotCancellable', 'EC013': 'VVOL Fault : NotFound', 'EC014': 'VVOL Fault : NotImplemented', 'EC015': 'VVOL Fault : NotSupported', 'EC016': 'VVOL Fault : OutOfResource', 'EC017': 'VVOL Fault : PermissionDenied', 'EC018': 'VVOL Fault : ResourceInUse', 'EC020': 'VVOL Fault : StorageFault', 'EC021': 'VVOL Fault : Timeout', 'EC022': 'VVOL Fault : TooMany', 'EC100': 'One or more external drives exist', 'EC101': 'The specified external storage devices do not exist', 'EC102': 'The number of external drives exceeds the maximum number of ' 'registrations', 'EC103': 'External LUs do not exist', 'EC104': 'External drives do not exist', 'EC105': 'External drives are already used', 'EC106': 'The status of external drives is not normal', 'EC107': 'The specified external RAID group does not exist', 'EC108': 'External RAID groups are already used', 'EC109': 'The specified external RAID group is not in "Broken" state', 'EC10A': 'The number of external RAID groups exceeds ' 'the maximum number of registrations', 'EC10B': 'The status of external RAID groups is not normal', 'EC10C': 'The specified external RAID group name has already been used', 'EC10D': 'External LUs are not accessible', 'ED000': 'Send failed internal command', 'ED001': 'Receive failed internal command response', 'ED002': 'Internal command retry timeout', 'ED003': 'Internal command progress retry timeout', 'ED180': 'Flexible Tier Migration is running', 'ED181': 'Quick UNMAP is being performed', 'ED182': 'The cache LUN size limit is being set', 'ED183': 'Because EC is being executed, the processing was discontinued', 'ED184': 'Because OPC is being executed, the processing was discontinued', 'ED185': 'Because REC is being executed, the processing was discontinued', 'ED186': 'Offloaded Data Transfer is being performed', 'ED187': 'The REC disk buffer volume is associated', 'ED190': 'The internal resources are insufficient', 'ED191': 'The internal resources are insufficient', 'ED192': 'The internal resources are insufficient', 'ED193': 'A non-master-CM component received a command', 'ED194': 'The internal resources are insufficient', 'ED195': 'Internal processes are running. 
Wait for a while and try again', 'ED196': 'The internal resources are insufficient', 'ED197': 'Number of the processing request is reached the limit', 'ED198': 'Process is timeout', 'ED199': 'The process terminated with an error because pinned ' 'data existed', 'ED19A': 'The key management server responded with an error', 'ED19B': 'An error occurred during communication with the key ' 'management server', 'ED19C': 'The key management server contains no keys that can be changed', 'ED19F': 'The command process is being canceled', 'ED1A0': 'Another process is running', 'ED1A1': 'EC is running', 'ED1A2': 'OPC is running', 'ED1A3': 'REC is running', 'ED1A4': 'ROPC is running', 'ED1A5': 'CCP is running', 'ED1A6': 'Quick Format is running', 'ED1A7': 'Rebuild operation is running', 'ED1A8': 'There is no redundancy', 'ED1A9': 'A DE is being rebooted', 'ED1AA': 'CFL is running', 'ED1AB': 'CFD is running', 'ED1AC': 'Operations associated with Log file, Panic Dump or ' 'Event information are being processed', 'ED1AD': 'The hot spare is in use', 'ED1AE': 'Upgrade Dirty Recovery is running', 'ED1AF': 'Degrade Dirty Recovery is running', 'ED1B0': 'Remote Maintenance is running', 'ED1B1': 'Command Lock is being processed', 'ED1B2': 'The configuration is being changed', 'ED1B3': 'Bind In Cache (Extent) is set', 'ED1B4': 'Data Migration is running', 'ED1B5': 'Logical Device Expansion is running', 'ED1B6': 'Write Through is running', 'ED1B7': 'An encryption process or a decryption process is running', 'ED1B8': 'Bind In Cache is set', 'ED1B9': 'Some of the spinup or spindown operations failed', 'ED1BA': 'Eco-mode schedule suspension timeout occurred', 'ED1BB': 'All of the spinup and spindown operations failed', 'ED1BC': 'There is an encryption volume', 'ED1BD': 'Operation Mode is not in "Maintenance Mode"', 'ED1BE': 'A Storage Migration path is set or Storage Migration is running', 'ED1BF': 'Extended Copy is running', 'ED1C0': 'An error occurred in the module', 'ED1C1': 'An error occurred in the CM', 'ED1C2': 'An error occurred in the CA', 'ED1C3': 'An error occurred in the BRT', 'ED1C4': 'An error occurred in the SVC', 'ED1C5': 'An error occurred in the RSP', 'ED1C6': 'An error occurred in the FRT', 'ED1C7': 'An error occurred in the PBC', 'ED1C8': 'An error occurred in the battery', 'ED1C9': 'An error occurred in the DE', 'ED1CA': 'An error occurred in the DE path', 'ED1CB': 'An error occurred in the user drive', 'ED1CC': 'An error occurred in the system drive', 'ED1CD': 'An error occurred in the Flash-ROM', 'ED1CE': 'An error occurred in the FE Expander', 'ED1CF': 'An error occurred in the BE Expander', 'ED1D0': 'An error occurred in the EXP', 'ED1D1': 'An error occurred in the drive path', 'ED1D2': 'An error occurred in the drive', 'ED1D3': 'Unable to retrieve data from NAS Engine. 
' 'Please check the status of the NAS Engine', 'ED1E0': 'Power-on has not been performed yet or power-off is being ' 'performed', 'ED1E1': 'Zero is specified for the module ID in the transmitter', 'ED1E2': 'The lock has been acquired', 'ED1E3': 'Locking has not been performed', 'ED1E4': 'An unsupported command was specified', 'ED1E5': 'The parameter length is incorrect', 'ED1E6': 'The specified parameter is incorrect.', 'ED1E7': 'The data length is incorrect', 'ED1E8': 'The specified data is incorrect', 'ED1E9': 'The execution of the command is requested while this command is ' 'already being performed', 'ED1EA': 'The target object cannot be operated', 'ED1EB': 'An internal process failed', 'ED1EC': 'Because Storage Cluster is being executed, the processing was ' 'discontinued', 'ED1ED': 'The Flexible Tier Pool shrinking is in process', 'ED200': 'The user name or password is incorrect', 'ED201': 'The user name is duplicated', 'ED202': 'The number of registered users has reached the limit', 'ED203': 'This user has already registered the User Key. The process was ' 'aborted', 'ED204': 'The specified role name is not registered', 'ED205': 'An internal process failed', 'ED206': 'The login request exceeds the allowable maximum number of ' 'login process', 'ED207': 'The specified process cannot be performed because a process ' 'that the Virtual Disk Service issued is already running', 'ED208': 'The specified RAID group is not in "Available" state', 'ED209': 'An error has occurred in a communication path', 'ED20A': 'No writable generation exists', 'ED20B': 'The source volume of migration is being deleted by internal ' 'process after completed migration', 'ED20C': 'The cache memory size is insufficient for Bind-in-Cache', 'ED20D': 'No response is received', 'ED20E': 'iSNS server is not set', 'ED20F': 'The installation type information for the DE that is ' 'to be added is insufficient', 'ED210': 'Maintenance mode start or maintenance mode end is being ' 'executed by operation', 'ED211': 'The license information is being updated because ' 'the trial license expired', 'ED212': 'The Bitmap is being acquired', 'ED213': 'The storage is not in "Not Ready" state', 'ED214': 'The Not Ready factor is not Machine Down Recovery failed', 'ED215': '(if processing mode is 0x00) CM with the following status ' 'exists among defined CM: Status other than Online - This CM is ' 'not included in the Cyclic composition', 'ED216': 'The device is a busy state. Please wait for a while', 'ED217': 'Storage Cruiser is being used. The process was aborted', 'ED218': 'Command executed from except Storage Cruiser. 
The process was ' 'aborted', 'ED219': 'Reading all BUDs failed', 'ED21A': 'No BUDs are accessible', 'ED21B': 'Writing all BUDs failed', 'ED21C': 'All of the BUD capacity is used', 'ED21D': 'A timeout occurred during firmware registration', 'ED220': 'The disk where the archive that tries to be registered ' 'can be applied doesn\'t exist in the device', 'ED221': 'The archive that tries to be registered is unsupported firmware', 'ED222': 'Reading the history data failed', 'ED223': 'Reading the composition data failed', 'ED224': 'Writing the history data failed', 'ED225': 'Keeping the composition data failed', 'ED226': 'Keeping the newest composition data failed', 'ED227': 'The configuration is internally being updated', 'ED228': 'Reading from a BUD failed', 'ED229': 'The BUD doesn\'t exist', 'ED22A': 'The target module does not exist', 'ED22B': 'The process cannot be performed because another function is ' 'being executed', 'ED22C': 'The revision that changes the Advanced Copy version cannot be ' 'performed because an EC, an OPC, or a REC is running', 'ED22D': 'The execution was canceled because an error occurred during ' 'communication with the CM', 'ED22E': 'The firmware application or EC switch has not executed', 'ED22F': 'The free capacity of the Flexible Tier Pool is insufficient', 'ED230': 'The EC switching operation that changes ' 'the Advanced Copy version is attempted while an EC, an OPC, ' 'or a REC is running', 'ED231': 'Distribution of the control domain failed', 'ED232': 'The storage is not in "Normal" state', 'ED233': 'The version is not normal', 'ED234': 'A remote copy is running', 'ED235': 'Reclamation of Thin Provisioning Volume is in progress', 'ED236': 'Not all batteries are in "Full Charge" state', 'ED237': 'Controller Firmware is not registered', 'ED238': 'CFL is not executed yet', 'ED239': 'Because the numbers of connections to the specified device ' 'reached the maximum number, it is not possible to connect it. 
' 'Please wait for a while', 'ED23A': 'The firmware distribution function between devices of ' 'the specified device doesn\'t have interchangeability with ' 'this device', 'ED23B': 'The firmware types do not match', 'ED23C': 'The error occurred by the communication with ' 'the specified device', 'ED23D': 'Powering off is being performed', 'ED23E': 'CFL is running', 'ED23F': 'The firmware is being downloaded', 'ED240': 'Gateway is not set though the specified device is set ' 'outside the subnet', 'ED241': 'Duplicated IP address between the specified device and ' 'used LAN port', 'ED242': 'The specified device is in the subnet of unused LAN port', 'ED243': 'Duplicated IP address between the specified device and ' 'allowed IP of unused LAN port', 'ED244': 'Group IDs of the specified storage and ' 'the current storage are different', 'ED245': 'IP address of DNS is not valid', 'ED246': 'Acceptable IP addresses from other subnet have been specified ' 'but Gateway has not been set', 'ED247': 'The port specified for used LAN port of a remote support is ' 'not set', 'ED248': 'Gateway is not set though DNS is set outside of the subnet', 'ED249': 'Gateway is not set though the PROXY server is set outside of ' 'the subnet', 'ED24A': 'Gateway is not set though the HTTP server is set outside of ' 'the subnet', 'ED24B': 'Gateway is not set though the SMTP server is set outside of ' 'the subnet', 'ED24C': 'Gateway is not set though the POP server is set outside of ' 'the subnet', 'ED24D': 'Gateway is not set though the NTP server is set outside of ' 'the subnet', 'ED24E': 'DNS server to resolve server name is not specified', 'ED24F': 'Please export the log, and contact the person in charge of ' 'maintenance', 'ED250': 'The name resolution of the PROXY server failed', 'ED251': 'The name resolution of the HTTP server failed', 'ED252': 'The name resolution of the SMTP server failed', 'ED253': 'The name resolution of the POP server failed', 'ED254': 'The name resolution of the NTP server failed', 'ED255': 'Even though the command terminated successfully, ' 'the name resolution of the primary DNS failed. 
The secondary ' 'DNS is used instead', 'ED256': 'The name resolution succeeded by the IPv6 Primary DNS server', 'ED257': 'The name resolution succeeded by the IPv6 Secondary DNS server', 'ED258': 'The name resolution succeeded by the IPv4 Primary DNS server', 'ED259': 'The name resolution succeeded by the IPv4 Secondary DNS server', 'ED25A': 'Login to the POP server is impossible because the user name or ' 'password is incorrect', 'ED25B': 'Error occurred in authentication with AUTH', 'ED25C': 'Error occurred in communication with SMTP server', 'ED25D': 'Error occurred in communication with HTTP server', 'ED25E': 'Error occurred in communication with PROXY server', 'ED25F': 'Error occurred in communication with POP server', 'ED260': 'Time out occurred in communication with SMTP server', 'ED261': 'Time out occurred in communication with HTTP server', 'ED262': 'Time out occurred in communication with PROXY server', 'ED263': 'Time out occurred in communication with POP server', 'ED264': 'Error occurred in sending data to SMTP server', 'ED265': 'Error occurred in sending data to HTTP server', 'ED266': 'Error occurred in sending data to PROXY server', 'ED267': 'Error occurred in sending data to POP server', 'ED268': 'Error occurred in receiving data from SMTP server', 'ED269': 'Error occurred in receiving data from HTTP server', 'ED26A': 'Error occurred in receiving data from PROXY server', 'ED26B': 'Error occurred in receiving data from POP server', 'ED26C': 'Duplicated IP address between DNS server and used LAN port', 'ED26D': 'The IP address for DNS server is in the subnet of unused ' 'LAN port', 'ED26E': 'Duplicated IP address between DNS server and allowed IP of ' 'unused LAN port', 'ED26F': 'Duplicated IP address between PROXY server and used LAN port', 'ED270': 'Duplicated IP address between HTTP server and used LAN port', 'ED271': 'Duplicated IP address between SMTP server and used LAN port', 'ED272': 'Duplicated IP address between POP server and used LAN port', 'ED273': 'Duplicated IP address between NTP server and used LAN port', 'ED274': 'The IP address for the PROXY server is in the subnet of ' 'unused LAN port', 'ED275': 'The IP address for the HTTP server is in the subnet of ' 'unused LAN port', 'ED276': 'The IP address for the SMTP server is in the subnet of ' 'unused LAN port', 'ED277': 'The IP address for the POP server is in the subnet of ' 'unused LAN port', 'ED278': 'The IP address for the NTP server is in the subnet of ' 'unused LAN port', 'ED279': 'Duplicated IP address between PROXY server and ' 'allowed IP of unused LAN port', 'ED27A': 'The Flexible Tier Pool is in "Broken" state', 'ED27B': 'The ODX Buffer volume exists.', 'ED27C': 'The Flexible Tier Pool balancing is in process', 'ED27D': 'Online Storage Migration is in process', 'ED27E': 'Freeing up space in the Flexible Tier Pool is in process', 'ED27F': 'The last RAID group in the Flexible Tier Pool cannot be deleted', 'ED280': 'The RAID group is being deleted by internal process after ' 'Flexible Tier Pool shrinking', 'ED281': 'Duplicated IP address between HTTP server and allowed IP of ' 'unused LAN port', 'ED282': 'Duplicated IP address between SMTP server and allowed IP of ' 'unused LAN port', 'ED283': 'Duplicated IP address between POP server and allowed IP of ' 'unused LAN port', 'ED284': 'Duplicated IP address between NTP server and allowed IP of ' 'unused LAN port', 'ED285': 'The Flexible Tier Pool shrinking is in process', 'ED286': 'Failed to start SSD sanitization', 'ED287': 'The Flexible Tier Pool 
shrinking is in process', 'ED288': 'The Flexible Tier Pool shrinking is not in process', 'ED289': 'The password cannot be set. (Minimum password age ' 'policy violation', 'ED28A': 'The device is not registered', 'ED28B': 'The password cannot be set. (Password history policy violation', 'ED28C': 'No BUDs are available', 'ED28D': 'The password cannot be set. (Minimum password length ' 'policy violation', 'ED28E': 'The remote support center is busy', 'ED28F': 'The network information is being set', 'ED290': 'No controller firmware can be downloaded', 'ED291': 'The information of the device is being sent again ' 'because outdated information is registered in the remote ' 'support center. Wait approximately ten minutes and try again', 'ED292': 'An error occurred during HTTP communication', 'ED293': 'An error occurred during SMTP communication', 'ED294': 'A communication error occurred', 'ED295': 'No log files exist', 'ED296': 'The specified SLU does not exist', 'ED297': 'Data cannot be obtained because of a cache miss', 'ED298': 'The cache data cannot be obtained because the specified mirror ' 'cache does not exist', 'ED299': 'The cache data cannot be obtained because the cache of ' 'the drive that is specified contains dirty data', 'ED29A': 'Even though the CCHH mode is specified, the relevant volume is ' 'not a Mainframe volume or a MVV volume', 'ED29B': 'Specified Head Number is invalid', 'ED29C': 'Specified CCHH or SLBA is out of range', 'ED29D': 'The LU type of the specified SLU is TPPC, FTV, or TMP FTV', 'ED29E': 'The storage is in "Machine Down" state', 'ED29F': 'Status of target RAID group is not Broken', 'ED2A0': 'The access path to the specified RAID group is not normal', 'ED2A1': 'There is no access path to the target RAID group', 'ED2A2': 'The password cannot be set. (Password complexity policy ' 'violation', 'ED2A3': 'Invalid firmware file', 'ED2A4': 'The specified Role name has already been used', 'ED2A5': 'The number of roles has reached the maximum number of ' 'registrations', 'ED2A6': 'Deletion of a role that is assigned to a user is attempted', 'ED2A8': 'The specified Snap Data Pool Volume does not exist', 'ED2A9': 'The Copy Bitmap is insufficient', 'ED2AA': 'Processing was interrupted because it reached max copy session ' 'count or copy function is not enable', 'ED2AB': 'The specified volume is in process of copy session or ' 'RAID Migration', 'ED2AC': 'An invalid LU is specified', 'ED2AD': 'Because specified session is not the oldest one, ' 'the processing was not performed', 'ED2AE': 'The specified volume is being initialized', 'ED2AF': 'The encryption settings of the copy source and the copy ' 'destination are different', 'ED2B0': 'The drive motor is stopped for either the copy source or ' 'the copy destination due to an Eco-mode schedule', 'ED2B1': 'The specified destination volume is being used by ' 'another session', 'ED2B2': 'The Thin Provisioning function is disabled', 'ED2B3': 'Slave CM: Execution was discontinued for the other ' 'command accepted', 'ED2B4': 'Slave CM: Error occurred in receiving data from Master CM', 'ED2B5': 'Master CM: Error occurred in sending data from Slave CM', 'ED2B6': 'Master CM: Error occurred in receiving data from Slave CM', 'ED2B7': 'Bind-in-Cache Memory Size has already been set. 
' 'Cache Parameters cannot be changed', 'ED2B8': 'The specified resource number exceeds the maximum value for ' 'the allowed range', 'ED2B9': 'Incorrect parameter combination', 'ED2BA': 'The specified license key is incorrect', 'ED2BB': 'The specified User Public Key file is not correct', 'ED2BC': 'The specified SSL Server Key file does not match the SSL Server ' 'Certificate file', 'ED2BD': 'No session is running', 'ED2BE': 'Access to the BUD is being suppressed', 'ED2BF': 'The pool capacity that can be created in the device exceeds ' 'the maximum pool capacity', 'ED2C0': 'The number of unused disks is insufficient', 'ED2C1': 'RLU/DLU/SLU are insufficient', 'ED2C2': 'The Flexible Tier function is disabled', 'ED2C6': 'The SSL/KMIP certificate file is not normal', 'ED2C7': 'The process has failed. It failed in some CA port(s)', 'ED2C8': 'The process has failed. It failed in all CA ports', 'ED2C9': 'The specified TPPE ID does not exist', 'ED2CA': 'The trial license key is incorrect', 'ED2CB': 'The trial license key has reached the registration ' 'limit number of times', 'ED2CC': 'Competing with AIS connect operation in background process', 'ED2CD': 'Competing with AIS connect send log operation in ' 'background process', 'ED2CE': 'Volume Type which is the Migration destination is different', 'ED2CF': 'Another Deduplication/Compression check already in progress', 'ED2D0': 'Displaying Snap OPC restore size is not supported', 'ED2D1': 'Recovery process is running. Wait for a while and try again', 'ED2D2': 'The installed memory is insufficient', 'ED2D3': 'The VVOL function is not disabled', 'ED2D4': 'Drives are not installed on the required slots for using the ' 'specified maximum pool capacity', 'ED2D5': 'The total volume capacity which can be created or expanded by ' 'one operation is up to 2PB', 'ED2D6': 'There are one or more PFMs which can be downgraded', 'ED500': 'An error occurred in the Deduplication/Compression Process', 'ED501': 'Master link local IP conflicted', 'ED502': 'Slave link local IP conflicted', 'ED503': 'Global/gateway IP cannot be obtained', 'ED504': 'Duplication check of link local IP failed', 'ED505': 'Prefix length is incorrect', 'ED506': 'The usable capacity of the Deduplication/Compression Map volume ' 'is insufficient temporarily. Please wait for a while and retry', 'ED507': 'The system is in high-load state. Please wait for a while', 'ED508': 'The specified external LU has already been registered', 'ED509': 'The access path of the external storage device is not normal', 'ED50A': 'The number of drives that is used exceeds the maximum number', 'ED50B': 'The target mapping table number exceeds the maximum number in ' 'the allowed range', 'ED50C': 'The target OLU already exists in the same mapping table', 'ED50D': 'Incorrect parameter combination', 'ED50E': 'The specified host number exceeds the maximum number in ' 'the allowed range', 'ED50F': 'The WWN that is to be registered is duplicated', 'ED510': 'The specified external RAID group cannot be recovered', 'ED511': 'CA port is overlapping in group', 'ED512': 'The external LU information is not consistent. 
Please ' 'refer to ETERNUS CLI User\'s Guide for more details', 'ED513': 'The specified LCU number exceeds the maximum number in ' 'the allowed range', 'ED514': 'The specified host response number exceeds the maximum number ' 'in the allowed range', 'ED515': 'The external storage device responded with an error', 'ED516': 'An error occurred in accessing the external storage device', 'ED517': 'A copy session is running', 'ED518': 'The connected device does not support this function', 'ED519': 'The forwarding interval cannot be specified when the ' 'ETERNUS6000 is connected', 'ED51A': 'The buffer size exceeds the maximum size for the device', 'ED51B': 'REC Buffer has already been configured. The process was aborted', 'ED51C': 'The storage is in "Not Ready" state. The process was aborted', 'ED51D': 'The REC disk buffer contains some data', 'ED51E': 'Some REC Consistency sessions are not in Suspend status', 'ED51F': 'There is no Pinned Data or Bad Data in the specified volume', 'ED520': 'The specified volume has too many Pinned Data or ' 'Bad Data for checking', 'ED523': 'The number of migration sessions has reached ' 'the maximum number for operations in the device', 'ED524': 'The migration source LUN is being used by ' 'another migration process', 'ED525': 'The migration source LUN is being used by another copy session', 'ED526': 'All of the internal resources have already been used', 'ED527': 'The status of the volume in the migration source or ' 'the migration destination is not normal', 'ED528': 'Migration session(s) are not running for ' 'the specified volume(s)', 'ED529': 'Bind-in-Cache is set for the specified OLU', 'ED52A': 'The migration capacity exceeds the maximum logical ' 'capacity that can be migrated', 'ED52B': 'There is not enough free space in the specified destination ' 'pool', 'ED52C': 'The total capacity of pool is not enough in the storage system. ' 'The process was aborted', 'ED52D': 'There are one or more volumes whose migration status is ' 'not normal', 'ED52F': 'Enough work capacity for Balancing Thin Provisioning Volume or ' 'Balancing Flexible Tier Pool does not exist. 
This function ' 'cannot be executed', 'ED531': 'The necessary LU resources are insufficient', 'ED53A': 'Communication to other device is failure', 'ED53B': 'TFO group status is inconsistent', 'ED53C': 'TFO group phase is inconsistent', 'ED53D': 'The specified TFO group has no volume', 'ED53E': 'Capacity of volume differs with in the secondary and primary', 'ED53F': 'The volume has already been used in the TFO group', 'ED540': 'Firmware of the other storage does not support ' 'the Storage Cluster', 'ED541': 'The specified type of TFO group is already registered', 'ED542': 'Box ID is inconsistent', 'ED543': 'The TFO group is inconsistent of pair port configuration ' 'between primary and secondary', 'ED544': 'Volume that cannot be used in TFO pair port exists', 'ED545': 'Failover mode or Split mode does not match between ' 'the secondary and primary', 'ED546': 'Copy session exists in the volume', 'ED548': 'Volume UID differs with in the secondary and primary', 'ED549': 'Cannot delete the specified TFO group because the specified ' 'port WWN mode is incorrect', 'ED54A': 'Volume of primary paired with specified volume is not exist', 'ED54B': 'TFO group activation was specified for incorrect device', 'ED54C': 'Port of primary paired is not affinity setting', 'ED54D': 'Storage Cluster data transfer feature is disabled in all RA ' 'ports constituting the copy path', 'ED54E': 'TFO pair is active', 'ED54F': 'Incorrect TFO group condition', 'ED550': 'The volume cannot be set the copy', 'ED551': 'There is "Bad Sector" in the copy source volume', 'ED552': 'The number of copy sessions exceeds the allowable maximum ' 'copy sessions for this storage', 'ED553': 'The number of copy sessions exceeds the allowable maximum ' 'copy sessions for each copy source volume', 'ED554': 'The number of copy sessions exceeds the allowable maximum ' 'copy sessions for each copy destination volume', 'ED555': 'Firmware update is in progress. 
The specified operation ' 'cannot be done', 'ED556': 'VVOL session is active', 'ED557': 'The free capacity of the pool is insufficient', 'ED558': 'Process to free up space in the TPP from a host is running', 'ED700': 'The free capacity of the NAS volumes is insufficient', 'ED701': 'The free capacity of the NAS system volumes is insufficient', 'ED702': 'Filesystem check is required', 'ED703': 'Full filesystem check is required', 'ED704': 'The mounting status of the NAS file system is incorrect', 'ED705': 'Maintenance of the filesystem is required', 'ED706': 'DNS lookup failure', 'ED707': 'The VLAN setting for the NAS is incorrect', 'ED708': 'The NAS bonding setting is incorrect', 'ED709': 'The network setting for the NAS is incorrect', 'ED70A': 'An I/O error occurs in the NAS system', 'ED70B': 'The authentication process via the authentication server failed', 'ED70C': 'Updating file system version is required', 'ED70D': 'NAS interface failover is currently active', 'ED70E': 'Updating file system version is required', 'ED710': 'An internal error occurs in the NAS system', 'ED711 ': 'NAS internal error', 'ED712 ': 'NAS internal error', 'ED713 ': 'NAS internal error', 'ED714 ': 'NAS internal error', 'ED715 ': 'NAS internal error', 'ED716 ': 'NAS internal error', 'ED717 ': 'NAS internal error', 'ED718 ': 'NAS internal error', 'ED719 ': 'NAS internal error', 'ED71A ': 'NAS internal error', 'ED71B ': 'NAS internal error', 'ED71C ': 'NAS internal error', 'ED71D ': 'NAS internal error', 'ED71E ': 'NAS internal error', 'ED71F ': 'NAS internal error', 'ED720': 'Filesystem check is already running', 'ED721': 'Invalid operation', 'ED722': 'NAS engine is not started', 'ED723': 'The Volume not mounted', 'ED724': 'Failed to connect to the other CM', 'ED725': 'The NAS Snapshot is currently busy', 'ED726': 'The free capacity of the storage pool is insufficient', 'ED727': 'Domain join error', 'ED728': 'Server connection error', 'ED729': 'Clock skew too great', 'ED72A': 'Improper user or group', 'ED72B': 'User or group does not exists', 'ED72C': 'Improper allow host address', 'ED72D': 'Authority error', 'ED72E': 'Filesystem is being accessed', 'ED72F': 'NAS quota setting failed partially', 'ED730': 'LDAPS certificate is not registered', 'ED731': 'LDAPS certificate is invalid', 'ED732': 'The specified domain name is incorrect', 'ED733': 'The shared folder is not empty. Before deleting ' 'the share folder, delete all files/folders inside the folder. ' 'Please refer to "clear nas-data"', 'ED734': 'NAS data deletion process is running', 'ED735': 'NAS extension system volume does not exist', 'ED736': 'The snap data volume is being used', 'ED737': 'Consistency check of NAS extension system volume is in progress', 'ED738': 'NAS extension system volume is not normal', 'ED739': 'One or more clients have connected to this shared folder. ' 'Please disconnect it first.', 'ED73A': 'Improper path', 'ED73B': 'Path does not exist', 'ED73C': 'Packet capture is in progress at the specified NAS interface', 'ED73D': 'Specified user name is incorrect', 'ED73E': 'FTP connection session exists', 'ED73F': 'The Access Control List is being initialized', 'ED740': 'The free file system space is insufficient', 'ED741': 'The specified file has non-empty data. 
Overwriting is required', 'ED742': 'The file inflating process is running', 'ED743': 'Specified group name is incorrect', 'ED744': 'Snapshot or NAS cache distribution process is running', 'ED745': 'The user is already registered', 'ED746': 'The group is already registered', 'ED747': 'The provisioned file size is too small to inflate', 'ED748': 'The user cannot be deleted because it is currently being ' 'used to access to a shared folder', 'ED749': 'User home directory deletion process is running', 'ED74A': 'Cannot start to inflate the specified file because it is in use', 'ED74B': 'Firewall setting for secure connection to change local user ' 'password is configured as "open" for some NAS ports. ' 'Please change the setting of these ports to "close"', 'ED74C': 'Initializing NAS cache distribution failed. ' 'The storage system might be in high-load temporarily. ' 'Please wait for a while and retry', 'ED800': 'Stack suspend timeout', 'ED801': 'Cascade copy session exist', 'ED802': 'Cascade local copy session exist', 'ED803': 'Cascade EC/REC session is not suspended', 'ED805': 'Advanced copy operations for TFOV are not supported', 'ED806': 'Copy of an illegal combination with TFO pair', 'ED807': 'Copy of an illegal combination with storage cluster continuous ' 'copy session', 'ED808': 'Illegal copy session has been specified for the TFO port', 'ED809': 'Illegal combination with Online Storage Migration', } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/drivers/fujitsu/eternus_dx/eternus_dx_cli.py0000664000175000017500000004304600000000000026561 0ustar00zuulzuul00000000000000# Copyright (c) 2019 FUJITSU LIMITED # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
# """Cinder Volume driver for Fujitsu ETERNUS DX S3 series.""" from cinder.i18n import _ from cinder import ssh_utils from cinder.volume.drivers.fujitsu.eternus_dx import constants as CONSTANTS class FJDXCLI(object): """ETERNUS CLI Code.""" def __init__(self, user, storage_ip, password=None, keyfile=None): """Constructor.""" self.user = user self.storage_ip = storage_ip if password and keyfile: raise Exception(_('can not specify both password and keyfile')) self.use_ipv6 = False if storage_ip.find(':') != -1: self.use_ipv6 = True if password: self.ssh_pool = ssh_utils.SSHPool(storage_ip, 22, None, user, password=password, max_size=2) if keyfile: self.ssh_pool = ssh_utils.SSHPool(storage_ip, 22, None, user, privatekey=keyfile, max_size=2) self.ce_support = False self.CMD_dic = { 'check_user_role': self._check_user_role, 'expand_volume': self._expand_volume, 'show_pool_provision': self._show_pool_provision, 'show_qos_bandwidth_limit': self._show_qos_bandwidth_limit, 'set_qos_bandwidth_limit': self._set_qos_bandwidth_limit, 'set_volume_qos': self._set_volume_qos, 'show_copy_sessions': self._show_copy_sessions, 'show_volume_qos': self._show_volume_qos, 'show_enclosure_status': self._show_enclosure_status, 'start_copy_snap_opc': self._start_copy_snap_opc, 'stop_copy_session': self._stop_copy_session, 'start_copy_opc': self._start_copy_opc, 'delete_volume': self._delete_volume } def done(self, command, **option): func = self.CMD_dic.get(command, self._default_func) return func(**option) def _exec_cli(self, cmd, StrictHostKeyChecking=True, **option): exec_cmdline = cmd + self._get_option(**option) stdoutdata = self._exec_cli_with_eternus(exec_cmdline) output = [] message = [] stdoutlist = stdoutdata.split('\r\n') output_header = "" for no, outline in enumerate(stdoutlist): if len(outline) <= 0 or outline is None: continue if not output_header.endswith(exec_cmdline): output_header += outline continue if 0 <= outline.find('Error'): raise Exception(_("Output: %(outline)s: " "Command: %(cmdline)s") % {'outline': outline, 'cmdline': exec_cmdline}) if not self._is_status(outline): continue status = int(outline, 16) lineno = no + 1 break else: raise Exception(_( "Invalid CLI output: %(exec_cmdline)s, %(stdoutlist)s") % {'exec_cmdline': exec_cmdline, 'stdoutlist': stdoutlist}) if status == 0: rc = str(CONSTANTS.RC_OK) for outline in stdoutlist[lineno:]: if 0 <= outline.find('CLI>'): continue if len(outline) <= 0: continue if outline is None: continue message.append(outline) else: code = stdoutlist[lineno] for outline in stdoutlist[lineno + 1:]: if 0 <= outline.find('CLI>'): continue if len(outline) <= 0: continue if outline is None: continue output.append(outline) if cmd != "show cli-error-code": rc, message = self._create_error_message(code, output) else: rc = 'E' + code message = output return {'result': 0, 'rc': rc, 'message': message} def _exec_cli_with_eternus(self, exec_cmdline): """Execute CLI command with arguments.""" ssh = None try: ssh = self.ssh_pool.get() chan = ssh.invoke_shell() chan.send(exec_cmdline + '\n') stdoutdata = '' while True: temp = chan.recv(65535) if isinstance(temp, bytes): temp = temp.decode('utf-8') else: temp = str(temp) stdoutdata += temp # CLI command end with 'CLI>'. if stdoutdata == '\r\nCLI> ': continue if (stdoutdata[len(stdoutdata) - 5: len(stdoutdata) - 1] == 'CLI>'): break except Exception as e: raise Exception(_("Execute CLI " "command error. 
Error: %s") % e) finally: if ssh: self.ssh_pool.put(ssh) self.ssh_pool.remove(ssh) return stdoutdata def _show_cli_error_message(self, **option): """Get error messages by error code.""" output = self._exec_cli("show cli-error-code", **option) rc = output['rc'] if rc != str(CONSTANTS.RC_OK): raise Exception(_('_show_cli_error_message failed. ' 'Return code: %lu') % rc) message = output['message'][1] output['message'] = message.split('\t')[1] return output def _create_error_message(self, code, msg): """Create error code and message using arguements.""" message = None rc = 'E' + code try: option = { 'error-code': code } message = self._show_cli_error_message(**option)['message'] except Exception: message = CONSTANTS.CLIRETCODE_dic.get(rc, msg) return rc, message @staticmethod def _is_status(value): """Check whether input value is status value or not.""" try: if len(value) != 2: return False int(value, 16) int(value[0], 16) int(value[1], 16) return True except ValueError: return False @staticmethod def _get_option(**option): """Create option strings from dictionary.""" ret = "" for key, value in option.items(): ret += " -%(key)s %(value)s" % {'key': key, 'value': value} return ret def _default_func(self, **option): """Default function.""" raise Exception(_("Invalid function is specified")) def _check_user_role(self, **option): """Check user role.""" try: output = self._exec_cli("show users", StrictHostKeyChecking=False, **option) # Return error. rc = output['rc'] if rc != str(CONSTANTS.RC_OK): return output userlist = output.get('message') role = None for userinfo in userlist: username = userinfo.split('\t')[0] if username == self.user: role = userinfo.split('\t')[1] break output['message'] = role except Exception as ex: if 'show users' in str(ex): msg = ("Specified user(%s) does not have Software role" % self.user) elif 'Error connecting' in str(ex): msg = (str(ex)[34:] + ', Please check fujitsu_private_key_path or .xml file') else: msg = str(ex) output = { 'result': 0, 'rc': str(CONSTANTS.RC_FAILED), 'message': msg } return output def _expand_volume(self, **option): """Exec expand volume.""" return self._exec_cli("expand volume", **option) def _set_volume_qos(self, **option): """Exec set volume-qos.""" return self._exec_cli("set volume-qos", **option) def _show_pool_provision(self, **option): """Get TPP provision capacity information.""" try: output = self._exec_cli("show volumes", **option) rc = output['rc'] if rc != str(CONSTANTS.RC_OK): return output clidatalist = output.get('message') data = 0 for clidataline in clidatalist[1:]: clidata = clidataline.split('\t') if clidata[0] == 'FFFF': break data += int(clidata[7], 16) provision = data / 2048 output['message'] = provision except Exception as ex: output = { 'result': 0, 'rc': str(CONSTANTS.RC_FAILED), 'message': "show pool provision capacity error: %s" % ex } return output def _show_copy_sessions(self, **option): """Get copy sessions.""" try: output = self._exec_cli("show copy-sessions", **option) # return error rc = output['rc'] if rc != str(CONSTANTS.RC_OK): return output cpsdatalist = [] clidatalist = output.get('message') for clidataline in clidatalist[1:]: clidata = clidataline.split('\t') # Get CopyType if clidata[2] == '01': # CopyKind: OPC if bin(int(clidata[3], 16) & 16) != 0: # eg. 0b10010000 temp_type = 'Snap' elif bin(int(clidata[3], 16) & 64) != 0: # eg. 
0b11000000 temp_type = 'Snap+' else: temp_type = 'Other' elif clidata[2] == '02': # CopyKind: EC if clidata[5] == 'FF': temp_type = 'EC' elif clidata[5] == '10': temp_type = 'Sync_REC' else: temp_type = 'Other' else: temp_type = 'Other' # Get Phases if clidata[6] == '00': temp_phase = 'No_Pair' elif clidata[6] == '01': temp_phase = 'Copying' elif clidata[6] == '02': temp_phase = 'Equivalent' elif clidata[6] == '03': temp_phase = 'Tracking' elif clidata[6] == '04': temp_phase = 'Tracking_Copying' elif clidata[6] == '06': temp_phase = 'Readying' else: temp_phase = 'Other' # Get CopyStatus if clidata[7] == '00': temp_status = 'Idle' elif clidata[7] == '01': temp_status = 'Reserve' elif clidata[7] == '02': temp_status = 'Active' elif clidata[7] == '03': temp_status = 'Error_Suspend' elif clidata[7] == '04': temp_status = 'Suspend' elif clidata[7] == '05': temp_status = 'Halt' else: temp_status = 'Other' cpsdatalist.append({'Source Num': int(clidata[13], 16), 'Dest Num': int(clidata[14], 16), 'Type': temp_type, 'Status': temp_status, 'Phase': temp_phase, 'Session ID': int(clidata[0], 16)}) output['message'] = cpsdatalist except Exception as ex: output = {'result': 0, 'rc': str(CONSTANTS.RC_FAILED), 'message': "Show copy sessions error: %s" % str(ex)} return output def _show_qos_bandwidth_limit(self, **option): """Get qos bandwidth limit.""" clidata = None try: output = self._exec_cli("show qos-bandwidth-limit", **option) # return error rc = output['rc'] if rc != str(CONSTANTS.RC_OK): return output qoslist = [] clidatalist = output.get('message') for clidataline in clidatalist[1:]: clidata = clidataline.split('\t') qoslist.append({'total_limit': int(clidata[0], 16), 'total_iops_sec': int(clidata[1], 16), 'total_bytes_sec': int(clidata[2], 16), 'read_limit': int(clidata[0], 16), 'read_iops_sec': int(clidata[3], 16), 'read_bytes_sec': int(clidata[4], 16), 'write_limit': int(clidata[0], 16), 'write_iops_sec': int(clidata[5], 16), 'write_bytes_sec': int(clidata[6], 16)}) output['message'] = qoslist except IndexError as ex: msg = ('The results returned by cli are not as expected. ' 'Exception string: %s' % clidata) output = {'result': 0, 'rc': str(CONSTANTS.RC_FAILED), 'message': "Show qos bandwidth limit error: %s. %s" % (ex, msg)} except Exception as ex: output = {'result': 0, 'rc': str(CONSTANTS.RC_FAILED), 'message': "Show qos bandwidth limit error: %s" % ex} return output def _set_qos_bandwidth_limit(self, **option): """Set qos bandwidth limit""" return self._exec_cli("set qos-bandwidth-limit", **option) def _show_volume_qos(self, **option): """Get volumes with qos.""" clidata = None try: output = self._exec_cli("show volume-qos", **option) # return error rc = output['rc'] if rc != str(CONSTANTS.RC_OK): return output vqosdatalist = [] clidatalist = output.get('message') for clidataline in clidatalist[1:]: clidata = clidataline.split('\t') vqosdatalist.append({'total_limit': int(clidata[2], 16), 'read_limit': int(clidata[3], 16), 'write_limit': int(clidata[4], 16)}) output['message'] = vqosdatalist except IndexError as ex: msg = ('The results returned by cli are not as expected. ' 'Exception string: %s' % clidata) output = {'result': 0, 'rc': str(CONSTANTS.RC_FAILED), 'message': "Show volume qos error: %s. 
%s" % (ex, msg)} except Exception as ex: output = {'result': 0, 'rc': str(CONSTANTS.RC_FAILED), 'message': "Show volume qos error: %s" % ex} return output def _show_enclosure_status(self, **option): """Get the version of machine.""" clidata = None try: output = self._exec_cli("show enclosure-status", **option) # return error rc = output['rc'] if rc != str(CONSTANTS.RC_OK): return output clidatalist = output.get('message') clidata = clidatalist[0].split('\t') versioninfo = {'version': clidata[11]} output['message'] = versioninfo except IndexError as ex: msg = ('The results returned by cli are not as expected. ' 'Exception string: %s' % clidata) output = {'result': 0, 'rc': str(CONSTANTS.RC_FAILED), 'message': "Show enclosure status error: %s. %s" % (ex, msg)} except Exception as ex: output = {'result': 0, 'rc': str(CONSTANTS.RC_FAILED), 'message': "Show enclosure status error: %s" % ex} return output def _start_copy_snap_opc(self, **option): """Exec start copy-snap-opc.""" return self._exec_cli("start copy-snap-opc", **option) def _stop_copy_session(self, **option): """Exec stop copy-session.""" return self._exec_cli("stop copy-session", **option) def _start_copy_opc(self, **option): """Exec start copy-opc.""" return self._exec_cli("start copy-opc", **option) def _delete_volume(self, **option): """Exec delete volume.""" return self._exec_cli('delete volume', **option) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/drivers/fujitsu/eternus_dx/eternus_dx_common.py0000664000175000017500000043274100000000000027306 0ustar00zuulzuul00000000000000# Copyright (c) 2015 FUJITSU LIMITED # Copyright (c) 2012 EMC Corporation. # Copyright (c) 2012 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
# """Cinder Volume driver for Fujitsu ETERNUS DX S3 series.""" import base64 import hashlib import time from lxml import etree as ET from oslo_concurrency import lockutils from oslo_config import cfg from oslo_log import log as logging from oslo_service import loopingcall from oslo_utils import units from cinder import context from cinder import exception from cinder.i18n import _ from cinder import utils from cinder.volume import configuration as conf from cinder.volume.drivers.fujitsu.eternus_dx import constants as CONSTANTS from cinder.volume.drivers.fujitsu.eternus_dx import eternus_dx_cli from cinder.volume import qos_specs from cinder.volume import volume_types from cinder.volume import volume_utils LOG = logging.getLogger(__name__) CONF = cfg.CONF try: import pywbem pywbemAvailable = True except ImportError: pywbemAvailable = False FJ_ETERNUS_DX_OPT_opts = [ cfg.StrOpt('cinder_eternus_config_file', default='/etc/cinder/cinder_fujitsu_eternus_dx.xml', help='Config file for cinder eternus_dx volume driver.'), cfg.BoolOpt('fujitsu_passwordless', default=True, help='Use SSH key to connect to storage.'), cfg.StrOpt('fujitsu_private_key_path', default='$state_path/eternus', help='Filename of private key for ETERNUS CLI. ' 'This option must be set when ' 'the fujitsu_passwordless is True.'), cfg.BoolOpt('fujitsu_use_cli_copy', default=False, help='If True use CLI command to create snapshot.'), ] CONF.register_opts(FJ_ETERNUS_DX_OPT_opts, group=conf.SHARED_CONF_GROUP) class FJDXCommon(object): """Common code that does not depend on protocol. Version history: 1.0 - Initial driver 1.3.0 - Community base version 1.4.0 - Add support for QoS. 1.4.1 - Add the method for expanding RAID volumes by CLI. 1.4.2 - Add the secondary check for copy-sessions when deleting volumes. 1.4.3 - Add fragment capacity information of RAID Group. 1.4.4 - Add support for update migrated volume. 1.4.5 - Add metadata for snapshot. 1.4.6 - Add parameter fujitsu_use_cli_copy. 1.4.7 - Add support for revert-to-snapshot. 1.4.8 - Improve the processing flow of CLI error messages.(bug #2048850) - Add support connect to storage using SSH key. """ VERSION = "1.4.8" stats = { 'driver_version': VERSION, 'storage_protocol': None, 'vendor_name': 'FUJITSU', 'QoS_support': True, 'volume_backend_name': None, } def __init__(self, prtcl, configuration=None): self.pywbemAvailable = pywbemAvailable self.protocol = prtcl self.configuration = configuration self.configuration.append_config_values(FJ_ETERNUS_DX_OPT_opts) self.conn = None self.passwordless = self.configuration.fujitsu_passwordless self.private_key_path = self.configuration.fujitsu_private_key_path self.use_cli_copy = self.configuration.fujitsu_use_cli_copy self.fjdxcli = {} self.model_name = self._get_eternus_model() self._check_user() @staticmethod def get_driver_options(): return FJ_ETERNUS_DX_OPT_opts def create_volume(self, volume): """Create volume on ETERNUS.""" LOG.debug('create_volume, ' 'volume id: %(vid)s, volume size: %(vsize)s.', {'vid': volume['id'], 'vsize': volume['size']}) d_metadata = self.get_metadata(volume) element_path, metadata = self._create_volume(volume) d_metadata.update(metadata) model_update = { 'provider_location': str(element_path), 'metadata': d_metadata } # Set qos to created volume. try: self._set_qos(volume, use_id=True) except Exception as ex: LOG.error('create_volume, ' 'error occurred while setting volume qos. 
' 'Error information: %s', ex) # While set qos failed, delete volume from backend volumename = metadata['FJ_Volume_Name'] self._delete_volume_after_error(volumename) return model_update def _create_volume(self, volume): LOG.debug('_create_volume, ' 'volume id: %(vid)s, volume size: %(vsize)s.', {'vid': volume['id'], 'vsize': volume['size']}) self.conn = self._get_eternus_connection() volumesize = volume['size'] * units.Gi volumename = self._get_volume_name(volume, use_id=True) LOG.debug('_create_volume, volumename: %(volumename)s, ' 'volumesize: %(volumesize)u.', {'volumename': volumename, 'volumesize': volumesize}) configservice = self._find_eternus_service(CONSTANTS.STOR_CONF) if not configservice: msg = (_('_create_volume, volume: %(volume)s, ' 'volumename: %(volumename)s, ' 'eternus_pool: %(eternus_pool)s, ' 'Storage Configuration Service not found.') % {'volume': volume, 'volumename': volumename}) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) # Get all pools information on ETERNUS. pools_instance_list = self._find_all_pools_instances(self.conn) if 'host' in volume: eternus_pool = volume_utils.extract_host(volume['host'], 'pool') for pool, ptype in pools_instance_list: if eternus_pool == pool['ElementName']: pool_instance = pool if ptype == 'RAID': pooltype = CONSTANTS.RAIDGROUP else: pooltype = CONSTANTS.TPPOOL break else: msg = (_('_create_volume, volume: %(volume)s, ' 'volumename: %(volumename)s, ' 'poolname: %(poolname)s, ' 'Cannot find this pool on ETERNUS.') % {'volume': volume, 'volumename': volumename, 'poolname': eternus_pool}) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) LOG.debug('_create_volume, ' 'CreateOrModifyElementFromStoragePool, ' 'ConfigService: %(service)s, ' 'ElementName: %(volumename)s, ' 'InPool: %(eternus_pool)s, ' 'ElementType: %(pooltype)u, ' 'Size: %(volumesize)u.', {'service': configservice, 'volumename': volumename, 'eternus_pool': eternus_pool, 'pooltype': pooltype, 'volumesize': volumesize}) # Invoke method for create volume. rc, errordesc, job = self._exec_eternus_service( 'CreateOrModifyElementFromStoragePool', configservice, ElementName=volumename, InPool=pool_instance.path, ElementType=self._pywbem_uint(pooltype, '16'), Size=self._pywbem_uint(volumesize, '64')) else: msg = (_('create_volume, volume id: %(vid)s, ' 'volume size: %(vsize)s, ' 'Cannot find volume host.') % {'vid': volume['id'], 'vsize': volume['size']}) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) if rc == CONSTANTS.VOLUMENAME_IN_USE: # Element Name is in use. LOG.warning('_create_volume, ' 'volumename: %(volumename)s, ' 'Element Name is in use.', {'volumename': volumename}) element = self._find_lun(volume) elif rc != CONSTANTS.RC_OK: msg = (_('_create_volume, ' 'volumename: %(volumename)s, ' 'poolname: %(eternus_pool)s, ' 'Return code: %(rc)lu, ' 'Error: %(errordesc)s.') % {'volumename': volumename, 'eternus_pool': eternus_pool, 'rc': rc, 'errordesc': errordesc}) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) else: element = job['TheElement'] # Get eternus model name. 
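# FUJITSU_StorageProduct describes the array itself; the first instance's
# IdentifyingNumber is recorded below as the FJ_Backend metadata entry of
# the new volume.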
try: systemnamelist = self._enum_eternus_instances( 'FUJITSU_StorageProduct', conn=self.conn) except Exception: msg = (_('_create_volume, ' 'volume: %(volume)s, ' 'EnumerateInstances, ' 'cannot connect to ETERNUS.') % {'volume': volume}) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) LOG.debug('_create_volume, ' 'volumename: %(volumename)s, ' 'Backend: %(backend)s, ' 'Pool Name: %(eternus_pool)s, ' 'Pool Type: %(pooltype)s.', {'volumename': volumename, 'backend': systemnamelist[0]['IdentifyingNumber'], 'eternus_pool': eternus_pool, 'pooltype': CONSTANTS.POOL_TYPE_dic[pooltype]}) # Create return value. element_path = { 'classname': element.classname, 'keybindings': { 'SystemName': element['SystemName'], 'DeviceID': element['DeviceID'], }, 'vol_name': volumename, } volume_no = self._get_volume_number(element) metadata = { 'FJ_Backend': systemnamelist[0]['IdentifyingNumber'], 'FJ_Volume_Name': volumename, 'FJ_Volume_No': volume_no, 'FJ_Pool_Name': eternus_pool, 'FJ_Pool_Type': CONSTANTS.POOL_TYPE_dic[pooltype], } return element_path, metadata def create_pool_info(self, pool_instance, volume_count, pool_type, **kwargs): """Create pool information from pool instance.""" LOG.debug('create_pool_info, pool_instance: %(pool)s, ' 'volume_count: %(volcount)s, pool_type: %(ptype)s.', {'pool': pool_instance, 'volcount': volume_count, 'ptype': pool_type}) if pool_type not in CONSTANTS.POOL_TYPE_list: msg = (_('Invalid pool type was specified : %s.') % pool_type) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) total_mb = pool_instance['TotalManagedSpace'] * 1.0 / units.Mi free_mb = pool_instance['RemainingManagedSpace'] * 1.0 / units.Mi fragment_mb = free_mb if kwargs.get('provisioned_capacity_mb'): prov_mb = kwargs.get('provisioned_capacity_mb') else: prov_mb = total_mb - free_mb if pool_type == 'RAID': useable_mb = free_mb if kwargs.get('fragment_size'): if kwargs.get('fragment_size') != -1: fragment_mb = kwargs.get('fragment_size') / (2 * 1024) else: fragment_mb = useable_mb else: max_capacity_mb = total_mb * float( self.configuration.max_over_subscription_ratio) useable_mb = max_capacity_mb - prov_mb pool = { 'name': pool_instance['ElementName'], 'path': pool_instance.path, 'total_capacity_gb': int(total_mb / 1024), 'free_capacity_gb': int(free_mb / 1024), 'type': pool_type, 'volume_count': volume_count, 'provisioned_capacity_gb': int(prov_mb / 1024), 'useable_capacity_gb': int(useable_mb / 1024), 'useable_capacity_mb': useable_mb, 'fragment_capacity_mb': fragment_mb, } LOG.debug('create_pool_info, pool: %s.', pool) return pool def create_volume_from_snapshot(self, volume, snapshot): """Creates a volume from a snapshot.""" LOG.debug('create_volume_from_snapshot, ' 'volume id: %(vid)s, volume size: %(vsize)s, ' 'snapshot id: %(sid)s.', {'vid': volume['id'], 'vsize': volume['size'], 'sid': snapshot['id']}) self.conn = self._get_eternus_connection() source_volume_instance = self._find_lun(snapshot) # Check the existence of source volume. if source_volume_instance is None: msg = _('create_volume_from_snapshot, ' 'Source Volume does not exist in ETERNUS.') LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) # Create volume for the target volume. 
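# create_volume() returns provider_location as a stringified dict; eval()
# restores it so the newly created target volume can be fetched as a CIM
# instance before the snapshot data is copied onto it.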
model_update = self.create_volume(volume) element_path = eval(model_update.get('provider_location')) metadata = model_update.get('metadata') target_volume_instancename = self._create_eternus_instance_name( element_path['classname'], element_path['keybindings'].copy()) try: target_volume_instance = ( self._get_eternus_instance(target_volume_instancename)) except Exception: msg = (_('create_volume_from_snapshot, ' 'target volume instancename: %(volume_instancename)s, ' 'Get Instance Failed.') % {'volume_instancename': target_volume_instancename}) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) self._create_local_cloned_volume(target_volume_instance, source_volume_instance) return (element_path, metadata) def create_cloned_volume(self, volume, src_vref): """Create clone of the specified volume.""" LOG.debug('create_cloned_volume, ' 'tgt: (%(tid)s, %(tsize)s), src: (%(sid)s, %(ssize)s).', {'tid': volume['id'], 'tsize': volume['size'], 'sid': src_vref['id'], 'ssize': src_vref['size']}) self.conn = self._get_eternus_connection() source_volume_instance = self._find_lun(src_vref) if source_volume_instance is None: msg = _('create_cloned_volume, ' 'Source Volume does not exist in ETERNUS.') LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) model_update = self.create_volume(volume) element_path = eval(model_update.get('provider_location')) metadata = model_update.get('metadata') target_volume_instancename = self._create_eternus_instance_name( element_path['classname'], element_path['keybindings'].copy()) try: target_volume_instance = ( self._get_eternus_instance(target_volume_instancename)) except Exception: msg = (_('create_cloned_volume, ' 'target volume instancename: %(volume_instancename)s, ' 'Get Instance Failed.') % {'volume_instancename': target_volume_instancename}) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) self._create_local_cloned_volume(target_volume_instance, source_volume_instance) return (element_path, metadata) @lockutils.synchronized('ETERNUS-vol', 'cinder-', True) def _create_local_cloned_volume(self, tgt_vol_instance, src_vol_instance): """Create local clone of the specified volume.""" s_volumename = src_vol_instance['ElementName'] t_volumename = tgt_vol_instance['ElementName'] LOG.debug('_create_local_cloned_volume, ' 'tgt volume name: %(t_volumename)s, ' 'src volume name: %(s_volumename)s, ', {'t_volumename': t_volumename, 's_volumename': s_volumename}) # Get replication service for CreateElementReplica. repservice = self._find_eternus_service(CONSTANTS.REPL) if repservice is None: msg = _('_create_local_cloned_volume, ' 'Replication Service not found.') LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) # Invoke method for create cloned volume from volume. 
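# CreateElementReplica is called with SyncType 8, the SMI-S value for a
# full-copy (clone) replica, so the source volume's data is copied onto
# the target volume created above.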
rc, errordesc, job = self._exec_eternus_service( 'CreateElementReplica', repservice, SyncType=self._pywbem_uint(8, '16'), SourceElement=src_vol_instance.path, TargetElement=tgt_vol_instance.path) if rc != CONSTANTS.RC_OK: msg = (_('_create_local_cloned_volume, ' 'volumename: %(volumename)s, ' 'sourcevolumename: %(sourcevolumename)s, ' 'source volume instance: %(source_volume)s, ' 'target volume instance: %(target_volume)s, ' 'Return code: %(rc)lu, ' 'Error: %(errordesc)s.') % {'volumename': t_volumename, 'sourcevolumename': s_volumename, 'source_volume': src_vol_instance.path, 'target_volume': tgt_vol_instance.path, 'rc': rc, 'errordesc': errordesc}) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) LOG.debug('_create_local_cloned_volume, out: %(rc)s, %(job)s.', {'rc': rc, 'job': job}) def delete_volume(self, volume): """Delete volume on ETERNUS.""" LOG.debug('delete_volume, volume id: %(vid)s.', {'vid': volume['id']}) vol_exist = self._delete_volume_setting(volume) if not vol_exist: LOG.debug('delete_volume, volume not found in 1st check.') return try: self._delete_volume(volume) except Exception as ex: msg = (_('delete_volume, ' 'delete volume failed, ' 'Error information: %s.') % ex) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) @lockutils.synchronized('ETERNUS-vol', 'cinder-', True) def _delete_volume_setting(self, volume): """Delete volume setting (HostAffinity, CopySession) on ETERNUS.""" LOG.debug('_delete_volume_setting, ' 'volume id: %(vid)s.', {'vid': volume['id']}) # Check the existence of volume. volumename = self._get_volume_name(volume) vol_instance = self._find_lun(volume) if not vol_instance: LOG.info('_delete_volume_setting, volumename:%(volumename)s, ' 'volume not found on ETERNUS.', {'volumename': volumename}) return False # Delete host-affinity setting remained by unexpected error. self._unmap_lun(volume, None, force=True) # Check copy session relating to target volume. cpsessionlist = self._find_copysession(vol_instance) delete_copysession_list = [] wait_copysession_list = [] for cpsession in cpsessionlist: LOG.debug('_delete_volume_setting, ' 'volumename: %(volumename)s, ' 'cpsession: %(cpsession)s.', {'volumename': volumename, 'cpsession': cpsession}) if cpsession['SyncedElement'] == vol_instance.path: # Copy target : other_volume --(copy)--> vol_instance delete_copysession_list.append(cpsession) elif cpsession['SystemElement'] == vol_instance.path: # Copy source : vol_instance --(copy)--> other volume wait_copysession_list.append(cpsession) LOG.debug('_delete_volume_setting, ' 'wait_cpsession: %(wait_cpsession)s, ' 'delete_cpsession: %(delete_cpsession)s.', {'wait_cpsession': wait_copysession_list, 'delete_cpsession': delete_copysession_list}) for cpsession in wait_copysession_list: self._wait_for_copy_complete(cpsession) for cpsession in delete_copysession_list: self._delete_copysession(cpsession) volume_no = self._get_volume_number(vol_instance) cp_session_list = self._get_copy_sessions_list() for cp in cp_session_list: if cp['Dest Num'] != int(volume_no, 16): continue if cp['Type'] == 'Snap': session_id = cp['Session ID'] param_dict = ({'session-id': session_id}) rc, emsg, clidata = self._exec_eternus_cli( 'stop_copy_session', **param_dict) if rc != CONSTANTS.RC_OK: msg = (_('_delete_volume_setting, ' 'stop_copy_session failed. 
' 'Return code: %(rc)lu, ' 'Error: %(errormsg)s, ' 'Message: %(clidata)s.') % {'rc': rc, 'errormsg': emsg, 'clidata': clidata}) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) break LOG.debug('_delete_volume_setting, ' 'wait_cpsession: %(wait_cpsession)s, ' 'delete_cpsession: %(delete_cpsession)s, complete.', {'wait_cpsession': wait_copysession_list, 'delete_cpsession': delete_copysession_list}) return True @lockutils.synchronized('ETERNUS-vol', 'cinder-', True) def _delete_volume(self, volume): """Delete volume on ETERNUS.""" LOG.debug('_delete_volume, volume id: %(vid)s.', {'vid': volume['id']}) vol_instance = self._find_lun(volume) if not vol_instance: LOG.debug('_delete_volume, volume not found in 2nd check, ' 'but no problem.') return volumename = vol_instance['ElementName'] configservice = self._find_eternus_service(CONSTANTS.STOR_CONF) if not configservice: msg = (_('_delete_volume, volumename: %(volumename)s, ' 'Storage Configuration Service not found.') % {'volumename': volumename}) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) LOG.debug('_delete_volume, volumename: %(volumename)s, ' 'vol_instance: %(vol_instance)s, ' 'Method: ReturnToStoragePool.', {'volumename': volumename, 'vol_instance': vol_instance.path}) # Invoke method for delete volume rc, errordesc, job = self._exec_eternus_service( 'ReturnToStoragePool', configservice, TheElement=vol_instance.path) if rc != CONSTANTS.RC_OK: msg = (_('_delete_volume, volumename: %(volumename)s, ' 'Return code: %(rc)lu, ' 'Error: %(errordesc)s.') % {'volumename': volumename, 'rc': rc, 'errordesc': errordesc}) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) LOG.debug('_delete_volume, volumename: %(volumename)s, ' 'Return code: %(rc)lu, ' 'Error: %(errordesc)s.', {'volumename': volumename, 'rc': rc, 'errordesc': errordesc}) def _delete_volume_after_error(self, volumename): # If error occures while set qos after create a volume,then delete # the created volume. LOG.debug('_delete_volume_after_error, ' 'volume name: %(volumename)s.', {'volumename': volumename}) param_dict = {'volume-name': volumename} rc, errordesc, data = self._exec_eternus_cli( 'delete_volume', **param_dict) if rc == CONSTANTS.RC_OK: msg = (_('_delete_volume_after_error, ' 'volumename: %(volumename)s, ' 'Delete Successed.') % {'volumename': volumename}) else: msg = (_('_delete_volume_after_error, ' 'volumename: %(volumename)s, ' 'Delete Failed.') % {'volumename': volumename}) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) def create_snapshot(self, snapshot): """Create snapshot using SnapOPC.""" LOG.debug('create_snapshot, ' 'snapshot id: %(sid)s, volume id: %(vid)s.', {'sid': snapshot['id'], 'vid': snapshot['volume_id']}) volume = snapshot['volume'] s_volumename = self._get_volume_name(volume) vol_instance = self._find_lun(volume) # Check the existence of volume. if not vol_instance: # Volume not found on ETERNUS. 
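# Fail fast: a snapshot can only be taken from a source volume that still
# exists on the array.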
msg = (_('create_snapshot, ' 'volumename: %(s_volumename)s, ' 'source volume not found on ETERNUS.') % {'s_volumename': s_volumename}) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) model_update = self._create_snapshot(snapshot) return model_update @lockutils.synchronized('ETERNUS-vol', 'cinder-', True) def _create_snapshot(self, snapshot): LOG.debug('_create_snapshot, ' 'snapshot id: %(sid)s, volume id: %(vid)s.', {'sid': snapshot['id'], 'vid': snapshot['volume_id']}) snapshotname = snapshot['name'] volume = snapshot['volume'] volumename = snapshot['volume_name'] d_volumename = self._get_volume_name(snapshot, use_id=True) vol_instance = self._find_lun(volume) service_name = (CONSTANTS.REPL if self.model_name != CONSTANTS.DX_S2 else CONSTANTS.STOR_CONF) volume_size = snapshot['volume']['size'] * 1024 smis_service = self._find_eternus_service(service_name) if not smis_service: msg = (_('_create_snapshot, ' 'volumename: %(volumename)s, ' '%(servicename)s not found.') % {'volumename': volumename, 'servicename': service_name}) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) # Get all pools information on ETERNUS. pools_instance_list = self._find_all_pools_instances(self.conn) # Get the user specified pool name. pool_name_list = self._get_drvcfg('EternusSnapPool', multiple=True) poollen = len(pool_name_list) for i in range(poollen): # Traverse the user specified pool one by one. pool_instances, notfound_poolnames = self._find_pools( [pool_name_list[i]], self.conn, poolinstances_list=pools_instance_list) if pool_instances['pools']: useable = pool_instances['pools'][0]['useable_capacity_mb'] poolname = pool_instances['pools'][0]['pool_name'] istpp = pool_instances['pools'][0]['thin_provisioning_support'] if useable < 24 + volume_size: continue if not istpp: # If it is a RAID Group pool, we need to determine # the number of volumes and fragmentation capacity. # The number of RAID Group pool volumes cannot exceed 128. # The minimum space required for snapshot is 24MB. fragment = pool_instances['pools'][0][ 'fragment_capacity_mb'] volcnt = pool_instances['pools'][0]['total_volumes'] if volcnt >= 128 or fragment < 24 + volume_size: LOG.debug('_create_volume, The pool: %(poolname)s ' 'can not create volume. ' 'Volume Count: %(volcnt)s, ' 'Maximum fragment capacity: %(frag)s.', {'poolname': poolname, 'volcnt': volcnt, 'frag': fragment}) continue pool_instance = pool_instances['pools'][0] eternus_pool = pool_instance['pool_name'] pool = pool_instance['path'] if 'RSP' in pool['InstanceID']: pooltype = CONSTANTS.RAIDGROUP else: pooltype = CONSTANTS.TPPOOL if self.use_cli_copy is False: LOG.debug('_create_snapshot, ' 'snapshotname: %(snapshotname)s, ' 'source volume name: %(volumename)s, ' 'vol_instance.path: %(vol_instance)s, ' 'dest_volumename: %(d_volumename)s, ' 'pool: %(pool)s, ' 'Invoke CreateElementReplica.', {'snapshotname': snapshotname, 'volumename': volumename, 'vol_instance': vol_instance.path, 'd_volumename': d_volumename, 'pool': eternus_pool}) if self.model_name != CONSTANTS.DX_S2: smis_method = 'CreateElementReplica' params = { 'ElementName': d_volumename, 'TargetPool': pool, 'SyncType': self._pywbem_uint(7, '16'), 'SourceElement': vol_instance.path } else: smis_method = 'CreateReplica' params = { 'ElementName': d_volumename, 'TargetPool': pool, 'CopyType': self._pywbem_uint(4, '16'), 'SourceElement': vol_instance.path } # Invoke method for create snapshot. 
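# Non-S2 models use CreateElementReplica with SyncType 7 (snapshot);
# DX S2 falls back to CreateReplica with CopyType 4, as selected above.
# On failure the loop continues with the next candidate snap pool.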
rc, errordesc, job = self._exec_eternus_service( smis_method, smis_service, **params) if rc != CONSTANTS.RC_OK: LOG.warning('_create_snapshot, ' 'snapshotname: %(snapshotname)s, ' 'source volume name: %(volumename)s, ' 'vol_instance.path: %(vol_instance)s, ' 'dest volume name: %(d_volumename)s, ' 'pool: %(pool)s, Return code: %(rc)lu, ' 'Error: %(errordesc)s.', {'snapshotname': snapshotname, 'volumename': volumename, 'vol_instance': vol_instance.path, 'd_volumename': d_volumename, 'pool': eternus_pool, 'rc': rc, 'errordesc': errordesc}) continue else: element = job['TargetElement'] d_volume_no = self._get_volume_number(element) break else: if pooltype == CONSTANTS.RAIDGROUP: LOG.warning('_create_snapshot, ' 'Can not create SDV by SMI-S.') continue configservice = self._find_eternus_service( CONSTANTS.STOR_CONF) vol_size = snapshot['volume']['size'] * units.Gi LOG.debug('_create_snapshot, ' 'CreateOrModifyElementFromStoragePool, ' 'ConfigService: %(service)s, ' 'ElementName: %(volumename)s, ' 'InPool: %(eternus_pool)s, ' 'ElementType: %(pooltype)u, ' 'Size: %(volumesize)u.', {'service': configservice, 'volumename': d_volumename, 'eternus_pool': pool, 'pooltype': pooltype, 'volumesize': vol_size}) # Invoke method for create volume. rc, errordesc, job = self._exec_eternus_service( 'CreateOrModifyElementFromStoragePool', configservice, ElementName=d_volumename, InPool=pool, ElementType=self._pywbem_uint(pooltype, '16'), Size=self._pywbem_uint(vol_size, '64')) if rc == CONSTANTS.RG_VOLNUM_MAX: LOG.warning('_create_snapshot, RAID Group pool: %s. ' 'Maximum number of Logical Volume in a ' 'RAID Group has been reached. ' 'Try other pool.', pool) continue elif rc != CONSTANTS.RC_OK: msg = (_('_create_volume, ' 'volumename: %(volumename)s, ' 'poolname: %(eternus_pool)s, ' 'Return code: %(rc)lu, ' 'Error: %(errordesc)s.') % {'volumename': volumename, 'eternus_pool': pool, 'rc': rc, 'errordesc': errordesc}) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) else: element = job['TheElement'] d_volume_no = self._get_volume_number(element) volume_no = self._get_volume_number(vol_instance) volume_lba = int(vol_size / 512) param_dict = ( {'mode': 'normal', 'source-volume-number': int(volume_no, 16), 'destination-volume-number': int(d_volume_no, 16), 'source-lba': 0, 'destination-lba': 0, 'size': volume_lba}) rc, emsg, clidata = self._exec_eternus_cli( 'start_copy_snap_opc', **param_dict) if rc != CONSTANTS.RC_OK: msg = (_('_create_snapshot, ' 'create_volume failed. ' 'Return code: %(rc)lu, ' 'Error: %(errormsg)s, ' 'Message: %(clidata)s.') % {'rc': rc, 'errormsg': emsg, 'clidata': clidata}) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) break else: if notfound_poolnames: LOG.warning('_create_snapshot, ' 'pool names: %(notfound_poolnames)s ' 'are not found.', {'notfound_poolnames': notfound_poolnames}) else: # It means that all RAID Group pools do not meet # the volume limit (<128), and the creation request of # this volume will be rejected. # If there is a thin pool available, it will not enter this branch. msg = (_('_create_snapshot, volume id: %(sid)s, ' 'All pools cannot create this volume.') % {'sid': snapshot['id']}) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) # Create return value. 
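# provider_location records the CIM class name and key bindings of the
# snap data volume so later operations can locate it without searching
# by name.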
element_path = { 'classname': element.classname, 'keybindings': { 'SystemName': element['SystemName'], 'DeviceID': element['DeviceID'], }, 'vol_name': d_volumename, } metadata = { 'FJ_SDV_Name': d_volumename, 'FJ_SDV_No': d_volume_no, 'FJ_Pool_Name': eternus_pool, 'FJ_Pool_Type': pooltype } d_metadata = self.get_metadata(snapshot) d_metadata.update(metadata) model_update = { 'provider_location': str(element_path), 'metadata': d_metadata, } return model_update def delete_snapshot(self, snapshot): """Delete snapshot.""" LOG.debug('delete_snapshot, ' 'snapshot id: %(sid)s, volume id: %(vid)s.', {'sid': snapshot['id'], 'vid': snapshot['volume_id']}) self.delete_volume(snapshot) def initialize_connection(self, volume, connector): """Allow connection to connector and return connection info.""" LOG.debug('initialize_connection, ' 'volume id: %(vid)s, protocol: %(prtcl)s.', {'vid': volume['id'], 'prtcl': self.protocol}) self.conn = self._get_eternus_connection() vol_instance = self._find_lun(volume) # Check the existence of volume if vol_instance is None: # Volume not found msg = (_('initialize_connection, ' 'volume: %(volume)s, ' 'Volume not found.') % {'volume': volume['name']}) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) target_portlist = self._get_target_port() mapdata = self._get_mapdata(vol_instance, connector, target_portlist) if mapdata: # volume is already mapped target_lun = mapdata.get('target_lun', None) target_luns = mapdata.get('target_luns', None) LOG.info('initialize_connection, ' 'volume: %(volume)s, ' 'target_lun: %(target_lun)s, ' 'target_luns: %(target_luns)s, ' 'Volume is already mapped.', {'volume': volume['name'], 'target_lun': target_lun, 'target_luns': target_luns}) else: self._map_lun(vol_instance, connector, target_portlist) mapdata = self._get_mapdata(vol_instance, connector, target_portlist) mapdata['target_discovered'] = True mapdata['volume_id'] = volume['id'] if self.protocol == 'fc': device_info = {'driver_volume_type': 'fibre_channel', 'data': mapdata} elif self.protocol == 'iSCSI': device_info = {'driver_volume_type': 'iscsi', 'data': mapdata} LOG.debug('initialize_connection, ' 'device_info:%(info)s.', {'info': device_info}) return device_info def terminate_connection(self, volume, connector, force=False, **kwargs): """Disallow connection from connector.""" LOG.debug('terminate_connection, ' 'volume id: %(vid)s, protocol: %(prtcl)s, force: %(frc)s.', {'vid': volume['id'], 'prtcl': self.protocol, 'frc': force}) self.conn = self._get_eternus_connection() force = True if not connector else force map_exist = self._unmap_lun(volume, connector, force) LOG.debug('terminate_connection, map_exist: %s.', map_exist) return map_exist def build_fc_init_tgt_map(self, connector, target_wwn=None): """Build parameter for Zone Manager""" LOG.debug('build_fc_init_tgt_map, target_wwn: %s.', target_wwn) initiatorlist = self._find_initiator_names(connector) if target_wwn is None: target_wwn = [] target_portlist = self._get_target_port() for target_port in target_portlist: target_wwn.append(target_port['Name']) init_tgt_map = {initiator: target_wwn for initiator in initiatorlist} LOG.debug('build_fc_init_tgt_map, ' 'initiator target mapping: %s.', init_tgt_map) return init_tgt_map def check_attached_volume_in_zone(self, connector): """Check Attached Volume in Same FC Zone or not""" LOG.debug('check_attached_volume_in_zone, connector: %s.', connector) aglist = self._find_affinity_group(connector) if not aglist: attached = False else: attached = True 
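# An existing affinity group for this connector means the host already
# has a volume attached through the same FC zone.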
LOG.debug('check_attached_volume_in_zone, attached: %s.', attached) return attached @lockutils.synchronized('ETERNUS-vol', 'cinder-', True) def extend_volume(self, volume, new_size): """Extend volume on ETERNUS.""" LOG.debug('extend_volume, volume id: %(vid)s, ' 'size: %(size)s, new_size: %(nsize)s.', {'vid': volume['id'], 'size': volume['size'], 'nsize': new_size}) self.conn = self._get_eternus_connection() volumename = self._get_volume_name(volume) # Get volume instance. volume_instance = self._find_lun(volume) if not volume_instance: msg = (_('extend_volume, ' 'volumename: %(volumename)s, ' 'not found.') % {'volumename': volumename}) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) LOG.debug('extend_volume, volumename: %(volumename)s, ' 'volumesize: %(volumesize)u, ' 'volume instance: %(volume_instance)s.', {'volumename': volumename, 'volumesize': new_size, 'volume_instance': volume_instance.path}) # Get poolname from driver configuration file. pool_name, pool = self._find_pool_from_volume(volume_instance) # Check the existence of pool. if not pool: msg = (_('extend_volume, ' 'eternus_pool: %(eternus_pool)s, ' 'not found.') % {'eternus_pool': pool_name}) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) # Set pooltype. if 'RSP' in pool['InstanceID']: pooltype = CONSTANTS.RAIDGROUP else: pooltype = CONSTANTS.TPPOOL if pooltype == CONSTANTS.RAIDGROUP: extend_size = str(new_size - volume['size']) + 'gb' param_dict = { 'volume-name': volumename, 'rg-name': pool_name, 'size': extend_size } rc, errordesc, data = self._exec_eternus_cli( 'expand_volume', **param_dict) if rc != CONSTANTS.RC_OK: msg = (_('extend_volume, ' 'volumename: %(volumename)s, ' 'Return code: %(rc)lu, ' 'Error: %(errordesc)s, ' 'Message: %(job)s, ' 'PoolType: %(pooltype)s.') % {'volumename': volumename, 'rc': rc, 'errordesc': errordesc, 'pooltype': CONSTANTS.POOL_TYPE_dic[pooltype], 'job': data}) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) else: # Pooltype is TPPOOL. volumesize = new_size * units.Gi configservice = self._find_eternus_service(CONSTANTS.STOR_CONF) if not configservice: msg = (_('extend_volume, volume: %(volume)s, ' 'volumename: %(volumename)s, ' 'eternus_pool: %(eternus_pool)s, ' 'Storage Configuration Service not found.') % {'volume': volume, 'volumename': volumename, 'eternus_pool': pool_name}) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) LOG.debug('extend_volume, ' 'CreateOrModifyElementFromStoragePool, ' 'ConfigService: %(service)s, ' 'ElementName: %(volumename)s, ' 'InPool: %(eternus_pool)s, ' 'ElementType: %(pooltype)u, ' 'Size: %(volumesize)u, ' 'TheElement: %(vol_instance)s.', {'service': configservice, 'volumename': volumename, 'eternus_pool': pool_name, 'pooltype': pooltype, 'volumesize': volumesize, 'vol_instance': volume_instance.path}) # Invoke method for extend volume. 
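# Passing TheElement makes CreateOrModifyElementFromStoragePool resize
# the existing thin-provisioned volume instead of creating a new one.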
rc, errordesc, _x = self._exec_eternus_service( 'CreateOrModifyElementFromStoragePool', configservice, ElementName=volumename, InPool=pool, ElementType=self._pywbem_uint(pooltype, '16'), Size=self._pywbem_uint(volumesize, '64'), TheElement=volume_instance.path) if rc != CONSTANTS.RC_OK: msg = (_('extend_volume, ' 'volumename: %(volumename)s, ' 'Return code: %(rc)lu, ' 'Error: %(errordesc)s, ' 'PoolType: %(pooltype)s.') % {'volumename': volumename, 'rc': rc, 'errordesc': errordesc, 'pooltype': CONSTANTS.POOL_TYPE_dic[pooltype]}) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) LOG.debug('extend_volume, ' 'volumename: %(volumename)s, ' 'Return code: %(rc)lu, ' 'Error: %(errordesc)s, ' 'Pool Name: %(eternus_pool)s, ' 'Pool Type: %(pooltype)s.', {'volumename': volumename, 'rc': rc, 'errordesc': errordesc, 'eternus_pool': pool_name, 'pooltype': CONSTANTS.POOL_TYPE_dic[pooltype]}) return pool_name @lockutils.synchronized('ETERNUS-update', 'cinder-', True) def update_volume_stats(self): """Get pool capacity.""" self.conn = self._get_eternus_connection() poolname_list = self._get_drvcfg('EternusPool', multiple=True) self._find_pools(poolname_list, self.conn) return (self.stats, poolname_list) def _get_mapdata(self, vol_instance, connector, target_portlist): """return mapping information.""" mapdata = None multipath = connector.get('multipath', False) LOG.debug('_get_mapdata, volume name: %(vname)s, ' 'protocol: %(prtcl)s, multipath: %(mpath)s.', {'vname': vol_instance['ElementName'], 'prtcl': self.protocol, 'mpath': multipath}) # find affinity group # attach the connector and include the volume aglist = self._find_affinity_group(connector, vol_instance) if not aglist: LOG.debug('_get_mapdata, ag_list:%s.', aglist) else: if self.protocol == 'fc': mapdata = self._get_mapdata_fc(aglist, vol_instance, target_portlist) elif self.protocol == 'iSCSI': mapdata = self._get_mapdata_iscsi(aglist, vol_instance, multipath) LOG.debug('_get_mapdata, mapdata: %s.', mapdata) return mapdata def _get_mapdata_fc(self, aglist, vol_instance, target_portlist): """_get_mapdata for FibreChannel.""" target_wwn = [] try: ag_volmaplist = self._reference_eternus_names( aglist[0], ResultClass='CIM_ProtocolControllerForUnit') vo_volmaplist = self._reference_eternus_names( vol_instance.path, ResultClass='CIM_ProtocolControllerForUnit') except pywbem.CIM_Error: msg = (_('_get_mapdata_fc, ' 'getting host-affinity from aglist/vol_instance failed, ' 'affinitygroup: %(ag)s, ' 'ReferenceNames, ' 'cannot connect to ETERNUS.') % {'ag': aglist[0]}) LOG.exception(msg) raise exception.VolumeBackendAPIException(data=msg) volmap = None for vo_volmap in vo_volmaplist: if vo_volmap in ag_volmaplist: volmap = vo_volmap break try: volmapinstance = self._get_eternus_instance( volmap, LocalOnly=False) except pywbem.CIM_Error: msg = (_('_get_mapdata_fc, ' 'getting host-affinity instance failed, ' 'volmap: %(volmap)s, ' 'GetInstance, ' 'cannot connect to ETERNUS.') % {'volmap': volmap}) LOG.exception(msg) raise exception.VolumeBackendAPIException(data=msg) target_lun = int(volmapinstance['DeviceNumber'], 16) for target_port in target_portlist: target_wwn.append(target_port['Name']) mapdata = {'target_wwn': target_wwn, 'target_lun': target_lun} LOG.debug('_get_mapdata_fc, mapdata: %s.', mapdata) return mapdata def _get_mapdata_iscsi(self, aglist, vol_instance, multipath): """_get_mapdata for iSCSI.""" target_portals = [] target_iqns = [] target_luns = [] try: vo_volmaplist = self._reference_eternus_names( vol_instance.path, 
ResultClass='CIM_ProtocolControllerForUnit') except Exception: msg = (_('_get_mapdata_iscsi, ' 'vol_instance: %(vol_instance)s, ' 'ReferenceNames: CIM_ProtocolControllerForUnit, ' 'cannot connect to ETERNUS.') % {'vol_instance': vol_instance}) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) target_properties_list = self._get_eternus_iscsi_properties() target_list = [prop[0] for prop in target_properties_list] properties_list = ( [(prop[1], prop[2]) for prop in target_properties_list]) for ag in aglist: try: iscsi_endpointlist = ( self._assoc_eternus_names( ag, AssocClass='FUJITSU_SAPAvailableForElement', ResultClass='FUJITSU_iSCSIProtocolEndpoint')) except Exception: msg = (_('_get_mapdata_iscsi, ' 'Associators: FUJITSU_SAPAvailableForElement, ' 'cannot connect to ETERNUS.')) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) iscsi_endpoint = iscsi_endpointlist[0] if iscsi_endpoint not in target_list: continue idx = target_list.index(iscsi_endpoint) target_portal, target_iqn = properties_list[idx] try: ag_volmaplist = self._reference_eternus_names( ag, ResultClass='CIM_ProtocolControllerForUnit') except Exception: msg = (_('_get_mapdata_iscsi, ' 'affinitygroup: %(ag)s, ' 'ReferenceNames, ' 'cannot connect to ETERNUS.') % {'ag': ag}) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) volmap = None for vo_volmap in vo_volmaplist: if vo_volmap in ag_volmaplist: volmap = vo_volmap break if volmap is None: continue try: volmapinstance = self._get_eternus_instance( volmap, LocalOnly=False) except Exception: msg = (_('_get_mapdata_iscsi, ' 'volmap: %(volmap)s, ' 'GetInstance, ' 'cannot connect to ETERNUS.') % {'volmap': volmap}) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) target_lun = int(volmapinstance['DeviceNumber'], 16) target_portals.append(target_portal) target_iqns.append(target_iqn) target_luns.append(target_lun) if multipath: mapdata = {'target_portals': target_portals, 'target_iqns': target_iqns, 'target_luns': target_luns} else: mapdata = {'target_portal': target_portals[0], 'target_iqn': target_iqns[0], 'target_lun': target_luns[0]} LOG.debug('_get_mapdata_iscsi, mapdata: %s.', mapdata) return mapdata def _get_drvcfg(self, tagname, filename=None, multiple=False): """Read from driver configuration file.""" if not filename: # Set default configuration file name. filename = self.configuration.cinder_eternus_config_file LOG.debug("_get_drvcfg, input[%(filename)s][%(tagname)s].", {'filename': filename, 'tagname': tagname}) tree = ET.parse(filename) elem = tree.getroot() if not multiple: ret = elem.findtext(".//" + tagname) else: ret = [] for e in elem.findall(".//" + tagname): if e.text and (e.text not in ret): ret.append(e.text) if not ret: msg = (_('_get_drvcfg, ' 'filename: %(filename)s, ' 'tagname: %(tagname)s, ' 'data is None!! 
' 'Please edit driver configuration file and correct.') % {'filename': filename, 'tagname': tagname}) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) return ret def _get_eternus_connection(self, filename=None): """return WBEM connection.""" LOG.debug('_get_eternus_connection, filename: %s.', filename) ip = self._get_drvcfg('EternusIP', filename) port = self._get_drvcfg('EternusPort', filename) user = self._get_drvcfg('EternusUser', filename) passwd = self._get_drvcfg('EternusPassword', filename) url = 'http://' + ip + ':' + port conn = pywbem.WBEMConnection(url, (user, passwd), default_namespace='root/eternus') if conn is None: msg = (_('_get_eternus_connection, ' 'filename: %(filename)s, ' 'ip: %(ip)s, ' 'port: %(port)s, ' 'user: %(user)s, ' 'passwd: ****, ' 'url: %(url)s, ' 'FAILED!!.') % {'filename': filename, 'ip': ip, 'port': port, 'user': user, 'url': url}) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) LOG.debug('_get_eternus_connection, conn: %s.', conn) return conn def _get_volume_name(self, volume, use_id=False): """Get volume_name on ETERNUS from volume on OpenStack.""" LOG.debug('_get_volume_name, volume_id: %s.', volume['id']) if not use_id and volume['provider_location']: location = eval(volume['provider_location']) if 'vol_name' in location: LOG.debug('_get_volume_name, by provider_location, ' 'vol_name: %s.', location['vol_name']) return location['vol_name'] id_code = volume['id'] m = hashlib.md5(usedforsecurity=False) m.update(id_code.encode('utf-8')) # Pylint: disable=E1121. volumename = base64.urlsafe_b64encode(m.digest()).decode() vol_name = CONSTANTS.VOL_PREFIX + str(volumename) if self.model_name == CONSTANTS.DX_S2: LOG.debug('_get_volume_name, volume name is 16 digit.') vol_name = vol_name[:16] LOG.debug('_get_volume_name, by volume id, ' 'vol_name: %s.', vol_name) return vol_name def _find_pool(self, eternus_pool, detail=False): """find Instance or InstanceName of pool by pool name on ETERNUS.""" LOG.debug('_find_pool, pool name: %s.', eternus_pool) tppoollist = [] rgpoollist = [] # Get pools info form CIM instance(include info about instance path). try: tppoollist = self._enum_eternus_instances( 'FUJITSU_ThinProvisioningPool') rgpoollist = self._enum_eternus_instances( 'FUJITSU_RAIDStoragePool') except Exception: msg = (_('_find_pool, ' 'eternus_pool:%(eternus_pool)s, ' 'EnumerateInstances, ' 'cannot connect to ETERNUS.') % {'eternus_pool': eternus_pool}) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) # Make total pools list. poollist = tppoollist + rgpoollist # One eternus backend has only one special pool name # so just use pool name can get the target pool. for pool in poollist: if pool['ElementName'] == eternus_pool: poolinstance = pool break else: poolinstance = None if poolinstance is None: ret = None elif detail is True: ret = poolinstance else: ret = poolinstance.path LOG.debug('_find_pool, pool: %s.', ret) return ret def _find_all_pools_instances(self, conn): LOG.debug('_find_all_pools_instances, conn: %s', conn) try: tppoollist = self._enum_eternus_instances( 'FUJITSU_ThinProvisioningPool', conn=conn) rgpoollist = self._enum_eternus_instances( 'FUJITSU_RAIDStoragePool', conn=conn) except Exception: msg = _('_find_pool, ' 'eternus_pool:%(eternus_pool)s, ' 'EnumerateInstances, ' 'cannot connect to ETERNUS.') LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) # Make total pools list. 
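# Illustrative aside, not part of the driver: a minimal sketch of how
# _get_volume_name above builds the ETERNUS-side name from the OpenStack
# volume id when provider_location carries no 'vol_name'. The 'FJosv_'
# prefix stands in for CONSTANTS.VOL_PREFIX and the 16-character cap mirrors
# the DX S2 branch; both are assumptions made only for this example.
import base64
import hashlib

def example_eternus_volume_name(volume_id, is_dx_s2=False, prefix='FJosv_'):
    m = hashlib.md5(usedforsecurity=False)
    m.update(volume_id.encode('utf-8'))
    name = prefix + base64.urlsafe_b64encode(m.digest()).decode()
    # DX S2 arrays only accept 16-character volume names.
    return name[:16] if is_dx_s2 else name
# Any UUID string works as input; the result is the prefix plus 24 url-safe
# base64 characters, truncated to 16 characters on DX S2.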
tppools = [(tppool, 'TPP') for tppool in tppoollist] rgpools = [(rgpool, 'RAID') for rgpool in rgpoollist] poollist = tppools + rgpools LOG.debug('_find_all_pools_instances, poollist: %s', len(poollist)) return poollist def _find_pools(self, poolname_list, conn, poolinstances_list=None): """Find pool instances by using pool name on ETERNUS.""" LOG.debug('_find_pools, pool names: %s.', poolname_list) target_poolname = list(poolname_list) pools = [] # Get pools info from CIM instance(include info about instance path). if not poolinstances_list: poollist = self._find_all_pools_instances(conn) is_create = False else: poollist = poolinstances_list is_create = True for pool, ptype in poollist: poolname = pool['ElementName'] LOG.debug('_find_pools, ' 'pool: %(pool)s, ptype: %(ptype)s.', {'pool': poolname, 'ptype': ptype}) volume_count = None provisioned_capacity_mb = None fragment_size = None if poolname in target_poolname: if ptype == 'TPP': param_dict = { 'pool-name': poolname } rc, errordesc, data = self._exec_eternus_cli( 'show_pool_provision', **param_dict) if rc != CONSTANTS.RC_OK: msg = (_('_find_pools, show_pool_provision, ' 'pool name: %(pool_name)s, ' 'Return code: %(rc)lu, ' 'Error: %(errordesc)s, ' 'Message: %(job)s.') % {'pool_name': poolname, 'rc': rc, 'errordesc': errordesc, 'job': data}) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) provisioned_capacity_mb = data elif ptype == 'RAID': # Get volume number and fragment capacity information # only at creation time. try: volume_list = self._assoc_eternus_names( pool.path, conn=conn, AssocClass='FUJITSU_AllocatedFromStoragePool', ResultClass='FUJITSU_StorageVolume') volume_count = len(volume_list) except Exception: msg = (_('_find_pools, ' 'poolname: %(poolname)s, ' 'pooltype: %(ptype)s, ' 'Associator Names, ' 'cannot connect to ETERNUS.') % {'ptype': ptype, 'poolname': poolname}) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) try: sdpv_list = self._assoc_eternus_names( pool.path, conn=conn, AssocClass='FUJITSU_AllocatedFromStoragePool', ResultClass='FUJITSU_SDPVPool') volume_count += len(sdpv_list) except Exception: msg = (_('_find_pools, ' 'pool name: %(poolname)s, ' 'Associator Names FUJITSU_SDPVPool, ' 'cannot connect to ETERNUS.') % {'poolname': poolname}) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) try: fragment_list = self._assoc_eternus( pool.path, conn=conn, PropertyList=['NumberOfBlocks'], AssocClass='FUJITSU_AssociatedRemainingExtent', ResultClass='FUJITSU_FreeExtent') if fragment_list: fragment_size = max( fragment_list, key=lambda x: x['NumberOfBlocks']) else: fragment_size = {'NumberOfBlocks': 0} except Exception: # S2 models do not support this query. fragment_size = {'NumberOfBlocks': -1} fragment_size = fragment_size['NumberOfBlocks'] poolinfo = self.create_pool_info( pool, volume_count, ptype, provisioned_capacity_mb=provisioned_capacity_mb, fragment_size=fragment_size) target_poolname.remove(poolname) pools.append((poolinfo, poolname)) if not target_poolname: break if not pools: LOG.warning('_find_pools, all the EternusPools in driver ' 'configuration file are not exist. ' 'Please edit driver configuration file.') # Sort pools in the order defined in driver configuration file. 
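# Illustrative aside, not part of the driver: the comprehension just below
# re-orders the discovered pools so they match the order of the EternusPool
# entries in the driver configuration file. The same idiom as a standalone
# sketch, with made-up pool names:
def example_order_by_config(config_names, found_pairs):
    # found_pairs is a list of (pool_info, pool_name) tuples in discovery
    # order; the result follows config_names instead.
    return [info for name in config_names
            for info, pname in found_pairs if name == pname]
# example_order_by_config(['poolA', 'poolB'], [(2, 'poolB'), (1, 'poolA')])
# -> [1, 2]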
sorted_pools = ( [pool for name in poolname_list for pool, pname in pools if name == pname]) LOG.debug('_find_pools, ' 'pools: %(pools)s, ' 'notfound_pools: %(notfound_pools)s.', {'pools': pools, 'notfound_pools': target_poolname}) pools_stats = {'pools': []} for pool in sorted_pools: single_pool = {} if pool['type'] == 'TPP': thin_enabled = True max_ratio = self.configuration.max_over_subscription_ratio else: thin_enabled = False max_ratio = 1 single_pool['total_volumes'] = pool['volume_count'] single_pool['fragment_capacity_mb'] = \ pool['fragment_capacity_mb'] single_pool.update(dict( path=pool['path'], pool_name=pool['name'], total_capacity_gb=pool['total_capacity_gb'], free_capacity_gb=pool['free_capacity_gb'], provisioned_capacity_gb=pool['provisioned_capacity_gb'], useable_capacity_gb=pool['useable_capacity_gb'], thin_provisioning_support=thin_enabled, thick_provisioning_support=not thin_enabled, max_over_subscription_ratio=max_ratio, )) if is_create: single_pool['useable_capacity_mb'] = \ pool['useable_capacity_mb'] single_pool['multiattach'] = True pools_stats['pools'].append(single_pool) self.stats['shared_targets'] = True self.stats['backend_state'] = 'up' self.stats['pools'] = pools_stats['pools'] return self.stats, target_poolname def _find_eternus_service(self, classname): """find CIM instance about service information.""" LOG.debug('_find_eternus_service, ' 'classname: %s.', classname) try: services = self._enum_eternus_instance_names(str(classname)) except Exception: msg = (_('_find_eternus_service, ' 'classname: %(classname)s, ' 'EnumerateInstanceNames, ' 'cannot connect to ETERNUS.') % {'classname': classname}) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) ret = services[0] LOG.debug('_find_eternus_service, ' 'classname: %(classname)s, ' 'ret: %(ret)s.', {'classname': classname, 'ret': ret}) return ret @lockutils.synchronized('ETERNUS-SMIS-exec', 'cinder-', True) @utils.retry(exception.VolumeBackendAPIException) def _exec_eternus_service(self, classname, instanceNameList, **param_dict): """Execute SMI-S Method.""" LOG.debug('_exec_eternus_service, ' 'classname: %(a)s, ' 'instanceNameList: %(b)s, ' 'parameters: %(c)s.', {'a': classname, 'b': instanceNameList, 'c': param_dict}) rc = None retdata = None # Use InvokeMethod. 
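# Illustrative aside, not part of the driver: how the per-pool stats
# assembled above translate a TPP (thin) versus RAID (thick) pool into the
# capability flags the scheduler sees. The default ratio of 20.0 is made up
# for the example; only the flag logic mirrors the code.
def example_pool_capabilities(pool_type, max_over_subscription_ratio=20.0):
    thin = (pool_type == 'TPP')
    return {
        'thin_provisioning_support': thin,
        'thick_provisioning_support': not thin,
        'max_over_subscription_ratio':
            max_over_subscription_ratio if thin else 1,
    }
# example_pool_capabilities('TPP')  -> thin flags with oversubscription
# example_pool_capabilities('RAID') -> thick flags, ratio pinned to 1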
try: rc, retdata = self.conn.InvokeMethod( classname, instanceNameList, **param_dict) except Exception: if rc is None: msg = (_('_exec_eternus_service, ' 'classname: %(classname)s, ' 'InvokeMethod, ' 'cannot connect to ETERNUS.') % {'classname': classname}) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) # If the result has job information, wait for job complete if "Job" in retdata: rc = self._wait_for_job_complete(self.conn, retdata) if rc == CONSTANTS.DEVICE_IS_BUSY: msg = _('Device is in Busy state') raise exception.VolumeBackendAPIException(data=msg) errordesc = CONSTANTS.RETCODE_dic.get(str(rc), CONSTANTS.UNDEF_MSG) ret = (rc, errordesc, retdata) LOG.debug('_exec_eternus_service, ' 'classname: %(a)s, ' 'instanceNameList: %(b)s, ' 'parameters: %(c)s, ' 'Return code: %(rc)s, ' 'Error: %(errordesc)s, ' 'Return data: %(retdata)s.', {'a': classname, 'b': instanceNameList, 'c': param_dict, 'rc': rc, 'errordesc': errordesc, 'retdata': retdata}) return ret @lockutils.synchronized('ETERNUS-SMIS-other', 'cinder-', True) @utils.retry(exception.VolumeBackendAPIException) def _enum_eternus_instances(self, classname, conn=None, **param_dict): """Enumerate Instances.""" LOG.debug('_enum_eternus_instances, classname: %s.', classname) if not conn: conn = self.conn ret = conn.EnumerateInstances(classname, **param_dict) LOG.debug('_enum_eternus_instances, enum %d instances.', len(ret)) return ret @lockutils.synchronized('ETERNUS-SMIS-other', 'cinder-', True) @utils.retry(exception.VolumeBackendAPIException) def _enum_eternus_instance_names(self, classname): """Enumerate Instance Names.""" LOG.debug('_enum_eternus_instance_names, classname: %s.', classname) ret = self.conn.EnumerateInstanceNames(classname) LOG.debug('_enum_eternus_instance_names, enum %d names.', len(ret)) return ret @lockutils.synchronized('ETERNUS-SMIS-getinstance', 'cinder-', True) @utils.retry(exception.VolumeBackendAPIException) def _get_eternus_instance(self, classname, AllowNone=False, **param_dict): """Get Instance.""" LOG.debug('_get_eternus_instance, ' 'classname: %(cls)s, param: %(param)s.', {'cls': classname, 'param': param_dict}) ret = None try: ret = self.conn.GetInstance(classname, **param_dict) except Exception as e: if e.args[0] == 6 and AllowNone: return ret else: msg = _('_get_eternus_instance, Error:%s.') % e raise exception.VolumeBackendAPIException(data=msg) LOG.debug('_get_eternus_instance, ret: %s.', ret) return ret @lockutils.synchronized('ETERNUS-SMIS-other', 'cinder-', True) @utils.retry(exception.VolumeBackendAPIException) def _assoc_eternus(self, classname, conn=None, **param_dict): """Associator.""" LOG.debug('_assoc_eternus, ' 'classname: %(cls)s, param: %(param)s.', {'cls': classname, 'param': param_dict}) if not conn: conn = self.conn ret = conn.Associators(classname, **param_dict) LOG.debug('_assoc_eternus, enum %d instances.', len(ret)) return ret @lockutils.synchronized('ETERNUS-SMIS-other', 'cinder-', True) @utils.retry(exception.VolumeBackendAPIException) def _assoc_eternus_names(self, classname, conn=None, **param_dict): """Associator Names.""" LOG.debug('_assoc_eternus_names, ' 'classname: %(cls)s, param: %(param)s.', {'cls': classname, 'param': param_dict}) if not conn: conn = self.conn ret = conn.AssociatorNames(classname, **param_dict) LOG.debug('_assoc_eternus_names, enum %d names.', len(ret)) return ret @lockutils.synchronized('ETERNUS-SMIS-other', 'cinder-', True) @utils.retry(exception.VolumeBackendAPIException) def _reference_eternus_names(self, classname, **param_dict): 
"""Refference Names.""" LOG.debug('_reference_eternus_names, ' 'classname: %(cls)s, param: %(param)s.', {'cls': classname, 'param': param_dict}) ret = self.conn.ReferenceNames(classname, **param_dict) LOG.debug('_reference_eternus_names, enum %d names.', len(ret)) return ret def _create_eternus_instance_name(self, classname, bindings): """create CIM InstanceName from classname and bindings.""" LOG.debug('_create_eternus_instance_name, ' 'classname: %(cls)s, bindings: %(bind)s.', {'cls': classname, 'bind': bindings}) bindings['CreationClassName'] = classname bindings['SystemCreationClassName'] = 'FUJITSU_StorageComputerSystem' try: instancename = pywbem.CIMInstanceName( classname, namespace='root/eternus', keybindings=bindings) except NameError: instancename = None LOG.debug('_create_eternus_instance_name, ret: %s.', instancename) return instancename def _find_lun(self, volume): """Find lun instance from volume class or volumename on ETERNUS.""" LOG.debug('_find_lun, volume id: %s.', volume['id']) volumeinstance = None volumename = self._get_volume_name(volume) try: location = eval(volume['provider_location']) classname = location['classname'] bindings = location['keybindings'] isSuccess = True if classname and bindings: LOG.debug('_find_lun, ' 'classname: %(classname)s, ' 'bindings: %(bindings)s.', {'classname': classname, 'bindings': bindings}) volume_instance_name = ( self._create_eternus_instance_name(classname, bindings)) LOG.debug('_find_lun, ' 'volume_insatnce_name: %(volume_instance_name)s.', {'volume_instance_name': volume_instance_name}) vol_instance = self._get_eternus_instance(volume_instance_name, AllowNone=True) if vol_instance and vol_instance['ElementName'] == volumename: volumeinstance = vol_instance except Exception: isSuccess = False LOG.debug('_find_lun, ' 'Cannot get volume instance from provider location, ' 'Search all volume using EnumerateInstanceNames.') if not isSuccess and self.model_name == CONSTANTS.DX_S2: # For old version. LOG.debug('_find_lun, ' 'volumename: %(volumename)s.', {'volumename': volumename}) vol_name = { 'source-name': volumename } # Get volume instance from volumename on ETERNUS. 
volumeinstance = self._find_lun_with_listup(**vol_name) LOG.debug('_find_lun, ret: %s.', volumeinstance) return volumeinstance def _find_copysession(self, vol_instance): """find copysession from volumename on ETERNUS.""" LOG.debug('_find_copysession, volume name: %s.', vol_instance['ElementName']) try: cpsessionlist = self.conn.ReferenceNames( vol_instance.path, ResultClass='FUJITSU_StorageSynchronized') except Exception: msg = (_('_find_copysession, ' 'ReferenceNames, ' 'vol_instance: %(vol_instance_path)s, ' 'Cannot connect to ETERNUS.') % {'vol_instance_path': vol_instance.path}) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) LOG.debug('_find_copysession, ' 'cpsessionlist: %(cpsessionlist)s.', {'cpsessionlist': cpsessionlist}) LOG.debug('_find_copysession, ret: %s.', cpsessionlist) return cpsessionlist def _wait_for_copy_complete(self, cpsession): """Wait for the completion of copy.""" LOG.debug('_wait_for_copy_complete, cpsession: %s.', cpsession) cpsession_instance = None while True: try: cpsession_instance = self.conn.GetInstance( cpsession, LocalOnly=False) except Exception: cpsession_instance = None # if copy session is none, # it means copy session was finished,break and return if cpsession_instance is None: break LOG.debug('_wait_for_copy_complete, ' 'find target copysession, ' 'wait for end of copysession.') if cpsession_instance['CopyState'] == CONSTANTS.BROKEN: msg = (_('_wait_for_copy_complete, ' 'cpsession: %(cpsession)s, ' 'copysession state is BROKEN.') % {'cpsession': cpsession}) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) time.sleep(10) @utils.retry(exception.VolumeBackendAPIException) def _delete_copysession(self, cpsession): """delete copysession.""" LOG.debug('_delete_copysession: cpssession: %s.', cpsession) try: cpsession_instance = self._get_eternus_instance( cpsession, LocalOnly=False) except Exception: LOG.info('_delete_copysession, ' 'the copysession was already completed.') return copytype = cpsession_instance['CopyType'] # set oparation code # SnapOPC: 19 (Return To ResourcePool) # OPC:8 (Detach) # EC/REC:8 (Detach) operation = CONSTANTS.OPERATION_dic.get(copytype, None) if operation is None: msg = (_('_delete_copysession, ' 'copy session type is undefined! 
' 'copy session: %(cpsession)s, ' 'copy type: %(copytype)s.') % {'cpsession': cpsession, 'copytype': copytype}) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) repservice = self._find_eternus_service(CONSTANTS.REPL) if repservice is None: msg = (_('_delete_copysession, ' 'Cannot find Replication Service')) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) # Invoke method for delete copysession rc, errordesc, job = self._exec_eternus_service( 'ModifyReplicaSynchronization', repservice, Operation=self._pywbem_uint(operation, '16'), Synchronization=cpsession, Force=True, WaitForCopyState=self._pywbem_uint(15, '16')) LOG.debug('_delete_copysession, ' 'copysession: %(cpsession)s, ' 'operation: %(operation)s, ' 'Return code: %(rc)lu, ' 'Error: %(errordesc)s.', {'cpsession': cpsession, 'operation': operation, 'rc': rc, 'errordesc': errordesc}) if rc == CONSTANTS.COPYSESSION_NOT_EXIST: LOG.debug('_delete_copysession, ' 'cpsession: %(cpsession)s, ' 'copysession is not exist.', {'cpsession': cpsession}) elif rc == CONSTANTS.VOLUME_IS_BUSY: msg = (_('_delete_copysession, ' 'copysession: %(cpsession)s, ' 'operation: %(operation)s, ' 'Error: Volume is in Busy state') % {'cpsession': cpsession, 'operation': operation}) raise exception.VolumeIsBusy(msg) elif rc != CONSTANTS.RC_OK: msg = (_('_delete_copysession, ' 'copysession: %(cpsession)s, ' 'operation: %(operation)s, ' 'Return code: %(rc)lu, ' 'Error: %(errordesc)s.') % {'cpsession': cpsession, 'operation': operation, 'rc': rc, 'errordesc': errordesc}) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) def _get_target_port(self): """return target portid.""" LOG.debug('_get_target_port, protocol: %s.', self.protocol) target_portlist = [] if self.protocol == 'fc': prtcl_endpoint = 'FUJITSU_SCSIProtocolEndpoint' connection_type = 2 elif self.protocol == 'iSCSI': prtcl_endpoint = 'FUJITSU_iSCSIProtocolEndpoint' connection_type = 7 try: tgtportlist = self._enum_eternus_instances(prtcl_endpoint) except Exception: msg = (_('_get_target_port, ' 'EnumerateInstances, ' 'cannot connect to ETERNUS.')) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) for tgtport in tgtportlist: # Check : protocol of tgtport if tgtport['ConnectionType'] != connection_type: continue # Check : if port is for remote copy, continue if (tgtport['RAMode'] & 0x7B) != 0x00: continue # Check : if port is for StorageCluster, continue if 'SCGroupNo' in tgtport: continue target_portlist.append(tgtport) LOG.debug('_get_target_port, ' 'connection type: %(cont)s, ' 'ramode: %(ramode)s.', {'cont': tgtport['ConnectionType'], 'ramode': tgtport['RAMode']}) LOG.debug('_get_target_port, ' 'target port: %(target_portid)s.', {'target_portid': target_portlist}) if len(target_portlist) == 0: msg = (_('_get_target_port, ' 'protcol: %(protocol)s, ' 'target_port not found.') % {'protocol': self.protocol}) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) LOG.debug('_get_target_port, ret: %s.', target_portlist) return target_portlist @lockutils.synchronized('ETERNUS-connect', 'cinder-', True) def _map_lun(self, vol_instance, connector, targetlist=None): """map volume to host.""" volumename = vol_instance['ElementName'] LOG.debug('_map_lun, ' 'volume name: %(vname)s, connector: %(connector)s.', {'vname': volumename, 'connector': connector}) volume_uid = vol_instance['Name'] initiatorlist = self._find_initiator_names(connector) aglist = self._find_affinity_group(connector) configservice = 
self._find_eternus_service(CONSTANTS.CTRL_CONF) if targetlist is None: targetlist = self._get_target_port() if configservice is None: msg = (_('_map_lun, ' 'vol_instance.path:%(vol)s, ' 'volumename: %(volumename)s, ' 'volume_uid: %(uid)s, ' 'initiator: %(initiator)s, ' 'target: %(tgt)s, ' 'aglist: %(aglist)s, ' 'Storage Configuration Service not found.') % {'vol': vol_instance.path, 'volumename': volumename, 'uid': volume_uid, 'initiator': initiatorlist, 'tgt': targetlist, 'aglist': aglist}) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) LOG.debug('_map_lun, ' 'vol_instance.path: %(vol_instance)s, ' 'volumename:%(volumename)s, ' 'initiator:%(initiator)s, ' 'target:%(tgt)s.', {'vol_instance': vol_instance.path, 'volumename': [volumename], 'initiator': initiatorlist, 'tgt': targetlist}) if not aglist: # Create affinity group and set host-affinity. for target in targetlist: LOG.debug('_map_lun, ' 'lun_name: %(volume_uid)s, ' 'Initiator: %(initiator)s, ' 'target: %(target)s.', {'volume_uid': [volume_uid], 'initiator': initiatorlist, 'target': target['Name']}) rc, errordesc, job = self._exec_eternus_service( 'ExposePaths', configservice, LUNames=[volume_uid], InitiatorPortIDs=initiatorlist, TargetPortIDs=[target['Name']], DeviceAccesses=[self._pywbem_uint(2, '16')]) LOG.debug('_map_lun, ' 'Error: %(errordesc)s, ' 'Return code: %(rc)lu, ' 'Create affinitygroup and set host-affinity.', {'errordesc': errordesc, 'rc': rc}) if rc != CONSTANTS.RC_OK and rc != CONSTANTS.LUNAME_IN_USE: LOG.warning('_map_lun, ' 'lun_name: %(volume_uid)s, ' 'Initiator: %(initiator)s, ' 'target: %(target)s, ' 'Return code: %(rc)lu, ' 'Error: %(errordesc)s.', {'volume_uid': [volume_uid], 'initiator': initiatorlist, 'target': target['Name'], 'rc': rc, 'errordesc': errordesc}) else: # Add lun to affinity group for ag in aglist: LOG.debug('_map_lun, ' 'ag: %(ag)s, lun_name: %(volume_uid)s.', {'ag': ag, 'volume_uid': volume_uid}) rc, errordesc, job = self._exec_eternus_service( 'ExposePaths', configservice, LUNames=[volume_uid], DeviceAccesses=[self._pywbem_uint(2, '16')], ProtocolControllers=[ag]) LOG.debug('_map_lun, ' 'Error: %(errordesc)s, ' 'Return code: %(rc)lu, ' 'Add lun to affinity group.', {'errordesc': errordesc, 'rc': rc}) if rc != CONSTANTS.RC_OK and rc != CONSTANTS.LUNAME_IN_USE: LOG.warning('_map_lun, ' 'lun_name: %(volume_uid)s, ' 'Initiator: %(initiator)s, ' 'ag: %(ag)s, ' 'Return code: %(rc)lu, ' 'Error: %(errordesc)s.', {'volume_uid': [volume_uid], 'initiator': initiatorlist, 'ag': ag, 'rc': rc, 'errordesc': errordesc}) def _find_initiator_names(self, connector): """return initiator names.""" initiatornamelist = [] if self.protocol == 'fc' and connector['wwpns']: LOG.debug('_find_initiator_names, wwpns: %s.', connector['wwpns']) initiatornamelist = connector['wwpns'] elif self.protocol == 'iSCSI' and connector['initiator']: LOG.debug('_find_initiator_names, initiator: %s.', connector['initiator']) initiatornamelist.append(connector['initiator']) if not initiatornamelist: msg = (_('_find_initiator_names, ' 'connector: %(connector)s, ' 'initiator not found.') % {'connector': connector}) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) LOG.debug('_find_initiator_names, ' 'initiator list: %(initiator)s.', {'initiator': initiatornamelist}) return initiatornamelist def _find_affinity_group(self, connector, vol_instance=None): """find affinity group from connector.""" LOG.debug('_find_affinity_group, vol_instance: %s.', vol_instance) affinity_grouplist = [] initiatorlist = 
self._find_initiator_names(connector) if vol_instance is None: try: aglist = self._enum_eternus_instance_names( 'FUJITSU_AffinityGroupController') except Exception: msg = (_('_find_affinity_group, ' 'connector: %(connector)s, ' 'EnumerateInstanceNames, ' 'cannot connect to ETERNUS.') % {'connector': connector}) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) LOG.debug('_find_affinity_group,' 'affinity_groups:%s', aglist) else: try: aglist = self._assoc_eternus_names( vol_instance.path, AssocClass='FUJITSU_ProtocolControllerForUnit', ResultClass='FUJITSU_AffinityGroupController') except Exception: msg = (_('_find_affinity_group,' 'connector: %(connector)s,' 'AssocNames: FUJITSU_ProtocolControllerForUnit, ' 'cannot connect to ETERNUS.') % {'connector': connector}) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) LOG.debug('_find_affinity_group, ' 'vol_instance.path: %(volume)s, ' 'affinity_groups: %(aglist)s.', {'volume': vol_instance.path, 'aglist': aglist}) for ag in aglist: try: hostaglist = self._assoc_eternus( ag, AssocClass='FUJITSU_AuthorizedTarget', ResultClass='FUJITSU_AuthorizedPrivilege') except Exception: msg = (_('_find_affinity_group, ' 'connector: %(connector)s, ' 'Associators: FUJITSU_AuthorizedTarget, ' 'cannot connect to ETERNUS.') % {'connector': connector}) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) for hostag in hostaglist: for initiator in initiatorlist: if initiator.lower() not in hostag['InstanceID'].lower(): continue LOG.debug('_find_affinity_group, ' 'AffinityGroup: %(ag)s.', {'ag': ag}) affinity_grouplist.append(ag) break break LOG.debug('_find_affinity_group, ' 'initiators: %(initiator)s, ' 'affinity_group: %(affinity_group)s.', {'initiator': initiatorlist, 'affinity_group': affinity_grouplist}) return affinity_grouplist @lockutils.synchronized('ETERNUS-connect', 'cinder-', True) def _unmap_lun(self, volume, connector, force=False): """unmap volume from host.""" LOG.debug('_map_lun, volume id: %(vid)s, ' 'connector: %(connector)s, force: %(frc)s.', {'vid': volume['id'], 'connector': connector, 'frc': force}) volumename = self._get_volume_name(volume) vol_instance = self._find_lun(volume) if vol_instance is None: LOG.info('_unmap_lun, ' 'volumename:%(volumename)s, ' 'volume not found.', {'volumename': volumename}) return False volume_uid = vol_instance['Name'] if not force: aglist = self._find_affinity_group(connector, vol_instance) if not aglist: LOG.info('_unmap_lun, ' 'volumename: %(volumename)s, ' 'volume is not mapped.', {'volumename': volumename}) return False else: try: aglist = self._assoc_eternus_names( vol_instance.path, AssocClass='CIM_ProtocolControllerForUnit', ResultClass='FUJITSU_AffinityGroupController') except Exception: msg = (_('_unmap_lun,' 'vol_instance.path: %(volume)s, ' 'AssociatorNames: CIM_ProtocolControllerForUnit, ' 'cannot connect to ETERNUS.') % {'volume': vol_instance.path}) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) LOG.debug('_unmap_lun, ' 'vol_instance.path: %(volume)s, ' 'affinity_groups: %(aglist)s.', {'volume': vol_instance.path, 'aglist': aglist}) configservice = self._find_eternus_service(CONSTANTS.CTRL_CONF) if configservice is None: msg = (_('_unmap_lun, ' 'vol_instance.path: %(volume)s, ' 'volumename: %(volumename)s, ' 'volume_uid: %(uid)s, ' 'aglist: %(aglist)s, ' 'Controller Configuration Service not found.') % {'vol': vol_instance.path, 'volumename': [volumename], 'uid': [volume_uid], 'aglist': aglist}) LOG.error(msg) raise 
exception.VolumeBackendAPIException(data=msg) for ag in aglist: LOG.debug('_unmap_lun, ' 'volumename: %(volumename)s, ' 'volume_uid: %(volume_uid)s, ' 'AffinityGroup: %(ag)s.', {'volumename': volumename, 'volume_uid': volume_uid, 'ag': ag}) rc, errordesc, job = self._exec_eternus_service( 'HidePaths', configservice, LUNames=[volume_uid], ProtocolControllers=[ag]) LOG.debug('_unmap_lun, ' 'Error: %(errordesc)s, ' 'Return code: %(rc)lu.', {'errordesc': errordesc, 'rc': rc}) if rc == CONSTANTS.LUNAME_NOT_EXIST: LOG.debug('_unmap_lun, ' 'volumename: %(volumename)s, ' 'Invalid LUNames.', {'volumename': volumename}) elif rc != CONSTANTS.RC_OK: msg = (_('_unmap_lun, ' 'volumename: %(volumename)s, ' 'volume_uid: %(volume_uid)s, ' 'AffinityGroup: %(ag)s, ' 'Return code: %(rc)lu, ' 'Error: %(errordesc)s.') % {'volumename': volumename, 'volume_uid': volume_uid, 'ag': ag, 'rc': rc, 'errordesc': errordesc}) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) LOG.debug('_unmap_lun, ' 'volumename: %(volumename)s.', {'volumename': volumename}) return True def _get_eternus_iscsi_properties(self): """get target port iqns and target_portals.""" iscsi_properties_list = [] iscsiip_list = self._get_drvcfg('EternusISCSIIP', multiple=True) iscsi_port = self.configuration.target_port LOG.debug('_get_eternus_iscsi_properties, iplist: %s.', iscsiip_list) try: ip_endpointlist = self._enum_eternus_instance_names( 'FUJITSU_IPProtocolEndpoint') except Exception: msg = (_('_get_eternus_iscsi_properties, ' 'iscsiip: %(iscsiip)s, ' 'EnumerateInstanceNames, ' 'cannot connect to ETERNUS.') % {'iscsiip': iscsiip_list}) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) for ip_endpoint in ip_endpointlist: try: ip_endpoint_instance = self._get_eternus_instance( ip_endpoint) ip_address = ip_endpoint_instance['IPv4Address'] LOG.debug('_get_eternus_iscsi_properties, ' 'instanceip: %(ip)s, ' 'iscsiip: %(iscsiip)s.', {'ip': ip_address, 'iscsiip': iscsiip_list}) except Exception: msg = (_('_get_eternus_iscsi_properties, ' 'iscsiip: %(iscsiip)s, ' 'GetInstance, ' 'cannot connect to ETERNUS.') % {'iscsiip': iscsiip_list}) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) if ip_address not in iscsiip_list: continue LOG.debug('_get_eternus_iscsi_properties, ' 'find iscsiip: %(ip)s.', {'ip': ip_address}) try: tcp_endpointlist = self._assoc_eternus_names( ip_endpoint, AssocClass='CIM_BindsTo', ResultClass='FUJITSU_TCPProtocolEndpoint') except Exception: msg = (_('_get_eternus_iscsi_properties, ' 'iscsiip: %(iscsiip)s, ' 'AssociatorNames: CIM_BindsTo, ' 'cannot connect to ETERNUS.') % {'iscsiip': ip_address}) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) for tcp_endpoint in tcp_endpointlist: try: iscsi_endpointlist = ( self._assoc_eternus(tcp_endpoint, AssocClass='CIM_BindsTo', ResultClass='FUJITSU_iSCSI' 'ProtocolEndpoint')) except Exception: msg = (_('_get_eternus_iscsi_properties, ' 'iscsiip: %(iscsiip)s, ' 'AssociatorNames: CIM_BindsTo, ' 'cannot connect to ETERNUS.') % {'iscsiip': ip_address}) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) for iscsi_endpoint in iscsi_endpointlist: target_portal = "%s:%s" % (ip_address, iscsi_port) iqn = iscsi_endpoint['Name'].split(',')[0] iscsi_properties_list.append((iscsi_endpoint.path, target_portal, iqn)) LOG.debug('_get_eternus_iscsi_properties, ' 'target_portal: %(target_portal)s, ' 'iqn: %(iqn)s.', {'target_portal': target_portal, 'iqn': iqn}) if len(iscsi_properties_list) == 0: msg = 
(_('_get_eternus_iscsi_properties, ' 'iscsiip list: %(iscsiip_list)s, ' 'iqn not found.') % {'iscsiip_list': iscsiip_list}) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) LOG.debug('_get_eternus_iscsi_properties, ' 'iscsi_properties_list: %(iscsi_properties_list)s.', {'iscsi_properties_list': iscsi_properties_list}) return iscsi_properties_list def _wait_for_job_complete(self, conn, job): """Given the job wait for it to complete.""" self.retries = 0 self.wait_for_job_called = False def _wait_for_job_complete(): """Called at an interval until the job is finished.""" if self._is_job_finished(conn, job): raise loopingcall.LoopingCallDone() if self.retries > CONSTANTS.JOB_RETRIES: LOG.error("_wait_for_job_complete, " "failed after %(retries)d tries.", {'retries': self.retries}) raise loopingcall.LoopingCallDone() try: self.retries += 1 if not self.wait_for_job_called: if self._is_job_finished(conn, job): self.wait_for_job_called = True except Exception: exceptionMessage = _("Issue encountered waiting for job.") LOG.exception(exceptionMessage) raise exception.VolumeBackendAPIException(exceptionMessage) self.wait_for_job_called = False timer = loopingcall.FixedIntervalLoopingCall(_wait_for_job_complete) timer.start(interval=CONSTANTS.JOB_INTERVAL_SEC).wait() jobInstanceName = job['Job'] jobinstance = conn.GetInstance(jobInstanceName, LocalOnly=False) rc = jobinstance['ErrorCode'] LOG.debug('_wait_for_job_complete, rc: %s.', rc) return rc def _is_job_finished(self, conn, job): """Check if the job is finished.""" jobInstanceName = job['Job'] jobinstance = conn.GetInstance(jobInstanceName, LocalOnly=False) jobstate = jobinstance['JobState'] LOG.debug('_is_job_finished,' 'state: %(state)s', {'state': jobstate}) # From ValueMap of JobState in CIM_ConcreteJob # 2=New, 3=Starting, 4=Running, 32767=Queue Pending # ValueMap("2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13..32767, # 32768..65535"), # Values("New, Starting, Running, Suspended, Shutting Down, # Completed, Terminated, Killed, Exception, Service, # Query Pending, DMTF Reserved, Vendor Reserved")] # NOTE(deva): string matching based on # http://ipmitool.cvs.sourceforge.net/ # viewvc/ipmitool/ipmitool/lib/ipmi_chassis.c if jobstate in [2, 3, 4]: job_finished = False else: job_finished = True LOG.debug('_is_job_finished, finish: %s.', job_finished) return job_finished @staticmethod def _pywbem_uint(num, datatype): try: if datatype == '8': result = pywbem.Uint8(num) elif datatype == '16': result = pywbem.Uint16(num) elif datatype == '32': result = pywbem.Uint32(num) elif datatype == '64': result = pywbem.Uint64(num) except NameError: result = num return result def _find_lun_with_listup(self, conn=None, **kwargs): """Find lun instance with source name or source id on ETERNUS.""" LOG.debug('_find_lun_with_listup start.') volumeinstance = None src_id = kwargs.get('source-id', None) src_name = kwargs.get('source-name', None) if not src_id and not src_name: msg = (_('_find_lun_with_listup, ' 'source-name or source-id: %s, ' 'Must specify source-name or source-id.') % kwargs) LOG.error(msg) raise exception.ManageExistingInvalidReference(data=msg) if src_id and src_name: msg = (_('_find_lun_with_listup, ' 'source-name or source-id: %s, ' 'Must only specify source-name or source-id.') % kwargs) LOG.error(msg) raise exception.ManageExistingInvalidReference(data=msg) if src_id and not src_id.isdigit(): msg = (_('_find_lun_with_listup, ' 'the specified source-id(%s) must be a decimal number.') % src_id) LOG.error(msg) raise 
exception.ManageExistingInvalidReference(data=msg) # Get volume instance by volumename or volumeno on ETERNUS. try: propertylist = [ 'SystemName', 'DeviceID', 'ElementName', 'Purpose', 'BlockSize', 'NumberOfBlocks', 'Name', 'OtherUsageDescription', 'IsCompressed', 'IsDeduplicated' ] vollist = self._enum_eternus_instances( 'FUJITSU_StorageVolume', conn=conn, PropertyList=propertylist) except Exception: msg = (_('_find_lun_with_listup, ' 'source-name or source-id: %s, ' 'EnumerateVolumeInstance.') % kwargs) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) for vol_instance in vollist: if src_id: volume_no = self._get_volume_number(vol_instance) try: # Skip hidden tppv volumes. if int(src_id) == int(volume_no, 16): volumeinstance = vol_instance break except ValueError: continue if src_name: if vol_instance['ElementName'] == src_name: volumeinstance = vol_instance break else: LOG.debug('_find_lun_with_listup, ' 'source-name or source-id: %s, ' 'volume not found on ETERNUS.', kwargs) LOG.debug('_find_lun_with_listup end, ' 'volume instance: %s.', volumeinstance) return volumeinstance def _find_pool_from_volume(self, vol_instance, manage_type='volume'): """Find Instance or InstanceName of pool by volume instance.""" LOG.debug('_find_pool_from_volume, volume: %(volume)s.', {'volume': vol_instance}) poolname = None target_pool = None filename = None conn = self.conn # Get poolname of volume on Eternus. try: pools = self._assoc_eternus( vol_instance.path, conn=conn, AssocClass='FUJITSU_AllocatedFromStoragePool', ResultClass='CIM_StoragePool') except Exception: msg = (_('_find_pool_from_volume, ' 'vol_instance: %s, ' 'Associators: FUJITSU_AllocatedFromStoragePool, ' 'cannot connect to ETERNUS.') % vol_instance.path) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) if not pools: msg = (_('_find_pool_from_volume, ' 'vol_instance: %s, ' 'pool not found.') % vol_instance.path) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) # Get poolname from driver configuration file. 
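# Illustrative aside, not part of the driver: when managing an existing
# volume by source-id, _find_lun_with_listup above compares the decimal id
# supplied by the operator with the hex volume number derived from DeviceID
# (see _get_volume_number further below). A minimal sketch of that check:
def example_source_id_matches(source_id, volume_no_hex):
    # volume_no_hex looks like '0x00A3'; source_id is a decimal string.
    try:
        return int(source_id) == int(volume_no_hex, 16)
    except ValueError:
        # Hidden TPPV volumes yield non-numeric values and are skipped.
        return False
# example_source_id_matches('163', '0x00A3') -> True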
if manage_type == 'volume': cfgpool_list = list(self._get_drvcfg('EternusPool', filename=filename, multiple=True)) elif manage_type == 'snapshot': cfgpool_list = list(self._get_drvcfg('EternusSnapPool', filename=filename, multiple=True)) LOG.debug('_find_pool_from_volume, cfgpool_list: %(cfgpool_list)s.', {'cfgpool_list': cfgpool_list}) for pool in pools: if pool['ElementName'] in cfgpool_list: poolname = pool['ElementName'] target_pool = pool.path break if not target_pool: msg = (_('_find_pool_from_volume, ' 'vol_instance: %s, ' 'the pool of volume not in driver configuration file.') % vol_instance.path) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) LOG.debug('_find_pool_from_volume, poolname: %(poolname)s, ' 'target_pool: %(target_pool)s.', {'poolname': poolname, 'target_pool': target_pool}) return poolname, target_pool def update_migrated_volume(self, ctxt, volume, new_volume): """Update migrated volume.""" LOG.debug('update_migrated_volume, ' 'source volume id: %(s_id)s, ' 'target volume id: %(t_id)s.', {'s_id': volume['id'], 't_id': new_volume['id']}) model_update = None dst_metadata = self.get_metadata(new_volume) src_metadata = self.get_metadata(volume) LOG.debug('source: (%(src_meta)s)(%(src_loc)s), ' 'target: (%(dst_meta)s)(%(dst_loc)s).', {'src_meta': src_metadata, 'src_loc': volume['provider_location'], 'dst_meta': dst_metadata, 'dst_loc': new_volume['provider_location']}) if volume['provider_location']: dst_location = new_volume['provider_location'] model_update = {'_name_id': new_volume['id'], 'provider_location': dst_location} LOG.debug('update_migrated_volume, model_update: %s.', model_update) return model_update def _get_eternus_model(self): """Get ENTERNUS model.""" self.conn = self._get_eternus_connection() ret = CONSTANTS.DX_S3 try: systemnamelist = self._enum_eternus_instances( 'FUJITSU_StorageProduct', conn=self.conn) except Exception: msg = _('_get_eternus_model, EnumerateInstances, ' 'cannot connect to ETERNUS.') LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) systemname = systemnamelist[0]['IdentifyingNumber'] LOG.debug('_get_eternus_model, ' 'systemname: %(systemname)s, ' 'storage is DX S%(model)s.', {'systemname': systemname, 'model': systemname[4]}) if str(systemname[4]) == '2': ret = CONSTANTS.DX_S2 return ret def _get_volume_number(self, vol): """Get volume no(return a hex string).""" if self.model_name == CONSTANTS.DX_S2: volume_number = "0x%04X" % int(vol['DeviceID'][-5:]) else: volume_number = "0x" + vol['DeviceID'][24:28] LOG.debug('_get_volume_number: %s.', volume_number) return volume_number def _exec_eternus_smis_ReferenceNames(self, classname, conn=None, **param_dict): ret = conn.ReferenceNames(classname, **param_dict) return ret def _check_user(self): """Check whether user's role is accessible to ETERNUS and Software.""" ret = True rc, errordesc, job = self._exec_eternus_cli('check_user_role') if rc != CONSTANTS.RC_OK: msg = (_('_check_user, ' 'Return code: %(rc)lu, ' 'Error: %(errordesc)s, ' 'Message: %(job)s.') % {'rc': rc, 'errordesc': errordesc, 'job': job}) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) if job != 'Software': msg = (_('_check_user, ' 'Specified user(%(user)s) does not have ' 'Software role: %(role)s.') % {'user': self._get_drvcfg('EternusUser'), 'role': job}) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) return ret def _exec_eternus_cli(self, command, retry=CONSTANTS.TIMES_MIN, retry_interval=CONSTANTS.RETRY_INTERVAL, retry_code=['E0060'], 
filename=None, **param_dict): """Execute ETERNUS CLI.""" LOG.debug('_exec_eternus_cli, ' 'command: %(a)s, ' 'filename: %(f)s, ' 'parameters: %(b)s.', {'a': command, 'f': filename, 'b': param_dict}) out = None rc = None retdata = None errordesc = None filename = self.configuration.cinder_eternus_config_file storage_ip = self._get_drvcfg('EternusIP', filename) if not self.fjdxcli.get(filename): user = self._get_drvcfg('EternusUser', filename) if self.passwordless: self.fjdxcli[filename] = ( eternus_dx_cli.FJDXCLI(user, storage_ip, keyfile=self.private_key_path)) else: password = self._get_drvcfg('EternusPassword', filename) self.fjdxcli[filename] = ( eternus_dx_cli.FJDXCLI(user, storage_ip, password=password)) for retry_num in range(retry): # Execute ETERNUS CLI and get return value. try: out = self.fjdxcli[filename].done(command, **param_dict) out_dict = out rc_str = out_dict.get('rc') retdata = out_dict.get('message') except Exception as ex: msg = (_('_exec_eternus_cli, ' 'stdout: %(out)s, ' 'unexpected error: %(ex)s.') % {'out': out, 'ex': ex}) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) if rc_str.startswith('E'): errordesc = rc_str rc = CONSTANTS.RC_FAILED if rc_str in retry_code: LOG.info('_exec_eternus_cli, retry, ' 'ip: %(ip)s, ' 'RetryCode: %(rc)s, ' 'TryNum: %(rn)s.', {'ip': storage_ip, 'rc': rc_str, 'rn': (retry_num + 1)}) time.sleep(retry_interval) continue else: LOG.warning('_exec_eternus_cli, ' 'WARNING!! ' 'ip: %(ip)s, ' 'ReturnCode: %(rc_str)s, ' 'ReturnData: %(retdata)s.', {'ip': storage_ip, 'rc_str': rc_str, 'retdata': retdata}) break else: if rc_str == str(CONSTANTS.RC_FAILED): errordesc = rc_str rc = CONSTANTS.RC_FAILED if ('Authentication failed' in retdata and retry_num + 1 < retry): LOG.warning('_exec_eternus_cli, retry, ip: %(ip)s, ' 'Message: %(message)s, ' 'TryNum: %(rn)s.', {'ip': storage_ip, 'message': retdata, 'rn': (retry_num + 1)}) time.sleep(1) continue else: errordesc = None rc = CONSTANTS.RC_OK break else: LOG.warning('_exec_eternus_cli, Retry was exceeded.') ret = (rc, errordesc, retdata) LOG.debug('_exec_eternus_cli, ' 'command: %(a)s, ' 'parameters: %(b)s, ' 'ip: %(ip)s, ' 'Return code: %(rc)s, ' 'Error: %(errordesc)s.', {'a': command, 'b': param_dict, 'ip': storage_ip, 'rc': rc, 'errordesc': errordesc}) return ret @staticmethod def get_metadata(volume): """Get metadata using volume information.""" LOG.debug('get_metadata, volume id: %s.', volume['id']) d_metadata = {} metadata = volume.get('volume_metadata') # value={} enters the if branch, value=None enters the else. if metadata is not None: d_metadata = { data['key']: data['value'] for data in metadata } else: metadata = volume.get('metadata') if metadata: d_metadata = { key: metadata[key] for key in metadata } LOG.debug('get_metadata, metadata is: %s.', d_metadata) return d_metadata def _set_qos(self, volume, use_id=False): """Set volume qos using ETERNUS CLI.""" LOG.debug('_set_qos, volumeid: %(volumeid)s.', {'volumeid': volume['id']}) qos_support = self._is_qos_or_format_support('QOS setting') # Storage is DX S2 series, qos is not supported. if not qos_support: return qos_specs_dict = self._get_qos_specs(volume) if not qos_specs_dict: # Can not get anything from 'qos_specs_id'. return # Get storage version information. rc, emsg, clidata = self._exec_eternus_cli('show_enclosure_status') if rc != CONSTANTS.RC_OK: msg = (_('_set_qos, ' 'show_enclosure_status failed. 
' 'Return code: %(rc)lu, ' 'Error: %(errormsg)s, ' 'Message: %(clidata)s.') % {'rc': rc, 'errormsg': emsg, 'clidata': clidata}) LOG.warning(msg) raise exception.VolumeBackendAPIException(data=msg) category_dict = {} unsupport = [] # If storage version is before V11L30. if clidata['version'] < CONSTANTS.QOS_VERSION: for key, value in qos_specs_dict.items(): if (key in CONSTANTS.FJ_QOS_KEY_BYTES_list or key in CONSTANTS.FJ_QOS_KEY_IOPS_list): msg = (_('_set_qos, Can not support QoS ' 'parameter "%(key)s" on firmware version ' '%(version)s.') % {'key': key, 'version': clidata['version']}) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) if key in CONSTANTS.FJ_QOS_KEY_list: category_dict = self._get_qos_category_by_value( key, value) else: unsupport.append(key) if unsupport: LOG.warning('_set_qos, ' 'Can not support QoS parameter "%s".', unsupport) # If storage version is after V11L30. if clidata['version'] >= 'V11L30-0000': key_dict = self._get_param(qos_specs_dict) if not key_dict: return # Get total/read/write bandwidth limit. category_dict = self._get_qos_category(key_dict) if category_dict: # Set volume qos. volumename = self._get_volume_name(volume, use_id=use_id) category_dict['volume-name'] = volumename rc, errordesc, job = self._exec_eternus_cli( 'set_volume_qos', **category_dict) if rc != CONSTANTS.RC_OK: msg = (_('_set_qos, ' 'set_volume_qos failed. ' 'Return code: %(rc)lu, ' 'Error: %(errordesc)s, ' 'Message: %(job)s.') % {'rc': rc, 'errordesc': errordesc, 'job': job}) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) @staticmethod def _get_qos_specs(volume): """Get qos specs information from volume information.""" LOG.debug('_get_qos_specs, volume id: %s.', volume['id']) qos_specs_dict = {} qos_specs_id = None ctxt = None volume_type_id = volume.get('volume_type_id') if volume_type_id: ctxt = context.get_admin_context() volume_type = volume_types.get_volume_type(ctxt, volume_type_id) qos_specs_id = volume_type.get('qos_specs_id') if qos_specs_id: qos_specs_dict = ( qos_specs.get_qos_specs(ctxt, qos_specs_id)['specs']) LOG.debug('_get_qos_specs, qos_specs_dict: %s.', qos_specs_dict) return qos_specs_dict def _is_qos_or_format_support(self, func_name): """If storage is DX S2 series, qos or format is not supported.""" is_support = True if self.model_name == CONSTANTS.DX_S2: is_support = False LOG.warning('%s is not supported for DX S2, ' 'Skip this process.', func_name) return is_support @staticmethod def _get_qos_category_by_value(key, value): """Get qos category using value.""" LOG.debug('_get_qos_category_by_value, ' 'key: %(key)s, value: %(value)s.', {'key': key, 'value': value}) ret = 0 # Log error method. 
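# Illustrative aside, not part of the driver: _set_qos above branches on the
# array firmware version string. Before V11L30 only the legacy 'maxBWS'
# category is accepted, while V11L30 and later take the *_iops_sec /
# *_bytes_sec keys; the comparison is a plain string comparison. The
# 'V11L30-0000' literal stands in for CONSTANTS.QOS_VERSION in this sketch.
def example_supports_iops_qos(firmware_version, qos_version='V11L30-0000'):
    return firmware_version >= qos_version
# example_supports_iops_qos('V11L10-0000') -> False (only maxBWS usable)
# example_supports_iops_qos('V11L31-0000') -> True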
def _get_qos_category_by_value_error(): """Input value is invalid, log error and raise exception.""" msg = (_('_get_qos_category_by_value, ' 'Invalid value is input, ' 'key: %(key)s, ' 'value: %(value)s.') % {'key': key, 'value': value}) LOG.warning(msg) raise exception.VolumeBackendAPIException(data=msg) if key == "maxBWS": try: digit = int(float(value)) except Exception: _get_qos_category_by_value_error() if digit >= 800: ret = 1 elif digit >= 700: ret = 2 elif digit >= 600: ret = 3 elif digit >= 500: ret = 4 elif digit >= 400: ret = 5 elif digit >= 300: ret = 6 elif digit >= 200: ret = 7 elif digit >= 100: ret = 8 elif digit >= 70: ret = 9 elif digit >= 40: ret = 10 elif digit >= 25: ret = 11 elif digit >= 20: ret = 12 elif digit >= 15: ret = 13 elif digit >= 10: ret = 14 elif digit > 0: ret = 15 else: _get_qos_category_by_value_error() LOG.debug('_get_qos_category_by_value (%s).', ret) category_dict = {} if ret > 0: category_dict = {'bandwidth-limit': ret} return category_dict def _get_param(self, qos_specs_dict): # Get all keys which have been set and its value. LOG.debug('_get_param, ' 'qos_specs_dict: %(qos_specs_dict)s.', {'qos_specs_dict': qos_specs_dict}) key_dict = {} unsupport = [] for key, value in qos_specs_dict.items(): if key in CONSTANTS.FJ_QOS_KEY_list: msg = (_('_get_param, Can not support QoS ' 'parameter "%(key)s" on firmware version ' 'V11L30-0000 or above.') % {'key': key}) LOG.warning(msg) raise exception.VolumeBackendAPIException(data=msg) if key in CONSTANTS.FJ_QOS_KEY_BYTES_list: key_dict[key] = self._check_throughput(key, value) # Example: When "read_bytes_sec" is specified, # the corresponding "read_iops_sec" also needs to be specified. # If not, it is specified as the maximum. iopsStr = key.replace('bytes', 'iops') if iopsStr not in qos_specs_dict.keys(): key_dict[iopsStr] = CONSTANTS.MAX_IOPS elif key in CONSTANTS.FJ_QOS_KEY_IOPS_list: key_dict[key] = self._check_iops(key, value) # If can not get the corresponding bytes, # the bytes is set to the maximum value. throughputStr = key.replace('iops', 'bytes') if throughputStr not in qos_specs_dict.keys(): key_dict[throughputStr] = CONSTANTS.MAX_THROUGHPUT else: unsupport.append(key) if unsupport: LOG.warning('_get_param, ' 'Can not support QoS parameter "%s".', unsupport) return key_dict def _check_iops(self, key, value): """Check input value of IOPS.""" LOG.debug('_check_iops, key: %(key)s, value: %(value)s.', {'key': key, 'value': value}) value = int(float(value)) if value < CONSTANTS.MIN_IOPS or value > CONSTANTS.MAX_IOPS: msg = (_('_check_iops, ' '%(key)s is out of range.') % {'key': key}) LOG.warning(msg) raise exception.VolumeBackendAPIException(data=msg) return value def _check_throughput(self, key, value): LOG.debug('_check_throughput, key: %(key)s, value: %(value)s.', {'key': key, 'value': value}) value = float(value) / units.Mi if (value < CONSTANTS.MIN_THROUGHPUT or value > CONSTANTS.MAX_THROUGHPUT): msg = (_('_check_throughput, ' '%(key)s is out of range.') % {'key': key}) LOG.warning(msg) raise exception.VolumeBackendAPIException(data=msg) return int(value) def _get_qos_category(self, key_dict): """Get qos category by parameters according to the specific volume.""" LOG.debug('_get_qos_category, ' 'key_dict: %(key_dict)s.', {'key_dict': key_dict}) # Get all the bandwidth limits. rc, errordesc, bandwidthlist = self._exec_eternus_cli( 'show_qos_bandwidth_limit') if rc != CONSTANTS.RC_OK: msg = (_('_get_qos_category, ' 'show_qos_bandwidth_limit failed. 
' 'Return code: %(rc)lu, ' 'Error: %(errordesc)s, ' 'Message: %(clidata)s.') % {'rc': rc, 'errordesc': errordesc, 'clidata': bandwidthlist}) LOG.warning(msg) raise exception.VolumeBackendAPIException(data=msg) ret_dict = {} for bw in bandwidthlist: if 'total_iops_sec' in key_dict.keys(): if (bw['total_iops_sec'] == key_dict['total_iops_sec'] and bw['total_bytes_sec'] == key_dict['total_bytes_sec']): ret_dict['bandwidth-limit'] = bw['total_limit'] if 'read_iops_sec' in key_dict.keys(): if (bw['read_iops_sec'] == key_dict['read_iops_sec'] and bw['read_bytes_sec'] == key_dict['read_bytes_sec']): ret_dict['read-bandwidth-limit'] = bw['read_limit'] if 'write_iops_sec' in key_dict.keys(): if (bw['write_iops_sec'] == key_dict['write_iops_sec'] and bw['write_bytes_sec'] == key_dict['write_bytes_sec']): ret_dict['write-bandwidth-limit'] = bw['write_limit'] # If find all available pairs. # len(key_dict) must be 2, 4 or 6 if len(key_dict) / 2 == len(ret_dict): return ret_dict rc, errordesc, vqosdatalist = self._exec_eternus_cli('show_volume_qos') if rc != CONSTANTS.RC_OK: msg = (_('_get_qos_category, ' 'show_volume_qos failed. ' 'Return code: %(rc)lu, ' 'Error: %(errordesc)s, ' 'Message: %(clidata)s.') % {'rc': rc, 'errordesc': errordesc, 'clidata': vqosdatalist}) LOG.warning(msg) raise exception.VolumeBackendAPIException(data=msg) # Get used total/read/write bandwidth limit. totalusedlimits = set() readusedlimits = set() writeusedlimits = set() for vqos in vqosdatalist: totalusedlimits.add(vqos['total_limit']) readusedlimits.add(vqos['read_limit']) writeusedlimits.add(vqos['write_limit']) # Get unused total/read/write bandwidth limit. totalunusedlimits = list(set(range(1, 16)) - totalusedlimits) readunusedlimits = list(set(range(1, 16)) - readusedlimits) writeunusedlimits = list(set(range(1, 16)) - writeusedlimits) # If there is no same couple, set new qos bandwidth limit. 
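# Illustrative aside, not part of the driver: the set differences computed
# just above treat slot numbers 1..15 as the allocatable bandwidth-limit
# range and pick out the ones not already used by an existing volume QoS
# setting. The same selection as a standalone sketch, with made-up data:
def example_free_limit_slots(used_slots):
    # Valid slot numbers are 1..15.
    return sorted(set(range(1, 16)) - set(used_slots))
# example_free_limit_slots({1, 2, 5})
# -> [3, 4, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]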
if 'total_iops_sec' in key_dict.keys(): if 'bandwidth-limit' not in ret_dict.keys(): if len(totalunusedlimits) == 0: msg = _('_get_qos_category, ' 'There is no available total bandwidth limit.') LOG.warning(msg) raise exception.VolumeBackendAPIException(data=msg) else: self._set_limit('volume-qos', totalunusedlimits[0], key_dict['total_iops_sec'], key_dict['total_bytes_sec']) ret_dict['bandwidth-limit'] = totalunusedlimits[0] else: ret_dict['bandwidth-limit'] = 0 if 'read_iops_sec' in key_dict.keys(): if 'read-bandwidth-limit' not in ret_dict.keys(): if len(readunusedlimits) == 0: msg = _('_get_qos_category, ' 'There is no available read bandwidth limit.') LOG.warning(msg) raise exception.VolumeBackendAPIException(data=msg) else: self._set_limit('volume-qos-read', readunusedlimits[0], key_dict['read_iops_sec'], key_dict['read_bytes_sec']) ret_dict['read-bandwidth-limit'] = readunusedlimits[0] else: ret_dict['read-bandwidth-limit'] = 0 if 'write_bytes_sec' in key_dict.keys(): if 'write-bandwidth-limit' not in ret_dict.keys(): if len(writeunusedlimits) == 0: msg = _('_get_qos_category, ' 'There is no available write bandwidth limit.') LOG.warning(msg) raise exception.VolumeBackendAPIException(data=msg) else: self._set_limit('volume-qos-write', writeunusedlimits[0], key_dict['write_iops_sec'], key_dict['write_bytes_sec']) ret_dict['write-bandwidth-limit'] = writeunusedlimits[0] else: ret_dict['write-bandwidth-limit'] = 0 return ret_dict def _set_limit(self, mode, limit, iops, throughput): """Register a new qos scheme at the specified bandwidth""" LOG.debug('_set_limit, mode: %(mode)s, ' 'limit: %(limit)s, iops:%(iops)s, ' 'throughput: %(throughput)s.', {'mode': mode, 'limit': limit, 'iops': iops, 'throughput': throughput}) param_dict = ({'mode': mode, 'bandwidth-limit': limit, 'iops': iops, 'throughput': throughput}) rc, emsg, clidata = self._exec_eternus_cli( 'set_qos_bandwidth_limit', **param_dict) if rc != CONSTANTS.RC_OK: msg = (_('_set_limit, ' 'set_qos_bandwidth_limit failed. ' 'Return code: %(rc)lu, ' 'Error: %(errormsg)s, ' 'Message: %(clidata)s.') % {'rc': rc, 'errormsg': emsg, 'clidata': clidata}) LOG.warning(msg) raise exception.VolumeBackendAPIException(data=msg) def revert_to_snapshot(self, volume, snapshot): """Revert volume to snapshot.""" LOG.debug('revert_to_snapshot, Enter method, ' 'volume id: %(vid)s, ' 'snapshot id: %(sid)s. ', {'vid': volume['id'], 'sid': snapshot['id']}) vol_instance = self._find_lun(volume) sdv_instance = self._find_lun(snapshot) volume_no = self._get_volume_number(vol_instance) snapshot_no = self._get_volume_number(sdv_instance) # Check the existence of volume. if not vol_instance: msg = (_('revert_to_snapshot, ' 'source volume not found on ETERNUS, ' 'volume: %(volume)s. ') % {'volume': volume}) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) # Check the existence of sdv. if not sdv_instance: msg = (_('revert_to_snapshot, ' 'snapshot volume not found on ETERNUS. ' 'snapshot: %(snapshot)s. ') % {'snapshot': snapshot}) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) sdvsession = None cpsessionlist = self._find_copysession(vol_instance) LOG.debug('revert_to_snapshot, ' 'cpsessionlist: %(cpsessionlist)s. 
', {'cpsessionlist': cpsessionlist}) for cpsession in cpsessionlist: if (cpsession['SystemElement'].keybindings.get('DeviceID') == vol_instance.path.keybindings.get('DeviceID')): if (cpsession['SyncedElement'].keybindings.get('DeviceID') == sdv_instance.path.keybindings.get('DeviceID')): sdvsession = cpsession break if sdvsession: LOG.debug('revert_to_snapshot, ' 'sdvsession: %(sdvsession)s. ', {'sdvsession': sdvsession}) repservice = self._find_eternus_service( "FUJITSU_ReplicationService") if repservice is None: msg = _('revert_to_snapshot, ' 'Replication Service not found. ') LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) # Invoke method for revert to snapshot rc, errordesc, job = self._exec_eternus_service( 'ModifyReplicaSynchronization', repservice, Operation=self._pywbem_uint(15, '16'), WaitForCopyState=self._pywbem_uint(8, '16'), Synchronization=sdvsession) if rc != CONSTANTS.RC_OK: msg = (_('revert_to_snapshot, ' '_exec_eternus_service error, ' 'volume: %(volume)s, ' 'Return code: %(rc)lu, ' 'Error: %(errordesc)s, ' 'Message: %(job)s.') % {'volume': volume['id'], 'rc': rc, 'errordesc': errordesc, 'job': job}) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) else: LOG.debug('revert_to_snapshot, ' 'successfully. ') else: is_find = False cp_session_list = self._get_copy_sessions_list() for cp in cp_session_list: if (cp['Source Num'] == int(volume_no, 16) and cp['Dest Num'] == int(snapshot_no, 16) and cp['Type'] == 'Snap'): is_find = True break if is_find is True: param_dict = ( {'source-volume-number': int(snapshot_no, 16), 'destination-volume-number': int(volume_no, 16)}) rc, emsg, clidata = self._exec_eternus_cli( 'start_copy_opc', **param_dict) if rc != CONSTANTS.RC_OK: msg = (_('revert_to_snapshot, ' 'start_copy_opc failed. ' 'Return code: %(rc)lu, ' 'Error: %(errormsg)s, ' 'Message: %(clidata)s.') % {'rc': rc, 'errormsg': emsg, 'clidata': clidata}) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) else: msg = (_('revert_to_snapshot, ' 'snapshot volume not found on ETERNUS. ' 'snapshot: %(snapshot)s. ') % {'snapshot': snapshot}) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) LOG.debug('revert_to_snapshot, Exit method. ') def _get_copy_sessions_list(self, **param): """Get copy sessions list.""" LOG.debug('_get_copy_sessions_list, Enter method.') rc, emsg, clidata = self._exec_eternus_cli( 'show_copy_sessions', **param ) if rc != CONSTANTS.RC_OK: msg = (_('_get_copy_sessions_list, ' 'get copy sessions failed. ' 'Return code: %(rc)lu, ' 'Error: %(emsg)s, ' 'Message: %(clidata)s.') % {'rc': rc, 'emsg': emsg, 'clidata': clidata}) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) LOG.debug('_get_copy_sessions_list, Exit method, ' 'copy sessions list: %(clidata)s. ', {'clidata': clidata}) return clidata ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/drivers/fujitsu/eternus_dx/eternus_dx_fc.py0000664000175000017500000001640400000000000026400 0ustar00zuulzuul00000000000000# Copyright (c) 2015 FUJITSU LIMITED # Copyright (c) 2012 EMC Corporation. # Copyright (c) 2012 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # """ FibreChannel Cinder Volume driver for Fujitsu ETERNUS DX S3 series. """ from oslo_log import log as logging from cinder.common import constants from cinder import exception from cinder.i18n import _ from cinder import interface from cinder.volume import driver from cinder.volume.drivers.fujitsu.eternus_dx import eternus_dx_common from cinder.zonemanager import utils as fczm_utils LOG = logging.getLogger(__name__) @interface.volumedriver class FJDXFCDriver(driver.FibreChannelDriver): """FC Cinder Volume Driver for Fujitsu ETERNUS DX S3 series.""" # ThirdPartySystems wiki page CI_WIKI_NAME = "Fujitsu_ETERNUS_CI" VERSION = eternus_dx_common.FJDXCommon.VERSION def __init__(self, *args, **kwargs): super(FJDXFCDriver, self).__init__(*args, **kwargs) self.common = eternus_dx_common.FJDXCommon( 'fc', configuration=self.configuration) self.VERSION = self.common.VERSION @staticmethod def get_driver_options(): return eternus_dx_common.FJDXCommon.get_driver_options() def check_for_setup_error(self): if not self.common.pywbemAvailable: msg = _('pywbem could not be imported! ' 'pywbem is necessary for this volume driver.') LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) def create_volume(self, volume): """Create volume.""" model_update = self.common.create_volume(volume) return model_update def create_volume_from_snapshot(self, volume, snapshot): """Creates a volume from a snapshot.""" location, metadata = ( self.common.create_volume_from_snapshot(volume, snapshot)) v_metadata = self._get_metadata(volume) metadata.update(v_metadata) return {'provider_location': str(location), 'metadata': metadata} def create_cloned_volume(self, volume, src_vref): """Create cloned volume.""" location, metadata = ( self.common.create_cloned_volume(volume, src_vref)) v_metadata = self._get_metadata(volume) metadata.update(v_metadata) return {'provider_location': str(location), 'metadata': metadata} def delete_volume(self, volume): """Delete volume on ETERNUS.""" LOG.debug('delete_volume, ' 'volume id: %s, Enter method.', volume['id']) self.common.delete_volume(volume) LOG.debug('delete_volume, ' 'volume id: %s, delete succeed.', volume['id']) def create_snapshot(self, snapshot): """Creates a snapshot.""" LOG.debug('create_snapshot, ' 'snap id: %(sid)s, volume id: %(vid)s, Enter method.', {'sid': snapshot['id'], 'vid': snapshot['volume_id']}) model_update = self.common.create_snapshot(snapshot) LOG.debug('create_snapshot, info: %s, Exit method.', model_update['metadata']) return model_update def delete_snapshot(self, snapshot): """Deletes a snapshot.""" self.common.delete_snapshot(snapshot) def ensure_export(self, context, volume): """Driver entry point to get the export info for an existing volume.""" return def create_export(self, context, volume, connector): """Driver entry point to get the export info for a new volume.""" return def remove_export(self, context, volume): """Driver entry point to remove an export for a volume.""" return def initialize_connection(self, volume, connector): """Allow connection to connector and return connection info.""" info = self.common.initialize_connection(volume, 
connector) data = info['data'] init_tgt_map = ( self.common.build_fc_init_tgt_map(connector, data['target_wwn'])) data['initiator_target_map'] = init_tgt_map info['data'] = data fczm_utils.add_fc_zone(info) return info def terminate_connection(self, volume, connector, **kwargs): """Disallow connection from connector.""" self.common.terminate_connection(volume, connector) info = {'driver_volume_type': 'fibre_channel', 'data': {}} if connector: attached = self.common.check_attached_volume_in_zone(connector) if not attached: # No more volumes attached to the host init_tgt_map = self.common.build_fc_init_tgt_map(connector) info['data'] = {'initiator_target_map': init_tgt_map} fczm_utils.remove_fc_zone(info) return info def get_volume_stats(self, refresh=False): """Get volume stats.""" LOG.debug('get_volume_stats, refresh: %s, Enter method.', refresh) pool_name = None if refresh: data, pool_name = self.common.update_volume_stats() backend_name = self.configuration.safe_get('volume_backend_name') data['volume_backend_name'] = backend_name or 'FJDXFCDriver' data['storage_protocol'] = constants.FC self._stats = data LOG.debug('get_volume_stats, ' 'pool name: %s, Exit method.', pool_name) return self._stats def extend_volume(self, volume, new_size): """Extend volume.""" LOG.debug('extend_volume, ' 'volume id: %s, Enter method.', volume['id']) used_pool_name = self.common.extend_volume(volume, new_size) LOG.debug('extend_volume, ' 'used pool name: %s, Exit method.', used_pool_name) def update_migrated_volume(self, ctxt, volume, new_volume, original_volume_status): """Update migrated volume.""" LOG.debug('update_migrated_volume, ' 'source volume id: %(s_id)s, ' 'target volume id: %(t_id)s, Enter method.', {'s_id': volume['id'], 't_id': new_volume['id']}) model_update = self.common.update_migrated_volume( ctxt, volume, new_volume) LOG.debug('update_migrated_volume, ' 'target volume meta: %s, Exit method.', model_update) return model_update def revert_to_snapshot(self, context, volume, snapshot): """Revert volume to snapshot.""" return self.common.revert_to_snapshot(volume, snapshot) def _get_metadata(self, volume): v_metadata = volume.get('volume_metadata') if v_metadata: ret = {data['key']: data['value'] for data in v_metadata} else: ret = volume.get('metadata', {}) return ret ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/drivers/fujitsu/eternus_dx/eternus_dx_iscsi.py0000664000175000017500000001516700000000000027127 0ustar00zuulzuul00000000000000# Copyright (c) 2015 FUJITSU LIMITED # Copyright (c) 2012 EMC Corporation. # Copyright (c) 2012 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
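# Illustrative cinder.conf backend section for the FC driver defined above.
# A minimal sketch only: the section name [fujitsu_fc] is a placeholder, and
# any option beyond volume_driver / volume_backend_name (for example the
# backend connection settings) is intentionally omitted rather than guessed.
#
#   [fujitsu_fc]
#   volume_driver = cinder.volume.drivers.fujitsu.eternus_dx.eternus_dx_fc.FJDXFCDriver
#   volume_backend_name = FJDXFCDriver
#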
# """iSCSI Cinder Volume driver for Fujitsu ETERNUS DX S3 series.""" from oslo_log import log as logging from cinder.common import constants from cinder import exception from cinder.i18n import _ from cinder import interface from cinder.volume import driver from cinder.volume.drivers.fujitsu.eternus_dx import eternus_dx_common LOG = logging.getLogger(__name__) @interface.volumedriver class FJDXISCSIDriver(driver.ISCSIDriver): """iSCSI Cinder Volume Driver for Fujitsu ETERNUS DX S3 series.""" # ThirdPartySystems wiki page CI_WIKI_NAME = "Fujitsu_ETERNUS_CI" VERSION = eternus_dx_common.FJDXCommon.VERSION def __init__(self, *args, **kwargs): super(FJDXISCSIDriver, self).__init__(*args, **kwargs) self.common = eternus_dx_common.FJDXCommon( 'iSCSI', configuration=self.configuration) self.VERSION = self.common.VERSION @staticmethod def get_driver_options(): return eternus_dx_common.FJDXCommon.get_driver_options() def check_for_setup_error(self): if not self.common.pywbemAvailable: msg = _('pywbem could not be imported! ' 'pywbem is necessary for this volume driver.') LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) def create_volume(self, volume): """Create volume.""" model_update = self.common.create_volume(volume) return model_update def create_volume_from_snapshot(self, volume, snapshot): """Creates a volume from a snapshot.""" element_path, metadata = ( self.common.create_volume_from_snapshot(volume, snapshot)) v_metadata = volume.get('volume_metadata') if v_metadata: for data in v_metadata: metadata[data['key']] = data['value'] else: v_metadata = volume.get('metadata', {}) metadata.update(v_metadata) return {'provider_location': str(element_path), 'metadata': metadata} def create_cloned_volume(self, volume, src_vref): """Create cloned volume.""" element_path, metadata = ( self.common.create_cloned_volume(volume, src_vref)) v_metadata = volume.get('volume_metadata') if v_metadata: for data in v_metadata: metadata[data['key']] = data['value'] else: v_metadata = volume.get('metadata', {}) metadata.update(v_metadata) return {'provider_location': str(element_path), 'metadata': metadata} def delete_volume(self, volume): """Delete volume on ETERNUS.""" LOG.debug('delete_volume, ' 'volume id: %s, Enter method.', volume['id']) self.common.delete_volume(volume) LOG.debug('delete_volume, ' 'volume id: %s, delete succeed.', volume['id']) def create_snapshot(self, snapshot): """Creates a snapshot.""" LOG.debug('create_snapshot, ' 'snap id: %(sid)s, volume id: %(vid)s, Enter method.', {'sid': snapshot['id'], 'vid': snapshot['volume_id']}) model_update = self.common.create_snapshot(snapshot) LOG.debug('create_snapshot, info: %s, Exit method.', model_update['metadata']) return model_update def delete_snapshot(self, snapshot): """Deletes a snapshot.""" self.common.delete_snapshot(snapshot) def ensure_export(self, context, volume): """Driver entry point to get the export info for an existing volume.""" return def create_export(self, context, volume, connector): """Driver entry point to get the export info for a new volume.""" return def remove_export(self, context, volume): """Driver entry point to remove an export for a volume.""" return def initialize_connection(self, volume, connector): """Allow connection to connector and return connection info.""" info = self.common.initialize_connection(volume, connector) return info def terminate_connection(self, volume, connector, **kwargs): """Disallow connection from connector.""" self.common.terminate_connection(volume, connector) def 
get_volume_stats(self, refresh=False): """Get volume stats.""" LOG.debug('get_volume_stats, refresh: %s, Enter method.', refresh) pool_name = None if refresh: data, pool_name = self.common.update_volume_stats() backend_name = self.configuration.safe_get('volume_backend_name') data['volume_backend_name'] = backend_name or 'FJDXISCSIDriver' data['storage_protocol'] = constants.ISCSI self._stats = data LOG.debug('get_volume_stats, ' 'pool name: %s, Exit method.', pool_name) return self._stats def extend_volume(self, volume, new_size): """Extend volume.""" LOG.debug('extend_volume, ' 'volume id: %s, Enter method.', volume['id']) used_pool_name = self.common.extend_volume(volume, new_size) LOG.debug('extend_volume, ' 'used pool name: %s, Exit method.', used_pool_name) def update_migrated_volume(self, ctxt, volume, new_volume, original_volume_status): """Update migrated volume.""" LOG.debug('update_migrated_volume, ' 'source volume id: %(s_id)s, ' 'target volume id: %(t_id)s, Enter method.', {'s_id': volume['id'], 't_id': new_volume['id']}) model_update = self.common.update_migrated_volume( ctxt, volume, new_volume) LOG.debug('update_migrated_volume, ' 'target volume meta: %s, Exit method.', model_update) return model_update def revert_to_snapshot(self, context, volume, snapshot): """Revert volume to snapshot.""" return self.common.revert_to_snapshot(volume, snapshot) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315577.3511207 cinder-27.0.0/cinder/volume/drivers/fungible/0000775000175000017500000000000000000000000021113 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/drivers/fungible/__init__.py0000664000175000017500000000000000000000000023212 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/drivers/fungible/constants.py0000664000175000017500000000356300000000000023510 0ustar00zuulzuul00000000000000# (c) Copyright 2022 Fungible, Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
""" Define all constants required for fungible driver """ # API constants VERSION = '1.0.0' STATIC_URL = '/FunCC/v1' # Volume type constants VOLUME_TYPE_EC = 'VOL_TYPE_BLK_EC' VOLUME_TYPE_REPLICA = 'VOL_TYPE_BLK_REPLICA' VOLUME_TYPE_RAW = 'VOL_TYPE_BLK_LOCAL_THIN' VOLUME_TYPE_RF1 = 'VOL_TYPE_BLK_RF1' # General constants FALSE = 'false' TRUE = 'true' BOOLEAN = [TRUE, FALSE] BYTES_PER_GIB = 1073741824 FSC_IOPS_IMG_MIG = "iops_for_image_migration" # Extra specs constants FSC_QOS_BAND = 'fungible:qos_band' FSC_SPACE_ALLOCATION_POLICY = 'fungible:space_allocation_policy' FSC_COMPRESSION = 'fungible:compression' FSC_EC_SCHEME = 'fungible:ec_scheme' FSC_SNAPSHOTS = "fungible:snapshots" FSC_KMIP_SECRET_KEY = 'fungible:kmip_secret_key' FSC_VOL_TYPE = 'fungible:vol_type' FSC_BLK_SIZE = "fungible:block_size" FSC_FD_IDS = 'fungible:fault_domain_ids' FSC_FD_OP = 'fungible:fd_op' BLOCK_SIZE_4K = '4096' BLOCK_SIZE_8K = '8192' BLOCK_SIZE_16K = '16384' BLOCK_SIZE = [BLOCK_SIZE_4K, BLOCK_SIZE_8K, BLOCK_SIZE_16K] FSC_FD_OPS = ['SUGGESTED_FD_IDS', 'EXCLUDE_FD_IDS', 'ASSIGNED_FD_ID'] SPACE_ALLOCATION_POLICY = ['balanced', 'write_optimized', 'capacity_optimized'] EC_8_2 = '8_2' EC_4_2 = '4_2' EC_2_1 = '2_1' QOS_BAND = { 'gold': 0, 'silver': 1, 'bronze': 2 } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/drivers/fungible/driver.py0000664000175000017500000013150500000000000022765 0ustar00zuulzuul00000000000000# (c) Copyright 2022 Fungible, Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""Volume driver for Fungible Storage Cluster""" import json import os import time from oslo_config import cfg from oslo_log import log from oslo_utils import excutils from cinder.common import constants as cinderconstants from cinder import context from cinder import exception from cinder.i18n import _ from cinder.image import image_utils from cinder import interface from cinder.objects import fields from cinder import utils from cinder.volume import driver from cinder.volume.drivers.fungible import constants from cinder.volume.drivers.fungible import rest_client as rest_api from cinder.volume.drivers.fungible import swagger_api_client as swagger_client from cinder.volume.drivers.san import san from cinder.volume import volume_types from cinder.volume import volume_utils LOG = log.getLogger(__name__) fungible_opts = [ cfg.PortOpt('nvme_connect_port', default=4420, help='The port number to be used' ' when doing nvme connect from host'), cfg.BoolOpt('api_enable_ssl', default=True, help='Specify whether to use SSL' ' or not when accessing the composer APIs'), cfg.IntOpt('iops_for_image_migration', default=250000, help='Maximum read IOPS that volume can get' ' when reading data from the volume during' ' host assisted migration'), cfg.IntOpt('fsc_clone_volume_timeout', default=1800, help='Create clone volume timeout in seconds') ] CONF = cfg.CONF CONF.register_opts(fungible_opts) @interface.volumedriver class FungibleDriver(driver.BaseVD): """Fungible Storage driver Fungible driver is a volume driver for Fungible Storage. Version history: 1.0.0 - First source driver version """ VERSION = constants.VERSION CI_WIKI_NAME = "Fungible_Storage_CI" def __init__(self, *args, **kwargs): """Initialize the driver.""" super(FungibleDriver, self).__init__(*args, **kwargs) self.configuration.append_config_values(san.san_opts) self.configuration.append_config_values(fungible_opts) self.rest_client = None self.use_multipath = True def do_setup(self, context): """Initial setup of driver variables""" self.rest_client = rest_api.RestClient(self.configuration) self.rest_client.do_setup() @staticmethod def get_driver_options(): additional_opts = driver.BaseVD._get_oslo_driver_opts( "san_ip", "san_login", "san_password", "san_api_port" ) return fungible_opts + additional_opts @staticmethod def wait_for_device(device_path): time.sleep(1) # wait for a second time_to_wait = 4 # 4 seconds time_counter = 0 while not os.path.exists(device_path): time.sleep(1) # wait for a second time_counter += 1 if time_counter > time_to_wait: break def check_for_setup_error(self): """Verify that requirements are in place to use Fungible Storage Backend. """ try: # backend call for health check fungible_res = self.rest_client.check_for_setup_error() if fungible_res["status"]: LOG.info( "Backend Storage Api Status is %(message)s", {'message': fungible_res['message']}) else: LOG.error( "Backend api status is : %(status)s", {'status': fungible_res['status']}) raise exception.VolumeBackendAPIException( data=_( "Backend Storage Api Status is " "%(message)s, Error Message: %(err_msg)s)") % { "message": fungible_res["message"], "err_msg": fungible_res["error_message"] } ) except swagger_client.ApiException as e: LOG.error( "[check_for_setup_error]Request to BackendApi Failed -> %s", e.body ) error = json.loads(e.body) raise exception.VolumeBackendAPIException( data=_( "Failed to get backend api status, " "error message: %(error)s." 
% {'error': error['error_message']} ) ) except Exception as e: LOG.error("[check_for_setup_error]Error occurred: %s", e) raise exception.VolumeBackendAPIException( data=_( "Failed to get backend api response: %(args)s" % { 'args': e.args } ) ) @staticmethod def _get_volume_type_extra_specs(self, volume): """Get the Volume type using volume_type_id :param: volume object :return: volume type & extra specs """ specs = {} vol_type = "" ctxt = context.get_admin_context() type_id = volume["volume_type_id"] if type_id: LOG.debug("[_get_volume_type_extra_specs]type_id=%s", type_id) # get volume type name by volume type id volume_type = volume_types.get_volume_type(ctxt, type_id) LOG.debug("[_get_volume_type_extra_specs]volume_type=%s", volume_type) specs = volume_type.get("extra_specs") if constants.FSC_VOL_TYPE in specs: vol_type = volume_type.get( "extra_specs").get(constants.FSC_VOL_TYPE) else: error_msg = ( "Key %(type)s was not found in extraspecs" % { 'type': constants.FSC_VOL_TYPE } ) LOG.error("[create_volume]Error occurred: %s", error_msg) raise exception.VolumeBackendAPIException( data=_( "Failed to create volume %(display_name)s: " "%(error)s." % {'error': error_msg, 'display_name': volume.display_name} ) ) for key, value in specs.items(): specs[key] = value return specs, vol_type def _get_dpu_enabled_host_list(self, ports): host_uuid_list = list( map(lambda port: port["host_uuid"], ports.values())) hosts = self.rest_client.get_hosts_subset(host_uuid_list) hosts_fac_enabled = {host["host_uuid"]: host["fac_enabled"] for host in hosts} return hosts_fac_enabled def create_volume(self, volume): """Create volume on Fungible storage backend. :param volume: volume to be created :return: volume model updates """ fungible_specs = {} volume_type = "" if "volume_type_id" in volume: fungible_specs, volume_type = self._get_volume_type_extra_specs( self, volume ) # request fungible to create a volume try: fungible_res = self.rest_client.create_volume( volume, fungible_specs, volume_type ) provider_id = fungible_res["data"]["uuid"] # preparing model updates dict to return model_updates = {"provider_id": provider_id, "size": volume["size"]} LOG.info( "Volume created successfully %s. " "Volume size: %s. ", volume['id'], volume["size"] ) return model_updates except swagger_client.ApiException as e: LOG.error( "[create_volume]Request to BackendApi Failed -> %s", e.body) error = json.loads(e.body) raise exception.VolumeBackendAPIException( data=_( "Failed to create volume %(display_name)s: " "%(error)s." 
% {'error': error['error_message'], 'display_name': volume['display_name']} ) ) except Exception as e: LOG.error("[create_volume]Error occurred: %s", e) raise exception.VolumeBackendAPIException( data=_( "Failed to create volume %(name)s: %(args)s" % { 'name': volume['display_name'], 'args': e.args } ) ) def create_volume_from_snapshot(self, volume, snapshot): """Create Volume on Fungible storage backend Args: volume: volume to be created snapshot: source snapshot from which the volume to be created Returns:: volume model updates """ volume_type = "" fungible_specs = {} if "volume_type_id" in volume: fungible_specs, volume_type = self._get_volume_type_extra_specs( self, volume ) # request fungible to create a volume try: fungible_res = self.rest_client.create_volume( volume, fungible_specs, volume_type, snapshot ) provider_id = fungible_res["data"]["uuid"] # preparing model updates dict to return model_updates = {"provider_id": provider_id, "size": volume["size"]} LOG.info( "Volume created from snapshot successfully with volume " "ID: %s. Volume size: %s. ", volume['id'], volume['size'] ) return model_updates except swagger_client.ApiException as e: LOG.error( "[create_volume_from_snapshot]Request to BackendApi " "Failed -> %s", e.body ) error = json.loads(e.body) raise exception.VolumeBackendAPIException( data=_( "Failed to create volume from snapshot with volume " "ID: %(name)s: %(error)s." % {'name': volume['display_name'], 'error': error['error_message']} ) ) except Exception as e: LOG.error("[create_volume_from_snapshot]Error occurred: %s", e) raise exception.VolumeBackendAPIException( data=_( "Failed to create volume %(name)s: %(args)s" % { 'name': volume['display_name'], 'args': e.args } ) ) def delete_volume(self, volume): """Delete the available volume :param volume: volume to be deleted :return: none """ LOG.info("Request to delete volume : %s.", volume['id']) if "provider_id" in volume: if volume["provider_id"]: # request fungible to delete volume try: del_res = self.rest_client.delete_volume( volume["provider_id"]) LOG.info("Volume delete : %s.", del_res['message']) except swagger_client.ApiException as e: LOG.error( "[delete_volume]Request to BackendApi Failed -> %s", e.body ) error = json.loads(e.body) raise exception.VolumeBackendAPIException( data=_( "Failed to delete volume " "{volume['display_name']}: " "%(error)s." % {'error': error['error_message']} ) ) except Exception as e: LOG.error("[delete_volume]Error occurred: %s", e) raise exception.VolumeBackendAPIException( data=_( "Failed to delete volume %(name)s: %(args)s" % { 'name': volume['display_name'], 'args': e.args } ) ) else: LOG.info("Volume backend UUID not found in volume details.") else: raise exception.VolumeBackendAPIException( data=_("Failed to delete volume: %s." % volume["id"]) ) def create_cloned_volume(self, volume, src_vref): """Create volume from volume :param volume: volume to be created :param src_vref: source volume :return: volume model updates Logic: 1. create new volume. 2. add copy volume task. 3. in loop check for task status 4. delete volume copy task """ snapshot_id = None try: src_volume_uuid = src_vref["provider_id"] # create a snapshot to copy the data from fungible_res = self.rest_client.create_snapshot( src_volume_uuid, src_volume_uuid ) snapshot_id = fungible_res["data"]["uuid"] # create new volume. new_volume = self.create_volume(volume) new_volume_uuid = new_volume.get("provider_id") LOG.info( "[clone_volume] new volume is created." 
" volume uuid: %s", new_volume_uuid ) # prepare response to return model_updates = {"provider_id": new_volume_uuid, "size": volume["size"]} # add task to copy volume add_task_response = self.rest_client.copy_volume( new_volume_uuid, snapshot_id ) # check task status in loop task_uuid = add_task_response["data"]["task_uuid"] LOG.info( "[clone_volume] Copy volume task is added. task_uuid: %s", task_uuid ) status = "RUNNING" error_message = "" sleep_for_seconds = 1 while status == "RUNNING": # Wait before checking for the task status # This is done to reduce number of api calls to backend # Wait time is increased exponentially to a maximum of 8 secs time.sleep(sleep_for_seconds) if sleep_for_seconds < 8: sleep_for_seconds = sleep_for_seconds * 2 task_response = self.rest_client.get_volume_copy_task( task_uuid) status = task_response["data"]["task_state"] error_message = task_response.get("error_message") LOG.info( "[clone_volume] Copy volume task with task_uuid:" " %s is complete. status: %s", task_uuid, status ) # delete the snapshot created for data copy if snapshot_id: fungible_res = self.rest_client.delete_snapshot( snapshot_id ) snapshot_id = None LOG.info( "Snapshot deleted successfully: %s.", fungible_res['message'] ) if status == "FAILED": # Delete the new volume created since the data copy failed del_res = self.rest_client.delete_volume(new_volume_uuid) LOG.info("Volume delete : %s.", del_res['message']) raise exception.VolumeBackendAPIException( data=_( "Failed to create new volume %(new_volume_uuid)s: " "from source volume %(src_volume_uuid)s %(error)s." % { 'new_volume_uuid': new_volume_uuid, 'src_volume_uuid': src_volume_uuid, 'error': error_message } ) ) try: self.rest_client.delete_volume_copy_task(task_uuid) except swagger_client.ApiException as e: # Just log warning as volume copy is already completed. LOG.warning( "[clone_volume] request to delete task %s" " to BackendApi Failed " "-> %s", task_uuid, e.body ) except swagger_client.ApiException as e: LOG.error("[clone_volume] request to BackendApi Failed. %s", e.body) error = json.loads(e.body) # delete the snapshot created for data copy if snapshot_id: fungible_res = self.rest_client.delete_snapshot( snapshot_id ) snapshot_id = None LOG.info( "Snapshot deleted successfully: %s.", fungible_res['message'] ) raise exception.VolumeBackendAPIException( data=_( "Failed to create new volume %(new_volume_uuid)s: " "from source volume %(src_volume_uuid)s %(error)s." % { 'new_volume_uuid': new_volume_uuid, 'src_volume_uuid': src_volume_uuid, 'error': error['error_message'] } ) ) except Exception as e: # delete the snapshot created for data copy if snapshot_id: fungible_res = self.rest_client.delete_snapshot( snapshot_id ) snapshot_id = None LOG.info( "Snapshot deleted successfully: %s.", fungible_res['message'] ) LOG.error("[create_clone_volume]Error occurred: %s", e) raise exception.VolumeBackendAPIException( data=_( "Failed to create volume %(name)s: %(args)s" % { 'name': volume['display_name'], 'args': e.args } ) ) return model_updates def ensure_export(self, context, volume): pass def create_export(self, context, volume, connector): pass def remove_export(self, context, volume): pass def initialize_connection(self, volume, connector): """Initialize connection and return connection info. 
:param volume: the volume object :param connector: the connector object :return: connection info dict """ # check for nqn in connector host_nqn = connector.get("nqn") if not host_nqn: host_name = connector.get("host") if host_name: host_nqn = "nqn.2015-09.com.host:" + host_name if not host_nqn: raise exception.VolumeBackendAPIException( data=_("initialize_connection error: no host nqn available!") ) provider_id = volume.get("provider_id") LOG.info("initialize_connection - provider_id=%s", provider_id) if not provider_id: raise exception.VolumeBackendAPIException( data=_("initialize_connection error: no uuid available!") ) try: img_mig_iops = False # high iops set to true when volume is uploading to image # or downloading from image if constants.FSC_IOPS_IMG_MIG in connector: img_mig_iops = connector.get(constants.FSC_IOPS_IMG_MIG) # high iops set to true when volume is migrating mig_status = [ fields.VolumeMigrationStatus.SUCCESS, ] if volume.get("migration_status") is not None: if volume.get("migration_status") not in mig_status: img_mig_iops = True # get host_uuid from the host_nqn LOG.info("initialize_connection - host_nqn=%s", host_nqn) host_uuid = self.rest_client.get_host_uuid_from_host_nqn(host_nqn) # create host if it does not exists if host_uuid is None: host_create_response = self.rest_client.create_host(host_nqn) host_uuid = host_create_response["data"]["uuid"] LOG.info("initialize_connection - host_uuid=%s", host_uuid) host = self.rest_client.get_host_details(host_uuid) # request composer to attach volume self.rest_client.attach_volume( uuid=provider_id, host_uuid=host_uuid, fac_enabled=host["fac_enabled"], iops=img_mig_iops, ) if host["fac_enabled"] is False: volume_details = self.rest_client.get_volume_detail( uuid=provider_id) target_nqn = volume_details.get("data").get("subsys_nqn") get_config_value = self.configuration.safe_get port = get_config_value("nvme_connect_port") topology_response = self.rest_client.get_topology() LOG.info( "initialize_connection - topology_response=%s", topology_response ) str_portals = [] # find primary dpu ip primary_dpu = volume_details.get("data").get("dpu") LOG.info("initialize_connection - primary_dpu=%s", primary_dpu) if primary_dpu: if topology_response["status"] is True: topology_data = topology_response.get("data") for device in topology_data.values(): for dpu in device["dpus"]: if dpu["uuid"] == primary_dpu: portal_ip = str(dpu["dataplane_ip"]) portal_port = str(port) portal_transport = "tcp" str_portals.append( ( portal_ip, portal_port, portal_transport ) ) # find secondary dpu ip secondary_dpu = volume_details.get("data").get("secy_dpu") LOG.info( "initialize_connection - secondary_dpu=%s", secondary_dpu) if secondary_dpu: if topology_response["status"] is True: topology_data = topology_response.get("data") for device in topology_data.values(): for dpu in device["dpus"]: if dpu["uuid"] == secondary_dpu: portal_ip = str(dpu["dataplane_ip"]) portal_port = str(port) portal_transport = "tcp" str_portals.append( ( portal_ip, portal_port, portal_transport ) ) # preparing connection info dict to return vol_nguid = provider_id.replace("-", "") data = { "vol_uuid": provider_id, "target_nqn": str(target_nqn), "host_nqn": host_nqn, "portals": str_portals, "volume_nguid": vol_nguid, } conn_info = {"driver_volume_type": "nvmeof", "data": data} LOG.info("initialize_connection - conn_info=%s", conn_info) else: raise exception.VolumeBackendAPIException( data=_("FAC enabled hosts are not supported") ) return conn_info except 
swagger_client.ApiException as e: LOG.error( "[initialize_connection]Request to BackendApi Failed -> %s", e.body ) error = json.loads(e.body) raise exception.VolumeBackendAPIException( data=_( "Failed to attach the volume %(name)s: %(error)s." % { 'name': volume.get('display_name'), 'error': error['error_message'] } ) ) except Exception as e: LOG.error("[initialize_connection]Error occurred: %s", e) raise exception.VolumeBackendAPIException( data=_( "Failed to attach volume %(name)s: %(args)s" % { 'name': volume.get('display_name'), 'args': e.args } ) ) def terminate_connection(self, volume, connector, **kwargs): """Terminate connection for detaching the port from volume. :param volume: the volume object :param connector: the connector object """ provider_id = volume.get("provider_id") LOG.info("terminate_connection - provider_id=%s", provider_id) if not provider_id: raise exception.VolumeBackendAPIException( data=_("terminate_connection error: no provider_id available.") ) try: volume_details = self.rest_client.get_volume_detail( uuid=provider_id) LOG.info("terminate_connection - volume_details=%s", volume_details) if connector is None: # None connector means force-detach # Remove all ports from backend ports = volume_details["data"]["ports"] if ports: # Get the host details for each attachment hosts_fac_enabled = self._get_dpu_enabled_host_list(ports) # request composer to detach volume for port_id in ports.keys(): if ( ports.get(port_id)["transport"] == "PCI" or not hosts_fac_enabled[ ports.get(port_id)["host_uuid"] ] ): self.rest_client.detach_volume(port_id) LOG.info("Removed all the ports from storage backend.") return host_nqn = connector.get("nqn") if not host_nqn: host_name = connector.get("host") if host_name: host_nqn = "nqn.2015-09.com.host:" + host_name if not host_nqn: raise exception.VolumeBackendAPIException( data=_("terminate_connection error: " "no host nqn available.") ) # get host_uuid from the host_nqn LOG.info("terminate_connection - host_nqn=%s", host_nqn) host_uuid = self.rest_client.get_host_uuid_from_host_nqn(host_nqn) LOG.info("terminate_connection - host_uuid=%s", host_uuid) ports = volume_details["data"]["ports"] if host_uuid and ports: port_ids = [ port for port in ports.keys() if ports.get(port)["host_uuid"] == host_uuid ] # request fungible to detach volume if port_ids: # Get the host details for each attachment hosts_fac_enabled = self._get_dpu_enabled_host_list(ports) # request composer to detach volume for port_id in port_ids: if ( ports.get(port_id)["transport"] == "PCI" or not hosts_fac_enabled[ ports.get(port_id)["host_uuid"] ] ): self.rest_client.detach_volume(port_id) LOG.info( "Volume detached successfully. \ provider_id=%s", provider_id ) else: raise exception.VolumeBackendAPIException( data=_( "terminate_connection error: " "required port is not available for detach." ) ) else: raise exception.VolumeBackendAPIException( data=_( "terminate_connection error: " "Volume not attached to any ports." ) ) except swagger_client.ApiException as e: LOG.error( "[terminate_connection]Request to BackendApi Failed -> %s", e.body ) error = json.loads(e.body) raise exception.VolumeBackendAPIException( data=_( "Failed to detach the volume " "%(name)s: %(error)s." 
% { 'name': volume.get('display_name'), 'error': error['error_message'] } ) ) except Exception as e: LOG.error("[terminate_connection]Error occurred: %s", e) raise exception.VolumeBackendAPIException( data=_( "Failed to detach volume %(name)s: %(args)s" % { 'name': volume.get('display_name'), 'args': e.args } ) ) def create_snapshot(self, snapshot): """Create volume snapshot on storage backend. :param snapshot: volume snapshot to be created :return: snapshot model updates """ if "provider_id" in snapshot.volume: if snapshot.volume.provider_id: try: # request fungible to create snapshot fungible_res = self.rest_client.create_snapshot( snapshot.volume.provider_id, snapshot.id ) provider_id = fungible_res["data"]["uuid"] # fungible model updates dict to return model_updates = { "provider_id": provider_id, } LOG.info( "Snapshot created successfully %s. ", snapshot.id) return model_updates except swagger_client.ApiException as e: LOG.error( "[create_snapshot]Request to BackendApi Failed -> %s", e.body ) error = json.loads(e.body) raise exception.VolumeBackendAPIException( data=_( "Failed to create the snapshot " "%(name)s: %(error)s." ) % { 'name': snapshot.display_name, 'error': error['error_message'] } ) except Exception as e: LOG.error("[create_snapshot]Error occurred: %s", e) raise exception.VolumeBackendAPIException( data=_( "Failed to create snapshot %(name)s: %(args)s" % { 'name': snapshot.display_name, 'args': e.args } ) ) else: raise exception.VolumeBackendAPIException( data=_( "Failed to create snapshot: volume provider_id " "not found in snapshot's volume details." ) ) else: raise exception.VolumeBackendAPIException( data=_( "Failed to create snapshot, volume provider_id attribute " "not found in snapshot details :%s." % snapshot.id ) ) def delete_snapshot(self, snapshot): """Delete snapshot from storage backend. :param snapshot: snapshot to be deleted """ LOG.info("Request to delete snapshot : %s.", snapshot['id']) if "provider_id" in snapshot: if snapshot["provider_id"]: try: # request fungible to delete snapshot fungible_res = self.rest_client.delete_snapshot( snapshot["provider_id"] ) LOG.info( "Snapshot deleted successfully: %s.", fungible_res['message'] ) except swagger_client.ApiException as e: LOG.error( "[delete_snapshot]Request to BackendApi Failed -> %s", e.body ) error = json.loads(e.body) raise exception.VolumeBackendAPIException( data=_( "Failed to delete the snapshot " "%(name)s: %(error)s." % { 'name': snapshot['display_name'], 'error': error['error_message'] } ) ) except Exception as e: LOG.error("[delete_snapshot]Error occurred: %s", e) raise exception.VolumeBackendAPIException( data=_( "Failed to delete snapshot %(name)s: %(args)s" % { 'name': snapshot['display_name'], 'args': e.args } ) ) else: LOG.info("Snapshot backend UUID not found in snapshot " "details.") else: raise exception.VolumeBackendAPIException( data=_( "Failed to delete snapshot, provider_id attribute " "not found in snapshot details :%s." % snapshot["id"] ) ) def extend_volume(self, volume, new_size): """Extend size of existing fungible volume. 
:param volume: volume to be extended :param new_size: volume size after extending """ LOG.info("Request to extend volume : %s.", volume['id']) if "provider_id" in volume: if volume["provider_id"]: try: # request fungible to extend volume self.rest_client.extend_volume( volume["provider_id"], new_size) LOG.info( "Volume %s is resized successfully", volume['id']) except swagger_client.ApiException as e: LOG.error( "[extend_volume]Request to BackendApi Failed -> %s", e.body ) error = json.loads(e.body) raise exception.VolumeBackendAPIException( data=_( "Failed to extend the volume " "%(name)s: %(error)s." % { 'name': volume.get('display_name'), 'error': error['error_message'] } ) ) except Exception as e: LOG.error("[extend_volume]Error occurred: {e}") raise exception.VolumeBackendAPIException( data=_( "Failed to extend volume %(name)s: %(args)s" % { 'name': volume.get('display_name'), 'args': e.args } ) ) else: LOG.warning( "Volume backend UUID not found in volume details.") else: raise exception.VolumeBackendAPIException( data=_( "Failed to extend volume, provider_id attribute " "not found in volume details :%s." % volume["id"] ) ) def copy_volume_to_image(self, context, volume, image_service, image_meta): """Copy the volume to the specified image.""" LOG.info( "Copy volume %s to image on " "image service %s. Image meta: %s.", volume['id'], image_service, image_meta ) use_multipath = self.configuration.use_multipath_for_image_xfer enforce_multipath = self.configuration.enforce_multipath_for_image_xfer if hasattr(utils, "brick_get_connector_properties"): properties = utils.brick_get_connector_properties( use_multipath, enforce_multipath ) else: properties = volume_utils.brick_get_connector_properties( use_multipath, enforce_multipath ) # added iops parameter in properties to # perform high iops while uploading volume to image properties[constants.FSC_IOPS_IMG_MIG] = True attach_info, volume = self._attach_volume(context, volume, properties) try: # Wait until the device path appears self.wait_for_device(attach_info["device"]["path"]) image_utils.upload_volume( context, image_service, image_meta, attach_info["device"]["path"], compress=True, ) LOG.debug( "Copy volume %s to image complete", volume['id'] ) finally: # Since attached volume was not used for writing we can force # detach it self._detach_volume( context, attach_info, volume, properties, force=True, ignore_errors=True ) def copy_image_to_volume(self, context, volume, image_service, image_id, disable_sparse=False): """Fetch the image from image_service and write it to the volume.""" LOG.info( "Copy image %s from image service %s " "to volume %s.", image_id, image_service, volume['id'] ) use_multipath = self.configuration.use_multipath_for_image_xfer enforce_multipath = self.configuration.enforce_multipath_for_image_xfer if hasattr(utils, "brick_get_connector_properties"): properties = utils.brick_get_connector_properties( use_multipath, enforce_multipath ) else: properties = volume_utils.brick_get_connector_properties( use_multipath, enforce_multipath ) # added iops parameter in properties to # perform high iops while downloading image to volume properties[constants.FSC_IOPS_IMG_MIG] = True attach_info, volume = self._attach_volume(context, volume, properties) try: # Wait until the device path appears self.wait_for_device(attach_info["device"]["path"]) image_utils.fetch_to_raw( context, image_service, image_id, attach_info["device"]["path"], self.configuration.volume_dd_blocksize, size=volume["size"], disable_sparse=disable_sparse, ) 
LOG.debug( "Copy image %s to volume %s complete", image_id, volume['id'] ) except exception.ImageTooBig: with excutils.save_and_reraise_exception(): LOG.exception( "Copying image %(image_id)s to " "volume failed due to insufficient available " "space.", {"image_id": image_id}, ) finally: self._detach_volume(context, attach_info, volume, properties, force=True) def update_migrated_volume(self, ctxt, volume, new_volume, original_volume_status): """Update volume name of new fungible volume. Original volume is renamed first since fungible does not allow multiple volumes to have same name. """ try: new_name = volume["id"] LOG.info("Rename volume from %s to %s.", new_volume['id'], new_name) LOG.info("Update backend volume name to %s", new_name) # if new volume provider id is None, # volume will not be renamed. if new_volume["provider_id"]: # if original provider id is None & volume host doesn't match, # original volume will not be renamed if volume["provider_id"] and (volume["host"] == new_volume["host"]): try: self.rest_client.rename_volume( volume["provider_id"], "migrating_" + new_name ) except swagger_client.ApiException as e: LOG.warning( "Failed to rename the original volume %s.", e.body ) else: LOG.warning( "Original volume backend UUID not found in " "volume details." ) self.rest_client.rename_volume( new_volume["provider_id"], new_name) else: LOG.warning( "New volume backend UUID not found in volume details.") return {"_name_id": None} except swagger_client.ApiException as e: LOG.error( "[update_migrated_volume]Request to BackendApi Failed -> %s", e.body ) error = json.loads(e.body) raise exception.VolumeBackendAPIException( data=_( "Failed to rename the volume %(name)s:" " %(error)s." % { 'name': volume.get('display_name'), 'error': error['error_message'] } ) ) except Exception as e: LOG.error("[update_migrated_volume]Error occurred: {e}") raise exception.VolumeBackendAPIException( data=_( "Failed to rename volume %(name)s: %(args)s" % { 'name': volume.get('display_name'), 'args': e.args } ) ) def get_volume_stats(self, refresh=False): """Get the volume stats""" data = { "volume_backend_name": self.configuration.safe_get("volume_backend_name"), "vendor_name": "Fungible Inc.", "driver_version": self.VERSION, "storage_protocol": cinderconstants.NVMEOF_TCP, "total_capacity_gb": "unknown", "free_capacity_gb": "unknown", } return data ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/drivers/fungible/rest_client.py0000664000175000017500000004065300000000000024010 0ustar00zuulzuul00000000000000# (c) Copyright 2022 Fungible, Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
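# For reference, a sketch of the connection info dict assembled by the
# Fungible driver's initialize_connection above for a non-FAC host. The key
# names and the 'nvmeof' driver_volume_type follow the driver code; the
# concrete values shown here are placeholders.
#
#   {'driver_volume_type': 'nvmeof',
#    'data': {'vol_uuid': '<backend volume uuid>',
#             'target_nqn': '<subsys_nqn of the volume>',
#             'host_nqn': 'nqn.2015-09.com.host:<host name>',
#             'portals': [('<dpu dataplane ip>', '4420', 'tcp')],
#             'volume_nguid': '<volume uuid with dashes removed>'}}
#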
from oslo_log import log as logging from cinder import exception from cinder.volume.drivers.fungible import constants from cinder.volume.drivers.fungible import \ swagger_api_client as swagger_client LOG = logging.getLogger(__name__) class RestClient(object): def __init__(self, configuration): """Initialize the api request fields.""" self.configuration = configuration self.rest_ip = None self.rest_port = None self.is_configured = False @staticmethod def log_error(error_msg): """Raise exception with error message""" LOG.exception(error_msg) raise exception.VolumeBackendAPIException(data=error_msg) def do_setup(self): """Initial setup of API request variables""" get_config_value = self.configuration.safe_get self.client = swagger_client.Configuration() self.client.username = get_config_value("san_login") self.client.password = get_config_value("san_password") self.rest_ip = get_config_value("san_ip") self.rest_port = get_config_value("san_api_port") protocol = "https" self.client.host = f"{protocol}://{self.rest_ip}{constants.STATIC_URL}" self.client.verify_ssl = False if not self.configuration.api_enable_ssl: protocol = "http" self.client.host = (f"{protocol}://{self.rest_ip}:{self.rest_port}" f"{constants.STATIC_URL}") LOG.info("REST server IP: %(ip)s, port: %(port)s, " "username: %(user)s.", { "ip": self.rest_ip, "port": self.rest_port, "user": self.client.username, }) self.api_storage = swagger_client.StorageApi( swagger_client.ApiClient(self.client)) self.api_gateway = swagger_client.ApigatewayApi( swagger_client.ApiClient(self.client)) self.api_topology = swagger_client.TopologyApi( swagger_client.ApiClient(self.client)) self.is_configured = True def check_for_setup_error(self): """Check status of fungible storage clusters.""" api_response = self.api_gateway.get_fc_health().to_dict() return api_response def create_volume(self, volume, fungible_specs, volume_type, snapshot=None): """Creates new volume using the specified parameters""" # Convert GB to bytes, Default 1 GB size volume_size = constants.BYTES_PER_GIB if volume['size']: volume_size = constants.BYTES_PER_GIB * volume['size'] fungible_request_obj = { "name": volume['id'], "vol_type": volume_type.upper(), "capacity": volume_size, "is_clone": False, "encrypt": False, "qos_band": constants.QOS_BAND.get('silver'), "block_size": int(constants.BLOCK_SIZE_4K) } data_protection = { "num_failed_disks": 2, "num_data_disks": 4, "num_redundant_dpus": 1, } durable_param = { "compression_effort": 2, "snap_support": True, "space_allocation_policy": 'balanced', } # Create Volume From Snapshot if snapshot is not None: fungible_request_obj["is_clone"] = True fungible_request_obj["clone_source_volume_uuid"] = \ snapshot['provider_id'] errors = [] # Validation check for Extraspecs self._validation_check(fungible_request_obj, fungible_specs, volume_type, errors, "ExtraSpecs", durable_param, data_protection) # Validation check for Metadata self._validation_check(fungible_request_obj, volume.get('metadata', {}), volume_type, errors, "Metadata", durable_param, data_protection) if len(errors) != 0: msg = "ERROR: " for error in errors: msg = msg + " | " + error self.log_error(error_msg=msg) LOG.info("create_volume: " "fungible_request_obj=%(fungible_request_obj)s", {'fungible_request_obj': fungible_request_obj}) api_response = self.api_storage.create_volume( body_volume_intent_create=fungible_request_obj).to_dict() return api_response def _validation_check(self, fungible_obj, data, volume_type, errors, prefix, durable_param, data_protection): if 
constants.FSC_KMIP_SECRET_KEY in data: if data[constants.FSC_KMIP_SECRET_KEY]: fungible_obj['encrypt'] = True fungible_obj['kmip_secret_key'] = data[ constants.FSC_KMIP_SECRET_KEY] if constants.FSC_BLK_SIZE in data: if data[constants.FSC_BLK_SIZE] in constants.BLOCK_SIZE: if (volume_type.upper() == constants.VOLUME_TYPE_RF1 and data[constants.FSC_BLK_SIZE] != constants.BLOCK_SIZE_16K): msg = ( f"{prefix} {constants.FSC_BLK_SIZE} value is invalid \ for the volume type specified") errors.append(msg) else: fungible_obj['block_size'] = int( data[constants.FSC_BLK_SIZE]) else: msg = (f"{prefix} {constants.FSC_BLK_SIZE} value is invalid") errors.append(msg) elif volume_type.upper() == constants.VOLUME_TYPE_RF1: # Set default block size for RF1 to 16K fungible_obj['block_size'] = int(constants.BLOCK_SIZE_16K) if constants.FSC_QOS_BAND in data: if data[constants.FSC_QOS_BAND].lower() in constants.QOS_BAND: fungible_obj['qos_band'] = constants.QOS_BAND.get( data[constants.FSC_QOS_BAND].lower()) else: msg = (f"{prefix} {constants.FSC_QOS_BAND} value is invalid") errors.append(msg) if (volume_type.upper() == constants.VOLUME_TYPE_RAW or volume_type.upper() == constants.VOLUME_TYPE_RF1): if constants.FSC_FD_IDS in data: ids = data[constants.FSC_FD_IDS].split(',', 2) if len(ids) <= 2: ids = [item.strip() for item in ids] fungible_obj['fault_domain_ids'] = ids else: msg = (f"{prefix} {constants.FSC_FD_IDS} - " f"Only two fault domain ids can be provided.") errors.append(msg) if constants.FSC_FD_OP in data: if (data[constants.FSC_FD_OP].upper() in constants.FSC_FD_OPS): fungible_obj['fd_op'] = data[constants.FSC_FD_OP] else: msg = (f"{prefix} {constants.FSC_FD_OP} " f"value is invalid") errors.append(msg) if (volume_type.upper() == constants.VOLUME_TYPE_REPLICA or volume_type.upper() == constants.VOLUME_TYPE_EC or volume_type.upper() == constants.VOLUME_TYPE_RF1): if constants.FSC_SPACE_ALLOCATION_POLICY in data: if (data[constants.FSC_SPACE_ALLOCATION_POLICY].lower() in constants.SPACE_ALLOCATION_POLICY): durable_param['space_allocation_policy'] = data[ constants.FSC_SPACE_ALLOCATION_POLICY] else: msg = (f"{prefix} {constants.FSC_SPACE_ALLOCATION_POLICY}" f" value is invalid") errors.append(msg) if constants.FSC_COMPRESSION in data: if (data[constants.FSC_COMPRESSION].lower() == constants.FALSE): durable_param['compression_effort'] = 0 elif (data[constants.FSC_COMPRESSION].lower() == constants.TRUE): durable_param['compression_effort'] = 2 else: msg = (f"{prefix} {constants.FSC_COMPRESSION} value is " f"invalid") errors.append(msg) if constants.FSC_SNAPSHOTS in data: if (data[constants.FSC_SNAPSHOTS].lower() in constants.BOOLEAN): if (data[constants.FSC_SNAPSHOTS].lower() == constants.FALSE): durable_param['snap_support'] = False else: msg = (f"{prefix} {constants.FSC_SNAPSHOTS} value is " f"invalid") errors.append(msg) if volume_type.upper() == constants.VOLUME_TYPE_EC: fungible_obj.update(durable_param) if constants.FSC_EC_SCHEME in data: if data[constants.FSC_EC_SCHEME] == constants.EC_8_2: data_protection['num_data_disks'] = 8 elif data[constants.FSC_EC_SCHEME] == constants.EC_4_2: data_protection['num_data_disks'] = 4 elif data[constants.FSC_EC_SCHEME] == constants.EC_2_1: data_protection['num_data_disks'] = 2 data_protection['num_failed_disks'] = 1 else: msg = (f"{prefix} {constants.FSC_EC_SCHEME} value is " f"invalid") errors.append(msg) fungible_obj["data_protection"] = data_protection elif volume_type.upper() == constants.VOLUME_TYPE_REPLICA: fungible_obj.update(durable_param) data_protection = 
{ "num_failed_disks": 1, "num_data_disks": 1, "num_redundant_dpus": 1, } fungible_obj["data_protection"] = data_protection elif volume_type.upper() == constants.VOLUME_TYPE_RF1: fungible_obj.update(durable_param) pass def delete_volume(self, volume_uuid): """Deletes the specified volume""" LOG.info("delete_volume: volume_uuid=%(volume_uuid)s", {'volume_uuid': volume_uuid}) api_response = self.api_storage.delete_volume( volume_uuid=volume_uuid).to_dict() return api_response def get_volume_detail(self, uuid): """Get volume details by uuid""" api_response = self.api_storage.get_volume(volume_uuid=uuid).to_dict() return api_response def get_host_uuid_from_host_nqn(self, host_nqn): """Get host uuid from the host_nqn supplied""" api_response = self.api_topology.get_host_id_list( host_nqn_contains=host_nqn).to_dict() host_uuids = api_response.get("data").get("host_uuids") if len(host_uuids) == 1: return host_uuids[0] else: return None def get_host_details(self, host_uuid): """Get host details for the host_uuid supplied""" api_response = self.api_topology.get_host_info( host_uuid=host_uuid).to_dict() host = api_response.get("data") return host def get_hosts_subset(self, host_uuids): """Get host details in a list for the list of host_uuids supplied""" request_obj = { "host_id_list": host_uuids } api_response = self.api_topology.fetch_hosts_with_ids( body_fetch_hosts_with_ids=request_obj).to_dict() hosts = api_response.get("data") return hosts def create_host(self, host_nqn): """Create host with the host_nqn supplied""" request_obj = { "host_name": host_nqn, "host_nqn": host_nqn, "fac_enabled": False } LOG.info("create_host: request_obj=%(request_obj)s", {'request_obj': request_obj}) api_response = self.api_topology.add_host( body_host_create=request_obj).to_dict() return api_response def attach_volume(self, uuid, host_uuid, fac_enabled, iops=False): """Attaches a volume to a host server, using the specified transport method """ if fac_enabled: request_obj = { "transport": 'PCI', "host_uuid": host_uuid, "fnid": 3, "huid": 1, "ctlid": 0 } else: request_obj = { "transport": 'TCP', "host_uuid": host_uuid } # high iops set when uploading, downloading or migrating volume if iops: request_obj["max_read_iops"] = self.configuration.safe_get( 'iops_for_image_migration') LOG.info("attach_volume: uuid=%(uuid)s " "request_obj=%(request_obj)s", {'uuid': uuid, 'request_obj': request_obj}) api_response = self.api_storage.attach_volume( volume_uuid=uuid, body_volume_attach=request_obj).to_dict() return api_response def detach_volume(self, port_uuid): """Detach the volume specified port""" LOG.info("detach_volume: port_uuid=%(port_uuid)s", {'port_uuid': port_uuid}) api_response = self.api_storage.delete_port( port_uuid=port_uuid).to_dict() return api_response def create_snapshot(self, uuid, snapshot_name): """Create snapshot of volume with specified uuid""" fungible_request_obj = { "name": snapshot_name } api_response = self.api_storage.create_snapshot( volume_uuid=uuid, body_volume_snapshot_create=fungible_request_obj).to_dict() return api_response def delete_snapshot(self, uuid): """Delete snapshot with specified uuid""" api_response = self.api_storage.delete_snapshot( snapshot_uuid=uuid).to_dict() return api_response def extend_volume(self, uuid, new_size): """Update volume size to new size""" fungible_request_obj = { "op": "UPDATE_CAPACITY", "capacity": constants.BYTES_PER_GIB * new_size, } api_response = self.api_storage.update_volume( volume_uuid=uuid, body_volume_update=fungible_request_obj).to_dict() return 
api_response def rename_volume(self, uuid, new_name): """Update volume name to new name""" fungible_request_obj = { "op": "RENAME_VOLUME", "new_vol_name": new_name, } api_response = self.api_storage.update_volume( volume_uuid=uuid, body_volume_update=fungible_request_obj).to_dict() return api_response def copy_volume(self, volumeId, src_vrefId): """Submit copy volume task.""" payload = { "src_volume_uuid": src_vrefId, "dest_volume_uuid": volumeId, "timeout": self.configuration.safe_get( 'fsc_clone_volume_timeout') } LOG.info("Volume clone payload: %(payload)s.", {'payload': payload}) api_response = self.api_storage.create_volume_copy_task( body_create_volume_copy_task=payload).to_dict() return api_response def get_volume_copy_task(self, task_uuid): """Get volume copy task status""" api_response = self.api_storage.get_volume_copy_task( task_uuid).to_dict() return api_response def delete_volume_copy_task(self, task_uuid): """Delete volume copy task""" api_response = self.api_storage.delete_volume_copy_task( task_uuid).to_dict() return api_response def get_topology(self): api_response = self.api_topology.get_hierarchical_topology().to_dict() return api_response ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/drivers/fungible/swagger_api_client.py0000664000175000017500000171226000000000000025324 0ustar00zuulzuul00000000000000# (c) Copyright 2022 Fungible, Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
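# For reference, the composer base URLs that RestClient.do_setup above
# builds (a sketch; <san_ip> and <san_api_port> stand in for the configured
# values):
#
#   api_enable_ssl = True   ->  https://<san_ip>/FunCC/v1
#   api_enable_ssl = False  ->  http://<san_ip>:<san_api_port>/FunCC/v1
#
# Volume capacities sent to create_volume/extend_volume are expressed in
# bytes as size_in_gib * BYTES_PER_GIB (1073741824).
#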
from __future__ import absolute_import import copy import datetime import http.client as httplib import io import json import logging import mimetypes import multiprocessing from multiprocessing.pool import ThreadPool import os import pprint import re import ssl import sys import tempfile from urllib.parse import quote from urllib.parse import urlencode import certifi try: import urllib3 except ImportError: raise ImportError('Swagger python client requires urllib3.') logger = logging.getLogger(__name__) class RESTResponse(io.IOBase): def __init__(self, resp): self.urllib3_response = resp self.status = resp.status self.reason = resp.reason self.data = resp.data def getheaders(self): """Returns a dictionary of the response headers.""" return self.urllib3_response.getheaders() def getheader(self, name, default=None): """Returns a given response header.""" return self.urllib3_response.getheader(name, default) class RESTClientObject(object): def __init__(self, configuration, pools_size=4, maxsize=None): # urllib3.PoolManager will pass all kw parameters to connectionpool # https://github.com/shazow/urllib3/blob/f9409436f83aeb79fbaf090181cd81b784f1b8ce/urllib3/poolmanager.py#L75 # noqa: E501 # https://github.com/shazow/urllib3/blob/f9409436f83aeb79fbaf090181cd81b784f1b8ce/urllib3/connectionpool.py#L680 # noqa: E501 # maxsize is the number of requests to host that are allowed in parallel # noqa: E501 # Custom SSL certificates and client certificates: http://urllib3.readthedocs.io/en/latest/advanced-usage.html # noqa: E501 # cert_reqs if configuration.verify_ssl: cert_reqs = ssl.CERT_REQUIRED else: cert_reqs = ssl.CERT_NONE # ca_certs if configuration.ssl_ca_cert: ca_certs = configuration.ssl_ca_cert else: # if not set certificate file, use Mozilla's root certificates. ca_certs = certifi.where() addition_pool_args = {} if configuration.assert_hostname is not None: addition_pool_args['assert_hostname'] = configuration.assert_hostname # noqa: E501 if maxsize is None: if configuration.connection_pool_maxsize is not None: maxsize = configuration.connection_pool_maxsize else: maxsize = 4 # https pool manager if configuration.proxy: self.pool_manager = urllib3.ProxyManager( num_pools=pools_size, maxsize=maxsize, cert_reqs=cert_reqs, ca_certs=ca_certs, cert_file=configuration.cert_file, key_file=configuration.key_file, proxy_url=configuration.proxy, **addition_pool_args ) else: self.pool_manager = urllib3.PoolManager( num_pools=pools_size, maxsize=maxsize, cert_reqs=cert_reqs, ca_certs=ca_certs, cert_file=configuration.cert_file, key_file=configuration.key_file, **addition_pool_args ) def request(self, method, url, query_params=None, headers=None, body=None, post_params=None, _preload_content=True, _request_timeout=None): """Perform requests. :param method: http request method :param url: http request url :param query_params: query parameters in the url :param headers: http request headers :param body: request json body, for `application/json` :param post_params: request post parameters, `application/x-www-form-urlencoded` and `multipart/form-data` :param _preload_content: if False, the urllib3.HTTPResponse object will be returned without reading/decoding response data. Default is True. :param _request_timeout: timeout setting for this request. If one number provided, it will be total request timeout. It can also be a pair (tuple) of (connection, read) timeouts. 
""" method = method.upper() assert method in ['GET', 'HEAD', 'DELETE', 'POST', 'PUT', 'PATCH', 'OPTIONS'] if post_params and body: raise ValueError( "body parameter cannot be used with post_params parameter." ) post_params = post_params or {} headers = headers or {} timeout = None if _request_timeout: if isinstance(_request_timeout, (int, )): # noqa: E501,F821 timeout = urllib3.Timeout(total=_request_timeout) elif (isinstance(_request_timeout, tuple) and len(_request_timeout) == 2): timeout = urllib3.Timeout( connect=_request_timeout[0], read=_request_timeout[1]) if 'Content-Type' not in headers: headers['Content-Type'] = 'application/json' try: # For `POST`, `PUT`, `PATCH`, `OPTIONS`, `DELETE` if method in ['POST', 'PUT', 'PATCH', 'OPTIONS', 'DELETE']: if query_params: url += '?' + urlencode(query_params) if re.search('json', headers['Content-Type'], re.IGNORECASE): request_body = '{}' if body is not None: request_body = json.dumps(body) r = self.pool_manager.request( method, url, body=request_body, preload_content=_preload_content, timeout=timeout, headers=headers) elif headers['Content-Type'] == 'application/x-www-form-urlencoded': # noqa: E501 r = self.pool_manager.request( method, url, fields=post_params, encode_multipart=False, preload_content=_preload_content, timeout=timeout, headers=headers) elif headers['Content-Type'] == 'multipart/form-data': # must del headers['Content-Type'], or the correct # Content-Type which generated by urllib3 will be # overwritten. del headers['Content-Type'] r = self.pool_manager.request( method, url, fields=post_params, encode_multipart=True, preload_content=_preload_content, timeout=timeout, headers=headers) # Pass a `string` parameter directly in the body to support # other content types than Json when `body` argument is # provided in serialized form elif isinstance(body, str): request_body = body r = self.pool_manager.request( method, url, body=request_body, preload_content=_preload_content, timeout=timeout, headers=headers) else: # Cannot generate the request from given parameters msg = """Cannot prepare a request message for provided arguments. Please check that your arguments match declared content type.""" raise ApiException(status=0, reason=msg) # For `GET`, `HEAD` else: r = self.pool_manager.request(method, url, fields=query_params, preload_content=_preload_content, timeout=timeout, headers=headers) except urllib3.exceptions.SSLError as e: msg = "{0}\n{1}".format(type(e).__name__, str(e)) raise ApiException(status=0, reason=msg) if _preload_content: r = RESTResponse(r) # In the python 3, the response.data is bytes. # we need to decode it to string. 
r.data = r.data.decode('utf8') # log response body logger.debug("response body: %s", r.data) if not 200 <= r.status <= 299: raise ApiException(http_resp=r) return r def GET(self, url, headers=None, query_params=None, _preload_content=True, _request_timeout=None): return self.request("GET", url, headers=headers, _preload_content=_preload_content, _request_timeout=_request_timeout, query_params=query_params) def HEAD(self, url, headers=None, query_params=None, _preload_content=True, _request_timeout=None): return self.request("HEAD", url, headers=headers, _preload_content=_preload_content, _request_timeout=_request_timeout, query_params=query_params) def OPTIONS(self, url, headers=None, query_params=None, post_params=None, body=None, _preload_content=True, _request_timeout=None): return self.request("OPTIONS", url, headers=headers, query_params=query_params, post_params=post_params, _preload_content=_preload_content, _request_timeout=_request_timeout, body=body) def DELETE(self, url, headers=None, query_params=None, body=None, _preload_content=True, _request_timeout=None): return self.request("DELETE", url, headers=headers, query_params=query_params, _preload_content=_preload_content, _request_timeout=_request_timeout, body=body) def POST(self, url, headers=None, query_params=None, post_params=None, body=None, _preload_content=True, _request_timeout=None): return self.request("POST", url, headers=headers, query_params=query_params, post_params=post_params, _preload_content=_preload_content, _request_timeout=_request_timeout, body=body) def PUT(self, url, headers=None, query_params=None, post_params=None, body=None, _preload_content=True, _request_timeout=None): return self.request("PUT", url, headers=headers, query_params=query_params, post_params=post_params, _preload_content=_preload_content, _request_timeout=_request_timeout, body=body) def PATCH(self, url, headers=None, query_params=None, post_params=None, body=None, _preload_content=True, _request_timeout=None): return self.request("PATCH", url, headers=headers, query_params=query_params, post_params=post_params, _preload_content=_preload_content, _request_timeout=_request_timeout, body=body) class ApiException(Exception): def __init__(self, status=None, reason=None, http_resp=None): if http_resp: self.status = http_resp.status self.reason = http_resp.reason self.body = http_resp.data self.headers = http_resp.getheaders() else: self.status = status self.reason = reason self.body = None self.headers = None def __str__(self): """Custom error messages for exception""" error_message = "({0})\n"\ "Reason: {1}\n".format(self.status, self.reason) if self.headers: error_message += "HTTP response headers: {0}\n".format( self.headers) if self.body: error_message += "HTTP response body: {0}\n".format(self.body) return error_message class Configuration(object): """NOTE: This class is auto generated by the swagger code generator program Ref: https://github.com/swagger-api/swagger-codegen Do not edit the class manually. """ _default = None def __init__(self): """Constructor""" if self._default: for key in self._default.__dict__.keys(): self.__dict__[key] = copy.copy(self._default.__dict__[key]) return # Default Base url self.host = "http://localhost:50220/FunCC/v1" # Temp file folder for downloading files self.temp_folder_path = None # Authentication Settings # dict to store API key(s) self.api_key = {} # dict to store API prefix (e.g. 
Bearer) self.api_key_prefix = {} # function to refresh API key if expired self.refresh_api_key_hook = None # Username for HTTP basic authentication self.username = "" # Password for HTTP basic authentication self.password = "" # Logging Settings self.logger = {} self.logger["package_logger"] = logging.getLogger("swagger_client") self.logger["urllib3_logger"] = logging.getLogger("urllib3") # Log format self.logger_format = '%(asctime)s %(levelname)s %(message)s' # Log stream handler self.logger_stream_handler = None # Log file handler self.logger_file_handler = None # Debug file location self.logger_file = None # Debug switch self.debug = False # SSL/TLS verification # Set this to false to skip verifying SSL certificate when calling API # from https server. self.verify_ssl = True # Set this to customize the certificate file to verify the peer. self.ssl_ca_cert = None # client certificate file self.cert_file = None # client key file self.key_file = None # Set this to True/False to enable/disable SSL hostname verification. self.assert_hostname = None # urllib3 connection pool's maximum number of connections saved # per pool. urllib3 uses 1 connection as default value, but this is # not the best value when you are making a lot of possibly parallel # requests to the same host, which is often the case here. # cpu_count * 5 is used as default value to increase performance. self.connection_pool_maxsize = multiprocessing.cpu_count() * 5 # Proxy URL self.proxy = None # Safe chars for path_param self.safe_chars_for_path_param = '' # Disable client side validation self.client_side_validation = True @classmethod def set_default(cls, default): cls._default = default @property def logger_file(self): """The logger file. If the logger_file is None, then add stream handler and remove file handler. Otherwise, add file handler and remove stream handler. :param value: The logger_file path. :type: str """ return self.__logger_file @logger_file.setter def logger_file(self, value): """The logger file. If the logger_file is None, then add stream handler and remove file handler. Otherwise, add file handler and remove stream handler. :param value: The logger_file path. :type: str """ self.__logger_file = value if self.__logger_file: # If set logging file, # then add file handler and remove stream handler. self.logger_file_handler = logging.FileHandler(self.__logger_file) self.logger_file_handler.setFormatter(self.logger_formatter) for _, logger in self.logger.items(): logger.addHandler(self.logger_file_handler) if self.logger_stream_handler: logger.removeHandler(self.logger_stream_handler) else: # If not set logging file, # then add stream handler and remove file handler. self.logger_stream_handler = logging.StreamHandler() self.logger_stream_handler.setFormatter(self.logger_formatter) for _, logger in self.logger.items(): logger.addHandler(self.logger_stream_handler) if self.logger_file_handler: logger.removeHandler(self.logger_file_handler) @property def debug(self): """Debug status :param value: The debug status, True or False. :type: bool """ return self.__debug @debug.setter def debug(self, value): """Debug status :param value: The debug status, True or False. 
:type: bool """ self.__debug = value if self.__debug: # if debug status is True, turn on debug logging for _, logger in self.logger.items(): logger.setLevel(logging.DEBUG) # turn on httplib debug httplib.HTTPConnection.debuglevel = 1 else: # if debug status is False, turn off debug logging, # setting log level to default `logging.WARNING` for _, logger in self.logger.items(): logger.setLevel(logging.WARNING) # turn off httplib debug httplib.HTTPConnection.debuglevel = 0 @property def logger_format(self): """The logger format. The logger_formatter will be updated when sets logger_format. :param value: The format string. :type: str """ return self.__logger_format @logger_format.setter def logger_format(self, value): """The logger format. The logger_formatter will be updated when sets logger_format. :param value: The format string. :type: str """ self.__logger_format = value self.logger_formatter = logging.Formatter(self.__logger_format) def get_api_key_with_prefix(self, identifier): """Gets API key (with prefix if set). :param identifier: The identifier of apiKey. :return: The token for api key authentication. """ if self.refresh_api_key_hook: self.refresh_api_key_hook(self) key = self.api_key.get(identifier) if key: prefix = self.api_key_prefix.get(identifier) if prefix: return "%s %s" % (prefix, key) else: return key def get_basic_auth_token(self): """Gets HTTP basic authentication header (string). :return: The token for basic HTTP authentication. """ return urllib3.util.make_headers( basic_auth=self.username + ':' + self.password ).get('authorization') def auth_settings(self): """Gets Auth Settings dict for api client. :return: The Auth Settings information dict. """ return { 'Basic': { 'type': 'basic', 'in': 'header', 'key': 'Authorization', 'value': self.get_basic_auth_token() }, 'Bearer': { 'type': 'api_key', 'in': 'header', 'key': 'Authorization', 'value': self.get_api_key_with_prefix('Authorization') }, } def to_debug_report(self): """Gets the essential information for debugging. :return: The report for debugging. """ return "Python SDK Debug Report:\n"\ "OS: {env}\n"\ "Python Version: {pyversion}\n"\ "Version of the API: 2.2.10\n"\ "SDK Package Version: 1.0.0".\ format(env=sys.platform, pyversion=sys.version) class ApiClient(object): """Generic API client for Swagger client library builds. Swagger generic API client. This client handles the client- server communication, and is invariant across implementations. Specifics of the methods and models for each application are generated from the Swagger templates. NOTE: This class is auto generated by the swagger code generator program Ref: https://github.com/swagger-api/swagger-codegen Do not edit the class manually. :param configuration: .Configuration object for this client :param header_name: a header to pass when making calls to the API. :param header_value: a header value to pass when making calls to the API. :param cookie: a cookie to include in the header when making calls to the API """ PRIMITIVE_TYPES = (float, bool, bytes, str, int) NATIVE_TYPES_MAPPING = { 'int': int, 'long': int, # noqa: F821 'float': float, 'str': str, 'bool': bool, 'date': datetime.date, 'datetime': datetime.datetime, 'object': object, } def __init__(self, configuration=None, header_name=None, header_value=None, cookie=None): if configuration is None: configuration = Configuration() self.configuration = configuration # Use the pool property to lazily initialize the ThreadPool. 
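# Illustrative caller-side pattern for the async path (a sketch; any generated
# API class such as StorageApi can be used):
#     thread = api_storage.get_volume(volume_uuid, async_req=True)
#     result = thread.get()   # work runs on the ThreadPool created lazily below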
self._pool = None self.rest_client = RESTClientObject(configuration) self.default_headers = {} if header_name is not None: self.default_headers[header_name] = header_value self.cookie = cookie # Set default User-Agent. self.user_agent = 'Swagger-Codegen/1.0.0/python' self.client_side_validation = configuration.client_side_validation def __del__(self): if self._pool is not None: self._pool.close() self._pool.join() @property def pool(self): if self._pool is None: self._pool = ThreadPool() return self._pool @property def user_agent(self): """User agent for this API client""" return self.default_headers['User-Agent'] @user_agent.setter def user_agent(self, value): self.default_headers['User-Agent'] = value def set_default_header(self, header_name, header_value): self.default_headers[header_name] = header_value def __call_api( self, resource_path, method, path_params=None, query_params=None, header_params=None, body=None, post_params=None, files=None, response_type=None, auth_settings=None, _return_http_data_only=None, collection_formats=None, _preload_content=True, _request_timeout=None): config = self.configuration # header parameters header_params = header_params or {} header_params.update(self.default_headers) if self.cookie: header_params['Cookie'] = self.cookie if header_params: header_params = self.sanitize_for_serialization(header_params) header_params = dict(self.parameters_to_tuples(header_params, collection_formats)) # path parameters if path_params: path_params = self.sanitize_for_serialization(path_params) path_params = self.parameters_to_tuples(path_params, collection_formats) for k, v in path_params: # specified safe chars, encode everything resource_path = resource_path.replace( '{%s}' % k, quote(str(v), safe=config.safe_chars_for_path_param) ) # query parameters if query_params: query_params = self.sanitize_for_serialization(query_params) query_params = self.parameters_to_tuples(query_params, collection_formats) # post parameters if post_params or files: post_params = self.prepare_post_parameters(post_params, files) post_params = self.sanitize_for_serialization(post_params) post_params = self.parameters_to_tuples(post_params, collection_formats) # auth setting self.update_params_for_auth(header_params, query_params, auth_settings) # body if body: body = self.sanitize_for_serialization(body) # request url url = self.configuration.host + resource_path # perform request and return response response_data = self.request( method, url, query_params=query_params, headers=header_params, post_params=post_params, body=body, _preload_content=_preload_content, _request_timeout=_request_timeout) self.last_response = response_data return_data = response_data if _preload_content: # deserialize response data if response_type: return_data = self.deserialize(response_data, response_type) else: return_data = None if _return_http_data_only: return (return_data) else: return (return_data, response_data.status, response_data.getheaders()) def sanitize_for_serialization(self, obj): """Builds a JSON POST object. If obj is None, return None. If obj is str, int, long, float, bool, return directly. If obj is datetime.datetime, datetime.date convert to string in iso8601 format. If obj is list, sanitize each element in the list. If obj is dict, return the dict. If obj is swagger model, return the properties dict. :param obj: The data to serialize. :return: The serialized form of data. 
""" if obj is None: return None elif isinstance(obj, self.PRIMITIVE_TYPES): return obj elif isinstance(obj, list): return [self.sanitize_for_serialization(sub_obj) for sub_obj in obj] elif isinstance(obj, tuple): return tuple(self.sanitize_for_serialization(sub_obj) for sub_obj in obj) elif isinstance(obj, (datetime.datetime, datetime.date)): return obj.isoformat() if isinstance(obj, dict): obj_dict = obj else: # Convert model obj to dict except # attributes `swagger_types`, `attribute_map` # and attributes which value is not None. # Convert attribute name to json key in # model definition for request. obj_dict = {obj.attribute_map[attr]: getattr(obj, attr) for attr, _ in obj.swagger_types.items() if getattr(obj, attr) is not None} return {key: self.sanitize_for_serialization(val) for key, val in obj_dict.items()} def deserialize(self, response, response_type): """Deserializes response into an object. :param response: RESTResponse object to be deserialized. :param response_type: class literal for deserialized object, or string of class name. :return: deserialized object. """ # handle file downloading # save response body into a tmp file and return the instance if response_type == "file": return self.__deserialize_file(response) # fetch data from response object try: data = json.loads(response.data) except ValueError: data = response.data return self.__deserialize(data, response_type) def __deserialize(self, data, klass): """Deserializes dict, list, str into an object. :param data: dict, list or str. :param klass: class literal, or string of class name. :return: object. """ if data is None: return None if type(klass) is str: if klass.startswith('list['): sub_kls = re.match(r'list\[(.*)\]', klass).group(1) return [self.__deserialize(sub_data, sub_kls) for sub_data in data] if klass.startswith('dict('): sub_kls = re.match(r'dict\(([^,]*), (.*)\)', klass).group(2) return {k: self.__deserialize(v, sub_kls) for k, v in data.items()} # convert str to class if klass in self.NATIVE_TYPES_MAPPING: klass = self.NATIVE_TYPES_MAPPING[klass] else: logger.debug("klass: %s", klass) klass = getattr(sys.modules[__name__], klass) if klass in self.PRIMITIVE_TYPES: return self.__deserialize_primitive(data, klass) elif klass == object: return self.__deserialize_object(data) elif klass == datetime.date: return self.__deserialize_date(data) elif klass == datetime.datetime: return self.__deserialize_datatime(data) else: return self.__deserialize_model(data, klass) def call_api(self, resource_path, method, path_params=None, query_params=None, header_params=None, body=None, post_params=None, files=None, response_type=None, auth_settings=None, async_req=None, _return_http_data_only=None, collection_formats=None, _preload_content=True, _request_timeout=None): """Makes the HTTP request (synchronous) and returns deserialized data. To make an async request, set the async_req parameter. :param resource_path: Path to method endpoint. :param method: Method to call. :param path_params: Path parameters in the url. :param query_params: Query parameters in the url. :param header_params: Header parameters to be placed in the request header. :param body: Request body. :param post_params dict: Request post form parameters, for `application/x-www-form-urlencoded`, `multipart/form-data`. :param auth_settings list: Auth Settings names for the request. :param response: Response data type. :param files dict: key -> filename, value -> filepath, for `multipart/form-data`. 
:param async_req bool: execute request asynchronously :param _return_http_data_only: response data without head status code and headers :param collection_formats: dict of collection formats for path, query, header, and post parameters. :param _preload_content: if False, the urllib3.HTTPResponse object will be returned without reading/decoding response data. Default is True. :param _request_timeout: timeout setting for this request. If one number provided, it will be total request timeout. It can also be a pair (tuple) of (connection, read) timeouts. :return: If async_req parameter is True, the request will be called asynchronously. The method will return the request thread. If parameter async_req is False or missing, then the method will return the response directly. """ if not async_req: return self.__call_api(resource_path, method, path_params, query_params, header_params, body, post_params, files, response_type, auth_settings, _return_http_data_only, collection_formats, _preload_content, _request_timeout) else: thread = self.pool.apply_async(self.__call_api, (resource_path, method, path_params, query_params, header_params, body, post_params, files, response_type, auth_settings, _return_http_data_only, collection_formats, _preload_content, _request_timeout)) return thread def request(self, method, url, query_params=None, headers=None, post_params=None, body=None, _preload_content=True, _request_timeout=None): """Makes the HTTP request using RESTClient.""" if method == "GET": return self.rest_client.GET(url, query_params=query_params, _preload_content=_preload_content, _request_timeout=_request_timeout, headers=headers) elif method == "HEAD": return self.rest_client.HEAD(url, query_params=query_params, _preload_content=_preload_content, _request_timeout=_request_timeout, headers=headers) elif method == "OPTIONS": return self.rest_client.OPTIONS(url, query_params=query_params, headers=headers, post_params=post_params, _preload_content=_preload_content, _request_timeout=_request_timeout, body=body) elif method == "POST": return self.rest_client.POST(url, query_params=query_params, headers=headers, post_params=post_params, _preload_content=_preload_content, _request_timeout=_request_timeout, body=body) elif method == "PUT": return self.rest_client.PUT(url, query_params=query_params, headers=headers, post_params=post_params, _preload_content=_preload_content, _request_timeout=_request_timeout, body=body) elif method == "PATCH": return self.rest_client.PATCH(url, query_params=query_params, headers=headers, post_params=post_params, _preload_content=_preload_content, _request_timeout=_request_timeout, body=body) elif method == "DELETE": return self.rest_client.DELETE(url, query_params=query_params, headers=headers, _preload_content=_preload_content, _request_timeout=_request_timeout, body=body) else: raise ValueError( "http method must be `GET`, `HEAD`, `OPTIONS`," " `POST`, `PATCH`, `PUT` or `DELETE`." ) def parameters_to_tuples(self, params, collection_formats): """Get parameters as list of tuples, formatting collections. 
:param params: Parameters as dict or list of two-tuples :param dict collection_formats: Parameter collection formats :return: Parameters as list of tuples, collections formatted """ new_params = [] if collection_formats is None: collection_formats = {} for k, v in params.items() if isinstance(params, dict) else params: # noqa: E501 if k in collection_formats: collection_format = collection_formats[k] if collection_format == 'multi': new_params.extend((k, value) for value in v) else: if collection_format == 'ssv': delimiter = ' ' elif collection_format == 'tsv': delimiter = '\t' elif collection_format == 'pipes': delimiter = '|' else: # csv is the default delimiter = ',' new_params.append( (k, delimiter.join(str(value) for value in v))) else: new_params.append((k, v)) return new_params def prepare_post_parameters(self, post_params=None, files=None): """Builds form parameters. :param post_params: Normal form parameters. :param files: File parameters. :return: Form parameters with files. """ params = [] if post_params: params = post_params if files: for k, v in files.items(): if not v: continue file_names = v if type(v) is list else [v] for n in file_names: with open(n, 'rb') as f: filename = os.path.basename(f.name) filedata = f.read() mimetype = (mimetypes.guess_type(filename)[0] or 'application/octet-stream') params.append( tuple([k, tuple([filename, filedata, mimetype])])) return params def select_header_accept(self, accepts): """Returns `Accept` based on an array of accepts provided. :param accepts: List of headers. :return: Accept (e.g. application/json). """ if not accepts: return accepts = [x.lower() for x in accepts] if 'application/json' in accepts: return 'application/json' else: return ', '.join(accepts) def select_header_content_type(self, content_types): """Returns `Content-Type` based on an array of content_types provided. :param content_types: List of content-types. :return: Content-Type (e.g. application/json). """ if not content_types: return 'application/json' content_types = [x.lower() for x in content_types] if 'application/json' in content_types or '*/*' in content_types: return 'application/json' else: return content_types[0] def update_params_for_auth(self, headers, querys, auth_settings): """Updates header and query params based on authentication setting. :param headers: Header parameters dict to be updated. :param querys: Query parameters tuple list to be updated. :param auth_settings: Authentication setting identifiers list. """ if not auth_settings: return for auth in auth_settings: auth_setting = self.configuration.auth_settings().get(auth) if auth_setting: if not auth_setting['value']: continue elif auth_setting['in'] == 'header': headers[auth_setting['key']] = auth_setting['value'] elif auth_setting['in'] == 'query': querys.append((auth_setting['key'], auth_setting['value'])) else: raise ValueError( 'Authentication token must be in `query` or `header`' ) def __deserialize_file(self, response): """Deserializes body to file Saves response body into a file in a temporary folder, using the filename from the `Content-Disposition` header if provided. :param response: RESTResponse. :return: file path. 
""" fd, path = tempfile.mkstemp(dir=self.configuration.temp_folder_path) os.close(fd) os.remove(path) content_disposition = response.getheader("Content-Disposition") if content_disposition: filename = re.search(r'filename=[\'"]?([^\'"\s]+)[\'"]?', content_disposition).group(1) path = os.path.join(os.path.dirname(path), filename) with open(path, "w") as f: f.write(response.data) return path def __deserialize_primitive(self, data, klass): """Deserializes string to primitive type. :param data: str. :param klass: class literal. :return: int, long, float, str, bool. """ try: return klass(data) except UnicodeEncodeError: return str(data) except TypeError: return data def __deserialize_object(self, value): """Return a original value. :return: object. """ return value def __deserialize_date(self, string): """Deserializes string to date. :param string: str. :return: date. """ try: from dateutil.parser import parse return parse(string).date() except ImportError: return string except ValueError: raise ApiException( status=0, reason="Failed to parse `{0}` as date object".format(string) ) def __deserialize_datatime(self, string): """Deserializes string to datetime. The string should be in iso8601 datetime format. :param string: str. :return: datetime. """ try: from dateutil.parser import parse return parse(string) except ImportError: return string except ValueError: raise ApiException( status=0, reason=( "Failed to parse `{0}` as datetime object" .format(string) ) ) def __hasattr(self, object, name): return name in object.__class__.__dict__ def __deserialize_model(self, data, klass): """Deserializes list or dict to model. :param data: dict, list. :param klass: class literal. :return: model object. """ if (not klass.swagger_types and not self.__hasattr(klass, 'get_real_child_model')): return data kwargs = {} if klass.swagger_types is not None: for attr, attr_type in klass.swagger_types.items(): if (data is not None and klass.attribute_map[attr] in data and isinstance(data, (list, dict))): value = data[klass.attribute_map[attr]] kwargs[attr] = self.__deserialize(value, attr_type) instance = klass(**kwargs) if (isinstance(instance, dict) and klass.swagger_types is not None and isinstance(data, dict)): for key, value in data.items(): if key not in klass.swagger_types: instance[key] = value if self.__hasattr(instance, 'get_real_child_model'): klass_name = instance.get_real_child_model(data) if klass_name: instance = self.__deserialize(data, klass_name) return instance class ApigatewayApi(object): """NOTE: This class is auto generated by the swagger code generator program Do not edit the class manually. Ref: https://github.com/swagger-api/swagger-codegen """ def __init__(self, api_client=None): if api_client is None: api_client = ApiClient() self.api_client = api_client def get_fc_health(self, **kwargs): # noqa: E501 """Get health of the API gateway # noqa: E501 Retrieves the health of the API gateway # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.get_fc_health(async_req=True) >>> result = thread.get() :param async_req bool :return: CommonResponseFields If the method is called asynchronously, returns the request thread. 
""" kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.get_fc_health_with_http_info(**kwargs) # noqa: E501 else: (data) = self.get_fc_health_with_http_info(**kwargs) # noqa: E501 return data def get_fc_health_with_http_info(self, **kwargs): # noqa: E501 """Get health of the API gateway # noqa: E501 Retrieves the health of the API gateway # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.get_fc_health_with_http_info(async_req=True) >>> result = thread.get() :param async_req bool :return: CommonResponseFields If the method is called asynchronously, returns the request thread. """ all_params = [] # noqa: E501 all_params.append('async_req') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in params['kwargs'].items(): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method get_fc_health" % key ) params[key] = val del params['kwargs'] collection_formats = {} path_params = {} query_params = [] header_params = {} form_params = [] local_var_files = {} body_params = None # HTTP header `Accept` header_params['Accept'] = self.api_client.select_header_accept( ['application/json']) # noqa: E501 # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501 ['application/json']) # noqa: E501 # Authentication setting auth_settings = ['Basic', 'Bearer'] # noqa: E501 return self.api_client.call_api( '/api_server/health', 'GET', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='CommonResponseFields', # noqa: E501 auth_settings=auth_settings, async_req=params.get('async_req'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) class StorageApi(object): """NOTE: This class is auto generated by the swagger code generator program Do not edit the class manually. Ref: https://github.com/swagger-api/swagger-codegen """ def __init__(self, api_client=None): if api_client is None: api_client = ApiClient() self.api_client = api_client def attach_volume(self, volume_uuid, body_volume_attach, **kwargs): # noqa: E501 """Attach a volume # noqa: E501 Attaches a volume to a host server, using the specified transport method # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.attach_volume(volume_uuid, body_volume_attach, async_req=True) >>> result = thread.get() :param async_req bool :param str volume_uuid: FC assigned volume UUID (required) :param BodyVolumeAttach body_volume_attach: (required) :return: ResponseDataWithCreateUuid If the method is called asynchronously, returns the request thread. 
""" kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.attach_volume_with_http_info(volume_uuid, body_volume_attach, **kwargs) # noqa: E501 else: (data) = self.attach_volume_with_http_info(volume_uuid, body_volume_attach, **kwargs) # noqa: E501 return data def attach_volume_with_http_info(self, volume_uuid, body_volume_attach, **kwargs): # noqa: E501 """Attach a volume # noqa: E501 Attaches a volume to a host server, using the specified transport method # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.attach_volume_with_http_info(volume_uuid, body_volume_attach, async_req=True) >>> result = thread.get() :param async_req bool :param str volume_uuid: FC assigned volume UUID (required) :param BodyVolumeAttach body_volume_attach: (required) :return: ResponseDataWithCreateUuid If the method is called asynchronously, returns the request thread. """ all_params = ['volume_uuid', 'body_volume_attach'] # noqa: E501 all_params.append('async_req') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in params['kwargs'].items(): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method attach_volume" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'volume_uuid' is set if self.api_client.client_side_validation and ('volume_uuid' not in params or # noqa: E501 params['volume_uuid'] is None): # noqa: E501 raise ValueError("Missing the required parameter `volume_uuid` when calling `attach_volume`") # noqa: E501 # verify the required parameter 'body_volume_attach' is set if self.api_client.client_side_validation and ('body_volume_attach' not in params or # noqa: E501 params['body_volume_attach'] is None): # noqa: E501 raise ValueError("Missing the required parameter `body_volume_attach` when calling `attach_volume`") # noqa: E501 if self.api_client.client_side_validation and ('volume_uuid' in params and not re.search(r'^[A-Fa-f0-9\\-]+$', params['volume_uuid'])): # noqa: E501 raise ValueError("Invalid value for parameter `volume_uuid` when calling `attach_volume`, must conform to the pattern `/^[A-Fa-f0-9\\-]+$/`") # noqa: E501 collection_formats = {} path_params = {} if 'volume_uuid' in params: path_params['volume_uuid'] = params['volume_uuid'] # noqa: E501 query_params = [] header_params = {} form_params = [] local_var_files = {} body_params = None if 'body_volume_attach' in params: body_params = params['body_volume_attach'] # HTTP header `Accept` header_params['Accept'] = self.api_client.select_header_accept( ['application/json']) # noqa: E501 # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501 ['application/json']) # noqa: E501 # Authentication setting auth_settings = ['Basic', 'Bearer'] # noqa: E501 return self.api_client.call_api( '/storage/volumes/{volume_uuid}/ports', 'POST', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='ResponseDataWithCreateUuid', # noqa: E501 auth_settings=auth_settings, async_req=params.get('async_req'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def create_snapshot(self, volume_uuid, 
body_volume_snapshot_create, **kwargs): # noqa: E501 """Create a new snapshot of a volume # noqa: E501 Create new snapshot volume using the specified parameters # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.create_snapshot(volume_uuid, body_volume_snapshot_create, async_req=True) >>> result = thread.get() :param async_req bool :param str volume_uuid: FC assigned volume UUID (required) :param BodyVolumeSnapshotCreate body_volume_snapshot_create: (required) :return: ResponseDataWithCreateUuid If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.create_snapshot_with_http_info(volume_uuid, body_volume_snapshot_create, **kwargs) # noqa: E501 else: (data) = self.create_snapshot_with_http_info(volume_uuid, body_volume_snapshot_create, **kwargs) # noqa: E501 return data def create_snapshot_with_http_info(self, volume_uuid, body_volume_snapshot_create, **kwargs): # noqa: E501 """Create a new snapshot of a volume # noqa: E501 Create new snapshot volume using the specified parameters # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.create_snapshot_with_http_info(volume_uuid, body_volume_snapshot_create, async_req=True) >>> result = thread.get() :param async_req bool :param str volume_uuid: FC assigned volume UUID (required) :param BodyVolumeSnapshotCreate body_volume_snapshot_create: (required) :return: ResponseDataWithCreateUuid If the method is called asynchronously, returns the request thread. """ all_params = ['volume_uuid', 'body_volume_snapshot_create'] # noqa: E501 all_params.append('async_req') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in params['kwargs'].items(): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method create_snapshot" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'volume_uuid' is set if self.api_client.client_side_validation and ('volume_uuid' not in params or # noqa: E501 params['volume_uuid'] is None): # noqa: E501 raise ValueError("Missing the required parameter `volume_uuid` when calling `create_snapshot`") # noqa: E501 # verify the required parameter 'body_volume_snapshot_create' is set if self.api_client.client_side_validation and ('body_volume_snapshot_create' not in params or # noqa: E501 params['body_volume_snapshot_create'] is None): # noqa: E501 raise ValueError("Missing the required parameter `body_volume_snapshot_create` when calling `create_snapshot`") # noqa: E501 if self.api_client.client_side_validation and ('volume_uuid' in params and not re.search(r'^[A-Fa-f0-9\\-]+$', params['volume_uuid'])): # noqa: E501 raise ValueError("Invalid value for parameter `volume_uuid` when calling `create_snapshot`, must conform to the pattern `/^[A-Fa-f0-9\\-]+$/`") # noqa: E501 collection_formats = {} path_params = {} if 'volume_uuid' in params: path_params['volume_uuid'] = params['volume_uuid'] # noqa: E501 query_params = [] header_params = {} form_params = [] local_var_files = {} body_params = None if 'body_volume_snapshot_create' in params: body_params = params['body_volume_snapshot_create'] # HTTP header `Accept` header_params['Accept'] = self.api_client.select_header_accept( 
['application/json']) # noqa: E501 # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501 ['application/json']) # noqa: E501 # Authentication setting auth_settings = ['Basic', 'Bearer'] # noqa: E501 return self.api_client.call_api( '/storage/volumes/{volume_uuid}/snapshots', 'POST', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='ResponseDataWithCreateUuid', # noqa: E501 auth_settings=auth_settings, async_req=params.get('async_req'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def delete_snapshot(self, snapshot_uuid, **kwargs): # noqa: E501 """Delete snapshot # noqa: E501 Deletes the snapshot with specified uuid # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.delete_snapshot(snapshot_uuid, async_req=True) >>> result = thread.get() :param async_req bool :param str snapshot_uuid: FC assigned snapshot UUID (required) :return: SuccessResponseFields If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.delete_snapshot_with_http_info(snapshot_uuid, **kwargs) # noqa: E501 else: (data) = self.delete_snapshot_with_http_info(snapshot_uuid, **kwargs) # noqa: E501 return data def delete_snapshot_with_http_info(self, snapshot_uuid, **kwargs): # noqa: E501 """Delete snapshot # noqa: E501 Deletes the snapshot with specified uuid # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.delete_snapshot_with_http_info(snapshot_uuid, async_req=True) >>> result = thread.get() :param async_req bool :param str snapshot_uuid: FC assigned snapshot UUID (required) :return: SuccessResponseFields If the method is called asynchronously, returns the request thread. 
""" all_params = ['snapshot_uuid'] # noqa: E501 all_params.append('async_req') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in params['kwargs'].items(): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method delete_snapshot" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'snapshot_uuid' is set if ('snapshot_uuid' not in params or params['snapshot_uuid'] is None): raise ValueError("Missing the required parameter `snapshot_uuid` when calling `delete_snapshot`") # noqa: E501 if 'snapshot_uuid' in params and not re.search(r'^[A-Fa-f0-9\\-]+$', params['snapshot_uuid']): # noqa: E501 raise ValueError("Invalid value for parameter `snapshot_uuid` when calling `delete_snapshot`, must conform to the pattern `/^[A-Fa-f0-9\\-]+$/`") # noqa: E501 collection_formats = {} path_params = {} if 'snapshot_uuid' in params: path_params['snapshot_uuid'] = params['snapshot_uuid'] # noqa: E501 query_params = [] header_params = {} form_params = [] local_var_files = {} body_params = None # HTTP header `Accept` header_params['Accept'] = self.api_client.select_header_accept( ['application/json']) # noqa: E501 # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501 ['application/json']) # noqa: E501 # Authentication setting auth_settings = ['Basic', 'Bearer'] # noqa: E501 return self.api_client.call_api( '/storage/snapshots/{snapshot_uuid}', 'DELETE', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='SuccessResponseFields', # noqa: E501 auth_settings=auth_settings, async_req=params.get('async_req'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def create_volume(self, body_volume_intent_create, **kwargs): # noqa: E501 """Create a new volume # noqa: E501 Creates new volume using the specified parameters # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.create_volume(body_volume_intent_create, async_req=True) >>> result = thread.get() :param async_req bool :param BodyVolumeIntentCreate body_volume_intent_create: (required) :return: ResponseDataWithCreateUuid If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.create_volume_with_http_info(body_volume_intent_create, **kwargs) # noqa: E501 else: (data) = self.create_volume_with_http_info(body_volume_intent_create, **kwargs) # noqa: E501 return data def create_volume_with_http_info(self, body_volume_intent_create, **kwargs): # noqa: E501 """Create a new volume # noqa: E501 Creates new volume using the specified parameters # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.create_volume_with_http_info(body_volume_intent_create, async_req=True) >>> result = thread.get() :param async_req bool :param BodyVolumeIntentCreate body_volume_intent_create: (required) :return: ResponseDataWithCreateUuid If the method is called asynchronously, returns the request thread. 
""" all_params = ['body_volume_intent_create'] # noqa: E501 all_params.append('async_req') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in params['kwargs'].items(): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method create_volume" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'body_volume_intent_create' is set if self.api_client.client_side_validation and ('body_volume_intent_create' not in params or # noqa: E501 params['body_volume_intent_create'] is None): # noqa: E501 raise ValueError("Missing the required parameter `body_volume_intent_create` when calling `create_volume`") # noqa: E501 collection_formats = {} path_params = {} query_params = [] header_params = {} form_params = [] local_var_files = {} body_params = None if 'body_volume_intent_create' in params: body_params = params['body_volume_intent_create'] # HTTP header `Accept` header_params['Accept'] = self.api_client.select_header_accept( ['application/json']) # noqa: E501 # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501 ['application/json']) # noqa: E501 # Authentication setting auth_settings = ['Basic', 'Bearer'] # noqa: E501 return self.api_client.call_api( '/storage/volumes', 'POST', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='ResponseDataWithCreateUuid', # noqa: E501 auth_settings=auth_settings, async_req=params.get('async_req'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def create_volume_copy_task(self, body_create_volume_copy_task, **kwargs): # noqa: E501 """Create a task to copy a volume # noqa: E501 Creates a task to copy a specified volume # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.create_volume_copy_task(body_create_volume_copy_task, async_req=True) >>> result = thread.get() :param async_req bool :param BodyCreateVolumeCopyTask body_create_volume_copy_task: (required) :return: ResponseCreateVolumeCopyTask If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.create_volume_copy_task_with_http_info(body_create_volume_copy_task, **kwargs) # noqa: E501 else: (data) = self.create_volume_copy_task_with_http_info(body_create_volume_copy_task, **kwargs) # noqa: E501 return data def create_volume_copy_task_with_http_info(self, body_create_volume_copy_task, **kwargs): # noqa: E501 """Create a task to copy a volume # noqa: E501 Creates a task to copy a specified volume # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.create_volume_copy_task_with_http_info(body_create_volume_copy_task, async_req=True) >>> result = thread.get() :param async_req bool :param BodyCreateVolumeCopyTask body_create_volume_copy_task: (required) :return: ResponseCreateVolumeCopyTask If the method is called asynchronously, returns the request thread. 
""" all_params = ['body_create_volume_copy_task'] # noqa: E501 all_params.append('async_req') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in params['kwargs'].items(): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method create_volume_copy_task" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'body_create_volume_copy_task' is set if ('body_create_volume_copy_task' not in params or params['body_create_volume_copy_task'] is None): raise ValueError("Missing the required parameter `body_create_volume_copy_task` when calling `create_volume_copy_task`") # noqa: E501 collection_formats = {} path_params = {} query_params = [] header_params = {} form_params = [] local_var_files = {} body_params = None if 'body_create_volume_copy_task' in params: body_params = params['body_create_volume_copy_task'] # HTTP header `Accept` header_params['Accept'] = self.api_client.select_header_accept( ['application/json']) # noqa: E501 # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501 ['application/json']) # noqa: E501 # Authentication setting auth_settings = ['Basic', 'Bearer'] # noqa: E501 return self.api_client.call_api( '/storage/volumes/copy', 'POST', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='ResponseCreateVolumeCopyTask', # noqa: E501 auth_settings=auth_settings, async_req=params.get('async_req'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def get_volume_copy_task(self, task_uuid, **kwargs): # noqa: E501 """Get the status of a task to copy a volume # noqa: E501 Retrieves the status of the specified task to copy a volume # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.get_volume_copy_task(task_uuid, async_req=True) >>> result = thread.get() :param async_req bool :param str task_uuid: FC assigned task UUID (required) :return: ResponseGetVolumeCopyTask If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.get_volume_copy_task_with_http_info(task_uuid, **kwargs) # noqa: E501 else: (data) = self.get_volume_copy_task_with_http_info(task_uuid, **kwargs) # noqa: E501 return data def get_volume_copy_task_with_http_info(self, task_uuid, **kwargs): # noqa: E501 """Get the status of a task to copy a volume # noqa: E501 Retrieves the status of the specified task to copy a volume # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.get_volume_copy_task_with_http_info(task_uuid, async_req=True) >>> result = thread.get() :param async_req bool :param str task_uuid: FC assigned task UUID (required) :return: ResponseGetVolumeCopyTask If the method is called asynchronously, returns the request thread. 
""" all_params = ['task_uuid'] # noqa: E501 all_params.append('async_req') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in params['kwargs'].items(): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method get_volume_copy_task" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'task_uuid' is set if ('task_uuid' not in params or params['task_uuid'] is None): raise ValueError("Missing the required parameter `task_uuid` when calling `get_volume_copy_task`") # noqa: E501 collection_formats = {} path_params = {} if 'task_uuid' in params: path_params['task_uuid'] = params['task_uuid'] # noqa: E501 query_params = [] header_params = {} form_params = [] local_var_files = {} body_params = None # HTTP header `Accept` header_params['Accept'] = self.api_client.select_header_accept( ['application/json']) # noqa: E501 # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501 ['application/json']) # noqa: E501 # Authentication setting auth_settings = ['Basic', 'Bearer'] # noqa: E501 return self.api_client.call_api( '/storage/volumes/copy/{task_uuid}', 'GET', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='ResponseGetVolumeCopyTask', # noqa: E501 auth_settings=auth_settings, async_req=params.get('async_req'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def delete_volume_copy_task(self, task_uuid, **kwargs): # noqa: E501 """Delete a task to copy a volume # noqa: E501 Deletes the specified task to copy a volume # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.delete_volume_copy_task(task_uuid, async_req=True) >>> result = thread.get() :param async_req bool :param str task_uuid: FC assigned task UUID (required) :return: SuccessResponseFields If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.delete_volume_copy_task_with_http_info(task_uuid, **kwargs) # noqa: E501 else: (data) = self.delete_volume_copy_task_with_http_info(task_uuid, **kwargs) # noqa: E501 return data def delete_volume_copy_task_with_http_info(self, task_uuid, **kwargs): # noqa: E501 """Delete a task to copy a volume # noqa: E501 Deletes the specified task to copy a volume # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.delete_volume_copy_task_with_http_info(task_uuid, async_req=True) # noqa: E501 >>> result = thread.get() :param async_req bool :param str task_uuid: FC assigned task UUID (required) :return: SuccessResponseFields If the method is called asynchronously, returns the request thread. 
""" all_params = ['task_uuid'] # noqa: E501 all_params.append('async_req') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in params['kwargs'].items(): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method delete_volume_copy_task" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'task_uuid' is set if ('task_uuid' not in params or params['task_uuid'] is None): raise ValueError("Missing the required parameter `task_uuid` when calling `delete_volume_copy_task`") # noqa: E501 collection_formats = {} path_params = {} if 'task_uuid' in params: path_params['task_uuid'] = params['task_uuid'] # noqa: E501 query_params = [] header_params = {} form_params = [] local_var_files = {} body_params = None # HTTP header `Accept` header_params['Accept'] = self.api_client.select_header_accept( ['application/json']) # noqa: E501 # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501 ['application/json']) # noqa: E501 # Authentication setting auth_settings = ['Basic', 'Bearer'] # noqa: E501 return self.api_client.call_api( '/storage/volumes/copy/{task_uuid}', 'DELETE', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='SuccessResponseFields', # noqa: E501 auth_settings=auth_settings, async_req=params.get('async_req'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def delete_port(self, port_uuid, **kwargs): # noqa: E501 """Delete a port # noqa: E501 Deletes the specified port # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.delete_port(port_uuid, async_req=True) >>> result = thread.get() :param async_req bool :param str port_uuid: FC assigned port UUID (required) :param bool force_clean: :return: CommonResponseFields If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.delete_port_with_http_info(port_uuid, **kwargs) # noqa: E501 else: (data) = self.delete_port_with_http_info(port_uuid, **kwargs) # noqa: E501 return data def delete_port_with_http_info(self, port_uuid, **kwargs): # noqa: E501 """Delete a port # noqa: E501 Deletes the specified port # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.delete_port_with_http_info(port_uuid, async_req=True) >>> result = thread.get() :param async_req bool :param str port_uuid: FC assigned port UUID (required) :param bool force_clean: :return: CommonResponseFields If the method is called asynchronously, returns the request thread. 
""" all_params = ['port_uuid', 'force_clean'] # noqa: E501 all_params.append('async_req') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in params['kwargs'].items(): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method delete_port" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'port_uuid' is set if self.api_client.client_side_validation and ('port_uuid' not in params or # noqa: E501 params['port_uuid'] is None): # noqa: E501 raise ValueError("Missing the required parameter `port_uuid` when calling `delete_port`") # noqa: E501 if self.api_client.client_side_validation and ('port_uuid' in params and not re.search(r'^[A-Fa-f0-9\\-]+$', params['port_uuid'])): # noqa: E501 raise ValueError("Invalid value for parameter `port_uuid` when calling `delete_port`, must conform to the pattern `/^[A-Fa-f0-9\\-]+$/`") # noqa: E501 collection_formats = {} path_params = {} if 'port_uuid' in params: path_params['port_uuid'] = params['port_uuid'] # noqa: E501 query_params = [] if 'force_clean' in params: query_params.append(('force_clean', params['force_clean'])) # noqa: E501 header_params = {} form_params = [] local_var_files = {} body_params = None # HTTP header `Accept` header_params['Accept'] = self.api_client.select_header_accept( ['application/json']) # noqa: E501 # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501 ['application/json']) # noqa: E501 # Authentication setting auth_settings = ['Basic', 'Bearer'] # noqa: E501 return self.api_client.call_api( '/storage/ports/{port_uuid}', 'DELETE', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='CommonResponseFields', # noqa: E501 auth_settings=auth_settings, async_req=params.get('async_req'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def delete_volume(self, volume_uuid, **kwargs): # noqa: E501 """Delete a volume # noqa: E501 Deletes the specified volume # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.delete_volume(volume_uuid, async_req=True) >>> result = thread.get() :param async_req bool :param str volume_uuid: FC assigned volume UUID (required) :return: SuccessResponseFields If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.delete_volume_with_http_info(volume_uuid, **kwargs) # noqa: E501 else: (data) = self.delete_volume_with_http_info(volume_uuid, **kwargs) # noqa: E501 return data def delete_volume_with_http_info(self, volume_uuid, **kwargs): # noqa: E501 """Delete a volume # noqa: E501 Deletes the specified volume # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.delete_volume_with_http_info(volume_uuid, async_req=True) >>> result = thread.get() :param async_req bool :param str volume_uuid: FC assigned volume UUID (required) :return: SuccessResponseFields If the method is called asynchronously, returns the request thread. 
""" all_params = ['volume_uuid'] # noqa: E501 all_params.append('async_req') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in params['kwargs'].items(): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method delete_volume" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'volume_uuid' is set if self.api_client.client_side_validation and ('volume_uuid' not in params or # noqa: E501 params['volume_uuid'] is None): # noqa: E501 raise ValueError("Missing the required parameter `volume_uuid` when calling `delete_volume`") # noqa: E501 if self.api_client.client_side_validation and ('volume_uuid' in params and not re.search(r'^[A-Fa-f0-9\\-]+$', params['volume_uuid'])): # noqa: E501 raise ValueError("Invalid value for parameter `volume_uuid` when calling `delete_volume`, must conform to the pattern `/^[A-Fa-f0-9\\-]+$/`") # noqa: E501 collection_formats = {} path_params = {} if 'volume_uuid' in params: path_params['volume_uuid'] = params['volume_uuid'] # noqa: E501 query_params = [] header_params = {} form_params = [] local_var_files = {} body_params = None # HTTP header `Accept` header_params['Accept'] = self.api_client.select_header_accept( ['application/json']) # noqa: E501 # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501 ['application/json']) # noqa: E501 # Authentication setting auth_settings = ['Basic', 'Bearer'] # noqa: E501 return self.api_client.call_api( '/storage/volumes/{volume_uuid}', 'DELETE', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='SuccessResponseFields', # noqa: E501 auth_settings=auth_settings, async_req=params.get('async_req'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def get_port(self, port_uuid, **kwargs): # noqa: E501 """Get port properties # noqa: E501 Retrieves properties of the specified port # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.get_port(port_uuid, async_req=True) >>> result = thread.get() :param async_req bool :param str port_uuid: FC assigned port UUID (required) :return: ResponseDataWithSinglePort If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.get_port_with_http_info(port_uuid, **kwargs) # noqa: E501 else: (data) = self.get_port_with_http_info(port_uuid, **kwargs) # noqa: E501 return data def get_port_with_http_info(self, port_uuid, **kwargs): # noqa: E501 """Get port properties # noqa: E501 Retrieves properties of the specified port # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.get_port_with_http_info(port_uuid, async_req=True) >>> result = thread.get() :param async_req bool :param str port_uuid: FC assigned port UUID (required) :return: ResponseDataWithSinglePort If the method is called asynchronously, returns the request thread. 
""" all_params = ['port_uuid'] # noqa: E501 all_params.append('async_req') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in params['kwargs'].items(): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method get_port" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'port_uuid' is set if self.api_client.client_side_validation and ('port_uuid' not in params or # noqa: E501 params['port_uuid'] is None): # noqa: E501 raise ValueError("Missing the required parameter `port_uuid` when calling `get_port`") # noqa: E501 if self.api_client.client_side_validation and ('port_uuid' in params and not re.search(r'^[A-Fa-f0-9\\-]+$', params['port_uuid'])): # noqa: E501 raise ValueError("Invalid value for parameter `port_uuid` when calling `get_port`, must conform to the pattern `/^[A-Fa-f0-9\\-]+$/`") # noqa: E501 collection_formats = {} path_params = {} if 'port_uuid' in params: path_params['port_uuid'] = params['port_uuid'] # noqa: E501 query_params = [] header_params = {} form_params = [] local_var_files = {} body_params = None # HTTP header `Accept` header_params['Accept'] = self.api_client.select_header_accept( ['application/json']) # noqa: E501 # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501 ['application/json']) # noqa: E501 # Authentication setting auth_settings = ['Basic', 'Bearer'] # noqa: E501 return self.api_client.call_api( '/storage/ports/{port_uuid}', 'GET', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='ResponseDataWithSinglePort', # noqa: E501 auth_settings=auth_settings, async_req=params.get('async_req'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def get_volume(self, volume_uuid, **kwargs): # noqa: E501 """Get properties of a volume # noqa: E501 Retrieves properties of the specified volume # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.get_volume(volume_uuid, async_req=True) >>> result = thread.get() :param async_req bool :param str volume_uuid: FC assigned volume UUID (required) :return: ResponseDataWithSingleVolume If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.get_volume_with_http_info(volume_uuid, **kwargs) # noqa: E501 else: (data) = self.get_volume_with_http_info(volume_uuid, **kwargs) # noqa: E501 return data def get_volume_with_http_info(self, volume_uuid, **kwargs): # noqa: E501 """Get properties of a volume # noqa: E501 Retrieves properties of the specified volume # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.get_volume_with_http_info(volume_uuid, async_req=True) >>> result = thread.get() :param async_req bool :param str volume_uuid: FC assigned volume UUID (required) :return: ResponseDataWithSingleVolume If the method is called asynchronously, returns the request thread. 
""" all_params = ['volume_uuid'] # noqa: E501 all_params.append('async_req') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in params['kwargs'].items(): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method get_volume" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'volume_uuid' is set if self.api_client.client_side_validation and ('volume_uuid' not in params or # noqa: E501 params['volume_uuid'] is None): # noqa: E501 raise ValueError("Missing the required parameter `volume_uuid` when calling `get_volume`") # noqa: E501 if self.api_client.client_side_validation and ('volume_uuid' in params and not re.search(r'^[A-Fa-f0-9\\-]+$', params['volume_uuid'])): # noqa: E501 raise ValueError("Invalid value for parameter `volume_uuid` when calling `get_volume`, must conform to the pattern `/^[A-Fa-f0-9\\-]+$/`") # noqa: E501 collection_formats = {} path_params = {} if 'volume_uuid' in params: path_params['volume_uuid'] = params['volume_uuid'] # noqa: E501 query_params = [] header_params = {} form_params = [] local_var_files = {} body_params = None # HTTP header `Accept` header_params['Accept'] = self.api_client.select_header_accept( ['application/json']) # noqa: E501 # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501 ['application/json']) # noqa: E501 # Authentication setting auth_settings = ['Basic', 'Bearer'] # noqa: E501 return self.api_client.call_api( '/storage/volumes/{volume_uuid}', 'GET', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='ResponseDataWithSingleVolume', # noqa: E501 auth_settings=auth_settings, async_req=params.get('async_req'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def update_volume(self, volume_uuid, body_volume_update, **kwargs): # noqa: E501 """Modify volume attributes # noqa: E501 Modify the attributes of an existing volume # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.update_volume(volume_uuid, body_volume_update, async_req=True) >>> result = thread.get() :param async_req bool :param str volume_uuid: FC assigned volume UUID (required) :param BodyVolumeUpdate body_volume_update: (required) :return: CommonResponseFields If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.update_volume_with_http_info(volume_uuid, body_volume_update, **kwargs) # noqa: E501 else: (data) = self.update_volume_with_http_info(volume_uuid, body_volume_update, **kwargs) # noqa: E501 return data def update_volume_with_http_info(self, volume_uuid, body_volume_update, **kwargs): # noqa: E501 """Modify volume attributes # noqa: E501 Modify the attributes of an existing volume # noqa: E501 This method makes a synchronous HTTP request by default. 
To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.update_volume_with_http_info(volume_uuid, body_volume_update, async_req=True) # noqa: E501 >>> result = thread.get() :param async_req bool :param str volume_uuid: FC assigned volume UUID (required) :param BodyVolumeUpdate body_volume_update: (required) :return: CommonResponseFields If the method is called asynchronously, returns the request thread. """ all_params = ['volume_uuid', 'body_volume_update'] # noqa: E501 all_params.append('async_req') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in params['kwargs'].items(): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method update_volume" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'volume_uuid' is set if self.api_client.client_side_validation and ('volume_uuid' not in params or # noqa: E501 params['volume_uuid'] is None): # noqa: E501 raise ValueError("Missing the required parameter `volume_uuid` when calling `update_volume`") # noqa: E501 # verify the required parameter 'body_volume_update' is set if self.api_client.client_side_validation and ('body_volume_update' not in params or # noqa: E501 params['body_volume_update'] is None): # noqa: E501 raise ValueError("Missing the required parameter `body_volume_update` when calling `update_volume`") # noqa: E501 if self.api_client.client_side_validation and ('volume_uuid' in params and not re.search(r'^[A-Fa-f0-9\\-]+$', params['volume_uuid'])): # noqa: E501 raise ValueError("Invalid value for parameter `volume_uuid` when calling `update_volume`, must conform to the pattern `/^[A-Fa-f0-9\\-]+$/`") # noqa: E501 collection_formats = {} path_params = {} if 'volume_uuid' in params: path_params['volume_uuid'] = params['volume_uuid'] # noqa: E501 query_params = [] header_params = {} form_params = [] local_var_files = {} body_params = None if 'body_volume_update' in params: body_params = params['body_volume_update'] # HTTP header `Accept` header_params['Accept'] = self.api_client.select_header_accept( ['application/json']) # noqa: E501 # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501 ['application/json']) # noqa: E501 # Authentication setting auth_settings = ['Basic', 'Bearer'] # noqa: E501 return self.api_client.call_api( '/storage/volumes/{volume_uuid}', 'PATCH', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='CommonResponseFields', # noqa: E501 auth_settings=auth_settings, async_req=params.get('async_req'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) class TopologyApi(object): """NOTE: This class is auto generated by the swagger code generator program Do not edit the class manually. Ref: https://github.com/swagger-api/swagger-codegen """ def __init__(self, api_client=None): if api_client is None: api_client = ApiClient() self.api_client = api_client def add_host(self, body_host_create, **kwargs): # noqa: E501 """Add a host # noqa: E501 Adds a host/server. The host/server may contain up to two FACs # noqa: E501 This method makes a synchronous HTTP request by default. 
To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.add_host(body_host_create, async_req=True) >>> result = thread.get() :param async_req bool :param HostInfo body_host_create: (required) :return: ResponseDataWithCreateUuidString If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.add_host_with_http_info(body_host_create, **kwargs) # noqa: E501 else: (data) = self.add_host_with_http_info(body_host_create, **kwargs) # noqa: E501 return data def add_host_with_http_info(self, body_host_create, **kwargs): # noqa: E501 """Add a host # noqa: E501 Adds a host/server. The host/server may contain up to two FACs # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.add_host_with_http_info(body_host_create, async_req=True) # noqa: E501 >>> result = thread.get() :param async_req bool :param HostInfo body_host_create: (required) :return: ResponseDataWithCreateUuidString If the method is called asynchronously, returns the request thread. """ all_params = ['body_host_create'] # noqa: E501 all_params.append('async_req') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in params['kwargs'].items(): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method add_host" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'body_host_create' is set if self.api_client.client_side_validation and ('body_host_create' not in params or # noqa: E501 params['body_host_create'] is None): # noqa: E501 raise ValueError("Missing the required parameter `body_host_create` when calling `add_host`") # noqa: E501 collection_formats = {} path_params = {} query_params = [] header_params = {} form_params = [] local_var_files = {} body_params = None if 'body_host_create' in params: body_params = params['body_host_create'] # HTTP header `Accept` header_params['Accept'] = self.api_client.select_header_accept( ['application/json']) # noqa: E501 # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501 ['application/json']) # noqa: E501 # Authentication setting auth_settings = ['Basic', 'Bearer'] # noqa: E501 return self.api_client.call_api( '/topology/hosts', 'POST', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='ResponseDataWithCreateUuidString', # noqa: E501 auth_settings=auth_settings, async_req=params.get('async_req'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def delete_host(self, host_uuid, **kwargs): # noqa: E501 """Delete a host # noqa: E501 Deletes the specified host. The delete operation will fail if there are any volumes attached to this host. # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.delete_host(host_uuid, async_req=True) >>> result = thread.get() :param async_req bool :param str host_uuid: Host UUID (required) :return: CommonResponseFields If the method is called asynchronously, returns the request thread. 
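Illustrative sketch; api_client and the host UUID are placeholders created elsewhere:

>>> topology = TopologyApi(api_client)
>>> resp = topology.delete_host('3fa85f64-5717-4562-b3fc-2c963f66afa6')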
""" kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.delete_host_with_http_info(host_uuid, **kwargs) # noqa: E501 else: (data) = self.delete_host_with_http_info(host_uuid, **kwargs) # noqa: E501 return data def delete_host_with_http_info(self, host_uuid, **kwargs): # noqa: E501 """Delete a host # noqa: E501 Deletes the specified host. The delete operation will fail if there are any volumes attached to this host. # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.delete_host_with_http_info(host_uuid, async_req=True) >>> result = thread.get() :param async_req bool :param str host_uuid: Host UUID (required) :return: CommonResponseFields If the method is called asynchronously, returns the request thread. """ all_params = ['host_uuid'] # noqa: E501 all_params.append('async_req') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in params['kwargs'].items(): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method delete_host" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'host_uuid' is set if self.api_client.client_side_validation and ('host_uuid' not in params or # noqa: E501 params['host_uuid'] is None): # noqa: E501 raise ValueError("Missing the required parameter `host_uuid` when calling `delete_host`") # noqa: E501 collection_formats = {} path_params = {} if 'host_uuid' in params: path_params['host_uuid'] = params['host_uuid'] # noqa: E501 query_params = [] header_params = {} form_params = [] local_var_files = {} body_params = None # HTTP header `Accept` header_params['Accept'] = self.api_client.select_header_accept( ['application/json']) # noqa: E501 # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501 ['application/json']) # noqa: E501 # Authentication setting auth_settings = ['Basic', 'Bearer'] # noqa: E501 return self.api_client.call_api( '/topology/hosts/{host_uuid}', 'DELETE', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='CommonResponseFields', # noqa: E501 auth_settings=auth_settings, async_req=params.get('async_req'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def fetch_hosts_with_ids(self, body_fetch_hosts_with_ids, **kwargs): # noqa: E501 """Get properties for the specified hosts # noqa: E501 Retrieves the properties of up to 128 specified hosts # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.fetch_hosts_with_ids(body_fetch_hosts_with_ids, async_req=True) >>> result = thread.get() :param async_req bool :param BodyFetchHostsWithIds body_fetch_hosts_with_ids: (required) :return: ResponseDataWithListOfHosts If the method is called asynchronously, returns the request thread. 
""" kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.fetch_hosts_with_ids_with_http_info(body_fetch_hosts_with_ids, **kwargs) # noqa: E501 else: (data) = self.fetch_hosts_with_ids_with_http_info(body_fetch_hosts_with_ids, **kwargs) # noqa: E501 return data def fetch_hosts_with_ids_with_http_info(self, body_fetch_hosts_with_ids, **kwargs): # noqa: E501 """Get properties for the specified hosts # noqa: E501 Retrieves the properties of up to 128 specified hosts # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.fetch_hosts_with_ids_with_http_info(body_fetch_hosts_with_ids, async_req=True) >>> result = thread.get() :param async_req bool :param BodyFetchHostsWithIds body_fetch_hosts_with_ids: (required) :return: ResponseDataWithListOfHosts If the method is called asynchronously, returns the request thread. """ all_params = ['body_fetch_hosts_with_ids'] # noqa: E501 all_params.append('async_req') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in params['kwargs'].items(): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method fetch_hosts_with_ids" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'body_fetch_hosts_with_ids' is set if self.api_client.client_side_validation and ('body_fetch_hosts_with_ids' not in params or # noqa: E501 params['body_fetch_hosts_with_ids'] is None): # noqa: E501 raise ValueError("Missing the required parameter `body_fetch_hosts_with_ids` when calling `fetch_hosts_with_ids`") # noqa: E501 collection_formats = {} path_params = {} query_params = [] header_params = {} form_params = [] local_var_files = {} body_params = None if 'body_fetch_hosts_with_ids' in params: body_params = params['body_fetch_hosts_with_ids'] # HTTP header `Accept` header_params['Accept'] = self.api_client.select_header_accept( ['application/json']) # noqa: E501 # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501 ['application/json']) # noqa: E501 # Authentication setting auth_settings = ['Basic', 'Bearer'] # noqa: E501 return self.api_client.call_api( '/topology/host_ids/subset', 'POST', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='ResponseDataWithListOfHosts', # noqa: E501 auth_settings=auth_settings, async_req=params.get('async_req'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def get_hierarchical_topology(self, **kwargs): # noqa: E501 """Get system topology # noqa: E501 Retrieve the hierarchal information of DPUs and their drives in the Fungible Storage Cluster # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.get_hierarchical_topology(async_req=True) >>> result = thread.get() :param async_req bool :param str node_class: The type of ndoes to return in the resulting list :param BodyGetHierarchicalTopology body_get_hierarchical_topology: :return: ResponseDpuDriveHierarchy If the method is called asynchronously, returns the request thread. 
""" kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.get_hierarchical_topology_with_http_info(**kwargs) # noqa: E501 else: (data) = self.get_hierarchical_topology_with_http_info(**kwargs) # noqa: E501 return data def get_hierarchical_topology_with_http_info(self, **kwargs): # noqa: E501 """Get system topology # noqa: E501 Retrieve the hierarchal information of DPUs and their drives in the Fungible Storage Cluster # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.get_hierarchical_topology_with_http_info(async_req=True) >>> result = thread.get() :param async_req bool :param str node_class: The type of ndoes to return in the resulting list :param BodyGetHierarchicalTopology body_get_hierarchical_topology: :return: ResponseDpuDriveHierarchy If the method is called asynchronously, returns the request thread. """ all_params = ['node_class', 'body_get_hierarchical_topology'] # noqa: E501 all_params.append('async_req') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in params['kwargs'].items(): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method get_hierarchical_topology" % key ) params[key] = val del params['kwargs'] collection_formats = {} path_params = {} query_params = [] if 'node_class' in params: query_params.append(('node_class', params['node_class'])) # noqa: E501 header_params = {} form_params = [] local_var_files = {} body_params = None if 'body_get_hierarchical_topology' in params: body_params = params['body_get_hierarchical_topology'] # HTTP header `Accept` header_params['Accept'] = self.api_client.select_header_accept( ['application/json']) # noqa: E501 # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501 ['application/json']) # noqa: E501 # Authentication setting auth_settings = ['Basic', 'Bearer'] # noqa: E501 return self.api_client.call_api( '/topology', 'GET', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='ResponseDpuDriveHierarchy', # noqa: E501 auth_settings=auth_settings, async_req=params.get('async_req'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def get_host_id_list(self, **kwargs): # noqa: E501 """Get list of host identifiers # noqa: E501 Retrieves a list of up to 36,864 identifiers for user-added hosts/servers. By default returns list of all host ids. # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.get_host_id_list(async_req=True) >>> result = thread.get() :param async_req bool :param str host_name_contains: Filter \"name\" parameter of hosts to only those than contain specified string :param str host_nqn_contains: Server/host's nqn name :param str fac_type: FAC type :param int limit_ids: The numbers of items to return in the resulting id list :param datetime start_date: List volumes starting from created time :param BodyGetHostIdList body_get_host_id_list: :return: ResponseDataWithListOfHostUuids If the method is called asynchronously, returns the request thread. 
""" kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.get_host_id_list_with_http_info(**kwargs) # noqa: E501 else: (data) = self.get_host_id_list_with_http_info(**kwargs) # noqa: E501 return data def get_host_id_list_with_http_info(self, **kwargs): # noqa: E501 """Get list of host identifiers # noqa: E501 Retrieves a list of up to 36,864 identifiers for user-added hosts/servers. By default returns list of all host ids. # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.get_host_id_list_with_http_info(async_req=True) >>> result = thread.get() :param async_req bool :param str host_name_contains: Filter \"name\" parameter of hosts to only those than contain specified string :param str host_nqn_contains: Server/host's nqn name :param str fac_type: FAC type :param int limit_ids: The numbers of items to return in the resulting id list :param datetime start_date: List volumes starting from created time :param BodyGetHostIdList body_get_host_id_list: :return: ResponseDataWithListOfHostUuids If the method is called asynchronously, returns the request thread. """ all_params = ['host_name_contains', 'host_nqn_contains', 'fac_type', 'limit_ids', 'start_date', 'body_get_host_id_list'] # noqa: E501 all_params.append('async_req') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in params['kwargs'].items(): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method get_host_id_list" % key ) params[key] = val del params['kwargs'] if self.api_client.client_side_validation and ('host_name_contains' in params and not re.search(r'^[A-Za-z0-9_\-\\.\\:]+$', params['host_name_contains'])): # noqa: E501 raise ValueError("Invalid value for parameter `host_name_contains` when calling `get_host_id_list`, must conform to the pattern `/^[A-Za-z0-9_\\-\\.\\:]+$/`") # noqa: E501 if self.api_client.client_side_validation and ('host_nqn_contains' in params and not re.search(r'^[A-Za-z0-9_\-\\.\\:]+$', params['host_nqn_contains'])): # noqa: E501 raise ValueError("Invalid value for parameter `host_nqn_contains` when calling `get_host_id_list`, must conform to the pattern `/^[A-Za-z0-9_\\-\\.\\:]+$/`") # noqa: E501 if self.api_client.client_side_validation and ('limit_ids' in params and params['limit_ids'] > 36864): # noqa: E501 raise ValueError("Invalid value for parameter `limit_ids` when calling `get_host_id_list`, must be a value less than or equal to `36864`") # noqa: E501 if self.api_client.client_side_validation and ('limit_ids' in params and params['limit_ids'] < 1): # noqa: E501 raise ValueError("Invalid value for parameter `limit_ids` when calling `get_host_id_list`, must be a value greater than or equal to `1`") # noqa: E501 collection_formats = {} path_params = {} query_params = [] if 'host_name_contains' in params: query_params.append(('host_name_contains', params['host_name_contains'])) # noqa: E501 if 'host_nqn_contains' in params: query_params.append(('host_nqn_contains', params['host_nqn_contains'])) # noqa: E501 if 'fac_type' in params: query_params.append(('fac_type', params['fac_type'])) # noqa: E501 if 'limit_ids' in params: query_params.append(('limit_ids', params['limit_ids'])) # noqa: E501 if 'start_date' in params: query_params.append(('start_date', params['start_date'])) # noqa: E501 header_params = {} form_params = [] local_var_files 
= {} body_params = None if 'body_get_host_id_list' in params: body_params = params['body_get_host_id_list'] # HTTP header `Accept` header_params['Accept'] = self.api_client.select_header_accept( ['application/json']) # noqa: E501 # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501 ['application/json']) # noqa: E501 # Authentication setting auth_settings = ['Basic', 'Bearer'] # noqa: E501 return self.api_client.call_api( '/topology/host_id_list', 'GET', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='ResponseDataWithListOfHostUuids', # noqa: E501 auth_settings=auth_settings, async_req=params.get('async_req'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def get_host_info(self, host_uuid, **kwargs): # noqa: E501 """Get details of a host # noqa: E501 Retrieves details of a host # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.get_host_info(host_uuid, async_req=True) >>> result = thread.get() :param async_req bool :param str host_uuid: Host UUID (required) :return: ResponseDataWithHostInfo If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.get_host_info_with_http_info(host_uuid, **kwargs) # noqa: E501 else: (data) = self.get_host_info_with_http_info(host_uuid, **kwargs) # noqa: E501 return data def get_host_info_with_http_info(self, host_uuid, **kwargs): # noqa: E501 """Get details of a host # noqa: E501 Retrieves details of a host # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.get_host_info_with_http_info(host_uuid, async_req=True) >>> result = thread.get() :param async_req bool :param str host_uuid: Host UUID (required) :return: ResponseDataWithHostInfo If the method is called asynchronously, returns the request thread. 
""" all_params = ['host_uuid'] # noqa: E501 all_params.append('async_req') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in params['kwargs'].items(): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method get_host_info" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'host_uuid' is set if self.api_client.client_side_validation and ('host_uuid' not in params or # noqa: E501 params['host_uuid'] is None): # noqa: E501 raise ValueError("Missing the required parameter `host_uuid` when calling `get_host_info`") # noqa: E501 collection_formats = {} path_params = {} if 'host_uuid' in params: path_params['host_uuid'] = params['host_uuid'] # noqa: E501 query_params = [] header_params = {} form_params = [] local_var_files = {} body_params = None # HTTP header `Accept` header_params['Accept'] = self.api_client.select_header_accept( ['application/json']) # noqa: E501 # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501 ['application/json']) # noqa: E501 # Authentication setting auth_settings = ['Basic', 'Bearer'] # noqa: E501 return self.api_client.call_api( '/topology/hosts/{host_uuid}', 'GET', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='ResponseDataWithHostInfo', # noqa: E501 auth_settings=auth_settings, async_req=params.get('async_req'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def patch_host(self, host_uuid, body_host_patch, **kwargs): # noqa: E501 """Change selected properties of a host # noqa: E501 Changes the specified properties of the specified host. Can be used to update (add/delete/modify) the FAC information # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.patch_host(host_uuid, body_host_patch, async_req=True) >>> result = thread.get() :param async_req bool :param str host_uuid: Host UUID (required) :param HostInfo body_host_patch: (required) :return: SuccessResponseFields If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.patch_host_with_http_info(host_uuid, body_host_patch, **kwargs) # noqa: E501 else: (data) = self.patch_host_with_http_info(host_uuid, body_host_patch, **kwargs) # noqa: E501 return data def patch_host_with_http_info(self, host_uuid, body_host_patch, **kwargs): # noqa: E501 """Change selected properties of a host # noqa: E501 Changes the specified properties of the specified host. Can be used to update (add/delete/modify) the FAC information # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.patch_host_with_http_info(host_uuid, body_host_patch, async_req=True) >>> result = thread.get() :param async_req bool :param str host_uuid: Host UUID (required) :param HostInfo body_host_patch: (required) :return: SuccessResponseFields If the method is called asynchronously, returns the request thread. 
""" all_params = ['host_uuid', 'body_host_patch'] # noqa: E501 all_params.append('async_req') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in params['kwargs'].items(): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method patch_host" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'host_uuid' is set if self.api_client.client_side_validation and ('host_uuid' not in params or # noqa: E501 params['host_uuid'] is None): # noqa: E501 raise ValueError("Missing the required parameter `host_uuid` when calling `patch_host`") # noqa: E501 # verify the required parameter 'body_host_patch' is set if self.api_client.client_side_validation and ('body_host_patch' not in params or # noqa: E501 params['body_host_patch'] is None): # noqa: E501 raise ValueError("Missing the required parameter `body_host_patch` when calling `patch_host`") # noqa: E501 collection_formats = {} path_params = {} if 'host_uuid' in params: path_params['host_uuid'] = params['host_uuid'] # noqa: E501 query_params = [] header_params = {} form_params = [] local_var_files = {} body_params = None if 'body_host_patch' in params: body_params = params['body_host_patch'] # HTTP header `Accept` header_params['Accept'] = self.api_client.select_header_accept( ['application/json']) # noqa: E501 # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501 ['application/json']) # noqa: E501 # Authentication setting auth_settings = ['Basic', 'Bearer'] # noqa: E501 return self.api_client.call_api( '/topology/hosts/{host_uuid}', 'PATCH', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='SuccessResponseFields', # noqa: E501 auth_settings=auth_settings, async_req=params.get('async_req'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) class AdditionalFields(object): """NOTE: This class is auto generated by the swagger code generator program Do not edit the class manually. """ """ Attributes: swagger_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ swagger_types = { 'field_type': 'str', 'field': 'object' } attribute_map = { 'field_type': 'field_type', 'field': 'field' } def __init__(self, field_type=None, field=None): # noqa: E501 """AdditionalFields - a model defined in Swagger""" # noqa: E501 self._field_type = None self._field = None self.discriminator = None self.field_type = field_type if field is not None: self.field = field @property def field_type(self): """Gets the field_type of this AdditionalFields. # noqa: E501 :return: The field_type of this AdditionalFields. # noqa: E501 :rtype: str """ return self._field_type @field_type.setter def field_type(self, field_type): """Sets the field_type of this AdditionalFields. :param field_type: The field_type of this AdditionalFields. # noqa: E501 :type: str """ if field_type is None: raise ValueError("Invalid value for `field_type`, must not be `None`") # noqa: E501 self._field_type = field_type @property def field(self): """Gets the field of this AdditionalFields. # noqa: E501 :return: The field of this AdditionalFields. 
# noqa: E501 :rtype: object """ return self._field @field.setter def field(self, field): """Sets the field of this AdditionalFields. :param field: The field of this AdditionalFields. # noqa: E501 :type: object """ self._field = field def to_dict(self): """Returns the model properties as a dict""" result = {} for attr, _ in self.swagger_types.items(): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value )) elif hasattr(value, "to_dict"): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item, value.items() )) else: result[attr] = value if issubclass(AdditionalFields, dict): for key, value in self.items(): result[key] = value return result def to_str(self): """Returns the string representation of the model""" return pprint.pformat(self.to_dict()) def __repr__(self): """For `print` and `pprint`""" return self.to_str() def __eq__(self, other): """Returns true if both objects are equal""" if not isinstance(other, AdditionalFields): return False return self.__dict__ == other.__dict__ def __ne__(self, other): """Returns true if both objects are not equal""" return not self == other # coding: utf-8 """ Fungible Cluster Services Intent API Intent based REST API for interfacing between the management/orchestration system and Fungible Cluster Services # noqa: E501 OpenAPI spec version: 2.2.10 Contact: support@fungible.com Generated by: https://github.com/swagger-api/swagger-codegen.git """ class BlockSize(object): """NOTE: This class is auto generated by the swagger code generator program Do not edit the class manually. """ """ allowed enum values """ _4096 = "4096" _8192 = "8192" _16384 = "16384" """ Attributes: swagger_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ swagger_types = { } attribute_map = { } def __init__(self): # noqa: E501 """BlockSize - a model defined in Swagger""" # noqa: E501 self.discriminator = None def to_dict(self): """Returns the model properties as a dict""" result = {} for attr, _ in self.swagger_types.items(): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value )) elif hasattr(value, "to_dict"): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item, value.items() )) else: result[attr] = value if issubclass(BlockSize, dict): for key, value in self.items(): result[key] = value return result def to_str(self): """Returns the string representation of the model""" return pprint.pformat(self.to_dict()) def __repr__(self): """For `print` and `pprint`""" return self.to_str() def __eq__(self, other): """Returns true if both objects are equal""" if not isinstance(other, BlockSize): return False return self.__dict__ == other.__dict__ def __ne__(self, other): """Returns true if both objects are not equal""" return not self == other class BodyFetchHostsWithIds(object): """NOTE: This class is auto generated by the swagger code generator program Do not edit the class manually. """ """ Attributes: swagger_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. 
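Illustrative sketch of the serialization helpers shared by these generated models;
the host id is a placeholder. BlockSize, defined just above, exposes the allowed
block sizes as string constants:

>>> body = BodyFetchHostsWithIds(host_id_list=['uuid-1'])
>>> body.to_dict()
{'host_id_list': ['uuid-1']}
>>> BlockSize._4096
'4096'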
""" swagger_types = { 'host_id_list': 'list[str]' } attribute_map = { 'host_id_list': 'host_id_list' } def __init__(self, host_id_list=None): # noqa: E501 """BodyFetchHostsWithIds - a model defined in Swagger""" # noqa: E501 self._host_id_list = None self.discriminator = None self.host_id_list = host_id_list @property def host_id_list(self): """Gets the host_id_list of this BodyFetchHostsWithIds. # noqa: E501 :return: The host_id_list of this BodyFetchHostsWithIds. # noqa: E501 :rtype: list[str] """ return self._host_id_list @host_id_list.setter def host_id_list(self, host_id_list): """Sets the host_id_list of this BodyFetchHostsWithIds. :param host_id_list: The host_id_list of this BodyFetchHostsWithIds. # noqa: E501 :type: list[str] """ if host_id_list is None: raise ValueError("Invalid value for `host_id_list`, must not be `None`") # noqa: E501 self._host_id_list = host_id_list def to_dict(self): """Returns the model properties as a dict""" result = {} for attr, _ in self.swagger_types.items(): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value )) elif hasattr(value, "to_dict"): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item, value.items() )) else: result[attr] = value if issubclass(BodyFetchHostsWithIds, dict): for key, value in self.items(): result[key] = value return result def to_str(self): """Returns the string representation of the model""" return pprint.pformat(self.to_dict()) def __repr__(self): """For `print` and `pprint`""" return self.to_str() def __eq__(self, other): """Returns true if both objects are equal""" if not isinstance(other, BodyFetchHostsWithIds): return False return self.__dict__ == other.__dict__ def __ne__(self, other): """Returns true if both objects are not equal""" return not self == other class BodyGetHierarchicalTopology(object): """NOTE: This class is auto generated by the swagger code generator program Do not edit the class manually. """ """ Attributes: swagger_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ swagger_types = { 'node_class': 'str' } attribute_map = { 'node_class': 'node_class' } def __init__(self, node_class=None): # noqa: E501 """BodyGetHierarchicalTopology - a model defined in Swagger""" # noqa: E501 self._node_class = None self.discriminator = None if node_class is not None: self.node_class = node_class @property def node_class(self): """Gets the node_class of this BodyGetHierarchicalTopology. # noqa: E501 Internally assigned from query parameter limit # noqa: E501 :return: The node_class of this BodyGetHierarchicalTopology. # noqa: E501 :rtype: str """ return self._node_class @node_class.setter def node_class(self, node_class): """Sets the node_class of this BodyGetHierarchicalTopology. Internally assigned from query parameter limit # noqa: E501 :param node_class: The node_class of this BodyGetHierarchicalTopology. 
# noqa: E501 :type: str """ self._node_class = node_class def to_dict(self): """Returns the model properties as a dict""" result = {} for attr, _ in self.swagger_types.items(): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value )) elif hasattr(value, "to_dict"): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item, value.items() )) else: result[attr] = value if issubclass(BodyGetHierarchicalTopology, dict): for key, value in self.items(): result[key] = value return result def to_str(self): """Returns the string representation of the model""" return pprint.pformat(self.to_dict()) def __repr__(self): """For `print` and `pprint`""" return self.to_str() def __eq__(self, other): """Returns true if both objects are equal""" if not isinstance(other, BodyGetHierarchicalTopology): return False return self.__dict__ == other.__dict__ def __ne__(self, other): """Returns true if both objects are not equal""" return not self == other class BodyGetHostIdList(object): """NOTE: This class is auto generated by the swagger code generator program Do not edit the class manually. """ """ Attributes: swagger_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ swagger_types = { 'limit_ids_param': 'int', 'host_name_contains': 'str', 'start_date_param': 'datetime', 'host_nqn_contains': 'str', 'fac_type': 'str' } attribute_map = { 'limit_ids_param': 'limit_ids_param', 'host_name_contains': 'host_name_contains', 'start_date_param': 'start_date_param', 'host_nqn_contains': 'host_nqn_contains', 'fac_type': 'fac_type' } def __init__(self, limit_ids_param=None, host_name_contains=None, start_date_param=None, host_nqn_contains=None, fac_type=None): # noqa: E501 """BodyGetHostIdList - a model defined in Swagger""" # noqa: E501 self._limit_ids_param = None self._host_name_contains = None self._start_date_param = None self._host_nqn_contains = None self._fac_type = None self.discriminator = None if limit_ids_param is not None: self.limit_ids_param = limit_ids_param if host_name_contains is not None: self.host_name_contains = host_name_contains if start_date_param is not None: self.start_date_param = start_date_param if host_nqn_contains is not None: self.host_nqn_contains = host_nqn_contains if fac_type is not None: self.fac_type = fac_type @property def limit_ids_param(self): """Gets the limit_ids_param of this BodyGetHostIdList. # noqa: E501 Internally assigned from query parameter limit # noqa: E501 :return: The limit_ids_param of this BodyGetHostIdList. # noqa: E501 :rtype: int """ return self._limit_ids_param @limit_ids_param.setter def limit_ids_param(self, limit_ids_param): """Sets the limit_ids_param of this BodyGetHostIdList. Internally assigned from query parameter limit # noqa: E501 :param limit_ids_param: The limit_ids_param of this BodyGetHostIdList. # noqa: E501 :type: int """ self._limit_ids_param = limit_ids_param @property def host_name_contains(self): """Gets the host_name_contains of this BodyGetHostIdList. # noqa: E501 Filter \"name\" parameter of hosts to only those than contain specified string # noqa: E501 :return: The host_name_contains of this BodyGetHostIdList. 
# noqa: E501 :rtype: str """ return self._host_name_contains @host_name_contains.setter def host_name_contains(self, host_name_contains): """Sets the host_name_contains of this BodyGetHostIdList. Filter \"name\" parameter of hosts to only those than contain specified string # noqa: E501 :param host_name_contains: The host_name_contains of this BodyGetHostIdList. # noqa: E501 :type: str """ self._host_name_contains = host_name_contains @property def start_date_param(self): """Gets the start_date_param of this BodyGetHostIdList. # noqa: E501 Query parameter from created time # noqa: E501 :return: The start_date_param of this BodyGetHostIdList. # noqa: E501 :rtype: datetime """ return self._start_date_param @start_date_param.setter def start_date_param(self, start_date_param): """Sets the start_date_param of this BodyGetHostIdList. Query parameter from created time # noqa: E501 :param start_date_param: The start_date_param of this BodyGetHostIdList. # noqa: E501 :type: datetime """ self._start_date_param = start_date_param @property def host_nqn_contains(self): """Gets the host_nqn_contains of this BodyGetHostIdList. # noqa: E501 Host nqn name # noqa: E501 :return: The host_nqn_contains of this BodyGetHostIdList. # noqa: E501 :rtype: str """ return self._host_nqn_contains @host_nqn_contains.setter def host_nqn_contains(self, host_nqn_contains): """Sets the host_nqn_contains of this BodyGetHostIdList. Host nqn name # noqa: E501 :param host_nqn_contains: The host_nqn_contains of this BodyGetHostIdList. # noqa: E501 :type: str """ self._host_nqn_contains = host_nqn_contains @property def fac_type(self): """Gets the fac_type of this BodyGetHostIdList. # noqa: E501 FAC type # noqa: E501 :return: The fac_type of this BodyGetHostIdList. # noqa: E501 :rtype: str """ return self._fac_type @fac_type.setter def fac_type(self, fac_type): """Sets the fac_type of this BodyGetHostIdList. FAC type # noqa: E501 :param fac_type: The fac_type of this BodyGetHostIdList. # noqa: E501 :type: str """ self._fac_type = fac_type def to_dict(self): """Returns the model properties as a dict""" result = {} for attr, _ in self.swagger_types.items(): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value )) elif hasattr(value, "to_dict"): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item, value.items() )) else: result[attr] = value if issubclass(BodyGetHostIdList, dict): for key, value in self.items(): result[key] = value return result def to_str(self): """Returns the string representation of the model""" return pprint.pformat(self.to_dict()) def __repr__(self): """For `print` and `pprint`""" return self.to_str() def __eq__(self, other): """Returns true if both objects are equal""" if not isinstance(other, BodyGetHostIdList): return False return self.__dict__ == other.__dict__ def __ne__(self, other): """Returns true if both objects are not equal""" return not self == other class BodyVolumeAttach(object): """NOTE: This class is auto generated by the swagger code generator program Do not edit the class manually. """ """ Attributes: swagger_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. 
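Illustrative sketch of building an attach body; the values are placeholders.
host_uuid, when specified, supersedes host_nqn, and the host_nqn setter enforces a
length of 5 to 223 characters plus a character-set pattern:

>>> body = BodyVolumeAttach(host_uuid='3fa85f64-5717-4562-b3fc-2c963f66afa6', persistent_attach=True)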
""" swagger_types = { 'transport': 'Transport', 'host_uuid': 'str', 'host_nqn': 'str', 'remote_ip': 'str', 'fnid': 'object', 'huid': 'object', 'ctlid': 'object', 'pci_bus': 'int', 'pci_device': 'int', 'pci_function': 'int', 'initiator_uuid': 'str', 'persistent_attach': 'bool', 'max_connections': 'int', 'disable_crc_check': 'bool', 'max_read_iops_ratio': 'int', 'max_read_iops': 'int', 'host_lba_size': 'int', 'queue_depth': 'int' } attribute_map = { 'transport': 'transport', 'host_uuid': 'host_uuid', 'host_nqn': 'host_nqn', 'remote_ip': 'remote_ip', 'fnid': 'fnid', 'huid': 'huid', 'ctlid': 'ctlid', 'pci_bus': 'pci_bus', 'pci_device': 'pci_device', 'pci_function': 'pci_function', 'initiator_uuid': 'initiator_uuid', 'persistent_attach': 'persistent_attach', 'max_connections': 'max_connections', 'disable_crc_check': 'disable_crc_check', 'max_read_iops_ratio': 'max_read_iops_ratio', 'max_read_iops': 'max_read_iops', 'host_lba_size': 'host_lba_size', 'queue_depth': 'queue_depth' } def __init__(self, transport=None, host_uuid=None, host_nqn=None, remote_ip=None, fnid=None, huid=None, ctlid=None, pci_bus=None, pci_device=None, pci_function=None, initiator_uuid=None, persistent_attach=None, max_connections=None, disable_crc_check=None, max_read_iops_ratio=None, max_read_iops=0, host_lba_size=None, queue_depth=0): # noqa: E501 """BodyVolumeAttach - a model defined in Swagger""" # noqa: E501 self._transport = None self._host_uuid = None self._host_nqn = None self._remote_ip = None self._fnid = None self._huid = None self._ctlid = None self._pci_bus = None self._pci_device = None self._pci_function = None self._initiator_uuid = None self._persistent_attach = None self._max_connections = None self._disable_crc_check = None self._max_read_iops_ratio = None self._max_read_iops = None self._host_lba_size = None self._queue_depth = None self.discriminator = None if transport is not None: self.transport = transport if host_uuid is not None: self.host_uuid = host_uuid if host_nqn is not None: self.host_nqn = host_nqn if remote_ip is not None: self.remote_ip = remote_ip if fnid is not None: self.fnid = fnid if huid is not None: self.huid = huid if ctlid is not None: self.ctlid = ctlid if pci_bus is not None: self.pci_bus = pci_bus if pci_device is not None: self.pci_device = pci_device if pci_function is not None: self.pci_function = pci_function if initiator_uuid is not None: self.initiator_uuid = initiator_uuid if persistent_attach is not None: self.persistent_attach = persistent_attach if max_connections is not None: self.max_connections = max_connections if disable_crc_check is not None: self.disable_crc_check = disable_crc_check if max_read_iops_ratio is not None: self.max_read_iops_ratio = max_read_iops_ratio if max_read_iops is not None: self.max_read_iops = max_read_iops if host_lba_size is not None: self.host_lba_size = host_lba_size if queue_depth is not None: self.queue_depth = queue_depth @property def transport(self): """Gets the transport of this BodyVolumeAttach. # noqa: E501 :return: The transport of this BodyVolumeAttach. # noqa: E501 :rtype: Transport """ return self._transport @transport.setter def transport(self, transport): """Sets the transport of this BodyVolumeAttach. :param transport: The transport of this BodyVolumeAttach. # noqa: E501 :type: Transport """ self._transport = transport @property def host_uuid(self): """Gets the host_uuid of this BodyVolumeAttach. 
# noqa: E501 When specified, this field supercedes the host_nqn field # noqa: E501 :return: The host_uuid of this BodyVolumeAttach. # noqa: E501 :rtype: str """ return self._host_uuid @host_uuid.setter def host_uuid(self, host_uuid): """Sets the host_uuid of this BodyVolumeAttach. When specified, this field supercedes the host_nqn field # noqa: E501 :param host_uuid: The host_uuid of this BodyVolumeAttach. # noqa: E501 :type: str """ self._host_uuid = host_uuid @property def host_nqn(self): """Gets the host_nqn of this BodyVolumeAttach. # noqa: E501 This parameter is ignored if the host_uuid is specified # noqa: E501 :return: The host_nqn of this BodyVolumeAttach. # noqa: E501 :rtype: str """ return self._host_nqn @host_nqn.setter def host_nqn(self, host_nqn): """Sets the host_nqn of this BodyVolumeAttach. This parameter is ignored if the host_uuid is specified # noqa: E501 :param host_nqn: The host_nqn of this BodyVolumeAttach. # noqa: E501 :type: str """ if host_nqn is not None and len(host_nqn) > 223: raise ValueError("Invalid value for `host_nqn`, length must be less than or equal to `223`") # noqa: E501 if host_nqn is not None and len(host_nqn) < 5: raise ValueError("Invalid value for `host_nqn`, length must be greater than or equal to `5`") # noqa: E501 if host_nqn is not None and not re.search(r'^[A-Za-z0-9_\\-\\.\\:]+$', host_nqn): # noqa: E501 raise ValueError(r"Invalid value for `host_nqn`, must be a follow pattern or equal to `/^[A-Za-z0-9_\\-\\.\\:]+$/`") # noqa: E501 self._host_nqn = host_nqn @property def remote_ip(self): """Gets the remote_ip of this BodyVolumeAttach. # noqa: E501 :return: The remote_ip of this BodyVolumeAttach. # noqa: E501 :rtype: str """ return self._remote_ip @remote_ip.setter def remote_ip(self, remote_ip): """Sets the remote_ip of this BodyVolumeAttach. :param remote_ip: The remote_ip of this BodyVolumeAttach. # noqa: E501 :type: str """ self._remote_ip = remote_ip @property def fnid(self): """Gets the fnid of this BodyVolumeAttach. # noqa: E501 Valid for transport=PCI # noqa: E501 :return: The fnid of this BodyVolumeAttach. # noqa: E501 :rtype: object """ return self._fnid @fnid.setter def fnid(self, fnid): """Sets the fnid of this BodyVolumeAttach. Valid for transport=PCI # noqa: E501 :param fnid: The fnid of this BodyVolumeAttach. # noqa: E501 :type: object """ self._fnid = fnid @property def huid(self): """Gets the huid of this BodyVolumeAttach. # noqa: E501 Valid for transport=PCI # noqa: E501 :return: The huid of this BodyVolumeAttach. # noqa: E501 :rtype: object """ return self._huid @huid.setter def huid(self, huid): """Sets the huid of this BodyVolumeAttach. Valid for transport=PCI # noqa: E501 :param huid: The huid of this BodyVolumeAttach. # noqa: E501 :type: object """ self._huid = huid @property def ctlid(self): """Gets the ctlid of this BodyVolumeAttach. # noqa: E501 Valid for transport=PCI # noqa: E501 :return: The ctlid of this BodyVolumeAttach. # noqa: E501 :rtype: object """ return self._ctlid @ctlid.setter def ctlid(self, ctlid): """Sets the ctlid of this BodyVolumeAttach. Valid for transport=PCI # noqa: E501 :param ctlid: The ctlid of this BodyVolumeAttach. # noqa: E501 :type: object """ self._ctlid = ctlid @property def pci_bus(self): """Gets the pci_bus of this BodyVolumeAttach. # noqa: E501 Valid for transport=PCI_BDF # noqa: E501 :return: The pci_bus of this BodyVolumeAttach. # noqa: E501 :rtype: int """ return self._pci_bus @pci_bus.setter def pci_bus(self, pci_bus): """Sets the pci_bus of this BodyVolumeAttach. 
Valid for transport=PCI_BDF # noqa: E501 :param pci_bus: The pci_bus of this BodyVolumeAttach. # noqa: E501 :type: int """ self._pci_bus = pci_bus @property def pci_device(self): """Gets the pci_device of this BodyVolumeAttach. # noqa: E501 Valid for transport=PCI_BDF # noqa: E501 :return: The pci_device of this BodyVolumeAttach. # noqa: E501 :rtype: int """ return self._pci_device @pci_device.setter def pci_device(self, pci_device): """Sets the pci_device of this BodyVolumeAttach. Valid for transport=PCI_BDF # noqa: E501 :param pci_device: The pci_device of this BodyVolumeAttach. # noqa: E501 :type: int """ self._pci_device = pci_device @property def pci_function(self): """Gets the pci_function of this BodyVolumeAttach. # noqa: E501 Valid for transport=PCI_BDF # noqa: E501 :return: The pci_function of this BodyVolumeAttach. # noqa: E501 :rtype: int """ return self._pci_function @pci_function.setter def pci_function(self, pci_function): """Sets the pci_function of this BodyVolumeAttach. Valid for transport=PCI_BDF # noqa: E501 :param pci_function: The pci_function of this BodyVolumeAttach. # noqa: E501 :type: int """ self._pci_function = pci_function @property def initiator_uuid(self): """Gets the initiator_uuid of this BodyVolumeAttach. # noqa: E501 Storage initiator's unique identifier to attach volume to # noqa: E501 :return: The initiator_uuid of this BodyVolumeAttach. # noqa: E501 :rtype: str """ return self._initiator_uuid @initiator_uuid.setter def initiator_uuid(self, initiator_uuid): """Sets the initiator_uuid of this BodyVolumeAttach. Storage initiator's unique identifier to attach volume to # noqa: E501 :param initiator_uuid: The initiator_uuid of this BodyVolumeAttach. # noqa: E501 :type: str """ self._initiator_uuid = initiator_uuid @property def persistent_attach(self): """Gets the persistent_attach of this BodyVolumeAttach. # noqa: E501 Flag that indicates that volume needs to be reattached to the Storage Initiator after a reboot # noqa: E501 :return: The persistent_attach of this BodyVolumeAttach. # noqa: E501 :rtype: bool """ return self._persistent_attach @persistent_attach.setter def persistent_attach(self, persistent_attach): """Sets the persistent_attach of this BodyVolumeAttach. Flag that indicates that volume needs to be reattached to the Storage Initiator after a reboot # noqa: E501 :param persistent_attach: The persistent_attach of this BodyVolumeAttach. # noqa: E501 :type: bool """ self._persistent_attach = persistent_attach @property def max_connections(self): """Gets the max_connections of this BodyVolumeAttach. # noqa: E501 The number of connections allowed per controller. To be compatible with the Swagger Python client, the value '0' is accepted. The actual max_connections offered by the datapath to a host client is 4 if 0 <= max_connections < 4 Note: a. Users must ensure that the queue_depth * max_connections <= 1024 b. There's a finite pool of connections per DPU. Assigning an arbitrarily large number of connections to every volume may exhaust that pool, preventing new volumes from being attached. # noqa: E501 :return: The max_connections of this BodyVolumeAttach. # noqa: E501 :rtype: int """ return self._max_connections @max_connections.setter def max_connections(self, max_connections): """Sets the max_connections of this BodyVolumeAttach. The number of connections allowed per controller. To be compatible with the Swagger Python client, the value '0' is accepted. 
The actual max_connections offered by the datapath to a host client is 4 if 0 <= max_connections < 4 Note: a. Users must ensure that the queue_depth * max_connections <= 1024 b. There's a finite pool of connections per DPU. Assigning an arbitrarily large number of connections to every volume may exhaust that pool, preventing new volumes from being attached. # noqa: E501 :param max_connections: The max_connections of this BodyVolumeAttach. # noqa: E501 :type: int """ if max_connections is not None and max_connections > 144: # noqa: E501 raise ValueError("Invalid value for `max_connections`, must be a value less than or equal to `144`") # noqa: E501 if max_connections is not None and max_connections < 0: # noqa: E501 raise ValueError("Invalid value for `max_connections`, must be a value greater than or equal to `0`") # noqa: E501 self._max_connections = max_connections @property def disable_crc_check(self): """Gets the disable_crc_check of this BodyVolumeAttach. # noqa: E501 Disable crc check on a volume # noqa: E501 :return: The disable_crc_check of this BodyVolumeAttach. # noqa: E501 :rtype: bool """ return self._disable_crc_check @disable_crc_check.setter def disable_crc_check(self, disable_crc_check): """Sets the disable_crc_check of this BodyVolumeAttach. Disable crc check on a volume # noqa: E501 :param disable_crc_check: The disable_crc_check of this BodyVolumeAttach. # noqa: E501 :type: bool """ self._disable_crc_check = disable_crc_check @property def max_read_iops_ratio(self): """Gets the max_read_iops_ratio of this BodyVolumeAttach. # noqa: E501 This setting can be specified in addition to or in place of an absolute max_read_iops number. For qos critical volumes this is a percentage ratio of the intended max_read_iops compared to the default min_read_iops of the volume. For best effort volumes, it is a percentage ratio of the intended max_read_iops compared to the default max_read_iops of the volume. When specified in addition to the absolute max_read_iops, the greater computed value will be set. # noqa: E501 :return: The max_read_iops_ratio of this BodyVolumeAttach. # noqa: E501 :rtype: int """ return self._max_read_iops_ratio @max_read_iops_ratio.setter def max_read_iops_ratio(self, max_read_iops_ratio): """Sets the max_read_iops_ratio of this BodyVolumeAttach. This setting can be specified in addition to or in place of an absolute max_read_iops number. For qos critical volumes this is a percentage ratio of the intended max_read_iops compared to the default min_read_iops of the volume. For best effort volumes, it is a percentage ratio of the intended max_read_iops compared to the default max_read_iops of the volume. When specified in addition to the absolute max_read_iops, the greater computed value will be set. # noqa: E501 :param max_read_iops_ratio: The max_read_iops_ratio of this BodyVolumeAttach. # noqa: E501 :type: int """ if max_read_iops_ratio is not None and max_read_iops_ratio > 400: # noqa: E501 raise ValueError("Invalid value for `max_read_iops_ratio`, must be a value less than or equal to `400`") # noqa: E501 if max_read_iops_ratio is not None and max_read_iops_ratio < 100: # noqa: E501 raise ValueError("Invalid value for `max_read_iops_ratio`, must be a value greater than or equal to `100`") # noqa: E501 self._max_read_iops_ratio = max_read_iops_ratio @property def max_read_iops(self): """Gets the max_read_iops of this BodyVolumeAttach. # noqa: E501 If specified, the max_read_iops setting overrides the QoS settings used during volume creation. 
In release <= 4.1, this setting also applies to the min_read_iops since max=min. In later releases however this setting refers exclusively to max_read_iops. When specified in addition to the max_read_iops_ratio, the greater computed value will be set. The datapath will deliver a minimum of 20 IOPS/GiB when this parameter has a value < 20. To be compatible with the Swagger Python client, the value '0' is accepted but the actual value applied is the default IOPS/GiB specified during volume creation. # noqa: E501 :return: The max_read_iops of this BodyVolumeAttach. # noqa: E501 :rtype: int """ return self._max_read_iops @max_read_iops.setter def max_read_iops(self, max_read_iops): """Sets the max_read_iops of this BodyVolumeAttach. If specified, the max_read_iops setting overrides the QoS settings used during volume creation. In release <= 4.1, this setting also applies to the min_read_iops since max=min. In later releases however this setting refers exclusively to max_read_iops. When specified in addition to the max_read_iops_ratio, the greater computed value will be set. The datapath will deliver a minimum of 20 IOPS/GiB when this parameter has a value < 20. To be compatible with the Swagger Python client, the value '0' is accepted but the actual value applied is the default IOPS/GiB specified during volume creation. # noqa: E501 :param max_read_iops: The max_read_iops of this BodyVolumeAttach. # noqa: E501 :type: int """ if max_read_iops is not None and max_read_iops > 250000: # noqa: E501 raise ValueError("Invalid value for `max_read_iops`, must be a value less than or equal to `250000`") # noqa: E501 if max_read_iops is not None and max_read_iops < 0: # noqa: E501 raise ValueError("Invalid value for `max_read_iops`, must be a value greater than or equal to `0`") # noqa: E501 self._max_read_iops = max_read_iops @property def host_lba_size(self): """Gets the host_lba_size of this BodyVolumeAttach. # noqa: E501 Block size defaults to 4KiB when not specified. The 512Byte size is used by ESXi host. # noqa: E501 :return: The host_lba_size of this BodyVolumeAttach. # noqa: E501 :rtype: int """ return self._host_lba_size @host_lba_size.setter def host_lba_size(self, host_lba_size): """Sets the host_lba_size of this BodyVolumeAttach. Block size defaults to 4KiB when not specified. The 512Byte size is used by ESXi host. # noqa: E501 :param host_lba_size: The host_lba_size of this BodyVolumeAttach. # noqa: E501 :type: int """ allowed_values = [512, 4096] # noqa: E501 if host_lba_size not in allowed_values: raise ValueError( "Invalid value for `host_lba_size` ({0}), must be one of {1}" # noqa: E501 .format(host_lba_size, allowed_values) ) self._host_lba_size = host_lba_size @property def queue_depth(self): """Gets the queue_depth of this BodyVolumeAttach. # noqa: E501 Indicates the max number of outstanding I/O requests per connection. To be compatible with the Swagger Python client, the value '0' is accepted. The actual queue_depth offered by the datapath to a host client is 4 if 0 <= queue_depth < 4 Note: Users must ensure that the queue_depth * max_connections <= 1024 # noqa: E501 :return: The queue_depth of this BodyVolumeAttach. # noqa: E501 :rtype: int """ return self._queue_depth @queue_depth.setter def queue_depth(self, queue_depth): """Sets the queue_depth of this BodyVolumeAttach. Indicates the max number of outstanding I/O requests per connection. To be compatible with the Swagger Python client, the value '0' is accepted. 
The actual queue_depth offered by the datapath to a host client is 4 if 0 <= queue_depth < 4 Note: Users must ensure that the queue_depth * max_connections <= 1024 # noqa: E501 :param queue_depth: The queue_depth of this BodyVolumeAttach. # noqa: E501 :type: int """ if queue_depth is not None and queue_depth > 128: # noqa: E501 raise ValueError("Invalid value for `queue_depth`, must be a value less than or equal to `128`") # noqa: E501 if queue_depth is not None and queue_depth < 0: # noqa: E501 raise ValueError("Invalid value for `queue_depth`, must be a value greater than or equal to `0`") # noqa: E501 self._queue_depth = queue_depth def to_dict(self): """Returns the model properties as a dict""" result = {} for attr, _ in self.swagger_types.items(): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value )) elif hasattr(value, "to_dict"): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item, value.items() )) else: result[attr] = value if issubclass(BodyVolumeAttach, dict): for key, value in self.items(): result[key] = value return result def to_str(self): """Returns the string representation of the model""" return pprint.pformat(self.to_dict()) def __repr__(self): """For `print` and `pprint`""" return self.to_str() def __eq__(self, other): """Returns true if both objects are equal""" if not isinstance(other, BodyVolumeAttach): return False return self.__dict__ == other.__dict__ def __ne__(self, other): """Returns true if both objects are not equal""" return not self == other class BodyVolumeIntentCreate(object): """NOTE: This class is auto generated by the swagger code generator program Do not edit the class manually. """ """ Attributes: swagger_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. 
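Example usage (hand-written illustrative sketch, not generator output; the
name and capacity are placeholders, and the vol_type value is assumed to be a
member of the VolumeTypes enum defined elsewhere in this module):

    # name, vol_type and capacity are mandatory; their setters reject None.
    create_body = BodyVolumeIntentCreate(
        name='vol_demo_01',           # must satisfy the name checks in the setter below
        vol_type=chosen_volume_type,  # hypothetical variable holding a VolumeTypes value
        capacity=1073741824,          # capacity value; the unit is not stated here
        encrypt=False,
        qos_band=1,
    )
    request_body = create_body.to_dict()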
""" swagger_types = { 'name': 'str', 'vol_type': 'VolumeTypes', 'capacity': 'int', 'compression_effort': 'int', 'qos_band': 'int', 'data_protection': 'DataProtection', 'encrypt': 'bool', 'kmip_secret_key': 'str', 'crc_enable': 'bool', 'is_clone': 'bool', 'clone_source_volume_uuid': 'str', 'snap_support': 'bool', 'space_allocation_policy': 'SpaceAllocationPolicy', 'block_size': 'BlockSize', 'initialize_to_zeros': 'bool', 'additional_fields': 'AdditionalFields', 'fd_op': 'str', 'fault_domain_ids': 'list[str]' } attribute_map = { 'name': 'name', 'vol_type': 'vol_type', 'capacity': 'capacity', 'compression_effort': 'compression_effort', 'qos_band': 'qos_band', 'data_protection': 'data_protection', 'encrypt': 'encrypt', 'kmip_secret_key': 'kmip_secret_key', 'crc_enable': 'crc_enable', 'is_clone': 'is_clone', 'clone_source_volume_uuid': 'clone_source_volume_uuid', 'snap_support': 'snap_support', 'space_allocation_policy': 'space_allocation_policy', 'block_size': 'block_size', 'initialize_to_zeros': 'initialize_to_zeros', 'additional_fields': 'additional_fields', 'fd_op': 'fd_op', 'fault_domain_ids': 'fault_domain_ids' } def __init__(self, name=None, vol_type=None, capacity=None, compression_effort=None, qos_band=None, data_protection=None, encrypt=None, kmip_secret_key=None, crc_enable=None, is_clone=None, clone_source_volume_uuid=None, snap_support=None, space_allocation_policy=None, block_size=None, initialize_to_zeros=None, additional_fields=None, fd_op=None, fault_domain_ids=None): # noqa: E501 """BodyVolumeIntentCreate - a model defined in Swagger""" # noqa: E501 self._name = None self._vol_type = None self._capacity = None self._compression_effort = None self._qos_band = None self._data_protection = None self._encrypt = None self._kmip_secret_key = None self._crc_enable = None self._is_clone = None self._clone_source_volume_uuid = None self._snap_support = None self._space_allocation_policy = None self._block_size = None self._initialize_to_zeros = None self._additional_fields = None self._fd_op = None self._fault_domain_ids = None self.discriminator = None self.name = name self.vol_type = vol_type self.capacity = capacity if compression_effort is not None: self.compression_effort = compression_effort if qos_band is not None: self.qos_band = qos_band if data_protection is not None: self.data_protection = data_protection if encrypt is not None: self.encrypt = encrypt if kmip_secret_key is not None: self.kmip_secret_key = kmip_secret_key if crc_enable is not None: self.crc_enable = crc_enable if is_clone is not None: self.is_clone = is_clone if clone_source_volume_uuid is not None: self.clone_source_volume_uuid = clone_source_volume_uuid if snap_support is not None: self.snap_support = snap_support if space_allocation_policy is not None: self.space_allocation_policy = space_allocation_policy if block_size is not None: self.block_size = block_size if initialize_to_zeros is not None: self.initialize_to_zeros = initialize_to_zeros if additional_fields is not None: self.additional_fields = additional_fields if fd_op is not None: self.fd_op = fd_op if fault_domain_ids is not None: self.fault_domain_ids = fault_domain_ids @property def name(self): """Gets the name of this BodyVolumeIntentCreate. # noqa: E501 :return: The name of this BodyVolumeIntentCreate. # noqa: E501 :rtype: str """ return self._name @name.setter def name(self, name): """Sets the name of this BodyVolumeIntentCreate. :param name: The name of this BodyVolumeIntentCreate. 
# noqa: E501 :type: str """ if name is None: raise ValueError("Invalid value for `name`, must not be `None`") # noqa: E501 if name is not None and len(name) > 255: raise ValueError("Invalid value for `name`, length must be less than or equal to `255`") # noqa: E501 if name is not None and len(name) < 1: raise ValueError("Invalid value for `name`, length must be greater than or equal to `1`") # noqa: E501 if name is not None and not re.search(r'^[A-Za-z0-9_\\-\\.\\:]+$', name): # noqa: E501 raise ValueError(r"Invalid value for `name`, must be a follow pattern or equal to `/^[A-Za-z0-9_\\-\\.\\:]+$/`") # noqa: E501 self._name = name @property def vol_type(self): """Gets the vol_type of this BodyVolumeIntentCreate. # noqa: E501 :return: The vol_type of this BodyVolumeIntentCreate. # noqa: E501 :rtype: VolumeTypes """ return self._vol_type @vol_type.setter def vol_type(self, vol_type): """Sets the vol_type of this BodyVolumeIntentCreate. :param vol_type: The vol_type of this BodyVolumeIntentCreate. # noqa: E501 :type: VolumeTypes """ if vol_type is None: raise ValueError("Invalid value for `vol_type`, must not be `None`") # noqa: E501 self._vol_type = vol_type @property def capacity(self): """Gets the capacity of this BodyVolumeIntentCreate. # noqa: E501 :return: The capacity of this BodyVolumeIntentCreate. # noqa: E501 :rtype: int """ return self._capacity @capacity.setter def capacity(self, capacity): """Sets the capacity of this BodyVolumeIntentCreate. :param capacity: The capacity of this BodyVolumeIntentCreate. # noqa: E501 :type: int """ if capacity is None: raise ValueError("Invalid value for `capacity`, must not be `None`") # noqa: E501 self._capacity = capacity @property def compression_effort(self): """Gets the compression_effort of this BodyVolumeIntentCreate. # noqa: E501 :return: The compression_effort of this BodyVolumeIntentCreate. # noqa: E501 :rtype: int """ return self._compression_effort @compression_effort.setter def compression_effort(self, compression_effort): """Sets the compression_effort of this BodyVolumeIntentCreate. :param compression_effort: The compression_effort of this BodyVolumeIntentCreate. # noqa: E501 :type: int """ if compression_effort is not None and compression_effort > 8: # noqa: E501 raise ValueError("Invalid value for `compression_effort`, must be a value less than or equal to `8`") # noqa: E501 if compression_effort is not None and compression_effort < 0: # noqa: E501 raise ValueError("Invalid value for `compression_effort`, must be a value greater than or equal to `0`") # noqa: E501 self._compression_effort = compression_effort @property def qos_band(self): """Gets the qos_band of this BodyVolumeIntentCreate. # noqa: E501 index of the QoS band # noqa: E501 :return: The qos_band of this BodyVolumeIntentCreate. # noqa: E501 :rtype: int """ return self._qos_band @qos_band.setter def qos_band(self, qos_band): """Sets the qos_band of this BodyVolumeIntentCreate. index of the QoS band # noqa: E501 :param qos_band: The qos_band of this BodyVolumeIntentCreate. # noqa: E501 :type: int """ self._qos_band = qos_band @property def data_protection(self): """Gets the data_protection of this BodyVolumeIntentCreate. # noqa: E501 :return: The data_protection of this BodyVolumeIntentCreate. # noqa: E501 :rtype: DataProtection """ return self._data_protection @data_protection.setter def data_protection(self, data_protection): """Sets the data_protection of this BodyVolumeIntentCreate. :param data_protection: The data_protection of this BodyVolumeIntentCreate. 
# noqa: E501 :type: DataProtection """ self._data_protection = data_protection @property def encrypt(self): """Gets the encrypt of this BodyVolumeIntentCreate. # noqa: E501 :return: The encrypt of this BodyVolumeIntentCreate. # noqa: E501 :rtype: bool """ return self._encrypt @encrypt.setter def encrypt(self, encrypt): """Sets the encrypt of this BodyVolumeIntentCreate. :param encrypt: The encrypt of this BodyVolumeIntentCreate. # noqa: E501 :type: bool """ self._encrypt = encrypt @property def kmip_secret_key(self): """Gets the kmip_secret_key of this BodyVolumeIntentCreate. # noqa: E501 Key to the KMIP secret used for volume encryption # noqa: E501 :return: The kmip_secret_key of this BodyVolumeIntentCreate. # noqa: E501 :rtype: str """ return self._kmip_secret_key @kmip_secret_key.setter def kmip_secret_key(self, kmip_secret_key): """Sets the kmip_secret_key of this BodyVolumeIntentCreate. Key to the KMIP secret used for volume encryption # noqa: E501 :param kmip_secret_key: The kmip_secret_key of this BodyVolumeIntentCreate. # noqa: E501 :type: str """ self._kmip_secret_key = kmip_secret_key @property def crc_enable(self): """Gets the crc_enable of this BodyVolumeIntentCreate. # noqa: E501 :return: The crc_enable of this BodyVolumeIntentCreate. # noqa: E501 :rtype: bool """ return self._crc_enable @crc_enable.setter def crc_enable(self, crc_enable): """Sets the crc_enable of this BodyVolumeIntentCreate. :param crc_enable: The crc_enable of this BodyVolumeIntentCreate. # noqa: E501 :type: bool """ self._crc_enable = crc_enable @property def is_clone(self): """Gets the is_clone of this BodyVolumeIntentCreate. # noqa: E501 :return: The is_clone of this BodyVolumeIntentCreate. # noqa: E501 :rtype: bool """ return self._is_clone @is_clone.setter def is_clone(self, is_clone): """Sets the is_clone of this BodyVolumeIntentCreate. :param is_clone: The is_clone of this BodyVolumeIntentCreate. # noqa: E501 :type: bool """ self._is_clone = is_clone @property def clone_source_volume_uuid(self): """Gets the clone_source_volume_uuid of this BodyVolumeIntentCreate. # noqa: E501 :return: The clone_source_volume_uuid of this BodyVolumeIntentCreate. # noqa: E501 :rtype: str """ return self._clone_source_volume_uuid @clone_source_volume_uuid.setter def clone_source_volume_uuid(self, clone_source_volume_uuid): """Sets the clone_source_volume_uuid of this BodyVolumeIntentCreate. :param clone_source_volume_uuid: The clone_source_volume_uuid of this BodyVolumeIntentCreate. # noqa: E501 :type: str """ self._clone_source_volume_uuid = clone_source_volume_uuid @property def snap_support(self): """Gets the snap_support of this BodyVolumeIntentCreate. # noqa: E501 :return: The snap_support of this BodyVolumeIntentCreate. # noqa: E501 :rtype: bool """ return self._snap_support @snap_support.setter def snap_support(self, snap_support): """Sets the snap_support of this BodyVolumeIntentCreate. :param snap_support: The snap_support of this BodyVolumeIntentCreate. # noqa: E501 :type: bool """ self._snap_support = snap_support @property def space_allocation_policy(self): """Gets the space_allocation_policy of this BodyVolumeIntentCreate. # noqa: E501 :return: The space_allocation_policy of this BodyVolumeIntentCreate. # noqa: E501 :rtype: SpaceAllocationPolicy """ return self._space_allocation_policy @space_allocation_policy.setter def space_allocation_policy(self, space_allocation_policy): """Sets the space_allocation_policy of this BodyVolumeIntentCreate. 
:param space_allocation_policy: The space_allocation_policy of this BodyVolumeIntentCreate. # noqa: E501 :type: SpaceAllocationPolicy """ self._space_allocation_policy = space_allocation_policy @property def block_size(self): """Gets the block_size of this BodyVolumeIntentCreate. # noqa: E501 :return: The block_size of this BodyVolumeIntentCreate. # noqa: E501 :rtype: BlockSize """ return self._block_size @block_size.setter def block_size(self, block_size): """Sets the block_size of this BodyVolumeIntentCreate. :param block_size: The block_size of this BodyVolumeIntentCreate. # noqa: E501 :type: BlockSize """ self._block_size = block_size @property def initialize_to_zeros(self): """Gets the initialize_to_zeros of this BodyVolumeIntentCreate. # noqa: E501 After creation, volume contents should appear to be initialized to zero. # noqa: E501 :return: The initialize_to_zeros of this BodyVolumeIntentCreate. # noqa: E501 :rtype: bool """ return self._initialize_to_zeros @initialize_to_zeros.setter def initialize_to_zeros(self, initialize_to_zeros): """Sets the initialize_to_zeros of this BodyVolumeIntentCreate. After creation, volume contents should appear to be initialized to zero. # noqa: E501 :param initialize_to_zeros: The initialize_to_zeros of this BodyVolumeIntentCreate. # noqa: E501 :type: bool """ self._initialize_to_zeros = initialize_to_zeros @property def additional_fields(self): """Gets the additional_fields of this BodyVolumeIntentCreate. # noqa: E501 :return: The additional_fields of this BodyVolumeIntentCreate. # noqa: E501 :rtype: AdditionalFields """ return self._additional_fields @additional_fields.setter def additional_fields(self, additional_fields): """Sets the additional_fields of this BodyVolumeIntentCreate. :param additional_fields: The additional_fields of this BodyVolumeIntentCreate. # noqa: E501 :type: AdditionalFields """ self._additional_fields = additional_fields @property def fd_op(self): """Gets the fd_op of this BodyVolumeIntentCreate. # noqa: E501 :return: The fd_op of this BodyVolumeIntentCreate. # noqa: E501 :rtype: str """ return self._fd_op @fd_op.setter def fd_op(self, fd_op): """Sets the fd_op of this BodyVolumeIntentCreate. :param fd_op: The fd_op of this BodyVolumeIntentCreate. # noqa: E501 :type: str """ allowed_values = ["SUGGESTED_FD_IDS", "EXCLUDE_FD_IDS", "ASSIGNED_FD_ID"] # noqa: E501 if fd_op not in allowed_values: raise ValueError( "Invalid value for `fd_op` ({0}), must be one of {1}" # noqa: E501 .format(fd_op, allowed_values) ) self._fd_op = fd_op @property def fault_domain_ids(self): """Gets the fault_domain_ids of this BodyVolumeIntentCreate. # noqa: E501 The new volume should be created in a fault zone different from those of the raw volume UUIDs listed in this array # noqa: E501 :return: The fault_domain_ids of this BodyVolumeIntentCreate. # noqa: E501 :rtype: list[str] """ return self._fault_domain_ids @fault_domain_ids.setter def fault_domain_ids(self, fault_domain_ids): """Sets the fault_domain_ids of this BodyVolumeIntentCreate. The new volume should be created in a fault zone different from those of the raw volume UUIDs listed in this array # noqa: E501 :param fault_domain_ids: The fault_domain_ids of this BodyVolumeIntentCreate. 
# noqa: E501 :type: list[str] """ self._fault_domain_ids = fault_domain_ids def to_dict(self): """Returns the model properties as a dict""" result = {} for attr, _ in self.swagger_types.items(): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value )) elif hasattr(value, "to_dict"): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item, value.items() )) else: result[attr] = value if issubclass(BodyVolumeIntentCreate, dict): for key, value in self.items(): result[key] = value return result def to_str(self): """Returns the string representation of the model""" return pprint.pformat(self.to_dict()) def __repr__(self): """For `print` and `pprint`""" return self.to_str() def __eq__(self, other): """Returns true if both objects are equal""" if not isinstance(other, BodyVolumeIntentCreate): return False return self.__dict__ == other.__dict__ def __ne__(self, other): """Returns true if both objects are not equal""" return not self == other class BodyVolumeSnapshotCreate(object): """NOTE: This class is auto generated by the swagger code generator program Do not edit the class manually. """ """ Attributes: swagger_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ swagger_types = { 'name': 'str' } attribute_map = { 'name': 'name' } def __init__(self, name=None): # noqa: E501 """BodyVolumeSnapshotCreate - a model defined in Swagger""" # noqa: E501 self._name = None self.discriminator = None self.name = name @property def name(self): """Gets the name of this BodyVolumeSnapshotCreate. # noqa: E501 :return: The name of this BodyVolumeSnapshotCreate. # noqa: E501 :rtype: str """ return self._name @name.setter def name(self, name): """Sets the name of this BodyVolumeSnapshotCreate. :param name: The name of this BodyVolumeSnapshotCreate. # noqa: E501 :type: str """ if name is None: raise ValueError("Invalid value for `name`, must not be `None`") # noqa: E501 self._name = name def to_dict(self): """Returns the model properties as a dict""" result = {} for attr, _ in self.swagger_types.items(): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value )) elif hasattr(value, "to_dict"): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item, value.items() )) else: result[attr] = value if issubclass(BodyVolumeSnapshotCreate, dict): for key, value in self.items(): result[key] = value return result def to_str(self): """Returns the string representation of the model""" return pprint.pformat(self.to_dict()) def __repr__(self): """For `print` and `pprint`""" return self.to_str() def __eq__(self, other): """Returns true if both objects are equal""" if not isinstance(other, BodyVolumeSnapshotCreate): return False return self.__dict__ == other.__dict__ def __ne__(self, other): """Returns true if both objects are not equal""" return not self == other class BodyVolumeUpdate(object): """NOTE: This class is auto generated by the swagger code generator program Do not edit the class manually. """ """ Attributes: swagger_types (dict): The key is attribute name and the value is attribute type. 
attribute_map (dict): The key is attribute name and the value is json key in definition. """ swagger_types = { 'op': 'VolumeUpdateOp', 'issue_rebuild': 'bool', 'failed_vol': 'str', 'failed_uuid': 'str', 'dpu_id': 'str', 'state': 'ResourceState', 'capacity': 'int', 'qos_band': 'int', 'vol_type': 'VolumeTypes', 'data_protection': 'DataProtection', 'new_vol_name': 'str' } attribute_map = { 'op': 'op', 'issue_rebuild': 'issue_rebuild', 'failed_vol': 'failed_vol', 'failed_uuid': 'failed_uuid', 'dpu_id': 'dpu_id', 'state': 'state', 'capacity': 'capacity', 'qos_band': 'qos_band', 'vol_type': 'vol_type', 'data_protection': 'data_protection', 'new_vol_name': 'new_vol_name' } def __init__(self, op=None, issue_rebuild=None, failed_vol=None, failed_uuid=None, dpu_id=None, state=None, capacity=None, qos_band=None, vol_type=None, data_protection=None, new_vol_name=None): # noqa: E501 """BodyVolumeUpdate - a model defined in Swagger""" # noqa: E501 self._op = None self._issue_rebuild = None self._failed_vol = None self._failed_uuid = None self._dpu_id = None self._state = None self._capacity = None self._qos_band = None self._vol_type = None self._data_protection = None self._new_vol_name = None self.discriminator = None self.op = op if issue_rebuild is not None: self.issue_rebuild = issue_rebuild if failed_vol is not None: self.failed_vol = failed_vol if failed_uuid is not None: self.failed_uuid = failed_uuid if dpu_id is not None: self.dpu_id = dpu_id if state is not None: self.state = state if capacity is not None: self.capacity = capacity if qos_band is not None: self.qos_band = qos_band if vol_type is not None: self.vol_type = vol_type if data_protection is not None: self.data_protection = data_protection if new_vol_name is not None: self.new_vol_name = new_vol_name @property def op(self): """Gets the op of this BodyVolumeUpdate. # noqa: E501 :return: The op of this BodyVolumeUpdate. # noqa: E501 :rtype: VolumeUpdateOp """ return self._op @op.setter def op(self, op): """Sets the op of this BodyVolumeUpdate. :param op: The op of this BodyVolumeUpdate. # noqa: E501 :type: VolumeUpdateOp """ if op is None: raise ValueError("Invalid value for `op`, must not be `None`") # noqa: E501 self._op = op @property def issue_rebuild(self): """Gets the issue_rebuild of this BodyVolumeUpdate. # noqa: E501 :return: The issue_rebuild of this BodyVolumeUpdate. # noqa: E501 :rtype: bool """ return self._issue_rebuild @issue_rebuild.setter def issue_rebuild(self, issue_rebuild): """Sets the issue_rebuild of this BodyVolumeUpdate. :param issue_rebuild: The issue_rebuild of this BodyVolumeUpdate. # noqa: E501 :type: bool """ self._issue_rebuild = issue_rebuild @property def failed_vol(self): """Gets the failed_vol of this BodyVolumeUpdate. # noqa: E501 :return: The failed_vol of this BodyVolumeUpdate. # noqa: E501 :rtype: str """ return self._failed_vol @failed_vol.setter def failed_vol(self, failed_vol): """Sets the failed_vol of this BodyVolumeUpdate. :param failed_vol: The failed_vol of this BodyVolumeUpdate. # noqa: E501 :type: str """ self._failed_vol = failed_vol @property def failed_uuid(self): """Gets the failed_uuid of this BodyVolumeUpdate. # noqa: E501 :return: The failed_uuid of this BodyVolumeUpdate. # noqa: E501 :rtype: str """ return self._failed_uuid @failed_uuid.setter def failed_uuid(self, failed_uuid): """Sets the failed_uuid of this BodyVolumeUpdate. :param failed_uuid: The failed_uuid of this BodyVolumeUpdate. 
# noqa: E501 :type: str """ self._failed_uuid = failed_uuid @property def dpu_id(self): """Gets the dpu_id of this BodyVolumeUpdate. # noqa: E501 id of dpu to which this volume is to be moved # noqa: E501 :return: The dpu_id of this BodyVolumeUpdate. # noqa: E501 :rtype: str """ return self._dpu_id @dpu_id.setter def dpu_id(self, dpu_id): """Sets the dpu_id of this BodyVolumeUpdate. id of dpu to which this volume is to be moved # noqa: E501 :param dpu_id: The dpu_id of this BodyVolumeUpdate. # noqa: E501 :type: str """ self._dpu_id = dpu_id @property def state(self): """Gets the state of this BodyVolumeUpdate. # noqa: E501 :return: The state of this BodyVolumeUpdate. # noqa: E501 :rtype: ResourceState """ return self._state @state.setter def state(self, state): """Sets the state of this BodyVolumeUpdate. :param state: The state of this BodyVolumeUpdate. # noqa: E501 :type: ResourceState """ self._state = state @property def capacity(self): """Gets the capacity of this BodyVolumeUpdate. # noqa: E501 :return: The capacity of this BodyVolumeUpdate. # noqa: E501 :rtype: int """ return self._capacity @capacity.setter def capacity(self, capacity): """Sets the capacity of this BodyVolumeUpdate. :param capacity: The capacity of this BodyVolumeUpdate. # noqa: E501 :type: int """ self._capacity = capacity @property def qos_band(self): """Gets the qos_band of this BodyVolumeUpdate. # noqa: E501 index of the new QoS band # noqa: E501 :return: The qos_band of this BodyVolumeUpdate. # noqa: E501 :rtype: int """ return self._qos_band @qos_band.setter def qos_band(self, qos_band): """Sets the qos_band of this BodyVolumeUpdate. index of the new QoS band # noqa: E501 :param qos_band: The qos_band of this BodyVolumeUpdate. # noqa: E501 :type: int """ self._qos_band = qos_band @property def vol_type(self): """Gets the vol_type of this BodyVolumeUpdate. # noqa: E501 :return: The vol_type of this BodyVolumeUpdate. # noqa: E501 :rtype: VolumeTypes """ return self._vol_type @vol_type.setter def vol_type(self, vol_type): """Sets the vol_type of this BodyVolumeUpdate. :param vol_type: The vol_type of this BodyVolumeUpdate. # noqa: E501 :type: VolumeTypes """ self._vol_type = vol_type @property def data_protection(self): """Gets the data_protection of this BodyVolumeUpdate. # noqa: E501 :return: The data_protection of this BodyVolumeUpdate. # noqa: E501 :rtype: DataProtection """ return self._data_protection @data_protection.setter def data_protection(self, data_protection): """Sets the data_protection of this BodyVolumeUpdate. :param data_protection: The data_protection of this BodyVolumeUpdate. # noqa: E501 :type: DataProtection """ self._data_protection = data_protection @property def new_vol_name(self): """Gets the new_vol_name of this BodyVolumeUpdate. # noqa: E501 :return: The new_vol_name of this BodyVolumeUpdate. # noqa: E501 :rtype: str """ return self._new_vol_name @new_vol_name.setter def new_vol_name(self, new_vol_name): """Sets the new_vol_name of this BodyVolumeUpdate. :param new_vol_name: The new_vol_name of this BodyVolumeUpdate. 
# noqa: E501 :type: str """ if new_vol_name is not None and len(new_vol_name) > 255: raise ValueError("Invalid value for `new_vol_name`, length must be less than or equal to `255`") # noqa: E501 if new_vol_name is not None and len(new_vol_name) < 1: raise ValueError("Invalid value for `new_vol_name`, length must be greater than or equal to `1`") # noqa: E501 if new_vol_name is not None and not re.search(r'^[A-Za-z0-9_\\-\\.\\:]+$', new_vol_name): # noqa: E501 raise ValueError(r"Invalid value for `new_vol_name`, must be a follow pattern or equal to `/^[A-Za-z0-9_\\-\\.\\:]+$/`") # noqa: E501 self._new_vol_name = new_vol_name def to_dict(self): """Returns the model properties as a dict""" result = {} for attr, _ in self.swagger_types.items(): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value )) elif hasattr(value, "to_dict"): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item, value.items() )) else: result[attr] = value if issubclass(BodyVolumeUpdate, dict): for key, value in self.items(): result[key] = value return result def to_str(self): """Returns the string representation of the model""" return pprint.pformat(self.to_dict()) def __repr__(self): """For `print` and `pprint`""" return self.to_str() def __eq__(self, other): """Returns true if both objects are equal""" if not isinstance(other, BodyVolumeUpdate): return False return self.__dict__ == other.__dict__ def __ne__(self, other): """Returns true if both objects are not equal""" return not self == other class CommonResponseFields(object): """NOTE: This class is auto generated by the swagger code generator program Do not edit the class manually. """ """ Attributes: swagger_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ swagger_types = { 'status': 'bool', 'message': 'str', 'error_message': 'str', 'warning': 'str' } attribute_map = { 'status': 'status', 'message': 'message', 'error_message': 'error_message', 'warning': 'warning' } def __init__(self, status=None, message=None, error_message=None, warning=None): # noqa: E501 """CommonResponseFields - a model defined in Swagger""" # noqa: E501 self._status = None self._message = None self._error_message = None self._warning = None self.discriminator = None self.status = status if message is not None: self.message = message if error_message is not None: self.error_message = error_message if warning is not None: self.warning = warning @property def status(self): """Gets the status of this CommonResponseFields. # noqa: E501 :return: The status of this CommonResponseFields. # noqa: E501 :rtype: bool """ return self._status @status.setter def status(self, status): """Sets the status of this CommonResponseFields. :param status: The status of this CommonResponseFields. # noqa: E501 :type: bool """ if status is None: raise ValueError("Invalid value for `status`, must not be `None`") # noqa: E501 self._status = status @property def message(self): """Gets the message of this CommonResponseFields. # noqa: E501 :return: The message of this CommonResponseFields. # noqa: E501 :rtype: str """ return self._message @message.setter def message(self, message): """Sets the message of this CommonResponseFields. :param message: The message of this CommonResponseFields. 
# noqa: E501 :type: str """ self._message = message @property def error_message(self): """Gets the error_message of this CommonResponseFields. # noqa: E501 :return: The error_message of this CommonResponseFields. # noqa: E501 :rtype: str """ return self._error_message @error_message.setter def error_message(self, error_message): """Sets the error_message of this CommonResponseFields. :param error_message: The error_message of this CommonResponseFields. # noqa: E501 :type: str """ self._error_message = error_message @property def warning(self): """Gets the warning of this CommonResponseFields. # noqa: E501 :return: The warning of this CommonResponseFields. # noqa: E501 :rtype: str """ return self._warning @warning.setter def warning(self, warning): """Sets the warning of this CommonResponseFields. :param warning: The warning of this CommonResponseFields. # noqa: E501 :type: str """ self._warning = warning def to_dict(self): """Returns the model properties as a dict""" result = {} for attr, _ in self.swagger_types.items(): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value )) elif hasattr(value, "to_dict"): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item, value.items() )) else: result[attr] = value if issubclass(CommonResponseFields, dict): for key, value in self.items(): result[key] = value return result def to_str(self): """Returns the string representation of the model""" return pprint.pformat(self.to_dict()) def __repr__(self): """For `print` and `pprint`""" return self.to_str() def __eq__(self, other): """Returns true if both objects are equal""" if not isinstance(other, CommonResponseFields): return False return self.__dict__ == other.__dict__ def __ne__(self, other): """Returns true if both objects are not equal""" return not self == other class DataProtection(object): """NOTE: This class is auto generated by the swagger code generator program Do not edit the class manually. """ """ Attributes: swagger_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ swagger_types = { 'num_redundant_dpus': 'int', 'num_data_disks': 'int', 'num_failed_disks': 'int' } attribute_map = { 'num_redundant_dpus': 'num_redundant_dpus', 'num_data_disks': 'num_data_disks', 'num_failed_disks': 'num_failed_disks' } def __init__(self, num_redundant_dpus=None, num_data_disks=None, num_failed_disks=None): # noqa: E501 """DataProtection - a model defined in Swagger""" # noqa: E501 self._num_redundant_dpus = None self._num_data_disks = None self._num_failed_disks = None self.discriminator = None if num_redundant_dpus is not None: self.num_redundant_dpus = num_redundant_dpus if num_data_disks is not None: self.num_data_disks = num_data_disks if num_failed_disks is not None: self.num_failed_disks = num_failed_disks @property def num_redundant_dpus(self): """Gets the num_redundant_dpus of this DataProtection. # noqa: E501 :return: The num_redundant_dpus of this DataProtection. # noqa: E501 :rtype: int """ return self._num_redundant_dpus @num_redundant_dpus.setter def num_redundant_dpus(self, num_redundant_dpus): """Sets the num_redundant_dpus of this DataProtection. :param num_redundant_dpus: The num_redundant_dpus of this DataProtection. 
# noqa: E501 :type: int """ if num_redundant_dpus is not None and num_redundant_dpus < 0: # noqa: E501 raise ValueError("Invalid value for `num_redundant_dpus`, must be a value greater than or equal to `0`") # noqa: E501 self._num_redundant_dpus = num_redundant_dpus @property def num_data_disks(self): """Gets the num_data_disks of this DataProtection. # noqa: E501 :return: The num_data_disks of this DataProtection. # noqa: E501 :rtype: int """ return self._num_data_disks @num_data_disks.setter def num_data_disks(self, num_data_disks): """Sets the num_data_disks of this DataProtection. :param num_data_disks: The num_data_disks of this DataProtection. # noqa: E501 :type: int """ if num_data_disks is not None and num_data_disks < 0: # noqa: E501 raise ValueError("Invalid value for `num_data_disks`, must be a value greater than or equal to `0`") # noqa: E501 self._num_data_disks = num_data_disks @property def num_failed_disks(self): """Gets the num_failed_disks of this DataProtection. # noqa: E501 :return: The num_failed_disks of this DataProtection. # noqa: E501 :rtype: int """ return self._num_failed_disks @num_failed_disks.setter def num_failed_disks(self, num_failed_disks): """Sets the num_failed_disks of this DataProtection. :param num_failed_disks: The num_failed_disks of this DataProtection. # noqa: E501 :type: int """ if num_failed_disks is not None and num_failed_disks < 0: # noqa: E501 raise ValueError("Invalid value for `num_failed_disks`, must be a value greater than or equal to `0`") # noqa: E501 self._num_failed_disks = num_failed_disks def to_dict(self): """Returns the model properties as a dict""" result = {} for attr, _ in self.swagger_types.items(): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value )) elif hasattr(value, "to_dict"): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item, value.items() )) else: result[attr] = value if issubclass(DataProtection, dict): for key, value in self.items(): result[key] = value return result def to_str(self): """Returns the string representation of the model""" return pprint.pformat(self.to_dict()) def __repr__(self): """For `print` and `pprint`""" return self.to_str() def __eq__(self, other): """Returns true if both objects are equal""" if not isinstance(other, DataProtection): return False return self.__dict__ == other.__dict__ def __ne__(self, other): """Returns true if both objects are not equal""" return not self == other class DataWithHostInfo(object): """NOTE: This class is auto generated by the swagger code generator program Do not edit the class manually. """ """ Attributes: swagger_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ swagger_types = { 'data': 'HostInfo' } attribute_map = { 'data': 'data' } def __init__(self, data=None): # noqa: E501 """DataWithHostInfo - a model defined in Swagger""" # noqa: E501 self._data = None self.discriminator = None if data is not None: self.data = data @property def data(self): """Gets the data of this DataWithHostInfo. # noqa: E501 :return: The data of this DataWithHostInfo. # noqa: E501 :rtype: HostInfo """ return self._data @data.setter def data(self, data): """Sets the data of this DataWithHostInfo. :param data: The data of this DataWithHostInfo. 
# noqa: E501 :type: HostInfo """ self._data = data def to_dict(self): """Returns the model properties as a dict""" result = {} for attr, _ in self.swagger_types.items(): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value )) elif hasattr(value, "to_dict"): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item, value.items() )) else: result[attr] = value if issubclass(DataWithHostInfo, dict): for key, value in self.items(): result[key] = value return result def to_str(self): """Returns the string representation of the model""" return pprint.pformat(self.to_dict()) def __repr__(self): """For `print` and `pprint`""" return self.to_str() def __eq__(self, other): """Returns true if both objects are equal""" if not isinstance(other, DataWithHostInfo): return False return self.__dict__ == other.__dict__ def __ne__(self, other): """Returns true if both objects are not equal""" return not self == other class DataWithUuidData(object): """NOTE: This class is auto generated by the swagger code generator program Do not edit the class manually. """ """ Attributes: swagger_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ swagger_types = { 'uuid': 'str' } attribute_map = { 'uuid': 'uuid' } def __init__(self, uuid=None): # noqa: E501 """DataWithUuidData - a model defined in Swagger""" # noqa: E501 self._uuid = None self.discriminator = None self.uuid = uuid @property def uuid(self): """Gets the uuid of this DataWithUuidData. # noqa: E501 :return: The uuid of this DataWithUuidData. # noqa: E501 :rtype: str """ return self._uuid @uuid.setter def uuid(self, uuid): """Sets the uuid of this DataWithUuidData. :param uuid: The uuid of this DataWithUuidData. # noqa: E501 :type: str """ if uuid is None: raise ValueError("Invalid value for `uuid`, must not be `None`") # noqa: E501 self._uuid = uuid def to_dict(self): """Returns the model properties as a dict""" result = {} for attr, _ in self.swagger_types.items(): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value )) elif hasattr(value, "to_dict"): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item, value.items() )) else: result[attr] = value if issubclass(DataWithUuidData, dict): for key, value in self.items(): result[key] = value return result def to_str(self): """Returns the string representation of the model""" return pprint.pformat(self.to_dict()) def __repr__(self): """For `print` and `pprint`""" return self.to_str() def __eq__(self, other): """Returns true if both objects are equal""" if not isinstance(other, DataWithUuidData): return False return self.__dict__ == other.__dict__ def __ne__(self, other): """Returns true if both objects are not equal""" return not self == other class DataWithListOfHostUuidsData(object): """NOTE: This class is auto generated by the swagger code generator program Do not edit the class manually. """ """ Attributes: swagger_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. 
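Example usage (hand-written illustrative sketch, not generator output; the
counts and UUID below are placeholder values only):

    # Sketch of what appears to be the data portion of a host-UUID listing
    # response: counts of hosts with and without FAC cards plus the host UUIDs.
    data = DataWithListOfHostUuidsData(
        total_hosts_with_fac=2,
        total_hosts_without_fac=1,
        host_uuids=['11111111-2222-3333-4444-555555555555'],
    )
    summary = data.to_dict()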
""" swagger_types = { 'total_hosts_with_fac': 'int', 'total_hosts_without_fac': 'int', 'last_uuid_datetime': 'datetime', 'host_uuids': 'list[str]' } attribute_map = { 'total_hosts_with_fac': 'total_hosts_with_fac', 'total_hosts_without_fac': 'total_hosts_without_fac', 'last_uuid_datetime': 'last_uuid_datetime', 'host_uuids': 'host_uuids' } def __init__(self, total_hosts_with_fac=None, total_hosts_without_fac=None, last_uuid_datetime=None, host_uuids=None): # noqa: E501 """DataWithListOfHostUuidsData - a model defined in Swagger""" # noqa: E501 self._total_hosts_with_fac = None self._total_hosts_without_fac = None self._last_uuid_datetime = None self._host_uuids = None self.discriminator = None if total_hosts_with_fac is not None: self.total_hosts_with_fac = total_hosts_with_fac if total_hosts_without_fac is not None: self.total_hosts_without_fac = total_hosts_without_fac if last_uuid_datetime is not None: self.last_uuid_datetime = last_uuid_datetime if host_uuids is not None: self.host_uuids = host_uuids @property def total_hosts_with_fac(self): """Gets the total_hosts_with_fac of this DataWithListOfHostUuidsData. # noqa: E501 Count of hosts/servers which have at least one FAC card installed # noqa: E501 :return: The total_hosts_with_fac of this DataWithListOfHostUuidsData. # noqa: E501 :rtype: int """ return self._total_hosts_with_fac @total_hosts_with_fac.setter def total_hosts_with_fac(self, total_hosts_with_fac): """Sets the total_hosts_with_fac of this DataWithListOfHostUuidsData. Count of hosts/servers which have at least one FAC card installed # noqa: E501 :param total_hosts_with_fac: The total_hosts_with_fac of this DataWithListOfHostUuidsData. # noqa: E501 :type: int """ self._total_hosts_with_fac = total_hosts_with_fac @property def total_hosts_without_fac(self): """Gets the total_hosts_without_fac of this DataWithListOfHostUuidsData. # noqa: E501 Count of hosts/servers which use non-Fungible NIC interfaces # noqa: E501 :return: The total_hosts_without_fac of this DataWithListOfHostUuidsData. # noqa: E501 :rtype: int """ return self._total_hosts_without_fac @total_hosts_without_fac.setter def total_hosts_without_fac(self, total_hosts_without_fac): """Sets the total_hosts_without_fac of this DataWithListOfHostUuidsData. Count of hosts/servers which use non-Fungible NIC interfaces # noqa: E501 :param total_hosts_without_fac: The total_hosts_without_fac of this DataWithListOfHostUuidsData. # noqa: E501 :type: int """ self._total_hosts_without_fac = total_hosts_without_fac @property def last_uuid_datetime(self): """Gets the last_uuid_datetime of this DataWithListOfHostUuidsData. # noqa: E501 created time for the last host uuid from the list # noqa: E501 :return: The last_uuid_datetime of this DataWithListOfHostUuidsData. # noqa: E501 :rtype: datetime """ return self._last_uuid_datetime @last_uuid_datetime.setter def last_uuid_datetime(self, last_uuid_datetime): """Sets the last_uuid_datetime of this DataWithListOfHostUuidsData. created time for the last host uuid from the list # noqa: E501 :param last_uuid_datetime: The last_uuid_datetime of this DataWithListOfHostUuidsData. # noqa: E501 :type: datetime """ self._last_uuid_datetime = last_uuid_datetime @property def host_uuids(self): """Gets the host_uuids of this DataWithListOfHostUuidsData. # noqa: E501 List of Host UUIDs # noqa: E501 :return: The host_uuids of this DataWithListOfHostUuidsData. 
# noqa: E501 :rtype: list[str] """ return self._host_uuids @host_uuids.setter def host_uuids(self, host_uuids): """Sets the host_uuids of this DataWithListOfHostUuidsData. List of Host UUIDs # noqa: E501 :param host_uuids: The host_uuids of this DataWithListOfHostUuidsData. # noqa: E501 :type: list[str] """ self._host_uuids = host_uuids def to_dict(self): """Returns the model properties as a dict""" result = {} for attr, _ in self.swagger_types.items(): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value )) elif hasattr(value, "to_dict"): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item, value.items() )) else: result[attr] = value if issubclass(DataWithListOfHostUuidsData, dict): for key, value in self.items(): result[key] = value return result def to_str(self): """Returns the string representation of the model""" return pprint.pformat(self.to_dict()) def __repr__(self): """For `print` and `pprint`""" return self.to_str() def __eq__(self, other): """Returns true if both objects are equal""" if not isinstance(other, DataWithListOfHostUuidsData): return False return self.__dict__ == other.__dict__ def __ne__(self, other): """Returns true if both objects are not equal""" return not self == other class DataWithListOfHostUuids(object): """NOTE: This class is auto generated by the swagger code generator program Do not edit the class manually. """ """ Attributes: swagger_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ swagger_types = { 'data': 'DataWithListOfHostUuidsData' } attribute_map = { 'data': 'data' } def __init__(self, data=None): # noqa: E501 """DataWithListOfHostUuids - a model defined in Swagger""" # noqa: E501 self._data = None self.discriminator = None if data is not None: self.data = data @property def data(self): """Gets the data of this DataWithListOfHostUuids. # noqa: E501 :return: The data of this DataWithListOfHostUuids. # noqa: E501 :rtype: DataWithListOfHostUuidsData """ return self._data @data.setter def data(self, data): """Sets the data of this DataWithListOfHostUuids. :param data: The data of this DataWithListOfHostUuids. 
# noqa: E501 :type: DataWithListOfHostUuidsData """ self._data = data def to_dict(self): """Returns the model properties as a dict""" result = {} for attr, _ in self.swagger_types.items(): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value )) elif hasattr(value, "to_dict"): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item, value.items() )) else: result[attr] = value if issubclass(DataWithListOfHostUuids, dict): for key, value in self.items(): result[key] = value return result def to_str(self): """Returns the string representation of the model""" return pprint.pformat(self.to_dict()) def __repr__(self): """For `print` and `pprint`""" return self.to_str() def __eq__(self, other): """Returns true if both objects are equal""" if not isinstance(other, DataWithListOfHostUuids): return False return self.__dict__ == other.__dict__ def __ne__(self, other): """Returns true if both objects are not equal""" return not self == other class DataWithListOfHosts(object): """NOTE: This class is auto generated by the swagger code generator program Do not edit the class manually. """ """ Attributes: swagger_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ swagger_types = { 'data': 'list[HostInfo]' } attribute_map = { 'data': 'data' } def __init__(self, data=None): # noqa: E501 """DataWithListOfHosts - a model defined in Swagger""" # noqa: E501 self._data = None self.discriminator = None self.data = data @property def data(self): """Gets the data of this DataWithListOfHosts. # noqa: E501 :return: The data of this DataWithListOfHosts. # noqa: E501 :rtype: list[HostInfo] """ return self._data @data.setter def data(self, data): """Sets the data of this DataWithListOfHosts. :param data: The data of this DataWithListOfHosts. # noqa: E501 :type: list[HostInfo] """ if data is None: raise ValueError("Invalid value for `data`, must not be `None`") # noqa: E501 self._data = data def to_dict(self): """Returns the model properties as a dict""" result = {} for attr, _ in self.swagger_types.items(): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value )) elif hasattr(value, "to_dict"): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item, value.items() )) else: result[attr] = value if issubclass(DataWithListOfHosts, dict): for key, value in self.items(): result[key] = value return result def to_str(self): """Returns the string representation of the model""" return pprint.pformat(self.to_dict()) def __repr__(self): """For `print` and `pprint`""" return self.to_str() def __eq__(self, other): """Returns true if both objects are equal""" if not isinstance(other, DataWithListOfHosts): return False return self.__dict__ == other.__dict__ def __ne__(self, other): """Returns true if both objects are not equal""" return not self == other class DataWithMapOfDpuDrives(object): """NOTE: This class is auto generated by the swagger code generator program Do not edit the class manually. """ """ Attributes: swagger_types (dict): The key is attribute name and the value is attribute type. 
attribute_map (dict): The key is attribute name and the value is json key in definition. """ swagger_types = { 'data': 'dict(str, NodeDpu)' } attribute_map = { 'data': 'data' } def __init__(self, data=None): # noqa: E501 """DataWithMapOfDpuDrives - a model defined in Swagger""" # noqa: E501 self._data = None self.discriminator = None self.data = data @property def data(self): """Gets the data of this DataWithMapOfDpuDrives. # noqa: E501 :return: The data of this DataWithMapOfDpuDrives. # noqa: E501 :rtype: dict(str, NodeDpu) """ return self._data @data.setter def data(self, data): """Sets the data of this DataWithMapOfDpuDrives. :param data: The data of this DataWithMapOfDpuDrives. # noqa: E501 :type: dict(str, NodeDpu) """ if data is None: raise ValueError("Invalid value for `data`, must not be `None`") # noqa: E501 self._data = data def to_dict(self): """Returns the model properties as a dict""" result = {} for attr, _ in self.swagger_types.items(): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value )) elif hasattr(value, "to_dict"): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item, value.items() )) else: result[attr] = value if issubclass(DataWithMapOfDpuDrives, dict): for key, value in self.items(): result[key] = value return result def to_str(self): """Returns the string representation of the model""" return pprint.pformat(self.to_dict()) def __repr__(self): """For `print` and `pprint`""" return self.to_str() def __eq__(self, other): """Returns true if both objects are equal""" if not isinstance(other, DataWithMapOfDpuDrives): return False return self.__dict__ == other.__dict__ def __ne__(self, other): """Returns true if both objects are not equal""" return not self == other class DataWithSinglePort(object): """NOTE: This class is auto generated by the swagger code generator program Do not edit the class manually. """ """ Attributes: swagger_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ swagger_types = { 'data': 'Port' } attribute_map = { 'data': 'data' } def __init__(self, data=None): # noqa: E501 """DataWithSinglePort - a model defined in Swagger""" # noqa: E501 self._data = None self.discriminator = None self.data = data @property def data(self): """Gets the data of this DataWithSinglePort. # noqa: E501 :return: The data of this DataWithSinglePort. # noqa: E501 :rtype: Port """ return self._data @data.setter def data(self, data): """Sets the data of this DataWithSinglePort. :param data: The data of this DataWithSinglePort. 
# noqa: E501 :type: Port """ if data is None: raise ValueError("Invalid value for `data`, must not be `None`") # noqa: E501 self._data = data def to_dict(self): """Returns the model properties as a dict""" result = {} for attr, _ in self.swagger_types.items(): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value )) elif hasattr(value, "to_dict"): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item, value.items() )) else: result[attr] = value if issubclass(DataWithSinglePort, dict): for key, value in self.items(): result[key] = value return result def to_str(self): """Returns the string representation of the model""" return pprint.pformat(self.to_dict()) def __repr__(self): """For `print` and `pprint`""" return self.to_str() def __eq__(self, other): """Returns true if both objects are equal""" if not isinstance(other, DataWithSinglePort): return False return self.__dict__ == other.__dict__ def __ne__(self, other): """Returns true if both objects are not equal""" return not self == other class DataWithSingleVolume(object): """NOTE: This class is auto generated by the swagger code generator program Do not edit the class manually. """ """ Attributes: swagger_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ swagger_types = { 'data': 'Volume' } attribute_map = { 'data': 'data' } def __init__(self, data=None): # noqa: E501 """DataWithSingleVolume - a model defined in Swagger""" # noqa: E501 self._data = None self.discriminator = None self.data = data @property def data(self): """Gets the data of this DataWithSingleVolume. # noqa: E501 :return: The data of this DataWithSingleVolume. # noqa: E501 :rtype: Volume """ return self._data @data.setter def data(self, data): """Sets the data of this DataWithSingleVolume. :param data: The data of this DataWithSingleVolume. # noqa: E501 :type: Volume """ if data is None: raise ValueError("Invalid value for `data`, must not be `None`") # noqa: E501 self._data = data def to_dict(self): """Returns the model properties as a dict""" result = {} for attr, _ in self.swagger_types.items(): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value )) elif hasattr(value, "to_dict"): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item, value.items() )) else: result[attr] = value if issubclass(DataWithSingleVolume, dict): for key, value in self.items(): result[key] = value return result def to_str(self): """Returns the string representation of the model""" return pprint.pformat(self.to_dict()) def __repr__(self): """For `print` and `pprint`""" return self.to_str() def __eq__(self, other): """Returns true if both objects are equal""" if not isinstance(other, DataWithSingleVolume): return False return self.__dict__ == other.__dict__ def __ne__(self, other): """Returns true if both objects are not equal""" return not self == other class DataWithUuidString(object): """NOTE: This class is auto generated by the swagger code generator program Do not edit the class manually. 
""" """ Attributes: swagger_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ swagger_types = { 'data': 'DataWithUuidStringData' } attribute_map = { 'data': 'data' } def __init__(self, data=None): # noqa: E501 """DataWithUuidString - a model defined in Swagger""" # noqa: E501 self._data = None self.discriminator = None self.data = data @property def data(self): """Gets the data of this DataWithUuidString. # noqa: E501 :return: The data of this DataWithUuidString. # noqa: E501 :rtype: DataWithUuidStringData """ return self._data @data.setter def data(self, data): """Sets the data of this DataWithUuidString. :param data: The data of this DataWithUuidString. # noqa: E501 :type: DataWithUuidStringData """ if data is None: raise ValueError("Invalid value for `data`, must not be `None`") # noqa: E501 self._data = data def to_dict(self): """Returns the model properties as a dict""" result = {} for attr, _ in self.swagger_types.items(): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value )) elif hasattr(value, "to_dict"): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item, value.items() )) else: result[attr] = value if issubclass(DataWithUuidString, dict): for key, value in self.items(): result[key] = value return result def to_str(self): """Returns the string representation of the model""" return pprint.pformat(self.to_dict()) def __repr__(self): """For `print` and `pprint`""" return self.to_str() def __eq__(self, other): """Returns true if both objects are equal""" if not isinstance(other, DataWithUuidString): return False return self.__dict__ == other.__dict__ def __ne__(self, other): """Returns true if both objects are not equal""" return not self == other class DataWithUuid(object): """NOTE: This class is auto generated by the swagger code generator program Do not edit the class manually. """ """ Attributes: swagger_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ swagger_types = { 'data': 'DataWithUuidData' } attribute_map = { 'data': 'data' } def __init__(self, data=None): # noqa: E501 """DataWithUuid - a model defined in Swagger""" # noqa: E501 self._data = None self.discriminator = None self.data = data @property def data(self): """Gets the data of this DataWithUuid. # noqa: E501 :return: The data of this DataWithUuid. # noqa: E501 :rtype: DataWithUuidData """ return self._data @data.setter def data(self, data): """Sets the data of this DataWithUuid. :param data: The data of this DataWithUuid. 
# noqa: E501 :type: DataWithUuidData """ if data is None: raise ValueError("Invalid value for `data`, must not be `None`") # noqa: E501 self._data = data def to_dict(self): """Returns the model properties as a dict""" result = {} for attr, _ in self.swagger_types.items(): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value )) elif hasattr(value, "to_dict"): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item, value.items() )) else: result[attr] = value if issubclass(DataWithUuid, dict): for key, value in self.items(): result[key] = value return result def to_str(self): """Returns the string representation of the model""" return pprint.pformat(self.to_dict()) def __repr__(self): """For `print` and `pprint`""" return self.to_str() def __eq__(self, other): """Returns true if both objects are equal""" if not isinstance(other, DataWithUuid): return False return self.__dict__ == other.__dict__ def __ne__(self, other): """Returns true if both objects are not equal""" return not self == other class DpIpSetup(object): """NOTE: This class is auto generated by the swagger code generator program Do not edit the class manually. """ """ Attributes: swagger_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ swagger_types = { 'ip_assignment_dhcp': 'bool', 'subnet_mask': 'str', 'next_hop': 'str' } attribute_map = { 'ip_assignment_dhcp': 'ip_assignment_dhcp', 'subnet_mask': 'subnet_mask', 'next_hop': 'next_hop' } def __init__(self, ip_assignment_dhcp=None, subnet_mask=None, next_hop=None): # noqa: E501 """DpIpSetup - a model defined in Swagger""" # noqa: E501 self._ip_assignment_dhcp = None self._subnet_mask = None self._next_hop = None self.discriminator = None if ip_assignment_dhcp is not None: self.ip_assignment_dhcp = ip_assignment_dhcp if subnet_mask is not None: self.subnet_mask = subnet_mask if next_hop is not None: self.next_hop = next_hop @property def ip_assignment_dhcp(self): """Gets the ip_assignment_dhcp of this DpIpSetup. # noqa: E501 :return: The ip_assignment_dhcp of this DpIpSetup. # noqa: E501 :rtype: bool """ return self._ip_assignment_dhcp @ip_assignment_dhcp.setter def ip_assignment_dhcp(self, ip_assignment_dhcp): """Sets the ip_assignment_dhcp of this DpIpSetup. :param ip_assignment_dhcp: The ip_assignment_dhcp of this DpIpSetup. # noqa: E501 :type: bool """ self._ip_assignment_dhcp = ip_assignment_dhcp @property def subnet_mask(self): """Gets the subnet_mask of this DpIpSetup. # noqa: E501 :return: The subnet_mask of this DpIpSetup. # noqa: E501 :rtype: str """ return self._subnet_mask @subnet_mask.setter def subnet_mask(self, subnet_mask): """Sets the subnet_mask of this DpIpSetup. :param subnet_mask: The subnet_mask of this DpIpSetup. # noqa: E501 :type: str """ self._subnet_mask = subnet_mask @property def next_hop(self): """Gets the next_hop of this DpIpSetup. # noqa: E501 :return: The next_hop of this DpIpSetup. # noqa: E501 :rtype: str """ return self._next_hop @next_hop.setter def next_hop(self, next_hop): """Sets the next_hop of this DpIpSetup. :param next_hop: The next_hop of this DpIpSetup. 
# noqa: E501 :type: str """ self._next_hop = next_hop def to_dict(self): """Returns the model properties as a dict""" result = {} for attr, _ in self.swagger_types.items(): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value )) elif hasattr(value, "to_dict"): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item, value.items() )) else: result[attr] = value if issubclass(DpIpSetup, dict): for key, value in self.items(): result[key] = value return result def to_str(self): """Returns the string representation of the model""" return pprint.pformat(self.to_dict()) def __repr__(self): """For `print` and `pprint`""" return self.to_str() def __eq__(self, other): """Returns true if both objects are equal""" if not isinstance(other, DpIpSetup): return False return self.__dict__ == other.__dict__ def __ne__(self, other): """Returns true if both objects are not equal""" return not self == other class Dpu(object): """NOTE: This class is auto generated by the swagger code generator program Do not edit the class manually. """ """ Attributes: swagger_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ swagger_types = { 'uuid': 'str', 'name': 'str', 'node_class': 'str', 'mgmt_ip': 'str', 'dpu_version': 'str', 'dataplane_ip': 'str', 'fpg_num': 'int', 'storage_agent': 'str', 'drives': 'list[Drive]', 'fault_zones': 'list[str]', 'capacity': 'int', 'state': 'ResourceState', 'available': 'bool', 'dp_ip_setup': 'DpIpSetup', 'additional_fields': 'AdditionalFields', 'created_at': 'datetime', 'modified_at': 'datetime', 'sku': 'str', 'product': 'str', 'fault_domain_id': 'str' } attribute_map = { 'uuid': 'uuid', 'name': 'name', 'node_class': 'node_class', 'mgmt_ip': 'mgmt_ip', 'dpu_version': 'dpu_version', 'dataplane_ip': 'dataplane_ip', 'fpg_num': 'fpg_num', 'storage_agent': 'storage_agent', 'drives': 'drives', 'fault_zones': 'fault_zones', 'capacity': 'capacity', 'state': 'state', 'available': 'available', 'dp_ip_setup': 'dp_ip_setup', 'additional_fields': 'additional_fields', 'created_at': 'created_at', 'modified_at': 'modified_at', 'sku': 'sku', 'product': 'product', 'fault_domain_id': 'fault_domain_id' } def __init__(self, uuid=None, name=None, node_class=None, mgmt_ip=None, dpu_version=None, dataplane_ip=None, fpg_num=None, storage_agent=None, drives=None, fault_zones=None, capacity=None, state=None, available=None, dp_ip_setup=None, additional_fields=None, created_at=None, modified_at=None, sku=None, product='UNKNOWN', fault_domain_id=None): # noqa: E501 """Dpu - a model defined in Swagger""" # noqa: E501 self._uuid = None self._name = None self._node_class = None self._mgmt_ip = None self._dpu_version = None self._dataplane_ip = None self._fpg_num = None self._storage_agent = None self._drives = None self._fault_zones = None self._capacity = None self._state = None self._available = None self._dp_ip_setup = None self._additional_fields = None self._created_at = None self._modified_at = None self._sku = None self._product = None self._fault_domain_id = None self.discriminator = None self.uuid = uuid self.name = name if node_class is not None: self.node_class = node_class if mgmt_ip is not None: self.mgmt_ip = mgmt_ip if dpu_version is not None: self.dpu_version = dpu_version if dataplane_ip is not 
None: self.dataplane_ip = dataplane_ip if fpg_num is not None: self.fpg_num = fpg_num if storage_agent is not None: self.storage_agent = storage_agent if drives is not None: self.drives = drives if fault_zones is not None: self.fault_zones = fault_zones if capacity is not None: self.capacity = capacity if state is not None: self.state = state if available is not None: self.available = available if dp_ip_setup is not None: self.dp_ip_setup = dp_ip_setup if additional_fields is not None: self.additional_fields = additional_fields if created_at is not None: self.created_at = created_at if modified_at is not None: self.modified_at = modified_at if sku is not None: self.sku = sku if product is not None: self.product = product if fault_domain_id is not None: self.fault_domain_id = fault_domain_id @property def uuid(self): """Gets the uuid of this Dpu. # noqa: E501 unique id of dpu # noqa: E501 :return: The uuid of this Dpu. # noqa: E501 :rtype: str """ return self._uuid @uuid.setter def uuid(self, uuid): """Sets the uuid of this Dpu. unique id of dpu # noqa: E501 :param uuid: The uuid of this Dpu. # noqa: E501 :type: str """ if uuid is None: raise ValueError("Invalid value for `uuid`, must not be `None`") # noqa: E501 self._uuid = uuid @property def name(self): """Gets the name of this Dpu. # noqa: E501 Descriptive name of dpu # noqa: E501 :return: The name of this Dpu. # noqa: E501 :rtype: str """ return self._name @name.setter def name(self, name): """Sets the name of this Dpu. Descriptive name of dpu # noqa: E501 :param name: The name of this Dpu. # noqa: E501 :type: str """ if name is None: raise ValueError("Invalid value for `name`, must not be `None`") # noqa: E501 self._name = name @property def node_class(self): """Gets the node_class of this Dpu. # noqa: E501 :return: The node_class of this Dpu. # noqa: E501 :rtype: str """ return self._node_class @node_class.setter def node_class(self, node_class): """Sets the node_class of this Dpu. :param node_class: The node_class of this Dpu. # noqa: E501 :type: str """ self._node_class = node_class @property def mgmt_ip(self): """Gets the mgmt_ip of this Dpu. # noqa: E501 :return: The mgmt_ip of this Dpu. # noqa: E501 :rtype: str """ return self._mgmt_ip @mgmt_ip.setter def mgmt_ip(self, mgmt_ip): """Sets the mgmt_ip of this Dpu. :param mgmt_ip: The mgmt_ip of this Dpu. # noqa: E501 :type: str """ self._mgmt_ip = mgmt_ip @property def dpu_version(self): """Gets the dpu_version of this Dpu. # noqa: E501 :return: The dpu_version of this Dpu. # noqa: E501 :rtype: str """ return self._dpu_version @dpu_version.setter def dpu_version(self, dpu_version): """Sets the dpu_version of this Dpu. :param dpu_version: The dpu_version of this Dpu. # noqa: E501 :type: str """ self._dpu_version = dpu_version @property def dataplane_ip(self): """Gets the dataplane_ip of this Dpu. # noqa: E501 :return: The dataplane_ip of this Dpu. # noqa: E501 :rtype: str """ return self._dataplane_ip @dataplane_ip.setter def dataplane_ip(self, dataplane_ip): """Sets the dataplane_ip of this Dpu. :param dataplane_ip: The dataplane_ip of this Dpu. # noqa: E501 :type: str """ self._dataplane_ip = dataplane_ip @property def fpg_num(self): """Gets the fpg_num of this Dpu. # noqa: E501 :return: The fpg_num of this Dpu. # noqa: E501 :rtype: int """ return self._fpg_num @fpg_num.setter def fpg_num(self, fpg_num): """Sets the fpg_num of this Dpu. :param fpg_num: The fpg_num of this Dpu. 
# noqa: E501 :type: int """ self._fpg_num = fpg_num @property def storage_agent(self): """Gets the storage_agent of this Dpu. # noqa: E501 :return: The storage_agent of this Dpu. # noqa: E501 :rtype: str """ return self._storage_agent @storage_agent.setter def storage_agent(self, storage_agent): """Sets the storage_agent of this Dpu. :param storage_agent: The storage_agent of this Dpu. # noqa: E501 :type: str """ self._storage_agent = storage_agent @property def drives(self): """Gets the drives of this Dpu. # noqa: E501 :return: The drives of this Dpu. # noqa: E501 :rtype: list[Drive] """ return self._drives @drives.setter def drives(self, drives): """Sets the drives of this Dpu. :param drives: The drives of this Dpu. # noqa: E501 :type: list[Drive] """ self._drives = drives @property def fault_zones(self): """Gets the fault_zones of this Dpu. # noqa: E501 :return: The fault_zones of this Dpu. # noqa: E501 :rtype: list[str] """ return self._fault_zones @fault_zones.setter def fault_zones(self, fault_zones): """Sets the fault_zones of this Dpu. :param fault_zones: The fault_zones of this Dpu. # noqa: E501 :type: list[str] """ self._fault_zones = fault_zones @property def capacity(self): """Gets the capacity of this Dpu. # noqa: E501 :return: The capacity of this Dpu. # noqa: E501 :rtype: int """ return self._capacity @capacity.setter def capacity(self, capacity): """Sets the capacity of this Dpu. :param capacity: The capacity of this Dpu. # noqa: E501 :type: int """ self._capacity = capacity @property def state(self): """Gets the state of this Dpu. # noqa: E501 :return: The state of this Dpu. # noqa: E501 :rtype: ResourceState """ return self._state @state.setter def state(self, state): """Sets the state of this Dpu. :param state: The state of this Dpu. # noqa: E501 :type: ResourceState """ self._state = state @property def available(self): """Gets the available of this Dpu. # noqa: E501 :return: The available of this Dpu. # noqa: E501 :rtype: bool """ return self._available @available.setter def available(self, available): """Sets the available of this Dpu. :param available: The available of this Dpu. # noqa: E501 :type: bool """ self._available = available @property def dp_ip_setup(self): """Gets the dp_ip_setup of this Dpu. # noqa: E501 :return: The dp_ip_setup of this Dpu. # noqa: E501 :rtype: DpIpSetup """ return self._dp_ip_setup @dp_ip_setup.setter def dp_ip_setup(self, dp_ip_setup): """Sets the dp_ip_setup of this Dpu. :param dp_ip_setup: The dp_ip_setup of this Dpu. # noqa: E501 :type: DpIpSetup """ self._dp_ip_setup = dp_ip_setup @property def additional_fields(self): """Gets the additional_fields of this Dpu. # noqa: E501 :return: The additional_fields of this Dpu. # noqa: E501 :rtype: AdditionalFields """ return self._additional_fields @additional_fields.setter def additional_fields(self, additional_fields): """Sets the additional_fields of this Dpu. :param additional_fields: The additional_fields of this Dpu. # noqa: E501 :type: AdditionalFields """ self._additional_fields = additional_fields @property def created_at(self): """Gets the created_at of this Dpu. # noqa: E501 set on create # noqa: E501 :return: The created_at of this Dpu. # noqa: E501 :rtype: datetime """ return self._created_at @created_at.setter def created_at(self, created_at): """Sets the created_at of this Dpu. set on create # noqa: E501 :param created_at: The created_at of this Dpu. 
# noqa: E501 :type: datetime """ self._created_at = created_at @property def modified_at(self): """Gets the modified_at of this Dpu. # noqa: E501 set when modified # noqa: E501 :return: The modified_at of this Dpu. # noqa: E501 :rtype: datetime """ return self._modified_at @modified_at.setter def modified_at(self, modified_at): """Sets the modified_at of this Dpu. set when modified # noqa: E501 :param modified_at: The modified_at of this Dpu. # noqa: E501 :type: datetime """ self._modified_at = modified_at @property def sku(self): """Gets the sku of this Dpu. # noqa: E501 :return: The sku of this Dpu. # noqa: E501 :rtype: str """ return self._sku @sku.setter def sku(self, sku): """Sets the sku of this Dpu. :param sku: The sku of this Dpu. # noqa: E501 :type: str """ self._sku = sku @property def product(self): """Gets the product of this Dpu. # noqa: E501 :return: The product of this Dpu. # noqa: E501 :rtype: str """ return self._product @product.setter def product(self, product): """Sets the product of this Dpu. :param product: The product of this Dpu. # noqa: E501 :type: str """ allowed_values = ["UNKNOWN", "DS200", "FC200", "FC50", "FS800", "FC100", "FS1600"] # noqa: E501 if product not in allowed_values: raise ValueError( "Invalid value for `product` ({0}), must be one of {1}" # noqa: E501 .format(product, allowed_values) ) self._product = product @property def fault_domain_id(self): """Gets the fault_domain_id of this Dpu. # noqa: E501 :return: The fault_domain_id of this Dpu. # noqa: E501 :rtype: str """ return self._fault_domain_id @fault_domain_id.setter def fault_domain_id(self, fault_domain_id): """Sets the fault_domain_id of this Dpu. :param fault_domain_id: The fault_domain_id of this Dpu. # noqa: E501 :type: str """ self._fault_domain_id = fault_domain_id def to_dict(self): """Returns the model properties as a dict""" result = {} for attr, _ in self.swagger_types.items(): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value )) elif hasattr(value, "to_dict"): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item, value.items() )) else: result[attr] = value if issubclass(Dpu, dict): for key, value in self.items(): result[key] = value return result def to_str(self): """Returns the string representation of the model""" return pprint.pformat(self.to_dict()) def __repr__(self): """For `print` and `pprint`""" return self.to_str() def __eq__(self, other): """Returns true if both objects are equal""" if not isinstance(other, Dpu): return False return self.__dict__ == other.__dict__ def __ne__(self, other): """Returns true if both objects are not equal""" return not self == other class Drive(object): """NOTE: This class is auto generated by the swagger code generator program Do not edit the class manually. """ """ Attributes: swagger_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. 
""" swagger_types = { 'uuid': 'str', 'dpu': 'str', 'fault_zone': 'str', 'nguid_low': 'int', 'nguid_high': 'int', 'usage': 'int', 'slot_id': 'int', 'state': 'ResourceState', 'plugged': 'bool', 'capacity': 'int', 'volumes': 'list[str]', 'created_at': 'datetime', 'modified_at': 'datetime', 'smart': 'Smart', 'identity': 'Identity' } attribute_map = { 'uuid': 'uuid', 'dpu': 'dpu', 'fault_zone': 'fault_zone', 'nguid_low': 'nguid_low', 'nguid_high': 'nguid_high', 'usage': 'usage', 'slot_id': 'slot_id', 'state': 'state', 'plugged': 'plugged', 'capacity': 'capacity', 'volumes': 'volumes', 'created_at': 'created_at', 'modified_at': 'modified_at', 'smart': 'smart', 'identity': 'identity' } def __init__(self, uuid=None, dpu=None, fault_zone=None, nguid_low=None, nguid_high=None, usage=None, slot_id=None, state=None, plugged=None, capacity=None, volumes=None, created_at=None, modified_at=None, smart=None, identity=None): # noqa: E501 """Drive - a model defined in Swagger""" # noqa: E501 self._uuid = None self._dpu = None self._fault_zone = None self._nguid_low = None self._nguid_high = None self._usage = None self._slot_id = None self._state = None self._plugged = None self._capacity = None self._volumes = None self._created_at = None self._modified_at = None self._smart = None self._identity = None self.discriminator = None if uuid is not None: self.uuid = uuid self.dpu = dpu if fault_zone is not None: self.fault_zone = fault_zone if nguid_low is not None: self.nguid_low = nguid_low if nguid_high is not None: self.nguid_high = nguid_high if usage is not None: self.usage = usage self.slot_id = slot_id if state is not None: self.state = state if plugged is not None: self.plugged = plugged if capacity is not None: self.capacity = capacity if volumes is not None: self.volumes = volumes if created_at is not None: self.created_at = created_at if modified_at is not None: self.modified_at = modified_at if smart is not None: self.smart = smart if identity is not None: self.identity = identity @property def uuid(self): """Gets the uuid of this Drive. # noqa: E501 unique id of drive assigned by FS # noqa: E501 :return: The uuid of this Drive. # noqa: E501 :rtype: str """ return self._uuid @uuid.setter def uuid(self, uuid): """Sets the uuid of this Drive. unique id of drive assigned by FS # noqa: E501 :param uuid: The uuid of this Drive. # noqa: E501 :type: str """ self._uuid = uuid @property def dpu(self): """Gets the dpu of this Drive. # noqa: E501 id of dpu to which this drive is attached # noqa: E501 :return: The dpu of this Drive. # noqa: E501 :rtype: str """ return self._dpu @dpu.setter def dpu(self, dpu): """Sets the dpu of this Drive. id of dpu to which this drive is attached # noqa: E501 :param dpu: The dpu of this Drive. # noqa: E501 :type: str """ if dpu is None: raise ValueError("Invalid value for `dpu`, must not be `None`") # noqa: E501 self._dpu = dpu @property def fault_zone(self): """Gets the fault_zone of this Drive. # noqa: E501 :return: The fault_zone of this Drive. # noqa: E501 :rtype: str """ return self._fault_zone @fault_zone.setter def fault_zone(self, fault_zone): """Sets the fault_zone of this Drive. :param fault_zone: The fault_zone of this Drive. # noqa: E501 :type: str """ self._fault_zone = fault_zone @property def nguid_low(self): """Gets the nguid_low of this Drive. # noqa: E501 :return: The nguid_low of this Drive. # noqa: E501 :rtype: int """ return self._nguid_low @nguid_low.setter def nguid_low(self, nguid_low): """Sets the nguid_low of this Drive. 
:param nguid_low: The nguid_low of this Drive. # noqa: E501 :type: int """ if nguid_low is not None and nguid_low < 0: # noqa: E501 raise ValueError("Invalid value for `nguid_low`, must be a value greater than or equal to `0`") # noqa: E501 self._nguid_low = nguid_low @property def nguid_high(self): """Gets the nguid_high of this Drive. # noqa: E501 :return: The nguid_high of this Drive. # noqa: E501 :rtype: int """ return self._nguid_high @nguid_high.setter def nguid_high(self, nguid_high): """Sets the nguid_high of this Drive. :param nguid_high: The nguid_high of this Drive. # noqa: E501 :type: int """ if nguid_high is not None and nguid_high < 0: # noqa: E501 raise ValueError("Invalid value for `nguid_high`, must be a value greater than or equal to `0`") # noqa: E501 self._nguid_high = nguid_high @property def usage(self): """Gets the usage of this Drive. # noqa: E501 :return: The usage of this Drive. # noqa: E501 :rtype: int """ return self._usage @usage.setter def usage(self, usage): """Sets the usage of this Drive. :param usage: The usage of this Drive. # noqa: E501 :type: int """ if usage is not None and usage < 0: # noqa: E501 raise ValueError("Invalid value for `usage`, must be a value greater than or equal to `0`") # noqa: E501 self._usage = usage @property def slot_id(self): """Gets the slot_id of this Drive. # noqa: E501 dpu slot to which drive is connected # noqa: E501 :return: The slot_id of this Drive. # noqa: E501 :rtype: int """ return self._slot_id @slot_id.setter def slot_id(self, slot_id): """Sets the slot_id of this Drive. dpu slot to which drive is connected # noqa: E501 :param slot_id: The slot_id of this Drive. # noqa: E501 :type: int """ if slot_id is None: raise ValueError("Invalid value for `slot_id`, must not be `None`") # noqa: E501 self._slot_id = slot_id @property def state(self): """Gets the state of this Drive. # noqa: E501 :return: The state of this Drive. # noqa: E501 :rtype: ResourceState """ return self._state @state.setter def state(self, state): """Sets the state of this Drive. :param state: The state of this Drive. # noqa: E501 :type: ResourceState """ self._state = state @property def plugged(self): """Gets the plugged of this Drive. # noqa: E501 :return: The plugged of this Drive. # noqa: E501 :rtype: bool """ return self._plugged @plugged.setter def plugged(self, plugged): """Sets the plugged of this Drive. :param plugged: The plugged of this Drive. # noqa: E501 :type: bool """ self._plugged = plugged @property def capacity(self): """Gets the capacity of this Drive. # noqa: E501 :return: The capacity of this Drive. # noqa: E501 :rtype: int """ return self._capacity @capacity.setter def capacity(self, capacity): """Sets the capacity of this Drive. :param capacity: The capacity of this Drive. # noqa: E501 :type: int """ self._capacity = capacity @property def volumes(self): """Gets the volumes of this Drive. # noqa: E501 :return: The volumes of this Drive. # noqa: E501 :rtype: list[str] """ return self._volumes @volumes.setter def volumes(self, volumes): """Sets the volumes of this Drive. :param volumes: The volumes of this Drive. # noqa: E501 :type: list[str] """ self._volumes = volumes @property def created_at(self): """Gets the created_at of this Drive. # noqa: E501 set on create # noqa: E501 :return: The created_at of this Drive. # noqa: E501 :rtype: datetime """ return self._created_at @created_at.setter def created_at(self, created_at): """Sets the created_at of this Drive. 
set on create # noqa: E501 :param created_at: The created_at of this Drive. # noqa: E501 :type: datetime """ self._created_at = created_at @property def modified_at(self): """Gets the modified_at of this Drive. # noqa: E501 set when modified # noqa: E501 :return: The modified_at of this Drive. # noqa: E501 :rtype: datetime """ return self._modified_at @modified_at.setter def modified_at(self, modified_at): """Sets the modified_at of this Drive. set when modified # noqa: E501 :param modified_at: The modified_at of this Drive. # noqa: E501 :type: datetime """ self._modified_at = modified_at @property def smart(self): """Gets the smart of this Drive. # noqa: E501 :return: The smart of this Drive. # noqa: E501 :rtype: Smart """ return self._smart @smart.setter def smart(self, smart): """Sets the smart of this Drive. :param smart: The smart of this Drive. # noqa: E501 :type: Smart """ self._smart = smart @property def identity(self): """Gets the identity of this Drive. # noqa: E501 :return: The identity of this Drive. # noqa: E501 :rtype: Identity """ return self._identity @identity.setter def identity(self, identity): """Sets the identity of this Drive. :param identity: The identity of this Drive. # noqa: E501 :type: Identity """ self._identity = identity def to_dict(self): """Returns the model properties as a dict""" result = {} for attr, _ in self.swagger_types.items(): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value )) elif hasattr(value, "to_dict"): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item, value.items() )) else: result[attr] = value if issubclass(Drive, dict): for key, value in self.items(): result[key] = value return result def to_str(self): """Returns the string representation of the model""" return pprint.pformat(self.to_dict()) def __repr__(self): """For `print` and `pprint`""" return self.to_str() def __eq__(self, other): """Returns true if both objects are equal""" if not isinstance(other, Drive): return False return self.__dict__ == other.__dict__ def __ne__(self, other): """Returns true if both objects are not equal""" return not self == other class ErrorResponseFields(object): """NOTE: This class is auto generated by the swagger code generator program Do not edit the class manually. """ """ Attributes: swagger_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ swagger_types = { 'status': 'bool', 'message': 'str', 'error_message': 'str' } attribute_map = { 'status': 'status', 'message': 'message', 'error_message': 'error_message' } def __init__(self, status=False, message=None, error_message=None): # noqa: E501 """ErrorResponseFields - a model defined in Swagger""" # noqa: E501 self._status = None self._message = None self._error_message = None self.discriminator = None self.status = status self.message = message if error_message is not None: self.error_message = error_message @property def status(self): """Gets the status of this ErrorResponseFields. # noqa: E501 :return: The status of this ErrorResponseFields. # noqa: E501 :rtype: bool """ return self._status @status.setter def status(self, status): """Sets the status of this ErrorResponseFields. :param status: The status of this ErrorResponseFields. 
# noqa: E501 :type: bool """ if status is None: raise ValueError("Invalid value for `status`, must not be `None`") # noqa: E501 self._status = status @property def message(self): """Gets the message of this ErrorResponseFields. # noqa: E501 :return: The message of this ErrorResponseFields. # noqa: E501 :rtype: str """ return self._message @message.setter def message(self, message): """Sets the message of this ErrorResponseFields. :param message: The message of this ErrorResponseFields. # noqa: E501 :type: str """ if message is None: raise ValueError("Invalid value for `message`, must not be `None`") # noqa: E501 self._message = message @property def error_message(self): """Gets the error_message of this ErrorResponseFields. # noqa: E501 :return: The error_message of this ErrorResponseFields. # noqa: E501 :rtype: str """ return self._error_message @error_message.setter def error_message(self, error_message): """Sets the error_message of this ErrorResponseFields. :param error_message: The error_message of this ErrorResponseFields. # noqa: E501 :type: str """ self._error_message = error_message def to_dict(self): """Returns the model properties as a dict""" result = {} for attr, _ in self.swagger_types.items(): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value )) elif hasattr(value, "to_dict"): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item, value.items() )) else: result[attr] = value if issubclass(ErrorResponseFields, dict): for key, value in self.items(): result[key] = value return result def to_str(self): """Returns the string representation of the model""" return pprint.pformat(self.to_dict()) def __repr__(self): """For `print` and `pprint`""" return self.to_str() def __eq__(self, other): """Returns true if both objects are equal""" if not isinstance(other, ErrorResponseFields): return False return self.__dict__ == other.__dict__ def __ne__(self, other): """Returns true if both objects are not equal""" return not self == other class FacInfo(object): """NOTE: This class is auto generated by the swagger code generator program Do not edit the class manually. """ """ Attributes: swagger_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ swagger_types = { 'fac_uuid': 'str' } attribute_map = { 'fac_uuid': 'fac_uuid' } def __init__(self, fac_uuid=None): # noqa: E501 """FacInfo - a model defined in Swagger""" # noqa: E501 self._fac_uuid = None self.discriminator = None if fac_uuid is not None: self.fac_uuid = fac_uuid @property def fac_uuid(self): """Gets the fac_uuid of this FacInfo. # noqa: E501 :return: The fac_uuid of this FacInfo. # noqa: E501 :rtype: str """ return self._fac_uuid @fac_uuid.setter def fac_uuid(self, fac_uuid): """Sets the fac_uuid of this FacInfo. :param fac_uuid: The fac_uuid of this FacInfo. 
# noqa: E501 :type: str """ self._fac_uuid = fac_uuid def to_dict(self): """Returns the model properties as a dict""" result = {} for attr, _ in self.swagger_types.items(): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value )) elif hasattr(value, "to_dict"): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item, value.items() )) else: result[attr] = value if issubclass(FacInfo, dict): for key, value in self.items(): result[key] = value return result def to_str(self): """Returns the string representation of the model""" return pprint.pformat(self.to_dict()) def __repr__(self): """For `print` and `pprint`""" return self.to_str() def __eq__(self, other): """Returns true if both objects are equal""" if not isinstance(other, FacInfo): return False return self.__dict__ == other.__dict__ def __ne__(self, other): """Returns true if both objects are not equal""" return not self == other class HostInfo(object): """NOTE: This class is auto generated by the swagger code generator program Do not edit the class manually. """ """ Attributes: swagger_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ swagger_types = { 'host_uuid': 'str', 'host_name': 'str', 'host_location': 'str', 'host_nqn': 'str', 'created_at': 'datetime', 'fac_enabled': 'bool', 'facs': 'list[FacInfo]' } attribute_map = { 'host_uuid': 'host_uuid', 'host_name': 'host_name', 'host_location': 'host_location', 'host_nqn': 'host_nqn', 'created_at': 'created_at', 'fac_enabled': 'fac_enabled', 'facs': 'facs' } def __init__(self, host_uuid=None, host_name=None, host_location=None, host_nqn=None, created_at=None, fac_enabled=None, facs=None): # noqa: E501 """HostInfo - a model defined in Swagger""" # noqa: E501 self._host_uuid = None self._host_name = None self._host_location = None self._host_nqn = None self._created_at = None self._fac_enabled = None self._facs = None self.discriminator = None if host_uuid is not None: self.host_uuid = host_uuid if host_name is not None: self.host_name = host_name if host_location is not None: self.host_location = host_location if host_nqn is not None: self.host_nqn = host_nqn if created_at is not None: self.created_at = created_at if fac_enabled is not None: self.fac_enabled = fac_enabled if facs is not None: self.facs = facs @property def host_uuid(self): """Gets the host_uuid of this HostInfo. # noqa: E501 This UUID is generated by the StorageService # noqa: E501 :return: The host_uuid of this HostInfo. # noqa: E501 :rtype: str """ return self._host_uuid @host_uuid.setter def host_uuid(self, host_uuid): """Sets the host_uuid of this HostInfo. This UUID is generated by the StorageService # noqa: E501 :param host_uuid: The host_uuid of this HostInfo. # noqa: E501 :type: str """ self._host_uuid = host_uuid @property def host_name(self): """Gets the host_name of this HostInfo. # noqa: E501 This is the user-friendly name assigned by an admin to this host. # noqa: E501 :return: The host_name of this HostInfo. # noqa: E501 :rtype: str """ return self._host_name @host_name.setter def host_name(self, host_name): """Sets the host_name of this HostInfo. This is the user-friendly name assigned by an admin to this host. # noqa: E501 :param host_name: The host_name of this HostInfo. 
# noqa: E501 :type: str """ if host_name is not None and len(host_name) > 223: raise ValueError("Invalid value for `host_name`, length must be less than or equal to `223`") # noqa: E501 self._host_name = host_name @property def host_location(self): """Gets the host_location of this HostInfo. # noqa: E501 Optional, location information assigned by admin such as Chassis, Rack or shelf IDs. # noqa: E501 :return: The host_location of this HostInfo. # noqa: E501 :rtype: str """ return self._host_location @host_location.setter def host_location(self, host_location): """Sets the host_location of this HostInfo. Optional, location information assigned by admin such as Chassis, Rack or shelf IDs. # noqa: E501 :param host_location: The host_location of this HostInfo. # noqa: E501 :type: str """ self._host_location = host_location @property def host_nqn(self): """Gets the host_nqn of this HostInfo. # noqa: E501 The nqn name used during NVME connect operations # noqa: E501 :return: The host_nqn of this HostInfo. # noqa: E501 :rtype: str """ return self._host_nqn @host_nqn.setter def host_nqn(self, host_nqn): """Sets the host_nqn of this HostInfo. The nqn name used during NVME connect operations # noqa: E501 :param host_nqn: The host_nqn of this HostInfo. # noqa: E501 :type: str """ if host_nqn is not None and len(host_nqn) > 223: raise ValueError("Invalid value for `host_nqn`, length must be less than or equal to `223`") # noqa: E501 self._host_nqn = host_nqn @property def created_at(self): """Gets the created_at of this HostInfo. # noqa: E501 Time at which this entry was created. Generated by StorageService and useful in paginating a list of hosts # noqa: E501 :return: The created_at of this HostInfo. # noqa: E501 :rtype: datetime """ return self._created_at @created_at.setter def created_at(self, created_at): """Sets the created_at of this HostInfo. Time at which this entry was created. Generated by StorageService and useful in paginating a list of hosts # noqa: E501 :param created_at: The created_at of this HostInfo. # noqa: E501 :type: datetime """ self._created_at = created_at @property def fac_enabled(self): """Gets the fac_enabled of this HostInfo. # noqa: E501 Set to true if this server/host contains at least one FAC card. # noqa: E501 :return: The fac_enabled of this HostInfo. # noqa: E501 :rtype: bool """ return self._fac_enabled @fac_enabled.setter def fac_enabled(self, fac_enabled): """Sets the fac_enabled of this HostInfo. Set to true if this server/host contains at least one FAC card. # noqa: E501 :param fac_enabled: The fac_enabled of this HostInfo. # noqa: E501 :type: bool """ self._fac_enabled = fac_enabled @property def facs(self): """Gets the facs of this HostInfo. # noqa: E501 Contains an array of FAC UUIDs, when fac_enabled is set to True # noqa: E501 :return: The facs of this HostInfo. # noqa: E501 :rtype: list[FacInfo] """ return self._facs @facs.setter def facs(self, facs): """Sets the facs of this HostInfo. Contains an array of FAC UUIDs, when fac_enabled is set to True # noqa: E501 :param facs: The facs of this HostInfo. 
# noqa: E501 :type: list[FacInfo] """ self._facs = facs def to_dict(self): """Returns the model properties as a dict""" result = {} for attr, _ in self.swagger_types.items(): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value )) elif hasattr(value, "to_dict"): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item, value.items() )) else: result[attr] = value if issubclass(HostInfo, dict): for key, value in self.items(): result[key] = value return result def to_str(self): """Returns the string representation of the model""" return pprint.pformat(self.to_dict()) def __repr__(self): """For `print` and `pprint`""" return self.to_str() def __eq__(self, other): """Returns true if both objects are equal""" if not isinstance(other, HostInfo): return False return self.__dict__ == other.__dict__ def __ne__(self, other): """Returns true if both objects are not equal""" return not self == other class Identity(object): """NOTE: This class is auto generated by the swagger code generator program Do not edit the class manually. """ """ Attributes: swagger_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ swagger_types = { 'firmware_revision': 'str', 'pci_vendor_id': 'str', 'serial_number': 'str', 'model_number': 'str' } attribute_map = { 'firmware_revision': 'firmware_revision', 'pci_vendor_id': 'pci_vendor_id', 'serial_number': 'serial_number', 'model_number': 'model_number' } def __init__(self, firmware_revision=None, pci_vendor_id=None, serial_number=None, model_number=None): # noqa: E501 """Identity - a model defined in Swagger""" # noqa: E501 self._firmware_revision = None self._pci_vendor_id = None self._serial_number = None self._model_number = None self.discriminator = None if firmware_revision is not None: self.firmware_revision = firmware_revision if pci_vendor_id is not None: self.pci_vendor_id = pci_vendor_id if serial_number is not None: self.serial_number = serial_number if model_number is not None: self.model_number = model_number @property def firmware_revision(self): """Gets the firmware_revision of this Identity. # noqa: E501 :return: The firmware_revision of this Identity. # noqa: E501 :rtype: str """ return self._firmware_revision @firmware_revision.setter def firmware_revision(self, firmware_revision): """Sets the firmware_revision of this Identity. :param firmware_revision: The firmware_revision of this Identity. # noqa: E501 :type: str """ self._firmware_revision = firmware_revision @property def pci_vendor_id(self): """Gets the pci_vendor_id of this Identity. # noqa: E501 :return: The pci_vendor_id of this Identity. # noqa: E501 :rtype: str """ return self._pci_vendor_id @pci_vendor_id.setter def pci_vendor_id(self, pci_vendor_id): """Sets the pci_vendor_id of this Identity. :param pci_vendor_id: The pci_vendor_id of this Identity. # noqa: E501 :type: str """ self._pci_vendor_id = pci_vendor_id @property def serial_number(self): """Gets the serial_number of this Identity. # noqa: E501 :return: The serial_number of this Identity. # noqa: E501 :rtype: str """ return self._serial_number @serial_number.setter def serial_number(self, serial_number): """Sets the serial_number of this Identity. :param serial_number: The serial_number of this Identity. 
# noqa: E501 :type: str """ self._serial_number = serial_number @property def model_number(self): """Gets the model_number of this Identity. # noqa: E501 :return: The model_number of this Identity. # noqa: E501 :rtype: str """ return self._model_number @model_number.setter def model_number(self, model_number): """Sets the model_number of this Identity. :param model_number: The model_number of this Identity. # noqa: E501 :type: str """ self._model_number = model_number def to_dict(self): """Returns the model properties as a dict""" result = {} for attr, _ in self.swagger_types.items(): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value )) elif hasattr(value, "to_dict"): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item, value.items() )) else: result[attr] = value if issubclass(Identity, dict): for key, value in self.items(): result[key] = value return result def to_str(self): """Returns the string representation of the model""" return pprint.pformat(self.to_dict()) def __repr__(self): """For `print` and `pprint`""" return self.to_str() def __eq__(self, other): """Returns true if both objects are equal""" if not isinstance(other, Identity): return False return self.__dict__ == other.__dict__ def __ne__(self, other): """Returns true if both objects are not equal""" return not self == other class MapOfPorts(object): """NOTE: This class is auto generated by the swagger code generator program Do not edit the class manually. """ """ Attributes: swagger_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ swagger_types = { } attribute_map = { } def __init__(self): # noqa: E501 """MapOfPorts - a model defined in Swagger""" # noqa: E501 self.discriminator = None def to_dict(self): """Returns the model properties as a dict""" result = {} for attr, _ in self.swagger_types.items(): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value )) elif hasattr(value, "to_dict"): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item, value.items() )) else: result[attr] = value if issubclass(MapOfPorts, dict): for key, value in self.items(): result[key] = value return result def to_str(self): """Returns the string representation of the model""" return pprint.pformat(self.to_dict()) def __repr__(self): """For `print` and `pprint`""" return self.to_str() def __eq__(self, other): """Returns true if both objects are equal""" if not isinstance(other, MapOfPorts): return False return self.__dict__ == other.__dict__ def __ne__(self, other): """Returns true if both objects are not equal""" return not self == other class NodeDpu(object): """NOTE: This class is auto generated by the swagger code generator program Do not edit the class manually. """ """ Attributes: swagger_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. 
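# Illustrative sketch for the Identity model above; every value is a
# placeholder and the helper is not part of the generated module. MapOfPorts
# is emitted as an empty free-form model, so it is normally filled in by the
# API client deserializer rather than constructed by hand.
def _example_identity_usage():
    ident = Identity(firmware_revision="1.0.0",
                     serial_number="SN-0001",
                     model_number="EXAMPLE-MODEL")
    return ident.to_str()  # pprint of the to_dict() form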
""" swagger_types = { 'available': 'bool', 'fault_zones': 'list[str]', 'mgmt_ip': 'str', 'mgmt_port': 'str', 'name': 'str', 'node_class': 'str', 'state': 'str', 'uuid': 'str', 'version': 'str', 'sku': 'str', 'product': 'str', 'dpus': 'list[Dpu]', 'fault_domain_id': 'str', 'host_uuid': 'str' } attribute_map = { 'available': 'available', 'fault_zones': 'fault_zones', 'mgmt_ip': 'mgmt_ip', 'mgmt_port': 'mgmt_port', 'name': 'name', 'node_class': 'node_class', 'state': 'state', 'uuid': 'uuid', 'version': 'version', 'sku': 'sku', 'product': 'product', 'dpus': 'dpus', 'fault_domain_id': 'fault_domain_id', 'host_uuid': 'host_uuid' } def __init__(self, available=None, fault_zones=None, mgmt_ip=None, mgmt_port=None, name=None, node_class=None, state=None, uuid=None, version=None, sku=None, product='UNKNOWN', dpus=None, fault_domain_id=None, host_uuid=None): # noqa: E501 """NodeDpu - a model defined in Swagger""" # noqa: E501 self._available = None self._fault_zones = None self._mgmt_ip = None self._mgmt_port = None self._name = None self._node_class = None self._state = None self._uuid = None self._version = None self._sku = None self._product = None self._dpus = None self._fault_domain_id = None self._host_uuid = None self.discriminator = None if available is not None: self.available = available if fault_zones is not None: self.fault_zones = fault_zones if mgmt_ip is not None: self.mgmt_ip = mgmt_ip if mgmt_port is not None: self.mgmt_port = mgmt_port if name is not None: self.name = name if node_class is not None: self.node_class = node_class if state is not None: self.state = state if uuid is not None: self.uuid = uuid if version is not None: self.version = version if sku is not None: self.sku = sku if product is not None: self.product = product if dpus is not None: self.dpus = dpus if fault_domain_id is not None: self.fault_domain_id = fault_domain_id if host_uuid is not None: self.host_uuid = host_uuid @property def available(self): """Gets the available of this NodeDpu. # noqa: E501 :return: The available of this NodeDpu. # noqa: E501 :rtype: bool """ return self._available @available.setter def available(self, available): """Sets the available of this NodeDpu. :param available: The available of this NodeDpu. # noqa: E501 :type: bool """ self._available = available @property def fault_zones(self): """Gets the fault_zones of this NodeDpu. # noqa: E501 :return: The fault_zones of this NodeDpu. # noqa: E501 :rtype: list[str] """ return self._fault_zones @fault_zones.setter def fault_zones(self, fault_zones): """Sets the fault_zones of this NodeDpu. :param fault_zones: The fault_zones of this NodeDpu. # noqa: E501 :type: list[str] """ self._fault_zones = fault_zones @property def mgmt_ip(self): """Gets the mgmt_ip of this NodeDpu. # noqa: E501 :return: The mgmt_ip of this NodeDpu. # noqa: E501 :rtype: str """ return self._mgmt_ip @mgmt_ip.setter def mgmt_ip(self, mgmt_ip): """Sets the mgmt_ip of this NodeDpu. :param mgmt_ip: The mgmt_ip of this NodeDpu. # noqa: E501 :type: str """ self._mgmt_ip = mgmt_ip @property def mgmt_port(self): """Gets the mgmt_port of this NodeDpu. # noqa: E501 :return: The mgmt_port of this NodeDpu. # noqa: E501 :rtype: str """ return self._mgmt_port @mgmt_port.setter def mgmt_port(self, mgmt_port): """Sets the mgmt_port of this NodeDpu. :param mgmt_port: The mgmt_port of this NodeDpu. # noqa: E501 :type: str """ self._mgmt_port = mgmt_port @property def name(self): """Gets the name of this NodeDpu. # noqa: E501 :return: The name of this NodeDpu. 
# noqa: E501 :rtype: str """ return self._name @name.setter def name(self, name): """Sets the name of this NodeDpu. :param name: The name of this NodeDpu. # noqa: E501 :type: str """ self._name = name @property def node_class(self): """Gets the node_class of this NodeDpu. # noqa: E501 :return: The node_class of this NodeDpu. # noqa: E501 :rtype: str """ return self._node_class @node_class.setter def node_class(self, node_class): """Sets the node_class of this NodeDpu. :param node_class: The node_class of this NodeDpu. # noqa: E501 :type: str """ self._node_class = node_class @property def state(self): """Gets the state of this NodeDpu. # noqa: E501 :return: The state of this NodeDpu. # noqa: E501 :rtype: str """ return self._state @state.setter def state(self, state): """Sets the state of this NodeDpu. :param state: The state of this NodeDpu. # noqa: E501 :type: str """ self._state = state @property def uuid(self): """Gets the uuid of this NodeDpu. # noqa: E501 :return: The uuid of this NodeDpu. # noqa: E501 :rtype: str """ return self._uuid @uuid.setter def uuid(self, uuid): """Sets the uuid of this NodeDpu. :param uuid: The uuid of this NodeDpu. # noqa: E501 :type: str """ self._uuid = uuid @property def version(self): """Gets the version of this NodeDpu. # noqa: E501 :return: The version of this NodeDpu. # noqa: E501 :rtype: str """ return self._version @version.setter def version(self, version): """Sets the version of this NodeDpu. :param version: The version of this NodeDpu. # noqa: E501 :type: str """ self._version = version @property def sku(self): """Gets the sku of this NodeDpu. # noqa: E501 :return: The sku of this NodeDpu. # noqa: E501 :rtype: str """ return self._sku @sku.setter def sku(self, sku): """Sets the sku of this NodeDpu. :param sku: The sku of this NodeDpu. # noqa: E501 :type: str """ self._sku = sku @property def product(self): """Gets the product of this NodeDpu. # noqa: E501 :return: The product of this NodeDpu. # noqa: E501 :rtype: str """ return self._product @product.setter def product(self, product): """Sets the product of this NodeDpu. :param product: The product of this NodeDpu. # noqa: E501 :type: str """ allowed_values = ["UNKNOWN", "DS200", "FC200", "FC50", "FS800", "FC100", "FS1600"] # noqa: E501 if product not in allowed_values: raise ValueError( "Invalid value for `product` ({0}), must be one of {1}" # noqa: E501 .format(product, allowed_values) ) self._product = product @property def dpus(self): """Gets the dpus of this NodeDpu. # noqa: E501 :return: The dpus of this NodeDpu. # noqa: E501 :rtype: list[Dpu] """ return self._dpus @dpus.setter def dpus(self, dpus): """Sets the dpus of this NodeDpu. :param dpus: The dpus of this NodeDpu. # noqa: E501 :type: list[Dpu] """ self._dpus = dpus @property def fault_domain_id(self): """Gets the fault_domain_id of this NodeDpu. # noqa: E501 :return: The fault_domain_id of this NodeDpu. # noqa: E501 :rtype: str """ return self._fault_domain_id @fault_domain_id.setter def fault_domain_id(self, fault_domain_id): """Sets the fault_domain_id of this NodeDpu. :param fault_domain_id: The fault_domain_id of this NodeDpu. # noqa: E501 :type: str """ self._fault_domain_id = fault_domain_id @property def host_uuid(self): """Gets the host_uuid of this NodeDpu. # noqa: E501 UUID of the host to which the node is added to # noqa: E501 :return: The host_uuid of this NodeDpu. # noqa: E501 :rtype: str """ return self._host_uuid @host_uuid.setter def host_uuid(self, host_uuid): """Sets the host_uuid of this NodeDpu. 
UUID of the host to which the node is added to # noqa: E501 :param host_uuid: The host_uuid of this NodeDpu. # noqa: E501 :type: str """ self._host_uuid = host_uuid def to_dict(self): """Returns the model properties as a dict""" result = {} for attr, _ in self.swagger_types.items(): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value )) elif hasattr(value, "to_dict"): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item, value.items() )) else: result[attr] = value if issubclass(NodeDpu, dict): for key, value in self.items(): result[key] = value return result def to_str(self): """Returns the string representation of the model""" return pprint.pformat(self.to_dict()) def __repr__(self): """For `print` and `pprint`""" return self.to_str() def __eq__(self, other): """Returns true if both objects are equal""" if not isinstance(other, NodeDpu): return False return self.__dict__ == other.__dict__ def __ne__(self, other): """Returns true if both objects are not equal""" return not self == other class Operation(object): """NOTE: This class is auto generated by the swagger code generator program Do not edit the class manually. """ """ Attributes: swagger_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ swagger_types = { 'operation_name': 'str', 'percentage_complete': 'int' } attribute_map = { 'operation_name': 'operation_name', 'percentage_complete': 'percentage_complete' } def __init__(self, operation_name=None, percentage_complete=None): # noqa: E501 """Operation - a model defined in Swagger""" # noqa: E501 self._operation_name = None self._percentage_complete = None self.discriminator = None if operation_name is not None: self.operation_name = operation_name if percentage_complete is not None: self.percentage_complete = percentage_complete @property def operation_name(self): """Gets the operation_name of this Operation. # noqa: E501 The name of the operation # noqa: E501 :return: The operation_name of this Operation. # noqa: E501 :rtype: str """ return self._operation_name @operation_name.setter def operation_name(self, operation_name): """Sets the operation_name of this Operation. The name of the operation # noqa: E501 :param operation_name: The operation_name of this Operation. # noqa: E501 :type: str """ allowed_values = ["rebuild", "expansion", "rebalance", "hydration"] # noqa: E501 if operation_name not in allowed_values: raise ValueError( "Invalid value for `operation_name` ({0}), must be one of {1}" # noqa: E501 .format(operation_name, allowed_values) ) self._operation_name = operation_name @property def percentage_complete(self): """Gets the percentage_complete of this Operation. # noqa: E501 :return: The percentage_complete of this Operation. # noqa: E501 :rtype: int """ return self._percentage_complete @percentage_complete.setter def percentage_complete(self, percentage_complete): """Sets the percentage_complete of this Operation. :param percentage_complete: The percentage_complete of this Operation. 
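# Illustrative sketch of the NodeDpu.product validation shown above: the setter
# only accepts the values in allowed_values ("UNKNOWN", "DS200", "FC200",
# "FC50", "FS800", "FC100", "FS1600") and raises ValueError otherwise. The
# helper and its values are placeholders, not generated code.
def _example_node_dpu_product_validation():
    node = NodeDpu(name="node-01", available=True, product="FS1600")
    try:
        node.product = "FS9999"   # hypothetical value, rejected by the setter
    except ValueError:
        pass
    return node.product           # still "FS1600"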
# noqa: E501 :type: int """ if percentage_complete is not None and percentage_complete > 100: # noqa: E501 raise ValueError("Invalid value for `percentage_complete`, must be a value less than or equal to `100`") # noqa: E501 if percentage_complete is not None and percentage_complete < 0: # noqa: E501 raise ValueError("Invalid value for `percentage_complete`, must be a value greater than or equal to `0`") # noqa: E501 self._percentage_complete = percentage_complete def to_dict(self): """Returns the model properties as a dict""" result = {} for attr, _ in self.swagger_types.items(): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value )) elif hasattr(value, "to_dict"): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item, value.items() )) else: result[attr] = value if issubclass(Operation, dict): for key, value in self.items(): result[key] = value return result def to_str(self): """Returns the string representation of the model""" return pprint.pformat(self.to_dict()) def __repr__(self): """For `print` and `pprint`""" return self.to_str() def __eq__(self, other): """Returns true if both objects are equal""" if not isinstance(other, Operation): return False return self.__dict__ == other.__dict__ def __ne__(self, other): """Returns true if both objects are not equal""" return not self == other class Port(object): """NOTE: This class is auto generated by the swagger code generator program Do not edit the class manually. """ """ Attributes: swagger_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. 
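# Illustrative sketch of the Operation constraints above: operation_name is
# limited to "rebuild", "expansion", "rebalance" or "hydration", and
# percentage_complete must lie in [0, 100]; both setters raise ValueError
# otherwise. The helper itself is not part of the generated module.
def _example_operation_progress():
    op = Operation(operation_name="rebuild", percentage_complete=42)
    return op.to_dict()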
""" swagger_types = { 'uuid': 'str', 'transport': 'Transport', 'host_nqn': 'str', 'ip': 'str', 'nsid': 'int', 'remote_ip': 'str', 'subsys_nqn': 'str', 'fnid': 'object', 'huid': 'object', 'ctlid': 'object', 'pci_bus': 'int', 'pci_device': 'int', 'pci_function': 'int', 'ctrlr_uuid': 'str', 'secondary_ctrlr_uuid': 'str', 'host_uuid': 'str' } attribute_map = { 'uuid': 'uuid', 'transport': 'transport', 'host_nqn': 'host_nqn', 'ip': 'ip', 'nsid': 'nsid', 'remote_ip': 'remote_ip', 'subsys_nqn': 'subsys_nqn', 'fnid': 'fnid', 'huid': 'huid', 'ctlid': 'ctlid', 'pci_bus': 'pci_bus', 'pci_device': 'pci_device', 'pci_function': 'pci_function', 'ctrlr_uuid': 'ctrlr_uuid', 'secondary_ctrlr_uuid': 'secondary_ctrlr_uuid', 'host_uuid': 'host_uuid' } def __init__(self, uuid=None, transport=None, host_nqn=None, ip=None, nsid=None, remote_ip=None, subsys_nqn=None, fnid=None, huid=None, ctlid=None, pci_bus=None, pci_device=None, pci_function=None, ctrlr_uuid=None, secondary_ctrlr_uuid=None, host_uuid=None): # noqa: E501 """Port - a model defined in Swagger""" # noqa: E501 self._uuid = None self._transport = None self._host_nqn = None self._ip = None self._nsid = None self._remote_ip = None self._subsys_nqn = None self._fnid = None self._huid = None self._ctlid = None self._pci_bus = None self._pci_device = None self._pci_function = None self._ctrlr_uuid = None self._secondary_ctrlr_uuid = None self._host_uuid = None self.discriminator = None self.uuid = uuid self.transport = transport if host_nqn is not None: self.host_nqn = host_nqn if ip is not None: self.ip = ip if nsid is not None: self.nsid = nsid if remote_ip is not None: self.remote_ip = remote_ip if subsys_nqn is not None: self.subsys_nqn = subsys_nqn if fnid is not None: self.fnid = fnid if huid is not None: self.huid = huid if ctlid is not None: self.ctlid = ctlid if pci_bus is not None: self.pci_bus = pci_bus if pci_device is not None: self.pci_device = pci_device if pci_function is not None: self.pci_function = pci_function if ctrlr_uuid is not None: self.ctrlr_uuid = ctrlr_uuid if secondary_ctrlr_uuid is not None: self.secondary_ctrlr_uuid = secondary_ctrlr_uuid if host_uuid is not None: self.host_uuid = host_uuid @property def uuid(self): """Gets the uuid of this Port. # noqa: E501 assigned by FC # noqa: E501 :return: The uuid of this Port. # noqa: E501 :rtype: str """ return self._uuid @uuid.setter def uuid(self, uuid): """Sets the uuid of this Port. assigned by FC # noqa: E501 :param uuid: The uuid of this Port. # noqa: E501 :type: str """ if uuid is None: raise ValueError("Invalid value for `uuid`, must not be `None`") # noqa: E501 self._uuid = uuid @property def transport(self): """Gets the transport of this Port. # noqa: E501 :return: The transport of this Port. # noqa: E501 :rtype: Transport """ return self._transport @transport.setter def transport(self, transport): """Sets the transport of this Port. :param transport: The transport of this Port. # noqa: E501 :type: Transport """ if transport is None: raise ValueError("Invalid value for `transport`, must not be `None`") # noqa: E501 self._transport = transport @property def host_nqn(self): """Gets the host_nqn of this Port. # noqa: E501 :return: The host_nqn of this Port. # noqa: E501 :rtype: str """ return self._host_nqn @host_nqn.setter def host_nqn(self, host_nqn): """Sets the host_nqn of this Port. :param host_nqn: The host_nqn of this Port. # noqa: E501 :type: str """ self._host_nqn = host_nqn @property def ip(self): """Gets the ip of this Port. 
# noqa: E501 :return: The ip of this Port. # noqa: E501 :rtype: str """ return self._ip @ip.setter def ip(self, ip): """Sets the ip of this Port. :param ip: The ip of this Port. # noqa: E501 :type: str """ self._ip = ip @property def nsid(self): """Gets the nsid of this Port. # noqa: E501 :return: The nsid of this Port. # noqa: E501 :rtype: int """ return self._nsid @nsid.setter def nsid(self, nsid): """Sets the nsid of this Port. :param nsid: The nsid of this Port. # noqa: E501 :type: int """ self._nsid = nsid @property def remote_ip(self): """Gets the remote_ip of this Port. # noqa: E501 :return: The remote_ip of this Port. # noqa: E501 :rtype: str """ return self._remote_ip @remote_ip.setter def remote_ip(self, remote_ip): """Sets the remote_ip of this Port. :param remote_ip: The remote_ip of this Port. # noqa: E501 :type: str """ self._remote_ip = remote_ip @property def subsys_nqn(self): """Gets the subsys_nqn of this Port. # noqa: E501 :return: The subsys_nqn of this Port. # noqa: E501 :rtype: str """ return self._subsys_nqn @subsys_nqn.setter def subsys_nqn(self, subsys_nqn): """Sets the subsys_nqn of this Port. :param subsys_nqn: The subsys_nqn of this Port. # noqa: E501 :type: str """ self._subsys_nqn = subsys_nqn @property def fnid(self): """Gets the fnid of this Port. # noqa: E501 Valid for transport=PCI # noqa: E501 :return: The fnid of this Port. # noqa: E501 :rtype: object """ return self._fnid @fnid.setter def fnid(self, fnid): """Sets the fnid of this Port. Valid for transport=PCI # noqa: E501 :param fnid: The fnid of this Port. # noqa: E501 :type: object """ self._fnid = fnid @property def huid(self): """Gets the huid of this Port. # noqa: E501 Valid for transport=PCI # noqa: E501 :return: The huid of this Port. # noqa: E501 :rtype: object """ return self._huid @huid.setter def huid(self, huid): """Sets the huid of this Port. Valid for transport=PCI # noqa: E501 :param huid: The huid of this Port. # noqa: E501 :type: object """ self._huid = huid @property def ctlid(self): """Gets the ctlid of this Port. # noqa: E501 Valid for transport=PCI # noqa: E501 :return: The ctlid of this Port. # noqa: E501 :rtype: object """ return self._ctlid @ctlid.setter def ctlid(self, ctlid): """Sets the ctlid of this Port. Valid for transport=PCI # noqa: E501 :param ctlid: The ctlid of this Port. # noqa: E501 :type: object """ self._ctlid = ctlid @property def pci_bus(self): """Gets the pci_bus of this Port. # noqa: E501 Valid for transport=PCI_BDF # noqa: E501 :return: The pci_bus of this Port. # noqa: E501 :rtype: int """ return self._pci_bus @pci_bus.setter def pci_bus(self, pci_bus): """Sets the pci_bus of this Port. Valid for transport=PCI_BDF # noqa: E501 :param pci_bus: The pci_bus of this Port. # noqa: E501 :type: int """ self._pci_bus = pci_bus @property def pci_device(self): """Gets the pci_device of this Port. # noqa: E501 Valid for transport=PCI_BDF # noqa: E501 :return: The pci_device of this Port. # noqa: E501 :rtype: int """ return self._pci_device @pci_device.setter def pci_device(self, pci_device): """Sets the pci_device of this Port. Valid for transport=PCI_BDF # noqa: E501 :param pci_device: The pci_device of this Port. # noqa: E501 :type: int """ self._pci_device = pci_device @property def pci_function(self): """Gets the pci_function of this Port. # noqa: E501 Valid for transport=PCI_BDF # noqa: E501 :return: The pci_function of this Port. 
# noqa: E501 :rtype: int """ return self._pci_function @pci_function.setter def pci_function(self, pci_function): """Sets the pci_function of this Port. Valid for transport=PCI_BDF # noqa: E501 :param pci_function: The pci_function of this Port. # noqa: E501 :type: int """ self._pci_function = pci_function @property def ctrlr_uuid(self): """Gets the ctrlr_uuid of this Port. # noqa: E501 :return: The ctrlr_uuid of this Port. # noqa: E501 :rtype: str """ return self._ctrlr_uuid @ctrlr_uuid.setter def ctrlr_uuid(self, ctrlr_uuid): """Sets the ctrlr_uuid of this Port. :param ctrlr_uuid: The ctrlr_uuid of this Port. # noqa: E501 :type: str """ self._ctrlr_uuid = ctrlr_uuid @property def secondary_ctrlr_uuid(self): """Gets the secondary_ctrlr_uuid of this Port. # noqa: E501 :return: The secondary_ctrlr_uuid of this Port. # noqa: E501 :rtype: str """ return self._secondary_ctrlr_uuid @secondary_ctrlr_uuid.setter def secondary_ctrlr_uuid(self, secondary_ctrlr_uuid): """Sets the secondary_ctrlr_uuid of this Port. :param secondary_ctrlr_uuid: The secondary_ctrlr_uuid of this Port. # noqa: E501 :type: str """ self._secondary_ctrlr_uuid = secondary_ctrlr_uuid @property def host_uuid(self): """Gets the host_uuid of this Port. # noqa: E501 UUID of the host to which the volume is attached to # noqa: E501 :return: The host_uuid of this Port. # noqa: E501 :rtype: str """ return self._host_uuid @host_uuid.setter def host_uuid(self, host_uuid): """Sets the host_uuid of this Port. UUID of the host to which the volume is attached to # noqa: E501 :param host_uuid: The host_uuid of this Port. # noqa: E501 :type: str """ self._host_uuid = host_uuid def to_dict(self): """Returns the model properties as a dict""" result = {} for attr, _ in self.swagger_types.items(): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value )) elif hasattr(value, "to_dict"): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item, value.items() )) else: result[attr] = value if issubclass(Port, dict): for key, value in self.items(): result[key] = value return result def to_str(self): """Returns the string representation of the model""" return pprint.pformat(self.to_dict()) def __repr__(self): """For `print` and `pprint`""" return self.to_str() def __eq__(self, other): """Returns true if both objects are equal""" if not isinstance(other, Port): return False return self.__dict__ == other.__dict__ def __ne__(self, other): """Returns true if both objects are not equal""" return not self == other class RebuildState(object): """NOTE: This class is auto generated by the swagger code generator program Do not edit the class manually. """ """ allowed enum values """ STATE_NONE = "REBUILD_STATE_NONE" START = "REBUILD_START" ISSUE = "REBUILD_ISSUE" STATE_IN_PROGRESS = "REBUILD_STATE_IN_PROGRESS" STATE_DELETE_FAILED = "REBUILD_STATE_DELETE_FAILED" SUSPENDED_NO_SPACE = "REBUILD_SUSPENDED_NO_SPACE" """ Attributes: swagger_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. 
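# Illustrative sketch of the required Port fields above: the uuid and transport
# setters raise ValueError on None. A real caller passes a Transport model
# instance for `transport`; the plain string below is only a stand-in, and all
# values are placeholders rather than generated output.
def _example_port_required_fields():
    port = Port(uuid="11111111-2222-3333-4444-555555555555",
                transport="TCP",
                nsid=1,
                subsys_nqn="nqn.2015-09.com.example:subsys-01")
    return port.to_dict()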
""" swagger_types = { } attribute_map = { } def __init__(self): # noqa: E501 """RebuildState - a model defined in Swagger""" # noqa: E501 self.discriminator = None def to_dict(self): """Returns the model properties as a dict""" result = {} for attr, _ in self.swagger_types.items(): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value )) elif hasattr(value, "to_dict"): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item, value.items() )) else: result[attr] = value if issubclass(RebuildState, dict): for key, value in self.items(): result[key] = value return result def to_str(self): """Returns the string representation of the model""" return pprint.pformat(self.to_dict()) def __repr__(self): """For `print` and `pprint`""" return self.to_str() def __eq__(self, other): """Returns true if both objects are equal""" if not isinstance(other, RebuildState): return False return self.__dict__ == other.__dict__ def __ne__(self, other): """Returns true if both objects are not equal""" return not self == other class ResourceState(object): """NOTE: This class is auto generated by the swagger code generator program Do not edit the class manually. """ """ allowed enum values """ INIT = "Init" ONLINE = "Online" FAILED = "Failed" DEGRADED = "Degraded" STOPPED = "Stopped" UNKNOWN = "Unknown" """ Attributes: swagger_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ swagger_types = { } attribute_map = { } def __init__(self): # noqa: E501 """ResourceState - a model defined in Swagger""" # noqa: E501 self.discriminator = None def to_dict(self): """Returns the model properties as a dict""" result = {} for attr, _ in self.swagger_types.items(): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value )) elif hasattr(value, "to_dict"): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item, value.items() )) else: result[attr] = value if issubclass(ResourceState, dict): for key, value in self.items(): result[key] = value return result def to_str(self): """Returns the string representation of the model""" return pprint.pformat(self.to_dict()) def __repr__(self): """For `print` and `pprint`""" return self.to_str() def __eq__(self, other): """Returns true if both objects are equal""" if not isinstance(other, ResourceState): return False return self.__dict__ == other.__dict__ def __ne__(self, other): """Returns true if both objects are not equal""" return not self == other class ResponseDataWithCreateUuidString(object): """NOTE: This class is auto generated by the swagger code generator program Do not edit the class manually. """ """ Attributes: swagger_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. 
""" swagger_types = { 'status': 'bool', 'message': 'str', 'error_message': 'str', 'warning': 'str', 'data': 'DataWithUuidStringData' } attribute_map = { 'status': 'status', 'message': 'message', 'error_message': 'error_message', 'warning': 'warning', 'data': 'data' } def __init__(self, status=None, message=None, error_message=None, warning=None, data=None): # noqa: E501 """ResponseDataWithCreateUuidString - a model defined in Swagger""" # noqa: E501 self._status = None self._message = None self._error_message = None self._warning = None self._data = None self.discriminator = None self.status = status if message is not None: self.message = message if error_message is not None: self.error_message = error_message if warning is not None: self.warning = warning self.data = data @property def status(self): """Gets the status of this ResponseDataWithCreateUuidString. # noqa: E501 :return: The status of this ResponseDataWithCreateUuidString. # noqa: E501 :rtype: bool """ return self._status @status.setter def status(self, status): """Sets the status of this ResponseDataWithCreateUuidString. :param status: The status of this ResponseDataWithCreateUuidString. # noqa: E501 :type: bool """ if status is None: raise ValueError("Invalid value for `status`, must not be `None`") # noqa: E501 self._status = status @property def message(self): """Gets the message of this ResponseDataWithCreateUuidString. # noqa: E501 :return: The message of this ResponseDataWithCreateUuidString. # noqa: E501 :rtype: str """ return self._message @message.setter def message(self, message): """Sets the message of this ResponseDataWithCreateUuidString. :param message: The message of this ResponseDataWithCreateUuidString. # noqa: E501 :type: str """ self._message = message @property def error_message(self): """Gets the error_message of this ResponseDataWithCreateUuidString. # noqa: E501 :return: The error_message of this ResponseDataWithCreateUuidString. # noqa: E501 :rtype: str """ return self._error_message @error_message.setter def error_message(self, error_message): """Sets the error_message of this ResponseDataWithCreateUuidString. :param error_message: The error_message of this ResponseDataWithCreateUuidString. # noqa: E501 :type: str """ self._error_message = error_message @property def warning(self): """Gets the warning of this ResponseDataWithCreateUuidString. # noqa: E501 :return: The warning of this ResponseDataWithCreateUuidString. # noqa: E501 :rtype: str """ return self._warning @warning.setter def warning(self, warning): """Sets the warning of this ResponseDataWithCreateUuidString. :param warning: The warning of this ResponseDataWithCreateUuidString. # noqa: E501 :type: str """ self._warning = warning @property def data(self): """Gets the data of this ResponseDataWithCreateUuidString. # noqa: E501 :return: The data of this ResponseDataWithCreateUuidString. # noqa: E501 :rtype: DataWithUuidStringData """ return self._data @data.setter def data(self, data): """Sets the data of this ResponseDataWithCreateUuidString. :param data: The data of this ResponseDataWithCreateUuidString. 
# noqa: E501 :type: DataWithUuidStringData """ if data is None: raise ValueError("Invalid value for `data`, must not be `None`") # noqa: E501 self._data = data def to_dict(self): """Returns the model properties as a dict""" result = {} for attr, _ in self.swagger_types.items(): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value )) elif hasattr(value, "to_dict"): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item, value.items() )) else: result[attr] = value if issubclass(ResponseDataWithCreateUuidString, dict): for key, value in self.items(): result[key] = value return result def to_str(self): """Returns the string representation of the model""" return pprint.pformat(self.to_dict()) def __repr__(self): """For `print` and `pprint`""" return self.to_str() def __eq__(self, other): """Returns true if both objects are equal""" if not isinstance(other, ResponseDataWithCreateUuidString): return False return self.__dict__ == other.__dict__ def __ne__(self, other): """Returns true if both objects are not equal""" return not self == other class ResponseDataWithCreateUuid(object): """NOTE: This class is auto generated by the swagger code generator program Do not edit the class manually. """ """ Attributes: swagger_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ swagger_types = { 'status': 'bool', 'message': 'str', 'error_message': 'str', 'warning': 'str', 'data': 'DataWithUuidData' } attribute_map = { 'status': 'status', 'message': 'message', 'error_message': 'error_message', 'warning': 'warning', 'data': 'data' } def __init__(self, status=None, message=None, error_message=None, warning=None, data=None): # noqa: E501 """ResponseDataWithCreateUuid - a model defined in Swagger""" # noqa: E501 self._status = None self._message = None self._error_message = None self._warning = None self._data = None self.discriminator = None self.status = status if message is not None: self.message = message if error_message is not None: self.error_message = error_message if warning is not None: self.warning = warning self.data = data @property def status(self): """Gets the status of this ResponseDataWithCreateUuid. # noqa: E501 :return: The status of this ResponseDataWithCreateUuid. # noqa: E501 :rtype: bool """ return self._status @status.setter def status(self, status): """Sets the status of this ResponseDataWithCreateUuid. :param status: The status of this ResponseDataWithCreateUuid. # noqa: E501 :type: bool """ if status is None: raise ValueError("Invalid value for `status`, must not be `None`") # noqa: E501 self._status = status @property def message(self): """Gets the message of this ResponseDataWithCreateUuid. # noqa: E501 :return: The message of this ResponseDataWithCreateUuid. # noqa: E501 :rtype: str """ return self._message @message.setter def message(self, message): """Sets the message of this ResponseDataWithCreateUuid. :param message: The message of this ResponseDataWithCreateUuid. # noqa: E501 :type: str """ self._message = message @property def error_message(self): """Gets the error_message of this ResponseDataWithCreateUuid. # noqa: E501 :return: The error_message of this ResponseDataWithCreateUuid. 
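# Illustrative sketch of the common Response* envelope (status / message /
# error_message / warning / data). For ResponseDataWithCreateUuidString both
# status and data are required and their setters raise ValueError on None.
# The string payload is only a stand-in for a DataWithUuidStringData instance;
# the helper is not part of the generated module.
def _example_response_wrapper():
    resp = ResponseDataWithCreateUuidString(status=True,
                                            data="stand-in-payload")
    return resp.to_dict()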
# noqa: E501 :rtype: str """ return self._error_message @error_message.setter def error_message(self, error_message): """Sets the error_message of this ResponseDataWithCreateUuid. :param error_message: The error_message of this ResponseDataWithCreateUuid. # noqa: E501 :type: str """ self._error_message = error_message @property def warning(self): """Gets the warning of this ResponseDataWithCreateUuid. # noqa: E501 :return: The warning of this ResponseDataWithCreateUuid. # noqa: E501 :rtype: str """ return self._warning @warning.setter def warning(self, warning): """Sets the warning of this ResponseDataWithCreateUuid. :param warning: The warning of this ResponseDataWithCreateUuid. # noqa: E501 :type: str """ self._warning = warning @property def data(self): """Gets the data of this ResponseDataWithCreateUuid. # noqa: E501 :return: The data of this ResponseDataWithCreateUuid. # noqa: E501 :rtype: DataWithUuidData """ return self._data @data.setter def data(self, data): """Sets the data of this ResponseDataWithCreateUuid. :param data: The data of this ResponseDataWithCreateUuid. # noqa: E501 :type: DataWithUuidData """ if data is None: raise ValueError("Invalid value for `data`, must not be `None`") # noqa: E501 self._data = data def to_dict(self): """Returns the model properties as a dict""" result = {} for attr, _ in self.swagger_types.items(): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value )) elif hasattr(value, "to_dict"): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item, value.items() )) else: result[attr] = value if issubclass(ResponseDataWithCreateUuid, dict): for key, value in self.items(): result[key] = value return result def to_str(self): """Returns the string representation of the model""" return pprint.pformat(self.to_dict()) def __repr__(self): """For `print` and `pprint`""" return self.to_str() def __eq__(self, other): """Returns true if both objects are equal""" if not isinstance(other, ResponseDataWithCreateUuid): return False return self.__dict__ == other.__dict__ def __ne__(self, other): """Returns true if both objects are not equal""" return not self == other class ResponseDataWithHostInfo(object): """NOTE: This class is auto generated by the swagger code generator program Do not edit the class manually. """ """ Attributes: swagger_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ swagger_types = { 'status': 'bool', 'message': 'str', 'error_message': 'str', 'warning': 'str', 'data': 'HostInfo' } attribute_map = { 'status': 'status', 'message': 'message', 'error_message': 'error_message', 'warning': 'warning', 'data': 'data' } def __init__(self, status=None, message=None, error_message=None, warning=None, data=None): # noqa: E501 """ResponseDataWithHostInfo - a model defined in Swagger""" # noqa: E501 self._status = None self._message = None self._error_message = None self._warning = None self._data = None self.discriminator = None self.status = status if message is not None: self.message = message if error_message is not None: self.error_message = error_message if warning is not None: self.warning = warning if data is not None: self.data = data @property def status(self): """Gets the status of this ResponseDataWithHostInfo. 
# noqa: E501 :return: The status of this ResponseDataWithHostInfo. # noqa: E501 :rtype: bool """ return self._status @status.setter def status(self, status): """Sets the status of this ResponseDataWithHostInfo. :param status: The status of this ResponseDataWithHostInfo. # noqa: E501 :type: bool """ if status is None: raise ValueError("Invalid value for `status`, must not be `None`") # noqa: E501 self._status = status @property def message(self): """Gets the message of this ResponseDataWithHostInfo. # noqa: E501 :return: The message of this ResponseDataWithHostInfo. # noqa: E501 :rtype: str """ return self._message @message.setter def message(self, message): """Sets the message of this ResponseDataWithHostInfo. :param message: The message of this ResponseDataWithHostInfo. # noqa: E501 :type: str """ self._message = message @property def error_message(self): """Gets the error_message of this ResponseDataWithHostInfo. # noqa: E501 :return: The error_message of this ResponseDataWithHostInfo. # noqa: E501 :rtype: str """ return self._error_message @error_message.setter def error_message(self, error_message): """Sets the error_message of this ResponseDataWithHostInfo. :param error_message: The error_message of this ResponseDataWithHostInfo. # noqa: E501 :type: str """ self._error_message = error_message @property def warning(self): """Gets the warning of this ResponseDataWithHostInfo. # noqa: E501 :return: The warning of this ResponseDataWithHostInfo. # noqa: E501 :rtype: str """ return self._warning @warning.setter def warning(self, warning): """Sets the warning of this ResponseDataWithHostInfo. :param warning: The warning of this ResponseDataWithHostInfo. # noqa: E501 :type: str """ self._warning = warning @property def data(self): """Gets the data of this ResponseDataWithHostInfo. # noqa: E501 :return: The data of this ResponseDataWithHostInfo. # noqa: E501 :rtype: HostInfo """ return self._data @data.setter def data(self, data): """Sets the data of this ResponseDataWithHostInfo. :param data: The data of this ResponseDataWithHostInfo. # noqa: E501 :type: HostInfo """ self._data = data def to_dict(self): """Returns the model properties as a dict""" result = {} for attr, _ in self.swagger_types.items(): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value )) elif hasattr(value, "to_dict"): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item, value.items() )) else: result[attr] = value if issubclass(ResponseDataWithHostInfo, dict): for key, value in self.items(): result[key] = value return result def to_str(self): """Returns the string representation of the model""" return pprint.pformat(self.to_dict()) def __repr__(self): """For `print` and `pprint`""" return self.to_str() def __eq__(self, other): """Returns true if both objects are equal""" if not isinstance(other, ResponseDataWithHostInfo): return False return self.__dict__ == other.__dict__ def __ne__(self, other): """Returns true if both objects are not equal""" return not self == other class ResponseDataWithListOfHostUuids(object): """NOTE: This class is auto generated by the swagger code generator program Do not edit the class manually. """ """ Attributes: swagger_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. 
""" swagger_types = { 'status': 'bool', 'message': 'str', 'error_message': 'str', 'warning': 'str', 'data': 'DataWithListOfHostUuidsData' } attribute_map = { 'status': 'status', 'message': 'message', 'error_message': 'error_message', 'warning': 'warning', 'data': 'data' } def __init__(self, status=None, message=None, error_message=None, warning=None, data=None): # noqa: E501 """ResponseDataWithListOfHostUuids - a model defined in Swagger""" # noqa: E501 self._status = None self._message = None self._error_message = None self._warning = None self._data = None self.discriminator = None self.status = status if message is not None: self.message = message if error_message is not None: self.error_message = error_message if warning is not None: self.warning = warning if data is not None: self.data = data @property def status(self): """Gets the status of this ResponseDataWithListOfHostUuids. # noqa: E501 :return: The status of this ResponseDataWithListOfHostUuids. # noqa: E501 :rtype: bool """ return self._status @status.setter def status(self, status): """Sets the status of this ResponseDataWithListOfHostUuids. :param status: The status of this ResponseDataWithListOfHostUuids. # noqa: E501 :type: bool """ if status is None: raise ValueError("Invalid value for `status`, must not be `None`") # noqa: E501 self._status = status @property def message(self): """Gets the message of this ResponseDataWithListOfHostUuids. # noqa: E501 :return: The message of this ResponseDataWithListOfHostUuids. # noqa: E501 :rtype: str """ return self._message @message.setter def message(self, message): """Sets the message of this ResponseDataWithListOfHostUuids. :param message: The message of this ResponseDataWithListOfHostUuids. # noqa: E501 :type: str """ self._message = message @property def error_message(self): """Gets the error_message of this ResponseDataWithListOfHostUuids. # noqa: E501 :return: The error_message of this ResponseDataWithListOfHostUuids. # noqa: E501 :rtype: str """ return self._error_message @error_message.setter def error_message(self, error_message): """Sets the error_message of this ResponseDataWithListOfHostUuids. :param error_message: The error_message of this ResponseDataWithListOfHostUuids. # noqa: E501 :type: str """ self._error_message = error_message @property def warning(self): """Gets the warning of this ResponseDataWithListOfHostUuids. # noqa: E501 :return: The warning of this ResponseDataWithListOfHostUuids. # noqa: E501 :rtype: str """ return self._warning @warning.setter def warning(self, warning): """Sets the warning of this ResponseDataWithListOfHostUuids. :param warning: The warning of this ResponseDataWithListOfHostUuids. # noqa: E501 :type: str """ self._warning = warning @property def data(self): """Gets the data of this ResponseDataWithListOfHostUuids. # noqa: E501 :return: The data of this ResponseDataWithListOfHostUuids. # noqa: E501 :rtype: DataWithListOfHostUuidsData """ return self._data @data.setter def data(self, data): """Sets the data of this ResponseDataWithListOfHostUuids. :param data: The data of this ResponseDataWithListOfHostUuids. 
# noqa: E501 :type: DataWithListOfHostUuidsData """ self._data = data def to_dict(self): """Returns the model properties as a dict""" result = {} for attr, _ in self.swagger_types.items(): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value )) elif hasattr(value, "to_dict"): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item, value.items() )) else: result[attr] = value if issubclass(ResponseDataWithListOfHostUuids, dict): for key, value in self.items(): result[key] = value return result def to_str(self): """Returns the string representation of the model""" return pprint.pformat(self.to_dict()) def __repr__(self): """For `print` and `pprint`""" return self.to_str() def __eq__(self, other): """Returns true if both objects are equal""" if not isinstance(other, ResponseDataWithListOfHostUuids): return False return self.__dict__ == other.__dict__ def __ne__(self, other): """Returns true if both objects are not equal""" return not self == other class ResponseDataWithListOfHosts(object): """NOTE: This class is auto generated by the swagger code generator program Do not edit the class manually. """ """ Attributes: swagger_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ swagger_types = { 'status': 'bool', 'message': 'str', 'error_message': 'str', 'warning': 'str', 'data': 'list[HostInfo]' } attribute_map = { 'status': 'status', 'message': 'message', 'error_message': 'error_message', 'warning': 'warning', 'data': 'data' } def __init__(self, status=None, message=None, error_message=None, warning=None, data=None): # noqa: E501 """ResponseDataWithListOfHosts - a model defined in Swagger""" # noqa: E501 self._status = None self._message = None self._error_message = None self._warning = None self._data = None self.discriminator = None self.status = status if message is not None: self.message = message if error_message is not None: self.error_message = error_message if warning is not None: self.warning = warning self.data = data @property def status(self): """Gets the status of this ResponseDataWithListOfHosts. # noqa: E501 :return: The status of this ResponseDataWithListOfHosts. # noqa: E501 :rtype: bool """ return self._status @status.setter def status(self, status): """Sets the status of this ResponseDataWithListOfHosts. :param status: The status of this ResponseDataWithListOfHosts. # noqa: E501 :type: bool """ if status is None: raise ValueError("Invalid value for `status`, must not be `None`") # noqa: E501 self._status = status @property def message(self): """Gets the message of this ResponseDataWithListOfHosts. # noqa: E501 :return: The message of this ResponseDataWithListOfHosts. # noqa: E501 :rtype: str """ return self._message @message.setter def message(self, message): """Sets the message of this ResponseDataWithListOfHosts. :param message: The message of this ResponseDataWithListOfHosts. # noqa: E501 :type: str """ self._message = message @property def error_message(self): """Gets the error_message of this ResponseDataWithListOfHosts. # noqa: E501 :return: The error_message of this ResponseDataWithListOfHosts. 
# noqa: E501 :rtype: str """ return self._error_message @error_message.setter def error_message(self, error_message): """Sets the error_message of this ResponseDataWithListOfHosts. :param error_message: The error_message of this ResponseDataWithListOfHosts. # noqa: E501 :type: str """ self._error_message = error_message @property def warning(self): """Gets the warning of this ResponseDataWithListOfHosts. # noqa: E501 :return: The warning of this ResponseDataWithListOfHosts. # noqa: E501 :rtype: str """ return self._warning @warning.setter def warning(self, warning): """Sets the warning of this ResponseDataWithListOfHosts. :param warning: The warning of this ResponseDataWithListOfHosts. # noqa: E501 :type: str """ self._warning = warning @property def data(self): """Gets the data of this ResponseDataWithListOfHosts. # noqa: E501 :return: The data of this ResponseDataWithListOfHosts. # noqa: E501 :rtype: list[HostInfo] """ return self._data @data.setter def data(self, data): """Sets the data of this ResponseDataWithListOfHosts. :param data: The data of this ResponseDataWithListOfHosts. # noqa: E501 :type: list[HostInfo] """ if data is None: raise ValueError("Invalid value for `data`, must not be `None`") # noqa: E501 self._data = data def to_dict(self): """Returns the model properties as a dict""" result = {} for attr, _ in self.swagger_types.items(): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value )) elif hasattr(value, "to_dict"): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item, value.items() )) else: result[attr] = value if issubclass(ResponseDataWithListOfHosts, dict): for key, value in self.items(): result[key] = value return result def to_str(self): """Returns the string representation of the model""" return pprint.pformat(self.to_dict()) def __repr__(self): """For `print` and `pprint`""" return self.to_str() def __eq__(self, other): """Returns true if both objects are equal""" if not isinstance(other, ResponseDataWithListOfHosts): return False return self.__dict__ == other.__dict__ def __ne__(self, other): """Returns true if both objects are not equal""" return not self == other class ResponseDataWithSinglePort(object): """NOTE: This class is auto generated by the swagger code generator program Do not edit the class manually. """ """ Attributes: swagger_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ swagger_types = { 'status': 'bool', 'message': 'str', 'error_message': 'str', 'warning': 'str', 'data': 'Port' } attribute_map = { 'status': 'status', 'message': 'message', 'error_message': 'error_message', 'warning': 'warning', 'data': 'data' } def __init__(self, status=None, message=None, error_message=None, warning=None, data=None): # noqa: E501 """ResponseDataWithSinglePort - a model defined in Swagger""" # noqa: E501 self._status = None self._message = None self._error_message = None self._warning = None self._data = None self.discriminator = None self.status = status if message is not None: self.message = message if error_message is not None: self.error_message = error_message if warning is not None: self.warning = warning self.data = data @property def status(self): """Gets the status of this ResponseDataWithSinglePort. 
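# Illustrative sketch of to_dict() list handling: it recurses into list members
# that expose their own to_dict(), so a ResponseDataWithListOfHosts holding
# HostInfo models serialises to a list of plain dicts. The HostInfo keyword
# argument is assumed from the generated constructor and all values are
# placeholders; the helper is not generated code.
def _example_list_of_hosts_to_dict():
    resp = ResponseDataWithListOfHosts(status=True,
                                       data=[HostInfo(host_name="compute-01")])
    return resp.to_dict()["data"]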
# noqa: E501 :return: The status of this ResponseDataWithSinglePort. # noqa: E501 :rtype: bool """ return self._status @status.setter def status(self, status): """Sets the status of this ResponseDataWithSinglePort. :param status: The status of this ResponseDataWithSinglePort. # noqa: E501 :type: bool """ if status is None: raise ValueError("Invalid value for `status`, must not be `None`") # noqa: E501 self._status = status @property def message(self): """Gets the message of this ResponseDataWithSinglePort. # noqa: E501 :return: The message of this ResponseDataWithSinglePort. # noqa: E501 :rtype: str """ return self._message @message.setter def message(self, message): """Sets the message of this ResponseDataWithSinglePort. :param message: The message of this ResponseDataWithSinglePort. # noqa: E501 :type: str """ self._message = message @property def error_message(self): """Gets the error_message of this ResponseDataWithSinglePort. # noqa: E501 :return: The error_message of this ResponseDataWithSinglePort. # noqa: E501 :rtype: str """ return self._error_message @error_message.setter def error_message(self, error_message): """Sets the error_message of this ResponseDataWithSinglePort. :param error_message: The error_message of this ResponseDataWithSinglePort. # noqa: E501 :type: str """ self._error_message = error_message @property def warning(self): """Gets the warning of this ResponseDataWithSinglePort. # noqa: E501 :return: The warning of this ResponseDataWithSinglePort. # noqa: E501 :rtype: str """ return self._warning @warning.setter def warning(self, warning): """Sets the warning of this ResponseDataWithSinglePort. :param warning: The warning of this ResponseDataWithSinglePort. # noqa: E501 :type: str """ self._warning = warning @property def data(self): """Gets the data of this ResponseDataWithSinglePort. # noqa: E501 :return: The data of this ResponseDataWithSinglePort. # noqa: E501 :rtype: Port """ return self._data @data.setter def data(self, data): """Sets the data of this ResponseDataWithSinglePort. :param data: The data of this ResponseDataWithSinglePort. # noqa: E501 :type: Port """ if data is None: raise ValueError("Invalid value for `data`, must not be `None`") # noqa: E501 self._data = data def to_dict(self): """Returns the model properties as a dict""" result = {} for attr, _ in self.swagger_types.items(): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value )) elif hasattr(value, "to_dict"): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item, value.items() )) else: result[attr] = value if issubclass(ResponseDataWithSinglePort, dict): for key, value in self.items(): result[key] = value return result def to_str(self): """Returns the string representation of the model""" return pprint.pformat(self.to_dict()) def __repr__(self): """For `print` and `pprint`""" return self.to_str() def __eq__(self, other): """Returns true if both objects are equal""" if not isinstance(other, ResponseDataWithSinglePort): return False return self.__dict__ == other.__dict__ def __ne__(self, other): """Returns true if both objects are not equal""" return not self == other class ResponseDataWithSingleVolume(object): """NOTE: This class is auto generated by the swagger code generator program Do not edit the class manually. 
""" """ Attributes: swagger_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ swagger_types = { 'status': 'bool', 'message': 'str', 'error_message': 'str', 'warning': 'str', 'data': 'Volume' } attribute_map = { 'status': 'status', 'message': 'message', 'error_message': 'error_message', 'warning': 'warning', 'data': 'data' } def __init__(self, status=None, message=None, error_message=None, warning=None, data=None): # noqa: E501 """ResponseDataWithSingleVolume - a model defined in Swagger""" # noqa: E501 self._status = None self._message = None self._error_message = None self._warning = None self._data = None self.discriminator = None self.status = status if message is not None: self.message = message if error_message is not None: self.error_message = error_message if warning is not None: self.warning = warning self.data = data @property def status(self): """Gets the status of this ResponseDataWithSingleVolume. # noqa: E501 :return: The status of this ResponseDataWithSingleVolume. # noqa: E501 :rtype: bool """ return self._status @status.setter def status(self, status): """Sets the status of this ResponseDataWithSingleVolume. :param status: The status of this ResponseDataWithSingleVolume. # noqa: E501 :type: bool """ if status is None: raise ValueError("Invalid value for `status`, must not be `None`") # noqa: E501 self._status = status @property def message(self): """Gets the message of this ResponseDataWithSingleVolume. # noqa: E501 :return: The message of this ResponseDataWithSingleVolume. # noqa: E501 :rtype: str """ return self._message @message.setter def message(self, message): """Sets the message of this ResponseDataWithSingleVolume. :param message: The message of this ResponseDataWithSingleVolume. # noqa: E501 :type: str """ self._message = message @property def error_message(self): """Gets the error_message of this ResponseDataWithSingleVolume. # noqa: E501 :return: The error_message of this ResponseDataWithSingleVolume. # noqa: E501 :rtype: str """ return self._error_message @error_message.setter def error_message(self, error_message): """Sets the error_message of this ResponseDataWithSingleVolume. :param error_message: The error_message of this ResponseDataWithSingleVolume. # noqa: E501 :type: str """ self._error_message = error_message @property def warning(self): """Gets the warning of this ResponseDataWithSingleVolume. # noqa: E501 :return: The warning of this ResponseDataWithSingleVolume. # noqa: E501 :rtype: str """ return self._warning @warning.setter def warning(self, warning): """Sets the warning of this ResponseDataWithSingleVolume. :param warning: The warning of this ResponseDataWithSingleVolume. # noqa: E501 :type: str """ self._warning = warning @property def data(self): """Gets the data of this ResponseDataWithSingleVolume. # noqa: E501 :return: The data of this ResponseDataWithSingleVolume. # noqa: E501 :rtype: Volume """ return self._data @data.setter def data(self, data): """Sets the data of this ResponseDataWithSingleVolume. :param data: The data of this ResponseDataWithSingleVolume. 
# noqa: E501 :type: Volume """ if data is None: raise ValueError("Invalid value for `data`, must not be `None`") # noqa: E501 self._data = data def to_dict(self): """Returns the model properties as a dict""" result = {} for attr, _ in self.swagger_types.items(): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value )) elif hasattr(value, "to_dict"): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item, value.items() )) else: result[attr] = value if issubclass(ResponseDataWithSingleVolume, dict): for key, value in self.items(): result[key] = value return result def to_str(self): """Returns the string representation of the model""" return pprint.pformat(self.to_dict()) def __repr__(self): """For `print` and `pprint`""" return self.to_str() def __eq__(self, other): """Returns true if both objects are equal""" if not isinstance(other, ResponseDataWithSingleVolume): return False return self.__dict__ == other.__dict__ def __ne__(self, other): """Returns true if both objects are not equal""" return not self == other class ResponseDpuDriveHierarchy(object): """NOTE: This class is auto generated by the swagger code generator program Do not edit the class manually. """ """ Attributes: swagger_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ swagger_types = { 'status': 'bool', 'message': 'str', 'error_message': 'str', 'warning': 'str', 'data': 'dict(str, NodeDpu)' } attribute_map = { 'status': 'status', 'message': 'message', 'error_message': 'error_message', 'warning': 'warning', 'data': 'data' } def __init__(self, status=None, message=None, error_message=None, warning=None, data=None): # noqa: E501 """ResponseDpuDriveHierarchy - a model defined in Swagger""" # noqa: E501 self._status = None self._message = None self._error_message = None self._warning = None self._data = None self.discriminator = None self.status = status if message is not None: self.message = message if error_message is not None: self.error_message = error_message if warning is not None: self.warning = warning self.data = data @property def status(self): """Gets the status of this ResponseDpuDriveHierarchy. # noqa: E501 :return: The status of this ResponseDpuDriveHierarchy. # noqa: E501 :rtype: bool """ return self._status @status.setter def status(self, status): """Sets the status of this ResponseDpuDriveHierarchy. :param status: The status of this ResponseDpuDriveHierarchy. # noqa: E501 :type: bool """ if status is None: raise ValueError("Invalid value for `status`, must not be `None`") # noqa: E501 self._status = status @property def message(self): """Gets the message of this ResponseDpuDriveHierarchy. # noqa: E501 :return: The message of this ResponseDpuDriveHierarchy. # noqa: E501 :rtype: str """ return self._message @message.setter def message(self, message): """Sets the message of this ResponseDpuDriveHierarchy. :param message: The message of this ResponseDpuDriveHierarchy. # noqa: E501 :type: str """ self._message = message @property def error_message(self): """Gets the error_message of this ResponseDpuDriveHierarchy. # noqa: E501 :return: The error_message of this ResponseDpuDriveHierarchy. 
# noqa: E501 :rtype: str """ return self._error_message @error_message.setter def error_message(self, error_message): """Sets the error_message of this ResponseDpuDriveHierarchy. :param error_message: The error_message of this ResponseDpuDriveHierarchy. # noqa: E501 :type: str """ self._error_message = error_message @property def warning(self): """Gets the warning of this ResponseDpuDriveHierarchy. # noqa: E501 :return: The warning of this ResponseDpuDriveHierarchy. # noqa: E501 :rtype: str """ return self._warning @warning.setter def warning(self, warning): """Sets the warning of this ResponseDpuDriveHierarchy. :param warning: The warning of this ResponseDpuDriveHierarchy. # noqa: E501 :type: str """ self._warning = warning @property def data(self): """Gets the data of this ResponseDpuDriveHierarchy. # noqa: E501 :return: The data of this ResponseDpuDriveHierarchy. # noqa: E501 :rtype: dict(str, NodeDpu) """ return self._data @data.setter def data(self, data): """Sets the data of this ResponseDpuDriveHierarchy. :param data: The data of this ResponseDpuDriveHierarchy. # noqa: E501 :type: dict(str, NodeDpu) """ if data is None: raise ValueError("Invalid value for `data`, must not be `None`") # noqa: E501 self._data = data def to_dict(self): """Returns the model properties as a dict""" result = {} for attr, _ in self.swagger_types.items(): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value )) elif hasattr(value, "to_dict"): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item, value.items() )) else: result[attr] = value if issubclass(ResponseDpuDriveHierarchy, dict): for key, value in self.items(): result[key] = value return result def to_str(self): """Returns the string representation of the model""" return pprint.pformat(self.to_dict()) def __repr__(self): """For `print` and `pprint`""" return self.to_str() def __eq__(self, other): """Returns true if both objects are equal""" if not isinstance(other, ResponseDpuDriveHierarchy): return False return self.__dict__ == other.__dict__ def __ne__(self, other): """Returns true if both objects are not equal""" return not self == other class Smart(object): """NOTE: This class is auto generated by the swagger code generator program Do not edit the class manually. """ """ Attributes: swagger_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. 
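    # Illustrative note (not part of the generated model): the Smart fields below
    # appear to mirror the NVMe SMART / Health Information log page, but every
    # attribute is modeled as a plain string, so callers do any numeric
    # conversion themselves. A minimal sketch, assuming `smart` is a Smart
    # instance with some fields possibly left unset (None):
    #
    #     used_pct = int(smart.percentage_used) if smart.percentage_used else None
    #     hours = int(smart.power_on_hours) if smart.power_on_hours else None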
""" swagger_types = { 'controller_busy_time': 'str', 'host_read_commands': 'str', 'available_spare': 'str', 'critical_composite_temperature_time': 'str', 'host_write_commands': 'str', 'media_and_data_integrity_errors': 'str', 'data_units_written': 'str', 'warning_composite_temperature_time': 'str', 'endurance_group_critical_warning_summary': 'str', 'critical_warning': 'str', 'power_cycles': 'str', 'number_of_error_information_log_entries': 'str', 'percentage_used': 'str', 'power_on_hours': 'str', 'composite_temperature': 'str', 'data_units_read': 'str', 'unsafe_shutdowns': 'str' } attribute_map = { 'controller_busy_time': 'controller_busy_time', 'host_read_commands': 'host_read_commands', 'available_spare': 'available_spare', 'critical_composite_temperature_time': 'critical_composite_temperature_time', # noqa: E501 'host_write_commands': 'host_write_commands', 'media_and_data_integrity_errors': 'media_and_data_integrity_errors', 'data_units_written': 'data_units_written', 'warning_composite_temperature_time': 'warning_composite_temperature_time', # noqa: E501 'endurance_group_critical_warning_summary': 'endurance_group_critical_warning_summary', # noqa: E501 'critical_warning': 'critical_warning', 'power_cycles': 'power_cycles', 'number_of_error_information_log_entries': 'number_of_error_information_log_entries', # noqa: E501 'percentage_used': 'percentage_used', 'power_on_hours': 'power_on_hours', 'composite_temperature': 'composite_temperature', 'data_units_read': 'data_units_read', 'unsafe_shutdowns': 'unsafe_shutdowns' } def __init__(self, controller_busy_time=None, host_read_commands=None, available_spare=None, critical_composite_temperature_time=None, host_write_commands=None, media_and_data_integrity_errors=None, data_units_written=None, warning_composite_temperature_time=None, endurance_group_critical_warning_summary=None, critical_warning=None, power_cycles=None, number_of_error_information_log_entries=None, percentage_used=None, power_on_hours=None, composite_temperature=None, data_units_read=None, unsafe_shutdowns=None): # noqa: E501 """Smart - a model defined in Swagger""" # noqa: E501 self._controller_busy_time = None self._host_read_commands = None self._available_spare = None self._critical_composite_temperature_time = None self._host_write_commands = None self._media_and_data_integrity_errors = None self._data_units_written = None self._warning_composite_temperature_time = None self._endurance_group_critical_warning_summary = None self._critical_warning = None self._power_cycles = None self._number_of_error_information_log_entries = None self._percentage_used = None self._power_on_hours = None self._composite_temperature = None self._data_units_read = None self._unsafe_shutdowns = None self.discriminator = None if controller_busy_time is not None: self.controller_busy_time = controller_busy_time if host_read_commands is not None: self.host_read_commands = host_read_commands if available_spare is not None: self.available_spare = available_spare if critical_composite_temperature_time is not None: self.critical_composite_temperature_time = critical_composite_temperature_time # noqa: E501 if host_write_commands is not None: self.host_write_commands = host_write_commands if media_and_data_integrity_errors is not None: self.media_and_data_integrity_errors = media_and_data_integrity_errors # noqa: E501 if data_units_written is not None: self.data_units_written = data_units_written if warning_composite_temperature_time is not None: self.warning_composite_temperature_time = 
warning_composite_temperature_time # noqa: E501 if endurance_group_critical_warning_summary is not None: self.endurance_group_critical_warning_summary = endurance_group_critical_warning_summary # noqa: E501 if critical_warning is not None: self.critical_warning = critical_warning if power_cycles is not None: self.power_cycles = power_cycles if number_of_error_information_log_entries is not None: self.number_of_error_information_log_entries = number_of_error_information_log_entries # noqa: E501 if percentage_used is not None: self.percentage_used = percentage_used if power_on_hours is not None: self.power_on_hours = power_on_hours if composite_temperature is not None: self.composite_temperature = composite_temperature if data_units_read is not None: self.data_units_read = data_units_read if unsafe_shutdowns is not None: self.unsafe_shutdowns = unsafe_shutdowns @property def controller_busy_time(self): """Gets the controller_busy_time of this Smart. # noqa: E501 :return: The controller_busy_time of this Smart. # noqa: E501 :rtype: str """ return self._controller_busy_time @controller_busy_time.setter def controller_busy_time(self, controller_busy_time): """Sets the controller_busy_time of this Smart. :param controller_busy_time: The controller_busy_time of this Smart. # noqa: E501 :type: str """ self._controller_busy_time = controller_busy_time @property def host_read_commands(self): """Gets the host_read_commands of this Smart. # noqa: E501 :return: The host_read_commands of this Smart. # noqa: E501 :rtype: str """ return self._host_read_commands @host_read_commands.setter def host_read_commands(self, host_read_commands): """Sets the host_read_commands of this Smart. :param host_read_commands: The host_read_commands of this Smart. # noqa: E501 :type: str """ self._host_read_commands = host_read_commands @property def available_spare(self): """Gets the available_spare of this Smart. # noqa: E501 :return: The available_spare of this Smart. # noqa: E501 :rtype: str """ return self._available_spare @available_spare.setter def available_spare(self, available_spare): """Sets the available_spare of this Smart. :param available_spare: The available_spare of this Smart. # noqa: E501 :type: str """ self._available_spare = available_spare @property def critical_composite_temperature_time(self): """Gets the critical_composite_temperature_time of this Smart. # noqa: E501 :return: The critical_composite_temperature_time of this Smart. # noqa: E501 :rtype: str """ return self._critical_composite_temperature_time @critical_composite_temperature_time.setter def critical_composite_temperature_time(self, critical_composite_temperature_time): # noqa: E501 """Sets the critical_composite_temperature_time of this Smart. :param critical_composite_temperature_time: The critical_composite_temperature_time of this Smart. # noqa: E501 :type: str """ self._critical_composite_temperature_time = critical_composite_temperature_time # noqa: E501 @property def host_write_commands(self): """Gets the host_write_commands of this Smart. # noqa: E501 :return: The host_write_commands of this Smart. # noqa: E501 :rtype: str """ return self._host_write_commands @host_write_commands.setter def host_write_commands(self, host_write_commands): """Sets the host_write_commands of this Smart. :param host_write_commands: The host_write_commands of this Smart. 
# noqa: E501 :type: str """ self._host_write_commands = host_write_commands @property def media_and_data_integrity_errors(self): """Gets the media_and_data_integrity_errors of this Smart. # noqa: E501 :return: The media_and_data_integrity_errors of this Smart. # noqa: E501 :rtype: str """ return self._media_and_data_integrity_errors @media_and_data_integrity_errors.setter def media_and_data_integrity_errors(self, media_and_data_integrity_errors): """Sets the media_and_data_integrity_errors of this Smart. :param media_and_data_integrity_errors: The media_and_data_integrity_errors of this Smart. # noqa: E501 :type: str """ self._media_and_data_integrity_errors = media_and_data_integrity_errors @property def data_units_written(self): """Gets the data_units_written of this Smart. # noqa: E501 :return: The data_units_written of this Smart. # noqa: E501 :rtype: str """ return self._data_units_written @data_units_written.setter def data_units_written(self, data_units_written): """Sets the data_units_written of this Smart. :param data_units_written: The data_units_written of this Smart. # noqa: E501 :type: str """ self._data_units_written = data_units_written @property def warning_composite_temperature_time(self): """Gets the warning_composite_temperature_time of this Smart. # noqa: E501 :return: The warning_composite_temperature_time of this Smart. # noqa: E501 :rtype: str """ return self._warning_composite_temperature_time @warning_composite_temperature_time.setter def warning_composite_temperature_time(self, warning_composite_temperature_time): # noqa: E501 """Sets the warning_composite_temperature_time of this Smart. :param warning_composite_temperature_time: The warning_composite_temperature_time of this Smart. # noqa: E501 :type: str """ self._warning_composite_temperature_time = warning_composite_temperature_time # noqa: E501 @property def endurance_group_critical_warning_summary(self): """Gets the endurance_group_critical_warning_summary of this Smart. # noqa: E501 :return: The endurance_group_critical_warning_summary of this Smart. # noqa: E501 :rtype: str """ return self._endurance_group_critical_warning_summary @endurance_group_critical_warning_summary.setter def endurance_group_critical_warning_summary(self, endurance_group_critical_warning_summary): # noqa: E501 """Sets the endurance_group_critical_warning_summary of this Smart. :param endurance_group_critical_warning_summary: The endurance_group_critical_warning_summary of this Smart. # noqa: E501 :type: str """ self._endurance_group_critical_warning_summary = endurance_group_critical_warning_summary # noqa: E501 @property def critical_warning(self): """Gets the critical_warning of this Smart. # noqa: E501 :return: The critical_warning of this Smart. # noqa: E501 :rtype: str """ return self._critical_warning @critical_warning.setter def critical_warning(self, critical_warning): """Sets the critical_warning of this Smart. :param critical_warning: The critical_warning of this Smart. # noqa: E501 :type: str """ self._critical_warning = critical_warning @property def power_cycles(self): """Gets the power_cycles of this Smart. # noqa: E501 :return: The power_cycles of this Smart. # noqa: E501 :rtype: str """ return self._power_cycles @power_cycles.setter def power_cycles(self, power_cycles): """Sets the power_cycles of this Smart. :param power_cycles: The power_cycles of this Smart. 
# noqa: E501 :type: str """ self._power_cycles = power_cycles @property def number_of_error_information_log_entries(self): """Gets the number_of_error_information_log_entries of this Smart. # noqa: E501 :return: The number_of_error_information_log_entries of this Smart. # noqa: E501 :rtype: str """ return self._number_of_error_information_log_entries @number_of_error_information_log_entries.setter def number_of_error_information_log_entries(self, number_of_error_information_log_entries): # noqa: E501 """Sets the number_of_error_information_log_entries of this Smart. :param number_of_error_information_log_entries: The number_of_error_information_log_entries of this Smart. # noqa: E501 :type: str """ self._number_of_error_information_log_entries = number_of_error_information_log_entries # noqa: E501 @property def percentage_used(self): """Gets the percentage_used of this Smart. # noqa: E501 :return: The percentage_used of this Smart. # noqa: E501 :rtype: str """ return self._percentage_used @percentage_used.setter def percentage_used(self, percentage_used): """Sets the percentage_used of this Smart. :param percentage_used: The percentage_used of this Smart. # noqa: E501 :type: str """ self._percentage_used = percentage_used @property def power_on_hours(self): """Gets the power_on_hours of this Smart. # noqa: E501 :return: The power_on_hours of this Smart. # noqa: E501 :rtype: str """ return self._power_on_hours @power_on_hours.setter def power_on_hours(self, power_on_hours): """Sets the power_on_hours of this Smart. :param power_on_hours: The power_on_hours of this Smart. # noqa: E501 :type: str """ self._power_on_hours = power_on_hours @property def composite_temperature(self): """Gets the composite_temperature of this Smart. # noqa: E501 :return: The composite_temperature of this Smart. # noqa: E501 :rtype: str """ return self._composite_temperature @composite_temperature.setter def composite_temperature(self, composite_temperature): """Sets the composite_temperature of this Smart. :param composite_temperature: The composite_temperature of this Smart. # noqa: E501 :type: str """ self._composite_temperature = composite_temperature @property def data_units_read(self): """Gets the data_units_read of this Smart. # noqa: E501 :return: The data_units_read of this Smart. # noqa: E501 :rtype: str """ return self._data_units_read @data_units_read.setter def data_units_read(self, data_units_read): """Sets the data_units_read of this Smart. :param data_units_read: The data_units_read of this Smart. # noqa: E501 :type: str """ self._data_units_read = data_units_read @property def unsafe_shutdowns(self): """Gets the unsafe_shutdowns of this Smart. # noqa: E501 :return: The unsafe_shutdowns of this Smart. # noqa: E501 :rtype: str """ return self._unsafe_shutdowns @unsafe_shutdowns.setter def unsafe_shutdowns(self, unsafe_shutdowns): """Sets the unsafe_shutdowns of this Smart. :param unsafe_shutdowns: The unsafe_shutdowns of this Smart. 
# noqa: E501 :type: str """ self._unsafe_shutdowns = unsafe_shutdowns def to_dict(self): """Returns the model properties as a dict""" result = {} for attr, _ in self.swagger_types.items(): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value )) elif hasattr(value, "to_dict"): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item, value.items() )) else: result[attr] = value if issubclass(Smart, dict): for key, value in self.items(): result[key] = value return result def to_str(self): """Returns the string representation of the model""" return pprint.pformat(self.to_dict()) def __repr__(self): """For `print` and `pprint`""" return self.to_str() def __eq__(self, other): """Returns true if both objects are equal""" if not isinstance(other, Smart): return False return self.__dict__ == other.__dict__ def __ne__(self, other): """Returns true if both objects are not equal""" return not self == other class SpaceAllocationPolicy(object): """NOTE: This class is auto generated by the swagger code generator program Do not edit the class manually. """ """ allowed enum values """ BALANCED = "balanced" WRITE_OPTIMIZED = "write_optimized" CAPACITY_OPTIMIZED = "capacity_optimized" """ Attributes: swagger_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ swagger_types = { } attribute_map = { } def __init__(self): # noqa: E501 """SpaceAllocationPolicy - a model defined in Swagger""" # noqa: E501 self.discriminator = None def to_dict(self): """Returns the model properties as a dict""" result = {} for attr, _ in self.swagger_types.items(): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value )) elif hasattr(value, "to_dict"): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item, value.items() )) else: result[attr] = value if issubclass(SpaceAllocationPolicy, dict): for key, value in self.items(): result[key] = value return result def to_str(self): """Returns the string representation of the model""" return pprint.pformat(self.to_dict()) def __repr__(self): """For `print` and `pprint`""" return self.to_str() def __eq__(self, other): """Returns true if both objects are equal""" if not isinstance(other, SpaceAllocationPolicy): return False return self.__dict__ == other.__dict__ def __ne__(self, other): """Returns true if both objects are not equal""" return not self == other class SuccessResponseFields(object): """NOTE: This class is auto generated by the swagger code generator program Do not edit the class manually. """ """ Attributes: swagger_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. 
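    # Illustrative note (not part of the generated model): SpaceAllocationPolicy
    # is a bare enum holder -- it carries no instance attributes, only the
    # allowed string constants. A minimal sketch of how the constants might be
    # used when building a request body:
    #
    #     policy = SpaceAllocationPolicy.WRITE_OPTIMIZED   # "write_optimized"
    #     body = {"space_allocation_policy": policy}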
""" swagger_types = { 'status': 'bool', 'message': 'str' } attribute_map = { 'status': 'status', 'message': 'message' } def __init__(self, status=True, message=None): # noqa: E501 """SuccessResponseFields - a model defined in Swagger""" # noqa: E501 self._status = None self._message = None self.discriminator = None self.status = status self.message = message @property def status(self): """Gets the status of this SuccessResponseFields. # noqa: E501 :return: The status of this SuccessResponseFields. # noqa: E501 :rtype: bool """ return self._status @status.setter def status(self, status): """Sets the status of this SuccessResponseFields. :param status: The status of this SuccessResponseFields. # noqa: E501 :type: bool """ if status is None: raise ValueError("Invalid value for `status`, must not be `None`") # noqa: E501 self._status = status @property def message(self): """Gets the message of this SuccessResponseFields. # noqa: E501 :return: The message of this SuccessResponseFields. # noqa: E501 :rtype: str """ return self._message @message.setter def message(self, message): """Sets the message of this SuccessResponseFields. :param message: The message of this SuccessResponseFields. # noqa: E501 :type: str """ if message is None: raise ValueError("Invalid value for `message`, must not be `None`") # noqa: E501 self._message = message def to_dict(self): """Returns the model properties as a dict""" result = {} for attr, _ in self.swagger_types.items(): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value )) elif hasattr(value, "to_dict"): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item, value.items() )) else: result[attr] = value if issubclass(SuccessResponseFields, dict): for key, value in self.items(): result[key] = value return result def to_str(self): """Returns the string representation of the model""" return pprint.pformat(self.to_dict()) def __repr__(self): """For `print` and `pprint`""" return self.to_str() def __eq__(self, other): """Returns true if both objects are equal""" if not isinstance(other, SuccessResponseFields): return False return self.__dict__ == other.__dict__ def __ne__(self, other): """Returns true if both objects are not equal""" return not self == other class Transport(object): """NOTE: This class is auto generated by the swagger code generator program Do not edit the class manually. """ """ allowed enum values """ RDS = "RDS" PCI = "PCI" PCI_BDF = "PCI_BDF" TCP = "TCP" """ Attributes: swagger_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. 
""" swagger_types = { } attribute_map = { } def __init__(self): # noqa: E501 """Transport - a model defined in Swagger""" # noqa: E501 self.discriminator = None def to_dict(self): """Returns the model properties as a dict""" result = {} for attr, _ in self.swagger_types.items(): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value )) elif hasattr(value, "to_dict"): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item, value.items() )) else: result[attr] = value if issubclass(Transport, dict): for key, value in self.items(): result[key] = value return result def to_str(self): """Returns the string representation of the model""" return pprint.pformat(self.to_dict()) def __repr__(self): """For `print` and `pprint`""" return self.to_str() def __eq__(self, other): """Returns true if both objects are equal""" if not isinstance(other, Transport): return False return self.__dict__ == other.__dict__ def __ne__(self, other): """Returns true if both objects are not equal""" return not self == other class UserVolumeType(object): """NOTE: This class is auto generated by the swagger code generator program Do not edit the class manually. """ """ allowed enum values """ REPLICA = "VOL_TYPE_BLK_REPLICA" EC = "VOL_TYPE_BLK_EC" RF1 = "VOL_TYPE_BLK_RF1" LOCAL_THIN = "VOL_TYPE_BLK_LOCAL_THIN" """ Attributes: swagger_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ swagger_types = { } attribute_map = { } def __init__(self): # noqa: E501 """UserVolumeType - a model defined in Swagger""" # noqa: E501 self.discriminator = None def to_dict(self): """Returns the model properties as a dict""" result = {} for attr, _ in self.swagger_types.items(): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value )) elif hasattr(value, "to_dict"): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item, value.items() )) else: result[attr] = value if issubclass(UserVolumeType, dict): for key, value in self.items(): result[key] = value return result def to_str(self): """Returns the string representation of the model""" return pprint.pformat(self.to_dict()) def __repr__(self): """For `print` and `pprint`""" return self.to_str() def __eq__(self, other): """Returns true if both objects are equal""" if not isinstance(other, UserVolumeType): return False return self.__dict__ == other.__dict__ def __ne__(self, other): """Returns true if both objects are not equal""" return not self == other class VolumeQos(object): """NOTE: This class is auto generated by the swagger code generator program Do not edit the class manually. """ """ Attributes: swagger_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. 
""" swagger_types = { 'band_name': 'str', 'band_index': 'int', 'min_read_iops': 'int', 'max_read_iops': 'int', 'min_write_iops': 'int', 'max_write_iops': 'int' } attribute_map = { 'band_name': 'band_name', 'band_index': 'band_index', 'min_read_iops': 'min_read_iops', 'max_read_iops': 'max_read_iops', 'min_write_iops': 'min_write_iops', 'max_write_iops': 'max_write_iops' } def __init__(self, band_name=None, band_index=None, min_read_iops=None, max_read_iops=None, min_write_iops=None, max_write_iops=None): # noqa: E501 """VolumeQos - a model defined in Swagger""" # noqa: E501 self._band_name = None self._band_index = None self._min_read_iops = None self._max_read_iops = None self._min_write_iops = None self._max_write_iops = None self.discriminator = None if band_name is not None: self.band_name = band_name self.band_index = band_index if min_read_iops is not None: self.min_read_iops = min_read_iops self.max_read_iops = max_read_iops if min_write_iops is not None: self.min_write_iops = min_write_iops if max_write_iops is not None: self.max_write_iops = max_write_iops @property def band_name(self): """Gets the band_name of this VolumeQos. # noqa: E501 e.g. Gold, Silver or Bronze # noqa: E501 :return: The band_name of this VolumeQos. # noqa: E501 :rtype: str """ return self._band_name @band_name.setter def band_name(self, band_name): """Sets the band_name of this VolumeQos. e.g. Gold, Silver or Bronze # noqa: E501 :param band_name: The band_name of this VolumeQos. # noqa: E501 :type: str """ self._band_name = band_name @property def band_index(self): """Gets the band_index of this VolumeQos. # noqa: E501 :return: The band_index of this VolumeQos. # noqa: E501 :rtype: int """ return self._band_index @band_index.setter def band_index(self, band_index): """Sets the band_index of this VolumeQos. :param band_index: The band_index of this VolumeQos. # noqa: E501 :type: int """ if band_index is None: raise ValueError("Invalid value for `band_index`, must not be `None`") # noqa: E501 self._band_index = band_index @property def min_read_iops(self): """Gets the min_read_iops of this VolumeQos. # noqa: E501 :return: The min_read_iops of this VolumeQos. # noqa: E501 :rtype: int """ return self._min_read_iops @min_read_iops.setter def min_read_iops(self, min_read_iops): """Sets the min_read_iops of this VolumeQos. :param min_read_iops: The min_read_iops of this VolumeQos. # noqa: E501 :type: int """ self._min_read_iops = min_read_iops @property def max_read_iops(self): """Gets the max_read_iops of this VolumeQos. # noqa: E501 :return: The max_read_iops of this VolumeQos. # noqa: E501 :rtype: int """ return self._max_read_iops @max_read_iops.setter def max_read_iops(self, max_read_iops): """Sets the max_read_iops of this VolumeQos. :param max_read_iops: The max_read_iops of this VolumeQos. # noqa: E501 :type: int """ if max_read_iops is None: raise ValueError("Invalid value for `max_read_iops`, must not be `None`") # noqa: E501 self._max_read_iops = max_read_iops @property def min_write_iops(self): """Gets the min_write_iops of this VolumeQos. # noqa: E501 :return: The min_write_iops of this VolumeQos. # noqa: E501 :rtype: int """ return self._min_write_iops @min_write_iops.setter def min_write_iops(self, min_write_iops): """Sets the min_write_iops of this VolumeQos. :param min_write_iops: The min_write_iops of this VolumeQos. # noqa: E501 :type: int """ self._min_write_iops = min_write_iops @property def max_write_iops(self): """Gets the max_write_iops of this VolumeQos. 
# noqa: E501 :return: The max_write_iops of this VolumeQos. # noqa: E501 :rtype: int """ return self._max_write_iops @max_write_iops.setter def max_write_iops(self, max_write_iops): """Sets the max_write_iops of this VolumeQos. :param max_write_iops: The max_write_iops of this VolumeQos. # noqa: E501 :type: int """ self._max_write_iops = max_write_iops def to_dict(self): """Returns the model properties as a dict""" result = {} for attr, _ in self.swagger_types.items(): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value )) elif hasattr(value, "to_dict"): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item, value.items() )) else: result[attr] = value if issubclass(VolumeQos, dict): for key, value in self.items(): result[key] = value return result def to_str(self): """Returns the string representation of the model""" return pprint.pformat(self.to_dict()) def __repr__(self): """For `print` and `pprint`""" return self.to_str() def __eq__(self, other): """Returns true if both objects are equal""" if not isinstance(other, VolumeQos): return False return self.__dict__ == other.__dict__ def __ne__(self, other): """Returns true if both objects are not equal""" return not self == other class VolumeStats(object): """NOTE: This class is auto generated by the swagger code generator program Do not edit the class manually. """ """ Attributes: swagger_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ swagger_types = { 'physical_usage': 'int', 'physical_writes': 'int', 'stats': 'VolumeStatsStats' } attribute_map = { 'physical_usage': 'physical_usage', 'physical_writes': 'physical_writes', 'stats': 'stats' } def __init__(self, physical_usage=None, physical_writes=None, stats=None): # noqa: E501 """VolumeStats - a model defined in Swagger""" # noqa: E501 self._physical_usage = None self._physical_writes = None self._stats = None self.discriminator = None if physical_usage is not None: self.physical_usage = physical_usage if physical_writes is not None: self.physical_writes = physical_writes if stats is not None: self.stats = stats @property def physical_usage(self): """Gets the physical_usage of this VolumeStats. # noqa: E501 :return: The physical_usage of this VolumeStats. # noqa: E501 :rtype: int """ return self._physical_usage @physical_usage.setter def physical_usage(self, physical_usage): """Sets the physical_usage of this VolumeStats. :param physical_usage: The physical_usage of this VolumeStats. # noqa: E501 :type: int """ self._physical_usage = physical_usage @property def physical_writes(self): """Gets the physical_writes of this VolumeStats. # noqa: E501 :return: The physical_writes of this VolumeStats. # noqa: E501 :rtype: int """ return self._physical_writes @physical_writes.setter def physical_writes(self, physical_writes): """Sets the physical_writes of this VolumeStats. :param physical_writes: The physical_writes of this VolumeStats. # noqa: E501 :type: int """ self._physical_writes = physical_writes @property def stats(self): """Gets the stats of this VolumeStats. # noqa: E501 :return: The stats of this VolumeStats. # noqa: E501 :rtype: VolumeStatsStats """ return self._stats @stats.setter def stats(self, stats): """Sets the stats of this VolumeStats. 
:param stats: The stats of this VolumeStats. # noqa: E501 :type: VolumeStatsStats """ self._stats = stats def to_dict(self): """Returns the model properties as a dict""" result = {} for attr, _ in self.swagger_types.items(): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value )) elif hasattr(value, "to_dict"): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item, value.items() )) else: result[attr] = value if issubclass(VolumeStats, dict): for key, value in self.items(): result[key] = value return result def to_str(self): """Returns the string representation of the model""" return pprint.pformat(self.to_dict()) def __repr__(self): """For `print` and `pprint`""" return self.to_str() def __eq__(self, other): """Returns true if both objects are equal""" if not isinstance(other, VolumeStats): return False return self.__dict__ == other.__dict__ def __ne__(self, other): """Returns true if both objects are not equal""" return not self == other class VolumeTypes(object): """NOTE: This class is auto generated by the swagger code generator program Do not edit the class manually. """ """ allowed enum values """ LOCAL_THIN = "VOL_TYPE_BLK_LOCAL_THIN" RF1 = "VOL_TYPE_BLK_RF1" RDS = "VOL_TYPE_BLK_RDS" LSV = "VOL_TYPE_BLK_LSV" NV_MEMORY = "VOL_TYPE_BLK_NV_MEMORY" FILE = "VOL_TYPE_BLK_FILE" EC = "VOL_TYPE_BLK_EC" REPLICA = "VOL_TYPE_BLK_REPLICA" STRIPE = "VOL_TYPE_BLK_STRIPE" CONCAT = "VOL_TYPE_BLK_CONCAT" PART_VOL = "VOL_TYPE_BLK_PART_VOL" DURABLE = "VOL_TYPE_BLK_DURABLE" """ Attributes: swagger_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ swagger_types = { } attribute_map = { } def __init__(self): # noqa: E501 """VolumeTypes - a model defined in Swagger""" # noqa: E501 self.discriminator = None def to_dict(self): """Returns the model properties as a dict""" result = {} for attr, _ in self.swagger_types.items(): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value )) elif hasattr(value, "to_dict"): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item, value.items() )) else: result[attr] = value if issubclass(VolumeTypes, dict): for key, value in self.items(): result[key] = value return result def to_str(self): """Returns the string representation of the model""" return pprint.pformat(self.to_dict()) def __repr__(self): """For `print` and `pprint`""" return self.to_str() def __eq__(self, other): """Returns true if both objects are equal""" if not isinstance(other, VolumeTypes): return False return self.__dict__ == other.__dict__ def __ne__(self, other): """Returns true if both objects are not equal""" return not self == other class VolumeUpdateOp(object): """NOTE: This class is auto generated by the swagger code generator program Do not edit the class manually. 
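    # Illustrative note (not part of the generated model): VolumeStats nests a
    # VolumeStatsStats object under `stats`, and to_dict() recurses into nested
    # models automatically. A minimal sketch, assuming `vs` is a VolumeStats
    # instance taken from a Volume:
    #
    #     usage = vs.physical_usage     # int, may be None if unset
    #     flattened = vs.to_dict()      # nested stats become a plain dict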
""" """ allowed enum values """ UNMOUNT = "UNMOUNT" MOUNT = "MOUNT" MARK_FAIL = "MARK_FAIL" UPDATE_VOLUME_DPU = "UPDATE_VOLUME_DPU" RESYNC = "RESYNC" UPDATE_STATE = "UPDATE_STATE" UPDATE_CAPACITY = "UPDATE_CAPACITY" UPDATE_PROPERTIES = "UPDATE_PROPERTIES" INJECT_FAILURE = "INJECT_FAILURE" RENAME_VOLUME = "RENAME_VOLUME" """ Attributes: swagger_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ swagger_types = { } attribute_map = { } def __init__(self): # noqa: E501 """VolumeUpdateOp - a model defined in Swagger""" # noqa: E501 self.discriminator = None def to_dict(self): """Returns the model properties as a dict""" result = {} for attr, _ in self.swagger_types.items(): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value )) elif hasattr(value, "to_dict"): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item, value.items() )) else: result[attr] = value if issubclass(VolumeUpdateOp, dict): for key, value in self.items(): result[key] = value return result def to_str(self): """Returns the string representation of the model""" return pprint.pformat(self.to_dict()) def __repr__(self): """For `print` and `pprint`""" return self.to_str() def __eq__(self, other): """Returns true if both objects are equal""" if not isinstance(other, VolumeUpdateOp): return False return self.__dict__ == other.__dict__ def __ne__(self, other): """Returns true if both objects are not equal""" return not self == other class Volume(object): """NOTE: This class is auto generated by the swagger code generator program Do not edit the class manually. """ """ Attributes: swagger_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. 
""" swagger_types = { 'name': 'str', 'uuid': 'str', 'nguid': 'str', 'drive_uuid': 'str', 'type': 'str', 'pool': 'str', 'dpu': 'str', 'secy_dpu': 'str', 'capacity': 'int', 'compress': 'bool', 'encrypt': 'bool', 'zip_effort': 'ZipEffort', 'crc_enable': 'bool', 'snap_support': 'bool', 'crc_type': 'str', 'is_clone': 'bool', 'clone_source_volume_uuid': 'str', 'state': 'ResourceState', 'clone_source_volume_state': 'ResourceState', 'version': 'str', 'failed_uuids': 'list[str]', 'num_failed_plexes': 'int', 'rebuild_state': 'RebuildState', 'rebuild_percent': 'int', 'spare_vol': 'str', 'subsys_nqn': 'str', 'qos': 'VolumeQos', 'ports': 'MapOfPorts', 'src_vols': 'list[str]', 'durability_scheme': 'str', 'stats': 'VolumeStats', 'physical_capacity': 'int', 'space_allocation_policy': 'SpaceAllocationPolicy', 'additional_fields': 'AdditionalFields', 'created_at': 'datetime', 'modified_at': 'datetime', 'block_size': 'BlockSize', 'operations': 'list[Operation]', 'fault_domain_id': 'str', 'volume_type': 'UserVolumeType' } attribute_map = { 'name': 'name', 'uuid': 'uuid', 'nguid': 'nguid', 'drive_uuid': 'drive_uuid', 'type': 'type', 'pool': 'pool', 'dpu': 'dpu', 'secy_dpu': 'secy_dpu', 'capacity': 'capacity', 'compress': 'compress', 'encrypt': 'encrypt', 'zip_effort': 'zip_effort', 'crc_enable': 'crc_enable', 'snap_support': 'snap_support', 'crc_type': 'crc_type', 'is_clone': 'is_clone', 'clone_source_volume_uuid': 'clone_source_volume_uuid', 'state': 'state', 'clone_source_volume_state': 'clone_source_volume_state', 'version': 'version', 'failed_uuids': 'failed_uuids', 'num_failed_plexes': 'num_failed_plexes', 'rebuild_state': 'rebuild_state', 'rebuild_percent': 'rebuild_percent', 'spare_vol': 'spare_vol', 'subsys_nqn': 'subsys_nqn', 'qos': 'qos', 'ports': 'ports', 'src_vols': 'src_vols', 'durability_scheme': 'durability_scheme', 'stats': 'stats', 'physical_capacity': 'physical_capacity', 'space_allocation_policy': 'space_allocation_policy', 'additional_fields': 'additional_fields', 'created_at': 'created_at', 'modified_at': 'modified_at', 'block_size': 'block_size', 'operations': 'operations', 'fault_domain_id': 'fault_domain_id', 'volume_type': 'volume_type' } def __init__(self, name=None, uuid=None, nguid=None, drive_uuid=None, type=None, pool=None, dpu=None, secy_dpu=None, capacity=None, compress=None, encrypt=None, zip_effort=None, crc_enable=None, snap_support=None, crc_type='nocrc', is_clone=None, clone_source_volume_uuid=None, state=None, clone_source_volume_state=None, version=None, failed_uuids=None, num_failed_plexes=None, rebuild_state=None, rebuild_percent=None, spare_vol=None, subsys_nqn=None, qos=None, ports=None, src_vols=None, durability_scheme=None, stats=None, physical_capacity=None, space_allocation_policy=None, additional_fields=None, created_at=None, modified_at=None, block_size=None, operations=None, fault_domain_id=None, volume_type=None): # noqa: E501,C901 """Volume - a model defined in Swagger""" # noqa: E501 self._name = None self._uuid = None self._nguid = None self._drive_uuid = None self._type = None self._pool = None self._dpu = None self._secy_dpu = None self._capacity = None self._compress = None self._encrypt = None self._zip_effort = None self._crc_enable = None self._snap_support = None self._crc_type = None self._is_clone = None self._clone_source_volume_uuid = None self._state = None self._clone_source_volume_state = None self._version = None self._failed_uuids = None self._num_failed_plexes = None self._rebuild_state = None self._rebuild_percent = None self._spare_vol = 
None self._subsys_nqn = None self._qos = None self._ports = None self._src_vols = None self._durability_scheme = None self._stats = None self._physical_capacity = None self._space_allocation_policy = None self._additional_fields = None self._created_at = None self._modified_at = None self._block_size = None self._operations = None self._fault_domain_id = None self._volume_type = None self.discriminator = None if name is not None: self.name = name self.uuid = uuid if nguid is not None: self.nguid = nguid if drive_uuid is not None: self.drive_uuid = drive_uuid self.type = type if pool is not None: self.pool = pool if dpu is not None: self.dpu = dpu if secy_dpu is not None: self.secy_dpu = secy_dpu if capacity is not None: self.capacity = capacity if compress is not None: self.compress = compress if encrypt is not None: self.encrypt = encrypt if zip_effort is not None: self.zip_effort = zip_effort if crc_enable is not None: self.crc_enable = crc_enable if snap_support is not None: self.snap_support = snap_support if crc_type is not None: self.crc_type = crc_type if is_clone is not None: self.is_clone = is_clone if clone_source_volume_uuid is not None: self.clone_source_volume_uuid = clone_source_volume_uuid if state is not None: self.state = state if clone_source_volume_state is not None: self.clone_source_volume_state = clone_source_volume_state if version is not None: self.version = version if failed_uuids is not None: self.failed_uuids = failed_uuids if num_failed_plexes is not None: self.num_failed_plexes = num_failed_plexes if rebuild_state is not None: self.rebuild_state = rebuild_state if rebuild_percent is not None: self.rebuild_percent = rebuild_percent if spare_vol is not None: self.spare_vol = spare_vol self.subsys_nqn = subsys_nqn if qos is not None: self.qos = qos if ports is not None: self.ports = ports if src_vols is not None: self.src_vols = src_vols if durability_scheme is not None: self.durability_scheme = durability_scheme if stats is not None: self.stats = stats self.physical_capacity = physical_capacity if space_allocation_policy is not None: self.space_allocation_policy = space_allocation_policy if additional_fields is not None: self.additional_fields = additional_fields if created_at is not None: self.created_at = created_at if modified_at is not None: self.modified_at = modified_at if block_size is not None: self.block_size = block_size if operations is not None: self.operations = operations if fault_domain_id is not None: self.fault_domain_id = fault_domain_id if volume_type is not None: self.volume_type = volume_type @property def name(self): """Gets the name of this Volume. # noqa: E501 user specified name of volume # noqa: E501 :return: The name of this Volume. # noqa: E501 :rtype: str """ return self._name @name.setter def name(self, name): """Sets the name of this Volume. user specified name of volume # noqa: E501 :param name: The name of this Volume. # noqa: E501 :type: str """ self._name = name @property def uuid(self): """Gets the uuid of this Volume. # noqa: E501 assigned by FC # noqa: E501 :return: The uuid of this Volume. # noqa: E501 :rtype: str """ return self._uuid @uuid.setter def uuid(self, uuid): """Sets the uuid of this Volume. assigned by FC # noqa: E501 :param uuid: The uuid of this Volume. # noqa: E501 :type: str """ if uuid is None: raise ValueError("Invalid value for `uuid`, must not be `None`") # noqa: E501 self._uuid = uuid @property def nguid(self): """Gets the nguid of this Volume. 
# noqa: E501 assigned by FC # noqa: E501 :return: The nguid of this Volume. # noqa: E501 :rtype: str """ return self._nguid @nguid.setter def nguid(self, nguid): """Sets the nguid of this Volume. assigned by FC # noqa: E501 :param nguid: The nguid of this Volume. # noqa: E501 :type: str """ self._nguid = nguid @property def drive_uuid(self): """Gets the drive_uuid of this Volume. # noqa: E501 :return: The drive_uuid of this Volume. # noqa: E501 :rtype: str """ return self._drive_uuid @drive_uuid.setter def drive_uuid(self, drive_uuid): """Sets the drive_uuid of this Volume. :param drive_uuid: The drive_uuid of this Volume. # noqa: E501 :type: str """ self._drive_uuid = drive_uuid @property def type(self): """Gets the type of this Volume. # noqa: E501 :return: The type of this Volume. # noqa: E501 :rtype: str """ return self._type @type.setter def type(self, type): """Sets the type of this Volume. :param type: The type of this Volume. # noqa: E501 :type: str """ if type is None: raise ValueError("Invalid value for `type`, must not be `None`") # noqa: E501 self._type = type @property def pool(self): """Gets the pool of this Volume. # noqa: E501 :return: The pool of this Volume. # noqa: E501 :rtype: str """ return self._pool @pool.setter def pool(self, pool): """Sets the pool of this Volume. :param pool: The pool of this Volume. # noqa: E501 :type: str """ self._pool = pool @property def dpu(self): """Gets the dpu of this Volume. # noqa: E501 :return: The dpu of this Volume. # noqa: E501 :rtype: str """ return self._dpu @dpu.setter def dpu(self, dpu): """Sets the dpu of this Volume. :param dpu: The dpu of this Volume. # noqa: E501 :type: str """ self._dpu = dpu @property def secy_dpu(self): """Gets the secy_dpu of this Volume. # noqa: E501 secondary dpu (valid for durable volume) # noqa: E501 :return: The secy_dpu of this Volume. # noqa: E501 :rtype: str """ return self._secy_dpu @secy_dpu.setter def secy_dpu(self, secy_dpu): """Sets the secy_dpu of this Volume. secondary dpu (valid for durable volume) # noqa: E501 :param secy_dpu: The secy_dpu of this Volume. # noqa: E501 :type: str """ self._secy_dpu = secy_dpu @property def capacity(self): """Gets the capacity of this Volume. # noqa: E501 :return: The capacity of this Volume. # noqa: E501 :rtype: int """ return self._capacity @capacity.setter def capacity(self, capacity): """Sets the capacity of this Volume. :param capacity: The capacity of this Volume. # noqa: E501 :type: int """ self._capacity = capacity @property def compress(self): """Gets the compress of this Volume. # noqa: E501 :return: The compress of this Volume. # noqa: E501 :rtype: bool """ return self._compress @compress.setter def compress(self, compress): """Sets the compress of this Volume. :param compress: The compress of this Volume. # noqa: E501 :type: bool """ self._compress = compress @property def encrypt(self): """Gets the encrypt of this Volume. # noqa: E501 :return: The encrypt of this Volume. # noqa: E501 :rtype: bool """ return self._encrypt @encrypt.setter def encrypt(self, encrypt): """Sets the encrypt of this Volume. :param encrypt: The encrypt of this Volume. # noqa: E501 :type: bool """ self._encrypt = encrypt @property def zip_effort(self): """Gets the zip_effort of this Volume. # noqa: E501 :return: The zip_effort of this Volume. # noqa: E501 :rtype: ZipEffort """ return self._zip_effort @zip_effort.setter def zip_effort(self, zip_effort): """Sets the zip_effort of this Volume. :param zip_effort: The zip_effort of this Volume. 
# noqa: E501 :type: ZipEffort """ self._zip_effort = zip_effort @property def crc_enable(self): """Gets the crc_enable of this Volume. # noqa: E501 :return: The crc_enable of this Volume. # noqa: E501 :rtype: bool """ return self._crc_enable @crc_enable.setter def crc_enable(self, crc_enable): """Sets the crc_enable of this Volume. :param crc_enable: The crc_enable of this Volume. # noqa: E501 :type: bool """ self._crc_enable = crc_enable @property def snap_support(self): """Gets the snap_support of this Volume. # noqa: E501 :return: The snap_support of this Volume. # noqa: E501 :rtype: bool """ return self._snap_support @snap_support.setter def snap_support(self, snap_support): """Sets the snap_support of this Volume. :param snap_support: The snap_support of this Volume. # noqa: E501 :type: bool """ self._snap_support = snap_support @property def crc_type(self): """Gets the crc_type of this Volume. # noqa: E501 :return: The crc_type of this Volume. # noqa: E501 :rtype: str """ return self._crc_type @crc_type.setter def crc_type(self, crc_type): """Sets the crc_type of this Volume. :param crc_type: The crc_type of this Volume. # noqa: E501 :type: str """ allowed_values = ["crc16", "crc32", "crc32c", "crc64", "nocrc"] # noqa: E501 if crc_type not in allowed_values: raise ValueError( "Invalid value for `crc_type` ({0}), must be one of {1}" # noqa: E501 .format(crc_type, allowed_values) ) self._crc_type = crc_type @property def is_clone(self): """Gets the is_clone of this Volume. # noqa: E501 :return: The is_clone of this Volume. # noqa: E501 :rtype: bool """ return self._is_clone @is_clone.setter def is_clone(self, is_clone): """Sets the is_clone of this Volume. :param is_clone: The is_clone of this Volume. # noqa: E501 :type: bool """ self._is_clone = is_clone @property def clone_source_volume_uuid(self): """Gets the clone_source_volume_uuid of this Volume. # noqa: E501 :return: The clone_source_volume_uuid of this Volume. # noqa: E501 :rtype: str """ return self._clone_source_volume_uuid @clone_source_volume_uuid.setter def clone_source_volume_uuid(self, clone_source_volume_uuid): """Sets the clone_source_volume_uuid of this Volume. :param clone_source_volume_uuid: The clone_source_volume_uuid of this Volume. # noqa: E501 :type: str """ self._clone_source_volume_uuid = clone_source_volume_uuid @property def state(self): """Gets the state of this Volume. # noqa: E501 :return: The state of this Volume. # noqa: E501 :rtype: ResourceState """ return self._state @state.setter def state(self, state): """Sets the state of this Volume. :param state: The state of this Volume. # noqa: E501 :type: ResourceState """ self._state = state @property def clone_source_volume_state(self): """Gets the clone_source_volume_state of this Volume. # noqa: E501 :return: The clone_source_volume_state of this Volume. # noqa: E501 :rtype: ResourceState """ return self._clone_source_volume_state @clone_source_volume_state.setter def clone_source_volume_state(self, clone_source_volume_state): """Sets the clone_source_volume_state of this Volume. :param clone_source_volume_state: The clone_source_volume_state of this Volume. # noqa: E501 :type: ResourceState """ self._clone_source_volume_state = clone_source_volume_state @property def version(self): """Gets the version of this Volume. # noqa: E501 :return: The version of this Volume. # noqa: E501 :rtype: str """ return self._version @version.setter def version(self, version): """Sets the version of this Volume. :param version: The version of this Volume. 
# noqa: E501 :type: str """ self._version = version @property def failed_uuids(self): """Gets the failed_uuids of this Volume. # noqa: E501 list of uuids of failed data/parity partitions (valid for durable volumes) # noqa: E501 :return: The failed_uuids of this Volume. # noqa: E501 :rtype: list[str] """ return self._failed_uuids @failed_uuids.setter def failed_uuids(self, failed_uuids): """Sets the failed_uuids of this Volume. list of uuids of failed data/parity partitions (valid for durable volumes) # noqa: E501 :param failed_uuids: The failed_uuids of this Volume. # noqa: E501 :type: list[str] """ self._failed_uuids = failed_uuids @property def num_failed_plexes(self): """Gets the num_failed_plexes of this Volume. # noqa: E501 number of failed data/parity partitions (valid for durable volumes) # noqa: E501 :return: The num_failed_plexes of this Volume. # noqa: E501 :rtype: int """ return self._num_failed_plexes @num_failed_plexes.setter def num_failed_plexes(self, num_failed_plexes): """Sets the num_failed_plexes of this Volume. number of failed data/parity partitions (valid for durable volumes) # noqa: E501 :param num_failed_plexes: The num_failed_plexes of this Volume. # noqa: E501 :type: int """ self._num_failed_plexes = num_failed_plexes @property def rebuild_state(self): """Gets the rebuild_state of this Volume. # noqa: E501 :return: The rebuild_state of this Volume. # noqa: E501 :rtype: RebuildState """ return self._rebuild_state @rebuild_state.setter def rebuild_state(self, rebuild_state): """Sets the rebuild_state of this Volume. :param rebuild_state: The rebuild_state of this Volume. # noqa: E501 :type: RebuildState """ self._rebuild_state = rebuild_state @property def rebuild_percent(self): """Gets the rebuild_percent of this Volume. # noqa: E501 :return: The rebuild_percent of this Volume. # noqa: E501 :rtype: int """ return self._rebuild_percent @rebuild_percent.setter def rebuild_percent(self, rebuild_percent): """Sets the rebuild_percent of this Volume. :param rebuild_percent: The rebuild_percent of this Volume. # noqa: E501 :type: int """ if rebuild_percent is not None and rebuild_percent > 100: # noqa: E501 raise ValueError("Invalid value for `rebuild_percent`, must be a value less than or equal to `100`") # noqa: E501 if rebuild_percent is not None and rebuild_percent < 0: # noqa: E501 raise ValueError("Invalid value for `rebuild_percent`, must be a value greater than or equal to `0`") # noqa: E501 self._rebuild_percent = rebuild_percent @property def spare_vol(self): """Gets the spare_vol of this Volume. # noqa: E501 :return: The spare_vol of this Volume. # noqa: E501 :rtype: str """ return self._spare_vol @spare_vol.setter def spare_vol(self, spare_vol): """Sets the spare_vol of this Volume. :param spare_vol: The spare_vol of this Volume. # noqa: E501 :type: str """ self._spare_vol = spare_vol @property def subsys_nqn(self): """Gets the subsys_nqn of this Volume. # noqa: E501 :return: The subsys_nqn of this Volume. # noqa: E501 :rtype: str """ return self._subsys_nqn @subsys_nqn.setter def subsys_nqn(self, subsys_nqn): """Sets the subsys_nqn of this Volume. :param subsys_nqn: The subsys_nqn of this Volume. # noqa: E501 :type: str """ if subsys_nqn is None: raise ValueError("Invalid value for `subsys_nqn`, must not be `None`") # noqa: E501 self._subsys_nqn = subsys_nqn @property def qos(self): """Gets the qos of this Volume. # noqa: E501 :return: The qos of this Volume. 
# noqa: E501 :rtype: VolumeQos """ return self._qos @qos.setter def qos(self, qos): """Sets the qos of this Volume. :param qos: The qos of this Volume. # noqa: E501 :type: VolumeQos """ self._qos = qos @property def ports(self): """Gets the ports of this Volume. # noqa: E501 :return: The ports of this Volume. # noqa: E501 :rtype: MapOfPorts """ return self._ports @ports.setter def ports(self, ports): """Sets the ports of this Volume. :param ports: The ports of this Volume. # noqa: E501 :type: MapOfPorts """ self._ports = ports @property def src_vols(self): """Gets the src_vols of this Volume. # noqa: E501 :return: The src_vols of this Volume. # noqa: E501 :rtype: list[str] """ return self._src_vols @src_vols.setter def src_vols(self, src_vols): """Sets the src_vols of this Volume. :param src_vols: The src_vols of this Volume. # noqa: E501 :type: list[str] """ self._src_vols = src_vols @property def durability_scheme(self): """Gets the durability_scheme of this Volume. # noqa: E501 :return: The durability_scheme of this Volume. # noqa: E501 :rtype: str """ return self._durability_scheme @durability_scheme.setter def durability_scheme(self, durability_scheme): """Sets the durability_scheme of this Volume. :param durability_scheme: The durability_scheme of this Volume. # noqa: E501 :type: str """ self._durability_scheme = durability_scheme @property def stats(self): """Gets the stats of this Volume. # noqa: E501 :return: The stats of this Volume. # noqa: E501 :rtype: VolumeStats """ return self._stats @stats.setter def stats(self, stats): """Sets the stats of this Volume. :param stats: The stats of this Volume. # noqa: E501 :type: VolumeStats """ self._stats = stats @property def physical_capacity(self): """Gets the physical_capacity of this Volume. # noqa: E501 :return: The physical_capacity of this Volume. # noqa: E501 :rtype: int """ return self._physical_capacity @physical_capacity.setter def physical_capacity(self, physical_capacity): """Sets the physical_capacity of this Volume. :param physical_capacity: The physical_capacity of this Volume. # noqa: E501 :type: int """ if physical_capacity is None: raise ValueError("Invalid value for `physical_capacity`, must not be `None`") # noqa: E501 self._physical_capacity = physical_capacity @property def space_allocation_policy(self): """Gets the space_allocation_policy of this Volume. # noqa: E501 :return: The space_allocation_policy of this Volume. # noqa: E501 :rtype: SpaceAllocationPolicy """ return self._space_allocation_policy @space_allocation_policy.setter def space_allocation_policy(self, space_allocation_policy): """Sets the space_allocation_policy of this Volume. :param space_allocation_policy: The space_allocation_policy of this Volume. # noqa: E501 :type: SpaceAllocationPolicy """ self._space_allocation_policy = space_allocation_policy @property def additional_fields(self): """Gets the additional_fields of this Volume. # noqa: E501 :return: The additional_fields of this Volume. # noqa: E501 :rtype: AdditionalFields """ return self._additional_fields @additional_fields.setter def additional_fields(self, additional_fields): """Sets the additional_fields of this Volume. :param additional_fields: The additional_fields of this Volume. # noqa: E501 :type: AdditionalFields """ self._additional_fields = additional_fields @property def created_at(self): """Gets the created_at of this Volume. # noqa: E501 set on create # noqa: E501 :return: The created_at of this Volume. 
# noqa: E501 :rtype: datetime """ return self._created_at @created_at.setter def created_at(self, created_at): """Sets the created_at of this Volume. set on create # noqa: E501 :param created_at: The created_at of this Volume. # noqa: E501 :type: datetime """ self._created_at = created_at @property def modified_at(self): """Gets the modified_at of this Volume. # noqa: E501 set when modified # noqa: E501 :return: The modified_at of this Volume. # noqa: E501 :rtype: datetime """ return self._modified_at @modified_at.setter def modified_at(self, modified_at): """Sets the modified_at of this Volume. set when modified # noqa: E501 :param modified_at: The modified_at of this Volume. # noqa: E501 :type: datetime """ self._modified_at = modified_at @property def block_size(self): """Gets the block_size of this Volume. # noqa: E501 :return: The block_size of this Volume. # noqa: E501 :rtype: BlockSize """ return self._block_size @block_size.setter def block_size(self, block_size): """Sets the block_size of this Volume. :param block_size: The block_size of this Volume. # noqa: E501 :type: BlockSize """ self._block_size = block_size @property def operations(self): """Gets the operations of this Volume. # noqa: E501 The operations currently running on this Volume # noqa: E501 :return: The operations of this Volume. # noqa: E501 :rtype: list[Operation] """ return self._operations @operations.setter def operations(self, operations): """Sets the operations of this Volume. The operations currently running on this Volume # noqa: E501 :param operations: The operations of this Volume. # noqa: E501 :type: list[Operation] """ self._operations = operations @property def fault_domain_id(self): """Gets the fault_domain_id of this Volume. # noqa: E501 :return: The fault_domain_id of this Volume. # noqa: E501 :rtype: str """ return self._fault_domain_id @fault_domain_id.setter def fault_domain_id(self, fault_domain_id): """Sets the fault_domain_id of this Volume. :param fault_domain_id: The fault_domain_id of this Volume. # noqa: E501 :type: str """ self._fault_domain_id = fault_domain_id @property def volume_type(self): """Gets the volume_type of this Volume. # noqa: E501 :return: The volume_type of this Volume. # noqa: E501 :rtype: UserVolumeType """ return self._volume_type @volume_type.setter def volume_type(self, volume_type): """Sets the volume_type of this Volume. :param volume_type: The volume_type of this Volume. 
# noqa: E501 :type: UserVolumeType """ self._volume_type = volume_type def to_dict(self): """Returns the model properties as a dict""" result = {} for attr, _ in self.swagger_types.items(): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value )) elif hasattr(value, "to_dict"): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item, value.items() )) else: result[attr] = value if issubclass(Volume, dict): for key, value in self.items(): result[key] = value return result def to_str(self): """Returns the string representation of the model""" return pprint.pformat(self.to_dict()) def __repr__(self): """For `print` and `pprint`""" return self.to_str() def __eq__(self, other): """Returns true if both objects are equal""" if not isinstance(other, Volume): return False return self.__dict__ == other.__dict__ def __ne__(self, other): """Returns true if both objects are not equal""" return not self == other class ZipEffort(object): """NOTE: This class is auto generated by the swagger code generator program Do not edit the class manually. """ """ allowed enum values """ NONE = "ZIP_EFFORT_NONE" _64GBPS = "ZIP_EFFORT_64Gbps" _56GBPS = "ZIP_EFFORT_56Gbps" _30GBPS = "ZIP_EFFORT_30Gbps" _15GBPS = "ZIP_EFFORT_15Gbps" _7GBPS = "ZIP_EFFORT_7Gbps" _3GBPS = "ZIP_EFFORT_3Gbps" _2GBPS = "ZIP_EFFORT_2Gbps" AUTO = "ZIP_EFFORT_AUTO" """ Attributes: swagger_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ swagger_types = { } attribute_map = { } def __init__(self): # noqa: E501 """ZipEffort - a model defined in Swagger""" # noqa: E501 self.discriminator = None def to_dict(self): """Returns the model properties as a dict""" result = {} for attr, _ in self.swagger_types.items(): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value )) elif hasattr(value, "to_dict"): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item, value.items() )) else: result[attr] = value if issubclass(ZipEffort, dict): for key, value in self.items(): result[key] = value return result def to_str(self): """Returns the string representation of the model""" return pprint.pformat(self.to_dict()) def __repr__(self): """For `print` and `pprint`""" return self.to_str() def __eq__(self, other): """Returns true if both objects are equal""" if not isinstance(other, ZipEffort): return False return self.__dict__ == other.__dict__ def __ne__(self, other): """Returns true if both objects are not equal""" return not self == other class BodyCreateVolumeCopyTask(object): """This class is auto generated by the swagger code generator program Do not edit the class manually. """ """ Attributes: swagger_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. 
""" swagger_types = { 'src_volume_uuid': 'str', 'dest_volume_uuid': 'str', 'num_threads': 'int', 'timeout': 'int' } attribute_map = { 'src_volume_uuid': 'src_volume_uuid', 'dest_volume_uuid': 'dest_volume_uuid', 'num_threads': 'num_threads', 'timeout': 'timeout' } def __init__(self, src_volume_uuid=None, dest_volume_uuid=None, num_threads=None, timeout=None): # noqa: E501 """BodyCreateVolumeCopyTask - a model defined in Swagger""" # noqa: E501 self._src_volume_uuid = None self._dest_volume_uuid = None self._num_threads = None self._timeout = None self.discriminator = None if src_volume_uuid is not None: self.src_volume_uuid = src_volume_uuid if dest_volume_uuid is not None: self.dest_volume_uuid = dest_volume_uuid if num_threads is not None: self.num_threads = num_threads if timeout is not None: self.timeout = timeout @property def src_volume_uuid(self): """Gets the src_volume_uuid of this BodyCreateVolumeCopyTask. # noqa: E501 Source volume UUID # noqa: E501 :return: The src_volume_uuid of this BodyCreateVolumeCopyTask. # noqa: E501 :rtype: str """ return self._src_volume_uuid @src_volume_uuid.setter def src_volume_uuid(self, src_volume_uuid): """Sets the src_volume_uuid of this BodyCreateVolumeCopyTask. Source volume UUID # noqa: E501 :param src_volume_uuid: The src_volume_uuid of this BodyCreateVolumeCopyTask. # noqa: E501 :type: str """ self._src_volume_uuid = src_volume_uuid @property def dest_volume_uuid(self): """Gets the dest_volume_uuid of this BodyCreateVolumeCopyTask. # noqa: E501 Destination volume UUID # noqa: E501 :return: The dest_volume_uuid of this BodyCreateVolumeCopyTask. # noqa: E501 :rtype: str """ return self._dest_volume_uuid @dest_volume_uuid.setter def dest_volume_uuid(self, dest_volume_uuid): """Sets the dest_volume_uuid of this BodyCreateVolumeCopyTask. Destination volume UUID # noqa: E501 :param dest_volume_uuid: The dest_volume_uuid of this BodyCreateVolumeCopyTask. # noqa: E501 :type: str """ self._dest_volume_uuid = dest_volume_uuid @property def num_threads(self): """Gets the num_threads of this BodyCreateVolumeCopyTask. # noqa: E501 number of threads # noqa: E501 :return: The num_threads of this BodyCreateVolumeCopyTask. # noqa: E501 :rtype: int """ return self._num_threads @num_threads.setter def num_threads(self, num_threads): """Sets the num_threads of this BodyCreateVolumeCopyTask. number of threads # noqa: E501 :param num_threads: The num_threads of this BodyCreateVolumeCopyTask. # noqa: E501 :type: int """ if num_threads is not None and num_threads > 16: # noqa: E501 raise ValueError("Invalid value for `num_threads`, must be a value less than or equal to `16`") # noqa: E501 if num_threads is not None and num_threads < 1: # noqa: E501 raise ValueError("Invalid value for `num_threads`, must be a value greater than or equal to `1`") # noqa: E501 self._num_threads = num_threads @property def timeout(self): """Gets the timeout of this BodyCreateVolumeCopyTask. # noqa: E501 maximum duration in seconds # noqa: E501 :return: The timeout of this BodyCreateVolumeCopyTask. # noqa: E501 :rtype: int """ return self._timeout @timeout.setter def timeout(self, timeout): """Sets the timeout of this BodyCreateVolumeCopyTask. maximum duration in seconds # noqa: E501 :param timeout: The timeout of this BodyCreateVolumeCopyTask. 
# noqa: E501 :type: int """ if timeout is not None and timeout > 86400: # noqa: E501 raise ValueError("Invalid value for `timeout`, must be a value less than or equal to `86400`") # noqa: E501 if timeout is not None and timeout < 60: # noqa: E501 raise ValueError("Invalid value for `timeout`, must be a value greater than or equal to `60`") # noqa: E501 self._timeout = timeout def to_dict(self): """Returns the model properties as a dict""" result = {} for attr, _ in self.swagger_types.items(): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value )) elif hasattr(value, "to_dict"): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item, value.items() )) else: result[attr] = value if issubclass(BodyCreateVolumeCopyTask, dict): for key, value in self.items(): result[key] = value return result def to_str(self): """Returns the string representation of the model""" return pprint.pformat(self.to_dict()) def __repr__(self): """For `print` and `pprint`""" return self.to_str() def __eq__(self, other): """Returns true if both objects are equal""" if not isinstance(other, BodyCreateVolumeCopyTask): return False return self.__dict__ == other.__dict__ def __ne__(self, other): """Returns true if both objects are not equal""" return not self == other class ResponseCreateVolumeCopyTask(object): """This class is auto generated by the swagger code generator program Do not edit the class manually. """ """ Attributes: swagger_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ swagger_types = { 'status': 'bool', 'message': 'str', 'error_message': 'str', 'warning': 'str', 'data': 'VolumeCopyTask' } attribute_map = { 'status': 'status', 'message': 'message', 'error_message': 'error_message', 'warning': 'warning', 'data': 'data' } def __init__(self, status=None, message=None, error_message=None, warning=None, data=None): # noqa: E501 """ResponseCreateVolumeCopyTask - a model defined in Swagger""" # noqa: E501 self._status = None self._message = None self._error_message = None self._warning = None self._data = None self.discriminator = None self.status = status if message is not None: self.message = message if error_message is not None: self.error_message = error_message if warning is not None: self.warning = warning if data is not None: self.data = data @property def status(self): """Gets the status of this ResponseCreateVolumeCopyTask. # noqa: E501 :return: The status of this ResponseCreateVolumeCopyTask. # noqa: E501 :rtype: bool """ return self._status @status.setter def status(self, status): """Sets the status of this ResponseCreateVolumeCopyTask. :param status: The status of this ResponseCreateVolumeCopyTask. # noqa: E501 :type: bool """ if status is None: raise ValueError("Invalid value for `status`, must not be `None`") # noqa: E501 self._status = status @property def message(self): """Gets the message of this ResponseCreateVolumeCopyTask. # noqa: E501 :return: The message of this ResponseCreateVolumeCopyTask. # noqa: E501 :rtype: str """ return self._message @message.setter def message(self, message): """Sets the message of this ResponseCreateVolumeCopyTask. :param message: The message of this ResponseCreateVolumeCopyTask. 
# noqa: E501 :type: str """ self._message = message @property def error_message(self): """Gets the error_message of this ResponseCreateVolumeCopyTask. # noqa: E501 :return: The error_message of this ResponseCreateVolumeCopyTask. # noqa: E501 :rtype: str """ return self._error_message @error_message.setter def error_message(self, error_message): """Sets the error_message of this ResponseCreateVolumeCopyTask. :param error_message: The error_message of this ResponseCreateVolumeCopyTask. # noqa: E501 :type: str """ self._error_message = error_message @property def warning(self): """Gets the warning of this ResponseCreateVolumeCopyTask. # noqa: E501 :return: The warning of this ResponseCreateVolumeCopyTask. # noqa: E501 :rtype: str """ return self._warning @warning.setter def warning(self, warning): """Sets the warning of this ResponseCreateVolumeCopyTask. :param warning: The warning of this ResponseCreateVolumeCopyTask. # noqa: E501 :type: str """ self._warning = warning @property def data(self): """Gets the data of this ResponseCreateVolumeCopyTask. # noqa: E501 :return: The data of this ResponseCreateVolumeCopyTask. # noqa: E501 :rtype: VolumeCopyTask """ return self._data @data.setter def data(self, data): """Sets the data of this ResponseCreateVolumeCopyTask. :param data: The data of this ResponseCreateVolumeCopyTask. # noqa: E501 :type: VolumeCopyTask """ self._data = data def to_dict(self): """Returns the model properties as a dict""" result = {} for attr, _ in self.swagger_types.items(): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value )) elif hasattr(value, "to_dict"): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item, value.items() )) else: result[attr] = value if issubclass(ResponseCreateVolumeCopyTask, dict): for key, value in self.items(): result[key] = value return result def to_str(self): """Returns the string representation of the model""" return pprint.pformat(self.to_dict()) def __repr__(self): """For `print` and `pprint`""" return self.to_str() def __eq__(self, other): """Returns true if both objects are equal""" if not isinstance(other, ResponseCreateVolumeCopyTask): return False return self.__dict__ == other.__dict__ def __ne__(self, other): """Returns true if both objects are not equal""" return not self == other class ResponseGetVolumeCopyTask(object): """NOTE: This class is auto generated by the swagger code generator program Do not edit the class manually. """ """ Attributes: swagger_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. 
""" swagger_types = { 'status': 'bool', 'message': 'str', 'error_message': 'str', 'warning': 'str', 'data': 'DataWithCopyTaskData' } attribute_map = { 'status': 'status', 'message': 'message', 'error_message': 'error_message', 'warning': 'warning', 'data': 'data' } def __init__(self, status=None, message=None, error_message=None, warning=None, data=None): # noqa: E501 """ResponseGetVolumeCopyTask - a model defined in Swagger""" # noqa: E501 self._status = None self._message = None self._error_message = None self._warning = None self._data = None self.discriminator = None self.status = status if message is not None: self.message = message if error_message is not None: self.error_message = error_message if warning is not None: self.warning = warning if data is not None: self.data = data @property def status(self): """Gets the status of this ResponseGetVolumeCopyTask. # noqa: E501 :return: The status of this ResponseGetVolumeCopyTask. # noqa: E501 :rtype: bool """ return self._status @status.setter def status(self, status): """Sets the status of this ResponseGetVolumeCopyTask. :param status: The status of this ResponseGetVolumeCopyTask. # noqa: E501 :type: bool """ if status is None: raise ValueError("Invalid value for `status`, must not be `None`") # noqa: E501 self._status = status @property def message(self): """Gets the message of this ResponseGetVolumeCopyTask. # noqa: E501 :return: The message of this ResponseGetVolumeCopyTask. # noqa: E501 :rtype: str """ return self._message @message.setter def message(self, message): """Sets the message of this ResponseGetVolumeCopyTask. :param message: The message of this ResponseGetVolumeCopyTask. # noqa: E501 :type: str """ self._message = message @property def error_message(self): """Gets the error_message of this ResponseGetVolumeCopyTask. # noqa: E501 :return: The error_message of this ResponseGetVolumeCopyTask. # noqa: E501 :rtype: str """ return self._error_message @error_message.setter def error_message(self, error_message): """Sets the error_message of this ResponseGetVolumeCopyTask. :param error_message: The error_message of this ResponseGetVolumeCopyTask. # noqa: E501 :type: str """ self._error_message = error_message @property def warning(self): """Gets the warning of this ResponseGetVolumeCopyTask. # noqa: E501 :return: The warning of this ResponseGetVolumeCopyTask. # noqa: E501 :rtype: str """ return self._warning @warning.setter def warning(self, warning): """Sets the warning of this ResponseGetVolumeCopyTask. :param warning: The warning of this ResponseGetVolumeCopyTask. # noqa: E501 :type: str """ self._warning = warning @property def data(self): """Gets the data of this ResponseGetVolumeCopyTask. # noqa: E501 :return: The data of this ResponseGetVolumeCopyTask. # noqa: E501 :rtype: DataWithCopyTaskData """ return self._data @data.setter def data(self, data): """Sets the data of this ResponseGetVolumeCopyTask. :param data: The data of this ResponseGetVolumeCopyTask. 
# noqa: E501 :type: DataWithCopyTaskData """ self._data = data def to_dict(self): """Returns the model properties as a dict""" result = {} for attr, _ in self.swagger_types.items(): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value )) elif hasattr(value, "to_dict"): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item, value.items() )) else: result[attr] = value if issubclass(ResponseGetVolumeCopyTask, dict): for key, value in self.items(): result[key] = value return result def to_str(self): """Returns the string representation of the model""" return pprint.pformat(self.to_dict()) def __repr__(self): """For `print` and `pprint`""" return self.to_str() def __eq__(self, other): """Returns true if both objects are equal""" if not isinstance(other, ResponseGetVolumeCopyTask): return False return self.__dict__ == other.__dict__ def __ne__(self, other): """Returns true if both objects are not equal""" return not self == other class VolumeCopyTask(object): """NOTE: This class is auto generated by the swagger code generator program Do not edit the class manually. """ """ Attributes: swagger_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ swagger_types = { 'task_uuid': 'str' } attribute_map = { 'task_uuid': 'task_uuid' } def __init__(self, task_uuid=None): # noqa: E501 """VolumeCopyTask - a model defined in Swagger""" # noqa: E501 self._task_uuid = None self.discriminator = None if task_uuid is not None: self.task_uuid = task_uuid @property def task_uuid(self): """Gets the task_uuid of this VolumeCopyTask. # noqa: E501 Volume copy task UUID # noqa: E501 :return: The task_uuid of this VolumeCopyTask. # noqa: E501 :rtype: str """ return self._task_uuid @task_uuid.setter def task_uuid(self, task_uuid): """Sets the task_uuid of this VolumeCopyTask. Volume copy task UUID # noqa: E501 :param task_uuid: The task_uuid of this VolumeCopyTask. # noqa: E501 :type: str """ self._task_uuid = task_uuid def to_dict(self): """Returns the model properties as a dict""" result = {} for attr, _ in self.swagger_types.items(): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value )) elif hasattr(value, "to_dict"): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item, value.items() )) else: result[attr] = value if issubclass(VolumeCopyTask, dict): for key, value in self.items(): result[key] = value return result def to_str(self): """Returns the string representation of the model""" return pprint.pformat(self.to_dict()) def __repr__(self): """For `print` and `pprint`""" return self.to_str() def __eq__(self, other): """Returns true if both objects are equal""" if not isinstance(other, VolumeCopyTask): return False return self.__dict__ == other.__dict__ def __ne__(self, other): """Returns true if both objects are not equal""" return not self == other class DataWithUuidStringData(object): """NOTE: This class is auto generated by the swagger code generator program Do not edit the class manually. """ """ Attributes: swagger_types (dict): The key is attribute name and the value is attribute type. 
attribute_map (dict): The key is attribute name and the value is json key in definition. """ swagger_types = { 'uuid': 'str' } attribute_map = { 'uuid': 'uuid' } def __init__(self, uuid=None): # noqa: E501 """DataWithUuidStringData - a model defined in Swagger""" # noqa: E501 self._uuid = None self.discriminator = None self.uuid = uuid @property def uuid(self): """Gets the uuid of this DataWithUuidStringData. # noqa: E501 :return: The uuid of this DataWithUuidStringData. # noqa: E501 :rtype: str """ return self._uuid @uuid.setter def uuid(self, uuid): """Sets the uuid of this DataWithUuidStringData. :param uuid: The uuid of this DataWithUuidStringData. # noqa: E501 :type: str """ if uuid is None: raise ValueError("Invalid value for `uuid`, must not be `None`") # noqa: E501 self._uuid = uuid def to_dict(self): """Returns the model properties as a dict""" result = {} for attr, _ in self.swagger_types.items(): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value )) elif hasattr(value, "to_dict"): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item, value.items() )) else: result[attr] = value if issubclass(DataWithUuidStringData, dict): for key, value in self.items(): result[key] = value return result def to_str(self): """Returns the string representation of the model""" return pprint.pformat(self.to_dict()) def __repr__(self): """For `print` and `pprint`""" return self.to_str() def __eq__(self, other): """Returns true if both objects are equal""" if not isinstance(other, DataWithUuidStringData): return False return self.__dict__ == other.__dict__ def __ne__(self, other): """Returns true if both objects are not equal""" return not self == other class DataWithCopyTaskData(object): """NOTE: This class is auto generated by the swagger code generator program Do not edit the class manually. """ """ Attributes: swagger_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ swagger_types = { 'task_state': 'str', 'completion_pct': 'str' } attribute_map = { 'task_state': 'task_state', 'completion_pct': 'Completion_pct' } def __init__(self, task_state=None, completion_pct=None): # noqa: E501 """DataWithCopyTaskData - a model defined in Swagger""" # noqa: E501 self._task_state = None self._completion_pct = None self.discriminator = None if task_state is not None: self.task_state = task_state if completion_pct is not None: self.completion_pct = completion_pct @property def task_state(self): """Gets the task_state of this DataWithCopyTaskData. # noqa: E501 Status of the volume copy task (RUNNING, SUCCES or FAILED) # noqa: E501 :return: The task_state of this DataWithCopyTaskData. # noqa: E501 :rtype: str """ return self._task_state @task_state.setter def task_state(self, task_state): """Sets the task_state of this DataWithCopyTaskData. Status of the volume copy task (RUNNING, SUCCES or FAILED) # noqa: E501 :param task_state: The task_state of this DataWithCopyTaskData. 
# noqa: E501 :type: str """ allowed_values = ["RUNNING", "SUCCESS", "FAILED"] # noqa: E501 if task_state not in allowed_values: raise ValueError( "Invalid value for `task_state` ({0}), must be one of {1}" # noqa: E501 .format(task_state, allowed_values) ) self._task_state = task_state @property def completion_pct(self): """Gets the completion_pct of this DataWithCopyTaskData. # noqa: E501 Percent complete (0-100) # noqa: E501 :return: The completion_pct of this DataWithCopyTaskData. # noqa: E501 :rtype: str """ return self._completion_pct @completion_pct.setter def completion_pct(self, completion_pct): """Sets the completion_pct of this DataWithCopyTaskData. Percent complete (0-100) # noqa: E501 :param completion_pct: The completion_pct of this DataWithCopyTaskData. # noqa: E501 :type: str """ self._completion_pct = completion_pct def to_dict(self): """Returns the model properties as a dict""" result = {} for attr, _ in self.swagger_types.items(): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value )) elif hasattr(value, "to_dict"): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item, value.items() )) else: result[attr] = value if issubclass(DataWithCopyTaskData, dict): for key, value in self.items(): result[key] = value return result def to_str(self): """Returns the string representation of the model""" return pprint.pformat(self.to_dict()) def __repr__(self): """For `print` and `pprint`""" return self.to_str() def __eq__(self, other): """Returns true if both objects are equal""" if not isinstance(other, DataWithCopyTaskData): return False return self.__dict__ == other.__dict__ def __ne__(self, other): """Returns true if both objects are not equal""" return not self == other ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315577.3511207 cinder-27.0.0/cinder/volume/drivers/fusionstorage/0000775000175000017500000000000000000000000022210 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/drivers/fusionstorage/__init__.py0000664000175000017500000000000000000000000024307 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/drivers/fusionstorage/constants.py0000664000175000017500000000176100000000000024603 0ustar00zuulzuul00000000000000# Copyright (c) 2016 Huawei Technologies Co., Ltd. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
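# A brief, hedged sketch of how the copy-task models defined above are meant
# to fit together (illustrative only; the values and the calling client are
# assumptions, not taken from this source):
#
#   body = BodyCreateVolumeCopyTask(src_volume_uuid="<source volume uuid>",
#                                   dest_volume_uuid="<dest volume uuid>",
#                                   num_threads=4,   # setter enforces 1..16
#                                   timeout=3600)    # setter enforces 60..86400
#   # The create call is expected to return a ResponseCreateVolumeCopyTask
#   # whose .data is a VolumeCopyTask carrying the task_uuid to poll; each
#   # poll returns a ResponseGetVolumeCopyTask whose .data is a
#   # DataWithCopyTaskData reporting task_state ("RUNNING", "SUCCESS" or
#   # "FAILED") and completion_pct as a string percentage (0-100).
#
# The CONF_* names defined below are the cinder.conf keys read by the
# FusionStorage driver (see fs_conf.FusionStorageConf and the volume_opts in
# dsware.py further on). A hedged example backend section, with purely
# illustrative addresses, pool names and credentials:
#
#   [dsware-backend]
#   volume_driver = cinder.volume.drivers.fusionstorage.dsware.DSWAREDriver
#   volume_backend_name = dsware-backend
#   dsware_rest_url = https://<fusionstorage-manager-address>
#   dsware_storage_pools = pool0; pool1
#   manager_ips = host1:ip1, host2:ip2
#   san_login = <user>
#   san_password = <password>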
DEFAULT_TIMEOUT = 50 LOGIN_SOCKET_TIMEOUT = 32 CONNECT_ERROR = 403 ERROR_UNAUTHORIZED = 10000003 VOLUME_NOT_EXIST = (31000000, 50150005) BASIC_URI = '/dsware/service/' CONF_PATH = "/etc/cinder/cinder.conf" CONF_ADDRESS = "dsware_rest_url" CONF_MANAGER_IP = "manager_ips" CONF_POOLS = "dsware_storage_pools" CONF_PWD = "san_password" CONF_USER = "san_login" ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/drivers/fusionstorage/dsware.py0000664000175000017500000003635500000000000024063 0ustar00zuulzuul00000000000000# Copyright (c) 2018 Huawei Technologies Co., Ltd. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import json from oslo_config import cfg from oslo_log import log as logging from oslo_utils import units from cinder.common import constants from cinder import exception from cinder.i18n import _ from cinder import interface from cinder.volume import driver from cinder.volume.drivers.fusionstorage import fs_client from cinder.volume.drivers.fusionstorage import fs_conf from cinder.volume.drivers.san import san from cinder.volume import volume_utils LOG = logging.getLogger(__name__) volume_opts = [ cfg.BoolOpt("dsware_isthin", default=False, help='The flag of thin storage allocation.', deprecated_for_removal=True, deprecated_since='14.0.0', deprecated_reason='FusionStorage cinder driver refactored the ' 'code with Restful method and the old CLI ' 'mode has been abandon. So those ' 'configuration items are no longer used.'), cfg.StrOpt("dsware_manager", default='', help='Fusionstorage manager ip addr for cinder-volume.', deprecated_for_removal=True, deprecated_since='14.0.0', deprecated_reason='FusionStorage cinder driver refactored the ' 'code with Restful method and the old CLI ' 'mode has been abandon. So those ' 'configuration items are no longer used.'), cfg.StrOpt('fusionstorageagent', default='', help='Fusionstorage agent ip addr range', deprecated_for_removal=True, deprecated_since='14.0.0', deprecated_reason='FusionStorage cinder driver refactored the ' 'code with Restful method and the old CLI ' 'mode has been abandon. So those ' 'configuration items are no longer used.'), cfg.StrOpt('pool_type', default='default', help='Pool type, like sata-2copy', deprecated_for_removal=True, deprecated_since='14.0.0', deprecated_reason='FusionStorage cinder driver refactored the ' 'code with Restful method and the old CLI ' 'mode has been abandon. So those ' 'configuration items are no longer used.'), cfg.ListOpt('pool_id_filter', default=[], help='Pool id permit to use', deprecated_for_removal=True, deprecated_since='14.0.0', deprecated_reason='FusionStorage cinder driver refactored the ' 'code with Restful method and the old CLI ' 'mode has been abandon. 
So those ' 'configuration items are no longer used.'), cfg.IntOpt('clone_volume_timeout', default=680, help='Create clone volume timeout', deprecated_for_removal=True, deprecated_since='14.0.0', deprecated_reason='FusionStorage cinder driver refactored the ' 'code with Restful method and the old CLI ' 'mode has been abandon. So those ' 'configuration items are no longer used.'), cfg.DictOpt('manager_ips', default={}, help='This option is to support the FSA to mount across the ' 'different nodes. The parameters takes the standard dict ' 'config form, manager_ips = host1:ip1, host2:ip2...'), cfg.StrOpt('dsware_rest_url', default='', help='The address of FusionStorage array. For example, ' '"dsware_rest_url=xxx"'), cfg.StrOpt('dsware_storage_pools', default="", help='The list of pools on the FusionStorage array, the ' 'semicolon(;) was used to split the storage pools, ' '"dsware_storage_pools = xxx1; xxx2; xxx3"') ] CONF = cfg.CONF CONF.register_opts(volume_opts) @interface.volumedriver class DSWAREDriver(driver.VolumeDriver): VERSION = '2.0' CI_WIKI_NAME = 'Huawei_FusionStorage_CI' def __init__(self, *args, **kwargs): super(DSWAREDriver, self).__init__(*args, **kwargs) if not self.configuration: msg = _('Configuration is not found.') LOG.error(msg) raise exception.InvalidInput(reason=msg) self.configuration.append_config_values(volume_opts) self.configuration.append_config_values(san.san_opts) self.conf = fs_conf.FusionStorageConf(self.configuration, self.host) self.client = None @staticmethod def get_driver_options(): return volume_opts def do_setup(self, context): self.conf.update_config_value() url_str = self.configuration.san_address url_user = self.configuration.san_user url_password = self.configuration.san_password self.client = fs_client.RestCommon( fs_address=url_str, fs_user=url_user, fs_password=url_password) self.client.login() def check_for_setup_error(self): all_pools = self.client.query_pool_info() all_pools_name = [p['poolName'] for p in all_pools if p.get('poolName')] for pool in self.configuration.pools_name: if pool not in all_pools_name: msg = _('Storage pool %(pool)s does not exist ' 'in the FusionStorage.') % {'pool': pool} LOG.error(msg) raise exception.InvalidInput(reason=msg) def _update_pool_stats(self): backend_name = self.configuration.safe_get( 'volume_backend_name') or self.__class__.__name__ data = {"volume_backend_name": backend_name, "driver_version": "2.0.9", "QoS_support": False, "thin_provisioning_support": False, "pools": [], "vendor_name": "Huawei", "storage_protocol": constants.SCSI, } all_pools = self.client.query_pool_info() for pool in all_pools: if pool['poolName'] in self.configuration.pools_name: single_pool_info = self._update_single_pool_info_status(pool) data['pools'].append(single_pool_info) return data def _get_capacity(self, pool_info): pool_capacity = {} total = float(pool_info['totalCapacity']) / units.Ki free = (float(pool_info['totalCapacity']) - float(pool_info['usedCapacity'])) / units.Ki pool_capacity['total_capacity_gb'] = total pool_capacity['free_capacity_gb'] = free return pool_capacity def _update_single_pool_info_status(self, pool_info): status = {} capacity = self._get_capacity(pool_info=pool_info) status.update({ "pool_name": pool_info['poolName'], "total_capacity_gb": capacity['total_capacity_gb'], "free_capacity_gb": capacity['free_capacity_gb'], }) return status def get_volume_stats(self, refresh=False): self.client.keep_alive() stats = self._update_pool_stats() return stats def _check_volume_exist(self, volume): vol_name = 
self._get_vol_name(volume) result = self.client.query_volume_by_name(vol_name=vol_name) if result: return result def _raise_exception(self, msg): LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) def _get_pool_id(self, volume): pool_id = None pool_name = volume_utils.extract_host(volume.host, level='pool') all_pools = self.client.query_pool_info() for pool in all_pools: if pool_name == pool['poolName']: pool_id = pool['poolId'] if pool_id is None: msg = _('Storage pool %(pool)s does not exist on the array. ' 'Please check.') % {"pool": pool_id} LOG.error(msg) raise exception.InvalidInput(reason=msg) return pool_id def _get_vol_name(self, volume): provider_location = volume.get("provider_location", None) if provider_location: vol_name = json.loads(provider_location).get("name") else: vol_name = volume.name return vol_name def create_volume(self, volume): pool_id = self._get_pool_id(volume) vol_name = volume.name vol_size = volume.size vol_size *= units.Ki self.client.create_volume( pool_id=pool_id, vol_name=vol_name, vol_size=vol_size) def delete_volume(self, volume): vol_name = self._get_vol_name(volume) if self._check_volume_exist(volume): self.client.delete_volume(vol_name=vol_name) def extend_volume(self, volume, new_size): vol_name = self._get_vol_name(volume) if not self._check_volume_exist(volume): msg = _("Volume: %(vol_name)s does not exist!" ) % {"vol_name": vol_name} self._raise_exception(msg) else: new_size *= units.Ki self.client.expand_volume(vol_name, new_size) def _check_snapshot_exist(self, volume, snapshot): pool_id = self._get_pool_id(volume) snapshot_name = self._get_snapshot_name(snapshot) result = self.client.query_snapshot_by_name( pool_id=pool_id, snapshot_name=snapshot_name) if result.get('totalNum'): return result def _get_snapshot_name(self, snapshot): provider_location = snapshot.get("provider_location", None) if provider_location: snapshot_name = json.loads(provider_location).get("name") else: snapshot_name = snapshot.name return snapshot_name def create_volume_from_snapshot(self, volume, snapshot): vol_name = self._get_vol_name(volume) snapshot_name = self._get_snapshot_name(snapshot) vol_size = volume.size if not self._check_snapshot_exist(snapshot.volume, snapshot): msg = _("Snapshot: %(name)s does not exist!" ) % {"name": snapshot_name} self._raise_exception(msg) elif self._check_volume_exist(volume): msg = _("Volume: %(vol_name)s already exists!" ) % {'vol_name': vol_name} self._raise_exception(msg) else: vol_size *= units.Ki self.client.create_volume_from_snapshot( snapshot_name=snapshot_name, vol_name=vol_name, vol_size=vol_size) def create_cloned_volume(self, volume, src_volume): vol_name = self._get_vol_name(volume) src_vol_name = self._get_vol_name(src_volume) vol_size = volume.size vol_size *= units.Ki if not self._check_volume_exist(src_volume): msg = _("Volume: %(vol_name)s does not exist!" 
) % {"vol_name": src_vol_name} self._raise_exception(msg) else: self.client.create_volume_from_volume( vol_name=vol_name, vol_size=vol_size, src_vol_name=src_vol_name) def create_snapshot(self, snapshot): snapshot_name = self._get_snapshot_name(snapshot) vol_name = self._get_vol_name(snapshot.volume) self.client.create_snapshot( snapshot_name=snapshot_name, vol_name=vol_name) def delete_snapshot(self, snapshot): snapshot_name = self._get_snapshot_name(snapshot) if self._check_snapshot_exist(snapshot.volume, snapshot): self.client.delete_snapshot(snapshot_name=snapshot_name) def _get_manager_ip(self, context): if self.configuration.manager_ips.get(context['host']): return self.configuration.manager_ips.get(context['host']) else: msg = _("The required host: %(host)s and its manager ip are not " "included in the configuration file." ) % {"host": context['host']} LOG.error(msg) raise exception.VolumeBackendAPIException(msg) def _attach_volume(self, context, volume, properties, remote=False): vol_name = self._get_vol_name(volume) if not self._check_volume_exist(volume): msg = _("Volume: %(vol_name)s does not exist!" ) % {"vol_name": vol_name} self._raise_exception(msg) manager_ip = self._get_manager_ip(properties) result = self.client.attach_volume(vol_name, manager_ip) attach_path = result[vol_name][0]['devName'] attach_info = dict() attach_info['device'] = dict() attach_info['device']['path'] = attach_path if attach_path == '': msg = _("Host attach volume failed!") LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) return attach_info, volume def _detach_volume(self, context, attach_info, volume, properties, force=False, remote=False, ignore_errors=False): vol_name = self._get_vol_name(volume) if self._check_volume_exist(volume): manager_ip = self._get_manager_ip(properties) self.client.detach_volume(vol_name, manager_ip) def initialize_connection(self, volume, connector): vol_name = self._get_vol_name(volume) manager_ip = self._get_manager_ip(connector) if not self._check_volume_exist(volume): msg = _("Volume: %(vol_name)s does not exist!" ) % {"vol_name": vol_name} self._raise_exception(msg) self.client.attach_volume(vol_name, manager_ip) volume_info = self.client.query_volume_by_name(vol_name=vol_name) vol_wwn = volume_info.get('wwn') by_id_path = "/dev/disk/by-id/" + "wwn-0x%s" % vol_wwn properties = {'device_path': by_id_path} return {'driver_volume_type': 'local', 'data': properties} def terminate_connection(self, volume, connector, **kwargs): if self._check_volume_exist(volume): manager_ip = self._get_manager_ip(connector) vol_name = self._get_vol_name(volume) self.client.detach_volume(vol_name, manager_ip) def create_export(self, context, volume, connector): pass def ensure_export(self, context, volume): pass def remove_export(self, context, volume): pass ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/drivers/fusionstorage/fs_client.py0000664000175000017500000002332600000000000024536 0ustar00zuulzuul00000000000000# Copyright (c) 2018 Huawei Technologies Co., Ltd. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import json from oslo_log import log as logging import requests from cinder import exception from cinder.i18n import _ from cinder.volume.drivers.fusionstorage import constants LOG = logging.getLogger(__name__) class RestCommon(object): def __init__(self, fs_address, fs_user, fs_password): self.address = fs_address self.user = fs_user self.password = fs_password self.session = None self.token = None self.version = None self.init_http_head() LOG.warning("Suppressing requests library SSL Warnings") requests.packages.urllib3.disable_warnings( requests.packages.urllib3.exceptions.InsecureRequestWarning) requests.packages.urllib3.disable_warnings( requests.packages.urllib3.exceptions.InsecurePlatformWarning) def init_http_head(self): self.session = requests.Session() self.session.headers.update({ "Content-Type": "application/json;charset=UTF-8", }) self.session.verify = False def call(self, url, method, data=None, call_timeout=constants.DEFAULT_TIMEOUT, get_version=False, filter_flag=False, json_flag=False): kwargs = {'timeout': call_timeout} if data: kwargs['data'] = json.dumps(data) if not get_version: call_url = self.address + constants.BASIC_URI + self.version + url else: call_url = self.address + constants.BASIC_URI + url func = getattr(self.session, method.lower()) try: result = func(call_url, **kwargs) except Exception as err: LOG.error('Bad response from server: %(url)s. 
' 'Error: %(err)s', {'url': url, 'err': err}) return {"error": { "code": constants.CONNECT_ERROR, "description": "Connect to server error."}} try: result.raise_for_status() except requests.HTTPError as exc: return {"error": {"code": exc.response.status_code, "description": str(exc)}} if not filter_flag: LOG.info(''' Request URL: %(url)s, Call Method: %(method)s, Request Data: %(data)s, Response Data: %(res)s, Result Data: %(res_json)s''', {'url': url, 'method': method, 'data': data, 'res': result, 'res_json': result.json()}) if json_flag: return result else: return result.json() def _assert_rest_result(self, result, err_str): if result.get('result') != 0: msg = (_('%(err)s\nresult: %(res)s.') % {'err': err_str, 'res': result}) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) def get_version(self): url = 'rest/version' self.session.headers.update({ "Referer": self.address + constants.BASIC_URI }) result = self.call(url=url, method='GET', get_version=True) self._assert_rest_result(result, _('Get version session error.')) if result.get("currentVersion"): self.version = result["currentVersion"] def login(self): self.get_version() url = '/sec/login' data = {"userName": self.user, "password": self.password} result = self.call(url, 'POST', data=data, call_timeout=constants.LOGIN_SOCKET_TIMEOUT, filter_flag=True, json_flag=True) self._assert_rest_result(result.json(), _('Login session error.')) self.token = result.headers['X-Auth-Token'] self.session.headers.update({ "x-auth-token": self.token }) def logout(self): url = '/sec/logout' if self.address: result = self.call(url, 'POST') self._assert_rest_result(result, _('Logout session error.')) def keep_alive(self): url = '/sec/keepAlive' result = self.call(url, 'POST', filter_flag=True) if result.get('result') == constants.ERROR_UNAUTHORIZED: try: self.login() except Exception: LOG.error('The FusionStorage may have been powered off. 
' 'Power on the FusionStorage and then log in.') raise else: self._assert_rest_result(result, _('Keep alive session error.')) def query_pool_info(self, pool_id=None): pool_id = str(pool_id) if pool_id != 'None': url = '/storagePool' + '?poolId=' + pool_id else: url = '/storagePool' result = self.call(url, 'GET', filter_flag=True) self._assert_rest_result(result, _("Query pool session error.")) return result['storagePools'] def query_volume_by_name(self, vol_name): url = '/volume/queryByName?volName=' + vol_name result = self.call(url, 'GET') if result.get('errorCode') in constants.VOLUME_NOT_EXIST: return None self._assert_rest_result( result, _("Query volume by name session error")) return result.get('lunDetailInfo') def query_volume_by_id(self, vol_id): url = '/volume/queryById?volId=' + vol_id result = self.call(url, 'GET') if result.get('errorCode') in constants.VOLUME_NOT_EXIST: return None self._assert_rest_result( result, _("Query volume by ID session error")) return result.get('lunDetailInfo') def create_volume(self, vol_name, vol_size, pool_id): url = '/volume/create' params = {"volName": vol_name, "volSize": vol_size, "poolId": pool_id} result = self.call(url, "POST", params) self._assert_rest_result(result, _('Create volume session error.')) def delete_volume(self, vol_name): url = '/volume/delete' params = {"volNames": [vol_name]} result = self.call(url, "POST", params) self._assert_rest_result(result, _('Delete volume session error.')) def attach_volume(self, vol_name, manage_ip): url = '/volume/attach' params = {"volName": [vol_name], "ipList": [manage_ip]} result = self.call(url, "POST", params) self._assert_rest_result(result, _('Attach volume session error.')) if int(result[vol_name][0]['errorCode']) != 0: msg = _("Host attach volume failed!") LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) return result def detach_volume(self, vol_name, manage_ip): url = '/volume/detach/' params = {"volName": [vol_name], "ipList": [manage_ip]} result = self.call(url, "POST", params) self._assert_rest_result(result, _('Detach volume session error.')) def expand_volume(self, vol_name, new_vol_size): url = '/volume/expand' params = {"volName": vol_name, "newVolSize": new_vol_size} result = self.call(url, "POST", params) self._assert_rest_result(result, _('Expand volume session error.')) def query_snapshot_by_name(self, pool_id, snapshot_name, page_num=1, page_size=1000): # Filter the snapshot according to the name, while the "page_num" and # "page_size" must be set while using the interface. 
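# The request body assembled below therefore looks roughly like this
# (illustrative values, not taken from the source):
#   {"poolId": 0, "pageNum": 1, "pageSize": 1000,
#    "filters": {"volumeName": "<snapshot name>"}}
# The caller (_check_snapshot_exist in dsware.py) only inspects the returned
# 'totalNum' field to decide whether the snapshot exists.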
url = '/snapshot/list' params = {"poolId": pool_id, "pageNum": page_num, "pageSize": page_size, "filters": {"volumeName": snapshot_name}} result = self.call(url, "POST", params) self._assert_rest_result( result, _('query snapshot list session error.')) return result def create_snapshot(self, snapshot_name, vol_name): url = '/snapshot/create/' params = {"volName": vol_name, "snapshotName": snapshot_name} result = self.call(url, "POST", params) self._assert_rest_result(result, _('Create snapshot error.')) def delete_snapshot(self, snapshot_name): url = '/snapshot/delete/' params = {"snapshotName": snapshot_name} result = self.call(url, "POST", params) self._assert_rest_result(result, _('Delete snapshot session error.')) def create_volume_from_snapshot(self, snapshot_name, vol_name, vol_size): url = '/snapshot/volume/create/' params = {"src": snapshot_name, "volName": vol_name, "volSize": vol_size} result = self.call(url, "POST", params) self._assert_rest_result( result, _('create volume from snapshot session error.')) def create_volume_from_volume(self, vol_name, vol_size, src_vol_name): temp_snapshot_name = "temp" + src_vol_name + "clone" + vol_name self.create_snapshot(vol_name=src_vol_name, snapshot_name=temp_snapshot_name) self.create_volume_from_snapshot(snapshot_name=temp_snapshot_name, vol_name=vol_name, vol_size=vol_size) self.delete_snapshot(snapshot_name=temp_snapshot_name) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/drivers/fusionstorage/fs_conf.py0000664000175000017500000001130600000000000024200 0ustar00zuulzuul00000000000000# Copyright (c) 2018 Huawei Technologies Co., Ltd. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import base64 import configparser import os from oslo_log import log as logging from cinder import exception from cinder.i18n import _ from cinder import utils from cinder.volume.drivers.fusionstorage import constants LOG = logging.getLogger(__name__) class FusionStorageConf(object): def __init__(self, configuration, host): self.configuration = configuration self._check_host(host) def _check_host(self, host): if host and len(host.split('@')) > 1: self.host = host.split('@')[1] else: msg = _("The host %s is not reliable. 
Please check cinder-volume " "backend.") % host LOG.error(msg) raise exception.InvalidInput(reason=msg) def update_config_value(self): self._encode_authentication() self._pools_name() self._san_address() self._san_user() self._san_password() def _encode_authentication(self): name_node = self.configuration.safe_get(constants.CONF_USER) pwd_node = self.configuration.safe_get(constants.CONF_PWD) need_encode = False if name_node is not None and not name_node.startswith('!&&&'): encoded = base64.b64encode(name_node.encode('latin-1')).decode() name_node = '!&&&' + encoded need_encode = True if pwd_node is not None and not pwd_node.startswith('!&&&'): encoded = base64.b64encode(pwd_node.encode('latin-1')).decode() pwd_node = '!&&&' + encoded need_encode = True if need_encode: self._rewrite_conf(name_node, pwd_node) def _rewrite_conf(self, name_node, pwd_node): if os.path.exists(constants.CONF_PATH): utils.execute("chmod", "666", constants.CONF_PATH, run_as_root=True) conf = configparser.ConfigParser() conf.read(constants.CONF_PATH) if name_node: conf.set(self.host, constants.CONF_USER, name_node) if pwd_node: conf.set(self.host, constants.CONF_PWD, pwd_node) fh = open(constants.CONF_PATH, 'w') conf.write(fh) fh.close() utils.execute("chmod", "644", constants.CONF_PATH, run_as_root=True) def _assert_text_result(self, text, mess): if not text: msg = _("%s is not configured.") % mess LOG.error(msg) raise exception.InvalidInput(reason=msg) def _san_address(self): address = self.configuration.safe_get(constants.CONF_ADDRESS) self._assert_text_result(address, mess=constants.CONF_ADDRESS) setattr(self.configuration, 'san_address', address) def _decode_text(self, text): return (base64.b64decode(text[4:].encode('latin-1')).decode() if text.startswith('!&&&') else text) def _san_user(self): user_text = self.configuration.safe_get(constants.CONF_USER) self._assert_text_result(user_text, mess=constants.CONF_USER) user = self._decode_text(user_text) setattr(self.configuration, 'san_user', user) def _san_password(self): pwd_text = self.configuration.safe_get(constants.CONF_PWD) self._assert_text_result(pwd_text, mess=constants.CONF_PWD) pwd = self._decode_text(pwd_text) setattr(self.configuration, 'san_password', pwd) def _pools_name(self): pools_name = self.configuration.safe_get(constants.CONF_POOLS) self._assert_text_result(pools_name, mess=constants.CONF_POOLS) pools = set(x.strip() for x in pools_name.split(';') if x.strip()) if not pools: msg = _('No valid storage pool configured.') LOG.error(msg) raise exception.InvalidInput(msg) setattr(self.configuration, 'pools_name', list(pools)) def _manager_ip(self): manager_ips = self.configuration.safe_get(constants.CONF_MANAGER_IP) self._assert_text_result(manager_ips, mess=constants.CONF_MANAGER_IP) setattr(self.configuration, 'manager_ips', manager_ips) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315577.3511207 cinder-27.0.0/cinder/volume/drivers/hedvig/0000775000175000017500000000000000000000000020566 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/drivers/hedvig/__init__.py0000664000175000017500000000000000000000000022665 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/drivers/hedvig/config.py0000664000175000017500000000230100000000000022401 
0ustar00zuulzuul00000000000000# Copyright (c) 2018 Hedvig, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. class Config(object): ReplicationPolicy = { 0: "Agnostic", 1: "RackAware", 2: "DataCenterAware", } DiskResidence = { 0: "Flash", 1: "HDD", } # Default Port Configuration defaultHControllerPort_ = 50000 # Default Cinder Configuration defaultCinderReplicationFactor = 3 defaultCinderDedupEnable = False defaultCinderCompressEnable = False defaultCinderCacheEnable = False defaultCinderDiskResidence = DiskResidence[1] defaultCinderReplicationPolicy = ReplicationPolicy[0] retryCount = 5 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/drivers/hedvig/hedvig_cinder.py0000664000175000017500000006254300000000000023744 0ustar00zuulzuul00000000000000# Copyright (c) 2018 Hedvig, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Volume driver for Hedvig Block Storage. """ import socket from oslo_log import log as logging from oslo_utils import strutils from oslo_utils import units from cinder.common import constants from cinder import exception from cinder.i18n import _ from cinder import interface from cinder.volume import driver from cinder.volume.drivers.hedvig import config from cinder.volume.drivers.hedvig import rest_client from cinder.volume.drivers.san import san from cinder.volume import volume_types LOG = logging.getLogger(__name__) @interface.volumedriver class HedvigISCSIDriver(driver.ISCSIDriver, san.SanDriver): """OpenStack Cinder driver to enable Hedvig storage. .. code-block:: none Version history: 1.0 - Initial driver """ DEFAULT_VOL_BLOCK_SIZE = 4 * units.Ki DEFAULT_CREATEDBY = "OpenStack" DEFAULT_EXPORT_BLK_SIZE = 4096 DEFAULT_CAPACITY = units.Gi DEFAULT_ISCSI_PORT = 3260 DEFAULT_TARGET_NAME = "iqn.2012-05.com.hedvig:storage." 
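    # Note (illustrative sketch, not part of the original source): the default
    # values above are combined by initialize_connection() further down when it
    # builds the iSCSI connection properties. For a hypothetical controller
    # host "hedvig-ctrl-1" that exports the volume as LUN 7, the result would
    # look like:
    #   target_iqn    = "%s%s-%s" % (DEFAULT_TARGET_NAME, tgtHost, lunnum)
    #                 -> "iqn.2012-05.com.hedvig:storage.hedvig-ctrl-1-7"
    #   target_portal = "%s:%s" % (socket.gethostbyname(tgtHost),
    #                              DEFAULT_ISCSI_PORT)  # e.g. "10.0.0.5:3260"
    # (host name, LUN number and IP address here are assumed example values.)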
VERSION = "1.0.0" CI_WIKI_NAME = "Hedvig_CI" def __init__(self, *args, **kwargs): super(HedvigISCSIDriver, self).__init__(*args, **kwargs) self.group_stats = {} self.hrs = None @staticmethod def get_driver_options(): return [] def check_for_setup_error(self): self.hrs.connect() LOG.info("Initialization complete") def do_setup(self, context): # Ensure that the data required by hedvig are provided required_config = ['san_login', 'san_password', 'san_ip', 'san_clustername'] for attr in required_config: if not getattr(self.configuration, attr, None): msg = _('Hedvig param %s is not set.') % attr LOG.error(msg) raise exception.VolumeDriverException(msg) self.san_ip = self.configuration.san_ip self.san_login = self.configuration.san_login self.san_password = self.configuration.san_password self.san_clustername = self.configuration.san_clustername LOG.info('Initializing hedvig cinder driver with ' 'server: %s', self.san_ip) self.hrs = rest_client.RestClient(self.san_ip, self.san_login, self.san_password, self.san_clustername) def get_volume_stats(self, refresh=False): # we need to get stats for server. if refresh is True: total_capacity, free_capacity = self.update_volume_stats() stats = dict() stats["volume_backend_name"] = "hedvig" stats["vendor_name"] = "Hedvig Inc" stats["driver_version"] = self.VERSION stats["storage_protocol"] = constants.ISCSI stats["total_capacity_gb"] = total_capacity stats["free_capacity_gb"] = free_capacity stats["QoS_support"] = True self.group_stats = stats return self.group_stats def create_volume(self, volume): """Driver entry point for creating a new volume.""" try: qos_specs = None name, description, size = self.get_hedvig_volume_details(volume) vol_type_id = volume.volume_type_id if vol_type_id is not None: qos = volume_types.get_volume_type_qos_specs(vol_type_id) qos_specs = qos['qos_specs'] self.hedvig_create_virtualdisk(name, description, size, qos_specs) except exception.VolumeDriverException: msg = _('Failed to create volume %s. Rest API failed' ) % volume.name LOG.exception(msg) raise exception.VolumeDriverException(msg) except Exception: msg = _('Failed to create volume: %s') % volume.name LOG.exception(msg) raise exception.VolumeDriverException(msg) def delete_volume(self, volume): """Driver entry point for deleting volume.""" LOG.debug("Deleting volume: %s", volume.name) name = volume.name try: self.hedvig_delete_virtualdisk(name) except exception.VolumeDriverException: msg = _('Failed to delete volume %s. Rest API failed' ) % volume.name LOG.exception(msg) raise exception.VolumeDriverException(msg) except Exception: msg = _('Failed to delete volume: %s') % volume.name LOG.exception(msg) raise exception.VolumeDriverException(msg) def create_cloned_volume(self, volume, src_vref): """Create a clone of the volume.""" try: LOG.debug('Create cloned volume called ' 'volume_id = %(volume)s and src_vol_id = %(src_vol_id)s', {'volume': volume.id, 'src_vol_id': src_vref.id}) name, desc, size = self.get_hedvig_volume_details(volume) self.hrs.clone_vdisk(srcVolName=src_vref.name, dstVolName=name, size=size) except exception.VolumeDriverException: msg = _('Failed to create cloned volume. Rest API failed') LOG.exception(msg) raise exception.VolumeDriverException(msg) except Exception: msg = _('Failed to create cloned volume') LOG.exception(msg) raise exception.VolumeDriverException(msg) def initialize_connection(self, volume, connector): """Driver entry point to attach a volume to an instance. 
Assign any created volume to a compute node/controllerVM so that it can be attached to a instance. This driver returns a driver_volume_type of 'iscsi'. The format of the driver data is defined as follows -- similar to _get_iscsi_properties. """ LOG.debug('Initializing connection. volume: %s, ' 'connector: %s', volume, connector) try: computeHost = self.get_compute_host(connector) volName = volume.name tgtHost = self.hedvig_lookup_tgt(computeHost) if tgtHost is None: LOG.warning("No target registered for compute host %s", computeHost) tgtHost = self.hedvig_lookup_tgt() lunnum = self.hedvig_get_lun(tgtHost, volName) if lunnum == -1: LOG.error('Failed to get lun for volume: %s, ' 'hedvig controller: %s', volume, tgtHost) raise exception.VolumeDriverException() # Add access to the mgmt interface addr and iqn of compute host LOG.debug("Calling add access %(host)s : %(vol)s : %(iqn)s ", {'host': tgtHost, 'vol': volName, 'iqn': connector['initiator']}) self.hedvig_add_access(tgtHost, volName, connector['initiator']) # Add access to both storage and mgmt interface addrs for # iscsi discovery to succeed LOG.debug("Calling hedvig_get_iqn %s", socket.getfqdn()) controller_host_iqn = self.hedvig_get_iqn(socket.getfqdn()) LOG.debug("Calling add access with %s : %s : %s ", tgtHost, volName, controller_host_iqn) self.hedvig_add_access(tgtHost, volName, controller_host_iqn) targetName = ("%s%s-%s" % (self.DEFAULT_TARGET_NAME, tgtHost, lunnum)) portal = ("%s:%s" % (socket.gethostbyname(tgtHost), self.DEFAULT_ISCSI_PORT)) iscsi_properties = ({'target_discovered': True, 'target_iqn': targetName, 'target_portal': portal, 'target_lun': lunnum}) LOG.debug("iscsi_properties: %s", iscsi_properties) return {'driver_volume_type': 'iscsi', 'data': iscsi_properties} except exception.VolumeDriverException: msg = _('Volume assignment to connect failed. volume: %s ' 'Rest API failed') % volume LOG.exception(msg) raise exception.VolumeDriverException(msg) except Exception: msg = _('Volume assignment to connect failed. volume: %s') % volume LOG.exception(msg) raise exception.VolumeDriverException(msg) def terminate_connection(self, volume, connector, **kwargs): """Driver entry point to detach volume from instance.""" LOG.debug("Terminating connection. volume: %s, connector: %s", volume, connector) try: volName = volume.name if connector is None: LOG.debug("Removing ALL host connections for volume %s", volume) targetList = self.hrs.list_targets(computeHost=None) for target in targetList: self.hedvig_delete_lun(target, volName) return computeHost = self.get_compute_host(connector) tgtHost = self.hedvig_lookup_tgt(computeHost) if tgtHost is None: LOG.debug("No target registered for compute host %s", computeHost) tgtHost = self.hedvig_lookup_tgt() if tgtHost is None: msg = _('Failed to get hedvig controller') LOG.error(msg) raise exception.VolumeDriverException(msg) self.hedvig_delete_lun(tgtHost, volName) except exception.VolumeDriverException: msg = _('Failed to terminate connection. volume: %s ' 'Rest API failed') % volume LOG.exception(msg) raise exception.VolumeDriverException(msg) except Exception: msg = _('Failed to terminate connection. volume: %s') % volume LOG.exception(msg) raise exception.VolumeDriverException(msg) def create_snapshot(self, snapshot): """Driver entry point for creating a snapshot.""" try: volName = snapshot.volume_name snapshotName = snapshot.name project = snapshot.project_id snapshotId = snapshot.id LOG.info("Creating snapshot. 
volName: %s, snapshotName: %s, " "project: %s, snapshotId: %s", volName, snapshotName, project, snapshotId) self.hedvig_create_snapshot(volName, snapshotId) except exception.VolumeDriverException: msg = (_('Failed to create snapshot. snapshotName: %s ' 'Rest API failed') % snapshotName) LOG.exception(msg) raise exception.VolumeDriverException(msg) except Exception: msg = (_('Failed to create snapshot. snapshotName: %s') % snapshotName) LOG.exception(msg) raise exception.VolumeDriverException(msg) def delete_snapshot(self, snapshot): """Driver entry point for deleting a snapshot.""" try: volName = snapshot.volume_name snapshotName = snapshot.display_name project = snapshot.project_id snapshotId = snapshot.id LOG.info("Deleting snapshot. volName: %s, snapshotName: %s, " "project: %s", volName, snapshotName, project) self.hrs.delete_snapshot(snapshotName, volName, snapshotId) except exception.VolumeDriverException: msg = _('Failed to delete snapshot: %s, ' 'Rest API failed') % snapshotName LOG.exception(msg) raise exception.VolumeDriverException(msg) except Exception: msg = _('Failed to delete snapshot: %s') % snapshotName LOG.exception(msg) raise exception.VolumeDriverException(msg) def create_volume_from_snapshot(self, volume, snapshot): """Driver entry point for creating a new volume from a snapshot. This is the same as cloning. """ name, description, size = self.get_hedvig_volume_details(volume) snapshotName = snapshot.display_name snapshotId = snapshot.id srcVolName = snapshot.volume_name try: LOG.info('Creating volume from snapshot. Name: %(volname)s,' ' SrcVolName: %(src)s, Snap_id: %(sid)s', {'volname': name, 'src': srcVolName, 'sid': snapshotId}) self.hedvig_clone_snapshot(name, snapshotId, srcVolName, size) except exception.VolumeDriverException: msg = _('Failed to create volume from snapshot %s' ' Rest API failed') % snapshotName LOG.exception(msg) raise exception.VolumeDriverException(msg) except Exception: msg = _('Failed to create volume from snapshot %s') % snapshotName LOG.exception(msg) raise exception.VolumeDriverException(msg) def extend_volume(self, volume, newSize): """Resizes virtual disk. newSize should be greater than current size. """ try: name, description, size = self.get_hedvig_volume_details(volume) LOG.info('Resizing virtual disk. name: %s, ' 'newSize: %s', name, newSize) if (size / units.Gi) >= newSize: err = _("Shrinking of volumes are not allowed") LOG.error(err) raise exception.VolumeDriverException(err) self.hrs.resize_vdisk( name, newSize) except exception.VolumeDriverException: msg = _('Failed to extend volume. Rest API failed') LOG.exception(msg) raise exception.VolumeDriverException(msg) except Exception: msg = _('Failed to extend volume') LOG.exception(msg) raise exception.VolumeDriverException(msg) def check_for_export(self, context, volume_id): """Not relevant to Hedvig""" pass def get_export(self, volume): """Get the iSCSI export details for a volume.""" pass def ensure_export(self, context, volume): """Driver entry point to get the export info for an existing volume. Irrelevant for Hedvig. Export is created during attachment to instance. """ pass def create_export(self, context, volume, properties): """Driver entry point to get the export info for a new volume. Irrelevant for Hedvig. Export is created during attachment to instance. """ pass def remove_export(self, context, volume): """Driver entry point to remove an export for a volume. Irrelevant for Hedvig. Export should be deleted on detachment. 
""" pass def detach_volume(self, context, volume, attachment): pass def hedvig_create_snapshot(self, vDiskName, snapshotId=None): """Hedvig call to create snapshot of vdisk.""" LOG.debug("Creating snapshot..%s , %s.", vDiskName, snapshotId) try: snapshotName = self.hrs.create_snapshot(vDiskName, snapshotId) LOG.debug("Received snapshotName %s from rest call", snapshotName) return snapshotName except exception.VolumeDriverException: msg = _('Failed to create snapshot for vdisk %s ' 'Rest API failed') % vDiskName LOG.exception(msg) raise exception.VolumeDriverException() except Exception: msg = _('Failed to create snapshot for vdisk %s') % vDiskName LOG.exception(msg) raise exception.VolumeDriverException() def update_volume_stats(self): LOG.debug('Update volume stats called') try: total_capacity, free_capacity = self.hrs.update_volume_stats() except exception.VolumeDriverException: msg = _('Unable to fetch volume stats. Rest API failed') LOG.exception(msg) raise exception.VolumeDriverException(msg) except Exception: msg = _('Unable to fetch volume stats') LOG.exception(msg) raise exception.VolumeDriverException(msg) return (total_capacity, free_capacity) def get_hedvig_volume_details(self, volume): volName = volume.name project = volume.project_id displayName = volume.display_name displayDescription = volume.display_description description = ("%s\n%s\n%s" % (project, displayName, displayDescription)) size = volume.size * units.Gi return volName, description, size def get_compute_host(self, connector): connectorHost = socket.getfqdn(connector['host']) localHost = socket.gethostname() computeHost = localHost if connectorHost != localHost: computeHost = connectorHost return computeHost def hedvig_lookup_tgt(self, host=None): """Get the tgt instance associated with the compute host""" LOG.debug("Looking up hedvig controller for compute host: %s", host) try: targetList = self.hrs.list_targets(computeHost=host) tgt = None if len(targetList) > 0: tgt = targetList[0] LOG.debug("Found hedvig controller: %s, for host: %s", tgt, host) return tgt except exception.VolumeDriverException: msg = _('Failed to get hedvig controller for compute %s ' 'Rest API failed') % host LOG.exception(msg) raise exception.VolumeDriverException(msg) except Exception: msg = _('Failed to get hedvig controller for compute %s ') % host LOG.exception(msg) raise exception.VolumeDriverException(msg) def hedvig_delete_lun(self, tgtHost, vDiskName): try: LOG.debug("Deleting lun. hedvig controller: %s, vDiskName: %s,", tgtHost, vDiskName) self.hrs.unmap_lun(tgtHost, vDiskName) except Exception: msg = _('Failed to delete lun') LOG.exception(msg) raise exception.VolumeDriverException(msg) def hedvig_get_lun(self, tgtHost, vDiskName): """Looks up lun based on tgthost and vDiskName. If lun does not exist then call add_lun and return the lun number. If lun exists, just return the lun number. """ LOG.debug("Getting lun. 
hedvig controller: %s, vDiskName: %s", tgtHost, vDiskName) try: lunNo = self.hrs.get_lun(tgtHost, vDiskName) if lunNo > -1: return lunNo # If the lun is not found, add lun for the vdisk LOG.debug("Calling add lun on target : %s vdisk %s", tgtHost, vDiskName) self.hrs.add_lun(tgtHost, vDiskName, False) lunNo = self.hrs.get_lun(tgtHost, vDiskName) return lunNo except Exception: msg = _('Failed to get lun for vdisk: %s') % vDiskName LOG.exception(msg) raise exception.VolumeDriverException(msg) def hedvig_get_iqn(self, hostname): """Looks up the iqn for the given host.""" try: iqn = self.hrs.get_iqn(hostname) LOG.debug("Got IQN: %s, for hostname: %s", iqn, hostname) return iqn except Exception: msg = _('Failed to get iqn for hostname: %s') % hostname LOG.exception(msg) raise exception.VolumeDriverException(msg) def hedvig_add_access(self, tgtHost, volName, initiator): """Adds access to LUN for initiator's ip/iqn.""" try: LOG.info("Adding access. hedvig controller: %s, vol name %s, " "initiator: %s", tgtHost, volName, initiator) self.hrs.add_access(tgtHost, volName, "iqn", initiator) except Exception: msg = _('Failed to add access. hedvig controller: %s') % tgtHost LOG.exception(msg) raise exception.VolumeDriverException(msg) def hedvig_create_virtualdisk(self, name, description, size, qos_specs): try: LOG.info('Creating virtual disk. name: %s, description: %s,' 'size: %s', name, description, size) vDiskInfo = { 'name': name, 'blockSize': HedvigISCSIDriver.DEFAULT_VOL_BLOCK_SIZE, 'size': size, 'createdBy': HedvigISCSIDriver.DEFAULT_CREATEDBY, 'description': description, 'residence': config.Config.DiskResidence[1], 'replicationFactor': 3, 'replicationPolicy': 'Agnostic', 'clusteredFileSystem': False, 'exportedBlockSize': HedvigISCSIDriver.DEFAULT_EXPORT_BLK_SIZE, 'cacheEnabled': config.Config.defaultCinderCacheEnable, 'diskType': 'BLOCK', 'immutable': False, 'deduplication': config.Config.defaultCinderDedupEnable, 'compressed': config.Config.defaultCinderCompressEnable, 'cloudEnabled': False, 'cloudProvider': 0, 'isClone': False, 'consistency': 'STRONG', 'scsi3pr': False } if qos_specs: kvs = qos_specs['specs'] for key, value in kvs.items(): if "dedup_enable" == key: val = self.parse_and_get_boolean_entry( value) if val: vDiskInfo['deduplication'] = val elif "compressed_enable" == key: val = self.parse_and_get_boolean_entry( value) if val: vDiskInfo['compressed'] = True elif "cache_enable" == key: val = self.parse_and_get_boolean_entry( value) if val: vDiskInfo['cacheEnabled'] = val elif "encryption" == key: val = self.parse_and_get_boolean_entry( value) if val: vDiskInfo['encryption'] = val elif "replication_factor" == key: val = int(value) if val > 0: vDiskInfo['replicationFactor'] = val elif "replication_policy" == key: val = value.strip(" \n\t").lower() if val: vDiskInfo['replicationPolicy'] = val elif "disk_residence" == key: val = value.strip(" \n\t").lower() if val: vDiskInfo['residence'] = val elif "replication_policy_info" == key: val = value.split(',') if len(val) != 0: dcList = [] for dataCenter in val: dcList.append(dataCenter) vDiskInfo['dataCenters'] = dcList if vDiskInfo['deduplication'] and ( vDiskInfo['compressed'] is False): LOG.error('Cannot create dedup enabled disk without' ' compression enabled') raise exception.VolumeDriverException() self.hrs.create_vdisk(vDiskInfo) except Exception: msg = _('Failed to create volume') LOG.exception(msg) raise exception.VolumeDriverException(msg) def hedvig_delete_virtualdisk(self, name): LOG.info('Deleting virtual disk. 
name - %s', name) try: self.hrs.delete_vdisk(name) except Exception: msg = _('Failed to delete Vdisk') LOG.exception(msg) raise exception.VolumeDriverException(msg) def hedvig_clone_snapshot(self, dstVolName, openstackSID, srcVolName, size): LOG.info("Cloning a snapshot.dstVolName: %s,openstackSID:%s," "srcVolName: %s", dstVolName, openstackSID, srcVolName) try: self.hrs.clone_hedvig_snapshot( dstVolName=dstVolName, snapshotID=openstackSID, srcVolName=srcVolName, size=size) except Exception: msg = _('Failed to clone snapshot') LOG.exception(msg) raise exception.VolumeDriverException(msg) def parse_and_get_boolean_entry(self, entry): entry = entry.strip(" \t\n") return strutils.bool_from_string(entry) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/drivers/hedvig/rest_client.py0000664000175000017500000005230600000000000023461 0ustar00zuulzuul00000000000000# Copyright (c) 2018 Hedvig, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Rest Client for Hedvig Openstack implementation. """ from http import HTTPStatus import json import random import urllib from oslo_log import log as logging from oslo_utils import units from cinder import exception from cinder.i18n import _ from cinder.volume.drivers.hedvig import config LOG = logging.getLogger(__name__) class RestClient(object): def __init__(self, nodes, username, password, cluster): """Hedvig Rest Client :param node: hostname of one of the nodes in the cluster :param username: username of the cluster :param password: password of the cluster :param cluster: clustername of the cluster """ LOG.debug('init called with %s , %s', nodes, cluster) self.username = username self.password = password self.cluster = cluster self.nodes = nodes self.nodeMap = {} def connect(self): self.store_node_map(self.nodes) if len(self.nodeMap) == 0: msg = _('Unable to connect to the nodes') raise exception.VolumeDriverException(msg) def get_session_id(self, node): """Retrieves the session Id :param node: hostname of the node :return: session ID which is valid for 15 minutes """ LOG.debug("get_session_id called with node %s", node) data = { 'request': { 'type': 'Login', 'category': 'UserManagement', 'params': { 'userName': self.username, 'password': self.password, 'cluster': self.cluster } } } obj = self.query(data=data, node=node) if obj['status'] != 'ok': msg = _('GetSessionId failure') raise exception.VolumeDriverException(msg) return (obj['result']['sessionId']) def get_all_cluster_nodes(self, node): """Retrieves all the nodes present in the cluster :param node: hostname of the node :return: nodes present in the cluster """ LOG.debug("get_all_cluster_nodes called with node %s", node) data = { 'request': { 'type': 'ListClusterNodes', 'category': 'VirtualDiskManagement', 'sessionId': self.get_session_id(node), } } obj = self.make_rest_call(data=data, node=node) return obj['result'] def store_node_map(self, nodes): """Stores all the node information along with their 
sessionID in dict :param nodes: hostname of the nodes in the cluster """ LOG.debug("store_node_map called with node %s", nodes) exitFlag = False node_list = [] for n in nodes.split(','): node_list.append(n.strip()) for node in node_list: try: LOG.debug("Attempting store_node_map with node %s", node) nodeList = self.get_all_cluster_nodes(node) exitFlag = True for node_ in nodeList: self.nodeMap[node_] = self.get_session_id(node_) except urllib.error.HTTPError as e: if e.code == HTTPStatus.NOT_FOUND: LOG.debug("Client not found") else: LOG.debug("Client not available") except Exception: LOG.exception('Retrying store_node_map with next node') if exitFlag: return def refresh_session_ids(self): """In case of session failure , it refreshes all the session ID stored in nodeMap """ LOG.debug("refresh_session_ids called") if len(self.nodeMap.keys()) == 0: msg = _('NodeMap is empty') raise exception.VolumeDriverException(msg) for node, val in self.nodeMap.items(): self.nodeMap[node] = self.get_session_id(node) def query(self, data, node): """Makes a rest query with given params :param data: json given as param to Rest call :param node: hostname of the node :return: REST response """ data = urllib.parse.urlencode(data).encode("utf-8") req = urllib.request.Request("http://%s/rest/" % node, data) response = urllib.request.urlopen(req) json_str = response.read() obj = json.loads(json_str) LOG.debug("Rest call output %s ", obj) return obj def make_rest_call(self, data, node): """Makes a rest Call and retries it 5 times in case of rest failure :param data: json given as param to Rest call :param node: hostname of the node :return: """ retryCount = 0 while retryCount < config.Config.retryCount: retryCount = retryCount + 1 try: LOG.debug("Rest call started with node %s " "and data: %s", node, data) obj = self.query(data, node) if obj['status'] == 'ok' or obj['status'] == 'warning': return obj # We need to refresh sessionIDs if earlier ones are expired elif 'session-failure' in obj['status']: self.refresh_session_ids() session_id = self.retrieve_session_id(node) data['request']['sessionId'] = session_id except Exception as e: LOG.debug("Exception details: data - %s, node - %s " "exception - %s", data, node, e.args) node = self.get_pages_host() else: msg = _('REST call status - Retry limit reached') raise exception.VolumeDriverException(msg) def create_vdisk(self, vDiskInfo): """Rest call to create a vdisk :param vDiskInfo: json passsed to the rest call """ LOG.debug("create_vdisk called") node = self.get_pages_host() sessionId = self.retrieve_session_id(node) sizeInB = vDiskInfo['size'] / units.Gi sizeInJson = {'unit': "GB", 'value': float(sizeInB)} vDiskInfo['size'] = sizeInJson data = { 'request': { 'type': 'AddVirtualDisk', 'category': 'VirtualDiskManagement', 'params': vDiskInfo, 'sessionId': sessionId, } } obj = self.make_rest_call(data=data, node=node) if obj['result'][0]['status'] != 'ok': errmsg = _('create_vdisk REST call status - %s') % obj['status'] raise exception.VolumeDriverException(errmsg) def resize_vdisk(self, vDiskName, value): """Rest Call to resize Vdisk :param vDiskName: name of the vdisk :param unit: unit is GB for openstack :param value: size of the resized vdisk in GB """ node = self.get_pages_host() sessionId = self.retrieve_session_id(node) LOG.debug("resize_vdisk called") data = { 'request': { 'type': 'ResizeDisks', 'category': 'VirtualDiskManagement', 'params': { 'virtualDisks': [vDiskName], 'size': { 'unit': "GB", 'value': value }, }, 'sessionId': sessionId, } } obj = 
self.make_rest_call(data=data, node=node) if obj['result'][0]['status'] != 'ok': errmsg = _('resize_vdisk REST call status - %s') % obj['status'] raise exception.VolumeDriverException(errmsg) def delete_vdisk(self, vDiskName): """Rest call to delete Vdisk :param vDiskName: name of the vdisk :return: Status of the rest call """ LOG.debug("delete_vdisk called %s", vDiskName) node = self.get_pages_host() sessionId = self.retrieve_session_id(node) data = { 'request': { 'type': 'DeleteVDisk', 'category': 'VirtualDiskManagement', 'params': { 'virtualDisks': [vDiskName], }, 'sessionId': sessionId, } } obj = self.make_rest_call(data=data, node=node) if obj['status'] != 'ok': if "couldn't be found" not in obj['message']: errmsg = _('REST call status - %s') % obj['status'] raise exception.VolumeDriverException(errmsg) def get_lun(self, target, vDiskName): """Retrieve lun number :param target: hostname of the target :param vDiskName: name of the Vdisk :return: lun number """ try: LOG.debug("get_lun called for vdisk %s", vDiskName) node = self.get_pages_host() sessionId = self.retrieve_session_id(node) data = { 'request': { 'type': 'GetLun', 'category': 'VirtualDiskManagement', 'params': { 'virtualDisk': vDiskName, 'target': target, }, 'sessionId': sessionId, } } obj = self.make_rest_call(data=data, node=node) if obj['status'] != 'ok': return -1 return obj['result']['lun'] except Exception: return -1 def get_iqn(self, host): """Retrieve IQN of the host. :param host: hostname :return: iqn of the host """ LOG.debug("get_iqn called for host %s", host) node = self.get_pages_host() sessionId = self.retrieve_session_id(node) data = { 'request': { 'type': 'GetIqn', 'category': 'VirtualDiskManagement', 'params': { 'host': host, }, 'sessionId': sessionId, } } obj = self.make_rest_call(data=data, node=node) if obj['status'] != 'ok': if "IQN not found" in obj['message']: return "ALL" errmsg = _('REST call status - %s') % obj['status'] raise exception.VolumeDriverException(errmsg) return obj['result']['iqn'] def add_lun(self, tgtHost, vDiskName, readonly): """Rest Call to Add Lun :param tgtHost: hostname of target :param vDiskName: name of vdisk :param readonly: boolean readonly value """ LOG.debug( "add_lun called with target %s, vdisk %s", tgtHost, vDiskName) node = self.get_pages_host() sessionId = self.retrieve_session_id(node) data = { 'request': { 'type': 'AddLun', 'category': 'VirtualDiskManagement', 'params': { 'virtualDisks': [vDiskName], 'targets': [tgtHost], 'readonly': readonly, }, 'sessionId': sessionId, } } obj = self.make_rest_call(data=data, node=node) restCallStatus = obj['result'][0]['status'] tgts = obj['result'][0]['targets'] addLunStatus = tgts[0]['status'] if restCallStatus != 'ok' or addLunStatus != 'ok': errmsg = _('REST call status - %s') % obj['status'] raise exception.VolumeDriverException(errmsg) def unmap_lun(self, target, vDiskName): """Rest call to unmap Lun :param target: hostname of the target :param vDiskName: name of the vdisk :return: true if successful """ LOG.debug("unmap_lun called with target %s, vdisk %s", target, vDiskName) node = self.get_pages_host() sessionId = self.retrieve_session_id(node) data = { 'request': { 'type': 'UnmapLun', 'category': 'VirtualDiskManagement', 'params': { 'virtualDisk': vDiskName, 'target': target, }, 'sessionId': sessionId, } } obj = self.make_rest_call(data=data, node=node) if obj['status'] != 'ok': msg = "is not mapped to the specified controller" if msg not in obj['message']: errmsg = _('REST call status - %s') % obj['status'] raise 
exception.VolumeDriverException(errmsg) return True def add_access(self, host, vDiskName, type, address): """Rest Call to Add access :param host: hostname :param vDiskName: name of vdisk :param type: type is iqn for openstack :param address: iqn address """ LOG.debug( "add_access called with param host %s, vdisk %s", host, vDiskName) node = self.get_pages_host() sessionId = self.retrieve_session_id(node) data = { 'request': { 'type': 'PersistACLAccess', 'category': 'VirtualDiskManagement', 'params': { 'virtualDisks': [vDiskName], 'host': host, 'type': type, 'address': address }, 'sessionId': sessionId, } } obj = self.make_rest_call(data=data, node=node) if obj['status'] != 'ok' or obj['result'][0]['status'] != 'ok': errmsg = _('REST call status - %s') % obj['status'] raise exception.VolumeDriverException(errmsg) def create_snapshot(self, vDiskName, snapshotId): """Rest Call to create snapshot :param vDiskName: name of the vdisk :param snapshotId: snapshotId of the snapshot :return: status of the rest call """ LOG.debug("create_snapshot called with vdisk %s", vDiskName) node = self.get_pages_host() sessionId = self.retrieve_session_id(node) data = { 'request': { 'type': 'MakeSnapshot', 'category': 'SnapshotManagement', 'params': { 'virtualDisks': [vDiskName], }, 'sessionId': sessionId, } } if snapshotId: param = data['request']['params'] param['openstackSID'] = snapshotId obj = self.make_rest_call(data=data, node=node) if obj['status'] != 'ok' or obj['result'][0]['status'] != 'ok': errmsg = _('REST call status - %s') % obj['status'] raise exception.VolumeDriverException(errmsg) return obj['result'][0]['snapshotName'] def clone_vdisk(self, srcVolName, dstVolName, size): """Rest Call to clone vdisk """ LOG.debug("clonevdisk called vdisk %s, %s", srcVolName, dstVolName) node = self.get_pages_host() sessionId = self.retrieve_session_id(node) data = { 'request': { 'type': 'CloneVdisk', 'category': 'SnapshotManagement', 'params': { 'srcVolName': srcVolName, 'cloneVolName': dstVolName, 'size': size }, 'sessionId': sessionId, } } obj = self.make_rest_call(data=data, node=node) if obj['status'] != 'ok': errmsg = _('REST call status - %s') % obj['status'] raise exception.VolumeDriverException(errmsg) def get_val_in_gb(self, value, unit): unitRef = { 'B': 1, 'KB': units.Ki, 'MB': units.Mi, 'GB': units.Gi, 'TB': units.Ti, 'PB': units.Pi } return value * unitRef[unit] / units.Gi def update_volume_stats(self): """Fetch cluster level details""" LOG.debug("Update volume stats called") node = self.get_pages_host() sessionId = self.retrieve_session_id(node) data = { 'request': { 'type': 'ClusterInformation', 'category': 'ClusterWatch', 'sessionId': sessionId, } } obj = self.make_rest_call(data=data, node=node) if obj['status'] != 'ok': errmsg = _('REST call status - %s') % obj['status'] raise exception.VolumeDriverException(errmsg) total = obj['result']['capacity']['total']['value'] used = obj['result']['capacity']['used']['value'] capacity = obj['result']['capacity'] total_unit = capacity['total']['units'] used_unit = capacity['used']['units'] total_capacity = self.get_val_in_gb(total, total_unit) used_capacity = self.get_val_in_gb(used, used_unit) free_capacity = total_capacity - used_capacity LOG.debug("total_capacity %s free_capactity %s", total_capacity, free_capacity) return (total_capacity, free_capacity) def clone_hedvig_snapshot(self, dstVolName, snapshotID, srcVolName, size): """Rest Call to clone hedvig snapshot """ LOG.debug("clone_hedvig_snapshot %s, %s", dstVolName, srcVolName) node = 
self.get_pages_host() sessionId = self.retrieve_session_id(node) data = { 'request': { 'type': 'CloneVdisk', 'category': 'SnapshotManagement', 'params': { 'cloneVolName': dstVolName, 'openstackSID': snapshotID, 'srcVolName': srcVolName, 'size': size }, 'sessionId': sessionId, } } obj = self.make_rest_call(data=data, node=node) if obj['status'] != 'ok': errmsg = _('REST call status - %s') % obj['status'] raise exception.VolumeDriverException(errmsg) def delete_snapshot(self, snapshotName, vDiskName, snapshotId): """Rest call to delete snapshot :param snapshotName: name of the snapshot to be deleted """ LOG.debug( "delete_snapshot called with snapshot %s", snapshotName) node = self.get_pages_host() sessionId = self.retrieve_session_id(node) data = { 'request': { 'type': 'DeleteSnapshot', 'category': 'SnapshotManagement', 'params': { 'snapshotName': snapshotName, 'openstackSID': snapshotId, 'openstackVolName': vDiskName }, 'sessionId': sessionId, } } obj = self.make_rest_call(data=data, node=node) if obj['status'] != 'ok': errmsg = _('REST call status - %s') % obj['status'] raise exception.VolumeDriverException(errmsg) def list_targets(self, computeHost): """Rest Call to ListTargets for a given hostname :param computeHost: hostname of the computeHost :return: list of targets """ LOG.debug("list_targets called with computehost %s", computeHost) node = self.get_pages_host() sessionId = self.retrieve_session_id(node) targets = [] data = { 'request': { 'type': 'ListTargets', 'category': 'VirtualDiskManagement', 'sessionId': sessionId, } } if computeHost: data['request']['params'] = {} data['request']['params']['computeHost'] = computeHost obj = self.make_rest_call(data=data, node=node) if obj['status'] != 'ok': errmsg = _('REST call status - %s') % obj['status'] raise exception.VolumeDriverException(errmsg) for ch in obj['result']: if ch['protocol'] == 'block': targets.append(ch['target']) return targets def get_pages_host(self): """Returns a random host from nodemap :return: hostname """ LOG.debug("get_pages_host called") if not self.nodeMap: msg = _('NodeMap is empty') raise exception.VolumeDriverException(msg) return random.choice(list(self.nodeMap.keys())) def retrieve_session_id(self, node): """returns sessionID of the given node :param node: hostname of the node :return: session ID of the given host """ LOG.debug("retrieve_session_id called with node %s", node) if len(self.nodeMap.keys()) == 0: msg = _('NodeMap is empty') raise exception.VolumeDriverException(msg) return self.nodeMap[str(node)] ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1759315577.355121 cinder-27.0.0/cinder/volume/drivers/hitachi/0000775000175000017500000000000000000000000020731 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/drivers/hitachi/__init__.py0000664000175000017500000000000000000000000023030 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/drivers/hitachi/hbsd_common.py0000664000175000017500000015177600000000000023614 0ustar00zuulzuul00000000000000# Copyright (C) 2020, 2024, Hitachi, Ltd. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # """Common module for Hitachi HBSD Driver.""" from collections import defaultdict import json import re from oslo_config import cfg from oslo_log import log as logging from oslo_utils import excutils from cinder import coordination from cinder import exception from cinder.i18n import _ from cinder.volume import configuration from cinder.volume.drivers.hitachi import hbsd_utils as utils from cinder.volume import volume_types from cinder.volume import volume_utils _GROUP_NAME_MAX_LEN_FC = 64 _GROUP_NAME_MAX_LEN_ISCSI = 32 GROUP_NAME_ALLOWED_CHARS = 'a-zA-Z0-9.@_:-' GROUP_NAME_VAR_WWN = '{wwn}' GROUP_NAME_VAR_IP = '{ip}' GROUP_NAME_VAR_HOST = '{host}' _GROUP_NAME_VAR_WWN_LEN = 16 _GROUP_NAME_VAR_IP_LEN = 15 _GROUP_NAME_VAR_HOST_LEN = 1 _GROUP_NAME_VAR_LEN = {GROUP_NAME_VAR_WWN: _GROUP_NAME_VAR_WWN_LEN, GROUP_NAME_VAR_IP: _GROUP_NAME_VAR_IP_LEN, GROUP_NAME_VAR_HOST: _GROUP_NAME_VAR_HOST_LEN} STR_VOLUME = 'volume' STR_SNAPSHOT = 'snapshot' _UUID_PATTERN = re.compile(r'^[\da-f]{32}$') _INHERITED_VOLUME_OPTS = [ 'volume_backend_name', 'volume_driver', 'reserved_percentage', 'use_multipath_for_image_xfer', 'enforce_multipath_for_image_xfer', 'max_over_subscription_ratio', 'use_chap_auth', 'chap_username', 'chap_password', ] COMMON_VOLUME_OPTS = [ cfg.StrOpt( 'hitachi_storage_id', default=None, help='Product number of the storage system.'), cfg.ListOpt( 'hitachi_pools', default=[], deprecated_name='hitachi_pool', help='Pool number[s] or pool name[s] of the DP pool.'), cfg.StrOpt( 'hitachi_snap_pool', default=None, help='Pool number or pool name of the snapshot pool.'), cfg.StrOpt( 'hitachi_ldev_range', default=None, help='Range of the LDEV numbers in the format of \'xxxx-yyyy\' that ' 'can be used by the driver. Values can be in decimal format ' '(e.g. 1000) or in colon-separated hexadecimal format ' '(e.g. 00:03:E8).'), cfg.ListOpt( 'hitachi_target_ports', default=[], help='IDs of the storage ports used to attach volumes to the ' 'controller node. To specify multiple ports, connect them by ' 'commas (e.g. CL1-A,CL2-A).'), cfg.ListOpt( 'hitachi_compute_target_ports', default=[], help='IDs of the storage ports used to attach volumes to compute ' 'nodes. To specify multiple ports, connect them by commas ' '(e.g. CL1-A,CL2-A).'), cfg.BoolOpt( 'hitachi_group_create', default=False, help='If True, the driver will create host groups or iSCSI targets on ' 'storage ports as needed.'), cfg.BoolOpt( 'hitachi_group_delete', default=False, help='If True, the driver will delete host groups or iSCSI targets on ' 'storage ports as needed.'), cfg.IntOpt( 'hitachi_copy_speed', default=3, min=1, max=15, help='Copy speed of storage system. 
1 or 2 indicates ' 'low speed, 3 indicates middle speed, and a value between 4 and ' '15 indicates high speed.'), cfg.IntOpt( 'hitachi_copy_check_interval', default=3, min=1, max=600, help='Interval in seconds to check copying status during a volume ' 'copy.'), cfg.IntOpt( 'hitachi_async_copy_check_interval', default=10, min=1, max=600, help='Interval in seconds to check asynchronous copying status during ' 'a copy pair deletion or data restoration.'), ] COMMON_PORT_OPTS = [ cfg.BoolOpt( 'hitachi_port_scheduler', default=False, help='Enable port scheduling of WWNs to the configured ports so that ' 'WWNs are registered to ports in a round-robin fashion.'), ] COMMON_PAIR_OPTS = [ cfg.IntOpt( 'hitachi_pair_target_number', default=0, min=0, max=99, help='Pair target name of the host group or iSCSI target'), ] COMMON_NAME_OPTS = [ cfg.StrOpt( 'hitachi_group_name_format', default=None, help='Format of host groups, iSCSI targets, and server objects.'), ] CONF = cfg.CONF CONF.register_opts(COMMON_VOLUME_OPTS, group=configuration.SHARED_CONF_GROUP) CONF.register_opts(COMMON_PORT_OPTS, group=configuration.SHARED_CONF_GROUP) CONF.register_opts(COMMON_PAIR_OPTS, group=configuration.SHARED_CONF_GROUP) CONF.register_opts(COMMON_NAME_OPTS, group=configuration.SHARED_CONF_GROUP) LOG = logging.getLogger(__name__) MSG = utils.HBSDMsg def str2int(num): """Convert a string into an integer.""" if not num: return None if num.isdigit(): return int(num) if not re.match(r'[0-9a-fA-F][0-9a-fA-F]:[0-9a-fA-F]' + '[0-9a-fA-F]:[0-9a-fA-F][0-9a-fA-F]$', num): return None try: return int(num.replace(':', ''), 16) except ValueError: return None class HBSDCommon(): """Common class for Hitachi HBSD Driver.""" def __init__(self, conf, driverinfo, db): """Initialize instance variables.""" self.conf = conf self.db = db self.ctxt = None self.lock = { 'do_setup': 'do_setup', } self.driver_info = driverinfo self.storage_info = { 'protocol': driverinfo['proto'], 'pool_id': None, 'snap_pool_id': None, 'ldev_range': [], 'controller_ports': [], 'compute_ports': [], 'pair_ports': [], 'wwns': {}, 'portals': {}, } self.storage_id = None if self.storage_info['protocol'] == 'FC': self.group_name_format = { 'group_name_max_len': _GROUP_NAME_MAX_LEN_FC, 'group_name_var_cnt': { GROUP_NAME_VAR_WWN: [1], GROUP_NAME_VAR_IP: [0], GROUP_NAME_VAR_HOST: [0, 1], }, 'group_name_format_default': self.driver_info[ 'target_prefix'] + '{wwn}', } if self.storage_info['protocol'] == 'iSCSI': self.group_name_format = { 'group_name_max_len': _GROUP_NAME_MAX_LEN_ISCSI, 'group_name_var_cnt': { GROUP_NAME_VAR_WWN: [0], GROUP_NAME_VAR_IP: [1], GROUP_NAME_VAR_HOST: [0, 1], }, 'group_name_format_default': self.driver_info[ 'target_prefix'] + '{ip}', } self.format_info = { 'group_name_format': self.group_name_format[ 'group_name_format_default'], 'group_name_format_without_var_len': ( len(re.sub('|'.join([GROUP_NAME_VAR_WWN, GROUP_NAME_VAR_IP, GROUP_NAME_VAR_HOST]), '', self.group_name_format['group_name_format_default']))), 'group_name_var_cnt': { GROUP_NAME_VAR_WWN: self.group_name_format[ 'group_name_format_default'].count(GROUP_NAME_VAR_WWN), GROUP_NAME_VAR_IP: self.group_name_format[ 'group_name_format_default'].count(GROUP_NAME_VAR_IP), GROUP_NAME_VAR_HOST: self.group_name_format[ 'group_name_format_default'].count(GROUP_NAME_VAR_HOST), } } self._required_common_opts = [ self.driver_info['param_prefix'] + '_storage_id', self.driver_info['param_prefix'] + '_pools', ] self.port_index = {} def get_pool_id_of_volume(self, volume): pools = self._stats['pools'] if 
len(pools) == 1: return pools[0]['location_info']['pool_id'] pool_name = volume_utils.extract_host(volume['host'], 'pool') for pool in pools: if pool['pool_name'] == pool_name: return pool['location_info']['pool_id'] return None def create_ldev( self, size, extra_specs, pool_id, ldev_range, qos_specs=None): """Create an LDEV and return its LDEV number.""" raise NotImplementedError() def modify_ldev_name(self, ldev, name): """Modify LDEV name.""" raise NotImplementedError() def create_volume(self, volume): """Create a volume and return its properties.""" extra_specs = self.get_volume_extra_specs(volume) pool_id = self.get_pool_id_of_volume(volume) ldev_range = self.storage_info['ldev_range'] qos_specs = utils.get_qos_specs_from_volume(volume) try: ldev = self.create_ldev(volume['size'], extra_specs, pool_id, ldev_range, qos_specs=qos_specs) except Exception: with excutils.save_and_reraise_exception(): self.output_log(MSG.CREATE_LDEV_FAILED) self.modify_ldev_name(ldev, volume['id'].replace("-", "")) return { 'provider_location': str(ldev), } def get_ldev_info(self, keys, ldev, **kwargs): """Return a dictionary of LDEV-related items.""" raise NotImplementedError() def create_pair_on_storage( self, pvol, svol, snap_pool_id, is_snapshot=False): """Create a copy pair on the storage.""" raise NotImplementedError() def wait_copy_completion(self, pvol, svol): """Wait until copy is completed.""" raise NotImplementedError() def copy_on_storage( self, pvol, size, extra_specs, pool_id, snap_pool_id, ldev_range, is_snapshot=False, sync=False, is_rep=False, qos_specs=None): """Create a copy of the specified LDEV on the storage.""" ldev_info = self.get_ldev_info(['status', 'attributes'], pvol) if ldev_info['status'] != 'NML': msg = self.output_log(MSG.INVALID_LDEV_STATUS_FOR_COPY, ldev=pvol) self.raise_error(msg) svol = self.create_ldev( size, extra_specs, pool_id, ldev_range, qos_specs=qos_specs) try: self.create_pair_on_storage( pvol, svol, snap_pool_id, is_snapshot=is_snapshot) if sync or is_rep: self.wait_copy_completion(pvol, svol) except Exception: with excutils.save_and_reraise_exception(): try: self.delete_ldev(svol) except exception.VolumeDriverException: self.output_log(MSG.DELETE_LDEV_FAILED, ldev=svol) return svol def create_volume_from_src(self, volume, src, src_type, is_rep=False): """Create a volume from a volume or snapshot and return its properties. 
""" ldev = self.get_ldev(src) if ldev is None: msg = self.output_log( MSG.INVALID_LDEV_FOR_VOLUME_COPY, type=src_type, id=src['id']) self.raise_error(msg) size = volume['size'] extra_specs = self.get_volume_extra_specs(volume) pool_id = self.get_pool_id_of_volume(volume) snap_pool_id = self.storage_info['snap_pool_id'] ldev_range = self.storage_info['ldev_range'] qos_specs = utils.get_qos_specs_from_volume(volume) new_ldev = self.copy_on_storage(ldev, size, extra_specs, pool_id, snap_pool_id, ldev_range, is_rep=is_rep, qos_specs=qos_specs) self.modify_ldev_name(new_ldev, volume['id'].replace("-", "")) if is_rep: self.delete_pair(new_ldev) return { 'provider_location': str(new_ldev), } def create_cloned_volume(self, volume, src_vref): """Create a clone of the specified volume and return its properties.""" return self.create_volume_from_src(volume, src_vref, STR_VOLUME) def create_volume_from_snapshot(self, volume, snapshot): """Create a volume from a snapshot and return its properties.""" return self.create_volume_from_src(volume, snapshot, STR_SNAPSHOT) def delete_pair_based_on_svol(self, pvol, svol_info): """Disconnect all volume pairs to which the specified S-VOL belongs.""" raise NotImplementedError() def get_pair_info(self, ldev, ldev_info=None): """Return volume pair info(LDEV number, pair status and pair type).""" raise NotImplementedError() def delete_pair(self, ldev, ldev_info=None): """Disconnect all volume pairs to which the specified LDEV belongs. :param int ldev: The ID of the LDEV whose TI pair needs be deleted :param dict ldev_info: LDEV info :return: None :raises VolumeDriverException: if the LDEV is a P-VOL of a TI pair """ pair_info = self.get_pair_info(ldev, ldev_info) if not pair_info: return if pair_info['pvol'] == ldev: self.output_log( MSG.UNABLE_TO_DELETE_PAIR, pvol=pair_info['pvol']) self.raise_busy() else: self.delete_pair_based_on_svol( pair_info['pvol'], pair_info['svol_info'][0]) def find_all_mapped_targets_from_storage(self, targets, ldev): """Add all port-gids connected with the LDEV to the list.""" raise NotImplementedError() def unmap_ldev(self, targets, ldev): """Delete the LUN between the specified LDEV and port-gid.""" raise NotImplementedError() def unmap_ldev_from_storage(self, ldev): """Delete the connection between the specified LDEV and servers.""" targets = { 'list': [], } self.find_all_mapped_targets_from_storage(targets, ldev) self.unmap_ldev(targets, ldev) def delete_ldev_from_storage(self, ldev): """Delete the specified LDEV from the storage.""" raise NotImplementedError() def delete_ldev(self, ldev, ldev_info=None): """Delete the specified LDEV. :param int ldev: The ID of the LDEV to be deleted :param dict ldev_info: LDEV info :return: None """ self.delete_pair(ldev, ldev_info) self.unmap_ldev_from_storage(ldev) self.delete_ldev_from_storage(ldev) def is_invalid_ldev(self, ldev, obj, ldev_info_): """Check if the specified LDEV corresponds to the specified object. If the LDEV label and the object's id or name_id do not match, the LDEV was deleted and another LDEV with the same ID was created for another volume or snapshot. In this case, we say that the LDEV is invalid. If the LDEV label is not set or its format is unexpected, we cannot judge if the LDEV corresponds to the object. This can happen if the LDEV was created in older versions of this product or if the user overwrote the label. In this case, we just say that the LDEV is not invalid, although we are not completely sure about it. 
The reason for using name_id rather than id for volumes in comparison is that id of the volume that corresponds to the LDEV changes by host-assisted migration while that is not the case with name_id and that the LDEV label is created from id of the volume when the LDEV is created and is never changed after that. Because Snapshot objects do not have name_id, we use id instead of name_id if the object is a Snapshot. We assume that the object is a Snapshot object if hasattr(obj, 'name_id') returns False. This method returns False if the LDEV does not exist on the storage. The absence of the LDEV on the storage is detected elsewhere. :param int ldev: The ID of the LDEV to be checked :param obj: The object to be checked :type obj: Volume or Snapshot :param dict ldev_info_: LDEV info. This is an output area. Data is written by this method, but the area must be secured by the caller. :return: True if the LDEV does not correspond to the object, False otherwise :rtype: bool """ ldev_info = self.get_ldev_info(None, ldev) # To avoid calling the same REST API multiple times, we pass the LDEV # info to the caller. ldev_info_.update(ldev_info) return ('label' in ldev_info and _UUID_PATTERN.match(ldev_info['label']) and ldev_info['label'] != ( obj.name_id if hasattr(obj, 'name_id') else obj.id).replace('-', '')) def delete_volume(self, volume): """Delete the specified volume.""" ldev = self.get_ldev(volume) if ldev is None: self.output_log( MSG.INVALID_LDEV_FOR_DELETION, method='delete_volume', id=volume['id']) return # Check if the LDEV corresponds to the volume. # To avoid KeyError when accessing a missing attribute, set the default # value to None. ldev_info = defaultdict(lambda: None) if self.is_invalid_ldev(ldev, volume, ldev_info): # If the LDEV is assigned to another object, skip deleting it. self.output_log(MSG.SKIP_DELETING_LDEV, obj='volume', obj_id=volume.id, ldev=ldev, ldev_label=ldev_info['label']) return try: self.delete_ldev(ldev, ldev_info) except exception.VolumeDriverException as ex: if utils.BUSY_MESSAGE in ex.msg: raise exception.VolumeIsBusy(volume_name=volume['name']) else: raise ex def create_snapshot(self, snapshot): """Create a snapshot from a volume and return its properties.""" src_vref = snapshot.volume ldev = self.get_ldev(src_vref) if ldev is None: msg = self.output_log( MSG.INVALID_LDEV_FOR_VOLUME_COPY, type='volume', id=src_vref['id']) self.raise_error(msg) size = snapshot['volume_size'] extra_specs = self.get_volume_extra_specs(snapshot['volume']) pool_id = self.get_pool_id_of_volume(snapshot['volume']) snap_pool_id = self.storage_info['snap_pool_id'] ldev_range = self.storage_info['ldev_range'] qos_specs = utils.get_qos_specs_from_volume(snapshot) new_ldev = self.copy_on_storage( ldev, size, extra_specs, pool_id, snap_pool_id, ldev_range, is_snapshot=True, qos_specs=qos_specs) self.modify_ldev_name(new_ldev, snapshot.id.replace("-", "")) return { 'provider_location': str(new_ldev), } def delete_snapshot(self, snapshot): """Delete the specified snapshot.""" ldev = self.get_ldev(snapshot) if ldev is None: self.output_log( MSG.INVALID_LDEV_FOR_DELETION, method='delete_snapshot', id=snapshot['id']) return # Check if the LDEV corresponds to the snapshot. # To avoid KeyError when accessing a missing attribute, set the default # value to None. ldev_info = defaultdict(lambda: None) if self.is_invalid_ldev(ldev, snapshot, ldev_info): # If the LDEV is assigned to another object, skip deleting it. 
self.output_log(MSG.SKIP_DELETING_LDEV, obj='snapshot', obj_id=snapshot.id, ldev=ldev, ldev_label=ldev_info['label']) return try: self.delete_ldev(ldev, ldev_info) except exception.VolumeDriverException as ex: if utils.BUSY_MESSAGE in ex.msg: raise exception.SnapshotIsBusy(snapshot_name=snapshot['name']) else: raise ex def get_pool_info(self, pool_id, result=None): """Return the total and free capacity of the storage pool.""" raise NotImplementedError() def get_pool_infos(self, pool_ids): """Return the total and free capacity of the storage pools.""" raise NotImplementedError() def _create_single_pool_data(self, pool_id, pool_name, cap_data): location_info = { 'storage_id': self.conf.hitachi_storage_id, 'pool_id': pool_id, 'snap_pool_id': self.storage_info['snap_pool_id'], 'ldev_range': self.storage_info['ldev_range']} single_pool = {} single_pool.update(dict( pool_name=pool_name, reserved_percentage=self.conf.safe_get('reserved_percentage'), QoS_support=True, thin_provisioning_support=True, thick_provisioning_support=False, multiattach=True, consistencygroup_support=True, consistent_group_snapshot_enabled=True, max_over_subscription_ratio=( volume_utils.get_max_over_subscription_ratio( self.conf.safe_get('max_over_subscription_ratio'), True)), location_info=location_info )) if cap_data is None: single_pool.update(dict( total_capacity_gb=0, free_capacity_gb=0, provisioned_capacity_gb=0, backend_state='down')) self.output_log(MSG.POOL_INFO_RETRIEVAL_FAILED, pool=pool_name) return single_pool total_capacity, free_capacity, provisioned_capacity = cap_data single_pool.update(dict( total_capacity_gb=total_capacity, free_capacity_gb=free_capacity, provisioned_capacity_gb=provisioned_capacity )) single_pool.update(dict(backend_state='up')) return single_pool def update_volume_stats(self): """Update properties, capabilities and current states of the driver.""" data = {} backend_name = (self.conf.safe_get('volume_backend_name') or self.driver_info['volume_backend_name']) data = { 'volume_backend_name': backend_name, 'vendor_name': self.driver_info['vendor_name'], 'driver_version': self.driver_info['version'], 'storage_protocol': self.storage_info['protocol'], 'pools': [], } for pool_id, pool_name, cap_data in zip( self.storage_info['pool_id'], self.conf.hitachi_pools, self.get_pool_infos(self.storage_info['pool_id'])): single_pool = self._create_single_pool_data( pool_id, pool_name if len(self.conf.hitachi_pools) > 1 else data['volume_backend_name'], cap_data) data['pools'].append(single_pool) LOG.debug("Updating volume status. 
(%s)", data) self._stats = data return data def discard_zero_page(self, volume): """Return the volume's no-data pages to the storage pool.""" raise NotImplementedError() def check_pair_svol(self, ldev): """Check if the specified LDEV is S-VOL in a copy pair.""" raise NotImplementedError() def extend_ldev(self, ldev, old_size, new_size): """Extend the specified LDEV to the specified new size.""" raise NotImplementedError() def extend_volume(self, volume, new_size): """Extend the specified volume to the specified size.""" ldev = self.get_ldev(volume) if ldev is None: msg = self.output_log(MSG.INVALID_LDEV_FOR_EXTENSION, volume_id=volume['id']) self.raise_error(msg) if self.check_pair_svol(ldev): msg = self.output_log(MSG.INVALID_VOLUME_TYPE_FOR_EXTEND, volume_id=volume['id']) self.raise_error(msg) self.delete_pair(ldev) self.extend_ldev(ldev, volume['size'], new_size) def get_ldev_by_name(self, name): """Get the LDEV number from the given name.""" raise NotImplementedError() def check_ldev_manageability(self, ldev, existing_ref): """Check if the LDEV meets the criteria for being managed.""" raise NotImplementedError() def get_qos_specs_from_ldev(self, ldev): raise NotImplementedError() def change_qos_specs(self, ldev, old_qos_specs, new_qos_specs): raise NotImplementedError() def manage_existing(self, volume, existing_ref): """Return volume properties which Cinder needs to manage the volume.""" if 'source-name' in existing_ref: ldev = self.get_ldev_by_name( existing_ref.get('source-name').replace('-', '')) elif 'source-id' in existing_ref: ldev = str2int(existing_ref.get('source-id')) self.check_ldev_manageability(ldev, existing_ref) self.modify_ldev_name(ldev, volume['id'].replace("-", "")) new_qos_specs = utils.get_qos_specs_from_volume(volume) old_qos_specs = self.get_qos_specs_from_ldev(ldev) if old_qos_specs != new_qos_specs: self.change_qos_specs(ldev, old_qos_specs, new_qos_specs) return { 'provider_location': str(ldev), } def get_ldev_size_in_gigabyte(self, ldev, existing_ref): """Return the size[GB] of the specified LDEV.""" raise NotImplementedError() def manage_existing_get_size(self, volume, existing_ref): """Return the size[GB] of the specified volume.""" ldev = None if 'source-name' in existing_ref: ldev = self.get_ldev_by_name( existing_ref.get('source-name').replace("-", "")) elif 'source-id' in existing_ref: ldev = str2int(existing_ref.get('source-id')) if ldev is None: msg = self.output_log(MSG.INVALID_LDEV_FOR_MANAGE) raise exception.ManageExistingInvalidReference( existing_ref=existing_ref, reason=msg) return self.get_ldev_size_in_gigabyte(ldev, existing_ref) def unmanage(self, volume): """Prepare the volume for removing it from Cinder management.""" ldev = self.get_ldev(volume) if ldev is None: self.output_log(MSG.INVALID_LDEV_FOR_DELETION, method='unmanage', id=volume['id']) return if self.check_pair_svol(ldev): self.output_log( MSG.INVALID_LDEV_TYPE_FOR_UNMANAGE, volume_id=volume['id'], volume_type=utils.NORMAL_LDEV_TYPE) raise exception.VolumeIsBusy(volume_name=volume['name']) try: self.delete_pair(ldev) except exception.VolumeDriverException as ex: if utils.BUSY_MESSAGE in ex.msg: raise exception.VolumeIsBusy(volume_name=volume['name']) else: raise ex def _range2list(self, param): """Analyze a 'xxx-xxx' string and return a list of two integers.""" values = [str2int(value) for value in self.conf.safe_get(param).split('-')] if len(values) != 2 or None in values or values[0] > values[1]: msg = self.output_log(MSG.INVALID_PARAMETER, param=param) self.raise_error(msg) 
return values def check_param_fc(self): """Check FC-related parameter values and consistency among them.""" if hasattr( self.conf, self.driver_info['param_prefix'] + '_port_scheduler'): self.check_opts(self.conf, COMMON_PORT_OPTS) if (self.conf.hitachi_port_scheduler and not self.conf.hitachi_group_create): msg = self.output_log( MSG.INVALID_PARAMETER, param=self.driver_info['param_prefix'] + '_port_scheduler') self.raise_error(msg) if (self._lookup_service is None and self.conf.hitachi_port_scheduler): msg = self.output_log(MSG.ZONE_MANAGER_IS_NOT_AVAILABLE) self.raise_error(msg) def check_param_iscsi(self): """Check iSCSI-related parameter values and consistency among them.""" if self.conf.use_chap_auth: if not self.conf.chap_username: msg = self.output_log(MSG.INVALID_PARAMETER, param='chap_username') self.raise_error(msg) if not self.conf.chap_password: msg = self.output_log(MSG.INVALID_PARAMETER, param='chap_password') self.raise_error(msg) def check_param(self): """Check parameter values and consistency among them.""" self.check_opt_value(self.conf, _INHERITED_VOLUME_OPTS) self.check_opts(self.conf, COMMON_VOLUME_OPTS) if hasattr( self.conf, self.driver_info['param_prefix'] + '_pair_target_number'): self.check_opts(self.conf, COMMON_PAIR_OPTS) if hasattr( self.conf, self.driver_info['param_prefix'] + '_group_name_format'): self.check_opts(self.conf, COMMON_NAME_OPTS) if self.conf.hitachi_ldev_range: self.storage_info['ldev_range'] = self._range2list( self.driver_info['param_prefix'] + '_ldev_range') if (not self.conf.hitachi_target_ports and not self.conf.hitachi_compute_target_ports): msg = self.output_log( MSG.INVALID_PARAMETER, param=self.driver_info['param_prefix'] + '_target_ports or ' + self.driver_info['param_prefix'] + '_compute_target_ports') self.raise_error(msg) self._check_param_group_name_format() if (self.conf.hitachi_group_delete and not self.conf.hitachi_group_create): msg = self.output_log( MSG.INVALID_PARAMETER, param=self.driver_info['param_prefix'] + '_group_delete or ' + self.driver_info['param_prefix'] + '_group_create') self.raise_error(msg) for opt in self._required_common_opts: if not self.conf.safe_get(opt): msg = self.output_log(MSG.INVALID_PARAMETER, param=opt) self.raise_error(msg) for pool in self.conf.hitachi_pools: if len(pool) == 0: msg = self.output_log( MSG.INVALID_PARAMETER, param=self.driver_info['param_prefix'] + '_pools') self.raise_error(msg) if self.storage_info['protocol'] == 'FC': self.check_param_fc() if self.storage_info['protocol'] == 'iSCSI': self.check_param_iscsi() def _check_param_group_name_format(self): if not hasattr( self.conf, self.driver_info['param_prefix'] + '_group_name_format'): return if self.conf.hitachi_group_name_format is not None: error_flag = False if re.match( self.driver_info['target_prefix'] + '(' + GROUP_NAME_VAR_WWN + '|' + GROUP_NAME_VAR_IP + '|' + GROUP_NAME_VAR_HOST + '|' + '[' + GROUP_NAME_ALLOWED_CHARS + '])+$', self.conf.hitachi_group_name_format) is None: error_flag = True if not error_flag: for var in _GROUP_NAME_VAR_LEN: self.format_info['group_name_var_cnt'][var] = ( self.conf.hitachi_group_name_format.count(var)) if (self.format_info[ 'group_name_var_cnt'][var] not in self.group_name_format['group_name_var_cnt'][var]): error_flag = True break if not error_flag: group_name_var_replaced = self.conf.hitachi_group_name_format for var, length in _GROUP_NAME_VAR_LEN.items(): group_name_var_replaced = ( group_name_var_replaced.replace(var, '_' * length)) if len(group_name_var_replaced) > 
self.group_name_format[ 'group_name_max_len']: error_flag = True if error_flag: msg = self.output_log( MSG.INVALID_PARAMETER, param=self.driver_info['param_prefix'] + '_group_name_format') self.raise_error(msg) self.format_info['group_name_format'] = ( self.conf.hitachi_group_name_format) self.format_info['group_name_format_without_var_len'] = ( len(re.sub('|'.join( [GROUP_NAME_VAR_WWN, GROUP_NAME_VAR_IP, GROUP_NAME_VAR_HOST]), '', self.format_info['group_name_format']))) def need_client_setup(self): """Check if the making of the communication client is necessary.""" raise NotImplementedError() def setup_client(self): """Initialize RestApiClient.""" pass def enter_keep_session(self): """Begin the keeping of the session.""" pass def check_pool_id(self): """Check the pool id of hitachi_pools and hitachi_snap_pool.""" raise NotImplementedError() def connect_storage(self): """Prepare for using the storage.""" self.check_pool_id() self.output_log(MSG.SET_CONFIG_VALUE, object='DP Pool ID', value=self.storage_info['pool_id']) self.storage_info['controller_ports'] = [] self.storage_info['compute_ports'] = [] def find_targets_from_storage(self, targets, connector, target_ports): """Find mapped ports, memorize them and return unmapped port count.""" raise NotImplementedError() def get_hba_ids_from_connector(self, connector): """Return the HBA ID stored in the connector.""" if self.driver_info['hba_id'] in connector: return connector[self.driver_info['hba_id']] msg = self.output_log(MSG.RESOURCE_NOT_FOUND, resource=self.driver_info['hba_id_type']) self.raise_error(msg) def set_device_map(self, targets, hba_ids, volume): return None, hba_ids def get_port_scheduler_param(self): if hasattr( self.conf, self.driver_info['param_prefix'] + '_port_scheduler'): return self.conf.hitachi_port_scheduler else: return False def create_target_by_port_scheduler( self, devmap, targets, connector, volume): raise NotImplementedError() def create_target_to_storage(self, port, connector, hba_ids): """Create a host group or an iSCSI target on the specified port.""" raise NotImplementedError() def get_gid_from_targets(self, targets, port): for target_port, target_gid in targets['list']: if target_port == port: return target_gid msg = self.output_log(MSG.NO_CONNECTED_TARGET) self.raise_error(msg) def set_target_mode(self, port, gid): """Configure the target to meet the environment.""" raise NotImplementedError() def set_hba_ids(self, port, gid, hba_ids): """Connect all specified HBAs with the specified port.""" raise NotImplementedError() def delete_target_from_storage(self, port, gid): """Delete the host group or the iSCSI target from the port.""" raise NotImplementedError() def set_target_map_info(self, targets, hba_ids, port): pass def create_target(self, targets, port, connector, hba_ids): """Create a host group or an iSCSI target on the storage port.""" if port not in targets['info'] or not targets['info'][port]: target_name, gid = self.create_target_to_storage( port, connector, hba_ids) self.output_log( MSG.OBJECT_CREATED, object='a target', details='port: %(port)s, gid: %(gid)s, target_name: ' '%(target)s' % {'port': port, 'gid': gid, 'target': target_name}) else: gid = self.get_gid_from_targets(targets, port) try: if port not in targets['info'] or not targets['info'][port]: self.set_target_mode(port, gid) self.set_hba_ids(port, gid, hba_ids) except Exception: with excutils.save_and_reraise_exception(): self.delete_target_from_storage(port, gid) targets['info'][port] = True if (port, gid) not in targets['list']: 
targets['list'].append((port, gid)) self.set_target_map_info(targets, hba_ids, port) def create_mapping_targets(self, targets, connector, volume=None): """Create server-storage connection for all specified storage ports.""" active_hba_ids = [] hba_ids = self.get_hba_ids_from_connector(connector) devmap, active_hba_ids = self.set_device_map(targets, hba_ids, volume) if self.get_port_scheduler_param(): self.create_target_by_port_scheduler( devmap, targets, connector, volume) else: for port in targets['info'].keys(): if targets['info'][port]: continue try: self.create_target( targets, port, connector, active_hba_ids) except exception.VolumeDriverException: self.output_log( self.driver_info['msg_id']['target'], port=port) # When other threads created a host group at same time, need to # re-find targets. if not targets['list']: self.find_targets_from_storage( targets, connector, list(targets['info'].keys())) def get_port_index_to_be_used(self, ports, network_name): backend_name = self.conf.safe_get('volume_backend_name') code = ( str(self.conf.hitachi_storage_id) + backend_name + network_name) if code in self.port_index.keys(): if self.port_index[code] >= len(ports) - 1: self.port_index[code] = 0 else: self.port_index[code] += 1 else: self.port_index[code] = 0 return self.port_index[code] def init_cinder_hosts(self, **kwargs): """Initialize server-storage connection.""" targets = kwargs.pop( 'targets', {'info': {}, 'list': [], 'iqns': {}, 'target_map': {}}) connector = volume_utils.brick_get_connector_properties( multipath=self.conf.use_multipath_for_image_xfer, enforce_multipath=self.conf.enforce_multipath_for_image_xfer) target_ports = self.storage_info['controller_ports'] if target_ports: if (self.find_targets_from_storage( targets, connector, target_ports) and self.conf.hitachi_group_create): self.create_mapping_targets(targets, connector) self.require_target_existed(targets) def do_setup(self, context): """Prepare for the startup of the driver.""" @coordination.synchronized('{self.lock[do_setup]}') def _with_synchronized(self): self.connect_storage() self.init_cinder_hosts() self.ctxt = context self.check_param() if self.need_client_setup(): self.setup_client() self.enter_keep_session() _with_synchronized(self) def check_ports_info(self): """Check if available storage ports exist.""" if (self.conf.hitachi_target_ports and not self.storage_info['controller_ports']): msg = self.output_log(MSG.RESOURCE_NOT_FOUND, resource="Target ports") self.raise_error(msg) if (self.conf.hitachi_compute_target_ports and not self.storage_info['compute_ports']): msg = self.output_log(MSG.RESOURCE_NOT_FOUND, resource="Compute target ports") self.raise_error(msg) self.output_log(MSG.SET_CONFIG_VALUE, object='target port list', value=self.storage_info['controller_ports']) self.output_log(MSG.SET_CONFIG_VALUE, object='compute target port list', value=self.storage_info['compute_ports']) def attach_ldev( self, volume, ldev, connector, is_snapshot, targets, lun=None): """Initialize connection between the server and the volume.""" raise NotImplementedError() def get_properties_fc(self, targets): """Return FC-specific server-LDEV connection info.""" data = {} data['target_wwn'] = [ self.storage_info['wwns'][target[0]] for target in targets['list'] if targets['lun'][target[0]]] return data def get_properties_iscsi(self, targets, multipath): """Return iSCSI-specific server-LDEV connection info.""" data = {} primary_target = targets['list'][0] if not multipath: data['target_portal'] = self.storage_info[ 
'portals'][primary_target[0]] data['target_iqn'] = targets['iqns'][primary_target] else: # Set the list of numbers that LUN was added data['target_portals'] = [ self.storage_info['portals'][target[0]] for target in targets['list'] if targets['lun'][target[0]]] data['target_iqns'] = [ targets['iqns'][target] for target in targets['list'] if targets['lun'][target[0]]] if self.conf.use_chap_auth: data['auth_method'] = 'CHAP' data['auth_username'] = self.conf.chap_username data['auth_password'] = self.conf.chap_password return data def get_properties(self, targets, target_lun, connector): """Return server-LDEV connection info.""" multipath = connector.get('multipath', False) if self.storage_info['protocol'] == 'FC': data = self.get_properties_fc(targets) elif self.storage_info['protocol'] == 'iSCSI': data = self.get_properties_iscsi(targets, multipath) data['target_discovered'] = False if not multipath or self.storage_info['protocol'] == 'FC': data['target_lun'] = target_lun else: # Set the list of numbers that LUN was added target_luns = [] for target in targets['list']: if targets['lun'][target[0]]: target_luns.append(target_lun) data['target_luns'] = target_luns return data # A synchronization to prevent conflicts between host group creation # and deletion. @coordination.synchronized( '{self.driver_info[driver_file_prefix]}-host-' '{self.conf.hitachi_storage_id}-{connector[host]}') def initialize_connection( self, volume, connector, is_snapshot=False, lun=None): """Initialize connection between the server and the volume.""" targets = { 'info': {}, 'list': [], 'lun': {}, 'iqns': {}, 'target_map': {}, } ldev = self.get_ldev(volume) if ldev is None: msg = self.output_log(MSG.INVALID_LDEV_FOR_CONNECTION, volume_id=volume['id']) self.raise_error(msg) target_lun = self.attach_ldev( volume, ldev, connector, is_snapshot, targets, lun) return { 'driver_volume_type': self.driver_info['volume_type'], 'data': self.get_properties(targets, target_lun, connector), }, targets['target_map'] def get_target_ports(self, connector): """Return a list of ports corresponding to the specified connector.""" if 'ip' in connector and connector['ip'] == CONF.my_ip: return self.storage_info['controller_ports'] return (self.storage_info['compute_ports'] or self.storage_info['controller_ports']) def get_port_hostgroup_map(self, ldev_id): """Get the mapping of a port and host group.""" raise NotImplementedError() def set_terminate_target(self, fake_connector, port_hostgroup_map): """Set necessary information in connector in terminate.""" raise NotImplementedError() def detach_ldev(self, volume, ldev, connector): """Terminate connection between the server and the volume.""" raise NotImplementedError() def terminate_connection(self, volume, connector): """Terminate connection between the server and the volume.""" ldev = self.get_ldev(volume) if ldev is None: self.output_log(MSG.INVALID_LDEV_FOR_UNMAPPING, volume_id=volume['id']) return # If a fake connector is generated by nova when the host # is down, then the connector will not have a host property, # In this case construct the lock without the host property # so that all the fake connectors to an SVC are serialized if 'host' not in connector: port_hostgroup_map = self.get_port_hostgroup_map(ldev) if not port_hostgroup_map: self.output_log(MSG.NO_LUN, ldev=ldev) return self.set_terminate_target(connector, port_hostgroup_map) # A synchronization to prevent conflicts between host group creation # and deletion. 
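        # The lock name built below has the form
        # "<driver_file_prefix>-host-<storage_id>-<host>", the same template
        # used by the initialize_connection() decorator above, so attach and
        # detach requests for one host on one array serialize on a single
        # coordination lock. For a fake connector without a 'host' key,
        # connector.get('host') evaluates to None and all such connectors
        # share the resulting "...-None" lock.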
@coordination.synchronized( '%(prefix)s-host-%(storage_id)s-%(host)s' % { 'prefix': self.driver_info['driver_file_prefix'], 'storage_id': self.conf.hitachi_storage_id, 'host': connector.get('host'), } ) def inner(self, volume, connector): deleted_targets = self.detach_ldev(volume, ldev, connector) if self.storage_info['protocol'] == 'FC': target_wwn = [ self.storage_info['wwns'][target] for target in deleted_targets] return {'driver_volume_type': self.driver_info['volume_type'], 'data': {'target_wwn': target_wwn}} return inner(self, volume, connector) def filter_target_ports(self, target_ports, volume, is_snapshot=False): specs = self.get_volume_extra_specs(volume) if volume else None if not specs: return target_ports if self.driver_info.get('driver_dir_name'): if getattr(self, 'is_secondary', False): tps_name = self.driver_info[ 'driver_dir_name'] + ':remote_target_ports' else: tps_name = self.driver_info[ 'driver_dir_name'] + ':target_ports' else: return target_ports tps = specs.get(tps_name) if tps is None: return target_ports tpsset = set([s.strip() for s in tps.split(',')]) filtered_tps = list(tpsset.intersection(target_ports)) if is_snapshot: volume = volume['volume'] for port in tpsset: if port not in target_ports: self.output_log( MSG.INVALID_EXTRA_SPEC_KEY_PORT, port=port, target_ports_param=tps_name, volume_type=volume['volume_type']['name']) return filtered_tps def clean_mapping_targets(self, targets): raise NotImplementedError() def unmanage_snapshot(self, snapshot): """Output error message and raise NotImplementedError.""" self.output_log( MSG.SNAPSHOT_UNMANAGE_FAILED, snapshot_id=snapshot['id']) raise NotImplementedError() def migrate_volume(self, volume, host): """Migrate the specified volume.""" return False def update_migrated_volume(self, new_volume): """Return model update for migrated volume.""" return {'_name_id': new_volume.name_id, 'provider_location': new_volume.provider_location} def retype(self, ctxt, volume, new_type, diff, host): """Retype the specified volume.""" return False def has_snap_pair(self, pvol, svol): """Check if the volume have the pair of the snapshot.""" raise NotImplementedError() def restore_ldev(self, pvol, svol): """Restore a pair of the specified LDEV.""" raise NotImplementedError() def revert_to_snapshot(self, volume, snapshot): """Rollback the specified snapshot.""" pvol = self.get_ldev(volume) svol = self.get_ldev(snapshot) if (pvol is not None and svol is not None and self.has_snap_pair(pvol, svol)): self.restore_ldev(pvol, svol) else: raise NotImplementedError() def create_group(self): raise NotImplementedError() def delete_group(self, group, volumes): raise NotImplementedError() def create_group_from_src( self, context, group, volumes, snapshots=None, source_vols=None): raise NotImplementedError() def update_group(self, group, add_volumes=None): raise NotImplementedError() def create_group_snapshot(self, context, group_snapshot, snapshots): raise NotImplementedError() def delete_group_snapshot(self, group_snapshot, snapshots): raise NotImplementedError() def output_log(self, msg_enum, **kwargs): if self.storage_id is not None: return utils.output_log( msg_enum, storage_id=self.storage_id, **kwargs) else: return utils.output_log(msg_enum, **kwargs) def get_ldev(self, obj, both=False): if not obj: return None provider_location = obj.get('provider_location') if not provider_location: return None if provider_location.isdigit() and not getattr(self, 'is_secondary', False): # This format implies that the value is the ID of an LDEV in the # 
primary storage. Therefore, the secondary instance should not # retrieve this value. return int(provider_location) if provider_location.startswith('{'): loc = json.loads(provider_location) if isinstance(loc, dict): if getattr(self, 'is_primary', False) or ( hasattr(self, 'primary_storage_id') and not both): return None if 'pldev' not in loc else int(loc['pldev']) elif getattr(self, 'is_secondary', False): return None if 'sldev' not in loc else int(loc['sldev']) if hasattr(self, 'primary_storage_id'): return {key: loc.get(key) for key in ['pldev', 'sldev']} return None def check_opt_value(self, conf, names): """Check if the parameter names and values are valid.""" for name in names: try: getattr(conf, name) except (cfg.NoSuchOptError, cfg.ConfigFileValueError): with excutils.save_and_reraise_exception(): self.output_log(MSG.INVALID_PARAMETER, param=name) def check_opts(self, conf, opts): """Check if the specified configuration is valid.""" names = [] for opt in opts: if opt.required and not conf.safe_get(opt.name): msg = self.output_log(MSG.INVALID_PARAMETER, param=opt.name) self.raise_error(msg) names.append(opt.name) self.check_opt_value(conf, names) def get_volume_extra_specs(self, volume): if volume is None: return {} type_id = volume.get('volume_type_id', None) if type_id is None: return {} return volume_types.get_volume_type_extra_specs(type_id) def require_target_existed(self, targets): """Check if the target list includes one or more members.""" if not targets['list']: msg = self.output_log(MSG.NO_CONNECTED_TARGET) self.raise_error(msg) def raise_error(self, msg): """Raise a VolumeDriverException by driver error message.""" message = _( '%(prefix)s error occurred. %(msg)s' % { 'prefix': self.driver_info['driver_prefix'], 'msg': msg, } ) raise exception.VolumeDriverException(message) def raise_busy(self): """Raise a VolumeDriverException by driver busy message.""" message = _(utils.BUSY_MESSAGE) raise exception.VolumeDriverException(message) def is_controller(self, connector): return True if ( 'ip' in connector and connector['ip'] == CONF.my_ip) else False ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/drivers/hitachi/hbsd_fc.py0000664000175000017500000003052400000000000022677 0ustar00zuulzuul00000000000000# Copyright (C) 2020, 2024, Hitachi, Ltd. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
# """Fibre channel module for Hitachi HBSD Driver.""" import os from oslo_utils import excutils from cinder import interface from cinder.volume import driver from cinder.volume.drivers.hitachi import hbsd_common as common from cinder.volume.drivers.hitachi import hbsd_replication as replication from cinder.volume.drivers.hitachi import hbsd_rest as rest from cinder.volume.drivers.hitachi import hbsd_rest_fc as rest_fc from cinder.volume.drivers.hitachi import hbsd_utils as utils from cinder.volume import volume_utils MSG = utils.HBSDMsg _DRIVER_INFO = { 'version': utils.VERSION, 'proto': 'FC', 'hba_id': 'wwpns', 'hba_id_type': 'World Wide Name', 'msg_id': { 'target': MSG.CREATE_HOST_GROUP_FAILED, }, 'volume_backend_name': '%(prefix)sFC' % { 'prefix': utils.DRIVER_PREFIX, }, 'volume_type': 'fibre_channel', 'param_prefix': utils.PARAM_PREFIX, 'vendor_name': utils.VENDOR_NAME, 'driver_dir_name': utils.DRIVER_DIR_NAME, 'driver_prefix': utils.DRIVER_PREFIX, 'driver_file_prefix': utils.DRIVER_FILE_PREFIX, 'target_prefix': utils.TARGET_PREFIX, 'hdp_vol_attr': utils.HDP_VOL_ATTR, 'hdt_vol_attr': utils.HDT_VOL_ATTR, 'nvol_ldev_type': utils.NVOL_LDEV_TYPE, 'target_iqn_suffix': utils.TARGET_IQN_SUFFIX, 'pair_attr': utils.PAIR_ATTR, 'mirror_attr': utils.MIRROR_ATTR, 'driver_impl_class': rest_fc.HBSDRESTFC, } @interface.volumedriver class HBSDFCDriver(driver.FibreChannelDriver): """Fibre channel class for Hitachi HBSD Driver. Version history: .. code-block:: none 1.0.0 - Initial driver. 1.1.0 - Add manage_existing/manage_existing_get_size/unmanage methods 2.0.0 - Major redesign of the driver. This version requires the REST API for communication with the storage backend. 2.1.0 - Add Cinder generic volume groups. 2.2.0 - Add maintenance parameters. 2.2.1 - Make the parameters name variable for supporting OEM storages. 2.2.2 - Add Target Port Assignment. 2.2.3 - Add port scheduler. 2.3.0 - Support multi pool. 2.3.1 - Update retype and support storage assisted migration. 2.3.2 - Add specifies format of the names HostGroups/iSCSI Targets. 2.3.3 - Add GAD volume support. 2.3.4 - Support data deduplication and compression. 2.3.5 - Fix key error when backend is down. 2.4.0 - Add QoS support. 
""" VERSION = utils.VERSION # ThirdPartySystems wiki page CI_WIKI_NAME = utils.CI_WIKI_NAME driver_info = dict(_DRIVER_INFO) def __init__(self, *args, **kwargs): """Initialize instance variables.""" utils.output_log(MSG.DRIVER_INITIALIZATION_START, driver=self.__class__.__name__, version=self.get_version()) super(HBSDFCDriver, self).__init__(*args, **kwargs) self.configuration.append_config_values(common.COMMON_VOLUME_OPTS) self.configuration.append_config_values(common.COMMON_PAIR_OPTS) self.configuration.append_config_values(common.COMMON_PORT_OPTS) self.configuration.append_config_values(common.COMMON_NAME_OPTS) self.configuration.append_config_values(rest_fc.FC_VOLUME_OPTS) self.configuration.append_config_values( replication.COMMON_MIRROR_OPTS) os.environ['LANG'] = 'C' kwargs.setdefault('driver_info', _DRIVER_INFO) self.driver_info = dict(kwargs['driver_info']) self.driver_info['driver_class'] = self.__class__ if self.configuration.safe_get('hitachi_mirror_storage_id'): self.common = replication.HBSDREPLICATION( self.configuration, self.driver_info, kwargs.get('db')) elif not hasattr(self, '_init_common'): self.common = self.driver_info['driver_impl_class']( self.configuration, self.driver_info, kwargs.get('db')) else: self.common = self._init_common( self.configuration, kwargs.get('db')) @staticmethod def get_driver_options(): additional_opts = HBSDFCDriver._get_oslo_driver_opts( *(common._INHERITED_VOLUME_OPTS + rest._REQUIRED_REST_OPTS + ['driver_ssl_cert_verify', 'driver_ssl_cert_path', 'san_api_port', ])) return (common.COMMON_VOLUME_OPTS + common.COMMON_PORT_OPTS + common.COMMON_PAIR_OPTS + common.COMMON_NAME_OPTS + rest.REST_VOLUME_OPTS + rest.REST_PAIR_OPTS + rest_fc.FC_VOLUME_OPTS + replication._REP_OPTS + replication.COMMON_MIRROR_OPTS + replication.ISCSI_MIRROR_OPTS + replication.REST_MIRROR_OPTS + replication.REST_MIRROR_API_OPTS + replication.REST_MIRROR_SSL_OPTS + additional_opts) def check_for_setup_error(self): pass @volume_utils.trace def create_volume(self, volume): """Create a volume and return its properties.""" return self.common.create_volume(volume) @volume_utils.trace def create_volume_from_snapshot(self, volume, snapshot): """Create a volume from a snapshot and return its properties.""" return self.common.create_volume_from_snapshot(volume, snapshot) @volume_utils.trace def create_cloned_volume(self, volume, src_vref): """Create a clone of the specified volume and return its properties.""" return self.common.create_cloned_volume(volume, src_vref) @volume_utils.trace def delete_volume(self, volume): """Delete the specified volume.""" self.common.delete_volume(volume) @volume_utils.trace def create_snapshot(self, snapshot): """Create a snapshot from a volume and return its properties.""" return self.common.create_snapshot(snapshot) @volume_utils.trace def delete_snapshot(self, snapshot): """Delete the specified snapshot.""" self.common.delete_snapshot(snapshot) def local_path(self, volume): pass def _update_volume_stats(self): """Return properties, capabilities and current states of the driver.""" data = self.common.update_volume_stats() if 'pools' in data: for pool in data['pools']: pool["filter_function"] = self.get_filter_function() pool["goodness_function"] = ( self.get_goodness_function()) self._stats = data @volume_utils.trace def update_migrated_volume( self, ctxt, volume, new_volume, original_volume_status): """Do any remaining jobs after migration.""" self.common.discard_zero_page(new_volume) return self.common.update_migrated_volume(new_volume) 
@volume_utils.trace def copy_image_to_volume(self, context, volume, image_service, image_id, disable_sparse=False): """Fetch the image from image_service and write it to the volume.""" super(HBSDFCDriver, self).copy_image_to_volume( context, volume, image_service, image_id, disable_sparse=disable_sparse) self.common.discard_zero_page(volume) @volume_utils.trace def extend_volume(self, volume, new_size): """Extend the specified volume to the specified size.""" self.common.extend_volume(volume, new_size) @volume_utils.trace def manage_existing(self, volume, existing_ref): """Return volume properties which Cinder needs to manage the volume.""" return self.common.manage_existing(volume, existing_ref) @volume_utils.trace def manage_existing_get_size(self, volume, existing_ref): """Return the size[GB] of the specified volume.""" return self.common.manage_existing_get_size(volume, existing_ref) @volume_utils.trace def unmanage(self, volume): """Prepare the volume for removing it from Cinder management.""" self.common.unmanage(volume) @volume_utils.trace def do_setup(self, context): """Prepare for the startup of the driver.""" self.common.do_setup(context) def ensure_export(self, context, volume): """Synchronously recreate an export for a volume.""" pass def create_export(self, context, volume, connector): """Export the volume.""" pass def remove_export(self, context, volume): """Remove an export for a volume.""" pass def create_export_snapshot(self, context, snapshot, connector): pass def remove_export_snapshot(self, context, snapshot): pass @volume_utils.trace def initialize_connection(self, volume, connector): """Initialize connection between the server and the volume.""" return self.common.initialize_connection(volume, connector) @volume_utils.trace def terminate_connection(self, volume, connector, **kwargs): """Terminate connection between the server and the volume.""" if connector is None: connector = {} if utils.is_shared_connection(volume, connector): return self.common.terminate_connection(volume, connector) @volume_utils.trace def initialize_connection_snapshot(self, snapshot, connector, **kwargs): """Initialize connection between the server and the snapshot.""" return self.common.initialize_connection( snapshot, connector, is_snapshot=True) @volume_utils.trace def terminate_connection_snapshot(self, snapshot, connector, **kwargs): """Terminate connection between the server and the snapshot.""" self.common.terminate_connection(snapshot, connector) @volume_utils.trace def unmanage_snapshot(self, snapshot): """Prepare the snapshot for removing it from Cinder management.""" return self.common.unmanage_snapshot(snapshot) @volume_utils.trace def retype(self, ctxt, volume, new_type, diff, host): """Retype the specified volume.""" return self.common.retype(ctxt, volume, new_type, diff, host) @volume_utils.trace def migrate_volume(self, ctxt, volume, host): """Migrate the specified volume.""" return self.common.migrate_volume(volume, host) def backup_use_temp_snapshot(self): return True @volume_utils.trace def revert_to_snapshot(self, context, volume, snapshot): """Rollback the specified snapshot""" return self.common.revert_to_snapshot(volume, snapshot) @volume_utils.trace def create_group(self, context, group): return self.common.create_group() @volume_utils.trace def delete_group(self, context, group, volumes): return self.common.delete_group(group, volumes) @volume_utils.trace def create_group_from_src( self, context, group, volumes, group_snapshot=None, snapshots=None, source_group=None, 
source_vols=None): return self.common.create_group_from_src( context, group, volumes, snapshots, source_vols) @volume_utils.trace def update_group( self, context, group, add_volumes=None, remove_volumes=None): try: return self.common.update_group(group, add_volumes) except Exception: with excutils.save_and_reraise_exception(): for remove_volume in remove_volumes: utils.cleanup_cg_in_volume(remove_volume) @volume_utils.trace def create_group_snapshot(self, context, group_snapshot, snapshots): return self.common.create_group_snapshot( context, group_snapshot, snapshots) @volume_utils.trace def delete_group_snapshot(self, context, group_snapshot, snapshots): return self.common.delete_group_snapshot(group_snapshot, snapshots) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/drivers/hitachi/hbsd_iscsi.py0000664000175000017500000003017100000000000023417 0ustar00zuulzuul00000000000000# Copyright (C) 2020, 2024, Hitachi, Ltd. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # """iSCSI module for Hitachi HBSD Driver.""" import os from oslo_utils import excutils from cinder import interface from cinder.volume import driver from cinder.volume.drivers.hitachi import hbsd_common as common from cinder.volume.drivers.hitachi import hbsd_replication as replication from cinder.volume.drivers.hitachi import hbsd_rest as rest from cinder.volume.drivers.hitachi import hbsd_rest_iscsi as rest_iscsi from cinder.volume.drivers.hitachi import hbsd_utils as utils from cinder.volume import volume_utils MSG = utils.HBSDMsg _DRIVER_INFO = { 'version': utils.VERSION, 'proto': 'iSCSI', 'hba_id': 'initiator', 'hba_id_type': 'iSCSI initiator IQN', 'msg_id': { 'target': MSG.CREATE_ISCSI_TARGET_FAILED, }, 'volume_backend_name': '%(prefix)siSCSI' % { 'prefix': utils.DRIVER_PREFIX, }, 'volume_type': 'iscsi', 'param_prefix': utils.PARAM_PREFIX, 'vendor_name': utils.VENDOR_NAME, 'driver_dir_name': utils.DRIVER_DIR_NAME, 'driver_prefix': utils.DRIVER_PREFIX, 'driver_file_prefix': utils.DRIVER_FILE_PREFIX, 'target_prefix': utils.TARGET_PREFIX, 'hdp_vol_attr': utils.HDP_VOL_ATTR, 'hdt_vol_attr': utils.HDT_VOL_ATTR, 'nvol_ldev_type': utils.NVOL_LDEV_TYPE, 'target_iqn_suffix': utils.TARGET_IQN_SUFFIX, 'pair_attr': utils.PAIR_ATTR, 'mirror_attr': utils.MIRROR_ATTR, 'driver_impl_class': rest_iscsi.HBSDRESTISCSI, } @interface.volumedriver class HBSDISCSIDriver(driver.ISCSIDriver): """iSCSI class for Hitachi HBSD Driver. Version history: .. code-block:: none 1.0.0 - Initial driver. 1.1.0 - Add manage_existing/manage_existing_get_size/unmanage methods 2.0.0 - Major redesign of the driver. This version requires the REST API for communication with the storage backend. 2.1.0 - Add Cinder generic volume groups. 2.2.0 - Add maintenance parameters. 2.2.1 - Make the parameters name variable for supporting OEM storages. 2.2.2 - Add Target Port Assignment. 2.2.3 - Add port scheduler. 2.3.0 - Support multi pool. 2.3.1 - Update retype and support storage assisted migration. 
2.3.2 - Add specifies format of the names HostGroups/iSCSI Targets. 2.3.3 - Add GAD volume support. 2.3.4 - Support data deduplication and compression. 2.3.5 - Fix key error when backend is down. 2.4.0 - Add QoS support. """ VERSION = utils.VERSION # ThirdPartySystems wiki page CI_WIKI_NAME = utils.CI_WIKI_NAME driver_info = dict(_DRIVER_INFO) def __init__(self, *args, **kwargs): """Initialize instance variables.""" utils.output_log(MSG.DRIVER_INITIALIZATION_START, driver=self.__class__.__name__, version=self.get_version()) super(HBSDISCSIDriver, self).__init__(*args, **kwargs) self.configuration.append_config_values(common.COMMON_VOLUME_OPTS) self.configuration.append_config_values(common.COMMON_PAIR_OPTS) self.configuration.append_config_values(common.COMMON_NAME_OPTS) self.configuration.append_config_values( replication.COMMON_MIRROR_OPTS) os.environ['LANG'] = 'C' kwargs.setdefault('driver_info', _DRIVER_INFO) self.driver_info = dict(kwargs['driver_info']) self.driver_info['driver_class'] = self.__class__ if self.configuration.safe_get('hitachi_mirror_storage_id'): self.common = replication.HBSDREPLICATION( self.configuration, self.driver_info, kwargs.get('db')) elif not hasattr(self, '_init_common'): self.common = self.driver_info['driver_impl_class']( self.configuration, self.driver_info, kwargs.get('db')) else: self.common = self._init_common( self.configuration, kwargs.get('db')) @staticmethod def get_driver_options(): additional_opts = HBSDISCSIDriver._get_oslo_driver_opts( *(common._INHERITED_VOLUME_OPTS + rest._REQUIRED_REST_OPTS + ['driver_ssl_cert_verify', 'driver_ssl_cert_path', 'san_api_port', ])) return (common.COMMON_VOLUME_OPTS + common.COMMON_PAIR_OPTS + common.COMMON_NAME_OPTS + rest.REST_VOLUME_OPTS + rest.REST_PAIR_OPTS + replication._REP_OPTS + replication.COMMON_MIRROR_OPTS + replication.ISCSI_MIRROR_OPTS + replication.REST_MIRROR_OPTS + replication.REST_MIRROR_API_OPTS + replication.REST_MIRROR_SSL_OPTS + additional_opts) def check_for_setup_error(self): pass @volume_utils.trace def create_volume(self, volume): """Create a volume and return its properties.""" return self.common.create_volume(volume) @volume_utils.trace def create_volume_from_snapshot(self, volume, snapshot): """Create a volume from a snapshot and return its properties.""" return self.common.create_volume_from_snapshot(volume, snapshot) @volume_utils.trace def create_cloned_volume(self, volume, src_vref): """Create a clone of the specified volume and return its properties.""" return self.common.create_cloned_volume(volume, src_vref) @volume_utils.trace def delete_volume(self, volume): """Delete the specified volume.""" self.common.delete_volume(volume) @volume_utils.trace def create_snapshot(self, snapshot): """Create a snapshot from a volume and return its properties.""" return self.common.create_snapshot(snapshot) @volume_utils.trace def delete_snapshot(self, snapshot): """Delete the specified snapshot.""" self.common.delete_snapshot(snapshot) def local_path(self, volume): pass def _update_volume_stats(self): """Return properties, capabilities and current states of the driver.""" data = self.common.update_volume_stats() if 'pools' in data: for pool in data['pools']: pool["filter_function"] = self.get_filter_function() pool["goodness_function"] = ( self.get_goodness_function()) self._stats = data @volume_utils.trace def update_migrated_volume( self, ctxt, volume, new_volume, original_volume_status): """Do any remaining jobs after migration.""" self.common.discard_zero_page(new_volume) return 
self.common.update_migrated_volume(new_volume) @volume_utils.trace def copy_image_to_volume(self, context, volume, image_service, image_id, disable_sparse=False): """Fetch the image from image_service and write it to the volume.""" super(HBSDISCSIDriver, self).copy_image_to_volume( context, volume, image_service, image_id, disable_sparse=disable_sparse) self.common.discard_zero_page(volume) @volume_utils.trace def extend_volume(self, volume, new_size): """Extend the specified volume to the specified size.""" self.common.extend_volume(volume, new_size) @volume_utils.trace def manage_existing(self, volume, existing_ref): """Return volume properties which Cinder needs to manage the volume.""" return self.common.manage_existing(volume, existing_ref) @volume_utils.trace def manage_existing_get_size(self, volume, existing_ref): """Return the size[GB] of the specified volume.""" return self.common.manage_existing_get_size(volume, existing_ref) @volume_utils.trace def unmanage(self, volume): """Prepare the volume for removing it from Cinder management.""" self.common.unmanage(volume) @volume_utils.trace def do_setup(self, context): """Prepare for the startup of the driver.""" self.common.do_setup(context) def ensure_export(self, context, volume): """Synchronously recreate an export for a volume.""" pass def create_export(self, context, volume, connector): """Export the volume.""" pass def remove_export(self, context, volume): """Remove an export for a volume.""" pass def create_export_snapshot(self, context, snapshot, connector): pass def remove_export_snapshot(self, context, snapshot): pass @volume_utils.trace def initialize_connection(self, volume, connector): """Initialize connection between the server and the volume.""" return self.common.initialize_connection(volume, connector) @volume_utils.trace def terminate_connection(self, volume, connector, **kwargs): """Terminate connection between the server and the volume.""" if connector is None: connector = {} if utils.is_shared_connection(volume, connector): return self.common.terminate_connection(volume, connector) @volume_utils.trace def initialize_connection_snapshot(self, snapshot, connector, **kwargs): """Initialize connection between the server and the snapshot.""" return self.common.initialize_connection( snapshot, connector, is_snapshot=True) @volume_utils.trace def terminate_connection_snapshot(self, snapshot, connector, **kwargs): """Terminate connection between the server and the snapshot.""" self.common.terminate_connection(snapshot, connector) @volume_utils.trace def unmanage_snapshot(self, snapshot): """Prepare the snapshot for removing it from Cinder management.""" return self.common.unmanage_snapshot(snapshot) @volume_utils.trace def retype(self, ctxt, volume, new_type, diff, host): """Retype the specified volume.""" return self.common.retype(ctxt, volume, new_type, diff, host) @volume_utils.trace def migrate_volume(self, ctxt, volume, host): """Migrate the specified volume.""" return self.common.migrate_volume(volume, host) def backup_use_temp_snapshot(self): return True @volume_utils.trace def revert_to_snapshot(self, context, volume, snapshot): """Rollback the specified snapshot""" return self.common.revert_to_snapshot(volume, snapshot) @volume_utils.trace def create_group(self, context, group): return self.common.create_group() @volume_utils.trace def delete_group(self, context, group, volumes): return self.common.delete_group(group, volumes) @volume_utils.trace def create_group_from_src( self, context, group, volumes, 
group_snapshot=None, snapshots=None, source_group=None, source_vols=None): return self.common.create_group_from_src( context, group, volumes, snapshots, source_vols) @volume_utils.trace def update_group( self, context, group, add_volumes=None, remove_volumes=None): try: return self.common.update_group(group, add_volumes) except Exception: with excutils.save_and_reraise_exception(): for remove_volume in remove_volumes: utils.cleanup_cg_in_volume(remove_volume) @volume_utils.trace def create_group_snapshot(self, context, group_snapshot, snapshots): return self.common.create_group_snapshot( context, group_snapshot, snapshots) @volume_utils.trace def delete_group_snapshot(self, context, group_snapshot, snapshots): return self.common.delete_group_snapshot(group_snapshot, snapshots) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/drivers/hitachi/hbsd_replication.py0000664000175000017500000012752000000000000024623 0ustar00zuulzuul00000000000000# Copyright (C) 2022, 2024, Hitachi, Ltd. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # """replication module for Hitachi HBSD Driver.""" from collections import defaultdict import json from eventlet import greenthread from oslo_config import cfg from oslo_log import log as logging from oslo_utils import excutils from oslo_utils import timeutils from cinder import exception from cinder.volume.drivers.hitachi import hbsd_common as common from cinder.volume.drivers.hitachi import hbsd_rest as rest from cinder.volume.drivers.hitachi import hbsd_utils as utils from cinder.zonemanager import utils as fczm_utils _REP_STATUS_CHECK_SHORT_INTERVAL = 5 _REP_STATUS_CHECK_LONG_INTERVAL = 10 * 60 _REP_STATUS_CHECK_TIMEOUT = 24 * 60 * 60 _WAIT_PAIR = 1 _WAIT_PSUS = 2 _REP_OPTS = [ cfg.IntOpt( 'hitachi_replication_status_check_short_interval', default=_REP_STATUS_CHECK_SHORT_INTERVAL, help='Initial interval at which remote replication pair status is ' 'checked'), cfg.IntOpt( 'hitachi_replication_status_check_long_interval', default=_REP_STATUS_CHECK_LONG_INTERVAL, help='Interval at which remote replication pair status is checked. ' 'This parameter is applied if the status has not changed to the ' 'expected status after the time indicated by this parameter has ' 'elapsed.'), cfg.IntOpt( 'hitachi_replication_status_check_timeout', default=_REP_STATUS_CHECK_TIMEOUT, help='Maximum wait time before the remote replication pair status ' 'changes to the expected status'), cfg.IntOpt( 'hitachi_path_group_id', default=0, min=0, max=255, help='Path group ID assigned to the remote connection for remote ' 'replication'), cfg.IntOpt( 'hitachi_quorum_disk_id', min=0, max=31, help='ID of the Quorum disk used for global-active device'), cfg.IntOpt( 'hitachi_replication_copy_speed', min=1, max=15, default=3, help='Remote copy speed of storage system. 
1 or 2 indicates ' 'low speed, 3 indicates middle speed, and a value between 4 and ' '15 indicates high speed.'), cfg.BoolOpt( 'hitachi_set_mirror_reserve_attribute', default=True, help='Whether or not to set the mirror reserve attribute'), cfg.IntOpt( 'hitachi_replication_number', default=0, min=0, max=255, help='Instance number for REST API'), ] COMMON_MIRROR_OPTS = [ cfg.StrOpt( 'hitachi_mirror_storage_id', default=None, help='ID of secondary storage system'), cfg.StrOpt( 'hitachi_mirror_pool', default=None, help='Pool of secondary storage system'), cfg.StrOpt( 'hitachi_mirror_snap_pool', default=None, help='Thin pool of secondary storage system'), cfg.StrOpt( 'hitachi_mirror_ldev_range', default=None, help='Logical device range of secondary storage system'), cfg.ListOpt( 'hitachi_mirror_target_ports', default=[], help='Target port names for host group or iSCSI target'), cfg.ListOpt( 'hitachi_mirror_compute_target_ports', default=[], help=( 'Target port names of compute node ' 'for host group or iSCSI target')), cfg.IntOpt( 'hitachi_mirror_pair_target_number', min=0, max=99, default=0, help='Pair target name of the host group or iSCSI target'), ] ISCSI_MIRROR_OPTS = [ cfg.BoolOpt( 'hitachi_mirror_use_chap_auth', default=False, help='Whether or not to use iSCSI authentication'), cfg.StrOpt( 'hitachi_mirror_auth_user', default=None, help='iSCSI authentication username'), cfg.StrOpt( 'hitachi_mirror_auth_password', default=None, secret=True, help='iSCSI authentication password'), ] REST_MIRROR_OPTS = [ cfg.ListOpt( 'hitachi_mirror_rest_pair_target_ports', default=[], help='Target port names for pair of the host group or iSCSI target'), ] REST_MIRROR_API_OPTS = [ cfg.StrOpt( 'hitachi_mirror_rest_user', default=None, help='Username of secondary storage system for REST API'), cfg.StrOpt( 'hitachi_mirror_rest_password', default=None, secret=True, help='Password of secondary storage system for REST API'), cfg.StrOpt( 'hitachi_mirror_rest_api_ip', default=None, help='IP address of REST API server'), cfg.PortOpt( 'hitachi_mirror_rest_api_port', default=443, help='Port number of REST API server'), ] REST_MIRROR_SSL_OPTS = [ cfg.BoolOpt('hitachi_mirror_ssl_cert_verify', default=False, help='If set to True the http client will validate the SSL ' 'certificate of the backend endpoint.'), cfg.StrOpt('hitachi_mirror_ssl_cert_path', help='Can be used to specify a non default path to a ' 'CA_BUNDLE file or directory with certificates of ' 'trusted CAs, which will be used to validate the backend'), ] CONF = cfg.CONF CONF.register_opts(_REP_OPTS) CONF.register_opts(COMMON_MIRROR_OPTS) CONF.register_opts(ISCSI_MIRROR_OPTS) CONF.register_opts(REST_MIRROR_OPTS) CONF.register_opts(REST_MIRROR_API_OPTS) CONF.register_opts(REST_MIRROR_SSL_OPTS) LOG = logging.getLogger(__name__) MSG = utils.HBSDMsg def _pack_rep_provider_location(pldev=None, sldev=None, rep_type=None): provider_location = {} if pldev is not None: provider_location['pldev'] = pldev if sldev is not None: provider_location['sldev'] = sldev if rep_type is not None: provider_location['remote-copy'] = rep_type return json.dumps(provider_location) def _delays(short_interval, long_interval, timeout): start_time = timeutils.utcnow() watch = timeutils.StopWatch() i = 0 while True: watch.restart() yield i if utils.timed_out(start_time, timeout): raise StopIteration() watch.stop() interval = long_interval if utils.timed_out( start_time, long_interval) else short_interval idle = max(interval - watch.elapsed(), 0) greenthread.sleep(idle) i += 1 class 
HBSDREPLICATION(rest.HBSDREST): def __init__(self, conf, driverinfo, db): super(HBSDREPLICATION, self).__init__(conf, driverinfo, db) conf.append_config_values(_REP_OPTS) if driverinfo['proto'] == 'iSCSI': conf.append_config_values(ISCSI_MIRROR_OPTS) conf.append_config_values(REST_MIRROR_OPTS) conf.append_config_values(REST_MIRROR_API_OPTS) conf.append_config_values(REST_MIRROR_SSL_OPTS) driver_impl_class = self.driver_info['driver_impl_class'] self.primary = driver_impl_class(conf, driverinfo, db) self.rep_primary = self.primary self.rep_primary.is_primary = True self.rep_primary.storage_id = conf.safe_get( self.driver_info['param_prefix'] + '_storage_id') or '' self.primary_storage_id = self.rep_primary.storage_id self.secondary = driver_impl_class(conf, driverinfo, db) self.rep_secondary = self.secondary self.rep_secondary.is_secondary = True self.rep_secondary.storage_id = ( conf.safe_get( self.driver_info['param_prefix'] + '_mirror_storage_id') or '') self.secondary_storage_id = self.rep_secondary.storage_id self.instances = self.rep_primary, self.rep_secondary self._LDEV_NAME = self.driver_info['driver_prefix'] + '-LDEV-%d-%d' def update_mirror_conf(self, conf, opts): for opt in opts: name = opt.name.replace('hitachi_mirror_', 'hitachi_') try: if opt.name == 'hitachi_mirror_pool': if conf.safe_get('hitachi_mirror_pool'): name = 'hitachi_pools' value = [getattr(conf, opt.name)] else: raise ValueError() else: value = getattr(conf, opt.name) setattr(conf, name, value) except Exception: with excutils.save_and_reraise_exception(): self.rep_secondary.output_log( MSG.INVALID_PARAMETER, param=opt.name) def _replace_with_mirror_conf(self): conf = self.conf new_conf = utils.Config(conf) self.rep_secondary.conf = new_conf self.update_mirror_conf(new_conf, COMMON_MIRROR_OPTS) self.update_mirror_conf(new_conf, REST_MIRROR_OPTS) if self.rep_secondary.driver_info['volume_type'] == 'iscsi': self.update_mirror_conf(new_conf, ISCSI_MIRROR_OPTS) new_conf.san_login = ( conf.safe_get(self.driver_info['param_prefix'] + '_mirror_rest_user')) new_conf.san_password = ( conf.safe_get(self.driver_info['param_prefix'] + '_mirror_rest_password')) new_conf.san_ip = ( conf.safe_get(self.driver_info['param_prefix'] + '_mirror_rest_api_ip')) new_conf.san_api_port = ( conf.safe_get(self.driver_info['param_prefix'] + '_mirror_rest_api_port')) new_conf.driver_ssl_cert_verify = ( conf.safe_get(self.driver_info['param_prefix'] + '_mirror_ssl_cert_verify')) new_conf.driver_ssl_cert_path = ( conf.safe_get(self.driver_info['param_prefix'] + '_mirror_ssl_cert_path')) def do_setup(self, context): """Prepare for the startup of the driver.""" self.rep_primary = self.primary self.rep_secondary = self.secondary self.ctxt = context try: self.rep_primary.do_setup(context) self.client = self.rep_primary.client except Exception: self.rep_primary.output_log( MSG.SITE_INITIALIZATION_FAILED, site='primary') self.rep_primary = None try: self._replace_with_mirror_conf() self.rep_secondary.do_setup(context) except Exception: self.rep_secondary.output_log( MSG.SITE_INITIALIZATION_FAILED, site='secondary') if not self.rep_primary: raise self.rep_secondary = None def update_volume_stats(self): """Update properties, capabilities and current states of the driver.""" if self.rep_primary: data = self.rep_primary.update_volume_stats() else: data = self.rep_secondary.update_volume_stats() return data def _require_rep_primary(self): if not self.rep_primary: msg = utils.output_log( MSG.SITE_NOT_INITIALIZED, storage_id=self.primary_storage_id, 
site='primary') self.raise_error(msg) def _require_rep_secondary(self): if not self.rep_secondary: msg = utils.output_log( MSG.SITE_NOT_INITIALIZED, storage_id=self.secondary_storage_id, site='secondary') self.raise_error(msg) def _is_mirror_spec(self, extra_specs): topology = None if not extra_specs: return False if self.driver_info.get('driver_dir_name'): topology = extra_specs.get( self.driver_info['driver_dir_name'] + ':topology') if topology is None: return False elif topology == 'active_active_mirror_volume': return True else: msg = self.rep_primary.output_log( MSG.INVALID_EXTRA_SPEC_KEY, key=self.driver_info['driver_dir_name'] + ':topology', value=topology) self.raise_error(msg) def _create_rep_ldev(self, volume, extra_specs, rep_type, pvol=None): """Create a primary volume and a secondary volume.""" pool_id = self.rep_secondary.storage_info['pool_id'][0] ldev_range = self.rep_secondary.storage_info['ldev_range'] qos_specs = utils.get_qos_specs_from_volume(volume) thread = greenthread.spawn( self.rep_secondary.create_ldev, volume.size, extra_specs, pool_id, ldev_range, qos_specs=qos_specs) if pvol is None: try: pool_id = self.rep_primary.get_pool_id_of_volume(volume) ldev_range = self.rep_primary.storage_info['ldev_range'] pvol = self.rep_primary.create_ldev(volume.size, extra_specs, pool_id, ldev_range, qos_specs=qos_specs) except exception.VolumeDriverException: self.rep_primary.output_log(MSG.CREATE_LDEV_FAILED) try: svol = thread.wait() except Exception: self.rep_secondary.output_log(MSG.CREATE_LDEV_FAILED) svol = None if pvol is None or svol is None: for vol, type_, instance in zip((pvol, svol), ('P-VOL', 'S-VOL'), self.instances): if vol is None: msg = instance.output_log( MSG.CREATE_REPLICATION_VOLUME_FAILED, type=type_, rep_type=rep_type, volume_id=volume.id, volume_type=volume.volume_type.name, size=volume.size) else: instance.delete_ldev(vol) self.raise_error(msg) thread = greenthread.spawn( self.rep_secondary.modify_ldev_name, svol, volume['id'].replace("-", "")) try: self.rep_primary.modify_ldev_name( pvol, volume['id'].replace("-", "")) finally: thread.wait() return pvol, svol def _create_rep_copy_group_name(self, ldev): return self.driver_info['target_prefix'] + '%s%02XG%02d' % ( CONF.my_ip, self.conf.hitachi_replication_number, ldev >> 10) def _get_rep_copy_speed(self): rep_copy_speed = self.rep_primary.conf.safe_get( self.driver_info['param_prefix'] + '_replication_copy_speed') if rep_copy_speed: return rep_copy_speed else: return self.rep_primary.conf.hitachi_copy_speed def _get_wait_pair_status_change_params(self, wait_type): """Get a replication pair status information.""" _wait_pair_status_change_params = { _WAIT_PAIR: { 'instance': self.rep_primary, 'remote_client': self.rep_secondary.client, 'is_secondary': False, 'transitional_status': ['COPY'], 'expected_status': ['PAIR', 'PFUL'], 'msgid': MSG.CREATE_REPLICATION_PAIR_FAILED, 'status_keys': ['pvolStatus', 'svolStatus'], }, _WAIT_PSUS: { 'instance': self.rep_primary, 'remote_client': self.rep_secondary.client, 'is_secondary': False, 'transitional_status': ['PAIR', 'PFUL'], 'expected_status': ['PSUS', 'SSUS'], 'msgid': MSG.SPLIT_REPLICATION_PAIR_FAILED, 'status_keys': ['pvolStatus', 'svolStatus'], } } return _wait_pair_status_change_params[wait_type] def _wait_pair_status_change(self, copy_group_name, pvol, svol, rep_type, wait_type): """Wait until the replication pair status changes to the specified status. 
""" for _ in _delays( self.conf.hitachi_replication_status_check_short_interval, self.conf.hitachi_replication_status_check_long_interval, self.conf.hitachi_replication_status_check_timeout): params = self._get_wait_pair_status_change_params(wait_type) status = params['instance'].client.get_remote_copypair( params['remote_client'], copy_group_name, pvol, svol, is_secondary=params['is_secondary']) statuses = [status.get(status_key) for status_key in params['status_keys']] unexpected_status_set = (set(statuses) - set(params['expected_status'])) if not unexpected_status_set: break if unexpected_status_set.issubset( set(params['transitional_status'])): continue msg = params['instance'].output_log( params['msgid'], rep_type=rep_type, pvol=pvol, svol=svol, copy_group=copy_group_name, status='/'.join(statuses)) self.raise_error(msg) else: status = params['instance'].client.get_remote_copypair( params['remote_client'], copy_group_name, pvol, svol, is_secondary=params['is_secondary']) msg = params['instance'].output_log( MSG.PAIR_CHANGE_TIMEOUT, rep_type=rep_type, pvol=pvol, svol=svol, copy_group=copy_group_name, current_status='/'.join(statuses), expected_status=str(params['expected_status']), timeout=self.conf.hitachi_replication_status_check_timeout) self.raise_error(msg) def _create_rep_pair(self, volume, pvol, svol, rep_type, do_initialcopy=True): """Create a replication pair.""" copy_group_name = self._create_rep_copy_group_name(pvol) @utils.synchronized_on_copy_group() def inner(self, remote_client, copy_group_name, secondary_storage_id, conf, copyPace, parent): is_new_copy_grp = True result = self.get_remote_copy_grps(remote_client) if result: for data in result: if copy_group_name == data['copyGroupName']: is_new_copy_grp = False break body = { 'copyGroupName': copy_group_name, 'copyPairName': parent._LDEV_NAME % (pvol, svol), 'replicationType': rep_type, 'remoteStorageDeviceId': secondary_storage_id, 'pvolLdevId': pvol, 'svolLdevId': svol, 'pathGroupId': conf.hitachi_path_group_id, 'localDeviceGroupName': copy_group_name + 'P', 'remoteDeviceGroupName': copy_group_name + 'S', 'isNewGroupCreation': is_new_copy_grp, 'doInitialCopy': do_initialcopy, 'isDataReductionForceCopy': False } if rep_type == parent.driver_info['mirror_attr']: body['quorumDiskId'] = conf.hitachi_quorum_disk_id body['copyPace'] = copyPace if is_new_copy_grp: body['muNumber'] = 0 self.add_remote_copypair(remote_client, body) inner( self.rep_primary.client, self.rep_secondary.client, copy_group_name, self.rep_secondary.storage_id, self.rep_secondary.conf, self._get_rep_copy_speed(), self) self._wait_pair_status_change( copy_group_name, pvol, svol, rep_type, _WAIT_PAIR) def _create_rep_ldev_and_pair( self, volume, extra_specs, rep_type, pvol=None): """Create volume and Replication pair.""" capacity_saving = None if self.driver_info.get('driver_dir_name'): capacity_saving = extra_specs.get( self.driver_info['driver_dir_name'] + ':capacity_saving') if capacity_saving == 'deduplication_compression': msg = self.output_log( MSG.DEDUPLICATION_IS_ENABLED, rep_type=rep_type, volume_id=volume.id, volume_type=volume.volume_type.name, size=volume.size) if pvol is not None: self.rep_primary.delete_ldev(pvol) self.raise_error(msg) svol = None pvol, svol = self._create_rep_ldev(volume, extra_specs, rep_type, pvol) try: thread = greenthread.spawn( self.rep_secondary.initialize_pair_connection, svol) try: self.rep_primary.initialize_pair_connection(pvol) finally: thread.wait() if self.rep_primary.conf.\ 
hitachi_set_mirror_reserve_attribute: self.rep_secondary.client.assign_virtual_ldevid(svol) self._create_rep_pair(volume, pvol, svol, rep_type) except Exception: with excutils.save_and_reraise_exception(): if svol is not None: self.rep_secondary.terminate_pair_connection(svol) if self.rep_primary.conf.\ hitachi_set_mirror_reserve_attribute: self.rep_secondary.client.unassign_virtual_ldevid( svol) self.rep_secondary.delete_ldev(svol) if pvol is not None: self.rep_primary.terminate_pair_connection(pvol) self.rep_primary.delete_ldev(pvol) return pvol, svol def create_volume(self, volume): """Create a volume from a volume or snapshot and return its properties. """ self._require_rep_primary() extra_specs = self.rep_primary.get_volume_extra_specs(volume) if self._is_mirror_spec(extra_specs): self._require_rep_secondary() rep_type = self.driver_info['mirror_attr'] pldev, sldev = self._create_rep_ldev_and_pair( volume, extra_specs, rep_type) provider_location = _pack_rep_provider_location( pldev, sldev, rep_type) return { 'provider_location': provider_location } return self.rep_primary.create_volume(volume) def _has_rep_pair(self, ldev, ldev_info=None): """Return if the specified LDEV has a replication pair. :param int ldev: The LDEV ID :param dict ldev_info: LDEV info :return: True if the LDEV status is normal and the LDEV has a replication pair, False otherwise :rtype: bool """ if ldev_info is None: ldev_info = self.rep_primary.get_ldev_info( ['status', 'attributes'], ldev) return (ldev_info['status'] == rest.NORMAL_STS and self.driver_info['mirror_attr'] in ldev_info['attributes']) def _get_rep_pair_info(self, pldev, ldev_info=None): """Return replication pair info. :param int pldev: The ID of the LDEV(P-VOL in case of a pair) :param dict ldev_info: LDEV info :return: replication pair info. An empty dict if the LDEV does not have a pair. :rtype: dict """ pair_info = {} if not self._has_rep_pair(pldev, ldev_info): return pair_info self._require_rep_secondary() copy_group_name = self._create_rep_copy_group_name(pldev) pairs = self.rep_primary.client.get_remote_copy_grp( self.rep_secondary.client, copy_group_name).get('copyPairs', []) for pair in pairs: if (pair.get('replicationType') in [self.driver_info['mirror_attr']] and pair['pvolLdevId'] == pldev): break else: return pair_info pair_info['pvol'] = pldev pair_info['svol_info'] = [{ 'ldev': pair.get('svolLdevId'), 'rep_type': pair.get('replicationType'), 'is_psus': pair.get('svolStatus') in ['SSUS', 'PFUS'], 'pvol_status': pair.get('pvolStatus'), 'svol_status': pair.get('svolStatus')}] return pair_info def _split_rep_pair(self, pvol, svol): copy_group_name = self._create_rep_copy_group_name(pvol) rep_type = self.driver_info['mirror_attr'] self.rep_primary.client.split_remote_copypair( self.rep_secondary.client, copy_group_name, pvol, svol, rep_type) self._wait_pair_status_change( copy_group_name, pvol, svol, rep_type, _WAIT_PSUS) def _delete_rep_pair(self, pvol, svol): """Delete a replication pair.""" copy_group_name = self._create_rep_copy_group_name(pvol) self._split_rep_pair(pvol, svol) self.rep_primary.client.delete_remote_copypair( self.rep_secondary.client, copy_group_name, pvol, svol) def _delete_volume_pre_check(self, volume): """Pre-check for delete_volume(). 
:param Volume volume: The volume to be checked :return: svol: The ID of the S-VOL :rtype: int :return: pvol_is_invalid: True if P-VOL is invalid, False otherwise :rtype: bool :return: svol_is_invalid: True if S-VOL is invalid, False otherwise :rtype: bool :return: pair_exists: True if the pair exists, False otherwise :rtype: bool """ # Check if the LDEV in the primary storage corresponds to the volume pvol_is_invalid = True # To avoid KeyError when accessing a missing attribute, set the default # value to None. pvol_info = defaultdict(lambda: None) pvol = self.rep_primary.get_ldev(volume) if pvol is not None: if self.rep_primary.is_invalid_ldev(pvol, volume, pvol_info): # If the LDEV is assigned to another object, skip deleting it. self.rep_primary.output_log( MSG.SKIP_DELETING_LDEV, obj='volume', obj_id=volume.id, ldev=pvol, ldev_label=pvol_info['label']) else: pvol_is_invalid = False # Check if the pair exists on the storage. pair_exists = False svol_is_invalid = True svol = None if not pvol_is_invalid: pair_info = self._get_rep_pair_info(pvol, pvol_info) if pair_info: pair_exists = True # Because this pair is a valid P-VOL's pair, we need to delete # it and its LDEVs. The LDEV ID of the S-VOL to be deleted is # uniquely determined from the pair info. Therefore, there is # no need to get it from provider_location or to validate the # S-VOL by comparing the volume ID with the S-VOL's label. svol = pair_info['svol_info'][0]['ldev'] svol_is_invalid = False # Check if the LDEV in the secondary storage corresponds to the volume if svol_is_invalid: svol = self.rep_secondary.get_ldev(volume) if svol is not None: # To avoid KeyError when accessing a missing attribute, set the # default value to None. svol_info = defaultdict(lambda: None) if self.rep_secondary.is_invalid_ldev(svol, volume, svol_info): # If the LDEV is assigned to another object, skip deleting # it. self.rep_secondary.output_log( MSG.SKIP_DELETING_LDEV, obj='volume', obj_id=volume.id, ldev=svol, ldev_label=svol_info['label']) else: svol_is_invalid = False return svol, pvol_is_invalid, svol_is_invalid, pair_exists def delete_volume(self, volume): """Delete the specified volume.""" self._require_rep_primary() ldev = self.rep_primary.get_ldev(volume) if ldev is None: self.rep_primary.output_log( MSG.INVALID_LDEV_FOR_DELETION, method='delete_volume', id=volume.id) return # Run pre-check. svol, pvol_is_invalid, svol_is_invalid, pair_exists = ( self._delete_volume_pre_check(volume)) # Delete the pair if it exists. if pair_exists: self._delete_rep_pair(ldev, svol) # Delete LDEVs if they are valid. thread = None if not svol_is_invalid: thread = greenthread.spawn( self.rep_secondary.delete_volume, volume) try: if not pvol_is_invalid: self.rep_primary.delete_volume(volume) finally: if thread is not None: thread.wait() def delete_ldev(self, ldev, ldev_info=None): """Delete the specified LDEV[s]. 
:param int ldev: The ID of the LDEV(P-VOL in case of a pair) to be deleted :param dict ldev_info: LDEV(P-VOL in case of a pair) info :return: None """ self._require_rep_primary() pair_info = self._get_rep_pair_info(ldev, ldev_info) if pair_info: self._delete_rep_pair(ldev, pair_info['svol_info'][0]['ldev']) th = greenthread.spawn(self.rep_secondary.delete_ldev, pair_info['svol_info'][0]['ldev']) try: self.rep_primary.delete_ldev(ldev) finally: th.wait() else: self.rep_primary.delete_ldev(ldev) def _create_rep_volume_from_src( self, volume, extra_specs, src, src_type, operation): """Create a replication volume from a volume or snapshot and return its properties. """ rep_type = self.driver_info['mirror_attr'] data = self.rep_primary.create_volume_from_src( volume, src, src_type, is_rep=True) new_ldev = self.rep_primary.get_ldev(data) sldev = self._create_rep_ldev_and_pair( volume, extra_specs, rep_type, new_ldev)[1] provider_location = _pack_rep_provider_location( new_ldev, sldev, rep_type) return { 'provider_location': provider_location, } def _create_volume_from_src(self, volume, src, src_type): """Create a volume from a volume or snapshot and return its properties. """ self._require_rep_primary() operation = ('create a volume from a %s' % src_type) extra_specs = self.rep_primary.get_volume_extra_specs(volume) if self._is_mirror_spec(extra_specs): self._require_rep_secondary() return self._create_rep_volume_from_src( volume, extra_specs, src, src_type, operation) return self.rep_primary.create_volume_from_src(volume, src, src_type) def create_cloned_volume(self, volume, src_vref): """Create a clone of the specified volume and return its properties.""" return self._create_volume_from_src( volume, src_vref, common.STR_VOLUME) def create_volume_from_snapshot(self, volume, snapshot): """Create a volume from a snapshot and return its properties.""" return self._create_volume_from_src( volume, snapshot, common.STR_SNAPSHOT) def create_snapshot(self, snapshot): """Create a snapshot from a volume and return its properties.""" self._require_rep_primary() return self.rep_primary.create_snapshot(snapshot) def delete_snapshot(self, snapshot): """Delete the specified snapshot.""" self._require_rep_primary() self.rep_primary.delete_snapshot(snapshot) def _get_remote_copy_mode(self, vol): provider_location = vol.get('provider_location') if not provider_location: return None if provider_location.startswith('{'): loc = json.loads(provider_location) if isinstance(loc, dict): return loc.get('remote-copy') return None def _merge_properties(self, prop1, prop2): if prop1 is None: if prop2 is None: return [] return prop2 elif prop2 is None: return prop1 d = dict(prop1) for key in ('target_luns', 'target_wwn', 'target_portals', 'target_iqns'): if key in d: d[key] = d[key] + prop2[key] if 'initiator_target_map' in d: for key2 in d['initiator_target_map']: d['initiator_target_map'][key2] = ( d['initiator_target_map'][key2] + prop2['initiator_target_map'][key2]) return d def initialize_connection_mirror(self, volume, connector): lun = None prop1 = None prop2 = None if self.rep_primary: try: conn_info1 = ( self.rep_primary.initialize_connection( volume, connector, is_mirror=True)) except Exception as ex: self.rep_primary.output_log( MSG.REPLICATION_VOLUME_OPERATION_FAILED, operation='attach', type='P-VOL', volume_id=volume.id, reason=str(ex)) else: prop1 = conn_info1['data'] if self.driver_info['volume_type'] == 'fibre_channel': if 'target_lun' in prop1: lun = prop1['target_lun'] else: lun = prop1['target_luns'][0] 
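# For Fibre Channel, the LUN assigned by the primary attach above is reused
# for the secondary attach below so that, with multipath, both storage
# systems present the volume at the same target LUN (see the comment in
# HBSDREST.map_ldev()).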
if self.rep_secondary: try: conn_info2 = ( self.rep_secondary.initialize_connection( volume, connector, lun=lun, is_mirror=True)) except Exception as ex: self.rep_secondary.output_log( MSG.REPLICATION_VOLUME_OPERATION_FAILED, operation='attach', type='S-VOL', volume_id=volume.id, reason=str(ex)) if prop1 is None: raise ex else: prop2 = conn_info2['data'] conn_info = { 'driver_volume_type': self.driver_info['volume_type'], 'data': self._merge_properties(prop1, prop2), } return conn_info def initialize_connection(self, volume, connector, is_snapshot=False): """Initialize connection between the server and the volume.""" if (self._get_remote_copy_mode(volume) == self.driver_info['mirror_attr']): conn_info = self.initialize_connection_mirror(volume, connector) if self.driver_info['volume_type'] == 'fibre_channel': fczm_utils.add_fc_zone(conn_info) return conn_info else: self._require_rep_primary() return self.rep_primary.initialize_connection( volume, connector, is_snapshot) def terminate_connection_mirror(self, volume, connector): prop1 = None prop2 = None if self.rep_primary: try: conn_info1 = self.rep_primary.terminate_connection( volume, connector, is_mirror=True) except Exception as ex: self.rep_primary.output_log( MSG.REPLICATION_VOLUME_OPERATION_FAILED, operation='detach', type='P-VOL', volume_id=volume.id, reason=str(ex)) raise ex else: if conn_info1: prop1 = conn_info1['data'] if self.rep_secondary: try: conn_info2 = self.rep_secondary.terminate_connection( volume, connector, is_mirror=True) except Exception as ex: self.rep_secondary.output_log( MSG.REPLICATION_VOLUME_OPERATION_FAILED, operation='detach', type='S-VOL', volume_id=volume.id, reason=str(ex)) raise ex else: if conn_info2: prop2 = conn_info2['data'] conn_info = { 'driver_volume_type': self.driver_info['volume_type'], 'data': self._merge_properties(prop1, prop2), } return conn_info def terminate_connection(self, volume, connector): """Terminate connection between the server and the volume.""" if (self._get_remote_copy_mode(volume) == self.driver_info['mirror_attr']): conn_info = self.terminate_connection_mirror(volume, connector) if self.driver_info['volume_type'] == 'fibre_channel': fczm_utils.remove_fc_zone(conn_info) return conn_info else: self._require_rep_primary() return self.rep_primary.terminate_connection(volume, connector) def _extend_pair_volume(self, volume, new_size, ldev, pair_info): """Extend the specified replication volume to the specified size.""" rep_type = self.driver_info['mirror_attr'] pvol_info = self.rep_primary.get_ldev_info( ['numOfPorts'], pair_info['pvol']) if pvol_info['numOfPorts'] > 1: msg = self.rep_primary.output_log( MSG.EXTEND_REPLICATION_VOLUME_ERROR, rep_type=rep_type, volume_id=volume.id, ldev=ldev, source_size=volume.size, destination_size=new_size, pvol=pair_info['pvol'], svol='', pvol_num_of_ports=pvol_info['numOfPorts'], svol_num_of_ports='') self.raise_error(msg) self._delete_rep_pair( ldev, pair_info['svol_info'][0]['ldev']) thread = greenthread.spawn( self.rep_secondary.extend_volume, volume, new_size) try: self.rep_primary.extend_volume(volume, new_size) finally: thread.wait() self._create_rep_pair( volume, pair_info['pvol'], pair_info['svol_info'][0]['ldev'], rep_type, do_initialcopy=False) def extend_volume(self, volume, new_size): """Extend the specified volume to the specified size.""" self._require_rep_primary() ldev = self.rep_primary.get_ldev(volume) if ldev is None: msg = self.rep_primary.output_log( MSG.INVALID_LDEV_FOR_EXTENSION, volume_id=volume.id) 
self.raise_error(msg) pair_info = self._get_rep_pair_info(ldev) if pair_info: self._extend_pair_volume(volume, new_size, ldev, pair_info) else: self.rep_primary.extend_volume(volume, new_size) def manage_existing(self, volume, existing_ref): """Return volume properties which Cinder needs to manage the volume.""" self._require_rep_primary() return self.rep_primary.manage_existing(volume, existing_ref) def manage_existing_get_size(self, volume, existing_ref): """Return the size[GB] of the specified volume.""" self._require_rep_primary() return self.rep_primary.manage_existing_get_size(volume, existing_ref) def unmanage(self, volume): """Prepare the volume for removing it from Cinder management.""" self._require_rep_primary() ldev = self.rep_primary.get_ldev(volume) if ldev is None: self.rep_primary.output_log( MSG.INVALID_LDEV_FOR_DELETION, method='unmanage', id=volume.id) return if self._has_rep_pair(ldev): msg = self.rep_primary.output_log( MSG.REPLICATION_PAIR_ERROR, operation='unmanage a volume', volume=volume.id, snapshot_info='', ldev=ldev) self.raise_error(msg) self.rep_primary.unmanage(volume) def discard_zero_page(self, volume): self._require_rep_primary() ldev = self.rep_primary.get_ldev(volume) if self._has_rep_pair(ldev): self._require_rep_secondary() th = greenthread.spawn( self.rep_secondary.discard_zero_page, volume) try: self.rep_primary.discard_zero_page(volume) finally: th.wait() else: self.rep_primary.discard_zero_page(volume) def unmanage_snapshot(self, snapshot): if not self.rep_primary: return self.rep_secondary.unmanage_snapshot(snapshot) else: return self.rep_primary.unmanage_snapshot(snapshot) def retype(self, ctxt, volume, new_type, diff, host): self._require_rep_primary() ldev = self.rep_primary.get_ldev(volume) if ldev is None: msg = self.rep_primary.output_log( MSG.INVALID_LDEV_FOR_VOLUME_COPY, type='volume', id=volume.id) self.raise_error(msg) if (self._has_rep_pair(ldev) or self._is_mirror_spec(new_type['extra_specs'])): return False return self.rep_primary.retype( ctxt, volume, new_type, diff, host) def migrate_volume(self, volume, host): self._require_rep_primary() ldev = self.rep_primary.get_ldev(volume) if ldev is None: msg = self.rep_primary.output_log( MSG.INVALID_LDEV_FOR_VOLUME_COPY, type='volume', id=volume.id) self.raise_error(msg) if self._get_rep_pair_info(ldev): return False, None else: return self.rep_primary.migrate_volume(volume, host) def _resync_rep_pair(self, pvol, svol): copy_group_name = self._create_rep_copy_group_name(pvol) rep_type = self.driver_info['mirror_attr'] self.rep_primary.client.resync_remote_copypair( self.rep_secondary.client, copy_group_name, pvol, svol, rep_type, copy_speed=self._get_rep_copy_speed()) self._wait_pair_status_change( copy_group_name, pvol, svol, rep_type, _WAIT_PAIR) def revert_to_snapshot(self, volume, snapshot): """Rollback the specified snapshot.""" self._require_rep_primary() ldev = self.rep_primary.get_ldev(volume) svol = self.rep_primary.get_ldev(snapshot) if None in (ldev, svol): raise NotImplementedError() pair_info = self._get_rep_pair_info(ldev) is_snap = self.rep_primary.has_snap_pair(ldev, svol) if pair_info and is_snap: self._split_rep_pair(pair_info['pvol'], pair_info['svol_info'][0]['ldev']) try: self.rep_primary.revert_to_snapshot(volume, snapshot) finally: if pair_info and is_snap: self._resync_rep_pair(pair_info['pvol'], pair_info['svol_info'][0]['ldev']) def create_group(self): self._require_rep_primary() return self.rep_primary.create_group() def delete_group(self, group, volumes): 
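"""Delete the specified group and its volumes."""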
self._require_rep_primary() return super(HBSDREPLICATION, self).delete_group(group, volumes) def create_group_from_src( self, context, group, volumes, snapshots=None, source_vols=None): self._require_rep_primary() return super(HBSDREPLICATION, self).create_group_from_src( context, group, volumes, snapshots, source_vols) def update_group(self, group, add_volumes=None): self._require_rep_primary() return self.rep_primary.update_group(group, add_volumes) def create_group_snapshot(self, context, group_snapshot, snapshots): self._require_rep_primary() return self.rep_primary.create_group_snapshot( context, group_snapshot, snapshots) def delete_group_snapshot(self, group_snapshot, snapshots): self._require_rep_primary() return self.rep_primary.delete_group_snapshot( group_snapshot, snapshots) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/drivers/hitachi/hbsd_rest.py0000664000175000017500000021000600000000000023257 0ustar00zuulzuul00000000000000# Copyright (C) 2020, 2024, Hitachi, Ltd. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # """REST interface module for Hitachi HBSD Driver.""" from collections import defaultdict import re from oslo_config import cfg from oslo_config import types from oslo_log import log as logging from oslo_service import loopingcall from oslo_utils import excutils from oslo_utils import timeutils from oslo_utils import units from cinder import exception from cinder.objects import fields from cinder.objects import SnapshotList from cinder.volume import configuration from cinder.volume.drivers.hitachi import hbsd_common as common from cinder.volume.drivers.hitachi import hbsd_rest_api as rest_api from cinder.volume.drivers.hitachi import hbsd_utils as utils from cinder.volume.drivers.san import san from cinder.volume import volume_utils _GROUP_NAME_PROHIBITED_CHAR_PATTERN = re.compile( '[^' + common.GROUP_NAME_ALLOWED_CHARS + ']') _LU_PATH_DEFINED = ('B958', '015A') NORMAL_STS = 'NML' _LUN_TIMEOUT = 50 _LUN_RETRY_INTERVAL = 1 _RESTORE_TIMEOUT = 24 * 60 * 60 _STATE_TRANSITION_TIMEOUT = 15 * 60 _CHECK_LDEV_MANAGEABILITY_KEYS = ( 'emulationType', 'numOfPorts', 'attributes', 'status') _CHECK_LDEV_SIZE_KEYS = ('blockCapacity',) SMPL = 1 PVOL = 2 SVOL = 3 COPY = 2 PAIR = 3 PSUS = 4 PSUE = 5 SMPP = 6 UNKN = 0xff _STATUS_TABLE = { 'SMPL': SMPL, 'COPY': COPY, 'RCPY': COPY, 'PAIR': PAIR, 'PFUL': PAIR, 'PSUS': PSUS, 'PFUS': PSUS, 'SSUS': PSUS, 'PSUE': PSUE, 'PSUP': PSUS, 'SSUP': PSUS, 'SMPP': SMPP, } _SNAP_HASH_SIZE = 8 EX_ENOOBJ = 'EX_ENOOBJ' _REST_DEFAULT_PORT = 443 _GET_LDEV_COUNT = 16384 _MAX_LDEV_ID = 65535 EX_ENLDEV = 'EX_ENLDEV' EX_INVARG = 'EX_INVARG' _INVALID_RANGE = [EX_ENLDEV, EX_INVARG] _MAX_COPY_GROUP_NAME = 29 _MAX_CTG_COUNT_EXCEEDED_ADD_SNAPSHOT = ('2E10', '2302') _MAX_PAIR_COUNT_IN_CTG_EXCEEDED_ADD_SNAPSHOT = ('2E13', '9900') _PAIR_TARGET_NAME_BODY_DEFAULT = 'pair00' _DR_VOL_PATTERN = { 'disabled': ('REHYDRATING',), 'compression_deduplication': ('ENABLED',), None: ('DELETING',), } 
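# A minimal usage sketch of _DR_VOL_PATTERN: it maps an LDEV's
# dataReductionMode to the dataReductionStatus values for which the deletion
# has to be forced, which is the check HBSDREST.delete_ldev_from_storage()
# performs before deleting an LDEV. The _example_* names are illustrative
# only and are removed again below.
_example_ldev = {'dataReductionMode': 'compression_deduplication',
                 'dataReductionStatus': 'ENABLED'}
if _example_ldev['dataReductionStatus'] in _DR_VOL_PATTERN.get(
        _example_ldev['dataReductionMode'], ()):
    _example_body = {'isDataReductionDeleteForceExecute': True}
else:
    _example_body = None
# _example_body is {'isDataReductionDeleteForceExecute': True} at this point.
del _example_ldev, _example_body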
_DISABLE_ABLE_DR_STATUS = { 'disabled': ('DISABLED', 'ENABLING', 'REHYDRATING'), 'compression_deduplication': ('ENABLED', 'ENABLING'), } _DEDUPCOMP_ABLE_DR_STATUS = { 'disabled': ('DISABLED', 'ENABLING'), 'compression_deduplication': ('ENABLED', 'ENABLING'), } _CAPACITY_SAVING_DR_MODE = { 'disable': 'disabled', 'deduplication_compression': 'compression_deduplication', '': 'disabled', None: 'disabled', } REST_VOLUME_OPTS = [ cfg.BoolOpt( 'hitachi_rest_disable_io_wait', default=True, help='This option will allow detaching volume immediately. ' 'If set False, storage may take few minutes to detach volume ' 'after I/O.'), cfg.BoolOpt( 'hitachi_rest_tcp_keepalive', default=True, help='Enables or disables use of REST API tcp keepalive'), cfg.BoolOpt( 'hitachi_discard_zero_page', default=True, help='Enable or disable zero page reclamation in a DP-VOL.'), cfg.IntOpt( 'hitachi_lun_timeout', default=_LUN_TIMEOUT, help='Maximum wait time in seconds for adding a LUN mapping to ' 'the server.'), cfg.IntOpt( 'hitachi_lun_retry_interval', default=_LUN_RETRY_INTERVAL, help='Retry interval in seconds for REST API adding a LUN mapping to ' 'the server.'), cfg.IntOpt( 'hitachi_restore_timeout', default=_RESTORE_TIMEOUT, help='Maximum wait time in seconds for the restore operation to ' 'complete.'), cfg.IntOpt( 'hitachi_state_transition_timeout', default=_STATE_TRANSITION_TIMEOUT, help='Maximum wait time in seconds for a volume transition to ' 'complete.'), cfg.IntOpt( 'hitachi_lock_timeout', default=rest_api._LOCK_TIMEOUT, help='Maximum wait time in seconds for storage to be logined or ' 'unlocked.'), cfg.IntOpt( 'hitachi_rest_timeout', default=rest_api._REST_TIMEOUT, help='Maximum wait time in seconds for each REST API request.'), cfg.IntOpt( 'hitachi_extend_timeout', default=rest_api._EXTEND_TIMEOUT, help='Maximum wait time in seconds for a volume extention to ' 'complete.'), cfg.IntOpt( 'hitachi_exec_retry_interval', default=rest_api._EXEC_RETRY_INTERVAL, help='Retry interval in seconds for REST API execution.'), cfg.IntOpt( 'hitachi_rest_connect_timeout', default=rest_api._DEFAULT_CONNECT_TIMEOUT, help='Maximum wait time in seconds for connecting to ' 'REST API session.'), cfg.IntOpt( 'hitachi_rest_job_api_response_timeout', default=rest_api._JOB_API_RESPONSE_TIMEOUT, help='Maximum wait time in seconds for a response against ' 'async methods from REST API, for example PUT and DELETE.'), cfg.IntOpt( 'hitachi_rest_get_api_response_timeout', default=rest_api._GET_API_RESPONSE_TIMEOUT, help='Maximum wait time in seconds for a response against ' 'sync methods, for example GET'), cfg.IntOpt( 'hitachi_rest_server_busy_timeout', default=rest_api._REST_SERVER_BUSY_TIMEOUT, help='Maximum wait time in seconds when REST API returns busy.'), cfg.IntOpt( 'hitachi_rest_keep_session_loop_interval', default=rest_api._KEEP_SESSION_LOOP_INTERVAL, help='Loop interval in seconds for keeping REST API session.'), cfg.IntOpt( 'hitachi_rest_another_ldev_mapped_retry_timeout', default=rest_api._ANOTHER_LDEV_MAPPED_RETRY_TIMEOUT, help='Retry time in seconds when new LUN allocation request fails.'), cfg.IntOpt( 'hitachi_rest_tcp_keepidle', default=rest_api._TCP_KEEPIDLE, help='Wait time in seconds for sending a first TCP keepalive packet.'), cfg.IntOpt( 'hitachi_rest_tcp_keepintvl', default=rest_api._TCP_KEEPINTVL, help='Interval of transmissions in seconds for TCP keepalive packet.'), cfg.IntOpt( 'hitachi_rest_tcp_keepcnt', default=rest_api._TCP_KEEPCNT, help='Maximum number of transmissions for TCP keepalive packet.'), 
cfg.ListOpt( 'hitachi_host_mode_options', item_type=types.Integer(), default=[], help='Host mode option for host group or iSCSI target.'), ] REST_PAIR_OPTS = [ cfg.ListOpt( 'hitachi_rest_pair_target_ports', default=[], help='Target port names for pair of the host group or iSCSI target'), ] _REQUIRED_REST_OPTS = [ 'san_login', 'san_password', 'san_ip', ] CONF = cfg.CONF CONF.register_opts(REST_VOLUME_OPTS, group=configuration.SHARED_CONF_GROUP) CONF.register_opts(REST_PAIR_OPTS, group=configuration.SHARED_CONF_GROUP) LOG = logging.getLogger(__name__) MSG = utils.HBSDMsg def _is_valid_target(self, target, target_name, target_ports, is_pair): """Check if the specified target is valid.""" if is_pair: return (target[:utils.PORT_ID_LENGTH] in target_ports and target_name == self._PAIR_TARGET_NAME) return (target[:utils.PORT_ID_LENGTH] in target_ports and target_name.startswith(self.driver_info['target_prefix']) and target_name != self._PAIR_TARGET_NAME) def _check_ldev_manageability(self, ldev_info, ldev, existing_ref): """Check if the LDEV meets the criteria for being managed.""" if ldev_info['status'] != NORMAL_STS: msg = self.output_log(MSG.INVALID_LDEV_FOR_MANAGE) raise exception.ManageExistingInvalidReference( existing_ref=existing_ref, reason=msg) attributes = set(ldev_info['attributes']) if (not ldev_info['emulationType'].startswith('OPEN-V') or len(attributes) < 2 or not attributes.issubset( set(['CVS', self.driver_info['hdp_vol_attr'], self.driver_info['hdt_vol_attr']]))): msg = self.output_log(MSG.INVALID_LDEV_ATTR_FOR_MANAGE, ldev=ldev, ldevtype=self.driver_info['nvol_ldev_type']) raise exception.ManageExistingInvalidReference( existing_ref=existing_ref, reason=msg) if ldev_info['numOfPorts']: msg = self.output_log(MSG.INVALID_LDEV_PORT_FOR_MANAGE, ldev=ldev) raise exception.ManageExistingInvalidReference( existing_ref=existing_ref, reason=msg) def _check_ldev_size(self, ldev_info, ldev, existing_ref): """Hitachi storage calculates volume sizes in a block unit, 512 bytes.""" if ldev_info['blockCapacity'] % utils.GIGABYTE_PER_BLOCK_SIZE: msg = self.output_log(MSG.INVALID_LDEV_SIZE_FOR_MANAGE, ldev=ldev) raise exception.ManageExistingInvalidReference( existing_ref=existing_ref, reason=msg) class HBSDREST(common.HBSDCommon): """REST interface class for Hitachi HBSD Driver.""" def __init__(self, conf, storage_protocol, db): """Initialize instance variables.""" super(HBSDREST, self).__init__(conf, storage_protocol, db) self.conf.append_config_values(REST_VOLUME_OPTS) self.conf.append_config_values(REST_PAIR_OPTS) self.conf.append_config_values(san.san_opts) self.client = None def do_setup(self, context): if hasattr( self.conf, self.driver_info['param_prefix'] + '_pair_target_number'): self._PAIR_TARGET_NAME_BODY = 'pair%02d' % ( self.conf.safe_get(self.driver_info['param_prefix'] + '_pair_target_number')) else: self._PAIR_TARGET_NAME_BODY = _PAIR_TARGET_NAME_BODY_DEFAULT self._PAIR_TARGET_NAME = (self.driver_info['target_prefix'] + self._PAIR_TARGET_NAME_BODY) super(HBSDREST, self).do_setup(context) def setup_client(self): """Initialize RestApiClient.""" verify = self.conf.driver_ssl_cert_verify if verify: verify_path = self.conf.safe_get('driver_ssl_cert_path') if verify_path: verify = verify_path self.verify = verify is_rep = False if self.storage_id is not None: is_rep = True self.client = rest_api.RestApiClient( self.conf, self.conf.san_ip, self.conf.san_api_port, self.conf.hitachi_storage_id, self.conf.san_login, self.conf.san_password, self.driver_info['driver_prefix'], 
tcp_keepalive=self.conf.hitachi_rest_tcp_keepalive, verify=verify, is_rep=is_rep) self.client.login() def need_client_setup(self): """Check if the making of the communication client is necessary.""" return not self.client or not self.client.get_my_session() def enter_keep_session(self): """Begin the keeping of the session.""" if self.client is not None: self.client.enter_keep_session() def _set_dr_mode(self, body, capacity_saving): dr_mode = _CAPACITY_SAVING_DR_MODE.get(capacity_saving) if not dr_mode: msg = self.output_log( MSG.INVALID_EXTRA_SPEC_KEY, key=self.driver_info['driver_dir_name'] + ':capacity_saving', value=capacity_saving) self.raise_error(msg) body['dataReductionMode'] = dr_mode def _create_ldev_on_storage(self, size, extra_specs, pool_id, ldev_range): """Create an LDEV on the storage system.""" body = { 'byteFormatCapacity': '%sG' % size, 'poolId': pool_id, 'isParallelExecutionEnabled': True, } capacity_saving = None if self.driver_info.get('driver_dir_name'): capacity_saving = extra_specs.get( self.driver_info['driver_dir_name'] + ':capacity_saving') if capacity_saving: self._set_dr_mode(body, capacity_saving) if self.storage_info['ldev_range']: min_ldev, max_ldev = self.storage_info['ldev_range'][:2] body['startLdevId'] = min_ldev body['endLdevId'] = max_ldev return self.client.add_ldev(body, no_log=True) def set_qos_specs(self, ldev, qos_specs): self.client.set_qos_specs(ldev, qos_specs) def create_ldev(self, size, extra_specs, pool_id, ldev_range, qos_specs=None): """Create an LDEV of the specified size and the specified type.""" ldev = self._create_ldev_on_storage( size, extra_specs, pool_id, ldev_range) LOG.debug('Created logical device. (LDEV: %s)', ldev) if qos_specs: try: self.set_qos_specs(ldev, qos_specs) except Exception: with excutils.save_and_reraise_exception(): try: self.delete_ldev(ldev) except exception.VolumeDriverException: self.output_log(MSG.DELETE_LDEV_FAILED, ldev=ldev) return ldev def modify_ldev_name(self, ldev, name): """Modify LDEV name.""" body = {'label': name} self.client.modify_ldev(ldev, body) def delete_ldev_from_storage(self, ldev): """Delete the specified LDEV from the storage.""" result = self.get_ldev_info(['emulationType', 'dataReductionMode', 'dataReductionStatus'], ldev) if result['dataReductionStatus'] == 'FAILED': msg = self.output_log( MSG.CONSISTENCY_NOT_GUARANTEE, ldev=ldev) self.raise_error(msg) if result['dataReductionStatus'] in _DR_VOL_PATTERN.get( result['dataReductionMode'], ()): body = {'isDataReductionDeleteForceExecute': True} else: body = None if result['emulationType'] == 'NOT DEFINED': self.output_log(MSG.LDEV_NOT_EXIST, ldev=ldev) return self.client.delete_ldev( ldev, body, timeout_message=(MSG.LDEV_DELETION_WAIT_TIMEOUT, {'ldev': ldev})) def _get_snap_pool_id(self, pvol): return ( self.storage_info['snap_pool_id'] if self.storage_info['snap_pool_id'] is not None else self.get_ldev_info(['poolId'], pvol)['poolId']) def _get_copy_pair_status(self, ldev): """Return the status of the volume in a copy pair.""" params_s = {"svolLdevId": ldev} result_s = self.client.get_snapshots(params_s) if not result_s: params_p = {"pvolLdevId": ldev} result_p = self.client.get_snapshots(params_p) if not result_p: return SMPL return _STATUS_TABLE.get(result_p[0]['status'], UNKN) return _STATUS_TABLE.get(result_s[0]['status'], UNKN) def _wait_copy_pair_status(self, ldev, status, **kwargs): """Wait until the S-VOL status changes to the specified status.""" interval = kwargs.pop( 'interval', self.conf.hitachi_copy_check_interval) timeout = 
kwargs.pop( 'timeout', self.conf.hitachi_state_transition_timeout) def _wait_for_copy_pair_status( start_time, ldev, status, timeout): """Raise True if the S-VOL is in the specified status.""" if not isinstance(status, set): status = set([status]) if self._get_copy_pair_status(ldev) in status: raise loopingcall.LoopingCallDone() if utils.timed_out(start_time, timeout): raise loopingcall.LoopingCallDone(False) loop = loopingcall.FixedIntervalLoopingCall( _wait_for_copy_pair_status, timeutils.utcnow(), ldev, status, timeout) if not loop.start(interval=interval).wait(): msg = self.output_log( MSG.PAIR_STATUS_WAIT_TIMEOUT, svol=ldev) self.raise_error(msg) def _create_snap_pair(self, pvol, svol): """Create a snapshot copy pair on the storage.""" snapshot_name = '%(prefix)s%(svol)s' % { 'prefix': self.driver_info['driver_prefix'] + '-snap', 'svol': svol % _SNAP_HASH_SIZE, } try: body = {"snapshotGroupName": snapshot_name, "snapshotPoolId": self._get_snap_pool_id(pvol), "pvolLdevId": pvol, "svolLdevId": svol, "autoSplit": True, "canCascade": True, "isDataReductionForceCopy": True} self.client.add_snapshot(body) except exception.VolumeDriverException as ex: if (utils.safe_get_err_code(ex.kwargs.get('errobj')) == rest_api.INVALID_SNAPSHOT_POOL and not self.conf.hitachi_snap_pool): msg = self.output_log( MSG.INVALID_PARAMETER, param=self.driver_info['param_prefix'] + '_snap_pool') self.raise_error(msg) else: raise try: self._wait_copy_pair_status(svol, PSUS) except Exception: with excutils.save_and_reraise_exception(): try: self._delete_pair_from_storage(pvol, svol) except exception.VolumeDriverException: self.output_log( MSG.DELETE_PAIR_FAILED, pvol=pvol, svol=svol) def _create_clone_pair(self, pvol, svol, snap_pool_id): """Create a clone copy pair on the storage.""" snapshot_name = '%(prefix)s%(svol)s' % { 'prefix': self.driver_info['driver_prefix'] + '-clone', 'svol': svol % _SNAP_HASH_SIZE, } try: if self.conf.hitachi_copy_speed <= 2: pace = 'slower' elif self.conf.hitachi_copy_speed == 3: pace = 'medium' else: pace = 'faster' body = {"snapshotGroupName": snapshot_name, "snapshotPoolId": self._get_snap_pool_id(pvol), "pvolLdevId": pvol, "svolLdevId": svol, "isClone": True, "clonesAutomation": True, "copySpeed": pace, "isDataReductionForceCopy": True} self.client.add_snapshot(body) except exception.VolumeDriverException as ex: if (utils.safe_get_err_code(ex.kwargs.get('errobj')) == rest_api.INVALID_SNAPSHOT_POOL and not self.conf.hitachi_snap_pool): msg = self.output_log( MSG.INVALID_PARAMETER, param=self.driver_info['param_prefix'] + '_snap_pool') self.raise_error(msg) else: raise try: self._wait_copy_pair_status(svol, set([PSUS, SMPP, SMPL])) except Exception: with excutils.save_and_reraise_exception(): try: self._delete_pair_from_storage(pvol, svol) except exception.VolumeDriverException: self.output_log( MSG.DELETE_PAIR_FAILED, pvol=pvol, svol=svol) def create_pair_on_storage( self, pvol, svol, snap_pool_id, is_snapshot=False): """Create a copy pair on the storage.""" if is_snapshot: self._create_snap_pair(pvol, svol) else: self._create_clone_pair(pvol, svol, snap_pool_id) def get_ldev_info(self, keys, ldev, **kwargs): """Return a dictionary of LDEV-related items. :param keys: LDEV Attributes to be obtained. Specify None to obtain all LDEV attributes. 
:type keys: list or NoneType :param int ldev: The LDEV ID :param dict kwargs: REST API options :return: LDEV info :rtype: dict """ d = {} result = self.client.get_ldev(ldev, **kwargs) if not keys: # To avoid KeyError when accessing a missing attribute, set the # default value to None. return defaultdict(lambda: None, result) for key in keys: d[key] = result.get(key) return d def _wait_copy_pair_deleting(self, ldev, **kwargs): """Wait until the LDEV is no longer in a copy pair.""" interval = kwargs.pop( 'interval', self.conf.hitachi_async_copy_check_interval) def _wait_for_copy_pair_smpl(start_time, ldev): """Raise True if the LDEV is no longer in a copy pair.""" ldev_info = self.get_ldev_info(['status', 'attributes'], ldev) if (ldev_info['status'] != NORMAL_STS or self.driver_info['pair_attr'] not in ldev_info['attributes']): raise loopingcall.LoopingCallDone() if utils.timed_out( start_time, self.conf.hitachi_state_transition_timeout): raise loopingcall.LoopingCallDone(False) loop = loopingcall.FixedIntervalLoopingCall( _wait_for_copy_pair_smpl, timeutils.utcnow(), ldev) if not loop.start(interval=interval).wait(): msg = self.output_log( MSG.PAIR_STATUS_WAIT_TIMEOUT, svol=ldev) self.raise_error(msg) def _delete_pair_from_storage(self, pvol, svol): """Disconnect the volume pair that consists of the specified LDEVs.""" params_s = {"svolLdevId": svol} result = self.client.get_snapshots(params_s) if not result: return mun = result[0]['muNumber'] # If the snapshot is in deleting status, # not need to call a delete operation. if _STATUS_TABLE.get(result[0]['status']) != SMPP: self.client.unassign_snapshot_volume(pvol, mun, ignore_all_errors=True) ignore_return_code = [EX_ENOOBJ] self.client.delete_snapshot( pvol, mun, ignore_return_code=ignore_return_code) self._wait_copy_pair_deleting(svol) def _get_pair_ports(self): return (self.storage_info['pair_ports'] or self.storage_info['controller_ports']) def terminate_pair_connection(self, ldev): targets = { 'list': [], } ldev_info = self.get_ldev_info(['status', 'attributes'], ldev) if (ldev_info['status'] == NORMAL_STS and self.driver_info['mirror_attr'] in ldev_info['attributes']): LOG.debug( 'The specified LDEV has replication pair. ' 'Therefore, unmapping operation was skipped. 
' '(LDEV: %(ldev)s, vol_attr: %(info)s)', {'ldev': ldev, 'info': ldev_info['attributes']}) return self._find_mapped_targets_from_storage( targets, ldev, self._get_pair_ports(), is_pair=True) self.unmap_ldev(targets, ldev) def delete_pair_based_on_svol(self, pvol, svol_info): """Disconnect all volume pairs to which the specified S-VOL belongs.""" # If the pair status does not satisfy the execution condition, if not (svol_info['is_psus'] or _STATUS_TABLE.get(svol_info['status']) == SMPP): self.output_log( MSG.UNABLE_TO_DELETE_PAIR, pvol=pvol, svol=svol_info['ldev']) self.raise_busy() self._delete_pair_from_storage(pvol, svol_info['ldev']) if hasattr( self.conf, self.driver_info['param_prefix'] + '_rest_pair_target_ports'): self.terminate_pair_connection(svol_info['ldev']) self.terminate_pair_connection(pvol) def check_param(self): """Check parameter values and consistency among them.""" super(HBSDREST, self).check_param() self.check_opts(self.conf, REST_VOLUME_OPTS) self.check_opts(self.conf, san.san_opts) if hasattr( self.conf, self.driver_info['param_prefix'] + '_rest_pair_target_ports'): self.check_opts(self.conf, REST_PAIR_OPTS) if (not self.conf.hitachi_target_ports and not self.conf.hitachi_rest_pair_target_ports): msg = self.output_log( MSG.INVALID_PARAMETER, param=self.driver_info['param_prefix'] + '_target_ports or ' + self.driver_info['param_prefix'] + '_rest_pair_target_ports') self.raise_error(msg) LOG.debug( 'Setting ldev_range: %s', self.storage_info['ldev_range']) for opt in _REQUIRED_REST_OPTS: if not self.conf.safe_get(opt): msg = self.output_log(MSG.INVALID_PARAMETER, param=opt) self.raise_error(msg) if not self.conf.safe_get('san_api_port'): self.conf.san_api_port = _REST_DEFAULT_PORT def _find_lun(self, ldev, port, gid): """Get LUN using.""" luns_info = self.client.get_luns(port, gid) for lun_info in luns_info: if lun_info['ldevId'] == ldev: return lun_info['lun'] return None def _run_add_lun(self, ldev, port, gid, lun=None): """Create a LUN between the specified LDEV and port-gid.""" ignore_error = [_LU_PATH_DEFINED] if lun is not None: ignore_error = [rest_api.ANOTHER_LDEV_MAPPED] assigned_lun, errobj = self.client.add_lun( port, gid, ldev, lun=lun, ignore_error=ignore_error, interval=self.conf.hitachi_lun_retry_interval, timeout=self.conf.hitachi_lun_timeout) err_code = utils.safe_get_err_code(errobj) if lun is None: if err_code == _LU_PATH_DEFINED: lun = self._find_lun(ldev, port, gid) LOG.debug( 'An logical unit path has already defined in the ' 'specified logical device. (LDEV: %(ldev)s, ' 'port: %(port)s, gid: %(gid)s, lun: %(lun)s)', {'ldev': ldev, 'port': port, 'gid': gid, 'lun': lun}) else: lun = assigned_lun elif err_code == rest_api.ANOTHER_LDEV_MAPPED: self.output_log(MSG.MAP_LDEV_FAILED, ldev=ldev, port=port, id=gid, lun=lun) return None LOG.debug( 'Created logical unit path to the specified logical device. ' '(LDEV: %(ldev)s, port: %(port)s, ' 'gid: %(gid)s, lun: %(lun)s)', {'ldev': ldev, 'port': port, 'gid': gid, 'lun': lun}) return lun def map_ldev(self, targets, ldev, lun=None): """Create the path between the server and the LDEV and return LUN.""" raise_err = False if lun is not None: head = 0 raise_err = True else: head = 1 port, gid = targets['list'][0] lun = self._run_add_lun(ldev, port, gid) targets['lun'][port] = True for port, gid in targets['list'][head:]: # When multipath is configured, Nova compute expects that # target_lun define the same value in all storage target. # Therefore, it should use same value of lun in other target. 
try: lun2 = self._run_add_lun(ldev, port, gid, lun=lun) if lun2 is not None: targets['lun'][port] = True raise_err = False except exception.VolumeDriverException: self.output_log(MSG.MAP_LDEV_FAILED, ldev=ldev, port=port, id=gid, lun=lun) if raise_err: msg = self.output_log( MSG.CONNECT_VOLUME_FAILED, ldev=ldev, reason='Failed to attach in all ports.') self.raise_error(msg) return lun def attach_ldev( self, volume, ldev, connector, is_snapshot, targets, lun=None): """Initialize connection between the server and the volume.""" target_ports = self.get_target_ports(connector) target_ports = self.filter_target_ports(target_ports, volume, is_snapshot) if (self.find_targets_from_storage( targets, connector, target_ports) and self.conf.hitachi_group_create): self.create_mapping_targets(targets, connector, volume) self.require_target_existed(targets) targets['list'].sort() for port in target_ports: targets['lun'][port] = False return int(self.map_ldev(targets, ldev, lun)) def _find_mapped_targets_from_storage( self, targets, ldev, target_ports, is_pair=False): """Update port-gid list for the specified LDEV.""" ldev_info = self.get_ldev_info(['ports'], ldev) if not ldev_info['ports']: return for port_info in ldev_info['ports']: if _is_valid_target(self, port_info['portId'], port_info['hostGroupName'], target_ports, is_pair): targets['list'].append(port_info) def _get_unmap_targets_list(self, target_list, mapped_list): """Return a list of IDs of ports that need to be disconnected.""" unmap_list = [] for mapping_info in mapped_list: if ((mapping_info['portId'][:utils.PORT_ID_LENGTH], mapping_info['hostGroupNumber']) in target_list): unmap_list.append(mapping_info) return unmap_list def unmap_ldev(self, targets, ldev): """Delete the LUN between the specified LDEV and port-gid.""" interval = self.conf.hitachi_lun_retry_interval ignore_return_code = [EX_ENOOBJ] ignore_message_id = [rest_api.MSGID_SPECIFIED_OBJECT_DOES_NOT_EXIST] timeout = self.conf.hitachi_state_transition_timeout for target in targets['list']: port = target['portId'] gid = target['hostGroupNumber'] lun = target['lun'] self.client.delete_lun(port, gid, lun, interval=interval, ignore_return_code=ignore_return_code, ignore_message_id=ignore_message_id, timeout=timeout) LOG.debug( 'Deleted logical unit path of the specified logical ' 'device. (LDEV: %(ldev)s, host group: %(target)s)', {'ldev': ldev, 'target': target}) def _get_target_luns(self, target): """Get the LUN mapping information of the host group.""" port = target['portId'] gid = target['hostGroupNumber'] mapping_list = [] luns_info = self.client.get_luns(port, gid) if luns_info: for lun_info in luns_info: mapping_list.append((port, gid, lun_info['lun'], lun_info['ldevId'])) return mapping_list def delete_target_from_storage(self, port, gid): """Delete the host group or the iSCSI target from the port.""" result = 1 try: self.client.delete_host_grp(port, gid) result = 0 except exception.VolumeDriverException: self.output_log(MSG.DELETE_TARGET_FAILED, port=port, id=gid) else: LOG.debug( 'Deleted target. 
(port: %(port)s, gid: %(gid)s)', {'port': port, 'gid': gid}) return result def clean_mapping_targets(self, targets): """Delete the empty host group without LU.""" deleted_targets = [] for target in targets['list']: if not len(self._get_target_luns(target)): port = target['portId'] gid = target['hostGroupNumber'] ret = self.delete_target_from_storage(port, gid) if not ret: deleted_targets.append(port) return deleted_targets def detach_ldev(self, volume, ldev, connector): """Terminate connection between the server and the volume.""" targets = { 'info': {}, 'list': [], 'iqns': {}, } mapped_targets = { 'list': [], } unmap_targets = {} deleted_targets = [] target_ports = self.get_target_ports(connector) self.find_targets_from_storage(targets, connector, target_ports) self._find_mapped_targets_from_storage( mapped_targets, ldev, target_ports) unmap_targets['list'] = self._get_unmap_targets_list( targets['list'], mapped_targets['list']) unmap_targets['list'].sort( reverse=True, key=lambda port: (port.get('portId'), port.get('hostGroupNumber'))) self.unmap_ldev(unmap_targets, ldev) if self.conf.hitachi_group_delete: deleted_targets = self.clean_mapping_targets(unmap_targets) return deleted_targets def find_all_mapped_targets_from_storage(self, targets, ldev): """Add all port-gids connected with the LDEV to the list.""" ldev_info = self.get_ldev_info(['ports'], ldev) if ldev_info['ports']: for port in ldev_info['ports']: targets['list'].append(port) def extend_ldev(self, ldev, old_size, new_size): """Extend the specified LDEV to the specified new size.""" body = {"parameters": {"additionalByteFormatCapacity": '%sG' % (new_size - old_size)}} self.client.extend_ldev(ldev, body) def get_pool_info(self, pool_id, result=None): """Return the total and free capacity of the storage pool.""" if result is None: result = self.client.get_pool( pool_id, ignore_message_id=[ rest_api.MSGID_SPECIFIED_OBJECT_DOES_NOT_EXIST]) if 'errorSource' in result: msg = self.output_log(MSG.POOL_NOT_FOUND, pool=pool_id) self.raise_error(msg) tp_cap = result['totalPoolCapacity'] // units.Ki ta_cap = result['availableVolumeCapacity'] // units.Ki tl_cap = result['totalLocatedCapacity'] // units.Ki return tp_cap, ta_cap, tl_cap def get_pool_infos(self, pool_ids): """Return the total and free capacity of the storage pools.""" result = [] try: result = self.client.get_pools() except exception.VolumeDriverException: self.output_log(MSG.POOL_INFO_RETRIEVAL_FAILED, pool='all') pool_infos = [] for pool_id in pool_ids: for pool_data in result: if pool_data['poolId'] == pool_id: cap_data = self.get_pool_info(pool_id, pool_data) break else: self.output_log(MSG.POOL_NOT_FOUND, pool=pool_id) cap_data = None pool_infos.append(cap_data) return pool_infos def discard_zero_page(self, volume): """Return the volume's no-data pages to the storage pool.""" if self.conf.hitachi_discard_zero_page: ldev = self.get_ldev(volume) try: self.client.discard_zero_page(ldev) except exception.VolumeDriverException: self.output_log(MSG.DISCARD_ZERO_PAGE_FAILED, ldev=ldev) def _get_copy_pair_info(self, ldev): """Return info of the copy pair.""" params_p = {"pvolLdevId": ldev} result_p = self.client.get_snapshots(params_p) if result_p: is_psus = _STATUS_TABLE.get(result_p[0]['status']) == PSUS pvol, svol = ldev, int(result_p[0]['svolLdevId']) status = result_p[0]['status'] else: params_s = {"svolLdevId": ldev} result_s = self.client.get_snapshots(params_s) if result_s: is_psus = _STATUS_TABLE.get(result_s[0]['status']) == PSUS pvol, svol = 
int(result_s[0]['pvolLdevId']), ldev status = result_s[0]['status'] else: return None, None LOG.debug( 'Copy pair status. (P-VOL: %(pvol)s, S-VOL: %(svol)s, ' 'status: %(status)s)', {'pvol': pvol, 'svol': svol, 'status': status}) return pvol, [{'ldev': svol, 'is_psus': is_psus, 'status': status}] def get_pair_info(self, ldev, ldev_info=None): """Return info of the volume pair. :param int ldev: The LDEV ID :param dict ldev_info: LDEV info :return: TI pair info if the LDEV has TI pairs, None otherwise :rtype: dict or NoneType """ pair_info = {} if ldev_info is None: ldev_info = self.get_ldev_info(['status', 'attributes'], ldev) if (ldev_info['status'] != NORMAL_STS or self.driver_info['pair_attr'] not in ldev_info['attributes']): return None pvol, svol_info = self._get_copy_pair_info(ldev) if svol_info and svol_info[0]['status'] in ('SMPP', 'PSUP'): self._wait_copy_pair_deleting(svol_info[0]['ldev']) return self.get_pair_info(ldev) if pvol is not None: pair_info['pvol'] = pvol pair_info.setdefault('svol_info', []) pair_info['svol_info'].extend(svol_info) return pair_info def get_ldev_by_name(self, name): """Get the LDEV number from the given name.""" ignore_message_id = ['KART40044-E'] ignore_return_code = _INVALID_RANGE if self.storage_info['ldev_range']: start, end = self.storage_info['ldev_range'][:2] if end - start + 1 > _GET_LDEV_COUNT: cnt = _GET_LDEV_COUNT else: cnt = end - start + 1 else: start = 0 end = _MAX_LDEV_ID cnt = _GET_LDEV_COUNT for current in range(start, end, cnt): params = {'headLdevId': current, 'ldevOption': 'dpVolume', 'count': cnt} ldev_list = self.client.get_ldevs( params, ignore_message_id=ignore_message_id, ignore_return_code=ignore_return_code) for ldev_data in ldev_list: if 'label' in ldev_data and name == ldev_data['label']: return ldev_data['ldevId'] return None def check_ldev_manageability(self, ldev, existing_ref): """Check if the LDEV meets the criteria for being managed.""" ldev_info = self.get_ldev_info( _CHECK_LDEV_MANAGEABILITY_KEYS, ldev) _check_ldev_manageability(self, ldev_info, ldev, existing_ref) def get_ldev_size_in_gigabyte(self, ldev, existing_ref): """Return the size[GB] of the specified LDEV.""" ldev_info = self.get_ldev_info( _CHECK_LDEV_SIZE_KEYS, ldev) _check_ldev_size(self, ldev_info, ldev, existing_ref) return ldev_info['blockCapacity'] / utils.GIGABYTE_PER_BLOCK_SIZE def _get_pool_id(self, pool_list, pool_name_or_id): """Get the pool id from specified name.""" if pool_name_or_id.isdigit(): return int(pool_name_or_id) if pool_list['pool_list'] is None: pool_list['pool_list'] = self.client.get_pools() for pool_data in pool_list['pool_list']: if pool_data['poolName'] == pool_name_or_id: return pool_data['poolId'] msg = self.output_log(MSG.POOL_NOT_FOUND, pool=pool_name_or_id) self.raise_error(msg) def check_pool_id(self): """Check the pool id of hitachi_pools and hitachi_snap_pool.""" pool_id_list = [] pool_list = {'pool_list': None} for pool in self.conf.hitachi_pools: pool_id_list.append(self._get_pool_id(pool_list, pool)) snap_pool = self.conf.hitachi_snap_pool if snap_pool is not None: self.storage_info['snap_pool_id'] = self._get_pool_id( pool_list, snap_pool) elif len(pool_id_list) == 1: self.storage_info['snap_pool_id'] = pool_id_list[0] self.storage_info['pool_id'] = pool_id_list def _to_hostgroup(self, port, gid): """Get a host group name from host group ID.""" return self.client.get_host_grp(port, gid)['hostGroupName'] def get_port_hostgroup_map(self, ldev_id): """Get the mapping of a port and host group.""" hostgroups = 
defaultdict(list) ldev_info = self.get_ldev_info(['ports'], ldev_id) if not ldev_info['ports']: return hostgroups for port in ldev_info['ports']: portId = port["portId"] hostgroup = self._to_hostgroup( portId, port["hostGroupNumber"]) hostgroups[portId].append(hostgroup) return hostgroups def check_pair_svol(self, ldev): """Check if the specified LDEV is S-VOL in a copy pair.""" ldev_info = self.get_ldev_info(['status', 'snapshotPoolId'], ldev) if ldev_info['status'] != NORMAL_STS: return False if ldev_info['snapshotPoolId'] is not None: _, svol_info = self._get_copy_pair_info(ldev) if svol_info and svol_info[0]['status'] in ('SMPP', 'PSUP'): self._wait_copy_pair_deleting(ldev) return False else: return True return False def restore_ldev(self, pvol, svol): """Restore a pair of the specified LDEV.""" params_s = {"svolLdevId": svol} result = self.client.get_snapshots(params_s) mun = result[0]['muNumber'] body = {"parameters": {"autoSplit": True}} self.client.restore_snapshot(pvol, mun, body) self._wait_copy_pair_status( svol, PSUS, timeout=self.conf.hitachi_restore_timeout, interval=self.conf.hitachi_async_copy_check_interval) def has_snap_pair(self, pvol, svol): """Check if the volume have the pair of the snapshot.""" ldev_info = self.get_ldev_info(['status', 'attributes'], svol) if (ldev_info['status'] != NORMAL_STS or self.driver_info['pair_attr'] not in ldev_info['attributes']): return False params_s = {"svolLdevId": svol} result = self.client.get_snapshots(params_s) if not result: return False return (result[0]['primaryOrSecondary'] == "S-VOL" and int(result[0]['pvolLdevId']) == pvol) def create_group(self): return None def _delete_group(self, group, objs, is_snapshot): model_update = {'status': group.status} objs_model_update = [] events = [] def _delete_group_obj(group, obj, is_snapshot): obj_update = {'id': obj.id} try: if is_snapshot: self.delete_snapshot(obj) else: self.delete_volume(obj) obj_update['status'] = 'deleted' except (exception.VolumeDriverException, exception.VolumeIsBusy, exception.SnapshotIsBusy) as exc: obj_update['status'] = 'available' if isinstance( exc, (exception.VolumeIsBusy, exception.SnapshotIsBusy)) else 'error' self.output_log( MSG.GROUP_OBJECT_DELETE_FAILED, obj='snapshot' if is_snapshot else 'volume', group='group snapshot' if is_snapshot else 'group', group_id=group.id, obj_id=obj.id, ldev=self.get_ldev(obj), reason=exc.msg) raise loopingcall.LoopingCallDone(obj_update) for obj in objs: loop = loopingcall.FixedIntervalLoopingCall( _delete_group_obj, group, obj, is_snapshot) event = loop.start(interval=0) events.append(event) for e in events: obj_update = e.wait() if obj_update['status'] != 'deleted': model_update['status'] = 'error' objs_model_update.append(obj_update) return model_update, objs_model_update def delete_group(self, group, volumes): return self._delete_group(group, volumes, False) def delete_group_snapshot(self, group_snapshot, snapshots): return self._delete_group(group_snapshot, snapshots, True) def create_group_from_src( self, context, group, volumes, snapshots=None, source_vols=None): volumes_model_update = [] new_ldevs = [] events = [] def _create_group_volume_from_src(context, volume, src, from_snapshot): volume_model_update = {'id': volume.id} try: ldev = self.get_ldev(src) if ldev is None: msg = self.output_log( MSG.INVALID_LDEV_FOR_VOLUME_COPY, type='snapshot' if from_snapshot else 'volume', id=src.id) self.raise_error(msg) volume_model_update.update( self.create_volume_from_snapshot(volume, src) if from_snapshot else 
self.create_cloned_volume(volume, src)) except Exception as exc: volume_model_update['msg'] = utils.get_exception_msg(exc) raise loopingcall.LoopingCallDone(volume_model_update) try: from_snapshot = True if snapshots else False for volume, src in zip(volumes, snapshots if snapshots else source_vols): loop = loopingcall.FixedIntervalLoopingCall( _create_group_volume_from_src, context, volume, src, from_snapshot) event = loop.start(interval=0) events.append(event) is_success = True for e in events: volume_model_update = e.wait() if 'msg' in volume_model_update: is_success = False msg = volume_model_update['msg'] else: volumes_model_update.append(volume_model_update) ldev = self.get_ldev(volume_model_update) if ldev is not None: new_ldevs.append(ldev) if not is_success: self.raise_error(msg) except Exception: with excutils.save_and_reraise_exception(): for new_ldev in new_ldevs: try: self.delete_ldev(new_ldev) except exception.VolumeDriverException: self.output_log(MSG.DELETE_LDEV_FAILED, ldev=new_ldev) return None, volumes_model_update def update_group(self, group, add_volumes=None): if add_volumes and volume_utils.is_group_a_cg_snapshot_type(group): for volume in add_volumes: ldev = self.get_ldev(volume) if ldev is None: msg = self.output_log(MSG.LDEV_NOT_EXIST_FOR_ADD_GROUP, volume_id=volume.id, group='consistency group', group_id=group.id) self.raise_error(msg) return None, None, None def _create_non_cgsnapshot(self, group_snapshot, snapshots): model_update = {'status': fields.GroupSnapshotStatus.AVAILABLE} snapshots_model_update = [] events = [] def _create_non_cgsnapshot_snapshot(group_snapshot, snapshot): snapshot_model_update = {'id': snapshot.id} try: snapshot_model_update.update(self.create_snapshot(snapshot)) snapshot_model_update['status'] = ( fields.SnapshotStatus.AVAILABLE) except Exception: snapshot_model_update['status'] = fields.SnapshotStatus.ERROR self.output_log( MSG.GROUP_SNAPSHOT_CREATE_FAILED, group=group_snapshot.group_id, group_snapshot=group_snapshot.id, group_type=group_snapshot.group_type_id, volume=snapshot.volume_id, snapshot=snapshot.id) raise loopingcall.LoopingCallDone(snapshot_model_update) for snapshot in snapshots: loop = loopingcall.FixedIntervalLoopingCall( _create_non_cgsnapshot_snapshot, group_snapshot, snapshot) event = loop.start(interval=0) events.append(event) for e in events: snapshot_model_update = e.wait() if (snapshot_model_update['status'] == fields.SnapshotStatus.ERROR): model_update['status'] = fields.GroupSnapshotStatus.ERROR snapshots_model_update.append(snapshot_model_update) return model_update, snapshots_model_update def _create_ctg_snapshot_group_name(self, ldev): now = timeutils.utcnow() strnow = now.strftime("%y%m%d%H%M%S%f") ctg_name = '%(prefix)sC%(ldev)s%(time)s' % { 'prefix': self.driver_info['driver_prefix'], 'ldev': "{0:06X}".format(ldev), 'time': strnow[:len(strnow) - 3], } return ctg_name[:_MAX_COPY_GROUP_NAME] def _delete_pairs_from_storage(self, pairs): for pair in pairs: try: self._delete_pair_from_storage(pair['pvol'], pair['svol']) except exception.VolumeDriverException: self.output_log(MSG.DELETE_PAIR_FAILED, pvol=pair['pvol'], svol=pair['svol']) def _create_ctg_snap_pair(self, pairs): snapshotgroup_name = self._create_ctg_snapshot_group_name( pairs[0]['pvol']) try: for pair in pairs: try: body = {"snapshotGroupName": snapshotgroup_name, "snapshotPoolId": self._get_snap_pool_id( pair['pvol']), "pvolLdevId": pair['pvol'], "svolLdevId": pair['svol'], "isConsistencyGroup": True, "canCascade": True, 
"isDataReductionForceCopy": True} self.client.add_snapshot(body) except exception.VolumeDriverException as ex: if ((utils.safe_get_err_code(ex.kwargs.get('errobj')) == _MAX_CTG_COUNT_EXCEEDED_ADD_SNAPSHOT) or (utils.safe_get_err_code(ex.kwargs.get('errobj')) == _MAX_PAIR_COUNT_IN_CTG_EXCEEDED_ADD_SNAPSHOT)): msg = self.output_log(MSG.FAILED_CREATE_CTG_SNAPSHOT) self.raise_error(msg) elif (utils.safe_get_err_code(ex.kwargs.get('errobj')) == rest_api.INVALID_SNAPSHOT_POOL and not self.conf.hitachi_snap_pool): msg = self.output_log( MSG.INVALID_PARAMETER, param=self.driver_info['param_prefix'] + '_snap_pool') self.raise_error(msg) raise self._wait_copy_pair_status(pair['svol'], PAIR) self.client.split_snapshotgroup(snapshotgroup_name) for pair in pairs: self._wait_copy_pair_status(pair['svol'], PSUS) except Exception: with excutils.save_and_reraise_exception(): self._delete_pairs_from_storage(pairs) def _create_cgsnapshot(self, context, cgsnapshot, snapshots): pairs = [] events = [] snapshots_model_update = [] def _create_cgsnapshot_volume(snapshot): pair = {'snapshot': snapshot} try: pair['pvol'] = self.get_ldev(snapshot.volume) if pair['pvol'] is None: msg = self.output_log( MSG.INVALID_LDEV_FOR_VOLUME_COPY, type='volume', id=snapshot.volume_id) self.raise_error(msg) size = snapshot.volume_size pool_id = self.get_pool_id_of_volume(snapshot.volume) ldev_range = self.storage_info['ldev_range'] extra_specs = self.get_volume_extra_specs(snapshot.volume) qos_specs = utils.get_qos_specs_from_volume(snapshot) pair['svol'] = self.create_ldev(size, extra_specs, pool_id, ldev_range, qos_specs=qos_specs) self.modify_ldev_name(pair['svol'], snapshot.id.replace("-", "")) except Exception as exc: pair['msg'] = utils.get_exception_msg(exc) raise loopingcall.LoopingCallDone(pair) try: for snapshot in snapshots: ldev = self.get_ldev(snapshot.volume) if ldev is None: msg = self.output_log( MSG.INVALID_LDEV_FOR_VOLUME_COPY, type='volume', id=snapshot.volume_id) self.raise_error(msg) for snapshot in snapshots: loop = loopingcall.FixedIntervalLoopingCall( _create_cgsnapshot_volume, snapshot) event = loop.start(interval=0) events.append(event) is_success = True for e in events: pair = e.wait() if 'msg' in pair: is_success = False msg = pair['msg'] pairs.append(pair) if not is_success: self.raise_error(msg) self._create_ctg_snap_pair(pairs) except Exception: for pair in pairs: if 'svol' in pair and pair['svol'] is not None: try: self.delete_ldev(pair['svol']) except exception.VolumeDriverException: self.output_log( MSG.DELETE_LDEV_FAILED, ldev=pair['svol']) model_update = {'status': fields.GroupSnapshotStatus.ERROR} for snapshot in snapshots: snapshot_model_update = {'id': snapshot.id, 'status': fields.SnapshotStatus.ERROR} snapshots_model_update.append(snapshot_model_update) return model_update, snapshots_model_update for pair in pairs: snapshot_model_update = { 'id': pair['snapshot'].id, 'status': fields.SnapshotStatus.AVAILABLE, 'provider_location': str(pair['svol'])} snapshots_model_update.append(snapshot_model_update) return None, snapshots_model_update def create_group_snapshot(self, context, group_snapshot, snapshots): if volume_utils.is_group_a_cg_snapshot_type(group_snapshot): return self._create_cgsnapshot(context, group_snapshot, snapshots) else: return self._create_non_cgsnapshot(group_snapshot, snapshots) def _init_pair_targets(self, targets_info): self._pair_targets = [] for port in targets_info.keys(): if not targets_info[port]: continue params = {'portId': port} host_grp_list = 
self.client.get_host_grps(params) gid = None for host_grp_data in host_grp_list: if host_grp_data['hostGroupName'] == self._PAIR_TARGET_NAME: gid = host_grp_data['hostGroupNumber'] break if not gid: try: connector = { 'ip': self._PAIR_TARGET_NAME_BODY, 'wwpns': [self._PAIR_TARGET_NAME_BODY], } target_name, gid = self.create_target_to_storage( port, connector, None) LOG.debug( 'Created host group for pair operation. ' '(port: %(port)s, gid: %(gid)s)', {'port': port, 'gid': gid}) except exception.VolumeDriverException: self.output_log(MSG.CREATE_HOST_GROUP_FAILED, port=port) continue self._pair_targets.append((port, gid)) if not self._pair_targets: msg = self.output_log(MSG.PAIR_TARGET_FAILED) self.raise_error(msg) self._pair_targets.sort(reverse=True) LOG.debug('Setting pair_targets: %s', self._pair_targets) def init_cinder_hosts(self, **kwargs): targets = { 'info': {}, 'list': [], 'iqns': {}, 'target_map': {}, } super(HBSDREST, self).init_cinder_hosts(targets=targets) if self.storage_info['pair_ports']: targets['info'] = {} ports = self._get_pair_ports() for port in ports: targets['info'][port] = True if hasattr( self.conf, self.driver_info['param_prefix'] + '_rest_pair_target_ports'): self._init_pair_targets(targets['info']) def initialize_pair_connection(self, ldev): port, gid = None, None for port, gid in self._pair_targets: try: targets = { 'info': {}, 'list': [(port, gid)], 'lun': {}, } return self.map_ldev(targets, ldev) except exception.VolumeDriverException: self.output_log( MSG.MAP_LDEV_FAILED, ldev=ldev, port=port, id=gid, lun=None) msg = self.output_log(MSG.MAP_PAIR_TARGET_FAILED, ldev=ldev) self.raise_error(msg) def migrate_volume(self, volume, host, new_type=None): """Migrate the specified volume.""" attachments = volume.volume_attachment if attachments: return False, None pvol = self.get_ldev(volume) if pvol is None: msg = self.output_log( MSG.INVALID_LDEV_FOR_VOLUME_COPY, type='volume', id=volume.id) self.raise_error(msg) pair_info = self.get_pair_info(pvol) if pair_info: if pair_info['pvol'] == pvol: svols = [] copy_methods = [] svol_statuses = [] for svol_info in pair_info['svol_info']: svols.append(str(svol_info['ldev'])) copy_methods.append(utils.THIN) svol_statuses.append(svol_info['status']) if svols: pair_info = ['(%s, %s, %s, %s)' % (pvol, svol, copy_method, status) for svol, copy_method, status in zip(svols, copy_methods, svol_statuses)] msg = self.output_log( MSG.MIGRATE_VOLUME_FAILED, volume=volume.id, ldev=pvol, pair_info=', '.join(pair_info)) self.raise_error(msg) else: svol_info = pair_info['svol_info'][0] if svol_info['is_psus'] and svol_info['status'] != 'PSUP': return False, None else: pair_info = '(%s, %s, %s, %s)' % ( pair_info['pvol'], svol_info['ldev'], utils.THIN, svol_info['status']) msg = self.output_log( MSG.MIGRATE_VOLUME_FAILED, volume=volume.id, ldev=svol_info['ldev'], pair_info=pair_info) self.raise_error(msg) old_storage_id = self.conf.hitachi_storage_id new_storage_id = ( host['capabilities']['location_info'].get('storage_id')) if new_type is None: old_pool_id = self.get_ldev_info(['poolId'], pvol)['poolId'] new_pool_id = host['capabilities']['location_info'].get('pool_id') if old_storage_id != new_storage_id: return False, None ldev_range = host['capabilities']['location_info'].get('ldev_range') if (new_type or old_pool_id != new_pool_id or (ldev_range and (pvol < ldev_range[0] or ldev_range[1] < pvol))): extra_specs = self.get_volume_extra_specs(volume) if new_type: qos_specs = utils.get_qos_specs_from_volume_type(new_type) else: qos_specs = 
utils.get_qos_specs_from_volume(volume) snap_pool_id = host['capabilities']['location_info'].get( 'snap_pool_id') ldev_range = host['capabilities']['location_info'].get( 'ldev_range') svol = self.copy_on_storage( pvol, volume.size, extra_specs, new_pool_id, snap_pool_id, ldev_range, is_snapshot=False, sync=True, qos_specs=qos_specs) self.modify_ldev_name(svol, volume['id'].replace("-", "")) try: self.delete_ldev(pvol) except exception.VolumeDriverException: self.output_log(MSG.DELETE_LDEV_FAILED, ldev=pvol) return True, { 'provider_location': str(svol), } return True, None def _is_modifiable_dr_value(self, dr_mode, dr_status, new_dr_mode, volume): if (dr_status == 'REHYDRATING' and new_dr_mode == 'compression_deduplication'): self.output_log(MSG.VOLUME_IS_BEING_REHYDRATED, volume_id=volume['id'], volume_type=volume['volume_type']['name']) return False elif dr_status == 'FAILED': self.output_log(MSG.INCONSISTENCY_DEDUPLICATION_SYSTEM_VOLUME, volume_id=volume['id'], volume_type=volume['volume_type']['name']) return False elif new_dr_mode == 'disabled': return dr_status in _DISABLE_ABLE_DR_STATUS.get(dr_mode, ()) elif new_dr_mode == 'compression_deduplication': return dr_status in _DEDUPCOMP_ABLE_DR_STATUS.get(dr_mode, ()) return False def _modify_capacity_saving(self, ldev, capacity_saving): body = {'dataReductionMode': capacity_saving} self.client.modify_ldev( ldev, body, timeout_message=( MSG.NOT_COMPLETED_CHANGE_VOLUME_TYPE, {'ldev': ldev})) def retype(self, ctxt, volume, new_type, diff, host): """Retype the specified volume.""" diff_items = [] def _check_specs_diff(diff, allowed_extra_specs): for specs_key, specs_val in diff.items(): if specs_key == 'qos_specs': diff_items.append(specs_key) continue for diff_key, diff_val in specs_val.items(): if (specs_key == 'extra_specs' and diff_key in allowed_extra_specs): diff_items.append(diff_key) continue if diff_val[0] != diff_val[1]: return False return True extra_specs_capacity_saving = None new_capacity_saving = None allowed_extra_specs = [] if self.driver_info.get('driver_dir_name'): extra_specs_capacity_saving = ( self.driver_info['driver_dir_name'] + ':capacity_saving') new_capacity_saving = ( new_type['extra_specs'].get(extra_specs_capacity_saving)) allowed_extra_specs.append(extra_specs_capacity_saving) new_dr_mode = _CAPACITY_SAVING_DR_MODE.get(new_capacity_saving) if not new_dr_mode: msg = self.output_log( MSG.FAILED_CHANGE_VOLUME_TYPE, key=extra_specs_capacity_saving, value=new_capacity_saving) self.raise_error(msg) ldev = self.get_ldev(volume) if ldev is None: msg = self.output_log( MSG.INVALID_LDEV_FOR_VOLUME_COPY, type='volume', id=volume['id']) self.raise_error(msg) ldev_info = self.get_ldev_info( ['dataReductionMode', 'dataReductionStatus', 'poolId'], ldev) old_pool_id = ldev_info['poolId'] new_pool_id = host['capabilities']['location_info'].get('pool_id') if (not _check_specs_diff(diff, allowed_extra_specs) or new_pool_id != old_pool_id): snaps = SnapshotList.get_all_for_volume(ctxt, volume.id) if not snaps: return self.migrate_volume(volume, host, new_type) return False if (extra_specs_capacity_saving and extra_specs_capacity_saving in diff_items): ldev_info = self.get_ldev_info( ['dataReductionMode', 'dataReductionStatus'], ldev) if not self._is_modifiable_dr_value( ldev_info['dataReductionMode'], ldev_info['dataReductionStatus'], new_dr_mode, volume): return False self._modify_capacity_saving(ldev, new_dr_mode) if 'qos_specs' in diff_items: old_qos_specs = self.get_qos_specs_from_ldev(ldev) new_qos_specs = 
utils.get_qos_specs_from_volume_type(new_type) if old_qos_specs != new_qos_specs: self.change_qos_specs(ldev, old_qos_specs, new_qos_specs) return True def wait_copy_completion(self, pvol, svol): """Wait until copy is completed.""" self._wait_copy_pair_status(svol, set([SMPL, PSUE])) status = self._get_copy_pair_status(svol) if status == PSUE: msg = self.output_log(MSG.VOLUME_COPY_FAILED, pvol=pvol, svol=svol) self.raise_error(msg) def create_target_name(self, connector): if ('ip' in connector and connector['ip'] == self._PAIR_TARGET_NAME_BODY): return self._PAIR_TARGET_NAME wwn = (min(self.get_hba_ids_from_connector(connector)) if self.format_info['group_name_var_cnt'][ common.GROUP_NAME_VAR_WWN] else '') ip = (connector['ip'] if self.format_info[ 'group_name_var_cnt'][common.GROUP_NAME_VAR_IP] else '') if not self.format_info['group_name_var_cnt'][ common.GROUP_NAME_VAR_HOST]: return self.format_info['group_name_format'].format(wwn=wwn, ip=ip) host = connector['host'] if 'host' in connector else '' max_host_len = (self.group_name_format['group_name_max_len'] - self.format_info['group_name_format_without_var_len'] - len(wwn) - len(ip)) host = _GROUP_NAME_PROHIBITED_CHAR_PATTERN.sub( '_', host[:max_host_len]) return self.format_info['group_name_format'].format( host=host, wwn=wwn, ip=ip) def change_qos_specs(self, ldev, old_qos_specs, new_qos_specs): delete_specs = {key: 0 for key in old_qos_specs if key in utils.QOS_KEYS} if delete_specs: self.client.set_qos_specs(ldev, delete_specs) if new_qos_specs: self.client.set_qos_specs(ldev, new_qos_specs) def get_qos_specs_from_ldev(self, ldev): params = {'detailInfoType': 'qos', 'headLdevId': ldev, 'count': 1} ldev_info = self.client.get_ldevs(params=params)[0] return ldev_info.get('qos', {}) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/drivers/hitachi/hbsd_rest_api.py0000664000175000017500000012035700000000000024121 0ustar00zuulzuul00000000000000# Copyright (C) 2020, 2024, Hitachi, Ltd. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ REST API client class for Hitachi HBSD Driver. 
""" from http import client as httpclient import socket import threading from eventlet import greenthread from oslo_log import log as logging from oslo_service import loopingcall from oslo_utils import timeutils import requests from requests.adapters import HTTPAdapter from cinder import exception from cinder.i18n import _ from cinder.volume.drivers.hitachi import hbsd_utils as utils from cinder.volume import volume_utils _LOCK_TIMEOUT = 2 * 60 * 60 _REST_TIMEOUT = 30 _EXTEND_TIMEOUT = 10 * 60 _EXEC_RETRY_INTERVAL = 5 _DEFAULT_CONNECT_TIMEOUT = 30 _JOB_API_RESPONSE_TIMEOUT = 30 * 60 _GET_API_RESPONSE_TIMEOUT = 30 * 60 _REST_SERVER_BUSY_TIMEOUT = 2 * 60 * 60 _REST_SERVER_RESTART_TIMEOUT = 10 * 60 _REST_SERVER_ERROR_TIMEOUT = 10 * 60 _KEEP_SESSION_LOOP_INTERVAL = 3 * 60 _ANOTHER_LDEV_MAPPED_RETRY_TIMEOUT = 10 * 60 _LOCK_RESOURCE_GROUP_TIMEOUT = 3 * 60 _TCP_KEEPIDLE = 60 _TCP_KEEPINTVL = 15 _TCP_KEEPCNT = 4 _MIRROR_RESERVED_VIRTUAL_LDEV_ID = 65535 _HTTPS = 'https://' _NOT_SPECIFIED = 'NotSpecified' _REST_LOCKED_ERRORS = [ ('2E11', '2205'), ('2E11', '2207'), ] LDEV_ALREADY_DEFINED = ('2E22', '0001') NO_AVAILABLE_LDEV_ID = ('2E11', '2209') INVALID_SNAPSHOT_POOL = ('2E30', '600E') _MSGID_REST_SERVER_BUSY = ('KART00003-E',) _MSGID_LOCK_FAILURE = ('KART40050-E', 'KART40051-E', 'KART40052-E') EXCEED_WWN_MAX = ('B957', '4184') ANOTHER_LDEV_MAPPED = ('B958', '0947') REST_NO_RETRY_ERRORS = [ ('2E10', '9705'), ('2E10', '9706'), ('2E10', '9707'), ('2E11', '8303'), ('2E30', '0007'), ('B956', '3173'), ('B956', '31D7'), ('B956', '31D9'), ('B957', '4188'), ('B958', '015A'), ('B958', '015E'), LDEV_ALREADY_DEFINED, NO_AVAILABLE_LDEV_ID, EXCEED_WWN_MAX, INVALID_SNAPSHOT_POOL, ] MSGID_SPECIFIED_OBJECT_DOES_NOT_EXIST = 'KART30013-E' _REST_NO_RETRY_MESSAGEIDS = [ MSGID_SPECIFIED_OBJECT_DOES_NOT_EXIST ] LOG = logging.getLogger(__name__) MSG = utils.HBSDMsg def _get_device_group_name(remote_client, copy_group_name, is_secondary, is_remote=False): if remote_client is None and is_remote: return _NOT_SPECIFIED return copy_group_name + ('S' if is_secondary ^ is_remote else 'P') def _build_base_url(ip_addr, ip_port): return '%(https)s%(ip)s:%(port)s/ConfigurationManager' % { 'https': _HTTPS, 'ip': ip_addr, 'port': ip_port, } class KeepAliveAdapter(HTTPAdapter): def __init__(self, conf): self.socket_options = [ (socket.IPPROTO_TCP, socket.TCP_NODELAY, 1), (socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1), (socket.IPPROTO_TCP, socket.TCP_KEEPIDLE, conf.hitachi_rest_tcp_keepidle), (socket.IPPROTO_TCP, socket.TCP_KEEPINTVL, conf.hitachi_rest_tcp_keepintvl), (socket.IPPROTO_TCP, socket.TCP_KEEPCNT, conf.hitachi_rest_tcp_keepcnt), ] super(KeepAliveAdapter, self).__init__() def init_poolmanager(self, *args, **kwargs): kwargs['socket_options'] = self.socket_options super(KeepAliveAdapter, self).init_poolmanager(*args, **kwargs) class ResponseData(dict): def is_json(self): return (self['rsp'].content and 'json' in self['rsp'].headers['Content-Type']) def _init_content(self): """Set response object.""" if self.is_json(): self['rsp_body'] = self['rsp'].json() elif self['rsp'].content: self['rsp_body'] = self['rsp'].text else: self['rsp_body'] = None def _init_error(self): """Set error object""" if self['rsp_body'] and 'errorSource' in self['rsp_body']: self['errobj'] = self['rsp_body'] elif self['rsp_body'] and 'error' in self['rsp_body']: self['errobj'] = self['rsp_body']['error'] else: self['errobj'] = {} def __init__(self, rsp): """Initialize instance variables.""" super(ResponseData, self).__init__() self['rsp'] = rsp 
self['status_code'] = rsp.status_code self._init_content() self._init_error() def job_succeeded(self): return (self.is_json() and self['rsp_body'].get('status') == 'Completed' and self['rsp_body'].get('state') == 'Succeeded') def get_err_code(self): return utils.safe_get_err_code(self['errobj']) def get_return_code(self): return utils.safe_get_return_code(self['errobj']) def is_success(self, ignore_error, ignore_message_id, ignore_return_code, ignore_all_errors=False): """Check the success or failure of the response.""" return (ignore_all_errors or self['status_code'] == httpclient.OK or (self['status_code'] == httpclient.ACCEPTED and self.job_succeeded()) or self.get_err_code() in ignore_error or self['errobj'].get('messageId') in ignore_message_id or self.get_return_code() in ignore_return_code) def is_locked(self): """Check if a response is the error of the lock factor.""" if not self['errobj']: return False message_id = self['errobj'].get('messageId') retcode = self['errobj'].get('errorCode', {}).get('errorCode') return (message_id in _MSGID_LOCK_FAILURE or self.get_err_code() in _REST_LOCKED_ERRORS or retcode == 'EX_EACCES') def is_auth_fail(self): """Check if a response is an authorization error.""" return self['status_code'] == httpclient.UNAUTHORIZED def get_message_id(self): return utils.safe_get_message_id(self['errobj']) def is_no_retry_error(self, no_retry_error_code): """Check if a response is a no retry error.""" return (not self.is_auth_fail() and ((self['status_code'] not in list(range(200, 300)) + list(range(500, 600))) or self.get_err_code() in no_retry_error_code or self.get_message_id() in _REST_NO_RETRY_MESSAGEIDS)) def is_rest_server_busy(self): """Check if a response is a server busy error.""" if not self['errobj']: return False message_id = self['errobj'].get('messageId') return (message_id in _MSGID_REST_SERVER_BUSY) def get_errobj(self): return { 'errorSource': self['errobj'].get('errorSource', ''), 'messageId': self['errobj'].get('messageId', ''), 'message': self['errobj'].get('message', ''), 'cause': self['errobj'].get('cause', ''), 'solution': self['errobj'].get('solution', ''), 'errorCode': self['errobj'].get('errorCode', {}), } def get_job_result(self): return {'job_id': self['rsp_body'].get('jobId', ''), 'status': self['rsp_body'].get('status', ''), 'state': self['rsp_body'].get('state', '')} class RestApiClient(): def __init__(self, conf, ip_addr, ip_port, storage_device_id, user_id, user_pass, driver_prefix, tcp_keepalive=False, verify=False, is_rep=False): """Initialize instance variables.""" self.conf = conf self.ip_addr = ip_addr self.ip_port = ip_port self.storage_id = storage_device_id self.storage_info = {} self.user_id = user_id self.user_pass = user_pass self.tcp_keepalive = tcp_keepalive self.verify = verify self.connect_timeout = self.conf.hitachi_rest_connect_timeout self.is_rep = is_rep self.login_lock = threading.Lock() self.keep_session_loop = loopingcall.FixedIntervalLoopingCall( self._keep_session) self.nested_count = 0 self.resource_lock = threading.Lock() self.base_url = _build_base_url(ip_addr, self.ip_port) self.object_url = '%(base_url)s/v1/objects/storages/%(storage_id)s' % { 'base_url': self.base_url, 'storage_id': self.storage_id, } self.service_url = '%(base_url)s/v1/%(storage_id)s/services' % { 'base_url': self.base_url, 'storage_id': self.storage_id, } self.headers = {"content-type": "application/json", "accept": "application/json"} self.driver_prefix = driver_prefix class Session(requests.auth.AuthBase): def __init__(self, id, 
token): """Initialize instance variables.""" self.id = id self.token = token def __call__(self, req): req.headers['Authorization'] = 'Session %(token)s' % { 'token': self.token, } return req def _request(self, method, url, params=None, body=None, async_=False, **kwargs): """Transmit the request to REST API server.""" kwargs.setdefault('ignore_error', []) kwargs['no_retry_error'] = (kwargs['ignore_error'] + REST_NO_RETRY_ERRORS) kwargs.setdefault('no_retry', False) kwargs.setdefault('do_raise', True) kwargs.setdefault('ignore_message_id', []) kwargs.setdefault('no_relogin', False) kwargs.setdefault('ignore_return_code', []) kwargs.setdefault('ignore_all_errors', False) kwargs.setdefault('timeout_message', None) kwargs.setdefault('no_log', False) kwargs.setdefault('timeout', self.conf.hitachi_rest_timeout) headers = dict(self.headers) if async_: read_timeout = self.conf.hitachi_rest_job_api_response_timeout headers.update({ "Response-Max-Wait": str( self.conf.hitachi_rest_job_api_response_timeout), "Response-Job-Status": "Completed;"}) else: read_timeout = self.conf.hitachi_rest_get_api_response_timeout remote_auth = kwargs.get('remote_auth') if remote_auth: headers["Remote-Authorization"] = 'Session ' + remote_auth.token auth_data = kwargs.get('auth', self.get_my_session()) timeout = (self.connect_timeout, read_timeout) interval = kwargs.get( 'interval', self.conf.hitachi_exec_retry_interval) retry = True start_time = timeutils.utcnow() watch = timeutils.StopWatch() while retry: watch.restart() try: with requests.Session() as session: if self.tcp_keepalive: session.mount(_HTTPS, KeepAliveAdapter(self.conf)) rsp = session.request(method, url, params=params, json=body, headers=headers, auth=auth_data, timeout=timeout, verify=self.verify) except Exception as e: msg = self.output_log( MSG.REST_SERVER_CONNECT_FAILED, exception=type(e), message=e, method=method, url=url, params=params, body=body) message = _( '%(prefix)s error occurred. %(msg)s' % { 'prefix': self.driver_prefix, 'msg': msg, } ) raise exception.VolumeDriverException(message) response = ResponseData(rsp) if (response['status_code'] == httpclient.INTERNAL_SERVER_ERROR and kwargs['timeout'] < _REST_SERVER_RESTART_TIMEOUT): kwargs['timeout'] = _REST_SERVER_RESTART_TIMEOUT if (response['status_code'] == httpclient.SERVICE_UNAVAILABLE and kwargs['timeout'] < _REST_SERVER_ERROR_TIMEOUT): kwargs['timeout'] = _REST_SERVER_ERROR_TIMEOUT retry, rsp_data, errobj = self._check_rest_api_response( response, start_time, method=method, url=url, params=params, body=body, **kwargs) if retry: watch.stop() idle = max(interval - watch.elapsed(), 0) greenthread.sleep(idle) if not kwargs['no_relogin'] and response.is_auth_fail(): auth_data = self.get_my_session() return rsp_data, errobj def _check_rest_api_response( self, response, start_time, method=None, url=None, params=None, body=None, **kwargs): """Check the response from REST API server.""" rsp_body = response['rsp_body'] errobj = response['errobj'] if response.is_locked(): if (kwargs['no_retry'] or utils.timed_out( start_time, self.conf.hitachi_lock_timeout)): msg = self.output_log(MSG.REST_API_FAILED, no_log=kwargs['no_log'], method=method, url=url, params=params, body=body, **response.get_errobj()) if kwargs['do_raise']: message = _( '%(prefix)s error occurred. 
%(msg)s' % { 'prefix': self.driver_prefix, 'msg': msg, } ) raise exception.VolumeDriverException( message, errobj=errobj) return False, rsp_body, errobj else: LOG.debug("The resource group to which the operation object " "belongs is being locked by other software.") return True, rsp_body, errobj if response.is_success(kwargs['ignore_error'], kwargs['ignore_message_id'], kwargs['ignore_return_code'], kwargs['ignore_all_errors']): return False, rsp_body, errobj if (kwargs['no_retry'] and response['status_code'] != httpclient.INTERNAL_SERVER_ERROR or response.is_no_retry_error(kwargs['no_retry_error'])): retry = False elif response.is_auth_fail(): retry = self.relogin(kwargs['no_relogin']) else: retry = True if retry and response.is_rest_server_busy(): if utils.timed_out( start_time, self.conf.hitachi_rest_server_busy_timeout): retry = False elif retry and response.get_err_code() in (ANOTHER_LDEV_MAPPED, ): if utils.timed_out( start_time, self.conf.hitachi_rest_another_ldev_mapped_retry_timeout): LOG.debug( "Another LDEV is already mapped to the specified LUN.") retry = False elif retry and utils.timed_out(start_time, kwargs['timeout']): if kwargs['timeout_message']: self.output_log(kwargs['timeout_message'][0], **kwargs['timeout_message'][1]) if response.is_json(): msg = self.output_log(MSG.REST_API_TIMEOUT, no_log=kwargs['no_log'], method=method, url=url, params=params, body=body, **response.get_job_result()) if errobj: msg = self.output_log(MSG.REST_API_FAILED, no_log=kwargs['no_log'], method=method, url=url, params=params, body=body, **response.get_errobj()) else: msg = self.output_log(MSG.REST_API_HTTP_ERROR, no_log=kwargs['no_log'], status_code=response['status_code'], response_body=rsp_body, method=method, url=url, params=params, body=body) if kwargs['do_raise']: message = _( '%(prefix)s error occurred. %(msg)s' % { 'prefix': self.driver_prefix, 'msg': msg, } ) raise exception.VolumeDriverException( message, errobj=errobj) return False, rsp_body, errobj if errobj: LOG.debug('ERROR %s', errobj) else: LOG.debug('ERROR %s', ' '.join(str(rsp_body).splitlines())) if not retry: if response.is_json(): msg = self.output_log(MSG.REST_API_FAILED, no_log=kwargs['no_log'], method=method, url=url, params=params, body=body, **response.get_errobj()) else: msg = self.output_log(MSG.REST_API_HTTP_ERROR, no_log=kwargs['no_log'], status_code=response['status_code'], response_body=rsp_body, method=method, url=url, params=params, body=body) if kwargs['do_raise']: message = _( '%(prefix)s error occurred. %(msg)s' % { 'prefix': self.driver_prefix, 'msg': msg, } ) raise exception.VolumeDriverException( message, errobj=errobj) return retry, rsp_body, errobj def lock_resource_group(self, waittime=_LOCK_RESOURCE_GROUP_TIMEOUT): """Lock resources. Lock resources of a resource group allocated to the user who executes API requests, preventing other users from performing operations on the resources. 
""" with self.resource_lock: if self.nested_count <= 0: url = '%(url)s/resource-group-service/actions/%(action)s' % { 'url': self.service_url, 'action': 'lock', } + '/invoke' if waittime: body = {"parameters": {"waitTime": waittime}} self._invoke(url, body=body, timeout=waittime) else: self._invoke(url) self.nested_count += 1 def unlock_resource_group(self): """If the lock is already released, there is no need to unlock.""" with self.resource_lock: if self.nested_count == 0: return self.nested_count -= 1 if self.nested_count <= 0: url = '%(url)s/resource-group-service/actions/%(action)s' % { 'url': self.service_url, 'action': 'unlock', } + '/invoke' self._invoke(url) def set_my_session(self, session): self.session = session def get_my_session(self): return getattr(self, 'session', None) @volume_utils.trace def _login(self, do_raise=True): """Establishes a session and manages the session.""" url = '%(url)s/sessions' % { 'url': self.object_url, } auth = (self.user_id, self.user_pass) rsp, err = self._request("POST", url, auth=auth, no_relogin=True, do_raise=do_raise, timeout=self.conf.hitachi_lock_timeout) if not err: self.set_my_session(self.Session(rsp["sessionId"], rsp["token"])) self.nested_count = 0 return True else: return False def login(self): """Establishes a session and manages the session.""" LOG.debug("Trying to login.") return self._login() def get_session(self, session_id, **kwargs): """Get a session information.""" url = '%(url)s/sessions/%(id)s' % { 'url': self.object_url, 'id': session_id, } return self._get_object(url, **kwargs) def _has_session(self): """Check if there is a session managing.""" has_session = False try: session = self.get_my_session() if session is not None: self.get_session(session.id, no_retry=True, no_log=True) has_session = True except exception.VolumeDriverException as ex: LOG.debug('Failed to get session info: %s', ex) return has_session def relogin(self, no_relogin, no_log=False): """Establishes a session again.""" retry = False if not no_relogin: with self.login_lock: retry = self._has_session() if not retry: LOG.debug("Trying to re-login.") self.nested_count = 0 retry = self._login(do_raise=False) if not retry: self.output_log( MSG.REST_LOGIN_FAILED, no_log=no_log, user=self.user_id) return retry def _keep_session(self): """Keep a session.""" LOG.debug('_keep_session thread is started') try: self.relogin(False, no_log=True) except Exception as ex: LOG.debug( 'relogin() in _keep_session() failed. 
%s', ex) def enter_keep_session(self): """Begin the keeping of a session.""" self.keep_session_loop.start( self.conf.hitachi_rest_keep_session_loop_interval) LOG.debug('enter_keep_session') @volume_utils.trace def _get_object(self, url, params=None, **kwargs): """Transmit a GET request that appointed object ID.""" rsp = self._request("GET", url, params=params, **kwargs)[0] return rsp if rsp else None @volume_utils.trace def _get_objects(self, url, params=None, **kwargs): """Transmit a GET request.""" rsp = self._request("GET", url, params=params, **kwargs)[0] return rsp.get("data") if rsp else None @volume_utils.trace def _add_object(self, url, body, **kwargs): """Transmit a POST request.""" rsp, errobj = self._request( "POST", url, body=body, async_=True, **kwargs) if not rsp: return None, errobj resources = rsp.get('affectedResources') if resources: return resources[0].split('/')[-1], errobj return None, errobj @volume_utils.trace def _delete_object(self, url, params=None, body=None, **kwargs): """Transmit a DELETE request.""" self._request("DELETE", url, params=params, body=body, async_=True, **kwargs) @volume_utils.trace def _invoke(self, url, body=None, **kwargs): """Transmit a PUT request.""" self._request("PUT", url, body=body, async_=True, **kwargs) def get_pools(self, params=None): """Get a list of pool information.""" url = '%(url)s/pools' % { 'url': self.object_url, } return self._get_objects(url, params=params) def get_pool(self, pool_id, **kwargs): """Get a pool information.""" url = '%(url)s/pools/%(id)s' % { 'url': self.object_url, 'id': pool_id, } return self._get_object(url, **kwargs) def get_ldev(self, ldev_id, **kwargs): """Get a ldev information.""" url = '%(url)s/ldevs/%(id)s' % { 'url': self.object_url, 'id': ldev_id, } return self._get_object(url, **kwargs) def get_ldevs(self, params=None, **kwargs): """Get a list of ldev information.""" url = '%(url)s/ldevs' % { 'url': self.object_url, } return self._get_objects(url, params=params, **kwargs) def add_ldev(self, body, **kwargs): """Add a ldev information.""" url = '%(url)s/ldevs' % { 'url': self.object_url, } ldev_id = self._add_object(url, body=body, **kwargs)[0] return int(ldev_id) if ldev_id else None def delete_ldev(self, ldev_id, body=None, **kwargs): """Delete a ldev information.""" url = '%(url)s/ldevs/%(id)s' % { 'url': self.object_url, 'id': ldev_id, } self._delete_object(url, body=body, **kwargs) def modify_ldev(self, ldev_id, body, **kwargs): """Modify a ldev information.""" url = '%(url)s/ldevs/%(id)s' % { 'url': self.object_url, 'id': ldev_id, } self._invoke(url, body=body, **kwargs) def extend_ldev(self, ldev_id, body): """Expand a ldev size.""" url = '%(url)s/ldevs/%(id)s/actions/%(action)s/invoke' % { 'url': self.object_url, 'id': ldev_id, 'action': 'expand', } self._invoke(url, body=body, timeout=self.conf.hitachi_extend_timeout) def get_ports(self, params=None): """Get a list of port information.""" url = '%(url)s/ports' % { 'url': self.object_url, } return self._get_objects(url, params=params) def get_port(self, port_id): """Get a port information.""" url = '%(url)s/ports/%(id)s' % { 'url': self.object_url, 'id': port_id, } return self._get_object(url) def get_host_grps(self, params=None): """Get a list of host group information.""" url = '%(url)s/host-groups' % { 'url': self.object_url, } return self._get_objects(url, params=params) def get_host_grp(self, port_id, host_group_number): """Get a host group information.""" url = '%(url)s/host-groups/%(port)s,%(number)d' % { 'url': self.object_url, 
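# Note: host groups are addressed as '<portId>,<hostGroupNumber>' in the REST API object path,
# as the URL template above shows.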
'port': port_id, 'number': host_group_number, } return self._get_object(url) def add_host_grp(self, body, **kwargs): """Add a host group information.""" url = '%(url)s/host-groups' % { 'url': self.object_url, } host_group_id = self._add_object(url, body=body, **kwargs)[0] return int(host_group_id.split(',')[-1]) if host_group_id else None def delete_host_grp(self, port_id, host_group_number): """Delete a host group information.""" url = '%(url)s/host-groups/%(port)s,%(number)d' % { 'url': self.object_url, 'port': port_id, 'number': host_group_number, } self._delete_object(url) def modify_host_grp(self, port_id, host_group_number, body, **kwargs): """Modify a host group information.""" url = '%(url)s/host-groups/%(port)s,%(number)d' % { 'url': self.object_url, 'port': port_id, 'number': host_group_number, } self._invoke(url, body=body, **kwargs) def get_hba_wwns(self, port_id, host_group_number): """Get a list of wwn information.""" url = '%(url)s/host-wwns' % { 'url': self.object_url, } params = {"portId": port_id, "hostGroupNumber": host_group_number} return self._get_objects(url, params=params) def get_hba_wwns_by_name(self, port_id, host_group_name): """Get a list of wwn information of the specified name.""" url = '%(url)s/host-wwns' % { 'url': self.object_url, } params = {"portId": port_id, "hostGroupName": host_group_name} return self._get_objects(url, params=params) def add_hba_wwn(self, port_id, host_group_number, host_wwn, **kwargs): """Add a wwn information.""" url = '%(url)s/host-wwns' % { 'url': self.object_url, } body = {"hostWwn": host_wwn, "portId": port_id, "hostGroupNumber": host_group_number} return self._add_object(url, body=body, **kwargs)[0] def get_hba_iscsis(self, port_id, host_group_number): """Get a list of ISCSI information.""" url = '%(url)s/host-iscsis' % { 'url': self.object_url, } params = {"portId": port_id, "hostGroupNumber": host_group_number} return self._get_objects(url, params=params) def get_hba_iscsis_by_name(self, port_id, host_group_name): """Get a list of ISCSI information of the specified name.""" url = '%(url)s/host-iscsis' % { 'url': self.object_url, } params = {"portId": port_id, "hostGroupName": host_group_name} return self._get_objects(url, params=params) def add_hba_iscsi(self, port_id, host_group_number, iscsi_name): """Add a ISCSI information.""" url = '%(url)s/host-iscsis' % { 'url': self.object_url, } body = {"iscsiName": iscsi_name, "portId": port_id, "hostGroupNumber": host_group_number} return self._add_object(url, body=body)[0] def get_luns(self, port_id, host_group_number, is_basic_lun_information=False): """Get a list of lun information.""" url = '%(url)s/luns' % { 'url': self.object_url, } params = {"portId": port_id, "hostGroupNumber": host_group_number, "isBasicLunInformation": is_basic_lun_information} return self._get_objects(url, params=params) def add_lun(self, port_id, host_group_number, ldev_id, lun=None, **kwargs): """Add a lun information.""" url = '%(url)s/luns' % { 'url': self.object_url, } body = {"portId": port_id, "hostGroupNumber": host_group_number, "ldevId": ldev_id} if lun is not None: body['lun'] = lun lun_id, errobj = self._add_object(url, body=body, **kwargs) return int(lun_id.split(',')[-1]) if lun_id else None, errobj def delete_lun(self, port_id, host_group_number, lun, **kwargs): """Delete a lun information.""" url = '%(url)s/luns/%(port)s,%(number)s,%(lun)d' % { 'url': self.object_url, 'port': port_id, 'number': host_group_number, 'lun': lun, } self._delete_object(url, **kwargs) def get_snapshots(self, 
params=None): """Get a list of snapshot information.""" url = '%(url)s/snapshots' % { 'url': self.object_url, } return self._get_objects(url, params=params) def add_snapshot(self, body, **kwargs): """Add a snapshot information.""" url = '%(url)s/snapshots' % { 'url': self.object_url, } return self._add_object(url, body=body, **kwargs)[0] def delete_snapshot(self, pvol_ldev_id, mu_number, **kwargs): """Delete a snapshot information.""" url = '%(url)s/snapshots/%(pvol)d,%(mu)d' % { 'url': self.object_url, 'pvol': pvol_ldev_id, 'mu': mu_number, } self._delete_object(url, **kwargs) def unassign_snapshot_volume(self, pvol_ldev_id, mu_number, **kwargs): """Unassign a snapshot information.""" url = '%(url)s/snapshots/%(pvol)d,%(mu)d/actions/%(action)s/invoke' % { 'url': self.object_url, 'pvol': pvol_ldev_id, 'mu': mu_number, 'action': 'unassign-volume', } self._invoke(url, **kwargs) def restore_snapshot(self, pvol_ldev_id, mu_number, body=None): """Restore a snapshot information.""" url = '%(url)s/snapshots/%(pvol)d,%(mu)d/actions/%(action)s/invoke' % { 'url': self.object_url, 'pvol': pvol_ldev_id, 'mu': mu_number, 'action': 'restore', } self._invoke(url, body=body) def split_snapshotgroup(self, snapshot_group_id): url = '%(url)s/snapshot-groups/%(id)s/actions/%(action)s/invoke' % { 'url': self.object_url, 'id': snapshot_group_id, 'action': 'split', } self._invoke(url) def discard_zero_page(self, ldev_id): """Return the ldev's no-data pages to the storage pool.""" url = '%(url)s/ldevs/%(id)s/actions/%(action)s/invoke' % { 'url': self.object_url, 'id': ldev_id, 'action': 'discard-zero-page', } self._invoke(url) def get_remote_copy_grps(self, remote_client): url = '%(url)s/remote-mirror-copygroups' % { 'url': self.object_url, } params = {"remoteStorageDeviceId": remote_client.storage_id} with RemoteSession(remote_client) as session: return self._get_objects(url, params=params, remote_auth=session) def get_remote_copy_grp(self, remote_client, copy_group_name, **kwargs): url = '%(url)s/remote-mirror-copygroups/%(id)s' % { 'url': self.object_url, 'id': self._remote_copygroup_id(remote_client, copy_group_name), } with RemoteSession(remote_client) as session: return self._get_object(url, remote_auth=session, **kwargs) def get_remote_copypair(self, remote_client, copy_group_name, pvol_ldev_id, svol_ldev_id, is_secondary=False, **kwargs): url = '%(url)s/remote-mirror-copypairs/%(id)s' % { 'url': self.object_url, 'id': self._remote_copypair_id( remote_client, copy_group_name, pvol_ldev_id, svol_ldev_id, is_secondary), } if remote_client: with RemoteSession(remote_client) as session: return self._get_object(url, remote_auth=session, **kwargs) return self._get_object(url, **kwargs) def add_remote_copypair(self, remote_client, body): url = '%(url)s/remote-mirror-copypairs' % { 'url': self.object_url, } if self.storage_id > remote_client.storage_id: client1, client2 = self, remote_client else: client1, client2 = remote_client, self with ResourceGroupLock(client1): with ResourceGroupLock(client2): session = remote_client.get_my_session() return self._add_object(url, body=body, no_relogin=True, remote_auth=session, job_nowait=True)[0] @utils.synchronized_on_copy_group() def split_remote_copypair(self, remote_client, copy_group_name, pvol_ldev_id, svol_ldev_id, rep_type): body = {"parameters": {"replicationType": rep_type}} url = '%(url)s/remote-mirror-copypairs/%(id)s/actions/%(action)s' % { 'url': self.object_url, 'id': self._remote_copypair_id(remote_client, copy_group_name, pvol_ldev_id, svol_ldev_id), 
'action': 'split', } + '/invoke' with RemoteSession(remote_client) as session: self._invoke(url, body=body, remote_auth=session, job_nowait=True) @utils.synchronized_on_copy_group() def resync_remote_copypair( self, remote_client, copy_group_name, pvol_ldev_id, svol_ldev_id, rep_type, copy_speed=None): body = {"parameters": {"replicationType": rep_type}} if copy_speed: body["parameters"]["copyPace"] = copy_speed url = '%(url)s/remote-mirror-copypairs/%(id)s/actions/%(action)s' % { 'url': self.object_url, 'id': self._remote_copypair_id(remote_client, copy_group_name, pvol_ldev_id, svol_ldev_id), 'action': 'resync', } + '/invoke' with RemoteSession(remote_client) as session: self._invoke(url, body=body, remote_auth=session, job_nowait=True) @utils.synchronized_on_copy_group() def delete_remote_copypair(self, remote_client, copy_group_name, pvol_ldev_id, svol_ldev_id): url = '%(url)s/remote-mirror-copypairs/%(id)s' % { 'url': self.object_url, 'id': self._remote_copypair_id( remote_client, copy_group_name, pvol_ldev_id, svol_ldev_id), } if self.storage_id > remote_client.storage_id: client1, client2 = self, remote_client else: client1, client2 = remote_client, self with ResourceGroupLock(client1): with ResourceGroupLock(client2): session = remote_client.get_my_session() self._delete_object( url, no_relogin=True, remote_auth=session) def _remote_copygroup_id(self, remote_client, copy_group_name, is_secondary=False): storage_id = (remote_client.storage_id if remote_client else _NOT_SPECIFIED) return "%s,%s,%s,%s" % ( storage_id, copy_group_name, _get_device_group_name(remote_client, copy_group_name, is_secondary), _get_device_group_name(remote_client, copy_group_name, is_secondary, is_remote=True)) def _remote_copypair_id(self, remote_client, copy_group_name, pvol_ldev_id, svol_ldev_id, is_secondary=False): return "%s,HBSD-LDEV-%d-%d" % ( self._remote_copygroup_id(remote_client, copy_group_name, is_secondary), pvol_ldev_id, svol_ldev_id) def assign_virtual_ldevid( self, ldev_id, virtual_ldev_id=_MIRROR_RESERVED_VIRTUAL_LDEV_ID): url = '%(url)s/ldevs/%(id)s/actions/%(action)s/invoke' % { 'url': self.object_url, 'id': ldev_id, 'action': 'assign-virtual-ldevid', } body = {"parameters": {"virtualLdevId": virtual_ldev_id}} ignore_error = [('2E21', '9305'), ('2E30', '0088')] self._invoke(url, body=body, ignore_error=ignore_error) def unassign_virtual_ldevid( self, ldev_id, virtual_ldev_id=_MIRROR_RESERVED_VIRTUAL_LDEV_ID): url = '%(url)s/ldevs/%(id)s/actions/%(action)s/invoke' % { 'url': self.object_url, 'id': ldev_id, 'action': 'unassign-virtual-ldevid', } body = {"parameters": {"virtualLdevId": virtual_ldev_id}} self._invoke(url, body=body) def set_qos_specs(self, ldev_id, qos_specs): url = '%(url)s/ldevs/%(id)s/actions/%(action)s/invoke' % { 'url': self.object_url, 'id': ldev_id, 'action': 'set-qos', } for (key, value) in qos_specs.items(): body = {'parameters': {key: value}} self._invoke(url, body=body) def output_log(self, msg_enum, **kwargs): if self.is_rep: return utils.output_log( msg_enum, storage_id=self.storage_id, **kwargs) else: return utils.output_log(msg_enum, **kwargs) class RemoteSession(object): def __init__(self, remote_client): self.remote_client = remote_client def __enter__(self): return self.remote_client.get_my_session() def __exit__(self, exc_type, exc_value, traceback): pass class ResourceGroupLock(object): def __init__(self, client): self.client = client def __enter__(self): self.client.lock_resource_group() return self def __exit__(self, exc_type, exc_value, traceback): 
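# The lock is released unconditionally on context exit; any exception raised inside the
# with-block still propagates because __exit__ returns None.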
self.client.unlock_resource_group() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/drivers/hitachi/hbsd_rest_fc.py0000664000175000017500000004602700000000000023741 0ustar00zuulzuul00000000000000# Copyright (C) 2020, 2023, Hitachi, Ltd. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # """REST interface fibre channel module for Hitachi HBSD Driver.""" from oslo_config import cfg from oslo_log import log as logging from oslo_utils import excutils from cinder import exception from cinder.volume import configuration from cinder.volume.drivers.hitachi import hbsd_rest as rest from cinder.volume.drivers.hitachi import hbsd_rest_api as rest_api from cinder.volume.drivers.hitachi import hbsd_utils as utils from cinder.zonemanager import utils as fczm_utils FC_VOLUME_OPTS = [ cfg.BoolOpt( 'hitachi_zoning_request', default=False, help='If True, the driver will configure FC zoning between the server ' 'and the storage system provided that FC zoning manager is ' 'enabled.'), ] _FC_HMO_DISABLE_IO = 91 _MSG_EXCEED_HOST_GROUP_MAX = "could not find empty Host group ID for adding." LOG = logging.getLogger(__name__) MSG = utils.HBSDMsg CONF = cfg.CONF CONF.register_opts(FC_VOLUME_OPTS, group=configuration.SHARED_CONF_GROUP) class HBSDRESTFC(rest.HBSDREST): """REST interface fibre channel class for Hitachi HBSD Driver.""" def __init__(self, conf, storage_protocol, db): """Initialize instance variables.""" super(HBSDRESTFC, self).__init__(conf, storage_protocol, db) self._lookup_service = fczm_utils.create_lookup_service() def connect_storage(self): """Prepare for using the storage.""" target_ports = self.conf.hitachi_target_ports compute_target_ports = self.conf.hitachi_compute_target_ports if hasattr( self.conf, self.driver_info['param_prefix'] + '_rest_pair_target_ports'): pair_target_ports = self.conf.hitachi_rest_pair_target_ports else: pair_target_ports = [] available_ports = [] available_compute_ports = [] super(HBSDRESTFC, self).connect_storage() # The port attributes must contain TAR. 
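# Only target (TAR) ports are queried here; configured ports missing from the filtered list
# are logged as invalid below and excluded from use.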
params = {'portAttributes': 'TAR'} port_list = self.client.get_ports(params=params) for port in set(target_ports + compute_target_ports + pair_target_ports): if port not in [port_data['portId'] for port_data in port_list]: self.output_log(MSG.INVALID_PORT, port=port, additional_info='portAttributes: not TAR') for port_data in port_list: port = port_data['portId'] if port not in set(target_ports + compute_target_ports + pair_target_ports): continue secure_fc_port = True can_port_schedule = True if hasattr( self.conf, self.driver_info['param_prefix'] + '_port_scheduler'): port_scheduler_param = self.conf.hitachi_port_scheduler else: port_scheduler_param = False if (port_data['portType'] not in ['FIBRE', 'FCoE'] or not port_data['lunSecuritySetting']): secure_fc_port = False elif (port in set(target_ports + compute_target_ports) and port_scheduler_param and not ( port_data.get('fabricMode') and port_data.get('portConnection') == 'PtoP')): can_port_schedule = False if not secure_fc_port or not can_port_schedule: self.output_log( MSG.INVALID_PORT, port=port, additional_info='portType: %s, lunSecuritySetting: %s, ' 'fabricMode: %s, portConnection: %s' % (port_data['portType'], port_data.get('lunSecuritySetting'), port_data.get('fabricMode'), port_data.get('portConnection'))) if not secure_fc_port: continue wwn = port_data.get('wwn') if target_ports and port in target_ports and can_port_schedule: available_ports.append(port) self.storage_info['wwns'][port] = wwn if (compute_target_ports and port in compute_target_ports and can_port_schedule): available_compute_ports.append(port) self.storage_info['wwns'][port] = wwn if pair_target_ports and port in pair_target_ports: self.storage_info['pair_ports'].append(port) if target_ports: for port in target_ports: if port in available_ports: self.storage_info['controller_ports'].append(port) if compute_target_ports: for port in compute_target_ports: if port in available_compute_ports: self.storage_info['compute_ports'].append(port) self.check_ports_info() if pair_target_ports and not self.storage_info['pair_ports']: msg = self.output_log( MSG.RESOURCE_NOT_FOUND, resource="Pair target ports") self.raise_error(msg) self.output_log(MSG.SET_CONFIG_VALUE, object='pair_target_ports', value=self.storage_info['pair_ports']) self.output_log(MSG.SET_CONFIG_VALUE, object='port-wwn list', value=self.storage_info['wwns']) def check_param(self): """Check parameter values and consistency among them.""" super(HBSDRESTFC, self).check_param() self.check_opts(self.conf, FC_VOLUME_OPTS) def create_target_to_storage(self, port, connector, hba_ids): """Create a host group on the specified port.""" target_name = self.create_target_name(connector) try: body = {'portId': port, 'hostGroupName': target_name} gid = self.client.add_host_grp(body, no_log=True) except Exception: params = {'portId': port} host_grp_list = self.client.get_host_grps(params) for host_grp_data in host_grp_list: if host_grp_data['hostGroupName'] == target_name: return target_name, host_grp_data['hostGroupNumber'] raise return target_name, gid def set_hba_ids(self, port, gid, hba_ids): """Connect all specified HBAs with the specified port.""" registered_wwns = [] for wwn in hba_ids: try: self.client.add_hba_wwn(port, gid, wwn, no_log=True) registered_wwns.append(wwn) except exception.VolumeDriverException as ex: self.output_log(MSG.ADD_HBA_WWN_FAILED, port=port, gid=gid, wwn=wwn) if (self.get_port_scheduler_param() and utils.safe_get_err_code(ex.kwargs.get('errobj')) == rest_api.EXCEED_WWN_MAX): raise ex if not 
registered_wwns: msg = self.output_log(MSG.NO_HBA_WWN_ADDED_TO_HOST_GRP, port=port, gid=gid) self.raise_error(msg) def set_target_mode(self, port, gid): """Configure the host group to meet the environment.""" body = {'hostMode': 'LINUX/IRIX'} if self.conf.hitachi_rest_disable_io_wait: body['hostModeOptions'] = [_FC_HMO_DISABLE_IO] if self.conf.hitachi_host_mode_options: if 'hostModeOptions' not in body: body['hostModeOptions'] = [] for opt in self.conf.hitachi_host_mode_options: if int(opt) not in body['hostModeOptions']: body['hostModeOptions'].append(int(opt)) self.client.modify_host_grp(port, gid, body, ignore_all_errors=True) def _get_hwwns_in_hostgroup(self, port, gid, wwpns): """Return WWN registered with the host group.""" hwwns_in_hostgroup = [] for hba_wwn in self.client.get_hba_wwns(port, gid): hwwn = hba_wwn['hostWwn'] if hwwn in wwpns: hwwns_in_hostgroup.append(hwwn) return hwwns_in_hostgroup def _set_target_info(self, targets, host_grps, wwpns): """Set the information of the host group having the specified WWN.""" for host_grp in host_grps: port = host_grp['portId'] gid = host_grp['hostGroupNumber'] hwwns_in_hostgroup = self._get_hwwns_in_hostgroup(port, gid, wwpns) if hwwns_in_hostgroup: targets['info'][port] = True targets['list'].append((port, gid)) LOG.debug( 'Found wwpns in host group. (port: %(port)s, ' 'gid: %(gid)s, wwpns: %(wwpns)s)', {'port': port, 'gid': gid, 'wwpns': hwwns_in_hostgroup}) return True return False def _get_hwwns_in_hostgroup_by_name(self, port, host_group_name, wwpns): """Return WWN registered with the host group of the specified name.""" hba_wwns = self.client.get_hba_wwns_by_name(port, host_group_name) return [hba_wwn for hba_wwn in hba_wwns if hba_wwn['hostWwn'] in wwpns] def _set_target_info_by_names(self, targets, port, target_names, wwpns): """Set the information of the host group having the specified name and the specified WWN. """ for target_name in target_names: hwwns_in_hostgroup = self._get_hwwns_in_hostgroup_by_name( port, target_name, wwpns) if hwwns_in_hostgroup: gid = hwwns_in_hostgroup[0]['hostGroupNumber'] targets['info'][port] = True targets['list'].append((port, gid)) LOG.debug( 'Found wwpns in host group. (port: %(port)s, ' 'gid: %(gid)s, wwpns: %(wwpns)s)', {'port': port, 'gid': gid, 'wwpns': [hwwn['hostWwn'] for hwwn in hwwns_in_hostgroup]}) return True return False def find_targets_from_storage( self, targets, connector, target_ports): """Find mapped ports, memorize them and return unmapped port count.""" wwpns = self.get_hba_ids_from_connector(connector) target_names = [self.create_target_name(connector)] if 'ip' in connector: target_names.append( '%(prefix)s-%(ip)s' % { 'prefix': self.driver_info['driver_prefix'], 'ip': connector['ip'], } ) not_found_count = 0 for port in target_ports: targets['info'][port] = False if self._set_target_info_by_names( targets, port, target_names, wwpns): continue host_grps = self.client.get_host_grps({'portId': port}) if self._set_target_info( targets, [hg for hg in host_grps if hg['hostGroupName'] not in target_names], wwpns): pass else: not_found_count += 1 if self.get_port_scheduler_param(): """ When port scheduler feature is enabled, it is OK to find any mapped port. so: - return 0, if any mapped port is found - return port count, if no mapped port is found. It is no case with both not_found_count and len(target_ports) are zero, bcz it must be failed in param checker if any target ports are not defined. 
""" return (not_found_count if not_found_count == len(target_ports) else 0) return not_found_count def initialize_connection( self, volume, connector, is_snapshot=False, lun=None, is_mirror=False): """Initialize connection between the server and the volume.""" conn_info, map_info = super(HBSDRESTFC, self).initialize_connection( volume, connector, is_snapshot, lun) if self.conf.hitachi_zoning_request: if (self.get_port_scheduler_param() and not self.is_controller(connector)): init_targ_map = map_info else: init_targ_map = utils.build_initiator_target_map( connector, conn_info['data']['target_wwn'], self._lookup_service) if init_targ_map: conn_info['data']['initiator_target_map'] = init_targ_map if not is_mirror: fczm_utils.add_fc_zone(conn_info) return conn_info def terminate_connection(self, volume, connector, is_mirror=False): """Terminate connection between the server and the volume.""" conn_info = super(HBSDRESTFC, self).terminate_connection( volume, connector) if self.conf.hitachi_zoning_request: if conn_info and conn_info['data']['target_wwn']: init_targ_map = utils.build_initiator_target_map( connector, conn_info['data']['target_wwn'], self._lookup_service) if init_targ_map: conn_info['data']['initiator_target_map'] = init_targ_map if not is_mirror: fczm_utils.remove_fc_zone(conn_info) return conn_info def _get_wwpns(self, port, hostgroup): """Get WWPN from a port and the host group.""" wwpns = [] hba_wwns = self.client.get_hba_wwns_by_name(port, hostgroup) for hba_wwn in hba_wwns: wwpns.append(hba_wwn['hostWwn']) return wwpns def set_terminate_target(self, fake_connector, port_hostgroup_map): """Set necessary information in connector in terminate.""" wwpns = set() for port, hostgroups in port_hostgroup_map.items(): for hostgroup in hostgroups: wwpns.update(self._get_wwpns(port, hostgroup)) fake_connector['wwpns'] = list(wwpns) def set_device_map(self, targets, hba_ids, volume): active_hba_ids = [] target_wwns = [] active_target_wwns = [] vol_id = volume['id'] if volume and 'id' in volume.keys() else "" if not self.get_port_scheduler_param(): return None, hba_ids for port in targets['info'].keys(): target_wwns.append(self.storage_info['wwns'][port]) devmap = self._lookup_service.get_device_mapping_from_network( hba_ids, target_wwns) for fabric_name in devmap.keys(): active_hba_ids.extend( devmap[fabric_name]['initiator_port_wwn_list']) active_target_wwns.extend( devmap[fabric_name]['target_port_wwn_list']) active_hba_ids = list(set(active_hba_ids)) if not active_hba_ids: msg = self.output_log(MSG.NO_ACTIVE_WWN, wwn=', '.join(hba_ids), volume=vol_id) self.raise_error(msg) active_target_wwns = list(set(active_target_wwns)) if not active_target_wwns: port_wwns = "" for port in targets['info'].keys(): if port_wwns: port_wwns += ", " port_wwns += ("port, WWN: " + port + ", " + self.storage_info['wwns'][port]) msg = self.output_log( MSG.NO_PORT_WITH_ACTIVE_WWN, port_wwns=port_wwns, volume=vol_id) self.raise_error(msg) return devmap, active_hba_ids def build_wwpn_groups(self, wwpns, connector): count = 1 return ([wwpns[i:i + count] for i in range(0, len(wwpns), count)]) def _create_target_to_any_port( self, targets, ports, connector, hba_ids, fabric_name): for port in ports: index = self.get_port_index_to_be_used(ports, fabric_name) try: self.create_target( targets, ports[index], connector, hba_ids) return except exception.VolumeDriverException as ex: if ((utils.safe_get_message_id(ex.kwargs.get('errobj')) == rest_api.MSGID_SPECIFIED_OBJECT_DOES_NOT_EXIST) or (_MSG_EXCEED_HOST_GROUP_MAX in 
utils.safe_get_message(ex.kwargs.get('errobj')))): self.output_log( MSG.HOST_GROUP_NUMBER_IS_MAXIMUM, port=ports[index]) elif (utils.safe_get_err_code(ex.kwargs.get('errobj')) == rest_api.EXCEED_WWN_MAX): self.output_log( MSG.WWN_NUMBER_IS_MAXIMUM, port=ports[index], wwn=", ". join(hba_ids)) else: raise ex msg = self.output_log( MSG.HOST_GROUP_OR_WWN_IS_NOT_AVAILABLE, ports=', '.join(ports)) self.raise_error(msg) def create_target_by_port_scheduler( self, devmap, targets, connector, volume): available_ports = [] active_ports = [] if not devmap: msg = self.output_log(MSG.ZONE_MANAGER_IS_NOT_AVAILABLE) self.raise_error(msg) for fabric_name in devmap.keys(): available_ports = [] active_ports = [] active_initiator_wwns = devmap[ fabric_name]['initiator_port_wwn_list'] wwpn_groups = self.build_wwpn_groups( active_initiator_wwns, connector) for port, wwn in self.storage_info['wwns'].items(): if wwn in devmap[fabric_name]['target_port_wwn_list']: available_ports.append(port) target_ports = self.get_target_ports(connector) filter_ports = self.filter_target_ports(target_ports, volume) for port in target_ports: if port in available_ports and port in filter_ports: active_ports.append(port) elif port not in available_ports and port in filter_ports: self.output_log( MSG.INVALID_PORT_BY_ZONE_MANAGER, port=port) for wwpns in wwpn_groups: try: self._create_target_to_any_port( targets, active_ports, connector, wwpns, fabric_name) except exception.VolumeDriverException: with excutils.save_and_reraise_exception(): self.clean_mapping_targets(targets) def set_target_map_info(self, targets, hba_ids, port): for hba_id in hba_ids: target_map = {hba_id: [self.storage_info['wwns'][port]]} targets['target_map'].update(target_map) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/drivers/hitachi/hbsd_rest_iscsi.py0000664000175000017500000002647700000000000024472 0ustar00zuulzuul00000000000000# Copyright (C) 2020, 2023, Hitachi, Ltd. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
# """REST interface iSCSI module for Hitachi HBSD Driver.""" from oslo_log import log as logging from cinder.volume.drivers.hitachi import hbsd_rest as rest from cinder.volume.drivers.hitachi import hbsd_utils as utils _ISCSI_HMO_REPORT_FULL_PORTAL = 83 _ISCSI_HMO_DISABLE_IO = 91 LOG = logging.getLogger(__name__) MSG = utils.HBSDMsg class HBSDRESTISCSI(rest.HBSDREST): """REST interface iscsi class for Hitachi HBSD Driver.""" def _set_target_portal(self, port): """Get port info and store it in an instance variable.""" result = self.client.get_port(port) ipv4_addr = result.get('ipv4Address') tcp_port = result.get('tcpPort') if not ipv4_addr or not tcp_port: return False, ipv4_addr, tcp_port self.storage_info['portals'][port] = '%(ip)s:%(port)s' % { 'ip': ipv4_addr, 'port': tcp_port, } return True, ipv4_addr, tcp_port def connect_storage(self): """Prepare for using the storage.""" target_ports = self.conf.hitachi_target_ports compute_target_ports = self.conf.hitachi_compute_target_ports if hasattr( self.conf, self.driver_info['param_prefix'] + '_rest_pair_target_ports'): pair_target_ports = self.conf.hitachi_rest_pair_target_ports else: pair_target_ports = [] super(HBSDRESTISCSI, self).connect_storage() # The port type must be ISCSI and the port attributes must contain TAR. params = {'portType': 'ISCSI', 'portAttributes': 'TAR'} port_list = self.client.get_ports(params=params) for port in set(target_ports + compute_target_ports + pair_target_ports): if port not in [port_data['portId'] for port_data in port_list]: self.output_log( MSG.INVALID_PORT, port=port, additional_info='(portType, ' 'portAttributes): not (ISCSI, TAR)') for port_data in port_list: port = port_data['portId'] if port not in set(target_ports + compute_target_ports + pair_target_ports): continue has_addr = True if not port_data['lunSecuritySetting']: addr_info = "" elif port in set(target_ports + compute_target_ports): has_addr, ipv4_addr, tcp_port = self._set_target_portal(port) if not has_addr: addr_info = (', ipv4Address: %s, tcpPort: %s' % (ipv4_addr, tcp_port)) if not port_data['lunSecuritySetting'] or not has_addr: self.output_log( MSG.INVALID_PORT, port=port, additional_info='portType: %s, lunSecuritySetting: %s%s' % (port_data['portType'], port_data['lunSecuritySetting'], addr_info)) if not port_data['lunSecuritySetting']: continue if target_ports and port in target_ports and has_addr: self.storage_info['controller_ports'].append(port) if (compute_target_ports and port in compute_target_ports and has_addr): self.storage_info['compute_ports'].append(port) if pair_target_ports and port in pair_target_ports: self.storage_info['pair_ports'].append(port) self.check_ports_info() if pair_target_ports and not self.storage_info['pair_ports']: msg = self.output_log( MSG.RESOURCE_NOT_FOUND, resource="Pair target ports") self.raise_error(msg) self.output_log(MSG.SET_CONFIG_VALUE, object='pair_target_ports', value=self.storage_info['pair_ports']) self.output_log(MSG.SET_CONFIG_VALUE, object='port- list', value=self.storage_info['portals']) def create_target_to_storage(self, port, connector, hba_ids): """Create an iSCSI target on the specified port.""" target_name = self.create_target_name(connector) body = {'portId': port, 'hostGroupName': target_name} if hba_ids: body['iscsiName'] = '%(id)s%(suffix)s' % { 'id': hba_ids, 'suffix': self.driver_info['target_iqn_suffix'], } try: gid = self.client.add_host_grp(body, no_log=True) except Exception: params = {'portId': port} host_grp_list = self.client.get_host_grps(params) for 
host_grp_data in host_grp_list: if host_grp_data['hostGroupName'] == target_name: return target_name, host_grp_data['hostGroupNumber'] else: raise return target_name, gid def set_hba_ids(self, port, gid, hba_ids): """Connect the specified HBA with the specified port.""" self.client.add_hba_iscsi(port, gid, hba_ids) def set_target_mode(self, port, gid): """Configure the iSCSI target to meet the environment.""" body = {'hostMode': 'LINUX/IRIX', 'hostModeOptions': [_ISCSI_HMO_REPORT_FULL_PORTAL]} if self.conf.hitachi_rest_disable_io_wait: body['hostModeOptions'].append(_ISCSI_HMO_DISABLE_IO) if self.conf.hitachi_host_mode_options: for opt in self.conf.hitachi_host_mode_options: if int(opt) not in body['hostModeOptions']: body['hostModeOptions'].append(int(opt)) self.client.modify_host_grp(port, gid, body) def _is_host_iqn_registered_in_target(self, port, gid, host_iqn): """Check if the specified IQN is registered with iSCSI target.""" for hba_iscsi in self.client.get_hba_iscsis(port, gid): if host_iqn == hba_iscsi['iscsiName']: return True return False def _set_target_info(self, targets, host_grps, iqn): """Set the information of the iSCSI target having the specified IQN.""" for host_grp in host_grps: port = host_grp['portId'] gid = host_grp['hostGroupNumber'] storage_iqn = host_grp['iscsiName'] if self._is_host_iqn_registered_in_target(port, gid, iqn): targets['info'][port] = True targets['list'].append((port, gid)) targets['iqns'][(port, gid)] = storage_iqn return True return False def _get_host_iqn_registered_in_target_by_name( self, port, target_name, host_iqn): """Get the information of the iSCSI target having the specified name and the specified IQN. """ for hba_iscsi in self.client.get_hba_iscsis_by_name(port, target_name): if host_iqn == hba_iscsi['iscsiName']: return hba_iscsi return None def _set_target_info_by_name(self, targets, port, target_name, iqn): """Set the information of the iSCSI target having the specified name and the specified IQN. 
""" host_iqn_registered_in_target = ( self._get_host_iqn_registered_in_target_by_name( port, target_name, iqn)) if host_iqn_registered_in_target: gid = host_iqn_registered_in_target['hostGroupNumber'] storage_iqn = self.client.get_host_grp(port, gid)['iscsiName'] targets['info'][port] = True targets['list'].append((port, gid)) targets['iqns'][(port, gid)] = storage_iqn return True return False def find_targets_from_storage(self, targets, connector, target_ports): """Find mapped ports, memorize them and return unmapped port count.""" iqn = self.get_hba_ids_from_connector(connector) not_found_count = 0 for port in target_ports: targets['info'][port] = False if 'ip' in connector: target_name = self.create_target_name(connector) if self._set_target_info_by_name( targets, port, target_name, iqn): continue host_grps = self.client.get_host_grps({'portId': port}) if 'ip' in connector: host_grps = [hg for hg in host_grps if hg['hostGroupName'] != target_name] if self._set_target_info(targets, host_grps, iqn): pass else: not_found_count += 1 return not_found_count def initialize_connection( self, volume, connector, is_snapshot=False, lun=None, is_mirror=False): """Initialize connection between the server and the volume.""" conn_info, map_info = super(HBSDRESTISCSI, self).initialize_connection( volume, connector, is_snapshot, lun) return conn_info def terminate_connection(self, volume, connector, is_mirror=False): """Terminate connection between the server and the volume.""" return super(HBSDRESTISCSI, self).terminate_connection( volume, connector) def get_properties_iscsi(self, targets, multipath): """Return iSCSI-specific server-LDEV connection info.""" if not multipath: target_list = targets['list'][:1] else: target_list = targets['list'][:] for target in target_list: if target not in targets['iqns']: port, gid = target target_info = self.client.get_host_grp(port, gid) iqn = target_info.get('iscsiName') if target_info else None if not iqn: msg = self.output_log(MSG.RESOURCE_NOT_FOUND, resource='Target IQN') self.raise_error(msg) targets['iqns'][target] = iqn LOG.debug( 'Found target iqn of host group. (port: %(port)s, ' 'gid: %(gid)s, target iqn: %(iqn)s)', {'port': port, 'gid': gid, 'iqn': iqn}) return super(HBSDRESTISCSI, self).get_properties_iscsi( targets, multipath) def _get_iqn(self, port, hostgroup): """Get IQN from a port and the ISCSI target.""" hba_iscsis = self.client.get_hba_iscsis_by_name(port, hostgroup) return hba_iscsis[0]['iscsiName'] def set_terminate_target(self, fake_connector, port_hostgroup_map): """Set necessary information in connector in terminate.""" for port, hostgroups in port_hostgroup_map.items(): for hostgroup in hostgroups: iqn = self._get_iqn(port, hostgroup) if iqn: fake_connector['initiator'] = iqn return ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/drivers/hitachi/hbsd_utils.py0000664000175000017500000007516700000000000023463 0ustar00zuulzuul00000000000000# Copyright (C) 2020, 2024, Hitachi, Ltd. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. # """Utility module for Hitachi HBSD Driver.""" import enum import functools import logging as base_logging from oslo_log import log as logging from oslo_utils import timeutils from oslo_utils import units from cinder import exception from cinder import utils as cinder_utils from cinder.volume import volume_types VERSION = '2.4.0' CI_WIKI_NAME = 'Hitachi_VSP_CI' PARAM_PREFIX = 'hitachi' VENDOR_NAME = 'Hitachi' DRIVER_DIR_NAME = 'hbsd' DRIVER_PREFIX = 'HBSD' DRIVER_FILE_PREFIX = 'hbsd' TARGET_PREFIX = 'HBSD-' HDP_VOL_ATTR = 'HDP' HDT_VOL_ATTR = 'HDT' NVOL_LDEV_TYPE = 'DP-VOL' TARGET_IQN_SUFFIX = '.hbsd-target' PAIR_ATTR = 'HTI' MIRROR_ATTR = 'GAD' GIGABYTE_PER_BLOCK_SIZE = units.Gi / 512 PRIMARY_STR = 'primary' SECONDARY_STR = 'secondary' NORMAL_LDEV_TYPE = 'Normal' FULL = 'Full copy' THIN = 'Thin copy' INFO_SUFFIX = 'I' WARNING_SUFFIX = 'W' ERROR_SUFFIX = 'E' PORT_ID_LENGTH = 5 BUSY_MESSAGE = "Device or resource is busy." QOS_KEYS = ['upperIops', 'upperTransferRate', 'lowerIops', 'lowerTransferRate', 'responsePriority'] @enum.unique class HBSDMsg(enum.Enum): """messages for Hitachi HBSD Driver.""" DRIVER_INITIALIZATION_START = { 'msg_id': 4, 'loglevel': base_logging.INFO, 'msg': 'Initialization of %(driver)s %(version)s started.', 'suffix': INFO_SUFFIX, } SET_CONFIG_VALUE = { 'msg_id': 5, 'loglevel': base_logging.INFO, 'msg': 'Set %(object)s to %(value)s.', 'suffix': INFO_SUFFIX, } OBJECT_CREATED = { 'msg_id': 6, 'loglevel': base_logging.INFO, 'msg': 'Created %(object)s. (%(details)s)', 'suffix': INFO_SUFFIX, } NO_LUN = { 'msg_id': 301, 'loglevel': base_logging.WARNING, 'msg': 'A LUN (HLUN) was not found. (LDEV: %(ldev)s)', 'suffix': WARNING_SUFFIX, } INVALID_LDEV_FOR_UNMAPPING = { 'msg_id': 302, 'loglevel': base_logging.WARNING, 'msg': 'Failed to specify a logical device for the volume ' '%(volume_id)s to be unmapped.', 'suffix': WARNING_SUFFIX, } INVALID_LDEV_FOR_DELETION = { 'msg_id': 304, 'loglevel': base_logging.WARNING, 'msg': 'Failed to specify a logical device to be deleted. ' '(method: %(method)s, id: %(id)s)', 'suffix': WARNING_SUFFIX, } DELETE_TARGET_FAILED = { 'msg_id': 306, 'loglevel': base_logging.WARNING, 'msg': 'A host group or an iSCSI target could not be deleted. ' '(port: %(port)s, gid: %(id)s)', 'suffix': WARNING_SUFFIX, } CREATE_HOST_GROUP_FAILED = { 'msg_id': 308, 'loglevel': base_logging.WARNING, 'msg': 'A host group could not be added. (port: %(port)s)', 'suffix': WARNING_SUFFIX, } CREATE_ISCSI_TARGET_FAILED = { 'msg_id': 309, 'loglevel': base_logging.WARNING, 'msg': 'An iSCSI target could not be added. (port: %(port)s)', 'suffix': WARNING_SUFFIX, } UNMAP_LDEV_FAILED = { 'msg_id': 310, 'loglevel': base_logging.WARNING, 'msg': 'Failed to unmap a logical device. (LDEV: %(ldev)s)', 'suffix': WARNING_SUFFIX, } DELETE_LDEV_FAILED = { 'msg_id': 313, 'loglevel': base_logging.WARNING, 'msg': 'Failed to delete a logical device. (LDEV: %(ldev)s)', 'suffix': WARNING_SUFFIX, } MAP_LDEV_FAILED = { 'msg_id': 314, 'loglevel': base_logging.WARNING, 'msg': 'Failed to map a logical device. (LDEV: %(ldev)s, port: ' '%(port)s, id: %(id)s, lun: %(lun)s)', 'suffix': WARNING_SUFFIX, } DISCARD_ZERO_PAGE_FAILED = { 'msg_id': 315, 'loglevel': base_logging.WARNING, 'msg': 'Failed to perform a zero-page reclamation. (LDEV: ' '%(ldev)s)', 'suffix': WARNING_SUFFIX, } ADD_HBA_WWN_FAILED = { 'msg_id': 317, 'loglevel': base_logging.WARNING, 'msg': 'Failed to assign the WWN. 
(port: %(port)s, gid: %(gid)s, ' 'wwn: %(wwn)s)', 'suffix': WARNING_SUFFIX, } LDEV_NOT_EXIST = { 'msg_id': 319, 'loglevel': base_logging.WARNING, 'msg': 'The logical device does not exist in the storage system. ' '(LDEV: %(ldev)s)', 'suffix': WARNING_SUFFIX, } REST_LOGIN_FAILED = { 'msg_id': 321, 'loglevel': base_logging.WARNING, 'msg': 'Failed to perform user authentication of the REST API server. ' '(user: %(user)s)', 'suffix': WARNING_SUFFIX, } DELETE_PAIR_FAILED = { 'msg_id': 325, 'loglevel': base_logging.WARNING, 'msg': 'Failed to delete copy pair. (P-VOL: %(pvol)s, S-VOL: ' '%(svol)s)', 'suffix': WARNING_SUFFIX, } DISCONNECT_VOLUME_FAILED = { 'msg_id': 329, 'loglevel': base_logging.WARNING, 'msg': 'Failed to detach the logical device. (LDEV: %(ldev)s, ' 'reason: %(reason)s)', 'suffix': WARNING_SUFFIX, } INVALID_EXTRA_SPEC_KEY_PORT = { 'msg_id': 330, 'loglevel': base_logging.WARNING, 'msg': 'The port name specified for the extra spec key ' '"%(target_ports_param)s" ' 'of the volume type is not specified for the ' 'target_ports or compute_target_ports ' 'parameter in cinder.conf. (port: %(port)s, volume type: ' '%(volume_type)s)', 'suffix': WARNING_SUFFIX, } VOLUME_IS_BEING_REHYDRATED = { 'msg_id': 333, 'loglevel': base_logging.WARNING, 'msg': 'Retyping the volume will be performed using migration ' 'because the specified volume is being rehydrated. ' 'This process may take a long time depending on the data ' 'size. (volume: %(volume_id)s, volume type: %(volume_type)s)', 'suffix': WARNING_SUFFIX, } INCONSISTENCY_DEDUPLICATION_SYSTEM_VOLUME = { 'msg_id': 334, 'loglevel': base_logging.WARNING, 'msg': 'Retyping the volume will be performed using migration ' 'because inconsistency was found in the deduplication ' 'system data volume. This process may take a long time ' 'depending on the data size. ' '(volume: %(volume_id)s, volume type: %(volume_type)s)', 'suffix': WARNING_SUFFIX, } HOST_GROUP_NUMBER_IS_MAXIMUM = { 'msg_id': 335, 'loglevel': base_logging.WARNING, 'msg': 'Failed to create the host group because the host group ' 'maximum of the port is exceeded. (port: %(port)s)', 'suffix': WARNING_SUFFIX, } WWN_NUMBER_IS_MAXIMUM = { 'msg_id': 336, 'loglevel': base_logging.WARNING, 'msg': 'Failed to add the wwns to the host group port because the ' 'WWN maximum of the port is exceeded. ' '(port: %(port)s, WWN: %(wwn)s)', 'suffix': WARNING_SUFFIX, } REPLICATION_VOLUME_OPERATION_FAILED = { 'msg_id': 337, 'loglevel': base_logging.WARNING, 'msg': 'Failed to %(operation)s the %(type)s in a replication pair. ' '(volume: %(volume_id)s, reason: %(reason)s)', 'suffix': WARNING_SUFFIX, } SITE_INITIALIZATION_FAILED = { 'msg_id': 338, 'loglevel': base_logging.WARNING, 'msg': 'Failed to initialize the driver for the %(site)s storage ' 'system.', 'suffix': WARNING_SUFFIX, } INVALID_PORT = { 'msg_id': 339, 'loglevel': base_logging.WARNING, 'msg': 'Port %(port)s will not be used because its settings are ' 'invalid. (%(additional_info)s)', 'suffix': WARNING_SUFFIX, } INVALID_PORT_BY_ZONE_MANAGER = { 'msg_id': 340, 'loglevel': base_logging.WARNING, 'msg': 'Port %(port)s will not be used because it is not considered ' 'to be active by the Fibre Channel Zone Manager.', 'suffix': WARNING_SUFFIX, } SKIP_DELETING_LDEV = { 'msg_id': 348, 'loglevel': base_logging.WARNING, 'msg': 'Skip deleting the LDEV and its LUNs and pairs because the ' 'LDEV is used by another object. 
(%(obj)s: %(obj_id)s, LDEV: ' '%(ldev)s, LDEV label: %(ldev_label)s)', 'suffix': WARNING_SUFFIX, } STORAGE_COMMAND_FAILED = { 'msg_id': 600, 'loglevel': base_logging.ERROR, 'msg': 'The command %(cmd)s failed. (ret: %(ret)s, stdout: ' '%(out)s, stderr: %(err)s)', 'suffix': ERROR_SUFFIX, } INVALID_PARAMETER = { 'msg_id': 601, 'loglevel': base_logging.ERROR, 'msg': 'A parameter is invalid. (%(param)s)', 'suffix': ERROR_SUFFIX, } PAIR_STATUS_WAIT_TIMEOUT = { 'msg_id': 611, 'loglevel': base_logging.ERROR, 'msg': 'The status change of copy pair could not be ' 'completed. (S-VOL: %(svol)s)', 'suffix': ERROR_SUFFIX, } INVALID_LDEV_STATUS_FOR_COPY = { 'msg_id': 612, 'loglevel': base_logging.ERROR, 'msg': 'The source logical device to be replicated does not exist ' 'in the storage system. (LDEV: %(ldev)s)', 'suffix': ERROR_SUFFIX, } INVALID_LDEV_FOR_EXTENSION = { 'msg_id': 613, 'loglevel': base_logging.ERROR, 'msg': 'The volume %(volume_id)s to be extended was not found.', 'suffix': ERROR_SUFFIX, } NO_HBA_WWN_ADDED_TO_HOST_GRP = { 'msg_id': 614, 'loglevel': base_logging.ERROR, 'msg': 'No WWN is assigned. (port: %(port)s, gid: %(gid)s)', 'suffix': ERROR_SUFFIX, } UNABLE_TO_DELETE_PAIR = { 'msg_id': 616, 'loglevel': base_logging.ERROR, 'msg': 'Failed to delete a pair. (P-VOL: %(pvol)s)', 'suffix': ERROR_SUFFIX, } INVALID_VOLUME_TYPE_FOR_EXTEND = { 'msg_id': 618, 'loglevel': base_logging.ERROR, 'msg': 'The volume %(volume_id)s could not be extended. The ' 'volume type must be Normal.', 'suffix': ERROR_SUFFIX, } INVALID_LDEV_FOR_CONNECTION = { 'msg_id': 619, 'loglevel': base_logging.ERROR, 'msg': 'The volume %(volume_id)s to be mapped was not found.', 'suffix': ERROR_SUFFIX, } POOL_INFO_RETRIEVAL_FAILED = { 'msg_id': 620, 'loglevel': base_logging.ERROR, 'msg': 'Failed to provide information about a pool. (pool: ' '%(pool)s)', 'suffix': ERROR_SUFFIX, } INVALID_LDEV_FOR_VOLUME_COPY = { 'msg_id': 624, 'loglevel': base_logging.ERROR, 'msg': 'The %(type)s %(id)s source to be replicated was not ' 'found.', 'suffix': ERROR_SUFFIX, } CONNECT_VOLUME_FAILED = { 'msg_id': 634, 'loglevel': base_logging.ERROR, 'msg': 'Failed to attach the logical device. (LDEV: %(ldev)s, ' 'reason: %(reason)s)', 'suffix': ERROR_SUFFIX, } CREATE_LDEV_FAILED = { 'msg_id': 636, 'loglevel': base_logging.ERROR, 'msg': 'Failed to add the logical device.', 'suffix': ERROR_SUFFIX, } PAIR_TARGET_FAILED = { 'msg_id': 638, 'loglevel': base_logging.ERROR, 'msg': 'Failed to add the pair target.', 'suffix': ERROR_SUFFIX, } MAP_PAIR_TARGET_FAILED = { 'msg_id': 639, 'loglevel': base_logging.ERROR, 'msg': 'Failed to map a logical device to any pair targets. ' '(LDEV: %(ldev)s)', 'suffix': ERROR_SUFFIX, } POOL_NOT_FOUND = { 'msg_id': 640, 'loglevel': base_logging.ERROR, 'msg': 'A pool could not be found. (pool: %(pool)s)', 'suffix': ERROR_SUFFIX, } NO_AVAILABLE_RESOURCE = { 'msg_id': 648, 'loglevel': base_logging.ERROR, 'msg': 'There are no resources available for use. (resource: ' '%(resource)s)', 'suffix': ERROR_SUFFIX, } NO_CONNECTED_TARGET = { 'msg_id': 649, 'loglevel': base_logging.ERROR, 'msg': 'The host group or iSCSI target was not found.', 'suffix': ERROR_SUFFIX, } RESOURCE_NOT_FOUND = { 'msg_id': 650, 'loglevel': base_logging.ERROR, 'msg': 'The resource %(resource)s was not found.', 'suffix': ERROR_SUFFIX, } LDEV_DELETION_WAIT_TIMEOUT = { 'msg_id': 652, 'loglevel': base_logging.ERROR, 'msg': 'Failed to delete a logical device. 
(LDEV: %(ldev)s)', 'suffix': ERROR_SUFFIX, } INVALID_LDEV_ATTR_FOR_MANAGE = { 'msg_id': 702, 'loglevel': base_logging.ERROR, 'msg': 'Failed to manage the specified LDEV (%(ldev)s). The LDEV ' 'must be an unpaired %(ldevtype)s.', 'suffix': ERROR_SUFFIX, } INVALID_LDEV_SIZE_FOR_MANAGE = { 'msg_id': 703, 'loglevel': base_logging.ERROR, 'msg': 'Failed to manage the specified LDEV (%(ldev)s). The LDEV ' 'size must be expressed in gigabytes.', 'suffix': ERROR_SUFFIX, } INVALID_LDEV_PORT_FOR_MANAGE = { 'msg_id': 704, 'loglevel': base_logging.ERROR, 'msg': 'Failed to manage the specified LDEV (%(ldev)s). The LDEV ' 'must not be mapped.', 'suffix': ERROR_SUFFIX, } INVALID_LDEV_TYPE_FOR_UNMANAGE = { 'msg_id': 706, 'loglevel': base_logging.ERROR, 'msg': 'Failed to unmanage the volume %(volume_id)s. The volume ' 'type must be %(volume_type)s.', 'suffix': ERROR_SUFFIX, } INVALID_LDEV_FOR_MANAGE = { 'msg_id': 707, 'loglevel': base_logging.ERROR, 'msg': 'No valid value is specified for "source-id" or "source-name". ' 'A valid LDEV number must be specified in "source-id" or ' 'a valid LDEV name must be specified in "source-name" ' 'to manage the volume.', 'suffix': ERROR_SUFFIX, } FAILED_CREATE_CTG_SNAPSHOT = { 'msg_id': 712, 'loglevel': base_logging.ERROR, 'msg': 'Failed to create a consistency group snapshot. ' 'The number of pairs in the consistency group or the number of ' 'consistency group snapshots has reached the limit.', 'suffix': ERROR_SUFFIX, } LDEV_NOT_EXIST_FOR_ADD_GROUP = { 'msg_id': 716, 'loglevel': base_logging.ERROR, 'msg': 'No logical device exists in the storage system for the volume ' '%(volume_id)s to be added to the %(group)s %(group_id)s.', 'suffix': ERROR_SUFFIX, } SNAPSHOT_UNMANAGE_FAILED = { 'msg_id': 722, 'loglevel': base_logging.ERROR, 'msg': 'Failed to unmanage the snapshot %(snapshot_id)s. ' 'This driver does not support unmanaging snapshots.', 'suffix': ERROR_SUFFIX, } INVALID_EXTRA_SPEC_KEY = { 'msg_id': 723, 'loglevel': base_logging.ERROR, 'msg': 'Failed to create a volume. ' 'An invalid value is specified for the extra spec key ' '"%(key)s" of the volume type. (value: %(value)s)', 'suffix': ERROR_SUFFIX, } VOLUME_COPY_FAILED = { 'msg_id': 725, 'loglevel': base_logging.ERROR, 'msg': 'Failed to copy a volume. (P-VOL: %(pvol)s, S-VOL: %(svol)s)', 'suffix': ERROR_SUFFIX } CONSISTENCY_NOT_GUARANTEE = { 'msg_id': 726, 'loglevel': base_logging.ERROR, 'msg': 'A volume or snapshot cannot be deleted. ' 'The consistency of logical device for ' 'a volume or snapshot cannot be guaranteed. (LDEV: %(ldev)s)', 'suffix': ERROR_SUFFIX } FAILED_CHANGE_VOLUME_TYPE = { 'msg_id': 727, 'loglevel': base_logging.ERROR, 'msg': 'Failed to change a volume type. ' 'An invalid value is specified for the extra spec key ' '"%(key)s" of the volume type after change. ' '(value: %(value)s)', 'suffix': ERROR_SUFFIX } NOT_COMPLETED_CHANGE_VOLUME_TYPE = { 'msg_id': 728, 'loglevel': base_logging.ERROR, 'msg': 'The volume type change could not be completed. ' '(LDEV: %(ldev)s)', 'suffix': ERROR_SUFFIX } REST_SERVER_CONNECT_FAILED = { 'msg_id': 731, 'loglevel': base_logging.ERROR, 'msg': 'Failed to communicate with the REST API server. ' '(exception: %(exception)s, message: %(message)s, ' 'method: %(method)s, url: %(url)s, params: %(params)s, ' 'body: %(body)s)', 'suffix': ERROR_SUFFIX, } REST_API_FAILED = { 'msg_id': 732, 'loglevel': base_logging.ERROR, 'msg': 'The REST API failed. 
(source: %(errorSource)s, ' 'ID: %(messageId)s, message: %(message)s, cause: %(cause)s, ' 'solution: %(solution)s, code: %(errorCode)s, ' 'method: %(method)s, url: %(url)s, params: %(params)s, ' 'body: %(body)s)', 'suffix': ERROR_SUFFIX, } REST_API_TIMEOUT = { 'msg_id': 733, 'loglevel': base_logging.ERROR, 'msg': 'The REST API timed out. (job ID: %(job_id)s, ' 'job status: %(status)s, job state: %(state)s, ' 'method: %(method)s, url: %(url)s, params: %(params)s, ' 'body: %(body)s)', 'suffix': ERROR_SUFFIX, } REST_API_HTTP_ERROR = { 'msg_id': 734, 'loglevel': base_logging.ERROR, 'msg': 'The REST API failed. (HTTP status code: %(status_code)s, ' 'response body: %(response_body)s, ' 'method: %(method)s, url: %(url)s, params: %(params)s, ' 'body: %(body)s)', 'suffix': ERROR_SUFFIX, } GROUP_OBJECT_DELETE_FAILED = { 'msg_id': 736, 'loglevel': base_logging.ERROR, 'msg': 'Failed to delete a %(obj)s in a %(group)s. (%(group)s: ' '%(group_id)s, %(obj)s: %(obj_id)s, LDEV: %(ldev)s, reason: ' '%(reason)s)', 'suffix': ERROR_SUFFIX, } GROUP_SNAPSHOT_CREATE_FAILED = { 'msg_id': 737, 'loglevel': base_logging.ERROR, 'msg': 'Failed to create a volume snapshot in a group snapshot that ' 'does not guarantee consistency. (group: %(group)s, ' 'group snapshot: %(group_snapshot)s, group type: ' '%(group_type)s, volume: %(volume)s, snapshot: %(snapshot)s)', 'suffix': ERROR_SUFFIX, } NO_ACTIVE_WWN = { 'msg_id': 747, 'loglevel': base_logging.ERROR, 'msg': 'Failed to initialize volume connection because no active WWN ' 'was found for the connector. (WWN: %(wwn)s, volume: %(volume)s' ')', 'suffix': ERROR_SUFFIX, } NO_PORT_WITH_ACTIVE_WWN = { 'msg_id': 748, 'loglevel': base_logging.ERROR, 'msg': 'Failed to initialize volume connection because no port with ' 'an active WWN was found. (%(port_wwns)s, volume: %(volume)s)', 'suffix': ERROR_SUFFIX, } ZONE_MANAGER_IS_NOT_AVAILABLE = { 'msg_id': 749, 'loglevel': base_logging.ERROR, 'msg': 'The Fibre Channel Zone Manager is not available. The Fibre ' 'Channel Zone Manager must be up and running when ' 'port_scheduler parameter is set to True.', 'suffix': ERROR_SUFFIX, } HOST_GROUP_OR_WWN_IS_NOT_AVAILABLE = { 'msg_id': 750, 'loglevel': base_logging.ERROR, 'msg': 'Failed to initialize volume connection because no available ' 'resource of host group or wwn was found. (ports: %(ports)s)', 'suffix': ERROR_SUFFIX, } SITE_NOT_INITIALIZED = { 'msg_id': 751, 'loglevel': base_logging.ERROR, 'msg': 'The driver is not initialized for the %(site)s storage ' 'system.', 'suffix': ERROR_SUFFIX, } CREATE_REPLICATION_VOLUME_FAILED = { 'msg_id': 752, 'loglevel': base_logging.ERROR, 'msg': 'Failed to create the %(type)s for a %(rep_type)s pair. ' '(volume: %(volume_id)s, volume type: %(volume_type)s, ' 'size: %(size)s)', 'suffix': ERROR_SUFFIX, } DEDUPLICATION_IS_ENABLED = { 'msg_id': 753, 'loglevel': base_logging.ERROR, 'msg': 'Failed to create a volume in a %(rep_type)s environment ' 'because deduplication is enabled for the volume type. ' '(volume: %(volume_id)s, volume type: %(volume_type)s, ' 'size: %(size)s)', 'suffix': ERROR_SUFFIX, } CREATE_REPLICATION_PAIR_FAILED = { 'msg_id': 754, 'loglevel': base_logging.ERROR, 'msg': 'Failed to create a %(rep_type)s pair or ' 'to mirror data in a %(rep_type)s pair. ' '(P-VOL: %(pvol)s, S-VOL: %(svol)s, copy group: ' '%(copy_group)s, pair status: %(status)s)', 'suffix': ERROR_SUFFIX, } SPLIT_REPLICATION_PAIR_FAILED = { 'msg_id': 755, 'loglevel': base_logging.ERROR, 'msg': 'Failed to split a %(rep_type)s pair. 
' '(P-VOL: %(pvol)s, S-VOL: %(svol)s, ' 'copy group: %(copy_group)s, pair status: %(status)s)', 'suffix': ERROR_SUFFIX, } PAIR_CHANGE_TIMEOUT = { 'msg_id': 756, 'loglevel': base_logging.ERROR, 'msg': 'A timeout occurred before the status of ' 'the %(rep_type)s pair changes. ' '(P-VOL: %(pvol)s, S-VOL: %(svol)s, copy group: ' '%(copy_group)s, current status: %(current_status)s, ' 'expected status: %(expected_status)s, timeout: %(timeout)s ' 'seconds)', 'suffix': ERROR_SUFFIX, } EXTEND_REPLICATION_VOLUME_ERROR = { 'msg_id': 758, 'loglevel': base_logging.ERROR, 'msg': 'Failed to extend a volume. The LDEVs for the volume are in ' 'a %(rep_type)s pair and the volume is attached. ' '(volume: %(volume_id)s, ' 'LDEV: %(ldev)s, source size: %(source_size)s, destination ' 'size: %(destination_size)s, P-VOL: %(pvol)s, S-VOL: %(svol)s, ' 'P-VOL[numOfPorts]: %(pvol_num_of_ports)s, ' 'S-VOL[numOfPorts]: %(svol_num_of_ports)s)', 'suffix': ERROR_SUFFIX, } MIGRATE_VOLUME_FAILED = { 'msg_id': 760, 'loglevel': base_logging.ERROR, 'msg': 'Failed to migrate a volume. The volume is in a copy pair that ' 'cannot be deleted. (volume: %(volume)s, LDEV: %(ldev)s, ' '(P-VOL, S-VOL, copy method, status): %(pair_info)s)', 'suffix': ERROR_SUFFIX, } REPLICATION_PAIR_ERROR = { 'msg_id': 766, 'loglevel': base_logging.ERROR, 'msg': 'Failed to %(operation)s. The LDEV for the volume is in ' 'a remote replication pair. (volume: %(volume)s, ' '%(snapshot_info)sLDEV: %(ldev)s)', 'suffix': ERROR_SUFFIX, } LDEV_NUMBER_NOT_FOUND = { 'msg_id': 770, 'loglevel': base_logging.ERROR, 'msg': 'Failed to %(operation)s. The LDEV number is not found in the ' 'Cinder object. (%(obj)s: %(obj_id)s)', 'suffix': ERROR_SUFFIX, } def __init__(self, error_info): """Initialize Enum attributes.""" self.msg_id = error_info['msg_id'] self.level = error_info['loglevel'] self.msg = error_info['msg'] self.suffix = error_info['suffix'] def output_log(self, storage_id, **kwargs): """Output the message to the log file and return the message.""" msg = self.msg % kwargs if storage_id: LOG.log( self.level, "%(storage_id)s MSGID%(msg_id)04d-%(msg_suffix)s: %(msg)s", {'storage_id': storage_id[-6:], 'msg_id': self.msg_id, 'msg_suffix': self.suffix, 'msg': msg}) else: LOG.log( self.level, "MSGID%(msg_id)04d-%(msg_suffix)s: %(msg)s", {'msg_id': self.msg_id, 'msg_suffix': self.suffix, 'msg': msg}) return msg def output_log(msg_enum, storage_id=None, **kwargs): """Output the specified message to the log file and return the message.""" return msg_enum.output_log(storage_id, **kwargs) LOG = logging.getLogger(__name__) MSG = HBSDMsg def timed_out(start_time, timeout): """Check if the specified time has passed.""" return timeutils.is_older_than(start_time, timeout) def build_initiator_target_map(connector, target_wwns, lookup_service): """Return a dictionary mapping server-wwns and lists of storage-wwns.""" init_targ_map = {} initiator_wwns = connector['wwpns'] if lookup_service: dev_map = lookup_service.get_device_mapping_from_network( initiator_wwns, target_wwns) for fabric_name in dev_map: fabric = dev_map[fabric_name] for initiator in fabric['initiator_port_wwn_list']: init_targ_map[initiator] = fabric['target_port_wwn_list'] else: for initiator in initiator_wwns: init_targ_map[initiator] = target_wwns return init_targ_map def safe_get_err_code(errobj): if not errobj: return '', '' err_code = errobj.get('errorCode', {}) return err_code.get('SSB1', '').upper(), err_code.get('SSB2', '').upper() def safe_get_return_code(errobj): if not errobj: return '' err_code = 
errobj.get('errorCode', {})
    return err_code.get('errorCode', '')


def safe_get_message_id(errobj):
    if not errobj:
        return ''
    return errobj.get('messageId', '')


def safe_get_message(errobj):
    if not errobj:
        return ''
    return errobj.get('message', '')


def is_shared_connection(volume, connector):
    """Check if volume is multiattach to 1 node."""
    connection_count = 0
    host = connector.get('host') if connector else None
    if host and volume.get('multiattach'):
        attachment_list = volume.volume_attachment
        try:
            att_list = attachment_list.object
        except AttributeError:
            att_list = attachment_list
        for attachment in att_list:
            if attachment.attached_host == host:
                connection_count += 1
    return connection_count > 1


def cleanup_cg_in_volume(volume):
    if ('group_id' in volume and volume.group_id and
            'consistencygroup_id' in volume and
            volume.consistencygroup_id):
        volume.consistencygroup_id = None
        if 'consistencygroup' in volume:
            volume.consistencygroup = None


def get_exception_msg(exc):
    if exc.args:
        return exc.msg if isinstance(
            exc, exception.CinderException) else exc.args[0]
    else:
        return ""


def synchronized_on_copy_group():
    def wrap(func):
        @functools.wraps(func)
        def inner(self, remote_client, copy_group_name, *args, **kwargs):
            sync_key = '%s-%s' % (copy_group_name, self.storage_id[-6:])

            @cinder_utils.synchronized(sync_key, external=True)
            def _inner():
                return func(self, remote_client, copy_group_name,
                            *args, **kwargs)
            return _inner()
        return inner
    return wrap


def get_qos_specs_from_volume(target):
    """Return a dictionary of the QoS specs of the target.

    :param target: Volume or Snapshot whose QoS specs are queried.
    :type target: Volume or Snapshot
    :return: QoS specs.
    :rtype: dict
    """
    # If the target is a Volume, volume_type is volume.volume_type.
    # If the target is a Snapshot, volume_type is snapshot.volume.volume_type.
    # We combine these into "getattr(target, 'volume', target).volume_type".
    return get_qos_specs_from_volume_type(
        getattr(target, 'volume', target).volume_type)


def get_qos_specs_from_volume_type(volume_type):
    """Return a dictionary of the QoS specs of the volume_type.

    :param volume_type: VolumeType whose QoS specs are queried.
        This must not be None.
    :type volume_type: VolumeType
    :return: QoS specs.
:rtype: dict The following is an example of the returned value: {'lowerTransferRate': 7, 'responsePriority': 2, 'upperIops': 456} """ qos = {} specs = volume_types.get_volume_type_qos_specs(volume_type.id)['qos_specs'] # The following is an example of the specs: # {'consumer': 'back-end', # 'created_at': datetime.datetime(2024, 9, 2, 3, 11, 1), # 'id': '81058c04-06eb-49d7-9199-7016785bf386', # 'name': 'qos1', # 'specs': {'lowerTransferRate': '7', # 'responsePriority': '2', # 'upperIops': '456'}} if specs is None: return qos if 'consumer' in specs and specs['consumer'] not in ('back-end', 'both'): return qos for key in specs['specs'].keys(): if key in QOS_KEYS: if specs['specs'][key].isdigit(): qos[key] = int(specs['specs'][key]) else: qos[key] = specs['specs'][key] return qos DICT = '_dict' CONF = '_conf' class Config(object): def __init__(self, conf): super().__setattr__(CONF, conf) super().__setattr__(DICT, dict()) self._opts = {} def __getitem__(self, name): return (super().__getattribute__(DICT)[name] if name in super().__getattribute__(DICT) else super().__getattribute__(CONF).safe_get(name)) def __getattr__(self, name): return (super().__getattribute__(DICT)[name] if name in super().__getattribute__(DICT) else getattr(super().__getattribute__(CONF), name)) def __setitem__(self, key, value): super().__getattribute__(DICT)[key] = value def __setattr__(self, key, value): self.__setitem__(key, value) def safe_get(self, name): return (super().__getattribute__(DICT)[name] if name in super().__getattribute__(DICT) else super().__getattribute__(CONF).safe_get(name)) ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1759315577.355121 cinder-27.0.0/cinder/volume/drivers/hpe/0000775000175000017500000000000000000000000020074 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/drivers/hpe/__init__.py0000664000175000017500000000000000000000000022173 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/drivers/hpe/hpe_3par_base.py0000664000175000017500000005310200000000000023142 0ustar00zuulzuul00000000000000# (c) Copyright 2013-2015 Hewlett Packard Enterprise Development LP # All Rights Reserved. # # Copyright 2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # """ Base class for HPE Storage Drivers. This driver requires 3.1.3 or later firmware on the 3PAR array, using the 4.x version of the hpe3parclient. You will need to install the python hpe3parclient. 
    sudo pip install --upgrade "hpe3parclient>=4.0"

"""

try:
    from hpe3parclient import exceptions as hpeexceptions
except ImportError:
    hpeexceptions = None

from oslo_log import log as logging

from cinder import coordination
from cinder import exception
from cinder.i18n import _
from cinder.volume import driver
from cinder.volume.drivers.hpe import hpe_3par_common as hpecommon
from cinder.volume.drivers.san import san
from cinder.volume import volume_utils

LOG = logging.getLogger(__name__)


class HPE3PARDriverBase(driver.ManageableVD,
                        driver.ManageableSnapshotsVD,
                        driver.MigrateVD,
                        driver.BaseVD):
    """OpenStack base driver to enable 3PAR storage array.

    Version history:

    .. code-block:: none

        1.0.0 - Initial base driver
        1.0.1 - Adds consistency group capability in generic volume groups.
        1.0.2 - Adds capability.
        1.0.3 - Added Tiramisu feature on 3PAR.
        1.0.4 - Fixed Volume migration for "in-use" volume. bug #1744021
        1.0.5 - Set proper backend on subsequent operation, after group
                failover. bug #1773069

    """

    VERSION = "1.0.5"

    def __init__(self, *args, **kwargs):
        super(HPE3PARDriverBase, self).__init__(*args, **kwargs)
        self._active_backend_id = kwargs.get('active_backend_id', None)
        self.configuration.append_config_values(hpecommon.hpe3par_opts)
        self.configuration.append_config_values(san.san_opts)
        self.protocol = None
        self.common = None

    @staticmethod
    def get_driver_options():
        return hpecommon.HPE3PARCommon.get_driver_options()

    def _init_common(self):
        self.common = hpecommon.HPE3PARCommon(self.configuration,
                                              self._active_backend_id)
        return self.common

    def _login(self, timeout=None, array_id=None):
        self.common = self._init_common()
        # If replication is enabled and we cannot login, we do not want to
        # raise an exception so a failover can still be executed.
        try:
            self.common.do_setup(None, timeout=timeout, stats=self._stats,
                                 array_id=array_id)
            self.common.client_login()
        except Exception:
            if self.common._replication_enabled:
                LOG.warning("The primary array is not reachable at this "
                            "time. Since replication is enabled, "
                            "listing replication targets and failing over "
                            "a volume can still be performed.")
            else:
                raise
        return self.common

    def _logout(self, common):
        # If replication is enabled and we do not have a client ID, we did not
        # login, but can still failover. There is no need to logout.
        if common.client is None and common._replication_enabled:
            return
        common.client_logout()

    def _check_flags(self, common):
        """Sanity check to ensure we have required options set."""
        required_flags = ['hpe3par_api_url', 'hpe3par_username',
                          'hpe3par_password', 'san_ip', 'san_login',
                          'san_password']
        common.check_flags(self.configuration, required_flags)

    def get_volume_replication_driver_data(self, volume):
        if (volume.get("group_id") and volume.get("replication_status") and
                volume.get("replication_status") == "failed-over"):
            return int(volume.get("replication_driver_data"))
        return None

    @volume_utils.trace
    def get_volume_stats(self, refresh=False):
        # NOTE(geguileo): We don't need to login to the backend if we are not
        # going to refresh the stats, furthermore if we login, then we'll
        # return an empty dict, because the _login method calls
        # _init_common which returns a new HPE3PARCommon instance each time,
        # so it won't have any cached values.
if not refresh: return self._stats self._stats = self.common.get_volume_stats( refresh, self.get_filter_function(), self.get_goodness_function()) self._stats['storage_protocol'] = self.protocol self._stats['driver_version'] = self.VERSION backend_name = self.configuration.safe_get('volume_backend_name') self._stats['volume_backend_name'] = (backend_name or self.__class__.__name__) return self._stats def check_for_setup_error(self): """Setup errors are already checked for in do_setup so return pass.""" pass @volume_utils.trace def create_volume(self, volume, perform_replica=True): return self.common.create_volume(volume) @volume_utils.trace def create_cloned_volume(self, volume, src_vref): """Clone an existing volume.""" return self.common.create_cloned_volume(volume, src_vref) @volume_utils.trace def delete_volume(self, volume): return self.common.delete_volume(volume) @volume_utils.trace def create_volume_from_snapshot(self, volume, snapshot): """Creates a volume from a snapshot. TODO: support using the size from the user. """ return self.common.create_volume_from_snapshot(volume, snapshot) @volume_utils.trace def create_snapshot(self, snapshot): return self.common.create_snapshot(snapshot) @volume_utils.trace def delete_snapshot(self, snapshot): return self.common.delete_snapshot(snapshot) @volume_utils.trace def extend_volume(self, volume, new_size): return self.common.extend_volume(volume, new_size) @volume_utils.trace def create_group(self, context, group): return self.common.create_group(context, group) @volume_utils.trace def create_group_from_src(self, context, group, volumes, group_snapshot=None, snapshots=None, source_group=None, source_vols=None): return self.common.create_group_from_src( context, group, volumes, group_snapshot, snapshots, source_group, source_vols) @volume_utils.trace def delete_group(self, context, group, volumes): return self.common.delete_group(context, group, volumes) @volume_utils.trace def update_group(self, context, group, add_volumes=None, remove_volumes=None): return self.common.update_group(context, group, add_volumes, remove_volumes) @volume_utils.trace def create_group_snapshot(self, context, group_snapshot, snapshots): return self.common.create_group_snapshot(context, group_snapshot, snapshots) @volume_utils.trace def delete_group_snapshot(self, context, group_snapshot, snapshots): return self.common.delete_group_snapshot(context, group_snapshot, snapshots) @volume_utils.trace def manage_existing(self, volume, existing_ref): return self.common.manage_existing(volume, existing_ref) @volume_utils.trace def manage_existing_snapshot(self, snapshot, existing_ref): return self.common.manage_existing_snapshot(snapshot, existing_ref) @volume_utils.trace def manage_existing_get_size(self, volume, existing_ref): return self.common.manage_existing_get_size(volume, existing_ref) @volume_utils.trace def manage_existing_snapshot_get_size(self, snapshot, existing_ref): return self.common.manage_existing_snapshot_get_size(snapshot, existing_ref) @volume_utils.trace def unmanage(self, volume): return self.common.unmanage(volume) @volume_utils.trace def unmanage_snapshot(self, snapshot): return self.common.unmanage_snapshot(snapshot) @volume_utils.trace def get_manageable_volumes(self, cinder_volumes, marker, limit, offset, sort_keys, sort_dirs): return self.common.get_manageable_volumes(cinder_volumes, marker, limit, offset, sort_keys, sort_dirs) @volume_utils.trace def get_manageable_snapshots(self, cinder_snapshots, marker, limit, offset, sort_keys, sort_dirs): 
return self.common.get_manageable_snapshots(cinder_snapshots, marker, limit, offset, sort_keys, sort_dirs) @volume_utils.trace def retype(self, context, volume, new_type, diff, host): """Convert the volume to be of the new type.""" common = self._login() try: return common.retype(volume, new_type, diff, host) finally: self._logout(common) @volume_utils.trace def migrate_volume(self, context, volume, host): if volume['status'] == 'in-use': protocol = host['capabilities']['storage_protocol'] if protocol != self.protocol: LOG.debug("3PAR %(protocol)s driver cannot migrate in-use " "volume to a host with " "storage_protocol=%(storage_protocol)s", {'protocol': self.protocol, 'storage_protocol': protocol}) return False, None return self.common.migrate_volume(volume, host) @volume_utils.trace def update_migrated_volume(self, context, volume, new_volume, original_volume_status): """Update the name of the migrated volume to it's new ID.""" return self.common.update_migrated_volume(context, volume, new_volume, original_volume_status) @volume_utils.trace def get_pool(self, volume): try: return self.common.get_cpg(volume) except hpeexceptions.HTTPNotFound: reason = (_("Volume %s doesn't exist on array.") % volume) LOG.error(reason) raise exception.InvalidVolume(reason) @volume_utils.trace def revert_to_snapshot(self, context, volume, snapshot): """Revert volume to snapshot.""" return self.common.revert_to_snapshot(volume, snapshot) @volume_utils.trace def failover_host(self, context, volumes, secondary_id=None, groups=None): """Force failover to a secondary replication target.""" common = self._login(timeout=30) try: # Update the active_backend_id in the driver and return it. active_backend_id, volume_updates, group_update_list = ( common.failover_host( context, volumes, secondary_id, groups)) self._active_backend_id = active_backend_id return active_backend_id, volume_updates, group_update_list finally: self._logout(common) def enable_replication(self, context, group, volumes): """Enable replication for a group. :param context: the context :param group: the group object :param volumes: the list of volumes :returns: model_update, None """ return self.common.enable_replication(context, group, volumes) def disable_replication(self, context, group, volumes): """Disable replication for a group. :param context: the context :param group: the group object :param volumes: the list of volumes :returns: model_update, None """ return self.common.disable_replication(context, group, volumes) def failover_replication(self, context, group, volumes, secondary_backend_id=None): """Failover replication for a group. 
:param context: the context :param group: the group object :param volumes: the list of volumes :param secondary_backend_id: the secondary backend id - default None :returns: model_update, vol_model_updates """ common = self._login() try: return common.failover_replication( context, group, volumes, secondary_backend_id) finally: self._logout(common) def do_setup(self, context): common = self._init_common() common.do_setup(context) self._check_flags(common) common.check_for_setup_error() self._do_setup(common) def _do_setup(self, common): pass def create_export(self, context, volume, connector): pass def ensure_export(self, context, volume): pass def remove_export(self, context, volume): pass def terminate_connection(self, volume, connector, **kwargs): pass def initialize_connection(self, volume, connector): pass @volume_utils.trace def _init_vendor_properties(self): """Create a dictionary of vendor unique properties. This method creates a dictionary of vendor unique properties and returns both created dictionary and vendor name. Returned vendor name is used to check for name of vendor unique properties. - Vendor name shouldn't include colon(:) because of the separator and it is automatically replaced by underscore(_). ex. abc:d -> abc_d - Vendor prefix is equal to vendor name. ex. abcd - Vendor unique properties must start with vendor prefix + ':'. ex. abcd:maxIOPS Each backend driver needs to override this method to expose its own properties using _set_property() like this: self._set_property( properties, "vendorPrefix:specific_property", "Title of property", _("Description of property"), "type") : return dictionary of vendor unique properties : return vendor name prefix: HPE:3PAR --> HPE_3PAR """ properties = {} valid_prov_values = ['thin', 'full', 'dedup'] valid_persona_values = ['2 - Generic-ALUA', '1 - Generic', '3 - Generic-legacy', '4 - HPEUX-legacy', '5 - AIX-legacy', '6 - EGENERA', '7 - ONTAP-legacy', '8 - VMware', '9 - OpenVMS', '10 - HPEUX', '11 - WindowsServer'] self._set_property( properties, "HPE:3PAR:hpe3par:snap_cpg", "Snap CPG Extra-specs.", _("Specifies the Snap CPG for a volume type. It overrides the " "hpe3par_cpg_snap setting. Defaults to the hpe3par_cpg_snap " "setting in the cinder.conf file. If hpe3par_cpg_snap is not " "set, it defaults to the hpe3par_cpg setting."), "string") self._set_property( properties, "HPE:3PAR:hpe3par:persona", "Host Persona Extra-specs.", _("Specifies the host persona property for a volume type. It " "overrides the hpe3par_cpg_snap setting. Defaults to the " "hpe3par_cpg_snap setting in the cinder.conf file. " "If hpe3par_cpg_snap is not set, " "it defaults to the hpe3par_cpg setting."), "string", enum=valid_persona_values, default="2 - Generic-ALUA") self._set_property( properties, "HPE:3PAR:hpe3par:vvs", "Virtual Volume Set Extra-specs.", _("The virtual volume set name that has been set up by the " "administrator that would have predefined QoS rules " "associated with it. 
If you specify extra_specs " "hpe3par:vvs, the qos_specs minIOPS, maxIOPS, minBWS, " "and maxBWS settings are ignored."), "string") self._set_property( properties, "HPE:3PAR:hpe3par:flash_cache", "Flash cache Extra-specs.", _("Enables Flash cache setting for a volume type."), "boolean", default=False) self._set_property( properties, "HPE:3PAR:hpe3par:provisioning", "Storage Provisioning Extra-specs.", _("Specifies the provisioning for a volume type."), "string", enum=valid_prov_values, default="thin") self._set_property( properties, "HPE:3PAR:hpe3par:compression", "Storage Provisioning Extra-specs.", _("Enables compression for a volume type. " "Minimum requirement of 3par OS version is 3.3.1 " "with SSD drives only. " "Volume size must have > 16 GB to enable " "compression on volume. " "A full provisioned volume cannot be compressed."), "boolean", default=False) self._set_property( properties, "HPE:3PAR:replication_enabled", "Volume Replication Extra-specs.", _("The valid value is: True " "If True, the volume is to be replicated, if supported, " "by the backend driver. If the option is not specified or " "false, then replication is not enabled. This option is " "required to enable replication."), "string", enum=[" True"], default=False) self._set_property( properties, "HPE:3PAR:replication:mode", "Replication Mode Extra-specs.", _("Sets the replication mode for 3par."), "string", enum=["sync", "periodic"], default="periodic") self._set_property( properties, "HPE:3PAR:replication:sync_period", "Sync Period for Volume Replication Extra-specs.", _("Sets the time interval for synchronization. " "Only needed if replication:mode is periodic."), "integer", default=900) self._set_property( properties, "HPE:3PAR:replication:retention_count", "Retention Count for Replication Extra-specs.", _("Sets the number of snapshots that will be " "saved on the primary array."), "integer", default=5) self._set_property( properties, "HPE:3PAR:replication:remote_retention_count", "Remote Retention Count for Replication Extra-specs.", _("Sets the number of snapshots that will be " "saved on the secondary array."), "integer", default=5) # ###### QoS Settings ###### # self._set_property( properties, "HPE:3PAR:minIOPS", "Minimum IOPS QoS.", _("Sets the QoS, I/O issue count minimum goal. " "If not specified, there is no limit on I/O issue count."), "integer") self._set_property( properties, "HPE:3PAR:maxIOPS", "Maximum IOPS QoS.", _("Sets the QoS, I/O issue count rate limit. " "If not specified, there is no limit on I/O issue count."), "integer") self._set_property( properties, "HPE:3PAR:minBWS", "Minimum Bandwidth QoS.", _("Sets the QoS, I/O issue bandwidth minimum goal. " "If not specified, there is no limit on " "I/O issue bandwidth rate."), "integer") self._set_property( properties, "HPE:3PAR:maxBWS", "Maximum Bandwidth QoS.", _("Sets the QoS, I/O issue bandwidth rate limit. 
" "If not specified, there is no limit on I/O issue " "bandwidth rate."), "integer") self._set_property( properties, "HPE:3PAR:latency", "Latency QoS.", _("Sets the latency goal in milliseconds."), "integer") self._set_property( properties, "HPE:3PAR:priority", "Priority QoS.", _("Sets the priority of the QoS rule over other rules."), "string", enum=["low", "normal", "high"], default="normal") return properties, 'HPE:3PAR' @classmethod def clean_volume_file_locks(cls, volume_id): coordination.synchronized_remove('3par-' + volume_id) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/drivers/hpe/hpe_3par_common.py0000664000175000017500000074616400000000000023541 0ustar00zuulzuul00000000000000# (c) Copyright 2012-2016 Hewlett Packard Enterprise Development LP # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # """Volume driver common utilities for HPE 3PAR Storage array. The 3PAR drivers requires 3.1.3 firmware on the 3PAR array. You will need to install the python hpe3parclient module. sudo pip install python-3parclient The drivers uses both the REST service and the SSH command line to correctly operate. Since the ssh credentials and the REST credentials can be different we need to have settings for both. The drivers requires the use of the san_ip, san_login, san_password settings for ssh connections into the 3PAR array. It also requires the setting of hpe3par_api_url, hpe3par_username, hpe3par_password for credentials to talk to the REST service on the 3PAR array. """ import ast import json import math import pprint import re import uuid from oslo_config import cfg from oslo_log import log as logging from oslo_log import versionutils from oslo_serialization import base64 from oslo_service import loopingcall from oslo_utils import excutils from oslo_utils import units import taskflow.engines from taskflow.patterns import linear_flow from cinder import context from cinder import exception from cinder import flow_utils from cinder.i18n import _ from cinder import objects from cinder.objects import fields from cinder import utils from cinder.volume import configuration from cinder.volume import driver from cinder.volume import qos_specs from cinder.volume import volume_types from cinder.volume import volume_utils try: import hpe3parclient from hpe3parclient import client from hpe3parclient import exceptions as hpeexceptions except ImportError: hpe3parclient = None client = None hpeexceptions = None LOG = logging.getLogger(__name__) MIN_CLIENT_VERSION = '4.2.10' DEDUP_API_VERSION = 30201120 FLASH_CACHE_API_VERSION = 30201200 COMPRESSION_API_VERSION = 30301215 SRSTATLD_API_VERSION = 30201200 REMOTE_COPY_API_VERSION = 30202290 API_VERSION_2023 = 100000000 API_VERSION_2025 = 100500000 hpe3par_opts = [ cfg.StrOpt('hpe3par_api_url', default='', help="WSAPI Server URL. 
" "This setting applies to: 3PAR, Primera, Alletra 9k and " "Alletra MP" "\n Example 1: for 3PAR, URL is: " "\n https://<3par ip>:8080/api/v1 " "\n Example 2: for Primera/Alletra 9k/Alletra MP, " "URL is: " "\n https://:443/api/v1"), cfg.StrOpt('hpe3par_username', default='', help="3PAR/Primera/Alletra 9k/Alletra MP username with the " "'edit' role"), cfg.StrOpt('hpe3par_password', default='', help="3PAR/Primera/Alletra 9k/Alletra MP password for the " "user specified in hpe3par_username", secret=True), cfg.ListOpt('hpe3par_cpg', default=["OpenStack"], help="List of the 3PAR/Primera/Alletra 9k/Alletra MP CPG(s) " "to use for volume creation"), cfg.StrOpt('hpe3par_cpg_snap', default="", help="The 3PAR/Primera/Alletra 9k/Alletra MP CPG to use for " "snapshots of volumes. If empty the userCPG will be used"), cfg.StrOpt('hpe3par_snapshot_retention', default="", help="The time in hours to retain a snapshot. " "You can't delete it before this expires."), cfg.StrOpt('hpe3par_snapshot_expiration', default="", help="The time in hours when a snapshot expires " " and is deleted. This must be larger than expiration"), cfg.BoolOpt('hpe3par_debug', default=False, help="Enable HTTP debugging to 3PAR/Primera/Alletra 9k/" "Alletra MP"), cfg.ListOpt('hpe3par_iscsi_ips', default=[], help="List of target iSCSI addresses to use."), cfg.BoolOpt('hpe3par_iscsi_chap_enabled', default=False, help="Enable CHAP authentication for iSCSI connections."), cfg.StrOpt('hpe3par_target_nsp', default="", help="The nsp of 3PAR/Primera/Alletra 9k/Alletra MP backend to " "be used when: (1) multipath is not enabled in cinder.conf" ". (2) Fiber Channel Zone Manager is not used. " "(3) the backend is prezoned with this " "specific nsp only. For example if nsp is 2 1 2, the " "format of the option's value is 2:1:2"), ] CONF = cfg.CONF CONF.register_opts(hpe3par_opts, group=configuration.SHARED_CONF_GROUP) # Input/output (total read/write) operations per second. THROUGHPUT = 'throughput' # Data processed (total read/write) per unit time: kilobytes per second. BANDWIDTH = 'bandwidth' # Response time (total read/write): microseconds. LATENCY = 'latency' # IO size (total read/write): kilobytes. IO_SIZE = 'io_size' # Queue length for processing IO requests QUEUE_LENGTH = 'queue_length' # Average busy percentage AVG_BUSY_PERC = 'avg_busy_perc' class Invalid3PARDomain(exception.VolumeDriverException): message = _("Invalid 3PAR Domain: %(err)s") class HPE3PARCommon(object): """Class that contains common code for the 3PAR drivers. Version history: .. code-block:: none 1.2.0 - Updated hp3parclient API use to 2.0.x 1.2.1 - Check that the VVS exists 1.2.2 - log prior to raising exceptions 1.2.3 - Methods to update key/value pair bug #1258033 1.2.4 - Remove deprecated config option hp3par_domain 1.2.5 - Raise Ex when deleting snapshot with dependencies bug #1250249 1.2.6 - Allow optional specifying n:s:p for vlun creation bug #1269515 This update now requires 3.1.2 MU3 firmware 1.3.0 - Removed all SSH code. We rely on the hp3parclient now. 
2.0.0 - Update hp3parclient API uses 3.0.x 2.0.1 - Updated to use qos_specs, added new qos settings and personas 2.0.2 - Add back-end assisted volume migrate 2.0.3 - Allow deleting missing snapshots bug #1283233 2.0.4 - Allow volumes created from snapshots to be larger bug #1279478 2.0.5 - Fix extend volume units bug #1284368 2.0.6 - use loopingcall.wait instead of time.sleep 2.0.7 - Allow extend volume based on snapshot bug #1285906 2.0.8 - Fix detach issue for multiple hosts bug #1288927 2.0.9 - Remove unused 3PAR driver method bug #1310807 2.0.10 - Fixed an issue with 3PAR vlun location bug #1315542 2.0.11 - Remove hp3parclient requirement from unit tests #1315195 2.0.12 - Volume detach hangs when host is in a host set bug #1317134 2.0.13 - Added support for managing/unmanaging of volumes 2.0.14 - Modified manage volume to use standard 'source-name' element. 2.0.15 - Added support for volume retype 2.0.16 - Add a better log during delete_volume time. Bug #1349636 2.0.17 - Added iSCSI CHAP support This update now requires 3.1.3 MU1 firmware and hp3parclient 3.1.0 2.0.18 - HP 3PAR manage_existing with volume-type support 2.0.19 - Update default persona from Generic to Generic-ALUA 2.0.20 - Configurable SSH missing key policy and known hosts file 2.0.21 - Remove bogus invalid snapCPG=None exception 2.0.22 - HP 3PAR drivers should not claim to have 'infinite' space 2.0.23 - Increase the hostname size from 23 to 31 Bug #1371242 2.0.24 - Add pools (hp3par_cpg now accepts a list of CPGs) 2.0.25 - Migrate without losing type settings bug #1356608 2.0.26 - Don't ignore extra-specs snap_cpg when missing cpg #1368972 2.0.27 - Fixing manage source-id error bug #1357075 2.0.28 - Removing locks bug #1381190 2.0.29 - Report a limitless cpg's stats better bug #1398651 2.0.30 - Update the minimum hp3parclient version bug #1402115 2.0.31 - Removed usage of host name cache #1398914 2.0.32 - Update LOG usage to fix translations. bug #1384312 2.0.33 - Fix host persona to match WSAPI mapping bug #1403997 2.0.34 - Fix log messages to match guidelines. bug #1411370 2.0.35 - Fix default snapCPG for manage_existing bug #1393609 2.0.36 - Added support for dedup provisioning 2.0.37 - Added support for enabling Flash Cache 2.0.38 - Add stats for hp3par goodness_function and filter_function 2.0.39 - Added support for updated detach_volume attachment. 2.0.40 - Make the 3PAR drivers honor the pool in create bug #1432876 2.0.41 - Only log versions at startup. bug #1447697 2.0.42 - Fix type for snapshot config settings. bug #1461640 2.0.43 - Report the capability of supporting multiattach 2.0.44 - Update help strings to reduce the 3PAR user role requirements 2.0.45 - Python 3 fixes 2.0.46 - Improved VLUN creation and deletion logic. #1469816 2.0.47 - Changed initialize_connection to use getHostVLUNs. #1475064 2.0.48 - Adding changes to support 3PAR iSCSI multipath. 2.0.49 - Added client CPG stats to driver volume stats. bug #1482741 2.0.50 - Add over subscription support 2.0.51 - Adds consistency group support 2.0.52 - Added update_migrated_volume. bug #1492023 2.0.53 - Fix volume size conversion. bug #1513158 3.0.0 - Rebranded HP to HPE. 
3.0.1 - Fixed find_existing_vluns bug #1515033 3.0.2 - Python 3 support 3.0.3 - Remove db access for consistency groups 3.0.4 - Adds v2 managed replication support 3.0.5 - Adds v2 unmanaged replication support 3.0.6 - Adding manage/unmanage snapshot support 3.0.7 - Enable standard capabilities based on 3PAR licenses 3.0.8 - Optimize array ID retrieval 3.0.9 - Bump minimum API version for volume replication 3.0.10 - Added additional volumes checks to the manage snapshot API 3.0.11 - Fix the image cache capability bug #1491088 3.0.12 - Remove client version checks for replication 3.0.13 - Support creating a cg from a source cg 3.0.14 - Comparison of WWNs now handles case difference. bug #1546453 3.0.15 - Update replication to version 2.1 3.0.16 - Use same LUN ID for each VLUN path #1551994 3.0.17 - Don't fail on clearing 3PAR object volume key. bug #1546392 3.0.18 - create_cloned_volume account for larger size. bug #1554740 3.0.19 - Remove metadata that tracks the instance ID. bug #1572665 3.0.20 - Fix lun_id of 0 issue. bug #1573298 3.0.21 - Driver no longer fails to initialize if System Reporter license is missing. bug #1568078 3.0.22 - Rework delete_vlun. Bug #1582922 3.0.23 - Fix CG create failures with long display name or special characters. bug #1573647 3.0.24 - Fix terminate connection on failover 3.0.25 - Fix delete volume when online clone is active. bug #1349639 3.0.26 - Fix concurrent snapshot delete conflict. bug #1600104 3.0.27 - Fix snapCPG error during backup of attached volume. Bug #1646396 and also ,Fix backup of attached ISCSI and CHAP enabled volume.bug #1644238. 3.0.28 - Remove un-necessary snapshot creation of source volume while doing online copy in create_cloned_volume call. Bug #1661541 3.0.29 - Fix convert snapshot volume to base volume type. bug #1656186 3.0.30 - Handle manage and unmanage hosts present. bug #1648067 3.0.31 - Enable HPE-3PAR Compression Feature. 3.0.32 - Add consistency group capability to generic volume group in HPE-3APR 3.0.33 - Added replication feature in retype flow. bug #1680313 3.0.34 - Add cloned volume to vvset in online copy. bug #1664464 3.0.35 - Add volume to consistency group if flag enabled. bug #1702317 3.0.36 - Swap volume name in migration. bug #1699733 3.0.37 - Fixed image cache enabled capability. bug #1686985 3.0.38 - Fixed delete operation of replicated volume which is part of QOS. bug #1717875 3.0.39 - Add support for revert to snapshot. 4.0.0 - Code refactor. 4.0.1 - Added check to modify host after volume detach. bug #1730720 4.0.2 - Added Tiramisu feature on 3PAR. 4.0.3 - Fixed create group from source functionality in case of tiramisu. bug #1742092. 4.0.4 - Fixed setting of sync_period value in rcopygroup. bug #1746235 4.0.5 - Fixed volume created and added in cloned group, differs from volume present in the source group in terms of extra-specs. bug #1744025 4.0.6 - Monitor task of promoting a virtual copy. bug #1749642 4.0.7 - Handle force detach case. bug #1686745 4.0.8 - Added support for report backend state in service list. 4.0.9 - Set proper backend on subsequent operation, after group failover. bug #1773069 4.0.10 - Added retry in delete_volume. bug #1783934 4.0.11 - Added extra spec hpe3par:convert_to_base 4.0.12 - Added multiattach support 4.0.13 - Fixed detaching issue for volume with type multiattach enabled. bug #1834660 4.0.14 - Added Peer Persistence feature 4.0.15 - Support duplicated FQDN in network. Bug #1834695 4.0.16 - In multi host env, fix multi-detach operation. 
Bug #1958122 4.0.17 - Added get_manageable_volumes and get_manageable_snapshots. Bug #1819903 4.0.18 - During conversion of volume to base volume, error out if it has child snapshot(s). Bug #1994521 4.0.19 - Update code to work with new WSAPI (of 2023). Bug #2015746 4.0.20 - Use small QoS Latency value. Bug #2018994 4.0.21 - Fix issue seen during retype/migrate. Bug #2026718 4.0.22 - Fixed clone of replicated volume. Bug #2021941 4.0.23 - Fixed login/logout while accessing wsapi. Bug #2068795 4.0.24 - Fixed retype volume - thin to deco. Bug #2080927 4.0.25 - Update the calculation of free_capacity 4.0.26 - Added comment for cloned volumes. Bug #2062524 4.0.27 - Skip license check for new WSAPI (of 2025). Bug #2119709 """ VERSION = "4.0.27" stats = {} # TODO(Ramy): move these to the 3PAR Client VLUN_TYPE_EMPTY = 1 VLUN_TYPE_PORT = 2 VLUN_TYPE_HOST = 3 VLUN_TYPE_MATCHED_SET = 4 VLUN_TYPE_HOST_SET = 5 THIN = 2 DEDUP = 6 CONVERT_TO_THIN = 1 CONVERT_TO_FULL = 2 CONVERT_TO_DEDUP = 3 # v2 replication constants SYNC = 1 PERIODIC = 2 EXTRA_SPEC_REP_MODE = "replication:mode" EXTRA_SPEC_REP_SYNC_PERIOD = "replication:sync_period" RC_ACTION_CHANGE_TO_PRIMARY = 7 DEFAULT_REP_MODE = 'periodic' DEFAULT_SYNC_PERIOD = 900 RC_GROUP_STARTED = 3 SYNC_STATUS_COMPLETED = 3 FAILBACK_VALUE = 'default' # License values for reported capabilities PRIORITY_OPT_LIC = "Priority Optimization" THIN_PROV_LIC = "Thin Provisioning" REMOTE_COPY_LIC = "Remote Copy" SYSTEM_REPORTER_LIC = "System Reporter" COMPRESSION_LIC = "Compression" # Valid values for volume type extra specs # The first value in the list is the default value valid_prov_values = ['thin', 'full', 'dedup'] valid_persona_values = ['2 - Generic-ALUA', '1 - Generic', '3 - Generic-legacy', '4 - HPUX-legacy', '5 - AIX-legacy', '6 - EGENERA', '7 - ONTAP-legacy', '8 - VMware', '9 - OpenVMS', '10 - HPUX', '11 - WindowsServer'] hpe_qos_keys = ['minIOPS', 'maxIOPS', 'minBWS', 'maxBWS', 'latency', 'priority'] qos_priority_level = {'low': 1, 'normal': 2, 'high': 3} hpe3par_valid_keys = ['cpg', 'snap_cpg', 'provisioning', 'persona', 'vvs', 'flash_cache', 'compression', 'group_replication', 'convert_to_base'] def __init__(self, config, active_backend_id=None): self.config = config self.client = None self.uuid = uuid.uuid4() self._client_conf = {} self._replication_targets = [] self._replication_enabled = False self._active_backend_id = active_backend_id def get_version(self): return self.VERSION @classmethod def get_driver_options(cls): additional_opts = driver.BaseVD._get_oslo_driver_opts( 'san_ip', 'san_login', 'san_password', 'reserved_percentage', 'max_over_subscription_ratio', 'replication_device', 'target_port', 'san_ssh_port', 'ssh_conn_timeout', 'san_private_key', 'target_ip_address', 'unique_fqdn_network') return hpe3par_opts + additional_opts def check_flags(self, options, required_flags): for flag in required_flags: if not getattr(options, flag, None): msg = _('%s is not set') % flag LOG.error(msg) raise exception.InvalidInput(reason=msg) def check_replication_flags(self, options, required_flags): for flag in required_flags: if not options.get(flag, None): msg = (_('%s is not set and is required for the replication ' 'device to be valid.') % flag) LOG.error(msg) raise exception.InvalidInput(reason=msg) def _create_client(self, timeout=None): hpe3par_api_url = self._client_conf['hpe3par_api_url'] cl = client.HPE3ParClient(hpe3par_api_url, timeout=timeout) client_version = hpe3parclient.version if client_version < MIN_CLIENT_VERSION: ex_msg = (_('Invalid 
hpe3parclient version found (%(found)s). ' 'Version %(minimum)s or greater required. Run "pip' ' install --upgrade python-3parclient" to upgrade' ' the hpe3parclient.') % {'found': client_version, 'minimum': MIN_CLIENT_VERSION}) LOG.error(ex_msg) raise exception.InvalidInput(reason=ex_msg) return cl def client_login(self): try: LOG.debug("Connecting to 3PAR") self.client.login(self._client_conf['hpe3par_username'], self._client_conf['hpe3par_password']) except hpeexceptions.HTTPUnauthorized as ex: msg = (_("Failed to Login to 3PAR (%(url)s) because %(err)s") % {'url': self._client_conf['hpe3par_api_url'], 'err': ex}) LOG.error(msg) raise exception.InvalidInput(reason=msg) def client_logout(self): if self.client is not None: LOG.debug("Disconnect from 3PAR REST and SSH %s", self.uuid) self.client.logout() def _create_replication_client(self, remote_array): try: cl = client.HPE3ParClient(remote_array['hpe3par_api_url']) cl.login(remote_array['hpe3par_username'], remote_array['hpe3par_password']) except hpeexceptions.HTTPUnauthorized as ex: msg = (_("Failed to Login to 3PAR (%(url)s) because %(err)s") % {'url': remote_array['hpe3par_api_url'], 'err': ex}) LOG.error(msg) raise exception.InvalidInput(reason=msg) return cl def _destroy_replication_client(self, client): if client is not None: client.logout() def do_setup(self, context, timeout=None, stats=None, array_id=None): if hpe3parclient is None: msg = _('You must install hpe3parclient before using 3PAR' ' drivers. Run "pip install python-3parclient" to' ' install the hpe3parclient.') raise exception.VolumeBackendAPIException(data=msg) try: # This will set self._client_conf with the proper credentials # to communicate with the 3PAR array. It will contain either # the values for the primary array or secondary array in the # case of a fail-over. self._get_3par_config(array_id=array_id) self.client = self._create_client(timeout=timeout) self.client_login() wsapi_version = self.client.getWsApiVersion() self.API_VERSION = wsapi_version['build'] # If replication is properly configured, the primary array's # API version must meet the minimum requirements. if self._replication_enabled and ( self.API_VERSION < REMOTE_COPY_API_VERSION): self._replication_enabled = False LOG.error("The primary array must have an API version of " "%(min_ver)s or higher, but is only on " "%(current_ver)s, therefore replication is not " "supported.", {'min_ver': REMOTE_COPY_API_VERSION, 'current_ver': self.API_VERSION}) except hpeexceptions.UnsupportedVersion as ex: # In the event we cannot contact the configured primary array, # we want to allow a failover if replication is enabled. self._do_replication_setup(array_id=array_id) if self._replication_enabled: self.client = None raise exception.InvalidInput(ex) finally: self.client_logout() if context: # The context is None except at driver startup. LOG.info("HPE3PARCommon %(common_ver)s," "hpe3parclient %(rest_ver)s", {"common_ver": self.VERSION, "rest_ver": hpe3parclient.get_version_string()}) if self.config.hpe3par_debug: self.client.debug_rest(True) if self.API_VERSION < SRSTATLD_API_VERSION: # Firmware version not compatible with srstatld LOG.warning("srstatld requires " "WSAPI version '%(srstatld_version)s' " "version '%(version)s' is installed.", {'srstatld_version': SRSTATLD_API_VERSION, 'version': self.API_VERSION}) # Get the client ID for provider_location. We only need to retrieve # the ID directly from the array if the driver stats are not provided. 
if not stats or 'array_id' not in stats: try: self.client_login() info = self.client.getStorageSystemInfo() self.client.id = str(info['id']) except Exception: self.client.id = 0 finally: self.client_logout() else: self.client.id = stats['array_id'] # TODO: This duplicate call is to see SSH logs. Remove it when issue # https://github.com/hpe-storage/python-3parclient/pull/77 is fixed. if self.config.hpe3par_debug: self.client.debug_rest(True) def check_for_setup_error(self): """Verify that requirements are in place to use HPE driver.""" if not all((hpe3parclient, client, hpeexceptions)): msg = _('HPE driver setup error: some required ' 'libraries (hpe3parclient, client.*) not found.') LOG.error(msg) raise exception.VolumeDriverException(message=msg) if self.client: self.client_login() try: cpg_names = self._client_conf['hpe3par_cpg'] for cpg_name in cpg_names: self.validate_cpg(cpg_name) finally: self.client_logout() def validate_cpg(self, cpg_name): try: self.client.getCPG(cpg_name) except hpeexceptions.HTTPNotFound: err = (_("CPG (%s) doesn't exist on array") % cpg_name) LOG.error(err) raise exception.InvalidInput(reason=err) def get_domain(self, cpg_name): try: cpg = self.client.getCPG(cpg_name) except hpeexceptions.HTTPNotFound: err = (_("Failed to get domain because CPG (%s) doesn't " "exist on array.") % cpg_name) LOG.error(err) raise exception.InvalidInput(reason=err) if 'domain' in cpg: return cpg['domain'] return None def extend_volume(self, volume, new_size): volume_name = self._get_3par_vol_name(volume) old_size = volume['size'] growth_size = int(new_size) - old_size LOG.debug("Extending Volume %(vol)s from %(old)s to %(new)s, " " by %(diff)s GB.", {'vol': volume_name, 'old': old_size, 'new': new_size, 'diff': growth_size}) growth_size_mib = growth_size * units.Ki self._extend_volume(volume, volume_name, growth_size_mib) def create_group(self, context, group): """Creates a group.""" if (not volume_utils.is_group_a_cg_snapshot_type(group) and not group.is_replicated): raise NotImplementedError() model_update = {'status': fields.GroupStatus.AVAILABLE} if group.volume_type_ids is not None: for volume_type in group.volume_types: allow_type = self.is_volume_group_snap_type( volume_type) if not allow_type: msg = _('For a volume type to be a part of consistent ' 'group, volume type extra spec must have ' 'consistent_group_snapshot_enabled=" True"') LOG.error(msg) raise exception.InvalidInput(reason=msg) pool = volume_utils.extract_host(group.host, level='pool') domain = self.get_domain(pool) cg_name = self._get_3par_vvs_name(group.id) extra = {'group_id': group.id} if group.group_snapshot_id is not None: extra['group_snapshot_id'] = group.group_snapshot_id if group.is_replicated: LOG.debug("Group: %(group)s is a replication group.", {'group': group.id}) # Check replication configuration on each volume type self._check_replication_configuration_on_volume_types( group.volume_types) # Check hpe3par:group_replication flag in each volume type. 
self._check_tiramisu_configuration_on_volume_types( group.volume_types) # Attributes of Remote must be same on each volume type self._check_attributes_of_remote_per_volume_type(group) # Create remote copy group self._create_remote_copy_group_for_group(group) # Start Remote copy self._start_remote_copy_group(group) model_update.update({ 'replication_status': fields.ReplicationStatus.ENABLED}) self.client.createVolumeSet(cg_name, domain=domain, comment=str(extra)) return model_update def create_group_from_src(self, context, group, volumes, group_snapshot=None, snapshots=None, source_group=None, source_vols=None): self.create_group(context, group) volumes_model_update = [] task_id_list = [] volumes_cpg_map = [] snap_vol_dict = {} replication_flag = False model_update = {'status': fields.GroupStatus.AVAILABLE} vvs_name = self._get_3par_vvs_name(group.id) if group_snapshot and snapshots: cgsnap_name = self._get_3par_snap_name(group_snapshot.id) snap_base = cgsnap_name elif source_group and source_vols: cg_id = source_group.id # Create a brand new uuid for the temp snap. snap_uuid = uuid.uuid4().hex # Create a temporary snapshot of the volume set in order to # perform an online copy. These temp snapshots will be deleted # when the source consistency group is deleted. temp_snap = self._get_3par_snap_name(snap_uuid, temp_snap=True) snap_shot_name = temp_snap + "-@count@" copy_of_name = self._get_3par_vvs_name(cg_id) optional = {'expirationHours': 1} self.client.createSnapshotOfVolumeSet(snap_shot_name, copy_of_name, optional=optional) snap_base = temp_snap if group.is_replicated: replication_flag = True # Stop remote copy, so we can add volumes in RCG. self._stop_remote_copy_group(group) for i in range(0, len(volumes)): # In case of group created from group,we are mapping # source volume with it's snapshot snap_name = snap_base + "-" + str(i) snap_detail = self.client.getVolume(snap_name) vol_name = snap_detail.get('copyOf') src_vol_name = vol_name # In case of group created from group snapshots,we are mapping # source volume with it's snapshot if source_group is None: for snapshot in snapshots: # Getting vol_name from snapshot, in case of group created # from group snapshot. # Don't use the "volume_id" from the snapshot directly in # case the volume has been migrated and uses a different ID # in the backend. This may trigger OVO lazy loading. Use # dict compatibility to avoid changing all the unit tests. vol_name = self._get_3par_vol_name(snapshot['volume']) if src_vol_name == vol_name: vol_name = ( self._get_3par_vol_name(snapshot.get('id'))) break LOG.debug("Source volume name: %(vol)s of snapshot: %(snap)s", {'vol': src_vol_name, 'snap': snap_name}) snap_vol_dict[vol_name] = snap_name for volume in volumes: src_vol_name = volume.get('source_volid') if src_vol_name is None: src_vol_name = volume.get('snapshot_id') # Finding source volume from volume and then use snap_vol_dict # to get right snap name from source volume. 
vol_name = self._get_3par_vol_name(src_vol_name) snap_name = snap_vol_dict.get(vol_name) volume_name = self._get_3par_vol_name(volume) type_info = self.get_volume_settings_from_type(volume) cpg = type_info['cpg'] snapcpg = type_info['snap_cpg'] tpvv = type_info.get('tpvv', False) tdvv = type_info.get('tdvv', False) volumes_cpg_map.append((volume, volume_name, cpg)) compression = self.get_compression_policy( type_info['hpe3par_keys']) optional = {'online': True, 'tpvv': tpvv, 'tdvv': tdvv} if self.API_VERSION < API_VERSION_2023: optional['snapCPG'] = snapcpg if compression is not None: optional['compression'] = compression body = self.client.copyVolume(snap_name, volume_name, cpg, optional) task_id = body['taskid'] task_id_list.append((task_id, volume.get('id'))) # Only in case of replication, we are waiting for tasks to complete. if group.is_replicated: for task_id, vol_id in task_id_list: task_status = self._wait_for_task_completion(task_id) if task_status['status'] is not self.client.TASK_DONE: dbg = {'status': task_status, 'id': vol_id} msg = _('Copy volume task failed: ' 'create_group_from_src_group ' 'id=%(id)s, status=%(status)s.') % dbg LOG.error(msg) raise exception.CinderException(msg) else: LOG.debug('Online copy volume completed: ' 'create_group_from_src_group: id=%s.', vol_id) for volume, volume_name, cpg in volumes_cpg_map: if group.is_replicated: # Add volume to remote copy group self._add_vol_to_remote_copy_group(group, volume) self.client.addVolumeToVolumeSet(vvs_name, volume_name) volume_model_update = self._get_model_update( volume.get('host'), cpg, replication=replication_flag, provider_location=self.client.id) if volume_model_update is not None: volume_model_update.update({'id': volume.get('id')}) # Update volumes_model_update volumes_model_update.append(volume_model_update) if group.is_replicated: # Start remote copy. 
self._start_remote_copy_group(group) model_update.update({ 'replication_status': fields.ReplicationStatus.ENABLED}) return model_update, volumes_model_update def delete_group(self, context, group, volumes): """Deletes a group.""" if (not volume_utils.is_group_a_cg_snapshot_type(group) and not group.is_replicated): raise NotImplementedError() if group.is_replicated: self._remove_volumes_and_remote_copy_group(group, volumes) try: cg_name = self._get_3par_vvs_name(group.id) self.client.deleteVolumeSet(cg_name) except hpeexceptions.HTTPNotFound: LOG.warning("Virtual Volume Set '%s' doesn't exist on array.", cg_name) except hpeexceptions.HTTPConflict as e: LOG.error("Conflict detected in Virtual Volume Set" " %(volume_set)s: %(error)s", {"volume_set": cg_name, "error": e}) volume_model_updates = [] for volume in volumes: volume_update = {'id': volume.get('id')} try: self.delete_volume(volume) volume_update['status'] = 'deleted' except Exception as ex: LOG.error("There was an error deleting volume %(id)s: " "%(error)s.", {'id': volume.id, 'error': ex}) volume_update['status'] = 'error' volume_model_updates.append(volume_update) model_update = {'status': group.status} return model_update, volume_model_updates def update_group(self, context, group, add_volumes=None, remove_volumes=None): grp_snap_enable = volume_utils.is_group_a_cg_snapshot_type(group) if not grp_snap_enable and not group.is_replicated: raise NotImplementedError() add_volume = [] remove_volume = [] vol_rep_status = fields.ReplicationStatus.ENABLED volume_set_name = self._get_3par_vvs_name(group.id) # If replication is enabled on a group then we need # to stop RCG, so we can add/remove in/from RCG. if group.is_replicated: # Check replication status on a group. self._check_rep_status_enabled_on_group(group) # Stop remote copy. self._stop_remote_copy_group(group) # TODO(kushal) : we will use volume as object when we re-write # the design for unit tests to use objects instead of dicts. for volume in add_volumes: volume_name = self._get_3par_vol_name(volume) vol_snap_enable = self.is_volume_group_snap_type( volume.get('volume_type')) try: if vol_snap_enable: self._check_replication_matched(volume, group) if group.is_replicated: # Add volume to remote copy group self._add_vol_to_remote_copy_group(group, volume) # We have introduced one flag hpe3par:group_replication # in extra_spec of volume_type,which denotes group # level replication on 3par,so when a volume from this # type is added into group we need to set # replication_status on a volume. 
update = {'id': volume.get('id'), 'replication_status': vol_rep_status} add_volume.append(update) self.client.addVolumeToVolumeSet(volume_set_name, volume_name) else: msg = (_('Volume with volume id %s is not ' 'supported as extra specs of this ' 'volume does not have ' 'consistent_group_snapshot_enabled=" True"' ) % volume['id']) LOG.error(msg) raise exception.InvalidInput(reason=msg) except hpeexceptions.HTTPNotFound: msg = (_('Virtual Volume Set %s does not exist.') % volume_set_name) LOG.error(msg) raise exception.InvalidInput(reason=msg) for volume in remove_volumes: volume_name = self._get_3par_vol_name(volume) if group.is_replicated: # Remove a volume from remote copy group self._remove_vol_from_remote_copy_group( group, volume) update = {'id': volume.get('id'), 'replication_status': None} remove_volume.append(update) try: self.client.removeVolumeFromVolumeSet( volume_set_name, volume_name) except hpeexceptions.HTTPNotFound: msg = (_('Virtual Volume Set %s does not exist.') % volume_set_name) LOG.error(msg) raise exception.InvalidInput(reason=msg) if group.is_replicated: # Start remote copy. self._start_remote_copy_group(group) return None, add_volume, remove_volume def create_group_snapshot(self, context, group_snapshot, snapshots): """Creates a group snapshot.""" if not volume_utils.is_group_a_cg_snapshot_type(group_snapshot): raise NotImplementedError() cg_id = group_snapshot.group_id snap_shot_name = self._get_3par_snap_name(group_snapshot.id) + ( "-@count@") copy_of_name = self._get_3par_vvs_name(cg_id) extra = {'group_snapshot_id': group_snapshot.id} extra['group_id'] = cg_id extra['description'] = group_snapshot.description optional = {'comment': json.dumps(extra), 'readOnly': False} if self.config.hpe3par_snapshot_expiration: optional['expirationHours'] = ( int(self.config.hpe3par_snapshot_expiration)) if self.config.hpe3par_snapshot_retention: optional['retentionHours'] = ( int(self.config.hpe3par_snapshot_retention)) try: self.client.createSnapshotOfVolumeSet(snap_shot_name, copy_of_name, optional=optional) except Exception as ex: msg = (_('There was an error creating the cgsnapshot: %s'), str(ex)) LOG.error(msg) raise exception.InvalidInput(reason=msg) snapshot_model_updates = [] for snapshot in snapshots: snapshot_update = {'id': snapshot['id'], 'status': fields.SnapshotStatus.AVAILABLE} snapshot_model_updates.append(snapshot_update) model_update = {'status': fields.GroupSnapshotStatus.AVAILABLE} return model_update, snapshot_model_updates def delete_group_snapshot(self, context, group_snapshot, snapshots): """Deletes a group snapshot.""" if not volume_utils.is_group_a_cg_snapshot_type(group_snapshot): raise NotImplementedError() cgsnap_name = self._get_3par_snap_name(group_snapshot.id) snapshot_model_updates = [] for i, snapshot in enumerate(snapshots): snapshot_update = {'id': snapshot['id']} try: snap_name = cgsnap_name + "-" + str(i) self.client.deleteVolume(snap_name) snapshot_update['status'] = fields.SnapshotStatus.DELETED except hpeexceptions.HTTPNotFound as ex: # We'll let this act as if it worked # it helps clean up the cinder entries. LOG.warning("Delete Snapshot id not found. 
Removing from " "cinder: %(id)s Ex: %(msg)s", {'id': snapshot['id'], 'msg': ex}) snapshot_update['status'] = fields.SnapshotStatus.ERROR except Exception as ex: LOG.error("There was an error deleting snapshot %(id)s: " "%(error)s.", {'id': snapshot['id'], 'error': str(ex)}) snapshot_update['status'] = fields.SnapshotStatus.ERROR snapshot_model_updates.append(snapshot_update) model_update = {'status': fields.GroupSnapshotStatus.DELETED} return model_update, snapshot_model_updates def manage_existing(self, volume, existing_ref): """Manage an existing 3PAR volume. existing_ref is a dictionary of the form: {'source-name': } """ target_vol_name = self._get_existing_volume_ref_name(existing_ref) # Check for the existence of the virtual volume. old_comment_str = "" try: vol = self.client.getVolume(target_vol_name) if 'comment' in vol: old_comment_str = vol['comment'] except hpeexceptions.HTTPNotFound: err = (_("Virtual volume '%s' doesn't exist on array.") % target_vol_name) LOG.error(err) raise exception.InvalidInput(reason=err) new_comment = {} # Use the display name from the existing volume if no new name # was chosen by the user. if volume['display_name']: display_name = volume['display_name'] new_comment['display_name'] = volume['display_name'] elif 'comment' in vol: display_name = self._get_3par_vol_comment_value(vol['comment'], 'display_name') if display_name: new_comment['display_name'] = display_name else: display_name = None # Generate the new volume information based on the new ID. new_vol_name = self._get_3par_vol_name(volume) # No need to worry about "_name_id" because this is a newly created # volume that cannot have been migrated. name = 'volume-' + volume['id'] new_comment['volume_id'] = volume['id'] new_comment['name'] = name new_comment['type'] = 'OpenStack' self._add_name_id_to_comment(new_comment, volume) volume_type = None if volume['volume_type_id']: try: volume_type = self._get_volume_type(volume['volume_type_id']) except Exception: reason = (_("Volume type ID '%s' is invalid.") % volume['volume_type_id']) raise exception.ManageExistingVolumeTypeMismatch(reason=reason) new_vals = {'newName': new_vol_name, 'comment': json.dumps(new_comment)} # Ensure that snapCPG is set if 'snapCPG' not in vol and self.API_VERSION < API_VERSION_2023: new_vals['snapCPG'] = vol['userCPG'] LOG.info("Virtual volume %(disp)s '%(new)s' snapCPG " "is empty so it will be set to: %(cpg)s", {'disp': display_name, 'new': new_vol_name, 'cpg': new_vals['snapCPG']}) # Update the existing volume with the new name and comments. self.client.modifyVolume(target_vol_name, new_vals) LOG.info("Virtual volume '%(ref)s' renamed to '%(new)s'.", {'ref': existing_ref['source-name'], 'new': new_vol_name}) retyped = False model_update = None if volume_type: LOG.info("Virtual volume %(disp)s '%(new)s' is being retyped.", {'disp': display_name, 'new': new_vol_name}) try: retyped, model_update = self._retype_from_no_type(volume, volume_type) LOG.info("Virtual volume %(disp)s successfully retyped to " "%(new_type)s.", {'disp': display_name, 'new_type': volume_type.get('name')}) except Exception: with excutils.save_and_reraise_exception(): LOG.warning("Failed to manage virtual volume %(disp)s " "due to error during retype.", {'disp': display_name}) # Try to undo the rename and clear the new comment. 
self.client.modifyVolume( new_vol_name, {'newName': target_vol_name, 'comment': old_comment_str}) updates = {'display_name': display_name} if retyped and model_update: updates.update(model_update) LOG.info("Virtual volume %(disp)s '%(new)s' is now being managed.", {'disp': display_name, 'new': new_vol_name}) # Return display name to update the name displayed in the GUI and # any model updates from retype. return updates def manage_existing_snapshot(self, snapshot, existing_ref): """Manage an existing 3PAR snapshot. existing_ref is a dictionary of the form: {'source-name': } """ # Potential parent volume for the snapshot volume = snapshot['volume'] # Do not allow for managing of snapshots for 'failed-over' volumes. if volume.get('replication_status') == 'failed-over': err = (_("Managing of snapshots to failed-over volumes is " "not allowed.")) raise exception.InvalidInput(reason=err) target_snap_name = self._get_existing_volume_ref_name(existing_ref, is_snapshot=True) # Check for the existence of the snapshot. try: snap = self.client.getVolume(target_snap_name) except hpeexceptions.HTTPNotFound: err = (_("Snapshot '%s' doesn't exist on array.") % target_snap_name) LOG.error(err) raise exception.InvalidInput(reason=err) # Make sure the snapshot is being associated with the correct volume. parent_vol_name = self._get_3par_vol_name(volume) if parent_vol_name != snap['copyOf']: err = (_("The provided snapshot '%s' is not a snapshot of " "the provided volume.") % target_snap_name) LOG.error(err) raise exception.InvalidInput(reason=err) new_comment = {} # Use the display name from the existing snapshot if no new name # was chosen by the user. if snapshot['display_name']: display_name = snapshot['display_name'] new_comment['display_name'] = snapshot['display_name'] elif 'comment' in snap: display_name = self._get_3par_vol_comment_value(snap['comment'], 'display_name') if display_name: new_comment['display_name'] = display_name else: display_name = None # Generate the new snapshot information based on the new ID. new_snap_name = self._get_3par_snap_name(snapshot['id']) new_comment['volume_id'] = volume['id'] new_comment['volume_name'] = 'volume-' + volume['id'] self._add_name_id_to_comment(new_comment, volume) if snapshot.get('display_description', None): new_comment['description'] = snapshot['display_description'] else: new_comment['description'] = "" new_vals = {'newName': new_snap_name, 'comment': json.dumps(new_comment)} # Update the existing snapshot with the new name and comments. self.client.modifyVolume(target_snap_name, new_vals) LOG.info("Snapshot '%(ref)s' renamed to '%(new)s'.", {'ref': existing_ref['source-name'], 'new': new_snap_name}) updates = {'display_name': display_name} LOG.info("Snapshot %(disp)s '%(new)s' is now being managed.", {'disp': display_name, 'new': new_snap_name}) # Return display name to update the name displayed in the GUI. return updates def manage_existing_get_size(self, volume, existing_ref): """Return size of volume to be managed by manage_existing. existing_ref is a dictionary of the form: {'source-name': } """ target_vol_name = self._get_existing_volume_ref_name(existing_ref) # Make sure the reference is not in use. if re.match('osv-*|oss-*|vvs-*', target_vol_name): reason = _("Reference must be for an unmanaged virtual volume.") raise exception.ManageExistingInvalidReference( existing_ref=target_vol_name, reason=reason) # Check for the existence of the virtual volume. 
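# Note (descriptive comment, derived from the conversion below): the array
# reports volume sizes in MiB; the size returned to Cinder is sizeMiB / 1024
# rounded up to the next whole GiB, since Cinder tracks sizes as integral GiB.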
try: vol = self.client.getVolume(target_vol_name) except hpeexceptions.HTTPNotFound: err = (_("Virtual volume '%s' doesn't exist on array.") % target_vol_name) LOG.error(err) raise exception.InvalidInput(reason=err) return int(math.ceil(float(vol['sizeMiB']) / units.Ki)) def manage_existing_snapshot_get_size(self, snapshot, existing_ref): """Return size of snapshot to be managed by manage_existing_snapshot. existing_ref is a dictionary of the form: {'source-name': } """ target_snap_name = self._get_existing_volume_ref_name(existing_ref, is_snapshot=True) # Make sure the reference is not in use. if re.match('osv-*|oss-*|vvs-*|unm-*', target_snap_name): reason = _("Reference must be for an unmanaged snapshot.") raise exception.ManageExistingInvalidReference( existing_ref=target_snap_name, reason=reason) # Check for the existence of the snapshot. try: snap = self.client.getVolume(target_snap_name) except hpeexceptions.HTTPNotFound: err = (_("Snapshot '%s' doesn't exist on array.") % target_snap_name) LOG.error(err) raise exception.InvalidInput(reason=err) return int(math.ceil(float(snap['sizeMiB']) / units.Ki)) def unmanage(self, volume): """Removes the specified volume from Cinder management.""" # Rename the volume's name to unm-* format so that it can be # easily found later. vol_name = self._get_3par_vol_name(volume) # Rename using the user visible ID ignoring the internal "_name_id" # that may have been generated during a retype. This makes it easier # to locate volumes in the backend. new_vol_name = self._get_3par_unm_name(volume['id']) self.client.modifyVolume(vol_name, {'newName': new_vol_name}) LOG.info("Virtual volume %(disp)s '%(vol)s' is no longer managed. " "Volume renamed to '%(new)s'.", {'disp': volume['display_name'], 'vol': vol_name, 'new': new_vol_name}) def unmanage_snapshot(self, snapshot): """Removes the specified snapshot from Cinder management.""" # Parent volume for the snapshot volume = snapshot['volume'] # Do not allow unmanaging of snapshots from 'failed-over' volumes. if volume.get('replication_status') == 'failed-over': err = (_("Unmanaging of snapshots from failed-over volumes is " "not allowed.")) LOG.error(err) # TODO(leeantho) Change this exception to Invalid when the volume # manager supports handling that. raise exception.SnapshotIsBusy(snapshot_name=snapshot['id']) # Rename the snapshots's name to ums-* format so that it can be # easily found later. snap_name = self._get_3par_snap_name(snapshot['id']) new_snap_name = self._get_3par_ums_name(snapshot['id']) self.client.modifyVolume(snap_name, {'newName': new_snap_name}) LOG.info("Snapshot %(disp)s '%(vol)s' is no longer managed. 
" "Snapshot renamed to '%(new)s'.", {'disp': snapshot['display_name'], 'vol': snap_name, 'new': new_snap_name}) def get_manageable_volumes(self, cinder_volumes, marker, limit, offset, sort_keys, sort_dirs): already_managed = {} for vol_obj in cinder_volumes: cinder_id = vol_obj.id volume_name = self._get_3par_vol_name(cinder_id) already_managed[volume_name] = cinder_id cinder_cpg = self._client_conf['hpe3par_cpg'][0] manageable_vols = [] body = self.client.getVolumes() all_volumes = body['members'] for vol in all_volumes: cpg = vol.get('userCPG') if cpg == cinder_cpg: size_gb = int(vol['sizeMiB'] / 1024) vol_name = vol['name'] if vol_name in already_managed: is_safe = False reason_not_safe = _('Volume already managed') cinder_id = already_managed[vol_name] else: is_safe = False hostname = None cinder_id = None # Check if the unmanaged volume is attached to any host try: vlun = self.client.getVLUN(vol_name) hostname = vlun['hostname'] except hpe3parclient.exceptions.HTTPNotFound: # not attached to any host is_safe = True if is_safe: reason_not_safe = None else: reason_not_safe = _('Volume attached to host ' + hostname) manageable_vols.append({ 'reference': {'name': vol_name}, 'size': size_gb, 'safe_to_manage': is_safe, 'reason_not_safe': reason_not_safe, 'cinder_id': cinder_id, }) return volume_utils.paginate_entries_list( manageable_vols, marker, limit, offset, sort_keys, sort_dirs) def get_manageable_snapshots(self, cinder_snapshots, marker, limit, offset, sort_keys, sort_dirs): already_managed = {} for snap_obj in cinder_snapshots: cinder_snap_id = snap_obj.id snap_name = self._get_3par_snap_name(cinder_snap_id) already_managed[snap_name] = cinder_snap_id cinder_cpg = self._client_conf['hpe3par_cpg'][0] cpg_volumes = [] body = self.client.getVolumes() all_volumes = body['members'] for vol in all_volumes: cpg = vol.get('userCPG') if cpg == cinder_cpg: cpg_volumes.append(vol) manageable_snaps = [] for vol in cpg_volumes: size_gb = int(vol['sizeMiB'] / 1024) snapshots = self.client.getSnapshotsOfVolume(cinder_cpg, vol['name']) for snap_name in snapshots: if snap_name in already_managed: is_safe = False reason_not_safe = _('Snapshot already managed') cinder_snap_id = already_managed[snap_name] else: is_safe = True reason_not_safe = None cinder_snap_id = None manageable_snaps.append({ 'reference': {'name': snap_name}, 'size': size_gb, 'safe_to_manage': is_safe, 'reason_not_safe': reason_not_safe, 'cinder_id': cinder_snap_id, 'source_reference': {'name': vol['name']}, }) return volume_utils.paginate_entries_list( manageable_snaps, marker, limit, offset, sort_keys, sort_dirs) def _get_existing_volume_ref_name(self, existing_ref, is_snapshot=False): """Returns the volume name of an existing reference. Checks if an existing volume reference has a source-name or source-id element. If source-name or source-id is not present an error will be thrown. 
""" vol_name = None if 'source-name' in existing_ref: vol_name = existing_ref['source-name'] elif 'source-id' in existing_ref: if is_snapshot: vol_name = self._get_3par_ums_name(existing_ref['source-id']) else: vol_name = self._get_3par_unm_name(existing_ref['source-id']) else: reason = _("Reference must contain source-name or source-id.") raise exception.ManageExistingInvalidReference( existing_ref=existing_ref, reason=reason) return vol_name def _extend_volume(self, volume, volume_name, growth_size_mib, _convert_to_base=False): model_update = None rcg_name = self._get_3par_rcg_name(volume) is_volume_replicated = self._volume_of_replicated_type( volume, hpe_tiramisu_check=True) volume_part_of_group = ( self._volume_of_hpe_tiramisu_type_and_part_of_group(volume)) if volume_part_of_group: group = volume.get('group') rcg_name = self._get_3par_rcg_name_of_group(group.id) try: if _convert_to_base: LOG.debug("Converting to base volume prior to growing.") model_update = self._convert_to_base_volume(volume) # If the volume is replicated and we are not failed over, # remote copy has to be stopped before the volume can be extended. failed_over = volume.get("replication_status", None) is_failed_over = failed_over == "failed-over" if ((is_volume_replicated or volume_part_of_group) and not is_failed_over): self.client.stopRemoteCopy(rcg_name) self.client.growVolume(volume_name, growth_size_mib) if ((is_volume_replicated or volume_part_of_group) and not is_failed_over): self.client.startRemoteCopy(rcg_name) except Exception as ex: # If the extend fails, we must restart remote copy. if is_volume_replicated or volume_part_of_group: self.client.startRemoteCopy(rcg_name) with excutils.save_and_reraise_exception() as ex_ctxt: if (not _convert_to_base and isinstance(ex, hpeexceptions.HTTPForbidden) and ex.get_code() == 150): # Error code 150 means 'invalid operation: Cannot grow # this type of volume'. # Suppress raising this exception because we can # resolve it by converting it into a base volume. # Afterwards, extending the volume should succeed, or # fail with a different exception/error code. ex_ctxt.reraise = False model_update = self._extend_volume( volume, volume_name, growth_size_mib, _convert_to_base=True) else: LOG.error("Error extending volume: %(vol)s. " "Exception: %(ex)s", {'vol': volume_name, 'ex': ex}) return model_update @classmethod def _get_3par_vol_name(cls, volume_id, temp_vol=False): """Get converted 3PAR volume name. Converts the openstack volume id from ecffc30f-98cb-4cf5-85ee-d7309cc17cd2 to osv-7P.DD5jLTPWF7tcwnMF80g We convert the 128 bits of the uuid into a 24character long base64 encoded string to ensure we don't exceed the maximum allowed 31 character name limit on 3Par We strip the padding '=' and replace + with . and / with - volume_id is a polymorphic parameter and can be either a string or a volume (OVO or dict representation). """ # Accept OVOs (what we should only receive), dict (so we don't have to # change all our unit tests), and ORM (because we some methods still # pass it, such as terminate_connection). 
if isinstance(volume_id, (objects.Volume, objects.Volume.model, dict)): volume_id = volume_id.get('_name_id') or volume_id['id'] volume_name = cls._encode_name(volume_id) if temp_vol: # is this a temporary volume # this is done during migration prefix = "tsv-%s" else: prefix = "osv-%s" return prefix % volume_name def _get_3par_snap_name(self, snapshot_id, temp_snap=False): snapshot_name = self._encode_name(snapshot_id) if temp_snap: # is this a temporary snapshot # this is done during cloning prefix = "tss-%s" else: prefix = "oss-%s" return prefix % snapshot_name def _get_3par_ums_name(self, snapshot_id): ums_name = self._encode_name(snapshot_id) return "ums-%s" % ums_name def _get_3par_vvs_name(self, volume_id): vvs_name = self._encode_name(volume_id) return "vvs-%s" % vvs_name def _get_3par_unm_name(self, volume_id): unm_name = self._encode_name(volume_id) return "unm-%s" % unm_name # v2 replication conversion def _get_3par_rcg_name(self, volume): # if non-replicated volume is retyped or migrated to replicated vol, # then rcg_name is different. Try to get that new rcg_name. if volume['migration_status'] == 'success': vol_name = self._get_3par_vol_name(volume) vol_details = self.client.getVolume(vol_name) rcg_name = vol_details.get('rcopyGroup') LOG.debug("new rcg_name: %(name)s", {'name': rcg_name}) return rcg_name else: # by default, rcg_name is similar to volume name rcg_name = self._encode_name(volume.get('_name_id') or volume['id']) rcg = "rcg-%s" % rcg_name return rcg[:22] def _get_3par_remote_rcg_name(self, volume, provider_location): return self._get_3par_rcg_name(volume) + ".r" + ( str(provider_location)) @staticmethod def _encode_name(name): uuid_str = name.replace("-", "") vol_uuid = uuid.UUID('urn:uuid:%s' % uuid_str) vol_encoded = base64.encode_as_text(vol_uuid.bytes) # 3par doesn't allow +, nor / vol_encoded = vol_encoded.replace('+', '.') vol_encoded = vol_encoded.replace('/', '-') # strip off the == as 3par doesn't like those. vol_encoded = vol_encoded.replace('=', '') return vol_encoded def _capacity_from_size(self, vol_size): # because 3PAR volume sizes are in Mebibytes. if int(vol_size) == 0: capacity = units.Gi # default: 1GiB else: capacity = vol_size * units.Gi capacity = int(math.ceil(capacity / units.Mi)) return capacity def _delete_3par_host(self, hostname, client_obj): client_obj.deleteHost(hostname) def _get_prioritized_host_on_3par(self, host, hosts, hostname): # Check whether host with wwn/iqn of initiator present on 3par if hosts and hosts['members'] and 'name' in hosts['members'][0]: # Retrieving 'host' and 'hosts' from 3par using hostname # and wwn/iqn respectively. Compare hostname of 'host' and 'hosts', # if they do not match it means 3par has a pre-existing host # with some other name. 
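# If the names differ, prefer the host entry already present on the array
# (looked up by wwn/iqn) over the Cinder-derived name, so a duplicate host
# is not created on the 3PAR.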
if host['name'] != hosts['members'][0]['name']: hostname = hosts['members'][0]['name'] LOG.info(("Prioritize the host retrieved from wwn/iqn " "Hostname : %(hosts)s is used instead " "of Hostname: %(host)s"), {'hosts': hostname, 'host': host['name']}) host = self._get_3par_host(hostname) return host, hostname return host, hostname def _create_3par_vlun(self, volume, hostname, nsp, lun_id=None, remote_client=None): try: location = None auto = True if lun_id is not None: auto = False if remote_client: client_obj = remote_client else: client_obj = self.client if nsp is None: location = client_obj.createVLUN(volume, hostname=hostname, auto=auto, lun=lun_id) else: port = self.build_portPos(nsp) location = client_obj.createVLUN(volume, hostname=hostname, auto=auto, portPos=port, lun=lun_id) vlun_info = None if location: # The LUN id is returned as part of the location URI vlun = location.split(',') vlun_info = {'volume_name': vlun[0], 'lun_id': int(vlun[1]), 'host_name': vlun[2], } if len(vlun) > 3: vlun_info['nsp'] = vlun[3] return vlun_info except hpeexceptions.HTTPBadRequest as e: if 'must be in the same domain' in e.get_description(): LOG.error(e.get_description()) raise Invalid3PARDomain(err=e.get_description()) else: raise exception.VolumeBackendAPIException( data=e.get_description()) def _safe_hostname(self, connector, configuration): """We have to use a safe hostname length for 3PAR host names.""" hostname = connector['host'] unique_fqdn_network = configuration.unique_fqdn_network if not unique_fqdn_network and connector.get('initiator'): iqn = connector.get('initiator') iqn = iqn.replace(":", "-") return iqn[::-1][:31] else: try: index = hostname.index('.') except ValueError: # couldn't find it index = len(hostname) # we'll just chop this off for now. 
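# 3PAR host names are limited to 31 characters, so the hostname (or the
# portion before the first '.') is truncated to fit.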
if index > 31: index = 31 return hostname[:index] def _get_3par_host(self, hostname): return self.client.getHost(hostname) def get_ports(self): return self.client.getPorts() def get_active_target_ports(self, remote_client=None): if remote_client: client_obj = remote_client ports = remote_client.getPorts() else: client_obj = self.client ports = self.get_ports() target_ports = [] for port in ports['members']: if ( port['mode'] == client_obj.PORT_MODE_TARGET and port['linkState'] == client_obj.PORT_STATE_READY ): port['nsp'] = self.build_nsp(port['portPos']) target_ports.append(port) return target_ports def get_active_fc_target_ports(self, remote_client=None): ports = self.get_active_target_ports(remote_client) if remote_client: client_obj = remote_client else: client_obj = self.client fc_ports = [] for port in ports: if port['protocol'] == client_obj.PORT_PROTO_FC: fc_ports.append(port) return fc_ports def get_active_iscsi_target_ports(self, remote_client=None): ports = self.get_active_target_ports(remote_client) if remote_client: client_obj = remote_client else: client_obj = self.client iscsi_ports = [] for port in ports: if port['protocol'] == client_obj.PORT_PROTO_ISCSI: iscsi_ports.append(port) return iscsi_ports def get_volume_stats(self, refresh, filter_function=None, goodness_function=None): if refresh: self._update_volume_stats( filter_function=filter_function, goodness_function=goodness_function) return self.stats def _update_volume_stats(self, filter_function=None, goodness_function=None): # const to convert MiB to GB const = 0.0009765625 # storage_protocol and volume_backend_name are # set in the child classes pools = [] try: info = self.client.getStorageSystemInfo() backend_state = 'up' except Exception as ex: info = {} backend_state = 'down' LOG.warning("Exception at getStorageSystemInfo() " "Reason: '%(reason)s'", {'reason': ex}) qos_support = True thin_support = True remotecopy_support = True sr_support = True compression_support = False if 'licenseInfo' in info: if 'licenses' in info['licenseInfo']: valid_licenses = info['licenseInfo']['licenses'] qos_support = self._check_license_enabled( valid_licenses, self.PRIORITY_OPT_LIC, "QoS_support") thin_support = self._check_license_enabled( valid_licenses, self.THIN_PROV_LIC, "Thin_provisioning_support") remotecopy_support = self._check_license_enabled( valid_licenses, self.REMOTE_COPY_LIC, "Replication") sr_support = self._check_license_enabled( valid_licenses, self.SYSTEM_REPORTER_LIC, "System_reporter_support") compression_support = self._check_license_enabled( valid_licenses, self.COMPRESSION_LIC, "Compression") for cpg_name in self._client_conf['hpe3par_cpg']: try: stat_capabilities = { THROUGHPUT: None, BANDWIDTH: None, LATENCY: None, IO_SIZE: None, QUEUE_LENGTH: None, AVG_BUSY_PERC: None } cpg = self.client.getCPG(cpg_name) if (self.API_VERSION >= SRSTATLD_API_VERSION and sr_support): interval = 'daily' history = '7d' try: stat_capabilities = self.client.getCPGStatData( cpg_name, interval, history) except Exception as ex: LOG.warning("Exception at getCPGStatData() " "for cpg: '%(cpg_name)s' " "Reason: '%(reason)s'", {'cpg_name': cpg_name, 'reason': ex}) if 'numTDVVs' in cpg: total_volumes = int( cpg['numFPVVs'] + cpg['numTPVVs'] + cpg['numTDVVs'] ) else: total_volumes = int( cpg['numFPVVs'] + cpg['numTPVVs'] ) if 'limitMiB' not in cpg['SDGrowth']: # cpg usable free space cpg_avail_space = ( self.client.getCPGAvailableSpace(cpg_name)) # total_capacity is the best we can do for a limitless cpg total_capacity = int( 
(cpg['SDUsage']['usedMiB'] + cpg['UsrUsage']['usedMiB'] + cpg_avail_space['usableFreeMiB']) * const) else: total_capacity = int(cpg['SDGrowth']['limitMiB'] * const) provisioned_capacity = int((cpg['UsrUsage']['totalMiB'] + cpg['SAUsage']['totalMiB'] + cpg['SDUsage']['totalMiB']) * const) free_capacity = total_capacity - provisioned_capacity capacity_utilization = ( (float(total_capacity - free_capacity) / float(total_capacity)) * 100) except hpeexceptions.HTTPNotFound: err = (_("CPG (%s) doesn't exist on array") % cpg_name) LOG.error(err) raise exception.InvalidInput(reason=err) pool = {'pool_name': cpg_name, 'total_capacity_gb': total_capacity, 'free_capacity_gb': free_capacity, 'provisioned_capacity_gb': provisioned_capacity, 'QoS_support': qos_support, 'thin_provisioning_support': thin_support, 'thick_provisioning_support': True, 'max_over_subscription_ratio': ( self.config.safe_get('max_over_subscription_ratio')), 'reserved_percentage': ( self.config.safe_get('reserved_percentage')), 'location_info': ('HPE3PARDriver:%(sys_id)s:%(dest_cpg)s' % {'sys_id': info.get('serialNumber'), 'dest_cpg': cpg_name}), 'total_volumes': total_volumes, 'capacity_utilization': capacity_utilization, THROUGHPUT: stat_capabilities[THROUGHPUT], BANDWIDTH: stat_capabilities[BANDWIDTH], LATENCY: stat_capabilities[LATENCY], IO_SIZE: stat_capabilities[IO_SIZE], QUEUE_LENGTH: stat_capabilities[QUEUE_LENGTH], AVG_BUSY_PERC: stat_capabilities[AVG_BUSY_PERC], 'filter_function': filter_function, 'goodness_function': goodness_function, 'multiattach': True, 'consistent_group_snapshot_enabled': True, 'compression': compression_support, 'consistent_group_replication_enabled': self._replication_enabled, 'backend_state': backend_state } if remotecopy_support: pool['replication_enabled'] = self._replication_enabled pool['replication_type'] = ['sync', 'periodic'] pool['replication_count'] = len(self._replication_targets) pools.append(pool) self.stats = {'driver_version': '4.0', 'storage_protocol': None, 'vendor_name': 'Hewlett Packard Enterprise', 'volume_backend_name': None, 'array_id': info.get('id'), 'replication_enabled': self._replication_enabled, 'replication_targets': self._get_replication_targets(), 'pools': pools} def _check_license_enabled(self, valid_licenses, license_to_check, capability): """Check a license against valid licenses on the array.""" if valid_licenses: if self.API_VERSION >= API_VERSION_2025: # with new wsapi, all licenses are enabled return True for license in valid_licenses: if license_to_check in license.get('name'): return True LOG.debug("'%(capability)s' requires a '%(license)s' " "license which is not installed.", {'capability': capability, 'license': license_to_check}) return False def _get_vlun(self, volume_name, hostname, lun_id=None, nsp=None, remote_client=None): """find a VLUN on a 3PAR host.""" if remote_client: vluns = remote_client.getHostVLUNs(hostname) else: vluns = self.client.getHostVLUNs(hostname) found_vlun = None for vlun in vluns: if volume_name in vlun['volumeName']: if lun_id is not None: if vlun['lun'] == lun_id: if nsp: port = self.build_portPos(nsp) if vlun['portPos'] == port: found_vlun = vlun break else: found_vlun = vlun break else: found_vlun = vlun break if found_vlun is None: LOG.info("3PAR vlun %(name)s not found on host %(host)s", {'name': volume_name, 'host': hostname}) return found_vlun def create_vlun(self, volume, host, nsp=None, lun_id=None, remote_client=None): """Create a VLUN. In order to export a volume on a 3PAR box, we have to create a VLUN. 
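If no LUN id is given the array auto-assigns one; if an NSP
(node:slot:port) is given, the VLUN is created against that specific
port.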
""" volume_name = self._get_3par_vol_name(volume) vlun_info = self._create_3par_vlun(volume_name, host['name'], nsp, lun_id=lun_id, remote_client=remote_client) return self._get_vlun(volume_name, host['name'], vlun_info['lun_id'], nsp, remote_client) def _delete_vlun(self, client_obj, volume, hostname, wwn=None, iqn=None): volume_name = self._get_3par_vol_name(volume) if hostname: vluns = client_obj.getHostVLUNs(hostname) else: # In case of 'force detach', hostname is None vluns = client_obj.getVLUNs()['members'] # When deleteing VLUNs, you simply need to remove the template VLUN # and any active VLUNs will be automatically removed. The template # VLUN are marked as active: False modify_host = True volume_vluns = [] for vlun in vluns: if volume_name in vlun['volumeName']: # template VLUNs are 'active' = False if not vlun['active']: volume_vluns.append(vlun) if not volume_vluns: LOG.warning("3PAR vlun for volume %(name)s not found on host " "%(host)s", {'name': volume_name, 'host': hostname}) return # VLUN Type of MATCHED_SET 4 requires the port to be provided for vlun in volume_vluns: if hostname is None: hostname = vlun.get('hostname') if 'portPos' in vlun: client_obj.deleteVLUN(volume_name, vlun['lun'], hostname=hostname, port=vlun['portPos']) else: client_obj.deleteVLUN(volume_name, vlun['lun'], hostname=hostname) # Determine if there are other volumes attached to the host. # This will determine whether we should try removing host from host set # and deleting the host. vluns = [] try: vluns = client_obj.getHostVLUNs(hostname) except hpeexceptions.HTTPNotFound: LOG.debug("All VLUNs removed from host %s", hostname) if wwn is not None and not isinstance(wwn, list): wwn = [wwn] if iqn is not None and not isinstance(iqn, list): iqn = [iqn] for vlun in vluns: if vlun.get('active'): if (wwn is not None and vlun.get('remoteName').lower() in wwn)\ or (iqn is not None and vlun.get('remoteName').lower() in iqn): # vlun with wwn/iqn exists so do not modify host. modify_host = False break if len(vluns) == 0: # We deleted the last vlun, so try to delete the host too. # This check avoids the old unnecessary try/fail when vluns exist # but adds a minor race condition if a vlun is manually deleted # externally at precisely the wrong time. Worst case is leftover # host, so it is worth the unlikely risk. try: # TODO(sonivi): since multiattach is not supported for now, # delete only single host, if its not exported to volume. self._delete_3par_host(hostname, client_obj) except Exception as ex: # Any exception down here is only logged. The vlun is deleted. # If the host is in a host set, the delete host will fail and # the host will remain in the host set. This is desired # because cinder was not responsible for the host set # assignment. The host set could be used outside of cinder # for future needs (e.g. export volume to host set). # The log info explains why the host was left alone. 
LOG.info("3PAR vlun for volume '%(name)s' was deleted, " "but the host '%(host)s' was not deleted " "because: %(reason)s", {'name': volume_name, 'host': hostname, 'reason': ex.get_description()}) elif modify_host: if wwn is not None: mod_request = {'pathOperation': client_obj.HOST_EDIT_REMOVE, 'FCWWNs': wwn} else: mod_request = {'pathOperation': client_obj.HOST_EDIT_REMOVE, 'iSCSINames': iqn} try: client_obj.modifyHost(hostname, mod_request) except Exception as ex: LOG.info("3PAR vlun for volume '%(name)s' was deleted, " "but the host '%(host)s' was not Modified " "because: %(reason)s", {'name': volume_name, 'host': hostname, 'reason': ex.get_description()}) def delete_vlun(self, volume, hostname, wwn=None, iqn=None, remote_client=None): self._delete_vlun(self.client, volume, hostname, wwn, iqn) if remote_client: self._delete_vlun(remote_client, volume, hostname, wwn, iqn) def _get_volume_type(self, type_id): ctxt = context.get_admin_context() return volume_types.get_volume_type(ctxt, type_id) def _get_key_value(self, hpe3par_keys, key, default=None): if hpe3par_keys is not None and key in hpe3par_keys: return hpe3par_keys[key] else: return default def _get_boolean_key_value(self, hpe3par_keys, key, default=False): value = self._get_key_value( hpe3par_keys, key, default) if isinstance(value, str): if value.lower() == 'true': value = True else: value = False return value def _get_qos_value(self, qos, key, default=None): if key in qos: return qos[key] else: return default def _get_qos_by_volume_type(self, volume_type): qos = {} qos_specs_id = volume_type.get('qos_specs_id') specs = volume_type.get('extra_specs') # NOTE(kmartin): We prefer the qos_specs association # and override any existing extra-specs settings # if present. if qos_specs_id is not None: kvs = qos_specs.get_qos_specs(context.get_admin_context(), qos_specs_id)['specs'] else: kvs = specs for key, value in kvs.items(): if 'qos:' in key: fields = key.split(':') key = fields[1] if key in self.hpe_qos_keys: qos[key] = value return qos def _get_keys_by_volume_type(self, volume_type): hpe3par_keys = {} specs = volume_type.get('extra_specs') for key, value in specs.items(): if ':' in key: fields = key.split(':') key = fields[1] if key in self.hpe3par_valid_keys: hpe3par_keys[key] = value return hpe3par_keys def _set_qos_rule(self, qos, vvs_name): min_io = self._get_qos_value(qos, 'minIOPS') max_io = self._get_qos_value(qos, 'maxIOPS') min_bw = self._get_qos_value(qos, 'minBWS') max_bw = self._get_qos_value(qos, 'maxBWS') latency = self._get_qos_value(qos, 'latency') priority = self._get_qos_value(qos, 'priority', 'normal') qosRule = {} if min_io: qosRule['ioMinGoal'] = int(min_io) if max_io is None: qosRule['ioMaxLimit'] = int(min_io) if max_io: qosRule['ioMaxLimit'] = int(max_io) if min_io is None: qosRule['ioMinGoal'] = int(max_io) if min_bw: qosRule['bwMinGoalKB'] = int(min_bw) * units.Ki if max_bw is None: qosRule['bwMaxLimitKB'] = int(min_bw) * units.Ki if max_bw: qosRule['bwMaxLimitKB'] = int(max_bw) * units.Ki if min_bw is None: qosRule['bwMinGoalKB'] = int(max_bw) * units.Ki if latency: # latency could be values like 2, 5, etc or # small values like 0.1, 0.02, etc. # we are converting to float so that 0.1 doesn't become 0 latency = float(latency) if latency >= 1: # by default, latency in millisecs qosRule['latencyGoal'] = int(latency) else: # latency < 1 Eg. 
0.1, 0.02, etc # convert latency to microsecs qosRule['latencyGoaluSecs'] = int(latency * 1000) if priority: qosRule['priority'] = self.qos_priority_level.get(priority.lower()) try: self.client.createQoSRules(vvs_name, qosRule) except Exception: with excutils.save_and_reraise_exception(): LOG.error("Error creating QOS rule %s", qosRule) def get_flash_cache_policy(self, hpe3par_keys): if hpe3par_keys is not None: # First check list of extra spec keys val = self._get_key_value(hpe3par_keys, 'flash_cache', None) if val is not None: # If requested, see if supported on back end if self.API_VERSION < FLASH_CACHE_API_VERSION: err = (_("Flash Cache Policy requires " "WSAPI version '%(fcache_version)s' " "version '%(version)s' is installed.") % {'fcache_version': FLASH_CACHE_API_VERSION, 'version': self.API_VERSION}) LOG.error(err) raise exception.InvalidInput(reason=err) else: if val.lower() == 'true': return self.client.FLASH_CACHE_ENABLED else: return self.client.FLASH_CACHE_DISABLED return None def get_compression_policy(self, hpe3par_keys): if hpe3par_keys is not None: # here it should return true/false/None val = self._get_key_value(hpe3par_keys, 'compression', None) compression_support = False if val is not None: info = self.client.getStorageSystemInfo() if 'licenseInfo' in info: if 'licenses' in info['licenseInfo']: valid_licenses = info['licenseInfo']['licenses'] compression_support = self._check_license_enabled( valid_licenses, self.COMPRESSION_LIC, "Compression") # here check the wsapi version if self.API_VERSION < COMPRESSION_API_VERSION: err = (_("Compression Policy requires " "WSAPI version '%(compression_version)s' " "version '%(version)s' is installed.") % {'compression_version': COMPRESSION_API_VERSION, 'version': self.API_VERSION}) LOG.error(err) raise exception.InvalidInput(reason=err) else: if val.lower() == 'true': if not compression_support: msg = _('Compression is not supported on ' 'underlying hardware') LOG.error(msg) raise exception.InvalidInput(reason=msg) return True else: return False return None def _set_flash_cache_policy_in_vvs(self, flash_cache, vvs_name): # Update virtual volume set if flash_cache: try: self.client.modifyVolumeSet(vvs_name, flashCachePolicy=flash_cache) LOG.info("Flash Cache policy set to %s", flash_cache) except Exception: with excutils.save_and_reraise_exception(): LOG.error("Error setting Flash Cache policy " "to %s - exception", flash_cache) def _add_volume_to_volume_set(self, volume, volume_name, cpg, vvs_name, qos, flash_cache): if vvs_name is not None: # Admin has set a volume set name to add the volume to try: self.client.addVolumeToVolumeSet(vvs_name, volume_name) except hpeexceptions.HTTPNotFound: msg = _('VV Set %s does not exist.') % vvs_name LOG.error(msg) raise exception.InvalidInput(reason=msg) else: vvs_name = self._get_3par_vvs_name(volume['id']) domain = self.get_domain(cpg) self.client.createVolumeSet(vvs_name, domain) try: self._set_qos_rule(qos, vvs_name) self._set_flash_cache_policy_in_vvs(flash_cache, vvs_name) self.client.addVolumeToVolumeSet(vvs_name, volume_name) except Exception as ex: # Cleanup the volume set if unable to create the qos rule # or flash cache policy or add the volume to the volume set self.client.deleteVolumeSet(vvs_name) raise exception.CinderException(ex) def get_cpg(self, volume, allowSnap=False): volume_name = self._get_3par_vol_name(volume) vol = self.client.getVolume(volume_name) # Search for 'userCPG' in the get volume REST API, # if found return userCPG , else search for snapCPG attribute # when 
allowSnap=True. For the cases where 3PAR REST call for # get volume doesn't have either userCPG or snapCPG , # take the default value of cpg from 'host' attribute from volume param LOG.debug("get volume response is: %s", vol) if 'userCPG' in vol: return vol['userCPG'] elif allowSnap and 'snapCPG' in vol: return vol['snapCPG'] else: return volume_utils.extract_host(volume['host'], 'pool') def _get_3par_vol_comment(self, volume_name): vol = self.client.getVolume(volume_name) if 'comment' in vol: return vol['comment'] return None def validate_persona(self, persona_value): """Validate persona value. If the passed in persona_value is not valid, raise InvalidInput, otherwise return the persona ID. :param persona_value: :raises exception.InvalidInput: :returns: persona ID """ if persona_value not in self.valid_persona_values: err = (_("Must specify a valid persona %(valid)s," "value '%(persona)s' is invalid.") % {'valid': self.valid_persona_values, 'persona': persona_value}) LOG.error(err) raise exception.InvalidInput(reason=err) # persona is set by the id so remove the text and return the id # i.e for persona '1 - Generic' returns 1 persona_id = persona_value.split(' ') return persona_id[0] def get_persona_type(self, volume, hpe3par_keys=None): default_persona = self.valid_persona_values[0] type_id = volume.get('volume_type_id', None) if type_id is not None: volume_type = self._get_volume_type(type_id) if hpe3par_keys is None: hpe3par_keys = self._get_keys_by_volume_type(volume_type) persona_value = self._get_key_value(hpe3par_keys, 'persona', default_persona) return self.validate_persona(persona_value) def get_type_info(self, type_id): """Get 3PAR type info for the given type_id. Reconciles VV Set, old-style extra-specs, and QOS specs and returns commonly used info about the type. :returns: hpe3par_keys, qos, volume_type, vvs_name """ volume_type = None vvs_name = None hpe3par_keys = {} qos = {} if type_id is not None: volume_type = self._get_volume_type(type_id) hpe3par_keys = self._get_keys_by_volume_type(volume_type) vvs_name = self._get_key_value(hpe3par_keys, 'vvs') if vvs_name is None: qos = self._get_qos_by_volume_type(volume_type) return hpe3par_keys, qos, volume_type, vvs_name def get_volume_settings_from_type_id(self, type_id, pool): """Get 3PAR volume settings given a type_id. Combines type info and config settings to return a dictionary describing the 3PAR volume settings. Does some validation (CPG). Uses pool as the default cpg (when not specified in volume type specs). :param type_id: id of type to get settings for :param pool: CPG to use if type does not have one set :returns: dict """ hpe3par_keys, qos, volume_type, vvs_name = self.get_type_info(type_id) # Default to pool extracted from host. # If that doesn't work use the 1st CPG in the config as the default. default_cpg = pool or self._client_conf['hpe3par_cpg'][0] cpg = self._get_key_value(hpe3par_keys, 'cpg', default_cpg) if cpg is not default_cpg: # The cpg was specified in a volume type extra spec so it # needs to be validated that it's in the correct domain. # log warning here msg = ("'hpe3par:cpg' is not supported as an extra spec " "in a volume type. 
CPG's are chosen by " "the cinder scheduler, as a pool, from the " "cinder.conf entry 'hpe3par_cpg', which can " "be a list of CPGs.") versionutils.report_deprecated_feature(LOG, msg) LOG.info("Using pool %(pool)s instead of %(cpg)s", {'pool': pool, 'cpg': cpg}) cpg = pool self.validate_cpg(cpg) # Look to see if the snap_cpg was specified in volume type # extra spec, if not use hpe3par_cpg_snap from config as the # default. snap_cpg = self.config.hpe3par_cpg_snap snap_cpg = self._get_key_value(hpe3par_keys, 'snap_cpg', snap_cpg) # If it's still not set or empty then set it to the cpg. if not snap_cpg: snap_cpg = cpg # Check group level replication hpe3par_tiramisu = ( self._get_key_value(hpe3par_keys, 'group_replication')) # by default, set convert_to_base to False convert_to_base = self._get_boolean_key_value( hpe3par_keys, 'convert_to_base') # if provisioning is not set use thin default_prov = self.valid_prov_values[0] prov_value = self._get_key_value(hpe3par_keys, 'provisioning', default_prov) # check for valid provisioning type if prov_value not in self.valid_prov_values: err = (_("Must specify a valid provisioning type %(valid)s, " "value '%(prov)s' is invalid.") % {'valid': self.valid_prov_values, 'prov': prov_value}) LOG.error(err) raise exception.InvalidInput(reason=err) tpvv = True tdvv = False if prov_value == "full": tpvv = False elif prov_value == "dedup": tpvv = False tdvv = True if tdvv and (self.API_VERSION < DEDUP_API_VERSION): err = (_("Dedup is a valid provisioning type, " "but requires WSAPI version '%(dedup_version)s' " "version '%(version)s' is installed.") % {'dedup_version': DEDUP_API_VERSION, 'version': self.API_VERSION}) LOG.error(err) raise exception.InvalidInput(reason=err) return {'hpe3par_keys': hpe3par_keys, 'cpg': cpg, 'snap_cpg': snap_cpg, 'vvs_name': vvs_name, 'qos': qos, 'tpvv': tpvv, 'tdvv': tdvv, 'volume_type': volume_type, 'group_replication': hpe3par_tiramisu, 'convert_to_base': convert_to_base} def get_volume_settings_from_type(self, volume, host=None): """Get 3PAR volume settings given a volume. Combines type info and config settings to return a dictionary describing the 3PAR volume settings. Does some validation (CPG and persona). :param volume: :param host: Optional host to use for default pool. :returns: dict """ type_id = volume.get('volume_type_id', None) pool = None if host: pool = volume_utils.extract_host(host['host'], 'pool') else: pool = volume_utils.extract_host(volume['host'], 'pool') volume_settings = self.get_volume_settings_from_type_id(type_id, pool) # check for valid persona even if we don't use it until # attach time, this will give the end user notice that the # persona type is invalid at volume creation time self.get_persona_type(volume, volume_settings['hpe3par_keys']) return volume_settings def create_volume(self, volume, perform_replica=True): LOG.debug('CREATE VOLUME (%(disp_name)s: %(vol_name)s %(id)s on ' '%(host)s)', {'disp_name': volume['display_name'], 'vol_name': volume['name'], 'id': self._get_3par_vol_name(volume), 'host': volume['host']}) try: comments = {'volume_id': volume['id'], 'name': volume['name'], 'type': 'OpenStack'} self._add_name_id_to_comment(comments, volume) # This flag denotes group level replication on # hpe 3par. 
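# (Assumption, for orientation only: "tiramisu" here appears to be the upstream
# Cinder code name for the group replication feature; the flag below is set to
# True further down when the volume's type enables consistent group replication.)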
hpe_tiramisu = False name = volume.get('display_name', None) if name: comments['display_name'] = name # get the options supported by volume types type_info = self.get_volume_settings_from_type(volume) volume_type = type_info['volume_type'] vvs_name = type_info['vvs_name'] qos = type_info['qos'] cpg = type_info['cpg'] snap_cpg = type_info['snap_cpg'] tpvv = type_info['tpvv'] tdvv = type_info['tdvv'] flash_cache = self.get_flash_cache_policy( type_info['hpe3par_keys']) compression = self.get_compression_policy( type_info['hpe3par_keys']) consis_group_snap_type = False if volume_type is not None: consis_group_snap_type = self.is_volume_group_snap_type( volume_type) cg_id = volume.get('group_id', None) group = volume.get('group', None) if cg_id and consis_group_snap_type: vvs_name = self._get_3par_vvs_name(cg_id) type_id = volume.get('volume_type_id', None) if type_id is not None: comments['volume_type_name'] = volume_type.get('name') comments['volume_type_id'] = type_id if vvs_name is not None: comments['vvs'] = vvs_name else: comments['qos'] = qos extras = {'comment': json.dumps(comments), 'tpvv': tpvv} LOG.debug("self.API_VERSION: %(version)s", {'version': self.API_VERSION}) if self.API_VERSION < API_VERSION_2023: extras['snapCPG'] = snap_cpg # Only set the dedup option if the backend supports it. if self.API_VERSION >= DEDUP_API_VERSION: extras['tdvv'] = tdvv capacity = self._capacity_from_size(volume['size']) volume_name = self._get_3par_vol_name(volume) if compression is not None: extras['compression'] = compression self.client.createVolume(volume_name, cpg, capacity, extras) # v2 replication check replication_flag = False if consis_group_snap_type: if (self._volume_of_hpe_tiramisu_type(volume)): hpe_tiramisu = True # Add volume to remote group. if (group is not None and hpe_tiramisu): if group.is_replicated: self._check_rep_status_enabled_on_group(group) self._add_vol_to_remote_group(group, volume) replication_flag = True if qos or vvs_name or flash_cache is not None: try: self._add_volume_to_volume_set(volume, volume_name, cpg, vvs_name, qos, flash_cache) except exception.InvalidInput as ex: # Delete the volume if unable to add it to the volume set self.client.deleteVolume(volume_name) LOG.error("Exception: %s", ex) raise exception.CinderException(ex) if perform_replica: if (self._volume_of_replicated_type(volume, hpe_tiramisu_check=True) and self._do_volume_replication_setup(volume)): replication_flag = True except hpeexceptions.HTTPConflict: msg = _("Volume (%s) already exists on array") % volume_name LOG.error(msg) raise exception.Duplicate(msg) except hpeexceptions.HTTPBadRequest as ex: LOG.error("Exception: %s", ex) raise exception.Invalid(ex.get_description()) except exception.InvalidInput as ex: LOG.error("Exception: %s", ex) raise except exception.CinderException as ex: LOG.error("Exception: %s", ex) raise except Exception as ex: LOG.error("Exception: %s", ex) raise exception.CinderException(ex) return self._get_model_update(volume['host'], cpg, replication=replication_flag, provider_location=self.client.id, hpe_tiramisu=hpe_tiramisu) def _copy_volume(self, src_name, dest_name, cpg, snap_cpg=None, tpvv=True, tdvv=False, compression=None, comment=None): # Virtual volume sets are not supported with the -online option LOG.debug('Creating clone of a volume %(src)s to %(dest)s.', {'src': src_name, 'dest': dest_name}) optional = {'tpvv': tpvv, 'online': True} if snap_cpg is not None and self.API_VERSION < API_VERSION_2023: optional['snapCPG'] = snap_cpg if self.API_VERSION >= 
DEDUP_API_VERSION: optional['tdvv'] = tdvv if (compression is not None and self.API_VERSION >= COMPRESSION_API_VERSION): optional['compression'] = compression if comment: optional['comment'] = comment body = self.client.copyVolume(src_name, dest_name, cpg, optional) return body['taskid'] def get_next_word(self, s, search_string): """Return the next word. Search 's' for 'search_string', if found return the word preceding 'search_string' from 's'. """ word = re.search(search_string.strip(' ') + ' ([^ ]*)', s) return word.groups()[0].strip(' ') def _get_3par_vol_comment_value(self, vol_comment, key): comment_dict = dict(ast.literal_eval(vol_comment)) if key in comment_dict: return comment_dict[key] return None def _get_model_update(self, volume_host, cpg, replication=False, provider_location=None, hpe_tiramisu=None): """Get model_update dict to use when we select a pool. The pools implementation uses a volume['host'] suffix of :poolname. When the volume comes in with this selected pool, we sometimes use a different pool (e.g. because the type says to use a different pool). So in the several places that we do this, we need to return a model update so that the volume will have the actual pool name in the host suffix after the operation. Given a volume_host, which should (might) have the pool suffix, and given the CPG we actually chose to use, return a dict to use for a model update iff an update is needed. :param volume_host: The volume's host string. :param cpg: The actual pool (cpg) used, for example from the type. :returns: dict Model update if we need to update volume host, else None """ model_update = {} host = volume_utils.extract_host(volume_host, 'backend') host_and_pool = volume_utils.append_host(host, cpg) if volume_host != host_and_pool: # Since we selected a pool based on type, update the model. model_update['host'] = host_and_pool if replication: model_update['replication_status'] = 'enabled' if (replication or hpe_tiramisu) and provider_location: model_update['provider_location'] = provider_location if not model_update: model_update = None return model_update def _create_temp_snapshot(self, volume): """This creates a temporary snapshot of a volume. This is used by cloning a volume so that we can then issue extend volume against the original volume. """ vol_name = self._get_3par_vol_name(volume) # create a brand new uuid for the temp snap snap_uuid = uuid.uuid4().hex # this will be named tss-%s snap_name = self._get_3par_snap_name(snap_uuid, temp_snap=True) extra = {'volume_name': volume['name'], 'volume_id': volume['id']} self._add_name_id_to_comment(extra, volume) optional = {'comment': json.dumps(extra)} # let the snapshot die in an hour optional['expirationHours'] = 1 LOG.info("Creating temp snapshot %(snap)s from volume %(vol)s", {'snap': snap_name, 'vol': vol_name}) self.client.createSnapshot(snap_name, vol_name, optional) return self.client.getVolume(snap_name) def create_cloned_volume(self, volume, src_vref): try: vol_name = self._get_3par_vol_name(volume) src_vol_name = self._get_3par_vol_name(src_vref) back_up_process = False vol_chap_enabled = False hpe_tiramisu = False # Check whether a volume is ISCSI and CHAP enabled on it. 
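# (Descriptive note, inferred from the calls below: getVolumeMetaData() returns
# a dict for the named key and only its 'value' field is used here; an
# HTTPNotFound means the metadata key -- and therefore CHAP -- is not set on the
# source volume.)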
if self._client_conf['hpe3par_iscsi_chap_enabled']: try: vol_chap_enabled = self.client.getVolumeMetaData( src_vol_name, 'HPQ-cinder-CHAP-name')['value'] except hpeexceptions.HTTPNotFound: LOG.debug("CHAP is not enabled on volume %(vol)s ", {'vol': src_vref['id']}) vol_chap_enabled = False # Check whether a process is a backup if str(src_vref['status']) == 'backing-up': back_up_process = True # (i) if the sizes of the 2 volumes are the same and # (ii) this is not a backup process for ISCSI volume with chap # enabled on it and # (iii) volume is not replicated # we can do an online copy, which is a background process # on the 3PAR that makes the volume instantly available. # We can't resize a volume, while it's being copied. if volume['size'] == src_vref['size'] and not ( back_up_process and vol_chap_enabled) and not ( self._volume_of_replicated_type(volume, hpe_tiramisu_check=True)): LOG.debug("Creating a clone of volume, using online copy.") type_info = self.get_volume_settings_from_type(volume) snapshot = self._create_temp_snapshot(src_vref) cpg = type_info['cpg'] qos = type_info['qos'] vvs_name = type_info['vvs_name'] flash_cache = self.get_flash_cache_policy( type_info['hpe3par_keys']) compression_val = self.get_compression_policy( type_info['hpe3par_keys']) LOG.info("array version: %(ver)s", {'ver': self.API_VERSION}) comment_line = None if self.API_VERSION >= 40600000: # comment can be added comments = {'volume_id': volume['id'], 'name': volume['name'], 'type': 'OpenStack'} volume_type = type_info['volume_type'] type_id = volume.get('volume_type_id', None) if type_id: comments['volume_type_name'] = volume_type.get('name') comments['volume_type_id'] = type_id if vvs_name: comments['vvs'] = vvs_name else: comments['qos'] = qos display_name = volume.get('display_name', None) if display_name: comments['display_name'] = display_name comment_line = json.dumps(comments) LOG.debug("comment_line: %(comment)s", {'comment': comment_line}) # make the 3PAR copy the contents. # can't delete the original until the copy is done. self._copy_volume(snapshot['name'], vol_name, cpg=cpg, snap_cpg=type_info['snap_cpg'], tpvv=type_info['tpvv'], tdvv=type_info['tdvv'], compression=compression_val, comment=comment_line) if qos or vvs_name or flash_cache is not None: try: self._add_volume_to_volume_set( volume, vol_name, cpg, vvs_name, qos, flash_cache) except exception.InvalidInput as ex: # Delete volume if unable to add it to the volume set self.client.deleteVolume(vol_name) dbg = {'volume': vol_name, 'vvs_name': vvs_name, 'err': str(ex)} msg = _("Failed to add volume '%(volume)s' to vvset " "'%(vvs_name)s' because '%(err)s'") % dbg LOG.error(msg) raise exception.CinderException(msg) if self._volume_of_hpe_tiramisu_type(volume): hpe_tiramisu = True return self._get_model_update(volume['host'], cpg, replication=False, provider_location=self.client.id, hpe_tiramisu=hpe_tiramisu) else: # The size of the new volume is different, so we have to # copy the volume and wait. Do the resize after the copy # is complete. 
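# Rough flow of the non-online path below: create the destination volume first
# (without replication), start a physical copyVolume() task, wait for the 3PAR
# background task to reach TASK_DONE, then perform the v2 replication setup if
# the volume type requires it.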
LOG.debug("Creating a clone of volume, using non-online copy.") # we first have to create the destination volume model_update = self.create_volume(volume, perform_replica=False) optional = {'priority': 1} body = self.client.copyVolume(src_vol_name, vol_name, None, optional=optional) task_id = body['taskid'] task_status = self._wait_for_task_completion(task_id) if task_status['status'] is not self.client.TASK_DONE: dbg = {'status': task_status, 'id': volume['id']} msg = _('Copy volume task failed: create_cloned_volume ' 'id=%(id)s, status=%(status)s.') % dbg raise exception.CinderException(msg) else: LOG.debug('Copy volume completed: create_cloned_volume: ' 'id=%s.', volume['id']) # v2 replication check LOG.debug("v2 replication check") replication_flag = False if (self._volume_of_replicated_type(volume, hpe_tiramisu_check=True) and self._do_volume_replication_setup(volume)): replication_flag = True type_info = self.get_volume_settings_from_type(volume) cpg = type_info['cpg'] model_update = self._get_model_update( volume['host'], cpg, replication=True, provider_location=self.client.id, hpe_tiramisu=hpe_tiramisu) LOG.debug("replication_flag: %(flag)s", {'flag': replication_flag}) return model_update except hpeexceptions.HTTPForbidden: raise exception.NotAuthorized() except hpeexceptions.HTTPNotFound: raise exception.NotFound() except Exception as ex: LOG.error("Exception: %s", ex) raise exception.CinderException(ex) def delete_volume(self, volume): vol_id = volume.id name_id = volume.get('_name_id') LOG.debug("DELETE volume vol_id: %(vol_id)s, name_id: %(name_id)s", {'vol_id': vol_id, 'name_id': name_id}) @utils.retry(exception.VolumeIsBusy, interval=2, retries=10) def _try_remove_volume(volume_name): try: self.client.deleteVolume(volume_name) except Exception: msg = _("The volume is currently busy on the 3PAR " "and cannot be deleted at this time. " "You can try again later.") raise exception.VolumeIsBusy(message=msg) # v2 replication check # If the volume type is replication enabled, we want to call our own # method of deconstructing the volume and its dependencies if self._volume_of_replicated_type(volume, hpe_tiramisu_check=True): LOG.debug("volume is of replicated_type") replication_status = volume.get('replication_status', None) LOG.debug("replication_status: %(status)s", {'status': replication_status}) if replication_status: if replication_status == "failed-over": self._delete_replicated_failed_over_volume(volume) else: self._do_volume_replication_destroy(volume) return volume_name = self._get_3par_vol_name(volume) # during retype/migrate if (self._volume_of_replicated_type(volume, hpe_tiramisu_check=True) and volume['migration_status'] == 'deleting'): # don't use current osv_name (which was from name_id) # get new osv_name from id LOG.debug("get osv_name from volume id") volume_name = self._encode_name(volume.id) volume_name = "osv-" + volume_name LOG.debug("volume_name: %(name)s", {'name': volume_name}) try: # Try and delete the volume, it might fail here because # the volume is part of a volume set which will have the # volume set name in the error. try: self.client.deleteVolume(volume_name) except hpeexceptions.HTTPBadRequest as ex: if ex.get_code() == 29: if self.client.isOnlinePhysicalCopy(volume_name): LOG.debug("Found an online copy for %(volume)s", {'volume': volume_name}) # the volume is in process of being cloned. # stopOnlinePhysicalCopy will also delete # the volume once it stops the copy. 
self.client.stopOnlinePhysicalCopy(volume_name) else: LOG.error("Exception: %s", ex) raise else: LOG.error("Exception: %s", ex) raise except hpeexceptions.HTTPConflict as ex: if ex.get_code() == 34: # This is a special case which means the # volume is part of a volume set. self._delete_vvset(volume) self.client.deleteVolume(volume_name) elif ex.get_code() == 151: if self.client.isOnlinePhysicalCopy(volume_name): LOG.debug("Found an online copy for %(volume)s", {'volume': volume_name}) # the volume is in process of being cloned. # stopOnlinePhysicalCopy will also delete # the volume once it stops the copy. self.client.stopOnlinePhysicalCopy(volume_name) else: # the volume is being operated on in a background # task on the 3PAR. _try_remove_volume(volume_name) elif (ex.get_code() == 32): # Error 32 means that the volume has children # see if we have any temp snapshots snaps = self.client.getVolumeSnapshots(volume_name) for snap in snaps: if snap.startswith('tss-'): # looks like we found a temp snapshot. LOG.info( "Found a temporary snapshot %(name)s", {'name': snap}) try: self.client.deleteVolume(snap) except hpeexceptions.HTTPNotFound: # if the volume is gone, it's as good as a # successful delete pass except Exception: msg = _("Volume has a temporary snapshot that " "can't be deleted at this time.") raise exception.VolumeIsBusy(message=msg) try: self.delete_volume(volume) except Exception: msg = _("Volume has children and cannot be deleted!") raise exception.VolumeIsBusy(message=msg) else: LOG.error("Exception: %s", ex) raise exception.VolumeIsBusy(message=ex.get_description()) except hpeexceptions.HTTPNotFound as ex: # We'll let this act as if it worked # it helps clean up the cinder entries. LOG.warning("Delete volume id not found. Removing from " "cinder: %(id)s Ex: %(msg)s", {'id': volume['id'], 'msg': ex}) except hpeexceptions.HTTPForbidden as ex: LOG.error("Exception: %s", ex) raise exception.NotAuthorized(ex.get_description()) except hpeexceptions.HTTPConflict as ex: LOG.error("Exception: %s", ex) raise exception.VolumeIsBusy(message=ex.get_description()) except Exception as ex: LOG.error("Exception: %s", ex) raise exception.CinderException(ex) def create_volume_from_snapshot(self, volume, snapshot, snap_name=None, vvs_name=None): """Creates a volume from a snapshot.""" LOG.debug("Create Volume from Snapshot\n%(vol_name)s\n%(ss_name)s", {'vol_name': pprint.pformat(volume['display_name']), 'ss_name': pprint.pformat(snapshot['display_name'])}) model_update = {} try: if not snap_name: snap_name = self._get_3par_snap_name(snapshot['id']) volume_name = self._get_3par_vol_name(volume) extra = {'volume_id': volume['id'], 'snapshot_id': snapshot['id']} self._add_name_id_to_comment(extra, volume) type_id = volume.get('volume_type_id', None) hpe3par_keys, qos, _volume_type, vvs = self.get_type_info( type_id) if vvs: vvs_name = vvs name = volume.get('display_name', None) if name: extra['display_name'] = name description = volume.get('display_description', None) if description: extra['description'] = description optional = {'comment': json.dumps(extra), 'readOnly': False} self.client.createSnapshot(volume_name, snap_name, optional) # by default, set convert_to_base to False convert_to_base = self._get_boolean_key_value( hpe3par_keys, 'convert_to_base') LOG.debug("convert_to_base: %(convert)s", {'convert': convert_to_base}) growth_size = volume['size'] - snapshot['volume_size'] LOG.debug("growth_size: %(size)s", {'size': growth_size}) if growth_size > 0 or convert_to_base: # Convert snapshot 
volume to base volume type LOG.debug('Converting to base volume type: %(id)s.', {'id': volume['id']}) model_update = self._convert_to_base_volume(volume) else: LOG.debug("volume is created as child of snapshot") if growth_size > 0: try: growth_size_mib = growth_size * units.Gi / units.Mi LOG.debug('Growing volume: %(id)s by %(size)s GiB.', {'id': volume['id'], 'size': growth_size}) self.client.growVolume(volume_name, growth_size_mib) except Exception as ex: LOG.error("Error extending volume %(id)s. " "Ex: %(ex)s", {'id': volume['id'], 'ex': ex}) # Delete the volume if unable to grow it self.client.deleteVolume(volume_name) raise exception.CinderException(ex) # Check for flash cache setting in extra specs flash_cache = self.get_flash_cache_policy(hpe3par_keys) if qos or vvs_name or flash_cache is not None: cpg_names = self._get_key_value( hpe3par_keys, 'cpg', self._client_conf['hpe3par_cpg']) try: self._add_volume_to_volume_set(volume, volume_name, cpg_names[0], vvs_name, qos, flash_cache) except Exception as ex: # Delete the volume if unable to add it to the volume set self.client.deleteVolume(volume_name) LOG.error("Exception: %s", ex) raise exception.CinderException(ex) if self._volume_of_hpe_tiramisu_type(volume): model_update['provider_location'] = self.client.id # v2 replication check if (self._volume_of_replicated_type(volume, hpe_tiramisu_check=True) and self._do_volume_replication_setup(volume)): model_update['replication_status'] = 'enabled' model_update['provider_location'] = self.client.id except hpeexceptions.HTTPForbidden as ex: LOG.error("Exception: %s", ex) raise exception.NotAuthorized() except hpeexceptions.HTTPNotFound as ex: LOG.error("Exception: %s", ex) raise exception.NotFound() except Exception as ex: LOG.error("Exception: %s", ex) raise exception.CinderException(ex) return model_update def create_snapshot(self, snapshot): LOG.debug("Create Snapshot\n%s", pprint.pformat(snapshot)) try: snap_name = self._get_3par_snap_name(snapshot['id']) # Don't use the "volume_id" from the snapshot directly in case the # volume has been migrated and uses a different ID in the backend. # This may trigger OVO lazy loading. Use dict compatibility to # avoid changing all the unit tests. vol_name = self._get_3par_vol_name(snapshot['volume']) extra = {'volume_name': snapshot['volume_name'], 'volume_id': snapshot.get('volume_id')} self._add_name_id_to_comment(extra, snapshot['volume']) try: extra['display_name'] = snapshot['display_name'] except AttributeError: pass try: extra['description'] = snapshot['display_description'] except AttributeError: pass optional = {'comment': json.dumps(extra), 'readOnly': True} if self.config.hpe3par_snapshot_expiration: optional['expirationHours'] = ( int(self.config.hpe3par_snapshot_expiration)) if self.config.hpe3par_snapshot_retention: optional['retentionHours'] = ( int(self.config.hpe3par_snapshot_retention)) self.client.createSnapshot(snap_name, vol_name, optional) except hpeexceptions.HTTPForbidden as ex: LOG.error("Exception: %s", ex) raise exception.NotAuthorized() except hpeexceptions.HTTPNotFound as ex: LOG.error("Exception: %s", ex) raise exception.NotFound() def migrate_volume(self, volume, host): """Migrate directly if source and dest are managed by same storage. :param volume: A dictionary describing the volume to migrate :param host: A dictionary describing the host to migrate to, where host['host'] is its name, and host['capabilities'] is a dictionary of its reported capabilities. 
:returns: (False, None) if the driver does not support migration, (True, model_update) if successful """ dbg = {'id': volume['id'], 'host': host['host'], 'status': volume['status']} LOG.debug('enter: migrate_volume: id=%(id)s, host=%(host)s, ' 'status=%(status)s.', dbg) ret = False, None if volume['status'] in ['available', 'in-use']: volume_type = None if volume['volume_type_id']: volume_type = self._get_volume_type(volume['volume_type_id']) try: ret = self.retype(volume, volume_type, None, host) except Exception as e: LOG.info('3PAR driver cannot perform migration. ' 'Retype exception: %s', e) LOG.debug('leave: migrate_volume: id=%(id)s, host=%(host)s, ' 'status=%(status)s.', dbg) dbg_ret = {'supported': ret[0], 'model_update': ret[1]} LOG.debug('migrate_volume result: %(supported)s, %(model_update)s', dbg_ret) return ret def _rename_migrated(self, volume, dest_volume): """Rename the destination volume after a migration. Returns whether the destination volume has the name matching the source volume or not. That way we know whether we need to set the _name_id or not. """ def log_error(vol_type, error, src, dest, rename_name=None, original_name=None): LOG.error("Changing the %(vol_type)s volume name from %(src)s to " "%(dest)s failed because %(reason)s", {'vol_type': vol_type, 'src': src, 'dest': dest, 'reason': error}) if rename_name: original_name = original_name or dest # Don't fail the migration, but help the user fix the # source volume stuck in error_deleting. LOG.error("Migration will fail to delete the original volume. " "It must be manually renamed from %(rename_name)s to" " %(original_name)s in the backend, and then we " "have to tell cinder to delete volume %(vol_id)s", {'rename_name': rename_name, 'original_name': original_name, 'vol_id': dest_volume['id']}) original_volume_renamed = False # We don't need to rename the source volume if it uses a _name_id, # since the id we want to use to rename the new volume is available. if volume['id'] == (volume.get('_name_id') or volume['id']): original_name = self._get_3par_vol_name(volume) temp_name = self._get_3par_vol_name(volume, temp_vol=True) # In case the original volume is on the same backend, try # renaming it to a temporary name. try: volumeTempMods = {'newName': temp_name} self.client.modifyVolume(original_name, volumeTempMods) original_volume_renamed = True except hpeexceptions.HTTPNotFound: pass except Exception as e: log_error('original', e, original_name, temp_name) return False # Change the destination volume name to the source's ID name current_name = self._get_3par_vol_name(dest_volume) volume_id_name = self._get_3par_vol_name(volume['id']) try: # After this call the volume manager will call # finish_volume_migration and swap the fields, so we want to # have the right info on the comments if we succeed in renaming # the volumes in the backend. new_comment = self._get_updated_comment(current_name, volume_id=volume['id'], _name_id=None) volumeMods = {'newName': volume_id_name, 'comment': new_comment} self.client.modifyVolume(current_name, volumeMods) LOG.info("Current volume changed from %(cur)s to %(orig)s", {'cur': current_name, 'orig': volume_id_name}) except Exception as e: if original_volume_renamed: _name = temp_name else: _name = original_name = None log_error('migrating', e, current_name, volume_id_name, _name, original_name) return False # If it was renamed, rename the original volume again to the # migrated volume's name (effectively swapping the names). 
If # this operation fails, the newly migrated volume is OK but the # original volume (with the temp name) may need to be manually # cleaned up on the backend. if original_volume_renamed: try: old_comment = self._get_updated_comment( original_name, volume_id=dest_volume['id'], _name_id=volume.get('_name_id')) volumeCurrentMods = {'newName': current_name, 'comment': old_comment} self.client.modifyVolume(temp_name, volumeCurrentMods) except Exception as e: log_error('original', e, temp_name, current_name, temp_name) return True def _rename_migrated_vvset(self, src_volume, dest_volume): """Rename the vvsets after a migration. """ vvs_name_src = self._get_3par_vvs_name(src_volume['id']) vvs_name_dest = self._get_3par_vvs_name(dest_volume['id']) # There can be parallel execution. Ensure that temp_vvs_name is unique # eg. if vvs_name_src is: vvs-DK3sEwkPTCqVHdHKHtwZBA # then temp_vvs_name is : tos-DK3sEwkPTCqVHdHKHtwZBA temp_vvs_name = 'tos-' + vvs_name_src[4:] try: self.client.modifyVolumeSet(vvs_name_dest, newName=temp_vvs_name) LOG.debug("Renamed vvset %(old)s to %(new)s", {'old': vvs_name_dest, 'new': temp_vvs_name}) except Exception as ex: LOG.error("exception: %(details)s", {'details': str(ex)}) try: self.client.modifyVolumeSet(vvs_name_src, newName=vvs_name_dest) LOG.debug("Renamed vvset %(old)s to %(new)s", {'old': vvs_name_src, 'new': vvs_name_dest}) except Exception as ex: LOG.error("exception: %(details)s", {'details': str(ex)}) try: self.client.modifyVolumeSet(temp_vvs_name, newName=vvs_name_src) LOG.debug("Renamed vvset %(old)s to %(new)s", {'old': temp_vvs_name, 'new': vvs_name_src}) except Exception as ex: LOG.error("exception: %(details)s", {'details': str(ex)}) def update_migrated_volume(self, context, volume, new_volume, original_volume_status): """Rename the new (temp) volume to it's original name. This method tries to rename the new volume to it's original name after the migration has completed. """ LOG.debug("Update volume name for %(id)s", {'id': new_volume['id']}) # For available volumes we'll try renaming the destination volume to # match the id of the source volume. if original_volume_status == 'available': new_volume_renamed = self._rename_migrated(volume, new_volume) else: new_volume_renamed = False if new_volume_renamed: name_id = None # NOTE: I think this will break with replicated volumes. provider_location = None else: # the backend can't change the name. 
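# Illustrative sketch (hypothetical values): when the rename is skipped or
# fails, this method ends up returning something like
#   {'_name_id': '<id of the new/temp volume>',
#    'provider_location': '<provider_location of the new volume>'}
# so the Cinder volume record keeps pointing at the backend volume that was
# created under the new (temporary) id.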
name_id = new_volume['_name_id'] or new_volume['id'] provider_location = new_volume['provider_location'] # Update the comment in the backend to reflect the _name_id current_name = self._get_3par_vol_name(new_volume) self._update_comment(current_name, volume_id=volume['id'], _name_id=name_id) if new_volume_renamed: type_info = self.get_volume_settings_from_type(volume) qos = type_info['qos'] if qos: # rename the vvsets as per volume names self._rename_migrated_vvset(volume, new_volume) return {'_name_id': name_id, 'provider_location': provider_location} @staticmethod def _add_name_id_to_comment(comment, volume): name_id = volume.get('_name_id') if name_id: comment['_name_id'] = name_id def _get_updated_comment(self, vol_name, **values): vol = self.client.getVolume(vol_name) comment = json.loads(vol['comment']) if vol.get('comment') else {} comment.update(values) def _update_comment(self, vol_name, **values): """Update key-value pairs on the comment of a volume in the backend.""" if not values: return comment = self._get_updated_comment(vol_name, **values) self.client.modifyVolume(vol_name, {'comment': json.dumps(comment)}) def _wait_for_task_completion(self, task_id): """This waits for a 3PAR background task complete or fail. This looks for a task to get out of the 'active' state. """ # Wait for the physical copy task to complete def _wait_for_task(task_id): status = self.client.getTask(task_id) LOG.debug("3PAR Task id %(id)s status = %(status)s", {'id': task_id, 'status': status['status']}) if status['status'] is not self.client.TASK_ACTIVE: self._task_status = status raise loopingcall.LoopingCallDone() self._task_status = None timer = loopingcall.FixedIntervalLoopingCall( _wait_for_task, task_id) timer.start(interval=1).wait() return self._task_status def _convert_to_base_volume(self, volume, new_cpg=None): try: type_info = self.get_volume_settings_from_type(volume) if new_cpg: cpg = new_cpg else: cpg = type_info['cpg'] # Change the name such that it is unique since 3PAR # names must be unique across all CPGs volume_name = self._get_3par_vol_name(volume) temp_vol_name = volume_name.replace("osv-", "omv-") compression = self.get_compression_policy( type_info['hpe3par_keys']) # If volume (osv-) has snapshot, while converting the volume # to base volume (omv-), snapshot cannot be transferred to # new base volume (omv-) i.e it remain with volume (osv-). # So error out for such volume. snap_list = self.client.getVolumeSnapshots(volume_name) if snap_list: snap_str = ",".join(snap_list) msg = (_("Volume %(name)s has dependent snapshots: %(snap)s." " Either flatten or remove the dependent snapshots:" " %(snap)s for the conversion of volume %(name)s to" " succeed." 
% {'name': volume_name, 'snap': snap_str})) raise exception.VolumeIsBusy(message=msg) # Create a physical copy of the volume task_id = self._copy_volume(volume_name, temp_vol_name, cpg, cpg, type_info['tpvv'], type_info['tdvv'], compression) LOG.debug('Copy volume scheduled: convert_to_base_volume: ' 'id=%s.', volume['id']) task_status = self._wait_for_task_completion(task_id) if task_status['status'] is not self.client.TASK_DONE: dbg = {'status': task_status, 'id': volume['id']} msg = _('Copy volume task failed: convert_to_base_volume: ' 'id=%(id)s, status=%(status)s.') % dbg raise exception.CinderException(msg) else: LOG.debug('Copy volume completed: convert_to_base_volume: ' 'id=%s.', volume['id']) comment = self._get_3par_vol_comment(volume_name) if comment: self.client.modifyVolume(temp_vol_name, {'comment': comment}) LOG.debug('Assigned the comment: convert_to_base_volume: ' 'id=%s.', volume['id']) # Delete source volume (osv-) after the copy is complete self.client.deleteVolume(volume_name) LOG.debug('Delete src volume completed: convert_to_base_volume: ' 'id=%s.', volume['id']) # Rename the new volume (omv-) to the original name (osv-) self.client.modifyVolume(temp_vol_name, {'newName': volume_name}) LOG.debug('Volume rename completed: convert_to_base_volume: ' 'id=%s.', volume['id']) LOG.info('Completed: convert_to_base_volume: ' 'id=%s.', volume['id']) except hpeexceptions.HTTPConflict: msg = _("Volume (%s) already exists on array.") % temp_vol_name LOG.error(msg) raise exception.Duplicate(msg) except hpeexceptions.HTTPBadRequest as ex: LOG.error("Exception: %s", ex) raise exception.Invalid(ex.get_description()) except exception.CinderException as ex: LOG.error("Exception: %s", ex) raise except Exception as ex: LOG.error("Exception: %s", ex) raise exception.CinderException(ex) return self._get_model_update(volume['host'], cpg) def delete_snapshot(self, snapshot): LOG.debug("Delete Snapshot id %(id)s %(name)s", {'id': snapshot['id'], 'name': pprint.pformat(snapshot)}) try: snap_name = self._get_3par_snap_name(snapshot['id']) self.client.deleteVolume(snap_name) except hpeexceptions.HTTPForbidden as ex: LOG.error("Exception: %s", ex) raise exception.NotAuthorized() except hpeexceptions.HTTPNotFound as ex: # We'll let this act as if it worked # it helps clean up the cinder entries. LOG.warning("Delete Snapshot id not found. 
Removing from " "cinder: %(id)s Ex: %(msg)s", {'id': snapshot['id'], 'msg': ex}) except hpeexceptions.HTTPConflict as ex: if (ex.get_code() == 32): # Error 32 means that the snapshot has children # see if we have any temp snapshots snaps = self.client.getVolumeSnapshots(snap_name) for snap in snaps: if snap.startswith('tss-'): LOG.info( "Found a temporary snapshot %(name)s", {'name': snap}) try: self.client.deleteVolume(snap) except hpeexceptions.HTTPNotFound: # if the volume is gone, it's as good as a # successful delete pass except Exception: msg = _("Snapshot has a temporary snapshot that " "can't be deleted at this time.") raise exception.SnapshotIsBusy(message=msg) if snap.startswith('osv-'): LOG.info( "Found a volume %(name)s", {'name': snap}) # Get details of original volume v1 # These details would be required to form v2 s1_detail = self.client.getVolume(snap_name) v1_name = s1_detail.get('copyOf') v1 = self.client.getVolume(v1_name) # Get details of volume v2, # which is child of snapshot s1 v2_name = snap v2 = self.client.getVolume(v2_name) # Update v2 object as required for # _convert_to_base function v2['volume_type_id'] = ( self._get_3par_vol_comment_value( v1['comment'], 'volume_type_id')) v2['id'] = self._get_3par_vol_comment_value( v2['comment'], 'volume_id') v2['_name_id'] = self._get_3par_vol_comment_value( v2['comment'], '_name_id') v2['host'] = '#' + v1['userCPG'] LOG.debug('Converting to base volume type: ' '%(id)s.', {'id': v2['id']}) self._convert_to_base_volume(v2) try: self.client.deleteVolume(snap_name) except Exception: msg = _("Snapshot has children and cannot be deleted!") raise exception.SnapshotIsBusy(message=msg) else: LOG.error("Exception: %s", ex) raise exception.SnapshotIsBusy(message=ex.get_description()) def _get_3par_hostname_from_wwn_iqn(self, wwns, iqns): if wwns is not None and not isinstance(wwns, list): wwns = [wwns] if iqns is not None and not isinstance(iqns, list): iqns = [iqns] out = self.client.getHosts() hosts = out['members'] for host in hosts: if 'iSCSIPaths' in host and iqns is not None: iscsi_paths = host['iSCSIPaths'] for iscsi in iscsi_paths: for iqn in iqns: if iqn == iscsi['name']: return host['name'] if 'FCPaths' in host and wwns is not None: fc_paths = host['FCPaths'] for fc in fc_paths: for wwn in wwns: if wwn.upper() == fc['wwn'].upper(): return host['name'] def terminate_connection(self, volume, hostname, wwn=None, iqn=None, remote_client=None): """Driver entry point to detach a volume from an instance.""" if volume.multiattach: attachment_list = volume.volume_attachment LOG.debug("Volume attachment list: %(atl)s", {'atl': attachment_list}) try: attachment_list = attachment_list.objects except AttributeError: pass if attachment_list is not None and len(attachment_list) > 1: # There are two possibilities: the instances can reside: # [1] either on same host. # [2] or on different hosts. # # case [1]: # In such case, behaviour is same as earlier i.e vlun is # not deleted now i.e skip remainder of terminate volume # connection. # # case [2]: # In such case, vlun of that host on 3par array should # be deleted now. Otherwise, it remains as stale entry on # 3par array; which later leads to error during volume # deletion. 
same_host = False num_hosts = len(attachment_list) all_hostnames = [] all_hostnames.append(hostname) count = 0 for i in range(num_hosts): hostname_i = str(attachment_list[i].attached_host) if hostname == hostname_i: # current host count = count + 1 if count > 1: # volume attached to multiple instances on # current host same_host = True else: # different host all_hostnames.append(hostname_i) if same_host: LOG.info("Volume %(volume)s is attached to multiple " "instances on same host %(host_name)s, " "skip terminate volume connection", {'volume': volume.name, 'host_name': volume.host.split('@')[0]}) return else: hostnames = ",".join(all_hostnames) LOG.info("Volume %(volume)s is attached to instances " "on multiple hosts %(hostnames)s. Proceed with " "deletion of vlun on this host.", {'volume': volume.name, 'hostnames': hostnames}) # does 3par know this host by a different name? hosts = None if wwn: hosts = self.client.queryHost(wwns=wwn) elif iqn: hosts = self.client.queryHost(iqns=[iqn]) if hosts is not None: if hosts and hosts['members'] and 'name' in hosts['members'][0]: hostname = hosts['members'][0]['name'] try: self.delete_vlun(volume, hostname, wwn=wwn, iqn=iqn, remote_client=remote_client) return except hpeexceptions.HTTPNotFound as e: if 'host does not exist' in e.get_description(): # If a host is failed-over, we want to allow the detach to # 'succeed' when it cannot find the host. We can simply # return out of the terminate connection in order for things # to be updated correctly. if self._active_backend_id: LOG.warning("Because the host is currently in a " "failed-over state, the volume will not " "be properly detached from the primary " "array. The detach will be considered a " "success as far as Cinder is concerned. " "The volume can now be attached to the " "secondary target.") return else: if hosts is None: # In case of 'force detach', hosts is None LOG.exception("Exception: %s", e) raise else: # use the wwn to see if we can find the hostname hostname = self._get_3par_hostname_from_wwn_iqn( wwn, iqn) # no 3par host, re-throw if hostname is None: LOG.exception("Exception: %s", e) raise else: # not a 'host does not exist' HTTPNotFound exception, re-throw LOG.error("Exception: %s", e) raise # try again with name retrieved from 3par self.delete_vlun(volume, hostname, wwn=wwn, iqn=iqn, remote_client=remote_client) def build_nsp(self, portPos): return '%s:%s:%s' % (portPos['node'], portPos['slot'], portPos['cardPort']) def build_portPos(self, nsp): split = nsp.split(":") portPos = {} portPos['node'] = int(split[0]) portPos['slot'] = int(split[1]) portPos['cardPort'] = int(split[2]) return portPos def tune_vv(self, old_tpvv, new_tpvv, old_tdvv, new_tdvv, old_cpg, new_cpg, volume_name, new_compression): """Tune the volume to change the userCPG and/or provisioningType. The volume will be modified/tuned/converted to the new userCPG and provisioningType, as needed. TaskWaiter is used to make this function wait until the 3PAR task is no longer active. When the task is no longer active, then it must either be done or it is in a state that we need to treat as an error. 
""" compression = False if new_compression is not None: compression = new_compression if old_tpvv == new_tpvv and old_tdvv == new_tdvv: if new_cpg != old_cpg: LOG.info("Modifying %(volume_name)s userCPG " "from %(old_cpg)s" " to %(new_cpg)s", {'volume_name': volume_name, 'old_cpg': old_cpg, 'new_cpg': new_cpg}) _response, body = self.client.modifyVolume( volume_name, {'action': 6, 'tuneOperation': 1, 'userCPG': new_cpg}) task_id = body['taskid'] status = self.TaskWaiter(self.client, task_id).wait_for_task() if status['status'] is not self.client.TASK_DONE: msg = (_('Tune volume task stopped before it was done: ' 'volume_name=%(volume_name)s, ' 'task-status=%(status)s.') % {'status': status, 'volume_name': volume_name}) raise exception.VolumeBackendAPIException(msg) else: if new_tpvv: cop = self.CONVERT_TO_THIN LOG.info("Converting %(volume_name)s to thin provisioning " "with userCPG=%(new_cpg)s", {'volume_name': volume_name, 'new_cpg': new_cpg}) elif new_tdvv: cop = self.CONVERT_TO_DEDUP LOG.info("Converting %(volume_name)s to thin dedup " "provisioning with userCPG=%(new_cpg)s", {'volume_name': volume_name, 'new_cpg': new_cpg}) else: cop = self.CONVERT_TO_FULL LOG.info("Converting %(volume_name)s to full provisioning " "with userCPG=%(new_cpg)s", {'volume_name': volume_name, 'new_cpg': new_cpg}) response = None body = None try: if self.API_VERSION < COMPRESSION_API_VERSION: response, body = self.client.modifyVolume( volume_name, {'action': 6, 'tuneOperation': 1, 'userCPG': new_cpg, 'conversionOperation': cop}) else: LOG.debug("compression: %(compression)s", {'compression': compression}) body = self.client.tuneVolume( volume_name, 1, {'action': 6, 'userCPG': new_cpg, 'compression': compression, 'conversionOperation': cop}) LOG.debug("body: %(body)s", {'body': body}) except hpeexceptions.HTTPBadRequest as ex: if ex.get_code() == 40 and "keepVV" in str(ex): # Cannot retype with snapshots because we don't want to # use keepVV and have straggling volumes. Log additional # info and then raise. LOG.info("tunevv failed because the volume '%s' " "has snapshots.", volume_name) raise task_id = body['taskid'] status = self.TaskWaiter(self.client, task_id).wait_for_task() if status['status'] is not self.client.TASK_DONE: msg = (_('Tune volume task stopped before it was done: ' 'volume_name=%(volume_name)s, ' 'task-status=%(status)s.') % {'status': status, 'volume_name': volume_name}) raise exception.VolumeBackendAPIException(msg) def _retype_pre_checks(self, volume, host, new_persona, old_cpg, new_cpg, new_snap_cpg): """Test retype parameters before making retype changes. Do pre-retype parameter validation. These checks will raise an exception if we should not attempt this retype. """ if new_persona: self.validate_persona(new_persona) if host is not None: (host_type, host_id, _host_cpg) = ( host['capabilities']['location_info']).split(':') if not (host_type == 'HPE3PARDriver'): reason = (_("Cannot retype from HPE3PARDriver to %s.") % host_type) raise exception.InvalidHost(reason=reason) sys_info = self.client.getStorageSystemInfo() if not (host_id == sys_info['serialNumber']): reason = (_("Cannot retype from one 3PAR array to another.")) raise exception.InvalidHost(reason=reason) # Validate new_snap_cpg. A white-space snapCPG will fail eventually, # but we'd prefer to fail fast -- if this ever happens. if not new_snap_cpg or new_snap_cpg.isspace(): reason = (_("Invalid new snapCPG name for retype. 
" "new_snap_cpg='%s'.") % new_snap_cpg) raise exception.InvalidInput(reason) # Check to make sure CPGs are in the same domain domain = self.get_domain(old_cpg) if domain != self.get_domain(new_cpg): reason = (_('Cannot retype to a CPG in a different domain.')) raise Invalid3PARDomain(reason) if domain != self.get_domain(new_snap_cpg): reason = (_('Cannot retype to a snap CPG in a different domain.')) raise Invalid3PARDomain(reason) def _retype(self, volume, volume_name, new_type_name, new_type_id, host, new_persona, old_cpg, new_cpg, old_snap_cpg, new_snap_cpg, old_tpvv, new_tpvv, old_tdvv, new_tdvv, old_vvs, new_vvs, old_qos, new_qos, old_flash_cache, new_flash_cache, old_comment, new_compression): action = "volume:retype" self._retype_pre_checks(volume, host, new_persona, old_cpg, new_cpg, new_snap_cpg) flow_name = action.replace(":", "_") + "_api" retype_flow = linear_flow.Flow(flow_name) # Keep this linear and do the big tunevv last. Everything leading # up to that is reversible, but we'd let the 3PAR deal with tunevv # errors on its own. retype_flow.add( ModifyVolumeTask(action), ModifySpecsTask(action), TuneVolumeTask(action), ReplicateVolumeTask(action)) taskflow.engines.run( retype_flow, store={'common': self, 'volume_name': volume_name, 'volume': volume, 'old_tpvv': old_tpvv, 'new_tpvv': new_tpvv, 'old_tdvv': old_tdvv, 'new_tdvv': new_tdvv, 'old_cpg': old_cpg, 'new_cpg': new_cpg, 'old_snap_cpg': old_snap_cpg, 'new_snap_cpg': new_snap_cpg, 'old_vvs': old_vvs, 'new_vvs': new_vvs, 'old_qos': old_qos, 'new_qos': new_qos, 'old_flash_cache': old_flash_cache, 'new_flash_cache': new_flash_cache, 'new_type_name': new_type_name, 'new_type_id': new_type_id, 'old_comment': old_comment, 'new_compression': new_compression }) def _retype_from_old_to_new(self, volume, new_type, old_volume_settings, host): """Convert the volume to be of the new type. Given old type settings. Returns True if the retype was successful. Uses taskflow to revert changes if errors occur. :param volume: A dictionary describing the volume to retype :param new_type: A dictionary describing the volume type to convert to :param old_volume_settings: Volume settings describing the old type. :param host: A dictionary describing the host, where host['host'] is its name, and host['capabilities'] is a dictionary of its reported capabilities. Host validation is just skipped if host is None. 
""" volume_name = self._get_3par_vol_name(volume) new_type_name = None new_type_id = None if new_type: new_type_name = new_type['name'] new_type_id = new_type['id'] pool = None if host: pool = volume_utils.extract_host(host['host'], 'pool') else: pool = volume_utils.extract_host(volume['host'], 'pool') new_volume_settings = self.get_volume_settings_from_type_id( new_type_id, pool) new_cpg = new_volume_settings['cpg'] new_snap_cpg = new_volume_settings['snap_cpg'] new_tpvv = new_volume_settings['tpvv'] new_tdvv = new_volume_settings['tdvv'] new_qos = new_volume_settings['qos'] new_vvs = new_volume_settings['vvs_name'] new_persona = None new_hpe3par_keys = new_volume_settings['hpe3par_keys'] if 'persona' in new_hpe3par_keys: new_persona = new_hpe3par_keys['persona'] new_flash_cache = self.get_flash_cache_policy(new_hpe3par_keys) # it will return None / True /False$ new_compression = self.get_compression_policy(new_hpe3par_keys) old_qos = old_volume_settings['qos'] old_vvs = old_volume_settings['vvs_name'] old_hpe3par_keys = old_volume_settings['hpe3par_keys'] old_flash_cache = self.get_flash_cache_policy(old_hpe3par_keys) # Get the current volume info because we can get in a bad state # if we trust that all the volume type settings are still the # same settings that were used with this volume. old_volume_info = self.client.getVolume(volume_name) old_tpvv = old_volume_info['provisioningType'] == self.THIN old_tdvv = old_volume_info['provisioningType'] == self.DEDUP old_cpg = old_volume_info['userCPG'] old_comment = old_volume_info.get('comment') old_snap_cpg = None if 'snapCPG' in old_volume_info: old_snap_cpg = old_volume_info['snapCPG'] LOG.debug("retype old_volume_info=%s", old_volume_info) LOG.debug("retype old_volume_settings=%s", old_volume_settings) LOG.debug("retype new_volume_settings=%s", new_volume_settings) self._retype(volume, volume_name, new_type_name, new_type_id, host, new_persona, old_cpg, new_cpg, old_snap_cpg, new_snap_cpg, old_tpvv, new_tpvv, old_tdvv, new_tdvv, old_vvs, new_vvs, old_qos, new_qos, old_flash_cache, new_flash_cache, old_comment, new_compression) if host: return True, self._get_model_update(host['host'], new_cpg) else: return True, self._get_model_update(volume['host'], new_cpg) def _retype_from_no_type(self, volume, new_type): """Convert the volume to be of the new type. Starting from no type. Returns True if the retype was successful. Uses taskflow to revert changes if errors occur. :param volume: A dictionary describing the volume to retype. Except the volume-type is not used here. This method uses None. :param new_type: A dictionary describing the volume type to convert to """ pool = volume_utils.extract_host(volume['host'], 'pool') none_type_settings = self.get_volume_settings_from_type_id(None, pool) return self._retype_from_old_to_new(volume, new_type, none_type_settings, None) def retype(self, volume, new_type, diff, host): """Convert the volume to be of the new type. Returns True if the retype was successful. Uses taskflow to revert changes if errors occur. :param volume: A dictionary describing the volume to retype :param new_type: A dictionary describing the volume type to convert to :param diff: A dictionary with the difference between the two types :param host: A dictionary describing the host, where host['host'] is its name, and host['capabilities'] is a dictionary of its reported capabilities. Host validation is just skipped if host is None. 
""" LOG.debug(("enter: retype: id=%(id)s, new_type=%(new_type)s," "diff=%(diff)s, host=%(host)s"), {'id': volume['id'], 'new_type': new_type, 'diff': diff, 'host': host}) self.remove_temporary_snapshots(volume) old_volume_settings = self.get_volume_settings_from_type(volume, host) return self._retype_from_old_to_new(volume, new_type, old_volume_settings, host) def remove_temporary_snapshots(self, volume): vol_name = self._get_3par_vol_name(volume) snapshots_list = self.client.getVolumeSnapshots(vol_name) tmp_snapshots_list = [snap for snap in snapshots_list if snap.startswith('tss-')] LOG.debug("temporary snapshot list %(name)s", {'name': tmp_snapshots_list}) for temp_snap in tmp_snapshots_list: LOG.debug("Found a temporary snapshot %(name)s", {'name': temp_snap}) try: self.client.deleteVolume(temp_snap) except hpeexceptions.HTTPNotFound: # if the volume is gone, it's as good as a # successful delete pass except Exception: msg = _("Volume has a temporary snapshot.") raise exception.VolumeIsBusy(message=msg) def revert_to_snapshot(self, volume, snapshot): """Revert volume to snapshot. :param volume: A dictionary describing the volume to revert :param snapshot: A dictionary describing the latest snapshot """ volume_name = self._get_3par_vol_name(volume) snapshot_name = self._get_3par_snap_name(snapshot['id']) rcg_name = self._get_3par_rcg_name(volume) volume_part_of_group = ( self._volume_of_hpe_tiramisu_type_and_part_of_group(volume)) if volume_part_of_group: group = volume.get('group') rcg_name = self._get_3par_rcg_name_of_group(group.id) optional = {} replication_flag = self._volume_of_replicated_type( volume, hpe_tiramisu_check=True) if replication_flag or volume_part_of_group: LOG.debug("Found replicated volume: %(volume)s.", {'volume': volume_name}) optional['allowRemoteCopyParent'] = True try: self.client.stopRemoteCopy(rcg_name) except Exception as ex: msg = (_("There was an error stopping remote copy: %s.") % str(ex)) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) if self.client.isOnlinePhysicalCopy(volume_name): LOG.debug("Found an online copy for %(volume)s.", {'volume': volume_name}) optional['online'] = True body = self.client.promoteVirtualCopy(snapshot_name, optional=optional) task_id = body.get('taskid') task_status = self._wait_for_task_completion(task_id) if task_status['status'] is not self.client.TASK_DONE: dbg = {'status': task_status, 'id': volume['id']} msg = _('Promote virtual copy failed: ' 'id=%(id)s, status=%(status)s.') % dbg raise exception.CinderException(msg) else: LOG.debug('Promote virtual copy completed: ' 'id=%s.', volume['id']) if replication_flag or volume_part_of_group: try: self.client.startRemoteCopy(rcg_name) except Exception as ex: msg = (_("There was an error starting remote copy: %s.") % str(ex)) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) LOG.info("Volume %(volume)s succesfully reverted to %(snap)s.", {'volume': volume_name, 'snap': snapshot_name}) def find_existing_vlun(self, volume, host, remote_client=None): """Finds an existing VLUN for a volume on a host. Returns an existing VLUN's information. If no existing VLUN is found, None is returned. :param volume: A dictionary describing a volume. :param host: A dictionary describing a host. """ existing_vlun = None try: vol_name = self._get_3par_vol_name(volume) if remote_client: host_vluns = remote_client.getHostVLUNs(host['name']) else: host_vluns = self.client.getHostVLUNs(host['name']) # The first existing VLUN found will be returned. 
for vlun in host_vluns: if vlun['volumeName'] == vol_name: existing_vlun = vlun break except hpeexceptions.HTTPNotFound: # ignore, no existing VLUNs were found LOG.debug("No existing VLUNs were found for host/volume " "combination: %(host)s, %(vol)s", {'host': host['name'], 'vol': vol_name}) return existing_vlun def find_existing_vluns(self, volume, host, remote_client=None): existing_vluns = [] try: vol_name = self._get_3par_vol_name(volume) if remote_client: host_vluns = remote_client.getHostVLUNs(host['name']) else: host_vluns = self.client.getHostVLUNs(host['name']) for vlun in host_vluns: if vlun['volumeName'] == vol_name: existing_vluns.append(vlun) except hpeexceptions.HTTPNotFound: # ignore, no existing VLUNs were found LOG.debug("No existing VLUNs were found for host/volume " "combination: %(host)s, %(vol)s", {'host': host['name'], 'vol': vol_name}) return existing_vluns # v2 replication methods def failover_host(self, context, volumes, secondary_backend_id, groups): """Force failover to a secondary replication target.""" volume_update_list = [] group_update_list = [] # Ensure replication is enabled before we try and failover. if not self._replication_enabled: msg = _("Issuing a fail-over failed because replication is " "not properly configured.") LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) # We are removing volumes which are part of group, # So creating volume_copy before doing that. # After failover/failback operation,making volumes as like # previous with the help of volume_copy. volumes_copy = [] volumes_copy[:] = volumes # Check to see if the user requested to failback. if (secondary_backend_id and secondary_backend_id == self.FAILBACK_VALUE): failover = False target_id = None group_target_id = self.FAILBACK_VALUE else: # Find the failover target. failover_target = None for target in self._replication_targets: if target['backend_id'] == secondary_backend_id: failover_target = target break if not failover_target: msg = _("A valid secondary target MUST be specified in order " "to failover.") LOG.error(msg) raise exception.InvalidReplicationTarget(reason=msg) failover = True target_id = failover_target['backend_id'] group_target_id = target_id if groups: for group in groups: vol_list = [] vols_obj = [] for index, vol in enumerate(volumes): if vol.get('group_id') == group.id: vols_obj.append(vol) vol_list.append(volumes[index]) for vol_obj in vols_obj: # Remove volumes which are part of a group. volumes.remove(vol_obj) grp_update, vol_updates = ( self.failover_replication( None, group, vol_list, group_target_id, host=True)) group_update_list.append({'group_id': group.id, 'updates': grp_update}) volume_update_list += vol_updates # user requested failback. if not failover: vol_updates = self._replication_failback(volumes) volume_update_list += vol_updates # user requested failover. else: # For each volume, if it is replicated, we want to fail it over. for volume in volumes: if self._volume_of_replicated_type(volume, hpe_tiramisu_check=True): try: # Try and stop remote-copy on main array. We eat the # exception here because when an array goes down, the # groups will stop automatically. rcg_name = self._get_3par_rcg_name(volume) self.client.stopRemoteCopy(rcg_name) except Exception: pass try: # Failover to secondary array. 
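                        # Note (descriptive, added for clarity): the remote
                        # copy group on the secondary array is addressed as
                        # "<local rcg name>.r<provider_location>" (cf.
                        # _get_3par_remote_rcg_name_of_group below), and
                        # RC_ACTION_CHANGE_TO_PRIMARY asks the secondary
                        # array to take the primary role for that group.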
remote_rcg_name = self._get_3par_remote_rcg_name( volume, volume['provider_location']) cl = self._create_replication_client(failover_target) cl.recoverRemoteCopyGroupFromDisaster( remote_rcg_name, self.RC_ACTION_CHANGE_TO_PRIMARY) volume_update_list.append( {'volume_id': volume['id'], 'updates': {'replication_status': 'failed-over', 'replication_driver_data': failover_target['id']}}) except Exception as ex: LOG.error("There was a problem with the failover " "(%(error)s) and it was unsuccessful. " "Volume '%(volume)s will not be available " "on the failed over target.", {'error': ex, 'volume': volume['id']}) LOG.error(msg) volume_update_list.append( {'volume_id': volume['id'], 'updates': {'replication_status': 'error'}}) finally: self._destroy_replication_client(cl) else: # If the volume is not of replicated type, we need to # force the status into error state so a user knows they # do not have access to the volume. volume_update_list.append( {'volume_id': volume['id'], 'updates': {'status': 'error'}}) volumes[:] = volumes_copy return target_id, volume_update_list, group_update_list def _replication_failback(self, volumes): # Make sure the proper steps on the backend have been completed before # we allow a fail-over. if not self._is_host_ready_for_failback(volumes): msg = _("The host is not ready to be failed back. Please " "resynchronize the volumes and resume replication on the " "3PAR backends.") LOG.error(msg) raise exception.InvalidReplicationTarget(reason=msg) # Update the volumes status to available. volume_update_list = [] for volume in volumes: if self._volume_of_replicated_type(volume, hpe_tiramisu_check=True): volume_update_list.append( {'volume_id': volume['id'], 'updates': {'replication_status': 'available', 'replication_driver_data': self.client.id}}) else: # Upon failing back, we can move the non-replicated volumes # back into available state. volume_update_list.append( {'volume_id': volume['id'], 'updates': {'status': 'available'}}) return volume_update_list def _is_host_ready_for_failback(self, volumes): """Checks to make sure the volume has been synchronized This ensures that all the remote copy targets have been restored to their natural direction, and all of the volumes have been fully synchronized. """ try: for volume in volumes: if self._volume_of_replicated_type(volume, hpe_tiramisu_check=True): location = volume.get('provider_location') remote_rcg_name = self._get_3par_remote_rcg_name(volume, location) rcg = self.client.getRemoteCopyGroup(remote_rcg_name) if not self._are_targets_in_their_natural_direction(rcg): return False except Exception: # If there was a problem, we will return false so we can # log an error in the parent function. 
return False return True def _do_replication_setup(self, array_id=None): replication_targets = [] replication_devices = self.config.replication_device if replication_devices: for dev in replication_devices: remote_array = dict(dev.items()) # Override and set defaults for certain entries remote_array['managed_backend_name'] = ( dev.get('managed_backend_name')) remote_array['replication_mode'] = ( self._get_remote_copy_mode_num( dev.get('replication_mode'))) remote_array['san_ssh_port'] = ( dev.get('san_ssh_port', self.config.san_ssh_port)) remote_array['ssh_conn_timeout'] = ( dev.get('ssh_conn_timeout', self.config.ssh_conn_timeout)) remote_array['san_private_key'] = ( dev.get('san_private_key', self.config.san_private_key)) # Format iscsi IPs correctly iscsi_ips = dev.get('hpe3par_iscsi_ips') if iscsi_ips: remote_array['hpe3par_iscsi_ips'] = iscsi_ips.split(' ') # Format hpe3par_iscsi_chap_enabled as a bool remote_array['hpe3par_iscsi_chap_enabled'] = ( dev.get('hpe3par_iscsi_chap_enabled') == 'True') array_name = remote_array['backend_id'] # Make sure we can log into the array, that it has been # correctly configured, and its API version meets the # minimum requirement. cl = None try: cl = self._create_replication_client(remote_array) info = cl.getStorageSystemInfo() remote_array['id'] = str(info['id']) if array_id and array_id == info['id']: self._active_backend_id = str(info['name']) wsapi_version = cl.getWsApiVersion()['build'] if wsapi_version < REMOTE_COPY_API_VERSION: LOG.warning("The secondary array must have an API " "version of %(min_ver)s or higher. Array " "'%(target)s' is on %(target_ver)s, " "therefore it will not be added as a " "valid replication target.", {'target': array_name, 'min_ver': REMOTE_COPY_API_VERSION, 'target_ver': wsapi_version}) elif not self._is_valid_replication_array(remote_array): LOG.warning("'%s' is not a valid replication array. " "In order to be valid, backend_id, " "replication_mode, " "hpe3par_api_url, hpe3par_username, " "hpe3par_password, cpg_map, san_ip, " "san_login, and san_password " "must be specified. If the target is " "managed, managed_backend_name must be " "set as well.", array_name) else: replication_targets.append(remote_array) except Exception: LOG.error("Could not log in to 3PAR array (%s) with the " "provided credentials.", array_name) finally: self._destroy_replication_client(cl) self._replication_targets = replication_targets if self._is_replication_configured_correct(): self._replication_enabled = True def _is_valid_replication_array(self, target): required_flags = ['hpe3par_api_url', 'hpe3par_username', 'hpe3par_password', 'san_ip', 'san_login', 'san_password', 'backend_id', 'replication_mode', 'cpg_map'] try: self.check_replication_flags(target, required_flags) return True except Exception: return False def _is_replication_configured_correct(self): rep_flag = True # Make sure there is at least one replication target. if len(self._replication_targets) < 1: LOG.error("There must be at least one valid replication " "device configured.") rep_flag = False return rep_flag def _is_replication_mode_correct(self, mode, sync_num): rep_flag = True # Make sure replication_mode is set to either sync|periodic. mode = self._get_remote_copy_mode_num(mode) if not mode: LOG.error("Extra spec replication:mode must be set and must " "be either 'sync' or 'periodic'.") rep_flag = False else: # If replication:mode is periodic, replication_sync_period must be # set between 300 - 31622400 seconds. 
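            # Illustrative volume-type extra specs (placeholder values) that
            # this check is meant to validate:
            #
            #   replication:mode = 'periodic'
            #   replication:sync_period = '900'
            #
            # 'sync' mode needs no sync period; for 'periodic' the period
            # must fall inside the 300..31622400 second window tested below.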
if mode == self.PERIODIC and ( sync_num < 300 or sync_num > 31622400): LOG.error("Extra spec replication:sync_period must be " "greater than 299 and less than 31622401 " "seconds.") rep_flag = False return rep_flag def is_volume_group_snap_type(self, volume_type): consis_group_snap_type = False if volume_type: extra_specs = volume_type.get('extra_specs') if 'consistent_group_snapshot_enabled' in extra_specs: gsnap_val = extra_specs['consistent_group_snapshot_enabled'] consis_group_snap_type = (gsnap_val == " True") return consis_group_snap_type def _volume_of_replicated_type(self, volume, hpe_tiramisu_check=None): replicated_type = False volume_type_id = volume.get('volume_type_id') if volume_type_id: volume_type = self._get_volume_type(volume_type_id) extra_specs = volume_type.get('extra_specs') if extra_specs and 'replication_enabled' in extra_specs: rep_val = extra_specs['replication_enabled'] replicated_type = (rep_val == " True") if hpe_tiramisu_check and replicated_type: hpe3par_tiramisu = self._get_hpe3par_tiramisu_value( volume_type) if hpe3par_tiramisu: replicated_type = False return replicated_type def _volume_of_hpe_tiramisu_type(self, volume): hpe_tiramisu_type = False replicated_type = False volume_type_id = volume.get('volume_type_id') if volume_type_id: volume_type = self._get_volume_type(volume_type_id) extra_specs = volume_type.get('extra_specs') if extra_specs and 'replication_enabled' in extra_specs: rep_val = extra_specs['replication_enabled'] replicated_type = (rep_val == " True") if replicated_type: hpe3par_tiramisu = self._get_hpe3par_tiramisu_value( volume_type) if hpe3par_tiramisu: hpe_tiramisu_type = True return hpe_tiramisu_type def _volume_of_hpe_tiramisu_type_and_part_of_group(self, volume): volume_part_of_group = False hpe_tiramisu_type = self._volume_of_hpe_tiramisu_type(volume) if hpe_tiramisu_type: if volume.get('group'): volume_part_of_group = True return volume_part_of_group def _is_volume_type_replicated(self, volume_type): replicated_type = False extra_specs = volume_type.get('extra_specs') if extra_specs and 'replication_enabled' in extra_specs: rep_val = extra_specs['replication_enabled'] replicated_type = (rep_val == " True") return replicated_type def _is_volume_in_remote_copy_group(self, volume): rcg_name = self._get_3par_rcg_name(volume) try: self.client.getRemoteCopyGroup(rcg_name) return True except hpeexceptions.HTTPNotFound: return False def _get_remote_copy_mode_num(self, mode): ret_mode = None if mode == "sync": ret_mode = self.SYNC if mode == "periodic": ret_mode = self.PERIODIC return ret_mode def _get_3par_config(self, array_id=None): self._do_replication_setup(array_id=array_id) conf = None if self._replication_enabled: for target in self._replication_targets: if target['backend_id'] == self._active_backend_id: conf = target break self._build_3par_config(conf) def _build_3par_config(self, conf=None): """Build 3PAR client config dictionary. self._client_conf will contain values from self.config if the volume is located on the primary array in order to properly contact it. If the volume has been failed over and therefore on a secondary array, self._client_conf will contain values on how to contact that array. The only time we will return with entries from a secondary array is with unmanaged replication. 
""" if conf: self._client_conf['hpe3par_cpg'] = self._generate_hpe3par_cpgs( conf.get('cpg_map')) self._client_conf['hpe3par_username'] = ( conf.get('hpe3par_username')) self._client_conf['hpe3par_password'] = ( conf.get('hpe3par_password')) self._client_conf['san_ip'] = conf.get('san_ip') self._client_conf['san_login'] = conf.get('san_login') self._client_conf['san_password'] = conf.get('san_password') self._client_conf['san_ssh_port'] = conf.get('san_ssh_port') self._client_conf['ssh_conn_timeout'] = ( conf.get('ssh_conn_timeout')) self._client_conf['san_private_key'] = conf.get('san_private_key') self._client_conf['hpe3par_api_url'] = conf.get('hpe3par_api_url') self._client_conf['hpe3par_iscsi_ips'] = ( conf.get('hpe3par_iscsi_ips')) self._client_conf['hpe3par_iscsi_chap_enabled'] = ( conf.get('hpe3par_iscsi_chap_enabled')) self._client_conf['iscsi_ip_address'] = ( conf.get('target_ip_address')) self._client_conf['iscsi_port'] = conf.get('iscsi_port') else: self._client_conf['hpe3par_cpg'] = ( self.config.hpe3par_cpg) self._client_conf['hpe3par_username'] = ( self.config.hpe3par_username) self._client_conf['hpe3par_password'] = ( self.config.hpe3par_password) self._client_conf['san_ip'] = self.config.san_ip self._client_conf['san_login'] = self.config.san_login self._client_conf['san_password'] = self.config.san_password self._client_conf['san_ssh_port'] = self.config.san_ssh_port self._client_conf['ssh_conn_timeout'] = ( self.config.ssh_conn_timeout) self._client_conf['san_private_key'] = self.config.san_private_key self._client_conf['hpe3par_api_url'] = self.config.hpe3par_api_url self._client_conf['hpe3par_iscsi_ips'] = ( self.config.hpe3par_iscsi_ips) self._client_conf['hpe3par_iscsi_chap_enabled'] = ( self.config.hpe3par_iscsi_chap_enabled) self._client_conf['iscsi_ip_address'] = ( self.config.target_ip_address) self._client_conf['iscsi_port'] = self.config.target_port def _get_cpg_from_cpg_map(self, cpg_map, target_cpg): ret_target_cpg = None cpg_pairs = cpg_map.split(' ') for cpg_pair in cpg_pairs: cpgs = cpg_pair.split(':') cpg = cpgs[0] dest_cpg = cpgs[1] if cpg == target_cpg: ret_target_cpg = dest_cpg return ret_target_cpg def _generate_hpe3par_cpgs(self, cpg_map): hpe3par_cpgs = [] cpg_pairs = cpg_map.split(' ') for cpg_pair in cpg_pairs: cpgs = cpg_pair.split(':') hpe3par_cpgs.append(cpgs[1]) return hpe3par_cpgs def _get_replication_targets(self): replication_targets = [] for target in self._replication_targets: replication_targets.append(target['backend_id']) return replication_targets def _do_volume_replication_setup(self, volume, retype=False, dist_type_id=None): """This function will do or ensure the following: -Create volume on main array (already done in create_volume) -Create Remote Copy Group on main array -Add volume to Remote Copy Group on main array -Start remote copy If anything here fails, we will need to clean everything up in reverse order, including the original volume. """ rcg_name = self._get_3par_rcg_name(volume) # If the volume is already in a remote copy group, return True # after starting remote copy. If remote copy is already started, # issuing this command again will be fine. if self._is_volume_in_remote_copy_group(volume): try: self.client.startRemoteCopy(rcg_name) except Exception: pass return True try: # Grab the extra_spec entries for replication and make sure they # are set correctly. 
volume_type = self._get_volume_type(volume["volume_type_id"]) if retype and dist_type_id is not None: dist_type = self._get_volume_type(dist_type_id) extra_specs = dist_type.get("extra_specs") else: extra_specs = volume_type.get("extra_specs") replication_mode = extra_specs.get( self.EXTRA_SPEC_REP_MODE, self.DEFAULT_REP_MODE) replication_mode_num = self._get_remote_copy_mode_num( replication_mode) replication_sync_period = extra_specs.get( self.EXTRA_SPEC_REP_SYNC_PERIOD, self.DEFAULT_SYNC_PERIOD) if replication_sync_period: replication_sync_period = int(replication_sync_period) if not self._is_replication_mode_correct(replication_mode, replication_sync_period): msg = _("The replication mode was not configured correctly " "in the volume type extra_specs. If replication:mode " "is periodic, replication:sync_period must also be " "specified and be between 300 and 31622400 seconds.") LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) vol_settings = self.get_volume_settings_from_type(volume) local_cpg = vol_settings['cpg'] vol_name = self._get_3par_vol_name(volume) # Create remote copy group on main array. rcg_targets = [] sync_targets = [] for target in self._replication_targets: # Only add targets that match the volumes replication mode. if target['replication_mode'] == replication_mode_num: cpg = self._get_cpg_from_cpg_map(target['cpg_map'], local_cpg) rcg_target = {'targetName': target['backend_id'], 'mode': replication_mode_num, 'userCPG': cpg} if self.API_VERSION < API_VERSION_2023: rcg_target['snapCPG'] = cpg rcg_targets.append(rcg_target) sync_target = {'targetName': target['backend_id'], 'syncPeriod': replication_sync_period} sync_targets.append(sync_target) optional = {'localUserCPG': local_cpg} if self.API_VERSION < API_VERSION_2023: optional['localSnapCPG'] = vol_settings['snap_cpg'] pool = volume_utils.extract_host(volume['host'], level='pool') domain = self.get_domain(pool) if domain: optional["domain"] = domain try: self.client.createRemoteCopyGroup(rcg_name, rcg_targets, optional) except Exception as ex: msg = (_("There was an error creating the remote copy " "group: %s.") % str(ex)) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) LOG.debug("created rcg %(name)s", {'name': rcg_name}) # Add volume to remote copy group. rcg_targets = [] for target in self._replication_targets: # Only add targets that match the volumes replication mode. if target['replication_mode'] == replication_mode_num: rcg_target = {'targetName': target['backend_id'], 'secVolumeName': vol_name} rcg_targets.append(rcg_target) optional = {'volumeAutoCreation': True} try: self.client.addVolumeToRemoteCopyGroup(rcg_name, vol_name, rcg_targets, optional=optional) except Exception as ex: msg = (_("There was an error adding the volume to the remote " "copy group: %s.") % str(ex)) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) # Check and see if we are in periodic mode. If we are, update # Remote Copy Group to have a sync period. if replication_sync_period and ( replication_mode_num == self.PERIODIC): opt = {'targets': sync_targets} try: self.client.modifyRemoteCopyGroup(rcg_name, opt) except Exception as ex: msg = (_("There was an error setting the sync period for " "the remote copy group: %s.") % str(ex)) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) # Check if we are in sync mode and quorum_witness_ip is present. 
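        # Peer Persistence (illustrative): when the replication_device entry
        # uses replication_mode:sync and also supplies a quorum_witness_ip,
        # the code below re-modifies the remote copy group with the
        # autoFailover, pathManagement and autoRecover policies enabled.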
# If yes, add options for Peer Persistence (PP) quorum_witness_ip = None if replication_mode_num == self.SYNC: remote_target = self._replication_targets[0] quorum_witness_ip = remote_target.get('quorum_witness_ip') if quorum_witness_ip: LOG.debug('setting pp_params') pp_params = {'targets': [ {'policies': {'autoFailover': True, 'pathManagement': True, 'autoRecover': True}}]} try: self.client.modifyRemoteCopyGroup(rcg_name, pp_params) except Exception as ex: msg = (_("There was an error while modifying remote " "copy group: %s.") % str(ex)) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) # Start the remote copy. try: self.client.startRemoteCopy(rcg_name) except Exception as ex: msg = (_("There was an error starting remote copy: %s.") % str(ex)) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) return True except Exception as ex: self._do_volume_replication_destroy(volume, retype=retype) msg = (_("There was an error setting up a remote copy group " "on the 3PAR arrays: ('%s'). The volume will not be " "recognized as replication type.") % str(ex)) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) def _do_volume_replication_destroy(self, volume, rcg_name=None, retype=False): """This will completely remove all traces of a remote copy group. It should be used when deleting a replication enabled volume or if setting up a remote copy group fails. It will try and do the following: -Stop remote copy -Remove volume from Remote Copy Group on main array -Delete Remote Copy Group from main array -Delete volume from main array """ if not rcg_name: rcg_name = self._get_3par_rcg_name(volume) vol_name = self._get_3par_vol_name(volume) # Stop remote copy. try: self.client.stopRemoteCopy(rcg_name) except Exception: pass # Delete volume from remote copy group on main array. try: self.client.removeVolumeFromRemoteCopyGroup( rcg_name, vol_name, removeFromTarget=True) except Exception: pass # Delete remote copy group on main array. try: self.client.removeRemoteCopyGroup(rcg_name) except Exception: pass # Delete volume on the main array. try: if not retype: self.client.deleteVolume(vol_name) except hpeexceptions.HTTPConflict as ex: if ex.get_code() == 34: # This is a special case which means the # volume is part of a volume set. self._delete_vvset(volume) self.client.deleteVolume(vol_name) except Exception: pass def _delete_replicated_failed_over_volume(self, volume): location = volume.get('provider_location') rcg_name = self._get_3par_remote_rcg_name(volume, location) targets = self.client.getRemoteCopyGroup(rcg_name)['targets'] # When failed over, we want to temporarily disable config mirroring # in order to be allowed to delete the volume and remote copy group for target in targets: target_name = target['targetName'] self.client.toggleRemoteCopyConfigMirror(target_name, mirror_config=False) # Do regular volume replication destroy now config mirroring is off try: self._do_volume_replication_destroy(volume, rcg_name) except Exception as ex: msg = (_("The failed-over volume could not be deleted: %s") % str(ex)) LOG.error(msg) raise exception.VolumeIsBusy(message=msg) finally: # Turn config mirroring back on for target in targets: target_name = target['targetName'] self.client.toggleRemoteCopyConfigMirror(target_name, mirror_config=True) def _delete_vvset(self, volume): # volume is part of a volume set. LOG.debug("_delete_vvset. 
vol_id: %(id)s", {'id': volume['id']}) volume_name = self._get_3par_vol_name(volume) vvset_name = self._get_3par_vvs_name(volume['id']) try: # find vvset self.client.getVolumeSet(vvset_name) # (a) vvset is found: # We have a single volume per volume set, so # remove the volume set. LOG.debug("Deleting vvset: %(name)s", {'name': vvset_name}) self.client.deleteVolumeSet(vvset_name) except hpeexceptions.HTTPNotFound: # (b) vvset not found: # - find the vvset name from volume name # - remove the volume and leave the vvset vvset_name = self.client.findVolumeSet(volume_name) LOG.debug("Removing vol %(volume_name)s from vvset %(vvset_name)s", {'volume_name': volume_name, 'vvset_name': vvset_name}) self.client.removeVolumeFromVolumeSet(vvset_name, volume_name) def _get_3par_rcg_name_of_group(self, group_id): rcg_name = self._encode_name(group_id) rcg = "rcg-%s" % rcg_name return rcg[:22] def _get_3par_remote_rcg_name_of_group(self, group_id, provider_location): return self._get_3par_rcg_name_of_group(group_id) + ".r" + ( str(provider_location)) def _get_hpe3par_tiramisu_value(self, volume_type): hpe3par_tiramisu = False hpe3par_keys = self._get_keys_by_volume_type(volume_type) if hpe3par_keys.get('group_replication'): hpe3par_tiramisu = ( hpe3par_keys['group_replication'] == " True") return hpe3par_tiramisu def _stop_remote_copy_group(self, group): # Stop remote copy. rcg_name = self._get_3par_rcg_name_of_group(group.id) try: self.client.stopRemoteCopy(rcg_name) except Exception: LOG.debug("Stopping remote copy group on group: %(group_id)s is " "failed", {'group_id': group.id}) def _start_remote_copy_group(self, group): # Start remote copy. rcg_name = self._get_3par_rcg_name_of_group(group.id) rcg = self.client.getRemoteCopyGroup(rcg_name) if not rcg['volumes']: return try: self.client.startRemoteCopy(rcg_name) except Exception as ex: msg = (_("There was an error starting remote copy: %s.") % str(ex)) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) def _check_rep_status_enabled_on_group(self, group): """Check replication status for group. Group status must be enabled before proceeding with certain operations. 
:param group: the group object :raises: InvalidInput """ if group.is_replicated: if group.replication_status != fields.ReplicationStatus.ENABLED: msg = (_('Replication status should be %(status)s for ' 'replication-enabled group: %(group)s.') % {'status': fields.ReplicationStatus.ENABLED, 'group': group.id}) LOG.error(msg) raise exception.InvalidInput(reason=msg) if not self._replication_enabled: host_backend = volume_utils.extract_host(group.host, 'backend') msg = _("replication is not properly configured on backend: " "(backend)%s") % {'backend': host_backend} LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) else: LOG.debug('Replication is not enabled on group %s, ' 'skip status check.', group.id) def _get_replication_mode_from_volume(self, volume): volume_type = self._get_volume_type(volume["volume_type_id"]) replication_mode_num = ( self._get_replication_mode_from_volume_type(volume_type)) return replication_mode_num def _get_replication_mode_from_volume_type(self, volume_type): # Default replication mode is PERIODIC replication_mode_num = self.PERIODIC extra_specs = volume_type.get("extra_specs") if extra_specs: replication_mode = extra_specs.get( self.EXTRA_SPEC_REP_MODE, self.DEFAULT_REP_MODE) replication_mode_num = self._get_remote_copy_mode_num( replication_mode) return replication_mode_num def _get_replication_sync_period_from_volume(self, volume): volume_type = self._get_volume_type(volume["volume_type_id"]) replication_sync_period = ( self._get_replication_sync_period_from_volume_type(volume_type)) return replication_sync_period def _get_replication_sync_period_from_volume_type(self, volume_type): # Default replication sync period is 900s replication_sync_period = self.DEFAULT_SYNC_PERIOD rep_mode = self.DEFAULT_REP_MODE extra_specs = volume_type.get("extra_specs") if extra_specs: replication_sync_period = extra_specs.get( self.EXTRA_SPEC_REP_SYNC_PERIOD, self.DEFAULT_SYNC_PERIOD) replication_sync_period = int(replication_sync_period) if not self._is_replication_mode_correct(rep_mode, replication_sync_period): msg = _("The replication mode was not configured " "correctly in the volume type extra_specs. " "If replication:mode is periodic, " "replication:sync_period must also be specified " "and be between 300 and 31622400 seconds.") LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) return replication_sync_period def _check_replication_matched(self, volume, group): """Check volume type and group type. This will make sure they do not conflict with each other. :param volume: volume to be checked :param extra_specs: the extra specifications :raises: InvalidInput """ vol_is_re = self._volume_of_replicated_type(volume) group_is_re = group.is_replicated if not (vol_is_re == group_is_re): msg = _('Replication should be enabled or disabled for both ' 'volume or group. Volume replication status: ' '%(vol_status)s, group replication status: ' '%(group_status)s') % { 'vol_status': vol_is_re, 'group_status': group_is_re} raise exception.InvalidInput(reason=msg) def _remove_vol_from_remote_copy_group(self, group, volume): rcg_name = self._get_3par_rcg_name_of_group(group.id) vol_name = self._get_3par_vol_name(volume) try: # Delete volume from remote copy group on secondary array. self.client.removeVolumeFromRemoteCopyGroup( rcg_name, vol_name, removeFromTarget=True) except Exception as ex: # Start RCG even if we fail to remove volume from it. 
self._start_remote_copy_group(group) msg = (_("There was an error removing a volume: %(volume)s from " "Group: %(group)s : %(err)s") % {'volume': volume.get('id'), 'group': group.id, 'err': str(ex)}) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) def _add_vol_to_remote_group(self, group, volume): # Stop remote copy, so we can add volumes in RCG. self._stop_remote_copy_group(group) # Add a volume to RCG self._add_vol_to_remote_copy_group(group, volume) # Start RCG self._start_remote_copy_group(group) def _add_vol_to_remote_copy_group(self, group, volume): rcg_name = self._get_3par_rcg_name_of_group(group.id) try: rcg = self.client.getRemoteCopyGroup(rcg_name) # If volumes are not present in RCG, which means we need to set, # RCG attributes. if not len(rcg['volumes']): self._set_rcg_attributes(volume, rcg_name) self._add_vol_to_remote(volume, rcg_name) # If replication mode is periodic then set sync period on RCG. self._set_rcg_sync_period(volume, rcg_name) except Exception as ex: # Start RCG even if we fail to add volume to it self._start_remote_copy_group(group) msg = (_("There was an error adding a volume: %(volume)s to " "Group: %(group)s : %(err)s") % {'volume': volume.get('id'), 'group': group.id, 'err': str(ex)}) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) def _set_rcg_sync_period(self, volume, rcg_name): sync_targets = [] replication_mode_num = self._get_replication_mode_from_volume(volume) replication_sync_period = ( self._get_replication_sync_period_from_volume(volume)) if not (replication_mode_num == self.PERIODIC): return rcg = self.client.getRemoteCopyGroup(rcg_name) # Check and see if we are in periodic mode. If we are, update # Remote Copy Group to have a sync period. if len(rcg['volumes']) and 'syncPeriod' in rcg['targets'][0]: if replication_sync_period != int(rcg['targets'][0]['syncPeriod']): for target in self._replication_targets: if target['replication_mode'] == replication_mode_num: sync_target = {'targetName': target['backend_id'], 'syncPeriod': replication_sync_period} sync_targets.append(sync_target) opt = {'targets': sync_targets} try: self.client.modifyRemoteCopyGroup(rcg_name, opt) except Exception as ex: msg = (_("There was an error setting the sync period for " "the remote copy group: %s.") % str(ex)) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) def _set_rcg_attributes(self, volume, rcg_name): rcg_targets = [] vol_settings = self.get_volume_settings_from_type(volume) local_cpg = vol_settings['cpg'] replication_mode_num = self._get_replication_mode_from_volume(volume) for target in self._replication_targets: if target['replication_mode'] == replication_mode_num: cpg = self._get_cpg_from_cpg_map(target['cpg_map'], local_cpg) rcg_target = {'targetName': target['backend_id'], 'remoteUserCPG': cpg, 'remoteSnapCPG': cpg} rcg_targets.append(rcg_target) optional = {'localSnapCPG': vol_settings['snap_cpg'], 'localUserCPG': local_cpg, 'targets': rcg_targets} try: self.client.modifyRemoteCopyGroup(rcg_name, optional) except Exception as ex: msg = (_("There was an error modifying the remote copy " "group: %s.") % str(ex)) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) def _add_vol_to_remote(self, volume, rcg_name): # Add a volume to remote copy group. 
rcg_targets = [] vol_name = self._get_3par_vol_name(volume) replication_mode_num = self._get_replication_mode_from_volume(volume) for target in self._replication_targets: if target['replication_mode'] == replication_mode_num: rcg_target = {'targetName': target['backend_id'], 'secVolumeName': vol_name} rcg_targets.append(rcg_target) optional = {'volumeAutoCreation': True} try: self.client.addVolumeToRemoteCopyGroup(rcg_name, vol_name, rcg_targets, optional=optional) except Exception as ex: msg = (_("There was an error adding the volume to the remote " "copy group: %s.") % str(ex)) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) def _is_group_in_remote_copy_group(self, group): rcg_name = self._get_3par_rcg_name_of_group(group.id) try: self.client.getRemoteCopyGroup(rcg_name) return True except hpeexceptions.HTTPNotFound: return False def _remove_volumes_and_remote_copy_group(self, group, volumes): if not self._is_group_in_remote_copy_group(group): return True rcg_name = self._get_3par_rcg_name_of_group(group.id) # Stop remote copy. try: self.client.stopRemoteCopy(rcg_name) except Exception: pass for volume in volumes: vol_name = self._get_3par_vol_name(volume) # Delete volume from remote copy group on secondary array. try: self.client.removeVolumeFromRemoteCopyGroup( rcg_name, vol_name, removeFromTarget=True) except Exception: pass # Delete remote copy group on main array. try: self.client.removeRemoteCopyGroup(rcg_name) except Exception as ex: msg = (_("There was an error deleting RCG %(rcg_name)s: " "%(error)s.") % {'rcg_name': rcg_name, 'error': ex}) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) def _check_tiramisu_configuration_on_volume_types(self, volume_types): for volume_type in volume_types: self._check_tiramisu_configuration_on_volume_type(volume_type) def _check_tiramisu_configuration_on_volume_type(self, volume_type): hpe3par_tiramisu = self._get_hpe3par_tiramisu_value(volume_type) if not hpe3par_tiramisu: msg = _("hpe3par:group_replication is not set on volume type: " "(id)%s") % {'id': volume_type.get('id')} LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) return hpe3par_tiramisu def _check_replication_configuration_on_volume_types(self, volume_types): for volume_type in volume_types: replicated_type = self._is_volume_type_replicated(volume_type) if not replicated_type: msg = _("replication is not set on volume type: " "(id)%s") % {'id': volume_type.get('id')} LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) def _check_attributes_of_remote_per_volume_type(self, group): rep_modes = [] rep_sync_periods = [] for volume_type in group.volume_types: replication_mode_num = ( self._get_replication_mode_from_volume_type(volume_type)) rep_modes.append(replication_mode_num) if replication_mode_num == self.PERIODIC: rep_sync_period = ( self._get_replication_sync_period_from_volume_type( volume_type)) rep_sync_periods.append(rep_sync_period) # Check attributes of Remote on all volume types are same or not? if not (all(x == rep_modes[0] for x in rep_modes) and all(y == rep_sync_periods[0] for y in rep_sync_periods)): msg = _("replication mode or replication sync period must be same " "on each volume type of Group:(id)%s") % {'id': group.id} LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) def _create_remote_copy_group_for_group(self, group): # Create remote copy group on main array. 
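        # Note (descriptive, added for clarity): the group's RCG name is
        # "rcg-" plus the encoded group id, truncated to 22 characters (see
        # _get_3par_rcg_name_of_group above); every configured target whose
        # replication_mode matches the group's first volume type is added to
        # the new remote copy group.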
host_backend = volume_utils.extract_host(group.host, 'backend') rcg_targets = [] optional = {} if not self._replication_enabled: msg = _("replication is not properly configured on backend: " "(backend)%s") % {'backend': host_backend} LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) rcg_name = self._get_3par_rcg_name_of_group(group.id) replication_mode_num = ( self._get_replication_mode_from_volume_type(group.volume_types[0])) for target in self._replication_targets: if (target['replication_mode'] == replication_mode_num): rcg_target = {'targetName': target['backend_id'], 'mode': target['replication_mode']} rcg_targets.append(rcg_target) pool = volume_utils.extract_host(group.host, level='pool') domain = self.get_domain(pool) if domain: optional = {"domain": domain} try: self.client.createRemoteCopyGroup(rcg_name, rcg_targets, optional) except Exception as ex: msg = (_("There was an error creating the remote copy " "group: %s.") % str(ex)) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) def _are_targets_in_their_natural_direction(self, rcg): targets = rcg['targets'] for target in targets: if target['roleReversed'] or ( target['state'] != self.RC_GROUP_STARTED): return False # Make sure all volumes are fully synced. volumes = rcg['volumes'] for volume in volumes: remote_volumes = volume['remoteVolumes'] for remote_volume in remote_volumes: if remote_volume['syncStatus'] != ( self.SYNC_STATUS_COMPLETED): return False return True def _group_failover_replication(self, failover_target, group, provider_location): rcg_name = self._get_3par_rcg_name_of_group(group.id) try: # Try and stop remote-copy on main array. We eat the # exception here because when an array goes down, the # groups will stop automatically. self.client.stopRemoteCopy(rcg_name) except Exception: pass try: # Failover to secondary array. remote_rcg_name = self._get_3par_remote_rcg_name_of_group( group.id, provider_location) cl = self._create_replication_client(failover_target) cl.recoverRemoteCopyGroupFromDisaster( remote_rcg_name, self.RC_ACTION_CHANGE_TO_PRIMARY) except Exception as ex: msg = (_("There was a problem with the failover: " "(%(error)s) and it was unsuccessful.") % {'err': str(ex)}) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) finally: self._destroy_replication_client(cl) def _group_failback_replication(self, failback_target, group, provider_location): remote_rcg_name = self._get_3par_remote_rcg_name_of_group( group.id, provider_location) try: cl = self._create_replication_client(failback_target) remote_rcg = cl.getRemoteCopyGroup(remote_rcg_name) except Exception as ex: msg = (_("There was a problem with the failback: " "(%(error)s) and it was unsuccessful.") % {'err': str(ex)}) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) finally: self._destroy_replication_client(cl) if not self._are_targets_in_their_natural_direction(remote_rcg): msg = _("The host is not ready to be failed back. Please " "resynchronize the volumes and resume replication on the " "3PAR backends.") LOG.error(msg) raise exception.InvalidReplicationTarget(reason=msg) def enable_replication(self, context, group, volumes): """Enable replication for a group. 
:param context: the context :param group: the group object :param volumes: the list of volumes :returns: model_update, None """ model_update = {} if not group.is_replicated: raise NotImplementedError() if not volumes: # Return if empty group return model_update, None try: vvs_name = self._get_3par_vvs_name(group.id) rcg_name = self._get_3par_rcg_name_of_group(group.id) # Check VV and RCG exist on 3par, # if RCG exist then start RCG self.client.getVolumeSet(vvs_name) self.client.startRemoteCopy(rcg_name) except hpeexceptions.HTTPNotFound as ex: # The remote-copy group does not exist or # set does not exist. if (ex.get_code() == 187 or ex.get_code() == 102): raise exception.GroupNotFound(group_id=group.id) except hpeexceptions.HTTPForbidden as ex: # The remote-copy group has already been started. if ex.get_code() == 215: pass except Exception as ex: model_update.update({ 'replication_status': fields.ReplicationStatus.ERROR}) LOG.error("Error enabling replication on group %(group)s. " "Exception received: %(e)s.", {'group': group.id, 'e': ex}) return model_update, None def disable_replication(self, context, group, volumes): """Disable replication for a group. :param context: the context :param group: the group object :param volumes: the list of volumes :returns: model_update, None """ model_update = {} if not group.is_replicated: raise NotImplementedError() if not volumes: # Return if empty group return model_update, None try: vvs_name = self._get_3par_vvs_name(group.id) rcg_name = self._get_3par_rcg_name_of_group(group.id) # Check VV and RCG exist on 3par, # if RCG exist then stop RCG self.client.getVolumeSet(vvs_name) self.client.stopRemoteCopy(rcg_name) except hpeexceptions.HTTPNotFound as ex: # The remote-copy group does not exist or # set does not exist. if (ex.get_code() == 187 or ex.get_code() == 102): raise exception.GroupNotFound(group_id=group.id) except Exception as ex: model_update.update({ 'replication_status': fields.ReplicationStatus.ERROR}) LOG.error("Error disabling replication on group %(group)s. " "Exception received: %(e)s.", {'group': group.id, 'e': ex}) return model_update, None def failover_replication(self, context, group, volumes, secondary_backend_id=None, host=False): """Failover replication for a group. :param context: the context :param group: the group object :param volumes: the list of volumes :param secondary_backend_id: the secondary backend id - default None :param host: flag to indicate if whole host is being failed over :returns: model_update, None """ model_update = {} vol_model_updates = [] failover_target = None failback_target = None rep_data = None if not group.is_replicated: raise NotImplementedError() if not volumes: # Return if empty group return model_update, vol_model_updates if not self._replication_enabled: msg = _("Issuing a fail-over failed because replication is " "not properly configured.") LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) try: provider_location = volumes[0].get('provider_location') replication_driver_data = volumes[0].get('replication_driver_data') failover = False if secondary_backend_id == 'default' else True if failover: # Find the failover target. 
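            # Note (descriptive, added for clarity): failover matches the
            # requested secondary_backend_id against the configured targets'
            # backend_id values; failback (the else branch further down)
            # instead matches on the array id previously stored in the
            # volume's replication_driver_data.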
for target in self._replication_targets: if target['backend_id'] == secondary_backend_id: failover_target = target break if not failover_target: msg = _("A valid secondary target MUST be specified " "in order to failover.") LOG.error(msg) raise exception.InvalidReplicationTarget(reason=msg) self._group_failover_replication(failover_target, group, provider_location) model_update.update({ 'replication_status': fields.ReplicationStatus.FAILED_OVER}) vol_rep_status = fields.ReplicationStatus.FAILED_OVER else: # Find the failback target. for target in self._replication_targets: if target['id'] == replication_driver_data: failback_target = target break if not failback_target: msg = _("A valid target is not found " "in order to failback.") LOG.error(msg) raise exception.InvalidReplicationTarget(reason=msg) self._group_failback_replication(failback_target, group, provider_location) model_update.update({ 'replication_status': fields.ReplicationStatus.ENABLED}) vol_rep_status = fields.ReplicationStatus.ENABLED except Exception as ex: model_update.update({ 'replication_status': fields.ReplicationStatus.ERROR}) vol_rep_status = fields.ReplicationStatus.ERROR LOG.error("Error failover replication on group %(group)s. " "Exception received: %(e)s.", {'group': group.id, 'e': ex}) rep_data = target['id'] for vol in volumes: loc = vol.get('provider_location') update = {'id': vol.get('id'), 'replication_status': vol_rep_status, 'provider_location': loc, 'replication_driver_data': rep_data} if host: update = {'volume_id': vol.get('id'), 'updates': update} vol_model_updates.append(update) return model_update, vol_model_updates class TaskWaiter(object): """TaskWaiter waits for task to be not active and returns status.""" def __init__(self, client, task_id, interval=1, initial_delay=0): self.client = client self.task_id = task_id self.interval = interval self.initial_delay = initial_delay def _wait_for_task(self): status = self.client.getTask(self.task_id) LOG.debug("3PAR Task id %(id)s status = %(status)s", {'id': self.task_id, 'status': status['status']}) if status['status'] is not self.client.TASK_ACTIVE: raise loopingcall.LoopingCallDone(status) def wait_for_task(self): timer = loopingcall.FixedIntervalLoopingCall(self._wait_for_task) return timer.start(interval=self.interval, initial_delay=self.initial_delay).wait() class ReplicateVolumeTask(flow_utils.CinderTask): """Task to replicate a volume. This is a task for adding/removing the replication feature to volume. It is intended for use during retype(). This task has no revert. # TODO(sumit): revert back to original volume extra-spec """ def __init__(self, action, **kwargs): super(ReplicateVolumeTask, self).__init__(addons=[action]) def execute(self, common, volume, new_type_id): new_replicated_type = False if new_type_id: new_volume_type = common._get_volume_type(new_type_id) extra_specs = new_volume_type.get('extra_specs', None) if extra_specs and 'replication_enabled' in extra_specs: rep_val = extra_specs['replication_enabled'] new_replicated_type = (rep_val == " True") if (common._volume_of_replicated_type(volume, hpe_tiramisu_check=True) and new_replicated_type): # Retype from replication enabled to replication enable. common._do_volume_replication_destroy(volume, retype=True) common._do_volume_replication_setup( volume, retype=True, dist_type_id=new_type_id) elif (not common._volume_of_replicated_type(volume, hpe_tiramisu_check=True) and new_replicated_type): # Retype from replication disabled to replication enable. 
common._do_volume_replication_setup( volume, retype=True, dist_type_id=new_type_id) elif common._volume_of_replicated_type(volume, hpe_tiramisu_check=True): # Retype from replication enabled to replication disable. common._do_volume_replication_destroy(volume, retype=True) class ModifyVolumeTask(flow_utils.CinderTask): """Task to change a volume's snapCPG and comment. This is a task for changing the snapCPG and comment. It is intended for use during retype(). These changes are done together with a single modify request which should be fast and easy to revert. Because we do not support retype with existing snapshots, we can change the snapCPG without using a keepVV. If snapshots exist, then this will fail, as desired. This task does not change the userCPG or provisioningType. Those changes may require tunevv, so they are done by the TuneVolumeTask. The new comment will contain the new type, VVS and QOS information along with whatever else was in the old comment dict. The old comment and snapCPG are restored if revert is called. """ def __init__(self, action): self.needs_revert = False super(ModifyVolumeTask, self).__init__(addons=[action]) def _get_new_comment(self, old_comment, new_vvs, new_qos, new_type_name, new_type_id): # Modify the comment during ModifyVolume if not old_comment: comment_dict = {} else: comment_dict = dict(ast.literal_eval(old_comment)) if 'vvs' in comment_dict: del comment_dict['vvs'] if 'qos' in comment_dict: del comment_dict['qos'] if new_vvs: comment_dict['vvs'] = new_vvs elif new_qos: comment_dict['qos'] = new_qos else: comment_dict['qos'] = {} if new_type_name: comment_dict['volume_type_name'] = new_type_name else: comment_dict.pop('volume_type_name', None) if new_type_id: comment_dict['volume_type_id'] = new_type_id else: comment_dict.pop('volume_type_id', None) return comment_dict def execute(self, common, volume_name, old_snap_cpg, new_snap_cpg, old_comment, new_vvs, new_qos, new_type_name, new_type_id): comment_dict = self._get_new_comment( old_comment, new_vvs, new_qos, new_type_name, new_type_id) LOG.debug("API_VERSION: %(ver_1)s, API_VERSION_2023: %(ver_2)s", {'ver_1': common.API_VERSION, 'ver_2': API_VERSION_2023}) if (new_snap_cpg != old_snap_cpg and common.API_VERSION < API_VERSION_2023): # Modify the snap_cpg. This will fail with snapshots. LOG.info("Modifying %(volume_name)s snap_cpg from " "%(old_snap_cpg)s to %(new_snap_cpg)s.", {'volume_name': volume_name, 'old_snap_cpg': old_snap_cpg, 'new_snap_cpg': new_snap_cpg}) common.client.modifyVolume( volume_name, {'snapCPG': new_snap_cpg, 'comment': json.dumps(comment_dict)}) self.needs_revert = True else: LOG.info("Modifying %s comments.", volume_name) common.client.modifyVolume( volume_name, {'comment': json.dumps(comment_dict)}) self.needs_revert = True def revert(self, common, volume_name, old_snap_cpg, new_snap_cpg, old_comment, **kwargs): if self.needs_revert: LOG.info("Retype revert %(volume_name)s snap_cpg from " "%(new_snap_cpg)s back to %(old_snap_cpg)s.", {'volume_name': volume_name, 'new_snap_cpg': new_snap_cpg, 'old_snap_cpg': old_snap_cpg}) try: common.client.modifyVolume( volume_name, {'snapCPG': old_snap_cpg, 'comment': old_comment}) except Exception as ex: LOG.error("Exception during snapCPG revert: %s", ex) class TuneVolumeTask(flow_utils.CinderTask): """Task to change a volume's CPG and/or provisioning type. This is a task for changing the CPG and/or provisioning type. It is intended for use during retype(). This task has no revert. 
The current design is to do this task last and do revert-able tasks first. Un-doing a tunevv can be expensive and should be avoided. """ def __init__(self, action, **kwargs): super(TuneVolumeTask, self).__init__(addons=[action]) def execute(self, common, old_tpvv, new_tpvv, old_tdvv, new_tdvv, old_cpg, new_cpg, volume_name, new_compression): common.tune_vv(old_tpvv, new_tpvv, old_tdvv, new_tdvv, old_cpg, new_cpg, volume_name, new_compression) class ModifySpecsTask(flow_utils.CinderTask): """Set/unset the QOS settings and/or VV set for the volume's new type. This is a task for changing the QOS settings and/or VV set. It is intended for use during retype(). If changes are made during execute(), then they need to be undone if revert() is called (i.e., if a later task fails). For 3PAR, we ignore QOS settings if a VVS is explicitly set, otherwise we create a VV set and use that for QOS settings. That is why they are lumped together here. Most of the decision-making about VVS vs. QOS settings vs. old-style scoped extra-specs is handled in existing reusable code. Here we mainly need to know what old stuff to remove before calling the function that knows how to set the new stuff. Basic task flow is as follows: Remove the volume from the old externally created VVS (when appropriate), delete the old cinder-created VVS, call the function that knows how to set a new VVS or QOS settings. If any changes are made during execute, then revert needs to reverse them. """ def __init__(self, action): self.needs_revert = False super(ModifySpecsTask, self).__init__(addons=[action]) def execute(self, common, volume_name, volume, old_cpg, new_cpg, old_vvs, new_vvs, old_qos, new_qos, old_flash_cache, new_flash_cache): if (old_vvs != new_vvs or old_qos != new_qos or old_flash_cache != new_flash_cache): # Remove VV from old VV Set. if old_vvs is not None and old_vvs != new_vvs: common.client.removeVolumeFromVolumeSet(old_vvs, volume_name) self.needs_revert = True # If any extra or qos specs changed then remove the old # special VV set that we create. We'll recreate it # as needed. vvs_name = common._get_3par_vvs_name(volume['id']) try: common.client.deleteVolumeSet(vvs_name) self.needs_revert = True except hpeexceptions.HTTPNotFound as ex: # HTTPNotFound(code=102) is OK. Set does not exist. if ex.get_code() != 102: LOG.error("Unexpected error when retype() tried to " "deleteVolumeSet(%s)", vvs_name) raise if new_vvs or new_qos or new_flash_cache: common._add_volume_to_volume_set( volume, volume_name, new_cpg, new_vvs, new_qos, new_flash_cache) self.needs_revert = True def revert(self, common, volume_name, volume, old_vvs, new_vvs, old_qos, old_cpg, **kwargs): if self.needs_revert: # If any extra or qos specs changed then remove the old # special VV set that we create and recreate it per # the old type specs. vvs_name = common._get_3par_vvs_name(volume['id']) try: common.client.deleteVolumeSet(vvs_name) except hpeexceptions.HTTPNotFound as ex: # HTTPNotFound(code=102) is OK. Set does not exist. if ex.get_code() != 102: LOG.error("Unexpected error when retype() revert " "tried to deleteVolumeSet(%s)", vvs_name) except Exception: LOG.error("Unexpected error when retype() revert " "tried to deleteVolumeSet(%s)", vvs_name) if old_vvs is not None or old_qos is not None: try: common._add_volume_to_volume_set( volume, volume_name, old_cpg, old_vvs, old_qos) except Exception as ex: LOG.error("%(exception)s: Exception during revert of " "retype for volume %(volume_name)s. 
" "Original volume set/QOS settings may not " "have been fully restored.", {'exception': ex, 'volume_name': volume_name}) if new_vvs is not None and old_vvs != new_vvs: try: common.client.removeVolumeFromVolumeSet( new_vvs, volume_name) except Exception as ex: LOG.error("%(exception)s: Exception during revert of " "retype for volume %(volume_name)s. " "Failed to remove from new volume set " "%(new_vvs)s.", {'exception': ex, 'volume_name': volume_name, 'new_vvs': new_vvs}) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/drivers/hpe/hpe_3par_fc.py0000664000175000017500000006452300000000000022631 0ustar00zuulzuul00000000000000# (c) Copyright 2013-2015 Hewlett Packard Enterprise Development LP # All Rights Reserved. # # Copyright 2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # """Volume driver for HPE 3PAR Storage array. This driver requires 3.1.3 or later firmware on the 3PAR array, using the 4.x version of the hpe3parclient. You will need to install the python hpe3parclient. sudo pip install --upgrade "hpe3parclient>=4.0" Set the following in the cinder.conf file to enable the 3PAR Fibre Channel Driver along with the required flags: volume_driver=cinder.volume.drivers.hpe.hpe_3par_fc.HPE3PARFCDriver """ try: from hpe3parclient import exceptions as hpeexceptions except ImportError: hpeexceptions = None from oslo_log import log as logging from oslo_utils.excutils import save_and_reraise_exception from cinder.common import constants from cinder import coordination from cinder import interface from cinder.volume.drivers.hpe import hpe_3par_base as hpebasedriver from cinder.volume import volume_utils from cinder.zonemanager import utils as fczm_utils LOG = logging.getLogger(__name__) # EXISTENT_PATH error code returned from hpe3parclient EXISTENT_PATH = 73 @interface.volumedriver class HPE3PARFCDriver(hpebasedriver.HPE3PARDriverBase): """OpenStack Fibre Channel driver to enable 3PAR storage array. Version history: .. code-block:: none 1.0 - Initial driver 1.1 - QoS, extend volume, multiple iscsi ports, remove domain, session changes, faster clone, requires 3.1.2 MU2 firmware, copy volume <--> Image. 1.2.0 - Updated the use of the hp3parclient to 2.0.0 and refactored the drivers to use the new APIs. 1.2.1 - Synchronized extend_volume method. 1.2.2 - Added try/finally around client login/logout. 1.2.3 - Added ability to add WWNs to host. 1.2.4 - Added metadata during attach/detach bug #1258033. 1.3.0 - Removed all SSH code. We rely on the hp3parclient now. 2.0.0 - Update hp3parclient API uses 3.0.x 2.0.2 - Add back-end assisted volume migrate 2.0.3 - Added initiator-target map for FC Zone Manager 2.0.4 - Added support for managing/unmanaging of volumes 2.0.5 - Only remove FC Zone on last volume detach 2.0.6 - Added support for volume retype 2.0.7 - Only one FC port is used when a single FC path is present. 
bug #1360001 2.0.8 - Fixing missing login/logout around attach/detach bug #1367429 2.0.9 - Add support for pools with model update 2.0.10 - Migrate without losing type settings bug #1356608 2.0.11 - Removing locks bug #1381190 2.0.12 - Fix queryHost call to specify wwns bug #1398206 2.0.13 - Fix missing host name during attach bug #1398206 2.0.14 - Removed usage of host name cache #1398914 2.0.15 - Added support for updated detach_volume attachment. 2.0.16 - Added encrypted property to initialize_connection #1439917 2.0.17 - Improved VLUN creation and deletion logic. #1469816 2.0.18 - Changed initialize_connection to use getHostVLUNs. #1475064 2.0.19 - Adds consistency group support 2.0.20 - Update driver to use ABC metaclasses 2.0.21 - Added update_migrated_volume. bug # 1492023 3.0.0 - Rebranded HP to HPE. 3.0.1 - Remove db access for consistency groups 3.0.2 - Adds v2 managed replication support 3.0.3 - Adds v2 unmanaged replication support 3.0.4 - Adding manage/unmanage snapshot support 3.0.5 - Optimize array ID retrieval 3.0.6 - Update replication to version 2.1 3.0.7 - Remove metadata that tracks the instance ID. bug #1572665 3.0.8 - NSP feature, creating FC Vlun as match set instead of host sees. bug #1577993 3.0.9 - Handling HTTP conflict 409, host WWN/iSCSI name already used by another host, while creating 3PAR FC Host. bug #1597454 3.0.10 - Added Entry point tracing 3.0.11 - Handle manage and unmanage hosts present. bug #1648067 3.0.12 - Adds consistency group capability in generic volume groups. 4.0.0 - Adds base class. 4.0.1 - Added check to remove FC zones. bug #1730720 4.0.2 - Create one vlun in single path configuration. bug #1727176 4.0.3 - Create FC vlun as host sees. bug #1734505 4.0.4 - Handle force detach case. bug #1686745 4.0.5 - Set proper backend on subsequent operation, after group failover. bug #1773069 4.0.6 - Set NSP for single path attachments. Bug #1809249 4.0.7 - Added Peer Persistence feature 4.0.8 - For PP, return LUN ids from both arrays. Bug #2044255 """ VERSION = "4.0.8" # The name of the CI wiki page. CI_WIKI_NAME = "HPE_Storage_CI" def __init__(self, *args, **kwargs): super(HPE3PARFCDriver, self).__init__(*args, **kwargs) self.lookup_service = fczm_utils.create_lookup_service() self.protocol = constants.FC def _initialize_connection_common(self, volume, connector, common, host, target_wwns, init_targ_map, numPaths, remote_client=None): # check if a VLUN already exists for this host existing_vlun = common.find_existing_vlun(volume, host, remote_client) vlun = None if existing_vlun is None: # now that we have a host, create the VLUN if self.lookup_service and numPaths == 1: nsp = None active_fc_port_list = ( common.get_active_fc_target_ports(remote_client)) for port in active_fc_port_list: if port['portWWN'].lower() == target_wwns[0].lower(): nsp = port['nsp'] break vlun = common.create_vlun(volume, host, nsp, None, remote_client) else: vlun = common.create_vlun(volume, host, None, None, remote_client) else: vlun = existing_vlun info_backend = {'driver_volume_type': 'fibre_channel', 'data': {'target_lun': vlun['lun'], 'target_discovered': True, 'target_wwn': target_wwns, 'initiator_target_map': init_targ_map}} encryption_key_id = volume.get('encryption_key_id') info_backend['data']['encrypted'] = encryption_key_id is not None fczm_utils.add_fc_zone(info_backend) return info_backend @volume_utils.trace @coordination.synchronized('3par-{volume.id}') def initialize_connection(self, volume, connector): """Assigns the volume to a server. 
Assign any created volume to a compute node/host so that it can be used from that host. The driver returns a driver_volume_type of 'fibre_channel'. The target_wwn can be a single entry or a list of wwns that correspond to the list of remote wwn(s) that will export the volume. Example return values: { 'driver_volume_type': 'fibre_channel' 'data': { 'encrypted': False, 'target_discovered': True, 'target_lun': 1, 'target_wwn': '1234567890123', } } or { 'driver_volume_type': 'fibre_channel' 'data': { 'encrypted': False, 'target_discovered': True, 'target_lun': 1, 'target_wwn': ['1234567890123', '0987654321321'], } } Steps to export a volume on 3PAR * Create a host on the 3par with the target wwn * Create a VLUN for that HOST with the volume we want to export. """ LOG.debug("volume id: %(volume_id)s", {'volume_id': volume['id']}) array_id = self.get_volume_replication_driver_data(volume) common = self._login(array_id=array_id) try: # we have to make sure we have a host host, cpg = self._create_host(common, volume, connector) target_wwns, init_targ_map, numPaths = ( self._build_initiator_target_map(common, connector)) multipath = connector.get('multipath') LOG.debug("multipath: %(multipath)s", {'multipath': multipath}) user_target = None if not multipath: user_target = self._get_user_target(common) initiator = connector.get('wwpns')[0] if user_target is None: target_wwns = target_wwns[:1] init_targ_map[initiator] = init_targ_map[initiator][:1] else: target_wwns = [user_target] init_targ_map[initiator] = [user_target] info = self._initialize_connection_common( volume, connector, common, host, target_wwns, init_targ_map, numPaths) if not multipath: return info if volume.get('replication_status') != 'enabled': return info LOG.debug('This is a replication setup') remote_target = common._replication_targets[0] replication_mode = remote_target['replication_mode'] quorum_witness_ip = remote_target.get('quorum_witness_ip') if replication_mode == 1: LOG.debug('replication_mode is sync') if quorum_witness_ip: LOG.debug('quorum_witness_ip is present') LOG.debug('Peer Persistence has been configured') else: LOG.debug('Since quorum_witness_ip is absent, ' 'considering this as Active/Passive ' 'replication') return info else: LOG.debug('Active/Passive replication has been ' 'configured') return info # Peer Persistence has been configured remote_client = common._create_replication_client(remote_target) host, cpg = self._create_host( common, volume, connector, remote_target, cpg, remote_client) target_wwns, init_targ_map, numPaths = ( self._build_initiator_target_map( common, connector, remote_client)) info_peer = self._initialize_connection_common( volume, connector, common, host, target_wwns, init_targ_map, numPaths, remote_client) common._destroy_replication_client(remote_client) len_main_wwn = len(info['data']['target_wwn']) target_luns = [] target_luns = [info['data']['target_lun']] * len_main_wwn len_backup_wwn = len(info_peer['data']['target_wwn']) target_luns += [info_peer['data']['target_lun']] * len_backup_wwn info = {'driver_volume_type': 'fibre_channel', 'data': {'encrypted': info['data']['encrypted'], 'target_luns': target_luns, 'target_discovered': True, 'target_wwn': info['data']['target_wwn'] + info_peer['data']['target_wwn'], 'initiator_target_map': self.merge_dicts( info['data']['initiator_target_map'], info_peer['data']['initiator_target_map'])}} return info finally: self._logout(common) @volume_utils.trace @coordination.synchronized('3par-{volume.id}') def terminate_connection(self, volume, 
connector, **kwargs): """Driver entry point to detach a volume from an instance.""" array_id = self.get_volume_replication_driver_data(volume) common = self._login(array_id=array_id) try: is_force_detach = connector is None remote_client = None multipath = False if connector: multipath = connector.get('multipath') LOG.debug("multipath: %(multipath)s", {'multipath': multipath}) if multipath: if volume.get('replication_status') == 'enabled': LOG.debug('This is a replication setup') remote_target = common._replication_targets[0] replication_mode = remote_target['replication_mode'] quorum_witness_ip = ( remote_target.get('quorum_witness_ip')) if replication_mode == 1: LOG.debug('replication_mode is sync') if quorum_witness_ip: LOG.debug('quorum_witness_ip is present') LOG.debug('Peer Persistence has been configured') else: LOG.debug('Since quorum_witness_ip is absent, ' 'considering this as Active/Passive ' 'replication') else: LOG.debug('Active/Passive replication has been ' 'configured') if replication_mode == 1 and quorum_witness_ip: remote_client = ( common._create_replication_client(remote_target)) if is_force_detach: common.terminate_connection(volume, None, None) # TODO(sonivi): remove zones, if not required # for now, do not remove zones zone_remove = False else: hostname = common._safe_hostname(connector, self.configuration) common.terminate_connection(volume, hostname, wwn=connector['wwpns'], remote_client=remote_client) zone_remove = True try: vluns = common.client.getHostVLUNs(hostname) except hpeexceptions.HTTPNotFound: # No more exports for this host. pass else: # Vlun exists, so check for wwpn entry. for wwpn in connector.get('wwpns'): for vlun in vluns: if (vlun.get('active') and vlun.get('remoteName') == wwpn.upper()): zone_remove = False break info = {'driver_volume_type': 'fibre_channel', 'data': {}} if zone_remove: LOG.info("Need to remove FC Zone, building initiator " "target map") target_wwns, init_targ_map, _numPaths = ( self._build_initiator_target_map(common, connector)) info['data'] = {'target_wwn': target_wwns, 'initiator_target_map': init_targ_map} fczm_utils.remove_fc_zone(info) if remote_client: if zone_remove: try: vluns = remote_client.getHostVLUNs(hostname) except hpeexceptions.HTTPNotFound: # No more exports for this host. pass else: # Vlun exists, so check for wwpn entry. 
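# Added note: if any still-active VLUN on the peer (remote) array maps one of
# this connector's WWPNs, the fabric zone is still in use, so zone_remove is
# cleared and the peer-side FC zone is left in place.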
for wwpn in connector.get('wwpns'): for vlun in vluns: if (vlun.get('active') and vlun.get('remoteName') == wwpn.upper()): zone_remove = False break info_peer = {'driver_volume_type': 'fibre_channel', 'data': {}} if zone_remove: LOG.info("Need to remove FC Zone, building initiator " "target map") target_wwns, init_targ_map, _numPaths = ( self._build_initiator_target_map(common, connector, remote_client)) info_peer['data'] = {'target_wwn': target_wwns, 'initiator_target_map': init_targ_map} fczm_utils.remove_fc_zone(info_peer) info = ( {'driver_volume_type': 'fibre_channel', 'data': {'target_wwn': info['data']['target_wwn'] + info_peer['data']['target_wwn'], 'initiator_target_map': self.merge_dicts( info['data']['initiator_target_map'], info_peer['data']['initiator_target_map'])}}) return info finally: self._logout(common) def _build_initiator_target_map(self, common, connector, remote_client=None): """Build the target_wwns and the initiator target map.""" fc_ports = common.get_active_fc_target_ports(remote_client) all_target_wwns = [] target_wwns = [] init_targ_map = {} numPaths = 0 for port in fc_ports: all_target_wwns.append(port['portWWN']) if self.lookup_service is not None: # use FC san lookup to determine which NSPs to use # for the new VLUN. dev_map = self.lookup_service.get_device_mapping_from_network( connector['wwpns'], all_target_wwns) for fabric_name in dev_map: fabric = dev_map[fabric_name] target_wwns += fabric['target_port_wwn_list'] for initiator in fabric['initiator_port_wwn_list']: if initiator not in init_targ_map: init_targ_map[initiator] = [] init_targ_map[initiator] += fabric['target_port_wwn_list'] init_targ_map[initiator] = list(set( init_targ_map[initiator])) for _target in init_targ_map[initiator]: numPaths += 1 target_wwns = list(set(target_wwns)) else: initiator_wwns = connector['wwpns'] target_wwns = all_target_wwns for initiator in initiator_wwns: init_targ_map[initiator] = target_wwns return target_wwns, init_targ_map, numPaths def _create_3par_fibrechan_host(self, common, hostname, wwns, domain, persona_id, remote_client=None): """Create a 3PAR host. Create a 3PAR host, if there is already a host on the 3par using the same wwn but with a different hostname, return the hostname used by 3PAR. 
""" # first search for an existing host host_found = None if remote_client: client_obj = remote_client else: client_obj = common.client hosts = client_obj.queryHost(wwns=wwns) if hosts and hosts['members'] and 'name' in hosts['members'][0]: host_found = hosts['members'][0]['name'] if host_found is not None: return host_found else: persona_id = int(persona_id) try: client_obj.createHost(hostname, FCWwns=wwns, optional={'domain': domain, 'persona': persona_id}) except hpeexceptions.HTTPConflict as path_conflict: msg = "Create FC host caught HTTP conflict code: %s" LOG.exception(msg, path_conflict.get_code()) with save_and_reraise_exception(reraise=False) as ctxt: if path_conflict.get_code() is EXISTENT_PATH: # Handle exception : EXISTENT_PATH - host WWN/iSCSI # name already used by another host hosts = client_obj.queryHost(wwns=wwns) if hosts and hosts['members'] and ( 'name' in hosts['members'][0]): hostname = hosts['members'][0]['name'] else: # re rasise last caught exception ctxt.reraise = True else: # re rasise last caught exception # for other HTTP conflict ctxt.reraise = True return hostname def _modify_3par_fibrechan_host(self, common, hostname, wwn, remote_client): if remote_client: client_obj = remote_client else: client_obj = common.client mod_request = {'pathOperation': client_obj.HOST_EDIT_ADD, 'FCWWNs': wwn} try: client_obj.modifyHost(hostname, mod_request) except hpeexceptions.HTTPConflict as path_conflict: msg = ("Modify FC Host %(hostname)s caught " "HTTP conflict code: %(code)s") LOG.exception(msg, {'hostname': hostname, 'code': path_conflict.get_code()}) def _create_host(self, common, volume, connector, remote_target=None, src_cpg=None, remote_client=None): """Creates or modifies existing 3PAR host.""" host = None domain = None hostname = common._safe_hostname(connector, self.configuration) if remote_target: cpg = common._get_cpg_from_cpg_map( remote_target['cpg_map'], src_cpg) cpg_obj = remote_client.getCPG(cpg) if 'domain' in cpg_obj: domain = cpg_obj['domain'] else: cpg = common.get_cpg(volume, allowSnap=True) domain = common.get_domain(cpg) if not connector.get('multipath'): connector['wwpns'] = connector['wwpns'][:1] try: if remote_target: host = remote_client.getHost(hostname) else: host = common._get_3par_host(hostname) # Check whether host with wwn of initiator present on 3par hosts = common.client.queryHost(wwns=connector['wwpns']) host, hostname = ( common._get_prioritized_host_on_3par( host, hosts, hostname)) except hpeexceptions.HTTPNotFound: # get persona from the volume type extra specs persona_id = common.get_persona_type(volume) # host doesn't exist, we have to create it hostname = self._create_3par_fibrechan_host(common, hostname, connector['wwpns'], domain, persona_id, remote_client) if remote_target: host = remote_client.getHost(hostname) else: host = common._get_3par_host(hostname) return host, cpg else: host = self._add_new_wwn_to_host( common, host, connector['wwpns'], remote_client) return host, cpg def _add_new_wwn_to_host(self, common, host, wwns, remote_client=None): """Add wwns to a host if one or more don't exist. Identify if argument wwns contains any world wide names not configured in the 3PAR host path. If any are found, add them to the 3PAR host. 
""" # get the currently configured wwns # from the host's FC paths host_wwns = [] if 'FCPaths' in host: for path in host['FCPaths']: wwn = path.get('wwn', None) if wwn is not None: host_wwns.append(wwn.lower()) # lower case all wwns in the compare list compare_wwns = [x.lower() for x in wwns] # calculate wwns in compare list, but not in host_wwns list new_wwns = list(set(compare_wwns).difference(host_wwns)) # if any wwns found that were not in host list, # add them to the host if (len(new_wwns) > 0): self._modify_3par_fibrechan_host( common, host['name'], new_wwns, remote_client) if remote_client: host = remote_client.getHost(host['name']) else: host = common._get_3par_host(host['name']) return host def _get_user_target(self, common): target_nsp = common.config.hpe3par_target_nsp if not target_nsp: return None # Get target wwn from target nsp fc_ports = common.get_active_fc_target_ports() target_wwn = None for port in fc_ports: nsp = port['nsp'] if target_nsp == nsp: target_wwn = port['portWWN'] break if not target_wwn: LOG.warning("Did not get wwn for target nsp: " "%(nsp)s", {'nsp': target_nsp}) return target_wwn def merge_dicts(self, dict_1, dict_2): keys = set(dict_1).union(dict_2) no = [] return {k: (dict_1.get(k, no) + dict_2.get(k, no)) for k in keys} ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/drivers/hpe/hpe_3par_iscsi.py0000664000175000017500000011675000000000000023353 0ustar00zuulzuul00000000000000# (c) Copyright 2012-2015 Hewlett Packard Enterprise Development LP # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # """ Volume driver for HPE 3PAR Storage array. This driver requires 3.1.3 or later firmware on the 3PAR array, using the 4.x version of the hpe3parclient. You will need to install the python hpe3parclient. sudo pip install --upgrade "hpe3parclient>=4.0" Set the following in the cinder.conf file to enable the 3PAR iSCSI Driver along with the required flags: volume_driver=cinder.volume.drivers.hpe.hpe_3par_iscsi.HPE3PARISCSIDriver """ import re import sys try: from hpe3parclient import exceptions as hpeexceptions except ImportError: hpeexceptions = None from oslo_log import log as logging from oslo_utils.excutils import save_and_reraise_exception from cinder.common import constants from cinder import coordination from cinder import exception from cinder.i18n import _ from cinder import interface from cinder.volume.drivers.hpe import hpe_3par_base as hpebasedriver from cinder.volume import volume_utils LOG = logging.getLogger(__name__) # EXISTENT_PATH error code returned from hpe3parclient EXISTENT_PATH = 73 DEFAULT_ISCSI_PORT = 3260 CHAP_USER_KEY = "HPQ-cinder-CHAP-name" CHAP_PASS_KEY = "HPQ-cinder-CHAP-secret" @interface.volumedriver class HPE3PARISCSIDriver(hpebasedriver.HPE3PARDriverBase): """OpenStack iSCSI driver to enable 3PAR storage array. Version history: .. 
code-block:: none 1.0 - Initial driver 1.1 - QoS, extend volume, multiple iscsi ports, remove domain, session changes, faster clone, requires 3.1.2 MU2 firmware. 1.2.0 - Updated the use of the hp3parclient to 2.0.0 and refactored the drivers to use the new APIs. 1.2.1 - Synchronized extend_volume method. 1.2.2 - Added try/finally around client login/logout. 1.2.3 - log exceptions before raising 1.2.4 - Fixed iSCSI active path bug #1224594 1.2.5 - Added metadata during attach/detach bug #1258033 1.2.6 - Use least-used iscsi n:s:p for iscsi volume attach bug #1269515 This update now requires 3.1.2 MU3 firmware 1.3.0 - Removed all SSH code. We rely on the hp3parclient now. 2.0.0 - Update hp3parclient API uses 3.0.x 2.0.2 - Add back-end assisted volume migrate 2.0.3 - Added support for managing/unmanaging of volumes 2.0.4 - Added support for volume retype 2.0.5 - Added CHAP support, requires 3.1.3 MU1 firmware and hp3parclient 3.1.0. 2.0.6 - Fixing missing login/logout around attach/detach bug #1367429 2.0.7 - Add support for pools with model update 2.0.8 - Migrate without losing type settings bug #1356608 2.0.9 - Removing locks bug #1381190 2.0.10 - Add call to queryHost instead SSH based findHost #1398206 2.0.11 - Added missing host name during attach fix #1398206 2.0.12 - Removed usage of host name cache #1398914 2.0.13 - Update LOG usage to fix translations. bug #1384312 2.0.14 - Do not allow a different iSCSI IP (hp3par_iscsi_ips) to be used during live-migration. bug #1423958 2.0.15 - Added support for updated detach_volume attachment. 2.0.16 - Added encrypted property to initialize_connection #1439917 2.0.17 - Python 3 fixes 2.0.18 - Improved VLUN creation and deletion logic. #1469816 2.0.19 - Changed initialize_connection to use getHostVLUNs. #1475064 2.0.20 - Adding changes to support 3PAR iSCSI multipath. 2.0.21 - Adds consistency group support 2.0.22 - Update driver to use ABC metaclasses 2.0.23 - Added update_migrated_volume. bug # 1492023 3.0.0 - Rebranded HP to HPE. 3.0.1 - Python 3 support 3.0.2 - Remove db access for consistency groups 3.0.3 - Fix multipath dictionary key error. bug #1522062 3.0.4 - Adds v2 managed replication support 3.0.5 - Adds v2 unmanaged replication support 3.0.6 - Adding manage/unmanage snapshot support 3.0.7 - Optimize array ID retrieval 3.0.8 - Update replication to version 2.1 3.0.9 - Use same LUN ID for each VLUN path #1551994 3.0.10 - Remove metadata that tracks the instance ID. bug #1572665 3.0.11 - _create_3par_iscsi_host() now accepts iscsi_iqn as list only. Bug #1590180 3.0.12 - Added entry point tracing 3.0.13 - Handling HTTP conflict 409, host WWN/iSCSI name already used by another host, while creating 3PAR iSCSI Host. bug #1642945 3.0.14 - Handle manage and unmanage hosts present. bug #1648067 3.0.15 - Adds consistency group capability in generic volume groups. 3.0.16 - Get host from os-brick connector. bug #1690244 4.0.0 - Adds base class. 4.0.1 - Update CHAP on host record when volume is migrated to new compute host. bug # 1737181 4.0.2 - Handle force detach case. bug #1686745 4.0.3 - Set proper backend on subsequent operation, after group failover. bug #1773069 4.0.4 - Added Peer Persistence feature 4.0.5 - Added Primera array check. bug #1849525 4.0.6 - Allow iSCSI support for Primera 4.2 onwards 4.0.7 - Use vlan iscsi ips. Bug #2015034 4.0.8 - Add ipv6 support. Bug #2045411 4.0.9 - getWsApiVersion now requires login 4.0.10 - Ignore duplicate IP address in iSCSI/vlan ip """ VERSION = "4.0.10" # The name of the CI wiki page. 
CI_WIKI_NAME = "HPE_Storage_CI" def __init__(self, *args, **kwargs): super(HPE3PARISCSIDriver, self).__init__(*args, **kwargs) self.protocol = constants.ISCSI def _do_setup(self, common): client_obj = common.client is_primera = client_obj.is_primera_array() if is_primera: common.client_login() api_version = client_obj.getWsApiVersion() array_version = api_version['build'] LOG.debug("array version: %(version)s", {'version': array_version}) if array_version < 40200000: err_msg = (_('The iSCSI driver is not supported for ' 'Primera %(version)s. It is supported ' 'for Primera 4.2 or higher versions.') % {'version': array_version}) LOG.error(err_msg) raise NotImplementedError() self.iscsi_ips = {} common.client_login() try: self.initialize_iscsi_ports(common) finally: self._logout(common) def _update_dicts(self, temp_iscsi_ip, iscsi_ip_list, ip, port): ip_port = temp_iscsi_ip[ip]['ip_port'] iscsi_ip_list[ip] = {'ip_port': ip_port, 'nsp': port['nsp'], 'iqn': port['iSCSIName']} del temp_iscsi_ip[ip] def initialize_iscsi_ports(self, common, remote_target=None, remote_client=None): # map iscsi_ip-> ip_port # -> iqn # -> nsp iscsi_ip_list = {} temp_iscsi_ip = {} if remote_target: backend_conf = remote_target else: backend_conf = common._client_conf # use the 3PAR ip_addr list for iSCSI configuration if len(backend_conf['hpe3par_iscsi_ips']) > 0: # add port values to ip_addr, if necessary for ip_addr in backend_conf['hpe3par_iscsi_ips']: if "." in ip_addr: # v4 ip = ip_addr.split(':') if len(ip) == 1: temp_iscsi_ip[ip_addr] = ( {'ip_port': DEFAULT_ISCSI_PORT}) elif len(ip) == 2: temp_iscsi_ip[ip[0]] = {'ip_port': ip[1]} elif ":" in ip_addr: # v6 if "]" in ip_addr: ip = ip_addr.split(']:') ip_addr_v6 = ip[0] ip_addr_v6 = ip_addr_v6.strip('[') port_v6 = ip[1] temp_iscsi_ip[ip_addr_v6] = {'ip_port': port_v6} else: temp_iscsi_ip[ip_addr] = ( {'ip_port': DEFAULT_ISCSI_PORT}) else: LOG.warning("Invalid IP address format '%s'", ip_addr) # add the single value iscsi_ip_address option to the IP dictionary. # This way we can see if it's a valid iSCSI IP. If it's not valid, # we won't use it and won't bother to report it, see below if 'iscsi_ip_address' in backend_conf: if (backend_conf['iscsi_ip_address'] not in temp_iscsi_ip): ip = backend_conf['iscsi_ip_address'] ip_port = backend_conf['iscsi_port'] temp_iscsi_ip[ip] = {'ip_port': ip_port} # get all the valid iSCSI ports from 3PAR # when found, add the valid iSCSI ip, ip port, iqn and nsp # to the iSCSI IP dictionary iscsi_ports = common.get_active_iscsi_target_ports(remote_client) LOG.debug("iscsi_ports: %(iscsi_ports)s", {'iscsi_ports': iscsi_ports}) for port in iscsi_ports: ip = port['IPAddr'] if ip in temp_iscsi_ip: self._update_dicts(temp_iscsi_ip, iscsi_ip_list, ip, port) if 'iSCSIVlans' in port: for vip in port['iSCSIVlans']: ip = vip['IPAddr'] if ip in temp_iscsi_ip: LOG.debug("vlan ip: %(ip)s", {'ip': ip}) self._update_dicts(temp_iscsi_ip, iscsi_ip_list, ip, port) # if the single value iscsi_ip_address option is still in the # temp dictionary it's because it defaults to $my_ip which doesn't # make sense in this context. So, if present, remove it and move on. 
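# Added note: at this point iscsi_ip_list maps each valid target IP to its
# 'ip_port', 'nsp' and 'iqn' (populated by _update_dicts() above); anything
# still left in temp_iscsi_ip was configured but not found among the array's
# active iSCSI ports.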
if 'iscsi_ip_address' in backend_conf: if backend_conf['iscsi_ip_address'] in temp_iscsi_ip: del temp_iscsi_ip[backend_conf['iscsi_ip_address']] # lets see if there are invalid iSCSI IPs left in the temp dict if len(temp_iscsi_ip) > 0: LOG.warning("Found invalid iSCSI IP address(s) in " "configuration option(s) hpe3par_iscsi_ips or " "target_ip_address '%s.'", (", ".join(temp_iscsi_ip))) if not len(iscsi_ip_list): msg = _('At least one valid iSCSI IP address must be set.') LOG.error(msg) raise exception.InvalidInput(reason=msg) if remote_target: self.iscsi_ips[remote_target['hpe3par_api_url']] = iscsi_ip_list else: self.iscsi_ips[common._client_conf['hpe3par_api_url']] = ( iscsi_ip_list) def _vlun_create_or_use_existing(self, volume, common, host, iscsi_ips, target_portals, target_iqns, target_luns, remote_client, target_portal_ips, existing_vluns, iscsi_ip, lun_id, port): vlun = None # check for an already existing VLUN matching the # nsp for this iSCSI IP. If one is found, use it # instead of creating a new VLUN. for v in existing_vluns: portPos = common.build_portPos( iscsi_ips[iscsi_ip]['nsp']) if v['portPos'] == portPos: vlun = v break else: vlun = common.create_vlun( volume, host, iscsi_ips[iscsi_ip]['nsp'], lun_id=lun_id, remote_client=remote_client) # This function is called multiple times (from a for loop). # We want to use the same LUN ID for every port. # For first port, lun_id is received as None. # - assign lun_id = vlun['lun'] and return it. # Thus for subsequent ports, that same lun_id is used # in create_vlun() above. if lun_id is None: lun_id = vlun['lun'] if ":" in iscsi_ip: # v6 iscsi_ip_port = "[%s]:%s" % ( iscsi_ip, iscsi_ips[iscsi_ip]['ip_port']) else: # v4 iscsi_ip_port = "%s:%s" % ( iscsi_ip, iscsi_ips[iscsi_ip]['ip_port']) LOG.debug("iscsi_ip_port: %(var)s", {'var': iscsi_ip_port}) target_portals.append(iscsi_ip_port) target_iqns.append(port['iSCSIName']) target_luns.append(vlun['lun']) return lun_id def _initialize_connection_common(self, volume, connector, common, host, iscsi_ips, ready_ports, target_portals, target_iqns, target_luns, remote_client=None): # Target portal ips are defined in cinder.conf. target_portal_ips = iscsi_ips.keys() # Collect all existing VLUNs for this volume/host combination. existing_vluns = common.find_existing_vluns(volume, host, remote_client) # Cycle through each ready iSCSI port and determine if a new # VLUN should be created or an existing one used. 
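# Added note: lun_id starts as None and is set by the first
# _vlun_create_or_use_existing() call; that same LUN id is then reused for
# every remaining portal so multipath sees a single LUN across all paths.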
lun_id = None for port in ready_ports: iscsi_ip = port['IPAddr'] if iscsi_ip in target_portal_ips: LOG.debug("for iscsi ip: %(ip)s, create vlun or use existing", {'ip': iscsi_ip}) lun_id = ( self._vlun_create_or_use_existing( volume, common, host, iscsi_ips, target_portals, target_iqns, target_luns, remote_client, target_portal_ips, existing_vluns, iscsi_ip, lun_id, port)) else: LOG.debug("iscsi ip: %(ip)s was not found in " "hpe3par_iscsi_ips list defined in " "cinder.conf.", {'ip': iscsi_ip}) if 'iSCSIVlans' in port: LOG.debug("for port IPAddr: %(ip)s, the iSCSIVlans are: " "%(vlans)s", {'ip': iscsi_ip, 'vlans': port['iSCSIVlans']}) for vip in port['iSCSIVlans']: vlan_ip = vip['IPAddr'] # if vlan_ip is in cinder.conf and # vlan_ip is not same as iscsi_ip # only then proceed with lun creation if vlan_ip in target_portal_ips and vlan_ip != iscsi_ip: LOG.debug("for vlan ip: %(ip)s, create vlun or use " "existing", {'ip': vlan_ip}) lun_id = ( self._vlun_create_or_use_existing( volume, common, host, iscsi_ips, target_portals, target_iqns, target_luns, remote_client, target_portal_ips, existing_vluns, vlan_ip, lun_id, port)) @volume_utils.trace @coordination.synchronized('3par-{volume.id}') def initialize_connection(self, volume, connector): """Assigns the volume to a server. Assign any created volume to a compute node/host so that it can be used from that host. This driver returns a driver_volume_type of 'iscsi'. The format of the driver data is defined in _get_iscsi_properties. Example return value: .. code-block:: default { 'driver_volume_type': 'iscsi', 'data': { 'encrypted': False, 'target_discovered': True, 'target_iqn': 'iqn.2010-10.org.openstack:volume-00000001', 'target_protal': '127.0.0.1:3260', 'volume_id': 1, } } Steps to export a volume on 3PAR * Get the 3PAR iSCSI iqn * Create a host on the 3par * create vlun on the 3par """ LOG.debug("volume id: %(volume_id)s", {'volume_id': volume['id']}) array_id = self.get_volume_replication_driver_data(volume) common = self._login(array_id=array_id) try: # If the volume has been failed over, we need to reinitialize # iSCSI ports so they represent the new array. 
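# Added note: self.iscsi_ips is keyed by hpe3par_api_url, so after a group
# failover the current array's URL may not be present yet and its iSCSI
# ports must be discovered before a target portal can be chosen.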
if volume.get('replication_status') == 'failed-over' and ( common._client_conf['hpe3par_api_url'] not in self.iscsi_ips): self.initialize_iscsi_ports(common) # Grab the correct iSCSI ports iscsi_ips = self.iscsi_ips[common._client_conf['hpe3par_api_url']] # we have to make sure we have a host host, username, password, cpg = self._create_host( common, volume, connector) multipath = connector.get('multipath') LOG.debug("multipath: %(multipath)s", {'multipath': multipath}) if multipath: ready_ports = common.client.getiSCSIPorts( state=common.client.PORT_STATE_READY) target_portals = [] target_iqns = [] target_luns = [] self._initialize_connection_common( volume, connector, common, host, iscsi_ips, ready_ports, target_portals, target_iqns, target_luns) if volume.get('replication_status') == 'enabled': LOG.debug('This is a replication setup') remote_target = common._replication_targets[0] replication_mode = remote_target['replication_mode'] quorum_witness_ip = ( remote_target.get('quorum_witness_ip')) if replication_mode == 1: LOG.debug('replication_mode is sync') if quorum_witness_ip: LOG.debug('quorum_witness_ip is present') LOG.debug('Peer Persistence has been configured') else: LOG.debug('Since quorum_witness_ip is absent, ' 'considering this as Active/Passive ' 'replication') else: LOG.debug('Active/Passive replication has been ' 'configured') if replication_mode == 1 and quorum_witness_ip: remote_client = ( common._create_replication_client(remote_target)) self.initialize_iscsi_ports( common, remote_target, remote_client) remote_iscsi_ips = ( self.iscsi_ips[remote_target['hpe3par_api_url']]) # we have to make sure we have a host host, username, password, cpg = ( self._create_host( common, volume, connector, remote_target, cpg, remote_client)) ready_ports = remote_client.getiSCSIPorts( state=remote_client.PORT_STATE_READY) self._initialize_connection_common( volume, connector, common, host, remote_iscsi_ips, ready_ports, target_portals, target_iqns, target_luns, remote_client) common._destroy_replication_client(remote_client) info = {'driver_volume_type': 'iscsi', 'data': {'target_portals': target_portals, 'target_iqns': target_iqns, 'target_luns': target_luns, 'target_discovered': True } } else: least_used_nsp = None # check if a VLUN already exists for this host existing_vlun = common.find_existing_vlun(volume, host) if existing_vlun: # We override the nsp here on purpose to force the # volume to be exported out the same IP as it already is. # This happens during nova live-migration, we want to # disable the picking of a different IP that we export # the volume to, or nova complains. 
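# Added note: reuse the NSP of the existing VLUN (derived from its portPos)
# rather than the least-used one, so the export stays on the same target
# portal during live-migration.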
least_used_nsp = common.build_nsp(existing_vlun['portPos']) if not least_used_nsp: least_used_nsp = self._get_least_used_nsp_for_host( common, host['name']) vlun = None if existing_vlun is None: # now that we have a host, create the VLUN vlun = common.create_vlun(volume, host, least_used_nsp) else: vlun = existing_vlun if least_used_nsp is None: LOG.warning("Least busy iSCSI port not found, " "using first iSCSI port in list.") iscsi_ip = list(iscsi_ips)[0] else: iscsi_ip = self._get_ip_using_nsp(least_used_nsp, common) iscsi_ip_port = iscsi_ips[iscsi_ip]['ip_port'] iscsi_target_iqn = iscsi_ips[iscsi_ip]['iqn'] if ":" in iscsi_ip: # v6 target_portal = "[%s]:%s" % ( iscsi_ip, iscsi_ip_port) else: # v4 target_portal = "%s:%s" % ( iscsi_ip, iscsi_ip_port) LOG.debug("target_portal: %(var)s", {'var': target_portal}) info = {'driver_volume_type': 'iscsi', 'data': {'target_portal': target_portal, 'target_iqn': iscsi_target_iqn, 'target_lun': vlun['lun'], 'target_discovered': True } } if common._client_conf['hpe3par_iscsi_chap_enabled']: info['data']['auth_method'] = 'CHAP' info['data']['auth_username'] = username info['data']['auth_password'] = password encryption_key_id = volume.get('encryption_key_id', None) info['data']['encrypted'] = encryption_key_id is not None return info finally: self._logout(common) @volume_utils.trace @coordination.synchronized('3par-{volume.id}') def terminate_connection(self, volume, connector, **kwargs): """Driver entry point to detach a volume from an instance.""" array_id = self.get_volume_replication_driver_data(volume) common = self._login(array_id=array_id) try: is_force_detach = connector is None remote_client = None multipath = False if connector: multipath = connector.get('multipath') LOG.debug("multipath: %(multipath)s", {'multipath': multipath}) if multipath: if volume.get('replication_status') == 'enabled': LOG.debug('This is a replication setup') remote_target = common._replication_targets[0] replication_mode = remote_target['replication_mode'] quorum_witness_ip = ( remote_target.get('quorum_witness_ip')) if replication_mode == 1: LOG.debug('replication_mode is sync') if quorum_witness_ip: LOG.debug('quorum_witness_ip is present') LOG.debug('Peer Persistence has been configured') else: LOG.debug('Since quorum_witness_ip is absent, ' 'considering this as Active/Passive ' 'replication') else: LOG.debug('Active/Passive replication has been ' 'configured') if replication_mode == 1 and quorum_witness_ip: remote_client = ( common._create_replication_client(remote_target)) if is_force_detach: common.terminate_connection(volume, None, None) else: hostname = common._safe_hostname(connector, self.configuration) common.terminate_connection( volume, hostname, iqn=connector['initiator'], remote_client=remote_client) self._clear_chap_3par(common, volume) finally: self._logout(common) def _clear_chap_3par(self, common, volume): """Clears CHAP credentials on a 3par volume. Ignore exceptions caused by the keys not being present on a volume. """ vol_name = common._get_3par_vol_name(volume) try: common.client.removeVolumeMetaData(vol_name, CHAP_USER_KEY) except hpeexceptions.HTTPNotFound: pass except Exception: raise try: common.client.removeVolumeMetaData(vol_name, CHAP_PASS_KEY) except hpeexceptions.HTTPNotFound: pass except Exception: raise def _create_3par_iscsi_host(self, common, hostname, iscsi_iqn, domain, persona_id, remote_client=None): """Create a 3PAR host. 
Create a 3PAR host, if there is already a host on the 3par using the same iqn but with a different hostname, return the hostname used by 3PAR. """ # first search for an existing host host_found = None if remote_client: client_obj = remote_client else: client_obj = common.client hosts = client_obj.queryHost(iqns=iscsi_iqn) if hosts and hosts['members'] and 'name' in hosts['members'][0]: host_found = hosts['members'][0]['name'] if host_found is not None: return host_found else: persona_id = int(persona_id) try: client_obj.createHost(hostname, iscsiNames=iscsi_iqn, optional={'domain': domain, 'persona': persona_id}) except hpeexceptions.HTTPConflict as path_conflict: msg = "Create iSCSI host caught HTTP conflict code: %s" with save_and_reraise_exception(reraise=False) as ctxt: if path_conflict.get_code() is EXISTENT_PATH: # Handle exception : EXISTENT_PATH - host WWN/iSCSI # name already used by another host hosts = client_obj.queryHost(iqns=iscsi_iqn) if hosts and hosts['members'] and ( 'name' in hosts['members'][0]): hostname = hosts['members'][0]['name'] else: # re-raise last caught exception ctxt.reraise = True LOG.exception(msg, path_conflict.get_code()) else: # re-raise last caught exception # for other HTTP conflict ctxt.reraise = True LOG.exception(msg, path_conflict.get_code()) return hostname def _modify_3par_iscsi_host(self, common, hostname, iscsi_iqn): mod_request = {'pathOperation': common.client.HOST_EDIT_ADD, 'iSCSINames': [iscsi_iqn]} common.client.modifyHost(hostname, mod_request) def _set_3par_chaps(self, common, hostname, volume, username, password): """Sets a 3PAR host's CHAP credentials.""" if not common._client_conf['hpe3par_iscsi_chap_enabled']: return mod_request = {'chapOperation': common.client.HOST_EDIT_ADD, 'chapOperationMode': common.client.CHAP_INITIATOR, 'chapName': username, 'chapSecret': password} common.client.modifyHost(hostname, mod_request) def _create_host(self, common, volume, connector, remote_target=None, src_cpg=None, remote_client=None): """Creates or modifies existing 3PAR host.""" # make sure we don't have the host already host = None domain = None username = None password = None hostname = common._safe_hostname(connector, self.configuration) if remote_target: cpg = common._get_cpg_from_cpg_map( remote_target['cpg_map'], src_cpg) cpg_obj = remote_client.getCPG(cpg) if 'domain' in cpg_obj: domain = cpg_obj['domain'] else: cpg = common.get_cpg(volume, allowSnap=True) domain = common.get_domain(cpg) if not remote_target: # Get the CHAP secret if CHAP is enabled if common._client_conf['hpe3par_iscsi_chap_enabled']: vol_name = common._get_3par_vol_name(volume) username = common.client.getVolumeMetaData( vol_name, CHAP_USER_KEY)['value'] password = common.client.getVolumeMetaData( vol_name, CHAP_PASS_KEY)['value'] try: if remote_target: host = remote_client.getHost(hostname) else: host = common._get_3par_host(hostname) # Check whether host with iqn of initiator present on 3par hosts = common.client.queryHost(iqns=[connector['initiator']]) host, hostname = ( common._get_prioritized_host_on_3par( host, hosts, hostname)) except hpeexceptions.HTTPNotFound: # get persona from the volume type extra specs persona_id = common.get_persona_type(volume) # host doesn't exist, we have to create it hostname = self._create_3par_iscsi_host(common, hostname, [connector['initiator']], domain, persona_id, remote_client) else: if not remote_target: if 'iSCSIPaths' not in host or len(host['iSCSIPaths']) < 1: self._modify_3par_iscsi_host( common, hostname, 
connector['initiator']) elif (not host['initiatorChapEnabled'] and common._client_conf['hpe3par_iscsi_chap_enabled']): LOG.warning("Host exists without CHAP credentials set and " "has iSCSI attachments but CHAP is enabled. " "Updating host with new CHAP credentials.") if remote_target: host = remote_client.getHost(hostname) else: # set/update the chap details for the host self._set_3par_chaps(common, hostname, volume, username, password) host = common._get_3par_host(hostname) return host, username, password, cpg def _do_export(self, common, volume, connector): """Gets the associated account, generates CHAP info and updates.""" model_update = {} if not common._client_conf['hpe3par_iscsi_chap_enabled']: model_update['provider_auth'] = None return model_update # CHAP username will be the hostname chap_username = connector['host'] chap_password = None try: # Get all active VLUNs for the host vluns = common.client.getHostVLUNs(chap_username) # Host has active VLUNs... is CHAP enabled on host? host_info = common.client.getHost(chap_username) if not host_info['initiatorChapEnabled']: LOG.warning("Host has no CHAP key, but CHAP is enabled.") except hpeexceptions.HTTPNotFound: chap_password = volume_utils.generate_password(16) LOG.warning("No host or VLUNs exist. Generating new " "CHAP key.") else: # Get a list of all iSCSI VLUNs and see if there is already a CHAP # key assigned to one of them. Use that CHAP key if present, # otherwise create a new one. Skip any VLUNs that are missing # CHAP credentials in metadata. chap_exists = False active_vluns = 0 for vlun in vluns: if not vlun['active']: continue active_vluns += 1 # iSCSI connections start with 'iqn'. if ('remoteName' in vlun and re.match('iqn.*', vlun['remoteName'])): try: chap_password = common.client.getVolumeMetaData( vlun['volumeName'], CHAP_PASS_KEY)['value'] chap_exists = True break except hpeexceptions.HTTPNotFound: LOG.debug("The VLUN %s is missing CHAP credentials " "but CHAP is enabled. Skipping.", vlun['remoteName']) else: LOG.warning("Non-iSCSI VLUN detected.") if not chap_exists: chap_password = volume_utils.generate_password(16) LOG.warning("No VLUN contained CHAP credentials. " "Generating new CHAP key.") # Add CHAP credentials to the volume metadata vol_name = common._get_3par_vol_name(volume) common.client.setVolumeMetaData( vol_name, CHAP_USER_KEY, chap_username) common.client.setVolumeMetaData( vol_name, CHAP_PASS_KEY, chap_password) model_update['provider_auth'] = ('CHAP %s %s' % (chap_username, chap_password)) return model_update @volume_utils.trace def create_export(self, context, volume, connector): common = self._login() try: return self._do_export(common, volume, connector) finally: self._logout(common) @volume_utils.trace def ensure_export(self, context, volume): """Ensure the volume still exists on the 3PAR. 
Also retrieves CHAP credentials, if present on the volume """ common = self._login() try: vol_name = common._get_3par_vol_name(volume) common.client.getVolume(vol_name) except hpeexceptions.HTTPNotFound: LOG.error("Volume %s doesn't exist on array.", vol_name) else: metadata = common.client.getAllVolumeMetaData(vol_name) username = None password = None model_update = {} model_update['provider_auth'] = None for member in metadata['members']: if member['key'] == CHAP_USER_KEY: username = member['value'] elif member['key'] == CHAP_PASS_KEY: password = member['value'] if username and password: model_update['provider_auth'] = ('CHAP %s %s' % (username, password)) return model_update finally: self._logout(common) def _get_least_used_nsp_for_host(self, common, hostname): """Get the least used NSP for the current host. Steps to determine which NSP to use. * If only one iSCSI NSP, return it * If there is already an active vlun to this host, return its NSP * Return NSP with fewest active vluns """ iscsi_nsps = self._get_iscsi_nsps(common) # If there's only one path, use it if len(iscsi_nsps) == 1: return iscsi_nsps[0] # Try to reuse an existing iscsi path to the host vluns = common.client.getVLUNs() for vlun in vluns['members']: if vlun['active']: if vlun['hostname'] == hostname: temp_nsp = common.build_nsp(vlun['portPos']) if temp_nsp in iscsi_nsps: # this host already has an iscsi path, so use it return temp_nsp # Calculate the least used iscsi nsp least_used_nsp = self._get_least_used_nsp(common, vluns['members'], self._get_iscsi_nsps(common)) return least_used_nsp def _get_iscsi_nsps(self, common): """Return the list of candidate nsps.""" nsps = [] iscsi_ips = self.iscsi_ips[common._client_conf['hpe3par_api_url']] for value in iscsi_ips.values(): nsps.append(value['nsp']) return nsps def _get_ip_using_nsp(self, nsp, common): """Return IP associated with given nsp.""" iscsi_ips = self.iscsi_ips[common._client_conf['hpe3par_api_url']] for (key, value) in iscsi_ips.items(): if value['nsp'] == nsp: return key def _get_least_used_nsp(self, common, vluns, nspss): """Return the nsp that has the fewest active vluns.""" # return only the nsp (node:server:port) # count the number of nsps nsp_counts = {} for nsp in nspss: # initialize counts to zero nsp_counts[nsp] = 0 current_least_used_nsp = None for vlun in vluns: if vlun['active']: nsp = common.build_nsp(vlun['portPos']) if nsp in nsp_counts: nsp_counts[nsp] = nsp_counts[nsp] + 1 # identify key (nsp) of least used nsp current_smallest_count = sys.maxsize for (nsp, count) in nsp_counts.items(): if count < current_smallest_count: current_least_used_nsp = nsp current_smallest_count = count return current_least_used_nsp ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/drivers/hpe/nimble.py0000664000175000017500000032770000000000000021725 0ustar00zuulzuul00000000000000# Nimble Storage, Inc. (c) 2013-2014 # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
""" Volume driver for Nimble Storage. This driver supports Nimble Storage controller CS-Series and Nimble AF Arrays. """ import abc import functools import json import random import re import string import sys import eventlet from oslo_config import cfg from oslo_log import log as logging from oslo_utils import units import requests from cinder.common import constants from cinder import exception from cinder.i18n import _ from cinder import interface from cinder.objects import fields from cinder.objects import volume from cinder import utils from cinder.volume import configuration from cinder.volume import driver from cinder.volume.drivers.san import san from cinder.volume import volume_types from cinder.volume import volume_utils from cinder.zonemanager import utils as fczm_utils DRIVER_VERSION = "4.3.0" AES_256_XTS_CIPHER = 'aes_256_xts' DEFAULT_CIPHER = 'none' EXTRA_SPEC_ENCRYPTION = 'nimble:encryption' EXTRA_SPEC_PERF_POLICY = 'nimble:perfpol-name' EXTRA_SPEC_DEDUPE = 'nimble:dedupe' EXTRA_SPEC_IOPS_LIMIT = 'nimble:iops-limit' EXTRA_SPEC_FOLDER = 'nimble:folder' DEFAULT_PERF_POLICY_SETTING = 'default' DEFAULT_ENCRYPTION_SETTING = 'no' DEFAULT_DEDUPE_SETTING = 'false' DEFAULT_IOPS_LIMIT_SETTING = None DEFAULT_FOLDER_SETTING = None DEFAULT_SNAP_QUOTA = sys.maxsize BACKUP_VOL_PREFIX = 'backup-vol-' AGENT_TYPE_OPENSTACK = 'openstack' AGENT_TYPE_OPENSTACK_GST = 'openstackv2' AGENT_TYPE_NONE = 'none' SM_SUBNET_DATA = 'data' SM_SUBNET_MGMT_PLUS_DATA = 'mgmt-data' SM_STATE_MSG = "is already in requested state" SM_OBJ_EXIST_MSG = "Object exists" SM_OBJ_ENOENT_MSG = "No such object" SM_OBJ_HAS_CLONE = "has a clone" IOPS_ERR_MSG = "Please set valid IOPS limit in the range" LUN_ID = '0' WARN_LEVEL = 80 DEFAULT_SLEEP = 5 MIN_IOPS = 256 MAX_IOPS = 4294967294 NimbleDefaultVersion = 1 LOG = logging.getLogger(__name__) nimble_opts = [ cfg.StrOpt('nimble_pool_name', default='default', help='Nimble Controller pool name'), cfg.StrOpt('nimble_subnet_label', default='*', help='Nimble Subnet Label'), cfg.BoolOpt('nimble_verify_certificate', default=False, help='Whether to verify Nimble SSL Certificate'), cfg.StrOpt('nimble_verify_cert_path', help='Path to Nimble Array SSL certificate'), ] CONF = cfg.CONF CONF.register_opts(nimble_opts, group=configuration.SHARED_CONF_GROUP) class NimbleDriverException(exception.VolumeDriverException): message = _("Nimble Cinder Driver exception") class NimbleAPIException(exception.VolumeBackendAPIException): message = _("Unexpected response from Nimble API") class NimbleBaseVolumeDriver(san.SanDriver): """OpenStack driver to enable Nimble Controller. Version history: .. code-block:: none 1.0 - Initial driver 1.1.1 - Updated VERSION to Nimble driver version 1.1.2 - Update snap-quota to unlimited 2.0.0 - Added Extra Spec Capability Correct capacity reporting Added Manage/Unmanage volume support 2.0.1 - Added multi-initiator support through extra-specs 2.0.2 - Fixed supporting extra specs while cloning vols 3.0.0 - Newton Support for Force Backup 3.1.0 - Fibre Channel Support 4.0.0 - Migrate from SOAP to REST API Add support for Group Scoped Target 4.0.1 - Add QoS and dedupe support 4.1.0 - Added multiattach support Added revert to snapshot support Added consistency groups support 4.2.0 - The Nimble driver is now located in the cinder.volume.drivers.hpe module. 
4.3.0 - Added group replication support """ VERSION = DRIVER_VERSION # ThirdPartySystems wiki page CI_WIKI_NAME = "HPE_Nimble_Storage_CI" def __init__(self, *args, **kwargs): super(NimbleBaseVolumeDriver, self).__init__(*args, **kwargs) self.APIExecutor = None self.group_stats = {} self.api_protocol = None self._storage_protocol = None self._group_target_enabled = False self.configuration.append_config_values(nimble_opts) self.verify = False if self.configuration.nimble_verify_certificate is True: self.verify = self.configuration.nimble_verify_cert_path or True self.APIExecutor_remote_array = None self.remote_array = {} self._replicated_type = False @staticmethod def get_driver_options(): additional_opts = driver.BaseVD._get_oslo_driver_opts( 'max_over_subscription_ratio') return nimble_opts + additional_opts def _check_config(self): """Ensure that the flags we care about are set.""" required_config = ['san_ip', 'san_login', 'san_password'] for attr in required_config: if not getattr(self.configuration, attr, None): raise exception.InvalidInput(reason=_('%s is not set.') % attr) def create_volume(self, volume): """Create a new volume.""" reserve = not self.configuration.san_thin_provision LOG.debug("Creating volume: %(name)s", {'name': volume['name']}) self.APIExecutor.create_vol( volume, self.configuration.nimble_pool_name, reserve, self._storage_protocol, self._group_target_enabled) volume_type = volume.get('volume_type') consis_group_snap_type = False LOG.debug("volume_type: %(vol_type)s", {'vol_type': volume_type}) if volume_type is not None: consis_group_snap_type = self.is_volume_group_snap_type( volume_type) LOG.debug("consis_group_snap_type: %(cg_type)s", {'cg_type': consis_group_snap_type}) cg_id = volume.get('group_id', None) LOG.debug("cg_id: %(cg_id)s", {'cg_id': cg_id}) if consis_group_snap_type and cg_id: volume_id = self.APIExecutor.get_volume_id_by_name(volume['name']) cg_volcoll_id = self.APIExecutor.get_volcoll_id_by_name(cg_id) self.APIExecutor.associate_volcoll(volume_id, cg_volcoll_id) model_info = self._get_model_info(volume['name']) if self._replicated_type: model_info['replication_status'] = 'enabled' return model_info def is_volume_backup_clone(self, volume): """check if the volume is created through cinder-backup workflow. :param volume """ vol_info = self.APIExecutor.get_vol_info(volume['name']) LOG.debug("is_clone: %(is_clone)s base_snap_id: %(snap)s, " "parent_vol_id: %(vol)s", {'is_clone': vol_info['clone'], 'snap': vol_info['base_snap_id'], 'vol': vol_info['parent_vol_id']}) if vol_info['base_snap_id'] and ( vol_info['parent_vol_id'] is not None): LOG.debug("Nimble base-snap exists for volume %(vol)s", {'vol': volume['name']}) volume_name_prefix = volume['name'].replace(volume['id'], "") LOG.debug("volume_name_prefix : %(prefix)s", {'prefix': volume_name_prefix}) snap_id = self.APIExecutor.get_snap_info_by_id( vol_info['base_snap_id'], vol_info['parent_vol_id']) snap_info = self.APIExecutor.get_snap_info_detail(snap_id['id']) LOG.debug("snap_info description %(snap_info)s", {'snap_info': snap_info['description']}) if snap_info['description'] and BACKUP_VOL_PREFIX in ( snap_info['description']): # TODO(rkumar): get parent vol id from parent volume name parent_vol_name = self.APIExecutor.get_volume_name( vol_info['parent_vol_id']) parent_vol_id = parent_vol_name. 
replace( volume_name_prefix, "") if BACKUP_VOL_PREFIX + parent_vol_id in snap_info[ 'description']: LOG.info('Nimble backup-snapshot exists name=%(' 'name)s', {'name': snap_info['name']}) snap_vol_name = self.APIExecutor.get_volume_name( snap_info['vol_id']) LOG.debug("snap_vol_name %(snap)s", {'snap': snap_vol_name}) return snap_info['name'], snap_vol_name return "", "" def delete_volume(self, volume): """Delete the specified volume.""" backup_snap_name, backup_vol_name = self.is_volume_backup_clone(volume) eventlet.sleep(DEFAULT_SLEEP) if self._replicated_type: group_id = self.APIExecutor_remote_array.get_group_id() LOG.debug("group_id: %(id)s", {'id': group_id}) volume_id = self.APIExecutor_remote_array.get_volume_id_by_name( volume['name']) LOG.debug("volume_id: %(id)s", {'id': volume_id}) LOG.debug("claim vol on remote array") self.APIExecutor_remote_array.claim_vol(volume_id, group_id) LOG.debug("delete vol on remote array") self.APIExecutor_remote_array.delete_vol(volume['name']) # make the volume as offline self.APIExecutor.online_vol(volume['name'], False) LOG.debug("Deleting volume %(vol)s", {'vol': volume['name']}) @utils.retry(NimbleAPIException, retries=3) def _retry_remove_vol(volume): self.APIExecutor.delete_vol(volume['name']) try: _retry_remove_vol(volume) except NimbleAPIException as ex: LOG.debug("delete volume exception: %s", ex) if SM_OBJ_HAS_CLONE in str(ex): LOG.warning('Volume %(vol)s : %(state)s', {'vol': volume['name'], 'state': SM_OBJ_HAS_CLONE}) # set the volume back to be online and raise busy exception self.APIExecutor.online_vol(volume['name'], True) raise exception.VolumeIsBusy(volume_name=volume['name']) raise # Nimble backend does not delete the snapshot from the parent volume # if there is a dependent clone. So the deletes need to be in reverse # order i.e. # 1. First delete the clone volume used for backup # 2. Delete the base snapshot used for clone from the parent volume. # This is only done for the force backup clone operation as it is # a temporary operation in which we are certain that the snapshot does # not need to be preserved after the backup is completed. if (backup_snap_name != "" and backup_vol_name != "") and ( backup_snap_name is not None): LOG.debug("Delete volume backup vol: %(vol)s snap: %(snap)s", {'vol': backup_vol_name, 'snap': backup_snap_name}) self.APIExecutor.online_snap(backup_vol_name, False, backup_snap_name) self.APIExecutor.delete_snap(backup_vol_name, backup_snap_name) def _generate_random_string(self, length): """Generates random_string.""" char_set = string.ascii_lowercase return ''.join(random.sample(char_set, length)) def _clone_volume_from_snapshot(self, volume, snapshot): """Clone volume from snapshot. Extend the volume if the size of the volume is more than the snapshot. 
""" reserve = not self.configuration.san_thin_provision pool_name = self.configuration.nimble_pool_name self.APIExecutor.clone_vol(volume, snapshot, reserve, self._group_target_enabled, self._storage_protocol, pool_name) if volume['size'] > snapshot['volume_size']: vol_size = volume['size'] * units.Ki reserve_size = 100 if reserve else 0 data = {"data": {'size': vol_size, 'reserve': reserve_size, 'warn_level': int(WARN_LEVEL), 'limit': 100, 'snap_limit': DEFAULT_SNAP_QUOTA}} LOG.debug("Edit Vol request %(data)s", {'data': data}) self.APIExecutor.edit_vol(volume['name'], data) return self._get_model_info(volume['name']) def create_cloned_volume(self, volume, src_vref): """Create a clone of the specified volume.""" snapshot_name = ('openstack-clone-' + volume['name'] + '-' + self._generate_random_string(12)) snapshot = {'volume_name': src_vref['name'], 'name': snapshot_name, 'volume_size': src_vref['size'], 'display_name': volume.display_name, 'display_description': ''} self.APIExecutor.snap_vol(snapshot) self._clone_volume_from_snapshot(volume, snapshot) return self._get_model_info(volume['name']) def create_export(self, context, volume, connector): """Driver entry point to get the export info for a new volume.""" return self._get_model_info(volume['name']) def ensure_export(self, context, volume): """Driver entry point to get the export info for an existing volume.""" return self._get_model_info(volume['name']) def create_snapshot(self, snapshot): """Create a snapshot.""" self.APIExecutor.snap_vol(snapshot) def delete_snapshot(self, snapshot): """Delete a snapshot.""" self.APIExecutor.online_snap( snapshot['volume_name'], False, snapshot['name']) self.APIExecutor.delete_snap(snapshot['volume_name'], snapshot['name']) def create_volume_from_snapshot(self, volume, snapshot): """Create a volume from a snapshot.""" self._clone_volume_from_snapshot(volume, snapshot) return self._get_model_info(volume['name']) def _enable_group_scoped_target(self, group_info): if 'version_current' in group_info: current_version = group_info['version_current'] major_minor = current_version.split(".") if len(major_minor) >= 3: major = major_minor[0] minor = major_minor[1] # TODO(rkumar): Fix the major version if int(major) >= 4 and int(minor) >= 0: # Enforce group scoped target if 'group_target_enabled' in group_info: if group_info['group_target_enabled'] is False: try: self.APIExecutor.enable_group_scoped_target() except Exception: raise NimbleAPIException(_("Unable to enable" " GST")) self._group_target_enabled = True LOG.info("Group Scoped Target enabled for " "group %(group)s: %(ip)s", {'group': group_info['name'], 'ip': self.configuration.san_ip}) elif 'group_target_enabled' not in group_info: LOG.info("Group Scoped Target NOT " "present for group %(group)s: " "%(ip)s", {'group': group_info['name'], 'ip': self.configuration.san_ip}) else: raise NimbleAPIException(_("Unable to get current software " "version for %s") % self.configuration.san_ip) def get_volume_stats(self, refresh=False): """Get volume stats. 
This is more of getting group stats.""" if refresh: group_info = self.APIExecutor.get_group_info() if 'usage_valid' not in group_info: raise NimbleDriverException(_('SpaceInfo returned by ' 'array is invalid')) total_capacity = (group_info['usable_capacity_bytes'] / float(units.Gi)) free_space = (group_info['free_space'] / float(units.Gi)) LOG.debug('total_capacity=%(capacity)f ' 'free_space=%(free)f', {'capacity': total_capacity, 'free': free_space}) backend_name = self.configuration.safe_get( 'volume_backend_name') or self.__class__.__name__ self.group_stats = {'volume_backend_name': backend_name, 'vendor_name': 'Nimble', 'driver_version': DRIVER_VERSION, 'storage_protocol': self._storage_protocol} # Just use a single pool for now, FIXME to support multiple # pools mor = self.configuration.max_over_subscription_ratio LOG.debug("mor: %(mor)s", {'mor': mor}) single_pool = dict( pool_name=backend_name, total_capacity_gb=total_capacity, free_capacity_gb=free_space, reserved_percentage=0, QoS_support=False, multiattach=True, max_over_subscription_ratio=mor, thin_provisioning_support=True, consistent_group_snapshot_enabled=True, consistent_group_replication_enabled=self._replicated_type, replication_enabled=self._replicated_type) self.group_stats['pools'] = [single_pool] return self.group_stats def extend_volume(self, volume, new_size): """Extend an existing volume.""" volume_name = volume['name'] LOG.info('Entering extend_volume volume=%(vol)s ' 'new_size=%(size)s', {'vol': volume_name, 'size': new_size}) vol_size = int(new_size) * units.Ki reserve = not self.configuration.san_thin_provision reserve_size = 100 if reserve else 0 LOG.debug("new volume size in MB (size)s", {'size': vol_size}) data = {"data": {'size': vol_size, 'reserve': reserve_size, 'warn_level': int(WARN_LEVEL), 'limit': 100, 'snap_limit': DEFAULT_SNAP_QUOTA}} self.APIExecutor.edit_vol(volume_name, data) def _get_existing_volume_ref_name(self, existing_ref): """Returns the volume name of an existing ref""" vol_name = None if 'source-name' in existing_ref: vol_name = existing_ref['source-name'] else: reason = _("Reference must contain source-name.") raise exception.ManageExistingInvalidReference( existing_ref=existing_ref, reason=reason) return vol_name def _get_volumetype_extraspecs_with_type(self, type_id): specs = {} if type_id is not None: specs = volume_types.get_volume_type_extra_specs(type_id) return specs def retype(self, context, volume, new_type, diff, host): """Retype from one volume type to another. At this point HPE Nimble Storage does not differentiate between volume types on the same array. This is a no-op for us if there are no extra specs else honor the extra-specs. 
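        As an illustration (not an exhaustive flow): when the new type carries
        Nimble-specific extra specs, the driver translates them via
        get_valid_nimble_extraspecs(), briefly sets the volume offline, issues
        an edit_vol request with the translated data, and brings the volume
        back online.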
""" if new_type is None: return True, None LOG.debug("retype called with volume_type %s", new_type) volume_type_id = new_type['id'] if volume_type_id is None: raise NimbleAPIException(_("No volume_type_id present in" " %(type)s") % {'type': new_type}) LOG.debug("volume_type id is %s", volume_type_id) specs_map = self._get_volumetype_extraspecs_with_type( volume_type_id) if specs_map is None: # no extra specs to retype LOG.debug("volume_type %s has no extra specs", volume_type_id) return True, None vol_info = self.APIExecutor.get_vol_info(volume['name']) LOG.debug("new extra specs %s", specs_map) data = self.APIExecutor.get_valid_nimble_extraspecs(specs_map, vol_info) if data is None: # return if there is no update LOG.debug("no data to update for %s", new_type) return True, None try: # offline the volume before edit self.APIExecutor.online_vol(volume['name'], False) # modify the volume LOG.debug("updated volume %s", data) self.APIExecutor.edit_vol(volume['name'], data) # make the volume online after changing the specs self.APIExecutor.online_vol(volume['name'], True) except NimbleAPIException as ex: raise NimbleAPIException(_("Unable to retype %(vol)s to " "%(type)s: %(err)s") % {'vol': volume['name'], 'type': new_type, 'err': ex.message}) return True, None def manage_existing(self, volume, external_ref): """Manage an existing nimble volume (import to cinder)""" # Get the volume name from the external reference target_vol_name = self._get_existing_volume_ref_name(external_ref) LOG.debug('Entering manage_existing. ' 'Target_volume_name =%s', target_vol_name) # Get vol info from the volume name obtained from the reference vol_info = self.APIExecutor.get_vol_info(target_vol_name) # Check if volume is already managed by OpenStack if vol_info['agent_type'] == AGENT_TYPE_OPENSTACK or ( vol_info['agent_type'] == AGENT_TYPE_OPENSTACK_GST): raise exception.ManageExistingAlreadyManaged( volume_ref=volume['id']) # If agent-type is not None then raise exception if vol_info['agent_type'] != AGENT_TYPE_NONE: msg = (_('Volume should have agent-type set as None.')) raise exception.InvalidVolume(reason=msg) new_vol_name = volume['name'] LOG.info("Volume status before managing it : %(status)s", {'status': vol_info['online']}) if vol_info['online'] is True: msg = (_('Volume %s is online. Set volume to offline for ' 'managing using OpenStack.') % target_vol_name) raise exception.InvalidVolume(reason=msg) # edit the volume data = {'data': {'name': new_vol_name}} if self._group_target_enabled is True: # check if any ACL's are attached to this volume if 'access_control_records' in vol_info and ( vol_info['access_control_records'] is not None): msg = (_('Volume %s has ACL associated with it. 
Remove ACL ' 'for managing using OpenStack') % target_vol_name) raise exception.InvalidVolume(reason=msg) data['data']['agent_type'] = AGENT_TYPE_OPENSTACK_GST else: data['data']['agent_type'] = AGENT_TYPE_OPENSTACK LOG.debug("Data for edit %(data)s", {'data': data}) self.APIExecutor.edit_vol(target_vol_name, data) # make the volume online after rename self.APIExecutor.online_vol(new_vol_name, True) return self._get_model_info(new_vol_name) def manage_existing_get_size(self, volume, external_ref): """Return size of an existing volume""" LOG.debug('Volume name : %(name)s External ref : %(ref)s', {'name': volume['name'], 'ref': external_ref}) target_vol_name = self._get_existing_volume_ref_name(external_ref) # get vol info vol_info = self.APIExecutor.get_vol_info(target_vol_name) LOG.debug('Volume size : %(size)s Volume-name : %(name)s', {'size': vol_info['size'], 'name': vol_info['name']}) return int(vol_info['size'] / units.Ki) def unmanage(self, volume): """Removes the specified volume from Cinder management.""" vol_name = volume['name'] LOG.debug("Entering unmanage_volume volume =%s", vol_name) # check agent type vol_info = self.APIExecutor.get_vol_info(vol_name) if vol_info['agent_type'] != AGENT_TYPE_OPENSTACK and ( vol_info['agent_type'] != AGENT_TYPE_OPENSTACK_GST): msg = (_('Only volumes managed by OpenStack can be unmanaged.')) raise exception.InvalidVolume(reason=msg) data = {'data': {'agent_type': AGENT_TYPE_NONE}} # update the agent-type to None self.APIExecutor.edit_vol(vol_name, data) # offline the volume self.APIExecutor.online_vol(vol_name, False) def _do_replication_setup(self, array_id=None): devices = self.configuration.replication_device if devices: dev = devices[0] remote_array = dict(dev.items()) remote_array['san_login'] = ( dev.get('ssh_login', self.configuration.san_login)) remote_array['san_password'] = ( dev.get('san_password', self.configuration.san_password)) try: self.APIExecutor_remote_array = NimbleRestAPIExecutor( username=remote_array['san_login'], password=remote_array['san_password'], ip=remote_array['san_ip'], verify=self.verify) LOG.debug("created APIExecutor for remote ip: %(ip)s", {'ip': remote_array['san_ip']}) except Exception: LOG.error('Failed to create REST client.' ' Check san_ip, username, password' ' and make sure the array version is compatible') raise self._replicated_type = True self.remote_array = remote_array def do_setup(self, context): """Setup the Nimble Cinder volume driver.""" self._check_config() # Setup API Executor san_ip = self.configuration.san_ip LOG.debug("san_ip: %(ip)s", {'ip': san_ip}) try: self.APIExecutor = NimbleRestAPIExecutor( username=self.configuration.san_login, password=self.configuration.san_password, ip=self.configuration.san_ip, verify=self.verify) if self._storage_protocol == constants.ISCSI: group_info = self.APIExecutor.get_group_info() self._enable_group_scoped_target(group_info) except Exception: LOG.error('Failed to create REST client. 
' 'Check san_ip, username, password' ' and make sure the array version is compatible') raise self._update_existing_vols_agent_type(context) self._do_replication_setup() if self._replicated_type: LOG.debug("for %(ip)s, schedule_name is: %(name)s", {'ip': san_ip, 'name': self.remote_array['schedule_name']}) def _update_existing_vols_agent_type(self, context): backend_name = self.configuration.safe_get('volume_backend_name') all_vols = volume.VolumeList.get_all( context, None, None, None, None, {'status': 'available'}) for vol in all_vols: if backend_name in vol.host: try: vol_info = self.APIExecutor.get_vol_info(vol.name) # update agent_type only if no ACL's are present if 'access_control_records' in vol_info and ( vol_info['access_control_records'] is None): if self._group_target_enabled: LOG.debug("Updating %(vol)s to have agent_type :" "%(agent)s", {'vol': vol.name, 'agent': AGENT_TYPE_OPENSTACK_GST}) # check if this is an upgrade case from # openstack to openstackv2 if vol_info['agent_type'] == AGENT_TYPE_NONE: data = {'data': {'agent_type': AGENT_TYPE_OPENSTACK_GST}} self.APIExecutor.edit_vol(vol.name, data) elif vol_info['agent_type'] == ( AGENT_TYPE_OPENSTACK): # 1. update the agent type to None data = {'data': {'agent_type': AGENT_TYPE_NONE}} self.APIExecutor.edit_vol(vol.name, data) # 2. update the agent type to openstack_gst data = {'data': {'agent_type': AGENT_TYPE_OPENSTACK_GST}} self.APIExecutor.edit_vol(vol.name, data) else: LOG.debug("Updating %(vol)s to have agent_type :" "%(agent)s", {'vol': vol.name, 'agent': AGENT_TYPE_OPENSTACK_GST}) if vol_info['agent_type'] == AGENT_TYPE_NONE: data = {'data': {'agent_type': AGENT_TYPE_OPENSTACK}} self.APIExecutor.edit_vol(vol.name, data) elif vol_info['agent_type'] == ( AGENT_TYPE_OPENSTACK_GST): # 1. update the agent type to None data = {'data': {'agent_type': AGENT_TYPE_NONE}} self.APIExecutor.edit_vol(vol.name, data) # 2. 
update the agent type to openstack data = {'data': {'agent_type': AGENT_TYPE_OPENSTACK}} self.APIExecutor.edit_vol(vol.name, data) except NimbleAPIException: # just log the error but don't fail driver initialization LOG.warning('Error updating agent-type for ' 'volume %s.', vol.name) def _get_model_info(self, volume_name): """Get model info for the volume.""" return ( {'provider_location': self._get_provider_location(volume_name), 'provider_auth': None}) @abc.abstractmethod def _get_provider_location(self, volume_name): """Volume info for iSCSI and FC""" pass def _create_igroup_for_initiator(self, initiator_name, wwpns): """Creates igroup for an initiator and returns the igroup name.""" igrp_name = 'openstack-' + self._generate_random_string(12) LOG.info('Creating initiator group %(grp)s ' 'with initiator %(iname)s', {'grp': igrp_name, 'iname': initiator_name}) if self._storage_protocol == constants.ISCSI: self.APIExecutor.create_initiator_group(igrp_name) self.APIExecutor.add_initiator_to_igroup(igrp_name, initiator_name) elif self._storage_protocol == constants.FC: self.APIExecutor.create_initiator_group_fc(igrp_name) for wwpn in wwpns: self.APIExecutor.add_initiator_to_igroup_fc(igrp_name, wwpn) return igrp_name def _get_igroupname_for_initiator_fc(self, initiator_wwpns): initiator_groups = self.APIExecutor.get_initiator_grp_list() for initiator_group in initiator_groups: if 'fc_initiators' in initiator_group and initiator_group[ 'fc_initiators'] is not None: wwpns_list = [] for initiator in initiator_group['fc_initiators']: wwpn = str(initiator['wwpn']).replace(":", "") wwpns_list.append(wwpn) LOG.debug("initiator_wwpns=%(initiator)s " "wwpns_list_from_array=%(wwpns)s", {'initiator': initiator_wwpns, 'wwpns': wwpns_list}) if set(initiator_wwpns) == set(wwpns_list): LOG.info('igroup %(grp)s found for ' 'initiator %(wwpns_list)s', {'grp': initiator_group['name'], 'wwpns_list': wwpns_list}) return initiator_group['name'] LOG.info('No igroup found for initiators %s', initiator_wwpns) return '' def _get_igroupname_for_initiator(self, initiator_name): initiator_groups = self.APIExecutor.get_initiator_grp_list() for initiator_group in initiator_groups: if initiator_group['iscsi_initiators'] is not None: if (len(initiator_group['iscsi_initiators']) == 1 and initiator_group['iscsi_initiators'][0]['iqn'] == initiator_name): LOG.info('igroup %(grp)s found for ' 'initiator %(iname)s', {'grp': initiator_group['name'], 'iname': initiator_name}) return initiator_group['name'] LOG.info('No igroup found for initiator %s', initiator_name) return '' def get_lun_number(self, volume, initiator_group_name): vol_info = self.APIExecutor.get_vol_info(volume['name']) for acl in vol_info['access_control_records']: if (initiator_group_name == acl['initiator_group_name']): LOG.info("access_control_record =%(acl)s", {'acl': acl}) lun = acl['lun'] LOG.info("LUN : %(lun)s", {"lun": lun}) return lun raise NimbleAPIException(_("Lun number not found for volume %(vol)s " "with initiator_group: %(igroup)s") % {'vol': volume['name'], 'igroup': initiator_group_name}) def _is_multiattach(self, volume): if volume.multiattach: attachment_list = volume.volume_attachment try: attachment_list = attachment_list.objects except AttributeError: pass if attachment_list is not None and len(attachment_list) > 1: LOG.info("Volume %(volume)s is attached to multiple " "instances on host %(host_name)s, " "skip terminate volume connection", {'volume': volume.name, 'host_name': volume.host.split('@')[0]}) return True return False def 
revert_to_snapshot(self, context, volume, snapshot): vol_info = self.APIExecutor.get_vol_info(volume['name']) snap_info = self.APIExecutor.get_snap_info(snapshot['name'], volume['name']) snap_id = snap_info['id'] volume_id = vol_info['id'] LOG.debug("Reverting volume %(vol)s with snapshot id %(snap_id)s", {'vol': volume['name'], 'snap_id': snap_id}) data = {'data': {"base_snap_id": snap_id, "id": volume_id}} try: self.APIExecutor.online_vol(volume['name'], False) self.APIExecutor.volume_restore(volume['name'], data) LOG.info("Volume %(vol)s is successfully restored with " "snap_id %(snap_id)s", {'vol': volume['name'], 'snap_id': snap_id}) self.APIExecutor.online_vol(volume['name'], True) except NimbleAPIException as ex: raise NimbleAPIException(_("Unable to restore %(vol)s to " "%(snap_id)s: %(err)s") % {'vol': volume['name'], 'snap_id': snap_id, 'err': ex.message}) return self._get_model_info(volume['name']) def is_volume_group_snap_type(self, volume_type): consis_group_snap_type = False if volume_type: extra_specs = volume_type.get('extra_specs') if 'consistent_group_snapshot_enabled' in extra_specs: gsnap_val = extra_specs['consistent_group_snapshot_enabled'] consis_group_snap_type = (gsnap_val == " True") return consis_group_snap_type def create_group(self, context, group): """Creates a generic group""" if not volume_utils.is_group_a_cg_snapshot_type(group): raise NotImplementedError() cg_type = False cg_name = group.id description = group.description if group.description else group.name LOG.info('Create group: %(name)s, %(description)s', {'name': cg_name, 'description': description}) for volume_type in group.volume_types: if volume_type: extra_specs = volume_type.get('extra_specs') if 'consistent_group_snapshot_enabled' in extra_specs: gsnap_val = extra_specs[ 'consistent_group_snapshot_enabled'] cg_type = (gsnap_val == " True") if not cg_type: msg = _('For a volume type to be a part of consistent' ' group, volume type extra spec must have ' 'consistent_group_snapshot_enabled' '=" True"') LOG.error(msg) raise exception.InvalidInput(reason=msg) self.APIExecutor.create_volcoll(cg_name, description) return {'status': fields.GroupStatus.AVAILABLE} def delete_group(self, context, group, volumes): """Deletes a group.""" if not volume_utils.is_group_a_cg_snapshot_type(group): raise NotImplementedError() LOG.info("Delete Consistency Group %s.", group.id) model_updates = {"status": fields.GroupStatus.DELETED} error_statuses = [ fields.GroupStatus.ERROR, fields.GroupStatus.ERROR_DELETING, ] volume_model_updates = [] for tmp_volume in volumes: update_item = {"id": tmp_volume.id} try: self.delete_volume(tmp_volume) update_item["status"] = "deleted" except exception.VolumeBackendAPIException: update_item["status"] = fields.VolumeStatus.ERROR_DELETING if model_updates["status"] not in error_statuses: model_updates["status"] = fields.GroupStatus.ERROR_DELETING LOG.error("Failed to delete volume %(vol_id)s of " "group %(group_id)s.", {"vol_id": tmp_volume.id, "group_id": group.id}) volume_model_updates.append(update_item) cg_name = group.id cg_id = self.APIExecutor.get_volcoll_id_by_name(cg_name) self.APIExecutor.delete_volcoll(cg_id) return model_updates, volume_model_updates def update_group(self, context, group, add_volumes=None, remove_volumes=None): if (not volume_utils.is_group_a_cg_snapshot_type(group)): raise NotImplementedError() model_update = {'status': fields.GroupStatus.AVAILABLE} for tmp_volume in add_volumes: volume_id = self.APIExecutor.get_volume_id_by_name( tmp_volume['name']) 
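            # Illustrative note (not from the original source): the check below
            # only associates a volume with the group's volume collection when
            # its volume type carries the group-snapshot extra spec with a
            # value of exactly " True" (leading space included), e.g.
            #   volume_type = {'extra_specs':
            #                  {'consistent_group_snapshot_enabled': ' True'}}
            #   self.is_volume_group_snap_type(volume_type)  # -> True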
vol_snap_enable = self.is_volume_group_snap_type( tmp_volume.get('volume_type')) cg_id = self.APIExecutor.get_volcoll_id_by_name(group.id) try: if vol_snap_enable: self.APIExecutor.associate_volcoll(volume_id, cg_id) else: msg = (_('Volume with volume id %s is not ' 'supported as extra specs of this ' 'volume does not have ' 'consistent_group_snapshot_enabled=" True"' ) % volume['id']) LOG.error(msg) raise exception.InvalidInput(reason=msg) except NimbleAPIException: msg = ('Volume collection does not exist.') LOG.error(msg) raise NimbleAPIException(msg) for tmp_volume in remove_volumes: volume_id = self.APIExecutor.get_volume_id_by_name( tmp_volume['name']) try: self.APIExecutor.dissociate_volcoll(volume_id) except NimbleAPIException: msg = ('Volume collection does not exist.') LOG.error(msg) raise NimbleAPIException(msg) return model_update, None, None def create_group_snapshot(self, context, group_snapshot, snapshots): """Creates a group snapshot.""" if not volume_utils.is_group_a_cg_snapshot_type(group_snapshot): raise NotImplementedError() group_id = group_snapshot.group_id snap_name = group_snapshot.id cg_id = self.APIExecutor.get_volcoll_id_by_name(group_id) try: self.APIExecutor.snapcoll_create(snap_name, cg_id) except NimbleAPIException: msg = ('Error creating cg snapshot') LOG.error(msg) raise NimbleAPIException(msg) snapshot_model_updates = [] for snapshot in snapshots: snapshot_update = {'id': snapshot['id'], 'status': fields.SnapshotStatus.AVAILABLE} snapshot_model_updates.append(snapshot_update) model_update = {'status': fields.GroupSnapshotStatus.AVAILABLE} return model_update, snapshot_model_updates def delete_group_snapshot(self, context, group_snapshot, snapshots): """Deletes a group snapshot.""" if not volume_utils.is_group_a_cg_snapshot_type(group_snapshot): raise NotImplementedError() snap_name = group_snapshot.id model_update = {'status': fields.ConsistencyGroupStatus.DELETED} snapshots_model_update = [] snapcoll_id = self.APIExecutor.get_snapcoll_id_by_name(snap_name) try: self.APIExecutor.snapcoll_delete(snapcoll_id) for snapshot in snapshots: snapshots_model_update.append( {'id': snapshot.id, 'status': fields.SnapshotStatus.DELETED}) except Exception as e: LOG.error("Error deleting volume group snapshot." 
"Error received: %(e)s", {'e': e}) model_update = { 'status': fields.GroupSnapshotStatus.ERROR_DELETING} return model_update, snapshots_model_update def create_group_from_src(self, context, group, volumes, group_snapshot=None, snapshots=None, source_group=None, source_vols=None): """Creates the volume group from source.""" if not volume_utils.is_group_a_cg_snapshot_type(group): raise NotImplementedError() self.create_group(context, group) cg_id = self.APIExecutor.get_volcoll_id_by_name(group.id) try: if group_snapshot is not None and snapshots is not None: for tmp_volume, snapshot in zip(volumes, snapshots): self.create_volume_from_snapshot(tmp_volume, snapshot) volume_id = self.APIExecutor.get_volume_id_by_name( tmp_volume['name']) self.APIExecutor.associate_volcoll(volume_id, cg_id) elif source_group is not None and source_vols is not None: for tmp_volume, src_vol in zip(volumes, source_vols): self.create_cloned_volume(tmp_volume, src_vol) volume_id = self.APIExecutor.get_volume_id_by_name( tmp_volume['name']) self.APIExecutor.associate_volcoll(volume_id, cg_id) except NimbleAPIException: msg = ('Error creating cg snapshot') LOG.error(msg) raise NimbleAPIException(msg) return None, None def _time_to_secs(self, time): # time is specified as 'HH:MM' or 'HH:MM:SS' # qualified with am or pm, or in 24-hour clock time = time.strip("'") arr = time.split(':') (hours, minutes) = (arr[0], arr[1]) total_secs = 0 if len(arr) == 2: hours = int(hours) if minutes.endswith('pm'): # for time like 12:01pm, no need to add 12 to hours if hours != 12: # for other time like 01:05pm, we have add 12 to hours hours += 12 minutes = minutes.strip('pm') if minutes.endswith('am'): minutes = minutes.strip('am') minutes = int(minutes) total_secs = hours * 3600 + minutes * 60 return total_secs if len(arr) == 3: seconds = arr[2] hours = int(hours) minutes = int(minutes) if seconds.endswith('pm'): # for time like 12:01:01pm, no need to add 12 to hours if hours != 12: # for other time like 01:05:05pm, we have add 12 to hours hours += 12 seconds = seconds.strip('pm') if seconds.endswith('am'): seconds = seconds.strip('am') seconds = int(seconds) total_secs = hours * 3600 + minutes * 60 + seconds return total_secs def enable_replication(self, context, group, volumes): LOG.debug("try to enable repl on group %(group)s", {'group': group.id}) if not group.is_replicated: raise NotImplementedError() model_update = {} try: # If replication is enabled for volume type, apply the schedule nimble_group_name = group.id san_ip = self.configuration.san_ip # apply schedule sched_name = self.remote_array['schedule_name'] partner_name = self.remote_array['downstream_partner'] LOG.debug("for %(ip)s, schedule_name is: %(name)s", {'ip': san_ip, 'name': sched_name}) kwargs = {} optionals = ['period', 'period_unit', 'num_retain', 'num_retain_replica', 'at_time', 'until_time', 'days', 'replicate_every', 'alert_threshold'] for key in optionals: if key in self.remote_array: value = self.remote_array[key] kwargs[key] = value if key == 'at_time' or key == 'until_time': seconds = self._time_to_secs(value) kwargs[key] = seconds self.APIExecutor.set_schedule_for_volcoll( sched_name, nimble_group_name, partner_name, **kwargs) model_update.update({ 'replication_status': fields.ReplicationStatus.ENABLED}) except Exception as e: model_update.update({ 'replication_status': fields.ReplicationStatus.ERROR}) LOG.error("Error enabling replication on group %(group)s. 
" "Exception received: %(e)s.", {'group': group.id, 'e': e}) return model_update, None def disable_replication(self, context, group, volumes): LOG.debug("try disable repl on group %(group)s", {'group': group.id}) if not group.is_replicated: raise NotImplementedError() model_update = {} try: san_ip = self.configuration.san_ip sched_name = self.remote_array['schedule_name'] LOG.debug("for %(ip)s, schedule_name is: %(name)s", {'ip': san_ip, 'name': sched_name}) data = self.APIExecutor.get_volcoll_details(group.id) LOG.debug("data: %(data)s", {'data': data}) sched_id = data['schedule_list'][0]['id'] self.APIExecutor.delete_schedule(sched_id) model_update.update({ 'replication_status': fields.ReplicationStatus.DISABLED}) except Exception as e: model_update.update({ 'replication_status': fields.ReplicationStatus.ERROR}) LOG.error("Error disabling replication on group %(group)s. " "Exception received: %(e)s.", {'group': group.id, 'e': e}) return model_update, None def failover_replication(self, context, group, volumes, secondary_backend_id=None): LOG.debug("try to failover/failback group %(group)s to %(backend)s", {'group': group.id, 'backend': secondary_backend_id}) group_update = {} volume_update_list = [] partner_name = secondary_backend_id partner_id = None if partner_name != 'default': LOG.debug("failover to secondary array") partner_id = self.APIExecutor.get_partner_id_by_name(partner_name) LOG.debug("partner_id %(id)s", {'id': partner_id}) volcoll_id = self.APIExecutor.get_volcoll_id_by_name(group.id) LOG.debug("volcoll_id %(id)s", {'id': volcoll_id}) self.APIExecutor.handover(volcoll_id, partner_id) rep_status = fields.ReplicationStatus.FAILED_OVER if partner_name == 'default': LOG.debug("failback to primary array") data = self.APIExecutor_remote_array.get_volcoll_details(group.id) partner_name = data['replication_partner'] LOG.debug("partner_name: %(name)s", {'name': partner_name}) partner_id = self.APIExecutor_remote_array.get_partner_id_by_name( partner_name) LOG.debug("partner_id %(id)s", {'id': partner_id}) volcoll_id = self.APIExecutor_remote_array.get_volcoll_id_by_name( group.id) LOG.debug("volcoll_id %(id)s", {'id': volcoll_id}) self.APIExecutor_remote_array.handover(volcoll_id, partner_id) rep_status = fields.ReplicationStatus.ENABLED group_update['replication_status'] = rep_status for vol in volumes: volume_update = { 'id': vol.id, 'replication_status': rep_status} volume_update_list.append(volume_update) return group_update, volume_update_list @interface.volumedriver class NimbleISCSIDriver(NimbleBaseVolumeDriver, san.SanISCSIDriver): """OpenStack driver to enable Nimble ISCSI Controller.""" def __init__(self, *args, **kwargs): super(NimbleISCSIDriver, self).__init__(*args, **kwargs) self._storage_protocol = constants.ISCSI self._group_target_name = None def _set_gst_for_group(self): group_info = self.APIExecutor.get_group_info() if 'group_target_enabled' in group_info and ( group_info['group_target_enabled']) is True and ( 'group_target_name' in group_info) and ( group_info['group_target_name'] is not None): self._group_target_name = group_info['group_target_name'] def _get_gst_for_group(self): return self._group_target_name def initialize_connection(self, volume, connector): """Driver entry point to attach a volume to an instance.""" LOG.info('Entering initialize_connection volume=%(vol)s' ' connector=%(conn)s location=%(loc)s', {'vol': volume, 'conn': connector, 'loc': volume['provider_location']}) initiator_name = connector['initiator'] initiator_group_name = 
self._get_igroupname_for_initiator( initiator_name) if not initiator_group_name: initiator_group_name = self._create_igroup_for_initiator( initiator_name, None) LOG.info('Initiator group name is %(grp)s for initiator ' '%(iname)s', {'grp': initiator_group_name, 'iname': initiator_name}) self.APIExecutor.add_acl(volume, initiator_group_name) properties = {"driver_volume_type": "iscsi", "data": {"target_discovered": False, "discard": True}} properties['data']['volume_id'] = volume['id'] # used by xen currently (iscsi_portal, iqn) = volume['provider_location'].split() if self._get_gst_for_group() is not None: lun_num = self.get_lun_number(volume, initiator_group_name) netconfig = self.APIExecutor.get_netconfig('active') target_portals = self._get_data_ips(netconfig) LOG.info("target portals %(portals)s", {'portals': target_portals}) target_luns = [int(lun_num)] * len(target_portals) target_iqns = [iqn] * len(target_portals) LOG.debug("target iqns %(iqns)s target luns %(luns)s", {'iqns': target_iqns, 'luns': target_luns}) if target_luns and target_iqns and target_portals: properties["data"]["target_luns"] = target_luns properties["data"]["target_iqns"] = target_iqns properties["data"]["target_portals"] = target_portals else: # handling volume scoped target lun_num = LUN_ID properties['data']['target_portal'] = iscsi_portal properties['data']['target_iqn'] = iqn properties['data']['target_lun'] = int(lun_num) return properties def terminate_connection(self, volume, connector, **kwargs): """Driver entry point to unattach a volume from an instance.""" LOG.info('Entering terminate_connection volume=%(vol)s' ' connector=%(conn)s location=%(loc)s.', {'vol': volume['name'], 'conn': connector, 'loc': volume['provider_location']}) if connector is None: LOG.warning("Removing ALL host connections for volume %s", volume) self.APIExecutor.remove_all_acls(volume) return if self._is_multiattach(volume): return initiator_name = connector['initiator'] initiator_group_name = self._get_igroupname_for_initiator( initiator_name) if not initiator_group_name: raise NimbleDriverException(_('No initiator group found for ' 'initiator %s') % initiator_name) self.APIExecutor.remove_acl(volume, initiator_group_name) eventlet.sleep(DEFAULT_SLEEP) def _get_provider_location(self, volume_name): """Get volume iqn for initiator access.""" vol_info = self.APIExecutor.get_vol_info(volume_name) netconfig = self.APIExecutor.get_netconfig('active') self._set_gst_for_group() if self._get_gst_for_group() is not None: iqn = self._get_gst_for_group() else: iqn = vol_info['target_name'] target_ipaddr = self._get_discovery_ip(netconfig) iscsi_portal = target_ipaddr + ':3260' provider_location = '%s %s' % (iscsi_portal, iqn) LOG.info('vol_name=%(name)s provider_location=%(loc)s', {'name': volume_name, 'loc': provider_location}) return provider_location def _get_data_ips(self, netconfig): """Get data ips.""" subnet_label = self.configuration.nimble_subnet_label LOG.debug('subnet_label used %(netlabel)s, netconfig %(netconf)s', {'netlabel': subnet_label, 'netconf': netconfig}) ret_data_ips = [] for subnet in netconfig['array_list'][0]['nic_list']: LOG.info('Exploring array subnet label %s', subnet[ 'subnet_label']) if subnet['data_ip']: if subnet_label == '*': # if all subnets are mentioned then return all portals # else just return specific subnet LOG.info('Data ip %(data_ip)s is used ' 'on data subnet %(net_label)s', {'data_ip': subnet['data_ip'], 'net_label': subnet['subnet_label']}) ret_data_ips.append(str(subnet['data_ip']) + ':3260') 
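                    # Illustrative sketch (addresses made up): with
                    # nimble_subnet_label='*' every data IP becomes a portal,
                    # e.g. ret_data_ips = ['172.16.1.10:3260',
                    # '172.16.2.10:3260'], and initialize_connection() then
                    # builds target_iqns/target_luns lists of the same length
                    # for multipath attach.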
elif subnet_label == subnet['subnet_label']: LOG.info('Data ip %(data_ip)s is used' ' on subnet %(net_label)s', {'data_ip': subnet['data_ip'], 'net_label': subnet['subnet_label']}) data_ips_single_subnet = [] data_ips_single_subnet.append(str(subnet['data_ip']) + ':3260') return data_ips_single_subnet if ret_data_ips: LOG.info('Data ips %s', ret_data_ips) return ret_data_ips else: raise NimbleDriverException(_('No suitable data ip found')) def _get_discovery_ip(self, netconfig): """Get discovery ip.""" subnet_label = self.configuration.nimble_subnet_label LOG.debug('subnet_label used %(netlabel)s, netconfig %(netconf)s', {'netlabel': subnet_label, 'netconf': netconfig}) ret_discovery_ip = '' for subnet in netconfig['subnet_list']: LOG.info('Exploring array subnet label %s', subnet['label']) if subnet_label == '*': # Use the first data subnet, save mgmt+data for later if subnet['type'] == SM_SUBNET_DATA: LOG.info('Discovery ip %(disc_ip)s is used ' 'on data subnet %(net_label)s', {'disc_ip': subnet['discovery_ip'], 'net_label': subnet['label']}) return subnet['discovery_ip'] elif (subnet['type'] == SM_SUBNET_MGMT_PLUS_DATA): LOG.info('Discovery ip %(disc_ip)s is found' ' on mgmt+data subnet %(net_label)s', {'disc_ip': subnet['discovery_ip'], 'net_label': subnet['label']}) ret_discovery_ip = subnet['discovery_ip'] # If subnet is specified and found, use the subnet elif subnet_label == subnet['label']: LOG.info('Discovery ip %(disc_ip)s is used' ' on subnet %(net_label)s', {'disc_ip': subnet['discovery_ip'], 'net_label': subnet['label']}) return subnet['discovery_ip'] if ret_discovery_ip: LOG.info('Discovery ip %s is used on mgmt+data subnet', ret_discovery_ip) return ret_discovery_ip else: raise NimbleDriverException(_('No suitable discovery ip found')) @interface.volumedriver class NimbleFCDriver(NimbleBaseVolumeDriver, driver.FibreChannelDriver): """OpenStack driver to enable Nimble FC Driver Controller.""" def __init__(self, *args, **kwargs): super(NimbleFCDriver, self).__init__(*args, **kwargs) self._storage_protocol = constants.FC self._lookup_service = fczm_utils.create_lookup_service() def _get_provider_location(self, volume_name): """Get array info wwn details.""" netconfig = self.APIExecutor.get_netconfig('active') array_name = netconfig['group_leader_array'] provider_location = '%s' % (array_name) LOG.info('vol_name=%(name)s provider_location=%(loc)s', {'name': volume_name, 'loc': provider_location}) return provider_location def _build_initiator_target_map(self, target_wwns, connector): """Build the target_wwns and the initiator target map.""" LOG.debug("_build_initiator_target_map for %(wwns)s", {'wwns': target_wwns}) init_targ_map = {} if self._lookup_service: # use FC san lookup to determine which wwpns to use # for the new VLUN. 
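            # Illustrative sketch of the lookup-service result consumed below
            # (WWNs made up):
            #   dev_map = {'fabric_a': {
            #       'initiator_port_wwn_list': ['10000090fa0d6754'],
            #       'target_port_wwn_list': ['5626242f45f01b2c']}}
            # which would yield
            #   init_targ_map = {'10000090fa0d6754': ['5626242f45f01b2c']}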
dev_map = self._lookup_service.get_device_mapping_from_network( connector['wwpns'], target_wwns) map_fabric = dev_map LOG.info("dev_map =%(fabric)s", {'fabric': map_fabric}) for fabric_name in dev_map: fabric = dev_map[fabric_name] for initiator in fabric['initiator_port_wwn_list']: if initiator not in init_targ_map: init_targ_map[initiator] = [] init_targ_map[initiator] += fabric['target_port_wwn_list'] init_targ_map[initiator] = list(set( init_targ_map[initiator])) else: init_targ_map = dict.fromkeys(connector["wwpns"], target_wwns) return init_targ_map def initialize_connection(self, volume, connector): """Driver entry point to attach a volume to an instance.""" LOG.info('Entering initialize_connection volume=%(vol)s' ' connector=%(conn)s location=%(loc)s', {'vol': volume, 'conn': connector, 'loc': volume['provider_location']}) wwpns = [] initiator_name = connector['initiator'] for wwpn in connector['wwpns']: wwpns.append(wwpn) initiator_group_name = self._get_igroupname_for_initiator_fc(wwpns) if not initiator_group_name: initiator_group_name = self._create_igroup_for_initiator( initiator_name, wwpns) LOG.info('Initiator group name is %(grp)s for initiator ' '%(iname)s', {'grp': initiator_group_name, 'iname': initiator_name}) self.APIExecutor.add_acl(volume, initiator_group_name) lun = self.get_lun_number(volume, initiator_group_name) init_targ_map = {} (array_name) = volume['provider_location'].split() target_wwns = self.get_wwpns_from_array(array_name) init_targ_map = self._build_initiator_target_map(target_wwns, connector) data = {'driver_volume_type': 'fibre_channel', 'data': {'target_lun': lun, 'target_discovered': True, 'discard': True, 'target_wwn': target_wwns, 'initiator_target_map': init_targ_map}} LOG.info("Return FC data for zone addition: %(data)s.", {'data': data}) fczm_utils.add_fc_zone(data) return data def terminate_connection(self, volume, connector, **kwargs): """Driver entry point to unattach a volume from an instance.""" LOG.info('Entering terminate_connection volume=%(vol)s' ' connector=%(conn)s location=%(loc)s.', {'vol': volume, 'conn': connector, 'loc': volume['provider_location']}) wwpns = [] if connector is None: LOG.warning("Removing ALL host connections for volume %s", volume) self.APIExecutor.remove_all_acls(volume) return if self._is_multiattach(volume): return initiator_name = connector['initiator'] for wwpn in connector['wwpns']: wwpns.append(wwpn) (array_name) = volume['provider_location'].split() target_wwns = self.get_wwpns_from_array(array_name) init_targ_map = self._build_initiator_target_map(target_wwns, connector) initiator_group_name = self._get_igroupname_for_initiator_fc(wwpns) if not initiator_group_name: raise NimbleDriverException( _('No initiator group found for initiator %s') % initiator_name) LOG.debug("initiator_target_map %s", init_targ_map) self.APIExecutor.remove_acl(volume, initiator_group_name) eventlet.sleep(DEFAULT_SLEEP) # FIXME to check for other volumes attached to the host and then # return the data. Bug https://bugs.launchpad.net/cinder/+bug/1617472 data = {'driver_volume_type': 'fibre_channel', 'data': {'target_wwn': target_wwns}} # FIXME: need to optionally add the initiator_target_map here when # there are no more volumes exported to the initiator / target pair # otherwise the zone will never get removed. 
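        # Illustrative note (values made up): at this point the zone-removal
        # payload only carries the target WWNs, e.g.
        #   data = {'driver_volume_type': 'fibre_channel',
        #           'data': {'target_wwn': ['5626242f45f01b2c']}}
        # Without an initiator_target_map the zone manager leaves existing
        # zones untouched, which is what the FIXME above refers to.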
fczm_utils.remove_fc_zone(data) return data def get_wwpns_from_array(self, array_name): """Retrieve the wwpns from the array""" LOG.debug("get_wwpns_from_array %s", array_name) target_wwpns = [] interface_info = self.APIExecutor.get_fc_interface_list(array_name) LOG.info("interface_info %(interface_info)s", {"interface_info": interface_info}) for wwpn_list in interface_info: wwpn = wwpn_list['wwpn'] wwpn = wwpn.replace(":", "") target_wwpns.append(wwpn) return target_wwpns def _connection_checker(func): """Decorator to re-establish and re-run the api if session has expired.""" @functools.wraps(func) def inner_connection_checker(self, *args, **kwargs): for attempts in range(2): try: return func(self, *args, **kwargs) except Exception as e: if attempts < 1 and (re.search("Failed to execute", str(e))): LOG.info('Session might have expired.' ' Trying to relogin') self.login() continue else: LOG.error('Re-throwing Exception %s', e) raise return inner_connection_checker class NimbleRestAPIExecutor(object): """Makes Nimble REST API calls.""" def __init__(self, api_version=NimbleDefaultVersion, *args, **kwargs): self.token_id = None self.ip = kwargs['ip'] self.username = kwargs['username'] self.password = kwargs['password'] self.verify = kwargs['verify'] self.api_version = api_version self.uri = "https://%(ip)s:5392/v%(version)s/" % { 'ip': self.ip, 'version': self.api_version} self.login() def login(self): data = {'data': {"username": self.username, "password": self.password, "app_name": "NimbleCinderDriver"}} r = requests.post(self.uri + "tokens", data=json.dumps(data), verify=self.verify) if r.status_code != 201 and r.status_code != 200: msg = _("Failed to login for user %s"), self.username raise NimbleAPIException(msg) self.token_id = r.json()['data']['session_token'] self.headers = {'X-Auth-Token': self.token_id} def get_group_id(self): api = 'groups' r = self.get(api) if not r.json()['data']: raise NimbleAPIException(_("Unable to retrieve Group Object for : " "%s") % self.ip) return r.json()['data'][0]['id'] def get_group_info(self): group_id = self.get_group_id() api = 'groups/' + str(group_id) r = self.get(api) if not r.json()['data']: raise NimbleAPIException(_("Unable to retrieve Group info for: %s") % group_id) return r.json()['data'] def get_folder_id(self, folder_name): api = 'folders' filter = {"name": folder_name} r = self.get_query(api, filter) if not r.json()['data']: raise NimbleAPIException(_("Unable to retrieve information for " "Folder: %s") % folder_name) return r.json()['data'][0]['id'] def get_performance_policy_id(self, perf_policy_name): api = 'performance_policies/' filter = {'name': perf_policy_name} LOG.debug("Performance policy Name %s", perf_policy_name) r = self.get_query(api, filter) if not r.json()['data']: raise NimbleAPIException(_("No performance policy found for: " "%(perf)s") % {'perf': perf_policy_name}) LOG.debug("Performance policy ID :%(perf)s", {'perf': r.json()['data'][0]['id']}) return r.json()['data'][0]['id'] def get_netconfig(self, role): api = "network_configs/detail" filter = {'role': role} r = self.get_query(api, filter) if not r.json()['data']: raise NimbleAPIException(_("No %s network config exists") % role) return r.json()['data'][0] def _get_volumetype_extraspecs(self, volume): specs = {} type_id = volume['volume_type_id'] if type_id is not None: specs = volume_types.get_volume_type_extra_specs(type_id) return specs def _get_extra_spec_values(self, extra_specs): """Nimble specific extra specs.""" perf_policy_name = 
extra_specs.get(EXTRA_SPEC_PERF_POLICY, DEFAULT_PERF_POLICY_SETTING) encryption = extra_specs.get(EXTRA_SPEC_ENCRYPTION, DEFAULT_ENCRYPTION_SETTING) iops_limit = extra_specs.get(EXTRA_SPEC_IOPS_LIMIT, DEFAULT_IOPS_LIMIT_SETTING) folder_name = extra_specs.get(EXTRA_SPEC_FOLDER, DEFAULT_FOLDER_SETTING) dedupe = extra_specs.get(EXTRA_SPEC_DEDUPE, DEFAULT_DEDUPE_SETTING) extra_specs_map = {} extra_specs_map[EXTRA_SPEC_PERF_POLICY] = perf_policy_name extra_specs_map[EXTRA_SPEC_ENCRYPTION] = encryption extra_specs_map[EXTRA_SPEC_IOPS_LIMIT] = iops_limit extra_specs_map[EXTRA_SPEC_DEDUPE] = dedupe extra_specs_map[EXTRA_SPEC_FOLDER] = folder_name return extra_specs_map def get_valid_nimble_extraspecs(self, extra_specs_map, vol_info): extra_specs_map_updated = self._get_extra_spec_values(extra_specs_map) data = {"data": {}} perf_policy_name = extra_specs_map_updated[EXTRA_SPEC_PERF_POLICY] perf_policy_id = self.get_performance_policy_id(perf_policy_name) data['perfpolicy_id'] = perf_policy_id encrypt = extra_specs_map_updated[EXTRA_SPEC_ENCRYPTION] cipher = DEFAULT_CIPHER if encrypt.lower() == 'yes': cipher = AES_256_XTS_CIPHER data['cipher'] = cipher if extra_specs_map.get('multiattach') == " True": data['multi_initiator'] = True else: data['multi_initiator'] = False folder_name = extra_specs_map_updated[EXTRA_SPEC_FOLDER] folder_id = None pool_id = vol_info['pool_id'] pool_name = vol_info['pool_name'] if folder_name is not None: # validate if folder exists in pool_name pool_info = self.get_pool_info(pool_id) if 'folder_list' in pool_info and (pool_info['folder_list'] is not None): for folder_list in pool_info['folder_list']: LOG.debug("folder_list : %s", folder_list) if folder_list['fqn'] == "/" + folder_name: LOG.debug("Folder %(folder)s present in pool " "%(pool)s", {'folder': folder_name, 'pool': pool_name}) folder_id = self.get_folder_id(folder_name) if folder_id is not None: data['data']["folder_id"] = folder_id if folder_id is None: raise NimbleAPIException(_("Folder '%(folder)s' not " "present in pool '%(" "pool)s'") % {'folder': folder_name, 'pool': pool_name}) else: raise NimbleAPIException(_( "Folder '%(folder)s' not present in pool '%(pool)s'") % {'folder': folder_name, 'pool': pool_name}) iops_limit = extra_specs_map_updated[EXTRA_SPEC_IOPS_LIMIT] if iops_limit is not None: if not iops_limit.isdigit() or ( int(iops_limit) < MIN_IOPS) or (int(iops_limit) > MAX_IOPS): raise NimbleAPIException(_("%(err)s [%(min)s, %(max)s]") % {'err': IOPS_ERR_MSG, 'min': MIN_IOPS, 'max': MAX_IOPS}) data['data']['limit_iops'] = iops_limit dedupe = extra_specs_map_updated[EXTRA_SPEC_DEDUPE] if dedupe.lower() == 'true': data['data']['dedupe_enabled'] = True return data def create_vol(self, volume, pool_name, reserve, protocol, is_gst_enabled): response = self._execute_create_vol(volume, pool_name, reserve, protocol, is_gst_enabled) LOG.info('Successfully created volume %(name)s', {'name': response['name']}) return response['name'] def _is_ascii(self, value): try: return all(ord(c) < 128 for c in value) except TypeError: return False def _execute_create_vol(self, volume, pool_name, reserve, protocol, is_gst_enabled): """Create volume :return: r['data'] """ # Set volume size, display name and description volume_size = volume['size'] * units.Ki reserve_size = 100 if reserve else 0 # Set volume description display_name = getattr(volume, 'display_name', '') display_description = getattr(volume, 'display_description', '') if self._is_ascii(display_name) and self._is_ascii( display_description): display_list = 
[getattr(volume, 'display_name', ''), getattr(volume, 'display_description', '')] description = ':'.join(filter(None, display_list)) elif self._is_ascii(display_name): description = display_name elif self._is_ascii(display_description): description = display_description else: description = "" # Limit description size to 254 characters description = description[:254] pool_id = self.get_pool_id(pool_name) specs = self._get_volumetype_extraspecs(volume) extra_specs_map = self._get_extra_spec_values(specs) perf_policy_name = extra_specs_map[EXTRA_SPEC_PERF_POLICY] perf_policy_id = self.get_performance_policy_id(perf_policy_name) encrypt = extra_specs_map[EXTRA_SPEC_ENCRYPTION] multi_initiator = volume.get('multiattach', False) folder_name = extra_specs_map[EXTRA_SPEC_FOLDER] iops_limit = extra_specs_map[EXTRA_SPEC_IOPS_LIMIT] dedupe = extra_specs_map[EXTRA_SPEC_DEDUPE] cipher = DEFAULT_CIPHER if encrypt.lower() == 'yes': cipher = AES_256_XTS_CIPHER if is_gst_enabled is True: agent_type = AGENT_TYPE_OPENSTACK_GST else: agent_type = AGENT_TYPE_OPENSTACK LOG.debug('Creating a new volume=%(vol)s size=%(size)s' ' reserve=%(reserve)s in pool=%(pool)s' ' description=%(description)s with Extra Specs' ' perfpol-name=%(perfpol-name)s' ' encryption=%(encryption)s cipher=%(cipher)s' ' agent-type=%(agent-type)s' ' multi-initiator=%(multi-initiator)s', {'vol': volume['name'], 'size': volume_size, 'reserve': reserve_size, 'pool': pool_name, 'description': description, 'perfpol-name': perf_policy_name, 'encryption': encrypt, 'cipher': cipher, 'agent-type': agent_type, 'multi-initiator': multi_initiator}) data = {"data": {'name': volume['name'], 'description': description, 'size': volume_size, 'reserve': reserve_size, 'warn_level': int(WARN_LEVEL), 'limit': 100, 'snap_limit': DEFAULT_SNAP_QUOTA, 'online': True, 'pool_id': pool_id, 'agent_type': agent_type, 'perfpolicy_id': perf_policy_id, 'encryption_cipher': cipher}} if protocol == constants.ISCSI: data['data']['multi_initiator'] = multi_initiator if dedupe.lower() == 'true': data['data']['dedupe_enabled'] = True folder_id = None if folder_name is not None: # validate if folder exists in pool_name pool_info = self.get_pool_info(pool_id) if 'folder_list' in pool_info and (pool_info['folder_list'] is not None): for folder_list in pool_info['folder_list']: LOG.debug("folder_list : %s", folder_list) if folder_list['fqn'] == "/" + folder_name: LOG.debug("Folder %(folder)s present in pool " "%(pool)s", {'folder': folder_name, 'pool': pool_name}) folder_id = self.get_folder_id(folder_name) if folder_id is not None: data['data']["folder_id"] = folder_id if folder_id is None: raise NimbleAPIException(_("Folder '%(folder)s' not " "present in pool '%(pool)s'") % {'folder': folder_name, 'pool': pool_name}) else: raise NimbleAPIException(_("Folder '%(folder)s' not present in" " pool '%(pool)s'") % {'folder': folder_name, 'pool': pool_name}) if iops_limit is not None: if not iops_limit.isdigit() or ( int(iops_limit) < MIN_IOPS) or (int(iops_limit) > MAX_IOPS): raise NimbleAPIException(_("%(err)s [%(min)s, %(max)s]") % {'err': IOPS_ERR_MSG, 'min': MIN_IOPS, 'max': MAX_IOPS}) data['data']['limit_iops'] = iops_limit LOG.debug("Volume metadata :%s", volume.metadata) for key, value in volume.metadata.items(): LOG.debug("Key %(key)s Value %(value)s", {'key': key, 'value': value}) if key == EXTRA_SPEC_IOPS_LIMIT and value.isdigit(): if type(value) is int or int(value) < MIN_IOPS or ( int(value) > MAX_IOPS): raise NimbleAPIException(_("%(err)s [%(min)s, %(max)s]") % {'err': 
IOPS_ERR_MSG, 'min': MIN_IOPS, 'max': MAX_IOPS}) LOG.debug("IOPS Limit %s", value) data['data']['limit_iops'] = value LOG.debug("Data : %s", data) api = 'volumes' r = self.post(api, data) return r['data'] def create_initiator_group(self, initiator_grp_name): api = "initiator_groups" data = {"data": {"name": initiator_grp_name, "access_protocol": "iscsi", }} r = self.post(api, data) return r['data'] def create_initiator_group_fc(self, initiator_grp_name): api = "initiator_groups" data = {} data["data"] = {} data["data"]["name"] = initiator_grp_name data["data"]["access_protocol"] = "fc" r = self.post(api, data) return r['data'] def get_initiator_grp_id(self, initiator_grp_name): api = "initiator_groups" filter = {'name': initiator_grp_name} r = self.get_query(api, filter) return r.json()['data'][0]['id'] def add_initiator_to_igroup(self, initiator_grp_name, initiator_name): initiator_group_id = self.get_initiator_grp_id(initiator_grp_name) api = "initiators" data = {"data": { "access_protocol": "iscsi", "initiator_group_id": initiator_group_id, "label": initiator_name, "iqn": initiator_name }} r = self.post(api, data) return r['data'] def add_initiator_to_igroup_fc(self, initiator_grp_name, wwpn): initiator_group_id = self.get_initiator_grp_id(initiator_grp_name) api = "initiators" data = {"data": { "access_protocol": "fc", "initiator_group_id": initiator_group_id, "wwpn": self._format_to_wwpn(wwpn) }} r = self.post(api, data) return r['data'] def get_pool_id(self, pool_name): api = "pools/" filter = {'name': pool_name} r = self.get_query(api, filter) if not r.json()['data']: raise NimbleAPIException(_("Unable to retrieve information for " "pool : %(pool)s") % {'pool': pool_name}) return r.json()['data'][0]['id'] def get_pool_info(self, pool_id): api = 'pools/' + str(pool_id) r = self.get(api) return r.json()['data'] def get_initiator_grp_list(self): api = "initiator_groups/detail" r = self.get(api) if 'data' not in r.json(): raise NimbleAPIException(_("Unable to retrieve initiator group " "list")) LOG.info('Successfully retrieved InitiatorGrpList') return r.json()['data'] def get_initiator_grp_id_by_name(self, initiator_group_name): api = 'initiator_groups' filter = {"name": initiator_group_name} r = self.get_query(api, filter) if not r.json()['data']: raise NimbleAPIException(_("Unable to retrieve information for " "initiator group : %s") % initiator_group_name) return r.json()['data'][0]['id'] def get_volume_id_by_name(self, name): api = "volumes" filter = {"name": name} r = self.get_query(api, filter) if not r.json()['data']: raise NimbleAPIException(_("Unable to retrieve information for " "volume: %s") % name) return r.json()['data'][0]['id'] def get_volume_name(self, volume_id): api = "volumes/" + str(volume_id) r = self.get(api) if not r.json()['data']: raise NimbleAPIException(_("Unable to retrieve information for " "volume: %s") % volume_id) return r.json()['data']['name'] def add_acl(self, volume, initiator_group_name): initiator_group_id = self.get_initiator_grp_id_by_name( initiator_group_name) volume_id = self.get_volume_id_by_name(volume['name']) data = {'data': {"apply_to": 'both', "initiator_group_id": initiator_group_id, "vol_id": volume_id }} api = 'access_control_records' try: self.post(api, data) except NimbleAPIException as ex: LOG.debug("add_acl_exception: %s", ex) if SM_OBJ_EXIST_MSG in str(ex): LOG.warning('Volume %(vol)s : %(state)s', {'vol': volume['name'], 'state': SM_OBJ_EXIST_MSG}) else: msg = (_("Add access control failed with error: %s") % str(ex)) raise 
NimbleAPIException(msg) def get_acl_record(self, volume_id, initiator_group_id): filter = {"vol_id": volume_id, "initiator_group_id": initiator_group_id} api = "access_control_records" r = self.get_query(api, filter) LOG.info("ACL record is %(result)s", {'result': r.json()}) if not r.json()['data']: LOG.warning('ACL is not available for this volume %(vol_id)s', { 'vol_id': volume_id}) return return r.json()['data'][0] def get_volume_acl_records(self, volume_id): api = "volumes/" + str(volume_id) r = self.get(api) if not r.json()['data']: raise NimbleAPIException(_("Unable to retrieve information for " "volume: %s") % volume_id) return r.json()['data']['access_control_records'] def remove_all_acls(self, volume): LOG.info("removing all access control list from volume=%(vol)s", {"vol": volume['name']}) volume_id = self.get_volume_id_by_name(volume['name']) acl_records = self.get_volume_acl_records(volume_id) if acl_records is not None: for acl_record in acl_records: LOG.info("removing acl=%(acl)s with igroup=%(igroup)s", {"acl": acl_record['id'], "igroup": acl_record['initiator_group_name']}) self.remove_acl(volume, acl_record['initiator_group_name']) def remove_acl(self, volume, initiator_group_name): LOG.info("removing ACL from volume=%(vol)s " "and %(igroup)s", {"vol": volume['name'], "igroup": initiator_group_name}) initiator_group_id = self.get_initiator_grp_id_by_name( initiator_group_name) volume_id = self.get_volume_id_by_name(volume['name']) try: acl_record = self.get_acl_record(volume_id, initiator_group_id) LOG.debug("ACL Record %(acl)s", {"acl": acl_record}) if acl_record is not None: acl_id = acl_record['id'] api = 'access_control_records/%s' % acl_id self.delete(api) except NimbleAPIException as ex: LOG.debug("remove_acl_exception: %s", ex) if SM_OBJ_ENOENT_MSG in str(ex): LOG.warning('Volume %(vol)s : %(state)s', {'vol': volume['name'], 'state': SM_OBJ_ENOENT_MSG}) else: msg = (_("Remove access control failed with error: %s") % str(ex)) raise NimbleAPIException(msg) def get_snap_info_by_id(self, snap_id, vol_id): filter = {"id": snap_id, "vol_id": vol_id} api = 'snapshots' r = self.get_query(api, filter) if not r.json()['data']: raise NimbleAPIException(_("Unable to retrieve snapshot info for " "snap_id: %(snap)s volume id: %(vol)s") % {'snap': snap_id, 'vol': vol_id}) LOG.debug("SnapInfo :%s", r.json()['data'][0]) return r.json()['data'][0] def get_snap_info(self, snap_name, vol_name): filter = {"name": snap_name, "vol_name": vol_name} api = 'snapshots' r = self.get_query(api, filter) if not r.json()['data']: raise NimbleAPIException(_("Snapshot: %(snap)s of Volume: %(vol)s " "doesn't exist") % {'snap': snap_name, 'vol': vol_name}) return r.json()['data'][0] def get_snap_info_detail(self, snap_id): api = 'snapshots/detail' filter = {'id': snap_id} r = self.get_query(api, filter) if not r.json()['data']: raise NimbleAPIException(_("Snapshot: %s doesn't exist") % snap_id) return r.json()['data'][0] def get_volcoll_id_by_name(self, volcoll_name): api = "volume_collections" filter = {"name": volcoll_name} r = self.get_query(api, filter) if not r.json()['data']: raise Exception("Unable to retrieve information for volcoll: {0}" .format(volcoll_name)) return r.json()['data'][0]['id'] def get_volcoll_details(self, volcoll_name): api = "volume_collections/detail" filter = {"name": volcoll_name} r = self.get_query(api, filter) if not r.json()['data']: raise Exception("Unable to retrieve information for volcoll: {0}" .format(volcoll_name)) return r.json()['data'][0] def 
get_snapcoll_id_by_name(self, snapcoll_name): api = "snapshot_collections" filter = {"name": snapcoll_name} r = self.get_query(api, filter) if not r.json()['data']: raise Exception("Unable to retrieve information for snapcoll: {0}" .format(snapcoll_name)) return r.json()['data'][0]['id'] def create_volcoll(self, volcoll_name, description=''): api = "volume_collections" data = {"data": {"name": volcoll_name, "description": description}} r = self.post(api, data) return r['data'] def delete_volcoll(self, volcoll_id): api = "volume_collections/" + str(volcoll_id) self.delete(api) def dissociate_volcoll(self, volume_id): api = "volumes/" + str(volume_id) data = {'data': {"volcoll_id": '' } } r = self.put(api, data) return r def associate_volcoll(self, volume_id, volcoll_id): api = "volumes/" + str(volume_id) data = {'data': {"volcoll_id": volcoll_id } } r = self.put(api, data) return r def snapcoll_create(self, snapcoll_name, volcoll_id): data = {'data': {"name": snapcoll_name, "volcoll_id": volcoll_id } } api = 'snapshot_collections' r = self.post(api, data) return r def snapcoll_delete(self, snapcoll_id): api = "snapshot_collections/" + str(snapcoll_id) self.delete(api) @utils.retry(NimbleAPIException, 2, 3) def online_vol(self, volume_name, online_flag): volume_id = self.get_volume_id_by_name(volume_name) LOG.debug("volume_id %s", str(volume_id)) eventlet.sleep(DEFAULT_SLEEP) api = "volumes/" + str(volume_id) data = {'data': {"online": online_flag, 'force': True}} try: LOG.debug("data :%s", data) self.put(api, data) LOG.debug("Volume %(vol)s is in requested online state :%(flag)s", {'vol': volume_name, 'flag': online_flag}) except Exception as ex: msg = (_("Error %s") % ex) LOG.debug("online_vol_exception: %s", msg) if msg.__contains__("Object is %s" % SM_STATE_MSG): LOG.warning('Volume %(vol)s : %(state)s', {'vol': volume_name, 'state': SM_STATE_MSG}) # TODO(rkumar): Check if we need to ignore the connected # initiator elif msg.__contains__("Initiators are connected to"): raise NimbleAPIException(msg) else: raise exception.InvalidVolume(reason=msg) def online_snap(self, volume_name, online_flag, snap_name): snap_info = self.get_snap_info(snap_name, volume_name) api = "snapshots/" + str(snap_info['id']) data = {'data': {"online": online_flag}} try: self.put(api, data) LOG.debug("Snapshot %(snap)s is in requested online state " ":%(flag)s", {'snap': snap_name, 'flag': online_flag}) except Exception as ex: LOG.debug("online_snap_exception: %s", ex) if str(ex).__contains__("Object %s" % SM_STATE_MSG): LOG.warning('Snapshot %(snap)s :%(state)s', {'snap': snap_name, 'state': SM_STATE_MSG}) else: raise @utils.retry(NimbleAPIException, 2, 3) def get_vol_info(self, volume_name): volume_id = self.get_volume_id_by_name(volume_name) api = 'volumes/' + str(volume_id) r = self.get(api) if not r.json()['data']: raise exception.VolumeNotFound(_("Volume: %s not found") % volume_name) return r.json()['data'] def delete_vol(self, volume_name): volume_id = self.get_volume_id_by_name(volume_name) api = "volumes/" + str(volume_id) self.delete(api) def snap_vol(self, snapshot): api = "snapshots" volume_name = snapshot['volume_name'] vol_id = self.get_volume_id_by_name(volume_name) snap_name = snapshot['name'] # Set snapshot description display_list = [ getattr(snapshot, 'display_name', snapshot['display_name']), getattr(snapshot, 'display_description', '')] snap_description = ':'.join(filter(None, display_list)) # Limit to 254 characters snap_description = snap_description[:254] data = {"data": {"name": 
snap_name, "description": snap_description, "vol_id": vol_id } } r = self.post(api, data) return r['data'] def clone_vol(self, volume, snapshot, reserve, is_gst_enabled, protocol, pool_name): api = "volumes" volume_name = snapshot['volume_name'] snap_name = snapshot['name'] snap_info = self.get_snap_info(snap_name, volume_name) clone_name = volume['name'] snap_size = snapshot['volume_size'] reserve_size = 100 if reserve else 0 specs = self._get_volumetype_extraspecs(volume) extra_specs_map = self._get_extra_spec_values(specs) perf_policy_name = extra_specs_map.get(EXTRA_SPEC_PERF_POLICY) perf_policy_id = self.get_performance_policy_id(perf_policy_name) encrypt = extra_specs_map.get(EXTRA_SPEC_ENCRYPTION) multi_initiator = volume.get('multiattach', False) iops_limit = extra_specs_map[EXTRA_SPEC_IOPS_LIMIT] folder_name = extra_specs_map[EXTRA_SPEC_FOLDER] pool_id = self.get_pool_id(pool_name) # default value of cipher for encryption cipher = DEFAULT_CIPHER if encrypt.lower() == 'yes': cipher = AES_256_XTS_CIPHER if is_gst_enabled is True: agent_type = AGENT_TYPE_OPENSTACK_GST else: agent_type = AGENT_TYPE_OPENSTACK LOG.info('Cloning volume from snapshot volume=%(vol)s ' 'snapshot=%(snap)s clone=%(clone)s snap_size=%(size)s ' 'reserve=%(reserve)s' 'agent-type=%(agent-type)s ' 'perfpol-name=%(perfpol-name)s ' 'encryption=%(encryption)s cipher=%(cipher)s ' 'multi-initiator=%(multi-initiator)s', {'vol': volume_name, 'snap': snap_name, 'clone': clone_name, 'size': snap_size, 'reserve': reserve_size, 'agent-type': agent_type, 'perfpol-name': perf_policy_name, 'encryption': encrypt, 'cipher': cipher, 'multi-initiator': multi_initiator}) data = {"data": {"name": clone_name, "clone": 'true', "base_snap_id": snap_info['id'], 'snap_limit': DEFAULT_SNAP_QUOTA, 'warn_level': int(WARN_LEVEL), 'limit': 100, "online": 'true', "reserve": reserve_size, "agent_type": agent_type, "perfpolicy_id": perf_policy_id, "encryption_cipher": cipher } } if protocol == constants.ISCSI: data['data']['multi_initiator'] = multi_initiator folder_id = None if folder_name is not None: # validate if folder exists in pool_name pool_info = self.get_pool_info(pool_id) if 'folder_list' in pool_info and (pool_info['folder_list'] is not None): for folder_list in pool_info['folder_list']: LOG.debug("folder_list : %s", folder_list) if folder_list['fqn'] == "/" + folder_name: LOG.debug("Folder %(folder)s present in pool " "%(pool)s", {'folder': folder_name, 'pool': pool_name}) folder_id = self.get_folder_id(folder_name) if folder_id is not None: data['data']["folder_id"] = folder_id if folder_id is None: raise NimbleAPIException(_("Folder '%(folder)s' not " "present in pool '%(pool)s'") % {'folder': folder_name, 'pool': pool_name}) else: raise NimbleAPIException(_("Folder '%(folder)s' not present in" " pool '%(pool)s'") % {'folder': folder_name, 'pool': pool_name}) if iops_limit is not None: if not iops_limit.isdigit() or ( int(iops_limit) < MIN_IOPS) or (int(iops_limit) > MAX_IOPS): raise NimbleAPIException(_("%(err)s [%(min)s, %(max)s]") % {'err': IOPS_ERR_MSG, 'min': MIN_IOPS, 'max': MAX_IOPS}) data['data']['limit_iops'] = iops_limit if iops_limit is not None: if not iops_limit.isdigit() or ( int(iops_limit) < MIN_IOPS) or (int(iops_limit) > MAX_IOPS): raise NimbleAPIException(_("Please set valid IOPS limit" " in the range [%(min)s, %(max)s]") % {'min': MIN_IOPS, 'max': MAX_IOPS}) data['data']['limit_iops'] = iops_limit LOG.debug("Volume metadata :%s", volume.metadata) for key, value in volume.metadata.items(): LOG.debug("Key %(key)s 
Value %(value)s", {'key': key, 'value': value}) if key == EXTRA_SPEC_IOPS_LIMIT and value.isdigit(): if type(value) is int or int(value) < MIN_IOPS or ( int(value) > MAX_IOPS): raise NimbleAPIException(_("Please enter valid IOPS " "limit in the range [" "%(min)s, %(max)s]") % {'min': MIN_IOPS, 'max': MAX_IOPS}) LOG.debug("IOPS Limit %s", value) data['data']['limit_iops'] = value r = self.post(api, data) return r['data'] def edit_vol(self, volume_name, data): vol_id = self.get_volume_id_by_name(volume_name) api = "volumes/" + str(vol_id) self.put(api, data) def delete_snap(self, volume_name, snap_name): snap_info = self.get_snap_info(snap_name, volume_name) api = "snapshots/" + str(snap_info['id']) try: self.delete(api) except NimbleAPIException as ex: LOG.debug("delete snapshot exception: %s", ex) if SM_OBJ_HAS_CLONE in str(ex): # if snap has a clone log the error and continue ahead LOG.warning('Snapshot %(snap)s : %(state)s', {'snap': snap_name, 'state': SM_OBJ_HAS_CLONE}) else: raise def volume_restore(self, volume_name, data): volume_id = self.get_volume_id_by_name(volume_name) api = 'volumes/%s/actions/restore' % volume_id self.post(api, data) @_connection_checker def get(self, api): return self.get_query(api, None) @_connection_checker def get_query(self, api, query): url = self.uri + api return requests.get(url, headers=self.headers, params=query, verify=self.verify) @_connection_checker def put(self, api, payload): url = self.uri + api r = requests.put(url, data=json.dumps(payload), headers=self.headers, verify=self.verify) if r.status_code != 201 and r.status_code != 200: base = "Failed to execute api %(api)s : Error Code :%(code)s" % { 'api': api, 'code': r.status_code} LOG.debug("Base error : %(base)s", {'base': base}) try: msg = _("%(base)s Message: %(msg)s") % { 'base': base, 'msg': r.json()['messages'][1]['text']} except IndexError: msg = _("%(base)s Message: %(msg)s") % { 'base': base, 'msg': str(r.json())} raise NimbleAPIException(msg) return r.json() @_connection_checker def post(self, api, payload): url = self.uri + api r = requests.post(url, data=json.dumps(payload), headers=self.headers, verify=self.verify) if r.status_code != 201 and r.status_code != 200: msg = _("Failed to execute api %(api)s : %(msg)s : %(code)s") % { 'api': api, 'msg': r.json()['messages'][1]['text'], 'code': r.status_code} raise NimbleAPIException(msg) return r.json() @_connection_checker def delete(self, api): url = self.uri + api r = requests.delete(url, headers=self.headers, verify=self.verify) if r.status_code != 201 and r.status_code != 200: base = "Failed to execute api %(api)s: Error Code: %(code)s" % { 'api': api, 'code': r.status_code} LOG.debug("Base error : %(base)s", {'base': base}) try: msg = _("%(base)s Message: %(msg)s") % { 'base': base, 'msg': r.json()['messages'][1]['text']} except IndexError: msg = _("%(base)s Message: %(msg)s") % { 'base': base, 'msg': str(r.json())} raise NimbleAPIException(msg) return r.json() def _format_to_wwpn(self, string_wwpn): return ':'.join(a + b for a, b in zip(* [iter(string_wwpn)] * 2)) def get_fc_interface_list(self, array_name): """getFibreChannelInterfaceList API to get FC interfaces on array.""" api = 'fibre_channel_interfaces/detail' filter = {'array_name_or_serial': array_name} r = self.get_query(api, filter) if not r.json()['data']: raise NimbleAPIException(_("No fc interfaces for array %s") % array_name) return r.json()['data'] def enable_group_scoped_target(self): group_id = self.get_group_id() api = "groups/" + str(group_id) data = {'data': 
{'group_target_enabled': True}} self.put(api, data) def set_schedule_for_volcoll(self, sched_name, volcoll_name, repl_partner, period=1, period_unit='days', num_retain=10, num_retain_replica=1, at_time=0, # 00:00 until_time=86340, # 23:59 days='all', replicate_every=1, alert_threshold='24:00'): volcoll_id = self.get_volcoll_id_by_name(volcoll_name) api = "protection_schedules" sched_details = {'name': sched_name, 'volcoll_or_prottmpl_type': "volume_collection", 'volcoll_or_prottmpl_id': volcoll_id, 'downstream_partner': repl_partner, 'period': period, 'period_unit': period_unit, 'num_retain': num_retain, 'num_retain_replica': num_retain_replica} if at_time != 0: sched_details['at_time'] = at_time if until_time != 86340: sched_details['until_time'] = until_time if days != 'all': sched_details['days'] = days if replicate_every != 1: sched_details['replicate_every'] = replicate_every if alert_threshold != '24:00': sched_details['alert_threshold'] = alert_threshold data = {'data': sched_details} r = self.post(api, data) return r['data'] def delete_schedule(self, sched_id): api = "protection_schedules/" + str(sched_id) self.delete(api) def claim_vol(self, volume_id, group_id): api = "volumes/" + str(volume_id) group_id = str(group_id) data = {'data': {"owned_by_group_id": group_id } } r = self.put(api, data) return r def get_partner_id_by_name(self, partner_name): api = "replication_partners" filter = {"name": partner_name} r = self.get_query(api, filter) if not r.json()['data']: raise Exception("Unable to retrieve information for partner: {0}" .format(partner_name)) return r.json()['data'][0]['id'] def handover(self, volcoll_id, partner_id): volcoll_id = str(volcoll_id) partner_id = str(partner_id) api = "volume_collections/" + volcoll_id + "/actions/handover" data = {'data': {"id": volcoll_id, "replication_partner_id": partner_id } } self.post(api, data) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315577.3591208 cinder-27.0.0/cinder/volume/drivers/hpe/xp/0000775000175000017500000000000000000000000020523 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/drivers/hpe/xp/hpe_xp_fc.py0000664000175000017500000000570400000000000023036 0ustar00zuulzuul00000000000000# Copyright (C) 2022, Hewlett Packard Enterprise, Ltd. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
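#
# Illustrative sketch (added commentary, not part of the original module): a
# minimal cinder.conf backend section for this FC driver might look like the
# following.  The hpexp_* option names are defined in hpe_xp_rest.py below;
# the san_ip/san_login/san_password names are assumed to be the generic
# Cinder SAN credential options, and every value shown is a placeholder.
#
#   [hpe_xp_fc_backend]
#   volume_driver = cinder.volume.drivers.hpe.xp.hpe_xp_fc.HPEXPFCDriver
#   volume_backend_name = HPEXPFC
#   san_ip = 192.0.2.10
#   san_login = openstack
#   san_password = <password>
#   hpexp_storage_id = 123456
#   hpexp_pools = 0
#   hpexp_target_ports = CL1-A,CL2-A
#   hpexp_group_create = True
#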
# """Fibre channel module for Hewlett Packard Enterprise Driver.""" from cinder import interface from cinder.volume.drivers.hitachi import hbsd_common from cinder.volume.drivers.hitachi import hbsd_fc from cinder.volume.drivers.hitachi import hbsd_rest from cinder.volume.drivers.hitachi import hbsd_utils from cinder.volume.drivers.hpe.xp import hpe_xp_rest as rest from cinder.volume.drivers.hpe.xp import hpe_xp_utils as utils MSG = hbsd_utils.HBSDMsg _DRIVER_INFO = { 'version': utils.VERSION, 'proto': 'FC', 'hba_id': 'wwpns', 'hba_id_type': 'World Wide Name', 'msg_id': { 'target': MSG.CREATE_HOST_GROUP_FAILED, }, 'volume_backend_name': '%(prefix)sFC' % { 'prefix': utils.DRIVER_PREFIX, }, 'volume_type': 'fibre_channel', 'param_prefix': utils.PARAM_PREFIX, 'vendor_name': utils.VENDOR_NAME, 'driver_prefix': utils.DRIVER_PREFIX, 'driver_file_prefix': utils.DRIVER_FILE_PREFIX, 'target_prefix': utils.TARGET_PREFIX, 'hdp_vol_attr': utils.HDP_VOL_ATTR, 'hdt_vol_attr': utils.HDT_VOL_ATTR, 'nvol_ldev_type': utils.NVOL_LDEV_TYPE, 'target_iqn_suffix': utils.TARGET_IQN_SUFFIX, 'pair_attr': utils.PAIR_ATTR, } @interface.volumedriver class HPEXPFCDriver(hbsd_fc.HBSDFCDriver): """Fibre channel class for Hewlett Packard Enterprise Driver. Version history: .. code-block:: none 1.0.0 - Initial driver. """ VERSION = utils.VERSION # ThirdPartySystems wiki page CI_WIKI_NAME = utils.CI_WIKI_NAME def __init__(self, *args, **kwargs): """Initialize instance variables.""" super(HPEXPFCDriver, self).__init__(*args, **kwargs) self.configuration.append_config_values(rest.COMMON_VOLUME_OPTS) self.configuration.append_config_values(rest.FC_VOLUME_OPTS) def _init_common(self, conf, db): return rest.HPEXPRESTFC(conf, _DRIVER_INFO, db) @staticmethod def get_driver_options(): additional_opts = HPEXPFCDriver._get_oslo_driver_opts( *(hbsd_common._INHERITED_VOLUME_OPTS + hbsd_rest._REQUIRED_REST_OPTS + ['driver_ssl_cert_verify', 'driver_ssl_cert_path', 'san_api_port', ])) return (rest.COMMON_VOLUME_OPTS + rest.REST_VOLUME_OPTS + rest.FC_VOLUME_OPTS + additional_opts) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/drivers/hpe/xp/hpe_xp_iscsi.py0000664000175000017500000000555600000000000023565 0ustar00zuulzuul00000000000000# Copyright (C) 2022, Hewlett Packard Enterprise, Ltd. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
# """iSCSI channel module for Hewlett Packard Enterprise Driver.""" from cinder import interface from cinder.volume.drivers.hitachi import hbsd_common from cinder.volume.drivers.hitachi import hbsd_iscsi from cinder.volume.drivers.hitachi import hbsd_rest from cinder.volume.drivers.hitachi import hbsd_utils from cinder.volume.drivers.hpe.xp import hpe_xp_rest as rest from cinder.volume.drivers.hpe.xp import hpe_xp_utils as utils MSG = hbsd_utils.HBSDMsg _DRIVER_INFO = { 'version': utils.VERSION, 'proto': 'iSCSI', 'hba_id': 'initiator', 'hba_id_type': 'iSCSI initiator IQN', 'msg_id': { 'target': MSG.CREATE_ISCSI_TARGET_FAILED, }, 'volume_backend_name': '%(prefix)siSCSI' % { 'prefix': utils.DRIVER_PREFIX, }, 'volume_type': 'iscsi', 'param_prefix': utils.PARAM_PREFIX, 'vendor_name': utils.VENDOR_NAME, 'driver_prefix': utils.DRIVER_PREFIX, 'driver_file_prefix': utils.DRIVER_FILE_PREFIX, 'target_prefix': utils.TARGET_PREFIX, 'hdp_vol_attr': utils.HDP_VOL_ATTR, 'hdt_vol_attr': utils.HDT_VOL_ATTR, 'nvol_ldev_type': utils.NVOL_LDEV_TYPE, 'target_iqn_suffix': utils.TARGET_IQN_SUFFIX, 'pair_attr': utils.PAIR_ATTR, } @interface.volumedriver class HPEXPISCSIDriver(hbsd_iscsi.HBSDISCSIDriver): """iSCSI class for Hewlett Packard Enterprise Driver. Version history: .. code-block:: none 1.0.0 - Initial driver. """ VERSION = utils.VERSION # ThirdPartySystems wiki page CI_WIKI_NAME = utils.CI_WIKI_NAME def __init__(self, *args, **kwargs): """Initialize instance variables.""" super(HPEXPISCSIDriver, self).__init__(*args, **kwargs) self.configuration.append_config_values(rest.COMMON_VOLUME_OPTS) def _init_common(self, conf, db): return rest.HPEXPRESTISCSI(conf, _DRIVER_INFO, db) @staticmethod def get_driver_options(): additional_opts = HPEXPISCSIDriver._get_oslo_driver_opts( *(hbsd_common._INHERITED_VOLUME_OPTS + hbsd_rest._REQUIRED_REST_OPTS + ['driver_ssl_cert_verify', 'driver_ssl_cert_path', 'san_api_port', ])) return (rest.COMMON_VOLUME_OPTS + rest.REST_VOLUME_OPTS + additional_opts) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/drivers/hpe/xp/hpe_xp_rest.py0000664000175000017500000003455000000000000023424 0ustar00zuulzuul00000000000000# Copyright (C) 2022, 2023, Hewlett Packard Enterprise, Ltd. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
# """REST interface for Hewlett Packard Enterprise Driver.""" from oslo_config import cfg from cinder.volume import configuration from cinder.volume.drivers.hitachi import hbsd_rest from cinder.volume.drivers.hitachi import hbsd_rest_api from cinder.volume.drivers.hitachi import hbsd_rest_fc from cinder.volume.drivers.hitachi import hbsd_rest_iscsi COMMON_VOLUME_OPTS = [ cfg.StrOpt( 'hpexp_storage_id', default=None, help='Product number of the storage system.'), cfg.ListOpt( 'hpexp_pools', default=[], deprecated_name='hpexp_pool', help='Pool number[s] or pool name[s] of the THP pool.'), cfg.StrOpt( 'hpexp_snap_pool', default=None, help='Pool number or pool name of the snapshot pool.'), cfg.StrOpt( 'hpexp_ldev_range', default=None, help='Range of the LDEV numbers in the format of \'xxxx-yyyy\' that ' 'can be used by the driver. Values can be in decimal format ' '(e.g. 1000) or in colon-separated hexadecimal format ' '(e.g. 00:03:E8).'), cfg.ListOpt( 'hpexp_target_ports', default=[], help='IDs of the storage ports used to attach volumes to the ' 'controller node. To specify multiple ports, connect them by ' 'commas (e.g. CL1-A,CL2-A).'), cfg.ListOpt( 'hpexp_compute_target_ports', default=[], help='IDs of the storage ports used to attach volumes to compute ' 'nodes. To specify multiple ports, connect them by commas ' '(e.g. CL1-A,CL2-A).'), cfg.BoolOpt( 'hpexp_group_create', default=False, help='If True, the driver will create host groups or iSCSI targets on ' 'storage ports as needed.'), cfg.BoolOpt( 'hpexp_group_delete', default=False, help='If True, the driver will delete host groups or iSCSI targets on ' 'storage ports as needed.'), cfg.IntOpt( 'hpexp_copy_speed', default=3, min=1, max=15, help='Copy speed of storage system. 1 or 2 indicates ' 'low speed, 3 indicates middle speed, and a value between 4 and ' '15 indicates high speed.'), cfg.IntOpt( 'hpexp_copy_check_interval', default=3, min=1, max=600, help='Interval in seconds to check copy'), cfg.IntOpt( 'hpexp_async_copy_check_interval', default=10, min=1, max=600, help='Interval in seconds to check copy asynchronously'), ] REST_VOLUME_OPTS = [ cfg.BoolOpt( 'hpexp_rest_disable_io_wait', default=True, help='It may take some time to detach volume after I/O. 
' 'This option will allow detaching volume to complete ' 'immediately.'), cfg.BoolOpt( 'hpexp_rest_tcp_keepalive', default=True, help='Enables or disables use of REST API tcp keepalive'), cfg.BoolOpt( 'hpexp_discard_zero_page', default=True, help='Enable or disable zero page reclamation in a THP V-VOL.'), cfg.IntOpt( 'hpexp_lun_timeout', default=hbsd_rest._LUN_TIMEOUT, help='Maximum wait time in seconds for adding a LUN to complete.'), cfg.IntOpt( 'hpexp_lun_retry_interval', default=hbsd_rest._LUN_RETRY_INTERVAL, help='Retry interval in seconds for REST API adding a LUN.'), cfg.IntOpt( 'hpexp_restore_timeout', default=hbsd_rest._RESTORE_TIMEOUT, help='Maximum wait time in seconds for the restore operation to ' 'complete.'), cfg.IntOpt( 'hpexp_state_transition_timeout', default=hbsd_rest._STATE_TRANSITION_TIMEOUT, help='Maximum wait time in seconds for a volume transition to ' 'complete.'), cfg.IntOpt( 'hpexp_lock_timeout', default=hbsd_rest_api._LOCK_TIMEOUT, help='Maximum wait time in seconds for storage to be unlocked.'), cfg.IntOpt( 'hpexp_rest_timeout', default=hbsd_rest_api._REST_TIMEOUT, help='Maximum wait time in seconds for REST API execution to ' 'complete.'), cfg.IntOpt( 'hpexp_extend_timeout', default=hbsd_rest_api._EXTEND_TIMEOUT, help='Maximum wait time in seconds for a volume extention to ' 'complete.'), cfg.IntOpt( 'hpexp_exec_retry_interval', default=hbsd_rest_api._EXEC_RETRY_INTERVAL, help='Retry interval in seconds for REST API execution.'), cfg.IntOpt( 'hpexp_rest_connect_timeout', default=hbsd_rest_api._DEFAULT_CONNECT_TIMEOUT, help='Maximum wait time in seconds for REST API connection to ' 'complete.'), cfg.IntOpt( 'hpexp_rest_job_api_response_timeout', default=hbsd_rest_api._JOB_API_RESPONSE_TIMEOUT, help='Maximum wait time in seconds for a response from REST API.'), cfg.IntOpt( 'hpexp_rest_get_api_response_timeout', default=hbsd_rest_api._GET_API_RESPONSE_TIMEOUT, help='Maximum wait time in seconds for a response against GET method ' 'of REST API.'), cfg.IntOpt( 'hpexp_rest_server_busy_timeout', default=hbsd_rest_api._REST_SERVER_BUSY_TIMEOUT, help='Maximum wait time in seconds when REST API returns busy.'), cfg.IntOpt( 'hpexp_rest_keep_session_loop_interval', default=hbsd_rest_api._KEEP_SESSION_LOOP_INTERVAL, help='Loop interval in seconds for keeping REST API session.'), cfg.IntOpt( 'hpexp_rest_another_ldev_mapped_retry_timeout', default=hbsd_rest_api._ANOTHER_LDEV_MAPPED_RETRY_TIMEOUT, help='Retry time in seconds when new LUN allocation request fails.'), cfg.IntOpt( 'hpexp_rest_tcp_keepidle', default=hbsd_rest_api._TCP_KEEPIDLE, help='Wait time in seconds for sending a first TCP keepalive packet.'), cfg.IntOpt( 'hpexp_rest_tcp_keepintvl', default=hbsd_rest_api._TCP_KEEPINTVL, help='Interval of transmissions in seconds for TCP keepalive packet.'), cfg.IntOpt( 'hpexp_rest_tcp_keepcnt', default=hbsd_rest_api._TCP_KEEPCNT, help='Maximum number of transmissions for TCP keepalive packet.'), cfg.ListOpt( 'hpexp_host_mode_options', default=[], help='Host mode option for host group or iSCSI target.'), ] FC_VOLUME_OPTS = [ cfg.BoolOpt( 'hpexp_zoning_request', default=False, help='If True, the driver will configure FC zoning between the server ' 'and the storage system provided that FC zoning manager is ' 'enabled.'), ] CONF = cfg.CONF CONF.register_opts(COMMON_VOLUME_OPTS, group=configuration.SHARED_CONF_GROUP) CONF.register_opts(REST_VOLUME_OPTS, group=configuration.SHARED_CONF_GROUP) CONF.register_opts(FC_VOLUME_OPTS, group=configuration.SHARED_CONF_GROUP) class 
HPEXPRESTFC(hbsd_rest_fc.HBSDRESTFC): """REST interface fibre channel class for Hewlett Packard Enterprise Driver. """ def __init__(self, conf, storage_protocol, db): """Initialize instance variables.""" conf.append_config_values(COMMON_VOLUME_OPTS) conf.append_config_values(REST_VOLUME_OPTS) conf.append_config_values(FC_VOLUME_OPTS) super(HPEXPRESTFC, self).__init__(conf, storage_protocol, db) self._update_conf() def _update_conf(self): """Update configuration""" # COMMON_VOLUME_OPTS self.conf.hitachi_storage_id = self.conf.hpexp_storage_id self.conf.hitachi_pools = self.conf.hpexp_pools self.conf.hitachi_snap_pool = self.conf.hpexp_snap_pool self.conf.hitachi_ldev_range = self.conf.hpexp_ldev_range self.conf.hitachi_target_ports = self.conf.hpexp_target_ports self.conf.hitachi_compute_target_ports = ( self.conf.hpexp_compute_target_ports) self.conf.hitachi_group_create = self.conf.hpexp_group_create self.conf.hitachi_group_delete = self.conf.hpexp_group_delete self.conf.hitachi_copy_speed = self.conf.hpexp_copy_speed self.conf.hitachi_copy_check_interval = ( self.conf.hpexp_copy_check_interval) self.conf.hitachi_async_copy_check_interval = ( self.conf.hpexp_async_copy_check_interval) # REST_VOLUME_OPTS self.conf.hitachi_rest_disable_io_wait = ( self.conf.hpexp_rest_disable_io_wait) self.conf.hitachi_rest_tcp_keepalive = ( self.conf.hpexp_rest_tcp_keepalive) self.conf.hitachi_discard_zero_page = ( self.conf.hpexp_discard_zero_page) self.conf.hitachi_lun_timeout = self.conf.hpexp_lun_timeout self.conf.hitachi_lun_retry_interval = ( self.conf.hpexp_lun_retry_interval) self.conf.hitachi_restore_timeout = self.conf.hpexp_restore_timeout self.conf.hitachi_state_transition_timeout = ( self.conf.hpexp_state_transition_timeout) self.conf.hitachi_lock_timeout = self.conf.hpexp_lock_timeout self.conf.hitachi_rest_timeout = self.conf.hpexp_rest_timeout self.conf.hitachi_extend_timeout = self.conf.hpexp_extend_timeout self.conf.hitachi_exec_retry_interval = ( self.conf.hpexp_exec_retry_interval) self.conf.hitachi_rest_connect_timeout = ( self.conf.hpexp_rest_connect_timeout) self.conf.hitachi_rest_job_api_response_timeout = ( self.conf.hpexp_rest_job_api_response_timeout) self.conf.hitachi_rest_get_api_response_timeout = ( self.conf.hpexp_rest_get_api_response_timeout) self.conf.hitachi_rest_server_busy_timeout = ( self.conf.hpexp_rest_server_busy_timeout) self.conf.hitachi_rest_keep_session_loop_interval = ( self.conf.hpexp_rest_keep_session_loop_interval) self.conf.hitachi_rest_another_ldev_mapped_retry_timeout = ( self.conf.hpexp_rest_another_ldev_mapped_retry_timeout) self.conf.hitachi_rest_tcp_keepidle = ( self.conf.hpexp_rest_tcp_keepidle) self.conf.hitachi_rest_tcp_keepintvl = ( self.conf.hpexp_rest_tcp_keepintvl) self.conf.hitachi_rest_tcp_keepcnt = ( self.conf.hpexp_rest_tcp_keepcnt) self.conf.hitachi_host_mode_options = ( self.conf.hpexp_host_mode_options) # FC_VOLUME_OPTS self.conf.hitachi_zoning_request = self.conf.hpexp_zoning_request class HPEXPRESTISCSI(hbsd_rest_iscsi.HBSDRESTISCSI): """REST interface iSCSI class for Hewlett Packard Enterprise Driver.""" def __init__(self, conf, storage_protocol, db): """Initialize instance variables.""" conf.append_config_values(COMMON_VOLUME_OPTS) conf.append_config_values(REST_VOLUME_OPTS) super(HPEXPRESTISCSI, self).__init__(conf, storage_protocol, db) self._update_conf() def _update_conf(self): """Update configuration""" # COMMON_VOLUME_OPTS self.conf.hitachi_storage_id = self.conf.hpexp_storage_id self.conf.hitachi_pools = self.conf.hpexp_pools 
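# Added commentary (not in the original file): the assignments that follow
# repeat the same hpexp_* -> hitachi_* mapping done in HPEXPRESTFC above;
# only the FC-specific hitachi_zoning_request mapping is omitted for the
# iSCSI class.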
self.conf.hitachi_snap_pool = self.conf.hpexp_snap_pool self.conf.hitachi_ldev_range = self.conf.hpexp_ldev_range self.conf.hitachi_target_ports = self.conf.hpexp_target_ports self.conf.hitachi_compute_target_ports = ( self.conf.hpexp_compute_target_ports) self.conf.hitachi_group_create = self.conf.hpexp_group_create self.conf.hitachi_group_delete = self.conf.hpexp_group_delete self.conf.hitachi_copy_speed = self.conf.hpexp_copy_speed self.conf.hitachi_copy_check_interval = ( self.conf.hpexp_copy_check_interval) self.conf.hitachi_async_copy_check_interval = ( self.conf.hpexp_async_copy_check_interval) # REST_VOLUME_OPTS self.conf.hitachi_rest_disable_io_wait = ( self.conf.hpexp_rest_disable_io_wait) self.conf.hitachi_rest_tcp_keepalive = ( self.conf.hpexp_rest_tcp_keepalive) self.conf.hitachi_discard_zero_page = ( self.conf.hpexp_discard_zero_page) self.conf.hitachi_lun_timeout = self.conf.hpexp_lun_timeout self.conf.hitachi_lun_retry_interval = ( self.conf.hpexp_lun_retry_interval) self.conf.hitachi_restore_timeout = self.conf.hpexp_restore_timeout self.conf.hitachi_state_transition_timeout = ( self.conf.hpexp_state_transition_timeout) self.conf.hitachi_lock_timeout = self.conf.hpexp_lock_timeout self.conf.hitachi_rest_timeout = self.conf.hpexp_rest_timeout self.conf.hitachi_extend_timeout = self.conf.hpexp_extend_timeout self.conf.hitachi_exec_retry_interval = ( self.conf.hpexp_exec_retry_interval) self.conf.hitachi_rest_connect_timeout = ( self.conf.hpexp_rest_connect_timeout) self.conf.hitachi_rest_job_api_response_timeout = ( self.conf.hpexp_rest_job_api_response_timeout) self.conf.hitachi_rest_get_api_response_timeout = ( self.conf.hpexp_rest_get_api_response_timeout) self.conf.hitachi_rest_server_busy_timeout = ( self.conf.hpexp_rest_server_busy_timeout) self.conf.hitachi_rest_keep_session_loop_interval = ( self.conf.hpexp_rest_keep_session_loop_interval) self.conf.hitachi_rest_another_ldev_mapped_retry_timeout = ( self.conf.hpexp_rest_another_ldev_mapped_retry_timeout) self.conf.hitachi_rest_tcp_keepidle = ( self.conf.hpexp_rest_tcp_keepidle) self.conf.hitachi_rest_tcp_keepintvl = ( self.conf.hpexp_rest_tcp_keepintvl) self.conf.hitachi_rest_tcp_keepcnt = ( self.conf.hpexp_rest_tcp_keepcnt) self.conf.hitachi_host_mode_options = ( self.conf.hpexp_host_mode_options) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/drivers/hpe/xp/hpe_xp_utils.py0000664000175000017500000000176500000000000023611 0ustar00zuulzuul00000000000000# Copyright (C) 2022, Hewlett Packard Enterprise, Ltd. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
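#
# Added commentary (not in the original file): the constants below are the
# values plugged into the _DRIVER_INFO dictionaries of hpe_xp_fc.py and
# hpe_xp_iscsi.py (VERSION, DRIVER_PREFIX, TARGET_PREFIX, the volume
# attribute names, and so on), so most of the HPE XP-specific naming for the
# rebranded Hitachi hbsd code lives in this one module.
#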
# """Utility module for Hewlett Packard Enterprise Driver.""" VERSION = '1.0.0' CI_WIKI_NAME = 'HPE_XP_Storage_CI' PARAM_PREFIX = 'hpexp' VENDOR_NAME = 'Hewlett Packard Enterprise' DRIVER_PREFIX = 'HPEXP' DRIVER_FILE_PREFIX = 'hpe_xp' TARGET_PREFIX = 'HPEXP-' HDP_VOL_ATTR = 'THP' HDT_VOL_ATTR = 'ST' NVOL_LDEV_TYPE = 'THP V-VOL' TARGET_IQN_SUFFIX = '.hpexp-target' PAIR_ATTR = 'FS' ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315577.3591208 cinder-27.0.0/cinder/volume/drivers/huawei/0000775000175000017500000000000000000000000020602 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/drivers/huawei/__init__.py0000664000175000017500000000000000000000000022701 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/drivers/huawei/common.py0000664000175000017500000023346500000000000022461 0ustar00zuulzuul00000000000000# Copyright (c) 2018 Huawei Technologies Co., Ltd. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import collections import math import re import uuid from oslo_config import cfg from oslo_log import log as logging from oslo_utils import excutils from oslo_utils import units from cinder import context from cinder import exception from cinder.i18n import _ from cinder import objects from cinder.objects import fields from cinder.volume import configuration from cinder.volume import driver from cinder.volume.drivers.huawei import constants from cinder.volume.drivers.huawei import huawei_conf from cinder.volume.drivers.huawei import huawei_utils from cinder.volume.drivers.huawei import hypermetro from cinder.volume.drivers.huawei import replication from cinder.volume.drivers.huawei import rest_client from cinder.volume.drivers.huawei import smartx from cinder.volume import volume_types from cinder.volume import volume_utils LOG = logging.getLogger(__name__) huawei_opts = [ cfg.StrOpt('cinder_huawei_conf_file', default='/etc/cinder/cinder_huawei_conf.xml', help='The configuration file for the Cinder Huawei driver.'), cfg.StrOpt('hypermetro_devices', default=None, help='The remote device hypermetro will use.'), cfg.StrOpt('metro_san_user', default=None, help='The remote metro device san user.'), cfg.StrOpt('metro_san_password', default=None, secret=True, help='The remote metro device san password.'), cfg.StrOpt('metro_domain_name', default=None, help='The remote metro device domain name.'), cfg.StrOpt('metro_san_address', default=None, help='The remote metro device request url.'), cfg.StrOpt('metro_storage_pools', default=None, help='The remote metro device pool names.'), ] CONF = cfg.CONF CONF.register_opts(huawei_opts, group=configuration.SHARED_CONF_GROUP) snap_attrs = ('id', 'volume_id', 'volume', 'provider_location', 'volume_size') Snapshot = collections.namedtuple('Snapshot', snap_attrs) vol_attrs = 
('id', 'lun_type', 'provider_location', 'metadata') Volume = collections.namedtuple('Volume', vol_attrs) class HuaweiBaseDriver(driver.VolumeDriver): # ThirdPartySytems wiki page CI_WIKI_NAME = "Huawei_volume_CI" def __init__(self, *args, **kwargs): super(HuaweiBaseDriver, self).__init__(*args, **kwargs) if not self.configuration: msg = _('Configuration is not found.') raise exception.InvalidInput(reason=msg) self.active_backend_id = kwargs.get('active_backend_id') self.configuration.append_config_values(huawei_opts) self.huawei_conf = huawei_conf.HuaweiConf(self.configuration) self.support_func = None self.metro_flag = False self.replica = None self.is_dorado_v6 = False @staticmethod def get_driver_options(): return huawei_opts def check_func_support(self, obj_name): try: self.client._get_object_count(obj_name) return True except Exception: return False def get_local_and_remote_dev_conf(self): self.loc_dev_conf = { 'san_address': self.configuration.san_address, 'san_user': self.configuration.san_user, 'san_password': self.configuration.san_password, 'storage_pools': self.configuration.storage_pools, 'iscsi_info': self.configuration.iscsi_info, } # Now just support one replication device. self.replica_dev_conf = self.configuration.replication def get_local_and_remote_client_conf(self): if self.active_backend_id: return self.replica_dev_conf, self.loc_dev_conf else: return self.loc_dev_conf, self.replica_dev_conf def do_setup(self, context): """Instantiate common class and login storage system.""" # Set huawei private configuration into Configuration object. self.huawei_conf.update_config_value() self.get_local_and_remote_dev_conf() client_conf, replica_client_conf = ( self.get_local_and_remote_client_conf()) # init local client if not client_conf: msg = _('Get active client failed.') LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) self.client = rest_client.RestClient(self.configuration, **client_conf) self.client.login() self.is_dorado_v6 = huawei_utils.is_support_clone_pair(self.client) # init remote client metro_san_address = self.configuration.safe_get("metro_san_address") metro_san_user = self.configuration.safe_get("metro_san_user") metro_san_password = self.configuration.safe_get("metro_san_password") if metro_san_address and metro_san_user and metro_san_password: metro_san_address = metro_san_address.split(";") self.rmt_client = rest_client.RestClient(self.configuration, metro_san_address, metro_san_user, metro_san_password) self.rmt_client.login() self.metro_flag = True else: self.metro_flag = False LOG.warning("Remote device not configured in cinder.conf") # init replication manager if replica_client_conf: self.replica_client = rest_client.RestClient(self.configuration, **replica_client_conf) self.replica_client.try_login() self.replica = replication.ReplicaPairManager(self.client, self.replica_client, self.configuration) def check_for_setup_error(self): pass def _get_volume_stats(self, refresh=False): """Get volume status and reload huawei config file.""" self.huawei_conf.update_config_value() stats = self.client.update_volume_stats() stats = self.update_support_capability(stats) backend_name = self.configuration.safe_get('volume_backend_name') stats['volume_backend_name'] = backend_name or self.__class__.__name__ stats['vendor_name'] = 'Huawei' if self.replica: stats = self.replica.update_replica_capability(stats) targets = [self.replica_dev_conf['backend_id']] stats['replication_targets'] = targets stats['replication_enabled'] = True return stats def 
update_support_capability(self, stats): for pool in stats['pools']: pool['smartpartition'] = ( self.check_func_support("SMARTCACHEPARTITION")) pool['smartcache'] = self.check_func_support("smartcachepool") pool['QoS_support'] = self.check_func_support("ioclass") pool['splitmirror'] = self.check_func_support("splitmirror") pool['luncopy'] = self.check_func_support("luncopy") pool['thick_provisioning_support'] = True pool['thin_provisioning_support'] = True pool['smarttier'] = True pool['consistencygroup_support'] = True pool['consistent_group_snapshot_enabled'] = True if self.configuration.san_product == "Dorado": pool['smarttier'] = False pool['thick_provisioning_support'] = False if self.metro_flag: pool['hypermetro'] = self.check_func_support("HyperMetroPair") # assign the support function to global parameter. self.support_func = pool return stats def _get_volume_type(self, volume): volume_type = None type_id = volume.volume_type_id if type_id: ctxt = context.get_admin_context() volume_type = volume_types.get_volume_type(ctxt, type_id) return volume_type def _get_lun_params(self, volume, opts, src_size=None): pool_name = volume_utils.extract_host(volume.host, level='pool') params = { 'TYPE': '11', 'NAME': huawei_utils.encode_name(volume.id), 'PARENTTYPE': '216', 'PARENTID': self.client.get_pool_id(pool_name), 'DESCRIPTION': volume.name, 'ALLOCTYPE': opts.get('LUNType', self.configuration.lun_type), 'CAPACITY': int(int(src_size) * constants.CAPACITY_UNIT if src_size else int(volume.size) * constants.CAPACITY_UNIT), 'READCACHEPOLICY': self.configuration.lun_read_cache_policy, 'WRITECACHEPOLICY': self.configuration.lun_write_cache_policy, } if hasattr(self.configuration, 'write_type'): params['WRITEPOLICY'] = self.configuration.write_type if hasattr(self.configuration, 'prefetch_type'): params['PREFETCHPOLICY'] = self.configuration.prefetch_type if hasattr(self.configuration, 'prefetch_value'): params['PREFETCHVALUE'] = self.configuration.prefetch_value if opts.get('policy'): params['DATATRANSFERPOLICY'] = opts['policy'] LOG.info('volume: %(volume)s, lun params: %(params)s.', {'volume': volume.id, 'params': params}) return params def _create_volume(self, lun_params): # Create LUN on the array. lun_info = self.client.create_lun(lun_params) metadata = {'huawei_lun_id': lun_info['ID'], 'huawei_lun_wwn': lun_info['WWN']} model_update = {'metadata': metadata} return lun_info, model_update def _create_base_type_volume(self, opts, volume, src_size=None): """Create volume and add some base type. Base type is the service type which doesn't conflict with the other. """ if self.is_dorado_v6: lun_params = self._get_lun_params(volume, opts, src_size) else: lun_params = self._get_lun_params(volume, opts) lun_info, model_update = self._create_volume(lun_params) lun_id = lun_info['ID'] try: if opts.get('qos'): smartqos = smartx.SmartQos(self.client) smartqos.add(opts['qos'], lun_id) if opts.get('smartpartition'): smartpartition = smartx.SmartPartition(self.client) smartpartition.add(opts['partitionname'], lun_id) if opts.get('smartcache'): smartcache = smartx.SmartCache(self.client) smartcache.add(opts['cachename'], lun_id) except Exception as err: self._delete_lun_with_check(lun_id) msg = _('Create volume error. Because %s.') % str(err) raise exception.VolumeBackendAPIException(data=msg) return lun_params, lun_info, model_update def _add_extend_type_to_volume(self, opts, lun_params, lun_info, model_update): """Add the extend type. Extend type is the service type which may conflict with the other. 
So add it after those services. """ lun_id = lun_info['ID'] if opts.get('hypermetro'): metro = hypermetro.HuaweiHyperMetro(self.client, self.rmt_client, self.configuration) try: metro_info = metro.create_hypermetro(lun_id, lun_params) model_update['metadata'].update(metro_info) except exception.VolumeBackendAPIException as err: LOG.error('Create hypermetro error: %s.', err) self._delete_lun_with_check(lun_id) raise if opts.get('replication_enabled'): replica_model = opts.get('replication_type') try: replica_info = self.replica.create_replica(lun_info, replica_model) model_update.update(replica_info) except Exception: LOG.exception('Create replication volume error.') self._delete_lun_with_check(lun_id) raise return model_update def create_volume(self, volume): """Create a volume.""" opts = huawei_utils.get_volume_params(volume) if opts.get('hypermetro') and opts.get('replication_enabled'): err_msg = _("Hypermetro and Replication can not be " "used in the same volume_type.") LOG.error(err_msg) raise exception.VolumeBackendAPIException(data=err_msg) lun_params, lun_info, model_update = self._create_base_type_volume( opts, volume) model_update = self._add_extend_type_to_volume(opts, lun_params, lun_info, model_update) model_update['provider_location'] = huawei_utils.to_string( **model_update.pop('metadata')) return model_update def _delete_volume(self, volume): lun_info = huawei_utils.get_lun_info(self.client, volume) if not lun_info: return lun_id = lun_info['ID'] lun_group_ids = self.client.get_lungroupids_by_lunid(lun_id) if lun_group_ids and len(lun_group_ids) == 1: self.client.remove_lun_from_lungroup(lun_group_ids[0], lun_id) self.client.delete_lun(lun_id) def delete_volume(self, volume): """Delete a volume. Three steps: Firstly, remove associate from lungroup. Secondly, remove associate from QoS policy. Thirdly, remove the lun. """ lun_id = self._check_volume_exist_on_array( volume, constants.VOLUME_NOT_EXISTS_WARN) if not lun_id: return if self.support_func.get('QoS_support'): qos_id = self.client.get_qosid_by_lunid(lun_id) if qos_id: smart_qos = smartx.SmartQos(self.client) smart_qos.remove(qos_id, lun_id) metadata = huawei_utils.get_volume_private_data(volume) if metadata.get('hypermetro_id'): metro = hypermetro.HuaweiHyperMetro(self.client, self.rmt_client, self.configuration) try: metro.delete_hypermetro(volume) except exception.VolumeBackendAPIException as err: LOG.error('Delete hypermetro error: %s.', err) # We have checked the LUN WWN above, # no need to check again here. 
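# Added commentary (not in the original lines): if tearing down the
# hypermetro pair fails, the local LUN is still removed before the exception
# is re-raised, so a failed delete does not leave an orphaned LUN behind.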
self._delete_volume(volume) raise # Delete a replication volume replica_data = volume.replication_driver_data if replica_data: try: self.replica.delete_replica(volume) except exception.VolumeBackendAPIException: with excutils.save_and_reraise_exception(): LOG.exception("Delete replication error.") self._delete_volume(volume) self._delete_volume(volume) def _delete_lun_with_check(self, lun_id, lun_wwn=None): if not lun_id: return if self.client.check_lun_exist(lun_id, lun_wwn): if self.support_func.get('QoS_support'): qos_id = self.client.get_qosid_by_lunid(lun_id) if qos_id: smart_qos = smartx.SmartQos(self.client) smart_qos.remove(qos_id, lun_id) self.client.delete_lun(lun_id) def _is_lun_migration_complete(self, src_id, dst_id): result = self.client.get_lun_migration_task() found_migration_task = False if 'data' not in result: return False for item in result['data']: if (src_id == item['PARENTID'] and dst_id == item['TARGETLUNID']): found_migration_task = True if constants.MIGRATION_COMPLETE == item['RUNNINGSTATUS']: return True if constants.MIGRATION_FAULT == item['RUNNINGSTATUS']: msg = _("Lun migration error.") LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) if not found_migration_task: err_msg = _("Cannot find migration task.") LOG.error(err_msg) raise exception.VolumeBackendAPIException(data=err_msg) return False def _is_lun_migration_exist(self, src_id, dst_id): try: result = self.client.get_lun_migration_task() except Exception: LOG.error("Get LUN migration error.") return False if 'data' in result: for item in result['data']: if (src_id == item['PARENTID'] and dst_id == item['TARGETLUNID']): return True return False def _migrate_lun(self, src_id, dst_id): try: self.client.create_lun_migration(src_id, dst_id) def _is_lun_migration_complete(): return self._is_lun_migration_complete(src_id, dst_id) wait_interval = constants.MIGRATION_WAIT_INTERVAL huawei_utils.wait_for_condition(_is_lun_migration_complete, wait_interval, self.configuration.lun_timeout) # Clean up if migration failed. 
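# Added commentary (not in the original lines): the finally clause below
# checks whether the migration task is still present (i.e. it did not run to
# completion); if so, it deletes the task and the temporary destination LUN
# so that a failed migration leaves nothing behind on the array.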
except Exception as ex: raise exception.VolumeBackendAPIException(data=ex) finally: if self._is_lun_migration_exist(src_id, dst_id): self.client.delete_lun_migration(src_id, dst_id) self._delete_lun_with_check(dst_id) LOG.debug("Migrate lun %s successfully.", src_id) return True def _wait_volume_ready(self, lun_id): wait_interval = self.configuration.lun_ready_wait_interval def _volume_ready(): result = self.client.get_lun_info(lun_id) if (result['HEALTHSTATUS'] == constants.STATUS_HEALTH and result['RUNNINGSTATUS'] == constants.STATUS_VOLUME_READY): return True return False huawei_utils.wait_for_condition(_volume_ready, wait_interval, wait_interval * 10) def _get_original_status(self, volume): return 'in-use' if volume.volume_attachment else 'available' def update_migrated_volume(self, ctxt, volume, new_volume, original_volume_status=None): orig_lun_name = huawei_utils.encode_name(volume.id) new_lun_info = huawei_utils.get_lun_info( self.client, new_volume) if not new_lun_info: msg = _("Volume %s doesn't exist.") % volume.id LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) new_lun_id = new_lun_info['ID'] new_metadata = huawei_utils.get_volume_private_data(new_volume) model_update = { 'provider_location': huawei_utils.to_string(**new_metadata), } try: self.client.rename_lun(new_lun_id, orig_lun_name) except exception.VolumeBackendAPIException: LOG.error('Unable to rename lun %s on array.', new_lun_id) model_update['_name_id'] = new_volume.name_id else: LOG.debug("Renamed lun %(id)s to %(name)s successfully.", {'id': new_lun_id, 'name': orig_lun_name}) model_update['_name_id'] = None return model_update def migrate_volume(self, ctxt, volume, host): """Migrate a volume within the same array.""" self._check_volume_exist_on_array(volume, constants.VOLUME_NOT_EXISTS_RAISE) # NOTE(jlc): Replication volume can't migrate. But retype # can remove replication relationship first then do migrate. # So don't add this judgement into _check_migration_valid(). opts = huawei_utils.get_volume_params(volume) if opts.get('replication_enabled'): return (False, None) return self._migrate_volume(volume, host) def _check_migration_valid(self, host, volume): if 'pool_name' not in host['capabilities']: return False target_device = host['capabilities']['location_info'] # Source and destination should be on same array. if target_device != self.client.device_id: return False # Same protocol should be used if volume is in-use. protocol = self.configuration.san_protocol if (host['capabilities']['storage_protocol'] != protocol and self._get_original_status(volume) == 'in-use'): return False pool_name = host['capabilities']['pool_name'] if len(pool_name) == 0: return False return True def _migrate_volume(self, volume, host, new_type=None): if not self._check_migration_valid(host, volume): return (False, None) pool_name = host['capabilities']['pool_name'] pools = self.client.get_all_pools() pool_info = self.client.get_pool_info(pool_name, pools) dst_volume_name = str(uuid.uuid4()) lun_info = huawei_utils.get_lun_info(self.client, volume) if not lun_info: msg = _("Volume %s doesn't exist.") % volume.id LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) src_id = lun_info['ID'] if new_type: # If new type exists, use new type. 
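# Added commentary (not in the original lines): during a retype-triggered
# migration the new volume type's extra specs drive the destination LUN's
# ALLOCTYPE and tiering policy; otherwise the source volume's own parameters
# are reused when lun_params is built below.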
opts = huawei_utils.get_volume_type_params(new_type) else: opts = huawei_utils.get_volume_params(volume) if opts['policy']: policy = opts['policy'] else: policy = lun_info.get('DATATRANSFERPOLICY', self.configuration.lun_policy) lun_params = { 'NAME': huawei_utils.encode_name(dst_volume_name), 'PARENTID': pool_info['ID'], 'DESCRIPTION': lun_info['DESCRIPTION'], 'ALLOCTYPE': opts.get('LUNType', lun_info['ALLOCTYPE']), 'CAPACITY': lun_info['CAPACITY'], 'WRITEPOLICY': lun_info['WRITEPOLICY'], 'PREFETCHPOLICY': lun_info['PREFETCHPOLICY'], 'PREFETCHVALUE': lun_info['PREFETCHVALUE'], 'DATATRANSFERPOLICY': policy, 'READCACHEPOLICY': lun_info.get( 'READCACHEPOLICY', self.configuration.lun_read_cache_policy), 'WRITECACHEPOLICY': lun_info.get( 'WRITECACHEPOLICY', self.configuration.lun_write_cache_policy), 'OWNINGCONTROLLER': lun_info['OWNINGCONTROLLER'], } for item in lun_params: if lun_params.get(item) == '--': del lun_params[item] lun_info = self.client.create_lun(lun_params) lun_id = lun_info['ID'] if opts.get('qos'): SmartQos = smartx.SmartQos(self.client) SmartQos.add(opts['qos'], lun_id) if opts.get('smartpartition'): smartpartition = smartx.SmartPartition(self.client) smartpartition.add(opts['partitionname'], lun_id) if opts.get('smartcache'): smartcache = smartx.SmartCache(self.client) smartcache.add(opts['cachename'], lun_id) dst_id = lun_info['ID'] self._wait_volume_ready(dst_id) moved = self._migrate_lun(src_id, dst_id) return moved, {} def _create_volume_wait_ready(self, opts, volume, snapshot_id, src_size=None): lun_params, lun_info, model_update = \ self._create_base_type_volume(opts, volume, src_size) tgt_lun_id = lun_info['ID'] luncopy_name = huawei_utils.encode_name(volume.id) LOG.info( 'create_volume_from_snapshot: src_lun_id: %(src_lun_id)s, ' 'tgt_lun_id: %(tgt_lun_id)s, copy_name: %(copy_name)s.', {'src_lun_id': snapshot_id, 'tgt_lun_id': tgt_lun_id, 'copy_name': luncopy_name}) wait_interval = self.configuration.lun_ready_wait_interval def _volume_ready(): result = self.client.get_lun_info(tgt_lun_id) if (result['HEALTHSTATUS'] == constants.STATUS_HEALTH and result['RUNNINGSTATUS'] == constants.STATUS_VOLUME_READY): return True return False huawei_utils.wait_for_condition(_volume_ready, wait_interval, wait_interval * 10) return lun_params, lun_info, model_update def _create_clone_pair(self, source_id, target_id, clone_speed): clone_pair_id = self.client.create_clone_pair( source_id, target_id, clone_speed) def _pair_sync_completed(): clone_pair_info = self.client.get_clone_pair_info(clone_pair_id) if clone_pair_info['copyStatus'] != constants.CLONE_STATUS_HEALTH: msg = _("ClonePair %s is abnormal.") % clone_pair_id LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) return (clone_pair_info['syncStatus'] in constants.CLONE_STATUS_COMPLETE) self.client.sync_clone_pair(clone_pair_id) huawei_utils.wait_for_condition( _pair_sync_completed, self.configuration.lun_copy_wait_interval, self.configuration.lun_timeout) self.client.delete_clone_pair(clone_pair_id) def _create_volume_from_snapshot(self, volume, snapshot, opts, clone_pair_flag=None): snapshot_info = huawei_utils.get_snapshot_info(self.client, snapshot) if not snapshot_info: msg = _('create_volume_from_snapshot: Snapshot %(name)s ' 'does not exist.') % {'name': snapshot.id} LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) snapshot_id = snapshot_info['ID'] if snapshot_info.get("RUNNINGSTATUS") != constants.STATUS_ACTIVE: msg = _("Failed to create volume from snapshot due to " "snapshot %s not 
being active.") % snapshot_id LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) expect_size = int(int(volume.size) * constants.CAPACITY_UNIT) lun_params, lun_info, model_update = \ self._create_volume_wait_ready(opts, volume, snapshot_id, src_size=snapshot.volume_size) tgt_lun_id = lun_info['ID'] luncopy_name = huawei_utils.encode_name(volume.id) if clone_pair_flag: clone_speed = self.configuration.lun_copy_speed self._create_clone_pair(snapshot_id, tgt_lun_id, clone_speed) else: self._copy_volume(volume, luncopy_name, snapshot_id, tgt_lun_id) try: if int(lun_info['CAPACITY']) < expect_size: self.client.extend_lun(lun_info["ID"], expect_size) lun_info = self.client.get_lun_info(lun_info["ID"]) lun_params.update({"CAPACITY": expect_size}) except Exception as err: LOG.exception('Extend lun %(lun_id)s error. Reason is %(err)s', {"lun_id": lun_info['ID'], "err": err}) self._delete_lun_with_check(lun_info['ID']) raise return lun_params, lun_info, model_update def create_volume_from_snapshot(self, volume, snapshot): """Create a volume from a snapshot. We use LUNcopy to copy a new volume from snapshot. The time needed increases as volume size does. For Dorado V6 we use clone_pair """ opts = huawei_utils.get_volume_params(volume) if opts.get('hypermetro') and opts.get('replication_enabled'): msg = _("Hypermetro and Replication can not be " "used in the same volume_type.") LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) lun_params, lun_info, model_update = \ self._create_volume_from_snapshot(volume, snapshot, opts, self.is_dorado_v6) model_update = self._add_extend_type_to_volume(opts, lun_params, lun_info, model_update) model_update['provider_location'] = huawei_utils.to_string( **model_update.pop('metadata')) return model_update def create_cloned_volume(self, volume, src_vref): """Clone a new volume from an existing volume.""" self._check_volume_exist_on_array(src_vref, constants.VOLUME_NOT_EXISTS_RAISE) # Form the snapshot structure. snapshot = Snapshot(id=uuid.uuid4().__str__(), volume_id=src_vref.id, volume=src_vref, volume_size=src_vref.size, provider_location=None) # Create snapshot. self.create_snapshot(snapshot) try: # Create volume from snapshot. model_update = self.create_volume_from_snapshot(volume, snapshot) finally: try: # Delete snapshot. self.delete_snapshot(snapshot) except exception.VolumeBackendAPIException: LOG.warning( 'Failure deleting the snapshot %(snapshot_id)s ' 'of volume %(volume_id)s.', {'snapshot_id': snapshot.id, 'volume_id': src_vref.id},) return model_update def _check_volume_exist_on_array(self, volume, action): """Check whether the volume exists on the array. If the volume exists on the array, return the LUN ID. If not exists, raise or log warning. 
""" lun_info = huawei_utils.get_lun_info(self.client, volume) if not lun_info: msg = _("Volume %s does not exist on the array.") % volume.id if action == constants.VOLUME_NOT_EXISTS_WARN: LOG.warning(msg) if action == constants.VOLUME_NOT_EXISTS_RAISE: raise exception.VolumeBackendAPIException(data=msg) return return lun_info['ID'] def extend_volume(self, volume, new_size): """Extend a volume.""" lun_id = self._check_volume_exist_on_array( volume, constants.VOLUME_NOT_EXISTS_RAISE) opts = huawei_utils.get_volume_params(volume) if opts.get('replication_enabled'): msg = (_("Can't extend replication volume, volume: %(id)s") % {"id": volume.id}) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) lun_info = self.client.get_lun_info(lun_id) old_size = int(lun_info.get('CAPACITY')) new_size = int(new_size) * units.Gi / 512 if new_size == old_size: LOG.info("New size is equal to the real size from backend" " storage, no need to extend." " realsize: %(oldsize)s, newsize: %(newsize)s.", {'oldsize': old_size, 'newsize': new_size}) return if new_size < old_size: msg = (_("New size should be bigger than the real size from " "backend storage." " realsize: %(oldsize)s, newsize: %(newsize)s."), {'oldsize': old_size, 'newsize': new_size}) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) LOG.info('Extend volume: %(id)s, oldsize: %(oldsize)s, ' 'newsize: %(newsize)s.', {'id': volume.id, 'oldsize': old_size, 'newsize': new_size}) self.client.extend_lun(lun_id, new_size) def _create_snapshot_base(self, snapshot): lun_info = huawei_utils.get_lun_info(self.client, snapshot.volume) if not lun_info: msg = _("Parent volume of snapshot %s doesn't exist." ) % snapshot.id LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) snapshot_name = huawei_utils.encode_name(snapshot.id) snapshot_description = snapshot.id snapshot_info = self.client.create_snapshot(lun_info['ID'], snapshot_name, snapshot_description) snapshot_id = snapshot_info['ID'] return snapshot_id def create_snapshot(self, snapshot): snapshot_id = self._create_snapshot_base(snapshot) try: self.client.activate_snapshot(snapshot_id) except Exception: with excutils.save_and_reraise_exception(): LOG.error("Active snapshot %s failed, now deleting it.", snapshot_id) self.client.delete_snapshot(snapshot_id) snapshot_info = self.client.get_snapshot_info(snapshot_id) location = huawei_utils.to_string( huawei_snapshot_id=snapshot_id, huawei_snapshot_wwn=snapshot_info['WWN']) return {'provider_location': location} def delete_snapshot(self, snapshot): LOG.info('Delete snapshot %s.', snapshot.id) snapshot_info = huawei_utils.get_snapshot_info(self.client, snapshot) if snapshot_info: self.client.stop_snapshot(snapshot_info['ID']) self.client.delete_snapshot(snapshot_info['ID']) else: LOG.warning("Can't find snapshot on the array.") def retype(self, ctxt, volume, new_type, diff, host): """Convert the volume to be of the new type.""" LOG.debug("Enter retype: id=%(id)s, new_type=%(new_type)s, " "diff=%(diff)s, host=%(host)s.", {'id': volume.id, 'new_type': new_type, 'diff': diff, 'host': host}) self._check_volume_exist_on_array( volume, constants.VOLUME_NOT_EXISTS_RAISE) # Check what changes are needed migration, change_opts, lun_id = self.determine_changes_when_retype( volume, new_type, host) model_update = {} replica_enabled_change = change_opts.get('replication_enabled') replica_type_change = change_opts.get('replication_type') if replica_enabled_change and replica_enabled_change[0]: try: self.replica.delete_replica(volume) 
model_update.update({'replication_status': 'disabled', 'replication_driver_data': None}) except exception.VolumeBackendAPIException: LOG.exception('Retype volume error. ' 'Delete replication failed.') return False try: if migration: LOG.debug("Begin to migrate LUN(id: %(lun_id)s) with " "change %(change_opts)s.", {"lun_id": lun_id, "change_opts": change_opts}) if not self._migrate_volume(volume, host, new_type): LOG.warning("Storage-assisted migration failed during " "retype.") return False else: # Modify lun to change policy self.modify_lun(lun_id, change_opts) except exception.VolumeBackendAPIException: LOG.exception('Retype volume error.') return False if replica_enabled_change and replica_enabled_change[1]: try: # If replica_enabled_change is not None, the # replica_type_change won't be None. See function # determine_changes_when_retype. lun_info = self.client.get_lun_info(lun_id) replica_info = self.replica.create_replica( lun_info, replica_type_change[1]) model_update.update(replica_info) except exception.VolumeBackendAPIException: LOG.exception('Retype volume error. ' 'Create replication failed.') return False return (True, model_update) def modify_lun(self, lun_id, change_opts): if change_opts.get('partitionid'): old, new = change_opts['partitionid'] old_id = old[0] old_name = old[1] new_id = new[0] new_name = new[1] if old_id: self.client.remove_lun_from_partition(lun_id, old_id) if new_id: self.client.add_lun_to_partition(lun_id, new_id) LOG.info("Retype LUN(id: %(lun_id)s) smartpartition from " "(name: %(old_name)s, id: %(old_id)s) to " "(name: %(new_name)s, id: %(new_id)s) success.", {"lun_id": lun_id, "old_id": old_id, "old_name": old_name, "new_id": new_id, "new_name": new_name}) if change_opts.get('cacheid'): old, new = change_opts['cacheid'] old_id = old[0] old_name = old[1] new_id = new[0] new_name = new[1] if old_id: self.client.remove_lun_from_cache(lun_id, old_id) if new_id: self.client.add_lun_to_cache(lun_id, new_id) LOG.info("Retype LUN(id: %(lun_id)s) smartcache from " "(name: %(old_name)s, id: %(old_id)s) to " "(name: %(new_name)s, id: %(new_id)s) successfully.", {'lun_id': lun_id, 'old_id': old_id, "old_name": old_name, 'new_id': new_id, "new_name": new_name}) if change_opts.get('policy'): old_policy, new_policy = change_opts['policy'] self.client.change_lun_smarttier(lun_id, new_policy) LOG.info("Retype LUN(id: %(lun_id)s) smarttier policy from " "%(old_policy)s to %(new_policy)s success.", {'lun_id': lun_id, 'old_policy': old_policy, 'new_policy': new_policy}) if change_opts.get('qos'): old_qos, new_qos = change_opts['qos'] old_qos_id = old_qos[0] old_qos_value = old_qos[1] if old_qos_id: smart_qos = smartx.SmartQos(self.client) smart_qos.remove(old_qos_id, lun_id) if new_qos: smart_qos = smartx.SmartQos(self.client) smart_qos.add(new_qos, lun_id) LOG.info("Retype LUN(id: %(lun_id)s) smartqos from " "%(old_qos_value)s to %(new_qos)s success.", {'lun_id': lun_id, 'old_qos_value': old_qos_value, 'new_qos': new_qos}) def get_lun_specs(self, lun_id): lun_opts = { 'policy': None, 'partitionid': None, 'cacheid': None, 'LUNType': None, } lun_info = self.client.get_lun_info(lun_id) lun_opts['LUNType'] = int(lun_info['ALLOCTYPE']) if lun_info.get('DATATRANSFERPOLICY'): lun_opts['policy'] = lun_info['DATATRANSFERPOLICY'] if lun_info.get('SMARTCACHEPARTITIONID'): lun_opts['cacheid'] = lun_info['SMARTCACHEPARTITIONID'] if lun_info.get('CACHEPARTITIONID'): lun_opts['partitionid'] = lun_info['CACHEPARTITIONID'] return lun_opts def _check_capability_support(self, new_opts, 
new_type): new_cache_name = new_opts['cachename'] if new_cache_name: if not self.support_func.get('smartcache'): msg = (_( "Can't support cache on the array, cache name is: " "%(name)s.") % {'name': new_cache_name}) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) new_partition_name = new_opts['partitionname'] if new_partition_name: if not self.support_func.get('smartpartition'): msg = (_( "Can't support partition on the array, partition name is: " "%(name)s.") % {'name': new_partition_name}) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) if new_opts['policy']: if (not self.support_func.get('smarttier') and new_opts['policy'] != '0'): msg = (_("Can't support tier on the array.")) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) if new_opts['qos']: if not self.support_func.get('QoS_support'): msg = (_("Can't support qos on the array.")) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) def _check_needed_changes(self, lun_id, old_opts, new_opts, change_opts): new_cache_id = None new_cache_name = new_opts['cachename'] if new_cache_name: if self.support_func.get('smartcache'): new_cache_id = self.client.get_cache_id_by_name( new_cache_name) if new_cache_id is None: msg = (_( "Can't find cache name on the array, cache name is: " "%(name)s.") % {'name': new_cache_name}) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) new_partition_id = None new_partition_name = new_opts['partitionname'] if new_partition_name: if self.support_func.get('smartpartition'): new_partition_id = self.client.get_partition_id_by_name( new_partition_name) if new_partition_id is None: msg = (_( "Can't find partition name on the array, partition name " "is: %(name)s.") % {'name': new_partition_name}) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) # smarttier if old_opts['policy'] != new_opts['policy']: if not (old_opts['policy'] == '--' and new_opts['policy'] is None): change_opts['policy'] = (old_opts['policy'], new_opts['policy']) # smartcache old_cache_id = old_opts['cacheid'] if old_cache_id == '--': old_cache_id = None if old_cache_id != new_cache_id: old_cache_name = None if self.support_func.get('smartcache'): if old_cache_id: cache_info = self.client.get_cache_info_by_id( old_cache_id) old_cache_name = cache_info['NAME'] change_opts['cacheid'] = ([old_cache_id, old_cache_name], [new_cache_id, new_cache_name]) # smartpartition old_partition_id = old_opts['partitionid'] if old_partition_id == '--': old_partition_id = None if old_partition_id != new_partition_id: old_partition_name = None if self.support_func.get('smartpartition'): if old_partition_id: partition_info = self.client.get_partition_info_by_id( old_partition_id) old_partition_name = partition_info['NAME'] change_opts['partitionid'] = ([old_partition_id, old_partition_name], [new_partition_id, new_partition_name]) # smartqos new_qos = new_opts.get('qos') if not self.support_func.get('QoS_support'): if new_qos: msg = (_("Can't support qos on the array.")) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) else: old_qos_id = self.client.get_qosid_by_lunid(lun_id) old_qos = self._get_qos_specs_from_array(old_qos_id) if old_qos != new_qos: change_opts['qos'] = ([old_qos_id, old_qos], new_qos) return change_opts def determine_changes_when_retype(self, volume, new_type, host): migration = False change_opts = { 'policy': None, 'partitionid': None, 'cacheid': None, 'qos': None, 'host': None, 'LUNType': None, 'replication_enabled': None, 
'replication_type': None, } lun_info = huawei_utils.get_lun_info(self.client, volume) if not lun_info: msg = _("Volume %s doesn't exist.") % volume.id LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) lun_id = lun_info['ID'] old_opts = self.get_lun_specs(lun_id) new_opts = huawei_utils.get_volume_type_params(new_type) if 'LUNType' not in new_opts: new_opts['LUNType'] = self.configuration.lun_type if volume.host != host['host']: migration = True change_opts['host'] = (volume.host, host['host']) if old_opts['LUNType'] != new_opts['LUNType']: migration = True change_opts['LUNType'] = (old_opts['LUNType'], new_opts['LUNType']) volume_opts = huawei_utils.get_volume_params(volume) if (volume_opts['replication_enabled'] or new_opts['replication_enabled']): # If replication_enabled changes, # then replication_type in change_opts will be set. change_opts['replication_enabled'] = ( volume_opts['replication_enabled'], new_opts['replication_enabled']) change_opts['replication_type'] = (volume_opts['replication_type'], new_opts['replication_type']) change_opts = self._check_needed_changes(lun_id, old_opts, new_opts, change_opts) LOG.debug("Determine changes when retype. Migration: " "%(migration)s, change_opts: %(change_opts)s.", {'migration': migration, 'change_opts': change_opts}) return migration, change_opts, lun_id def _get_qos_specs_from_array(self, qos_id): qos = {} qos_info = {} if qos_id: qos_info = self.client.get_qos_info(qos_id) for key, value in qos_info.items(): key = key.upper() if key in constants.QOS_KEYS: if key == 'LATENCY' and value == '0': continue else: qos[key] = value return qos def create_export(self, context, volume, connector): """Export a volume.""" pass def ensure_export(self, context, volume): """Synchronously recreate an export for a volume.""" pass def remove_export(self, context, volume): """Remove an export for a volume.""" pass def create_export_snapshot(self, context, snapshot, connector): """Export a snapshot.""" pass def remove_export_snapshot(self, context, snapshot): """Remove an export for a snapshot.""" pass def _copy_volume(self, volume, copy_name, src_lun, tgt_lun): metadata = huawei_utils.get_volume_metadata(volume) copyspeed = metadata.get('copyspeed') luncopy_id = self.client.create_luncopy(copy_name, src_lun, tgt_lun, copyspeed) wait_interval = self.configuration.lun_copy_wait_interval try: self.client.start_luncopy(luncopy_id) def _luncopy_complete(): luncopy_info = self.client.get_luncopy_info(luncopy_id) if luncopy_info['status'] == constants.STATUS_LUNCOPY_READY: # luncopy_info['status'] means for the running status of # the luncopy. If luncopy_info['status'] is equal to '40', # this luncopy is completely ready. return True elif luncopy_info['state'] != constants.STATUS_HEALTH: # luncopy_info['state'] means for the healthy status of the # luncopy. If luncopy_info['state'] is not equal to '1', # this means that an error occurred during the LUNcopy # operation and we should abort it. err_msg = (_( 'An error occurred during the LUNcopy operation. ' 'LUNcopy name: %(luncopyname)s. ' 'LUNcopy status: %(luncopystatus)s. 
' 'LUNcopy state: %(luncopystate)s.') % {'luncopyname': luncopy_id, 'luncopystatus': luncopy_info['status'], 'luncopystate': luncopy_info['state']},) LOG.error(err_msg) raise exception.VolumeBackendAPIException(data=err_msg) huawei_utils.wait_for_condition(_luncopy_complete, wait_interval, self.configuration.lun_timeout) except Exception: with excutils.save_and_reraise_exception(): self.client.delete_luncopy(luncopy_id) self.delete_volume(volume) self.client.delete_luncopy(luncopy_id) def _check_lun_valid_for_manage(self, lun_info, external_ref): lun_id = lun_info.get('ID') lun_name = lun_info.get('NAME') # Check whether the LUN is already in LUN group. if lun_info.get('ISADD2LUNGROUP') == 'true': msg = (_("Can't import LUN %s to Cinder. Already exists in a LUN " "group.") % lun_id) raise exception.ManageExistingInvalidReference( existing_ref=external_ref, reason=msg) # Check whether the LUN is Normal. if lun_info.get('HEALTHSTATUS') != constants.STATUS_HEALTH: msg = _("Can't import LUN %s to Cinder. LUN status is not " "normal.") % lun_id raise exception.ManageExistingInvalidReference( existing_ref=external_ref, reason=msg) # Check whether the LUN exists in a HyperMetroPair. if self.support_func.get('hypermetro'): try: hypermetro_pairs = self.client.get_hypermetro_pairs() except exception.VolumeBackendAPIException: hypermetro_pairs = [] LOG.debug("Can't get hypermetro info, pass the check.") for pair in hypermetro_pairs: if pair.get('LOCALOBJID') == lun_id: msg = (_("Can't import LUN %s to Cinder. Already exists " "in a HyperMetroPair.") % lun_id) raise exception.ManageExistingInvalidReference( existing_ref=external_ref, reason=msg) # Check whether the LUN exists in a SplitMirror. if self.support_func.get('splitmirror'): try: split_mirrors = self.client.get_split_mirrors() except exception.VolumeBackendAPIException as ex: if re.search('License is unavailable', ex.msg): # Can't check whether the LUN has SplitMirror with it, # just pass the check and log it. split_mirrors = [] LOG.warning('No license for SplitMirror.') else: msg = _("Failed to get SplitMirror.") raise exception.VolumeBackendAPIException(data=msg) for mirror in split_mirrors: try: target_luns = self.client.get_target_luns(mirror.get('ID')) except exception.VolumeBackendAPIException: msg = _("Failed to get target LUN of SplitMirror.") raise exception.VolumeBackendAPIException(data=msg) if ((mirror.get('PRILUNID') == lun_id) or (lun_id in target_luns)): msg = (_("Can't import LUN %s to Cinder. Already exists " "in a SplitMirror.") % lun_id) raise exception.ManageExistingInvalidReference( existing_ref=external_ref, reason=msg) # Check whether the LUN exists in a migration task. try: migration_tasks = self.client.get_migration_task() except exception.VolumeBackendAPIException as ex: if re.search('License is unavailable', ex.msg): # Can't check whether the LUN has migration task with it, # just pass the check and log it. migration_tasks = [] LOG.warning('No license for migration.') else: msg = _("Failed to get migration task.") raise exception.VolumeBackendAPIException(data=msg) for migration in migration_tasks: if lun_id in (migration.get('PARENTID'), migration.get('TARGETLUNID')): msg = (_("Can't import LUN %s to Cinder. Already exists in a " "migration task.") % lun_id) raise exception.ManageExistingInvalidReference( existing_ref=external_ref, reason=msg) # Check whether the LUN exists in a LUN copy task. 
if self.support_func.get('luncopy'): lun_copy = lun_info.get('LUNCOPYIDS') if lun_copy and lun_copy[1:-1]: msg = (_("Can't import LUN %s to Cinder. Already exists in " "a LUN copy task.") % lun_id) raise exception.ManageExistingInvalidReference( existing_ref=external_ref, reason=msg) # Check whether the LUN exists in a remote replication task. rmt_replication = lun_info.get('REMOTEREPLICATIONIDS') if rmt_replication and rmt_replication[1:-1]: msg = (_("Can't import LUN %s to Cinder. Already exists in " "a remote replication task.") % lun_id) raise exception.ManageExistingInvalidReference( existing_ref=external_ref, reason=msg) # Check whether the LUN exists in a LUN mirror. if self.client.is_lun_in_mirror(lun_name): msg = (_("Can't import LUN %s to Cinder. Already exists in " "a LUN mirror.") % lun_name) raise exception.ManageExistingInvalidReference( existing_ref=external_ref, reason=msg) def manage_existing(self, volume, external_ref): """Manage an existing volume on the backend storage.""" # Check whether the LUN belongs to the specified pool. pool = volume_utils.extract_host(volume.host, 'pool') LOG.debug("Pool specified is: %s.", pool) lun_info = self._get_lun_info_by_ref(external_ref) lun_id = lun_info.get('ID') description = lun_info.get('DESCRIPTION', '') if len(description) <= ( constants.MAX_VOL_DESCRIPTION - len(volume.name) - 1): description = volume.name + ' ' + description lun_pool = lun_info.get('PARENTNAME') LOG.debug("Storage pool of existing LUN %(lun)s is %(pool)s.", {"lun": lun_id, "pool": lun_pool}) if pool != lun_pool: msg = (_("The specified LUN does not belong to the given " "pool: %s.") % pool) raise exception.ManageExistingInvalidReference( existing_ref=external_ref, reason=msg) # Check other conditions to determine whether this LUN can be imported. self._check_lun_valid_for_manage(lun_info, external_ref) type_id = volume.volume_type_id new_opts = None if type_id: # Handle volume type if specified. old_opts = self.get_lun_specs(lun_id) volume_type = volume_types.get_volume_type(None, type_id) new_opts = huawei_utils.get_volume_type_params(volume_type) if ('LUNType' in new_opts and old_opts['LUNType'] != new_opts['LUNType']): msg = (_("Can't import LUN %(lun_id)s to Cinder. " "LUN type mismatched.") % {'lun_id': lun_id}) raise exception.ManageExistingVolumeTypeMismatch(reason=msg) if volume_type: self._check_capability_support(new_opts, volume_type) change_opts = {'policy': None, 'partitionid': None, 'cacheid': None, 'qos': None} change_opts = self._check_needed_changes( lun_id, old_opts, new_opts, change_opts) self.modify_lun(lun_id, change_opts) # Rename the LUN to make it manageable for Cinder.
new_name = huawei_utils.encode_name(volume.id) LOG.debug("Rename LUN %(old_name)s to %(new_name)s.", {'old_name': lun_info.get('NAME'), 'new_name': new_name}) self.client.rename_lun(lun_id, new_name, description) location = huawei_utils.to_string(huawei_lun_id=lun_id, huawei_lun_wwn=lun_info['WWN']) model_update = {'provider_location': location} if new_opts and new_opts.get('replication_enabled'): LOG.debug("Manage volume need to create replication.") try: lun_info = self.client.get_lun_info(lun_id) replica_info = self.replica.create_replica( lun_info, new_opts.get('replication_type')) model_update.update(replica_info) except exception.VolumeBackendAPIException: with excutils.save_and_reraise_exception(): LOG.exception("Manage exist volume failed.") return model_update def _get_lun_info_by_ref(self, external_ref): LOG.debug("Get external_ref: %s", external_ref) name = external_ref.get('source-name') id = external_ref.get('source-id') if not (name or id): msg = _('Must specify source-name or source-id.') raise exception.ManageExistingInvalidReference( existing_ref=external_ref, reason=msg) lun_id = id or self.client.get_lun_id_by_name(name) if not lun_id: msg = _("Can't find LUN on the array, please check the " "source-name or source-id.") raise exception.ManageExistingInvalidReference( existing_ref=external_ref, reason=msg) lun_info = self.client.get_lun_info(lun_id) return lun_info def unmanage(self, volume): """Export Huawei volume from Cinder.""" LOG.debug("Unmanage volume: %s.", volume.id) def manage_existing_get_size(self, volume, external_ref): """Get the size of the existing volume.""" lun_info = self._get_lun_info_by_ref(external_ref) size = int(math.ceil(float(lun_info.get('CAPACITY')) / constants.CAPACITY_UNIT)) return size def _check_snapshot_valid_for_manage(self, snapshot_info, external_ref): snapshot_id = snapshot_info.get('ID') # Check whether the snapshot is normal. if snapshot_info.get('HEALTHSTATUS') != constants.STATUS_HEALTH: msg = _("Can't import snapshot %s to Cinder. " "Snapshot status is not normal" " or running status is not online.") % snapshot_id raise exception.ManageExistingInvalidReference( existing_ref=external_ref, reason=msg) if snapshot_info.get('EXPOSEDTOINITIATOR') != 'false': msg = _("Can't import snapshot %s to Cinder. " "Snapshot is exposed to initiator.") % snapshot_id raise exception.ManageExistingInvalidReference( existing_ref=external_ref, reason=msg) def _get_snapshot_info_by_ref(self, external_ref): LOG.debug("Get snapshot external_ref: %s.", external_ref) name = external_ref.get('source-name') id = external_ref.get('source-id') if not (name or id): msg = _('Must specify snapshot source-name or source-id.') raise exception.ManageExistingInvalidReference( existing_ref=external_ref, reason=msg) snapshot_id = id or self.client.get_snapshot_id_by_name(name) if not snapshot_id: msg = _("Can't find snapshot on array, please check the " "source-name or source-id.") raise exception.ManageExistingInvalidReference( existing_ref=external_ref, reason=msg) snapshot_info = self.client.get_snapshot_info(snapshot_id) return snapshot_info def manage_existing_snapshot(self, snapshot, existing_ref): snapshot_info = self._get_snapshot_info_by_ref(existing_ref) snapshot_id = snapshot_info.get('ID') parent_lun_info = huawei_utils.get_lun_info( self.client, snapshot.volume) if (not parent_lun_info or parent_lun_info['ID'] != snapshot_info.get('PARENTID')): msg = (_("Can't import snapshot %s to Cinder. 
" "Snapshot doesn't belong to volume."), snapshot_id) raise exception.ManageExistingInvalidReference( existing_ref=existing_ref, reason=msg) # Check whether this snapshot can be imported. self._check_snapshot_valid_for_manage(snapshot_info, existing_ref) # Rename the snapshot to make it manageable for Cinder. description = snapshot.id snapshot_name = huawei_utils.encode_name(snapshot.id) self.client.rename_snapshot(snapshot_id, snapshot_name, description) if snapshot_info.get('RUNNINGSTATUS') != constants.STATUS_ACTIVE: self.client.activate_snapshot(snapshot_id) LOG.debug("Rename snapshot %(old_name)s to %(new_name)s.", {'old_name': snapshot_info.get('NAME'), 'new_name': snapshot_name}) location = huawei_utils.to_string(huawei_snapshot_id=snapshot_id) return {'provider_location': location} def manage_existing_snapshot_get_size(self, snapshot, existing_ref): """Get the size of the existing snapshot.""" snapshot_info = self._get_snapshot_info_by_ref(existing_ref) size = int(math.ceil(float(snapshot_info.get('USERCAPACITY')) / constants.CAPACITY_UNIT)) return size def unmanage_snapshot(self, snapshot): """Unmanage the specified snapshot from Cinder management.""" LOG.debug("Unmanage snapshot: %s.", snapshot.id) def remove_host_with_check(self, host_id): wwns_in_host = ( self.client.get_host_fc_initiators(host_id)) iqns_in_host = ( self.client.get_host_iscsi_initiators(host_id)) if not (wwns_in_host or iqns_in_host or self.client.is_host_associated_to_hostgroup(host_id)): self.client.remove_host(host_id) def _get_group_type(self, group): opts = [] for vol_type in group.volume_types: opts.append(huawei_utils.get_volume_type_params(vol_type)) return opts def _check_group_type_support(self, opts, vol_type): if not opts: return False for opt in opts: if opt.get(vol_type) == 'true': return True return False def _get_group_type_value(self, opts, vol_type): if not opts: return for opt in opts: if vol_type in opt: return opt[vol_type] def create_group(self, context, group): """Creates a group.""" if not volume_utils.is_group_a_cg_snapshot_type(group): raise NotImplementedError() model_update = {'status': fields.GroupStatus.AVAILABLE} opts = self._get_group_type(group) if self._check_group_type_support(opts, 'hypermetro'): if not self.check_func_support("HyperMetro_ConsistentGroup"): msg = _("Can't create consistency group, array does not " "support hypermetro consistentgroup, " "group id: %(group_id)s." 
) % {"group_id": group.id} LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) metro = hypermetro.HuaweiHyperMetro(self.client, self.rmt_client, self.configuration) metro.create_consistencygroup(group) return model_update return model_update def create_group_from_src(self, context, group, volumes, group_snapshot=None, snapshots=None, source_group=None, source_vols=None): if not volume_utils.is_group_a_cg_snapshot_type(group): raise NotImplementedError() model_update = self.create_group(context, group) volumes_model_update = [] delete_snapshots = False if not snapshots and source_vols: snapshots = [] for src_vol in source_vols: vol_kwargs = { 'id': src_vol.id, 'provider_location': src_vol.provider_location, } snapshot_kwargs = {'id': str(uuid.uuid4()), 'volume': objects.Volume(**vol_kwargs), 'volume_size': src_vol.size} snapshot = objects.Snapshot(**snapshot_kwargs) snapshots.append(snapshot) snapshots_model_update = self._create_group_snapshot(snapshots) for i, model in enumerate(snapshots_model_update): snapshot = snapshots[i] snapshot.provider_location = model['provider_location'] delete_snapshots = True if snapshots: for i, vol in enumerate(volumes): snapshot = snapshots[i] vol_model_update = self.create_volume_from_snapshot( vol, snapshot) vol_model_update.update({'id': vol.id}) volumes_model_update.append(vol_model_update) if delete_snapshots: self._delete_group_snapshot(snapshots) return model_update, volumes_model_update def delete_group(self, context, group, volumes): if not volume_utils.is_group_a_cg_snapshot_type(group): raise NotImplementedError() opts = self._get_group_type(group) model_update = {'status': fields.GroupStatus.DELETED} volumes_model_update = [] if self._check_group_type_support(opts, 'hypermetro'): metro = hypermetro.HuaweiHyperMetro(self.client, self.rmt_client, self.configuration) metro.delete_consistencygroup(context, group, volumes) for volume in volumes: volume_model_update = {'id': volume.id} try: self.delete_volume(volume) except Exception: LOG.exception('Delete volume %s failed.', volume) volume_model_update.update({'status': 'error_deleting'}) else: volume_model_update.update({'status': 'deleted'}) volumes_model_update.append(volume_model_update) return model_update, volumes_model_update def update_group(self, context, group, add_volumes=None, remove_volumes=None): if not volume_utils.is_group_a_cg_snapshot_type(group): raise NotImplementedError() model_update = {'status': fields.GroupStatus.AVAILABLE} opts = self._get_group_type(group) if self._check_group_type_support(opts, 'hypermetro'): metro = hypermetro.HuaweiHyperMetro(self.client, self.rmt_client, self.configuration) metro.update_consistencygroup(context, group, add_volumes, remove_volumes) return model_update, None, None for volume in add_volumes: self._check_volume_exist_on_array( volume, constants.VOLUME_NOT_EXISTS_RAISE) return model_update, None, None def create_group_snapshot(self, context, group_snapshot, snapshots): """Create group snapshot.""" if not volume_utils.is_group_a_cg_snapshot_type(group_snapshot): raise NotImplementedError() LOG.info('Create group snapshot for group: %(group_id)s', {'group_id': group_snapshot.group_id}) snapshots_model_update = self._create_group_snapshot(snapshots) model_update = {'status': fields.GroupSnapshotStatus.AVAILABLE} return model_update, snapshots_model_update def _create_group_snapshot(self, snapshots): snapshots_model_update = [] added_snapshots_info = [] try: for snapshot in snapshots: snapshot_id = 
self._create_snapshot_base(snapshot) info = self.client.get_snapshot_info(snapshot_id) location = huawei_utils.to_string( huawei_snapshot_id=info['ID'], huawei_snapshot_wwn=info['WWN']) snapshot_model_update = { 'id': snapshot.id, 'status': fields.SnapshotStatus.AVAILABLE, 'provider_location': location, } snapshots_model_update.append(snapshot_model_update) added_snapshots_info.append(info) except Exception: with excutils.save_and_reraise_exception(): for added_snapshot in added_snapshots_info: self.client.delete_snapshot(added_snapshot['ID']) snapshot_ids = [added_snapshot['ID'] for added_snapshot in added_snapshots_info] try: self.client.activate_snapshot(snapshot_ids) except Exception: with excutils.save_and_reraise_exception(): LOG.error("Active group snapshots %s failed.", snapshot_ids) for snapshot_id in snapshot_ids: self.client.delete_snapshot(snapshot_id) return snapshots_model_update def delete_group_snapshot(self, context, group_snapshot, snapshots): """Delete group snapshot.""" if not volume_utils.is_group_a_cg_snapshot_type(group_snapshot): raise NotImplementedError() LOG.info('Delete group snapshot %(snap_id)s for group: ' '%(group_id)s', {'snap_id': group_snapshot.id, 'group_id': group_snapshot.group_id}) try: snapshots_model_update = self._delete_group_snapshot(snapshots) except Exception: with excutils.save_and_reraise_exception(): LOG.error("Delete group snapshots failed. " "Group snapshot id: %s", group_snapshot.id) model_update = {'status': fields.GroupSnapshotStatus.DELETED} return model_update, snapshots_model_update def _delete_group_snapshot(self, snapshots): snapshots_model_update = [] for snapshot in snapshots: self.delete_snapshot(snapshot) snapshot_model_update = { 'id': snapshot.id, 'status': fields.SnapshotStatus.DELETED } snapshots_model_update.append(snapshot_model_update) return snapshots_model_update def _classify_volume(self, volumes): normal_volumes = [] replica_volumes = [] for v in volumes: opts = huawei_utils.get_volume_params(v) if opts.get('replication_enabled'): replica_volumes.append(v) else: normal_volumes.append(v) return normal_volumes, replica_volumes def _failback_normal_volumes(self, volumes): volumes_update = [] for v in volumes: v_update = {} v_update['volume_id'] = v.id metadata = huawei_utils.get_volume_metadata(v) old_status = 'available' if 'old_status' in metadata: old_status = metadata.pop('old_status') v_update['updates'] = {'status': old_status, 'metadata': metadata} volumes_update.append(v_update) return volumes_update def _failback(self, volumes): if self.active_backend_id in ('', None): return 'default', [] normal_volumes, replica_volumes = self._classify_volume(volumes) volumes_update = [] replica_volumes_update = self.replica.failback(replica_volumes) volumes_update.extend(replica_volumes_update) normal_volumes_update = self._failback_normal_volumes(normal_volumes) volumes_update.extend(normal_volumes_update) self.active_backend_id = "" secondary_id = 'default' # Switch array connection. 
self.client, self.replica_client = self.replica_client, self.client self.replica = replication.ReplicaPairManager(self.client, self.replica_client, self.configuration) return secondary_id, volumes_update def _failover_normal_volumes(self, volumes): volumes_update = [] for v in volumes: v_update = {} v_update['volume_id'] = v.id metadata = huawei_utils.get_volume_metadata(v) metadata.update({'old_status': v.status}) v_update['updates'] = {'status': 'error', 'metadata': metadata} volumes_update.append(v_update) return volumes_update def _failover(self, volumes): if self.active_backend_id not in ('', None): return self.replica_dev_conf['backend_id'], [] normal_volumes, replica_volumes = self._classify_volume(volumes) volumes_update = [] replica_volumes_update = self.replica.failover(replica_volumes) volumes_update.extend(replica_volumes_update) normal_volumes_update = self._failover_normal_volumes(normal_volumes) volumes_update.extend(normal_volumes_update) self.active_backend_id = self.replica_dev_conf['backend_id'] secondary_id = self.active_backend_id # Switch array connection. self.client, self.replica_client = self.replica_client, self.client self.replica = replication.ReplicaPairManager(self.client, self.replica_client, self.configuration) return secondary_id, volumes_update def failover_host(self, context, volumes, secondary_id=None, groups=None): """Failover all volumes to secondary.""" if secondary_id == 'default': secondary_id, volumes_update = self._failback(volumes) elif (secondary_id == self.replica_dev_conf['backend_id'] or secondary_id is None): secondary_id, volumes_update = self._failover(volumes) else: msg = _("Invalid secondary id %s.") % secondary_id LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) return secondary_id, volumes_update, [] def initialize_connection_snapshot(self, snapshot, connector, **kwargs): """Map a snapshot to a host and return target iSCSI information.""" # From the volume structure. volume = Volume(id=snapshot.id, provider_location=snapshot.provider_location, lun_type=constants.SNAPSHOT_TYPE, metadata=None) return self.initialize_connection(volume, connector) def terminate_connection_snapshot(self, snapshot, connector, **kwargs): """Delete map between a snapshot and a host.""" # From the volume structure. volume = Volume(id=snapshot.id, provider_location=snapshot.provider_location, lun_type=constants.SNAPSHOT_TYPE, metadata=None) return self.terminate_connection(volume, connector) def get_lun_id_and_type(self, volume): if hasattr(volume, 'lun_type'): metadata = huawei_utils.get_snapshot_private_data(volume) lun_id = metadata['huawei_snapshot_id'] lun_type = constants.SNAPSHOT_TYPE else: lun_id = self._check_volume_exist_on_array( volume, constants.VOLUME_NOT_EXISTS_RAISE) lun_type = constants.LUN_TYPE return lun_id, lun_type ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/drivers/huawei/constants.py0000664000175000017500000000770100000000000023175 0ustar00zuulzuul00000000000000# Copyright (c) 2016 Huawei Technologies Co., Ltd. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. STATUS_HEALTH = '1' STATUS_ACTIVE = '43' STATUS_RUNNING = '10' STATUS_VOLUME_READY = '27' STATUS_LUNCOPY_READY = '40' STATUS_QOS_ACTIVE = '2' QOS_INACTIVATED = '45' LUN_TYPE = '11' SNAPSHOT_TYPE = '27' BLOCK_STORAGE_POOL_TYPE = '1' FILE_SYSTEM_POOL_TYPE = '2' HOSTGROUP_PREFIX = 'OpenStack_HostGroup_' LUNGROUP_PREFIX = 'OpenStack_LunGroup_' MAPPING_VIEW_PREFIX = 'OpenStack_Mapping_View_' PORTGROUP_PREFIX = 'OpenStack_PortGroup_' QOS_NAME_PREFIX = 'OpenStack_' PORTGROUP_DESCRIP_PREFIX = "Please do NOT modify this. Engine ID: " ARRAY_VERSION = 'V300R003C00' FC_PORT_CONNECTED = '10' FC_INIT_ONLINE = '27' FC_PORT_MODE_FABRIC = '0' CAPACITY_UNIT = 1024 * 1024 * 2 DEFAULT_WAIT_TIMEOUT = 3600 * 24 * 30 DEFAULT_WAIT_INTERVAL = 5 MIGRATION_WAIT_INTERVAL = 5 MIGRATION_FAULT = '74' MIGRATION_COMPLETE = '76' ERROR_CONNECT_TO_SERVER = -403 ERROR_UNAUTHORIZED_TO_SERVER = -401 SOCKET_TIMEOUT = 52 ERROR_VOLUME_ALREADY_EXIST = 1077948993 LOGIN_SOCKET_TIMEOUT = 4 ERROR_VOLUME_NOT_EXIST = 1077939726 RELOGIN_ERROR_PASS = [ERROR_VOLUME_NOT_EXIST] RUNNING_NORMAL = '1' RUNNING_SYNC = '23' RUNNING_STOP = '41' HEALTH_NORMAL = '1' NO_SPLITMIRROR_LICENSE = 1077950233 NO_MIGRATION_LICENSE = 1073806606 THICK_LUNTYPE = 0 THIN_LUNTYPE = 1 MAX_NAME_LENGTH = 31 MAX_VOL_DESCRIPTION = 170 PORT_NUM_PER_CONTR = 2 PWD_EXPIRED = 3 PWD_RESET = 4 OS_TYPE = {'Linux': '0', 'Windows': '1', 'Solaris': '2', 'HP-UX': '3', 'AIX': '4', 'XenServer': '5', 'Mac OS X': '6', 'VMware ESX': '7'} HUAWEI_VALID_KEYS = ['maxIOPS', 'minIOPS', 'minBandWidth', 'maxBandWidth', 'latency', 'IOType'] QOS_KEYS = [i.upper() for i in HUAWEI_VALID_KEYS] EXTRA_QOS_KEYS = ['MAXIOPS', 'MINIOPS', 'MINBANDWIDTH', 'MAXBANDWIDTH'] LOWER_LIMIT_KEYS = ['MINIOPS', 'LATENCY', 'MINBANDWIDTH'] UPPER_LIMIT_KEYS = ['MAXIOPS', 'MAXBANDWIDTH'] MAX_LUN_NUM_IN_QOS = 64 DEFAULT_REPLICA_WAIT_INTERVAL = 1 DEFAULT_REPLICA_WAIT_TIMEOUT = 20 REPLICA_SYNC_MODEL = '1' REPLICA_ASYNC_MODEL = '2' REPLICA_SPEED = '2' REPLICA_PERIOD = '3600' REPLICA_SECOND_RO = '2' REPLICA_SECOND_RW = '3' REPLICA_RUNNING_STATUS_KEY = 'RUNNINGSTATUS' REPLICA_RUNNING_STATUS_INITIAL_SYNC = '21' REPLICA_RUNNING_STATUS_SYNC = '23' REPLICA_RUNNING_STATUS_SYNCED = '24' REPLICA_RUNNING_STATUS_NORMAL = '1' REPLICA_RUNNING_STATUS_SPLIT = '26' REPLICA_RUNNING_STATUS_ERRUPTED = '34' REPLICA_RUNNING_STATUS_INVALID = '35' REPLICA_HEALTH_STATUS_KEY = 'HEALTHSTATUS' REPLICA_HEALTH_STATUS_NORMAL = '1' REPLICA_LOCAL_DATA_STATUS_KEY = 'PRIRESDATASTATUS' REPLICA_REMOTE_DATA_STATUS_KEY = 'SECRESDATASTATUS' REPLICA_DATA_SYNC_KEY = 'ISDATASYNC' REPLICA_DATA_STATUS_SYNCED = '1' REPLICA_DATA_STATUS_COMPLETE = '2' REPLICA_DATA_STATUS_INCOMPLETE = '3' LUN_TYPE_MAP = {'Thick': THICK_LUNTYPE, 'Thin': THIN_LUNTYPE} VALID_PRODUCT = ('V3', 'V5', '18000', 'Dorado') PRODUCT_LUN_TYPE = { 'Dorado': 'Thin', } VOLUME_NOT_EXISTS_WARN = 'warning' VOLUME_NOT_EXISTS_RAISE = 'raise' LUN_COPY_SPEED_TYPES = ( LUN_COPY_SPEED_LOW, LUN_COPY_SPEED_MEDIUM, LUN_COPY_SPEED_HIGH, LUN_COPY_SPEED_HIGHEST ) = ('1', '2', '3', '4') MAX_QUERY_COUNT = 100 CLONE_STATUS_HEALTH = '0' CLONE_STATUS_COMPLETE = (CLONE_COMPLETE,) = ('2',) CLONE_PAIR_NOT_EXIST = "1073798147" 
SUPPORT_CLONE_PAIR_VERSION = "V600R003C00" DEFAULT_MINIMUM_FC_INITIATOR_ONLINE = 0 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/drivers/huawei/fc_zone_helper.py0000664000175000017500000002613700000000000024147 0ustar00zuulzuul00000000000000# Copyright (c) 2016 Huawei Technologies Co., Ltd. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import json from oslo_log import log as logging from cinder import exception from cinder.i18n import _ from cinder.volume.drivers.huawei import constants LOG = logging.getLogger(__name__) class FCZoneHelper(object): """FC zone helper for Huawei driver.""" def __init__(self, fcsan_lookup_service, client): self.fcsan = fcsan_lookup_service self.client = client def _get_fc_ports_info(self): ports_info = {} data = self.client.get_fc_ports_on_array() for item in data: if item['RUNNINGSTATUS'] == constants.FC_PORT_CONNECTED: location = item['PARENTID'].split('.') port_info = {} port_info['id'] = item['ID'] port_info['contr'] = location[0] port_info['bandwidth'] = item['RUNSPEED'] ports_info[item['WWN']] = port_info return ports_info def _count_port_weight(self, port, ports_info): LOG.debug("Count weight for port: %s.", port) portgs = self.client.get_portgs_by_portid(ports_info[port]['id']) LOG.debug("Port %(port)s belongs to PortGroup %(portgs)s.", {"port": port, "portgs": portgs}) weight = 0 for portg in portgs: views = self.client.get_views_by_portg(portg) if not views: LOG.debug("PortGroup %s doesn't belong to any view.", portg) continue LOG.debug("PortGroup %(portg)s belongs to view %(views)s.", {"portg": portg, "views": views[0]}) # In fact, there is just one view for one port group. 
lungroup = self.client.get_lungroup_by_view(views[0]) lun_num = self.client.get_obj_count_from_lungroup(lungroup) ports_in_portg = self.client.get_ports_by_portg(portg) LOG.debug("PortGroup %(portg)s contains ports: %(ports)s.", {"portg": portg, "ports": ports_in_portg}) total_bandwidth = 0 for port_pg in ports_in_portg: if port_pg in ports_info: total_bandwidth += int(ports_info[port_pg]['bandwidth']) LOG.debug("Total bandwidth for PortGroup %(portg)s is %(bindw)s.", {"portg": portg, "bindw": total_bandwidth}) if total_bandwidth: weight += float(lun_num) / float(total_bandwidth) bandwidth = float(ports_info[port]['bandwidth']) return (weight, 10000 / bandwidth) def _get_weighted_ports_per_contr(self, ports, ports_info): port_weight_map = {} for port in ports: port_weight_map[port] = self._count_port_weight(port, ports_info) LOG.debug("port_weight_map: %s", port_weight_map) sorted_ports = sorted(port_weight_map.items(), key=lambda d: d[1]) weighted_ports = [] count = 0 for port in sorted_ports: if count >= constants.PORT_NUM_PER_CONTR: break weighted_ports.append(port[0]) count += 1 return weighted_ports def _get_weighted_ports(self, contr_port_map, ports_info, contrs): LOG.debug("_get_weighted_ports, we only select ports from " "controllers: %s", contrs) weighted_ports = [] for contr in contrs: if contr in contr_port_map: weighted_ports_per_contr = self._get_weighted_ports_per_contr( contr_port_map[contr], ports_info) LOG.debug("Selected ports %(ports)s on controller %(contr)s.", {"ports": weighted_ports_per_contr, "contr": contr}) weighted_ports.extend(weighted_ports_per_contr) return weighted_ports def _filter_by_fabric(self, wwns, ports): """Filter FC ports and initiators connected to fabrics.""" ini_tgt_map = self.fcsan.get_device_mapping_from_network(wwns, ports) fabric_connected_ports = [] fabric_connected_initiators = [] for fabric in ini_tgt_map: fabric_connected_ports.extend( ini_tgt_map[fabric]['target_port_wwn_list']) fabric_connected_initiators.extend( ini_tgt_map[fabric]['initiator_port_wwn_list']) if not fabric_connected_ports: msg = _("No FC port connected to fabric.") raise exception.VolumeBackendAPIException(data=msg) if not fabric_connected_initiators: msg = _("No initiator connected to fabric.") raise exception.VolumeBackendAPIException(data=msg) LOG.debug("Fabric connected ports: %(ports)s, " "Fabric connected initiators: %(initiators)s.", {'ports': fabric_connected_ports, 'initiators': fabric_connected_initiators}) return fabric_connected_ports, fabric_connected_initiators def _get_lun_engine_contrs(self, engines, lun_id, lun_type=constants.LUN_TYPE): contrs = [] engine_id = None lun_info = self.client.get_lun_info(lun_id, lun_type) lun_contr_id = lun_info['OWNINGCONTROLLER'] for engine in engines: contrs = json.loads(engine['NODELIST']) engine_id = engine['ID'] if lun_contr_id in contrs: break LOG.debug("LUN %(lun_id)s belongs to engine %(engine_id)s. 
Engine " "%(engine_id)s has controllers: %(contrs)s.", {"lun_id": lun_id, "engine_id": engine_id, "contrs": contrs}) return contrs, engine_id def _build_contr_port_map(self, fabric_connected_ports, ports_info): contr_port_map = {} for port in fabric_connected_ports: contr = ports_info[port]['contr'] if not contr_port_map.get(contr): contr_port_map[contr] = [] contr_port_map[contr].append(port) LOG.debug("Controller port map: %s.", contr_port_map) return contr_port_map def _create_new_portg(self, portg_name, engine_id): portg_id = self.client.get_tgt_port_group(portg_name) if portg_id: LOG.debug("Found port group %s not belonged to any view, " "deleting it.", portg_name) ports = self.client.get_fc_ports_by_portgroup(portg_id) for port_id in ports.values(): self.client.remove_port_from_portgroup(portg_id, port_id) self.client.delete_portgroup(portg_id) description = constants.PORTGROUP_DESCRIP_PREFIX + engine_id new_portg_id = self.client.create_portg(portg_name, description) return new_portg_id def build_ini_targ_map(self, wwns, host_id, lun_id, lun_type=constants.LUN_TYPE): engines = self.client.get_all_engines() LOG.debug("Get array engines: %s", engines) contrs, engine_id = self._get_lun_engine_contrs(engines, lun_id, lun_type) # Check if there is already a port group in the view. # If yes and have already considered the engine, # we won't change anything about the port group and zone. view_name = constants.MAPPING_VIEW_PREFIX + host_id portg_name = constants.PORTGROUP_PREFIX + host_id view_id = self.client.find_mapping_view(view_name) portg_info = self.client.get_portgroup_by_view(view_id) portg_id = portg_info[0]['ID'] if portg_info else None init_targ_map = {} if portg_id: description = portg_info[0].get("DESCRIPTION", '') engines = description.replace(constants.PORTGROUP_DESCRIP_PREFIX, "") engines = engines.split(',') ports = self.client.get_fc_ports_by_portgroup(portg_id) if engine_id in engines: LOG.debug("Have already selected ports for engine %s, just " "use them.", engine_id) return (list(ports.keys()), portg_id, init_targ_map) # Filter initiators and ports that connected to fabrics. ports_info = self._get_fc_ports_info() (fabric_connected_ports, fabric_connected_initiators) = ( self._filter_by_fabric(wwns, ports_info.keys())) # Build a controller->ports map for convenience. contr_port_map = self._build_contr_port_map(fabric_connected_ports, ports_info) # Get the 'best' ports for the given controllers. weighted_ports = self._get_weighted_ports(contr_port_map, ports_info, contrs) if not weighted_ports: msg = _("No FC port can be used for LUN %s.") % lun_id LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) # Handle port group. port_list = [ports_info[port]['id'] for port in weighted_ports] if portg_id: # Add engine ID to the description of the port group. self.client.append_portg_desc(portg_id, engine_id) # Extend the weighted_ports to include the ports already in the # port group. 
weighted_ports.extend(list(ports.keys())) else: portg_id = self._create_new_portg(portg_name, engine_id) for port in port_list: self.client.add_port_to_portg(portg_id, port) for ini in fabric_connected_initiators: init_targ_map[ini] = weighted_ports LOG.debug("build_ini_targ_map: Port group name: %(portg_name)s, " "init_targ_map: %(map)s.", {"portg_name": portg_name, "map": init_targ_map}) return weighted_ports, portg_id, init_targ_map def get_init_targ_map(self, wwns, host_id): error_ret = ([], None, {}) if not host_id: return error_ret view_name = constants.MAPPING_VIEW_PREFIX + host_id view_id = self.client.find_mapping_view(view_name) if not view_id: return error_ret port_group = self.client.get_portgroup_by_view(view_id) portg_id = port_group[0]['ID'] if port_group else None ports = self.client.get_fc_ports_by_portgroup(portg_id) for port_id in ports.values(): self.client.remove_port_from_portgroup(portg_id, port_id) init_targ_map = {} for wwn in wwns: init_targ_map[wwn] = list(ports.keys()) return list(ports.keys()), portg_id, init_targ_map ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/drivers/huawei/huawei_conf.py0000664000175000017500000004034600000000000023452 0ustar00zuulzuul00000000000000# Copyright (c) 2016 Huawei Technologies Co., Ltd. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Sets Huawei private configuration into Configuration object. For conveniently get private configuration. We parse Huawei config file and set every property into Configuration object as an attribute. 
""" import base64 import os import re from lxml import etree as ET from oslo_log import log as logging from cinder import exception from cinder.i18n import _ from cinder.volume.drivers.huawei import constants LOG = logging.getLogger(__name__) class HuaweiConf(object): def __init__(self, conf): self.conf = conf self.last_modify_time = None def update_config_value(self): file_time = os.stat(self.conf.cinder_huawei_conf_file).st_mtime if self.last_modify_time == file_time: return self.last_modify_time = file_time tree = ET.parse(self.conf.cinder_huawei_conf_file) xml_root = tree.getroot() self._encode_authentication(tree, xml_root) attr_funcs = ( self._san_address, self._san_user, self._san_password, self._san_vstore, self._san_product, self._ssl_cert_path, self._ssl_cert_verify, self._iscsi_info, self._fc_info, self._hypermetro_devices, self._replication_devices, self._lun_type, self._lun_ready_wait_interval, self._lun_copy_wait_interval, self._lun_copy_speed, self._lun_timeout, self._lun_write_type, self._lun_prefetch, self._lun_policy, self._lun_read_cache_policy, self._lun_write_cache_policy, self._storage_pools, self._get_local_minimum_fc_initiator, ) for f in attr_funcs: f(xml_root) def _encode_authentication(self, tree, xml_root): name_node = xml_root.find('Storage/UserName') pwd_node = xml_root.find('Storage/UserPassword') vstore_node = xml_root.find('Storage/vStoreName') need_encode = False if name_node is not None and not name_node.text.startswith('!$$$'): encoded = base64.b64encode( name_node.text.encode('latin-1')).decode() name_node.text = '!$$$' + encoded need_encode = True if pwd_node is not None and not pwd_node.text.startswith('!$$$'): encoded = base64.b64encode( pwd_node.text.encode('latin-1')).decode() pwd_node.text = '!$$$' + encoded need_encode = True if vstore_node is not None and not vstore_node.text.startswith('!$$$'): encoded = base64.b64encode( vstore_node.text.encode('latin-1')).decode() vstore_node.text = '!$$$' + encoded need_encode = True if need_encode: tree.write(self.conf.cinder_huawei_conf_file, encoding='UTF-8') def _san_address(self, xml_root): text = xml_root.findtext('Storage/RestURL') if not text: msg = _("RestURL is not configured.") LOG.error(msg) raise exception.InvalidInput(reason=msg) addrs = list(set([x.strip() for x in text.split(';') if x.strip()])) setattr(self.conf, 'san_address', addrs) def _san_user(self, xml_root): text = xml_root.findtext('Storage/UserName') if not text: msg = _("UserName is not configured.") LOG.error(msg) raise exception.InvalidInput(reason=msg) user = base64.b64decode(text[4:].encode('latin-1')).decode() setattr(self.conf, 'san_user', user) def _san_password(self, xml_root): text = xml_root.findtext('Storage/UserPassword') if not text: msg = _("UserPassword is not configured.") LOG.error(msg) raise exception.InvalidInput(reason=msg) pwd = base64.b64decode(text[4:].encode('latin-1')).decode() setattr(self.conf, 'san_password', pwd) def _san_vstore(self, xml_root): vstore = None text = xml_root.findtext('Storage/vStoreName') if text: vstore = base64.b64decode(text[4:].encode('latin-1')).decode() setattr(self.conf, 'vstore_name', vstore) def _ssl_cert_path(self, xml_root): text = xml_root.findtext('Storage/SSLCertPath') setattr(self.conf, 'ssl_cert_path', text) def _ssl_cert_verify(self, xml_root): value = False text = xml_root.findtext('Storage/SSLCertVerify') if text: if text.lower() in ('true', 'false'): value = text.lower() == 'true' else: msg = _("SSLCertVerify configured error.") LOG.error(msg) raise 
exception.InvalidInput(reason=msg) setattr(self.conf, 'ssl_cert_verify', value) def _set_extra_constants_by_product(self, product): extra_constants = {} if product == 'Dorado': extra_constants['QOS_SPEC_KEYS'] = ( 'maxIOPS', 'maxBandWidth', 'IOType') extra_constants['QOS_IOTYPES'] = ('2',) extra_constants['SUPPORT_LUN_TYPES'] = ('Thin',) extra_constants['DEFAULT_LUN_TYPE'] = 'Thin' else: extra_constants['QOS_SPEC_KEYS'] = ( 'maxIOPS', 'minIOPS', 'minBandWidth', 'maxBandWidth', 'latency', 'IOType') extra_constants['QOS_IOTYPES'] = ('0', '1', '2') extra_constants['SUPPORT_LUN_TYPES'] = ('Thick', 'Thin') extra_constants['DEFAULT_LUN_TYPE'] = 'Thick' for k in extra_constants: setattr(constants, k, extra_constants[k]) def _san_product(self, xml_root): text = xml_root.findtext('Storage/Product') if not text: msg = _("SAN product is not configured.") LOG.error(msg) raise exception.InvalidInput(reason=msg) product = text.strip() if product not in constants.VALID_PRODUCT: msg = _("Invalid SAN product %(text)s, SAN product must be " "in %(valid)s.") % {'text': product, 'valid': constants.VALID_PRODUCT} LOG.error(msg) raise exception.InvalidInput(reason=msg) self._set_extra_constants_by_product(product) setattr(self.conf, 'san_product', product) def _lun_type(self, xml_root): lun_type = constants.DEFAULT_LUN_TYPE text = xml_root.findtext('LUN/LUNType') if text: lun_type = text.strip() if lun_type not in constants.LUN_TYPE_MAP: msg = _("Invalid lun type %s is configured.") % lun_type LOG.error(msg) raise exception.InvalidInput(reason=msg) if lun_type not in constants.SUPPORT_LUN_TYPES: msg = _("%(array)s array requires %(valid)s lun type, " "but %(conf)s is specified." ) % {'array': self.conf.san_product, 'valid': constants.SUPPORT_LUN_TYPES, 'conf': lun_type} LOG.error(msg) raise exception.InvalidInput(reason=msg) setattr(self.conf, 'lun_type', constants.LUN_TYPE_MAP[lun_type]) def _lun_ready_wait_interval(self, xml_root): text = xml_root.findtext('LUN/LUNReadyWaitInterval') interval = text.strip() if text else constants.DEFAULT_WAIT_INTERVAL setattr(self.conf, 'lun_ready_wait_interval', int(interval)) def _lun_copy_wait_interval(self, xml_root): text = xml_root.findtext('LUN/LUNcopyWaitInterval') interval = text.strip() if text else constants.DEFAULT_WAIT_INTERVAL setattr(self.conf, 'lun_copy_wait_interval', int(interval)) def _lun_timeout(self, xml_root): text = xml_root.findtext('LUN/Timeout') interval = text.strip() if text else constants.DEFAULT_WAIT_TIMEOUT setattr(self.conf, 'lun_timeout', int(interval)) def _lun_write_type(self, xml_root): text = xml_root.findtext('LUN/WriteType') if text and text.strip(): setattr(self.conf, 'write_type', text.strip()) def _lun_prefetch(self, xml_root): node = xml_root.find('LUN/Prefetch') if node is not None: if 'Type' in node.attrib: prefetch_type = node.attrib['Type'].strip() setattr(self.conf, 'prefetch_type', prefetch_type) if 'Value' in node.attrib: prefetch_value = node.attrib['Value'].strip() setattr(self.conf, 'prefetch_value', prefetch_value) def _lun_policy(self, xml_root): setattr(self.conf, 'lun_policy', '0') def _lun_read_cache_policy(self, xml_root): setattr(self.conf, 'lun_read_cache_policy', '2') def _lun_write_cache_policy(self, xml_root): setattr(self.conf, 'lun_write_cache_policy', '5') def _storage_pools(self, xml_root): text = xml_root.findtext('LUN/StoragePool') if not text: msg = _('Storage pool is not configured.') LOG.error(msg) raise exception.InvalidInput(reason=msg) pools = set(x.strip() for x in text.split(';') if x.strip()) if 
not pools: msg = _('No valid storage pool configured.') LOG.error(msg) raise exception.InvalidInput(msg) setattr(self.conf, 'storage_pools', list(pools)) def _iscsi_info(self, xml_root): iscsi_info = { 'default_target_ips': [], 'CHAPinfo': xml_root.findtext('iSCSI/CHAPinfo'), 'ALUA': xml_root.findtext('iSCSI/ALUA'), 'FAILOVERMODE': xml_root.findtext('iSCSI/FAILOVERMODE'), 'SPECIALMODETYPE': xml_root.findtext('iSCSI/SPECIALMODETYPE'), 'PATHTYPE': xml_root.findtext('iSCSI/PATHTYPE'), } text = xml_root.findtext('iSCSI/DefaultTargetIP') if text: iscsi_info['default_target_ips'] = [ ip.strip() for ip in text.split(';') if ip.strip()] initiators = {} nodes = xml_root.findall('iSCSI/Initiator') for node in nodes or []: if 'Name' not in node.attrib: msg = _('Name must be specified for initiator.') LOG.error(msg) raise exception.InvalidInput(msg) initiators[node.attrib['Name']] = node.attrib iscsi_info['initiators'] = initiators setattr(self.conf, 'iscsi_info', iscsi_info) def _fc_info(self, xml_root): fc_info = { 'ALUA': xml_root.findtext('FC/ALUA'), 'FAILOVERMODE': xml_root.findtext('FC/FAILOVERMODE'), 'SPECIALMODETYPE': xml_root.findtext('FC/SPECIALMODETYPE'), 'PATHTYPE': xml_root.findtext('FC/PATHTYPE'), } initiators = {} nodes = xml_root.findall('FC/Initiator') for node in nodes or []: if 'Name' not in node.attrib: msg = _('Name must be specified for initiator.') LOG.error(msg) raise exception.InvalidInput(msg) initiators[node.attrib['Name']] = node.attrib fc_info['initiators'] = initiators setattr(self.conf, 'fc_info', fc_info) def _parse_remote_initiator_info(self, dev, ini_type): ini_info = {'default_target_ips': []} if dev.get('iscsi_default_target_ip'): ini_info['default_target_ips'] = dev[ 'iscsi_default_target_ip'].split(';') initiators = {} if ini_type in dev: # Analyze initiators configure text, convert to: # [{'Name':'xxx'}, {'Name':'xxx','CHAPinfo':'mm-usr#mm-pwd'}] ini_list = re.split(r'\s', dev[ini_type]) def _convert_one_iscsi_info(ini_text): # get initiator configure attr list attr_list = re.split('[{;}]', ini_text) # get initiator configures ini = {} for attr in attr_list: if not attr: continue pair = attr.split(':', 1) if pair[0] == 'CHAPinfo': value = pair[1].replace('#', ';', 1) else: value = pair[1] ini[pair[0]] = value if 'Name' not in ini: msg = _('Name must be specified for initiator.') LOG.error(msg) raise exception.InvalidInput(msg) return ini for text in ini_list: ini = _convert_one_iscsi_info(text) initiators[ini['Name']] = ini ini_info['initiators'] = initiators return ini_info def _hypermetro_devices(self, xml_root): dev = self.conf.safe_get('hypermetro_device') config = {} if dev: config = { 'san_address': dev['san_address'].split(';'), 'san_user': dev['san_user'], 'san_password': dev['san_password'], 'vstore_name': dev.get('vstore_name'), 'metro_domain': dev['metro_domain'], 'storage_pools': dev['storage_pool'].split(';')[:1], 'iscsi_info': self._parse_remote_initiator_info( dev, 'iscsi_info'), 'fc_info': self._parse_remote_initiator_info( dev, 'fc_info'), } setattr(self.conf, 'hypermetro', config) def _replication_devices(self, xml_root): replication_devs = self.conf.safe_get('replication_device') config = {} if replication_devs: dev = replication_devs[0] config = { 'backend_id': dev['backend_id'], 'san_address': dev['san_address'].split(';'), 'san_user': dev['san_user'], 'san_password': dev['san_password'], 'vstore_name': dev.get('vstore_name'), 'storage_pools': dev['storage_pool'].split(';')[:1], 'iscsi_info': self._parse_remote_initiator_info( dev, 
'iscsi_info'), 'fc_info': self._parse_remote_initiator_info( dev, 'fc_info'), } setattr(self.conf, 'replication', config) def _lun_copy_speed(self, xml_root): text = xml_root.findtext('LUN/LUNCopySpeed') if text and text.strip() not in constants.LUN_COPY_SPEED_TYPES: msg = (_("Invalid LUNCopySpeed '%(text)s', LUNCopySpeed must " "be between %(low)s and %(high)s.") % {"text": text, "low": constants.LUN_COPY_SPEED_LOW, "high": constants.LUN_COPY_SPEED_HIGHEST}) LOG.error(msg) raise exception.InvalidInput(reason=msg) if not text: speed = constants.LUN_COPY_SPEED_MEDIUM else: speed = text.strip() setattr(self.conf, 'lun_copy_speed', int(speed)) def _get_local_minimum_fc_initiator(self, xml_root): text = xml_root.findtext('FC/MinOnlineFCInitiator') minimum_fc_initiator = constants.DEFAULT_MINIMUM_FC_INITIATOR_ONLINE if not text: LOG.info("MinOnlineFCInitiator not set, using default.") setattr(self.conf, 'min_fc_ini_online', minimum_fc_initiator) return text = text.strip() if not text.isdigit(): msg = (_("Invalid FC MinOnlineFCInitiator '%s', " "MinOnlineFCInitiator must be a digit.") % text) LOG.error(msg) raise exception.InvalidInput(reason=msg) minimum_fc_initiator = int(text) setattr(self.conf, 'min_fc_ini_online', minimum_fc_initiator) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/drivers/huawei/huawei_driver.py0000664000175000017500000005622000000000000024016 0ustar00zuulzuul00000000000000# Copyright (c) 2016 Huawei Technologies Co., Ltd. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import json from oslo_log import log as logging from oslo_utils import strutils from cinder.common import constants as cinder_constants from cinder import coordination from cinder import exception from cinder.i18n import _ from cinder import interface from cinder.volume import driver from cinder.volume.drivers.huawei import common from cinder.volume.drivers.huawei import constants from cinder.volume.drivers.huawei import fc_zone_helper from cinder.volume.drivers.huawei import huawei_utils from cinder.volume.drivers.huawei import hypermetro from cinder.zonemanager import utils as fczm_utils LOG = logging.getLogger(__name__) @interface.volumedriver class HuaweiISCSIDriver(common.HuaweiBaseDriver, driver.ISCSIDriver): """ISCSI driver for Huawei storage arrays. Version history: .. 
code-block:: none 1.0.0 - Initial driver 1.1.0 - Provide Huawei OceanStor storage 18000 driver 1.1.1 - Code refactor CHAP support Multiple pools support ISCSI multipath support SmartX support Volume migration support Volume retype support 2.0.0 - Rename to HuaweiISCSIDriver 2.0.1 - Manage/unmanage volume support 2.0.2 - Refactor HuaweiISCSIDriver 2.0.3 - Manage/unmanage snapshot support 2.0.5 - Replication V2 support 2.0.6 - Support iSCSI configuration in Replication 2.0.7 - Hypermetro support Hypermetro consistency group support Consistency group support Cgsnapshot support 2.0.8 - Backup snapshot optimal path support 2.0.9 - Support reporting disk type of pool """ VERSION = "2.0.9" def __init__(self, *args, **kwargs): super(HuaweiISCSIDriver, self).__init__(*args, **kwargs) def get_volume_stats(self, refresh=False): """Get volume status.""" data = self._get_volume_stats(refresh=False) data['storage_protocol'] = cinder_constants.ISCSI data['driver_version'] = self.VERSION return data @coordination.synchronized('huawei-mapping-{connector[host]}') def initialize_connection(self, volume, connector): """Map a volume to a host and return target iSCSI information.""" lun_id, lun_type = self.get_lun_id_and_type(volume) initiator_name = connector['initiator'] LOG.info( 'initiator name: %(initiator_name)s, ' 'LUN ID: %(lun_id)s.', {'initiator_name': initiator_name, 'lun_id': lun_id}) (iscsi_iqns, target_ips, portgroup_id) = self.client.get_iscsi_params(connector) LOG.info('initialize_connection, iscsi_iqn: %(iscsi_iqn)s, ' 'target_ip: %(target_ip)s, ' 'portgroup_id: %(portgroup_id)s.', {'iscsi_iqn': iscsi_iqns, 'target_ip': target_ips, 'portgroup_id': portgroup_id},) # Create hostgroup if not exist. host_id = self.client.add_host_with_check(connector['host']) # Add initiator to the host. self.client.ensure_initiator_added(initiator_name, host_id) hostgroup_id = self.client.add_host_to_hostgroup(host_id) # Mapping lungroup and hostgroup to view. self.client.do_mapping(lun_id, hostgroup_id, host_id, portgroup_id, lun_type) hostlun_id = self.client.get_host_lun_id(host_id, lun_id, lun_type) LOG.info("initialize_connection, host lun id is: %s.", hostlun_id) chapinfo = self.client.find_chap_info(self.client.iscsi_info, initiator_name) # Return iSCSI properties. properties = {} properties['target_discovered'] = False properties['volume_id'] = volume.id multipath = connector.get('multipath', False) hostlun_id = int(hostlun_id) if not multipath: properties['target_portal'] = ('%s:3260' % target_ips[0]) properties['target_iqn'] = iscsi_iqns[0] properties['target_lun'] = hostlun_id else: properties['target_iqns'] = [iqn for iqn in iscsi_iqns] properties['target_portals'] = [ '%s:3260' % ip for ip in target_ips] properties['target_luns'] = [hostlun_id] * len(target_ips) # If use CHAP, return CHAP info. if chapinfo: chap_username, chap_password = chapinfo.split(';') properties['auth_method'] = 'CHAP' properties['auth_username'] = chap_username properties['auth_password'] = chap_password LOG.info("initialize_connection success. 
Return data: %s.", strutils.mask_password(properties)) return {'driver_volume_type': 'iscsi', 'data': properties} @coordination.synchronized('huawei-mapping-{connector[host]}') def terminate_connection(self, volume, connector, **kwargs): """Delete map between a volume and a host.""" lun_id, lun_type = self.get_lun_id_and_type(volume) initiator_name = connector['initiator'] host_name = connector['host'] lungroup_id = None LOG.info( 'terminate_connection: initiator name: %(ini)s, ' 'LUN ID: %(lunid)s.', {'ini': initiator_name, 'lunid': lun_id},) portgroup = None portgroup_id = None view_id = None left_lunnum = -1 ini = self.client.iscsi_info['initiators'].get(initiator_name) if ini and ini.get('TargetPortGroup'): portgroup = ini['TargetPortGroup'] if portgroup: portgroup_id = self.client.get_tgt_port_group(portgroup) host_id = huawei_utils.get_host_id(self.client, host_name) if host_id: mapping_view_name = constants.MAPPING_VIEW_PREFIX + host_id view_id = self.client.find_mapping_view(mapping_view_name) if view_id: lungroup_id = self.client.find_lungroup_from_map(view_id) # Remove lun from lungroup. if lun_id and lungroup_id: lungroup_ids = self.client.get_lungroupids_by_lunid( lun_id, lun_type) if lungroup_id in lungroup_ids: self.client.remove_lun_from_lungroup(lungroup_id, lun_id, lun_type) else: LOG.warning("LUN is not in lungroup. " "LUN ID: %(lun_id)s. " "Lungroup id: %(lungroup_id)s.", {"lun_id": lun_id, "lungroup_id": lungroup_id}) # Remove portgroup from mapping view if no lun left in lungroup. if lungroup_id: left_lunnum = self.client.get_obj_count_from_lungroup(lungroup_id) if portgroup_id and view_id and (int(left_lunnum) <= 0): if self.client.is_portgroup_associated_to_view(view_id, portgroup_id): self.client.delete_portgroup_mapping_view(view_id, portgroup_id) if view_id and (int(left_lunnum) <= 0): self.client.remove_chap(initiator_name) if self.client.lungroup_associated(view_id, lungroup_id): self.client.delete_lungroup_mapping_view(view_id, lungroup_id) self.client.delete_lungroup(lungroup_id) if self.client.is_initiator_associated_to_host(initiator_name, host_id): self.client.remove_iscsi_from_host(initiator_name) hostgroup_name = constants.HOSTGROUP_PREFIX + host_id hostgroup_id = self.client.find_hostgroup(hostgroup_name) if hostgroup_id: if self.client.hostgroup_associated(view_id, hostgroup_id): self.client.delete_hostgoup_mapping_view(view_id, hostgroup_id) self.client.remove_host_from_hostgroup(hostgroup_id, host_id) self.client.delete_hostgroup(hostgroup_id) self.client.remove_host(host_id) self.client.delete_mapping_view(view_id) @interface.volumedriver class HuaweiFCDriver(common.HuaweiBaseDriver, driver.FibreChannelDriver): """FC driver for Huawei OceanStor storage arrays. Version history: .. 
code-block:: none 1.0.0 - Initial driver 1.1.0 - Provide Huawei OceanStor 18000 storage volume driver 1.1.1 - Code refactor Multiple pools support SmartX support Volume migration support Volume retype support FC zone enhancement Volume hypermetro support 2.0.0 - Rename to HuaweiFCDriver 2.0.1 - Manage/unmanage volume support 2.0.2 - Refactor HuaweiFCDriver 2.0.3 - Manage/unmanage snapshot support 2.0.4 - Balanced FC port selection 2.0.5 - Replication V2 support 2.0.7 - Hypermetro support Hypermetro consistency group support Consistency group support Cgsnapshot support 2.0.8 - Backup snapshot optimal path support 2.0.9 - Support reporting disk type of pool """ VERSION = "2.0.9" def __init__(self, *args, **kwargs): super(HuaweiFCDriver, self).__init__(*args, **kwargs) self.fcsan = None def get_volume_stats(self, refresh=False): """Get volume status.""" data = self._get_volume_stats(refresh=False) data['storage_protocol'] = cinder_constants.FC data['driver_version'] = self.VERSION return data def _check_fc_links(self, wwns, online_wwns_in_host, online_free_wwns, host_id): wwns_check = [] for wwn in wwns: if wwn in online_wwns_in_host or wwn in online_free_wwns: wwns_check.append(wwn) continue LOG.warning("Can't add FC initiator %(wwn)s to host " "%(host)s, please check if this initiator has" " been added to other host or isn't present " "on array.", {"wwn": wwn, "host": host_id}) if (self.configuration.min_fc_ini_online == constants.DEFAULT_MINIMUM_FC_INITIATOR_ONLINE): wwns_in_host = ( self.client.get_host_fc_initiators(host_id)) iqns_in_host = ( self.client.get_host_iscsi_initiators(host_id)) if not (wwns_in_host or iqns_in_host or self.client.is_host_associated_to_hostgroup( host_id)): self.client.remove_host(host_id) msg = _("There is an FC initiator in an invalid " "state. If you want to continue to attach " "volume to host, configure MinFCIniOnline " "in the XML file.") LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) if len(wwns_check) < self.configuration.min_fc_ini_online: msg = (_("The number of online FC initiator %(wwns)s less than" " the set number: %(set)s.") % {"wwns": wwns_check, "set": self.configuration.min_fc_ini_online}) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) return wwns_check @coordination.synchronized('huawei-mapping-{connector[host]}') def initialize_connection(self, volume, connector): lun_id, lun_type = self.get_lun_id_and_type(volume) wwns = connector['wwpns'] LOG.info( 'initialize_connection, initiator: %(wwpns)s,' ' LUN ID: %(lun_id)s.', {'wwpns': wwns, 'lun_id': lun_id},) portg_id = None host_id = self.client.add_host_with_check(connector['host']) if not self.fcsan: self.fcsan = fczm_utils.create_lookup_service() if self.fcsan: # Use FC switch. zone_helper = fc_zone_helper.FCZoneHelper(self.fcsan, self.client) try: (tgt_port_wwns, portg_id, init_targ_map) = ( zone_helper.build_ini_targ_map(wwns, host_id, lun_id, lun_type)) except Exception as err: self.remove_host_with_check(host_id) msg = _('build_ini_targ_map fails. %s') % err raise exception.VolumeBackendAPIException(data=msg) for ini in init_targ_map: self.client.ensure_fc_initiator_added(ini, host_id) else: # Not use FC switch. 
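            # Added illustrative note (not part of the original driver): in
            # this direct-attach path the connector WWPNs are first reduced to
            # initiators the array actually reports, then _check_fc_links()
            # enforces the configured min_fc_ini_online threshold before any
            # free initiator is bound to the host. The init_targ_map built a
            # few lines below has the usual Cinder FC shape, e.g. (values
            # hypothetical):
            #   {'2100000e1e1a2b3c': ['2200000e1e1a9f00', '2200000e1e1a9f01']}
            # i.e. a list of target WWPNs per initiator WWPN.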
online_wwns_in_host = ( self.client.get_host_online_fc_initiators(host_id)) online_free_wwns = self.client.get_online_free_wwns() fc_initiators_on_array = self.client.get_fc_initiator_on_array() wwns = [i for i in wwns if i in fc_initiators_on_array] wwns = self._check_fc_links( wwns, online_wwns_in_host, online_free_wwns, host_id) for wwn in wwns: if wwn in online_free_wwns: self.client.add_fc_port_to_host(host_id, wwn) (tgt_port_wwns, init_targ_map) = ( self.client.get_init_targ_map(wwns)) # Add host into hostgroup. hostgroup_id = self.client.add_host_to_hostgroup(host_id) metadata = huawei_utils.get_volume_private_data(volume) LOG.info("initialize_connection, metadata is: %s.", metadata) hypermetro_lun = metadata.get('hypermetro_id') is not None map_info = self.client.do_mapping(lun_id, hostgroup_id, host_id, portg_id, lun_type, hypermetro_lun) host_lun_id = self.client.get_host_lun_id(host_id, lun_id, lun_type) # Return FC properties. fc_info = {'driver_volume_type': 'fibre_channel', 'data': {'target_lun': int(host_lun_id), 'target_discovered': True, 'target_wwn': tgt_port_wwns, 'volume_id': volume.id, 'initiator_target_map': init_targ_map, 'map_info': map_info}, } # Deal with hypermetro connection. if hypermetro_lun: loc_tgt_wwn = fc_info['data']['target_wwn'] local_ini_tgt_map = fc_info['data']['initiator_target_map'] hyperm = hypermetro.HuaweiHyperMetro(self.client, self.rmt_client, self.configuration) rmt_fc_info = hyperm.connect_volume_fc(volume, connector) rmt_tgt_wwn = rmt_fc_info['data']['target_wwn'] rmt_ini_tgt_map = rmt_fc_info['data']['initiator_target_map'] fc_info['data']['target_wwn'] = (loc_tgt_wwn + rmt_tgt_wwn) wwns = connector['wwpns'] for wwn in wwns: if (wwn in local_ini_tgt_map and wwn in rmt_ini_tgt_map): fc_info['data']['initiator_target_map'][wwn].extend( rmt_ini_tgt_map[wwn]) elif (wwn not in local_ini_tgt_map and wwn in rmt_ini_tgt_map): fc_info['data']['initiator_target_map'][wwn] = ( rmt_ini_tgt_map[wwn]) # else, do nothing loc_map_info = fc_info['data']['map_info'] rmt_map_info = rmt_fc_info['data']['map_info'] same_host_id = self._get_same_hostid(loc_map_info, rmt_map_info) self.client.change_hostlun_id(loc_map_info, same_host_id) hyperm.rmt_client.change_hostlun_id(rmt_map_info, same_host_id) fc_info['data']['target_lun'] = same_host_id hyperm.rmt_client.logout() fczm_utils.add_fc_zone(fc_info) LOG.info("Return FC info is: %s.", fc_info) return fc_info def _get_same_hostid(self, loc_fc_info, rmt_fc_info): loc_aval_luns = loc_fc_info['aval_luns'] loc_aval_luns = json.loads(loc_aval_luns) rmt_aval_luns = rmt_fc_info['aval_luns'] rmt_aval_luns = json.loads(rmt_aval_luns) same_host_id = None for i in range(1, 512): if i in rmt_aval_luns and i in loc_aval_luns: same_host_id = i break LOG.info("The same hostid is: %s.", same_host_id) if not same_host_id: msg = _("Can't find the same host id from arrays.") LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) return same_host_id @coordination.synchronized('huawei-mapping-{connector[host]}') def terminate_connection(self, volume, connector, **kwargs): """Delete map between a volume and a host.""" lun_id, lun_type = self.get_lun_id_and_type(volume) wwns = connector['wwpns'] host_name = connector['host'] left_lunnum = -1 lungroup_id = None view_id = None LOG.info('terminate_connection: wwpns: %(wwns)s, ' 'LUN ID: %(lun_id)s.', {'wwns': wwns, 'lun_id': lun_id}) host_id = huawei_utils.get_host_id(self.client, host_name) if host_id: mapping_view_name = constants.MAPPING_VIEW_PREFIX + host_id view_id = 
self.client.find_mapping_view(mapping_view_name) if view_id: lungroup_id = self.client.find_lungroup_from_map(view_id) if lun_id and lungroup_id: lungroup_ids = self.client.get_lungroupids_by_lunid(lun_id, lun_type) if lungroup_id in lungroup_ids: self.client.remove_lun_from_lungroup(lungroup_id, lun_id, lun_type) else: LOG.warning("LUN is not in lungroup. " "LUN ID: %(lun_id)s. " "Lungroup id: %(lungroup_id)s.", {"lun_id": lun_id, "lungroup_id": lungroup_id}) else: LOG.warning("Can't find lun on the array.") if lungroup_id: left_lunnum = self.client.get_obj_count_from_lungroup(lungroup_id) if int(left_lunnum) > 0: fc_info = {'driver_volume_type': 'fibre_channel', 'data': {}} else: fc_info, portg_id = self._delete_zone_and_remove_fc_initiators( wwns, host_id) if lungroup_id: if view_id and self.client.lungroup_associated( view_id, lungroup_id): self.client.delete_lungroup_mapping_view(view_id, lungroup_id) self.client.delete_lungroup(lungroup_id) if portg_id: if view_id and self.client.is_portgroup_associated_to_view( view_id, portg_id): self.client.delete_portgroup_mapping_view(view_id, portg_id) self.client.delete_portgroup(portg_id) if host_id: hostgroup_name = constants.HOSTGROUP_PREFIX + host_id hostgroup_id = self.client.find_hostgroup(hostgroup_name) if hostgroup_id: if view_id and self.client.hostgroup_associated( view_id, hostgroup_id): self.client.delete_hostgoup_mapping_view( view_id, hostgroup_id) self.client.remove_host_from_hostgroup( hostgroup_id, host_id) self.client.delete_hostgroup(hostgroup_id) if not self.client.check_fc_initiators_exist_in_host( host_id): self.client.remove_host(host_id) if view_id: self.client.delete_mapping_view(view_id) # Deal with hypermetro connection. metadata = huawei_utils.get_volume_private_data(volume) LOG.info("Detach Volume, metadata is: %s.", metadata) if metadata.get('hypermetro_id'): hyperm = hypermetro.HuaweiHyperMetro(self.client, self.rmt_client, self.configuration) hyperm.disconnect_volume_fc(volume, connector) LOG.info("terminate_connection, return data is: %s.", fc_info) # This only does something if and only if the initiator_target_map # exists in fc_info fczm_utils.remove_fc_zone(fc_info) return fc_info def _delete_zone_and_remove_fc_initiators(self, wwns, host_id): # Get tgt_port_wwns and init_targ_map to remove zone. portg_id = None if not self.fcsan: self.fcsan = fczm_utils.create_lookup_service() if self.fcsan: zone_helper = fc_zone_helper.FCZoneHelper(self.fcsan, self.client) (tgt_port_wwns, portg_id, init_targ_map) = ( zone_helper.get_init_targ_map(wwns, host_id)) else: (tgt_port_wwns, init_targ_map) = ( self.client.get_init_targ_map(wwns)) # Remove the initiators from host if need. if host_id: fc_initiators = self.client.get_host_fc_initiators(host_id) for wwn in wwns: if wwn in fc_initiators: self.client.remove_fc_from_host(wwn) info = {'driver_volume_type': 'fibre_channel', 'data': {'target_wwn': tgt_port_wwns, 'initiator_target_map': init_targ_map}} return info, portg_id ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/drivers/huawei/huawei_utils.py0000664000175000017500000003737200000000000023672 0ustar00zuulzuul00000000000000# Copyright (c) 2016 Huawei Technologies Co., Ltd. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import hashlib import json import math from oslo_log import log as logging from oslo_utils import strutils from cinder import context from cinder import exception from cinder.i18n import _ from cinder import objects from cinder.objects import fields from cinder import utils from cinder.volume.drivers.huawei import constants from cinder.volume import qos_specs from cinder.volume import volume_types LOG = logging.getLogger(__name__) def encode_name(name): encoded_name = hashlib.md5(name.encode('utf-8'), usedforsecurity=False).hexdigest() prefix = name.split('-')[0] + '-' postfix = encoded_name[:constants.MAX_NAME_LENGTH - len(prefix)] return prefix + postfix def old_encode_name(name): pre_name = name.split("-")[0] vol_encoded = str(hash(name)) if vol_encoded.startswith('-'): newuuid = pre_name + vol_encoded else: newuuid = pre_name + '-' + vol_encoded return newuuid def encode_host_name(name): if name and len(name) > constants.MAX_NAME_LENGTH: encoded_name = hashlib.md5(name.encode('utf-8'), usedforsecurity=False).hexdigest() return encoded_name[:constants.MAX_NAME_LENGTH] return name def old_encode_host_name(name): if name and len(name) > constants.MAX_NAME_LENGTH: name = str(hash(name)) return name def wait_for_condition(func, interval, timeout): """Wait for ``func`` to return True. This retries running func until it either returns True or raises an exception. :param func: The function to call. :param interval: The interval to wait in seconds between calls. :param timeout: The maximum time in seconds to wait. 
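    Illustrative usage (a sketch added for clarity, not taken from the
    driver), assuming a ``_lun_ready`` callable that returns True once the
    LUN is online:

        wait_for_condition(_lun_ready, interval=2, timeout=60)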
""" if interval == 0: interval = 1 if timeout == 0: timeout = 1 @utils.retry(exception.VolumeDriverException, interval=interval, backoff_rate=1, retries=(math.ceil(timeout / interval))) def _retry_call(): result = func() if not result: raise exception.VolumeDriverException( _('Timed out waiting for condition.')) _retry_call() def _get_volume_type(volume): if volume.volume_type: return volume.volume_type if volume.volume_type_id: return volume_types.get_volume_type(None, volume.volume_type_id) def get_volume_params(volume): volume_type = _get_volume_type(volume) return get_volume_type_params(volume_type) def get_volume_type_params(volume_type): specs = {} if isinstance(volume_type, dict) and volume_type.get('extra_specs'): specs = volume_type['extra_specs'] elif isinstance(volume_type, objects.VolumeType ) and volume_type.extra_specs: specs = volume_type.extra_specs vol_params = get_volume_params_from_specs(specs) vol_params['qos'] = None if isinstance(volume_type, dict) and volume_type.get('qos_specs_id'): vol_params['qos'] = _get_qos_specs(volume_type['qos_specs_id']) elif isinstance(volume_type, objects.VolumeType ) and volume_type.qos_specs_id: vol_params['qos'] = _get_qos_specs(volume_type.qos_specs_id) LOG.info('volume opts %s.', vol_params) return vol_params def get_volume_params_from_specs(specs): opts = _get_opts_from_specs(specs) _verify_smartcache_opts(opts) _verify_smartpartition_opts(opts) _verify_smartthin_opts(opts) return opts def _get_opts_from_specs(specs): """Get the well defined extra specs.""" opts = {} def _get_bool_param(k, v): words = v.split() if len(words) == 2 and words[0] == '': return strutils.bool_from_string(words[1], strict=True) msg = _("%(k)s spec must be specified as %(k)s=' True' " "or ' False'.") % {'k': k} LOG.error(msg) raise exception.InvalidInput(reason=msg) def _get_replication_type_param(k, v): words = v.split() if len(words) == 2 and words[0] == '': REPLICA_SYNC_TYPES = {'sync': constants.REPLICA_SYNC_MODEL, 'async': constants.REPLICA_ASYNC_MODEL} sync_type = words[1].lower() if sync_type in REPLICA_SYNC_TYPES: return REPLICA_SYNC_TYPES[sync_type] msg = _("replication_type spec must be specified as " "replication_type=' sync' or ' async'.") LOG.error(msg) raise exception.InvalidInput(reason=msg) def _get_string_param(k, v): if not v: msg = _("%s spec must be specified as a string.") % k LOG.error(msg) raise exception.InvalidInput(reason=msg) return v opts_capability = { 'capabilities:smarttier': (_get_bool_param, False), 'capabilities:smartcache': (_get_bool_param, False), 'capabilities:smartpartition': (_get_bool_param, False), 'capabilities:thin_provisioning_support': (_get_bool_param, False), 'capabilities:thick_provisioning_support': (_get_bool_param, False), 'capabilities:hypermetro': (_get_bool_param, False), 'capabilities:replication_enabled': (_get_bool_param, False), 'replication_type': (_get_replication_type_param, constants.REPLICA_ASYNC_MODEL), 'smarttier:policy': (_get_string_param, None), 'smartcache:cachename': (_get_string_param, None), 'smartpartition:partitionname': (_get_string_param, None), 'huawei_controller:controllername': (_get_string_param, None), 'capabilities:dedup': (_get_bool_param, None), 'capabilities:compression': (_get_bool_param, None), } def _get_opt_key(spec_key): key_split = spec_key.split(':') if len(key_split) == 1: return key_split[0] else: return key_split[1] for spec_key in opts_capability: opt_key = _get_opt_key(spec_key) opts[opt_key] = opts_capability[spec_key][1] for key, value in specs.items(): if key 
not in opts_capability: continue func = opts_capability[key][0] opt_key = _get_opt_key(key) opts[opt_key] = func(key, value) return opts def _get_qos_specs(qos_specs_id): ctxt = context.get_admin_context() specs = qos_specs.get_qos_specs(ctxt, qos_specs_id) if specs is None: return {} if specs.get('consumer') == 'front-end': return {} kvs = specs.get('specs', {}) LOG.info('The QoS specs is: %s.', kvs) qos = {'IOTYPE': kvs.pop('IOType', None)} if qos['IOTYPE'] not in constants.QOS_IOTYPES: msg = _('IOType must be in %(types)s.' ) % {'types': constants.QOS_IOTYPES} LOG.error(msg) raise exception.InvalidInput(reason=msg) for k, v in kvs.items(): if k not in constants.QOS_SPEC_KEYS: msg = _('QoS key %s is not valid.') % k LOG.error(msg) raise exception.InvalidInput(reason=msg) if int(v) <= 0: msg = _('QoS value for %s must > 0.') % k LOG.error(msg) raise exception.InvalidInput(reason=msg) qos[k.upper()] = v if len(qos) < 2: msg = _('QoS policy must specify both IOType and one another ' 'qos spec, got policy: %s.') % qos LOG.error(msg) raise exception.InvalidInput(reason=msg) qos_keys = set(qos.keys()) if (qos_keys & set(constants.UPPER_LIMIT_KEYS) and qos_keys & set(constants.LOWER_LIMIT_KEYS)): msg = _('QoS policy upper limit and lower limit ' 'conflict, QoS policy: %s.') % qos LOG.error(msg) raise exception.InvalidInput(reason=msg) return qos def _verify_smartthin_opts(opts): if (opts['thin_provisioning_support'] and opts['thick_provisioning_support']): msg = _('Cannot set thin and thick at the same time.') LOG.error(msg) raise exception.InvalidInput(reason=msg) elif opts['thin_provisioning_support']: opts['LUNType'] = constants.THIN_LUNTYPE elif opts['thick_provisioning_support']: opts['LUNType'] = constants.THICK_LUNTYPE def _verify_smartcache_opts(opts): if opts['smartcache'] and not opts['cachename']: msg = _('Cache name is not specified, please set ' 'smartcache:cachename in extra specs.') LOG.error(msg) raise exception.InvalidInput(reason=msg) def _verify_smartpartition_opts(opts): if opts['smartpartition'] and not opts['partitionname']: msg = _('Partition name is not specified, please set ' 'smartpartition:partitionname in extra specs.') LOG.error(msg) raise exception.InvalidInput(reason=msg) def wait_lun_online(client, lun_id, wait_interval=None, wait_timeout=None): def _lun_online(): result = client.get_lun_info_by_id(lun_id) if result['HEALTHSTATUS'] != constants.STATUS_HEALTH: err_msg = _('LUN %s is abnormal.') % lun_id LOG.error(err_msg) raise exception.VolumeBackendAPIException(data=err_msg) if result['RUNNINGSTATUS'] == constants.LUN_INITIALIZING: return False return True if not wait_interval: wait_interval = constants.DEFAULT_WAIT_INTERVAL if not wait_timeout: wait_timeout = wait_interval * 10 wait_for_condition(_lun_online, wait_interval, wait_timeout) def is_not_exist_exc(exc): msg = getattr(exc, 'msg', '') return 'not exist' in msg def to_string(**kwargs): return json.dumps(kwargs) if kwargs else '' def to_dict(text): return json.loads(text) if text else {} def get_volume_private_data(volume): if not volume.provider_location: return {} try: info = json.loads(volume.provider_location) except Exception: LOG.exception("Decode volume provider_location error") return {} if isinstance(info, dict): return info # To keep compatible with old driver version return {'huawei_lun_id': str(info), 'huawei_lun_wwn': volume.admin_metadata.get('huawei_lun_wwn'), 'huawei_sn': volume.metadata.get('huawei_sn'), 'hypermetro_id': volume.metadata.get('hypermetro_id'), 'remote_lun_id': 
volume.metadata.get('remote_lun_id') } def get_volume_metadata(volume): if isinstance(volume, objects.Volume): return volume.metadata if volume.get('volume_metadata'): return {item['key']: item['value'] for item in volume['volume_metadata']} return {} def get_replication_data(volume): if not volume.replication_driver_data: return {} return json.loads(volume.replication_driver_data) def get_snapshot_private_data(snapshot): if not snapshot.provider_location: return {} info = json.loads(snapshot.provider_location) if isinstance(info, dict): return info # To keep compatible with old driver version return {'huawei_snapshot_id': str(info), 'huawei_snapshot_wwn': snapshot.metadata.get( 'huawei_snapshot_wwn'), } def get_external_lun_info(client, external_ref): lun_info = None if 'source-id' in external_ref: lun = client.get_lun_info_by_id(external_ref['source-id']) lun_info = client.get_lun_info_by_name(lun['NAME']) elif 'source-name' in external_ref: lun_info = client.get_lun_info_by_name(external_ref['source-name']) return lun_info def get_external_snapshot_info(client, external_ref): snapshot_info = None if 'source-id' in external_ref: snapshot_info = client.get_snapshot_info_by_id( external_ref['source-id']) elif 'source-name' in external_ref: snapshot_info = client.get_snapshot_info_by_name( external_ref['source-name']) return snapshot_info def get_lun_info(client, volume): metadata = get_volume_private_data(volume) volume_name = encode_name(volume.id) lun_info = client.get_lun_info_by_name(volume_name) # If new encoded way not found, try the old encoded way. if not lun_info: volume_name = old_encode_name(volume.id) lun_info = client.get_lun_info_by_name(volume_name) if not lun_info and metadata.get('huawei_lun_id'): lun_info = client.get_lun_info_by_id(metadata['huawei_lun_id']) if lun_info and ('huawei_lun_wwn' in metadata and lun_info.get('WWN') != metadata['huawei_lun_wwn']): return None return lun_info def get_snapshot_info(client, snapshot): name = encode_name(snapshot.id) snapshot_info = client.get_snapshot_info_by_name(name) # If new encoded way not found, try the old encoded way. 
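    # Added descriptive note: objects created by older driver versions were
    # named with old_encode_name() (a str(hash(...)) based suffix), while the
    # current driver uses encode_name() (an md5 digest truncated to
    # MAX_NAME_LENGTH), so every lookup falls back to the old name when the
    # new one yields nothing.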
if not snapshot_info: name = old_encode_name(snapshot.id) snapshot_info = client.get_snapshot_info_by_name(name) return snapshot_info def get_host_id(client, host_name): encoded_name = encode_host_name(host_name) host_id = client.get_host_id_by_name(encoded_name) if encoded_name == host_name: return host_id if not host_id: encoded_name = old_encode_host_name(host_name) host_id = client.get_host_id_by_name(encoded_name) return host_id def get_hypermetro_group(client, group_id): encoded_name = encode_name(group_id) group = client.get_metrogroup_by_name(encoded_name) if not group: encoded_name = old_encode_name(group_id) group = client.get_metrogroup_by_name(encoded_name) return group def get_replication_group(client, group_id): encoded_name = encode_name(group_id) group = client.get_replication_group_by_name(encoded_name) if not group: encoded_name = old_encode_name(group_id) group = client.get_replication_group_by_name(encoded_name) return group def get_volume_model_update(volume, **kwargs): private_data = get_volume_private_data(volume) if kwargs.get('hypermetro_id'): private_data['hypermetro_id'] = kwargs.get('hypermetro_id') elif 'hypermetro_id' in private_data: private_data.pop('hypermetro_id') if 'huawei_lun_id' in kwargs: private_data['huawei_lun_id'] = kwargs['huawei_lun_id'] if 'huawei_lun_wwn' in kwargs: private_data['huawei_lun_wwn'] = kwargs['huawei_lun_wwn'] if 'huawei_sn' in kwargs: private_data['huawei_sn'] = kwargs['huawei_sn'] model_update = {'provider_location': to_string(**private_data)} if kwargs.get('replication_id'): model_update['replication_driver_data'] = to_string( pair_id=kwargs.get('replication_id')) model_update['replication_status'] = fields.ReplicationStatus.ENABLED else: model_update['replication_driver_data'] = None model_update['replication_status'] = fields.ReplicationStatus.DISABLED return model_update def get_group_type_params(group): opts = [] for volume_type in group.volume_types: opt = get_volume_type_params(volume_type) opts.append(opt) return opts def is_support_clone_pair(client): array_info = client.get_array_info() version_info = array_info['PRODUCTVERSION'] if version_info >= constants.SUPPORT_CLONE_PAIR_VERSION: return True ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/drivers/huawei/hypermetro.py0000664000175000017500000003477000000000000023365 0ustar00zuulzuul00000000000000# Copyright (c) 2016 Huawei Technologies Co., Ltd. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
# from oslo_log import log as logging from cinder import exception from cinder.i18n import _ from cinder.objects import fields from cinder.volume.drivers.huawei import constants from cinder.volume.drivers.huawei import huawei_utils LOG = logging.getLogger(__name__) class HuaweiHyperMetro(object): def __init__(self, client, rmt_client, configuration): self.client = client self.rmt_client = rmt_client self.configuration = configuration def create_hypermetro(self, local_lun_id, lun_params): """Create hypermetro.""" try: # Get the remote pool info. config_pool = self.configuration.metro_storage_pools remote_pool = self.rmt_client.get_all_pools() pool = self.rmt_client.get_pool_info(config_pool, remote_pool) if not pool: err_msg = _("Remote pool cannot be found.") LOG.error(err_msg) raise exception.VolumeBackendAPIException(data=err_msg) # Create remote lun. lun_params['PARENTID'] = pool['ID'] remotelun_info = self.rmt_client.create_lun(lun_params) remote_lun_id = remotelun_info['ID'] # Get hypermetro domain. try: domain_name = self.configuration.metro_domain_name domain_id = self.rmt_client.get_hyper_domain_id(domain_name) self._wait_volume_ready(remote_lun_id) hypermetro = self._create_hypermetro_pair(domain_id, local_lun_id, remote_lun_id) LOG.info("Hypermetro id: %(metro_id)s. " "Remote lun id: %(remote_lun_id)s.", {'metro_id': hypermetro['ID'], 'remote_lun_id': remote_lun_id}) return {'hypermetro_id': hypermetro['ID'], 'remote_lun_id': remote_lun_id} except exception.VolumeBackendAPIException as err: self.rmt_client.delete_lun(remote_lun_id) msg = _('Create hypermetro error. %s.') % err raise exception.VolumeBackendAPIException(data=msg) except exception.VolumeBackendAPIException: raise def delete_hypermetro(self, volume): """Delete hypermetro.""" metadata = huawei_utils.get_volume_private_data(volume) metro_id = metadata['hypermetro_id'] remote_lun_id = metadata['remote_lun_id'] if metro_id: self.check_metro_need_to_stop(volume) # Delete hypermetro self.client.delete_hypermetro(metro_id) # Delete remote lun. if remote_lun_id and self.rmt_client.check_lun_exist(remote_lun_id): self.rmt_client.delete_lun(remote_lun_id) def _create_hypermetro_pair(self, domain_id, lun_id, remote_lun_id): """Create a HyperMetroPair.""" hcp_param = {"DOMAINID": domain_id, "HCRESOURCETYPE": '1', "ISFIRSTSYNC": False, "LOCALOBJID": lun_id, "RECOVERYPOLICY": '1', "REMOTEOBJID": remote_lun_id, "SPEED": '2'} return self.client.create_hypermetro(hcp_param) def connect_volume_fc(self, volume, connector): """Create map between a volume and a host for FC.""" wwns = connector['wwpns'] LOG.info( 'initialize_connection_fc, initiator: %(wwpns)s, ' 'volume id: %(id)s.', {'wwpns': wwns, 'id': volume.id}) metadata = huawei_utils.get_volume_private_data(volume) lun_id = metadata.get('remote_lun_id') if lun_id is None: msg = _("Can't get volume id. Volume name: %s.") % volume.id LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) original_host_name = connector['host'] host_id = self.client.add_host_with_check(original_host_name) # Create hostgroup if not exist. 
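        # Added descriptive note: the host is registered on the remote
        # (HyperMetro peer) array as well, and host_id is re-assigned to the
        # remote array's host ID, since every mapping call that follows goes
        # through self.rmt_client.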
host_id = self.rmt_client.add_host_with_check(original_host_name) online_wwns_in_host = ( self.rmt_client.get_host_online_fc_initiators(host_id)) online_free_wwns = self.rmt_client.get_online_free_wwns() fc_initiators_on_array = self.rmt_client.get_fc_initiator_on_array() wwns = [i for i in wwns if i in fc_initiators_on_array] for wwn in wwns: if (wwn not in online_wwns_in_host and wwn not in online_free_wwns): wwns_in_host = ( self.rmt_client.get_host_fc_initiators(host_id)) iqns_in_host = ( self.rmt_client.get_host_iscsi_initiators(host_id)) if not (wwns_in_host or iqns_in_host): self.rmt_client.remove_host(host_id) msg = _('Can not add FC port to host.') LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) for wwn in wwns: if wwn in online_free_wwns: self.rmt_client.add_fc_port_to_host(host_id, wwn) (tgt_port_wwns, init_targ_map) = ( self.rmt_client.get_init_targ_map(wwns)) # Add host into hostgroup. hostgroup_id = self.rmt_client.add_host_to_hostgroup(host_id) map_info = self.rmt_client.do_mapping(lun_id, hostgroup_id, host_id, hypermetro_lun=True) if not map_info: msg = _('Map info is None due to array version ' 'not supporting hypermetro.') LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) host_lun_id = self.rmt_client.get_host_lun_id(host_id, lun_id) # Return FC properties. fc_info = {'driver_volume_type': 'fibre_channel', 'data': {'target_lun': int(host_lun_id), 'target_discovered': True, 'target_wwn': tgt_port_wwns, 'volume_id': volume.id, 'initiator_target_map': init_targ_map, 'map_info': map_info}, } LOG.info('Remote return FC info is: %s.', fc_info) return fc_info def disconnect_volume_fc(self, volume, connector): """Delete map between a volume and a host for FC.""" wwns = connector['wwpns'] metadata = huawei_utils.get_volume_private_data(volume) lun_id = metadata.get('remote_lun_id') host_name = connector['host'] left_lunnum = -1 lungroup_id = None view_id = None LOG.info('terminate_connection_fc: volume: %(id)s, ' 'wwpns: %(wwns)s, ' 'lun_id: %(lunid)s.', {'id': volume.id, 'wwns': wwns, 'lunid': lun_id},) hostid = huawei_utils.get_host_id(self.rmt_client, host_name) if hostid: mapping_view_name = constants.MAPPING_VIEW_PREFIX + hostid view_id = self.rmt_client.find_mapping_view( mapping_view_name) if view_id: lungroup_id = self.rmt_client.find_lungroup_from_map( view_id) if lun_id and self.rmt_client.check_lun_exist(lun_id): if lungroup_id: lungroup_ids = self.rmt_client.get_lungroupids_by_lunid( lun_id) if lungroup_id in lungroup_ids: self.rmt_client.remove_lun_from_lungroup( lungroup_id, lun_id) else: LOG.warning("Lun is not in lungroup. 
" "Lun id: %(lun_id)s, " "lungroup id: %(lungroup_id)s", {"lun_id": lun_id, "lungroup_id": lungroup_id}) (tgt_port_wwns, init_targ_map) = ( self.rmt_client.get_init_targ_map(wwns)) hostid = huawei_utils.get_host_id(self.rmt_client, host_name) if hostid: mapping_view_name = constants.MAPPING_VIEW_PREFIX + hostid view_id = self.rmt_client.find_mapping_view( mapping_view_name) if view_id: lungroup_id = self.rmt_client.find_lungroup_from_map( view_id) if lungroup_id: left_lunnum = self.rmt_client.get_obj_count_from_lungroup( lungroup_id) if int(left_lunnum) > 0: info = {'driver_volume_type': 'fibre_channel', 'data': {}} else: info = {'driver_volume_type': 'fibre_channel', 'data': {'target_wwn': tgt_port_wwns, 'initiator_target_map': init_targ_map}, } return info def _wait_volume_ready(self, lun_id): wait_interval = self.configuration.lun_ready_wait_interval def _volume_ready(): result = self.rmt_client.get_lun_info(lun_id) if (result['HEALTHSTATUS'] == constants.STATUS_HEALTH and result['RUNNINGSTATUS'] == constants.STATUS_VOLUME_READY): return True return False huawei_utils.wait_for_condition(_volume_ready, wait_interval, wait_interval * 10) def retype(self, volume, new_type): return False def get_hypermetro_stats(self, hypermetro_id): pass def create_consistencygroup(self, group): LOG.info("Create Consistency Group: %(group)s.", {'group': group.id}) group_name = huawei_utils.encode_name(group.id) domain_name = self.configuration.metro_domain_name domain_id = self.client.get_hyper_domain_id(domain_name) if not domain_name or not domain_id: msg = _("The domain_name config in cinder.conf is wrong.") LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) self.client.create_metrogroup(group_name, group.id, domain_id) def delete_consistencygroup(self, context, group, volumes): LOG.info("Delete Consistency Group: %(group)s.", {'group': group.id}) model_update = {} volumes_model_update = [] model_update['status'] = fields.GroupStatus.DELETED metrogroup_id = self.check_consistencygroup_need_to_stop(group) if metrogroup_id: self.client.delete_metrogroup(metrogroup_id) # Deal with the return volumes info for volume_ref in volumes: volume_update = {'id': volume_ref.id} volume_update['status'] = 'deleted' volumes_model_update.append(volume_update) return model_update, volumes_model_update def update_consistencygroup(self, context, group, add_volumes, remove_volumes): LOG.info("Update Consistency Group: %(group)s. 
" "This adds or removes volumes from a CG.", {'group': group.id}) metrogroup_id = self.check_consistencygroup_need_to_stop(group) if metrogroup_id: # Deal with add volumes to CG for volume in add_volumes: metro_id = self.check_metro_need_to_stop(volume) self.client.add_metro_to_metrogroup(metrogroup_id, metro_id) # Deal with remove volumes from CG for volume in remove_volumes: metro_id = self.check_metro_need_to_stop(volume) self.client.remove_metro_from_metrogroup(metrogroup_id, metro_id) self.client.sync_hypermetro(metro_id) new_group_info = self.client.get_metrogroup_by_id(metrogroup_id) is_empty = new_group_info["ISEMPTY"] if is_empty == 'false': self.client.sync_metrogroup(metrogroup_id) # if CG not exist on array else: msg = _("The CG does not exist on array.") LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) def check_metro_need_to_stop(self, volume): metadata = huawei_utils.get_volume_private_data(volume) metro_id = metadata['hypermetro_id'] metro_existed = self.client.check_hypermetro_exist(metro_id) if metro_existed: metro_info = self.client.get_hypermetro_by_id(metro_id) metro_health_status = metro_info['HEALTHSTATUS'] metro_running_status = metro_info['RUNNINGSTATUS'] if (metro_health_status == constants.HEALTH_NORMAL and (metro_running_status == constants.RUNNING_NORMAL or metro_running_status == constants.RUNNING_SYNC)): self.client.stop_hypermetro(metro_id) return metro_id def _get_metro_group_id(self, id): group_name = huawei_utils.encode_name(id) metrogroup_id = self.client.get_metrogroup_by_name(group_name) if not metrogroup_id: group_name = huawei_utils.old_encode_name(id) metrogroup_id = self.client.get_metrogroup_by_name(group_name) return metrogroup_id def check_consistencygroup_need_to_stop(self, group): metrogroup_id = self._get_metro_group_id(group.id) if metrogroup_id: metrogroup_info = self.client.get_metrogroup_by_id(metrogroup_id) health_status = metrogroup_info['HEALTHSTATUS'] running_status = metrogroup_info['RUNNINGSTATUS'] if (health_status == constants.HEALTH_NORMAL and (running_status == constants.RUNNING_NORMAL or running_status == constants.RUNNING_SYNC)): self.client.stop_metrogroup(metrogroup_id) return metrogroup_id ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/drivers/huawei/replication.py0000664000175000017500000005577200000000000023505 0ustar00zuulzuul00000000000000# Copyright (c) 2016 Huawei Technologies Co., Ltd. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
# import json from oslo_log import log as logging from oslo_utils import excutils from cinder import exception from cinder.i18n import _ from cinder.volume.drivers.huawei import constants from cinder.volume.drivers.huawei import huawei_utils LOG = logging.getLogger(__name__) class AbsReplicaOp(object): def __init__(self, client): self.client = client def create(self, **kwargs): pass def delete(self, replica_id): pass def protect_second(self, replica_id): pass def unprotect_second(self, replica_id): pass def sync(self, replica_id): pass def split(self, replica_id): pass def switch(self, replica_id): pass def is_primary(self, replica_info): flag = replica_info.get('ISPRIMARY') if flag and flag.lower() == 'true': return True return False def get_replica_info(self, replica_id): return {} def _is_status(self, status_key, status, replica_info): if type(status) in (list, tuple): return replica_info.get(status_key, '') in status if type(status) is str: return replica_info.get(status_key, '') == status return False def is_running_status(self, status, replica_info): return self._is_status(constants.REPLICA_RUNNING_STATUS_KEY, status, replica_info) def is_health_status(self, status, replica_info): return self._is_status(constants.REPLICA_HEALTH_STATUS_KEY, status, replica_info) class PairOp(AbsReplicaOp): def create(self, local_lun_id, rmt_lun_id, rmt_dev_id, rmt_dev_name, replica_model, speed=constants.REPLICA_SPEED, period=constants.REPLICA_PERIOD, **kwargs): super(PairOp, self).create(**kwargs) params = { "LOCALRESID": local_lun_id, "LOCALRESTYPE": '11', "REMOTEDEVICEID": rmt_dev_id, "REMOTEDEVICENAME": rmt_dev_name, "REMOTERESID": rmt_lun_id, "REPLICATIONMODEL": replica_model, # recovery policy. 1: auto, 2: manual "RECOVERYPOLICY": '1', "SPEED": speed, } if replica_model == constants.REPLICA_ASYNC_MODEL: # Synchronize type values: # 1, manual # 2, timed wait when synchronization begins # 3, timed wait when synchronization ends params['SYNCHRONIZETYPE'] = '2' params['TIMINGVAL'] = period try: pair_info = self.client.create_pair(params) except Exception as err: msg = _('Create replication pair failed. 
Error: %s.') % err LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) return pair_info def split(self, pair_id): self.client.split_pair(pair_id) def delete(self, pair_id, force=False): self.client.delete_pair(pair_id, force) def protect_second(self, pair_id): self.client.set_pair_second_access(pair_id, constants.REPLICA_SECOND_RO) def unprotect_second(self, pair_id): self.client.set_pair_second_access(pair_id, constants.REPLICA_SECOND_RW) def sync(self, pair_id): self.client.sync_pair(pair_id) def switch(self, pair_id): self.client.switch_pair(pair_id) def get_replica_info(self, pair_id): return self.client.get_pair_by_id(pair_id) class CGOp(AbsReplicaOp): pass class ReplicaCommonDriver(object): def __init__(self, conf, replica_op): self.conf = conf self.op = replica_op def protect_second(self, replica_id): info = self.op.get_replica_info(replica_id) if info.get('SECRESACCESS') == constants.REPLICA_SECOND_RO: return self.op.protect_second(replica_id) self.wait_second_access(replica_id, constants.REPLICA_SECOND_RO) def unprotect_second(self, replica_id): info = self.op.get_replica_info(replica_id) if info.get('SECRESACCESS') == constants.REPLICA_SECOND_RW: return self.op.unprotect_second(replica_id) self.wait_second_access(replica_id, constants.REPLICA_SECOND_RW) def sync(self, replica_id, wait_complete=False): self.protect_second(replica_id) expect_status = (constants.REPLICA_RUNNING_STATUS_NORMAL, constants.REPLICA_RUNNING_STATUS_SYNC, constants.REPLICA_RUNNING_STATUS_INITIAL_SYNC) info = self.op.get_replica_info(replica_id) # When running status is synchronizing or normal, # it's not necessary to do synchronize again. if (info.get('REPLICATIONMODEL') == constants.REPLICA_SYNC_MODEL and self.op.is_running_status(expect_status, info)): return self.op.sync(replica_id) self.wait_expect_state(replica_id, expect_status) if wait_complete: self.wait_replica_ready(replica_id) def split(self, replica_id): running_status = (constants.REPLICA_RUNNING_STATUS_SPLIT, constants.REPLICA_RUNNING_STATUS_INVALID, constants.REPLICA_RUNNING_STATUS_ERRUPTED) info = self.op.get_replica_info(replica_id) if self.op.is_running_status(running_status, info): return try: self.op.split(replica_id) except Exception as err: LOG.warning('Split replication exception: %s.', err) try: self.wait_expect_state(replica_id, running_status) except Exception: msg = _('Split replication failed.') LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) def enable(self, replica_id, wait_sync_complete=False): info = self.op.get_replica_info(replica_id) if not self.op.is_primary(info): self.switch(replica_id) self.sync(replica_id) return None def switch(self, replica_id): self.split(replica_id) self.unprotect_second(replica_id) self.op.switch(replica_id) # Wait to be primary def _wait_switch_to_primary(): info = self.op.get_replica_info(replica_id) if self.op.is_primary(info): return True return False interval = constants.DEFAULT_REPLICA_WAIT_INTERVAL timeout = constants.DEFAULT_REPLICA_WAIT_TIMEOUT huawei_utils.wait_for_condition(_wait_switch_to_primary, interval, timeout) def failover(self, replica_id): """Failover replication. Purpose: 1. Split replication. 2. Set secondary access read & write. 
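        Note added for clarity: this is expected to run against the
        secondary array's pair; a pair that reports itself as primary raises
        an error, and a pair that is still synchronizing is waited on before
        it is split and its secondary access is switched to read/write.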
""" info = self.op.get_replica_info(replica_id) if self.op.is_primary(info): msg = _('We should not do switch over on primary array.') LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) sync_status_set = (constants.REPLICA_RUNNING_STATUS_SYNC, constants.REPLICA_RUNNING_STATUS_INITIAL_SYNC) if self.op.is_running_status(sync_status_set, info): self.wait_replica_ready(replica_id) self.split(replica_id) self.op.unprotect_second(replica_id) def wait_replica_ready(self, replica_id, interval=None, timeout=None): LOG.debug('Wait synchronize complete.') running_status_normal = (constants.REPLICA_RUNNING_STATUS_NORMAL, constants.REPLICA_RUNNING_STATUS_SYNCED) running_status_sync = (constants.REPLICA_RUNNING_STATUS_SYNC, constants.REPLICA_RUNNING_STATUS_INITIAL_SYNC) health_status_normal = constants.REPLICA_HEALTH_STATUS_NORMAL def _replica_ready(): info = self.op.get_replica_info(replica_id) if (self.op.is_running_status(running_status_normal, info) and self.op.is_health_status(health_status_normal, info)): return True if not self.op.is_running_status(running_status_sync, info): msg = (_('Wait synchronize failed. Running status: %s.') % info.get(constants.REPLICA_RUNNING_STATUS_KEY)) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) return False if not interval: interval = constants.DEFAULT_WAIT_INTERVAL if not timeout: timeout = constants.DEFAULT_WAIT_TIMEOUT huawei_utils.wait_for_condition(_replica_ready, interval, timeout) def wait_second_access(self, replica_id, access_level): def _check_access(): info = self.op.get_replica_info(replica_id) if info.get('SECRESACCESS') == access_level: return True return False interval = constants.DEFAULT_REPLICA_WAIT_INTERVAL timeout = constants.DEFAULT_REPLICA_WAIT_TIMEOUT huawei_utils.wait_for_condition(_check_access, interval, timeout) def wait_expect_state(self, replica_id, running_status, health_status=None, interval=None, timeout=None): def _check_state(): info = self.op.get_replica_info(replica_id) if self.op.is_running_status(running_status, info): if (not health_status or self.op.is_health_status(health_status, info)): return True return False if not interval: interval = constants.DEFAULT_REPLICA_WAIT_INTERVAL if not timeout: timeout = constants.DEFAULT_REPLICA_WAIT_TIMEOUT huawei_utils.wait_for_condition(_check_state, interval, timeout) def get_replication_driver_data(volume): if volume.replication_driver_data: return json.loads(volume.replication_driver_data) return {} def to_string(dict_data): if dict_data: return json.dumps(dict_data) return '' class ReplicaPairManager(object): def __init__(self, local_client, rmt_client, conf): self.local_client = local_client self.rmt_client = rmt_client self.conf = conf # Now just support one remote pool. self.rmt_pool = self.rmt_client.storage_pools[0] self.local_op = PairOp(self.local_client) self.local_driver = ReplicaCommonDriver(self.conf, self.local_op) self.rmt_op = PairOp(self.rmt_client) self.rmt_driver = ReplicaCommonDriver(self.conf, self.rmt_op) def try_get_remote_wwn(self): try: info = self.rmt_client.get_array_info() return info.get('wwn') except Exception as err: LOG.warning('Get remote array wwn failed. Error: %s.', err) return None def get_remote_device_by_wwn(self, wwn): devices = {} try: devices = self.local_client.get_remote_devices() except Exception as err: LOG.warning('Get remote devices failed. 
Error: %s.', err) for device in devices: if device.get('WWN') == wwn: return device return {} def check_remote_available(self): # We get device wwn in every check time. # If remote array changed, we can run normally. wwn = self.try_get_remote_wwn() if not wwn: return False device = self.get_remote_device_by_wwn(wwn) # Check remote device is available to use. # If array type is replication, 'ARRAYTYPE' == '1'. # If health status is normal, 'HEALTHSTATUS' == '1'. if (device and device.get('ARRAYTYPE') == '1' and device.get('HEALTHSTATUS') == '1' and device.get('RUNNINGSTATUS') == constants.STATUS_RUNNING): return True return False def update_replica_capability(self, stats): is_rmt_dev_available = self.check_remote_available() if not is_rmt_dev_available: LOG.warning('Remote device is unavailable.') return stats for pool in stats['pools']: pool['replication_enabled'] = True pool['replication_type'] = ['sync', 'async'] return stats def get_rmt_dev_info(self): wwn = self.try_get_remote_wwn() if not wwn: return None, None device = self.get_remote_device_by_wwn(wwn) if not device: return None, None return device.get('ID'), device.get('NAME') def build_rmt_lun_params(self, local_lun_info): params = { 'TYPE': '11', 'NAME': local_lun_info['NAME'], 'PARENTTYPE': '216', 'PARENTID': self.rmt_client.get_pool_id(self.rmt_pool), 'DESCRIPTION': local_lun_info['DESCRIPTION'], 'ALLOCTYPE': local_lun_info['ALLOCTYPE'], 'CAPACITY': local_lun_info['CAPACITY'], 'READCACHEPOLICY': self.conf.lun_read_cache_policy, 'WRITECACHEPOLICY': self.conf.lun_write_cache_policy, } if 'WRITEPOLICY' in local_lun_info: params['WRITEPOLICY'] = local_lun_info['WRITEPOLICY'] if 'PREFETCHPOLICY' in local_lun_info: params['PREFETCHPOLICY'] = local_lun_info['PREFETCHPOLICY'] if 'PREFETCHVALUE' in local_lun_info: params['PREFETCHVALUE'] = local_lun_info['PREFETCHVALUE'] if 'DATATRANSFERPOLICY' in local_lun_info: params['DATATRANSFERPOLICY'] = local_lun_info['DATATRANSFERPOLICY'] LOG.debug('Remote lun params: %s.', params) return params def wait_volume_online(self, client, lun_info, interval=None, timeout=None): online_status = constants.STATUS_VOLUME_READY if lun_info.get('RUNNINGSTATUS') == online_status: return lun_id = lun_info['ID'] def _wait_online(): info = client.get_lun_info(lun_id) return info.get('RUNNINGSTATUS') == online_status if not interval: interval = constants.DEFAULT_REPLICA_WAIT_INTERVAL if not timeout: timeout = constants.DEFAULT_REPLICA_WAIT_TIMEOUT huawei_utils.wait_for_condition(_wait_online, interval, timeout) def create_rmt_lun(self, local_lun_info): # Create on rmt array. If failed, raise exception. lun_params = self.build_rmt_lun_params(local_lun_info) lun_info = self.rmt_client.create_lun(lun_params) try: self.wait_volume_online(self.rmt_client, lun_info) except exception.VolumeBackendAPIException: with excutils.save_and_reraise_exception(): self.rmt_client.delete_lun(lun_info['ID']) return lun_info def create_replica(self, local_lun_info, replica_model): """Create remote LUN and replication pair. Purpose: 1. create remote lun 2. create replication pair 3. 
enable replication pair """ LOG.debug(('Create replication, local lun info: %(info)s, ' 'replication model: %(model)s.'), {'info': local_lun_info, 'model': replica_model}) local_lun_id = local_lun_info['ID'] self.wait_volume_online(self.local_client, local_lun_info) # step1, create remote lun rmt_lun_info = self.create_rmt_lun(local_lun_info) rmt_lun_id = rmt_lun_info['ID'] # step2, get remote device info rmt_dev_id, rmt_dev_name = self.get_rmt_dev_info() if not rmt_lun_id or not rmt_dev_name: self._delete_rmt_lun(rmt_lun_id) msg = _('Get remote device info failed.') LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) # step3, create replication pair try: pair_info = self.local_op.create(local_lun_id, rmt_lun_id, rmt_dev_id, rmt_dev_name, replica_model) pair_id = pair_info['ID'] except Exception as err: with excutils.save_and_reraise_exception(): LOG.error('Create pair failed. Error: %s.', err) self._delete_rmt_lun(rmt_lun_id) # step4, start sync manually. If replication type is sync, # then wait for sync complete. wait_complete = (replica_model == constants.REPLICA_SYNC_MODEL) try: self.local_driver.sync(pair_id, wait_complete) except Exception as err: with excutils.save_and_reraise_exception(): LOG.error('Start synchronization failed. Error: %s.', err) self._delete_pair(pair_id) self._delete_rmt_lun(rmt_lun_id) model_update = {} driver_data = {'pair_id': pair_id, 'rmt_lun_id': rmt_lun_id, 'rmt_lun_wwn': rmt_lun_info['WWN']} model_update['replication_driver_data'] = to_string(driver_data) model_update['replication_status'] = 'available' LOG.debug('Create replication, return info: %s.', model_update) return model_update def _delete_pair(self, pair_id): if (not pair_id or not self.local_client.check_pair_exist(pair_id)): return self.local_driver.split(pair_id) self.local_op.delete(pair_id) def _delete_rmt_lun(self, lun_id): if lun_id and self.rmt_client.check_lun_exist(lun_id): self.rmt_client.delete_lun(lun_id) def delete_replica(self, volume): """Delete replication pair and remote lun. Purpose: 1. delete replication pair 2. delete remote_lun """ LOG.debug('Delete replication, volume: %s.', volume.id) info = get_replication_driver_data(volume) pair_id = info.get('pair_id') if pair_id: self._delete_pair(pair_id) # Delete remote_lun rmt_lun_id = info.get('rmt_lun_id') if rmt_lun_id: self._delete_rmt_lun(rmt_lun_id) def failback(self, volumes): """Failover volumes back to primary backend. The main steps: 1. Switch the role of replication pairs. 2. Copy the second LUN data back to primary LUN. 3. Split replication pairs. 4. Switch the role of replication pairs. 5. Enable replications. """ volumes_update = [] for v in volumes: v_update = {} v_update['volume_id'] = v.id drv_data = get_replication_driver_data(v) pair_id = drv_data.get('pair_id') if not pair_id: LOG.warning("No pair id in volume %s.", v.id) v_update['updates'] = {'replication_status': 'error'} volumes_update.append(v_update) continue rmt_lun_id = drv_data.get('rmt_lun_id') if not rmt_lun_id: LOG.warning("No remote lun id in volume %s.", v.id) v_update['updates'] = {'replication_status': 'error'} volumes_update.append(v_update) continue # Switch replication pair role, and start synchronize. self.local_driver.enable(pair_id) # Wait for synchronize complete. self.local_driver.wait_replica_ready(pair_id) # Split replication pair again self.rmt_driver.failover(pair_id) # Switch replication pair role, and start synchronize. 
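            # Added illustrative note: replication_driver_data keeps the pair
            # bookkeeping as JSON, e.g. (values hypothetical)
            #   {"pair_id": "12345", "rmt_lun_id": "22", "rmt_lun_wwn": "6..."}
            # and is rebuilt below so that the recorded remote LUN becomes the
            # LUN described by the volume's local metadata.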
self.rmt_driver.enable(pair_id) local_metadata = huawei_utils.get_volume_private_data(v) new_drv_data = to_string( {'pair_id': pair_id, 'rmt_lun_id': local_metadata.get('huawei_lun_id'), 'rmt_lun_wwn': local_metadata.get('huawei_lun_wwn')}) location = huawei_utils.to_string( huawei_lun_id=rmt_lun_id, huawei_lun_wwn=drv_data.get('rmt_lun_wwn')) v_update['updates'] = {'provider_location': location, 'replication_status': 'available', 'replication_driver_data': new_drv_data} volumes_update.append(v_update) return volumes_update def failover(self, volumes): """Failover volumes back to secondary array. Split the replication pairs and make the secondary LUNs R&W. """ volumes_update = [] for v in volumes: v_update = {} v_update['volume_id'] = v.id drv_data = get_replication_driver_data(v) pair_id = drv_data.get('pair_id') if not pair_id: LOG.warning("No pair id in volume %s.", v.id) v_update['updates'] = {'replication_status': 'error'} volumes_update.append(v_update) continue rmt_lun_id = drv_data.get('rmt_lun_id') if not rmt_lun_id: LOG.warning("No remote lun id in volume %s.", v.id) v_update['updates'] = {'replication_status': 'error'} volumes_update.append(v_update) continue self.rmt_driver.failover(pair_id) local_metadata = huawei_utils.get_volume_private_data(v) new_drv_data = to_string( {'pair_id': pair_id, 'rmt_lun_id': local_metadata.get('huawei_lun_id'), 'rmt_lun_wwn': local_metadata.get('huawei_lun_wwn')}) location = huawei_utils.to_string( huawei_lun_id=rmt_lun_id, huawei_lun_wwn=drv_data.get('rmt_lun_wwn')) v_update['updates'] = {'provider_location': location, 'replication_status': 'failed-over', 'replication_driver_data': new_drv_data} volumes_update.append(v_update) return volumes_update def get_replication_opts(opts): if opts.get('replication_type') == 'sync': opts['replication_type'] = constants.REPLICA_SYNC_MODEL else: opts['replication_type'] = constants.REPLICA_ASYNC_MODEL return opts ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/drivers/huawei/rest_client.py0000664000175000017500000026424500000000000023504 0ustar00zuulzuul00000000000000# Copyright (c) 2016 Huawei Technologies Co., Ltd. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
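#
# Added descriptive note (not part of the original module): every helper in
# RestClient funnels through call()/do_call(), which normalise any transport
# or HTTP failure into a payload of the form
#   {"error": {"code": <int>, "description": "..."}}
# so callers can uniformly check result['error']['code'] and, where needed,
# assert that a 'data' key is present.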
import json import re import time from oslo_log import log as logging from oslo_utils import excutils import requests from cinder import exception from cinder.i18n import _ from cinder import utils from cinder.volume.drivers.huawei import constants from cinder.volume.drivers.huawei import huawei_utils LOG = logging.getLogger(__name__) class RestClient(object): """Common class for Huawei OceanStor storage system.""" def __init__(self, configuration, san_address, san_user, san_password, **kwargs): self.configuration = configuration self.san_address = san_address self.san_user = san_user self.san_password = san_password self.storage_pools = kwargs.get('storage_pools', self.configuration.storage_pools) self.iscsi_info = kwargs.get('iscsi_info', self.configuration.iscsi_info) self.session = None self.url = None self.device_id = None if hasattr(requests, 'packages'): requests.packages.urllib3.disable_warnings( requests.packages.urllib3.exceptions.InsecureRequestWarning) requests.packages.urllib3.disable_warnings( requests.packages.urllib3.exceptions.InsecurePlatformWarning) def init_http_head(self): self.url = None self.session = requests.Session() self.session.headers.update({ "Connection": "keep-alive", "Content-Type": "application/json"}) self.session.verify = False def do_call(self, url, data, method, calltimeout=constants.SOCKET_TIMEOUT, log_filter_flag=False): """Send requests to Huawei storage server. Send HTTPS call, get response in JSON. Convert response into Python Object and return it. """ if self.url: url = self.url + url kwargs = {'timeout': calltimeout} if data: kwargs['data'] = json.dumps(data) if method in ('POST', 'PUT', 'GET', 'DELETE'): func = getattr(self.session, method.lower()) else: msg = _("Request method %s is invalid.") % method LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) try: res = func(url, **kwargs) except Exception as err: LOG.exception('Bad response from server: %(url)s.' ' Error: %(err)s', {'url': url, 'err': err}) return {"error": {"code": constants.ERROR_CONNECT_TO_SERVER, "description": "Connect to server error."}} try: res.raise_for_status() except requests.HTTPError as exc: return {"error": {"code": exc.response.status_code, "description": str(exc)}} res_json = res.json() if not log_filter_flag: LOG.info('\n\n\n\nRequest URL: %(url)s\n\n' 'Call Method: %(method)s\n\n' 'Request Data: %(data)s\n\n' 'Response Data:%(res)s\n\n', {'url': url, 'method': method, 'data': data, 'res': res_json}) return res_json def login(self): """Login Huawei storage array.""" device_id = None for item_url in self.san_address: url = item_url + "xx/sessions" data = {"username": self.san_user, "password": self.san_password, "scope": "0"} self.init_http_head() result = self.do_call(url, data, 'POST', calltimeout=constants.LOGIN_SOCKET_TIMEOUT, log_filter_flag=True) if (result['error']['code'] != 0) or ("data" not in result): LOG.error("Login error. 
URL: %(url)s\n" "Reason: %(reason)s.", {"url": item_url, "reason": result}) continue LOG.debug('Login success: %(url)s', {'url': item_url}) device_id = result['data']['deviceid'] self.device_id = device_id self.url = item_url + device_id self.session.headers['iBaseToken'] = result['data']['iBaseToken'] if (result['data']['accountstate'] in (constants.PWD_EXPIRED, constants.PWD_RESET)): self.logout() msg = _("Password has expired or has been reset, " "please change the password.") LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) break if device_id is None: msg = _("Failed to login with all rest URLs.") LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) return device_id def try_login(self): try: self.login() except Exception as err: LOG.warning('Login failed. Error: %s.', err) @utils.synchronized('huawei_cinder_call') def call(self, url, data=None, method=None, log_filter_flag=False): """Send requests to server. If fail, try another RestURL. """ device_id = None old_url = self.url result = self.do_call(url, data, method, log_filter_flag=log_filter_flag) error_code = result['error']['code'] if (error_code == constants.ERROR_CONNECT_TO_SERVER or error_code == constants.ERROR_UNAUTHORIZED_TO_SERVER): LOG.error("Can't open the recent url, relogin.") device_id = self.login() if device_id is not None: LOG.debug('Replace URL: \n' 'Old URL: %(old_url)s\n,' 'New URL: %(new_url)s\n.', {'old_url': old_url, 'new_url': self.url}) result = self.do_call(url, data, method, log_filter_flag=log_filter_flag) if result['error']['code'] in constants.RELOGIN_ERROR_PASS: result['error']['code'] = 0 return result def logout(self): """Logout the session.""" url = "/sessions" if self.url: result = self.do_call(url, None, "DELETE") self._assert_rest_result(result, _('Logout session error.')) def _assert_rest_result(self, result, err_str): if result['error']['code'] != 0: msg = (_('%(err)s\nresult: %(res)s.') % {'err': err_str, 'res': result}) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) def _assert_data_in_result(self, result, msg): if 'data' not in result: err_msg = _('%s "data" is not in result.') % msg LOG.error(err_msg) raise exception.VolumeBackendAPIException(data=err_msg) def create_lun(self, lun_params): # Set the mirror switch always on lun_params['MIRRORPOLICY'] = '1' url = "/lun" result = self.call(url, lun_params, 'POST') if result['error']['code'] == constants.ERROR_VOLUME_ALREADY_EXIST: lun_id = self.get_lun_id_by_name(lun_params['NAME']) if lun_id: return self.get_lun_info(lun_id) msg = _('Create lun error.') self._assert_rest_result(result, msg) self._assert_data_in_result(result, msg) return result['data'] def check_lun_exist(self, lun_id, lun_wwn=None): url = "/lun/" + lun_id result = self.call(url, None, "GET") error_code = result['error']['code'] if error_code != 0: return False if lun_wwn and result['data']['WWN'] != lun_wwn: LOG.debug("LUN ID %(id)s with WWN %(wwn)s does not exist on " "the array.", {"id": lun_id, "wwn": lun_wwn}) return False return True def delete_lun(self, lun_id): url = "/lun/" + lun_id data = {"TYPE": "11", "ID": lun_id} result = self.call(url, data, "DELETE") self._assert_rest_result(result, _('Delete lun error.')) def get_all_pools(self): url = "/storagepool" result = self.call(url, None, "GET", log_filter_flag=True) msg = _('Query resource pool error.') self._assert_rest_result(result, msg) self._assert_data_in_result(result, msg) return result['data'] def get_pool_info(self, pool_name=None, pools=None): info = {} if 
not pool_name: return info for pool in pools: if pool_name.strip() != pool['NAME']: continue if pool.get('USAGETYPE') == constants.FILE_SYSTEM_POOL_TYPE: break info['ID'] = pool['ID'] info['CAPACITY'] = pool.get('DATASPACE', pool['USERFREECAPACITY']) info['TOTALCAPACITY'] = pool.get('USERTOTALCAPACITY', '0') info['TIER0CAPACITY'] = pool.get('TIER0CAPACITY', '0') info['TIER1CAPACITY'] = pool.get('TIER1CAPACITY', '0') info['TIER2CAPACITY'] = pool.get('TIER2CAPACITY', '0') return info def get_pool_id(self, pool_name): pools = self.get_all_pools() pool_info = self.get_pool_info(pool_name, pools) if not pool_info: # The following code is to keep compatibility with old version of # Huawei driver. for pool_name in self.storage_pools: pool_info = self.get_pool_info(pool_name, pools) if pool_info: break if not pool_info: msg = _('Can not get pool info. pool: %s') % pool_name LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) return pool_info['ID'] def _get_id_from_result(self, result, name, key): if 'data' in result: for item in result['data']: if name == item.get(key): return item['ID'] def get_lun_id_by_name(self, name): if not name: return url = "/lun?filter=NAME::%s" % name result = self.call(url, None, "GET") self._assert_rest_result(result, _('Get lun id by name error.')) if 'data' in result and result['data']: return result['data'][0]['ID'] def activate_snapshot(self, snapshot_id): url = "/snapshot/activate" data = ({"SNAPSHOTLIST": snapshot_id} if type(snapshot_id) in (list, tuple) else {"SNAPSHOTLIST": [snapshot_id]}) result = self.call(url, data, 'POST') self._assert_rest_result(result, _('Activate snapshot error.')) def create_snapshot(self, lun_id, snapshot_name, snapshot_description): url = "/snapshot" data = {"TYPE": "27", "NAME": snapshot_name, "PARENTTYPE": "11", "DESCRIPTION": snapshot_description, "PARENTID": lun_id} result = self.call(url, data, 'POST') msg = _('Create snapshot error.') self._assert_rest_result(result, msg) self._assert_data_in_result(result, msg) return result['data'] def check_snapshot_exist(self, snapshot_id): url = "/snapshot/%s" % snapshot_id result = self.call(url, None, "GET") error_code = result['error']['code'] if error_code != 0: return False return True def stop_snapshot(self, snapshot_id): url = "/snapshot/stop" stopdata = {"ID": snapshot_id} result = self.call(url, stopdata, "PUT") self._assert_rest_result(result, _('Stop snapshot error.')) def delete_snapshot(self, snapshotid): url = "/snapshot/%s" % snapshotid data = {"TYPE": "27", "ID": snapshotid} result = self.call(url, data, "DELETE") self._assert_rest_result(result, _('Delete snapshot error.')) def get_snapshot_id_by_name(self, name): if not name: return url = "/snapshot?filter=NAME::%s" % name description = 'The snapshot license file is unavailable.' 
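# The description string above is a sentinel: when the snapshot license is
# not installed, the array answers with an error whose description matches
# it, and the lookup below then returns None instead of raising.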
result = self.call(url, None, "GET") if 'error' in result: if description == result['error']['description']: return self._assert_rest_result(result, _('Get snapshot id error.')) if 'data' in result and result['data']: return result['data'][0]['ID'] def create_luncopy(self, luncopyname, srclunid, tgtlunid, copyspeed): """Create a luncopy.""" url = "/luncopy" if copyspeed not in constants.LUN_COPY_SPEED_TYPES: LOG.warning('The copy speed %(copyspeed)s is not valid, ' 'using default value %(default)s instead.', {'copyspeed': copyspeed, 'default': constants.LUN_COPY_SPEED_MEDIUM}) copyspeed = constants.LUN_COPY_SPEED_MEDIUM data = {"TYPE": 219, "NAME": luncopyname, "DESCRIPTION": luncopyname, "COPYSPEED": copyspeed, "LUNCOPYTYPE": "1", "SOURCELUN": ("INVALID;%s;INVALID;INVALID;INVALID" % srclunid), "TARGETLUN": ("INVALID;%s;INVALID;INVALID;INVALID" % tgtlunid)} result = self.call(url, data, 'POST') msg = _('Create luncopy error.') self._assert_rest_result(result, msg) self._assert_data_in_result(result, msg) return result['data']['ID'] def add_host_to_hostgroup(self, host_id): """Associate host to hostgroup. If hostgroup doesn't exist, create one. """ hostgroup_name = constants.HOSTGROUP_PREFIX + host_id hostgroup_id = self.create_hostgroup_with_check(hostgroup_name) is_associated = self._is_host_associate_to_hostgroup(hostgroup_id, host_id) if not is_associated: self._associate_host_to_hostgroup(hostgroup_id, host_id) return hostgroup_id def get_tgt_port_group(self, tgt_port_group): """Find target portgroup id by target port group name.""" url = "/portgroup?range=[0-8191]&TYPE=257" result = self.call(url, None, "GET") msg = _('Find portgroup error.') self._assert_rest_result(result, msg) return self._get_id_from_result(result, tgt_port_group, 'NAME') def _associate_portgroup_to_view(self, view_id, portgroup_id): url = "/MAPPINGVIEW/CREATE_ASSOCIATE" data = {"ASSOCIATEOBJTYPE": "257", "ASSOCIATEOBJID": portgroup_id, "TYPE": "245", "ID": view_id} result = self.call(url, data, "PUT") self._assert_rest_result(result, _('Associate portgroup to mapping ' 'view error.')) def _portgroup_associated(self, view_id, portgroup_id): url = ("/mappingview/associate?TYPE=245&" "ASSOCIATEOBJTYPE=257&ASSOCIATEOBJID=%s" % portgroup_id) result = self.call(url, None, "GET") self._assert_rest_result(result, _('Check portgroup associate error.')) if self._get_id_from_result(result, view_id, 'ID'): return True return False def do_mapping(self, lun_id, hostgroup_id, host_id, portgroup_id=None, lun_type=constants.LUN_TYPE, hypermetro_lun=False): """Add hostgroup and lungroup to mapping view.""" lungroup_name = constants.LUNGROUP_PREFIX + host_id mapping_view_name = constants.MAPPING_VIEW_PREFIX + host_id lungroup_id = self._find_lungroup(lungroup_name) view_id = self.find_mapping_view(mapping_view_name) map_info = {} LOG.info( 'do_mapping, lun_group: %(lun_group)s, ' 'view_id: %(view_id)s, lun_id: %(lun_id)s.', {'lun_group': lungroup_id, 'view_id': view_id, 'lun_id': lun_id}) try: # Create lungroup and add LUN into to lungroup. 
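# (Overview of this try block, added for clarity: the lungroup and mapping
# view are created on first use and reused afterwards; the hostgroup, the
# lungroup and, if given, the portgroup are then associated to the view.
# For hypermetro LUNs the view's available host LUN ID list is also
# collected into map_info. On any failure the LUN is removed from the
# lungroup again before the exception is re-raised.)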
if lungroup_id is None: lungroup_id = self._create_lungroup(lungroup_name) is_associated = self._is_lun_associated_to_lungroup(lungroup_id, lun_id, lun_type) if not is_associated: self.associate_lun_to_lungroup(lungroup_id, lun_id, lun_type) if view_id is None: view_id = self._add_mapping_view(mapping_view_name) self._associate_hostgroup_to_view(view_id, hostgroup_id) self._associate_lungroup_to_view(view_id, lungroup_id) if portgroup_id: self._associate_portgroup_to_view(view_id, portgroup_id) else: if not self.hostgroup_associated(view_id, hostgroup_id): self._associate_hostgroup_to_view(view_id, hostgroup_id) if not self.lungroup_associated(view_id, lungroup_id): self._associate_lungroup_to_view(view_id, lungroup_id) if portgroup_id: if not self._portgroup_associated(view_id, portgroup_id): self._associate_portgroup_to_view(view_id, portgroup_id) if hypermetro_lun: aval_luns = self.find_view_by_id(view_id) map_info["lun_id"] = lun_id map_info["view_id"] = view_id map_info["aval_luns"] = aval_luns except Exception: with excutils.save_and_reraise_exception(): LOG.error( 'Error occurred when adding hostgroup and lungroup to ' 'view. Remove lun from lungroup now.') self.remove_lun_from_lungroup(lungroup_id, lun_id, lun_type) return map_info def check_iscsi_initiators_exist_in_host(self, host_id): url = "/iscsi_initiator?range=[0-256]&PARENTID=%s" % host_id result = self.call(url, None, "GET") self._assert_rest_result(result, 'Get host initiators info failed.') if "data" in result: return True return False def ensure_initiator_added(self, initiator_name, host_id): added = self._initiator_is_added_to_array(initiator_name) if not added: self._add_initiator_to_array(initiator_name) if not self.is_initiator_associated_to_host(initiator_name, host_id): self._associate_initiator_to_host(initiator_name, host_id) def _get_iscsi_tgt_port(self): url = "/iscsidevicename" result = self.call(url, None, 'GET') msg = _('Get iSCSI target port error.') self._assert_rest_result(result, msg) self._assert_data_in_result(result, msg) return result['data'][0]['CMO_ISCSI_DEVICE_NAME'] def find_hostgroup(self, groupname): """Get the given hostgroup id.""" url = "/hostgroup?range=[0-8191]" result = self.call(url, None, "GET") self._assert_rest_result(result, _('Get hostgroup information error.')) return self._get_id_from_result(result, groupname, 'NAME') def _find_lungroup(self, lungroup_name): """Get the given hostgroup id.""" url = "/lungroup?range=[0-8191]" result = self.call(url, None, "GET") self._assert_rest_result(result, _('Get lungroup information error.')) return self._get_id_from_result(result, lungroup_name, 'NAME') def create_hostgroup_with_check(self, hostgroup_name): """Check if host exists on the array, or create it.""" hostgroup_id = self.find_hostgroup(hostgroup_name) if hostgroup_id: LOG.info( 'create_hostgroup_with_check. ' 'hostgroup name: %(name)s, ' 'hostgroup id: %(id)s', {'name': hostgroup_name, 'id': hostgroup_id}) return hostgroup_id try: hostgroup_id = self._create_hostgroup(hostgroup_name) except Exception: LOG.info( 'Failed to create hostgroup: %(name)s. ' 'Please check if it exists on the array.', {'name': hostgroup_name}) hostgroup_id = self.find_hostgroup(hostgroup_name) if hostgroup_id is None: err_msg = (_( 'Failed to create hostgroup: %(name)s. ' 'Check if it exists on the array.') % {'name': hostgroup_name}) LOG.error(err_msg) raise exception.VolumeBackendAPIException(data=err_msg) LOG.info( 'create_hostgroup_with_check. ' 'Create hostgroup success. 
' 'hostgroup name: %(name)s, ' 'hostgroup id: %(id)s', {'name': hostgroup_name, 'id': hostgroup_id}) return hostgroup_id def _create_hostgroup(self, hostgroup_name): url = "/hostgroup" data = {"TYPE": "14", "NAME": hostgroup_name} result = self.call(url, data, 'POST') msg = _('Create hostgroup error.') self._assert_rest_result(result, msg) self._assert_data_in_result(result, msg) return result['data']['ID'] def _create_lungroup(self, lungroup_name): url = "/lungroup" data = {"DESCRIPTION": lungroup_name, "APPTYPE": '0', "GROUPTYPE": '0', "NAME": lungroup_name} result = self.call(url, data, 'POST') msg = _('Create lungroup error.') self._assert_rest_result(result, msg) self._assert_data_in_result(result, msg) return result['data']['ID'] def delete_lungroup(self, lungroup_id): url = "/LUNGroup/" + lungroup_id result = self.call(url, None, "DELETE") self._assert_rest_result(result, _('Delete lungroup error.')) def lungroup_associated(self, view_id, lungroup_id): url = ("/mappingview/associate?TYPE=245&" "ASSOCIATEOBJTYPE=256&ASSOCIATEOBJID=%s" % lungroup_id) result = self.call(url, None, "GET") self._assert_rest_result(result, _('Check lungroup associate error.')) if self._get_id_from_result(result, view_id, 'ID'): return True return False def hostgroup_associated(self, view_id, hostgroup_id): url = ("/mappingview/associate?TYPE=245&" "ASSOCIATEOBJTYPE=14&ASSOCIATEOBJID=%s" % hostgroup_id) result = self.call(url, None, "GET") self._assert_rest_result(result, _('Check hostgroup associate error.')) if self._get_id_from_result(result, view_id, 'ID'): return True return False def get_host_lun_id(self, host_id, lun_id, lun_type=constants.LUN_TYPE): cmd_type = 'lun' if lun_type == constants.LUN_TYPE else 'snapshot' url = ("/%s/associate?TYPE=%s&ASSOCIATEOBJTYPE=21" "&ASSOCIATEOBJID=%s" % (cmd_type, lun_type, host_id)) result = self.call(url, None, "GET") self._assert_rest_result(result, _('Find host lun id error.')) host_lun_id = 1 if 'data' in result: for item in result['data']: if lun_id == item['ID']: associate_data = item['ASSOCIATEMETADATA'] try: hostassoinfo = json.loads(associate_data) host_lun_id = hostassoinfo['HostLUNID'] break except Exception as err: LOG.error("JSON transfer data error. %s.", err) raise return host_lun_id def get_host_id_by_name(self, host_name): """Get the given host ID.""" url = "/host?filter=NAME::%s" % host_name result = self.call(url, None, "GET") self._assert_rest_result(result, _('Find host in hostgroup error.')) if 'data' in result and result['data']: return result['data'][0]['ID'] def add_host_with_check(self, host_name): host_id = huawei_utils.get_host_id(self, host_name) if host_id: LOG.info('Got exist host. host name: %(name)s, ' 'host id: %(id)s.', {'name': host_name, 'id': host_id}) return host_id encoded_name = huawei_utils.encode_host_name(host_name) try: host_id = self._add_host(encoded_name, host_name) except Exception: LOG.info('Failed to create host %s, check if already exist.', encoded_name) host_id = self.get_host_id_by_name(encoded_name) if not host_id: msg = _('Failed to create host: %(name)s. ' 'Please check if it exists on the array.' ) % {'name': encoded_name} LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) LOG.info('create host success. 
host name: %(name)s, ' 'host id: %(id)s', {'name': encoded_name, 'id': host_id}) return host_id def _add_host(self, hostname, host_name_before_hash): """Add a new host.""" url = "/host" data = {"TYPE": "21", "NAME": hostname, "OPERATIONSYSTEM": "0", "DESCRIPTION": host_name_before_hash} result = self.call(url, data, 'POST') self._assert_rest_result(result, _('Add new host error.')) if 'data' in result: return result['data']['ID'] def _is_host_associate_to_hostgroup(self, hostgroup_id, host_id): """Check whether the host is associated to the hostgroup.""" url = ("/host/associate?TYPE=21&" "ASSOCIATEOBJTYPE=14&ASSOCIATEOBJID=%s" % hostgroup_id) result = self.call(url, None, "GET") self._assert_rest_result(result, _('Check hostgroup associate error.')) if self._get_id_from_result(result, host_id, 'ID'): return True return False def _is_lun_associated_to_lungroup(self, lungroup_id, lun_id, lun_type=constants.LUN_TYPE): """Check whether the lun is associated to the lungroup.""" cmd_type = 'lun' if lun_type == constants.LUN_TYPE else 'snapshot' url = ("/%s/associate?TYPE=%s&" "ASSOCIATEOBJTYPE=256&ASSOCIATEOBJID=%s" % (cmd_type, lun_type, lungroup_id)) result = self.call(url, None, "GET") self._assert_rest_result(result, _('Check lungroup associate error.')) if self._get_id_from_result(result, lun_id, 'ID'): return True return False def _associate_host_to_hostgroup(self, hostgroup_id, host_id): url = "/hostgroup/associate" data = {"TYPE": "14", "ID": hostgroup_id, "ASSOCIATEOBJTYPE": "21", "ASSOCIATEOBJID": host_id} result = self.call(url, data, 'POST') self._assert_rest_result(result, _('Associate host to hostgroup ' 'error.')) def associate_lun_to_lungroup(self, lungroup_id, lun_id, lun_type=constants.LUN_TYPE): """Associate lun to lungroup.""" url = "/lungroup/associate" data = {"ID": lungroup_id, "ASSOCIATEOBJTYPE": lun_type, "ASSOCIATEOBJID": lun_id} result = self.call(url, data, 'POST') self._assert_rest_result(result, _('Associate lun to lungroup error.')) def remove_lun_from_lungroup(self, lungroup_id, lun_id, lun_type=constants.LUN_TYPE): """Remove lun from lungroup.""" url = ("/lungroup/associate?ID=%s&ASSOCIATEOBJTYPE=%s" "&ASSOCIATEOBJID=%s" % (lungroup_id, lun_type, lun_id)) result = self.call(url, None, 'DELETE') self._assert_rest_result( result, _('Delete associated lun from lungroup error.')) def _initiator_is_added_to_array(self, ininame): """Check whether the initiator is already added on the array.""" url = "/iscsi_initiator?range=[0-256]" result = self.call(url, None, "GET") self._assert_rest_result(result, _('Check initiator added to array error.')) if self._get_id_from_result(result, ininame, 'ID'): return True return False def is_initiator_associated_to_host(self, ininame, host_id): """Check whether the initiator is associated to the host.""" url = "/iscsi_initiator?range=[0-256]" result = self.call(url, None, "GET") self._assert_rest_result( result, _('Check initiator associated to host error.')) for item in result.get('data'): if item['ID'] == ininame: if item['ISFREE'] == "true": return False if item['PARENTID'] == host_id: return True else: msg = (_("Initiator %(ini)s has been added to another " "host %(host)s.") % {"ini": ininame, "host": item['PARENTNAME']}) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) return True def _add_initiator_to_array(self, initiator_name): """Add a new initiator to storage device.""" url = "/iscsi_initiator" data = {"TYPE": "222", "ID": initiator_name, "USECHAP": "false"} result = self.call(url, data, "POST") 
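# (Flow note, added for clarity: ensure_initiator_added() above first
# registers the initiator on the array if it is not known yet, then
# _associate_initiator_to_host() below binds it to the host, using CHAP
# credentials and/or an ALUA multipath type when the backend's iscsi_info
# provides them for that initiator.)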
self._assert_rest_result(result, _('Add initiator to array error.')) def _add_initiator_to_host(self, initiator_name, host_id): url = "/iscsi_initiator/" + initiator_name data = {"TYPE": "222", "ID": initiator_name, "USECHAP": "false", "PARENTTYPE": "21", "PARENTID": host_id} result = self.call(url, data, "PUT") self._assert_rest_result(result, _('Associate initiator to host error.')) def _associate_initiator_to_host(self, initiator_name, host_id): """Associate initiator with the host.""" chapinfo = self.find_chap_info(self.iscsi_info, initiator_name) multipath_type = self._find_alua_info(self.iscsi_info, initiator_name) if chapinfo: LOG.info('Use CHAP when adding initiator to host.') self._use_chap(chapinfo, initiator_name, host_id) else: self._add_initiator_to_host(initiator_name, host_id) if multipath_type: LOG.info('Use ALUA when adding initiator to host.') self._use_alua(initiator_name, multipath_type) def find_chap_info(self, iscsi_info, initiator_name): """Find CHAP info from xml.""" chapinfo = None ini = iscsi_info['initiators'].get(initiator_name) if ini and ini.get('CHAPinfo'): chapinfo = ini['CHAPinfo'] return chapinfo def _find_alua_info(self, iscsi_info, initiator_name): """Find ALUA info from xml.""" multipath_type = 0 ini = iscsi_info['initiators'].get(initiator_name) if ini and ini.get('ALUA'): if ini['ALUA'] != '1' and ini['ALUA'] != '0': msg = (_( 'Invalid ALUA value. ' 'ALUA value must be 1 or 0.')) LOG.error(msg) raise exception.InvalidInput(msg) else: multipath_type = ini['ALUA'] return multipath_type def _use_chap(self, chapinfo, initiator_name, host_id): """Use CHAP when adding initiator to host.""" (chap_username, chap_password) = chapinfo.split(";") url = "/iscsi_initiator/" + initiator_name data = {"TYPE": "222", "USECHAP": "true", "CHAPNAME": chap_username, "CHAPPASSWORD": chap_password, "ID": initiator_name, "PARENTTYPE": "21", "PARENTID": host_id} result = self.call(url, data, "PUT", log_filter_flag=True) msg = _('Use CHAP to associate initiator to host error. 
' 'Please check the CHAP username and password.') self._assert_rest_result(result, msg) def _use_alua(self, initiator_name, multipath_type): """Use ALUA when adding initiator to host.""" url = "/iscsi_initiator" data = {"ID": initiator_name, "MULTIPATHTYPE": multipath_type} result = self.call(url, data, "PUT") self._assert_rest_result( result, _('Use ALUA to associate initiator to host error.')) def remove_chap(self, initiator_name): """Remove CHAP when terminate connection.""" url = "/iscsi_initiator" data = {"USECHAP": "false", "MULTIPATHTYPE": "0", "ID": initiator_name} result = self.call(url, data, "PUT") self._assert_rest_result(result, _('Remove CHAP error.')) def find_mapping_view(self, name): """Find mapping view.""" url = "/mappingview?range=[0-8191]" result = self.call(url, None, "GET") msg = _('Find mapping view error.') self._assert_rest_result(result, msg) return self._get_id_from_result(result, name, 'NAME') def _add_mapping_view(self, name): url = "/mappingview" data = {"NAME": name, "TYPE": "245"} result = self.call(url, data, 'POST') self._assert_rest_result(result, _('Add mapping view error.')) return result['data']['ID'] def _associate_hostgroup_to_view(self, view_id, hostgroup_id): url = "/MAPPINGVIEW/CREATE_ASSOCIATE" data = {"ASSOCIATEOBJTYPE": "14", "ASSOCIATEOBJID": hostgroup_id, "TYPE": "245", "ID": view_id} result = self.call(url, data, "PUT") self._assert_rest_result(result, _('Associate host to mapping view ' 'error.')) def _associate_lungroup_to_view(self, view_id, lungroup_id): url = "/MAPPINGVIEW/CREATE_ASSOCIATE" data = {"ASSOCIATEOBJTYPE": "256", "ASSOCIATEOBJID": lungroup_id, "TYPE": "245", "ID": view_id} result = self.call(url, data, "PUT") self._assert_rest_result( result, _('Associate lungroup to mapping view error.')) def delete_lungroup_mapping_view(self, view_id, lungroup_id): """Remove lungroup associate from the mapping view.""" url = "/mappingview/REMOVE_ASSOCIATE" data = {"ASSOCIATEOBJTYPE": "256", "ASSOCIATEOBJID": lungroup_id, "TYPE": "245", "ID": view_id} result = self.call(url, data, "PUT") self._assert_rest_result(result, _('Delete lungroup from mapping view ' 'error.')) def delete_hostgoup_mapping_view(self, view_id, hostgroup_id): """Remove hostgroup associate from the mapping view.""" url = "/mappingview/REMOVE_ASSOCIATE" data = {"ASSOCIATEOBJTYPE": "14", "ASSOCIATEOBJID": hostgroup_id, "TYPE": "245", "ID": view_id} result = self.call(url, data, "PUT") self._assert_rest_result( result, _('Delete hostgroup from mapping view error.')) def delete_portgroup_mapping_view(self, view_id, portgroup_id): """Remove portgroup associate from the mapping view.""" url = "/mappingview/REMOVE_ASSOCIATE" data = {"ASSOCIATEOBJTYPE": "257", "ASSOCIATEOBJID": portgroup_id, "TYPE": "245", "ID": view_id} result = self.call(url, data, "PUT") self._assert_rest_result( result, _('Delete portgroup from mapping view error.')) def delete_mapping_view(self, view_id): """Remove mapping view from the storage.""" url = "/mappingview/" + view_id result = self.call(url, None, "DELETE") self._assert_rest_result(result, _('Delete mapping view error.')) def get_obj_count_from_lungroup(self, lungroup_id): """Get all objects count associated to the lungroup.""" lun_count = self._get_obj_count_from_lungroup_by_type( lungroup_id, constants.LUN_TYPE) snapshot_count = self._get_obj_count_from_lungroup_by_type( lungroup_id, constants.SNAPSHOT_TYPE) return int(lun_count) + int(snapshot_count) def _get_obj_count_from_lungroup_by_type(self, lungroup_id, lun_type=constants.LUN_TYPE): 
cmd_type = 'lun' if lun_type == constants.LUN_TYPE else 'snapshot' lunnum = 0 if not lungroup_id: return lunnum url = ("/%s/count?TYPE=%s&ASSOCIATEOBJTYPE=256&" "ASSOCIATEOBJID=%s" % (cmd_type, lun_type, lungroup_id)) result = self.call(url, None, "GET") self._assert_rest_result(result, _('Find obj number error.')) if 'data' in result: lunnum = int(result['data']['COUNT']) return lunnum def is_portgroup_associated_to_view(self, view_id, portgroup_id): """Check whether the port group is associated to the mapping view.""" url = ("/portgroup/associate?ASSOCIATEOBJTYPE=245&" "ASSOCIATEOBJID=%s&range=[0-8191]" % view_id) result = self.call(url, None, "GET") self._assert_rest_result(result, _('Find portgroup from mapping view ' 'error.')) if self._get_id_from_result(result, portgroup_id, 'ID'): return True return False def find_lungroup_from_map(self, view_id): """Get lungroup from the given map""" url = ("/mappingview/associate/lungroup?TYPE=256&" "ASSOCIATEOBJTYPE=245&ASSOCIATEOBJID=%s" % view_id) result = self.call(url, None, "GET") self._assert_rest_result(result, _('Find lun group from mapping view ' 'error.')) lungroup_id = None if 'data' in result: # One map can have only one lungroup. for item in result['data']: lungroup_id = item['ID'] return lungroup_id def start_luncopy(self, luncopy_id): """Start a LUNcopy.""" url = "/LUNCOPY/start" data = {"TYPE": "219", "ID": luncopy_id} result = self.call(url, data, "PUT") self._assert_rest_result(result, _('Start LUNcopy error.')) def _get_capacity(self, pool_name, result): """Get free capacity and total capacity of the pool.""" pool_info = self.get_pool_info(pool_name, result) pool_capacity = {'total_capacity': 0.0, 'free_capacity': 0.0} if pool_info: total = float(pool_info['TOTALCAPACITY']) / constants.CAPACITY_UNIT free = float(pool_info['CAPACITY']) / constants.CAPACITY_UNIT pool_capacity['total_capacity'] = total pool_capacity['free_capacity'] = free return pool_capacity def _get_disk_type(self, pool_name, result): """Get disk type of the pool.""" pool_info = self.get_pool_info(pool_name, result) if not pool_info: return None pool_disk = [] for i, x in enumerate(['ssd', 'sas', 'nl_sas']): if pool_info['TIER%dCAPACITY' % i] != '0': pool_disk.append(x) if len(pool_disk) > 1: pool_disk = ['mix'] return pool_disk[0] if pool_disk else None def get_luncopy_info(self, luncopy_id): """Get LUNcopy information.""" url = "/LUNCOPY?range=[0-1023]" result = self.call(url, None, "GET") self._assert_rest_result(result, _('Get LUNcopy information error.')) luncopyinfo = {} if 'data' in result: for item in result['data']: if luncopy_id == item['ID']: luncopyinfo['name'] = item['NAME'] luncopyinfo['id'] = item['ID'] luncopyinfo['state'] = item['HEALTHSTATUS'] luncopyinfo['status'] = item['RUNNINGSTATUS'] break return luncopyinfo def delete_luncopy(self, luncopy_id): """Delete a LUNcopy.""" url = "/LUNCOPY/%s" % luncopy_id result = self.call(url, None, "DELETE") self._assert_rest_result(result, _('Delete LUNcopy error.')) def get_init_targ_map(self, wwns): init_targ_map = {} tgt_port_wwns = [] for wwn in wwns: tgtwwpns = self.get_fc_target_wwpns(wwn) if not tgtwwpns: continue init_targ_map[wwn] = tgtwwpns for tgtwwpn in tgtwwpns: if tgtwwpn not in tgt_port_wwns: tgt_port_wwns.append(tgtwwpn) return (tgt_port_wwns, init_targ_map) def get_online_free_wwns(self): """Get online free WWNs. If no new ports connected, return an empty list. 
""" url = "/fc_initiator?ISFREE=true&range=[0-8191]" result = self.call(url, None, "GET") msg = _('Get connected free FC wwn error.') self._assert_rest_result(result, msg) wwns = [] if 'data' in result: for item in result['data']: if item['RUNNINGSTATUS'] == constants.FC_INIT_ONLINE: wwns.append(item['ID']) return wwns def _get_fc_initiator_count(self): url = '/fc_initiator/count' result = self.call(url, None, "GET") self._assert_rest_result(result, _('Get fc initiator count error.')) return int(result['data']['COUNT']) def get_fc_initiator_on_array(self): count = self._get_fc_initiator_count() if count <= 0: return [] fc_initiators = [] for i in range((count - 1) // constants.MAX_QUERY_COUNT + 1): url = '/fc_initiator?range=[%d-%d]' % ( i * constants.MAX_QUERY_COUNT, (i + 1) * constants.MAX_QUERY_COUNT) result = self.call(url, None, "GET") msg = _('Get FC initiators from array error.') self._assert_rest_result(result, msg) for item in result.get('data', []): fc_initiators.append(item['ID']) return fc_initiators def add_fc_port_to_host(self, host_id, wwn): """Add a FC port to the host.""" url = "/fc_initiator/" + wwn data = {"TYPE": "223", "ID": wwn, "PARENTTYPE": 21, "PARENTID": host_id} result = self.call(url, data, "PUT") self._assert_rest_result(result, _('Add FC port to host error.')) def _get_iscsi_port_info(self, ip): """Get iscsi port info in order to build the iscsi target iqn.""" url = "/eth_port" result = self.call(url, None, "GET") msg = _('Get iSCSI port information error.') self._assert_rest_result(result, msg) self._assert_data_in_result(result, msg) iscsi_port_info = None for item in result['data']: if ip == item['IPV4ADDR']: iscsi_port_info = item['LOCATION'] break return iscsi_port_info def _get_tgt_iqn(self, iscsi_ip): """Get target iSCSI iqn.""" ip_info = self._get_iscsi_port_info(iscsi_ip) iqn_prefix = self._get_iscsi_tgt_port() if not ip_info: err_msg = (_( 'Get iSCSI port info error, please check the target IP ' 'configured in huawei conf file.')) LOG.error(err_msg) raise exception.VolumeBackendAPIException(data=err_msg) LOG.debug('Request ip info is: %s.', ip_info) split_list = ip_info.split(".") newstr = split_list[1] + split_list[2] LOG.info('New str info is: %s.', newstr) if ip_info: if newstr[0] == 'A': ctr = "0" elif newstr[0] == 'B': ctr = "1" interface = '0' + newstr[1] port = '0' + newstr[3] iqn_suffix = ctr + '02' + interface + port for i in range(0, len(iqn_suffix)): if iqn_suffix[i] != '0': iqn_suffix = iqn_suffix[i:] break iqn = iqn_prefix + ':' + iqn_suffix + ':' + iscsi_ip LOG.info('_get_tgt_iqn: iSCSI target iqn is: %s.', iqn) return iqn def get_fc_target_wwpns(self, wwn): url = ("/host_link?INITIATOR_TYPE=223&INITIATOR_PORT_WWN=" + wwn) result = self.call(url, None, "GET") msg = _('Get FC target wwpn error.') self._assert_rest_result(result, msg) fc_wwpns = [] if "data" in result: for item in result['data']: if wwn == item['INITIATOR_PORT_WWN']: fc_wwpns.append(item['TARGET_PORT_WWN']) return fc_wwpns def update_volume_stats(self): data = {} data['pools'] = [] result = self.get_all_pools() for pool_name in self.storage_pools: capacity = self._get_capacity(pool_name, result) disk_type = self._get_disk_type(pool_name, result) pool = {} pool.update(dict( location_info=self.device_id, pool_name=pool_name, total_capacity_gb=capacity['total_capacity'], free_capacity_gb=capacity['free_capacity'], reserved_percentage=self.configuration.safe_get( 'reserved_percentage'), max_over_subscription_ratio=self.configuration.safe_get( 'max_over_subscription_ratio'), 
)) if disk_type: pool['disk_type'] = disk_type data['pools'].append(pool) return data def _find_qos_policy_info(self, policy_name): url = "/ioclass" result = self.call(url, None, "GET") msg = _('Get QoS policy error.') self._assert_rest_result(result, msg) qos_info = {} if 'data' in result: for item in result['data']: if policy_name == item['NAME']: qos_info['ID'] = item['ID'] lun_list = json.loads(item['LUNLIST']) qos_info['LUNLIST'] = lun_list qos_info['RUNNINGSTATUS'] = item['RUNNINGSTATUS'] break return qos_info def _update_qos_policy_lunlist(self, lun_list, policy_id): url = "/ioclass/" + policy_id data = {"TYPE": "230", "ID": policy_id, "LUNLIST": lun_list} result = self.call(url, data, "PUT") self._assert_rest_result(result, _('Update QoS policy error.')) def _get_tgt_ip_from_portgroup(self, portgroup_id): target_ips = [] url = ("/eth_port/associate?TYPE=213&ASSOCIATEOBJTYPE=257" "&ASSOCIATEOBJID=%s" % portgroup_id) result = self.call(url, None, "GET") msg = _('Get target IP error.') self._assert_rest_result(result, msg) self._assert_data_in_result(result, msg) if 'data' in result: for item in result['data']: if (item['IPV4ADDR'] and item['HEALTHSTATUS'] == constants.STATUS_HEALTH and item['RUNNINGSTATUS'] == constants.STATUS_RUNNING): target_ip = item['IPV4ADDR'] LOG.info('_get_tgt_ip_from_portgroup: Get ip: %s.', target_ip) target_ips.append(target_ip) return target_ips def get_iscsi_params(self, connector): """Get target iSCSI params, including iqn, IP.""" initiator = connector['initiator'] multipath = connector['multipath'] target_ips = [] target_iqns = [] temp_tgt_ips = [] portgroup = None portgroup_id = None if multipath: ini = self.iscsi_info['initiators'].get(initiator) if ini and ini.get('TargetPortGroup'): portgroup = ini['TargetPortGroup'] if portgroup: portgroup_id = self.get_tgt_port_group(portgroup) temp_tgt_ips = self._get_tgt_ip_from_portgroup(portgroup_id) valid_port_info = self._get_tgt_port_ip_from_rest() valid_tgt_ips = valid_port_info for ip in temp_tgt_ips: if ip in valid_tgt_ips: target_ips.append(ip) if not target_ips: msg = (_( 'get_iscsi_params: No valid port in portgroup. ' 'portgroup_id: %(id)s, please check it on storage.') % {'id': portgroup_id}) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) else: target_ips = self._get_target_ip(initiator) else: target_ips = self._get_target_ip(initiator) # Deal with the remote tgt ip. if 'remote_target_ip' in connector: target_ips.append(connector['remote_target_ip']) LOG.info('Get the default ip: %s.', target_ips) for ip in target_ips: target_iqn = self._get_tgt_iqn_from_rest(ip) if not target_iqn: target_iqn = self._get_tgt_iqn(ip) if target_iqn: target_iqns.append(target_iqn) return (target_iqns, target_ips, portgroup_id) def _get_target_ip(self, initiator): target_ips = [] ini = self.iscsi_info['initiators'].get(initiator) if ini and ini.get('TargetIP'): target_ips.append(ini['TargetIP']) # If not specify target IP for some initiators, use default IP. 
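# Illustrative sketch of the iscsi_info structure these lookups assume
# (key names are the ones used in this module; the values are made up):
#
#   self.iscsi_info = {
#       'default_target_ips': ['192.168.1.10'],
#       'initiators': {
#           'iqn.1993-08.org.debian:01:abc': {
#               'TargetIP': '192.168.1.11',
#               'TargetPortGroup': 'portgroup001',
#               'CHAPinfo': 'chap_user;chap_password',
#               'ALUA': '1',
#           },
#       },
#   }
#
# When an initiator has no TargetIP of its own, the first entry of
# default_target_ips is used, as handled below.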
if not target_ips: default_target_ips = self.iscsi_info['default_target_ips'] if default_target_ips: target_ips.append(default_target_ips[0]) else: msg = (_( 'get_iscsi_params: Failed to get target IP ' 'for initiator %(ini)s, please check config file.') % {'ini': initiator}) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) return target_ips def _get_tgt_port_ip_from_rest(self): url = "/iscsi_tgt_port" result = self.call(url, None, "GET") info_list = [] target_ips = [] if result['error']['code'] != 0: LOG.warning("Can't find target port info from rest.") return target_ips elif not result['data']: msg = (_( "Can't find valid IP from rest, please check it on storage.")) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) if 'data' in result: for item in result['data']: info_list.append(item['ID']) if not info_list: LOG.warning("Can't find target port info from rest.") return target_ips for info in info_list: split_list = info.split(",") info_before = split_list[0] iqn_info = info_before.split("+") target_iqn = iqn_info[1] ip_info = target_iqn.split(":") target_ip = ip_info[-1] target_ips.append(target_ip) return target_ips def _get_tgt_iqn_from_rest(self, target_ip): url = "/iscsi_tgt_port" result = self.call(url, None, "GET") target_iqn = None if result['error']['code'] != 0: LOG.warning("Can't find target iqn from rest.") return target_iqn ip_pattern = re.compile(r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}') if 'data' in result: for item in result['data']: ips = re.findall(ip_pattern, item['ID']) for ip in ips: if target_ip == ip: target_iqn = item['ID'] break if not target_iqn: LOG.warning("Can't find target iqn from rest.") return target_iqn split_list = target_iqn.split(",") target_iqn_before = split_list[0] split_list_new = target_iqn_before.split("+") target_iqn = split_list_new[1] return target_iqn def create_qos(self, qos, lun_id): # Get local time. localtime = time.strftime('%Y%m%d%H%M%S', time.localtime(time.time())) # Package QoS name. qos_name = constants.QOS_NAME_PREFIX + lun_id + '_' + localtime data = {"TYPE": "230", "NAME": qos_name, "LUNLIST": ["%s" % lun_id], "CLASSTYPE": "1", "SCHEDULEPOLICY": "2", "SCHEDULESTARTTIME": "1410969600", "STARTTIME": "08:00", "DURATION": "86400", "CYCLESET": "[1,2,3,4,5,6,0]", } data.update(qos) url = "/ioclass" result = self.call(url, data, 'POST') self._assert_rest_result(result, _('Create QoS policy error.')) return result['data']['ID'] def delete_qos(self, qos_id): url = "/ioclass/" + qos_id data = {"TYPE": "230", "ID": qos_id} result = self.call(url, data, 'DELETE') self._assert_rest_result(result, _('Delete QoS policy error.')) def activate_deactivate_qos(self, qos_id, enablestatus): """Activate or deactivate QoS. 
enablestatus: true (activate) enbalestatus: false (deactivate) """ url = "/ioclass/active/" + qos_id data = {"TYPE": 230, "ID": qos_id, "ENABLESTATUS": enablestatus} result = self.call(url, data, "PUT") self._assert_rest_result( result, _('Activate or deactivate QoS error.')) def get_qos_info(self, qos_id): """Get QoS information.""" url = "/ioclass/" + qos_id result = self.call(url, None, "GET") self._assert_rest_result(result, _('Get QoS information error.')) return result['data'] def get_lun_list_in_qos(self, qos_id, qos_info): """Get the lun list in QoS.""" lun_list = [] lun_string = qos_info['LUNLIST'][1:-1] for lun in lun_string.split(","): str = lun[1:-1] lun_list.append(str) return lun_list def remove_lun_from_qos(self, lun_id, lun_list, qos_id): """Remove lun from QoS.""" lun_list = [i for i in lun_list if i != lun_id] url = "/ioclass/" + qos_id data = {"LUNLIST": lun_list, "TYPE": 230, "ID": qos_id} result = self.call(url, data, "PUT") msg = _('Remove lun from QoS error.') self._assert_rest_result(result, msg) self._assert_data_in_result(result, msg) def change_lun_priority(self, lun_id): """Change lun priority to high.""" url = "/lun/" + lun_id data = {"TYPE": "11", "ID": lun_id, "IOPRIORITY": "3"} result = self.call(url, data, "PUT") self._assert_rest_result(result, _('Change lun priority error.')) def change_lun_smarttier(self, lunid, smarttier_policy): """Change lun smarttier policy.""" url = "/lun/" + lunid data = {"TYPE": "11", "ID": lunid, "DATATRANSFERPOLICY": smarttier_policy} result = self.call(url, data, "PUT") self._assert_rest_result( result, _('Change lun smarttier policy error.')) def get_qosid_by_lunid(self, lun_id): """Get QoS id by lun id.""" url = "/lun/" + lun_id result = self.call(url, None, "GET") self._assert_rest_result(result, _('Get QoS id by lun id error.')) return result['data']['IOCLASSID'] def get_lungroupids_by_lunid(self, lun_id, lun_type=constants.LUN_TYPE): """Get lungroup ids by lun id.""" url = ("/lungroup/associate?TYPE=256" "&ASSOCIATEOBJTYPE=%s&ASSOCIATEOBJID=%s" % (lun_type, lun_id)) result = self.call(url, None, "GET") self._assert_rest_result(result, _('Get lungroup id by lun id error.')) lungroup_ids = [] if 'data' in result: for item in result['data']: lungroup_ids.append(item['ID']) return lungroup_ids def get_lun_info(self, lun_id, lun_type = constants.LUN_TYPE): cmd_type = 'lun' if lun_type == constants.LUN_TYPE else 'snapshot' url = ("/%s/%s" % (cmd_type, lun_id)) result = self.call(url, None, "GET") msg = _('Get volume error.') self._assert_rest_result(result, msg) self._assert_data_in_result(result, msg) return result['data'] def get_snapshot_info(self, snapshot_id): url = "/snapshot/" + snapshot_id result = self.call(url, None, "GET") msg = _('Get snapshot error.') self._assert_rest_result(result, msg) self._assert_data_in_result(result, msg) return result['data'] def extend_lun(self, lun_id, new_volume_size): url = "/lun/expand" data = {"TYPE": 11, "ID": lun_id, "CAPACITY": int(new_volume_size)} result = self.call(url, data, 'PUT') msg = _('Extend volume error.') self._assert_rest_result(result, msg) self._assert_data_in_result(result, msg) return result['data'] def create_lun_migration(self, src_id, dst_id, speed=2): url = "/LUN_MIGRATION" data = {"TYPE": '253', "PARENTID": src_id, "TARGETLUNID": dst_id, "SPEED": speed, "WORKMODE": 0} result = self.call(url, data, "POST") msg = _('Create lun migration error.') self._assert_rest_result(result, msg) self._assert_data_in_result(result, msg) def get_lun_migration_task(self): url = 
'/LUN_MIGRATION?range=[0-256]' result = self.call(url, None, "GET") self._assert_rest_result(result, _('Get lun migration task error.')) return result def delete_lun_migration(self, src_id, dst_id): url = '/LUN_MIGRATION/' + src_id result = self.call(url, None, "DELETE") msg = _('Delete lun migration error.') self._assert_rest_result(result, msg) self._assert_data_in_result(result, msg) def get_partition_id_by_name(self, name): url = "/cachepartition" result = self.call(url, None, "GET") self._assert_rest_result(result, _('Get partition by name error.')) return self._get_id_from_result(result, name, 'NAME') def get_partition_info_by_id(self, partition_id): url = '/cachepartition/' + partition_id result = self.call(url, None, "GET") self._assert_rest_result(result, _('Get partition by partition id error.')) return result['data'] def add_lun_to_partition(self, lun_id, partition_id): url = "/lun/associate/cachepartition" data = {"ID": partition_id, "ASSOCIATEOBJTYPE": 11, "ASSOCIATEOBJID": lun_id} result = self.call(url, data, "POST") self._assert_rest_result(result, _('Add lun to partition error.')) def remove_lun_from_partition(self, lun_id, partition_id): url = ('/lun/associate/cachepartition?ID=' + partition_id + '&ASSOCIATEOBJTYPE=11&ASSOCIATEOBJID=' + lun_id) result = self.call(url, None, "DELETE") self._assert_rest_result(result, _('Remove lun from partition error.')) def get_cache_id_by_name(self, name): url = "/SMARTCACHEPARTITION" result = self.call(url, None, "GET") self._assert_rest_result(result, _('Get cache by name error.')) return self._get_id_from_result(result, name, 'NAME') def get_cache_info_by_id(self, cacheid): url = "/SMARTCACHEPARTITION/" + cacheid data = {"TYPE": "273", "ID": cacheid} result = self.call(url, data, "GET") self._assert_rest_result( result, _('Get smartcache by cache id error.')) return result['data'] def remove_lun_from_cache(self, lun_id, cache_id): url = "/SMARTCACHEPARTITION/REMOVE_ASSOCIATE" data = {"ID": cache_id, "ASSOCIATEOBJTYPE": 11, "ASSOCIATEOBJID": lun_id, "TYPE": 273} result = self.call(url, data, "PUT") self._assert_rest_result(result, _('Remove lun from cache error.')) def get_qos(self): url = "/ioclass" result = self.call(url, None, "GET") self._assert_rest_result(result, _('Get QoS information error.')) return result def find_available_qos(self, qos): """"Find available QoS on the array.""" qos_id = None lun_list = [] extra_qos = [i for i in constants.EXTRA_QOS_KEYS if i not in qos] result = self.get_qos() if 'data' in result: for items in result['data']: qos_flag = 0 extra_flag = False if 'LATENCY' not in qos and items['LATENCY'] != '0': extra_flag = True else: for item in items: if item in extra_qos: extra_flag = True break for key in qos: if key not in items: break elif qos[key] != items[key]: break qos_flag = qos_flag + 1 lun_num = len(items['LUNLIST'].split(",")) qos_name = items['NAME'] qos_status = items['RUNNINGSTATUS'] # We use this QoS only if the LUNs in it is less than 64, # created by OpenStack and does not contain filesystem, # else we cannot add LUN to this QoS any more. 
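# (Note: an empty filesystem list is reported by the array as the literal
# string '[""]', which is what the FSLIST comparison below relies on.)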
if (qos_flag == len(qos) and not extra_flag and lun_num < constants.MAX_LUN_NUM_IN_QOS and qos_name.startswith(constants.QOS_NAME_PREFIX) and qos_status == constants.STATUS_QOS_ACTIVE and items['FSLIST'] == '[""]'): qos_id = items['ID'] lun_list = items['LUNLIST'] break return (qos_id, lun_list) def add_lun_to_qos(self, qos_id, lun_id, lun_list): """Add lun to QoS.""" url = "/ioclass/" + qos_id new_lun_list = [] lun_list_string = lun_list[1:-1] for lun_string in lun_list_string.split(","): tmp_lun_id = lun_string[1:-1] if '' != tmp_lun_id and tmp_lun_id != lun_id: new_lun_list.append(tmp_lun_id) new_lun_list.append(lun_id) data = {"LUNLIST": new_lun_list, "TYPE": 230, "ID": qos_id} result = self.call(url, data, "PUT") msg = _('Associate lun to QoS error.') self._assert_rest_result(result, msg) self._assert_data_in_result(result, msg) def add_lun_to_cache(self, lun_id, cache_id): url = "/SMARTCACHEPARTITION/CREATE_ASSOCIATE" data = {"ID": cache_id, "ASSOCIATEOBJTYPE": 11, "ASSOCIATEOBJID": lun_id, "TYPE": 273} result = self.call(url, data, "PUT") self._assert_rest_result(result, _('Add lun to cache error.')) def get_array_info(self): url = "/system/" result = self.call(url, None, "GET", log_filter_flag=True) self._assert_rest_result(result, _('Get array info error.')) return result.get('data', None) def find_array_version(self): info = self.get_array_info() return info.get('PRODUCTVERSION', None) def remove_host(self, host_id): url = "/host/%s" % host_id result = self.call(url, None, "DELETE") self._assert_rest_result(result, _('Remove host from array error.')) def delete_hostgroup(self, hostgroup_id): url = "/hostgroup/%s" % hostgroup_id result = self.call(url, None, "DELETE") self._assert_rest_result(result, _('Delete hostgroup error.')) def remove_host_from_hostgroup(self, hostgroup_id, host_id): url_subfix001 = "/host/associate?TYPE=14&ID=%s" % hostgroup_id url_subfix002 = "&ASSOCIATEOBJTYPE=21&ASSOCIATEOBJID=%s" % host_id url = url_subfix001 + url_subfix002 result = self.call(url, None, "DELETE") self._assert_rest_result(result, _('Remove host from hostgroup error.')) def remove_iscsi_from_host(self, initiator): url = "/iscsi_initiator/remove_iscsi_from_host" data = {"TYPE": '222', "ID": initiator} result = self.call(url, data, "PUT") self._assert_rest_result(result, _('Remove iscsi from host error.')) def get_host_online_fc_initiators(self, host_id): url = "/fc_initiator?PARENTTYPE=21&PARENTID=%s" % host_id result = self.call(url, None, "GET") initiators = [] if 'data' in result: for item in result['data']: if (('PARENTID' in item) and (item['PARENTID'] == host_id) and (item['RUNNINGSTATUS'] == constants.FC_INIT_ONLINE)): initiators.append(item['ID']) return initiators def get_host_fc_initiators(self, host_id): url = "/fc_initiator?PARENTTYPE=21&PARENTID=%s" % host_id result = self.call(url, None, "GET") initiators = [] if 'data' in result: for item in result['data']: if (('PARENTID' in item) and (item['PARENTID'] == host_id)): initiators.append(item['ID']) return initiators def get_host_iscsi_initiators(self, host_id): url = "/iscsi_initiator?PARENTTYPE=21&PARENTID=%s" % host_id result = self.call(url, None, "GET") initiators = [] if 'data' in result: for item in result['data']: if (('PARENTID' in item) and (item['PARENTID'] == host_id)): initiators.append(item['ID']) return initiators def rename_lun(self, lun_id, new_name, description=None): url = "/lun/" + lun_id data = {"NAME": new_name} if description: data.update({"DESCRIPTION": description}) result = self.call(url, data, "PUT") 
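# Example of the LUNLIST parsing done in add_lun_to_qos() and
# get_lun_list_in_qos() above (sketch; the sample value is made up). The
# array returns LUNLIST as a string such as '["11","12"]', so the slicing
# strips the surrounding brackets and the per-item quotes:
#
#   lun_list = '["11","12"]'
#   inner = lun_list[1:-1]                       # -> '"11","12"'
#   ids = [s[1:-1] for s in inner.split(",")]    # -> ['11', '12']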
msg = _('Rename lun on array error.') self._assert_rest_result(result, msg) self._assert_data_in_result(result, msg) def rename_snapshot(self, snapshot_id, new_name, description=None): url = "/snapshot/" + snapshot_id data = {"NAME": new_name} if description: data.update({"DESCRIPTION": description}) result = self.call(url, data, "PUT") msg = _('Rename snapshot on array error.') self._assert_rest_result(result, msg) self._assert_data_in_result(result, msg) def is_fc_initiator_associated_to_host(self, ininame): """Check whether the initiator is associated to the host.""" url = '/fc_initiator?range=[0-256]' result = self.call(url, None, "GET") self._assert_rest_result(result, 'Check initiator associated to host error.') if "data" in result: for item in result['data']: if item['ID'] == ininame and item['ISFREE'] != "true": return True return False def remove_fc_from_host(self, initiator): url = '/fc_initiator/remove_fc_from_host' data = {"TYPE": '223', "ID": initiator} result = self.call(url, data, "PUT") self._assert_rest_result(result, _('Remove fc from host error.')) def check_fc_initiators_exist_in_host(self, host_id): url = "/fc_initiator?range=[0-256]&PARENTID=%s" % host_id result = self.call(url, None, "GET") self._assert_rest_result(result, _('Get host initiators info failed.')) if 'data' in result: return True return False def _fc_initiator_is_added_to_array(self, ininame): """Check whether the fc initiator is already added on the array.""" url = "/fc_initiator/" + ininame result = self.call(url, None, "GET") error_code = result['error']['code'] if error_code != 0: return False return True def _add_fc_initiator_to_array(self, ininame): """Add a fc initiator to storage device.""" url = '/fc_initiator/' data = {"TYPE": '223', "ID": ininame} result = self.call(url, data, 'POST') self._assert_rest_result(result, _('Add fc initiator to array error.')) def ensure_fc_initiator_added(self, initiator_name, host_id): added = self._fc_initiator_is_added_to_array(initiator_name) if not added: self._add_fc_initiator_to_array(initiator_name) # Just add, no need to check whether have been added. 
self.add_fc_port_to_host(host_id, initiator_name) def get_fc_ports_on_array(self): url = '/fc_port' result = self.call(url, None, "GET") msg = _('Get FC ports from array error.') self._assert_rest_result(result, msg) return result['data'] def get_fc_ports_from_contr(self, contr): port_list_from_contr = [] location = [] data = self.get_fc_ports_on_array() for item in data: location = item['PARENTID'].split('.') if (location[0][1] == contr) and (item['RUNNINGSTATUS'] == constants.FC_PORT_CONNECTED): port_list_from_contr.append(item['WWN']) return port_list_from_contr def get_hyper_domain_id(self, domain_name): url = "/HyperMetroDomain?range=[0-32]" result = self.call(url, None, "GET") domain_id = None if "data" in result: for item in result['data']: if domain_name == item['NAME']: domain_id = item['ID'] break msg = _('get_hyper_domain_id error.') self._assert_rest_result(result, msg) return domain_id def create_hypermetro(self, hcp_param): url = "/HyperMetroPair" result = self.call(url, hcp_param, "POST") msg = _('create_hypermetro_pair error.') self._assert_rest_result(result, msg) self._assert_data_in_result(result, msg) return result['data'] def delete_hypermetro(self, metro_id): url = "/HyperMetroPair/" + metro_id result = self.call(url, None, "DELETE") msg = _('delete_hypermetro error.') self._assert_rest_result(result, msg) def sync_hypermetro(self, metro_id): url = "/HyperMetroPair/synchronize_hcpair" data = {"ID": metro_id, "TYPE": "15361"} result = self.call(url, data, "PUT") msg = _('sync_hypermetro error.') self._assert_rest_result(result, msg) def stop_hypermetro(self, metro_id): url = '/HyperMetroPair/disable_hcpair' data = {"ID": metro_id, "TYPE": "15361"} result = self.call(url, data, "PUT") msg = _('stop_hypermetro error.') self._assert_rest_result(result, msg) def get_hypermetro_by_id(self, metro_id): url = "/HyperMetroPair/" + metro_id result = self.call(url, None, "GET") msg = _('get_hypermetro_by_id error.') self._assert_rest_result(result, msg) self._assert_data_in_result(result, msg) return result['data'] def check_hypermetro_exist(self, metro_id): url = "/HyperMetroPair/" + metro_id result = self.call(url, None, "GET") error_code = result['error']['code'] if (error_code == constants.ERROR_CONNECT_TO_SERVER or error_code == constants.ERROR_UNAUTHORIZED_TO_SERVER): LOG.error("Can not open the recent url, login again.") self.login() result = self.call(url, None, "GET") error_code = result['error']['code'] if (error_code == constants.ERROR_CONNECT_TO_SERVER or error_code == constants.ERROR_UNAUTHORIZED_TO_SERVER): msg = _("check_hypermetro_exist error.") LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) if error_code != 0: return False return True def change_hostlun_id(self, map_info, hostlun_id): url = "/mappingview" view_id = str(map_info['view_id']) lun_id = str(map_info['lun_id']) hostlun_id = str(hostlun_id) data = {"TYPE": 245, "ID": view_id, "ASSOCIATEOBJTYPE": 11, "ASSOCIATEOBJID": lun_id, "ASSOCIATEMETADATA": [{"LUNID": lun_id, "hostLUNId": hostlun_id}]} result = self.call(url, data, "PUT") msg = 'change hostlun id error.' 
self._assert_rest_result(result, msg) def find_view_by_id(self, view_id): url = "/MAPPINGVIEW/" + view_id result = self.call(url, None, "GET") msg = _('Change hostlun id error.') self._assert_rest_result(result, msg) if 'data' in result: return result["data"]["AVAILABLEHOSTLUNIDLIST"] def get_metrogroup_by_name(self, name): url = "/HyperMetro_ConsistentGroup?type='15364'" result = self.call(url, None, "GET") msg = _('Get hypermetro group by name error.') self._assert_rest_result(result, msg) return self._get_id_from_result(result, name, 'NAME') def get_metrogroup_by_id(self, id): url = "/HyperMetro_ConsistentGroup/" + id result = self.call(url, None, "GET") msg = _('Get hypermetro group by id error.') self._assert_rest_result(result, msg) self._assert_data_in_result(result, msg) return result['data'] def create_metrogroup(self, name, description, domain_id): url = "/HyperMetro_ConsistentGroup" data = {"NAME": name, "TYPE": "15364", "DESCRIPTION": description, "RECOVERYPOLICY": "1", "SPEED": "2", "PRIORITYSTATIONTYPE": "0", "DOMAINID": domain_id} result = self.call(url, data, "POST") msg = _('create hypermetro group error.') self._assert_rest_result(result, msg) if 'data' in result: return result["data"]["ID"] def delete_metrogroup(self, metrogroup_id): url = "/HyperMetro_ConsistentGroup/" + metrogroup_id result = self.call(url, None, "DELETE") msg = _('Delete hypermetro group error.') self._assert_rest_result(result, msg) def get_metrogroup(self, metrogroup_id): url = "/HyperMetro_ConsistentGroup/" + metrogroup_id result = self.call(url, None, "GET") msg = _('Get hypermetro group error.') self._assert_rest_result(result, msg) def stop_metrogroup(self, metrogroup_id): url = "/HyperMetro_ConsistentGroup/stop" data = {"TYPE": "15364", "ID": metrogroup_id } result = self.call(url, data, "PUT") msg = _('stop hypermetro group error.') self._assert_rest_result(result, msg) def sync_metrogroup(self, metrogroup_id): url = "/HyperMetro_ConsistentGroup/sync" data = {"TYPE": "15364", "ID": metrogroup_id } result = self.call(url, data, "PUT") msg = _('sync hypermetro group error.') self._assert_rest_result(result, msg) def add_metro_to_metrogroup(self, metrogroup_id, metro_id): url = "/hyperMetro/associate/pair" data = {"TYPE": "15364", "ID": metrogroup_id, "ASSOCIATEOBJTYPE": "15361", "ASSOCIATEOBJID": metro_id} result = self.call(url, data, "POST") msg = _('Add hypermetro to metrogroup error.') self._assert_rest_result(result, msg) def remove_metro_from_metrogroup(self, metrogroup_id, metro_id): url = "/hyperMetro/associate/pair" data = {"TYPE": "15364", "ID": metrogroup_id, "ASSOCIATEOBJTYPE": "15361", "ASSOCIATEOBJID": metro_id} result = self.call(url, data, "DELETE") msg = _('Delete hypermetro from metrogroup error.') self._assert_rest_result(result, msg) def get_hypermetro_pairs(self): url = "/HyperMetroPair?range=[0-4095]" result = self.call(url, None, "GET") msg = _('Get HyperMetroPair error.') self._assert_rest_result(result, msg) return result.get('data', []) def get_split_mirrors(self): url = "/splitmirror?range=[0-8191]" result = self.call(url, None, "GET") if result['error']['code'] == constants.NO_SPLITMIRROR_LICENSE: msg = _('License is unavailable.') raise exception.VolumeBackendAPIException(data=msg) msg = _('Get SplitMirror error.') self._assert_rest_result(result, msg) return result.get('data', []) def get_target_luns(self, id): url = ("/SPLITMIRRORTARGETLUN/targetLUN?TYPE=228&PARENTID=%s&" "PARENTTYPE=220") % id result = self.call(url, None, "GET") msg = _('Get target LUN of 
SplitMirror error.') self._assert_rest_result(result, msg) target_luns = [] for item in result.get('data', []): target_luns.append(item.get('ID')) return target_luns def get_migration_task(self): url = "/LUN_MIGRATION?range=[0-256]" result = self.call(url, None, "GET") if result['error']['code'] == constants.NO_MIGRATION_LICENSE: msg = _('License is unavailable.') raise exception.VolumeBackendAPIException(data=msg) msg = _('Get migration task error.') self._assert_rest_result(result, msg) return result.get('data', []) def is_lun_in_mirror(self, name): if not name: return False url = "/lun?filter=NAME::%s" % name result = self.call(url, None, "GET") self._assert_rest_result(result, _('Get volume by name error.')) for item in result.get('data', []): rss_obj = item.get('HASRSSOBJECT') if rss_obj: rss_obj = json.loads(rss_obj) if rss_obj.get('LUNMirror') == 'TRUE': return True return False def get_portgs_by_portid(self, port_id): portgs = [] if not port_id: return portgs url = ("/portgroup/associate/fc_port?TYPE=257&ASSOCIATEOBJTYPE=212&" "ASSOCIATEOBJID=%s") % port_id result = self.call(url, None, "GET") self._assert_rest_result(result, _('Get port groups by port error.')) for item in result.get("data", []): portgs.append(item["ID"]) return portgs def get_views_by_portg(self, portg_id): views = [] if not portg_id: return views url = ("/mappingview/associate/portgroup?TYPE=245&ASSOCIATEOBJTYPE=" "257&ASSOCIATEOBJID=%s") % portg_id result = self.call(url, None, "GET") self._assert_rest_result(result, _('Get views by port group error.')) for item in result.get("data", []): views.append(item["ID"]) return views def get_lungroup_by_view(self, view_id): if not view_id: return None url = ("/lungroup/associate/mappingview?TYPE=256&ASSOCIATEOBJTYPE=" "245&ASSOCIATEOBJID=%s") % view_id result = self.call(url, None, "GET") self._assert_rest_result(result, _('Get LUN group by view error.')) for item in result.get("data", []): # In fact, there is just one lungroup in a view. 
return item["ID"] def get_portgroup_by_view(self, view_id): if not view_id: return None url = ("/portgroup/associate/mappingview?TYPE=257&ASSOCIATEOBJTYPE=" "245&ASSOCIATEOBJID=%s") % view_id result = self.call(url, None, "GET") self._assert_rest_result(result, _('Get port group by view error.')) return result.get("data", []) def get_fc_ports_by_portgroup(self, portg_id): ports = {} if not portg_id: return ports url = ("/fc_port/associate/portgroup?TYPE=212&ASSOCIATEOBJTYPE=257" "&ASSOCIATEOBJID=%s") % portg_id result = self.call(url, None, "GET") self._assert_rest_result(result, _('Get FC ports by port group ' 'error.')) for item in result.get("data", []): ports[item["WWN"]] = item["ID"] return ports def create_portg(self, portg_name, description=""): url = "/PortGroup" data = {"DESCRIPTION": description, "NAME": portg_name, "TYPE": 257} result = self.call(url, data, "POST") self._assert_rest_result(result, _('Create port group error.')) if "data" in result: return result['data']['ID'] def add_port_to_portg(self, portg_id, port_id): url = "/port/associate/portgroup" data = {"ASSOCIATEOBJID": port_id, "ASSOCIATEOBJTYPE": 212, "ID": portg_id, "TYPE": 257} result = self.call(url, data, "POST") self._assert_rest_result(result, _('Add port to port group error.')) def delete_portgroup(self, portg_id): url = "/PortGroup/%s" % portg_id result = self.call(url, None, "DELETE") self._assert_rest_result(result, _('Delete port group error.')) def remove_port_from_portgroup(self, portg_id, port_id): url = (("/port/associate/portgroup?ID=%(portg_id)s&TYPE=257&" "ASSOCIATEOBJTYPE=212&ASSOCIATEOBJID=%(port_id)s") % {"portg_id": portg_id, "port_id": port_id}) result = self.call(url, None, "DELETE") self._assert_rest_result(result, _('Remove port from port group' ' error.')) def get_all_engines(self): url = "/storageengine" result = self.call(url, None, "GET") self._assert_rest_result(result, _('Get engines error.')) return result.get("data", []) def get_portg_info(self, portg_id): url = "/portgroup/%s" % portg_id result = self.call(url, None, "GET") self._assert_rest_result(result, _('Get port group error.')) return result.get("data", {}) def append_portg_desc(self, portg_id, description): portg_info = self.get_portg_info(portg_id) new_description = portg_info.get('DESCRIPTION') + ',' + description url = "/portgroup/%s" % portg_id data = {"DESCRIPTION": new_description, "ID": portg_id, "TYPE": 257} result = self.call(url, data, "PUT") self._assert_rest_result(result, _('Append port group description' ' error.')) def get_ports_by_portg(self, portg_id): wwns = [] url = ("/fc_port/associate?TYPE=213&ASSOCIATEOBJTYPE=257" "&ASSOCIATEOBJID=%s" % portg_id) result = self.call(url, None, "GET") msg = _('Get ports by port group error.') self._assert_rest_result(result, msg) for item in result.get('data', []): wwns.append(item['WWN']) return wwns def get_remote_devices(self): url = "/remote_device" result = self.call(url, None, "GET", log_filter_flag=True) self._assert_rest_result(result, _('Get remote devices error.')) return result.get('data', []) def create_pair(self, pair_params): url = "/REPLICATIONPAIR" result = self.call(url, pair_params, "POST") msg = _('Create replication error.') self._assert_rest_result(result, msg) self._assert_data_in_result(result, msg) return result['data'] def get_pair_by_id(self, pair_id): url = "/REPLICATIONPAIR/" + pair_id result = self.call(url, None, "GET") msg = _('Get pair failed.') self._assert_rest_result(result, msg) return result.get('data', {}) def switch_pair(self, 
pair_id): url = '/REPLICATIONPAIR/switch' data = {"ID": pair_id, "TYPE": "263"} result = self.call(url, data, "PUT") msg = _('Switch over pair error.') self._assert_rest_result(result, msg) def split_pair(self, pair_id): url = '/REPLICATIONPAIR/split' data = {"ID": pair_id, "TYPE": "263"} result = self.call(url, data, "PUT") msg = _('Split pair error.') self._assert_rest_result(result, msg) def delete_pair(self, pair_id, force=False): url = "/REPLICATIONPAIR/" + pair_id data = None if force: data = {"ISLOCALDELETE": force} result = self.call(url, data, "DELETE") msg = _('delete_replication error.') self._assert_rest_result(result, msg) def sync_pair(self, pair_id): url = "/REPLICATIONPAIR/sync" data = {"ID": pair_id, "TYPE": "263"} result = self.call(url, data, "PUT") msg = _('Sync pair error.') self._assert_rest_result(result, msg) def check_pair_exist(self, pair_id): url = "/REPLICATIONPAIR/" + pair_id result = self.call(url, None, "GET") return result['error']['code'] == 0 def set_pair_second_access(self, pair_id, access): url = "/REPLICATIONPAIR/" + pair_id data = {"ID": pair_id, "SECRESACCESS": access} result = self.call(url, data, "PUT") msg = _('Set pair secondary access error.') self._assert_rest_result(result, msg) def is_host_associated_to_hostgroup(self, host_id): url = "/host/" + host_id result = self.call(url, None, "GET") data = result.get('data') if data is not None: return data.get('ISADD2HOSTGROUP') == 'true' return False def _get_object_count(self, obj_name): url = "/" + obj_name + "/count" result = self.call(url, None, "GET", log_filter_flag=True) if result['error']['code'] != 0: raise Exception(_('Failed to get object count.')) if result.get("data"): return result.get("data").get("COUNT") def get_lun_info_by_name(self, name): url = "/lun?filter=NAME::%s" % name result = self.call(url, None, "GET") msg = _('Get lun by name %s error.') % name self._assert_rest_result(result, msg) if result.get('data'): return result['data'][0] def get_lun_info_by_id(self, lun_id): url = "/lun/" + lun_id result = self.call(url, None, "GET") msg = _('Get lun by id %s error.') % lun_id self._assert_rest_result(result, msg) return result['data'] def get_snapshot_info_by_name(self, name): url = "/snapshot?filter=NAME::%s" % name result = self.call(url, None, "GET") msg = _('Get snapshot by name %s error.') % name self._assert_rest_result(result, msg) if result.get('data'): return result['data'][0] def get_snapshot_info_by_id(self, snapshot_id): url = "/snapshot/" + snapshot_id result = self.call(url, None, "GET") msg = _('Get snapshot by id %s error.') % snapshot_id self._assert_rest_result(result, msg) return result['data'] def update_qos_luns(self, qos_id, lun_list): url = "/ioclass/" + qos_id data = {"LUNLIST": lun_list} result = self.call(url, data, "PUT") msg = _('Update luns of qos %s error.') % qos_id self._assert_rest_result(result, msg) def create_clone_pair(self, source_id, target_id, clone_speed): url = "/clonepair/relation" data = {"copyRate": clone_speed, "sourceID": source_id, "targetID": target_id, "isNeedSynchronize": "0"} result = self.call(url, data, "POST") self._assert_rest_result(result, 'Create ClonePair error, source_id ' 'is %s.' % source_id) return result['data']['ID'] def get_clone_pair_info(self, pair_id): url = "/clonepair/%s" % pair_id result = self.call(url, None, "GET") self._assert_rest_result(result, 'Get ClonePair %s error.' 
% pair_id) return result.get('data', {}) def sync_clone_pair(self, pair_id): url = "/clonepair/synchronize" data = {"ID": pair_id, "copyAction": 0} result = self.call(url, data, "PUT") self._assert_rest_result(result, 'Sync ClonePair error, pair is %s.' % pair_id) def delete_clone_pair(self, pair_id, delete_dst_lun=False): data = {"ID": pair_id, "isDeleteDstLun": delete_dst_lun} url = "/clonepair/%s" % pair_id result = self.call(url, data, "DELETE") if result['error']['code'] == constants.CLONE_PAIR_NOT_EXIST: LOG.warning('ClonePair %s to delete does not exist.', pair_id) return self._assert_rest_result(result, 'Delete ClonePair %s error.' % pair_id) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/drivers/huawei/smartx.py0000664000175000017500000001151200000000000022472 0ustar00zuulzuul00000000000000# Copyright (c) 2016 Huawei Technologies Co., Ltd. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import json from oslo_log import log as logging from cinder import exception from cinder.i18n import _ from cinder import utils from cinder.volume.drivers.huawei import constants LOG = logging.getLogger(__name__) class SmartQos(object): def __init__(self, client): self.client = client def _check_qos_consistency(self, policy, qos): for key in [k.upper() for k in constants.QOS_SPEC_KEYS]: if qos.get(key, '0') != policy.get(key, '0'): return False return True def _change_lun_priority(self, qos, lun_id): for key in qos: if key.startswith('MIN') or key.startswith('LATENCY'): data = {"IOPRIORITY": "3"} self.client.update_lun(lun_id, data) break @utils.synchronized('huawei_qos', external=True) def add(self, qos, lun_id): self._change_lun_priority(qos, lun_id) qos_id = self.client.create_qos(qos, lun_id) try: self.client.activate_deactivate_qos(qos_id, True) except exception.VolumeBackendAPIException: self.remove(qos_id, lun_id) raise return qos_id @utils.synchronized('huawei_qos', external=True) def remove(self, qos_id, lun_id, qos_info=None): if not qos_info: qos_info = self.client.get_qos_info(qos_id) lun_list = json.loads(qos_info['LUNLIST']) if lun_id in lun_list: lun_list.remove(lun_id) if len(lun_list) == 0: if qos_info['RUNNINGSTATUS'] != constants.QOS_INACTIVATED: self.client.activate_deactivate_qos(qos_id, False) self.client.delete_qos(qos_id) else: self.client.update_qos_luns(qos_id, lun_list) def update(self, qos_id, new_qos, lun_id): qos_info = self.client.get_qos_info(qos_id) if self._check_qos_consistency(qos_info, new_qos): return self.remove(qos_id, lun_id, qos_info) self.add(new_qos, lun_id) class SmartPartition(object): def __init__(self, client): self.client = client def add(self, partitionname, lun_id): partition_id = self.client.get_partition_id_by_name(partitionname) if not partition_id: msg = _('Cannot find partition by name %s.') % partitionname LOG.error(msg) raise exception.InvalidInput(reason=msg) self.client.add_lun_to_partition(lun_id, partition_id) return partition_id def 
remove(self, partition_id, lun_id): self.client.remove_lun_from_partition(lun_id, partition_id) def update(self, partition_id, partitionname, lun_id): partition_info = self.client.get_partition_info_by_id(partition_id) if partition_info['NAME'] == partitionname: return self.remove(partition_id, lun_id) self.add(partitionname, lun_id) def check_partition_valid(self, partitionname): partition_id = self.client.get_partition_id_by_name(partitionname) if not partition_id: msg = _("Partition %s doesn't exist.") % partitionname LOG.error(msg) raise exception.InvalidInput(reason=msg) class SmartCache(object): def __init__(self, client): self.client = client def add(self, cachename, lun_id): cache_id = self.client.get_cache_id_by_name(cachename) if not cache_id: msg = _('Cannot find cache by name %s.') % cachename LOG.error(msg) raise exception.InvalidInput(reason=msg) self.client.add_lun_to_cache(lun_id, cache_id) return cache_id def remove(self, cache_id, lun_id): self.client.remove_lun_from_cache(lun_id, cache_id) def update(self, cache_id, cachename, lun_id): cache_info = self.client.get_cache_info_by_id(cache_id) if cache_info['NAME'] == cachename: return self.remove(cache_id, lun_id) self.add(cachename, lun_id) def check_cache_valid(self, cachename): cache_id = self.client.get_cache_id_by_name(cachename) if not cache_id: msg = _("Cache %s doesn't exit.") % cachename LOG.error(msg) raise exception.InvalidInput(reason=msg) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315577.3631208 cinder-27.0.0/cinder/volume/drivers/ibm/0000775000175000017500000000000000000000000020067 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/drivers/ibm/__init__.py0000664000175000017500000000000000000000000022166 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/drivers/ibm/flashsystem_common.py0000664000175000017500000013470000000000000024360 0ustar00zuulzuul00000000000000# Copyright 2015 IBM Corporation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # """ Volume driver for IBM FlashSystem storage systems. Limitations: 1. Cinder driver only works when open_access_enabled=off. 
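# --- Illustrative sketch only; not part of the modules above. ---
# SmartQos._check_qos_consistency (smartx.py above) treats a QoS spec and an
# array-side policy as equal when every known spec key matches, with a missing
# key defaulting to '0'.  A minimal standalone version of that comparison,
# using a hypothetical key list in place of constants.QOS_SPEC_KEYS:
QOS_SPEC_KEYS = ('maxIOPS', 'minIOPS', 'maxBandWidth', 'minBandWidth',
                 'latency', 'IOType')


def qos_matches_policy(policy, qos):
    """Return True if ``qos`` and ``policy`` agree on every spec key."""
    for key in (k.upper() for k in QOS_SPEC_KEYS):
        if qos.get(key, '0') != policy.get(key, '0'):
            return False
    return True


# A policy that only sets MAXIOPS still matches a spec that leaves the other
# keys unset, because both sides default the missing keys to '0'.
assert qos_matches_policy({'MAXIOPS': '1000'}, {'MAXIOPS': '1000'})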
""" import re from oslo_concurrency import processutils from oslo_config import cfg from oslo_log import log as logging from oslo_service import loopingcall from oslo_utils import excutils from oslo_utils import strutils from oslo_utils import units from cinder import context from cinder import exception from cinder.i18n import _ from cinder import utils from cinder.volume import configuration from cinder.volume import driver from cinder.volume.drivers.san import san from cinder.volume import volume_types from cinder.volume import volume_utils LOG = logging.getLogger(__name__) FLASHSYSTEM_VOLPOOL_NAME = 'mdiskgrp0' FLASHSYSTEM_VOL_IOGRP = 0 flashsystem_opts = [ cfg.StrOpt('flashsystem_connection_protocol', default='FC', help='Connection protocol should be FC. ' '(Default is FC.)'), cfg.BoolOpt('flashsystem_multihostmap_enabled', default=True, help='Allows vdisk to multi host mapping. ' '(Default is True)') ] CONF = cfg.CONF CONF.register_opts(flashsystem_opts, group=configuration.SHARED_CONF_GROUP) class FlashSystemDriver(san.SanDriver, driver.ManageableVD, driver.BaseVD): """IBM FlashSystem volume driver. Version history: .. code-block:: none 1.0.0 - Initial driver 1.0.1 - Code clean up 1.0.2 - Add lock into vdisk map/unmap, connection initialize/terminate 1.0.3 - Initial driver for iSCSI 1.0.4 - Split Flashsystem driver into common and FC 1.0.5 - Report capability of volume multiattach 1.0.6 - Fix bug #1469581, add I/T mapping check in terminate_connection 1.0.7 - Fix bug #1505477, add host name check in _find_host_exhaustive for FC 1.0.8 - Fix bug #1572743, multi-attach attribute should not be hardcoded, only in iSCSI 1.0.9 - Fix bug #1570574, Cleanup host resource leaking, changes only in iSCSI 1.0.10 - Fix bug #1585085, add host name check in _find_host_exhaustive for iSCSI 1.0.11 - Update driver to use ABC metaclasses 1.0.12 - Update driver to support Manage/Unmanage existing volume """ VERSION = "1.0.12" # TODO(jsbryant) Remove driver in the 'U' release if CI is not fixed. 
SUPPORTED = False MULTI_HOST_MAP_ERRORS = ['CMMVC6045E', 'CMMVC6071E'] def __init__(self, *args, **kwargs): super(FlashSystemDriver, self).__init__(*args, **kwargs) self.configuration.append_config_values(flashsystem_opts) self._storage_nodes = {} self._protocol = None self._context = None self._system_name = None self._system_id = None self._check_lock_interval = 5 self._vdisk_copy_in_progress = set() self._vdisk_copy_lock = None @staticmethod def get_driver_options(): return flashsystem_opts def _ssh(self, ssh_cmd, check_exit_code=True): try: return self._run_ssh(ssh_cmd, check_exit_code) except processutils.ProcessExecutionError as e: msg = (_('CLI Exception output:\n command: %(cmd)s\n ' 'stdout: %(out)s\n stderr: %(err)s') % {'cmd': ssh_cmd, 'out': e.stdout, 'err': e.stderr}) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) def _append_dict(self, dict_, key, value): key, value = key.strip(), value.strip() obj = dict_.get(key, None) if obj is None: dict_[key] = value elif isinstance(obj, list): obj.append(value) dict_[key] = obj else: dict_[key] = [obj, value] return dict_ def _assert_ssh_return(self, test, fun, ssh_cmd, out, err): self._driver_assert(test, (_('%(fun)s: Failed with unexpected CLI output.\n ' 'Command: %(cmd)s\n stdout: %(out)s\n ' 'stderr: %(err)s') % {'fun': fun, 'cmd': ssh_cmd, 'out': str(out), 'err': str(err)})) def _build_default_params(self): return {'protocol': self.configuration.flashsystem_connection_protocol} def _build_initiator_target_map(self, initiator_wwpns, target_wwpns): map = {} for i_wwpn in initiator_wwpns: idx = str(i_wwpn) map[idx] = [] for t_wwpn in target_wwpns: map[idx].append(t_wwpn) return map def _check_vdisk_params(self, params): raise NotImplementedError() def _connector_to_hostname_prefix(self, connector): """Translate connector info to storage system host name. Translate a host's name and IP to the prefix of its hostname on the storage subsystem. We create a host name from the host and IP address, replacing any invalid characters (at most 55 characters), and adding a random 8-character suffix to avoid collisions. The total length should be at most 63 characters. """ # Build cleanup translation tables for host names invalid_ch_in_host = '' for num in range(0, 128): ch = str(chr(num)) if not ch.isalnum() and ch not in [' ', '.', '-', '_']: invalid_ch_in_host = invalid_ch_in_host + ch host_name = connector['host'] if isinstance(host_name, str): unicode_host_name_filter = {ord(str(char)): u'-' for char in invalid_ch_in_host} host_name = host_name.translate(unicode_host_name_filter) elif isinstance(host_name, str): string_host_name_filter = bytes.maketrans( invalid_ch_in_host, '-' * len(invalid_ch_in_host)) host_name = host_name.translate(string_host_name_filter) else: msg = _('_create_host: Can not translate host name. Host name ' 'is not unicode or string.') LOG.error(msg) raise exception.NoValidBackend(reason=msg) host_name = str(host_name) # FlashSystem family doesn't like hostname that starts with number. if not re.match('^[A-Za-z]', host_name): host_name = '_' + host_name return host_name[:55] def _copy_vdisk_data(self, src_vdisk_name, src_vdisk_id, dest_vdisk_name, dest_vdisk_id): """Copy data from src vdisk to dest vdisk. To be able to copy data between vdisks, we must ensure that both vdisks have been mapped to host. If vdisk has not been mapped, it must be mapped firstly. When data copy completed, vdisk should be restored to previous mapped or non-mapped status. 
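# --- Illustrative sketch only; not part of the driver file above. ---
# _connector_to_hostname_prefix above maps any character that is not
# alphanumeric, ' ', '.', '-' or '_' to '-', forces a leading letter (the
# FlashSystem CLI rejects host names that start with a digit) and keeps at
# most 55 characters so an 8-character random suffix still fits in 63.  A
# compact standalone sketch of that sanitization:
import re


def hostname_prefix(connector_host):
    """Sanitize a connector host name into a FlashSystem host-name prefix."""
    cleaned = ''.join(
        ch if (ch.isalnum() or ch in ' .-_') else '-'
        for ch in str(connector_host))
    if not re.match('^[A-Za-z]', cleaned):
        cleaned = '_' + cleaned
    return cleaned[:55]


# e.g. hostname_prefix('10.0.0.7:compute') -> '_10.0.0.7-compute'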
""" LOG.debug('enter: _copy_vdisk_data: %(src)s -> %(dest)s.', {'src': src_vdisk_name, 'dest': dest_vdisk_name}) connector = volume_utils.brick_get_connector_properties( self.configuration.use_multipath_for_image_xfer, self.configuration.enforce_multipath_for_image_xfer) (src_map, src_lun_id) = self._is_vdisk_map( src_vdisk_name, connector) (dest_map, dest_lun_id) = self._is_vdisk_map( dest_vdisk_name, connector) src_map_device = None src_properties = None dest_map_device = None dest_properties = None try: if not src_map: src_lun_id = self._map_vdisk_to_host(src_vdisk_name, connector) if not dest_map: dest_lun_id = self._map_vdisk_to_host(dest_vdisk_name, connector) src_properties = self._get_vdisk_map_properties( connector, src_lun_id, src_vdisk_name, src_vdisk_id, self._get_vdisk_params(None)) src_map_device = self._scan_device(src_properties) dest_properties = self._get_vdisk_map_properties( connector, dest_lun_id, dest_vdisk_name, dest_vdisk_id, self._get_vdisk_params(None)) dest_map_device = self._scan_device(dest_properties) src_vdisk_attr = self._get_vdisk_attributes(src_vdisk_name) # vdisk capacity is bytes, translate into MB size_in_mb = int(src_vdisk_attr['capacity']) / units.Mi volume_utils.copy_volume( src_map_device['path'], dest_map_device['path'], size_in_mb, self.configuration.volume_dd_blocksize) except Exception: with excutils.save_and_reraise_exception(): LOG.error('Failed to copy %(src)s to %(dest)s.', {'src': src_vdisk_name, 'dest': dest_vdisk_name}) finally: if not dest_map: self._unmap_vdisk_from_host(dest_vdisk_name, connector) self._remove_device(dest_properties, dest_map_device) if not src_map: self._unmap_vdisk_from_host(src_vdisk_name, connector) self._remove_device(src_properties, src_map_device) LOG.debug( 'leave: _copy_vdisk_data: %(src)s -> %(dest)s.', {'src': src_vdisk_name, 'dest': dest_vdisk_name}) def _create_and_copy_vdisk_data(self, src_vdisk_name, src_vdisk_id, dest_vdisk_name, dest_vdisk_id, dest_vdisk_size=None): if dest_vdisk_size is None: vdisk_attr = self._get_vdisk_attributes(src_vdisk_name) self._driver_assert( vdisk_attr is not None, (_('_create_and_copy_vdisk_data: Failed to get attributes for ' 'vdisk %s.') % src_vdisk_name)) dest_vdisk_size = vdisk_attr['capacity'] self._create_vdisk(dest_vdisk_name, dest_vdisk_size, 'b', None) # create a timer to lock vdisk that will be used to data copy timer = loopingcall.FixedIntervalLoopingCall( self._set_vdisk_copy_in_progress, [src_vdisk_name, dest_vdisk_name]) timer.start(interval=self._check_lock_interval).wait() try: self._copy_vdisk_data(src_vdisk_name, src_vdisk_id, dest_vdisk_name, dest_vdisk_id) finally: self._unset_vdisk_copy_in_progress( [src_vdisk_name, dest_vdisk_name]) def _create_host(self, connector): raise NotImplementedError() def _create_vdisk(self, name, size, unit, opts): """Create a new vdisk.""" LOG.debug('enter: _create_vdisk: vdisk %s.', name) ssh_cmd = ['svctask', 'mkvdisk', '-name', name, '-mdiskgrp', FLASHSYSTEM_VOLPOOL_NAME, '-iogrp', str(FLASHSYSTEM_VOL_IOGRP), '-size', str(size), '-unit', unit] out, err = self._ssh(ssh_cmd) self._assert_ssh_return(out.strip(), '_create_vdisk', ssh_cmd, out, err) # Ensure that the output is as expected match_obj = re.search( r'Virtual Disk, id \[([0-9]+)\], successfully created', out) self._driver_assert( match_obj is not None, (_('_create_vdisk %(name)s - did not find ' 'success message in CLI output.\n ' 'stdout: %(out)s\n stderr: %(err)s') % {'name': name, 'out': str(out), 'err': str(err)})) LOG.debug('leave: _create_vdisk: vdisk %s.', 
name) def _delete_host(self, host_name): """Delete a host on the storage system.""" LOG.debug('enter: _delete_host: host %s.', host_name) ssh_cmd = ['svctask', 'rmhost', host_name] out, err = self._ssh(ssh_cmd) # No output should be returned from rmhost self._assert_ssh_return( (not out.strip()), '_delete_host', ssh_cmd, out, err) LOG.debug('leave: _delete_host: host %s.', host_name) def _delete_vdisk(self, name, force): """Deletes existing vdisks.""" LOG.debug('enter: _delete_vdisk: vdisk %s.', name) # Try to delete volume only if found on the storage vdisk_defined = self._is_vdisk_defined(name) if not vdisk_defined: LOG.warning('warning: Tried to delete vdisk %s but ' 'it does not exist.', name) return ssh_cmd = ['svctask', 'rmvdisk', '-force', name] if not force: ssh_cmd.remove('-force') out, err = self._ssh(ssh_cmd) # No output should be returned from rmvdisk self._assert_ssh_return( (not out.strip()), ('_delete_vdisk %(name)s') % {'name': name}, ssh_cmd, out, err) LOG.debug('leave: _delete_vdisk: vdisk %s.', name) def _driver_assert(self, assert_condition, exception_message): """Internal assertion mechanism for CLI output.""" if not assert_condition: LOG.error(exception_message) raise exception.VolumeBackendAPIException(data=exception_message) def _execute_command_and_parse_attributes(self, ssh_cmd): """Execute command on the FlashSystem and parse attributes. Exception is raised if the information from the system can not be obtained. """ LOG.debug( 'enter: _execute_command_and_parse_attributes: ' 'command: %s.', str(ssh_cmd)) try: out, err = self._ssh(ssh_cmd) except processutils.ProcessExecutionError: LOG.warning('Failed to run command: %s.', ssh_cmd) # Does not raise exception when command encounters error. # Only return and the upper logic decides what to do. return None self._assert_ssh_return( out, '_execute_command_and_parse_attributes', ssh_cmd, out, err) attributes = {} for attrib_line in out.split('\n'): # If '!' not found, return the string and two empty strings attrib_name, foo, attrib_value = attrib_line.partition('!') if attrib_name is not None and attrib_name.strip(): self._append_dict(attributes, attrib_name, attrib_value) LOG.debug( 'leave: _execute_command_and_parse_attributes: ' 'command: %(cmd)s attributes: %(attr)s.', {'cmd': str(ssh_cmd), 'attr': str(attributes)}) return attributes def _find_host_exhaustive(self, connector, hosts): raise NotImplementedError() def _get_hdr_dic(self, header, row, delim): """Return CLI row data as a dictionary indexed by names from header. The strings are converted to columns using the delimiter in delim. """ attributes = header.split(delim) values = row.split(delim) self._driver_assert( len(values) == len(attributes), (_('_get_hdr_dic: attribute headers and values do not match.\n ' 'Headers: %(header)s\n Values: %(row)s.') % {'header': str(header), 'row': str(row)})) dic = {a: v for a, v in zip(attributes, values)} return dic def _get_host_from_connector(self, connector): """List the hosts defined in the storage. Return the host name with the given connection info, or None if there is no host fitting that information. 
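# --- Illustrative sketch only; not part of the driver file above. ---
# The CLI helpers above run svcinfo/svctask with ``-delim '!'`` and parse the
# output in two shapes: attribute listings ("name!value" per line, see
# _execute_command_and_parse_attributes) and tables (a header row plus data
# rows, see _get_hdr_dic).  The simplified parsers below show both shapes on
# made-up output; the field names and values are illustrative only.
def parse_attributes(out):
    """Parse 'name!value' lines into a dict (repeated keys keep the last)."""
    attrs = {}
    for line in out.splitlines():
        name, _sep, value = line.partition('!')
        if name.strip():
            attrs[name.strip()] = value.strip()
    return attrs


def parse_table_row(header, row, delim='!'):
    """Map one delimited data row onto the column names from the header."""
    names, values = header.split(delim), row.split(delim)
    if len(names) != len(values):
        raise ValueError('header and row column counts differ')
    return dict(zip(names, values))


print(parse_attributes('name!flashsystem-1\nstatus!online'))
print(parse_table_row('id!name!WWPN', '1!node1!500507605E801234'))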
""" LOG.debug('enter: _get_host_from_connector: %s.', connector) # Get list of host in the storage ssh_cmd = ['svcinfo', 'lshost', '-delim', '!'] out, err = self._ssh(ssh_cmd) if not out.strip(): return None # If we have FC information, we have a faster lookup option hostname = None host_lines = out.strip().split('\n') self._assert_ssh_return( host_lines, '_get_host_from_connector', ssh_cmd, out, err) header = host_lines.pop(0).split('!') self._assert_ssh_return( 'name' in header, '_get_host_from_connector', ssh_cmd, out, err) name_index = header.index('name') hosts = [x.split('!')[name_index] for x in host_lines] hostname = self._find_host_exhaustive(connector, hosts) LOG.debug('leave: _get_host_from_connector: host %s.', hostname) return hostname def _get_hostvdisk_mappings(self, host_name): """Return the defined storage mappings for a host.""" return_data = {} ssh_cmd = ['svcinfo', 'lshostvdiskmap', '-delim', '!', host_name] out, err = self._ssh(ssh_cmd) mappings = out.strip().split('\n') if mappings: header = mappings.pop(0) for mapping_line in mappings: mapping_data = self._get_hdr_dic(header, mapping_line, '!') return_data[mapping_data['vdisk_name']] = mapping_data return return_data def _get_node_data(self): """Get and verify node configuration.""" # Get storage system name and id ssh_cmd = ['svcinfo', 'lssystem', '-delim', '!'] attributes = self._execute_command_and_parse_attributes(ssh_cmd) if not attributes or ('name' not in attributes): msg = _('Could not get system name.') LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) self._system_name = attributes['name'] self._system_id = attributes['id'] # Validate value of open_access_enabled flag, for now only # support when open_access_enabled is off if not attributes or ('open_access_enabled' not in attributes) or ( attributes['open_access_enabled'] != 'off'): msg = _('open_access_enabled is not off.') LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) # Validate that the array exists pool = FLASHSYSTEM_VOLPOOL_NAME ssh_cmd = ['svcinfo', 'lsmdiskgrp', '-bytes', '-delim', '!', pool] attributes = self._execute_command_and_parse_attributes(ssh_cmd) if not attributes: msg = _('Unable to parse attributes.') LOG.error(msg) raise exception.InvalidInput(reason=msg) if ('status' not in attributes) or ( attributes['status'] == 'offline'): msg = (_('Array does not exist or is offline. 
' 'Current status of array is %s.') % attributes['status']) LOG.error(msg) raise exception.InvalidInput(reason=msg) # Get the iSCSI names of the FlashSystem nodes ssh_cmd = ['svcinfo', 'lsnode', '-delim', '!'] out, err = self._ssh(ssh_cmd) self._assert_ssh_return( out.strip(), '_get_config_data', ssh_cmd, out, err) nodes = out.strip().splitlines() self._assert_ssh_return(nodes, '_get_node_data', ssh_cmd, out, err) header = nodes.pop(0) for node_line in nodes: try: node_data = self._get_hdr_dic(header, node_line, '!') except exception.VolumeBackendAPIException: with excutils.save_and_reraise_exception(): self._log_cli_output_error('_get_node_data', ssh_cmd, out, err) try: node = { 'id': node_data['id'], 'name': node_data['name'], 'IO_group': node_data['IO_group_id'], 'WWNN': node_data['WWNN'], 'status': node_data['status'], 'WWPN': [], 'protocol': None, 'iscsi_name': node_data['iscsi_name'], 'config_node': node_data['config_node'], 'ipv4': [], 'ipv6': [], } if node['status'] == 'online': self._storage_nodes[node['id']] = node except KeyError: self._handle_keyerror('lsnode', header) def _get_vdisk_attributes(self, vdisk_ref): """Return vdisk attributes Exception is raised if the information from system can not be parsed/matched to a single vdisk. :param vdisk_ref: vdisk name or vdisk id """ ssh_cmd = [ 'svcinfo', 'lsvdisk', '-bytes', '-delim', '!', vdisk_ref] return self._execute_command_and_parse_attributes(ssh_cmd) def _get_vdisk_map_properties( self, connector, lun_id, vdisk_name, vdisk_id, vdisk_params): raise NotImplementedError() def _get_vdiskhost_mappings(self, vdisk_name): """Return the defined storage mappings for a vdisk.""" return_data = {} ssh_cmd = ['svcinfo', 'lsvdiskhostmap', '-delim', '!', vdisk_name] out, err = self._ssh(ssh_cmd) mappings = out.strip().split('\n') if mappings: header = mappings.pop(0) for mapping_line in mappings: mapping_data = self._get_hdr_dic(header, mapping_line, '!') return_data[mapping_data['host_name']] = mapping_data return return_data def _get_vdisk_params(self, type_id): params = self._build_default_params() if type_id: ctxt = context.get_admin_context() volume_type = volume_types.get_volume_type(ctxt, type_id) specs = volume_type.get('extra_specs') for k, value in specs.items(): # Get the scope, if using scope format key_split = k.split(':') if len(key_split) == 1: scope = None key = key_split[0] else: scope = key_split[0] key = key_split[1] # We generally do not look at capabilities in the driver, but # protocol is a special case where the user asks for a given # protocol and we want both the scheduler and the driver to act # on the value. if ((not scope or scope == 'capabilities') and key == 'storage_protocol'): scope = None key = 'protocol' # Anything keys that the driver should look at should have the # 'drivers' scope. 
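# --- Illustrative sketch only; not part of the driver file above. ---
# _get_vdisk_params above reads volume-type extra specs of the form
# "<scope>:<key>".  Unscoped keys and "capabilities:storage_protocol" are
# honoured (the latter is folded into the driver's 'protocol' parameter);
# every other key must use the "drivers" scope or it is ignored, as the code
# after this sketch does.  A minimal standalone version of that decision:
def split_extra_spec(spec_key):
    """Return (scope, key) for an extra-spec key such as 'drivers:protocol'."""
    parts = spec_key.split(':', 1)
    return (None, parts[0]) if len(parts) == 1 else (parts[0], parts[1])


def is_spec_for_driver(spec_key):
    scope, key = split_extra_spec(spec_key)
    if (not scope or scope == 'capabilities') and key == 'storage_protocol':
        return True
    return not scope or scope == 'drivers'


assert is_spec_for_driver('capabilities:storage_protocol')
assert is_spec_for_driver('drivers:protocol')
assert not is_spec_for_driver('capabilities:thin_provisioning')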
if scope and scope != "drivers": continue if key in params: this_type = type(params[key]).__name__ if this_type == 'int': value = int(value) elif this_type == 'bool': value = strutils.bool_from_string(value) params[key] = value self._check_vdisk_params(params) return params def _handle_keyerror(self, function, header): msg = (_('Did not find expected column in %(fun)s: %(hdr)s.') % {'fun': function, 'hdr': header}) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) def _is_vdisk_defined(self, vdisk_name): """Check if vdisk is defined.""" LOG.debug('enter: _is_vdisk_defined: vdisk %s.', vdisk_name) vdisk_attributes = self._get_vdisk_attributes(vdisk_name) LOG.debug( 'leave: _is_vdisk_defined: vdisk %(vol)s with %(str)s.', {'vol': vdisk_name, 'str': vdisk_attributes is not None}) if vdisk_attributes is None: return False else: return True def _is_vdisk_copy_in_progress(self, vdisk_name): LOG.debug( '_is_vdisk_copy_in_progress: %(vdisk)s: %(vdisk_in_progress)s.', {'vdisk': vdisk_name, 'vdisk_in_progress': str(self._vdisk_copy_in_progress)}) if vdisk_name not in self._vdisk_copy_in_progress: LOG.debug( '_is_vdisk_copy_in_progress: ' 'vdisk copy is not in progress.') raise loopingcall.LoopingCallDone(retvalue=True) def _is_vdisk_map(self, vdisk_name, connector): """Check if vdisk is mapped. If map, return True and lun id. If not map, return False and expected lun id. """ LOG.debug('enter: _is_vdisk_map: %(src)s.', {'src': vdisk_name}) map_flag = False result_lun = '-1' host_name = self._get_host_from_connector(connector) if host_name is None: return (map_flag, int(result_lun)) mapping_data = self._get_hostvdisk_mappings(host_name) if vdisk_name in mapping_data: map_flag = True result_lun = mapping_data[vdisk_name]['SCSI_id'] else: lun_used = [int(v['SCSI_id']) for v in mapping_data.values()] lun_used.sort() # Start from 1 due to problems with lun id being 0. 
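# --- Illustrative sketch only; not part of the driver file above. ---
# When a vdisk is not yet mapped, _is_vdisk_map above proposes the lowest
# free SCSI LUN id, starting from 1 (id 0 is avoided), by walking the sorted
# ids already in use on the host.  The same allocation in isolation:
def next_free_lun(used_ids):
    """Return the smallest LUN id >= 1 that is not in ``used_ids``."""
    candidate = 1
    for lun_id in sorted(used_ids):
        if candidate < lun_id:
            break
        if candidate == lun_id:
            candidate += 1
    return candidate


assert next_free_lun([]) == 1
assert next_free_lun([0, 1, 2, 4]) == 3
assert next_free_lun([1, 2, 3]) == 4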
result_lun = 1 for lun_id in lun_used: if result_lun < lun_id: break elif result_lun == lun_id: result_lun += 1 LOG.debug( 'leave: _is_vdisk_map: %(src)s ' 'mapped %(map_flag)s %(result_lun)s.', {'src': vdisk_name, 'map_flag': str(map_flag), 'result_lun': result_lun}) return (map_flag, int(result_lun)) def _log_cli_output_error(self, function, cmd, out, err): LOG.error('%(fun)s: Failed with unexpected CLI output.\n ' 'Command: %(cmd)s\nstdout: %(out)s\nstderr: %(err)s\n', {'fun': function, 'cmd': cmd, 'out': str(out), 'err': str(err)}) def _manage_input_check(self, existing_ref): """Verify the input of manage function.""" # Check that the reference is valid if 'source-name' in existing_ref: manage_source = existing_ref['source-name'] vdisk = self._get_vdisk_attributes(manage_source) elif 'source-id' in existing_ref: manage_source = existing_ref['source-id'] vdisk = self._get_vdisk_attributes(manage_source) else: reason = _('Reference must contain source-id or ' 'source-name element.') raise exception.ManageExistingInvalidReference( existing_ref=existing_ref, reason=reason) if vdisk is None: reason = (_('No vdisk with the ID specified by ref %s.') % manage_source) raise exception.ManageExistingInvalidReference( existing_ref=existing_ref, reason=reason) return vdisk def _cli_except(self, fun, cmd, out, err, exc_list): """Raise if stderr contains an unexpected error code""" if not err: return None if not isinstance(exc_list, (tuple, list)): exc_list = [exc_list] try: err_type = [e for e in exc_list if err.startswith(e)].pop() except IndexError: msg = _( '%(fun)s: encountered unexpected CLI error, ' 'expected one of: %(errors)s' ) % {'fun': fun, 'errors': ', '.join(exc_list)} LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) return {'code': err_type, 'message': err.strip(err_type).strip()} @utils.synchronized('flashsystem-map', external=True) def _map_vdisk_to_host(self, vdisk_name, connector): """Create a mapping between a vdisk to a host.""" LOG.debug( 'enter: _map_vdisk_to_host: vdisk %(vdisk_name)s to ' 'host %(host)s.', {'vdisk_name': vdisk_name, 'host': connector}) # Check if a host object is defined for this host name host_name = self._get_host_from_connector(connector) if host_name is None: # Host does not exist - add a new host to FlashSystem host_name = self._create_host(connector) # Verify that create_new_host succeeded self._driver_assert( host_name is not None, (_('_create_host failed to return the host name.'))) (map_flag, result_lun) = self._is_vdisk_map(vdisk_name, connector) # Volume is not mapped to host, create a new LUN if not map_flag: ssh_cmd = ['svctask', 'mkvdiskhostmap', '-host', host_name, '-scsi', str(result_lun), vdisk_name] out, err = self._ssh(ssh_cmd, check_exit_code=False) map_error = self._cli_except('_map_vdisk_to_host', ssh_cmd, out, err, self.MULTI_HOST_MAP_ERRORS) if map_error: if not self.configuration.flashsystem_multihostmap_enabled: msg = _( 'flashsystem_multihostmap_enabled is set ' 'to False, failing requested multi-host map. 
' '(%(code)s %(message)s)' ) % map_error LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) for i in range(len(ssh_cmd)): if ssh_cmd[i] == 'mkvdiskhostmap': ssh_cmd.insert(i + 1, '-force') # try to map one volume to multiple hosts out, err = self._ssh(ssh_cmd) LOG.info('Volume %s is mapping to multiple hosts.', vdisk_name) self._assert_ssh_return( 'successfully created' in out, '_map_vdisk_to_host', ssh_cmd, out, err) else: self._assert_ssh_return( 'successfully created' in out, '_map_vdisk_to_host', ssh_cmd, out, err) LOG.debug( ('leave: _map_vdisk_to_host: LUN %(result_lun)s, vdisk ' '%(vdisk_name)s, host %(host_name)s.'), {'result_lun': result_lun, 'vdisk_name': vdisk_name, 'host_name': host_name}) return int(result_lun) def _port_conf_generator(self, cmd): ssh_cmd = cmd + ['-delim', '!'] out, err = self._ssh(ssh_cmd) if not out.strip(): return port_lines = out.strip().split('\n') if not port_lines: return header = port_lines.pop(0) yield header for portip_line in port_lines: try: port_data = self._get_hdr_dic(header, portip_line, '!') except exception.VolumeBackendAPIException: with excutils.save_and_reraise_exception(): self._log_cli_output_error('_port_conf_generator', ssh_cmd, out, err) yield port_data def _remove_device(self, properties, device): LOG.debug('enter: _remove_device') if not properties or not device: LOG.warning('_remove_device: invalid properties or device.') return use_multipath = self.configuration.use_multipath_for_image_xfer device_scan_attempts = self.configuration.num_volume_device_scan_tries protocol = properties['driver_volume_type'] connector = volume_utils.brick_get_connector( protocol, use_multipath=use_multipath, device_scan_attempts= device_scan_attempts, conn=properties) connector.disconnect_volume(properties['data'], device) LOG.debug('leave: _remove_device') def _rename_vdisk(self, vdisk_name, new_name): """Rename vdisk""" # Try to rename volume only if found on the storage vdisk_defined = self._is_vdisk_defined(vdisk_name) if not vdisk_defined: LOG.warning('warning: Tried to rename vdisk %s but ' 'it does not exist.', vdisk_name) return ssh_cmd = [ 'svctask', 'chvdisk', '-name', new_name, vdisk_name] out, err = self._ssh(ssh_cmd) # No output should be returned from chvdisk self._assert_ssh_return( (not out.strip()), '_rename_vdisk %(name)s' % {'name': vdisk_name}, ssh_cmd, out, err) LOG.info('Renamed %(vdisk)s to %(newname)s .', {'vdisk': vdisk_name, 'newname': new_name}) def _scan_device(self, properties): LOG.debug('enter: _scan_device') use_multipath = self.configuration.use_multipath_for_image_xfer device_scan_attempts = self.configuration.num_volume_device_scan_tries protocol = properties['driver_volume_type'] connector = volume_utils.brick_get_connector( protocol, use_multipath=use_multipath, device_scan_attempts= device_scan_attempts, conn=properties) device = connector.connect_volume(properties['data']) host_device = device['path'] if not connector.check_valid_device(host_device): msg = (_('Unable to access the backend storage ' 'via the path %(path)s.') % {'path': host_device}) raise exception.VolumeBackendAPIException(data=msg) LOG.debug('leave: _scan_device') return device @utils.synchronized('flashsystem-unmap', external=True) def _unmap_vdisk_from_host(self, vdisk_name, connector): if 'host' in connector: host_name = self._get_host_from_connector(connector) self._driver_assert( host_name is not None, (_('_get_host_from_connector failed to return the host name ' 'for connector.'))) else: host_name = None # Check if 
vdisk-host mapping exists, remove if it does. If no host # name was given, but only one mapping exists, we can use that. mapping_data = self._get_vdiskhost_mappings(vdisk_name) if not mapping_data: LOG.warning('_unmap_vdisk_from_host: No mapping of volume ' '%(vol_name)s to any host found.', {'vol_name': vdisk_name}) return host_name if host_name is None: if len(mapping_data) > 1: LOG.warning('_unmap_vdisk_from_host: Multiple mappings of ' 'volume %(vdisk_name)s found, no host ' 'specified.', {'vdisk_name': vdisk_name}) return else: host_name = list(mapping_data.keys())[0] else: if host_name not in mapping_data: LOG.error('_unmap_vdisk_from_host: No mapping of volume ' '%(vol_name)s to host %(host_name)s found.', {'vol_name': vdisk_name, 'host_name': host_name}) return host_name # We have a valid host_name now ssh_cmd = ['svctask', 'rmvdiskhostmap', '-host', host_name, vdisk_name] out, err = self._ssh(ssh_cmd) # Verify CLI behaviour - no output is returned from rmvdiskhostmap self._assert_ssh_return( (not out.strip()), '_unmap_vdisk_from_host', ssh_cmd, out, err) # If this host has no more mappings, delete it mapping_data = self._get_hostvdisk_mappings(host_name) if not mapping_data: self._delete_host(host_name) def _update_volume_stats(self): """Retrieve stats info from volume group.""" LOG.debug("Updating volume stats.") data = { 'vendor_name': 'IBM', 'driver_version': self.VERSION, 'storage_protocol': self._protocol, 'total_capacity_gb': 0, 'free_capacity_gb': 0, 'reserved_percentage': self.configuration.reserved_percentage, 'QoS_support': False, 'multiattach': self.configuration.flashsystem_multihostmap_enabled, } pool = FLASHSYSTEM_VOLPOOL_NAME backend_name = self.configuration.safe_get('volume_backend_name') if not backend_name: backend_name = '%s_%s' % (self._system_name, pool) data['volume_backend_name'] = backend_name ssh_cmd = ['svcinfo', 'lsmdiskgrp', '-bytes', '-delim', '!', pool] attributes = self._execute_command_and_parse_attributes(ssh_cmd) if not attributes: msg = _('_update_volume_stats: Could not get storage pool data.') LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) data['total_capacity_gb'] = ( float(attributes['capacity']) / units.Gi) data['free_capacity_gb'] = ( float(attributes['free_capacity']) / units.Gi) data['easytier_support'] = False # Do not support easy tier data['location_info'] = ( 'FlashSystemDriver:%(sys_id)s:%(pool)s' % {'sys_id': self._system_id, 'pool': pool}) self._stats = data def _set_vdisk_copy_in_progress(self, vdisk_list): LOG.debug( '_set_vdisk_copy_in_progress: %(vdisk)s: %(vdisk_in_progress)s.', {'vdisk': str(vdisk_list), 'vdisk_in_progress': str(self._vdisk_copy_in_progress)}) get_lock = True self._vdisk_copy_lock.acquire() for vdisk in vdisk_list: if vdisk in self._vdisk_copy_in_progress: get_lock = False break if get_lock: self._vdisk_copy_in_progress.update(vdisk_list) self._vdisk_copy_lock.release() if get_lock: LOG.debug( '_set_vdisk_copy_in_progress: %s.', str(self._vdisk_copy_in_progress)) raise loopingcall.LoopingCallDone(retvalue=True) def _unset_vdisk_copy_in_progress(self, vdisk_list): LOG.debug( '_unset_vdisk_copy_in_progress: %(vdisk)s: %(vdisk_in_progress)s.', {'vdisk': str(vdisk_list), 'vdisk_in_progress': str(self._vdisk_copy_in_progress)}) self._vdisk_copy_lock.acquire() for vdisk in vdisk_list: if vdisk in self._vdisk_copy_in_progress: self._vdisk_copy_in_progress.remove(vdisk) self._vdisk_copy_lock.release() def _wait_vdisk_copy_completed(self, vdisk_name): timer = loopingcall.FixedIntervalLoopingCall( 
self._is_vdisk_copy_in_progress, vdisk_name) timer.start(interval=self._check_lock_interval).wait() def check_for_setup_error(self): """Ensure that the flags are set properly.""" LOG.debug('enter: check_for_setup_error') # Check that we have the system ID information if self._system_name is None: msg = ( _('check_for_setup_error: Unable to determine system name.')) raise exception.VolumeBackendAPIException(data=msg) if self._system_id is None: msg = _('check_for_setup_error: Unable to determine system id.') raise exception.VolumeBackendAPIException(data=msg) required_flags = ['san_ip', 'san_ssh_port', 'san_login'] for flag in required_flags: if not self.configuration.safe_get(flag): msg = (_('%s is not set.') % flag) raise exception.InvalidInput(reason=msg) # Ensure that either password or keyfile were set if not (self.configuration.san_password or self.configuration.san_private_key): msg = _('check_for_setup_error: Password or SSH private key ' 'is required for authentication: set either ' 'san_password or san_private_key option.') raise exception.InvalidInput(reason=msg) params = self._build_default_params() self._check_vdisk_params(params) LOG.debug('leave: check_for_setup_error') def create_volume(self, volume): """Create volume.""" vdisk_name = volume['name'] vdisk_params = self._get_vdisk_params(volume['volume_type_id']) vdisk_size = str(volume['size']) return self._create_vdisk(vdisk_name, vdisk_size, 'gb', vdisk_params) def delete_volume(self, volume): """Delete volume.""" vdisk_name = volume['name'] self._wait_vdisk_copy_completed(vdisk_name) self._delete_vdisk(vdisk_name, False) def extend_volume(self, volume, new_size): """Extend volume.""" LOG.debug('enter: extend_volume: volume %s.', volume['name']) vdisk_name = volume['name'] self._wait_vdisk_copy_completed(vdisk_name) extend_amt = int(new_size) - volume['size'] ssh_cmd = (['svctask', 'expandvdisksize', '-size', str(extend_amt), '-unit', 'gb', vdisk_name]) out, err = self._ssh(ssh_cmd) # No output should be returned from expandvdisksize self._assert_ssh_return( (not out.strip()), 'extend_volume', ssh_cmd, out, err) LOG.debug('leave: extend_volume: volume %s.', volume['name']) def create_snapshot(self, snapshot): """Create snapshot from volume.""" LOG.debug( 'enter: create_snapshot: create %(snap)s from %(vol)s.', {'snap': snapshot['name'], 'vol': snapshot['volume']['name']}) status = snapshot['volume']['status'] if status not in ['available', 'in-use']: msg = (_( 'create_snapshot: Volume status must be "available" or ' '"in-use" for snapshot. 
The invalid status is %s.') % status) raise exception.InvalidVolume(msg) self._create_and_copy_vdisk_data(snapshot['volume']['name'], snapshot['volume']['id'], snapshot['name'], snapshot['id']) LOG.debug( 'leave: create_snapshot: create %(snap)s from %(vol)s.', {'snap': snapshot['name'], 'vol': snapshot['volume']['name']}) def delete_snapshot(self, snapshot): """Delete snapshot.""" LOG.debug( 'enter: delete_snapshot: delete %(snap)s.', {'snap': snapshot['name']}) self._wait_vdisk_copy_completed(snapshot['name']) self._delete_vdisk(snapshot['name'], False) LOG.debug( 'leave: delete_snapshot: delete %(snap)s.', {'snap': snapshot['name']}) def create_volume_from_snapshot(self, volume, snapshot): """Create volume from snapshot.""" LOG.debug( 'enter: create_volume_from_snapshot: create %(vol)s from ' '%(snap)s.', {'vol': volume['name'], 'snap': snapshot['name']}) if volume['size'] < snapshot['volume_size']: msg = _('create_volume_from_snapshot: Volume is smaller than ' 'snapshot.') LOG.error(msg) raise exception.VolumeDriverException(message=msg) status = snapshot['status'] if status != 'available': msg = (_('create_volume_from_snapshot: Snapshot status ' 'must be "available" for creating volume. ' 'The invalid status is: %s.') % status) raise exception.InvalidSnapshot(msg) self._create_and_copy_vdisk_data( snapshot['name'], snapshot['id'], volume['name'], volume['id'], dest_vdisk_size=volume['size'] * units.Gi ) LOG.debug( 'leave: create_volume_from_snapshot: create %(vol)s from ' '%(snap)s.', {'vol': volume['name'], 'snap': snapshot['name']}) def create_cloned_volume(self, volume, src_volume): """Create volume from a source volume.""" LOG.debug('enter: create_cloned_volume: create %(vol)s from %(src)s.', {'src': src_volume['name'], 'vol': volume['name']}) if src_volume['size'] > volume['size']: msg = _('create_cloned_volume: Source volume larger than ' 'destination volume') LOG.error(msg) raise exception.VolumeDriverException(message=msg) self._create_and_copy_vdisk_data( src_volume['name'], src_volume['id'], volume['name'], volume['id'], dest_vdisk_size=volume['size'] * units.Gi ) LOG.debug('leave: create_cloned_volume: create %(vol)s from %(src)s.', {'src': src_volume['name'], 'vol': volume['name']}) def manage_existing(self, volume, existing_ref): """Manages an existing vdisk. Renames the vdisk to match the expected name for the volume. 
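# --- Illustrative sketch only; not part of the driver file above. ---
# manage_existing / manage_existing_get_size above accept an ``existing_ref``
# that must carry either 'source-name' or 'source-id', and they refuse to
# manage a vdisk that is still mapped to a host.  A standalone version of
# that reference validation, with a plain ValueError in place of the Cinder
# exception types and the lookup helpers passed in as callables:
def check_manage_ref(existing_ref, lookup_vdisk, list_host_mappings):
    """Validate an existing_ref and return the vdisk attributes to manage."""
    source = existing_ref.get('source-name') or existing_ref.get('source-id')
    if not source:
        raise ValueError('reference needs source-id or source-name')
    vdisk = lookup_vdisk(source)           # e.g. _get_vdisk_attributes
    if vdisk is None:
        raise ValueError('no vdisk matches reference %r' % source)
    if list_host_mappings(vdisk['name']):  # e.g. _get_vdiskhost_mappings
        raise ValueError('vdisk %s is still mapped to a host' % vdisk['name'])
    return vdisk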
""" LOG.debug('enter: manage_existing: volume %(vol)s ref %(ref)s.', {'vol': volume, 'ref': existing_ref}) vdisk = self._manage_input_check(existing_ref) new_name = 'volume-' + volume['id'] self._rename_vdisk(vdisk['name'], new_name) LOG.debug('leave: manage_existing: volume %(vol)s ref %(ref)s.', {'vol': volume, 'ref': existing_ref}) return def manage_existing_get_size(self, volume, existing_ref): """Return size of volume to be managed by manage_existing.""" vdisk = self._manage_input_check(existing_ref) if self._get_vdiskhost_mappings(vdisk['name']): reason = _('The specified vdisk is mapped to a host.') raise exception.ManageExistingInvalidReference( existing_ref=existing_ref, reason=reason) return int(vdisk['capacity']) / units.Gi def unmanage(self, volume): """Removes the specified volume from Cinder management.""" LOG.debug('unmanage: volume %(vol)s is no longer managed by cinder.', {'vol': volume}) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/drivers/ibm/flashsystem_fc.py0000664000175000017500000003411500000000000023457 0ustar00zuulzuul00000000000000# Copyright 2015 IBM Corp. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # """ Volume driver for IBM FlashSystem storage systems with FC protocol. Limitations: 1. Cinder driver only works when open_access_enabled=off. """ import random import threading from oslo_log import log as logging from oslo_utils import excutils from cinder import exception from cinder.i18n import _ from cinder import interface from cinder import utils from cinder.volume.drivers.ibm import flashsystem_common as fscommon from cinder.volume.drivers.san import san from cinder.zonemanager import utils as fczm_utils LOG = logging.getLogger(__name__) @interface.volumedriver class FlashSystemFCDriver(fscommon.FlashSystemDriver): """IBM FlashSystem FC volume driver. Version history: .. 
code-block:: none 1.0.0 - Initial driver 1.0.1 - Code clean up 1.0.2 - Add lock into vdisk map/unmap, connection initialize/terminate 1.0.3 - Initial driver for iSCSI 1.0.4 - Split Flashsystem driver into common and FC 1.0.5 - Report capability of volume multiattach 1.0.6 - Fix bug #1469581, add I/T mapping check in terminate_connection 1.0.7 - Fix bug #1505477, add host name check in _find_host_exhaustive for FC 1.0.8 - Fix bug #1572743, multi-attach attribute should not be hardcoded, only in iSCSI 1.0.9 - Fix bug #1570574, Cleanup host resource leaking, changes only in iSCSI 1.0.10 - Fix bug #1585085, add host name check in _find_host_exhaustive for iSCSI 1.0.11 - Update driver to use ABC metaclasses 1.0.12 - Update driver to support Manage/Unmanage existing volume """ VERSION = "1.0.12" # ThirdPartySystems wiki page CI_WIKI_NAME = "IBM_STORAGE_CI" def __init__(self, *args, **kwargs): super(FlashSystemFCDriver, self).__init__(*args, **kwargs) self.configuration.append_config_values(fscommon.flashsystem_opts) self.configuration.append_config_values(san.san_opts) def _check_vdisk_params(self, params): # Check that the requested protocol is enabled if params['protocol'] != self._protocol: msg = (_("Illegal value '%(prot)s' specified for " "flashsystem_connection_protocol: " "valid value(s) are %(enabled)s.") % {'prot': params['protocol'], 'enabled': self._protocol}) raise exception.InvalidInput(reason=msg) def _create_host(self, connector): """Create a new host on the storage system. We create a host and associate it with the given connection information. """ LOG.debug('enter: _create_host: host %s.', connector['host']) rand_id = str(random.randint(0, 99999999)).zfill(8) host_name = '%s-%s' % (self._connector_to_hostname_prefix(connector), rand_id) ports = [] if 'FC' == self._protocol and 'wwpns' in connector: for wwpn in connector['wwpns']: ports.append('-hbawwpn %s' % wwpn) self._driver_assert(ports, (_('_create_host: No connector ports.'))) port1 = ports.pop(0) arg_name, arg_val = port1.split() ssh_cmd = ['svctask', 'mkhost', '-force', arg_name, arg_val, '-name', '"%s"' % host_name] out, err = self._ssh(ssh_cmd) self._assert_ssh_return('successfully created' in out, '_create_host', ssh_cmd, out, err) for port in ports: arg_name, arg_val = port.split() ssh_cmd = ['svctask', 'addhostport', '-force', arg_name, arg_val, host_name] out, err = self._ssh(ssh_cmd) self._assert_ssh_return( (not out.strip()), '_create_host', ssh_cmd, out, err) LOG.debug( 'leave: _create_host: host %(host)s - %(host_name)s.', {'host': connector['host'], 'host_name': host_name}) return host_name def _find_host_exhaustive(self, connector, hosts): hname = connector['host'] hnames = [ihost[0:ihost.rfind('-')] for ihost in hosts] if hname in hnames: host = hosts[hnames.index(hname)] ssh_cmd = ['svcinfo', 'lshost', '-delim', '!', host] out, err = self._ssh(ssh_cmd) self._assert_ssh_return( out.strip(), '_find_host_exhaustive', ssh_cmd, out, err) attr_lines = [attr_line for attr_line in out.split('\n')] attr_parm = {} for attr_line in attr_lines: attr_name, foo, attr_val = attr_line.partition('!') attr_parm[attr_name] = attr_val if ('WWPN' in attr_parm.keys() and 'wwpns' in connector and attr_parm['WWPN'].lower() in map(str.lower, map(str, connector['wwpns']))): return host else: LOG.warning('Host %(host)s was not found on backend storage.', {'host': hname}) return None def _get_conn_fc_wwpns(self): wwpns = [] cmd = ['svcinfo', 'lsportfc'] generator = self._port_conf_generator(cmd) header = next(generator, None) if not 
header: return wwpns for port_data in generator: try: if port_data['status'] == 'active': wwpns.append(port_data['WWPN']) except KeyError: self._handle_keyerror('lsportfc', header) return wwpns def _get_fc_wwpns(self): for key in self._storage_nodes: node = self._storage_nodes[key] ssh_cmd = ['svcinfo', 'lsnode', '-delim', '!', node['id']] attributes = self._execute_command_and_parse_attributes(ssh_cmd) wwpns = set(node['WWPN']) for i, s in zip(attributes['port_id'], attributes['port_status']): if 'unconfigured' != s: wwpns.add(i) node['WWPN'] = list(wwpns) LOG.info('WWPN on node %(node)s: %(wwpn)s.', {'node': node['id'], 'wwpn': node['WWPN']}) def _get_vdisk_map_properties( self, connector, lun_id, vdisk_name, vdisk_id, vdisk_params): """Get the map properties of vdisk.""" LOG.debug( 'enter: _get_vdisk_map_properties: vdisk ' '%(vdisk_name)s.', {'vdisk_name': vdisk_name}) IO_group = '0' io_group_nodes = [] for k, node in self._storage_nodes.items(): if vdisk_params['protocol'] != node['protocol']: continue if node['IO_group'] == IO_group: io_group_nodes.append(node) if not io_group_nodes: msg = (_('_get_vdisk_map_properties: No node found in ' 'I/O group %(gid)s for volume %(vol)s.') % {'gid': IO_group, 'vol': vdisk_name}) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) properties = {} properties['target_discovered'] = False properties['target_lun'] = lun_id properties['volume_id'] = vdisk_id type_str = 'fibre_channel' conn_wwpns = self._get_conn_fc_wwpns() if not conn_wwpns: msg = _('_get_vdisk_map_properties: Could not get FC ' 'connection information for the host-volume ' 'connection. Is the host configured properly ' 'for FC connections?') LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) properties['target_wwn'] = conn_wwpns if "zvm_fcp" in connector: properties['zvm_fcp'] = connector['zvm_fcp'] properties['initiator_target_map'] = self._build_initiator_target_map( connector['wwpns'], conn_wwpns) LOG.debug( 'leave: _get_vdisk_map_properties: vdisk ' '%(vdisk_name)s.', {'vdisk_name': vdisk_name}) return {'driver_volume_type': type_str, 'data': properties} @utils.synchronized('flashsystem-init-conn', external=True) def initialize_connection(self, volume, connector): """Perform work so that an FC connection can be made. To be able to create a FC connection from a given host to a volume, we must: 1. Translate the given WWNN to a host name 2. Create new host on the storage system if it does not yet exist 3. Map the volume to the host if it is not already done 4. Return the connection information for relevant nodes (in the proper I/O group) """ LOG.debug( 'enter: initialize_connection: volume %(vol)s with ' 'connector %(conn)s.', {'vol': volume, 'conn': connector}) vdisk_name = volume['name'] vdisk_id = volume['id'] vdisk_params = self._get_vdisk_params(volume['volume_type_id']) # TODO(edwin): might fix it after vdisk copy function is # ready in FlashSystem thin-provision layer. As this validation # is to check the vdisk which is in copying, at present in firmware # level vdisk doesn't allow to map host which it is copy. New # vdisk clone and snapshot function will cover it. After that the # _wait_vdisk_copy_completed need some modification. 
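# --- Illustrative sketch only; not part of the driver file above. ---
# _get_vdisk_map_properties above returns fibre_channel connection data whose
# 'initiator_target_map' pairs every initiator WWPN from the connector with
# the full list of active target WWPNs (see _build_initiator_target_map in
# flashsystem_common.py).  The same construction on made-up WWPNs:
def build_initiator_target_map(initiator_wwpns, target_wwpns):
    """Map each initiator WWPN to the list of reachable target WWPNs."""
    return {str(i_wwpn): list(target_wwpns) for i_wwpn in initiator_wwpns}


example = build_initiator_target_map(
    ['10000090fa000001', '10000090fa000002'],    # connector['wwpns']
    ['500507605e800001', '500507605e800002'])    # active array target ports
# Both initiators see both target ports; the FC zone manager consumes this map.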
self._wait_vdisk_copy_completed(vdisk_name) self._driver_assert( self._is_vdisk_defined(vdisk_name), (_('initialize_connection: vdisk %s is not defined.') % vdisk_name)) lun_id = self._map_vdisk_to_host(vdisk_name, connector) properties = {} try: properties = self._get_vdisk_map_properties( connector, lun_id, vdisk_name, vdisk_id, vdisk_params) except exception.VolumeBackendAPIException: with excutils.save_and_reraise_exception(): self.terminate_connection(volume, connector) LOG.error('initialize_connection: Failed to collect ' 'return properties for volume %(vol)s and ' 'connector %(conn)s.', {'vol': volume, 'conn': connector}) LOG.debug( 'leave: initialize_connection:\n volume: %(vol)s\n connector ' '%(conn)s\n properties: %(prop)s.', {'vol': volume, 'conn': connector, 'prop': properties}) fczm_utils.add_fc_zone(properties) return properties @utils.synchronized('flashsystem-term-conn', external=True) def terminate_connection(self, volume, connector, **kwargs): """Cleanup after connection has been terminated. When we clean up a terminated connection between a given connector and volume, we: 1. Translate the given connector to a host name 2. Remove the volume-to-host mapping if it exists 3. Delete the host if it has no more mappings (hosts are created automatically by this driver when mappings are created) """ LOG.debug( 'enter: terminate_connection: volume %(vol)s with ' 'connector %(conn)s.', {'vol': volume, 'conn': connector}) return_data = { 'driver_volume_type': 'fibre_channel', 'data': {}, } vdisk_name = volume['name'] self._wait_vdisk_copy_completed(vdisk_name) self._unmap_vdisk_from_host(vdisk_name, connector) host_name = self._get_host_from_connector(connector) if not host_name: properties = {} conn_wwpns = self._get_conn_fc_wwpns() properties['target_wwn'] = conn_wwpns properties['initiator_target_map'] = ( self._build_initiator_target_map( connector['wwpns'], conn_wwpns)) return_data['data'] = properties fczm_utils.remove_fc_zone(return_data) LOG.debug( 'leave: terminate_connection: volume %(vol)s with ' 'connector %(conn)s.', {'vol': volume, 'conn': connector}) return return_data def do_setup(self, ctxt): """Check that we have all configuration details from the storage.""" self._context = ctxt # Get data of configured node self._get_node_data() # Get the WWPNs of the FlashSystem nodes self._get_fc_wwpns() # For each node, check what connection modes it supports. Delete any # nodes that do not support any types (may be partially configured). to_delete = [] for k, node in self._storage_nodes.items(): if not node['WWPN']: to_delete.append(k) for delkey in to_delete: del self._storage_nodes[delkey] # Make sure we have at least one node configured self._driver_assert(self._storage_nodes, 'do_setup: No configured nodes.') self._protocol = node['protocol'] = 'FC' # Set for vdisk synchronization self._vdisk_copy_in_progress = set() self._vdisk_copy_lock = threading.Lock() self._check_lock_interval = 5 def validate_connector(self, connector): """Check connector.""" if 'FC' == self._protocol and 'wwpns' not in connector: LOG.error('The connector does not contain the ' 'required information: wwpns is missing') raise exception.InvalidConnectorException(missing='wwpns') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/drivers/ibm/flashsystem_iscsi.py0000664000175000017500000003652700000000000024212 0ustar00zuulzuul00000000000000# Copyright 2015 IBM Corp. # All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # """ Volume driver for IBM FlashSystem storage systems with iSCSI protocol. Limitations: 1. Cinder driver only works when open_access_enabled=off. """ import random import threading from oslo_config import cfg from oslo_log import log as logging from oslo_utils import excutils from cinder import exception from cinder.i18n import _ from cinder import interface from cinder import utils from cinder.volume import configuration as conf from cinder.volume.drivers.ibm import flashsystem_common as fscommon from cinder.volume.drivers.san import san LOG = logging.getLogger(__name__) flashsystem_iscsi_opts = [ cfg.IntOpt('flashsystem_iscsi_portid', default=0, help='Default iSCSI Port ID of FlashSystem. ' '(Default port is 0.)') ] CONF = cfg.CONF CONF.register_opts(flashsystem_iscsi_opts, group=conf.SHARED_CONF_GROUP) @interface.volumedriver class FlashSystemISCSIDriver(fscommon.FlashSystemDriver): """IBM FlashSystem iSCSI volume driver. Version history: .. code-block:: none 1.0.0 - Initial driver 1.0.1 - Code clean up 1.0.2 - Add lock into vdisk map/unmap, connection initialize/terminate 1.0.3 - Initial driver for iSCSI 1.0.4 - Split Flashsystem driver into common and FC 1.0.5 - Report capability of volume multiattach 1.0.6 - Fix bug #1469581, add I/T mapping check in terminate_connection 1.0.7 - Fix bug #1505477, add host name check in _find_host_exhaustive for FC 1.0.8 - Fix bug #1572743, multi-attach attribute should not be hardcoded, only in iSCSI 1.0.9 - Fix bug #1570574, Cleanup host resource leaking, changes only in iSCSI 1.0.10 - Fix bug #1585085, add host name check in _find_host_exhaustive for iSCSI 1.0.11 - Update driver to use ABC metaclasses 1.0.12 - Update driver to support Manage/Unmanage existing volume """ VERSION = "1.0.12" # ThirdPartySystems wiki page CI_WIKI_NAME = "IBM_STORAGE_CI" def __init__(self, *args, **kwargs): super(FlashSystemISCSIDriver, self).__init__(*args, **kwargs) self.configuration.append_config_values(fscommon.flashsystem_opts) self.configuration.append_config_values(flashsystem_iscsi_opts) self.configuration.append_config_values(san.san_opts) def _check_vdisk_params(self, params): # Check that the requested protocol is enabled if not params['protocol'] in self._protocol: msg = (_("'%(prot)s' is invalid for " "flashsystem_connection_protocol " "in config file. valid value(s) are " "%(enabled)s.") % {'prot': params['protocol'], 'enabled': self._protocol}) raise exception.InvalidInput(reason=msg) # Check if iscsi_ip is set when protocol is iSCSI if params['protocol'] == 'iSCSI' and params['iscsi_ip'] == 'None': msg = _("target_ip_address must be set in config file when " "using protocol 'iSCSI'.") raise exception.InvalidInput(reason=msg) def _create_host(self, connector): """Create a new host on the storage system. We create a host and associate it with the given connection information. 
""" LOG.debug('enter: _create_host: host %s.', connector['host']) rand_id = str(random.randint(0, 99999999)).zfill(8) host_name = '%s-%s' % (self._connector_to_hostname_prefix(connector), rand_id) ports = [] if 'iSCSI' == self._protocol and 'initiator' in connector: ports.append('-iscsiname %s' % connector['initiator']) self._driver_assert(ports, (_('_create_host: No connector ports.'))) port1 = ports.pop(0) arg_name, arg_val = port1.split() ssh_cmd = ['svctask', 'mkhost', '-force', arg_name, arg_val, '-name', '"%s"' % host_name] out, err = self._ssh(ssh_cmd) self._assert_ssh_return('successfully created' in out, '_create_host', ssh_cmd, out, err) for port in ports: arg_name, arg_val = port.split() ssh_cmd = ['svctask', 'addhostport', '-force', arg_name, arg_val, host_name] out, err = self._ssh(ssh_cmd) self._assert_ssh_return( (not out.strip()), '_create_host', ssh_cmd, out, err) LOG.debug( 'leave: _create_host: host %(host)s - %(host_name)s.', {'host': connector['host'], 'host_name': host_name}) return host_name def _find_host_exhaustive(self, connector, hosts): LOG.debug('enter: _find_host_exhaustive hosts: %s.', hosts) hname = connector['host'] hnames = [ihost[0:ihost.rfind('-')] for ihost in hosts] if hname in hnames: host = hosts[hnames.index(hname)] ssh_cmd = ['svcinfo', 'lshost', '-delim', '!', host] out, err = self._ssh(ssh_cmd) self._assert_ssh_return( out.strip(), '_find_host_exhaustive', ssh_cmd, out, err) for attr_line in out.split('\n'): attr_name, foo, attr_val = attr_line.partition('!') if (attr_name == 'iscsi_name' and 'initiator' in connector and attr_val == connector['initiator']): LOG.debug( 'leave: _find_host_exhaustive connector: %s.', connector) return host else: LOG.warning('Host %(host)s was not found on backend storage.', {'host': hname}) return None def _get_vdisk_map_properties( self, connector, lun_id, vdisk_name, vdisk_id, vdisk_params): """Get the map properties of vdisk.""" LOG.debug( 'enter: _get_vdisk_map_properties: vdisk ' '%(vdisk_name)s.', {'vdisk_name': vdisk_name}) preferred_node = '0' IO_group = '0' # Get preferred node and other nodes in I/O group preferred_node_entry = None io_group_nodes = [] for k, node in self._storage_nodes.items(): if vdisk_params['protocol'] != node['protocol']: continue if node['id'] == preferred_node: preferred_node_entry = node if node['IO_group'] == IO_group: io_group_nodes.append(node) if not io_group_nodes: msg = (_('No node found in I/O group %(gid)s for volume %(vol)s.') % {'gid': IO_group, 'vol': vdisk_name}) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) if not preferred_node_entry: # Get 1st node in I/O group preferred_node_entry = io_group_nodes[0] LOG.warning('_get_vdisk_map_properties: Did not find a ' 'preferred node for vdisk %s.', vdisk_name) properties = { 'target_discovered': False, 'target_lun': lun_id, 'volume_id': vdisk_id, } type_str = 'iscsi' if preferred_node_entry['ipv4']: ipaddr = preferred_node_entry['ipv4'][0] else: ipaddr = preferred_node_entry['ipv6'][0] iscsi_port = self.configuration.target_port properties['target_portal'] = '%s:%s' % (ipaddr, iscsi_port) properties['target_iqn'] = preferred_node_entry['iscsi_name'] LOG.debug( 'leave: _get_vdisk_map_properties: vdisk ' '%(vdisk_name)s.', {'vdisk_name': vdisk_name}) return {'driver_volume_type': type_str, 'data': properties} @utils.synchronized('flashsystem-init-conn', external=True) def initialize_connection(self, volume, connector): """Perform work so that an iSCSI connection can be made. 
To be able to create an iSCSI connection from a given host to a volume, we must: 1. Translate the given iSCSI name to a host name 2. Create new host on the storage system if it does not yet exist 3. Map the volume to the host if it is not already done 4. Return the connection information for relevant nodes (in the proper I/O group) """ LOG.debug( 'enter: initialize_connection: volume %(vol)s with ' 'connector %(conn)s.', {'vol': volume, 'conn': connector}) vdisk_name = volume['name'] vdisk_id = volume['id'] vdisk_params = self._get_vdisk_params(volume['volume_type_id']) self._wait_vdisk_copy_completed(vdisk_name) self._driver_assert( self._is_vdisk_defined(vdisk_name), (_('vdisk %s is not defined.') % vdisk_name)) lun_id = self._map_vdisk_to_host(vdisk_name, connector) properties = {} try: properties = self._get_vdisk_map_properties( connector, lun_id, vdisk_name, vdisk_id, vdisk_params) except exception.VolumeBackendAPIException: with excutils.save_and_reraise_exception(): self.terminate_connection(volume, connector) LOG.error('Failed to collect return properties for ' 'volume %(vol)s and connector %(conn)s.', {'vol': volume, 'conn': connector}) LOG.debug( 'leave: initialize_connection:\n volume: %(vol)s\n connector ' '%(conn)s\n properties: %(prop)s.', {'vol': volume, 'conn': connector, 'prop': properties}) return properties @utils.synchronized('flashsystem-term-conn', external=True) def terminate_connection(self, volume, connector, **kwargs): """Cleanup after connection has been terminated. When we clean up a terminated connection between a given connector and volume, we: 1. Translate the given connector to a host name 2. Remove the volume-to-host mapping if it exists 3. Delete the host if it has no more mappings (hosts are created automatically by this driver when mappings are created) """ LOG.debug( 'enter: terminate_connection: volume %(vol)s with ' 'connector %(conn)s.', {'vol': volume, 'conn': connector}) vdisk_name = volume['name'] self._wait_vdisk_copy_completed(vdisk_name) host_name = self._unmap_vdisk_from_host(vdisk_name, connector) # checking if host_name none, if not then, check if the host has # any mappings, if not the host gets deleted. if host_name: if not self._get_hostvdisk_mappings(host_name): self._delete_host(host_name) LOG.debug( 'leave: terminate_connection: volume %(vol)s with ' 'connector %(conn)s.', {'vol': volume, 'conn': connector}) return {'driver_volume_type': 'iscsi'} def _get_iscsi_ip_addrs(self): """get ip address of iSCSI interface.""" LOG.debug('enter: _get_iscsi_ip_addrs') cmd = ['svcinfo', 'lsportip'] generator = self._port_conf_generator(cmd) header = next(generator, None) if not header: return for key in self._storage_nodes: if self._storage_nodes[key]['config_node'] == 'yes': node = self._storage_nodes[key] break if node is None: msg = _('No config node found.') LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) for port_data in generator: try: port_ipv4 = port_data['IP_address'] port_ipv6 = port_data['IP_address_6'] state = port_data['state'] speed = port_data['speed'] except KeyError: self._handle_keyerror('lsportip', header) if port_ipv4 == self.configuration.target_ip_address and ( port_data['id'] == ( str(self.configuration.flashsystem_iscsi_portid))): if state not in ('configured', 'online'): msg = (_('State of node is wrong. 
Current state is %s.') % state) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) if state in ('configured', 'online') and speed != 'NONE': if port_ipv4: node['ipv4'].append(port_ipv4) if port_ipv6: node['ipv6'].append(port_ipv6) break if not (len(node['ipv4']) or len(node['ipv6'])): msg = _('No ip address found.') LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) LOG.debug('leave: _get_iscsi_ip_addrs') def do_setup(self, ctxt): """Check that we have all configuration details from the storage.""" LOG.debug('enter: do_setup') self._context = ctxt # Get data of configured node self._get_node_data() # Get the iSCSI IP addresses of the FlashSystem nodes self._get_iscsi_ip_addrs() for k, node in self._storage_nodes.items(): if self.configuration.flashsystem_connection_protocol == 'iSCSI': if (len(node['ipv4']) or len(node['ipv6']) and len(node['iscsi_name'])): node['protocol'] = 'iSCSI' self._protocol = 'iSCSI' # Set for vdisk synchronization self._vdisk_copy_in_progress = set() self._vdisk_copy_lock = threading.Lock() self._check_lock_interval = 5 LOG.debug('leave: do_setup') def _build_default_params(self): protocol = self.configuration.flashsystem_connection_protocol if protocol.lower() == 'iscsi': protocol = 'iSCSI' return { 'protocol': protocol, 'iscsi_ip': self.configuration.target_ip_address, 'iscsi_port': self.configuration.target_port, 'iscsi_ported': self.configuration.flashsystem_iscsi_portid, } def validate_connector(self, connector): """Check connector for enabled protocol.""" valid = False if 'iSCSI' == self._protocol and 'initiator' in connector: valid = True if not valid: LOG.error('The connector does not contain the ' 'required information: initiator is missing') raise exception.InvalidConnectorException(missing=( 'initiator')) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/drivers/ibm/gpfs.py0000664000175000017500000021361700000000000021412 0ustar00zuulzuul00000000000000# Copyright IBM Corp. 2013 All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ GPFS Volume Driver. 
""" import math import os import re import shutil from oslo_concurrency import processutils from oslo_config import cfg from oslo_log import log as logging from oslo_utils import units import paramiko from cinder.common import constants from cinder import context from cinder import exception from cinder.i18n import _ from cinder.image import image_utils from cinder import interface from cinder.objects import fields from cinder import utils from cinder.volume import configuration from cinder.volume import driver from cinder.volume.drivers import nfs from cinder.volume.drivers import remotefs from cinder.volume.drivers.san import san from cinder.volume import volume_utils GPFS_CLONE_MIN_RELEASE = 1200 GPFS_ENC_MIN_RELEASE = 1404 MIGRATION_ALLOWED_DEST_TYPE = ['GPFSDriver', 'GPFSNFSDriver'] LOG = logging.getLogger(__name__) gpfs_opts = [ cfg.StrOpt('gpfs_mount_point_base', help='Specifies the path of the GPFS directory where Block ' 'Storage volume and snapshot files are stored.'), cfg.StrOpt('gpfs_images_dir', help='Specifies the path of the Image service repository in ' 'GPFS. Leave undefined if not storing images in GPFS.'), cfg.StrOpt('gpfs_images_share_mode', choices=['copy', 'copy_on_write', None], help='Specifies the type of image copy to be used. Set this ' 'when the Image service repository also uses GPFS so ' 'that image files can be transferred efficiently from ' 'the Image service to the Block Storage service. There ' 'are two valid values: "copy" specifies that a full copy ' 'of the image is made; "copy_on_write" specifies that ' 'copy-on-write optimization strategy is used and ' 'unmodified blocks of the image file are shared ' 'efficiently.'), cfg.IntOpt('gpfs_max_clone_depth', default=0, help='Specifies an upper limit on the number of indirections ' 'required to reach a specific block due to snapshots or ' 'clones. A lengthy chain of copy-on-write snapshots or ' 'clones can have a negative impact on performance, but ' 'improves space utilization. 0 indicates unlimited ' 'clone depth.'), cfg.BoolOpt('gpfs_sparse_volumes', default=True, help=('Specifies that volumes are created as sparse files ' 'which initially consume no space. If set to False, the ' 'volume is created as a fully allocated file, in which ' 'case, creation may take a significantly longer time.')), cfg.StrOpt('gpfs_storage_pool', default='system', help=('Specifies the storage pool that volumes are assigned ' 'to. By default, the system storage pool is used.')), ] gpfs_remote_ssh_opts = [ cfg.ListOpt('gpfs_hosts', default=[], help='Comma-separated list of IP address or ' 'hostnames of GPFS nodes.'), cfg.StrOpt('gpfs_user_login', default='root', help='Username for GPFS nodes.'), cfg.StrOpt('gpfs_user_password', default='', help='Password for GPFS node user.', secret=True), cfg.StrOpt('gpfs_private_key', default='', help='Filename of private key to use for SSH authentication.'), cfg.PortOpt('gpfs_ssh_port', default=22, help='SSH port to use.'), cfg.StrOpt('gpfs_hosts_key_file', default='$state_path/ssh_known_hosts', help='File containing SSH host keys for the gpfs nodes ' 'with which driver needs to communicate. ' 'Default=$state_path/ssh_known_hosts'), cfg.BoolOpt('gpfs_strict_host_key_policy', default=False, help='Option to enable strict gpfs host key checking while ' 'connecting to gpfs nodes. 
Default=False'), ] CONF = cfg.CONF CONF.register_opts(gpfs_opts, group=configuration.SHARED_CONF_GROUP) CONF.register_opts(gpfs_remote_ssh_opts, group=configuration.SHARED_CONF_GROUP) class GPFSDriverUnsupportedOperation(exception.VolumeBackendAPIException): message = _("GPFS driver unsupported operation: %(msg)s") def _different(difference_tuple): """Return true if two elements of a tuple are different.""" if difference_tuple: member1, member2 = difference_tuple return member1 != member2 else: return False def _sizestr(size_in_g): """Convert the specified size into a string value.""" return '%sG' % size_in_g @interface.volumedriver class GPFSDriver(driver.CloneableImageVD, driver.MigrateVD, driver.BaseVD): """Implements volume functions using GPFS primitives. .. code-block:: none Version history: 1.0.0 - Initial driver 1.1.0 - Add volume retype, refactor volume migration 1.2.0 - Add consistency group support 1.3.0 - Add NFS based GPFS storage backend support 1.3.1 - Add GPFS native encryption (encryption of data at rest) support """ VERSION = "1.3.1" # ThirdPartySystems wiki page CI_WIKI_NAME = "IBM_GPFS_CI" def __init__(self, *args, **kwargs): super(GPFSDriver, self).__init__(*args, **kwargs) self.configuration.append_config_values(gpfs_opts) self.gpfs_execute = self._gpfs_local_execute self._execute = utils.execute self.GPFS_PATH = '' @staticmethod def get_driver_options(): return gpfs_opts def _gpfs_local_execute(self, *cmd, **kwargs): if 'run_as_root' not in kwargs: kwargs.update({'run_as_root': True}) return utils.execute(*cmd, **kwargs) def _get_gpfs_state(self): """Return GPFS state information.""" try: (out, err) = self.gpfs_execute(self.GPFS_PATH + 'mmgetstate', '-Y') return out except processutils.ProcessExecutionError as exc: LOG.error('Failed to issue mmgetstate command, error: %s.', exc.stderr) raise exception.VolumeBackendAPIException(data=exc.stderr) def _check_gpfs_state(self): """Raise VolumeBackendAPIException if GPFS is not active.""" out = self._get_gpfs_state() lines = out.splitlines() state_token = lines[0].split(':').index('state') gpfs_state = lines[1].split(':')[state_token] if gpfs_state != 'active': LOG.error('GPFS is not active. 
Detailed output: %s.', out) raise exception.VolumeBackendAPIException( data=_('GPFS is not running, state: %s.') % gpfs_state) def _same_filesystem(self, path1, path2): """Return true if the two paths are in the same GPFS file system.""" try: (out, err) = self.gpfs_execute('stat', '-f', '-c', '"%i"', path1, path2) lines = out.splitlines() return lines[0] == lines[1] except processutils.ProcessExecutionError as exc: LOG.error('Failed to issue stat command on path ' '%(path1)s and path %(path2)s, error: %(error)s', {'path1': path1, 'path2': path2, 'error': exc.stderr}) raise exception.VolumeBackendAPIException(data=exc.stderr) def _get_filesystem_from_path(self, path): """Return filesystem for specified path.""" try: (out, err) = self.gpfs_execute('df', path) lines = out.splitlines() filesystem = lines[1].split()[0] return filesystem except processutils.ProcessExecutionError as exc: LOG.error('Failed to issue df command for path %(path)s, ' 'error: %(error)s.', {'path': path, 'error': exc.stderr}) raise exception.VolumeBackendAPIException(data=exc.stderr) def _get_gpfs_cluster_id(self): """Return the id for GPFS cluster being used.""" try: (out, err) = self.gpfs_execute(self.GPFS_PATH + 'mmlsconfig', 'clusterId', '-Y') lines = out.splitlines() value_token = lines[0].split(':').index('value') cluster_id = lines[1].split(':')[value_token] return cluster_id except processutils.ProcessExecutionError as exc: LOG.error('Failed to issue mmlsconfig command, error: %s.', exc.stderr) raise exception.VolumeBackendAPIException(data=exc.stderr) def _get_fileset_from_path(self, path): """Return the GPFS fileset for specified path.""" fs_regex = re.compile(r'.*fileset.name:\s+(?P\w+)', re.S) try: (out, err) = self.gpfs_execute(self.GPFS_PATH + 'mmlsattr', '-L', path) except processutils.ProcessExecutionError as exc: LOG.error('Failed to issue mmlsattr command on path %(path)s, ' 'error: %(error)s', {'path': path, 'error': exc.stderr}) raise exception.VolumeBackendAPIException(data=exc.stderr) try: fileset = fs_regex.match(out).group('fileset') return fileset except AttributeError: msg = (_('Failed to find fileset for path %(path)s, command ' 'output: %(cmdout)s.') % {'path': path, 'cmdout': out}) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) def _verify_gpfs_pool(self, storage_pool): """Return true if the specified pool is a valid GPFS storage pool.""" try: self.gpfs_execute(self.GPFS_PATH + 'mmlspool', self._gpfs_device, storage_pool) return True except processutils.ProcessExecutionError: return False def _update_volume_storage_pool(self, local_path, new_pool): """Set the storage pool for a volume to the specified value.""" if new_pool is None: new_pool = 'system' if not self._verify_gpfs_pool(new_pool): msg = (_('Invalid storage pool %s requested. Retype failed.') % new_pool) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) try: self.gpfs_execute(self.GPFS_PATH + 'mmchattr', '-P', new_pool, local_path) LOG.debug('Updated storage pool with mmchattr to %s.', new_pool) return True except processutils.ProcessExecutionError as exc: LOG.info('Could not update storage pool with mmchattr to ' '%(pool)s, error: %(error)s', {'pool': new_pool, 'error': exc.stderr}) return False def _get_gpfs_fs_release_level(self, path): """Return the GPFS version of the specified file system. The file system is specified by any valid path it contains. 
""" filesystem = self._get_filesystem_from_path(path) try: (out, err) = self.gpfs_execute(self.GPFS_PATH + 'mmlsfs', filesystem, '-V', '-Y') except processutils.ProcessExecutionError as exc: LOG.error('Failed to issue mmlsfs command for path %(path)s, ' 'error: %(error)s.', {'path': path, 'error': exc.stderr}) raise exception.VolumeBackendAPIException(data=exc.stderr) lines = out.splitlines() value_token = lines[0].split(':').index('data') fs_release_level_str = lines[1].split(':')[value_token] # at this point, release string looks like "13.23 (3.5.0.7)" # extract first token and convert to whole number value fs_release_level = int(float(fs_release_level_str.split()[0]) * 100) return filesystem, fs_release_level def _get_gpfs_cluster_release_level(self): """Return the GPFS version of current cluster.""" try: (out, err) = self.gpfs_execute(self.GPFS_PATH + 'mmlsconfig', 'minreleaseLeveldaemon', '-Y') except processutils.ProcessExecutionError as exc: LOG.error('Failed to issue mmlsconfig command, error: %s.', exc.stderr) raise exception.VolumeBackendAPIException(data=exc.stderr) lines = out.splitlines() value_token = lines[0].split(':').index('value') min_release_level = lines[1].split(':')[value_token] return int(min_release_level) def _is_gpfs_path(self, directory): """Determine if the specified path is in a gpfs file system. If not part of a gpfs file system, raise ProcessExecutionError. """ try: self.gpfs_execute(self.GPFS_PATH + 'mmlsattr', directory) except processutils.ProcessExecutionError as exc: LOG.error('Failed to issue mmlsattr command ' 'for path %(path)s, ' 'error: %(error)s.', {'path': directory, 'error': exc.stderr}) raise exception.VolumeBackendAPIException(data=exc.stderr) def _is_same_fileset(self, path1, path2): """Return true if the two paths are in the same GPFS fileset.""" if self._get_fileset_from_path(path1) == \ self._get_fileset_from_path(path2): return True return False def _same_cluster(self, host): """Return true if the host is a member of the same GPFS cluster.""" dest_location = host['capabilities'].get('location_info') if self._stats['location_info'] == dest_location: return True return False def _set_rw_permission(self, path, modebits='660'): """Set permission bits for the path.""" self.gpfs_execute('chmod', modebits, path) def _can_migrate_locally(self, host): """Return true if the host can migrate a volume locally.""" if 'location_info' not in host['capabilities']: LOG.debug('Evaluate migration: no location info, ' 'cannot migrate locally.') return None info = host['capabilities']['location_info'] try: (dest_type, dest_id, dest_path) = info.split(':') except ValueError: LOG.debug('Evaluate migration: unexpected location info, ' 'cannot migrate locally: %s.', info) return None if (dest_id != self._cluster_id or dest_type not in MIGRATION_ALLOWED_DEST_TYPE): LOG.debug('Evaluate migration: different destination driver or ' 'cluster id in location info: %s.', info) return None LOG.debug('Evaluate migration: use local migration.') return dest_path def do_setup(self, ctxt): """Determine storage back end capabilities.""" try: self._cluster_id = self._get_gpfs_cluster_id() except Exception as setup_exception: msg = (_('Could not find GPFS cluster id: %s.') % setup_exception) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) try: gpfs_base = self.configuration.gpfs_mount_point_base self._gpfs_device = self._get_filesystem_from_path(gpfs_base) except Exception as setup_exception: msg = (_('Could not find GPFS file system device: %s.') % 
setup_exception) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) pool = self.configuration.safe_get('gpfs_storage_pool') self._storage_pool = pool if not self._verify_gpfs_pool(self._storage_pool): msg = (_('Invalid storage pool %s specificed.') % self._storage_pool) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) _gpfs_cluster_release_level = self._get_gpfs_cluster_release_level() if _gpfs_cluster_release_level >= GPFS_ENC_MIN_RELEASE: self._encryption_state = self._get_gpfs_encryption_status() else: LOG.info('Downlevel GPFS Cluster Detected. GPFS ' 'encryption-at-rest feature not enabled in cluster ' 'daemon level %(cur)s - must be at least at ' 'level %(min)s.', {'cur': _gpfs_cluster_release_level, 'min': GPFS_ENC_MIN_RELEASE}) def check_for_setup_error(self): """Returns an error if prerequisites aren't met.""" self._check_gpfs_state() if self.configuration.gpfs_mount_point_base is None: msg = _('Option gpfs_mount_point_base is not set correctly.') LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) if (self.configuration.gpfs_images_share_mode and self.configuration.gpfs_images_dir is None): msg = _('Option gpfs_images_dir is not set correctly.') LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) if (self.configuration.gpfs_images_share_mode == 'copy_on_write' and not self._same_filesystem(self.configuration.gpfs_mount_point_base, self.configuration.gpfs_images_dir)): msg = (_('gpfs_images_share_mode is set to copy_on_write, but ' '%(vol)s and %(img)s belong to different file ' 'systems.') % {'vol': self.configuration.gpfs_mount_point_base, 'img': self.configuration.gpfs_images_dir}) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) if (self.configuration.gpfs_images_share_mode == 'copy_on_write' and not self._is_same_fileset(self.configuration.gpfs_mount_point_base, self.configuration.gpfs_images_dir)): msg = (_('gpfs_images_share_mode is set to copy_on_write, but ' '%(vol)s and %(img)s belong to different filesets.') % {'vol': self.configuration.gpfs_mount_point_base, 'img': self.configuration.gpfs_images_dir}) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) _gpfs_cluster_release_level = self._get_gpfs_cluster_release_level() if _gpfs_cluster_release_level < GPFS_CLONE_MIN_RELEASE: msg = (_('Downlevel GPFS Cluster Detected. GPFS Clone feature ' 'not enabled in cluster daemon level %(cur)s - must ' 'be at least at level %(min)s.') % {'cur': _gpfs_cluster_release_level, 'min': GPFS_CLONE_MIN_RELEASE}) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) for directory in [self.configuration.gpfs_mount_point_base, self.configuration.gpfs_images_dir]: if directory is None: continue if not directory.startswith('/'): msg = (_('%s must be an absolute path.') % directory) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) if not os.path.isdir(directory): msg = (_('%s is not a directory.') % directory) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) # Check if GPFS is mounted self._verify_gpfs_path_state(directory) filesystem, fslevel = self._get_gpfs_fs_release_level(directory) if fslevel < GPFS_CLONE_MIN_RELEASE: msg = (_('The GPFS filesystem %(fs)s is not at the required ' 'release level. 
Current level is %(cur)s, must be ' 'at least %(min)s.') % {'fs': filesystem, 'cur': fslevel, 'min': GPFS_CLONE_MIN_RELEASE}) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) def _create_sparse_file(self, path, size): """Creates file with 0 disk usage.""" sizestr = _sizestr(size) self.gpfs_execute('truncate', '-s', sizestr, path) def _allocate_file_blocks(self, path, size): """Preallocate file blocks by writing zeros.""" block_size_mb = 1 block_count = size * units.Gi / (block_size_mb * units.Mi) self.gpfs_execute('dd', 'if=/dev/zero', 'of=%s' % path, 'bs=%dM' % block_size_mb, 'count=%d' % block_count) def _gpfs_change_attributes(self, options, path): """Update GPFS attributes on the specified file.""" cmd = [self.GPFS_PATH + 'mmchattr'] cmd.extend(options) cmd.append(path) LOG.debug('Update volume attributes with mmchattr to %s.', options) self.gpfs_execute(*cmd) def _set_volume_attributes(self, volume, path, metadata): """Set various GPFS attributes for this volume.""" set_pool = False options = [] for item in metadata: if item == 'data_pool_name': options.extend(['-P', metadata[item]]) set_pool = True elif item == 'replicas': options.extend(['-r', metadata[item], '-m', metadata[item]]) elif item == 'dio': options.extend(['-D', metadata[item]]) elif item == 'write_affinity_depth': options.extend(['--write-affinity-depth', metadata[item]]) elif item == 'block_group_factor': options.extend(['--block-group-factor', metadata[item]]) elif item == 'write_affinity_failure_group': options.extend(['--write-affinity-failure-group', metadata[item]]) # metadata value has precedence over value set in volume type if self.configuration.gpfs_storage_pool and not set_pool: options.extend(['-P', self.configuration.gpfs_storage_pool]) if options: self._gpfs_change_attributes(options, path) fstype = None fslabel = None for item in metadata: if item == 'fstype': fstype = metadata[item] elif item == 'fslabel': fslabel = metadata[item] if fstype: self._mkfs(volume, fstype, fslabel) def create_volume(self, volume): """Creates a GPFS volume.""" # Check if GPFS is mounted self._verify_gpfs_path_state(self.configuration.gpfs_mount_point_base) volume_path = self._get_volume_path(volume) volume_size = volume['size'] # Create a sparse file first; allocate blocks later if requested self._create_sparse_file(volume_path, volume_size) self._set_rw_permission(volume_path) # Set the attributes prior to allocating any blocks so that # they are allocated according to the policy self._set_volume_attributes(volume, volume_path, volume.metadata) if not self.configuration.gpfs_sparse_volumes: self._allocate_file_blocks(volume_path, volume_size) def _create_volume_from_snapshot(self, volume, snapshot): snapshot_path = self._get_snapshot_path(snapshot) # check if the snapshot lies in the same CG as the volume to be created # if yes, clone the volume from the snapshot, else perform full copy clone = False ctxt = context.get_admin_context() snap_parent_vol = self.db.volume_get(ctxt, snapshot['volume_id']) if (volume['group_id'] == snap_parent_vol['group_id']): clone = True volume_path = self._get_volume_path(volume) if clone: self._create_gpfs_copy(src=snapshot_path, dest=volume_path) self._gpfs_redirect(volume_path) else: self._gpfs_full_copy(snapshot_path, volume_path) self._set_rw_permission(volume_path) self._set_volume_attributes(volume, volume_path, volume.metadata) def create_volume_from_snapshot(self, volume, snapshot): """Creates a GPFS volume from a snapshot.""" self._create_volume_from_snapshot(volume, 
snapshot) virt_size = self._resize_volume_file(volume, volume['size']) return {'size': math.ceil(virt_size / units.Gi)} def _get_volume_path(self, volume): return self.local_path(volume) def _create_cloned_volume(self, volume, src_vref): src = self._get_volume_path(src_vref) dest = self._get_volume_path(volume) if (volume['group_id'] == src_vref['group_id']): self._create_gpfs_clone(src, dest) else: self._gpfs_full_copy(src, dest) self._set_rw_permission(dest) self._set_volume_attributes(volume, dest, volume.metadata) def create_cloned_volume(self, volume, src_vref): """Create a GPFS volume from another volume.""" self._create_cloned_volume(volume, src_vref) virt_size = self._resize_volume_file(volume, volume['size']) return {'size': math.ceil(virt_size / units.Gi)} def _delete_gpfs_file(self, fchild, mount_point=None): """Delete a GPFS file and cleanup clone children.""" if mount_point is None: if not os.path.exists(fchild): return else: fchild_local_path = os.path.join(mount_point, os.path.basename(fchild)) if not os.path.exists(fchild_local_path): return (out, err) = self.gpfs_execute(self.GPFS_PATH + 'mmclone', 'show', fchild) fparent = None delete_parent = False inode_regex = re.compile( r'.*\s+(?:yes|no)\s+\d+\s+(?P\d+)', re.M | re.S) match = inode_regex.match(out) if match: inode = match.group('inode') if mount_point is None: path = os.path.dirname(fchild) else: path = mount_point # -ignore_readdir_race is to prevent the command from exiting # with nonzero RC when some files in the directory are removed # by other delete operations. -quit is to end the execution as # soon as we get one filename; it is not expected that two or # more filenames found. (out, err) = self._execute('find', path, '-maxdepth', '1', '-ignore_readdir_race', '-inum', inode, '-print0', '-quit', run_as_root=True) if out: fparent = out.split('\0', 1)[0] if mount_point is None: self._execute( 'rm', '-f', fchild, check_exit_code=False, run_as_root=True) else: self._execute( 'rm', '-f', fchild_local_path, check_exit_code=False, run_as_root=True) # There is no need to check for volume references on this snapshot # because 'rm -f' itself serves as a simple and implicit check. If the # parent is referenced by another volume, GPFS doesn't allow deleting # it. 'rm -f' silently fails and the subsequent check on the path # indicates whether there are any volumes derived from that snapshot. # If there are such volumes, we quit recursion and let the other # volumes delete the snapshot later. If there are no references, rm # would succeed and the snapshot is deleted. if mount_point is None: if not os.path.exists(fchild) and fparent: delete_parent = True else: if not os.path.exists(fchild_local_path) and fparent: delete_parent = True if delete_parent: fpbase = os.path.basename(fparent) if fpbase.endswith('.snap') or fpbase.endswith('.ts'): if mount_point is None: self._delete_gpfs_file(fparent) else: fparent_remote_path = os.path.join(os.path.dirname(fchild), fpbase) fparent_mount_path = os.path.dirname(fparent) self._delete_gpfs_file(fparent_remote_path, fparent_mount_path) def delete_volume(self, volume): """Deletes a logical volume.""" # Check if GPFS is mounted self._verify_gpfs_path_state(self.configuration.gpfs_mount_point_base) volume_path = self.local_path(volume) self._delete_gpfs_file(volume_path) def _gpfs_redirect(self, src): """Removes the copy_on_write dependency between src and parent. 
Remove the copy_on_write dependency between the src file and its immediate parent such that the length of dependency chain is reduced by 1. """ max_depth = self.configuration.gpfs_max_clone_depth if max_depth == 0: return False (out, err) = self.gpfs_execute(self.GPFS_PATH + 'mmclone', 'show', src) depth_regex = re.compile(r'.*\s+no\s+(?P\d+)', re.M | re.S) match = depth_regex.match(out) if match: depth = int(match.group('depth')) if depth > max_depth: self.gpfs_execute(self.GPFS_PATH + 'mmclone', 'redirect', src) return True return False def _create_gpfs_clone(self, src, dest): """Create a GPFS file clone parent for the specified file.""" snap = dest + ".snap" self._create_gpfs_snap(src, snap) self._create_gpfs_copy(snap, dest) if self._gpfs_redirect(src) and self._gpfs_redirect(dest): self._execute('rm', '-f', snap, run_as_root=True) def _create_gpfs_copy(self, src, dest): """Create a GPFS file clone copy for the specified file.""" self.gpfs_execute(self.GPFS_PATH + 'mmclone', 'copy', src, dest) def _gpfs_full_copy(self, src, dest): """Create a full copy from src to dest.""" self.gpfs_execute('cp', src, dest, check_exit_code=True) def _create_gpfs_snap(self, src, dest=None): """Create a GPFS file clone snapshot for the specified file.""" if dest is None: self.gpfs_execute(self.GPFS_PATH + 'mmclone', 'snap', src) else: self.gpfs_execute(self.GPFS_PATH + 'mmclone', 'snap', src, dest) def _is_gpfs_parent_file(self, gpfs_file): """Return true if the specified file is a gpfs clone parent.""" out, err = self.gpfs_execute(self.GPFS_PATH + 'mmclone', 'show', gpfs_file) ptoken = out.splitlines().pop().split()[0] return ptoken == 'yes' def create_snapshot(self, snapshot): """Creates a GPFS snapshot.""" snapshot_path = self._get_snapshot_path(snapshot) volume_path = os.path.join(os.path.dirname(snapshot_path), snapshot.volume.name) self._create_gpfs_snap(src=volume_path, dest=snapshot_path) self._set_rw_permission(snapshot_path, modebits='640') self._gpfs_redirect(volume_path) def delete_snapshot(self, snapshot): """Deletes a GPFS snapshot.""" # Rename the deleted snapshot to indicate it no longer exists in # cinder db. Attempt to delete the snapshot. If the snapshot has # clone children, the delete will fail silently. When volumes that # are clone children are deleted in the future, the remaining ts # snapshots will also be deleted. snapshot_path = self._get_snapshot_path(snapshot) snapshot_ts_path = '%s.ts' % snapshot_path self.gpfs_execute('mv', snapshot_path, snapshot_ts_path) self.gpfs_execute('rm', '-f', snapshot_ts_path, check_exit_code=False) def _get_snapshot_path(self, snapshot): snap_parent_vol_path = self.local_path(snapshot.volume) snapshot_path = os.path.join(os.path.dirname(snap_parent_vol_path), snapshot.name) return snapshot_path def local_path(self, volume): """Return the local path for the specified volume.""" # Check if the volume is part of a consistency group and return # the local_path accordingly. 
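# --- Illustrative sketch, not part of this driver ---------------------------
# The comment above describes how local_path() picks the on-disk location:
# volumes that belong to a consistency-group fileset live under a
# "consisgroup-<group_id>" subdirectory of the GPFS mount point, while all
# other volumes sit directly under the mount point.  A standalone sketch of
# that path derivation (names below are hypothetical) could look like this:
import os


def sketch_volume_path(mount_point_base, volume_name, group_id=None):
    """Return <base>/consisgroup-<id>/<name> for CG volumes, else <base>/<name>."""
    if group_id is not None:
        return os.path.join(mount_point_base,
                            'consisgroup-%s' % group_id,
                            volume_name)
    return os.path.join(mount_point_base, volume_name)

# Example usage:
#   sketch_volume_path('/gpfs/cinder', 'volume-123', group_id='abc')
#   -> '/gpfs/cinder/consisgroup-abc/volume-123'
# -----------------------------------------------------------------------------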
if volume.group_id is not None: if volume_utils.is_group_a_cg_snapshot_type(volume.group): cgname = "consisgroup-%s" % volume.group_id volume_path = os.path.join( self.configuration.gpfs_mount_point_base, cgname, volume.name ) return volume_path volume_path = os.path.join( self.configuration.gpfs_mount_point_base, volume.name ) return volume_path def _get_gpfs_encryption_status(self): """Determine if the backend is configured with key manager.""" try: (out, err) = self.gpfs_execute(self.GPFS_PATH + 'mmlsfs', self._gpfs_device, '--encryption', '-Y') lines = out.splitlines() value_token = lines[0].split(':').index('data') encryption_status = lines[1].split(':')[value_token] return encryption_status except processutils.ProcessExecutionError as exc: LOG.error('Failed to issue mmlsfs command, error: %s.', exc.stderr) raise exception.VolumeBackendAPIException(data=exc.stderr) def ensure_export(self, context, volume): """Synchronously recreates an export for a logical volume.""" pass def create_export(self, context, volume, connector): """Exports the volume.""" pass def remove_export(self, context, volume): """Removes an export for a logical volume.""" pass def initialize_connection(self, volume, connector): return { 'driver_volume_type': 'gpfs', 'data': { 'name': volume['name'], 'device_path': self.local_path(volume), } } def terminate_connection(self, volume, connector, **kwargs): pass def _update_volume_stats(self): """Retrieve stats info from volume group.""" LOG.debug("Updating volume stats.") gpfs_base = self.configuration.gpfs_mount_point_base data = {} backend_name = self.configuration.safe_get('volume_backend_name') data["volume_backend_name"] = backend_name or 'GPFS' data["vendor_name"] = 'IBM' data["driver_version"] = self.VERSION data["storage_protocol"] = constants.FILE free, capacity = self._get_available_capacity(self.configuration. gpfs_mount_point_base) data['total_capacity_gb'] = math.ceil(capacity / units.Gi) data['free_capacity_gb'] = math.ceil(free / units.Gi) data['reserved_percentage'] = 0 data['QoS_support'] = False data['storage_pool'] = self._storage_pool data['location_info'] = ('GPFSDriver:%(cluster_id)s:%(root_path)s' % {'cluster_id': self._cluster_id, 'root_path': gpfs_base}) data['consistencygroup_support'] = 'True' data['consistent_group_snapshot_enabled'] = True if self._encryption_state.lower() == 'yes': data['gpfs_encryption_rest'] = 'True' self._stats = data def clone_image(self, context, volume, image_location, image_meta, image_service): """Create a volume from the specified image.""" return self._clone_image(volume, image_location, image_meta['id']) def _is_cloneable(self, image_id): """Return true if the specified image can be cloned by GPFS.""" if not ((self.configuration.gpfs_images_dir and self.configuration.gpfs_images_share_mode)): reason = 'glance repository not configured to use GPFS' return False, reason, None image_path = os.path.join(self.configuration.gpfs_images_dir, image_id) try: self._is_gpfs_path(image_path) except processutils.ProcessExecutionError: reason = 'image file not in GPFS' return False, reason, None return True, None, image_path def _clone_image(self, volume, image_location, image_id): """Attempt to create a volume by efficiently copying image to volume. If both source and target are backed by gpfs storage and the source image is in raw format move the image to create a volume using either gpfs clone operation or with a file copy. If the image format is not raw, convert it to raw at the volume path. 
""" # Check if GPFS is mounted self._verify_gpfs_path_state(self.configuration.gpfs_mount_point_base) cloneable_image, reason, image_path = self._is_cloneable(image_id) if not cloneable_image: LOG.debug('Image %(img)s not cloneable: %(reas)s.', {'img': image_id, 'reas': reason}) return (None, False) data = image_utils.qemu_img_info(image_path) # if image format is already raw either clone it or # copy it depending on config file settings # GPFS command (mmclone) needs to run on GPFS node on GPFS path if data.file_format == 'raw': vol_path = self._get_volume_path(volume) if (self.configuration.gpfs_images_share_mode == 'copy_on_write'): LOG.debug('Clone image to vol %s using mmclone.', volume['id']) # if the image is not already a GPFS snap file make it so if not self._is_gpfs_parent_file(image_path): self._create_gpfs_snap(image_path) self._create_gpfs_copy(image_path, vol_path) elif self.configuration.gpfs_images_share_mode == 'copy': LOG.debug('Clone image to vol %s using copyfile.', volume['id']) shutil.copyfile(image_path, vol_path) # if image is not raw convert it to raw into vol_path destination # Image conversion can be run locally on GPFS mount path else: vol_path = self.local_path(volume) LOG.debug('Clone image to vol %s using qemu convert.', volume['id']) image_utils.convert_image(image_path, vol_path, 'raw') self._set_rw_permission(vol_path) self._resize_volume_file(volume, volume['size']) return {'provider_location': None}, True def copy_image_to_volume(self, context, volume, image_service, image_id, disable_sparse=False): """Fetch the image from image_service and write it to the volume. Note that cinder.volume.flows.create_volume will attempt to use clone_image to efficiently create volume from image when both source and target are backed by gpfs storage. If that is not the case, this function is invoked and uses fetch_to_raw to create the volume. 
""" # Check if GPFS is mounted self._verify_gpfs_path_state(self.configuration.gpfs_mount_point_base) LOG.debug('Copy image to vol %s using image_utils fetch_to_raw.', volume['id']) image_utils.fetch_to_raw(context, image_service, image_id, self.local_path(volume), self.configuration.volume_dd_blocksize, size=volume['size'], disable_sparse=disable_sparse) self._resize_volume_file(volume, volume['size']) def _resize_volume_file(self, volume, new_size): """Resize volume file to new size.""" vol_path = self.local_path(volume) try: image_utils.resize_image(vol_path, new_size, run_as_root=True) except processutils.ProcessExecutionError as exc: LOG.error("Failed to resize volume " "%(volume_id)s, error: %(error)s.", {'volume_id': volume['id'], 'error': exc.stderr}) raise exception.VolumeBackendAPIException(data=exc.stderr) data = image_utils.qemu_img_info(vol_path) return data.virtual_size def extend_volume(self, volume, new_size): """Extend an existing volume.""" self._resize_volume_file(volume, new_size) def copy_volume_to_image(self, context, volume, image_service, image_meta): """Copy the volume to the specified image.""" volume_utils.upload_volume(context, image_service, image_meta, self.local_path(volume), volume) def _migrate_volume(self, volume, host): """Migrate vol if source and dest are managed by same GPFS cluster.""" LOG.debug('Migrate volume request %(vol)s to %(host)s.', {'vol': volume['name'], 'host': host['host']}) dest_path = self._can_migrate_locally(host) if dest_path is None: LOG.debug('Cannot migrate volume locally, use generic migration.') return (False, None) if dest_path == self.configuration.gpfs_mount_point_base: LOG.debug('Migration target is same cluster and path, ' 'no work needed.') return (True, None) LOG.debug('Migration target is same cluster but different path, ' 'move the volume file.') local_path = self._get_volume_path(volume) new_path = os.path.join(dest_path, volume['name']) try: self.gpfs_execute('mv', local_path, new_path) return (True, None) except processutils.ProcessExecutionError as exc: LOG.error('Driver-based migration of volume %(vol)s failed. 
' 'Move from %(src)s to %(dst)s failed with error: ' '%(error)s.', {'vol': volume['name'], 'src': local_path, 'dst': new_path, 'error': exc.stderr}) return (False, None) def migrate_volume(self, context, volume, host): """Attempt to migrate a volume to specified host.""" return self._migrate_volume(volume, host) def retype(self, context, volume, new_type, diff, host): """Modify volume to be of new type.""" LOG.debug('Retype volume request %(vol)s to be %(type)s ' '(host: %(host)s), diff %(diff)s.', {'vol': volume['name'], 'type': new_type, 'host': host, 'diff': diff}) retyped = False migrated = False pools = diff['extra_specs'].get('capabilities:storage_pool') backends = diff['extra_specs'].get('volume_backend_name') hosts = (volume['host'], host['host']) # if different backends let migration create a new volume and copy # data because the volume is considered to be substantially different if _different(backends): backend1, backend2 = backends LOG.debug('Retype request is for different backends, ' 'use migration: %(backend1)s %(backend2)s.', {'backend1': backend1, 'backend2': backend1}) return False if _different(pools): old, new = pools LOG.debug('Retype pool attribute from %(old)s to %(new)s.', {'old': old, 'new': new}) retyped = self._update_volume_storage_pool(self.local_path(volume), new) if _different(hosts): source, destination = hosts LOG.debug('Retype hosts migrate from: %(source)s to ' '%(destination)s.', {'source': source, 'destination': destination}) migrated, mdl_update = self._migrate_volume(volume, host) if migrated: updates = {'host': host['host']} self.db.volume_update(context, volume['id'], updates) return retyped or migrated def _mkfs(self, volume, filesystem, label=None): """Initialize volume to be specified filesystem type.""" if filesystem == 'swap': cmd = ['mkswap'] else: cmd = ['mkfs', '-t', filesystem] if filesystem in ('ext3', 'ext4'): cmd.append('-F') if label: if filesystem in ('msdos', 'vfat'): label_opt = '-n' else: label_opt = '-L' cmd.extend([label_opt, label]) path = self.local_path(volume) cmd.append(path) try: self._execute(*cmd, run_as_root=True) except processutils.ProcessExecutionError as exc: exception_message = (_("mkfs failed on volume %(vol)s, " "error message was: %(err)s.") % {'vol': volume['name'], 'err': exc.stderr}) LOG.error(exception_message) raise exception.VolumeBackendAPIException( data=exception_message) def _get_available_capacity(self, path): """Calculate available space on path.""" # Check if GPFS is mounted try: self._verify_gpfs_path_state(path) mounted = True except exception.VolumeBackendAPIException: mounted = False # If GPFS is not mounted, return zero capacity. So that the volume # request can be scheduled to another volume service. if not mounted: return 0, 0 out, err = self.gpfs_execute('df', '-P', '-B', '1', path, run_as_root=True) out = out.splitlines()[1] size = int(out.split()[1]) available = int(out.split()[3]) return available, size def _verify_gpfs_path_state(self, path): """Examine if GPFS is active and file system is mounted or not.""" try: self._is_gpfs_path(path) except processutils.ProcessExecutionError: msg = (_('%s cannot be accessed. 
Verify that GPFS is active and ' 'file system is mounted.') % path) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) def _create_consistencygroup(self, context, group): """Create consistency group of GPFS volumes.""" cgname = "consisgroup-%s" % group['id'] fsdev = self._gpfs_device cgpath = os.path.join(self.configuration.gpfs_mount_point_base, cgname) try: self.gpfs_execute(self.GPFS_PATH + 'mmcrfileset', fsdev, cgname, '--inode-space', 'new') except processutils.ProcessExecutionError as e: msg = (_('Failed to create consistency group: %(cgid)s. ' 'Error: %(excmsg)s.') % {'cgid': group['id'], 'excmsg': str(e)}) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) try: self.gpfs_execute(self.GPFS_PATH + 'mmlinkfileset', fsdev, cgname, '-J', cgpath) except processutils.ProcessExecutionError as e: msg = (_('Failed to link fileset for the share %(cgname)s. ' 'Error: %(excmsg)s.') % {'cgname': cgname, 'excmsg': str(e)}) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) try: self.gpfs_execute('chmod', '770', cgpath) except processutils.ProcessExecutionError as e: msg = (_('Failed to set permissions for the consistency group ' '%(cgname)s. ' 'Error: %(excmsg)s.') % {'cgname': cgname, 'excmsg': str(e)}) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) model_update = {'status': fields.GroupStatus.AVAILABLE} return model_update def _delete_consistencygroup(self, context, group, volumes): """Delete consistency group of GPFS volumes.""" cgname = "consisgroup-%s" % group['id'] fsdev = self._gpfs_device delete_fileset = True model_update = {} model_update['status'] = group['status'] try: self.gpfs_execute(self.GPFS_PATH + 'mmlsfileset', fsdev, cgname) except processutils.ProcessExecutionError as e: if e.exit_code == 2: msg = (_('The fileset associated with consistency group ' '%(cgname)s does not exist') % {'cgname': cgname}) LOG.info(msg) delete_fileset = False # Unlink and delete the fileset associated with the consistency group. # All of the volumes and volume snapshot data will also be deleted. if delete_fileset: try: self.gpfs_execute(self.GPFS_PATH + 'mmunlinkfileset', fsdev, cgname, '-f') except processutils.ProcessExecutionError as e: msg = (_('Failed to unlink fileset for consistency group ' '%(cgname)s. Error: %(excmsg)s.') % {'cgname': cgname, 'excmsg': str(e)}) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) try: self.gpfs_execute(self.GPFS_PATH + 'mmdelfileset', fsdev, cgname, '-f') except processutils.ProcessExecutionError as e: msg = (_('Failed to delete fileset for consistency group ' '%(cgname)s. Error: %(excmsg)s.') % {'cgname': cgname, 'excmsg': str(e)}) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) for volume_ref in volumes: volume_ref['status'] = 'deleted' model_update = {'status': group['status']} return None, None def _create_cgsnapshot(self, context, cgsnapshot, snapshots): """Create snapshot of a consistency group of GPFS volumes.""" model_update = {'status': fields.GroupStatus.AVAILABLE} snapshots_model_update = [] try: for snapshot in snapshots: self.create_snapshot(snapshot) except exception.VolumeBackendAPIException as err: model_update['status'] = ( fields.GroupStatus.ERROR) LOG.error("Failed to create the snapshot %(snap)s of " "CGSnapshot. 
Exception: %(exception)s.", {'snap': snapshot.name, 'exception': err}) for snapshot in snapshots: snapshots_model_update.append( {'id': snapshot.id, 'status': model_update['status']}) return model_update, snapshots_model_update def _delete_cgsnapshot(self, context, cgsnapshot, snapshots): """Delete snapshot of a consistency group of GPFS volumes.""" model_update = {'status': fields.GroupStatus.DELETED} snapshots_model_update = [] try: for snapshot in snapshots: self.delete_snapshot(snapshot) except exception.VolumeBackendAPIException as err: model_update['status'] = ( fields.GroupStatus.ERROR_DELETING) LOG.error("Failed to delete the snapshot %(snap)s of " "CGSnapshot. Exception: %(exception)s.", {'snap': snapshot.name, 'exception': err}) for snapshot in snapshots: snapshots_model_update.append( {'id': snapshot.id, 'status': model_update['status']}) return model_update, snapshots_model_update def _update_consistencygroup(self, context, group, add_volumes=None, remove_volumes=None): msg = _('Updating a consistency group is not supported.') LOG.error(msg) raise GPFSDriverUnsupportedOperation(msg=msg) def _create_consistencygroup_from_src(self, context, group, volumes, cgsnapshot=None, snapshots=None, source_cg=None, source_vols=None): msg = _('Creating a consistency group from any source consistency ' 'group or consistency group snapshot is not supported.') LOG.error(msg) raise GPFSDriverUnsupportedOperation(msg=msg) def create_group(self, ctxt, group): """Creates a group. :param ctxt: the context of the caller. :param group: the Group object of the group to be created. :returns: model_update """ if volume_utils.is_group_a_cg_snapshot_type(group): return self._create_consistencygroup(ctxt, group) # If it wasn't a consistency group request ignore it and we'll rely on # the generic group implementation. raise NotImplementedError() def delete_group(self, ctxt, group, volumes): """Deletes a group. :param ctxt: the context of the caller. :param group: the Group object of the group to be deleted. :param volumes: a list of Volume objects in the group. :returns: model_update, volumes_model_update """ if volume_utils.is_group_a_cg_snapshot_type(group): return self._delete_consistencygroup(ctxt, group, volumes) # If it wasn't a consistency group request ignore it and we'll rely on # the generic group implementation. raise NotImplementedError() def update_group(self, ctxt, group, add_volumes=None, remove_volumes=None): """Updates a group. :param ctxt: the context of the caller. :param group: the Group object of the group to be updated. :param add_volumes: a list of Volume objects to be added. :param remove_volumes: a list of Volume objects to be removed. :returns: model_update, add_volumes_update, remove_volumes_update """ if volume_utils.is_group_a_cg_snapshot_type(group): return self._update_consistencygroup(ctxt, group, add_volumes, remove_volumes) # If it wasn't a consistency group request ignore it and we'll rely on # the generic group implementation. raise NotImplementedError() def create_group_snapshot(self, ctxt, group_snapshot, snapshots): """Creates a group_snapshot. :param ctxt: the context of the caller. :param group_snapshot: the GroupSnapshot object to be created. :param snapshots: a list of Snapshot objects in the group_snapshot. 
:returns: model_update, snapshots_model_update """ if volume_utils.is_group_a_cg_snapshot_type(group_snapshot): return self._create_cgsnapshot(ctxt, group_snapshot, snapshots) # If it wasn't a consistency group request ignore it and we'll rely on # the generic group implementation. raise NotImplementedError() def delete_group_snapshot(self, ctxt, group_snapshot, snapshots): """Deletes a group_snapshot. :param ctxt: the context of the caller. :param group_snapshot: the GroupSnapshot object to be deleted. :param snapshots: a list of snapshot objects in the group_snapshot. :returns: model_update, snapshots_model_update """ if volume_utils.is_group_a_cg_snapshot_type(group_snapshot): return self._delete_cgsnapshot(ctxt, group_snapshot, snapshots) # If it wasn't a consistency group request ignore it and we'll rely on # the generic group implementation. raise NotImplementedError() def create_group_from_src(self, ctxt, group, volumes, group_snapshot=None, snapshots=None, source_group=None, source_vols=None): """Creates a group from source. :param ctxt: the context of the caller. :param group: the Group object to be created. :param volumes: a list of Volume objects in the group. :param group_snapshot: the GroupSnapshot object as source. :param snapshots: a list of snapshot objects in group_snapshot. :param source_group: the Group object as source. :param source_vols: a list of volume objects in the source_group. :returns: model_update, volumes_model_update """ if volume_utils.is_group_a_cg_snapshot_type(group): return self._create_consistencygroup_from_src(ctxt, group, volumes, group_snapshot, snapshots, source_group, source_vols) # If it wasn't a consistency group request ignore it and we'll rely on # the generic group implementation. raise NotImplementedError() @interface.volumedriver class GPFSRemoteDriver(GPFSDriver, san.SanDriver): """GPFS cinder driver extension. This extends the capability of existing GPFS cinder driver to be able to run the driver when cinder volume service is not running on GPFS node where as Nova Compute is a GPFS client. This deployment is typically in Container based OpenStack environment. """ VERSION = "1.0" # ThirdPartySystems wiki page CI_WIKI_NAME = "IBM_GPFS_CI" def __init__(self, *args, **kwargs): super(GPFSRemoteDriver, self).__init__(*args, **kwargs) self.configuration.append_config_values(san.san_opts) self.configuration.append_config_values(gpfs_remote_ssh_opts) self.configuration.san_login = self.configuration.gpfs_user_login self.configuration.san_password = ( self.configuration.gpfs_user_password) self.configuration.san_private_key = ( self.configuration.gpfs_private_key) self.configuration.san_ssh_port = self.configuration.gpfs_ssh_port self.gpfs_execute = self._gpfs_remote_execute self.GPFS_PATH = '/usr/lpp/mmfs/bin/' @staticmethod def get_driver_options(): return gpfs_opts + gpfs_remote_ssh_opts def _gpfs_remote_execute(self, *cmd, **kwargs): check_exit_code = kwargs.pop('check_exit_code', None) return self._run_ssh(cmd, check_exit_code) def do_setup(self, ctxt): self.configuration.san_ip = self._get_active_gpfs_node_ip() super(GPFSRemoteDriver, self).do_setup(ctxt) def _get_active_gpfs_node_ip(self): """Set the san_ip to active gpfs node IP""" active_gpfs_node_ip = None gpfs_node_ips = self.configuration.gpfs_hosts ssh = paramiko.SSHClient() # Validate good config setting here. # Paramiko handles the case where the file is inaccessible. 
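# The selection below works as follows: each address in gpfs_hosts is tried
# in order -- open an SSH session with either the configured password or
# private key, confirm that 'mmgetstate -Y' reports the GPFS daemon state as
# 'active', and confirm that gpfs_mount_point_base is mounted (via 'df').
# The first node that passes every check becomes san_ip. The daemon state is
# read from mmgetstate's colon-delimited output by locating the 'state'
# column in the header row and taking the value at the same position in the
# data row, schematically:
#
#     header row: ...:state:...    -> column index of 'state'
#     data row:   ...:active:...   -> value at that index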
if not self.configuration.gpfs_hosts_key_file: raise exception.ParameterNotFound(param='gpfs_hosts_key_file') elif not os.path.isfile(self.configuration.gpfs_hosts_key_file): # If using the default path, just create the file. if CONF.state_path in self.configuration.gpfs_hosts_key_file: open(self.configuration.gpfs_hosts_key_file, 'a').close() else: msg = (_("Unable to find ssh_hosts_key_file: %s") % self.configuration.gpfs_hosts_key_file) raise exception.InvalidInput(reason=msg) ssh.load_host_keys(self.configuration.gpfs_hosts_key_file) if self.configuration.gpfs_strict_host_key_policy: ssh.set_missing_host_key_policy(paramiko.RejectPolicy()) else: ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy()) if ((not self.configuration.gpfs_user_password) and (not self.configuration.gpfs_private_key)): msg = _("Specify a password or private_key") raise exception.VolumeDriverException(msg) for ip in gpfs_node_ips: try: if self.configuration.gpfs_user_password: ssh.connect(ip, port=self.configuration.gpfs_ssh_port, username=self.configuration.gpfs_user_login, password=self.configuration.gpfs_user_password, timeout=self.configuration.ssh_conn_timeout) elif self.configuration.gpfs_private_key: pkfile = os.path.expanduser( self.configuration.gpfs_private_key) privatekey = paramiko.RSAKey.from_private_key_file(pkfile) ssh.connect(ip, port=self.configuration.gpfs_ssh_port, username=self.configuration.gpfs_user_login, pkey=privatekey, timeout=self.configuration.ssh_conn_timeout) except Exception as e: LOG.info("Cannot connect to GPFS node %(ip)s. " "Error is: %(err)s. " "Continuing to next node", {'ip': ip, 'err': e}) continue try: # check if GPFS state is active on the node (out, __) = processutils.ssh_execute(ssh, self.GPFS_PATH + 'mmgetstate -Y') lines = out.splitlines() state_token = lines[0].split(':').index('state') gpfs_state = lines[1].split(':')[state_token] if gpfs_state != 'active': LOG.info("GPFS is not active on node %(ip)s. " "Continuing to next node", {'ip': ip}) continue # check if filesystem is mounted on the node processutils.ssh_execute( ssh, 'df ' + self.configuration.gpfs_mount_point_base) except processutils.ProcessExecutionError as e: LOG.info("GPFS is not active on node %(ip)s. " "Error is: %(err)s. " "Continuing to next node", {'ip': ip, 'err': e}) continue # set the san_ip to the active gpfs node IP LOG.debug("Setting active GPFS node IP to %s", ip) active_gpfs_node_ip = ip break else: msg = _("No GPFS node is active") LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) return active_gpfs_node_ip @interface.volumedriver class GPFSNFSDriver(GPFSDriver, nfs.NfsDriver, san.SanDriver): """GPFS cinder driver extension. This extends the capability of existing GPFS cinder driver to be able to create cinder volumes when cinder volume service is not running on GPFS node. 
""" VERSION = "1.0" # ThirdPartySystems wiki page CI_WIKI_NAME = "IBM_GPFS_CI" def __init__(self, *args, **kwargs): self._context = None self._storage_pool = None self._cluster_id = None super(GPFSNFSDriver, self).__init__(*args, **kwargs) self.gpfs_execute = self._gpfs_remote_execute self.configuration.append_config_values(remotefs.nas_opts) self.configuration.san_ip = self.configuration.nas_host self.configuration.san_login = self.configuration.nas_login self.configuration.san_password = self.configuration.nas_password self.configuration.san_private_key = ( self.configuration.nas_private_key) self.configuration.san_ssh_port = self.configuration.nas_ssh_port self.GPFS_PATH = '/usr/lpp/mmfs/bin/' def _gpfs_remote_execute(self, *cmd, **kwargs): check_exit_code = kwargs.pop('check_exit_code', None) return self._run_ssh(cmd, check_exit_code) def do_setup(self, context): super(GPFSNFSDriver, self).do_setup(context) self._context = context def _update_volume_stats(self): """Retrieve stats info from volume group.""" LOG.debug("Enter _update_volume_stats.") gpfs_base = self.configuration.gpfs_mount_point_base data = {} backend_name = self.configuration.safe_get('volume_backend_name') data['volume_backend_name'] = backend_name or 'GPFSNFS' data['vendor_name'] = 'IBM' data['driver_version'] = self.get_version() data['storage_protocol'] = constants.FILE self._ensure_shares_mounted() global_capacity = 0 global_free = 0 for share in self._mounted_shares: capacity, free, _used = self._get_capacity_info(share) global_capacity += capacity global_free += free data['total_capacity_gb'] = global_capacity / float(units.Gi) data['free_capacity_gb'] = global_free / float(units.Gi) data['reserved_percentage'] = 0 data['QoS_support'] = False data['storage_pool'] = self._storage_pool data['location_info'] = ('GPFSNFSDriver:%(cluster_id)s:%(root_path)s' % {'cluster_id': self._cluster_id, 'root_path': gpfs_base}) data['consistencygroup_support'] = 'True' data['consistent_group_snapshot_enabled'] = True self._stats = data LOG.debug("Exit _update_volume_stats.") def _get_volume_path(self, volume): """Returns remote GPFS path for the given volume.""" export_path = self.configuration.gpfs_mount_point_base if volume.group_id is not None: if volume_utils.is_group_a_cg_snapshot_type(volume.group): cgname = "consisgroup-%s" % volume.group_id return os.path.join(export_path, cgname, volume.name) return os.path.join(export_path, volume.name) def local_path(self, volume): """Returns the local path for the specified volume.""" remotefs_share = self._find_share(volume) base_local_path = self._get_mount_point_for_share(remotefs_share) # Check if the volume is part of a consistency group and return # the local_path accordingly. 
if volume.group_id is not None: if volume_utils.is_group_a_cg_snapshot_type(volume.group): cgname = "consisgroup-%s" % volume.group_id return os.path.join(base_local_path, cgname, volume.name) return os.path.join(base_local_path, volume.name) def _get_snapshot_path(self, snapshot): """Returns remote GPFS path for the given snapshot.""" snap_parent_vol = self.db.volume_get(self._context, snapshot['volume_id']) snap_parent_vol_path = self._get_volume_path(snap_parent_vol) snapshot_path = os.path.join(os.path.dirname(snap_parent_vol_path), snapshot['name']) return snapshot_path def create_volume(self, volume): """Creates a GPFS volume.""" super(GPFSNFSDriver, self).create_volume(volume) volume['provider_location'] = self._find_share(volume) return {'provider_location': volume['provider_location']} def delete_volume(self, volume): """Deletes a logical volume.""" # Check if GPFS is mounted self._verify_gpfs_path_state(self.configuration.gpfs_mount_point_base) volume_path = self._get_volume_path(volume) mount_point = os.path.dirname(self.local_path(volume)) # Delete all dependent snapshots, the snapshot will get deleted # if the link count goes to zero, else rm will fail silently self._delete_gpfs_file(volume_path, mount_point) def create_volume_from_snapshot(self, volume, snapshot): """Creates a GPFS volume from a snapshot.""" self._create_volume_from_snapshot(volume, snapshot) volume['provider_location'] = self._find_share(volume) self._resize_volume_file(volume, volume['size']) return {'provider_location': volume['provider_location']} def create_cloned_volume(self, volume, src_vref): """Create a GPFS volume from another volume.""" self._create_cloned_volume(volume, src_vref) volume['provider_location'] = self._find_share(volume) self._resize_volume_file(volume, volume['size']) return {'provider_location': volume['provider_location']} ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1759315577.367121 cinder-27.0.0/cinder/volume/drivers/ibm/ibm_storage/0000775000175000017500000000000000000000000022362 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/drivers/ibm/ibm_storage/__init__.py0000664000175000017500000001104700000000000024476 0ustar00zuulzuul00000000000000# Copyright (c) 2016 IBM Corporation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
# from cinder import exception from cinder.i18n import _ BLOCKS_PER_GIGABYTE = 2097152 XIV_LOG_PREFIX = "[IBM XIV STORAGE]:" XIV_CONNECTION_TYPE_ISCSI = 'iscsi' XIV_CONNECTION_TYPE_FC = 'fibre_channel' XIV_CONNECTION_TYPE_FC_ECKD = 'fibre_channel_eckd' CHAP_NONE = 'disabled' CHAP_ENABLED = 'enabled' STORAGE_DRIVER_XIV = 'xiv' STORAGE_DRIVER_DS8K = 'ds8k' CONF_KEYS = { 'driver': "volume_driver", 'proxy': "proxy", 'user': "san_login", 'password': "san_password", 'storage_pool': "san_clustername", 'address': "san_ip", 'driver_version': "ibm_storage_driver_version", 'volume_api_class': "volume_api_class", 'volume_backend': "volume_backend_name", 'connection_type': "connection_type", 'management_ips': "management_ips", 'chap': 'chap', 'system_id': 'system_id', 'replication_device': 'replication_device' } CONF_BACKEND_KEYS = { 'user': "san_login", 'password': "san_password", 'storage_pool': "san_clustername", 'address': "san_ip", 'volume_backend': "volume_backend_name", 'connection_type': "connection_type", 'management_ips': "management_ips", } FLAG_KEYS = { 'user': "user", 'password': "password", 'storage_pool': "vol_pool", 'address': "address", 'connection_type': "connection_type", 'bypass_connection_check': "XIV_BYPASS_CONNECTION_CHECK", 'management_ips': "management_ips" } METADATA_KEYS = { 'ibm_storage_version': 'openstack_ibm_storage_driver_version', 'openstack_version': 'openstack_version', 'pool_host_key': 'openstack_compute_node_%(hostname)s', 'pool_volume_os': 'openstack_volume_os', 'pool_volume_hostname': 'openstack_volume_hostname' } def get_host_or_create_from_iqn(connector, connection=None): """Get host name. Return the hostname if existing at the connector (nova-compute info) If not, generate one from the IQN or HBA """ if connection is None and connector.get('host', None): return connector['host'] if connection != XIV_CONNECTION_TYPE_FC and 'initiator' in connector: try: initiator = connector['initiator'] iqn_suffix = initiator.split('.')[-1].replace(":", "_") except Exception: if connector.get('initiator', 'None'): raise exception.VolumeDriverException(message=( _("Initiator format: %(iqn)s")) % {'iqn': connector.get('initiator', 'None')}) else: raise exception.VolumeDriverException( message=_("Initiator is missing from connector object")) return "nova-compute-%s" % iqn_suffix if connection != XIV_CONNECTION_TYPE_ISCSI and len( connector.get('wwpns', []) ) > 0: return "nova-compute-%s" % connector['wwpns'][0].replace(":", "_") raise exception.VolumeDriverException( message=_("Compute host missing either iSCSI initiator or FC wwpns")) def gigabytes_to_blocks(gigabytes): return int(BLOCKS_PER_GIGABYTE * float(gigabytes)) def get_online_iscsi_ports(ibm_storage_cli): """Returns online iscsi ports.""" iscsi_ports = [ { 'ip': p.get('address'), # ipinterface_list returns ports field in Gen3, and # port field in BlueRidge 'port': p.get('ports', p.get('port')), 'module': p.get('module') } for p in ibm_storage_cli.cmd.ipinterface_list() if p.type == 'iSCSI'] iscsi_connected_ports = [ { 'port': p.index, 'module': p.get('module_id') } for p in ibm_storage_cli.cmd.ipinterface_list_ports() if p.is_link_up == 'yes' and p.role == 'iSCSI'] to_return = [] for ip in iscsi_ports: if len([ p for p in iscsi_connected_ports if (p.get('port') == ip.get('port') and p.get('module') == ip.get('module')) ]) > 0: to_return += [ip.get('ip')] return to_return ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 
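# A brief usage sketch of the helpers above (the connector values shown are
# hypothetical, not taken from a real deployment):
#
#     connector = {'initiator': 'iqn.1994-05.com.redhat:abc123'}
#     get_host_or_create_from_iqn(connector)
#         -> 'nova-compute-redhat_abc123'   (last IQN component, ':' -> '_')
#
#     connector = {'host': 'compute-01', 'initiator': 'iqn...'}
#     get_host_or_create_from_iqn(connector)
#         -> 'compute-01'                   (nova-supplied host name wins)
#
#     gigabytes_to_blocks(1)
#         -> 2097152                        (1 GiB in 512-byte blocks)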
cinder-27.0.0/cinder/volume/drivers/ibm/ibm_storage/certificate.py0000664000175000017500000000434300000000000025222 0ustar00zuulzuul00000000000000# Copyright (c) 2016 IBM Corporation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # import os import tempfile from oslo_log import log as logging LOG = logging.getLogger(__name__) class CertificateCollector(object): def __init__(self, paths=None): self.paths_checked = [ '/etc/ssl/certs', '/etc/ssl/certs/xiv', '/etc/pki', '/etc/pki/xiv'] if paths: self.paths_checked.extend(paths) self.paths_checked = set(self.paths_checked) self.tmp_fd = None self.tmp_path = None def collect_certificate(self): self.tmp_fd, self.tmp_path = tempfile.mkstemp() for path in self.paths_checked: if os.path.exists(path) and os.path.isdir(path): dir_contents = os.listdir(path) for f in dir_contents: full_path = os.path.join(path, f) if (os.path.isfile(full_path) and f.startswith('XIV') and f.endswith('.pem')): try: cert_file = open(full_path, 'r') os.write(self.tmp_fd, cert_file.read()) cert_file.close() except Exception: LOG.exception("Failed to process certificate") os.close(self.tmp_fd) fsize = os.path.getsize(self.tmp_path) if fsize > 0: return self.tmp_path else: return None def free_certificate(self): if self.tmp_path: try: os.remove(self.tmp_path) except Exception: pass self.tmp_path = None ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/drivers/ibm/ibm_storage/cryptish.py0000664000175000017500000000160400000000000024602 0ustar00zuulzuul00000000000000# Copyright (c) 2016 IBM Corporation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # import base64 def encrypt(string): return base64.b64encode(string.encode('UTF-8')) def decrypt(string): missing_padding = len(string) % 4 if missing_padding != 0: string += b'=' * (4 - missing_padding) return base64.b64decode(string) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/drivers/ibm/ibm_storage/ds8k_connection.py0000664000175000017500000001317400000000000026032 0ustar00zuulzuul00000000000000# Copyright (c) 2016 IBM Corporation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # import datetime import hashlib import re import ssl from oslo_log import log as logging from requests.packages.urllib3 import connection from requests.packages.urllib3 import connectionpool from requests.packages.urllib3 import poolmanager from cinder.i18n import _ LOG = logging.getLogger(__name__) try: from OpenSSL.crypto import FILETYPE_ASN1 from OpenSSL.crypto import load_certificate except ImportError: load_certificate = None FILETYPE_ASN1 = None _PEM_RE = re.compile(u"""-----BEGIN CERTIFICATE-----\r? .+?\r?-----END CERTIFICATE-----\r?\n?""", re.DOTALL) class DS8KHTTPSConnection(connection.VerifiedHTTPSConnection): """Extend the HTTPS Connection to do our own Certificate Verification.""" def _verify_cert(self, sock, ca_certs): # If they asked us to not verify the Certificate then nothing to do if not ca_certs: return # Retrieve the Existing Certificates from the File in Binary Form peercert = sock.getpeercert(True) try: with open(ca_certs, 'r') as f: certs_str = f.read() except Exception: raise ssl.SSLError(_("Failed to read certificate from %s") % ca_certs) # Verify the Existing Certificates found = False certs = [match.group(0) for match in _PEM_RE.finditer(certs_str)] for cert in certs: existcert = ssl.PEM_cert_to_DER_cert(cert) # First check to make sure the 2 certificates are the same ones if (hashlib.sha256(existcert).digest() == hashlib.sha256(peercert).digest()): found = True break if not found: raise ssl.SSLError( _("The certificate doesn't match the trusted one " "in %s.") % ca_certs) if load_certificate is None and FILETYPE_ASN1 is None: raise ssl.SSLError( _("Missing 'pyOpenSSL' python module, ensure the " "library is installed.")) # Throw an exception if the certificate given to us has expired x509 = load_certificate(FILETYPE_ASN1, peercert) if x509.has_expired(): raise ssl.SSLError( _("The certificate expired: %s") % x509.get_notAfter()) def connect(self): """Override the Connect Method to fix the Certificate Verification.""" # Add certificate verification conn = self._new_conn() if getattr(self, '_tunnel_host', None): # _tunnel_host was added in Python 2.6.3 # (See: http://hg.python.org/cpython/rev/0f57b30a152f) self.sock = conn # Calls self._set_hostport(), so self.host is # self._tunnel_host below. # self._tunnel() # Mark this connection as not reusable self.auto_open = 0 # The RECENT_DATE is originally taken from requests. The date is just # an arbitrary value that is used as a sanity test to identify hosts # that are using the default time after bootup (e.g. 1970), and # provides information for debugging RECENT_DATE = datetime.date(2014, 1, 1) is_time_off = datetime.date.today() < RECENT_DATE if is_time_off: LOG.warning('System time is way off (before %s). 
This will ' 'probably lead to SSL verification errors.', RECENT_DATE) # Wrap socket using verification with the root certs in # trusted_root_certs context = ssl.SSLContext(ssl.PROTOCOL_SSLv23) self.sock = context.wrap_socket(conn) self._verify_cert(self.sock, self.ca_certs) self.is_verified = True def putrequest(self, method, url, **kwargs): """Override the Put Request method take the DS8K off of the URL.""" if url and url.startswith('httpsds8k://'): url = 'https://' + url[12:] return super(DS8KHTTPSConnection, self).putrequest(method, url, **kwargs) def request(self, method, url, **kwargs): """Override the Request method take the DS8K off of the URL.""" if url and url.startswith('httpsds8k://'): url = 'https://' + url[12:] return super(DS8KHTTPSConnection, self).request(method, url, **kwargs) class DS8KConnectionPool(connectionpool.HTTPSConnectionPool): """Extend the HTTPS Connection Pool to our own Certificate verification.""" scheme = 'httpsds8k' ConnectionCls = DS8KHTTPSConnection def urlopen(self, method, url, **kwargs): """Override URL Open method to take DS8K out of the URL protocol.""" if url and url.startswith('httpsds8k://'): url = 'https://' + url[12:] return super(DS8KConnectionPool, self).urlopen(method, url, **kwargs) if hasattr(poolmanager, 'key_fn_by_scheme'): poolmanager.key_fn_by_scheme["httpsds8k"] = ( poolmanager.key_fn_by_scheme["https"]) poolmanager.pool_classes_by_scheme["httpsds8k"] = DS8KConnectionPool ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/drivers/ibm/ibm_storage/ds8k_helper.py0000664000175000017500000015306300000000000025154 0ustar00zuulzuul00000000000000# Copyright (c) 2016 IBM Corporation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
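# The certificate check in ds8k_connection.py above accepts the peer only if
# its DER certificate matches one of the PEM blocks in the trust file, using
# SHA-256 digests for the comparison. A minimal self-contained sketch of
# that matching step (not the driver's API, just an illustration):
import hashlib
import re
import ssl

_PEM_BLOCK = re.compile(r"-----BEGIN CERTIFICATE-----\r?"
                        r".+?\r?-----END CERTIFICATE-----\r?\n?", re.DOTALL)


def cert_in_bundle(peer_der_cert, pem_bundle_text):
    """Return True if the peer's DER cert matches a PEM cert in the bundle."""
    for match in _PEM_BLOCK.finditer(pem_bundle_text):
        trusted_der = ssl.PEM_cert_to_DER_cert(match.group(0))
        if (hashlib.sha256(trusted_der).digest() ==
                hashlib.sha256(peer_der_cert).digest()):
            return True
    return False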
# import collections import copy import math import os import string import eventlet from oslo_log import log as logging import packaging.version as dist_version # pylint: disable=E0611 from cinder import coordination from cinder import exception from cinder.i18n import _ from cinder.objects import fields import cinder.volume.drivers.ibm.ibm_storage as storage from cinder.volume.drivers.ibm.ibm_storage import cryptish from cinder.volume.drivers.ibm.ibm_storage import ds8k_restclient as restclient from cinder.volume.drivers.ibm.ibm_storage import proxy from cinder.volume.drivers.ibm.ibm_storage import strings LOG = logging.getLogger(__name__) LSS_VOL_SLOTS = 0x100 LSS_SLOTS = 0xFF VALID_HOST_TYPES = ( 'auto', 'AMDLinuxRHEL', 'AMDLinuxSuse', 'AppleOSX', 'Fujitsu', 'Hp', 'HpTru64', 'HpVms', 'LinuxDT', 'LinuxRF', 'LinuxRHEL', 'LinuxSuse', 'Novell', 'SGI', 'SVC', 'SanFsAIX', 'SanFsLinux', 'Sun', 'VMWare', 'Win2000', 'Win2003', 'Win2008', 'Win2012', 'iLinux', 'nSeries', 'pLinux', 'pSeries', 'pSeriesPowerswap', 'zLinux', 'iSeries' ) def filter_alnum(s): return ''.join(x if x in string.ascii_letters + string.digits else '_' for x in s) if s else '' class DS8KCommonHelper(object): """Manage the primary backend, it is common class too.""" OPTIONAL_PARAMS = ['ds8k_host_type', 'lss_range_for_cg'] # if use new REST API, please update the version below VALID_REST_VERSION_87_51_MIN = '87.51.52.0' INVALID_STORAGE_VERSION = '8.0.1' REST_VERSION_87_51_MIN_PPRC_CG = '87.51.63.0' REST_VERSION_88_20_MIN_PPRC_CG = '88.20.112.0' def __init__(self, conf, HTTPConnectorObject=None): self.conf = conf self._connector_obj = HTTPConnectorObject self._storage_pools = None self._disable_thin_provision = False self._connection_type = self._get_value('connection_type') self._existing_lss = None self.backend = {} self.setup() @staticmethod def _gb2b(gb): return gb * (2 ** 30) def _get_value(self, key): if getattr(self.conf, 'safe_get', 'get') == 'get': value = self.conf.get(key) else: value = self.conf.safe_get(key) if not value and key not in self.OPTIONAL_PARAMS: raise exception.InvalidParameterValue( err=(_('Param [%s] should be provided.') % key)) return value def get_thin_provision(self): return self._disable_thin_provision def get_storage_pools(self): return self._storage_pools def get_connection_type(self): return self._connection_type def get_pool(self, lss): node = int(lss, 16) % 2 pids = [ pid for pid, p in self._storage_pools.items() if p['node'] == node] return pids[0] if pids else None def setup(self): self._create_client() self._get_storage_information() self._check_host_type() self.backend['pools_str'] = self._get_value('san_clustername') self._storage_pools = self.get_pools() self.verify_pools(self._storage_pools) self.backend['lss_ids_for_cg'] = self._get_lss_ids_for_cg() self._verify_rest_version() def update_client(self): self._client.close() self._create_client() def _get_certificate(self, host): cert_file = strings.CERTIFICATES_PATH + host + '.pem' LOG.debug("certificate file for DS8K %(host)s: %(cert)s", {'host': host, 'cert': cert_file}) # Use the certificate if it exists, otherwise use the System CA Bundle if os.path.exists(cert_file): return cert_file else: LOG.debug("certificate file not found.") return True def _create_client(self): san_ip = self._get_value('san_ip') try: clear_pass = cryptish.decrypt(self._get_value('san_password')) except TypeError: raise exception.InvalidParameterValue( err=_('Param [san_password] is invalid.')) verify = self._get_certificate(san_ip) try: self._client = 
restclient.RESTScheduler( san_ip, self._get_value('san_login'), clear_pass, self._connector_obj, verify) except restclient.TimeoutException: raise restclient.APIException( data=(_("Can't connect to %(host)s") % {'host': san_ip})) self.backend['rest_version'] = self._get_version()['bundle'] LOG.info("Connection to DS8K storage system %(host)s has been " "established successfully, the version of REST is %(rest)s.", {'host': self._get_value('san_ip'), 'rest': self.backend['rest_version']}) def _get_storage_information(self): storage_info = self.get_systems() self.backend['storage_unit'] = storage_info['id'] self.backend['storage_wwnn'] = storage_info['wwnn'] self.backend['storage_version'] = storage_info['release'] def _get_lss_ids_for_cg(self): lss_range = self._get_value('lss_range_for_cg') lss_ids_for_cg = set() if not lss_range or not lss_range.strip(): return lss_ids_for_cg if '-' in lss_range: lss_ids = list() lss_range = lss_range.strip() if lss_range.startswith('-') or lss_range.endswith('-'): raise exception.InvalidParameterValue( err=_('Param [lss_range_for_cg]\'s format is invalid, ' 'please don\'t put the \'-\' at the beginning or ' 'the end.')) lss_range = lss_range.replace('-', ' - ').split() for index, lss in enumerate(lss_range): if lss == '-': try: begin = int(lss_range[index - 1], 16) end = int(lss_range[index + 1], 16) lss_ids_for_cg |= set( ('%02x' % i).upper() for i in range(begin, end + 1)) except ValueError as e: raise exception.InvalidParameterValue( err=_('Param [lss_range_for_cg] is invalid, it ' 'only supports space and \'-\' as ' 'separator. ' 'Exception = %s.') % str(e)) else: lss_ids.append(lss) lss_ids_for_cg |= set(lss_ids) else: lss_ids_for_cg = set(lss_range.split()) for lss_id in lss_ids_for_cg: try: if int(lss_id, 16) > 0xFF: raise exception.InvalidParameterValue( err=_('Param [lss_range_for_cg] is invalid, it ' 'should be within 00-FF')) except ValueError as e: raise exception.InvalidParameterValue( err=_('Param [lss_range_for_cg] is invalid, it ' 'only supports space and \'-\' as separator. 
' 'Exception = %s.') % str(e)) return lss_ids_for_cg def _check_host_type(self): ds8k_host_type = self._get_value('ds8k_host_type') if (ds8k_host_type and (ds8k_host_type not in VALID_HOST_TYPES)): msg = (_("Param [ds8k_host_type] must be one of: %(values)s.") % {'values': VALID_HOST_TYPES[1:-1]}) LOG.error(msg) raise exception.InvalidParameterValue(err=msg) self.backend['host_type_override'] = ( None if ds8k_host_type == 'auto' else ds8k_host_type) def _verify_rest_version(self): if self.backend['storage_version'] == self.INVALID_STORAGE_VERSION: raise exception.VolumeDriverException( message=(_("%s does not support bulk deletion of volumes, " "if you want to use this version of driver, " "please upgrade the CCL.") % self.INVALID_STORAGE_VERSION)) rest_ver = self.backend['rest_version'][0:2] if ('87' == rest_ver and dist_version.parse(self.backend['rest_version']) < dist_version.parse(self.VALID_REST_VERSION_87_51_MIN)): raise exception.VolumeDriverException( message=(_("REST version %(invalid)s is lower than " "%(valid)s, please upgrade it in DS8K.") % {'invalid': self.backend['rest_version'], 'valid': self.VALID_REST_VERSION_87_51_MIN})) def verify_rest_version_for_pprc_cg(self): if '8.1' in self.backend['rest_version']: raise exception.VolumeDriverException( message=_("REST for DS8K 8.1 does not support PPRC " "consistency group, please upgrade the CCL.")) valid_rest_version = None rest_ver = self.backend['rest_version'][0:2] if ('87' == rest_ver and dist_version.parse(self.backend['rest_version']) < dist_version.parse(self.REST_VERSION_87_51_MIN_PPRC_CG)): valid_rest_version = self.REST_VERSION_87_51_MIN_PPRC_CG elif ('88' == rest_ver and dist_version.parse(self.backend['rest_version']) < dist_version.parse(self.REST_VERSION_88_20_MIN_PPRC_CG)): valid_rest_version = self.REST_VERSION_88_20_MIN_PPRC_CG if valid_rest_version: raise exception.VolumeDriverException( message=(_("REST version %(invalid)s is lower than " "%(valid)s, please upgrade it in DS8K.") % {'invalid': self.backend['rest_version'], 'valid': valid_rest_version})) def verify_pools(self, storage_pools): if self._connection_type == storage.XIV_CONNECTION_TYPE_FC: ptype = 'fb' elif self._connection_type == storage.XIV_CONNECTION_TYPE_FC_ECKD: ptype = 'ckd' else: raise exception.InvalidParameterValue( err=_('Param [connection_type] is invalid.')) for pid, pool in storage_pools.items(): if pool['stgtype'] != ptype: LOG.error('The stgtype of pool %(pool)s is %(ptype)s.', {'pool': pid, 'ptype': pool['stgtype']}) raise exception.InvalidParameterValue( err='Param [san_clustername] is invalid.') @proxy.logger def get_pools(self, specific_pools=None): if specific_pools: pools_str = specific_pools.replace(' ', '').upper().split(',') else: pools_str = self.backend['pools_str'].replace( ' ', '').upper().split(',') pools = [] storage_pools = collections.OrderedDict() for pid in pools_str: try: pools.append(self._get_pool(pid)) except restclient.APIException as e: LOG.warning("Failed to get pool %(id)s information, " "Exception: %(ex)s.", {'id': pid, 'ex': str(e)}) if len(pools): unsorted_pools = self._format_pools(pools) storage_pools = collections.OrderedDict(sorted( unsorted_pools, key=lambda i: i[1]['capavail'], reverse=True)) return storage_pools @proxy.logger def update_storage_pools(self, storage_pools): self._storage_pools = storage_pools def _format_pools(self, pools): return ((p['id'], { 'name': p['name'], 'node': int(p['node']), 'stgtype': p['stgtype'], 'cap': int(p['cap']), 'capavail': int(p['capavail']) }) for p in pools) 
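# A minimal standalone sketch of the ranged form accepted by
# lss_range_for_cg in _get_lss_ids_for_cg() above (the option values shown
# are hypothetical):
def _expand_lss_range(lss_range):
    """Expand a hex range such as '80-84' into {'80', '81', ..., '84'}."""
    begin, end = (int(x, 16) for x in lss_range.split('-'))
    return set('%02X' % i for i in range(begin, end + 1))

# _expand_lss_range('80-84') -> {'80', '81', '82', '83', '84'}
# A space-separated value such as '20 9A' is used as-is: {'20', '9A'}.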
def verify_lss_ids(self, specified_lss_ids): if not specified_lss_ids: return None lss_ids = specified_lss_ids.upper().replace(' ', '').split(',') # verify LSS IDs. for lss_id in lss_ids: if int(lss_id, 16) > 255: raise exception.InvalidParameterValue( _('LSS %s should be within 00-FF.') % lss_id) # verify address group self._existing_lss = self.get_all_lss() ckd_addrgrps = set(int(lss['id'], 16) // 16 for lss in self._existing_lss if lss['type'] == 'ckd') fb_addrgrps = set((int(lss, 16) // 16) for lss in lss_ids) intersection = ckd_addrgrps & fb_addrgrps if intersection: raise exception.VolumeDriverException( message=_('LSSes in the address group %s are reserved ' 'for CKD volumes') % list(intersection)) # verify whether LSSs specified have been reserved for # consistency group or not. if self.backend['lss_ids_for_cg']: for lss_id in lss_ids: if lss_id in self.backend['lss_ids_for_cg']: raise exception.InvalidParameterValue( _('LSS %s has been reserved for CG.') % lss_id) return lss_ids @proxy.logger def find_pool_lss_pair(self, pool, find_new_pid, excluded_lss): if pool: node = int(pool[1:], 16) % 2 lss = self._find_lss(node, excluded_lss) if lss: return (pool, lss) else: if not find_new_pid: raise restclient.LssIDExhaustError( message=_('All LSS/LCU IDs for configured pools ' 'on storage are exhausted.')) # find new pool id and lss for lun return self.find_biggest_pool_and_lss(excluded_lss) @proxy.logger def find_biggest_pool_and_lss(self, excluded_lss, specified_pool_lss=None): if specified_pool_lss: # pool and lss should be verified every time user create volume or # snapshot, because they can be changed in extra-sepcs at any time. specified_pool_ids, specified_lss_ids = specified_pool_lss storage_pools = self.get_pools(specified_pool_ids) self.verify_pools(storage_pools) storage_lss = self.verify_lss_ids(specified_lss_ids) else: storage_pools, storage_lss = self._storage_pools, None # pools are ordered by capacity for pool_id, pool in storage_pools.items(): lss = self._find_lss(pool['node'], excluded_lss, storage_lss) if lss: return pool_id, lss raise restclient.LssIDExhaustError( message=_("All LSS/LCU IDs for configured pools are exhausted.")) @proxy.logger def _find_lss(self, node, excluded_lss, specified_lss_ids=None): if specified_lss_ids: existing_lss = self._existing_lss else: existing_lss = self.get_all_lss() LOG.info("Existing LSS IDs are: %s.", ','.join([lss['id'] for lss in existing_lss])) saved_existing_lss = copy.copy(existing_lss) # exclude LSSs that are full. existing_lss = [lss for lss in existing_lss if lss['id'] not in excluded_lss] if not existing_lss: LOG.info("All LSSs are full.") return None # user specify LSSs in extra-specs. if specified_lss_ids: specified_lss_ids = [lss for lss in specified_lss_ids if lss not in excluded_lss] if specified_lss_ids: existing_lss = [lss for lss in existing_lss if lss['id'] in specified_lss_ids] nonexistent_lss_ids = (set(specified_lss_ids) - set(lss['id'] for lss in existing_lss)) lss = None for lss_id in nonexistent_lss_ids: if int(lss_id, 16) % 2 == node: lss = lss_id break if not lss: lss = self._find_from_existing_lss( node, existing_lss, True) else: LOG.info("All appropriate LSSs specified are full.") return None else: # exclude LSSs that reserved for CG. 
if self.backend['lss_ids_for_cg']: existing_lss_cg, nonexistent_lss_cg = ( self._classify_lss_for_cg(existing_lss)) existing_lss = [lss for lss in existing_lss if lss['id'] not in existing_lss_cg] else: existing_lss_cg = set() nonexistent_lss_cg = set() lss = self._find_from_existing_lss(node, existing_lss) if not lss: lss = self._find_from_nonexistent_lss(node, saved_existing_lss, nonexistent_lss_cg) return lss def _classify_lss_for_cg(self, existing_lss): existing_lss_ids = set(lss['id'] for lss in existing_lss) existing_lss_cg = existing_lss_ids & self.backend['lss_ids_for_cg'] nonexistent_lss_cg = self.backend['lss_ids_for_cg'] - existing_lss_cg return existing_lss_cg, nonexistent_lss_cg def _find_from_existing_lss(self, node, existing_lss, ignore_pprc=False): if not ignore_pprc: # exclude LSSs that are used by PPRC paths. lss_in_pprc = self.get_lss_in_pprc_paths() if lss_in_pprc: existing_lss = [lss for lss in existing_lss if lss['id'] not in lss_in_pprc] # exclude wrong type of LSSs and those that are not in expected node. existing_lss = [lss for lss in existing_lss if lss['type'] == 'fb' and int(lss['group']) == node] lss_id = None if existing_lss: # look for the emptiest lss from existing lss lss = sorted(existing_lss, key=lambda k: int(k['configvols']))[0] if int(lss['configvols']) < LSS_VOL_SLOTS: lss_id = lss['id'] LOG.info('_find_from_existing_lss: choose %(lss)s. ' 'now it has %(num)s volumes.', {'lss': lss_id, 'num': lss['configvols']}) return lss_id def _find_from_nonexistent_lss(self, node, existing_lss, lss_cg=None): ckd_addrgrps = set(int(lss['id'], 16) // 16 for lss in existing_lss if lss['type'] == 'ckd' and int(lss['group']) == node) full_lss = set(int(lss['id'], 16) for lss in existing_lss if lss['type'] == 'fb' and int(lss['group']) == node) cg_lss = set(int(lss, 16) for lss in lss_cg) if lss_cg else set() # look for an available lss from nonexistent lss lss_id = None for lss in range(node, LSS_SLOTS, 2): addrgrp = lss // 16 if (addrgrp not in ckd_addrgrps and lss not in full_lss and lss not in cg_lss): lss_id = ("%02x" % lss).upper() break LOG.info('_find_from_unexisting_lss: choose %s.', lss_id) return lss_id def create_lun(self, lun): volData = { 'cap': self._gb2b(lun.size), 'captype': 'bytes', 'stgtype': 'fb', 'tp': 'ese' if lun.type_thin else 'none' } lun.data_type = lun.data_type if lun.data_type else 'FB 512' if lun.type_os400: volData['os400'] = lun.type_os400 volData['name'] = lun.ds_name volData['pool'], volData['lss'] = lun.pool_lss_pair['source'] lun.ds_id = self._create_lun(volData) return lun def delete_lun(self, luns): lun_ids = [] luns = [luns] if not isinstance(luns, list) else luns for lun in luns: if lun.ds_id is None: # create_lun must have failed and not returned the id LOG.error("delete_lun: volume id is None.") continue if not self.lun_exists(lun.ds_id): LOG.error("delete_lun: volume %s not found.", lun.ds_id) continue lun_ids.append(lun.ds_id) # Max 32 volumes could be deleted by specifying ids parameter while lun_ids: if len(lun_ids) > 32: lun_ids_str = ','.join(lun_ids[0:32]) del lun_ids[0:32] else: lun_ids_str = ','.join(lun_ids) lun_ids = [] LOG.info("Deleting volumes: %s.", lun_ids_str) self._delete_lun(lun_ids_str) def get_lss_in_pprc_paths(self): # TODO(Jiamin): when the REST API that get the licenses installed # in DS8K is ready, this function should be improved. 
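# delete_lun() above removes volumes in batches because the DS8K REST API
# accepts at most 32 volume IDs per DELETE request. A small sketch of that
# chunking (the IDs generated below are hypothetical):
def _ids_in_batches(lun_ids, batch_size=32):
    for i in range(0, len(lun_ids), batch_size):
        yield ','.join(lun_ids[i:i + batch_size])

# list(_ids_in_batches(['%04X' % i for i in range(70)]))
#     -> three comma-joined strings of 32, 32 and 6 IDs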
try: paths = self.get_pprc_paths() except restclient.APIException: paths = [] LOG.exception("Can not get the LSS") lss_ids = set(p['source_lss_id'] for p in paths) LOG.info('LSS in PPRC paths are: %s.', ','.join(lss_ids)) return lss_ids def wait_flashcopy_finished(self, src_luns, tgt_luns): valid_fc_states = ('valid', 'validation_required') for tgt_lun in tgt_luns: tgt_lun.status = 'checking' while True: eventlet.sleep(5) for src_lun, tgt_lun in zip(src_luns, tgt_luns): if tgt_lun.status == 'checking': try: fcs = self.get_flashcopy(tgt_lun.ds_id) if not fcs: tgt_lun.status = 'available' elif fcs[0]['state'] not in valid_fc_states: LOG.error('Flashcopy %(src)s:%(tgt)s ended ' 'up in bad state %(state)s.', {'src': src_lun.ds_id, 'tgt': tgt_lun.ds_id, 'state': fcs[0]['state']}) tgt_lun.status = 'error' except restclient.APIException: LOG.error('Can not get flashcopy relationship ' '%(src)s:%(tgt)s', {'src': src_lun.ds_id, 'tgt': tgt_lun.ds_id}) tgt_lun.status = 'error' if not [lun for lun in tgt_luns if lun.status == 'checking']: break # cleanup error flashcopy relationship. for src_lun, tgt_lun in zip(src_luns, tgt_luns): if tgt_lun.status == 'error': self.delete_flashcopy(src_lun.ds_id, tgt_lun.ds_id) def wait_pprc_copy_finished(self, vol_ids, state, delete=True): LOG.info("Wait for PPRC pair to enter into state %s", state) vol_ids = sorted(vol_ids) min_vol_id = min(vol_ids) max_vol_id = max(vol_ids) invalid_states = ('target_suspended', 'invalid', 'volume_inaccessible') if state == 'full_duplex': invalid_states += ('suspended',) elif state == 'suspended': invalid_states += ('valid',) finished = False try: while True: eventlet.sleep(2) pairs = self.get_pprc_pairs(min_vol_id, max_vol_id) pairs = [ p for p in pairs if p['source_volume']['name'] in vol_ids] finished_pairs = [p for p in pairs if p['state'] == state] if len(finished_pairs) == len(pairs): finished = True break unfinished_pairs = [p for p in pairs if p['state'] != state] for p in unfinished_pairs: if p['state'] in invalid_states: raise restclient.APIException( data=(_('Metro Mirror pair %(id)s enters into ' 'state %(state)s. ') % {'id': p['id'], 'state': p['state']})) finally: if not finished and delete: pair_ids = {'ids': ','.join([p['id'] for p in pairs])} self.delete_pprc_pair_by_pair_id(pair_ids) def _get_host(self, connector): # DS8K doesn't support hostname which is longer than 32 chars. 
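# filter_alnum() (defined near the top of this module) replaces every
# character outside [A-Za-z0-9] with '_', and the result is truncated to the
# 32 characters DS8K allows, e.g. (the host name is hypothetical):
#
#     filter_alnum('compute-01.example.org')[:32] -> 'compute_01_example_org'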
hname = filter_alnum(connector['host'])[:32] os_type = connector.get('os_type') platform = connector.get('platform') if self.backend['host_type_override']: htype = self.backend['host_type_override'] elif os_type == 'OS400': htype = 'iSeries' elif os_type == 'AIX': htype = 'pSeries' elif platform in ('s390', 's390x') and os_type in ('linux', 'linux2'): htype = 'zLinux' else: htype = 'LinuxRHEL' return collections.namedtuple('Host', ('name', 'type'))(hname, htype) def check_vol_mapped_to_host(self, connector, vol_id): map_info = { 'host_ports': [], 'mappings': [], 'lun_ids': [] } host_wwpn_set = set(wwpn.upper() for wwpn in connector['wwpns']) host_ports = self._get_host_ports(host_wwpn_set) defined_hosts = set( hp['host_id'] for hp in host_ports if hp['host_id']) if not defined_hosts: return False, None, map_info elif len(defined_hosts) > 1: raise restclient.APIException(_('More than one host found.')) else: host_id = defined_hosts.pop() mappings = self._get_mappings(host_id) lun_ids = [ m['lunid'] for m in mappings if m['volume']['id'] == vol_id] map_info['host_ports'] = host_ports map_info['mappings'] = mappings map_info['lun_ids'] = lun_ids if not lun_ids: return False, host_id, map_info else: return True, host_id, map_info @coordination.synchronized('ibm-ds8k-{connector[host]}') def initialize_connection(self, vol_id, connector, **kwargs): host = self._get_host(connector) # Find defined host and undefined host ports host_wwpn_set = set(wwpn.upper() for wwpn in connector['wwpns']) host_ports = self._get_host_ports(host_wwpn_set) LOG.debug("host_ports: %s", host_ports) defined_hosts = set( hp['host_id'] for hp in host_ports if hp['host_id']) unknown_ports = host_wwpn_set - set( hp['wwpn'] for hp in host_ports) unconfigured_ports = set( hp['wwpn'] for hp in host_ports if not hp['host_id']) LOG.debug("initialize_connection: defined_hosts: %(defined)s, " "unknown_ports: %(unknown)s, unconfigured_ports: " "%(unconfigured)s.", {"defined": defined_hosts, "unknown": unknown_ports, "unconfigured": unconfigured_ports}) # Create host if it is not defined if not defined_hosts: host_id = self._create_host(host)['id'] elif len(defined_hosts) == 1: host_id = defined_hosts.pop() else: raise restclient.APIException( message='More than one host defined for requested ports.') LOG.info('Volume will be attached to host %s.', host_id) # Create missing host ports if unknown_ports or unconfigured_ports: self._assign_host_port(host_id, list(unknown_ports | unconfigured_ports)) # Map the volume to host lun_id = self._map_volume_to_host(host_id, vol_id) target_ports = [p['wwpn'] for p in self._get_ioports()] return { 'driver_volume_type': 'fibre_channel', 'data': { 'target_discovered': False, 'target_lun': int(lun_id, 16), 'target_wwn': target_ports, 'initiator_target_map': {initiator: target_ports for initiator in connector['wwpns']} } } @coordination.synchronized('ibm-ds8k-{connector[host]}') def terminate_connection(self, volume, vol_id, host_id, connector, map_info): host = self._get_host(connector) host_ports = map_info['host_ports'] lun_ids = map_info['lun_ids'] mappings = map_info['mappings'] delete_ports = set( hp['wwpn'] for hp in host_ports if not hp['host_id']) LOG.debug("terminate_connection: host_ports: %(host)s, " "defined_hosts: %(defined)s, delete_ports: %(delete)s.", {"host": host_ports, "defined": host_id, "delete": delete_ports}) host_name = host.name if (host.name[:7] == "OShost:"): host_name = host.name[7:] attachment_count = 0 if hasattr(volume, 'multiattach') and volume.multiattach: try: 
attachment_list = volume.volume_attachment for attachment in attachment_list: if (attachment.attach_status == "attached" and attachment.attached_host == host_name): attachment_count += 1 except AttributeError: pass if attachment_count > 1: LOG.info("Volume %(volume)s is attached to multiple " "instances on host %(host)s, hence " "skipping delete host.", {'volume': volume.name, 'host': host.name}) return for lun_id in lun_ids: self._delete_mappings(host_id, lun_id) if not lun_ids: LOG.warning("Volume %(vol)s is already not mapped to " "host %(host)s.", {'vol': vol_id, 'host': host.name}) # if this host only has volumes that have been detached, # remove the host and its ports ret_info = { 'driver_volume_type': 'fibre_channel', 'data': {} } if len(mappings) == len(lun_ids): for port in delete_ports: self._delete_host_ports(port) self._delete_host(host_id) target_ports = [p['wwpn'] for p in self._get_ioports()] target_map = {initiator: target_ports for initiator in connector['wwpns']} ret_info['data']['initiator_target_map'] = target_map return ret_info def create_group(self, group): return {'status': fields.GroupStatus.AVAILABLE} def delete_group(self, group, src_luns): volumes_model_update = [] model_update = {'status': fields.GroupStatus.DELETED} if src_luns: try: self.delete_lun(src_luns) except restclient.APIException as e: model_update['status'] = fields.GroupStatus.ERROR_DELETING LOG.exception( "Failed to delete the volumes in group %(group)s, " "Exception = %(ex)s", {'group': group.id, 'ex': e}) for src_lun in src_luns: volumes_model_update.append({ 'id': src_lun.os_id, 'status': model_update['status'] }) return model_update, volumes_model_update def delete_group_snapshot(self, group_snapshot, tgt_luns): snapshots_model_update = [] model_update = {'status': fields.GroupSnapshotStatus.DELETED} if tgt_luns: try: self.delete_lun(tgt_luns) except restclient.APIException as e: model_update['status'] = ( fields.GroupSnapshotStatus.ERROR_DELETING) LOG.error("Failed to delete snapshots in group snapshot " "%(gsnapshot)s, Exception = %(ex)s", {'gsnapshot': group_snapshot.id, 'ex': e}) for tgt_lun in tgt_luns: snapshots_model_update.append({ 'id': tgt_lun.os_id, 'status': model_update['status'] }) return model_update, snapshots_model_update def _delete_lun(self, lun_ids_str): self._client.send('DELETE', '/volumes', params={'ids': lun_ids_str}) def delete_lun_by_id(self, lun_id): self._client.send('DELETE', '/volumes/%s' % lun_id) def _get_version(self): return self._client.fetchone('GET', '/systems') @proxy.logger def _create_lun(self, volData): return self._client.fetchid('POST', '/volumes', volData) def _get_pool(self, pool_id): return self._client.fetchone('GET', '/pools/%s' % pool_id, fields=['id', 'name', 'node', 'stgtype', 'cap', 'capavail']) def start_flashcopy(self, vol_pairs, freeze=False): options = [ "permit_space_efficient_target", "fail_space_efficient_target_out_of_space" ] if freeze: options.append("freeze_consistency") self._client.send('POST', '/cs/flashcopies', { "volume_pairs": vol_pairs, "options": options }) def get_pprc_paths(self, specific_lss=None): if specific_lss: lss_range = { 'source_lss_id_from': specific_lss, 'source_lss_id_to': specific_lss } else: # get all of PPRC paths between source DS8K and target DS8K. 
lss_range = { 'source_lss_id_from': '00', 'source_lss_id_to': 'FF' } return self._client.fetchall('GET', '/cs/pprcs/paths', params=lss_range) def get_flashcopy(self, vol_id): return self._client.fetchall('GET', '/volumes/%s/flashcopy' % vol_id) def delete_flashcopy(self, src_lun_id, tgt_lun_id): # no exception if failed self._client.statusok( 'DELETE', '/cs/flashcopies/%s:%s' % (src_lun_id, tgt_lun_id)) def _get_host_ports(self, host_wwpn_set): return self._client.fetchall( 'GET', '/host_ports', params={ 'wwpns': ",".join(host_wwpn_set), 'state': 'logged in,logged out' }, fields=['host_id', 'wwpn']) def _create_host(self, host): return self._client.fetchone( 'POST', '/hosts', {'name': host.name, 'hosttype': host.type}) def _assign_host_port(self, host_id, ports): self._client.send('POST', '/host_ports/assign', { 'host_id': host_id, 'host_port_wwpns': ports}) def _map_volume_to_host(self, host_id, vol_id): return self._client.fetchid( 'POST', '/hosts%5Bid=' + host_id + '%5D/mappings', {'volumes': [vol_id]}) def _get_mappings(self, host_id): return self._client.fetchall( 'GET', '/hosts%5Bid=' + host_id + '%5D/mappings') def _delete_mappings(self, host_id, lun_id): self._client.send( 'DELETE', '/hosts%5Bid=' + host_id + '%5D/mappings/' + lun_id) def _delete_host_ports(self, port): self._client.send('DELETE', '/host_ports/%s' % port) def _delete_host(self, host_id): # delete the host will delete all of the ports belong to it self._client.send('DELETE', '/hosts%5Bid=' + host_id + '%5D') def _get_ioports(self): return self._client.fetchall('GET', '/ioports', fields=['wwpn']) def unfreeze_lss(self, lss_ids): self._client.send( 'POST', '/cs/flashcopies/unfreeze', {"lss_ids": lss_ids}) def get_all_lss(self, fields=None): fields = (fields if fields else ['id', 'type', 'group', 'configvols']) return self._client.fetchall('GET', '/lss', fields=fields) def lun_exists(self, lun_id): return self._client.statusok('GET', '/volumes/%s' % lun_id) def get_lun_pool(self, lun_id): return self._client.fetchone( 'GET', '/volumes/%s' % lun_id, fields=['pool'])['pool'] def change_lun(self, lun_id, param): self._client.send('PUT', '/volumes/%s' % lun_id, param) def get_physical_links(self, target_id): return self._client.fetchall( 'GET', '/cs/pprcs/physical_links', params={ 'target_system_wwnn': target_id, 'source_lss_id': 00, 'target_lss_id': 00 }) def get_systems(self): return self._client.fetchone( 'GET', '/systems', fields=['id', 'wwnn', 'release']) def get_lun_number_in_lss(self, lss_id): return int(self._client.fetchone( 'GET', '/lss/%s' % lss_id, fields=['configvols'])['configvols']) def create_pprc_path(self, pathData): self._client.send('POST', '/cs/pprcs/paths', pathData) def get_pprc_path(self, path_id): return self._client.fetchone( 'GET', '/cs/pprcs/paths/%s' % path_id, fields=['port_pairs']) def delete_pprc_path(self, path_id): self._client.send('DELETE', '/cs/pprcs/paths/%s' % path_id) def create_pprc_pair(self, pair_data): self._client.send('POST', '/cs/pprcs', pair_data) def delete_pprc_pair_by_pair_id(self, pids): self._client.statusok('DELETE', '/cs/pprcs', params=pids) def do_failback(self, pair_data): self._client.send('POST', '/cs/pprcs/resume', pair_data) def get_pprc_pairs(self, min_vol_id, max_vol_id): return self._client.fetchall( 'GET', '/cs/pprcs', params={ 'volume_id_from': min_vol_id, 'volume_id_to': max_vol_id }) def delete_pprc_pair(self, vol_id): # check pprc pairs exist or not. 
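# If a pair still exists it is removed by volume ID rather than by pair ID
# (see the payload built below), issuing the delete from the source system
# ('issue_source'); DS8KReplicationTargetHelper overrides this method to
# issue the same delete from the target system instead ('issue_target').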
if not self.get_pprc_pairs(vol_id, vol_id): return None # don't use pprc pair ID to delete it, because it may have # communication issues. pair_data = { 'volume_full_ids': [{ 'volume_id': vol_id, 'system_id': self.backend['storage_unit'] }], 'options': ['unconditional', 'issue_source'] } self._client.send('POST', '/cs/pprcs/delete', pair_data) def pause_pprc_pairs(self, pprc_pair_ids): pair_data = {'pprc_ids': pprc_pair_ids} self._client.send('POST', '/cs/pprcs/pause', pair_data) def resume_pprc_pairs(self, pprc_pair_ids): pair_data = { 'pprc_ids': pprc_pair_ids, 'type': 'metro_mirror', 'options': ['permit_space_efficient_target', 'initial_copy_out_of_sync'] } self._client.send('POST', '/cs/pprcs/resume', pair_data) class DS8KReplicationSourceHelper(DS8KCommonHelper): """Manage source storage for replication.""" @proxy.logger def find_pool_and_lss(self, excluded_lss=None): for pool_id, pool in self._storage_pools.items(): lss = self._find_lss_for_type_replication(pool['node'], excluded_lss) if lss: return pool_id, lss raise restclient.LssIDExhaustError( message=_("All LSS/LCU IDs for configured pools are exhausted.")) @proxy.logger def _find_lss_for_type_replication(self, node, excluded_lss): # prefer to choose non-existing one first. existing_lss = self.get_all_lss() LOG.info("existing LSS IDs are %s", ','.join([lss['id'] for lss in existing_lss])) existing_lss_cg, nonexistent_lss_cg = ( self._classify_lss_for_cg(existing_lss)) lss_id = self._find_from_nonexistent_lss(node, existing_lss, nonexistent_lss_cg) if not lss_id: if excluded_lss: existing_lss = [lss for lss in existing_lss if lss['id'] not in excluded_lss] candidates = [lss for lss in existing_lss if lss['id'] not in existing_lss_cg] lss_id = self._find_from_existing_lss(node, candidates) return lss_id class DS8KReplicationTargetHelper(DS8KReplicationSourceHelper): """Manage target storage for replication.""" OPTIONAL_PARAMS = ['ds8k_host_type', 'port_pairs', 'lss_range_for_cg'] def setup(self): self._create_client() self._get_storage_information() self._get_replication_information() self._check_host_type() self.backend['lss_ids_for_cg'] = self._get_lss_ids_for_cg() self.backend['pools_str'] = self._get_value( 'san_clustername').replace('_', ',') self._storage_pools = self.get_pools() self.verify_pools(self._storage_pools) self._verify_rest_version() def _get_replication_information(self): port_pairs = [] pairs = self._get_value('port_pairs') if pairs: for pair in pairs.replace(' ', '').upper().split(';'): pair = pair.split('-') port_pair = { 'source_port_id': pair[0], 'target_port_id': pair[1] } port_pairs.append(port_pair) self.backend['port_pairs'] = port_pairs self.backend['id'] = self._get_value('backend_id') def create_lun(self, lun): volData = { 'cap': self._gb2b(lun.size), 'captype': 'bytes', 'stgtype': 'fb', 'tp': 'ese' if lun.type_thin else 'none' } lun.data_type = lun.data_type if lun.data_type else 'FB 512' if lun.type_os400: volData['os400'] = lun.type_os400 volData['name'] = lun.replica_ds_name volData['pool'], volData['lss'] = lun.pool_lss_pair['target'] volID = self._create_lun(volData) lun.replication_driver_data.update( {self.backend['id']: {'vol_hex_id': volID}}) return lun def delete_pprc_pair(self, vol_id): if not self.get_pprc_pairs(vol_id, vol_id): return None pair_data = { 'volume_full_ids': [{ 'volume_id': vol_id, 'system_id': self.backend['storage_unit'] }], 'options': ['unconditional', 'issue_target'] } self._client.send('POST', '/cs/pprcs/delete', pair_data) class DS8KECKDHelper(DS8KCommonHelper): 
"""Manage ECKD volume.""" OPTIONAL_PARAMS = ['ds8k_host_type', 'port_pairs', 'ds8k_ssid_prefix', 'lss_range_for_cg'] # if use new REST API, please update the version below VALID_REST_VERSION_87_51_MIN = '87.51.63.0' VALID_REST_VERSION_88_20_MIN = '88.20.112.0' MIN_VALID_STORAGE_VERSION = '8.1' INVALID_STORAGE_VERSION = '8.0.1' @staticmethod def _gb2cyl(gb): # now only support 3390, no 3380 or 3390-A cyl = int(math.ceil(gb * 1263.28)) if cyl > 65520: raise exception.VolumeDriverException( message=(_("For 3390 volume, capacity can be in the range " "1-65520(849KiB to 55.68GiB) cylinders, now it " "is %(gb)d GiB, equals to %(cyl)d cylinders.") % {'gb': gb, 'cyl': cyl})) return cyl @staticmethod def _cyl2b(cyl): return cyl * 849960 def _get_cula(self, lcu): return self.backend['device_mapping'][lcu] def disable_thin_provision(self): self._disable_thin_provision = True def setup(self): self._create_client() self._get_storage_information() self._check_host_type() self.backend['lss_ids_for_cg'] = self._get_lss_ids_for_cg() self.backend['pools_str'] = self._get_value('san_clustername') self._storage_pools = self.get_pools() self.verify_pools(self._storage_pools) ssid_prefix = self._get_value('ds8k_ssid_prefix') self.backend['ssid_prefix'] = ssid_prefix if ssid_prefix else 'FF' self.backend['device_mapping'] = self._get_device_mapping() self._verify_rest_version() def _verify_rest_version(self): if self.backend['storage_version'] == self.INVALID_STORAGE_VERSION: raise exception.VolumeDriverException( message=(_("%s does not support bulk deletion of volumes, " "if you want to use this version of driver, " "please upgrade the CCL.") % self.INVALID_STORAGE_VERSION)) # DS8K supports ECKD ESE volume from 8.1 if (dist_version.parse(self.backend['storage_version']) < dist_version.parse(self.MIN_VALID_STORAGE_VERSION)): self._disable_thin_provision = True rest_ver = self.backend['rest_version'][0:2] if (('87' == rest_ver and dist_version.parse(self.backend['rest_version']) < dist_version.parse(self.VALID_REST_VERSION_87_51_MIN)) or ('88' == rest_ver and dist_version.parse(self.backend['rest_version']) < dist_version.parse(self.VALID_REST_VERSION_88_20_MIN))): raise exception.VolumeDriverException( message=(_("REST version %(invalid)s is lower than " "%(valid)s, please upgrade it in DS8K.") % {'invalid': self.backend['rest_version'], 'valid': (self.VALID_REST_VERSION_87_51_MIN if '87' == rest_ver else self.VALID_REST_VERSION_88_20_MIN)})) @proxy.logger def _get_device_mapping(self): map_str = self._get_value('ds8k_devadd_unitadd_mapping') mappings = map_str.replace(' ', '').upper().split(';') pairs = [m.split('-') for m in mappings] self.verify_lss_ids(','.join([p[1] for p in pairs])) return {p[1]: int(p[0], 16) for p in pairs} @proxy.logger def verify_lss_ids(self, specified_lcu_ids): if not specified_lcu_ids: return None lcu_ids = specified_lcu_ids.upper().replace(' ', '').split(',') # verify the LCU ID. 
for lcu in lcu_ids: if int(lcu, 16) > 255: raise exception.InvalidParameterValue( err=_('LCU %s should be within 00-FF.') % lcu) # verify address group self._existing_lss = self.get_all_lss() fb_addrgrps = set(int(lss['id'], 16) // 16 for lss in self._existing_lss if lss['type'] == 'fb') ckd_addrgrps = set((int(lcu, 16) // 16) for lcu in lcu_ids) intersection = ckd_addrgrps & fb_addrgrps if intersection: raise exception.VolumeDriverException( message=_('LCUs in the address group %s are reserved ' 'for FB volumes') % list(intersection)) # create LCU that doesn't exist nonexistent_lcu = set(lcu_ids) - set( lss['id'] for lss in self._existing_lss if lss['type'] == 'ckd') if nonexistent_lcu: LOG.info('LCUs %s do not exist in DS8K, they will be ' 'created.', ','.join(nonexistent_lcu)) for lcu in nonexistent_lcu: try: self._create_lcu(self.backend['ssid_prefix'], lcu) except restclient.APIException as e: raise exception.VolumeDriverException( message=(_('Can not create lcu %(lcu)s, ' 'Exception = %(e)s.') % {'lcu': lcu, 'e': str(e)})) return lcu_ids def _format_pools(self, pools): return ((p['id'], { 'name': p['name'], 'node': int(p['node']), 'stgtype': p['stgtype'], 'cap': self._cyl2b(int(p['cap'])), 'capavail': self._cyl2b(int(p['capavail'])) }) for p in pools) @proxy.logger def find_pool_and_lss(self, excluded_lss=None): return self.find_biggest_pool_and_lss(excluded_lss) @proxy.logger def _find_lss(self, node, excluded_lcu, specified_lcu_ids=None): # all LCUs have existed, unlike LSS. if specified_lcu_ids: for lcu_id in specified_lcu_ids: if lcu_id not in self.backend['device_mapping'].keys(): raise exception.InvalidParameterValue( err=_("LCU %s is not in parameter " "ds8k_devadd_unitadd_mapping, " "Please specify LCU in it, otherwise " "driver can not attach volume.") % lcu_id) all_lss = self._existing_lss else: all_lss = self.get_all_lss() existing_lcu = [lcu for lcu in all_lss if lcu['type'] == 'ckd' and lcu['id'] in self.backend['device_mapping'].keys() and lcu['group'] == str(node)] LOG.info("All appropriate LCUs are %s.", ','.join([lcu['id'] for lcu in existing_lcu])) # exclude full LCUs. if excluded_lcu: existing_lcu = [lcu for lcu in existing_lcu if lcu['id'] not in excluded_lcu] if not existing_lcu: LOG.info("All appropriate LCUs are full.") return None ignore_pprc = False if specified_lcu_ids: # user specify LCUs in extra-specs. existing_lcu = [lcu for lcu in existing_lcu if lcu['id'] in specified_lcu_ids] ignore_pprc = True # exclude LCUs reserved for CG. existing_lcu = [lcu for lcu in existing_lcu if lcu['id'] not in self.backend['lss_ids_for_cg']] if not existing_lcu: LOG.info("All appropriate LCUs have been reserved for " "for consistency group.") return None if not ignore_pprc: # prefer to use LCU that is not in PPRC path first. lcu_pprc = self.get_lss_in_pprc_paths() & set( self.backend['device_mapping'].keys()) if lcu_pprc: lcu_non_pprc = [ lcu for lcu in existing_lcu if lcu['id'] not in lcu_pprc] if lcu_non_pprc: existing_lcu = lcu_non_pprc # return LCU which has max number of empty slots. 
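# 'configvols' is the number of volumes already configured in an LCU; the
# candidates are sorted by it ascending and the emptiest one is chosen.
# If even that LCU already holds LSS_VOL_SLOTS volumes, every candidate is
# full and None is returned instead of an LCU ID.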
emptiest_lcu = sorted( existing_lcu, key=lambda i: int(i['configvols']))[0] if int(emptiest_lcu['configvols']) == LSS_VOL_SLOTS: return None else: return emptiest_lcu['id'] def _create_lcu(self, ssid_prefix, lcu): self._client.send('POST', '/lss', { 'id': lcu, 'type': 'ckd', 'sub_system_identifier': ssid_prefix + lcu }) def create_lun(self, lun): volData = { 'cap': self._gb2cyl(lun.size), 'captype': 'cyl', 'stgtype': 'ckd', 'tp': 'ese' if lun.type_thin else 'none' } lun.data_type = '3390' volData['name'] = lun.ds_name volData['pool'], volData['lss'] = lun.pool_lss_pair['source'] lun.ds_id = self._create_lun(volData) return lun def initialize_connection(self, vol_id, connector, **kwargs): return { 'driver_volume_type': 'fibre_channel_eckd', 'data': { 'target_discovered': True, 'cula': self._get_cula(vol_id[0:2]), 'unit_address': int(vol_id[2:4], 16), 'discard': False } } def terminate_connection(self, volume, vol_id, connector, force, **kwargs): return None class DS8KReplicationTargetECKDHelper(DS8KECKDHelper, DS8KReplicationTargetHelper): """Manage ECKD volume in replication target.""" def setup(self): self._create_client() self._get_storage_information() self._get_replication_information() self._check_host_type() self.backend['lss_ids_for_cg'] = self._get_lss_ids_for_cg() self.backend['pools_str'] = self._get_value( 'san_clustername').replace('_', ',') self._storage_pools = self.get_pools() self.verify_pools(self._storage_pools) ssid_prefix = self._get_value('ds8k_ssid_prefix') self.backend['ssid_prefix'] = ssid_prefix if ssid_prefix else 'FF' self.backend['device_mapping'] = self._get_device_mapping() self._verify_rest_version() def create_lun(self, lun): volData = { 'cap': self._gb2cyl(lun.size), 'captype': 'cyl', 'stgtype': 'ckd', 'tp': 'ese' if lun.type_thin else 'none' } lun.data_type = '3390' volData['name'] = lun.replica_ds_name volData['pool'], volData['lss'] = lun.pool_lss_pair['target'] volID = self._create_lun(volData) lun.replication_driver_data.update( {self.backend['id']: {'vol_hex_id': volID}}) return lun ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/drivers/ibm/ibm_storage/ds8k_proxy.py0000664000175000017500000022464000000000000025056 0ustar00zuulzuul00000000000000# Copyright (c) 2016 IBM Corporation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # """This is the driver that allows openstack to talk to DS8K. All volumes are thin provisioned by default, if the machine is licensed for it. This can be overridden by creating a volume type and specifying a key like so: .. code:: console #> cinder type-create my_type #> cinder type-key my_type set drivers:thin_provision=False #> cinder create --volume-type my_type 123 Sample settings for cinder.conf: .. 
code:: ini enabled_backends = ibm_ds8k_1, ibm_ds8k_2 [ibm_ds8k_1] proxy = cinder.volume.drivers.ibm.ibm_storage.ds8k_proxy.DS8KProxy volume_backend_name = ibm_ds8k_1 san_clustername = P2,P3 san_password = actual_password san_login = actual_username san_ip = foo.com volume_driver = cinder.volume.drivers.ibm.ibm_storage.ibm_storage.IBMStorageDriver chap = disabled connection_type = fibre_channel replication_device = connection_type: fibre_channel, backend_id: bar, san_ip: bar.com, san_login: actual_username, san_password: actual_password, san_clustername: P4, port_pairs: I0236-I0306; I0237-I0307 [ibm_ds8k_2] proxy = cinder.volume.drivers.ibm.ibm_storage.ds8k_proxy.DS8KProxy volume_backend_name = ibm_ds8k_2 san_clustername = P4,P5 san_password = actual_password san_login = actual_username san_ip = bar.com volume_driver = cinder.volume.drivers.ibm.ibm_storage.ibm_storage.IBMStorageDriver chap = disabled connection_type = fibre_channel """ import ast import json import eventlet from oslo_config import cfg from oslo_log import log as logging from cinder import context from cinder import coordination from cinder import exception from cinder.i18n import _ from cinder import objects from cinder.objects import fields from cinder.volume import configuration import cinder.volume.drivers.ibm.ibm_storage as storage from cinder.volume.drivers.ibm.ibm_storage import ( ds8k_replication as replication) from cinder.volume.drivers.ibm.ibm_storage import ds8k_helper as helper from cinder.volume.drivers.ibm.ibm_storage import ds8k_restclient as restclient from cinder.volume.drivers.ibm.ibm_storage import proxy from cinder.volume.drivers.ibm.ibm_storage import strings from cinder.volume import volume_types from cinder.volume import volume_utils LOG = logging.getLogger(__name__) VALID_OS400_VOLUME_TYPES = { 'A01': 8, 'A02': 17, 'A04': 66, 'A05': 33, 'A06': 132, 'A07': 263, 'A81': 8, 'A82': 17, 'A84': 66, 'A85': 33, 'A86': 132, 'A87': 263, '050': '', '099': '' } EXTRA_SPECS_DEFAULTS = { 'thin': True, 'replication_enabled': False, 'consistency': False, 'os400': '', 'storage_pool_ids': '', 'storage_lss_ids': '', 'async_clone': False, 'multiattach': False } ds8k_opts = [ cfg.StrOpt( 'ds8k_devadd_unitadd_mapping', default='', help='Mapping between IODevice address and unit address.'), cfg.StrOpt( 'ds8k_ssid_prefix', default='FF', help='Set the first two digits of SSID.'), cfg.StrOpt( 'lss_range_for_cg', default='', help='Reserve LSSs for consistency group.'), cfg.StrOpt( 'ds8k_host_type', default='auto', help='Set to zLinux if your OpenStack version is prior to ' 'Liberty and you\'re connecting to zLinux systems. ' 'Otherwise set to auto. Valid values for this parameter ' 'are: %s.' % str(helper.VALID_HOST_TYPES)[1:-1]) ] CONF = cfg.CONF CONF.register_opts(ds8k_opts, group=configuration.SHARED_CONF_GROUP) class Lun(object): """provide volume information for driver from volume db object. Version history: .. code-block:: none 1.0.0 - initial revision. 2.1.0 - Added support for specify pool and lss, also improve the code. 2.1.1 - Added support for replication consistency group. 2.1.2 - Added support for cloning volume asynchronously. 2.3.0 - Added support for reporting backend state. 2.5.0 - Added support for revert to snapshot operation. 
""" VERSION = "2.5.0" class FakeLun(object): def __init__(self, lun, **overrides): self.size = lun.size self.os_id = lun.os_id self.cinder_name = lun.cinder_name self.is_snapshot = lun.is_snapshot self.ds_name = lun.ds_name self.ds_id = lun.ds_id self.type_thin = lun.type_thin self.type_os400 = lun.type_os400 self.data_type = lun.data_type self.type_replication = lun.type_replication self.group = lun.group self.specified_pool = lun.specified_pool self.specified_lss = lun.specified_lss self.async_clone = lun.async_clone self.multiattach = lun.multiattach self.status = lun.status if not self.is_snapshot: self.replica_ds_name = lun.replica_ds_name self.replication_driver_data = ( lun.replication_driver_data.copy()) self.replication_status = lun.replication_status self.pool_lss_pair = lun.pool_lss_pair def update_volume(self, lun): lun.data_type = self.data_type volume_update = lun.get_volume_update() volume_update['provider_location'] = str({ 'vol_hex_id': self.ds_id}) if self.type_replication: volume_update['replication_driver_data'] = json.dumps( self.replication_driver_data) volume_update['metadata']['replication'] = str( self.replication_driver_data) else: volume_update.pop('replication_driver_data', None) volume_update['metadata'].pop('replication', None) volume_update['metadata']['vol_hex_id'] = self.ds_id volume_update['multiattach'] = self.multiattach return volume_update def __init__(self, volume, is_snapshot=False): volume_type_id = volume.get('volume_type_id') self.specs = volume_types.get_volume_type_extra_specs( volume_type_id) if volume_type_id else {} os400 = self.specs.get( 'drivers:os400', EXTRA_SPECS_DEFAULTS['os400'] ).strip().upper() self.type_thin = self.specs.get( 'drivers:thin_provision', '%s' % EXTRA_SPECS_DEFAULTS['thin'] ).upper() == 'TRUE' self.type_replication = self.specs.get( 'replication_enabled', ' %s' % EXTRA_SPECS_DEFAULTS['replication_enabled'] ).upper() == strings.METADATA_IS_TRUE self.specified_pool = self.specs.get( 'drivers:storage_pool_ids', EXTRA_SPECS_DEFAULTS['storage_pool_ids'] ) self.specified_lss = self.specs.get( 'drivers:storage_lss_ids', EXTRA_SPECS_DEFAULTS['storage_lss_ids'] ) self.multiattach = self.specs.get( 'multiattach', ' %s' % EXTRA_SPECS_DEFAULTS['multiattach'] ).upper() == strings.METADATA_IS_TRUE if volume.provider_location: provider_location = ast.literal_eval(volume.provider_location) self.ds_id = provider_location['vol_hex_id'] else: self.ds_id = None self.cinder_name = volume.name self.pool_lss_pair = {} self.is_snapshot = is_snapshot if self.is_snapshot: self.group = (Group(volume.group_snapshot, True) if volume.group_snapshot else None) self.size = volume.volume_size # ds8k supports at most 16 chars self.ds_name = helper.filter_alnum(self.cinder_name)[:16] self.metadata = self._get_snapshot_metadata(volume) self.source_volid = volume.volume_id else: self.group = Group(volume.group) if volume.group else None self.size = volume.size self.ds_name = helper.filter_alnum(self.cinder_name)[:16] self.replica_ds_name = helper.filter_alnum(self.cinder_name)[:16] self.previous_status = volume.previous_status self.replication_status = volume.replication_status self.replication_driver_data = ( json.loads(volume.replication_driver_data) if volume.replication_driver_data else {}) if self.replication_driver_data: # now only support one replication target. 
replication_target = sorted( self.replication_driver_data.values())[0] self.replica_ds_id = replication_target['vol_hex_id'] self.pool_lss_pair = { 'source': (None, self.ds_id[0:2]), 'target': (None, self.replica_ds_id[0:2]) } # Don't use self.replication_status to judge if volume has # been failed over or not, because when user fail over a # group, replication_status of each volume in group is # failing over. self.failed_over = (True if 'default' in self.replication_driver_data.keys() else False) else: self.failed_over = False self.metadata = self._get_volume_metadata(volume) self.source_volid = volume.source_volid self.async_clone = self.metadata.get( 'async_clone', '%s' % EXTRA_SPECS_DEFAULTS['async_clone'] ).upper() == 'TRUE' if os400: if os400 not in VALID_OS400_VOLUME_TYPES.keys(): raise restclient.APIException( data=(_("The OS400 volume type provided, %s, is not " "a valid volume type.") % os400)) self.type_os400 = os400 if os400 not in ['050', '099']: self.size = VALID_OS400_VOLUME_TYPES[os400] else: self.type_os400 = EXTRA_SPECS_DEFAULTS['os400'] self.data_type = self._create_datatype(self.type_os400) self.os_id = volume.id self.status = volume.status self.volume = volume def _get_volume_metadata(self, volume): if 'volume_metadata' in volume: metadata = volume.volume_metadata return {m['key']: m['value'] for m in metadata} if 'metadata' in volume: return volume.metadata return {} def _get_snapshot_metadata(self, snapshot): if 'snapshot_metadata' in snapshot: metadata = snapshot.snapshot_metadata return {m['key']: m['value'] for m in metadata} if 'metadata' in snapshot: return snapshot.metadata return {} def shallow_copy(self, **overrides): return Lun.FakeLun(self, **overrides) def _create_datatype(self, t): if t[0:2] == 'A0': datatype = t + ' FB 520P' elif t[0:2] == 'A8': datatype = t + ' FB 520U' elif t == '050': datatype = t + ' FB 520UV' elif t == '099': datatype = t + ' FB 520PV' else: datatype = None return datatype # Note: updating metadata in vol related funcs deletes all prior metadata def get_volume_update(self): volume_update = {} volume_update['provider_location'] = str( {'vol_hex_id': self.ds_id}) # update metadata if not self.is_snapshot: if self.type_replication: self.metadata['replication'] = str( self.replication_driver_data) else: self.metadata.pop('replication', None) volume_update['replication_driver_data'] = json.dumps( self.replication_driver_data) volume_update['replication_status'] = ( self.replication_status or fields.ReplicationStatus.NOT_CAPABLE) volume_update['multiattach'] = self.multiattach self.metadata['data_type'] = (self.data_type or self.metadata['data_type']) self.metadata['vol_hex_id'] = self.ds_id volume_update['metadata'] = self.metadata # need to update volume size for OS400 if self.type_os400: volume_update['size'] = self.size return volume_update class Group(object): """provide group information for driver from group db object.""" def __init__(self, group, is_snapshot=False): self.id = group.id self.host = group.host self.consisgroup_snapshot_enabled = ( volume_utils.is_group_a_cg_snapshot_type(group)) self.group_replication_enabled = ( volume_utils.is_group_a_type( group, "group_replication_enabled")) self.consisgroup_replication_enabled = ( volume_utils.is_group_a_type( group, "consistent_group_replication_enabled")) if is_snapshot: self.snapshots = group.snapshots else: self.failed_over = ( group.replication_status == fields.ReplicationStatus.FAILED_OVER) # create_volume needs to check volumes in the group, # so get it from volume.group 
object. self.volumes = group.volumes class DS8KProxy(proxy.IBMStorageProxy): prefix = "[IBM DS8K STORAGE]:" def __init__(self, storage_info, logger, exception, driver, active_backend_id=None, HTTPConnectorObject=None, host=None): proxy.IBMStorageProxy.__init__( self, storage_info, logger, exception, driver, active_backend_id) self._helper = None self._replication = None self._connector_obj = HTTPConnectorObject self._host = host self._replication_enabled = False self._active_backend_id = active_backend_id self.configuration = driver.configuration self.configuration.append_config_values(ds8k_opts) # TODO(jiamin): this cache is used to handle concurrency issue, but it # hurts HA, we will find whether is it possible to store it in storage. self.consisgroup_cache = {} @proxy._trace_time def setup(self, ctxt): LOG.info("Initiating connection to IBM DS8K storage system.") connection_type = self.configuration.safe_get('connection_type') replication_devices = self.configuration.safe_get('replication_device') if connection_type == storage.XIV_CONNECTION_TYPE_FC: if not replication_devices: self._helper = helper.DS8KCommonHelper(self.configuration, self._connector_obj) else: self._helper = ( helper.DS8KReplicationSourceHelper(self.configuration, self._connector_obj)) elif connection_type == storage.XIV_CONNECTION_TYPE_FC_ECKD: self._helper = helper.DS8KECKDHelper(self.configuration, self._connector_obj) else: raise exception.InvalidParameterValue( err=(_("Param [connection_type] %s is invalid.") % connection_type)) if replication_devices: self._do_replication_setup(replication_devices, self._helper) # checking volumes which are still in clone process. self._check_async_cloned_volumes() @proxy.logger def _check_async_cloned_volumes(self): ctxt = context.get_admin_context() volumes = objects.VolumeList.get_all_by_host(ctxt, self._host) src_luns = [] tgt_luns = [] for volume in volumes: tgt_lun = Lun(volume) if tgt_lun.metadata.get('flashcopy') == 'started': try: src_vol = objects.Volume.get_by_id( ctxt, tgt_lun.source_volid) except exception.VolumeNotFound: LOG.error("Failed to get source volume %(src)s for " "target volume %(tgt)s", {'src': tgt_lun.source_volid, 'tgt': tgt_lun.ds_id}) else: src_luns.append(Lun(src_vol)) tgt_luns.append(tgt_lun) if src_luns and tgt_luns: eventlet.spawn(self._wait_flashcopy, src_luns, tgt_luns) @proxy.logger def _do_replication_setup(self, devices, src_helper): if len(devices) >= 2: raise exception.InvalidParameterValue( err=_("Param [replication_device] is invalid, Driver " "support only one replication target.")) self._replication = replication.Replication(src_helper, devices[0]) self._replication.check_physical_links() self._replication.check_connection_type() if self._active_backend_id: self._replication.switch_source_and_target_client() self._replication_enabled = True @staticmethod def _b2gb(b): return b // (2 ** 30) @proxy._trace_time def _update_stats(self): if self._helper: storage_pools = self._helper.get_pools() else: raise exception.VolumeDriverException( message=(_('Backend %s is not initialized.') % self.configuration.volume_backend_name)) stats = { "volume_backend_name": self.configuration.volume_backend_name, "serial_number": self._helper.backend['storage_unit'], "reserved_percentage": self.configuration.reserved_percentage, "consistent_group_snapshot_enabled": True, "group_replication_enabled": True, "consistent_group_replication_enabled": True, "multiattach": True, "vendor_name": 'IBM', "driver_version": self.full_version, "storage_protocol": 
self._helper.get_connection_type(), "extent_pools": 'None', "total_capacity_gb": 0, "free_capacity_gb": 0, "backend_state": 'up' } if not len(storage_pools): msg = _('No pools found - make sure san_clustername ' 'is defined in the config file and that the ' 'pools exist on the storage.') LOG.error(msg) stats.update({ "extent_pools": 'None', "total_capacity_gb": 0, "free_capacity_gb": 0, "backend_state": 'down' }) else: self._helper.update_storage_pools(storage_pools) stats.update({ "extent_pools": ','.join(p for p in storage_pools.keys()), "total_capacity_gb": self._b2gb( sum(p['cap'] for p in storage_pools.values())), "free_capacity_gb": self._b2gb( sum(p['capavail'] for p in storage_pools.values())), "backend_state": 'up' }) if self._replication_enabled: stats['replication_enabled'] = self._replication_enabled self.meta['stat'] = stats def _assert(self, assert_condition, exception_message=''): if not assert_condition: LOG.error(exception_message) raise exception.VolumeDriverException(message=exception_message) @proxy.logger def _create_lun_helper(self, lun, pool=None, find_new_pid=True): connection_type = self._helper.get_connection_type() if connection_type == storage.XIV_CONNECTION_TYPE_FC_ECKD: if lun.type_thin: if self._helper.get_thin_provision(): msg = (_("Backend %s can not support ECKD ESE volume.") % self._helper.backend['storage_unit']) LOG.error(msg) raise exception.VolumeDriverException(message=msg) if lun.type_replication: target_helper = self._replication.get_target_helper() # PPRC can not copy from ESE volume to standard volume # or vice versa. if target_helper.get_thin_provision(): msg = (_("Secondary storage %s can not support ECKD " "ESE volume.") % target_helper.backend['storage_unit']) LOG.error(msg) raise exception.VolumeDriverException(message=msg) # There is a time gap between find available LSS slot and # lun actually occupies it. excluded_lss = set() while True: try: if lun.specified_pool or lun.specified_lss: lun.pool_lss_pair = { 'source': self._find_pool_lss_pair_from_spec( lun, excluded_lss)} elif lun.group and (lun.group.consisgroup_snapshot_enabled or lun.group.consisgroup_replication_enabled): lun.pool_lss_pair = ( self._find_pool_lss_pair_for_cg(lun, excluded_lss)) else: if lun.type_replication and not lun.is_snapshot: lun.pool_lss_pair = ( self._replication.find_pool_lss_pair( excluded_lss)) else: lun.pool_lss_pair = { 'source': self._helper.find_pool_lss_pair( pool, find_new_pid, excluded_lss)} return self._helper.create_lun(lun) except restclient.LssFullException: excluded_lss.add(lun.pool_lss_pair['source'][1]) if lun.group and (lun.group.consisgroup_snapshot_enabled or lun.group.consisgroup_replication_enabled): msg = _("The reserve LSS for CG is full. " "Volume can not be created on it.") LOG.error(msg) raise exception.VolumeDriverException(message=msg) else: LOG.warning("LSS %s is full, find another one.", lun.pool_lss_pair['source'][1]) def _find_pool_lss_pair_from_spec(self, lun, excluded_lss): if lun.group and (lun.group.consisgroup_snapshot_enabled or lun.group.consisgroup_replication_enabled): msg = _("No support for specifying pool or lss for " "volumes that belong to consistency group.") LOG.error(msg) raise exception.VolumeDriverException(message=msg) else: pool, lss = self._helper.find_biggest_pool_and_lss( excluded_lss, (lun.specified_pool, lun.specified_lss)) return (pool, lss) @coordination.synchronized('{self.prefix}-consistency-group') def _find_pool_lss_pair_for_cg(self, lun, excluded_lss): # NOTE: a group may have multiple LSSs. 
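# consisgroup_cache maps a group id to the set of (source_lss, target_lss)
# tuples already handed out to that group (target_lss is None when the group
# is not replicated), so concurrent volume creations land on the same LSSs.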
lss_pairs_in_cache = self.consisgroup_cache.get(lun.group.id, set()) if not lss_pairs_in_cache: lss_pairs_in_group = self._get_lss_pairs_in_group(lun.group, lun.is_snapshot) LOG.debug("LSSs used by group %(grp)s are %(lss_pair)s.", {'grp': lun.group.id, 'lss_pair': lss_pairs_in_group}) available_lss_pairs = set(pair for pair in lss_pairs_in_group if pair[0] != excluded_lss) else: available_lss_pairs = set(pair for pair in lss_pairs_in_cache if pair[0] != excluded_lss) if not available_lss_pairs: available_lss_pairs = self._find_lss_pair_for_cg(lun.group, excluded_lss, lun.is_snapshot) pool_lss_pair, lss_pair = self._find_pool_for_lss(available_lss_pairs) if pool_lss_pair: lss_pairs_in_cache.add(lss_pair) self.consisgroup_cache[lun.group.id] = lss_pairs_in_cache else: raise exception.VolumeDriverException( message=(_('There are still some available LSSs %s for CG, ' 'but they are not in the same node as pool.') % available_lss_pairs)) return pool_lss_pair def _get_lss_pairs_in_group(self, group, is_snapshot=False): lss_pairs_in_group = set() if is_snapshot: luns = [Lun(snapshot, is_snapshot=True) for snapshot in group.snapshots] else: luns = [Lun(volume) for volume in group.volumes] if group.consisgroup_replication_enabled and not is_snapshot: lss_pairs_in_group = set((lun.ds_id[:2], lun.replica_ds_id[:2]) for lun in luns if lun.ds_id and lun.replica_ds_id) else: lss_pairs_in_group = set((lun.ds_id[:2], None) for lun in luns if lun.ds_id) return lss_pairs_in_group def _find_lss_pair_for_cg(self, group, excluded_lss, is_snapshot): lss_pairs_used = set() ctxt = context.get_admin_context() filters_groups = {'host': group.host, 'status': 'available'} groups = objects.GroupList.get_all(ctxt, filters=filters_groups) for grp in groups: grp = Group(grp) if (grp.consisgroup_snapshot_enabled or grp.consisgroup_replication_enabled): lss_pairs_used |= self._get_lss_pairs_in_group(grp) filters_group_snapshots = {'status': 'available'} group_snapshots = objects.GroupSnapshotList.get_all_by_group( ctxt, grp.id, filters=filters_group_snapshots) for sgrp in group_snapshots: sgrp = Group(sgrp, True) if (sgrp.consisgroup_snapshot_enabled or sgrp.consisgroup_replication_enabled): lss_pairs_used |= self._get_lss_pairs_in_group(sgrp, True) # in order to keep one-to-one pprc mapping relationship, zip LSSs # which reserved by user. if not is_snapshot: if group.consisgroup_replication_enabled: target_helper = self._replication.get_target_helper() source_lss_for_cg = self._helper.backend['lss_ids_for_cg'] target_lss_for_cg = target_helper.backend['lss_ids_for_cg'] available_lss_pairs = zip(source_lss_for_cg, target_lss_for_cg) else: available_lss_pairs = [(lss, None) for lss in self._helper.backend['lss_ids_for_cg']] source_lss_used = set() for lss_pair in lss_pairs_used: source_lss_used.add(lss_pair[0]) # in concurrency case, lss may be reversed in cache but the group # has not been committed into DB. 
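# Also count the source LSSs recorded only in the in-memory cache as used,
# then drop every reserved pair whose source LSS is already taken.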
for lss_pairs_set in self.consisgroup_cache.values(): source_lss_used |= set( lss_pair[0] for lss_pair in lss_pairs_set) available_lss_pairs = [lss_pair for lss_pair in available_lss_pairs if lss_pair[0] not in source_lss_used] self._assert(available_lss_pairs, "All LSSs reserved for CG have been used out, " "please reserve more LSS for CG if there are still " "some empty LSSs left.") else: available_lss_pairs = set() excluded_lss |= lss_pairs_used for node in (0, 1): available_lss_pairs |= {(self._helper._find_lss( node, excluded_lss), None)} if not available_lss_pairs: raise restclient.LssIDExhaustError( message=_('All LSS/LCU IDs for configured pools ' 'on storage are exhausted.')) LOG.debug('_find_lss_pair_for_cg: available LSSs for consistency ' 'group are %s', available_lss_pairs) return available_lss_pairs @proxy.logger def _find_pool_for_lss(self, available_lss_pairs): # all LSS pairs have target LSS or do not have. for src_lss, tgt_lss in available_lss_pairs: src_pid = self._helper.get_pool(src_lss) if not src_pid: continue if tgt_lss: target_helper = self._replication.get_target_helper() tgt_pid = target_helper.get_pool(tgt_lss) if tgt_pid: return ({'source': (src_pid, src_lss), 'target': (tgt_pid, tgt_lss)}, (src_lss, tgt_lss)) else: return {'source': (src_pid, src_lss)}, (src_lss, tgt_lss) raise exception.VolumeDriverException( message=(_("Can not find pool for LSSs %s.") % available_lss_pairs)) @proxy.logger def _clone_lun(self, src_lun, tgt_lun): self._assert(src_lun.size <= tgt_lun.size, _('Target volume should be bigger or equal ' 'to the Source volume in size.')) self._ensure_vol_not_fc_target(src_lun.ds_id) # image volume cache brings two cases for clone lun: # 1. volume ID of src_lun and tgt_lun will be the same one because # _clone_image_volume does not pop the provider_location. # 2. if creating image volume failed at the first time, tgt_lun will be # deleted, so when it is sent to driver again, it will not exist. if (tgt_lun.ds_id is None or src_lun.ds_id == tgt_lun.ds_id or not self._helper.lun_exists(tgt_lun.ds_id)): # It is a preferred practice to locate the FlashCopy target # volume on the same DS8000 server as the FlashCopy source volume. 
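# The source volume's LSS (first two hex digits of its DS8K id) selects the
# pool, so the FlashCopy target is created on the same DS8000 server as the
# source volume.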
pool = self._helper.get_pool(src_lun.ds_id[0:2]) # flashcopy to larger target only works with thick vols, so we # emulate for thin by extending after copy if tgt_lun.type_thin and tgt_lun.size > src_lun.size: tmp_size = tgt_lun.size tgt_lun.size = src_lun.size self._create_lun_helper(tgt_lun, pool) tgt_lun.size = tmp_size else: self._create_lun_helper(tgt_lun, pool) else: self._assert( src_lun.size == tgt_lun.size, _('When target volume is pre-created, it must be equal ' 'in size to source volume.')) vol_pairs = [{ "source_volume": src_lun.ds_id, "target_volume": tgt_lun.ds_id }] try: self._helper.start_flashcopy(vol_pairs) if ((tgt_lun.type_thin and tgt_lun.size > src_lun.size) or (not tgt_lun.async_clone)): self._helper.wait_flashcopy_finished([src_lun], [tgt_lun]) if (tgt_lun.status == 'available' and tgt_lun.type_thin and tgt_lun.size > src_lun.size): param = { 'cap': self._helper._gb2b(tgt_lun.size), 'captype': 'bytes' } self._helper.change_lun(tgt_lun.ds_id, param) else: LOG.info("Clone volume %(tgt)s from volume %(src)s " "in the background.", {'src': src_lun.ds_id, 'tgt': tgt_lun.ds_id}) tgt_lun.metadata['flashcopy'] = "started" eventlet.spawn(self._wait_flashcopy, [src_lun], [tgt_lun]) finally: if not tgt_lun.async_clone and tgt_lun.status == 'error': self._helper.delete_lun(tgt_lun) return tgt_lun def _wait_flashcopy(self, src_luns, tgt_luns): # please note that the order of volumes should be fixed. self._helper.wait_flashcopy_finished(src_luns, tgt_luns) for src_lun, tgt_lun in zip(src_luns, tgt_luns): if tgt_lun.status == 'available': tgt_lun.volume.metadata['flashcopy'] = 'success' elif tgt_lun.status == 'error': tgt_lun.volume.metadata['flashcopy'] = "error" tgt_lun.volume.metadata['error_msg'] = ( "FlashCopy from source volume %(src)s to target volume " "%(tgt)s fails, the state of target volume %(id)s is set " "to error." 
% {'src': src_lun.ds_id, 'tgt': tgt_lun.ds_id, 'id': tgt_lun.os_id}) tgt_lun.volume.status = 'error' self._helper.delete_lun(tgt_lun) else: self._helper.delete_lun(tgt_lun) raise exception.VolumeDriverException( message=_("Volume %(id)s is in unexpected state " "%(state)s.") % {'id': tgt_lun.ds_id, 'state': tgt_lun.status}) tgt_lun.volume.save() def _ensure_vol_not_fc_target(self, vol_hex_id): for cp in self._helper.get_flashcopy(vol_hex_id): if cp['targetvolume']['id'] == vol_hex_id: raise restclient.APIException( data=(_('Volume %s is currently a target of another ' 'FlashCopy operation') % vol_hex_id)) def _create_replica_helper(self, lun): if not lun.pool_lss_pair.get('target'): lun = self._replication.establish_replication(lun, True) else: lun = self._replication.create_replica(lun) return lun @proxy._trace_time def create_volume(self, volume): lun = self._create_lun_helper(Lun(volume)) if lun.type_replication: lun = self._create_replica_helper(lun) return lun.get_volume_update() @proxy._trace_time def create_cloned_volume(self, target_vol, source_vol): lun = self._clone_lun(Lun(source_vol), Lun(target_vol)) if lun.type_replication: lun = self._create_replica_helper(lun) return lun.get_volume_update() @proxy._trace_time def create_volume_from_snapshot(self, volume, snapshot): lun = self._clone_lun(Lun(snapshot, is_snapshot=True), Lun(volume)) if lun.type_replication: lun = self._create_replica_helper(lun) return lun.get_volume_update() @proxy._trace_time def extend_volume(self, volume, new_size): lun = Lun(volume) param = { 'cap': self._helper._gb2b(new_size), 'captype': 'bytes' } if lun.type_replication: if not self._active_backend_id: self._replication.delete_pprc_pairs(lun) self._helper.change_lun(lun.ds_id, param) self._replication.extend_replica(lun, param) self._replication.create_pprc_pairs(lun) else: raise exception.VolumeDriverException( message=(_("The volume %s has been failed over, it is " "not suggested to extend it.") % lun.ds_id)) else: self._helper.change_lun(lun.ds_id, param) @proxy._trace_time def volume_exists(self, volume): return self._helper.lun_exists(Lun(volume).ds_id) @proxy._trace_time def delete_volume(self, volume): lun = Lun(volume) if lun.type_replication: lun = self._replication.delete_replica(lun) self._helper.delete_lun(lun) @proxy._trace_time def create_snapshot(self, snapshot): return self._clone_lun(Lun(snapshot['volume']), Lun( snapshot, is_snapshot=True)).get_volume_update() @proxy._trace_time def delete_snapshot(self, snapshot): self._helper.delete_lun(Lun(snapshot, is_snapshot=True)) @proxy._trace_time def migrate_volume(self, ctxt, volume, backend): # this and retype is a complete mess, pending cinder changes for fix. # currently this is only for migrating between pools on the same # physical machine but different cinder.conf backends. 
# volume not allowed to get here if cg or repl # should probably check volume['status'] in ['available', 'in-use'], # especially for flashcopy lun = Lun(volume) if lun.type_replication: raise exception.VolumeDriverException( message=_('Driver does not support migrate replicated ' 'volume, it can be done via retype.')) stats = self.meta['stat'] if backend['capabilities']['vendor_name'] != stats['vendor_name']: raise exception.VolumeDriverException(_( 'source and destination vendors differ.')) if backend['capabilities']['serial_number'] != stats['serial_number']: raise exception.VolumeDriverException(_( 'source and destination serial numbers differ.')) new_pools = self._helper.get_pools( backend['capabilities']['extent_pools']) cur_pool_id = self._helper.get_lun_pool(lun.ds_id)['id'] cur_node = self._helper.get_storage_pools()[cur_pool_id]['node'] # try pools in same rank for pid, pool in new_pools.items(): if pool['node'] == cur_node: try: self._helper.change_lun(lun.ds_id, {'pool': pid}) return (True, None) except Exception: pass # try pools in opposite rank for pid, pool in new_pools.items(): if pool['node'] != cur_node: try: new_lun = lun.shallow_copy() self._create_lun_helper(new_lun, pid, False) self._clone_lun(lun, new_lun) volume_update = new_lun.update_volume(lun) try: self._helper.delete_lun(lun) except Exception: pass return (True, volume_update) except Exception: # will ignore missing ds_id if failed create volume self._helper.delete_lun(new_lun) return (False, None) @proxy._trace_time def retype(self, ctxt, volume, new_type, diff, host): """retype the volume. :param ctxt: Context :param volume: A dictionary describing the volume to migrate :param new_type: A dictionary describing the volume type to convert to :param diff: A dictionary with the difference between the two types :param host: A dictionary describing the host to migrate to, where host['host'] is its name, and host['capabilities'] is a dictionary of its reported capabilities. """ def _check_extra_specs(key, value=None): extra_specs = diff.get('extra_specs') specific_type = extra_specs.get(key) if extra_specs else None old_type = None new_type = None if specific_type: old_type, new_type = specific_type if value: old_type = (True if old_type and old_type.upper() == value else False) new_type = (True if new_type and new_type.upper() == value else False) return old_type, new_type lun = Lun(volume) # check user specify pool or lss or not old_specified_pool, new_specified_pool = _check_extra_specs( 'drivers:storage_pool_ids') old_specified_lss, new_specified_lss = _check_extra_specs( 'drivers:storage_lss_ids') # check thin or thick old_type_thick, new_type_thick = _check_extra_specs( 'drivers:thin_provision', 'FALSE') # check replication capability old_type_replication, new_type_replication = _check_extra_specs( 'replication_enabled', strings.METADATA_IS_TRUE) # check multiattach capability old_multiattach, new_multiattach = _check_extra_specs( 'multiattach', strings.METADATA_IS_TRUE) # start retype, please note that the order here is important # because of rollback problem once failed to retype. 
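# The checks below decide whether the retype can be done in place (for
# example a pool move via change_lun()) or needs a brand-new LUN that the
# volume is then cloned into; replication and multiattach settings are
# adjusted afterwards.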
new_props = {} if old_type_thick != new_type_thick: new_props['type_thin'] = not new_type_thick if (old_specified_pool == new_specified_pool and old_specified_lss == new_specified_lss): LOG.info("Same pool and lss.") elif ((old_specified_pool or old_specified_lss) and (new_specified_pool or new_specified_lss)): raise exception.VolumeDriverException( message=_("Retype does not support to move volume from " "specified pool or lss to another specified " "pool or lss.")) elif ((old_specified_pool is None and new_specified_pool) or (old_specified_lss is None and new_specified_lss)): storage_pools = self._helper.get_pools(new_specified_pool) self._helper.verify_pools(storage_pools) storage_lss = self._helper.verify_lss_ids(new_specified_lss) vol_pool = self._helper.get_lun_pool(lun.ds_id)['id'] vol_lss = lun.ds_id[:2].upper() # if old volume is in the specified LSS, but it is needed # to be changed from thin to thick or vice versa, driver # needs to make sure the new volume will be created in the # specified LSS. if ((storage_lss and vol_lss not in storage_lss) or new_props.get('type_thin')): new_props['specified_pool'] = new_specified_pool new_props['specified_lss'] = new_specified_lss elif vol_pool not in storage_pools.keys(): vol_node = int(vol_lss, 16) % 2 new_pool_id = None for pool_id, pool in storage_pools.items(): if vol_node == pool['node']: new_pool_id = pool_id break if new_pool_id: self._helper.change_lun(lun.ds_id, {'pool': new_pool_id}) else: raise exception.VolumeDriverException( message=_("Can not change the pool volume allocated.")) new_lun = None if new_props: new_lun = lun.shallow_copy() for key, value in new_props.items(): setattr(new_lun, key, value) self._clone_lun(lun, new_lun) volume_update = None if new_lun: # if new lun meets all requirements of retype successfully, # exception happens during clean up can be ignored. if new_type_replication: new_lun.type_replication = True new_lun = self._replication.establish_replication(new_lun, True) elif old_type_replication: new_lun.type_replication = False try: self._replication.delete_replica(lun) except Exception: pass if new_multiattach: new_lun.multiattach = True elif old_multiattach: new_lun.multiattach = False try: self._helper.delete_lun(lun) except Exception: pass volume_update = new_lun.update_volume(lun) else: # if driver does not create new lun, don't delete source # lun when failed to enable replication or delete replica. 
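# In-place branch: only the replication and multiattach capabilities change,
# so they are toggled on the existing LUN and no data is copied.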
if not old_type_replication and new_type_replication: lun.type_replication = True lun = self._replication.establish_replication(lun) elif old_type_replication and not new_type_replication: lun = self._replication.delete_replica(lun) lun.type_replication = False if not old_multiattach and new_multiattach: lun.multiattach = True elif old_multiattach and not new_multiattach: lun.multiattach = False volume_update = lun.get_volume_update() return True, volume_update @proxy._trace_time @proxy.logger def revert_to_snapshot(self, context, volume, snapshot): """Revert volume to snapshot.""" if snapshot.volume_size != volume.size: raise exception.InvalidInput( reason=_('Reverting volume is not supported if the volume ' 'size is not equal to the snapshot size.')) vol_lun = Lun(volume) snap_lun = Lun(snapshot, is_snapshot=True) if vol_lun.type_replication: raise exception.VolumeDriverException( message=_('Driver does not support revert to snapshot ' 'of replicated volume.')) try: self._clone_lun(snap_lun, vol_lun) except Exception as err: msg = (_("Reverting volume %(vol)s to snapshot %(snap)s failed " "due to: %(err)s.") % {"vol": volume.name, "snap": snapshot.name, "err": err}) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) @proxy._trace_time @proxy.logger def initialize_connection(self, volume, connector, **kwargs): """Attach a volume to the host.""" lun = Lun(volume) LOG.info('Attach the volume %s.', lun.ds_id) if lun.group and lun.failed_over: backend_helper = self._replication.get_target_helper() else: backend_helper = self._helper return backend_helper.initialize_connection(lun.ds_id, connector, **kwargs) @proxy._trace_time @proxy.logger def terminate_connection(self, volume, connector, force=False, **kwargs): """Detach a volume from a host.""" ret_info = { 'driver_volume_type': 'fibre_channel', 'data': {} } lun = Lun(volume) if (lun.group and lun.failed_over) and not self._active_backend_id: backend_helper = self._replication.get_target_helper() else: backend_helper = self._helper if isinstance(backend_helper, helper.DS8KECKDHelper): LOG.info('Detach the volume %s.', lun.ds_id) return backend_helper.terminate_connection(volume, lun.ds_id, connector, force, **kwargs) else: vol_mapped, host_id, map_info = ( backend_helper.check_vol_mapped_to_host(connector, lun.ds_id)) if host_id is None or not vol_mapped: if host_id is None and not lun.type_replication: LOG.warning('Failed to find the Host information.') return ret_info if host_id and not lun.type_replication and not vol_mapped: LOG.warning("Volume %(vol)s is already not mapped to " "host %(host)s.", {'vol': lun.ds_id, 'host': host_id}) return ret_info if lun.type_replication: if backend_helper == self._replication.get_target_helper(): backend_helper = self._replication.get_source_helper() else: backend_helper = self._replication.get_target_helper() try: if backend_helper.lun_exists(lun.replica_ds_id): LOG.info('Detaching volume %s from the ' 'Secondary site.', lun.replica_ds_id) mapped, host_id, map_info = ( backend_helper.check_vol_mapped_to_host( connector, lun.replica_ds_id)) else: msg = (_('Failed to find the attached ' 'Volume %s.') % lun.ds_id) LOG.error(msg) raise exception.VolumeDriverException(message=msg) except Exception as ex: LOG.warning('Failed to get host mapping for volume ' '%(volume)s in the secondary site. 
' 'Exception: %(err)s.', {'volume': lun.replica_ds_id, 'err': ex}) return ret_info if not mapped: return ret_info else: LOG.info('Detach the volume %s.', lun.replica_ds_id) return backend_helper.terminate_connection( volume, lun.replica_ds_id, host_id, connector, map_info) elif host_id and vol_mapped: LOG.info('Detaching volume %s.', lun.ds_id) return backend_helper.terminate_connection(volume, lun.ds_id, host_id, connector, map_info) @proxy.logger def create_group(self, ctxt, group): """Create consistency group of FlashCopy or RemoteCopy.""" model_update = {} grp = Group(group) # verify replication. if (grp.group_replication_enabled or grp.consisgroup_replication_enabled): for volume_type in group.volume_types: replication_type = volume_utils.is_replicated_spec( volume_type.extra_specs) self._assert(replication_type, 'Unable to create group: group %(grp)s ' 'is for replication type, but volume ' '%(vtype)s is a non-replication one.' % {'grp': grp.id, 'vtype': volume_type.id}) model_update['replication_status'] = ( fields.ReplicationStatus.ENABLED) # verify consistency group. if (grp.consisgroup_snapshot_enabled or grp.consisgroup_replication_enabled): self._assert(self._helper.backend['lss_ids_for_cg'], 'No LSS(s) for CG, please make sure you have ' 'reserved LSS for CG via param lss_range_for_cg.') if grp.consisgroup_replication_enabled: self._helper.verify_rest_version_for_pprc_cg() target_helper = self._replication.get_target_helper() target_helper.verify_rest_version_for_pprc_cg() # driver will create replication group because base cinder # doesn't update replication_status of the group, otherwise # base cinder can take over it. if (grp.consisgroup_snapshot_enabled or grp.consisgroup_replication_enabled or grp.group_replication_enabled): model_update.update(self._helper.create_group(group)) return model_update else: raise NotImplementedError() @proxy.logger def delete_group(self, ctxt, group, volumes): """Delete consistency group and volumes in it.""" grp = Group(group) if grp.consisgroup_snapshot_enabled: luns = [Lun(volume) for volume in volumes] return self._delete_group_with_lock(group, luns) elif grp.consisgroup_replication_enabled: self._assert(not grp.failed_over, 'Group %s has been failed over, it does ' 'not support to delete it' % grp.id) luns = [Lun(volume) for volume in volumes] for lun in luns: self._replication.delete_replica(lun) return self._delete_group_with_lock(group, luns) else: raise NotImplementedError() @coordination.synchronized('{self.prefix}-consistency-group') def _delete_group_with_lock(self, group, luns): model_update, volumes_model_update = ( self._helper.delete_group(group, luns)) if model_update['status'] == fields.GroupStatus.DELETED: self._remove_record_from_consisgroup_cache(group.id) return model_update, volumes_model_update @proxy.logger def delete_group_snapshot(self, ctxt, group_snapshot, snapshots): """Delete volume group snapshot.""" grp = Group(group_snapshot, True) if (grp.consisgroup_snapshot_enabled or grp.consisgroup_replication_enabled): tgt_luns = [Lun(s, is_snapshot=True) for s in snapshots] return self._delete_group_snapshot_with_lock( group_snapshot, tgt_luns) else: raise NotImplementedError() @coordination.synchronized('{self.prefix}-consistency-group') def _delete_group_snapshot_with_lock(self, group_snapshot, tgt_luns): model_update, snapshots_model_update = ( self._helper.delete_group_snapshot(group_snapshot, tgt_luns)) if model_update['status'] == fields.GroupStatus.DELETED: 
self._remove_record_from_consisgroup_cache(group_snapshot.id) return model_update, snapshots_model_update @proxy.logger def create_group_snapshot(self, ctxt, group_snapshot, snapshots): """Create volume group snapshot.""" tgt_group = Group(group_snapshot, True) if (not tgt_group.consisgroup_snapshot_enabled and not tgt_group.consisgroup_replication_enabled): raise NotImplementedError() src_group = Group(group_snapshot.group) self._assert(not src_group.failed_over, 'Group %s has been failed over, it does not ' 'support to create group snapshot.' % src_group.id) snapshots_model_update = [] model_update = {'status': fields.GroupStatus.AVAILABLE} src_luns = [Lun(snapshot.volume) for snapshot in snapshots] tgt_luns = [Lun(snapshot, is_snapshot=True) for snapshot in snapshots] try: if src_luns and tgt_luns: self._clone_group(src_luns, tgt_luns) except restclient.APIException: model_update['status'] = fields.GroupStatus.ERROR LOG.exception('Failed to create group snapshot.') for tgt_lun in tgt_luns: snapshot_model_update = tgt_lun.get_volume_update() snapshot_model_update.update({ 'id': tgt_lun.os_id, 'status': model_update['status'] }) snapshots_model_update.append(snapshot_model_update) return model_update, snapshots_model_update @proxy.logger def update_group(self, ctxt, group, add_volumes, remove_volumes): """Update generic volume group.""" grp = Group(group) if (grp.consisgroup_snapshot_enabled or grp.consisgroup_replication_enabled): self._assert(not grp.failed_over, 'Group %s has been failed over, it does not ' 'support to update it.' % grp.id) return self._update_consisgroup(grp, add_volumes, remove_volumes) else: raise NotImplementedError() def _update_consisgroup(self, grp, add_volumes, remove_volumes): add_volumes_update = [] if add_volumes: add_volumes_update = self._add_volumes_into_consisgroup( grp, add_volumes) remove_volumes_update = [] if remove_volumes: remove_volumes_update = self._remove_volumes_from_consisgroup( grp, add_volumes, remove_volumes) return None, add_volumes_update, remove_volumes_update @proxy.logger def _add_volumes_into_consisgroup(self, grp, add_volumes): add_volumes_update = [] for vol in add_volumes: if vol.status == 'in-use': msg = (_("add volume %(vol)s into group %(grp)s failed " "since this volume is 'in-use' status") % {'vol': vol.id, 'grp': grp.id}) LOG.error(msg) raise exception.VolumeDriverException(message=msg) new_add_luns, old_add_luns = ( self._clone_lun_for_consisgroup(add_volumes, grp)) for new_add_lun, old_add_lun in zip(new_add_luns, old_add_luns): volume_update = new_add_lun.update_volume(old_add_lun) volume_update['id'] = new_add_lun.os_id add_volumes_update.append(volume_update) return add_volumes_update @proxy.logger @coordination.synchronized('{self.prefix}-consistency-group') def _remove_volumes_from_consisgroup(self, grp, add_volumes, remove_volumes): remove_volumes_update = [] for vol in remove_volumes: if vol.status == 'in-use': msg = (_("remove volume %(vol)s from group %(grp)s failed " "since this volume is 'in-use' status") % {'vol': vol.id, 'grp': grp.id}) LOG.error(msg) raise exception.VolumeDriverException(message=msg) new_remove_luns, old_remove_luns = ( self._clone_lun_for_consisgroup(remove_volumes)) for new_remove_lun, old_remove_lun in zip(new_remove_luns, old_remove_luns): volume_update = new_remove_lun.update_volume(old_remove_lun) volume_update['id'] = new_remove_lun.os_id remove_volumes_update.append(volume_update) if len(remove_volumes) == len(grp.volumes) + len(add_volumes): 
self._remove_record_from_consisgroup_cache(grp.id) return remove_volumes_update def _clone_lun_for_consisgroup(self, volumes, grp=None): new_luns = [] old_luns = [] for volume in volumes: old_lun = Lun(volume) if old_lun.ds_id: new_lun = old_lun.shallow_copy() new_lun.group = grp self._clone_lun(old_lun, new_lun) if old_lun.type_replication: new_lun = self._create_replica_helper(new_lun) old_lun = self._replication.delete_replica(old_lun) self._helper.delete_lun(old_lun) new_luns.append(new_lun) old_luns.append(old_lun) return new_luns, old_luns @proxy.logger def _remove_record_from_consisgroup_cache(self, group_id): lss_pairs = self.consisgroup_cache.get(group_id) if lss_pairs: LOG.debug('Consistecy Group %(id)s owns LSS %(lss)s in the cache.', {'id': group_id, 'lss': lss_pairs}) self.consisgroup_cache.pop(group_id) @proxy._trace_time def create_group_from_src(self, ctxt, group, volumes, group_snapshot, sorted_snapshots, source_group, sorted_source_vols): """Create volume group from volume group or volume group snapshot.""" grp = Group(group) if (not grp.consisgroup_snapshot_enabled and not grp.consisgroup_replication_enabled and not grp.group_replication_enabled): raise NotImplementedError() model_update = { 'status': fields.GroupStatus.AVAILABLE, 'replication_status': fields.ReplicationStatus.DISABLED } if (grp.group_replication_enabled or grp.consisgroup_replication_enabled): model_update['replication_status'] = ( fields.ReplicationStatus.ENABLED) volumes_model_update = [] if group_snapshot and sorted_snapshots: src_luns = [Lun(snapshot, is_snapshot=True) for snapshot in sorted_snapshots] elif source_group and sorted_source_vols: src_luns = [Lun(source_vol) for source_vol in sorted_source_vols] src_group = Group(source_group) self._assert(not src_group.failed_over, 'Group %s has been failed over, it does not ' 'support to create a group from it.' % src_group.id) else: msg = _("_create_group_from_src supports a group snapshot " "source or a group source, other sources can not " "be used.") LOG.error(msg) raise exception.InvalidInput(message=msg) try: tgt_luns = [Lun(volume) for volume in volumes] if src_luns and tgt_luns: self._clone_group(src_luns, tgt_luns) for tgt_lun in tgt_luns: if tgt_lun.type_replication: self._create_replica_helper(tgt_lun) except restclient.APIException: model_update['status'] = fields.GroupStatus.ERROR LOG.exception("Failed to create group from group snapshot.") for tgt_lun in tgt_luns: volume_model_update = tgt_lun.get_volume_update() volume_model_update.update({ 'id': tgt_lun.os_id, 'status': model_update['status'], 'replication_status': model_update['replication_status'] }) volumes_model_update.append(volume_model_update) return model_update, volumes_model_update def _clone_group(self, src_luns, tgt_luns): for src_lun in src_luns: self._ensure_vol_not_fc_target(src_lun.ds_id) try: vol_pairs = [] for src_lun, tgt_lun in zip(src_luns, tgt_luns): pool = self._helper.get_pool(src_lun.ds_id[0:2]) if tgt_lun.ds_id is None: self._create_lun_helper(tgt_lun, pool) vol_pairs.append({ "source_volume": src_lun.ds_id, "target_volume": tgt_lun.ds_id }) if tgt_lun.group.consisgroup_snapshot_enabled: self._do_flashcopy_with_freeze(vol_pairs) else: self._helper.start_flashcopy(vol_pairs) self._helper.wait_flashcopy_finished(src_luns, tgt_luns) finally: # if one of volume failed, delete all volumes. 
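# If any FlashCopy in the batch ended in error, all target LUNs of this
# clone are deleted so the new group is not left partially populated.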
error_luns = [lun for lun in tgt_luns if lun.status == 'error'] if error_luns: self._helper.delete_lun(tgt_luns) @coordination.synchronized('{self.prefix}-consistency-group') @proxy._trace_time def _do_flashcopy_with_freeze(self, vol_pairs): # issue flashcopy with freeze self._helper.start_flashcopy(vol_pairs, True) # unfreeze the LSS where source volumes are in lss_ids = list(set(p['source_volume'][0:2] for p in vol_pairs)) LOG.debug('Unfreezing the LSS: %s', ','.join(lss_ids)) self._helper.unfreeze_lss(lss_ids) def freeze_backend(self, ctxt): """Notify the backend that it's frozen.""" pass def thaw_backend(self, ctxt): """Notify the backend that it's unfrozen/thawed.""" pass @proxy.logger @proxy._trace_time def failover_host(self, ctxt, volumes, secondary_id, groups=None): """Fail over the volume back and forth. if secondary_id is 'default', volumes will be failed back, otherwize failed over. """ volume_update_list = [] if secondary_id == strings.PRIMARY_BACKEND_ID: if not self._active_backend_id: LOG.info("Host has been failed back. doesn't need " "to fail back again.") return self._active_backend_id, volume_update_list, [] else: if self._active_backend_id: LOG.info("Host has been failed over to %s.", self._active_backend_id) return self._active_backend_id, volume_update_list, [] target_helper = self._replication.get_target_helper() if secondary_id is None: secondary_id = target_helper.backend['id'] elif secondary_id != target_helper.backend['id']: raise exception.InvalidReplicationTarget( message=(_('Invalid secondary_backend_id specified. ' 'Valid backend id is %s.') % target_helper.backend['id'])) LOG.debug("Starting failover host to %s.", secondary_id) # all volumes passed to failover_host are replicated. replicated_luns = [Lun(volume) for volume in volumes if volume.status in ('available', 'in-use')] # volumes in group may have been failed over. if secondary_id != strings.PRIMARY_BACKEND_ID: failover_luns = [lun for lun in replicated_luns if not lun.failed_over] else: failover_luns = [lun for lun in replicated_luns if lun.failed_over] if failover_luns: try: if secondary_id != strings.PRIMARY_BACKEND_ID: self._replication.start_host_pprc_failover( failover_luns, secondary_id) self._active_backend_id = secondary_id else: self._replication.start_host_pprc_failback( failover_luns, secondary_id) self._active_backend_id = "" self._helper = self._replication.get_source_helper() except restclient.APIException as e: raise exception.UnableToFailOver( reason=(_("Unable to failover host to %(id)s. " "Exception= %(ex)s") % {'id': secondary_id, 'ex': str(e)})) for lun in failover_luns: volume_update = lun.get_volume_update() # failover_host in base cinder has considered previous status # of the volume, it doesn't need to return it for update. volume_update['replication_status'] = ( fields.ReplicationStatus.FAILED_OVER if self._active_backend_id else fields.ReplicationStatus.ENABLED) model_update = {'volume_id': lun.os_id, 'updates': volume_update} volume_update_list.append(model_update) else: LOG.info("No volume has replication capability.") if secondary_id != strings.PRIMARY_BACKEND_ID: LOG.info("Switch to the target %s", secondary_id) self._replication.switch_source_and_target_client() self._active_backend_id = secondary_id else: LOG.info("Switch to the primary %s", secondary_id) self._replication.switch_source_and_target_client() self._active_backend_id = "" # No group entity in DS8K, so just need to update replication_status # of the group. 
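# Only groups in 'available' status are updated; their replication_status is
# flipped to FAILED_OVER on failover and back to ENABLED on failback.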
group_update_list = [] groups = [grp for grp in groups if grp.status == 'available'] if groups: if secondary_id != strings.PRIMARY_BACKEND_ID: update_groups = [grp for grp in groups if grp.replication_status == fields.ReplicationStatus.ENABLED] repl_status = fields.ReplicationStatus.FAILED_OVER else: update_groups = [grp for grp in groups if grp.replication_status == fields.ReplicationStatus.FAILED_OVER] repl_status = fields.ReplicationStatus.ENABLED if update_groups: for group in update_groups: group_update = { 'group_id': group.id, 'updates': {'replication_status': repl_status} } group_update_list.append(group_update) return secondary_id, volume_update_list, group_update_list def enable_replication(self, context, group, volumes): """Resume pprc pairs. if user wants to adjust group, he/she does not need to pause/resume pprc pairs, here just provide a way to resume replicaiton. """ volumes_model_update = [] model_update = ( {'replication_status': fields.ReplicationStatus.ENABLED}) if volumes: luns = [Lun(volume) for volume in volumes] try: self._replication.enable_replication(luns) except restclient.APIException as e: msg = (_('Failed to enable replication for group %(id)s, ' 'Exception: %(ex)s.') % {'id': group.id, 'ex': str(e)}) LOG.exception(msg) raise exception.VolumeDriverException(message=msg) for lun in luns: volumes_model_update.append( {'id': lun.os_id, 'replication_status': fields.ReplicationStatus.ENABLED}) return model_update, volumes_model_update def disable_replication(self, context, group, volumes): """Pause pprc pairs. if user wants to adjust group, he/she does not need to pause/resume pprc pairs, here just provide a way to pause replicaiton. """ volumes_model_update = [] model_update = ( {'replication_status': fields.ReplicationStatus.DISABLED}) if volumes: luns = [Lun(volume) for volume in volumes] try: self._replication.disable_replication(luns) except restclient.APIException as e: msg = (_('Failed to disable replication for group %(id)s, ' 'Exception: %(ex)s.') % {'id': group.id, 'ex': str(e)}) LOG.exception(msg) raise exception.VolumeDriverException(message=msg) for lun in luns: volumes_model_update.append( {'id': lun.os_id, 'replication_status': fields.ReplicationStatus.DISABLED}) return model_update, volumes_model_update def failover_replication(self, context, group, volumes, secondary_backend_id): """Fail over replication for a group and volumes in the group.""" volumes_model_update = [] model_update = {} luns = [Lun(volume) for volume in volumes] if secondary_backend_id == strings.PRIMARY_BACKEND_ID: if luns: if not luns[0].failed_over: LOG.info("Group %s has been failed back. it doesn't " "need to fail back again.", group.id) return model_update, volumes_model_update else: return model_update, volumes_model_update else: target_helper = self._replication.get_target_helper() backend_id = target_helper.backend['id'] if secondary_backend_id is None: secondary_backend_id = backend_id elif secondary_backend_id != backend_id: raise exception.InvalidReplicationTarget( message=(_('Invalid secondary_backend_id %(id)s. 
' 'Valid backend ids are %(ids)s.') % {'id': secondary_backend_id, 'ids': (strings.PRIMARY_BACKEND_ID, backend_id)})) if luns: if luns[0].failed_over: LOG.info("Group %(grp)s has been failed over to %(id)s.", {'grp': group.id, 'id': backend_id}) return model_update, volumes_model_update else: return model_update, volumes_model_update LOG.debug("Starting failover group %(grp)s to %(id)s.", {'grp': group.id, 'id': secondary_backend_id}) try: if secondary_backend_id != strings.PRIMARY_BACKEND_ID: self._replication.start_group_pprc_failover( luns, secondary_backend_id) model_update['replication_status'] = ( fields.ReplicationStatus.FAILED_OVER) else: self._replication.start_group_pprc_failback( luns, secondary_backend_id) model_update['replication_status'] = ( fields.ReplicationStatus.ENABLED) except restclient.APIException as e: raise exception.VolumeDriverException( message=(_("Unable to failover group %(grp_id)s to " "backend %(bck_id)s. Exception= %(ex)s") % {'grp_id': group.id, 'bck_id': secondary_backend_id, 'ex': str(e)})) for lun in luns: volume_model_update = lun.get_volume_update() # base cinder doesn't consider previous status of the volume # in failover_replication, so here returns it for update. volume_model_update['replication_status'] = ( model_update['replication_status']) volume_model_update['id'] = lun.os_id volumes_model_update.append(volume_model_update) return model_update, volumes_model_update def get_replication_error_status(self, context, groups): """Return error info for replicated groups and its volumes. all pprc copy related APIs wait until copy is finished, so it does not need to check their status afterwards. """ return [], [] ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/drivers/ibm/ibm_storage/ds8k_replication.py0000664000175000017500000007300600000000000026204 0ustar00zuulzuul00000000000000# Copyright (c) 2016 IBM Corporation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
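#
# Illustrative note (not part of the original source): failover_replication()
# defined just above (in the DS8K proxy module) returns a
# (model_update, volumes_model_update) tuple that Cinder applies to the group
# and its volumes, while the Replication and MetroMirrorManager classes in
# this module provide the PPRC machinery it relies on. For a hypothetical
# two-volume group that was just failed over, the returned structures would
# look roughly like:
#
#     model_update = {'replication_status': 'failed-over'}
#     volumes_model_update = [
#         {'id': '0f1e2d3c-...', 'replication_status': 'failed-over'},
#         {'id': '4b5a6978-...', 'replication_status': 'failed-over'},
#     ]
#
# The volume ids are placeholders, and each per-volume dict also carries the
# extra keys produced by Lun.get_volume_update().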
# import eventlet from oslo_log import log as logging from oslo_utils import excutils from cinder import coordination from cinder import exception from cinder.i18n import _ import cinder.volume.drivers.ibm.ibm_storage as storage from cinder.volume.drivers.ibm.ibm_storage import ds8k_helper as helper from cinder.volume.drivers.ibm.ibm_storage import ds8k_restclient as restclient from cinder.volume.drivers.ibm.ibm_storage import proxy LOG = logging.getLogger(__name__) PPRC_PATH_NOT_EXIST = 0x00 PPRC_PATH_HEALTHY = 0x01 PPRC_PATH_UNHEALTHY = 0x02 PPRC_PATH_FULL = 0x03 class MetroMirrorManager(object): """Manage metro mirror for replication.""" def __init__(self, source_helper, target_helper): self._source_helper = source_helper self._target_helper = target_helper def switch_source_and_target(self): self._source_helper, self._target_helper = ( self._target_helper, self._source_helper) def check_physical_links(self): ports = self._source_helper.get_physical_links( self._target_helper.backend['storage_wwnn']) if not ports: raise exception.VolumeDriverException( message=((_("%(tgt)s is not connected to %(src)s!") % { 'tgt': self._target_helper.backend['storage_wwnn'], 'src': self._source_helper.backend['storage_wwnn'] }))) pairs = [{ 'source_port_id': p['source_port_id'], 'target_port_id': p['target_port_id'] } for p in ports] if not self._target_helper.backend['port_pairs']: # if there are more than eight physical links, # choose eight of them. self._target_helper.backend['port_pairs'] = ( pairs[:8] if len(pairs) > 8 else pairs) else: # verify the port pairs user set for pair in self._target_helper.backend['port_pairs']: if pair not in pairs: valid_pairs = ';'.join( ["%s-%s" % (p['source_port_id'], p['target_port_id']) for p in pairs]) invalid_pair = "%s-%s" % (pair['source_port_id'], pair['target_port_id']) raise exception.VolumeDriverException( message=((_("Invalid port pair: %(invalid)s, valid " "port pair(s) are: %(valid)s") % {'invalid': invalid_pair, 'valid': valid_pairs}))) self._source_helper.backend['port_pairs'] = [{ 'source_port_id': p['target_port_id'], 'target_port_id': p['source_port_id'] } for p in self._target_helper.backend['port_pairs']] def is_target_alive(self): try: self._target_helper.get_systems() except restclient.TimeoutException as e: LOG.info("REST request time out, backend may be not available " "any more. Exception: %s", e) return False return True def find_from_pprc_paths(self, specified_lss=None, excluded_lss=None): """find lss from existing pprc paths and pool id for it. the format of pool_lss_pair returned is as below: {'source': (pid, lss), 'target': (pid, lss)} """ state, paths = self._filter_pprc_paths(specified_lss) if state != PPRC_PATH_HEALTHY: # check whether the physical links are available or not, # or have been changed. self.check_physical_links() return state, None if excluded_lss: paths = [p for p in paths if p['source_lss_id'] not in excluded_lss] # only establish_replication will specify the source LSS # and it need to reuse LSS reserved for CG if this LSS # is in PPRC path. if not specified_lss: paths = [p for p in paths if p['source_lss_id'] not in self._source_helper.backend['lss_ids_for_cg']] # sort pairs according to the number of luns in their LSSes, # and get the pair which LSS has least luns. candidates = [] source_lss_set = set(p['source_lss_id'] for p in paths) for lss in source_lss_set: # get the number of luns in source. 
src_luns = self._source_helper.get_lun_number_in_lss(lss) if src_luns == helper.LSS_VOL_SLOTS and not specified_lss: continue spec_paths = [p for p in paths if p['source_lss_id'] == lss] for path in spec_paths: # get the number of luns in target. try: tgt_luns = self._target_helper.get_lun_number_in_lss( path['target_lss_id']) except restclient.APIException: # if DS8K can fix this problem, then remove the # exception here. LOG.error("Target LSS %s in PPRC path may doesn't " "exist although PPRC path is available.", path['target_lss_id']) tgt_luns = 0 candidates.append((path['source_lss_id'], path['target_lss_id'], src_luns + tgt_luns)) if not candidates: return PPRC_PATH_FULL, None else: src_lss, tgt_lss, num = sorted(candidates, key=lambda c: c[2])[0] return PPRC_PATH_HEALTHY, { 'source': (self._source_helper.get_pool(src_lss), src_lss), 'target': (self._target_helper.get_pool(tgt_lss), tgt_lss) } def _filter_pprc_paths(self, lss): paths = self._source_helper.get_pprc_paths(lss) if paths: # get the paths only connected to replication target paths = [p for p in paths if p['target_system_wwnn'] in self._target_helper.backend['storage_wwnn']] else: LOG.info("No PPRC paths found in primary DS8K.") return PPRC_PATH_NOT_EXIST, None # get the paths whose port pairs have been set in configuration file. expected_port_pairs = [ (port['source_port_id'], port['target_port_id']) for port in self._target_helper.backend['port_pairs']] for path in paths[:]: port_pairs = [(p['source_port_id'], p['target_port_id']) for p in path['port_pairs']] if not (set(port_pairs) & set(expected_port_pairs)): paths.remove(path) if not paths: LOG.info("Existing PPRC paths do not use port pairs that " "are set.") return PPRC_PATH_NOT_EXIST, None # abandon PPRC paths according to volume type(fb/ckd) source_lss_set = set(p['source_lss_id'] for p in paths) if self._source_helper.backend.get('device_mapping'): source_lss_set = source_lss_set & set( self._source_helper.backend['device_mapping'].keys()) else: all_lss = self._source_helper.get_all_lss(['id', 'type']) fb_lss = set( lss['id'] for lss in all_lss if lss['type'] == 'fb') source_lss_set = source_lss_set & fb_lss paths = [p for p in paths if p['source_lss_id'] in source_lss_set] if not paths: LOG.info("No source LSS in PPRC paths has correct volume type.") return PPRC_PATH_NOT_EXIST, None # if the group property of lss doesn't match pool node, # abandon these paths. discarded_src_lss = [] discarded_tgt_lss = [] for lss in source_lss_set: spec_paths = [p for p in paths if p['source_lss_id'] == lss] if self._source_helper.get_pool(lss) is None: discarded_src_lss.append(lss) continue for spec_path in spec_paths: tgt_lss = spec_path['target_lss_id'] if self._target_helper.get_pool(tgt_lss) is None: discarded_tgt_lss.append(tgt_lss) if discarded_src_lss: paths = [p for p in paths if p['source_lss_id'] not in discarded_src_lss] if discarded_tgt_lss: paths = [p for p in paths if p['target_lss_id'] not in discarded_tgt_lss] if not paths: LOG.info("No PPRC paths can be re-used.") return PPRC_PATH_NOT_EXIST, None # abandon unhealthy PPRC paths. 
for path in paths[:]: failed_port_pairs = [ p for p in path['port_pairs'] if p['state'] != 'success'] if len(failed_port_pairs) == len(path['port_pairs']): paths.remove(path) if not paths: LOG.info("PPRC paths between primary and target DS8K " "are unhealthy.") return PPRC_PATH_UNHEALTHY, None return PPRC_PATH_HEALTHY, paths def create_pprc_path(self, lun, is_group=False): switch = lun.failed_over if is_group else False src_helper, tgt_helper = ( (self._target_helper, self._source_helper) if switch else (self._source_helper, self._target_helper)) src_lss = lun.pool_lss_pair['source'][1] tgt_lss = lun.pool_lss_pair['target'][1] # check whether the pprc path exists and is healthy or not. pid = (src_helper.backend['storage_wwnn'] + '_' + src_lss + ':' + tgt_helper.backend['storage_wwnn'] + '_' + tgt_lss) state = self._is_pprc_paths_healthy(pid, switch) LOG.info("The state of PPRC path %(path)s is %(state)s.", {'path': pid, 'state': state}) if state == PPRC_PATH_HEALTHY: return # create the pprc path pathData = { 'target_system_wwnn': tgt_helper.backend['storage_wwnn'], 'source_lss_id': src_lss, 'target_lss_id': tgt_lss, 'port_pairs': tgt_helper.backend['port_pairs'] } if lun.group and lun.group.consisgroup_replication_enabled: pathData['pprc_consistency_group'] = 'enable' LOG.info("PPRC path %(src)s:%(tgt)s will be created.", {'src': src_lss, 'tgt': tgt_lss}) src_helper.create_pprc_path(pathData) # check the state of the pprc path LOG.debug("Checking the state of the new PPRC path.") for retry in range(4): eventlet.sleep(2) if self._is_pprc_paths_healthy(pid, switch) == PPRC_PATH_HEALTHY: break if retry == 3: src_helper.delete_pprc_path(pid) raise restclient.APIException( data=(_("Failed to create PPRC path %(src)s:%(tgt)s.") % {'src': src_lss, 'tgt': tgt_lss})) LOG.debug("Create the new PPRC path successfully.") def _is_pprc_paths_healthy(self, path_id, switch): bck_helper = self._target_helper if switch else self._source_helper try: path = bck_helper.get_pprc_path(path_id) except restclient.APIException: return PPRC_PATH_NOT_EXIST for port in path['port_pairs']: if port['state'] == 'success': return PPRC_PATH_HEALTHY return PPRC_PATH_UNHEALTHY def create_pprc_pairs(self, lun): tgt_vol_id = lun.replication_driver_data[ self._target_helper.backend['id']]['vol_hex_id'] tgt_stg_id = self._target_helper.backend['storage_unit'] vol_pairs = [{ 'source_volume': lun.ds_id, 'source_system_id': self._source_helper.backend['storage_unit'], 'target_volume': tgt_vol_id, 'target_system_id': tgt_stg_id }] pair_data = { "volume_pairs": vol_pairs, "type": "metro_mirror", "options": ["permit_space_efficient_target", "initial_copy_full"] } LOG.debug("Creating pprc pair, pair_data is %s.", pair_data) self._source_helper.create_pprc_pair(pair_data) self._source_helper.wait_pprc_copy_finished([lun.ds_id], 'full_duplex') LOG.info("The state of PPRC pair has become full_duplex.") def delete_pprc_pairs(self, lun): self._source_helper.delete_pprc_pair(lun.ds_id) if self.is_target_alive() and lun.replication_driver_data: replica = sorted(lun.replication_driver_data.values())[0] self._target_helper.delete_pprc_pair(replica['vol_hex_id']) def do_pprc_failover(self, luns, is_group=False): switch = luns[0].failed_over if is_group else False src_helper, tgt_helper = ( (self._target_helper, self._source_helper) if switch else (self._source_helper, self._target_helper)) vol_pairs = [] target_vol_ids = [] for lun in luns: if not tgt_helper.lun_exists(lun.replica_ds_id): LOG.info("Target volume %(volid)s doesn't exist in " 
"DS8K %(storage)s.", {'volid': lun.replica_ds_id, 'storage': tgt_helper.backend['storage_unit']}) continue vol_pairs.append({ 'source_volume': lun.replica_ds_id, 'source_system_id': tgt_helper.backend['storage_unit'], 'target_volume': lun.ds_id, 'target_system_id': src_helper.backend['storage_unit'] }) target_vol_ids.append(lun.replica_ds_id) pair_data = { "volume_pairs": vol_pairs, "type": "metro_mirror", "options": ["failover"] } LOG.info("Begin to fail over to %(backend)s, " "pair_data is %(pair_data)s.", {'backend': tgt_helper.backend['storage_unit'], 'pair_data': pair_data}) tgt_helper.create_pprc_pair(pair_data) tgt_helper.wait_pprc_copy_finished(target_vol_ids, 'suspended', switch) LOG.info("Failover from %(src)s to %(tgt)s is finished.", { 'src': src_helper.backend['storage_unit'], 'tgt': tgt_helper.backend['storage_unit'] }) def get_pprc_pair_ids(self, luns, switch=False): if not luns: return None src_helper, tgt_helper = ( (self._target_helper, self._source_helper) if switch else (self._source_helper, self._target_helper)) pprc_pair_ids = [] for lun in luns: if switch: is_lun_exist = tgt_helper.lun_exists(lun.replica_ds_id) else: is_lun_exist = src_helper.lun_exists(lun.ds_id) if not is_lun_exist: LOG.info("Target volume %(volume)s doesn't exist in " "DS8K %(storage)s.", {'volume': (lun.replica_ds_id if switch else lun.ds_id), 'storage': (tgt_helper.backend['storage_unit'] if switch else src_helper.backend['storage_unit'])}) continue pprc_pair_ids.append( src_helper.backend['storage_unit'] + '_' + lun.ds_id + ':' + tgt_helper.backend['storage_unit'] + '_' + lun.replica_ds_id) return pprc_pair_ids def do_pprc_failback(self, luns, is_group=False): switch = luns[0].failed_over if is_group else False bck_helper = self._target_helper if switch else self._source_helper pair_data = {"pprc_ids": self.get_pprc_pair_ids(luns, switch), "type": "metro_mirror", "options": ["failback"]} LOG.info("Begin to run failback in %(backend)s, " "pair_data is %(pair_data)s.", {'backend': bck_helper.backend['storage_unit'], 'pair_data': pair_data}) bck_helper.do_failback(pair_data) lun_ids = [lun.ds_id for lun in luns] bck_helper.wait_pprc_copy_finished(lun_ids, 'full_duplex', switch) LOG.info("Run failback in %s is finished.", bck_helper.backend['storage_unit']) class Replication(object): """Metro Mirror and Global Mirror will be used by it. Version history: .. code-block:: none 1.0.0 - initial revision. 2.1.0 - ignore exception during cleanup when creating or deleting replica failed. 2.1.1 - Adding support for replication consistency group. 
""" VERSION = "2.1.1" def __init__(self, source_helper, target_device): self._source_helper = source_helper connection_type = target_device.get('connection_type') if connection_type == storage.XIV_CONNECTION_TYPE_FC: self._target_helper = ( helper.DS8KReplicationTargetHelper(target_device)) elif connection_type == storage.XIV_CONNECTION_TYPE_FC_ECKD: self._target_helper = ( helper.DS8KReplicationTargetECKDHelper(target_device)) else: raise exception.InvalidParameterValue( err=(_("Param [connection_type] %s in replication_device " "is invalid.") % connection_type)) if self._target_helper.backend['lss_ids_for_cg']: if (len(self._target_helper.backend['lss_ids_for_cg']) != len(self._source_helper.backend['lss_ids_for_cg'])): raise exception.VolumeDriverException( message=_("Please reserve the same number of LSS for " "secondary DS8K just as the primary DS8K.")) else: self._target_helper.backend['lss_ids_for_cg'] = ( self._source_helper.backend['lss_ids_for_cg']) self._mm_manager = MetroMirrorManager(self._source_helper, self._target_helper) def get_target_helper(self): return self._target_helper def get_source_helper(self): return self._source_helper def check_connection_type(self): src_conn_type = self._source_helper.get_connection_type() tgt_conn_type = self._target_helper.get_connection_type() if src_conn_type != tgt_conn_type: raise exception.VolumeDriverException( message=(_("The connection type in primary backend is " "%(primary)s, but in secondary backend it is " "%(secondary)s") % {'primary': src_conn_type, 'secondary': tgt_conn_type})) def check_physical_links(self): self._mm_manager.check_physical_links() def switch_source_and_target_client(self): # switch the helper in metro mirror manager self._mm_manager.switch_source_and_target() # switch the helper self._source_helper, self._target_helper = ( self._target_helper, self._source_helper) def _switch_source_and_target_volume(self, luns, secondary_backend_id): for lun in luns: if secondary_backend_id == 'default': backend_id = self._target_helper.backend['id'] lun.failed_over = False else: backend_id = 'default' lun.failed_over = True # secondary_id is never blank here. 
lun.replication_driver_data = ( {backend_id: {'vol_hex_id': lun.ds_id}}) lun.ds_id, lun.replica_ds_id = lun.replica_ds_id, lun.ds_id return luns @proxy.logger def find_pool_lss_pair(self, excluded_lss): state, pool_lss_pair = ( self._mm_manager.find_from_pprc_paths(None, excluded_lss)) if pool_lss_pair is None: pool_lss_pair = self.find_new_lss_for_source(excluded_lss) pool_lss_pair.update(self.find_new_lss_for_target()) return pool_lss_pair @proxy.logger def find_new_lss_for_source(self, excluded_lss): src_pid, src_lss = self._source_helper.find_pool_and_lss(excluded_lss) return {'source': (src_pid, src_lss)} @proxy.logger def find_new_lss_for_target(self): tgt_pid, tgt_lss = self._target_helper.find_pool_and_lss() return {'target': (tgt_pid, tgt_lss)} @proxy.logger def establish_replication(self, lun, delete_source=False): state, lun.pool_lss_pair = ( self._mm_manager.find_from_pprc_paths(lun.ds_id[0:2])) LOG.debug("establish_replication: pool_lss_pair is %s.", lun.pool_lss_pair) if state == PPRC_PATH_UNHEALTHY: raise restclient.APIException( data=(_("The path(s) for volume %(name)s isn't available " "any more, please make sure the state of the path(s) " "which source LSS is %(lss)s is success.") % {'name': lun.cinder_name, 'lss': lun.ds_id[0:2]})) elif state == PPRC_PATH_NOT_EXIST: pid = self._source_helper.get_pool(lun.ds_id[0:2]) lun.pool_lss_pair = {'source': (pid, lun.ds_id[0:2])} lun.pool_lss_pair.update(self.find_new_lss_for_target()) lun = self.create_replica(lun, delete_source) return lun @proxy.logger @coordination.synchronized('ibm-ds8k-replication') def create_replica(self, lun, delete_source=True): try: self._target_helper.create_lun(lun) # create PPRC paths if need. self._mm_manager.create_pprc_path(lun) # create pprc pair self._mm_manager.create_pprc_pairs(lun) except restclient.APIException: with excutils.save_and_reraise_exception(): try: self.delete_replica(lun) if delete_source: self._source_helper.delete_lun(lun) except restclient.APIException as ex: LOG.info("Failed to cleanup replicated volume %(id)s, " "Exception: %(ex)s.", {'id': lun.ds_id, 'ex': ex}) lun.replication_status = 'enabled' return lun @proxy.logger def delete_replica(self, lun, delete_source=False): if lun.ds_id is not None: try: self._mm_manager.delete_pprc_pairs(lun) self._delete_replica(lun) except restclient.APIException as e: if delete_source: try: self._source_helper.delete_lun(lun) except restclient.APIException as ex: LOG.info("Failed to delete source volume %(id)s, " "Exception: %(ex)s.", {'id': lun.ds_id, 'ex': ex}) raise exception.VolumeDriverException( message=(_('Failed to delete the target volume for ' 'volume %(volume)s, Exception: %(ex)s.') % {'volume': lun.ds_id, 'ex': str(e)})) lun.replication_status = 'disabled' lun.replication_driver_data = {} return lun @proxy.logger def _delete_replica(self, lun): if not lun.replication_driver_data: LOG.error("No replica ID for lun %s, maybe there is something " "wrong when creating the replica for lun.", lun.ds_id) return None for backend_id, backend in lun.replication_driver_data.items(): if not self._mm_manager.is_target_alive(): return None if not self._target_helper.lun_exists(backend['vol_hex_id']): LOG.debug("Replica %s not found.", backend['vol_hex_id']) continue LOG.debug("Deleting replica %s.", backend['vol_hex_id']) self._target_helper.delete_lun_by_id(backend['vol_hex_id']) def extend_replica(self, lun, param): for backend_id, backend in lun.replication_driver_data.items(): self._target_helper.change_lun(backend['vol_hex_id'], param) 
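    # Illustrative note (not part of the original source): after a host or
    # group failover, _switch_source_and_target_volume() above swaps the
    # roles of the two DS8K volumes and records the peer volume in
    # replication_driver_data. For a hypothetical LUN whose primary DS8K
    # volume id is '1234' and whose replica id is 'ABCD', the bookkeeping
    # changes roughly as follows:
    #
    #     before: lun.ds_id == '1234', lun.replica_ds_id == 'ABCD'
    #             lun.replication_driver_data == {'<target_backend_id>': {'vol_hex_id': 'ABCD'}}
    #     after:  lun.failed_over is True
    #             lun.ds_id == 'ABCD', lun.replica_ds_id == '1234'
    #             lun.replication_driver_data == {'default': {'vol_hex_id': '1234'}}
    #
    # '1234', 'ABCD' and '<target_backend_id>' are placeholders for real DS8K
    # volume ids and the configured replication target backend_id.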
def delete_pprc_pairs(self, lun): self._mm_manager.delete_pprc_pairs(lun) def create_pprc_pairs(self, lun): self._mm_manager.create_pprc_pairs(lun) def start_host_pprc_failover(self, luns, backend_id): self._mm_manager.do_pprc_failover(luns) self.switch_source_and_target_client() self._switch_source_and_target_volume(luns, backend_id) def start_group_pprc_failover(self, luns, backend_id): self._mm_manager.do_pprc_failover(luns, True) self._switch_source_and_target_volume(luns, backend_id) def _get_sample_luns(self, luns): # choose sample lun according to position. sample_luns = [] positions = [] for lun in luns: position = (lun.pool_lss_pair['source'][1], lun.pool_lss_pair['target'][1]) if position not in positions: sample_luns.append(lun) positions.append(position) return sample_luns @proxy.logger def start_host_pprc_failback(self, luns, backend_id): # check whether primary client is alive or not. if not self._mm_manager.is_target_alive(): try: self._target_helper.update_client() except restclient.APIException: msg = _("Can not connect to the primary backend, " "please make sure it is back.") LOG.error(msg) raise exception.UnableToFailOver(reason=msg) LOG.debug("Failback host starts, backend id is %s.", backend_id) sample_luns = self._get_sample_luns(luns) for lun in sample_luns: self._mm_manager.create_pprc_path(lun) self._mm_manager.do_pprc_failback(luns) # revert the relationship of source volume and target volume self.start_host_pprc_failover(luns, backend_id) self._mm_manager.do_pprc_failback(luns) LOG.debug("Failback host ends, backend id is %s.", backend_id) @proxy.logger def start_group_pprc_failback(self, luns, backend_id): LOG.debug("Failback group starts, backend id is %s.", backend_id) sample_luns = self._get_sample_luns(luns) for lun in sample_luns: self._mm_manager.create_pprc_path(lun, True) self._mm_manager.do_pprc_failback(luns, True) self.start_group_pprc_failover(luns, backend_id) self._mm_manager.do_pprc_failback(luns, True) LOG.debug("Failback group ends, backend id is %s.", backend_id) def _get_expected_luns(self, luns, state, ignored_state=None): lun_ids = set(lun.ds_id for lun in luns) min_lun_id = min(lun_ids) max_lun_id = max(lun_ids) if not luns[0].failed_over: pairs = self._source_helper.get_pprc_pairs(min_lun_id, max_lun_id) else: pairs = self._target_helper.get_pprc_pairs(min_lun_id, max_lun_id) pairs = {pair['source_volume']['name']: pair for pair in pairs} expected_luns = [] for lun in luns: pair = pairs.get(lun.ds_id) if pair: if ignored_state and pair['state'] == ignored_state: continue elif pair['state'] != state: raise exception.VolumeDriverException( message=(_("Source volume %(id)s has wrong pprc pair " "state %(invalid_state)s, expected one is " "%(valid_state)s") % {'id': pair['source_volume']['name'], 'invalid_state': pair['state'], 'valid_state': state})) else: raise exception.VolumeDriverException( message=_("There is no PPRC pair for source volume " "%s.") % lun.ds_id) expected_luns.append(lun) return expected_luns @proxy.logger def enable_replication(self, luns): # after group is failed over, user can not enable replication. if not luns: return None luns = self._get_expected_luns(luns, 'suspended', 'full_duplex') pprc_pair_ids = self._mm_manager.get_pprc_pair_ids(luns) LOG.debug("enable_replication: pprc_pair_ids is %s", pprc_pair_ids) if pprc_pair_ids: self._source_helper.resume_pprc_pairs(pprc_pair_ids) @proxy.logger def disable_replication(self, luns): # after group is failed over, user can not disable replication. 
if not luns: return None luns = self._get_expected_luns(luns, 'full_duplex', 'suspended') pprc_pair_ids = self._mm_manager.get_pprc_pair_ids(luns) LOG.debug("disable_replication: pprc_pair_ids is %s", pprc_pair_ids) if pprc_pair_ids: self._source_helper.pause_pprc_pairs(pprc_pair_ids) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/drivers/ibm/ibm_storage/ds8k_restclient.py0000664000175000017500000003040500000000000026043 0ustar00zuulzuul00000000000000# Copyright (c) 2016 IBM Corporation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # import abc import importlib import json import urllib import eventlet import requests from requests import exceptions as req_exception from cinder import exception from cinder.i18n import _ TOKEN_ERROR_CODES = ('BE7A001B', 'BE7A001A') # remove BE7A0032 after REST fixed the problem of throwing message # which shows all LSS are full but actually only one LSS is full. LSS_ERROR_CODES = ('BE7A0031', 'BE7A0032') AUTHENTICATION_ERROR_CODES = ( 'BE7A001B', 'BE7A001A', 'BE7A0027', 'BE7A0028', 'BE7A0029', 'BE7A002A', 'BE7A002B', 'BE7A002C', 'BE7A002D' ) class APIException(exception.VolumeBackendAPIException): """Exception raised for errors in the REST APIs.""" """ Attributes: message -- explanation of the error """ pass class APIAuthenticationException(APIException): """Exception raised for errors in the Authentication.""" """ Attributes: message -- explanation of the error """ pass class LssFullException(APIException): """Exception raised for errors when LSS is full.""" """ Attributes: message -- explanation of the error """ pass class LssIDExhaustError(exception.VolumeBackendAPIException): """Exception raised for errors when can not find available LSS.""" """ Attributes: message -- explanation of the error """ pass class TimeoutException(APIException): """Exception raised when the request is time out.""" """ Attributes: message -- explanation of the error """ pass class AbstractRESTConnector(object, metaclass=abc.ABCMeta): """Inherit this class when you define your own connector.""" @abc.abstractmethod def close(self): """close the connector. If the connector uses persistent connection, please provide a way to close it in this method, otherwise you can just leave this method empty. Input: None Output: None Exception: can raise any exceptions """ pass @abc.abstractmethod def send(self, method='', url='', headers=None, payload='', timeout=900): """send the request. Input: see above Output: if we reached the server and read an HTTP response: .. code:: text (INTEGER__HTTP_RESPONSE_STATUS_CODE, STRING__BODY_OF_RESPONSE_EVEN_IF_STATUS_NOT_200) if we were not able to reach the server or response was invalid HTTP(like certificate error, or could not resolve domain etc): .. code:: text (False, STRING__SHORT_EXPLANATION_OF_REASON_FOR_NOT_ REACHING_SERVER_OR_GETTING_INVALID_RESPONSE) Exception: should not raise any exceptions itself as all the expected scenarios are covered above. 
Unexpected exceptions are permitted. """ pass class DefaultRESTConnector(AbstractRESTConnector): """User can write their own connector and pass it to RESTScheduler.""" def __init__(self, verify): # overwrite certificate validation method only when using # default connector, and not globally import the new scheme. if isinstance(verify, str): importlib.import_module("cinder.volume.drivers.ibm.ibm_storage." "ds8k_connection") self.session = None self.verify = verify def connect(self): if self.session is None: self.session = requests.Session() if isinstance(self.verify, str): self.session.mount('httpsds8k://', requests.adapters.HTTPAdapter()) else: self.session.mount('https://', requests.adapters.HTTPAdapter()) self.session.verify = self.verify def close(self): self.session.close() self.session = None def send(self, method='', url='', headers=None, payload='', timeout=900): self.connect() try: if isinstance(self.verify, str): url = url.replace('https://', 'httpsds8k://') resp = self.session.request(method, url, headers=headers, data=payload, timeout=timeout) return resp.status_code, resp.text except req_exception.ConnectTimeout as e: self.close() return 408, "Connection time out: %s" % str(e) except req_exception.SSLError as e: self.close() return False, "SSL error: %s" % str(e) except Exception as e: self.close() return False, "Unexcepted exception: %s" % str(e) class RESTScheduler(object): """This class is multithread friendly. it isn't optimally (token handling) but good enough for low-mid traffic. """ def __init__(self, host, user, passw, connector_obj, verify=False): if not host: raise APIException('The host parameter must not be empty.') # the api incorrectly transforms an empty password to a missing # password paramter, so we have to catch it here if not user or not passw: raise APIAuthenticationException( _('The username and the password parameters must ' 'not be empty.')) self.token = '' self.host = host self.port = '8452' self.user = user if isinstance(user, str) else user.decode() self.passw = passw if isinstance(passw, str) else passw.decode() self.connector = connector_obj or DefaultRESTConnector(verify) self.connect() def connect(self): # one retry when connecting, 60s should be enough to get the token, # usually it is within 30s. try: response = self.send( 'POST', '/tokens', {'username': self.user, 'password': self.passw}, timeout=60) except Exception: eventlet.sleep(2) response = self.send( 'POST', '/tokens', {'username': self.user, 'password': self.passw}, timeout=60) self.token = response['token']['token'] def close(self): self.connector.close() # usually NI responses within 15min. def send(self, method, endpoint, data=None, badStatusException=True, params=None, fields=None, timeout=900): # verify the method if method not in ('GET', 'POST', 'PUT', 'DELETE'): msg = _("Invalid HTTP method: %s") % method raise APIException(msg) # prepare the url url = "https://%s:%s/api/v1%s" % (self.host, self.port, endpoint) if fields: params = params or {} params['data_fields'] = ','.join(fields) if params: url += (('&' if '?' 
in url else '?') + urllib.parse.urlencode(params)) # prepare the data data = json.dumps({'request': {'params': data}}) if data else None # make a REST request to DS8K and get one retry if logged out for attempts in range(2): headers = {'Content-Type': 'application/json', 'X-Auth-Token': self.token} code, body = self.connector.send(method, url, headers, data, timeout) # parse the returned code if code == 200: try: response = json.loads(body) except ValueError: response = {'server': { 'status': 'failed', 'message': 'Unable to parse server response into json.' }} elif code == 408: response = {'server': {'status': 'timeout', 'message': body}} elif code is not False: try: response = json.loads(body) # make sure has useful message response['server']['message'] except Exception: response = {'server': { 'status': 'failed', 'message': 'HTTP %s: %s' % (code, body) }} else: response = {'server': {'status': 'failed', 'message': body}} # handle the response if (response['server'].get('code') in TOKEN_ERROR_CODES and attempts == 0): self.connect() elif response['server'].get('code') in AUTHENTICATION_ERROR_CODES: raise APIAuthenticationException( data=(_('Authentication failed for host %(host)s. ' 'Exception= %(e)s') % {'host': self.host, 'e': response['server']['message']})) elif response['server'].get('code') in LSS_ERROR_CODES: raise LssFullException( data=(_('Can not put the volume in LSS: %s') % response['server']['message'])) elif response['server']['status'] == 'timeout': raise TimeoutException( data=(_('Request to storage API time out: %s') % response['server']['message'])) elif (response['server']['status'] != 'ok' and (badStatusException or 'code' not in response['server'])): # if code is not in response means that error was in # transport so we raise exception even if asked not to # via badStatusException=False, but will retry it to # confirm the problem. if attempts == 1: raise APIException( data=(_("Request to storage API failed: %(err)s, " "(%(url)s).") % {'err': response['server']['message'], 'url': url})) eventlet.sleep(2) else: return response # same as the send method above but returns first item from # response data, must receive only one item. def fetchall(self, *args, **kwargs): r = self.send(*args, **kwargs)['data'] if len(r) != 1: raise APIException( data=(_('Expected one result but got %d.') % len(r))) else: return r.popitem()[1] # the api for some reason returns a list when you request details # of a specific item. def fetchone(self, *args, **kwargs): r = self.fetchall(*args, **kwargs) if len(r) != 1: raise APIException( data=(_('Expected one item in result but got %d.') % len(r))) return r[0] # same as the send method above but returns the last element of the # link property in the response. def fetchid(self, *args, **kwargs): r = self.send(*args, **kwargs) if 'responses' in r: if len(r['responses']) != 1: raise APIException( data=(_('Expected one item in result responses but ' 'got %d.') % len(r['responses']))) r = r['responses'][0] return r['link']['href'].split('/')[-1] # the api unfortunately has no way to differentiate between api error # and error in DS8K resources. this method returns True if "ok", False # if "failed", exception otherwise. 
def statusok(self, *args, **kwargs): return self.send(*args, badStatusException=False, **kwargs)['server']['status'] == 'ok' ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/drivers/ibm/ibm_storage/ibm_storage.py0000664000175000017500000002356000000000000025235 0ustar00zuulzuul00000000000000# Copyright 2013 IBM Corp. # Copyright (c) 2013 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ IBM Storage driver is a unified Volume driver for IBM XIV, Spectrum Accelerate, FlashSystem A9000, FlashSystem A9000R and DS8000 storage systems. """ from oslo_config import cfg from oslo_log import log as logging from oslo_utils import importutils from cinder import exception from cinder import interface from cinder.volume import configuration from cinder.volume import driver from cinder.volume.drivers.san import san from cinder.zonemanager import utils as fczm_utils driver_opts = [ cfg.StrOpt( 'proxy', default='cinder.volume.drivers.ibm.ibm_storage.proxy.IBMStorageProxy', help='Proxy driver that connects to the IBM Storage Array'), cfg.StrOpt( 'connection_type', default='iscsi', choices=['fibre_channel', 'iscsi'], help='Connection type to the IBM Storage Array'), cfg.StrOpt( 'chap', default='disabled', choices=['disabled', 'enabled'], help='CHAP authentication mode, effective only for iscsi' ' (disabled|enabled)'), cfg.StrOpt( 'management_ips', default='', help='List of Management IP addresses (separated by commas)'), ] CONF = cfg.CONF CONF.register_opts(driver_opts, group=configuration.SHARED_CONF_GROUP) LOG = logging.getLogger(__name__) @interface.volumedriver class IBMStorageDriver(san.SanDriver, driver.ManageableVD, driver.MigrateVD, driver.CloneableImageVD): """IBM Storage driver IBM Storage driver is a unified Volume driver for IBM XIV, Spectrum Accelerate, FlashSystem A9000, FlashSystem A9000R and DS8000 storage systems. Version history: .. code-block:: none 2.0 - First open source driver version 2.1.0 - Support Consistency groups through Generic volume groups - Support XIV/A9000 Volume independent QoS - Support Consistency groups replication 2.3.0 - Support Report backend state """ VERSION = "2.3.0" # ThirdPartySystems wiki page CI_WIKI_NAME = "IBM_STORAGE_CI" def __init__(self, *args, **kwargs): """Initialize the driver.""" super(IBMStorageDriver, self).__init__(*args, **kwargs) self.configuration.append_config_values(driver_opts) proxy = importutils.import_class(self.configuration.proxy) active_backend_id = kwargs.get('active_backend_id', None) # Driver additional flags should be specified in the cinder.conf # preferably in each backend configuration. 
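        # A minimal, hypothetical backend stanza could look roughly like the
        # following (section name, addresses and credentials are
        # placeholders; 'proxy', 'connection_type', 'chap' and
        # 'management_ips' are the driver_opts declared above, and the san_*
        # options are consumed just below):
        #
        #     [ibm_storage_backend]
        #     volume_driver = cinder.volume.drivers.ibm.ibm_storage.ibm_storage.IBMStorageDriver
        #     proxy = cinder.volume.drivers.ibm.ibm_storage.xiv_proxy.XIVProxy
        #     connection_type = iscsi
        #     chap = disabled
        #     san_ip = 10.0.0.1
        #     san_login = admin
        #     san_password = secret
        #     san_clustername = pool_name
        #     management_ips = 10.0.0.2,10.0.0.3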
self.proxy = proxy( { "user": self.configuration.san_login, "password": self.configuration.san_password, "address": self.configuration.san_ip, "vol_pool": self.configuration.san_clustername, "connection_type": self.configuration.connection_type, "chap": self.configuration.chap, "management_ips": self.configuration.management_ips }, LOG, exception, driver=self, active_backend_id=active_backend_id, host=self.host) @staticmethod def get_driver_options(): return driver_opts def do_setup(self, context): """Setup and verify connection to IBM Storage.""" self.proxy.setup(context) def ensure_export(self, context, volume): """Ensure an export.""" return self.proxy.ensure_export(context, volume) def create_export(self, context, volume, connector): """Create an export.""" return self.proxy.create_export(context, volume) def create_volume(self, volume): """Create a volume on the IBM Storage system.""" return self.proxy.create_volume(volume) def delete_volume(self, volume): """Delete a volume on the IBM Storage system.""" self.proxy.delete_volume(volume) def remove_export(self, context, volume): """Disconnect a volume from an attached instance.""" return self.proxy.remove_export(context, volume) def initialize_connection(self, volume, connector): """Map the created volume.""" conn_info = self.proxy.initialize_connection(volume, connector) fczm_utils.add_fc_zone(conn_info) return conn_info def terminate_connection(self, volume, connector, **kwargs): """Terminate a connection to a volume.""" conn_info = self.proxy.terminate_connection(volume, connector) fczm_utils.remove_fc_zone(conn_info) return conn_info def create_volume_from_snapshot(self, volume, snapshot): """Create a volume from a snapshot.""" return self.proxy.create_volume_from_snapshot( volume, snapshot) def create_snapshot(self, snapshot): """Create a snapshot.""" return self.proxy.create_snapshot(snapshot) def delete_snapshot(self, snapshot): """Delete a snapshot.""" return self.proxy.delete_snapshot(snapshot) def get_volume_stats(self, refresh=False): """Get volume stats.""" return self.proxy.get_volume_stats(refresh) def create_cloned_volume(self, tgt_volume, src_volume): """Create Cloned Volume.""" return self.proxy.create_cloned_volume(tgt_volume, src_volume) def extend_volume(self, volume, new_size): """Extend Created Volume.""" self.proxy.extend_volume(volume, new_size) def migrate_volume(self, context, volume, host): """Migrate the volume to the specified host.""" return self.proxy.migrate_volume(context, volume, host) def manage_existing(self, volume, existing_ref): """Brings an existing backend storage object to Cinder management.""" return self.proxy.manage_volume(volume, existing_ref) def manage_existing_get_size(self, volume, existing_ref): """Return size of volume to be managed by manage_existing.""" return self.proxy.manage_volume_get_size(volume, existing_ref) def unmanage(self, volume): """Removes the specified volume from Cinder management.""" return self.proxy.unmanage_volume(volume) def freeze_backend(self, context): """Notify the backend that it's frozen. """ return self.proxy.freeze_backend(context) def thaw_backend(self, context): """Notify the backend that it's unfrozen/thawed. """ return self.proxy.thaw_backend(context) def failover_host(self, context, volumes, secondary_id=None, groups=None): """Failover a backend to a secondary replication target. 
""" return self.proxy.failover_host( context, volumes, secondary_id, groups) def get_replication_status(self, context, volume): """Return replication status.""" return self.proxy.get_replication_status(context, volume) def retype(self, ctxt, volume, new_type, diff, host): """Convert the volume to be of the new type.""" return self.proxy.retype(ctxt, volume, new_type, diff, host) def revert_to_snapshot(self, ctxt, volume, snapshot): """Revert volume to snapshot.""" return self.proxy.revert_to_snapshot(ctxt, volume, snapshot) def create_group(self, context, group): """Creates a group.""" return self.proxy.create_group(context, group) def delete_group(self, context, group, volumes): """Deletes a group.""" return self.proxy.delete_group(context, group, volumes) def create_group_snapshot(self, context, group_snapshot, snapshots): """Creates a group snapshot.""" return self.proxy.create_group_snapshot( context, group_snapshot, snapshots) def delete_group_snapshot(self, context, group_snapshot, snapshots): """Deletes a group snapshot.""" return self.proxy.delete_group_snapshot( context, group_snapshot, snapshots) def update_group(self, context, group, add_volumes, remove_volumes): """Adds or removes volume(s) to/from an existing group.""" return self.proxy.update_group( context, group, add_volumes, remove_volumes) def create_group_from_src( self, context, group, volumes, group_snapshot, snapshots, source_cg=None, source_vols=None): """Creates a group from source.""" return self.proxy.create_group_from_src( context, group, volumes, group_snapshot, snapshots, source_cg, source_vols) def enable_replication(self, context, group, volumes): """Enable replication.""" return self.proxy.enable_replication(context, group, volumes) def disable_replication(self, context, group, volumes): """Disable replication.""" return self.proxy.disable_replication(context, group, volumes) def failover_replication(self, context, group, volumes, secondary_backend_id): """Failover replication.""" return self.proxy.failover_replication(context, group, volumes, secondary_backend_id) def get_replication_error_status(self, context, groups): """Returns error info for replicated groups and its volumes.""" return self.proxy.get_replication_error_status(context, groups) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/drivers/ibm/ibm_storage/proxy.py0000664000175000017500000003116500000000000024123 0ustar00zuulzuul00000000000000# Copyright (c) 2016 IBM Corporation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
# import functools import gettext import inspect from oslo_log import log as logging from oslo_utils import timeutils from cinder.i18n import _ from cinder import version import cinder.volume.drivers.ibm.ibm_storage as storage from cinder.volume.drivers.ibm.ibm_storage import strings LOG = logging.getLogger(__name__) gettext.install('cinder') def get_total_seconds(td): return (td.microseconds + (td.seconds + td.days * 24 * 3600) * 1e6) / 1e6 def logger(func): @functools.wraps(func) def wrapper(*args, **kwargs): frm = inspect.stack()[1] log = getattr(inspect.getmodule(frm[0]), 'LOG') log.debug("Enter %s()", func.__name__) log.debug("Args: %(args)s %(kwargs)s", {'args': args, 'kwargs': kwargs}) result = func(*args, **kwargs) log.debug("Exit %s()", func.__name__) log.debug("Return: %s", result) return result return wrapper def _trace_time(fnc): @functools.wraps(fnc) def wrapper(self, *args, **kwargs): method = fnc.__name__ start = timeutils.utcnow() LOG.debug("Entered '%(method)s' at %(when)s.", {'method': method, 'when': start}) result = fnc(self, *args, **kwargs) current = timeutils.utcnow() delta = current - start LOG.debug( "Exited '%(method)s' at %(when)s, after %(seconds)f seconds.", {'method': method, 'when': start, 'seconds': get_total_seconds(delta)}) return result return wrapper class IBMStorageProxy(object): """Base class for connecting to storage. Abstract Proxy between the XIV/DS8K Cinder Volume and Spectrum Accelerate Storage (e.g. XIV, Spectruam Accelerate, A9000, A9000R) """ prefix = storage.XIV_LOG_PREFIX def __init__(self, storage_info, logger, exception, driver=None, active_backend_id=None): """Initialize Proxy.""" self.storage_info = storage_info self.meta = dict() self.logger = logger self.meta['exception'] = exception self.meta['openstack_version'] = "cinder-%s" % version.version_string() self.meta['stat'] = None self.driver = driver if driver is not None: self.full_version = "%(title)s (v%(version)s)" % { 'title': strings.TITLE, 'version': driver.VERSION} else: self.full_version = strings.TITLE self.active_backend_id = active_backend_id self.targets = {} self._read_replication_devices() self.meta['bypass_connection_check'] = ( self._get_safely_from_configuration( storage.FLAG_KEYS['bypass_connection_check'], False)) @_trace_time def setup(self, context): """Driver setup.""" pass @_trace_time def create_volume(self, volume): """Creates a volume.""" pass @_trace_time def ensure_export(self, context, volume): ctxt = context.as_dict() if hasattr(context, 'as_dict') else "Empty" LOG.debug("ensure_export: %(volume)s context : %(ctxt)s", {'volume': volume['name'], 'ctxt': ctxt}) return 1 @_trace_time def create_export(self, context, volume): ctxt = context.as_dict() if hasattr(context, 'as_dict') else "Empty" LOG.debug("create_export: %(volume)s context : %(ctxt)s", {'volume': volume['name'], 'ctxt': ctxt}) return {} @_trace_time def delete_volume(self, volume): """Deletes a volume on the IBM Storage machine.""" pass @_trace_time def remove_export(self, context, volume): """Remove export. Disconnect a volume from an attached instance """ ctxt = context.as_dict() if hasattr(context, 'as_dict') else "Empty" LOG.debug("remove_export: %(volume)s context : %(ctxt)s", {'volume': volume['name'], 'ctxt': ctxt}) @_trace_time def initialize_connection(self, volume, connector): """Initialize connection. 
Maps the created volume to the cinder volume node, and returns the iSCSI/FC targets to be used in the instance """ pass @_trace_time def terminate_connection(self, volume, connector): """Terminate connection.""" pass @_trace_time def create_volume_from_snapshot(self, volume, snapshot): """create volume from snapshot.""" pass @_trace_time def create_snapshot(self, snapshot): """create snapshot""" pass @_trace_time def delete_snapshot(self, snapshot): """delete snapshot.""" pass @_trace_time def get_volume_stats(self, refresh=False): """get volume stats.""" if self.meta['stat'] is None or refresh: self._update_stats() return self.meta['stat'] @_trace_time def _update_stats(self): """fetch and update stats.""" pass @_trace_time def check_for_export(self, context, volume_id): pass @_trace_time def copy_volume_to_image(self, context, volume, image_service, image_id): """Copy volume to image. Handled by ISCSiDriver """ LOG.info("The copy_volume_to_image feature is not implemented.") raise NotImplementedError() @_trace_time def create_cloned_volume(self, volume, src_vref): """Create cloned volume.""" pass @_trace_time def volume_exists(self, volume): """Checks if a volume exists on xiv.""" pass @_trace_time def validate_connection(self): """Validates ibm_storage connection info.""" pass @_trace_time def retype(self, ctxt, volume, new_type, diff, host): """Convert the volume to be of the new type.""" pass @_trace_time def revert_to_snapshot(self, context, volume, snapshot): """Revert volume to snapshot.""" pass @_trace_time def _get_bunch_from_host( self, connector, host_id=0, host_name=None, chap=None): """Get's a Bunch describing a host""" if not host_name: LOG.debug("Connector %(conn)s", {'conn': connector}) current_host_name = host_name or storage.get_host_or_create_from_iqn( connector) initiator = connector.get('initiator', None) wwpns = connector.get("wwpns", []) if len(wwpns) == 0 and "wwnns" in connector: wwpns = connector.get("wwns", []) return {'name': current_host_name, 'initiator': initiator, 'id': host_id, 'wwpns': wwpns, 'chap': chap} def _log(self, level, message, **kwargs): """Wrapper around the logger""" to_log = _(self.prefix + message) # NOQA if len(kwargs) > 0: to_log = to_log % kwargs getattr(self.logger, level)(to_log) def _get_exception(self): """Get's Cinder exception""" return self.meta['exception'].CinderException def _get_code_and_status_or_message(self, exception): """Returns status message returns a string made out of code and status if present, else message """ if (getattr(exception, "code", None) is not None and getattr(exception, "status", None) is not None): return "Status: '%s', Code: %s" % ( exception.status, exception.code) return str(exception) def _get_driver_super(self): """Gets the IBM Storage Drivers super class returns driver super class """ return super(self.driver.__class__, self.driver) def _get_connection_type(self): """Get Connection Type(iscsi|fibre_channel) :returns: iscsi|fibre_channel """ return self._get_safely_from_configuration( storage.CONF_KEYS['connection_type'], default=storage.XIV_CONNECTION_TYPE_ISCSI) def _is_iscsi(self): """Checks if connection type is iscsi""" connection_type = self._get_connection_type() return connection_type == storage.XIV_CONNECTION_TYPE_ISCSI def _get_management_ips(self): """Gets the management IP addresses from conf""" return self._get_safely_from_configuration( storage.CONF_KEYS['management_ips'], default='') def _get_chap_type(self): """Get CHAP Type(disabled|enabled) :returns: disabled|enabled """ 
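        # Note (added comment): the 'chap' backend option is declared in
        # ibm_storage.py with choices 'disabled'|'enabled'; when it is unset,
        # this falls back to storage.CHAP_NONE via the default below.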
LOG.debug("_get_chap_type chap: %(chap)s", {'chap': storage.CONF_KEYS['chap']}) return self._get_safely_from_configuration( storage.CONF_KEYS['chap'], default=storage.CHAP_NONE) def _get_safely_from_configuration(self, key, default=None): """Get value of key from configuration Get's a key from the backend configuration if available. If not available returns default value """ if not self.driver: LOG.debug("self.driver is missing") return default config_value = self.driver.configuration.safe_get(key) if not config_value: LOG.debug("missing key %(key)s ", {'key': key}) return default return config_value # Backend_id values: # - The primary backend_id is marked 'default' # - The secondary backend_ids are the values of the targets. # - In most cases the given value is one of the above, but in some cases # it can be None. For example in failover_host, the value None means # that the function should select a target by itself (consider multiple # targets) def _get_primary_backend_id(self): return strings.PRIMARY_BACKEND_ID def _get_secondary_backend_id(self): return self._get_target() def _get_active_backend_id(self): if self.active_backend_id == strings.PRIMARY_BACKEND_ID: return self._get_primary_backend_id() else: return self._get_secondary_backend_id() def _get_inactive_backend_id(self): if self.active_backend_id != strings.PRIMARY_BACKEND_ID: return self._get_primary_backend_id() else: return self._get_secondary_backend_id() def _get_target_params(self, target): if not self.targets: LOG.debug("No targets available") return None try: params = self.targets[target] return params except Exception: LOG.debug("No target called '%(target)s'", {'target': target}) return None def _get_target(self): """returns an arbitrary target if available""" if not self.targets: return None try: target = list(self.targets.keys())[0] return target except Exception: return None @_trace_time def _read_replication_devices(self): """Read replication devices from configuration Several replication devices are permitted. If an entry already exists an error is assumed. The format is: replication_device = backend_id:vendor-id-1,unique_key:val.... """ if not self.driver: return replication_devices = self._get_safely_from_configuration( 'replication_device', default={}) if not replication_devices: LOG.debug('No replication devices were found') for dev in replication_devices: LOG.debug('Replication device found: %(dev)s', {'dev': dev}) backend_id = dev.get('backend_id', None) if backend_id is None: LOG.error("Replication is missing backend_id: %(dev)s", {'dev': dev}) elif self.targets.get(backend_id, None): LOG.error("Multiple entries for replication %(dev)s", {'dev': dev}) else: self.targets[backend_id] = {} device = self.targets[backend_id] for k, v in dev.items(): if k != 'backend_id': device[k] = v ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/drivers/ibm/ibm_storage/strings.py0000664000175000017500000000274300000000000024433 0ustar00zuulzuul00000000000000# Copyright (c) 2016 IBM Corporation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # # General TITLE = "IBM Storage" DEFAULT = "Default" # PROMPTS CERTIFICATES_PATH = "/opt/ibm/ds8k_certs/" # DEFAULT INSTALLED VALUES XIV_BACKEND_PREFIX = "IBM-XIV" DS8K_BACKEND_PREFIX = "IBM-DS8K" # Replication Status Strings REPLICATION_STATUS_DISABLED = 'disabled' # no replication REPLICATION_STATUS_ERROR = 'error' # replication in error state # replication copying data to secondary (inconsistent) REPLICATION_STATUS_COPYING = 'copying' # replication copying data to secondary (consistent) REPLICATION_STATUS_ACTIVE = 'active' # replication copying data to secondary (consistent) REPLICATION_STATUS_ACTIVE_STOPPED = 'active-stopped' # replication copying data to secondary (consistent) REPLICATION_STATUS_INACTIVE = 'inactive' # Replication Failback String PRIMARY_BACKEND_ID = 'default' # Volume Extra Metadata Default Value METADATA_IS_TRUE = ' TRUE' ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/drivers/ibm/ibm_storage/xiv_proxy.py0000664000175000017500000035030500000000000025011 0ustar00zuulzuul00000000000000# Copyright (c) 2016 IBM Corporation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
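#
# Illustrative configuration sketch (not part of the original source): the
# replication targets parsed by IBMStorageProxy._read_replication_devices()
# in proxy.py above come from 'replication_device' entries in the backend
# section of cinder.conf, in the "backend_id:<id>,key:value,..." format noted
# there. A hypothetical entry could look roughly like:
#
#     replication_device = backend_id:xiv_target_1,san_ip:10.0.0.9,san_login:admin,san_password:secret
#
# Only 'backend_id' is interpreted by _read_replication_devices() itself;
# every other key:value pair is stored unchanged in self.targets[backend_id]
# for the concrete proxy (such as XIVProxy below) to consume, so the keys
# other than 'backend_id' shown here are placeholders.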
# import datetime import re import socket from oslo_log import log as logging from oslo_utils import importutils pyxcli = importutils.try_import("pyxcli") if pyxcli: from pyxcli import client from pyxcli import errors from pyxcli.events import events from pyxcli.mirroring import mirrored_entities from pyxcli import transports from cinder import context from cinder.i18n import _ from cinder.objects import fields from cinder import volume as c_volume import cinder.volume.drivers.ibm.ibm_storage as storage from cinder.volume.drivers.ibm.ibm_storage import certificate from cinder.volume.drivers.ibm.ibm_storage import cryptish from cinder.volume.drivers.ibm.ibm_storage import proxy from cinder.volume.drivers.ibm.ibm_storage import strings from cinder.volume.drivers.ibm.ibm_storage import xiv_replication as repl from cinder.volume import group_types from cinder.volume import qos_specs from cinder.volume import volume_types from cinder.volume import volume_utils OPENSTACK_PRODUCT_NAME = "OpenStack" PERF_CLASS_NAME_PREFIX = "cinder-qos" HOST_BAD_NAME = "HOST_BAD_NAME" VOLUME_IS_MAPPED = "VOLUME_IS_MAPPED" CONNECTIONS_PER_MODULE = 2 MIN_LUNID = 1 MAX_LUNID = 511 SYNC = 'sync' ASYNC = 'async' SYNC_TIMEOUT = 300 SYNCHED_STATES = ['synchronized', 'rpo ok'] PYXCLI_VERSION = '1.1.6' LOG = logging.getLogger(__name__) # performance class strings - used in exceptions PERF_CLASS_ERROR = _("Unable to create or get performance class: %(details)s") PERF_CLASS_ADD_ERROR = _("Unable to add volume to performance class: " "%(details)s") PERF_CLASS_VALUES_ERROR = _("A performance class with the same name but " "different values exists: %(details)s") # setup strings - used in exceptions SETUP_BASE_ERROR = _("Unable to connect to %(title)s: %(details)s") SETUP_INVALID_ADDRESS = _("Unable to connect to the storage system " "at '%(address)s', invalid address.") # create volume strings - used in exceptions CREATE_VOLUME_BASE_ERROR = _("Unable to create volume: %(details)s") # initialize connection strings - used in exceptions CONNECTIVITY_FC_NO_TARGETS = _("Unable to detect FC connection between the " "compute host and the storage, please ensure " "that zoning is set up correctly.") # terminate connection strings - used in logging TERMINATE_CONNECTION_BASE_ERROR = ("Unable to terminate the connection " "for volume '%(volume)s': %(error)s.") TERMINATE_CONNECTION_HOST_ERROR = ("Terminate connection for volume " "'%(volume)s': for volume '%(volume)s': " "%(host)s %(error)s.") # delete volume strings - used in logging DELETE_VOLUME_BASE_ERROR = ("Unable to delete volume '%(volume)s': " "%(error)s.") # manage volume strings - used in exceptions MANAGE_VOLUME_BASE_ERROR = _("Unable to manage the volume '%(volume)s': " "%(error)s.") INCOMPATIBLE_PYXCLI = _('Incompatible pyxcli found. Mininum: %(required)s ' 'Found: %(found)s') class XIVProxy(proxy.IBMStorageProxy): """Proxy between the Cinder Volume and Spectrum Accelerate Storage. Supports IBM XIV, Spectrum Accelerate, A9000, A9000R Version: 2.3.0 Required pyxcli version: 1.1.6 .. 
code:: text 2.0 - First open source driver version 2.1.0 - Support Consistency groups through Generic volume groups - Support XIV/A9000 Volume independent QoS - Support groups replication 2.3.0 - Support Report backend state """ def __init__(self, storage_info, logger, exception, driver=None, active_backend_id=None, host=None): """Initialize Proxy.""" if not active_backend_id: active_backend_id = strings.PRIMARY_BACKEND_ID proxy.IBMStorageProxy.__init__( self, storage_info, logger, exception, driver, active_backend_id) LOG.info("__init__: storage_info: %(keys)s", {'keys': self.storage_info}) if active_backend_id: LOG.info("__init__: active_backend_id: %(id)s", {'id': active_backend_id}) self.ibm_storage_cli = None self.meta['ibm_storage_portal'] = None self.meta['ibm_storage_iqn'] = None self.ibm_storage_remote_cli = None self.meta['ibm_storage_fc_targets'] = [] self.meta['storage_version'] = None self.system_id = None @proxy._trace_time def setup(self, context): msg = '' if pyxcli: if pyxcli.version < PYXCLI_VERSION: msg = (INCOMPATIBLE_PYXCLI % {'required': PYXCLI_VERSION, 'found': pyxcli.version }) else: msg = (SETUP_BASE_ERROR % {'title': strings.TITLE, 'details': "IBM Python XCLI Client (pyxcli) not found" }) if msg != '': LOG.error(msg) raise self._get_exception()(msg) """Connect ssl client.""" LOG.info("Setting up connection to %(title)s...\n" "Active backend_id: '%(id)s'.", {'title': strings.TITLE, 'id': self.active_backend_id}) self.ibm_storage_cli = self._init_xcli(self.active_backend_id) if self._get_connection_type() == storage.XIV_CONNECTION_TYPE_ISCSI: self.meta['ibm_storage_iqn'] = ( self._call_xiv_xcli("config_get"). as_dict('name')['iscsi_name'].value) portals = storage.get_online_iscsi_ports(self.ibm_storage_cli) if len(portals) == 0: msg = (SETUP_BASE_ERROR, {'title': strings.TITLE, 'details': "No iSCSI portals available on the Storage." }) raise self._get_exception()( _("%(prefix)s %(portals)s") % {'prefix': storage.XIV_LOG_PREFIX, 'portals': msg}) self.meta['ibm_storage_portal'] = "%s:3260" % portals[:1][0] remote_id = self._get_secondary_backend_id() if remote_id: self.ibm_storage_remote_cli = self._init_xcli(remote_id) self._event_service_start() self._get_pool() LOG.info("IBM Storage %(common_ver)s " "xiv_proxy %(proxy_ver)s. ", {'common_ver': self.full_version, 'proxy_ver': self.full_version}) self._update_system_id() if remote_id: self._update_active_schedule_objects() self._update_remote_schedule_objects() LOG.info("Connection to the IBM storage " "system established successfully.") @proxy._trace_time def _update_active_schedule_objects(self): """Set schedule objects on active backend. The value 00:20:00 is covered in XIV by a pre-defined object named min_interval. 
""" schedules = self._call_xiv_xcli("schedule_list").as_dict('name') for rate in repl.Replication.async_rates: if rate.schedule == '00:00:20': continue name = rate.schedule_name schedule = schedules.get(name, None) if schedule: LOG.debug('Exists on local backend %(sch)s', {'sch': name}) interval = schedule.get('interval', '') if interval != rate.schedule: msg = (_("Schedule %(sch)s exists with incorrect " "value %(int)s") % {'sch': name, 'int': interval}) LOG.error(msg) raise self.meta['exception'].VolumeBackendAPIException( data=msg) else: LOG.debug('create %(sch)s', {'sch': name}) try: self._call_xiv_xcli("schedule_create", schedule=name, type='interval', interval=rate.schedule) except errors.XCLIError: msg = (_("Setting up Async mirroring failed, " "schedule %(sch)s is not supported on system: " " %(id)s.") % {'sch': name, 'id': self.system_id}) LOG.error(msg) raise self.meta['exception'].VolumeBackendAPIException( data=msg) @proxy._trace_time def _update_remote_schedule_objects(self): """Set schedule objects on remote backend. The value 00:20:00 is covered in XIV by a pre-defined object named min_interval. """ schedules = self._call_remote_xiv_xcli("schedule_list").as_dict('name') for rate in repl.Replication.async_rates: if rate.schedule == '00:00:20': continue name = rate.schedule_name if schedules.get(name, None): LOG.debug('Exists on remote backend %(sch)s', {'sch': name}) interval = schedules.get(name, None)['interval'] if interval != rate.schedule: msg = (_("Schedule %(sch)s exists with incorrect " "value %(int)s") % {'sch': name, 'int': interval}) LOG.error(msg) raise self.meta['exception'].VolumeBackendAPIException( data=msg) else: try: self._call_remote_xiv_xcli("schedule_create", schedule=name, type='interval', interval=rate.schedule) except errors.XCLIError: msg = (_("Setting up Async mirroring failed, " "schedule %(sch)s is not supported on system: " " %(id)s.") % {'sch': name, 'id': self.system_id}) LOG.error(msg) raise self.meta['exception'].VolumeBackendAPIException( data=msg) def _get_extra_specs(self, type_id): """get extra specs to match the type_id type_id can derive from volume or from consistency_group """ if type_id is None: return {} return c_volume.volume_types.get_volume_type_extra_specs(type_id) def _update_system_id(self): if self.system_id: return local_ibm_storage_cli = self._init_xcli(strings.PRIMARY_BACKEND_ID) if not local_ibm_storage_cli: LOG.error('Failed to connect to main backend. 
' 'Cannot retrieve main backend system_id') return system_id = local_ibm_storage_cli.cmd.config_get().as_dict( 'name')['system_id'].value LOG.debug('system_id: %(id)s', {'id': system_id}) self.system_id = system_id @proxy._trace_time def _get_qos_specs(self, type_id): """Gets the qos specs from cinder.""" ctxt = context.get_admin_context() volume_type = volume_types.get_volume_type(ctxt, type_id) if not volume_type: return None qos_specs_id = volume_type.get('qos_specs_id', None) if qos_specs_id: return qos_specs.get_qos_specs( ctxt, qos_specs_id).get('specs', None) return None @proxy._trace_time def _qos_create_kwargs_for_xcli(self, specs): args = {} for key in specs: if key == 'bw': args['max_bw_rate'] = specs[key] if key == 'iops': args['max_io_rate'] = specs[key] return args def _qos_remove_vol(self, volume): try: self._call_xiv_xcli("perf_class_remove_vol", vol=volume['name']) except errors.VolumeNotConnectedToPerfClassError as e: details = self._get_code_and_status_or_message(e) LOG.debug(details) return True except errors.XCLIError as e: details = self._get_code_and_status_or_message(e) msg_data = (_("Unable to add volume to performance " "class: %(details)s") % {'details': details}) LOG.error(msg_data) raise self.meta['exception'].VolumeBackendAPIException( data=msg_data) return True def _qos_add_vol(self, volume, perf_class_name): try: self._call_xiv_xcli("perf_class_add_vol", vol=volume['name'], perf_class=perf_class_name) except errors.VolumeAlreadyInPerfClassError as e: details = self._get_code_and_status_or_message(e) LOG.debug(details) return True except errors.XCLIError as e: details = self._get_code_and_status_or_message(e) msg = PERF_CLASS_ADD_ERROR % {'details': details} LOG.error(msg) raise self.meta['exception'].VolumeBackendAPIException(data=msg) return True def _check_perf_class_on_backend(self, specs): """Checking if class exists on backend. 
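The perf class name is built from the sorted QoS specs under the 'cinder-qos' prefix (as a purely illustrative example, specs of {'bw': '100', 'iops': '5000'} map to cinder-qos_bw_100_iops_5000), and that name is looked up on the backend;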
if not - create it.""" perf_class_name = PERF_CLASS_NAME_PREFIX if specs is None or specs == {}: return '' for key, value in sorted(specs.items()): perf_class_name += '_' + key + '_' + value try: classes_list = self._call_xiv_xcli("perf_class_list", perf_class=perf_class_name ).as_list # list is not empty, check if class has the right values for perf_class in classes_list: if (not perf_class.get('max_iops', None) == specs.get('iops', '0') or not perf_class.get('max_bw', None) == specs.get('bw', '0')): raise self.meta['exception'].VolumeBackendAPIException( data=PERF_CLASS_VALUES_ERROR % {'details': perf_class_name}) except errors.XCLIError as e: details = self._get_code_and_status_or_message(e) msg = PERF_CLASS_ERROR % {'details': details} LOG.error(msg) raise self.meta['exception'].VolumeBackendAPIException(data=msg) # class does not exist, create it if not classes_list: self._create_qos_class(perf_class_name, specs) return perf_class_name def _get_type_from_perf_class_name(self, perf_class_name): _type = re.findall('type_(independent|shared)', perf_class_name) return _type[0] if _type else None def _create_qos_class(self, perf_class_name, specs): """Create the qos class on the backend.""" try: # check if we have a shared (default) perf class # or an independent perf class _type = self._get_type_from_perf_class_name(perf_class_name) if _type: self._call_xiv_xcli("perf_class_create", perf_class=perf_class_name, type=_type) else: self._call_xiv_xcli("perf_class_create", perf_class=perf_class_name) except errors.XCLIError as e: details = self._get_code_and_status_or_message(e) msg = PERF_CLASS_ERROR % {'details': details} LOG.error(msg) raise self.meta['exception'].VolumeBackendAPIException(data=msg) try: args = self._qos_create_kwargs_for_xcli(specs) self._call_xiv_xcli("perf_class_set_rate", perf_class=perf_class_name, **args) return perf_class_name except errors.XCLIError as e: details = self._get_code_and_status_or_message(e) # attempt to clean up self._call_xiv_xcli("perf_class_delete", perf_class=perf_class_name) msg = PERF_CLASS_ERROR % {'details': details} LOG.error(msg) raise self.meta['exception'].VolumeBackendAPIException(data=msg) def _qos_specs_from_volume(self, volume): """Returns qos_specs of volume. 
checks if there is a type on the volume if so, checks if it has been associated with a qos class returns the name of that class """ type_id = volume.get('volume_type_id', None) if not type_id: return None return self._get_qos_specs(type_id) def _get_replication_info(self, specs): info, msg = repl.Replication.extract_replication_info_from_specs(specs) if not info: LOG.error(msg) raise self._get_exception()(message=msg) return info @proxy._trace_time def _create_volume(self, volume): """Internal implementation to create a volume.""" size = storage.gigabytes_to_blocks(float(volume['size'])) pool = self._get_backend_pool() try: self._call_xiv_xcli( "vol_create", vol=volume['name'], size_blocks=size, pool=pool) except errors.SystemOutOfSpaceError: msg = _("Unable to create volume: System is out of space.") LOG.error(msg) raise self._get_exception()(msg) except errors.PoolOutOfSpaceError: msg = (_("Unable to create volume: pool '%(pool)s' is " "out of space.") % {'pool': pool}) LOG.error(msg) raise self._get_exception()(msg) except errors.XCLIError as e: details = self._get_code_and_status_or_message(e) msg = (CREATE_VOLUME_BASE_ERROR, {'details': details}) LOG.error(msg) raise self.meta['exception'].VolumeBackendAPIException(data=msg) @proxy._trace_time def create_volume(self, volume): """Creates a volume.""" # read replication information specs = self._get_extra_specs(volume.get('volume_type_id', None)) replication_info = self._get_replication_info(specs) self._create_volume(volume) return self.handle_created_vol_properties(replication_info, volume) def handle_created_vol_properties(self, replication_info, volume): volume_update = {} LOG.debug('checking replication_info %(rep)s', {'rep': replication_info}) volume_update['replication_status'] = 'disabled' cg = volume.group and volume_utils.is_group_a_cg_snapshot_type( volume.group) if replication_info['enabled']: try: repl.VolumeReplication(self).create_replication( volume.name, replication_info) except Exception as e: details = self._get_code_and_status_or_message(e) msg = ('Failed create_replication for ' 'volume %(vol)s: %(err)s', {'vol': volume['name'], 'err': details}) LOG.error(msg) if cg: cg_name = self._cg_name_from_volume(volume) self._silent_delete_volume_from_cg(volume, cg_name) self._silent_delete_volume(volume=volume) raise volume_update['replication_status'] = 'enabled' if cg: if volume.group.is_replicated: # for replicated Consistency Group: # The Volume must be mirrored, and its mirroring settings must # be identical to those of the Consistency Group: # mirroring type (e.g., synchronous), # mirroring status, mirroring target(backend) group_specs = group_types.get_group_type_specs( volume.group.group_type_id) group_rep_info = self._get_replication_info(group_specs) msg = None if volume_update['replication_status'] != 'enabled': msg = ('Cannot add non-replicated volume into' ' replicated group') elif replication_info['mode'] != group_rep_info['mode']: msg = ('Volume replication type and Group replication type' ' should be the same') elif volume.host != volume.group.host: msg = 'Cannot add volume to Group on different host' elif volume.group['replication_status'] == 'enabled': # if group is mirrored and enabled, compare state. 
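# The check below inspects the mirroring state of both the new volume
# and the target consistency group; the volume is added to the group
# only if both report a sync_state of 'Synchronized', otherwise an
# error is raised.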
group_name = self._cg_name_from_group(volume.group) me = mirrored_entities.MirroredEntities( self.ibm_storage_cli) me_objs = me.get_mirror_resources_by_name_map() vol_obj = me_objs['volumes'][volume.name] vol_sync_state = vol_obj['sync_state'] cg_sync_state = me_objs['cgs'][group_name]['sync_state'] if (vol_sync_state != 'Synchronized' or cg_sync_state != 'Synchronized'): msg = ('Cannot add volume to Group. Both volume and ' 'group should have sync_state = Synchronized') if msg: LOG.error(msg) raise self.meta['exception'].VolumeBackendAPIException( data=msg) try: cg_name = self._cg_name_from_volume(volume) self._call_xiv_xcli( "cg_add_vol", vol=volume['name'], cg=cg_name) except errors.XCLIError as e: details = self._get_code_and_status_or_message(e) self._silent_delete_volume(volume=volume) msg = (CREATE_VOLUME_BASE_ERROR, {'details': details}) LOG.error(msg) raise self.meta['exception'].VolumeBackendAPIException( data=msg) perf_class_name = None specs = self._qos_specs_from_volume(volume) if specs: try: perf_class_name = self._check_perf_class_on_backend(specs) if perf_class_name: self._call_xiv_xcli("perf_class_add_vol", vol=volume['name'], perf_class=perf_class_name) except errors.XCLIError as e: details = self._get_code_and_status_or_message(e) if cg: cg_name = self._cg_name_from_volume(volume) self._silent_delete_volume_from_cg(volume, cg_name) self._silent_delete_volume(volume=volume) msg = PERF_CLASS_ADD_ERROR % {'details': details} LOG.error(msg) raise self.meta['exception'].VolumeBackendAPIException( data=msg) return volume_update @proxy._trace_time def enable_replication(self, context, group, volumes): """Enable cg replication""" # fetch replication info group_specs = group_types.get_group_type_specs(group.group_type_id) if not group_specs: msg = 'No group specs inside group type' LOG.error(msg) raise self.meta['exception'].VolumeBackendAPIException(data=msg) # Add this field to adjust it to generic replication (for volumes) replication_info = self._get_replication_info(group_specs) if volume_utils.is_group_a_cg_snapshot_type(group): # take every vol out of cg - we can't mirror the cg otherwise. 
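# Overall flow for CG-type groups: temporarily remove the volumes from
# the CG, ensure each volume is mirrored, create the CG on the remote
# backend (an already-existing CG is tolerated), mirror the CG itself,
# and finally add the volumes back to the CG.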
if volumes: self._update_consistencygroup(context, group, remove_volumes=volumes) for volume in volumes: enabled_status = fields.ReplicationStatus.ENABLED if volume['replication_status'] != enabled_status: repl.VolumeReplication(self).create_replication( volume.name, replication_info) # mirror entire group group_name = self._cg_name_from_group(group) try: self._create_consistencygroup_on_remote(context, group_name) except errors.CgNameExistsError: LOG.debug("CG name %(cg)s exists, no need to open it on " "secondary backend.", {'cg': group_name}) repl.GroupReplication(self).create_replication(group_name, replication_info) updated_volumes = [] if volumes: # add volumes back to cg self._update_consistencygroup(context, group, add_volumes=volumes) for volume in volumes: updated_volumes.append( {'id': volume['id'], 'replication_status': fields.ReplicationStatus.ENABLED}) return ({'replication_status': fields.ReplicationStatus.ENABLED}, updated_volumes) else: # For generic groups we replicate all the volumes updated_volumes = [] for volume in volumes: repl.VolumeReplication(self).create_replication( volume.name, replication_info) # update status for volume in volumes: updated_volumes.append( {'id': volume['id'], 'replication_status': fields.ReplicationStatus.ENABLED}) return ({'replication_status': fields.ReplicationStatus.ENABLED}, updated_volumes) @proxy._trace_time def disable_replication(self, context, group, volumes): """disables CG replication""" group_specs = group_types.get_group_type_specs(group.group_type_id) if not group_specs: msg = 'No group specs inside group type' LOG.error(msg) raise self.meta['exception'].VolumeBackendAPIException(data=msg) replication_info = self._get_replication_info(group_specs) updated_volumes = [] if volume_utils.is_group_a_cg_snapshot_type(group): # one call deletes replication for cgs and volumes together. group_name = self._cg_name_from_group(group) repl.GroupReplication(self).delete_replication(group_name, replication_info) for volume in volumes: # xiv locks volumes after deletion of replication. # we need to unlock it for further use. 
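# Unlock is attempted on both the local and remote backends and the
# volume is also removed from the remote CG; a volume that is missing
# on the remote side (VolumeBadNameError) is ignored, while any other
# XCLI error aborts the operation.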
try: self.ibm_storage_cli.cmd.vol_unlock(vol=volume.name) self.ibm_storage_remote_cli.cmd.vol_unlock( vol=volume.name) self.ibm_storage_remote_cli.cmd.cg_remove_vol( vol=volume.name) except errors.VolumeBadNameError: LOG.debug("Failed to delete vol %(vol)s - " "ignoring.", {'vol': volume.name}) except errors.XCLIError as e: details = self._get_code_and_status_or_message(e) msg = ('Failed to unlock volumes %(details)s' % {'details': details}) LOG.error(msg) raise self.meta['exception'].VolumeBackendAPIException( data=msg) updated_volumes.append( {'id': volume.id, 'replication_status': fields.ReplicationStatus.DISABLED}) else: # For generic groups we replicate all the volumes updated_volumes = [] for volume in volumes: repl.VolumeReplication(self).delete_replication( volume.name, replication_info) # update status for volume in volumes: try: self.ibm_storage_cli.cmd.vol_unlock(vol=volume.name) self.ibm_storage_remote_cli.cmd.vol_unlock( vol=volume.name) except errors.XCLIError as e: details = self._get_code_and_status_or_message(e) msg = (_('Failed to unlock volumes %(details)s'), {'details': details}) LOG.error(msg) raise self.meta['exception'].VolumeBackendAPIException( data=msg) updated_volumes.append( {'id': volume['id'], 'replication_status': fields.ReplicationStatus.DISABLED}) return ({'replication_status': fields.ReplicationStatus.DISABLED}, updated_volumes) def get_secondary_backend_id(self, secondary_backend_id): if secondary_backend_id is None: secondary_backend_id = self._get_target() if secondary_backend_id is None: msg = _("No targets defined. Can't perform failover.") LOG.error(msg) raise self.meta['exception'].VolumeBackendAPIException( data=msg) return secondary_backend_id def check_for_splitbrain(self, volumes, pool_master, pool_slave): if volumes: # check for split brain situations # check for files that are available on both volumes # and are not in an active mirroring relation split_brain = self._potential_split_brain( self.ibm_storage_cli, self.ibm_storage_remote_cli, volumes, pool_master, pool_slave) if split_brain: # if such a situation exists stop and raise an exception! msg = (_("A potential split brain condition has been found " "with the following volumes: \n'%(volumes)s.'") % {'volumes': split_brain}) LOG.error(msg) raise self.meta['exception'].VolumeBackendAPIException( data=msg) def failover_replication(self, context, group, volumes, secondary_backend_id): """Failover a cg with all it's volumes. if secondery_id is default, cg needs to be failed back. """ volumes_updated = [] goal_status = '' pool_master = None group_updated = {'replication_status': group.replication_status} LOG.info("failover_replication: of cg %(cg)s " "from %(active)s to %(id)s", {'cg': group.get('name'), 'active': self.active_backend_id, 'id': secondary_backend_id}) if secondary_backend_id == strings.PRIMARY_BACKEND_ID: # default as active backend id if self._using_default_backend(): LOG.info("CG has been failed back. " "No need to fail back again.") return group_updated, volumes_updated # get the master pool, not using default id. pool_master = self._get_target_params( self.active_backend_id)['san_clustername'] pool_slave = self.storage_info[storage.FLAG_KEYS['storage_pool']] goal_status = 'enabled' vol_goal_status = 'available' else: if not self._using_default_backend(): LOG.info("cg already failed over.") return group_updated, volumes_updated # using same api as Cheesecake, we need # replciation_device entry. so we use get_targets. 
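# The failover target is taken from the backend's replication_device
# configuration; only its backend_id and san_clustername (the remote
# pool) are consulted here. A minimal, illustrative cinder.conf
# fragment with placeholder values might look like:
#   replication_device = backend_id:<remote_id>,san_clustername:<remote_pool>,...
# Any additional keys in that entry are driver specific; see the IBM
# Storage driver documentation.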
secondary_backend_id = self.get_secondary_backend_id( secondary_backend_id) pool_master = self.storage_info[storage.FLAG_KEYS['storage_pool']] pool_slave = self._get_target_params( secondary_backend_id)['san_clustername'] goal_status = fields.ReplicationStatus.FAILED_OVER vol_goal_status = fields.ReplicationStatus.FAILED_OVER # we should have secondary_backend_id by here. self.ibm_storage_remote_cli = self._init_xcli(secondary_backend_id) # check for split brain in mirrored volumes self.check_for_splitbrain(volumes, pool_master, pool_slave) group_specs = group_types.get_group_type_specs(group.group_type_id) if group_specs is None: msg = "No group specs found. Cannot failover." LOG.error(msg) raise self.meta['exception'].VolumeBackendAPIException(data=msg) failback = (secondary_backend_id == strings.PRIMARY_BACKEND_ID) result = False details = "" if volume_utils.is_group_a_cg_snapshot_type(group): result, details = repl.GroupReplication(self).failover(group, failback) else: replicated_vols = [] for volume in volumes: result, details = repl.VolumeReplication(self).failover( volume, failback) if not result: break replicated_vols.append(volume) # switch the replicated ones back in case of error if not result: for volume in replicated_vols: result, details = repl.VolumeReplication(self).failover( volume, not failback) if result: status = goal_status group_updated['replication_status'] = status else: status = 'error' updates = {'status': vol_goal_status} if status == 'error': group_updated['replication_extended_status'] = details # if replication on cg was successful, then all of the volumes # have been successfully replicated as well. for volume in volumes: volumes_updated.append({ 'id': volume.id, 'updates': updates }) # replace between active and secondary xcli self._replace_xcli_to_remote_xcli() self.active_backend_id = secondary_backend_id return group_updated, volumes_updated def _replace_xcli_to_remote_xcli(self): temp_ibm_storage_cli = self.ibm_storage_cli self.ibm_storage_cli = self.ibm_storage_remote_cli self.ibm_storage_remote_cli = temp_ibm_storage_cli def _get_replication_target_params(self): LOG.debug('_get_replication_target_params.') if not self.targets: msg = _("No targets available for replication") LOG.error(msg) raise self.meta['exception'].VolumeBackendAPIException(data=msg) no_of_targets = len(self.targets) if no_of_targets > 1: msg = _("Too many targets configured. Only one is supported") LOG.error(msg) raise self.meta['exception'].VolumeBackendAPIException(data=msg) LOG.debug('_get_replication_target_params selecting target...') target = self._get_target() if not target: msg = _("No targets available for replication.") LOG.error(msg) raise self.meta['exception'].VolumeBackendAPIException(data=msg) params = self._get_target_params(target) if not params: msg = (_("Missing target information for target '%(target)s'"), {'target': target}) LOG.error(msg) raise self.meta['exception'].VolumeBackendAPIException(data=msg) return target, params def _delete_volume(self, vol_name): """Deletes a volume on the Storage.""" LOG.debug("_delete_volume: %(volume)s", {'volume': vol_name}) try: self._call_xiv_xcli("vol_delete", vol=vol_name) except errors.VolumeBadNameError: # Don't throw error here, allow the cinder volume manager # to set the volume as deleted if it's not available # on the XIV box LOG.info("Volume '%(volume)s' not found on storage", {'volume': vol_name}) def _silent_delete_volume(self, volume): """Silently delete a volume. 
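Any XCLIError raised by the delete is logged and swallowed so that the failure which triggered the cleanup remains the one surfaced to the caller.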
silently delete a volume in case of an immediate failure within a function that created it. """ try: self._delete_volume(vol_name=volume['name']) except errors.XCLIError as e: error = self._get_code_and_status_or_message(e) LOG.error(DELETE_VOLUME_BASE_ERROR, {'volume': volume['name'], 'error': error}) def _silent_delete_volume_from_cg(self, volume, cgname): """Silently delete a volume from CG. silently delete a volume in case of an immediate failure within a function that created it. """ try: self._call_xiv_xcli( "cg_remove_vol", vol=volume['name']) except errors.XCLIError as e: LOG.error("Failed removing volume %(vol)s from " "consistency group %(cg)s: %(err)s", {'vol': volume['name'], 'cg': cgname, 'err': self._get_code_and_status_or_message(e)}) self._silent_delete_volume(volume=volume) @proxy._trace_time def delete_volume(self, volume): """Deletes a volume on the Storage machine.""" LOG.debug("delete_volume: %(volume)s", {'volume': volume['name']}) # read replication information specs = self._get_extra_specs(volume.get('volume_type_id', None)) replication_info = self._get_replication_info(specs) if replication_info['enabled']: try: repl.VolumeReplication(self).delete_replication( volume.name, replication_info) except Exception as e: error = self._get_code_and_status_or_message(e) LOG.error(DELETE_VOLUME_BASE_ERROR, {'volume': volume['name'], 'error': error}) # continue even if failed # attempt to delete volume at target target = None try: target, params = self._get_replication_target_params() LOG.info('Target %(target)s: %(params)s', {'target': target, 'params': params}) except Exception as e: LOG.error("Unable to delete replicated volume " "'%(volume)s': %(error)s.", {'error': self._get_code_and_status_or_message(e), 'volume': volume['name']}) if target: try: self._call_remote_xiv_xcli( "vol_delete", vol=volume['name']) except errors.XCLIError as e: LOG.error( "Unable to delete replicated volume " "'%(volume)s': %(error)s.", {'error': self._get_code_and_status_or_message(e), 'volume': volume['name']}) try: self._delete_volume(volume['name']) except errors.XCLIError as e: LOG.error(DELETE_VOLUME_BASE_ERROR, {'volume': volume['name'], 'error': self._get_code_and_status_or_message(e)}) @proxy._trace_time def initialize_connection(self, volume, connector): """Initialize connection to instance. Maps the created volume to the nova volume node, and returns the iSCSI target to be used in the instance """ connection_type = self._get_connection_type() LOG.debug("initialize_connection: %(volume)s %(connector)s" " connection_type: %(connection_type)s", {'volume': volume['name'], 'connector': connector, 'connection_type': connection_type}) # This call does all the work.. fc_targets, host = self._get_host_and_fc_targets( volume, connector) lun_id = self._vol_map_and_get_lun_id( volume, connector, host) meta = { 'driver_volume_type': connection_type, 'data': { 'target_discovered': True, 'target_lun': lun_id, 'volume_id': volume['id'], }, } if connection_type == storage.XIV_CONNECTION_TYPE_ISCSI: meta['data']['target_portal'] = self.meta['ibm_storage_portal'] meta['data']['target_iqn'] = self.meta['ibm_storage_iqn'] meta['data']['provider_location'] = "%s,1 %s %s" % ( self.meta['ibm_storage_portal'], self.meta['ibm_storage_iqn'], lun_id) chap_type = self._get_chap_type() LOG.debug("initialize_connection: %(volume)s." 
" chap_type:%(chap_type)s", {'volume': volume['name'], 'chap_type': chap_type}) if chap_type == storage.CHAP_ENABLED: chap = self._create_chap(host) meta['data']['auth_method'] = 'CHAP' meta['data']['auth_username'] = chap[0] meta['data']['auth_password'] = chap[1] else: all_storage_wwpns = self._get_fc_targets(None) meta['data']['all_storage_wwpns'] = all_storage_wwpns modules = set() for wwpn in fc_targets: modules.add(wwpn[-2]) meta['data']['recommended_connections'] = ( len(modules) * CONNECTIONS_PER_MODULE) meta['data']['target_wwn'] = fc_targets if fc_targets == []: fc_targets = all_storage_wwpns meta['data']['initiator_target_map'] = ( self._build_initiator_target_map(fc_targets, connector)) LOG.debug(str(meta)) return meta @proxy._trace_time def terminate_connection(self, volume, connector): """Terminate connection. Unmaps volume. If this is the last connection from the host, undefines the host from the storage. """ LOG.debug("terminate_connection: %(volume)s %(connector)s", {'volume': volume['name'], 'connector': connector}) host = self._get_host(connector) if host is None: LOG.error(TERMINATE_CONNECTION_BASE_ERROR, {'volume': volume['name'], 'error': "Host not found."}) return fc_targets = {} if self._get_connection_type() == storage.XIV_CONNECTION_TYPE_FC: fc_targets = self._get_fc_targets(host) try: self._call_xiv_xcli( "unmap_vol", vol=volume['name'], host=host.get('name')) except errors.VolumeBadNameError: LOG.error(TERMINATE_CONNECTION_BASE_ERROR, {'volume': volume['name'], 'error': "Volume not found."}) except errors.XCLIError as err: details = self._get_code_and_status_or_message(err) LOG.error(TERMINATE_CONNECTION_BASE_ERROR, {'volume': volume['name'], 'error': details}) # check if there are still mapped volumes or we can # remove this host host_mappings = [] try: host_mappings = self._call_xiv_xcli( "mapping_list", host=host.get('name')).as_list if len(host_mappings) == 0: LOG.info("Terminate connection for volume '%(volume)s': " "%(host)s %(info)s.", {'volume': volume['name'], 'host': host.get('name'), 'info': "will be deleted"}) if not self._is_iscsi(): # The following meta data is provided so that zoning can # be cleared meta = { 'driver_volume_type': self._get_connection_type(), 'data': {'volume_id': volume['id'], }, } meta['data']['target_wwn'] = fc_targets meta['data']['initiator_target_map'] = ( self._build_initiator_target_map(fc_targets, connector)) self._call_xiv_xcli("host_delete", host=host.get('name')) if not self._is_iscsi(): return meta return None else: LOG.debug(("Host '%(host)s' has additional mapped " "volumes %(mappings)s"), {'host': host.get('name'), 'mappings': host_mappings}) except errors.HostBadNameError: LOG.error(TERMINATE_CONNECTION_HOST_ERROR, {'volume': volume['name'], 'host': host.get('name'), 'error': "Host not found."}) except errors.XCLIError as err: details = self._get_code_and_status_or_message(err) LOG.error(TERMINATE_CONNECTION_HOST_ERROR, {'volume': volume['name'], 'host': host.get('name'), 'error': details}) def _create_volume_from_snapshot(self, volume, snapshot_name, snapshot_size): """Create volume from snapshot internal implementation. 
used for regular snapshot and cgsnapshot """ LOG.debug("_create_volume_from_snapshot: %(volume)s from %(name)s", {'volume': volume['name'], 'name': snapshot_name}) # TODO(alonma): Refactor common validation volume_size = float(volume['size']) if volume_size < snapshot_size: error = (_("Volume size (%(vol_size)sGB) cannot be smaller than " "the snapshot size (%(snap_size)sGB)..") % {'vol_size': volume_size, 'snap_size': snapshot_size}) LOG.error(error) raise self._get_exception()(error) self.create_volume(volume) try: self._call_xiv_xcli( "vol_copy", vol_src=snapshot_name, vol_trg=volume['name']) except errors.XCLIError as e: error = (_("Fatal error in copying volume: %(details)s") % {'details': self._get_code_and_status_or_message(e)}) LOG.error(error) self._silent_delete_volume(volume) raise self._get_exception()(error) # A side effect of vol_copy is the resizing of the destination volume # to the size of the source volume. If the size is different we need # to get it back to the desired size if snapshot_size == volume_size: return size = storage.gigabytes_to_blocks(volume_size) try: self._call_xiv_xcli( "vol_resize", vol=volume['name'], size_blocks=size) except errors.XCLIError as e: error = (_("Fatal error in resize volume: %(details)s") % {'details': self._get_code_and_status_or_message(e)}) LOG.error(error) self._silent_delete_volume(volume) raise self._get_exception()(error) @proxy._trace_time def create_volume_from_snapshot(self, volume, snapshot): """create volume from snapshot.""" snapshot_size = float(snapshot['volume_size']) self._create_volume_from_snapshot(volume, snapshot.name, snapshot_size) @proxy._trace_time def create_snapshot(self, snapshot): """create snapshot.""" try: self._call_xiv_xcli( "snapshot_create", vol=snapshot['volume_name'], name=snapshot['name']) except errors.XCLIError as e: error = (_("Fatal error in snapshot_create: %(details)s") % {'details': self._get_code_and_status_or_message(e)}) LOG.error(error) raise self._get_exception()(error) @proxy._trace_time def delete_snapshot(self, snapshot): """delete snapshot.""" try: self._call_xiv_xcli( "snapshot_delete", snapshot=snapshot['name']) except errors.XCLIError as e: error = (_("Fatal error in snapshot_delete: %(details)s") % {'details': self._get_code_and_status_or_message(e)}) LOG.error(error) raise self._get_exception()(error) @proxy._trace_time def extend_volume(self, volume, new_size): """Resize volume.""" volume_size = float(volume['size']) wanted_size = float(new_size) if wanted_size == volume_size: return shrink = 'yes' if wanted_size < volume_size else 'no' size = storage.gigabytes_to_blocks(wanted_size) try: self._call_xiv_xcli( "vol_resize", vol=volume['name'], size_blocks=size, shrink_volume=shrink) except errors.XCLIError as e: error = (_("Fatal error in vol_resize: %(details)s") % {'details': self._get_code_and_status_or_message(e)}) LOG.error(error) raise self._get_exception()(error) @proxy._trace_time def migrate_volume(self, context, volume, host): """Migrate volume to another backend. Optimize the migration if the destination is on the same server. If the specified host is another back-end on the same server, and the volume is not attached, we can do the migration locally without going through iSCSI. Storage-assisted migration... 
""" false_ret = (False, None) if 'location_info' not in host['capabilities']: return false_ret info = host['capabilities']['location_info'] try: dest, dest_host, dest_pool = info.split(':') except ValueError: return false_ret volume_host = volume.host.split('_')[1] if dest != strings.XIV_BACKEND_PREFIX or dest_host != volume_host: return false_ret if volume.attach_status == 'attached': LOG.info("Storage-assisted volume migration: Volume " "%(volume)s is attached", {'volume': volume.id}) try: self._call_xiv_xcli( "vol_move", vol=volume.name, pool=dest_pool) except errors.XCLIError as e: error = (_("Fatal error in vol_move: %(details)s") % {'details': self._get_code_and_status_or_message(e)}) LOG.error(error) raise self._get_exception()(error) return (True, None) @proxy._trace_time def manage_volume(self, volume, reference): """Brings an existing backend storage object under Cinder management. reference value is passed straight from the get_volume_list helper function. it is up to the driver how this should be interpreted. It should be sufficient to identify a storage object that the driver should somehow associate with the newly-created cinder volume structure. There are two ways to do this: 1. Rename the backend storage object so that it matches the, volume['name'] which is how drivers traditionally map between a cinder volume and the associated backend storage object. 2. Place some metadata on the volume, or somewhere in the backend, that allows other driver requests (e.g. delete, clone, attach, detach...) to locate the backend storage object when required. If the reference doesn't make sense, or doesn't refer to an existing backend storage object, raise a ManageExistingInvalidReference exception. The volume may have a volume_type, and the driver can inspect that and compare against the properties of the referenced backend storage object. If they are incompatible, raise a ManageExistingVolumeTypeMismatch, specifying a reason for the failure. """ existing_volume = reference['source-name'] LOG.debug("manage_volume: %(volume)s", {'volume': existing_volume}) # check that volume exists try: volumes = self._call_xiv_xcli( "vol_list", vol=existing_volume).as_list except errors.XCLIError as e: error = (MANAGE_VOLUME_BASE_ERROR % {'volume': existing_volume, 'error': self._get_code_and_status_or_message(e)}) LOG.error(error) raise self._get_exception()(error) if len(volumes) != 1: error = (MANAGE_VOLUME_BASE_ERROR % {'volume': existing_volume, 'error': 'Volume does not exist'}) LOG.error(error) raise self._get_exception()(error) volume['size'] = float(volumes[0]['size']) # option 1: # rename volume to volume['name'] try: self._call_xiv_xcli( "vol_rename", vol=existing_volume, new_name=volume['name']) except errors.XCLIError as e: error = (MANAGE_VOLUME_BASE_ERROR % {'volume': existing_volume, 'error': self._get_code_and_status_or_message(e)}) LOG.error(error) raise self._get_exception()(error) # option 2: # return volume name as admin metadata # update the admin metadata DB # Need to do the ~same in create data. use the metadata instead of the # volume name return {} @proxy._trace_time def manage_volume_get_size(self, volume, reference): """Return size of volume to be managed by manage_volume. When calculating the size, round up to the next GB. 
""" existing_volume = reference['source-name'] # check that volume exists try: volumes = self._call_xiv_xcli( "vol_list", vol=existing_volume).as_list except errors.XCLIError as e: error = (_("Fatal error in vol_list: %(details)s") % {'details': self._get_code_and_status_or_message(e)}) LOG.error(error) raise self._get_exception()(error) if len(volumes) != 1: error = (_("Volume %(volume)s is not available on storage") % {'volume': existing_volume}) LOG.error(error) raise self._get_exception()(error) return float(volumes[0]['size']) @proxy._trace_time def unmanage_volume(self, volume): """Removes the specified volume from Cinder management. Does not delete the underlying backend storage object. """ pass @proxy._trace_time def get_replication_status(self, context, volume): """Return replication status.""" pass def freeze_backend(self, context): """Notify the backend that it's frozen.""" # go over volumes in backend that are replicated and lock them pass def thaw_backend(self, context): """Notify the backend that it's unfrozen/thawed.""" # go over volumes in backend that are replicated and unlock them pass def _using_default_backend(self): return ((self.active_backend_id is None) or (self.active_backend_id == strings.PRIMARY_BACKEND_ID)) def _is_vol_split_brain(self, xcli_master, xcli_slave, vol): mirror_master = xcli_master.cmd.mirror_list(vol=vol).as_list mirror_slave = xcli_slave.cmd.mirror_list(vol=vol).as_list if (len(mirror_master) == 1 and len(mirror_slave) == 1 and mirror_master[0].current_role == 'Master' and mirror_slave[0].current_role == 'Slave' and mirror_master[0].sync_state.lower() in SYNCHED_STATES): return False else: return True def _potential_split_brain(self, xcli_master, xcli_slave, volumes, pool_master, pool_slave): potential_split_brain = [] if xcli_master is None or xcli_slave is None: return potential_split_brain try: vols_master = xcli_master.cmd.vol_list( pool=pool_master).as_dict('name') except Exception: msg = "Failed getting information from the active storage." LOG.debug(msg) return potential_split_brain try: vols_slave = xcli_slave.cmd.vol_list( pool=pool_slave).as_dict('name') except Exception: msg = "Failed getting information from the target storage." LOG.debug(msg) return potential_split_brain vols_requested = set(vol['name'] for vol in volumes) common_vols = set(vols_master).intersection( set(vols_slave)).intersection(set(vols_requested)) for name in common_vols: if self._is_vol_split_brain(xcli_master=xcli_master, xcli_slave=xcli_slave, vol=name): potential_split_brain.append(name) return potential_split_brain @proxy._trace_time def failover_host(self, context, volumes, secondary_id, groups=None): """Failover a full backend. Fails over the volume back and forth, if secondary_id is 'default', volumes will be failed back, otherwize failed over. Note that the resulting status depends on the direction: in case of failover it will be 'failed-over' and in case of failback it will be 'available' """ volume_update_list = [] LOG.info("failover_host: from %(active)s to %(id)s", {'active': self.active_backend_id, 'id': secondary_id}) # special cases to handle if secondary_id == strings.PRIMARY_BACKEND_ID: # case: already failed back if self._using_default_backend(): LOG.info("Host has been failed back. 
No need " "to fail back again.") return self.active_backend_id, volume_update_list, [] pool_slave = self.storage_info[storage.FLAG_KEYS['storage_pool']] pool_master = self._get_target_params( self.active_backend_id)['san_clustername'] goal_status = 'available' else: if not self._using_default_backend(): LOG.info("Already failed over. No need to failover again.") return self.active_backend_id, volume_update_list, [] # case: need to select a target secondary_id = self.get_secondary_backend_id(secondary_id) pool_master = self.storage_info[storage.FLAG_KEYS['storage_pool']] try: pool_slave = self._get_target_params( secondary_id)['san_clustername'] except Exception: msg = _("Invalid target information. Can't perform failover") LOG.error(msg) raise self.meta['exception'].VolumeBackendAPIException( data=msg) pool_master = self.storage_info[storage.FLAG_KEYS['storage_pool']] goal_status = fields.ReplicationStatus.FAILED_OVER # connnect xcli to secondary storage according to backend_id by # calling _init_xcli with secondary_id self.ibm_storage_remote_cli = self._init_xcli(secondary_id) # get replication_info for all volumes at once if len(volumes): # check for split brain situations # check for files that are available on both volumes # and are not in an active mirroring relation self.check_for_splitbrain(volumes, pool_master, pool_slave) # loop over volumes and attempt failover for volume in volumes: LOG.debug("Attempting to failover '%(vol)s'", {'vol': volume['name']}) result, details = repl.VolumeReplication(self).failover( volume, failback=(secondary_id == strings.PRIMARY_BACKEND_ID)) if result: status = goal_status else: status = 'error' updates = {'status': status} if status == 'error': updates['replication_extended_status'] = details volume_update_list.append({ 'volume_id': volume['id'], 'updates': updates }) # set active xcli to secondary xcli self._replace_xcli_to_remote_xcli() # set active backend id to secondary id self.active_backend_id = secondary_id return secondary_id, volume_update_list, [] @proxy._trace_time def retype(self, ctxt, volume, new_type, diff, host): """Change volume type. Returns a boolean indicating whether the retype occurred. :param ctxt: Context :param volume: A dictionary describing the volume to migrate :param new_type: A dictionary describing the volume type to convert to :param diff: A dictionary with the difference between the two types :param host: A dictionary describing the host to migrate to, where host['host'] is its name, and host['capabilities'] is a dictionary of its reported capabilities """ LOG.debug("retype: volume = %(vol)s type = %(ntype)s", {'vol': volume.get('display_name'), 'ntype': new_type['name']}) if 'location_info' not in host['capabilities']: return False info = host['capabilities']['location_info'] try: (dest, dest_host, dest_pool) = info.split(':') except ValueError: return False volume_host = volume.get('host').split('_')[1] if (dest != strings.XIV_BACKEND_PREFIX or dest_host != volume_host): return False pool_name = self._get_backend_pool() # if pool is different. else - we're on the same pool and retype is ok. 
if (pool_name != dest_pool): # The input host and pool are already "linked" to the new_type, # otherwise the scheduler does not assign them as candidates for # the retype thus we just need to migrate the volume to the new # pool LOG.debug("retype: migrate volume %(vol)s to " "host=%(host)s, pool=%(pool)s", {'vol': volume.get('display_name'), 'host': dest_host, 'pool': dest_pool}) (mig_result, model) = self.migrate_volume( context=ctxt, volume=volume, host=host) if not mig_result: raise self.meta['exception'].VolumeBackendAPIException( data=PERF_CLASS_ADD_ERROR) # Migration occurred, retype has finished. # We need to check for type and QoS. # getting the old specs old_specs = self._qos_specs_from_volume(volume) new_specs = self._get_qos_specs(new_type.get('id', None)) if not new_specs: if old_specs: LOG.debug("qos: removing qos class for %(vol)s.", {'vol': volume.display_name}) self._qos_remove_vol(volume) return True perf_class_name_old = self._check_perf_class_on_backend(old_specs) perf_class_name_new = self._check_perf_class_on_backend(new_specs) if perf_class_name_new != perf_class_name_old: # add new qos to vol. (removed from old qos automatically) self._qos_add_vol(volume, perf_class_name_new) return True @proxy._trace_time def _check_storage_version_for_qos_support(self): if self.meta['storage_version'] is None: self.meta['storage_version'] = self._call_xiv_xcli( "version_get").as_single_element.system_version if int(self.meta['storage_version'][0:2]) >= 12: return 'True' return 'False' @proxy._trace_time def _update_stats(self): """fetch and update stats.""" LOG.debug("Entered XIVProxy::_update_stats:") self.meta['stat'] = {} connection_type = self._get_connection_type() backend_name = None if self.driver: backend_name = self.driver.configuration.safe_get( 'volume_backend_name') self.meta['stat']['reserved_percentage'] = ( self.driver.configuration.safe_get('reserved_percentage')) self.meta['stat']["volume_backend_name"] = ( backend_name or '%s_%s_%s_%s' % ( strings.XIV_BACKEND_PREFIX, self.storage_info[storage.FLAG_KEYS['address']], self.storage_info[storage.FLAG_KEYS['storage_pool']], connection_type)) self.meta['stat']["vendor_name"] = 'IBM' self.meta['stat']["driver_version"] = self.full_version self.meta['stat']["storage_protocol"] = connection_type self.meta['stat']['multiattach'] = False self.meta['stat']['group_replication_enabled'] = True self.meta['stat']['consistent_group_replication_enabled'] = True self.meta['stat']['QoS_support'] = ( self._check_storage_version_for_qos_support()) self.meta['stat']['location_info'] = ( ('%(destination)s:%(hostname)s:%(pool)s' % {'destination': strings.XIV_BACKEND_PREFIX, 'hostname': self.storage_info[storage.FLAG_KEYS['address']], 'pool': self.storage_info[storage.FLAG_KEYS['storage_pool']] })) self._retrieve_pool_stats(self.meta) if self.targets: self.meta['stat']['replication_enabled'] = True self.meta['stat']['replication_type'] = [SYNC, ASYNC] self.meta['stat']['rpo'] = repl.Replication.get_supported_rpo() self.meta['stat']['replication_count'] = len(self.targets) self.meta['stat']['replication_targets'] = [target for target in self.targets] self.meta['stat']['timestamp'] = datetime.datetime.utcnow() LOG.debug("Exiting XIVProxy::_update_stats: %(stat)s", {'stat': self.meta['stat']}) @proxy._trace_time def _get_pool(self): pool_name = self._get_backend_pool() pools = self._call_xiv_xcli( "pool_list", pool=pool_name).as_list if not pools: msg = (_( "Pool %(pool)s not available on storage") % {'pool': pool_name}) LOG.error(msg) raise 
self.meta['exception'].VolumeBackendAPIException(data=msg) return pools def _get_backend_pool(self): if self.active_backend_id == strings.PRIMARY_BACKEND_ID: return self.storage_info[storage.FLAG_KEYS['storage_pool']] else: return self._get_target_params( self.active_backend_id)['san_clustername'] def _retrieve_pool_stats(self, data): try: pools = self._get_pool() pool = pools[0] data['stat']['pool_name'] = pool.get('name') # handle different fields in pool_list between Gen3 and BR soft_size = pool.get('soft_size') if soft_size is None: soft_size = pool.get('size') hard_size = 0 else: hard_size = pool.hard_size data['stat']['total_capacity_gb'] = int(soft_size) data['stat']['free_capacity_gb'] = int( pool.get('empty_space_soft', pool.get('empty_space'))) # thin/thick provision data['stat']['thin_provisioning_support'] = ( 'True' if soft_size > hard_size else 'False') data['stat']['backend_state'] = 'up' except Exception as e: data['stat']['total_capacity_gb'] = 0 data['stat']['free_capacity_gb'] = 0 data['stat']['thin_provision'] = False data['stat']['backend_state'] = 'down' error = self._get_code_and_status_or_message(e) LOG.error(error) @proxy._trace_time def create_cloned_volume(self, volume, src_vref): """Create cloned volume.""" # read replication information specs = self._get_extra_specs(volume.get('volume_type_id', None)) replication_info = self._get_replication_info(specs) # TODO(alonma): Refactor to use more common code src_vref_size = float(src_vref['size']) volume_size = float(volume['size']) if volume_size < src_vref_size: error = (_("New volume size (%(vol_size)s GB) cannot be less " "than the source volume size (%(src_size)s GB)..") % {'vol_size': volume_size, 'src_size': src_vref_size}) LOG.error(error) raise self._get_exception()(error) self._create_volume(volume) try: self._call_xiv_xcli( "vol_copy", vol_src=src_vref['name'], vol_trg=volume['name']) except errors.XCLIError as e: error = (_("Failed to copy from '%(src)s' to '%(vol)s': " "%(details)s") % {'src': src_vref.get('name', ''), 'vol': volume.get('name', ''), 'details': self._get_code_and_status_or_message(e)}) LOG.error(error) self._silent_delete_volume(volume=volume) raise self._get_exception()(error) # A side effect of vol_copy is the resizing of the destination volume # to the size of the source volume. If the size is different we need # to get it back to the desired size if src_vref_size != volume_size: size = storage.gigabytes_to_blocks(volume_size) try: self._call_xiv_xcli( "vol_resize", vol=volume['name'], size_blocks=size) except errors.XCLIError as e: error = (_("Fatal error in vol_resize: %(details)s") % {'details': self._get_code_and_status_or_message(e)}) LOG.error(error) self._silent_delete_volume(volume=volume) raise self._get_exception()(error) self.handle_created_vol_properties(replication_info, volume) @proxy._trace_time def volume_exists(self, volume): """Checks if a volume exists on xiv.""" return len(self._call_xiv_xcli( "vol_list", vol=volume['name']).as_list) > 0 def _cg_name_from_id(self, id): '''Get storage CG name from id. A utility method to translate from id to CG name on the storage ''' return "cg_%(id)s" % {'id': id} def _group_name_from_id(self, id): '''Get storage group name from id. A utility method to translate from id to Snapshot Group name on the storage ''' return "cgs_%(id)s" % {'id': id} def _cg_name_from_volume(self, volume): '''Get storage CG name from volume. 
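Returns cg_<group_id> when the volume belongs to a group (see _cg_name_from_id) and None otherwise.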
A utility method to translate from openstack volume to CG name on the storage ''' LOG.debug("_cg_name_from_volume: %(vol)s", {'vol': volume['name']}) cg_id = volume.get('group_id', None) if cg_id: cg_name = self._cg_name_from_id(cg_id) LOG.debug("Volume %(vol)s is in CG %(cg)s", {'vol': volume['name'], 'cg': cg_name}) return cg_name else: LOG.debug("Volume %(vol)s not in CG", {'vol': volume['name']}) return None def _cg_name_from_group(self, group): '''Get storage CG name from group. A utility method to translate from openstack group to CG name on the storage ''' return self._cg_name_from_id(group['id']) def _cg_name_from_cgsnapshot(self, cgsnapshot): '''Get storage CG name from snapshot. A utility method to translate from openstack cgsnapshot to CG name on the storage ''' return self._cg_name_from_id(cgsnapshot['group_id']) def _group_name_from_cgsnapshot_id(self, cgsnapshot_id): '''Get storage Snaphost Group name from snapshot. A utility method to translate from openstack cgsnapshot to Snapshot Group name on the storage ''' return self._group_name_from_id(cgsnapshot_id) def _volume_name_from_cg_snapshot(self, cgs, vol): # Note: The string is limited by the storage to 63 characters return ('%(cgs)s.%(vol)s' % {'cgs': cgs, 'vol': vol})[0:62] @proxy._trace_time def create_group(self, context, group): """Creates a group.""" if volume_utils.is_group_a_cg_snapshot_type(group): cgname = self._cg_name_from_group(group) return self._create_consistencygroup(context, cgname) # For generic group, create is executed by manager raise NotImplementedError() def _create_consistencygroup(self, context, cgname): """Creates a consistency group.""" LOG.info("Creating consistency group %(name)s.", {'name': cgname}) # call XCLI try: self._call_xiv_xcli( "cg_create", cg=cgname, pool=self.storage_info[ storage.FLAG_KEYS['storage_pool']]).as_list except errors.CgNameExistsError: error = (_("consistency group %s already exists on backend") % cgname) LOG.error(error) raise self._get_exception()(error) except errors.CgLimitReachedError: error = _("Reached Maximum number of consistency groups") LOG.error(error) raise self._get_exception()(error) except errors.XCLIError as e: error = (_("Fatal error in cg_create: %(details)s") % {'details': self._get_code_and_status_or_message(e)}) LOG.error(error) raise self._get_exception()(error) model_update = {'status': fields.GroupStatus.AVAILABLE} return model_update def _create_consistencygroup_on_remote(self, context, cgname): """Creates a consistency group on secondary machine. Return group available even if it already exists (for replication) """ LOG.info("Creating consistency group %(name)s on secondary.", {'name': cgname}) # call remote XCLI try: self._call_remote_xiv_xcli( "cg_create", cg=cgname, pool=self.storage_info[ storage.FLAG_KEYS['storage_pool']]).as_list except errors.CgNameExistsError: model_update = {'status': fields.GroupStatus.AVAILABLE} except errors.CgLimitReachedError: error = _("Maximum number of consistency groups reached") LOG.error(error) raise self._get_exception()(error) except errors.XCLIError as e: error = (_("Fatal error in cg_create on remote: %(details)s") % {'details': self._get_code_and_status_or_message(e)}) LOG.error(error) raise self._get_exception()(error) model_update = {'status': fields.GroupStatus.AVAILABLE} return model_update def _silent_cleanup_consistencygroup_from_src(self, context, group, volumes, cgname): """Silent cleanup of volumes from CG. Silently cleanup volumes and created consistency-group from storage. 
This function is called after a failure already occurred and just logs errors, but does not raise exceptions """ for volume in volumes: self._silent_delete_volume_from_cg(volume=volume, cgname=cgname) try: self._delete_consistencygroup(context, group, []) except Exception as e: details = self._get_code_and_status_or_message(e) LOG.error('Failed to cleanup CG %(details)s', {'details': details}) @proxy._trace_time def create_group_from_src(self, context, group, volumes, group_snapshot, sorted_snapshots, source_group, sorted_source_vols): """Create volume group from volume group or volume group snapshot.""" if volume_utils.is_group_a_cg_snapshot_type(group): return self._create_consistencygroup_from_src(context, group, volumes, group_snapshot, sorted_snapshots, source_group, sorted_source_vols) else: raise NotImplementedError() def _create_consistencygroup_from_src(self, context, group, volumes, cgsnapshot, snapshots, source_cg, sorted_source_vols): """Creates a consistency group from source. Source can be a cgsnapshot with the relevant list of snapshots, or another CG with its list of volumes. """ cgname = self._cg_name_from_group(group) LOG.info("Creating consistency group %(cg)s from src.", {'cg': cgname}) volumes_model_update = [] if cgsnapshot and snapshots: LOG.debug("Creating from cgsnapshot %(cg)s", {'cg': self._cg_name_from_group(cgsnapshot)}) try: self._create_consistencygroup(context, cgname) except Exception as e: LOG.error( "Creating CG from cgsnapshot failed: %(details)s", {'details': self._get_code_and_status_or_message(e)}) raise created_volumes = [] try: groupname = self._group_name_from_cgsnapshot_id( cgsnapshot['id']) for volume, source in zip(volumes, snapshots): vol_name = source.volume_name LOG.debug("Original volume: %(vol_name)s", {'vol_name': vol_name}) snapshot_name = self._volume_name_from_cg_snapshot( groupname, vol_name) LOG.debug("create volume (vol)s from snapshot %(snap)s", {'vol': vol_name, 'snap': snapshot_name}) snapshot_size = float(source['volume_size']) self._create_volume_from_snapshot( volume, snapshot_name, snapshot_size) created_volumes.append(volume) volumes_model_update.append( { 'id': volume['id'], 'status': 'available', 'size': snapshot_size, }) except Exception as e: details = self._get_code_and_status_or_message(e) msg = (CREATE_VOLUME_BASE_ERROR % {'details': details}) LOG.error(msg) # cleanup and then raise exception self._silent_cleanup_consistencygroup_from_src( context, group, created_volumes, cgname) raise self.meta['exception'].VolumeBackendAPIException( data=msg) elif source_cg and sorted_source_vols: LOG.debug("Creating from CG %(cg)s .", {'cg': self._cg_name_from_group(source_cg)}) LOG.debug("Creating from CG %(cg)s .", {'cg': source_cg['id']}) try: self._create_consistencygroup(context, group) except Exception as e: LOG.error("Creating CG from CG failed: %(details)s", {'details': self._get_code_and_status_or_message(e)}) raise created_volumes = [] try: for volume, source in zip(volumes, sorted_source_vols): self.create_cloned_volume(volume, source) created_volumes.append(volume) volumes_model_update.append( { 'id': volume['id'], 'status': 'available', 'size': source['size'], }) except Exception as e: details = self._get_code_and_status_or_message(e) msg = (CREATE_VOLUME_BASE_ERROR, {'details': details}) LOG.error(msg) # cleanup and then raise exception self._silent_cleanup_consistencygroup_from_src( context, group, created_volumes, cgname) raise self.meta['exception'].VolumeBackendAPIException( data=msg) else: error = 
'create_consistencygroup_from_src called without a source' raise self._get_exception()(error) model_update = {'status': fields.GroupStatus.AVAILABLE} return model_update, volumes_model_update @proxy._trace_time def delete_group(self, context, group, volumes): """Deletes a group.""" rep_status = group.get('replication_status') enabled = fields.ReplicationStatus.ENABLED failed_over = fields.ReplicationStatus.FAILED_OVER if rep_status == enabled or rep_status == failed_over: msg = _("Disable group replication before deleting group.") LOG.error(msg) raise self._get_exception()(msg) if volume_utils.is_group_a_cg_snapshot_type(group): return self._delete_consistencygroup(context, group, volumes) else: # For generic group delete the volumes only - executed by manager raise NotImplementedError() def _delete_consistencygroup(self, context, group, volumes): """Deletes a consistency group.""" cgname = self._cg_name_from_group(group) LOG.info("Deleting consistency group %(name)s.", {'name': cgname}) model_update = {} model_update['status'] = group.get('status', fields.GroupStatus.DELETING) # clean up volumes volumes_model_update = [] for volume in volumes: try: self._call_xiv_xcli( "cg_remove_vol", vol=volume['name']) except errors.XCLIError as e: LOG.error("Failed removing volume %(vol)s from " "consistency group %(cg)s: %(err)s", {'vol': volume['name'], 'cg': cgname, 'err': self._get_code_and_status_or_message(e)}) # continue in spite of error try: self._delete_volume(volume['name']) # size and volume_type_id are required in liberty code # they are maintained here for backwards compatability volumes_model_update.append( { 'id': volume['id'], 'status': 'deleted', }) except errors.XCLIError as e: LOG.error(DELETE_VOLUME_BASE_ERROR, {'volume': volume['name'], 'error': self._get_code_and_status_or_message(e)}) model_update['status'] = fields.GroupStatus.ERROR_DELETING # size and volume_type_id are required in liberty code # they are maintained here for backwards compatibility volumes_model_update.append( { 'id': volume['id'], 'status': 'error_deleting', }) # delete CG from cinder.volume.drivers.ibm.ibm_storage if model_update['status'] != fields.GroupStatus.ERROR_DELETING: try: self._call_xiv_xcli( "cg_delete", cg=cgname).as_list model_update['status'] = fields.GroupStatus.DELETED except (errors.CgDoesNotExistError, errors.CgBadNameError): LOG.warning("consistency group %(cgname)s does not " "exist on backend", {'cgname': cgname}) # if the object was already deleted on the backend, we can # continue and delete the openstack object model_update['status'] = fields.GroupStatus.DELETED except errors.CgHasMirrorError: error = (_("consistency group %s is being mirrored") % cgname) LOG.error(error) raise self._get_exception()(error) except errors.CgNotEmptyError: error = (_("consistency group %s is not empty") % cgname) LOG.error(error) raise self._get_exception()(error) except errors.XCLIError as e: error = (_("Fatal: %(code)s. 
CG: %(cgname)s") % {'code': self._get_code_and_status_or_message(e), 'cgname': cgname}) LOG.error(error) raise self._get_exception()(error) return model_update, volumes_model_update @proxy._trace_time def update_group(self, context, group, add_volumes=None, remove_volumes=None): """Updates a group.""" if volume_utils.is_group_a_cg_snapshot_type(group): return self._update_consistencygroup(context, group, add_volumes, remove_volumes) else: # For generic group update executed by manager raise NotImplementedError() def _update_consistencygroup(self, context, group, add_volumes=None, remove_volumes=None): """Updates a consistency group.""" cgname = self._cg_name_from_group(group) LOG.info("Updating consistency group %(name)s.", {'name': cgname}) model_update = {'status': fields.GroupStatus.AVAILABLE} add_volumes_update = [] if add_volumes: for volume in add_volumes: try: self._call_xiv_xcli( "cg_add_vol", vol=volume['name'], cg=cgname) except errors.XCLIError as e: error = (_("Failed adding volume %(vol)s to " "consistency group %(cg)s: %(err)s") % {'vol': volume['name'], 'cg': cgname, 'err': self._get_code_and_status_or_message(e)}) LOG.error(error) self._cleanup_consistencygroup_update( context, group, add_volumes_update, None) raise self._get_exception()(error) add_volumes_update.append({'name': volume['name']}) remove_volumes_update = [] if remove_volumes: for volume in remove_volumes: try: self._call_xiv_xcli( "cg_remove_vol", vol=volume['name']) except (errors.VolumeNotInConsGroup, errors.VolumeBadNameError) as e: # ignore the error if the volume exists in storage but # not in cg, or the volume does not exist in the storage details = self._get_code_and_status_or_message(e) LOG.debug(details) except errors.XCLIError as e: error = (_("Failed removing volume %(vol)s from " "consistency group %(cg)s: %(err)s") % {'vol': volume['name'], 'cg': cgname, 'err': self._get_code_and_status_or_message(e)}) LOG.error(error) self._cleanup_consistencygroup_update( context, group, add_volumes_update, remove_volumes_update) raise self._get_exception()(error) remove_volumes_update.append({'name': volume['name']}) return model_update, None, None def _cleanup_consistencygroup_update(self, context, group, add_volumes, remove_volumes): if add_volumes: for volume in add_volumes: try: self._call_xiv_xcli( "cg_remove_vol", vol=volume['name']) except Exception: LOG.debug("cg_remove_vol(%s) failed", volume['name']) if remove_volumes: cgname = self._cg_name_from_group(group) for volume in remove_volumes: try: self._call_xiv_xcli( "cg_add_vol", vol=volume['name'], cg=cgname) except Exception: LOG.debug("cg_add_vol(%(name)s, %(cgname)s) failed", {'name': volume['name'], 'cgname': cgname}) @proxy._trace_time def create_group_snapshot(self, context, group_snapshot, snapshots): """Create volume group snapshot.""" if volume_utils.is_group_a_cg_snapshot_type(group_snapshot): return self._create_cgsnapshot(context, group_snapshot, snapshots) else: # For generic group snapshot create executed by manager raise NotImplementedError() def _create_cgsnapshot(self, context, cgsnapshot, snapshots): """Creates a CG snapshot.""" model_update = {'status': fields.GroupSnapshotStatus.AVAILABLE} cgname = self._cg_name_from_cgsnapshot(cgsnapshot) groupname = self._group_name_from_cgsnapshot_id(cgsnapshot['id']) LOG.info("Creating snapshot %(group)s for CG %(cg)s.", {'group': groupname, 'cg': cgname}) # call XCLI try: self._call_xiv_xcli( "cg_snapshots_create", cg=cgname, snap_group=groupname).as_list except errors.CgDoesNotExistError: 
error = (_("Consistency group %s does not exist on backend") % cgname) LOG.error(error) raise self._get_exception()(error) except errors.CgBadNameError: error = (_("Consistency group %s has an illegal name") % cgname) LOG.error(error) raise self._get_exception()(error) except errors.SnapshotGroupDoesNotExistError: error = (_("Snapshot group %s has an illegal name") % cgname) LOG.error(error) raise self._get_exception()(error) except errors.PoolSnapshotLimitReachedError: error = _("Reached maximum snapshots allocation size") LOG.error(error) raise self._get_exception()(error) except errors.CgEmptyError: error = (_("Consistency group %s is empty") % cgname) LOG.error(error) raise self._get_exception()(error) except (errors.MaxVolumesReachedError, errors.DomainMaxVolumesReachedError): error = _("Reached Maximum number of volumes") LOG.error(error) raise self._get_exception()(error) except errors.SnapshotGroupIsReservedError: error = (_("Consistency group %s name is reserved") % cgname) LOG.error(error) raise self._get_exception()(error) except errors.SnapshotGroupAlreadyExistsError: error = (_("Snapshot group %s already exists") % groupname) LOG.error(error) raise self._get_exception()(error) except errors.XCLIError as e: error = (_("Fatal: CG %(cg)s, Group %(group)s. %(err)s") % {'cg': cgname, 'group': groupname, 'err': self._get_code_and_status_or_message(e)}) LOG.error(error) raise self._get_exception()(error) snapshots_model_update = [] for snapshot in snapshots: snapshots_model_update.append( { 'id': snapshot['id'], 'status': fields.SnapshotStatus.AVAILABLE, }) return model_update, snapshots_model_update @proxy._trace_time def delete_group_snapshot(self, context, group_snapshot, snapshots): """Delete volume group snapshot.""" if volume_utils.is_group_a_cg_snapshot_type(group_snapshot): return self._delete_cgsnapshot(context, group_snapshot, snapshots) else: # For generic group snapshot delete is executed by manager raise NotImplementedError() def _delete_cgsnapshot(self, context, cgsnapshot, snapshots): """Deletes a CG snapshot.""" cgname = self._cg_name_from_cgsnapshot(cgsnapshot) groupname = self._group_name_from_cgsnapshot_id(cgsnapshot['id']) LOG.info("Deleting snapshot %(group)s for CG %(cg)s.", {'group': groupname, 'cg': cgname}) # call XCLI try: self._call_xiv_xcli( "snap_group_delete", snap_group=groupname).as_list except errors.CgDoesNotExistError: error = _("consistency group %s not found on backend") % cgname LOG.error(error) raise self._get_exception()(error) except errors.PoolSnapshotLimitReachedError: error = _("Reached Maximum size allocated for snapshots") LOG.error(error) raise self._get_exception()(error) except errors.CgEmptyError: error = _("Consistency group %s is empty") % cgname LOG.error(error) raise self._get_exception()(error) except errors.XCLIError as e: error = _("Fatal: CG %(cg)s, Group %(group)s. 
%(err)s") % { 'cg': cgname, 'group': groupname, 'err': self._get_code_and_status_or_message(e) } LOG.error(error) raise self._get_exception()(error) model_update = {'status': fields.GroupSnapshotStatus.DELETED} snapshots_model_update = [] for snapshot in snapshots: snapshots_model_update.append( { 'id': snapshot['id'], 'status': fields.SnapshotStatus.DELETED, }) return model_update, snapshots_model_update def _generate_chap_secret(self, chap_name): """Returns chap secret generated according to chap_name chap secret must be between 12-16 chaqnracters """ name = chap_name chap_secret = "" while len(chap_secret) < 12: chap_secret = cryptish.encrypt(name)[:16] name = name + '_' LOG.debug("_generate_chap_secret: %(secret)s", {'secret': chap_secret}) return chap_secret @proxy._trace_time def _create_chap(self, host=None): """Get CHAP name and secret returns chap name and secret chap_name and chap_secret must be 12-16 characters long """ if host: if host['chap']: chap_name = host['chap'][0] LOG.debug("_create_chap: %(chap_name)s ", {'chap_name': chap_name}) else: chap_name = host['name'] else: LOG.info("_create_chap: host missing!!!") chap_name = "12345678901234" chap_secret = self._generate_chap_secret(chap_name) LOG.debug("_create_chap (new): %(chap_name)s ", {'chap_name': chap_name}) return (chap_name, chap_secret) @proxy._trace_time def _get_host(self, connector): """Returns a host looked up via initiator.""" try: host_bunch = self._get_bunch_from_host(connector) except Exception as e: details = self._get_code_and_status_or_message(e) msg = (_("%(prefix)s. Invalid connector: '%(details)s.'") % {'prefix': storage.XIV_LOG_PREFIX, 'details': details}) raise self._get_exception()(msg) host = [] chap = None all_hosts = self._call_xiv_xcli("host_list").as_list if self._get_connection_type() == storage.XIV_CONNECTION_TYPE_ISCSI: host = [host_obj for host_obj in all_hosts if host_bunch['initiator'] in host_obj.iscsi_ports.split(',')] else: if 'wwpns' in connector: if len(host_bunch['wwpns']) > 0: wwpn_set = set([wwpn.lower() for wwpn in host_bunch['wwpns']]) host = [host_obj for host_obj in all_hosts if len(wwpn_set.intersection(host_obj.get( 'fc_ports', '').lower().split(','))) > 0] else: # fake connector created by nova host = [host_obj for host_obj in all_hosts if host_obj.get('name', '') == connector['host']] if len(host) == 1: if self._is_iscsi() and host[0].iscsi_chap_name: chap = (host[0].iscsi_chap_name, self._generate_chap_secret(host[0].iscsi_chap_name)) LOG.debug("_get_host: chap_name %(chap_name)s ", {'chap_name': host[0].iscsi_chap_name}) return self._get_bunch_from_host( connector, host[0].id, host[0].name, chap) LOG.debug("_get_host: returns None") return None @proxy._trace_time def _call_host_define(self, host, chap_name=None, chap_secret=None, domain_name=None): """Call host_define using XCLI.""" LOG.debug("host_define with domain: %s)", domain_name) if domain_name: if chap_name: return self._call_xiv_xcli( "host_define", host=host, iscsi_chap_name=chap_name, iscsi_chap_secret=chap_secret, domain=domain_name ).as_list[0] else: return self._call_xiv_xcli( "host_define", host=host, domain=domain_name ).as_list[0] else: # No domain if chap_name: return self._call_xiv_xcli( "host_define", host=host, iscsi_chap_name=chap_name, iscsi_chap_secret=chap_secret ).as_list[0] else: return self._call_xiv_xcli( "host_define", host=host ).as_list[0] @proxy._trace_time def _define_host_according_to_chap(self, host, in_domain): """Check on chap state and define host accordingly.""" chap_name = None 
chap_secret = None if (self._get_connection_type() == storage.XIV_CONNECTION_TYPE_ISCSI and self._get_chap_type() == storage.CHAP_ENABLED): host_bunch = {'name': host, 'chap': None, } chap = self._create_chap(host=host_bunch) chap_name = chap[0] chap_secret = chap[1] LOG.debug("_define_host_according_to_chap: " "%(name)s : %(secret)s", {'name': chap_name, 'secret': chap_secret}) return self._call_host_define( host=host, chap_name=chap_name, chap_secret=chap_secret, domain_name=in_domain) def _define_ports(self, host_bunch): """Defines ports in XIV.""" fc_targets = [] LOG.debug(host_bunch.get('name')) if self._get_connection_type() == storage.XIV_CONNECTION_TYPE_ISCSI: self._define_iscsi(host_bunch) else: fc_targets = self._define_fc(host_bunch) fc_targets = list(set(fc_targets)) fc_targets.sort(key=self._sort_last_digit) return fc_targets def _get_pool_domain(self, connector): pool_name = self._get_backend_pool() LOG.debug("pool name from configuration: %s", pool_name) domain = None try: domain = self._call_xiv_xcli( "pool_list", pool=pool_name).as_list[0].get('domain') LOG.debug("Pool's domain: %s", domain) except AttributeError: pass return domain @proxy._trace_time def _define_host(self, connector): """Defines a host in XIV.""" domain = self._get_pool_domain(connector) host_bunch = self._get_bunch_from_host(connector) host = self._call_xiv_xcli( "host_list", host=host_bunch['name']).as_list connection_type = self._get_connection_type() if len(host) == 0: LOG.debug("Non existing host, defining") host = self._define_host_according_to_chap( host=host_bunch['name'], in_domain=domain) host_bunch = self._get_bunch_from_host(connector, host.get('id')) else: host_bunch = self._get_bunch_from_host(connector, host[0].get('id')) LOG.debug("Generating hostname for connector %(conn)s", {'conn': connector}) generated_hostname = storage.get_host_or_create_from_iqn( connector, connection=connection_type) generated_host = self._call_xiv_xcli( "host_list", host=generated_hostname).as_list if len(generated_host) == 0: host = self._define_host_according_to_chap( host=generated_hostname, in_domain=domain) else: host = generated_host[0] host_bunch = self._get_bunch_from_host( connector, host.get('id'), host_name=generated_hostname) LOG.debug("The host_bunch: %s", host_bunch) return host_bunch @proxy._trace_time def _define_fc(self, host_bunch): """Define FC Connectivity.""" fc_targets = [] if len(host_bunch.get('wwpns')) > 0: connected_wwpns = [] for wwpn in host_bunch.get('wwpns'): component_ids = list(set( [p.component_id for p in self._call_xiv_xcli( "fc_connectivity_list", wwpn=wwpn.replace(":", ""))])) wwpn_fc_target_lists = [] for component in component_ids: wwpn_fc_target_lists += [fc_p.wwpn for fc_p in self._call_xiv_xcli( "fc_port_list", fcport=component)] LOG.debug("got %(tgts)s fc targets for wwpn %(wwpn)s", {'tgts': wwpn_fc_target_lists, 'wwpn': wwpn}) if len(wwpn_fc_target_lists) > 0: connected_wwpns += [wwpn] fc_targets += wwpn_fc_target_lists LOG.debug("adding fc port %s", wwpn) self._call_xiv_xcli( "host_add_port", host=host_bunch.get('name'), fcaddress=wwpn) if len(connected_wwpns) == 0: LOG.error(CONNECTIVITY_FC_NO_TARGETS) all_target_ports = self._get_all_target_ports() fc_targets = list(set([target.get('wwpn') for target in all_target_ports])) else: msg = _("No Fibre Channel HBA's are defined on the host.") LOG.error(msg) raise self._get_exception()(msg) return fc_targets @proxy._trace_time def _define_iscsi(self, host_bunch): """Add iscsi ports.""" if host_bunch.get('initiator'): 
LOG.debug("adding iscsi") self._call_xiv_xcli( "host_add_port", host=host_bunch.get('name'), iscsi_name=host_bunch.get('initiator')) else: msg = _("No iSCSI initiator found!") LOG.error(msg) raise self._get_exception()(msg) @proxy._trace_time def _event_service_start(self): """Send an event when cinder service starts.""" LOG.debug("send event SERVICE_STARTED") service_start_evnt_prop = { "openstack_version": self.meta['openstack_version'], "pool_name": self._get_backend_pool()} ev_mgr = events.EventsManager(self.ibm_storage_cli, OPENSTACK_PRODUCT_NAME, self.full_version) ev_mgr.send_event('SERVICE_STARTED', service_start_evnt_prop) @proxy._trace_time def _event_volume_attached(self): """Send an event when volume is attached to host.""" LOG.debug("send event VOLUME_ATTACHED") compute_host_name = socket.getfqdn() vol_attach_evnt_prop = { "openstack_version": self.meta['openstack_version'], "pool_name": self._get_backend_pool(), "compute_hostname": compute_host_name} ev_mgr = events.EventsManager(self.ibm_storage_cli, OPENSTACK_PRODUCT_NAME, self.full_version) ev_mgr.send_event('VOLUME_ATTACHED', vol_attach_evnt_prop) @proxy._trace_time def _build_initiator_target_map(self, fc_targets, connector): """Build the target_wwns and the initiator target map.""" init_targ_map = {} wwpns = connector.get('wwpns', []) for initiator in wwpns: init_targ_map[initiator] = fc_targets LOG.debug("_build_initiator_target_map: %(init_targ_map)s", {'init_targ_map': init_targ_map}) return init_targ_map @proxy._trace_time def _get_host_and_fc_targets(self, volume, connector): """Returns the host and its FC targets.""" LOG.debug("_get_host_and_fc_targets %(volume)s", {'volume': volume['name']}) fc_targets = [] host = self._get_host(connector) if not host: host = self._define_host(connector) fc_targets = self._define_ports(host) elif self._get_connection_type() == storage.XIV_CONNECTION_TYPE_FC: fc_targets = self._get_fc_targets(host) if len(fc_targets) == 0: LOG.error(CONNECTIVITY_FC_NO_TARGETS) raise self._get_exception()(CONNECTIVITY_FC_NO_TARGETS) return (fc_targets, host) def _vol_map_and_get_lun_id(self, volume, connector, host): """Maps volume to instance. Maps a volume to the nova volume node as host, and return the created lun id """ vol_name = volume['name'] LOG.debug("_vol_map_and_get_lun_id %(volume)s", {'volume': vol_name}) try: mapped_vols = self._call_xiv_xcli( "vol_mapping_list", vol=vol_name).as_dict('host') if host['name'] in mapped_vols: LOG.info("Volume '%(volume)s' was already attached to " "the host '%(host)s'.", {'host': host['name'], 'volume': volume['name']}) return int(mapped_vols[host['name']].lun) except errors.VolumeBadNameError: LOG.error("Volume not found. 
'%s'", volume['name']) raise self.meta['exception'].VolumeNotFound(volume_id=volume['id']) used_luns = [int(mapped.get('lun')) for mapped in self._call_xiv_xcli( "mapping_list", host=host['name']).as_list] luns = range(MIN_LUNID, MAX_LUNID) for lun_id in luns: if lun_id not in used_luns: self._call_xiv_xcli( "map_vol", lun=lun_id, host=host['name'], vol=vol_name) self._event_volume_attached() return lun_id msg = _("All free LUN IDs were already mapped.") LOG.error(msg) raise self._get_exception()(msg) @proxy._trace_time def _get_all_target_ports(self): all_target_ports = [] fc_port_list = self._call_xiv_xcli("fc_port_list") all_target_ports += ([t for t in fc_port_list if t.get('wwpn') != '0000000000000000' and t.get('role') == 'Target' and t.get('port_state') == 'Online']) return all_target_ports @proxy._trace_time def _get_fc_targets(self, host): """Get FC targets :host: A dictionary describing the host :returns: array of FC target WWPNs """ target_wwpns = [] all_target_ports = self._get_all_target_ports() if host: host_conect_list = self._call_xiv_xcli("host_connectivity_list", host=host.get('name')) for connection in host_conect_list: fc_port = connection.get('local_fc_port') target_wwpns += ( [target.get('wwpn') for target in all_target_ports if target.get('component_id') == fc_port]) if not target_wwpns: LOG.debug('No fc targets found accessible to host: %s. Return list' ' of all available FC targets', host) target_wwpns = ([target.get('wwpn') for target in all_target_ports]) fc_targets = list(set(target_wwpns)) fc_targets.sort(key=self._sort_last_digit) LOG.debug("fc_targets : %s", fc_targets) return fc_targets def _sort_last_digit(self, a): return a[-1:] @proxy._trace_time def _get_xcli(self, xcli, backend_id): """Wrapper around XCLI to ensure that connection is up.""" if self.meta['bypass_connection_check']: LOG.debug("_get_xcli(bypass mode)") else: if not xcli.is_connected(): xcli = self._init_xcli(backend_id) return xcli @proxy._trace_time def _call_xiv_xcli(self, method, *args, **kwargs): """Wrapper around XCLI to call active storage.""" self.ibm_storage_cli = self._get_xcli( self.ibm_storage_cli, self.active_backend_id) if self.ibm_storage_cli: LOG.info("_call_xiv_xcli #1: %s", method) else: LOG.debug("_call_xiv_xcli #2: %s", method) return getattr(self.ibm_storage_cli.cmd, method)(*args, **kwargs) @proxy._trace_time def _call_remote_xiv_xcli(self, method, *args, **kwargs): """Wrapper around XCLI to call remote storage.""" remote_id = self._get_secondary_backend_id() if not remote_id: raise self._get_exception()(_("No remote backend found.")) self.ibm_storage_remote_cli = self._get_xcli( self.ibm_storage_remote_cli, remote_id) LOG.debug("_call_remote_xiv_xcli: %s", method) return getattr(self.ibm_storage_remote_cli.cmd, method)( *args, **kwargs) def _verify_xiv_flags(self, address, user, password): """Verify that the XIV flags were passed.""" if not user or not password: raise self._get_exception()(_("No credentials found.")) if not address: raise self._get_exception()(_("No host found.")) def _get_connection_params(self, backend_id=strings.PRIMARY_BACKEND_ID): """Get connection parameters. 
returns a tuple containing address list, user, password, according to backend_id """ if not backend_id or backend_id == strings.PRIMARY_BACKEND_ID: if self._get_management_ips(): address = [e.strip(" ") for e in self.storage_info[ storage.FLAG_KEYS['management_ips']].split(",")] else: address = self.storage_info[storage.FLAG_KEYS['address']] user = self.storage_info[storage.FLAG_KEYS['user']] password = self.storage_info[storage.FLAG_KEYS['password']] else: params = self._get_target_params(backend_id) if not params: msg = (_("Missing target information for target '%(target)s'"), {'target': backend_id}) LOG.error(msg) raise self.meta['exception'].VolumeBackendAPIException( data=msg) if params.get('management_ips', None): address = [e.strip(" ") for e in params['management_ips'].split(",")] else: address = params['san_ip'] user = params['san_login'] password = params['san_password'] return (address, user, password) @proxy._trace_time def _init_xcli(self, backend_id=strings.PRIMARY_BACKEND_ID): """Initilize XCLI connection. returns an XCLIClient object """ try: address, user, password = self._get_connection_params(backend_id) except Exception as e: details = self._get_code_and_status_or_message(e) ex_details = (SETUP_BASE_ERROR, {'title': strings.TITLE, 'details': details}) LOG.error(ex_details) raise self.meta['exception'].InvalidParameterValue( (_("%(prefix)s %(ex_details)s") % {'prefix': storage.XIV_LOG_PREFIX, 'ex_details': ex_details})) self._verify_xiv_flags(address, user, password) try: clear_pass = cryptish.decrypt(password) except TypeError: ex_details = (SETUP_BASE_ERROR, {'title': strings.TITLE, 'details': "Invalid password."}) LOG.error(ex_details) raise self.meta['exception'].InvalidParameterValue( (_("%(prefix)s %(ex_details)s") % {'prefix': storage.XIV_LOG_PREFIX, 'ex_details': ex_details})) certs = certificate.CertificateCollector() path = certs.collect_certificate() try: LOG.debug('connect_multiendpoint_ssl with: %s', address) xcli = client.XCLIClient.connect_multiendpoint_ssl( user, clear_pass, address, ca_certs=path) except errors.CredentialsError: LOG.error(SETUP_BASE_ERROR, {'title': strings.TITLE, 'details': "Invalid credentials."}) raise self.meta['exception'].NotAuthorized() except (errors.ConnectionError, transports.ClosedTransportError): err_msg = (SETUP_INVALID_ADDRESS, {'address': address}) LOG.error(err_msg) raise self.meta['exception'].HostNotFound(host=err_msg) except Exception as er: err_msg = (SETUP_BASE_ERROR % {'title': strings.TITLE, 'details': er}) LOG.error(err_msg) raise self._get_exception()(err_msg) finally: certs.free_certificate() return xcli ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/drivers/ibm/ibm_storage/xiv_replication.py0000664000175000017500000003325000000000000026136 0ustar00zuulzuul00000000000000# Copyright (c) 2017 IBM Corporation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
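# Reader note: the replication helpers below wrap the pyxcli recovery
# managers (volume and consistency-group flavours) for the IBM XIV proxy.
# Asynchronous mirrors use one of the predefined schedules in
# Replication.async_rates; a comment-only sketch of the mapping performed by
# Rate._schedule_name_from_schedule and get_schedule_from_rpo:
#
#   rpo=120  -> schedule '00:01:00' -> schedule name 'cinder_00_01_00'
#   rpo=300  -> schedule '00:02:00' -> schedule name 'cinder_00_02_00'
#   rpo=600  -> schedule '00:05:00' -> schedule name 'cinder_00_05_00'
#   rpo=1200 -> schedule '00:10:00' -> schedule name 'cinder_00_10_00'
#
# The special schedule '00:00:20' is translated to the storage-side name
# 'min_interval'.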
# from oslo_log import log as logging from oslo_utils import importutils pyxcli = importutils.try_import("pyxcli") if pyxcli: from pyxcli import errors from pyxcli.mirroring import cg_recovery_manager from pyxcli.mirroring import errors as m_errors from pyxcli.mirroring import volume_recovery_manager from cinder.i18n import _ from cinder.volume.drivers.ibm.ibm_storage import strings SYNC = 'sync' ASYNC = 'async' LOG = logging.getLogger(__name__) class Rate(object): def __init__(self, rpo, schedule): self.rpo = rpo self.schedule = schedule self.schedule_name = self._schedule_name_from_schedule(self.schedule) def _schedule_name_from_schedule(self, schedule): if schedule == '00:00:20': return 'min_interval' return ("cinder_%(sched)s" % {'sched': schedule.replace(':', '_')}) class Replication(object): async_rates = ( Rate(rpo=120, schedule='00:01:00'), Rate(rpo=300, schedule='00:02:00'), Rate(rpo=600, schedule='00:05:00'), Rate(rpo=1200, schedule='00:10:00'), ) def __init__(self, proxy): self.proxy = proxy @staticmethod def get_schedule_from_rpo(rpo): schedule = [rate for rate in Replication.async_rates if rate.rpo == rpo][0].schedule_name if schedule: LOG.debug('schedule %(sched)s: for rpo %(rpo)s', {'sched': schedule, 'rpo': rpo}) else: LOG.error('Failed to find schedule for rpo %(rpo)s', {'rpo': rpo}) return schedule @staticmethod def get_supported_rpo(): return [rate.rpo for rate in Replication.async_rates] def get_recovery_mgr(self): # Recovery manager is set in derived classes raise NotImplementedError def get_remote_recovery_mgr(self): # Recovery manager is set in derived classes raise NotImplementedError def replication_create_mirror(self, resource, replication_info, target, pool): raise NotImplementedError @staticmethod def extract_replication_info_from_specs(specs): info = {'enabled': False, 'mode': None, 'rpo': 0} msg = "" if specs: LOG.debug('extract_replication_info_from_specs: specs %(specs)s', {'specs': specs}) info['enabled'] = ( specs.get('replication_enabled', '').upper() in (u'TRUE', strings.METADATA_IS_TRUE) or specs.get('group_replication_enabled', '').upper() in (u'TRUE', strings.METADATA_IS_TRUE)) replication_type = specs.get('replication_type', SYNC).lower() if replication_type in (u'sync', u' sync'): info['mode'] = SYNC elif replication_type in (u'async', u' async'): info['mode'] = ASYNC else: msg = (_("Unsupported replication mode %(mode)s") % {'mode': replication_type}) return None, msg info['rpo'] = int(specs.get('rpo', u' 0')[5:]) supported_rpos = Replication.get_supported_rpo() if info['rpo'] and info['rpo'] not in supported_rpos: msg = (_("Unsupported replication RPO %(rpo)s"), {'rpo': info['rpo']}) return None, msg LOG.debug('extract_replication_info_from_specs: info %(info)s', {'info': info}) return info, msg def failover(self, resource, failback): raise NotImplementedError def create_replication(self, resource_name, replication_info): LOG.debug('Replication::create_replication replication_info %(rep)s', {'rep': replication_info}) target, params = self.proxy._get_replication_target_params() LOG.info('Target %(target)s: %(params)s', {'target': target, 'params': str(params)}) try: pool = params['san_clustername'] except Exception: msg = (_("Missing pool information for target '%(target)s'") % {'target': target}) LOG.error(msg) raise self.proxy.meta['exception'].VolumeBackendAPIException( data=msg) self.replication_create_mirror(resource_name, replication_info, target, pool) def delete_replication(self, resource_name, replication_info): 
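"""Deactivate the mirror for resource_name and then delete it.

Both steps go through the recovery manager; failures are re-raised as
VolumeBackendAPIException.
"""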
LOG.debug('Replication::delete_replication replication_info %(rep)s', {'rep': replication_info}) recovery_mgr = self.get_recovery_mgr() try: recovery_mgr.deactivate_mirror(resource_id=resource_name) except Exception as e: details = self.proxy._get_code_and_status_or_message(e) msg = (_("Failed ending replication for %(resource)s: " "'%(details)s'") % {'resource': resource_name, 'details': details}) LOG.error(msg) raise self.proxy.meta['exception'].VolumeBackendAPIException( data=msg) try: recovery_mgr.delete_mirror(resource_id=resource_name) except Exception as e: details = self.proxy._get_code_and_status_or_message(e) msg = (_("Failed deleting replica for %(resource)s: " "'%(details)s'") % {'resource': resource_name, 'details': details}) LOG.error(msg) raise self.proxy.meta['exception'].VolumeBackendAPIException( data=msg) def _failover_resource(self, resource, recovery_mgr, failover_rep_mgr, rep_type, failback): # check if mirror is defined and active LOG.debug('Check if mirroring is active on %(res)s', {'res': resource['name']}) try: active = recovery_mgr.is_mirror_active( resource_id=resource['name']) except Exception: active = False state = 'active' if active else 'inactive' LOG.debug('Mirroring is %(state)s', {'state': state}) # In case of failback, mirroring must be active # In case of failover we attempt to move in any condition if failback and not active: msg = ("%(rep_type)s %(res)s: no active mirroring and can not " "failback" % {'rep_type': rep_type, 'res': resource['name']}) LOG.error(msg) return False, msg try: if rep_type == 'cg': resource['name'] = self.proxy._cg_name_from_group(resource) recovery_mgr.switch_roles(resource_id=resource['name']) return True, None except Exception as e: # failed attempt to switch_roles from the master details = self.proxy._get_code_and_status_or_message(e) LOG.warning('Failed to perform switch_roles on' ' %(res)s: %(err)s. 
' 'Continue to change_role', {'res': resource['name'], 'err': details}) try: # this is the ugly stage we come to brute force if failback: role = 'Slave' else: role = 'Master' LOG.warning('Attempt to change_role to %(role)s', {'role': role}) failover_rep_mgr.change_role(resource_id=resource['name'], new_role=role) return True, None except m_errors.NoMirrorDefinedError as e: details = self.proxy._get_code_and_status_or_message(e) msg = ("%(rep_type)s %(res)s no replication defined: %(err)s" % {'rep_type': rep_type, 'res': resource['name'], 'err': details}) LOG.error(msg) return False, msg except Exception as e: details = self.proxy._get_code_and_status_or_message(e) msg = ('%(rep_type)s %(res)s change_role failed: %(err)s' % {'rep_type': rep_type, 'res': resource['name'], 'err': details}) LOG.error(msg) return False, msg class VolumeReplication(Replication): def __init__(self, proxy): super(VolumeReplication, self).__init__(proxy) def get_recovery_mgr(self): return volume_recovery_manager.VolumeRecoveryManager( False, self.proxy.ibm_storage_cli) def get_remote_recovery_mgr(self): return volume_recovery_manager.VolumeRecoveryManager( True, self.proxy.ibm_storage_remote_cli) def replication_create_mirror(self, resource_name, replication_info, target, pool): LOG.debug('VolumeReplication::replication_create_mirror') schedule = None if replication_info['rpo']: schedule = Replication.get_schedule_from_rpo( replication_info['rpo']) try: recovery_mgr = self.get_recovery_mgr() recovery_mgr.create_mirror( resource_name=resource_name, target_name=target, mirror_type=replication_info['mode'], slave_resource_name=resource_name, create_slave='yes', remote_pool=pool, rpo=replication_info['rpo'], schedule=schedule, activate_mirror='yes') except errors.RemoteVolumeExists: # if volume exists (same ID), don't create slave # This only happens when vol is a part of a cg recovery_mgr.create_mirror( resource_name=resource_name, target_name=target, mirror_type=replication_info['mode'], slave_resource_name=resource_name, create_slave='no', remote_pool=pool, rpo=replication_info['rpo'], schedule=schedule, activate_mirror='yes') except errors.VolumeMasterError: LOG.debug('Volume %(vol)s has been already mirrored', {'vol': resource_name}) except Exception as e: details = self.proxy._get_code_and_status_or_message(e) msg = (_("Failed replication for %(resource)s: '%(details)s'") % {'resource': resource_name, 'details': details}) LOG.error(msg) raise self.proxy.meta['exception'].VolumeBackendAPIException( data=msg) def failover(self, resource, failback): """Failover a single volume. Attempts to failover a single volume Sequence: 1. attempt to switch roles from master 2. 
attempt to change role to master on secondary returns (success, failure_reason) """ LOG.debug("VolumeReplication::failover %(vol)s", {'vol': resource['name']}) recovery_mgr = self.get_recovery_mgr() remote_recovery_mgr = self.get_remote_recovery_mgr() return self._failover_resource(resource, recovery_mgr, remote_recovery_mgr, 'vol', failback) class GroupReplication(Replication): def __init__(self, proxy): super(GroupReplication, self).__init__(proxy) def get_recovery_mgr(self): return cg_recovery_manager.CGRecoveryManager( False, self.proxy.ibm_storage_cli) def get_remote_recovery_mgr(self): return cg_recovery_manager.CGRecoveryManager( True, self.proxy.ibm_storage_remote_cli) def replication_create_mirror(self, resource_name, replication_info, target, pool): LOG.debug('GroupReplication::replication_create_mirror') schedule = None if replication_info['rpo']: schedule = Replication.get_schedule_from_rpo( replication_info['rpo']) try: recovery_mgr = self.get_recovery_mgr() recovery_mgr.create_mirror( resource_name=resource_name, target_name=target, mirror_type=replication_info['mode'], slave_resource_name=resource_name, rpo=replication_info['rpo'], schedule=schedule, activate_mirror='yes') except Exception as e: details = self.proxy._get_code_and_status_or_message(e) msg = (_("Failed replication for %(resource)s: '%(details)s'"), {'resource': resource_name, 'details': details}) LOG.error(msg) raise self.proxy.meta['exception'].VolumeBackendAPIException( data=msg) def failover(self, resource, failback): LOG.debug("GroupReplication::failover %(cg)s", {'cg': resource['name']}) recovery_mgr = self.get_recovery_mgr() remote_recovery_mgr = self.get_remote_recovery_mgr() return self._failover_resource(resource, recovery_mgr, remote_recovery_mgr, 'cg', failback) ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1759315577.367121 cinder-27.0.0/cinder/volume/drivers/ibm/storwize_svc/0000775000175000017500000000000000000000000022630 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/drivers/ibm/storwize_svc/__init__.py0000664000175000017500000000000000000000000024727 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/drivers/ibm/storwize_svc/replication.py0000664000175000017500000004205700000000000025523 0ustar00zuulzuul00000000000000# Copyright 2014 IBM Corp. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
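# Reader note: this module provides the Storwize/SVC replication helpers:
# StorwizeSVCReplicationGlobalMirror (asynchronous), the Metro Mirror
# subclass (synchronous) and the GMCV variant (Global Mirror with Change
# Volumes). Remote resources follow the prefixes defined in storwize_const;
# a comment-only sketch, with 'volume-1234' as a placeholder volume name:
#
#   primary volume              volume-1234
#   auxiliary volume            aux_volume-1234      (REPLICA_AUX_VOL_PREFIX)
#   source change volume        chg_volume-1234      (REPLICA_CHG_VOL_PREFIX)
#   auxiliary change volume     chg_aux_volume-1234  (GMCV only)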
# import random from eventlet import greenthread from oslo_concurrency import processutils from oslo_log import log as logging from oslo_utils import excutils from cinder import exception from cinder.i18n import _ from cinder import ssh_utils from cinder import utils from cinder.volume.drivers.ibm.storwize_svc import storwize_const from cinder.volume import volume_utils LOG = logging.getLogger(__name__) class StorwizeSVCReplication(object): def __init__(self, driver, replication_target=None): self.driver = driver self.target = replication_target or {} @volume_utils.trace def failover_volume_host(self, context, vref): # Make the aux volume writeable. try: tgt_volume = storwize_const.REPLICA_AUX_VOL_PREFIX + vref.name self.target_helpers.stop_relationship(tgt_volume, access=True) try: self.target_helpers.start_relationship(tgt_volume, 'aux') except exception.VolumeBackendAPIException as e: LOG.error('Error running startrcrelationship due to %(err)s.', {'err': e}) return except Exception as e: msg = (_('Unable to fail-over the volume %(id)s to the ' 'secondary back-end, error: %(error)s') % {"id": vref['id'], "error": str(e)}) LOG.exception(msg) raise exception.VolumeDriverException(message=msg) @volume_utils.trace def replication_failback(self, volume): tgt_volume = storwize_const.REPLICA_AUX_VOL_PREFIX + volume['name'] rel_info = self.target_helpers.get_relationship_info(tgt_volume) if rel_info: try: self.target_helpers.stop_relationship(tgt_volume, access=True) self.target_helpers.start_relationship(tgt_volume, 'master') return except Exception as e: msg = (_('Unable to fail-back the volume: %(vol)s to the ' 'master back-end, error: %(error)s') % {"vol": volume['name'], "error": str(e)}) LOG.exception(msg) raise exception.VolumeDriverException(message=msg) def volume_replication_setup(self, context, vref): pass class StorwizeSVCReplicationGlobalMirror(StorwizeSVCReplication): """Support for Storwize/SVC global mirror mode replication. Global Mirror establishes a Global Mirror relationship between two volumes of equal size. The volumes in a Global Mirror relationship are referred to as the master (source) volume and the auxiliary (target) volume. This mode is dedicated to the asynchronous volume replication. """ asyncmirror = True def __init__(self, driver, replication_target=None, target_helpers=None): super(StorwizeSVCReplicationGlobalMirror, self).__init__( driver, replication_target) self.target_helpers = target_helpers def volume_replication_setup(self, context, vref): LOG.debug('enter: volume_replication_setup: volume %s', vref['name']) target_vol_name = storwize_const.REPLICA_AUX_VOL_PREFIX + vref['name'] try: opts = self.driver._get_vdisk_params(vref['volume_type_id']) pool = self.target.get('pool_name') src_attr = self.driver._helpers.get_vdisk_attributes( vref['name']) opts['iogrp'] = src_attr['IO_group_id'] opts['mirror_pool'] = None try: self.target_helpers.create_vdisk(target_vol_name, str(vref['size']), 'gb', pool, opts) except exception.VolumeBackendAPIException as excp: if "CMMVC6035E" in excp.msg: LOG.info('Target Volume: %(vol)s already exists', {'vol': target_vol_name}) target_system_id = self.driver._aux_state['system_id'] self.driver._helpers.create_relationship( vref['name'], target_vol_name, target_system_id, self.asyncmirror) except Exception as e: msg = (_("Unable to set up mirror mode replication for %(vol)s. 
" "Exception: %(err)s.") % {'vol': vref['id'], 'err': e}) LOG.exception(msg) raise exception.VolumeDriverException(message=msg) LOG.debug('leave: volume_replication_setup:volume %s', vref['name']) class StorwizeSVCReplicationMetroMirror( StorwizeSVCReplicationGlobalMirror): """Support for Storwize/SVC metro mirror mode replication. Metro Mirror establishes a Metro Mirror relationship between two volumes of equal size. The volumes in a Metro Mirror relationship are referred to as the master (source) volume and the auxiliary (target) volume. """ asyncmirror = False def __init__(self, driver, replication_target=None, target_helpers=None): super(StorwizeSVCReplicationMetroMirror, self).__init__( driver, replication_target, target_helpers) class StorwizeSVCReplicationGMCV(StorwizeSVCReplicationGlobalMirror): """Support for Storwize/SVC GMCV mode replication. Global Mirror with Change Volumes(GMCV) provides asynchronous replication based on point-in-time copies of data. The volumes in a GMCV relationship are referred to as the master (source) volume, master change volume, the auxiliary (target) volume and auxiliary change volume. """ asyncmirror = True def __init__(self, driver, replication_target=None, target_helpers=None): super(StorwizeSVCReplicationGMCV, self).__init__( driver, replication_target, target_helpers) def volume_replication_setup(self, context, vref, new_type=None): LOG.debug('enter: volume_replication_setup: volume %s', vref['name']) source_change_vol_name = (storwize_const.REPLICA_CHG_VOL_PREFIX + vref['name']) target_vol_name = storwize_const.REPLICA_AUX_VOL_PREFIX + vref['name'] target_change_vol_name = (storwize_const.REPLICA_CHG_VOL_PREFIX + target_vol_name) try: if new_type: new_type_opts = self.driver._get_vdisk_params( new_type['id'], volume_type=new_type) src_attr = self.driver._helpers.get_vdisk_attributes( vref['name']) # Source change volume creation src_change_opts = self.driver._get_vdisk_params( vref['volume_type_id']) src_change_opts['iogrp'] = src_attr['IO_group_id'] # Change volumes would usually be thin-provisioned src_change_opts['autoexpand'] = True src_change_pool = src_attr['mdisk_grp_name'] if new_type: src_child_pool = ( new_type_opts['storwize_svc_src_child_pool']) else: src_child_pool = ( src_change_opts['storwize_svc_src_child_pool']) if src_child_pool: src_change_pool = src_child_pool try: self.driver._helpers.create_vdisk(source_change_vol_name, str(vref['size']), 'gb', src_change_pool, src_change_opts) except exception.VolumeBackendAPIException as excp: if "CMMVC6035E" in excp.msg: msg = ('Source change volume: %s already exists' % source_change_vol_name) LOG.info(msg) # Target volume creation target_opts = self.driver._get_vdisk_params( vref['volume_type_id']) target_pool = self.target.get('pool_name') target_opts['iogrp'] = src_attr['IO_group_id'] try: self.target_helpers.create_vdisk(target_vol_name, str(vref['size']), 'gb', target_pool, target_opts) except exception.VolumeBackendAPIException as excp: if "CMMVC6035E" in excp.msg: msg = ('Target Volume: %s already exists' % target_vol_name) LOG.info(msg) # Target change volume creation target_change_opts = self.driver._get_vdisk_params( vref['volume_type_id']) target_change_pool = self.target.get('pool_name') if new_type: target_child_pool = ( new_type_opts['storwize_svc_target_child_pool']) else: target_child_pool = ( target_change_opts['storwize_svc_target_child_pool']) if target_child_pool: target_change_pool = target_child_pool target_change_opts['iogrp'] = src_attr['IO_group_id'] # Change 
Volumes would usually be thin-provisioned target_change_opts['autoexpand'] = True try: self.target_helpers.create_vdisk(target_change_vol_name, str(vref['size']), 'gb', target_change_pool, target_change_opts) except exception.VolumeBackendAPIException as excp: if "CMMVC6035E" in excp.msg: msg = ('Target Change Volume: %s already exists' % target_change_vol_name) LOG.info(msg) target_system_id = self.driver._aux_state['system_id'] # Get cycle_period_seconds src_change_opts = self.driver._get_vdisk_params( vref['volume_type_id']) cycle_period_seconds = src_change_opts.get('cycle_period_seconds') rc_name = self.driver._helpers.create_relationship( vref['name'], target_vol_name, target_system_id, self.asyncmirror, True, source_change_vol_name, cycle_period_seconds) # Set target change volume self.target_helpers.change_relationship_changevolume( target_vol_name, target_change_vol_name, False, rc_name) # Start gmcv relationship self.driver._helpers.start_relationship(vref['name'], rcrel=rc_name) except Exception as e: msg = (_("Unable to set up gmcv mode replication for %(vol)s. " "Exception: %(err)s.") % {'vol': vref['id'], 'err': str(e)}) LOG.exception(msg) raise exception.VolumeDriverException(message=msg) LOG.debug('leave: volume_replication_setup:volume %s', vref['name']) class StorwizeSVCReplicationManager(object): def __init__(self, driver, replication_target=None, target_helpers=None): self.sshpool = None self.driver = driver self.target = replication_target self.target_helpers = target_helpers(self._run_ssh) self._master_helpers = self.driver._master_backend_helpers self.global_m = StorwizeSVCReplicationGlobalMirror( self.driver, replication_target, self.target_helpers) self.metro_m = StorwizeSVCReplicationMetroMirror( self.driver, replication_target, self.target_helpers) self.gmcv = StorwizeSVCReplicationGMCV( self.driver, replication_target, self.target_helpers) def _run_ssh(self, cmd_list, check_exit_code=True, attempts=1): utils.check_ssh_injection(cmd_list) # TODO(vhou): We'll have a common method in ssh_utils to take # care of this _run_ssh method. command = ' '. 
join(cmd_list) if not self.sshpool: self.sshpool = ssh_utils.SSHPool( self.target.get('san_ip'), self.target.get('san_ssh_port', 22), self.target.get('ssh_conn_timeout', 30), self.target.get('san_login'), password=self.target.get('san_password'), privatekey=self.target.get('san_private_key', ''), min_size=self.target.get('ssh_min_pool_conn', 1), max_size=self.target.get('ssh_max_pool_conn', 5),) last_exception = None try: with self.sshpool.item() as ssh: while attempts > 0: attempts -= 1 try: return processutils.ssh_execute( ssh, command, check_exit_code=check_exit_code) except Exception as e: LOG.error(str(e)) last_exception = e greenthread.sleep(random.randint(20, 500) / 100.0) try: raise processutils.ProcessExecutionError( exit_code=last_exception.exit_code, stdout=last_exception.stdout, stderr=last_exception.stderr, cmd=last_exception.cmd) except AttributeError: raise processutils.ProcessExecutionError( exit_code=-1, stdout="", stderr="Error running SSH command", cmd=command) except Exception: with excutils.save_and_reraise_exception(): LOG.error("Error running SSH command: %s", command) def get_target_helpers(self): return self.target_helpers def get_replica_obj(self, rep_type): if rep_type == storwize_const.GLOBAL: return self.global_m elif rep_type == storwize_const.METRO: return self.metro_m elif rep_type == storwize_const.GMCV: return self.gmcv else: return None def _partnership_validate_create(self, client, remote_name, remote_ip): try: partnership_info = client.get_partnership_info( remote_name) if not partnership_info: candidate_info = client.get_partnershipcandidate_info( remote_name) if candidate_info: client.mkfcpartnership(remote_name) else: client.mkippartnership(remote_ip) except Exception: msg = (_('Unable to establish the partnership with ' 'the Storwize cluster %s.'), remote_name) LOG.error(msg) raise exception.VolumeDriverException(message=msg) def _partnership_start(self, client, remote_name): try: partnership_info = client.get_partnership_info( remote_name) if (partnership_info and partnership_info['partnership'] != 'fully_configured'): client.chpartnership(partnership_info['id']) except Exception: msg = (_('Unable to start the partnership with ' 'the Storwize cluster %s.'), remote_name) LOG.error(msg) raise exception.VolumeDriverException(message=msg) def establish_target_partnership(self): local_system_info = self._master_helpers.get_system_info() target_system_info = self.target_helpers.get_system_info() local_system_name = local_system_info['system_name'] target_system_name = target_system_info['system_name'] local_ip = self.driver.configuration.safe_get('san_ip') target_ip = self.target.get('san_ip') # Establish partnership only when the local system and the replication # target system is different. if target_system_name != local_system_name: self._partnership_validate_create(self._master_helpers, target_system_name, target_ip) self._partnership_validate_create(self.target_helpers, local_system_name, local_ip) self._partnership_start(self._master_helpers, target_system_name) self._partnership_start(self.target_helpers, local_system_name) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/drivers/ibm/storwize_svc/storwize_const.py0000664000175000017500000000367400000000000026310 0ustar00zuulzuul00000000000000# Copyright 2016 IBM Corp. # All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # # product id is 2145 for SVC 6.1.0+. no product id for older version. DEV_MODEL_SVC = '2145' DEV_MODEL_STORWIZE = '2076' DEV_MODEL_STORWIZE_V3500 = '2071' DEV_MODEL_STORWIZE_V5000E = '2072' DEV_MODEL_STORWIZE_V7000 = '2076' DEV_MODEL_STORWIZE_V5000 = '2078' DEV_MODEL_STORWIZE_V5000_1YR = '2077' DEV_MODEL_FLASH_V9000 = '9846' DEV_MODEL_FLEX = '4939' REP_CAP_DEVS = (DEV_MODEL_SVC, DEV_MODEL_STORWIZE, DEV_MODEL_STORWIZE_V5000, DEV_MODEL_STORWIZE_V5000_1YR, DEV_MODEL_FLASH_V9000, DEV_MODEL_FLEX, DEV_MODEL_STORWIZE_V5000E) # constants used for replication GLOBAL = 'global' METRO = 'metro' GMCV = 'gmcv' GMCV_MULTI = 'multi' VALID_REP_TYPES = (GLOBAL, METRO, GMCV) FAILBACK_VALUE = 'default' DEFAULT_RC_TIMEOUT = 3600 * 24 * 7 DEFAULT_RC_INTERVAL = 5 DEFAULT_RCCG_TIMEOUT = 60 * 30 DEFAULT_RCCG_INTERVAL = 2 REPLICA_AUX_VOL_PREFIX = 'aux_' REPLICA_CHG_VOL_PREFIX = 'chg_' RCCG_PREFIX = 'rccg-' HYPERCG_PREFIX = 'hycg-' VG_PREFIX = 'vg-' VG_SNAPSHOT_PREFIX = 'vg_snap-' # remote mirror copy status REP_CONSIS_SYNC = 'consistent_synchronized' REP_CONSIS_COPYING = 'consistent_copying' REP_CONSIS_STOP = 'consistent_stopped' REP_SYNC = 'synchronized' REP_IDL = 'idling' REP_IDL_DISC = 'idling_disconnected' REP_STATUS_ON_LINE = 'online' # IOThrottling types MBPS = 'mbps' IOPS = 'iops' IOPS_PER_GB = 'iops_per_gb' ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/drivers/ibm/storwize_svc/storwize_svc_common.py0000664000175000017500000125317500000000000027331 0ustar00zuulzuul00000000000000# Copyright 2015 IBM Corp. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
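# Reader note: storwize_svc_opts below defines the backend configuration
# options and registers them under configuration.SHARED_CONF_GROUP. A
# minimal, illustrative cinder.conf fragment (the section name, credentials
# and addresses are placeholders, not values taken from this file):
#
#   [storwize-backend-1]
#   san_ip = <management address>
#   san_login = <user name>
#   san_password = <password>
#   storwize_svc_volpool_name = volpool
#   storwize_svc_vol_iogrp = 0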
# import math import random import re import time import unicodedata from eventlet import greenthread from oslo_concurrency import processutils from oslo_config import cfg from oslo_log import log as logging from oslo_serialization import jsonutils as json from oslo_service import loopingcall from oslo_utils import encodeutils from oslo_utils import excutils from oslo_utils import strutils from oslo_utils import units import paramiko from cinder import context from cinder import exception from cinder.i18n import _ from cinder import objects from cinder.objects import fields from cinder import ssh_utils from cinder import utils as cinder_utils from cinder.volume import configuration from cinder.volume import driver from cinder.volume.drivers.ibm.storwize_svc import ( replication as storwize_rep) from cinder.volume.drivers.ibm.storwize_svc import storwize_const from cinder.volume.drivers.san import san from cinder.volume import qos_specs from cinder.volume import volume_types from cinder.volume import volume_utils INTERVAL_1_SEC = 1 DEFAULT_TIMEOUT = 15 CMMVC5753E = "CMMVC5753E" LOG = logging.getLogger(__name__) storwize_svc_opts = [ cfg.ListOpt('storwize_svc_volpool_name', default=['volpool'], help='Comma separated list of storage system storage ' 'pools for volumes.'), cfg.IntOpt('storwize_svc_vol_rsize', default=2, min=-1, max=100, help='Storage system space-efficiency parameter for volumes ' '(percentage)'), cfg.IntOpt('storwize_svc_vol_warning', default=0, min=-1, max=100, help='Storage system threshold for volume capacity warnings ' '(percentage)'), cfg.BoolOpt('storwize_svc_vol_autoexpand', default=True, help='Storage system autoexpand parameter for volumes ' '(True/False)'), cfg.IntOpt('storwize_svc_vol_grainsize', default=256, help='Storage system grain size parameter for volumes ' '(8/32/64/128/256)'), cfg.BoolOpt('storwize_svc_vol_compression', default=False, help='Storage system compression option for volumes'), cfg.BoolOpt('storwize_svc_vol_easytier', default=True, help='Enable Easy Tier for volumes'), cfg.StrOpt('storwize_svc_vol_iogrp', default='0', help='The I/O group in which to allocate volumes. It can be a ' 'comma-separated list in which case the driver will select an ' 'io_group based on least number of volumes associated with the ' 'io_group.'), cfg.IntOpt('storwize_svc_flashcopy_timeout', default=120, min=1, max=600, help='Maximum number of seconds to wait for FlashCopy to be ' 'prepared.'), cfg.BoolOpt('storwize_svc_multihostmap_enabled', default=True, help='This option no longer has any affect. It is deprecated ' 'and will be removed in the next release.', deprecated_for_removal=True), cfg.BoolOpt('storwize_svc_allow_tenant_qos', default=False, help='Allow tenants to specify QOS on create'), cfg.StrOpt('storwize_svc_stretched_cluster_partner', default=None, help='If operating in stretched cluster mode, specify the ' 'name of the pool in which mirrored copies are stored.' 'Example: "pool2"'), cfg.StrOpt('storwize_san_secondary_ip', default=None, help='Specifies secondary management IP or hostname to be ' 'used if san_ip is invalid or becomes inaccessible.'), cfg.BoolOpt('storwize_svc_vol_nofmtdisk', default=False, help='Specifies that the volume not be formatted during ' 'creation.'), cfg.IntOpt('storwize_svc_flashcopy_rate', default=50, min=1, max=150, help='Specifies the Storwize FlashCopy copy rate to be used ' 'when creating a full volume copy. 
The default is rate ' 'is 50, and the valid rates are 1-150.'), cfg.IntOpt('storwize_svc_clean_rate', default=50, min=0, max=150, help='Specifies the Storwize cleaning rate for the mapping. ' 'The default rate is 50, and the valid rates are ' '0-150.'), cfg.StrOpt('storwize_svc_mirror_pool', default=None, help='Specifies the name of the pool in which mirrored copy ' 'is stored. Example: "pool2"'), cfg.StrOpt('storwize_svc_aux_mirror_pool', default=None, help='Specifies the name of the pool in which mirrored copy ' 'is stored for aux volume. Example: "pool2"'), cfg.StrOpt('storwize_portset', default=None, help='Specifies the name of the portset in which ' 'the host is to be created.'), cfg.StrOpt('storwize_svc_src_child_pool', default=None, help='Specifies the name of the source child pool in which ' 'global mirror source change volume is stored.'), cfg.StrOpt('storwize_svc_target_child_pool', default=None, help='Specifies the name of the target child pool in which ' 'global mirror auxiliary change volume is stored.'), cfg.StrOpt('storwize_peer_pool', default=None, help='Specifies the name of the peer pool for hyperswap ' 'volume, the peer pool must exist on the other site.'), cfg.DictOpt('storwize_preferred_host_site', default={}, help='Specifies the site information for host. ' 'One WWPN or multi WWPNs used in the host can be ' 'specified. For example: ' 'storwize_preferred_host_site=site1:wwpn1,' 'site2:wwpn2&wwpn3 or ' 'storwize_preferred_host_site=site1:iqn1,site2:iqn2'), cfg.IntOpt('cycle_period_seconds', default=300, min=60, max=86400, help='This defines an optional cycle period that applies to ' 'Global Mirror relationships with a cycling mode of multi. ' 'A Global Mirror relationship using the multi cycling_mode ' 'performs a complete cycle at most once each period. ' 'The default is 300 seconds, and the valid seconds ' 'are 60-86400.'), cfg.BoolOpt('storwize_svc_retain_aux_volume', default=False, help='Enable or disable retaining of aux volume on secondary ' 'storage during delete of the volume on primary storage ' 'or moving the primary volume from mirror to non-mirror ' 'with replication enabled. 
This option is valid for ' 'Storage Virtualize Family.'), cfg.BoolOpt('migrate_from_flashcopy', default=True, help='Parameter to allow or prevent volumes with legacy ' 'FlashCopy mappings to be part of volume_group_enabled ' 'and temporary_volume_group_enabled groups.'), ] CONF = cfg.CONF CONF.register_opts(storwize_svc_opts, group=configuration.SHARED_CONF_GROUP) class StorwizeSSH(object): """SSH interface to IBM Storwize family and SVC storage systems.""" def __init__(self, run_ssh): self._ssh = run_ssh def _run_ssh(self, ssh_cmd): try: return self._ssh(ssh_cmd) except processutils.ProcessExecutionError as e: msg = (_('CLI Exception output:\n command: %(cmd)s\n ' 'stdout: %(out)s\n stderr: %(err)s.') % {'cmd': ssh_cmd, 'out': e.stdout, 'err': e.stderr}) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) def run_ssh_info(self, ssh_cmd, delim='!', with_header=False): """Run an SSH command and return parsed output.""" raw = self._run_ssh(ssh_cmd) return CLIResponse(raw, ssh_cmd=ssh_cmd, delim=delim, with_header=with_header) def run_ssh_assert_no_output(self, ssh_cmd, log_cmd=None): """Run an SSH command and assert no output returned.""" out, err = self._run_ssh(ssh_cmd) if len(out.strip()) != 0: if not log_cmd: log_cmd = ' '.join(ssh_cmd) msg = (_('Expected no output from CLI command %(cmd)s, ' 'got %(out)s.') % {'cmd': log_cmd, 'out': out}) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) def run_ssh_check_created(self, ssh_cmd): """Run an SSH command and return the ID of the created object.""" out, err = self._run_ssh(ssh_cmd) try: match_obj = re.search(r'\[([0-9]+)\],? successfully created', out) return match_obj.group(1) except (AttributeError, IndexError): msg = (_('Failed to parse CLI output:\n command: %(cmd)s\n ' 'stdout: %(out)s\n stderr: %(err)s.') % {'cmd': ssh_cmd, 'out': out, 'err': err}) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) def lsnode(self, node_id=None): with_header = True ssh_cmd = ['svcinfo', 'lsnode', '-delim', '!'] if node_id: with_header = False ssh_cmd.append(node_id) return self.run_ssh_info(ssh_cmd, with_header=with_header) def lslicense(self): ssh_cmd = ['svcinfo', 'lslicense', '-delim', '!'] return self.run_ssh_info(ssh_cmd)[0] def lsguicapabilities(self): ssh_cmd = ['svcinfo', 'lsguicapabilities', '-delim', '!'] return self.run_ssh_info(ssh_cmd)[0] def lssystem(self): ssh_cmd = ['svcinfo', 'lssystem', '-delim', '!'] return self.run_ssh_info(ssh_cmd)[0] def lsmdiskgrp(self, pool): ssh_cmd = ['svcinfo', 'lsmdiskgrp', '-bytes', '-delim', '!', '"%s"' % pool] try: return self.run_ssh_info(ssh_cmd)[0] except exception.VolumeBackendAPIException as ex: LOG.warning("Failed to get pool %(pool)s info. 
" "Exception: %(ex)s.", {'pool': pool, 'ex': ex}) return None def lsiogrp(self): ssh_cmd = ['svcinfo', 'lsiogrp', '-delim', '!'] return self.run_ssh_info(ssh_cmd, with_header=True) def lsportip(self): ssh_cmd = ['svcinfo', 'lsportip', '-delim', '!'] return self.run_ssh_info(ssh_cmd, with_header=True) @staticmethod def _create_port_arg(port_type, port_name): if port_type == 'initiator': port = ['-iscsiname'] else: port = ['-hbawwpn'] port.append(port_name) return port def mkhost(self, host_name, port_type, port_name, site=None, portset=None): port = self._create_port_arg(port_type, port_name) ssh_cmd = ['svctask', 'mkhost', '-force'] + port if site: ssh_cmd += ['-site', '"%s"' % site] if portset: ssh_cmd += ['-portset', '"%s"' % portset] ssh_cmd += ['-name', '"%s"' % host_name] return self.run_ssh_check_created(ssh_cmd) def addhostport(self, host, port_type, port_name): port = self._create_port_arg(port_type, port_name) ssh_cmd = ['svctask', 'addhostport', '-force'] + port + ['"%s"' % host] self.run_ssh_assert_no_output(ssh_cmd) def addhostiogrp(self, host, iogrplist='all'): ssh_cmd = ['svctask', 'addhostiogrp'] if iogrplist == 'all': ssh_cmd += ['-iogrpall', '"%s"' % host] else: ssh_cmd += ['-iogrp', ':'.join(iogrplist), '"%s"' % host] self.run_ssh_assert_no_output(ssh_cmd) def lshost(self, host=None): with_header = True ssh_cmd = ['svcinfo', 'lshost', '-delim', '!'] if host: with_header = False ssh_cmd.append('"%s"' % host) return self.run_ssh_info(ssh_cmd, with_header=with_header) def add_chap_secret(self, secret, host): ssh_cmd = ['svctask', 'chhost', '-chapsecret', secret, '"%s"' % host] log_cmd = 'svctask chhost -chapsecret *** %s' % host self.run_ssh_assert_no_output(ssh_cmd, log_cmd) def chhost(self, host, site): ssh_cmd = ['svctask', 'chhost'] if site: ssh_cmd += ['-site', '"%s"' % site, '"%s"' % host] else: ssh_cmd += ['-nosite', '"%s"' % host] self.run_ssh_assert_no_output(ssh_cmd) def lsiscsiauth(self): ssh_cmd = ['svcinfo', 'lsiscsiauth', '-delim', '!'] return self.run_ssh_info(ssh_cmd, with_header=True) def lsip(self, portset=None): ssh_cmd = ['svcinfo', 'lsip', '-delim', '!'] if portset: ssh_cmd += ['-filtervalue', 'portset_name=%s' % portset] return self.run_ssh_info(ssh_cmd, with_header=True) def lsfabric(self, wwpn=None, host=None): ssh_cmd = ['svcinfo', 'lsfabric', '-delim', '!'] if wwpn: ssh_cmd.extend(['-wwpn', wwpn]) elif host: ssh_cmd.extend(['-host', '"%s"' % host]) else: msg = (_('Must pass wwpn or host to lsfabric.')) LOG.error(msg) raise exception.VolumeDriverException(message=msg) return self.run_ssh_info(ssh_cmd, with_header=True) def mkvdiskhostmap(self, host, vdisk, lun, multihostmap): """Map vdisk to host. If vdisk already mapped and multihostmap is True, use the force flag. 
""" ssh_cmd = ['svctask', 'mkvdiskhostmap', '-host', '"%s"' % host, '-scsi', lun, '"%s"' % vdisk] if multihostmap: ssh_cmd.insert(ssh_cmd.index('mkvdiskhostmap') + 1, '-force') self.run_ssh_check_created(ssh_cmd) def mkrcrelationship(self, master, aux, system, asyncmirror, cyclingmode=False): ssh_cmd = ['svctask', 'mkrcrelationship', '-master', master, '-aux', aux, '-cluster', system] if asyncmirror: ssh_cmd.append('-global') if cyclingmode: ssh_cmd.extend(['-cyclingmode', 'multi']) return self.run_ssh_check_created(ssh_cmd) def rmrcrelationship(self, relationship, force=False): ssh_cmd = ['svctask', 'rmrcrelationship'] if force: ssh_cmd += ['-force'] ssh_cmd += [relationship] self.run_ssh_assert_no_output(ssh_cmd) def switchrelationship(self, relationship, aux=True): primary = 'aux' if aux else 'master' ssh_cmd = ['svctask', 'switchrcrelationship', '-primary', primary, relationship] self.run_ssh_assert_no_output(ssh_cmd) def startrcrelationship(self, rc_rel, primary=None): ssh_cmd = ['svctask', 'startrcrelationship', '-force'] if primary: ssh_cmd.extend(['-primary', primary]) ssh_cmd.append(rc_rel) self.run_ssh_assert_no_output(ssh_cmd) def ch_rcconsistgrp_cyclingmode(self, consistgrp, cyclingmode='none'): ssh_cmd = ['svctask', 'chrcconsistgrp', '-cyclingmode', cyclingmode, consistgrp] self.run_ssh_assert_no_output(ssh_cmd) def ch_rcrelationship_cyclingmode(self, relationship, cyclingmode='none'): # Note: Can only change one attribute at a time, # so define three ch_rcrelationship_xxx here ssh_cmd = ['svctask', 'chrcrelationship', '-cyclingmode', cyclingmode, relationship] self.run_ssh_assert_no_output(ssh_cmd) def ch_rcrelationship_cycleperiod(self, relationship, cycle_period_seconds): # Note: Can only change one attribute at a time, # so define three ch_rcrelationship_xxx here if cycle_period_seconds: ssh_cmd = ['svctask', 'chrcrelationship'] ssh_cmd.extend(['-cycleperiodseconds', str(cycle_period_seconds)]) ssh_cmd.append(relationship) self.run_ssh_assert_no_output(ssh_cmd) def ch_rcrelationship_changevolume(self, relationship, changevolume, master): # Note: Can only change one attribute at a time, # so define three ch_rcrelationship_xxx here if changevolume: ssh_cmd = ['svctask', 'chrcrelationship'] if master: ssh_cmd.extend(['-masterchange', changevolume]) else: ssh_cmd.extend(['-auxchange', changevolume]) ssh_cmd.append(relationship) self.run_ssh_assert_no_output(ssh_cmd) def stoprcrelationship(self, relationship, access=False): ssh_cmd = ['svctask', 'stoprcrelationship'] if access: ssh_cmd.append('-access') ssh_cmd.append(relationship) self.run_ssh_assert_no_output(ssh_cmd) def lsrcrelationship(self, rc_rel): ssh_cmd = ['svcinfo', 'lsrcrelationship', '-delim', '!', rc_rel] return self.run_ssh_info(ssh_cmd) # replication cg def chrcrelationship(self, relationship, rccg=None): ssh_cmd = ['svctask', 'chrcrelationship'] if rccg: ssh_cmd.extend(['-consistgrp', rccg]) else: ssh_cmd.extend(['-noconsistgrp']) ssh_cmd.append(relationship) self.run_ssh_assert_no_output(ssh_cmd) def lsrcconsistgrp(self, rccg): ssh_cmd = ['svcinfo', 'lsrcconsistgrp', '-delim', '!', rccg] try: return self.run_ssh_info(ssh_cmd)[0] except exception.VolumeBackendAPIException as ex: LOG.warning("Failed to get rcconsistgrp %(rccg)s info. 
" "Exception: %(ex)s.", {'rccg': rccg, 'ex': ex}) return None def mkrcconsistgrp(self, rccg, system): ssh_cmd = ['svctask', 'mkrcconsistgrp', '-name', rccg, '-cluster', system] return self.run_ssh_check_created(ssh_cmd) def rmrcconsistgrp(self, rccg, force=True): ssh_cmd = ['svctask', 'rmrcconsistgrp'] if force: ssh_cmd += ['-force'] ssh_cmd += ['"%s"' % rccg] return self.run_ssh_assert_no_output(ssh_cmd) def startrcconsistgrp(self, rccg, primary=None): ssh_cmd = ['svctask', 'startrcconsistgrp', '-force'] if primary: ssh_cmd.extend(['-primary', primary]) ssh_cmd.append(rccg) self.run_ssh_assert_no_output(ssh_cmd) def stoprcconsistgrp(self, rccg, access=False): ssh_cmd = ['svctask', 'stoprcconsistgrp'] if access: ssh_cmd.append('-access') ssh_cmd.append(rccg) self.run_ssh_assert_no_output(ssh_cmd) def switchrcconsistgrp(self, rccg, aux=True): primary = 'aux' if aux else 'master' ssh_cmd = ['svctask', 'switchrcconsistgrp', '-primary', primary, rccg] self.run_ssh_assert_no_output(ssh_cmd) def lspartnership(self, system_name): key_value = 'name=%s' % system_name ssh_cmd = ['svcinfo', 'lspartnership', '-filtervalue', key_value, '-delim', '!'] return self.run_ssh_info(ssh_cmd, with_header=True) def lspartnershipcandidate(self): ssh_cmd = ['svcinfo', 'lspartnershipcandidate', '-delim', '!'] return self.run_ssh_info(ssh_cmd, with_header=True) def mkippartnership(self, ip_v4, bandwidth=1000, backgroundcopyrate=50): ssh_cmd = ['svctask', 'mkippartnership', '-type', 'ipv4', '-clusterip', ip_v4, '-linkbandwidthmbits', str(bandwidth), '-backgroundcopyrate', str(backgroundcopyrate)] return self.run_ssh_assert_no_output(ssh_cmd) def mkfcpartnership(self, system_name, bandwidth=1000, backgroundcopyrate=50): ssh_cmd = ['svctask', 'mkfcpartnership', '-linkbandwidthmbits', str(bandwidth), '-backgroundcopyrate', str(backgroundcopyrate), system_name] return self.run_ssh_assert_no_output(ssh_cmd) def chpartnership(self, partnership_id, start=True): action = '-start' if start else '-stop' ssh_cmd = ['svctask', 'chpartnership', action, partnership_id] return self.run_ssh_assert_no_output(ssh_cmd) def rmvdiskhostmap(self, host, vdisk): ssh_cmd = ['svctask', 'rmvdiskhostmap', '-host', '"%s"' % host, '"%s"' % vdisk] self.run_ssh_assert_no_output(ssh_cmd) def lsvdiskhostmap(self, vdisk): ssh_cmd = ['svcinfo', 'lsvdiskhostmap', '-delim', '!', '"%s"' % vdisk] return self.run_ssh_info(ssh_cmd, with_header=True) def lshostvdiskmap(self, host): ssh_cmd = ['svcinfo', 'lshostvdiskmap', '-delim', '!', '"%s"' % host] return self.run_ssh_info(ssh_cmd, with_header=True) def get_vdiskhostmapid(self, vdisk, host): resp = self.lsvdiskhostmap(vdisk) for mapping_info in resp: if mapping_info['host_name'] == host: lun_id = mapping_info['SCSI_id'] return lun_id return None def rmhost(self, host): ssh_cmd = ['svctask', 'rmhost', '"%s"' % host] self.run_ssh_assert_no_output(ssh_cmd) def mkvolumegroup(self, volumegroup_name): """Create a volume group(VG).""" ssh_cmd = ['svctask', 'mkvolumegroup', '-name', '"%s"' % volumegroup_name] try: return self.run_ssh_check_created(ssh_cmd) except Exception as ex: if hasattr(ex, 'msg') and 'CMMVC6035E' in ex.msg: msg = (_('CMMVC6372W Action failed because volume group ' 'with the name provided already exists.')) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) with excutils.save_and_reraise_exception(): LOG.exception('Failed to create volumegroup.') def lsvolumegroup(self, volumegroup_id_or_name): """Return volume group attributes or None if it doesn't exist.""" ssh_cmd = 
['svcinfo', 'lsvolumegroup', '-bytes', '-delim', '!', '"%s"' % volumegroup_id_or_name] out, err = self._ssh(ssh_cmd, check_exit_code=False) if not err: return CLIResponse((out, err), ssh_cmd=ssh_cmd, delim='!', with_header=False)[0] if 'CMMVC5804E' in err: return None msg = (_('CLI Exception output:\n command: %(cmd)s\n ' 'stdout: %(out)s\n stderr: %(err)s.') % {'cmd': ssh_cmd, 'out': out, 'err': err}) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) def rmvolumegroup(self, volumegroup_name_or_id): """Delete a volume group""" ssh_cmd = ['svctask', 'rmvolumegroup', '"%s"' % volumegroup_name_or_id] try: self.run_ssh_assert_no_output(ssh_cmd) except Exception as ex: if hasattr(ex, 'msg') and 'CMMVC8749E' in ex.msg: msg = _('rmvolumegroup: specified volume group is not empty.') LOG.error(msg) raise exception.VolumeDriverException(message=msg) with excutils.save_and_reraise_exception(): LOG.exception('Failed to delete volumegroup.') def lsvolumegroupsnapshot(self, params): """Return volumegroup-snapshot attributes. Return None if it doesn't exists """ ssh_cmd = ['svcinfo', 'lsvolumegroupsnapshot'] if "id" in params: ssh_cmd.append(params["id"]) elif "name" and "volumegroup" in params: ssh_cmd.extend(['-snapshot', params["name"], '-volumegroup', params["volumegroup"]]) # Add delimiter to parse the output ssh_cmd.extend(['-delim', ':']) out, err = self._ssh(ssh_cmd, check_exit_code=False) if not err: if not out: return None # Parse the lsvolumegroupsnapshot output output = out.split('\n') attributes = output[0].split(":") attribute_values = output[1].split(":") attrs = {key: val for key, val in zip(attributes, attribute_values)} return attrs # CMMVC5804E implies volumegroup-snapshot or volumegroup specified # does not exist in the SVC storage. 
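        # Any other non-empty stderr is treated as an unexpected CLI failure
        # and is logged and re-raised as a VolumeBackendAPIException below.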
if 'CMMVC5804E' in err: return None msg = (_('CLI Exception output:\n command: %(cmd)s\n ' 'stdout: %(out)s\n stderr: %(err)s.') % {'cmd': ssh_cmd, 'out': out, 'err': err}) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) def addsnapshot(self, params): ssh_cmd = ['svctask', 'addsnapshot', '-ignorelegacy'] if "volumegroup" in params: ssh_cmd.extend(['-volumegroup', params["volumegroup"]]) if "name" in params: ssh_cmd.extend(['-name', params["name"]]) try: return self.run_ssh_check_created(ssh_cmd) except Exception: with excutils.save_and_reraise_exception(): LOG.exception('Failed to create volumegroup snapshot.') def rmsnapshot(self, params): ssh_cmd = ['svctask', 'rmsnapshot'] if "id" in params: ssh_cmd.extend(['-snapshotid', params["id"]]) elif "name" and "volumegroup" in params: ssh_cmd.extend(['-snapshot', params["name"], '-volumegroup', params["volumegroup"]]) self.run_ssh_assert_no_output(ssh_cmd) def mkvdisk(self, name, size, units, pool, opts, params): ssh_cmd = ['svctask', 'mkvdisk', '-name', '"%s"' % name, '-mdiskgrp', '"%s"' % pool, '-iogrp', str(opts['iogrp']), '-size', size, '-unit', units] + params try: return self.run_ssh_check_created(ssh_cmd) except Exception as ex: # pylint: disable=E1101 if hasattr(ex, 'msg') and 'CMMVC6372W' in ex.msg: vdisk = self.lsvdisk(name) if vdisk: LOG.warning('CMMVC6372W The virtualized storage ' 'capacity that the cluster is using is ' 'approaching the virtualized storage ' 'capacity that is licensed.') return vdisk['id'] with excutils.save_and_reraise_exception(): LOG.exception('Failed to create vdisk %(vol)s.', {'vol': name}) def rmvdisk(self, vdisk, force_unmap=True, force_delete=True): ssh_cmd = ['svctask', 'rmvdisk'] if force_unmap and not force_delete: ssh_cmd += ['-removehostmappings'] if force_delete: ssh_cmd += ['-force'] ssh_cmd += ['"%s"' % vdisk] self.run_ssh_assert_no_output(ssh_cmd) def lsvdisk(self, vdisk): """Return vdisk attributes or None if it doesn't exist.""" ssh_cmd = ['svcinfo', 'lsvdisk', '-bytes', '-delim', '!', '"%s"' % vdisk] out, err = self._ssh(ssh_cmd, check_exit_code=False) if not err: return CLIResponse((out, err), ssh_cmd=ssh_cmd, delim='!', with_header=False)[0] if 'CMMVC5754E' in err: return None msg = (_('CLI Exception output:\n command: %(cmd)s\n ' 'stdout: %(out)s\n stderr: %(err)s.') % {'cmd': ssh_cmd, 'out': out, 'err': err}) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) def lsvdisks_from_filter(self, filter_name, value): """Performs an lsvdisk command, filtering the results as specified. Returns an iterable for all matching vdisks. 
""" ssh_cmd = ['svcinfo', 'lsvdisk', '-bytes', '-delim', '!', '-filtervalue', '%s=%s' % (filter_name, value)] return self.run_ssh_info(ssh_cmd, with_header=True) def lsthrottle(self): """Returns throttle objects for all vdisks.""" ssh_cmd = ['svcinfo', 'lsthrottle', '-delim', '!', '-filtervalue', 'throttle_type=vdisk'] throttles = self.run_ssh_info(ssh_cmd, with_header=True) return throttles.result def chvdisk(self, vdisk, params): ssh_cmd = ['svctask', 'chvdisk'] + params + ['"%s"' % vdisk] self.run_ssh_assert_no_output(ssh_cmd) def movevdisk(self, vdisk, iogrp): ssh_cmd = ['svctask', 'movevdisk', '-iogrp', iogrp, '"%s"' % vdisk] self.run_ssh_assert_no_output(ssh_cmd) def expandvdisksize(self, vdisk, amount): ssh_cmd = ( ['svctask', 'expandvdisksize', '-size', str(amount), '-unit', 'gb', '"%s"' % vdisk]) self.run_ssh_assert_no_output(ssh_cmd) def mkfcmap(self, source, target, full_copy, copy_rate, clean_rate, consistgrp=None): ssh_cmd = ['svctask', 'mkfcmap', '-source', '"%s"' % source, '-target', '"%s"' % target] if not full_copy: ssh_cmd.extend(['-copyrate', '0']) else: ssh_cmd.extend(['-copyrate', str(copy_rate)]) ssh_cmd.append('-autodelete') if consistgrp: ssh_cmd.extend(['-consistgrp', consistgrp]) if clean_rate is not None: ssh_cmd.extend(['-cleanrate', str(int(clean_rate))]) out, err = self._ssh(ssh_cmd, check_exit_code=False) if 'successfully created' not in out: msg = (_('CLI Exception output:\n command: %(cmd)s\n ' 'stdout: %(out)s\n stderr: %(err)s.') % {'cmd': ssh_cmd, 'out': out, 'err': err}) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) try: match_obj = re.search(r'FlashCopy Mapping, id \[([0-9]+)\], ' 'successfully created', out) fc_map_id = match_obj.group(1) except (AttributeError, IndexError): msg = (_('Failed to parse CLI output:\n command: %(cmd)s\n ' 'stdout: %(out)s\n stderr: %(err)s.') % {'cmd': ssh_cmd, 'out': out, 'err': err}) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) return fc_map_id def prestartfcmap(self, fc_map_id, restore=False): ssh_cmd = ['svctask', 'prestartfcmap'] if restore: ssh_cmd.append('-restore') ssh_cmd.append(fc_map_id) self.run_ssh_assert_no_output(ssh_cmd) def startfcmap(self, fc_map_id, restore=False): ssh_cmd = ['svctask', 'startfcmap'] if restore: ssh_cmd.append('-restore') ssh_cmd.append(fc_map_id) self.run_ssh_assert_no_output(ssh_cmd) def prestartfcconsistgrp(self, fc_consist_group): ssh_cmd = ['svctask', 'prestartfcconsistgrp', fc_consist_group] self.run_ssh_assert_no_output(ssh_cmd) def startfcconsistgrp(self, fc_consist_group): ssh_cmd = ['svctask', 'startfcconsistgrp', fc_consist_group] self.run_ssh_assert_no_output(ssh_cmd) def stopfcconsistgrp(self, fc_consist_group): ssh_cmd = ['svctask', 'stopfcconsistgrp', fc_consist_group] self.run_ssh_assert_no_output(ssh_cmd) def chfcmap(self, fc_map_id, copyrate=None, clean_rate=None, autodel='on'): ssh_cmd = ['svctask', 'chfcmap'] if clean_rate is not None: ssh_cmd += ['-cleanrate', clean_rate] if copyrate is not None: ssh_cmd += ['-copyrate', copyrate] ssh_cmd += ['-autodelete', autodel, fc_map_id] self.run_ssh_assert_no_output(ssh_cmd) def stopfcmap(self, fc_map_id, force=False, split=False): ssh_cmd = ['svctask', 'stopfcmap'] if force: ssh_cmd += ['-force'] if split: ssh_cmd += ['-split'] ssh_cmd += [fc_map_id] self.run_ssh_assert_no_output(ssh_cmd) def rmfcmap(self, fc_map_id): ssh_cmd = ['svctask', 'rmfcmap', '-force', fc_map_id] self.run_ssh_assert_no_output(ssh_cmd) def lsvdiskfcmappings(self, vdisk): ssh_cmd = ['svcinfo', 
'lsvdiskfcmappings', '-delim', '!', '"%s"' % vdisk] return self.run_ssh_info(ssh_cmd, with_header=True) def lsfcmap(self, fc_map_id): ssh_cmd = ['svcinfo', 'lsfcmap', '-filtervalue', 'id=%s' % fc_map_id, '-delim', '!'] return self.run_ssh_info(ssh_cmd, with_header=True) def lsfcconsistgrp(self, fc_consistgrp): ssh_cmd = ['svcinfo', 'lsfcconsistgrp', '-delim', '!', fc_consistgrp] out, err = self._ssh(ssh_cmd) return CLIResponse((out, err), ssh_cmd=ssh_cmd, delim='!', with_header=False) def mkfcconsistgrp(self, fc_consist_group): ssh_cmd = ['svctask', 'mkfcconsistgrp', '-name', fc_consist_group] return self.run_ssh_check_created(ssh_cmd) def rmfcconsistgrp(self, fc_consist_group): ssh_cmd = ['svctask', 'rmfcconsistgrp', '-force', fc_consist_group] return self.run_ssh_assert_no_output(ssh_cmd) def addvdiskcopy(self, vdisk, dest_pool, params, auto_delete): ssh_cmd = (['svctask', 'addvdiskcopy'] + params + ['-mdiskgrp', '"%s"' % dest_pool]) if auto_delete: ssh_cmd += ['-autodelete'] ssh_cmd += ['"%s"' % vdisk] return self.run_ssh_check_created(ssh_cmd) def lsvdiskcopy(self, vdisk, copy_id=None): ssh_cmd = ['svcinfo', 'lsvdiskcopy', '-delim', '!'] with_header = True if copy_id: ssh_cmd += ['-copy', copy_id] with_header = False ssh_cmd += ['"%s"' % vdisk] return self.run_ssh_info(ssh_cmd, with_header=with_header) def lsvdisksyncprogress(self, vdisk, copy_id): ssh_cmd = ['svcinfo', 'lsvdisksyncprogress', '-delim', '!', '-copy', copy_id, '"%s"' % vdisk] return self.run_ssh_info(ssh_cmd, with_header=True)[0] def rmvdiskcopy(self, vdisk, copy_id): ssh_cmd = ['svctask', 'rmvdiskcopy', '-copy', copy_id, '"%s"' % vdisk] self.run_ssh_assert_no_output(ssh_cmd) def addvdiskaccess(self, vdisk, iogrp): ssh_cmd = ['svctask', 'addvdiskaccess', '-iogrp', iogrp, '"%s"' % vdisk] self.run_ssh_assert_no_output(ssh_cmd) def rmvdiskaccess(self, vdisk, iogrp): ssh_cmd = ['svctask', 'rmvdiskaccess', '-iogrp', iogrp, '"%s"' % vdisk] self.run_ssh_assert_no_output(ssh_cmd) def lsvdiskaccess(self, vdisk): ssh_cmd = ['svcinfo', 'lsvdiskaccess', '-delim', '!', '"%s"' % vdisk] return self.run_ssh_info(ssh_cmd, with_header=True) def lsportfc(self, node_id): ssh_cmd = ['svcinfo', 'lsportfc', '-delim', '!', '-filtervalue', 'node_id=%s' % node_id] return self.run_ssh_info(ssh_cmd, with_header=True) def lstargetportfc(self, current_node_id=None, host_io_permitted=None): ssh_cmd = ['svcinfo', 'lstargetportfc', '-delim', '!'] if current_node_id and host_io_permitted: ssh_cmd += ['-filtervalue', '%s:%s' % ( 'current_node_id=%s' % current_node_id, 'host_io_permitted=%s' % host_io_permitted)] elif current_node_id: ssh_cmd += ['-filtervalue', 'current_node_id=%s' % current_node_id] return self.run_ssh_info(ssh_cmd, with_header=True) def lsfcportsetmember(self): ssh_cmd = ['svcinfo', 'lsfcportsetmember', '-delim', '!'] return self.run_ssh_info(ssh_cmd, with_header=True) def migratevdisk(self, vdisk, dest_pool, copy_id='0'): ssh_cmd = ['svctask', 'migratevdisk', '-mdiskgrp', dest_pool, '-copy', copy_id, '-vdisk', vdisk] self.run_ssh_assert_no_output(ssh_cmd) def mkvolume(self, name, size, units, pool, params): ssh_cmd = ['svctask', 'mkvolume', '-name', name, '-pool', '"%s"' % pool, '-size', size, '-unit', units] + params return self.run_ssh_check_created(ssh_cmd) def rmvolume(self, volume, force_unmap=True, force_delete=True): ssh_cmd = ['svctask', 'rmvolume'] if force_delete: ssh_cmd += ['-removehostmappings', '-removefcmaps', '-removercrelationships'] elif force_unmap: ssh_cmd += ['-removehostmappings'] ssh_cmd += ['"%s"' % volume] 
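        # For illustration, with force_delete=True the assembled command is
        # roughly: svctask rmvolume -removehostmappings -removefcmaps
        # -removercrelationships "<volume>"; with only force_unmap=True it is
        # roughly: svctask rmvolume -removehostmappings "<volume>".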
self.run_ssh_assert_no_output(ssh_cmd) def addvolumecopy(self, name, pool, params): ssh_cmd = ['svctask', 'addvolumecopy', '-pool', '"%s"' % pool] + params + ['"%s"' % name] self.run_ssh_assert_no_output(ssh_cmd) def rmvolumecopy(self, name, pool): ssh_cmd = ['svctask', 'rmvolumecopy', '-pool', '"%s"' % pool, '"%s"' % name] self.run_ssh_assert_no_output(ssh_cmd) class StorwizeHelpers(object): # All the supported QoS key are saved in this dict. When a new # key is going to add, four values MUST be set: # 'default': to indicate the value, when the parameter is disabled. # 'param': to indicate the corresponding parameter in the command. # 'type': to indicate the type of this value. # 'unit': to indicate the string, a supported QoS parameter. WAIT_TIME = 5 svc_qos = {'IOThrottling': {'default': '0', 'param': 'rate', 'type': float, 'unit': 'IOThrottling_unit'}, 'IOThrottling_unit': {'default': 'iops', 'enum': ['iops', 'mbps', 'iops_per_gb'], 'type': str, 'mbps': 'unitmb', 'iops': 'rate', 'iops_per_gb': 'rate'}} def __init__(self, run_ssh): self.ssh = StorwizeSSH(run_ssh) self.check_fcmapping_interval = 3 self.code_level = None self.stats = {} self.Host_connector_info = {"FC": {}, "ISCSI": {}} @staticmethod def handle_keyerror(cmd, out): msg = (_('Could not find key in output of command %(cmd)s: %(out)s.') % {'out': out, 'cmd': cmd}) raise exception.VolumeBackendAPIException(data=msg) def compression_enabled(self): """Return whether or not compression is enabled for this system.""" resp = self.ssh.lslicense() keys = ['license_compression_enclosures', 'license_compression_capacity'] for key in keys: if resp.get(key, '0') != '0': return True # lslicense is not used for V9000 compression check # compression_enclosures and compression_capacity are # always 0. V9000 uses license_scheme 9846 as an # indicator and can always do compression try: resp = self.ssh.lsguicapabilities() if resp.get('license_scheme', '0') == '9846': return True if resp.get('license_scheme', '0') == 'flex': return True except exception.VolumeBackendAPIException: LOG.exception("Failed to fetch licensing scheme.") return False def replication_licensed(self): """Return whether or not replication is enabled for this system.""" # Uses product_key as an indicator to check # whether replication is supported in storage. try: resp = self.ssh.lsguicapabilities() product_key = resp.get('product_key', '0') if product_key in storwize_const.REP_CAP_DEVS: return True except exception.VolumeBackendAPIException as war: LOG.warning("Failed to run lsguicapability. Exception: %s.", war) return False def get_system_info(self): """Return system's name, ID, and code level.""" resp = self.ssh.lssystem() level = resp['code_level'] match_obj = re.search('([0-9].){3}[0-9]', level) if match_obj is None: msg = _('Failed to get code level (%s).') % level raise exception.VolumeBackendAPIException(data=msg) code_level = match_obj.group().split('.') LOG.info("code_level is: %s.", level) return {'code_level': tuple([int(x) for x in code_level]), 'topology': resp['topology'], 'system_name': resp['name'], 'system_id': resp['id']} def get_pool_attrs(self, pool): """Return attributes for the specified pool.""" return self.ssh.lsmdiskgrp(pool) def is_pool_defined(self, pool_name): """Check if vdisk is defined.""" attrs = self.get_pool_attrs(pool_name) return attrs is not None def is_data_reduction_pool(self, pool_name): """Check if pool is data reduction pool.""" # Check pool is data reduction pool or not from pool information # saved in stats. 
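        # The cached driver stats are consulted first; if the pool is not
        # present there (for example because stats have not been refreshed
        # yet), fall back to an lsmdiskgrp call and check whether its
        # 'data_reduction' attribute is 'yes'.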
for pool in self.stats.get('pools', []): if pool['pool_name'] == pool_name: return pool['data_reduction'] pool_data = self.get_pool_attrs(pool_name) if (pool_data and 'data_reduction' in pool_data and pool_data['data_reduction'] == 'yes'): return True return False def get_pool_volumes(self, pool): """Return volumes for the specified pool.""" vdisks = self.ssh.lsvdisks_from_filter('mdisk_grp_name', pool) return vdisks.result def get_available_io_groups(self): """Return list of available IO groups.""" iogrps = [] resp = self.ssh.lsiogrp() for iogrp in resp: try: if int(iogrp['node_count']) > 0: iogrps.append(int(iogrp['id'])) except KeyError: self.handle_keyerror('lsiogrp', iogrp) except ValueError: msg = (_('Expected integer for node_count, ' 'svcinfo lsiogrp returned: %(node)s.') % {'node': iogrp['node_count']}) raise exception.VolumeBackendAPIException(data=msg) return iogrps def get_vdisk_count_by_io_group(self): res = {} resp = self.ssh.lsiogrp() for iogrp in resp: try: if int(iogrp['node_count']) > 0: res[int(iogrp['id'])] = int(iogrp['vdisk_count']) except KeyError: self.handle_keyerror('lsiogrp', iogrp) except ValueError: msg = (_('Expected integer for node_count, ' 'svcinfo lsiogrp returned: %(node)s') % {'node': iogrp['node_count']}) raise exception.VolumeBackendAPIException(data=msg) return res def get_hyperswap_pool_io_grp(self, state, pool, peer_pool): if not peer_pool or not pool: raise exception.InvalidInput( reason=_('The pool and peer pool is necessary for hyperswap ' 'volume, please configure the pool and peer pool.')) pool_data = None peer_pool_data = None for stat_pool in self.stats.get('pools', []): if stat_pool['pool_name'] == pool: pool_data = stat_pool elif stat_pool['pool_name'] == peer_pool: peer_pool_data = stat_pool if pool_data is None or pool_data.get("site_id") is None: pool_data = self.get_pool_attrs(pool) if not pool_data['site_id']: raise exception.InvalidInput( reason=_('The pool with site is necessary for hyperswap ' 'volume, please configure the pool with site.')) if peer_pool_data is None or peer_pool_data.get("site_id") is None: peer_pool_data = self.get_pool_attrs(peer_pool) if not peer_pool_data['site_id']: raise exception.InvalidInput( reason=_('The peer pool with site is necessary for ' 'hyperswap volume, please configure the peer ' 'pool with site.')) iogrp_list = [] for node in state['storage_nodes'].values(): if ((pool_data['site_id'] == node['site_id']) or (peer_pool_data['site_id'] == node['site_id'])): if node['IO_group'] not in iogrp_list: iogrp_list.append(node['IO_group']) return iogrp_list def select_io_group(self, state, opts, pool): selected_iog = 0 iog_list = StorwizeHelpers._get_valid_requested_io_groups(state, opts) if len(iog_list) == 0: raise exception.InvalidInput( reason=_('Given I/O group(s) %(iogrp)s not valid; available ' 'I/O groups are %(avail)s.') % {'iogrp': opts['iogrp'], 'avail': state['available_iogrps']}) site_iogrp = [] hyperswap = opts['volume_topology'] == 'hyperswap' if hyperswap: pool_data = self.get_pool_attrs(pool) if pool_data is None: msg = (_('Failed getting details for pool %s.') % pool) LOG.error(msg) raise exception.InvalidConfigurationValue(message=msg) if hyperswap and pool_data.get('site_id'): for node in state['storage_nodes'].values(): if pool_data['site_id'] == node['site_id']: site_iogrp.append(node['IO_group']) site_iogrp = list(map(int, site_iogrp)) iogroup_list = list(set(site_iogrp).intersection(iog_list)) if len(iogroup_list) == 0: LOG.warning('The storage system topology is hyperswap or ' 
'stretched, The site_id of pool %(pool)s is ' '%(site_id)s, the available I/O groups on this ' 'site is %(site_iogrp)s, but the given I/O' ' group(s) is %(iogrp)s.', {'pool': pool, 'site_id': pool_data['site_id'], 'site_iogrp': site_iogrp, 'iogrp': opts['iogrp']}) iogroup_list = iog_list else: iogroup_list = iog_list iog_vdc = self.get_vdisk_count_by_io_group() LOG.debug("IO group current balance %s", iog_vdc) min_vdisk_count = iog_vdc[iogroup_list[0]] selected_iog = iogroup_list[0] for iog in iogroup_list: if iog_vdc[iog] < min_vdisk_count: min_vdisk_count = iog_vdc[iog] selected_iog = iog LOG.debug("Selected io_group is %d", selected_iog) return selected_iog def get_pool_max_throttle_rate_vdisk(self, pool, throttle_rate_type): """Returns the IOPs or Bandwidth throttle rate. Throttle rate of all vdisks for the specified pool. """ max_throttle_rate_vdisk = 0 vdisks = self.get_pool_volumes(pool) if vdisks: throttles = self.ssh.lsthrottle() if throttles: vdisk_names = [ vdisk['name'] for vdisk in vdisks if vdisk['name']] for throttle in throttles: if (throttle['object_name'] in vdisk_names and throttle[throttle_rate_type]): max_throttle_rate_vdisk += int( throttle[throttle_rate_type]) return max_throttle_rate_vdisk def get_volume_io_group(self, vol_name): vdisk = self.ssh.lsvdisk(vol_name) if vdisk: resp = self.ssh.lsiogrp() for iogrp in resp: if iogrp['name'] == vdisk['IO_group_name']: return int(iogrp['id']) return None def get_node_info(self, online_node=True): """Return dictionary containing information on system's nodes.""" nodes = {} resp = self.ssh.lsnode() for node_data in resp: try: if online_node and node_data['status'] != 'online': continue node = {} node['id'] = node_data['id'] node['name'] = node_data['name'] node['IO_group'] = node_data['IO_group_id'] node['iscsi_name'] = node_data['iscsi_name'] node['WWNN'] = node_data['WWNN'] node['status'] = node_data['status'] node['WWPN'] = [] node['ipv4'] = [] node['ipv6'] = [] node['IP_address'] = [] node['enabled_protocols'] = [] nodes[node['id']] = node node['site_id'] = (node_data['site_id'] if 'site_id' in node_data else None) node['site_name'] = (node_data['site_name'] if 'site_name' in node_data else None) except KeyError: self.handle_keyerror('lsnode', node_data) return nodes def add_iscsi_ip_addrs(self, storage_nodes, code_level, portset=None): """Add iSCSI IP addresses to system node information.""" if code_level >= (8, 4, 2, 0): portset_name = portset if portset else 'portset0' lsip_resp = self.ssh.lsip(portset=portset_name) # For every node_id there is one IP address in a particular # portset_name. Hence storing that one IP address of the # corresponding node_id in storage_node list. 
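        # For example, a node whose id appears in the lsip output ends up
        # with node['IP_address'] set to a single-element list containing the
        # address from that portset, while pre-8.4.2.0 systems keep filling
        # the 'ipv4'/'ipv6' lists from lsportip in the else branch below.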
for node_data in storage_nodes: try: for ip_data in lsip_resp: if ip_data['node_id'] in node_data: if ip_data['IP_address']: (storage_nodes[ip_data['node_id']] ['IP_address']) = ( [ip_data['IP_address']]) except KeyError: self.handle_keyerror('lsip', ip_data) else: lsportip_resp = self.ssh.lsportip() for ip_data in lsportip_resp: try: state = ip_data['state'] if ip_data['node_id'] in storage_nodes and ( state == 'configured' or state == 'online'): node = storage_nodes[ip_data['node_id']] if len(ip_data['IP_address']): node['ipv4'].append(ip_data['IP_address']) if len(ip_data['IP_address_6']): node['ipv6'].append(ip_data['IP_address_6']) except KeyError: self.handle_keyerror('lsportip', ip_data) def add_fc_wwpns(self, storage_nodes, code_level): """Add FC WWPNs to system node information.""" for key in storage_nodes: node = storage_nodes[key] wwpns = set(node['WWPN']) # The Storwize/svc release 7.7.0.0 introduced NPIV feature. # The virtual wwpns will be included in cli lstargetportfc if code_level < (7, 7, 0, 0): resp = self.ssh.lsportfc(node_id=node['id']) for port_info in resp: if (port_info['type'] == 'fc' and port_info['status'] == 'active'): wwpns.add(port_info['WWPN']) else: npiv_wwpns = self.get_npiv_wwpns(code_level, node_id=node['id']) wwpns.update(npiv_wwpns) node['WWPN'] = list(wwpns) LOG.info('WWPN on node %(node)s: %(wwpn)s.', {'node': node['id'], 'wwpn': node['WWPN']}) def get_npiv_wwpns(self, code_level, node_id=None, host_io=None, portset=None): wwpns = set() # In the response of lstargetportfc, the host_io_permitted # indicates whether the port can be used for host I/O targetportfc_resp = self.ssh.lstargetportfc(current_node_id=node_id, host_io_permitted=host_io) if code_level >= (8, 5, 0, 0): portset_name = portset if portset else 'portset64' port_ids = set() fcportsetmember_resp = self.ssh.lsfcportsetmember() for portset_member in fcportsetmember_resp: if portset_member['portset_name'] == portset_name: port_ids.add(portset_member['fc_io_port_id']) for port_info in targetportfc_resp: for port_id in port_ids: if port_id == port_info['fc_io_port_id']: wwpns.add(port_info['WWPN']) break else: for port_info in targetportfc_resp: wwpns.add(port_info['WWPN']) return list(wwpns) def add_chap_secret_to_host(self, host_name): """Generate and store a randomly-generated CHAP secret for the host.""" chap_secret = volume_utils.generate_password() self.ssh.add_chap_secret(chap_secret, host_name) return chap_secret def get_chap_secret_for_host(self, host_name): """Generate and store a randomly-generated CHAP secret for the host.""" resp = self.ssh.lsiscsiauth() host_found = False for host_data in resp: try: if host_data['name'] == host_name: host_found = True if host_data['iscsi_auth_method'] == 'chap': return host_data['iscsi_chap_secret'] except KeyError: self.handle_keyerror('lsiscsiauth', host_data) if not host_found: msg = _('Failed to find host %s.') % host_name raise exception.VolumeBackendAPIException(data=msg) return None def get_conn_fc_wwpns(self, host): wwpns = set() resp = self.ssh.lsfabric(host=host) for wwpn in resp.select('local_wwpn'): if wwpn is not None: wwpns.add(wwpn) return list(wwpns) def initialize_host_info(self): """Get the host,wwpn,iscsi and store in Host_connector_info.""" if (not self.Host_connector_info['FC'] and not self.Host_connector_info['ISCSI']): hosts_info = self.ssh.lshost() host_list = list(hosts_info.select('name')) for eachhost in host_list: resp = self.ssh.lshost(host=eachhost) if list(resp.select("WWPN")) != [None]: for wwpn in 
resp.select('WWPN'): if wwpn not in self.Host_connector_info['FC'].keys(): self.Host_connector_info['FC'][wwpn] = eachhost elif list(resp.select('iscsi_name')) != [None]: for iscsi_name in resp.select('iscsi_name'): if (iscsi_name not in self.Host_connector_info['ISCSI'].keys()): self.Host_connector_info['ISCSI'][iscsi_name] = ( eachhost) def get_host_from_host_info(self, connector, iscsi=False): host_name = None new_wwpn = [] if iscsi and 'initiator' in connector: if connector['initiator'] in self.Host_connector_info['ISCSI']: iqn = connector['initiator'] host_name = self.Host_connector_info['ISCSI'][iqn] elif 'wwpns' in connector: for wwpn in connector['wwpns']: if wwpn.upper() in self.Host_connector_info['FC']: host_name = self.Host_connector_info['FC'][wwpn.upper()] else: new_wwpn.append(['wwpn', '%s' % wwpn]) return host_name, new_wwpn def get_host_from_connector(self, connector, volume_name=None, iscsi=False): """Return the Storwize host described by the connector.""" LOG.debug('Enter: get_host_from_connector: %s.', connector) # If we have FC information, we have a faster lookup option host_name, new_wwpn = self.get_host_from_host_info(connector, iscsi) if host_name and volume_name: hosts_map_info = self.ssh.lsvdiskhostmap(volume_name) hosts_map_info_list = list(hosts_map_info.select('host_name')) if host_name in hosts_map_info_list: LOG.debug("get_host_from_connector: hosts_map_info:" " %s", hosts_map_info_list) LOG.debug('Leave: get_host_from_connector host %s', host_name) return host_name else: LOG.debug('get_host_from_connector: host %s not mapped ' 'to volume', host_name) host_name = None if host_name: for port in new_wwpn: LOG.debug('update wwpn %(wwpn)s to host %(host)s.', {'wwpn': port, 'host': host_name}) self.ssh.addhostport(host_name, port[0], port[1]) LOG.debug('Leave: get_host_from_connector: host %s.', host_name) return host_name def update_host_list(host, host_list): idx = host_list.index(host) del host_list[idx] host_list.insert(0, host) # That didn't work, so try exhaustive search hosts_info = self.ssh.lshost() host_list = list(hosts_info.select('name')) # If we have a "real" connector, we might be able to find the # host entry with fewer queries if we move the host entries # that contain the connector's host property value to the front # of the list if 'host' in connector: # order host_list such that the host entries that # contain the connector's host name are at the # beginning of the list for host in host_list: if re.search(connector['host'], host): update_host_list(host, host_list) # If we have a volume name we have a potential fast path # for finding the matching host for that volume. # Add the host_names that have mappings for our volume to the # head of the list of host names to search them first if volume_name: hosts_map_info = self.ssh.lsvdiskhostmap(volume_name) hosts_map_info_list = list(hosts_map_info.select('host_name')) # remove the fast path host names from the end of the list # and move to the front so they are only searched for once. for host in hosts_map_info_list: update_host_list(host, host_list) found = False for name in host_list: try: resp = self.ssh.lshost(host=name) except exception.VolumeBackendAPIException as ex: LOG.debug("Exception message: %s", ex.msg) if 'CMMVC5754E' in ex.msg: LOG.debug("CMMVC5754E found in CLI exception.") # CMMVC5754E: The specified object does not exist # The host has been deleted while walking the list. # This is a result of a host change on the SVC that # is out of band to this request. 
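                    # Skipping this host is harmless: the surrounding loop
                    # simply moves on to the next candidate, and host_name
                    # stays None if no candidate ever matches the connector.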
continue # unexpected error so reraise it with excutils.save_and_reraise_exception(): pass if iscsi: if 'initiator' in connector: for iscsi_name in resp.select('iscsi_name'): if iscsi_name == connector['initiator']: host_name = name found = True break elif 'wwpns' in connector and len(connector['wwpns']): connector_wwpns = [str(x).lower() for x in connector['wwpns']] for wwpn in resp.select('WWPN'): if wwpn and wwpn.lower() in connector_wwpns: host_name = name found = True break if found: break LOG.debug('Leave: get_host_from_connector: host %s.', host_name) return host_name def create_host(self, connector, iscsi=False, site=None, portset=None): """Create a new host on the storage system. We create a host name and associate it with the given connection information. The host name will be a cleaned up version of the given host name (at most 55 characters), plus a random 8-character suffix to avoid collisions. The total length should be at most 63 characters. """ LOG.debug('Enter: create_host: host %s.', connector['host']) # Before we start, make sure host name is a string and that we have at # least one port. host_name = connector['host'] if not isinstance(host_name, str): msg = _('create_host: Host name is not a string.') LOG.error(msg) raise exception.VolumeDriverException(message=msg) ports = [] if iscsi: if 'initiator' in connector: ports.append(['initiator', '%s' % connector['initiator']]) else: msg = _('create_host: No initiators supplied.') else: if 'wwpns' in connector: for wwpn in connector['wwpns']: ports.append(['wwpn', '%s' % wwpn]) else: msg = _('create_host: No wwpns supplied.') if not len(ports): LOG.error(msg) raise exception.VolumeDriverException(message=msg) # Build a host name for the Storwize host - first clean up the name if isinstance(host_name, str): host_name = unicodedata.normalize('NFKD', host_name).encode( 'ascii', 'replace').decode('ascii') for num in range(0, 128): ch = str(chr(num)) if not ch.isalnum() and ch not in [' ', '.', '-', '_']: host_name = host_name.replace(ch, '-') # Storwize doesn't like hostname that doesn't starts with letter or _. if not re.match('^[A-Za-z]', host_name): host_name = '_' + host_name # Add a random 8-character suffix to avoid collisions rand_id = str(random.randint(0, 99999999)).zfill(8) host_name = '%s-%s' % (host_name[:55], rand_id) # Create a host with one port port = ports.pop(0) # Host site_id is necessary for hyperswap volume. 
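        # For illustration, a connector host of "compute-01" typically maps
        # to a Storwize host name such as "compute-01-00042137": the cleaned
        # name truncated to 55 characters plus a zero-padded random suffix.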
self.ssh.mkhost(host_name, port[0], port[1], site, portset) # Add any additional ports to the host for port in ports: self.ssh.addhostport(host_name, port[0], port[1]) if iscsi and 'initiator' in connector: iqn = connector['initiator'] self.Host_connector_info['ISCSI'][iqn] = host_name elif 'wwpns' in connector: for wwpn in connector['wwpns']: self.Host_connector_info['FC'][wwpn.upper()] = host_name LOG.debug('Leave: create_host: host %(host)s - %(host_name)s.', {'host': connector['host'], 'host_name': host_name}) return host_name def update_host(self, host_name, site_name): self.ssh.chhost(host_name, site=site_name) def delete_host(self, host_name): self.ssh.rmhost(host_name) if host_name in self.Host_connector_info['ISCSI'].values(): host_iqn = None for iqn, host in self.Host_connector_info['ISCSI'].items(): if host == host_name: host_iqn = iqn break if host_iqn: self.Host_connector_info['ISCSI'].pop(host_iqn) elif host_name in self.Host_connector_info['FC'].values(): host_wwpn = [] for wwpn, host in self.Host_connector_info['FC'].items(): if host == host_name: host_wwpn.append(wwpn) for wwpn in host_wwpn: self.Host_connector_info['FC'].pop(wwpn) def _get_unused_lun_id(self, host_name): luns_used = [] result_lun = '-1' resp = self.ssh.lshostvdiskmap(host_name) for mapping_info in resp: luns_used.append(int(mapping_info['SCSI_id'])) luns_used.sort() result_lun = str(len(luns_used)) for index, n in enumerate(luns_used): if n > index: result_lun = str(index) break return result_lun @volume_utils.trace def map_vol_to_host(self, volume_name, host_name, multihostmap): """Create a mapping between a volume to a host.""" # Check if this volume is already mapped to this host result_lun = self.ssh.get_vdiskhostmapid(volume_name, host_name) if result_lun: LOG.debug('volume %(volume_name)s is already mapped to the host ' '%(host_name)s.', {'volume_name': volume_name, 'host_name': host_name}) return int(result_lun) class _RetryableVolumeDriverException( exception.VolumeBackendAPIException): """Exception to identify which types of errors to retry.""" pass @cinder_utils.retry(_RetryableVolumeDriverException, interval=2, retries=3, wait_random=True) def make_vdisk_host_map(): try: result_lun = self._get_unused_lun_id(host_name) self.ssh.mkvdiskhostmap(host_name, volume_name, result_lun, multihostmap) return int(result_lun) except Exception as ex: # pylint: disable=E1101 if (not multihostmap and hasattr(ex, 'msg') and 'CMMVC6071E' in ex.msg): LOG.warning('storwize_svc_multihostmap_enabled is set ' 'to False, not allowing multi host mapping.') raise exception.VolumeDriverException( message=_('CMMVC6071E The VDisk-to-host mapping was ' 'not created because the VDisk is already ' 'mapped to a host.')) if hasattr(ex, 'msg') and 'CMMVC5879E' in ex.msg: raise _RetryableVolumeDriverException(ex) with excutils.save_and_reraise_exception(): LOG.error('Error mapping VDisk-to-host.') return make_vdisk_host_map() def unmap_vol_from_host(self, volume_name, host_name): """Unmap the volume and delete the host if it has no more mappings.""" LOG.debug('Enter: unmap_vol_from_host: volume %(volume_name)s from ' 'host %(host_name)s.', {'volume_name': volume_name, 'host_name': host_name}) # Check if the mapping exists resp = self.ssh.lsvdiskhostmap(volume_name) if not len(resp): LOG.warning('unmap_vol_from_host: No mapping of volume ' '%(vol_name)s to any host found.', {'vol_name': volume_name}) return host_name if host_name is None: if len(resp) > 1: LOG.warning('unmap_vol_from_host: Multiple mappings of ' 'volume 
%(vol_name)s found, no host ' 'specified.', {'vol_name': volume_name}) return else: host_name = resp[0]['host_name'] else: found = False for h in resp.select('host_name'): if h == host_name: found = True if not found: LOG.warning('unmap_vol_from_host: No mapping of volume ' '%(vol_name)s to host %(host)s found.', {'vol_name': volume_name, 'host': host_name}) return host_name # We now know that the mapping exists self.ssh.rmvdiskhostmap(host_name, volume_name) LOG.debug('Leave: unmap_vol_from_host: volume %(volume_name)s from ' 'host %(host_name)s.', {'volume_name': volume_name, 'host_name': host_name}) return host_name def check_host_mapped_vols(self, host_name): return self.ssh.lshostvdiskmap(host_name) def check_vol_mapped_to_host(self, vol_name, host_name): resp = self.ssh.lsvdiskhostmap(vol_name) for mapping_info in resp: if mapping_info['host_name'] == host_name: return True return False @staticmethod def build_default_opts(config): # Ignore capitalization cluster_partner = config.storwize_svc_stretched_cluster_partner opt = {'rsize': config.storwize_svc_vol_rsize, 'warning': config.storwize_svc_vol_warning, 'autoexpand': config.storwize_svc_vol_autoexpand, 'grainsize': config.storwize_svc_vol_grainsize, 'compression': config.storwize_svc_vol_compression, 'easytier': config.storwize_svc_vol_easytier, 'iogrp': config.storwize_svc_vol_iogrp, 'qos': None, 'stretched_cluster': cluster_partner, 'replication': False, 'nofmtdisk': config.storwize_svc_vol_nofmtdisk, 'flashcopy_rate': config.storwize_svc_flashcopy_rate, 'clean_rate': config.storwize_svc_clean_rate, 'mirror_pool': config.storwize_svc_mirror_pool, 'aux_mirror_pool': config.storwize_svc_aux_mirror_pool, 'volume_topology': None, 'peer_pool': config.storwize_peer_pool, 'storwize_portset': config.storwize_portset, 'storwize_svc_src_child_pool': config.storwize_svc_src_child_pool, 'storwize_svc_target_child_pool': config.storwize_svc_target_child_pool, 'cycle_period_seconds': config.cycle_period_seconds} return opt @staticmethod def check_vdisk_opts(state, opts): # Check that grainsize is 32/64/128/256 if opts['grainsize'] not in [8, 32, 64, 128, 256]: raise exception.InvalidInput( reason=_('Illegal value specified for ' 'storwize_svc_vol_grainsize: set to either ' '32, 64, 128, or 256.')) # Check that compression is supported if opts['compression'] and not state['compression_enabled']: raise exception.InvalidInput( reason=_('System does not support compression.')) # Check that rsize is set if compression is set if opts['compression'] and opts['rsize'] == -1: raise exception.InvalidInput( reason=_('If compression is set to True, rsize must ' 'also be set (not equal to -1).')) # Check cycle_period_seconds are in 60-86400 if opts['cycle_period_seconds'] not in range(60, 86401): raise exception.InvalidInput( reason=_('cycle_period_seconds should be integer ' 'between 60 and 86400.')) iogs = StorwizeHelpers._get_valid_requested_io_groups(state, opts) if len(iogs) == 0: raise exception.InvalidInput( reason=_('Given I/O group(s) %(iogrp)s not valid; available ' 'I/O groups are %(avail)s.') % {'iogrp': opts['iogrp'], 'avail': state['available_iogrps']}) if opts['nofmtdisk'] and opts['rsize'] != -1: raise exception.InvalidInput( reason=_('If nofmtdisk is set to True, rsize must ' 'also be set to -1.')) @staticmethod def _get_valid_requested_io_groups(state, opts): given_iogs = str(opts['iogrp']) iog_list = given_iogs.split(',') # convert to int iog_list = list(map(int, iog_list)) LOG.debug("Requested iogroups %s", iog_list) 
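        # Hypothetical example: opts['iogrp'] == '0,1' with available I/O
        # groups [0, 2] yields [0] after the intersection computed below.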
LOG.debug("Available iogroups %s", state['available_iogrps']) filtiog = set(iog_list).intersection(state['available_iogrps']) iog_list = list(filtiog) LOG.debug("Filtered (valid) requested iogroups %s", iog_list) return iog_list def _get_opts_from_specs(self, opts, specs): qos = {} for k, value in specs.items(): # Get the scope, if using scope format key_split = k.split(':') if len(key_split) == 1: scope = None key = key_split[0] else: scope = key_split[0] key = key_split[1] # We generally do not look at capabilities in the driver, but # replication is a special case where the user asks for # a volume to be replicated, and we want both the scheduler and # the driver to act on the value. if ((not scope or scope == 'capabilities') and key == 'replication'): scope = None key = 'replication' words = value.split() if not (words and len(words) == 2 and words[0] == ''): LOG.error('Replication must be specified as ' '\' True\' or \' False\'.') del words[0] value = words[0] # Add the QoS. if scope and scope == 'qos': if key in self.svc_qos: try: type_fn = self.svc_qos[key]['type'] value = type_fn(value) qos[key] = value except ValueError: continue # Any keys that the driver should look at should have the # 'drivers' scope. if scope and scope != 'drivers': continue if key in opts: this_type = type(opts[key]).__name__ if this_type == 'int': value = int(value) elif this_type == 'bool': value = strutils.bool_from_string(value) opts[key] = value if len(qos): opts['qos'] = qos opts = self._validate_qos_opts(opts) return opts def _validate_qos_opts(self, opts): """Override to add IOThrottling_unit to qos from extra_specs""" qos = {} for key, value in opts['qos'].items(): # Validate IOThrottle rate value if key in self.svc_qos and key == "IOThrottling": if int(value) >= 0: qos[key] = value else: msg = (_("I/O Throttle rate cannot be negative or Zero. " "So skipping setting of I/O Throttle rate on " "volumes.")) LOG.warning(msg) continue # Validate IOThrottle Unit if key in self.svc_qos and key == 'IOThrottling_unit': if value: enum_values = self.svc_qos[key]['enum'] if value in enum_values: qos[key] = value else: msg = (_("An invalid '%(actual)s' unit was configured " "for IOThrottling_unit on Storage Template. " "It should be one of the values: " "%(expected)s. So skipping setting of I/O " "Throttle rate on volumes.") % dict(actual=value, expected=enum_values)) LOG.warning(msg) continue if len(qos) != 2: opts['qos'] = {} return opts def _get_qos_from_volume_metadata(self, volume_metadata): """Return the QoS information from the volume metadata.""" qos = {} for i in volume_metadata: k = i.get('key', None) value = i.get('value', None) key_split = k.split(':') if len(key_split) == 1: scope = None key = key_split[0] else: scope = key_split[0] key = key_split[1] # Add the QoS. if scope and scope == 'qos': if key in self.svc_qos: try: type_fn = self.svc_qos[key]['type'] value = type_fn(value) qos[key] = value except ValueError: continue return qos def _wait_for_a_condition(self, testmethod, timeout=None, interval=INTERVAL_1_SEC, raise_exception=False): start_time = time.time() if timeout is None: timeout = DEFAULT_TIMEOUT def _inner(): try: testValue = testmethod() except Exception as ex: if raise_exception: LOG.exception("_wait_for_a_condition: %s" " execution failed.", testmethod.__name__) raise exception.VolumeBackendAPIException(data=ex) else: testValue = False # pylint: disable=E1101 LOG.debug('Helper.' 
'_wait_for_condition: %(method_name)s ' 'execution failed for %(exception)s.', {'method_name': testmethod.__name__, 'exception': ex.message}) if testValue: raise loopingcall.LoopingCallDone() if int(time.time()) - start_time > timeout: msg = (_('CommandLineHelper._wait_for_condition: %s timeout.') % testmethod.__name__) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) timer = loopingcall.FixedIntervalLoopingCall(_inner) timer.start(interval=interval).wait() def get_vdisk_params(self, config, state, type_id, volume_type=None, volume_metadata=None): """Return the parameters for creating the vdisk. Takes volume type and defaults from config options into account. """ opts = self.build_default_opts(config) ctxt = context.get_admin_context() if volume_type is None and type_id is not None: volume_type = volume_types.get_volume_type(ctxt, type_id) if volume_type: qos_specs_id = volume_type.get('qos_specs_id') specs = dict(volume_type).get('extra_specs') # NOTE(vhou): We prefer the qos_specs association # and over-ride any existing # extra-specs settings if present if qos_specs_id is not None: kvs = qos_specs.get_qos_specs(ctxt, qos_specs_id)['specs'] # Merge the qos_specs into extra_specs and qos_specs has higher # priority than extra_specs if they have different values for # the same key. specs.update(kvs) opts = self._get_opts_from_specs(opts, specs) if (opts['qos'] is None and config.storwize_svc_allow_tenant_qos and volume_metadata): qos = self._get_qos_from_volume_metadata(volume_metadata) if len(qos) != 0: opts['qos'] = qos self.check_vdisk_opts(state, opts) return opts def check_data_reduction_pool_params(self, opts): """Check the configured parameters if vol in data reduction pool.""" if opts['warning'] != 0: msg = (_('You cannot specify -warning for thin-provisioned or ' 'compressed volumes that are in data reduction ' 'pools. The configured warning is ' '%s.') % opts['warning']) raise exception.VolumeDriverException(message=msg) if not opts['easytier']: msg = (_('You cannot specify -easytier for thin-provisioned ' 'or compressed volumes that are in data reduction ' 'pools. The configured easytier is ' '%s') % opts['easytier']) raise exception.VolumeDriverException(message=msg) if opts['grainsize'] != 256 and opts['grainsize'] != 8: msg = (_('You cannot specify -grainsize for thin-provisioned ' 'or compressed volumes that are in data reduction ' 'pools. This type of volume will be created with a ' 'grainsize of 8 KB. The configured grainsize is ' '%s.') % opts['grainsize']) raise exception.VolumeDriverException(message=msg) if opts['rsize'] != 2: if opts['volume_topology'] == 'hyperswap': msg = (_('You cannot specify -buffersize for Hyperswap volumes' ' that are in data reduction pools, The configured ' 'buffersize is %s.') % opts['rsize']) raise exception.VolumeDriverException(message=msg) else: msg = (_('You cannot specify -rsize for thin-provisioned ' 'or compressed volumes that are in data reduction ' 'pools. The -rsize parameter will be ignored in ' 'mkvdisk. Only its presence or absence is used to ' 'determine if the disk is a data reduction volume ' 'copy or a thick volume copy. The ' 'configured rsize is %s.') % opts['rsize']) raise exception.VolumeDriverException(message=msg) if not opts['autoexpand']: msg = (_('You cannot set the autoexpand to disable for ' 'thin-provisioned or compressed volumes that are in data ' 'reduction pool. 
The configured' ' autoexpand is %s.') % opts['autoexpand']) raise exception.VolumeDriverException(message=msg) else: LOG.info('You cannot specify warning, grainsize and ' 'easytier for thin-provisioned or compressed' ' volumes that are in data reduction pools. ' 'The rsize parameter will be ignored, the ' 'autoexpand must be enabled.') def is_volume_type_dr_pools(self, pool, opts, rep_type=None, rep_target_pool=None): """Check every configured pools is data reduction pool.""" if self.is_data_reduction_pool(pool): LOG.debug('The configured pool %s is a data reduction pool.', pool) return True if opts['mirror_pool'] and self.is_data_reduction_pool( opts['mirror_pool']): LOG.debug('The mirror_pool %s is a data reduction pool.', opts['mirror_pool']) return True if (opts['volume_topology'] == 'hyperswap' and self.is_data_reduction_pool(opts['peer_pool'])): LOG.debug('The peer_pool %s is a data reduction pool.', opts['peer_pool']) return True if rep_type and self.is_data_reduction_pool(rep_target_pool): LOG.debug('The replica target pool %s is a data reduction pool.', rep_target_pool) return True return False @staticmethod def _get_vdisk_create_params(opts, is_dr_pool, add_copies=False): easytier = 'on' if opts['easytier'] else 'off' if opts['rsize'] == -1: params = [] if opts['nofmtdisk']: params.append('-nofmtdisk') else: if is_dr_pool: params = ['-rsize', '%s%%' % str(opts['rsize']), '-autoexpand'] if opts['compression']: params.append('-compressed') else: params = ['-rsize', '%s%%' % str(opts['rsize']), '-autoexpand', '-warning', '%s%%' % str(opts['warning'])] if not opts['autoexpand']: params.remove('-autoexpand') if opts['compression']: params.append('-compressed') else: params.extend(['-grainsize', str(opts['grainsize'])]) if add_copies and (opts['mirror_pool'] or opts['aux_mirror_pool']): params.extend(['-copies', '2']) if not is_dr_pool: params.extend(['-easytier', easytier]) return params def create_vdisk(self, name, size, units, pool, opts): LOG.debug('Enter: create_vdisk: vdisk %s.', name) mdiskgrp = pool if opts['mirror_pool']: if not self.is_pool_defined(opts['mirror_pool']): raise exception.InvalidInput( reason=_('The pool %s in which mirrored copy is stored ' 'is invalid') % opts['mirror_pool']) # The syntax of pool SVC expects is pool:mirror_pool in # mdiskgrp for mirror volume mdiskgrp = '%s:%s' % (pool, opts['mirror_pool']) if opts['aux_mirror_pool']: if not self.is_pool_defined(opts['aux_mirror_pool']): raise exception.InvalidInput( reason=_('The pool %s in which aux mirrored copy is ' 'stored is invalid') % opts['aux_mirror_pool']) # The syntax of pool SVC expects is pool:aux_mirror_pool in # mdiskgrp for aux mirror volume mdiskgrp = '%s:%s' % (pool, opts['aux_mirror_pool']) is_dr_pool = False if opts['rsize'] != -1: is_dr_pool = self.is_volume_type_dr_pools(pool, opts) if is_dr_pool: self.check_data_reduction_pool_params(opts) params = self._get_vdisk_create_params( opts, is_dr_pool, add_copies=True if (opts['mirror_pool'] or opts['aux_mirror_pool']) else False) self.ssh.mkvdisk(name, size, units, mdiskgrp, opts, params) LOG.debug('Leave: _create_vdisk: volume %s.', name) def _get_hyperswap_volume_create_params(self, opts, is_dr_pool): # Storwize/svc use cli command mkvolume to create hyperswap volume. # You must specify -thin with grainsize. # You must specify either -thin or -compressed with warning. 
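        # Illustrative sketch of what this helper ends up building (the
        # option values here are examples, not defaults taken from this
        # file):
        #   regular pool, thin volume, rsize=2, warning=0, autoexpand=True,
        #   compression=False, grainsize=256:
        #       ['-buffersize', '2%', '-warning', '0%', '-thin',
        #        '-grainsize', '256']
        #   data reduction pool with compression=True:
        #       ['-compressed']
        # The resulting list is handed to ssh.mkvolume() (or to
        # ssh.addvolumecopy() when converting an existing volume to
        # hyperswap).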
params = [] LOG.debug('The I/O groups of a hyperswap volume will be selected by ' 'storage.') if is_dr_pool: if opts['compression']: params.append('-compressed') else: params.append('-thin') else: params.extend(['-buffersize', '%s%%' % str(opts['rsize']), '-warning', '%s%%' % str(opts['warning'])]) if not opts['autoexpand']: params.append('-noautoexpand') if opts['compression']: params.append('-compressed') else: params.append('-thin') params.extend(['-grainsize', str(opts['grainsize'])]) return params def create_hyperswap_volume(self, vol_name, size, units, pool, opts): vol_name = '"%s"' % vol_name params = [] if opts['rsize'] != -1: is_dr_pool = self.is_volume_type_dr_pools(pool, opts) if is_dr_pool: self.check_data_reduction_pool_params(opts) params = self._get_hyperswap_volume_create_params(opts, is_dr_pool) hyperpool = '%s:%s' % (pool, opts['peer_pool']) self.ssh.mkvolume(vol_name, str(size), units, hyperpool, params) def convert_volume_to_hyperswap(self, vol_name, opts, state): vol_name = '%s' % vol_name if not self.is_system_topology_hyperswap(state): msg = _('Convert volume to hyperswap failed, the system is ' 'below release 7.6.0.0 or it is not hyperswap ' 'topology.') raise exception.VolumeDriverException(message=msg) else: attr = self.get_vdisk_attributes(vol_name) if attr is None: msg = (_('convert_volume_to_hyperswap: Failed to get ' 'attributes for volume %s.') % vol_name) LOG.error(msg) raise exception.VolumeDriverException(message=msg) pool = attr['mdisk_grp_name'] self.check_hyperswap_pool(pool, opts['peer_pool']) hyper_pool = '%s' % opts['peer_pool'] params = [] if opts['rsize'] != -1: is_dr_pool = self.is_volume_type_dr_pools(pool, opts) if is_dr_pool: self.check_data_reduction_pool_params(opts) params = self._get_hyperswap_volume_create_params(opts, is_dr_pool) self.ssh.addvolumecopy(vol_name, hyper_pool, params) def convert_extended_volume_to_hyperswap(self, vol_name, opts, state): vol_name = '%s' % vol_name attr = self.get_vdisk_attributes(vol_name) if attr is None: msg = (_('convert_volume_to_hyperswap: Failed to get ' 'attributes for volume %s.') % vol_name) LOG.error(msg) raise exception.VolumeDriverException(message=msg) hyper_pool = '%s' % opts['peer_pool'] params = [] if opts['rsize'] != -1: is_dr_pool = self.is_volume_type_dr_pools(attr['mdisk_grp_name'], opts) if is_dr_pool: self.check_data_reduction_pool_params(opts) params = self._get_hyperswap_volume_create_params(opts, is_dr_pool) self.ssh.addvolumecopy(vol_name, hyper_pool, params) def convert_hyperswap_volume_to_normal(self, vol_name, peer_pool): vol_name = '%s' % vol_name hyper_pool = '%s' % peer_pool self.ssh.rmvolumecopy(vol_name, hyper_pool) def delete_hyperswap_volume(self, volume, force_unmap, force_delete): """Ensures that vdisk is not part of FC mapping and deletes it.""" if not self.is_vdisk_defined(volume): LOG.warning('Tried to delete non-existent volume %s.', volume) return self.ensure_vdisk_no_fc_mappings(volume, allow_snaps=True, allow_fctgt=True) self.ssh.rmvolume(volume, force_unmap=force_unmap, force_delete=force_delete) def get_vdisk_attributes(self, vdisk): attrs = self.ssh.lsvdisk(vdisk) return attrs def is_vdisk_defined(self, vdisk_name): """Check if vdisk is defined.""" attrs = self.get_vdisk_attributes(vdisk_name) return attrs is not None def get_vdisk_copy_attrs(self, vdisk, copy_id): return self.ssh.lsvdiskcopy(vdisk, copy_id=copy_id)[0] def get_vdisk_copies(self, vdisk): copies = {'primary': None, 'secondary': None} resp = self.ssh.lsvdiskcopy(vdisk) for copy_id, status, sync, 
primary, mdisk_grp in ( resp.select('copy_id', 'status', 'sync', 'primary', 'mdisk_grp_name')): copy = {'copy_id': copy_id, 'status': status, 'sync': sync, 'primary': primary, 'mdisk_grp_name': mdisk_grp, 'sync_progress': None} if copy['sync'] != 'yes': progress_info = self.ssh.lsvdisksyncprogress(vdisk, copy_id) copy['sync_progress'] = progress_info['progress'] if copy['primary'] == 'yes': copies['primary'] = copy else: copies['secondary'] = copy return copies def _prepare_fc_map(self, fc_map_id, timeout, restore): self.ssh.prestartfcmap(fc_map_id, restore) mapping_ready = False max_retries = (timeout // self.WAIT_TIME) + 1 for try_number in range(1, max_retries): mapping_attrs = self._get_flashcopy_mapping_attributes(fc_map_id) if (mapping_attrs is None or 'status' not in mapping_attrs): break if mapping_attrs['status'] == 'prepared': mapping_ready = True break elif mapping_attrs['status'] == 'stopped': self.ssh.prestartfcmap(fc_map_id, restore) elif mapping_attrs['status'] != 'preparing': msg = (_('Unexecpted mapping status %(status)s for mapping ' '%(id)s. Attributes: %(attr)s.') % {'status': mapping_attrs['status'], 'id': fc_map_id, 'attr': mapping_attrs}) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) greenthread.sleep(self.WAIT_TIME) if not mapping_ready: msg = (_('Mapping %(id)s prepare failed to complete within the ' 'allotted %(to)d seconds timeout. Terminating.') % {'id': fc_map_id, 'to': timeout}) LOG.error(msg) raise exception.VolumeDriverException(message=msg) def start_fc_consistgrp(self, fc_consistgrp): self.ssh.startfcconsistgrp(fc_consistgrp) def create_fc_consistgrp(self, fc_consistgrp): self.ssh.mkfcconsistgrp(fc_consistgrp) def delete_fc_consistgrp(self, fc_consistgrp): self.ssh.rmfcconsistgrp(fc_consistgrp) def stop_fc_consistgrp(self, fc_consistgrp): self.ssh.stopfcconsistgrp(fc_consistgrp) def run_consistgrp_snapshots(self, fc_consistgrp, snapshots, state, config, timeout): model_update = {'status': fields.GroupSnapshotStatus.AVAILABLE} snapshots_model_update = [] try: for snapshot in snapshots: opts = self.get_vdisk_params(config, state, snapshot['volume_type_id']) volume = snapshot.volume if not volume: msg = (_("Can't get volume from snapshot: %(id)s") % {"id": snapshot.id}) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) vhost = volume.host if '#' not in vhost: attrs = self.get_vdisk_attributes(volume['name']) pool = self._get_pool(attrs) else: pool = volume_utils.extract_host(volume.host, 'pool') self.create_flashcopy_to_consistgrp(snapshot['volume_name'], snapshot['name'], fc_consistgrp, config, opts, False, pool=pool) self.prepare_fc_consistgrp(fc_consistgrp, timeout) self.start_fc_consistgrp(fc_consistgrp) # There is CG limitation that could not create more than 128 CGs. # After start CG, we delete CG to avoid CG limitation. # Cinder general will maintain the CG and snapshots relationship. self.delete_fc_consistgrp(fc_consistgrp) except exception.VolumeBackendAPIException as err: model_update['status'] = fields.GroupSnapshotStatus.ERROR # Release cg self.delete_fc_consistgrp(fc_consistgrp) LOG.error("Failed to create CGSnapshot. 
" "Exception: %s.", err) for snapshot in snapshots: snapshots_model_update.append( {'id': snapshot['id'], 'status': model_update['status'], 'replication_status': fields.ReplicationStatus.NOT_CAPABLE}) return model_update, snapshots_model_update def delete_consistgrp_snapshots(self, fc_consistgrp, snapshots): """Delete flashcopy maps and consistent group.""" model_update = {'status': fields.GroupSnapshotStatus.DELETED} snapshots_model_update = [] try: self.delete_fc_consistgrp(fc_consistgrp) except exception.VolumeBackendAPIException as err: if CMMVC5753E in err.msg: LOG.warning('Failed to delete as flash copy consistency ' 'group %s does not exist,ignoring err: %s', fc_consistgrp, err) for snapshot in snapshots: try: self.delete_vdisk(snapshot['name'], force_unmap=False, force_delete=True) snapshots_model_update.append( {'id': snapshot['id'], 'status': fields.GroupSnapshotStatus.DELETED}) except exception.VolumeBackendAPIException as err: model_update['status'] = ( fields.GroupSnapshotStatus.ERROR_DELETING) snapshots_model_update.append( {'id': snapshot['id'], 'status': fields.GroupSnapshotStatus.ERROR_DELETING}) LOG.error("Failed to delete the snapshot %(snap)s of " "CGSnapshot. Exception: %(exception)s.", {'snap': snapshot['name'], 'exception': err}) return model_update, snapshots_model_update def prepare_fc_consistgrp(self, fc_consistgrp, timeout): """Prepare FC Consistency Group.""" self.ssh.prestartfcconsistgrp(fc_consistgrp) def prepare_fc_consistgrp_success(): mapping_ready = False mapping_attrs = self._get_flashcopy_consistgrp_attr(fc_consistgrp) if (mapping_attrs is None or 'status' not in mapping_attrs): pass if mapping_attrs['status'] == 'prepared': mapping_ready = True elif mapping_attrs['status'] == 'stopped': self.ssh.prestartfcconsistgrp(fc_consistgrp) elif mapping_attrs['status'] != 'preparing': msg = (_('Unexpected mapping status %(status)s for mapping ' '%(id)s. Attributes: %(attr)s.') % {'status': mapping_attrs['status'], 'id': fc_consistgrp, 'attr': mapping_attrs}) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) return mapping_ready self._wait_for_a_condition(prepare_fc_consistgrp_success, timeout) def create_cg_from_source(self, group, fc_consistgrp, sources, targets, state, config, timeout): """Create consistence group from source""" LOG.debug('Enter: create_cg_from_source: cg %(cg)s' ' source %(source)s, target %(target)s', {'cg': fc_consistgrp, 'source': sources, 'target': targets}) model_update = {'status': fields.GroupStatus.AVAILABLE} ctxt = context.get_admin_context() try: for source, target in zip(sources, targets): opts = self.get_vdisk_params(config, state, source['volume_type_id']) vhost = target['host'] if '#' not in vhost: pool = opts.get('storage_pool') else: pool = volume_utils.extract_host(target['host'], 'pool') self.create_flashcopy_to_consistgrp(source['name'], target['name'], fc_consistgrp, config, opts, True, pool=pool) self.prepare_fc_consistgrp(fc_consistgrp, timeout) self.start_fc_consistgrp(fc_consistgrp) self.delete_fc_consistgrp(fc_consistgrp) volumes_model_update = self._get_volume_model_updates( ctxt, targets, group['id'], model_update['status']) except exception.VolumeBackendAPIException as err: model_update['status'] = fields.GroupStatus.ERROR volumes_model_update = self._get_volume_model_updates( ctxt, targets, group['id'], model_update['status']) with excutils.save_and_reraise_exception(): # Release cg self.delete_fc_consistgrp(fc_consistgrp) LOG.error("Failed to create CG from CGsnapshot. 
" "Exception: %s", err) return model_update, volumes_model_update LOG.debug('Leave: create_cg_from_source.') return model_update, volumes_model_update def _get_volume_model_updates(self, ctxt, volumes, cgId, status='available'): """Update the volume model's status and return it.""" volume_model_updates = [] LOG.info("Updating status for CG: %(id)s.", {'id': cgId}) if volumes: for volume in volumes: volume_model_updates.append({ 'id': volume['id'], 'status': status, 'replication_status': fields.ReplicationStatus.NOT_CAPABLE}) else: LOG.info("No volume found for CG: %(cg)s.", {'cg': cgId}) return volume_model_updates def update_clean_rate(self, volume_name, new_clean_rate): mapping_ids = self._get_vdisk_fc_mappings(volume_name) for map_id in mapping_ids: attrs = self._get_flashcopy_mapping_attributes(map_id) # chfcmap should not be called for rc_controlled fcmap if attrs is not None and attrs['rc_controlled'] != 'yes': self.ssh.chfcmap(map_id, clean_rate=str(new_clean_rate)) def check_flashcopy_rate(self, flashcopy_rate): if not self.code_level: sys_info = self.get_system_info() self.code_level = sys_info['code_level'] if flashcopy_rate not in range(1, 151): raise exception.InvalidInput( reason=_('The configured flashcopy rate should be ' 'between 1 and 150.')) elif self.code_level < (7, 8, 1, 0) and flashcopy_rate > 100: msg = (_('The configured flashcopy rate is %(fc_rate)s, The ' 'storage code level is %(code_level)s, the flashcopy_rate' ' range is 1-100 if the storwize code level ' 'below 7.8.1.') % {'fc_rate': flashcopy_rate, 'code_level': self.code_level}) LOG.error(msg) raise exception.VolumeDriverException(message=msg) def update_flashcopy_rate(self, volume_name, new_flashcopy_rate): mapping_ids = self._get_vdisk_fc_mappings(volume_name) for map_id in mapping_ids: attrs = self._get_flashcopy_mapping_attributes(map_id) copy_rate = attrs['copy_rate'] # update flashcopy rate for clone volume if copy_rate != '0' and attrs['rc_controlled'] != 'yes': self.ssh.chfcmap(map_id, copyrate=str(new_flashcopy_rate)) def run_flashcopy(self, source, target, timeout, copy_rate, clean_rate, full_copy=True, restore=False): """Create a FlashCopy mapping from the source to the target.""" LOG.debug('Enter: run_flashcopy: execute FlashCopy from source ' '%(source)s to target %(target)s.', {'source': source, 'target': target}) self.check_flashcopy_rate(copy_rate) fc_map_id = self.ssh.mkfcmap(source, target, full_copy, copy_rate, clean_rate) self._prepare_fc_map(fc_map_id, timeout, restore) self.ssh.startfcmap(fc_map_id, restore) LOG.debug('Leave: run_flashcopy: FlashCopy started from ' '%(source)s to %(target)s.', {'source': source, 'target': target}) def create_flashcopy_to_consistgrp(self, source, target, consistgrp, config, opts, full_copy=False, pool=None): """Create a FlashCopy mapping and add to consistent group.""" LOG.debug('Enter: create_flashcopy_to_consistgrp: create FlashCopy' ' from source %(source)s to target %(target)s. 
' 'Then add the flashcopy to %(cg)s.', {'source': source, 'target': target, 'cg': consistgrp}) src_attrs = self.get_vdisk_attributes(source) if src_attrs is None: msg = (_('create_copy: Source vdisk %(src)s ' 'does not exist.') % {'src': source}) LOG.error(msg) raise exception.VolumeDriverException(message=msg) src_size = src_attrs['capacity'] # In case we need to use a specific pool if not pool: pool = self._get_pool(src_attrs) if not full_copy: opts['rsize'] = config.storwize_svc_vol_rsize opts['autoexpand'] = True if opts and opts.get('iogrp') is None: opts['iogrp'] = src_attrs['IO_group_id'] self.create_vdisk(target, src_size, 'b', pool, opts) if opts['qos']: vdisk_size = int(float(src_size) / (1 << 30)) self.add_vdisk_qos(target, opts['qos'], vdisk_size) self.check_flashcopy_rate(opts['flashcopy_rate']) self.ssh.mkfcmap(source, target, full_copy, opts['flashcopy_rate'], opts['clean_rate'], consistgrp=consistgrp) LOG.debug('Leave: create_flashcopy_to_consistgrp: ' 'FlashCopy started from %(source)s to %(target)s.', {'source': source, 'target': target}) def _get_pool(self, volume): pool = volume['mdisk_grp_name'] if 'many' in pool: LOG.info("Mirror volume copy found %s: Getting volume " "copies", volume['name']) copies = self.get_vdisk_copies(volume['name']) if 'primary' in copies: pool = copies['primary']['mdisk_grp_name'] return pool def _get_vdisk_fc_mappings(self, vdisk): """Return FlashCopy mappings that this vdisk is associated with.""" mapping_ids = [] resp = self.ssh.lsvdiskfcmappings(vdisk) for id in resp.select('id'): mapping_ids.append(id) return mapping_ids def _get_flashcopy_mapping_attributes(self, fc_map_id): try: resp = self.ssh.lsfcmap(fc_map_id) return resp[0] if len(resp) else None except exception.VolumeBackendAPIException as ex: LOG.warning("Failed to get fcmap %(fcmap)s info. " "Exception: %(ex)s.", {'fcmap': fc_map_id, 'ex': ex}) return None def _get_flashcopy_consistgrp_attr(self, fc_map_id): resp = self.ssh.lsfcconsistgrp(fc_map_id) if not len(resp): return None return resp[0] @volume_utils.trace def _check_delete_vdisk_fc_mappings(self, name, allow_snaps=True, allow_fctgt=False, rel_info=None): """FlashCopy mapping check helper.""" mapping_ids = self._get_vdisk_fc_mappings(name) wait_for_copy = False for map_id in mapping_ids: attrs = self._get_flashcopy_mapping_attributes(map_id) # We should ignore GMCV flash copies # Hyperswap flash copies are also ignored. if not attrs or 'yes' == attrs['rc_controlled']: continue source = attrs['source_vdisk_name'] target = attrs['target_vdisk_name'] copy_rate = attrs['copy_rate'] status = attrs['status'] progress = attrs['progress'] LOG.debug('Loopcall: source: %s, target: %s, copy_rate: %s, ' 'status: %s, progress: %s, mapid: %s', source, target, copy_rate, status, progress, map_id) if allow_fctgt and target == name and status == 'copying': try: self.ssh.stopfcmap(map_id) except exception.VolumeBackendAPIException as ex: LOG.warning(ex) wait_for_copy = True try: attrs = self._get_flashcopy_mapping_attributes(map_id) except exception.VolumeBackendAPIException as ex: LOG.warning(ex) wait_for_copy = True continue if attrs: status = attrs['status'] else: continue if copy_rate == '0': if source == name: # Vdisk with snapshots. Return False if snapshot # not allowed. 
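                    # (Aside: when snapshots are allowed, the zero-rate
                    # mapping is converted just below via
                    # chfcmap copyrate='50' autodel='on', i.e. it becomes a
                    # background copy that removes itself once it reaches
                    # 100%, after which the surrounding loop can finish.)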
if not allow_snaps: raise loopingcall.LoopingCallDone(retvalue=False) self.ssh.chfcmap(map_id, copyrate='50', autodel='on') wait_for_copy = True else: # A snapshot if target != name: msg = (_('Vdisk %(name)s not involved in ' 'mapping %(src)s -> %(tgt)s.') % {'name': name, 'src': source, 'tgt': target}) LOG.error(msg) raise exception.VolumeDriverException(message=msg) try: if status in ['copying', 'prepared']: self.ssh.stopfcmap(map_id) # Need to wait for the fcmap to change to # stopped state before remove fcmap wait_for_copy = True elif status in ['stopping', 'preparing']: wait_for_copy = True else: self.ssh.rmfcmap(map_id) except exception.VolumeBackendAPIException as ex: LOG.warning(ex) wait_for_copy = True # Case 4: Copy in progress - wait and will autodelete else: try: if status == 'prepared': self.ssh.stopfcmap(map_id) self.ssh.rmfcmap(map_id) elif status in ['idle_or_copied', 'stopped']: # Prepare failed or stopped self.ssh.rmfcmap(map_id) elif (status in ['copying', 'prepared'] and progress == '100'): force = False if rel_info: force = True self.ssh.stopfcmap(map_id, force) else: wait_for_copy = True except exception.VolumeBackendAPIException as ex: LOG.warning(ex) wait_for_copy = True if not wait_for_copy or not len(mapping_ids): raise loopingcall.LoopingCallDone(retvalue=True) @volume_utils.trace def _check_vdisk_fc_mappings(self, name, allow_snaps=True, allow_fctgt=False, rel_info=None): """FlashCopy mapping check helper.""" # if this is a remove disk we need to be down to one fc clone mapping_ids = self._get_vdisk_fc_mappings(name) Rc_mapping_ids = [] if len(mapping_ids) > 1 and allow_fctgt: LOG.debug('Loopcall: vdisk %s has ' 'more than one fc map. Waiting.', name) for map_id in mapping_ids: attrs = self._get_flashcopy_mapping_attributes(map_id) if not attrs: continue if 'yes' == attrs.get('rc_controlled', None): Rc_mapping_ids.append(map_id) continue source = attrs['source_vdisk_name'] target = attrs['target_vdisk_name'] copy_rate = attrs['copy_rate'] status = attrs['status'] progress = attrs['progress'] LOG.debug('Loopcall: source: %s, target: %s, copy_rate: %s, ' 'status: %s, progress: %s, mapid: %s', source, target, copy_rate, status, progress, map_id) if copy_rate != '0' and source == name: try: if status in ['copying'] and progress == '100': self.ssh.stopfcmap(map_id) elif status == 'idle_or_copied' and progress == '100': # wait for auto-delete of fcmap. continue elif status in ['idle_or_copied', 'stopped']: # Prepare failed or stopped self.ssh.rmfcmap(map_id) # handle VolumeBackendAPIException to let it go through # next attempts in case of any cli exception. except exception.VolumeBackendAPIException as ex: LOG.warning(ex) if len(mapping_ids) - len(Rc_mapping_ids) > 1: return return self._check_delete_vdisk_fc_mappings( name, allow_snaps=allow_snaps, allow_fctgt=allow_fctgt, rel_info=rel_info) def ensure_vdisk_no_fc_mappings(self, name, allow_snaps=True, allow_fctgt=False, rel_info=None): """Ensure vdisk has no flashcopy mappings.""" timer = loopingcall.FixedIntervalLoopingCall( self._check_vdisk_fc_mappings, name, allow_snaps, allow_fctgt, rel_info) # Create a timer greenthread. The default volume service heart # beat is every 10 seconds. The flashcopy usually takes hours # before it finishes. Don't set the sleep interval shorter # than the heartbeat. Otherwise volume service heartbeat # will not be serviced. 
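        # A minimal sketch of the looping-call pattern used here (and by
        # _wait_for_a_condition above):
        #
        #     def _check():
        #         if done:
        #             raise loopingcall.LoopingCallDone(retvalue=True)
        #         # otherwise just return; the timer calls us again
        #
        #     timer = loopingcall.FixedIntervalLoopingCall(_check)
        #     result = timer.start(interval=interval).wait()
        #
        # start().wait() blocks until LoopingCallDone is raised and returns
        # its retvalue, which is what this method hands back to the caller.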
LOG.debug('Calling _ensure_vdisk_no_fc_mappings: vdisk %s.', name) ret = timer.start(interval=self.check_fcmapping_interval).wait() timer.stop() return ret def start_relationship(self, volume_name, primary=None, rcrel=None): if rcrel is None: vol_attrs = self.get_vdisk_attributes(volume_name) rcrel = vol_attrs['RC_name'] self.ssh.startrcrelationship(rcrel, primary) def stop_relationship(self, volume_name, access=False, rcrel=None): if rcrel is None: vol_attrs = self.get_vdisk_attributes(volume_name) rcrel = vol_attrs['RC_name'] self.ssh.stoprcrelationship(rcrel, access=access) def create_relationship(self, master, aux, system, asyncmirror, cyclingmode=False, masterchange=None, cycle_period_seconds=None): try: rc_id = self.ssh.mkrcrelationship(master, aux, system, asyncmirror, cyclingmode) except exception.VolumeBackendAPIException as ex: rc_id = None # CMMVC5959E is the code in Stowize storage, meaning that # there is a relationship that already has this name on the # master cluster. # pylint: disable=E1101 if hasattr(ex, 'msg') and 'CMMVC5959E' not in ex.msg: # If there is no relation between the primary and the # secondary back-end storage, the exception is raised. raise if rc_id: # We need setup master and aux change volumes for gmcv # before we can start remote relationship # aux change volume must be set on target site rel_info = self.ssh.lsrcrelationship(rc_id) rc_name = rel_info[0]['name'] if cycle_period_seconds: self.change_relationship_cycleperiod(master, cycle_period_seconds, rc_name) if masterchange: self.change_relationship_changevolume(master, masterchange, True, rc_name) else: self.start_relationship(master, rcrel=rc_name) return rc_name def change_relationship_changevolume(self, volume_name, change_volume, master, rcrel=None): if rcrel is None: vol_attrs = self.get_vdisk_attributes(volume_name) rcrel = vol_attrs['RC_name'] if rcrel and change_volume: self.ssh.ch_rcrelationship_changevolume(rcrel, change_volume, master) def change_relationship_cycleperiod(self, volume_name, cycle_period_seconds, rcrel=None): if rcrel is None: vol_attrs = self.get_vdisk_attributes(volume_name) rcrel = vol_attrs['RC_name'] if rcrel and cycle_period_seconds: self.ssh.ch_rcrelationship_cycleperiod(rcrel, cycle_period_seconds) def change_relationship_cyclingmode(self, volume_name, cyclingmode='none', rcrel=None): if rcrel is None: vol_attrs = self.get_vdisk_attributes(volume_name) rcrel = vol_attrs['RC_name'] if rcrel and cyclingmode: self.ssh.ch_rcrelationship_cyclingmode(rcrel, cyclingmode) def change_consistgrp_cyclingmode(self, rccg_name, cyclingmode='none'): self.ssh.ch_rcconsistgrp_cyclingmode(rccg_name, cyclingmode) def delete_relationship(self, volume_name, rcrel=None): if rcrel is None: vol_attrs = self.get_vdisk_attributes(volume_name) rcrel = vol_attrs['RC_name'] self.ssh.rmrcrelationship(rcrel, True) def get_relationship_info(self, volume_name): vol_attrs = self.get_vdisk_attributes(volume_name) if not vol_attrs or not vol_attrs['RC_name']: LOG.info("Unable to get remote copy information for " "volume %s", volume_name) return None relationship = self.ssh.lsrcrelationship(vol_attrs['RC_name']) return relationship[0] if len(relationship) > 0 else None def is_replicated_volume_primary(self, volume, rel_info): # Return true if either source_volume is the primary volume or # onboarded auxiliary volume is primary [Reverse replication failover] if ((rel_info["master_vdisk_name"] == volume.name and rel_info["primary"] == "master") or (rel_info["master_vdisk_name"] != volume.name and 
rel_info["primary"] == "aux")): return True return False def get_target_volume_information(self, source_volume): source_volume_name = source_volume.name rel_info = self.get_relationship_info(source_volume_name) if rel_info: if source_volume_name == rel_info["aux_vdisk_name"]: target_volume = rel_info["master_vdisk_name"] else: target_volume = rel_info["aux_vdisk_name"] else: # Retrieving target volume based on Source volume name, if # relationship not exists. if source_volume_name[:4] == storwize_const.REPLICA_AUX_VOL_PREFIX: target_volume = source_volume_name[4:] else: target_volume = (storwize_const.REPLICA_AUX_VOL_PREFIX + source_volume_name) return (target_volume, rel_info) def delete_rc_volume(self, volume_name, rel_info=None, target_vol=False, force_unmap=True, retain_aux_volume=False): try: # If relationship exists, will delete the relationship. if rel_info: self.delete_relationship(volume_name, rcrel=rel_info['name']) # Delete change volume self.delete_vdisk( storwize_const.REPLICA_CHG_VOL_PREFIX + volume_name, force_unmap=force_unmap, force_delete=False) # We want to retain/remove the secondary volume after retyping of # primary volume from mirror to non-mirror storage template # or on the delete of the primary volume based on user's # choice of config value for storwize_svc_retain_aux_volume. # The default value is False. if (not retain_aux_volume and target_vol) or not target_vol: self.delete_vdisk(volume_name, force_unmap=force_unmap, force_delete=False) except Exception as e: msg = (_('Unable to delete the volume for ' 'volume %(vol)s. Exception: %(err)s.'), {'vol': volume_name, 'err': e}) LOG.exception(msg) raise exception.VolumeDriverException(message=msg) def switch_relationship(self, relationship, aux=True): self.ssh.switchrelationship(relationship, aux) # replication cg def chrcrelationship(self, relationship, rccg=None): rels = self.ssh.lsrcrelationship(relationship)[0] if rccg and rels['consistency_group_name'] == rccg: LOG.info('relationship %(rel)s is aleady added to group %(grp)s.', {'rel': relationship, 'grp': rccg}) return if not rccg and rels['consistency_group_name'] == '': LOG.info('relationship %(rel)s is aleady removed from group', {'rel': relationship}) return self.ssh.chrcrelationship(relationship, rccg) def get_rccg(self, rccg): return self.ssh.lsrcconsistgrp(rccg) def create_rccg(self, rccg, system): self.ssh.mkrcconsistgrp(rccg, system) def delete_rccg(self, rccg): if self.ssh.lsrcconsistgrp(rccg): self.ssh.rmrcconsistgrp(rccg) def start_rccg(self, rccg, primary=None): self.ssh.startrcconsistgrp(rccg, primary) def stop_rccg(self, rccg, access=False): self.ssh.stoprcconsistgrp(rccg, access) def get_rccg_info(self, volume_name): vol_attrs = self.get_vdisk_attributes(volume_name) if not vol_attrs or not vol_attrs['RC_name']: LOG.warning("Unable to get remote copy information for " "volume %s", volume_name) return None rcrel = self.ssh.lsrcrelationship(vol_attrs['RC_name']) if len(rcrel) > 0 and rcrel[0]['consistency_group_name']: return self.ssh.lsrcconsistgrp(rcrel[0]['consistency_group_name']) else: return None def get_rccg_name_by_volume_name(self, volume_name): vol_attrs = self.get_vdisk_attributes(volume_name) if not vol_attrs: LOG.warning("Unable to get volume attributes for " "volume %s", volume_name) return None rcrel = self.ssh.lsrcrelationship(vol_attrs['RC_name']) if len(rcrel) > 0 and rcrel[0].get('consistency_group_name'): return rcrel[0]['consistency_group_name'] else: return None def create_volumegroup(self, volumegroup_name): 
self.ssh.mkvolumegroup(volumegroup_name) def get_volumegroup(self, volumegroup_id_or_name): vg = self.ssh.lsvolumegroup(volumegroup_id_or_name) return vg if len(vg) > 0 else None def delete_volumegroup(self, volumegroup_id_or_name): if self.ssh.lsvolumegroup(volumegroup_id_or_name): self.ssh.rmvolumegroup(volumegroup_id_or_name) def add_vdisk_to_volumegroup(self, vol_name, volumegroup_id): self.ssh.chvdisk(vol_name, ['-volumegroup', volumegroup_id]) def remove_vdisk_from_volumegroup(self, vol_name): self.ssh.chvdisk(vol_name, ['-novolumegroup']) def check_codelevel_for_volumegroup(self, code_level): min_level = (8, 5, 1, 0) if not self.check_code_level_within_limit(min_level, None, code_level): msg = (_('The configured group type spec is ' '"volume_group_enabled". ' 'The supported code level for this group type spec ' 'is %(min_level)s ' 'The current storage code level is %(code_level)s.') % {'min_level': min_level, 'code_level': code_level}) LOG.error(msg) raise exception.VolumeDriverException(message=msg) def check_codelevel_for_temp_volumegroup(self, code_level): min_level = (8, 6, 2, 0) if not self.check_code_level_within_limit(min_level, None, code_level): msg = (_('The configured group type spec is ' '"temporary_volume_group_enabled". ' 'The supported code level for this group type spec ' 'is %(min_level)s ' 'The current storage code level is %(code_level)s.') % {'min_level': min_level, 'code_level': code_level}) LOG.error(msg) raise exception.VolumeDriverException(message=msg) def create_volumegroup_snapshot(self, params): self.ssh.addsnapshot(params) def is_volumegroup_snapshot_exists(self, params): """Check if volumegroup snapshot exists.""" attrs = self.ssh.lsvolumegroupsnapshot(params) return attrs is not None def delete_volumegroup_snapshot(self, params): """Delete volumegroup snapshot""" if not self.is_volumegroup_snapshot_exists(params): LOG.info('Tried to delete non-existent volumegroup snapshot.') return self.ssh.rmsnapshot(params) def get_volume_name_from_metadata(self, volume): """Get Volume name from metadata if metadata exists""" if volume.metadata: svc_volume_name = volume.metadata.get("Volume Name", None) if svc_volume_name: LOG.info('Volume %(cinder_id)s in cinder API is linked to ' 'volume_name %(svc_volume_name)s in SVC', {'cinder_id': volume.name, 'svc_volume_name': svc_volume_name}) volume.name_id = svc_volume_name.split("-", 1)[1] return volume def get_partnership_info(self, system_name): partnership = self.ssh.lspartnership(system_name) return partnership[0] if len(partnership) > 0 else None def get_partnershipcandidate_info(self, system_name): candidates = self.ssh.lspartnershipcandidate() for candidate in candidates: if system_name == candidate['name']: return candidate return None def mkippartnership(self, ip_v4, bandwidth=1000, copyrate=50): self.ssh.mkippartnership(ip_v4, bandwidth, copyrate) def mkfcpartnership(self, system_name, bandwidth=1000, copyrate=50): self.ssh.mkfcpartnership(system_name, bandwidth, copyrate) def chpartnership(self, partnership_id): self.ssh.chpartnership(partnership_id) def delete_vdisk(self, vdisk, force_unmap, force_delete): """Ensures that vdisk is not part of FC mapping and deletes it.""" LOG.debug('Enter: delete_vdisk: vdisk %s.', vdisk) if not self.is_vdisk_defined(vdisk): LOG.info('Tried to delete non-existent vdisk %s.', vdisk) return self.ensure_vdisk_no_fc_mappings(vdisk, allow_snaps=True, allow_fctgt=True) self.ssh.rmvdisk(vdisk, force_unmap=force_unmap, force_delete=force_delete) LOG.debug('Leave: delete_vdisk: vdisk 
%s.', vdisk) def create_copy(self, src, tgt, src_id, config, opts, full_copy, state, pool=None): """Create a new snapshot using FlashCopy.""" LOG.debug('Enter: create_copy: snapshot %(src)s to %(tgt)s.', {'tgt': tgt, 'src': src}) src_attrs = self.get_vdisk_attributes(src) if src_attrs is None: msg = (_('create_copy: Source vdisk %(src)s (%(src_id)s) ' 'does not exist.') % {'src': src, 'src_id': src_id}) LOG.error(msg) raise exception.VolumeDriverException(message=msg) src_size = src_attrs['capacity'] # In case we need to use a specific pool if not pool: pool = src_attrs['mdisk_grp_name'] opts['iogrp'] = self.select_io_group(state, opts, pool) self.create_vdisk(tgt, src_size, 'b', pool, opts) timeout = config.storwize_svc_flashcopy_timeout try: self.run_flashcopy(src, tgt, timeout, opts['flashcopy_rate'], opts['clean_rate'], full_copy=full_copy) except Exception: with excutils.save_and_reraise_exception(): self.delete_vdisk(tgt, force_unmap=False, force_delete=True) LOG.debug('Leave: _create_copy: snapshot %(tgt)s from ' 'vdisk %(src)s.', {'tgt': tgt, 'src': src}) def extend_vdisk(self, vdisk, amount): self.ssh.expandvdisksize(vdisk, amount) def add_vdisk_copy(self, vdisk, dest_pool, volume_type, state, config, auto_delete=False): """Add a vdisk copy in the given pool.""" resp = self.ssh.lsvdiskcopy(vdisk) if len(resp) > 1: msg = (_('add_vdisk_copy failed: A copy of volume %s exists. ' 'Adding another copy would exceed the limit of ' '2 copies.') % vdisk) raise exception.VolumeDriverException(message=msg) orig_copy_id = resp[0].get("copy_id", None) if orig_copy_id is None: msg = (_('add_vdisk_copy started without a vdisk copy in the ' 'expected pool.')) LOG.error(msg) raise exception.VolumeDriverException(message=msg) if volume_type is None: opts = self.get_vdisk_params(config, state, None) else: opts = self.get_vdisk_params(config, state, volume_type['id'], volume_type=volume_type) is_dr_pool = self.is_data_reduction_pool(dest_pool) if is_dr_pool and opts['rsize'] != -1: self.check_data_reduction_pool_params(opts) params = self._get_vdisk_create_params(opts, is_dr_pool) try: new_copy_id = self.ssh.addvdiskcopy(vdisk, dest_pool, params, auto_delete) except exception.VolumeBackendAPIException as e: msg = (_('Unable to add vdiskcopy for volume %(vol)s. 
' 'Exception: %(err)s.'), {'vol': vdisk, 'err': e}) LOG.exception(msg) raise exception.VolumeDriverException(message=msg) return (orig_copy_id, new_copy_id) def is_vdisk_copy_synced(self, vdisk, copy_id): sync = self.ssh.lsvdiskcopy(vdisk, copy_id=copy_id)[0]['sync'] if sync == 'yes': return True return False def rm_vdisk_copy(self, vdisk, copy_id): self.ssh.rmvdiskcopy(vdisk, copy_id) def lsvdiskcopy(self, vdisk, copy_id=None): return self.ssh.lsvdiskcopy(vdisk, copy_id) @staticmethod def can_migrate_to_host(host, state): if 'location_info' not in host['capabilities']: return None info = host['capabilities']['location_info'] try: (dest_type, dest_id, dest_pool) = info.split(':') except ValueError: return None if (dest_type != 'StorwizeSVCDriver' or dest_id != state['system_id']): return None return dest_pool def add_vdisk_qos(self, vdisk, qos, vdisk_size): """Add the QoS configuration to the volume.""" for key, value in qos.items(): if key in self.svc_qos and key == "IOThrottling": param = self.svc_qos[key]['param'] if storwize_const.IOPS_PER_GB in qos.values(): value = value * vdisk_size if not int(value): value = 1 vdisk_params = ['-' + param, str(int(value))] # Add -unitmb param to the chvdisk if qos:IOThrottling_unit # is added in extra specs key_unit = self.svc_qos[key].get('unit', None) if key_unit in qos: key_unit_param = qos.get(key_unit) if (key_unit_param and key_unit_param == storwize_const.MBPS): t_val = '-' + self.svc_qos[key_unit][key_unit_param] vdisk_params.append(t_val) self.ssh.chvdisk(vdisk, vdisk_params) def update_vdisk_qos(self, vdisk, qos, vdisk_size): """Update all the QoS in terms of a key and value. svc_qos saves all the supported QoS parameters. Going through this dict, we set the new values to all the parameters. If QoS is available in the QoS configuration, the value is taken from it; if not, the value will be set to default. 
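        A sketch only (it assumes svc_qos maps 'IOThrottling' to the chvdisk
        parameter 'rate' and the mbps unit to 'unitmb'; the real mapping is
        defined elsewhere in this module): given
        qos = {'IOThrottling': 500.0, 'IOThrottling_unit': 'mbps'}, two
        chvdisk calls are issued, first clearing the IOPS limit and then
        setting the bandwidth limit::

            disable = ['-rate', self.svc_qos['IOThrottling']['default']]
            enable = ['-rate', '500', '-unitmb']
            self.ssh.chvdisk(vdisk, disable)
            self.ssh.chvdisk(vdisk, enable)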
""" iothrottling = 'IOThrottling' if iothrottling in qos: throttling_value = qos[iothrottling] key_unit = self.svc_qos[iothrottling]['unit'] throttling_unit = qos[key_unit] # check if throttling unit specified is in allowed units # if not allowed - we will go with default unit - iops param = self.svc_qos[iothrottling]['param'] unit_param = self.svc_qos[key_unit][storwize_const.MBPS] default_throttling_value = self.svc_qos[iothrottling]['default'] if throttling_unit in self.svc_qos[key_unit]: # check if specified throttling unit is not the default unit # if not default unit - specify the parameter for the # special unit if throttling_unit == storwize_const.MBPS: # Uppdating vdisk_params to disable iops limit and # enable only bandwidth limit - in mbps # disable iops disable_vdisk_params = ['-' + param, default_throttling_value] # enable mbps enable_vdisk_params = ['-' + param, str(int(throttling_value)), '-' + unit_param] else: # This means that we have to disable mbps limit (bandwidth) # and enable iops limit if throttling_unit == storwize_const.IOPS_PER_GB: throttling_value = throttling_value * vdisk_size # disable mbps disable_vdisk_params = ['-' + param, default_throttling_value, '-' + unit_param] # enable iops enable_vdisk_params = ['-' + param, str(int(throttling_value))] # Disable conditional vdisk_params self.ssh.chvdisk(vdisk, disable_vdisk_params) # Enable conditional vdisk_params self.ssh.chvdisk(vdisk, enable_vdisk_params) def disable_vdisk_qos(self, vdisk, qos): """Disable the QoS.""" for key, value in qos.items(): if key in self.svc_qos and key == 'IOThrottling': # qos of previous volume type is in format: # qos - {'IOThrottling': 1000, 'IOThrottling_unit': 'iops'} param = self.svc_qos[key]['param'] vdisk_params = ['-' + param, self.svc_qos[key]['default']] # clear out iops limit self.ssh.chvdisk(vdisk, vdisk_params) vdisk_params.append( '-' + self.svc_qos['IOThrottling_unit']['mbps']) # clear out mbps limit self.ssh.chvdisk(vdisk, vdisk_params) def change_vdisk_options(self, vdisk, changes, opts, state): change_value = {'warning': '', 'easytier': '', 'autoexpand': ''} if 'warning' in opts: change_value['warning'] = '%s%%' % str(opts['warning']) if 'easytier' in opts: change_value['easytier'] = 'on' if opts['easytier'] else 'off' if 'autoexpand' in opts: change_value['autoexpand'] = 'on' if opts['autoexpand'] else 'off' for key in changes: self.ssh.chvdisk(vdisk, ['-' + key, change_value[key]]) def change_vdisk_iogrp(self, vdisk, state, iogrp): if state['code_level'] < (6, 4, 0, 0): LOG.debug('Ignore change IO group as storage code level is ' '%(code_level)s, below the required 6.4.0.0.', {'code_level': state['code_level']}) else: self.ssh.addvdiskaccess(vdisk, str(iogrp[0])) try: self.ssh.movevdisk(vdisk, str(iogrp[0])) except exception.VolumeBackendAPIException as e: self.ssh.rmvdiskaccess(vdisk, str(iogrp[0])) msg = (_('movevdisk command failed for %(vdisk),' 'performing rmdiskaccess for %(iogrp)s.' 'Exception: %(err)s.'), {'vdisk': vdisk, 'iogrp': iogrp[0], 'err': e}) LOG.exception(msg) raise exception.VolumeBackendAPIException(data=msg) self.ssh.rmvdiskaccess(vdisk, str(iogrp[1])) def vdisk_by_uid(self, vdisk_uid): """Returns the properties of the vdisk with the specified UID. Returns None if no such disk exists. """ vdisks = self.ssh.lsvdisks_from_filter('vdisk_UID', vdisk_uid) if len(vdisks) == 0: return None if len(vdisks) != 1: msg = (_('Expected single vdisk returned from lsvdisk when ' 'filtering on vdisk_UID. 
%(count)s were returned.') % {'count': len(vdisks)}) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) vdisk = vdisks.result[0] return self.ssh.lsvdisk(vdisk['name']) def is_vdisk_in_use(self, vdisk): """Returns True if the specified vdisk is mapped to at least 1 host.""" resp = self.ssh.lsvdiskhostmap(vdisk) return len(resp) != 0 def rename_vdisk(self, vdisk, new_name): self.ssh.chvdisk(vdisk, ['-name', new_name]) def migratevdisk(self, vdisk, dest_pool, copy_id='0'): self.ssh.migratevdisk(vdisk, dest_pool, copy_id) def is_system_topology_hyperswap(self, state): """Returns True if the system version higher than 7.5 and the system topology is hyperswap. """ if state['code_level'] < (7, 6, 0, 0): LOG.debug('Hyperswap failure as the storage ' 'code_level is %(code_level)s, below ' 'the required 7.6.0.0.', {'code_level': state['code_level']}) else: if state['topology'] == 'hyperswap': return True else: LOG.debug('Hyperswap failure as the storage system ' 'topology is not hyperswap.') return False def check_hyperswap_pool(self, pool, peer_pool): # Check the hyperswap pools. if not peer_pool: raise exception.InvalidInput( reason=_('The peer pool is necessary for hyperswap volume, ' 'please configure the peer pool.')) pool_attr = None peer_pool_attr = None for stat_pool in self.stats.get('pools', []): if stat_pool['pool_name'] == pool: pool_attr = stat_pool elif stat_pool['pool_name'] == peer_pool: peer_pool_attr = stat_pool if pool_attr is None: pool_attr = self.get_pool_attrs(pool) if peer_pool_attr is None: peer_pool_attr = self.get_pool_attrs(peer_pool) if not peer_pool_attr: raise exception.InvalidInput( reason=_('The hyperswap peer pool %s ' 'is invalid.') % peer_pool) if not pool_attr['site_id'] or not peer_pool_attr['site_id']: raise exception.InvalidInput( reason=_('The site_id of pools is necessary for hyperswap ' 'volume, but there is no site_id in the pool or ' 'peer pool.')) if pool_attr['site_id'] == peer_pool_attr['site_id']: raise exception.InvalidInput( reason=_('The hyperswap volume must be configured in two ' 'independent sites, the pool %(pool)s is on the ' 'same site as peer_pool %(peer_pool)s. 
') % {'pool': pool, 'peer_pool': peer_pool}) def pretreatment_before_revert(self, name): mapping_ids = self._get_vdisk_fc_mappings(name) for map_id in mapping_ids: attrs = self._get_flashcopy_mapping_attributes(map_id) if not attrs: continue target = attrs['target_vdisk_name'] copy_rate = attrs['copy_rate'] progress = attrs['progress'] status = attrs['status'] if status in ['copying', 'prepared'] and target == name: if copy_rate != '0' and progress != '100': msg = (_('Cannot start revert since fcmap %(map_id)s ' 'in progress, current progress is %(progress)s') % {'map_id': map_id, 'progress': progress}) LOG.error(msg) raise exception.VolumeDriverException(message=msg) elif copy_rate != '0' and progress == '100': LOG.debug('Split completed clone map_id=%(map_id)s fcmap', {'map_id': map_id}) self.ssh.stopfcmap(map_id) @staticmethod def check_code_level_within_limit(min_level, max_level, code_level): if max_level is None: max_level = code_level return min_level <= code_level <= max_level class CLIResponse(object): """Parse SVC CLI output and generate iterable.""" def __init__(self, raw, ssh_cmd=None, delim='!', with_header=True): super(CLIResponse, self).__init__() if ssh_cmd: self.ssh_cmd = ' '.join(ssh_cmd) else: self.ssh_cmd = 'None' self.raw = raw self.delim = delim self.with_header = with_header self.result = self._parse() def select(self, *keys): for a in self.result: vs = [] for k in keys: v = a.get(k, None) if isinstance(v, str) or v is None: v = [v] if isinstance(v, list): vs.append(v) for item in zip(*vs): if len(item) == 1: yield item[0] else: yield item def __getitem__(self, key): try: return self.result[key] except KeyError: msg = (_('Did not find the expected key %(key)s in %(fun)s: ' '%(raw)s.') % {'key': key, 'fun': self.ssh_cmd, 'raw': self.raw}) raise exception.VolumeBackendAPIException(data=msg) def __iter__(self): for a in self.result: yield a def __len__(self): return len(self.result) def _parse(self): def get_reader(content, delim): for line in content.lstrip().splitlines(): line = line.strip() if line: yield line.split(delim) else: yield [] if isinstance(self.raw, str): stdout, stderr = self.raw, '' else: stdout, stderr = self.raw reader = get_reader(stdout, self.delim) result = [] if self.with_header: hds = tuple() for row in reader: hds = row break for row in reader: cur = dict() if len(hds) != len(row): msg = (_('Unexpected CLI response: header/row mismatch. ' 'header: %(header)s, row: %(row)s.') % {'header': hds, 'row': row}) raise exception.VolumeBackendAPIException(data=msg) for k, v in zip(hds, row): CLIResponse.append_dict(cur, k, v) result.append(cur) else: cur = dict() for row in reader: if row: CLIResponse.append_dict(cur, row[0], ' '.join(row[1:])) elif cur: # start new section result.append(cur) cur = dict() if cur: result.append(cur) return result @staticmethod def append_dict(dict_, key, value): key, value = key.strip(), value.strip() obj = dict_.get(key, None) if obj is None: dict_[key] = value elif isinstance(obj, list): obj.append(value) dict_[key] = obj else: dict_[key] = [obj, value] return dict_ class StorwizeSVCCommonDriver(san.SanDriver, driver.ManageableVD, driver.MigrateVD, driver.CloneableImageVD): """IBM Storwize V7000 SVC abstract base class for iSCSI/FC volume drivers. Version history: .. 
code-block:: none 1.0 - Initial driver 1.1 - FC support, create_cloned_volume, volume type support, get_volume_stats, minor bug fixes 1.2.0 - Added retype 1.2.1 - Code refactor, improved exception handling 1.2.2 - Fix bug #1274123 (races in host-related functions) 1.2.3 - Fix Fibre Channel connectivity: bug #1279758 (add delim to lsfabric, clear unused data from connections, ensure matching WWPNs by comparing lower case 1.2.4 - Fix bug #1278035 (async migration/retype) 1.2.5 - Added support for manage_existing (unmanage is inherited) 1.2.6 - Added QoS support in terms of I/O throttling rate 1.3.1 - Added support for volume replication 1.3.2 - Added support for consistency group 1.3.3 - Update driver to use ABC metaclasses 2.0 - Code refactor, split init file and placed shared methods for FC and iSCSI within the StorwizeSVCCommonDriver class 2.1 - Added replication V2 support to the global/metro mirror mode 2.1.1 - Update replication to version 2.1 2.1.2 - Added support volume_group (Flash copy) """ VERSION = "2.1.2" VDISKCOPYOPS_INTERVAL = 600 DEFAULT_GR_SLEEP = random.randint(20, 500) / 100.0 def __init__(self, *args, **kwargs): super(StorwizeSVCCommonDriver, self).__init__(*args, **kwargs) self.configuration.append_config_values(storwize_svc_opts) self._backend_name = self.configuration.safe_get('volume_backend_name') self.active_ip = self.configuration.san_ip self.inactive_ip = self.configuration.storwize_san_secondary_ip self._master_backend_helpers = StorwizeHelpers(self._run_ssh) self._aux_backend_helpers = None self._helpers = self._master_backend_helpers self._vdiskcopyops = {} self._vdiskcopyops_loop = None self.protocol = None self._storwize_portset = self.configuration.storwize_portset self._master_state = {'storage_nodes': {}, 'enabled_protocols': set(), 'compression_enabled': False, 'available_iogrps': [], 'system_name': None, 'system_id': None, 'code_level': None, } self._state = self._master_state self._aux_state = {'storage_nodes': {}, 'enabled_protocols': set(), 'compression_enabled': False, 'available_iogrps': [], 'system_name': None, 'system_id': None, 'code_level': None, } self._active_backend_id = kwargs.get('active_backend_id') # This list is used to ensure volume export self._volumes_list = [] # This dictionary is used to map each replication target to certain # replication manager object. self.replica_manager = {} # One driver can be configured with only one replication target # to failover. self._replica_target = {} # This boolean is used to indicate whether replication is supported # by this storage. self._replica_enabled = False # This list is used to save the supported replication modes. self._supported_replica_types = [] # This is used to save the available pools in failed-over status self._secondary_pools = None # This dictionary is used to save pools information. self._stats = {} # Storwize has the limitation that can not burst more than 3 new ssh # connections within 1 second. So slow down the initialization. time.sleep(1) def do_setup(self, ctxt): """Check that we have all configuration details from the storage.""" LOG.debug('enter: do_setup') # v2.1 replication setup self._get_storwize_config() # Validate that the pool exists self._validate_pools_exist() # Get list of all volumes self._get_all_volumes() # Update the pool stats self._update_volume_stats() # Save the pool stats information in helpers class. 
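        # (Caching the stats here lets helper routines such as
        # check_hyperswap_pool consult self.stats['pools'] first and only
        # fall back to get_pool_attrs() when a pool is not in the cache.)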
self._master_backend_helpers.stats = self._stats # Build the list of in-progress vdisk copy operations if ctxt is None: admin_context = context.get_admin_context() else: admin_context = ctxt.elevated() volumes = objects.VolumeList.get_all_by_host(admin_context, self.host) for volume in volumes: metadata = volume.admin_metadata curr_ops = metadata.get('vdiskcopyops', None) if curr_ops: ops = [tuple(x.split(':')) for x in curr_ops.split(';')] self._vdiskcopyops[volume['id']] = ops # if vdiskcopy exists in database, start the looping call if len(self._vdiskcopyops) >= 1: self._vdiskcopyops_loop = loopingcall.FixedIntervalLoopingCall( self._check_volume_copy_ops) self._vdiskcopyops_loop.start(interval=self.VDISKCOPYOPS_INTERVAL) LOG.debug('leave: do_setup') def _update_storwize_state(self, state, helper): # Get storage system name, id, and code level state.update(helper.get_system_info()) # Check if compression is supported state['compression_enabled'] = helper.compression_enabled() # Get the available I/O groups state['available_iogrps'] = helper.get_available_io_groups() # Get the iSCSI and FC names of the Storwize/SVC nodes state['storage_nodes'] = helper.get_node_info() # Add the iSCSI IP addresses and WWPNs to the storage node info helper.add_iscsi_ip_addrs(state['storage_nodes'], state['code_level'], portset=self._storwize_portset) helper.add_fc_wwpns(state['storage_nodes'], state['code_level']) # For each node, check what connection modes it supports. Delete any # nodes that do not support any types (may be partially configured). to_delete = [] for k, node in state['storage_nodes'].items(): if ((len(node['ipv4']) or len(node['ipv6']) or len(node['IP_address'])) and len(node['iscsi_name'])): node['enabled_protocols'].append('iSCSI') state['enabled_protocols'].add('iSCSI') if len(node['WWPN']): node['enabled_protocols'].append('FC') state['enabled_protocols'].add('FC') if not len(node['enabled_protocols']): to_delete.append(k) for delkey in to_delete: del state['storage_nodes'][delkey] def _get_backend_pools(self): if not self._active_backend_id: return self.configuration.storwize_svc_volpool_name elif not self._secondary_pools: self._secondary_pools = [self._replica_target.get('pool_name')] return self._secondary_pools def _get_backend_peer_pool(self): if not self._active_backend_id: return self.configuration.storwize_peer_pool def _validate_pools_exist(self): # Validate that the pool exists pools = self._get_backend_pools() for pool in pools: if not self._helpers.is_pool_defined(pool): reason = (_('Failed getting details for pool %s.') % pool) raise exception.InvalidInput(reason=reason) def _get_all_volumes(self): # Get list of all volumes pools = self._get_backend_pools() for pool in pools: pool_vols = self._helpers.get_pool_volumes(pool) for volume in pool_vols: self._volumes_list.append(volume['name']) def _get_config_param_value(self, config_param, config_param_value): if not config_param_value: config_param_value = self.configuration.safe_get(config_param) LOG.info('CONFIG:value of %(param)s' ' is %(value)s', {'param': config_param, 'value': config_param_value}) return config_param_value def check_for_setup_error(self): """Ensure that the flags are set properly.""" LOG.debug('enter: check_for_setup_error') # Check that we have the system ID information if self._state['system_name'] is None: exception_msg = (_('Unable to determine system name.')) raise exception.VolumeBackendAPIException(data=exception_msg) if self._state['system_id'] is None: exception_msg = (_('Unable to determine 
system id.')) raise exception.VolumeBackendAPIException(data=exception_msg) # Make sure we have at least one node configured if not len(self._state['storage_nodes']): msg = _('do_setup: No configured nodes.') LOG.error(msg) raise exception.VolumeDriverException(message=msg) if self.protocol not in self._state['enabled_protocols']: # TODO(mc_nair): improve this error message by looking at # self._state['enabled_protocols'] to tell user what driver to use raise exception.InvalidInput( reason=_('The storage device does not support %(prot)s. ' 'Please configure the device to support %(prot)s or ' 'switch to a driver using a different protocol.') % {'prot': self.protocol}) required_flags = ['san_ip', 'san_ssh_port', 'san_login', 'storwize_svc_volpool_name'] for flag in required_flags: if not self.configuration.safe_get(flag): raise exception.InvalidInput(reason=_('%s is not set.') % flag) # Ensure that either password or keyfile were set if not (self.configuration.san_password or self.configuration.san_private_key): raise exception.InvalidInput( reason=_('Password or SSH private key is required for ' 'authentication: set either san_password or ' 'san_private_key option.')) opts = self._helpers.build_default_opts(self.configuration) self._helpers.check_vdisk_opts(self._state, opts) LOG.debug('leave: check_for_setup_error') def _run_ssh(self, cmd_list, check_exit_code=True, attempts=1): cinder_utils.check_ssh_injection(cmd_list) command = ' '.join(cmd_list) if not self.sshpool: try: self.sshpool = self._set_up_sshpool(self.active_ip) except paramiko.SSHException: LOG.warning('Unable to use san_ip to create SSHPool. Now ' 'attempting to use storwize_san_secondary_ip ' 'to create SSHPool.') if self._toggle_ip(): self.sshpool = self._set_up_sshpool(self.active_ip) else: LOG.warning('Unable to create SSHPool using san_ip ' 'and not able to use ' 'storwize_san_secondary_ip since it is ' 'not configured.') raise try: return self._ssh_execute(self.sshpool, command, check_exit_code, attempts) except Exception: # Need to check if creating an SSHPool storwize_san_secondary_ip # before raising an error. try: if self._toggle_ip(): LOG.warning("Unable to execute SSH command with " "%(inactive)s. 
Attempting to execute SSH " "command with %(active)s.", {'inactive': self.inactive_ip, 'active': self.active_ip}) self.sshpool = self._set_up_sshpool(self.active_ip) return self._ssh_execute(self.sshpool, command, check_exit_code, attempts) else: LOG.warning('Not able to use ' 'storwize_san_secondary_ip since it is ' 'not configured.') raise except Exception: with excutils.save_and_reraise_exception(): LOG.error("Error running SSH command: %s", command) def _set_up_sshpool(self, ip): password = self.configuration.san_password privatekey = self.configuration.san_private_key min_size = self.configuration.ssh_min_pool_conn max_size = self.configuration.ssh_max_pool_conn sshpool = ssh_utils.SSHPool( ip, self.configuration.san_ssh_port, self.configuration.ssh_conn_timeout, self.configuration.san_login, password=password, privatekey=privatekey, min_size=min_size, max_size=max_size) return sshpool def _ssh_execute(self, sshpool, command, check_exit_code=True, attempts=1): try: with sshpool.item() as ssh: while attempts > 0: attempts -= 1 try: return processutils.ssh_execute( ssh, command, check_exit_code=check_exit_code, sanitize_stdout=False) except Exception as e: LOG.error('Error has occurred: %s', e) last_exception = e greenthread.sleep(self.DEFAULT_GR_SLEEP) try: std_err = last_exception.stderr if std_err is not None and not self._is_ascii(std_err): std_err = encodeutils.safe_decode(std_err, errors='ignore') LOG.error("The stderr has non-ascii characters. " "Please check the error code.\n" "Stderr: %s", std_err) std_err = std_err.split()[0] raise processutils.ProcessExecutionError( exit_code=last_exception.exit_code, stdout=last_exception.stdout, stderr=std_err, cmd=last_exception.cmd) except AttributeError: raise processutils.ProcessExecutionError( exit_code=-1, stdout="", stderr="Error running SSH command", cmd=command) except Exception: with excutils.save_and_reraise_exception(): LOG.error("Error running SSH command: %s", command) def _is_ascii(self, value): try: return all(ord(c) < 128 for c in value) except TypeError: return False def _toggle_ip(self): # Change active_ip if storwize_san_secondary_ip is set. if self.configuration.storwize_san_secondary_ip is None: return False self.inactive_ip, self.active_ip = self.active_ip, self.inactive_ip LOG.info('Toggle active_ip from %(old)s to %(new)s.', {'old': self.inactive_ip, 'new': self.active_ip}) return True def ensure_export(self, ctxt, volume): """Check that the volume exists on the storage. The system does not "export" volumes as a Linux iSCSI target does, and therefore we just check that the volume exists on the storage. 
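        In practice this is a membership test against the _volumes_list
        cache that _get_all_volumes() builds during do_setup(); a missing
        name is logged as an error rather than raised.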
""" volume_defined = volume['name'] in self._volumes_list if not volume_defined: LOG.error('ensure_export: Volume %s not found on storage.', volume['name']) def create_export(self, ctxt, volume, connector): model_update = None return model_update def remove_export(self, ctxt, volume): pass def create_export_snapshot(self, ctxt, snapshot, connector): model_update = None return model_update def remove_export_snapshot(self, ctxt, snapshot): pass def _get_vdisk_params(self, type_id, volume_type=None, volume_metadata=None): return self._helpers.get_vdisk_params(self.configuration, self._state, type_id, volume_type=volume_type, volume_metadata=volume_metadata) def _check_if_group_type_cg_snapshot(self, volume): if (volume.group_id and (not volume_utils.is_group_a_cg_snapshot_type(volume.group) and not volume_utils.is_group_a_type (volume.group, "consistent_group_replication_enabled"))): msg = _('Create volume with a replication or hyperswap ' 'group_id is not supported. Please add volume to ' 'group after volume creation.') LOG.error(msg) raise exception.VolumeDriverException(reason=msg) def _update_replication_properties(self, ctxt, volume, model_update): @cinder_utils.retry(exception.VolumeBackendAPIException, interval=2, retries=3) def _try_get_relationship_info(volume_name): try: rel_info = self._helpers.get_relationship_info(volume_name) return rel_info except Exception: msg = (_('_update_replication_properties: Failed to fetch ' 'relationship details for the volume.')) LOG.error(msg) raise exception.VolumeBackendAPIException(message=msg) model_update = model_update or dict() vol_metadata = model_update.get('metadata', {}) db_metadata = self.db.volume_metadata_get(ctxt.elevated(), volume['id']) model_update['metadata'] = db_metadata if db_metadata else dict() if (('IOThrottle_rate' not in vol_metadata) and ('IOThrottle_rate' in model_update['metadata'])): del model_update['metadata']['IOThrottle_rate'] model_update['metadata'].update(vol_metadata) rel_info = _try_get_relationship_info(volume.name) rep_properties = { 'Id': 'id', 'Relationship Name': 'name', 'Master Cluster Id': 'master_cluster_id', 'Master Cluster Name': 'master_cluster_name', 'Master Volume Id': 'master_vdisk_id', 'Master Volume Name': 'master_vdisk_name', 'Aux Cluster Id': 'aux_cluster_id', 'Aux Cluster Name': 'aux_cluster_name', 'Aux Volume Id': 'aux_vdisk_id', 'Aux Volume Name': 'aux_vdisk_name', 'Consistency Group Id': 'consistency_group_id', 'Consistency Group Name': 'consistency_group_name', 'Bg Copy Priority': 'bg_copy_priority', 'Primary': 'primary', 'Progress': 'progress', 'Mirroring State': 'state', 'Status': 'status', 'Sync': 'sync', 'Copy Type': 'copy_type', 'Cycling Mode': 'cycling_mode', 'Cycle Period Seconds': 'cycle_period_seconds', 'Master Change Volume Id': 'master_change_vdisk_id', 'Master Change Volume Name': 'master_change_vdisk_name', 'Aux Change Volume Id': 'aux_change_vdisk_id', 'Aux Change Volume Name': 'aux_change_vdisk_name', 'Freeze Time': 'freeze_time' } # Update model for replication if not rel_info: for key in rep_properties: if key in model_update['metadata']: del model_update['metadata'][key] else: for key, value in rep_properties.items(): model_update['metadata'][key] = rel_info.get(value) return model_update def _update_rccg_properties(self, ctxt, volume, group=None): rccg_name = self._get_rccg_name(group) if group else "" if not volume.metadata: volume.metadata = dict() volume.metadata['Consistency Group Name'] = rccg_name volume.save() def _update_volumegroup_properties(self, ctxt, 
volume, group=None): volumegroup_name = self._get_volumegroup_name(group) if group else "" if not volume.metadata: volume.metadata = dict() volume.metadata['Volume Group Name'] = volumegroup_name volume.save() def _update_volumegroup_snapshot_properties(self, ctxt, snapshot, group_snapshot=None): volumegroup_snapshot_name = ( self._get_volumegroup_snapshot_name(group_snapshot) if group_snapshot else "") if not snapshot.metadata: snapshot.metadata = dict() snapshot.metadata['snapshot_name'] = volumegroup_snapshot_name snapshot.save() def create_volume(self, volume): LOG.debug('enter: create_volume: volume %s', volume['name']) # Create a replication or hyperswap volume with group_id is not # allowed. self._check_if_group_type_cg_snapshot(volume) opts = self._get_vdisk_params(volume['volume_type_id'], volume_metadata= volume.get('volume_metadata')) ctxt = context.get_admin_context() rep_type = self._get_volume_replicated_type(ctxt, volume) pool = volume_utils.extract_host(volume['host'], 'pool') model_update = dict() if opts['volume_topology'] == 'hyperswap': LOG.debug('Volume %s to be created is a hyperswap volume.', volume.name) if not self._helpers.is_system_topology_hyperswap(self._state): reason = _('Create hyperswap volume failed, the system is ' 'below release 7.6.0.0 or it is not hyperswap ' 'topology.') raise exception.InvalidInput(reason=reason) if opts['mirror_pool'] or rep_type: reason = _('Create hyperswap volume with streched cluster or ' 'replication enabled is not supported.') raise exception.InvalidInput(reason=reason) if not opts['easytier']: msg = _('The default easytier of hyperswap volume is ' 'on, it does not support easytier off.') raise exception.VolumeDriverException(message=msg) self._helpers.check_hyperswap_pool(pool, opts['peer_pool']) self._helpers.create_hyperswap_volume(volume.name, volume.size, 'gb', pool, opts) # Updating Hyperswap volume replication properties model_update = self._update_replication_properties(ctxt, volume, model_update) else: opts['iogrp'] = self._helpers.select_io_group(self._state, opts, pool) opts['aux_mirror_pool'] = None self._helpers.create_vdisk(volume['name'], str(volume['size']), 'gb', pool, opts) if opts['qos']: self._helpers.add_vdisk_qos(volume['name'], opts['qos'], volume['size']) model_update = self._qos_model_update(model_update, volume) model_update[ 'replication_status'] = fields.ReplicationStatus.NOT_CAPABLE if rep_type: replica_obj = self._get_replica_obj(rep_type) replica_obj.volume_replication_setup(ctxt, volume) model_update[ 'replication_status'] = fields.ReplicationStatus.ENABLED # Updating replication properties for a volume with replication # enabled. 
model_update = self._update_replication_properties(ctxt, volume, model_update) LOG.debug('leave: create_volume:\n volume: %(vol)s\n ' 'model_update %(model_update)s', {'vol': volume['name'], 'model_update': model_update}) return model_update def delete_volume(self, volume): LOG.debug('enter: delete_volume: volume %s', volume['name']) ctxt = context.get_admin_context() if self._state['code_level'] < (7, 7, 0, 0): force_unmap = False else: force_unmap = True hyper_volume = self.is_volume_hyperswap(volume) if hyper_volume: LOG.debug('Volume %s to be deleted is a hyperswap ' 'volume.', volume.name) self._helpers.delete_hyperswap_volume(volume.name, force_unmap=force_unmap, force_delete=False) return rep_type = self._get_volume_replicated_type(ctxt, volume) if rep_type or ( volume.replication_status not in ["not-capable", "disabled"]): target_volume, rel_info = ( self._helpers.get_target_volume_information(volume)) if self._aux_backend_helpers: self._aux_backend_helpers.delete_rc_volume( target_volume, rel_info, target_vol=True, force_unmap=force_unmap, retain_aux_volume=self.configuration.safe_get( 'storwize_svc_retain_aux_volume')) # As the relationship got deleted, updated rel_info # as None and sent to master_backend_helper rel_info = None if not self._active_backend_id: self._master_backend_helpers.delete_rc_volume( volume['name'], rel_info, force_unmap=force_unmap) else: # If it's in fail over state, also try to delete the volume # in master backend try: self._master_backend_helpers.delete_rc_volume( volume['name'], rel_info, force_unmap=force_unmap) except Exception as ex: LOG.error('Failed to get delete volume %(volume)s in ' 'master backend. Exception: %(err)s.', {'volume': volume['name'], 'err': ex}) else: if self._active_backend_id: msg = (_('Error: delete non-replicate volume in failover mode' ' is not allowed.')) LOG.error(msg) raise exception.VolumeDriverException(message=msg) else: self._helpers.delete_vdisk( volume['name'], force_unmap=force_unmap, force_delete=False) if volume['id'] in self._vdiskcopyops: del self._vdiskcopyops[volume['id']] if not len(self._vdiskcopyops): self._vdiskcopyops_loop.stop() self._vdiskcopyops_loop = None LOG.debug('leave: delete_volume: volume %s', volume['name']) def create_snapshot(self, snapshot): ctxt = context.get_admin_context() try: # TODO(zhaochy): change to use snapshot.volume source_vol = self.db.volume_get(ctxt, snapshot['volume_id']) except Exception: msg = (_('create_snapshot: get source volume failed.')) LOG.error(msg) raise exception.VolumeDriverException(message=msg) pool = volume_utils.extract_host(source_vol['host'], 'pool') opts = self._get_vdisk_params(source_vol['volume_type_id']) self._helpers.create_copy(snapshot['volume_name'], snapshot['name'], snapshot['volume_id'], self.configuration, opts, False, self._state, pool=pool) def delete_snapshot(self, snapshot): if self._state['code_level'] < (7, 7, 0, 0): force_unmap = False else: force_unmap = True self._helpers.delete_vdisk( snapshot['name'], force_unmap=force_unmap, force_delete=False) def create_volume_from_snapshot(self, volume, snapshot): # Create volume from snapshot with a replication or hyperswap group_id # is not allowed. 
model_update = dict() self._check_if_group_type_cg_snapshot(volume) opts = self._get_vdisk_params(volume['volume_type_id'], volume_metadata= volume.get('volume_metadata')) pool = volume_utils.extract_host(volume['host'], 'pool') self._helpers.create_copy(snapshot['name'], volume['name'], snapshot['id'], self.configuration, opts, True, self._state, pool=pool) # The volume size is equal to the snapshot size in most # of the cases. But in some scenarios, the volume size # may be bigger than the source volume size. # SVC does not support flashcopy between two volumes # with two different sizes. So use the snapshot size to # create the volume first and then extend the volume to # the target size. if volume['size'] > snapshot['volume_size']: # extend the newly created target volume to the expected size. self._extend_volume_op(volume, volume['size'], snapshot['volume_size']) if opts['qos']: self._helpers.add_vdisk_qos(volume['name'], opts['qos'], volume['size']) model_update = self._qos_model_update(model_update, volume) ctxt = context.get_admin_context() model_update[ 'replication_status'] = fields.ReplicationStatus.NOT_CAPABLE rep_type = self._get_volume_replicated_type(ctxt, volume) if rep_type: self._validate_replication_enabled() replica_obj = self._get_replica_obj(rep_type) replica_obj.volume_replication_setup(ctxt, volume) model_update[ 'replication_status'] = fields.ReplicationStatus.ENABLED # Updating replication properties for a volume with replication # enabled. model_update = self._update_replication_properties(ctxt, volume, model_update) if opts['volume_topology'] == 'hyperswap': LOG.debug('The volume %s to be created is a hyperswap ' 'volume.', volume.name) # Ensure the vdisk is not part of an FC mapping. # Otherwise converting it to a hyperswap volume will fail. self._helpers.ensure_vdisk_no_fc_mappings(volume['name'], allow_snaps=True, allow_fctgt=False) self._helpers.convert_volume_to_hyperswap(volume['name'], opts, self._state) return model_update def create_cloned_volume(self, tgt_volume, src_volume): """Creates a clone of the specified volume.""" # Creating a cloned volume with a replication or hyperswap group_id is # not allowed. model_update = dict() self._check_if_group_type_cg_snapshot(tgt_volume) opts = self._get_vdisk_params(tgt_volume['volume_type_id'], volume_metadata= tgt_volume.get('volume_metadata')) pool = volume_utils.extract_host(tgt_volume['host'], 'pool') self._helpers.create_copy(src_volume['name'], tgt_volume['name'], src_volume['id'], self.configuration, opts, True, self._state, pool=pool) # The source volume size is equal to the target volume size # in most of the cases. But in some scenarios, the target # volume size may be bigger than the source volume size. # SVC does not support flashcopy between two volumes # with two different sizes. So use the source volume size to # create the target volume first and then extend the target # volume to the original size. ctxt = context.get_admin_context() if tgt_volume['size'] > src_volume['size']: # extend the newly created target volume to the expected size. self._extend_volume_op(tgt_volume, tgt_volume['size'], src_volume['size']) if opts['qos']: self._helpers.add_vdisk_qos(tgt_volume['name'], opts['qos'], tgt_volume['size']) model_update = self._qos_model_update(model_update, tgt_volume) if opts['volume_topology'] == 'hyperswap': LOG.debug('The source volume %s to be cloned is a hyperswap ' 'volume.', src_volume.name) # Ensure the vdisk is not part of an FC mapping. # Otherwise converting it to a hyperswap volume will fail. 
self._helpers.ensure_vdisk_no_fc_mappings(tgt_volume['name'], allow_snaps=True, allow_fctgt=False) self._helpers.convert_volume_to_hyperswap(tgt_volume['name'], opts, self._state) # Updating Hyperswap volume replication properties model_update = self._update_replication_properties(ctxt, tgt_volume, model_update) model_update[ 'replication_status'] = fields.ReplicationStatus.NOT_CAPABLE ctxt = context.get_admin_context() rep_type = self._get_volume_replicated_type(ctxt, tgt_volume) if rep_type: self._validate_replication_enabled() replica_obj = self._get_replica_obj(rep_type) replica_obj.volume_replication_setup(ctxt, tgt_volume) model_update[ 'replication_status'] = fields.ReplicationStatus.ENABLED # Updating replication properties for a volume with replication # enabled. model_update = self._update_replication_properties(ctxt, tgt_volume, model_update) return model_update def extend_volume(self, volume, new_size): self._extend_volume_op(volume, new_size) def _extend_volume_op(self, volume, new_size, old_size=None): LOG.debug('enter: _extend_volume_op: volume %s', volume['id']) if self._state['code_level'] < (7, 7, 0, 0): force_unmap = False else: force_unmap = True volume_name = self._get_target_vol(volume) tgt_vol, rel_info = self._helpers.get_target_volume_information( volume) ret = self._helpers.ensure_vdisk_no_fc_mappings(volume_name, allow_snaps=False, rel_info=rel_info) if not ret: msg = (_('_extend_volume_op: Extending a volume with snapshots is ' 'not supported.')) LOG.error(msg) raise exception.VolumeDriverException(message=msg) if old_size is None: old_size = volume.size extend_amt = int(new_size) - old_size if rel_info: LOG.warning('_extend_volume_op: Extending a volume with ' 'remote copy or with "active-active" relationship is ' 'not recommended.') rep_type = rel_info['copy_type'] cyclingmode = rel_info['cycling_mode'] rc_name = rel_info['name'] master_helper = self._master_backend_helpers target_helper = self._aux_backend_helpers if rep_type == 'activeactive': hs_opts = self._get_vdisk_params(volume['volume_type_id'], volume_metadata= volume.get( 'volume_metadata')) try: master_helper.convert_hyperswap_volume_to_normal( volume_name, hs_opts['peer_pool']) except Exception as e: msg = (_('_extend_volume_op: Failed to convert hyperswap ' 'volume to normal volume %(volume)s. Exception: ' '%(err)s.') % {'volume': volume.id, 'err': e}) LOG.error(msg) raise exception.VolumeDriverException(message=msg) try: master_helper.extend_vdisk(volume_name, extend_amt) except Exception as e: msg = (_('_extend_volume_op: Failed to extend a hyperswap ' 'volume %(volume)s. Exception: ' '%(err)s.') % {'volume': volume.id, 'err': e}) LOG.error(msg) raise exception.VolumeDriverException(message=msg) finally: try: master_helper.convert_extended_volume_to_hyperswap( volume_name, hs_opts, self._state) except Exception as e: msg = (_('_extend_volume_op: Failed to convert volume ' 'to hyperswap volume %(volume)s. 
Exception: ' '%(err)s.') % {'volume': volume.id, 'err': e}) LOG.error(msg) raise exception.VolumeDriverException(message=msg) else: try: if storwize_const.GMCV_MULTI == cyclingmode: rccg_name = ( self._helpers.get_rccg_name_by_volume_name( volume.name)) # Update gmcv volume cyclingmode to 'none' if rccg_name: master_helper.stop_rccg(rccg_name) master_helper.change_consistgrp_cyclingmode( rccg_name) master_helper.start_rccg(rccg_name) else: master_helper.stop_relationship(volume.name, rcrel=rc_name) master_helper.change_relationship_cyclingmode( volume.name, rcrel=rc_name) master_helper.start_relationship(volume.name, rcrel=rc_name) tgt_change_vol = ( storwize_const.REPLICA_CHG_VOL_PREFIX + tgt_vol) source_change_vol = ( storwize_const.REPLICA_CHG_VOL_PREFIX + volume.name) # Delete source_change_volume and target_change_volume master_helper.delete_vdisk(source_change_vol, force_unmap=force_unmap, force_delete=True) target_helper.delete_vdisk(tgt_change_vol, force_unmap=force_unmap, force_delete=True) # Extend primary volume and auxiliary volume flag = self._helpers.is_replicated_volume_primary( volume, rel_info) if flag: # source_volume is the primary volume or # onboarded auxiliary volume is primary # [Reverse replication failover] target_helper.extend_vdisk(tgt_vol, extend_amt) master_helper.extend_vdisk(volume.name, extend_amt) else: # Auxiliary volume is onboarded as source volume # [Reverse Replication] or # source volume with primary as aux [Failover] master_helper.extend_vdisk(volume.name, extend_amt) target_helper.extend_vdisk(tgt_vol, extend_amt) if storwize_const.GMCV_MULTI == cyclingmode: # Convert global mirror volume to GMCV volume with # the new volume-size self._convert_global_mirror_volume_to_gmcv( volume, tgt_vol, new_size, rel_info, rccg_name=rccg_name) except Exception as e: msg = (_('Failed to extend a volume with remote copy ' '%(volume)s. 
Exception: ' '%(err)s.') % {'volume': volume.id, 'err': e}) rel_info = self._helpers.get_relationship_info(volume_name) new_cyclingmode = ( rel_info['cycling_mode'] if rel_info else 'multi') if (storwize_const.GMCV_MULTI == cyclingmode and cyclingmode != new_cyclingmode): # Convert global mirror volume to GMCV volume with # the current volume-size self._convert_global_mirror_volume_to_gmcv( volume, tgt_vol, volume['size'], rel_info, rccg_name=rccg_name) LOG.error(msg) raise exception.VolumeDriverException(message=msg) else: self._helpers.extend_vdisk(volume_name, extend_amt) LOG.debug('leave: _extend_volume_op: volume %s', volume.id) # Update the QoS IOThrottling value to the volume properties opts = self._get_vdisk_params(volume['volume_type_id'], volume_metadata= volume.get('volume_metadata')) if opts['qos'] and opts['qos']['IOThrottling_unit']: unit = opts['qos']['IOThrottling_unit'] if storwize_const.IOPS_PER_GB in unit: self._helpers.update_vdisk_qos(volume_name, opts['qos'], new_size) # Add the QoS IOThrottling value to Volume Metadata model_update = self._qos_model_update(dict(), volume) # Update the Volume Metadata in the DB self.db.volume_metadata_update( context.get_admin_context(), volume['id'], model_update['metadata'], False) def _convert_global_mirror_volume_to_gmcv(self, volume, target_vol, size, rel_info, rccg_name=None): master_helper = self._master_backend_helpers target_helper = self._aux_backend_helpers tgt_change_vol = (storwize_const.REPLICA_CHG_VOL_PREFIX + target_vol) src_change_vol = (storwize_const.REPLICA_CHG_VOL_PREFIX + volume.name) rc_name = rel_info['name'] # Create source change volume if it doesn't exist src_attr = master_helper.get_vdisk_attributes(volume.name) src_change_attr = master_helper.get_vdisk_attributes(src_change_vol) if not src_change_attr: src_change_opts = self._get_vdisk_params(volume.volume_type_id) src_change_opts['iogrp'] = src_attr['IO_group_id'] # Change volumes would usually be thin-provisioned src_change_opts['autoexpand'] = True master_helper.create_vdisk(src_change_vol, str(int(size)), 'gb', src_attr['mdisk_grp_name'], src_change_opts) # Create target change volume if it doesn't exist target_change_attr = ( target_helper.get_vdisk_attributes(tgt_change_vol)) if not target_change_attr: target_change_opts = self._get_vdisk_params( volume.volume_type_id) target_change_pool = self._replica_target.get('pool_name') target_change_opts['iogrp'] = src_attr['IO_group_id'] # Change Volumes would usually be thin-provisioned target_change_opts['autoexpand'] = True target_helper.create_vdisk(tgt_change_vol, str(int(size)), 'gb', target_change_pool, target_change_opts) if rccg_name: # Update consistency group cyclingmode to 'multi' master_helper.stop_rccg(rccg_name) master_helper.change_consistgrp_cyclingmode(rccg_name, 'multi') else: # Update volume cyclingmode to 'multi' master_helper.stop_relationship(volume.name, rcrel=rc_name) master_helper.change_relationship_cyclingmode(volume.name, 'multi', rc_name) # Set source_change_volume and target_change_volume if rel_info["master_vdisk_name"] == volume.name: master_helper.change_relationship_changevolume(volume.name, src_change_vol, True, rc_name) target_helper.change_relationship_changevolume(target_vol, tgt_change_vol, False, rc_name) else: # Auxiliary volume is onboarded as source volume # [Reverse Replication Scenario] master_helper.change_relationship_changevolume(volume.name, src_change_vol, False, rc_name) target_helper.change_relationship_changevolume(target_vol, tgt_change_vol, True, 
rc_name) if rccg_name: # Start gmcv consistency group relationship master_helper.start_rccg(rccg_name) else: # Start gmcv volume relationship master_helper.start_relationship(volume.name, rcrel=rc_name) def _qos_model_update(self, model_update, volume): """Add volume wwn and IOThrottle_rate to the metadata of the volume.""" model_update = model_update or dict() vol_metadata = model_update.get('metadata', {}) db_meta = self.db.volume_metadata_get(context.get_admin_context(), volume['id']) model_update['metadata'] = db_meta if db_meta else dict() model_update['metadata'].update(vol_metadata) attrs = self._helpers.get_vdisk_attributes(volume['name']) model_update['metadata']['volume_wwn'] = attrs['vdisk_UID'] iops_limit = attrs.get('IOPs_limit') bw_limit_mbps = attrs.get('bandwidth_limit_MB') if iops_limit: model_update['metadata']['IOThrottle_rate'] = ( "%s IOps" % iops_limit) elif bw_limit_mbps: model_update['metadata']['IOThrottle_rate'] = ( "%s MBps" % bw_limit_mbps) else: # there is no IOThrottle_rate defined - remove it from metadata # This case is seen during retype from a storage template # with qos to a storage template without qos (the qos rate # was leftover in the volume details on UI). if 'IOThrottle_rate' in model_update['metadata']: del model_update['metadata']['IOThrottle_rate'] model_update['host'] = volume['host'] return model_update def add_vdisk_copy(self, volume, dest_pool, vol_type, auto_delete=False): return self._helpers.add_vdisk_copy(volume, dest_pool, vol_type, self._state, self.configuration, auto_delete=auto_delete) def _add_vdisk_copy_op(self, ctxt, volume, new_op): metadata = self.db.volume_admin_metadata_get(ctxt.elevated(), volume['id']) curr_ops = metadata.get('vdiskcopyops', None) if curr_ops: curr_ops_list = [tuple(x.split(':')) for x in curr_ops.split(';')] curr_ops_list.append(new_op) new_ops_list = curr_ops_list else: new_ops_list = [new_op] new_ops_str = ';'.join([':'.join(x) for x in new_ops_list]) self.db.volume_admin_metadata_update(ctxt.elevated(), volume['id'], {'vdiskcopyops': new_ops_str}, False) if volume['id'] in self._vdiskcopyops: self._vdiskcopyops[volume['id']].append(new_op) else: self._vdiskcopyops[volume['id']] = [new_op] # We added the first copy operation, so start the looping call if len(self._vdiskcopyops) == 1: self._vdiskcopyops_loop = loopingcall.FixedIntervalLoopingCall( self._check_volume_copy_ops) self._vdiskcopyops_loop.start(interval=self.VDISKCOPYOPS_INTERVAL) def _rm_vdisk_copy_op(self, ctxt, volume, orig_copy_id, new_copy_id): try: self._vdiskcopyops[volume['id']].remove((orig_copy_id, new_copy_id)) if not len(self._vdiskcopyops[volume['id']]): del self._vdiskcopyops[volume['id']] if not len(self._vdiskcopyops): self._vdiskcopyops_loop.stop() self._vdiskcopyops_loop = None except KeyError: LOG.error('_rm_vdisk_copy_op: Volume %s does not have any ' 'registered vdisk copy operations.', volume['id']) return except ValueError: LOG.error('_rm_vdisk_copy_op: Volume %(vol)s does not have ' 'the specified vdisk copy operation: orig=%(orig)s ' 'new=%(new)s.', {'vol': volume['id'], 'orig': orig_copy_id, 'new': new_copy_id}) return metadata = self.db.volume_admin_metadata_get(ctxt.elevated(), volume['id']) curr_ops = metadata.get('vdiskcopyops', None) if not curr_ops: LOG.error('_rm_vdisk_copy_op: Volume metadata %s does not ' 'have any registered vdisk copy operations.', volume['id']) return curr_ops_list = [tuple(x.split(':')) for x in curr_ops.split(';')] try: curr_ops_list.remove((orig_copy_id, new_copy_id)) except ValueError: 
LOG.error('_rm_vdisk_copy_op: Volume %(vol)s metadata does ' 'not have the specified vdisk copy operation: ' 'orig=%(orig)s new=%(new)s.', {'vol': volume['id'], 'orig': orig_copy_id, 'new': new_copy_id}) return if len(curr_ops_list): new_ops_str = ';'.join([':'.join(x) for x in curr_ops_list]) self.db.volume_admin_metadata_update(ctxt.elevated(), volume['id'], {'vdiskcopyops': new_ops_str}, False) else: self.db.volume_admin_metadata_delete(ctxt.elevated(), volume['id'], 'vdiskcopyops') def _check_volume_copy_ops(self): LOG.debug("Enter: update volume copy status.") ctxt = context.get_admin_context() copy_items = list(self._vdiskcopyops.items()) for vol_id, copy_ops in copy_items: try: volume = self.db.volume_get(ctxt, vol_id) except Exception: LOG.warning('Volume %s does not exist.', vol_id) del self._vdiskcopyops[vol_id] if not len(self._vdiskcopyops): self._vdiskcopyops_loop.stop() self._vdiskcopyops_loop = None continue for copy_op in copy_ops: try: synced = self._helpers.is_vdisk_copy_synced(volume['name'], copy_op[1]) except Exception: LOG.info('_check_volume_copy_ops: Volume %(vol)s does ' 'not have the specified vdisk copy ' 'operation: orig=%(orig)s new=%(new)s.', {'vol': volume['id'], 'orig': copy_op[0], 'new': copy_op[1]}) else: if synced: self._helpers.rm_vdisk_copy(volume['name'], copy_op[0]) self._rm_vdisk_copy_op(ctxt, volume, copy_op[0], copy_op[1]) LOG.debug("Exit: update volume copy status.") # #### V2.1 replication methods #### # @volume_utils.trace def failover_host(self, context, volumes, secondary_id=None, groups=None): if not self._replica_enabled: msg = _("Replication is not properly enabled on backend.") LOG.error(msg) raise exception.UnableToFailOver(reason=msg) if storwize_const.FAILBACK_VALUE == secondary_id: # In this case the administrator would like to fail back. secondary_id, volumes_update, groups_update = self._host_failback( context, volumes, groups) elif (secondary_id == self._replica_target['backend_id'] or secondary_id is None): # In this case the administrator would like to fail over. secondary_id, volumes_update, groups_update = self._host_failover( context, volumes, groups) else: msg = (_("Invalid secondary id %s.") % secondary_id) LOG.error(msg) raise exception.InvalidReplicationTarget(reason=msg) return secondary_id, volumes_update, groups_update def _host_failback(self, ctxt, volumes, groups): """Fail back all the volume on the secondary backend.""" volumes_update = [] groups_update = [] if not self._active_backend_id: LOG.info("Host has been failed back. 
doesn't need " "to fail back again") return None, volumes_update, groups_update try: self._master_backend_helpers.get_system_info() except Exception: msg = (_("Unable to failback due to primary is not reachable.")) LOG.error(msg) raise exception.UnableToFailOver(reason=msg) bypass_volumes, rep_volumes = self._classify_volume(ctxt, volumes) # start synchronize from aux volume to master volume self._sync_with_aux(ctxt, rep_volumes) self._sync_replica_groups(ctxt, groups) self._wait_replica_ready(ctxt, rep_volumes) self._wait_replica_groups_ready(ctxt, groups) rep_volumes_update = self._failback_replica_volumes(ctxt, rep_volumes) volumes_update.extend(rep_volumes_update) rep_vols_in_grp_update, groups_update = self._failback_replica_groups( ctxt, groups) volumes_update.extend(rep_vols_in_grp_update) bypass_volumes_update = self._bypass_volume_process(bypass_volumes) volumes_update.extend(bypass_volumes_update) self._helpers = self._master_backend_helpers self._active_backend_id = None self._state = self._master_state self._update_volume_stats() self._master_backend_helpers.stats = self._stats return storwize_const.FAILBACK_VALUE, volumes_update, groups_update def _failback_replica_volumes(self, ctxt, rep_volumes): LOG.debug('enter: _failback_replica_volumes') volumes_update = [] for volume in rep_volumes: rep_type = self._get_volume_replicated_type(ctxt, volume) replica_obj = self._get_replica_obj(rep_type) tgt_volume = storwize_const.REPLICA_AUX_VOL_PREFIX + volume['name'] rep_info = self._helpers.get_relationship_info(tgt_volume) if not rep_info: volumes_update.append( {'volume_id': volume['id'], 'updates': {'replication_status': fields.ReplicationStatus.ERROR, 'status': 'error'}}) LOG.error('_failback_replica_volumes:no rc-releationship ' 'is established between master: %(master)s and ' 'aux %(aux)s. 
Please re-establish the ' 'relationship and synchronize the volumes on ' 'backend storage.', {'master': volume['name'], 'aux': tgt_volume}) continue LOG.debug('_failover_replica_volumes: vol=%(vol)s, master_vol=' '%(master_vol)s, aux_vol=%(aux_vol)s, state=%(state)s, ' 'primary=%(primary)s', {'vol': volume['name'], 'master_vol': rep_info['master_vdisk_name'], 'aux_vol': rep_info['aux_vdisk_name'], 'state': rep_info['state'], 'primary': rep_info['primary']}) if volume.status == 'in-use': LOG.warning('_failback_replica_volumes: failback in-use ' 'volume: %(volume)s is not recommended.', {'volume': volume.name}) try: replica_obj.replication_failback(volume) model_updates = { 'replication_status': fields.ReplicationStatus.ENABLED} volumes_update.append( {'volume_id': volume['id'], 'updates': model_updates}) except exception.VolumeDriverException: LOG.error('Unable to fail back volume %(volume_id)s', {'volume_id': volume.id}) volumes_update.append( {'volume_id': volume['id'], 'updates': {'replication_status': fields.ReplicationStatus.ERROR, 'status': 'error'}}) LOG.debug('leave: _failback_replica_volumes ' 'volumes_update=%(volumes_update)s', {'volumes_update': volumes_update}) return volumes_update def _bypass_volume_process(self, bypass_vols): volumes_update = [] for vol in bypass_vols: if vol.replication_driver_data: rep_data = json.loads(vol.replication_driver_data) update_status = rep_data['previous_status'] rep_data = '' else: update_status = 'error' rep_data = json.dumps({'previous_status': vol.status}) volumes_update.append( {'volume_id': vol.id, 'updates': {'status': update_status, 'replication_driver_data': rep_data}}) return volumes_update def _failback_replica_groups(self, ctxt, groups): volumes_update = [] groups_update = [] for grp in groups: try: grp_rep_status = self._rep_grp_failback( ctxt, grp, sync_grp=False)['replication_status'] except Exception as ex: LOG.error('Fail to failback group %(grp)s during host ' 'failback due to error: %(error)s', {'grp': grp.id, 'error': ex}) grp_rep_status = fields.ReplicationStatus.ERROR # Update all the volumes' status in that group for vol in grp.volumes: vol_update = {'volume_id': vol.id, 'updates': {'replication_status': grp_rep_status, 'status': ( vol.status if grp_rep_status == fields.ReplicationStatus.ENABLED else 'error')}} volumes_update.append(vol_update) grp_status = (fields.GroupStatus.AVAILABLE if grp_rep_status == fields.ReplicationStatus.ENABLED else fields.GroupStatus.ERROR) grp_update = {'group_id': grp.id, 'updates': {'replication_status': grp_rep_status, 'status': grp_status}} groups_update.append(grp_update) return volumes_update, groups_update def _sync_with_aux(self, ctxt, volumes): LOG.debug('enter: _sync_with_aux ') try: rep_mgr = self._get_replica_mgr() rep_mgr.establish_target_partnership() except Exception as ex: LOG.warning('Fail to establish partnership in backend. ' 'error=%(ex)s', {'error': ex}) for volume in volumes: tgt_volume = storwize_const.REPLICA_AUX_VOL_PREFIX + volume['name'] rep_info = self._helpers.get_relationship_info(tgt_volume) if not rep_info: LOG.error('_sync_with_aux: no rc-releationship is ' 'established between master: %(master)s and aux ' '%(aux)s. 
Please re-establish the relationship ' 'and synchronize the volumes on backend ' 'storage.', {'master': volume['name'], 'aux': tgt_volume}) continue LOG.debug('_sync_with_aux: volume: %(volume)s rep_info:master_vol=' '%(master_vol)s, aux_vol=%(aux_vol)s, state=%(state)s, ' 'primary=%(primary)s', {'volume': volume['name'], 'master_vol': rep_info['master_vdisk_name'], 'aux_vol': rep_info['aux_vdisk_name'], 'state': rep_info['state'], 'primary': rep_info['primary']}) try: if (rep_info['state'] not in [storwize_const.REP_CONSIS_SYNC, storwize_const.REP_CONSIS_COPYING]): if rep_info['primary'] == 'master': self._helpers.start_relationship(tgt_volume, rcrel= rep_info['name']) else: self._helpers.start_relationship(tgt_volume, primary='aux', rcrel= rep_info['name']) except Exception as ex: LOG.warning('Fail to copy data from aux to master. master:' ' %(master)s and aux %(aux)s. Please ' 're-establish the relationship and synchronize' ' the volumes on backend storage. error=' '%(ex)s', {'master': volume['name'], 'aux': tgt_volume, 'error': ex}) LOG.debug('leave: _sync_with_aux.') def _wait_replica_ready(self, ctxt, volumes): for volume in volumes: tgt_volume = storwize_const.REPLICA_AUX_VOL_PREFIX + volume['name'] try: self._wait_replica_vol_ready(ctxt, tgt_volume) except Exception as ex: LOG.error('_wait_replica_ready: wait for volume:%(volume)s' ' remote copy synchronization failed due to ' 'error:%(err)s.', {'volume': tgt_volume, 'err': ex}) def _wait_replica_vol_ready(self, ctxt, volume): LOG.debug('enter: _wait_replica_vol_ready: volume=%(volume)s', {'volume': volume}) def _replica_vol_ready(): rep_info = self._helpers.get_relationship_info(volume) if not rep_info: msg = (_('_wait_replica_vol_ready: no rc-releationship ' 'is established for volume:%(volume)s. Please ' 're-establish the rc-relationship and ' 'synchronize the volumes on backend storage.'), {'volume': volume}) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) LOG.debug('_replica_vol_ready:volume: %(volume)s rep_info: ' 'master_vol=%(master_vol)s, aux_vol=%(aux_vol)s, ' 'state=%(state)s, primary=%(primary)s', {'volume': volume, 'master_vol': rep_info['master_vdisk_name'], 'aux_vol': rep_info['aux_vdisk_name'], 'state': rep_info['state'], 'primary': rep_info['primary']}) if (rep_info['state'] in [storwize_const.REP_CONSIS_SYNC, storwize_const.REP_CONSIS_COPYING]): return True elif rep_info['state'] == storwize_const.REP_IDL_DISC: msg = (_('Wait synchronize failed. volume: %(volume)s'), {'volume': volume}) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) return False self._helpers._wait_for_a_condition( _replica_vol_ready, timeout=storwize_const.DEFAULT_RC_TIMEOUT, interval=storwize_const.DEFAULT_RC_INTERVAL, raise_exception=True) LOG.debug('leave: _wait_replica_vol_ready: volume=%(volume)s', {'volume': volume}) def _sync_replica_groups(self, ctxt, groups): for grp in groups: rccg_name = self._get_rccg_name(grp) self._sync_with_aux_grp(ctxt, rccg_name) def _wait_replica_groups_ready(self, ctxt, groups): for grp in groups: rccg_name = self._get_rccg_name(grp) self._wait_replica_grp_ready(ctxt, rccg_name) def _host_failover(self, ctxt, volumes, groups): volumes_update = [] groups_update = [] if self._active_backend_id: LOG.info("Host has been failed over to %s", self._active_backend_id) return self._active_backend_id, volumes_update, groups_update try: self._aux_backend_helpers.get_system_info() except Exception as ex: msg = (_("Unable to failover due to replication target is not " "reachable. 
error=%(ex)s"), {'error': ex}) LOG.error(msg) raise exception.UnableToFailOver(reason=msg) bypass_volumes, rep_volumes = self._classify_volume(ctxt, volumes) rep_volumes_update = self._failover_replica_volumes(ctxt, rep_volumes) volumes_update.extend(rep_volumes_update) rep_vols_in_grp_update, groups_update = self._failover_replica_groups( ctxt, groups) volumes_update.extend(rep_vols_in_grp_update) bypass_volumes_update = self._bypass_volume_process(bypass_volumes) volumes_update.extend(bypass_volumes_update) self._helpers = self._aux_backend_helpers self._active_backend_id = self._replica_target['backend_id'] self._secondary_pools = [self._replica_target['pool_name']] self._state = self._aux_state self._update_volume_stats() self._aux_backend_helpers.stats = self._stats return self._active_backend_id, volumes_update, groups_update def _failover_replica_volumes(self, ctxt, rep_volumes): LOG.debug('enter: _failover_replica_volumes') volumes_update = [] for volume in rep_volumes: rep_type = self._get_volume_replicated_type(ctxt, volume) replica_obj = self._get_replica_obj(rep_type) # Try do the fail-over. try: rep_info = self._aux_backend_helpers.get_relationship_info( storwize_const.REPLICA_AUX_VOL_PREFIX + volume['name']) if not rep_info: volumes_update.append( {'volume_id': volume['id'], 'updates': {'replication_status': fields.ReplicationStatus.FAILOVER_ERROR, 'status': 'error'}}) LOG.error('_failover_replica_volumes: no rc-' 'releationship is established for volume:' '%(volume)s. Please re-establish the rc-' 'relationship and synchronize the volumes on' ' backend storage.', {'volume': volume.name}) continue LOG.debug('_failover_replica_volumes: vol=%(vol)s, ' 'master_vol=%(master_vol)s, aux_vol=%(aux_vol)s, ' 'state=%(state)s, primary=%(primary)s', {'vol': volume['name'], 'master_vol': rep_info['master_vdisk_name'], 'aux_vol': rep_info['aux_vdisk_name'], 'state': rep_info['state'], 'primary': rep_info['primary']}) if volume.status == 'in-use': LOG.warning('_failover_replica_volumes: failover in-use ' 'volume: %(volume)s is not recommended.', {'volume': volume.name}) replica_obj.failover_volume_host(ctxt, volume) model_updates = { 'replication_status': fields.ReplicationStatus.FAILED_OVER} volumes_update.append( {'volume_id': volume['id'], 'updates': model_updates}) except exception.VolumeDriverException: LOG.error('Unable to failover to aux volume. 
Please make ' 'sure that the aux volume is ready.') volumes_update.append( {'volume_id': volume['id'], 'updates': {'status': 'error', 'replication_status': fields.ReplicationStatus.FAILOVER_ERROR}}) LOG.debug('leave: _failover_replica_volumes ' 'volumes_update=%(volumes_update)s', {'volumes_update': volumes_update}) return volumes_update def _failover_replica_groups(self, ctxt, groups): volumes_update = [] groups_update = [] for grp in groups: try: grp_rep_status = self._rep_grp_failover( ctxt, grp)['replication_status'] except Exception as ex: LOG.error('Failed to fail over group %(grp)s during host ' 'failover due to error: %(error)s', {'grp': grp.id, 'error': ex}) grp_rep_status = fields.ReplicationStatus.ERROR # Update all the volumes' status in that group for vol in grp.volumes: vol_update = {'volume_id': vol.id, 'updates': {'replication_status': grp_rep_status, 'status': ( vol.status if grp_rep_status == fields.ReplicationStatus.FAILED_OVER else 'error')}} volumes_update.append(vol_update) grp_status = (fields.GroupStatus.AVAILABLE if grp_rep_status == fields.ReplicationStatus.FAILED_OVER else fields.GroupStatus.ERROR) grp_update = {'group_id': grp.id, 'updates': {'replication_status': grp_rep_status, 'status': grp_status}} groups_update.append(grp_update) return volumes_update, groups_update def _classify_volume(self, ctxt, volumes): bypass_volumes = [] replica_volumes = [] for v in volumes: volume_type = self._get_volume_replicated_type(ctxt, v) grp = v.group if grp and volume_utils.is_group_a_type( grp, "consistent_group_replication_enabled"): continue elif volume_type and v.status in ['available', 'in-use']: replica_volumes.append(v) else: bypass_volumes.append(v) return bypass_volumes, replica_volumes def _get_replica_obj(self, rep_type): replica_manager = self.replica_manager[ self._replica_target['backend_id']] return replica_manager.get_replica_obj(rep_type) def _get_replica_mgr(self): replica_manager = self.replica_manager[ self._replica_target['backend_id']] return replica_manager def _get_target_vol(self, volume): tgt_vol = volume['name'] if self._active_backend_id: ctxt = context.get_admin_context() rep_type = self._get_volume_replicated_type(ctxt, volume) if rep_type: tgt_vol = (storwize_const.REPLICA_AUX_VOL_PREFIX + volume['name']) return tgt_vol def _validate_replication_enabled(self): if not self._replica_enabled: msg = _("Replication is not properly configured on backend.") LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) def _get_specs_replicated_type(self, volume_type): replication_type = None extra_specs = volume_type.get("extra_specs", {}) rep_val = extra_specs.get('replication_enabled') if rep_val == "<is> True": replication_type = extra_specs.get('replication_type', storwize_const.GLOBAL) # The format for replication_type in extra spec is in # "<in> global". Otherwise, the code will # not reach here. if replication_type != storwize_const.GLOBAL: # Pick up the replication type specified in the # extra spec from the format like "<in> global". 
replication_type = replication_type.split()[1] if replication_type not in storwize_const.VALID_REP_TYPES: msg = (_("Invalid replication type %s.") % replication_type) LOG.error(msg) raise exception.InvalidInput(reason=msg) return replication_type def _get_volume_replicated_type(self, ctxt, volume, vol_type_id=None): replication_type = None volume_type = None volume_type_id = volume.volume_type_id if volume else vol_type_id if volume_type_id: volume_type = objects.VolumeType.get_by_name_or_id( ctxt, volume_type_id) if volume_type: replication_type = self._get_specs_replicated_type(volume_type) return replication_type def is_volume_hyperswap(self, volume): """Returns True if the volume type is hyperswap.""" is_hyper_volume = False if 'volume_type_id' in volume: opts = self._get_vdisk_params(volume.volume_type_id) if opts['volume_topology'] == 'hyperswap': is_hyper_volume = True return is_hyper_volume def _get_storwize_config(self): # Update the storwize state try: self._update_storwize_state(self._master_state, self._helpers) except Exception as err: LOG.warning('Fail to get system %(san_ip)s info. error=%(error)s', {'san_ip': self.active_ip, 'error': err}) if not self._active_backend_id: with excutils.save_and_reraise_exception(): pass self._do_replication_setup() if self._active_backend_id and self._replica_target: self._helpers = self._aux_backend_helpers self._state = self._aux_state self._replica_enabled = (True if (self._helpers.replication_licensed() and self._replica_target) else False) if self._replica_enabled: self._supported_replica_types = storwize_const.VALID_REP_TYPES def _do_replication_setup(self): rep_devs = self.configuration.safe_get('replication_device') if not rep_devs: return if len(rep_devs) > 1: raise exception.InvalidInput( reason='Multiple replication devices are configured. ' 'Now only one replication_device is supported.') required_flags = ['san_ip', 'backend_id', 'san_login', 'san_password', 'pool_name'] for flag in required_flags: if flag not in rep_devs[0]: raise exception.InvalidInput( reason=_('%s is not set.') % flag) rep_target = {} rep_target['san_ip'] = rep_devs[0].get('san_ip') rep_target['backend_id'] = rep_devs[0].get('backend_id') rep_target['san_login'] = rep_devs[0].get('san_login') rep_target['san_password'] = rep_devs[0].get('san_password') rep_target['pool_name'] = rep_devs[0].get('pool_name') # Each replication target will have a corresponding replication. 
self._replication_initialize(rep_target) def _replication_initialize(self, target): rep_manager = storwize_rep.StorwizeSVCReplicationManager( self, target, StorwizeHelpers) if self._active_backend_id: if self._active_backend_id != target['backend_id']: msg = (_("Invalid secondary id %s.") % self._active_backend_id) LOG.error(msg) raise exception.InvalidInput(reason=msg) # Setup partnership only in non-failover state else: try: rep_manager.establish_target_partnership() except exception.VolumeDriverException: LOG.error('The replication src %(src)s has not ' 'successfully established partnership with the ' 'replica target %(tgt)s.', {'src': self.configuration.san_ip, 'tgt': target['backend_id']}) self._aux_backend_helpers = rep_manager.get_target_helpers() self.replica_manager[target['backend_id']] = rep_manager self._replica_target = target self._update_storwize_state(self._aux_state, self._aux_backend_helpers) # Replication Group (Tiramisu) @volume_utils.trace def enable_replication(self, context, group, volumes): """Enables replication for a group and volumes in the group.""" model_update = {'replication_status': fields.ReplicationStatus.ENABLED} volumes_update = [] rccg_name = self._get_rccg_name(group) rccg = self._helpers.get_rccg(rccg_name) if rccg and rccg['relationship_count'] != '0': try: if rccg['primary'] == 'aux': self._helpers.start_rccg(rccg_name, primary='aux') else: self._helpers.start_rccg(rccg_name, primary='master') except exception.VolumeBackendAPIException as err: LOG.error("Failed to enable group replication on %(rccg)s. " "Exception: %(exception)s.", {'rccg': rccg_name, 'exception': err}) model_update[ 'replication_status'] = fields.ReplicationStatus.ERROR else: if rccg: LOG.error("Enable replication on empty group %(rccg)s is " "forbidden.", {'rccg': rccg['name']}) else: LOG.error("Failed to enable group replication: %(grp)s does " "not exist in backend.", {'grp': group.id}) model_update['replication_status'] = fields.ReplicationStatus.ERROR for vol in volumes: volumes_update.append( {'id': vol.id, 'replication_status': model_update['replication_status']}) return model_update, volumes_update @volume_utils.trace def disable_replication(self, context, group, volumes): """Disables replication for a group and volumes in the group.""" model_update = { 'replication_status': fields.ReplicationStatus.DISABLED} volumes_update = [] rccg_name = self._get_rccg_name(group) rccg = self._helpers.get_rccg(rccg_name) if rccg and rccg['relationship_count'] != '0': try: self._helpers.stop_rccg(rccg_name) except exception.VolumeBackendAPIException as err: LOG.error("Failed to disable group replication on %(rccg)s. 
" "Exception: %(exception)s.", {'rccg': rccg_name, 'exception': err}) model_update[ 'replication_status'] = fields.ReplicationStatus.ERROR else: if rccg: LOG.error("Disable replication on empty group %(rccg)s is " "forbidden.", {'rccg': rccg['name']}) else: LOG.error("Failed to disable group replication: %(grp)s does " "not exist in backend.", {'grp': group.id}) model_update['replication_status'] = fields.ReplicationStatus.ERROR for vol in volumes: volumes_update.append( {'id': vol.id, 'replication_status': model_update['replication_status']}) return model_update, volumes_update @volume_utils.trace def failover_replication(self, context, group, volumes, secondary_backend_id=None): """Fails over replication for a group and volumes in the group.""" volumes_model_update = [] model_update = {} if not self._replica_enabled: msg = _("Replication is not properly enabled on backend.") LOG.error(msg) raise exception.UnableToFailOver(reason=msg) if storwize_const.FAILBACK_VALUE == secondary_backend_id: # In this case the administrator would like to group fail back. model_update = self._rep_grp_failback(context, group) elif (secondary_backend_id == self._replica_target['backend_id'] or secondary_backend_id is None): # In this case the administrator would like to group fail over. model_update = self._rep_grp_failover(context, group) else: msg = (_("Invalid secondary id %s.") % secondary_backend_id) LOG.error(msg) raise exception.InvalidReplicationTarget(reason=msg) for vol in volumes: volume_model_update = {'id': vol.id, 'replication_status': model_update['replication_status']} volumes_model_update.append(volume_model_update) return model_update, volumes_model_update @volume_utils.trace def _rep_grp_failback(self, ctxt, group, sync_grp=True): """Fail back all the volume in the replication group.""" model_update = { 'replication_status': fields.ReplicationStatus.ENABLED} rccg_name = self._get_rccg_name(group) try: self._aux_backend_helpers.stop_rccg(rccg_name, access=True) self._aux_backend_helpers.start_rccg(rccg_name, primary='master') return model_update except exception.VolumeBackendAPIException as e: msg = (_('Unable to fail back the group %(rccg)s, error: ' '%(error)s') % {"rccg": rccg_name, "error": e}) LOG.exception(msg) raise exception.UnableToFailOver(reason=msg) @volume_utils.trace def _rep_grp_failover(self, ctxt, group): """Fail over all the volume in the replication group.""" model_update = { 'replication_status': fields.ReplicationStatus.FAILED_OVER} rccg_name = self._get_rccg_name(group) try: self._aux_backend_helpers.stop_rccg(rccg_name, access=True) self._helpers.start_rccg(rccg_name, primary='aux') return model_update except exception.VolumeBackendAPIException as e: msg = (_('Unable to fail over the group %(rccg)s to the aux ' 'back-end, error: %(error)s') % {"rccg": rccg_name, "error": e}) LOG.exception(msg) raise exception.UnableToFailOver(reason=msg) @volume_utils.trace def _sync_with_aux_grp(self, ctxt, rccg_name): try: rccg = self._helpers.get_rccg(rccg_name) if rccg and rccg['relationship_count'] != '0': if (rccg['state'] not in [storwize_const.REP_CONSIS_SYNC, storwize_const.REP_CONSIS_COPYING]): if rccg['primary'] == 'master': self._helpers.start_rccg(rccg_name, primary='master') else: self._helpers.start_rccg(rccg_name, primary='aux') else: LOG.warning('group %(grp)s is not in sync.', {'grp': rccg_name}) except exception.VolumeBackendAPIException as ex: LOG.warning('Fail to copy data from aux group %(rccg)s to master ' 'group. 
Please recheck the relationship and ' 'synchronize the group on backend storage. error=' '%(error)s', {'rccg': rccg['name'], 'error': ex}) def _wait_replica_grp_ready(self, ctxt, rccg_name): LOG.debug('_wait_replica_grp_ready: group=%(rccg)s', {'rccg': rccg_name}) def _replica_grp_ready(): rccg = self._helpers.get_rccg(rccg_name) if not rccg: msg = (_('_replica_grp_ready: no group %(rccg)s exists on the ' 'backend. Please re-create the rccg and synchronize ' 'the volumes on backend storage.'), {'rccg': rccg_name}) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) if rccg['relationship_count'] == '0': return True LOG.debug('_replica_grp_ready: group: %(rccg)s: state=%(state)s, ' 'primary=%(primary)s', {'rccg': rccg['name'], 'state': rccg['state'], 'primary': rccg['primary']}) if rccg['state'] in [storwize_const.REP_CONSIS_SYNC, storwize_const.REP_CONSIS_COPYING]: return True if rccg['state'] == storwize_const.REP_IDL_DISC: msg = (_('Wait synchronize failed. group: %(rccg)s') % {'rccg': rccg_name}) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) return False try: self._helpers._wait_for_a_condition( _replica_grp_ready, timeout=storwize_const.DEFAULT_RCCG_TIMEOUT, interval=storwize_const.DEFAULT_RCCG_INTERVAL, raise_exception=True) except Exception as ex: LOG.error('_wait_replica_grp_ready: wait for group %(rccg)s ' 'synchronization failed due to ' 'error: %(err)s.', {'rccg': rccg_name, 'err': ex}) def get_replication_error_status(self, context, groups): """Returns error info for replicated groups and its volumes. The failover/failback only happens manually, no need to update the status. """ return [], [] def _get_vol_sys_info(self, volume): tgt_vol = volume.name backend_helper = self._helpers node_state = self._state grp = volume.group if grp and volume_utils.is_group_a_type( grp, "consistent_group_replication_enabled"): if (grp.replication_status == fields.ReplicationStatus.FAILED_OVER): tgt_vol = (storwize_const.REPLICA_AUX_VOL_PREFIX + volume.name) backend_helper = self._aux_backend_helpers node_state = self._aux_state else: backend_helper = self._master_backend_helpers node_state = self._master_state elif self._active_backend_id: ctxt = context.get_admin_context() rep_type = self._get_volume_replicated_type(ctxt, volume) if rep_type: tgt_vol = (storwize_const.REPLICA_AUX_VOL_PREFIX + volume.name) return tgt_vol, backend_helper, node_state def _toggle_rep_vol_info(self, volume, helper): if helper == self._master_backend_helpers: vol_name = storwize_const.REPLICA_AUX_VOL_PREFIX + volume.name backend_helper = self._aux_backend_helpers node_state = self._aux_state else: vol_name = volume.name backend_helper = self._master_backend_helpers node_state = self._master_state return vol_name, backend_helper, node_state def _get_map_info_from_connector(self, volume, connector, iscsi=False): if volume.display_name == 'backup-snapshot': LOG.debug('It is a virtual volume %(vol)s for detach snapshot.', {'vol': volume.id}) vol_name = volume.name backend_helper = self._helpers node_state = self._state else: vol_name, backend_helper, node_state = self._get_vol_sys_info( volume) backend_helper.initialize_host_info() info = {} if 'host' in connector: # get host according to FC protocol connector = connector.copy() if not iscsi: connector.pop('initiator', None) info = {'driver_volume_type': 'fibre_channel', 'data': {}} else: info = {'driver_volume_type': 'iscsi', 'data': {}} host_name = backend_helper.get_host_from_connector( connector, volume_name=vol_name, iscsi=iscsi) 
vol_mapped = backend_helper.check_vol_mapped_to_host(vol_name, host_name) if host_name is None or not vol_mapped: ctxt = context.get_admin_context() rep_type = self._get_volume_replicated_type(ctxt, volume) if host_name is None and not rep_type: msg = (_('_get_map_info_from_connector: Failed to get ' 'host name from connector.')) LOG.error(msg) raise exception.VolumeDriverException(message=msg) if rep_type: # Try to unmap the volume in the secondary side if it is a # replication volume. (vol_name, backend_helper, node_state) = self._toggle_rep_vol_info(volume, backend_helper) try: host_name = backend_helper.get_host_from_connector( connector, volume_name=vol_name, iscsi=iscsi) except Exception as ex: LOG.warning('Failed to get host mapping for volume ' '%(volume)s in the secondary side. ' 'Exception: %(err)s.', {'volume': vol_name, 'err': ex}) return info, None, None, None, None if host_name is None: msg = (_('_get_map_info_from_connector: Failed to get ' 'host name from connector.')) LOG.error(msg) raise exception.VolumeDriverException(message=msg) else: host_name = None return info, host_name, vol_name, backend_helper, node_state def _check_snapshot_replica_volume_status(self, snapshot): ctxt = context.get_admin_context() if self._get_volume_replicated_type(ctxt, None, snapshot.volume_type_id): LOG.debug('It is a replication volume snapshot for backup.') rep_volume = objects.Volume.get_by_id(ctxt, snapshot.volume_id) volume_name, backend_helper, node_state = self._get_vol_sys_info( rep_volume) if backend_helper != self._helpers or self._active_backend_id: msg = (_('The snapshot of the replication volume %s has ' 'failed over to the aux backend. It can not attach' ' to the aux backend.') % volume_name) LOG.error(msg) raise exception.VolumeDriverException(message=msg) def migrate_volume(self, ctxt, volume, host): """Migrate directly if source and dest are managed by same storage. We create a new vdisk copy in the desired pool, and add the original vdisk copy to the admin_metadata of the volume to be deleted. The deletion will occur using a periodic task once the new copy is synced. :param ctxt: Context :param volume: A dictionary describing the volume to migrate :param host: A dictionary describing the host to migrate to, where host['host'] is its name, and host['capabilities'] is a dictionary of its reported capabilities. """ LOG.debug('enter: migrate_volume: id=%(id)s, host=%(host)s', {'id': volume['id'], 'host': host['host']}) # hyperswap volume doesn't support migrate if self.is_volume_hyperswap(volume): msg = _('migrate_volume: Migrating a hyperswap volume is ' 'not supported.') LOG.error(msg) raise exception.InvalidInput(message=msg) false_ret = (False, None) dest_pool = self._helpers.can_migrate_to_host(host, self._state) if dest_pool is None: return false_ret ctxt = context.get_admin_context() volume_type_id = volume['volume_type_id'] if volume_type_id is not None: vol_type = volume_types.get_volume_type(ctxt, volume_type_id) else: vol_type = None resp = self._helpers.lsvdiskcopy(volume.name) if len(resp) > 1: copies = self._helpers.get_vdisk_copies(volume.name) src_pool = copies['primary']['mdisk_grp_name'] mirror_pool = copies['secondary']['mdisk_grp_name'] opts = self._get_vdisk_params(volume.volume_type_id) if opts['rsize'] != -1: if (self._helpers.is_data_reduction_pool(src_pool) or self._helpers.is_data_reduction_pool(mirror_pool)): msg = _('Unable to migrate: the thin-provisioned or ' 'compressed volume can not be migrated from a data' ' reduction pool. 
') raise exception.VolumeDriverException(message=msg) elif self._helpers.is_data_reduction_pool(dest_pool): msg = _('Unable to migrate: the thin-provisioned or ' 'compressed volume can not be migrated to a data ' 'reduction pool.') raise exception.VolumeDriverException(message=msg) self._helpers.migratevdisk(volume.name, dest_pool, copies['primary']['copy_id']) else: self._check_volume_copy_ops() if self._state['code_level'] < (7, 6, 0, 0): new_op = self.add_vdisk_copy(volume.name, dest_pool, vol_type) self._add_vdisk_copy_op(ctxt, volume, new_op) else: self.add_vdisk_copy(volume.name, dest_pool, vol_type, auto_delete=True) LOG.debug('leave: migrate_volume: id=%(id)s, host=%(host)s', {'id': volume.id, 'host': host['host']}) return (True, None) def _verify_iogrp(self, rsize, pool, opts, rep_type, status): if rsize != -1 and self._helpers.is_volume_type_dr_pools( pool, opts, rep_type, rep_target_pool=self._replica_target[ 'pool_name'] if rep_type else None): msg = _('Unable to retype: the thin-provisioned or compressed ' 'vol in data reduction pool can not modify iogrp.') raise exception.VolumeDriverException(message=msg) def _verify_retype_params(self, volume, new_opts, old_opts, need_copy, change_mirror, new_rep_type, old_rep_type, vdisk_changes, old_pool, new_pool, old_io_grp): # Some volume parameters can not be changed or changed at the same # time during volume retype operation. This function checks the # retype parameters. resp = self._helpers.lsvdiskcopy(volume.name) if old_opts['mirror_pool'] and len(resp) == 1: msg = (_('Unable to retype: volume %s is a mirrorred vol. But it ' 'has only one copy in storage.') % volume.name) raise exception.VolumeDriverException(message=msg) is_old_type_dr_pool = self._helpers.is_volume_type_dr_pools( old_pool, old_opts, old_rep_type, rep_target_pool=self._replica_target[ 'pool_name'] if old_rep_type else None) is_new_type_dr_pool = self._helpers.is_volume_type_dr_pools( new_pool, new_opts, new_rep_type, rep_target_pool=self._replica_target[ 'pool_name'] if new_rep_type else None) need_check_dr_pool_param = False if need_copy: # mirror volume can not add volume-copy again. if len(resp) > 1: msg = (_('Unable to retype: current action needs volume-copy. ' 'A copy of volume %s exists. 
Adding another copy ' 'would exceed the limit of 2 copies.') % volume.name) raise exception.VolumeDriverException(message=msg) if old_opts['mirror_pool'] or new_opts['mirror_pool']: msg = (_('Unable to retype: current action needs volume-copy, ' 'it is not allowed for mirror volume ' '%s.') % volume.name) raise exception.VolumeDriverException(message=msg) need_check_dr_pool_param = True if change_mirror: if (new_opts['mirror_pool'] and not self._helpers.is_pool_defined( new_opts['mirror_pool'])): msg = (_('Unable to retype: The pool %s in which mirror copy ' 'is stored is not valid') % new_opts['mirror_pool']) raise exception.VolumeDriverException(message=msg) # migrate second copy to a dr pool or from a dr pool is not allowed if (old_opts['mirror_pool'] and new_opts[ 'mirror_pool'] and old_opts['rsize'] != -1): if is_old_type_dr_pool or is_new_type_dr_pool: msg = _('Unable to retype: the thin-provisioned or ' 'compressed vol can not be migrated from a dr pool' ' or to a dr pool.') raise exception.VolumeDriverException(message=msg) if not old_opts['mirror_pool'] and new_opts['mirror_pool']: need_check_dr_pool_param = True if new_rep_type != old_rep_type: if (old_io_grp not in StorwizeHelpers._get_valid_requested_io_groups( self._state, new_opts)): msg = (_('Unable to retype: it is not allowed to change ' 'replication type and io group at the same time.')) LOG.error(msg) raise exception.VolumeDriverException(message=msg) if new_rep_type and old_rep_type: msg = (_('Unable to retype: it is not allowed to change ' '%(old_rep_type)s volume to %(new_rep_type)s ' 'volume.') % {'old_rep_type': old_rep_type, 'new_rep_type': new_rep_type}) LOG.error(msg) raise exception.VolumeDriverException(message=msg) if not old_rep_type and new_rep_type: if new_opts['rsize'] != -1 and is_new_type_dr_pool: try: self._helpers.check_data_reduction_pool_params( new_opts) except Exception as err: msg = (_("Failed to retype volume, the error is " "%s") % err) raise exception.VolumeDriverException(message=msg) elif storwize_const.GMCV == new_rep_type: # To gmcv, we may change cycle_period_seconds if needed previous_cps = old_opts.get('cycle_period_seconds') new_cps = new_opts.get('cycle_period_seconds') if previous_cps != new_cps: self._helpers.change_relationship_cycleperiod(volume.name, new_cps) if (is_new_type_dr_pool and new_opts[ 'rsize'] != -1 and need_check_dr_pool_param == 1): try: self._helpers.check_data_reduction_pool_params(new_opts) except Exception as err: msg = (_("Failed to retype volume, the error is " "%s") % err) raise exception.VolumeDriverException(message=msg) if vdisk_changes and not need_copy: if is_old_type_dr_pool or is_new_type_dr_pool: msg = _('The volume specified is a thin or compressed volume ' 'in a data reduction pool. 
The autoexpand and warning' ' and easytier can not be changed.') raise exception.VolumeDriverException(message=msg) def _check_hyperswap_retype_params(self, volume, new_opts, old_opts, change_mirror, new_rep_type, old_rep_type, old_pool, new_pool, old_io_grp): if new_opts['mirror_pool'] or old_opts['mirror_pool']: msg = (_('Unable to retype volume %s: current action needs ' 'volume-copy, it is not allowed for hyperswap ' 'type.') % volume.name) LOG.error(msg) raise exception.InvalidInput(message=msg) if new_rep_type or old_rep_type: msg = _('Retype between replicated volume and hyperswap volume' ' is not allowed.') LOG.error(msg) raise exception.InvalidInput(message=msg) if (old_io_grp not in StorwizeHelpers._get_valid_requested_io_groups( self._state, new_opts)): msg = _('Unable to retype: it is not allowed to change ' 'hyperswap type and IO group at the same time.') LOG.error(msg) raise exception.InvalidInput(message=msg) if new_opts['volume_topology'] == 'hyperswap': if old_pool != new_pool: msg = (_('Unable to retype volume %s: current action needs ' 'volume pool change, hyperswap volume does not ' 'support pool change.') % volume.name) LOG.error(msg) raise exception.InvalidInput(message=msg) if not new_opts['easytier']: raise exception.InvalidInput( reason=_('The default easytier of hyperswap volume is ' 'on, it does not support easytier off.')) if old_opts['volume_topology'] != 'hyperswap': is_new_type_dr_pool = self._helpers.is_volume_type_dr_pools( new_pool, new_opts) if is_new_type_dr_pool and new_opts['rsize'] != -1: try: self._helpers.check_data_reduction_pool_params( new_opts) except Exception as err: msg = (_("Failed to retype volume, the error is " "%s") % err) raise exception.VolumeDriverException(reason=msg) if self._helpers._get_vdisk_fc_mappings(volume.name): msg = _('Unable to retype: it is not allowed to change a ' 'normal volume with snapshot to a hyperswap ' 'volume.') LOG.error(msg) raise exception.InvalidInput(message=msg) if (old_opts['volume_topology'] == 'hyperswap' and old_opts['peer_pool'] != new_opts['peer_pool']): msg = _('Unable to retype: it is not allowed to change a ' 'hyperswap volume peer_pool.') LOG.error(msg) raise exception.InvalidInput(message=msg) def _retype_hyperswap_volume(self, ctxt, volume, host, old_opts, new_opts, old_pool, new_pool, vdisk_changes, need_copy, new_type): if (old_opts['volume_topology'] != 'hyperswap' and new_opts['volume_topology'] == 'hyperswap'): LOG.debug('retype: Convert a normal volume %s to hyperswap ' 'volume.', volume.name) self._helpers.convert_volume_to_hyperswap(volume.name, new_opts, self._state) elif (old_opts['volume_topology'] == 'hyperswap' and new_opts['volume_topology'] != 'hyperswap'): LOG.debug('retype: Convert a hyperswap volume %s to normal ' 'volume.', volume.name) if new_pool == old_pool: self._helpers.convert_hyperswap_volume_to_normal( volume.name, old_opts['peer_pool']) elif new_pool == old_opts['peer_pool']: self._helpers.convert_hyperswap_volume_to_normal( volume.name, old_pool) else: rel_info = self._helpers.get_relationship_info(volume.name) aux_vdisk = rel_info['aux_vdisk_name'] if need_copy: self.add_vdisk_copy(aux_vdisk, old_opts['peer_pool'], new_type, auto_delete=True) elif vdisk_changes: self._helpers.change_vdisk_options(aux_vdisk, vdisk_changes, new_opts, self._state) if need_copy: self.add_vdisk_copy(volume.name, old_pool, new_type, auto_delete=True) elif vdisk_changes: self._helpers.change_vdisk_options(volume.name, vdisk_changes, new_opts, self._state) # flake8: noqa: C901 def 
retype(self, ctxt, volume, new_type, diff, host): """Convert the volume to be of the new type. Returns a boolean indicating whether the retype occurred. :param ctxt: Context :param volume: A dictionary describing the volume to migrate :param new_type: A dictionary describing the volume type to convert to :param diff: A dictionary with the difference between the two types :param host: A dictionary describing the host to migrate to, where host['host'] is its name, and host['capabilities'] is a dictionary of its reported capabilities. """ def retype_iogrp_property(volume, new, old): if new != old: self._helpers.change_vdisk_iogrp(volume['name'], self._state, (new, old)) LOG.debug('enter: retype: id=%(id)s, new_type=%(new_type)s,' 'diff=%(diff)s, host=%(host)s', {'id': volume['id'], 'new_type': new_type, 'diff': diff, 'host': host}) no_copy_keys = ['warning', 'autoexpand', 'easytier'] copy_keys = ['rsize', 'grainsize', 'compression'] all_keys = no_copy_keys + copy_keys old_opts = self._get_vdisk_params(volume['volume_type_id'], volume_metadata= volume.get('volume_metadata')) new_opts = self._get_vdisk_params(new_type['id'], volume_type=new_type) vdisk_changes = [] need_copy = False change_mirror = False aux_change_mirror = False for key in all_keys: if old_opts[key] != new_opts[key]: if key in copy_keys: need_copy = True break elif key in no_copy_keys: vdisk_changes.append(key) old_pool = volume_utils.extract_host(volume['host'], 'pool') new_pool = volume_utils.extract_host(host['host'], 'pool') if old_pool != new_pool: need_copy = True if old_opts['mirror_pool'] != new_opts['mirror_pool']: change_mirror = True if old_opts['aux_mirror_pool'] != new_opts['aux_mirror_pool']: aux_change_mirror = True # Check if retype affects volume replication model_update = dict() new_rep_type = self._get_specs_replicated_type(new_type) old_rep_type = self._get_volume_replicated_type(ctxt, volume) old_io_grp = self._helpers.get_volume_io_group(volume['name']) new_io_grp = self._helpers.select_io_group(self._state, new_opts, new_pool) self._verify_retype_params(volume, new_opts, old_opts, need_copy, change_mirror, new_rep_type, old_rep_type, vdisk_changes, old_pool, new_pool, old_io_grp) if old_opts['volume_topology'] or new_opts['volume_topology']: self._check_hyperswap_retype_params(volume, new_opts, old_opts, change_mirror, new_rep_type, old_rep_type, old_pool, new_pool, old_io_grp) self._retype_hyperswap_volume(ctxt, volume, host, old_opts, new_opts, old_pool, new_pool, vdisk_changes, need_copy, new_type) # Updating Hyperswap volume replication properties model_update = self._update_replication_properties(ctxt, volume, model_update) else: # hyperswap volume will select iogrp by storage. ignore iogrp here. if old_io_grp != new_io_grp: self._verify_iogrp(old_opts['rsize'], old_pool, old_opts, old_rep_type, volume.previous_status) if need_copy: self._check_volume_copy_ops() dest_pool = self._helpers.can_migrate_to_host(host, self._state) if dest_pool is None: return False retype_iogrp_property(volume, new_io_grp, old_io_grp) try: if self._state['code_level'] < (7, 6, 0, 0): new_op = self.add_vdisk_copy(volume.name, dest_pool, new_type) self._add_vdisk_copy_op(ctxt, volume, new_op) else: self.add_vdisk_copy(volume.name, dest_pool, new_type, auto_delete=True) except exception.VolumeDriverException: # roll back changing iogrp property retype_iogrp_property(volume, old_io_grp, new_io_grp) msg = (_('Unable to retype: A copy of volume %s exists. 
' 'Retyping would exceed the limit of 2 copies.'), volume['id']) raise exception.VolumeDriverException(message=msg) else: retype_iogrp_property(volume, new_io_grp, old_io_grp) self._helpers.change_vdisk_options(volume['name'], vdisk_changes, new_opts, self._state) if change_mirror: copies = self._helpers.get_vdisk_copies(volume.name) if not old_opts['mirror_pool'] and new_opts['mirror_pool']: # retype from non mirror vol to mirror vol self.add_vdisk_copy(volume['name'], new_opts['mirror_pool'], new_type) elif (old_opts['mirror_pool'] and not new_opts['mirror_pool']): # retype from mirror vol to non mirror vol secondary = copies['secondary'] if secondary: self._helpers.rm_vdisk_copy( volume.name, secondary['copy_id']) else: # migrate the second copy to another pool. self._helpers.migratevdisk( volume.name, new_opts['mirror_pool'], copies['secondary']['copy_id']) if new_opts['qos']: # Add the new QoS setting to the volume. If the volume has an # old QoS setting, it will be overwritten. self._helpers.update_vdisk_qos(volume['name'], new_opts['qos'], volume['size']) model_update = self._qos_model_update(model_update, volume) elif old_opts['qos']: # If the old_opts contain QoS keys, disable them. self._helpers.disable_vdisk_qos(volume['name'], old_opts['qos']) model_update = self._qos_model_update(model_update, volume) if new_opts['flashcopy_rate'] != old_opts['flashcopy_rate']: self._helpers.update_flashcopy_rate(volume.name, new_opts['flashcopy_rate']) if new_opts['clean_rate']: # Add the new clean_rate. If the old FC maps has the clean_rate # it will be overwritten. self._helpers.update_clean_rate(volume.name, new_opts['clean_rate']) # Delete replica if needed if self._state['code_level'] < (7, 7, 0, 0): force_unmap = False else: force_unmap = True if old_rep_type and not new_rep_type: target_volume, rel_info = ( self._helpers.get_target_volume_information(volume)) self._aux_backend_helpers.delete_rc_volume( target_volume, rel_info, target_vol=True, force_unmap=force_unmap, retain_aux_volume=self.configuration.safe_get( 'storwize_svc_retain_aux_volume')) if storwize_const.GMCV == old_rep_type: self._helpers.delete_vdisk( storwize_const.REPLICA_CHG_VOL_PREFIX + volume['name'], force_unmap=force_unmap, force_delete=False) if aux_change_mirror: aux_change_mirror = False model_update['replication_status'] = ( fields.ReplicationStatus.DISABLED) model_update['replication_driver_data'] = None model_update['replication_extended_status'] = None # Updating replication properties for a volume with replication # enabled. model_update = self._update_replication_properties(ctxt, volume, model_update) # Add replica if needed if not old_rep_type and new_rep_type: replica_obj = self._get_replica_obj(new_rep_type) if storwize_const.GMCV == new_rep_type: replica_obj.volume_replication_setup(ctxt, volume, new_type) # Set cycle_period_seconds if needed self._helpers.change_relationship_cycleperiod( volume['name'], new_opts.get('cycle_period_seconds')) else: replica_obj.volume_replication_setup(ctxt, volume) model_update['replication_status'] = ( fields.ReplicationStatus.ENABLED) # Updating replication properties for a volume with replication # enabled. 
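# Rough sketch (illustrative, not exhaustive) of the model_update handed
# back to the volume manager once this retype has just enabled
# replication, assuming _update_replication_properties only layers
# driver-maintained metadata on top of the status change:
#
#   {'replication_status': 'enabled',
#    'metadata': {...replication relationship properties...}}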
model_update = self._update_replication_properties(ctxt, volume, model_update) if aux_change_mirror: target_volume, rel_info = ( self._helpers.get_target_volume_information(volume)) aux_copies = self._aux_backend_helpers.get_vdisk_copies( target_volume) if (old_rep_type and new_rep_type) or (not old_rep_type and new_rep_type): # retype from non-mirror-rep to mirror-rep if (not old_opts['aux_mirror_pool'] and new_opts['aux_mirror_pool']): self._aux_backend_helpers.add_vdisk_copy( target_volume, new_opts['aux_mirror_pool'], new_type, self._aux_state, self.configuration) # retype from mirror-rep to non-mirror-rep elif (old_opts['aux_mirror_pool'] and not new_opts['aux_mirror_pool']): aux_secondary = aux_copies['secondary'] if aux_secondary: self._aux_backend_helpers.rm_vdisk_copy( target_volume, aux_secondary['copy_id']) LOG.debug('exit: retype: ild=%(id)s, new_type=%(new_type)s,' 'diff=%(diff)s, host=%(host)s', {'id': volume['id'], 'new_type': new_type, 'diff': diff, 'host': host['host']}) return True, model_update def update_migrated_volume(self, ctxt, volume, new_volume, original_volume_status): """Return model update from Storwize for migrated volume. This method should rename the back-end volume name(id) on the destination host back to its original name(id) on the source host. :param ctxt: The context used to run the method update_migrated_volume :param volume: The original volume that was migrated to this backend :param new_volume: The migration volume object that was created on this backend as part of the migration process :param original_volume_status: The status of the original volume :returns: model_update to update DB with any needed changes """ current_name = new_volume.name original_volume_name = volume.name LOG.debug("Attempt rename of %(cur)s to original name %(orig)s", dict(cur=current_name, orig=original_volume_name)) try: self._helpers.rename_vdisk(current_name, original_volume_name) rep_type = self._get_volume_replicated_type(ctxt, new_volume) if rep_type: rel_info = self._helpers.get_relationship_info(current_name) aux_vol = (storwize_const.REPLICA_AUX_VOL_PREFIX + original_volume_name) self._aux_backend_helpers.rename_vdisk( rel_info['aux_vdisk_name'], aux_vol) except exception.VolumeBackendAPIException: LOG.error('Unable to rename the logical volume ' 'for volume: %s', volume['id']) return {'_name_id': new_volume['_name_id'] or new_volume['id']} # If the back-end name(id) for the volume has been renamed, # it is OK for the volume to keep the original name(id) and there is # no need to use the column "_name_id" to establish the mapping # relationship between the volume id and the back-end volume # name(id). # Set the key "_name_id" to None for a successful rename. model_update = {'_name_id': None} return model_update def manage_existing(self, volume, ref): """Manages an existing vdisk. Renames the vdisk to match the expected name for the volume. Error checking done by manage_existing_get_size is not repeated - if we got here then we have a vdisk that isn't in use (or we don't care if it is in use. 
""" # Check that the reference is valid vdisk = self._manage_input_check(ref) vdisk_io_grp = self._helpers.get_volume_io_group(vdisk['name']) if vdisk_io_grp not in self._state['available_iogrps']: msg = (_("Failed to manage existing volume due to " "the volume to be managed is not in a valid " "I/O group.")) raise exception.ManageExistingVolumeTypeMismatch(reason=msg) # Add replication check ctxt = context.get_admin_context() rep_type = self._get_volume_replicated_type(ctxt, volume) vol_rep_type = None rel_info = self._helpers.get_relationship_info(vdisk['name']) copies = self._helpers.get_vdisk_copies(vdisk['name']) if rel_info and rel_info['copy_type'] != 'activeactive': vol_rep_type = ( storwize_const.GMCV if storwize_const.GMCV_MULTI == rel_info['cycling_mode'] else rel_info['copy_type']) aux_info = self._aux_backend_helpers.get_system_info() if rel_info['aux_cluster_id'] != aux_info['system_id']: msg = (_("Failed to manage existing volume due to the aux " "cluster for volume %(volume)s is %(aux_id)s. The " "configured cluster id is %(cfg_id)s") % {'volume': vdisk['name'], 'aux_id': rel_info['aux_cluster_id'], 'cfg_id': aux_info['system_id']}) raise exception.ManageExistingVolumeTypeMismatch(reason=msg) if vol_rep_type != rep_type: msg = (_("Failed to manage existing volume due to " "the replication type of the volume to be managed is " "mismatch with the provided replication type.")) raise exception.ManageExistingVolumeTypeMismatch(reason=msg) elif storwize_const.GMCV == rep_type: if volume['volume_type_id']: rep_opts = self._get_vdisk_params( volume['volume_type_id'], volume_metadata=volume.get('volume_metadata')) # Check cycle_period_seconds rep_cps = str(rep_opts.get('cycle_period_seconds')) if rel_info['cycle_period_seconds'] != rep_cps: msg = (_("Failed to manage existing volume due to " "the cycle_period_seconds %(vol_cps)s of " "the volume to be managed is mismatch with " "cycle_period_seconds %(type_cps)s in " "the provided gmcv replication type.") % {'vol_cps': rel_info['cycle_period_seconds'], 'type_cps': rep_cps}) raise exception.ManageExistingVolumeTypeMismatch(reason=msg) pool = volume_utils.extract_host(volume['host'], 'pool') if copies['primary']['mdisk_grp_name'] != pool: msg = (_("Failed to manage existing volume due to the " "pool of the volume to be managed does not " "match the backend pool. Pool of the " "volume to be managed is %(vdisk_pool)s. Pool " "of the backend is %(backend_pool)s.") % {'vdisk_pool': copies['primary']['mdisk_grp_name'], 'backend_pool': pool}) raise exception.ManageExistingVolumeTypeMismatch(reason=msg) if volume['volume_type_id']: opts = self._get_vdisk_params(volume['volume_type_id'], volume_metadata= volume.get('volume_metadata')) # Manage hyperswap volume if rel_info and rel_info['copy_type'] == 'activeactive': if opts['volume_topology'] != 'hyperswap': msg = _("Failed to manage existing volume due to " "the hyperswap volume to be managed is " "mismatched with the provided non-hyperswap type.") raise exception.ManageExistingVolumeTypeMismatch( reason=msg) aux_vdisk = rel_info['aux_vdisk_name'] aux_vol_attr = self._helpers.get_vdisk_attributes(aux_vdisk) peer_pool = aux_vol_attr['mdisk_grp_name'] if opts['peer_pool'] != peer_pool: msg = (_("Failed to manage existing hyperswap volume due " "to peer pool mismatch. 
The peer pool of the " "volume to be managed is %(vol_pool)s, but the " "peer_pool of the chosen type is %(peer_pool)s.") % {'vol_pool': peer_pool, 'peer_pool': opts['peer_pool']}) raise exception.ManageExistingVolumeTypeMismatch( reason=msg) else: if opts['volume_topology'] == 'hyperswap': msg = _("Failed to manage existing volume, the volume to " "be managed is not a hyperswap volume, " "mismatch with the provided hyperswap type.") raise exception.ManageExistingVolumeTypeMismatch( reason=msg) resp = self._helpers.lsvdiskcopy(vdisk['name']) expected_copy_num = 2 if opts['mirror_pool'] else 1 if len(resp) != expected_copy_num: msg = (_("Failed to manage existing volume due to mirror type " "mismatch. Volume to be managed has %(resp_len)s " "copies. mirror_pool of the chosen type is " "%(mirror_pool)s.") % {'resp_len': len(resp), 'mirror_pool': opts['mirror_pool']}) raise exception.ManageExistingVolumeTypeMismatch(reason=msg) if (opts['mirror_pool'] and opts['mirror_pool'] != copies['secondary']['mdisk_grp_name']): msg = (_("Failed to manage existing volume due to mirror pool " "mismatch. The secondary pool of the volume to be " "managed is %(sec_copy_pool)s. mirror_pool of the " "chosen type is %(mirror_pool)s.") % {'sec_copy_pool': copies['secondary']['mdisk_grp_name'], 'mirror_pool': opts['mirror_pool']}) raise exception.ManageExistingVolumeTypeMismatch( reason=msg) vdisk_copy = self._helpers.get_vdisk_copy_attrs(vdisk['name'], '0') if vdisk_copy['autoexpand'] == 'on' and opts['rsize'] == -1: msg = (_("Failed to manage existing volume due to " "the volume to be managed is thin, but " "the volume type chosen is thick.")) raise exception.ManageExistingVolumeTypeMismatch(reason=msg) if not vdisk_copy['autoexpand'] and opts['rsize'] != -1: msg = (_("Failed to manage existing volume due to " "the volume to be managed is thick, but " "the volume type chosen is thin.")) raise exception.ManageExistingVolumeTypeMismatch(reason=msg) if (vdisk_copy['compressed_copy'] == 'no' and opts['compression']): msg = (_("Failed to manage existing volume due to the " "volume to be managed is not compress, but " "the volume type chosen is compress.")) raise exception.ManageExistingVolumeTypeMismatch(reason=msg) if (vdisk_copy['compressed_copy'] == 'yes' and not opts['compression']): msg = (_("Failed to manage existing volume due to the " "volume to be managed is compress, but " "the volume type chosen is not compress.")) raise exception.ManageExistingVolumeTypeMismatch(reason=msg) if (vdisk_io_grp not in StorwizeHelpers._get_valid_requested_io_groups( self._state, opts)): msg = (_("Failed to manage existing volume due to " "I/O group mismatch. The I/O group of the " "volume to be managed is %(vdisk_iogrp)s. 
I/O group" " of the chosen type is %(opt_iogrp)s.") % {'vdisk_iogrp': vdisk['IO_group_name'], 'opt_iogrp': opts['iogrp']}) raise exception.ManageExistingVolumeTypeMismatch(reason=msg) if opts['rsize'] != -1 and self._helpers.is_volume_type_dr_pools( pool, opts, rep_type, rep_target_pool=self._replica_target[ 'pool_name'] if rep_type else None): try: self._helpers.check_data_reduction_pool_params(opts) except Exception as err: msg = (_("Failed to manage existing volume, the error is " "%s") % err) raise exception.ManageExistingVolumeTypeMismatch( reason=msg) model_update = {'replication_status': fields.ReplicationStatus.NOT_CAPABLE} self._helpers.rename_vdisk(vdisk['name'], volume['name']) if vol_rep_type: aux_vol = storwize_const.REPLICA_AUX_VOL_PREFIX + volume['name'] self._aux_backend_helpers.rename_vdisk(rel_info['aux_vdisk_name'], aux_vol) if storwize_const.GMCV == vol_rep_type: self._helpers.rename_vdisk( rel_info['master_change_vdisk_name'], storwize_const.REPLICA_CHG_VOL_PREFIX + volume['name']) self._aux_backend_helpers.rename_vdisk( rel_info['aux_change_vdisk_name'], storwize_const.REPLICA_CHG_VOL_PREFIX + aux_vol) model_update = {'replication_status': fields.ReplicationStatus.ENABLED} return model_update def manage_existing_get_size(self, volume, ref): """Return size of an existing Vdisk for manage_existing. existing_ref is a dictionary of the form: {'source-id': } or {'source-name': } Optional elements are: 'manage_if_in_use': True/False (default is False) If set to True, a volume will be managed even if it is currently attached to a host system. """ # Check that the reference is valid vdisk = self._manage_input_check(ref) # Check if the disk is in use, if we need to. manage_if_in_use = ref.get('manage_if_in_use', False) if (not manage_if_in_use and self._helpers.is_vdisk_in_use(vdisk['name'])): reason = _('The specified vdisk is mapped to a host.') raise exception.ManageExistingInvalidReference(existing_ref=ref, reason=reason) return int(math.ceil(float(vdisk['capacity']) / units.Gi)) def unmanage(self, volume): """Remove the specified volume from Cinder management.""" pass @staticmethod def _get_rccg_name(group, grp_id=None, hyper_grp=False): group_id = group.id if group else grp_id rccg = (storwize_const.HYPERCG_PREFIX if hyper_grp else storwize_const.RCCG_PREFIX) return rccg + group_id[0:4] + '-' + group_id[-5:] @staticmethod def _get_volumegroup_name(group, grp_id=None): group_id = group.id if group else grp_id vg = storwize_const.VG_PREFIX return vg + group_id[0:4] + '-' + group_id[-5:] @staticmethod def _get_volumegroup_snapshot_name(group_snapshot, grp_snapshot_id=None): group_snapshot_id = ( group_snapshot.id if group_snapshot else grp_snapshot_id) vg_snapshot = storwize_const.VG_SNAPSHOT_PREFIX return vg_snapshot + group_snapshot_id # Add CG capability to generic volume groups def create_group(self, context, group): # noqa: C901 """Creates a group. :param context: the context of the caller. :param group: the group object. 
:returns: model_update """ LOG.debug("Creating group.") model_update = {'status': fields.GroupStatus.AVAILABLE} support_grps = ['group_snapshot_enabled', 'consistent_group_snapshot_enabled', 'consistent_group_replication_enabled', 'hyperswap_group_enabled', 'volume_group_enabled', 'temporary_volume_group_enabled'] support_grp_clone_specs = ['clone_type'] group_type = objects.GroupType.get_by_id(context, group.group_type_id) all_group_specs = group_type.group_specs # Check if group_specs are supported for group_spec in all_group_specs: if (group_spec not in support_grps and group_spec not in support_grp_clone_specs): LOG.error('Unable to create group: %s is not a supported ' 'group type.', group.group_type_id) model_update = {'status': fields.GroupStatus.ERROR} return model_update # Check if a non-clone group_spec is passed among supported group types # and only one non-clone group_spec is passed count_group_type_specs = 0 for group_spec in all_group_specs: if group_spec in support_grps: count_group_type_specs += 1 if count_group_type_specs == 0: LOG.error('Unable to create group: ' 'No supported group type provided') model_update = {'status': fields.GroupStatus.ERROR} return model_update if count_group_type_specs > 1: LOG.error('Unable to create group: create group with mixed specs ' 'is not supported.') model_update = {'status': fields.GroupStatus.ERROR} return model_update if (volume_utils.is_group_a_cg_snapshot_type(group) or volume_utils.is_group_a_type(group, "group_snapshot_enabled")): for vol_type_id in group.volume_type_ids: replication_type = self._get_volume_replicated_type( context, None, vol_type_id) if replication_type: # An unsupported configuration LOG.error('Unable to create group: create consistent ' 'snapshot group with replication volume type is ' 'not supported.') model_update = {'status': fields.GroupStatus.ERROR} return model_update opts = self._get_vdisk_params(vol_type_id) if opts['volume_topology']: # An unsupported configuration LOG.error('Unable to create group: create consistent ' 'snapshot group with a hyperswap volume type' ' is not supported.') model_update = {'status': fields.GroupStatus.ERROR} return model_update # We'll rely on the generic group implementation if it is # a non-consistent snapshot group. if volume_utils.is_group_a_type(group, "group_snapshot_enabled"): raise NotImplementedError() if volume_utils.is_group_a_type( group, "consistent_group_replication_enabled"): self._validate_replication_enabled() rccg_type = None for vol_type_id in group.volume_type_ids: replication_type = self._get_volume_replicated_type( context, None, vol_type_id) if not replication_type: # An unsupported configuration LOG.error('Unable to create group: create consistent ' 'replication group with non-replication volume' ' type is not supported.') model_update = {'status': fields.GroupStatus.ERROR} return model_update if not rccg_type: rccg_type = replication_type elif rccg_type != replication_type: # An unsupported configuration LOG.error('Unable to create group: create consistent ' 'replication group with different replication ' 'types is not supported.') model_update = {'status': fields.GroupStatus.ERROR} return model_update rccg_name = self._get_rccg_name(group) try: tgt_sys = self._aux_backend_helpers.get_system_info() self._helpers.create_rccg( rccg_name, tgt_sys.get('system_id')) model_update.update({'replication_status': fields.ReplicationStatus.ENABLED}) except exception.VolumeBackendAPIException as err: LOG.error("Failed to create rccg %(rccg)s. 
" "Exception: %(exception)s.", {'rccg': rccg_name, 'exception': err}) model_update = {'status': fields.GroupStatus.ERROR} return model_update if volume_utils.is_group_a_type(group, "hyperswap_group_enabled"): if not self._helpers.is_system_topology_hyperswap(self._state): LOG.error('Unable to create group: create group on ' 'a system that does not support hyperswap.') model_update = {'status': fields.GroupStatus.ERROR} for vol_type_id in group.volume_type_ids: opts = self._get_vdisk_params(vol_type_id) if not opts['volume_topology']: # An unsupported configuration LOG.error('Unable to create group: create consistent ' 'hyperswap group with non-hyperswap volume' ' type is not supported.') model_update = {'status': fields.GroupStatus.ERROR} return model_update if ( volume_utils.is_group_a_type(group, "volume_group_enabled") or volume_utils.is_group_a_type( group, "temporary_volume_group_enabled")): try: if volume_utils.is_group_a_type( group, "temporary_volume_group_enabled"): self._helpers.check_codelevel_for_temp_volumegroup( self._state['code_level']) else: self._helpers.check_codelevel_for_volumegroup( self._state['code_level']) for vol_type_id in group.volume_type_ids: replication_type = self._get_volume_replicated_type( context, None, vol_type_id) if replication_type: # An unsupported configuration LOG.error('Unable to create group: ' 'volume_group_enabled or ' 'temporary_volume_group_enabled ' 'group with ' 'replication volume type is ' 'not supported.') model_update = {'status': fields.GroupStatus.ERROR} return model_update opts = self._get_vdisk_params(vol_type_id) if opts['volume_topology']: # An unsupported configuration LOG.error('Unable to create group: ' 'volume_group_enabled or ' 'temporary_volume_group_enabled ' 'group with a ' 'hyperswap volume type is ' 'not supported.') model_update = {'status': fields.GroupStatus.ERROR} return model_update if volume_utils.is_group_a_type( group, "temporary_volume_group_enabled"): return model_update volumegroup_name = self._get_volumegroup_name(group) self._helpers.create_volumegroup(volumegroup_name) except exception.VolumeBackendAPIException as err: LOG.error("Failed to create volume group %(volumegroup)s. " "Exception: %(exception)s.", {'volumegroup': volumegroup_name, 'exception': err}) model_update = {'status': fields.GroupStatus.ERROR} return model_update return model_update def delete_group(self, context, group, volumes): """Deletes a group. :param context: the context of the caller. :param group: the group object. :param volumes: a list of volume objects in the group. :returns: model_update, volumes_model_update """ LOG.debug("Deleting group.") # we'll rely on the generic group implementation if it is # not a consistency group and not a consistency replication # request and not a hyperswap group request. 
if (not volume_utils.is_group_a_cg_snapshot_type(group) and not volume_utils.is_group_a_type( group, "consistent_group_replication_enabled") and not volume_utils.is_group_a_type( group, "hyperswap_group_enabled") and not volume_utils.is_group_a_type( group, "volume_group_enabled") and not volume_utils.is_group_a_type( group, "temporary_volume_group_enabled")): raise NotImplementedError() model_update = {'status': fields.GroupStatus.DELETED} volumes_model_update = [] if volume_utils.is_group_a_type( group, "consistent_group_replication_enabled"): model_update, volumes_model_update = self._delete_replication_grp( group, volumes) elif volume_utils.is_group_a_type(group, "hyperswap_group_enabled"): model_update, volumes_model_update = self._delete_hyperswap_grp( group, volumes) elif volume_utils.is_group_a_type(group, "volume_group_enabled"): self._helpers.check_codelevel_for_volumegroup( self._state['code_level']) model_update, volumes_model_update = self._delete_volumegroup( group, volumes) elif volume_utils.is_group_a_type( group, "temporary_volume_group_enabled"): self._helpers.check_codelevel_for_temp_volumegroup( self._state['code_level']) model_update, volumes_model_update = self._delete_volumegroup( group, volumes) else: for volume in volumes: try: self._helpers.delete_vdisk( volume['name'], force_unmap=False, force_delete=True) volumes_model_update.append( {'id': volume.id, 'status': 'deleted'}) except exception.VolumeBackendAPIException as err: model_update['status'] = ( fields.GroupStatus.ERROR_DELETING) LOG.error("Failed to delete the volume %(vol)s of CG. " "Exception: %(exception)s.", {'vol': volume.name, 'exception': err}) volumes_model_update.append( {'id': volume.id, 'status': fields.GroupStatus.ERROR_DELETING}) return model_update, volumes_model_update def update_group(self, context, group, add_volumes=None, remove_volumes=None): """Updates a group. :param context: the context of the caller. :param group: the group object. :param add_volumes: a list of volume objects to be added. :param remove_volumes: a list of volume objects to be removed. :returns: model_update, add_volumes_update, remove_volumes_update """ LOG.debug("Updating group.") # we'll rely on the generic group implementation if it is not a # consistency group request and not consistency replication request # and not a hyperswap group request. 
if (not volume_utils.is_group_a_cg_snapshot_type(group) and not volume_utils.is_group_a_type( group, "consistent_group_replication_enabled") and not volume_utils.is_group_a_type( group, "hyperswap_group_enabled") and not volume_utils.is_group_a_type( group, "volume_group_enabled") and not volume_utils.is_group_a_type( group, "temporary_volume_group_enabled")): raise NotImplementedError() if volume_utils.is_group_a_type( group, "consistent_group_replication_enabled"): return self._update_replication_grp(context, group, add_volumes, remove_volumes) if volume_utils.is_group_a_type(group, "hyperswap_group_enabled"): return self._update_hyperswap_group(context, group, add_volumes, remove_volumes) if volume_utils.is_group_a_cg_snapshot_type(group): return None, None, None migrate_from_flashcopy = self._get_config_param_value( 'migrate_from_flashcopy', None) if volume_utils.is_group_a_type(group, "volume_group_enabled"): self._helpers.check_codelevel_for_volumegroup( self._state['code_level']) return self._update_volumegroup(context, group, add_volumes, remove_volumes, migrate_from_flashcopy) if volume_utils.is_group_a_type( group, "temporary_volume_group_enabled"): self._helpers.check_codelevel_for_temp_volumegroup( self._state['code_level']) return self._update_temporary_volumegroup(context, group, add_volumes, remove_volumes, migrate_from_flashcopy) def create_group_from_src(self, context, group, volumes, group_snapshot=None, snapshots=None, source_group=None, source_vols=None): """Creates a group from source. :param context: the context of the caller. :param group: the Group object to be created. :param volumes: a list of Volume objects in the group. :param group_snapshot: the GroupSnapshot object as source. :param snapshots: a list of snapshot objects in group_snapshot. :param source_group: the Group object as source. :param source_vols: a list of volume objects in the source_group. :returns: model_update, volumes_model_update """ LOG.debug('Enter: create_group_from_src.') is_hyper_group = False if volume_utils.is_group_a_type(group, "hyperswap_group_enabled"): is_hyper_group = True if (not volume_utils.is_group_a_cg_snapshot_type(group) and not volume_utils.is_group_a_type (group, "consistent_group_replication_enabled") and not volume_utils.is_group_a_type( group, "hyperswap_group_enabled")): # we'll rely on the generic volume groups implementation if it is # not a consistency group request. 
raise NotImplementedError() if group_snapshot and snapshots: cg_name = 'cg-' + group_snapshot.id sources = snapshots elif source_group and source_vols: cg_name = 'cg-' + source_group.id sources = source_vols else: error_msg = _("create_group_from_src must be creating from a " "group snapshot, or a source group.") raise exception.InvalidInput(reason=error_msg) LOG.debug('create_group_from_src: cg_name %(cg_name)s' ' %(sources)s', {'cg_name': cg_name, 'sources': sources}) self._helpers.create_fc_consistgrp(cg_name) timeout = self.configuration.storwize_svc_flashcopy_timeout model_update, volumes_model = ( self._helpers.create_cg_from_source(group, cg_name, sources, volumes, self._state, self.configuration, timeout)) if volume_utils.is_group_a_type( group, "consistent_group_replication_enabled"): self._validate_replication_enabled() rccg_name = self._get_rccg_name(group) try: tgt_sys = self._aux_backend_helpers.get_system_info() self._helpers.create_rccg(rccg_name, tgt_sys.get('system_id')) model_update.update({'replication_status': fields.ReplicationStatus.ENABLED}) except exception.VolumeBackendAPIException as err: LOG.error("Failed to create rccg %(rccg)s. " "Exception: %(exception)s.", {'rccg': rccg_name, 'exception': err}) model_update = {'status': fields.GroupStatus.ERROR} for vol in volumes: rep_type = self._get_volume_replicated_type(context, vol) volume_model = dict() for model in volumes_model: if vol.id == model["id"]: volume_model = model break if rep_type: replica_obj = self._get_replica_obj(rep_type) replica_obj.volume_replication_setup(context, vol) volume_model['replication_status'] = ( fields.ReplicationStatus.ENABLED) # Updating replication properties for a volume with replication # enabled. self._update_replication_properties(context, vol, volume_model) opts = self._get_vdisk_params(vol['volume_type_id'], volume_metadata= vol.get('volume_metadata')) if opts['qos']: # Updating QoS properties for a volume self._helpers.add_vdisk_qos(vol['name'], opts['qos'], vol['size']) self._qos_model_update(volume_model, vol) if is_hyper_group: self._helpers.ensure_vdisk_no_fc_mappings(vol['name'], allow_snaps=True, allow_fctgt=False) opts = self._get_vdisk_params(vol['volume_type_id'], volume_metadata= vol.get('volume_metadata')) self._helpers.convert_volume_to_hyperswap(vol['name'], opts, self._state) if volume_utils.is_group_a_type( group, "consistent_group_replication_enabled"): model_update, added_vols, removed_vols = ( self._update_replication_grp(context, group, volumes, [])) if model_update.get('status') != fields.GroupStatus.ERROR: # Updating RCCG property to volume metadata for model in volumes_model: model['metadata']['Consistency Group Name'] = rccg_name LOG.debug("Leave: create_group_from_src.") return model_update, volumes_model def create_group_snapshot(self, context, group_snapshot, snapshots): """Creates a group_snapshot. :param context: the context of the caller. :param group_snapshot: the GroupSnapshot object to be created. :param snapshots: a list of Snapshot objects in the group_snapshot. 
:returns: model_update, snapshots_model_update """ if (volume_utils.is_group_a_cg_snapshot_type(group_snapshot) or volume_utils.is_group_a_type (group_snapshot, "consistent_group_replication_enabled") or volume_utils.is_group_a_type( group_snapshot, "hyperswap_group_enabled")): # Use group_snapshot id as cg name cg_name = 'cg_snap-' + group_snapshot.id # Create new cg as cg_snapshot self._helpers.create_fc_consistgrp(cg_name) timeout = self.configuration.storwize_svc_flashcopy_timeout model_update, snapshots_model = ( self._helpers.run_consistgrp_snapshots(cg_name, snapshots, self._state, self.configuration, timeout)) elif volume_utils.is_group_a_type( group_snapshot, "volume_group_enabled"): try: self._helpers.check_codelevel_for_volumegroup( self._state['code_level']) params = dict() # Use group_snapshot id as volumegroup name volumegroup_snapshot_name = ( self._get_volumegroup_snapshot_name(group_snapshot)) params["name"] = volumegroup_snapshot_name volumegroup_name = self._get_volumegroup_name( None, grp_id=group_snapshot.group_id) params["volumegroup"] = volumegroup_name model_update = {'status': fields.GroupSnapshotStatus.AVAILABLE} snapshots_model = [] self._helpers.create_volumegroup_snapshot(params) except exception.VolumeBackendAPIException as err: model_update['status'] = fields.GroupSnapshotStatus.ERROR LOG.error("Failed to create VolumeGroup Snapshot. " "Exception: %s.", err) for snapshot in snapshots: self._update_volumegroup_snapshot_properties( context, snapshot, group_snapshot) snapshots_model.append( {'id': snapshot['id'], 'status': model_update['status'], 'replication_status': fields.ReplicationStatus.NOT_CAPABLE }) else: # we'll rely on the generic group implementation if it is not a # consistency group/volumegroup request. raise NotImplementedError() return model_update, snapshots_model def delete_group_snapshot(self, context, group_snapshot, snapshots): """Deletes a group_snapshot. :param context: the context of the caller. :param group_snapshot: the GroupSnapshot object to be deleted. :param snapshots: a list of snapshot objects in the group_snapshot. 
:returns: model_update, snapshots_model_update """ if (volume_utils.is_group_a_cg_snapshot_type(group_snapshot) or volume_utils.is_group_a_type( group_snapshot, "hyperswap_group_enabled")): cgsnapshot_id = group_snapshot.id cg_name = 'cg_snap-' + cgsnapshot_id model_update, snapshots_model = ( self._helpers.delete_consistgrp_snapshots(cg_name, snapshots)) elif volume_utils.is_group_a_type( group_snapshot, "volume_group_enabled"): try: self._helpers.check_codelevel_for_volumegroup( self._state['code_level']) params = dict() volumegroup_snapshot_name = ( self._get_volumegroup_snapshot_name(group_snapshot)) params["name"] = volumegroup_snapshot_name volumegroup_name = self._get_volumegroup_name( None, grp_id=group_snapshot.group_id) params["volumegroup"] = volumegroup_name model_update = {'status': fields.GroupSnapshotStatus.DELETED} snapshots_model = [] self._helpers.delete_volumegroup_snapshot(params) for snapshot in snapshots: self._update_volumegroup_snapshot_properties( context, snapshot) snapshots_model.append( {'id': snapshot['id'], 'status': fields.GroupSnapshotStatus.DELETED}) except exception.VolumeBackendAPIException as err: model_update['status'] = ( fields.GroupSnapshotStatus.ERROR_DELETING) for snapshot in snapshots: snapshots_model.append( {'id': snapshot['id'], 'status': fields.GroupSnapshotStatus.ERROR_DELETING}) LOG.error("Failed to delete the volume_group_snapshot %(snap) " "with Exception: %(exception)s.", {'snap': group_snapshot.group_id, 'exception': err}) else: # we'll rely on the generic group implementation if it is not a # consistency group/volumegroup request. raise NotImplementedError() return model_update, snapshots_model @volume_utils.trace def revert_to_snapshot(self, context, volume, snapshot): """Revert volume to snapshot.""" if snapshot.volume_size != volume.size: raise exception.InvalidInput( reason=_('Reverting volume is not supported if the volume ' 'size is not equal to the snapshot size.')) rep_type = self._get_volume_replicated_type(context, volume) if rep_type: try: rccg_name = self._helpers.get_rccg_name_by_volume_name( volume.name) if rccg_name: self._helpers.stop_rccg(rccg_name, access=False) else: self._helpers.stop_relationship(volume.name, access=False) except Exception as err: msg = (_("Stop RC or rccg relationship has failed for %(vol)s " "due to: %(err)s.") % {"vol": volume.name, "err": err}) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) try: self._helpers.pretreatment_before_revert(volume.name) except Exception as err: msg = (_("Pretreatment before revert volume %(vol)s to snapshot " "%(snap)s failed due to: %(err)s.") % {"vol": volume.name, "snap": snapshot.name, "err": err}) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) opts = self._get_vdisk_params(volume.volume_type_id) try: self._helpers.run_flashcopy( snapshot.name, volume.name, self.configuration.storwize_svc_flashcopy_timeout, opts['flashcopy_rate'], opts['clean_rate'], True, True) if rep_type: if rccg_name: self._helpers.start_rccg(rccg_name, primary=None) else: self._helpers.start_relationship(volume.name, primary=None) except Exception as err: msg = (_("Reverting volume %(vol)s to snapshot %(snap)s failed " "due to: %(err)s.") % {"vol": volume.name, "snap": snapshot.name, "err": err}) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) def get_pool(self, volume): attr = self._helpers.get_vdisk_attributes(volume['name']) if attr is None: msg = (_('get_pool: Failed to get attributes for volume ' '%s') % volume['name']) 
LOG.error(msg) raise exception.VolumeDriverException(message=msg) return attr['mdisk_grp_name'] def get_hyperswap_storage_state(self): storage_state = fields.ReplicationStatus.ENABLED disabled_reason = None site_node_info = {} site_node_down_info = {} storage_nodes = self._helpers.get_node_info(online_node=False) for node, node_info in storage_nodes.items(): if node_info['site_id']: site = node_info['site_id'] if site not in site_node_info: site_node_info[site] = [] site_node_down_info[site] = {'nodes_down': 0} site_node_info[site].append(node_info) if node_info['status'] not in ['online', 'degraded']: site_node_down_info[site]['nodes_down'] += 1 for site, site_info in site_node_down_info.items(): if len(site_node_info[site]) == site_info['nodes_down']: storage_state = fields.ReplicationStatus.DISABLED site_name = site_node_info[site][0]['site_name'] disabled_reason = "{0} is down".format(site_name) break return storage_state, disabled_reason def _update_volume_stats(self): """Retrieve stats info from volume group.""" LOG.debug("Updating volume stats.") data = {} data['vendor_name'] = 'IBM' data['driver_version'] = self.VERSION data['storage_protocol'] = self.protocol data['pools'] = [] backend_name = self.configuration.safe_get('volume_backend_name') data['volume_backend_name'] = (backend_name or self._state['system_name']) data['pools'] = [self._build_pool_stats(pool) for pool in self._get_backend_pools()] if self._helpers.is_system_topology_hyperswap(self._state): peer_pool = self._get_backend_peer_pool() if peer_pool: data['pools'].append(self._build_pool_stats(peer_pool)) if self._replica_enabled: data['replication'] = self._replica_enabled data['replication_enabled'] = self._replica_enabled data['replication_targets'] = self._get_replication_targets() data['consistent_group_replication_enabled'] = True remote_data = dict() remote_data['pools'] = [self._build_pool_stats(pool, target=True) for pool in [self._replica_target.get('pool_name')]] self._aux_backend_helpers.stats = remote_data if self._helpers.is_system_topology_hyperswap(self._state): data['replication_enabled'] = True try: state, reason = self.get_hyperswap_storage_state() if state != fields.ReplicationStatus.ENABLED: data['replication_enabled'] = False data['disabled_reason'] = reason except exception.VolumeBackendAPIException as exc: LOG.warning("Failed to get node info. " "Exception: %(ex)s.", {'ex': exc.msg}) self._stats = data def _build_pool_stats(self, pool, target=False): """Build pool status""" QoS_support = True pool_stats = {} is_dr_pool = False if target: pool_data = self._aux_backend_helpers.get_pool_attrs(pool) system_id = self._aux_state['system_id'] compression_enabled = self._aux_state['compression_enabled'] else: pool_data = self._helpers.get_pool_attrs(pool) system_id = self._state['system_id'] compression_enabled = self._state['compression_enabled'] if pool_data: easy_tier = pool_data['easy_tier'] in ['on', 'auto'] total_capacity_gb = float(pool_data['capacity']) / units.Gi free_capacity_gb = float(pool_data['free_capacity']) / units.Gi provisioned_capacity_gb = float( pool_data['virtual_capacity']) / units.Gi rsize = self.configuration.safe_get( 'storwize_svc_vol_rsize') # rsize of -1 or 100 means fully allocate the mdisk use_thick_provisioning = rsize == -1 or rsize == 100 over_sub_ratio = self.configuration.safe_get( 'max_over_subscription_ratio') location_info = ('StorwizeSVCDriver:%(sys_id)s:%(pool)s' % {'sys_id': system_id, 'pool': pool_data['name']}) multiattach = (self.configuration. 
storwize_svc_multihostmap_enabled) backend_state = ('up' if pool_data['status'] == 'online' else 'down') # Get the data_reduction information for pool and set # is_dr_pool flag. if pool_data.get('data_reduction'): is_dr_pool = pool_data.get('data_reduction').lower() == 'yes' pool_stats = { 'pool_name': pool_data['name'], 'total_capacity_gb': total_capacity_gb, 'free_capacity_gb': free_capacity_gb, 'provisioned_capacity_gb': provisioned_capacity_gb, 'compression_support': compression_enabled, 'reserved_percentage': self.configuration.reserved_percentage, 'QoS_support': QoS_support, 'consistencygroup_support': True, 'location_info': location_info, 'easytier_support': easy_tier, 'multiattach': multiattach, 'thin_provisioning_support': not use_thick_provisioning, 'thick_provisioning_support': use_thick_provisioning, 'max_over_subscription_ratio': over_sub_ratio, 'consistent_group_snapshot_enabled': True, 'backend_state': backend_state, 'data_reduction': is_dr_pool, 'site_id': pool_data['site_id'], 'site_name': pool_data['site_name'], } if self._replica_enabled: pool_stats.update({ 'replication_enabled': self._replica_enabled, 'replication_type': self._supported_replica_types, 'replication_targets': self._get_replication_targets(), 'replication_count': len(self._get_replication_targets()), 'consistent_group_replication_enabled': True }) else: LOG.error('Failed getting details for pool %s.', pool) pool_stats = {'pool_name': pool, 'total_capacity_gb': 0, 'free_capacity_gb': 0, 'provisioned_capacity_gb': 0, 'thin_provisioning_support': True, 'thick_provisioning_support': False, 'max_over_subscription_ratio': 0, 'reserved_percentage': 0, 'data_reduction': is_dr_pool, 'site_id': None, 'site_name': None, 'backend_state': 'down'} return pool_stats def _get_replication_targets(self): return [self._replica_target['backend_id']] def _manage_input_check(self, ref): """Verify the input of manage function.""" # Check that the reference is valid if 'source-name' in ref: manage_source = ref['source-name'] vdisk = self._helpers.get_vdisk_attributes(manage_source) elif 'source-id' in ref: manage_source = ref['source-id'] vdisk = self._helpers.vdisk_by_uid(manage_source) else: reason = _('Reference must contain source-id or ' 'source-name element.') raise exception.ManageExistingInvalidReference(existing_ref=ref, reason=reason) if vdisk is None: reason = (_('No vdisk with the UID specified by ref %s.') % manage_source) raise exception.ManageExistingInvalidReference(existing_ref=ref, reason=reason) return vdisk def _delete_replication_grp(self, group, volumes): if self._state['code_level'] < (7, 7, 0, 0): force_unmap = False else: force_unmap = True model_update = {'status': fields.GroupStatus.DELETED} volumes_model_update = [] rccg_name = self._get_rccg_name(group) try: self._helpers.delete_rccg(rccg_name) except exception.VolumeBackendAPIException as err: LOG.error("Failed to delete rccg %(rccg)s. 
" "Exception: %(exception)s.", {'rccg': rccg_name, 'exception': err}) model_update = {'status': fields.GroupStatus.ERROR_DELETING} for volume in volumes: try: target_volume, rel_info = ( self._helpers.get_target_volume_information(volume)) self._aux_backend_helpers.delete_rc_volume( target_volume, rel_info, target_vol=True, force_unmap=force_unmap) self._master_backend_helpers.delete_rc_volume( volume.name, force_unmap=force_unmap) volumes_model_update.append( {'id': volume.id, 'status': 'deleted'}) except exception.VolumeDriverException as err: model_update['status'] = ( fields.GroupStatus.ERROR_DELETING) LOG.error("Failed to delete the volume %(vol)s of CG. " "Exception: %(exception)s.", {'vol': volume.name, 'exception': err}) volumes_model_update.append( {'id': volume.id, 'status': fields.GroupStatus.ERROR_DELETING}) return model_update, volumes_model_update def _update_replication_grp(self, context, group, add_volumes, remove_volumes): model_update = {'status': fields.GroupStatus.AVAILABLE} LOG.info("Update replication group: %(group)s. ", {'group': group.id}) rccg_name = self._get_rccg_name(group) # This code block fails during remove of volumes from group try: rccg = self._helpers.get_rccg(rccg_name) except Exception as ex: if len(add_volumes) > 0: LOG.exception("Unable to retrieve " "replication group information. Failed " "with exception %(ex)s", ex) if not rccg and len(add_volumes) > 0: LOG.error("Failed to update group: %(grp)s does not exist in " "backend.", {'grp': group.id}) model_update['status'] = fields.GroupStatus.ERROR return model_update, None, None # Add remote copy relationship to rccg added_vols = [] for volume in add_volumes: try: vol_name = (volume.name if not self._active_backend_id else storwize_const.REPLICA_AUX_VOL_PREFIX + volume.name) rcrel = self._helpers.get_relationship_info(vol_name) if not rcrel: LOG.error("Failed to update group: remote copy " "relationship of %(vol)s does not exist in " "backend.", {'vol': volume.id}) model_update['status'] = fields.GroupStatus.ERROR else: if rccg and rccg.get('cycling_mode', None) == 'multi': self._helpers.stop_relationship(vol_name, rcrel=rcrel['name']) rcrel = self._helpers.get_relationship_info(vol_name) if (rccg['state'] != 'empty' and rccg['state'] != 'consistent_stopped' or rccg['state'] != 'inconsistent_stopped'): self._helpers.stop_rccg(rccg_name) # To handle existing group updation, refresh rccg # state to avoid unnecessary stop_rccg calls. rccg = self._helpers.get_rccg(rccg_name) if (rccg['copy_type'] != 'empty_group' and any(k for k in ('copy_type', 'state', 'primary', 'cycling_mode', 'cycle_period_seconds') if rccg[k] != rcrel[k])): LOG.error("Failed to update rccg %(rccg)s: remote " "copy type of %(vol)s is %(vol_rc_type)s, " "the rccg type is %(rccg_type)s. rcrel " "state %(rcrel_state)s, rccg state is " "%(rccg_state)s rcrel primary is " "%(rcrel_primary)s, rccg primary is " "%(rccg_primary)s. rcrel cycling mode is " "%(rcrel_cmode)s, rccg cycling mode is " "%(rccg_cmode)s. rcrel cycling period is " "%(rcrel_period)s, rccg cycling " "period is %(rccg_period)s. 
", {'rccg': rccg_name, 'vol': volume.id, 'vol_rc_type': rcrel['copy_type'], 'rccg_type': rccg['copy_type'], 'rcrel_state': rcrel['state'], 'rccg_state': rccg['state'], 'rcrel_primary': rcrel['primary'], 'rccg_primary': rccg['primary'], 'rcrel_cmode': rcrel['cycling_mode'], 'rccg_cmode': rccg['cycling_mode'], 'rcrel_period': rcrel['cycle_period_seconds'], 'rccg_period': rccg['cycle_period_seconds']}) # This rcrel updation failed ,it has to be started # explicitly. self._helpers.start_relationship(vol_name, rcrel=rcrel['name']) model_update['status'] = fields.GroupStatus.ERROR else: self._helpers.chrcrelationship(rcrel['name'], rccg_name) if rccg['copy_type'] == 'empty_group': rccg = self._helpers.get_rccg(rccg_name) added_vols.append({'id': volume.id, 'group_id': group.id}) # Updating RCCG properties for a volume self._update_rccg_properties(context, volume, group) except exception.VolumeBackendAPIException as err: model_update['status'] = fields.GroupStatus.ERROR LOG.error("Failed to add the remote copy of volume %(vol)s to " "group. Exception: %(exception)s.", {'vol': volume.name, 'exception': err}) self._helpers.start_relationship(vol_name) if (rccg and len(add_volumes) > 0 and rccg.get('cycling_mode', None) == 'multi'): if rccg.get('primary', None) == 'aux': self._helpers.start_rccg(rccg_name, primary='aux') elif rccg.get('primary', None) == 'master': self._helpers.start_rccg(rccg_name, primary='master') # Remove remote copy relationship from rccg removed_vols = [] for volume in remove_volumes: try: vol_name = (volume.name if not self._active_backend_id else storwize_const.REPLICA_AUX_VOL_PREFIX + volume.name) rcrel = self._helpers.get_relationship_info(vol_name) if not rcrel: LOG.error("Failed to update group: remote copy " "relationship of %(vol)s does not exist in " "backend.", {'vol': volume.id}) model_update['status'] = fields.GroupStatus.ERROR else: self._helpers.chrcrelationship(rcrel['name']) removed_vols.append({'id': volume.id, 'group_id': None}) # Updating RCCG properties for a volume self._update_rccg_properties(context, volume) except exception.VolumeBackendAPIException as err: model_update['status'] = fields.GroupStatus.ERROR LOG.error("Failed to remove the remote copy of volume %(vol)s " "from group. Exception: %(exception)s.", {'vol': volume.name, 'exception': err}) return model_update, added_vols, removed_vols def _delete_volumegroup(self, group, volumes): model_update = {'status': fields.GroupStatus.DELETED} volumes_model_update = [] force_unmap = True if self._state['code_level'] < (7, 7, 0, 0): force_unmap = False for volume in volumes: volume = self._helpers.get_volume_name_from_metadata(volume) if self._active_backend_id: msg = (_('Error: deleting non-replicated volume in ' 'failover mode is not allowed.')) LOG.error(msg) volume.name_id = None raise exception.VolumeDriverException(message=msg) else: try: self._helpers.delete_vdisk( volume.name, force_unmap=force_unmap, force_delete=True) volumes_model_update.append({'id': volume.id, 'status': 'deleted'}) except exception.VolumeBackendAPIException as err: model_update['status'] = ( fields.GroupStatus.ERROR_DELETING) LOG.error("Failed to delete the volume %(vol)s of CG. 
" "Exception: %(exception)s.", {'vol': volume.name, 'exception': err}) volume.name_id = None volumes_model_update.append( {'id': volume.id, 'status': fields.GroupStatus.ERROR_DELETING}) volume.name_id = None if volume_utils.is_group_a_type( group, "temporary_volume_group_enabled"): return model_update, volumes_model_update else: volumegroup_name = self._get_volumegroup_name(group) try: self._helpers.delete_volumegroup(volumegroup_name) except exception.VolumeBackendAPIException as err: LOG.error("Failed to delete volume group %(volumegroup)s. " "Exception: %(exception)s.", {'volumegroup': volumegroup_name, 'exception': err}) model_update = {'status': fields.GroupStatus.ERROR_DELETING} return model_update, volumes_model_update def _update_volumegroup(self, context, group, add_volumes, remove_volumes, migrate_from_flashcopy): model_update = {'status': fields.GroupStatus.AVAILABLE} LOG.info("Update volume group: %(volumegroup_id)s. ", {'volumegroup_id': group.id}) volumegroup_name = self._get_volumegroup_name(group) # This code block fails during remove of volumes from group try: volumegroup = self._helpers.get_volumegroup(volumegroup_name) volumegroup_id = volumegroup["id"] except Exception as ex: if len(add_volumes) > 0: LOG.exception("Unable to retrieve volume group " "information. Failed with exception " "%(ex)s", ex) if not volumegroup and len(add_volumes) > 0: LOG.error("Failed to update group: %(volumegroup)s does not " "exist in backend.", {'volumegroup': volumegroup_name}) model_update['status'] = fields.GroupStatus.ERROR return model_update, None, None # Add volume(s) to the volume group added_vols = [] if not migrate_from_flashcopy: for volume in add_volumes: vol_name = volume.name try: if self._helpers._get_vdisk_fc_mappings(volume.name): reason = (_("Adding volume %(vol)s failed because " "it has legacy FlashCopy mappings and " "migrate_from_flashcopy flag is set to " "False. ") % {'vol': volume.name}) model_update['status'] = fields.GroupStatus.ERROR raise exception.InvalidInput(reason=reason) self._helpers.add_vdisk_to_volumegroup(vol_name, volumegroup_id) added_vols.append({'id': volume.id, 'group_id': group.id}) self._update_volumegroup_properties(context, volume, group) except exception.InvalidInput as err: LOG.error("Failed to add the volumes to " "the group. Exception: %(exception)s.", {'exception': err}) else: for volume in add_volumes: vol_name = volume.name try: self._helpers.add_vdisk_to_volumegroup(vol_name, volumegroup_id) added_vols.append({'id': volume.id, 'group_id': group.id}) self._update_volumegroup_properties(context, volume, group) except exception.VolumeBackendAPIException as err: model_update['status'] = fields.GroupStatus.ERROR LOG.error("Failed to add the volume %(vol)s to " "group. Exception: %(exception)s.", {'vol': volume.name, 'exception': err}) # Remove volume(s) from the volume group removed_vols = [] for volume in remove_volumes: vol_name = volume.name try: self._helpers.remove_vdisk_from_volumegroup(vol_name) removed_vols.append({'id': volume.id, 'group_id': None}) self._update_volumegroup_properties(context, volume) except exception.VolumeBackendAPIException as err: model_update['status'] = fields.GroupStatus.ERROR LOG.error("Failed to remove the volume %(vol)s from " "group. 
Exception: %(exception)s.", {'vol': volume.name, 'exception': err}) return model_update, added_vols, removed_vols def _update_temporary_volumegroup(self, context, group, add_volumes, remove_volumes, migrate_from_flashcopy): model_update = {'status': fields.GroupStatus.AVAILABLE} # Check if volume is allowed to be added added_vols = [] if not migrate_from_flashcopy: for volume in add_volumes: try: if self._helpers._get_vdisk_fc_mappings(volume.name): reason = (_("Adding volume %(vol)s failed because " "it has legacy FlashCopy mappings and " "migrate_from_flashcopy flag is set to " "False. ") % {'vol': volume.name}) model_update['status'] = fields.GroupStatus.ERROR raise exception.InvalidInput(reason=reason) added_vols.append({'id': volume.id, 'group_id': group.id}) except exception.InvalidInput as err: LOG.error("Failed to add the volume %(vol)s to " "group. Exception: %(exception)s.", {'vol': volume.name, 'exception': err}) else: for volume in add_volumes: added_vols.append({'id': volume.id, 'group_id': group.id}) removed_vols = [] for volume in remove_volumes: removed_vols.append({'id': volume.id, 'group_id': None}) return model_update, added_vols, removed_vols def _delete_hyperswap_grp(self, group, volumes): model_update = {'status': fields.GroupStatus.DELETED} volumes_model_update = [] for volume in volumes: try: self._helpers.delete_hyperswap_volume(volume.name, force_unmap=False, force_delete=True) volumes_model_update.append( {'id': volume.id, 'status': 'deleted'}) except exception.VolumeDriverException as err: LOG.error("Failed to delete the volume %(vol)s of CG. " "Exception: %(exception)s.", {'vol': volume.name, 'exception': err}) volumes_model_update.append( {'id': volume.id, 'status': 'error_deleting'}) return model_update, volumes_model_update def _update_hyperswap_group(self, context, group, add_volumes=None, remove_volumes=None): LOG.info("Update hyperswap group: %(group)s. 
", {'group': group.id}) model_update = {'status': fields.GroupStatus.AVAILABLE} added_vols = [] for volume in add_volumes: hyper_volume = self.is_volume_hyperswap(volume) if not hyper_volume: LOG.error("Failed to update rccg: the non hyperswap volume" " of %(vol)s can't be added to hyperswap group.", {'vol': volume.id}) model_update['status'] = fields.GroupStatus.ERROR continue added_vols.append({'id': volume.id, 'group_id': group.id}) removed_vols = [] for volume in remove_volumes: hyper_volume = self.is_volume_hyperswap(volume) if not hyper_volume: LOG.error("Failed to update rccg: the non hyperswap volume" " of %(vol)s can't be added to hyperswap group.", {'vol': volume.id}) model_update['status'] = fields.GroupStatus.ERROR continue removed_vols.append({'id': volume.id, 'group_id': None}) return model_update, added_vols, removed_vols def _get_volume_host_site_from_conf(self, volume, connector, iscsi=False): host_site = self.configuration.safe_get('storwize_preferred_host_site') select_site = None if not host_site: LOG.debug('There is no host_site configured for volume %s.', volume.name) return select_site if iscsi: for site, iqn in host_site.items(): if connector['initiator'].lower() in iqn.lower(): if select_site is None: select_site = site elif select_site != site: msg = _('Configured the host IQN in both sites.') LOG.error(msg) raise exception.InvalidConfigurationValue(message=msg) else: for wwpn in connector['wwpns']: for site, wwpn_list in host_site.items(): if wwpn.lower() in wwpn_list.lower(): if select_site is None: select_site = site elif select_site != site: msg = _('Configured the host wwpns not in the' ' same site.') LOG.error(msg) raise exception.InvalidConfigurationValue( message=msg) return select_site def _update_host_site_for_hyperswap_volume(self, host_name, host_site): host_info = self._helpers.ssh.lshost(host=host_name) if not host_info[0]['site_name'] and host_site: self._helpers.update_host(host_name, host_site) elif host_info[0]['site_name']: ref_host_site = host_info[0]['site_name'] if host_site and host_site != ref_host_site: msg = (_('The existing host site is %(ref_host_site)s,' ' but the new host site is %(host_site)s.') % {'ref_host_site': ref_host_site, 'host_site': host_site}) LOG.error(msg) raise exception.InvalidConfigurationValue(message=msg) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/drivers/ibm/storwize_svc/storwize_svc_fc.py0000664000175000017500000005753400000000000026431 0ustar00zuulzuul00000000000000# Copyright 2015 IBM Corp. # Copyright 2012 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # """ Volume FC driver for IBM Storwize family and SVC storage systems. Notes: 1. If you specify both a password and a key file, this driver will use the key file only. 2. When using a key file for authentication, it is up to the user or system administrator to store the private key in a safe manner. 3. 
The defaults for creating volumes are "-rsize 2% -autoexpand -grainsize 256 -warning 0". These can be changed in the configuration file or by using volume types(recommended only for advanced users). Limitations: 1. The driver expects CLI output in English, error messages may be in a localized format. 2. Clones and creating volumes from snapshots, where the source and target are of different sizes, is not supported. """ import collections from oslo_config import cfg from oslo_log import log as logging from oslo_utils import excutils from cinder import coordination from cinder import exception from cinder.i18n import _ from cinder import interface from cinder.volume import configuration from cinder.volume.drivers.ibm.storwize_svc import ( storwize_svc_common as storwize_common) from cinder.zonemanager import utils as fczm_utils LOG = logging.getLogger(__name__) storwize_svc_fc_opts = [ cfg.BoolOpt('storwize_svc_multipath_enabled', default=False, help='Connect with multipath (FC only; iSCSI multipath is ' 'controlled by Nova)'), ] CONF = cfg.CONF CONF.register_opts(storwize_svc_fc_opts, group=configuration.SHARED_CONF_GROUP) @interface.volumedriver class StorwizeSVCFCDriver(storwize_common.StorwizeSVCCommonDriver): """IBM Storwize V7000 and SVC FC volume driver. Version history: .. code-block:: none 1.0 - Initial driver 1.1 - FC support, create_cloned_volume, volume type support, get_volume_stats, minor bug fixes 1.2.0 - Added retype 1.2.1 - Code refactor, improved exception handling 1.2.2 - Fix bug #1274123 (races in host-related functions) 1.2.3 - Fix Fibre Channel connectivity: bug #1279758 (add delim to lsfabric, clear unused data from connections, ensure matching WWPNs by comparing lower case 1.2.4 - Fix bug #1278035 (async migration/retype) 1.2.5 - Added support for manage_existing (unmanage is inherited) 1.2.6 - Added QoS support in terms of I/O throttling rate 1.3.1 - Added support for volume replication 1.3.2 - Added support for consistency group 1.3.3 - Update driver to use ABC metaclasses 2.0 - Code refactor, split init file and placed shared methods for FC and iSCSI within the StorwizeSVCCommonDriver class 2.0.1 - Added support for multiple pools with model update 2.1 - Added replication V2 support to the global/metro mirror mode 2.1.1 - Update replication to version 2.1 2.2 - Add CG capability to generic volume groups 2.2.1 - Add vdisk mirror/stretch cluster support 2.2.2 - Add npiv support 2.2.3 - Add replication group support 2.2.4 - Add backup snapshots support 2.2.5 - Add hyperswap support 2.2.6 - Add support for host attachment using portsets """ VERSION = "2.2.6" # ThirdPartySystems wiki page CI_WIKI_NAME = "IBM_STORAGE_CI" def __init__(self, *args, **kwargs): super(StorwizeSVCFCDriver, self).__init__(*args, **kwargs) self.protocol = 'FC' self.configuration.append_config_values( storwize_svc_fc_opts) @staticmethod def get_driver_options(): return storwize_common.storwize_svc_opts + storwize_svc_fc_opts def validate_connector(self, connector): """Check connector for at least one enabled FC protocol.""" if 'wwpns' not in connector: LOG.error('The connector does not contain the required ' 'information.') raise exception.InvalidConnectorException( missing='wwpns') def initialize_connection_snapshot(self, snapshot, connector): """Perform attach snapshot for backup snapshots.""" # If the snapshot's source volume is a replication volume and the # replication volume has failed over to aux_backend, # attach the snapshot will be failed. 
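# Editor's sketch (illustrative only; the id/name values are hypothetical):
# initialize_connection_snapshot() below wraps the snapshot in a throwaway
# namedtuple so the regular volume attach path can be reused unchanged;
# display_name 'backup-snapshot' is the marker the attach code checks for.
import collections

Volume = collections.namedtuple(
    'Volume', ['id', 'name', 'volume_type_id', 'display_name'])
virtual_volume = Volume(id='6f7cbe9e-0000-0000-0000-000000000000',
                        name='snapshot-6f7cbe9e',
                        volume_type_id=None,
                        display_name='backup-snapshot')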
self._check_snapshot_replica_volume_status(snapshot) vol_attrs = ['id', 'name', 'volume_type_id', 'display_name'] Volume = collections.namedtuple('Volume', vol_attrs) volume = Volume(id=snapshot.id, name=snapshot.name, volume_type_id=snapshot.volume_type_id, display_name='backup-snapshot') return self.initialize_connection(volume, connector) def initialize_connection(self, volume, connector): """Perform necessary work to make a FC connection.""" @coordination.synchronized('storwize-host-{system_id}-{host}') def _do_initialize_connection_locked(system_id, host): conn_info = self._do_initialize_connection(volume, connector) fczm_utils.add_fc_zone(conn_info) return conn_info return _do_initialize_connection_locked(self._state['system_id'], connector['host']) def _do_initialize_connection(self, volume, connector): """Perform necessary work to make a FC connection. To be able to create an FC connection from a given host to a volume, we must: 1. Translate the given WWNN to a host name 2. Create new host on the storage system if it does not yet exist 3. Map the volume to the host if it is not already done 4. Return the connection information for relevant nodes (in the proper I/O group) """ LOG.debug('enter: initialize_connection: volume %(vol)s with connector' ' %(conn)s', {'vol': volume.id, 'conn': connector}) if volume.display_name == 'backup-snapshot': LOG.debug('It is a virtual volume %(vol)s for attach snapshot.', {'vol': volume.id}) volume_name = volume.name backend_helper = self._helpers node_state = self._state else: volume_name, backend_helper, node_state = self._get_vol_sys_info( volume) host_site = None backend_helper.initialize_host_info() is_hyper_volume = self.is_volume_hyperswap(volume) if is_hyper_volume: host_site = self._get_volume_host_site_from_conf(volume, connector) # The host_site is necessary for hyperswap volume. if is_hyper_volume and host_site is None: msg = (_('There is no correct storwize_preferred_host_site ' 'configured for a hyperswap volume %s.') % volume.name) LOG.error(msg) raise exception.VolumeDriverException(message=msg) # Try creating the host, if host creation is successfull continue # with intialization flow, else search for host object defined for # this connector info. 
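# Editor's sketch (hypothetical values): the FC connector consumed by the
# create-or-lookup host flow below carries the initiator WWPNs, and the
# caller's host name is combined with the backend system_id to form the
# per-host lock name used by initialize_connection above.
fc_connector = {
    'host': 'compute-1',
    'wwpns': ['10000090fa123456', '10000090fa654321'],
}
system_id = '0000020321209C70'  # assumed value of self._state['system_id']
lock_name = 'storwize-host-%s-%s' % (system_id, fc_connector['host'])
# -> 'storwize-host-0000020321209C70-compute-1'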
host_name = None try: opts = self._get_vdisk_params(volume.volume_type_id) host_name = ( backend_helper.create_host(connector, site=host_site, portset=opts['storwize_portset'])) except exception.VolumeBackendAPIException as excp: if "CMMVC6035E" in excp.msg: msg = (_('Host already exists for connector ' '%(conn)s'), {'conn': connector}) LOG.info(msg) host_name = backend_helper.get_host_from_connector(connector) else: msg = (_('Error creating host %(ex)s'), {'ex': excp.msg}) LOG.error(msg) raise exception.VolumeDriverException(message=msg) if is_hyper_volume: self._update_host_site_for_hyperswap_volume(host_name, host_site) volume_attributes = backend_helper.get_vdisk_attributes(volume_name) if volume_attributes is None: msg = (_('initialize_connection: Failed to get attributes' ' for volume %s.') % volume_name) LOG.error(msg) raise exception.VolumeDriverException(message=msg) multihostmap = self.configuration.storwize_svc_multihostmap_enabled lun_id = backend_helper.map_vol_to_host(volume_name, host_name, multihostmap) try: preferred_node = volume_attributes['preferred_node_id'] IO_group = volume_attributes['IO_group_id'] except KeyError as e: LOG.error('Did not find expected column name in ' 'lsvdisk: %s.', e) raise exception.VolumeBackendAPIException( data=_('initialize_connection: Missing volume attribute for ' 'volume %s.') % volume_name) try: # Get preferred node and other nodes in I/O group preferred_node_entry = None io_group_nodes = [] for node in node_state['storage_nodes'].values(): if node['id'] == preferred_node: preferred_node_entry = node if node['IO_group'] == IO_group: io_group_nodes.append(node) if not len(io_group_nodes): msg = (_('initialize_connection: No node found in ' 'I/O group %(gid)s for volume %(vol)s.') % {'gid': IO_group, 'vol': volume_name}) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) if not preferred_node_entry: # Get 1st node in I/O group preferred_node_entry = io_group_nodes[0] LOG.warning('initialize_connection: Did not find a ' 'preferred node for volume %s.', volume_name) properties = {} properties['target_discovered'] = False properties['target_lun'] = lun_id properties['volume_id'] = volume.id conn_wwpns = backend_helper.get_conn_fc_wwpns(host_name) # If conn_wwpns is empty, then that means that there were # no target ports with visibility to any of the initiators # so we return all target ports. 
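# Editor's sketch (simplified; helper and data are stand-ins): when no
# target port is visible to the initiators, the block below falls back to
# returning every target port, taking physical node WWPNs on firmware older
# than 7.7.0.0 and NPIV host-I/O WWPNs on newer firmware.
def collect_fallback_wwpns(storage_nodes, code_level, get_npiv_wwpns):
    """Collect all target WWPNs, honouring the 7.7.0.0 NPIV cut-over."""
    wwpns = []
    for node in storage_nodes.values():
        if code_level < (7, 7, 0, 0):
            wwpns.extend(node['WWPN'])
        else:
            # get_npiv_wwpns stands in for the backend helper call.
            wwpns.extend(get_npiv_wwpns(node_id=node['id']))
    return wwpns

example_nodes = {'1': {'id': '1',
                       'WWPN': ['500507680110f8e5', '500507680120f8e5']}}
collect_fallback_wwpns(example_nodes, (7, 6, 1, 0), lambda node_id: [])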
if len(conn_wwpns) == 0: for node in node_state['storage_nodes'].values(): # The Storwize/svc release 7.7.0.0 introduced NPIV feature, # Different commands be used to get the wwpns for host I/O if node_state['code_level'] < (7, 7, 0, 0): conn_wwpns.extend(node['WWPN']) else: npiv_wwpns = backend_helper.get_npiv_wwpns( node_state['code_level'], node_id=node['id'], host_io="yes", portset=opts['storwize_portset']) conn_wwpns.extend(npiv_wwpns) properties['target_wwn'] = conn_wwpns i_t_map = self._make_initiator_target_map(connector['wwpns'], conn_wwpns) properties['initiator_target_map'] = i_t_map # specific for z/VM, refer to cinder bug 1323993 if "zvm_fcp" in connector: properties['zvm_fcp'] = connector['zvm_fcp'] except Exception as ex: with excutils.save_and_reraise_exception(): LOG.error('initialize_connection: Failed to export volume ' '%(vol)s due to %(ex)s.', {'vol': volume.name, 'ex': ex}) self._do_terminate_connection(volume, connector) LOG.error('initialize_connection: Failed ' 'to collect return ' 'properties for volume %(vol)s and connector ' '%(conn)s.\n', {'vol': volume, 'conn': connector}) LOG.debug('leave: initialize_connection:\n volume: %(vol)s\n ' 'connector %(conn)s\n properties: %(prop)s', {'vol': volume.id, 'conn': connector, 'prop': properties}) return {'driver_volume_type': 'fibre_channel', 'data': properties, } def _make_initiator_target_map(self, initiator_wwpns, target_wwpns): """Build a simplistic all-to-all mapping.""" i_t_map = {} for i_wwpn in initiator_wwpns: i_t_map[str(i_wwpn)] = [] for t_wwpn in target_wwpns: i_t_map[i_wwpn].append(t_wwpn) return i_t_map def terminate_connection_snapshot(self, snapshot, connector, **kwargs): """Perform detach snapshot for backup snapshots.""" vol_attrs = ['id', 'name', 'display_name'] Volume = collections.namedtuple('Volume', vol_attrs) volume = Volume(id=snapshot.id, name=snapshot.name, display_name='backup-snapshot') return self.terminate_connection(volume, connector, **kwargs) def terminate_connection(self, volume, connector, **kwargs): """Cleanup after an FC connection has been terminated.""" # If a fake connector is generated by nova when the host # is down, then the connector will not have a host property, # In this case construct the lock without the host property # so that all the fake connectors to an SVC are serialized host = connector['host'] if 'host' in connector else "" attachment_count = 0 if hasattr(volume, 'multiattach') and volume.multiattach: try: attachment_list = volume.volume_attachment for attachment in attachment_list: if (attachment.attach_status == "attached" and attachment.attached_host == host): attachment_count += 1 except AttributeError: pass if attachment_count > 1: LOG.debug("Volume %(volume)s is attached to multiple " "instances on host %(host_name)s, " "skip terminate volume connection", {'volume': volume.name, 'host_name': volume.host.split('@')[0]}) return @coordination.synchronized('storwize-host-{system_id}-{host}') def _do_terminate_connection_locked(system_id, host): conn_info = self._do_terminate_connection(volume, connector, **kwargs) fczm_utils.remove_fc_zone(conn_info) return conn_info return _do_terminate_connection_locked(self._state['system_id'], host) def _do_terminate_connection(self, volume, connector, **kwargs): """Cleanup after an FC connection has been terminated. When we clean up a terminated connection between a given connector and volume, we: 1. Translate the given connector to a host name 2. Remove the volume-to-host mapping if it exists 3. 
Delete the host if it has no more mappings (hosts are created automatically by this driver when mappings are created) """ LOG.debug('enter: terminate_connection: volume %(vol)s with connector' ' %(conn)s', {'vol': volume.id, 'conn': connector}) (info, host_name, vol_name, backend_helper, node_state) = self._get_map_info_from_connector(volume, connector) if not backend_helper: return info # Unmap volumes, if hostname is None, need to get value from vdiskmap host_name = backend_helper.unmap_vol_from_host(vol_name, host_name) # Host_name could be none if host_name: resp = backend_helper.check_host_mapped_vols(host_name) if not len(resp): LOG.info("Need to remove FC Zone, building initiator " "target map.") # Build info data structure for zone removing if 'wwpns' in connector and host_name: target_wwpns = [] # Returning all target_wwpns in storage_nodes, since # we cannot determine which wwpns are logged in during # a VM deletion. for node in node_state['storage_nodes'].values(): target_wwpns.extend(node['WWPN']) init_targ_map = (self._make_initiator_target_map (connector['wwpns'], target_wwpns)) info['data'] = {'initiator_target_map': init_targ_map} # No volume mapped to the host, delete host from array backend_helper.delete_host(host_name) LOG.debug('leave: terminate_connection: volume %(vol)s with ' 'connector %(conn)s, info %(info)s', {'vol': volume.id, 'conn': connector, 'info': info}) return info def _get_volume_connection_info(self, ctxt, volume, host_info, iogrp_list): connector = {'wwpns': []} connection_info = {"driver_volume_type": "fibre_channel"} data = {} for wwpn in host_info.select('WWPN'): connector['wwpns'].append(wwpn) vol_name, backend_helper, node_state = self._get_vol_sys_info(volume) data['target_discovered'] = False data['volume_id'] = volume.id conn_wwpns = [] for node in node_state['storage_nodes'].values(): if node['IO_group'] not in iogrp_list: continue # The Storwize/svc release 7.7.0.0 introduced NPIV feature, # Different commands be used to get the wwpns for host I/O if node_state['code_level'] < (7, 7, 0, 0): conn_wwpns.extend(node['WWPN']) else: npivwwpns = ( backend_helper.get_npiv_wwpns(node_state['code_level'], node_id=node['id'], host_io="yes")) conn_wwpns.extend(npivwwpns) i_t_map = self._make_initiator_target_map(connector['wwpns'], conn_wwpns) data["initiator_target_map"] = i_t_map data["target_wwn"] = conn_wwpns connection_info['data'] = data connection_info['connector'] = connector return connection_info def _retype_hyperswap_volume(self, ctxt, volume, host, old_opts, new_opts, old_pool, new_pool, vdisk_changes, need_copy, new_type): if (old_opts['volume_topology'] != 'hyperswap' and new_opts['volume_topology'] == 'hyperswap'): LOG.debug('retype: Convert a normal volume %s to hyperswap ' 'volume.', volume.name) conn_info = {} if volume.previous_status == 'in-use': vdisk_info = self._helpers.ssh.lsvdiskhostmap(volume.name) peer_pool = new_opts['peer_pool'] iogrp_list = self._helpers.get_hyperswap_pool_io_grp( self._state, new_pool, peer_pool) for mapping_info in vdisk_info: host = mapping_info['host_name'] try: host_info = self._helpers.ssh.lshost(host) conn_info[host] = self._get_volume_connection_info( ctxt, volume, host_info, iogrp_list) host_site = self._get_volume_host_site_from_conf( volume, conn_info[host].get('connector')) self._update_host_site_for_hyperswap_volume( host, host_site) self._helpers.ssh.addhostiogrp(host, iogrp_list) except Exception as ex: msg = _('Error updating host %(host)s due to %(ex)s', {'host': host, 'ex': ex}) raise 
exception.VolumeBackendAPIException(data=msg) self._helpers.convert_volume_to_hyperswap(volume.name, new_opts, self._state) if volume.previous_status == 'in-use': for host, info in conn_info.items(): try: fczm_utils.add_fc_zone(info) except Exception as ex: self._helpers.convert_hyperswap_volume_to_normal( volume.name, new_opts['peer_pool']) msg = _('Zoning failed for volume %(vol)s and host ' '%(host)s due to %(ex)s.', {'vol': volume.name, 'host': host, 'ex': ex}) raise exception.VolumeBackendAPIException(data=msg) elif (old_opts['volume_topology'] == 'hyperswap' and new_opts['volume_topology'] != 'hyperswap'): LOG.debug('retype: Convert a hyperswap volume %s to normal ' 'volume.', volume.name) if new_pool == old_pool: self._helpers.convert_hyperswap_volume_to_normal( volume.name, old_opts['peer_pool']) elif new_pool == old_opts['peer_pool']: self._helpers.convert_hyperswap_volume_to_normal( volume.name, old_pool) if volume.previous_status == 'in-use': vdisk_info = self._helpers.ssh.lsvdiskhostmap(volume.name) for mapping_info in vdisk_info: res = self._helpers.check_host_mapped_vols( mapping_info['host_name']) if len(res) == 1: self._helpers.update_host(mapping_info['host_name'], None) else: rel_info = self._helpers.get_relationship_info(volume.name) aux_vdisk = rel_info['aux_vdisk_name'] if need_copy: self.add_vdisk_copy(aux_vdisk, old_opts['peer_pool'], new_type, auto_delete=True) elif vdisk_changes: self._helpers.change_vdisk_options(aux_vdisk, vdisk_changes, new_opts, self._state) if need_copy: self.add_vdisk_copy(volume.name, old_pool, new_type, auto_delete=True) elif vdisk_changes: self._helpers.change_vdisk_options(volume.name, vdisk_changes, new_opts, self._state) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/drivers/ibm/storwize_svc/storwize_svc_iscsi.py0000664000175000017500000005242400000000000027144 0ustar00zuulzuul00000000000000# Copyright 2015 IBM Corp. # Copyright 2012 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # """ISCSI volume driver for IBM Storwize family and SVC storage systems. Notes: 1. If you specify both a password and a key file, this driver will use the key file only. 2. When using a key file for authentication, it is up to the user or system administrator to store the private key in a safe manner. 3. The defaults for creating volumes are "-rsize 2% -autoexpand -grainsize 256 -warning 0". These can be changed in the configuration file or by using volume types(recommended only for advanced users). Limitations: 1. The driver expects CLI output in English, error messages may be in a localized format. 2. Clones and creating volumes from snapshots, where the source and target are of different sizes, is not supported. 
""" import collections from oslo_config import cfg from oslo_log import log as logging from oslo_utils import excutils from oslo_utils import strutils from cinder.common import constants from cinder import coordination from cinder import exception from cinder.i18n import _ from cinder import interface from cinder.volume import configuration as conf from cinder.volume.drivers.ibm.storwize_svc import ( storwize_svc_common as storwize_common) LOG = logging.getLogger(__name__) storwize_svc_iscsi_opts = [ cfg.BoolOpt('storwize_svc_iscsi_chap_enabled', default=True, help='Configure CHAP authentication for iSCSI connections ' '(Default: Enabled)'), ] CONF = cfg.CONF CONF.register_opts(storwize_svc_iscsi_opts, group=conf.SHARED_CONF_GROUP) @interface.volumedriver class StorwizeSVCISCSIDriver(storwize_common.StorwizeSVCCommonDriver): """IBM Storwize V7000 and SVC iSCSI volume driver. Version history: .. code-block:: none 1.0 - Initial driver 1.1 - FC support, create_cloned_volume, volume type support, get_volume_stats, minor bug fixes 1.2.0 - Added retype 1.2.1 - Code refactor, improved exception handling 1.2.2 - Fix bug #1274123 (races in host-related functions) 1.2.3 - Fix Fibre Channel connectivity: bug #1279758 (add delim to lsfabric, clear unused data from connections, ensure matching WWPNs by comparing lower case 1.2.4 - Fix bug #1278035 (async migration/retype) 1.2.5 - Added support for manage_existing (unmanage is inherited) 1.2.6 - Added QoS support in terms of I/O throttling rate 1.3.1 - Added support for volume replication 1.3.2 - Added support for consistency group 1.3.3 - Update driver to use ABC metaclasses 2.0 - Code refactor, split init file and placed shared methods for FC and iSCSI within the StorwizeSVCCommonDriver class 2.0.1 - Added support for multiple pools with model update 2.1 - Added replication V2 support to the global/metro mirror mode 2.1.1 - Update replication to version 2.1 2.2 - Add CG capability to generic volume groups 2.2.1 - Add vdisk mirror/stretch cluster support 2.2.2 - Add replication group support 2.2.3 - Add backup snapshots support 2.2.4 - Add hyperswap support 2.2.5 - Add support for host attachment using portsets """ VERSION = "2.2.5" # ThirdPartySystems wiki page CI_WIKI_NAME = "IBM_STORAGE_CI" def __init__(self, *args, **kwargs): super(StorwizeSVCISCSIDriver, self).__init__(*args, **kwargs) self.protocol = constants.ISCSI self.configuration.append_config_values( storwize_svc_iscsi_opts) @staticmethod def get_driver_options(): return storwize_common.storwize_svc_opts + storwize_svc_iscsi_opts def validate_connector(self, connector): """Check connector for at least one enabled iSCSI protocol.""" if 'initiator' not in connector: LOG.error('The connector does not contain the required ' 'information.') raise exception.InvalidConnectorException( missing='initiator') def initialize_connection_snapshot(self, snapshot, connector): """Perform attach snapshot for backup snapshots.""" # If the snapshot's source volume is a replication volume and the # replication volume has failed over to aux_backend, # attach the snapshot will be failed. 
self._check_snapshot_replica_volume_status(snapshot) vol_attrs = ['id', 'name', 'volume_type_id', 'display_name'] Volume = collections.namedtuple('Volume', vol_attrs) volume = Volume(id=snapshot.id, name=snapshot.name, volume_type_id=snapshot.volume_type_id, display_name='backup-snapshot') return self.initialize_connection(volume, connector) def initialize_connection(self, volume, connector): """Perform necessary work to make an iSCSI connection.""" @coordination.synchronized('storwize-host-{system_id}-{host}') def _do_initialize_connection_locked(system_id, host): return self._do_initialize_connection(volume, connector) return _do_initialize_connection_locked(self._state['system_id'], connector['host']) def _do_initialize_connection(self, volume, connector): """Perform necessary work to make an iSCSI connection. To be able to create an iSCSI connection from a given host to a volume, we must: 1. Translate the given iSCSI name to a host name 2. Create new host on the storage system if it does not yet exist 3. Map the volume to the host if it is not already done 4. Return the connection information for relevant nodes (in the proper I/O group) """ LOG.debug('enter: initialize_connection: volume %(vol)s with connector' ' %(conn)s', {'vol': volume.id, 'conn': connector}) if volume.display_name == 'backup-snapshot': LOG.debug('It is a virtual volume %(vol)s for attach snapshot.', {'vol': volume.id}) volume_name = volume.name backend_helper = self._helpers node_state = self._state else: volume_name, backend_helper, node_state = self._get_vol_sys_info( volume) backend_helper.initialize_host_info() host_site = self._get_volume_host_site_from_conf(volume, connector, iscsi=True) is_hyper_volume = self.is_volume_hyperswap(volume) if is_hyper_volume and host_site is None: msg = (_('There is no correct storwize_preferred_host_site ' 'configured for a hyperswap volume %s.') % volume.name) LOG.error(msg) raise exception.VolumeDriverException(message=msg) # Try creating the host, if host creation is successfull continue # with intialization flow, else search for host object defined for # this connector info. 
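# Editor's sketch (simplified stand-in for the CHAP handling a few lines
# below): a CHAP secret is provisioned only when CHAP is enabled and the
# host has none yet; a leftover secret with CHAP disabled only produces a
# warning and is still returned, matching the driver's behaviour.
def resolve_chap_secret(chap_enabled, existing_secret, create_secret):
    if chap_enabled and existing_secret is None:
        return create_secret()
    if not chap_enabled and existing_secret:
        print('CHAP secret exists for host but CHAP is disabled.')
    return existing_secret

resolve_chap_secret(True, None, lambda: 'hypothetical-chap-secret')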
host_name = None try: opts = self._get_vdisk_params(volume.volume_type_id) host_name = ( backend_helper.create_host(connector, iscsi=True, site=host_site, portset=opts['storwize_portset'])) except exception.VolumeBackendAPIException as excp: if "CMMVC6578E" in excp.msg: msg = (_('Host already exists for connector ' '%(conn)s'), {'conn': connector}) LOG.info(msg) host_name = backend_helper.get_host_from_connector(connector, iscsi=True) else: msg = (_('Error creating host %(ex)s'), {'ex': excp.msg}) LOG.error(msg) raise exception.VolumeDriverException(message=msg) if is_hyper_volume: self._update_host_site_for_hyperswap_volume(host_name, host_site) chap_secret = backend_helper.get_chap_secret_for_host(host_name) chap_enabled = self.configuration.storwize_svc_iscsi_chap_enabled if chap_enabled and chap_secret is None: chap_secret = backend_helper.add_chap_secret_to_host(host_name) elif not chap_enabled and chap_secret: LOG.warning('CHAP secret exists for host but CHAP is disabled.') multihostmap = self.configuration.storwize_svc_multihostmap_enabled lun_id = backend_helper.map_vol_to_host(volume_name, host_name, multihostmap) try: properties = self._get_single_iscsi_data(volume, connector, lun_id, chap_secret, opts['storwize_portset']) multipath = connector.get('multipath', False) if multipath: properties = ( self._get_multi_iscsi_data(volume, connector, lun_id, properties, backend_helper, node_state, opts['storwize_portset'])) except Exception as ex: with excutils.save_and_reraise_exception(): LOG.error('initialize_connection: Failed to export volume ' '%(vol)s due to %(ex)s.', {'vol': volume.name, 'ex': ex}) self._do_terminate_connection(volume, connector) LOG.error('initialize_connection: Failed ' 'to collect return ' 'properties for volume %(vol)s and connector ' '%(conn)s.\n', {'vol': volume, 'conn': connector}) # properties may contain chap secret so must be masked LOG.debug('leave: initialize_connection:\n volume: %(vol)s\n ' 'connector: %(conn)s\n properties: %(prop)s', {'vol': volume.id, 'conn': connector, 'prop': strutils.mask_password(properties)}) return {'driver_volume_type': 'iscsi', 'data': properties, } def _get_single_iscsi_data(self, volume, connector, lun_id, chap_secret, portset): LOG.debug('enter: _get_single_iscsi_data: volume %(vol)s with ' 'connector %(conn)s lun_id %(lun_id)s', {'vol': volume.id, 'conn': connector, 'lun_id': lun_id}) if volume.display_name == 'backup-snapshot': LOG.debug('It is a virtual volume %(vol)s for attach snapshot', {'vol': volume.name}) volume_name = volume.name backend_helper = self._helpers node_state = self._state else: volume_name, backend_helper, node_state = self._get_vol_sys_info( volume) volume_attributes = backend_helper.get_vdisk_attributes(volume_name) if volume_attributes is None: msg = (_('_get_single_iscsi_data: Failed to get attributes' ' for volume %s.') % volume_name) LOG.error(msg) raise exception.VolumeDriverException(message=msg) try: preferred_node = volume_attributes['preferred_node_id'] IO_group = volume_attributes['IO_group_id'] except KeyError as e: msg = (_('_get_single_iscsi_data: Did not find expected column' ' name in %(volume)s: %(key)s %(error)s.'), {'volume': volume_name, 'key': e.args[0], 'error': e}) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) # Get preferred node and other nodes in I/O group preferred_node_entry = None io_group_nodes = [] if node_state['code_level'] >= (8, 4, 2, 0): backend_helper.add_iscsi_ip_addrs(node_state['storage_nodes'], node_state['code_level'], 
portset=portset) for node in node_state['storage_nodes'].values(): if self.protocol not in node['enabled_protocols']: continue if node['IO_group'] != IO_group: continue io_group_nodes.append(node) if node['id'] == preferred_node: preferred_node_entry = node if not len(io_group_nodes): msg = (_('_get_single_iscsi_data: No node found in ' 'I/O group %(gid)s for volume %(vol)s.') % { 'gid': IO_group, 'vol': volume_name}) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) if not preferred_node_entry: # Get 1st node in I/O group preferred_node_entry = io_group_nodes[0] LOG.warning('_get_single_iscsi_data: Did not find a ' 'preferred node for volume %s.', volume_name) properties = { 'target_discovered': False, 'target_lun': lun_id, 'volume_id': volume.id} if node_state['code_level'] >= (8, 4, 2, 0): if preferred_node_entry['IP_address']: ipaddr = preferred_node_entry['IP_address'][0] else: if preferred_node_entry['ipv4']: ipaddr = preferred_node_entry['ipv4'][0] else: ipaddr = preferred_node_entry['ipv6'][0] properties['target_portal'] = '%s:%s' % (ipaddr, '3260') properties['target_iqn'] = preferred_node_entry['iscsi_name'] if chap_secret: properties.update(auth_method='CHAP', auth_username=connector['initiator'], auth_password=chap_secret, discovery_auth_method='CHAP', discovery_auth_username=connector['initiator'], discovery_auth_password=chap_secret) # properties may contain chap secret so must be masked LOG.debug('leave: _get_single_iscsi_data:\n volume: %(vol)s\n ' 'connector: %(conn)s\n lun_id: %(lun_id)s\n ' 'properties: %(prop)s', {'vol': volume.id, 'conn': connector, 'lun_id': lun_id, 'prop': strutils.mask_password(properties)}) return properties def _get_multi_iscsi_data(self, volume, connector, lun_id, properties, backend_helper, node_state, portset): LOG.debug('enter: _get_multi_iscsi_data: volume %(vol)s with ' 'connector %(conn)s lun_id %(lun_id)s', {'vol': volume.id, 'conn': connector, 'lun_id': lun_id}) try: if node_state['code_level'] >= (8, 4, 2, 0): portset_name = portset if portset else 'portset0' resp = backend_helper.ssh.lsip(portset=portset_name) else: resp = backend_helper.ssh.lsportip() except Exception as ex: msg = (_('_get_multi_iscsi_data: Failed to ' 'get port ip because of exception: ' '%s.') % ex) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) properties['target_iqns'] = [] properties['target_portals'] = [] properties['target_luns'] = [] for node in node_state['storage_nodes'].values(): for ip_data in resp: if ip_data['node_id'] != node['id']: continue link_state = ip_data.get('link_state', None) valid_port = '' if node_state['code_level'] >= (8, 4, 2, 0): valid_port = ip_data['IP_address'] else: if ((ip_data['state'] == 'configured' and link_state == 'active') or ip_data['state'] == 'online'): valid_port = (ip_data['IP_address'] or ip_data['IP_address_6']) if valid_port: properties['target_portals'].append( '%s:%s' % (valid_port, '3260')) properties['target_iqns'].append( node['iscsi_name']) properties['target_luns'].append(lun_id) if not len(properties['target_portals']): msg = (_('_get_multi_iscsi_data: Failed to find valid port ' 'for volume %s.') % volume.name) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) # properties may contain chap secret so must be masked LOG.debug('leave: _get_multi_iscsi_data:\n volume: %(vol)s\n ' 'connector: %(conn)s\n lun_id: %(lun_id)s\n ' 'properties: %(prop)s', {'vol': volume.id, 'conn': connector, 'lun_id': lun_id, 'prop': strutils.mask_password(properties)}) return 
properties def terminate_connection_snapshot(self, snapshot, connector, **kwargs): """Perform detach snapshot for backup snapshots.""" vol_attrs = ['id', 'name', 'display_name'] Volume = collections.namedtuple('Volume', vol_attrs) volume = Volume(id=snapshot.id, name=snapshot.name, display_name='backup-snapshot') return self.terminate_connection(volume, connector, **kwargs) def terminate_connection(self, volume, connector, **kwargs): """Cleanup after an iSCSI connection has been terminated.""" # If a fake connector is generated by nova when the host # is down, then the connector will not have a host property, # In this case construct the lock without the host property # so that all the fake connectors to an SVC are serialized host = connector['host'] if 'host' in connector else "" attachment_count = 0 if hasattr(volume, 'multiattach') and volume.multiattach: try: attachment_list = volume.volume_attachment for attachment in attachment_list: if (attachment.attach_status == "attached" and attachment.attached_host == host): attachment_count += 1 except AttributeError: pass if attachment_count > 1: LOG.debug("Volume %(volume)s is attached to multiple " "instances on host %(host_name)s, " "skip terminate volume connection", {'volume': volume.name, 'host_name': volume.host.split('@')[0]}) return @coordination.synchronized('storwize-host-{system_id}-{host}') def _do_terminate_connection_locked(system_id, host): return self._do_terminate_connection(volume, connector, **kwargs) return _do_terminate_connection_locked(self._state['system_id'], host) def _do_terminate_connection(self, volume, connector, **kwargs): """Cleanup after an iSCSI connection has been terminated. When we clean up a terminated connection between a given connector and volume, we: 1. Translate the given connector to a host name 2. Remove the volume-to-host mapping if it exists 3. Delete the host if it has no more mappings (hosts are created automatically by this driver when mappings are created) """ LOG.debug('enter: terminate_connection: volume %(vol)s with connector' ' %(conn)s', {'vol': volume.id, 'conn': connector}) (info, host_name, vol_name, backend_helper, node_state) = self._get_map_info_from_connector(volume, connector, iscsi=True) if not backend_helper: return info # Unmap volumes, if hostname is None, need to get value from vdiskmap host_name = backend_helper.unmap_vol_from_host(vol_name, host_name) # Host_name could be none if host_name: resp = backend_helper.check_host_mapped_vols(host_name) if not len(resp): backend_helper.delete_host(host_name) LOG.debug('leave: terminate_connection: volume %(vol)s with ' 'connector %(conn)s', {'vol': volume.id, 'conn': connector}) return info ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/drivers/infinidat.py0000664000175000017500000017076100000000000021653 0ustar00zuulzuul00000000000000# Copyright 2022 Infinidat Ltd. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
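# Editor's sketch (pattern illustration, not driver code): the Infinidat
# driver below guards its optional infinisdk dependency at import time and
# refuses to start when the library is absent, and wraps its public methods
# so SDK errors surface as Cinder backend exceptions. A generic version of
# such an exception-translating decorator, with a toy usage:
import functools

def translate_errors(error_cls, target_exc):
    """Re-raise error_cls raised inside the wrapped function as target_exc."""
    def decorator(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            try:
                return func(*args, **kwargs)
            except error_cls as ex:
                raise target_exc('Caught exception from SDK: %s' % ex)
        return wrapper
    return decorator

@translate_errors(ValueError, RuntimeError)
def parse_size(value):
    return int(value)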
"""INFINIDAT InfiniBox Volume Driver.""" import collections from contextlib import contextmanager import functools import math import platform import socket import uuid from oslo_config import cfg from oslo_log import log as logging from oslo_utils import units from cinder.common import constants from cinder import context as cinder_context from cinder import coordination from cinder import exception from cinder.i18n import _ from cinder import interface from cinder import objects from cinder.objects import fields from cinder import version from cinder.volume import configuration from cinder.volume.drivers.san import san from cinder.volume import volume_types from cinder.volume import volume_utils from cinder.zonemanager import utils as fczm_utils try: # we check that infinisdk is installed. the other imported modules # are dependencies, so if any of the dependencies are not importable # we assume infinisdk is not installed import capacity from infi.dtypes import iqn from infi.dtypes import wwn import infinisdk except ImportError: from oslo_utils import units as capacity infinisdk = None iqn = None wwn = None LOG = logging.getLogger(__name__) VENDOR_NAME = 'INFINIDAT' BACKEND_QOS_CONSUMERS = frozenset(['back-end', 'both']) QOS_MAX_IOPS = 'maxIOPS' QOS_MAX_BWS = 'maxBWS' # Max retries for the REST API client in case of a failure: _API_MAX_RETRIES = 5 _INFINIDAT_CINDER_IDENTIFIER = ( "cinder/%s" % version.version_info.release_string()) infinidat_opts = [ cfg.StrOpt('infinidat_pool_name', help='Name of the pool from which volumes are allocated'), # We can't use the existing "storage_protocol" option because its default # is "iscsi", but for backward-compatibility our default must be "fc" cfg.StrOpt('infinidat_storage_protocol', ignore_case=True, default='fc', choices=['iscsi', 'fc'], help='Protocol for transferring data between host and ' 'storage back-end.'), cfg.ListOpt('infinidat_iscsi_netspaces', default=[], help='List of names of network spaces to use for iSCSI ' 'connectivity'), cfg.BoolOpt('infinidat_use_compression', help='Specifies whether to enable (true) or disable (false) ' 'compression for all newly created volumes. Leave this ' 'unset (commented out) for all created volumes to ' 'inherit their compression setting from their parent ' 'pool at creation time. The default value is unset.') ] CONF = cfg.CONF CONF.register_opts(infinidat_opts, group=configuration.SHARED_CONF_GROUP) def infinisdk_to_cinder_exceptions(func): @functools.wraps(func) def wrapper(*args, **kwargs): try: return func(*args, **kwargs) except infinisdk.core.exceptions.InfiniSDKException as ex: # string formatting of 'ex' includes http code and url msg = _('Caught exception from infinisdk: %s') % ex LOG.exception(msg) raise exception.VolumeBackendAPIException(data=msg) return wrapper @interface.volumedriver class InfiniboxVolumeDriver(san.SanISCSIDriver): """INFINIDAT InfiniBox Cinder driver. Version history: .. 
code-block:: none 1.0 - initial release 1.1 - switched to use infinisdk package 1.2 - added support for iSCSI protocol 1.3 - added generic volume groups support 1.4 - added support for QoS 1.5 - added support for volume compression 1.6 - added support for volume multi-attach 1.7 - fixed iSCSI to return all portals 1.8 - added revert to snapshot 1.9 - added manage/unmanage/manageable-list volume/snapshot 1.10 - added support for TLS/SSL communication 1.11 - fixed generic volume migration 1.12 - fixed volume multi-attach 1.13 - fixed consistency groups feature 1.14 - added storage assisted volume migration 1.15 - fixed backup for attached volume """ VERSION = '1.15' # ThirdPartySystems wiki page CI_WIKI_NAME = "INFINIDAT_CI" def __init__(self, *args, **kwargs): super(InfiniboxVolumeDriver, self).__init__(*args, **kwargs) self.configuration.append_config_values(infinidat_opts) self._lookup_service = fczm_utils.create_lookup_service() @classmethod def get_driver_options(cls): additional_opts = cls._get_oslo_driver_opts( 'san_ip', 'san_login', 'san_password', 'use_chap_auth', 'chap_username', 'chap_password', 'san_thin_provision', 'use_multipath_for_image_xfer', 'enforce_multipath_for_image_xfer', 'num_volume_device_scan_tries', 'volume_dd_blocksize', 'driver_use_ssl', 'suppress_requests_ssl_warnings', 'max_over_subscription_ratio') return infinidat_opts + additional_opts def _setup_and_get_system_object(self, management_address, auth, use_ssl=False): system = infinisdk.InfiniBox(management_address, auth=auth, use_ssl=use_ssl) system.api.add_auto_retry( lambda e: isinstance( e, infinisdk.core.exceptions.APITransportFailure) and "Interrupted system call" in e.error_desc, _API_MAX_RETRIES) system.api.set_source_identifier(_INFINIDAT_CINDER_IDENTIFIER) system.login() return system def do_setup(self, context): """Driver initialization""" if infinisdk is None: msg = _("Missing 'infinisdk' python module, ensure the library" " is installed and available.") raise exception.VolumeDriverException(message=msg) auth = (self.configuration.san_login, self.configuration.san_password) use_ssl = self.configuration.driver_use_ssl self.management_address = self.configuration.san_ip self._system = self._setup_and_get_system_object( self.management_address, auth, use_ssl=use_ssl) backend_name = self.configuration.safe_get('volume_backend_name') self._backend_name = backend_name or self.__class__.__name__ self._volume_stats = None if self.configuration.infinidat_storage_protocol.lower() == 'iscsi': self._protocol = constants.ISCSI if len(self.configuration.infinidat_iscsi_netspaces) == 0: msg = _('No iSCSI network spaces configured') raise exception.VolumeDriverException(message=msg) else: self._protocol = constants.FC LOG.debug('setup complete') def validate_connector(self, connector): required = ('initiator' if self._protocol == constants.ISCSI else 'wwpns') if required not in connector: LOG.error('The volume driver requires %(data)s ' 'in the connector.', {'data': required}) raise exception.InvalidConnectorException(missing=required) def _make_volume_name(self, cinder_volume, migration=False): """Return the Infinidat volume name. Use Cinder volume id in case of volume migration and use Cinder volume name_id for all other cases. 
""" if migration: key = cinder_volume.id else: key = cinder_volume.name_id return 'openstack-vol-%s' % key def _make_snapshot_name(self, cinder_snapshot): return 'openstack-snap-%s' % cinder_snapshot.id def _make_host_name(self, port): return 'openstack-host-%s' % str(port).replace(":", ".") def _make_cg_name(self, cinder_group): return 'openstack-cg-%s' % cinder_group.id def _make_group_snapshot_name(self, cinder_group_snap): return 'openstack-group-snap-%s' % cinder_group_snap.id def _set_cinder_object_metadata(self, infinidat_object, cinder_object): data = {"system": "openstack", "openstack_version": version.version_info.release_string(), "cinder_id": cinder_object.id, "cinder_name": cinder_object.name, "host.created_by": _INFINIDAT_CINDER_IDENTIFIER} infinidat_object.set_metadata_from_dict(data) def _set_host_metadata(self, infinidat_object): data = {"system": "openstack", "openstack_version": version.version_info.release_string(), "hostname": socket.gethostname(), "platform": platform.platform(), "host.created_by": _INFINIDAT_CINDER_IDENTIFIER} infinidat_object.set_metadata_from_dict(data) def _get_infinidat_dataset_by_ref(self, existing_ref): if 'source-id' in existing_ref: kwargs = dict(id=existing_ref['source-id']) elif 'source-name' in existing_ref: kwargs = dict(name=existing_ref['source-name']) else: reason = _('dataset reference must contain ' 'source-id or source-name key') raise exception.ManageExistingInvalidReference( existing_ref=existing_ref, reason=reason) return self._system.volumes.safe_get(**kwargs) def _get_infinidat_volume_by_ref(self, existing_ref): infinidat_volume = self._get_infinidat_dataset_by_ref(existing_ref) if infinidat_volume is None: raise exception.VolumeNotFound(volume_id=existing_ref) return infinidat_volume def _get_infinidat_snapshot_by_ref(self, existing_ref): infinidat_snapshot = self._get_infinidat_dataset_by_ref(existing_ref) if infinidat_snapshot is None: raise exception.SnapshotNotFound(snapshot_id=existing_ref) if not infinidat_snapshot.is_snapshot(): reason = (_('reference %(existing_ref)s is a volume') % {'existing_ref': existing_ref}) raise exception.InvalidSnapshot(reason=reason) return infinidat_snapshot def _get_infinidat_volume_by_name(self, name): ref = {'source-name': name} return self._get_infinidat_volume_by_ref(ref) def _get_infinidat_snapshot_by_name(self, name): ref = {'source-name': name} return self._get_infinidat_snapshot_by_ref(ref) def _get_infinidat_volume(self, cinder_volume): volume_name = self._make_volume_name(cinder_volume) return self._get_infinidat_volume_by_name(volume_name) def _get_infinidat_snapshot(self, cinder_snapshot): snap_name = self._make_snapshot_name(cinder_snapshot) return self._get_infinidat_snapshot_by_name(snap_name) def _get_infinidat_pool(self): pool_name = self.configuration.infinidat_pool_name pool = self._system.pools.safe_get(name=pool_name) if pool is None: msg = _('Pool "%s" not found') % pool_name LOG.error(msg) raise exception.VolumeDriverException(message=msg) return pool def _get_infinidat_cg(self, cinder_group): group_name = self._make_cg_name(cinder_group) infinidat_cg = self._system.cons_groups.safe_get(name=group_name) if infinidat_cg is None: raise exception.GroupNotFound(group_id=group_name) return infinidat_cg def _get_infinidat_sg(self, group_snapshot): name = self._make_group_snapshot_name(group_snapshot) infinidat_sg = self._system.cons_groups.safe_get(name=name) if infinidat_sg is None: raise exception.GroupSnapshotNotFound( group_snapshot_id=group_snapshot.id) if not 
infinidat_sg.is_snapgroup(): reason = (_('consistency group "%s" is not a snapshot group') % name) raise exception.InvalidGroupSnapshot(reason=reason) return infinidat_sg def _get_or_create_host(self, port): host_name = self._make_host_name(port) infinidat_host = self._system.hosts.safe_get(name=host_name) if infinidat_host is None: infinidat_host = self._system.hosts.create(name=host_name) infinidat_host.add_port(port) self._set_host_metadata(infinidat_host) return infinidat_host def _get_mapping(self, host, volume): existing_mapping = host.get_luns() for mapping in existing_mapping: if mapping.get_volume() == volume: return mapping def _get_or_create_mapping(self, host, volume): mapping = self._get_mapping(host, volume) if mapping: return mapping # volume not mapped. map it return host.map_volume(volume) def _get_backend_qos_specs(self, cinder_volume): type_id = cinder_volume.volume_type_id if type_id is None: return None qos_specs = volume_types.get_volume_type_qos_specs(type_id) if qos_specs is None: return None qos_specs = qos_specs['qos_specs'] if qos_specs is None: return None consumer = qos_specs['consumer'] # Front end QoS specs are handled by nova. We ignore them here. if consumer not in BACKEND_QOS_CONSUMERS: return None max_iops = qos_specs['specs'].get(QOS_MAX_IOPS) max_bws = qos_specs['specs'].get(QOS_MAX_BWS) if max_iops is None and max_bws is None: return None return { 'id': qos_specs['id'], QOS_MAX_IOPS: max_iops, QOS_MAX_BWS: max_bws, } def _get_or_create_qos_policy(self, qos_specs): qos_policy = self._system.qos_policies.safe_get(name=qos_specs['id']) if qos_policy is None: qos_policy = self._system.qos_policies.create( name=qos_specs['id'], type="VOLUME", max_ops=qos_specs[QOS_MAX_IOPS], max_bps=qos_specs[QOS_MAX_BWS]) return qos_policy def _set_qos(self, cinder_volume, infinidat_volume): if (hasattr(self._system.compat, "has_qos") and self._system.compat.has_qos()): qos_specs = self._get_backend_qos_specs(cinder_volume) if qos_specs: policy = self._get_or_create_qos_policy(qos_specs) policy.assign_entity(infinidat_volume) def _get_online_fc_ports(self): nodes = self._system.components.nodes.get_all() for node in nodes: for port in node.get_fc_ports(): if (port.get_link_state().lower() == 'up' and port.get_state() == 'OK'): yield str(port.get_wwpn()) def _initialize_connection_fc(self, infinidat_volume, connector): ports = [wwn.WWN(wwpn) for wwpn in connector['wwpns']] for port in ports: infinidat_host = self._get_or_create_host(port) mapping = self._get_or_create_mapping(infinidat_host, infinidat_volume) lun = mapping.get_lun() # Create initiator-target mapping. 
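# Editor's sketch (hypothetical WWPNs; simplified relative to
# _build_initiator_target_map, which may also consult the FC zone lookup
# service): the zoning map handed to the zone manager below pairs every
# initiator port with every online target port.
def build_all_to_all_map(initiator_wwpns, target_wwpns):
    return {str(i): list(target_wwpns) for i in initiator_wwpns}

build_all_to_all_map(['10000090fa123456'],
                     ['5742b0f000012345', '5742b0f000012346'])
# -> {'10000090fa123456': ['5742b0f000012345', '5742b0f000012346']}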
target_wwpns = list(self._get_online_fc_ports()) target_wwpns, init_target_map = self._build_initiator_target_map( connector, target_wwpns) conn_info = dict(driver_volume_type='fibre_channel', data=dict(target_discovered=False, target_wwn=target_wwpns, target_lun=lun, initiator_target_map=init_target_map)) fczm_utils.add_fc_zone(conn_info) return conn_info def _get_iscsi_network_space(self, netspace_name): netspace = self._system.network_spaces.safe_get( service='ISCSI_SERVICE', name=netspace_name) if netspace is None: msg = (_('Could not find iSCSI network space with name "%s"') % netspace_name) raise exception.VolumeDriverException(message=msg) return netspace def _get_iscsi_portals(self, netspace): port = netspace.get_properties().iscsi_tcp_port portals = ["%s:%s" % (interface.ip_address, port) for interface in netspace.get_ips() if interface.enabled] if portals: return portals # if we get here it means there are no enabled ports msg = (_('No available interfaces in iSCSI network space %s') % netspace.get_name()) raise exception.VolumeDriverException(message=msg) def _initialize_connection_iscsi(self, infinidat_volume, connector): port = iqn.IQN(connector['initiator']) infinidat_host = self._get_or_create_host(port) if self.configuration.use_chap_auth: chap_username = (self.configuration.chap_username or volume_utils.generate_username()) chap_password = (self.configuration.chap_password or volume_utils.generate_password()) infinidat_host.update_fields( security_method='CHAP', security_chap_inbound_username=chap_username, security_chap_inbound_secret=chap_password) mapping = self._get_or_create_mapping(infinidat_host, infinidat_volume) lun = mapping.get_lun() netspace_names = self.configuration.infinidat_iscsi_netspaces target_portals = [] target_iqns = [] target_luns = [] for netspace_name in netspace_names: netspace = self._get_iscsi_network_space(netspace_name) netspace_portals = self._get_iscsi_portals(netspace) target_portals.extend(netspace_portals) target_iqns.extend([netspace.get_properties().iscsi_iqn] * len(netspace_portals)) target_luns.extend([lun] * len(netspace_portals)) result_data = dict(target_discovered=True, target_portal=target_portals[0], target_iqn=target_iqns[0], target_lun=target_luns[0], target_portals=target_portals, target_iqns=target_iqns, target_luns=target_luns) if self.configuration.use_chap_auth: result_data.update(dict(auth_method='CHAP', auth_username=chap_username, auth_password=chap_password)) return dict(driver_volume_type='iscsi', data=result_data) def _get_ports_from_connector(self, infinidat_volume, connector): if connector is None: # If no connector was provided it is a force-detach - remove all # host connections for the volume if self._protocol == constants.FC: port_cls = wwn.WWN else: port_cls = iqn.IQN ports = [] for lun_mapping in infinidat_volume.get_logical_units(): host_ports = lun_mapping.get_host().get_ports() host_ports = [port for port in host_ports if isinstance(port, port_cls)] ports.extend(host_ports) elif self._protocol == constants.FC: ports = [wwn.WWN(wwpn) for wwpn in connector['wwpns']] else: ports = [iqn.IQN(connector['initiator'])] return ports def _is_volume_multiattached(self, volume, connector): """Returns whether the volume is multiattached. Check if there are multiple attachments to the volume from the same connector. Terminate connection only for the last attachment from the corresponding host. 
""" if not (connector and volume.multiattach and volume.volume_attachment): return False keys = ['system uuid'] if self._protocol == constants.FC: keys.append('wwpns') else: keys.append('initiator') for key in keys: if not (key in connector and connector[key]): continue if sum(1 for attachment in volume.volume_attachment if attachment.connector and key in attachment.connector and attachment.connector[key] == connector[key]) > 1: LOG.debug('Volume %s is multiattached to %s %s', volume.name_id, key, connector[key]) return True return False def create_export_snapshot(self, context, snapshot, connector): """Exports the snapshot.""" pass def remove_export_snapshot(self, context, snapshot): """Removes an export for a snapshot.""" pass def backup_use_temp_snapshot(self): """Use a temporary snapshot for performing non-disruptive backups.""" return True @coordination.synchronized('infinidat-{self.management_address}-lock') def _initialize_connection(self, infinidat_volume, connector): if self._protocol == constants.FC: initialize_connection = self._initialize_connection_fc else: initialize_connection = self._initialize_connection_iscsi return initialize_connection(infinidat_volume, connector) @infinisdk_to_cinder_exceptions def initialize_connection(self, volume, connector, **kwargs): """Map an InfiniBox volume to the host""" infinidat_volume = self._get_infinidat_volume(volume) return self._initialize_connection(infinidat_volume, connector) @infinisdk_to_cinder_exceptions def initialize_connection_snapshot(self, snapshot, connector, **kwargs): """Map an InfiniBox snapshot to the host""" infinidat_snapshot = self._get_infinidat_snapshot(snapshot) return self._initialize_connection(infinidat_snapshot, connector) @coordination.synchronized('infinidat-{self.management_address}-lock') def _terminate_connection(self, infinidat_volume, connector): if self._protocol == constants.FC: volume_type = 'fibre_channel' else: volume_type = 'iscsi' result_data = dict() ports = self._get_ports_from_connector(infinidat_volume, connector) for port in ports: host_name = self._make_host_name(port) host = self._system.hosts.safe_get(name=host_name) if host is None: # not found. ignore. 
continue # unmap try: host.unmap_volume(infinidat_volume) except KeyError: continue # volume mapping not found # check if the host now doesn't have mappings if host is not None and len(host.get_luns()) == 0: host.safe_delete() if self._protocol == constants.FC and connector is not None: # Create initiator-target mapping to delete host entry # this is only relevant for regular (specific host) detach target_wwpns = list(self._get_online_fc_ports()) target_wwpns, target_map = ( self._build_initiator_target_map(connector, target_wwpns)) result_data = dict(target_wwn=target_wwpns, initiator_target_map=target_map) if self._protocol == constants.FC: conn_info = dict(driver_volume_type=volume_type, data=result_data) fczm_utils.remove_fc_zone(conn_info) @infinisdk_to_cinder_exceptions def terminate_connection(self, volume, connector, **kwargs): """Unmap an InfiniBox volume from the host""" if self._is_volume_multiattached(volume, connector): return True infinidat_volume = self._get_infinidat_volume(volume) self._terminate_connection(infinidat_volume, connector) return volume.volume_attachment and len(volume.volume_attachment) > 1 @infinisdk_to_cinder_exceptions def terminate_connection_snapshot(self, snapshot, connector, **kwargs): """Unmap an InfiniBox snapshot from the host""" infinidat_snapshot = self._get_infinidat_snapshot(snapshot) self._terminate_connection(infinidat_snapshot, connector) @infinisdk_to_cinder_exceptions def get_volume_stats(self, refresh=False): if self._volume_stats is None or refresh: pool = self._get_infinidat_pool() location_info = '%(driver)s:%(serial)s:%(pool)s' % { 'driver': self.__class__.__name__, 'serial': self._system.get_serial(), 'pool': self.configuration.infinidat_pool_name} free_capacity_bytes = (pool.get_free_physical_capacity() / capacity.byte) physical_capacity_bytes = (pool.get_physical_capacity() / capacity.byte) free_capacity_gb = float(free_capacity_bytes) / units.Gi total_capacity_gb = float(physical_capacity_bytes) / units.Gi qos_support = (hasattr(self._system.compat, "has_qos") and self._system.compat.has_qos()) max_osr = self.configuration.max_over_subscription_ratio thin = self.configuration.san_thin_provision self._volume_stats = dict(volume_backend_name=self._backend_name, vendor_name=VENDOR_NAME, driver_version=self.VERSION, storage_protocol=self._protocol, location_info=location_info, consistencygroup_support=False, total_capacity_gb=total_capacity_gb, free_capacity_gb=free_capacity_gb, consistent_group_snapshot_enabled=True, QoS_support=qos_support, thin_provisioning_support=thin, thick_provisioning_support=not thin, max_over_subscription_ratio=max_osr, multiattach=True) return self._volume_stats def _create_volume(self, volume): pool = self._get_infinidat_pool() volume_name = self._make_volume_name(volume) provtype = "THIN" if self.configuration.san_thin_provision else "THICK" size = volume.size * capacity.GiB create_kwargs = dict(name=volume_name, pool=pool, provtype=provtype, size=size) compression_enabled = self.configuration.infinidat_use_compression if compression_enabled is not None: create_kwargs["compression_enabled"] = compression_enabled infinidat_volume = self._system.volumes.create(**create_kwargs) self._set_qos(volume, infinidat_volume) self._set_cinder_object_metadata(infinidat_volume, volume) if volume.group_id: group = volume_utils.group_get_by_id(volume.group_id) if volume_utils.is_group_a_cg_snapshot_type(group): infinidat_group = self._get_infinidat_cg(group) infinidat_group.add_member(infinidat_volume) return 
infinidat_volume @infinisdk_to_cinder_exceptions def create_volume(self, volume): """Create a new volume on the backend.""" # this is the same as _create_volume but without the return statement self._create_volume(volume) @infinisdk_to_cinder_exceptions def delete_volume(self, volume): """Delete a volume from the backend.""" try: infinidat_volume = self._get_infinidat_volume(volume) except exception.VolumeNotFound: return if infinidat_volume.has_children(): # can't delete a volume that has a live snapshot raise exception.VolumeIsBusy(volume_name=volume.name) infinidat_volume.safe_delete() @infinisdk_to_cinder_exceptions def extend_volume(self, volume, new_size): """Extend the size of a volume.""" infinidat_volume = self._get_infinidat_volume(volume) size_delta = new_size * capacity.GiB - infinidat_volume.get_size() infinidat_volume.resize(size_delta) @infinisdk_to_cinder_exceptions def create_snapshot(self, snapshot): """Creates a snapshot.""" volume = self._get_infinidat_volume(snapshot.volume) name = self._make_snapshot_name(snapshot) infinidat_snapshot = volume.create_snapshot(name=name) self._set_cinder_object_metadata(infinidat_snapshot, snapshot) @contextmanager def _connection_context(self, infinidat_volume): use_multipath = self.configuration.use_multipath_for_image_xfer enforce_multipath = self.configuration.enforce_multipath_for_image_xfer connector = volume_utils.brick_get_connector_properties( use_multipath, enforce_multipath) connection = self._initialize_connection(infinidat_volume, connector) try: yield connection finally: self._terminate_connection(infinidat_volume, connector) @contextmanager def _attach_context(self, connection): use_multipath = self.configuration.use_multipath_for_image_xfer device_scan_attempts = self.configuration.num_volume_device_scan_tries protocol = connection['driver_volume_type'] connector = volume_utils.brick_get_connector( protocol, use_multipath=use_multipath, device_scan_attempts=device_scan_attempts, conn=connection) attach_info = None try: attach_info = self._connect_device(connection) yield attach_info except exception.DeviceUnavailable as exc: attach_info = exc.kwargs.get('attach_info', None) raise finally: if attach_info: connector.disconnect_volume(attach_info['conn']['data'], attach_info['device']) @contextmanager def _device_connect_context(self, infinidat_volume): with self._connection_context(infinidat_volume) as connection: with self._attach_context(connection) as attach_info: yield attach_info @infinisdk_to_cinder_exceptions def create_volume_from_snapshot(self, volume, snapshot): """Create volume from snapshot. InfiniBox does not yet support detached clone so use dd to copy data. This could be a lengthy operation. 
- create destination volume - map source snapshot and destination volume - copy data from snapshot to volume - unmap volume and snapshot """ infinidat_snapshot = self._get_infinidat_snapshot(snapshot) infinidat_volume = self._create_volume(volume) try: src_ctx = self._device_connect_context(infinidat_snapshot) dst_ctx = self._device_connect_context(infinidat_volume) with src_ctx as src_dev, dst_ctx as dst_dev: dd_block_size = self.configuration.volume_dd_blocksize volume_utils.copy_volume(src_dev['device']['path'], dst_dev['device']['path'], snapshot.volume.size * units.Ki, dd_block_size, sparse=True) except Exception: infinidat_volume.delete() raise @infinisdk_to_cinder_exceptions def delete_snapshot(self, snapshot): """Deletes a snapshot.""" try: snapshot = self._get_infinidat_snapshot(snapshot) except exception.SnapshotNotFound: return snapshot.safe_delete() @infinisdk_to_cinder_exceptions def create_cloned_volume(self, volume, src_vref): """Create a clone from source volume. InfiniBox does not yet support detached clone so use dd to copy data. This could be a lengthy operation. * create temporary snapshot from source volume * map temporary snapshot * create and map new volume * copy data from temporary snapshot to new volume * unmap volume and temporary snapshot * delete temporary snapshot """ attributes = ('id', 'name', 'volume') Snapshot = collections.namedtuple('Snapshot', attributes) snapshot_id = str(uuid.uuid4()) snapshot_name = CONF.snapshot_name_template % snapshot_id snapshot = Snapshot(id=snapshot_id, name=snapshot_name, volume=src_vref) try: self.create_snapshot(snapshot) self.create_volume_from_snapshot(volume, snapshot) finally: self.delete_snapshot(snapshot) def _build_initiator_target_map(self, connector, all_target_wwns): """Build the target_wwns and the initiator target map.""" target_wwns = [] init_targ_map = {} if self._lookup_service is not None: # use FC san lookup. dev_map = self._lookup_service.get_device_mapping_from_network( connector.get('wwpns'), all_target_wwns) for fabric_name in dev_map: fabric = dev_map[fabric_name] target_wwns += fabric['target_port_wwn_list'] for initiator in fabric['initiator_port_wwn_list']: if initiator not in init_targ_map: init_targ_map[initiator] = [] init_targ_map[initiator] += fabric['target_port_wwn_list'] init_targ_map[initiator] = list(set( init_targ_map[initiator])) target_wwns = list(set(target_wwns)) else: initiator_wwns = connector.get('wwpns', []) target_wwns = all_target_wwns for initiator in initiator_wwns: init_targ_map[initiator] = target_wwns return target_wwns, init_targ_map @infinisdk_to_cinder_exceptions def create_group(self, context, group): """Creates a group. :param context: the context of the caller. :param group: the Group object of the group to be created. :returns: model_update """ # let generic volume group support handle non-cgsnapshots if not volume_utils.is_group_a_cg_snapshot_type(group): raise NotImplementedError() name = self._make_cg_name(group) pool = self._get_infinidat_pool() infinidat_cg = self._system.cons_groups.create(name=name, pool=pool) self._set_cinder_object_metadata(infinidat_cg, group) return {'status': fields.GroupStatus.AVAILABLE} @infinisdk_to_cinder_exceptions def delete_group(self, context, group, volumes): """Deletes a group. :param context: the context of the caller. :param group: the Group object of the group to be deleted. :param volumes: a list of Volume objects in the group. 
:returns: model_update, volumes_model_update """ # let generic volume group support handle non-cgsnapshots if not volume_utils.is_group_a_cg_snapshot_type(group): raise NotImplementedError() try: infinidat_cg = self._get_infinidat_cg(group) except exception.GroupNotFound: pass else: infinidat_cg.safe_delete() for volume in volumes: self.delete_volume(volume) return None, None @infinisdk_to_cinder_exceptions def update_group(self, context, group, add_volumes=None, remove_volumes=None): """Updates a group. :param context: the context of the caller. :param group: the Group object of the group to be updated. :param add_volumes: a list of Volume objects to be added. :param remove_volumes: a list of Volume objects to be removed. :returns: model_update, add_volumes_update, remove_volumes_update """ # let generic volume group support handle non-cgsnapshots if not volume_utils.is_group_a_cg_snapshot_type(group): raise NotImplementedError() add_volumes = add_volumes if add_volumes else [] remove_volumes = remove_volumes if remove_volumes else [] infinidat_cg = self._get_infinidat_cg(group) for volume in add_volumes: infinidat_volume = self._get_infinidat_volume(volume) infinidat_cg.add_member(infinidat_volume) for volume in remove_volumes: infinidat_volume = self._get_infinidat_volume(volume) infinidat_cg.remove_member(infinidat_volume) return None, None, None @infinisdk_to_cinder_exceptions def create_group_from_src(self, context, group, volumes, group_snapshot=None, snapshots=None, source_group=None, source_vols=None): """Creates a group from source. :param context: the context of the caller. :param group: the Group object to be created. :param volumes: a list of Volume objects in the group. :param group_snapshot: the GroupSnapshot object as source. :param snapshots: a list of Snapshot objects in group_snapshot. :param source_group: the Group object as source. :param source_vols: a list of Volume objects in the source_group. :returns: model_update, volumes_model_update The source can be group_snapshot or a source_group. """ # let generic volume group support handle non-cgsnapshots if not volume_utils.is_group_a_cg_snapshot_type(group): raise NotImplementedError() self.create_group(context, group) if group_snapshot and snapshots: for volume, snapshot in zip(volumes, snapshots): self.create_volume_from_snapshot(volume, snapshot) elif source_group and source_vols: for volume, source_vol in zip(volumes, source_vols): self.create_cloned_volume(volume, source_vol) else: message = _('creating a group from source is possible ' 'from an existing group or a group snapshot.') raise exception.InvalidInput(message=message) return None, None @infinisdk_to_cinder_exceptions def create_group_snapshot(self, context, group_snapshot, snapshots): """Creates a group_snapshot. :param context: the context of the caller. :param group_snapshot: the GroupSnapshot object to be created. :param snapshots: a list of Snapshot objects in the group_snapshot. 
:returns: model_update, snapshots_model_update """ # let generic volume group support handle non-cgsnapshots if not volume_utils.is_group_a_cg_snapshot_type(group_snapshot): raise NotImplementedError() infinidat_cg = self._get_infinidat_cg(group_snapshot.group) group_snapshot_name = self._make_group_snapshot_name(group_snapshot) infinidat_sg = infinidat_cg.create_snapshot(name=group_snapshot_name) # update the names of the individual snapshots in the new snapgroup # to match the names we use for cinder snapshots for infinidat_snapshot in infinidat_sg.get_members(): parent_name = infinidat_snapshot.get_parent().get_name() for snapshot in snapshots: if snapshot.volume.name_id in parent_name: snapshot_name = self._make_snapshot_name(snapshot) infinidat_snapshot.update_name(snapshot_name) return None, None @infinisdk_to_cinder_exceptions def delete_group_snapshot(self, context, group_snapshot, snapshots): """Deletes a group_snapshot. :param context: the context of the caller. :param group_snapshot: the GroupSnapshot object to be deleted. :param snapshots: a list of Snapshot objects in the group_snapshot. :returns: model_update, snapshots_model_update """ # let generic volume group support handle non-cgsnapshots if not volume_utils.is_group_a_cg_snapshot_type(group_snapshot): raise NotImplementedError() try: infinidat_sg = self._get_infinidat_sg(group_snapshot) except exception.GroupSnapshotNotFound: pass else: infinidat_sg.safe_delete() for snapshot in snapshots: self.delete_snapshot(snapshot) return None, None def snapshot_revert_use_temp_snapshot(self): """Disable the use of a temporary snapshot on revert.""" return False @infinisdk_to_cinder_exceptions def revert_to_snapshot(self, context, volume, snapshot): """Revert volume to snapshot. Note: the revert process should not change the volume's current size, that means if the driver shrank the volume during the process, it should extend the volume internally. """ infinidat_snapshot = self._get_infinidat_snapshot(snapshot) infinidat_volume = self._get_infinidat_volume(snapshot.volume) infinidat_volume.restore(infinidat_snapshot) volume_size = infinidat_volume.get_size() snapshot_size = snapshot.volume.size * capacity.GiB if volume_size < snapshot_size: self.extend_volume(volume, snapshot.volume.size) @infinisdk_to_cinder_exceptions def manage_existing(self, volume, existing_ref): """Manage an existing Infinidat volume. Checks if the volume is already managed. Renames the Infinidat volume to match the expected name. Updates QoS and metadata. 
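# --- Illustrative aside (not part of the upstream driver) -------------------
# revert_to_snapshot() above restores the backend volume and then re-extends
# it if the restore left it smaller than the Cinder size. A standalone sketch
# of that guard, using the same `capacity` package the driver already uses:
import capacity

def _example_needs_extend(backend_size, cinder_size_gb):
    # True when the restored volume is smaller than Cinder expects
    return backend_size < cinder_size_gb * capacity.GiB

# _example_needs_extend(9 * capacity.GiB, 10) -> True (extend back to 10 GiB)
# -----------------------------------------------------------------------------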
:param volume: Cinder volume to manage :param existing_ref: dictionary of the forms: {'source-name': 'Infinidat volume name'} or {'source-id': 'Infinidat volume serial number'} """ infinidat_volume = self._get_infinidat_volume_by_ref(existing_ref) infinidat_metadata = infinidat_volume.get_all_metadata() if 'cinder_id' in infinidat_metadata: cinder_id = infinidat_metadata['cinder_id'] if volume_utils.check_already_managed_volume(cinder_id): raise exception.ManageExistingAlreadyManaged( volume_ref=cinder_id) infinidat_pool = infinidat_volume.get_pool_name() if infinidat_pool != self.configuration.infinidat_pool_name: message = (_('unexpected pool name %(infinidat_pool)s') % {'infinidat_pool': infinidat_pool}) raise exception.InvalidConfigurationValue(message=message) cinder_name = self._make_volume_name(volume) infinidat_volume.update_name(cinder_name) self._set_qos(volume, infinidat_volume) self._set_cinder_object_metadata(infinidat_volume, volume) @infinisdk_to_cinder_exceptions def manage_existing_get_size(self, volume, existing_ref): """Return size of an existing Infinidat volume. When calculating the size, round up to the next GB. :param volume: Cinder volume to manage :param existing_ref: dictionary of the forms: {'source-name': 'Infinidat volume name'} or {'source-id': 'Infinidat volume serial number'} :returns size: Volume size in GiB (integer) """ infinidat_volume = self._get_infinidat_volume_by_ref(existing_ref) return int(math.ceil(infinidat_volume.get_size() / capacity.GiB)) @infinisdk_to_cinder_exceptions def get_manageable_volumes(self, cinder_volumes, marker, limit, offset, sort_keys, sort_dirs): """List volumes on the Infinidat backend available for management. Returns a list of dictionaries, each specifying a volume on the Infinidat backend, with the following keys: - reference (dictionary): The reference for a volume, which can be passed to "manage_existing". Each reference contains keys: Infinidat volume name and Infinidat volume serial number. - size (int): The size of the volume according to the Infinidat storage backend, rounded up to the nearest GB. - safe_to_manage (boolean): Whether or not this volume is safe to manage according to the storage backend. For example, is the volume already managed, in use, has snapshots or active mappings. - reason_not_safe (string): If safe_to_manage is False, the reason why. - cinder_id (string): If already managed, provide the Cinder ID. - extra_info (string): Extra information (pool name, volume type, QoS and metadata) to return to the user. :param cinder_volumes: A list of volumes in this host that Cinder currently manages, used to determine if a volume is manageable or not. 
:param marker: The last item of the previous page; we return the next results after this value (after sorting) :param limit: Maximum number of items to return :param offset: Number of items to skip after marker :param sort_keys: List of keys to sort results by (valid keys are 'identifier' and 'size') :param sort_dirs: List of directions to sort by, corresponding to sort_keys (valid directions are 'asc' and 'desc') """ manageable_volumes = [] cinder_ids = [cinder_volume.id for cinder_volume in cinder_volumes] infinidat_pool = self._get_infinidat_pool() infinidat_volumes = infinidat_pool.get_volumes() for infinidat_volume in infinidat_volumes: if infinidat_volume.is_snapshot(): continue safe_to_manage = False reason_not_safe = None volume_id = infinidat_volume.get_id() volume_name = infinidat_volume.get_name() volume_size = infinidat_volume.get_size() volume_type = infinidat_volume.get_type() volume_pool = infinidat_volume.get_pool_name() volume_qos = infinidat_volume.get_qos_policy() volume_meta = infinidat_volume.get_all_metadata() cinder_id = volume_meta.get('cinder_id') volume_luns = infinidat_volume.get_logical_units() if cinder_id and cinder_id in cinder_ids: reason_not_safe = _('volume already managed') elif volume_luns: reason_not_safe = _('volume has mappings') elif infinidat_volume.has_children(): reason_not_safe = _('volume has snapshots') else: safe_to_manage = True reference = { 'source-name': volume_name, 'source-id': str(volume_id) } extra_info = { 'pool': volume_pool, 'type': volume_type, 'qos': str(volume_qos), 'meta': str(volume_meta) } manageable_volume = { 'reference': reference, 'size': int(math.ceil(volume_size / capacity.GiB)), 'safe_to_manage': safe_to_manage, 'reason_not_safe': reason_not_safe, 'cinder_id': cinder_id, 'extra_info': extra_info } manageable_volumes.append(manageable_volume) return volume_utils.paginate_entries_list( manageable_volumes, marker, limit, offset, sort_keys, sort_dirs) @infinisdk_to_cinder_exceptions def unmanage(self, volume): """Removes the specified volume from Cinder management. Does not delete the underlying backend storage object. For most drivers, this will not need to do anything. However, some drivers might use this call as an opportunity to clean up any Cinder-specific configuration that they have associated with the backend storage object. :param volume: Cinder volume to unmanage """ infinidat_volume = self._get_infinidat_volume(volume) infinidat_volume.clear_metadata() def _check_already_managed_snapshot(self, snapshot_id): """Check cinder db for already managed snapshot. :param snapshot_id snapshot id parameter :returns: bool -- return True, if db entry with specified snapshot id exists, otherwise return False """ try: uuid.UUID(snapshot_id, version=4) except ValueError: return False ctxt = cinder_context.get_admin_context() return objects.Snapshot.exists(ctxt, snapshot_id) @infinisdk_to_cinder_exceptions def manage_existing_snapshot(self, snapshot, existing_ref): """Manage an existing Infinidat snapshot. Checks if the snapshot is already managed. Renames the Infinidat snapshot to match the expected name. Updates QoS and metadata. 
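# --- Illustrative aside (not part of the upstream driver) -------------------
# _check_already_managed_snapshot() above only queries the database when the
# 'cinder_id' metadata value actually parses as a UUID. A standalone sketch of
# that validation step (example values are hypothetical):
import uuid

def _example_parses_as_uuid(value):
    try:
        uuid.UUID(value, version=4)
    except ValueError:
        return False
    return True

# _example_parses_as_uuid('not-a-uuid') -> False
# _example_parses_as_uuid('8c5e9d4a-1f6b-4b7e-9a2d-3c4d5e6f7a8b') -> True
# -----------------------------------------------------------------------------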
:param snapshot: Cinder snapshot to manage :param existing_ref: dictionary of the forms: {'source-name': 'Infinidat snapshot name'} or {'source-id': 'Infinidat snapshot serial number'} """ infinidat_snapshot = self._get_infinidat_snapshot_by_ref(existing_ref) infinidat_metadata = infinidat_snapshot.get_all_metadata() if 'cinder_id' in infinidat_metadata: cinder_id = infinidat_metadata['cinder_id'] if self._check_already_managed_snapshot(cinder_id): raise exception.ManageExistingAlreadyManaged( volume_ref=cinder_id) infinidat_pool = infinidat_snapshot.get_pool_name() if infinidat_pool != self.configuration.infinidat_pool_name: message = (_('unexpected pool name %(infinidat_pool)s') % {'infinidat_pool': infinidat_pool}) raise exception.InvalidConfigurationValue(message=message) cinder_name = self._make_snapshot_name(snapshot) infinidat_snapshot.update_name(cinder_name) self._set_qos(snapshot, infinidat_snapshot) self._set_cinder_object_metadata(infinidat_snapshot, snapshot) @infinisdk_to_cinder_exceptions def manage_existing_snapshot_get_size(self, snapshot, existing_ref): """Return size of an existing Infinidat snapshot. When calculating the size, round up to the next GB. :param snapshot: Cinder snapshot to manage :param existing_ref: dictionary of the forms: {'source-name': 'Infinidat snapshot name'} or {'source-id': 'Infinidat snapshot serial number'} :returns size: Snapshot size in GiB (integer) """ infinidat_snapshot = self._get_infinidat_snapshot_by_ref(existing_ref) return int(math.ceil(infinidat_snapshot.get_size() / capacity.GiB)) @infinisdk_to_cinder_exceptions def get_manageable_snapshots(self, cinder_snapshots, marker, limit, offset, sort_keys, sort_dirs): """List snapshots on the Infinidat backend available for management. Returns a list of dictionaries, each specifying a snapshot on the Infinidat backend, with the following keys: - reference (dictionary): The reference for a snapshot, which can be passed to "manage_existing_snapshot". Each reference contains keys: Infinidat snapshot name and Infinidat snapshot serial number. - size (int): The size of the snapshot according to the Infinidat storage backend, rounded up to the nearest GB. - safe_to_manage (boolean): Whether or not this snapshot is safe to manage according to the storage backend. For example, is the snapshot already managed, has clones or active mappings. - reason_not_safe (string): If safe_to_manage is False, the reason why. - cinder_id (string): If already managed, provide the Cinder ID. - extra_info (string): Extra information (pool name, snapshot type, QoS and metadata) to return to the user. - source_reference (string): Similar to "reference", but for the snapshot's source volume. The source reference contains two keys: Infinidat volume name and Infinidat volume serial number. :param cinder_snapshots: A list of snapshots in this host that Cinder currently manages, used to determine if a snapshot is manageable or not. 
:param marker: The last item of the previous page; we return the next results after this value (after sorting) :param limit: Maximum number of items to return :param offset: Number of items to skip after marker :param sort_keys: List of keys to sort results by (valid keys are 'identifier' and 'size') :param sort_dirs: List of directions to sort by, corresponding to sort_keys (valid directions are 'asc' and 'desc') """ manageable_snapshots = [] cinder_ids = [cinder_snapshot.id for cinder_snapshot in cinder_snapshots] infinidat_pool = self._get_infinidat_pool() infinidat_snapshots = infinidat_pool.get_volumes() for infinidat_snapshot in infinidat_snapshots: if not infinidat_snapshot.is_snapshot(): continue safe_to_manage = False reason_not_safe = None parent = infinidat_snapshot.get_parent() parent_id = parent.get_id() parent_name = parent.get_name() snapshot_id = infinidat_snapshot.get_id() snapshot_name = infinidat_snapshot.get_name() snapshot_size = infinidat_snapshot.get_size() snapshot_type = infinidat_snapshot.get_type() snapshot_pool = infinidat_snapshot.get_pool_name() snapshot_qos = infinidat_snapshot.get_qos_policy() snapshot_meta = infinidat_snapshot.get_all_metadata() cinder_id = snapshot_meta.get('cinder_id') snapshot_luns = infinidat_snapshot.get_logical_units() if cinder_id and cinder_id in cinder_ids: reason_not_safe = _('snapshot already managed') elif snapshot_luns: reason_not_safe = _('snapshot has mappings') elif infinidat_snapshot.has_children(): reason_not_safe = _('snapshot has clones') else: safe_to_manage = True reference = { 'source-name': snapshot_name, 'source-id': str(snapshot_id) } source_reference = { 'source-name': parent_name, 'source-id': str(parent_id) } extra_info = { 'pool': snapshot_pool, 'type': snapshot_type, 'qos': str(snapshot_qos), 'meta': str(snapshot_meta) } manageable_snapshot = { 'reference': reference, 'size': int(math.ceil(snapshot_size / capacity.GiB)), 'safe_to_manage': safe_to_manage, 'reason_not_safe': reason_not_safe, 'cinder_id': cinder_id, 'extra_info': extra_info, 'source_reference': source_reference } manageable_snapshots.append(manageable_snapshot) return volume_utils.paginate_entries_list( manageable_snapshots, marker, limit, offset, sort_keys, sort_dirs) @infinisdk_to_cinder_exceptions def unmanage_snapshot(self, snapshot): """Removes the specified snapshot from Cinder management. Does not delete the underlying backend storage object. For most drivers, this will not need to do anything. However, some drivers might use this call as an opportunity to clean up any Cinder-specific configuration that they have associated with the backend storage object. :param snapshot: Cinder volume snapshot to unmanage """ infinidat_snapshot = self._get_infinidat_snapshot(snapshot) infinidat_snapshot.clear_metadata() @infinisdk_to_cinder_exceptions def update_migrated_volume(self, ctxt, volume, new_volume, original_volume_status): """Return model update from Infinidat for migrated volume. This method should rename the back-end volume name(id) on the destination host back to its original name(id) on the source host. 
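# --- Illustrative aside (not part of the upstream driver) -------------------
# migrate_volume() below only accepts hosts whose capabilities carry a
# location_info string in the same "DriverClass:serial:pool" form that
# get_volume_stats() reports. A standalone sketch of that parsing step; the
# driver class name and values shown are hypothetical examples.
def _example_parse_location_info(location_info):
    driver, serial, pool = location_info.split(':')
    return driver, int(serial), pool

# _example_parse_location_info('InfiniboxVolumeDriver:20151:pool-a')
# -> ('InfiniboxVolumeDriver', 20151, 'pool-a')
# -----------------------------------------------------------------------------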
:param ctxt: The context used to run the method update_migrated_volume :param volume: The original volume that was migrated to this backend :param new_volume: The migration volume object that was created on this backend as part of the migration process :param original_volume_status: The status of the original volume :returns: model_update to update DB with any needed changes """ model_update = {'_name_id': new_volume.name_id, 'provider_location': None} new_volume_name = self._make_volume_name(new_volume, migration=True) new_infinidat_volume = self._get_infinidat_volume(new_volume) self._set_cinder_object_metadata(new_infinidat_volume, volume) volume_name = self._make_volume_name(volume, migration=True) try: infinidat_volume = self._get_infinidat_volume(volume) except exception.VolumeNotFound: LOG.debug('Source volume %s not found', volume_name) else: volume_pool = infinidat_volume.get_pool_name() LOG.debug('Found source volume %s in pool %s', volume_name, volume_pool) return model_update try: new_infinidat_volume.update_name(volume_name) except infinisdk.core.exceptions.InfiniSDKException as error: LOG.error('Failed to rename destination volume %s -> %s: %s', new_volume_name, volume_name, error) return model_update return {'_name_id': None, 'provider_location': None} @infinisdk_to_cinder_exceptions def migrate_volume(self, ctxt, volume, host): """Migrate a volume within the same InfiniBox system.""" LOG.debug('Starting volume migration for volume %s to host %s', volume.name, host) if not (host and 'capabilities' in host): LOG.error('No capabilities found for host %s', host) return False, None capabilities = host['capabilities'] if not (capabilities and 'location_info' in capabilities): LOG.error('No location info found for host %s', host) return False, None location = capabilities['location_info'] try: driver, serial, pool = location.split(':') serial = int(serial) except (AttributeError, ValueError) as error: LOG.error('Invalid location info %s found for host %s: %s', location, host, error) return False, None if driver != self.__class__.__name__: LOG.debug('Unsupported storage driver %s found for host %s', driver, host) return False, None if serial != self._system.get_serial(): LOG.error('Unable to migrate volume %s to remote host %s', volume.name, host) return False, None infinidat_volume = self._get_infinidat_volume(volume) if pool == infinidat_volume.get_pool_name(): LOG.debug('Volume %s already migrated to pool %s', volume.name, pool) return True, None infinidat_pool = self._system.pools.safe_get(name=pool) if infinidat_pool is None: LOG.error('Destination pool %s not found on host %s', pool, host) return False, None infinidat_volume.move_pool(infinidat_pool) LOG.info('Migrated volume %s to pool %s', volume.name, pool) return True, None ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1759315577.367121 cinder-27.0.0/cinder/volume/drivers/infortrend/0000775000175000017500000000000000000000000021472 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/drivers/infortrend/__init__.py0000664000175000017500000000000000000000000023571 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/drivers/infortrend/infortrend_fc_cli.py0000664000175000017500000003610100000000000025516 0ustar00zuulzuul00000000000000# Copyright (c) 2015 
Infortrend Technology, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Fibre Channel Driver for Infortrend Eonstor based on CLI. """ from oslo_log import log as logging from cinder import interface from cinder.volume import driver from cinder.volume.drivers.infortrend.raidcmd_cli import common_cli LOG = logging.getLogger(__name__) @interface.volumedriver class InfortrendCLIFCDriver(driver.FibreChannelDriver): # ThirdPartySystems wiki page CI_WIKI_NAME = "Infortrend_Storage_CI" VERSION = common_cli.InfortrendCommon.VERSION def __init__(self, *args, **kwargs): super(InfortrendCLIFCDriver, self).__init__(*args, **kwargs) self.common = common_cli.InfortrendCommon( 'FC', configuration=self.configuration) self.VERSION = self.common.VERSION @staticmethod def get_driver_options(): """Return the oslo_config options specific to the driver.""" return common_cli.infortrend_opts def do_setup(self, context): """Any initialization the volume driver does while starting. note: This runs before check_for_setup_error """ LOG.debug('do_setup start') self.common.do_setup() def check_for_setup_error(self): LOG.debug('check_for_setup_error start') self.common.check_for_setup_error() def create_volume(self, volume): """Creates a volume. Can optionally return a Dictionary of changes to the volume object to be persisted. """ LOG.debug('create_volume volume id=%(volume_id)s', { 'volume_id': volume['id']}) return self.common.create_volume(volume) def create_volume_from_snapshot(self, volume, snapshot): """Creates a volume from a snapshot.""" LOG.debug( 'create_volume_from_snapshot volume id=%(volume_id)s ' 'snapshot id=%(snapshot_id)s', { 'volume_id': volume['id'], 'snapshot_id': snapshot['id']}) return self.common.create_volume_from_snapshot(volume, snapshot) def create_cloned_volume(self, volume, src_vref): """Creates a clone of the specified volume.""" LOG.debug( 'create_cloned_volume volume id=%(volume_id)s ' 'src_vref provider_location=%(provider_location)s', { 'volume_id': volume['id'], 'provider_location': src_vref['provider_location']}) return self.common.create_cloned_volume(volume, src_vref) def extend_volume(self, volume, new_size): """Extend a volume.""" LOG.debug( 'extend_volume volume id=%(volume_id)s new size=%(size)s', { 'volume_id': volume['id'], 'size': new_size}) self.common.extend_volume(volume, new_size) def delete_volume(self, volume): """Deletes a volume.""" LOG.debug('delete_volume volume id=%(volume_id)s', { 'volume_id': volume['id']}) return self.common.delete_volume(volume) def migrate_volume(self, ctxt, volume, host): """Migrate the volume to the specified host. Returns a boolean indicating whether the migration occurred, as well as model_update. :param ctxt: Context :param volume: A dictionary describing the volume to migrate :param host: A dictionary describing the host to migrate to, where host['host'] is its name, and host['capabilities'] is a dictionary of its reported capabilities. 
""" LOG.debug('migrate_volume volume id=%(volume_id)s host=%(host)s', { 'volume_id': volume['id'], 'host': host['host']}) return self.common.migrate_volume(volume, host) def create_snapshot(self, snapshot): """Creates a snapshot.""" LOG.debug( 'create_snapshot snapshot id=%(snapshot_id)s ' 'volume id=%(volume_id)s', { 'snapshot_id': snapshot['id'], 'volume_id': snapshot['volume_id']}) return self.common.create_snapshot(snapshot) def delete_snapshot(self, snapshot): """Deletes a snapshot.""" LOG.debug( 'delete_snapshot snapshot id=%(snapshot_id)s ' 'volume id=%(volume_id)s', { 'snapshot_id': snapshot['id'], 'volume_id': snapshot['volume_id']}) self.common.delete_snapshot(snapshot) def ensure_export(self, context, volume): """Synchronously recreates an export for a volume.""" pass def create_export(self, context, volume, connector): """Exports the volume. Can optionally return a Dictionary of changes to the volume object to be persisted. """ LOG.debug( 'create_export volume provider_location=%(provider_location)s', { 'provider_location': volume['provider_location']}) return self.common.create_export(context, volume) def remove_export(self, context, volume): """Removes an export for a volume.""" pass def initialize_connection(self, volume, connector): """Initializes the connection and returns connection information. Assign any created volume to a compute node/host so that it can be used from that host. The driver returns a driver_volume_type of 'fibre_channel'. The target_wwn can be a single entry or a list of wwns that correspond to the list of remote wwn(s) that will export the volume. The initiator_target_map is a map that represents the remote wwn(s) and a list of wwns which are visible to the remote wwn(s). Example return values: { 'driver_volume_type': 'fibre_channel' 'data': { 'target_discovered': True, 'target_lun': 1, 'target_wwn': '1234567890123', 'initiator_target_map': { '1122334455667788': ['1234567890123'] } } } or { 'driver_volume_type': 'fibre_channel' 'data': { 'target_discovered': True, 'target_lun': 1, 'target_wwn': ['1234567890123', '0987654321321'], 'initiator_target_map': { '1122334455667788': ['1234567890123', '0987654321321'] } } } """ LOG.debug( 'initialize_connection volume id=%(volume_id)s ' 'connector initiator=%(initiator)s', { 'volume_id': volume['id'], 'initiator': connector['initiator']}) return self.common.initialize_connection(volume, connector) def terminate_connection(self, volume, connector, **kwargs): """Disallow connection from connector.""" LOG.debug('terminate_connection volume id=%(volume_id)s', { 'volume_id': volume['id']}) return self.common.terminate_connection(volume, connector) def get_volume_stats(self, refresh=False): """Get volume stats. If 'refresh' is True, run update the stats first. """ LOG.debug('get_volume_stats refresh=%(refresh)s', { 'refresh': refresh}) return self.common.get_volume_stats(refresh) def manage_existing(self, volume, existing_ref): """Manage an existing lun in the array. The lun should be in a manageable pool backend, otherwise error would return. Rename the backend storage object so that it matches the, volume['name'] which is how drivers traditionally map between a cinder volume and the associated backend storage object. 
:param existing_ref: Driver-specific information used to identify a volume """ LOG.debug( 'manage_existing volume: %(volume)s ' 'existing_ref source: %(source)s', { 'volume': volume, 'source': existing_ref}) return self.common.manage_existing(volume, existing_ref) def unmanage(self, volume): """Removes the specified volume from Cinder management. Does not delete the underlying backend storage object. :param volume: Cinder volume to unmanage """ LOG.debug('unmanage volume id=%(volume_id)s', { 'volume_id': volume['id']}) self.common.unmanage(volume) def manage_existing_get_size(self, volume, existing_ref): """Return size of volume to be managed by manage_existing. When calculating the size, round up to the next GB. """ LOG.debug( 'manage_existing_get_size volume: %(volume)s ' 'existing_ref source: %(source)s', { 'volume': volume, 'source': existing_ref}) return self.common.manage_existing_get_size(volume, existing_ref) def retype(self, ctxt, volume, new_type, diff, host): """Convert the volume to be of the new type. :param ctxt: Context :param volume: A dictionary describing the volume to migrate :param new_type: A dictionary describing the volume type to convert to :param diff: A dictionary with the difference between the two types :param host: A dictionary describing the host to migrate to, where host['host'] is its name, and host['capabilities'] is a dictionary of its reported capabilities. """ LOG.debug( 'retype volume id=%(volume_id)s new_type id=%(type_id)s', { 'volume_id': volume['id'], 'type_id': new_type['id']}) return self.common.retype(ctxt, volume, new_type, diff, host) def update_migrated_volume(self, ctxt, volume, new_volume, original_volume_status): """Return model update for migrated volume. :param volume: The original volume that was migrated to this backend :param new_volume: The migration volume object that was created on this backend as part of the migration process :param original_volume_status: The status of the original volume :returns: model_update to update DB with any needed changes """ LOG.debug( 'update migrated volume original volume id= %(volume_id)s ' 'new volume id=%(new_volume_id)s', { 'volume_id': volume['id'], 'new_volume_id': new_volume['id']}) return self.common.update_migrated_volume(ctxt, volume, new_volume, original_volume_status) def get_manageable_volumes(self, cinder_volumes, marker, limit, offset, sort_keys, sort_dirs): """List volumes on the backend available for management by Cinder.""" LOG.debug( 'get_manageable_volumes CALLED ' 'cinder_volumes: %(volume)s, ' 'marker: %(mkr)s, ' 'limit: %(lmt)s, ' 'offset: %(_offset)s, ' 'sort_keys: %(s_key)s, ' 'sort_dirs: %(sort_dir)s', { 'volume': cinder_volumes, 'mkr': marker, 'lmt': limit, '_offset': offset, 's_key': sort_keys, 'sort_dir': sort_dirs } ) return self.common.get_manageable_volumes(cinder_volumes, marker, limit, offset, sort_keys, sort_dirs) def manage_existing_snapshot(self, snapshot, existing_ref): """Brings an existing backend storage object under Cinder management. :param snapshot: Cinder volume snapshot to manage :param existing_ref: Driver-specific information used to identify a volume snapshot """ LOG.debug( 'manage_existing_snapshot CALLED ' 'snapshot: %(si)s, ' 'existing_ref: %(ref)s', { 'si': snapshot, 'ref': existing_ref } ) return self.common.manage_existing_snapshot(snapshot, existing_ref) def manage_existing_snapshot_get_size(self, snapshot, existing_ref): """Return size of snapshot to be managed by manage_existing. 
:param snapshot: Cinder volume snapshot to manage :param existing_ref: Driver-specific information used to identify a volume snapshot :returns size: Volume snapshot size in GiB (integer) """ LOG.debug( 'manage_existing_snapshot_get_size CALLED ' 'snapshot: %(si)s, ' 'existing_ref: %(ref)s', { 'si': snapshot, 'ref': existing_ref } ) return self.common.manage_existing_snapshot_get_size(snapshot, existing_ref) def get_manageable_snapshots(self, cinder_snapshots, marker, limit, offset, sort_keys, sort_dirs): """List snapshots on the backend available for management by Cinder.""" LOG.debug( 'get_manageable_volumes CALLED ' 'cinder_snapshots: %(volume)s, ' 'marker: %(mkr)s, ' 'limit: %(lmt)s, ' 'offset: %(_offset)s, ' 'sort_keys: %(s_key)s, ' 'sort_dirs: %(sort_dir)s', { 'volume': cinder_snapshots, 'mkr': marker, 'lmt': limit, '_offset': offset, 's_key': sort_keys, 'sort_dir': sort_dirs } ) return self.common.get_manageable_snapshots(cinder_snapshots, marker, limit, offset, sort_keys, sort_dirs) def unmanage_snapshot(self, snapshot): """Removes the specified snapshot from Cinder management. Does not delete the underlying backend storage object. For most drivers, this will not need to do anything. However, some drivers might use this call as an opportunity to clean up any Cinder-specific configuration that they have associated with the backend storage object. :param snapshot: Cinder volume snapshot to unmanage """ LOG.debug( 'manage_existing_snapshot_get_size CALLED ' 'snapshot: %(si)s', { 'si': snapshot } ) return self.common.unmanage_snapshot(snapshot) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/drivers/infortrend/infortrend_iscsi_cli.py0000664000175000017500000003432300000000000026244 0ustar00zuulzuul00000000000000# Copyright (c) 2015 Infortrend Technology, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ iSCSI Driver for Infortrend Eonstor based on CLI. """ from oslo_log import log as logging from cinder import interface from cinder.volume import driver from cinder.volume.drivers.infortrend.raidcmd_cli import common_cli LOG = logging.getLogger(__name__) @interface.volumedriver class InfortrendCLIISCSIDriver(driver.ISCSIDriver): # ThirdPartySystems wiki page CI_WIKI_NAME = "Infortrend_Storage_CI" VERSION = common_cli.InfortrendCommon.VERSION def __init__(self, *args, **kwargs): super(InfortrendCLIISCSIDriver, self).__init__(*args, **kwargs) self.common = common_cli.InfortrendCommon( 'iSCSI', configuration=self.configuration) self.VERSION = self.common.VERSION @staticmethod def get_driver_options(): """Return the oslo_config options specific to the driver.""" return common_cli.infortrend_opts def do_setup(self, context): """Any initialization the volume driver does while starting. 
note: This runs before check_for_setup_error """ LOG.debug('do_setup start') self.common.do_setup() def check_for_setup_error(self): LOG.debug('check_for_setup_error start') self.common.check_for_setup_error() def create_volume(self, volume): """Creates a volume. Can optionally return a Dictionary of changes to the volume object to be persisted. """ LOG.debug('create_volume volume id=%(volume_id)s', { 'volume_id': volume['id']}) return self.common.create_volume(volume) def create_volume_from_snapshot(self, volume, snapshot): """Creates a volume from a snapshot.""" LOG.debug( 'create_volume_from_snapshot volume id=%(volume_id)s ' 'snapshot id=%(snapshot_id)s', { 'volume_id': volume['id'], 'snapshot_id': snapshot['id']}) return self.common.create_volume_from_snapshot(volume, snapshot) def create_cloned_volume(self, volume, src_vref): """Creates a clone of the specified volume.""" LOG.debug( 'create_cloned_volume volume id=%(volume_id)s ' 'src_vref provider_location=%(provider_location)s', { 'volume_id': volume['id'], 'provider_location': src_vref['provider_location']}) return self.common.create_cloned_volume(volume, src_vref) def extend_volume(self, volume, new_size): """Extend a volume.""" LOG.debug( 'extend_volume volume id=%(volume_id)s new size=%(size)s', { 'volume_id': volume['id'], 'size': new_size}) self.common.extend_volume(volume, new_size) def delete_volume(self, volume): """Deletes a volume.""" LOG.debug('delete_volume volume id=%(volume_id)s', { 'volume_id': volume['id']}) return self.common.delete_volume(volume) def migrate_volume(self, ctxt, volume, host): """Migrate the volume to the specified host. Returns a boolean indicating whether the migration occurred, as well as model_update. :param ctxt: Context :param volume: A dictionary describing the volume to migrate :param host: A dictionary describing the host to migrate to, where host['host'] is its name, and host['capabilities'] is a dictionary of its reported capabilities. """ LOG.debug('migrate_volume volume id=%(volume_id)s host=%(host)s', { 'volume_id': volume['id'], 'host': host['host']}) return self.common.migrate_volume(volume, host) def create_snapshot(self, snapshot): """Creates a snapshot.""" LOG.debug( 'create_snapshot snapshot id=%(snapshot_id)s ' 'volume_id=%(volume_id)s', { 'snapshot_id': snapshot['id'], 'volume_id': snapshot['volume_id']}) return self.common.create_snapshot(snapshot) def delete_snapshot(self, snapshot): """Deletes a snapshot.""" LOG.debug( 'delete_snapshot snapshot id=%(snapshot_id)s ' 'volume_id=%(volume_id)s', { 'snapshot_id': snapshot['id'], 'volume_id': snapshot['volume_id']}) self.common.delete_snapshot(snapshot) def ensure_export(self, context, volume): """Synchronously recreates an export for a volume.""" pass def create_export(self, context, volume, connector): """Exports the volume. Can optionally return a Dictionary of changes to the volume object to be persisted. """ LOG.debug( 'create_export volume provider_location=%(provider_location)s', { 'provider_location': volume['provider_location']}) return self.common.create_export(context, volume) def remove_export(self, context, volume): """Removes an export for a volume.""" pass def initialize_connection(self, volume, connector): """Initializes the connection and returns connection information. The iscsi driver returns a driver_volume_type of 'iscsi'. The format of the driver data is defined in _get_iscsi_properties. 
Example return value:: { 'driver_volume_type': 'iscsi', 'data': { 'target_discovered': True, 'target_iqn': 'iqn.2010-10.org.openstack:volume-00000001', 'target_portal': '127.0.0.1:3260', 'volume_id': 1, } } """ LOG.debug( 'initialize_connection volume id=%(volume_id)s ' 'connector initiator=%(initiator)s', { 'volume_id': volume['id'], 'initiator': connector['initiator']}) return self.common.initialize_connection(volume, connector) def terminate_connection(self, volume, connector, **kwargs): """Disallow connection from connector.""" LOG.debug('terminate_connection volume id=%(volume_id)s', { 'volume_id': volume['id']}) self.common.terminate_connection(volume, connector) def get_volume_stats(self, refresh=False): """Get volume stats. If 'refresh' is True, update the stats first. """ LOG.debug('get_volume_stats refresh=%(refresh)s', { 'refresh': refresh}) return self.common.get_volume_stats(refresh) def manage_existing(self, volume, existing_ref): """Manage an existing lun in the array. The lun should be in a manageable pool backend, otherwise an error is returned. Rename the backend storage object so that it matches volume['name'], which is how drivers traditionally map between a cinder volume and the associated backend storage object. :param existing_ref: Driver-specific information used to identify a volume """ LOG.debug( 'manage_existing volume: %(volume)s ' 'existing_ref source: %(source)s', { 'volume': volume, 'source': existing_ref}) return self.common.manage_existing(volume, existing_ref) def unmanage(self, volume): """Removes the specified volume from Cinder management. Does not delete the underlying backend storage object. :param volume: Cinder volume to unmanage """ LOG.debug('unmanage volume id=%(volume_id)s', { 'volume_id': volume['id']}) self.common.unmanage(volume) def manage_existing_get_size(self, volume, existing_ref): """Return size of volume to be managed by manage_existing. When calculating the size, round up to the next GB. """ LOG.debug( 'manage_existing_get_size volume: %(volume)s ' 'existing_ref source: %(source)s', { 'volume': volume, 'source': existing_ref}) return self.common.manage_existing_get_size(volume, existing_ref) def retype(self, ctxt, volume, new_type, diff, host): """Convert the volume to be of the new type. :param ctxt: Context :param volume: A dictionary describing the volume to migrate :param new_type: A dictionary describing the volume type to convert to :param diff: A dictionary with the difference between the two types :param host: A dictionary describing the host to migrate to, where host['host'] is its name, and host['capabilities'] is a dictionary of its reported capabilities. """ LOG.debug( 'retype volume id=%(volume_id)s new_type id=%(type_id)s', { 'volume_id': volume['id'], 'type_id': new_type['id']}) return self.common.retype(ctxt, volume, new_type, diff, host) def update_migrated_volume(self, ctxt, volume, new_volume, original_volume_status): """Return model update for migrated volume. 
:param volume: The original volume that was migrated to this backend :param new_volume: The migration volume object that was created on this backend as part of the migration process :param original_volume_status: The status of the original volume :returns: model_update to update DB with any needed changes """ LOG.debug( 'update migrated volume original volume id= %(volume_id)s ' 'new volume id=%(new_volume_id)s', { 'volume_id': volume['id'], 'new_volume_id': new_volume['id']}) return self.common.update_migrated_volume(ctxt, volume, new_volume, original_volume_status) def get_manageable_volumes(self, cinder_volumes, marker, limit, offset, sort_keys, sort_dirs): """List volumes on the backend available for management by Cinder.""" LOG.debug( 'get_manageable_volumes CALLED ' 'cinder_volumes: %(volume)s, ' 'marker: %(mkr)s, ' 'limit: %(lmt)s, ' 'offset: %(_offset)s, ' 'sort_keys: %(s_key)s, ' 'sort_dirs: %(sort_dir)s', { 'volume': cinder_volumes, 'mkr': marker, 'lmt': limit, '_offset': offset, 's_key': sort_keys, 'sort_dir': sort_dirs } ) return self.common.get_manageable_volumes(cinder_volumes, marker, limit, offset, sort_keys, sort_dirs) def manage_existing_snapshot(self, snapshot, existing_ref): """Brings an existing backend storage object under Cinder management. :param snapshot: Cinder volume snapshot to manage :param existing_ref: Driver-specific information used to identify a volume snapshot """ LOG.debug( 'manage_existing_snapshot CALLED ' 'snapshot: %(si)s, ' 'existing_ref: %(ref)s', { 'si': snapshot, 'ref': existing_ref } ) return self.common.manage_existing_snapshot(snapshot, existing_ref) def manage_existing_snapshot_get_size(self, snapshot, existing_ref): """Return size of snapshot to be managed by manage_existing. :param snapshot: Cinder volume snapshot to manage :param existing_ref: Driver-specific information used to identify a volume snapshot :returns size: Volume snapshot size in GiB (integer) """ LOG.debug( 'manage_existing_snapshot_get_size CALLED ' 'snapshot: %(si)s, ' 'existing_ref: %(ref)s', { 'si': snapshot, 'ref': existing_ref } ) return self.common.manage_existing_snapshot_get_size(snapshot, existing_ref) def get_manageable_snapshots(self, cinder_snapshots, marker, limit, offset, sort_keys, sort_dirs): """List snapshots on the backend available for management by Cinder.""" LOG.debug( 'get_manageable_volumes CALLED ' 'cinder_snapshots: %(volume)s, ' 'marker: %(mkr)s, ' 'limit: %(lmt)s, ' 'offset: %(_offset)s, ' 'sort_keys: %(s_key)s, ' 'sort_dirs: %(sort_dir)s', { 'volume': cinder_snapshots, 'mkr': marker, 'lmt': limit, '_offset': offset, 's_key': sort_keys, 'sort_dir': sort_dirs } ) return self.common.get_manageable_snapshots(cinder_snapshots, marker, limit, offset, sort_keys, sort_dirs) def unmanage_snapshot(self, snapshot): """Removes the specified snapshot from Cinder management. Does not delete the underlying backend storage object. For most drivers, this will not need to do anything. However, some drivers might use this call as an opportunity to clean up any Cinder-specific configuration that they have associated with the backend storage object. 
:param snapshot: Cinder volume snapshot to unmanage """ LOG.debug( 'manage_existing_snapshot_get_size CALLED ' 'snapshot: %(si)s', { 'si': snapshot } ) return self.common.unmanage_snapshot(snapshot) ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1759315577.371121 cinder-27.0.0/cinder/volume/drivers/infortrend/raidcmd_cli/0000775000175000017500000000000000000000000023724 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/drivers/infortrend/raidcmd_cli/__init__.py0000664000175000017500000000000000000000000026023 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/drivers/infortrend/raidcmd_cli/cli_factory.py0000664000175000017500000005361700000000000026610 0ustar00zuulzuul00000000000000# Copyright (c) 2015 Infortrend Technology, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Infortrend basic CLI factory. """ import abc import os import time from oslo_concurrency import processutils from oslo_log import log as logging from oslo_utils import strutils from cinder import utils LOG = logging.getLogger(__name__) DEFAULT_RETRY_TIME = 5 def retry_cli(func): def inner(self, *args, **kwargs): total_retry_time = self.cli_retry_time if total_retry_time is None: total_retry_time = DEFAULT_RETRY_TIME retry_time = 0 while retry_time < total_retry_time: rc, out = func(self, *args, **kwargs) retry_time += 1 if rc == 0: break LOG.error( 'Retry %(retry)s times: %(method)s Failed ' '%(rc)s: %(reason)s', { 'retry': retry_time, 'method': self.__class__.__name__, 'rc': rc, 'reason': out}) # show error log, not retrying if rc == 1: # RAID return fail break elif rc == 11: # rc == 11 means not exist break elif rc == 20: # rc == 20 means already exist break LOG.debug( 'Method: %(method)s Return Code: %(rc)s ' 'Output: %(out)s', { 'method': self.__class__.__name__, 'rc': rc, 'out': out}) return rc, out return inner def os_execute(fd, raidcmd_timeout, command_line): os.write(fd, command_line.encode('utf-8')) return os_read(fd, 8192, 'RAIDCmd:>', raidcmd_timeout) def os_read(fd, buffer_size, cmd_pattern, raidcmd_timeout): content = '' start_time = int(time.time()) while True: time.sleep(0.5) output = os.read(fd, buffer_size) if len(output) > 0: content += output.decode('utf-8') if content.find(cmd_pattern) >= 0: break if int(time.time()) - start_time > raidcmd_timeout: content = 'Raidcmd timeout: %s' % content LOG.error( 'Raidcmd exceeds cli timeout [%(timeout)s]s.', { 'timeout': raidcmd_timeout}) break return content def strip_empty_in_list(list): result = [] for entry in list: entry = entry.strip() if entry != "": result.append(entry) return result def table_to_dict(table): tableHeader = table[0].split(" ") tableHeaderList = strip_empty_in_list(tableHeader) result = [] for i in range(len(table) - 2): if table[i + 2].strip() == "": 
break resultEntry = {} tableEntry = table[i + 2].split(" ") tableEntryList = strip_empty_in_list(tableEntry) for key, value in zip(tableHeaderList, tableEntryList): resultEntry[key] = value result.append(resultEntry) return result def content_lines_to_dict(content_lines): result = [] resultEntry = {} for content_line in content_lines: if content_line.strip() == "": result.append(resultEntry) resultEntry = {} continue split_entry = content_line.strip().split(": ", 1) resultEntry[split_entry[0]] = split_entry[1] return result class BaseCommand(object, metaclass=abc.ABCMeta): """The BaseCommand abstract class.""" def __init__(self): super(BaseCommand, self).__init__() @abc.abstractmethod def execute(self, *args, **kwargs): pass class ShellCommand(BaseCommand): """The Common ShellCommand.""" def __init__(self, cli_conf): super(ShellCommand, self).__init__() self.cli_retry_time = cli_conf.get('cli_retry_time') @retry_cli def execute(self, *args, **kwargs): commands = ' '.join(args) result = None rc = 0 try: result, err = utils.execute(commands, shell=True) except processutils.ProcessExecutionError as pe: rc = pe.exit_code result = pe.stdout result = result.replace('\n', '\\n') LOG.error( 'Error on execute command. ' 'Error code: %(exit_code)d Error msg: %(result)s', { 'exit_code': pe.exit_code, 'result': result}) return rc, result class ExecuteCommand(BaseCommand): """The Cinder Filter Command.""" def __init__(self, cli_conf): super(ExecuteCommand, self).__init__() self.cli_retry_time = cli_conf.get('cli_retry_time') @retry_cli def execute(self, *args, **kwargs): result = None rc = 0 try: result, err = utils.execute(*args, **kwargs) except processutils.ProcessExecutionError as pe: rc = pe.exit_code result = pe.stdout result = result.replace('\n', '\\n') LOG.error( 'Error on execute command. ' 'Error code: %(exit_code)d Error msg: %(result)s', { 'exit_code': pe.exit_code, 'result': result}) return rc, result class CLIBaseCommand(BaseCommand): """The CLIBaseCommand class.""" def __init__(self, cli_conf): super(CLIBaseCommand, self).__init__() self.cli_retry_time = cli_conf.get('cli_retry_time') self.raidcmd_timeout = cli_conf.get('raidcmd_timeout') self.cli_cache = cli_conf.get('cli_cache') self.pid = cli_conf.get('pid') self.fd = cli_conf.get('fd') self.command = "" self.parameters = () self.show_noinit = "" self.command_line = "" def _generate_command(self, parameters): """Generate execute Command. use java, execute, command, parameters.""" self.parameters = parameters parameters_line = ' '.join(parameters) self.command_line = "{0} {1} {2}\n".format( self.command, parameters_line, self.show_noinit) return self.command_line def _parser(self, content=None): """The parser to parse command result. 
:param content: The parse Content :returns: parse result """ content = content.replace("\r", "") content = content.replace("\\/-", "") content = content.strip() LOG.debug(content) if content is not None: content_lines = content.split("\n") rc, out = self._parse_return(content_lines) if rc != 0: return rc, out else: return rc, content_lines return -1, None @retry_cli def execute(self, *args, **kwargs): command_line = self._generate_command(args) LOG.debug('Executing: %(command)s', { 'command': strutils.mask_password(command_line)}) rc = 0 result = None try: content = self._execute(command_line) rc, result = self._parser(content) except processutils.ProcessExecutionError as pe: rc = -2 # prevent confusing with cli real rc result = pe.stdout result = result.replace('\n', '\\n') LOG.error( 'Error on execute %(command)s. ' 'Error code: %(exit_code)d Error msg: %(result)s', { 'command': strutils.mask_password(command_line), 'exit_code': pe.exit_code, 'result': result}) return rc, result def _execute(self, command_line): return os_execute( self.fd, self.raidcmd_timeout, command_line) def _parse_return(self, content_lines): """Get the end of command line result.""" rc = 0 if 'Raidcmd timeout' in content_lines[0]: rc = -3 return_cli_result = content_lines elif len(content_lines) < 4: rc = -4 return_cli_result = 'Raidcmd output error: %s' % content_lines else: return_value = content_lines[-3].strip().split(' ', 1)[1] return_cli_result = content_lines[-4].strip().split(' ', 1)[1] rc = int(return_value, 16) return rc, return_cli_result class ConnectRaid(CLIBaseCommand): """The Connect Raid Command.""" def __init__(self, *args, **kwargs): super(ConnectRaid, self).__init__(*args, **kwargs) self.command = "connect" class CheckConnection(CLIBaseCommand): """The Check Connection Command.""" def __init__(self, *args, **kwargs): super(CheckConnection, self).__init__(*args, **kwargs) self.command = "lock" class InitCache(CLIBaseCommand): """Refresh cacahe data for update volume status.""" def __init__(self, *args, **kwargs): super(InitCache, self).__init__(*args, **kwargs) self.command = "utility init-cache" class CreateLD(CLIBaseCommand): """The Create LD Command.""" def __init__(self, *args, **kwargs): super(CreateLD, self).__init__(*args, **kwargs) self.command = "create ld" class CreateLV(CLIBaseCommand): """The Create LV Command.""" def __init__(self, *args, **kwargs): super(CreateLV, self).__init__(*args, **kwargs) self.command = "create lv" class CreatePartition(CLIBaseCommand): """Create Partition. create part [LV-ID] [name] [size={partition-size}] [min={minimal-reserve-size}] [init={switch}] [tier={tier-level-list}] """ def __init__(self, *args, **kwargs): super(CreatePartition, self).__init__(*args, **kwargs) self.command = "create part" class DeletePartition(CLIBaseCommand): """Delete Partition. delete part [partition-ID] [-y] """ def __init__(self, *args, **kwargs): super(DeletePartition, self).__init__(*args, **kwargs) self.command = "delete part" class SetPartition(CLIBaseCommand): """Set Partition. set part [partition-ID] [name={partition-name}] [min={minimal-reserve-size}] set part expand [partition-ID] [size={expand-size}] set part purge [partition-ID] [number] [rule-type] set part reclaim [partition-ID] set part tier-resided [partition-ID] tier={tier-level-list} """ def __init__(self, *args, **kwargs): super(SetPartition, self).__init__(*args, **kwargs) self.command = "set part" class SetLV(CLIBaseCommand): """Set Logical Volume. 
set lv tier-migrate [LV-ID] [part={partition-IDs}] """ def __init__(self, *args, **kwargs): super(SetLV, self).__init__(*args, **kwargs) self.command = "set lv" class SetSnapshot(CLIBaseCommand): """Set Logical Volume. set lv tier-migrate [LV-ID] [part={partition-IDs}] """ def __init__(self, *args, **kwargs): super(SetSnapshot, self).__init__(*args, **kwargs) self.command = "set si" class CreateMap(CLIBaseCommand): """Map the Partition on the channel. create map [part] [partition-ID] [Channel-ID] [Target-ID] [LUN-ID] [assign={assign-to}] """ def __init__(self, *args, **kwargs): super(CreateMap, self).__init__(*args, **kwargs) self.command = "create map" class DeleteMap(CLIBaseCommand): """Unmap the Partition on the channel. delete map [part] [partition-ID] [Channel-ID] [Target-ID] [LUN-ID] [-y] """ def __init__(self, *args, **kwargs): super(DeleteMap, self).__init__(*args, **kwargs) self.command = "delete map" class CreateSnapshot(CLIBaseCommand): """Create partition's Snapshot. create si [part] [partition-ID] """ def __init__(self, *args, **kwargs): super(CreateSnapshot, self).__init__(*args, **kwargs) self.command = "create si" class DeleteSnapshot(CLIBaseCommand): """Delete partition's Snapshot. delete si [snapshot-image-ID] [-y] """ def __init__(self, *args, **kwargs): super(DeleteSnapshot, self).__init__(*args, **kwargs) self.command = "delete si" class CreateReplica(CLIBaseCommand): """Create partition or snapshot's replica. create replica [name] [part | si] [source-volume-ID] [part] [target-volume-ID] [type={replication-mode}] [priority={level}] [desc={description}] [incremental={switch}] [timeout={value}] [compression={switch}] """ def __init__(self, *args, **kwargs): super(CreateReplica, self).__init__(*args, **kwargs) self.command = "create replica" class DeleteReplica(CLIBaseCommand): """Delete and terminate specific replication job. delete replica [volume-pair-ID] [-y] """ def __init__(self, *args, **kwargs): super(DeleteReplica, self).__init__(*args, **kwargs) self.command = "delete replica" class CreateIQN(CLIBaseCommand): """Create host iqn for CHAP or lun filter. create iqn [IQN] [IQN-alias-name] [user={username}] [password={secret}] [target={name}] [target-password={secret}] [ip={ip-address}] [mask={netmask-ip}] """ def __init__(self, *args, **kwargs): super(CreateIQN, self).__init__(*args, **kwargs) self.command = "create iqn" class DeleteIQN(CLIBaseCommand): """Delete host iqn by name. delete iqn [name] """ def __init__(self, *args, **kwargs): super(DeleteIQN, self).__init__(*args, **kwargs) self.command = "delete iqn" class SetIOTimeout(CLIBaseCommand): """Set CLI IO timeout. utility set io-timeout [time] """ def __init__(self, *args, **kwargs): super(SetIOTimeout, self).__init__(*args, **kwargs) self.command = "utility set io-timeout" class ShowCommand(CLIBaseCommand): """Basic Show Command.""" def __init__(self, *args, **kwargs): super(ShowCommand, self).__init__(*args, **kwargs) self.param_detail = "-l" self.default_type = "table" self.start_key = "" if self.cli_cache: self.show_noinit = "-noinit" def _parser(self, content=None): """Parse Table or Detail format into dict. # Table format ID Name LD-amount ---------------------- 123 LV-1 1 # Result { 'ID': '123', 'Name': 'LV-1', 'LD-amount': '1' } # Detail format ID: 5DE94FF775D81C30 Name: LV-1 LD-amount: 1 # Result { 'ID': '123', 'Name': 'LV-1', 'LD-amount': '1' } :param content: The parse Content. :returns: parse result """ rc, out = super(ShowCommand, self)._parser(content) # Error. 
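# rc and out come from CLIBaseCommand._parse_return(), which reads the
# hexadecimal return value from the tail of the raidcmd output; any
# non-zero rc is handed straight back to the caller. When rc is 0, the
# trailing return-value/prompt lines are sliced off further down
# (out[start_id:-3] for detail output, out[start_id:-4] for tables)
# before the remaining lines are parsed into dicts.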
if rc != 0: return rc, out # No content. if len(out) < 6: return rc, [] detect_type = self.detect_type() # Show detail content. if detect_type == "list": start_id = self.detect_detail_start_index(out) if start_id < 0: return rc, [] result = content_lines_to_dict(out[start_id:-3]) else: start_id = self.detect_table_start_index(out) if start_id < 0: return rc, [] result = table_to_dict(out[start_id:-4]) return rc, result def detect_type(self): if self.param_detail in self.parameters: detect_type = "list" else: detect_type = self.default_type return detect_type def detect_table_start_index(self, content): for i in range(1, len(content)): key = content[i].strip().split(' ') if self.start_key in key[0].strip(): return i return -1 def detect_detail_start_index(self, content): for i in range(1, len(content)): split_entry = content[i].strip().split(' ') if len(split_entry) >= 2 and ':' in split_entry[0]: return i return -1 class ShowLD(ShowCommand): """Show LD. show ld [index-list] """ def __init__(self, *args, **kwargs): super(ShowLD, self).__init__(*args, **kwargs) self.command = "show ld" class ShowLV(ShowCommand): """Show LV. show lv [lv={LV-IDs}] [-l] """ def __init__(self, *args, **kwargs): super(ShowLV, self).__init__(*args, **kwargs) self.command = "show lv" self.start_key = "ID" self.show_noinit = "" def detect_table_start_index(self, content): if "tier" in self.parameters: self.start_key = "LV-Name" for i in range(1, len(content)): key = content[i].strip().split(' ') if self.start_key in key[0].strip(): return i return -1 class ShowPartition(ShowCommand): """Show Partition. show part [part={partition-IDs} | lv={LV-IDs}] [-l] """ def __init__(self, *args, **kwargs): super(ShowPartition, self).__init__(*args, **kwargs) self.command = "show part" self.start_key = "ID" self.show_noinit = "" class ShowSnapshot(ShowCommand): """Show Snapshot. show si [si={snapshot-image-IDs} | part={partition-IDs} | lv={LV-IDs}] [-l] """ def __init__(self, *args, **kwargs): super(ShowSnapshot, self).__init__(*args, **kwargs) self.command = "show si" self.start_key = "Index" class ShowDevice(ShowCommand): """Show Device. show device """ def __init__(self, *args, **kwargs): super(ShowDevice, self).__init__(*args, **kwargs) self.command = "show device" self.start_key = "Index" class ShowChannel(ShowCommand): """Show Channel. show channel """ def __init__(self, *args, **kwargs): super(ShowChannel, self).__init__(*args, **kwargs) self.command = "show channel" self.start_key = "Ch" class ShowDisk(ShowCommand): """The Show Disk Command. show disk [disk-index-list | channel={ch}] """ def __init__(self, *args, **kwargs): super(ShowDisk, self).__init__(*args, **kwargs) self.command = "show disk" class ShowMap(ShowCommand): """Show Map. show map [part={partition-IDs} | channel={channel-IDs}] [-l] """ def __init__(self, *args, **kwargs): super(ShowMap, self).__init__(*args, **kwargs) self.command = "show map" self.start_key = "Ch" class ShowNet(ShowCommand): """Show IP network. show net [id={channel-IDs}] [-l] """ def __init__(self, *args, **kwargs): super(ShowNet, self).__init__(*args, **kwargs) self.command = "show net" self.start_key = "ID" class ShowLicense(ShowCommand): """Show License. show license """ def __init__(self, *args, **kwargs): super(ShowLicense, self).__init__(*args, **kwargs) self.command = "show license" self.start_key = "License" def _parser(self, content=None): """Parse License format. 
# License format License Amount(Partition/Subsystem) Expired ------------------------------------------------ EonPath --- True # Result { 'EonPath': { 'Amount': '---', 'Support': True } } :param content: The parse Content. :returns: parse result """ rc, out = super(ShowLicense, self)._parser(content) if rc != 0: return rc, out if len(out) > 0: result = {} for entry in out: if entry['Expired'] == '---' or entry['Expired'] == 'Expired': support = False else: support = True result[entry['License']] = { 'Amount': entry['Amount(Partition/Subsystem)'], 'Support': support } return rc, result return rc, [] class ShowReplica(ShowCommand): """Show information of all replication jobs or specific job. show replica [id={volume-pair-IDs}] [-l] id={volume-pair-IDs} """ def __init__(self, *args, **kwargs): super(ShowReplica, self).__init__(*args, **kwargs) self.command = 'show replica' self.show_noinit = "" class ShowWWN(ShowCommand): """Show Fibre network. show wwn """ def __init__(self, *args, **kwargs): super(ShowWWN, self).__init__(*args, **kwargs) self.command = "show wwn" self.start_key = "CH" class ShowIQN(ShowCommand): """Show iSCSI initiator IQN which is set by create iqn. show iqn """ LIST_START_LINE = "List of initiator IQN(s):" def __init__(self, *args, **kwargs): super(ShowIQN, self).__init__(*args, **kwargs) self.command = "show iqn" self.default_type = "list" def detect_detail_start_index(self, content): for i in range(1, len(content)): if content[i].strip() == self.LIST_START_LINE: return i + 2 return -1 class ShowHost(ShowCommand): """Show host settings. show host """ def __init__(self, *args, **kwargs): super(ShowHost, self).__init__(*args, **kwargs) self.command = "show host" self.default_type = "list" def detect_detail_start_index(self, content): for i in range(1, len(content)): if ':' in content[i]: return i return -1 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/drivers/infortrend/raidcmd_cli/common_cli.py0000664000175000017500000031023600000000000026422 0ustar00zuulzuul00000000000000# Copyright (c) 2015 Infortrend Technology, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Infortrend Common CLI. """ import math import os import time from oslo_concurrency import lockutils from oslo_config import cfg from oslo_log import log as logging from oslo_service import loopingcall from oslo_utils import timeutils from oslo_utils import units from cinder import exception from cinder.i18n import _ from cinder.volume.drivers.infortrend.raidcmd_cli import cli_factory as cli from cinder.volume.drivers.san import san from cinder.volume import volume_types from cinder.volume import volume_utils from cinder.zonemanager import utils as fczm_utils LOG = logging.getLogger(__name__) infortrend_opts = [ cfg.ListOpt('infortrend_pools_name', default='', help='The Infortrend logical volumes name list. 
' 'It is separated with comma.'), cfg.StrOpt('infortrend_cli_path', default='/opt/bin/Infortrend/raidcmd_ESDS10.jar', help='The Infortrend CLI absolute path.'), cfg.IntOpt('infortrend_cli_max_retries', default=5, help='The maximum retry times if a command fails.'), cfg.IntOpt('infortrend_cli_timeout', default=60, help='The timeout for CLI in seconds.'), cfg.ListOpt('infortrend_slots_a_channels_id', default='', help='Infortrend raid channel ID list on Slot A ' 'for OpenStack usage. It is separated with comma.'), cfg.ListOpt('infortrend_slots_b_channels_id', default='', help='Infortrend raid channel ID list on Slot B ' 'for OpenStack usage. It is separated with comma.'), cfg.StrOpt('infortrend_iqn_prefix', default='iqn.2002-10.com.infortrend', help='Infortrend iqn prefix for iSCSI.'), cfg.BoolOpt('infortrend_cli_cache', default=False, help='The Infortrend CLI cache. ' 'While set True, the RAID status report will use cache ' 'stored in the CLI. Never enable this unless the RAID is ' 'managed only by Openstack and only by one infortrend ' 'cinder-volume backend. Otherwise, CLI might report ' 'out-dated status to cinder and thus there might be some ' 'race condition among all backend/CLIs.'), cfg.StrOpt('java_path', default='/usr/bin/java', help='The Java absolute path.'), ] CONF = cfg.CONF CONF.register_opts(infortrend_opts) CLI_RC_FILTER = { 'CreatePartition': {'error': _('Failed to create partition.')}, 'DeletePartition': {'error': _('Failed to delete partition.')}, 'SetPartition': {'error': _('Failed to set partition.')}, 'CreateMap': { 'warning': { 1: 'RAID return Fail. Might be LUN conflict.', 20: 'The MCS Channel is grouped. / LUN Already Used.'}, 'error': _('Failed to create map.'), }, 'DeleteMap': { 'warning': {11: 'No mapping.'}, 'error': _('Failed to delete map.'), }, 'CreateSnapshot': {'error': _('Failed to create snapshot.')}, 'DeleteSnapshot': { 'warning': {11: 'No such snapshot exist.'}, 'error': _('Failed to delete snapshot.') }, 'CreateReplica': {'error': _('Failed to create replica.')}, 'DeleteReplica': {'error': _('Failed to delete replica.')}, 'CreateIQN': { 'warning': {20: 'IQN already existed.'}, 'error': _('Failed to create iqn.'), }, 'DeleteIQN': { 'warning': { 20: 'IQN has been used to create map.', 11: 'No such host alias name.', }, 'error': _('Failed to delete iqn.'), }, 'ShowLV': {'error': _('Failed to get lv info.')}, 'ShowPartition': {'error': _('Failed to get partition info.')}, 'ShowSnapshot': {'error': _('Failed to get snapshot info.')}, 'ShowDevice': {'error': _('Failed to get device info.')}, 'ShowChannel': {'error': _('Failed to get channel info.')}, 'ShowMap': {'error': _('Failed to get map info.')}, 'ShowNet': {'error': _('Failed to get network info.')}, 'ShowLicense': {'error': _('Failed to get license info.')}, 'ShowReplica': {'error': _('Failed to get replica info.')}, 'ShowWWN': {'error': _('Failed to get wwn info.')}, 'ShowIQN': {'error': _('Failed to get iqn info.')}, 'ShowHost': {'error': _('Failed to get host info.')}, 'SetIOTimeout': {'error': _('Failed to set IO timeout.')}, 'ConnectRaid': {'error': _('Failed to connect to raid.')}, 'InitCache': { 'warning': {9: 'Device not connected.'}, 'error': _('Failed to init cache.')}, 'ExecuteCommand': {'error': _('Failed to execute common command.')}, 'ShellCommand': {'error': _('Failed to execute shell command.')}, } def log_func(func): def inner(self, *args, **kwargs): LOG.debug('Entering: %(method)s', {'method': func.__name__}) start = timeutils.utcnow() ret = func(self, *args, **kwargs) end = 
timeutils.utcnow() LOG.debug( 'Leaving: %(method)s, ' 'Spent: %(time)s sec, ' 'Return: %(ret)s.', { 'method': func.__name__, 'time': timeutils.delta_seconds(start, end), 'ret': ret}) return ret return inner def mi_to_gi(mi_size): return mi_size * units.Mi / units.Gi def gi_to_mi(gi_size): return gi_size * units.Gi / units.Mi def ti_to_gi(ti_size): return ti_size * units.Ti / units.Gi def ti_to_mi(ti_size): return ti_size * units.Ti / units.Mi class InfortrendCliException(exception.CinderException): message = _("Infortrend CLI exception: %(err)s Param: %(param)s " "(Return Code: %(rc)s) (Output: %(out)s)") class InfortrendCommon(object): """The Infortrend's Common Command using CLI. Version history: .. code-block:: none 1.0.0 - Initial driver 1.0.1 - Support DS4000 1.0.2 - Support GS/GSe Family 1.0.3 - Support MPIO for iSCSI protocol 1.0.4 - Fix Nova live migration (bug #1481968) 1.1.0 - Improve driver performance 1.1.1 - Fix creating volume on a wrong pool Fix manage-existing volume issue 1.1.2 - Add volume migration check 2.0.0 - Enhance extraspecs usage and refactor retype 2.0.1 - Improve speed for deleting volume 2.0.2 - Remove timeout for replication 2.0.3 - Use full ID for volume name 2.1.0 - Support for list manageable volume Support for list/manage/unmanage snapshot Remove unnecessary check in snapshot 2.1.1 - Add Lun ID overflow check 2.1.2 - Support for force detach volume 2.1.3 - Add handling for LUN ID conflict for Active/Active cinder Improve speed for attach/detach/polling commands 2.1.4 - Check CLI connection first for polling process """ VERSION = '2.1.4' constants = { 'ISCSI_PORT': 3260, 'MAX_LUN_MAP_PER_CHL': 128, } PROVISIONING_KEY = 'infortrend:provisioning' TIERING_SET_KEY = 'infortrend:tiering' PROVISIONING_VALUES = ['thin', 'full'] TIERING_VALUES = [0, 1, 2, 3] def __init__(self, protocol, configuration=None): self.protocol = protocol self.configuration = configuration self.configuration.append_config_values(san.san_opts) self.configuration.append_config_values(infortrend_opts) self.path = self.configuration.infortrend_cli_path self.password = self.configuration.san_password self.ip = self.configuration.san_ip self.cli_retry_time = self.configuration.infortrend_cli_max_retries self.cli_timeout = self.configuration.infortrend_cli_timeout self.cli_cache = self.configuration.infortrend_cli_cache self.iqn_prefix = self.configuration.infortrend_iqn_prefix self.iqn = self.iqn_prefix + ':raid.uid%s.%s%s%s' self.unmanaged_prefix = 'cinder-unmanaged-%s' self.java_path = self.configuration.java_path self.fc_lookup_service = fczm_utils.create_lookup_service() self.backend_name = None self._volume_stats = None self.system_id = None self.pid = None self.fd = None self._model_type = 'R' self.map_dict = { 'slot_a': {}, 'slot_b': {}, } self.map_dict_init = False self.target_dict = { 'slot_a': {}, 'slot_b': {}, } if self.protocol == 'iSCSI': self.mcs_dict = { 'slot_a': {}, 'slot_b': {}, } self.tier_pools_dict = {} def check_for_setup_error(self): # These two checks needs raidcmd to be ready self._check_pools_setup() self._check_host_setup() def do_setup(self): if self.ip == '': msg = _('san_ip is not set.') LOG.error(msg) raise exception.VolumeDriverException(message=msg) if self.cli_timeout < 40: msg = _('infortrend_cli_timeout should be larger than 40.') LOG.error(msg) raise exception.VolumeDriverException(message=msg) self._init_pool_dict() self._init_channel_list() self._init_raidcmd() self.cli_conf = { 'path': self.path, 'cli_retry_time': self.cli_retry_time, 'raidcmd_timeout': 
self.cli_timeout, 'cli_cache': self.cli_cache, 'pid': self.pid, 'fd': self.fd, } self._init_raid_connection() self._set_raidcmd() def _init_pool_dict(self): self.pool_dict = {} pools_name = self.configuration.infortrend_pools_name if pools_name == '': msg = _('Pools name is not set.') LOG.error(msg) raise exception.VolumeDriverException(message=msg) tmp_pool_list = pools_name for pool in tmp_pool_list: self.pool_dict[pool.strip()] = '' def _init_channel_list(self): self.channel_list = { 'slot_a': [], 'slot_b': [], } tmp_channel_list = ( self.configuration.infortrend_slots_a_channels_id ) self.channel_list['slot_a'] = ( [str(channel) for channel in tmp_channel_list] ) tmp_channel_list = ( self.configuration.infortrend_slots_b_channels_id ) self.channel_list['slot_b'] = ( [str(channel) for channel in tmp_channel_list] ) def _init_raidcmd(self): if not self.pid: self.pid, self.fd = os.forkpty() if self.pid == 0: try: os.execv(self.java_path, [self.java_path, '-jar', self.path]) except OSError: msg = _('Raidcmd failed to start. ' 'Please check Java is installed.') LOG.error(msg) raise exception.VolumeDriverException(message=msg) check_java_start = cli.os_read(self.fd, 1024, 'RAIDCmd:>', 10) if 'Raidcmd timeout' in check_java_start: msg = _('Raidcmd failed to start. ' 'Please check Java is installed.') LOG.error(msg) raise exception.VolumeDriverException(message=msg) LOG.debug('Raidcmd [%s:%s] start!', self.pid, self.fd) def _set_raidcmd(self): cli_io_timeout = str(self.cli_timeout - 10) rc, _ = self._execute('SetIOTimeout', cli_io_timeout) LOG.debug('CLI IO timeout is [%s]', cli_io_timeout) def _init_raid_connection(self): raid_password = '' if self.password: raid_password = 'password=%s' % self.password rc, _ = self._execute('ConnectRaid', self.ip, raid_password, '-notiOn') LOG.info('Raid [%s] is connected!', self.ip) def _execute_command(self, cli_type, *args, **kwargs): command = getattr(cli, cli_type) return command(self.cli_conf).execute(*args, **kwargs) def _execute(self, cli_type, *args, **kwargs): LOG.debug('Executing command type: %(type)s.', {'type': cli_type}) @lockutils.synchronized('raidcmd-%s' % self.pid, 'infortrend-', False) def _lock_raidcmd(cli_type, *args, **kwargs): return self._execute_command(cli_type, *args, **kwargs) rc, out = _lock_raidcmd(cli_type, *args, **kwargs) if rc != 0: if cli_type == 'CheckConnection': return rc, out elif ('warning' in CLI_RC_FILTER[cli_type] and rc in CLI_RC_FILTER[cli_type]['warning']): LOG.warning(CLI_RC_FILTER[cli_type]['warning'][rc]) else: msg = CLI_RC_FILTER[cli_type]['error'] LOG.error(msg) raise InfortrendCliException( err=msg, param=args, rc=rc, out=out) return rc, out @log_func def _init_map_info(self): if not self.map_dict_init: rc, channel_info = self._execute('ShowChannel') if 'BID' in channel_info[0]: self._model_type = 'R' self._set_channel_id(channel_info, 'slot_b') else: self._model_type = 'G' self._set_channel_id(channel_info, 'slot_a') self.map_dict_init = True for controller in sorted(self.map_dict.keys()): LOG.debug('Controller: [%(controller)s] ' 'enable channels: %(ch)s', { 'controller': controller, 'ch': sorted(self.map_dict[controller].keys())}) @log_func def _update_map_info(self, multipath=False): """Record the driver mapping information. 
map_dict = { 'slot_a': { '0': [1, 2, 3, 4] # Slot A Channel 0 map lun 1, 2, 3, 4 }, 'slot_b' : { '1': [0, 1, 3] # Slot B Channel 1 map lun 0, 1, 3 } } """ rc, map_info = self._execute('ShowMap') self._update_map_info_by_slot(map_info, 'slot_a') if multipath and self._model_type == 'R': self._update_map_info_by_slot(map_info, 'slot_b') return map_info @log_func def _update_map_info_by_slot(self, map_info, slot_key): for key, value in self.map_dict[slot_key].items(): self.map_dict[slot_key][key] = list( range(self.constants['MAX_LUN_MAP_PER_CHL'])) if len(map_info) > 0 and isinstance(map_info, list): for entry in map_info: ch = entry['Ch'] lun = entry['LUN'] if ch not in self.map_dict[slot_key].keys(): continue target_id = self.target_dict[slot_key][ch] if (entry['Target'] == target_id and int(lun) in self.map_dict[slot_key][ch]): self.map_dict[slot_key][ch].remove(int(lun)) def _check_initiator_has_lun_map(self, initiator_info): rc, map_info = self._execute('ShowMap') if not isinstance(initiator_info, list): initiator_info = (initiator_info,) if len(map_info) > 0: for initiator_name in initiator_info: for entry in map_info: if initiator_name.lower() == entry['Host-ID'].lower(): return True return False @log_func def _set_channel_id( self, channel_info, controller): if self.protocol == 'iSCSI': check_channel_type = ('NETWORK', 'LAN') else: check_channel_type = ('FIBRE', 'Fibre') for entry in channel_info: if entry['Type'] in check_channel_type: if entry['Ch'] in self.channel_list[controller]: self.map_dict[controller][entry['Ch']] = [] if self.protocol == 'iSCSI': self._update_mcs_dict( entry['Ch'], entry['MCS'], controller) self._update_target_dict(entry, controller) # check the channel status if entry['curClock'] == '---': LOG.warning( 'Controller[%(controller)s] ' 'Channel[%(Ch)s] not linked, please check.', { 'controller': controller, 'Ch': entry['Ch']}) @log_func def _update_target_dict(self, channel, controller): """Record the target id for mapping. # R model target_dict = { 'slot_a': { '0': '0', '1': '0', }, 'slot_b': { '0': '1', '1': '1', }, } # G model target_dict = { 'slot_a': { '2': '32', '3': '112', } } """ if self._model_type == 'G': self.target_dict[controller][channel['Ch']] = channel['ID'] else: if controller == 'slot_a': self.target_dict[controller][channel['Ch']] = channel['AID'] else: self.target_dict[controller][channel['Ch']] = channel['BID'] def _update_mcs_dict(self, channel_id, mcs_id, controller): """Record the iSCSI MCS topology. 
# R model with mcs, but it not working with iSCSI multipath mcs_dict = { 'slot_a': { '0': ['0', '1'], '2': ['2'], '3': ['3'], }, 'slot_b': { '0': ['0', '1'], '2': ['2'] } } # G model with mcs mcs_dict = { 'slot_a': { '0': ['0', '1'], '1': ['2'] }, 'slot_b': {} } """ if mcs_id not in self.mcs_dict[controller]: self.mcs_dict[controller][mcs_id] = [] self.mcs_dict[controller][mcs_id].append(channel_id) def _check_pools_setup(self): temp_pool_dict = self.pool_dict.copy() rc, lv_info = self._execute('ShowLV') for lv in lv_info: if lv['Name'] in temp_pool_dict.keys(): del temp_pool_dict[lv['Name']] self.pool_dict[lv['Name']] = lv['ID'] if len(temp_pool_dict) == 0: break if len(temp_pool_dict) != 0: msg = _('Please create %(pool_list)s pool in advance!') % { 'pool_list': list(temp_pool_dict.keys())} LOG.error(msg) raise exception.VolumeDriverException(message=msg) def _check_host_setup(self): rc, host_info = self._execute('ShowHost') max_lun = int(host_info[0]['Max LUN per ID']) device_type = host_info[0]['Peripheral device type'] if 'No Device Present' not in device_type: msg = _('Please set to ' ' in advance!') LOG.error(msg) raise exception.VolumeDriverException(message=msg) self.constants['MAX_LUN_MAP_PER_CHL'] = max_lun system_id = self._get_system_id(self.ip) LOG.info('Device: [%(device)s] ' 'max LUN setting is: [%(luns)s]', { 'device': system_id, 'luns': self.constants['MAX_LUN_MAP_PER_CHL']}) def create_volume(self, volume): """Create a Infortrend partition.""" self._create_partition_by_default(volume) part_id = self._get_part_id(volume['id']) system_id = self._get_system_id(self.ip) model_dict = { 'system_id': system_id, 'partition_id': part_id, } model_update = { "provider_location": self._concat_provider_location(model_dict), } LOG.info('Create Volume %(volume_id)s completed.', { 'volume_id': volume['id']}) return model_update def _create_partition_by_default(self, volume): pool_id = self._get_volume_pool_id(volume) self._create_partition_with_pool(volume, pool_id) def _create_partition_with_pool( self, volume, pool_id, extraspecs=None): volume_size = gi_to_mi(volume['size']) pool_name = volume['host'].split('#')[-1] if extraspecs: extraspecs = self._get_extraspecs_set(extraspecs) else: extraspecs = self._get_volume_type_extraspecs(volume) pool_extraspecs = self._get_pool_extraspecs(pool_name, extraspecs) provisioning = pool_extraspecs['provisioning'] tiering = pool_extraspecs['tiering'] extraspecs_dict = {} # Normal pool if pool_id not in self.tier_pools_dict.keys(): if provisioning == 'thin': extraspecs_dict['provisioning'] = int(volume_size * 0.2) extraspecs_dict['init'] = 'disable' # Tier pool else: pool_tiers = self.tier_pools_dict[pool_id] if tiering == 'all': # thin provisioning reside on all tiers if provisioning == 'thin': extraspecs_dict['provisioning'] = 0 tiering_set = ','.join(str(i) for i in pool_tiers) extraspecs_dict['tiering'] = tiering_set extraspecs_dict['init'] = 'disable' # full provisioning reside on the top tier else: top_tier = self.tier_pools_dict.get(pool_id)[0] self._check_tier_space(top_tier, pool_id, volume_size) extraspecs_dict['tiering'] = str(top_tier) else: # check extraspecs fit the real pool tiers if not self._check_pool_tiering(pool_tiers, tiering): msg = _('Tiering extraspecs %(pool_name)s:%(tiering)s ' 'can not fit in the real tiers %(pool_tier)s.') % { 'pool_name': pool_name, 'tiering': tiering, 'pool_tier': pool_tiers} LOG.error(msg) raise exception.VolumeDriverException(message=msg) # User specific tier levels if provisioning == 'thin': 
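# Thin provisioning on user-specified tiers: the minimal reserve is set
# to 0 (min=0MB), the partition may reside on every requested tier, and
# init is disabled so space is allocated on demand. The else branch
# below handles full provisioning, which places the partition on the
# first requested tier only, after checking its remaining space.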
extraspecs_dict['provisioning'] = 0 tiering_set = ','.join(str(i) for i in tiering) extraspecs_dict['tiering'] = tiering_set extraspecs_dict['init'] = 'disable' else: self._check_tier_space(tiering[0], pool_id, volume_size) extraspecs_dict['tiering'] = str(tiering[0]) cmd = '' if extraspecs_dict: cmd = self._create_part_parameters_str(extraspecs_dict) commands = (pool_id, volume['id'], 'size=%s' % int(volume_size), cmd) self._execute('CreatePartition', *commands) def _check_pool_tiering(self, pool_tiers, extra_specs_tiers): return set(extra_specs_tiers).issubset(pool_tiers) def _check_tier_pool_or_not(self, pool_id): if pool_id in self.tier_pools_dict.keys(): return True return False def _check_tier_space(self, tier_level, pool_id, volume_size): rc, lv_info = self._execute('ShowLV', 'tier') if lv_info: for entry in lv_info: if (entry['LV-ID'] == pool_id and int(entry['Tier']) == tier_level): total_space = self._parse_size(entry['Size'], 'MB') used_space = self._parse_size(entry['Used'], 'MB') if not (total_space and used_space): return elif volume_size > (total_space - used_space): LOG.warning('Tier pool [%(pool_id)s] ' 'has already run out of space in ' 'tier level [%(tier_level)s].', { 'pool_id': pool_id, 'tier_level': tier_level}) def _parse_size(self, size_string, return_unit): size = float(size_string.split(' ', 1)[0]) if 'TB' in size_string: if return_unit == 'GB': return round(ti_to_gi(size), 2) elif return_unit == 'MB': return round(ti_to_mi(size)) elif 'GB' in size_string: if return_unit == 'GB': return round(size, 2) elif return_unit == 'MB': return round(gi_to_mi(size)) elif 'MB' in size_string: if return_unit == 'GB': return round(mi_to_gi(size), 2) elif return_unit == 'MB': return round(size) else: LOG.warning('Tier size [%(size_string)s], ' 'the unit is not recognized.', { 'size_string': size_string}) return def _create_part_parameters_str(self, extraspecs_dict): parameters_list = [] parameters = { 'provisioning': 'min=%sMB', 'tiering': 'tier=%s', 'init': 'init=%s', } for extraspec in sorted(extraspecs_dict.keys()): value = parameters[extraspec] % (extraspecs_dict[extraspec]) parameters_list.append(value) return ' '.join(parameters_list) @log_func def _iscsi_create_map(self, part_id, multipath, host, system_id): host_filter = self._create_host_filter(host) rc, net_list = self._execute('ShowNet') self._update_map_info(multipath) rc, part_mapping = self._execute( 'ShowMap', 'part=%s' % part_id) map_chl, map_lun = self._get_mapping_info(multipath) lun_id = map_lun[0] save_id = lun_id while True: rc, iqns, ips, luns = self._exec_iscsi_create_map(map_chl, part_mapping, host, part_id, lun_id, host_filter, system_id, net_list) if rc == 20: self._delete_all_map(part_id) lun_id = self._find_next_lun_id(lun_id, save_id) else: break return iqns, ips, luns def _exec_iscsi_create_map(self, channel_dict, part_mapping, host, part_id, lun_id, host_filter, system_id, net_list): iqns = [] ips = [] luns = [] rc = 0 for controller in sorted(channel_dict.keys()): for channel_id in sorted(channel_dict[controller]): target_id = self.target_dict[controller][channel_id] exist_lun_id = self._check_map( channel_id, target_id, part_mapping, host) if exist_lun_id < 0: commands = ( 'part', part_id, channel_id, target_id, lun_id, host_filter ) rc, out = self._execute('CreateMap', *commands) if (rc == 20) or (rc == 1): # LUN Conflict detected. 
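# rc 20 means the LUN is already in use or the MCS channel is grouped;
# rc 1 is a RAID-side failure that may also indicate a LUN conflict
# (e.g. another Active/Active cinder backend grabbing the same LUN id).
# Returning 20 makes _iscsi_create_map() clean up the partial mapping
# via _delete_all_map() and retry with the next id from
# _find_next_lun_id().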
msg = _('Volume[%(part_id)s] LUN conflict detected, ' 'Ch:[%(Ch)s] ID:[%(tid)s] LUN:[%(lun)s].') % { 'part_id': part_id, 'Ch': channel_id, 'tid': target_id, 'lun': lun_id} LOG.warning(msg) return 20, 0, 0, 0 if rc != 0: msg = _('Volume[%(part_id)s] create map failed, ' 'Ch:[%(Ch)s] ID:[%(tid)s] LUN:[%(lun)s].') % { 'part_id': part_id, 'Ch': channel_id, 'tid': target_id, 'lun': lun_id} LOG.error(msg) raise exception.VolumeDriverException(message=msg) exist_lun_id = int(lun_id) self.map_dict[controller][channel_id].remove(exist_lun_id) mcs_id = self._get_mcs_id(channel_id, controller) # There might be some channels in the same group for channel in self.mcs_dict[controller][mcs_id]: target_id = self.target_dict[controller][channel] map_ch_info = { 'system_id': system_id, 'mcs_id': mcs_id, 'target_id': target_id, 'controller': controller, } iqns.append(self._generate_iqn(map_ch_info)) ips.append(self._get_ip_by_channel( channel, net_list, controller)) luns.append(exist_lun_id) return rc, iqns, ips, luns def _check_map(self, channel_id, target_id, part_map_info, host): if len(part_map_info) > 0: for entry in part_map_info: if (entry['Ch'] == channel_id and entry['Target'] == target_id and entry['Host-ID'].lower() == host.lower()): return int(entry['LUN']) return -1 def _create_host_filter(self, host): if self.protocol == 'iSCSI': host_filter = 'iqn=%s' % host else: host_filter = 'wwn=%s' % host return host_filter def _get_extraspecs_dict(self, volume_type_id): extraspecs = {} if volume_type_id: extraspecs = volume_types.get_volume_type_extra_specs( volume_type_id) return extraspecs def _get_volume_pool_id(self, volume): pool_name = volume['host'].split('#')[-1] pool_id = self._find_pool_id_by_name(pool_name) if not pool_id: msg = _('Failed to get pool id with pool %(pool_name)s.') % { 'pool_name': pool_name} LOG.error(msg) raise exception.VolumeDriverException(data=msg) return pool_id def _get_volume_type_extraspecs(self, volume): """Example for Infortrend extraspecs settings: Using a global setting: infortrend:provisoioning: 'thin' infortrend:tiering: '0,1,2' Using an individual setting: infortrend:provisoioning: 'LV0:thin;LV1:full' infortrend:tiering: 'LV0:0,1,3; LV1:1' Using a mixed setting: infortrend:provisoioning: 'LV0:thin;LV1:full' infortrend:tiering: 'all' """ # extraspecs default setting extraspecs_set = { 'global_provisioning': 'full', 'global_tiering': 'all', } extraspecs = self._get_extraspecs_dict(volume['volume_type_id']) if extraspecs: extraspecs_set = self._get_extraspecs_set(extraspecs) return extraspecs_set def _get_pool_extraspecs(self, pool_name, all_extraspecs): LOG.debug('_Extraspecs_dict: %s', all_extraspecs) pool_extraspecs = {} provisioning = None tiering = None # check individual setting if pool_name in all_extraspecs.keys(): if 'provisioning' in all_extraspecs[pool_name]: provisioning = all_extraspecs[pool_name]['provisioning'] if 'tiering' in all_extraspecs[pool_name]: tiering = all_extraspecs[pool_name]['tiering'] # use global setting if not provisioning: provisioning = all_extraspecs['global_provisioning'] if not tiering: tiering = all_extraspecs['global_tiering'] if tiering != 'all': pool_id = self._find_pool_id_by_name(pool_name) if not self._check_tier_pool_or_not(pool_id): LOG.warning('Infortrend pool: [%(pool_name)s] ' 'is not a tier pool. 
Skip tiering ' '%(tiering)s because it is invalid.', { 'pool_name': pool_name, 'tiering': tiering}) self._check_extraspecs_conflict(tiering, provisioning) pool_extraspecs['provisioning'] = provisioning pool_extraspecs['tiering'] = tiering for key, value in pool_extraspecs.items(): if 'Err' in value: err, user_setting = value.split(':', 1) msg = _('Extraspecs Error, ' 'pool: [%(pool)s], %(key)s: %(setting)s ' 'is invalid, please check.') % { 'pool': pool_name, 'key': key, 'setting': user_setting} LOG.error(msg) raise exception.VolumeDriverException(message=msg) return pool_extraspecs def _check_extraspecs_conflict(self, tiering, provisioning): if len(tiering) > 1 and provisioning == 'full': msg = _('When provision is full, ' 'it must specify only one tier instead of ' '%(tiering)s tiers.') % { 'tiering': tiering} LOG.error(msg) raise exception.VolumeDriverException(message=msg) def _get_extraspecs_set(self, extraspecs): """Return extraspecs settings dictionary Legal values: provisioning: 'thin', 'full' tiering: 'all' or combination of 0,1,2,3 Only global settings example: extraspecs_set = { 'global_provisioning': 'thin', 'global_tiering': '[0, 1]', } All individual settings example: extraspecs_set = { 'global_provisioning': 'full', 'global_tiering': 'all', 'LV0': { 'provisioning': 'thin', 'tiering': [0, 1, 3], }, 'LV1': { 'provisioning': 'full', 'tiering': [1], } } Mixed settings example: extraspecs_set = { 'global_provisioning': 'thin', 'global_tiering': 'all', 'LV0': { 'tiering': [0, 1, 3], }, 'LV1': { 'provisioning': 'full', 'tiering': [1], } } Use global settings if a pool has no individual settings. """ # extraspecs default setting extraspecs_set = { 'global_provisioning': 'full', 'global_tiering': 'all', } provisioning_string = extraspecs.get(self.PROVISIONING_KEY, None) tiering_string = extraspecs.get(self.TIERING_SET_KEY, None) extraspecs_set = self._get_provisioning_setting( extraspecs_set, provisioning_string) extraspecs_set = self._get_tiering_setting( extraspecs_set, tiering_string) return extraspecs_set def _get_provisioning_setting(self, extraspecs_set, provisioning_string): # provisioning individual setting if provisioning_string and ':' in provisioning_string: provisioning_string = provisioning_string.replace(' ', '') provisioning_string = provisioning_string.split(';') for provisioning in provisioning_string: pool, value = provisioning.split(':', 1) if pool not in self.pool_dict.keys(): LOG.warning('Infortrend:provisioning ' 'this setting %(pool)s:%(value)s, ' 'pool [%(pool)s] not set in config.', { 'pool': pool, 'value': value}) else: if pool not in extraspecs_set.keys(): extraspecs_set[pool] = {} if value.lower() in self.PROVISIONING_VALUES: extraspecs_set[pool]['provisioning'] = value.lower() else: extraspecs_set[pool]['provisioning'] = 'Err:%s' % value LOG.warning('Infortrend:provisioning ' 'this setting %(pool)s:%(value)s, ' '[%(value)s] is illegal', { 'pool': pool, 'value': value}) # provisioning global setting elif provisioning_string: provisioning = provisioning_string.replace(' ', '').lower() if provisioning in self.PROVISIONING_VALUES: extraspecs_set['global_provisioning'] = provisioning else: extraspecs_set['global_provisioning'] = 'Err:%s' % provisioning LOG.warning('Infortrend:provisioning ' '[%(value)s] is illegal', { 'value': provisioning_string}) return extraspecs_set def _get_tiering_setting(self, extraspecs_set, tiering_string): # tiering individual setting if tiering_string and ':' in tiering_string: tiering_string = tiering_string.replace(' ', '') 
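# An individual tiering setting such as 'LV0:0,1,3; LV1:1' (see the
# examples in _get_volume_type_extraspecs) is normalized by stripping
# spaces, then split on ';' into one 'pool:levels' entry per pool; each
# levels list is de-duplicated and validated against TIERING_VALUES
# below, recording an 'Err:' marker for illegal values.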
tiering_string = tiering_string.split(';') for tiering_set in tiering_string: pool, value = tiering_set.split(':', 1) if pool not in self.pool_dict.keys(): LOG.warning('Infortrend:tiering ' 'this setting %(pool)s:%(value)s, ' 'pool [%(pool)s] not set in config.', { 'pool': pool, 'value': value}) else: if pool not in extraspecs_set.keys(): extraspecs_set[pool] = {} if value.lower() == 'all': extraspecs_set[pool]['tiering'] = 'all' else: value = value.split(',') value = [int(i) for i in value] value = list(set(value)) if value[-1] in self.TIERING_VALUES: extraspecs_set[pool]['tiering'] = value else: extraspecs_set[pool]['tiering'] = 'Err:%s' % value LOG.warning('Infortrend:tiering ' 'this setting %(pool)s:%(value)s, ' '[%(err_value)s] is illegal', { 'pool': pool, 'value': value, 'err_value': value[-1]}) # tiering global setting elif tiering_string: tiering_set = tiering_string.replace(' ', '').lower() if tiering_set != 'all': tiering_set = tiering_set.split(',') tiering_set = [int(i) for i in tiering_set] tiering_set = list(set(tiering_set)) if tiering_set[-1] in range(4): extraspecs_set['global_tiering'] = tiering_set else: extraspecs_set['global_tiering'] = 'Err:%s' % tiering_set LOG.warning('Infortrend:tiering ' '[%(err_value)s] is illegal', { 'err_value': tiering_set[-1]}) return extraspecs_set def _find_pool_id_by_name(self, pool_name): if pool_name in self.pool_dict.keys(): return self.pool_dict[pool_name] else: msg = _('Pool [%(pool_name)s] not set in cinder conf.') % { 'pool_name': pool_name} LOG.error(msg) raise exception.VolumeDriverException(data=msg) def _get_system_id(self, system_ip): if not self.system_id: rc, device_info = self._execute('ShowDevice') for entry in device_info: if system_ip == entry['Connected-IP']: self.system_id = str(int(entry['ID'], 16)) return self.system_id @log_func def _get_lun_id(self, ch_id, controller='slot_a'): lun_id = -1 if len(self.map_dict[controller][ch_id]) > 0: lun_id = self.map_dict[controller][ch_id][0] if lun_id == -1: msg = _('LUN number is out of bound ' 'on channel id: %(ch_id)s.') % {'ch_id': ch_id} LOG.error(msg) raise exception.VolumeDriverException(message=msg) else: return lun_id @log_func def _get_mapping_info(self, multipath): if multipath: return self._get_mapping_info_with_mpio() else: return self._get_mapping_info_with_normal() def _get_mapping_info_with_mpio(self): """Get all mapping channel id and minimun lun id mapping info. 
# R model with mcs map_chl = { 'slot_a': ['2', '0'] 'slot_b': ['0', '3'] } map_lun = ['0'] # G model with mcs map_chl = { 'slot_a': ['1', '2'] } map_lun = ['0'] mcs_dict = { 'slotX' = { 'MCSID': ['chID', 'chID'] } } :returns: all mapping channel id per slot and minimun lun id """ map_chl = { 'slot_a': [] } if self._model_type == 'R': map_chl['slot_b'] = [] # MPIO: Map all the channels specified in conf file # If MCS groups exist, only map to the minimum channel id per group for controller in map_chl.keys(): for mcs in self.mcs_dict[controller]: map_mcs_chl = sorted((self.mcs_dict[controller][mcs]))[0] map_chl[controller].append(map_mcs_chl) map_lun = self._get_minimum_common_lun_id(map_chl) if not map_lun: msg = _('Cannot find a common lun id for mapping.') LOG.error(msg) raise exception.VolumeDriverException(message=msg) return map_chl, map_lun def _get_minimum_common_lun_id(self, channel_dict): """Find the minimun common lun id in all channels.""" map_lun = [] # search for free lun id on all channels for lun_id in range(self.constants['MAX_LUN_MAP_PER_CHL']): lun_id_is_used = False for controller in channel_dict.keys(): for channel_id in channel_dict[controller]: if lun_id not in self.map_dict[controller][channel_id]: lun_id_is_used = True if not lun_id_is_used: map_lun.append(str(lun_id)) break # check lun id overflow elif (lun_id == self.constants['MAX_LUN_MAP_PER_CHL'] - 1): msg = _('LUN map has reached maximum value [%(max_lun)s].') % { 'max_lun': self.constants['MAX_LUN_MAP_PER_CHL']} LOG.error(msg) raise exception.VolumeDriverException(message=msg) return map_lun @log_func def _get_mapping_info_with_normal(self): """Get the minimun mapping channel id and lun id mapping info. # G model and R model map_chl = { 'slot_a': ['1'] } map_lun = ['0'] :returns: minimun mapping channel id per slot and lun id """ map_chl = { 'slot_a': [] } map_lun = [] ret_chl = self._get_minimun_mapping_channel_id('slot_a') lun_id = self._get_lun_id(ret_chl, 'slot_a') map_chl['slot_a'].append(ret_chl) map_lun.append(str(lun_id)) return map_chl, map_lun @log_func def _get_minimun_mapping_channel_id(self, controller): empty_lun_num = 0 min_map_chl = -1 # Sort items to get a reliable behaviour. Dictionary items # are iterated in a random order because of hash randomization. # We don't care MCS group here, single path working as well. 
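# The loop below inspects the lowest channel id of each MCS group on
# this controller and picks the channel with the most free LUN slots
# left in map_dict, spreading single-path mappings across the
# configured channels. If every channel is full, min_map_chl stays -1
# and a 'LUN map overflow' error is raised.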
for mcs in sorted(self.mcs_dict[controller].keys()): mcs_chl = sorted((self.mcs_dict[controller][mcs]))[0] free_lun_num = len(self.map_dict[controller][mcs_chl]) if empty_lun_num < free_lun_num: min_map_chl = mcs_chl empty_lun_num = free_lun_num if int(min_map_chl) < 0: msg = _('LUN map overflow on every channel.') LOG.error(msg) raise exception.VolumeDriverException(message=msg) else: return min_map_chl def _get_common_lun_map_id(self, wwpn_channel_info): map_lun = None # search for free lun id on all channels for lun_id in range(self.constants['MAX_LUN_MAP_PER_CHL']): lun_id_is_used = False for slot_name in ['slot_a', 'slot_b']: for wwpn in wwpn_channel_info: channel_id = wwpn_channel_info[wwpn]['channel'] if channel_id not in self.map_dict[slot_name]: continue elif lun_id not in self.map_dict[slot_name][channel_id]: lun_id_is_used = True if not lun_id_is_used: map_lun = lun_id break # check lun id overflow elif (lun_id == self.constants['MAX_LUN_MAP_PER_CHL'] - 1): msg = _('LUN map has reached maximum value [%(max_lun)s].') % { 'max_lun': self.constants['MAX_LUN_MAP_PER_CHL']} LOG.error(msg) raise exception.VolumeDriverException(message=msg) return map_lun def _get_mcs_id(self, channel_id, controller): mcs_id = None for mcs in self.mcs_dict[controller]: if channel_id in self.mcs_dict[controller][mcs]: mcs_id = mcs break if mcs_id is None: msg = _('Cannot get mcs_id by channel id: %(channel_id)s.') % { 'channel_id': channel_id} LOG.error(msg) raise exception.VolumeDriverException(message=msg) return mcs_id def _concat_provider_location(self, model_dict): keys = sorted(model_dict.keys()) return '@'.join([i + '^' + str(model_dict[i]) for i in keys]) def delete_volume(self, volume): """Delete the specific volume.""" if not volume['provider_location']: LOG.warning('Volume %(volume_name)s ' 'provider location not stored.', { 'volume_name': volume['name']}) return have_map = False part_id = self._extract_specific_provider_location( volume['provider_location'], 'partition_id') (check_exist, have_map, part_id) = ( self._check_volume_exist(volume['id'], part_id) ) if not check_exist: LOG.warning('Volume %(volume_id)s already deleted.', { 'volume_id': volume['id']}) return if have_map: self._execute('DeleteMap', 'part', part_id, '-y') self._execute('DeletePartition', part_id, '-y') LOG.info('Delete Volume %(volume_id)s completed.', { 'volume_id': volume['id']}) def _check_replica_completed(self, replica): if ((replica['Type'] == 'Copy' and replica['Status'] == 'Completed') or (replica['Type'] == 'Mirror' and replica['Status'] == 'Mirror')): return True # show the progress percentage status = replica['Progress'].lower() LOG.info('Replica from %(source_type)s: [%(source_name)s] ' 'progess [%(progess)s].', { 'source_type': replica['Source-Type'], 'source_name': replica['Source-Name'], 'progess': status}) return False def _check_volume_exist(self, volume_id, part_id): check_exist = False have_map = False rc, part_list = self._execute('ShowPartition', '-l') if part_id: key = 'ID' find_key = part_id else: key = 'Name' find_key = volume_id for entry in part_list: if entry[key] == find_key: check_exist = True if entry['Mapped'] == 'true': have_map = True if not part_id: part_id = entry['ID'] break if check_exist: return (check_exist, have_map, part_id) else: return (False, False, None) def create_cloned_volume(self, volume, src_vref): """Create a clone of the volume by volume copy.""" # Step1 create a snapshot of the volume src_part_id = self._extract_specific_provider_location( 
src_vref['provider_location'], 'partition_id') if src_part_id is None: src_part_id = self._get_part_id(volume['id']) model_update = self._create_volume_from_volume(volume, src_part_id) LOG.info('Create Cloned Volume %(volume_id)s completed.', { 'volume_id': volume['id']}) return model_update def _create_volume_from_volume(self, dst_volume, src_part_id): # create the target volume for volume copy self._create_partition_by_default(dst_volume) dst_part_id = self._get_part_id(dst_volume['id']) # prepare return value system_id = self._get_system_id(self.ip) model_dict = { 'system_id': system_id, 'partition_id': dst_part_id, } model_info = self._concat_provider_location(model_dict) model_update = {"provider_location": model_info} # clone the volume from the origin partition commands = ( 'Cinder-Cloned', 'part', src_part_id, 'part', dst_part_id ) self._execute('CreateReplica', *commands) self._wait_replica_complete(dst_part_id) return model_update def _extract_specific_provider_location(self, provider_location, key): if not provider_location: msg = _('Failed to get provider location.') LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) provider_location_dict = self._extract_all_provider_location( provider_location) result = provider_location_dict.get(key, None) return result @log_func def _extract_all_provider_location(self, provider_location): provider_location_dict = {} dict_entry = provider_location.split("@") for entry in dict_entry: key, value = entry.split('^', 1) if value == 'None': value = None provider_location_dict[key] = value return provider_location_dict def create_export(self, context, volume): model_update = volume['provider_location'] LOG.info('Create export done from Volume %(volume_id)s.', { 'volume_id': volume['id']}) return {'provider_location': model_update} def get_volume_stats(self, refresh=False): """Get volume status. If refresh is True, update the status first. """ if self._volume_stats is None or refresh: self._update_volume_stats() LOG.info( 'Successfully update volume stats. ' 'backend: %(volume_backend_name)s, ' 'vendor: %(vendor_name)s, ' 'model_type: %(model_type)s, ' 'system_id: %(system_id)s, ' 'status: %(status)s, ' 'driver_version: %(driver_version)s, ' 'storage_protocol: %(storage_protocol)s.', self._volume_stats) return self._volume_stats def _update_volume_stats(self): # Ensure the CLI is connected. 
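# _check_connection() issues the lightweight CheckConnection ('lock')
# command: rc 0 means the raidcmd session is alive, while rc 9 or 13
# means it has dropped, in which case the connection is re-established
# (_init_raid_connection) and the CLI IO timeout re-applied
# (_set_raidcmd) before the pool stats are refreshed.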
status = self._check_connection() # Refresh cache rc, out = self._execute('InitCache') if rc != 0: LOG.Warning('[InitCache Failed]') self.backend_name = self.configuration.safe_get('volume_backend_name') system_id = self._get_system_id(self.ip) data = { 'volume_backend_name': self.backend_name, 'vendor_name': 'Infortrend', 'driver_version': self.VERSION, 'storage_protocol': self.protocol, 'model_type': self._model_type, 'system_id': system_id, 'status': status, 'pools': self._update_pools_stats(system_id), } self._volume_stats = data def _check_connection(self): rc, out = self._execute('CheckConnection') if rc == 0: return 'Connected' elif rc in (9, 13): self._init_raid_connection() self._set_raidcmd() return 'Reconnected' else: return 'Error: %s' % out def _update_pools_stats(self, system_id): self._update_pool_tiers() enable_specs_dict = self._get_enable_specs_on_array() if 'Thin Provisioning' in enable_specs_dict.keys(): provisioning_support = True else: provisioning_support = False rc, pools_info = self._execute('ShowLV') pools = [] if provisioning_support: rc, part_list = self._execute('ShowPartition') for pool in pools_info: if pool['Name'] in self.pool_dict.keys(): total_space = float(pool['Size'].split(' ', 1)[0]) available_space = float(pool['Available'].split(' ', 1)[0]) total_capacity_gb = round(mi_to_gi(total_space), 2) free_capacity_gb = round(mi_to_gi(available_space), 2) _pool = { 'pool_name': pool['Name'], 'pool_id': pool['ID'], 'location_info': 'Infortrend:%s' % system_id, 'total_capacity_gb': total_capacity_gb, 'free_capacity_gb': free_capacity_gb, 'reserved_percentage': 0, 'QoS_support': False, 'thick_provisioning_support': True, 'thin_provisioning_support': provisioning_support, } if provisioning_support: provisioning_factor = self.configuration.safe_get( 'max_over_subscription_ratio') provisioned_space = self._get_provisioned_space( pool['ID'], part_list) provisioned_capacity_gb = round( mi_to_gi(provisioned_space), 2) _pool['provisioned_capacity_gb'] = provisioned_capacity_gb _pool['max_over_subscription_ratio'] = float( provisioning_factor) pools.append(_pool) return pools def _get_provisioned_space(self, pool_id, part_list): provisioning_space = 0 for entry in part_list: if entry['LV-ID'] == pool_id: provisioning_space += int(entry['Size']) return provisioning_space def _update_pool_tiers(self): """Setup the tier pools information. 
tier_pools_dict = { '12345678': [0, 1, 2, 3], # Pool 12345678 has 4 tiers: 0, 1, 2, 3 '87654321': [0, 1, 3], # Pool 87654321 has 3 tiers: 0, 1, 3 } """ rc, lv_info = self._execute('ShowLV', 'tier') temp_dict = {} for entry in lv_info: if entry['LV-Name'] in self.pool_dict.keys(): if entry['LV-ID'] not in temp_dict.keys(): temp_dict[entry['LV-ID']] = [] temp_dict[entry['LV-ID']].append(int(entry['Tier'])) self.tier_pools_dict = temp_dict def create_snapshot(self, snapshot): """Creates a snapshot.""" volume_id = snapshot['volume_id'] LOG.debug('Create Snapshot %(snapshot)s volume %(volume)s.', {'snapshot': snapshot['id'], 'volume': volume_id}) model_update = {} part_id = self._get_part_id(volume_id) if not part_id: msg = _('Failed to get Partition ID for volume %(volume_id)s.') % { 'volume_id': volume_id} LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) @lockutils.synchronized( 'snapshot-' + part_id, 'infortrend-', True) def do_create_snapshot(): self._execute('CreateSnapshot', 'part', part_id, 'name=%s' % snapshot['id']) rc, tmp_snapshot_list = self._execute( 'ShowSnapshot', 'part=%s' % part_id) return tmp_snapshot_list snapshot_list = do_create_snapshot() LOG.info( 'Create success. ' 'Snapshot: %(snapshot)s, ' 'Snapshot ID in raid: %(raid_snapshot_id)s, ' 'volume: %(volume)s.', { 'snapshot': snapshot['id'], 'raid_snapshot_id': snapshot_list[-1]['SI-ID'], 'volume': volume_id}) model_update['provider_location'] = snapshot_list[-1]['SI-ID'] return model_update def delete_snapshot(self, snapshot): """Delete the snapshot.""" volume_id = snapshot['volume_id'] LOG.debug('Delete Snapshot %(snapshot)s volume %(volume)s.', {'snapshot': snapshot['id'], 'volume': volume_id}) raid_snapshot_id = snapshot.get('provider_location') if raid_snapshot_id: self._execute('DeleteSnapshot', raid_snapshot_id, '-y') LOG.info('Delete Snapshot %(snapshot_id)s completed.', { 'snapshot_id': snapshot['id']}) else: LOG.warning('Snapshot %(snapshot_id)s ' 'provider_location not stored.', { 'snapshot_id': snapshot['id']}) def _get_part_id(self, volume_id, pool_id=None): count = 0 while True: rc, part_list = self._execute('ShowPartition') for entry in part_list: if pool_id is None: if entry['Name'] == volume_id: return entry['ID'] else: if (entry['Name'] == volume_id and entry['LV-ID'] == pool_id): return entry['ID'] if count >= 3: msg = _('Failed to get partition info ' 'from volume_id: %(volume_id)s.') % { 'volume_id': volume_id} LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) else: time.sleep(4) count = count + 1 return def create_volume_from_snapshot(self, volume, snapshot): raid_snapshot_id = snapshot.get('provider_location') if raid_snapshot_id is None: msg = _('Failed to get Raid Snapshot ID ' 'from snapshot: %(snapshot_id)s.') % { 'snapshot_id': snapshot['id']} LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) self._create_partition_by_default(volume) dst_part_id = self._get_part_id(volume['id']) # clone the volume from the snapshot commands = ( 'Cinder-Snapshot', 'si', raid_snapshot_id, 'part', dst_part_id ) self._execute('CreateReplica', *commands) self._wait_replica_complete(dst_part_id) # prepare return value system_id = self._get_system_id(self.ip) model_dict = { 'system_id': system_id, 'partition_id': dst_part_id, } model_info = self._concat_provider_location(model_dict) LOG.info( 'Create Volume %(volume_id)s from ' 'snapshot %(snapshot_id)s completed.', { 'volume_id': volume['id'], 'snapshot_id': snapshot['id']}) return {"provider_location": model_info} 
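# Illustrative note on the provider_location format (the id values here
# are hypothetical, not taken from the original source):
# _concat_provider_location() joins sorted 'key^value' pairs with '@',
# and _extract_all_provider_location() reverses it, e.g.:
#
#     model_dict = {'system_id': '50168004',
#                   'partition_id': '6A41315B0EDC8EB7'}
#     _concat_provider_location(model_dict)
#     # -> 'partition_id^6A41315B0EDC8EB7@system_id^50168004'
#     _extract_all_provider_location(
#         'partition_id^6A41315B0EDC8EB7@system_id^50168004')
#     # -> {'partition_id': '6A41315B0EDC8EB7', 'system_id': '50168004'}
#
# The literal string 'None' is parsed back to None, which is why callers
# such as create_cloned_volume() fall back to _get_part_id() when the
# partition id is missing.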
def initialize_connection(self, volume, connector): system_id = self._get_system_id(self.ip) LOG.debug('Connector_info: %s', connector) @lockutils.synchronized( '%s-connection' % system_id, 'infortrend-', True) def lock_initialize_conn(): if self.protocol == 'iSCSI': multipath = connector.get('multipath', False) return self._initialize_connection_iscsi( volume, connector, multipath) elif self.protocol == 'FC': return self._initialize_connection_fc( volume, connector) else: msg = _('Unknown protocol: %(protocol)s.') % { 'protocol': self.protocol} LOG.error(msg) raise exception.VolumeDriverException(message=msg) return lock_initialize_conn() def _initialize_connection_fc(self, volume, connector): self._init_map_info() map_lun, target_wwpns, initiator_target_map = ( self._do_fc_connection(volume, connector) ) properties = self._generate_fc_connection_properties( map_lun, target_wwpns, initiator_target_map) LOG.info('Successfully initialized connection. ' 'target_wwn: %(target_wwn)s, ' 'initiator_target_map: %(initiator_target_map)s, ' 'lun: %(target_lun)s.', properties['data']) fczm_utils.add_fc_zone(properties) return properties @log_func def _do_fc_connection(self, volume, connector): target_wwpns = [] partition_data = self._extract_all_provider_location( volume['provider_location']) part_id = partition_data['partition_id'] if part_id is None: part_id = self._get_part_id(volume['id']) wwpn_list, wwpn_channel_info = self._get_wwpn_list() initiator_target_map, target_wwpns = self._build_initiator_target_map( connector, wwpn_list) rc, part_mapping = self._execute('ShowMap', 'part=%s' % part_id) map_lun_list = [] # We need to check all the maps first # Because fibre needs a consistent lun id for initiator_wwpn in sorted(initiator_target_map): for target_wwpn in initiator_target_map[initiator_wwpn]: ch_id = wwpn_channel_info[target_wwpn.upper()]['channel'] controller = wwpn_channel_info[target_wwpn.upper()]['slot'] target_id = self.target_dict[controller][ch_id] exist_lun_id = self._check_map( ch_id, target_id, part_mapping, initiator_wwpn) map_lun_list.append(exist_lun_id) # To check if already mapped if (map_lun_list.count(map_lun_list[0]) == len(map_lun_list) and map_lun_list[0] != -1): map_lun = map_lun_list[0] LOG.info('Already has map. 
volume: [%(volume)s], ' 'mapped_lun_list: %(list)s, ', { 'volume': volume['id'], 'list': map_lun_list}) return map_lun, target_wwpns, initiator_target_map # Update used LUN list self._update_map_info(True) map_lun = self._get_common_lun_map_id(wwpn_channel_info) save_lun = map_lun while True: ret = self._create_new_fc_maps( initiator_wwpn, initiator_target_map, target_wwpn, wwpn_channel_info, part_id, map_lun) if ret == 20: # Clean up the map for following re-create self._delete_all_map(part_id) map_lun = self._find_next_lun_id(map_lun, save_lun) else: break return map_lun, target_wwpns, initiator_target_map def _create_new_fc_maps(self, initiator_wwpn, initiator_target_map, target_wwpn, wwpn_channel_info, part_id, map_lun): for initiator_wwpn in sorted(initiator_target_map): for target_wwpn in initiator_target_map[initiator_wwpn]: ch_id = wwpn_channel_info[target_wwpn.upper()]['channel'] controller = wwpn_channel_info[target_wwpn.upper()]['slot'] target_id = self.target_dict[controller][ch_id] host_filter = self._create_host_filter(initiator_wwpn) commands = ( 'part', part_id, ch_id, target_id, str(map_lun), host_filter ) rc, out = self._execute('CreateMap', *commands) if (rc == 20) or (rc == 1): msg = _('Volume[%(part_id)s] LUN conflict detected,' 'Ch:[%(Ch)s] ID:[%(tid)s] LUN:[%(lun)s].') % { 'part_id': part_id, 'Ch': ch_id, 'tid': target_id, 'lun': map_lun} LOG.warning(msg) return 20 elif rc != 0: msg = _('Volume[%(part_id)s] create map failed, ' 'Ch:[%(Ch)s] ID:[%(tid)s] LUN:[%(lun)s].') % { 'part_id': part_id, 'Ch': ch_id, 'tid': target_id, 'lun': map_lun} LOG.error(msg) raise exception.VolumeDriverException(message=msg) if map_lun in self.map_dict[controller][ch_id]: self.map_dict[controller][ch_id].remove(map_lun) return rc def _build_initiator_target_map(self, connector, all_target_wwpns): initiator_target_map = {} target_wwpns = [] if self.fc_lookup_service: lookup_map = ( self.fc_lookup_service.get_device_mapping_from_network( connector['wwpns'], all_target_wwpns) ) for fabric_name in lookup_map: fabric = lookup_map[fabric_name] target_wwpns.extend(fabric['target_port_wwn_list']) for initiator in fabric['initiator_port_wwn_list']: initiator_target_map[initiator] = ( fabric['target_port_wwn_list'] ) else: initiator_wwns = connector['wwpns'] target_wwpns = all_target_wwpns for initiator in initiator_wwns: initiator_target_map[initiator] = all_target_wwpns return initiator_target_map, target_wwpns def _generate_fc_connection_properties( self, lun_id, target_wwpns, initiator_target_map): return { 'driver_volume_type': 'fibre_channel', 'data': { 'target_discovered': True, 'target_lun': lun_id, 'target_wwn': target_wwpns, 'initiator_target_map': initiator_target_map, }, } def _find_next_lun_id(self, lun_id, save_id): lun_id = lun_id + 1 if lun_id == self.constants['MAX_LUN_MAP_PER_CHL']: lun_id = 0 elif lun_id == save_id: msg = _('No available LUN among [%(max_lun)s] LUNs.' 
) % {'max_lun': self.constants['MAX_LUN_MAP_PER_CHL']} LOG.error(msg) raise exception.VolumeDriverException(message=msg) return lun_id @log_func def _initialize_connection_iscsi(self, volume, connector, multipath): self._init_map_info() partition_data = self._extract_all_provider_location( volume['provider_location']) # system_id, part_id system_id = partition_data['system_id'] part_id = partition_data['partition_id'] if part_id is None: part_id = self._get_part_id(volume['id']) self._set_host_iqn(connector['initiator']) iqns, ips, luns = self._iscsi_create_map( part_id, multipath, connector['initiator'], system_id) properties = self._generate_iscsi_connection_properties( iqns, ips, luns, volume, multipath) LOG.info('Successfully initialized connection ' 'with volume: %(volume_id)s.', properties['data']) return properties def _set_host_iqn(self, host_iqn): rc, iqn_list = self._execute('ShowIQN') check_iqn_exist = False for entry in iqn_list: if entry['IQN'] == host_iqn: check_iqn_exist = True break if not check_iqn_exist: self._execute( 'CreateIQN', host_iqn, self._truncate_host_name(host_iqn)) def _truncate_host_name(self, iqn): if len(iqn) > 16: return iqn[-16:] else: return iqn @log_func def _generate_iqn(self, channel_info): slot_id = 1 if channel_info['controller'] == 'slot_a' else 2 return self.iqn % ( channel_info['system_id'], channel_info['mcs_id'], channel_info['target_id'], slot_id) @log_func def _get_ip_by_channel( self, channel_id, net_list, controller='slot_a'): slot_name = 'slotA' if controller == 'slot_a' else 'slotB' for entry in net_list: if entry['ID'] == channel_id and entry['Slot'] == slot_name: if entry['IPv4'] == '0.0.0.0': msg = _( 'Please set ip on Channel[%(channel_id)s] ' 'with controller[%(controller)s].') % { 'channel_id': channel_id, 'controller': slot_name} LOG.error(msg) raise exception.VolumeDriverException(message=msg) else: return entry['IPv4'] msg = _( 'Can not find channel[%(channel_id)s] ' 'with controller[%(controller)s].') % { 'channel_id': channel_id, 'controller': slot_name} LOG.error(msg) raise exception.VolumeDriverException(message=msg) def _get_wwpn_list(self): rc, wwn_list = self._execute('ShowWWN') wwpn_list = [] wwpn_channel_info = {} for entry in wwn_list: channel_id = entry['CH'] if 'BID' in entry['ID']: slot_name = 'slot_b' else: slot_name = 'slot_a' if channel_id in self.map_dict[slot_name]: wwpn_list.append(entry['WWPN']) wwpn_channel_info[entry['WWPN']] = { 'channel': channel_id, 'slot': slot_name, } return wwpn_list, wwpn_channel_info @log_func def _generate_iscsi_connection_properties( self, iqns, ips, luns, volume, multipath): portals = [] for i in range(len(ips)): discovery_ip = '%s:%s' % ( ips[i], self.constants['ISCSI_PORT']) discovery_iqn = iqns[i] portals.append(discovery_ip) if not self._do_iscsi_discovery(discovery_iqn, discovery_ip): msg = _( 'Could not find iSCSI target ' 'for volume: [%(volume_id)s] ' 'portal: [%(discovery_ip)s] ' 'iqn: [%(discovery_iqn)s]' 'for path: [%(i)s/%(len)s]') % { 'volume_id': volume['id'], 'discovery_ip': discovery_ip, 'discovery_iqn': discovery_iqn, 'i': i + 1, 'len': len(ips)} LOG.error(msg) raise exception.VolumeDriverException(message=msg) properties = { 'target_discovered': True, 'target_iqn': iqns[0], 'target_portal': portals[0], 'target_lun': luns[0], 'volume_id': volume['id'], } if multipath: properties['target_iqns'] = iqns properties['target_portals'] = portals properties['target_luns'] = luns if 'provider_auth' in volume: auth = volume['provider_auth'] if auth: (auth_method, 
auth_username, auth_secret) = auth.split() properties['auth_method'] = auth_method properties['auth_username'] = auth_username properties['auth_password'] = auth_secret return { 'driver_volume_type': 'iscsi', 'data': properties, } @log_func def _do_iscsi_discovery(self, target_iqn, target_ip): rc, out = self._execute( 'ExecuteCommand', 'iscsiadm', '-m', 'discovery', '-t', 'sendtargets', '-p', target_ip, run_as_root=True) if rc != 0: LOG.error( 'Can not discovery in %(target_ip)s with %(target_iqn)s.', { 'target_ip': target_ip, 'target_iqn': target_iqn}) return False else: for target in out.splitlines(): if target_iqn in target and target_ip in target: return True return False def extend_volume(self, volume, new_size): part_id = self._extract_specific_provider_location( volume['provider_location'], 'partition_id') if part_id is None: part_id = self._get_part_id(volume['id']) expand_size = new_size - volume['size'] if '.' in ('%s' % expand_size): expand_size = round(gi_to_mi(float(expand_size))) expand_command = 'size=%sMB' % expand_size else: expand_command = 'size=%sGB' % expand_size self._execute('SetPartition', 'expand', part_id, expand_command) LOG.info( 'Successfully extended volume %(volume_id)s to size %(size)s.', { 'volume_id': volume['id'], 'size': new_size}) def terminate_connection(self, volume, connector): system_id = self._get_system_id(self.ip) @lockutils.synchronized( '%s-connection' % system_id, 'infortrend-', True) def lock_terminate_conn(): conn_info = None part_id = self._extract_specific_provider_location( volume['provider_location'], 'partition_id') if part_id is None: part_id = self._get_part_id(volume['id']) # Support for force detach volume if not connector: self._delete_all_map(part_id) LOG.warning( 'Connection Info Error: detach all connections ' 'for volume: %(volume_id)s.', { 'volume_id': volume['id']}) return self._delete_host_map(part_id, connector) # Check if this iqn is none used if self.protocol == 'iSCSI': lun_map_exist = self._check_initiator_has_lun_map( connector['initiator']) if not lun_map_exist: host_name = self._truncate_host_name( connector['initiator']) self._execute('DeleteIQN', host_name) # FC should return info elif self.protocol == 'FC': conn_info = {'driver_volume_type': 'fibre_channel', 'data': {}} lun_map_exist = self._check_initiator_has_lun_map( connector['wwpns']) if not lun_map_exist: wwpn_list, wwpn_channel_info = self._get_wwpn_list() init_target_map, target_wwpns = ( self._build_initiator_target_map(connector, wwpn_list) ) conn_info['data']['initiator_target_map'] = init_target_map LOG.info( 'Successfully terminated connection ' 'for volume: %(volume_id)s.', { 'volume_id': volume['id']}) fczm_utils.remove_fc_zone(conn_info) return conn_info return lock_terminate_conn() def _delete_host_map(self, part_id, connector): count = 0 while True: rc, part_map_info = self._execute('ShowMap', 'part=%s' % part_id) if len(part_map_info) > 0: break elif count > 2: # in case of noinit fails rc, part_map_info = self._execute('ShowMap', 'part=%s' % part_id) break else: count = count + 1 if self.protocol == 'iSCSI': host = connector['initiator'].lower() host = (host,) elif self.protocol == 'FC': host = [x.lower() for x in connector['wwpns']] temp_ch = None temp_tid = None temp_lun = None # The default result of ShowMap is ordered by Ch-Target-LUN # The same lun-map might have different host filters # We need to specify Ch-Target-LUN and delete it only once if len(part_map_info) > 0: for entry in part_map_info: if entry['Host-ID'].lower() in host: if 
not (entry['Ch'] == temp_ch and entry['Target'] == temp_tid and entry['LUN'] == temp_lun): self._execute( 'DeleteMap', 'part', part_id, entry['Ch'], entry['Target'], entry['LUN'], '-y') temp_ch = entry['Ch'] temp_tid = entry['Target'] temp_lun = entry['LUN'] return def _delete_all_map(self, part_id): self._execute('DeleteMap', 'part', part_id, '-y') return def migrate_volume(self, volume, host, new_extraspecs=None): is_valid, dst_pool_id = ( self._is_valid_for_storage_assisted_migration(host, volume) ) if not is_valid: return (False, None) src_pool_id = self._get_volume_pool_id(volume) if src_pool_id != dst_pool_id: model_dict = self._migrate_volume_with_pool( volume, dst_pool_id, new_extraspecs) model_update = { "provider_location": self._concat_provider_location(model_dict), } LOG.info('Migrate Volume %(volume_id)s completed.', { 'volume_id': volume['id']}) else: model_update = { "provider_location": volume['provider_location'], } return (True, model_update) def _is_valid_for_storage_assisted_migration(self, host, volume): if 'location_info' not in host['capabilities']: LOG.error('location_info not stored in pool.') return (False, None) vendor = host['capabilities']['location_info'].split(':')[0] dst_system_id = host['capabilities']['location_info'].split(':')[-1] if vendor != 'Infortrend': LOG.error('Vendor should be Infortrend for migration.') return (False, None) # It should be the same raid for migration src_system_id = self._get_system_id(self.ip) if dst_system_id != src_system_id: LOG.error('Migration must be performed ' 'on the same Infortrend array.') return (False, None) # We don't support volume live migration if volume['status'].lower() != 'available': LOG.error('Volume status must be available for migration.') return (False, None) if 'pool_id' not in host['capabilities']: LOG.error('Failed to get target pool id.') return (False, None) dst_pool_id = host['capabilities']['pool_id'] if dst_pool_id is None: return (False, None) return (True, dst_pool_id) def _migrate_volume_with_pool(self, volume, dst_pool_id, extraspecs=None): # Get old partition data for delete map partition_data = self._extract_all_provider_location( volume['provider_location']) src_part_id = partition_data['partition_id'] if src_part_id is None: src_part_id = self._get_part_id(volume['id']) # Create New Partition self._create_partition_with_pool(volume, dst_pool_id, extraspecs) dst_part_id = self._get_part_id( volume['id'], pool_id=dst_pool_id) if dst_part_id is None: msg = _('Failed to get new part id in new pool: %(pool_id)s.') % { 'pool_id': dst_pool_id} LOG.error(msg) raise exception.VolumeDriverException(message=msg) # Volume Mirror from old partition into new partition commands = ( 'Cinder-Migrate', 'part', src_part_id, 'part', dst_part_id, 'type=mirror' ) self._execute('CreateReplica', *commands) self._wait_replica_complete(dst_part_id) self._execute('DeleteMap', 'part', src_part_id, '-y') self._execute('DeletePartition', src_part_id, '-y') model_dict = { 'system_id': partition_data['system_id'], 'partition_id': dst_part_id, } return model_dict def update_migrated_volume(self, ctxt, volume, new_volume, original_volume_status): """Return model update for migrated volume.""" src_volume_id = volume['id'] dst_volume_id = new_volume['id'] part_id = self._extract_specific_provider_location( new_volume['provider_location'], 'partition_id') if part_id is None: part_id = self._get_part_id(dst_volume_id) LOG.debug( 'Rename partition %(part_id)s ' 'into new volume %(new_volume)s.', { 'part_id': part_id, 
'new_volume': dst_volume_id}) try: self._execute('SetPartition', part_id, 'name=%s' % src_volume_id) except InfortrendCliException: LOG.exception('Failed to rename %(new_volume)s into ' '%(volume)s.', {'new_volume': new_volume['id'], 'volume': volume['id']}) return {'_name_id': new_volume['_name_id'] or new_volume['id']} LOG.info('Update migrated volume %(new_volume)s completed.', { 'new_volume': new_volume['id']}) model_update = { '_name_id': None, 'provider_location': new_volume['provider_location'], } return model_update def _wait_replica_complete(self, part_id): def _inner(): check_done = False try: rc, replica_list = self._execute('ShowReplica', '-l') for entry in replica_list: if (entry['Target'] == part_id and self._check_replica_completed(entry)): check_done = True self._execute('DeleteReplica', entry['Pair-ID'], '-y') except Exception: check_done = False LOG.exception('Cannot detect replica status.') if check_done: raise loopingcall.LoopingCallDone() timer = loopingcall.FixedIntervalLoopingCall(_inner) timer.start(interval=15).wait() def _get_enable_specs_on_array(self): enable_specs = {} rc, license_list = self._execute('ShowLicense') for key, value in license_list.items(): if value['Support']: enable_specs[key] = value return enable_specs def manage_existing_get_size(self, volume, ref): """Return size of volume to be managed by manage_existing.""" volume_data = self._get_existing_volume_ref_data(ref) volume_pool_id = self._get_volume_pool_id(volume) if not volume_data: msg = _('Specified volume does not exist.') LOG.error(msg) raise exception.ManageExistingInvalidReference( existing_ref=ref, reason=msg) if volume_data['Mapped'].lower() != 'false': msg = _('The specified volume is mapped. ' 'Please unmap it first before using it with OpenStack.') LOG.error(msg) raise exception.VolumeDriverException(message=msg) if volume_data['LV-ID'] != volume_pool_id: msg = _('The specified volume is not in the expected pool.') LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) return int(math.ceil(mi_to_gi(float(volume_data['Size'])))) def manage_existing(self, volume, ref): volume_data = self._get_existing_volume_ref_data(ref) if not volume_data: msg = _('Specified logical volume does not exist.') LOG.error(msg) raise exception.ManageExistingInvalidReference( existing_ref=ref, reason=msg) self._execute( 'SetPartition', volume_data['ID'], 'name=%s' % volume['id']) model_dict = { 'system_id': self._get_system_id(self.ip), 'partition_id': volume_data['ID'], } model_update = { "provider_location": self._concat_provider_location(model_dict), } LOG.info('Rename Volume %(volume_id)s completed.', { 'volume_id': volume['id']}) return model_update def _get_existing_volume_ref_data(self, ref): if 'source-name' in ref: key = 'Name' find_key = ref['source-name'] elif 'source-id' in ref: key = 'ID' find_key = ref['source-id'] else: msg = _('Reference must contain source-id or source-name.') LOG.error(msg) raise exception.ManageExistingInvalidReference( existing_ref=ref, reason=msg) ref_dict = {} rc, part_list = self._execute('ShowPartition', '-l') for entry in part_list: if entry[key] == find_key: ref_dict = entry break return ref_dict def unmanage(self, volume): part_id = self._extract_specific_provider_location( volume['provider_location'], 'partition_id') if part_id is None: part_id = self._get_part_id(volume['id']) new_vol_name = self.unmanaged_prefix % volume['id'][:-17] self._execute('SetPartition', part_id, 'name=%s' % new_vol_name) LOG.info('Unmanage volume %(volume_id)s completed.', { 'volume_id': volume['id']}) 
def _check_volume_attachment(self, volume): if not volume['volume_attachment']: return False return True def _check_volume_has_snapshot(self, volume): part_id = self._extract_specific_provider_location( volume['provider_location'], 'partition_id') rc, snapshot_list = self._execute('ShowSnapshot', 'part=%s' % part_id) if len(snapshot_list) > 0: return True return False def retype(self, ctxt, volume, new_type, diff, host): """Convert the volume to the new volume type.""" src_pool_name = volume['host'].split('#')[-1] dst_pool_name = host['host'].split('#')[-1] if src_pool_name != dst_pool_name: if self._check_volume_attachment(volume): LOG.error( 'Volume %(volume_id)s cannot be retyped ' 'during attachment.', { 'volume_id': volume['id']}) return False if self._check_volume_has_snapshot(volume): LOG.error( 'Volume %(volume_id)s cannot be retyped ' 'because it has snapshot.', { 'volume_id': volume['id']}) return False new_extraspecs = new_type['extra_specs'] rc, model_update = self.migrate_volume( volume, host, new_extraspecs) if rc: LOG.info( 'Retype Volume %(volume_id)s is done ' 'and migrated to pool %(pool_id)s.', { 'volume_id': volume['id'], 'pool_id': host['capabilities']['pool_id']}) return (rc, model_update) else: # extract extraspecs for pool src_extraspec = new_type['extra_specs'].copy() if self.PROVISIONING_KEY in diff['extra_specs']: src_prov = diff['extra_specs'][self.PROVISIONING_KEY][0] src_extraspec[self.PROVISIONING_KEY] = src_prov if self.TIERING_SET_KEY in diff['extra_specs']: src_tier = diff['extra_specs'][self.TIERING_SET_KEY][0] src_extraspec[self.TIERING_SET_KEY] = src_tier if src_extraspec != new_type['extra_specs']: src_extraspec_set = self._get_extraspecs_set( src_extraspec) new_extraspec_set = self._get_extraspecs_set( new_type['extra_specs']) src_extraspecs = self._get_pool_extraspecs( src_pool_name, src_extraspec_set) new_extraspecs = self._get_pool_extraspecs( dst_pool_name, new_extraspec_set) if not self._check_volume_type_diff( src_extraspecs, new_extraspecs, 'provisioning'): LOG.warning( 'The provisioning: [%(src)s] to [%(new)s] ' 'is unable to retype.', { 'src': src_extraspecs['provisioning'], 'new': new_extraspecs['provisioning']}) return False elif not self._check_volume_type_diff( src_extraspecs, new_extraspecs, 'tiering'): self._execute_retype_tiering(new_extraspecs, volume) LOG.info('Retype Volume %(volume_id)s is completed.', { 'volume_id': volume['id']}) return True def _check_volume_type_diff(self, src_extraspecs, new_extraspecs, key): if src_extraspecs[key] != new_extraspecs[key]: return False return True def _execute_retype_tiering(self, new_pool_extraspecs, volume): part_id = self._extract_specific_provider_location( volume['provider_location'], 'partition_id') if part_id is None: part_id = self._get_part_id(volume['id']) pool_name = volume['host'].split('#')[-1] pool_id = self._get_volume_pool_id(volume) provisioning = new_pool_extraspecs['provisioning'] new_tiering = new_pool_extraspecs['tiering'] if not self._check_tier_pool_or_not(pool_id): return pool_tiers = self.tier_pools_dict[pool_id] if new_tiering == 'all': if provisioning == 'thin': tiering = ','.join(str(i) for i in pool_tiers) else: volume_size = gi_to_mi(volume['size']) self._check_tier_space(pool_tiers[0], pool_id, volume_size) tiering = str(pool_tiers[0]) else: if not self._check_pool_tiering(pool_tiers, new_tiering): msg = _('Tiering extraspecs %(pool_name)s:%(tiering)s ' 'can not fit in the real tiers %(pool_tier)s.') % { 'pool_name': pool_name, 'tiering': new_tiering, 'pool_tier': 
pool_tiers} LOG.error(msg) raise exception.VolumeDriverException(message=msg) if provisioning == 'thin': tiering = ','.join(str(i) for i in new_tiering) else: volume_size = gi_to_mi(volume['size']) self._check_tier_space(new_tiering[0], pool_id, volume_size) tiering = str(new_tiering[0]) rc, out = self._execute( 'SetPartition', 'tier-resided', part_id, 'tier=%s' % tiering) rc, out = self._execute( 'SetLV', 'tier-migrate', pool_id, 'part=%s' % part_id) self._wait_tier_migrate_complete(part_id) def _wait_tier_migrate_complete(self, part_id): def _inner(): check_done = False try: rc, part_list = self._execute('ShowPartition', '-l') for entry in part_list: if (entry['ID'] == part_id and self._check_tier_migrate_completed(entry)): check_done = True except Exception: check_done = False LOG.exception('Cannot detect tier migrate status.') if check_done: raise loopingcall.LoopingCallDone() timer = loopingcall.FixedIntervalLoopingCall(_inner) timer.start(interval=15).wait() def _check_tier_migrate_completed(self, part_info): status = part_info['Progress'].lower() if 'migrating' in status: LOG.info('Retype volume [%(volume_name)s] ' 'progess [%(progess)s].', { 'volume_name': part_info['Name'], 'progess': status}) return False return True def get_manageable_volumes(self, cinder_volumes, marker, limit, offset, sort_keys, sort_dirs): """List volumes on the backend available for management by Cinder.""" manageable_volumes = [] # List to Return cinder_ids = [cinder_volume.id for cinder_volume in cinder_volumes] rc, part_list = self._execute('ShowPartition', '-l') for entry in part_list: # Check if parts are located within right LVs config. pool_name = None for _name, _id in self.pool_dict.items(): if _id == entry['LV-ID']: pool_name = _name break if not pool_name: continue if entry['Name'] in cinder_ids: safety = False reason = 'Already Managed' cinder_id = entry['Name'] elif entry['Mapped'].lower() != 'false': safety = False reason = 'Volume In-use' cinder_id = None else: safety = True reason = None cinder_id = None volume = { 'reference': { 'source-id': entry['ID'], 'source-name': entry['Name'], 'pool-name': pool_name }, 'size': int(round(mi_to_gi(float(entry['Size'])))), 'safe_to_manage': safety, 'reason_not_safe': reason, 'cinder_id': cinder_id, 'extra_info': None } manageable_volumes.append(volume) return volume_utils.paginate_entries_list(manageable_volumes, marker, limit, offset, sort_keys, sort_dirs) def manage_existing_snapshot(self, snapshot, existing_ref): """Brings existing backend storage object under Cinder management.""" si = self._get_snapshot_ref_data(existing_ref) self._execute('SetSnapshot', si['SI-ID'], 'name=%s' % snapshot.id) LOG.info('Rename Snapshot %(si_id)s completed.', { 'si_id': si['SI-ID']}) return {'provider_location': si['SI-ID']} def manage_existing_snapshot_get_size(self, snapshot, existing_ref): """Return size of snapshot to be managed by manage_existing.""" si = self._get_snapshot_ref_data(existing_ref) rc, part_list = self._execute('ShowPartition') volume_id = si['Partition-ID'] for entry in part_list: if entry['ID'] == volume_id: part = entry break return int(math.ceil(mi_to_gi(float(part['Size'])))) def get_manageable_snapshots(self, cinder_snapshots, marker, limit, offset, sort_keys, sort_dirs): """List snapshots on the backend available for management by Cinder.""" manageable_snapshots = [] # List to Return cinder_si_ids = [cinder_si.id for cinder_si in cinder_snapshots] rc, si_list = self._execute('ShowSnapshot', '-l') rc, part_list = self._execute('ShowPartition', 
'-l') for entry in si_list: # Check if parts are located within right LVs config. pool_name = None for _name, _id in self.pool_dict.items(): if _id == entry['LV-ID']: pool_name = _name break if not pool_name: continue # Find si's partition for part_entry in part_list: if part_entry['ID'] == entry['Partition-ID']: part = part_entry break if entry['Name'] in cinder_si_ids: safety = False reason = 'Already Managed' cinder_id = entry['Name'] elif part['Mapped'].lower() != 'false': safety = False reason = 'Volume In-use' cinder_id = None else: safety = True reason = None cinder_id = None return_si = { 'reference': { 'source-id': entry['ID'], 'source-name': entry['Name'] }, 'size': int(round(mi_to_gi(float(part['Size'])))), 'safe_to_manage': safety, 'reason_not_safe': reason, 'cinder_id': cinder_id, 'extra_info': None, 'source_reference': { 'volume-id': part['Name'] } } manageable_snapshots.append(return_si) return volume_utils.paginate_entries_list(manageable_snapshots, marker, limit, offset, sort_keys, sort_dirs) def unmanage_snapshot(self, snapshot): """Removes the specified snapshot from Cinder management.""" si_id = snapshot.provider_location if si_id is None: msg = _('Failed to get snapshot provider location.') LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) self._execute('SetSnapshot', si_id, 'name=cinder-unmanaged-%s' % snapshot.id[:-17]) LOG.info('Unmanaging Snapshot %(si_id)s is completed.', { 'si_id': snapshot.id}) return def _get_snapshot_ref_data(self, ref): """Check the existance of SI for the specified partition.""" if 'source-name' in ref: key = 'Name' content = ref['source-name'] if ref['source-name'] == '---': LOG.warning( 'Finding snapshot with default name "---" ' 'can cause ambiguity.' ) elif 'source-id' in ref: key = 'SI-ID' content = ref['source-id'] else: msg = _('Reference must contain source-id or source-name.') LOG.error(msg) raise exception.ManageExistingInvalidReference( existing_ref=ref, reason=msg) rc, si_list = self._execute('ShowSnapshot') si_data = {} for entry in si_list: if entry[key] == content: si_data = entry break if not si_data: msg = _('Specified snapshot does not exist %(key)s: %(content)s.' 
) % {'key': key, 'content': content} LOG.error(msg) raise exception.ManageExistingInvalidReference( existing_ref=ref, reason=msg) return si_data ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1759315577.371121 cinder-27.0.0/cinder/volume/drivers/inspur/0000775000175000017500000000000000000000000020640 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/drivers/inspur/__init__.py0000664000175000017500000000000000000000000022737 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1759315577.371121 cinder-27.0.0/cinder/volume/drivers/inspur/as13000/0000775000175000017500000000000000000000000021627 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/drivers/inspur/as13000/__init__.py0000664000175000017500000000000000000000000023726 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/drivers/inspur/as13000/as13000_driver.py0000664000175000017500000007672500000000000024564 0ustar00zuulzuul00000000000000# Copyright 2017 Inspur Corp. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Volume driver for Inspur AS13000 """ import ipaddress import json import random import re import time import eventlet from oslo_config import cfg from oslo_log import log as logging from oslo_utils import units import requests from cinder.common import constants from cinder import exception from cinder.i18n import _ from cinder import interface from cinder import utils from cinder.volume.drivers.san import san from cinder.volume import volume_utils LOG = logging.getLogger(__name__) inspur_as13000_opts = [ cfg.ListOpt( 'as13000_ipsan_pools', default=['Pool0'], help='The Storage Pools Cinder should use, a comma separated list.'), cfg.IntOpt( 'as13000_token_available_time', default=3300, min=600, max=3600, help='The effective time of token validity in seconds.'), cfg.StrOpt( 'as13000_meta_pool', help='The pool which is used as a meta pool when creating a volume, ' 'and it should be a replication pool at present. 
' 'If not set, the driver will choose a replication pool ' 'from the value of as13000_ipsan_pools.'), ] CONF = cfg.CONF CONF.register_opts(inspur_as13000_opts) class RestAPIExecutor(object): def __init__(self, hostname, port, username, password): self._username = username self._password = password self._token = None self._baseurl = 'http://%s:%s/rest' % (hostname, port) def login(self): """Login the AS13000 and store the token.""" self._token = self._login() LOG.debug('Login the AS13000.') def _login(self): """Do request to login the AS13000 and get the token.""" method = 'security/token' params = {'name': self._username, 'password': self._password} token = self.send_rest_api(method=method, params=params, request_type='post').get('token') return token @utils.retry(exception.VolumeDriverException, interval=1, retries=3) def send_rest_api(self, method, params=None, request_type='post'): try: return self.send_api(method, params, request_type) except exception.VolumeDriverException: self.login() raise @staticmethod @volume_utils.trace_method def do_request(cmd, url, header, data): """Send request to the storage and handle the response.""" if cmd in ['post', 'get', 'put', 'delete']: req = getattr(requests, cmd)(url, data=data, headers=header) else: msg = (_('Unsupported cmd: %s.') % cmd) raise exception.VolumeBackendAPIException(msg) response = req.json() code = req.status_code LOG.debug('CODE: %(code)s, RESPONSE: %(response)s.', {'code': code, 'response': response}) if code != 200: msg = (_('Code: %(code)s, URL: %(url)s, Message: %(msg)s.') % {'code': req.status_code, 'url': req.url, 'msg': req.text}) LOG.error(msg) raise exception.VolumeDriverException(msg) return response @volume_utils.trace def send_api(self, method, params=None, request_type='post'): if params: params = json.dumps(params) url = '%s/%s' % (self._baseurl, method) # header is not needed when the driver login the backend if method == 'security/token': if request_type == 'delete': header = {'X-Auth-Token': self._token} else: header = None else: if not self._token: self.login() header = {'X-Auth-Token': self._token} response = self.do_request(request_type, url, header, params) try: code = response.get('code') if code == 0: if request_type == 'get': data = response.get('data') else: if method == 'security/token': data = response.get('data') else: data = response.get('message') data = str(data).lower() if hasattr(data, 'success'): return elif code == 301: msg = _('Token is expired.') LOG.error(msg) raise exception.VolumeDriverException(msg) else: message = response.get('message') msg = (_('Unexpected RestAPI response: %(code)d %(msg)s.') % { 'code': code, 'msg': message}) LOG.error(msg) raise exception.VolumeBackendAPIException(msg) except ValueError: msg = _("Deal with response failed.") raise exception.VolumeDriverException(msg) return data @interface.volumedriver class AS13000Driver(san.SanISCSIDriver): """Driver for Inspur AS13000 storage. .. code-block:: none Version history: 1.0.0 - Initial driver """ VENDOR = 'INSPUR' VERSION = '1.0.0' # ThirdPartySystems wiki page CI_WIKI_NAME = 'Inspur_CI' def __init__(self, *args, **kwargs): super(AS13000Driver, self).__init__(*args, **kwargs) self.configuration.append_config_values(inspur_as13000_opts) self.hostname = self.configuration.san_ip self.port = self.configuration.safe_get('san_api_port') or 8088 self.username = self.configuration.san_login self.password = self.configuration.san_password self.token_available_time = (self.configuration. 
as13000_token_available_time) self.pools = self.configuration.as13000_ipsan_pools self.meta_pool = self.configuration.as13000_meta_pool self.pools_info = {} self.nodes = [] self._token_time = 0 # get the RestAPIExecutor self._rest = RestAPIExecutor(self.hostname, self.port, self.username, self.password) @staticmethod def get_driver_options(): return inspur_as13000_opts @volume_utils.trace def do_setup(self, context): # get tokens for the driver self._rest.login() self._token_time = time.time() # get available nodes in the backend for node in self._get_cluster_status(): if node.get('healthStatus') == 1 and node.get('ip'): self.nodes.append(node) # collect pools info meta_pools = [self.meta_pool] if self.meta_pool else [] self.pools_info = self._get_pools_info(self.pools + meta_pools) # set up the meta pool if it is not configured if not self.meta_pool: for pool_info in self.pools_info.values(): if pool_info['type'] in (1, '1'): self.meta_pool = pool_info['name'] break self._check_pools() self._check_meta_pool() @volume_utils.trace def check_for_setup_error(self): """Check that the service is available.""" # check the required flags in conf required_flags = ['san_ip', 'san_login', 'san_password', 'as13000_ipsan_pools'] for flag in required_flags: value = self.configuration.safe_get(flag) if not value: msg = (_('Required flag %s is not set.') % flag) LOG.error(msg) raise exception.InvalidConfigurationValue(option=flag, value=value) # make sure at least one healthy node is available if not self.nodes: msg = _('No healthy nodes are available!') LOG.error(msg) raise exception.VolumeDriverException(message=msg) def _check_pools(self): """Check that the configured pools exist on the AS13000.""" if not set(self.pools).issubset(self.pools_info): pools = set(self.pools) - set(self.pools_info) msg = _('Pools %s do not exist.') % pools LOG.error(msg) raise exception.InvalidInput(reason=msg) def _check_meta_pool(self): """Check whether the meta pool is valid.""" if not self.meta_pool: msg = _('Meta pool is not set.') LOG.error(msg) raise exception.InvalidInput(reason=msg) if self.meta_pool not in self.pools_info: msg = _('Meta pool %s does not exist.') % self.meta_pool LOG.error(msg) raise exception.InvalidInput(reason=msg) if self.pools_info[self.meta_pool]['type'] not in (1, '1'): msg = _('Meta pool %s is not a replication pool.') % self.meta_pool LOG.error(msg) raise exception.InvalidInput(reason=msg) @volume_utils.trace def create_volume(self, volume): """Create volume in the backend.""" pool = volume_utils.extract_host(volume.host, level='pool') size = volume.size * units.Ki name = self._trans_name_down(volume.name) method = 'block/lvm' request_type = "post" params = { "name": name, "capacity": size, "dataPool": pool, "dataPoolType": self.pools_info[pool]['type'], "metaPool": self.meta_pool } self._rest.send_rest_api(method=method, params=params, request_type=request_type) @volume_utils.trace def create_volume_from_snapshot(self, volume, snapshot): """Create a new volume based on a specific snapshot.""" if snapshot.volume_size > volume.size: msg = (_("create_volume_from_snapshot: snapshot %(snapshot_name)s " "size is %(snapshot_size)dGB and doesn't fit in target " "volume %(volume_name)s of size %(volume_size)dGB.") % {'snapshot_name': snapshot.name, 'snapshot_size': snapshot.volume_size, 'volume_name': volume.name, 'volume_size': volume.size}) LOG.error(msg) raise exception.InvalidInput(message=msg) src_vol_name = self._trans_name_down(snapshot.volume_name) source_vol = snapshot.volume src_pool = 
volume_utils.extract_host(source_vol['host'], level='pool') dest_name = self._trans_name_down(volume.name) dest_pool = volume_utils.extract_host(volume.host, level='pool') snap_name = self._trans_name_down(snapshot.name) # lock the snapshot before clone from it self._snapshot_lock_op('lock', src_vol_name, snap_name, src_pool) # do clone from snap to a volume method = 'snapshot/volume/cloneLvm' request_type = 'post' params = {'originalLvm': src_vol_name, 'originalPool': src_pool, 'originalSnap': snap_name, 'name': dest_name, 'pool': dest_pool} self._rest.send_rest_api(method=method, params=params, request_type=request_type) # do filling the cloned volume self._filling_volume(dest_name, dest_pool) # wait until the cloned volume has been filled self._wait_volume_filled(dest_name, dest_pool) # unlock the original snapshot self._snapshot_lock_op('unlock', src_vol_name, snap_name, src_pool) if volume.size > snapshot.volume_size: self.extend_volume(volume, volume.size) @volume_utils.trace def create_cloned_volume(self, volume, src_vref): """Clone a volume.""" if src_vref.size > volume.size: msg = (_("create_cloned_volume: source volume %(src_vol)s " "size is %(src_size)dGB and doesn't fit in target " "volume %(tgt_vol)s of size %(tgt_size)dGB.") % {'src_vol': src_vref.name, 'src_size': src_vref.size, 'tgt_vol': volume.name, 'tgt_size': volume.size}) LOG.error(msg) raise exception.InvalidInput(message=msg) dest_pool = volume_utils.extract_host(volume.host, level='pool') dest_vol_name = self._trans_name_down(volume.name) src_pool = volume_utils.extract_host(src_vref.host, level='pool') src_vol_name = self._trans_name_down(src_vref.name) method = 'block/lvm/clone' request_type = 'post' params = {'srcVolumeName': src_vol_name, 'srcPoolName': src_pool, 'destVolumeName': dest_vol_name, 'destPoolName': dest_pool} self._rest.send_rest_api(method=method, params=params, request_type=request_type) if volume.size > src_vref.size: self.extend_volume(volume, volume.size) @volume_utils.trace def extend_volume(self, volume, new_size): """Extend volume to new size.""" name = self._trans_name_down(volume.name) if not self._check_volume(volume): msg = _('Extend Volume Failed: Volume %s does not exist.') % name LOG.error(msg) raise exception.VolumeDriverException(message=msg) size = new_size * units.Ki pool = volume_utils.extract_host(volume.host, level='pool') method = 'block/lvm' request_type = 'put' params = {'pool': pool, 'name': name, 'newCapacity': size} self._rest.send_rest_api(method=method, params=params, request_type=request_type) @volume_utils.trace def delete_volume(self, volume): """Delete volume from AS13000.""" name = self._trans_name_down(volume.name) if not self._check_volume(volume): # if volume is not exist in backend, the driver will do # nothing but log it LOG.info('Tried to delete non-existent volume %(name)s.', {'name': name}) return pool = volume_utils.extract_host(volume.host, level='pool') method = 'block/lvm?pool=%s&lvm=%s' % (pool, name) request_type = 'delete' self._rest.send_rest_api(method=method, request_type=request_type) @volume_utils.trace def create_snapshot(self, snapshot): """Create snapshot of volume in backend. The snapshot type of AS13000 is copy-on-write. 
""" source_volume = snapshot.volume volume_name = self._trans_name_down(source_volume.name) if not self._check_volume(source_volume): msg = (_('create_snapshot: Source_volume %s does not exist.') % volume_name) LOG.error(msg) raise exception.VolumeDriverException(message=msg) pool = volume_utils.extract_host(source_volume.host, level='pool') snapshot_name = self._trans_name_down(snapshot.name) method = 'snapshot/volume' request_type = 'post' params = {'snapName': snapshot_name, 'volumeName': volume_name, 'poolName': pool, 'snapType': 'r'} self._rest.send_rest_api(method=method, params=params, request_type=request_type) @volume_utils.trace def delete_snapshot(self, snapshot): """Delete snapshot of volume.""" source_volume = snapshot.volume volume_name = self._trans_name_down(source_volume.name) if self._check_volume(source_volume) is False: msg = (_('delete_snapshot: Source_volume %s does not exist.') % volume_name) LOG.error(msg) raise exception.VolumeDriverException(message=msg) pool = volume_utils.extract_host(source_volume.host, level='pool') snapshot_name = self._trans_name_down(snapshot.name) method = ('snapshot/volume?snapName=%s&volumeName=%s&poolName=%s' % (snapshot_name, volume_name, pool)) request_type = 'delete' self._rest.send_rest_api(method=method, request_type=request_type) @volume_utils.trace def _update_volume_stats(self): """Update the backend stats including driver info and pools info.""" # As _update_volume_stats runs periodically, # so we can do a check and refresh the token each time it runs. time_difference = time.time() - self._token_time if time_difference > self.token_available_time: self._rest.login() self._token_time = time.time() LOG.debug('Token of the Driver has been refreshed.') # update the backend stats data = {} backend_name = self.configuration.safe_get('volume_backend_name') data['vendor_name'] = self.VENDOR data['driver_version'] = self.VERSION data['storage_protocol'] = constants.ISCSI data['volume_backend_name'] = backend_name data['pools'] = self._get_pools_stats() self._stats = data LOG.debug('Update volume stats : %(stats)s.', {'stats': self._stats}) def _build_target_portal(self, ip, port): """Build iSCSI portal for both IPV4 and IPV6.""" addr = ipaddress.ip_address(ip) if addr.version == 4: ipaddr = ip else: ipaddr = '[%s]' % ip return '%(ip)s:%(port)s' % {'ip': ipaddr, 'port': port} @volume_utils.trace def initialize_connection(self, volume, connector, **kwargs): """Initialize connection steps: 1. check if the host exist in targets. 2.1 if there is target that has the host, add the volume to the target. 2.2 if not, create an target add host to host add volume to host. 3. return the target info. 
""" host_ip = connector['ip'] multipath = connector.get("multipath", False) # Check if there host exist in targets host_exist, target_name, node_of_target = self._get_target_from_conn( host_ip) if not host_exist: # host doesn't exist, need create target and bind the host, # generate the target name _TARGET_NAME_PATTERN = 'target.inspur.%(host)s-%(padding)s' _padding = str(random.randint(0, 99999999)).zfill(8) target_name = _TARGET_NAME_PATTERN % {'host': connector['host'], 'padding': _padding} # decide the nodes to be used if multipath: node_of_target = [node['name'] for node in self.nodes] else: # single node node_of_target = [self.nodes[0]['name']] # create the target nodes = ','.join(node_of_target) self._create_target(target_node=nodes, target_name=target_name) self._add_host_to_target(host_ip=host_ip, target_name=target_name) self._add_lun_to_target(target_name=target_name, volume=volume) if self.configuration.use_chap_auth: self._add_chap_to_target(target_name, self.configuration.chap_username, self.configuration.chap_password) lun_id = self._get_lun_id(volume, target_name) connection_data = { 'target_discovered': True, 'volume_id': volume.id, } portals = [] for node_name in node_of_target: for node in self.nodes: if node['name'] == node_name: portal = self._build_target_portal(node.get('ip'), '3260') portals.append(portal) if multipath: connection_data.update({ 'target_portals': portals, 'target_luns': [int(lun_id)] * len(portals), 'target_iqns': [target_name] * len(portals) }) else: # single node connection_data.update({ 'target_portal': portals[0], 'target_lun': int(lun_id), 'target_iqn': target_name }) if self.configuration.use_chap_auth: connection_data['auth_method'] = 'CHAP' connection_data['auth_username'] = self.configuration.chap_username connection_data['auth_password'] = self.configuration.chap_password return {'driver_volume_type': 'iscsi', 'data': connection_data} @volume_utils.trace def terminate_connection(self, volume, connector, **kwargs): """Delete lun from target. If target has no any lun, driver will delete the target. 
""" volume_name = self._trans_name_down(volume.name) target_name = None lun_id = None host_ip = None if connector and 'ip' in connector: host_ip = connector['ip'] target_list = self._get_target_list() for target in target_list: if not host_ip or host_ip in target['hostIp']: for lun in target['lun']: if volume_name == lun['lvm']: target_name = target['name'] lun_id = lun['lunID'] break if lun_id is not None: break if lun_id is None: return self._delete_lun_from_target(target_name=target_name, lun_id=lun_id) luns = self._get_lun_list(target_name) if not luns: self._delete_target(target_name) def _get_pools_info(self, pools): """Get the pools info.""" method = 'block/pool?type=2' requests_type = 'get' pools_data = self._rest.send_rest_api(method=method, request_type=requests_type) pools_info = {} for pool_data in pools_data: if pool_data['name'] in pools: pools_info[pool_data['name']] = pool_data return pools_info @volume_utils.trace def _get_pools_stats(self): """Generate the pool stat information.""" pools_info = self._get_pools_info(self.pools) pools = [] for pool_info in pools_info.values(): total_capacity = pool_info.get('totalCapacity') total_capacity_gb = self._unit_convert(total_capacity) used_capacity = pool_info.get('usedCapacity') used_capacity_gb = self._unit_convert(used_capacity) free_capacity_gb = total_capacity_gb - used_capacity_gb pool = { 'pool_name': pool_info.get('name'), 'total_capacity_gb': total_capacity_gb, 'free_capacity_gb': free_capacity_gb, 'thin_provisioning_support': True, 'thick_provisioning_support': False, } pools.append(pool) return pools @volume_utils.trace def _get_target_from_conn(self, host_ip): """Get target information base on the host ip.""" host_exist = False target_name = None node = None target_list = self._get_target_list() for target in target_list: if host_ip in target['hostIp']: host_exist = True target_name = target['name'] node = target['node'] break return host_exist, target_name, node @volume_utils.trace def _get_target_list(self): """Get a list of all targets in the backend.""" method = 'block/target/detail' request_type = 'get' data = self._rest.send_rest_api(method=method, request_type=request_type) return data @volume_utils.trace def _create_target(self, target_name, target_node): """Create a target on the specified node.""" method = 'block/target' request_type = 'post' params = {'name': target_name, 'nodeName': target_node} self._rest.send_rest_api(method=method, params=params, request_type=request_type) @volume_utils.trace def _delete_target(self, target_name): """Delete all target of all the node.""" method = 'block/target?name=%s' % target_name request_type = 'delete' self._rest.send_rest_api(method=method, request_type=request_type) @volume_utils.trace def _add_chap_to_target(self, target_name, chap_username, chap_password): """Add CHAP to target.""" method = 'block/chap/bond' request_type = 'post' params = {'target': target_name, 'user': chap_username, 'password': chap_password} self._rest.send_rest_api(method=method, params=params, request_type=request_type) @volume_utils.trace def _add_host_to_target(self, host_ip, target_name): """Add the authority of host to target.""" method = 'block/host' request_type = 'post' params = {'name': target_name, 'hostIp': host_ip} self._rest.send_rest_api(method=method, params=params, request_type=request_type) @volume_utils.trace @utils.retry(retry_param=exception.VolumeDriverException, interval=1, retries=3) def _add_lun_to_target(self, target_name, volume): """Add volume to target.""" pool = 
volume_utils.extract_host(volume.host, level='pool') volume_name = self._trans_name_down(volume.name) method = 'block/lun' request_type = 'post' params = {'name': target_name, 'pool': pool, 'lvm': volume_name} self._rest.send_rest_api(method=method, params=params, request_type=request_type) @volume_utils.trace def _delete_lun_from_target(self, target_name, lun_id): """Delete a LUN from the target.""" method = 'block/lun?name=%s&id=%s&force=1' % (target_name, lun_id) request_type = 'delete' self._rest.send_rest_api(method=method, request_type=request_type) @volume_utils.trace def _get_lun_list(self, target_name): """Get the LUN list of the target.""" method = 'block/lun?name=%s' % target_name request_type = 'get' return self._rest.send_rest_api(method=method, request_type=request_type) @volume_utils.trace def _snapshot_lock_op(self, op, vol_name, snap_name, pool_name): """Lock or unlock a snapshot to protect the snapshot. op is 'lock' for lock and 'unlock' for unlock """ method = 'snapshot/volume/%s' % op request_type = 'post' params = {'snapName': snap_name, 'volumeName': vol_name, 'poolName': pool_name} self._rest.send_rest_api(method=method, params=params, request_type=request_type) @volume_utils.trace def _filling_volume(self, name, pool): """Fill a volume so that it becomes independent of its source.""" method = 'block/lvm/filling' request_type = 'post' params = {'pool': pool, 'name': name} self._rest.send_rest_api(method=method, params=params, request_type=request_type) @utils.retry(exception.VolumeDriverException, interval=5, retries=36) def _wait_volume_filled(self, name, pool): """Wait until the volume is filled.""" volumes = self._get_volumes(pool) for vol in volumes: if name == vol['name']: if vol['lvmType'] == 1: return else: break msg = (_('Volume %s is not filled.') % name) raise exception.VolumeDriverException(msg) @volume_utils.trace def _check_volume(self, volume): """Check if the volume exists in the backend.""" pool = volume_utils.extract_host(volume.host, 'pool') volume_name = self._trans_name_down(volume.name) attempts = 3 while attempts > 0: volumes = self._get_volumes(pool) attempts -= 1 for vol in volumes: if volume_name == vol.get('name'): return True eventlet.sleep(1) return False @volume_utils.trace def _get_volumes(self, pool): """Get all the volumes in the pool.""" method = 'block/lvm?pool=%s' % pool request_type = 'get' return self._rest.send_rest_api(method=method, request_type=request_type) @volume_utils.trace def _get_cluster_status(self): """Get all nodes of the backend.""" method = 'cluster/node' request_type = 'get' return self._rest.send_rest_api(method=method, request_type=request_type) @volume_utils.trace def _get_lun_id(self, volume, target_name): """Get the LUN id of the volume in a target.""" pool = volume_utils.extract_host(volume.host, level='pool') volume_name = self._trans_name_down(volume.name) lun_id = None luns = self._get_lun_list(target_name) for lun in luns: mappinglvm = lun.get('mappingLvm') lun_name = mappinglvm.replace(r'%s/' % pool, '') if lun_name == volume_name: lun_id = lun.get('id') return lun_id def _trans_name_down(self, name): """Legitimize the name. AS13000 volume names may only contain letters, numbers, and '_'. """ return name.replace('-', '_') @volume_utils.trace def _unit_convert(self, capacity): """Convert all units to GB. The capacity is a string such as 100GB, 20TB or 100B; this routine converts it to a value in GB. 
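For example, '2TB' converts to 2048.0 and '100GB' to 100.0, while '100B' rounds to 0.0, because the result is rounded to the nearest whole number of GB. 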
""" capacity = capacity.upper() try: unit = re.findall(r'[A-Z]+', capacity)[0] except BaseException: unit = '' capacity = float(capacity.replace(unit, '')) size_gb = 0.0 if unit in ['B', '']: size_gb = capacity / units.Gi elif unit in ['K', 'KB']: size_gb = capacity / units.Mi elif unit in ['M', 'MB']: size_gb = capacity / units.Ki elif unit in ['G', 'GB']: size_gb = capacity elif unit in ['T', 'TB']: size_gb = capacity * units.Ki elif unit in ['P', 'PB']: size_gb = capacity * units.Mi elif unit in ['E', 'EB']: size_gb = capacity * units.Gi return float('%.0f' % size_gb) ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1759315577.371121 cinder-27.0.0/cinder/volume/drivers/inspur/instorage/0000775000175000017500000000000000000000000022633 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/drivers/inspur/instorage/__init__.py0000664000175000017500000000000000000000000024732 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/drivers/inspur/instorage/instorage_common.py0000664000175000017500000045740700000000000026571 0ustar00zuulzuul00000000000000# Copyright 2017 Inspur Corp. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
# import math import random import re import time import unicodedata from eventlet import greenthread from oslo_concurrency import processutils from oslo_config import cfg from oslo_log import log as logging from oslo_serialization import jsonutils as json from oslo_service import loopingcall from oslo_utils import excutils from oslo_utils import strutils from oslo_utils import units import paramiko from cinder.common import constants from cinder import context from cinder import exception from cinder.i18n import _ from cinder.objects import fields from cinder import ssh_utils from cinder import utils as cinder_utils from cinder.volume import driver from cinder.volume.drivers.inspur.instorage import ( replication as instorage_rep) from cinder.volume.drivers.inspur.instorage import instorage_const from cinder.volume.drivers.san import san from cinder.volume import qos_specs from cinder.volume import volume_types from cinder.volume import volume_utils INTERVAL_1_SEC = 1 DEFAULT_TIMEOUT = 20 LOG = logging.getLogger(__name__) instorage_mcs_opts = [ cfg.BoolOpt('instorage_mcs_vol_autoexpand', default=True, help='Storage system autoexpand parameter for volumes ' '(True/False)'), cfg.BoolOpt('instorage_mcs_vol_compression', default=False, help='Storage system compression option for volumes'), cfg.BoolOpt('instorage_mcs_vol_intier', default=True, help='Enable InTier for volumes'), cfg.BoolOpt('instorage_mcs_allow_tenant_qos', default=False, help='Allow tenants to specify QOS on create'), cfg.IntOpt('instorage_mcs_vol_grainsize', default=256, min=32, max=256, help='Storage system grain size parameter for volumes ' '(32/64/128/256)'), cfg.IntOpt('instorage_mcs_vol_rsize', default=2, min=-1, max=100, help='Storage system space-efficiency parameter for volumes ' '(percentage)'), cfg.IntOpt('instorage_mcs_vol_warning', default=0, min=-1, max=100, help='Storage system threshold for volume capacity warnings ' '(percentage)'), cfg.IntOpt('instorage_mcs_localcopy_timeout', default=120, min=1, max=600, help='Maximum number of seconds to wait for LocalCopy to be ' 'prepared.'), cfg.IntOpt('instorage_mcs_localcopy_rate', default=50, min=1, max=100, help='Specifies the InStorage LocalCopy copy rate to be used ' 'when creating a full volume copy. The default rate ' 'is 50, and the valid rates are 1-100.'), cfg.StrOpt('instorage_mcs_vol_iogrp', default='0', help='The I/O group in which to allocate volumes. It can be a ' 'comma-separated list in which case the driver will select an ' 'io_group based on least number of volumes associated with the ' 'io_group.'), cfg.StrOpt('instorage_san_secondary_ip', default=None, help='Specifies secondary management IP or hostname to be ' 'used if san_ip is invalid or becomes inaccessible.'), cfg.ListOpt('instorage_mcs_volpool_name', default=['volpool'], help='Comma separated list of storage system storage ' 'pools for volumes.'), ] CONF = cfg.CONF CONF.register_opts(instorage_mcs_opts) class InStorageMCSCommonDriver(driver.VolumeDriver, san.SanDriver): """Inspur InStorage MCS abstract base class for iSCSI/FC volume drivers. Version history: .. 
code-block:: none 1.0 - Initial driver """ VERSION = "1.0.0" VDISKCOPYOPS_INTERVAL = 600 DEFAULT_GR_SLEEP = random.randint(20, 500) / 100.0 def __init__(self, *args, **kwargs): super(InStorageMCSCommonDriver, self).__init__(*args, **kwargs) self.configuration.append_config_values(instorage_mcs_opts) self._backend_name = self.configuration.safe_get('volume_backend_name') self.active_ip = self.configuration.san_ip self.inactive_ip = self.configuration.instorage_san_secondary_ip self._local_backend_assistant = InStorageAssistant(self._run_ssh) self._aux_backend_assistant = None self._assistant = self._local_backend_assistant self._vdiskcopyops = {} self._vdiskcopyops_loop = None self.protocol = None self.replication = None self._state = {'storage_nodes': {}, 'enabled_protocols': set(), 'compression_enabled': False, 'available_iogrps': [], 'system_name': None, 'system_id': None, 'code_level': None, } self._active_backend_id = kwargs.get('active_backend_id') # This dictionary is used to map each replication target to certain # replication manager object. self.replica_manager = {} # One driver can be configured with only one replication target # to failover. self._replica_target = {} # This boolean is used to indicate whether replication is supported # by this storage. self._replica_enabled = False # This list is used to save the supported replication modes. self._supported_replica_types = [] # This is used to save the available pools in failed-over status self._secondary_pools = None @staticmethod def get_driver_options(): return instorage_mcs_opts @volume_utils.trace def do_setup(self, ctxt): """Check that we have all configuration details from the storage.""" # InStorage has the limitation that can not burst more than 3 new ssh # connections within 1 second. So slow down the initialization. # however, this maybe removed later. greenthread.sleep(1) # Update the instorage state self._update_instorage_state() # v2.1 replication setup self._get_instorage_config() # Validate that the pool exists self._validate_pools_exist() def _update_instorage_state(self): # Get storage system name, id, and code level self._state.update(self._assistant.get_system_info()) # Check if compression is supported self._state['compression_enabled'] = (self._assistant. compression_enabled()) # Get the available I/O groups self._state['available_iogrps'] = (self._assistant. get_available_io_groups()) # Get the iSCSI and FC names of the InStorage/MCS nodes self._state['storage_nodes'] = self._assistant.get_node_info() # Add the iSCSI IP addresses and WWPNs to the storage node info self._assistant.add_iscsi_ip_addrs(self._state['storage_nodes']) self._assistant.add_fc_wwpns(self._state['storage_nodes']) # For each node, check what connection modes it supports. Delete any # nodes that do not support any types (may be partially configured). 
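        # A node is treated as iSCSI-capable when it reports an iscsi_name
        # plus at least one IPv4 or IPv6 address, and as FC-capable when it
        # reports one or more WWPNs; nodes that qualify for neither protocol
        # are collected in to_delete and dropped from the state below.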
to_delete = [] for k, node in self._state['storage_nodes'].items(): if ((len(node['ipv4']) or len(node['ipv6'])) and len(node['iscsi_name'])): node['enabled_protocols'].append(constants.ISCSI) self._state['enabled_protocols'].add(constants.ISCSI) if len(node['WWPN']): node['enabled_protocols'].append(constants.FC) self._state['enabled_protocols'].add(constants.FC) if not len(node['enabled_protocols']): to_delete.append(k) for delkey in to_delete: del self._state['storage_nodes'][delkey] def _get_backend_pools(self): if not self._active_backend_id: return self.configuration.instorage_mcs_volpool_name elif not self._secondary_pools: self._secondary_pools = [self._replica_target.get('pool_name')] return self._secondary_pools def _validate_pools_exist(self): # Validate that the pool exists pools = self._get_backend_pools() for pool in pools: try: self._assistant.get_pool_attrs(pool) except exception.VolumeBackendAPIException: msg = _('Failed getting details for pool %s.') % pool raise exception.InvalidInput(reason=msg) @volume_utils.trace def check_for_setup_error(self): """Ensure that the flags are set properly.""" # Check that we have the system ID information if self._state['system_name'] is None: exception_msg = _('Unable to determine system name.') raise exception.VolumeBackendAPIException(data=exception_msg) if self._state['system_id'] is None: exception_msg = _('Unable to determine system id.') raise exception.VolumeBackendAPIException(data=exception_msg) # Make sure we have at least one node configured if not len(self._state['storage_nodes']): msg = _('do_setup: No configured nodes.') LOG.error(msg) raise exception.VolumeDriverException(message=msg) if self.protocol not in self._state['enabled_protocols']: raise exception.InvalidInput( reason=_('The storage device does not support %(prot)s. ' 'Please configure the device to support %(prot)s or ' 'switch to a driver using a different protocol.') % {'prot': self.protocol}) required_flags = ['san_ip', 'san_ssh_port', 'san_login', 'instorage_mcs_volpool_name'] for flag in required_flags: if not self.configuration.safe_get(flag): raise exception.InvalidInput(reason=_('%s is not set.') % flag) # Ensure that either password or keyfile were set if not (self.configuration.san_password or self.configuration.san_private_key): raise exception.InvalidInput( reason=_('Password or SSH private key is required for ' 'authentication: set either san_password or ' 'san_private_key option.')) opts = self._assistant.build_default_opts(self.configuration) self._assistant.check_vdisk_opts(self._state, opts) def _run_ssh(self, cmd_list, check_exit_code=True, attempts=1): """SSH tool""" cinder_utils.check_ssh_injection(cmd_list) command = ' '.join(cmd_list) if not self.sshpool: try: self.sshpool = self._set_up_sshpool(self.active_ip) except paramiko.SSHException: LOG.warning('Unable to use san_ip to create SSHPool. Now ' 'attempting to use instorage_san_secondary_ip ' 'to create SSHPool.') if self._switch_ip(): self.sshpool = self._set_up_sshpool(self.active_ip) else: LOG.error('Unable to create SSHPool using san_ip ' 'and not able to use ' 'instorage_san_secondary_ip since it is ' 'not configured.') raise try: return self._ssh_execute(self.sshpool, command, check_exit_code, attempts) except Exception: # Need to check if creating an SSHPool instorage_san_secondary_ip # before raising an error. try: if self._switch_ip(): LOG.warning("Unable to execute SSH command with " "%(inactive)s. 
Attempting to execute SSH " "command with %(active)s.", {'inactive': self.inactive_ip, 'active': self.active_ip}) self.sshpool = self._set_up_sshpool(self.active_ip) return self._ssh_execute(self.sshpool, command, check_exit_code, attempts) else: LOG.warning('Not able to use ' 'instorage_san_secondary_ip since it is ' 'not configured.') raise except Exception: with excutils.save_and_reraise_exception(): LOG.error("Error running SSH command: %s", command) def _set_up_sshpool(self, ip): password = self.configuration.san_password privatekey = self.configuration.san_private_key min_size = self.configuration.ssh_min_pool_conn max_size = self.configuration.ssh_max_pool_conn sshpool = ssh_utils.SSHPool( ip, self.configuration.san_ssh_port, self.configuration.ssh_conn_timeout, self.configuration.san_login, password=password, privatekey=privatekey, min_size=min_size, max_size=max_size) return sshpool def _ssh_execute(self, sshpool, command, check_exit_code=True, attempts=1): try: with sshpool.item() as ssh: while attempts > 0: attempts -= 1 try: return processutils.ssh_execute( ssh, command, check_exit_code=check_exit_code) except Exception as e: LOG.exception('Error has occurred') last_exception = e greenthread.sleep(self.DEFAULT_GR_SLEEP) try: raise processutils.ProcessExecutionError( exit_code=last_exception.exit_code, stdout=last_exception.stdout, stderr=last_exception.stderr, cmd=last_exception.cmd) except AttributeError: raise processutils.ProcessExecutionError( exit_code=-1, stdout="", stderr="Error running SSH command", cmd=command) except Exception: with excutils.save_and_reraise_exception(): LOG.error("Error running SSH command: %s", command) def _switch_ip(self): # Change active_ip if instorage_san_secondary_ip is set. if self.configuration.instorage_san_secondary_ip is None: return False self.inactive_ip, self.active_ip = self.active_ip, self.inactive_ip LOG.info('Switch active_ip from %(old)s to %(new)s.', {'old': self.inactive_ip, 'new': self.active_ip}) return True def ensure_export(self, ctxt, volume): """Check that the volume exists on the storage.""" vol_name = self._get_target_vol(volume) volume_defined = self._assistant.is_vdisk_defined(vol_name) if not volume_defined: LOG.error('ensure_export: Volume %s not found on storage.', volume['name']) def create_export(self, ctxt, volume, connector): pass def remove_export(self, ctxt, volume): pass def _get_vdisk_params(self, type_id, volume_type=None, volume_metadata=None): return self._assistant.get_vdisk_params( self.configuration, self._state, type_id, volume_type=volume_type, volume_metadata=volume_metadata) @volume_utils.trace def create_volume(self, volume): opts = self._get_vdisk_params( volume.volume_type_id, volume_metadata=volume.get('volume_metadata')) pool = volume_utils.extract_host(volume.host, 'pool') opts['iogrp'] = self._assistant.select_io_group(self._state, opts) self._assistant.create_vdisk(volume.name, str(volume.size), 'gb', pool, opts) if opts['qos']: self._assistant.add_vdisk_qos(volume.name, opts['qos']) model_update = None ctxt = context.get_admin_context() rep_type = self._get_volume_replicated_type(ctxt, volume) if rep_type: replica_obj = self._get_replica_obj(rep_type) replica_obj.volume_replication_setup(ctxt, volume) model_update = { 'replication_status': fields.ReplicationStatus.ENABLED} return model_update def create_volume_from_snapshot(self, volume, snapshot): if snapshot.volume_size > volume.size: msg = (_("create_volume_from_snapshot: snapshot %(snapshot_name)s " "size is %(snapshot_size)dGB and 
doesn't fit in target " "volume %(volume_name)s of size %(volume_size)dGB.") % {'snapshot_name': snapshot.name, 'snapshot_size': snapshot.volume_size, 'volume_name': volume.name, 'volume_size': volume.size}) LOG.error(msg) raise exception.InvalidInput(message=msg) opts = self._get_vdisk_params( volume.volume_type_id, volume_metadata=volume.get('volume_metadata')) pool = volume_utils.extract_host(volume.host, 'pool') self._assistant.create_copy(snapshot.name, volume.name, snapshot.id, self.configuration, opts, True, pool=pool) # The volume size is equal to the snapshot size in most # of the cases. But in some scenario, the volume size # may be bigger than the source volume size. # InStorage does not support localcopy between two volumes # with two different size. So InStorage will copy volume # from snapshot first and then extend the volume to # the target size. if volume.size > snapshot.volume_size: # extend the new created target volume to expected size. self._extend_volume_op(volume, volume.size, snapshot.volume_size) if opts['qos']: self._assistant.add_vdisk_qos(volume.name, opts['qos']) ctxt = context.get_admin_context() rep_type = self._get_volume_replicated_type(ctxt, volume) if rep_type: self._validate_replication_enabled() replica_obj = self._get_replica_obj(rep_type) replica_obj.volume_replication_setup(ctxt, volume) return {'replication_status': fields.ReplicationStatus.ENABLED} def create_cloned_volume(self, tgt_volume, src_volume): """Creates a clone of the specified volume.""" if src_volume.size > tgt_volume.size: msg = (_("create_cloned_volume: source volume %(src_vol)s " "size is %(src_size)dGB and doesn't fit in target " "volume %(tgt_vol)s of size %(tgt_size)dGB.") % {'src_vol': src_volume.name, 'src_size': src_volume.size, 'tgt_vol': tgt_volume.name, 'tgt_size': tgt_volume.size}) LOG.error(msg) raise exception.InvalidInput(message=msg) opts = self._get_vdisk_params( tgt_volume.volume_type_id, volume_metadata=tgt_volume.get('volume_metadata')) pool = volume_utils.extract_host(tgt_volume.host, 'pool') self._assistant.create_copy(src_volume.name, tgt_volume.name, src_volume.id, self.configuration, opts, True, pool=pool) # The source volume size is equal to target volume size # in most of the cases. But in some scenarios, the target # volume size may be bigger than the source volume size. # InStorage does not support localcopy between two volumes # with two different sizes. So InStorage will copy volume # from source volume first and then extend target # volume to original size. if tgt_volume.size > src_volume.size: # extend the new created target volume to expected size. 
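            # _extend_volume_op(volume, new_size, old_size) grows the vdisk
            # by new_size - old_size, so passing the source size as old_size
            # extends the clone only by the difference.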
self._extend_volume_op(tgt_volume, tgt_volume.size, src_volume.size) if opts['qos']: self._assistant.add_vdisk_qos(tgt_volume.name, opts['qos']) ctxt = context.get_admin_context() rep_type = self._get_volume_replicated_type(ctxt, tgt_volume) if rep_type: self._validate_replication_enabled() replica_obj = self._get_replica_obj(rep_type) replica_obj.volume_replication_setup(ctxt, tgt_volume) return {'replication_status': fields.ReplicationStatus.ENABLED} def extend_volume(self, volume, new_size): self._extend_volume_op(volume, new_size) @volume_utils.trace def _extend_volume_op(self, volume, new_size, old_size=None): volume_name = self._get_target_vol(volume) ret = self._assistant.ensure_vdisk_no_lc_mappings(volume_name, allow_snaps=False) if not ret: msg = (_('_extend_volume_op: Extending a volume with snapshots is ' 'not supported.')) LOG.error(msg) raise exception.VolumeDriverException(message=msg) if old_size is None: old_size = volume.size extend_amt = int(new_size) - old_size rel_info = self._assistant.get_relationship_info(volume_name) if rel_info: LOG.warning('_extend_volume_op: Extending a volume with ' 'remote copy is not recommended.') try: tgt_vol = instorage_const.REPLICA_AUX_VOL_PREFIX + volume.name rep_type = rel_info['copy_type'] self._local_backend_assistant.delete_relationship( volume.name) self._local_backend_assistant.extend_vdisk(volume.name, extend_amt) self._aux_backend_assistant.extend_vdisk(tgt_vol, extend_amt) tgt_sys = self._aux_backend_assistant.get_system_info() self._local_backend_assistant.create_relationship( volume.name, tgt_vol, tgt_sys.get('system_name'), True if instorage_const.ASYNC == rep_type else False) except Exception as e: msg = (_('Failed to extend a volume with remote copy ' '%(volume)s. Exception: ' '%(err)s.') % {'volume': volume.id, 'err': str(e)}) LOG.error(msg) raise exception.VolumeDriverException(message=msg) else: self._assistant.extend_vdisk(volume_name, extend_amt) @volume_utils.trace def delete_volume(self, volume): ctxt = context.get_admin_context() rep_type = self._get_volume_replicated_type(ctxt, volume) if rep_type: self._aux_backend_assistant.delete_rc_volume(volume.name, target_vol=True) if not self._active_backend_id: self._local_backend_assistant.delete_rc_volume(volume.name) else: # If it's in fail over state, also try to delete the volume # in master backend try: self._local_backend_assistant.delete_rc_volume( volume.name) except Exception as ex: LOG.error('Failed to get delete volume %(volume)s in ' 'master backend. 
Exception: %(err)s.', {'volume': volume.name, 'err': ex}) else: if self._active_backend_id: msg = (_('Error: delete non-replicate volume in failover mode' ' is not allowed.')) LOG.error(msg) raise exception.VolumeDriverException(message=msg) else: self._assistant.delete_vdisk(volume.name, False) if volume.id in self._vdiskcopyops: del self._vdiskcopyops[volume.id] if not self._vdiskcopyops: self._vdiskcopyops_loop.stop() self._vdiskcopyops_loop = None def create_snapshot(self, snapshot): source_vol = snapshot.volume pool = volume_utils.extract_host(source_vol.host, 'pool') opts = self._get_vdisk_params(source_vol.volume_type_id) self._assistant.create_copy(snapshot.volume_name, snapshot.name, snapshot.volume_id, self.configuration, opts, False, pool=pool) def delete_snapshot(self, snapshot): self._assistant.delete_vdisk(snapshot.name, False) def add_vdisk_copy(self, volume, dest_pool, vol_type): return self._assistant.add_vdisk_copy(volume, dest_pool, vol_type, self._state, self.configuration) def _add_vdisk_copy_op(self, ctxt, volume, new_op): if volume.id in self._vdiskcopyops: self._vdiskcopyops[volume.id]['copyops'].append(new_op) else: self._vdiskcopyops[volume.id] = {'name': volume.name, 'copyops': [new_op]} # We added the first copy operation, so start the looping call if len(self._vdiskcopyops) == 1: self._vdiskcopyops_loop = loopingcall.FixedIntervalLoopingCall( self._check_volume_copy_ops) self._vdiskcopyops_loop.start(interval=self.VDISKCOPYOPS_INTERVAL) def _rm_vdisk_copy_op(self, ctxt, vol_id, orig_copy_id, new_copy_id): try: self._vdiskcopyops[vol_id]['copyops'].remove((orig_copy_id, new_copy_id)) if not self._vdiskcopyops[vol_id]['copyops']: del self._vdiskcopyops[vol_id] if not self._vdiskcopyops: self._vdiskcopyops_loop.stop() self._vdiskcopyops_loop = None except KeyError: LOG.error('_rm_vdisk_copy_op: Volume %s does not have any ' 'registered vdisk copy operations.', vol_id) return except ValueError: LOG.error('_rm_vdisk_copy_op: Volume %(vol)s does not have ' 'the specified vdisk copy operation: orig=%(orig)s ' 'new=%(new)s.', {'vol': vol_id, 'orig': orig_copy_id, 'new': new_copy_id}) return def _check_volume_copy_ops(self): LOG.debug("Enter: update volume copy status.") ctxt = context.get_admin_context() copy_items = list(self._vdiskcopyops.items()) for vol_id, copy_ops_data in copy_items: vol_name = copy_ops_data['name'] copy_ops = copy_ops_data['copyops'] if not self._assistant.is_vdisk_defined(vol_name): LOG.warning('Volume %s does not exist.', vol_id) del self._vdiskcopyops[vol_id] if not self._vdiskcopyops: self._vdiskcopyops_loop.stop() self._vdiskcopyops_loop = None continue for copy_op in copy_ops: try: synced = self._assistant.check_vdisk_copy_synced( vol_name, copy_op[1]) except Exception: LOG.info('_check_volume_copy_ops: Volume %(vol)s does ' 'not have the specified vdisk copy ' 'operation: orig=%(orig)s new=%(new)s.', {'vol': vol_id, 'orig': copy_op[0], 'new': copy_op[1]}) else: if synced: self._assistant.rm_vdisk_copy( vol_name, copy_op[0]) self._rm_vdisk_copy_op(ctxt, vol_id, copy_op[0], copy_op[1]) LOG.debug("Exit: update volume copy status.") @volume_utils.trace def migrate_volume(self, ctxt, volume, host): """Migrate directly if source and dest are managed by same storage. We create a new vdisk copy in the desired pool, and add the original vdisk copy to the admin_metadata of the volume to be deleted. The deletion will occur using a periodic task once the new copy is synced. 
:param ctxt: Context :param volume: A dictionary describing the volume to migrate :param host: A dictionary describing the host to migrate to, where host['host'] is its name, and host['capabilities'] is a dictionary of its reported capabilities. """ false_ret = (False, None) dest_pool = self._assistant.can_migrate_to_host(host, self._state) if dest_pool is None: return false_ret ctxt = context.get_admin_context() volume_type_id = volume.volume_type_id if volume_type_id is not None: vol_type = volume_types.get_volume_type(ctxt, volume_type_id) else: vol_type = None self._check_volume_copy_ops() new_op = self.add_vdisk_copy(volume.name, dest_pool, vol_type) self._add_vdisk_copy_op(ctxt, volume, new_op) return (True, None) @volume_utils.trace def retype(self, ctxt, volume, new_type, diff, host): """Convert the volume to be of the new type. Returns a boolean indicating whether the retype occurred. :param ctxt: Context :param volume: A volume object describing the volume to migrate :param new_type: A dictionary describing the volume type to convert to :param diff: A dictionary with the difference between the two types :param host: A dictionary describing the host to migrate to, where host['host'] is its name, and host['capabilities'] is a dictionary of its reported capabilities. """ def retype_iogrp_property(volume, new, old): if new != old: self._assistant.change_vdisk_iogrp(volume.name, self._state, (new, old)) no_copy_keys = ['warning', 'autoexpand', 'intier'] copy_keys = ['rsize', 'grainsize', 'compression'] all_keys = no_copy_keys + copy_keys old_opts = self._get_vdisk_params( volume.volume_type_id, volume_metadata=volume.get('volume_matadata')) new_opts = self._get_vdisk_params(new_type['id'], volume_type=new_type) vdisk_changes = [] need_copy = False for key in all_keys: if old_opts[key] != new_opts[key]: if key in copy_keys: need_copy = True break elif key in no_copy_keys: vdisk_changes.append(key) if (volume_utils.extract_host(volume.host, 'pool') != volume_utils.extract_host(host['host'], 'pool')): need_copy = True # Check if retype affects volume replication model_update = None new_rep_type = self._get_specs_replicated_type(new_type) old_rep_type = self._get_volume_replicated_type(ctxt, volume) old_io_grp = self._assistant.get_volume_io_group(volume.name) # There are three options for rep_type: None, sync, async if new_rep_type != old_rep_type: if (old_io_grp not in InStorageAssistant._get_valid_requested_io_groups( self._state, new_opts)): msg = (_('Unable to retype: it is not allowed to change ' 'replication type and io group at the same time.')) LOG.error(msg) raise exception.VolumeDriverException(message=msg) if new_rep_type and old_rep_type: msg = (_('Unable to retype: it is not allowed to change ' '%(old_rep_type)s volume to %(new_rep_type)s ' 'volume.') % {'old_rep_type': old_rep_type, 'new_rep_type': new_rep_type}) LOG.error(msg) raise exception.VolumeDriverException(message=msg) # If volume is replicated, can't copy if need_copy: msg = (_('Unable to retype: Current action needs volume-copy,' ' it is not allowed when new type is replication.' 
' Volume = %s') % volume.id) LOG.error(msg) raise exception.VolumeDriverException(message=msg) new_io_grp = self._assistant.select_io_group(self._state, new_opts) if need_copy: self._check_volume_copy_ops() dest_pool = self._assistant.can_migrate_to_host(host, self._state) if dest_pool is None: return False retype_iogrp_property(volume, new_io_grp, old_io_grp) try: new_op = self.add_vdisk_copy(volume.name, dest_pool, new_type) self._add_vdisk_copy_op(ctxt, volume, new_op) except exception.VolumeDriverException: # roll back changing iogrp property retype_iogrp_property(volume, old_io_grp, new_io_grp) msg = (_('Unable to retype: A copy of volume %s exists. ' 'Retyping would exceed the limit of 2 copies.'), volume.id) LOG.error(msg) raise exception.VolumeDriverException(message=msg) else: retype_iogrp_property(volume, new_io_grp, old_io_grp) self._assistant.change_vdisk_options(volume.name, vdisk_changes, new_opts, self._state) if new_opts['qos']: # Add the new QoS setting to the volume. If the volume has an # old QoS setting, it will be overwritten. self._assistant.update_vdisk_qos(volume.name, new_opts['qos']) elif old_opts['qos']: # If the old_opts contain QoS keys, disable them. self._assistant.disable_vdisk_qos(volume.name, old_opts['qos']) # Delete replica if needed if old_rep_type and not new_rep_type: self._aux_backend_assistant.delete_rc_volume(volume.name, target_vol=True) model_update = { 'replication_status': fields.ReplicationStatus.DISABLED, 'replication_driver_data': None, 'replication_extended_status': None} # Add replica if needed if not old_rep_type and new_rep_type: replica_obj = self._get_replica_obj(new_rep_type) replica_obj.volume_replication_setup(ctxt, volume) model_update = { 'replication_status': fields.ReplicationStatus.ENABLED} return True, model_update def update_migrated_volume(self, ctxt, volume, new_volume, original_volume_status): """Return model update from InStorage for migrated volume. This method should rename the back-end volume name(id) on the destination host back to its original name(id) on the source host. :param ctxt: The context used to run the method update_migrated_volume :param volume: The original volume that was migrated to this backend :param new_volume: The migration volume object that was created on this backend as part of the migration process :param original_volume_status: The status of the original volume :returns: model_update to update DB with any needed changes """ current_name = CONF.volume_name_template % new_volume.id original_volume_name = CONF.volume_name_template % volume.id try: self._assistant.rename_vdisk(current_name, original_volume_name) except exception.VolumeBackendAPIException: LOG.error('Unable to rename the logical volume ' 'for volume: %s', volume.id) return {'_name_id': new_volume._name_id or new_volume.id} # If the back-end name(id) for the volume has been renamed, # it is OK for the volume to keep the original name(id) and there is # no need to use the column "_name_id" to establish the mapping # relationship between the volume id and the back-end volume # name(id). # Set the key "_name_id" to None for a successful rename. model_update = {'_name_id': None} return model_update def manage_existing(self, volume, ref): """Manages an existing vdisk. Renames the vdisk to match the expected name for the volume. Error checking done by manage_existing_get_size is not repeated - if we got here then we have a vdisk that isn't in use (or we don't care if it is in use. 
""" # Check that the reference is valid vdisk = self._manage_input_check(ref) vdisk_io_grp = self._assistant.get_volume_io_group(vdisk['name']) if vdisk_io_grp not in self._state['available_iogrps']: msg = (_("Failed to manage existing volume due to " "the volume to be managed is not in a valid " "I/O group.")) raise exception.ManageExistingVolumeTypeMismatch(reason=msg) # Add replication check ctxt = context.get_admin_context() rep_type = self._get_volume_replicated_type(ctxt, volume) vol_rep_type = None rel_info = self._assistant.get_relationship_info(vdisk['name']) if rel_info: vol_rep_type = rel_info['copy_type'] aux_info = self._aux_backend_assistant.get_system_info() if rel_info['aux_cluster_id'] != aux_info['system_id']: msg = (_("Failed to manage existing volume due to the aux " "cluster for volume %(volume)s is %(aux_id)s. The " "configured cluster id is %(cfg_id)s") % {'volume': vdisk['name'], 'aux_id': rel_info['aux_cluster_id'], 'cfg_id': aux_info['system_id']}) raise exception.ManageExistingVolumeTypeMismatch(reason=msg) if vol_rep_type != rep_type: msg = (_("Failed to manage existing volume due to " "the replication type of the volume to be managed is " "mismatch with the provided replication type.")) raise exception.ManageExistingVolumeTypeMismatch(reason=msg) if volume.volume_type_id: opts = self._get_vdisk_params( volume.volume_type_id, volume_metadata=volume.get('volume_metadata')) vdisk_copy = self._assistant.get_vdisk_copy_attrs( vdisk['name'], '0') if vdisk_copy['autoexpand'] == 'on' and opts['rsize'] == -1: msg = (_("Failed to manage existing volume due to " "the volume to be managed is thin, but " "the volume type chosen is thick.")) raise exception.ManageExistingVolumeTypeMismatch(reason=msg) if not vdisk_copy['autoexpand'] and opts['rsize'] != -1: msg = (_("Failed to manage existing volume due to " "the volume to be managed is thick, but " "the volume type chosen is thin.")) raise exception.ManageExistingVolumeTypeMismatch(reason=msg) if (vdisk_copy['compressed_copy'] == 'no' and opts['compression']): msg = (_("Failed to manage existing volume due to the " "volume to be managed is not compress, but " "the volume type chosen is compress.")) raise exception.ManageExistingVolumeTypeMismatch(reason=msg) if (vdisk_copy['compressed_copy'] == 'yes' and not opts['compression']): msg = (_("Failed to manage existing volume due to the " "volume to be managed is compress, but " "the volume type chosen is not compress.")) raise exception.ManageExistingVolumeTypeMismatch(reason=msg) if (vdisk_io_grp not in InStorageAssistant._get_valid_requested_io_groups( self._state, opts)): msg = (_("Failed to manage existing volume due to " "I/O group mismatch. The I/O group of the " "volume to be managed is %(vdisk_iogrp)s. I/O group " "of the chosen type is %(opt_iogrp)s.") % {'vdisk_iogrp': vdisk['IO_group_name'], 'opt_iogrp': opts['iogrp']}) raise exception.ManageExistingVolumeTypeMismatch(reason=msg) pool = volume_utils.extract_host(volume.host, 'pool') if vdisk['mdisk_grp_name'] != pool: msg = (_("Failed to manage existing volume due to the " "pool of the volume to be managed does not " "match the backend pool. Pool of the " "volume to be managed is %(vdisk_pool)s. 
Pool " "of the backend is %(backend_pool)s.") % {'vdisk_pool': vdisk['mdisk_grp_name'], 'backend_pool': self._get_backend_pools()}) raise exception.ManageExistingVolumeTypeMismatch(reason=msg) model_update = {} self._assistant.rename_vdisk(vdisk['name'], volume.name) if vol_rep_type: aux_vol = instorage_const.REPLICA_AUX_VOL_PREFIX + volume.name self._aux_backend_assistant.rename_vdisk( rel_info['aux_vdisk_name'], aux_vol) model_update = { 'replication_status': fields.ReplicationStatus.ENABLED} return model_update def manage_existing_get_size(self, volume, ref): """Return size of an existing Vdisk for manage_existing. existing_ref is a dictionary of the form: {'source-id': } or {'source-name': } Optional elements are: 'manage_if_in_use': True/False (default is False) If set to True, a volume will be managed even if it is currently attached to a host system. """ # Check that the reference is valid vdisk = self._manage_input_check(ref) # Check if the disk is in use, if we need to. manage_if_in_use = ref.get('manage_if_in_use', False) if (not manage_if_in_use and self._assistant.is_vdisk_in_use(vdisk['name'])): reason = _('The specified vdisk is mapped to a host.') raise exception.ManageExistingInvalidReference(existing_ref=ref, reason=reason) return int(math.ceil(float(vdisk['capacity']) / units.Gi)) def unmanage(self, volume): """Remove the specified volume from Cinder management.""" pass # ## Group method ## # def create_group(self, context, group): """Create a group. Inspur InStorage will create group until group-snapshot creation, db will maintain the volumes and group relationship. """ # now we only support consistent group if not volume_utils.is_group_a_cg_snapshot_type(group): raise NotImplementedError() LOG.debug("Creating group.") model_update = {'status': fields.GroupStatus.AVAILABLE} return model_update def create_group_from_src(self, context, group, volumes, group_snapshot=None, snapshots=None, source_group=None, source_vols=None): """Creates a group from source. :param context: the context of the caller. :param group: the dictionary of the group to be created. :param volumes: a list of volume dictionaries in the group. :param group_snapshot: the dictionary of the group_snapshot as source. :param snapshots: a list of snapshot dictionaries in the group_snapshot. :param source_group: the dictionary of a group as source. :param source_vols: a list of volume dictionaries in the source_group. 
:returns: model_update, volumes_model_update """ # now we only support consistent group if not volume_utils.is_group_a_cg_snapshot_type(group): raise NotImplementedError() LOG.debug('Enter: create_group_from_src.') if group_snapshot and snapshots: group_name = 'group-' + group_snapshot.id sources = snapshots elif source_group and source_vols: group_name = 'group-' + source_group.id sources = source_vols else: error_msg = _("create_group_from_src must be creating from" " a group snapshot, or a source group.") raise exception.InvalidInput(reason=error_msg) LOG.debug('create_group_from_src: group_name %(group_name)s' ' %(sources)s', {'group_name': group_name, 'sources': sources}) self._assistant.create_lc_consistgrp(group_name) # create group timeout = self.configuration.instorage_mcs_localcopy_timeout model_update, snapshots_model = ( self._assistant.create_group_from_source(group, group_name, sources, volumes, self._state, self.configuration, timeout)) LOG.debug("Leave: create_group_from_src.") return model_update, snapshots_model def delete_group(self, context, group, volumes): """Deletes a group. Inspur InStorage will delete the volumes of the group. """ # now we only support consistent group if not volume_utils.is_group_a_cg_snapshot_type(group): raise NotImplementedError() LOG.debug("Deleting group.") model_update = {'status': fields.ConsistencyGroupStatus.DELETED} volumes_model_update = [] for volume in volumes: try: self._assistant.delete_vdisk(volume.name, True) volumes_model_update.append( {'id': volume.id, 'status': fields.ConsistencyGroupStatus.DELETED}) except exception.VolumeBackendAPIException as err: model_update['status'] = ( fields.ConsistencyGroupStatus.ERROR_DELETING) LOG.error("Failed to delete the volume %(vol)s of group. " "Exception: %(exception)s.", {'vol': volume.name, 'exception': err}) volumes_model_update.append( {'id': volume.id, 'status': fields.ConsistencyGroupStatus.ERROR_DELETING}) return model_update, volumes_model_update def update_group(self, ctxt, group, add_volumes=None, remove_volumes=None): """Adds or removes volume(s) to/from an existing group.""" if not volume_utils.is_group_a_cg_snapshot_type(group): raise NotImplementedError() LOG.debug("Updating group.") # as we don't keep group info on device, nonthing need to be done return None, None, None def create_group_snapshot(self, ctxt, group_snapshot, snapshots): """Creates a cgsnapshot.""" # now we only support consistent group if not volume_utils.is_group_a_cg_snapshot_type(group_snapshot): raise NotImplementedError() # Use cgsnapshot id as cg name group_name = 'group_snap-' + group_snapshot.id # Create new cg as cg_snapshot self._assistant.create_lc_consistgrp(group_name) timeout = self.configuration.instorage_mcs_localcopy_timeout model_update, snapshots_model = ( self._assistant.run_group_snapshots(group_name, snapshots, self._state, self.configuration, timeout)) return model_update, snapshots_model def delete_group_snapshot(self, context, group_snapshot, snapshots): """Deletes a cgsnapshot.""" # now we only support consistent group if not volume_utils.is_group_a_cg_snapshot_type(group_snapshot): raise NotImplementedError() group_snapshot_id = group_snapshot.id group_name = 'group_snap-' + group_snapshot_id model_update, snapshots_model = ( self._assistant.delete_group_snapshots(group_name, snapshots)) return model_update, snapshots_model def get_pool(self, volume): attr = self._assistant.get_vdisk_attributes(volume.name) if attr is None: msg = (_('get_pool: Failed to get attributes for volume ' 
'%s') % volume.id) LOG.error(msg) raise exception.VolumeDriverException(message=msg) return attr['mdisk_grp_name'] def _update_volume_stats(self): """Retrieve stats info from volume group.""" LOG.debug("Updating volume stats.") data = {} data['vendor_name'] = 'Inspur' data['driver_version'] = self.VERSION data['storage_protocol'] = self.protocol data['pools'] = [] backend_name = self.configuration.safe_get('volume_backend_name') data['volume_backend_name'] = (backend_name or self._state['system_name']) data['pools'] = [self._build_pool_stats(pool) for pool in self._get_backend_pools()] if self._replica_enabled: data['replication'] = self._replica_enabled data['replication_enabled'] = self._replica_enabled data['replication_targets'] = self._get_replication_targets() self._stats = data def _build_pool_stats(self, pool): """Build pool status""" QoS_support = True pool_stats = {} try: pool_data = self._assistant.get_pool_attrs(pool) if pool_data: in_tier = pool_data['in_tier'] in ['on', 'auto'] total_capacity_gb = float(pool_data['capacity']) / units.Gi free_capacity_gb = float(pool_data['free_capacity']) / units.Gi provisioned_capacity_gb = float( pool_data['virtual_capacity']) / units.Gi rsize = self.configuration.safe_get( 'instorage_mcs_vol_rsize') # rsize of -1 or 100 means fully allocate the mdisk use_thick_provisioning = rsize == -1 or rsize == 100 over_sub_ratio = self.configuration.safe_get( 'max_over_subscription_ratio') location_info = ('InStorageMCSDriver:%(sys_id)s:%(pool)s' % {'sys_id': self._state['system_id'], 'pool': pool_data['name']}) pool_stats = { 'pool_name': pool_data['name'], 'total_capacity_gb': total_capacity_gb, 'free_capacity_gb': free_capacity_gb, 'provisioned_capacity_gb': provisioned_capacity_gb, 'compression_support': self._state['compression_enabled'], 'reserved_percentage': self.configuration.reserved_percentage, 'QoS_support': QoS_support, 'consistent_group_snapshot_enabled': True, 'location_info': location_info, 'intier_support': in_tier, 'multiattach': False, 'thin_provisioning_support': not use_thick_provisioning, 'thick_provisioning_support': use_thick_provisioning, 'max_over_subscription_ratio': over_sub_ratio, } if self._replica_enabled: pool_stats.update({ 'replication_enabled': self._replica_enabled, 'replication_type': self._supported_replica_types, 'replication_targets': self._get_replication_targets(), 'replication_count': len(self._get_replication_targets()) }) except exception.VolumeBackendAPIException: msg = _('Failed getting details for pool %s.') % pool raise exception.VolumeBackendAPIException(data=msg) return pool_stats def _get_replication_targets(self): return [self._replica_target['backend_id']] def _manage_input_check(self, ref): """Verify the input of manage function.""" # Check that the reference is valid if 'source-name' in ref: manage_source = ref['source-name'] vdisk = self._assistant.get_vdisk_attributes(manage_source) elif 'source-id' in ref: manage_source = ref['source-id'] vdisk = self._assistant.vdisk_by_uid(manage_source) else: reason = _('Reference must contain source-id or ' 'source-name element.') raise exception.ManageExistingInvalidReference(existing_ref=ref, reason=reason) if vdisk is None: reason = (_('No vdisk with the UID specified by ref %s.') % manage_source) raise exception.ManageExistingInvalidReference(existing_ref=ref, reason=reason) return vdisk # #### V2.1 replication methods #### # @volume_utils.trace def failover_host(self, context, volumes, secondary_id=None): if not self._replica_enabled: msg = 
_("Replication is not properly enabled on backend.") LOG.error(msg) raise exception.UnableToFailOver(reason=msg) if instorage_const.FAILBACK_VALUE == secondary_id: # In this case the administrator would like to fail back. secondary_id, volumes_update = self._replication_failback(context, volumes) elif (secondary_id == self._replica_target['backend_id'] or secondary_id is None): # In this case the administrator would like to fail over. secondary_id, volumes_update = self._replication_failover(context, volumes) else: msg = (_("Invalid secondary id %s.") % secondary_id) LOG.error(msg) raise exception.InvalidReplicationTarget(reason=msg) return secondary_id, volumes_update def _replication_failback(self, ctxt, volumes): """Fail back all the volume on the secondary backend.""" volumes_update = [] if not self._active_backend_id: LOG.info("Host has been failed back. doesn't need " "to fail back again") return None, volumes_update try: self._local_backend_assistant.get_system_info() except Exception: msg = (_("Unable to failback due to primary is not reachable.")) LOG.error(msg) raise exception.UnableToFailOver(reason=msg) normal_volumes, rep_volumes = self._classify_volume(ctxt, volumes) # start synchronize from aux volume to master volume self._sync_with_aux(ctxt, rep_volumes) self._wait_replica_ready(ctxt, rep_volumes) rep_volumes_update = self._failback_replica_volumes(ctxt, rep_volumes) volumes_update.extend(rep_volumes_update) normal_volumes_update = self._failback_normal_volumes(normal_volumes) volumes_update.extend(normal_volumes_update) self._assistant = self._local_backend_assistant self._active_backend_id = None # Update the instorage state self._update_instorage_state() self._update_volume_stats() return instorage_const.FAILBACK_VALUE, volumes_update @volume_utils.trace def _failback_replica_volumes(self, ctxt, rep_volumes): volumes_update = [] for volume in rep_volumes: rep_type = self._get_volume_replicated_type(ctxt, volume) replica_obj = self._get_replica_obj(rep_type) tgt_volume = instorage_const.REPLICA_AUX_VOL_PREFIX + volume.name rep_info = self._assistant.get_relationship_info(tgt_volume) if not rep_info: replication_status = fields.ReplicationStatus.FAILOVER_ERROR volumes_update.append( {'volume_id': volume.id, 'updates': { 'replication_status': replication_status, 'status': 'error'}}) LOG.error('_failback_replica_volumes:no rc-releationship ' 'is established between master: %(master)s and ' 'aux %(aux)s. 
Please re-establish the ' 'relationship and synchronize the volumes on ' 'backend storage.', {'master': volume.name, 'aux': tgt_volume}) continue LOG.debug('_failover_replica_volumes: vol=%(vol)s, master_vol=' '%(master_vol)s, aux_vol=%(aux_vol)s, state=%(state)s, ' 'primary=%(primary)s', {'vol': volume.name, 'master_vol': rep_info['master_vdisk_name'], 'aux_vol': rep_info['aux_vdisk_name'], 'state': rep_info['state'], 'primary': rep_info['primary']}) try: model_updates = replica_obj.replication_failback(volume) volumes_update.append( {'volume_id': volume.id, 'updates': model_updates}) except exception.VolumeDriverException: LOG.error('Unable to fail back volume %(volume_id)s', {'volume_id': volume.id}) replication_status = fields.ReplicationStatus.FAILOVER_ERROR volumes_update.append( {'volume_id': volume.id, 'updates': {'replication_status': replication_status, 'status': 'error'}}) return volumes_update def _failback_normal_volumes(self, normal_volumes): volumes_update = [] for vol in normal_volumes: pre_status = 'available' if ('replication_driver_data' in vol and vol.replication_driver_data): rep_data = json.loads(vol.replication_driver_data) pre_status = rep_data['previous_status'] volumes_update.append( {'volume_id': vol.id, 'updates': {'status': pre_status, 'replication_driver_data': ''}}) return volumes_update @volume_utils.trace def _sync_with_aux(self, ctxt, volumes): try: rep_mgr = self._get_replica_mgr() rep_mgr.establish_target_partnership() except Exception as ex: LOG.warning('Fail to establish partnership in backend. ' 'error=%(ex)s', {'error': ex}) for volume in volumes: tgt_volume = instorage_const.REPLICA_AUX_VOL_PREFIX + volume.name rep_info = self._assistant.get_relationship_info(tgt_volume) if not rep_info: LOG.error('_sync_with_aux: no rc-releationship is ' 'established between master: %(master)s and aux ' '%(aux)s. Please re-establish the relationship ' 'and synchronize the volumes on backend ' 'storage.', {'master': volume.name, 'aux': tgt_volume}) continue LOG.debug('_sync_with_aux: volume: %(volume)s rep_info:master_vol=' '%(master_vol)s, aux_vol=%(aux_vol)s, state=%(state)s, ' 'primary=%(primary)s', {'volume': volume.name, 'master_vol': rep_info['master_vdisk_name'], 'aux_vol': rep_info['aux_vdisk_name'], 'state': rep_info['state'], 'primary': rep_info['primary']}) try: if rep_info['state'] != instorage_const.REP_CONSIS_SYNC: if rep_info['primary'] == 'master': self._assistant.start_relationship(tgt_volume) else: self._assistant.start_relationship(tgt_volume, primary='aux') except Exception as ex: LOG.warning('Fail to copy data from aux to master. master:' ' %(master)s and aux %(aux)s. Please ' 're-establish the relationship and synchronize' ' the volumes on backend storage. error=' '%(ex)s', {'master': volume.name, 'aux': tgt_volume, 'error': ex}) def _wait_replica_ready(self, ctxt, volumes): for volume in volumes: tgt_volume = instorage_const.REPLICA_AUX_VOL_PREFIX + volume.name try: self._wait_replica_vol_ready(ctxt, tgt_volume) except Exception as ex: LOG.error('_wait_replica_ready: wait for volume:%(volume)s' ' remote copy synchronization failed due to ' 'error:%(err)s.', {'volume': tgt_volume, 'err': ex}) @volume_utils.trace def _wait_replica_vol_ready(self, ctxt, volume): def _replica_vol_ready(): rep_info = self._assistant.get_relationship_info(volume) if not rep_info: msg = (_('_wait_replica_vol_ready: no rc-releationship ' 'is established for volume:%(volume)s. 
Please ' 're-establish the rc-relationship and ' 'synchronize the volumes on backend storage.'), {'volume': volume}) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) LOG.debug('_replica_vol_ready:volume: %(volume)s rep_info: ' 'master_vol=%(master_vol)s, aux_vol=%(aux_vol)s, ' 'state=%(state)s, primary=%(primary)s', {'volume': volume, 'master_vol': rep_info['master_vdisk_name'], 'aux_vol': rep_info['aux_vdisk_name'], 'state': rep_info['state'], 'primary': rep_info['primary']}) if rep_info['state'] == instorage_const.REP_CONSIS_SYNC: return True if rep_info['state'] == instorage_const.REP_IDL_DISC: msg = (_('Wait synchronize failed. volume: %(volume)s'), {'volume': volume}) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) return False self._assistant._wait_for_a_condition( _replica_vol_ready, timeout=instorage_const.DEFAULT_RC_TIMEOUT, interval=instorage_const.DEFAULT_RC_INTERVAL, raise_exception=True) def _replication_failover(self, ctxt, volumes): volumes_update = [] if self._active_backend_id: LOG.info("Host has been failed over to %s", self._active_backend_id) return self._active_backend_id, volumes_update try: self._aux_backend_assistant.get_system_info() except Exception as ex: msg = (_("Unable to failover due to replication target is not " "reachable. error=%(ex)s"), {'error': ex}) LOG.error(msg) raise exception.UnableToFailOver(reason=msg) normal_volumes, rep_volumes = self._classify_volume(ctxt, volumes) rep_volumes_update = self._failover_replica_volumes(ctxt, rep_volumes) volumes_update.extend(rep_volumes_update) normal_volumes_update = self._failover_normal_volumes(normal_volumes) volumes_update.extend(normal_volumes_update) self._assistant = self._aux_backend_assistant self._active_backend_id = self._replica_target['backend_id'] self._secondary_pools = [self._replica_target['pool_name']] # Update the instorage state self._update_instorage_state() self._update_volume_stats() return self._active_backend_id, volumes_update @volume_utils.trace def _failover_replica_volumes(self, ctxt, rep_volumes): volumes_update = [] for volume in rep_volumes: rep_type = self._get_volume_replicated_type(ctxt, volume) replica_obj = self._get_replica_obj(rep_type) # Try do the fail-over. try: rep_info = self._aux_backend_assistant.get_relationship_info( instorage_const.REPLICA_AUX_VOL_PREFIX + volume.name) if not rep_info: rep_status = fields.ReplicationStatus.FAILOVER_ERROR volumes_update.append( {'volume_id': volume.id, 'updates': {'replication_status': rep_status, 'status': 'error'}}) LOG.error('_failover_replica_volumes: no rc-' 'releationship is established for master:' '%(master)s. Please re-establish the rc-' 'relationship and synchronize the volumes on' ' backend storage.', {'master': volume.name}) continue LOG.debug('_failover_replica_volumes: vol=%(vol)s, ' 'master_vol=%(master_vol)s, aux_vol=%(aux_vol)s, ' 'state=%(state)s, primary=%(primary)s', {'vol': volume.name, 'master_vol': rep_info['master_vdisk_name'], 'aux_vol': rep_info['aux_vdisk_name'], 'state': rep_info['state'], 'primary': rep_info['primary']}) model_updates = replica_obj.failover_volume_host(ctxt, volume) volumes_update.append( {'volume_id': volume.id, 'updates': model_updates}) except exception.VolumeDriverException: LOG.error('Unable to failover to aux volume. 
Please make ' 'sure that the aux volume is ready.') volumes_update.append( {'volume_id': volume.id, 'updates': {'status': 'error', 'replication_status': fields.ReplicationStatus.FAILOVER_ERROR}}) return volumes_update def _failover_normal_volumes(self, normal_volumes): volumes_update = [] for volume in normal_volumes: # If the volume is not of replicated type, we need to # force the status into error state so a user knows they # do not have access to the volume. rep_data = json.dumps({'previous_status': volume.status}) volumes_update.append( {'volume_id': volume.id, 'updates': {'status': 'error', 'replication_driver_data': rep_data}}) return volumes_update def _classify_volume(self, ctxt, volumes): normal_volumes = [] replica_volumes = [] for v in volumes: volume_type = self._get_volume_replicated_type(ctxt, v) if volume_type and v.status == 'available': replica_volumes.append(v) else: normal_volumes.append(v) return normal_volumes, replica_volumes def _get_replica_obj(self, rep_type): replica_manager = self.replica_manager[ self._replica_target['backend_id']] return replica_manager.get_replica_obj(rep_type) def _get_replica_mgr(self): replica_manager = self.replica_manager[ self._replica_target['backend_id']] return replica_manager def _get_target_vol(self, volume): tgt_vol = volume.name if self._active_backend_id: ctxt = context.get_admin_context() rep_type = self._get_volume_replicated_type(ctxt, volume) if rep_type: tgt_vol = instorage_const.REPLICA_AUX_VOL_PREFIX + volume.name return tgt_vol def _validate_replication_enabled(self): if not self._replica_enabled: msg = _("Replication is not properly configured on backend.") LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) def _get_specs_replicated_type(self, volume_type): replication_type = None extra_specs = volume_type.get("extra_specs", {}) rep_val = extra_specs.get('replication_enabled') if rep_val == " True": replication_type = extra_specs.get('replication_type', instorage_const.ASYNC) # The format for replication_type in extra spec is in # " async". Otherwise, the code will # not reach here. if replication_type != instorage_const.ASYNC: # Pick up the replication type specified in the # extra spec from the format like " async". replication_type = replication_type.split()[1] if replication_type not in instorage_const.VALID_REP_TYPES: msg = (_("Invalid replication type %s.") % replication_type) LOG.error(msg) raise exception.InvalidInput(reason=msg) return replication_type def _get_volume_replicated_type(self, ctxt, volume): replication_type = None if volume.get("volume_type_id"): volume_type = volume_types.get_volume_type( ctxt, volume.volume_type_id) replication_type = self._get_specs_replicated_type(volume_type) return replication_type def _get_instorage_config(self): self._do_replication_setup() if self._active_backend_id and self._replica_target: self._assistant = self._aux_backend_assistant self._replica_enabled = (True if (self._assistant. replication_licensed() and self._replica_target) else False) if self._replica_enabled: self._supported_replica_types = instorage_const.VALID_REP_TYPES def _do_replication_setup(self): rep_devs = self.configuration.safe_get('replication_device') if not rep_devs: return if len(rep_devs) > 1: raise exception.InvalidInput( reason=_('Multiple replication devices are configured. 
' 'Now only one replication_device is supported.')) required_flags = ['san_ip', 'backend_id', 'san_login', 'san_password', 'pool_name'] for flag in required_flags: if flag not in rep_devs[0]: raise exception.InvalidInput( reason=_('%s is not set.') % flag) rep_target = {} rep_target['san_ip'] = rep_devs[0].get('san_ip') rep_target['backend_id'] = rep_devs[0].get('backend_id') rep_target['san_login'] = rep_devs[0].get('san_login') rep_target['san_password'] = rep_devs[0].get('san_password') rep_target['pool_name'] = rep_devs[0].get('pool_name') # Each replication target will have a corresponding replication. self._replication_initialize(rep_target) def _replication_initialize(self, target): rep_manager = instorage_rep.InStorageMCSReplicationManager( self, target, InStorageAssistant) if self._active_backend_id: if self._active_backend_id != target['backend_id']: msg = (_("Invalid secondary id %s.") % self._active_backend_id) LOG.error(msg) raise exception.InvalidInput(reason=msg) # Setup partnership only in non-failover state else: try: rep_manager.establish_target_partnership() except exception.VolumeDriverException: LOG.error('The replication src %(src)s has not ' 'successfully established partnership with the ' 'replica target %(tgt)s.', {'src': self.configuration.san_ip, 'tgt': target['backend_id']}) self._aux_backend_assistant = rep_manager.get_target_assistant() self.replica_manager[target['backend_id']] = rep_manager self._replica_target = target class InStorageAssistant(object): # All the supported QoS key are saved in this dict. When a new # key is going to add, three values MUST be set: # 'default': to indicate the value, when the parameter is disabled. # 'param': to indicate the corresponding parameter in the command. # 'type': to indicate the type of this value. 
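    # For example, a QoS spec of {'IOThrottling': '500'} maps through this
    # table to the backend parameter 'rate', with the value cast to int as
    # indicated by the 'type' entry.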
WAIT_TIME = 5 mcs_qos_keys = {'IOThrottling': {'default': '0', 'param': 'rate', 'type': int}} def __init__(self, run_ssh): self.ssh = InStorageSSH(run_ssh) self.check_lcmapping_interval = 3 @staticmethod def handle_keyerror(cmd, out): msg = (_('Could not find key in output of command %(cmd)s: %(out)s.') % {'out': out, 'cmd': cmd}) raise exception.VolumeBackendAPIException(data=msg) def compression_enabled(self): """Return whether or not compression is enabled for this system.""" resp = self.ssh.lslicense() keys = ['license_compression_enclosures', 'license_compression_capacity'] for key in keys: if resp.get(key, '0') != '0': return True try: resp = self.ssh.lsguicapabilities() if resp.get('compression', '0') == 'yes': return True except exception.VolumeBackendAPIException: LOG.exception("Failed to fetch licensing scheme.") return False def replication_licensed(self): """Return whether or not replication is enabled for this system.""" return True def get_system_info(self): """Return system's name, ID, and code level.""" resp = self.ssh.lssystem() level = resp['code_level'] match_obj = re.search('([0-9].){3}[0-9]', level) if match_obj is None: msg = _('Failed to get code level (%s).') % level raise exception.VolumeBackendAPIException(data=msg) code_level = match_obj.group().split('.') return {'code_level': tuple([int(x) for x in code_level]), 'system_name': resp['name'], 'system_id': resp['id']} def get_node_info(self): """Return dictionary containing information on system's nodes.""" nodes = {} resp = self.ssh.lsnode() for node_data in resp: try: if node_data['status'] != 'online': continue node = {} node['id'] = node_data['id'] node['name'] = node_data['name'] node['IO_group'] = node_data['IO_group_id'] node['iscsi_name'] = node_data['iscsi_name'] node['WWNN'] = node_data['WWNN'] node['status'] = node_data['status'] node['WWPN'] = [] node['ipv4'] = [] node['ipv6'] = [] node['enabled_protocols'] = [] nodes[node['id']] = node except KeyError: self.handle_keyerror('lsnode', node_data) return nodes def get_pool_attrs(self, pool): """Return attributes for the specified pool.""" return self.ssh.lsmdiskgrp(pool) def get_available_io_groups(self): """Return list of available IO groups.""" iogrps = [] resp = self.ssh.lsiogrp() for iogrp in resp: try: if int(iogrp['node_count']) > 0: iogrps.append(int(iogrp['id'])) except KeyError: self.handle_keyerror('lsiogrp', iogrp) except ValueError: msg = (_('Expected integer for node_count, ' 'mcsinq lsiogrp returned: %(node)s.') % {'node': iogrp['node_count']}) raise exception.VolumeBackendAPIException(data=msg) return iogrps def get_vdisk_count_by_io_group(self): res = {} resp = self.ssh.lsiogrp() for iogrp in resp: try: if int(iogrp['node_count']) > 0: res[int(iogrp['id'])] = int(iogrp['vdisk_count']) except KeyError: self.handle_keyerror('lsiogrp', iogrp) except ValueError: msg = (_('Expected integer for node_count, ' 'mcsinq lsiogrp returned: %(node)s') % {'node': iogrp['node_count']}) raise exception.VolumeBackendAPIException(data=msg) return res def select_io_group(self, state, opts): selected_iog = 0 iog_list = InStorageAssistant._get_valid_requested_io_groups( state, opts) if len(iog_list) == 0: raise exception.InvalidInput( reason=_('Given I/O group(s) %(iogrp)s not valid; available ' 'I/O groups are %(avail)s.') % {'iogrp': opts['iogrp'], 'avail': state['available_iogrps']}) iog_vdc = self.get_vdisk_count_by_io_group() LOG.debug("IO group current balance %s", iog_vdc) min_vdisk_count = iog_vdc[iog_list[0]] selected_iog = iog_list[0] for iog in 
iog_list: if iog_vdc[iog] < min_vdisk_count: min_vdisk_count = iog_vdc[iog] selected_iog = iog LOG.debug("Selected io_group is %d", selected_iog) return selected_iog def get_volume_io_group(self, vol_name): vdisk = self.ssh.lsvdisk(vol_name) if vdisk: resp = self.ssh.lsiogrp() for iogrp in resp: if iogrp['name'] == vdisk['IO_group_name']: return int(iogrp['id']) return None def add_iscsi_ip_addrs(self, storage_nodes): """Add iSCSI IP addresses to system node information.""" resp = self.ssh.lsportip() for ip_data in resp: try: state = ip_data['state'] if ip_data['node_id'] in storage_nodes and ( state == 'configured' or state == 'online'): node = storage_nodes[ip_data['node_id']] if len(ip_data['IP_address']): node['ipv4'].append(ip_data['IP_address']) if len(ip_data['IP_address_6']): node['ipv6'].append(ip_data['IP_address_6']) except KeyError: self.handle_keyerror('lsportip', ip_data) def add_fc_wwpns(self, storage_nodes): """Add FC WWPNs to system node information.""" for key in storage_nodes: node = storage_nodes[key] wwpns = set(node['WWPN']) resp = self.ssh.lsportfc(node_id=node['id']) for port_info in resp: if (port_info['type'] == 'fc' and port_info['status'] == 'active'): wwpns.add(port_info['WWPN']) node['WWPN'] = list(wwpns) LOG.info('WWPN on node %(node)s: %(wwpn)s.', {'node': node['id'], 'wwpn': node['WWPN']}) def get_conn_fc_wwpns(self, host): wwpns = set() resp = self.ssh.lsfabric(host=host) for wwpn in resp.select('local_wwpn'): if wwpn is not None: wwpns.add(wwpn) return list(wwpns) def add_chap_secret_to_host(self, host_name): """Generate and store a randomly-generated CHAP secret for the host.""" chap_secret = volume_utils.generate_password() self.ssh.add_chap_secret(chap_secret, host_name) return chap_secret def get_chap_secret_for_host(self, host_name): """Generate and store a randomly-generated CHAP secret for the host.""" resp = self.ssh.lsiscsiauth() host_found = False for host_data in resp: try: if host_data['name'] == host_name: host_found = True if host_data['iscsi_auth_method'] == 'chap': return host_data['iscsi_chap_secret'] except KeyError: self.handle_keyerror('lsiscsiauth', host_data) if not host_found: msg = _('Failed to find host %s.') % host_name raise exception.VolumeBackendAPIException(data=msg) return None def get_host_from_connector(self, connector, volume_name=None): """Return the InStorage host described by the connector.""" LOG.debug('Enter: get_host_from_connector: %s.', connector) # If we have FC information, we have a faster lookup option host_name = None if 'wwpns' in connector: for wwpn in connector['wwpns']: resp = self.ssh.lsfabric(wwpn=wwpn) for wwpn_info in resp: try: if (wwpn_info['remote_wwpn'] and wwpn_info['name'] and wwpn_info['remote_wwpn'].lower() == wwpn.lower()): host_name = wwpn_info['name'] break except KeyError: self.handle_keyerror('lsfabric', wwpn_info) if host_name: break if host_name: LOG.debug('Leave: get_host_from_connector: host %s.', host_name) return host_name def update_host_list(host, host_list): idx = host_list.index(host) del host_list[idx] host_list.insert(0, host) # That didn't work, so try exhaustive search hosts_info = self.ssh.lshost() host_list = list(hosts_info.select('name')) # If we have a "real" connector, we might be able to find the # host entry with fewer queries if we move the host entries # that contain the connector's host property value to the front # of the list if 'host' in connector: # order host_list such that the host entries that # contain the connector's host name are at the # beginning of 
the list for host in host_list: if re.search(connector['host'], host): update_host_list(host, host_list) # If we have a volume name we have a potential fast path # for finding the matching host for that volume. # Add the host_names that have mappings for our volume to the # head of the list of host names to search them first if volume_name: hosts_map_info = self.ssh.lsvdiskhostmap(volume_name) hosts_map_info_list = list(hosts_map_info.select('host_name')) # remove the fast path host names from the end of the list # and move to the front so they are only searched for once. for host in hosts_map_info_list: update_host_list(host, host_list) found = False for name in host_list: try: resp = self.ssh.lshost(host=name) except exception.VolumeBackendAPIException as ex: LOG.debug("Exception message: %s", ex.msg) if 'CMMVC5754E' in ex.msg: LOG.debug("CMMVC5754E found in CLI exception.") # CMMVC5754E: The specified object does not exist # The host has been deleted while walking the list. # This is a result of a host change on the MCS that # is out of band to this request. continue # unexpected error so reraise it with excutils.save_and_reraise_exception(): pass if 'initiator' in connector: for iscsi in resp.select('iscsi_name'): if iscsi == connector['initiator']: host_name = name found = True break elif 'wwpns' in connector and len(connector['wwpns']): connector_wwpns = [str(x).lower() for x in connector['wwpns']] for wwpn in resp.select('WWPN'): if wwpn and wwpn.lower() in connector_wwpns: host_name = name found = True break if found: break LOG.debug('Leave: get_host_from_connector: host %s.', host_name) return host_name def create_host(self, connector): """Create a new host on the storage system. We create a host name and associate it with the given connection information. The host name will be a cleaned up version of the given host name (at most 55 characters), plus a random 8-character suffix to avoid collisions. The total length should be at most 63 characters. """ LOG.debug('Enter: create_host: host %s.', connector['host']) # Before we start, make sure the host name is a string and that we # have at least one port. host_name = connector['host'] if not isinstance(host_name, str): msg = _('create_host: Host name is not unicode or string.') LOG.error(msg) raise exception.VolumeDriverException(message=msg) ports = [] if 'initiator' in connector: ports.append(['initiator', '%s' % connector['initiator']]) if 'wwpns' in connector: for wwpn in connector['wwpns']: ports.append(['wwpn', '%s' % wwpn]) if not len(ports): msg = _('create_host: No initiators or wwpns supplied.') LOG.error(msg) raise exception.VolumeDriverException(message=msg) # Build a host name for the InStorage host - first clean up the name if isinstance(host_name, str): host_name = unicodedata.normalize('NFKD', host_name).encode( 'ascii', 'replace').decode('ascii') for num in range(0, 128): ch = str(chr(num)) if not ch.isalnum() and ch not in [' ', '.', '-', '_']: host_name = host_name.replace(ch, '-') # InStorage requires the host name to start with a letter or '_'.
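# Illustrative example (hypothetical connector value): a host of # '9-compute01.example.com' passes the character cleanup unchanged, is # prefixed with '_' below because it starts with a digit, and becomes # something like '_9-compute01.example.com-00042137' after the random # suffix is added.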
if not re.match('^[A-Za-z]', host_name): host_name = '_' + host_name # Add a random 8-character suffix to avoid collisions rand_id = str(random.randint(0, 99999999)).zfill(8) host_name = '%s-%s' % (host_name[:55], rand_id) # Create a host with one port port = ports.pop(0) self.ssh.mkhost(host_name, port[0], port[1]) # Add any additional ports to the host for port in ports: self.ssh.addhostport(host_name, port[0], port[1]) LOG.debug('Leave: create_host: host %(host)s - %(host_name)s.', {'host': connector['host'], 'host_name': host_name}) return host_name def delete_host(self, host_name): self.ssh.rmhost(host_name) def check_host_mapped_vols(self, host_name): return self.ssh.lshostvdiskmap(host_name) def map_vol_to_host(self, volume_name, host_name, multihostmap): """Create a mapping between a volume to a host.""" LOG.debug('Enter: map_vol_to_host: volume %(volume_name)s to ' 'host %(host_name)s.', {'volume_name': volume_name, 'host_name': host_name}) # Check if this volume is already mapped to this host result_lun = self.ssh.get_vdiskhostmapid(volume_name, host_name) if result_lun is None: result_lun = self.ssh.mkvdiskhostmap(host_name, volume_name, None, multihostmap) LOG.debug('Leave: map_vol_to_host: LUN %(result_lun)s, volume ' '%(volume_name)s, host %(host_name)s.', {'result_lun': result_lun, 'volume_name': volume_name, 'host_name': host_name}) return int(result_lun) def unmap_vol_from_host(self, volume_name, host_name): """Unmap the volume and delete the host if it has no more mappings.""" LOG.debug('Enter: unmap_vol_from_host: volume %(volume_name)s from ' 'host %(host_name)s.', {'volume_name': volume_name, 'host_name': host_name}) # Check if the mapping exists resp = self.ssh.lsvdiskhostmap(volume_name) if not len(resp): LOG.warning('unmap_vol_from_host: No mapping of volume ' '%(vol_name)s to any host found.', {'vol_name': volume_name}) return host_name if host_name is None: if len(resp) > 1: LOG.warning('unmap_vol_from_host: Multiple mappings of ' 'volume %(vol_name)s found, no host ' 'specified.', {'vol_name': volume_name}) return else: host_name = resp[0]['host_name'] else: found = False for h in resp.select('host_name'): if h == host_name: found = True if not found: LOG.warning('unmap_vol_from_host: No mapping of volume ' '%(vol_name)s to host %(host)s found.', {'vol_name': volume_name, 'host': host_name}) return host_name # We now know that the mapping exists self.ssh.rmvdiskhostmap(host_name, volume_name) LOG.debug('Leave: unmap_vol_from_host: volume %(volume_name)s from ' 'host %(host_name)s.', {'volume_name': volume_name, 'host_name': host_name}) return host_name @staticmethod def build_default_opts(config): # Ignore capitalization opt = {'rsize': config.instorage_mcs_vol_rsize, 'warning': config.instorage_mcs_vol_warning, 'autoexpand': config.instorage_mcs_vol_autoexpand, 'grainsize': config.instorage_mcs_vol_grainsize, 'compression': config.instorage_mcs_vol_compression, 'intier': config.instorage_mcs_vol_intier, 'iogrp': config.instorage_mcs_vol_iogrp, 'qos': None, 'replication': False} return opt @staticmethod def check_vdisk_opts(state, opts): # Check that grainsize is 32/64/128/256 if opts['grainsize'] not in [32, 64, 128, 256]: raise exception.InvalidInput( reason=_('Illegal value specified for ' 'instorage_mcs_vol_grainsize: set to either ' '32, 64, 128, or 256.')) # Check that compression is supported if opts['compression'] and not state['compression_enabled']: raise exception.InvalidInput( reason=_('System does not support compression.')) # Check that rsize is set 
if compression is set if opts['compression'] and opts['rsize'] == -1: raise exception.InvalidInput( reason=_('If compression is set to True, rsize must ' 'also be set (not equal to -1).')) iogs = InStorageAssistant._get_valid_requested_io_groups(state, opts) if len(iogs) == 0: raise exception.InvalidInput( reason=_('Given I/O group(s) %(iogrp)s not valid; available ' 'I/O groups are %(avail)s.') % {'iogrp': opts['iogrp'], 'avail': state['available_iogrps']}) @staticmethod def _get_valid_requested_io_groups(state, opts): given_iogs = str(opts['iogrp']) iog_list = given_iogs.split(',') # convert to int iog_list = list(map(int, iog_list)) LOG.debug("Requested iogroups %s", iog_list) LOG.debug("Available iogroups %s", state['available_iogrps']) filtiog = set(iog_list).intersection(state['available_iogrps']) iog_list = list(filtiog) LOG.debug("Filtered (valid) requested iogroups %s", iog_list) return iog_list def _get_opts_from_specs(self, opts, specs): qos = {} for k, value in specs.items(): # Get the scope, if using scope format key_split = k.split(':') if len(key_split) == 1: scope = None key = key_split[0] else: scope = key_split[0] key = key_split[1] # We generally do not look at capabilities in the driver, but # replication is a special case where the user asks for # a volume to be replicated, and we want both the scheduler and # the driver to act on the value. if ((not scope or scope == 'capabilities') and key == 'replication'): scope = None key = 'replication' words = value.split() if not (words and len(words) == 2 and words[0] == '<is>'): LOG.error("Replication must be specified as " "'<is> True' or '<is> False'.") del words[0] value = words[0] # Add the QoS. if scope and scope == 'qos': if key in self.mcs_qos_keys.keys(): try: type_fn = self.mcs_qos_keys[key]['type'] value = type_fn(value) qos[key] = value except ValueError: continue # Any keys that the driver should look at should have the # 'drivers' scope. if scope and scope != 'drivers': continue if key in opts: this_type = type(opts[key]).__name__ if this_type == 'int': value = int(value) elif this_type == 'bool': value = strutils.bool_from_string(value) opts[key] = value if len(qos) != 0: opts['qos'] = qos return opts def _get_qos_from_volume_metadata(self, volume_metadata): """Return the QoS information from the volume metadata.""" qos = {} for i in volume_metadata: k = i.get('key', None) value = i.get('value', None) key_split = k.split(':') if len(key_split) == 1: scope = None key = key_split[0] else: scope = key_split[0] key = key_split[1] # Add the QoS. if scope and scope == 'qos': if key in self.mcs_qos_keys.keys(): try: type_fn = self.mcs_qos_keys[key]['type'] value = type_fn(value) qos[key] = value except ValueError: continue return qos def _wait_for_a_condition(self, testmethod, timeout=None, interval=INTERVAL_1_SEC, raise_exception=False): start_time = time.time() if timeout is None: timeout = DEFAULT_TIMEOUT def _inner(): try: testValue = testmethod() except Exception as ex: if raise_exception: LOG.exception("_wait_for_a_condition: %s" " execution failed.", testmethod.__name__) raise exception.VolumeBackendAPIException(data=ex) else: testValue = False LOG.debug('Assistant.'
'_wait_for_condition: %(method_name)s ' 'execution failed for %(exception)s.', {'method_name': testmethod.__name__, 'exception': ex.message}) if testValue: raise loopingcall.LoopingCallDone() if int(time.time()) - start_time > timeout: msg = ( _('CommandLineAssistant._wait_for_condition: ' '%s timeout.') % testmethod.__name__) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) timer = loopingcall.FixedIntervalLoopingCall(_inner) timer.start(interval=interval).wait() def get_vdisk_params(self, config, state, type_id, volume_type=None, volume_metadata=None): """Return the parameters for creating the vdisk. Get volume type and defaults from config options and take them into account. """ opts = self.build_default_opts(config) ctxt = context.get_admin_context() if volume_type is None and type_id is not None: volume_type = volume_types.get_volume_type(ctxt, type_id) if volume_type: qos_specs_id = volume_type.get('qos_specs_id') specs = dict(volume_type).get('extra_specs') # NOTE: We prefer the qos_specs association # and over-ride any existing # extra-specs settings if present if qos_specs_id is not None: kvs = qos_specs.get_qos_specs(ctxt, qos_specs_id)['specs'] # Merge the qos_specs into extra_specs and qos_specs has higher # priority than extra_specs if they have different values for # the same key. specs.update(kvs) opts = self._get_opts_from_specs(opts, specs) if (opts['qos'] is None and config.instorage_mcs_allow_tenant_qos and volume_metadata): qos = self._get_qos_from_volume_metadata(volume_metadata) if len(qos) != 0: opts['qos'] = qos self.check_vdisk_opts(state, opts) return opts @staticmethod def _get_vdisk_create_params(opts): intier = 'on' if opts['intier'] else 'off' if opts['rsize'] == -1: params = [] else: params = ['-rsize', '%s%%' % str(opts['rsize']), '-autoexpand', '-warning', '%s%%' % str(opts['warning'])] if not opts['autoexpand']: params.remove('-autoexpand') if opts['compression']: params.append('-compressed') else: params.extend(['-grainsize', str(opts['grainsize'])]) params.extend(['-intier', intier]) return params def create_vdisk(self, name, size, units, pool, opts): name = '"%s"' % name LOG.debug('Enter: create_vdisk: vdisk %s.', name) params = self._get_vdisk_create_params(opts) self.ssh.mkvdisk(name, size, units, pool, opts, params) LOG.debug('Leave: _create_vdisk: volume %s.', name) def delete_vdisk(self, vdisk, force): """Ensures that vdisk is not part of FC mapping and deletes it.""" LOG.debug('Enter: delete_vdisk: vdisk %s.', vdisk) if not self.is_vdisk_defined(vdisk): LOG.info('Tried to delete non-existent vdisk %s.', vdisk) return self.ensure_vdisk_no_lc_mappings(vdisk, allow_snaps=True, allow_lctgt=True) self.ssh.rmvdisk(vdisk, force=force) LOG.debug('Leave: delete_vdisk: vdisk %s.', vdisk) def is_vdisk_defined(self, vdisk_name): """Check if vdisk is defined.""" attrs = self.get_vdisk_attributes(vdisk_name) return attrs is not None def get_vdisk_attributes(self, vdisk): attrs = self.ssh.lsvdisk(vdisk) return attrs def get_vdisk_copy_attrs(self, vdisk, copy_id): return self.ssh.lsvdiskcopy(vdisk, copy_id=copy_id)[0] def get_vdisk_copy_ids(self, vdisk): resp = self.ssh.lsvdiskcopy(vdisk) if len(resp) == 2: if resp[0]['primary'] == 'yes': primary = resp[0]['copy_id'] secondary = resp[1]['copy_id'] else: primary = resp[1]['copy_id'] secondary = resp[0]['copy_id'] return primary, secondary else: msg = (_('list_vdisk_copy failed: No copy of volume %s exists.') % vdisk) raise exception.VolumeDriverException(message=msg) def get_vdisk_copies(self, 
vdisk): copies = {'primary': None, 'secondary': None} resp = self.ssh.lsvdiskcopy(vdisk) for copy_id, status, sync, primary, mdisk_grp in ( resp.select('copy_id', 'status', 'sync', 'primary', 'mdisk_grp_name')): copy = {'copy_id': copy_id, 'status': status, 'sync': sync, 'primary': primary, 'mdisk_grp_name': mdisk_grp, 'sync_progress': None} if copy['sync'] != 'yes': progress_info = self.ssh.lsvdisksyncprogress(vdisk, copy_id) copy['sync_progress'] = progress_info['progress'] if copy['primary'] == 'yes': copies['primary'] = copy else: copies['secondary'] = copy return copies def create_copy(self, src, tgt, src_id, config, opts, full_copy, pool=None): """Create a new snapshot using LocalCopy.""" LOG.debug('Enter: create_copy: snapshot %(src)s to %(tgt)s.', {'tgt': tgt, 'src': src}) src_attrs = self.get_vdisk_attributes(src) if src_attrs is None: msg = (_('create_copy: Source vdisk %(src)s (%(src_id)s) ' 'does not exist.') % {'src': src, 'src_id': src_id}) LOG.error(msg) raise exception.VolumeDriverException(message=msg) src_size = src_attrs['capacity'] # In case we need to use a specific pool if not pool: pool = src_attrs['mdisk_grp_name'] opts['iogrp'] = src_attrs['IO_group_id'] self.create_vdisk(tgt, src_size, 'b', pool, opts) timeout = config.instorage_mcs_localcopy_timeout try: self.run_localcopy(src, tgt, timeout, config.instorage_mcs_localcopy_rate, full_copy=full_copy) except Exception: with excutils.save_and_reraise_exception(): self.delete_vdisk(tgt, True) LOG.debug('Leave: _create_copy: snapshot %(tgt)s from ' 'vdisk %(src)s.', {'tgt': tgt, 'src': src}) def extend_vdisk(self, vdisk, amount): self.ssh.expandvdisksize(vdisk, amount) def add_vdisk_copy(self, vdisk, dest_pool, volume_type, state, config): """Add a vdisk copy in the given pool.""" resp = self.ssh.lsvdiskcopy(vdisk) if len(resp) > 1: msg = (_('add_vdisk_copy failed: A copy of volume %s exists. ' 'Adding another copy would exceed the limit of ' '2 copies.') % vdisk) raise exception.VolumeDriverException(message=msg) orig_copy_id = resp[0].get("copy_id", None) if orig_copy_id is None: msg = (_('add_vdisk_copy started without a vdisk copy in the ' 'expected pool.')) LOG.error(msg) raise exception.VolumeDriverException(message=msg) if volume_type is None: opts = self.get_vdisk_params(config, state, None) else: opts = self.get_vdisk_params(config, state, volume_type['id'], volume_type=volume_type) params = self._get_vdisk_create_params(opts) new_copy_id = self.ssh.addvdiskcopy(vdisk, dest_pool, params) return (orig_copy_id, new_copy_id) def check_vdisk_copy_synced(self, vdisk, copy_id): sync = self.ssh.lsvdiskcopy(vdisk, copy_id=copy_id)[0]['sync'] if sync == 'yes': return True return False def rm_vdisk_copy(self, vdisk, copy_id): self.ssh.rmvdiskcopy(vdisk, copy_id) def _prepare_lc_map(self, lc_map_id, timeout): self.ssh.prestartlcmap(lc_map_id) mapping_ready = False max_retries = (timeout // self.WAIT_TIME) + 1 for try_number in range(1, max_retries): mapping_attrs = self._get_localcopy_mapping_attributes(lc_map_id) if (mapping_attrs is None or 'status' not in mapping_attrs): break if mapping_attrs['status'] == 'prepared': mapping_ready = True break elif mapping_attrs['status'] == 'stopped': self.ssh.prestartlcmap(lc_map_id) elif mapping_attrs['status'] != 'preparing': msg = (_('Unexecpted mapping status %(status)s for mapping ' '%(id)s. 
Attributes: %(attr)s.') % {'status': mapping_attrs['status'], 'id': lc_map_id, 'attr': mapping_attrs}) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) greenthread.sleep(self.WAIT_TIME) if not mapping_ready: msg = (_('Mapping %(id)s prepare failed to complete within the ' 'allotted %(to)d seconds timeout. Terminating.') % {'id': lc_map_id, 'to': timeout}) LOG.error(msg) raise exception.VolumeDriverException(message=msg) # Consistency Group def start_lc_consistgrp(self, lc_consistgrp): self.ssh.startlcconsistgrp(lc_consistgrp) def create_lc_consistgrp(self, lc_consistgrp): self.ssh.mklcconsistgrp(lc_consistgrp) def delete_lc_consistgrp(self, lc_consistgrp): self.ssh.rmlcconsistgrp(lc_consistgrp) def run_consistgrp_snapshots(self, lc_consistgrp, snapshots, state, config, timeout): model_update = {'status': fields.ConsistencyGroupStatus.AVAILABLE} snapshots_model_update = [] try: for snapshot in snapshots: opts = self.get_vdisk_params(config, state, snapshot.volume_type_id) self.create_localcopy_to_consistgrp(snapshot.volume_name, snapshot.name, lc_consistgrp, config, opts) self.prepare_lc_consistgrp(lc_consistgrp, timeout) self.start_lc_consistgrp(lc_consistgrp) # There is CG limitation that could not create more than 128 CGs. # After start CG, we delete CG to avoid CG limitation. # Cinder general will maintain the CG and snapshots relationship. self.delete_lc_consistgrp(lc_consistgrp) except exception.VolumeBackendAPIException as err: model_update['status'] = fields.ConsistencyGroupStatus.ERROR # Release cg self.delete_lc_consistgrp(lc_consistgrp) LOG.error("Failed to create CGSnapshot. " "Exception: %s.", err) for snapshot in snapshots: snapshots_model_update.append( {'id': snapshot.id, 'status': model_update['status']}) return model_update, snapshots_model_update def delete_consistgrp_snapshots(self, lc_consistgrp, snapshots): """Delete localcopy maps and consistent group.""" model_update = {'status': fields.ConsistencyGroupStatus.DELETED} snapshots_model_update = [] try: for snapshot in snapshots: self.ssh.rmvdisk(snapshot.name, True) except exception.VolumeBackendAPIException as err: model_update['status'] = ( fields.ConsistencyGroupStatus.ERROR_DELETING) LOG.error("Failed to delete the snapshot %(snap)s of " "CGSnapshot. Exception: %(exception)s.", {'snap': snapshot.name, 'exception': err}) for snapshot in snapshots: snapshots_model_update.append( {'id': snapshot.id, 'status': model_update['status']}) return model_update, snapshots_model_update def run_group_snapshots(self, lc_group, snapshots, state, config, timeout): model_update = {'status': fields.GroupStatus.AVAILABLE} snapshots_model_update = [] try: for snapshot in snapshots: opts = self.get_vdisk_params(config, state, snapshot.volume_type_id) self.create_localcopy_to_consistgrp(snapshot.volume_name, snapshot.name, lc_group, config, opts) self.prepare_lc_consistgrp(lc_group, timeout) self.start_lc_consistgrp(lc_group) # There is CG limitation that could not create more than 128 CGs. # After start CG, we delete CG to avoid CG limitation. # Cinder general will maintain the group and snapshots # relationship. self.delete_lc_consistgrp(lc_group) except exception.VolumeBackendAPIException as err: model_update['status'] = fields.GroupStatus.ERROR # Release cg self.delete_lc_consistgrp(lc_group) LOG.error("Failed to create Group_Snapshot. 
" "Exception: %s.", err) for snapshot in snapshots: snapshots_model_update.append( {'id': snapshot.id, 'status': model_update['status']}) return model_update, snapshots_model_update def delete_group_snapshots(self, lc_group, snapshots): """Delete localcopy maps and group.""" model_update = {'status': fields.GroupStatus.DELETED} snapshots_model_update = [] try: for snapshot in snapshots: self.ssh.rmvdisk(snapshot.name, True) except exception.VolumeBackendAPIException as err: model_update['status'] = ( fields.GroupStatus.ERROR_DELETING) LOG.error("Failed to delete the snapshot %(snap)s of " "Group_Snapshot. Exception: %(exception)s.", {'snap': snapshot.name, 'exception': err}) for snapshot in snapshots: snapshots_model_update.append( {'id': snapshot.id, 'status': model_update['status']}) return model_update, snapshots_model_update def prepare_lc_consistgrp(self, lc_consistgrp, timeout): """Prepare LC Consistency Group.""" self.ssh.prestartlcconsistgrp(lc_consistgrp) def prepare_lc_consistgrp_success(): mapping_ready = False mapping_attrs = self._get_localcopy_consistgrp_attr(lc_consistgrp) if (mapping_attrs is None or 'status' not in mapping_attrs): pass if mapping_attrs['status'] == 'prepared': mapping_ready = True elif mapping_attrs['status'] == 'stopped': self.ssh.prestartlcconsistgrp(lc_consistgrp) elif mapping_attrs['status'] != 'preparing': msg = (_('Unexpected mapping status %(status)s for mapping ' '%(id)s. Attributes: %(attr)s.') % {'status': mapping_attrs['status'], 'id': lc_consistgrp, 'attr': mapping_attrs}) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) return mapping_ready self._wait_for_a_condition(prepare_lc_consistgrp_success, timeout) def create_group_from_source(self, group, lc_group, sources, targets, state, config, timeout): """Create group from source""" LOG.debug('Enter: create_group_from_source: group %(group)s' ' source %(source)s, target %(target)s', {'group': lc_group, 'source': sources, 'target': targets}) model_update = {'status': fields.GroupStatus.AVAILABLE} ctxt = context.get_admin_context() try: for source, target in zip(sources, targets): opts = self.get_vdisk_params(config, state, source.volume_type_id) pool = volume_utils.extract_host(target.host, 'pool') self.create_localcopy_to_consistgrp(source.name, target.name, lc_group, config, opts, True, pool=pool) self.prepare_lc_consistgrp(lc_group, timeout) self.start_lc_consistgrp(lc_group) self.delete_lc_consistgrp(lc_group) volumes_model_update = self._get_volume_model_updates( ctxt, targets, group.id, model_update['status']) except exception.VolumeBackendAPIException as err: model_update['status'] = fields.GroupStatus.ERROR volumes_model_update = self._get_volume_model_updates( ctxt, targets, group.id, model_update['status']) with excutils.save_and_reraise_exception(): self.delete_lc_consistgrp(lc_group) LOG.error("Failed to create group from group_snapshot. 
" "Exception: %s", err) return model_update, volumes_model_update LOG.debug('Leave: create_cg_from_source.') return model_update, volumes_model_update def _get_volume_model_updates(self, ctxt, volumes, cgId, status='available'): """Update the volume model's status and return it.""" volume_model_updates = [] LOG.info("Updating status for CG: %(id)s.", {'id': cgId}) if volumes: for volume in volumes: volume_model_updates.append({'id': volume.id, 'status': status}) else: LOG.info("No volume found for CG: %(cg)s.", {'cg': cgId}) return volume_model_updates def run_localcopy(self, source, target, timeout, copy_rate, full_copy=True): """Create a LocalCopy mapping from the source to the target.""" LOG.debug('Enter: run_localcopy: execute LocalCopy from source ' '%(source)s to target %(target)s.', {'source': source, 'target': target}) lc_map_id = self.ssh.mklcmap(source, target, full_copy, copy_rate) self._prepare_lc_map(lc_map_id, timeout) self.ssh.startlcmap(lc_map_id) LOG.debug('Leave: run_localcopy: LocalCopy started from ' '%(source)s to %(target)s.', {'source': source, 'target': target}) def create_localcopy_to_consistgrp(self, source, target, consistgrp, config, opts, full_copy=False, pool=None): """Create a LocalCopy mapping and add to consistent group.""" LOG.debug('Enter: create_localcopy_to_consistgrp: create LocalCopy ' 'from source %(source)s to target %(target)s. ' 'Then add the localcopy to %(cg)s.', {'source': source, 'target': target, 'cg': consistgrp}) src_attrs = self.get_vdisk_attributes(source) if src_attrs is None: msg = (_('create_copy: Source vdisk %(src)s ' 'does not exist.') % {'src': source}) LOG.error(msg) raise exception.VolumeDriverException(message=msg) src_size = src_attrs['capacity'] # In case we need to use a specific pool if not pool: pool = src_attrs['mdisk_grp_name'] opts['iogrp'] = src_attrs['IO_group_id'] self.create_vdisk(target, src_size, 'b', pool, opts) self.ssh.mklcmap(source, target, full_copy, config.instorage_mcs_localcopy_rate, consistgrp=consistgrp) LOG.debug('Leave: create_localcopy_to_consistgrp: ' 'LocalCopy started from %(source)s to %(target)s.', {'source': source, 'target': target}) def _get_vdisk_lc_mappings(self, vdisk): """Return LocalCopy mappings that this vdisk is associated with.""" mapping_ids = [] resp = self.ssh.lsvdisklcmappings(vdisk) for id in resp.select('id'): mapping_ids.append(id) return mapping_ids def _get_localcopy_mapping_attributes(self, lc_map_id): resp = self.ssh.lslcmap(lc_map_id) if not len(resp): return None return resp[0] def _get_localcopy_consistgrp_attr(self, lc_map_id): resp = self.ssh.lslcconsistgrp(lc_map_id) if not len(resp): return None return resp[0] def _check_vdisk_lc_mappings(self, name, allow_snaps=True, allow_lctgt=False): """LocalCopy mapping check helper.""" LOG.debug('Loopcall: _check_vdisk_lc_mappings(), vdisk %s.', name) mapping_ids = self._get_vdisk_lc_mappings(name) wait_for_copy = False rmlcmap_failed_e = None for map_id in mapping_ids: attrs = self._get_localcopy_mapping_attributes(map_id) if not attrs: continue source = attrs['source_vdisk_name'] target = attrs['target_vdisk_name'] copy_rate = attrs['copy_rate'] status = attrs['status'] if allow_lctgt and target == name and status == 'copying': self.ssh.stoplcmap(map_id) attrs = self._get_localcopy_mapping_attributes(map_id) if attrs: status = attrs['status'] if copy_rate == '0': if source == name: # Vdisk with snapshots. Return False if snapshot # not allowed. 
if not allow_snaps: raise loopingcall.LoopingCallDone(retvalue=False) self.ssh.chlcmap(map_id, copyrate='50', autodel='on') wait_for_copy = True else: # A snapshot if target != name: msg = (_('Vdisk %(name)s not involved in ' 'mapping %(src)s -> %(tgt)s.') % {'name': name, 'src': source, 'tgt': target}) LOG.error(msg) raise exception.VolumeDriverException(message=msg) if status in ['copying', 'prepared']: self.ssh.stoplcmap(map_id) # Need to wait for the lcmap to change to # stopped state before remove lcmap wait_for_copy = True elif status in ['stopping', 'preparing']: wait_for_copy = True else: try: self.ssh.rmlcmap(map_id) except exception.VolumeBackendAPIException as e: rmlcmap_failed_e = e # Case 4: Copy in progress - wait and will autodelete else: if status == 'prepared': self.ssh.stoplcmap(map_id) self.ssh.rmlcmap(map_id) elif status in ['idle_or_copied', 'stopped']: # Prepare failed or stopped self.ssh.rmlcmap(map_id) else: wait_for_copy = True if not wait_for_copy and rmlcmap_failed_e is not None: raise rmlcmap_failed_e if not wait_for_copy or not len(mapping_ids): raise loopingcall.LoopingCallDone(retvalue=True) def ensure_vdisk_no_lc_mappings(self, name, allow_snaps=True, allow_lctgt=False): """Ensure vdisk has no localcopy mappings.""" timer = loopingcall.FixedIntervalLoopingCall( self._check_vdisk_lc_mappings, name, allow_snaps, allow_lctgt) # Create a timer greenthread. The default volume service heart # beat is every 10 seconds. The localcopy usually takes hours # before it finishes. Don't set the sleep interval shorter # than the heartbeat. Otherwise volume service heartbeat # will not be serviced. LOG.debug('Calling _ensure_vdisk_no_lc_mappings: vdisk %s.', name) ret = timer.start(interval=self.check_lcmapping_interval).wait() timer.stop() return ret def start_relationship(self, volume_name, primary=None): vol_attrs = self.get_vdisk_attributes(volume_name) if vol_attrs['RC_name']: self.ssh.startrcrelationship(vol_attrs['RC_name'], primary) def stop_relationship(self, volume_name, access=False): vol_attrs = self.get_vdisk_attributes(volume_name) if vol_attrs['RC_name']: self.ssh.stoprcrelationship(vol_attrs['RC_name'], access=access) def create_relationship(self, master, aux, system, asynccopy): try: rc_id = self.ssh.mkrcrelationship(master, aux, system, asynccopy) except exception.VolumeBackendAPIException as e: rc_id = None # CMMVC5959E is the code in InStorage, meaning that # there is a relationship that already has this name on the # master cluster. if 'CMMVC5959E' not in str(e): # If there is no relation between the primary and the # secondary back-end storage, the exception is raised. 
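# Note: when CMMVC5959E is tolerated, rc_id stays None, so no # relationship is started below; only a freshly created relationship # is started via start_relationship.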
raise if rc_id: self.start_relationship(master) def delete_relationship(self, volume_name): vol_attrs = self.get_vdisk_attributes(volume_name) if vol_attrs['RC_name']: self.ssh.rmrcrelationship(vol_attrs['RC_name'], True) def get_relationship_info(self, volume_name): vol_attrs = self.get_vdisk_attributes(volume_name) if not vol_attrs or not vol_attrs['RC_name']: LOG.info("Unable to get remote copy information for " "volume %s", volume_name) return relationship = self.ssh.lsrcrelationship(vol_attrs['RC_name']) return relationship[0] if len(relationship) > 0 else None def delete_rc_volume(self, volume_name, target_vol=False): vol_name = volume_name if target_vol: vol_name = instorage_const.REPLICA_AUX_VOL_PREFIX + volume_name try: rel_info = self.get_relationship_info(vol_name) if rel_info: self.delete_relationship(vol_name) self.delete_vdisk(vol_name, False) except Exception as e: msg = (_('Unable to delete the volume for ' 'volume %(vol)s. Exception: %(err)s.') % {'vol': vol_name, 'err': e}) LOG.error(msg) raise exception.VolumeDriverException(message=msg) def switch_relationship(self, relationship, aux=True): self.ssh.switchrelationship(relationship, aux) def get_partnership_info(self, system_name): partnership = self.ssh.lspartnership(system_name) return partnership[0] if len(partnership) > 0 else None def get_partnershipcandidate_info(self, system_name): candidates = self.ssh.lspartnershipcandidate() for candidate in candidates: if system_name == candidate['name']: return candidate return None def mkippartnership(self, ip_v4, bandwidth=1000, copyrate=50): self.ssh.mkippartnership(ip_v4, bandwidth, copyrate) def mkfcpartnership(self, system_name, bandwidth=1000, copyrate=50): self.ssh.mkfcpartnership(system_name, bandwidth, copyrate) def chpartnership(self, partnership_id): self.ssh.chpartnership(partnership_id) @staticmethod def can_migrate_to_host(host, state): if 'location_info' not in host['capabilities']: return None info = host['capabilities']['location_info'] try: (dest_type, dest_id, dest_pool) = info.split(':') except ValueError: return None if (dest_type != 'InStorageMCSDriver' or dest_id != state['system_id']): return None return dest_pool def add_vdisk_qos(self, vdisk, qos): """Add the QoS configuration to the volume.""" for key, value in qos.items(): if key in self.mcs_qos_keys.keys(): param = self.mcs_qos_keys[key]['param'] self.ssh.chvdisk(vdisk, ['-' + param, str(value)]) def update_vdisk_qos(self, vdisk, qos): """Update all the QoS in terms of a key and value. mcs_qos_keys saves all the supported QoS parameters. Going through this dict, we set the new values to all the parameters. If QoS is available in the QoS configuration, the value is taken from it; if not, the value will be set to default. """ for key, value in self.mcs_qos_keys.items(): param = value['param'] if key in qos.keys(): # If the value is set in QoS, take the value from # the QoS configuration. v = qos[key] else: # If not, set the value to default. v = value['default'] self.ssh.chvdisk(vdisk, ['-' + param, str(v)]) def disable_vdisk_qos(self, vdisk, qos): """Disable the QoS.""" for key, value in qos.items(): if key in self.mcs_qos_keys.keys(): param = self.mcs_qos_keys[key]['param'] # Take the default value. 
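# e.g. for the only key currently defined, 'IOThrottling', this issues # chvdisk with '-rate 0' to put the vdisk back to its default, # un-throttled setting.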
value = self.mcs_qos_keys[key]['default'] self.ssh.chvdisk(vdisk, ['-' + param, value]) def change_vdisk_options(self, vdisk, changes, opts, state): if 'warning' in opts: opts['warning'] = '%s%%' % str(opts['warning']) if 'intier' in opts: opts['intier'] = 'on' if opts['intier'] else 'off' if 'autoexpand' in opts: opts['autoexpand'] = 'on' if opts['autoexpand'] else 'off' for key in changes: self.ssh.chvdisk(vdisk, ['-' + key, opts[key]]) def change_vdisk_iogrp(self, vdisk, state, iogrp): if state['code_level'] < (3, 0, 0, 0): LOG.debug('Ignore change IO group as storage code level is ' '%(code_level)s, below the required 3, 0, 0, 0.', {'code_level': state['code_level']}) else: self.ssh.movevdisk(vdisk, str(iogrp[0])) self.ssh.addvdiskaccess(vdisk, str(iogrp[0])) self.ssh.rmvdiskaccess(vdisk, str(iogrp[1])) def vdisk_by_uid(self, vdisk_uid): """Returns the properties of the vdisk with the specified UID. Returns None if no such disk exists. """ vdisks = self.ssh.lsvdisks_from_filter('vdisk_UID', vdisk_uid) if len(vdisks) == 0: return None if len(vdisks) != 1: msg = (_('Expected single vdisk returned from lsvdisk when ' 'filtering on vdisk_UID. %(count)s were returned.') % {'count': len(vdisks)}) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) vdisk = vdisks.result[0] return self.ssh.lsvdisk(vdisk['name']) def is_vdisk_in_use(self, vdisk): """Returns True if the specified vdisk is mapped to at least 1 host.""" resp = self.ssh.lsvdiskhostmap(vdisk) return len(resp) != 0 def rename_vdisk(self, vdisk, new_name): self.ssh.chvdisk(vdisk, ['-name', new_name]) class InStorageSSH(object): """SSH interface to Inspur InStorage systems.""" def __init__(self, run_ssh): self._ssh = run_ssh def _run_ssh(self, ssh_cmd): try: return self._ssh(ssh_cmd) except processutils.ProcessExecutionError as e: msg = (_('CLI Exception output:\n command: %(cmd)s\n ' 'stdout: %(out)s\n stderr: %(err)s.') % {'cmd': ssh_cmd, 'out': e.stdout, 'err': e.stderr}) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) def run_ssh_inq(self, ssh_cmd, delim='!', with_header=False): """Run an SSH command and return parsed output.""" raw = self._run_ssh(ssh_cmd) return CLIParser(raw, ssh_cmd=ssh_cmd, delim=delim, with_header=with_header) def run_ssh_assert_no_output(self, ssh_cmd): """Run an SSH command and assert no output returned.""" out, err = self._run_ssh(ssh_cmd) if len(out.strip()) != 0: msg = (_('Expected no output from CLI command %(cmd)s, ' 'got %(out)s.') % {'cmd': ' '.join(ssh_cmd), 'out': out}) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) def run_ssh_check_created(self, ssh_cmd): """Run an SSH command and return the ID of the created object.""" out, err = self._run_ssh(ssh_cmd) try: match_obj = re.search(r'\[([0-9]+)\],? 
successfully created', out) return match_obj.group(1) except (AttributeError, IndexError): msg = (_('Failed to parse CLI output:\n command: %(cmd)s\n ' 'stdout: %(out)s\n stderr: %(err)s.') % {'cmd': ssh_cmd, 'out': out, 'err': err}) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) def lsnode(self, node_id=None): with_header = True ssh_cmd = ['mcsinq', 'lsnode', '-delim', '!'] if node_id: with_header = False ssh_cmd.append(node_id) return self.run_ssh_inq(ssh_cmd, with_header=with_header) def lslicense(self): ssh_cmd = ['mcsinq', 'lslicense', '-delim', '!'] return self.run_ssh_inq(ssh_cmd)[0] def lsguicapabilities(self): ssh_cmd = ['mcsinq', 'lsguicapabilities', '-delim', '!'] return self.run_ssh_inq(ssh_cmd)[0] def lssystem(self): ssh_cmd = ['mcsinq', 'lssystem', '-delim', '!'] return self.run_ssh_inq(ssh_cmd)[0] def lsmdiskgrp(self, pool): ssh_cmd = ['mcsinq', 'lsmdiskgrp', '-bytes', '-delim', '!', '"%s"' % pool] return self.run_ssh_inq(ssh_cmd)[0] def lsiogrp(self): ssh_cmd = ['mcsinq', 'lsiogrp', '-delim', '!'] return self.run_ssh_inq(ssh_cmd, with_header=True) def lsportip(self): ssh_cmd = ['mcsinq', 'lsportip', '-delim', '!'] return self.run_ssh_inq(ssh_cmd, with_header=True) def lshost(self, host=None): with_header = True ssh_cmd = ['mcsinq', 'lshost', '-delim', '!'] if host: with_header = False ssh_cmd.append('"%s"' % host) return self.run_ssh_inq(ssh_cmd, with_header=with_header) def lsiscsiauth(self): ssh_cmd = ['mcsinq', 'lsiscsiauth', '-delim', '!'] return self.run_ssh_inq(ssh_cmd, with_header=True) def lsfabric(self, wwpn=None, host=None): ssh_cmd = ['mcsinq', 'lsfabric', '-delim', '!'] if wwpn: ssh_cmd.extend(['-wwpn', wwpn]) elif host: ssh_cmd.extend(['-host', '"%s"' % host]) else: msg = (_('Must pass wwpn or host to lsfabric.')) LOG.error(msg) raise exception.VolumeDriverException(message=msg) return self.run_ssh_inq(ssh_cmd, with_header=True) def lsrcrelationship(self, rc_rel): key_value = 'name=%s' % rc_rel ssh_cmd = ['mcsinq', 'lsrcrelationship', '-filtervalue', key_value, '-delim', '!'] return self.run_ssh_inq(ssh_cmd, with_header=True) def lspartnership(self, system_name): key_value = 'name=%s' % system_name ssh_cmd = ['mcsinq', 'lspartnership', '-filtervalue', key_value, '-delim', '!'] return self.run_ssh_inq(ssh_cmd, with_header=True) def lspartnershipcandidate(self): ssh_cmd = ['mcsinq', 'lspartnershipcandidate', '-delim', '!'] return self.run_ssh_inq(ssh_cmd, with_header=True) def lsvdiskhostmap(self, vdisk): ssh_cmd = ['mcsinq', 'lsvdiskhostmap', '-delim', '!', '"%s"' % vdisk] return self.run_ssh_inq(ssh_cmd, with_header=True) def lshostvdiskmap(self, host): ssh_cmd = ['mcsinq', 'lshostvdiskmap', '-delim', '!', '"%s"' % host] return self.run_ssh_inq(ssh_cmd, with_header=True) def lsvdisk(self, vdisk): """Return vdisk attributes or None if it doesn't exist.""" ssh_cmd = ['mcsinq', 'lsvdisk', '-bytes', '-delim', '!', '"%s"' % vdisk] out, err = self._ssh(ssh_cmd, check_exit_code=False) if not err: return CLIParser((out, err), ssh_cmd=ssh_cmd, delim='!', with_header=False)[0] if 'CMMVC5754E' in err: return None msg = (_('CLI Exception output:\n command: %(cmd)s\n ' 'stdout: %(out)s\n stderr: %(err)s.') % {'cmd': ssh_cmd, 'out': out, 'err': err}) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) def lsvdisks_from_filter(self, filter_name, value): """Performs an lsvdisk command, filtering the results as specified. Returns an iterable for all matching vdisks. 
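Illustrative example (hypothetical UID): lsvdisks_from_filter('vdisk_UID', '600507680C8A0000') runs 'mcsinq lsvdisk -bytes -delim ! -filtervalue vdisk_UID=600507680C8A0000'.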
""" ssh_cmd = ['mcsinq', 'lsvdisk', '-bytes', '-delim', '!', '-filtervalue', '%s=%s' % (filter_name, value)] return self.run_ssh_inq(ssh_cmd, with_header=True) def lsvdisklcmappings(self, vdisk): ssh_cmd = ['mcsinq', 'lsvdisklcmappings', '-delim', '!', '"%s"' % vdisk] return self.run_ssh_inq(ssh_cmd, with_header=True) def lslcmap(self, lc_map_id): ssh_cmd = ['mcsinq', 'lslcmap', '-filtervalue', 'id=%s' % lc_map_id, '-delim', '!'] return self.run_ssh_inq(ssh_cmd, with_header=True) def lslcconsistgrp(self, lc_consistgrp): ssh_cmd = ['mcsinq', 'lslcconsistgrp', '-delim', '!', lc_consistgrp] out, err = self._ssh(ssh_cmd) return CLIParser((out, err), ssh_cmd=ssh_cmd, delim='!', with_header=False) def lsvdiskcopy(self, vdisk, copy_id=None): ssh_cmd = ['mcsinq', 'lsvdiskcopy', '-delim', '!'] with_header = True if copy_id: ssh_cmd += ['-copy', copy_id] with_header = False ssh_cmd += ['"%s"' % vdisk] return self.run_ssh_inq(ssh_cmd, with_header=with_header) def lsvdisksyncprogress(self, vdisk, copy_id): ssh_cmd = ['mcsinq', 'lsvdisksyncprogress', '-delim', '!', '-copy', copy_id, '"%s"' % vdisk] return self.run_ssh_inq(ssh_cmd, with_header=True)[0] def lsportfc(self, node_id): ssh_cmd = ['mcsinq', 'lsportfc', '-delim', '!', '-filtervalue', 'node_id=%s' % node_id] return self.run_ssh_inq(ssh_cmd, with_header=True) @staticmethod def _create_port_arg(port_type, port_name): if port_type == 'initiator': port = ['-iscsiname'] else: port = ['-hbawwpn'] port.append(port_name) return port def mkhost(self, host_name, port_type, port_name): port = self._create_port_arg(port_type, port_name) ssh_cmd = ['mcsop', 'mkhost', '-force'] + port ssh_cmd += ['-name', '"%s"' % host_name] return self.run_ssh_check_created(ssh_cmd) def addhostport(self, host, port_type, port_name): port = self._create_port_arg(port_type, port_name) ssh_cmd = ['mcsop', 'addhostport', '-force'] + port + ['"%s"' % host] self.run_ssh_assert_no_output(ssh_cmd) def add_chap_secret(self, secret, host): ssh_cmd = ['mcsop', 'chhost', '-chapsecret', secret, '"%s"' % host] self.run_ssh_assert_no_output(ssh_cmd) def mkvdiskhostmap(self, host, vdisk, lun, multihostmap): """Map vdisk to host. If vdisk already mapped and multihostmap is True, use the force flag. 
""" ssh_cmd = ['mcsop', 'mkvdiskhostmap', '-host', '"%s"' % host, vdisk] if lun: ssh_cmd.insert(ssh_cmd.index(vdisk), '-scsi') ssh_cmd.insert(ssh_cmd.index(vdisk), lun) if multihostmap: ssh_cmd.insert(ssh_cmd.index('mkvdiskhostmap') + 1, '-force') try: self.run_ssh_check_created(ssh_cmd) result_lun = self.get_vdiskhostmapid(vdisk, host) if result_lun is None or (lun and lun != result_lun): msg = (_('mkvdiskhostmap error:\n command: %(cmd)s\n ' 'lun: %(lun)s\n result_lun: %(result_lun)s') % {'cmd': ssh_cmd, 'lun': lun, 'result_lun': result_lun}) LOG.error(msg) raise exception.VolumeDriverException(message=msg) return result_lun except Exception as ex: if (not multihostmap and hasattr(ex, 'message') and 'CMMVC6071E' in ex.message): LOG.error('volume is not allowed to be mapped to multi host') raise exception.VolumeDriverException( message=_('CMMVC6071E The VDisk-to-host mapping was not ' 'created because the VDisk is already mapped ' 'to a host.\n"')) with excutils.save_and_reraise_exception(): LOG.error('Error mapping VDisk-to-host') def mkrcrelationship(self, master, aux, system, asynccopy): ssh_cmd = ['mcsop', 'mkrcrelationship', '-master', master, '-aux', aux, '-cluster', system] if asynccopy: ssh_cmd.append('-async') return self.run_ssh_check_created(ssh_cmd) def rmrcrelationship(self, relationship, force=False): ssh_cmd = ['mcsop', 'rmrcrelationship'] if force: ssh_cmd += ['-force'] ssh_cmd += [relationship] self.run_ssh_assert_no_output(ssh_cmd) def switchrelationship(self, relationship, aux=True): primary = 'aux' if aux else 'master' ssh_cmd = ['mcsop', 'switchrcrelationship', '-primary', primary, relationship] self.run_ssh_assert_no_output(ssh_cmd) def startrcrelationship(self, rc_rel, primary=None): ssh_cmd = ['mcsop', 'startrcrelationship', '-force'] if primary: ssh_cmd.extend(['-primary', primary]) ssh_cmd.append(rc_rel) self.run_ssh_assert_no_output(ssh_cmd) def stoprcrelationship(self, relationship, access=False): ssh_cmd = ['mcsop', 'stoprcrelationship'] if access: ssh_cmd.append('-access') ssh_cmd.append(relationship) self.run_ssh_assert_no_output(ssh_cmd) def mkippartnership(self, ip_v4, bandwidth=1000, backgroundcopyrate=50): ssh_cmd = ['mcsop', 'mkippartnership', '-type', 'ipv4', '-clusterip', ip_v4, '-linkbandwidthmbits', str(bandwidth), '-backgroundcopyrate', str(backgroundcopyrate)] return self.run_ssh_assert_no_output(ssh_cmd) def mkfcpartnership(self, system_name, bandwidth=1000, backgroundcopyrate=50): ssh_cmd = ['mcsop', 'mkfcpartnership', '-linkbandwidthmbits', str(bandwidth), '-backgroundcopyrate', str(backgroundcopyrate), system_name] return self.run_ssh_assert_no_output(ssh_cmd) def chpartnership(self, partnership_id, start=True): action = '-start' if start else '-stop' ssh_cmd = ['mcsop', 'chpartnership', action, partnership_id] return self.run_ssh_assert_no_output(ssh_cmd) def rmvdiskhostmap(self, host, vdisk): ssh_cmd = ['mcsop', 'rmvdiskhostmap', '-host', '"%s"' % host, '"%s"' % vdisk] self.run_ssh_assert_no_output(ssh_cmd) def get_vdiskhostmapid(self, vdisk, host): resp = self.lsvdiskhostmap(vdisk) for mapping_info in resp: if mapping_info['host_name'] == host: lun_id = mapping_info['SCSI_id'] return lun_id return None def rmhost(self, host): ssh_cmd = ['mcsop', 'rmhost', '"%s"' % host] self.run_ssh_assert_no_output(ssh_cmd) def mkvdisk(self, name, size, units, pool, opts, params): ssh_cmd = ['mcsop', 'mkvdisk', '-name', name, '-mdiskgrp', '"%s"' % pool, '-iogrp', str(opts['iogrp']), '-size', size, '-unit', units] + params try: return 
self.run_ssh_check_created(ssh_cmd) except Exception as ex: if hasattr(ex, 'msg') and 'CMMVC6372W' in ex.msg: vdisk = self.lsvdisk(name) if vdisk: LOG.warning('CMMVC6372W The virtualized storage ' 'capacity that the cluster is using is ' 'approaching the virtualized storage ' 'capacity that is licensed.') return vdisk['id'] with excutils.save_and_reraise_exception(): LOG.exception('Failed to create vdisk %(vol)s.', {'vol': name}) def rmvdisk(self, vdisk, force=True): ssh_cmd = ['mcsop', 'rmvdisk'] if force: ssh_cmd += ['-force'] ssh_cmd += ['"%s"' % vdisk] self.run_ssh_assert_no_output(ssh_cmd) def chvdisk(self, vdisk, params): ssh_cmd = ['mcsop', 'chvdisk'] + params + ['"%s"' % vdisk] self.run_ssh_assert_no_output(ssh_cmd) def movevdisk(self, vdisk, iogrp): ssh_cmd = ['mcsop', 'movevdisk', '-iogrp', iogrp, '"%s"' % vdisk] self.run_ssh_assert_no_output(ssh_cmd) def expandvdisksize(self, vdisk, amount): ssh_cmd = ( ['mcsop', 'expandvdisksize', '-size', str(amount), '-unit', 'gb', '"%s"' % vdisk]) self.run_ssh_assert_no_output(ssh_cmd) def mklcmap(self, source, target, full_copy, copy_rate, consistgrp=None): ssh_cmd = ['mcsop', 'mklcmap', '-source', '"%s"' % source, '-target', '"%s"' % target, '-autodelete'] if not full_copy: ssh_cmd.extend(['-copyrate', '0']) else: ssh_cmd.extend(['-copyrate', str(copy_rate)]) if consistgrp: ssh_cmd.extend(['-consistgrp', consistgrp]) out, err = self._ssh(ssh_cmd, check_exit_code=False) if 'successfully created' not in out: msg = (_('CLI Exception output:\n command: %(cmd)s\n ' 'stdout: %(out)s\n stderr: %(err)s.') % {'cmd': ssh_cmd, 'out': out, 'err': err}) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) try: match_obj = re.search(r'LocalCopy Mapping, id \[([0-9]+)\], ' 'successfully created', out) lc_map_id = match_obj.group(1) except (AttributeError, IndexError): msg = (_('Failed to parse CLI output:\n command: %(cmd)s\n ' 'stdout: %(out)s\n stderr: %(err)s.') % {'cmd': ssh_cmd, 'out': out, 'err': err}) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) return lc_map_id def prestartlcmap(self, lc_map_id): ssh_cmd = ['mcsop', 'prestartlcmap', lc_map_id] self.run_ssh_assert_no_output(ssh_cmd) def startlcmap(self, lc_map_id): ssh_cmd = ['mcsop', 'startlcmap', lc_map_id] self.run_ssh_assert_no_output(ssh_cmd) def prestartlcconsistgrp(self, lc_consist_group): ssh_cmd = ['mcsop', 'prestartlcconsistgrp', lc_consist_group] self.run_ssh_assert_no_output(ssh_cmd) def startlcconsistgrp(self, lc_consist_group): ssh_cmd = ['mcsop', 'startlcconsistgrp', lc_consist_group] self.run_ssh_assert_no_output(ssh_cmd) def chlcmap(self, lc_map_id, copyrate='50', autodel='on'): ssh_cmd = ['mcsop', 'chlcmap', '-copyrate', copyrate, '-autodelete', autodel, lc_map_id] self.run_ssh_assert_no_output(ssh_cmd) def stoplcmap(self, lc_map_id): ssh_cmd = ['mcsop', 'stoplcmap', lc_map_id] self.run_ssh_assert_no_output(ssh_cmd) def rmlcmap(self, lc_map_id): ssh_cmd = ['mcsop', 'rmlcmap', '-force', lc_map_id] self.run_ssh_assert_no_output(ssh_cmd) def mklcconsistgrp(self, lc_consist_group): ssh_cmd = ['mcsop', 'mklcconsistgrp', '-name', lc_consist_group] return self.run_ssh_check_created(ssh_cmd) def rmlcconsistgrp(self, lc_consist_group): ssh_cmd = ['mcsop', 'rmlcconsistgrp', '-force', lc_consist_group] return self.run_ssh_assert_no_output(ssh_cmd) def addvdiskcopy(self, vdisk, dest_pool, params): ssh_cmd = (['mcsop', 'addvdiskcopy'] + params + ['-mdiskgrp', '"%s"' % dest_pool, '"%s"' % vdisk]) return self.run_ssh_check_created(ssh_cmd) def 
rmvdiskcopy(self, vdisk, copy_id): ssh_cmd = ['mcsop', 'rmvdiskcopy', '-copy', copy_id, '"%s"' % vdisk] self.run_ssh_assert_no_output(ssh_cmd) def addvdiskaccess(self, vdisk, iogrp): ssh_cmd = ['mcsop', 'addvdiskaccess', '-iogrp', iogrp, '"%s"' % vdisk] self.run_ssh_assert_no_output(ssh_cmd) def rmvdiskaccess(self, vdisk, iogrp): ssh_cmd = ['mcsop', 'rmvdiskaccess', '-iogrp', iogrp, '"%s"' % vdisk] self.run_ssh_assert_no_output(ssh_cmd) class CLIParser(object): """Parse MCS CLI output and generate iterable.""" def __init__(self, raw, ssh_cmd=None, delim='!', with_header=True): super(CLIParser, self).__init__() if ssh_cmd: self.ssh_cmd = ' '.join(ssh_cmd) else: self.ssh_cmd = 'None' self.raw = raw self.delim = delim self.with_header = with_header self.result = self._parse() def select(self, *keys): for a in self.result: vs = [] for k in keys: v = a.get(k, None) if isinstance(v, str) or v is None: v = [v] if isinstance(v, list): vs.append(v) for item in zip(*vs): if len(item) == 1: yield item[0] else: yield item def __getitem__(self, key): try: return self.result[key] except KeyError: msg = (_('Did not find the expected key %(key)s in %(fun)s: ' '%(raw)s.') % {'key': key, 'fun': self.ssh_cmd, 'raw': self.raw}) raise exception.VolumeBackendAPIException(data=msg) def __iter__(self): for a in self.result: yield a def __len__(self): return len(self.result) def _parse(self): def get_reader(content, delim): for line in content.lstrip().splitlines(): line = line.strip() if line: yield line.split(delim) else: yield [] if isinstance(self.raw, str): stdout, stderr = self.raw, '' else: stdout, stderr = self.raw reader = get_reader(stdout, self.delim) result = [] if self.with_header: hds = tuple() for row in reader: hds = row break for row in reader: cur = dict() if len(hds) != len(row): msg = (_('Unexpected CLI response: header/row mismatch. ' 'header: %(header)s, row: %(row)s.') % {'header': hds, 'row': row}) raise exception.VolumeBackendAPIException(data=msg) for k, v in zip(hds, row): CLIParser.append_dict(cur, k, v) result.append(cur) else: cur = dict() for row in reader: if row: CLIParser.append_dict(cur, row[0], ' '.join(row[1:])) elif cur: # start new section result.append(cur) cur = dict() if cur: result.append(cur) return result @staticmethod def append_dict(dict_, key, value): key, value = key.strip(), value.strip() obj = dict_.get(key, None) if obj is None: dict_[key] = value elif isinstance(obj, list): obj.append(value) dict_[key] = obj else: dict_[key] = [obj, value] return dict_ ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/drivers/inspur/instorage/instorage_const.py0000664000175000017500000000226200000000000026410 0ustar00zuulzuul00000000000000# Copyright 2017 Inspur Corp. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
# DEV_MODEL_INSTORAGE = '1813' DEV_MODEL_INSTORAGE_AS5X00 = '2076' REP_CAP_DEVS = (DEV_MODEL_INSTORAGE, DEV_MODEL_INSTORAGE_AS5X00) # constants used for replication ASYNC = 'async' SYNC = 'sync' VALID_REP_TYPES = (ASYNC, SYNC) FAILBACK_VALUE = 'default' DEFAULT_RC_TIMEOUT = 3600 * 24 * 7 DEFAULT_RC_INTERVAL = 5 REPLICA_AUX_VOL_PREFIX = 'aux_' # remote mirror copy status REP_CONSIS_SYNC = 'consistent_synchronized' REP_CONSIS_STOP = 'consistent_stopped' REP_SYNC = 'synchronized' REP_IDL = 'idling' REP_IDL_DISC = 'idling_disconnected' REP_STATUS_ON_LINE = 'online' ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/drivers/inspur/instorage/instorage_fc.py0000664000175000017500000002341100000000000025651 0ustar00zuulzuul00000000000000# Copyright 2017 Inspur Corp. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # """ FC volume driver for Inspur InStorage family storage systems. """ from oslo_log import log as logging from oslo_utils import excutils from cinder import coordination from cinder import exception from cinder.i18n import _ from cinder import interface from cinder.volume import driver from cinder.volume.drivers.inspur.instorage import instorage_common from cinder.volume import volume_utils from cinder.zonemanager import utils as fczm_utils LOG = logging.getLogger(__name__) @interface.volumedriver class InStorageMCSFCDriver(instorage_common.InStorageMCSCommonDriver, driver.FibreChannelDriver): """INSPUR InStorage MCS FC volume driver. Version history: .. code-block:: none 1.0 - Initial driver """ VERSION = "1.0.0" # ThirdPartySystems wiki page CI_WIKI_NAME = "Inspur_CI" def __init__(self, *args, **kwargs): super(InStorageMCSFCDriver, self).__init__(*args, **kwargs) self.protocol = 'FC' @staticmethod def make_initiator_target_all2all_map(initiator_wwpns, target_wwpns): """Build a simplistic all-to-all mapping.""" i_t_map = {} for i_wwpn in initiator_wwpns: i_t_map[str(i_wwpn)] = [] for t_wwpn in target_wwpns: i_t_map[i_wwpn].append(t_wwpn) return i_t_map @volume_utils.trace @coordination.synchronized('instorage-host' '{self._state[system_id]}' '{connector[host]}') def initialize_connection(self, volume, connector): """Perform necessary work to make a FC connection. To be able to create an FC connection from a given host to a volume, we must: 1. Translate the given WWNN to a host name 2. Create new host on the storage system if it does not yet exist 3. Map the volume to the host if it is not already done 4. 
Return the connection information for relevant nodes (in the proper I/O group) """ volume_name = self._get_target_vol(volume) # Check if a host object is defined for this host name host_name = self._assistant.get_host_from_connector(connector) if host_name is None: # Host does not exist - add a new host to InStorage/MCS host_name = self._assistant.create_host(connector) volume_attributes = self._assistant.get_vdisk_attributes(volume_name) if volume_attributes is None: msg = (_('initialize_connection: Failed to get attributes' ' for volume %s.') % volume_name) LOG.error(msg) raise exception.VolumeDriverException(message=msg) lun_id = self._assistant.map_vol_to_host(volume_name, host_name, True) try: preferred_node = volume_attributes['preferred_node_id'] IO_group = volume_attributes['IO_group_id'] except KeyError as e: LOG.error('Did not find expected column name in ' 'lsvdisk: %s.', e) raise exception.VolumeBackendAPIException( data=_('initialize_connection: Missing volume attribute for ' 'volume %s.') % volume_name) try: # Get preferred node and other nodes in I/O group preferred_node_entry = None io_group_nodes = [] for node in self._state['storage_nodes'].values(): if node['id'] == preferred_node: preferred_node_entry = node if node['IO_group'] == IO_group: io_group_nodes.append(node) if not len(io_group_nodes): msg = (_('initialize_connection: No node found in ' 'I/O group %(gid)s for volume %(vol)s.') % {'gid': IO_group, 'vol': volume_name}) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) if not preferred_node_entry: # Get 1st node in I/O group preferred_node_entry = io_group_nodes[0] LOG.warning('initialize_connection: Did not find a ' 'preferred node for volume %s.', volume_name) properties = {} properties['target_discovered'] = False properties['target_lun'] = lun_id properties['volume_id'] = volume.id conn_wwpns = self._assistant.get_conn_fc_wwpns(host_name) # If conn_wwpns is empty, then that means that there were # no target ports with visibility to any of the initiators # so we return all target ports. if len(conn_wwpns) == 0: for node in self._state['storage_nodes'].values(): conn_wwpns.extend(node['WWPN']) properties['target_wwn'] = conn_wwpns i_t_map = self.make_initiator_target_all2all_map( connector['wwpns'], conn_wwpns) properties['initiator_target_map'] = i_t_map except Exception: with excutils.save_and_reraise_exception(): self._do_terminate_connection(volume, connector) LOG.error('initialize_connection: Failed ' 'to collect return ' 'properties for volume %(vol)s and connector ' '%(conn)s.\n', {'vol': volume, 'conn': connector}) info = {'driver_volume_type': 'fibre_channel', 'data': properties, } fczm_utils.add_fc_zone(info) return info def terminate_connection(self, volume, connector, **kwargs): """Cleanup after an FC connection has been terminated.""" # If a fake connector is generated by nova when the host # is down, then the connector will not have a host property, # In this case construct the lock without the host property # so that all the fake connectors to an MCS are serialized host = "" if connector is not None and 'host' in connector: host = connector['host'] @coordination.synchronized('instorage-host' + self._state['system_id'] + host) def _do_terminate_connection_locked(): return self._do_terminate_connection(volume, connector, **kwargs) return _do_terminate_connection_locked() @volume_utils.trace def _do_terminate_connection(self, volume, connector, **kwargs): """Cleanup after an FC connection has been terminated. 
When we clean up a terminated connection between a given connector and volume, we: 1. Translate the given connector to a host name 2. Remove the volume-to-host mapping if it exists 3. Delete the host if it has no more mappings (hosts are created automatically by this driver when mappings are created) """ vol_name = self._get_target_vol(volume) info = {} if connector is not None and 'host' in connector: # get host according to FC protocol connector = connector.copy() connector.pop('initiator', None) info = {'driver_volume_type': 'fibre_channel', 'data': {}} host_name = self._assistant.get_host_from_connector( connector, volume_name=vol_name) if host_name is None: msg = (_('terminate_connection: Failed to get host name from' ' connector.')) LOG.error(msg) raise exception.VolumeDriverException(message=msg) else: host_name = None # Unmap volumes, if hostname is None, need to get value from vdiskmap host_name = self._assistant.unmap_vol_from_host(vol_name, host_name) # Host_name could be none if host_name: resp = self._assistant.check_host_mapped_vols(host_name) if not len(resp): LOG.info("Need to remove FC Zone, building initiator " "target map.") # Build info data structure for zone removing if connector is not None and 'wwpns' in connector: target_wwpns = [] # Returning all target_wwpns in storage_nodes, since # we cannot determine which wwpns are logged in during # a VM deletion. for node in self._state['storage_nodes'].values(): target_wwpns.extend(node['WWPN']) init_targ_map = ( self.make_initiator_target_all2all_map( connector['wwpns'], target_wwpns)) info['data'] = {'initiator_target_map': init_targ_map} # Only remove the zone if it's the last volume removed fczm_utils.remove_fc_zone(info) # No volume mapped to the host, delete host from array self._assistant.delete_host(host_name) return info ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/drivers/inspur/instorage/instorage_iscsi.py0000664000175000017500000002766700000000000026404 0ustar00zuulzuul00000000000000# Copyright 2017 Inspur Corp. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # """ISCSI volume driver for Inspur InStorage family and MCS storage systems. Notes: 1. Make sure you configure the password or key file. If you specify both a password and a key file, this driver will use the key file only. 2. When a key file is used for authentication, the private key is stored in a secure manner by the user or system administrator. 3. The defaults for creating volumes are "-rsize 2% -autoexpand -grainsize 256 -warning 0". These can be changed in the configuration file or by using volume types (recommended only for advanced users). Limitations: 1. The driver expects CLI output in English, but the error messages may be in a localized format. 2. When you clone or create volumes from snapshots, the source and target volumes must be the same size.
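A minimal cinder.conf sketch for enabling this driver (illustrative only: the backend section name and the san_* placeholders are assumptions drawn from cinder's generic SAN options; instorage_mcs_iscsi_chap_enabled is the option registered by this module and defaults to True):

.. code-block:: none

    [instorage-iscsi]
    volume_driver = cinder.volume.drivers.inspur.instorage.instorage_iscsi.InStorageMCSISCSIDriver
    san_ip = <array management IP>
    san_login = <management user>
    san_password = <management password>
    instorage_mcs_iscsi_chap_enabled = True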
Perform necessary work to make an iSCSI connection: To be able to create an iSCSI connection from a given host to a volume, we must: 1. Translate the given iSCSI name to a host name 2. Create new host on the storage system if it does not yet exist 3. Map the volume to the host if it is not already done 4. Return the connection information for relevant nodes (in the proper I/O group) """ from oslo_config import cfg from oslo_log import log as logging from oslo_utils import excutils from cinder import coordination from cinder import exception from cinder.i18n import _ from cinder import interface from cinder.volume import driver from cinder.volume.drivers.inspur.instorage import instorage_common from cinder.volume import volume_utils LOG = logging.getLogger(__name__) instorage_mcs_iscsi_opts = [ cfg.BoolOpt('instorage_mcs_iscsi_chap_enabled', default=True, help='Configure CHAP authentication for iSCSI connections ' '(Default: Enabled)'), ] CONF = cfg.CONF CONF.register_opts(instorage_mcs_iscsi_opts) @interface.volumedriver class InStorageMCSISCSIDriver(instorage_common.InStorageMCSCommonDriver, driver.ISCSIDriver): """Inspur InStorage iSCSI volume driver. Version history: .. code-block:: none 1.0 - Initial driver """ VERSION = "1.0.0" # ThirdPartySystems wiki page CI_WIKI_NAME = "Inspur_CI" def __init__(self, *args, **kwargs): super(InStorageMCSISCSIDriver, self).__init__(*args, **kwargs) self.protocol = 'iSCSI' self.configuration.append_config_values( instorage_mcs_iscsi_opts) @volume_utils.trace @coordination.synchronized('instorage-host' '{self._state[system_id]}' '{connector[host]}') def initialize_connection(self, volume, connector): """Perform necessary work to make an iSCSI connection.""" volume_name = self._get_target_vol(volume) # Check if a host object is defined for this host name host_name = self._assistant.get_host_from_connector(connector) if host_name is None: # Host does not exist - add a new host to InStorage/MCS host_name = self._assistant.create_host(connector) chap_secret = self._assistant.get_chap_secret_for_host(host_name) chap_enabled = self.configuration.instorage_mcs_iscsi_chap_enabled if chap_enabled and chap_secret is None: chap_secret = self._assistant.add_chap_secret_to_host(host_name) elif not chap_enabled and chap_secret: LOG.warning('CHAP secret exists for host but CHAP is disabled.') lun_id = self._assistant.map_vol_to_host(volume_name, host_name, True) try: properties = self._get_single_iscsi_data(volume, connector, lun_id, chap_secret) multipath = connector.get('multipath', False) if multipath: properties = self._get_multi_iscsi_data(volume, connector, lun_id, properties) except Exception: with excutils.save_and_reraise_exception(): self._do_terminate_connection(volume, connector) LOG.error('initialize_connection: Failed ' 'to collect return ' 'properties for volume %(vol)s and connector ' '%(conn)s.\n', {'vol': volume, 'conn': connector}) return {'driver_volume_type': 'iscsi', 'data': properties} @volume_utils.trace def _get_single_iscsi_data(self, volume, connector, lun_id, chap_secret): volume_name = self._get_target_vol(volume) volume_attributes = self._assistant.get_vdisk_attributes(volume_name) if volume_attributes is None: msg = (_('_get_single_iscsi_data: Failed to get attributes' ' for volume %s.') % volume_name) LOG.error(msg) raise exception.VolumeDriverException(message=msg) try: preferred_node = volume_attributes['preferred_node_id'] IO_group = volume_attributes['IO_group_id'] except KeyError as e: msg = (_('_get_single_iscsi_data: Did not find 
expected column' ' name in %(volume)s: %(key)s %(error)s.'), {'volume': volume_name, 'key': e.args[0], 'error': e}) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) # Get preferred node and other nodes in I/O group preferred_node_entry = None io_group_nodes = [] for node in self._state['storage_nodes'].values(): if self.protocol not in node['enabled_protocols']: continue if node['IO_group'] != IO_group: continue io_group_nodes.append(node) if node['id'] == preferred_node: preferred_node_entry = node if not len(io_group_nodes): msg = (_('_get_single_iscsi_data: No node found in ' 'I/O group %(gid)s for volume %(vol)s.') % { 'gid': IO_group, 'vol': volume_name}) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) if not preferred_node_entry: # Get 1st node in I/O group preferred_node_entry = io_group_nodes[0] LOG.warning('_get_single_iscsi_data: Did not find a ' 'preferred node for volume %s.', volume_name) properties = { 'target_discovered': False, 'target_lun': lun_id, 'volume_id': volume.id} if preferred_node_entry['ipv4']: ipaddr = preferred_node_entry['ipv4'][0] else: ipaddr = '[%s]' % preferred_node_entry['ipv6'][0] # ipv6 need surround with brackets when it use port properties['target_portal'] = '%s:%s' % (ipaddr, '3260') properties['target_iqn'] = preferred_node_entry['iscsi_name'] if chap_secret: properties.update(auth_method='CHAP', auth_username=connector['initiator'], auth_password=chap_secret, discovery_auth_method='CHAP', discovery_auth_username=connector['initiator'], discovery_auth_password=chap_secret) return properties @volume_utils.trace def _get_multi_iscsi_data(self, volume, connector, lun_id, properties): try: resp = self._assistant.ssh.lsportip() except Exception as ex: msg = (_('_get_multi_iscsi_data: Failed to ' 'get port ip because of exception: ' '%s.') % str(ex)) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) properties['target_iqns'] = [] properties['target_portals'] = [] properties['target_luns'] = [] for node in self._state['storage_nodes'].values(): for ip_data in resp: if ip_data['node_id'] != node['id']: continue link_state = ip_data.get('link_state', None) valid_port = '' if ((ip_data['state'] == 'configured' and link_state == 'active') or ip_data['state'] == 'online'): valid_port = (ip_data['IP_address'] or ip_data['IP_address_6']) if valid_port: properties['target_portals'].append( '%s:%s' % (valid_port, '3260')) properties['target_iqns'].append( node['iscsi_name']) properties['target_luns'].append(lun_id) if not len(properties['target_portals']): msg = (_('_get_multi_iscsi_data: Failed to find valid port ' 'for volume %s.') % volume.name) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) return properties def terminate_connection(self, volume, connector, **kwargs): """Cleanup after an iSCSI connection has been terminated.""" # If a fake connector is generated by nova when the host # is down, then the connector will not have a host property, # In this case construct the lock without the host property # so that all the fake connectors to an MCS are serialized host = "" if connector is not None and 'host' in connector: host = connector['host'] @coordination.synchronized('instorage-host' + self._state['system_id'] + host) def _do_terminate_connection_locked(): return self._do_terminate_connection(volume, connector, **kwargs) return _do_terminate_connection_locked() @volume_utils.trace def _do_terminate_connection(self, volume, connector, **kwargs): """Cleanup after an iSCSI connection has 
been terminated. When we clean up a terminated connection between a given connector and volume, we: 1. Translate the given connector to a host name 2. Remove the volume-to-host mapping if it exists 3. Delete the host if it has no more mappings (hosts are created automatically by this driver when mappings are created) """ vol_name = self._get_target_vol(volume) info = {} if connector is not None and 'host' in connector: # get host according to iSCSI protocol info = {'driver_volume_type': 'iscsi', 'data': {}} host_name = self._assistant.get_host_from_connector(connector) if host_name is None: msg = (_('terminate_connection: Failed to get host name from' ' connector.')) LOG.error(msg) raise exception.VolumeDriverException(message=msg) else: host_name = None # Unmap volumes, if hostname is None, need to get value from vdiskmap host_name = self._assistant.unmap_vol_from_host(vol_name, host_name) # Host_name could be none if host_name: resp = self._assistant.check_host_mapped_vols(host_name) if not len(resp): self._assistant.delete_host(host_name) return info ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/drivers/inspur/instorage/replication.py0000664000175000017500000002471100000000000025523 0ustar00zuulzuul00000000000000# Copyright 2017 Inspur Corp. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # import random from eventlet import greenthread from oslo_concurrency import processutils from oslo_log import log as logging from oslo_utils import excutils from cinder import exception from cinder.i18n import _ from cinder.objects import fields from cinder import ssh_utils from cinder import utils as cinder_utils from cinder.volume.drivers.inspur.instorage import instorage_const from cinder.volume import volume_utils LOG = logging.getLogger(__name__) class InStorageMCSReplicationManager(object): def __init__(self, driver, replication_target=None, target_assistant=None): self.sshpool = None self.driver = driver self.target = replication_target self.target_assistant = target_assistant(self._run_ssh) self._local_assistant = self.driver._local_backend_assistant self.async_m = InStorageMCSReplicationAsyncCopy( self.driver, replication_target, self.target_assistant) self.sync_m = InStorageMCSReplicationSyncCopy( self.driver, replication_target, self.target_assistant) def _run_ssh(self, cmd_list, check_exit_code=True, attempts=1): cinder_utils.check_ssh_injection(cmd_list) command = ' '. 
join(cmd_list) if not self.sshpool: self.sshpool = ssh_utils.SSHPool( self.target.get('san_ip'), self.target.get('san_ssh_port', 22), self.target.get('ssh_conn_timeout', 30), self.target.get('san_login'), password=self.target.get('san_password'), privatekey=self.target.get('san_private_key', ''), min_size=self.target.get('ssh_min_pool_conn', 1), max_size=self.target.get('ssh_max_pool_conn', 5),) last_exception = None try: with self.sshpool.item() as ssh: while attempts > 0: attempts -= 1 try: return processutils.ssh_execute( ssh, command, check_exit_code=check_exit_code) except Exception as e: LOG.error(e) last_exception = e greenthread.sleep(random.randint(20, 500) / 100.0) try: raise processutils.ProcessExecutionError( exit_code=last_exception.exit_code, stdout=last_exception.stdout, stderr=last_exception.stderr, cmd=last_exception.cmd) except AttributeError: raise processutils.ProcessExecutionError( exit_code=-1, stdout="", stderr="Error running SSH command", cmd=command) except Exception: with excutils.save_and_reraise_exception(): LOG.error("Error running SSH command: %s", command) def get_target_assistant(self): return self.target_assistant def get_replica_obj(self, rep_type): if rep_type == instorage_const.ASYNC: return self.async_m elif rep_type == instorage_const.SYNC: return self.sync_m else: return None def _partnership_validate_create(self, client, remote_name, remote_ip): try: partnership_info = client.get_partnership_info(remote_name) if not partnership_info: candidate_info = client.get_partnershipcandidate_info( remote_name) if candidate_info: client.mkfcpartnership(remote_name) else: client.mkippartnership(remote_ip) partnership_info = client.get_partnership_info(remote_name) if partnership_info['partnership'] != 'fully_configured': client.chpartnership(partnership_info['id']) except Exception: msg = (_('Unable to establish the partnership with ' 'the InStorage cluster %s.') % remote_name) LOG.error(msg) raise exception.VolumeDriverException(message=msg) def establish_target_partnership(self): local_system_info = self._local_assistant.get_system_info() target_system_info = self.target_assistant.get_system_info() local_system_name = local_system_info['system_name'] target_system_name = target_system_info['system_name'] local_ip = self.driver.configuration.safe_get('san_ip') target_ip = self.target.get('san_ip') # Establish partnership only when the local system and the replication # target system is different. 
if target_system_name != local_system_name: self._partnership_validate_create(self._local_assistant, target_system_name, target_ip) self._partnership_validate_create(self.target_assistant, local_system_name, local_ip) class InStorageMCSReplication(object): def __init__(self, asynccopy, driver, replication_target=None, target_assistant=None): self.asynccopy = asynccopy self.driver = driver self.target = replication_target or {} self.target_assistant = target_assistant @volume_utils.trace def volume_replication_setup(self, context, vref): target_vol_name = instorage_const.REPLICA_AUX_VOL_PREFIX + vref.name try: attr = self.target_assistant.get_vdisk_attributes(target_vol_name) if not attr: opts = self.driver._get_vdisk_params(vref.volume_type_id) pool = self.target.get('pool_name') src_attr = self.driver._assistant.get_vdisk_attributes( vref.name) opts['iogrp'] = src_attr['IO_group_id'] self.target_assistant.create_vdisk(target_vol_name, str(vref['size']), 'gb', pool, opts) system_info = self.target_assistant.get_system_info() self.driver._assistant.create_relationship( vref.name, target_vol_name, system_info.get('system_name'), self.asynccopy) except Exception as e: msg = (_("Unable to set up copy mode replication for %(vol)s. " "Exception: %(err)s.") % {'vol': vref.id, 'err': e}) LOG.error(msg) raise exception.VolumeDriverException(message=msg) @volume_utils.trace def failover_volume_host(self, context, vref): target_vol = instorage_const.REPLICA_AUX_VOL_PREFIX + vref.name try: rel_info = self.target_assistant.get_relationship_info(target_vol) # Reverse the role of the primary and secondary volumes self.target_assistant.switch_relationship(rel_info['name']) return {'replication_status': fields.ReplicationStatus.FAILED_OVER} except Exception: LOG.exception('Unable to fail-over the volume %(id)s to the ' 'secondary back-end by switchrcrelationship ' 'command.', {"id": vref.id}) # If the switch command fail, try to make the aux volume # writeable again. try: self.target_assistant.stop_relationship(target_vol, access=True) return { 'replication_status': fields.ReplicationStatus.FAILED_OVER} except Exception as e: msg = (_('Unable to fail-over the volume %(id)s to the ' 'secondary back-end, error: %(error)s') % {"id": vref.id, "error": str(e)}) LOG.error(msg) raise exception.VolumeDriverException(message=msg) def replication_failback(self, volume): tgt_volume = instorage_const.REPLICA_AUX_VOL_PREFIX + volume.name rel_info = self.target_assistant.get_relationship_info(tgt_volume) if rel_info: try: self.target_assistant.switch_relationship(rel_info['name'], aux=False) return {'replication_status': fields.ReplicationStatus.ENABLED, 'status': 'available'} except Exception as e: msg = (_('Unable to fail-back the volume:%(vol)s to the ' 'master back-end, error:%(error)s') % {"vol": volume.name, "error": str(e)}) LOG.error(msg) raise exception.VolumeDriverException(message=msg) class InStorageMCSReplicationAsyncCopy(InStorageMCSReplication): """Support for InStorage/MCS async copy mode replication. Async Copy establishes a Async Copy relationship between two volumes of equal size. The volumes in a Async Copy relationship are referred to as the master (source) volume and the auxiliary (target) volume. This mode is dedicated to the asynchronous volume replication. 
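A minimal usage sketch (assumed wiring: 'manager' is the InStorageMCSReplicationManager defined earlier in this module, constructed by the common driver during setup; the object returned below is an instance of this class):

.. code-block:: none

    replica = manager.get_replica_obj(instorage_const.ASYNC)
    # Creates the aux_-prefixed target volume and the remote copy relationship.
    replica.volume_replication_setup(context, volume)
    # On failover, switch the relationship roles (or stop it with access=True)
    # so the auxiliary volume becomes writable.
    replica.failover_volume_host(context, volume)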
""" def __init__(self, driver, replication_target=None, target_assistant=None): super(InStorageMCSReplicationAsyncCopy, self).__init__( True, driver, replication_target, target_assistant) class InStorageMCSReplicationSyncCopy(InStorageMCSReplication): """Support for InStorage/MCS sync copy mode replication. Sync Copy establishes a Sync Copy relationship between two volumes of equal size. The volumes in a Sync Copy relationship are referred to as the master (source) volume and the auxiliary (target) volume. """ def __init__(self, driver, replication_target=None, target_assistant=None): super(InStorageMCSReplicationSyncCopy, self).__init__( False, driver, replication_target, target_assistant) ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1759315577.375121 cinder-27.0.0/cinder/volume/drivers/kaminario/0000775000175000017500000000000000000000000021272 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/drivers/kaminario/__init__.py0000664000175000017500000000000000000000000023371 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/drivers/kaminario/kaminario_common.py0000664000175000017500000014521600000000000025177 0ustar00zuulzuul00000000000000# Copyright (c) 2016 by Kaminario Technologies, Ltd. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""Volume driver for Kaminario K2 all-flash arrays.""" import math import re import threading import time import eventlet from oslo_config import cfg from oslo_log import log as logging from oslo_utils import importutils from oslo_utils import units from oslo_utils import versionutils import requests import cinder from cinder import exception from cinder.i18n import _ from cinder import objects from cinder.objects import fields from cinder import utils from cinder.volume import configuration from cinder.volume.drivers.san import san from cinder.volume import volume_utils krest = importutils.try_import("krest") K2_MIN_VERSION = '2.2.0' K2_LOCK_PREFIX = 'Kaminario' MAX_K2_RETRY = 5 K2_REP_FAILED_OVER = fields.ReplicationStatus.FAILED_OVER LOG = logging.getLogger(__name__) kaminario_opts = [ cfg.BoolOpt('auto_calc_max_oversubscription_ratio', default=False, help="K2 driver will calculate max_oversubscription_ratio " "on setting this option as True."), cfg.BoolOpt('disable_discovery', default=False, help="Disabling iSCSI discovery (sendtargets) for multipath " "connections on K2 driver."), ] CONF = cfg.CONF CONF.register_opts(kaminario_opts, group=configuration.SHARED_CONF_GROUP) K2HTTPError = requests.exceptions.HTTPError K2_RETRY_ERRORS = ("MC_ERR_BUSY", "MC_ERR_BUSY_SPECIFIC", "MC_ERR_INPROGRESS", "MC_ERR_START_TIMEOUT") class KaminarioCinderDriverException(exception.VolumeDriverException): message = _("KaminarioCinderDriver failure: %(reason)s") class KaminarioRetryableException(exception.VolumeDriverException): message = _("Kaminario retryable exception: %(reason)s") if krest: class KrestWrap(krest.EndPoint): def __init__(self, *args, **kwargs): self.krestlock = threading.Lock() super(KrestWrap, self).__init__(*args, **kwargs) def _should_retry(self, err_code, err_msg): if err_code == 400: for er in K2_RETRY_ERRORS: if er in err_msg: LOG.debug("Retry ERROR: %d with status %s", err_code, err_msg) return True return False @utils.retry(KaminarioRetryableException, retries=MAX_K2_RETRY) def _request(self, method, *args, **kwargs): try: self.krestlock.acquire() return super(KrestWrap, self)._request(method, *args, **kwargs) except K2HTTPError as err: err_code = err.response.status_code err_msg = err.response.text if self._should_retry(err_code, err_msg): raise KaminarioRetryableException( reason=str(err_msg)) raise finally: self.krestlock.release() class Replication(object): def __init__(self, config, *args, **kwargs): self.backend_id = config.get('backend_id') self.login = config.get('login') self.password = config.get('password') self.rpo = config.get('rpo') class KaminarioCinderDriver(cinder.volume.driver.ISCSIDriver): VENDOR = "Kaminario" stats = {} def __init__(self, *args, **kwargs): super(KaminarioCinderDriver, self).__init__(*args, **kwargs) self.configuration.append_config_values(san.san_opts) self.configuration.append_config_values(kaminario_opts) self.replica = None self._protocol = None k2_lock_sfx = self.configuration.safe_get('san_ip') self.k2_lock_name = "%s-%s" % (K2_LOCK_PREFIX, k2_lock_sfx) @classmethod def get_driver_options(cls): additional_opts = cls._get_oslo_driver_opts( 'san_ip', 'san_login', 'san_password', 'replication_device', 'volume_dd_blocksize', 'unique_fqdn_network') return kaminario_opts + additional_opts @volume_utils.trace def check_for_setup_error(self): if krest is None: msg = _("Unable to import 'krest' python module.") LOG.error(msg) raise KaminarioCinderDriverException(reason=msg) else: conf = self.configuration self.client = KrestWrap(conf.san_ip, 
conf.san_login, conf.san_password, ssl_validate=False) if self.replica: self.target = KrestWrap(self.replica.backend_id, self.replica.login, self.replica.password, ssl_validate=False) v_rs = self.client.search("system/state") if hasattr(v_rs, 'hits') and v_rs.total != 0: ver = v_rs.hits[0].rest_api_version ver_exist = versionutils.convert_version_to_int(ver) ver_min = versionutils.convert_version_to_int(K2_MIN_VERSION) if ver_exist < ver_min: msg = _("K2 REST API version should be " ">= %s.") % K2_MIN_VERSION LOG.error(msg) raise KaminarioCinderDriverException(reason=msg) else: msg = _("K2 REST API version search failed.") LOG.error(msg) raise KaminarioCinderDriverException(reason=msg) def _check_ops(self): """Ensure that the options we care about are set.""" required_ops = ['san_ip', 'san_login', 'san_password'] for attr in required_ops: if not getattr(self.configuration, attr, None): raise exception.InvalidInput(reason=_('%s is not set.') % attr) replica = self.configuration.safe_get('replication_device') if replica and isinstance(replica, list): replica_ops = ['backend_id', 'login', 'password', 'rpo'] for attr in replica_ops: if attr not in replica[0]: msg = _('replication_device %s is not set.') % attr raise exception.InvalidInput(reason=msg) self.replica = Replication(replica[0]) @volume_utils.trace def do_setup(self, context): super(KaminarioCinderDriver, self).do_setup(context) self._check_ops() @volume_utils.trace def create_volume(self, volume): """Volume creation in K2 needs a volume group. - create a volume group - create a volume in the volume group """ vg_name = self.get_volume_group_name(volume.id) vol_name = self.get_volume_name(volume.id) prov_type = self._get_is_dedup(volume.get('volume_type')) try: LOG.debug("Creating volume group with name: %(name)s, " "quota: unlimited and dedup_support: %(dedup)s", {'name': vg_name, 'dedup': prov_type}) vg = self.client.new("volume_groups", name=vg_name, quota=0, is_dedup=prov_type).save() LOG.debug("Creating volume with name: %(name)s, size: %(size)s " "GB, volume_group: %(vg)s", {'name': vol_name, 'size': volume.size, 'vg': vg_name}) vol = self.client.new("volumes", name=vol_name, size=volume.size * units.Mi, volume_group=vg).save() except Exception as ex: vg_rs = self.client.search("volume_groups", name=vg_name) if vg_rs.total != 0: LOG.debug("Deleting vg: %s for failed volume in K2.", vg_name) vg_rs.hits[0].delete() LOG.exception("Creation of volume %s failed.", vol_name) raise KaminarioCinderDriverException(reason=ex) if self._get_is_replica(volume.volume_type) and self.replica: self._create_volume_replica(volume, vg, vol, self.replica.rpo) @volume_utils.trace def _create_volume_replica(self, volume, vg, vol, rpo): """Volume replica creation in K2 needs session and remote volume. 
- create a session - create a volume in the volume group """ session_name = self.get_session_name(volume.id) rsession_name = self.get_rep_name(session_name) rvg_name = self.get_rep_name(vg.name) rvol_name = self.get_rep_name(vol.name) k2peer_rs = self.client.search("replication/peer_k2arrays", mgmt_host=self.replica.backend_id) if hasattr(k2peer_rs, 'hits') and k2peer_rs.total != 0: k2peer = k2peer_rs.hits[0] else: msg = _("Unable to find K2peer in source K2:") LOG.error(msg) raise KaminarioCinderDriverException(reason=msg) try: LOG.debug("Creating source session with name: %(sname)s and " " target session name: %(tname)s", {'sname': session_name, 'tname': rsession_name}) src_ssn = self.client.new("replication/sessions") src_ssn.replication_peer_k2array = k2peer src_ssn.auto_configure_peer_volumes = "False" src_ssn.local_volume_group = vg src_ssn.replication_peer_volume_group_name = rvg_name src_ssn.remote_replication_session_name = rsession_name src_ssn.name = session_name src_ssn.rpo = rpo src_ssn.save() LOG.debug("Creating remote volume with name: %s", rvol_name) self.client.new("replication/peer_volumes", local_volume=vol, name=rvol_name, replication_session=src_ssn).save() src_ssn.state = "in_sync" src_ssn.save() except Exception as ex: LOG.exception("Replication for the volume %s has " "failed.", vol.name) self._delete_by_ref(self.client, "replication/sessions", session_name, 'session') self._delete_by_ref(self.target, "replication/sessions", rsession_name, 'remote session') self._delete_by_ref(self.target, "volumes", rvol_name, 'remote volume') self._delete_by_ref(self.client, "volumes", vol.name, "volume") self._delete_by_ref(self.target, "volume_groups", rvg_name, "remote vg") self._delete_by_ref(self.client, "volume_groups", vg.name, "vg") raise KaminarioCinderDriverException(reason=ex) @volume_utils.trace def _create_failover_volume_replica(self, volume, vg_name, vol_name): """Volume replica creation in K2 needs session and remote volume. 
- create a session - create a volume in the volume group """ session_name = self.get_session_name(volume.id) rsession_name = self.get_rep_name(session_name) rvg_name = self.get_rep_name(vg_name) rvol_name = self.get_rep_name(vol_name) rvg = self.target.search("volume_groups", name=rvg_name).hits[0] rvol = self.target.search("volumes", name=rvol_name).hits[0] k2peer_rs = self.target.search("replication/peer_k2arrays", mgmt_host=self.configuration.san_ip) if hasattr(k2peer_rs, 'hits') and k2peer_rs.total != 0: k2peer = k2peer_rs.hits[0] else: msg = _("Unable to find K2peer in source K2:") LOG.error(msg) raise KaminarioCinderDriverException(reason=msg) try: LOG.debug("Creating source session with name: %(sname)s and " " target session name: %(tname)s", {'sname': rsession_name, 'tname': session_name}) tgt_ssn = self.target.new("replication/sessions") tgt_ssn.replication_peer_k2array = k2peer tgt_ssn.auto_configure_peer_volumes = "False" tgt_ssn.local_volume_group = rvg tgt_ssn.replication_peer_volume_group_name = vg_name tgt_ssn.remote_replication_session_name = session_name tgt_ssn.name = rsession_name tgt_ssn.rpo = self.replica.rpo tgt_ssn.save() LOG.debug("Creating remote volume with name: %s", rvol_name) self.target.new("replication/peer_volumes", local_volume=rvol, name=vol_name, replication_session=tgt_ssn).save() tgt_ssn.state = "in_sync" tgt_ssn.save() except Exception as ex: LOG.exception("Replication for the volume %s has " "failed.", rvol_name) self._delete_by_ref(self.target, "replication/sessions", rsession_name, 'session') self._delete_by_ref(self.client, "replication/sessions", session_name, 'remote session') self._delete_by_ref(self.client, "volumes", vol_name, "volume") self._delete_by_ref(self.client, "volume_groups", vg_name, "vg") raise KaminarioCinderDriverException(reason=ex) @volume_utils.trace def _delete_by_ref(self, device, url, name, msg): rs = device.search(url, name=name) for result in rs.hits: result.delete() LOG.debug("Deleting %(msg)s: %(name)s", {'msg': msg, 'name': name}) @volume_utils.trace def _failover_volume(self, volume): """Promoting a secondary volume to primary volume.""" session_name = self.get_session_name(volume.id) rsession_name = self.get_rep_name(session_name) tgt_ssn = self.target.search("replication/sessions", name=rsession_name).hits[0] if tgt_ssn.state == 'in_sync': tgt_ssn.state = 'failed_over' tgt_ssn.save() LOG.debug("The target session: %s state is " "changed to failed_over ", rsession_name) @volume_utils.trace def failover_host(self, context, volumes, secondary_id=None, groups=None): """Failover to replication target.""" volume_updates = [] back_end_ip = None svc_host = volume_utils.extract_host(self.host, 'backend') service = objects.Service.get_by_args(context, svc_host, 'cinder-volume') if secondary_id and secondary_id != self.replica.backend_id: LOG.error("Kaminario driver received failover_host " "request, But backend is non replicated device") raise exception.UnableToFailOver(reason=_("Failover requested " "on non replicated " "backend.")) if (service.active_backend_id and service.active_backend_id != self.configuration.san_ip): self.snap_updates = [] rep_volumes = [] # update status for non-replicated primary volumes for v in volumes: vol_name = self.get_volume_name(v['id']) vol = self.client.search("volumes", name=vol_name) if v.replication_status != K2_REP_FAILED_OVER and vol.total: status = 'available' if v.volume_attachment: map_rs = self.client.search("mappings", volume=vol.hits[0]) status = 'in-use' if map_rs.total: 
map_rs.hits[0].delete() volume_updates.append({'volume_id': v['id'], 'updates': {'status': status}}) else: rep_volumes.append(v) # In-sync from secondaray array to primary array for v in rep_volumes: vol_name = self.get_volume_name(v['id']) vol = self.client.search("volumes", name=vol_name) rvol_name = self.get_rep_name(vol_name) rvol = self.target.search("volumes", name=rvol_name) session_name = self.get_session_name(v['id']) rsession_name = self.get_rep_name(session_name) ssn = self.target.search("replication/sessions", name=rsession_name) if ssn.total: tgt_ssn = ssn.hits[0] ssn = self.client.search("replication/sessions", name=session_name) if ssn.total: src_ssn = ssn.hits[0] if (tgt_ssn.state == 'failed_over' and tgt_ssn.current_role == 'target' and vol.total and src_ssn): map_rs = self.client.search("mappings", volume=vol.hits[0]) if map_rs.total: map_rs.hits[0].delete() tgt_ssn.state = 'in_sync' tgt_ssn.save() self._check_for_status(src_ssn, 'in_sync') if (rvol.total and src_ssn.state == 'in_sync' and src_ssn.current_role == 'target'): gen_no = self._create_volume_replica_user_snap(self.target, tgt_ssn) self.snap_updates.append({'tgt_ssn': tgt_ssn, 'gno': gen_no, 'stime': time.time()}) LOG.debug("The target session: %s state is " "changed to in sync", rsession_name) self._is_user_snap_sync_finished() # Delete secondary volume mappings and create snapshot for v in rep_volumes: vol_name = self.get_volume_name(v['id']) vol = self.client.search("volumes", name=vol_name) rvol_name = self.get_rep_name(vol_name) rvol = self.target.search("volumes", name=rvol_name) session_name = self.get_session_name(v['id']) rsession_name = self.get_rep_name(session_name) ssn = self.target.search("replication/sessions", name=rsession_name) if ssn.total: tgt_ssn = ssn.hits[0] ssn = self.client.search("replication/sessions", name=session_name) if ssn.total: src_ssn = ssn.hits[0] if (rvol.total and src_ssn.state == 'in_sync' and src_ssn.current_role == 'target'): map_rs = self.target.search("mappings", volume=rvol.hits[0]) if map_rs.total: map_rs.hits[0].delete() gen_no = self._create_volume_replica_user_snap(self.target, tgt_ssn) self.snap_updates.append({'tgt_ssn': tgt_ssn, 'gno': gen_no, 'stime': time.time()}) self._is_user_snap_sync_finished() # changing source sessions to failed-over for v in rep_volumes: vol_name = self.get_volume_name(v['id']) vol = self.client.search("volumes", name=vol_name) rvol_name = self.get_rep_name(vol_name) rvol = self.target.search("volumes", name=rvol_name) session_name = self.get_session_name(v['id']) rsession_name = self.get_rep_name(session_name) ssn = self.target.search("replication/sessions", name=rsession_name) if ssn.total: tgt_ssn = ssn.hits[0] ssn = self.client.search("replication/sessions", name=session_name) if ssn.total: src_ssn = ssn.hits[0] if (rvol.total and src_ssn.state == 'in_sync' and src_ssn.current_role == 'target'): src_ssn.state = 'failed_over' src_ssn.save() self._check_for_status(tgt_ssn, 'suspended') LOG.debug("The target session: %s state is " "changed to failed over", session_name) src_ssn.state = 'in_sync' src_ssn.save() LOG.debug("The target session: %s state is " "changed to in sync", session_name) rep_status = fields.ReplicationStatus.DISABLED volume_updates.append({'volume_id': v['id'], 'updates': {'replication_status': rep_status}}) back_end_ip = self.configuration.san_ip else: """Failover to replication target.""" for v in volumes: vol_name = self.get_volume_name(v['id']) rv = self.get_rep_name(vol_name) if self.target.search("volumes", 
name=rv).total: self._failover_volume(v) volume_updates.append( {'volume_id': v['id'], 'updates': {'replication_status': K2_REP_FAILED_OVER}}) else: volume_updates.append({'volume_id': v['id'], 'updates': {'status': 'error', }}) back_end_ip = self.replica.backend_id return back_end_ip, volume_updates, [] @volume_utils.trace def _create_volume_replica_user_snap(self, k2, sess): snap = k2.new("snapshots") snap.is_application_consistent = "False" snap.replication_session = sess snap.save() return snap.generation_number def _is_user_snap_sync_finished(self): # waiting for user snapshot to be synced while len(self.snap_updates) > 0: for update in self.snap_updates: sess = update.get('tgt_ssn') gno = update.get('gno') stime = update.get('stime') sess.refresh() if (sess.generation_number == gno and sess.current_snapshot_progress == 100 and sess.current_snapshot_id is None): if time.time() - stime > 300: gen_no = self._create_volume_replica_user_snap( self.target, sess) self.snap_updates.append({'tgt_ssn': sess, 'gno': gen_no, 'stime': time.time()}) self.snap_updates.remove(update) eventlet.sleep(1) @volume_utils.trace def create_volume_from_snapshot(self, volume, snapshot): """Create volume from snapshot. - search for snapshot and retention_policy - create a view from snapshot and attach view - create a volume and attach volume - copy data from attached view to attached volume - detach volume and view and finally delete view """ snap_name = self.get_snap_name(snapshot.id) view_name = self.get_view_name(volume.id) vol_name = self.get_volume_name(volume.id) cview = src_attach_info = dest_attach_info = None rpolicy = self.get_policy() properties = volume_utils.brick_get_connector_properties( self.configuration.use_multipath_for_image_xfer, self.configuration.enforce_multipath_for_image_xfer) LOG.debug("Searching for snapshot: %s in K2.", snap_name) snap_rs = self.client.search("snapshots", short_name=snap_name) if hasattr(snap_rs, 'hits') and snap_rs.total != 0: snap = snap_rs.hits[0] LOG.debug("Creating a view: %(view)s from snapshot: %(snap)s", {'view': view_name, 'snap': snap_name}) try: cview = self.client.new("snapshots", short_name=view_name, source=snap, retention_policy=rpolicy, is_exposable=True).save() except Exception as ex: LOG.exception("Creating a view: %(view)s from snapshot: " "%(snap)s failed", {"view": view_name, "snap": snap_name}) raise KaminarioCinderDriverException(reason=ex) else: msg = _("Snapshot: %s search failed in K2.") % snap_name LOG.error(msg) raise KaminarioCinderDriverException(reason=msg) try: conn = self.initialize_connection(cview, properties) src_attach_info = self._connect_device(conn) self.create_volume(volume) conn = self.initialize_connection(volume, properties) dest_attach_info = self._connect_device(conn) volume_utils.copy_volume(src_attach_info['device']['path'], dest_attach_info['device']['path'], snapshot.volume.size * units.Ki, self.configuration.volume_dd_blocksize, sparse=True) self._kaminario_disconnect_volume(src_attach_info, dest_attach_info) self.terminate_connection(volume, properties) self.terminate_connection(cview, properties) cview.delete() except Exception as ex: self._kaminario_disconnect_volume(src_attach_info, dest_attach_info) self.terminate_connection(cview, properties) self.terminate_connection(volume, properties) cview.delete() self.delete_volume(volume) LOG.exception("Copy to volume: %(vol)s from view: %(view)s " "failed", {"vol": vol_name, "view": view_name}) raise KaminarioCinderDriverException(reason=ex) @volume_utils.trace def 
create_cloned_volume(self, volume, src_vref): """Create a clone from source volume. - attach source volume - create and attach new volume - copy data from attached source volume to attached new volume - detach both volumes """ clone_name = self.get_volume_name(volume.id) src_name = self.get_volume_name(src_vref.id) src_vol = self.client.search("volumes", name=src_name) src_map = self.client.search("mappings", volume=src_vol) src_attach_info = dest_attach_info = None if src_map.total != 0: msg = _("K2 driver does not support clone of an attached volume. " "To get this done, create a snapshot from the attached " "volume and then create a volume from the snapshot.") LOG.error(msg) raise KaminarioCinderDriverException(reason=msg) try: properties = volume_utils.brick_get_connector_properties( self.configuration.use_multipath_for_image_xfer, self.configuration.enforce_multipath_for_image_xfer) conn = self.initialize_connection(src_vref, properties) src_attach_info = self._connect_device(conn) self.create_volume(volume) conn = self.initialize_connection(volume, properties) dest_attach_info = self._connect_device(conn) volume_utils.copy_volume(src_attach_info['device']['path'], dest_attach_info['device']['path'], src_vref.size * units.Ki, self.configuration.volume_dd_blocksize, sparse=True) self._kaminario_disconnect_volume(src_attach_info, dest_attach_info) self.terminate_connection(volume, properties) self.terminate_connection(src_vref, properties) except Exception as ex: self._kaminario_disconnect_volume(src_attach_info, dest_attach_info) self.terminate_connection(src_vref, properties) self.terminate_connection(volume, properties) self.delete_volume(volume) LOG.exception("Create a clone: %s failed.", clone_name) raise KaminarioCinderDriverException(reason=ex) @volume_utils.trace def delete_volume(self, volume): """Volume in K2 exists in a volume group. 
- delete the volume - delete the corresponding volume group """ vg_name = self.get_volume_group_name(volume.id) vol_name = self.get_volume_name(volume.id) try: if self._get_is_replica(volume.volume_type) and self.replica: self._delete_volume_replica(volume, vg_name, vol_name) LOG.debug("Searching and deleting volume: %s in K2.", vol_name) vol_rs = self.client.search("volumes", name=vol_name) if vol_rs.total != 0: vol_rs.hits[0].delete() LOG.debug("Searching and deleting vg: %s in K2.", vg_name) vg_rs = self.client.search("volume_groups", name=vg_name) if vg_rs.total != 0: vg_rs.hits[0].delete() except Exception as ex: LOG.exception("Deletion of volume %s failed.", vol_name) raise KaminarioCinderDriverException(reason=ex) @volume_utils.trace def _delete_volume_replica(self, volume, vg_name, vol_name): rvg_name = self.get_rep_name(vg_name) rvol_name = self.get_rep_name(vol_name) session_name = self.get_session_name(volume.id) rsession_name = self.get_rep_name(session_name) src_ssn = self.client.search('replication/sessions', name=session_name).hits[0] tgt_ssn = self.target.search('replication/sessions', name=rsession_name).hits[0] src_ssn.state = 'suspended' src_ssn.save() self._check_for_status(tgt_ssn, 'suspended') src_ssn.state = 'idle' src_ssn.save() self._check_for_status(tgt_ssn, 'idle') tgt_ssn.delete() src_ssn.delete() LOG.debug("Searching and deleting snapshots for volume groups:" "%(vg1)s, %(vg2)s in K2.", {'vg1': vg_name, 'vg2': rvg_name}) vg = self.client.search('volume_groups', name=vg_name).hits rvg = self.target.search('volume_groups', name=rvg_name).hits snaps = self.client.search('snapshots', volume_group=vg).hits for s in snaps: s.delete() rsnaps = self.target.search('snapshots', volume_group=rvg).hits for s in rsnaps: s.delete() self._delete_by_ref(self.target, "volumes", rvol_name, 'remote volume') self._delete_by_ref(self.target, "volume_groups", rvg_name, "remote vg") @volume_utils.trace def _delete_failover_volume_replica(self, volume, vg_name, vol_name): rvg_name = self.get_rep_name(vg_name) rvol_name = self.get_rep_name(vol_name) session_name = self.get_session_name(volume.id) rsession_name = self.get_rep_name(session_name) tgt_ssn = self.target.search('replication/sessions', name=rsession_name).hits[0] tgt_ssn.state = 'idle' tgt_ssn.save() tgt_ssn.delete() LOG.debug("Searching and deleting snapshots for target volume group " "and target volume: %(vol)s, %(vg)s in K2.", {'vol': rvol_name, 'vg': rvg_name}) rvg = self.target.search('volume_groups', name=rvg_name).hits rsnaps = self.target.search('snapshots', volume_group=rvg).hits for s in rsnaps: s.delete() def _check_for_status(self, obj, status): while obj.state != status: obj.refresh() eventlet.sleep(1) @volume_utils.trace def get_volume_stats(self, refresh=False): if refresh: self.update_volume_stats() stats = self.stats stats['storage_protocol'] = self._protocol stats['driver_version'] = self.VERSION stats['vendor_name'] = self.VENDOR backend_name = self.configuration.safe_get('volume_backend_name') stats['volume_backend_name'] = (backend_name or self.__class__.__name__) return stats def create_export(self, context, volume, connector): pass def ensure_export(self, context, volume): pass def remove_export(self, context, volume): pass @volume_utils.trace def create_snapshot(self, snapshot): """Create a snapshot from a volume_group.""" vg_name = self.get_volume_group_name(snapshot.volume_id) snap_name = self.get_snap_name(snapshot.id) rpolicy = self.get_policy() try: LOG.debug("Searching volume_group: %s in K2.", 
vg_name) vg = self.client.search("volume_groups", name=vg_name).hits[0] LOG.debug("Creating a snapshot: %(snap)s from vg: %(vg)s", {'snap': snap_name, 'vg': vg_name}) self.client.new("snapshots", short_name=snap_name, source=vg, retention_policy=rpolicy, is_auto_deleteable=False).save() except Exception as ex: LOG.exception("Creation of snapshot: %s failed.", snap_name) raise KaminarioCinderDriverException(reason=ex) @volume_utils.trace def delete_snapshot(self, snapshot): """Delete a snapshot.""" snap_name = self.get_snap_name(snapshot.id) try: LOG.debug("Searching and deleting snapshot: %s in K2.", snap_name) snap_rs = self.client.search("snapshots", short_name=snap_name) if snap_rs.total != 0: snap_rs.hits[0].delete() except Exception as ex: LOG.exception("Deletion of snapshot: %s failed.", snap_name) raise KaminarioCinderDriverException(reason=ex) @volume_utils.trace def extend_volume(self, volume, new_size): """Extend volume.""" vol_name = self.get_volume_name(volume.id) try: LOG.debug("Searching volume: %s in K2.", vol_name) vol = self.client.search("volumes", name=vol_name).hits[0] vol.size = new_size * units.Mi LOG.debug("Extending volume: %s in K2.", vol_name) vol.save() except Exception as ex: LOG.exception("Extending volume: %s failed.", vol_name) raise KaminarioCinderDriverException(reason=ex) def update_volume_stats(self): conf = self.configuration LOG.debug("Searching system capacity in K2.") cap = self.client.search("system/capacity").hits[0] LOG.debug("Searching total volumes in K2 for updating stats.") total_volumes = self.client.search("volumes").total - 1 provisioned_vol = cap.provisioned_volumes if (conf.auto_calc_max_oversubscription_ratio and cap.provisioned and (cap.total - cap.free) != 0): ratio = provisioned_vol / float(cap.total - cap.free) else: ratio = volume_utils.get_max_over_subscription_ratio( conf.max_over_subscription_ratio, supports_auto=True) self.stats = {'QoS_support': False, 'free_capacity_gb': cap.free / units.Mi, 'total_capacity_gb': cap.total / units.Mi, 'thin_provisioning_support': True, 'sparse_copy_volume': True, 'total_volumes': total_volumes, 'thick_provisioning_support': False, 'provisioned_capacity_gb': provisioned_vol / units.Mi, 'max_over_subscription_ratio': ratio, 'kaminario:thin_prov_type': 'dedup/nodedup', 'replication_enabled': True, 'kaminario:replication': True} def get_initiator_host_name(self, connector): """Return the initiator host name or unique ID. Unique ID when configuration's unique_fqdn_network is false will be the reversed IQN/WWPNS. Valid characters: 0-9, a-z, A-Z, '-', '_' All other characters are replaced with '_'. 
Total characters in initiator host name: 32 """ name = connector.get('initiator', connector.get('wwnns', [''])[0])[::-1] if self.configuration.unique_fqdn_network: name = connector.get('host', name) return re.sub('[^0-9a-zA-Z-_]', '_', name[:32]) def get_volume_group_name(self, vid): """Return the volume group name.""" return "cvg-{0}".format(vid) def get_volume_name(self, vid): """Return the volume name.""" return "cv-{0}".format(vid) def get_session_name(self, vid): """Return the volume name.""" return "ssn-{0}".format(vid) def get_snap_name(self, sid): """Return the snapshot name.""" return "cs-{0}".format(sid) def get_view_name(self, vid): """Return the view name.""" return "cview-{0}".format(vid) def get_rep_name(self, name): """Return the corresponding replication names.""" return "r{0}".format(name) @volume_utils.trace def _delete_host_by_name(self, name): """Deleting host by name.""" host_rs = self.client.search("hosts", name=name) if hasattr(host_rs, "hits") and host_rs.total != 0: host = host_rs.hits[0] host.delete() def get_policy(self): """Return the retention policy.""" try: LOG.debug("Searching for retention_policy in K2.") return self.client.search("retention_policies", name="Best_Effort_Retention").hits[0] except Exception as ex: LOG.exception("Retention policy search failed in K2.") raise KaminarioCinderDriverException(reason=ex) def _get_volume_object(self, volume): vol_name = self.get_volume_name(volume.id) if volume.replication_status == K2_REP_FAILED_OVER: vol_name = self.get_rep_name(vol_name) LOG.debug("Searching volume : %s in K2.", vol_name) vol_rs = self.client.search("volumes", name=vol_name) if not hasattr(vol_rs, 'hits') or vol_rs.total == 0: msg = _("Unable to find volume: %s from K2.") % vol_name LOG.error(msg) raise KaminarioCinderDriverException(reason=msg) return vol_rs.hits[0] def _get_lun_number(self, vol, host): volsnap = None LOG.debug("Searching volsnaps in K2.") volsnap_rs = self.client.search("volsnaps", snapshot=vol) if hasattr(volsnap_rs, 'hits') and volsnap_rs.total != 0: volsnap = volsnap_rs.hits[0] LOG.debug("Searching mapping of volsnap in K2.") map_rs = self.client.search("mappings", volume=volsnap, host=host) return map_rs.hits[0].lun def initialize_connection(self, volume, connector): pass @volume_utils.trace def terminate_connection(self, volume, connector): """Terminate connection of volume from host.""" # Get volume object if type(volume).__name__ != 'RestObject': vol_name = self.get_volume_name(volume.id) if volume.replication_status == K2_REP_FAILED_OVER: vol_name = self.get_rep_name(vol_name) LOG.debug("Searching volume: %s in K2.", vol_name) volume_rs = self.client.search("volumes", name=vol_name) if hasattr(volume_rs, "hits") and volume_rs.total != 0: volume = volume_rs.hits[0] else: vol_name = volume.name host_name = "" if connector is None: vol_map_rs = self.client.search("mappings", {"volume": volume}) if hasattr(vol_map_rs, "hits") and vol_map_rs.total != 0: host_name = vol_map_rs.hits[0].host.name else: # Get host object. 
host_name = self.get_initiator_host_name(connector) host_rs = self.client.search("hosts", name=host_name) if hasattr(host_rs, "hits") and host_rs.total != 0 and volume: host = host_rs.hits[0] LOG.debug("Searching and deleting mapping of volume: %(name)s to " "host: %(host)s", {'host': host_name, 'name': vol_name}) map_rs = self.client.search("mappings", volume=volume, host=host) if hasattr(map_rs, "hits") and map_rs.total != 0: map_rs.hits[0].delete() if self.client.search("mappings", host=host).total == 0: LOG.debug("Deleting initiator hostname: %s in K2.", host_name) host.delete() else: LOG.warning("Host: %s not found on K2.", host_name) @volume_utils.trace def k2_initialize_connection(self, volume, connector): # Get volume object. if type(volume).__name__ != 'RestObject': vol = self._get_volume_object(volume) else: vol = volume # Get host object. host, host_rs, host_name = self._get_host_object(connector) try: # Map volume object to host object. LOG.debug("Mapping volume: %(vol)s to host: %(host)s", {'host': host_name, 'vol': vol.name}) mapping = self.client.new("mappings", volume=vol, host=host).save() except Exception as ex: if host_rs.total == 0: self._delete_host_by_name(host_name) LOG.exception("Unable to map volume: %(vol)s to host: " "%(host)s", {'host': host_name, 'vol': vol.name}) raise KaminarioCinderDriverException(reason=ex) # Get lun number. if type(volume).__name__ == 'RestObject': return self._get_lun_number(vol, host) else: return mapping.lun def _get_host_object(self, connector): pass def _get_is_dedup(self, vol_type): if vol_type: specs_val = vol_type.get('extra_specs', {}).get( 'kaminario:thin_prov_type') if specs_val == 'nodedup': return False else: return True else: return True def _get_is_replica(self, vol_type): replica = False if vol_type and vol_type.get('extra_specs'): specs = vol_type.get('extra_specs') if (specs.get('kaminario:replication') == 'enabled' and self.replica): replica = True return replica def _get_replica_status(self, vg_name): vg_rs = self.client.search("volume_groups", name=vg_name) if vg_rs.total: vg = vg_rs.hits[0] if self.client.search("replication/sessions", local_volume_group=vg).total: return True return False @volume_utils.trace def manage_existing(self, volume, existing_ref): vol_name = existing_ref['source-name'] new_name = self.get_volume_name(volume.id) vg_new_name = self.get_volume_group_name(volume.id) vg_name = None is_dedup = self._get_is_dedup(volume.get('volume_type')) reason = None try: LOG.debug("Searching volume: %s in K2.", vol_name) vol = self.client.search("volumes", name=vol_name).hits[0] vg = vol.volume_group nvol = self.client.search("volumes", volume_group=vg).total vg_replica = self._get_replica_status(vg.name) vol_map = False if self.client.search("mappings", volume=vol).total != 0: vol_map = True if is_dedup != vg.is_dedup: reason = 'dedup type mismatch for K2 volume group.' elif vg_replica: reason = 'replication enabled K2 volume group.' elif vol_map: reason = 'attached K2 volume.' elif nvol != 1: reason = 'multiple volumes in K2 volume group.' 
if reason: raise exception.ManageExistingInvalidReference( existing_ref=existing_ref, reason=_('Unable to manage K2 volume due to: %s') % reason) vol.name = new_name vg_name = vg.name LOG.debug("Manage new volume name: %s", new_name) vg.name = vg_new_name LOG.debug("Manage volume group name: %s", vg_new_name) vg.save() LOG.debug("Manage volume: %s in K2.", vol_name) vol.save() except exception.ManageExistingInvalidReference: LOG.exception("manage volume: %s failed.", vol_name) raise except Exception: LOG.exception("manage volume: %s failed.", vol_name) vg_rs = self.client.search("volume_groups", name=vg_new_name) if hasattr(vg_rs, 'hits') and vg_rs.total != 0: vg = vg_rs.hits[0] if vg_name and vg.name == vg_new_name: vg.name = vg_name LOG.debug("Updating vg new name to old name: %s ", vg_name) vg.save() raise @volume_utils.trace def manage_existing_get_size(self, volume, existing_ref): vol_name = existing_ref['source-name'] v_rs = self.client.search("volumes", name=vol_name) if hasattr(v_rs, 'hits') and v_rs.total != 0: vol = v_rs.hits[0] size = vol.size / units.Mi return math.ceil(size) else: raise exception.ManageExistingInvalidReference( existing_ref=existing_ref, reason=_('Unable to get size of manage volume.')) @volume_utils.trace def after_volume_copy(self, ctxt, volume, new_volume, remote=None): self.delete_volume(volume) vg_name_old = self.get_volume_group_name(volume.id) vol_name_old = self.get_volume_name(volume.id) vg_name_new = self.get_volume_group_name(new_volume.id) vol_name_new = self.get_volume_name(new_volume.id) vg_new = self.client.search("volume_groups", name=vg_name_new).hits[0] vg_new.name = vg_name_old vg_new.save() vol_new = self.client.search("volumes", name=vol_name_new).hits[0] vol_new.name = vol_name_old vol_new.save() @volume_utils.trace def retype(self, ctxt, volume, new_type, diff, host): old_type = volume.get('volume_type') vg_name = self.get_volume_group_name(volume.id) vol_name = self.get_volume_name(volume.id) vol_rs = self.client.search("volumes", name=vol_name) if vol_rs.total: vol = vol_rs.hits[0] vmap = self.client.search("mappings", volume=vol).total old_rep_type = self._get_replica_status(vg_name) new_rep_type = self._get_is_replica(new_type) new_prov_type = self._get_is_dedup(new_type) old_prov_type = self._get_is_dedup(old_type) # Change dedup<->nodedup with add/remove replication is complex in K2 # since K2 does not have api to change dedup<->nodedup. 
if new_prov_type == old_prov_type: if not old_rep_type and new_rep_type: self._add_replication(volume) return True elif old_rep_type and not new_rep_type: self._delete_replication(volume) return True elif not new_rep_type and not old_rep_type: msg = ("Use '--migration-policy on-demand' to change 'dedup " "without replication'<->'nodedup without replication'.") if vol_rs.total and vmap: msg = "Unattach volume and {0}".format(msg) LOG.debug(msg) return False else: LOG.error('Change from type1: %(type1)s to type2: %(type2)s ' 'is not supported directly in K2.', {'type1': old_type, 'type2': new_type}) return False def _add_replication(self, volume): vg_name = self.get_volume_group_name(volume.id) vol_name = self.get_volume_name(volume.id) if volume.replication_status == K2_REP_FAILED_OVER: self._create_failover_volume_replica(volume, vg_name, vol_name) else: LOG.debug("Searching volume group with name: %(name)s", {'name': vg_name}) vg = self.client.search("volume_groups", name=vg_name).hits[0] LOG.debug("Searching volume with name: %(name)s", {'name': vol_name}) vol = self.client.search("volumes", name=vol_name).hits[0] self._create_volume_replica(volume, vg, vol, self.replica.rpo) def _delete_replication(self, volume): vg_name = self.get_volume_group_name(volume.id) vol_name = self.get_volume_name(volume.id) if volume.replication_status == K2_REP_FAILED_OVER: self._delete_failover_volume_replica(volume, vg_name, vol_name) else: self._delete_volume_replica(volume, vg_name, vol_name) def _kaminario_disconnect_volume(self, *attach_info): for info in attach_info: if (info and info.get('connector') and info.get('conn', {}).get('data') and info.get('device')): info['connector'].disconnect_volume(info['conn']['data'], info['device']) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/drivers/kaminario/kaminario_fc.py0000664000175000017500000002227400000000000024275 0ustar00zuulzuul00000000000000# Copyright (c) 2016 by Kaminario Technologies, Ltd. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Volume driver for Kaminario K2 all-flash arrays.""" from oslo_log import log as logging from cinder.common import constants from cinder import coordination from cinder.i18n import _ from cinder.objects import fields from cinder.volume.drivers.kaminario import kaminario_common as common from cinder.volume import volume_utils from cinder.zonemanager import utils as fczm_utils K2_REP_FAILED_OVER = fields.ReplicationStatus.FAILED_OVER LOG = logging.getLogger(__name__) class KaminarioFCDriver(common.KaminarioCinderDriver): """Kaminario K2 FC Volume Driver. 
Version history: 1.0 - Initial driver 1.1 - Added manage/unmanage and extra-specs support for nodedup 1.2 - Added replication support 1.3 - Added retype support 1.4 - Added replication failback support """ VERSION = '1.4' # ThirdPartySystems wiki page name CI_WIKI_NAME = "Kaminario_K2_CI" @volume_utils.trace def __init__(self, *args, **kwargs): super(KaminarioFCDriver, self).__init__(*args, **kwargs) self._protocol = constants.FC self.lookup_service = fczm_utils.create_lookup_service() @volume_utils.trace @coordination.synchronized('{self.k2_lock_name}') def initialize_connection(self, volume, connector): """Attach K2 volume to host.""" # Check wwpns in host connector. if not connector.get('wwpns'): msg = _("No wwpns found in host connector.") LOG.error(msg) raise common.KaminarioCinderDriverException(reason=msg) # To support replication failback temp_client = None if (hasattr(volume, 'replication_status') and volume.replication_status == K2_REP_FAILED_OVER): temp_client = self.client self.client = self.target # Get target wwpns. target_wwpns = self.get_target_info(volume) # Map volume. lun = self.k2_initialize_connection(volume, connector) # Create initiator-target mapping. target_wwpns, init_target_map = self._build_initiator_target_map( connector, target_wwpns) # To support replication failback if temp_client: self.client = temp_client # Return target volume information. conn_info = {'driver_volume_type': 'fibre_channel', 'data': {"target_discovered": True, "target_lun": lun, "target_wwn": target_wwpns, "initiator_target_map": init_target_map}} fczm_utils.add_fc_zone(conn_info) return conn_info def get_hostname_initiator_pwwn(self, volume): init_host = None init_host_name = "" init_pwwn = [] vol_map_rs = self.client.search("mappings", {"volume": volume}) if hasattr(vol_map_rs, "hits") and vol_map_rs.total != 0: init_host = vol_map_rs.hits[0].host init_host_name = init_host.name if init_host is not None: host_fc_ports = self.client.search("host_fc_ports", host=init_host) if hasattr(host_fc_ports, "hits") and host_fc_ports.total != 0: for port in host_fc_ports.hits: if port.pwwn: init_pwwn.append((port.pwwn).replace(':', '')) return init_host_name, init_pwwn @volume_utils.trace @coordination.synchronized('{self.k2_lock_name}') def terminate_connection(self, volume, connector, **kwargs): if connector is None: host_name, init_pwwn = self.get_hostname_initiator_pwwn(volume) else: host_name = self.get_initiator_host_name(connector) # To support replication failback temp_client = None if (hasattr(volume, 'replication_status') and volume.replication_status == K2_REP_FAILED_OVER): temp_client = self.client self.client = self.target super(KaminarioFCDriver, self).terminate_connection(volume, connector) properties = {"driver_volume_type": "fibre_channel", "data": {}} host_rs = self.client.search("hosts", name=host_name) # In terminate_connection, host_entry is deleted if host # is not attached to any volume if host_rs.total == 0: # Get target wwpns. 
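A standalone sketch of the two WWPN spellings used in this file: connectors carry bare hex strings, while K2 host_fc_ports store them colon-separated (see get_hostname_initiator_pwwn above and _get_host_object below). The sample WWPN is made up.

# Sketch only: mirrors the conversions used by this driver.
def to_k2_pwwn(wwpn):
    return ":".join(wwpn[i:i + 2] for i in range(0, len(wwpn), 2))

def to_connector_wwpn(pwwn):
    return pwwn.replace(':', '')

assert to_k2_pwwn('10000090fa0d6754') == '10:00:00:90:fa:0d:67:54'
assert to_connector_wwpn('10:00:00:90:fa:0d:67:54') == '10000090fa0d6754'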
target_wwpns = self.get_target_info(volume) if connector is None: connector = {'wwpns': init_pwwn} target_wwpns, init_target_map = self._build_initiator_target_map( connector, target_wwpns) properties["data"] = {"target_wwn": target_wwpns, "initiator_target_map": init_target_map} fczm_utils.remove_fc_zone(properties) # To support replication failback if temp_client: self.client = temp_client return properties def get_target_info(self, volume): LOG.debug("Searching target wwpns in K2.") fc_ports_rs = self.client.search("system/fc_ports") target_wwpns = [] if hasattr(fc_ports_rs, 'hits') and fc_ports_rs.total != 0: for port in fc_ports_rs.hits: if port.pwwn: target_wwpns.append((port.pwwn).replace(':', '')) if not target_wwpns: msg = _("Unable to get FC target wwpns from K2.") LOG.error(msg) raise common.KaminarioCinderDriverException(reason=msg) return target_wwpns @volume_utils.trace def _get_host_object(self, connector): host_name = self.get_initiator_host_name(connector) LOG.debug("Searching initiator hostname: %s in K2.", host_name) host_rs = self.client.search("hosts", name=host_name) host_wwpns = connector['wwpns'] if host_rs.total == 0: try: LOG.debug("Creating initiator hostname: %s in K2.", host_name) host = self.client.new("hosts", name=host_name, type="Linux").save() except Exception as ex: LOG.exception("Unable to create host : %s in K2.", host_name) raise common.KaminarioCinderDriverException(reason=ex) else: # Use existing host. LOG.debug("Use existing initiator hostname: %s in K2.", host_name) host = host_rs.hits[0] # Adding host wwpn. for wwpn in host_wwpns: wwpn = ":".join([wwpn[i:i + 2] for i in range(0, len(wwpn), 2)]) if self.client.search("host_fc_ports", pwwn=wwpn, host=host).total == 0: LOG.debug("Adding wwpn: %(wwpn)s to host: " "%(host)s in K2.", {'wwpn': wwpn, 'host': host_name}) try: self.client.new("host_fc_ports", pwwn=wwpn, host=host).save() except Exception as ex: if host_rs.total == 0: self._delete_host_by_name(host_name) LOG.exception("Unable to add wwpn : %(wwpn)s to " "host: %(host)s in K2.", {'wwpn': wwpn, 'host': host_name}) raise common.KaminarioCinderDriverException(reason=ex) return host, host_rs, host_name def _build_initiator_target_map(self, connector, all_target_wwns): """Build the target_wwns and the initiator target map.""" target_wwns = [] init_targ_map = {} if self.lookup_service is not None: # use FC san lookup. dev_map = self.lookup_service.get_device_mapping_from_network( connector.get('wwpns'), all_target_wwns) for fabric_name in dev_map: fabric = dev_map[fabric_name] target_wwns += fabric['target_port_wwn_list'] for initiator in fabric['initiator_port_wwn_list']: if initiator not in init_targ_map: init_targ_map[initiator] = [] init_targ_map[initiator] += fabric['target_port_wwn_list'] init_targ_map[initiator] = list(set( init_targ_map[initiator])) target_wwns = list(set(target_wwns)) else: initiator_wwns = connector.get('wwpns', []) target_wwns = all_target_wwns for initiator in initiator_wwns: init_targ_map[initiator] = target_wwns return target_wwns, init_targ_map ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/drivers/kaminario/kaminario_iscsi.py0000664000175000017500000001417200000000000025015 0ustar00zuulzuul00000000000000# Copyright (c) 2016 by Kaminario Technologies, Ltd. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Volume driver for Kaminario K2 all-flash arrays.""" from oslo_log import log as logging from cinder.common import constants from cinder import coordination from cinder.i18n import _ from cinder import interface from cinder.objects import fields from cinder.volume.drivers.kaminario import kaminario_common as common from cinder.volume import volume_utils ISCSI_TCP_PORT = "3260" K2_REP_FAILED_OVER = fields.ReplicationStatus.FAILED_OVER LOG = logging.getLogger(__name__) @interface.volumedriver class KaminarioISCSIDriver(common.KaminarioCinderDriver): """Kaminario K2 iSCSI Volume Driver. .. code-block:: none Version history: 1.0 - Initial driver 1.1 - Added manage/unmanage and extra-specs support for nodedup 1.2 - Added replication support 1.3 - Added retype support 1.4 - Added replication failback support """ VERSION = '1.4' # ThirdPartySystems wiki page name CI_WIKI_NAME = "Kaminario_K2_CI" @volume_utils.trace def __init__(self, *args, **kwargs): super(KaminarioISCSIDriver, self).__init__(*args, **kwargs) self._protocol = constants.ISCSI @volume_utils.trace @coordination.synchronized('{self.k2_lock_name}') def initialize_connection(self, volume, connector): """Attach K2 volume to host.""" # To support replication failback temp_client = None if (hasattr(volume, 'replication_status') and volume.replication_status == K2_REP_FAILED_OVER): temp_client = self.client self.client = self.target # Get target_portal and target iqn. iscsi_portals, target_iqns = self.get_target_info(volume) # Map volume. lun = self.k2_initialize_connection(volume, connector) # To support replication failback if temp_client: self.client = temp_client # Return target volume information. 
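A sketch of the connection data assembled just below when the connector requests multipath and discovery is disabled; the portal addresses, IQN and LUN are hypothetical values.

# Sketch only: shape of the 'data' dict for a multipath iSCSI attach.
iscsi_portals = ['192.168.1.11:3260', '192.168.1.12:3260']           # hypothetical
target_iqns = ['iqn.2009-01.com.kaminario:storage.k2.example'] * 2   # hypothetical
lun = 1
data = {'target_iqn': target_iqns[0],
        'target_portal': iscsi_portals[0],
        'target_lun': lun,
        'target_discovered': True,
        'target_iqns': target_iqns,
        'target_portals': iscsi_portals,
        'target_luns': [lun] * len(target_iqns)}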
result = {"driver_volume_type": "iscsi", "data": {"target_iqn": target_iqns[0], "target_portal": iscsi_portals[0], "target_lun": lun, "target_discovered": True}} if self.configuration.disable_discovery and connector.get('multipath'): result['data'].update(target_iqns=target_iqns, target_portals=iscsi_portals, target_luns=[lun] * len(target_iqns)) return result @volume_utils.trace @coordination.synchronized('{self.k2_lock_name}') def terminate_connection(self, volume, connector, **kwargs): # To support replication failback temp_client = None if (hasattr(volume, 'replication_status') and volume.replication_status == K2_REP_FAILED_OVER): temp_client = self.client self.client = self.target super(KaminarioISCSIDriver, self).terminate_connection(volume, connector) # To support replication failback if temp_client: self.client = temp_client def get_target_info(self, volume): LOG.debug("Searching first iscsi port ip without wan in K2.") iscsi_ip_rs = self.client.search("system/net_ips") iscsi_portals = target_iqns = None if hasattr(iscsi_ip_rs, 'hits') and iscsi_ip_rs.total != 0: iscsi_portals = ['%s:%s' % (ip.ip_address, ISCSI_TCP_PORT) for ip in iscsi_ip_rs.hits if not ip.wan_port] if not iscsi_portals: msg = _("Unable to get ISCSI IP address from K2.") LOG.error(msg) raise common.KaminarioCinderDriverException(reason=msg) LOG.debug("Searching system state for target iqn in K2.") sys_state_rs = self.client.search("system/state") if hasattr(sys_state_rs, 'hits') and sys_state_rs.total != 0: iqn = sys_state_rs.hits[0].iscsi_qualified_target_name target_iqns = [iqn] * len(iscsi_portals) if not target_iqns: msg = _("Unable to get target iqn from K2.") LOG.error(msg) raise common.KaminarioCinderDriverException(reason=msg) return iscsi_portals, target_iqns @volume_utils.trace def _get_host_object(self, connector): host_name = self.get_initiator_host_name(connector) LOG.debug("Searching initiator hostname: %s in K2.", host_name) host_rs = self.client.search("hosts", name=host_name) """Create a host if not exists.""" if host_rs.total == 0: try: LOG.debug("Creating initiator hostname: %s in K2.", host_name) host = self.client.new("hosts", name=host_name, type="Linux").save() LOG.debug("Adding iqn: %(iqn)s to host: %(host)s in K2.", {'iqn': connector['initiator'], 'host': host_name}) iqn = self.client.new("host_iqns", iqn=connector['initiator'], host=host) iqn.save() except Exception as ex: self._delete_host_by_name(host_name) LOG.exception("Unable to create host: %s in K2.", host_name) raise common.KaminarioCinderDriverException(reason=ex) else: LOG.debug("Use existing initiator hostname: %s in K2.", host_name) host = host_rs.hits[0] return host, host_rs, host_name ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1759315577.375121 cinder-27.0.0/cinder/volume/drivers/kioxia/0000775000175000017500000000000000000000000020604 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/drivers/kioxia/entities.py0000664000175000017500000003041500000000000023005 0ustar00zuulzuul00000000000000# (c) Copyright Kioxia Corporation 2021 All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import json class JsonClass(object): def __init__(self): pass def to_json(self): return json.dumps( self, default=lambda o: o.__dict__, sort_keys=True, indent=4) def __str__(self): return ', '.join(['{key}={value}'.format( key=key, value=self.__dict__.get(key)) for key in self.__dict__]) def __getattr__(self, item): return "N/A" def set_items(self, json_object): json_keys = json_object.keys() for key in json_keys: if not isinstance(json_object[key], 'dict'): self.__dict__[key] = json_object[key] class ProvisionerResponse(JsonClass): # # Provisioner response data # def __init__( self, prov_entities, res_id=None, status=None, description=None, path=None): JsonClass.__init__(self) self.prov_entities = prov_entities self.resID = res_id self.status = "Success" if status is None else status self.description = self.status if description is None else description self.path = path def __str__(self): items = "" if self.prov_entities: num_of_entities = len(self.prov_entities) if num_of_entities == 1: items = self.prov_entities[0] else: items = num_of_entities return "(" + str(items) + ", " + str(self.resID) + ", " + \ str(self.status) + ", " + str(self.description) + ")" class ProvisionerInfo(JsonClass): # # Provisioner Info data # def __init__(self, totalFreeSpace, version, syslogsBackend=None): self.totalFreeSpace = totalFreeSpace self.version = version self.syslogsBackend = syslogsBackend class Backend(JsonClass): # # Backend data # def __init__( self, mgmt_ips=None, rack=None, region=None, zone=None, persistentID=None, inUse=None, hostId=None, state=None, totalCapacity=None, availableCapacity=None, lastProbTime=None, probeInterval=None, totalBW=None, availableBW=None, totalIOPS=None, availableIOPS=None): self.mgmtIPs = mgmt_ips self.rack = rack self.region = region self.zone = zone self.persistentID = persistentID self.inUse = inUse self.state = state self.totalCapacity = totalCapacity self.availableCapacity = availableCapacity self.lastProbTime = lastProbTime self.probeInterval = probeInterval self.totalBW = totalBW self.availableBW = availableBW self.totalIOPS = totalIOPS self.availableIOPS = availableIOPS self.hostId = hostId class Replica(JsonClass): # # Backend data # def __init__(self, sameRackAllowed, racks, regions, zones): self.sameRackAllowed = sameRackAllowed self.racks = racks self.regions = regions self.zones = zones class Location(JsonClass): # # Location data # def __init__( self, uuid=None, backend=None, replicaState=None, currentStateTime=None): self.uuid = uuid self.backend = backend self.replicaState = replicaState self.currentStateTime = currentStateTime class VolumeProv(JsonClass): # # Provisioner Volume data # def __init__( self, uuid=None, alias=None, capacity=None, numReplicas=None, maxIOPS=None, desiredIOPS=None, maxBW=None, desiredBW=None, blockSize=None, maxReplicaDownTime=None, snapshotID=None, writable=None, reservedSpace=None, location=None): self.uuid = uuid self.alias = alias self.capacity = capacity self.numReplicas = numReplicas self.maxIOPS = maxIOPS self.desiredIOPS = desiredIOPS self.maxBW = maxBW self.desiredBW = desiredBW self.blockSize = blockSize 
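A standalone sketch of the serialization pattern JsonClass above relies on: json.dumps with default=lambda o: o.__dict__ turns entity objects, including nested entities, into plain JSON. The _Demo class is illustrative only.

import json

class _Demo:
    # Sketch only: stands in for any JsonClass subclass.
    def __init__(self, alias, capacity):
        self.alias = alias
        self.capacity = capacity

    def to_json(self):
        return json.dumps(self, default=lambda o: o.__dict__,
                          sort_keys=True, indent=4)

print(_Demo('vol-1', 10).to_json())
# ->
# {
#     "alias": "vol-1",
#     "capacity": 10
# }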
self.maxReplicaDownTime = maxReplicaDownTime self.snapshotID = snapshotID self.writable = writable self.reservedSpacePercentage = reservedSpace self.location = location class StorageClass(JsonClass): # # Provisioner Storage Class # def __init__( self, replicas, racks=None, regions=None, zones=None, blockSize=None, maxIOPSPerGB=None, desiredIOPSPerGB=None, maxBWPerGB=None, desiredBWPerGB=None, sameRackAllowed=None, maxReplicaDownTime=None, hostId=None, spanAllowed=None, name=None, shareSSDBetweenVolumes=None): self.numReplicas = replicas if racks is not None: self.racks = racks if regions is not None: self.regions = regions if zones is not None: self.zones = zones if blockSize is not None: self.blockSize = blockSize if maxIOPSPerGB is not None: self.maxIOPSPerGB = maxIOPSPerGB if desiredIOPSPerGB is not None: self.desiredIOPSPerGB = desiredIOPSPerGB if maxBWPerGB is not None: self.maxBWPerGB = maxBWPerGB if desiredBWPerGB is not None: self.desiredBWPerGB = desiredBWPerGB if sameRackAllowed is not None: self.sameRackAllowed = sameRackAllowed if maxReplicaDownTime is not None: self.maxReplicaDownTime = maxReplicaDownTime if hostId is not None: self.hostId = hostId if spanAllowed is not None: self.allowSpan = spanAllowed if name is not None: self.name = name if shareSSDBetweenVolumes is not None: self.shareSSDBetweenVolumes = shareSSDBetweenVolumes class VolumeCreate(JsonClass): # # Provisioner Volume data for Create operation # def __init__( self, alias, capacity, storage_class, prov_type, reserved_space=None, protocol=None, uuid=None): self.alias = alias self.capacity = capacity self.storageClass = storage_class self.provisioningType = prov_type if reserved_space is not None: self.reservedSpacePercentage = reserved_space if protocol is not None: self.protocol = protocol if uuid is not None: self.uuid = uuid class SyslogEntity(JsonClass): # # Syslog Entity object # def __init__( self, name=None, url=None, state=None, useTls=None, certFileName=None): self.name = name self.url = url self.state = state self.useTls = useTls self.certFileName = certFileName class SnapshotCreate(JsonClass): # # Provisioner Snapshot data for Create operation # def __init__( self, alias, volumeID, reservedSpacePercentage=None, snapshotID=None): self.alias = alias self.volumeID = volumeID if reservedSpacePercentage is not None: self.reservedSpacePercentage = reservedSpacePercentage if snapshotID is not None: self.snapshotID = snapshotID class SnapshotEntity(JsonClass): # # Provisioner Snapshot Entity data for Show operation # def __init__( self, alias=None, snapshotID=None, reservedSpace=None, volumeID=None, capacity=None, timestamp=None): self.alias = alias self.volumeID = volumeID self.reservedSpace = reservedSpace self.snapshotID = snapshotID self.capacity = capacity self.timestamp = timestamp class SnapshotVolumeCreate(JsonClass): # # Provisioner Snapshot Volume data for Create operation # def __init__( self, alias, snapshotID, writable, reservedSpacePercentage=None, volumeID=None, maxIOPSPerGB=None, maxBWPerGB=None, protocol=None, spanAllowed=None, storageClassName=None): self.alias = alias self.snapshotID = snapshotID self.writable = writable if reservedSpacePercentage is not None: self.reservedSpacePercentage = reservedSpacePercentage if volumeID is not None: self.volumeID = volumeID if maxIOPSPerGB is not None: self.maxIOPSPerGB = maxIOPSPerGB if maxBWPerGB is not None: self.maxBWPerGB = maxBWPerGB if protocol is not None: self.protocol = protocol if spanAllowed is not None: self.allowSpan = spanAllowed if 
storageClassName is not None: self.storageClassName = storageClassName class ForwardEntity(JsonClass): # # Provisioner Forward Entity data # def __init__( self, loggingType, level, host, appName, message, parametersList): self.loggingType = loggingType self.level = level self.host = host self.appName = appName self.message = message self.parametersList = parametersList class LicenseEntity(JsonClass): # # Provisioner License Entity data # def __init__( self, license_type=None, expirationDate=None, maxBackends=None): self.type = license_type self.expirationDate = expirationDate self.maxBackends = maxBackends class HostEntity(JsonClass): # # Provisioner Host Entity data # def __init__( self, nqn=None, uuid=None, name=None, clientType=None, version=None, state=None, lastProbeTime=None, duration=None): self.nqn = nqn self.uuid = uuid self.name = name self.clientType = clientType self.version = version self.state = state self.lastProbeTime = lastProbeTime self.duration = duration class TargetEntity(JsonClass): # # Provisioner Target Entity data for Show operation # def __init__(self, alias=None): self.alias = alias class TenantEntity(JsonClass): # # Provisioner Tenant Entity data for Show operation # def __init__(self, capacity, iops, bw, uuid=None, name=None): self.capacity = capacity self.totalIOPS = iops self.totalBW = bw if uuid is not None: self.tenantId = uuid if name is not None: self.name = name class CloneEntity(JsonClass): # # Provisioner Clone Entity data # def __init__(self, sourceVolumeId, alias, volumeId=None, reservedSpacePercentage=None, capacity=None): self.sourceVolumeId = sourceVolumeId self.alias = alias if volumeId is not None: self.volumeId = volumeId if reservedSpacePercentage is not None: self.reservedSpacePercentage = reservedSpacePercentage if capacity is not None: self.capacity = capacity ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/drivers/kioxia/kumoscale.py0000664000175000017500000004636300000000000023155 0ustar00zuulzuul00000000000000# (c) Copyright Kioxia Corporation 2021 All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
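A usage sketch, assuming the entities module above is importable as cinder.volume.drivers.kioxia.entities: building the JSON body that the KumoScale driver later posts when creating a two-replica volume. The alias, size and option values are examples only.

from cinder.volume.drivers.kioxia import entities

sc = entities.StorageClass(2, blockSize=4096, sameRackAllowed=False,
                           spanAllowed=True)
vol = entities.VolumeCreate('demo-vol', 10, sc, 'THICK', protocol='NVMeoF')
print(vol.to_json())  # nested StorageClass serializes via __dict__ as well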
"""Volume driver for KIOXIA KumoScale NVMeOF storage system.""" import hashlib from oslo_config import cfg from oslo_log import log as logging from cinder.common import constants from cinder import exception from cinder.i18n import _ from cinder import interface from cinder.volume import driver from cinder.volume.drivers.kioxia import entities from cinder.volume.drivers.kioxia import rest_client LOG = logging.getLogger(__name__) KUMOSCALE_OPTS = [ cfg.StrOpt("kioxia_url", help="KumoScale provisioner REST API URL"), cfg.StrOpt("kioxia_cafile", help="Cert for provisioner REST API SSL"), cfg.StrOpt("kioxia_token", help="KumoScale Provisioner auth token."), cfg.IntOpt( "kioxia_num_replicas", default=1, help="Number of volume replicas."), cfg.IntOpt( "kioxia_max_iops_per_gb", default=0, help="Upper limit for IOPS/GB."), cfg.IntOpt( "kioxia_desired_iops_per_gb", default=0, help="Desired IOPS/GB."), cfg.IntOpt( "kioxia_max_bw_per_gb", default=0, help="Upper limit for bandwidth in B/s per GB."), cfg.IntOpt( "kioxia_desired_bw_per_gb", default=0, help="Desired bandwidth in B/s per GB."), cfg.BoolOpt( "kioxia_same_rack_allowed", default=False, help="Can more than one replica be allocated to same rack."), cfg.IntOpt( "kioxia_block_size", default=4096, help="Volume block size in bytes - 512 or 4096 (Default)."), cfg.BoolOpt( "kioxia_writable", default=False, help="Volumes from snapshot writeable or not."), cfg.StrOpt( "kioxia_provisioning_type", default="THICK", choices=[ ('THICK', 'Thick provisioning'), ('THIN', 'Thin provisioning')], help="Thin or thick volume, Default thick."), cfg.IntOpt( "kioxia_vol_reserved_space_percentage", default=0, help="Thin volume reserved capacity allocation percentage."), cfg.IntOpt( "kioxia_snap_reserved_space_percentage", default=0, help="Percentage of the parent volume to be used for log."), cfg.IntOpt( "kioxia_snap_vol_reserved_space_percentage", default=0, help="Writable snapshot percentage of parent volume used for log."), cfg.IntOpt( "kioxia_max_replica_down_time", default=0, help="Replicated volume max downtime for replica in minutes."), cfg.BoolOpt( "kioxia_span_allowed", default=True, help="Allow span - Default True."), cfg.BoolOpt( "kioxia_snap_vol_span_allowed", default=True, help="Allow span in snapshot volume - Default True.") ] CONF = cfg.CONF CONF.register_opts(KUMOSCALE_OPTS) @interface.volumedriver class KumoScaleBaseVolumeDriver(driver.BaseVD): """Performs volume management on KumoScale Provisioner. Version history: .. code-block:: none 1.0.0 - Initial driver version. 
""" VERSION = '1.0.0' CI_WIKI_NAME = 'KIOXIA_CI' SUPPORTED_REST_API_VERSIONS = ['1.0', '1.1'] def __init__(self, *args, **kwargs): super(KumoScaleBaseVolumeDriver, self).__init__(*args, **kwargs) self.configuration.append_config_values(KUMOSCALE_OPTS) self._backend_name = ( self.configuration.volume_backend_name or self.__class__.__name__) self.kumoscale = self._get_kumoscale( self.configuration.safe_get("kioxia_url"), self.configuration.safe_get("kioxia_token"), self.configuration.safe_get("kioxia_cafile")) self.num_replicas = self.configuration.safe_get("kioxia_num_replicas") self.same_rack_allowed = self.configuration.safe_get( "kioxia_same_rack_allowed") self.max_iops_per_gb = self.configuration.safe_get( "kioxia_max_iops_per_gb") self.desired_iops_per_gb = self.configuration.safe_get( "kioxia_desired_iops_per_gb") self.max_bw_per_gb = self.configuration.safe_get( "kioxia_max_bw_per_gb") self.desired_bw_per_gb = self.configuration.safe_get( "kioxia_desired_bw_per_gb") self.block_size = self.configuration.safe_get("kioxia_block_size") self.writable = self.configuration.safe_get("kioxia_writable") self.provisioning_type = self.configuration.safe_get( "kioxia_provisioning_type") self.vol_reserved_space_percentage = self.configuration.safe_get( "kioxia_vol_reserved_space_percentage") self.snap_vol_reserved_space_percentage = self.configuration.safe_get( "kioxia_snap_vol_reserved_space_percentage") self.snap_reserved_space_percentage = self.configuration.safe_get( "kioxia_snap_reserved_space_percentage") self.max_replica_down_time = self.configuration.safe_get( "kioxia_max_replica_down_time") self.span_allowed = self.configuration.safe_get("kioxia_span_allowed") self.snap_vol_span_allowed = self.configuration.safe_get( "kioxia_snap_vol_span_allowed") @staticmethod def get_driver_options(): return KUMOSCALE_OPTS def _get_kumoscale(self, url, token, cert): """Returns an initialized rest client""" url_strs = url.split(":") ip_str = url_strs[1] ip_strs = ip_str.split("//") ip = ip_strs[1] port = url_strs[2] kumoscale = rest_client.KioxiaProvisioner([ip], cert, token, port) return kumoscale def create_volume(self, volume): """Create the volume""" volume_name = volume["name"] volume_uuid = volume["id"] volume_size = volume["size"] zone_list = None if 'availability_zone' not in volume else [ volume['availability_zone']] if self.num_replicas > 1 and len(volume_name) > 27: volume_name = volume_name[:27] # workaround for limitation storage_class = entities.StorageClass( self.num_replicas, None, None, zone_list, self.block_size, self.max_iops_per_gb, self.desired_iops_per_gb, self.max_bw_per_gb, self.desired_bw_per_gb, self.same_rack_allowed, self.max_replica_down_time, None, self.span_allowed) ks_volume = entities.VolumeCreate( volume_name, volume_size, storage_class, self.provisioning_type, self.vol_reserved_space_percentage, 'NVMeoF', volume_uuid) try: result = self.kumoscale.create_volume(ks_volume) except Exception as e: msg = (_("Volume %(volname)s creation exception: %(txt)s") % {'volname': volume_name, 'txt': str(e)}) raise exception.VolumeBackendAPIException(data=msg) if result.status != 'Success': raise exception.VolumeBackendAPIException(data=result.description) def delete_volume(self, volume): """Delete the volume""" volume_uuid = volume["id"] try: result = self.kumoscale.delete_volume(volume_uuid) except Exception as e: msg = (_("Volume %(voluuid)s deletion exception: %(txt)s") % {'voluuid': volume_uuid, 'txt': str(e)}) raise exception.VolumeBackendAPIException(data=msg) if result.status 
not in ('Success', 'DeviceNotFound', 'NotExists'): raise exception.VolumeBackendAPIException(data=result.description) def create_snapshot(self, snapshot): snapshot_name = snapshot['name'] snapshot_uuid = snapshot['id'] volume_uuid = snapshot['volume_id'] ks_snapshot = entities.SnapshotCreate( snapshot_name, volume_uuid, self.snap_reserved_space_percentage, snapshot_uuid) try: result = self.kumoscale.create_snapshot(ks_snapshot) except Exception as e: msg = (_("Snapshot %(snapname)s creation exception: %(txt)s") % {'snapname': snapshot_name, 'txt': str(e)}) raise exception.VolumeBackendAPIException(data=msg) if result.status != 'Success': raise exception.VolumeBackendAPIException(data=result.description) def delete_snapshot(self, snapshot): snapshot_uuid = snapshot['id'] try: result = self.kumoscale.delete_snapshot(snapshot_uuid) except Exception as e: msg = (_("Snapshot %(snapuuid)s deletion exception: %(txt)s") % {'snapuuid': snapshot_uuid, 'txt': str(e)}) raise exception.VolumeBackendAPIException(data=msg) if result.status not in ('Success', 'DeviceNotFound', 'NotExists'): raise exception.VolumeBackendAPIException(data=result.description) def create_volume_from_snapshot(self, volume, snapshot): volume_name = volume["name"] volume_uuid = volume["id"] snapshot_uuid = snapshot["id"] if self.writable: reserved_space_percentage = self.snap_vol_reserved_space_percentage else: reserved_space_percentage = 0 ks_snapshot_volume = entities.SnapshotVolumeCreate( volume_name, snapshot_uuid, self.writable, reserved_space_percentage, volume_uuid, self.max_iops_per_gb, self.max_bw_per_gb, 'NVMeoF', self.snap_vol_span_allowed) try: result = self.kumoscale.create_snapshot_volume(ks_snapshot_volume) except Exception as e: msg = (_("Volume %(volname)s from snapshot exception: %(txt)s") % {'volname': volume_name, 'txt': str(e)}) raise exception.VolumeBackendAPIException(data=msg) if result.status != 'Success': raise exception.VolumeBackendAPIException(data=result.description) def initialize_connection(self, volume, connector, initiator_data=None): """Connect the initiator to a volume""" host_uuid = connector['uuid'] ks_volume = None targets = [] volume_replicas = [] volume_uuid = volume['id'] volume_name = volume['name'] try: result = self.kumoscale.host_probe( connector['nqn'], connector['uuid'], KumoScaleBaseVolumeDriver._convert_host_name( connector['host']), 'Agent', 'cinder-driver-0.1', 30) except Exception as e: msg = (_("Host %(uuid)s host_probe exception: %(txt)s") % {'uuid': connector['uuid'], 'txt': str(e)}) raise exception.VolumeBackendAPIException(data=msg) if result.status != 'Success': msg = (_("host_probe for %(uuid)s failed with %(txt)s") % {'uuid': connector['uuid'], 'txt': result.description}) raise exception.VolumeBackendAPIException(data=msg) try: result = self.kumoscale.publish(host_uuid, volume_uuid) except Exception as e: msg = (_("Volume %(voluuid)s publish exception: %(txt)s") % {'voluuid': volume_uuid, 'txt': str(e)}) raise exception.VolumeBackendAPIException(data=msg) if result.status != "Success" and result.status != 'AlreadyPublished': raise exception.VolumeBackendAPIException(data=result.description) try: result = self.kumoscale.get_volumes_by_uuid(volume_uuid) except Exception as e: msg = (_("Volume %(voluuid)s fetch exception: %(txt)s") % {'voluuid': volume_uuid, 'txt': str(e)}) raise exception.VolumeBackendAPIException(data=msg) if result.status == "Success": if len(result.prov_entities) == 0: raise exception.VolumeBackendAPIException( data=_("Volume %s not found") % 
volume_uuid) else: ks_volume = result.prov_entities[0] else: msg = (_("get_volumes_by_uuid for %(uuid)s failed with %(txt)s") % {'uuid': volume_uuid, 'txt': result.description}) raise exception.VolumeBackendAPIException(data=msg) try: result = self.kumoscale.get_targets(host_uuid, ks_volume.uuid) except Exception as e: msg = (_("Volume %(voluuid)s get targets exception: %(txt)s") % {'voluuid': volume_uuid, 'txt': str(e)}) raise exception.VolumeBackendAPIException(data=msg) if result.status == "Success": if len(result.prov_entities) == 0: raise exception.VolumeBackendAPIException( data=_("Volume %s targets not found") % ks_volume.uuid) else: targets = result.prov_entities ks_volume_replicas = ks_volume.location for i in range(len(targets)): persistent_id = str(targets[i].backend.persistentID) try: result = self.kumoscale.get_backend_by_id(persistent_id) except Exception as e: msg = (_("Backend %(backpid)s exception: %(txt)s") % {'backpid': persistent_id, 'txt': str(e)}) raise exception.VolumeBackendAPIException(data=msg) if result.status == "Success": if len(result.prov_entities) == 0: raise exception.VolumeBackendAPIException( data=_("Backend %s not found") % persistent_id) else: backend = result.prov_entities[0] else: msg = (_("get_backend_by_id for %(pid)s failed with %(txt)s") % {'pid': persistent_id, 'txt': result.description}) raise exception.VolumeBackendAPIException(data=msg) str_portals = [] for p in range(len(backend.portals)): portal = backend.portals[p] portal_ip = str(portal.ip) portal_port = str(portal.port) portal_transport = str(portal.transport) str_portals.append( (portal_ip, portal_port, portal_transport)) for j in range(len(ks_volume_replicas)): ks_replica = ks_volume_replicas[j] if str(ks_replica.backend.persistentID) == persistent_id: break replica = dict() replica['vol_uuid'] = ks_replica.uuid replica['target_nqn'] = str(targets[i].targetName) replica['portals'] = str_portals volume_replicas.append(replica) if len(volume_replicas) > 1: # workaround for limitation volume_name = volume_name[:27] data = { 'vol_uuid': volume_uuid, 'alias': volume_name, 'writable': ks_volume.writable, 'volume_replicas': volume_replicas, 'replica_count': len(ks_volume_replicas) } if result.status != 'Success': raise exception.VolumeBackendAPIException(data=result.description) return { 'driver_volume_type': 'nvmeof', 'data': data } @staticmethod def _convert_host_name(name): if name is None: return "" if len(name) > 32: name = hashlib.md5(name.encode('utf-8'), usedforsecurity=False).hexdigest() else: name = name.replace('.', '-').lower() return name def terminate_connection(self, volume, connector, **kwargs): """Terminate connection.""" volume_uuid = volume['id'] if connector: host_uuid = connector['uuid'] else: host_uuid = None try: result = self.kumoscale.unpublish(host_uuid, volume_uuid) except Exception as e: msg = (_("Volume %(voluuid)s unpublish exception: %(txt)s") % {'voluuid': volume_uuid, 'txt': str(e)}) raise exception.VolumeBackendAPIException(data=msg) if result.status != 'Success' and ( result.status != 'VolumeNotPublished'): raise exception.VolumeBackendAPIException(data=result.description) def _update_volume_stats(self): data = dict( volume_backend_name=self._backend_name, vendor_name='KIOXIA', driver_version=self.VERSION, storage_protocol=constants.NVMEOF_VARIANT_1, ) data['total_capacity_gb'] = 'unknown' data['free_capacity_gb'] = 'unknown' data['consistencygroup_support'] = False data['thin_provisioning_support'] = True data['multiattach'] = False result = None tenants = 
[] try: result = self.kumoscale.get_tenants() except Exception as e: msg = _("Get tenants exception: %s") % str(e) LOG.exception(msg) if result and result.status == "Success": if len(result.prov_entities) == 0: LOG.error("No kumoscale tenants") else: tenants = result.prov_entities elif result: LOG.error("Get tenants API error: %s", result.description) default_tenant = None for i in range(len(tenants)): if tenants[i].tenantId == "0": default_tenant = tenants[i] break if default_tenant: total_capacity = default_tenant.capacity consumed_capacity = default_tenant.consumedCapacity free_capacity = total_capacity - consumed_capacity data['total_capacity_gb'] = total_capacity data['free_capacity_gb'] = free_capacity self._stats = data def extend_volume(self, volume, new_size): try: result = self.kumoscale.expand_volume( new_size, volume["id"]) except Exception as e: msg = (_("Volume %(volid)s expand exception: %(txt)s") % {'volid': volume["id"], 'txt': str(e)}) raise exception.VolumeBackendAPIException(data=msg) if result.status != 'Success': raise exception.VolumeBackendAPIException(data=result.description) def create_cloned_volume(self, volume, src_vref): clone_entity = entities.CloneEntity( src_vref['id'], volume['name'], volumeId=volume['id'], capacity=volume['size']) try: result = self.kumoscale.clone_volume(clone_entity) except Exception as e: msg = (_("Volume %(volid)s clone exception: %(txt)s") % {'volid': volume["id"], 'txt': str(e)}) raise exception.VolumeBackendAPIException(data=msg) if result.status != 'Success': raise exception.VolumeBackendAPIException(data=result.description) def create_export(self, context, volume, connector): pass def ensure_export(self, context, volume): pass def remove_export(self, context, volume): pass def check_for_setup_error(self): pass ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/drivers/kioxia/rest_client.py0000664000175000017500000010402200000000000023470 0ustar00zuulzuul00000000000000# (c) Copyright Kioxia Corporation 2021 All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import abc import json import os import ssl import urllib3 from cinder.volume.drivers.kioxia import entities urllib3.disable_warnings() RUN_COMMAND_TRIALS = 20 RUN_COMMAND_SLEEP = 0.5 class ClassBuilder(object): def __init__(self, **kwargs): for key, value in kwargs.items(): if value is not None: self.__dict__[key] = value def to_json(self): return json.dumps( self, default=lambda o: o.__dict__, sort_keys=True, indent=4) class JsonToClass(object): def __init__(self, json_object, first=False): if isinstance(json_object, list): self.records = [] for list_index in range(len(json_object)): list_item = JsonToClass(json_object[list_index]) self.records.append(list_item) else: if first: self.records = None self.build_class(json_object) if first: if 'status' not in json_object: self.status = "Success" if 'description' not in json_object: self.description = "Success." 
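A standalone sketch of the ClassBuilder pattern defined above and used later in this file for publish, unpublish and host_probe request bodies: only keyword arguments that are not None end up in the JSON payload. The identifiers are hypothetical.

import json

class _Builder:
    # Sketch only: mirrors ClassBuilder's "drop None values" behaviour.
    def __init__(self, **kwargs):
        for key, value in kwargs.items():
            if value is not None:
                self.__dict__[key] = value

body = _Builder(hostId='host-1', volId='vol-1', tenantId=None)
print(json.dumps(body.__dict__, sort_keys=True))
# {"hostId": "host-1", "volId": "vol-1"}   (tenantId omitted)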
pass def __getattr__(self, item): return "N/A" def to_json(self): return json.dumps( self, default=lambda o: o.__dict__, sort_keys=True, indent=4) def __str__(self): return json.dumps(self, default=lambda o: o.__dict__) def is_exist(self, item): if item in self.__dict__.keys() and self.__dict__[item] is not None: return True return False def build_class(self, json_object): json_keys = json_object.keys() for key in json_keys: if isinstance(json_object[key], list): self.__dict__[key] = [] for i in range(len(json_object[key])): if isinstance(json_object[key][i], dict): sub_object = JsonToClass(json_object[key][i]) else: sub_object = json_object[key][i] self.__dict__[key].append(sub_object) continue if not isinstance(json_object[key], dict): self.__dict__[key] = json_object[key] continue self.__dict__[key] = {} sub_object = JsonToClass(json_object[key]) self.__dict__[key] = sub_object class ProvisionerVisitor(object): # # Provisioner Visitor # def __init__(self, http, command_str): self.http = http self.command_str = command_str @abc.abstractmethod def visit(self, url): return class ProvisionerGetVisitor(ProvisionerVisitor): # # Provisioner Get Visitor # def visit(self, url, token=None): r = self.http.request( 'GET', url, headers={ "Authorization": "Bearer " + token}) return r class ProvisionerPostVisitor(ProvisionerVisitor): # # Provisioner Post Visitor # def __init__(self, http, command_str, json_body): ProvisionerVisitor.__init__(self, http, command_str) self.json_body = json_body def visit(self, url, token=None): r = self.http.request( 'POST', url, body=self.json_body, headers={ 'Content-Type': 'application/json', "Authorization": "Bearer " + token}) return r class ProvisionerDeleteVisitor(ProvisionerVisitor): # # Provisioner Delete Visitor # def __init__(self, http, command_str): ProvisionerVisitor.__init__(self, http, command_str) def visit(self, url, token=None): r = self.http.request( 'DELETE', url, body=None, headers={ 'Content-Type': 'application/json', "Authorization": "Bearer " + token}) return r class ProvisionerPatchVisitor(ProvisionerVisitor): # # Provisioner Patch Visitor # def __init__(self, http, command_str, json_body=None): ProvisionerVisitor.__init__(self, http, command_str) self.json_body = json_body def visit(self, url, token=None): r = self.http.request( 'PATCH', url, body=self.json_body, headers={ 'Content-Type': 'application/json', "Authorization": "Bearer " + token}) return r class ProvisionerPutVisitor(ProvisionerVisitor): # # Provisioner Put Visitor # def __init__(self, http, command_str, json_body): ProvisionerVisitor.__init__(self, http, command_str) self.json_body = json_body def visit(self, url, token=None): r = self.http.request( 'PUT', url, body=self.json_body, headers={ 'Content-Type': 'application/json', "Authorization": "Bearer " + token}) return r class ProvisionerPostDataVisitor(ProvisionerVisitor): # # Provisioner Post Data Visitor # def __init__(self, http, command_str, path): ProvisionerVisitor.__init__(self, http, command_str) self.path = path self.timeout = 90 def visit(self, url, token=None): binary_data = open(self.path, 'rb').read() disposition = "inline; filename=" + os.path.basename(self.path) r = self.http.request( 'POST', url, body=binary_data, headers={ 'Content-Type': 'application/x-gtar', 'Content-Disposition': disposition, "Authorization": "Bearer " + token}, timeout=self.timeout) return r class ProvisionerConnector(object): # # Provisioner Connector # def __init__(self, ips, port, visitor): self.visitor = visitor self.ips = ips self.port 
= port def visit_provisioner(self, token=None): r = None if self.ips: num_of_ips = len(self.ips) if num_of_ips > 0: for i in range(num_of_ips): ip = self.ips[i] url = 'https://' + ip + ':' + \ str(self.port) + '/' + self.visitor.command_str try: if token is None: token = "Unknown" r = self.visitor.visit(url, token) if r: if i != 0: KioxiaProvisioner.switch_path(i) return r except BaseException: continue return r return r return r class KioxiaProvisioner(object): # # REST client class that interacts with a specific Provisioner # :type ips: str array # :param ips: Provisioner management IPs # :type cert: str # :param cert: KumoScale keystore pem file full path # mgmt_ips = [] def __init__(self, ips, cert, token, port=8090): self.mgmt_ips = ips self.port = port self.user = None self.token = token if cert is None: cert = '/etc/kioxia/ssdtoolbox.pem' KioxiaProvisioner.mgmt_ips = ips self.http = urllib3.PoolManager( cert_reqs=ssl.CERT_NONE, cert_file=cert, assert_hostname=False, timeout=urllib3.Timeout( connect=5.0, read=60.0)) def set_token(self, user, token): self.user = user self.token = token def result_support(self, result): if result is not None: if result.data is not None: if "Status 401" in str(result.data): ClassBuilder() return entities.ProvisionerResponse( None, None, "Bad credentials") if "Status 403" in str(result.data): return entities.ProvisionerResponse( None, None, "Access is denied") if str(result.data) == "": return entities.ProvisionerResponse([], None, "Success") try: result_data = json.loads(result.data) if ('status' in result_data and result_data['status'] != "Success"): return entities.ProvisionerResponse( result_data, None, result_data['status'], result_data['description']) return entities.ProvisionerResponse(result_data) except Exception as e: return entities.ProvisionerResponse( None, None, type(e).__name__, e.message) return entities.ProvisionerResponse( None, None, "Provisioner Communication Error", "Provisioner Communication Error") # Call to switch last successful connected ip @staticmethod def switch_path(ip_idx): temp = KioxiaProvisioner.mgmt_ips[0] KioxiaProvisioner.mgmt_ips[0] = KioxiaProvisioner.mgmt_ips[ip_idx] KioxiaProvisioner.mgmt_ips[ip_idx] = temp # Call Provisioner with get request def provisioner_get_request(self, api_name): get_visitor = ProvisionerGetVisitor(self.http, api_name) provisioner_connector = ProvisionerConnector( self.mgmt_ips, self.port, get_visitor) r = provisioner_connector.visit_provisioner(self.token) return self.result_support(r) # Call Provisioner with delete request def provisioner_delete_request(self, api_name): delete_visitor = ProvisionerDeleteVisitor(self.http, api_name) provisioner_connector = ProvisionerConnector( self.mgmt_ips, self.port, delete_visitor) r = provisioner_connector.visit_provisioner(self.token) return self.result_support(r) # Call Provisioner with patch request def provisioner_patch_request(self, api_name, json_body=None): patch_visitor = ProvisionerPatchVisitor(self.http, api_name, json_body) provisioner_connector = ProvisionerConnector( self.mgmt_ips, self.port, patch_visitor) r = provisioner_connector.visit_provisioner(self.token) return self.result_support(r) # Call Provisioner with update request def provisioner_put_request(self, api_name, json_body): put_visitor = ProvisionerPutVisitor(self.http, api_name, json_body) provisioner_connector = ProvisionerConnector( self.mgmt_ips, self.port, put_visitor) r = provisioner_connector.visit_provisioner(self.token) return self.result_support(r) # Call Provisioner 
with post request def provisioner_post_request(self, api_name, json_body, password=None): post_visitor = ProvisionerPostVisitor(self.http, api_name, json_body) provisioner_connector = ProvisionerConnector( KioxiaProvisioner.mgmt_ips, self.port, post_visitor) r = provisioner_connector.visit_provisioner(self.token) return self.result_support(r) def get_info(self): # Call to Get Info API # @rtype: ProvisionerResponse # @returns: Provisioner response data contain Provisioner information # result_response = self.provisioner_get_request('info') if result_response.status == "Success": result_entity = JsonToClass(result_response.prov_entities, True) return entities.ProvisionerResponse(result_entity) return result_response def get_provisioner_info(self): # Call to Get Info API # @rtype: ProvisionerResponse # @returns: Provisioner response data contain Provisioner information # result_response = self.provisioner_get_request('info') if result_response.status == "Success": result_entity = JsonToClass(result_response.prov_entities, True) return result_entity return result_response def add_backend(self, backend_entity): # Call to Add Backend API # @rtype: ProvisionerResponse # @returns: Provisioner response data # j = backend_entity.to_json() result_response = self.provisioner_post_request('backends', j) return result_response def update_backend(self, backend_entity, persistent_id): # all to Update Backend API # @rtype: ProvisionerResponse # @returns: Provisioner response data # j = backend_entity.to_json() result_response = self.provisioner_put_request( 'backends/' + persistent_id, j) return result_response def delete_backend(self, persistent_id): # Call to Delete Backend API # @rtype: ProvisionerResponse # @returns: Provisioner response data # result_response = self.provisioner_delete_request( 'backends/' + persistent_id) return result_response def get_backends(self): # Call to List of Backends API # @rtype: ProvisionerResponse # @returns: Provisioner response data contain List of Backends # result_response = self.provisioner_get_request('backends') if result_response.status == "Success": result_entity = JsonToClass(result_response.prov_entities, True) return entities.ProvisionerResponse(result_entity.records) return result_response def get_backend_by_id(self, uuid): # Call to List of Backends API # @rtype: ProvisionerResponse # @returns: Provisioner response data contain List of Backends # result_response = self.provisioner_get_request('backends/' + uuid) if result_response.status == "Success": result_entity = JsonToClass(result_response.prov_entities, True) return entities.ProvisionerResponse(result_entity.records) return result_response def get_volumes(self, tenant_uuid=None): # Call to List of Volumes API # @rtype: ProvisionerResponse # @returns: Provisioner response data contain List of Volumes # tenant_id = "" if tenant_uuid is not None: tenant_id = tenant_uuid + "/" result_response = self.provisioner_get_request(tenant_id + 'volumes') if result_response.status == "Success": result_entity = JsonToClass(result_response.prov_entities, True) return entities.ProvisionerResponse(result_entity.records) return result_response def get_volumes_by_alias(self, alias, tenant_uuid=None): # Call to List of Volumes API # @rtype: ProvisionerResponse # @returns: Provisioner response data contain List of Volumes # tenant_id = "" if tenant_uuid is not None: tenant_id = tenant_uuid + "/" result_response = self.provisioner_get_request( tenant_id + 'volumes_by_alias/' + alias) if result_response.status == "Success": 
result_entity = JsonToClass(result_response.prov_entities, True) return entities.ProvisionerResponse(result_entity.records) return result_response def get_volumes_by_uuid( self, volume_uuid, tenant_uuid=None): # Call to List of Volumes API # @rtype: ProvisionerResponse # @returns: Provisioner response data contain List of Volumes # tenant_id = "" if tenant_uuid is not None: tenant_id = tenant_uuid + "/" result_response = self.provisioner_get_request( tenant_id + 'volumes/' + volume_uuid) if result_response.status == "Success": result_entity = JsonToClass(result_response.prov_entities, True) return entities.ProvisionerResponse(result_entity.records) return result_response def add_replica( self, replica_entity, volume_uuid, tenant_uuid=None): # Call to Add Replica API # @rtype: ProvisionerResponse # @returns: Provisioner response data # tenant_id = "" if tenant_uuid is not None: tenant_id = tenant_uuid + "/" j = replica_entity.to_json() result_response = self.provisioner_post_request( tenant_id + 'replica/' + volume_uuid, j) return result_response def delete_replica( self, volume_uuid, replica_uuid, tenant_uuid=None): # Call to Delete Replica API # @rtype: ProvisionerResponse # @returns: Provisioner response data # tenant_id = "" if tenant_uuid is not None: tenant_id = tenant_uuid + "/" result_response = self.provisioner_patch_request( tenant_id + 'replica/' + volume_uuid + "/" + replica_uuid) return result_response def delete_replica_confirm( self, volume_uuid, replica_uuid, tenant_uuid=None): # Call to Delete Replica Confirm API # @rtype: ProvisionerResponse # @returns: Provisioner response data # tenant_id = "" if tenant_uuid is not None: tenant_id = tenant_uuid + "/" result_response = self.provisioner_delete_request( tenant_id + 'replica/' + volume_uuid + "/" + replica_uuid) return result_response def create_volume(self, volume_entity, tenant_uuid=None): # Call to Create Volume API # @rtype: ProvisionerResponse # @returns: Provisioner response data # tenant_id = "" if tenant_uuid is not None: tenant_id = tenant_uuid + "/" j = volume_entity.to_json() result_response = self.provisioner_post_request( tenant_id + 'volumes', j) return result_response def delete_volume(self, volume_uuid, tenant_uuid=None): # Call to Delete Volume API # @rtype: ProvisionerResponse # @returns: Provisioner response data # tenant_id = "" if tenant_uuid is not None: tenant_id = tenant_uuid + "/" result_response = self.provisioner_delete_request( tenant_id + 'volumes/' + volume_uuid) return result_response def expand_volume( self, new_capacity, volume_uuid, tenant_uuid=None): # Call to Expand Volume API # @rtype: ProvisionerResponse # @returns: Provisioner response data # tenant_id = "" if tenant_uuid is not None: tenant_id = tenant_uuid + "/" entity = ClassBuilder(newCapacity=str(new_capacity)) j = entity.to_json() result_response = self.provisioner_patch_request( tenant_id + 'volumes/' + volume_uuid, j) return result_response def set_replica_state( self, volume_uuid, replica_uuid, state, tenant_uuid=None): # Call to Set Replica State API # @rtype: ProvisionerResponse # @returns: Provisioner response data # tenant_id = "" if tenant_uuid is not None: tenant_id = tenant_uuid + "/" result_response = self.provisioner_patch_request( tenant_id + 'replica/' + volume_uuid + "/" + replica_uuid + "/" + str(state)) return result_response def get_snapshots( self, snapshot_uuid=None, tenant_uuid=None): # Call to List of Snapshots API # @rtype: ProvisionerResponse # @returns: Provisioner response data contain List of Volumes # 
tenant_id = "" if tenant_uuid is not None: tenant_id = tenant_uuid + "/" if snapshot_uuid is None: result_response = self.provisioner_get_request( tenant_id + 'snapshots') else: result_response = self.provisioner_get_request( tenant_id + 'snapshots/' + snapshot_uuid) if result_response.status == "Success": result_entity = JsonToClass(result_response.prov_entities, True) return entities.ProvisionerResponse(result_entity.records) return result_response def get_snapshots_by_vol( self, volume_uuid, tenant_uuid=None): # Call to Get Snapshot Information via Volume UUID API # @rtype: ProvisionerResponse # @returns: Provisioner response data contain List of Volumes # tenant_id = "" if tenant_uuid is not None: tenant_id = tenant_uuid + "/" result_response = self.provisioner_get_request( tenant_id + 'snapshots_by_vol/' + volume_uuid) if result_response.status == "Success": result_entity = JsonToClass(result_response.prov_entities, True) return entities.ProvisionerResponse(result_entity.records) return result_response def get_snapshots_by_alias(self, alias, tenant_uuid=None): # Call to Get Snapshot Information via alias API # @rtype: ProvisionerResponse # @returns: Provisioner response data contain List of Volumes # tenant_id = "" if tenant_uuid is not None: tenant_id = tenant_uuid + "/" result_response = self.provisioner_get_request( tenant_id + 'snapshots_by_alias/' + alias) if result_response.status == "Success": result_entity = JsonToClass(result_response.prov_entities, True) return entities.ProvisionerResponse(result_entity.records) return result_response def set_license(self, license_key): # Call to Set License API # @rtype: ProvisionerResponse # @returns: Provisioner response data # entity = ClassBuilder(license=license_key) j = entity.to_json() result_response = self.provisioner_post_request('license', j) return result_response def get_license(self): # Call to Get License API # @rtype: ProvisionerResponse # @returns: Provisioner response data # result_response = self.provisioner_get_request('license') if result_response.status == "Success": result_entity = JsonToClass(result_response.prov_entities, True) return entities.ProvisionerResponse(result_entity) return result_response def get_inventory(self): # Call to Get Inventory API # @rtype: ProvisionerResponse # @returns: Provisioner response data # result_response = self.provisioner_get_request('inventory') return result_response def reset_inventory(self): # Call to Reset Inventory API # @rtype: ProvisionerResponse # @returns: Provisioner response data # result_response = self.provisioner_delete_request('reset_inventory') return result_response def get_syslogs(self): # Call to Get Syslogs API # @rtype: ProvisionerResponse # @returns: Provisioner response data # result_response = self.provisioner_get_request('syslog') if result_response.status == "Success": result_entity = JsonToClass(result_response.prov_entities, True) return entities.ProvisionerResponse(result_entity.records) return result_response def create_snapshot( self, snapshot_entity, tenant_uuid=None): # Call to Create Snapshot API # @rtype: ProvisionerResponse # @returns: Provisioner response data # tenant_id = "" if tenant_uuid is not None: tenant_id = tenant_uuid + "/" j = snapshot_entity.to_json() result_response = self.provisioner_post_request( tenant_id + 'snapshots', j) return result_response def delete_snapshot( self, snapshot_uuid, tenant_uuid=None): # Call to Delete Snapshot API # @rtype: ProvisionerResponse # @returns: Provisioner response data # tenant_id = "" if 
tenant_uuid is not None: tenant_id = tenant_uuid + "/" result_response = self.provisioner_delete_request( tenant_id + 'snapshots/' + snapshot_uuid) return result_response def create_snapshot_volume( self, snapshot_volume_entity, tenant_uuid=None): # Call to Create Snapshot Volume API # @rtype: ProvisionerResponse # @returns: Provisioner response data # tenant_id = "" if tenant_uuid is not None: tenant_id = tenant_uuid + "/" j = snapshot_volume_entity.to_json() result_response = self.provisioner_post_request( tenant_id + 'snapshot_volumes', j) return result_response def forward_log(self, forward_entity): # Call to Forward Log API # @rtype: ProvisionerResponse # @returns: Provisioner response data # j = forward_entity.to_json() result_response = self.provisioner_post_request('forward_log', j) return result_response def get_hosts(self): # Call to Get Hosts API # @rtype: ProvisionerResponse # @returns: Provisioner response data # result_response = self.provisioner_get_request('hosts') if result_response.status == "Success": result_entity = JsonToClass(result_response.prov_entities, True) return entities.ProvisionerResponse(result_entity.records) return result_response def get_hosts_by_name(self, host_name): # Call to Get Hosts API # @rtype: ProvisionerResponse # @returns: Provisioner response data # result_response = self.provisioner_get_request( 'hosts?hostName=' + host_name) if result_response.status == "Success": result_entity = JsonToClass(result_response.prov_entities, True) return entities.ProvisionerResponse(result_entity.records) return result_response def delete_host(self, host_uuid): # Call to Delete Host API # @rtype: ProvisionerResponse # @returns: Provisioner response data # result_response = self.provisioner_delete_request('hosts/' + host_uuid) return result_response def get_targets(self, host_uuid, volume_uuid): # Call to Get Targets API # @rtype: ProvisionerResponse # @returns: Provisioner response data # if host_uuid is None and volume_uuid is None: return entities.ProvisionerResponse( None, None, "ParametersError", "All parameters missing") if host_uuid is not None: request = "?hostId=" + host_uuid else: request = "?volId=" + volume_uuid if host_uuid is not None and volume_uuid is not None: request += "&volId=" + volume_uuid result_response = self.provisioner_get_request('targets' + request) if result_response.status == "Success": result_entity = JsonToClass(result_response.prov_entities, True) return entities.ProvisionerResponse(result_entity.records) return result_response def publish( self, host_uuid, volume_uuid, tenant_uuid=None): # Call to Pablish API # @rtype: ProvisionerResponse # @returns: Provisioner response data # tenant_id = "" if tenant_uuid is not None: tenant_id = tenant_uuid + "/" entity = ClassBuilder(hostId=host_uuid, volId=volume_uuid) j = entity.to_json() result_response = self.provisioner_post_request( tenant_id + 'publish', j) return result_response def unpublish( self, host_uuid, volume_uuid, tenant_uuid=None): # Call to UnPablish API # @rtype: ProvisionerResponse # @returns: Provisioner response data # tenant_id = "" if tenant_uuid is not None: tenant_id = tenant_uuid + "/" entity = ClassBuilder(hostId=host_uuid, volId=volume_uuid) j = entity.to_json() result_response = self.provisioner_post_request( tenant_id + 'unpublish', j) return result_response def host_probe(self, host_nqn, host_uuid, host_name, client_type, sw_version, duration_in_sec): # Call to Host Probe API # @rtype: ProvisionerResponse # @returns: Provisioner response data # entity = 
ClassBuilder( hostNqn=host_nqn, hostId=host_uuid, name=host_name, clientType=client_type, version=sw_version, duration=duration_in_sec) j = entity.to_json() result_response = self.provisioner_post_request('host_probe', j) return result_response def migrate_volume( self, volume_uuid, replica_uuid, tenant_uuid=None): # Call to Migrate Volume API # @rtype: ProvisionerResponse # @returns: Provisioner response data # tenant_id = "" if tenant_uuid is not None: tenant_id = tenant_uuid + "/" entity = ClassBuilder(volId=volume_uuid, repId=replica_uuid) j = entity.to_json() result_response = self.provisioner_post_request( tenant_id + 'migrate_volume', j) return result_response def get_tasks(self, task_id=None, host_id=None): # Call to Get Tasks API # @rtype: ProvisionerResponse # @returns: Provisioner response data # if task_id is not None: cmd = "tasks?taskId=" + str(task_id) elif host_id is not None: cmd = "tasks?hostId=" + str(host_id) else: cmd = "tasks" result_response = self.provisioner_get_request(cmd) if result_response.status == "Success": result_entity = JsonToClass(result_response.prov_entities, True) return entities.ProvisionerResponse(result_entity.records) return result_response def remove_task(self, task_id, host_id=None): # Call to Remove Task API # @rtype: ProvisionerResponse # @returns: Provisioner response data # cmd = 'tasks?taskId=' + task_id if host_id is not None: cmd += "&hostId=" + host_id result_response = self.provisioner_delete_request(cmd) return result_response def update_task(self, task_id, host_id, state=None, progress=None, status=None, description=None, tags=None): # Call to Update Task API # @rtype: ProvisionerResponse # @returns: Provisioner response data # entity = ClassBuilder( taskId=task_id, hostId=host_id, state=state, progress=progress, taskStatus=status, statusDescription=description, taskConfiguration=tags) j = entity.to_json() result_response = self.provisioner_put_request('tasks', j) return result_response def create_tenant(self, tenant_entity): # Call to Create Tenant API # @rtype: ProvisionerResponse # @returns: Provisioner response data # j = tenant_entity.to_json() result_response = self.provisioner_post_request('tenants', j) return result_response def delete_tenant(self, tenant_uuid): # Call to Delete Tenant API # @rtype: ProvisionerResponse # @returns: Provisioner response data # result_response = self.provisioner_delete_request( 'tenants/' + tenant_uuid) return result_response def modify_tenant(self, tenant_entity, tenant_uuid): # Call to Modify Tenant API # @rtype: ProvisionerResponse # @returns: Provisioner response data # j = tenant_entity.to_json() result_response = self.provisioner_put_request( 'tenants/' + tenant_uuid, j) return result_response def get_tenants(self): # Call to List of Tenants API # @rtype: ProvisionerResponse # @returns: Provisioner response data contain List of Volumes # result_response = self.provisioner_get_request('tenants') if result_response.status == "Success": result_entity = JsonToClass(result_response.prov_entities, True) return entities.ProvisionerResponse(result_entity.records) return result_response def clone_volume(self, clone_entity, tenant_uuid=None): # Call to Clone Volume API # @rtype: ProvisionerResponse # @returns: Provisioner response data # tenant_id = "" if tenant_uuid is not None: tenant_id = tenant_uuid + "/" j = clone_entity.to_json() result_response = self.provisioner_post_request( tenant_id + 'clone_volume', j) return result_response def get_non_implemented(self, param1=None, param2=None): # Call 
to Get Not Implemented Answer API # @rtype: KSResponse # @returns: KumoScale response data # return entities.ProvisionerResponse(None, None, "Not implemented") ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1759315577.375121 cinder-27.0.0/cinder/volume/drivers/lenovo/0000775000175000017500000000000000000000000020622 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/drivers/lenovo/__init__.py0000664000175000017500000000000000000000000022721 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/drivers/lenovo/lenovo_client.py0000664000175000017500000000165300000000000024041 0ustar00zuulzuul00000000000000# Copyright 2014 Objectif Libre # Copyright 2015 DotHill Systems # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # import cinder.volume.drivers.stx.client as client class LenovoClient(client.STXClient): def __init__(self, host, login, password, protocol, ssl_verify): super(LenovoClient, self).__init__(host, login, password, protocol, ssl_verify) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/drivers/lenovo/lenovo_common.py0000664000175000017500000000737500000000000024062 0ustar00zuulzuul00000000000000# Copyright 2014 Objectif Libre # Copyright 2015 DotHill Systems # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
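# A minimal construction sketch (editorial illustration; the address and
# credentials are hypothetical) for the LenovoClient defined in
# lenovo_client.py above, which simply forwards every argument to the Seagate
# (STX) client it inherits from:
#
#     from cinder.volume.drivers.lenovo.lenovo_client import LenovoClient
#
#     client = LenovoClient('192.0.2.10', 'manage', 'secret', 'https',
#                           ssl_verify=False)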
# from oslo_config import cfg from cinder.volume import configuration from cinder.volume import driver import cinder.volume.drivers.lenovo.lenovo_client as lenovo_client import cinder.volume.drivers.stx.common as common common_opts = [ cfg.StrOpt('lenovo_pool_name', deprecated_name='lenovo_backend_name', default='A', help="Pool or Vdisk name to use for volume creation."), cfg.StrOpt('lenovo_pool_type', deprecated_name='lenovo_backend_type', choices=['linear', 'virtual'], default='virtual', help="linear (for VDisk) or virtual (for Pool)."), cfg.StrOpt('lenovo_api_protocol', deprecated_for_removal=True, deprecated_reason='driver_use_ssl should be used instead.', choices=['http', 'https'], default='https', help="Lenovo api interface protocol."), cfg.BoolOpt('lenovo_verify_certificate', deprecated_for_removal=True, deprecated_reason='Use driver_ssl_cert_verify instead.', default=False, help="Whether to verify Lenovo array SSL certificate."), cfg.StrOpt('lenovo_verify_certificate_path', deprecated_for_removal=True, deprecated_reason='Use driver_ssl_cert_path instead.', help="Lenovo array SSL certificate path.") ] iscsi_opts = [ cfg.ListOpt('lenovo_iscsi_ips', default=[], help="List of comma-separated target iSCSI IP addresses."), ] CONF = cfg.CONF CONF.register_opts(common_opts, group=configuration.SHARED_CONF_GROUP) CONF.register_opts(iscsi_opts, group=configuration.SHARED_CONF_GROUP) class LenovoCommon(common.STXCommon): VERSION = "2.0" def __init__(self, config): self.config = config self.vendor_name = "Lenovo" self.backend_name = self.config.lenovo_pool_name self.backend_type = self.config.lenovo_pool_type self.api_protocol = self.config.lenovo_api_protocol ssl_verify = False # check for deprecated options... if (self.api_protocol == 'https' and self.config.lenovo_verify_certificate): ssl_verify = self.config.lenovo_verify_certificate_path or True # ...then check common options if self.config.driver_use_ssl: self.api_protocol = 'https' if self.config.driver_ssl_cert_verify: ssl_verify = self.config.driver_ssl_cert_path or True self.client = lenovo_client.LenovoClient(self.config.san_ip, self.config.san_login, self.config.san_password, self.api_protocol, ssl_verify) @staticmethod def get_driver_options(): additional_opts = driver.BaseVD._get_oslo_driver_opts( 'san_ip', 'san_login', 'san_password', 'driver_use_ssl', 'driver_ssl_cert_verify', 'driver_ssl_cert_path') return common_opts + additional_opts ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/drivers/lenovo/lenovo_fc.py0000664000175000017500000000340300000000000023146 0ustar00zuulzuul00000000000000# Copyright 2014 Objectif Libre # Copyright 2015 Dot Hill Systems Corp. # Copyright 2016-2019 Seagate Technology or one of its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
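# A configuration sketch (values are hypothetical) of how a cinder.conf
# backend section might combine the lenovo_* options registered in
# lenovo_common.py with the shared SAN/SSL options the driver exposes through
# get_driver_options():
#
#     [lenovo-fc]
#     volume_driver = cinder.volume.drivers.lenovo.lenovo_fc.LenovoFCDriver
#     volume_backend_name = lenovo-fc
#     san_ip = 192.0.2.20
#     san_login = manage
#     san_password = secret
#     lenovo_pool_name = A
#     lenovo_pool_type = virtual
#     driver_use_ssl = true
#     driver_ssl_cert_verify = false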
# from cinder import interface import cinder.volume.drivers.lenovo.lenovo_common as lenovo_common import cinder.volume.drivers.stx.fc as fc @interface.volumedriver class LenovoFCDriver(fc.STXFCDriver): """OpenStack Fibre Channel cinder drivers for Lenovo Storage arrays. .. code-block:: default Version history: 1.0 - Inheriting from DotHill cinder drivers. 1.6 - Add management path redundancy and reduce load placed on management controller. 2.0 - DotHill driver renamed to Seagate (STX) """ VERSION = "2.0" SUPPORTED = True # ThirdPartySystems wiki page CI_WIKI_NAME = "Lenovo_Storage_CI" def __init__(self, *args, **kwargs): super(LenovoFCDriver, self).__init__(*args, **kwargs) self.configuration.append_config_values(lenovo_common.common_opts) @staticmethod def get_driver_options(): return lenovo_common.LenovoCommon.get_driver_options() def _init_common(self): return lenovo_common.LenovoCommon(self.configuration) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/drivers/lenovo/lenovo_iscsi.py0000664000175000017500000000370100000000000023671 0ustar00zuulzuul00000000000000# Copyright 2014 Objectif Libre # Copyright 2015 Dot Hill Systems Corp. # Copyright 2016-2019 Seagate Technology or one of its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # from cinder import interface import cinder.volume.drivers.lenovo.lenovo_common as lenovo_common import cinder.volume.drivers.stx.iscsi as iscsi @interface.volumedriver class LenovoISCSIDriver(iscsi.STXISCSIDriver): """OpenStack iSCSI cinder drivers for Lenovo Storage arrays. .. code-block:: default Version history: 1.0 - Inheriting from DotHill cinder drivers. 1.6 - Add management path redundancy and reduce load placed on management controller. 2.0 - DotHill driver renamed to Seagate (STX) """ VERSION = "2.0" SUPPORTED = True # ThirdPartySystems wiki page CI_WIKI_NAME = "Lenovo_Storage_CI" def __init__(self, *args, **kwargs): super(LenovoISCSIDriver, self).__init__(*args, **kwargs) self.configuration.append_config_values(lenovo_common.common_opts) self.configuration.append_config_values(lenovo_common.iscsi_opts) self.iscsi_ips = self.configuration.lenovo_iscsi_ips @staticmethod def get_driver_options(): return (lenovo_common.LenovoCommon.get_driver_options() + lenovo_common.iscsi_opts) def _init_common(self): return lenovo_common.LenovoCommon(self.configuration) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/drivers/lightos.py0000664000175000017500000017616400000000000021362 0ustar00zuulzuul00000000000000# Copyright (C) 2016-2022 Lightbits Labs Ltd. # Copyright (C) 2020 Intel Corporation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import http.client as httpstatus import json import random import time from typing import Dict from urllib.parse import urlparse from oslo_config import cfg from oslo_log import log as logging from oslo_utils import importutils from oslo_utils import netutils from oslo_utils import units import requests import urllib3 from cinder.common import constants from cinder import coordination from cinder import exception from cinder.i18n import _ from cinder import interface from cinder import utils from cinder.volume import configuration as config from cinder.volume import driver LOG = logging.getLogger(__name__) ENABLE_TRACE = True LIGHTOS_DEFAULT_PROJECT_NAME = "default" urllib3.disable_warnings() lightos_opts = [ cfg.ListOpt('lightos_api_address', default=None, item_type=cfg.types.IPAddress(), help='The IP addresses of the LightOS API servers separated' ' by commas.'), cfg.PortOpt('lightos_api_port', default='443', help='The TCP/IP port at which the LightOS API' ' endpoints listen.' ' Port 443 is used for HTTPS and other values' ' are used for HTTP.'), cfg.StrOpt('lightos_jwt', default=None, help='JWT to be used for volume and snapshot operations with' ' the LightOS cluster.' ' Do not set this parameter if the cluster is installed' ' with multi-tenancy disabled.'), cfg.IntOpt('lightos_default_num_replicas', min=1, max=3, default=3, help='The default number of replicas to create for each' ' volume.'), cfg.BoolOpt('lightos_default_compression_enabled', default=False, help='Set to True to create new volumes compressed assuming' ' no other compression setting is specified via the' ' volumes type.'), cfg.IntOpt('lightos_api_service_timeout', default=30, help='The default amount of time (in seconds) to wait for' ' an API endpoint response.'), cfg.BoolOpt('lightos_use_ipacl', default=True, help='IPACL work in conjunction with the standard NVME ACL.' ' A host must be in both the IPACL and the ACL of a volume to' ' access that volume. Cinder always sets the volume`s ACL.' ' If lightos_use_ipacl is set to True, Cinder will also add' ' the host`s IP addresses to a volume IPACL. If set to' ' False, any IP address may access the volume. The default' ' is True.'), cfg.IntOpt( 'lightos_api_service_snapshots_max_calls', default=5, help='The maximum number of calls to the LightOS' ' when creating snapshots. 
The default is 5 calls.') ] CONF = cfg.CONF CONF.register_opts(lightos_opts, group=config.SHARED_CONF_GROUP) BLOCK_SIZE = 8 LIGHTOS = "LIGHTOS" INTERM_SNAPSHOT_PREFIX = "for_clone_" class LightOSConnection(object): def __init__(self, conf): self.conf = conf self.access_key = None self.apiservers = self._init_api_servers() self._cur_api_server_idx = random.randint(0, len(self.apiservers) - 1) self.targets = dict() self.lightos_cluster_uuid = None self.subsystemNQN = None self._stats = {'total_capacity_gb': 0, 'free_capacity_gb': 0} # a single API call must have been answered in this time if the API # service/network were up self.api_timeout = self.conf.lightos_api_service_timeout def _init_api_servers(self) -> Dict[int, Dict]: # And verify that port is in range apiservers: Dict[int, Dict] = {} hosts = self.conf.lightos_api_address port = str(self.conf.lightos_api_port) apiservers = [dict(api_address=addr, api_port=port) for addr in hosts] return apiservers def _generate_lightos_cmd(self, cmd, **kwargs): """Generate command to be sent to LightOS API service""" def _joined_params(params): param_str = [] for k, v in params.items(): param_str.append("%s=%s" % (k, v)) return '&'.join(param_str) # Dictionary of applicable LightOS commands in the following format: # 'command': (method, API_URL, {optional parameters}) # This is constructed on the fly to include the caller-supplied kwargs # Can be optimized by only constructing the specific # command the user provided in cmd # API V2 common commands lightos_commands = { # cluster operations, 'get_cluster_info': ('GET', '/api/v2/clusterinfo', {}), 'get_cluster': ('GET', '/api/v2/cluster', {}), # node operations 'get_node': ('GET', '/api/v2/nodes/%s' % kwargs.get('UUID'), {}), 'get_nodes': ('GET', '/api/v2/nodes', {}), # volume operations 'create_volume': ('POST', '/api/v2/projects/%s/volumes' % kwargs.get( "project_name"), { 'name': kwargs.get('name'), 'size': kwargs.get('size'), 'replicaCount': kwargs.get('n_replicas'), 'compression': kwargs.get('compression'), 'acl': { 'values': kwargs.get('acl'), }, 'IPAcl': { 'values': kwargs.get('ip_acl'), }, 'sourceSnapshotUUID': kwargs.get( 'src_snapshot_uuid'), 'sourceSnapshotName': kwargs.get( 'src_snapshot_name'), }), 'delete_volume': ('DELETE', '/api/v2/projects/%s/volumes/%s' % (kwargs.get( "project_name"), kwargs.get("volume_uuid")), {}), 'update_volume': ('PUT', '/api/v2/projects/%s/volumes/%s' % (kwargs.get( "project_name"), kwargs.get("volume_uuid")), { 'acl': { 'values': kwargs.get('acl'), }, 'IPAcl': { 'values': kwargs.get('ip_acl'), }, }), 'extend_volume': ('PUT', '/api/v2/projects/%s/volumes/%s' % ( kwargs.get("project_name"), kwargs.get("volume_uuid")), { 'UUID': kwargs.get('volume_uuid'), 'size': kwargs.get('size'), }), # snapshots operations 'create_snapshot': ('POST', '/api/v2/projects/%s/snapshots' % kwargs.get( "project_name"), { 'name': kwargs.get('name'), 'sourceVolumeUUID': kwargs.get( 'src_volume_uuid'), 'sourceVolumeName': kwargs.get( 'src_volume_name'), }), 'delete_snapshot': ('DELETE', '/api/v2/projects/%s/snapshots/%s' % ( kwargs.get("project_name"), kwargs.get("snapshot_uuid")), {}), # get operations 'get_volume': ('GET', '/api/v2/projects/%s/volumes/%s' % ( kwargs.get("project_name"), kwargs.get("volume_uuid")), {}), 'get_volume_by_name': ('GET', '/api/v2/projects/%s/volumes/?name=%s' % ( kwargs.get("project_name"), kwargs.get("volume_name")), {}), 'list_volumes': ('GET', '/api/v2/projects/%s/volumes' % kwargs.get( "project_name"), {}), 'get_snapshot': ('GET', 
'/api/v2/projects/%s/snapshots/%s' % ( kwargs.get("project_name"), kwargs.get("snapshot_uuid")), {}), 'get_snapshot_by_name': ('GET', '/api/v2/projects/%s/snapshots' '/?Name=%s' % ( kwargs.get("project_name"), kwargs.get("snapshot_name")), {}) } if kwargs.get("qos_policy", None) is not None: lightos_commands['create_volume'][2]['qosPolicyUUID'] = \ str(kwargs.get("qos_policy")) if cmd not in lightos_commands: raise exception.UnknownCmd(cmd=cmd) else: (method, url, params) = lightos_commands[cmd] if method == 'GET': body = params elif method == 'DELETE': LOG.debug("DELETE params: %s", params) # For DELETE commands add parameters to the URL url += '?' + _joined_params(params) body = '' elif method == 'PUT': # For PUT commands add parameters to the URL body = params elif method == 'POST': body = params else: msg = (_('Method %(method)s is not defined') % {'method': method}) LOG.error(msg) raise AssertionError(msg) return (method, url, body) def pretty_print_req(self, req, timeout): request = req.method + ' ' + req.url header = ', '.join('"{}: {}"'.format(k, v) for k, v in req.headers.items()) LOG.debug('Req: %s Headers: %s Body: %s Timeout: %s', request, header, req.body, timeout) def send_cmd(self, cmd, timeout, **kwargs): """Send command to any LightOS REST API server.""" start_idx = self._cur_api_server_idx stop = time.time() + timeout while time.time() <= stop: server = self.apiservers[self._cur_api_server_idx] host = server['api_address'] port = server['api_port'] (success, status_code, data) = self.__send_cmd( cmd, host, port, self.api_timeout, **kwargs) if success: return (status_code, data) # go on to the next API server wrapping around as needed self._cur_api_server_idx = ( self._cur_api_server_idx + 1) % len(self.apiservers) # if we only have a single API server, keep trying it # if we have more than one and we tried all of them, give up if (self._cur_api_server_idx == start_idx and len(self.apiservers) > 1): break raise exception.VolumeDriverException( message="Could not get a response from any API server") def _format_endpoint(self, ip, port): ip_requires_bracketing = ':' in ip or '%' in ip template = "[%s]:%s" if ip_requires_bracketing else "%s:%s" return template % (ip, port) def __send_cmd(self, cmd, host, port, timeout, **kwargs): """Send command to LightOS REST API server. 
Returns: (success = True/False, data) """ ssl_verify = self.conf.driver_ssl_cert_verify (method, url, body) = self._generate_lightos_cmd(cmd, **kwargs) LOG.info( 'Invoking %(cmd)s using %(method)s url: %(url)s \ request.body: %(body)s ssl_verify: %(ssl_verify)s', {'cmd': cmd, 'method': method, 'url': url, 'body': body, 'ssl_verify': ssl_verify}) api_url = "https://%s%s" % (self._format_endpoint(host, port), url) try: with requests.Session() as session: req = requests.Request( method, api_url, data=json.dumps(body) if body else None) req.headers.update({'Accept': 'application/json'}) # -H 'Expect:' will prevent us from getting # the 100 Continue response from curl req.headers.update({'Expect': ''}) if method in ('POST', 'PUT'): req.headers.update({'Content-Type': 'application/json'}) if kwargs.get("etag"): req.headers.update({'If-Match': kwargs['etag']}) if self.conf.lightos_jwt: req.headers.update( {'Authorization': 'Bearer %s' % self.conf.lightos_jwt}) prepped = req.prepare() self.pretty_print_req(prepped, timeout) response = session.send( prepped, timeout=timeout, verify=ssl_verify) except Exception: LOG.exception("REST server not responding at '%s'", api_url) return (False, None, None) try: resp = response.json() except ValueError: resp = response.text data = resp LOG.debug( 'Resp(%s): code %s data %s', api_url, response.status_code, data) return (True, response.status_code, data) @interface.volumedriver class LightOSVolumeDriver(driver.VolumeDriver): """OpenStack NVMe/TCP cinder drivers for Lightbits LightOS. .. code-block:: default Version history: 2.3.12 - Initial upstream driver version. """ VERSION = '2.3.12' # ThirdPartySystems wiki page CI_WIKI_NAME = "LightbitsLabs_CI" SUPPORTS_ACTIVE_ACTIVE = True def __init__(self, *args, **kwargs): super(LightOSVolumeDriver, self).__init__(*args, **kwargs) self.configuration.append_config_values(lightos_opts) # connector implements NVMe/TCP initiator functionality. if not self.configuration.__dict__.get("initiator_connector", None): self.configuration.initiator_connector = ( "os_brick.initiator.connector.InitiatorConnector") if not self.configuration.__dict__.get("lightos_client", None): self.configuration.lightos_client = ( "cinder.volume.drivers.lightos.LightOSConnection") initiator_connector = importutils.import_class( self.configuration.initiator_connector) self.connector = initiator_connector.factory( LIGHTOS, root_helper=utils.get_root_helper(), message_queue=None, device_scan_attempts= self.configuration.num_volume_device_scan_tries) lightos_client_ctor = importutils.import_class( self.configuration.lightos_client) self.cluster = lightos_client_ctor(self.configuration) self.logical_op_timeout = \ self.configuration.lightos_api_service_timeout * 3 + 10 self.snapshots_retries = \ self.configuration.lightos_api_service_snapshots_max_calls @classmethod def get_driver_options(cls): additional_opts = cls._get_oslo_driver_opts( 'driver_ssl_cert_verify', 'reserved_percentage', 'volume_backend_name') return lightos_opts + additional_opts def create_cloned_volume(self, volume, src_vref): """Creates a clone of the specified volume. If volume_type extra specs includes 'replication: True' the driver needs to create a volume replica (secondary) and setup replication between the newly created volume and the secondary volume. 
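On this backend the clone is implemented with an intermediate snapshot: the
driver snapshots the source volume, creates the new volume from that
snapshot, and then deletes the intermediate snapshot (see the method body
below).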
""" project_name = self._get_lightos_project_name(volume) # Create an intermediate snapshot snapshot_name = self._interm_snapshotname(volume) src_volume_name = self._lightos_volname(src_vref) try: self._create_snapshot(project_name, snapshot_name, src_volume_name) except Exception as e: LOG.warning( "Failed to create intermediate snapshot \ %s from source volume %s.", snapshot_name, src_volume_name) raise e # Create a volume from the intermediate snapshot try: self._create_volume(volume, src_snapshot_lightos_name=snapshot_name) except Exception as e: LOG.error("Failed to create volume %s from intermediate " " snapshot %s. Trying to clean up.", src_volume_name, snapshot_name) raise e # Delete the intermediate snapshot finally: try: self._delete_lightos_snapshot(project_name, snapshot_name) except Exception as e: LOG.warning("Failed to delete the intermediate snapshot %s for" " volume %s. Trying to clean up.", snapshot_name, src_volume_name) raise e def create_export(self, context, volume, vg=None): """Irrelevant for lightos volumes. Export created during attachment. """ pass def ensure_export(self, context, volume): """Irrelevant for lightos volumes. Export created during attachment. """ pass def remove_export(self, context, volume): """Irrelevant for lightos volumes. Export removed during detach. """ pass def _get_lightos_volume( self, project_name, timeout, vol_uuid=None, vol_name=None): assert vol_uuid or vol_name, 'LightOS volume name or UUID \ must be specified' if vol_uuid: return self.cluster.send_cmd( cmd='get_volume', project_name=project_name, timeout=timeout, volume_uuid=vol_uuid) return self.cluster.send_cmd( cmd='get_volume_by_name', project_name=project_name, timeout=timeout, volume_name=vol_name) def _lightos_volname(self, volume): volid = volume.name_id lightos_volname = CONF.volume_name_template % volid return lightos_volname def _get_lightos_project_name(self, volume): try: extra_specs = volume.volume_type.extra_specs project_name = extra_specs.get( 'lightos:project_name', LIGHTOS_DEFAULT_PROJECT_NAME) except Exception: LOG.debug( "LIGHTOS volume %s has no lightos:project_name", volume) project_name = LIGHTOS_DEFAULT_PROJECT_NAME return project_name def _lightos_snapshotname(self, snapshot_id): return CONF.snapshot_name_template % snapshot_id def _interm_snapshotname(self, snapshot): id = snapshot['id'] return '%s%s' % (INTERM_SNAPSHOT_PREFIX, id) def _get_lightos_snapshot( self, project_name, timeout, snapshot_uuid=None, snapshot_name=None): assert snapshot_uuid or snapshot_name, 'LightOS snapshot name or \ UUID must be specified' if snapshot_uuid: return self.cluster.send_cmd( cmd='get_snapshot', project_name=project_name, timeout=timeout, snapshot_uuid=snapshot_uuid) return self.cluster.send_cmd( cmd='get_snapshot_by_name', project_name=project_name, timeout=timeout, snapshot_name=snapshot_name) def _wait_for_volume_available( self, project_name, timeout, vol_uuid=None, vol_name=None): """Wait until the volume is available.""" assert vol_uuid or vol_name, 'LightOS volume UUID or name \ must be supplied' # while creating lightos volume we can stop on any terminal status # possible states: Unknown, Creating, Available, Deleting, Deleted, # Failed, Updating, Migrating, Rollback states = ('Available', 'Deleting', 'Deleted', 'Failed', 'UNKNOWN', 'Migrating', 'Rollback') stop = time.time() + timeout while time.time() <= stop: (status_code, resp) = self._get_lightos_volume(project_name, timeout=self.logical_op_timeout, vol_uuid=vol_uuid, vol_name=vol_name) state = 
resp.get('state', 'UNKNOWN') if \ status_code == httpstatus.OK and resp else 'UNKNOWN' if state in states and status_code != httpstatus.NOT_FOUND: break time.sleep(1) return state def _parse_extra_spec(self, extra_spec_value, default_value): extra_spec_value = str(extra_spec_value) extra_spec_value = extra_spec_value.casefold() if "true" in extra_spec_value: return "True" elif "false" in extra_spec_value: return "False" return default_value def _get_volume_specs(self, volume): default_compression = 'True' if self.configuration. \ lightos_default_compression_enabled else 'False' num_replicas = str(self.configuration.lightos_default_num_replicas) if not volume.volume_type: return (default_compression, num_replicas, LIGHTOS_DEFAULT_PROJECT_NAME) specs = getattr(volume.volume_type, 'extra_specs', {}) type_compression = specs.get('compression', default_compression) compression = self._parse_extra_spec(type_compression, default_compression) num_replicas = str(specs.get('lightos:num_replicas', num_replicas)) qos_policy = specs.get('lightos:qos_policy', None) project_name = specs.get( 'lightos:project_name', LIGHTOS_DEFAULT_PROJECT_NAME) return (compression, num_replicas, project_name, qos_policy) def _create_new_lightos_volume(self, os_volume, project_name, lightos_name, src_snapshot_lightos_name=None): """Create a new LightOS volume for this openstack volume.""" (compression, num_replicas, _, qos_policy) = \ self._get_volume_specs(os_volume) vol_ipAcl = ['ALLOW_NONE'] if self.use_ip_acl() else ['ALLOW_ANY'] return self.cluster.send_cmd( cmd='create_volume', project_name=project_name, timeout=self.logical_op_timeout, name=lightos_name, size=str(os_volume['size']) + ' gib', n_replicas=num_replicas, compression=compression, src_snapshot_name=src_snapshot_lightos_name, acl=['ALLOW_NONE'], ip_acl=vol_ipAcl, qos_policy=qos_policy) def _get_lightos_uuid(self, project_name, volume): lightos_name = self._lightos_volname(volume) timeout = self.logical_op_timeout (status, data) = self._get_lightos_volume(project_name=project_name, timeout=timeout, vol_name=lightos_name) if status != httpstatus.OK or not data: LOG.error( 'Failed to get LightOS volume %s project %s status: \ %s data: %s', lightos_name, project_name, status, str(data)) raise exception.VolumeNotFound(volume_id=volume) lightos_uuid = data.get('UUID') if not lightos_uuid: LOG.error('Failed to get LightOS volume UUID status: %s, data: %s', status, str(data)) raise exception.VolumeNotFound(volume_id=volume) return lightos_uuid def create_volume(self, volume): return self._create_volume(volume, src_snapshot_lightos_name=None) def create_volume_from_snapshot(self, volume, snapshot): snapshotname = self._lightos_snapshotname(snapshot["id"]) return self._create_volume(volume, src_snapshot_lightos_name=snapshotname) def _create_volume(self, volume, src_snapshot_lightos_name): lightos_name = self._lightos_volname(volume) project_name = self._get_lightos_project_name(volume) lightos_uuid = '' vol_state = 'UNKNOWN' # first, check if such a volume exists # if it exists, we must have created it earlier in a previous # invocation of create volume since it takes a while for # openstack to retry the call, it's highly unlikely that we created # it but it does not show up yet, so assume that if it does not show # up, it was never created status_code, resp = self._get_lightos_volume(project_name, timeout=self. 
logical_op_timeout, vol_name=lightos_name) if status_code == httpstatus.NOT_FOUND: status_code, resp = self._create_new_lightos_volume( os_volume=volume, project_name=project_name, lightos_name=lightos_name, src_snapshot_lightos_name=src_snapshot_lightos_name) if status_code in (httpstatus.OK, httpstatus.CREATED): lightos_uuid = resp['UUID'] vol_state = self._wait_for_volume_available( project_name, timeout=self.logical_op_timeout, vol_uuid=lightos_uuid) allowed_states = ['Available', 'Migrating'] if vol_state in allowed_states: LOG.debug( "LIGHTOS created volume name %s lightos_uuid \ %s project %s", lightos_name, lightos_uuid, project_name) return # if volume was created in failed state we should clean it up LOG.warning( 'LightOS volume with UUID %s project %s last_state is %s', lightos_uuid, project_name, vol_state) if vol_state != 'UNKNOWN': LOG.debug( 'Cleaning up LightOS volume with UUID %s project %s', lightos_uuid, project_name) self._delete_lightos_volume(project_name, lightos_uuid) # wait for openstack to call us again to create it msg = ( "Did not succeed creating LightOS volume with UUID %(uuid)s" " status_code %(code)s last state %(state)s" % dict(uuid=lightos_uuid, code=status_code, state=vol_state)) msg = _(msg) raise exception.VolumeBackendAPIException(message=msg) def _wait_for_snapshot_available(self, project_name, timeout, snapshot_uuid=None, snapshot_name=None): """Wait until the snapshot is available.""" assert snapshot_uuid or snapshot_name, \ 'LightOS snapshot UUID or name must be supplied' # we can stop on any terminal status # possible states: Unknown, Creating, Available, Deleting, Deleted, # Failed, Updating states = ('Available', 'Deleting', 'Deleted', 'Failed', 'UNKNOWN') stop = time.time() + timeout while time.time() <= stop: (status_code, resp) = self._get_lightos_snapshot(project_name, timeout= self.logical_op_timeout, snapshot_uuid=snapshot_uuid, snapshot_name=snapshot_name) state = resp.get('state', 'UNKNOWN') if \ status_code == httpstatus.OK and resp else 'UNKNOWN' if state in states and status_code != httpstatus.NOT_FOUND: break time.sleep(1) return state def _wait_for_snapshot_deleted(self, project_name, timeout, snapshot_uuid): """Wait until the snapshot has been deleted.""" assert snapshot_uuid, 'LightOS snapshot UUID must be specified' states = ('Deleted', 'Deleting', 'UNKNOWN') stop = time.time() + timeout while time.time() <= stop: status_code, resp = ( self._get_lightos_snapshot(project_name, timeout=self.logical_op_timeout, snapshot_uuid=snapshot_uuid)) if status_code == httpstatus.NOT_FOUND: return 'Deleted' state = resp.get('state', 'UNKNOWN') if \ status_code == httpstatus.OK and resp else 'UNKNOWN' if state in states: break time.sleep(1) return state def _wait_for_volume_deleted(self, project_name, timeout, vol_uuid): """Wait until the volume has been deleted.""" assert vol_uuid, 'LightOS volume UUID must be specified' states = ('Deleted', 'Deleting', 'UNKNOWN') stop = time.time() + timeout while time.time() <= stop: (status_code, resp) = self._get_lightos_volume(project_name, timeout=self.logical_op_timeout, vol_uuid=vol_uuid) if status_code == httpstatus.NOT_FOUND: return 'Deleted' state = resp.get('state', 'UNKNOWN') if \ status_code == httpstatus.OK and resp else 'UNKNOWN' if state in states: break time.sleep(1) return state def _delete_lightos_volume(self, project_name, lightos_uuid): end = time.time() + self.logical_op_timeout while (time.time() < end): status_code, resp = ( self.cluster.send_cmd( cmd='delete_volume', 
project_name=project_name, timeout=self. logical_op_timeout, volume_uuid=lightos_uuid)) if status_code == httpstatus.OK: break LOG.warning( "delete_volume for volume with LightOS UUID %s failed \ with status code %s response %s", lightos_uuid, status_code, resp) time.sleep(1) else: # no break LOG.error( "Failed to delete volume with LightOS UUID %s. Final status \ code %s response %s", lightos_uuid, status_code, resp) return False deleted_state = self._wait_for_volume_deleted( project_name, timeout=self.logical_op_timeout, vol_uuid=lightos_uuid) return deleted_state in ('Deleted', 'Deleting', 'UNKNOWN') def delete_volume(self, volume): """Delete volume.""" project_name = self._get_lightos_project_name(volume) try: lightos_uuid = self._get_lightos_uuid(project_name, volume) except exception.VolumeNotFound: return True if not self._delete_lightos_volume(project_name, lightos_uuid): msg = ('Failed to delete LightOS volume with UUID' ' %(uuid)s project %(project_name)s' % ( dict(uuid=lightos_uuid, project_name=project_name))) raise exception.VolumeBackendAPIException(message=msg) def get_vol_by_id(self, volume): LOG.warning('UNIMPLEMENTED: get vol by id') def get_vols(self): LOG.warning('UNIMPLEMENTED: get vols') def check_for_setup_error(self): subsysnqn = self.cluster.subsystemNQN if not subsysnqn: msg = ('LIGHTOS: Cinder driver requires the' ' LightOS cluster subsysnqn') raise exception.VolumeBackendAPIException(message=msg) hostnqn = ( self.connector.get_connector_properties( utils.get_root_helper())['nqn']) if not hostnqn: msg = ("LIGHTOS: Cinder driver requires a local hostnqn for" " image_to/from_volume operations") raise exception.VolumeBackendAPIException(message=msg) def get_cluster_info(self): status_code, cluster_info = self.cluster.send_cmd( cmd='get_cluster_info', timeout=self.logical_op_timeout) if status_code == httpstatus.UNAUTHORIZED: msg = f'LIGHTOS: failed to connect to cluster. 
code: {status_code}' raise exception.InvalidAuthKey(message=_(msg)) if status_code != httpstatus.OK: msg = 'LIGHTOS: Could not connect to LightOS cluster' raise exception.VolumeBackendAPIException(message=_(msg)) LOG.info("Connected to LightOS cluster %s subsysnqn %s", cluster_info['UUID'], cluster_info['subsystemNQN']) self.cluster.lightos_cluster_uuid = cluster_info['UUID'] self.cluster.subsystemNQN = cluster_info['subsystemNQN'] def get_cluster_stats(self): status_code, cluster_info = self.cluster.send_cmd( cmd='get_cluster', timeout=self.logical_op_timeout) if status_code != httpstatus.OK: msg = 'LIGHTOS: Could not connect to LightOS cluster' raise exception.VolumeBackendAPIException(message=_(msg)) return cluster_info['statistics'] def valid_nodes_info(self, nodes_info): if not nodes_info or 'nodes' not in nodes_info: return False return True def wait_for_lightos_cluster(self): cmd = 'get_nodes' end = time.time() + self.logical_op_timeout while (time.time() < end): status_code, nodes_info = self.cluster.send_cmd( cmd=cmd, timeout=self.logical_op_timeout) if status_code != httpstatus.OK or not self.valid_nodes_info( nodes_info): time.sleep(1) continue return nodes_info # bail out if we got here, timeout elapsed msg = 'Failed to get nodes, last status was {} nodes_info {}'.format( status_code, nodes_info) raise exception.VolumeBackendAPIException(message=_(msg)) def do_setup(self, context): self.get_cluster_info() nodes_info = self.wait_for_lightos_cluster() self.cluster.targets = dict() node_list = nodes_info['nodes'] for node in node_list: self.cluster.targets[node['UUID']] = node # reduce the logical op timeout if single server LightOS cluster if len(node_list) == 1: self.logical_op_timeout = self.configuration. \ lightos_api_service_timeout + 10 def extend_volume(self, volume, size): # loop because lightos api is async end = time.time() + self.logical_op_timeout while (time.time() < end): try: finished = self._extend_volume(volume, size) if finished: break except exception.VolumeNotFound as e: raise e except Exception as e: # bail out if the time out elapsed... if time.time() >= end: LOG.warning('Timed out extend volume operation') raise e # if we still have more time, just print the exception LOG.warning( 'caught this in extend_volume() ... will retry: %s', str(e)) time.sleep(1) def _extend_volume(self, volume, size): lightos_volname = self._lightos_volname(volume) project_name = self._get_lightos_project_name(volume) try: (status, data) = self._get_lightos_volume( project_name, timeout=self. logical_op_timeout, vol_name=lightos_volname) if status != httpstatus.OK or not data: LOG.error( 'Failed to get LightOS volume status: %s data: %s', status, str(data)) raise exception.VolumeNotFound(volume_id=volume.id) lightos_uuid = data['UUID'] etag = data.get('ETag', '') except Exception as e: raise e try: code, message = self.cluster.send_cmd( cmd='extend_volume', project_name=project_name, timeout=self.logical_op_timeout, volume_uuid=lightos_uuid, size=str(size) + ' gib', etag=etag ) if code == httpstatus.OK: LOG.info( "Successfully extended volume %s project %s size:%s", volume, project_name, size) else: raise exception.ExtendVolumeError(reason=message) except exception.ExtendVolumeError as e: raise e except Exception as e: raise exception.ExtendVolumeError(raised_exception=e) return True @staticmethod def byte_to_gb(bbytes): return int(int(bbytes) / units.Gi) def get_volume_stats(self, refresh=False): """Retrieve stats info for the volume *service*, not a specific volume. 
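Note that free capacity is reported as 'infinite' rather than the cluster's
actual free physical storage; see the comment in the method body below
referencing Cinder bug 1871371.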
""" LOG.debug("getting volume stats (refresh=%s)", refresh) if not refresh: return self._stats backend_name = self.configuration.safe_get('volume_backend_name') res_percentage = self.configuration.safe_get('reserved_percentage') # as a tenant we dont have access to cluster stats # in the future we might expose this per project via get_project API # currently we remove this stats call. # cluster_stats = self.get_cluster_stats() data = {'vendor_name': 'LightOS Storage', 'volume_backend_name': backend_name or self.__class__.__name__, 'driver_version': self.VERSION, 'storage_protocol': constants.LIGHTOS, 'reserved_percentage': res_percentage, 'QoS_support': True, 'online_extend_support': True, 'thin_provisioning_support': True, 'compression': [True, False], 'multiattach': True} # data['total_capacity_gb'] = # self.byte_to_gb(cluster_stats['effectivePhysicalStorage']) # It would be preferable to return # self.byte_to_gb(cluster_stats['freePhysicalStorage']) # here but we return 'infinite' due to the Cinder bug described in # https://bugs.launchpad.net/cinder/+bug/1871371 data['free_capacity_gb'] = 'infinite' self._stats = data return self._stats def _get_connection_properties(self, project_name, volume): lightos_targets = {} for target in self.cluster.targets.values(): properties = dict() ep = urlparse('//' + target['nvmeEndpoint']) properties['target_portal'] = ep.hostname properties['target_port'] = 8009 # spec specified discovery port properties['transport_type'] = 'tcp' lightos_targets[ep.hostname] = properties server_properties = {} server_properties['lightos_nodes'] = lightos_targets server_properties['uuid'] = ( self._get_lightos_uuid(project_name, volume)) server_properties['subsysnqn'] = self.cluster.subsystemNQN return server_properties def set_volume_acl(self, project_name, lightos_uuid, acl, ip_acl, etag): return self.cluster.send_cmd( cmd='update_volume', project_name=project_name, timeout=self.logical_op_timeout, volume_uuid=lightos_uuid, acl=acl, ip_acl=ip_acl, etag=etag ) def use_ip_acl(self): return self.configuration.lightos_use_ipacl def __add_volume_acl(self, project_name, lightos_volname, acl_to_add, host_ips): (status, data) = self._get_lightos_volume(project_name, self.logical_op_timeout, vol_name=lightos_volname) if status != httpstatus.OK or not data: LOG.error('Failed to get LightOS volume %s status %s data %s', lightos_volname, status, data) return False lightos_uuid = data.get('UUID') if not lightos_uuid: LOG.warning('Got LightOS volume without UUID?! data: %s', data) return False acl = data.get('acl') if not acl: LOG.warning('Got LightOS volume without ACL?! data: %s', data) return False ip_acl = data.get('IPAcl') if self.use_ip_acl() and not ip_acl: LOG.warning('Got LightOS volume without IP ACL?! data: %s', data) return False acl = acl.get('values', []) ip_acl = ip_acl.get('values', []) # remove ALLOW_NONE and add our acl_to_add if not already there if 'ALLOW_NONE' in acl: acl.remove('ALLOW_NONE') if acl_to_add not in acl: acl.append(acl_to_add) if 'ALLOW_NONE' in ip_acl: ip_acl.remove('ALLOW_NONE') if self.use_ip_acl(): ip_acl = list(set(ip_acl).union(set(host_ips))) else: ip_acl = ['ALLOW_ANY'] # The max (16) elemenets are allowed in IPACL. # if elements are more than 16 then remove # less-frequently used IPv6 address(s), and IPv4 if needed. 
ipv4addrs = [addr for addr in ip_acl if netutils.is_valid_ipv4(addr)] ipv6addrs = [addr for addr in ip_acl if netutils.is_valid_ipv6(addr)] IpAcl_size = 16 if len(ipv4addrs) > IpAcl_size: LOG.warning( 'IPv4 address(es) are more than maximum (%s)' ' allowed in IP-ACL of volume, therefore reducing' ' IPv4 address(es) written to IP-ACL of volume %s' ' of project %s', IpAcl_size, lightos_volname, project_name) ip_acl = ipv4addrs[0: IpAcl_size] elif len(ip_acl) > IpAcl_size: LOG.warning( 'Combined IPv4 and IPv6 address(es) are more than' ' maximum (%s) allowed in IP-ACL of volume, therefore' ' reducing IPv6 address(es) written to IP-ACL of' ' volume %s of project %s', IpAcl_size, lightos_volname, project_name) ipv6addrs_count = IpAcl_size - len(ipv4addrs) ip_acl = ipv4addrs + (ipv6addrs[0: ipv6addrs_count]) return self.set_volume_acl( project_name, lightos_uuid, acl, ip_acl, etag=data.get( 'ETag', '')) def add_volume_acl(self, project_name, volume, acl_to_add, host_ips): LOG.debug( 'add_volume_acl got volume %s project %s acl %s', volume, project_name, acl_to_add) lightos_volname = self._lightos_volname(volume) return self.update_volume_acl( self.__add_volume_acl, project_name, lightos_volname, acl_to_add, host_ips) def __remove_volume_acl( self, project_name, lightos_volname, acl_to_remove, host_ips): (status, data) = self._get_lightos_volume(project_name, self.logical_op_timeout, vol_name=lightos_volname) if not data: LOG.error( 'Could not get data for LightOS volume %s project %s', lightos_volname, project_name) return False lightos_uuid = data.get('UUID') if not lightos_uuid: LOG.warning('Got LightOS volume without UUID?! data: %s', data) return False acl = data.get('acl') if not acl: LOG.warning('Got LightOS volume without ACL?! data: %s', data) return False acl = acl.get('values') if not acl: LOG.warning( 'Got LightOS volume without ACL values?! data: %s', data) return False try: acl.remove(acl_to_remove) except ValueError: LOG.warning( 'Could not remove acl %s from LightOS volume %s project \ %s with acl %s', acl_to_remove, lightos_volname, project_name, acl) # if the ACL is empty here, put in ALLOW_NONE if not acl: acl.append('ALLOW_NONE') ip_acl = data.get('IPAcl') if self.use_ip_acl() and not ip_acl: LOG.warning('Got LightOS volume without IP ACL?! data: %s', data) return False ip_acl = ip_acl.get('values') if self.use_ip_acl() and not ip_acl: LOG.warning( 'Got LightOS volume without IP ACL values?! data: %s', data) return False for ip in host_ips: try: ip_acl.remove(ip) except ValueError: LOG.warning( 'Could not find matching ip %s in ip-acl of volume %s ', ip, lightos_volname) if not ip_acl: ip_acl.append('ALLOW_NONE') return self.set_volume_acl( project_name, lightos_uuid, acl, ip_acl, etag=data.get('ETag', '')) def __overwrite_volume_acl( self, project_name, lightos_volname, acl, host_ips): status, data = self._get_lightos_volume(project_name, self.logical_op_timeout, vol_name=lightos_volname) if not data: LOG.error( 'Could not get data for LightOS volume %s project %s', lightos_volname, project_name) return False lightos_uuid = data.get('UUID') if not lightos_uuid: LOG.warning('Got LightOS volume without UUID?! 
data: %s', data) return False return self.set_volume_acl( project_name, lightos_uuid, acl, host_ips, etag=data.get( 'ETag', '')) def remove_volume_acl(self, project_name, volume, acl_to_remove, host_ips): lightos_volname = self._lightos_volname(volume) LOG.debug('remove_volume_acl volume %s project %s acl %s', volume, project_name, acl_to_remove) return self.update_volume_acl( self.__remove_volume_acl, project_name, lightos_volname, acl_to_remove, host_ips) def remove_all_volume_acls(self, project_name, volume): lightos_volname = self._lightos_volname(volume) LOG.debug('remove_all_volume_acls volume %s project %s', volume, project_name) return self.update_volume_acl( self.__overwrite_volume_acl, project_name, lightos_volname, ['ALLOW_NONE'], ['ALLOW_NONE']) def update_volume_acl(self, func, project_name, lightos_volname, acl, host_ips): # loop because lightos api is async end = time.time() + self.logical_op_timeout first_iteration = True while (time.time() < end): if not first_iteration: time.sleep(1) first_iteration = False res = func(project_name, lightos_volname, acl, host_ips) if not isinstance(res, tuple): LOG.debug('Update_volume: func %s(%s project %s) failed', func, lightos_volname, project_name) continue if len(res) != 2: LOG.debug("Unexpected number of values to unpack") continue (status, resp) = res if status != httpstatus.OK: LOG.debug( 'update_volume: func %s(%s project %s) got \ http status %s', func, lightos_volname, project_name, status) else: break # bail out if the time out elapsed... if time.time() >= end: LOG.warning( 'Timed out %s(%s project %s)', func, lightos_volname, project_name) return False # or the call succeeded and we need to wait # for the volume to stabilize vol_state = self._wait_for_volume_available( project_name, timeout=end - time.time(), vol_name=lightos_volname) allowed_states = ['Available', 'Migrating'] if vol_state not in allowed_states: LOG.warning( 'Timed out waiting for volume %s project %s to stabilize, \ last state %s', lightos_volname, project_name, vol_state) return False return True def _wait_for_volume_acl( self, project_name, lightos_volname, acl, requested_membership): end = time.time() + self.logical_op_timeout while (time.time() < end): (status, resp) = self._get_lightos_volume( project_name, self.logical_op_timeout, vol_name=lightos_volname) if status == httpstatus.OK: if not resp or not resp.get('acl'): LOG.warning( 'Got LightOS volume %s without ACL?! 
data: %s', lightos_volname, resp) return False volume_acls = resp.get('acl').get('values', []) membership = acl in volume_acls if membership == requested_membership: return True LOG.debug( 'ACL did not settle for volume %s project %s, status \ %s resp %s', lightos_volname, project_name, status, resp) time.sleep(1) LOG.warning( 'ACL did not settle for volume %s, giving up', lightos_volname) return False def create_snapshot(self, snapshot): snapshot_name = self._lightos_snapshotname(snapshot["id"]) src_volume_name = self._lightos_volname(snapshot["volume"]) project_name = self._get_lightos_project_name(snapshot.volume) self._create_snapshot(project_name, snapshot_name, src_volume_name) @coordination.synchronized('lightos-create_snapshot-{src_volume_name}') def _create_snapshot(self, project_name, snapshot_name, src_volume_name): found_or_created_snapshot = False last_status_code = 999 last_response = "No response" for i in range(self.snapshots_retries): if i != 0: sleeptime = 2 ** i # 2, 4, 8, 16 (default is 30 seconds) time.sleep(sleeptime) (status_code_get, response) = self._get_lightos_snapshot( project_name, self.logical_op_timeout, snapshot_name=snapshot_name) if status_code_get == httpstatus.OK: found_or_created_snapshot = True break (status_code_create, response) = self.cluster.send_cmd( cmd='create_snapshot', project_name=project_name, timeout=self.logical_op_timeout, name=snapshot_name, src_volume_name=src_volume_name, ) if status_code_create == httpstatus.OK: found_or_created_snapshot = True break if status_code_create in (httpstatus.BAD_REQUEST, httpstatus.INTERNAL_SERVER_ERROR, httpstatus.SERVICE_UNAVAILABLE): LOG.debug('Creating new snapshot %s under project %s' ' failed, received error with http-status %s', snapshot_name, project_name, status_code_create) last_status_code = status_code_create last_response = response else: msg = ('Did not succeed creating LightOS snapshot %s' ' project %s' ' status code %s response %s' % (snapshot_name, project_name, status_code_create, response)) raise exception.VolumeBackendAPIException(message=_(msg)) if not found_or_created_snapshot: msg = ('Did not succeed creating LightOS snapshot %s' ' project %s' ' status code %s response %s' % (snapshot_name, project_name, last_status_code, last_response)) raise exception.VolumeBackendAPIException(message=_(msg)) state = self._wait_for_snapshot_available(project_name, timeout= self.logical_op_timeout, snapshot_name=snapshot_name) if state == 'Available': LOG.debug( 'Successfully created LightOS snapshot %s', snapshot_name) return LOG.error( 'Failed to create snapshot %s project %s for volume %s. 
\ state = %s.', snapshot_name, project_name, src_volume_name, state) try: self._delete_lightos_snapshot(project_name, snapshot_name) except exception.CinderException as ex: LOG.warning("Error deleting snapshot during cleanup: %s", ex) msg = ('Did not succeed creating LightOS snapshot %s project' '%s last state %s' % (snapshot_name, project_name, state)) raise exception.VolumeBackendAPIException(message=_(msg)) def delete_snapshot(self, snapshot): lightos_snapshot_name = self._lightos_snapshotname(snapshot["id"]) project_name = self._get_lightos_project_name(snapshot.volume) self._delete_lightos_snapshot(project_name=project_name, snapshot_name=lightos_snapshot_name) def _get_lightos_snapshot_uuid(self, project_name, lightos_snapshot_name): (status_code, data) = self._get_lightos_snapshot( project_name=project_name, timeout=self.logical_op_timeout, snapshot_name=lightos_snapshot_name) if status_code == httpstatus.OK: uuid = data.get("UUID") if uuid: return uuid if status_code == httpstatus.NOT_FOUND: return None msg = ('Unable to fetch UUID of snapshot named %s. status code' ' %s data %s' % (lightos_snapshot_name, status_code, data)) raise exception.VolumeBackendAPIException(message=_(msg)) def _delete_lightos_snapshot(self, project_name, snapshot_name): snapshot_uuid = self._get_lightos_snapshot_uuid( project_name, snapshot_name) if snapshot_uuid is None: LOG.warning( "Unable to find lightos snapshot %s project %s for deletion", snapshot_name, project_name) return False (status_code, _) = self.cluster.send_cmd(cmd='delete_snapshot', project_name=project_name, timeout=self. logical_op_timeout, snapshot_uuid=snapshot_uuid) if status_code == httpstatus.OK: state = self._wait_for_snapshot_deleted( project_name, timeout=self.logical_op_timeout, snapshot_uuid=snapshot_uuid) if state in ('Deleted', 'Deleting', 'UNKNOWN'): LOG.debug( "Successfully detected that snapshot %s was deleted.", snapshot_name) return True LOG.warning("Snapshot %s was not deleted. 
It is in state %s.", snapshot_name, state) return False LOG.warning( "Request to delete snapshot %s" " was rejected with status code %s.", snapshot_name, status_code) return False def initialize_connection(self, volume, connector): hostnqn = connector.get('nqn') found_dsc = connector.get('found_dsc') host_ips = connector.get('host_ips', []) LOG.info('Current host hostNQN is %s and IP(s) are %s', hostnqn, host_ips) LOG.debug( 'initialize_connection: connector hostnqn is %s found_dsc %s', hostnqn, found_dsc) if not hostnqn: msg = 'Connector (%s) did not contain a hostnqn, aborting' % ( connector) raise exception.VolumeBackendAPIException(message=_(msg)) if not found_dsc: msg = ('Connector (%s) did not indicate a discovery' 'client, aborting' % (connector)) raise exception.VolumeBackendAPIException(message=_(msg)) if not host_ips: msg = 'Connector (%s) did not find host IPs, aborting' % ( connector) raise exception.VolumeBackendAPIException(message=_(msg)) lightos_volname = self._lightos_volname(volume) project_name = self._get_lightos_project_name(volume) success = self.add_volume_acl(project_name, volume, hostnqn, host_ips) if not success or not self._wait_for_volume_acl( project_name, lightos_volname, hostnqn, True): msg = ('Could not add ACL for hostnqn %s LightOS volume' ' %s, aborting' % (hostnqn, lightos_volname)) raise exception.VolumeBackendAPIException(message=_(msg)) props = self._get_connection_properties(project_name, volume) return {'driver_volume_type': ('lightos'), 'data': props} def terminate_connection(self, volume, connector, **kwargs): force = 'force' in kwargs hostnqn = connector.get('nqn') if connector else None host_ips = connector.get('host_ips', []) if connector else [] LOG.debug( 'terminate_connection: force %s kwargs %s hostnqn %s', force, kwargs, hostnqn) project_name = self._get_lightos_project_name(volume) if not hostnqn: if force: LOG.debug( 'Terminating connection with extreme prejudice for \ volume %s', volume) self.remove_all_volume_acls(project_name, volume) return msg = 'Connector (%s) did not return a hostnqn, aborting' % ( connector) raise exception.VolumeBackendAPIException(message=_(msg)) lightos_volname = self._lightos_volname(volume) project_name = self._get_lightos_project_name(volume) success = self.remove_volume_acl(project_name, volume, hostnqn, host_ips) if not success or not self._wait_for_volume_acl( project_name, lightos_volname, hostnqn, False): LOG.warning( 'Could not remove ACL for hostnqn %s LightOS \ volume %s, limping along', hostnqn, lightos_volname) def _init_vendor_properties(self): # compression is one of the standard properties, # no need to add it here # see the definition of this function in cinder/volume/driver.py properties = {} self._set_property( properties, "lightos:num_replicas", "Number of replicas for LightOS volume", _( "Specifies the number of replicas to create for the \ LightOS volume."), "integer", minimum=1, maximun=3, default=3) self._set_property( properties, "lightos:qos_policy", "Lightbits volume QoS policy UUID", _( "Specifies the Lightbits volume QoS policy UUID to use for \ the LightOS volume."), "string", default=None) return properties, 'lightos' def backup_use_temp_snapshot(self): return False def snapshot_revert_use_temp_snapshot(self): """Disable the use of a temporary snapshot on revert.""" return False def snapshot_remote_attachable(self): """LightOS does not support 'mount a snapshot'""" return False ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 
mtime=1759315527.0 cinder-27.0.0/cinder/volume/drivers/linstordrv.py0000664000175000017500000012175500000000000022113 0ustar00zuulzuul00000000000000# Copyright (c) 2014-2019 LINBIT HA Solutions GmbH # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """This driver connects Cinder to an installed LINSTOR instance. See https://docs.linbit.com/docs/users-guide-9.0/#ch-openstack-linstor for more details. """ import socket import uuid from oslo_config import cfg from oslo_log import log as logging from oslo_utils import importutils from oslo_utils import units from cinder.common import constants from cinder import exception from cinder.i18n import _ from cinder.image import image_utils from cinder import interface from cinder.volume import configuration from cinder.volume import driver from cinder.volume import volume_utils try: import linstor lin_drv = linstor.Linstor except ImportError: linstor = None lin_drv = None # To override these values, update cinder.conf in /etc/cinder/ linstor_opts = [ cfg.StrOpt('linstor_default_volume_group_name', default='drbd-vg', help='Default Volume Group name for LINSTOR. ' 'Not Cinder Volume.'), cfg.StrOpt('linstor_default_uri', default='linstor://localhost', help='Default storage URI for LINSTOR.'), cfg.StrOpt('linstor_default_storage_pool_name', default='DfltStorPool', help='Default Storage Pool name for LINSTOR.'), cfg.FloatOpt('linstor_volume_downsize_factor', default=4096, help='Default volume downscale size in KiB = 4 MiB.'), cfg.IntOpt('linstor_default_blocksize', default=4096, help='Default Block size for Image restoration. ' 'When using iSCSI transport, this option ' 'specifies the block size.'), cfg.IntOpt('linstor_autoplace_count', default=0, help='Autoplace replication count on volume deployment. ' '0 = Full cluster replication without autoplace, ' '1 = Single node deployment without replication, ' '2 or greater = Replicated deployment with autoplace.'), cfg.BoolOpt('linstor_controller_diskless', default=True, help='True means Cinder node is a diskless LINSTOR node.') ] LOG = logging.getLogger(__name__) CONF = cfg.CONF CONF.register_opts(linstor_opts, group=configuration.SHARED_CONF_GROUP) CINDER_UNKNOWN = 'unknown' DM_VN_PREFIX = 'CV_' DM_SN_PREFIX = 'SN_' DISKLESS = 'DISKLESS' LVM = 'LVM' LVM_THIN = 'LVM_THIN' ZFS = 'ZFS' ZFS_THIN = 'ZFS_THIN' class LinstorBaseDriver(driver.VolumeDriver): """Cinder driver that uses LINSTOR for storage. Version history: .. 
code-block:: none 1.0.0 - Initial driver 1.0.1 - Added support for LINSTOR 0.9.12 """ VERSION = '1.0.1' # ThirdPartySystems wiki page CI_WIKI_NAME = 'LINBIT_LINSTOR_CI' def __init__(self, *args, **kwargs): super(LinstorBaseDriver, self).__init__(*args, **kwargs) LOG.debug('START: Base Init Linstor') self.configuration.append_config_values(linstor_opts) self.default_pool = self.configuration.safe_get( 'linstor_default_storage_pool_name') self.default_uri = self.configuration.safe_get( 'linstor_default_uri') self.default_downsize_factor = self.configuration.safe_get( 'linstor_volume_downsize_factor') self.default_vg_name = self.configuration.safe_get( 'linstor_default_volume_group_name') self.default_blocksize = self.configuration.safe_get( 'linstor_default_blocksize') self.diskless = self.configuration.safe_get( 'linstor_controller_diskless') self.ap_count = self.configuration.safe_get( 'linstor_autoplace_count') self.default_backend_name = self.configuration.safe_get( 'volume_backend_name') self.host_name = socket.gethostname() @staticmethod def get_driver_options(): return linstor_opts def _ping(self): with lin_drv(self.default_uri) as lin: return lin.ping() def _clean_uuid(self): """Returns a UUID string, WITHOUT braces.""" # Some uuid library versions put braces around the result. # We don't want them, just a plain [0-9a-f-]+ string. uuid_str = str(uuid.uuid4()) uuid_str = uuid_str.replace("{", "") uuid_str = uuid_str.replace("}", "") return uuid_str # LINSTOR works in kiB units; Cinder uses GiB. def _vol_size_to_linstor(self, size): return int(size * units.Mi - self.default_downsize_factor) def _vol_size_to_cinder(self, size): return int(size / units.Mi) def _is_clean_volume_name(self, name, prefix): try: if (name.startswith(CONF.volume_name_template % "") and uuid.UUID(name[7:]) is not None): return prefix + name[7:] except ValueError: return None try: if uuid.UUID(name) is not None: return prefix + name except ValueError: return None def _snapshot_name_from_cinder_snapshot(self, snapshot): sn_name = self._is_clean_volume_name(snapshot['id'], DM_SN_PREFIX) return sn_name def _cinder_volume_name_from_drbd_resource(self, rsc_name): cinder_volume_name = rsc_name.split(DM_VN_PREFIX)[1] return cinder_volume_name def _drbd_resource_name_from_cinder_snapshot(self, snapshot): drbd_resource_name = '{}{}'.format(DM_VN_PREFIX, snapshot['volume_id']) return drbd_resource_name def _drbd_resource_name_from_cinder_volume(self, volume): drbd_resource_name = '{}{}'.format(DM_VN_PREFIX, volume['id']) return drbd_resource_name def _get_api_resource_list(self): with lin_drv(self.default_uri) as lin: if not lin.connected: lin.connect() api_reply = lin.resource_list()[0].__dict__['_rest_data'] return api_reply def _get_api_resource_dfn_list(self): with lin_drv(self.default_uri) as lin: if not lin.connected: lin.connect() api_reply = lin.resource_dfn_list()[0].__dict__['_rest_data'] return api_reply def _get_api_node_list(self): with lin_drv(self.default_uri) as lin: if not lin.connected: lin.connect() api_reply = lin.node_list()[0].__dict__['_rest_data'] return api_reply def _get_api_storage_pool_dfn_list(self): with lin_drv(self.default_uri) as lin: if not lin.connected: lin.connect() api_reply = lin.storage_pool_dfn_list()[0].__dict__['_rest_data'] return api_reply def _get_api_storage_pool_list(self): with lin_drv(self.default_uri) as lin: if not lin.connected: lin.connect() api_reply = lin.storage_pool_list()[0].__dict__['_rest_data'] return api_reply def _get_api_volume_extend(self, rsc_target_name, 
new_size): with lin_drv(self.default_uri) as lin: if not lin.connected: lin.connect() vol_reply = lin.volume_dfn_modify( rsc_name=rsc_target_name, volume_nr=0, size=self._vol_size_to_linstor(new_size)) return vol_reply def _api_snapshot_create(self, drbd_rsc_name, snapshot_name): lin = linstor.Resource(drbd_rsc_name, uri=self.default_uri) snap_reply = lin.snapshot_create(snapshot_name) return snap_reply def _api_snapshot_delete(self, drbd_rsc_name, snapshot_name): lin = linstor.Resource(drbd_rsc_name, uri=self.default_uri) snap_reply = lin.snapshot_delete(snapshot_name) return snap_reply def _api_rsc_dfn_delete(self, drbd_rsc_name): with lin_drv(self.default_uri) as lin: if not lin.connected: lin.connect() snap_reply = lin.resource_dfn_delete(drbd_rsc_name) return snap_reply def _api_storage_pool_create(self, node_name, storage_pool_name, storage_driver, driver_pool_name): with lin_drv(self.default_uri) as lin: if not lin.connected: lin.connect() sp_reply = lin.storage_pool_create( node_name=node_name, storage_pool_name=storage_pool_name, storage_driver=storage_driver, driver_pool_name=driver_pool_name) return sp_reply def _api_rsc_dfn_create(self, rsc_name): with lin_drv(self.default_uri) as lin: if not lin.connected: lin.connect() rsc_dfn_reply = lin.resource_dfn_create(rsc_name) return rsc_dfn_reply def _api_volume_dfn_create(self, rsc_name, size): with lin_drv(self.default_uri) as lin: if not lin.connected: lin.connect() vol_dfn_reply = lin.volume_dfn_create( rsc_name=rsc_name, storage_pool=self.default_pool, size=size) return vol_dfn_reply def _api_rsc_create(self, rsc_name, node_name, diskless=False): with lin_drv(self.default_uri) as lin: if not lin.connected: lin.connect() if diskless: storage_pool = None else: storage_pool = self.default_pool new_rsc = linstor.ResourceData(rsc_name=rsc_name, node_name=node_name, storage_pool=storage_pool, diskless=diskless) rsc_reply = lin.resource_create([new_rsc], async_msg=False) return rsc_reply def _api_rsc_autoplace(self, rsc_name): with lin_drv(self.default_uri) as lin: if not lin.connected: lin.connect() new_rsc = linstor.Resource(name=rsc_name, uri=self.default_uri) new_rsc.placement.redundancy = self.ap_count new_rsc.placement.storage_pool = self.default_pool rsc_reply = new_rsc.autoplace() return rsc_reply def _api_rsc_delete(self, rsc_name, node_name): with lin_drv(self.default_uri) as lin: if not lin.connected: lin.connect() rsc_reply = lin.resource_delete(node_name=node_name, rsc_name=rsc_name) return rsc_reply def _api_rsc_auto_delete(self, rsc_name): with lin_drv(self.default_uri) as lin: if not lin.connected: lin.connect() rsc = linstor.Resource(str(rsc_name), self.default_uri) return rsc.delete() def _api_rsc_is_diskless(self, rsc_name): with lin_drv(self.default_uri) as lin: if not lin.connected: lin.connect() rsc = linstor.Resource(str(rsc_name)) return rsc.is_diskless(self.host_name) def _api_rsc_size(self, rsc_name): with lin_drv(self.default_uri) as lin: if not lin.connected: lin.connect() rsc = linstor.Resource(str(rsc_name)) if len(rsc.volumes): if "size" in rsc.volumes: return rsc.volumes[0].size else: return 0 else: return 0 def _api_volume_dfn_delete(self, rsc_name, volume_nr): with lin_drv(self.default_uri) as lin: if not lin.connected: lin.connect() rsc_reply = lin.volume_dfn_delete(rsc_name=rsc_name, volume_nr=volume_nr) return rsc_reply def _api_snapshot_volume_dfn_restore(self, src_rsc_name, src_snap_name, new_vol_name): with lin_drv(self.default_uri) as lin: if not lin.connected: lin.connect() vol_reply = 
lin.snapshot_volume_definition_restore( from_resource=src_rsc_name, from_snapshot=src_snap_name, to_resource=new_vol_name) return vol_reply def _api_snapshot_resource_restore(self, src_rsc_name, src_snap_name, new_vol_name): lin = linstor.Resource(src_rsc_name, uri=self.default_uri) new_rsc = lin.restore_from_snapshot(src_snap_name, new_vol_name) # Adds an aux/property KV for synchronous return from snapshot restore with lin_drv(self.default_uri) as lin: if not lin.connected: lin.connect() aux_prop = {} aux_prop["Aux/restore"] = "done" lin.volume_dfn_modify( rsc_name=new_vol_name, volume_nr=0, set_properties=aux_prop) if new_rsc.name == new_vol_name: return True return False def _get_rsc_path(self, rsc_name): rsc_list_reply = self._get_api_resource_list() if rsc_list_reply: for rsc in rsc_list_reply: if (rsc["name"] == rsc_name and rsc["node_name"] == self.host_name): for volume in rsc["volumes"]: if volume["volume_number"] == 0: return volume["device_path"] def _get_local_path(self, volume): try: full_rsc_name = ( self._drbd_resource_name_from_cinder_volume(volume)) return self._get_rsc_path(full_rsc_name) except Exception: message = _('Local Volume not found.') raise exception.VolumeBackendAPIException(data=message) def _get_spd(self): # Storage Pool Definition List spd_list_reply = self._get_api_storage_pool_dfn_list() spd_list = [] if spd_list_reply: for spd in spd_list_reply: spd_list.append(spd["storage_pool_name"]) return spd_list def _get_storage_pool(self): # Fetch Storage Pool List sp_list_reply = self._get_api_storage_pool_list() # Separate the diskless nodes sp_diskless_list = [] sp_list = [] if sp_list_reply: for node in sp_list_reply: if node["storage_pool_name"] == self.default_pool: sp_node = {} sp_node["node_name"] = node["node_name"] sp_node["sp_uuid"] = node["uuid"] sp_node["sp_name"] = node["storage_pool_name"] if node["provider_kind"] == DISKLESS: diskless = True sp_node["sp_free"] = -1.0 sp_node["sp_cap"] = -1.0 sp_node["sp_allocated"] = 0.0 else: diskless = False if "free_capacity" in node: temp = float(node["free_capacity"]) / units.Mi sp_node["sp_free"] = round(temp) temp = float(node["total_capacity"]) / units.Mi sp_node["sp_cap"] = round(temp) drivers = [LVM, LVM_THIN, ZFS, ZFS_THIN, DISKLESS] # Driver selection if node["provider_kind"] in drivers: sp_node['driver_name'] = node["provider_kind"] else: sp_node['driver_name'] = str(node["provider_kind"]) if diskless: sp_diskless_list.append(sp_node) else: sp_list.append(sp_node) # Add the diskless nodes to the end of the list if sp_diskless_list: sp_list.extend(sp_diskless_list) return sp_list def _get_volume_stats(self): data = {} data["volume_backend_name"] = self.default_backend_name data["vendor_name"] = "LINBIT" data["driver_version"] = self.VERSION data["pools"] = [] sp_data = self._get_storage_pool() rd_list = self._get_resource_definitions() # Total volumes and capacity num_vols = 0 for rd in rd_list: num_vols += 1 # allocated_sizes_gb = [] free_gb = [] total_gb = [] thin_enabled = False # Total & Free capacity for Local Node single_pool = {} for sp in sp_data: if "Diskless" not in sp["driver_name"]: thin_backends = [LVM_THIN, ZFS_THIN] if sp["driver_name"] in thin_backends: thin_enabled = True if "sp_cap" in sp: if sp["sp_cap"] >= 0.0: total_gb.append(sp["sp_cap"]) if "sp_free" in sp: if sp["sp_free"] >= 0.0: free_gb.append(sp["sp_free"]) # Allocated capacity sp_allocated_size_gb = 0.0 local_resources = [] reply = self._get_api_resource_list() if reply: for rsc in reply: if rsc["node_name"] == 
self.host_name: local_resources.append(rsc["name"]) for rsc_name in local_resources: if not self._api_rsc_is_diskless(rsc_name): rsc_size = self._api_rsc_size(rsc_name) sp_allocated_size_gb += round( int(rsc_size) / units.Gi, 2) single_pool["pool_name"] = data["volume_backend_name"] single_pool["free_capacity_gb"] = min(free_gb) if free_gb else 0 single_pool["total_capacity_gb"] = min(total_gb) if total_gb else 0 single_pool["provisioned_capacity_gb"] = sp_allocated_size_gb single_pool["reserved_percentage"] = ( self.configuration.reserved_percentage) single_pool["thin_provisioning_support"] = thin_enabled single_pool["thick_provisioning_support"] = not thin_enabled single_pool["max_over_subscription_ratio"] = ( self.configuration.max_over_subscription_ratio) single_pool["location_info"] = self.default_uri single_pool["total_volumes"] = num_vols single_pool["filter_function"] = self.get_filter_function() single_pool["goodness_function"] = self.get_goodness_function() single_pool["QoS_support"] = False single_pool["multiattach"] = False single_pool["backend_state"] = "up" data["pools"].append(single_pool) return data def _get_resource_definitions(self): rd_list_reply = self._get_api_resource_dfn_list() rd_list = [] if rd_list_reply: for node in rd_list_reply: # Count only Cinder volumes if DM_VN_PREFIX in node['name']: rd_node = {} rd_node["rd_uuid"] = node['uuid'] rd_node["rd_name"] = node['name'] rd_list.append(rd_node) return rd_list def _get_snapshot_nodes(self, resource): """Returns all available resource nodes for snapshot. However, it excludes diskless nodes. """ rsc_list_reply = self._get_api_resource_list() snap_list = [] if rsc_list_reply: for rsc in rsc_list_reply: if rsc["name"] != resource: continue # Diskless nodes are not available for snapshots diskless = False if "flags" in rsc: if 'DISKLESS' in rsc["flags"]: diskless = True if not diskless: snap_list.append(rsc["node_name"]) return snap_list def _get_diskless_nodes(self, resource): # Returns diskless nodes given a resource rsc_list_reply = self._get_api_resource_list() diskless_list = [] if rsc_list_reply: for rsc in rsc_list_reply: if rsc["name"] != resource: continue if "flags" in rsc: if DISKLESS in rsc["flags"]: diskless_list.append(rsc["node_name"]) return diskless_list def _get_linstor_nodes(self): # Returns all available LINSTOR nodes node_list_reply = self._get_api_node_list() node_list = [] if node_list_reply: for node in node_list_reply: node_list.append(node["name"]) return node_list def _get_nodes(self): # Returns all LINSTOR nodes in a dict list node_list_reply = self._get_api_node_list() node_list = [] if node_list_reply: for node in node_list_reply: node_item = {} node_item["node_name"] = node["name"] node_item["node_address"] = ( node["net_interfaces"][0]["address"]) node_list.append(node_item) return node_list def _check_api_reply(self, api_response, noerror_only=False): if noerror_only: # Checks if none of the replies has an error return lin_drv.all_api_responses_no_error(api_response) else: # Check if all replies are success return lin_drv.all_api_responses_success(api_response) def _copy_vol_to_image(self, context, image_service, image_meta, rsc_path, volume): return volume_utils.upload_volume(context, image_service, image_meta, rsc_path, volume) # # Snapshot # def create_snapshot(self, snapshot): snap_name = self._snapshot_name_from_cinder_snapshot(snapshot) rsc_name = self._drbd_resource_name_from_cinder_snapshot(snapshot) snap_reply = self._api_snapshot_create(drbd_rsc_name=rsc_name, 
snapshot_name=snap_name) if not snap_reply: msg = 'ERROR creating a LINSTOR snapshot {}'.format(snap_name) LOG.error(msg) raise exception.VolumeBackendAPIException(msg) def delete_snapshot(self, snapshot): snapshot_name = self._snapshot_name_from_cinder_snapshot(snapshot) rsc_name = self._drbd_resource_name_from_cinder_snapshot(snapshot) snap_reply = self._api_snapshot_delete(rsc_name, snapshot_name) if not snap_reply: msg = 'ERROR deleting a LINSTOR snapshot {}'.format(snapshot_name) LOG.error(msg) raise exception.VolumeBackendAPIException(msg) # Delete RD if no other RSC are found if not self._get_snapshot_nodes(rsc_name): self._api_rsc_dfn_delete(rsc_name) def create_volume_from_snapshot(self, volume, snapshot): src_rsc_name = self._drbd_resource_name_from_cinder_snapshot(snapshot) src_snap_name = self._snapshot_name_from_cinder_snapshot(snapshot) new_vol_name = self._drbd_resource_name_from_cinder_volume(volume) # If no autoplace, manually build a cluster list if self.ap_count == 0: diskless_nodes = [] nodes = [] for node in self._get_storage_pool(): if DISKLESS in node['driver_name']: diskless_nodes.append(node['node_name']) continue # Filter out controller node if it is diskless if self.diskless and node['node_name'] == self.host_name: continue else: nodes.append(node['node_name']) reply = self._api_snapshot_resource_restore(src_rsc_name, src_snap_name, new_vol_name) if not reply: msg = _('Error on restoring a LINSTOR volume') LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) # Manually add the controller node as a resource if diskless if self.diskless: reply = self._api_rsc_create(rsc_name=new_vol_name, node_name=self.host_name, diskless=self.diskless) # Add any other diskless nodes only if not autoplaced if self.ap_count == 0 and diskless_nodes: for node in diskless_nodes: self._api_rsc_create(rsc_name=new_vol_name, node_name=node, diskless=True) # Upsize if larger volume than original snapshot src_rsc_size = int(snapshot['volume_size']) new_vol_size = int(volume['size']) if new_vol_size > src_rsc_size: upsize_target_name = self._is_clean_volume_name(volume['id'], DM_VN_PREFIX) reply = self._get_api_volume_extend( rsc_target_name=upsize_target_name, new_size=new_vol_size) if not self._check_api_reply(reply, noerror_only=True): # Delete failed volume failed_volume = {} failed_volume['id'] = volume['id'] self.delete_volume(failed_volume) msg = _('Error on extending LINSTOR resource size') LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) def create_volume(self, volume): # Check for Storage Pool List sp_data = self._get_storage_pool() rsc_size = volume['size'] # No existing Storage Pools found if not sp_data: # Check for Nodes node_list = self._get_nodes() if not node_list: msg = _('No LINSTOR resource nodes available / configured') LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) # Create Storage Pool spd_list = self._get_spd() if spd_list: spd_name = spd_list[0] for node in node_list: node_driver = None for sp in sp_data: if sp['node_name'] == node['node_name']: node_driver = sp['driver_name'] sp_reply = self._api_storage_pool_create( node_name=node['node_name'], storage_pool_name=spd_name, storage_driver=node_driver, driver_pool_name=self.default_vg_name) if not self._check_api_reply(sp_reply, noerror_only=True): msg = _('Could not create a LINSTOR storage pool') LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) # Check for RD # If Retyping from another volume, use parent/origin uuid # as a name source if 
(volume['migration_status'] is not None and str(volume['migration_status']).find('success') == -1): src_name = str(volume['migration_status']).split(':')[1] rsc_name = self._is_clean_volume_name(str(src_name), DM_VN_PREFIX) else: rsc_name = self._is_clean_volume_name(volume['id'], DM_VN_PREFIX) # Create a New RD rsc_dfn_reply = self._api_rsc_dfn_create(rsc_name) if not self._check_api_reply(rsc_dfn_reply, noerror_only=True): msg = _("Error creating a LINSTOR resource definition") LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) # Create a New VD vd_size = self._vol_size_to_linstor(rsc_size) vd_reply = self._api_volume_dfn_create(rsc_name=rsc_name, size=int(vd_size)) if not self._check_api_reply(vd_reply, noerror_only=True): msg = _("Error creating a LINSTOR volume definition") LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) # Create LINSTOR Resources ctrl_in_sp = False for node in sp_data: # Check if controller is in the pool if node['node_name'] == self.host_name: ctrl_in_sp = True # Use autoplace to deploy if set if self.ap_count: try: self._api_rsc_autoplace(rsc_name=rsc_name) except Exception: msg = _("Error creating autoplaces LINSTOR resource(s)") LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) # Otherwise deploy across the entire cluster else: for node in sp_data: # Deploy resource on each node if DISKLESS in node['driver_name']: diskless = True else: diskless = False rsc_reply = self._api_rsc_create(rsc_name=rsc_name, node_name=node['node_name'], diskless=diskless) if not self._check_api_reply(rsc_reply, noerror_only=True): msg = _("Error creating a LINSTOR resource") LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) # If the controller is diskless and not in the pool, create a diskless # resource on it if not ctrl_in_sp and self.diskless: rsc_reply = self._api_rsc_create(rsc_name=rsc_name, node_name=self.host_name, diskless=True) if not self._check_api_reply(rsc_reply, noerror_only=True): msg = _("Error creating a LINSTOR controller resource") LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) return {} def delete_volume(self, volume): drbd_rsc_name = self._drbd_resource_name_from_cinder_volume(volume) rsc_list_reply = self._get_api_resource_list() diskful_nodes = self._get_snapshot_nodes(drbd_rsc_name) diskless_nodes = self._get_diskless_nodes(drbd_rsc_name) # If autoplace was used, use Resource class if self.ap_count: rsc_reply = self._api_rsc_auto_delete(drbd_rsc_name) if not rsc_reply: msg = _("Error deleting an autoplaced LINSTOR resource") LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) # Delete all resources in a cluster manually if not autoplaced else: if rsc_list_reply: # Remove diskless nodes first if diskless_nodes: for node in diskless_nodes: rsc_reply = self._api_rsc_delete( node_name=node, rsc_name=drbd_rsc_name) if not self._check_api_reply(rsc_reply, noerror_only=True): msg = _("Error deleting a diskless LINSTOR rsc") LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) # Remove diskful nodes if diskful_nodes: for node in diskful_nodes: rsc_reply = self._api_rsc_delete( node_name=node, rsc_name=drbd_rsc_name) if not self._check_api_reply(rsc_reply, noerror_only=True): msg = _("Error deleting a LINSTOR resource") LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) # Delete VD vd_reply = self._api_volume_dfn_delete(drbd_rsc_name, 0) if not vd_reply: if not self._check_api_reply(vd_reply): msg = _("Error deleting a LINSTOR volume 
definition") LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) # Delete RD # Will fail if snapshot exists but expected self._api_rsc_dfn_delete(drbd_rsc_name) return True def extend_volume(self, volume, new_size): rsc_target_name = self._is_clean_volume_name(volume['id'], DM_VN_PREFIX) extend_reply = self._get_api_volume_extend(rsc_target_name, new_size) if not self._check_api_reply(extend_reply, noerror_only=True): msg = _("ERROR extending a LINSTOR volume") LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) def create_cloned_volume(self, volume, src_vref): temp_id = self._clean_uuid() snapshot = {} snapshot['id'] = temp_id snapshot['volume_id'] = src_vref['id'] snapshot['volume_size'] = src_vref['size'] self.create_snapshot(snapshot) self.create_volume_from_snapshot(volume, snapshot) self.delete_snapshot(snapshot) def copy_image_to_volume(self, context, volume, image_service, image_id, disable_sparse=False): # self.create_volume(volume) already called by Cinder, and works full_rsc_name = self._drbd_resource_name_from_cinder_volume(volume) # This creates a LINSTOR volume from the source image image_utils.fetch_to_raw(context, image_service, image_id, str(self._get_rsc_path(full_rsc_name)), self.default_blocksize, size=volume['size'], disable_sparse=disable_sparse) return {} def copy_volume_to_image(self, context, volume, image_service, image_meta): full_rsc_name = self._drbd_resource_name_from_cinder_volume(volume) rsc_path = str(self._get_rsc_path(full_rsc_name)) self._copy_vol_to_image(context, image_service, image_meta, rsc_path, volume) return {} # Not supported currently def migrate_volume(self, ctxt, volume, host, thin=False, mirror_count=0): return (False, None) def check_for_setup_error(self): msg = None if linstor is None: msg = _('Linstor python package not found') if msg is not None: LOG.error(msg) raise exception.VolumeDriverException(message=msg) def create_export(self, context, volume, connector): pass def ensure_export(self, context, volume): pass def initialize_connection(self, volume, connector, **kwargs): pass def remove_export(self, context, volume): pass def terminate_connection(self, volume, connector, **kwargs): pass # Class with iSCSI interface methods @interface.volumedriver class LinstorIscsiDriver(LinstorBaseDriver): """Cinder iSCSI driver that uses LINSTOR for storage.""" def __init__(self, *args, **kwargs): super(LinstorIscsiDriver, self).__init__(*args, **kwargs) # iSCSI target_helper if 'h_name' in kwargs: self.helper_name = kwargs.get('h_name') self.helper_driver = self.helper_name self.target_driver = None else: self.helper_name = self.configuration.safe_get('iscsi_helper') self.helper_driver = self.target_mapping[self.helper_name] self.target_driver = importutils.import_object( self.helper_driver, configuration=self.configuration, executor=self._execute) LOG.info('START: LINSTOR DRBD driver %s', self.helper_name) def get_volume_stats(self, refresh=False): data = self._get_volume_stats() data["storage_protocol"] = constants.ISCSI data["pools"][0]["location_info"] = ( 'LinstorIscsiDriver:' + data["pools"][0]["location_info"]) return data def ensure_export(self, context, volume): volume_path = self._get_local_path(volume) return self.target_driver.ensure_export( context, volume, volume_path) def create_export(self, context, volume, connector): volume_path = self._get_local_path(volume) export_info = self.target_driver.create_export( context, volume, volume_path) return {'provider_location': export_info['location'], 
'provider_auth': export_info['auth'], } def remove_export(self, context, volume): return self.target_driver.remove_export(context, volume) def initialize_connection(self, volume, connector, **kwargs): return self.target_driver.initialize_connection(volume, connector) def validate_connector(self, connector): return self.target_driver.validate_connector(connector) def terminate_connection(self, volume, connector, **kwargs): return self.target_driver.terminate_connection(volume, connector, **kwargs) # Class with DRBD transport mode @interface.volumedriver class LinstorDrbdDriver(LinstorBaseDriver): """Cinder DRBD driver that uses LINSTOR for storage.""" def __init__(self, *args, **kwargs): super(LinstorDrbdDriver, self).__init__(*args, **kwargs) def _return_drbd_config(self, volume): full_rsc_name = self._drbd_resource_name_from_cinder_volume(volume) rsc_path = self._get_rsc_path(full_rsc_name) return { 'driver_volume_type': 'local', 'data': { "device_path": str(rsc_path) } } def _node_in_sp(self, node_name): for pool in self._get_storage_pool(): if pool['node_name'] == node_name: return True return False def get_volume_stats(self, refresh=False): data = self._get_volume_stats() data["storage_protocol"] = constants.DRBD data["pools"][0]["location_info"] = 'LinstorDrbdDriver:{}'.format( data["pools"][0]["location_info"]) return data def initialize_connection(self, volume, connector, **kwargs): node_name = connector['host'] if not self._node_in_sp(connector['host']): full_rsc_name = self._drbd_resource_name_from_cinder_volume(volume) rsc_reply = self._api_rsc_create(rsc_name=full_rsc_name, node_name=node_name, diskless=True) if not self._check_api_reply(rsc_reply, noerror_only=True): msg = _('Error on creating LINSTOR Resource') LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) return self._return_drbd_config(volume) def terminate_connection(self, volume, connector, **kwargs): if connector: node_name = connector['host'] if not self._node_in_sp(connector['host']): rsc_name = self._drbd_resource_name_from_cinder_volume(volume) rsc_reply = self._api_rsc_delete(rsc_name=rsc_name, node_name=node_name) if not self._check_api_reply(rsc_reply, noerror_only=True): msg = _('Error on deleting LINSTOR Resource') LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) def create_export(self, context, volume, connector): return self._return_drbd_config(volume) def ensure_export(self, context, volume): return self._return_drbd_config(volume) def remove_export(self, context, volume): pass ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/drivers/lvm.py0000664000175000017500000011361500000000000020477 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Driver for Linux servers running LVM. 
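Volumes can be provisioned thick or thin (see the ``lvm_type`` option below) and are exported through a pluggable target driver, which handles the data path (iSCSI, FC, etc.).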
""" import math import os import socket from oslo_concurrency import processutils from oslo_config import cfg from oslo_log import log as logging from oslo_utils import excutils from oslo_utils import importutils from oslo_utils import units from cinder.brick.local_dev import lvm from cinder import exception from cinder.i18n import _ from cinder.image import image_utils from cinder import interface from cinder import utils from cinder.volume import configuration from cinder.volume import driver from cinder.volume import volume_utils LOG = logging.getLogger(__name__) # FIXME(jdg): We'll put the lvm_ prefix back on these when we # move over to using this as the real LVM driver, for now we'll # rename them so that the config generation utility doesn't barf # on duplicate entries. volume_opts = [ cfg.StrOpt('volume_group', default='cinder-volumes', help='Name for the VG that will contain exported volumes'), cfg.IntOpt('lvm_mirrors', default=0, help='If >0, create LVs with multiple mirrors. Note that ' 'this requires lvm_mirrors + 2 PVs with available space'), cfg.StrOpt('lvm_type', default='auto', choices=[('default', 'Thick-provisioned LVM.'), ('thin', 'Thin-provisioned LVM.'), ('auto', 'Defaults to thin when supported.')], help='Type of LVM volumes to deploy; (default, thin, or auto). ' 'Auto defaults to thin if thin is supported.'), cfg.StrOpt('lvm_conf_file', default='/etc/cinder/lvm.conf', help='LVM conf file to use for the LVM driver in Cinder; ' 'this setting is ignored if the specified file does ' 'not exist (You can also specify \'None\' to not use ' 'a conf file even if one exists).'), cfg.BoolOpt('lvm_suppress_fd_warnings', default=False, help='Suppress leaked file descriptor warnings in LVM ' 'commands.'), cfg.BoolOpt('lvm_share_target', default=False, help='Whether to share the same target for all LUNs or not ' '(currently only supported by nvmet.'), ] CONF = cfg.CONF CONF.register_opts(volume_opts, group=configuration.SHARED_CONF_GROUP) @interface.volumedriver class LVMVolumeDriver(driver.VolumeDriver): """Executes commands relating to Volumes.""" VERSION = '3.0.0' # ThirdPartySystems wiki page CI_WIKI_NAME = "Cinder_Jenkins" def __init__(self, vg_obj=None, *args, **kwargs): # Parent sets db, host, _execute and base config super(LVMVolumeDriver, self).__init__(*args, **kwargs) self.configuration.append_config_values(volume_opts) self.hostname = socket.gethostname() self.vg = vg_obj self.backend_name =\ self.configuration.safe_get('volume_backend_name') or 'LVM' # Target Driver is what handles data-transport # Transport specific code should NOT be in # the driver (control path), this way # different target drivers can be added (iscsi, FC etc) target_driver = \ self.target_mapping[self.configuration.safe_get('target_helper')] LOG.debug('Attempting to initialize LVM driver with the ' 'following target_driver: %s', target_driver) self.target_driver = importutils.import_object( target_driver, configuration=self.configuration, executor=self._execute) self.protocol = (self.target_driver.storage_protocol or self.target_driver.protocol) if (self.configuration.lvm_share_target and not self.target_driver.SHARED_TARGET_SUPPORT): raise exception.InvalidConfigurationValue( f"{target_driver} doesn't support shared targets") if (self.configuration.target_secondary_ip_addresses and not self.target_driver.SECONDARY_IP_SUPPORT): raise exception.InvalidConfigurationValue( f"{target_driver} doesn't support secondary addresses") self._sparse_copy_volume = False @classmethod def 
get_driver_options(cls): # Imports required to have config options from cinder.volume.targets import spdknvmf # noqa additional_opts = cls._get_oslo_driver_opts( 'target_ip_address', 'target_helper', 'target_protocol', 'volume_clear', 'volume_clear_size', 'reserved_percentage', 'max_over_subscription_ratio', 'volume_dd_blocksize', 'target_prefix', 'volumes_dir', 'target_secondary_ip_addresses', 'target_port', 'iscsi_write_cache', 'iscsi_target_flags', # TGT 'iscsi_iotype', # IET 'nvmet_port_id', 'nvmet_ns_id', # NVMET 'scst_target_iqn_name', 'scst_target_driver', # SCST 'spdk_rpc_ip', 'spdk_rpc_port', 'spdk_rpc_username', # SPDKNVMF 'spdk_rpc_password', 'spdk_max_queue_depth', # SPDKNVMF ) return volume_opts + additional_opts def _sizestr(self, size_in_g): return '%sg' % size_in_g def _volume_not_present(self, volume_name): return self.vg.get_volume(volume_name) is None def _delete_volume(self, volume, is_snapshot=False): """Deletes a logical volume.""" if self.configuration.volume_clear != 'none' and \ self.configuration.lvm_type != 'thin': self._clear_volume(volume, is_snapshot) name = volume['name'] if is_snapshot: name = self._escape_snapshot(volume['name']) self.vg.delete(name) def _clear_volume(self, volume, is_snapshot=False): # zero out old volumes to prevent data leaking between users # TODO(ja): reclaiming space should be done lazy and low priority if is_snapshot: # if the volume to be cleared is a snapshot of another volume # we need to clear out the volume using the -cow instead of the # directly volume path. We need to skip this if we are using # thin provisioned LVs. # bug# lp1191812 dev_path = self.local_path(volume) + "-cow" else: dev_path = self.local_path(volume) # TODO(jdg): Maybe we could optimize this for snaps by looking at # the cow table and only overwriting what's necessary? # for now we're still skipping on snaps due to hang issue if not os.path.exists(dev_path): msg = (_('Volume device file path %s does not exist.') % dev_path) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) size_in_g = (volume.get('volume_size') if is_snapshot else volume.get('size')) if size_in_g is None: msg = (_("Size for volume: %s not found, cannot secure delete.") % volume['id']) LOG.error(msg) raise exception.InvalidParameterValue(msg) # clear_volume expects sizes in MiB, we store integer GiB # be sure to convert before passing in vol_sz_in_meg = size_in_g * units.Ki volume_utils.clear_volume( vol_sz_in_meg, dev_path, volume_clear=self.configuration.volume_clear, volume_clear_size=self.configuration.volume_clear_size) def _escape_snapshot(self, snapshot_name): # Linux LVM reserves name that starts with snapshot, so that # such volume name can't be created. Mangle it. 
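        # For example (illustrative names): 'snapshot-1234' is mangled to
        # '_snapshot-1234', while 'volume-1234' is returned unchanged;
        # _unescape_snapshot() below reverses this.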
if not snapshot_name.startswith('snapshot'): return snapshot_name return '_' + snapshot_name def _unescape_snapshot(self, snapshot_name): # Undo snapshot name change done by _escape_snapshot() if not snapshot_name.startswith('_snapshot'): return snapshot_name return snapshot_name[1:] def _create_volume(self, name, size, lvm_type, mirror_count, vg=None): vg_ref = self.vg if vg is not None: vg_ref = vg vg_ref.create_volume(name, size, lvm_type, mirror_count) def _update_volume_stats(self): """Retrieve stats info from volume group.""" LOG.debug("Updating volume stats") if self.vg is None: LOG.warning('Unable to update stats on non-initialized ' 'Volume Group: %s', self.configuration.volume_group) return self.vg.update_volume_group_info() data = {} # Note(zhiteng): These information are driver/backend specific, # each driver may define these values in its own config options # or fetch from driver specific configuration file. data["volume_backend_name"] = self.backend_name data["vendor_name"] = 'Open Source' data["driver_version"] = self.VERSION data["storage_protocol"] = self.protocol data["pools"] = [] total_capacity = 0 free_capacity = 0 if self.configuration.lvm_mirrors > 0: total_capacity =\ self.vg.vg_mirror_size(self.configuration.lvm_mirrors) free_capacity =\ self.vg.vg_mirror_free_space(self.configuration.lvm_mirrors) provisioned_capacity = round( float(total_capacity) - float(free_capacity), 2) elif self.configuration.lvm_type == 'thin': total_capacity = self.vg.vg_thin_pool_size free_capacity = self.vg.vg_thin_pool_free_space provisioned_capacity = self.vg.vg_provisioned_capacity else: total_capacity = self.vg.vg_size free_capacity = self.vg.vg_free_space provisioned_capacity = round( float(total_capacity) - float(free_capacity), 2) location_info = \ ('LVMVolumeDriver:%(hostname)s:%(vg)s' ':%(lvm_type)s:%(lvm_mirrors)s' % {'hostname': self.hostname, 'vg': self.configuration.volume_group, 'lvm_type': self.configuration.lvm_type, 'lvm_mirrors': self.configuration.lvm_mirrors}) thin_enabled = self.configuration.lvm_type == 'thin' # Calculate the total volumes used by the VG group. # This includes volumes and snapshots. total_volumes = len(self.vg.get_volumes()) # Skip enabled_pools setting, treat the whole backend as one pool # XXX FIXME if multipool support is added to LVM driver. single_pool = {} single_pool.update(dict( pool_name=data["volume_backend_name"], total_capacity_gb=total_capacity, free_capacity_gb=free_capacity, reserved_percentage=self.configuration.reserved_percentage, location_info=location_info, QoS_support=False, provisioned_capacity_gb=provisioned_capacity, max_over_subscription_ratio=( self.configuration.max_over_subscription_ratio), thin_provisioning_support=thin_enabled, thick_provisioning_support=not thin_enabled, total_volumes=total_volumes, filter_function=self.get_filter_function(), goodness_function=self.get_goodness_function(), multiattach=True, backend_state='up' )) data["pools"].append(single_pool) data["shared_targets"] = self.configuration.lvm_share_target # Check availability of sparse volume copy. 
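        # (_sparse_copy_volume starts out False in __init__ and is set to True
        # in check_for_setup_error() once the backend is known to use thin
        # provisioning, where sparse copies are safe.)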
data['sparse_copy_volume'] = self._sparse_copy_volume self._stats = data def check_for_setup_error(self): """Verify that requirements are in place to use LVM driver.""" if self.vg is None: root_helper = utils.get_root_helper() lvm_conf_file = self.configuration.lvm_conf_file if lvm_conf_file.lower() == 'none': lvm_conf_file = None try: lvm_type = self.configuration.lvm_type if lvm_type == 'auto': if volume_utils.supports_thin_provisioning(): lvm_type = 'thin' else: lvm_type = 'default' self.vg = lvm.LVM( self.configuration.volume_group, root_helper, lvm_type=lvm_type, executor=self._execute, lvm_conf=lvm_conf_file, suppress_fd_warn=( self.configuration.lvm_suppress_fd_warnings)) except exception.VolumeGroupNotFound: message = (_("Volume Group %s does not exist") % self.configuration.volume_group) raise exception.VolumeBackendAPIException(data=message) vg_list = volume_utils.get_all_volume_groups( self.configuration.volume_group) vg_dict = next(vg for vg in vg_list if vg['name'] == self.vg.vg_name) if vg_dict is None: message = (_("Volume Group %s does not exist") % self.configuration.volume_group) raise exception.VolumeBackendAPIException(data=message) pool_name = "%s-pool" % self.configuration.volume_group if self.configuration.lvm_type == 'auto': # Default to thin provisioning if it is supported and # the volume group is empty, or contains a thin pool # for us to use. self.vg.update_volume_group_info() self.configuration.lvm_type = 'default' if volume_utils.supports_thin_provisioning(): if self.vg.get_volume(pool_name) is not None: LOG.info('Enabling LVM thin provisioning by default ' 'because a thin pool exists.') self.configuration.lvm_type = 'thin' elif len(self.vg.get_volumes()) == 0: LOG.info('Enabling LVM thin provisioning by default ' 'because no LVs exist.') self.configuration.lvm_type = 'thin' if self.configuration.lvm_type == 'thin': # Specific checks for using Thin provisioned LV's if not volume_utils.supports_thin_provisioning(): message = _("Thin provisioning not supported " "on this version of LVM.") raise exception.VolumeBackendAPIException(data=message) if self.vg.get_volume(pool_name) is None: try: self.vg.create_thin_pool(pool_name) except processutils.ProcessExecutionError as exc: exception_message = (_("Failed to create thin pool, " "error message was: %s") % str(exc.stderr)) raise exception.VolumeBackendAPIException( data=exception_message) # Enable sparse copy since lvm_type is 'thin' self._sparse_copy_volume = True def create_volume(self, volume): """Creates a logical volume.""" mirror_count = 0 if self.configuration.lvm_mirrors: mirror_count = self.configuration.lvm_mirrors self._create_volume(volume['name'], self._sizestr(volume['size']), self.configuration.lvm_type, mirror_count) def update_migrated_volume(self, ctxt, volume, new_volume, original_volume_status): """Return model update from LVM for migrated volume. This method should rename the back-end volume name(id) on the destination host back to its original name(id) on the source host. 
:param ctxt: The context used to run the method update_migrated_volume :param volume: The original volume that was migrated to this backend :param new_volume: The migration volume object that was created on this backend as part of the migration process :param original_volume_status: The status of the original volume :returns: model_update to update DB with any needed changes """ name_id = None provider_location = None if original_volume_status == 'available': current_name = CONF.volume_name_template % new_volume['id'] original_volume_name = CONF.volume_name_template % volume['id'] try: self.vg.rename_volume(current_name, original_volume_name) except processutils.ProcessExecutionError: LOG.error('Unable to rename the logical volume ' 'for volume: %s', volume['id']) # If the rename fails, _name_id should be set to the new # volume id and provider_location should be set to the # one from the new volume as well. name_id = new_volume['_name_id'] or new_volume['id'] provider_location = new_volume['provider_location'] else: # The back-end will not be renamed. name_id = new_volume['_name_id'] or new_volume['id'] provider_location = new_volume['provider_location'] return {'_name_id': name_id, 'provider_location': provider_location} def create_volume_from_snapshot(self, volume, snapshot): """Creates a volume from a snapshot.""" if self.configuration.lvm_type == 'thin': self.vg.create_lv_snapshot(volume['name'], self._escape_snapshot(snapshot['name']), self.configuration.lvm_type) if volume['size'] > snapshot['volume_size']: LOG.debug("Resize the new volume to %s.", volume['size']) self.extend_volume(volume, volume['size']) # Some configurations of LVM do not automatically activate # ThinLVM snapshot LVs. self.vg.activate_lv(snapshot['name'], is_snapshot=True) self.vg.activate_lv(volume['name'], is_snapshot=True, permanent=True) return self._create_volume(volume['name'], self._sizestr(volume['size']), self.configuration.lvm_type, self.configuration.lvm_mirrors) # Some configurations of LVM do not automatically activate # ThinLVM snapshot LVs. self.vg.activate_lv(snapshot['name'], is_snapshot=True) # copy_volume expects sizes in MiB, we store integer GiB # be sure to convert before passing in volume_utils.copy_volume(self.local_path(snapshot), self.local_path(volume), snapshot['volume_size'] * units.Ki, self.configuration.volume_dd_blocksize, execute=self._execute, sparse=self._sparse_copy_volume) def delete_volume(self, volume): """Deletes a logical volume.""" # NOTE(jdg): We don't need to explicitly call # remove export here because we already did it # in the manager before we got here. 
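        # Note that deletion is refused below (VolumeIsBusy) while the LV
        # still has a dependent snapshot.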
if self._volume_not_present(volume['name']): # If the volume isn't present, then don't attempt to delete return True if self.vg.lv_has_snapshot(volume['name']): LOG.error('Unable to delete due to existing snapshot ' 'for volume: %s', volume['name']) raise exception.VolumeIsBusy(volume_name=volume['name']) self._delete_volume(volume) LOG.info('Successfully deleted volume: %s', volume['id']) def create_snapshot(self, snapshot): """Creates a snapshot.""" self.vg.create_lv_snapshot(self._escape_snapshot(snapshot['name']), snapshot['volume_name'], self.configuration.lvm_type) def delete_snapshot(self, snapshot): """Deletes a snapshot.""" if self._volume_not_present(self._escape_snapshot(snapshot['name'])): # If the snapshot isn't present, then don't attempt to delete LOG.warning("snapshot: %s not found, " "skipping delete operations", snapshot['name']) LOG.info('Successfully deleted snapshot: %s', snapshot['id']) return True # TODO(yamahata): zeroing out the whole snapshot triggers COW. # it's quite slow. self._delete_volume(snapshot, is_snapshot=True) def revert_to_snapshot(self, context, volume, snapshot): """Revert a volume to a snapshot""" # NOTE(tommylikehu): We still can revert the volume because Cinder # will try the alternative approach if 'NotImplementedError' # is raised here. if self.configuration.lvm_type == 'thin': msg = _("Revert volume to snapshot not implemented for thin LVM.") raise NotImplementedError(msg) else: self.vg.revert(self._escape_snapshot(snapshot.name)) self.vg.deactivate_lv(volume.name) self.vg.activate_lv(volume.name) # Recreate the snapshot that was destroyed by the revert self.create_snapshot(snapshot) def local_path(self, volume, vg=None): if vg is None: vg = self.configuration.volume_group # NOTE(vish): stops deprecation warning escaped_group = vg.replace('-', '--') escaped_name = self._escape_snapshot(volume['name']).replace('-', '--') return "/dev/mapper/%s-%s" % (escaped_group, escaped_name) def copy_image_to_volume(self, context, volume, image_service, image_id, disable_sparse=False): """Fetch the image from image_service and write it to the volume.""" image_utils.fetch_to_raw(context, image_service, image_id, self.local_path(volume), self.configuration.volume_dd_blocksize, size=volume['size'], disable_sparse=disable_sparse) def copy_volume_to_image(self, context, volume, image_service, image_meta): """Copy the volume to the specified image.""" volume_utils.upload_volume(context, image_service, image_meta, self.local_path(volume), volume) def create_cloned_volume(self, volume, src_vref): """Creates a clone of the specified volume.""" if self.configuration.lvm_type == 'thin': self.vg.create_lv_snapshot(volume['name'], src_vref['name'], self.configuration.lvm_type) if volume['size'] > src_vref['size']: LOG.debug("Resize the new volume to %s.", volume['size']) self.extend_volume(volume, volume['size']) self.vg.activate_lv(volume['name'], is_snapshot=True, permanent=True) return mirror_count = 0 if self.configuration.lvm_mirrors: mirror_count = self.configuration.lvm_mirrors LOG.info('Creating clone of volume: %s', src_vref['id']) volume_name = src_vref['name'] temp_id = 'tmp-snap-%s' % volume['id'] temp_snapshot = {'volume_name': volume_name, 'size': src_vref['size'], 'volume_size': src_vref['size'], 'name': 'clone-snap-%s' % volume['id'], 'id': temp_id} self.create_snapshot(temp_snapshot) # copy_volume expects sizes in MiB, we store integer GiB # be sure to convert before passing in try: self._create_volume(volume['name'], self._sizestr(volume['size']), 
self.configuration.lvm_type, mirror_count) self.vg.activate_lv(temp_snapshot['name'], is_snapshot=True) volume_utils.copy_volume( self.local_path(temp_snapshot), self.local_path(volume), src_vref['size'] * units.Ki, self.configuration.volume_dd_blocksize, execute=self._execute, sparse=self._sparse_copy_volume) finally: self.delete_snapshot(temp_snapshot) def clone_image(self, context, volume, image_location, image_meta, image_service): return None, False def extend_volume(self, volume, new_size): """Extend an existing volume's size.""" self.vg.extend_volume(volume['name'], self._sizestr(new_size)) try: self.target_driver.extend_target(volume) except Exception: LOG.exception('Error extending target after volume resize.') raise exception.TargetUpdateFailed(volume_id=volume.id) def manage_existing(self, volume, existing_ref): """Manages an existing LV. Renames the LV to match the expected name for the volume. Error checking done by manage_existing_get_size is not repeated. """ lv_name = existing_ref['source-name'] self.vg.get_volume(lv_name) vol_id = volume_utils.extract_id_from_volume_name(lv_name) if volume_utils.check_already_managed_volume(vol_id): raise exception.ManageExistingAlreadyManaged(volume_ref=lv_name) # Attempt to rename the LV to match the OpenStack internal name. try: self.vg.rename_volume(lv_name, volume['name']) except processutils.ProcessExecutionError as exc: exception_message = (_("Failed to rename logical volume %(name)s, " "error message was: %(err_msg)s") % {'name': lv_name, 'err_msg': exc.stderr}) raise exception.VolumeBackendAPIException( data=exception_message) def manage_existing_object_get_size(self, existing_object, existing_ref, object_type): """Return size of an existing LV for manage existing volume/snapshot. existing_ref is a dictionary of the form: {'source-name': } """ # Check that the reference is valid if 'source-name' not in existing_ref: reason = _('Reference must contain source-name element.') raise exception.ManageExistingInvalidReference( existing_ref=existing_ref, reason=reason) lv_name = existing_ref['source-name'] lv = self.vg.get_volume(lv_name) # Raise an exception if we didn't find a suitable LV. if not lv: kwargs = {'existing_ref': lv_name, 'reason': 'Specified logical volume does not exist.'} raise exception.ManageExistingInvalidReference(**kwargs) # LV size is returned in gigabytes. Attempt to parse size as a float # and round up to the next integer. 
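        # For example (illustrative value): a reported size of '1.50' GiB is
        # returned as 2.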
try: lv_size = int(math.ceil(float(lv['size']))) except ValueError: exception_message = (_("Failed to manage existing %(type)s " "%(name)s, because reported size %(size)s " "was not a floating-point number.") % {'type': object_type, 'name': lv_name, 'size': lv['size']}) raise exception.VolumeBackendAPIException( data=exception_message) return lv_size def manage_existing_get_size(self, volume, existing_ref): return self.manage_existing_object_get_size(volume, existing_ref, "volume") def manage_existing_snapshot_get_size(self, snapshot, existing_ref): if not isinstance(existing_ref, dict): existing_ref = {"source-name": existing_ref} return self.manage_existing_object_get_size(snapshot, existing_ref, "snapshot") def manage_existing_snapshot(self, snapshot, existing_ref): dest_name = self._escape_snapshot(snapshot['name']) snapshot_temp = {"name": dest_name} if not isinstance(existing_ref, dict): existing_ref = {"source-name": existing_ref} return self.manage_existing(snapshot_temp, existing_ref) def _get_manageable_resource_info(self, cinder_resources, resource_type, marker, limit, offset, sort_keys, sort_dirs): entries = [] lvs = self.vg.get_volumes() cinder_ids = [resource['id'] for resource in cinder_resources] for lv in lvs: is_snap = self.vg.lv_is_snapshot(lv['name']) if ((resource_type == 'volume' and is_snap) or (resource_type == 'snapshot' and not is_snap)): continue if resource_type == 'volume': potential_id = volume_utils.extract_id_from_volume_name( lv['name']) else: unescape = self._unescape_snapshot(lv['name']) potential_id = volume_utils.extract_id_from_snapshot_name( unescape) lv_info = {'reference': {'source-name': lv['name']}, 'size': int(math.ceil(float(lv['size']))), 'cinder_id': None, 'extra_info': None} if potential_id in cinder_ids: lv_info['safe_to_manage'] = False lv_info['reason_not_safe'] = 'already managed' lv_info['cinder_id'] = potential_id elif self.vg.lv_is_open(lv['name']): lv_info['safe_to_manage'] = False lv_info['reason_not_safe'] = '%s in use' % resource_type else: lv_info['safe_to_manage'] = True lv_info['reason_not_safe'] = None if resource_type == 'snapshot': origin = self.vg.lv_get_origin(lv['name']) lv_info['source_reference'] = {'source-name': origin} entries.append(lv_info) return volume_utils.paginate_entries_list(entries, marker, limit, offset, sort_keys, sort_dirs) def get_manageable_volumes(self, cinder_volumes, marker, limit, offset, sort_keys, sort_dirs): return self._get_manageable_resource_info(cinder_volumes, 'volume', marker, limit, offset, sort_keys, sort_dirs) def get_manageable_snapshots(self, cinder_snapshots, marker, limit, offset, sort_keys, sort_dirs): return self._get_manageable_resource_info(cinder_snapshots, 'snapshot', marker, limit, offset, sort_keys, sort_dirs) def retype(self, context, volume, new_type, diff, host): """Retypes a volume, allow QoS and extra_specs change.""" LOG.debug('LVM retype called for volume %s. No action ' 'required for LVM volumes.', volume['id']) return True def migrate_volume(self, ctxt, volume, host, thin=False, mirror_count=0): """Optimize the migration if the destination is on the same server. If the specified host is another back-end on the same server, and the volume is not attached, we can do the migration locally without going through iSCSI. 
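        The destination's capabilities['location_info'] is expected to look
        like 'LVMVolumeDriver:<hostname>:<vg>:<lvm_type>:<lvm_mirrors>',
        matching the string built in _update_volume_stats().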
""" false_ret = (False, None) if volume['status'] != 'available': return false_ret if 'location_info' not in host['capabilities']: return false_ret info = host['capabilities']['location_info'] try: (dest_type, dest_hostname, dest_vg, lvm_type, lvm_mirrors) =\ info.split(':') lvm_mirrors = int(lvm_mirrors) except ValueError: return false_ret if (dest_type != 'LVMVolumeDriver' or dest_hostname != self.hostname): return false_ret if dest_vg == self.vg.vg_name: message = (_("Refusing to migrate volume ID: %(id)s. Please " "check your configuration because source and " "destination are the same Volume Group: %(name)s.") % {'id': volume['id'], 'name': self.vg.vg_name}) LOG.error(message) raise exception.VolumeBackendAPIException(data=message) vg_list = volume_utils.get_all_volume_groups() try: next(vg for vg in vg_list if vg['name'] == dest_vg) except StopIteration: LOG.error("Destination Volume Group %s does not exist", dest_vg) return false_ret helper = utils.get_root_helper() lvm_conf_file = self.configuration.lvm_conf_file if lvm_conf_file.lower() == 'none': lvm_conf_file = None dest_vg_ref = lvm.LVM(dest_vg, helper, lvm_type=lvm_type, executor=self._execute, lvm_conf=lvm_conf_file) self._create_volume(volume['name'], self._sizestr(volume['size']), lvm_type, lvm_mirrors, dest_vg_ref) # copy_volume expects sizes in MiB, we store integer GiB # be sure to convert before passing in size_in_mb = int(volume['size']) * units.Ki try: volume_utils.copy_volume(self.local_path(volume), self.local_path(volume, vg=dest_vg), size_in_mb, self.configuration.volume_dd_blocksize, execute=self._execute, sparse=self._sparse_copy_volume) except Exception as e: with excutils.save_and_reraise_exception(): LOG.error("Volume migration failed due to " "exception: %(reason)s.", {'reason': str(e)}, resource=volume) dest_vg_ref.delete(volume) self._delete_volume(volume) return (True, None) def get_pool(self, volume): return self.backend_name # ####### Interface methods for DataPath (Target Driver) ######## def ensure_export(self, context, volume): volume_path = "/dev/%s/%s" % (self.configuration.volume_group, volume['name']) self.vg.activate_lv(volume['name']) model_update = \ self.target_driver.ensure_export(context, volume, volume_path) return model_update def create_export(self, context, volume, connector, vg=None): if vg is None: vg = self.configuration.volume_group volume_path = "/dev/%s/%s" % (vg, volume['name']) self.vg.activate_lv(volume['name']) export_info = self.target_driver.create_export( context, volume, volume_path) return {'provider_location': export_info['location'], 'provider_auth': export_info['auth'], } def remove_export(self, context, volume): self.target_driver.remove_export(context, volume) def initialize_connection(self, volume, connector): return self.target_driver.initialize_connection(volume, connector) def validate_connector(self, connector): return self.target_driver.validate_connector(connector) def terminate_connection(self, volume, connector, **kwargs): # NOTE(jdg): LVM has a single export for each volume, so what # we need to do here is check if there is more than one attachment for # the volume, if there is; let the caller know that they should NOT # remove the export. 
# NOTE(jdg): For the TGT driver this is a noop, for LIO this removes # the initiator IQN from the targets access list, so we're good # NOTE(lyarwood): Given the above note we should only call # terminate_connection for the target lioadm driver when there is only # one attachment left for the host specified by the connector to # remove, otherwise the ACL will be removed prematurely while other # attachments on the same host are still accessing the volume. def same_connector(attach): return (attach.connector and self.target_driver.are_same_connector(attach.connector, connector)) attachments = volume.volume_attachment if (volume.multiattach and sum(1 for a in filter(same_connector, attachments)) > 1): return True self.target_driver.terminate_connection(volume, connector, **kwargs) return len(attachments) > 1 ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1759315577.379121 cinder-27.0.0/cinder/volume/drivers/macrosan/0000775000175000017500000000000000000000000021123 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/drivers/macrosan/__init__.py0000664000175000017500000000000000000000000023222 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/drivers/macrosan/config.py0000664000175000017500000001014100000000000022737 0ustar00zuulzuul00000000000000# Copyright (c) 2019 MacroSAN Technologies Co., Ltd. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
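# A minimal sketch (not part of the driver) of the multiattach bookkeeping in
# LVMVolumeDriver.terminate_connection above; the attachment records are
# hypothetical dict stand-ins for the real volume_attachment objects.
def _should_keep_export(attachments, connector, multiattach, are_same_connector):
    """Return True when the caller must keep the volume's export (sketch)."""
    same_host = [a for a in attachments
                 if a.get('connector')
                 and are_same_connector(a['connector'], connector)]
    if multiattach and len(same_host) > 1:
        # Another attachment from the same connector is still active, so the
        # target ACL is left untouched.
        return True
    # This connector's access can be removed; the export itself is kept only
    # while attachments from other connectors remain.
    return len(attachments) > 1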
"""Volume Drivers Config Registration documents for MacroSAN SAN.""" from oslo_config import cfg macrosan_opts = [ # sdas login_info cfg.ListOpt('macrosan_sdas_ipaddrs', help="MacroSAN sdas devices' ip addresses"), cfg.StrOpt('macrosan_sdas_username', help="MacroSAN sdas devices' username"), cfg.StrOpt('macrosan_sdas_password', secret=True, help="MacroSAN sdas devices' password"), # replication login_info cfg.ListOpt('macrosan_replication_ipaddrs', help="MacroSAN replication devices' ip addresses"), cfg.StrOpt('macrosan_replication_username', help="MacroSAN replication devices' username"), cfg.StrOpt('macrosan_replication_password', secret=True, help="MacroSAN replication devices' password"), cfg.ListOpt('macrosan_replication_destination_ports', sample_default="eth-1:0/eth-1:1, eth-2:0/eth-2:1", help="Slave device"), # device_features cfg.StrOpt('macrosan_pool', quotes=True, help='Pool to use for volume creation'), cfg.IntOpt('macrosan_thin_lun_extent_size', default=8, help="Set the thin lun's extent size"), cfg.IntOpt('macrosan_thin_lun_low_watermark', default=5, help="Set the thin lun's low watermark"), cfg.IntOpt('macrosan_thin_lun_high_watermark', default=20, help="Set the thin lun's high watermark"), cfg.BoolOpt('macrosan_force_unmap_itl', default=True, help="Force disconnect while deleting volume"), cfg.FloatOpt('macrosan_snapshot_resource_ratio', default=1.0, help="Set snapshot's resource ratio"), cfg.BoolOpt('macrosan_log_timing', default=True, help="Whether enable log timing"), # fc connection cfg.IntOpt('macrosan_fc_use_sp_port_nr', default=1, max=4, help="The use_sp_port_nr parameter is the number of " "online FC ports used by the single-ended memory " "when the FC connection is established in the switch " "non-all-pass mode. The maximum is 4"), cfg.BoolOpt('macrosan_fc_keep_mapped_ports', default=True, help="In the case of an FC connection, the configuration " "item associated with the port is maintained."), # iscsi connection cfg.ListOpt('macrosan_client', help="""Macrosan iscsi_clients list. You can configure multiple clients. You can configure it in this format: (host; client_name; sp1_iscsi_port; sp2_iscsi_port), (host; client_name; sp1_iscsi_port; sp2_iscsi_port) Important warning, Client_name has the following requirements: [a-zA-Z0-9.-_:], the maximum number of characters is 31 E.g: (controller1; device1; eth-1:0; eth-2:0), (controller2; device2; eth-1:0/eth-1:1; eth-2:0/eth-2:1), """), cfg.StrOpt('macrosan_client_default', help="This is the default connection ports' name for iscsi. " "This default configuration is used " "when no host related information is obtained." "E.g: eth-1:0/eth-1:1; eth-2:0/eth-2:1") ] ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/drivers/macrosan/devop_client.py0000664000175000017500000005471600000000000024165 0ustar00zuulzuul00000000000000# Copyright (c) 2019 MacroSAN Technologies Co., Ltd. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""Base device operation on MacroSAN SAN.""" import logging from random import shuffle import requests from cinder import exception from cinder.i18n import _ LOG = logging.getLogger(__name__) context_request_id = None class Client(object): """Device Client to do operation.""" def __init__(self, sp1_ip, sp2_ip, secret_key): """Initialize the client.""" self.sp1_ip = sp1_ip self.sp2_ip = sp2_ip self.port = 12138 self.choosed_ip = None self.last_request_id = None self.last_ip = None self.timeout = 30 self.SECRET_KEY = secret_key self.url_prefix = '/api/v1' def conn_test(self): iplist = [('sp1', self.sp1_ip), ('sp2', self.sp2_ip)] shuffle(iplist) ha = {} for sp, ip in iplist: try: url = ('http://%s:%s%s/ha_status' % (ip, str(self.port), self.url_prefix)) header = {'Authorization': 'Bearer %s' % self.SECRET_KEY} response = requests.get(url=url, timeout=self.timeout, headers=header) ha = self.response_processing(response) if ha[sp] in ['single', 'double']: LOG.debug('Heart Beating......%(ha)s ', {'ha': ha}) return ip except Exception: pass raise exception.VolumeBackendAPIException( data=_('Connect to MacroSAN IPSAN Error, HA Status:%s') % str(ha)) def send_request(self, method='get', url='/', data=None): header = {'Authorization': 'Bearer %s' % self.SECRET_KEY} try: ip = self.conn_test() url = ('http://%s:%s%s%s' % (ip, str(self.port), self.url_prefix, url)) response = None if method == 'get': response = requests.get(url=url, params=data, timeout=self.timeout, headers=header) elif method == 'post': response = requests.post(url=url, json=data, timeout=self.timeout, headers=header) elif method == 'put': response = requests.put(url=url, json=data, timeout=self.timeout, headers=header) elif method == 'delete': response = requests.delete(url=url, json=data, timeout=self.timeout, headers=header) return self.response_processing(response) except requests.exceptions.ConnectionError: LOG.error('========== Unable to establish connection ' 'with VolumeBackend %(url)s', {'url': url}) def response_processing(self, response): if response.status_code != 200: LOG.error('========== Command %(url)s execution error,' 'response_conde: %(status)s', {'url': response.url, 'status': response.status_code}) raise exception.VolumeBackendAPIException(data=response.json()) LOG.debug('The response is: %(response)s, %(text)s', {'response': response, 'text': response.json()}) return response.json() def get_ha_state(self): """Get HA state.""" return self.send_request(method='get', url='/ha_status') def lun_exists(self, name): """Whether the lun exists.""" data = { 'attr': 'existence', 'name': name } return self.send_request(method='get', url='/lun', data=data) def snapshot_point_exists(self, lun_name, pointid): """Whether the snapshot point exists.""" data = { 'attr': 'existence', 'lun_name': lun_name, 'pointid': pointid } return self.send_request(method='get', url='/snapshot_point', data=data) def it_exists(self, initr_wwn, tgt_port_name): """Whether the it exists.""" data = { 'attr': 'it', 'initr_wwn': initr_wwn, 'tgt_port_name': tgt_port_name } return self.send_request(method='get', url='/itl', data=data) def is_initiator_mapped_to_client(self, initr_wwn, client_name): """Whether initiator is mapped to client.""" data = { 'initr_wwn': initr_wwn, 'client_name': client_name, 'attr': 'list' } return self.send_request(method='get', url='/initiator', data=data) def snapshot_resource_exists(self, lun_name): """Whether the snapshot resource exists.""" data = { 'lun_name': lun_name } return self.send_request(method='get', 
url='/snapshot_resource', data=data) def initiator_exists(self, initr_wwn): """Whether the initiator exists.""" data = { 'attr': 'existence', 'initr_wwn': initr_wwn, } return self.send_request(method='get', url='/initiator', data=data) def get_client(self, name): """Get client info.""" return self.send_request(method='get', url='/client', data={'name': name}) def delete_lun(self, name): """Delete a lun.""" return self.send_request(method='delete', url='/lun', data={'name': name}) def get_lun_sp(self, name): """Get lun sp.""" data = { 'attr': 'lun_sp', 'name': name } return self.send_request(method='get', url='/lun', data=data) def get_snapshot_resource_name(self, lun_name): """Whether the snapshot resource exists.""" return self.send_request(method='get', url='/snapshot_resource', data={'lun_name': lun_name}) def rename_lun(self, old_name, new_name): """Rename a lun.""" return self.send_request(method='put', url='/lun', data={'attr': 'name', 'old_name': old_name, 'new_name': new_name}) def create_lun(self, name, owner, pool, raids, lun_mode, size, lun_params): """Create a lun.""" data = {'name': name, 'owner': owner, 'pool': pool, 'raids': raids, 'lun_mode': lun_mode, 'size': size, 'lun_params': lun_params} return self.send_request(method='post', url='/lun', data=data) def get_raid_list(self, pool): """Get a raid list.""" return self.send_request(method='get', url='/raid_list', data={'pool': pool}) def get_pool_cap(self, pool): """Get pool capacity.""" return self.send_request(method='get', url='/pool', data={'pool': pool}) def get_lun_base_info(self, name): data = {'attr': 'base_info', 'name': name} return self.send_request(method='get', url='/lun', data=data) def extend_lun(self, name, raids, size): """Extend a lun.""" data = { 'attr': 'capicity', 'name': name, 'raids': raids, 'size': size } return self.send_request(method='put', url='/lun', data=data) def enable_lun_qos(self, name, strategy): """Enable lun qos.""" data = { 'attr': 'qos', 'name': name, 'strategy': strategy } return self.send_request(method='put', url='/lun', data=data) def localclone_completed(self, lun): """Whether localclone lun completed.""" return self.send_request(method='get', url='/local_clone', data={'attr': 'completed', 'lun': lun}) def start_localclone_lun(self, master, slave): """start localclone lun.""" return self.send_request(method='post', url='/local_clone', data={'master': master, 'slave': slave}) def stop_localclone_lun(self, lun): """stop localclone lun.""" return self.send_request(method='delete', url='/local_clone', data={'lun': lun}) def create_snapshot_resource(self, lun_name, raids, size): """Create a snapshot resource.""" data = { 'lun_name': lun_name, 'raids': raids, 'size': size } return self.send_request(method='post', url='/snapshot_resource', data=data) def enable_snapshot_resource_autoexpand(self, lun_name): """Enable snapshot resource autoexpand.""" data = { 'attr': 'autoexpand', 'lun_name': lun_name } return self.send_request(method='put', url='/snapshot_resource', data=data) def enable_snapshot(self, lun_name): """Enable snapshot.""" data = { 'attr': 'enable', 'lun_name': lun_name } return self.send_request(method='put', url='/snapshot', data=data) def snapshot_enabled(self, lun_name): """Weather enable snapshot""" params = { 'attr': 'enable', 'lun_name': lun_name } return self.send_request(method='get', url='/snapshot', data=params) def delete_snapshot_resource(self, lun_name): """Delete a snapshot resource.""" data = {'lun_name': lun_name} return self.send_request(method='delete', 
url='/snapshot_resource', data=data) def create_snapshot_point(self, lun_name, snapshot_name): """Create a snapshot point.""" data = { 'lun_name': lun_name, 'snapshot_name': snapshot_name } return self.send_request(method='post', url='/snapshot_point', data=data) def get_snapshot_pointid(self, lun_name, snapshot_name): """Get a snapshot pointid.""" params = { 'attr': 'point_id', 'lun_name': lun_name, 'snapshot_name': snapshot_name } return self.send_request(method='get', url='/snapshot_point', data=params) def rename_snapshot_point(self, lun_name, pointid, name): data = { 'attr': 'name', 'lun_name': lun_name, 'pointid': pointid, 'name': name } return self.send_request(method='put', url='/snapshot_point', data=data) def disable_snapshot(self, lun_name): """Disable snapshot.""" data = { 'attr': 'disable', 'lun_name': lun_name } return self.send_request(method='put', url='/snapshot', data=data) def delete_snapshot_point(self, lun_name, pointid): """Delete a snapshot point.""" data = { 'lun_name': lun_name, 'pointid': pointid } return self.send_request(method='delete', url='/snapshot_point', data=data) def get_snapshot_point_num(self, lun_name): """Get snapshot point number.""" data = { 'attr': 'number', 'lun_name': lun_name } return self.send_request(method='get', url='/snapshot_point', data=data) def create_client(self, name): """Create a client.""" return self.send_request(method='post', url='/client', data={'name': name}) def create_target(self, port_name, type='fc'): """Create a target.""" data = { 'port_name': port_name, 'type': type } return self.send_request(method='post', url='/target', data=data) def delete_target(self, tgt_name): """Delete a target.""" return self.send_request(method='delete', url='/target', data={'tgt_name': tgt_name}) def create_initiator(self, initr_wwn, alias, type='fc'): """Create an initiator.""" data = { 'initr_wwn': initr_wwn, 'alias': alias, 'type': type } return self.send_request(method='post', url='/initiator', data=data) def delete_initiator(self, initr_wwn): """Delete an initiator.""" return self.send_request(method='delete', url='/initiator', data={'initr_wwn': initr_wwn}) def map_initiator_to_client(self, initr_wwn, client_name): """Map initiator to client.""" data = { 'attr': 'mapinitiator', 'initr_wwn': initr_wwn, 'client_name': client_name } return self.send_request(method='put', url='/client', data=data) def unmap_initiator_from_client(self, initr_wwn, client_name): """Unmap target from initiator.""" data = { 'attr': 'unmapinitiator', 'initr_wwn': initr_wwn, 'client_name': client_name } return self.send_request(method='put', url='/client', data=data) def map_target_to_initiator(self, tgt_port_name, initr_wwn): """Map target to initiator.""" data = { 'attr': 'maptarget', 'initr_wwn': initr_wwn, 'tgt_port_name': tgt_port_name } return self.send_request(method='post', url='/itl', data=data) def unmap_target_from_initiator(self, tgt_port_name, initr_wwn): """Unmap target from initiator.""" data = { 'attr': 'unmaptarget', 'initr_wwn': initr_wwn, 'tgt_port_name': tgt_port_name } return self.send_request(method='delete', url='/itl', data=data) def map_lun_to_it(self, lun_name, initr_wwn, tgt_port_name, lun_id=-1): """Map lun to it.""" data = { 'attr': 'maplun', 'lun_name': lun_name, 'initr_wwn': initr_wwn, 'tgt_port_name': tgt_port_name, 'lun_id': lun_id } return self.send_request(method='post', url='/itl', data=data) def unmap_lun_to_it(self, lun_name, initr_wwn, tgt_port_name): """Unmap lun to it.""" data = { 'attr': 'unmaplun', 'lun_name': lun_name, 
'initr_wwn': initr_wwn, 'tgt_port_name': tgt_port_name, } return self.send_request(method='delete', url='/itl', data=data) def has_initiators_mapped_any_lun(self, initr_wwns, type='fc'): """Whether has initiators mapped any lun.""" data = { 'attr': 'itl', 'initr_wwns': initr_wwns, 'type': type } return self.send_request(method='get', url='/itl', data=data) def create_snapshot_view(self, view_name, lun_name, pointid): """Create a snapshot view.""" data = { 'view_name': view_name, 'lun_name': lun_name, 'pointid': pointid } return self.send_request(method='post', url='/snapshot_view', data=data) def delete_snapshot_view(self, view_name): """Delete a snapshot view.""" return self.send_request(method='delete', url='/snapshot_view', data={'view_name': view_name}) def get_fc_initr_mapped_ports(self, initr_wwns): """Get initiator mapped port.""" data = { 'attr': 'fc_initr_mapped_ports', 'initr_wwns': initr_wwns } return self.send_request(method='get', url='/initiator', data=data) def get_fc_ports(self): """Get FC ports.""" data = { 'attr': 'fc_ports', } return self.send_request(method='get', url='/initiator', data=data) def get_iscsi_ports(self): """Get iSCSI ports.""" data = { 'attr': 'iscsi_ports', } return self.send_request(method='get', url='/initiator', data=data) def get_lun_id(self, initr_wwn, tgt_port_name, lun_name): """Get lun id.""" data = { 'attr': 'lun_id', 'initr_wwn': initr_wwn, 'tgt_port_name': tgt_port_name, 'lun_name': lun_name } return self.send_request(method='get', url='/lun', data=data) def get_lun_uuid(self, lun_name): """Get lun uuid.""" data = { 'attr': 'lun_uuid', 'lun_name': lun_name } return self.send_request(method='get', url='/lun', data=data) def get_lun_name(self, lun_uuid): """Get lun name.""" data = { 'attr': 'lun_name', 'lun_uuid': lun_uuid } return self.send_request(method='get', url='/lun', data=data) def copy_volume_from_view(self, lun_name, view_name): """Copy volume from view.""" data = { 'attr': 'from_view', 'lun_name': lun_name, 'view_name': view_name } return self.send_request(method='post', url='/copy_volume', data=data) def snapshot_copy_task_completed(self, lun_name): data = { 'attr': 'snapshot_copy_task_completed', 'lun_name': lun_name } return self.send_request(method='get', url='/copy_volume', data=data) def get_it_unused_id_list(self, it_type, initr_wwn, tgt_port_name): data = { 'attr': 'it_unused_id_list', 'it_type': it_type, 'initr_wwn': initr_wwn, 'tgt_port_name': tgt_port_name } return self.send_request(method='get', url='/initiator', data=data) def backup_lun_name_to_rename_file(self, cur_name, original_name): """Backup lun name to rename file.""" data = { 'cur_name': cur_name, 'original_name': original_name } return self.send_request(method='post', url='/rename_file', data=data) def get_lun_name_from_rename_file(self, name): """Get lun name from rename file.""" data = {'name': name} return self.send_request(method='get', url='/rename_file', data=data) def create_dalun(self, lun_name): data = {'lun_name': lun_name} return self.send_request(method='post', url='/dalun', data=data) def delete_dalun(self, lun_name): data = {'lun_name': lun_name} return self.send_request(method='delete', url='/dalun', data=data) def dalun_exists(self, lun_name): data = { 'attr': 'existence', 'lun_name': lun_name } return self.send_request(method='get', url='/dalun', data=data) def suspend_dalun(self, lun_name): data = { 'attr': 'suspend', 'lun_name': lun_name } return self.send_request(method='put', url='/dalun', data=data) def resume_dalun(self, lun_name): data = 
{ 'attr': 'resume', 'lun_name': lun_name } return self.send_request(method='put', url='/dalun', data=data) def setup_snapshot_resource(self, volume_name, size, raids): if not self.snapshot_resource_exists(volume_name): self.create_snapshot_resource(volume_name, raids, size) if self.enable_snapshot_resource_autoexpand( volume_name).status_code != 200: LOG.warning('========== Enable snapshot resource auto ' 'expand for volume: %s error', volume_name) def get_raid_list_to_create_lun(self, pool, size): raids = self.get_raid_list(pool) free = sum(raid['free_cap'] for raid in raids) if size > free: raise exception.VolumeBackendAPIException( data=_('Pool has not enough free capacity')) raids = sorted(raids, key=lambda x: x['free_cap'], reverse=True) selected = [] cap = 0 for raid in raids: if raid['free_cap']: cap += raid['free_cap'] selected.append(raid['name']) if cap >= size: break return selected def get_port_ipaddr(self, port): data = { 'attr': 'port_ipaddr', 'port': port, } return self.send_request(method='get', url='/itl', data=data) def enable_replication(self, lun_name, sp1, sp2): data = { 'attr': 'enable', 'lun_name': lun_name, 'sp1': sp1, 'sp2': sp2, } return self.send_request(method='put', url='/replication', data=data) def disable_replication(self, lun_name): data = { 'attr': 'disable', 'lun_name': lun_name, } return self.send_request(method='put', url='/replication', data=data) def replication_enabled(self, lun_name): data = { 'attr': 'enabled', 'lun_name': lun_name } return self.send_request(method='get', url='/replication', data=data) def startscan_replication(self, lun_name): data = { 'attr': 'startscan', 'lun_name': lun_name } return self.send_request(method='put', url='/replication', data=data) def stopscan_replication(self, lun_name): data = { 'attr': 'stopscan', 'lun_name': lun_name } return self.send_request(method='put', url='/replication', data=data) def pausereplicate(self, lun_name): data = { 'attr': 'pause', 'lun_name': lun_name } return self.send_request(method='put', url='/replication', data=data) def get_device_uuid(self): return self.send_request(method='get', url='/device') def get_lun_it(self, name): data = { 'attr': 'getitl', 'name': name } return self.send_request(method='get', url='/itl', data=data) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/drivers/macrosan/driver.py0000664000175000017500000017010700000000000022776 0ustar00zuulzuul00000000000000# Copyright (c) 2019 MacroSAN Technologies Co., Ltd. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
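# Hedged usage sketch for the Client above; the SP addresses, secret key and
# object names are placeholders. Every call first probes /ha_status on a
# randomly ordered SP pair (conn_test), then issues the request with a Bearer
# token; any non-200 response raises VolumeBackendAPIException.
def _example_devop_usage():
    from cinder.volume.drivers.macrosan import devop_client

    client = devop_client.Client('192.168.0.1', '192.168.0.2', 'SECRET-KEY')
    if client.lun_exists('volume-0001'):
        # get_pool_cap() is unpacked by the driver's _update_volume_stats()
        # as (total_gb, free_gb, thin_unallocated_gb).
        total, free, thin_unalloced = client.get_pool_cap('pool-1')
        return total, free, thin_unalloced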
"""Volume Drivers for MacroSAN SAN.""" from contextlib import contextmanager import math import re import socket import time import uuid from oslo_config import cfg from oslo_log import log as logging from oslo_utils import excutils from oslo_utils import strutils from oslo_utils import timeutils from cinder.common import constants from cinder import context from cinder.coordination import synchronized from cinder import exception from cinder.i18n import _ from cinder import interface from cinder import utils from cinder.volume import configuration from cinder.volume import driver from cinder.volume.drivers.macrosan import config from cinder.volume.drivers.macrosan import devop_client from cinder.volume.drivers.san import san from cinder.volume import qos_specs from cinder.volume import volume_types from cinder.volume import volume_utils from cinder.zonemanager import utils as fczm_utils version = '1.0.1' lock_name = 'MacroSAN' LOG = logging.getLogger(__name__) CONF = cfg.CONF CONF.register_opts(config.macrosan_opts, group=configuration.SHARED_CONF_GROUP) @contextmanager def ignored(*exceptions): try: yield except exceptions: pass def record_request_id(fn): def _record_request_id(*vargs, **kv): ctx = context.context.get_current() devop_client.context_request_id = ctx.request_id return fn(*vargs, **kv) return _record_request_id def replication_synced(params): return (params['replication_enabled'] and params['replication_mode'] == 'sync') class MacroSANBaseDriver(driver.VolumeDriver): """Base driver for MacroSAN SAN.""" CI_WIKI_NAME = 'MacroSAN_Volume_CI' def __init__(self, *args, **kwargs): """Initialize the driver.""" super(MacroSANBaseDriver, self).__init__(*args, **kwargs) self.configuration.append_config_values(config.macrosan_opts) self.configuration.append_config_values(san.san_opts) self._stats = {} self.use_multipath = True self.owners = ['SP1', 'SP2'] self.owner_idx = 0 self.volume_backend_name = ( self.configuration.safe_get('volume_backend_name') or 'MacroSAN') self.username = self.configuration.san_login self.passwd = self.configuration.san_password self.sp1_ipaddr, self.sp2_ipaddr = ( self.configuration.san_ip.replace(' ', '').split(",")) self.login_info = self.username + self.passwd if self.configuration.macrosan_sdas_ipaddrs: self.sdas_username = self.configuration.macrosan_sdas_username self.sdas_passwd = self.configuration.macrosan_sdas_password self.sdas_sp1_ipaddr, self.sdas_sp2_ipaddr = ( self.configuration.macrosan_sdas_ipaddrs) self.sdas_sp1_ipaddr = ( self.sdas_sp1_ipaddr.replace('/', ',').replace(' ', '')) self.sdas_sp2_ipaddr = ( self.sdas_sp2_ipaddr.replace('/', ',').replace(' ', '')) self.sdas_login_info = self.sdas_username + self.sdas_passwd if self.configuration.macrosan_replication_ipaddrs: self.rep_username = ( self.configuration.macrosan_replication_username) self.rep_passwd = self.configuration.macrosan_replication_password self.rep_sp1_ipaddr, self.rep_sp2_ipaddr = ( self.configuration.macrosan_replication_ipaddrs) self.rep_sp1_ipaddr = ( self.rep_sp1_ipaddr.replace('/', ',').replace(' ', '')) self.rep_sp2_ipaddr = ( self.rep_sp2_ipaddr.replace('/', ',').replace(' ', '')) self.replica_login_info = self.rep_username + self.rep_passwd self.replication_params = { 'destination': {'sp1': self.configuration. macrosan_replication_destination_ports[0], 'sp2': self.configuration. 
macrosan_replication_destination_ports[1]}} self.client = None self.replica_client = None self.sdas_client = None self.storage_protocol = None self.device_uuid = None self.client_info = dict() self.lun_params = {} self.lun_mode = self.configuration.san_thin_provision if self.lun_mode: self.lun_params = ( {'extent-size': self.configuration.macrosan_thin_lun_extent_size, 'low-watermark': self.configuration.macrosan_thin_lun_low_watermark, 'high-watermark': self.configuration.macrosan_thin_lun_high_watermark}) self.pool = self.configuration.macrosan_pool self.force_unmap_itl_when_deleting = ( self.configuration.macrosan_force_unmap_itl) self.snapshot_resource_ratio = ( self.configuration.macrosan_snapshot_resource_ratio) global timing_on timing_on = self.configuration.macrosan_log_timing self.initialize_iscsi_info() def _size_str_to_int(self, size_in_g): if int(size_in_g) == 0: return 1 return int(size_in_g) def _volume_name(self, volume): try: lun_uuid = re.search(r'macrosan uuid:(.+)', volume['provider_location']).group(1) return self.client.get_lun_name(lun_uuid) except Exception: return volume['id'] def _snapshot_name(self, snapshotid): return snapshotid.replace('-', '')[:31] def initialize_iscsi_info(self): sp1_port, sp2_port = \ self.configuration.macrosan_client_default.split(';') host = socket.gethostname() self.client_info['default'] = {'client_name': host, 'sp1_port': sp1_port.replace(' ', ''), 'sp2_port': sp2_port.replace(' ', '')} client_list = self.configuration.macrosan_client if client_list: for i in client_list: client = i.strip('(').strip(')').split(";") host, client_name, sp1_port, sp2_port = [j.strip() for j in client] self.client_info[host] = ( {'client_name': client_name, 'sp1_port': sp1_port.replace(' ', '').replace('/', ','), 'sp2_port': sp2_port.replace(' ', '').replace('/', ',')}) def _get_client_name(self, host): if host in self.client_info: return self.client_info[host]['client_name'] return self.client_info['default']['client_name'] @utils.synchronized('MacroSAN-Setup', external=True) @record_request_id def do_setup(self, context): """Any initialization the volume driver does while starting.""" LOG.debug('Enter in Macrosan do_setup.') self.client = devop_client.Client(self.sp1_ipaddr, self.sp2_ipaddr, self.login_info) if self.configuration.macrosan_sdas_ipaddrs: self.sdas_client = ( devop_client.Client(self.sdas_sp1_ipaddr, self.sdas_sp2_ipaddr, self.sdas_login_info)) if self.configuration.macrosan_replication_ipaddrs: self.replica_client = ( devop_client.Client(self.rep_sp1_ipaddr, self.rep_sp2_ipaddr, self.replica_login_info)) self.device_uuid = self.client.get_device_uuid() self._do_setup() LOG.debug('MacroSAN Cinder Driver setup complete.') def _do_setup(self): pass def _get_owner(self): owner = self.owners[self.owner_idx % 2] self.owner_idx += 1 return owner def check_for_setup_error(self): """Check any setup error.""" pass def _check_volume_params(self, params): if params['sdas'] and params['replication_enabled']: raise exception.VolumeBackendAPIException( data=_('sdas and replication can not be enabled at same time')) if params['sdas'] and self.sdas_client is None: raise exception.VolumeBackendAPIException( data=_('sdas is not configured, cannot use sdas')) if params['replication_enabled'] and self.replica_client is None: raise exception.VolumeBackendAPIException( data=_('replica is not configured, cannot use replication')) def get_raid_list(self, size): raids = self.client.get_raid_list(self.pool) free = sum(raid['free_cap'] for raid in raids) if size > 
free: raise exception.VolumeBackendAPIException(_('Pool has not enough' 'free capacity')) raids = sorted(raids, key=lambda x: x['free_cap'], reverse=True) selected = [] cap = 0 for raid in raids: if raid['free_cap']: cap += raid['free_cap'] selected.append(raid['name']) if cap >= size: break return selected def _create_volume(self, name, size, params, owner=None, pool=None): rmt_client = None if params['sdas']: rmt_client = self.sdas_client elif params['replication_enabled']: rmt_client = self.replica_client owner = self._get_owner() if owner is None else owner raids = [] pool = self.pool if pool is None else pool if not params['lun_mode']: raids = self.client.get_raid_list_to_create_lun(pool, size) self.client.create_lun(name, owner, pool, raids, params['lun_mode'], size, self.lun_params) if params['qos-strategy']: try: self.client.enable_lun_qos(name, params['qos-strategy']) except Exception: self.client.delete_lun(name) raise exception.VolumeBackendAPIException( _('Enable lun qos failed.')) if params['sdas'] or params['replication_enabled']: res_size = int(max(int(size) * self.snapshot_resource_ratio, 1)) try: raids = self.client.get_raid_list_to_create_lun(pool, res_size) self.client.setup_snapshot_resource(name, res_size, raids) except Exception: with excutils.save_and_reraise_exception(): self.client.delete_lun(name) try: raids = [] if not params['lun_mode']: raids = rmt_client.get_raid_list_to_create_lun( pool, size) rmt_client.create_lun(name, owner, pool, raids, params['lun_mode'], size, self.lun_params) except Exception: with excutils.save_and_reraise_exception(): self.client.delete_snapshot_resource(name) self.client.delete_lun(name) try: raids = rmt_client.get_raid_list_to_create_lun(pool, res_size) rmt_client.setup_snapshot_resource(name, res_size, raids) except Exception: with ignored(Exception): rmt_client.delete_lun(name) with excutils.save_and_reraise_exception(): self.client.delete_snapshot_resource(name) self.client.delete_lun(name) if params['sdas'] or replication_synced(params): try: self.client.create_dalun(name) except Exception: with ignored(Exception): rmt_client.delete_snapshot_resource(name) rmt_client.delete_lun(name) with excutils.save_and_reraise_exception(): self.client.delete_snapshot_resource(name) self.client.delete_lun(name) elif params['replication_mode'] == 'async': destination = self.replication_params['destination'] sp1_ipaddr = rmt_client.get_port_ipaddr(destination['sp1']) sp2_ipaddr = rmt_client.get_port_ipaddr(destination['sp2']) try: self.client.enable_replication(name, sp1_ipaddr, sp2_ipaddr) self.client.startscan_replication(name) except Exception: with ignored(Exception): rmt_client.delete_snapshot_resource(name) rmt_client.delete_lun(name) with excutils.save_and_reraise_exception(): self.client.delete_snapshot_resource(name) self.client.delete_lun(name) lun_uuid = self.client.get_lun_uuid(name) return {'provider_location': 'macrosan uuid:%s' % lun_uuid} def _parse_qos_strategy(self, volume_type): qos_specs_id = volume_type.get('qos_specs_id') if qos_specs_id is None: return '' ctx = context.get_admin_context() specs = qos_specs.get_qos_specs(ctx, qos_specs_id)['specs'] return specs.pop('qos-strategy', '').strip() if specs else '' def _default_volume_params(self): params = { 'qos-strategy': '', 'replication_enabled': False, 'replication_mode': 'async', 'sdas': False, 'lun_mode': self.lun_mode } return params def _parse_volume_params(self, volume): params = self._default_volume_params() if volume.volume_type_id is None: return params ctx = 
context.get_admin_context() volume_type = volume_types.get_volume_type(ctx, volume.volume_type_id) params['qos-strategy'] = self._parse_qos_strategy(volume_type) specs = dict(volume_type).get('extra_specs') for k, val in specs.items(): ks = k.lower().split(':') if len(ks) == 2 and ks[0] != "capabilities": continue k = ks[-1] if k not in params: continue else: v = val.split()[-1] val_type = type(params[k]).__name__ if val_type == 'int': v = int(v) elif val_type == 'bool': v = strutils.bool_from_string(v) params[k] = v if params['sdas']: params['lun_mode'] = False return params @synchronized(lock_name) @record_request_id @volume_utils.trace def create_volume(self, volume): """Create a volume.""" name = volume['name'] size = self._size_str_to_int(volume['size']) params = self._parse_volume_params(volume) self._check_volume_params(params) return self._create_volume(name, size, params) def _delete_volume(self, name, params=None): if not self.client.lun_exists(name): return if params is None: params = self._default_volume_params() if self.force_unmap_itl_when_deleting: self.force_terminate_connection(name, False) if params['sdas'] or replication_synced(params): if self.client.dalun_exists(name): self.client.suspend_dalun(name) self.client.delete_dalun(name) with ignored(Exception): self.sdas_client.delete_snapshot_resource(name) self.sdas_client.delete_lun(name) self.client.delete_snapshot_resource(name) if (params['replication_enabled'] and params['replication_mode'] == 'async'): if self.client.replication_enabled(name): with ignored(Exception): self.client.stopscan_replication(name) self.client.pausereplicate(name) self.client.disable_replication(name) self.client.delete_snapshot_resource(name) self.client.delete_lun(name) try: migrated_name = self.client.get_lun_name_from_rename_file(name) if not migrated_name: return try: self.client.rename_lun(migrated_name, name) except Exception: LOG.warning('========== failed to rename %(migrated_name)s' ' to %(name)s', {'migrated_name': migrated_name, 'name': name}) except Exception: return @synchronized(lock_name) @record_request_id @volume_utils.trace def delete_volume(self, volume): """Delete a volume.""" name = self._volume_name(volume) params = self._parse_volume_params(volume) self._delete_volume(name, params) @utils.synchronized('MacroSAN-Attach-Detach', external=True) def _attach_volume(self, context, volume, properties, remote=False): return super(MacroSANBaseDriver, self)._attach_volume(context, volume, properties, remote) @utils.synchronized('MacroSAN-Attach-Detach', external=True) def _detach_volume(self, context, attach_info, volume, properties, force=False, remote=False, ignore_errors=True): return super(MacroSANBaseDriver, self)._detach_volume(context, attach_info, volume, properties, force, remote, ignore_errors) def _create_snapshot(self, snapshot_name, volume_name, volume_size): size = int(max(int(volume_size) * self.snapshot_resource_ratio, 1)) raids = self.client.get_raid_list_to_create_lun(self.pool, size) if not self.client.snapshot_resource_exists(volume_name): self.client.create_snapshot_resource(volume_name, raids, size) try: self.client.enable_snapshot_resource_autoexpand(volume_name) except exception.VolumeBackendAPIException: LOG.warning('========== Enable snapshot resource auto ' 'expand for volume: %(volume_name)s error', {'volume_name': volume_name}) if not self.client.snapshot_enabled(volume_name): try: self.client.enable_snapshot(volume_name) except exception.VolumeBackendAPIException: with 
excutils.save_and_reraise_exception(): self.client.delete_snapshot_resource(volume_name) try: self.client.create_snapshot_point(volume_name, snapshot_name) pointid = self.client.get_snapshot_pointid(volume_name, snapshot_name) except exception.VolumeBackendAPIException: with ignored(Exception): self.client.disable_snapshot(volume_name) self.client.delete_snapshot_resource(volume_name) raise return int(pointid) @synchronized(lock_name) @record_request_id @volume_utils.trace def create_snapshot(self, snapshot): """Create a snapshot.""" volume = snapshot['volume'] snapshot_name = self._snapshot_name(snapshot['name']) volume_name = self._volume_name(volume) pointid = self._create_snapshot(snapshot_name, volume_name, volume['size']) return {'provider_location': 'pointid: %s' % pointid} def _delete_snapshot(self, snapshot_name, volume_name, pointid): if self.client.snapshot_point_exists(volume_name, pointid): self.client.delete_snapshot_point(volume_name, pointid) with ignored(Exception): n = self.client.get_snapshot_point_num(volume_name) if n != 0: return with ignored(Exception): self.client.disable_snapshot(volume_name) if not (self.client.dalun_exists(volume_name) or self.client.replication_enabled(volume_name)): self.client.delete_snapshot_resource(volume_name) @synchronized(lock_name) @record_request_id @volume_utils.trace def delete_snapshot(self, snapshot): """Delete a snapshot.""" volume = snapshot['volume'] provider = snapshot['provider_location'] if not provider: return m = re.findall(r'pointid: (\d+)', provider) if m is None: return snapshot_name = self._snapshot_name(snapshot['id']) volume_name = self._volume_name(volume) self._delete_snapshot(snapshot_name, volume_name, m[0]) def _initialize_connection(self, name, host, wwns): raise NotImplementedError def _terminate_connection(self, name, host, wwns): raise NotImplementedError def _create_volume_from_snapshot(self, vol_name, vol_size, vol_params, snp_name, pointid, snp_vol_name, snp_vol_size): self._create_volume(vol_name, vol_size, vol_params) try: self.client.create_snapshot_view(snp_name, snp_vol_name, pointid) except Exception: self._delete_volume(vol_name) raise exception.VolumeBackendAPIException( _('Create snapshot view failed.')) try: self.client.copy_volume_from_view(vol_name, snp_name) while not self.client.snapshot_copy_task_completed(vol_name): time.sleep(2) except Exception: with excutils.save_and_reraise_exception(): self.client.delete_snapshot_view(snp_name) self._delete_volume(vol_name) else: self.client.delete_snapshot_view(snp_name) lun_uuid = self.client.get_lun_uuid(vol_name) return {'provider_location': 'macrosan uuid:%s' % lun_uuid} @synchronized(lock_name) @record_request_id @volume_utils.trace def create_volume_from_snapshot(self, volume, snapshot): """Create a volume from a snapshot.""" snapshot_volume = snapshot['volume'] provider = snapshot['provider_location'] m = re.findall(r'pointid: (\d+)', provider) pointid = int(m[0]) vol_name = self._volume_name(volume) snp_name = self._snapshot_name(snapshot['id']) snp_vol_name = self._volume_name(snapshot_volume) params = self._parse_volume_params(volume) self._check_volume_params(params) return self._create_volume_from_snapshot(vol_name, volume['size'], params, snp_name, pointid, snp_vol_name, snapshot['volume_size']) def _create_cloned_volume(self, vol_name, vol_size, vol_params, src_vol_name, src_vol_size, snp_name): pointid = self._create_snapshot(snp_name, src_vol_name, src_vol_size) try: return self._create_volume_from_snapshot(vol_name, vol_size, 
vol_params, snp_name, pointid, src_vol_name, src_vol_size) finally: self._delete_snapshot(snp_name, src_vol_name, pointid) @record_request_id @volume_utils.trace def create_cloned_volume(self, volume, src_vref): """Create a clone of the specified volume.""" vol_name = volume['id'] src_vol_name = self._volume_name(src_vref) snapshotid =\ src_vref['id'][:12] + timeutils.utcnow().strftime('%Y%m%d%H%M%S%f') snp_name = self._snapshot_name(snapshotid) params = self._parse_volume_params(volume) self._check_volume_params(params) return self._create_cloned_volume(vol_name, volume['size'], params, src_vol_name, src_vref['size'], snp_name) def _extend_volume(self, name, moresize, params): if params['replication_enabled']: raise Exception( 'Volume %s has replication enabled, cannot extend' % name) if params['sdas']: self.client.suspend_dalun(name) raids = self.client.get_raid_list_to_create_lun(self.pool, moresize) self.client.extend_lun(name, raids, moresize) raids = self.sdas_client.get_raid_list_to_create_lun(self.pool, moresize) self.sdas_client.extend_lun(name, raids, moresize) self.client.resume_dalun(name) else: raids = self.client.get_raid_list_to_create_lun(self.pool, moresize) self.client.extend_lun(name, raids, moresize) @synchronized(lock_name) @record_request_id @volume_utils.trace def extend_volume(self, volume, new_size): """Extend a volume.""" name = self._volume_name(volume) moresize = self._size_str_to_int(new_size - int(volume['size'])) params = self._parse_volume_params(volume) self._extend_volume(name, moresize, params) def ensure_export(self, context, volume): """Synchronously recreates an export for a volume.""" pass def create_export(self, context, volume, connector): """Export the volume.""" pass def remove_export(self, context, volume): """Remove an export for a volume.""" pass @record_request_id def _update_volume_stats(self): data = {} pool = {} total, free, thin_unalloced = self.client.get_pool_cap(self.pool) pool['location_info'] = self.device_uuid pool['pool_name'] = self.pool pool['total_capacity_gb'] = total pool['free_capacity_gb'] = free + thin_unalloced pool['reserved_percentage'] = self.configuration.safe_get( 'reserved_percentage') pool['max_over_subscription_ratio'] = self.configuration.safe_get( 'max_over_subscription_ratio') pool['QoS_support'] = True pool['multiattach'] = True pool['lun_mode'] = True pool['replication_mode'] = [] if self.replica_client: pool['replication_enabled'] = 'True' pool['replication_mode'].append('async') if self.sdas_client: pool['replication_enabled'] = 'True' pool['sdas'] = 'True' pool['replication_mode'].append('sync') if len(pool['replication_mode']) == 0: del pool['replication_mode'] data['pools'] = [pool] data["volume_backend_name"] = self.volume_backend_name data["vendor_name"] = 'MacroSAN' data["driver_version"] = version data["storage_protocol"] = self.storage_protocol self._stats = data @record_request_id @volume_utils.trace def update_migrated_volume(self, ctxt, volume, new_volume, original_volume_status=None): """Return model update for migrated volume.""" original_name = self._volume_name(volume) cur_name = self._volume_name(new_volume) if self.client.lun_exists(original_name): self.client.backup_lun_name_to_rename_file(cur_name, original_name) else: if original_volume_status == 'available': try: self.client.rename_lun(cur_name, original_name) except Exception: LOG.warning('========== failed to rename ' '%(cur_name)s to %(original_name)s', {'cur_name': cur_name, 'original_name': original_name}) name_id = 
new_volume['_name_id'] or new_volume['id'] return {'_name_id': name_id, 'provider_location': new_volume['provider_location']} @synchronized(lock_name) @record_request_id @volume_utils.trace def initialize_connection_snapshot(self, snapshot, connector, **kwargs): volume = snapshot['volume'] provider = snapshot['provider_location'] m = re.findall(r'pointid: (\d+)', provider) pointid = m[0] snp_name = self._snapshot_name(snapshot['id']) snp_vol_name = self._volume_name(volume) self.client.create_snapshot_view(snp_name, snp_vol_name, pointid) try: conn = self._initialize_connection_snapshot(snp_name, connector) conn['data']['volume_id'] = snapshot['id'] return conn except Exception: with excutils.save_and_reraise_exception(): self.client.delete_snapshot_view(snp_name) def _initialize_connection_snapshot(self, snp_name, connector): raise NotImplementedError def terminate_connection_snapshot(self, snapshot, connector, **kwargs): snp_name = self._snapshot_name(snapshot['id']) self._terminate_connection_snapshot(snp_name, connector) self.client.delete_snapshot_view(snp_name) def _terminate_connection_snapshot(self, snp_name, connector): raise NotImplementedError @record_request_id def manage_existing_get_size(self, volume, external_ref): __, info, __ = self._get_existing_lun_info(external_ref) size = int(math.ceil(info['size'])) return size @synchronized(lock_name) @record_request_id @volume_utils.trace def manage_existing(self, volume, external_ref): vol_params = self._parse_volume_params(volume) self._check_volume_params(vol_params) if vol_params['qos-strategy']: raise exception.VolumeBackendAPIException( data=_('Import qos-strategy not supported')) pool = volume_utils.extract_host(volume.host, 'pool') name, info, params = self._get_existing_lun_info(external_ref) if pool != info['pool']: msg = _("LUN %(name)s does not belong to the pool: " "%(pool)s."), {'name': name, 'pool': pool} raise exception.ManageExistingInvalidReference( existing_ref=external_ref, reason=msg) if params['sdas'] and params['replication_enabled']: msg = _('LUN %(name)s sdas and replication ' 'enabled at same time'), {'name': name} raise exception.VolumeBackendAPIException(data=msg) if replication_synced(vol_params) and params['sdas']: params.update({'sdas': False, 'replication_mode': 'sync', 'replication_enabled': True}) def notequal(attr): return vol_params[attr] != params[attr] if (notequal('replication_enabled') or notequal('replication_mode') or notequal('sdas') or notequal('lun_mode')): msg = _("Volume type: %(vol_params)s doesn't equal " "to existing lun: " "%(params)s"), {'vol_params': vol_params, 'params': params} raise exception.VolumeBackendAPIException(data=msg) rmt_client = None if params['sdas']: rmt_client = self.sdas_client elif params['replication_enabled']: rmt_client = self.replica_client snp_res_name = self.client.get_snapshot_resource_name(name) self.client.rename_lun(name, volume['name']) if snp_res_name: self.client.rename_lun(snp_res_name, 'SR-%s' % volume['id']) if params['sdas'] or params['replication_enabled']: snp_res_name = rmt_client.get_snapshot_resource_name(name) rmt_client.rename_lun(name, volume['name']) if snp_res_name: rmt_client.rename_lun(snp_res_name, 'SR-%s' % volume['id']) lun_uuid = self.client.get_lun_uuid(volume['name']) return {'provider_location': 'macrosan uuid:%s' % lun_uuid} def _get_existing_lun_info(self, external_ref): name = external_ref.get('source-name') if not name: raise exception.ManageExistingInvalidReference( existing_ref=external_ref, reason=_('No source-name to 
get existing lun')) info = self.client.get_lun_base_info(name) params = { 'qos-strategy': '', 'replication_enabled': False, 'replication_mode': 'async', 'sdas': False, 'lun_mode': False } sdas = self.client.dalun_exists(name) rep = self.client.replication_enabled(name) params['replication_enabled'] = rep params['sdas'] = sdas if info['lun_mode'] == 'thin': info['lun_mode'] = True else: info['lun_mode'] = False params['lun_mode'] = info['lun_mode'] return name, info, params def unmanage(self, volume): pass @synchronized(lock_name) @record_request_id @volume_utils.trace def manage_existing_snapshot(self, snapshot, existing_ref): volume = snapshot['volume'] src_name = self._get_existing_snapname(existing_ref).lstrip('_') src_name = self._snapshot_name(src_name) pointid = self.client.get_snapshot_pointid(volume['name'], src_name) snap_name = self._snapshot_name(snapshot['id']) self.client.rename_snapshot_point(volume['name'], pointid, snap_name) return {'provider_location': 'pointid: %s' % pointid} @record_request_id def manage_existing_snapshot_get_size(self, snapshot, existing_ref): volume = snapshot['volume'] return volume['size'] def _get_existing_snapname(self, external_ref): name = external_ref.get('source-name') if not name: raise exception.ManageExistingInvalidReference( existing_ref=external_ref, reason=_('No source-name to get existing snap')) return name def unmanage_snapshot(self, snapshot): pass def migration_valid(self, volume, host): if volume.volume_attachment: return False pool_name = host['capabilities'].get('pool_name', '') if pool_name == '': return False device_uuid = host['capabilities']['location_info'] if device_uuid != self.device_uuid: return False params = self._parse_volume_params(volume) if params['sdas'] or params['replication_enabled']: return False return True @synchronized(lock_name) @record_request_id @volume_utils.trace def migrate_volume(self, ctxt, volume, host): if not self.migration_valid(volume, host): return False, None size = self._size_str_to_int(volume['size']) params = self._parse_volume_params(volume) name = str(uuid.uuid4()) src_name = self._volume_name(volume) owner = self.client.get_lun_sp(src_name) pool = host['capabilities'].get('pool_name', self.pool) LOG.info('Migrating volume: %(volume)s, ' 'host: %(host)s, ' 'backend: %(volume_backend_name)s', {'volume': src_name, 'host': host, 'volume_backend_name': self.volume_backend_name}) self._create_volume(name, size, params, owner, pool) res_sz = int(max(int(size) * self.snapshot_resource_ratio, 1)) src_snp_res_exists = self.client.snapshot_resource_exists(src_name) if not src_snp_res_exists: raids = self.client.get_raid_list_to_create_lun(self.pool, res_sz) self.client.create_snapshot_resource(src_name, raids, res_sz) snp_res_exists = self.client.snapshot_resource_exists(name) if not snp_res_exists: raids = self.client.get_raid_list_to_create_lun(pool, res_sz) self.client.create_snapshot_resource(name, raids, res_sz) self.client.start_localclone_lun(src_name, name) while not self.client.localclone_completed(name): time.sleep(2) self.client.stop_localclone_lun(name) if not snp_res_exists: self.client.delete_snapshot_resource(name) if not src_snp_res_exists: self.client.delete_snapshot_resource(src_name) self._delete_volume(src_name, params) self.client.rename_lun(name, src_name) lun_uuid = self.client.get_lun_uuid(src_name) return True, {'provider_location': 'macrosan uuid:%s' % lun_uuid} def force_terminate_connection(self, name, force_connected=False): it_list = self.client.get_lun_it(name) 
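# Keep only the ITL mappings that are safe to drop: stale (not connected)
# mappings are always unmapped, while live ones are unmapped only when the
# caller forces it (e.g. terminate_connection called without a connector).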
it_list = [it for it in it_list if (force_connected or not it['connected'])] if len(it_list) > 0: for it in it_list: self.client.unmap_lun_to_it(name, it['initiator'], it['port']) @interface.volumedriver class MacroSANISCSIDriver(MacroSANBaseDriver, driver.ISCSIDriver): """ISCSI driver for MacroSan storage arrays. Version history: .. code-block:: none 1.0.0 - Initial driver 1.0.1 - Adjust some log level and text prompts; Remove some useless functions; Add Cinder trace decorator. #1837920 """ VERSION = "1.0.1" def __init__(self, *args, **kwargs): """Initialize the driver.""" super(MacroSANISCSIDriver, self).__init__(*args, **kwargs) self.storage_protocol = constants.ISCSI def _do_setup(self): ports = self.client.get_iscsi_ports() for port in ports: if port['port_name'] == '' and port['ip'] != '0': self.client.create_target(port['port'], type='iscsi') if self.sdas_client: ports = self.sdas_client.get_iscsi_ports() for port in ports: if port['port_name'] == '' and port['ip'] != '0': self.sdas_client.create_target(port['port'], type='iscsi') def _get_iscsi_ports(self, dev_client, host): ha_state = dev_client.get_ha_state() if host in self.client_info: iscsi_sp1 = self.client_info[host]['sp1_port'] iscsi_sp2 = self.client_info[host]['sp2_port'] else: iscsi_sp1 = self.client_info['default']['sp1_port'] iscsi_sp2 = self.client_info['default']['sp2_port'] ports = [] if ha_state['sp1'] in ['single', 'double', 'idle']: ports.extend(iscsi_sp1.split(',')) if ha_state['sp2'] in ['single', 'double', 'idle']: ports.extend(iscsi_sp2.split(',')) all_ports = {p['port']: p for p in dev_client.get_iscsi_ports()} return [all_ports[p] for p in ports] def _map_initr_tgt(self, dev_client, itl_client_name, initr, ports): if not dev_client.get_client(itl_client_name): dev_client.create_client(itl_client_name) if not dev_client.initiator_exists(initr): dev_client.create_initiator(initr, itl_client_name, type='iscsi') if not dev_client.is_initiator_mapped_to_client(initr, itl_client_name): dev_client.map_initiator_to_client(initr, itl_client_name) for p in ports: port_name = p['port_name'] dev_client.map_target_to_initiator(port_name, initr) def _unmap_itl(self, dev_client, itl_client_name, wwns, ports, volume_name): wwn = wwns[0] for p in ports: port_name = p['port_name'] dev_client.unmap_lun_to_it(volume_name, wwn, port_name) def _map_itl(self, dev_client, wwn, ports, volume_name, hint_lun_id): lun_id = hint_lun_id exists = False for p in ports: port_name = p['port_name'] exists = dev_client.map_lun_to_it(volume_name, wwn, port_name, hint_lun_id) if exists and lun_id == hint_lun_id: lun_id = self.client.get_lun_id(wwn, port_name, volume_name) return lun_id def _get_unused_lun_id(self, wwn, dev_client, ports, sdas_client, sdas_ports): id_list = set(range(0, 511)) for p in ports: port_name = p['port_name'] tmp_list = dev_client.get_it_unused_id_list('iscsi', wwn, port_name) id_list = id_list.intersection(tmp_list) for p in sdas_ports: port_name = p['port_name'] tmp_list = sdas_client.get_it_unused_id_list('iscsi', wwn, port_name) id_list = id_list.intersection(tmp_list) return id_list.pop() def _initialize_connection(self, name, vol_params, host, wwns): client_name = self._get_client_name(host) wwn = wwns[0] LOG.debug('initialize_connection, initiator: %(wwpns)s,' 'volume name: %(volume)s.', {'wwpns': wwns, 'volume': name}) ports = self._get_iscsi_ports(self.client, host) self._map_initr_tgt(self.client, client_name, wwn, ports) if vol_params['sdas']: sdas_ports = self._get_iscsi_ports(self.sdas_client, host) 
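# SDAS path: map the initiator on the partner array as well and pick a LUN id
# that is unused on every port of both arrays, so the host sees the same LUN
# number on all returned portals.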
self._map_initr_tgt(self.sdas_client, client_name, wwn, sdas_ports) lun_id = self._get_unused_lun_id(wwn, self.client, ports, self.sdas_client, sdas_ports) self._map_itl(self.sdas_client, wwn, sdas_ports, name, lun_id) lun_id = self._map_itl(self.client, wwn, ports, name, lun_id) ports = ports + sdas_ports else: lun_id = self._get_unused_lun_id(wwn, self.client, ports, None, {}) lun_id = self._map_itl(self.client, wwn, ports, name, lun_id) properties = {'target_discovered': False, 'target_portal': '%s:3260' % ports[0]['ip'], 'target_iqn': ports[0]['target'], 'target_lun': lun_id, 'target_iqns': [p['target'] for p in ports], 'target_portals': ['%s:3260' % p['ip'] for p in ports], 'target_luns': [lun_id] * len(ports)} LOG.info('initialize_connection, iSCSI properties: %(properties)s', {'properties': properties}) return {'driver_volume_type': 'iscsi', 'data': properties} @synchronized(lock_name) @record_request_id @volume_utils.trace def initialize_connection(self, volume, connector): """Allow connection to connector and return connection info.""" name = self._volume_name(volume) params = self._parse_volume_params(volume) conn = self._initialize_connection(name, params, connector['host'], [connector['initiator']]) conn['data']['volume_id'] = volume['id'] return conn def _unmap_initr_tgt(self, dev_client, itl_client_name, wwn): for p in dev_client.get_iscsi_ports(): port_name = p['port_name'] if dev_client.it_exists(wwn, port_name): dev_client.unmap_target_from_initiator(port_name, wwn) if dev_client.initiator_exists(wwn): dev_client.unmap_initiator_from_client(wwn, itl_client_name) dev_client.delete_initiator(wwn) def _terminate_connection(self, name, volume_params, host, wwns): client_name = self._get_client_name(host) ports = self._get_iscsi_ports(self.client, host) self._unmap_itl(self.client, client_name, wwns, ports, name) if volume_params['sdas']: self._unmap_itl(self.sdas_client, client_name, wwns, ports, name) data = dict() data['ports'] = ports data['client'] = client_name return {'driver_volume_type': 'iSCSI', 'data': data} @synchronized(lock_name) @record_request_id @volume_utils.trace def terminate_connection(self, volume, connector, **kwargs): """Disallow connection from connector.""" name = self._volume_name(volume) conn = None if not connector: self.force_terminate_connection(name, True) else: params = self._parse_volume_params(volume) conn = self._terminate_connection(name, params, connector['host'], [connector['initiator']]) return conn def _initialize_connection_snapshot(self, snp_name, connector): return self._initialize_connection(snp_name, None, connector['host'], [connector['initiator']]) def _terminate_connection_snapshot(self, snp_name, connector): return self._terminate_connection(snp_name, None, connector['host'], [connector['initiator']]) @interface.volumedriver class MacroSANFCDriver(MacroSANBaseDriver, driver.FibreChannelDriver): """FC driver for MacroSan storage arrays. Version history: .. code-block:: none 1.0.0 - Initial driver 1.0.1 - Adjust some log level and text prompts; Remove some useless functions; Add Cinder trace decorator. 
#1837920 """ VERSION = "1.0.1" def __init__(self, *args, **kwargs): """Initialize the driver.""" super(MacroSANFCDriver, self).__init__(*args, **kwargs) self.storage_protocol = constants.FC self.fcsan_lookup_service = None self.use_sp_port_nr = self.configuration.macrosan_fc_use_sp_port_nr self.keep_mapped_ports = \ self.configuration.macrosan_fc_keep_mapped_ports def _do_setup(self): self.fcsan_lookup_service = fczm_utils.create_lookup_service() ports = self.client.get_fc_ports() for port in ports: if port['port_name'] == '': self.client.create_target(port['port']) if self.sdas_client: ports = self.sdas_client.get_fc_ports() for port in ports: if port['port_name'] == '': self.sdas_client.create_target(port['port']) def _strip_wwn_colon(self, wwn_str): return wwn_str.replace(':', '') def _format_wwn_with_colon(self, wwn_str): wwn_str = wwn_str.replace(":", "") return (':'.join([wwn_str[i:i + 2] for i in range(0, len(wwn_str), 2)])).lower() def _select_fc_ports(self, ports_in_storage, ports_in_fabric): selected = [] for sp in [1, 2]: n = 0 for p in ports_in_storage: if (p['sp'] == sp and p['online'] == 1 and p['wwn'] in ports_in_fabric): selected.append({'port_name': p['port_name'], 'wwn': p['wwn']}) n += 1 if n >= self.use_sp_port_nr: break return selected def _get_initr_port_map(self, dev_client, wwns): initr_port_map = {} ports_in_storage = dev_client.get_fc_ports() if self.fcsan_lookup_service is not None: mapping = (self.fcsan_lookup_service .get_device_mapping_from_network( wwns, [p['wwn'] for p in ports_in_storage])) for fabric in mapping: wwns = mapping[fabric]['target_port_wwn_list'] mapping[fabric]['target_port_wwn_list'] = ( [self._format_wwn_with_colon(wwn) for wwn in wwns]) wwns = mapping[fabric]['initiator_port_wwn_list'] mapping[fabric]['initiator_port_wwn_list'] = ( [self._format_wwn_with_colon(wwn) for wwn in wwns]) for fabric in mapping: ports_in_fabric = mapping[fabric]['target_port_wwn_list'] selected_ports = self._select_fc_ports(ports_in_storage, ports_in_fabric) for initr in mapping[fabric]['initiator_port_wwn_list']: initr_port_map[initr] = selected_ports else: initr_port_map = {} for wwn in wwns: for port in ports_in_storage: if port['initr'] == wwn: initr_port_map[wwn] = [port] break return initr_port_map def _map_initr_tgt_do(self, dev_client, itl_client_name, initr_port_map, mapped_ports): for wwn in initr_port_map: if wwn in mapped_ports: continue if not dev_client.initiator_exists(wwn): dev_client.create_initiator(wwn, wwn) if not dev_client.is_initiator_mapped_to_client(wwn, itl_client_name): dev_client.map_initiator_to_client(wwn, itl_client_name) for p in initr_port_map[wwn]: port_name = p['port_name'] dev_client.map_target_to_initiator(port_name, wwn) def _unmap_initr_tgt(self, dev_client, client_name, mapped_ports): for wwn in mapped_ports: for p in mapped_ports[wwn]: port_name = p['port_name'] if dev_client.it_exists(wwn, port_name): dev_client.unmap_target_from_initiator(port_name, wwn) if dev_client.initiator_exists(wwn): dev_client.unmap_initiator_from_client(wwn, client_name) dev_client.delete_initiator(wwn) def _map_initr_tgt(self, dev_client, itl_client_name, wwns): if not dev_client.get_client(itl_client_name): dev_client.create_client(itl_client_name) initr_port_map = {} mapped_ports = dev_client.get_fc_initr_mapped_ports(wwns) has_port_not_mapped = not all(wwn in mapped_ports for wwn in wwns) if has_port_not_mapped: initr_port_map = self._get_initr_port_map(dev_client, wwns) initr_port_map.update(mapped_ports) if has_port_not_mapped: 
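            # Clarifying note, not part of the original driver code: for
            # every initiator that was not already mapped, _map_initr_tgt_do
            # creates the initiator on the array if needed, attaches it to
            # the client object, and maps each selected target port to it;
            # initiators already present in mapped_ports are skipped because
            # their mapping exists. Port selection itself happens in
            # _get_initr_port_map: with a zone lookup service it picks up to
            # use_sp_port_nr online ports per SP visible in the fabric,
            # otherwise it reuses the port the array already associates with
            # the initiator.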
self._map_initr_tgt_do(dev_client, itl_client_name, initr_port_map, mapped_ports) return has_port_not_mapped, initr_port_map def _map_itl(self, dev_client, initr_port_map, volume_name, hint_lun_id): lun_id = hint_lun_id exists = False for wwn in initr_port_map: for p in initr_port_map[wwn]: port_name = p['port_name'] exists = dev_client.map_lun_to_it(volume_name, wwn, port_name, lun_id) if exists and lun_id == hint_lun_id: lun_id = dev_client.get_lun_id(wwn, port_name, volume_name) return lun_id def _get_unused_lun_id(self, dev_client, initr_port_map, sdas_client, sdas_initr_port_map): id_list = set(range(0, 511)) for wwn in initr_port_map: for p in initr_port_map[wwn]: port_name = p['port_name'] tmp_list = dev_client.get_it_unused_id_list('fc', wwn, port_name) id_list = id_list.intersection(tmp_list) for wwn in sdas_initr_port_map: for p in sdas_initr_port_map[wwn]: port_name = p['port_name'] tmp_list = sdas_client.get_it_unused_id_list('fc', wwn, port_name) id_list = id_list.intersection(tmp_list) return id_list.pop() def _initialize_connection(self, name, vol_params, host, wwns): client_name = self._get_client_name(host) LOG.info('initialize_connection, initiator: %(wwpns)s, ' 'volume name: %(volume)s.', {'wwpns': wwns, 'volume': name}) has_port_not_mapped, initr_port_map = ( self._map_initr_tgt(self.client, client_name, wwns)) LOG.debug('initr_port_map: %(initr_port_map)s', {'initr_port_map': initr_port_map}) if vol_params and vol_params['sdas']: sdas_has_port_not_mapped, sdas_initr_port_map = ( self._map_initr_tgt(self.sdas_client, client_name, wwns)) lun_id = self._get_unused_lun_id(self.client, initr_port_map, self.sdas_client, sdas_initr_port_map) LOG.debug('sdas_initr_port_map: %(sdas_initr_port_map)s', {'sdas_initr_port_map': sdas_initr_port_map}) self._map_itl(self.sdas_client, sdas_initr_port_map, name, lun_id) lun_id = self._map_itl(self.client, initr_port_map, name, lun_id) for initr, ports in sdas_initr_port_map.items(): if len(ports): initr_port_map[initr].extend(ports) has_port_not_mapped = (has_port_not_mapped or sdas_has_port_not_mapped) else: lun_id = self._get_unused_lun_id(self.client, initr_port_map, None, {}) lun_id = self._map_itl(self.client, initr_port_map, name, lun_id) tgt_wwns = list(set(self._strip_wwn_colon(p['wwn']) for wwn in initr_port_map for p in initr_port_map[wwn])) tgt_wwns.sort() properties = {'target_lun': lun_id, 'target_discovered': True, 'target_wwn': tgt_wwns} if has_port_not_mapped and self.fcsan_lookup_service is not None: initr_tgt_map = {} for initr, ports in initr_port_map.items(): initr = self._strip_wwn_colon(initr) initr_tgt_map[initr] = ( [self._strip_wwn_colon(p['wwn']) for p in ports]) properties['initiator_target_map'] = initr_tgt_map LOG.info('initialize_connection, FC properties: %(properties)s', {'properties': properties}) return {'driver_volume_type': 'fibre_channel', 'data': properties} @synchronized(lock_name) @record_request_id @volume_utils.trace def initialize_connection(self, volume, connector): """Allow connection to connector and return connection info.""" name = self._volume_name(volume) params = self._parse_volume_params(volume) wwns = [self._format_wwn_with_colon(wwns) for wwns in connector['wwnns']] conn = self._initialize_connection(name, params, connector['host'], wwns) conn['data']['volume_id'] = volume['id'] fczm_utils.add_fc_zone(conn) return conn def _unmap_itl(self, dev_client, name, itl_client_name, wwns): mapped_ports = dev_client.get_fc_initr_mapped_ports(wwns) if len(mapped_ports) == 0: return [], {} for 
wwn, ports in mapped_ports.items(): for p in ports: port_name = p['port_name'] dev_client.unmap_lun_to_it(name, wwn, port_name) ports, initr_tgt_map = [], {} if (not self.keep_mapped_ports and not dev_client.has_initiators_mapped_any_lun(wwns)): mapped_ports = dev_client.get_fc_initr_mapped_ports(wwns) initr_tgt_map = {self._strip_wwn_colon(wwn): [self._strip_wwn_colon(p['wwn']) for p in mapped_ports[wwn]] for wwn in wwns} ports = list(set(self._strip_wwn_colon(p['wwn']) for ports in mapped_ports.values() for p in ports)) self._unmap_initr_tgt(dev_client, itl_client_name, mapped_ports) if self.fcsan_lookup_service is None: initr_tgt_map = {} return ports, initr_tgt_map def _terminate_connection(self, name, vol_params, host, wwns): client_name = self._get_client_name(host) ports, initr_tgt_map = self._unmap_itl(self.client, name, client_name, wwns) if vol_params and vol_params['sdas']: sdas_ports, sdas_initr_tgt_map = ( self._unmap_itl(self.sdas_client, name, client_name, wwns)) ports.extend(sdas_ports) for initr, tgt_wwns in sdas_initr_tgt_map.items(): if len(tgt_wwns): initr_tgt_map[initr].extend(tgt_wwns) data = {} if ports: data['target_wwn'] = ports if initr_tgt_map: data['initiator_target_map'] = initr_tgt_map LOG.info('terminate_connection, data: %(data)s', {'data': data}) return {'driver_volume_type': 'fibre_channel', 'data': data} @synchronized(lock_name) @record_request_id @volume_utils.trace def terminate_connection(self, volume, connector, **kwargs): """Disallow connection from connector.""" name = self._volume_name(volume) conn = None if not connector: self.force_terminate_connection(name, True) conn = {'driver_volume_type': 'fibre_channel', 'data': {}} else: params = self._parse_volume_params(volume) wwns = [self._format_wwn_with_colon(wwns) for wwns in connector['wwpns']] attachments = volume.volume_attachment hostnum = 0 for i in attachments: if connector['host'] == i['attached_host']: hostnum += 1 if hostnum > 1: pass else: conn = self._terminate_connection(name, params, connector['host'], wwns) fczm_utils.remove_fc_zone(conn) return conn def _initialize_connection_snapshot(self, snp_name, connector): wwns = [self._format_wwn_with_colon(wwns) for wwns in connector['wwpns']] return self._initialize_connection(snp_name, None, connector['host'], wwns) def _terminate_connection_snapshot(self, snp_name, connector): wwns = [self._format_wwn_with_colon(wwns) for wwns in connector['wwpns']] return self._terminate_connection(snp_name, None, connector['host'], wwns) ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1759315577.379121 cinder-27.0.0/cinder/volume/drivers/nec/0000775000175000017500000000000000000000000020065 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/drivers/nec/__init__.py0000664000175000017500000000000000000000000022164 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/drivers/nec/cli.py0000664000175000017500000007661600000000000021226 0ustar00zuulzuul00000000000000# # Copyright (c) 2016 NEC Corporation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import os import re import select import time import traceback from oslo_log import log as logging from oslo_utils import excutils from oslo_utils import units from cinder import coordination from cinder import exception from cinder.i18n import _ from cinder import ssh_utils from cinder import utils LOG = logging.getLogger(__name__) retry_msgids = ['iSM31005', 'iSM31015', 'iSM42408', 'iSM42412', 'iSM19411'] def get_sleep_time_for_clone(retry_count): if retry_count < 19: return int(10.0 * (1.1 ** retry_count)) else: return 60 class MStorageISMCLI(object): """SSH client.""" def __init__(self, properties): super(MStorageISMCLI, self).__init__() self._sshpool = None self._properties = properties def _execute(self, command, expected_status=[0], raise_exec=True): return self._sync_execute(command, self._properties['diskarray_name'], expected_status, raise_exec) @coordination.synchronized('mstorage_ismcli_execute_{diskarray_name}') def _sync_execute(self, command, diskarray_name, expected_status=[0], raise_exec=True): retry_flag = True retry_count = 0 while retry_flag is True: try: out, err, status = self._cli_execute(command, expected_status, False) if status != 0: errflg = 0 errnum = out + err LOG.debug('ismcli failed (errnum=%s).', errnum) for retry_msgid in retry_msgids: if errnum.find(retry_msgid) >= 0: LOG.debug('`%(command)s` failed. ' '%(name)s %(errnum)s ' 'retry_count=%(retry_count)d', {'command': command, 'name': __name__, 'errnum': errnum, 'retry_count': retry_count}) errflg = 1 break if errflg == 1: retry_count += 1 if retry_count >= 60: msg = (_('Timeout `%(command)s`.' ' status=%(status)d, ' 'out="%(out)s", ' 'err="%(err)s".') % {'command': command, 'status': status, 'out': out, 'err': err}) raise exception.APITimeout(msg) time.sleep(5) continue else: if raise_exec is True: msg = _('Command `%s` failed.') % command raise exception.VolumeBackendAPIException(data=msg) except EOFError: with excutils.save_and_reraise_exception() as ctxt: LOG.debug('EOFError has occurred. ' '%(name)s retry_count=%(retry_count)d', {'name': __name__, 'retry_count': retry_count}) retry_count += 1 if retry_count < 60: ctxt.reraise = False time.sleep(5) continue retry_flag = False return out, err, status def _execute_nolock(self, command, expected_status=[0], raise_exec=True): retry_flag = True retry_count = 0 while retry_flag is True: try: out, err, status = self._cli_execute(command, expected_status, raise_exec) except EOFError: with excutils.save_and_reraise_exception() as ctxt: LOG.debug('EOFError has occurred. 
' '%(name)s retry_count=%(retry_count)d', {'name': __name__, 'retry_count': retry_count}) retry_count += 1 if retry_count < 60: ctxt.reraise = False time.sleep(5) continue retry_flag = False return out, err, status def _cli_execute(self, command, expected_status=[0], raise_exec=True): if not self._sshpool: LOG.debug('ssh_utils.SSHPool execute.') self._sshpool = ssh_utils.SSHPool( self._properties['cli_fip'], self._properties['ssh_pool_port_number'], self._properties['ssh_conn_timeout'], self._properties['cli_user'], self._properties['cli_password'], privatekey=self._properties['cli_privkey']) with self._sshpool.item() as ssh: LOG.debug('`%s` executing...', command) stdin, stdout, stderr = ssh.exec_command(command) stdin.close() channel = stdout.channel tmpout, tmperr = b'', b'' while 1: select.select([channel], [], []) if channel.recv_ready(): tmpout += channel.recv(4096) continue if channel.recv_stderr_ready(): tmperr += channel.recv_stderr(4096) continue if channel.exit_status_ready(): status = channel.recv_exit_status() break LOG.debug('`%(command)s` done. status=%(status)d.', {'command': command, 'status': status}) out = utils.convert_str(tmpout) err = utils.convert_str(tmperr) if expected_status is not None and status not in expected_status: LOG.debug('`%(command)s` failed. status=%(status)d, ' 'out="%(out)s", err="%(err)s".', {'command': command, 'status': status, 'out': out, 'err': err}) if raise_exec is True: msg = _('Command `%s` failed.') % command raise exception.VolumeBackendAPIException(data=msg) return out, err, status def view_all(self, conf_ismview_path=None, delete_ismview=True, cmd_lock=True): if self._properties['queryconfig_view'] is True: command = 'clioutmsg xml; iSMview' if self._properties['ismview_alloptimize'] is True: command += ' --alloptimize' else: command += ' -all' else: command = 'iSMquery -cinder -xml -all' if cmd_lock is True: out, err, status = self._execute(command) else: out, err, status = self._execute_nolock(command) exstats = re.compile(r"(.*)ExitStatus(.*)\n") tmpout = exstats.sub('', out) out = tmpout if conf_ismview_path is not None: if delete_ismview: if os.path.exists(conf_ismview_path): os.remove(conf_ismview_path) LOG.debug('Remove clioutmsg xml to %s.', conf_ismview_path) else: with open(conf_ismview_path, 'w+') as f: f.write(out) LOG.debug('Wrote clioutmsg xml to %s.', conf_ismview_path) return out def ldbind(self, name, pool, ldn, size): """Bind an LD and attach a nickname to it.""" errnum = "" cmd = ('iSMcfg ldbind -poolnumber %(poolnumber)d -ldn %(ldn)d ' '-capacity %(capacity)d -immediate' % {'poolnumber': pool, 'ldn': ldn, 'capacity': size}) out, err, status = self._execute(cmd, [0], False) errnum = err if status != 0: return False, errnum cmd = ('iSMcfg nickname -ldn %(ldn)d -newname %(newname)s ' '-immediate' % {'ldn': ldn, 'newname': name}) self._execute(cmd) return True, errnum def unbind(self, name): """Unbind an LD.""" cmd = 'iSMcfg ldunbind -ldname %s' % name self._execute(cmd) def expand(self, ldn, capacity): """Expand a LD.""" cmd = ('iSMcfg ldexpand -ldn %(ldn)d -capacity %(capacity)d ' '-unit gb' % {'ldn': ldn, 'capacity': capacity}) self._execute(cmd) def addldset_fc(self, ldsetname, connector): """Create new FC LD Set.""" cmd = 'iSMcfg addldset -ldset LX:%s -type fc' % ldsetname out, err, status = self._execute(cmd, [0], False) if status != 0: return False for wwpn in connector['wwpns']: length = len(wwpn) setwwpn = '-'.join([wwpn[i:i + 4] for i in range(0, length, 4)]) setwwpn = setwwpn.upper() cmd = ('iSMcfg addldsetpath 
-ldset LX:%(name)s -path %(path)s' % {'name': ldsetname, 'path': setwwpn}) out, err, status = self._execute(cmd, [0], False) if status != 0: return False return True def addldset_iscsi(self, ldsetname, connector): """Create new iSCSI LD Set.""" cmd = ('iSMcfg addldset -ldset LX:%s -type iscsi' % ldsetname) out, err, status = self._execute(cmd, [0], False) if status != 0: return False cmd = ('iSMcfg addldsetinitiator' ' -ldset LX:%(name)s -initiatorname %(initiator)s' % {'name': ldsetname, 'initiator': connector['initiator']}) out, err, status = self._execute(cmd, [0], False) if status != 0: return False return True def addldsetld(self, ldset, ldname, lun=None): """Add an LD to specified LD Set.""" if lun is None: cmd = ('iSMcfg addldsetld -ldset %(ldset)s ' '-ldname %(ldname)s' % {'ldset': ldset, 'ldname': ldname}) self._execute(cmd) else: cmd = ('iSMcfg addldsetld -ldset %(ldset)s -ldname %(ldname)s ' '-lun %(lun)d' % {'ldset': ldset, 'ldname': ldname, 'lun': lun}) self._execute(cmd) def delldsetld(self, ldset, ldname): """Delete an LD from specified LD Set.""" rtn = True errnum = "" cmd = ('iSMcfg delldsetld -ldset %(ldset)s ' '-ldname %(ldname)s' % {'ldset': ldset, 'ldname': ldname}) out, err, status = self._execute(cmd, [0], False) errnum = err if status != 0: rtn = False return rtn, errnum def changeldname(self, ldn, new_name, old_name=None): """Rename nickname of LD.""" if old_name is None: cmd = ('iSMcfg nickname -ldn %(ldn)d -newname %(newname)s ' '-immediate' % {'ldn': ldn, 'newname': new_name}) self._execute(cmd) else: cmd = ('iSMcfg nickname -ldname %(ldname)s ' '-newname %(newname)s' % {'ldname': old_name, 'newname': new_name}) self._execute(cmd) def setpair(self, mvname, rvname): """Set pair.""" cmd = ('iSMrc_pair -pair -mv %(mv)s -mvflg ld ' '-rv %(rv)s -rvflg ld' % {'mv': mvname, 'rv': rvname}) self._execute(cmd) LOG.debug('Pair command completed. MV = %(mv)s RV = %(rv)s.', {'mv': mvname, 'rv': rvname}) def unpair(self, mvname, rvname, flag): """Unset pair.""" if flag == 'normal': cmd = ('iSMrc_pair -unpair -mv %(mv)s -mvflg ld ' '-rv %(rv)s -rvflg ld' % {'mv': mvname, 'rv': rvname}) self._execute(cmd) elif flag == 'force': cmd = ('iSMrc_pair -unpair -mv %(mv)s -mvflg ld ' '-rv %(rv)s -rvflg ld -force all' % {'mv': mvname, 'rv': rvname}) self._execute(cmd) else: LOG.debug('unpair flag ERROR. flag = %s', flag) LOG.debug('Unpair command completed. MV = %(mv)s, RV = %(rv)s.', {'mv': mvname, 'rv': rvname}) def replicate(self, mvname, rvname, flag): if flag == 'full': cmd = ('iSMrc_replicate -mv %(mv)s -mvflg ld ' '-rv %(rv)s -rvflg ld -nowait -cprange full ' '-cpmode bg' % {'mv': mvname, 'rv': rvname}) self._execute(cmd) else: cmd = ('iSMrc_replicate -mv %(mv)s -mvflg ld ' '-rv %(rv)s -rvflg ld -nowait -cpmode bg' % {'mv': mvname, 'rv': rvname}) self._execute(cmd) LOG.debug('Replicate command completed. 
MV = %(mv)s RV = %(rv)s.', {'mv': mvname, 'rv': rvname}) def separate(self, mvname, rvname, flag): """Separate for backup.""" if flag == 'backup': cmd = ('iSMrc_separate -mv %(mv)s -mvflg ld ' '-rv %(rv)s -rvflg ld ' '-rvacc ro -rvuse complete -nowait' % {'mv': mvname, 'rv': rvname}) self._execute(cmd) elif flag == 'restore' or flag == 'clone': cmd = ('iSMrc_separate -mv %(mv)s -mvflg ld ' '-rv %(rv)s -rvflg ld ' '-rvacc rw -rvuse immediate -nowait' % {'mv': mvname, 'rv': rvname}) self._execute(cmd) elif flag == 'esv_restore' or flag == 'migrate': cmd = ('iSMrc_separate -mv %(mv)s -mvflg ld ' '-rv %(rv)s -rvflg ld ' '-rvacc rw -rvuse complete -nowait' % {'mv': mvname, 'rv': rvname}) self._execute(cmd) else: LOG.debug('separate flag ERROR. flag = %s', flag) LOG.debug('Separate command completed. MV = %(mv)s RV = %(rv)s.', {'mv': mvname, 'rv': rvname}) def query_MV_RV_status(self, ldname, rpltype): if rpltype == 'MV': cmd = ('iSMrc_query -mv %s -mvflg ld | ' 'while builtin read line;' 'do if [[ "$line" =~ "Sync State" ]]; ' 'then builtin echo ${line:10};fi;' 'done' % ldname) out, err, status = self._execute(cmd) elif rpltype == 'RV': cmd = ('iSMrc_query -rv %s -rvflg ld | ' 'while builtin read line;' 'do if [[ "$line" =~ "Sync State" ]]; ' 'then builtin echo ${line:10};fi;' 'done' % ldname) out, err, status = self._execute(cmd) else: LOG.debug('rpltype flag ERROR. rpltype = %s', rpltype) query_status = out.strip() return query_status def query_MV_RV_name(self, ldname, rpltype): if rpltype == 'MV': cmd = ('iSMrc_query -mv %s -mvflg ld | ' 'while builtin read line;' 'do if [[ "$line" =~ "LD Name" ]]; ' 'then builtin echo ${line:7};fi;' 'done' % ldname) out, err, status = self._execute(cmd) out = out.replace(ldname, "") elif rpltype == 'RV': cmd = ('iSMrc_query -rv %s -rvflg ld | ' 'while builtin read line;' 'do if [[ "$line" =~ "LD Name" ]]; ' 'then builtin echo ${line:7};fi;' 'done' % ldname) out, err, status = self._execute(cmd) out = out.replace(ldname, "") else: LOG.debug('rpltype flag ERROR. rpltype = %s', rpltype) query_name = out.strip() return query_name def query_MV_RV_diff(self, ldname, rpltype): if rpltype == 'MV': cmd = ('iSMrc_query -mv %s -mvflg ld | ' 'while builtin read line;' 'do if [[ "$line" =~ "Separate Diff" ]]; ' 'then builtin echo ${line:13};fi;' 'done' % ldname) out, err, status = self._execute(cmd) elif rpltype == 'RV': cmd = ('iSMrc_query -rv %s -rvflg ld | ' 'while builtin read line;' 'do if [[ "$line" =~ "Separate Diff" ]]; ' 'then builtin echo ${line:13};fi;' 'done' % ldname) out, err, status = self._execute(cmd) else: LOG.debug('rpltype flag ERROR. rpltype = %s', rpltype) query_status = out.strip() return query_status def backup_restore(self, volume_properties, unpairWait, canPairing=True): # Setting Pair. flag = 'full' if canPairing is True: self.setpair(volume_properties['mvname'][3:], volume_properties['rvname'][3:]) else: rv_diff = self.query_MV_RV_diff(volume_properties['rvname'][3:], 'RV') rv_diff = int(rv_diff.replace('KB', ''), 10) // units.Ki if rv_diff != volume_properties['capacity']: flag = None # Replicate. self.replicate(volume_properties['mvname'][3:], volume_properties['rvname'][3:], flag) # Separate. 
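        # Clarifying note, not part of the original driver code: the
        # separate step splits the MV/RV pair so the RV becomes usable on
        # its own; the access mode depends on volume_properties['flag'] as
        # implemented in separate() above ('backup' -> read-only RV,
        # 'restore'/'clone' -> read-write with immediate use,
        # 'esv_restore'/'migrate' -> read-write once separation completes).
        # The unpairWait object supplied by the caller then polls the RV
        # sync state and unpairs the volumes once it reports 'separated'.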
self.separate(volume_properties['mvname'][3:], volume_properties['rvname'][3:], volume_properties['flag']) unpairProc = unpairWait(volume_properties, self) unpairProc.run() def get_pair_lds(self, ldname, lds): query_status = self.query_MV_RV_name(ldname[3:], 'MV') query_status = query_status.split('\n') query_status = [query for query in query_status if query != ''] LOG.debug('query_status=%s.', query_status) pair_lds = {} for rvname in query_status: rvname = self._properties['ld_backupname_format'] % rvname if rvname not in lds: LOG.debug('LD `%s` is RDR pair?', rvname) else: ld = lds[rvname] ldn = ld['ldn'] pair_lds[ldn] = ld LOG.debug('pair_lds=%s.', pair_lds) return pair_lds def snapshot_create(self, bvname, svname, poolnumber): """Snapshot create.""" cmd = ('iSMcfg generationadd -bvname %(bvname)s ' '-poolnumber %(poolnumber)d -count 1 ' '-svname %(svname)s' % {'bvname': bvname, 'poolnumber': poolnumber, 'svname': svname}) self._execute(cmd) cmd = ('iSMsc_create -bv %(bv)s -bvflg ld -sv %(sv)s ' '-svflg ld' % {'bv': bvname[3:], 'sv': svname}) self._execute(cmd) def snapshot_delete(self, bvname, svname): """Snapshot delete.""" query_status = self.query_BV_SV_status(bvname[3:], svname) if query_status == 'snap/active': cmd = ('iSMsc_delete -bv %(bv)s -bvflg ld -sv %(sv)s ' '-svflg ld' % {'bv': bvname[3:], 'sv': svname}) self._execute(cmd) while True: query_status = self.query_BV_SV_status(bvname[3:], svname) if query_status == 'snap/deleting': LOG.debug('Sleep 1 seconds Start') time.sleep(1) else: break else: LOG.debug('The snapshot data does not exist,' ' because already forced deletion.' ' bvname=%(bvname)s, svname=%(svname)s', {'bvname': bvname, 'svname': svname}) cmd = 'iSMcfg generationdel -bvname %s -count 1' % bvname self._execute(cmd) def snapshot_restore(self, bvname, svname): """Snapshot restore.""" query_status = self.query_BV_SV_status(bvname[3:], svname[3:]) if query_status == 'snap/active': cmd = ('iSMsc_restore -bv %(bv)s -bvflg ld -sv %(sv)s ' '-svflg ld -derivsv keep -nowait' % {'bv': bvname[3:], 'sv': svname[3:]}) self._execute(cmd) retry_count = 0 while True: query_status = self.query_BV_SV_status(bvname[3:], svname[3:]) if query_status == 'rst/exec': # Restoration is in progress. sleep_time = get_sleep_time_for_clone(retry_count) LOG.debug('Sleep %d seconds Start', sleep_time) time.sleep(sleep_time) retry_count += 1 elif query_status == 'snap/active': # Restoration was successful. break else: # Restoration failed. msg = (_('Failed to restore from snapshot. ' 'bvname=%(bvname)s, svname=%(svname)s, ' 'status=%(status)s') % {'bvname': bvname, 'svname': svname, 'status': query_status}) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) else: msg = (_('The snapshot does not exist or is ' 'not in snap/active status. 
' 'bvname=%(bvname)s, svname=%(svname)s, ' 'status=%(status)s') % {'bvname': bvname, 'svname': svname, 'status': query_status}) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) def query_BV_SV_status(self, bvname, svname): cmd = ('iSMsc_query -bv %(bv)s -bvflg ld -sv %(sv)s -svflg ld ' '-summary | ' 'while builtin read line;do ' 'if [[ "$line" =~ "%(line)s" ]]; ' 'then builtin echo "$line";fi;done' % {'bv': bvname, 'sv': svname, 'line': svname}) out, err, status = self._execute(cmd) delimiter = ') ' start = out.find(delimiter) if start == -1: return None start += len(delimiter) query_status = out[start:].split(' ')[0] LOG.debug('snap/state:%s.', query_status) return query_status def get_bvname(self, svname): cmd = ('iSMsc_query -sv %s -svflg ld -summary | ' 'while builtin read line;do ' 'if [[ "$line" =~ "LD Name" ]]; ' 'then builtin echo "$line";fi;done' % svname[3:]) out, err, status = self._execute(cmd) query_status = out[15:39].strip() return query_status def set_io_limit(self, ldname, qos_params, force_delete=True): upper = qos_params['upperlimit'] lower = qos_params['lowerlimit'] report = qos_params['upperreport'] if upper is None and lower is None and report is None: return cmd = 'iSMioc setlimit -ldname %s' % ldname if upper is not None: cmd += ' -upperlimit %d' % upper if lower is not None: cmd += ' -lowerlimit %d' % lower if report is not None: cmd += ' -upperreport %s' % report try: self._execute(cmd) except Exception: with excutils.save_and_reraise_exception(): if force_delete: self.unbind(ldname) def lvbind(self, bvname, lvname, lvnumber): """Link Volume create.""" cmd = ('iSMcfg lvbind -bvname %(bvname)s ' '-lvn %(lvnumber)d -lvname %(lvname)s' % {'bvname': bvname, 'lvnumber': lvnumber, 'lvname': lvname}) self._execute(cmd) def lvunbind(self, lvname): """Link Volume delete.""" cmd = ('iSMcfg lvunbind -ldname %(lvname)s' % {'lvname': lvname}) self._execute(cmd) def lvlink(self, svname, lvname): """Link to snapshot volume.""" cmd = ('iSMsc_link -lv %(lvname)s -lvflg ld ' '-sv %(svname)s -svflg ld -lvacc ro' % {'lvname': lvname, 'svname': svname}) self._execute(cmd) def lvunlink(self, lvname): """Unlink from snapshot volume.""" cmd = ('iSMsc_unlink -lv %(lvname)s -lvflg ld' % {'lvname': lvname}) self._execute(cmd) def cvbind(self, poolnumber, cvnumber): """Create Control Volume.""" cmd = ('iSMcfg ldbind -poolnumber %(poolnumber)d ' '-ldattr cv -ldn %(cvnumber)d' % {'poolnumber': poolnumber, 'cvnumber': cvnumber}) self._execute(cmd) class UnpairWait(object): def __init__(self, volume_properties, cli): super(UnpairWait, self).__init__() self._volume_properties = volume_properties self._mvname = volume_properties['mvname'][3:] self._rvname = volume_properties['rvname'][3:] self._mvID = volume_properties['mvid'] self._rvID = volume_properties['rvid'] self._flag = volume_properties['flag'] self._context = volume_properties['context'] self._cli = cli self._local_conf = self._cli._properties def _wait(self, unpair=True): timeout = self._local_conf['thread_timeout'] * 24 start_time = time.time() retry_count = 0 while True: cur_time = time.time() if (cur_time - start_time) > timeout: raise exception.APITimeout(_('UnpairWait wait timeout.')) sleep_time = get_sleep_time_for_clone(retry_count) LOG.debug('Sleep %d seconds Start', sleep_time) time.sleep(sleep_time) retry_count += 1 query_status = self._cli.query_MV_RV_status(self._rvname, 'RV') if query_status == 'separated': if unpair is True: self._cli.unpair(self._mvname, self._rvname, 'normal') break elif query_status == 
'sep/exec': continue else: LOG.debug('iSMrc_query command result abnormal.' 'Query status = %(status)s, RV = %(rv)s.', {'status': query_status, 'rv': self._rvname}) break def run(self): try: self._execute() except Exception: with excutils.save_and_reraise_exception(): LOG.debug('UnpairWait Unexpected error. ' 'exception=%(exception)s, MV = %(mv)s, RV = %(rv)s.', {'exception': traceback.format_exc(), 'mv': self._mvname, 'rv': self._rvname}) def _execute(self): pass class UnpairWaitForRestore(UnpairWait): def __init__(self, volume_properties, cli): super(UnpairWaitForRestore, self).__init__(volume_properties, cli) self._rvldn = None if ('rvldn' in volume_properties and volume_properties['rvldn'] is not None): self._rvldn = volume_properties['rvldn'] self._rvcapacity = None if ('rvcapacity' in volume_properties and volume_properties['rvcapacity'] is not None): self._rvcapacity = volume_properties['rvcapacity'] def _execute(self): LOG.debug('UnpairWaitForRestore start.') self._wait(True) if self._rvcapacity is not None: try: self._cli.expand(self._rvldn, self._rvcapacity) except exception.CinderException: with excutils.save_and_reraise_exception(): LOG.debug('UnpairWaitForDDRRestore expand error. ' 'exception=%(exception)s, ' 'MV = %(mv)s, RV = %(rv)s.', {'exception': traceback.format_exc(), 'mv': self._mvname, 'rv': self._rvname}) class UnpairWaitForClone(UnpairWait): def __init__(self, volume_properties, cli): super(UnpairWaitForClone, self).__init__(volume_properties, cli) self._rvldn = None if ('rvldn' in volume_properties and volume_properties['rvldn'] is not None): self._rvldn = volume_properties['rvldn'] self._rvcapacity = None if ('rvcapacity' in volume_properties and volume_properties['rvcapacity'] is not None): self._rvcapacity = volume_properties['rvcapacity'] def _execute(self): LOG.debug('UnpairWaitForClone start.') self._wait(True) if self._rvcapacity is not None: try: self._cli.expand(self._rvldn, self._rvcapacity) except exception.CinderException: with excutils.save_and_reraise_exception(): LOG.debug('UnpairWaitForClone expand error. ' 'exception=%(exception)s, ' 'MV = %(mv)s, RV = %(rv)s.', {'exception': traceback.format_exc(), 'mv': self._mvname, 'rv': self._rvname}) class UnpairWaitForMigrate(UnpairWait): def __init__(self, volume_properties, cli): super(UnpairWaitForMigrate, self).__init__(volume_properties, cli) def _execute(self): LOG.debug('UnpairWaitForMigrate start.') self._wait(True) self._cli.unbind(self._volume_properties['mvname']) self._cli.changeldname(None, self._volume_properties['mvname'], self._volume_properties['rvname']) class UnpairWaitForDDRRestore(UnpairWaitForRestore): def __init__(self, volume_properties, cli): super(UnpairWaitForDDRRestore, self).__init__(volume_properties, cli) self._prev_mvname = None if ('prev_mvname' in volume_properties and volume_properties['prev_mvname'] is not None): self._prev_mvname = volume_properties['prev_mvname'][3:] def _execute(self): LOG.debug('UnpairWaitForDDRRestore start.') self._wait(True) if self._rvcapacity is not None: try: self._cli.expand(self._rvldn, self._rvcapacity) except exception.CinderException: with excutils.save_and_reraise_exception(): LOG.debug('UnpairWaitForDDRRestore expand error. 
' 'exception=%(exception)s, ' 'MV = %(mv)s, RV = %(rv)s.', {'exception': traceback.format_exc(), 'mv': self._mvname, 'rv': self._rvname}) if self._prev_mvname is not None: self._cli.setpair(self._prev_mvname, self._mvname) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/drivers/nec/product.xml0000664000175000017500000000217700000000000022276 0ustar00zuulzuul00000000000000 NEC 8192 8192 4096 4096 1024 1024 8192 8192 4096 4096 1024 1024 8192 8192 8192 4096 4096 4096 4096 1024 1024 ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1759315577.379121 cinder-27.0.0/cinder/volume/drivers/nec/v/0000775000175000017500000000000000000000000020332 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/drivers/nec/v/__init__.py0000664000175000017500000000000000000000000022431 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/drivers/nec/v/nec_v_fc.py0000664000175000017500000000507300000000000022453 0ustar00zuulzuul00000000000000# Copyright (C) 2021 NEC corporation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # """Fibre channel module for NEC Driver.""" from cinder import interface from cinder.volume import driver from cinder.volume.drivers.hitachi import hbsd_common from cinder.volume.drivers.hitachi import hbsd_fc from cinder.volume.drivers.hitachi import hbsd_rest from cinder.volume.drivers.hitachi import hbsd_utils from cinder.volume.drivers.nec.v import nec_v_rest as rest from cinder.volume.drivers.nec.v import nec_v_utils as utils MSG = hbsd_utils.HBSDMsg @interface.volumedriver class VStorageFCDriver(hbsd_fc.HBSDFCDriver): """Fibre channel class for NEC Driver. Version history: .. code-block:: none 1.0.0 - Initial driver. 
""" VERSION = utils.VERSION # ThirdPartySystems wiki page CI_WIKI_NAME = utils.CI_WIKI_NAME def __init__(self, *args, **kwargs): """Initialize instance variables.""" super(VStorageFCDriver, self).__init__(*args, **kwargs) self.configuration.append_config_values(rest.COMMON_VOLUME_OPTS) self.configuration.append_config_values(rest.FC_VOLUME_OPTS) def _init_common(self, conf, db): utils.DRIVER_INFO['proto'] = 'FC' utils.DRIVER_INFO['hba_id'] = 'wwpns' utils.DRIVER_INFO['hba_id_type'] = 'World Wide Name' utils.DRIVER_INFO['msg_id'] = { 'target': MSG.CREATE_HOST_GROUP_FAILED} utils.DRIVER_INFO['volume_backend_name'] = '%(prefix)sFC' % { 'prefix': utils.DRIVER_PREFIX} utils.DRIVER_INFO['volume_type'] = 'fibre_channel' return rest.VStorageRESTFC(conf, utils.DRIVER_INFO, db) @staticmethod def get_driver_options(): additional_opts = driver.BaseVD._get_oslo_driver_opts( *(hbsd_common._INHERITED_VOLUME_OPTS + hbsd_rest._REQUIRED_REST_OPTS + ['driver_ssl_cert_verify', 'driver_ssl_cert_path', 'san_api_port'])) return (rest.COMMON_VOLUME_OPTS + rest.REST_VOLUME_OPTS + rest.FC_VOLUME_OPTS + additional_opts) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/drivers/nec/v/nec_v_iscsi.py0000664000175000017500000000476200000000000023201 0ustar00zuulzuul00000000000000# Copyright (C) 2021 NEC corporation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # """iSCSI channel module for NEC Driver.""" from cinder import interface from cinder.volume import driver from cinder.volume.drivers.hitachi import hbsd_common from cinder.volume.drivers.hitachi import hbsd_iscsi from cinder.volume.drivers.hitachi import hbsd_rest from cinder.volume.drivers.hitachi import hbsd_utils from cinder.volume.drivers.nec.v import nec_v_rest as rest from cinder.volume.drivers.nec.v import nec_v_utils as utils MSG = hbsd_utils.HBSDMsg @interface.volumedriver class VStorageISCSIDriver(hbsd_iscsi.HBSDISCSIDriver): """iSCSI class for NEC Driver. Version history: .. code-block:: none 1.0.0 - Initial driver. 
""" VERSION = utils.VERSION # ThirdPartySystems wiki page CI_WIKI_NAME = utils.CI_WIKI_NAME def __init__(self, *args, **kwargs): """Initialize instance variables.""" super(VStorageISCSIDriver, self).__init__(*args, **kwargs) self.configuration.append_config_values(rest.COMMON_VOLUME_OPTS) def _init_common(self, conf, db): utils.DRIVER_INFO['proto'] = 'iSCSI' utils.DRIVER_INFO['hba_id'] = 'initiator' utils.DRIVER_INFO['hba_id_type'] = 'iSCSI initiator IQN' utils.DRIVER_INFO['msg_id'] = { 'target': MSG.CREATE_ISCSI_TARGET_FAILED} utils.DRIVER_INFO['volume_backend_name'] = '%(prefix)siSCSI' % { 'prefix': utils.DRIVER_PREFIX} utils.DRIVER_INFO['volume_type'] = 'iscsi' return rest.VStorageRESTISCSI(conf, utils.DRIVER_INFO, db) @staticmethod def get_driver_options(): additional_opts = driver.BaseVD._get_oslo_driver_opts( *(hbsd_common._INHERITED_VOLUME_OPTS + hbsd_rest._REQUIRED_REST_OPTS + ['driver_ssl_cert_verify', 'driver_ssl_cert_path', 'san_api_port'])) return (rest.COMMON_VOLUME_OPTS + rest.REST_VOLUME_OPTS + additional_opts) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/drivers/nec/v/nec_v_rest.py0000664000175000017500000002565000000000000023043 0ustar00zuulzuul00000000000000# Copyright (C) 2021, 2023, NEC corporation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # """REST interface for NEC Driver.""" from oslo_config import cfg from cinder.volume import configuration from cinder.volume.drivers.hitachi import hbsd_rest from cinder.volume.drivers.hitachi import hbsd_rest_api from cinder.volume.drivers.hitachi import hbsd_rest_fc from cinder.volume.drivers.hitachi import hbsd_rest_iscsi COMMON_VOLUME_OPTS = [ cfg.StrOpt( 'nec_v_storage_id', default=None, help='Product number of the storage system.'), cfg.ListOpt( 'nec_v_pools', default=[], deprecated_name='nec_v_pool', help='Pool number[s] or pool name[s] of the DP pool.'), cfg.StrOpt( 'nec_v_snap_pool', default=None, help='Pool number or pool name of the snapshot pool.'), cfg.StrOpt( 'nec_v_ldev_range', default=None, help='Range of the LDEV numbers in the format of \'xxxx-yyyy\' that ' 'can be used by the driver. Values can be in decimal format ' '(e.g. 1000) or in colon-separated hexadecimal format ' '(e.g. 00:03:E8).'), cfg.ListOpt( 'nec_v_target_ports', default=[], help='IDs of the storage ports used to attach volumes to the ' 'controller node. To specify multiple ports, connect them by ' 'commas (e.g. CL1-A,CL2-A).'), cfg.ListOpt( 'nec_v_compute_target_ports', default=[], help='IDs of the storage ports used to attach volumes to compute ' 'nodes. To specify multiple ports, connect them by commas ' '(e.g. 
CL1-A,CL2-A).'), cfg.BoolOpt( 'nec_v_group_create', default=False, help='If True, the driver will create host groups or iSCSI targets on ' 'storage ports as needed.'), cfg.BoolOpt( 'nec_v_group_delete', default=False, help='If True, the driver will delete host groups or iSCSI targets on ' 'storage ports as needed.'), cfg.IntOpt( 'nec_v_copy_speed', default=3, min=1, max=15, help='Copy speed of storage system. 1 or 2 indicates ' 'low speed, 3 indicates middle speed, and a value between 4 and ' '15 indicates high speed.'), cfg.IntOpt( 'nec_v_copy_check_interval', default=3, min=1, max=600, help='Interval in seconds to check copying status during a volume ' 'copy.'), cfg.IntOpt( 'nec_v_async_copy_check_interval', default=10, min=1, max=600, help='Interval in seconds to check asynchronous copying status during ' 'a copy pair deletion or data restoration.'), ] REST_VOLUME_OPTS = [ cfg.BoolOpt( 'nec_v_rest_disable_io_wait', default=True, help='It may take some time to detach volume after I/O. ' 'This option will allow detaching volume to complete ' 'immediately.'), cfg.BoolOpt( 'nec_v_rest_tcp_keepalive', default=True, help='Enables or disables use of REST API tcp keepalive'), cfg.BoolOpt( 'nec_v_discard_zero_page', default=True, help='Enable or disable zero page reclamation in a DP-VOL.'), cfg.IntOpt( 'nec_v_lun_timeout', default=hbsd_rest._LUN_TIMEOUT, help='Maximum wait time in seconds for adding a LUN to complete.'), cfg.IntOpt( 'nec_v_lun_retry_interval', default=hbsd_rest._LUN_RETRY_INTERVAL, help='Retry interval in seconds for REST API adding a LUN.'), cfg.IntOpt( 'nec_v_restore_timeout', default=hbsd_rest._RESTORE_TIMEOUT, help='Maximum wait time in seconds for the restore operation to ' 'complete.'), cfg.IntOpt( 'nec_v_state_transition_timeout', default=hbsd_rest._STATE_TRANSITION_TIMEOUT, help='Maximum wait time in seconds for a volume transition to ' 'complete.'), cfg.IntOpt( 'nec_v_lock_timeout', default=hbsd_rest_api._LOCK_TIMEOUT, help='Maximum wait time in seconds for storage to be unlocked.'), cfg.IntOpt( 'nec_v_rest_timeout', default=hbsd_rest_api._REST_TIMEOUT, help='Maximum wait time in seconds for REST API execution to ' 'complete.'), cfg.IntOpt( 'nec_v_extend_timeout', default=hbsd_rest_api._EXTEND_TIMEOUT, help='Maximum wait time in seconds for a volume extention to ' 'complete.'), cfg.IntOpt( 'nec_v_exec_retry_interval', default=hbsd_rest_api._EXEC_RETRY_INTERVAL, help='Retry interval in seconds for REST API execution.'), cfg.IntOpt( 'nec_v_rest_connect_timeout', default=hbsd_rest_api._DEFAULT_CONNECT_TIMEOUT, help='Maximum wait time in seconds for REST API connection to ' 'complete.'), cfg.IntOpt( 'nec_v_rest_job_api_response_timeout', default=hbsd_rest_api._JOB_API_RESPONSE_TIMEOUT, help='Maximum wait time in seconds for a response from REST API.'), cfg.IntOpt( 'nec_v_rest_get_api_response_timeout', default=hbsd_rest_api._GET_API_RESPONSE_TIMEOUT, help='Maximum wait time in seconds for a response against GET method ' 'of REST API.'), cfg.IntOpt( 'nec_v_rest_server_busy_timeout', default=hbsd_rest_api._REST_SERVER_BUSY_TIMEOUT, help='Maximum wait time in seconds when REST API returns busy.'), cfg.IntOpt( 'nec_v_rest_keep_session_loop_interval', default=hbsd_rest_api._KEEP_SESSION_LOOP_INTERVAL, help='Loop interval in seconds for keeping REST API session.'), cfg.IntOpt( 'nec_v_rest_another_ldev_mapped_retry_timeout', default=hbsd_rest_api._ANOTHER_LDEV_MAPPED_RETRY_TIMEOUT, help='Retry time in seconds when new LUN allocation request fails.'), cfg.IntOpt( 
'nec_v_rest_tcp_keepidle', default=hbsd_rest_api._TCP_KEEPIDLE, help='Wait time in seconds for sending a first TCP keepalive packet.'), cfg.IntOpt( 'nec_v_rest_tcp_keepintvl', default=hbsd_rest_api._TCP_KEEPINTVL, help='Interval of transmissions in seconds for TCP keepalive packet.'), cfg.IntOpt( 'nec_v_rest_tcp_keepcnt', default=hbsd_rest_api._TCP_KEEPCNT, help='Maximum number of transmissions for TCP keepalive packet.'), cfg.ListOpt( 'nec_v_host_mode_options', default=[], help='Host mode option for host group or iSCSI target'), ] FC_VOLUME_OPTS = [ cfg.BoolOpt( 'nec_v_zoning_request', default=False, help='If True, the driver will configure FC zoning between the server ' 'and the storage system provided that FC zoning manager is ' 'enabled.'), ] CONF = cfg.CONF CONF.register_opts(COMMON_VOLUME_OPTS, group=configuration.SHARED_CONF_GROUP) CONF.register_opts(REST_VOLUME_OPTS, group=configuration.SHARED_CONF_GROUP) CONF.register_opts(FC_VOLUME_OPTS, group=configuration.SHARED_CONF_GROUP) def update_conf(conf): # COMMON_VOLUME_OPTS conf.hitachi_storage_id = conf.nec_v_storage_id conf.hitachi_pools = conf.nec_v_pools conf.hitachi_snap_pool = conf.nec_v_snap_pool conf.hitachi_ldev_range = conf.nec_v_ldev_range conf.hitachi_target_ports = conf.nec_v_target_ports conf.hitachi_compute_target_ports = ( conf.nec_v_compute_target_ports) conf.hitachi_group_create = conf.nec_v_group_create conf.hitachi_group_delete = conf.nec_v_group_delete conf.hitachi_copy_speed = conf.nec_v_copy_speed conf.hitachi_copy_check_interval = ( conf.nec_v_copy_check_interval) conf.hitachi_async_copy_check_interval = ( conf.nec_v_async_copy_check_interval) # REST_VOLUME_OPTS conf.hitachi_rest_disable_io_wait = ( conf.nec_v_rest_disable_io_wait) conf.hitachi_rest_tcp_keepalive = ( conf.nec_v_rest_tcp_keepalive) conf.hitachi_discard_zero_page = ( conf.nec_v_discard_zero_page) conf.hitachi_lun_timeout = conf.nec_v_lun_timeout conf.hitachi_lun_retry_interval = ( conf.nec_v_lun_retry_interval) conf.hitachi_restore_timeout = conf.nec_v_restore_timeout conf.hitachi_state_transition_timeout = ( conf.nec_v_state_transition_timeout) conf.hitachi_lock_timeout = conf.nec_v_lock_timeout conf.hitachi_rest_timeout = conf.nec_v_rest_timeout conf.hitachi_extend_timeout = conf.nec_v_extend_timeout conf.hitachi_exec_retry_interval = ( conf.nec_v_exec_retry_interval) conf.hitachi_rest_connect_timeout = ( conf.nec_v_rest_connect_timeout) conf.hitachi_rest_job_api_response_timeout = ( conf.nec_v_rest_job_api_response_timeout) conf.hitachi_rest_get_api_response_timeout = ( conf.nec_v_rest_get_api_response_timeout) conf.hitachi_rest_server_busy_timeout = ( conf.nec_v_rest_server_busy_timeout) conf.hitachi_rest_keep_session_loop_interval = ( conf.nec_v_rest_keep_session_loop_interval) conf.hitachi_rest_another_ldev_mapped_retry_timeout = ( conf.nec_v_rest_another_ldev_mapped_retry_timeout) conf.hitachi_rest_tcp_keepidle = ( conf.nec_v_rest_tcp_keepidle) conf.hitachi_rest_tcp_keepintvl = ( conf.nec_v_rest_tcp_keepintvl) conf.hitachi_rest_tcp_keepcnt = ( conf.nec_v_rest_tcp_keepcnt) conf.hitachi_host_mode_options = ( conf.nec_v_host_mode_options) return conf class VStorageRESTFC(hbsd_rest_fc.HBSDRESTFC): """REST interface fibre channel class.""" def __init__(self, conf, storage_protocol, db): """Initialize instance variables.""" conf.append_config_values(COMMON_VOLUME_OPTS) conf.append_config_values(REST_VOLUME_OPTS) conf.append_config_values(FC_VOLUME_OPTS) super(VStorageRESTFC, self).__init__(conf, storage_protocol, db) self.conf = 
update_conf(self.conf) # FC_VOLUME_OPTS self.conf.hitachi_zoning_request = self.conf.nec_v_zoning_request class VStorageRESTISCSI(hbsd_rest_iscsi.HBSDRESTISCSI): """REST interface iSCSI channel class.""" def __init__(self, conf, storage_protocol, db): """Initialize instance variables.""" conf.append_config_values(COMMON_VOLUME_OPTS) conf.append_config_values(REST_VOLUME_OPTS) super(VStorageRESTISCSI, self).__init__(conf, storage_protocol, db) self.conf = update_conf(self.conf) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/drivers/nec/v/nec_v_utils.py0000664000175000017500000000273000000000000023220 0ustar00zuulzuul00000000000000# Copyright (C) 2021 NEC corporation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # """Utility module for NEC Driver.""" VERSION = '1.0.0' CI_WIKI_NAME = 'NEC_V_Cinder_CI' PARAM_PREFIX = 'nec_v' VENDOR_NAME = 'NEC' DRIVER_PREFIX = 'NEC' DRIVER_FILE_PREFIX = 'nec' TARGET_PREFIX = 'NEC-' HDP_VOL_ATTR = 'DP' HDT_VOL_ATTR = 'DT' NVOL_LDEV_TYPE = 'DP-VOL' TARGET_IQN_SUFFIX = '.nec-target' PAIR_ATTR = 'SS' DRIVER_INFO = { 'version': VERSION, 'proto': '', 'hba_id': '', 'hba_id_type': '', 'msg_id': { 'target': '', }, 'volume_backend_name': '', 'volume_type': '', 'param_prefix': PARAM_PREFIX, 'vendor_name': VENDOR_NAME, 'driver_prefix': DRIVER_PREFIX, 'driver_file_prefix': DRIVER_FILE_PREFIX, 'target_prefix': TARGET_PREFIX, 'hdp_vol_attr': HDP_VOL_ATTR, 'hdt_vol_attr': HDT_VOL_ATTR, 'nvol_ldev_type': NVOL_LDEV_TYPE, 'target_iqn_suffix': TARGET_IQN_SUFFIX, 'pair_attr': PAIR_ATTR, } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/drivers/nec/volume.py0000664000175000017500000001511700000000000021753 0ustar00zuulzuul00000000000000# # Copyright (c) 2016 NEC Corporation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Drivers for M-Series Storage.""" from cinder import interface from cinder.volume import driver from cinder.volume.drivers.nec import volume_common from cinder.volume.drivers.nec import volume_helper from cinder.zonemanager import utils as fczm_utils @interface.volumedriver class MStorageISCSIDriver(volume_helper.MStorageDSVDriver, driver.ISCSIDriver): """M-Series Storage Snapshot iSCSI Driver. .. code-block:: none Version history: 1.8.1 - First open source driver version. 1.8.2 - Code refactoring. 1.9.1 - Support optimal path for non-disruptive backup. 
1.9.2 - Support manage/unmanage and manage/unmanage snapshot. Delete an unused configuration parameter (ldset_controller_node_name). Fixed bug #1705001: driver fails to start. 1.10.1 - Support automatic configuration of SAN access control. Fixed bug #1753375: SAN access remains permitted on the source node. 1.10.2 - Delete max volumes per pool limit. 1.10.3 - Add faster clone status check. Fixed bug #1777385: driver removed access permission from the destination node after live-migraion. Fixed bug #1778669: LUNs of detached volumes are never reused. 1.11.1 - Add support python 3. Add support for multi-attach. Add support of more than 4 iSCSI portals for a node. Add support to revert a volume to a snapshot. Add support storage assist retype and fixed bug #1838955: a volume in NEC Storage was left undeleted when the volume was retyped to another storage. """ VERSION = '1.11.1' CI_WIKI_NAME = 'NEC_Cinder_CI' def __init__(self, *args, **kwargs): super(MStorageISCSIDriver, self).__init__(*args, **kwargs) self._set_config(self.configuration, self.host, self.__class__.__name__) @staticmethod def get_driver_options(): return volume_common.mstorage_opts def ensure_export(self, context, volume): pass def get_volume_stats(self, refresh=False): return self.iscsi_get_volume_stats(refresh) def initialize_connection(self, volume, connector): return self.iscsi_initialize_connection(volume, connector) def terminate_connection(self, volume, connector, **kwargs): return self.iscsi_terminate_connection(volume, connector) def initialize_connection_snapshot(self, snapshot, connector, **kwargs): return self.iscsi_initialize_connection_snapshot(snapshot, connector, **kwargs) def terminate_connection_snapshot(self, snapshot, connector, **kwargs): return self.iscsi_terminate_connection_snapshot(snapshot, connector, **kwargs) @interface.volumedriver class MStorageFCDriver(volume_helper.MStorageDSVDriver, driver.FibreChannelDriver): """M-Series Storage Snapshot FC Driver. .. code-block:: none Version history: 1.8.1 - First open source driver version. 1.8.2 - Code refactoring. 1.9.1 - Support optimal path for non-disruptive backup. 1.9.2 - Support manage/unmanage and manage/unmanage snapshot. Delete an unused configuration parameter (ldset_controller_node_name). Fixed bug #1705001: driver fails to start. 1.10.1 - Support automatic configuration of SAN access control. Fixed bug #1753375: SAN access remains permitted on the source node. 1.10.2 - Delete max volumes per pool limit. 1.10.3 - Add faster clone status check. Fixed bug #1777385: driver removed access permission from the destination node after live-migraion. Fixed bug #1778669: LUNs of detached volumes are never reused. 1.11.1 - Add support python 3. Add support for multi-attach. Add support of more than 4 iSCSI portals for a node. Add support to revert a volume to a snapshot. Add support storage assist retype and fixed bug #1838955: a volume in NEC Storage was left undeleted when the volume was retyped to another storage. 
""" VERSION = '1.11.1' CI_WIKI_NAME = 'NEC_Cinder_CI' def __init__(self, *args, **kwargs): super(MStorageFCDriver, self).__init__(*args, **kwargs) self._set_config(self.configuration, self.host, self.__class__.__name__) @staticmethod def get_driver_options(): return volume_common.mstorage_opts def ensure_export(self, context, volume): pass def get_volume_stats(self, refresh=False): return self.fc_get_volume_stats(refresh) def initialize_connection(self, volume, connector): conn_info = self.fc_initialize_connection(volume, connector) fczm_utils.add_fc_zone(conn_info) return conn_info def terminate_connection(self, volume, connector, **kwargs): conn_info = self.fc_terminate_connection(volume, connector) fczm_utils.remove_fc_zone(conn_info) return conn_info def initialize_connection_snapshot(self, snapshot, connector, **kwargs): return self.fc_initialize_connection_snapshot(snapshot, connector, **kwargs) def terminate_connection_snapshot(self, snapshot, connector, **kwargs): return self.fc_terminate_connection_snapshot(snapshot, connector, **kwargs) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/drivers/nec/volume_common.py0000664000175000017500000011362000000000000023321 0ustar00zuulzuul00000000000000# # Copyright (c) 2016 NEC Corporation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import errno import os import re import traceback from lxml import etree from oslo_config import cfg from oslo_log import log as logging from oslo_utils import excutils from oslo_utils import units from cinder import context from cinder import exception from cinder.i18n import _ from cinder.volume import configuration from cinder.volume.drivers.nec import cli from cinder.volume.drivers.san import san from cinder.volume import qos_specs from cinder.volume import volume_types LOG = logging.getLogger(__name__) FLAGS = cfg.CONF mstorage_opts = [ cfg.IPOpt('nec_ismcli_fip', default=None, help='FIP address of M-Series Storage iSMCLI.'), cfg.StrOpt('nec_ismcli_user', default='', help='User name for M-Series Storage iSMCLI.'), cfg.StrOpt('nec_ismcli_password', secret=True, default='', help='Password for M-Series Storage iSMCLI.'), cfg.StrOpt('nec_ismcli_privkey', default='', help='Filename of RSA private key for ' 'M-Series Storage iSMCLI.'), cfg.StrOpt('nec_ldset', default='', help='M-Series Storage LD Set name for Compute Node.'), cfg.StrOpt('nec_ldname_format', default='LX:%s', help='M-Series Storage LD name format for volumes.'), cfg.StrOpt('nec_backup_ldname_format', default='LX:%s', help='M-Series Storage LD name format for snapshots.'), cfg.StrOpt('nec_diskarray_name', default='', help='Diskarray name of M-Series Storage.'), cfg.StrOpt('nec_ismview_dir', default='/tmp/nec/cinder', help='Output path of iSMview file.'), cfg.IntOpt('nec_ssh_pool_port_number', default=22, help='Port number of ssh pool.'), cfg.IntOpt('nec_unpairthread_timeout', default=3600, help='Timeout value of Unpairthread.'), cfg.IntOpt('nec_backend_max_ld_count', default=1024, help='Maximum number of managing sessions.'), cfg.BoolOpt('nec_actual_free_capacity', default=False, help='Return actual free capacity.'), cfg.BoolOpt('nec_ismview_alloptimize', default=False, help='Use legacy iSMCLI command with optimization.'), cfg.ListOpt('nec_pools', default=[], help='M-Series Storage pool numbers list to be used.'), cfg.ListOpt('nec_backup_pools', default=[], help='M-Series Storage backup pool number to be used.'), cfg.BoolOpt('nec_queryconfig_view', default=False, help='Use legacy iSMCLI command.'), cfg.IntOpt('nec_iscsi_portals_per_cont', default=0, deprecated_for_removal=True, help='Max number of iSCSI portals per controller. ' '0 => unlimited. 
' 'This option is deprecated and may ' 'be removed in the next release.'), cfg.BoolOpt('nec_auto_accesscontrol', default=True, help='Configure access control automatically.'), cfg.StrOpt('nec_cv_ldname_format', default='LX:__ControlVolume_%xh', help='M-Series Storage Control Volume name format.'), ] FLAGS.register_opts(mstorage_opts, group=configuration.SHARED_CONF_GROUP) def convert_to_id(value62): alnum = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz" length = len(value62) weight = 0 value = 0 index = 0 for i in reversed(range(0, length)): num = alnum.find(value62[i]) if index != 0: value += int(weight * (num)) else: value = num index += 1 weight = 62 ** index value = '%032x' % value uuid = value[0:8] uuid += '-' uuid += value[8:12] uuid += '-' uuid += value[12:16] uuid += '-' uuid += value[16:20] uuid += '-' uuid += value[20:] return uuid class MStorageVolumeCommon(object): """M-Series Storage volume common class.""" def do_setup(self, context): self._context = context def check_for_setup_error(self): fip = self._configuration.safe_get('nec_ismcli_fip') user = self._configuration.safe_get('nec_ismcli_user') pw = self._configuration.safe_get('nec_ismcli_password') key = self._configuration.safe_get('nec_ismcli_privkey') pools = self._configuration.safe_get('nec_pools') if fip is None or fip == '': raise exception.ParameterNotFound(param='nec_ismcli_fip') if user is None or user == '': raise exception.ParameterNotFound(param='nec_ismcli_user') if (pw is None or pw == '') and (key is None or key == ''): msg = _('nec_ismcli_password nor nec_ismcli_privkey') raise exception.ParameterNotFound(param=msg) if pools is None or len(pools) == 0: raise exception.ParameterNotFound(param='nec_pools') def _set_config(self, configuration, host, driver_name): self._configuration = configuration self._host = host self._driver_name = driver_name self._configuration.append_config_values(mstorage_opts) self._configuration.append_config_values(san.san_opts) self._config_group = self._configuration.config_group self._properties = self._set_properties() self._cli = self._properties['cli'] def _create_ismview_dir(self, ismview_dir, diskarray_name, driver_name, host): """Create ismview directory.""" filename = diskarray_name if filename == '': filename = driver_name + '_' + host ismview_path = os.path.join(ismview_dir, filename) LOG.debug('ismview_path=%s.', ismview_path) try: if os.path.exists(ismview_path): os.remove(ismview_path) except OSError as e: with excutils.save_and_reraise_exception() as ctxt: if e.errno == errno.ENOENT: ctxt.reraise = False try: os.makedirs(ismview_dir) except OSError as e: with excutils.save_and_reraise_exception() as ctxt: if e.errno == errno.EEXIST: ctxt.reraise = False return ismview_path def get_conf_properties(self): confobj = self._configuration pool_pools = [] for pool in confobj.safe_get('nec_pools'): if pool.endswith('h'): pool_pools.append(int(pool[:-1], 16)) else: pool_pools.append(int(pool, 10)) pool_backup_pools = [] for pool in confobj.safe_get('nec_backup_pools'): if pool.endswith('h'): pool_backup_pools.append(int(pool[:-1], 16)) else: pool_backup_pools.append(int(pool, 10)) return { 'cli_fip': confobj.safe_get('nec_ismcli_fip'), 'cli_user': confobj.safe_get('nec_ismcli_user'), 'cli_password': confobj.safe_get('nec_ismcli_password'), 'cli_privkey': confobj.safe_get('nec_ismcli_privkey'), 'pool_pools': pool_pools, 'pool_backup_pools': pool_backup_pools, 'pool_actual_free_capacity': confobj.safe_get('nec_actual_free_capacity'), 'ldset_name': 
confobj.safe_get('nec_ldset'), 'ld_name_format': confobj.safe_get('nec_ldname_format'), 'ld_backupname_format': confobj.safe_get('nec_backup_ldname_format'), 'ld_backend_max_count': confobj.safe_get('nec_backend_max_ld_count'), 'thread_timeout': confobj.safe_get('nec_unpairthread_timeout'), 'ismview_dir': confobj.safe_get('nec_ismview_dir'), 'ismview_alloptimize': confobj.safe_get('nec_ismview_alloptimize'), 'ssh_pool_port_number': confobj.safe_get('nec_ssh_pool_port_number'), 'diskarray_name': confobj.safe_get('nec_diskarray_name'), 'queryconfig_view': confobj.safe_get('nec_queryconfig_view'), 'portal_number': confobj.safe_get('nec_iscsi_portals_per_cont'), 'auto_accesscontrol': confobj.safe_get('nec_auto_accesscontrol'), 'cv_name_format': confobj.safe_get('nec_cv_ldname_format') } def _set_properties(self): conf_properties = self.get_conf_properties() ismview_path = self._create_ismview_dir( conf_properties['ismview_dir'], conf_properties['diskarray_name'], self._driver_name, self._host) vendor_name, _product_dict = self.get_oem_parameter() backend_name = self._configuration.safe_get('volume_backend_name') ssh_timeout = self._configuration.safe_get('ssh_conn_timeout') reserved_per = self._configuration.safe_get('reserved_percentage') conf_properties['ssh_conn_timeout'] = ssh_timeout conf_properties['reserved_percentage'] = reserved_per conf_properties['ismview_path'] = ismview_path conf_properties['vendor_name'] = vendor_name conf_properties['products'] = _product_dict conf_properties['backend_name'] = backend_name conf_properties['cli'] = cli.MStorageISMCLI(conf_properties) return conf_properties def get_oem_parameter(self): product = os.path.join(os.path.dirname(__file__), 'product.xml') try: with open(product, 'r') as f: xml = f.read() root = etree.fromstring(xml) vendor_name = root.findall('./VendorName')[0].text product_dict = {} product_map = root.findall('./ProductMap/Product') for s in product_map: product_dict[s.attrib['Name']] = int(s.text, 10) return vendor_name, product_dict except OSError as e: with excutils.save_and_reraise_exception() as ctxt: if e.errno == errno.ENOENT: ctxt.reraise = False raise exception.NotFound(_('%s not found.') % product) @staticmethod def get_ldname(volid, volformat): alnum = ('0123456789' 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz') ldname = "" num = int(volid.replace(("-"), ""), 16) while num != 0: ldname = alnum[num % len(alnum)] + ldname num = num - num % len(alnum) num = num // len(alnum) return volformat % ldname def get_ldset(self, ldsets): ldset = None if self._properties['ldset_name'] == '': nldset = len(ldsets) if nldset == 0: msg = _('Logical Disk Set could not be found.') raise exception.NotFound(msg) else: ldset = None else: if self._properties['ldset_name'] not in ldsets: msg = (_('Logical Disk Set `%s` could not be found.') % self._properties['ldset_name']) raise exception.NotFound(msg) ldset = ldsets[self._properties['ldset_name']] return ldset def get_pool_capacity(self, pools, ldsets): pools = [pool for (pn, pool) in pools.items() if len(self._properties['pool_pools']) == 0 or pn in self._properties['pool_pools']] free_capacity_gb = 0 total_capacity_gb = 0 for pool in pools: # Convert to GB. 
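            # Pool sizes are reported in bytes; integer-dividing by units.Gi
            # yields GiB. The pool with the largest free space wins, and
            # total_capacity_gb is taken from that same pool rather than
            # being summed across all pools.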
tmp_total = int(pool['total'] // units.Gi) tmp_free = int(pool['free'] // units.Gi) if free_capacity_gb < tmp_free: total_capacity_gb = tmp_total free_capacity_gb = tmp_free return {'total_capacity_gb': total_capacity_gb, 'free_capacity_gb': free_capacity_gb} def set_backend_max_ld_count(self, xml, root): section = root.findall('./CMD_REQUEST')[0] version = section.get('version').replace('Version ', '')[0:3] version = float(version) if version < 9.1: if 512 < self._properties['ld_backend_max_count']: self._properties['ld_backend_max_count'] = 512 else: if 1024 < self._properties['ld_backend_max_count']: self._properties['ld_backend_max_count'] = 1024 def get_diskarray_max_ld_count(self, xml, root): max_ld_count = 0 for section in root.findall( './' 'CMD_REQUEST/' 'CHAPTER[@name="Disk Array"]/' 'OBJECT[@name="Disk Array"]/' 'SECTION[@name="Disk Array Detail Information"]'): unit = section.find('./UNIT[@name="Product ID"]') if unit is None: msg = (_('UNIT[@name="Product ID"] not found. ' 'line=%(line)d out="%(out)s"') % {'line': section.sourceline, 'out': xml}) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) else: product_id = unit.text if product_id in self._properties['products']: max_ld_count = self._properties['products'][product_id] else: max_ld_count = 8192 LOG.debug('UNIT[@name="Product ID"] unknown id. ' 'productId=%s', product_id) LOG.debug('UNIT[@name="Product ID"] max_ld_count=%d.', max_ld_count) return max_ld_count def get_pool_config(self, xml, root): pools = {} for xmlobj in root.findall('./' 'CMD_REQUEST/' 'CHAPTER[@name="Pool"]/' 'OBJECT[@name="Pool"]'): section = xmlobj.find('./SECTION[@name="Pool Detail Information"]') if section is None: msg = (_('SECTION[@name="Pool Detail Information"] ' 'not found. line=%(line)d out="%(out)s"') % {'line': xmlobj.sourceline, 'out': xml}) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) unit = section.find('./UNIT[@name="Pool No.(h)"]') if unit is None: msg = (_('UNIT[@name="Pool No.(h)"] not found. ' 'line=%(line)d out="%(out)s"') % {'line': section.sourceline, 'out': xml}) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) pool_num = int(unit.text, 16) unit = section.find('UNIT[@name="Pool Capacity"]') if unit is None: msg = (_('UNIT[@name="Pool Capacity"] not found. ' 'line=%(line)d out="%(out)s"') % {'line': section.sourceline, 'out': xml}) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) total = int(unit.text, 10) unit = section.find('UNIT[@name="Free Pool Capacity"]') if unit is None: msg = (_('UNIT[@name="Free Pool Capacity"] not found. ' 'line=%(line)d out="%(out)s"') % {'line': section.sourceline, 'out': xml}) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) free = int(unit.text, 10) if self._properties['pool_actual_free_capacity']: unit = section.find('UNIT[@name="Used Pool Capacity"]') if unit is None: msg = (_('UNIT[@name="Used Pool Capacity"] not found. ' 'line=%(line)d out="%(out)s"') % {'line': section.sourceline, 'out': xml}) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) used = int(unit.text, 10) for section in xmlobj.findall('./SECTION[@name=' '"Virtual Capacity Pool ' 'Information"]'): unit = section.find('UNIT[@name="Actual Capacity"]') if unit is None: msg = (_('UNIT[@name="Actual Capacity"] not found. 
' 'line=%(line)d out="%(out)s"') % {'line': section.sourceline, 'out': xml}) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) total = int(unit.text, 10) free = total - used pool = {'pool_num': pool_num, 'total': total, 'free': free, 'ld_list': []} pools[pool_num] = pool return pools def get_ld_config(self, xml, root, pools): lds = {} used_ldns = [] for section in root.findall('./' 'CMD_REQUEST/' 'CHAPTER[@name="Logical Disk"]/' 'OBJECT[@name="Logical Disk"]/' 'SECTION[@name="LD Detail Information"]'): unit = section.find('./UNIT[@name="LDN(h)"]') if unit is None: msg = (_('UNIT[@name="LDN(h)"] not found. ' 'line=%(line)d out="%(out)s"') % {'line': section.sourceline, 'out': xml}) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) ldn = int(unit.text, 16) unit = section.find('./UNIT[@name="OS Type"]') if unit is None: msg = (_('UNIT[@name="OS Type"] not found. ' 'line=%(line)d out="%(out)s"') % {'line': section.sourceline, 'out': xml}) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) ostype = unit.text if unit.text is not None else '' unit = section.find('./UNIT[@name="LD Name"]') if unit is None: msg = (_('UNIT[@name="LD Name"] not found. ' 'line=%(line)d out="%(out)s"') % {'line': section.sourceline, 'out': xml}) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) ldname = ostype + ':' + unit.text unit = section.find('./UNIT[@name="Pool No.(h)"]') if unit is None: msg = (_('UNIT[@name="Pool No.(h)"] not found. ' 'line=%(line)d out="%(out)s"') % {'line': section.sourceline, 'out': xml}) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) pool_num = int(unit.text, 16) unit = section.find('./UNIT[@name="LD Capacity"]') if unit is None: msg = (_('UNIT[@name="LD Capacity"] not found. ' 'line=%(line)d out="%(out)s"') % {'line': section.sourceline, 'out': xml}) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) # byte capacity transform GB capacity. ld_capacity = int(unit.text, 10) // units.Gi unit = section.find('./UNIT[@name="RPL Attribute"]') if unit is None: msg = (_('UNIT[@name="RPL Attribute"] not found. ' 'line=%(line)d out="%(out)s"') % {'line': section.sourceline, 'out': xml}) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) rplatr = unit.text unit = section.find('./UNIT[@name="Purpose"]') if unit is None: msg = (_('UNIT[@name="Purpose"] not found. ' 'line=%(line)d out="%(out)s"') % {'line': section.sourceline, 'out': xml}) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) purpose = unit.text ld = {'ldname': ldname, 'ldn': ldn, 'pool_num': pool_num, 'ld_capacity': ld_capacity, 'RPL Attribute': rplatr, 'Purpose': purpose} pools[pool_num]['ld_list'].append(ld) lds[ldname] = ld used_ldns.append(ldn) return lds, used_ldns def get_iscsi_ldset_config(self, xml, root): ldsets = {} for xmlobj in root.findall('./' 'CMD_REQUEST/' 'CHAPTER[@name="Access Control"]/' 'OBJECT[@name="LD Set(iSCSI)"]'): ldsetlds = {} portals = [] initiators = [] for unit in xmlobj.findall('./SECTION[@name="Portal"]/' 'UNIT[@name="Portal"]'): if not unit.text.startswith('0.0.0.0:'): portals.append(unit.text) for unit in xmlobj.findall('./SECTION[@name="Initiator List"]/' 'UNIT[@name="Initiator List"]'): initiators.append(unit.text) section = xmlobj.find('./SECTION[@name="LD Set(iSCSI)' ' Information"]') if section is None: return ldsets unit = section.find('./UNIT[@name="Platform"]') if unit is None: msg = (_('UNIT[@name="Platform"] not found. 
' 'line=%(line)d out="%(out)s"') % {'line': section.sourceline, 'out': xml}) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) platform = unit.text unit = section.find('./UNIT[@name="LD Set Name"]') if unit is None: msg = (_('UNIT[@name="LD Set Name"] not found. ' 'line=%(line)d out="%(out)s"') % {'line': section.sourceline, 'out': xml}) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) ldsetname = platform + ':' + unit.text unit = section.find('./UNIT[@name="Target Mode"]') if unit is None: msg = (_('UNIT[@name="Target Mode"] not found. ' 'line=%(line)d out="%(out)s"') % {'line': section.sourceline, 'out': xml}) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) tmode = unit.text if tmode == 'Normal': unit = section.find('./UNIT[@name="Target Name"]') if unit is None: msg = (_('UNIT[@name="Target Name"] not found. ' 'line=%(line)d out="%(out)s"') % {'line': section.sourceline, 'out': xml}) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) iqn = unit.text for section in xmlobj.findall('./SECTION[@name=' '"LUN/LD List"]'): unit = section.find('./UNIT[@name="LDN(h)"]') if unit is None: msg = (_('UNIT[@name="LDN(h)"] not found. ' 'line=%(line)d out="%(out)s"') % {'line': section.sourceline, 'out': xml}) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) ldn = int(unit.text, 16) unit = section.find('./UNIT[@name="LUN(h)"]') if unit is None: msg = (_('UNIT[@name="LUN(h)"] not found. ' 'line=%(line)d out="%(out)s"') % {'line': section.sourceline, 'out': xml}) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) lun = int(unit.text, 16) ld = {'ldn': ldn, 'lun': lun, 'iqn': iqn} ldsetlds[ldn] = ld elif tmode == 'Multi-Target': for section in xmlobj.findall('./SECTION[@name=' '"Target Information For ' 'Multi-Target Mode"]'): unit = section.find('./UNIT[@name="Target Name"]') if unit is None: msg = (_('UNIT[@name="Target Name"] not found. ' 'line=%(line)d out="%(out)s"') % {'line': section.sourceline, 'out': xml}) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) iqn = unit.text unit = section.find('./UNIT[@name="LDN(h)"]') if unit is None: msg = (_('UNIT[@name="LDN(h)"] not found. ' 'line=%(line)d out="%(out)s"') % {'line': section.sourceline, 'out': xml}) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) if unit.text.startswith('-'): continue ldn = int(unit.text, 16) unit = section.find('./UNIT[@name="LUN(h)"]') if unit is None: msg = (_('UNIT[@name="LUN(h)"] not found. ' 'line=%(line)d out="%(out)s"') % {'line': section.sourceline, 'out': xml}) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) if unit.text.startswith('-'): continue lun = int(unit.text, 16) ld = {'ldn': ldn, 'lun': lun, 'iqn': iqn} ldsetlds[ldn] = ld else: LOG.debug('`%(mode)s` Unknown Target Mode. ' 'line=%(line)d out="%(out)s"', {'mode': tmode, 'line': unit.sourceline, 'out': xml}) ldset = {'ldsetname': ldsetname, 'protocol': 'iSCSI', 'mode': tmode, 'portal_list': portals, 'lds': ldsetlds, 'initiator_list': initiators} ldsets[ldsetname] = ldset return ldsets def get_fc_ldset_config(self, xml, root): ldsets = {} for xmlobj in root.findall('./' 'CMD_REQUEST/' 'CHAPTER[@name="Access Control"]/' 'OBJECT[@name="LD Set(FC)"]'): ldsetlds = {} section = xmlobj.find('./SECTION[@name="LD Set(FC)' ' Information"]') if section is None: return ldsets unit = section.find('./UNIT[@name="Platform"]') if unit is None: msg = (_('UNIT[@name="Platform"] not found. 
' 'line=%(line)d out="%(out)s"') % {'line': section.sourceline, 'out': xml}) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) platform = unit.text unit = section.find('./UNIT[@name="LD Set Name"]') if unit is None: msg = (_('UNIT[@name="LD Set Name"] not found. ' 'line=%(line)d out="%(out)s"') % {'line': section.sourceline, 'out': xml}) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) ldsetname = platform + ':' + unit.text wwpns = [] ports = [] for section in xmlobj.findall('./SECTION[@name="Path List"]'): unit = section.find('./UNIT[@name="Path"]') if unit is None: msg = (_('UNIT[@name="Path"] not found. ' 'line=%(line)d out="%(out)s"') % {'line': section.sourceline, 'out': xml}) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) if unit.text.find('(') != -1: ports.append(unit.text) else: wwpns.append(unit.text) for section in xmlobj.findall('./SECTION[@name="LUN/LD List"]'): unit = section.find('./UNIT[@name="LDN(h)"]') if unit is None: msg = (_('UNIT[@name="LDN(h)"] not found. ' 'line=%(line)d out="%(out)s"') % {'line': section.sourceline, 'out': xml}) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) ldn = int(unit.text, 16) unit = section.find('./UNIT[@name="LUN(h)"]') if unit is None: msg = (_('UNIT[@name="LUN(h)"] not found. ' 'line=%(line)d out="%(out)s"') % {'line': section.sourceline, 'out': xml}) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) lun = int(unit.text, 16) ld = {'ldn': ldn, 'lun': lun} ldsetlds[ldn] = ld ldset = {'ldsetname': ldsetname, 'lds': ldsetlds, 'protocol': 'FC', 'wwpn': wwpns, 'port': ports} ldsets[ldsetname] = ldset return ldsets def get_hostport_config(self, xml, root): hostports = {} for section in root.findall('./' 'CMD_REQUEST/' 'CHAPTER[@name="Controller"]/' 'OBJECT[@name="Host Port"]/' 'SECTION[@name="Host Director' '/Host Port Information"]'): unit = section.find('./UNIT[@name="Port No.(h)"]') if unit is None: msg = (_('UNIT[@name="Port No.(h)"] not found. ' 'line=%(line)d out="%(out)s"') % {'line': section.sourceline, 'out': xml}) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) units = unit.text.split('-') director = int(units[0], 16) port = int(units[1], 16) unit = section.find('./UNIT[@name="IP Address"]') if unit is None: unit = section.find('./UNIT[@name="WWPN"]') if unit is None: msg = (_('UNIT[@name="WWPN"] not found. 
' 'line=%(line)d out="%(out)s"') % {'line': section.sourceline, 'out': xml}) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) wwpn = unit.text hostport = { 'director': director, 'port': port, 'wwpn': wwpn, 'protocol': 'FC', } else: ip = unit.text if ip == '0.0.0.0': continue hostport = { 'director': director, 'port': port, 'ip': ip, 'protocol': 'iSCSI', } if director not in hostports: hostports[director] = [] hostports[director].append(hostport) return hostports def configs(self, xml): root = etree.fromstring(xml) pools = self.get_pool_config(xml, root) lds, used_ldns = self.get_ld_config(xml, root, pools) iscsi_ldsets = self.get_iscsi_ldset_config(xml, root) fc_ldsets = self.get_fc_ldset_config(xml, root) hostports = self.get_hostport_config(xml, root) diskarray_max_ld_count = self.get_diskarray_max_ld_count(xml, root) self.set_backend_max_ld_count(xml, root) ldsets = {} ldsets.update(iscsi_ldsets) ldsets.update(fc_ldsets) return pools, lds, ldsets, used_ldns, hostports, diskarray_max_ld_count def get_xml(self): ismview_path = self._properties['ismview_path'] if os.path.exists(ismview_path) and os.path.isfile(ismview_path): with open(ismview_path, 'r') as f: xml = f.read() LOG.debug('loaded from %s.', ismview_path) else: xml = self._cli.view_all(ismview_path, False, False) return xml def parse_xml(self): try: xml = self.get_xml() return self.configs(xml) except Exception: LOG.debug('parse_xml Unexpected error. exception=%s', traceback.format_exc()) xml = self._cli.view_all(self._properties['ismview_path'], False) return self.configs(xml) def get_volume_type_qos_specs(self, volume_type_id): specs = {} ctxt = context.get_admin_context() if volume_type_id is not None: volume_type = volume_types.get_volume_type(ctxt, volume_type_id) qos_specs_id = volume_type.get('qos_specs_id') if qos_specs_id is not None: specs = qos_specs.get_qos_specs(ctxt, qos_specs_id)['specs'] LOG.debug('get_volume_type_qos_specs ' 'volume_type=%(volume_type)s, ' 'qos_specs_id=%(qos_spec_id)s, ' 'specs=%(specs)s', {'volume_type': volume_type, 'qos_spec_id': qos_specs_id, 'specs': specs}) return specs def get_qos_parameters(self, specs, reset): qos_params = {} if 'upperlimit' in specs and specs['upperlimit'] is not None: if self.validates_number(specs['upperlimit']) is True: upper_limit = int(specs['upperlimit'], 10) if ((upper_limit != 0) and ((upper_limit < 10) or (upper_limit > 1000000))): raise exception.InvalidConfigurationValue( value=upper_limit, option='upperlimit') qos_params['upperlimit'] = upper_limit else: raise exception.InvalidConfigurationValue( value=specs['upperlimit'], option='upperlimit') else: # 0: Set to no limit.(default) # On the QoS function in NEC Storage, 0 means there is no # limit. # None: Keep current value. qos_params['upperlimit'] = 0 if reset else None if 'lowerlimit' in specs and specs['lowerlimit'] is not None: if self.validates_number(specs['lowerlimit']) is True: lower_limit = int(specs['lowerlimit'], 10) if (lower_limit != 0 and (lower_limit < 10 or lower_limit > 1000000)): raise exception.InvalidConfigurationValue( value=lower_limit, option='lowerlimit') qos_params['lowerlimit'] = lower_limit else: raise exception.InvalidConfigurationValue( value=specs['lowerlimit'], option='lowerlimit') else: # 0: Set to no limit.(default) # On the QoS function in NEC Storage, 0 means there is no # limit. # None: Keep current value. qos_params['lowerlimit'] = 0 if reset else None if 'upperreport' in specs: if specs['upperreport'] not in ['on', 'off']: LOG.debug('Illegal arguments. 
' 'upperreport is not on or off.' 'upperreport=%s', specs['upperreport']) qos_params['upperreport'] = 'off' if reset else None else: qos_params['upperreport'] = specs['upperreport'] else: # off: Set to no report.(default) # None: Keep current value. qos_params['upperreport'] = 'off' if reset else None return qos_params def check_accesscontrol(self, ldsets, ld): """Check Logical disk is in-use or not.""" set_accesscontrol = False for ldset in ldsets.values(): if ld['ldn'] in ldset['lds']: set_accesscontrol = True break return set_accesscontrol def validates_number(self, value): return re.match(r'^(?![-+]0+$)[-+]?([1-9][0-9]*)?[0-9](\.[0-9]+)?$', '%s' % value) and True or False ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/drivers/nec/volume_helper.py0000664000175000017500000024311400000000000023312 0ustar00zuulzuul00000000000000# # Copyright (c) 2016 NEC Corporation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import re import traceback from oslo_log import log as logging from oslo_utils import excutils from oslo_utils import units from cinder.common import constants from cinder import coordination from cinder import exception from cinder.i18n import _ from cinder.volume.drivers.nec import cli from cinder.volume.drivers.nec import volume_common from cinder.volume import volume_utils LOG = logging.getLogger(__name__) class MStorageDriver(volume_common.MStorageVolumeCommon): """M-Series Storage helper class.""" def _convert_id2name(self, volume): ldname = (self.get_ldname(volume.id, self._properties['ld_name_format'])) return ldname def _convert_id2snapname(self, volume): ldname = (self.get_ldname(volume.id, self._properties['ld_backupname_format'])) return ldname def _convert_id2migratename(self, volume): ldname = self._convert_id2name(volume) ldname = ldname + '_m' return ldname def _convert_deleteldname(self, ldname): return ldname + '_d' def _select_ldnumber(self, used_ldns, max_ld_count): """Pick up unused LDN.""" for ldn in range(0, max_ld_count + 1): if ldn not in used_ldns: break if ldn > max_ld_count - 1: msg = _('All Logical Disk Numbers are used. 
' 'No more volumes can be created.') raise exception.VolumeBackendAPIException(data=msg) return ldn def _return_poolnumber(self, nominated_pools): """Select pool form nominated pools.""" selected_pool = -1 min_ldn = 0 for pool in nominated_pools: nld = len(pool['ld_list']) if selected_pool == -1 or min_ldn > nld: selected_pool = pool['pool_num'] min_ldn = nld if selected_pool < 0: msg = _('No available pools found.') raise exception.VolumeBackendAPIException(data=msg) return selected_pool def _select_leastused_poolnumber(self, volume, pools, xml, option=None): """Pick up least used pool.""" size = volume.size * units.Gi pools = [pool for (pn, pool) in pools.items() if pool['free'] >= size and (len(self._properties['pool_pools']) == 0 or pn in self._properties['pool_pools'])] return self._return_poolnumber(pools) def _select_migrate_poolnumber(self, volume, pools, xml, host): """Pick up migration target pool.""" tmpPools, lds, ldsets, used_ldns, hostports, max_ld_count = ( self.configs(xml)) ldname = self.get_ldname(volume.id, self._properties['ld_name_format']) ld = lds[ldname] capabilities = host['capabilities'] pools_string = capabilities.get('location_info').split(':')[1] destination_pools = list(map(int, pools_string.split(','))) size = volume.size * units.Gi pools = [pool for (pn, pool) in pools.items() if pool['free'] >= size and (len(destination_pools) == 0 or pn in destination_pools)] selected_pool = self._return_poolnumber(pools) if selected_pool == ld['pool_num']: # it is not necessary to create new volume. selected_pool = -1 return selected_pool def _select_dsv_poolnumber(self, volume, pools, option=None): """Pick up backup pool for DSV.""" pools = [pool for (pn, pool) in pools.items() if pn in self._properties['pool_backup_pools']] return self._return_poolnumber(pools) def _select_ddr_poolnumber(self, volume, pools, xml, option): """Pick up backup pool for DDR.""" size = option * units.Gi pools = [pool for (pn, pool) in pools.items() if pool['free'] >= size and pn in self._properties['pool_backup_pools']] return self._return_poolnumber(pools) def _select_volddr_poolnumber(self, volume, pools, xml, option): """Pick up backup pool for DDR.""" size = option * units.Gi pools = [pool for (pn, pool) in pools.items() if pool['free'] >= size and pn in self._properties['pool_pools']] return self._return_poolnumber(pools) def _bind_ld(self, volume, capacity, validator, nameselector, poolselector, option=None): return self._sync_bind_ld(volume, capacity, validator, nameselector, poolselector, self._properties['diskarray_name'], option) @coordination.synchronized('mstorage_bind_execute_{diskarray_name}') def _sync_bind_ld(self, volume, capacity, validator, nameselector, poolselector, diskarray_name, option=None): """Get storage state and bind ld. volume: ld information capacity: capacity in GB validator: validate method(volume, xml) nameselector: select ld name method(volume) poolselector: select ld location method(volume, pools) diskarray_name: target diskarray name option: optional info """ LOG.debug('_bind_ld Start.') xml = self._cli.view_all(self._properties['ismview_path']) pools, lds, ldsets, used_ldns, hostports, max_ld_count = ( self.configs(xml)) # execute validator function. if validator is not None: result = validator(volume, xml) if result is False: msg = _('Invalid bind Logical Disk info.') raise exception.VolumeBackendAPIException(data=msg) # generate new ld name. ldname = nameselector(volume) # pick up least used pool and unused LDN. 
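        # A negative pool number or LDN from the selectors means no new bind
        # is required (for example, a migration into the pool the LD already
        # lives in); in that case the method logs the fact and returns
        # without calling ldbind.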
selected_pool = poolselector(volume, pools, xml, option) selected_ldn = self._select_ldnumber(used_ldns, max_ld_count) if selected_pool < 0 or selected_ldn < 0: LOG.debug('NOT necessary LD bind. ' 'Name=%(name)s ' 'Size=%(size)dGB ' 'LDN=%(ldn)04xh ' 'Pool=%(pool)04xh.', {'name': ldname, 'size': capacity, 'ldn': selected_ldn, 'pool': selected_pool}) return ldname, selected_ldn, selected_pool # bind LD. retnum, errnum = (self._cli.ldbind(ldname, selected_pool, selected_ldn, capacity)) if retnum is False: if 'iSM31077' in errnum: msg = _('Logical Disk number is duplicated (%s).') % errnum raise exception.VolumeBackendAPIException(data=msg) else: msg = _('Failed to bind Logical Disk (%s).') % errnum raise exception.VolumeBackendAPIException(data=msg) LOG.debug('LD bound. Name=%(name)s Size=%(size)dGB ' 'LDN=%(ldn)04xh Pool=%(pool)04xh.', {'name': ldname, 'size': capacity, 'ldn': selected_ldn, 'pool': selected_pool}) return ldname, selected_ldn, selected_pool def _validate_ld_exist(self, lds, vol_id, name_format): ldname = self.get_ldname(vol_id, name_format) if ldname not in lds: msg = _('Logical Disk `%s` could not be found.') % ldname LOG.error(msg) raise exception.NotFound(msg) return ldname def _validate_iscsildset_exist(self, ldsets, connector): ldset = self.get_ldset(ldsets) if ldset is None: for tldset in ldsets.values(): if 'initiator_list' not in tldset: continue n = tldset['initiator_list'].count(connector['initiator']) if n > 0: ldset = tldset break if ldset is None: if self._properties['auto_accesscontrol']: authname = connector['initiator'].strip() authname = authname.replace((":"), "") authname = authname.replace(("."), "") new_ldsetname = authname[-16:] ret = self._cli.addldset_iscsi(new_ldsetname, connector) if ret is False: msg = _('Appropriate Logical Disk Set' ' could not be found.') raise exception.NotFound(msg) xml = self._cli.view_all(self._properties['ismview_path']) pools, lds, ldsets, used_ldns, hostports, max_ld_count = ( self.configs(xml)) ldset = self._validate_iscsildset_exist(ldsets, connector) else: msg = _('Appropriate Logical Disk Set could not be found.') raise exception.NotFound(msg) if len(ldset['portal_list']) < 1: msg = (_('Logical Disk Set `%s` has no portal.') % ldset['ldsetname']) raise exception.NotFound(msg) return ldset def _validate_fcldset_exist(self, ldsets, connector): ldset = self.get_ldset(ldsets) if ldset is None: for conect in connector['wwpns']: length = len(conect) findwwpn = '-'.join([conect[i:i + 4] for i in range(0, length, 4)]) findwwpn = findwwpn.upper() for tldset in ldsets.values(): if 'wwpn' in tldset and findwwpn in tldset['wwpn']: ldset = tldset break if ldset is not None: break if ldset is None: if self._properties['auto_accesscontrol']: new_ldsetname = connector['wwpns'][0][:16] ret = self._cli.addldset_fc(new_ldsetname, connector) if ret is False: msg = _('Appropriate Logical Disk Set' ' could not be found.') raise exception.NotFound(msg) xml = self._cli.view_all(self._properties['ismview_path']) pools, lds, ldsets, used_ldns, hostports, max_ld_count = ( self.configs(xml)) ldset = self._validate_fcldset_exist(ldsets, connector) else: msg = _('Appropriate Logical Disk Set could not be found.') raise exception.NotFound(msg) return ldset def _enumerate_iscsi_portals(self, hostports, ldset, prefered_director=0): portals = [] for director in [prefered_director, 1 - prefered_director]: if director not in hostports: continue dirportals = [] for port in hostports[director]: if not port['protocol'].lower() == 'iscsi': continue for 
portal in ldset['portal_list']: if portal.startswith(port['ip'] + ':'): dirportals.append(portal) break if (self._properties['portal_number'] > 0 and len(dirportals) > self._properties['portal_number']): portals.extend( dirportals[0:self._properties['portal_number']]) else: portals.extend(dirportals) if len(portals) == 0: raise exception.NotFound( _('No portal matches to any host ports.')) return portals def create_volume(self, volume): msgparm = ('Volume ID = %(id)s, Size = %(size)dGB' % {'id': volume.id, 'size': volume.size}) try: self._create_volume(volume) LOG.info('Created Volume (%s)', msgparm) except exception.CinderException as e: with excutils.save_and_reraise_exception(): LOG.warning('Failed to Create Volume (%(msgparm)s) ' '(%(exception)s)', {'msgparm': msgparm, 'exception': e}) def _create_volume(self, volume): LOG.debug('_create_volume Start.') # select ld number and LD bind. ldname, ldn, selected_pool = self._bind_ld( volume, volume.size, None, self._convert_id2name, self._select_leastused_poolnumber) self._set_qos_spec(ldname, volume.volume_type_id) LOG.debug('LD bound. ' 'Name=%(name)s ' 'Size=%(size)dGB ' 'LDN=%(ldn)04xh ' 'Pool=%(pool)04xh.', {'name': ldname, 'size': volume.size, 'ldn': ldn, 'pool': selected_pool}) def _can_extend_capacity(self, new_size, pools, lds, ld): rvs = {} ld_count_in_pool = {} if ld['RPL Attribute'] == 'MV': pair_lds = self._cli.get_pair_lds(ld['ldname'], lds) for (ldn, pair_ld) in pair_lds.items(): rv_name = pair_ld['ldname'] pool_number = pair_ld['pool_num'] ldn = pair_ld['ldn'] rvs[ldn] = pair_ld # check rv status. query_status = self._cli.query_MV_RV_status(rv_name[3:], 'RV') if query_status != 'separated': msg = (_('Specified Logical Disk %s has been copied.') % rv_name) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) # get pool number. if pool_number in ld_count_in_pool: ld_count_in_pool[pool_number].append(ldn) else: ld_count_in_pool[pool_number] = [ldn] # check pool capacity. for (pool_number, tmp_ldn_list) in ld_count_in_pool.items(): ld_capacity = ( ld['ld_capacity'] * units.Gi) new_size_byte = new_size * units.Gi size_increase = new_size_byte - ld_capacity pool = pools[pool_number] ld_count = len(tmp_ldn_list) if pool['free'] < size_increase * ld_count: msg = (_('Not enough pool capacity. ' 'pool_number=%(pool)d, size_increase=%(sizeinc)d') % {'pool': pool_number, 'sizeinc': size_increase * ld_count}) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) return rvs def extend_volume(self, volume, new_size): msgparm = ('Volume ID = %(id)s, New Size = %(newsize)dGB, ' 'Old Size = %(oldsize)dGB' % {'id': volume.id, 'newsize': new_size, 'oldsize': volume.size}) try: self._extend_volume(volume, new_size) LOG.info('Extended Volume (%s)', msgparm) except exception.CinderException as e: with excutils.save_and_reraise_exception(): LOG.warning('Failed to Extend Volume (%(msgparm)s) ' '(%(exception)s)', {'msgparm': msgparm, 'exception': e}) def _extend_volume(self, volume, new_size): LOG.debug('_extend_volume(Volume ID = %(id)s, ' 'new_size = %(size)s) Start.', {'id': volume.id, 'size': new_size}) xml = self._cli.view_all(self._properties['ismview_path']) pools, lds, ldsets, used_ldns, hostports, max_ld_count = ( self.configs(xml)) # get volume. ldname = self._validate_ld_exist( lds, volume.id, self._properties['ld_name_format']) ld = lds[ldname] ldn = ld['ldn'] # check pools capacity. rvs = self._can_extend_capacity(new_size, pools, lds, ld) # volume expand. self._cli.expand(ldn, new_size) # rv expand. 
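        # 'RPL Attribute' reflects the replication role: an MV (master
        # volume) must have every separated RV found above expanded to the
        # same size, an IV (independent volume) needs no extra work, and any
        # other attribute is treated as an error.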
if ld['RPL Attribute'] == 'MV': # ld expand. for (ldn, rv) in rvs.items(): self._cli.expand(ldn, new_size) elif ld['RPL Attribute'] != 'IV': msg = (_('RPL Attribute Error. RPL Attribute = %s.') % ld['RPL Attribute']) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) LOG.debug('_extend_volume(Volume ID = %(id)s, ' 'new_size = %(newsize)s) End.', {'id': volume.id, 'newsize': new_size}) def create_cloned_volume(self, volume, src_vref): msgparm = ('Volume ID = %(id)s, ' 'Source Volume ID = %(src_id)s' % {'id': volume.id, 'src_id': src_vref.id}) try: self._create_cloned_volume(volume, src_vref) LOG.info('Created Cloned Volume (%s)', msgparm) except exception.CinderException as e: with excutils.save_and_reraise_exception(): LOG.warning('Failed to Create Cloned Volume ' '(%(msgparm)s) (%(exception)s)', {'msgparm': msgparm, 'exception': e}) def _create_cloned_volume(self, volume, src_vref): """Creates a clone of the specified volume.""" LOG.debug('_create_cloned_volume' '(Volume ID = %(id)s, Source ID = %(src_id)s ) Start.', {'id': volume.id, 'src_id': src_vref.id}) xml = self._cli.view_all(self._properties['ismview_path']) pools, lds, ldsets, used_ldns, hostports, max_ld_count = ( self.configs(xml)) # check MV existence and get MV info. source_name = ( self.get_ldname(src_vref.id, self._properties['ld_name_format'])) if source_name not in lds: msg = (_('Logical Disk `%(name)s` has unbound already. ' 'volume_id = %(id)s.') % {'name': source_name, 'id': src_vref.id}) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) source_ld = lds[source_name] # check temporarily released pairs existence. if source_ld['RPL Attribute'] == 'MV': # get pair lds. pair_lds = self._cli.get_pair_lds(source_name, lds) if len(pair_lds) == 3: msg = (_('Cannot create clone volume. ' 'number of pairs reached 3. ' 'ldname=%s') % source_name) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) # Creating Cloned Volume. volume_name, ldn, selected_pool = self._bind_ld( volume, src_vref.size, None, self._convert_id2name, self._select_leastused_poolnumber) self._set_qos_spec(volume_name, volume.volume_type_id) LOG.debug('LD bound. Name=%(name)s ' 'Size=%(size)dGB ' 'LDN=%(ldn)04xh ' 'Pool=%(pool)04xh.', {'name': volume_name, 'size': volume.size, 'ldn': ldn, 'pool': selected_pool}) LOG.debug('source_name=%(src_name)s, volume_name=%(name)s.', {'src_name': source_name, 'name': volume_name}) # compare volume size and copy data to RV. mv_capacity = src_vref.size rv_capacity = volume.size if rv_capacity <= mv_capacity: rv_capacity = None volume_properties = { 'mvname': source_name, 'rvname': volume_name, 'capacity': mv_capacity, 'mvid': src_vref.id, 'rvid': volume.id, 'rvldn': ldn, 'rvcapacity': rv_capacity, 'flag': 'clone', 'context': self._context } self._cli.backup_restore(volume_properties, cli.UnpairWaitForClone) LOG.debug('_create_cloned_volume(Volume ID = %(id)s, ' 'Source ID = %(src_id)s ) End.', {'id': volume.id, 'src_id': src_vref.id}) def _set_qos_spec(self, ldname, volume_type_id, reset=False): # check io limit. specs = self.get_volume_type_qos_specs(volume_type_id) qos_params = self.get_qos_parameters(specs, reset) # set io limit. 
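        # qos_params carries 'upperlimit'/'lowerlimit' (0 = unlimited) and
        # 'upperreport'; entries left as None ask the CLI to keep the current
        # value on the array instead of changing it.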
self._cli.set_io_limit(ldname, qos_params) LOG.debug('_set_qos_spec(Specs = %s) End.', qos_params) return def _validate_migrate_volume(self, volume, xml): """Validate source volume information.""" pools, lds, ldsets, used_ldns, hostports, max_ld_count = ( self.configs(xml)) # get ld object ldname = self._validate_ld_exist( lds, volume.id, self._properties['ld_name_format']) # check rpl attribute. ld = lds[ldname] if ld['Purpose'] != '---': msg = (_('Specified Logical Disk %(ld)s ' 'has an invalid attribute (%(purpose)s).') % {'ld': ldname, 'purpose': ld['Purpose']}) raise exception.VolumeBackendAPIException(data=msg) return True def migrate_volume(self, context, volume, host): msgparm = ('Volume ID = %(id)s, ' 'Destination Host = %(dsthost)s' % {'id': volume.id, 'dsthost': host}) try: ret = self._migrate_volume(context, volume, host) if ret != (False, None): LOG.info('Migrated Volume (%s)', msgparm) else: LOG.debug('Failed to Migrate Volume (%s)', msgparm) return ret except exception.CinderException as e: with excutils.save_and_reraise_exception(): LOG.warning('Failed to Migrate Volume ' '(%(msgparm)s) (%(exception)s)', {'msgparm': msgparm, 'exception': e}) def _migrate_volume(self, context, volume, host): """Migrate the volume to the specified host. Returns a boolean indicating whether the migration occurred, as well as model_update. """ LOG.debug('_migrate_volume(' 'Volume ID = %(id)s, ' 'Volume Name = %(name)s, ' 'host = %(host)s) Start.', {'id': volume.id, 'name': volume.name, 'host': host}) false_ret = (False, None) # check volume status. if volume.status != 'available': LOG.debug('Specified volume %s is not available.', volume.id) return false_ret if 'capabilities' not in host: LOG.debug('Host not in capabilities. Host = %s ', host) return false_ret capabilities = host['capabilities'] if capabilities.get('vendor_name') != self._properties['vendor_name']: LOG.debug('Vendor is not %(vendor)s. ' 'capabilities = %(capabilities)s ', {'vendor': self._properties['vendor_name'], 'capabilities': capabilities}) return false_ret # another storage configuration is not supported. destination_fip = capabilities.get('location_info').split(':')[0] if destination_fip != self._properties['cli_fip']: LOG.debug('FIP is mismatch. FIP = %(destination)s != %(fip)s', {'destination': destination_fip, 'fip': self._properties['cli_fip']}) return false_ret self._migrate(volume, host, volume.volume_type_id, self._validate_migrate_volume, self._select_migrate_poolnumber) LOG.debug('_migrate_volume(Volume ID = %(id)s, ' 'Host = %(host)s) End.', {'id': volume.id, 'host': host}) return (True, []) def _validate_retype_volume(self, volume, xml): """Validate source volume information.""" pools, lds, ldsets, used_ldns, hostports, max_ld_count = ( self.configs(xml)) # get ld object ldname = self._validate_ld_exist( lds, volume.id, self._properties['ld_name_format']) # check rpl attribute. 
ld = lds[ldname] if ld['Purpose'] != '---': msg = (_('Specified Logical Disk %(ld)s ' 'has an invalid attribute (%(purpose)s).') % {'ld': ldname, 'purpose': ld['Purpose']}) raise exception.VolumeBackendAPIException(data=msg) return True def _spec_is_changed(self, specdiff, resname): res = specdiff.get(resname) if (res is not None and res[0] != res[1]): return True return False def _check_same_backend(self, diff): if self._spec_is_changed(diff['extra_specs'], 'volume_backend_name'): return False if len(diff['extra_specs']) > 1: return False return True def retype(self, context, volume, new_type, diff, host): """Convert the volume to the specified volume type. :param context: The context used to run the method retype :param volume: The original volume that was retype to this backend :param new_type: The new volume type :param diff: The difference between the two types :param host: The target information :returns: a boolean indicating whether the migration occurred, and model_update """ msgparm = ('Volume ID = %(id)s, ' 'New Type = %(type)s, ' 'Diff = %(diff)s, ' 'Destination Host = %(dsthost)s' % {'id': volume.id, 'type': new_type, 'diff': diff, 'dsthost': host}) try: ret = self._retype(context, volume, new_type, diff, host) if ret is not False: LOG.info('Retyped Volume (%s)', msgparm) else: LOG.debug('Failed to Retype Volume (%s)', msgparm) return ret except exception.CinderException as e: with excutils.save_and_reraise_exception(): LOG.warning('Failed to Retype Volume ' '(%(msgparm)s) (%(exception)s)', {'msgparm': msgparm, 'exception': e}) def _retype(self, context, volume, new_type, diff, host): """Retype the volume to the specified volume type. Returns a boolean indicating whether the migration occurred, as well as model_update. """ LOG.debug('_retype(' 'Volume ID = %(id)s, ' 'Volume Name = %(name)s, ' 'New Type = %(type)s, ' 'Diff = %(diff)s, ' 'host = %(host)s) Start.', {'id': volume.id, 'name': volume.name, 'type': new_type, 'diff': diff, 'host': host}) # check volume attach status. if volume.attach_status == 'attached': LOG.debug('Specified volume %s is attached.', volume.id) return False if self._check_same_backend(diff): ldname = self._convert_id2name(volume) reset = (diff['qos_specs'].get('consumer')[0] == 'back-end') self._set_qos_spec(ldname, new_type['id'], reset) LOG.debug('_retype(QoS setting only)(Volume ID = %(id)s, ' 'Host = %(host)s) End.', {'id': volume.id, 'host': host}) return True self._migrate(volume, host, new_type['id'], self._validate_retype_volume, self._select_leastused_poolnumber) LOG.debug('_retype(Volume ID = %(id)s, ' 'Host = %(host)s) End.', {'id': volume.id, 'host': host}) return True def _migrate(self, volume, host, volume_type_id, validator, pool_selecter): # bind LD. rvname, __, selected_pool = self._bind_ld( volume, volume.size, validator, self._convert_id2migratename, pool_selecter, host) if selected_pool >= 0: self._set_qos_spec(rvname, volume_type_id) volume_properties = { 'mvname': self.get_ldname( volume.id, self._properties['ld_name_format']), 'rvname': rvname, 'capacity': volume.size * units.Gi, 'mvid': volume.id, 'rvid': None, 'flag': 'migrate', 'context': self._context } # replicate LD. self._cli.backup_restore(volume_properties, cli.UnpairWaitForMigrate) return def update_migrated_volume(self, ctxt, volume, new_volume, original_volume_status): """Updates metadata after host-assisted migration. This method should rename the back-end volume name(id) on the destination host back to its original name(id) on the source host. 
:param ctxt: The context used to run the method update_migrated_volume :param volume: The original volume that was migrated to this backend :param new_volume: The migration volume object that was created on this backend as part of the migration process :param original_volume_status: The status of the original volume :returns: model_update to update DB with any needed changes """ LOG.debug('update_migrated_volume' '(Volume ID = %(id)s, New Volume ID = %(new_id)s, ' 'Status = %(status)s) Start.', {'id': volume.id, 'new_id': new_volume.id, 'status': original_volume_status}) xml = self._cli.view_all(self._properties['ismview_path']) pools, lds, ldsets, used_ldns, hostports, max_ld_count = ( self.configs(xml)) name_id = None provider_location = None if original_volume_status in ['available', 'in-use']: original_name = self._convert_id2name(volume) new_name = self._convert_id2name(new_volume) try: if original_name in lds: delete_ldname = self._convert_deleteldname(original_name) self._cli.changeldname(None, delete_ldname, original_name) self._cli.changeldname(None, original_name, new_name) except exception.CinderException as e: LOG.warning('Unable to rename the logical volume ' '(Volume ID = %(id)s), (%(exception)s)', {'id': volume.id, 'exception': e}) # If the rename fails, _name_id should be set to the new # volume id and provider_location should be set to the # one from the new volume as well. name_id = new_volume._name_id or new_volume.id provider_location = new_volume.provider_location else: # The back-end will not be renamed. name_id = new_volume._name_id or new_volume.id provider_location = new_volume.provider_location LOG.debug('update_migrated_volume(name_id = %(name_id)s, ' 'provider_location = %(location)s) End.', {'name_id': name_id, 'location': provider_location}) return {'_name_id': name_id, 'provider_location': provider_location} def check_for_export(self, context, volume_id): pass def backup_use_temp_snapshot(self): return True def _get_free_lun(self, ldset): # Lun can't be specified when multi target mode. if ldset['protocol'] == 'iSCSI' and ldset['mode'] == 'Multi-Target': return None # get free lun. luns = [] ldsetlds = ldset['lds'] for ld in ldsetlds.values(): luns.append(ld['lun']) target_lun = 0 for lun in sorted(luns): if target_lun < lun: break target_lun += 1 return target_lun def create_export(self, context, volume, connector): pass def create_export_snapshot(self, context, snapshot, connector): pass @coordination.synchronized('mstorage_bind_execute_{diskarray_name}') def _create_snapshot_and_link(self, snapshot, connector, diskarray_name, validate_ldset_exist): xml = self._cli.view_all(self._properties['ismview_path']) pools, lds, ldsets, used_ldns, hostports, max_ld_count = ( self.configs(xml)) LOG.debug('validate data.') svname = self._validate_ld_exist( lds, snapshot.id, self._properties['ld_name_format']) bvname = self._validate_ld_exist( lds, snapshot.volume_id, self._properties['ld_name_format']) lvname = svname + '_l' ldset = validate_ldset_exist(ldsets, connector) svstatus = self._cli.query_BV_SV_status(bvname[3:], svname[3:]) if svstatus != 'snap/active': msg = _('Logical Disk (%s) is invalid snapshot.') % svname raise exception.VolumeBackendAPIException(data=msg) lvldn = self._select_ldnumber(used_ldns, max_ld_count) LOG.debug('configure backend.') lun0 = [ld for (ldn, ld) in ldset['lds'].items() if ld['lun'] == 0] # NEC Storage cannot create an LV with LUN 0. # Create a CV with LUN 0 to use the other LUN for an LV. 
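        # lun0 is non-empty when some LD in this LD Set already occupies
        # LUN 0; only when it is empty does the driver bind a dedicated
        # control volume (CV) at LUN 0 first, so that the link volume (LV)
        # created below lands on a LUN other than 0.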
if not lun0: LOG.debug('create and attach control volume.') used_ldns.append(lvldn) cvldn = self._select_ldnumber(used_ldns, max_ld_count) self._cli.cvbind(lds[bvname]['pool_num'], cvldn) self._cli.changeldname(cvldn, self._properties['cv_name_format'] % cvldn) self._cli.addldsetld(ldset['ldsetname'], self._properties['cv_name_format'] % cvldn, self._get_free_lun(ldset)) xml = self._cli.view_all(self._properties['ismview_path']) pools, lds, ldsets, used_ldns, hostports, max_ld_count = ( self.configs(xml)) ldset = validate_ldset_exist(ldsets, connector) self._cli.lvbind(bvname, lvname[3:], lvldn) self._cli.lvlink(svname[3:], lvname[3:]) self._cli.addldsetld(ldset['ldsetname'], lvname, self._get_free_lun(ldset)) LOG.debug('Add LD `%(ld)s` to LD Set `%(ldset)s`.', {'ld': lvname, 'ldset': ldset['ldsetname']}) return lvname def remove_export(self, context, volume): pass def _detach_from_all(self, ldname, xml): LOG.debug('_detach_from_all Start.') pools, lds, ldsets, used_ldns, hostports, max_ld_count = ( self.configs(xml)) # get target LD Set. ldset = self.get_ldset(ldsets) ld = lds[ldname] ldsetlist = [] if ldset is None: for tldset in ldsets.values(): if ld['ldn'] in tldset['lds']: ldsetlist.append(tldset) LOG.debug('ldset=%s.', tldset) if len(ldsetlist) == 0: return False else: if ld['ldn'] not in ldset['lds']: LOG.debug('LD `%(ld)s` already deleted ' 'from LD Set `%(ldset)s`?', {'ld': ldname, 'ldset': ldset['ldsetname']}) return False ldsetlist.append(ldset) # delete LD from LD set. for tagetldset in ldsetlist: retnum, errnum = (self._cli.delldsetld( tagetldset['ldsetname'], ldname)) if retnum is not True: if 'iSM31065' in errnum: LOG.debug( 'LD `%(ld)s` already deleted ' 'from LD Set `%(ldset)s`?', {'ld': ldname, 'ldset': tagetldset['ldsetname']}) else: msg = (_('Failed to unregister Logical Disk from ' 'Logical Disk Set (%s)') % errnum) raise exception.VolumeBackendAPIException(data=msg) LOG.debug('LD `%(ld)s` deleted from LD Set `%(ldset)s`.', {'ld': ldname, 'ldset': tagetldset['ldsetname']}) LOG.debug('_detach_from_all(LD Name = %s) End.', ldname) return True def remove_export_snapshot(self, context, snapshot): """Removes an export for a snapshot.""" msgparm = 'Snapshot ID = %s' % snapshot.id try: self._remove_export_snapshot(context, snapshot) LOG.info('Removed Export Snapshot(%s)', msgparm) except exception.CinderException as e: with excutils.save_and_reraise_exception(): LOG.warning('Failed to Remove Export Snapshot' '(%(msgparm)s) (%(exception)s)', {'msgparm': msgparm, 'exception': e}) def _remove_export_snapshot(self, context, snapshot): LOG.debug('_remove_export_snapshot(Snapshot ID = %s) Start.', snapshot.id) xml = self._cli.view_all(self._properties['ismview_path']) pools, lds, ldsets, used_ldns, hostports, max_ld_count = ( self.configs(xml)) LOG.debug('validate data.') svname = self._validate_ld_exist( lds, snapshot.id, self._properties['ld_name_format']) lvname = svname + '_l' if lvname not in lds: LOG.debug('Logical Disk `%s` is already unexported.', lvname) return ld = lds[lvname] ldsetlist = [] if ld is None: msg = _('Exported snapshot could not be found.') raise exception.VolumeBackendAPIException(data=msg) for tldset in ldsets.values(): if ld['ldn'] in tldset['lds']: ldsetlist.append(tldset) if len(ldsetlist) == 0: LOG.debug('Specified Logical Disk is already removed.') return LOG.debug('configure backend.') for tagetldset in ldsetlist: retnum, errnum = self._cli.delldsetld(tagetldset['ldsetname'], lvname) if retnum is not True: msg = (_('Failed to remove export Logical 
Disk from ' 'Logical Disk Set (%s)') % errnum) raise exception.VolumeBackendAPIException(data=msg) LOG.debug('LD `%(ld)s` deleted from LD Set `%(ldset)s`.', {'ld': lvname, 'ldset': tagetldset['ldsetname']}) try: self._cli.lvunlink(lvname[3:]) except Exception: LOG.debug('LV unlink error.') try: self._cli.lvunbind(lvname) except Exception: LOG.debug('LV unbind error.') LOG.debug('_remove_export_snapshot(Snapshot ID = %s) End.', snapshot.id) @coordination.synchronized('mstorage_bind_execute_{diskarray_name}') def _export_volume(self, volume, connector, diskarray_name, validate_exist): xml = self._cli.view_all(self._properties['ismview_path']) pools, lds, ldsets, used_ldns, hostports, max_ld_count = ( self.configs(xml)) ldset = validate_exist(ldsets, connector) ldname = self.get_ldname( volume.id, self._properties['ld_name_format']) if ldname not in lds: msg = _('Logical Disk `%s` could not be found.') % ldname raise exception.NotFound(msg) ld = lds[ldname] if ld['ldn'] not in ldset['lds']: self._cli.addldsetld(ldset['ldsetname'], ldname, self._get_free_lun(ldset)) # update local info. LOG.debug('Add LD `%(ld)s` to LD Set `%(ldset)s`.', {'ld': ldname, 'ldset': ldset['ldsetname']}) return ldname def iscsi_initialize_connection(self, volume, connector): msgparm = ('Volume ID = %(id)s, Connector = %(connector)s' % {'id': volume.id, 'connector': connector}) try: ret = self._iscsi_initialize_connection(volume, connector) LOG.info('Initialized iSCSI Connection (%s)', msgparm) return ret except exception.CinderException as e: with excutils.save_and_reraise_exception(): LOG.warning('Failed to Initialize iSCSI Connection ' '(%(msgparm)s) (%(exception)s)', {'msgparm': msgparm, 'exception': e}) def _iscsi_initialize_connection(self, volume, connector, is_snapshot=False): """Initializes the connection and returns connection info. The iscsi driver returns a driver_volume_type of 'iscsi'. The format of the driver data is defined in _get_iscsi_properties. Example return value:: { 'driver_volume_type': 'iscsi' 'data': { 'target_discovered': True, 'target_iqn': 'iqn.2010-10.org.openstack:volume-00000001', 'target_portal': '127.0.0.0.1:3260', 'volume_id': 1, 'access_mode': 'rw' } } """ LOG.debug('_iscsi_initialize_connection' '(Volume ID = %(id)s, connector = %(connector)s, ' 'snapshot = %(snapshot)s) Start.', {'id': volume.id, 'connector': connector, 'snapshot': is_snapshot}) # configure access control if is_snapshot: ldname = self._create_snapshot_and_link( volume, connector, self._properties['diskarray_name'], self._validate_iscsildset_exist) else: ldname = self._export_volume(volume, connector, self._properties['diskarray_name'], self._validate_iscsildset_exist) xml = self._cli.view_all(self._properties['ismview_path']) pools, lds, ldsets, used_ldns, hostports, max_ld_count = ( self.configs(xml)) # enumerate portals for iscsi multipath. 
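        # The preferred director is derived from the LD's pool number
        # (pool_num % 2), and a single target portal is chosen by hashing the
        # first hex digit of the volume id across the enumerated portals.
        # When the connector requests multipath, all portals are returned,
        # with the same IQN and LUN repeated once per portal.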
ld = lds[ldname] ldset = self._validate_iscsildset_exist(ldsets, connector) prefered_director = ld['pool_num'] % 2 portals = self._enumerate_iscsi_portals(hostports, ldset, prefered_director) info = {'driver_volume_type': 'iscsi', 'data': {'target_portal': portals[int(volume.id[:1], 16) % len(portals)], 'target_iqn': ldset['lds'][ld['ldn']]['iqn'], 'target_lun': ldset['lds'][ld['ldn']]['lun'], 'target_discovered': False, 'volume_id': volume.id} } if connector.get('multipath'): portals_len = len(portals) info['data'].update({'target_portals': portals, 'target_iqns': [ldset['lds'][ld['ldn']] ['iqn']] * portals_len, 'target_luns': [ldset['lds'][ld['ldn']] ['lun']] * portals_len}) LOG.debug('_iscsi_initialize_connection' '(Volume ID = %(id)s, connector = %(connector)s, ' 'info = %(info)s) End.', {'id': volume.id, 'connector': connector, 'info': info}) return info def iscsi_initialize_connection_snapshot(self, snapshot, connector, **kwargs): """Allow connection to connector and return connection info. :param snapshot: The snapshot to be attached :param connector: Dictionary containing information about what is being connected to. :returns conn_info: A dictionary of connection information. This can optionally include a "initiator_updates" field. """ msgparm = ('Snapshot ID = %(id)s, Connector = %(connector)s' % {'id': snapshot.id, 'connector': connector}) try: ret = self._iscsi_initialize_connection(snapshot, connector, is_snapshot=True) LOG.info('Initialized iSCSI Connection snapshot(%s)', msgparm) return ret except exception.CinderException as e: with excutils.save_and_reraise_exception(): LOG.warning('Failed to Initialize iSCSI Connection snapshot' '(%(msgparm)s) (%(exception)s)', {'msgparm': msgparm, 'exception': e}) return ret @coordination.synchronized('mstorage_iscsi_terminate_{volume.id}') def iscsi_terminate_connection(self, volume, connector): msgparm = ('Volume ID = %(id)s, Connector = %(connector)s' % {'id': volume.id, 'connector': connector}) try: self._iscsi_terminate_connection(volume, connector) LOG.info('Terminated iSCSI Connection (%s)', msgparm) except exception.CinderException as e: with excutils.save_and_reraise_exception(): LOG.warning('Failed to Terminate iSCSI Connection ' '(%(msgparm)s) (%(exception)s)', {'msgparm': msgparm, 'exception': e}) def _iscsi_terminate_connection(self, volume, connector): if self._properties['ldset_name'] != '': LOG.debug('Ldset is specified. Access control setting ' 'is not deleted automatically.') return if connector is None: LOG.debug('Connector is not specified. Nothing to do.') return if self._is_multi_attachment(volume, connector): return # delete unused access control setting. 
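        # Automatic cleanup only happens when no fixed LD set name is
        # configured, a connector was supplied and the volume is not attached
        # to other instances on this host (all checked above). The LD is then
        # unregistered from the connector's iSCSI LD set; error code iSM31065
        # means it had already been removed and is not treated as fatal.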
xml = self._cli.view_all(self._properties['ismview_path']) pools, lds, ldsets, used_ldns, hostports, max_ld_count = ( self.configs(xml)) ldname = self.get_ldname( volume.id, self._properties['ld_name_format']) if ldname not in lds: LOG.debug('Logical Disk `%s` has unbound already.', ldname) return ldset = self._validate_iscsildset_exist(ldsets, connector) retnum, errnum = self._cli.delldsetld(ldset['ldsetname'], ldname) if retnum is not True: if 'iSM31065' in errnum: LOG.debug('LD `%(ld)s` already deleted ' 'from LD Set `%(ldset)s`?', {'ld': ldname, 'ldset': ldset['ldsetname']}) else: msg = (_('Failed to unregister Logical Disk from ' 'Logical Disk Set (%s)') % errnum) raise exception.VolumeBackendAPIException(data=msg) LOG.debug('LD `%(ld)s` deleted from LD Set `%(ldset)s`.', {'ld': ldname, 'ldset': ldset['ldsetname']}) def iscsi_terminate_connection_snapshot(self, snapshot, connector, **kwargs): """Disallow connection from connector.""" msgparm = ('Volume ID = %(id)s, Connector = %(connector)s' % {'id': snapshot.id, 'connector': connector}) self.remove_export_snapshot(None, snapshot) LOG.info('Terminated iSCSI Connection snapshot(%s)', msgparm) def fc_initialize_connection(self, volume, connector): msgparm = ('Volume ID = %(id)s, Connector = %(connector)s' % {'id': volume.id, 'connector': connector}) try: ret = self._fc_initialize_connection(volume, connector) LOG.info('Initialized FC Connection (%s)', msgparm) return ret except exception.CinderException as e: with excutils.save_and_reraise_exception(): LOG.warning('Failed to Initialize FC Connection ' '(%(msgparm)s) (%(exception)s)', {'msgparm': msgparm, 'exception': e}) def _fc_initialize_connection(self, volume, connector, is_snapshot=False): """Initializes the connection and returns connection info. The driver returns a driver_volume_type of 'fibre_channel'. The target_wwn can be a single entry or a list of wwns that correspond to the list of remote wwn(s) that will export the volume. Example return values: { 'driver_volume_type': 'fibre_channel' 'data': { 'target_discovered': True, 'target_lun': 1, 'target_wwn': '1234567890123', 'access_mode': 'rw' } } or { 'driver_volume_type': 'fibre_channel' 'data': { 'target_discovered': True, 'target_lun': 1, 'target_wwn': ['1234567890123', '0987654321321'], 'access_mode': 'rw' } } """ LOG.debug('_fc_initialize_connection' '(Volume ID = %(id)s, connector = %(connector)s, ' 'snapshot = %(snapshot)s) Start.', {'id': volume.id, 'connector': connector, 'snapshot': is_snapshot}) if is_snapshot: ldname = self._create_snapshot_and_link( volume, connector, self._properties['diskarray_name'], self._validate_fcldset_exist) else: ldname = self._export_volume(volume, connector, self._properties['diskarray_name'], self._validate_fcldset_exist) # update local info. xml = self._cli.view_all(self._properties['ismview_path']) pools, lds, ldsets, used_ldns, hostports, max_ld_count = ( self.configs(xml)) # get target wwpns and initiator/target map. 
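        # Every FC host port reported by the array is collected, and each
        # initiator WWPN from the connector is mapped to the full list of
        # target WWPNs (see _build_initiator_target_map). The LUN is then
        # read from the LD set entry of the link volume when one exists
        # (snapshot attach), otherwise from the volume's own LD.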
fc_ports = [] for director, hostport in hostports.items(): for port in hostport: if port['protocol'].lower() == 'fc': fc_ports.append(port) target_wwns, init_targ_map = ( self._build_initiator_target_map(connector, fc_ports)) # get link volume number ldname = self.get_ldname( volume.id, self._properties['ld_name_format']) lvname = ldname + '_l' if lvname in lds: ldn = lds[lvname]['ldn'] else: ldn = lds[ldname]['ldn'] ldset = self._validate_fcldset_exist(ldsets, connector) info = { 'driver_volume_type': 'fibre_channel', 'data': {'target_lun': ldset['lds'][ldn]['lun'], 'target_wwn': target_wwns, 'initiator_target_map': init_targ_map}} LOG.debug('_fc_initialize_connection' '(Volume ID = %(id)s, connector = %(connector)s, ' 'info = %(info)s) End.', {'id': volume.id, 'connector': connector, 'info': info}) return info def fc_initialize_connection_snapshot(self, snapshot, connector): msgparm = ('Volume ID = %(id)s, Connector = %(connector)s' % {'id': snapshot.id, 'connector': connector}) try: ret = self._fc_initialize_connection(snapshot, connector, is_snapshot=True) LOG.info('Initialized FC Connection snapshot(%s)', msgparm) return ret except exception.CinderException as e: with excutils.save_and_reraise_exception(): LOG.warning('Failed to Initialize FC Connection snapshot' '(%(msgparm)s) (%(exception)s)', {'msgparm': msgparm, 'exception': e}) @coordination.synchronized('mstorage_fc_terminate_{volume.id}') def fc_terminate_connection(self, volume, connector): msgparm = ('Volume ID = %(id)s, Connector = %(connector)s' % {'id': volume.id, 'connector': connector}) try: ret = self._fc_terminate_connection(volume, connector) LOG.info('Terminated FC Connection (%s)', msgparm) return ret except exception.CinderException as e: with excutils.save_and_reraise_exception(): LOG.warning('Failed to Terminate FC Connection ' '(%(msgparm)s) (%(exception)s)', {'msgparm': msgparm, 'exception': e}) @volume_utils.trace def _fc_terminate_connection(self, vol_or_snap, connector, is_snapshot=False): """Disallow connection from connector.""" if not is_snapshot and connector is not None and ( self._is_multi_attachment(vol_or_snap, connector)): return xml = self._cli.view_all(self._properties['ismview_path']) pools, lds, ldsets, used_ldns, hostports, max_ld_count = ( self.configs(xml)) # get target wwpns and initiator/target map. fc_ports = [] for director, hostport in hostports.items(): for port in hostport: if port['protocol'].lower() == 'fc': fc_ports.append(port) info = {'driver_volume_type': 'fibre_channel', 'data': {}} if connector is not None: target_wwns, init_targ_map = ( self._build_initiator_target_map(connector, fc_ports)) info['data'] = {'target_wwn': target_wwns, 'initiator_target_map': init_targ_map} if is_snapshot: # Detaching the snapshot is performed in the # remove_export_snapshot. return info if connector is not None and self._properties['ldset_name'] == '': # delete LD from LD set. 
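        # Mirrors the iSCSI teardown path: the LD is unregistered from the FC
        # LD set only when a connector is given and no fixed LD set name is
        # configured; iSM31065 from delldsetld is treated as "already
        # removed" rather than raised as an error.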
ldname = self.get_ldname( vol_or_snap.id, self._properties['ld_name_format']) if ldname not in lds: LOG.debug('Logical Disk `%s` has unbound already.', ldname) return info ldset = self._validate_fcldset_exist(ldsets, connector) retnum, errnum = self._cli.delldsetld(ldset['ldsetname'], ldname) if retnum is not True: if 'iSM31065' in errnum: LOG.debug('LD `%(ld)s` already deleted ' 'from LD Set `%(ldset)s`?', {'ld': ldname, 'ldset': ldset['ldsetname']}) else: msg = (_('Failed to unregister Logical Disk from ' 'Logical Disk Set (%s)') % errnum) raise exception.VolumeBackendAPIException(data=msg) return info def fc_terminate_connection_snapshot(self, snapshot, connector, **kwargs): msgparm = ('Volume ID = %(id)s, Connector = %(connector)s' % {'id': snapshot.id, 'connector': connector}) try: ret = self._fc_terminate_connection(snapshot, connector, is_snapshot=True) LOG.info('Terminated FC Connection snapshot(%s)', msgparm) self.remove_export_snapshot(None, snapshot) return ret except exception.CinderException as e: with excutils.save_and_reraise_exception(): LOG.warning('Failed to Terminate FC Connection snapshot' '(%(msgparm)s) (%(exception)s)', {'msgparm': msgparm, 'exception': e}) def _is_multi_attachment(self, volume, connector): """Check the number of attached instances. Returns true if the volume is attached to multiple instances. Returns false if the volume is attached to a single instance. """ host = connector['host'] attach_list = volume.volume_attachment if attach_list is None: return False host_list = [att.connector['host'] for att in attach_list if att is not None and att.connector is not None] if host_list.count(host) > 1: LOG.info("Volume is attached to multiple instances on " "this host.") return True return False def _build_initiator_target_map(self, connector, fc_ports): target_wwns = [] for port in fc_ports: target_wwns.append(port['wwpn']) initiator_wwns = [] if connector is not None: initiator_wwns = connector['wwpns'] init_targ_map = {} for initiator in initiator_wwns: init_targ_map[initiator] = target_wwns return target_wwns, init_targ_map def _update_volume_status(self): """Retrieve status info from volume group.""" data = {} data['volume_backend_name'] = (self._properties['backend_name'] or self._driver_name) data['vendor_name'] = self._properties['vendor_name'] data['driver_version'] = self.VERSION data['reserved_percentage'] = self._properties['reserved_percentage'] data['QoS_support'] = True data['multiattach'] = True data['location_info'] = (self._properties['cli_fip'] + ":" + (','.join(map(str, self._properties['pool_pools'] )))) # Get xml data from file and parse. try: pools, lds, ldsets, used_ldns, hostports, max_ld_count = ( self.parse_xml()) # Get capacities from pools. pool_capacity = self.get_pool_capacity(pools, ldsets) data['total_capacity_gb'] = pool_capacity['total_capacity_gb'] data['free_capacity_gb'] = pool_capacity['free_capacity_gb'] except Exception: LOG.debug('_update_volume_status Unexpected error. ' 'exception=%s', traceback.format_exc()) data['total_capacity_gb'] = 0 data['free_capacity_gb'] = 0 return data def iscsi_get_volume_stats(self, refresh=False): """Get volume status. If 'refresh' is True, run update the stats first. """ if refresh: self._stats = self._update_volume_status() self._stats['storage_protocol'] = constants.ISCSI LOG.debug('data=%(data)s, config_group=%(group)s', {'data': self._stats, 'group': self._config_group}) return self._stats def fc_get_volume_stats(self, refresh=False): """Get volume status. 
If 'refresh' is True, run update the stats first. """ if refresh: self._stats = self._update_volume_status() self._stats['storage_protocol'] = constants.FC LOG.debug('data=%(data)s, config_group=%(group)s', {'data': self._stats, 'group': self._config_group}) return self._stats def get_pool(self, volume): LOG.debug('backend_name=%s', self._properties['backend_name']) return self._properties['backend_name'] def delete_volume(self, volume): msgparm = 'Volume ID = %s' % volume.id try: self._delete_volume(volume) LOG.info('Deleted Volume (%s)', msgparm) except exception.CinderException as e: with excutils.save_and_reraise_exception(): LOG.warning('Failed to Delete Volume ' '(%(msgparm)s) (%(exception)s)', {'msgparm': msgparm, 'exception': e}) def _delete_volume(self, volume): LOG.debug('_delete_volume id=%(id)s, _name_id=%(name_id)s Start.', {'id': volume.id, 'name_id': volume._name_id}) xml = self._cli.view_all(self._properties['ismview_path']) pools, lds, ldsets, used_ldns, hostports, max_ld_count = ( self.configs(xml)) ldname = self.get_ldname(volume.name_id, self._properties['ld_name_format']) # The volume to be deleted has '_d' at the end of the name # when migrating with the same backend. delete_ldname = self._convert_deleteldname(ldname) if delete_ldname in lds: ldname = delete_ldname if ldname not in lds: LOG.debug('LD `%s` already unbound?', ldname) return # If not migrating, detach from all hosts. if ldname != delete_ldname: detached = self._detach_from_all(ldname, xml) xml = self._cli.view_all(self._properties['ismview_path'], detached) pools, lds, ldsets, used_ldns, hostports, max_ld_count = ( self.configs(xml)) ld = lds[ldname] if ld['RPL Attribute'] == 'IV': pass elif ld['RPL Attribute'] == 'MV': query_status = self._cli.query_MV_RV_status(ldname[3:], 'MV') if query_status == 'separated': # unpair. rvname = self._cli.query_MV_RV_name(ldname[3:], 'MV') self._cli.unpair(ldname[3:], rvname, 'force') else: msg = _('Specified Logical Disk %s has been copied.') % ldname LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) elif ld['RPL Attribute'] == 'RV': query_status = self._cli.query_MV_RV_status(ldname[3:], 'RV') if query_status == 'separated': # unpair. mvname = self._cli.query_MV_RV_name(ldname[3:], 'RV') self._cli.unpair(mvname, ldname[3:], 'force') else: msg = _('Specified Logical Disk %s has been copied.') % ldname LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) else: msg = (_('RPL Attribute Error. RPL Attribute = %s.') % ld['RPL Attribute']) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) # unbind LD. self._cli.unbind(ldname) LOG.debug('LD unbound. 
Name=%s.', ldname) def _is_manageable_volume(self, ld): if ld['RPL Attribute'] == '---': return False if ld['Purpose'] != '---' and 'BV' not in ld['RPL Attribute']: return False if ld['pool_num'] not in self._properties['pool_pools']: return False return True def _is_manageable_snapshot(self, ld): if ld['RPL Attribute'] == '---': return False if 'SV' not in ld['RPL Attribute']: return False if ld['pool_num'] not in self._properties['pool_backup_pools']: return False return True def _reference_to_ldname(self, resource_type, volume, existing_ref): if resource_type == 'volume': ldname_format = self._properties['ld_name_format'] else: ldname_format = self._properties['ld_backupname_format'] id_name = self.get_ldname(volume.id, ldname_format) ref_name = existing_ref['source-name'] volid = re.search( r'[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}', ref_name) if volid: ref_name = self.get_ldname(volid.group(0), ldname_format) return id_name, ref_name def _get_manageable_resources(self, resource_type, cinder_volumes, marker, limit, offset, sort_keys, sort_dirs): entries = [] xml = self._cli.view_all(self._properties['ismview_path']) pools, lds, ldsets, used_ldns, hostports, max_ld_count = ( self.configs(xml)) cinder_ids = [resource['id'] for resource in cinder_volumes] for ld in lds.values(): if ((resource_type == 'volume' and not self._is_manageable_volume(ld)) or (resource_type == 'snapshot' and not self._is_manageable_snapshot(ld))): continue ld_info = {'reference': {'source-name': ld['ldname']}, 'size': ld['ld_capacity'], 'cinder_id': None, 'extra_info': None} potential_id = volume_common.convert_to_id(ld['ldname'][3:]) if potential_id in cinder_ids: ld_info['safe_to_manage'] = False ld_info['reason_not_safe'] = 'already managed' ld_info['cinder_id'] = potential_id elif self.check_accesscontrol(ldsets, ld): ld_info['safe_to_manage'] = False ld_info['reason_not_safe'] = '%s in use' % resource_type else: ld_info['safe_to_manage'] = True ld_info['reason_not_safe'] = None if resource_type == 'snapshot': bvname = self._cli.get_bvname(ld['ldname']) bv_id = volume_common.convert_to_id(bvname) ld_info['source_reference'] = {'source-name': bv_id} entries.append(ld_info) return volume_utils.paginate_entries_list(entries, marker, limit, offset, sort_keys, sort_dirs) def _manage_existing_get_size(self, resource_type, volume, existing_ref): if 'source-name' not in existing_ref: reason = _('Reference must contain source-name element.') raise exception.ManageExistingInvalidReference( existing_ref=existing_ref, reason=reason) xml = self._cli.view_all(self._properties['ismview_path']) pools, lds, ldsets, used_ldns, hostports, max_ld_count = ( self.configs(xml)) id_name, ref_name = self._reference_to_ldname(resource_type, volume, existing_ref) if ref_name not in lds: reason = _('Specified resource does not exist.') raise exception.ManageExistingInvalidReference( existing_ref=existing_ref, reason=reason) ld = lds[ref_name] return ld['ld_capacity'] def get_manageable_volumes(self, cinder_volumes, marker, limit, offset, sort_keys, sort_dirs): """List volumes on the backend available for management by Cinder.""" LOG.debug('get_manageable_volumes Start.') return self._get_manageable_resources('volume', cinder_volumes, marker, limit, offset, sort_keys, sort_dirs) def manage_existing(self, volume, existing_ref): """Brings an existing backend storage object under Cinder management. 
Rename the backend storage object so that it matches the, volume['name'] which is how drivers traditionally map between a cinder volume and the associated backend storage object. """ LOG.debug('manage_existing Start.') xml = self._cli.view_all(self._properties['ismview_path']) pools, lds, ldsets, used_ldns, hostports, max_ld_count = ( self.configs(xml)) newname, oldname = self._reference_to_ldname('volume', volume, existing_ref) if self.check_accesscontrol(ldsets, lds[oldname]): reason = _('Specified resource is already in-use.') raise exception.ManageExistingInvalidReference( existing_ref=existing_ref, reason=reason) if lds[oldname]['pool_num'] not in self._properties['pool_pools']: reason = _('Volume type is unmatched.') raise exception.ManageExistingVolumeTypeMismatch( existing_ref=existing_ref, reason=reason) try: self._cli.changeldname(None, newname, oldname) except exception.CinderException as e: LOG.warning('Unable to manage existing volume ' '(reference = %(ref)s), (%(exception)s)', {'ref': existing_ref['source-name'], 'exception': e}) return def manage_existing_get_size(self, volume, existing_ref): """Return size of volume to be managed by manage_existing.""" LOG.debug('manage_existing_get_size Start.') return self._manage_existing_get_size('volume', volume, existing_ref) def unmanage(self, volume): """Removes the specified volume from Cinder management.""" pass def get_manageable_snapshots(self, cinder_snapshots, marker, limit, offset, sort_keys, sort_dirs): """List snapshots on the backend available for management by Cinder.""" LOG.debug('get_manageable_snapshots Start.') return self._get_manageable_resources('snapshot', cinder_snapshots, marker, limit, offset, sort_keys, sort_dirs) def manage_existing_snapshot(self, snapshot, existing_ref): """Brings an existing backend storage object under Cinder management. Rename the backend storage object so that it matches the snapshot['name'] which is how drivers traditionally map between a cinder snapshot and the associated backend storage object. 
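        existing_ref must contain a 'source-name' element; its value may be
        the LD name on the array or any string embedding the snapshot UUID,
        which is converted to the backup LD name internally. Illustrative
        example only (the UUID below is a placeholder, not a real snapshot)::

            {'source-name': '12345678-abcd-4ef0-9876-0123456789ab'}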
""" LOG.debug('manage_existing_snapshots Start.') xml = self._cli.view_all(self._properties['ismview_path']) pools, lds, ldsets, used_ldns, hostports, max_ld_count = ( self.configs(xml)) newname, oldname = self._reference_to_ldname('snapshot', snapshot, existing_ref) param_source = self.get_ldname(snapshot.volume_id, self._properties['ld_name_format']) ref_source = self._cli.get_bvname(oldname) if param_source[3:] != ref_source: reason = _('Snapshot source is unmatched.') raise exception.ManageExistingInvalidReference( existing_ref=existing_ref, reason=reason) if (lds[oldname]['pool_num'] not in self._properties['pool_backup_pools']): reason = _('Volume type is unmatched.') raise exception.ManageExistingVolumeTypeMismatch( existing_ref=existing_ref, reason=reason) try: self._cli.changeldname(None, newname, oldname) except exception.CinderException as e: LOG.warning('Unable to manage existing snapshot ' '(reference = %(ref)s), (%(exception)s)', {'ref': existing_ref['source-name'], 'exception': e}) def manage_existing_snapshot_get_size(self, snapshot, existing_ref): """Return size of snapshot to be managed by manage_existing.""" LOG.debug('manage_existing_snapshot_get_size Start.') return self._manage_existing_get_size('snapshot', snapshot, existing_ref) def unmanage_snapshot(self, snapshot): """Removes the specified snapshot from Cinder management.""" pass class MStorageDSVDriver(MStorageDriver): """M-Series Storage Snapshot helper class.""" def create_snapshot(self, snapshot): msgparm = ('Snapshot ID = %(snap_id)s, ' 'Snapshot Volume ID = %(snapvol_id)s' % {'snap_id': snapshot.id, 'snapvol_id': snapshot.volume_id}) try: self._create_snapshot(snapshot, self._properties['diskarray_name']) LOG.info('Created Snapshot (%s)', msgparm) except exception.CinderException as e: with excutils.save_and_reraise_exception(): LOG.warning('Failed to Create Snapshot ' '(%(msgparm)s) (%(exception)s)', {'msgparm': msgparm, 'exception': e}) @coordination.synchronized('mstorage_bind_execute_{diskarray_name}') def _create_snapshot(self, snapshot, diskarray_name): LOG.debug('_create_snapshot(Volume ID = %(snapvol_id)s, ' 'Snapshot ID = %(snap_id)s ) Start.', {'snapvol_id': snapshot.volume_id, 'snap_id': snapshot.id}) xml = self._cli.view_all(self._properties['ismview_path']) pools, lds, ldsets, used_ldns, hostports, max_ld_count = ( self.configs(xml)) if len(self._properties['pool_backup_pools']) == 0: LOG.error('backup_pools is not set.') raise exception.ParameterNotFound(param='backup_pools') # get BV name. 
ldname = self._validate_ld_exist( lds, snapshot.volume_id, self._properties['ld_name_format']) selected_pool = self._select_dsv_poolnumber(snapshot, pools, None) snapshotname = self._convert_id2snapname(snapshot) self._cli.snapshot_create(ldname, snapshotname[3:], selected_pool) LOG.debug('_create_snapshot(Volume ID = %(snapvol_id)s, ' 'Snapshot ID = %(snap_id)s) End.', {'snapvol_id': snapshot.volume_id, 'snap_id': snapshot.id}) def delete_snapshot(self, snapshot): msgparm = ('Snapshot ID = %(snap_id)s, ' 'Snapshot Volume ID = %(snapvol_id)s' % {'snap_id': snapshot.id, 'snapvol_id': snapshot.volume_id}) try: self._delete_snapshot(snapshot) LOG.info('Deleted Snapshot (%s)', msgparm) except exception.CinderException as e: with excutils.save_and_reraise_exception(): LOG.warning('Failed to Delete Snapshot ' '(%(msgparm)s) (%(exception)s)', {'msgparm': msgparm, 'exception': e}) def _delete_snapshot(self, snapshot): LOG.debug('_delete_snapshot(Snapshot ID = %s) Start.', snapshot.id) xml = self._cli.view_all(self._properties['ismview_path']) pools, lds, ldsets, used_ldns, hostports, max_ld_count = ( self.configs(xml)) # get BV name. ldname = self.get_ldname(snapshot.volume_id, self._properties['ld_name_format']) if ldname not in lds: LOG.debug('LD(BV) `%s` already unbound?', ldname) return # get SV name. snapshotname = ( self.get_ldname(snapshot.id, self._properties['ld_backupname_format'])) if snapshotname not in lds: LOG.debug('LD(SV) `%s` already unbound?', snapshotname) return self._cli.snapshot_delete(ldname, snapshotname[3:]) LOG.debug('_delete_snapshot(Snapshot ID = %s) End.', snapshot.id) def create_volume_from_snapshot(self, volume, snapshot): msgparm = ('Volume ID = %(vol_id)s, ' 'Snapshot ID = %(snap_id)s, ' 'Snapshot Volume ID = %(snapvol_id)s' % {'vol_id': volume.id, 'snap_id': snapshot.id, 'snapvol_id': snapshot.volume_id}) try: self._create_volume_from_snapshot(volume, snapshot) LOG.info('Created Volume from Snapshot (%s)', msgparm) except exception.CinderException as e: with excutils.save_and_reraise_exception(): LOG.warning('Failed to Create Volume from Snapshot ' '(%(msgparm)s) (%(exception)s)', {'msgparm': msgparm, 'exception': e}) def _create_volume_from_snapshot(self, volume, snapshot): LOG.debug('_create_volume_from_snapshot' '(Volume ID = %(vol_id)s, Snapshot ID(SV) = %(snap_id)s, ' 'Snapshot ID(BV) = %(snapvol_id)s) Start.', {'vol_id': volume.id, 'snap_id': snapshot.id, 'snapvol_id': snapshot.volume_id}) xml = self._cli.view_all(self._properties['ismview_path']) pools, lds, ldsets, used_ldns, hostports, max_ld_count = ( self.configs(xml)) # get BV name. mvname = ( self.get_ldname(snapshot.volume_id, self._properties['ld_name_format'])) # get SV name. rvname = ( self.get_ldname(snapshot.id, self._properties['ld_backupname_format'])) if rvname not in lds: msg = _('Logical Disk `%s` has unbound already.') % rvname LOG.error(msg) raise exception.NotFound(msg) rv = lds[rvname] # check snapshot status. query_status = self._cli.query_BV_SV_status(mvname[3:], rvname[3:]) if query_status != 'snap/active': msg = (_('Cannot create volume from snapshot, ' 'because the snapshot data does not exist. 
' 'bvname=%(bvname)s, svname=%(svname)s') % {'bvname': mvname, 'svname': rvname}) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) mv_capacity = rv['ld_capacity'] rv_capacity = volume.size new_rvname, rvnumber, selected_pool = self._bind_ld( volume, mv_capacity, None, self._convert_id2name, self._select_volddr_poolnumber, mv_capacity) self._set_qos_spec(new_rvname, volume.volume_type_id) if rv_capacity <= mv_capacity: rvnumber = None rv_capacity = None # Restore Start. volume_properties = { 'mvname': rvname, 'rvname': new_rvname, 'prev_mvname': None, 'capacity': mv_capacity, 'mvid': snapshot.id, 'rvid': volume.id, 'rvldn': rvnumber, 'rvcapacity': rv_capacity, 'flag': 'esv_restore', 'context': self._context } self._cli.backup_restore(volume_properties, cli.UnpairWaitForDDRRestore) LOG.debug('_create_volume_from_snapshot(Volume ID = %(vol_id)s, ' 'Snapshot ID(SV) = %(snap_id)s, ' 'Snapshot ID(BV) = %(snapvol_id)s) End.', {'vol_id': volume.id, 'snap_id': snapshot.id, 'snapvol_id': snapshot.volume_id}) def revert_to_snapshot(self, context, volume, snapshot): """called to perform revert volume from snapshot. :param context: Our working context. :param volume: the volume to be reverted. :param snapshot: the snapshot data revert to volume. :return None """ msgparm = ('Volume ID = %(vol_id)s, ' 'Snapshot ID = %(snap_id)s, ' 'Snapshot Volume ID = %(snapvol_id)s' % {'vol_id': volume.id, 'snap_id': snapshot.id, 'snapvol_id': snapshot.volume_id}) try: self._revert_to_snapshot(context, volume, snapshot) LOG.info('Reverted to Snapshot (%s)', msgparm) except exception.CinderException as e: with excutils.save_and_reraise_exception(): LOG.warning('Failed to revert to Snapshot ' '(%(msgparm)s) (%(exception)s)', {'msgparm': msgparm, 'exception': e}) def _revert_to_snapshot(self, context, volume, snapshot): LOG.debug('_revert_to_snapshot (Volume ID = %(vol_id)s, ' 'Snapshot ID = %(snap_id)s) Start.', {'vol_id': volume.id, 'snap_id': snapshot.id}) xml = self._cli.view_all(self._properties['ismview_path']) pools, lds, ldsets, used_ldns, hostports, max_ld_count = ( self.configs(xml)) # get BV name. bvname = ( self.get_ldname(volume.id, self._properties['ld_name_format'])) if bvname not in lds: msg = _('Logical Disk `%s` has unbound already.') % bvname LOG.error(msg) raise exception.NotFound(msg) # get SV name. svname = ( self.get_ldname(snapshot.id, self._properties['ld_backupname_format'])) if svname not in lds: msg = _('Logical Disk `%s` has unbound already.') % svname LOG.error(msg) raise exception.NotFound(msg) self._cli.snapshot_restore(bvname, svname) LOG.debug('_revert_to_snapshot(Volume ID = %s) End.', volume.id) ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1759315577.383121 cinder-27.0.0/cinder/volume/drivers/netapp/0000775000175000017500000000000000000000000020607 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/drivers/netapp/__init__.py0000664000175000017500000000000000000000000022706 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/drivers/netapp/common.py0000664000175000017500000000772100000000000022460 0ustar00zuulzuul00000000000000# Copyright (c) 2014 Navneet Singh. All rights reserved. # Copyright (c) 2014 Clinton Knight. All rights reserved. # Copyright (c) 2015 Alex Meade. All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Unified driver for NetApp storage systems. Supports multiple storage systems of different families and protocols. """ from oslo_log import log as logging from oslo_utils import importutils from cinder import exception from cinder.i18n import _ from cinder.volume import driver from cinder.volume.drivers.netapp import options from cinder.volume.drivers.netapp import utils as na_utils LOG = logging.getLogger(__name__) DATAONTAP_PATH = 'cinder.volume.drivers.netapp.dataontap' # Add new drivers here, no other code changes required. NETAPP_UNIFIED_DRIVER_REGISTRY = { 'ontap_cluster': { 'iscsi': DATAONTAP_PATH + '.iscsi_cmode.NetAppCmodeISCSIDriver', 'nfs': DATAONTAP_PATH + '.nfs_cmode.NetAppCmodeNfsDriver', 'fc': DATAONTAP_PATH + '.fc_cmode.NetAppCmodeFibreChannelDriver', 'nvme': DATAONTAP_PATH + '.nvme_cmode.NetAppCmodeNVMeDriver' }} class NetAppDriver(driver.ProxyVD): """NetApp unified block storage driver. Acts as a factory to create NetApp storage drivers based on the storage family and protocol configured. """ REQUIRED_FLAGS = ['netapp_storage_family', 'netapp_storage_protocol'] def __new__(cls, *args, **kwargs): config = kwargs.get('configuration', None) if not config: raise exception.InvalidInput( reason=_('Required configuration not found')) config.append_config_values(options.netapp_proxy_opts) config.append_config_values(options.netapp_transport_opts) na_utils.check_flags(NetAppDriver.REQUIRED_FLAGS, config) app_version = na_utils.OpenStackInfo().info() LOG.info('OpenStack OS Version Info: %(info)s', {'info': app_version}) kwargs['app_version'] = app_version return NetAppDriver.create_driver(config.netapp_storage_family, config.netapp_storage_protocol, *args, **kwargs) @staticmethod def create_driver(storage_family, storage_protocol, *args, **kwargs): """Creates an appropriate driver based on family and protocol.""" storage_family = storage_family.lower() storage_protocol = storage_protocol.lower() fmt = {'storage_family': storage_family, 'storage_protocol': storage_protocol} LOG.info('Requested unified config: %(storage_family)s and ' '%(storage_protocol)s.', fmt) family_meta = NETAPP_UNIFIED_DRIVER_REGISTRY.get(storage_family) if family_meta is None: raise exception.InvalidInput( reason=_('Storage family %s is not supported.') % storage_family) driver_loc = family_meta.get(storage_protocol) if driver_loc is None: raise exception.InvalidInput( reason=_('Protocol %(storage_protocol)s is not supported ' 'for storage family %(storage_family)s.') % fmt) kwargs = kwargs or {} kwargs['netapp_mode'] = 'proxy' driver = importutils.import_object(driver_loc, *args, **kwargs) LOG.info('NetApp driver of family %(storage_family)s and protocol ' '%(storage_protocol)s loaded.', fmt) return driver ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1759315577.383121 cinder-27.0.0/cinder/volume/drivers/netapp/dataontap/0000775000175000017500000000000000000000000022562 
5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/drivers/netapp/dataontap/__init__.py0000664000175000017500000000000000000000000024661 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/drivers/netapp/dataontap/block_base.py0000664000175000017500000014340100000000000025223 0ustar00zuulzuul00000000000000# Copyright (c) 2012 NetApp, Inc. All rights reserved. # Copyright (c) 2014 Ben Swartzlander. All rights reserved. # Copyright (c) 2014 Navneet Singh. All rights reserved. # Copyright (c) 2014 Clinton Knight. All rights reserved. # Copyright (c) 2014 Alex Meade. All rights reserved. # Copyright (c) 2014 Andrew Kerr. All rights reserved. # Copyright (c) 2014 Jeff Applewhite. All rights reserved. # Copyright (c) 2015 Tom Barron. All rights reserved. # Copyright (c) 2015 Dustin Schoenbrun. All rights reserved. # Copyright (c) 2016 Chuck Fouts. All rights reserved. # Copyright (c) 2016 Mike Rooney. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Volume driver library for NetApp 7/C-mode block storage systems. """ import copy import math import uuid from oslo_log import log as logging from oslo_log import versionutils from oslo_utils import excutils from oslo_utils import units from cinder import coordination from cinder import exception from cinder.i18n import _ from cinder.volume.drivers.netapp.dataontap.client import api as netapp_api from cinder.volume.drivers.netapp.dataontap.utils import loopingcalls from cinder.volume.drivers.netapp import options as na_opts from cinder.volume.drivers.netapp import utils as na_utils from cinder.volume import volume_utils from cinder.zonemanager import utils as fczm_utils LOG = logging.getLogger(__name__) class NetAppLun(object): """Represents a LUN on NetApp storage.""" def __init__(self, handle, name, size, metadata_dict): self.handle = handle self.name = name self.size = size self.metadata = metadata_dict or {} def get_metadata_property(self, prop): """Get the metadata property of a LUN.""" if prop in self.metadata: return self.metadata[prop] name = self.name LOG.debug("No metadata property %(prop)s defined for the LUN %(name)s", {'prop': prop, 'name': name}) def __str__(self, *args, **kwargs): return 'NetApp LUN [handle:%s, name:%s, size:%s, metadata:%s]' % ( self.handle, self.name, self.size, self.metadata) class NetAppBlockStorageLibrary( object, metaclass=volume_utils.TraceWrapperMetaclass): """NetApp block storage library for Data ONTAP.""" # do not increment this as it may be used in volume type definitions VERSION = "1.0.0" REQUIRED_FLAGS_BASIC = ['netapp_login', 'netapp_password', 'netapp_server_hostname'] REQUIRED_FLAGS_CERT = ['netapp_private_key_file', 'netapp_certificate_file'] ALLOWED_LUN_OS_TYPES = ['linux', 'aix', 'hpux', 'image', 'windows', 'windows_2008', 'windows_gpt', 'solaris', 'solaris_efi', 
'netware', 'openvms', 'hyper_v'] ALLOWED_IGROUP_HOST_TYPES = ['linux', 'aix', 'hpux', 'windows', 'solaris', 'netware', 'default', 'vmware', 'openvms', 'xen', 'hyper_v'] DEFAULT_LUN_OS = 'linux' DEFAULT_HOST_TYPE = 'linux' DEFAULT_FILTER_FUNCTION = ('capabilities.utilization < 70 and ' 'capabilities.total_volumes < 1024') DEFAULT_GOODNESS_FUNCTION = '100 - capabilities.utilization' def __init__(self, driver_name, driver_protocol, **kwargs): na_utils.validate_instantiation(**kwargs) self.driver_name = driver_name self.driver_protocol = driver_protocol self.zapi_client = None self.dest_zapi_client = None self._stats = {} self.lun_table = {} self.lun_ostype = None self.host_type = None self.lun_space_reservation = 'true' self.lookup_service = fczm_utils.create_lookup_service() self.app_version = kwargs.get("app_version", "unknown") self.host = kwargs.get('host') self.backend_name = self.host.split('@')[1] self.configuration = kwargs['configuration'] self.configuration.append_config_values(na_opts.netapp_connection_opts) self.configuration.append_config_values(na_opts.netapp_basicauth_opts) self.configuration.append_config_values( na_opts.netapp_certificateauth_opts) self.configuration.append_config_values(na_opts.netapp_transport_opts) self.configuration.append_config_values( na_opts.netapp_provisioning_opts) self.configuration.append_config_values(na_opts.netapp_san_opts) self.max_over_subscription_ratio = ( volume_utils.get_max_over_subscription_ratio( self.configuration.max_over_subscription_ratio, supports_auto=True)) self.reserved_percentage = self._get_reserved_percentage() self.loopingcalls = loopingcalls.LoopingCalls() def _get_reserved_percentage(self): # If the legacy config option if it is set to the default # value, use the more general configuration option. if self.configuration.netapp_size_multiplier == ( na_opts.NETAPP_SIZE_MULTIPLIER_DEFAULT): return self.configuration.reserved_percentage # If the legacy config option has a non-default value, # honor it for one release. Note that the "size multiplier" # actually acted as a divisor in the code and didn't apply # to the file size (as the help message for this option suggest), # but rather to total and free size for the pool. divisor = self.configuration.netapp_size_multiplier reserved_ratio = round(1 - (1 / divisor), 2) reserved_percentage = 100 * int(reserved_ratio) msg = ('The "netapp_size_multiplier" configuration option is ' 'deprecated and will be removed in the Mitaka release. ' 'Please set "reserved_percentage = %d" instead.') \ % reserved_percentage versionutils.report_deprecated_feature(LOG, msg) return reserved_percentage def do_setup(self, context): if self.configuration.netapp_private_key_file or\ self.configuration.netapp_certificate_file: na_utils.check_flags(self.REQUIRED_FLAGS_CERT, self.configuration) else: na_utils.check_flags(self.REQUIRED_FLAGS_BASIC, self.configuration) self.lun_ostype = (self.configuration.netapp_lun_ostype or self.DEFAULT_LUN_OS) self.host_type = (self.configuration.netapp_host_type or self.DEFAULT_HOST_TYPE) if self.configuration.netapp_lun_space_reservation == 'enabled': self.lun_space_reservation = 'true' else: self.lun_space_reservation = 'false' def check_for_setup_error(self): """Check that the driver is working and can communicate. Discovers the LUNs on the NetApp server. 
""" if self.lun_ostype not in self.ALLOWED_LUN_OS_TYPES: msg = _("Invalid value for NetApp configuration" " option netapp_lun_ostype.") LOG.error(msg) raise na_utils.NetAppDriverException(msg) if self.host_type not in self.ALLOWED_IGROUP_HOST_TYPES: msg = _("Invalid value for NetApp configuration" " option netapp_host_type.") LOG.error(msg) raise na_utils.NetAppDriverException(msg) lun_list = self.zapi_client.get_lun_list() self._extract_and_populate_luns(lun_list) LOG.debug("Success getting list of LUNs from server.") self.loopingcalls.start_tasks() def _add_looping_tasks(self): """Add tasks that need to be executed at a fixed interval. Inheriting class overrides and then explicitly calls this method. """ # Add the task that deletes snapshots marked for deletion. # ADD snapshot cleanup task to ASA r2 once snapshot feature is # implemented in the driver. if not self.configuration.netapp_disaggregated_platform: self.loopingcalls.add_task( self._delete_snapshots_marked_for_deletion, loopingcalls.ONE_MINUTE, loopingcalls.ONE_MINUTE) # Add the task that logs EMS messages self.loopingcalls.add_task( self._handle_ems_logging, loopingcalls.ONE_HOUR) def _delete_snapshots_marked_for_deletion(self): snapshots = self.zapi_client.get_snapshots_marked_for_deletion() for snapshot in snapshots: self.zapi_client.delete_snapshot( snapshot['volume_name'], snapshot['name']) def _handle_ems_logging(self): """Log autosupport messages.""" raise NotImplementedError() def get_pool(self, volume): """Return pool name where volume resides. :param volume: The volume hosted by the driver. :return: Name of the pool where given volume is hosted. """ name = volume['name'] metadata = self._get_lun_attr(name, 'metadata') or dict() return metadata.get('Volume', None) def create_volume(self, volume): """Driver entry point for creating a new volume (Data ONTAP LUN).""" LOG.debug('create_volume on %s', volume['host']) # get Data ONTAP volume name as pool name pool_name = volume_utils.extract_host(volume['host'], level='pool') if pool_name is None: msg = _("Pool is not available in the volume host field.") raise exception.InvalidHost(reason=msg) extra_specs = na_utils.get_volume_extra_specs(volume) space_allocation = volume_utils.is_boolean_str( extra_specs.get('netapp:space_allocation') ) LOG.debug('create_volume space_allocation %r', space_allocation) lun_name = volume['name'] size = int(volume['size']) * units.Gi metadata = {'OsType': self.lun_ostype, 'SpaceReserved': self.lun_space_reservation, 'SpaceAllocated': str(space_allocation).lower(), 'Path': '/vol/%s/%s' % (pool_name, lun_name)} qos_policy_group_info = self._setup_qos_for_volume(volume, extra_specs) qos_policy_group_name = ( na_utils.get_qos_policy_group_name_from_info( qos_policy_group_info)) qos_policy_group_is_adaptive = (volume_utils.is_boolean_str( extra_specs.get('netapp:qos_policy_group_is_adaptive')) or na_utils.is_qos_policy_group_spec_adaptive (qos_policy_group_info)) try: self._create_lun(pool_name, lun_name, size, metadata, qos_policy_group_name, qos_policy_group_is_adaptive) except Exception: LOG.exception("Exception creating LUN %(name)s in pool %(pool)s.", {'name': lun_name, 'pool': pool_name}) self._mark_qos_policy_group_for_deletion(qos_policy_group_info) msg = _("Volume %s could not be created.") raise exception.VolumeBackendAPIException(data=msg % ( volume['name'])) LOG.debug('Created LUN with name %(name)s and QoS info %(qos)s', {'name': lun_name, 'qos': qos_policy_group_info}) metadata['Path'] = '/vol/%s/%s' % (pool_name, lun_name) 
metadata['Volume'] = pool_name metadata['Qtree'] = None handle = self._create_lun_handle(metadata) self._add_lun_to_table(NetAppLun(handle, lun_name, size, metadata)) model_update = self._get_volume_model_update(volume) return model_update def _setup_qos_for_volume(self, volume, extra_specs): return None def _get_volume_model_update(self, volume): """Provide any updates necessary for a volume being created/managed.""" raise NotImplementedError def _mark_qos_policy_group_for_deletion(self, qos_policy_group_info): return def delete_volume(self, volume): """Driver entry point for destroying existing volumes.""" self._delete_lun(volume['name']) def _delete_lun(self, lun_name): """Helper method to delete LUN backing a volume or snapshot.""" metadata = self._get_lun_attr(lun_name, 'metadata') if metadata: try: self.zapi_client.destroy_lun(metadata['Path']) except netapp_api.NaApiError as e: if e.code == netapp_api.EOBJECTNOTFOUND: LOG.warning("Failure deleting LUN %(name)s. %(message)s", {'name': lun_name, 'message': e}) else: error_message = (_('A NetApp Api Error occurred: %s') % e) raise na_utils.NetAppDriverException(error_message) self.lun_table.pop(lun_name) else: LOG.warning("No entry in LUN table for volume/snapshot" " %(name)s.", {'name': lun_name}) def ensure_export(self, context, volume): """Driver entry point to get the export info for an existing volume.""" handle = self._get_lun_attr(volume['name'], 'handle') return {'provider_location': handle} def create_export(self, context, volume): """Driver entry point to get the export info for a new volume.""" handle = self._get_lun_attr(volume['name'], 'handle') return {'provider_location': handle} def remove_export(self, context, volume): """Driver entry point to remove an export for a volume. Since exporting is idempotent in this driver, we have nothing to do for unexporting. """ pass def create_snapshot(self, snapshot): """Driver entry point for creating a snapshot. This driver implements snapshots by using efficient single-file (LUN) cloning. 
""" self._create_snapshot(snapshot) def _create_snapshot(self, snapshot): vol_name = snapshot['volume_name'] snapshot_name = snapshot['name'] lun = self._get_lun_from_table(vol_name) self._clone_lun(lun.name, snapshot_name, space_reserved='false', is_snapshot=True) def delete_snapshot(self, snapshot): """Driver entry point for deleting a snapshot.""" self._delete_lun(snapshot['name']) LOG.debug("Snapshot %s deletion successful", snapshot['name']) def create_volume_from_snapshot(self, volume, snapshot): source = {'name': snapshot['name'], 'size': snapshot['volume_size']} return self._clone_source_to_destination(source, volume) def create_cloned_volume(self, volume, src_vref): src_lun = self._get_lun_from_table(src_vref['name']) source = {'name': src_lun.name, 'size': src_vref['size']} return self._clone_source_to_destination(source, volume) def _clone_source_to_destination(self, source, destination_volume): source_size = source['size'] destination_size = destination_volume['size'] source_name = source['name'] destination_name = destination_volume['name'] extra_specs = na_utils.get_volume_extra_specs(destination_volume) qos_policy_group_info = self._setup_qos_for_volume( destination_volume, extra_specs) qos_policy_group_name = ( na_utils.get_qos_policy_group_name_from_info( qos_policy_group_info)) qos_policy_group_is_adaptive = (volume_utils.is_boolean_str( extra_specs.get('netapp:qos_policy_group_is_adaptive')) or na_utils.is_qos_policy_group_spec_adaptive (qos_policy_group_info)) try: self._clone_lun( source_name, destination_name, space_reserved=self.lun_space_reservation, qos_policy_group_name=qos_policy_group_name, qos_policy_group_is_adaptive=qos_policy_group_is_adaptive) if destination_size != source_size: try: self._extend_volume(destination_volume, destination_size, qos_policy_group_name) except Exception: with excutils.save_and_reraise_exception(): LOG.error("Resizing %s failed. 
Cleaning volume.", destination_volume['id']) self.delete_volume(destination_volume) return self._get_volume_model_update(destination_volume) except Exception: LOG.exception("Exception cloning volume %(name)s from source " "volume %(source)s.", {'name': destination_name, 'source': source_name}) self._mark_qos_policy_group_for_deletion(qos_policy_group_info) msg = _("Volume %s could not be created from source volume.") raise exception.VolumeBackendAPIException( data=msg % destination_name) def _create_lun(self, volume_name, lun_name, size, metadata, qos_policy_group_name=None): """Creates a LUN, handling Data ONTAP differences as needed.""" raise NotImplementedError() def _create_lun_handle(self, metadata): """Returns LUN handle based on filer type.""" raise NotImplementedError() def _extract_lun_info(self, lun): """Extracts the LUNs from API and populates the LUN table.""" path = lun['Path'] (_rest, _splitter, name) = path.rpartition('/') handle = self._create_lun_handle(lun) size = lun['Size'] return NetAppLun(handle, name, size, lun) def _extract_and_populate_luns(self, api_luns): """Extracts the LUNs from API and populates the LUN table.""" for lun in api_luns: discovered_lun = self._extract_lun_info(lun) self._add_lun_to_table(discovered_lun) def _map_lun(self, name, initiator_list, initiator_type, lun_id=None): """Maps LUN to the initiator(s) and returns LUN ID assigned.""" metadata = self._get_lun_attr(name, 'metadata') path = metadata['Path'] igroup_name, ig_host_os, ig_type = self._get_or_create_igroup( initiator_list, initiator_type, self.host_type) if ig_host_os != self.host_type: LOG.warning("LUN misalignment may occur for current" " initiator group %(ig_nm)s) with host OS type" " %(ig_os)s. Please configure initiator group" " manually according to the type of the" " host OS.", {'ig_nm': igroup_name, 'ig_os': ig_host_os}) try: result = self.zapi_client.map_lun(path, igroup_name, lun_id=lun_id) if self._is_active_sync_configured(self.configuration): self.dest_zapi_client.map_lun(path, igroup_name, lun_id=lun_id) return result except netapp_api.NaApiError as e: (_igroup, lun_id) = self._find_mapped_lun_igroup(path, initiator_list) if lun_id is not None: return lun_id else: raise e def _unmap_lun(self, path, initiator_list): """Unmaps a LUN from given initiator.""" if len(initiator_list) != 0: lun_unmap_list = [] (igroup_name, _) = self._find_mapped_lun_igroup( path, initiator_list) lun_unmap_list.append((path, igroup_name)) else: lun_maps = self.zapi_client.get_lun_map(path) lun_unmap_list = [(path, lun_m['initiator-group']) for lun_m in lun_maps] for _path, _igroup_name in lun_unmap_list: self.zapi_client.unmap_lun(_path, _igroup_name) if self._is_active_sync_configured(self.configuration): self.dest_zapi_client.unmap_lun(_path, _igroup_name) def _find_mapped_lun_igroup(self, path, initiator_list): """Find an igroup for a LUN mapped to the given initiator(s).""" raise NotImplementedError() def _has_luns_mapped_to_initiators(self, initiator_list): """Checks whether any LUNs are mapped to the given initiator(s).""" return self.zapi_client.has_luns_mapped_to_initiators(initiator_list) def _is_active_sync_configured(self, config): backend_names = [] replication_devices = config.safe_get('replication_device') if replication_devices: for replication_device in replication_devices: backend_id = replication_device.get('backend_id') if backend_id: backend_names.append(backend_id) replication_enabled = True if backend_names else False if replication_enabled: return 
config.safe_get('netapp_replication_policy') == \ "AutomatedFailOver" return False def _get_or_create_igroup(self, initiator_list, initiator_group_type, host_os_type): """Checks for an igroup for a set of one or more initiators. Creates igroup if not already present with given host os type, igroup type and adds initiators. """ # Backend supports different igroups with the same initiators, so # instead of reusing non OpenStack igroups, we make sure we only use # our own, thus being compatible with custom igroups. igroups = self.zapi_client.get_igroup_by_initiators(initiator_list) for igroup in igroups: igroup_name = igroup['initiator-group-name'] if igroup_name.startswith(na_utils.OPENSTACK_PREFIX): host_os_type = igroup['initiator-group-os-type'] initiator_group_type = igroup['initiator-group-type'] break else: igroup_name = self._create_igroup_add_initiators( initiator_group_type, host_os_type, initiator_list) if self._is_active_sync_configured(self.configuration): igroups_dest = self.dest_zapi_client.get_igroup_by_initiators( initiator_list) for igroup in igroups_dest: igroup_name_dest = igroup['initiator-group-name'] if igroup_name_dest.startswith(na_utils.OPENSTACK_PREFIX): host_os_type = igroup['initiator-group-os-type'] initiator_group_type = igroup['initiator-group-type'] break else: self._create_igroup_add_initiators( initiator_group_type, host_os_type, initiator_list) return igroup_name, host_os_type, initiator_group_type def _create_igroup_add_initiators(self, initiator_group_type, host_os_type, initiator_list): """Creates igroup and adds initiators.""" igroup_name = na_utils.OPENSTACK_PREFIX + str(uuid.uuid4()) self.zapi_client.create_igroup(igroup_name, initiator_group_type, host_os_type) if self._is_active_sync_configured(self.configuration): self.dest_zapi_client.create_igroup(igroup_name, initiator_group_type, host_os_type) for initiator in initiator_list: self.zapi_client.add_igroup_initiator(igroup_name, initiator) if self._is_active_sync_configured(self.configuration): self.dest_zapi_client.add_igroup_initiator(igroup_name, initiator) return igroup_name def _delete_lun_from_table(self, name): """Deletes LUN from cache table.""" if self.lun_table.get(name, None): self.lun_table.pop(name) def _add_lun_to_table(self, lun): """Adds LUN to cache table.""" if not isinstance(lun, NetAppLun): msg = _("Object is not a NetApp LUN.") raise exception.VolumeBackendAPIException(data=msg) self.lun_table[lun.name] = lun def _get_lun_from_table(self, name): """Gets LUN from cache table. Refreshes cache if LUN not found in cache. """ lun = self.lun_table.get(name) if lun is None: lun_list = self.zapi_client.get_lun_list() self._extract_and_populate_luns(lun_list) lun = self.lun_table.get(name) if lun is None: raise exception.VolumeNotFound(volume_id=name) return lun def _clone_lun(self, name, new_name, space_reserved='true', qos_policy_group_name=None, src_block=0, dest_block=0, block_count=0, source_snapshot=None, is_snapshot=False): """Clone LUN with the given name to the new name.""" raise NotImplementedError() def _get_lun_attr(self, name, attr): """Get the LUN attribute if found else None.""" try: attr = getattr(self._get_lun_from_table(name), attr) return attr except exception.VolumeNotFound as e: LOG.error("Message: %s", e.msg) except Exception as e: LOG.error("Error getting LUN attribute. 
Exception: %s", e) return None def _get_fc_target_wwpns(self, include_partner=True): raise NotImplementedError() def get_volume_stats(self, refresh=False, filter_function=None, goodness_function=None): """Get volume stats. If 'refresh' is True, update the stats first. """ if refresh: self._update_volume_stats(filter_function=filter_function, goodness_function=goodness_function) return self._stats def _update_volume_stats(self, filter_function=None, goodness_function=None): raise NotImplementedError() def get_default_filter_function(self): """Get the default filter_function string.""" return self.DEFAULT_FILTER_FUNCTION def get_default_goodness_function(self): """Get the default goodness_function string.""" return self.DEFAULT_GOODNESS_FUNCTION def extend_volume(self, volume, new_size): """Driver entry point to increase the size of a volume.""" extra_specs = na_utils.get_volume_extra_specs(volume) # Create volume copy with new size for size-dependent QOS specs volume_copy = copy.copy(volume) volume_copy['size'] = new_size qos_policy_group_info = self._setup_qos_for_volume(volume_copy, extra_specs) qos_policy_group_name = ( na_utils.get_qos_policy_group_name_from_info( qos_policy_group_info)) try: self._extend_volume(volume, new_size, qos_policy_group_name) except Exception: with excutils.save_and_reraise_exception(): # If anything went wrong, revert QoS settings self._setup_qos_for_volume(volume, extra_specs) def _extend_volume(self, volume, new_size, qos_policy_group_name): """Extend an existing volume to the new size.""" name = volume['name'] lun = self._get_lun_from_table(name) path = lun.metadata['Path'] curr_size_bytes = str(lun.size) new_size_bytes = str(int(new_size) * units.Gi) # Reused by clone scenarios. # Hence comparing the stored size. if curr_size_bytes == new_size_bytes: LOG.info("No need to extend volume %s" " as it is already the requested new size.", name) return ontap_version = self.zapi_client.get_ontap_version(cached=True) if ontap_version >= (9, 5, 0): self.zapi_client.do_direct_resize(path, new_size_bytes) else: lun_geometry = self.zapi_client.get_lun_geometry(path) if (lun_geometry and int(lun_geometry.get("max_resize", 0)) >= int(new_size_bytes)): self.zapi_client.do_direct_resize(path, new_size_bytes) else: if volume['attach_status'] != 'detached': msg = _('Volume %(vol_id)s cannot be resized from ' '%(old_size)s to %(new_size)s, because would ' 'exceed its max geometry %(max_geo)s while ' 'not being detached.') raise exception.VolumeBackendAPIException(data=msg % { 'vol_id': name, 'old_size': curr_size_bytes, 'new_size': new_size_bytes, 'max_geo': lun_geometry.get("max_resize")}) self._do_sub_clone_resize( path, new_size_bytes, qos_policy_group_name=qos_policy_group_name) self.lun_table[name].size = new_size_bytes def _get_vol_option(self, volume_name, option_name): """Get the value for the volume option.""" value = None options = self.zapi_client.get_volume_options(volume_name) for opt in options: if opt.get_child_content('name') == option_name: value = opt.get_child_content('value') break return value def _do_sub_clone_resize(self, lun_path, new_size_bytes, qos_policy_group_name=None): """Resize a LUN beyond its original geometry using sub-LUN cloning. Clones the block ranges, swaps the LUNs, and deletes the source LUN. 
""" seg = lun_path.split("/") LOG.info("Resizing LUN %s using clone operation.", seg[-1]) lun_name = seg[-1] vol_name = seg[2] lun = self._get_lun_from_table(lun_name) metadata = lun.metadata compression = self._get_vol_option(vol_name, 'compression') if compression == "on": msg = _('%s cannot be resized using clone operation' ' as it is hosted on compressed volume') raise exception.VolumeBackendAPIException(data=msg % lun_name) block_count = self._get_lun_block_count(lun_path) if block_count == 0: msg = _('%s cannot be resized using clone operation' ' as it contains no blocks.') raise exception.VolumeBackendAPIException(data=msg % lun_name) new_lun_name = 'new-%s' % lun_name self.zapi_client.create_lun( vol_name, new_lun_name, new_size_bytes, metadata, qos_policy_group_name=qos_policy_group_name) try: self._clone_lun(lun_name, new_lun_name, block_count=block_count) self._post_sub_clone_resize(lun_path) except Exception: with excutils.save_and_reraise_exception(): new_lun_path = '/vol/%s/%s' % (vol_name, new_lun_name) self.zapi_client.destroy_lun(new_lun_path) def _post_sub_clone_resize(self, path): """Try post sub clone resize in a transactional manner.""" st_tm_mv, st_nw_mv, st_del_old = None, None, None seg = path.split("/") LOG.info("Post clone resize LUN %s", seg[-1]) new_lun = 'new-%s' % (seg[-1]) tmp_lun = 'tmp-%s' % (seg[-1]) tmp_path = "/vol/%s/%s" % (seg[2], tmp_lun) new_path = "/vol/%s/%s" % (seg[2], new_lun) try: st_tm_mv = self.zapi_client.move_lun(path, tmp_path) st_nw_mv = self.zapi_client.move_lun(new_path, path) st_del_old = self.zapi_client.destroy_lun(tmp_path) except Exception as e: if st_tm_mv is None: msg = _("Failure staging LUN %s to tmp.") raise exception.VolumeBackendAPIException(data=msg % (seg[-1])) else: if st_nw_mv is None: self.zapi_client.move_lun(tmp_path, path) msg = _("Failure moving new cloned LUN to %s.") raise exception.VolumeBackendAPIException( data=msg % (seg[-1])) elif st_del_old is None: LOG.error("Failure deleting staged tmp LUN %s.", tmp_lun) else: LOG.error("Unknown exception in" " post clone resize LUN %s.", seg[-1]) LOG.error("Exception details: %s", e) def _is_space_alloc_enabled(self, path): """Gets space allocation details for the LUN.""" LOG.debug("Getting LUN space allocation enabled.") lun_infos = self.zapi_client.get_lun_by_args(path=path) if not lun_infos: seg = path.split('/') msg = _('Failure getting LUN info for %s' % seg[-1]) raise exception.VolumeBackendAPIException(data=msg) return lun_infos[0]['SpaceAllocated'] == "true" def _get_lun_block_count(self, path): """Gets block counts for the LUN.""" LOG.debug("Getting LUN block count.") lun_infos = self.zapi_client.get_lun_by_args(path=path) if not lun_infos: seg = path.split('/') msg = _('Failure getting LUN info for %s.') raise exception.VolumeBackendAPIException(data=msg % seg[-1]) lun_info = lun_infos[-1] bs = int(lun_info['BlockSize']) ls = int(lun_info['Size']) block_count = ls / bs return block_count def _check_volume_type_for_lun(self, volume, lun, existing_ref, extra_specs): """Checks if LUN satisfies the volume type.""" def manage_existing(self, volume, existing_ref): """Brings an existing storage object under Cinder management. existing_ref can contain source-id or source-name or both. source-id: lun uuid. source-name: complete lun path eg. /vol/vol0/lun. 
""" lun = self._get_existing_vol_with_manage_ref(existing_ref) extra_specs = na_utils.get_volume_extra_specs(volume) self._check_volume_type_for_lun(volume, lun, existing_ref, extra_specs) qos_policy_group_info = self._setup_qos_for_volume(volume, extra_specs) qos_policy_group_name = ( na_utils.get_qos_policy_group_name_from_info( qos_policy_group_info)) is_adaptive = na_utils.is_qos_policy_group_spec_adaptive( qos_policy_group_info) path = lun.get_metadata_property('Path') if lun.name == volume['name']: new_path = path LOG.info("LUN with given ref %s need not be renamed " "during manage operation.", existing_ref) else: (rest, splitter, name) = path.rpartition('/') new_path = '%s/%s' % (rest, volume['name']) self.zapi_client.move_lun(path, new_path) lun = self._get_existing_vol_with_manage_ref( {'source-name': new_path}) if qos_policy_group_name is not None: self.zapi_client.set_lun_qos_policy_group(new_path, qos_policy_group_name, is_adaptive) self._add_lun_to_table(lun) LOG.info("Manage operation completed for LUN with new path" " %(path)s and uuid %(uuid)s.", {'path': lun.get_metadata_property('Path'), 'uuid': lun.get_metadata_property('UUID')}) return self._get_volume_model_update(volume) def manage_existing_get_size(self, volume, existing_ref): """Return size of volume to be managed by manage_existing. When calculating the size, round up to the next GB. """ lun = self._get_existing_vol_with_manage_ref(existing_ref) return int(math.ceil(float(lun.size) / units.Gi)) def _get_existing_vol_with_manage_ref(self, existing_ref): """Get the corresponding LUN from the storage server.""" uuid = existing_ref.get('source-id') path = existing_ref.get('source-name') lun_info = {} if path: lun_info['path'] = path elif uuid: if not hasattr(self, 'vserver'): reason = _('Volume manage identifier with source-id is only ' 'supported with clustered Data ONTAP.') raise exception.ManageExistingInvalidReference( existing_ref=existing_ref, reason=reason) lun_info['uuid'] = uuid else: reason = _('Volume manage identifier must contain either ' 'source-id or source-name element.') raise exception.ManageExistingInvalidReference( existing_ref=existing_ref, reason=reason) luns = self.zapi_client.get_lun_by_args(**lun_info) for lun in luns: netapp_lun = self._extract_lun_info(lun) if self._is_lun_valid_on_storage(netapp_lun): return netapp_lun raise exception.ManageExistingInvalidReference( existing_ref=existing_ref, reason=(_('LUN not found with given ref %s.') % existing_ref)) def _is_lun_valid_on_storage(self, lun): """Validate lun specific to storage system.""" return True def unmanage(self, volume): """Removes the specified volume from Cinder management. Does not delete the underlying backend storage object. """ managed_lun = self._get_lun_from_table(volume['name']) LOG.info("Unmanaged LUN with current path %(path)s and uuid " "%(uuid)s.", {'path': managed_lun.get_metadata_property('Path'), 'uuid': managed_lun.get_metadata_property('UUID') or 'unknown'}) def initialize_connection_iscsi(self, volume, connector): """Driver entry point to attach a volume to an instance. Do the LUN masking on the storage system so the initiator can access the LUN on the target. Also return the iSCSI properties so the initiator can find the LUN. This implementation does not call _get_iscsi_properties() to get the properties because cannot store the LUN number in the database. We only find out what the LUN number will be during this method call so we construct the properties dictionary ourselves. 
""" initiator_name = connector['initiator'] lun_path = volume['provider_location'].split(':')[1] name = volume['name'] lun_id = self._map_lun(name, [initiator_name], 'iscsi', None) LOG.debug("Mapped LUN %(name)s to the initiator %(initiator_name)s", {'name': name, 'initiator_name': initiator_name}) target_list = self.zapi_client.get_iscsi_target_details() if not target_list: raise exception.VolumeBackendAPIException( data=_('Failed to get LUN target list for the LUN %s') % name) LOG.debug("Successfully fetched target list for LUN %(name)s and " "initiator %(initiator_name)s", {'name': name, 'initiator_name': initiator_name}) targets = self._get_targets_from_list(target_list) if not targets: msg = _('Failed to get target portal for the LUN %s') raise exception.VolumeBackendAPIException(data=msg % name) addresses = [target['address'] for target in targets] ports = [target['port'] for target in targets] iqn = self.zapi_client.get_iscsi_service_details() if not iqn: msg = _('Failed to get target IQN for the LUN %s') raise exception.VolumeBackendAPIException(data=msg % name) properties = na_utils.get_iscsi_connection_properties(lun_id, volume, iqn, addresses, ports) properties['discard'] = self._is_space_alloc_enabled(lun_path) if self.configuration.use_chap_auth: chap_username, chap_password = self._configure_chap(initiator_name) self._add_chap_properties(properties, chap_username, chap_password) return properties def _configure_chap(self, initiator_name): password = volume_utils.generate_password(na_utils.CHAP_SECRET_LENGTH) username = na_utils.DEFAULT_CHAP_USER_NAME self.zapi_client.set_iscsi_chap_authentication(initiator_name, username, password) LOG.debug("Set iSCSI CHAP authentication.") return username, password def _add_chap_properties(self, properties, username, password): properties['data']['auth_method'] = 'CHAP' properties['data']['auth_username'] = username properties['data']['auth_password'] = password properties['data']['discovery_auth_method'] = 'CHAP' properties['data']['discovery_auth_username'] = username properties['data']['discovery_auth_password'] = password def _get_targets_from_list(self, target_details_list, filter=None): targets = [target for target in target_details_list if (not filter or target['address'] in filter) and target.get('interface-enabled', 'true') == 'true'] if not targets and len(target_details_list) > 0: targets = target_details_list return targets def _is_multiattached(self, volume, connector): """Returns whether the volume is multiattached. Returns True if the volume is attached to multiple instances using the same initiator as the given one. Returns False otherwise. """ if not volume.multiattach or not volume.volume_attachment: return False same_connector = (True for at in volume.volume_attachment if at.connector and at.connector['initiator'] == connector['initiator']) next(same_connector, False) return next(same_connector, False) def terminate_connection_iscsi(self, volume, connector, **kwargs): """Driver entry point to unattach a volume from an instance. Unmask the LUN on the storage system so the given initiator can no longer access it. 
""" if connector and self._is_multiattached(volume, connector): return name = volume['name'] if connector is None: initiators = [] LOG.debug('Unmapping LUN %(name)s from all initiators', {'name': name}) else: initiators = [connector['initiator']] LOG.debug("Unmapping LUN %(name)s from the initiator " "%(initiator_name)s", {'name': name, 'initiator_name': initiators}) metadata = self._get_lun_attr(name, 'metadata') path = metadata['Path'] self._unmap_lun(path, initiators) def initialize_connection_fc(self, volume, connector): """Initializes the connection and returns connection info. Assign any created volume to a compute node/host so that it can be used from that host. The driver returns a driver_volume_type of 'fibre_channel'. The target_wwn can be a single entry or a list of wwns that correspond to the list of remote wwn(s) that will export the volume. Example return values: .. code-block:: default { 'driver_volume_type': 'fibre_channel', 'data': { 'target_discovered': True, 'target_lun': 1, 'target_wwn': '500a098280feeba5', 'initiator_target_map': { '21000024ff406cc3': ['500a098280feeba5'], '21000024ff406cc2': ['500a098280feeba5'] } } } Or .. code-block:: default { 'driver_volume_type': 'fibre_channel', 'data': { 'target_discovered': True, 'target_lun': 1, 'target_wwn': ['500a098280feeba5', '500a098290feeba5', '500a098190feeba5', '500a098180feeba5'], 'initiator_target_map': { '21000024ff406cc3': ['500a098280feeba5', '500a098290feeba5'], '21000024ff406cc2': ['500a098190feeba5', '500a098180feeba5'] } } } """ initiators = [fczm_utils.get_formatted_wwn(wwpn) for wwpn in connector['wwpns']] volume_name = volume['name'] lun_id = self._map_lun(volume_name, initiators, 'fcp', None) LOG.debug("Mapped LUN %(name)s to the initiator(s) %(initiators)s", {'name': volume_name, 'initiators': initiators}) target_wwpns, initiator_target_map, num_paths = ( self._build_initiator_target_map(connector)) if target_wwpns: LOG.debug("Successfully fetched target details for LUN %(name)s " "and initiator(s) %(initiators)s", {'name': volume_name, 'initiators': initiators}) else: raise exception.VolumeBackendAPIException( data=_('Failed to get LUN target details for ' 'the LUN %s') % volume_name) target_info = {'driver_volume_type': 'fibre_channel', 'data': {'target_discovered': True, 'target_lun': int(lun_id), 'target_wwn': target_wwpns, 'initiator_target_map': initiator_target_map}} return target_info @coordination.synchronized('netapp-terminate-fc-connection-{volume.id}') def terminate_connection_fc(self, volume, connector, **kwargs): """Disallow connection from connector. Return empty data if other volumes are in the same zone. The FibreChannel ZoneManager doesn't remove zones if there isn't an initiator_target_map in the return of terminate_connection. 
:returns: data - the target_wwns and initiator_target_map if the zone is to be removed, otherwise the same map with an empty dict for the 'data' key """ if connector and na_utils.is_multiattach_to_host( volume, connector ): return name = volume['name'] if connector is None: initiators = [] LOG.debug('Unmapping LUN %(name)s from all initiators', {'name': name}) else: initiators = [fczm_utils.get_formatted_wwn(wwpn) for wwpn in connector['wwpns']] LOG.debug("Unmapping LUN %(name)s from the initiators " "%(initiator_name)s", {'name': name, 'initiator_name': initiators}) metadata = self._get_lun_attr(name, 'metadata') path = metadata['Path'] self._unmap_lun(path, initiators) info = {'driver_volume_type': 'fibre_channel', 'data': {}} if (connector and not self._has_luns_mapped_to_initiators(initiators)): # No more exports for this host, so tear down zone. LOG.info("Need to remove FC Zone, " "building initiator target map") target_wwpns, initiator_target_map, num_paths = ( self._build_initiator_target_map(connector)) info['data'] = {'target_wwn': target_wwpns, 'initiator_target_map': initiator_target_map} return info def _build_initiator_target_map(self, connector): """Build the target_wwns and the initiator target map.""" # get WWPNs from controller and strip colons all_target_wwpns = self._get_fc_target_wwpns() all_target_wwpns = [str(wwpn).replace(':', '') for wwpn in all_target_wwpns] target_wwpns = [] init_targ_map = {} num_paths = 0 if self.lookup_service is not None: # Use FC SAN lookup to determine which ports are visible. dev_map = self.lookup_service.get_device_mapping_from_network( connector['wwpns'], all_target_wwpns) for fabric_name in dev_map: fabric = dev_map[fabric_name] target_wwpns += fabric['target_port_wwn_list'] for initiator in fabric['initiator_port_wwn_list']: if initiator not in init_targ_map: init_targ_map[initiator] = [] init_targ_map[initiator] += fabric['target_port_wwn_list'] init_targ_map[initiator] = list(set( init_targ_map[initiator])) for target in init_targ_map[initiator]: num_paths += 1 target_wwpns = list(set(target_wwpns)) else: initiator_wwns = connector['wwpns'] target_wwpns = all_target_wwpns for initiator in initiator_wwns: init_targ_map[initiator] = target_wwpns return target_wwpns, init_targ_map, num_paths def _get_backing_flexvol_names(self): """Returns a list of backing flexvol names.""" raise NotImplementedError() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/drivers/netapp/dataontap/block_cmode.py0000664000175000017500000013551200000000000025404 0ustar00zuulzuul00000000000000# Copyright (c) 2012 NetApp, Inc. All rights reserved. # Copyright (c) 2014 Ben Swartzlander. All rights reserved. # Copyright (c) 2014 Navneet Singh. All rights reserved. # Copyright (c) 2014 Clinton Knight. All rights reserved. # Copyright (c) 2014 Alex Meade. All rights reserved. # Copyright (c) 2014 Andrew Kerr. All rights reserved. # Copyright (c) 2014 Jeff Applewhite. All rights reserved. # Copyright (c) 2015 Tom Barron. All rights reserved. # Copyright (c) 2015 Goutham Pacha Ravi. All rights reserved. # Copyright (c) 2016 Mike Rooney. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Volume driver library for NetApp C-mode block storage systems. """ import time from oslo_log import log as logging from oslo_service import loopingcall from oslo_utils import excutils from oslo_utils import units from cinder import exception from cinder.i18n import _ from cinder.objects import fields from cinder.volume.drivers.netapp.dataontap import block_base from cinder.volume.drivers.netapp.dataontap.client import api as netapp_api from cinder.volume.drivers.netapp.dataontap.performance import perf_cmode from cinder.volume.drivers.netapp.dataontap.utils import capabilities from cinder.volume.drivers.netapp.dataontap.utils import data_motion from cinder.volume.drivers.netapp.dataontap.utils import loopingcalls from cinder.volume.drivers.netapp.dataontap.utils import utils as dot_utils from cinder.volume.drivers.netapp import options as na_opts from cinder.volume.drivers.netapp import utils as na_utils from cinder.volume import volume_utils LOG = logging.getLogger(__name__) class NetAppBlockStorageCmodeLibrary( block_base.NetAppBlockStorageLibrary, data_motion.DataMotionMixin, metaclass=volume_utils.TraceWrapperMetaclass): """NetApp block storage library for Data ONTAP (Cluster-mode). Version history: .. code-block:: none 1.0.0 - Driver development before Wallaby 2.0.0 - Add support for QoS minimums specs Add support for dynamic Adaptive QoS policy group creation 3.0.0 - Add support for Intra-cluster Storage assisted volume migration Add support for revert to snapshot 4.0.0 - Add Cinder Active/Active support (High Availability) Implement Active/Active replication support """ VERSION = "4.0.0" REQUIRED_CMODE_FLAGS = ['netapp_vserver'] def __init__(self, driver_name, driver_protocol, **kwargs): super(NetAppBlockStorageCmodeLibrary, self).__init__(driver_name, driver_protocol, **kwargs) self.configuration.append_config_values(na_opts.netapp_cluster_opts) self.driver_mode = 'cluster' self.failed_over_backend_name = kwargs.get('active_backend_id').\ strip() if kwargs.get('active_backend_id') is not None else None self.failed_over = bool(self.failed_over_backend_name) self.replication_enabled = ( True if self.get_replication_backend_names( self.configuration) else False) def do_setup(self, context): super(NetAppBlockStorageCmodeLibrary, self).do_setup(context) na_utils.check_flags(self.REQUIRED_CMODE_FLAGS, self.configuration) # cDOT API client self.zapi_client = dot_utils.get_client_for_backend( self.failed_over_backend_name or self.backend_name) self.vserver = self.zapi_client.vserver self.dest_zapi_client = None if self.replication_enabled: if self.get_replication_policy(self.configuration) == \ "AutomatedFailOver": backend_names = self.get_replication_backend_names( self.configuration) for dest_backend_name in backend_names: dest_backend_config = dot_utils.get_backend_configuration( dest_backend_name) dest_vserver = dest_backend_config.netapp_vserver self.dest_zapi_client = dot_utils.get_client_for_backend( dest_backend_name, vserver_name=dest_vserver) # Storage service catalog self.ssc_library = capabilities.CapabilitiesLibrary( self.driver_protocol, self.vserver, self.zapi_client, 
self.configuration) self.ssc_library.check_api_permissions() self.using_cluster_credentials = ( self.ssc_library.cluster_user_supported()) # Performance monitoring library self.perf_library = perf_cmode.PerformanceCmodeLibrary( self.zapi_client) def _update_zapi_client(self, backend_name): """Set cDOT API client for the specified config backend stanza name.""" self.zapi_client = dot_utils.get_client_for_backend(backend_name) self.vserver = self.zapi_client.vserver self.ssc_library._update_for_failover(self.zapi_client, self._get_flexvol_to_pool_map()) ssc = self.ssc_library.get_ssc() self.perf_library._update_for_failover(self.zapi_client, ssc) # Clear LUN table cache self.lun_table = {} def check_for_setup_error(self): """Check that the driver is working and can communicate.""" if (not self._get_flexvol_to_pool_map() and not self.configuration.netapp_disaggregated_platform): msg = _('No pools are available for provisioning volumes. ' 'Ensure that the configuration option ' 'netapp_pool_name_search_pattern is set correctly.') raise na_utils.NetAppDriverException(msg) elif (self.configuration.netapp_disaggregated_platform and not self._get_cluster_to_pool_map()): msg = _('No pools are available for provisioning volumes. ' 'Ensure ASA r2 configuration option is set correctly.') raise na_utils.NetAppDriverException(msg) self._add_looping_tasks() super(NetAppBlockStorageCmodeLibrary, self).check_for_setup_error() def _add_looping_tasks(self): """Add tasks that need to be executed at a fixed interval.""" # Note(cknight): Run the update once in the current thread to prevent a # race with the first invocation of _update_volume_stats. self._update_ssc() # Add the task that updates the slow-changing storage service catalog self.loopingcalls.add_task(self._update_ssc, loopingcalls.ONE_HOUR, loopingcalls.ONE_HOUR) self.loopingcalls.add_task( self._handle_housekeeping_tasks, loopingcalls.TEN_MINUTES, 0) super(NetAppBlockStorageCmodeLibrary, self)._add_looping_tasks() def _handle_housekeeping_tasks(self): """Handle various cleanup activities.""" active_backend = self.failed_over_backend_name or self.backend_name # Add the task that harvests soft-deleted QoS policy groups. if self.using_cluster_credentials: self.zapi_client.remove_unused_qos_policy_groups() LOG.debug("Current service state: Replication enabled: %(" "replication)s. Failed-Over: %(failed)s. 
Active Backend " "ID: %(active)s", { 'replication': self.replication_enabled, 'failed': self.failed_over, 'active': active_backend, }) # Create pool mirrors if whole-backend replication configured if self.replication_enabled and not self.failed_over: self.ensure_snapmirrors( self.configuration, self.backend_name, self.ssc_library.get_ssc_flexvol_names()) def _handle_ems_logging(self): """Log autosupport messages.""" base_ems_message = dot_utils.build_ems_log_message_0( self.driver_name, self.app_version) self.zapi_client.send_ems_log_message(base_ems_message) pool_ems_message = dot_utils.build_ems_log_message_1( self.driver_name, self.app_version, self.vserver, self.ssc_library.get_ssc_flexvol_names(), []) self.zapi_client.send_ems_log_message(pool_ems_message) def _create_lun(self, volume_name, lun_name, size, metadata, qos_policy_group_name=None, qos_policy_group_is_adaptive=False): """Creates a LUN, handling Data ONTAP differences as needed.""" self.zapi_client.create_lun( volume_name, lun_name, size, metadata, qos_policy_group_name, qos_policy_group_is_adaptive) def _create_lun_handle(self, metadata, vserver=None): """Returns LUN handle based on filer type.""" vserver = vserver or self.vserver return '%s:%s' % (self.vserver, metadata['Path']) def _find_mapped_lun_igroup(self, path, initiator_list): """Find an igroup for a LUN mapped to the given initiator(s).""" igroups = [igroup['initiator-group-name'] for igroup in self.zapi_client.get_igroup_by_initiators(initiator_list)] # Map igroup-name to lun-id, but only for the requested initiators. lun_map = {v['initiator-group']: v['lun-id'] for v in self.zapi_client.get_lun_map(path) if v['initiator-group'] in igroups} igroup_name = lun_id = None # Give preference to OpenStack igroups, just use the last one if not # present to allow unmapping old mappings that used a custom igroup. 
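        # Illustrative sketch only (hypothetical igroup names and LUN IDs),
        # showing how the loop below selects a mapping:
        #
        #   lun_map = {'custom-ig-a': 2, 'openstack-7f3c9b...': 0}
        #
        # Iterating the map, the first igroup whose name carries the
        # OpenStack prefix wins (here 'openstack-7f3c9b...', LUN ID 0); if no
        # such igroup is mapped, the variables simply retain the last pair
        # visited, which is what allows unmapping legacy mappings that were
        # made through a custom igroup.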
for igroup_name, lun_id in lun_map.items(): if igroup_name.startswith(na_utils.OPENSTACK_PREFIX): break return igroup_name, lun_id def _clone_lun(self, name, new_name, space_reserved=None, qos_policy_group_name=None, src_block=0, dest_block=0, block_count=0, source_snapshot=None, is_snapshot=False, qos_policy_group_is_adaptive=False): """Clone LUN with the given handle to the new name.""" if not space_reserved: space_reserved = self.lun_space_reservation metadata = self._get_lun_attr(name, 'metadata') volume = metadata['Volume'] try: self.zapi_client.clone_lun( volume, name, new_name, space_reserved, qos_policy_group_name=qos_policy_group_name, src_block=src_block, dest_block=dest_block, block_count=block_count, source_snapshot=source_snapshot, is_snapshot=is_snapshot, qos_policy_group_is_adaptive=qos_policy_group_is_adaptive, ) except netapp_api.NaApiError as e: with excutils.save_and_reraise_exception() as exc_context: if 'Device busy' in e.message: self._retry_clone_lun( volume, name, new_name, space_reserved, qos_policy_group_name=qos_policy_group_name, src_block=src_block, dest_block=dest_block, block_count=block_count, source_snapshot=source_snapshot, is_snapshot=is_snapshot, qos_policy_group_is_adaptive=( qos_policy_group_is_adaptive), ) exc_context.reraise = False LOG.debug("Cloned LUN with new name %s", new_name) lun = self.zapi_client.get_lun_by_args(vserver=self.vserver, path='/vol/%s/%s' % (volume, new_name)) if len(lun) == 0: msg = _("No cloned LUN named %s found on the filer") raise exception.VolumeBackendAPIException(data=msg % new_name) clone_lun = lun[0] self._add_lun_to_table( block_base.NetAppLun('%s:%s' % (clone_lun['Vserver'], clone_lun['Path']), new_name, clone_lun['Size'], clone_lun)) def _retry_clone_lun(self, volume, name, new_name, space_reserved, qos_policy_group_name=None, src_block=0, dest_block=0, block_count=0, source_snapshot=None, is_snapshot=False, qos_policy_group_is_adaptive=False): """Retry lun clone creation when ONTAP throws device busy error""" # timeout and interval are configurable parameters that the user can # specify under the backend stanza. If the user does not set these # values, default values will be used. For example, if timeout is set # to 60 seconds and interval is set to 5 seconds, then this code will # retry the LUN clone every 5 seconds until the 60-second timeout is # reached. timeout = self.configuration.safe_get('netapp_lun_clone_busy_timeout') interval = self.configuration.safe_get( 'netapp_lun_clone_busy_interval') retries = int(timeout / interval) for attempt in range(1, retries + 1): try: self.zapi_client.clone_lun( volume, name, new_name, space_reserved, qos_policy_group_name=qos_policy_group_name, src_block=src_block, dest_block=dest_block, block_count=block_count, source_snapshot=source_snapshot, is_snapshot=is_snapshot, qos_policy_group_is_adaptive=qos_policy_group_is_adaptive, ) LOG.info("LUN clone succeeded on attempt %s.", attempt) break except netapp_api.NaApiError as e: if 'Device busy' in e.message: LOG.debug("Attempt %s failed with device busy error." 
"Retrying after %s seconds...", attempt, interval) if attempt == retries: msg = _("Timed out after %s retry for LUN clone" " creation") raise na_utils.NetAppDriverException(msg % retries) time.sleep(interval) else: raise netapp_api.NaApiError(e.code, e.message) def _get_fc_target_wwpns(self, include_partner=True): return self.zapi_client.get_fc_target_wwpns() def _update_volume_stats(self, filter_function=None, goodness_function=None): """Retrieve backend stats.""" LOG.debug('Updating volume stats') data = {} backend_name = self.configuration.safe_get('volume_backend_name') data['volume_backend_name'] = backend_name or self.driver_name data['vendor_name'] = 'NetApp' data['driver_version'] = self.VERSION data['storage_protocol'] = self.driver_protocol data['pools'] = self._get_pool_stats( filter_function=filter_function, goodness_function=goodness_function) data['sparse_copy_volume'] = True # Used for service state report data['replication_enabled'] = self.replication_enabled self._stats = data def _get_pool_stats(self, filter_function=None, goodness_function=None): """Retrieve pool (Data ONTAP flexvol) stats. Pool statistics are assembled from static driver capabilities, the Storage Service Catalog of flexvol attributes, and real-time capacity and controller utilization metrics. The pool name is the flexvol name. """ pools = [] ssc = self.ssc_library.get_ssc() if not ssc: return pools # Utilization and performance metrics require cluster-scoped # credentials # Performance metrics are skipped for disaggregated for now. # TODO(jayaanan): Add support for performance metrics for ASA r2 if (self.using_cluster_credentials and not self.configuration.netapp_disaggregated_platform): # Get up-to-date node utilization metrics just once self.perf_library.update_performance_cache(ssc) # Get up-to-date aggregate capacities just once aggregates = self.ssc_library.get_ssc_aggregates() aggr_capacities = self.zapi_client.get_aggregate_capacities( aggregates) else: aggr_capacities = {} for ssc_vol_name, ssc_vol_info in ssc.items(): pool = dict() # Add storage service catalog data pool.update(ssc_vol_info) # Add driver capabilities and config info pool['QoS_support'] = self.using_cluster_credentials pool['multiattach'] = True pool['online_extend_support'] = True pool['consistencygroup_support'] = True pool['consistent_group_snapshot_enabled'] = True pool['reserved_percentage'] = self.reserved_percentage pool['max_over_subscription_ratio'] = ( self.max_over_subscription_ratio) # Add up-to-date capacity info if self.configuration.netapp_disaggregated_platform: capacity = self.zapi_client.get_cluster_capacity() else: capacity = self.zapi_client.get_flexvol_capacity( flexvol_name=ssc_vol_name) size_total_gb = capacity['size-total'] / units.Gi pool['total_capacity_gb'] = na_utils.round_down(size_total_gb) size_available_gb = capacity['size-available'] / units.Gi pool['free_capacity_gb'] = na_utils.round_down(size_available_gb) luns = self.zapi_client.get_lun_sizes_by_volume( ssc_vol_name) pool['total_volumes'] = len(luns) if self.configuration.netapp_driver_reports_provisioned_capacity: provisioned_cap = 0 for lun in luns: lun_name = lun['path'].split('/')[-1] # Filtering luns that matches the volume name template to # exclude snapshots if volume_utils.extract_id_from_volume_name(lun_name): provisioned_cap = provisioned_cap + lun['size'] pool['provisioned_capacity_gb'] = na_utils.round_down( float(provisioned_cap) / units.Gi) if (self.using_cluster_credentials and not 
self.configuration.netapp_disaggregated_platform): dedupe_used = ( self.zapi_client .get_flexvol_dedupe_used_percent(ssc_vol_name) ) else: dedupe_used = 0.0 pool['netapp_dedupe_used_percent'] = na_utils.round_down( dedupe_used) aggregate_name = ssc_vol_info.get('netapp_aggregate') aggr_capacity = aggr_capacities.get(aggregate_name, {}) pool['netapp_aggregate_used_percent'] = aggr_capacity.get( 'percent-used', 0) # Add utilization data utilization = self.perf_library.get_node_utilization_for_pool( ssc_vol_name) pool['utilization'] = na_utils.round_down(utilization) pool['filter_function'] = filter_function pool['goodness_function'] = goodness_function # Add replication capabilities/stats pool.update( self.get_replication_backend_stats(self.configuration)) pools.append(pool) return pools def _update_ssc(self): """Refresh the storage service catalog with the latest set of pools.""" if self.configuration.netapp_disaggregated_platform: self.ssc_library.update_ssc_asa(self._get_cluster_to_pool_map()) else: self.ssc_library.update_ssc(self._get_flexvol_to_pool_map()) def _get_flexvol_to_pool_map(self): """Get the flexvols that match the pool name search pattern. The map is of the format suitable for seeding the storage service catalog: { : {'pool_name': }} """ pool_regex = na_utils.get_pool_name_filter_regex(self.configuration) pools = {} flexvol_names = self.zapi_client.list_flexvols() for flexvol_name in flexvol_names: msg_args = { 'flexvol': flexvol_name, 'vol_pattern': pool_regex.pattern, } if pool_regex.match(flexvol_name): msg = "Volume '%(flexvol)s' matches %(vol_pattern)s" LOG.debug(msg, msg_args) pools[flexvol_name] = {'pool_name': flexvol_name} else: msg = "Volume '%(flexvol)s' does not match %(vol_pattern)s" LOG.debug(msg, msg_args) return pools def _get_cluster_to_pool_map(self): return dot_utils.get_cluster_to_pool_map(self.zapi_client) def delete_volume(self, volume): """Driver entry point for destroying existing volumes.""" super(NetAppBlockStorageCmodeLibrary, self).delete_volume(volume) try: qos_policy_group_info = na_utils.get_valid_qos_policy_group_info( volume) except exception.Invalid: # Delete even if there was invalid qos policy specified for the # volume. qos_policy_group_info = None self._mark_qos_policy_group_for_deletion(qos_policy_group_info) msg = 'Deleted LUN with name %(name)s and QoS info %(qos)s' LOG.debug(msg, {'name': volume['name'], 'qos': qos_policy_group_info}) def _setup_qos_for_volume(self, volume, extra_specs): try: qos_policy_group_info = na_utils.get_valid_qos_policy_group_info( volume, extra_specs) except exception.Invalid: msg = _('Invalid QoS specification detected while getting QoS ' 'policy for volume %s') % volume['id'] raise exception.VolumeBackendAPIException(data=msg) pool = volume_utils.extract_host(volume['host'], level='pool') qos_min_support = self.ssc_library.is_qos_min_supported(pool) self.zapi_client.provision_qos_policy_group(qos_policy_group_info, qos_min_support) return qos_policy_group_info def _get_volume_model_update(self, volume): """Provide any updates necessary for a volume being created/managed.""" if self.replication_enabled: return {'replication_status': fields.ReplicationStatus.ENABLED} def _mark_qos_policy_group_for_deletion(self, qos_policy_group_info): is_adaptive = na_utils.is_qos_policy_group_spec_adaptive( qos_policy_group_info) self.zapi_client.mark_qos_policy_group_for_deletion( qos_policy_group_info, is_adaptive) def unmanage(self, volume): """Removes the specified volume from Cinder management. 
Does not delete the underlying backend storage object. """ try: qos_policy_group_info = na_utils.get_valid_qos_policy_group_info( volume) except exception.Invalid: # Unmanage even if there was invalid qos policy specified for the # volume. qos_policy_group_info = None self._mark_qos_policy_group_for_deletion(qos_policy_group_info) super(NetAppBlockStorageCmodeLibrary, self).unmanage(volume) def failover_host(self, context, volumes, secondary_id=None, groups=None): """Failover a backend to a secondary replication target. This function combines failover() and failover_completed() to perform failover when Active/Active is not enabled. """ active_backend_name, volume_updates, group_updates = ( self._failover(context, volumes, secondary_id, groups)) self._failover_completed(context, active_backend_name) return active_backend_name, volume_updates, group_updates def failover(self, context, volumes, secondary_id=None, groups=None): """Failover to replication target.""" return self._failover(context, volumes, secondary_id, groups) def failover_completed(self, context, secondary_id=None): """Update volume node when `failover` is completed.""" return self._failover_completed(context, secondary_id) def _get_backing_flexvol_names(self): """Returns a list of backing flexvol names.""" ssc = self.ssc_library.get_ssc() return list(ssc.keys()) def create_group(self, group): """Driver entry point for creating a generic volume group. ONTAP does not maintain an actual Group construct. As a result, no communication to the backend is necessary for generic volume group creation. :returns: Hard-coded model update for generic volume group model. """ model_update = {'status': fields.GroupStatus.AVAILABLE} return model_update def delete_group(self, group, volumes): """Driver entry point for deleting a group. :returns: Updated group model and list of volume models for the volumes that were deleted. """ model_update = {'status': fields.GroupStatus.DELETED} volumes_model_update = [] for volume in volumes: try: self._delete_lun(volume['name']) volumes_model_update.append( {'id': volume['id'], 'status': 'deleted'}) except Exception: volumes_model_update.append( {'id': volume['id'], 'status': 'error_deleting'}) LOG.exception("Volume %(vol)s in the group could not be " "deleted.", {'vol': volume}) return model_update, volumes_model_update def update_group(self, group, add_volumes=None, remove_volumes=None): """Driver entry point for updating a generic volume group. Since no actual group construct is ever created in ONTAP, it is not necessary to update any metadata on the backend. Since this is a NO-OP, there is guaranteed to be no change in any of the volumes' statuses. """ return None, None, None def create_group_snapshot(self, group_snapshot, snapshots): """Creates a Cinder group snapshot object. The Cinder group snapshot object is created by making use of an ephemeral ONTAP consistency group snapshot in order to provide write-order consistency for a set of flexvol snapshots. First, a list of the flexvols backing the given Cinder group must be gathered. An ONTAP group-snapshot of these flexvols will create a snapshot copy of all the Cinder volumes in the generic volume group. For each Cinder volume in the group, it is then necessary to clone its backing LUN from the ONTAP cg-snapshot. The naming convention used for the clones is what indicates the clone's role as a Cinder snapshot and its inclusion in a Cinder group. 
The ONTAP cg-snapshot of the flexvols is no longer required after having cloned the LUNs backing the Cinder volumes in the Cinder group. :returns: An implicit update for group snapshot and snapshots models that is interpreted by the manager to set their models to available. """ try: if volume_utils.is_group_a_cg_snapshot_type(group_snapshot): self._create_consistent_group_snapshot(group_snapshot, snapshots) else: for snapshot in snapshots: self._create_snapshot(snapshot) except Exception as ex: err_msg = (_("Create group snapshot failed (%s).") % ex) LOG.exception(err_msg, resource=group_snapshot) raise na_utils.NetAppDriverException(err_msg) return None, None def _create_consistent_group_snapshot(self, group_snapshot, snapshots): flexvols = set() for snapshot in snapshots: flexvols.add(volume_utils.extract_host( snapshot['volume']['host'], level='pool')) self.zapi_client.create_cg_snapshot(flexvols, group_snapshot['id']) for snapshot in snapshots: self._clone_lun(snapshot['volume']['name'], snapshot['name'], source_snapshot=group_snapshot['id']) for flexvol in flexvols: try: self.zapi_client.wait_for_busy_snapshot( flexvol, group_snapshot['id']) self.zapi_client.delete_snapshot( flexvol, group_snapshot['id']) except exception.SnapshotIsBusy: self.zapi_client.mark_snapshot_for_deletion( flexvol, group_snapshot['id']) def delete_group_snapshot(self, group_snapshot, snapshots): """Delete LUNs backing each snapshot in the group snapshot. :returns: An implicit update for snapshots models that is interpreted by the manager to set their models to deleted. """ for snapshot in snapshots: self._delete_lun(snapshot['name']) LOG.debug("Snapshot %s deletion successful", snapshot['name']) return None, None def create_group_from_src(self, group, volumes, group_snapshot=None, snapshots=None, source_group=None, source_vols=None): """Creates a group from a group snapshot or a group of cinder vols. :returns: An implicit update for the volumes model that is interpreted by the manager as a successful operation. """ LOG.debug("VOLUMES %s ", ', '.join([vol['id'] for vol in volumes])) volume_model_updates = [] if group_snapshot: vols = zip(volumes, snapshots) for volume, snapshot in vols: source = { 'name': snapshot['name'], 'size': snapshot['volume_size'], } volume_model_update = self._clone_source_to_destination( source, volume) if volume_model_update is not None: volume_model_update['id'] = volume['id'] volume_model_updates.append(volume_model_update) else: vols = zip(volumes, source_vols) for volume, old_src_vref in vols: src_lun = self._get_lun_from_table(old_src_vref['name']) source = {'name': src_lun.name, 'size': old_src_vref['size']} volume_model_update = self._clone_source_to_destination( source, volume) if volume_model_update is not None: volume_model_update['id'] = volume['id'] volume_model_updates.append(volume_model_update) return None, volume_model_updates def _move_lun(self, volume, src_ontap_volume, dest_ontap_volume, dest_lun_name=None): """Moves LUN from an ONTAP volume to another.""" operation_info = self.zapi_client.start_lun_move( volume.name, dest_ontap_volume, src_ontap_volume=src_ontap_volume, dest_lun_name=dest_lun_name) LOG.debug('Start moving LUN %s from %s to %s. ', volume.name, src_ontap_volume, dest_ontap_volume) def _wait_lun_move_complete(): move_status = self.zapi_client.get_lun_move_status(operation_info) LOG.debug('Waiting for LUN move to complete. ' 'Current status is: %s.', move_status['job-status']) if not move_status: status_error_msg = (_("Error moving LUN %s. 
The movement" "status could not be retrieved.")) raise na_utils.NetAppDriverException( status_error_msg % (volume.id)) elif move_status['job-status'] == 'destroyed': status_error_msg = (_('Error moving LUN %s. %s.')) raise na_utils.NetAppDriverException( status_error_msg % (volume.id, move_status['last-failure-reason'])) elif move_status['job-status'] == 'complete': raise loopingcall.LoopingCallDone() try: timer = loopingcall.FixedIntervalWithTimeoutLoopingCall( _wait_lun_move_complete) timer.start( interval=15, timeout=self.configuration.netapp_migrate_volume_timeout ).wait() except loopingcall.LoopingCallTimeOut: msg = (_('Timeout waiting to complete move operation of LUN %s.')) raise na_utils.NetAppDriverTimeout(msg % volume.id) def _cancel_lun_copy(self, job_uuid, volume, dest_pool, dest_backend_name): """Cancel an on-going lun copy operation.""" try: # NOTE(sfernand): Another approach would be first checking if # the copy operation isn't in `destroying` or `destroyed` states # before issuing cancel. self.zapi_client.cancel_lun_copy(job_uuid) except na_utils.NetAppDriverException: dest_client = dot_utils.get_client_for_backend(dest_backend_name) lun_path = '/vol/%s/%s' % (dest_pool, volume.name) try: dest_client.destroy_lun(lun_path) except Exception: LOG.warning('Error cleaning up LUN %s in destination volume. ' 'Verify if destination volume still exists in ' 'pool %s and delete it manually to avoid unused ' 'resources.', lun_path, dest_pool) def _copy_lun(self, volume, src_ontap_volume, src_vserver, dest_ontap_volume, dest_vserver, dest_lun_name=None, dest_backend_name=None, cancel_on_error=False): """Copies LUN from an ONTAP volume to another.""" operation_info = self.zapi_client.start_lun_copy( volume.name, dest_ontap_volume, dest_vserver, src_ontap_volume=src_ontap_volume, src_vserver=src_vserver, dest_lun_name=dest_lun_name) LOG.debug('Start copying LUN %(vol)s from ' '%(src_vserver)s:%(src_ontap_vol)s to ' '%(dest_vserver)s:%(dest_ontap_vol)s.', {'vol': volume.name, 'src_vserver': src_vserver, 'src_ontap_vol': src_ontap_volume, 'dest_vserver': dest_vserver, 'dest_ontap_vol': dest_ontap_volume}) def _wait_lun_copy_complete(): copy_status = self.zapi_client.get_lun_copy_status(operation_info) LOG.debug('Waiting for LUN copy job to complete. Current ' 'status is: %s.', copy_status['job-status']) if not copy_status: status_error_msg = (_("Error copying LUN %s. The copy" "status could not be retrieved.")) raise na_utils.NetAppDriverException( status_error_msg % (volume.id)) elif copy_status['job-status'] == 'destroyed': status_error_msg = (_('Error copying LUN %s. 
%s.')) raise na_utils.NetAppDriverException( status_error_msg % (volume.id, copy_status['last-failure-reason'])) elif copy_status['job-status'] == 'complete': raise loopingcall.LoopingCallDone() try: timer = loopingcall.FixedIntervalWithTimeoutLoopingCall( _wait_lun_copy_complete) timer.start( interval=10, timeout=self.configuration.netapp_migrate_volume_timeout ).wait() except Exception as e: with excutils.save_and_reraise_exception() as ctxt: if cancel_on_error: self._cancel_lun_copy(operation_info, volume, dest_ontap_volume, dest_backend_name=dest_backend_name) if isinstance(e, loopingcall.LoopingCallTimeOut): ctxt.reraise = False msg = (_('Timeout waiting volume %s to complete ' 'migration.')) raise na_utils.NetAppDriverTimeout(msg % volume.id) def _finish_migrate_volume_to_vserver(self, src_volume): """Finish volume migration to another vserver within the cluster.""" # The source volume can be safely deleted after a successful migration. self.delete_volume(src_volume) # LUN cache for current backend can be deleted after migration. self._delete_lun_from_table(src_volume.name) def _migrate_volume_to_vserver(self, volume, src_pool, src_vserver, dest_pool, dest_vserver, dest_backend_name): """Migrate volume to a another vserver within the same cluster.""" LOG.info('Migrating volume %(vol)s from ' '%(src_vserver)s:%(src_ontap_vol)s to ' '%(dest_vserver)s:%(dest_ontap_vol)s.', {'vol': volume.id, 'src_vserver': src_vserver, 'src_ontap_vol': src_pool, 'dest_vserver': dest_vserver, 'dest_ontap_vol': dest_pool}) # NOTE(sfernand): Migrating to a different vserver relies on coping # operations which are always disruptive, as it requires the # destination volume to be added as a new block device to the Nova # instance. This differs from migrating volumes in a same vserver, # since we can make use of a LUN move operation without the # need of changing the iSCSI target. if volume.status != fields.VolumeStatus.AVAILABLE: msg = _("Volume status must be 'available' in order to " "migrate volume to another vserver.") LOG.error(msg) raise exception.InvalidVolume(reason=msg) vserver_peer_application = 'lun_copy' self.create_vserver_peer(src_vserver, self.backend_name, dest_vserver, [vserver_peer_application]) self._copy_lun(volume, src_pool, src_vserver, dest_pool, dest_vserver, dest_backend_name=dest_backend_name, cancel_on_error=True) self._finish_migrate_volume_to_vserver(volume) LOG.info('Successfully migrated volume %(vol)s from ' '%(src_vserver)s:%(src_ontap_vol)s ' 'to %(dest_vserver)s:%(dest_ontap_vol)s.', {'vol': volume.id, 'src_vserver': src_vserver, 'src_ontap_vol': src_pool, 'dest_vserver': dest_vserver, 'dest_ontap_vol': dest_pool}) # No model updates are necessary, so return empty dict return {} def _finish_migrate_volume_to_pool(self, src_volume, dest_pool): """Finish volume migration to another pool within the same vserver.""" # LUN cache must be updated with new path and volume information. 
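        # Illustrative sketch only (hypothetical pool and volume names): if
        # the LUN backing 'volume-1234' was moved from flexvol 'pool_src' to
        # 'pool_dst', the cached entry is patched in place so that
        #   lun.metadata['Path']   -> '/vol/pool_dst/volume-1234'
        #   lun.metadata['Volume'] -> 'pool_dst'
        # and subsequent operations resolve the LUN at its new location
        # without re-reading the LUN list from the backend.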
lun = self._get_lun_from_table(src_volume.name) new_lun_path = '/vol/%s/%s' % (dest_pool, src_volume.name) lun.metadata['Path'] = new_lun_path lun.metadata['Volume'] = dest_pool def _migrate_volume_to_pool(self, volume, src_pool, dest_pool, vserver, dest_backend_name): """Migrate volume to another Cinder Pool within the same vserver.""" LOG.info('Migrating volume %(vol)s from pool %(src)s to ' '%(dest)s within vserver %(vserver)s.', {'vol': volume.id, 'src': src_pool, 'dest': dest_pool, 'vserver': vserver}) updates = {} try: self._move_lun(volume, src_pool, dest_pool) except na_utils.NetAppDriverTimeout: error_msg = (_('Timeout waiting volume %s to complete migration.' 'Volume status is set to maintenance to prevent ' 'performing operations with this volume. Check the ' 'migration status on the storage side and set ' 'volume status manually if migration succeeded.')) LOG.warning(error_msg, volume.id) updates['status'] = fields.VolumeStatus.MAINTENANCE except na_utils.NetAppDriverException as e: error_msg = (_('Failed to migrate volume %(vol)s from pool ' '%(src)s to %(dest)s. %(err)s')) raise na_utils.NetAppDriverException( error_msg % {'vol': volume.id, 'src': src_pool, 'dest': dest_pool, 'err': e}) self._finish_migrate_volume_to_pool(volume, dest_pool) LOG.info('Successfully migrated volume %(vol)s from pool %(src)s ' 'to %(dest)s within vserver %(vserver)s.', {'vol': volume.id, 'src': src_pool, 'dest': dest_pool, 'vserver': vserver}) return updates def migrate_volume(self, context, volume, host): """Migrate Cinder volume to the specified pool or vserver.""" return self.migrate_volume_ontap_assisted( volume, host, self.backend_name, self.configuration.netapp_vserver) def revert_to_snapshot(self, volume, snapshot): """Driver entry point for reverting volume to snapshot.""" try: self._revert_to_snapshot(volume, snapshot) except Exception: raise exception.VolumeBackendAPIException( "Revert snapshot failed.") def _revert_to_snapshot(self, volume, snapshot): """Sets up all required resources for _swap_luns. If _swap_luns fails, the cloned LUN is destroyed. """ new_lun_name = self._clone_snapshot(snapshot["name"]) LOG.debug("Cloned from snapshot: %s.", new_lun_name) lun = self._get_lun_from_table(volume["name"]) volume_path = lun.metadata["Path"] seg = volume_path.split("/") lun_name = seg[-1] flexvol_name = seg[2] try: self._swap_luns(lun_name, new_lun_name, flexvol_name) except Exception: LOG.error("Swapping LUN from %s to %s failed.", lun_name, new_lun_name) with excutils.save_and_reraise_exception(): try: LOG.debug("Deleting temporary reverted LUN %s.", new_lun_name) new_lun_path = "/vol/%s/%s" % (flexvol_name, new_lun_name) self.zapi_client.destroy_lun(new_lun_path) except Exception: LOG.error("Failure deleting temporary reverted LUN %s. " "A manual deletion is required.", new_lun_name) def _clone_snapshot(self, snapshot_name): """Returns the name of the LUN cloned from snapshot. Creates a LUN with same metadata as original LUN and then clones from snapshot. If clone operation fails, the new LUN is deleted. 
""" snapshot_lun = self._get_lun_from_table(snapshot_name) snapshot_path = snapshot_lun.metadata["Path"] lun_name = snapshot_path.split("/")[-1] flexvol_name = snapshot_path.split("/")[2] LOG.info("Cloning LUN %s from snapshot %s in volume %s.", lun_name, snapshot_name, flexvol_name) block_count = self._get_lun_block_count(snapshot_path) if block_count == 0: msg = _("%s cannot be reverted using clone operation" " as it contains no blocks.") raise exception.VolumeBackendAPIException(data=msg % snapshot_name) new_snap_name = "new-%s" % snapshot_name try: self._clone_lun(snapshot_name, new_snap_name, space_reserved='false', is_snapshot=True) return new_snap_name except Exception: with excutils.save_and_reraise_exception(): try: new_lun_path = "/vol/%s/%s" % (flexvol_name, new_snap_name) self.zapi_client.destroy_lun(new_lun_path) except Exception: LOG.error("Failure deleting temporary reverted LUN %s. " "A manual deletion is required.", new_snap_name) def _swap_luns(self, original_lun, new_lun, flexvol_name): """Swaps cloned and original LUNs using a temporary LUN. Moves the original LUN to a temporary path, then moves the cloned LUN to the original path (if this fails, moves the temporary LUN back as original LUN) and finally destroys the LUN with temporary path. """ tmp_lun = "tmp-%s" % original_lun original_path = "/vol/%s/%s" % (flexvol_name, original_lun) tmp_path = "/vol/%s/%s" % (flexvol_name, tmp_lun) new_path = "/vol/%s/%s" % (flexvol_name, new_lun) LOG.debug("Original Path: %s.", original_path) LOG.debug("Temporary Path: %s.", tmp_path) LOG.debug("New Path %s.", new_path) try: self.zapi_client.move_lun(original_path, tmp_path) except Exception: msg = _("Failure moving original LUN from %s to %s." % (original_path, tmp_path)) raise exception.VolumeBackendAPIException(data=msg) try: self.zapi_client.move_lun(new_path, original_path) except Exception: LOG.debug("Move temporary reverted LUN failed. Moving back " "original LUN to original path.") try: self.zapi_client.move_lun(tmp_path, original_path) except Exception: LOG.error("Could not move original LUN path from %s to %s. " "Cinder may lose the volume management. Please, you " "should move it back manually.", tmp_path, original_path) msg = _("Failure moving temporary reverted LUN from %s to %s.") raise exception.VolumeBackendAPIException( data=msg % (new_path, original_path)) try: self.zapi_client.destroy_lun(tmp_path) except Exception: LOG.error("Failure deleting old LUN %s. A manual deletion " "is required.", tmp_lun) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315577.3871212 cinder-27.0.0/cinder/volume/drivers/netapp/dataontap/client/0000775000175000017500000000000000000000000024040 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/drivers/netapp/dataontap/client/__init__.py0000664000175000017500000000000000000000000026137 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/drivers/netapp/dataontap/client/api.py0000664000175000017500000010275200000000000025172 0ustar00zuulzuul00000000000000# Copyright (c) 2012 NetApp, Inc. All rights reserved. # Copyright (c) 2014 Navneet Singh. All rights reserved. # Copyright (c) 2014 Glenn Gobeli. All rights reserved. # Copyright (c) 2014 Clinton Knight. All rights reserved. # Copyright (c) 2015 Alex Meade. 
All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """NetApp API for Data ONTAP and OnCommand DFM. Contains classes required to issue API calls to Data ONTAP and OnCommand DFM. """ import random import ssl import urllib from eventlet import greenthread from eventlet import semaphore from lxml import etree from oslo_log import log as logging from oslo_serialization import jsonutils from oslo_utils import netutils import requests from requests.adapters import HTTPAdapter from requests import auth from requests.packages.urllib3.util.retry import Retry from cinder import exception from cinder.i18n import _ from cinder import ssh_utils from cinder.volume.drivers.netapp import utils as na_utils from cinder.volume import volume_utils LOG = logging.getLogger(__name__) # ZAPI API error codes. EAPIERROR = '13001' EAPIPRIVILEGE = '13003' EAPINOTFOUND = '13005' ESNAPSHOTNOTALLOWED = '13023' ESIS_CLONE_NOT_LICENSED = '14956' EOBJECTNOTFOUND = '15661' ESOURCE_IS_DIFFERENT = '17105' ERELATION_EXISTS = '17122' ERELATION_NOT_QUIESCED = '17127' ENOTRANSFER_IN_PROGRESS = '17130' EANOTHER_OP_ACTIVE = '17131' ETRANSFER_IN_PROGRESS = '17137' class NaServer(object): """Encapsulates server connection logic.""" TRANSPORT_TYPE_HTTP = 'http' TRANSPORT_TYPE_HTTPS = 'https' SERVER_TYPE_FILER = 'filer' SERVER_TYPE_DFM = 'dfm' URL_FILER = 'servlets/netapp.servlets.admin.XMLrequest_filer' URL_DFM = 'apis/XMLrequest' NETAPP_NS = 'http://www.netapp.com/filer/admin' def __init__(self, host, server_type=SERVER_TYPE_FILER, transport_type=TRANSPORT_TYPE_HTTP, ssl_cert_path=None, username=None, password=None, port=None, api_trace_pattern=None, private_key_file=None, certificate_file=None, ca_certificate_file=None, certificate_host_validation=None): self._host = host self.set_server_type(server_type) self.set_transport_type(transport_type) if port: self.set_port(port) self._username = username self._password = password self._private_key_file = private_key_file self._certificate_file = certificate_file self._ca_certificate_file = ca_certificate_file self._certificate_host_validation = certificate_host_validation self._refresh_conn = True self._ssl_cert_path = ssl_cert_path if api_trace_pattern is not None: na_utils.setup_api_trace_pattern(api_trace_pattern) LOG.debug('Using NetApp controller: %s', self._host) def set_transport_type(self, transport_type): """Set the transport type protocol for API. Supports http and https transport types. 
""" if not transport_type: raise ValueError('No transport type specified') if transport_type.lower() not in ( NaServer.TRANSPORT_TYPE_HTTP, NaServer.TRANSPORT_TYPE_HTTPS): raise ValueError('Unsupported transport type') self._protocol = transport_type.lower() if self._protocol == NaServer.TRANSPORT_TYPE_HTTP: if self._server_type == NaServer.SERVER_TYPE_FILER: self.set_port(80) else: self.set_port(8088) else: if self._server_type == NaServer.SERVER_TYPE_FILER: self.set_port(443) else: self.set_port(8488) self._refresh_conn = True def set_style(self, style): """Set the authorization style for communicating with the server. Supports basic_auth for now. Certificate_auth mode to be done. """ if style.lower() not in (NaServer.STYLE_LOGIN_PASSWORD, NaServer.STYLE_CERTIFICATE): raise ValueError('Unsupported authentication style') self._auth_style = style.lower() def set_server_type(self, server_type): """Set the target server type. Supports filer and dfm server types. """ if server_type.lower() not in (NaServer.SERVER_TYPE_FILER, NaServer.SERVER_TYPE_DFM): raise ValueError('Unsupported server type') self._server_type = server_type.lower() if self._server_type == NaServer.SERVER_TYPE_FILER: self._url = NaServer.URL_FILER else: self._url = NaServer.URL_DFM self._ns = NaServer.NETAPP_NS self._refresh_conn = True def set_ontap_version(self, version): self._ontap_version = version def get_ontap_version(self): return self._ontap_version def set_api_version(self, major, minor): """Set the API version.""" try: self._api_major_version = int(major) self._api_minor_version = int(minor) self._api_version = str(major) + "." + str(minor) except ValueError: raise ValueError('Major and minor versions must be integers') self._refresh_conn = True def get_api_version(self): """Gets the API version tuple.""" if hasattr(self, '_api_version'): return (self._api_major_version, self._api_minor_version) return None def set_port(self, port): """Set the server communication port.""" try: int(port) except ValueError: raise ValueError('Port must be integer') self._port = str(port) self._refresh_conn = True def set_timeout(self, seconds): """Sets the timeout in seconds.""" try: self._timeout = int(seconds) except ValueError: raise ValueError('timeout in seconds must be integer') def set_vfiler(self, vfiler): """Set the vfiler to use if tunneling gets enabled.""" self._vfiler = vfiler def set_vserver(self, vserver): """Set the vserver to use if tunneling gets enabled.""" self._vserver = vserver @volume_utils.trace_api(filter_function=na_utils.trace_filter_func_api) def send_http_request(self, na_element, enable_tunneling=False): """Invoke the API on the server.""" if not na_element or not isinstance(na_element, NaElement): raise ValueError('NaElement must be supplied to invoke API') request, request_element = self._create_request(na_element, enable_tunneling) if not hasattr(self, '_opener') or not self._opener \ or self._refresh_conn: self._build_opener() try: if hasattr(self, '_timeout'): response = self._opener.open(request, timeout=self._timeout) else: response = self._opener.open(request) except urllib.error.HTTPError as e: raise NaApiError(e.code, e.msg) except Exception: LOG.exception("Error communicating with NetApp filer.") raise NaApiError('Unexpected error') response_xml = response.read() response_element = self._get_result(response_xml) return response_element def invoke_successfully(self, na_element, enable_tunneling=False): """Invokes API and checks execution status as success. 
Need to set enable_tunneling to True explicitly to achieve it. This helps to use same connection instance to enable or disable tunneling. The vserver or vfiler should be set before this call otherwise tunneling remains disabled. """ result = self.send_http_request(na_element, enable_tunneling) if result.has_attr('status') and result.get_attr('status') == 'passed': return result code = result.get_attr('errno') \ or result.get_child_content('errorno') \ or 'ESTATUSFAILED' if code == ESIS_CLONE_NOT_LICENSED: msg = 'Clone operation failed: FlexClone not licensed.' else: msg = result.get_attr('reason') \ or result.get_child_content('reason') \ or 'Execution status is failed due to unknown reason' raise NaApiError(code, msg) def send_request(self, api_name, api_args=None, enable_tunneling=True): """Sends request to Ontapi.""" request = NaElement(api_name) if api_args: request.translate_struct(api_args) return self.invoke_successfully(request, enable_tunneling) def _create_request(self, na_element, enable_tunneling=False): """Creates request in the desired format.""" netapp_elem = NaElement('netapp') netapp_elem.add_attr('xmlns', self._ns) if hasattr(self, '_api_version'): netapp_elem.add_attr('version', self._api_version) if enable_tunneling: self._enable_tunnel_request(netapp_elem) netapp_elem.add_child_elem(na_element) request_d = netapp_elem.to_string() request = urllib.request.Request( self._get_url(), data=request_d, headers={'Content-Type': 'text/xml', 'charset': 'utf-8'}) return request, netapp_elem def _enable_tunnel_request(self, netapp_elem): """Enables vserver or vfiler tunneling.""" if hasattr(self, '_vfiler') and self._vfiler: if hasattr(self, '_api_major_version') and \ hasattr(self, '_api_minor_version') and \ self._api_major_version >= 1 and \ self._api_minor_version >= 7: netapp_elem.add_attr('vfiler', self._vfiler) else: raise ValueError('ontapi version has to be atleast 1.7' ' to send request to vfiler') if hasattr(self, '_vserver') and self._vserver: if hasattr(self, '_api_major_version') and \ hasattr(self, '_api_minor_version') and \ self._api_major_version >= 1 and \ self._api_minor_version >= 15: netapp_elem.add_attr('vfiler', self._vserver) else: raise ValueError('ontapi version has to be atleast 1.15' ' to send request to vserver') def _parse_response(self, response): """Get the NaElement for the response.""" if not response: raise NaApiError('No response received') xml = etree.XML(response) return NaElement(xml) def _get_result(self, response): """Gets the call result.""" processed_response = self._parse_response(response) return processed_response.get_child_by_name('results') def _get_url(self): host = self._host if netutils.is_valid_ipv6(host): host = netutils.escape_ipv6(host) return '%s://%s:%s/%s' % (self._protocol, host, self._port, self._url) def _build_opener(self): if self._private_key_file and self._certificate_file: auth_handler = self._create_certificate_auth_handler() else: auth_handler = self._create_basic_auth_handler() # Create an SSL context based on _ssl_cert_path if isinstance(self._ssl_cert_path, str): # with cert path ssl_context = ( ssl.create_default_context(cafile=self._ssl_cert_path)) else: # Disable SSL verification ssl_context = ssl._create_unverified_context() https_handler = urllib.request.HTTPSHandler(context=ssl_context) opener = urllib.request.build_opener(auth_handler, https_handler) self._opener = opener def _create_basic_auth_handler(self): password_man = urllib.request.HTTPPasswordMgrWithDefaultRealm() password_man.add_password(None, 
self._get_url(), self._username, self._password) auth_handler = urllib.request.HTTPBasicAuthHandler(password_man) return auth_handler def _create_certificate_auth_handler(self): context = ssl.create_default_context(ssl.Purpose.SERVER_AUTH) if not self._certificate_host_validation: context.check_hostname = False context.verify_mode = ssl.CERT_NONE if self._certificate_file and self._private_key_file: context.load_cert_chain(certfile=self._certificate_file, keyfile=self._private_key_file) if self._ca_certificate_file: context.load_verify_locations(cafile=self._ca_certificate_file) auth_handler = urllib.request.HTTPSHandler(context=context) return auth_handler def __str__(self): return "server: %s" % self._host class NaElement(object): """Class wraps basic building block for NetApp API request.""" def __init__(self, name): """Name of the element or etree.Element.""" if isinstance(name, etree._Element): self._element = name else: self._element = etree.Element(name) def get_name(self): """Returns the tag name of the element.""" return self._element.tag def set_content(self, text): """Set the text string for the element.""" self._element.text = text def get_content(self): """Get the text for the element.""" return self._element.text def add_attr(self, name, value): """Add the attribute to the element.""" self._element.set(name, value) def add_child_elem(self, na_element): """Add the child element to the element.""" if isinstance(na_element, NaElement): self._element.append(na_element._element) return raise Exception(_('Failed to add child element.')) def get_child_by_name(self, name): """Get the child element by the tag name.""" for child in self._element.iterchildren(): if child.tag == name or etree.QName(child.tag).localname == name: return NaElement(child) return None def get_child_content(self, name): """Get the content of the child.""" for child in self._element.iterchildren(): if child.tag == name or etree.QName(child.tag).localname == name: return child.text return None def get_children(self): """Get the children for the element.""" return [NaElement(el) for el in self._element.iterchildren()] def has_attr(self, name): """Checks whether element has attribute.""" attributes = self._element.attrib or {} return name in attributes.keys() def get_attr(self, name): """Get the attribute with the given name.""" attributes = self._element.attrib or {} return attributes.get(name) def get_attr_names(self): """Returns the list of attribute names.""" attributes = self._element.attrib or {} return list(attributes.keys()) def add_new_child(self, name, content, convert=False): """Add child with tag name and content. Convert replaces entity refs to chars. 
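
        A minimal illustrative example (hypothetical path value):

        .. code-block:: python

            lun_destroy = NaElement('lun-destroy')
            lun_destroy.add_new_child('path', '/vol/vol1/lun1')
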
""" child = NaElement(name) if convert: content = NaElement._convert_entity_refs(content) child.set_content(content) self.add_child_elem(child) @staticmethod def _convert_entity_refs(text): """Converts entity refs to chars to handle etree auto conversions.""" text = text.replace("<", "<") text = text.replace(">", ">") return text @staticmethod def create_node_with_children(node, **children): """Creates and returns named node with children.""" parent = NaElement(node) for child in children: parent.add_new_child(child, children.get(child, None)) return parent def add_node_with_children(self, node, **children): """Creates named node with children.""" parent = NaElement.create_node_with_children(node, **children) self.add_child_elem(parent) def to_string(self, pretty=False, method='xml', encoding='UTF-8'): """Prints the element to string.""" return etree.tostring(self._element, method=method, encoding=encoding, pretty_print=pretty) def __str__(self): xml = self.to_string(pretty=True).decode('utf-8') return xml def __eq__(self, other): return str(self) == str(other) def __ne__(self, other): return not self.__eq__(other) def __hash__(self): return hash(str(self)) def __repr__(self): return str(self) def __getitem__(self, key): """Dict getter method for NaElement. Returns NaElement list if present, text value in case no NaElement node children or attribute value if present. """ child = self.get_child_by_name(key) if child: if child.get_children(): return child else: return child.get_content() elif self.has_attr(key): return self.get_attr(key) raise KeyError(_('No element by given name %s.') % (key)) def __setitem__(self, key, value): """Dict setter method for NaElement. Accepts dict, list, tuple, str, int, float and long as valid value. """ if key: if value: if isinstance(value, NaElement): child = NaElement(key) child.add_child_elem(value) self.add_child_elem(child) elif isinstance(value, (str, int, float)): self.add_new_child(key, str(value)) elif isinstance(value, (list, tuple, dict)): child = NaElement(key) child.translate_struct(value) self.add_child_elem(child) else: raise TypeError(_('Not a valid value for NaElement.')) else: self.add_child_elem(NaElement(key)) else: raise KeyError(_('NaElement name cannot be null.')) def translate_struct(self, data_struct): """Convert list, tuple, dict to NaElement and appends. Example usage: 1. .. code-block:: xml vl1 vl2 vl3 The above can be achieved by doing .. code-block:: python root = NaElement('root') root.translate_struct({'elem1': 'vl1', 'elem2': 'vl2', 'elem3': 'vl3'}) 2. .. code-block:: xml vl1 vl2 vl3 The above can be achieved by doing .. code-block:: python root = NaElement('root') root.translate_struct([{'elem1': 'vl1', 'elem2': 'vl2'}, {'elem1': 'vl3'}]) """ if isinstance(data_struct, (list, tuple)): for el in data_struct: if isinstance(el, (list, tuple, dict)): self.translate_struct(el) else: self.add_child_elem(NaElement(el)) elif isinstance(data_struct, dict): for k in data_struct.keys(): child = NaElement(k) if isinstance(data_struct[k], (dict, list, tuple)): child.translate_struct(data_struct[k]) else: if data_struct[k]: child.set_content(str(data_struct[k])) self.add_child_elem(child) else: raise ValueError(_('Type cannot be converted into NaElement.')) class NaApiError(Exception): """Base exception class for NetApp API errors.""" def __init__(self, code='unknown', message='unknown'): self.code = code self.message = message def __str__(self, *args, **kwargs): return 'NetApp API failed. 
Reason - %s:%s' % (self.code, self.message) class NaRetryableError(NaApiError): def __str__(self, *args, **kwargs): return 'NetApp API failed. Try again. Reason - %s:%s' % ( self.code, self.message) class SSHUtil(object): """Encapsulates connection logic and command execution for SSH client.""" MAX_CONCURRENT_SSH_CONNECTIONS = 5 RECV_TIMEOUT = 3 CONNECTION_KEEP_ALIVE = 600 WAIT_ON_STDOUT_TIMEOUT = 3 def __init__(self, host, username, password, port=22): self.ssh_pool = self._init_ssh_pool(host, port, username, password) # Note(cfouts) Number of SSH connections made to the backend need to be # limited. Use of SSHPool allows connections to be cached and reused # instead of creating a new connection each time a command is executed # via SSH. self.ssh_connect_semaphore = semaphore.Semaphore( self.MAX_CONCURRENT_SSH_CONNECTIONS) def _init_ssh_pool(self, host, port, username, password): return ssh_utils.SSHPool(host, port, self.CONNECTION_KEEP_ALIVE, username, password) def execute_command(self, client, command_text, timeout=RECV_TIMEOUT): LOG.debug("execute_command() - Sending command.") stdin, stdout, stderr = client.exec_command(command_text) stdin.close() self._wait_on_stdout(stdout, timeout) output = stdout.read() LOG.debug("Output of length %(size)d received.", {'size': len(output)}) stdout.close() stderr.close() return output def execute_command_with_prompt(self, client, command, expected_prompt_text, prompt_response, timeout=RECV_TIMEOUT): LOG.debug("execute_command_with_prompt() - Sending command.") stdin, stdout, stderr = client.exec_command(command) self._wait_on_stdout(stdout, timeout) response = stdout.channel.recv(999) if expected_prompt_text not in response.strip().decode(): msg = _("Unexpected output. Expected [%(expected)s] but " "received [%(output)s]")\ % {'expected': expected_prompt_text, 'output': response.strip(), } LOG.error(msg) stdin.close() stdout.close() stderr.close() raise exception.VolumeBackendAPIException(msg) else: LOG.debug("execute_command_with_prompt() - Sending answer") stdin.write(prompt_response + '\n') stdin.flush() stdin.close() stdout.close() stderr.close() def _wait_on_stdout(self, stdout, timeout=WAIT_ON_STDOUT_TIMEOUT): wait_time = 0.0 # NOTE(cfouts): The server does not always indicate when EOF is reached # for stdout. The timeout exists for this reason and an attempt is made # to read from stdout. while not stdout.channel.exit_status_ready(): # period is 10 - 25 centiseconds period = random.randint(10, 25) / 100.0 greenthread.sleep(period) wait_time += period if wait_time > timeout: LOG.debug("Timeout exceeded while waiting for exit status.") break # REST API error codes. 
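# Illustrative note (an assumption about the calling code, not part of this
# module): the REST client compares these strings against the ``code``
# attribute of a raised NaApiError, for example:
#
#     try:
#         rest_client.send_request(...)            # hypothetical caller
#     except NaApiError as e:
#         if e.code == REST_NO_SUCH_LUN_MAP:
#             pass                                 # mapping already gone
#         else:
#             raise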
REST_UNAUTHORIZED = '6' REST_API_NOT_FOUND = '3' REST_UPDATE_SNAPMIRROR_FAILED = '13303844' REST_ERELATION_EXISTS = '6619637' REST_SNAPMIRROR_IN_PROGRESS = '13303810' REST_UPDATE_SNAPMIRROR_FAILED = '13303844' REST_NO_SUCH_LUN_MAP = '5374922' REST_NO_SUCH_FILE = '6684674' REST_NAMESPACE_EOBJECTNOTFOUND = ('72090006', '72090006') REST_HOST_ALREADY_MAPPED_TO_SUBSYSTEM = '72089705' class RestNaServer(object): TRANSPORT_TYPE_HTTP = 'http' TRANSPORT_TYPE_HTTPS = 'https' HTTP_PORT = '80' HTTPS_PORT = '443' TRANSPORT_PORT = { TRANSPORT_TYPE_HTTP: HTTP_PORT, TRANSPORT_TYPE_HTTPS: HTTPS_PORT } def __init__(self, host, transport_type=TRANSPORT_TYPE_HTTP, ssl_cert_path=None, username=None, password=None, port=None, api_trace_pattern=None, private_key_file=None, certificate_file=None, ca_certificate_file=None, certificate_host_validation=None): self._host = host self.set_transport_type(transport_type) self.set_port(port=port) self._username = username self._password = password self._private_key_file = private_key_file self._certificate_file = certificate_file self._ca_certificate_file = ca_certificate_file self._certificate_host_validation = certificate_host_validation if api_trace_pattern is not None: na_utils.setup_api_trace_pattern(api_trace_pattern) if ssl_cert_path is not None: self._ssl_verify = ssl_cert_path else: # Note(felipe_rodrigues): it will verify with the Mozila CA roots, # given by certifi package. self._ssl_verify = True self._api_version = None self._api_major_version = None self._api_minor_version = None self._ontap_version = None self._timeout = None LOG.debug('Using REST with NetApp controller: %s', self._host) def set_transport_type(self, transport_type): """Set the transport type protocol for API. Supports http and https transport types. """ if transport_type is None or transport_type.lower() not in ( RestNaServer.TRANSPORT_TYPE_HTTP, RestNaServer.TRANSPORT_TYPE_HTTPS): raise ValueError('Unsupported transport type') self._protocol = transport_type.lower() def get_transport_type(self): """Get the transport type protocol.""" return self._protocol def set_api_version(self, major, minor): """Set the API version.""" try: self._api_major_version = int(major) self._api_minor_version = int(minor) self._api_version = str(major) + "." 
+ str(minor) except ValueError: raise ValueError('Major and minor versions must be integers') def get_api_version(self): """Gets the API version tuple.""" if not self._api_version: return None return (self._api_major_version, self._api_minor_version) def set_ontap_version(self, ontap_version): """Set the ONTAP version.""" self._ontap_version = ontap_version def get_ontap_version(self): """Gets the ONTAP version.""" return self._ontap_version def set_port(self, port=None): """Set the ONTAP port, if not informed, set with default one.""" if port is None and self._protocol in RestNaServer.TRANSPORT_PORT: self._port = RestNaServer.TRANSPORT_PORT[self._protocol] else: try: int(port) except ValueError: raise ValueError('Port must be integer') self._port = str(port) def get_port(self): """Get the server communication port.""" return self._port def set_timeout(self, seconds): """Sets the timeout in seconds.""" try: self._timeout = int(seconds) except ValueError: raise ValueError('timeout in seconds must be integer') def get_timeout(self): """Gets the timeout in seconds if set.""" return self._timeout def set_vserver(self, vserver): """Set the vserver to use if tunneling gets enabled.""" self._vserver = vserver def get_vserver(self): """Get the vserver to use in tunneling.""" return self._vserver def __str__(self): """Gets a representation of the client.""" return "server: %s" % (self._host) def _get_request_method(self, method, session): """Returns the request method to be used in the REST call.""" request_methods = { 'post': session.post, 'get': session.get, 'put': session.put, 'delete': session.delete, 'patch': session.patch, } return request_methods[method] def _add_query_params_to_url(self, url, query): """Populates the URL with specified filters.""" filters = '&'.join([f"{k}={v}" for k, v in query.items()]) url += "?" + filters return url def _get_base_url(self): """Get the base URL for REST requests.""" host = self._host if ':' in host: host = '[%s]' % host return '%s://%s:%s/api/' % (self._protocol, host, self._port) def _build_session(self, headers): """Builds a session in the client.""" self._session = requests.Session() # NOTE(felipe_rodrigues): request resilient of temporary network # failures (like name resolution failure), retrying until 5 times. 
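        # Illustrative reading of the settings below (an assumption about
        # urllib3 Retry semantics, not part of the original note): at most 5
        # retries in total, capped at 5 connection-error retries and 2
        # read-error retries, with an exponentially growing sleep between
        # attempts controlled by backoff_factor=1; mounting the adapter makes
        # this policy apply to every request the session sends over the
        # configured protocol.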
max_retries = Retry(total=5, connect=5, read=2, backoff_factor=1) adapter = HTTPAdapter(max_retries=max_retries) self._session.mount('%s://' % self._protocol, adapter) if self._private_key_file and self._certificate_file: self._session.cert, self._session.verify\ = self._create_certificate_auth_handler() else: self._session.auth = self._create_basic_auth_handler() self._session.verify = self._ssl_verify self._session.headers = headers def _build_headers(self, enable_tunneling): """Build and return headers for a REST request.""" headers = { "Accept": "application/json", "Content-Type": "application/json" } if enable_tunneling: headers["X-Dot-SVM-Name"] = self.get_vserver() return headers def _create_basic_auth_handler(self): """Creates and returns a basic HTTP auth handler.""" return auth.HTTPBasicAuth(self._username, self._password) def _create_certificate_auth_handler(self): """Creates and returns a certificate auth handler.""" self._certificate_host_validation = self._session.verify if self._certificate_file and self._private_key_file \ and self._ca_certificate_file: self._session.cert = (self._certificate_file, self._private_key_file) if self._certificate_host_validation: self._session.verify = self._ca_certificate_file elif self._certificate_file and self._private_key_file: self._session.cert = (self._certificate_file, self._private_key_file) return self._session.cert, self._session.verify @volume_utils.trace_api( filter_function=na_utils.trace_filter_func_rest_api) def send_http_request(self, method, url, body, headers): """Invoke the API on the server. The passed parameters and returned parameters will be logged if trace feature is on. They are important for debugging purpose. """ data = jsonutils.dumps(body) if body else {} self._build_session(headers) request_method = self._get_request_method(method, self._session) try: if self._timeout is not None: response = request_method( url, data=data, timeout=self._timeout) else: response = request_method(url, data=data) except requests.HTTPError as e: raise NaApiError(e.errno, e.strerror) except Exception as e: raise NaApiError(message=e) code = response.status_code body = jsonutils.loads(response.content) if response.content else {} return code, body def invoke_successfully(self, action_url, method, body=None, query=None, enable_tunneling=False): """Invokes REST API and checks execution status as success.""" headers = self._build_headers(enable_tunneling) if query: action_url = self._add_query_params_to_url(action_url, query) url = self._get_base_url() + action_url code, response = self.send_http_request(method, url, body, headers) if not response.get('error'): return code, response result_error = response.get('error') code = result_error.get('code', 'ESTATUSFAILED') # TODO: add the correct code number for REST not licensed clone error. if code == ESIS_CLONE_NOT_LICENSED: msg = 'Clone operation failed: FlexClone not licensed.' else: msg = (result_error.get('message') or 'Execution status is failed due to unknown reason') raise NaApiError(code, msg) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/drivers/netapp/dataontap/client/client_base.py0000664000175000017500000004650300000000000026672 0ustar00zuulzuul00000000000000# Copyright (c) 2014 Alex Meade. All rights reserved. # Copyright (c) 2014 Clinton Knight. All rights reserved. # Copyright (c) 2015 Tom Barron. All rights reserved. # Copyright (c) 2016 Mike Rooney. All rights reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_log import log as logging from oslo_utils import excutils from cinder import exception from cinder.i18n import _ from cinder import utils from cinder.volume.drivers.netapp.dataontap.client import api as netapp_api from cinder.volume.drivers.netapp import utils as na_utils from cinder.volume import volume_utils LOG = logging.getLogger(__name__) DELETED_PREFIX = 'deleted_cinder_' MAX_SIZE_FOR_A_LUN = '17555678822400' class Client(object, metaclass=volume_utils.TraceWrapperMetaclass): def __init__(self, **kwargs): host = kwargs['hostname'] username = kwargs['username'] password = kwargs['password'] api_trace_pattern = kwargs['api_trace_pattern'] private_key_file = kwargs['private_key_file'] certificate_file = kwargs['certificate_file'] ca_certificate_file = kwargs['ca_certificate_file'] certificate_host_validation = kwargs['certificate_host_validation'] ssl_cert_path = kwargs.get('ssl_cert_path') if private_key_file and certificate_file and ca_certificate_file: self.connection = netapp_api.NaServer( host=host, transport_type='https', port=kwargs['port'], ssl_cert_path=ssl_cert_path, private_key_file=private_key_file, certificate_file=certificate_file, ca_certificate_file=ca_certificate_file, certificate_host_validation=certificate_host_validation, api_trace_pattern=api_trace_pattern) elif private_key_file and certificate_file: self.connection = netapp_api.NaServer( host=host, transport_type='https', port=kwargs['port'], ssl_cert_path=ssl_cert_path, private_key_file=private_key_file, certificate_file=certificate_file, certificate_host_validation=certificate_host_validation, api_trace_pattern=api_trace_pattern) else: self.connection = netapp_api.NaServer( host=host, transport_type=kwargs['transport_type'], port=kwargs['port'], ssl_cert_path=ssl_cert_path, username=username, password=password, api_trace_pattern=api_trace_pattern) self.ssh_client = self._init_ssh_client(host, username, password) def _init_ssh_client(self, host, username, password): return netapp_api.SSHUtil( host=host, username=username, password=password) def _init_features(self): """Set up the repository of available Data ONTAP features.""" self.features = na_utils.Features() def get_ontap_version(self, cached=True): """Gets the ONTAP version-string and version-tuple""" if cached: return self.connection.get_ontap_version() ontap_version = netapp_api.NaElement("system-get-version") result = self.connection.invoke_successfully( ontap_version, enable_tunneling=True) version_tuple = result.get_child_by_name( 'version-tuple') or netapp_api.NaElement('none') ontap_version_tuple = version_tuple.get_child_by_name( 'system-version-tuple') or netapp_api.NaElement('none') version = ( int(ontap_version_tuple.get_child_content('generation')), int(ontap_version_tuple.get_child_content('major')), int(ontap_version_tuple.get_child_content('minor'))) return version def get_ontapi_version(self, cached=True): """Gets the supported ontapi version.""" if cached: return self.connection.get_api_version() 
ontapi_version = netapp_api.NaElement('system-get-ontapi-version') res = self.connection.invoke_successfully(ontapi_version, False) major = res.get_child_content('major-version') minor = res.get_child_content('minor-version') return major, minor def _strip_xml_namespace(self, string): if string.startswith('{') and '}' in string: return string.split('}', 1)[1] return string def check_is_naelement(self, elem): """Checks if object is instance of NaElement.""" if not isinstance(elem, netapp_api.NaElement): raise ValueError('Expects NaElement') def create_lun(self, volume_name, lun_name, size, metadata, qos_policy_group_name=None, qos_policy_group_is_adaptive=False): """Issues API request for creating LUN on volume.""" self._validate_qos_policy_group(qos_policy_group_is_adaptive) path = '/vol/%s/%s' % (volume_name, lun_name) space_reservation = metadata['SpaceReserved'] initial_size = size ontap_version = self.get_ontap_version() # On older ONTAP versions the extend size is limited to its # geometry on max_resize_size. In order to remove this # limitation we create the LUN with its maximum possible size # and then shrink to the requested size. if ontap_version < (9, 5, 0): initial_size = MAX_SIZE_FOR_A_LUN # In order to create a LUN with its maximum size (16TB), # the space_reservation needs to be disabled space_reservation = 'false' params = {'path': path, 'size': str(initial_size), 'ostype': metadata['OsType'], 'space-reservation-enabled': space_reservation} if "SpaceAllocated" in metadata: params['space-allocation-enabled'] = metadata['SpaceAllocated'] version = self.get_ontapi_version() if version >= (1, 110): params['use-exact-size'] = 'true' lun_create = netapp_api.NaElement.create_node_with_children( 'lun-create-by-size', **params) if qos_policy_group_name: if qos_policy_group_is_adaptive: lun_create.add_new_child( 'qos-adaptive-policy-group', qos_policy_group_name) else: lun_create.add_new_child( 'qos-policy-group', qos_policy_group_name) try: self.connection.invoke_successfully(lun_create, True) except netapp_api.NaApiError as ex: with excutils.save_and_reraise_exception(): LOG.error("Error provisioning volume %(lun_name)s on " "%(volume_name)s. 
Details: %(ex)s", {'lun_name': lun_name, 'volume_name': volume_name, 'ex': ex}) if ontap_version < (9, 5, 0): self.do_direct_resize(path, str(size)) if metadata['SpaceReserved'] == 'true': self.set_lun_space_reservation(path, True) def set_lun_space_reservation(self, path, flag): """Sets the LUN space reservation on ONTAP.""" lun_modify_space_reservation = ( netapp_api.NaElement.create_node_with_children( 'lun-set-space-reservation-info', **{ 'path': path, 'enable': str(flag)})) self.connection.invoke_successfully(lun_modify_space_reservation, True) def destroy_lun(self, path, force=True): """Destroys the LUN at the path.""" lun_destroy = netapp_api.NaElement.create_node_with_children( 'lun-destroy', **{'path': path}) if force: lun_destroy.add_new_child('force', 'true') self.connection.invoke_successfully(lun_destroy, True) seg = path.split("/") LOG.debug("Destroyed LUN %s", seg[-1]) def map_lun(self, path, igroup_name, lun_id=None): """Maps LUN to the initiator and returns LUN id assigned.""" lun_map = netapp_api.NaElement.create_node_with_children( 'lun-map', **{'path': path, 'initiator-group': igroup_name}) if lun_id: lun_map.add_new_child('lun-id', lun_id) try: result = self.connection.invoke_successfully(lun_map, True) return result.get_child_content('lun-id-assigned') except netapp_api.NaApiError as e: code = e.code message = e.message LOG.warning('Error mapping LUN. Code :%(code)s, Message: ' '%(message)s', {'code': code, 'message': message}) raise def unmap_lun(self, path, igroup_name): """Unmaps a LUN from given initiator.""" lun_unmap = netapp_api.NaElement.create_node_with_children( 'lun-unmap', **{'path': path, 'initiator-group': igroup_name}) try: self.connection.invoke_successfully(lun_unmap, True) except netapp_api.NaApiError as e: LOG.warning("Error unmapping LUN. 
Code :%(code)s, Message: " "%(message)s", {'code': e.code, 'message': e.message}) # if the LUN is already unmapped if e.code == '13115' or e.code == '9016': pass else: raise e def create_igroup(self, igroup, igroup_type='iscsi', os_type='default'): """Creates igroup with specified args.""" igroup_create = netapp_api.NaElement.create_node_with_children( 'igroup-create', **{'initiator-group-name': igroup, 'initiator-group-type': igroup_type, 'os-type': os_type}) self.connection.invoke_successfully(igroup_create, True) def add_igroup_initiator(self, igroup, initiator): """Adds initiators to the specified igroup.""" igroup_add = netapp_api.NaElement.create_node_with_children( 'igroup-add', **{'initiator-group-name': igroup, 'initiator': initiator}) self.connection.invoke_successfully(igroup_add, True) def do_direct_resize(self, path, new_size_bytes, force=True): """Resize the LUN.""" seg = path.split("/") LOG.info("Resizing LUN %s directly to new size.", seg[-1]) lun_resize = netapp_api.NaElement.create_node_with_children( 'lun-resize', **{'path': path, 'size': new_size_bytes}) if force: lun_resize.add_new_child('force', 'true') self.connection.invoke_successfully(lun_resize, True) def get_lun_geometry(self, path): """Gets the LUN geometry.""" geometry = {} lun_geo = netapp_api.NaElement("lun-get-geometry") lun_geo.add_new_child('path', path) try: result = self.connection.invoke_successfully(lun_geo, True) geometry['size'] = result.get_child_content("size") geometry['bytes_per_sector'] = result.get_child_content( "bytes-per-sector") geometry['sectors_per_track'] = result.get_child_content( "sectors-per-track") geometry['tracks_per_cylinder'] = result.get_child_content( "tracks-per-cylinder") geometry['cylinders'] = result.get_child_content("cylinders") geometry['max_resize'] = result.get_child_content( "max-resize-size") except Exception as e: LOG.error("LUN %(path)s geometry failed. 
Message - %(msg)s", {'path': path, 'msg': str(e)}) return geometry def get_volume_options(self, volume_name): """Get the value for the volume option.""" opts = [] vol_option_list = netapp_api.NaElement("volume-options-list-info") vol_option_list.add_new_child('volume', volume_name) result = self.connection.invoke_successfully(vol_option_list, True) options = result.get_child_by_name("options") if options: opts = options.get_children() return opts def move_lun(self, path, new_path): """Moves the LUN at path to new path.""" seg = path.split("/") new_seg = new_path.split("/") LOG.debug("Moving LUN %(name)s to %(new_name)s.", {'name': seg[-1], 'new_name': new_seg[-1]}) lun_move = netapp_api.NaElement("lun-move") lun_move.add_new_child("path", path) lun_move.add_new_child("new-path", new_path) self.connection.invoke_successfully(lun_move, True) def get_iscsi_target_details(self): """Gets the iSCSI target portal details.""" raise NotImplementedError() def get_fc_target_wwpns(self): """Gets the FC target details.""" raise NotImplementedError() def get_iscsi_service_details(self): """Returns iscsi iqn.""" raise NotImplementedError() def check_iscsi_initiator_exists(self, iqn): """Returns True if initiator exists.""" raise NotImplementedError() def set_iscsi_chap_authentication(self, iqn, username, password): """Provides NetApp host's CHAP credentials to the backend.""" raise NotImplementedError() def get_lun_list(self): """Gets the list of LUNs on filer.""" raise NotImplementedError() def get_igroup_by_initiators(self, initiator_list): """Get igroups exactly matching a set of initiators.""" raise NotImplementedError() def _validate_qos_policy_group(self, is_adaptive, spec=None, is_nfs=False): """Raises an exception if the backend doesn't support the QoS spec.""" raise NotImplementedError() def _has_luns_mapped_to_initiator(self, initiator): """Checks whether any LUNs are mapped to the given initiator.""" lun_list_api = netapp_api.NaElement('lun-initiator-list-map-info') lun_list_api.add_new_child('initiator', initiator) result = self.connection.invoke_successfully(lun_list_api, True) lun_maps_container = result.get_child_by_name( 'lun-maps') or netapp_api.NaElement('none') return len(lun_maps_container.get_children()) > 0 def has_luns_mapped_to_initiators(self, initiator_list): """Checks whether any LUNs are mapped to the given initiator(s).""" for initiator in initiator_list: if self._has_luns_mapped_to_initiator(initiator): return True return False def get_lun_by_args(self, **args): """Retrieves LUNs with specified args.""" raise NotImplementedError() def get_performance_counter_info(self, object_name, counter_name): """Gets info about one or more Data ONTAP performance counters.""" api_args = {'objectname': object_name} result = self.connection.send_request('perf-object-counter-list-info', api_args, enable_tunneling=False) counters = result.get_child_by_name( 'counters') or netapp_api.NaElement('None') for counter in counters.get_children(): if counter.get_child_content('name') == counter_name: labels = [] label_list = counter.get_child_by_name( 'labels') or netapp_api.NaElement('None') for label in label_list.get_children(): labels.extend(label.get_content().split(',')) base_counter = counter.get_child_content('base-counter') return { 'name': counter_name, 'labels': labels, 'base-counter': base_counter, } else: raise exception.NotFound(_('Counter %s not found') % counter_name) def delete_snapshot(self, volume_name, snapshot_name): """Deletes a volume snapshot.""" api_args = {'volume': 
volume_name, 'snapshot': snapshot_name} self.connection.send_request('snapshot-delete', api_args) def create_cg_snapshot(self, volume_names, snapshot_name): """Creates a consistency group snapshot out of one or more flexvols. ONTAP requires an invocation of cg-start to first fence off the flexvols to be included in the snapshot. If cg-start returns success, a cg-commit must be executed to finalized the snapshot and unfence the flexvols. """ cg_id = self._start_cg_snapshot(volume_names, snapshot_name) if not cg_id: msg = _('Could not start consistency group snapshot %s.') raise exception.VolumeBackendAPIException(data=msg % snapshot_name) self._commit_cg_snapshot(cg_id) def _start_cg_snapshot(self, volume_names, snapshot_name): snapshot_init = { 'snapshot': snapshot_name, 'timeout': 'relaxed', 'volumes': [ {'volume-name': volume_name} for volume_name in volume_names ], } result = self.connection.send_request('cg-start', snapshot_init) return result.get_child_content('cg-id') def _commit_cg_snapshot(self, cg_id): snapshot_commit = {'cg-id': cg_id} self.connection.send_request('cg-commit', snapshot_commit) def get_snapshot(self, volume_name, snapshot_name): """Gets a single snapshot.""" raise NotImplementedError() @utils.retry(exception.SnapshotIsBusy) def wait_for_busy_snapshot(self, flexvol, snapshot_name): """Checks for and handles a busy snapshot. If a snapshot is busy, for reasons other than cloning, an exception is raised immediately. Otherwise, wait for a period of time for the clone dependency to finish before giving up. If the snapshot is not busy then no action is taken and the method exits. """ snapshot = self.get_snapshot(flexvol, snapshot_name) if not snapshot['busy']: LOG.debug("Backing consistency group snapshot %s available for " "deletion.", snapshot_name) return else: LOG.debug("Snapshot %(snap)s for vol %(vol)s is busy, waiting " "for volume clone dependency to clear.", {"snap": snapshot_name, "vol": flexvol}) raise exception.SnapshotIsBusy(snapshot_name=snapshot_name) def mark_snapshot_for_deletion(self, volume, snapshot_name): """Mark snapshot for deletion by renaming snapshot.""" return self.rename_snapshot( volume, snapshot_name, DELETED_PREFIX + snapshot_name) def rename_snapshot(self, volume, current_name, new_name): """Renames a snapshot.""" api_args = { 'volume': volume, 'current-name': current_name, 'new-name': new_name, } return self.connection.send_request('snapshot-rename', api_args) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/drivers/netapp/dataontap/client/client_cmode.py0000664000175000017500000033673000000000000027053 0ustar00zuulzuul00000000000000# Copyright (c) 2014 Alex Meade. All rights reserved. # Copyright (c) 2014 Clinton Knight. All rights reserved. # Copyright (c) 2015 Tom Barron. All rights reserved. # Copyright (c) 2016 Mike Rooney. All rights reserved. # Copyright (c) 2017 Jose Porrua. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import copy import math import re from oslo_log import log as logging from oslo_utils import strutils from oslo_utils import units from cinder import exception from cinder.i18n import _ from cinder.volume.drivers.netapp.dataontap.client import api as netapp_api from cinder.volume.drivers.netapp.dataontap.client import client_base from cinder.volume.drivers.netapp import utils as na_utils from cinder.volume import volume_utils LOG = logging.getLogger(__name__) DEFAULT_MAX_PAGE_LENGTH = 50 ONTAP_SELECT_MODEL = 'FDvM300' ONTAP_C190 = 'C190' # NOTE(cknight): The keys in this map are tuples that contain arguments needed # for efficient use of the system-user-capability-get-iter cDOT API. The # values are SSC extra specs associated with the APIs listed in the keys. SSC_API_MAP = { ('storage.aggregate', 'show', 'aggr-options-list-info'): [ 'netapp_raid_type', ], ('storage.disk', 'show', 'storage-disk-get-iter'): [ 'netapp_disk_type', ], ('snapmirror', 'show', 'snapmirror-get-iter'): [ 'netapp_mirrored', ], ('volume.efficiency', 'show', 'sis-get-iter'): [ 'netapp_dedup', 'netapp_compression', ], ('volume', '*show', 'volume-get-iter'): [ 'netapp_flexvol_encryption', ], } class Client(client_base.Client, metaclass=volume_utils.TraceWrapperMetaclass): def __init__(self, **kwargs): super(Client, self).__init__(**kwargs) self.vserver = kwargs.get('vserver', None) self.connection.set_vserver(self.vserver) # Default values to run first api self.connection.set_api_version(1, 15) (major, minor) = self.get_ontapi_version(cached=False) self.connection.set_api_version(major, minor) ontap_version = self.get_ontap_version(cached=False) self.connection.set_ontap_version(ontap_version) self._init_features() def _init_features(self): super(Client, self)._init_features() ontapi_version = self.get_ontapi_version() # major, minor ontapi_1_20 = ontapi_version >= (1, 20) ontapi_1_2x = (1, 20) <= ontapi_version < (1, 30) ontapi_1_30 = ontapi_version >= (1, 30) ontapi_1_100 = ontapi_version >= (1, 100) ontapi_1_1xx = (1, 100) <= ontapi_version < (1, 200) ontapi_1_60 = ontapi_version >= (1, 160) ontapi_1_40 = ontapi_version >= (1, 140) ontapi_1_50 = ontapi_version >= (1, 150) ontapi_1_80 = ontapi_version >= (1, 180) ontapi_1_90 = ontapi_version >= (1, 190) nodes_info = self._get_cluster_nodes_info() for node in nodes_info: qos_min_block = False qos_min_nfs = False if node['model'] == ONTAP_SELECT_MODEL: qos_min_block = node['is_all_flash_select'] and ontapi_1_60 qos_min_nfs = qos_min_block elif ONTAP_C190 in node['model']: qos_min_block = node['is_all_flash'] and ontapi_1_60 qos_min_nfs = qos_min_block else: qos_min_block = node['is_all_flash'] and ontapi_1_20 qos_min_nfs = node['is_all_flash'] and ontapi_1_30 qos_name = na_utils.qos_min_feature_name(True, node['name']) self.features.add_feature(qos_name, supported=qos_min_nfs) qos_name = na_utils.qos_min_feature_name(False, node['name']) self.features.add_feature(qos_name, supported=qos_min_block) self.features.add_feature('SNAPMIRROR_V2', supported=ontapi_1_20) self.features.add_feature('USER_CAPABILITY_LIST', supported=ontapi_1_20) self.features.add_feature('SYSTEM_METRICS', supported=ontapi_1_2x) self.features.add_feature('CLONE_SPLIT_STATUS', supported=ontapi_1_30) self.features.add_feature('FAST_CLONE_DELETE', supported=ontapi_1_30) self.features.add_feature('SYSTEM_CONSTITUENT_METRICS', supported=ontapi_1_30) self.features.add_feature('ADVANCED_DISK_PARTITIONING', supported=ontapi_1_30) self.features.add_feature('BACKUP_CLONE_PARAM', supported=ontapi_1_100) 
self.features.add_feature('CLUSTER_PEER_POLICY', supported=ontapi_1_30) self.features.add_feature('FLEXVOL_ENCRYPTION', supported=ontapi_1_1xx) self.features.add_feature('FLEXGROUP', supported=ontapi_1_80) self.features.add_feature('FLEXGROUP_CLONE_FILE', supported=ontapi_1_90) self.features.add_feature('ADAPTIVE_QOS', supported=ontapi_1_40) self.features.add_feature('ADAPTIVE_QOS_BLOCK_SIZE', supported=ontapi_1_50) self.features.add_feature('ADAPTIVE_QOS_EXPECTED_IOPS_ALLOCATION', supported=ontapi_1_50) LOG.info('Reported ONTAPI Version: %(major)s.%(minor)s', {'major': ontapi_version[0], 'minor': ontapi_version[1]}) def _invoke_vserver_api(self, na_element, vserver): server = copy.copy(self.connection) server.set_vserver(vserver) result = server.invoke_successfully(na_element, True) return result def _has_records(self, api_result_element): num_records = api_result_element.get_child_content('num-records') return bool(num_records and '0' != num_records) def _get_record_count(self, api_result_element): try: return int(api_result_element.get_child_content('num-records')) except TypeError: msg = _('Missing record count for NetApp iterator API invocation.') raise na_utils.NetAppDriverException(msg) def set_vserver(self, vserver): self.vserver = vserver self.connection.set_vserver(vserver) def send_iter_request(self, api_name, api_args=None, enable_tunneling=True, max_page_length=DEFAULT_MAX_PAGE_LENGTH): """Invoke an iterator-style getter API.""" if not api_args: api_args = {} api_args['max-records'] = max_page_length # Get first page result = self.connection.send_request( api_name, api_args, enable_tunneling=enable_tunneling) # Most commonly, we can just return here if there is no more data next_tag = result.get_child_content('next-tag') if not next_tag: return result # Ensure pagination data is valid and prepare to store remaining pages num_records = self._get_record_count(result) attributes_list = result.get_child_by_name('attributes-list') if not attributes_list: msg = _('Missing attributes list for API %s.') % api_name raise na_utils.NetAppDriverException(msg) # Get remaining pages, saving data into first page while next_tag is not None: next_api_args = copy.deepcopy(api_args) next_api_args['tag'] = next_tag next_result = self.connection.send_request( api_name, next_api_args, enable_tunneling=enable_tunneling) next_attributes_list = next_result.get_child_by_name( 'attributes-list') or netapp_api.NaElement('none') for record in next_attributes_list.get_children(): attributes_list.add_child_elem(record) num_records += self._get_record_count(next_result) next_tag = next_result.get_child_content('next-tag') result.get_child_by_name('num-records').set_content(str(num_records)) result.get_child_by_name('next-tag').set_content('') return result def check_api_permissions(self): """Check which APIs that support SSC functionality are available.""" inaccessible_apis = [] invalid_extra_specs = [] for api_tuple, extra_specs in SSC_API_MAP.items(): object_name, operation_name, api = api_tuple if not self.check_cluster_api(object_name, operation_name, api): inaccessible_apis.append(api) invalid_extra_specs.extend(extra_specs) if inaccessible_apis: if 'volume-get-iter' in inaccessible_apis: msg = _('User not permitted to query Data ONTAP volumes.') raise exception.VolumeBackendAPIException(data=msg) else: LOG.warning('The configured user account does not have ' 'sufficient privileges to use all needed ' 'APIs. 
The following extra specs will fail ' 'or be ignored: %s.', invalid_extra_specs) return invalid_extra_specs def _get_cluster_nodes_info(self): """Return a list of models of the nodes in the cluster""" api_args = { 'desired-attributes': { 'node-details-info': { 'node': None, 'node-model': None, 'is-all-flash-select-optimized': None, 'is-all-flash-optimized': None, } } } nodes = [] try: result = self.send_iter_request('system-node-get-iter', api_args, enable_tunneling=False) system_node_list = result.get_child_by_name( 'attributes-list') or netapp_api.NaElement('none') for system_node in system_node_list.get_children(): node = { 'model': system_node.get_child_content('node-model'), 'name': system_node.get_child_content('node'), 'is_all_flash': system_node.get_child_content( 'is-all-flash-optimized') == 'true', 'is_all_flash_select': system_node.get_child_content( 'is-all-flash-select-optimized') == 'true', } nodes.append(node) except netapp_api.NaApiError as e: if e.code == netapp_api.EAPINOTFOUND: LOG.debug('Cluster nodes can only be collected with ' 'cluster scoped credentials.') else: LOG.exception('Failed to get the cluster nodes.') return nodes def list_vservers(self, vserver_type='data'): """Get the names of vservers present, optionally filtered by type.""" query = { 'vserver-info': { 'vserver-type': vserver_type, } } if vserver_type else None api_args = { 'desired-attributes': { 'vserver-info': { 'vserver-name': None, }, }, } if query: api_args['query'] = query result = self.send_iter_request('vserver-get-iter', api_args, enable_tunneling=False) vserver_info_list = result.get_child_by_name( 'attributes-list') or netapp_api.NaElement('none') return [vserver_info.get_child_content('vserver-name') for vserver_info in vserver_info_list.get_children()] def _get_ems_log_destination_vserver(self): """Returns the best vserver destination for EMS messages.""" major, minor = self.get_ontapi_version(cached=True) if (major > 1) or (major == 1 and minor > 15): # Prefer admin Vserver (requires cluster credentials). admin_vservers = self.list_vservers(vserver_type='admin') if admin_vservers: return admin_vservers[0] # Fall back to data Vserver. data_vservers = self.list_vservers(vserver_type='data') if data_vservers: return data_vservers[0] # If older API version, or no other Vservers found, use node Vserver. node_vservers = self.list_vservers(vserver_type='node') if node_vservers: return node_vservers[0] raise exception.NotFound("No Vserver found to receive EMS messages.") def send_ems_log_message(self, message_dict): """Sends a message to the Data ONTAP EMS log.""" # NOTE(cknight): Cannot use deepcopy on the connection context node_client = copy.copy(self) node_client.connection = copy.copy(self.connection) node_client.connection.set_timeout(25) try: node_client.set_vserver(self._get_ems_log_destination_vserver()) node_client.connection.send_request('ems-autosupport-log', message_dict) LOG.debug('EMS executed successfully.') except netapp_api.NaApiError as e: LOG.warning('Failed to invoke EMS. 
%s', e) def get_iscsi_target_details(self): """Gets the iSCSI target portal details.""" iscsi_if_iter = netapp_api.NaElement('iscsi-interface-get-iter') result = self.connection.invoke_successfully(iscsi_if_iter, True) tgt_list = [] num_records = result.get_child_content('num-records') if num_records and int(num_records) >= 1: attr_list = result.get_child_by_name('attributes-list') iscsi_if_list = attr_list.get_children() for iscsi_if in iscsi_if_list: d = dict() d['address'] = iscsi_if.get_child_content('ip-address') d['port'] = iscsi_if.get_child_content('ip-port') d['tpgroup-tag'] = iscsi_if.get_child_content('tpgroup-tag') d['interface-enabled'] = iscsi_if.get_child_content( 'is-interface-enabled') tgt_list.append(d) return tgt_list def set_iscsi_chap_authentication(self, iqn, username, password): """Provides NetApp host's CHAP credentials to the backend.""" initiator_exists = self.check_iscsi_initiator_exists(iqn) command_template = ('iscsi security %(mode)s -vserver %(vserver)s ' '-initiator-name %(iqn)s -auth-type CHAP ' '-user-name %(username)s') if initiator_exists: LOG.debug('Updating CHAP authentication for %(iqn)s.', {'iqn': iqn}) command = command_template % { 'mode': 'modify', 'vserver': self.vserver, 'iqn': iqn, 'username': username, } else: LOG.debug('Adding initiator %(iqn)s with CHAP authentication.', {'iqn': iqn}) command = command_template % { 'mode': 'create', 'vserver': self.vserver, 'iqn': iqn, 'username': username, } try: with self.ssh_client.ssh_connect_semaphore: ssh_pool = self.ssh_client.ssh_pool with ssh_pool.item() as ssh: self.ssh_client.execute_command_with_prompt(ssh, command, 'Password:', password) except Exception as e: msg = _('Failed to set CHAP authentication for target IQN %(iqn)s.' ' Details: %(ex)s') % { 'iqn': iqn, 'ex': e, } LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) def check_iscsi_initiator_exists(self, iqn): """Returns True if initiator exists.""" initiator_exists = True try: auth_list = netapp_api.NaElement('iscsi-initiator-get-auth') auth_list.add_new_child('initiator', iqn) self.connection.invoke_successfully(auth_list, True) except netapp_api.NaApiError: initiator_exists = False return initiator_exists def get_fc_target_wwpns(self): """Gets the FC target details.""" wwpns = [] port_name_list_api = netapp_api.NaElement('fcp-port-name-get-iter') port_name_list_api.add_new_child('max-records', '100') result = self.connection.invoke_successfully(port_name_list_api, True) num_records = result.get_child_content('num-records') if num_records and int(num_records) >= 1: for port_name_info in result.get_child_by_name( 'attributes-list').get_children(): if port_name_info.get_child_content('is-used') != 'true': continue wwpn = port_name_info.get_child_content('port-name').lower() wwpns.append(wwpn) return wwpns def get_iscsi_service_details(self): """Returns iscsi iqn.""" iscsi_service_iter = netapp_api.NaElement('iscsi-service-get-iter') result = self.connection.invoke_successfully(iscsi_service_iter, True) if result.get_child_content('num-records') and\ int(result.get_child_content('num-records')) >= 1: attr_list = result.get_child_by_name('attributes-list') iscsi_service = attr_list.get_child_by_name('iscsi-service-info') return iscsi_service.get_child_content('node-name') LOG.debug('No iSCSI service found for vserver %s', self.vserver) return None def get_lun_sizes_by_volume(self, volume_name): """"Gets the list of LUNs and their sizes from a given volume name""" api_args = { 'query': { 'lun-info': { 'volume': volume_name, 
'vserver': self.vserver } }, 'desired-attributes': { 'lun-info': { 'path': None, 'size': None } } } result = self.send_iter_request( 'lun-get-iter', api_args, max_page_length=100) if not self._has_records(result): return [] attributes_list = result.get_child_by_name('attributes-list') luns = [] for lun_info in attributes_list.get_children(): luns.append({ 'path': lun_info.get_child_content('path'), 'size': float(lun_info.get_child_content('size')) }) return luns def get_file_sizes_by_dir(self, dir_path): """Gets the list of files and their sizes from a given directory.""" api_args = { 'path': '/vol/%s' % dir_path, 'query': { 'file-info': { 'file-type': 'file' } }, 'desired-attributes': { 'file-info': { 'name': None, 'file-size': None } } } result = self.send_iter_request( 'file-list-directory-iter', api_args, max_page_length=100) if not self._has_records(result): return [] attributes_list = result.get_child_by_name('attributes-list') files = [] for file_info in attributes_list.get_children(): files.append({ 'name': file_info.get_child_content('name'), 'file-size': float(file_info.get_child_content('file-size')) }) return files def get_lun_list(self): """Gets the list of LUNs on filer. Gets the LUNs from cluster with vserver. """ luns = [] tag = None while True: api = netapp_api.NaElement('lun-get-iter') api.add_new_child('max-records', '100') if tag: api.add_new_child('tag', tag, True) lun_info = netapp_api.NaElement('lun-info') lun_info.add_new_child('vserver', self.vserver) query = netapp_api.NaElement('query') query.add_child_elem(lun_info) api.add_child_elem(query) result = self.connection.invoke_successfully(api, True) if result.get_child_by_name('num-records') and\ int(result.get_child_content('num-records')) >= 1: attr_list = result.get_child_by_name('attributes-list') luns.extend(attr_list.get_children()) tag = result.get_child_content('next-tag') if tag is None: break lun_list = [self._create_lun_meta(lun) for lun in luns] return lun_list def _create_lun_meta(self, lun): """Creates LUN metadata dictionary.""" self.check_is_naelement(lun) meta_dict = {} meta_dict['Vserver'] = lun.get_child_content('vserver') meta_dict['Volume'] = lun.get_child_content('volume') meta_dict['Size'] = lun.get_child_content('size') meta_dict['Qtree'] = lun.get_child_content('qtree') meta_dict['Path'] = lun.get_child_content('path') meta_dict['OsType'] = lun.get_child_content('multiprotocol-type') meta_dict['SpaceReserved'] = \ lun.get_child_content('is-space-reservation-enabled') meta_dict['SpaceAllocated'] = \ lun.get_child_content('is-space-alloc-enabled') meta_dict['UUID'] = lun.get_child_content('uuid') meta_dict['BlockSize'] = lun.get_child_content('block-size') return meta_dict def get_lun_map(self, path): """Gets the LUN map by LUN path.""" tag = None map_list = [] while True: lun_map_iter = netapp_api.NaElement('lun-map-get-iter') lun_map_iter.add_new_child('max-records', '100') if tag: lun_map_iter.add_new_child('tag', tag, True) query = netapp_api.NaElement('query') lun_map_iter.add_child_elem(query) query.add_node_with_children('lun-map-info', **{'path': path}) result = self.connection.invoke_successfully(lun_map_iter, True) tag = result.get_child_content('next-tag') if result.get_child_content('num-records') and \ int(result.get_child_content('num-records')) >= 1: attr_list = result.get_child_by_name('attributes-list') lun_maps = attr_list.get_children() for lun_map in lun_maps: lun_m = dict() lun_m['initiator-group'] = lun_map.get_child_content( 'initiator-group') lun_m['lun-id'] = 
lun_map.get_child_content('lun-id') lun_m['vserver'] = lun_map.get_child_content('vserver') map_list.append(lun_m) if tag is None: break return map_list def _get_igroup_by_initiator_query(self, initiator, tag): igroup_get_iter = netapp_api.NaElement('igroup-get-iter') igroup_get_iter.add_new_child('max-records', '100') if tag: igroup_get_iter.add_new_child('tag', tag, True) query = netapp_api.NaElement('query') igroup_info = netapp_api.NaElement('initiator-group-info') query.add_child_elem(igroup_info) igroup_info.add_new_child('vserver', self.vserver) initiators = netapp_api.NaElement('initiators') igroup_info.add_child_elem(initiators) igroup_get_iter.add_child_elem(query) initiators.add_node_with_children( 'initiator-info', **{'initiator-name': initiator}) # limit results to just the attributes of interest desired_attrs = netapp_api.NaElement('desired-attributes') desired_igroup_info = netapp_api.NaElement('initiator-group-info') desired_igroup_info.add_node_with_children( 'initiators', **{'initiator-info': None}) desired_igroup_info.add_new_child('vserver', None) desired_igroup_info.add_new_child('initiator-group-name', None) desired_igroup_info.add_new_child('initiator-group-type', None) desired_igroup_info.add_new_child('initiator-group-os-type', None) desired_attrs.add_child_elem(desired_igroup_info) igroup_get_iter.add_child_elem(desired_attrs) return igroup_get_iter def get_igroup_by_initiators(self, initiator_list): """Get igroups exactly matching a set of initiators.""" tag = None igroup_list = [] if not initiator_list: return igroup_list initiator_set = set(initiator_list) while True: # C-mode getter APIs can't do an 'and' query, so match the first # initiator (which will greatly narrow the search results) and # filter the rest in this method. 
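# --- Illustrative sketch (editor's addition, not part of the driver). ---
# The igroup query built above can only match a single initiator, so the
# "exact set of initiators" semantics are enforced client-side after the
# results come back. A minimal stand-alone version of that filtering step,
# with plain dicts standing in for the NaElement results, could look like:
def _filter_exact_igroups(candidate_igroups, initiator_list):
    """Keep only igroups whose initiator set matches initiator_list exactly."""
    wanted = set(initiator_list)
    return [name for name, initiators in candidate_igroups.items()
            if set(initiators) == wanted]

# _filter_exact_igroups({'ig1': ['iqn.a', 'iqn.b'], 'ig2': ['iqn.a']},
#                       ['iqn.a', 'iqn.b'])   ->   ['ig1']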
query = self._get_igroup_by_initiator_query(initiator_list[0], tag) result = self.connection.invoke_successfully(query, True) tag = result.get_child_content('next-tag') num_records = result.get_child_content('num-records') if num_records and int(num_records) >= 1: for igroup_info in result.get_child_by_name( 'attributes-list').get_children(): initiator_set_for_igroup = set() for initiator_info in igroup_info.get_child_by_name( 'initiators').get_children(): initiator_set_for_igroup.add( initiator_info.get_child_content('initiator-name')) if initiator_set == initiator_set_for_igroup: igroup = {'initiator-group-os-type': igroup_info.get_child_content( 'initiator-group-os-type'), 'initiator-group-type': igroup_info.get_child_content( 'initiator-group-type'), 'initiator-group-name': igroup_info.get_child_content( 'initiator-group-name')} igroup_list.append(igroup) if tag is None: break return igroup_list def _validate_qos_policy_group(self, is_adaptive, spec=None, qos_min_support=False): if is_adaptive and not self.features.ADAPTIVE_QOS: msg = _("Adaptive QoS feature requires ONTAP 9.4 or later.") raise na_utils.NetAppDriverException(msg) if not spec: return qos_spec_support = [ {'key': 'min_throughput', 'support': qos_min_support, 'reason': _('is not supported by this back end.')}, {'key': 'block_size', 'support': self.features.ADAPTIVE_QOS_BLOCK_SIZE, 'reason': _('requires ONTAP >= 9.5.')}, {'key': 'expected_iops_allocation', 'support': self.features.ADAPTIVE_QOS_EXPECTED_IOPS_ALLOCATION, 'reason': _('requires ONTAP >= 9.5.')}, ] for feature in qos_spec_support: if feature['key'] in spec and not feature['support']: msg = '%(key)s %(reason)s' raise na_utils.NetAppDriverException(msg % { 'key': feature['key'], 'reason': feature['reason']}) def clone_lun(self, volume, name, new_name, space_reserved='true', qos_policy_group_name=None, src_block=0, dest_block=0, block_count=0, source_snapshot=None, is_snapshot=False, qos_policy_group_is_adaptive=False): self._validate_qos_policy_group(qos_policy_group_is_adaptive) # ONTAP handles only 128 MB per call as of v9.1 bc_limit = 2 ** 18 # 2^18 blocks * 512 bytes/block = 128 MB z_calls = int(math.ceil(block_count / float(bc_limit))) zbc = block_count if z_calls == 0: z_calls = 1 for _call in range(0, z_calls): if zbc > bc_limit: block_count = bc_limit zbc -= bc_limit else: block_count = zbc is_sub_clone = block_count > 0 zapi_args = { 'volume': volume, 'source-path': name, 'destination-path': new_name, } if not is_sub_clone: zapi_args['space-reserve'] = space_reserved if source_snapshot: zapi_args['snapshot-name'] = source_snapshot if is_snapshot and self.features.BACKUP_CLONE_PARAM: zapi_args['is-backup'] = 'true' clone_create = netapp_api.NaElement.create_node_with_children( 'clone-create', **zapi_args) if qos_policy_group_name is not None: child_name = 'qos-%spolicy-group-name' % ( 'adaptive-' if qos_policy_group_is_adaptive else '') clone_create.add_new_child(child_name, qos_policy_group_name) if is_sub_clone: block_ranges = netapp_api.NaElement("block-ranges") segments = int(math.ceil(block_count / float(bc_limit))) bc = block_count for _segment in range(0, segments): if bc > bc_limit: block_count = bc_limit bc -= bc_limit else: block_count = bc block_range =\ netapp_api.NaElement.create_node_with_children( 'block-range', **{'source-block-number': str(src_block), 'destination-block-number': str(dest_block), 'block-count': str(int(block_count))}) block_ranges.add_child_elem(block_range) src_block += int(block_count) dest_block += int(block_count) 
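# --- Illustrative sketch (editor's addition, not part of the driver). ---
# The sub-clone loop above splits a large block range into chunks of at most
# bc_limit = 2**18 blocks (2**18 blocks * 512 bytes/block = 128 MB per
# clone-create call). The same chunking, written as a stand-alone helper:
def _split_block_ranges(src_block, dest_block, block_count, bc_limit=2 ** 18):
    """Yield (source, destination, count) tuples no larger than bc_limit."""
    remaining = block_count
    while remaining > 0:
        count = min(remaining, bc_limit)
        yield (src_block, dest_block, count)
        src_block += count
        dest_block += count
        remaining -= count

# list(_split_block_ranges(0, 0, 600000))
#   -> [(0, 0, 262144), (262144, 262144, 262144), (524288, 524288, 75712)]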
clone_create.add_child_elem(block_ranges) self.connection.invoke_successfully(clone_create, True) def start_file_copy(self, file_name, dest_ontap_volume, src_ontap_volume=None, dest_file_name=None): """Starts a file copy operation between ONTAP volumes.""" if src_ontap_volume is None: src_ontap_volume = dest_ontap_volume if dest_file_name is None: dest_file_name = file_name api_args = { 'source-paths': [{ 'sfod-operation-path': '%s/%s' % (src_ontap_volume, file_name) }], 'destination-paths': [{ 'sfod-operation-path': '%s/%s' % (dest_ontap_volume, dest_file_name), }], } result = self.connection.send_request('file-copy-start', api_args, enable_tunneling=False) return result.get_child_content('job-uuid') def destroy_file_copy(self, job_uuid): """Cancel/Destroy a in-progress file copy.""" api_args = { 'job-uuid': job_uuid, 'file-index': 0 } try: self.connection.send_request('file-copy-destroy', api_args, enable_tunneling=False) except netapp_api.NaApiError as e: msg = (_('Could not cancel lun copy for job uuid %s. %s')) raise na_utils.NetAppDriverException(msg % (job_uuid, e)) def get_file_copy_status(self, job_uuid): """Get file copy job status from a given job's UUID.""" api_args = { 'query': { 'file-copy-info': { 'job-uuid': job_uuid } } } result = self.connection.send_request('file-copy-get-iter', api_args, enable_tunneling=False) lun_copy_info_list = result.get_child_by_name('attributes-list') if lun_copy_info_list: lun_copy_info = lun_copy_info_list.get_children()[0] copy_status = { 'job-status': lun_copy_info.get_child_content('scanner-status'), 'last-failure-reason': lun_copy_info.get_child_content('last-failure-reason') } return copy_status return None def start_lun_copy(self, lun_name, dest_ontap_volume, dest_vserver, src_ontap_volume=None, src_vserver=None, dest_lun_name=None): """Starts a lun copy operation between ONTAP volumes.""" if src_ontap_volume is None: src_ontap_volume = dest_ontap_volume if src_vserver is None: src_vserver = dest_vserver if dest_lun_name is None: dest_lun_name = lun_name api_args = { 'source-vserver': src_vserver, 'destination-vserver': dest_vserver, 'paths': [{ 'lun-path-pair': { 'destination-path': '/vol/%s/%s' % (dest_ontap_volume, dest_lun_name), 'source-path': '/vol/%s/%s' % (src_ontap_volume, lun_name)} }], } result = self.connection.send_request('lun-copy-start', api_args, enable_tunneling=False) return result.get_child_content('job-uuid') def cancel_lun_copy(self, job_uuid): """Cancel an in-progress lun copy.""" api_args = { 'job-uuid': job_uuid } try: self.connection.send_request('lun-copy-cancel', api_args, enable_tunneling=False) except netapp_api.NaApiError as e: msg = (_('Could not cancel lun copy for job uuid %s. 
%s')) raise na_utils.NetAppDriverException(msg % (job_uuid, e)) def get_lun_copy_status(self, job_uuid): """Get lun copy job status from a given job's UUID.""" api_args = { 'query': { 'lun-copy-info': { 'job-uuid': job_uuid } } } result = self.connection.send_request('lun-copy-get-iter', api_args, enable_tunneling=False) lun_copy_info_list = result.get_child_by_name('attributes-list') if lun_copy_info_list: lun_copy_info = lun_copy_info_list.get_children()[0] copy_status = { 'job-status': lun_copy_info.get_child_content('job-status'), 'last-failure-reason': lun_copy_info.get_child_content('last-failure-reason') } return copy_status return None def start_lun_move(self, lun_name, dest_ontap_volume, src_ontap_volume=None, dest_lun_name=None): """Starts a lun move operation between ONTAP volumes.""" if dest_lun_name is None: dest_lun_name = lun_name if src_ontap_volume is None: src_ontap_volume = dest_ontap_volume api_args = { 'paths': [{ 'lun-path-pair': { 'destination-path': '/vol/%s/%s' % (dest_ontap_volume, dest_lun_name), 'source-path': '/vol/%s/%s' % (src_ontap_volume, lun_name)} }] } result = self.connection.send_request('lun-move-start', api_args) return result.get_child_content('job-uuid') def get_lun_move_status(self, job_uuid): """Get lun move job status from a given job's UUID.""" api_args = { 'query': { 'lun-move-info': { 'job-uuid': job_uuid } } } result = self.connection.send_request('lun-move-get-iter', api_args) lun_move_info_list = result.get_child_by_name('attributes-list') if lun_move_info_list: lun_move_info = lun_move_info_list.get_children()[0] move_status = { 'job-status': lun_move_info.get_child_content('job-status'), 'last-failure-reason': lun_move_info.get_child_content('last-failure-reason') } return move_status return None def get_lun_by_args(self, **args): """Retrieves LUN with specified args.""" lun_iter = netapp_api.NaElement('lun-get-iter') lun_iter.add_new_child('max-records', '100') query = netapp_api.NaElement('query') lun_iter.add_child_elem(query) query.add_node_with_children('lun-info', **args) luns = self.connection.invoke_successfully(lun_iter, True) attr_list = luns.get_child_by_name('attributes-list') if not attr_list: return [] lun_list = [self._create_lun_meta(lun) for lun in attr_list.get_children()] return lun_list def file_assign_qos(self, flex_vol, qos_policy_group_name, qos_policy_group_is_adaptive, file_path): """Assigns the named QoS policy-group to a file.""" self._validate_qos_policy_group(qos_policy_group_is_adaptive) qos_arg_name = "qos-%spolicy-group-name" % ( "adaptive-" if qos_policy_group_is_adaptive else "") api_args = { 'volume': flex_vol, qos_arg_name: qos_policy_group_name, 'file': file_path, 'vserver': self.vserver, } return self.connection.send_request('file-assign-qos', api_args, False) def provision_qos_policy_group(self, qos_policy_group_info, qos_min_support): """Create QOS policy group on the backend if appropriate.""" if qos_policy_group_info is None: return # Legacy QOS uses externally provisioned QOS policy group, # so we don't need to create one on the backend. 
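# --- Illustrative sketch (editor's addition, not part of the driver). ---
# provision_qos_policy_group() expects qos_policy_group_info to carry either a
# 'legacy' entry (an externally provisioned policy, so nothing is created on
# the backend) or a 'spec' entry describing the policy to create or modify.
# A plausible spec-style value, inferred only from the keys this module reads
# (the exact structure comes from na_utils and may differ), might be:
example_qos_policy_group_info = {
    'legacy': None,
    'spec': {
        'policy_name': 'openstack-abc123',   # hypothetical policy name
        'max_throughput': '2000iops',        # hypothetical throughput limit
    },
}
# With 'legacy' set, or with no 'spec' at all, the method returns without
# creating anything on the backend.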
legacy = qos_policy_group_info.get('legacy') if legacy: return spec = qos_policy_group_info.get('spec') if not spec: return is_adaptive = na_utils.is_qos_policy_group_spec_adaptive( qos_policy_group_info) self._validate_qos_policy_group(is_adaptive, spec=spec, qos_min_support=qos_min_support) if is_adaptive: if not self.qos_policy_group_exists(spec['policy_name'], is_adaptive=True): self.qos_adaptive_policy_group_create(spec) else: self.qos_adaptive_policy_group_modify(spec) else: if not self.qos_policy_group_exists(spec['policy_name']): self.qos_policy_group_create(spec) else: self.qos_policy_group_modify(spec) def qos_policy_group_exists(self, qos_policy_group_name, is_adaptive=False): """Checks if a QOS policy group exists.""" query_name = 'qos-%spolicy-group-info' % ( 'adaptive-' if is_adaptive else '') request_name = 'qos-%spolicy-group-get-iter' % ( 'adaptive-' if is_adaptive else '') api_args = { 'query': { query_name: { 'policy-group': qos_policy_group_name, }, }, 'desired-attributes': { query_name: { 'policy-group': None, }, }, } result = self.connection.send_request(request_name, api_args, False) return self._has_records(result) def _qos_spec_to_api_args(self, spec, **kwargs): """Convert a QoS spec to ZAPI args.""" formatted_spec = {k.replace('_', '-'): v for k, v in spec.items() if v} formatted_spec['policy-group'] = formatted_spec.pop('policy-name') formatted_spec = {**formatted_spec, **kwargs} return formatted_spec def qos_policy_group_create(self, spec): """Creates a QOS policy group.""" api_args = self._qos_spec_to_api_args( spec, vserver=self.vserver) return self.connection.send_request( 'qos-policy-group-create', api_args, False) def qos_adaptive_policy_group_create(self, spec): """Creates a QOS adaptive policy group.""" api_args = self._qos_spec_to_api_args( spec, vserver=self.vserver) return self.connection.send_request( 'qos-adaptive-policy-group-create', api_args, False) def qos_policy_group_modify(self, spec): """Modifies a QOS policy group.""" api_args = self._qos_spec_to_api_args(spec) return self.connection.send_request( 'qos-policy-group-modify', api_args, False) def qos_adaptive_policy_group_modify(self, spec): """Modifies a QOS adaptive policy group.""" api_args = self._qos_spec_to_api_args(spec) return self.connection.send_request( 'qos-adaptive-policy-group-modify', api_args, False) def qos_policy_group_rename(self, qos_policy_group_name, new_name, is_adaptive=False): """Renames a QOS policy group.""" request_name = 'qos-%spolicy-group-rename' % ( 'adaptive-' if is_adaptive else '') api_args = { 'policy-group-name': qos_policy_group_name, 'new-name': new_name, } return self.connection.send_request(request_name, api_args, False) def mark_qos_policy_group_for_deletion(self, qos_policy_group_info, is_adaptive=False): """Soft delete a QOS policy group backing a cinder volume.""" if qos_policy_group_info is None: return spec = qos_policy_group_info.get('spec') # For cDOT we want to delete the QoS policy group that we created for # this cinder volume. Because the QoS policy may still be "in use" # after the zapi call to delete the volume itself returns successfully, # we instead rename the QoS policy group using a specific pattern and # later attempt on a best effort basis to delete any QoS policy groups # matching that pattern. 
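# --- Illustrative sketch (editor's addition, not part of the driver). ---
# The "soft delete" described in the comment above renames the policy group to
# a reserved prefix and relies on a later best-effort sweep to remove every
# group carrying that prefix. Reduced to plain Python over a dict of policies
# (the prefix below is hypothetical, not the driver's actual constant):
def _soft_delete_policy(policies, name, deleted_prefix='deleted-example-'):
    """Rename a policy so a periodic cleanup pass can remove it later."""
    if name in policies:
        policies[deleted_prefix + name] = policies.pop(name)

def _sweep_deleted_policies(policies, deleted_prefix='deleted-example-'):
    """Best-effort removal of every policy marked by the prefix."""
    for name in [n for n in policies if n.startswith(deleted_prefix)]:
        policies.pop(name, None)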
if spec: current_name = spec['policy_name'] new_name = client_base.DELETED_PREFIX + current_name try: self.qos_policy_group_rename(current_name, new_name, is_adaptive) except netapp_api.NaApiError as ex: LOG.warning('Rename failure in cleanup of cDOT QOS policy ' 'group %(name)s: %(ex)s', {'name': current_name, 'ex': ex}) # Attempt to delete any QoS policies named "delete-openstack-*". self.remove_unused_qos_policy_groups() def _send_qos_policy_group_delete_iter_request(self, is_adaptive=False): request_name = 'qos-%spolicy-group-delete-iter' % ( 'adaptive-' if is_adaptive else '') query_name = 'qos-%spolicy-group-info' % ( 'adaptive-' if is_adaptive else '') api_args = { 'query': { query_name: { 'policy-group': '%s*' % client_base.DELETED_PREFIX, 'vserver': self.vserver, } }, 'max-records': 3500, 'continue-on-failure': 'true', 'return-success-list': 'false', 'return-failure-list': 'false', } try: self.connection.send_request(request_name, api_args, False) except netapp_api.NaApiError as ex: msg = ('Could not delete QOS %(prefix)spolicy groups. ' 'Details: %(ex)s') msg_args = { 'prefix': 'adaptive ' if is_adaptive else '', 'ex': ex, } LOG.debug(msg, msg_args) def remove_unused_qos_policy_groups(self): """Deletes all QOS policy groups that are marked for deletion.""" self._send_qos_policy_group_delete_iter_request() if self.features.ADAPTIVE_QOS: self._send_qos_policy_group_delete_iter_request(is_adaptive=True) def set_lun_qos_policy_group(self, path, qos_policy_group, is_adaptive=False): """Sets qos_policy_group on a LUN.""" self._validate_qos_policy_group(is_adaptive) policy_group_key = 'qos-%spolicy-group' % ( 'adaptive-' if is_adaptive else '') api_args = { 'path': path, policy_group_key: qos_policy_group, } return self.connection.send_request( 'lun-set-qos-policy-group', api_args) def get_if_info_by_ip(self, ip): """Gets the network interface info by ip.""" net_if_iter = netapp_api.NaElement('net-interface-get-iter') net_if_iter.add_new_child('max-records', '10') query = netapp_api.NaElement('query') net_if_iter.add_child_elem(query) query.add_node_with_children( 'net-interface-info', **{'address': volume_utils.resolve_hostname(ip)}) result = self.connection.invoke_successfully(net_if_iter, True) num_records = result.get_child_content('num-records') if num_records and int(num_records) >= 1: attr_list = result.get_child_by_name('attributes-list') return [{'vserver': attr.get_child_content('vserver')} for attr in attr_list.get_children()] raise exception.NotFound( _('No interface found on cluster for ip %s') % ip) def get_vol_by_junc_vserver(self, vserver, junction): """Gets the volume by junction path and vserver.""" vol_iter = netapp_api.NaElement('volume-get-iter') vol_iter.add_new_child('max-records', '10') query = netapp_api.NaElement('query') vol_iter.add_child_elem(query) vol_attrs = netapp_api.NaElement('volume-attributes') query.add_child_elem(vol_attrs) vol_attrs.add_node_with_children( 'volume-id-attributes', **{'junction-path': junction, 'owning-vserver-name': vserver}) des_attrs = netapp_api.NaElement('desired-attributes') des_attrs.add_node_with_children('volume-attributes', **{'volume-id-attributes': None}) vol_iter.add_child_elem(des_attrs) result = self._invoke_vserver_api(vol_iter, vserver) num_records = result.get_child_content('num-records') if num_records and int(num_records) >= 1: volume_attr = self.get_unique_volume(result) vol_id = volume_attr.get_child_by_name('volume-id-attributes') return vol_id.get_child_content('name') msg_fmt = {'vserver': vserver, 
'junction': junction} raise exception.NotFound(_("No volume on cluster with vserver " "%(vserver)s and junction path " "%(junction)s ") % msg_fmt) def clone_file(self, flex_vol, src_path, dest_path, vserver, dest_exists=False, source_snapshot=None, is_snapshot=False): """Clones file on vserver.""" LOG.debug("Cloning with params volume %(volume)s, src %(src_path)s, " "dest %(dest_path)s, vserver %(vserver)s," "source_snapshot %(source_snapshot)s", {'volume': flex_vol, 'src_path': src_path, 'dest_path': dest_path, 'vserver': vserver, 'source_snapshot': source_snapshot}) zapi_args = { 'volume': flex_vol, 'source-path': src_path, 'destination-path': dest_path, } if is_snapshot and self.features.BACKUP_CLONE_PARAM: zapi_args['is-backup'] = 'true' if source_snapshot: zapi_args['snapshot-name'] = source_snapshot clone_create = netapp_api.NaElement.create_node_with_children( 'clone-create', **zapi_args) major, minor = self.connection.get_api_version() if major == 1 and minor >= 20 and dest_exists: clone_create.add_new_child('destination-exists', 'true') self._invoke_vserver_api(clone_create, vserver) def get_file_usage(self, path, vserver): """Gets the file unique bytes.""" LOG.debug('Getting file usage for %s', path) file_use = netapp_api.NaElement.create_node_with_children( 'file-usage-get', **{'path': path}) res = self._invoke_vserver_api(file_use, vserver) unique_bytes = res.get_child_content('unique-bytes') LOG.debug('file-usage for path %(path)s is %(bytes)s', {'path': path, 'bytes': unique_bytes}) return unique_bytes def check_cluster_api(self, object_name, operation_name, api): """Checks the availability of a cluster API. Returns True if the specified cluster API exists and may be called by the current user. The API is *called* on Data ONTAP versions prior to 8.2, while versions starting with 8.2 utilize an API designed for this purpose. """ if not self.features.USER_CAPABILITY_LIST: return self._check_cluster_api_legacy(api) else: return self._check_cluster_api(object_name, operation_name, api) def _check_cluster_api(self, object_name, operation_name, api): """Checks the availability of a cluster API. Returns True if the specified cluster API exists and may be called by the current user. This method assumes Data ONTAP 8.2 or higher. """ api_args = { 'query': { 'capability-info': { 'object-name': object_name, 'operation-list': { 'operation-info': { 'name': operation_name, }, }, }, }, 'desired-attributes': { 'capability-info': { 'operation-list': { 'operation-info': { 'api-name': None, }, }, }, }, } result = self.connection.send_request( 'system-user-capability-get-iter', api_args, False) if not self._has_records(result): return False capability_info_list = result.get_child_by_name( 'attributes-list') or netapp_api.NaElement('none') for capability_info in capability_info_list.get_children(): operation_list = capability_info.get_child_by_name( 'operation-list') or netapp_api.NaElement('none') for operation_info in operation_list.get_children(): api_name = operation_info.get_child_content('api-name') or '' api_names = api_name.split(',') if api in api_names: return True return False def _check_cluster_api_legacy(self, api): """Checks the availability of a cluster API. Returns True if the specified cluster API exists and may be called by the current user. This method should only be used for Data ONTAP 8.1, and only getter APIs may be tested because the API is actually called to perform the check. 
""" if not re.match(".*-get$|.*-get-iter$|.*-list-info$", api): raise ValueError(_('Non-getter API passed to API test method.')) try: self.connection.send_request(api, enable_tunneling=False) except netapp_api.NaApiError as ex: if ex.code in (netapp_api.EAPIPRIVILEGE, netapp_api.EAPINOTFOUND): return False return True def get_operational_lif_addresses(self): """Gets the IP addresses of operational LIFs on the vserver.""" net_interface_get_iter_args = { 'query': { 'net-interface-info': { 'operational-status': 'up' } }, 'desired-attributes': { 'net-interface-info': { 'address': None, } } } result = self.send_iter_request('net-interface-get-iter', net_interface_get_iter_args) lif_info_list = result.get_child_by_name( 'attributes-list') or netapp_api.NaElement('none') return [lif_info.get_child_content('address') for lif_info in lif_info_list.get_children()] def get_flexvol_capacity(self, flexvol_path=None, flexvol_name=None): """Gets total capacity and free capacity, in bytes, of the flexvol.""" volume_id_attributes = {} if flexvol_path: volume_id_attributes['junction-path'] = flexvol_path if flexvol_name: volume_id_attributes['name'] = flexvol_name api_args = { 'query': { 'volume-attributes': { 'volume-id-attributes': volume_id_attributes, } }, 'desired-attributes': { 'volume-attributes': { 'volume-id-attributes': { 'style-extended': None, }, 'volume-space-attributes': { 'size-available': None, 'size-total': None, } } }, } result = self.send_iter_request('volume-get-iter', api_args) if self._get_record_count(result) < 1: msg = _('Volume %s not found.') msg_args = flexvol_path or flexvol_name raise na_utils.NetAppDriverException(msg % msg_args) volume_attributes = self.get_unique_volume(result) volume_space_attributes = volume_attributes.get_child_by_name( 'volume-space-attributes') size_available = float( volume_space_attributes.get_child_content('size-available')) size_total = float( volume_space_attributes.get_child_content('size-total')) return { 'size-total': size_total, 'size-available': size_available, } def get_unique_volume(self, get_volume_result): """Get the unique FlexVol or FleGroup volume from a get volume list""" volume_list = [] attributes_list = get_volume_result.get_child_by_name( 'attributes-list') or netapp_api.NaElement('none') for volume_attributes in attributes_list.get_children(): volume_id_attributes = volume_attributes.get_child_by_name( 'volume-id-attributes') or netapp_api.NaElement('none') style = volume_id_attributes.get_child_content('style-extended') if style == 'flexvol' or style == 'flexgroup': volume_list.append(volume_attributes) if len(volume_list) != 1: msg = _('Could not find unique volume. 
Volumes found: %(vol)s.') msg_args = {'vol': volume_list} raise exception.VolumeBackendAPIException(data=msg % msg_args) return volume_list[0] def list_flexvols(self): """Returns the names of the flexvols on the controller.""" api_args = { 'query': { 'volume-attributes': { 'volume-id-attributes': { 'type': 'rw', 'style': 'flex', }, 'volume-state-attributes': { 'is-vserver-root': 'false', 'is-inconsistent': 'false', 'is-invalid': 'false', 'state': 'online', }, }, }, 'desired-attributes': { 'volume-attributes': { 'volume-id-attributes': { 'name': None, }, }, }, } result = self.send_iter_request('volume-get-iter', api_args) if not self._has_records(result): return [] volumes = [] attributes_list = result.get_child_by_name( 'attributes-list') or netapp_api.NaElement('none') for volume_attributes in attributes_list.get_children(): volume_id_attributes = volume_attributes.get_child_by_name( 'volume-id-attributes') or netapp_api.NaElement('none') volumes.append(volume_id_attributes.get_child_content('name')) return volumes def get_volume_state(self, junction_path=None, name=None): """Returns volume state for a given name or junction path""" volume_id_attributes = {} if junction_path: volume_id_attributes['junction-path'] = junction_path if name: volume_id_attributes['name'] = name api_args = { 'query': { 'volume-attributes': { 'volume-id-attributes': volume_id_attributes } }, 'desired-attributes': { 'volume-attributes': { 'volume-id-attributes': { 'style-extended': None }, 'volume-state-attributes': { 'state': None } } } } result = self.send_iter_request('volume-get-iter', api_args) try: volume_attributes = self.get_unique_volume(result) except exception.VolumeBackendAPIException: return None volume_state_attributes = volume_attributes.get_child_by_name( 'volume-state-attributes') or netapp_api.NaElement('none') volume_state = volume_state_attributes.get_child_content('state') return volume_state def get_flexvol(self, flexvol_path=None, flexvol_name=None): """Get flexvol attributes needed for the storage service catalog.""" volume_id_attributes = {'type': 'rw', 'style': 'flex'} if flexvol_path: volume_id_attributes['junction-path'] = flexvol_path if flexvol_name: volume_id_attributes['name'] = flexvol_name api_args = { 'query': { 'volume-attributes': { 'volume-id-attributes': volume_id_attributes, 'volume-state-attributes': { 'is-vserver-root': 'false', 'is-inconsistent': 'false', 'is-invalid': 'false', 'state': 'online', }, }, }, 'desired-attributes': { 'volume-attributes': { 'volume-id-attributes': { 'name': None, 'owning-vserver-name': None, 'junction-path': None, 'aggr-list': { 'aggr-name': None, }, 'containing-aggregate-name': None, 'type': None, 'style-extended': None, }, 'volume-mirror-attributes': { 'is-data-protection-mirror': None, 'is-replica-volume': None, }, 'volume-space-attributes': { 'is-space-guarantee-enabled': None, 'space-guarantee': None, 'percentage-snapshot-reserve': None, 'size': None, }, 'volume-qos-attributes': { 'policy-group-name': None, }, 'volume-snapshot-attributes': { 'snapshot-policy': None, }, 'volume-language-attributes': { 'language-code': None, }, }, }, } result = self.send_iter_request('volume-get-iter', api_args) volume_attributes = self.get_unique_volume(result) volume_id_attributes = volume_attributes.get_child_by_name( 'volume-id-attributes') or netapp_api.NaElement('none') aggr_name = volume_id_attributes.get_child_content( 'containing-aggregate-name') aggr = [aggr_name] if aggr_name else None if not aggr: aggr_list_attr = 
volume_id_attributes.get_child_by_name( 'aggr-list') or netapp_api.NaElement('none') aggr = [aggr_elem.get_content() for aggr_elem in aggr_list_attr.get_children()] volume_space_attributes = volume_attributes.get_child_by_name( 'volume-space-attributes') or netapp_api.NaElement('none') volume_qos_attributes = volume_attributes.get_child_by_name( 'volume-qos-attributes') or netapp_api.NaElement('none') volume_snapshot_attributes = volume_attributes.get_child_by_name( 'volume-snapshot-attributes') or netapp_api.NaElement('none') volume_language_attributes = volume_attributes.get_child_by_name( 'volume-language-attributes') or netapp_api.NaElement('none') volume = { 'name': volume_id_attributes.get_child_content('name'), 'vserver': volume_id_attributes.get_child_content( 'owning-vserver-name'), 'junction-path': volume_id_attributes.get_child_content( 'junction-path'), 'aggregate': aggr, 'type': volume_id_attributes.get_child_content('type'), 'space-guarantee-enabled': strutils.bool_from_string( volume_space_attributes.get_child_content( 'is-space-guarantee-enabled')), 'space-guarantee': volume_space_attributes.get_child_content( 'space-guarantee'), 'percentage-snapshot-reserve': ( volume_space_attributes.get_child_content( 'percentage-snapshot-reserve')), 'size': volume_space_attributes.get_child_content('size'), 'qos-policy-group': volume_qos_attributes.get_child_content( 'policy-group-name'), 'snapshot-policy': volume_snapshot_attributes.get_child_content( 'snapshot-policy'), 'language': volume_language_attributes.get_child_content( 'language-code'), 'style-extended': volume_id_attributes.get_child_content( 'style-extended'), } return volume def get_flexvol_dedupe_info(self, flexvol_name): """Get dedupe attributes needed for the storage service catalog.""" api_args = { 'query': { 'sis-status-info': { 'path': '/vol/%s' % flexvol_name, }, }, 'desired-attributes': { 'sis-status-info': { 'state': None, 'is-compression-enabled': None, 'logical-data-size': None, 'logical-data-limit': None, }, }, } no_dedupe_response = { 'compression': False, 'dedupe': False, 'logical-data-size': 0, 'logical-data-limit': 1, } try: result = self.send_iter_request('sis-get-iter', api_args) except netapp_api.NaApiError: LOG.exception('Failed to get dedupe info for volume %s.', flexvol_name) return no_dedupe_response if self._get_record_count(result) != 1: return no_dedupe_response attributes_list = result.get_child_by_name( 'attributes-list') or netapp_api.NaElement('none') sis_status_info = attributes_list.get_child_by_name( 'sis-status-info') or netapp_api.NaElement('none') logical_data_size = sis_status_info.get_child_content( 'logical-data-size') or 0 logical_data_limit = sis_status_info.get_child_content( 'logical-data-limit') or 1 sis = { 'compression': strutils.bool_from_string( sis_status_info.get_child_content('is-compression-enabled')), 'dedupe': na_utils.to_bool( sis_status_info.get_child_content('state')), 'logical-data-size': int(logical_data_size), 'logical-data-limit': int(logical_data_limit), } return sis def get_flexvol_dedupe_used_percent(self, flexvol_name): """Determine how close a flexvol is to its shared block limit.""" # Note(cknight): The value returned by this method is computed from # values returned by two different APIs, one of which was new in # Data ONTAP 8.3. 
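# --- Illustrative sketch (editor's addition, not part of the driver). ---
# The percentage computed below combines the deduplicated logical data size
# with the size of still-unsplit clones, relative to the flexvol's logical
# data limit:
#
#     used % = 100 * (logical-data-size + unsplit-size) / logical-data-limit
#
# As a stand-alone calculation:
def _dedupe_used_percent(logical_data_size, unsplit_size, logical_data_limit):
    """Return how close the flexvol is to its shared block limit, in percent."""
    return (100.0 * float(logical_data_size + unsplit_size)
            / logical_data_limit)

# _dedupe_used_percent(40, 10, 200)  ->  25.0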
if not self.features.CLONE_SPLIT_STATUS: return 0.0 dedupe_info = self.get_flexvol_dedupe_info(flexvol_name) clone_split_info = self.get_clone_split_info(flexvol_name) total_dedupe_blocks = (dedupe_info.get('logical-data-size') + clone_split_info.get('unsplit-size')) dedupe_used_percent = (100.0 * float(total_dedupe_blocks) / dedupe_info.get('logical-data-limit')) return dedupe_used_percent def get_clone_split_info(self, flexvol_name): """Get the status of unsplit file/LUN clones in a flexvol.""" try: result = self.connection.send_request( 'clone-split-status', {'volume-name': flexvol_name}) except netapp_api.NaApiError: LOG.exception('Failed to get clone split info for volume %s.', flexvol_name) return {'unsplit-size': 0, 'unsplit-clone-count': 0} clone_split_info = result.get_child_by_name( 'clone-split-info') or netapp_api.NaElement('none') unsplit_size = clone_split_info.get_child_content('unsplit-size') or 0 unsplit_clone_count = clone_split_info.get_child_content( 'unsplit-clone-count') or 0 return { 'unsplit-size': int(unsplit_size), 'unsplit-clone-count': int(unsplit_clone_count), } def is_flexvol_mirrored(self, flexvol_name, vserver_name): """Check if flexvol is a SnapMirror source.""" api_args = { 'query': { 'snapmirror-info': { 'source-vserver': vserver_name, 'source-volume': flexvol_name, 'mirror-state': 'snapmirrored', 'relationship-type': 'data_protection', }, }, 'desired-attributes': { 'snapmirror-info': None, }, } try: result = self.send_iter_request('snapmirror-get-iter', api_args) except netapp_api.NaApiError: LOG.exception('Failed to get SnapMirror info for volume %s.', flexvol_name) return False if not self._has_records(result): return False return True def is_flexvol_encrypted(self, flexvol_name, vserver_name): """Check if a flexvol is encrypted.""" if not self.features.FLEXVOL_ENCRYPTION: return False api_args = { 'query': { 'volume-attributes': { 'encrypt': 'true', 'volume-id-attributes': { 'name': flexvol_name, 'owning-vserver-name': vserver_name, }, }, }, 'desired-attributes': { 'volume-attributes': { 'encrypt': None, }, }, } try: result = self.send_iter_request('volume-get-iter', api_args) except netapp_api.NaApiError: LOG.exception('Failed to get Encryption info for volume %s.', flexvol_name) return False if not self._has_records(result): return False return True def is_qos_min_supported(self, is_nfs, node_name): """Check if the node supports QoS minimum.""" if node_name is None: # whether no access to node name (SVM account or error), the QoS # min support is dropped. 
return False qos_min_name = na_utils.qos_min_feature_name(is_nfs, node_name) return getattr(self.features, qos_min_name, False).__bool__() def create_volume_async(self, name, aggregate_list, size_gb, space_guarantee_type=None, snapshot_policy=None, language=None, dedupe_enabled=False, compression_enabled=False, snapshot_reserve=None, volume_type='rw'): """Creates a FlexGroup volume asynchronously.""" api_args = { 'aggr-list': [{'aggr-name': aggr} for aggr in aggregate_list], 'size': size_gb * units.Gi, 'volume-name': name, 'volume-type': volume_type, } if volume_type == 'dp': snapshot_policy = None else: api_args['junction-path'] = '/%s' % name if snapshot_policy is not None: api_args['snapshot-policy'] = snapshot_policy if space_guarantee_type: api_args['space-reserve'] = space_guarantee_type if language is not None: api_args['language-code'] = language if snapshot_reserve is not None: api_args['percentage-snapshot-reserve'] = str(snapshot_reserve) result = self.connection.send_request('volume-create-async', api_args) job_info = { 'status': result.get_child_content('result-status'), 'jobid': result.get_child_content('result-jobid'), 'error-code': result.get_child_content('result-error-code'), 'error-message': result.get_child_content('result-error-message') } return job_info def create_flexvol(self, flexvol_name, aggregate_name, size_gb, space_guarantee_type=None, snapshot_policy=None, language=None, dedupe_enabled=False, compression_enabled=False, snapshot_reserve=None, volume_type='rw'): """Creates a volume.""" api_args = { 'containing-aggr-name': aggregate_name, 'size': str(size_gb) + 'g', 'volume': flexvol_name, 'volume-type': volume_type, } if volume_type == 'dp': snapshot_policy = None else: api_args['junction-path'] = '/%s' % flexvol_name if snapshot_policy is not None: api_args['snapshot-policy'] = snapshot_policy if space_guarantee_type: api_args['space-reserve'] = space_guarantee_type if language is not None: api_args['language-code'] = language if snapshot_reserve is not None: api_args['percentage-snapshot-reserve'] = str(snapshot_reserve) self.connection.send_request('volume-create', api_args) # cDOT compression requires that deduplication be enabled. 
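# --- Illustrative sketch (editor's addition, not part of the driver). ---
# As the comment above notes, ONTAP compression depends on deduplication, so
# the driver enables dedupe whenever either efficiency feature is requested
# and only then enables compression. The ordering decision reduces to:
def _efficiency_calls(dedupe_enabled, compression_enabled):
    """Return the efficiency operations to apply, in order."""
    calls = []
    if dedupe_enabled or compression_enabled:
        calls.append('enable_dedupe')
    if compression_enabled:
        calls.append('enable_compression')
    return calls

# _efficiency_calls(False, True)  ->  ['enable_dedupe', 'enable_compression']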
if dedupe_enabled or compression_enabled: self.enable_flexvol_dedupe(flexvol_name) if compression_enabled: self.enable_flexvol_compression(flexvol_name) def flexvol_exists(self, volume_name): """Checks if a flexvol exists on the storage array.""" LOG.debug('Checking if volume %s exists', volume_name) api_args = { 'query': { 'volume-attributes': { 'volume-id-attributes': { 'name': volume_name, }, }, }, 'desired-attributes': { 'volume-attributes': { 'volume-id-attributes': { 'name': None, }, }, }, } result = self.send_iter_request('volume-get-iter', api_args) return self._has_records(result) def rename_flexvol(self, orig_flexvol_name, new_flexvol_name): """Set flexvol name.""" api_args = { 'volume': orig_flexvol_name, 'new-volume-name': new_flexvol_name, } self.connection.send_request('volume-rename', api_args) def rename_file(self, orig_file_name, new_file_name): """Rename a volume file.""" LOG.debug("Renaming the file %(original)s to %(new)s.", {'original': orig_file_name, 'new': new_file_name}) api_args = { 'from-path': orig_file_name, 'to-path': new_file_name, } self.connection.send_request('file-rename-file', api_args) def mount_flexvol(self, flexvol_name, junction_path=None): """Mounts a volume on a junction path.""" api_args = { 'volume-name': flexvol_name, 'junction-path': (junction_path if junction_path else '/%s' % flexvol_name) } self.connection.send_request('volume-mount', api_args) def enable_flexvol_dedupe(self, flexvol_name): """Enable deduplication on volume.""" api_args = {'path': '/vol/%s' % flexvol_name} self.connection.send_request('sis-enable', api_args) def disable_flexvol_dedupe(self, flexvol_name): """Disable deduplication on volume.""" api_args = {'path': '/vol/%s' % flexvol_name} self.connection.send_request('sis-disable', api_args) def enable_flexvol_compression(self, flexvol_name): """Enable compression on volume.""" api_args = { 'path': '/vol/%s' % flexvol_name, 'enable-compression': 'true' } self.connection.send_request('sis-set-config', api_args) def disable_flexvol_compression(self, flexvol_name): """Disable compression on volume.""" api_args = { 'path': '/vol/%s' % flexvol_name, 'enable-compression': 'false' } self.connection.send_request('sis-set-config', api_args) def enable_volume_dedupe_async(self, volume_name): """Enable deduplication on FlexVol/FlexGroup volume asynchronously.""" api_args = {'volume-name': volume_name} self.connection.send_request('sis-enable-async', api_args) def disable_volume_dedupe_async(self, volume_name): """Disable deduplication on FlexVol/FlexGroup volume asynchronously.""" api_args = {'volume-name': volume_name} self.connection.send_request('sis-disable-async', api_args) def enable_volume_compression_async(self, volume_name): """Enable compression on FlexVol/FlexGroup volume asynchronously.""" api_args = { 'volume-name': volume_name, 'enable-compression': 'true' } self.connection.send_request('sis-set-config-async', api_args) def disable_volume_compression_async(self, volume_name): """Disable compression on FlexVol/FlexGroup volume asynchronously.""" api_args = { 'volume-name': volume_name, 'enable-compression': 'false' } self.connection.send_request('sis-set-config-async', api_args) @volume_utils.trace_method def delete_file(self, path_to_file): """Delete file at path.""" api_args = { 'path': path_to_file, } # Use fast clone deletion engine if it is supported. 
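# --- Illustrative sketch (editor's addition, not part of the driver). ---
# Feature-gated request arguments, as used just below for the fast clone
# deletion engine, follow a simple pattern: start from the mandatory
# arguments and add the optional flag only when the backend advertises
# support for it.
def _build_delete_args(path, fast_clone_delete_supported):
    """Assemble file-delete arguments, adding the clone flag when supported."""
    args = {'path': path}
    if fast_clone_delete_supported:
        args['is-clone-file'] = 'true'
    return args

# _build_delete_args('/vol/vol1/file1', True)
#   -> {'path': '/vol/vol1/file1', 'is-clone-file': 'true'}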
if self.features.FAST_CLONE_DELETE: api_args['is-clone-file'] = 'true' self.connection.send_request('file-delete-file', api_args, True) def _get_aggregates(self, aggregate_names=None, desired_attributes=None): query = { 'aggr-attributes': { 'aggregate-name': '|'.join(aggregate_names), } } if aggregate_names else None api_args = {} if query: api_args['query'] = query if desired_attributes: api_args['desired-attributes'] = desired_attributes result = self.connection.send_request('aggr-get-iter', api_args, enable_tunneling=False) if not self._has_records(result): return [] else: return result.get_child_by_name('attributes-list').get_children() def get_node_for_aggregate(self, aggregate_name): """Get home node for the specified aggregate. This API could return None, most notably if it was sent to a Vserver LIF, so the caller must be able to handle that case. """ if not aggregate_name: return None desired_attributes = { 'aggr-attributes': { 'aggregate-name': None, 'aggr-ownership-attributes': { 'home-name': None, }, }, } try: aggrs = self._get_aggregates(aggregate_names=[aggregate_name], desired_attributes=desired_attributes) except netapp_api.NaApiError as e: if e.code == netapp_api.EAPINOTFOUND: return None else: raise if len(aggrs) < 1: return None aggr_ownership_attrs = aggrs[0].get_child_by_name( 'aggr-ownership-attributes') or netapp_api.NaElement('none') return aggr_ownership_attrs.get_child_content('home-name') def get_aggregate(self, aggregate_name): """Get aggregate attributes needed for the storage service catalog.""" if not aggregate_name: return {} desired_attributes = { 'aggr-attributes': { 'aggregate-name': None, 'aggr-raid-attributes': { 'raid-type': None, 'is-hybrid': None, }, 'aggr-ownership-attributes': { 'home-name': None, }, }, } try: aggrs = self._get_aggregates(aggregate_names=[aggregate_name], desired_attributes=desired_attributes) except netapp_api.NaApiError: LOG.exception('Failed to get info for aggregate %s.', aggregate_name) return {} if len(aggrs) < 1: return {} aggr_attributes = aggrs[0] aggr_raid_attrs = aggr_attributes.get_child_by_name( 'aggr-raid-attributes') or netapp_api.NaElement('none') aggr_ownership_attrs = aggrs[0].get_child_by_name( 'aggr-ownership-attributes') or netapp_api.NaElement('none') aggregate = { 'name': aggr_attributes.get_child_content('aggregate-name'), 'raid-type': aggr_raid_attrs.get_child_content('raid-type'), 'is-hybrid': strutils.bool_from_string( aggr_raid_attrs.get_child_content('is-hybrid')), 'node-name': aggr_ownership_attrs.get_child_content('home-name'), } return aggregate def get_aggregate_disk_types(self, aggregate_name): """Get the disk type(s) of an aggregate.""" disk_types = set() disk_types.update(self._get_aggregate_disk_types(aggregate_name)) if self.features.ADVANCED_DISK_PARTITIONING: disk_types.update(self._get_aggregate_disk_types(aggregate_name, shared=True)) return list(disk_types) if disk_types else None def _get_aggregate_disk_types(self, aggregate_name, shared=False): """Get the disk type(s) of an aggregate (may be a list).""" disk_types = set() if shared: disk_raid_info = { 'disk-shared-info': { 'aggregate-list': { 'shared-aggregate-info': { 'aggregate-name': aggregate_name, }, }, }, } else: disk_raid_info = { 'disk-aggregate-info': { 'aggregate-name': aggregate_name, }, } api_args = { 'query': { 'storage-disk-info': { 'disk-raid-info': disk_raid_info, }, }, 'desired-attributes': { 'storage-disk-info': { 'disk-raid-info': { 'effective-disk-type': None, }, }, }, } try: result = self.send_iter_request( 
'storage-disk-get-iter', api_args, enable_tunneling=False) except netapp_api.NaApiError: LOG.exception('Failed to get disk info for aggregate %s.', aggregate_name) return disk_types attributes_list = result.get_child_by_name( 'attributes-list') or netapp_api.NaElement('none') for storage_disk_info in attributes_list.get_children(): disk_raid_info = storage_disk_info.get_child_by_name( 'disk-raid-info') or netapp_api.NaElement('none') disk_type = disk_raid_info.get_child_content( 'effective-disk-type') if disk_type: disk_types.add(disk_type) return disk_types def get_aggregate_capacities(self, aggregate_names): """Gets capacity info for multiple aggregates.""" if not isinstance(aggregate_names, list): return {} aggregates = {} for aggregate_name in aggregate_names: aggregates[aggregate_name] = self.get_aggregate_capacity( aggregate_name) return aggregates def get_aggregate_capacity(self, aggregate_name): """Gets capacity info for an aggregate.""" desired_attributes = { 'aggr-attributes': { 'aggr-space-attributes': { 'percent-used-capacity': None, 'size-available': None, 'size-total': None, }, }, } try: aggrs = self._get_aggregates(aggregate_names=[aggregate_name], desired_attributes=desired_attributes) except netapp_api.NaApiError as e: if e.code == netapp_api.EAPINOTFOUND: LOG.debug('Aggregate capacity can only be collected with ' 'cluster scoped credentials.') else: LOG.exception('Failed to get info for aggregate %s.', aggregate_name) return {} if len(aggrs) < 1: return {} aggr_attributes = aggrs[0] aggr_space_attributes = aggr_attributes.get_child_by_name( 'aggr-space-attributes') or netapp_api.NaElement('none') percent_used = int(aggr_space_attributes.get_child_content( 'percent-used-capacity')) size_available = float(aggr_space_attributes.get_child_content( 'size-available')) size_total = float( aggr_space_attributes.get_child_content('size-total')) return { 'percent-used': percent_used, 'size-available': size_available, 'size-total': size_total, } def get_performance_instance_uuids(self, object_name, node_name): """Get UUIDs of performance instances for a cluster node.""" api_args = { 'objectname': object_name, 'query': { 'instance-info': { 'uuid': node_name + ':*', } } } result = self.connection.send_request( 'perf-object-instance-list-info-iter', api_args, enable_tunneling=False) uuids = [] instances = result.get_child_by_name( 'attributes-list') or netapp_api.NaElement('None') for instance_info in instances.get_children(): uuids.append(instance_info.get_child_content('uuid')) return uuids def get_performance_counters(self, object_name, instance_uuids, counter_names): """Gets more cDOT performance counters.""" api_args = { 'objectname': object_name, 'instance-uuids': [ {'instance-uuid': instance_uuid} for instance_uuid in instance_uuids ], 'counters': [ {'counter': counter} for counter in counter_names ], } result = self.connection.send_request( 'perf-object-get-instances', api_args, enable_tunneling=False) counter_data = [] timestamp = result.get_child_content('timestamp') instances = result.get_child_by_name( 'instances') or netapp_api.NaElement('None') for instance in instances.get_children(): instance_name = instance.get_child_content('name') instance_uuid = instance.get_child_content('uuid') node_name = instance_uuid.split(':')[0] counters = instance.get_child_by_name( 'counters') or netapp_api.NaElement('None') for counter in counters.get_children(): counter_name = counter.get_child_content('name') counter_value = counter.get_child_content('value') counter_data.append({ 
'instance-name': instance_name, 'instance-uuid': instance_uuid, 'node-name': node_name, 'timestamp': timestamp, counter_name: counter_value, }) return counter_data def get_snapshots_marked_for_deletion(self): """Get a list of snapshots marked for deletion.""" api_args = { 'query': { 'snapshot-info': { 'name': client_base.DELETED_PREFIX + '*', 'vserver': self.vserver, 'busy': 'false', }, }, 'desired-attributes': { 'snapshot-info': { 'name': None, 'volume': None, 'snapshot-instance-uuid': None, } }, } result = self.connection.send_request('snapshot-get-iter', api_args) snapshots = [] attributes = result.get_child_by_name( 'attributes-list') or netapp_api.NaElement('none') snapshot_info_list = attributes.get_children() for snapshot_info in snapshot_info_list: snapshot_name = snapshot_info.get_child_content('name') snapshot_id = snapshot_info.get_child_content( 'snapshot-instance-uuid') snapshot_volume = snapshot_info.get_child_content('volume') snapshots.append({ 'name': snapshot_name, 'instance_id': snapshot_id, 'volume_name': snapshot_volume, }) return snapshots def get_snapshot(self, volume_name, snapshot_name): """Gets a single snapshot.""" api_args = { 'query': { 'snapshot-info': { 'name': snapshot_name, 'volume': volume_name, }, }, 'desired-attributes': { 'snapshot-info': { 'name': None, 'volume': None, 'busy': None, 'snapshot-owners-list': { 'snapshot-owner': None, } }, }, } result = self.connection.send_request('snapshot-get-iter', api_args) self._handle_get_snapshot_return_failure(result, snapshot_name) attributes_list = result.get_child_by_name( 'attributes-list') or netapp_api.NaElement('none') snapshot_info_list = attributes_list.get_children() self._handle_snapshot_not_found(result, snapshot_info_list, snapshot_name, volume_name) snapshot_info = snapshot_info_list[0] snapshot = { 'name': snapshot_info.get_child_content('name'), 'volume': snapshot_info.get_child_content('volume'), 'busy': strutils.bool_from_string( snapshot_info.get_child_content('busy')), } snapshot_owners_list = snapshot_info.get_child_by_name( 'snapshot-owners-list') or netapp_api.NaElement('none') snapshot_owners = set([ snapshot_owner.get_child_content('owner') for snapshot_owner in snapshot_owners_list.get_children()]) snapshot['owners'] = snapshot_owners return snapshot def _handle_get_snapshot_return_failure(self, result, snapshot_name): error_record_list = result.get_child_by_name( 'volume-errors') or netapp_api.NaElement('none') errors = error_record_list.get_children() if errors: error = errors[0] error_code = error.get_child_content('errno') error_reason = error.get_child_content('reason') msg = _('Could not read information for snapshot %(name)s. ' 'Code: %(code)s. 
Reason: %(reason)s') msg_args = { 'name': snapshot_name, 'code': error_code, 'reason': error_reason, } if error_code == netapp_api.ESNAPSHOTNOTALLOWED: raise exception.SnapshotUnavailable(data=msg % msg_args) else: raise exception.VolumeBackendAPIException(data=msg % msg_args) def _handle_snapshot_not_found(self, result, snapshot_info_list, snapshot_name, volume_name): if not self._has_records(result): raise exception.SnapshotNotFound(snapshot_id=snapshot_name) elif len(snapshot_info_list) > 1: msg = _('Could not find unique snapshot %(snap)s on ' 'volume %(vol)s.') msg_args = {'snap': snapshot_name, 'vol': volume_name} raise exception.VolumeBackendAPIException(data=msg % msg_args) def get_cluster_name(self): """Gets cluster name.""" api_args = { 'desired-attributes': { 'cluster-identity-info': { 'cluster-name': None, } } } result = self.connection.send_request('cluster-identity-get', api_args, enable_tunneling=False) attributes = result.get_child_by_name('attributes') cluster_identity = attributes.get_child_by_name( 'cluster-identity-info') return cluster_identity.get_child_content('cluster-name') def create_cluster_peer(self, addresses, username=None, password=None, passphrase=None): """Creates a cluster peer relationship.""" api_args = { 'peer-addresses': [ {'remote-inet-address': address} for address in addresses ], } if username: api_args['user-name'] = username if password: api_args['password'] = password if passphrase: api_args['passphrase'] = passphrase self.connection.send_request('cluster-peer-create', api_args) def get_cluster_peers(self, remote_cluster_name=None): """Gets one or more cluster peer relationships.""" api_args = {} if remote_cluster_name: api_args['query'] = { 'cluster-peer-info': { 'remote-cluster-name': remote_cluster_name, } } result = self.send_iter_request('cluster-peer-get-iter', api_args) if not self._has_records(result): return [] cluster_peers = [] for cluster_peer_info in result.get_child_by_name( 'attributes-list').get_children(): cluster_peer = { 'active-addresses': [], 'peer-addresses': [] } active_addresses = cluster_peer_info.get_child_by_name( 'active-addresses') or netapp_api.NaElement('none') for address in active_addresses.get_children(): cluster_peer['active-addresses'].append(address.get_content()) peer_addresses = cluster_peer_info.get_child_by_name( 'peer-addresses') or netapp_api.NaElement('none') for address in peer_addresses.get_children(): cluster_peer['peer-addresses'].append(address.get_content()) cluster_peer['availability'] = cluster_peer_info.get_child_content( 'availability') cluster_peer['cluster-name'] = cluster_peer_info.get_child_content( 'cluster-name') cluster_peer['cluster-uuid'] = cluster_peer_info.get_child_content( 'cluster-uuid') cluster_peer['remote-cluster-name'] = ( cluster_peer_info.get_child_content('remote-cluster-name')) cluster_peer['serial-number'] = ( cluster_peer_info.get_child_content('serial-number')) cluster_peer['timeout'] = cluster_peer_info.get_child_content( 'timeout') cluster_peers.append(cluster_peer) return cluster_peers def delete_cluster_peer(self, cluster_name): """Deletes a cluster peer relationship.""" api_args = {'cluster-name': cluster_name} self.connection.send_request('cluster-peer-delete', api_args) def get_cluster_peer_policy(self): """Gets the cluster peering policy configuration.""" if not self.features.CLUSTER_PEER_POLICY: return {} result = self.connection.send_request('cluster-peer-policy-get') attributes = result.get_child_by_name( 'attributes') or netapp_api.NaElement('none') 
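# --- Illustrative sketch (editor's addition, not part of the driver). ---
# Lookups throughout this module use "get_child_by_name(...) or
# NaElement('none')" so that a missing child behaves like an empty element
# rather than None, which keeps the follow-up get_child_content() calls from
# raising. A stripped-down version of that null-object idiom:
class _EmptyNode(object):
    """Stand-in for an absent element: every lookup returns nothing."""

    def get_child_by_name(self, name):
        return None

    def get_child_content(self, name):
        return None

    def get_children(self):
        return []

def _child_or_empty(parent, name):
    """Return the named child, or an empty node when it is absent."""
    return parent.get_child_by_name(name) or _EmptyNode()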
cluster_peer_policy = attributes.get_child_by_name( 'cluster-peer-policy') or netapp_api.NaElement('none') policy = { 'is-unauthenticated-access-permitted': cluster_peer_policy.get_child_content( 'is-unauthenticated-access-permitted'), 'passphrase-minimum-length': cluster_peer_policy.get_child_content( 'passphrase-minimum-length'), } if policy['is-unauthenticated-access-permitted'] is not None: policy['is-unauthenticated-access-permitted'] = ( strutils.bool_from_string( policy['is-unauthenticated-access-permitted'])) if policy['passphrase-minimum-length'] is not None: policy['passphrase-minimum-length'] = int( policy['passphrase-minimum-length']) return policy def set_cluster_peer_policy(self, is_unauthenticated_access_permitted=None, passphrase_minimum_length=None): """Modifies the cluster peering policy configuration.""" if not self.features.CLUSTER_PEER_POLICY: return if (is_unauthenticated_access_permitted is None and passphrase_minimum_length is None): return api_args = {} if is_unauthenticated_access_permitted is not None: api_args['is-unauthenticated-access-permitted'] = ( 'true' if strutils.bool_from_string( is_unauthenticated_access_permitted) else 'false') if passphrase_minimum_length is not None: api_args['passphrase-minlength'] = str(passphrase_minimum_length) self.connection.send_request('cluster-peer-policy-modify', api_args) def create_vserver_peer(self, vserver_name, peer_vserver_name, vserver_peer_application=None): """Creates a Vserver peer relationship.""" # default peering application to `snapmirror` if none is specified. if not vserver_peer_application: vserver_peer_application = ['snapmirror'] api_args = { 'vserver': vserver_name, 'peer-vserver': peer_vserver_name, 'applications': [ {'vserver-peer-application': app} for app in vserver_peer_application ], } self.connection.send_request('vserver-peer-create', api_args, enable_tunneling=False) def delete_vserver_peer(self, vserver_name, peer_vserver_name): """Deletes a Vserver peer relationship.""" api_args = {'vserver': vserver_name, 'peer-vserver': peer_vserver_name} self.connection.send_request('vserver-peer-delete', api_args) def accept_vserver_peer(self, vserver_name, peer_vserver_name): """Accepts a pending Vserver peer relationship.""" api_args = {'vserver': vserver_name, 'peer-vserver': peer_vserver_name} self.connection.send_request('vserver-peer-accept', api_args) def get_vserver_peers(self, vserver_name=None, peer_vserver_name=None): """Gets one or more Vserver peer relationships.""" api_args = None if vserver_name or peer_vserver_name: api_args = {'query': {'vserver-peer-info': {}}} if vserver_name: api_args['query']['vserver-peer-info']['vserver'] = ( vserver_name) if peer_vserver_name: api_args['query']['vserver-peer-info']['peer-vserver'] = ( peer_vserver_name) result = self.send_iter_request('vserver-peer-get-iter', api_args, enable_tunneling=False) if not self._has_records(result): return [] vserver_peers = [] for vserver_peer_info in result.get_child_by_name( 'attributes-list').get_children(): vserver_peer = { 'vserver': vserver_peer_info.get_child_content('vserver'), 'peer-vserver': vserver_peer_info.get_child_content('peer-vserver'), 'peer-state': vserver_peer_info.get_child_content('peer-state'), 'peer-cluster': vserver_peer_info.get_child_content('peer-cluster'), 'applications': [app.get_content() for app in vserver_peer_info.get_child_by_name( 'applications').get_children()], } vserver_peers.append(vserver_peer) return vserver_peers def _ensure_snapmirror_v2(self): """Verify support for SnapMirror 
control plane v2.""" if not self.features.SNAPMIRROR_V2: msg = _('SnapMirror features require Data ONTAP 8.2 or later.') raise na_utils.NetAppDriverException(msg) def create_snapmirror(self, source_vserver, source_volume, destination_vserver, destination_volume, src_cg_name=None, dest_cg_name=None, schedule=None, policy=None, relationship_type='data_protection'): """Creates a SnapMirror relationship (cDOT 8.2 or later only).""" self._ensure_snapmirror_v2() api_args = { 'source-volume': source_volume, 'source-vserver': source_vserver, 'destination-volume': destination_volume, 'destination-vserver': destination_vserver, 'relationship-type': relationship_type, } if schedule: api_args['schedule'] = schedule if policy: api_args['policy'] = policy try: self.connection.send_request('snapmirror-create', api_args) except netapp_api.NaApiError as e: if e.code != netapp_api.ERELATION_EXISTS: raise def initialize_snapmirror(self, source_vserver, source_volume, destination_vserver, destination_volume, active_sync_policy=None, source_snapshot=None, transfer_priority=None): """Initializes a SnapMirror relationship (cDOT 8.2 or later only).""" self._ensure_snapmirror_v2() api_args = { 'source-volume': source_volume, 'source-vserver': source_vserver, 'destination-volume': destination_volume, 'destination-vserver': destination_vserver, } if source_snapshot: api_args['source-snapshot'] = source_snapshot if transfer_priority: api_args['transfer-priority'] = transfer_priority result = self.connection.send_request('snapmirror-initialize', api_args) result_info = {} result_info['operation-id'] = result.get_child_content( 'result-operation-id') result_info['status'] = result.get_child_content('result-status') result_info['jobid'] = result.get_child_content('result-jobid') result_info['error-code'] = result.get_child_content( 'result-error-code') result_info['error-message'] = result.get_child_content( 'result-error-message') return result_info def release_snapmirror(self, source_vserver, source_volume, destination_vserver, destination_volume, relationship_info_only=False): """Removes a SnapMirror relationship on the source endpoint.""" self._ensure_snapmirror_v2() api_args = { 'query': { 'snapmirror-destination-info': { 'source-volume': source_volume, 'source-vserver': source_vserver, 'destination-volume': destination_volume, 'destination-vserver': destination_vserver, 'relationship-info-only': ('true' if relationship_info_only else 'false'), } } } self.connection.send_request('snapmirror-release-iter', api_args) def quiesce_snapmirror(self, source_vserver, source_volume, destination_vserver, destination_volume): """Disables future transfers to a SnapMirror destination.""" self._ensure_snapmirror_v2() api_args = { 'source-volume': source_volume, 'source-vserver': source_vserver, 'destination-volume': destination_volume, 'destination-vserver': destination_vserver, } self.connection.send_request('snapmirror-quiesce', api_args) def abort_snapmirror(self, source_vserver, source_volume, destination_vserver, destination_volume, clear_checkpoint=False): """Stops ongoing transfers for a SnapMirror relationship.""" self._ensure_snapmirror_v2() api_args = { 'source-volume': source_volume, 'source-vserver': source_vserver, 'destination-volume': destination_volume, 'destination-vserver': destination_vserver, 'clear-checkpoint': 'true' if clear_checkpoint else 'false', } try: self.connection.send_request('snapmirror-abort', api_args) except netapp_api.NaApiError as e: if e.code != netapp_api.ENOTRANSFER_IN_PROGRESS: raise def 
break_snapmirror(self, source_vserver, source_volume, destination_vserver, destination_volume): """Breaks a data protection SnapMirror relationship.""" self._ensure_snapmirror_v2() api_args = { 'source-volume': source_volume, 'source-vserver': source_vserver, 'destination-volume': destination_volume, 'destination-vserver': destination_vserver, } self.connection.send_request('snapmirror-break', api_args) def modify_snapmirror(self, source_vserver, source_volume, destination_vserver, destination_volume, schedule=None, policy=None, tries=None, max_transfer_rate=None): """Modifies a SnapMirror relationship.""" self._ensure_snapmirror_v2() api_args = { 'source-volume': source_volume, 'source-vserver': source_vserver, 'destination-volume': destination_volume, 'destination-vserver': destination_vserver, } if schedule: api_args['schedule'] = schedule if policy: api_args['policy'] = policy if tries is not None: api_args['tries'] = tries if max_transfer_rate is not None: api_args['max-transfer-rate'] = max_transfer_rate self.connection.send_request('snapmirror-modify', api_args) def delete_snapmirror(self, source_vserver, source_volume, destination_vserver, destination_volume): """Destroys a SnapMirror relationship.""" self._ensure_snapmirror_v2() api_args = { 'query': { 'snapmirror-info': { 'source-volume': source_volume, 'source-vserver': source_vserver, 'destination-volume': destination_volume, 'destination-vserver': destination_vserver, } } } self.connection.send_request('snapmirror-destroy-iter', api_args) def update_snapmirror(self, source_vserver, source_volume, destination_vserver, destination_volume): """Schedules a SnapMirror update.""" self._ensure_snapmirror_v2() api_args = { 'source-volume': source_volume, 'source-vserver': source_vserver, 'destination-volume': destination_volume, 'destination-vserver': destination_vserver, } try: self.connection.send_request('snapmirror-update', api_args) except netapp_api.NaApiError as e: if (e.code != netapp_api.ETRANSFER_IN_PROGRESS and e.code != netapp_api.EANOTHER_OP_ACTIVE): raise def resume_snapmirror(self, source_vserver, source_volume, destination_vserver, destination_volume): """Resumes a SnapMirror relationship if it is quiesced.""" self._ensure_snapmirror_v2() api_args = { 'source-volume': source_volume, 'source-vserver': source_vserver, 'destination-volume': destination_volume, 'destination-vserver': destination_vserver, } try: self.connection.send_request('snapmirror-resume', api_args) except netapp_api.NaApiError as e: if e.code != netapp_api.ERELATION_NOT_QUIESCED: raise def resync_snapmirror(self, source_vserver, source_volume, destination_vserver, destination_volume): """Resyncs a SnapMirror relationship.""" self._ensure_snapmirror_v2() api_args = { 'source-volume': source_volume, 'source-vserver': source_vserver, 'destination-volume': destination_volume, 'destination-vserver': destination_vserver, } self.connection.send_request('snapmirror-resync', api_args) def _get_snapmirrors(self, source_vserver=None, source_volume=None, destination_vserver=None, destination_volume=None, desired_attributes=None): query = None if (source_vserver or source_volume or destination_vserver or destination_volume): query = {'snapmirror-info': {}} if source_volume: query['snapmirror-info']['source-volume'] = source_volume if destination_volume: query['snapmirror-info']['destination-volume'] = ( destination_volume) if source_vserver: query['snapmirror-info']['source-vserver'] = source_vserver if destination_vserver:
query['snapmirror-info']['destination-vserver'] = ( destination_vserver) api_args = {} if query: api_args['query'] = query if desired_attributes: api_args['desired-attributes'] = desired_attributes result = self.send_iter_request('snapmirror-get-iter', api_args) if not self._has_records(result): return [] else: return result.get_child_by_name('attributes-list').get_children() def get_snapmirrors(self, source_vserver, source_volume, destination_vserver, destination_volume, desired_attributes=None): """Gets one or more SnapMirror relationships. Either the source or destination info may be omitted. Desired attributes should be a flat list of attribute names. """ self._ensure_snapmirror_v2() if desired_attributes is not None: desired_attributes = { 'snapmirror-info': {attr: None for attr in desired_attributes}, } result = self._get_snapmirrors( source_vserver=source_vserver, source_volume=source_volume, destination_vserver=destination_vserver, destination_volume=destination_volume, desired_attributes=desired_attributes) snapmirrors = [] for snapmirror_info in result: snapmirror = {} for child in snapmirror_info.get_children(): name = self._strip_xml_namespace(child.get_name()) snapmirror[name] = child.get_content() snapmirrors.append(snapmirror) return snapmirrors def get_provisioning_options_from_flexvol(self, flexvol_name): """Get a dict of provisioning options matching existing flexvol.""" flexvol_info = self.get_flexvol(flexvol_name=flexvol_name) dedupe_info = self.get_flexvol_dedupe_info(flexvol_name) provisioning_opts = { 'aggregate': flexvol_info['aggregate'], # space-guarantee can be 'none', 'file', 'volume' 'space_guarantee_type': flexvol_info.get('space-guarantee'), 'snapshot_policy': flexvol_info['snapshot-policy'], 'language': flexvol_info['language'], 'dedupe_enabled': dedupe_info['dedupe'], 'compression_enabled': dedupe_info['compression'], 'snapshot_reserve': flexvol_info['percentage-snapshot-reserve'], 'volume_type': flexvol_info['type'], 'size': int(math.ceil(float(flexvol_info['size']) / units.Gi)), 'is_flexgroup': flexvol_info['style-extended'] == 'flexgroup', } return provisioning_opts ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/drivers/netapp/dataontap/client/client_cmode_rest.py0000664000175000017500000033572400000000000030112 0ustar00zuulzuul00000000000000# Copyright (c) 2022 NetApp, Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
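# NOTE: The usage sketch below is illustrative only and is not part of the
# upstream driver module. It assumes a reachable ONTAP 9.11.1+ backend and
# purely hypothetical credentials, management address, and SVM name; the
# keyword arguments mirror those consumed by RestClient.__init__ further
# down in this file.
#
#     client = RestClient(hostname='203.0.113.10', port=443,
#                         transport_type='https', username='admin',
#                         password='secret', vserver='svm1',
#                         ssl_cert_path=None, api_trace_pattern='(.*)',
#                         private_key_file=None, certificate_file=None,
#                         ca_certificate_file=None,
#                         certificate_host_validation=False)
#     client.get_ontap_version()   # cached after __init__, e.g. (9, 12, 1)
#     client.list_flexvols()       # names of online, read-write volumes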
import copy from datetime import datetime from datetime import timedelta import math from time import time from oslo_log import log as logging from oslo_utils import excutils from oslo_utils import units from cinder import exception from cinder.i18n import _ from cinder import utils from cinder.volume.drivers.netapp.dataontap.client import api as netapp_api from cinder.volume.drivers.netapp.dataontap.client import client_cmode from cinder.volume.drivers.netapp import utils as na_utils from cinder.volume import volume_utils LOG = logging.getLogger(__name__) DEFAULT_MAX_PAGE_LENGTH = 10000 ONTAP_SELECT_MODEL = 'FDvM300' ONTAP_C190 = 'C190' HTTP_ACCEPTED = 202 DELETED_PREFIX = 'deleted_cinder_' DEFAULT_TIMEOUT = 15 REST_SYNC_TIMEOUT = 15 # Keys in this map are REST API's endpoints that the user shall have permission # in order to enable extra specs reported to Cinder's scheduler. # NOTE(sfernand): ONTAP does not retrieve volume efficiency information # properly when using the pre-created "vsadmin" role (SVM scoped), causing # dedup and compression extra specs to be reported as disabled despite its # current configuration. SSC_API_MAP = { '/storage/aggregates': [ 'netapp_raid_type', ], '/storage/disks': [ 'netapp_disk_type', ], '/snapmirror/relationships': [ 'netapp_mirrored', ], '/storage/volumes': [ 'netapp_flexvol_encryption' 'netapp_dedup', 'netapp_compression', ], } class RestClient(object, metaclass=volume_utils.TraceWrapperMetaclass): def __init__(self, **kwargs): host = kwargs['hostname'] username = kwargs['username'] password = kwargs['password'] api_trace_pattern = kwargs['api_trace_pattern'] private_key_file = kwargs['private_key_file'] certificate_file = kwargs['certificate_file'] ca_certificate_file = kwargs['ca_certificate_file'] certificate_host_validation = kwargs['certificate_host_validation'] is_disaggregated = kwargs.get('is_disaggregated', False) ssl_cert_path = kwargs['ssl_cert_path'] if private_key_file and certificate_file and ca_certificate_file: self.connection = netapp_api.RestNaServer( host=host, transport_type='https', ssl_cert_path=ssl_cert_path, port=kwargs['port'], private_key_file=private_key_file, certificate_file=certificate_file, ca_certificate_file=ca_certificate_file, certificate_host_validation=certificate_host_validation, api_trace_pattern=api_trace_pattern) elif private_key_file and certificate_file: self.connection = netapp_api.RestNaServer( host=host, transport_type='https', ssl_cert_path=ssl_cert_path, port=kwargs['port'], private_key_file=private_key_file, certificate_file=certificate_file, certificate_host_validation=certificate_host_validation, api_trace_pattern=api_trace_pattern) else: self.connection = netapp_api.RestNaServer( host=host, transport_type=kwargs['transport_type'], ssl_cert_path=ssl_cert_path, port=kwargs['port'], username=username, password=password, api_trace_pattern=api_trace_pattern) self.async_rest_timeout = kwargs.get('async_rest_timeout', 60) self.vserver = kwargs.get('vserver') self.connection.set_vserver(self.vserver) ontap_version = self.get_ontap_version(cached=False) if ontap_version < (9, 11, 1): msg = _('REST Client can be used only with ONTAP 9.11.1 or upper.') raise na_utils.NetAppDriverException(msg) self.connection.set_ontap_version(ontap_version) self.ssh_client = self._init_ssh_client(host, username, password) # NOTE(nahimsouza): ZAPI Client is needed to implement the fallback # when a REST method is not supported. 
if not is_disaggregated: self.zapi_client = client_cmode.Client(**kwargs) self._init_features() def _init_ssh_client(self, host, username, password): return netapp_api.SSHUtil( host=host, username=username, password=password) def _init_features(self): self.features = na_utils.Features() generation, major, minor = self.get_ontap_version() ontap_version = (generation, major) ontap_9_0 = ontap_version >= (9, 0) ontap_9_4 = ontap_version >= (9, 4) ontap_9_5 = ontap_version >= (9, 5) ontap_9_6 = ontap_version >= (9, 6) ontap_9_8 = ontap_version >= (9, 8) ontap_9_14 = ontap_version >= (9, 14) nodes_info = self._get_cluster_nodes_info() for node in nodes_info: qos_min_block = False qos_min_nfs = False if node['model'] == ONTAP_SELECT_MODEL: qos_min_block = node['is_all_flash_select'] and ontap_9_6 qos_min_nfs = qos_min_block elif ONTAP_C190 in node['model']: qos_min_block = node['is_all_flash'] and ontap_9_6 qos_min_nfs = qos_min_block else: qos_min_block = node['is_all_flash'] and ontap_9_0 qos_min_nfs = node['is_all_flash'] and ontap_9_0 qos_name = na_utils.qos_min_feature_name(True, node['name']) self.features.add_feature(qos_name, supported=qos_min_nfs) qos_name = na_utils.qos_min_feature_name(False, node['name']) self.features.add_feature(qos_name, supported=qos_min_block) self.features.add_feature('SNAPMIRROR_V2', supported=ontap_9_0) self.features.add_feature('USER_CAPABILITY_LIST', supported=ontap_9_0) self.features.add_feature('SYSTEM_METRICS', supported=ontap_9_0) self.features.add_feature('CLONE_SPLIT_STATUS', supported=ontap_9_0) self.features.add_feature('FAST_CLONE_DELETE', supported=ontap_9_0) self.features.add_feature('SYSTEM_CONSTITUENT_METRICS', supported=ontap_9_0) self.features.add_feature('ADVANCED_DISK_PARTITIONING', supported=ontap_9_0) self.features.add_feature('BACKUP_CLONE_PARAM', supported=ontap_9_0) self.features.add_feature('CLUSTER_PEER_POLICY', supported=ontap_9_0) self.features.add_feature('FLEXVOL_ENCRYPTION', supported=ontap_9_0) self.features.add_feature('FLEXGROUP', supported=ontap_9_8) # Flex group file clone is supported for ONTAP 9.14 and above versions # so updating this from 9.9 to 9.14. 
self.features.add_feature('FLEXGROUP_CLONE_FILE', supported=ontap_9_14) self.features.add_feature('ADAPTIVE_QOS', supported=ontap_9_4) self.features.add_feature('ADAPTIVE_QOS_BLOCK_SIZE', supported=ontap_9_5) self.features.add_feature('ADAPTIVE_QOS_EXPECTED_IOPS_ALLOCATION', supported=ontap_9_5) LOG.info('ONTAP Version: %(generation)s.%(major)s.%(minor)s', {'generation': ontap_version[0], 'major': ontap_version[1], 'minor': minor}) def __getattr__(self, name): """If method is not implemented for REST, try to call the ZAPI.""" LOG.debug("The %s call is not supported for REST, falling back to " "ZAPI.", name) # Don't use self.zapi_client to avoid reentrant call to __getattr__() zapi_client = object.__getattribute__(self, 'zapi_client') return getattr(zapi_client, name) def _wait_job_result(self, job_url): """Waits for a job to finish.""" interval = 2 retries = (self.async_rest_timeout / interval) @utils.retry(netapp_api.NaRetryableError, interval=interval, retries=retries, backoff_rate=1) def _waiter(): response = self.send_request(job_url, 'get', enable_tunneling=False) job_state = response.get('state') if job_state == 'success': return response elif job_state == 'failure': message = response['error']['message'] code = response['error']['code'] raise netapp_api.NaApiError(message=message, code=code) msg_args = {'job': job_url, 'state': job_state} LOG.debug("Job %(job)s has not finished: %(state)s", msg_args) raise netapp_api.NaRetryableError(message='Job is running.') try: return _waiter() except netapp_api.NaRetryableError: msg = _("Job %s did not reach the expected state. Retries " "exhausted. Aborting.") % job_url raise na_utils.NetAppDriverException(msg) def send_request(self, action_url, method, body=None, query=None, enable_tunneling=True, max_page_length=DEFAULT_MAX_PAGE_LENGTH, wait_on_accepted=True): """Sends REST request to ONTAP. :param action_url: action URL for the request :param method: HTTP method for the request ('get', 'post', 'put', 'delete' or 'patch') :param body: dict of arguments to be passed as request body :param query: dict of arguments to be passed as query string :param enable_tunneling: enable tunneling to the ONTAP host :param max_page_length: size of the page during pagination :param wait_on_accepted: if True, wait until the job finishes when HTTP code 202 (Accepted) is returned :returns: parsed REST response """ response = None if method == 'get': response = self.get_records( action_url, query, enable_tunneling, max_page_length) else: code, response = self.connection.invoke_successfully( action_url, method, body=body, query=query, enable_tunneling=enable_tunneling) if code == HTTP_ACCEPTED and wait_on_accepted: # get job URL and discard '/api' job_url = response['job']['_links']['self']['href'][4:] response = self._wait_job_result(job_url) return response def get_records(self, action_url, query=None, enable_tunneling=True, max_page_length=DEFAULT_MAX_PAGE_LENGTH): """Retrieves ONTAP resources using pagination REST request. 
:param action_url: action URL for the request :param query: dict of arguments to be passed as query string :param enable_tunneling: enable tunneling to the ONTAP host :param max_page_length: size of the page during pagination :returns: dict containing records and num_records """ # Initialize query variable if it is None query = query if query else {} query['max_records'] = max_page_length _, response = self.connection.invoke_successfully( action_url, 'get', query=query, enable_tunneling=enable_tunneling) # NOTE(nahimsouza): if all records are returned in the first call, # 'next_url' will be None. next_url = response.get('_links', {}).get('next', {}).get('href') next_url = next_url[4:] if next_url else None # discard '/api' # Get remaining pages, saving data into first page while next_url: # NOTE(nahimsouza): clean the 'query', because the parameters are # already included in 'next_url'. _, next_response = self.connection.invoke_successfully( next_url, 'get', query=None, enable_tunneling=enable_tunneling) response['num_records'] += next_response.get('num_records', 0) response['records'].extend(next_response.get('records')) next_url = ( next_response.get('_links', {}).get('next', {}).get('href')) next_url = next_url[4:] if next_url else None # discard '/api' return response def get_ontap_version(self, cached=True): """Gets the ONTAP version as tuple.""" if cached: return self.connection.get_ontap_version() query = { 'fields': 'version' } response = self.send_request('/cluster/', 'get', query=query) version = (response['version']['generation'], response['version']['major'], response['version']['minor']) return version def check_api_permissions(self): """Check which APIs that support SSC functionality are available.""" inaccessible_apis = [] invalid_extra_specs = [] for api, extra_specs in SSC_API_MAP.items(): if not self.check_cluster_api(api): inaccessible_apis.append(api) invalid_extra_specs.extend(extra_specs) if inaccessible_apis: if '/storage/volumes' in inaccessible_apis: msg = _('User not permitted to query Data ONTAP volumes.') raise exception.VolumeBackendAPIException(data=msg) else: LOG.warning('The configured user account does not have ' 'sufficient privileges to use all needed ' 'APIs. The following extra specs will fail ' 'or be ignored: %s.', invalid_extra_specs) return invalid_extra_specs def check_cluster_api(self, api): """Checks the availability of a cluster API. Returns True if the specified cluster API exists and may be called by the current user. """ try: # No need to return any records here since we just want to know if # the user is allowed to make the request. A "Permission Denied" # error code is expected in case user does not have the necessary # permissions. self.send_request('%s?return_records=false' % api, 'get', enable_tunneling=False) except netapp_api.NaApiError as ex: # NOTE(nahimsouza): This function only returns false in case user # is not authorized. If other error is returned, it must be # handled in the function call that uses the same endpoint. 
if ex.code == netapp_api.REST_UNAUTHORIZED: return False return True def _get_cluster_nodes_info(self): """Return a list of models of the nodes in the cluster.""" query_args = {'fields': 'model,' 'name,' 'is_all_flash_optimized,' 'is_all_flash_select_optimized'} nodes = [] try: result = self.send_request('/cluster/nodes', 'get', query=query_args, enable_tunneling=False) for record in result['records']: node = { 'model': record['model'], 'name': record['name'], 'is_all_flash': record['is_all_flash_optimized'], 'is_all_flash_select': record['is_all_flash_select_optimized'] } nodes.append(node) except netapp_api.NaApiError as e: if e.code == netapp_api.REST_UNAUTHORIZED: LOG.debug('Cluster nodes can only be collected with ' 'cluster scoped credentials.') else: LOG.exception('Failed to get the cluster nodes.') return nodes def list_flexvols(self): """Returns the names of the flexvols on the controller.""" query = { 'type': 'rw', 'style': 'flex*', # Match both 'flexvol' and 'flexgroup' 'is_svm_root': 'false', 'error_state.is_inconsistent': 'false', 'state': 'online', 'fields': 'name' } response = self.send_request( '/storage/volumes/', 'get', query=query) records = response.get('records', []) volumes = [volume['name'] for volume in records] return volumes def _get_unique_volume(self, records): """Get the unique FlexVol or FlexGroup volume from a volume list.""" if len(records) != 1: msg = _('Could not find unique volume. Volumes found: %(vol)s.') msg_args = {'vol': records} raise exception.VolumeBackendAPIException(data=msg % msg_args) return records[0] def _get_volume_by_args(self, vol_name=None, vol_path=None, vserver=None, fields=None): """Get info from a single volume according to the args.""" query = { 'type': 'rw', 'style': 'flex*', # Match both 'flexvol' and 'flexgroup' 'is_svm_root': 'false', 'error_state.is_inconsistent': 'false', 'state': 'online', 'fields': 'name,style' } if vol_name: query['name'] = vol_name if vol_path: query['nas.path'] = vol_path if vserver: query['svm.name'] = vserver if fields: query['fields'] = fields volumes_response = self.send_request( '/storage/volumes/', 'get', query=query) records = volumes_response.get('records', []) volume = self._get_unique_volume(records) return volume def get_flexvol(self, flexvol_path=None, flexvol_name=None): """Get flexvol attributes needed for the storage service catalog.""" fields = ('aggregates.name,name,svm.name,nas.path,' 'type,guarantee.honored,guarantee.type,' 'space.snapshot.reserve_percent,space.size,' 'qos.policy.name,snapshot_policy,language,style') unique_volume = self._get_volume_by_args( vol_name=flexvol_name, vol_path=flexvol_path, fields=fields) aggregate = None if unique_volume['style'] == 'flexvol': # flexvol has only 1 aggregate aggregate = [unique_volume['aggregates'][0]['name']] else: aggregate = [aggr["name"] for aggr in unique_volume.get('aggregates', [])] qos_policy_group = ( unique_volume.get('qos', {}).get('policy', {}).get('name')) volume = { 'name': unique_volume['name'], 'vserver': unique_volume['svm']['name'], 'junction-path': unique_volume.get('nas', {}).get('path'), 'aggregate': aggregate, 'type': unique_volume['type'], 'space-guarantee-enabled': unique_volume['guarantee']['honored'], 'space-guarantee': unique_volume['guarantee']['type'], 'percentage-snapshot-reserve': str(unique_volume['space']['snapshot']['reserve_percent']), 'size': str(unique_volume['space']['size']), 'qos-policy-group': qos_policy_group, 'snapshot-policy': unique_volume['snapshot_policy']['name'], 'language': 
unique_volume['language'], 'style-extended': unique_volume['style'], } return volume def is_flexvol_mirrored(self, flexvol_name, vserver_name): """Check if flexvol is a SnapMirror source.""" query = { 'source.path': vserver_name + ':' + flexvol_name, 'state': 'snapmirrored', 'return_records': 'false', } try: response = self.send_request('/snapmirror/relationships/', 'get', query=query) return response['num_records'] > 0 except netapp_api.NaApiError: LOG.exception('Failed to get SnapMirror info for volume %s.', flexvol_name) return False def is_flexvol_encrypted(self, flexvol_name, vserver_name): """Check if a flexvol is encrypted.""" if not self.features.FLEXVOL_ENCRYPTION: return False query = { 'encryption.enabled': 'true', 'name': flexvol_name, 'svm.name': vserver_name, 'return_records': 'false', } try: response = self.send_request( '/storage/volumes/', 'get', query=query) return response['num_records'] > 0 except netapp_api.NaApiError: LOG.exception('Failed to get Encryption info for volume %s.', flexvol_name) return False def get_aggregate_disk_types(self, aggregate_name): """Get the disk type(s) of an aggregate.""" disk_types = self._get_aggregate_disk_types(aggregate_name) return list(disk_types) if disk_types else None def _get_aggregate_disk_types(self, aggregate_name): """Get the disk type(s) of an aggregate""" disk_types = set() query = { 'aggregates.name': aggregate_name, 'fields': 'effective_type' } try: response = self.send_request( '/storage/disks', 'get', query=query, enable_tunneling=False) except netapp_api.NaApiError: LOG.exception('Failed to get disk info for aggregate %s.', aggregate_name) return disk_types for storage_disk_info in response['records']: disk_types.add(storage_disk_info['effective_type']) return disk_types def _get_aggregates(self, aggregate_names=None, fields=None): query = {} if aggregate_names: query['name'] = ','.join(aggregate_names) if fields: query['fields'] = fields response = self.send_request( '/storage/aggregates', 'get', query=query, enable_tunneling=False) return response['records'] def get_aggregate(self, aggregate_name): """Get aggregate attributes needed for the storage service catalog.""" if not aggregate_name: return {} fields = ('name,block_storage.primary.raid_type,' 'block_storage.storage_type,home_node.name') try: aggrs = self._get_aggregates(aggregate_names=[aggregate_name], fields=fields) except netapp_api.NaApiError: LOG.exception('Failed to get info for aggregate %s.', aggregate_name) return {} if len(aggrs) < 1: return {} aggr_attributes = aggrs[0] aggregate = { 'name': aggr_attributes['name'], 'raid-type': aggr_attributes['block_storage']['primary']['raid_type'], 'is-hybrid': aggr_attributes['block_storage']['storage_type'] == 'hybrid', 'node-name': aggr_attributes['home_node']['name'], } return aggregate def is_qos_min_supported(self, is_nfs, node_name): """Check if the node supports QoS minimum.""" if node_name is None: # whether no access to node name (SVM account or error), the QoS # min support is dropped. return False qos_min_name = na_utils.qos_min_feature_name(is_nfs, node_name) return getattr(self.features, qos_min_name, False).__bool__() def get_flexvol_dedupe_info(self, flexvol_name): """Get dedupe attributes needed for the storage service catalog.""" query = { 'efficiency.volume_path': '/vol/%s' % flexvol_name, 'fields': 'efficiency.state,efficiency.compression' } # Set default values for the case there is no response. 
no_dedupe_response = { 'compression': False, 'dedupe': False, 'logical-data-size': 0, 'logical-data-limit': 1, } try: response = self.send_request('/storage/volumes', 'get', query=query) except netapp_api.NaApiError: LOG.exception('Failed to get dedupe info for volume %s.', flexvol_name) return no_dedupe_response if response["num_records"] != 1: return no_dedupe_response state = response["records"][0]["efficiency"]["state"] compression = response["records"][0]["efficiency"]["compression"] # TODO(nahimsouza): as soon as REST API supports the fields # 'logical-data-size and 'logical-data-limit', we should include # them in the query and set them correctly. # NOTE(nahimsouza): these fields are only used by the client function # `get_flexvol_dedupe_used_percent`, since the function is not # implemented on REST yet, the below hard-coded fields are not # affecting the driver in anyway. logical_data_size = 0 logical_data_limit = 1 dedupe_info = { 'compression': False if compression == "none" else True, 'dedupe': False if state == "disabled" else True, 'logical-data-size': logical_data_size, 'logical-data-limit': logical_data_limit, } return dedupe_info def get_lun_list(self): """Gets the list of LUNs on filer. Gets the LUNs from cluster with vserver. """ query = { 'svm.name': self.vserver, 'fields': 'svm.name,location.volume.name,space.size,' 'location.qtree.name,name,os_type,' 'space.scsi_thin_provisioning_support_enabled,' 'space.guarantee.requested,uuid' } response = self.send_request( '/storage/luns/', 'get', query=query) if response['num_records'] == '0': return [] lun_list = [] for lun in response['records']: lun_info = {} lun_info['Vserver'] = lun['svm']['name'] lun_info['Volume'] = lun['location']['volume']['name'] lun_info['Size'] = lun['space']['size'] lun_info['Qtree'] = \ lun['location'].get('qtree', {}).get('name', '') lun_info['Path'] = lun['name'] lun_info['OsType'] = lun['os_type'] lun_info['SpaceReserved'] = lun['space']['guarantee']['requested'] lun_info['SpaceAllocated'] = \ lun['space']['scsi_thin_provisioning_support_enabled'] lun_info['UUID'] = lun['uuid'] lun_list.append(lun_info) return lun_list def get_lun_by_args(self, **lun_info_args): """Retrieves LUN with specified args.""" query = { 'fields': 'svm.name,location.volume.name,space.size,' 'location.qtree.name,name,os_type,' 'space.scsi_thin_provisioning_support_enabled,' 'space.guarantee.requested,uuid' } if lun_info_args: if 'vserver' in lun_info_args: query['svm.name'] = lun_info_args['vserver'] if 'path' in lun_info_args: query['name'] = lun_info_args['path'] if 'uuid' in lun_info_args: query['uuid'] = lun_info_args['uuid'] response = self.send_request( '/storage/luns/', 'get', query=query) if response['num_records'] == '0': return [] lun_list = [] for lun in response['records']: lun_info = {} lun_info['Vserver'] = lun['svm']['name'] lun_info['Volume'] = lun['location']['volume']['name'] lun_info['Size'] = lun['space']['size'] lun_info['Qtree'] = \ lun['location'].get('qtree', {}).get('name', '') lun_info['Path'] = lun['name'] lun_info['OsType'] = lun['os_type'] lun_info['SpaceReserved'] = lun['space']['guarantee']['requested'] lun_info['SpaceAllocated'] = \ lun['space']['scsi_thin_provisioning_support_enabled'] lun_info['UUID'] = lun['uuid'] # NOTE(nahimsouza): Currently, ONTAP REST API does not have the # 'block-size' in the response. By default, we are setting its # value to 512, since traditional block size advertised by hard # disks is 512 bytes. 
lun_info['BlockSize'] = 512 lun_list.append(lun_info) return lun_list def get_lun_sizes_by_volume(self, volume_name): """Gets the list of LUNs and their sizes from a given volume name.""" query = { 'location.volume.name': volume_name, 'svm.name': self.vserver, 'fields': 'space.size,name' } response = self.send_request('/storage/luns/', 'get', query=query) if response['num_records'] == '0': return [] luns = [] for lun_info in response['records']: luns.append({ 'path': lun_info.get('name', ''), 'size': float(lun_info.get('space', {}).get('size', 0)) }) return luns def get_file_sizes_by_dir(self, dir_path): """Gets the list of files and their sizes from a given directory.""" # 'dir_path' will always be a FlexVol name volume = self._get_volume_by_args(vol_name=dir_path) query = { 'type': 'file', 'fields': 'size,name' } vol_uuid = volume['uuid'] try: response = self.send_request( f'/storage/volumes/{vol_uuid}/files', 'get', query=query) except netapp_api.NaApiError as e: if e.code == netapp_api.REST_NO_SUCH_FILE: return [] else: raise e files = [] for file_info in response['records']: files.append({ 'name': file_info.get('name', ''), 'file-size': float(file_info.get('size', 0)) }) return files def get_volume_state(self, junction_path=None, name=None): """Returns volume state for a given name or junction path.""" query_args = {} if name: query_args['name'] = name if junction_path: query_args['nas.path'] = junction_path query_args['fields'] = 'state' response = self.send_request('/storage/volumes/', 'get', query=query_args) try: records = response.get('records', []) unique_volume = self._get_unique_volume(records) except exception.VolumeBackendAPIException: return None return unique_volume['state'] def delete_snapshot(self, volume_name, snapshot_name): """Deletes a volume snapshot.""" volume = self._get_volume_by_args(vol_name=volume_name) self.send_request( f'/storage/volumes/{volume["uuid"]}/snapshots' f'?name={snapshot_name}', 'delete') def get_operational_lif_addresses(self): """Gets the IP addresses of operational LIFs on the vserver.""" query = { 'state': 'up', 'fields': 'ip.address', } response = self.send_request( '/network/ip/interfaces/', 'get', query=query) return [lif_info['ip']['address'] for lif_info in response['records']] def _list_vservers(self): """Get the names of vservers present.""" query = { 'fields': 'name', } response = self.send_request('/svm/svms', 'get', query=query, enable_tunneling=False) return [svm['name'] for svm in response.get('records', [])] def _get_ems_log_destination_vserver(self): """Returns the best vserver destination for EMS messages.""" # NOTE(nahimsouza): Unlike ZAPI, only 'data' SVMs can be # managed by the SVM REST APIs - that's why the vserver type is not # specified.
vservers = self._list_vservers() if vservers: return vservers[0] raise exception.NotFound("No Vserver found to receive EMS messages.") def send_ems_log_message(self, message_dict): """Sends a message to the Data ONTAP EMS log.""" body = { 'computer_name': message_dict['computer-name'], 'event_source': message_dict['event-source'], 'app_version': message_dict['app-version'], 'category': message_dict['category'], 'severity': 'notice', 'autosupport_required': message_dict['auto-support'] == 'true', 'event_id': message_dict['event-id'], 'event_description': message_dict['event-description'], } bkp_connection = copy.copy(self.connection) bkp_timeout = self.connection.get_timeout() bkp_vserver = self.vserver self.connection.set_timeout(25) try: # TODO(nahimsouza): Vserver is being set to replicate the ZAPI # behavior, but need to check if this could be removed in REST API self.connection.set_vserver( self._get_ems_log_destination_vserver()) self.send_request('/support/ems/application-logs', 'post', body=body) LOG.debug('EMS executed successfully.') except netapp_api.NaApiError as e: LOG.warning('Failed to invoke EMS. %s', e) finally: # Restores the data timeout = ( bkp_timeout if bkp_timeout is not None else DEFAULT_TIMEOUT) self.connection.set_timeout(timeout) self.connection = copy.copy(bkp_connection) self.connection.set_vserver(bkp_vserver) def get_performance_counter_info(self, object_name, counter_name): """Gets info about one or more Data ONTAP performance counters.""" # NOTE(nahimsouza): This conversion is nedeed because different names # are used in ZAPI and we want to avoid changes in the driver for now. rest_counter_names = { 'domain_busy': 'domain_busy_percent', 'processor_elapsed_time': 'elapsed_time', 'avg_processor_busy': 'average_processor_busy_percent', } rest_counter_name = counter_name if counter_name in rest_counter_names: rest_counter_name = rest_counter_names[counter_name] # Get counter table info query = { 'counter_schemas.name': rest_counter_name, 'fields': 'counter_schemas.*' } try: table = self.send_request( f'/cluster/counter/tables/{object_name}', 'get', query=query, enable_tunneling=False) name = counter_name # use the original name (ZAPI compatible) base_counter = table['counter_schemas'][0]['denominator']['name'] query = { 'counters.name': rest_counter_name, 'fields': 'counters.*' } response = self.send_request( f'/cluster/counter/tables/{object_name}/rows', 'get', query=query, enable_tunneling=False) table_rows = response.get('records', []) labels = [] if len(table_rows) != 0: labels = table_rows[0]['counters'][0].get('labels', []) # NOTE(nahimsouza): Values have a different format on REST API # and we want to keep compatibility with ZAPI for a while if object_name == 'wafl' and counter_name == 'cp_phase_times': # discard the prefix 'cp_' labels = [label[3:] for label in labels] return { 'name': name, 'labels': labels, 'base-counter': base_counter, } except netapp_api.NaApiError: raise exception.NotFound(_('Counter %s not found') % counter_name) def get_performance_instance_uuids(self, object_name, node_name): """Get UUIDs of performance instances for a cluster node.""" query = { 'id': node_name + ':*', } response = self.send_request( f'/cluster/counter/tables/{object_name}/rows', 'get', query=query, enable_tunneling=False) records = response.get('records', []) uuids = [] for record in records: uuids.append(record['id']) return uuids def get_performance_counters(self, object_name, instance_uuids, counter_names): """Gets more cDOT performance counters.""" # 
NOTE(nahimsouza): This conversion is nedeed because different names # are used in ZAPI and we want to avoid changes in the driver for now. rest_counter_names = { 'domain_busy': 'domain_busy_percent', 'processor_elapsed_time': 'elapsed_time', 'avg_processor_busy': 'average_processor_busy_percent', } zapi_counter_names = { 'domain_busy_percent': 'domain_busy', 'elapsed_time': 'processor_elapsed_time', 'average_processor_busy_percent': 'avg_processor_busy', } for i in range(len(counter_names)): if counter_names[i] in rest_counter_names: counter_names[i] = rest_counter_names[counter_names[i]] query = { 'id': '|'.join(instance_uuids), 'counters.name': '|'.join(counter_names), 'fields': 'id,counter_table.name,counters.*', } response = self.send_request( f'/cluster/counter/tables/{object_name}/rows', 'get', query=query, enable_tunneling=False) counter_data = [] for record in response.get('records', []): for counter in record['counters']: counter_name = counter['name'] # Reverts the name conversion if counter_name in zapi_counter_names: counter_name = zapi_counter_names[counter_name] counter_value = '' if counter.get('value'): counter_value = counter.get('value') elif counter.get('values'): # NOTE(nahimsouza): Conversion made to keep compatibility # with old ZAPI format values = counter.get('values') counter_value = ','.join([str(v) for v in values]) counter_data.append({ 'instance-name': record['counter_table']['name'], 'instance-uuid': record['id'], 'node-name': record['id'].split(':')[0], 'timestamp': int(time()), counter_name: counter_value, }) return counter_data def get_aggregate_capacities(self, aggregate_names): """Gets capacity info for multiple aggregates.""" if not isinstance(aggregate_names, list): return {} aggregates = {} for aggregate_name in aggregate_names: aggregates[aggregate_name] = self._get_aggregate_capacity( aggregate_name) return aggregates def _get_aggregate_capacity(self, aggregate_name): """Gets capacity info for an aggregate.""" fields = ('space.block_storage.available,space.block_storage.size,' 'space.block_storage.used') try: aggrs = self._get_aggregates(aggregate_names=[aggregate_name], fields=fields) result = {} if len(aggrs) > 0: aggr = aggrs[0] available = float(aggr['space']['block_storage']['available']) total = float(aggr['space']['block_storage']['size']) used = float(aggr['space']['block_storage']['used']) percent_used = int((used * 100) // total) result = { 'percent-used': percent_used, 'size-available': available, 'size-total': total, } return result except netapp_api.NaApiError as e: if (e.code == netapp_api.REST_API_NOT_FOUND or e.code == netapp_api.REST_UNAUTHORIZED): LOG.debug('Aggregate capacity can only be collected with ' 'cluster scoped credentials.') else: LOG.exception('Failed to get info for aggregate %s.', aggregate_name) return {} def get_node_for_aggregate(self, aggregate_name): """Get home node for the specified aggregate. This API could return None, most notably if it was sent to a Vserver LIF, so the caller must be able to handle that case. 
""" if not aggregate_name: return None fields = 'home_node.name' try: aggrs = self._get_aggregates(aggregate_names=[aggregate_name], fields=fields) node = None if len(aggrs) > 0: aggr = aggrs[0] node = aggr['home_node']['name'] return node except netapp_api.NaApiError as e: if e.code == netapp_api.REST_API_NOT_FOUND: return None else: raise e def provision_qos_policy_group(self, qos_policy_group_info, qos_min_support): """Create QoS policy group on the backend if appropriate.""" if qos_policy_group_info is None: return # Legacy QoS uses externally provisioned QoS policy group, # so we don't need to create one on the backend. legacy = qos_policy_group_info.get('legacy') if legacy: return spec = qos_policy_group_info.get('spec') if not spec: return is_adaptive = na_utils.is_qos_policy_group_spec_adaptive( qos_policy_group_info) self._validate_qos_policy_group(is_adaptive, spec=spec, qos_min_support=qos_min_support) qos_policy_group = self._get_qos_first_policy_group_by_name( spec['policy_name']) if not qos_policy_group: self._create_qos_policy_group(spec, is_adaptive) else: self._modify_qos_policy_group(spec, is_adaptive, qos_policy_group) def _get_qos_first_policy_group_by_name(self, qos_policy_group_name): records = self._get_qos_policy_group_by_name(qos_policy_group_name) if len(records) == 0: return None return records[0] def _get_qos_policy_group_by_name(self, qos_policy_group_name): query = {'name': qos_policy_group_name} response = self.send_request('/storage/qos/policies/', 'get', query=query) records = response.get('records') if not records: return [] return records def _qos_spec_to_api_args(self, spec, is_adaptive, vserver=None): """Convert a QoS spec to REST args.""" rest_args = {} if is_adaptive: rest_args['adaptive'] = {} if spec.get('absolute_min_iops'): rest_args['adaptive']['absolute_min_iops'] = ( self._sanitize_qos_spec_value( spec.get('absolute_min_iops'))) if spec.get('expected_iops'): rest_args['adaptive']['expected_iops'] = ( self._sanitize_qos_spec_value(spec.get('expected_iops'))) if spec.get('expected_iops_allocation'): rest_args['adaptive']['expected_iops_allocation'] = ( spec.get('expected_iops_allocation')) if spec.get('peak_iops'): rest_args['adaptive']['peak_iops'] = ( self._sanitize_qos_spec_value(spec.get('peak_iops'))) if spec.get('peak_iops_allocation'): rest_args['adaptive']['peak_iops_allocation'] = ( spec.get('peak_iops_allocation')) if spec.get('block_size'): rest_args['adaptive']['block_size'] = ( spec.get('block_size')) else: rest_args['fixed'] = {} qos_max = spec.get('max_throughput') if qos_max and 'iops' in qos_max: rest_args['fixed']['max_throughput_iops'] = ( self._sanitize_qos_spec_value(qos_max)) elif qos_max: # Convert from B/s to MB/s value = math.ceil( self._sanitize_qos_spec_value(qos_max) / (10**6)) rest_args['fixed']['max_throughput_mbps'] = value qos_min = spec.get('min_throughput') if qos_min and 'iops' in qos_min: rest_args['fixed']['min_throughput_iops'] = ( self._sanitize_qos_spec_value(qos_min)) if spec.get('policy_name'): rest_args['name'] = spec.get('policy_name') if spec.get('return_record'): rest_args['return_records'] = spec.get('return_record') if vserver: rest_args['svm'] = {} rest_args['svm']['name'] = vserver return rest_args def _sanitize_qos_spec_value(self, value): value = value.lower() value = value.replace('iops', '').replace('b/s', '') value = int(value) return value def _create_qos_policy_group(self, spec, is_adaptive): """Creates a QoS policy group.""" body = self._qos_spec_to_api_args( spec, is_adaptive, 
vserver=self.vserver) self.send_request('/storage/qos/policies/', 'post', body=body, enable_tunneling=False) def _modify_qos_policy_group(self, spec, is_adaptive, qos_policy_group): """Modifies a QoS policy group.""" body = self._qos_spec_to_api_args(spec, is_adaptive) if qos_policy_group['name'] == body['name']: body.pop('name') self.send_request( f'/storage/qos/policies/{qos_policy_group["uuid"]}', 'patch', body=body, enable_tunneling=False) def get_vol_by_junc_vserver(self, vserver, junction): """Gets the volume by junction path and vserver.""" volume = self._get_volume_by_args(vol_path=junction, vserver=vserver) return volume['name'] def file_assign_qos(self, flex_vol, qos_policy_group_name, qos_policy_group_is_adaptive, file_path): """Assigns the named QoS policy-group to a file.""" volume = self._get_volume_by_args(flex_vol) body = { 'qos_policy.name': qos_policy_group_name } self.send_request( f'/storage/volumes/{volume["uuid"]}/files/{file_path}', 'patch', body=body, enable_tunneling=False) def mark_qos_policy_group_for_deletion(self, qos_policy_group_info, is_adaptive=False): """Soft delete a QoS policy group backing a cinder volume.""" if qos_policy_group_info is None: return spec = qos_policy_group_info.get('spec') # For cDOT we want to delete the QoS policy group that we created for # this cinder volume. Because the QoS policy may still be "in use" # after the zapi call to delete the volume itself returns successfully, # we instead rename the QoS policy group using a specific pattern and # later attempt on a best effort basis to delete any QoS policy groups # matching that pattern. if spec: current_name = spec['policy_name'] new_name = DELETED_PREFIX + current_name try: self._rename_qos_policy_group(current_name, new_name) except netapp_api.NaApiError as ex: LOG.warning('Rename failure in cleanup of cDOT QoS policy ' 'group %(current_name)s: %(ex)s', {'current_name': current_name, 'ex': ex}) # Attempt to delete any QoS policies named "delete-openstack-*". self.remove_unused_qos_policy_groups() def delete_file(self, path_to_file): """Delete file at path.""" LOG.debug('Deleting file: %s', path_to_file) volume_name = path_to_file.split('/')[2] relative_path = '/'.join(path_to_file.split('/')[3:]) volume = self._get_volume_by_args(volume_name) # Path requires "%2E" to represent "." and "%2F" to represent "/". 
relative_path = relative_path.replace('.', '%2E').replace('/', '%2F') self.send_request(f'/storage/volumes/{volume["uuid"]}' + f'/files/{relative_path}', 'delete') def _rename_qos_policy_group(self, qos_policy_group_name, new_name): """Renames a QoS policy group.""" body = {'name': new_name} query = {'name': qos_policy_group_name} self.send_request('/storage/qos/policies/', 'patch', body=body, query=query, enable_tunneling=False) def remove_unused_qos_policy_groups(self): """Deletes all QoS policy groups that are marked for deletion.""" query = {'name': f'{DELETED_PREFIX}*'} self.send_request('/storage/qos/policies', 'delete', query=query) def create_lun(self, volume_name, lun_name, size, metadata, qos_policy_group_name=None, qos_policy_group_is_adaptive=False): """Issues API request for creating LUN on volume.""" self._validate_qos_policy_group(qos_policy_group_is_adaptive) path = f'/vol/{volume_name}/{lun_name}' space_reservation = metadata['SpaceReserved'] space_allocation = metadata['SpaceAllocated'] initial_size = size body = { 'name': path, 'space.size': str(initial_size), 'os_type': metadata['OsType'], 'space.guarantee.requested': space_reservation, 'space.scsi_thin_provisioning_support_enabled': space_allocation } if qos_policy_group_name: body['qos_policy.name'] = qos_policy_group_name try: self.send_request('/storage/luns', 'post', body=body) except netapp_api.NaApiError as ex: with excutils.save_and_reraise_exception(): LOG.error('Error provisioning volume %(lun_name)s on ' '%(volume_name)s. Details: %(ex)s', { 'lun_name': lun_name, 'volume_name': volume_name, 'ex': ex, }) def do_direct_resize(self, path, new_size_bytes, force=True): """Resize the LUN.""" seg = path.split("/") LOG.info('Resizing LUN %s directly to new size.', seg[-1]) body = {'name': path, 'space.size': new_size_bytes} self._lun_update_by_path(path, body) def _get_lun_by_path(self, path, fields=None): query = {'name': path} if fields: query['fields'] = fields response = self.send_request('/storage/luns', 'get', query=query) records = response.get('records', []) return records def _get_first_lun_by_path(self, path, fields=None): records = self._get_lun_by_path(path, fields=fields) if len(records) == 0: return None return records[0] def _lun_update_by_path(self, path, body): """Update the LUN.""" lun = self._get_first_lun_by_path(path) if not lun: raise netapp_api.NaApiError(code=netapp_api.EOBJECTNOTFOUND) self.send_request(f'/storage/luns/{lun["uuid"]}', 'patch', body=body) def _validate_qos_policy_group(self, is_adaptive, spec=None, qos_min_support=False): if is_adaptive and not self.features.ADAPTIVE_QOS: msg = _("Adaptive QoS feature requires ONTAP 9.4 or later.") raise na_utils.NetAppDriverException(msg) if not spec: return if 'min_throughput' in spec and not qos_min_support: msg = 'min_throughput is not supported by this back end.' 
raise na_utils.NetAppDriverException(msg) def get_if_info_by_ip(self, ip): """Gets the network interface info by ip.""" query_args = {} query_args['ip.address'] = volume_utils.resolve_hostname(ip) query_args['fields'] = 'svm' result = self.send_request('/network/ip/interfaces/', 'get', query=query_args, enable_tunneling=False) num_records = result['num_records'] records = result.get('records', []) if num_records == 0: raise exception.NotFound( _('No interface found on cluster for ip %s') % ip) return [{'vserver': item['svm']['name']} for item in records] def get_igroup_by_initiators(self, initiator_list): """Get igroups exactly matching a set of initiators.""" igroup_list = [] if not initiator_list: return igroup_list query = { 'svm.name': self.vserver, 'initiators.name': ','.join(initiator_list), 'fields': 'name,protocol,os_type' } response = self.send_request('/protocols/san/igroups', 'get', query=query) records = response.get('records', []) for igroup_item in records: igroup = {'initiator-group-os-type': igroup_item['os_type'], 'initiator-group-type': igroup_item['protocol'], 'initiator-group-name': igroup_item['name']} igroup_list.append(igroup) return igroup_list def add_igroup_initiator(self, igroup, initiator): """Adds initiators to the specified igroup.""" query_initiator_uuid = { 'name': igroup, 'fields': 'uuid' } response_initiator_uuid = self.send_request( '/protocols/san/igroups/', 'get', query=query_initiator_uuid) response = response_initiator_uuid.get('records', []) if len(response) < 1: msg = _('Could not find igroup initiator.') raise exception.VolumeBackendAPIException(data=msg) igroup_uuid = response[0]['uuid'] body = { 'name': initiator } self.send_request('/protocols/san/igroups/' + igroup_uuid + '/initiators', 'post', body=body) def create_igroup(self, igroup, igroup_type='iscsi', os_type='default'): """Creates igroup with specified args.""" body = { 'name': igroup, 'protocol': igroup_type, 'os_type': os_type, } self.send_request('/protocols/san/igroups', 'post', body=body) def map_lun(self, path, igroup_name, lun_id=None): """Maps LUN to the initiator and returns LUN id assigned.""" body_post = { 'lun.name': path, 'igroup.name': igroup_name, } if lun_id is not None: body_post['logical_unit_number'] = lun_id try: result = self.send_request('/protocols/san/lun-maps', 'post', body=body_post, query={'return_records': 'true'}) records = result.get('records') lun_id_assigned = records[0].get('logical_unit_number') return lun_id_assigned except netapp_api.NaApiError as e: code = e.code message = e.message LOG.warning('Error mapping LUN. 
Code :%(code)s, Message: ' '%(message)s', {'code': code, 'message': message}) raise def get_lun_map(self, path): """Gets the LUN map by LUN path.""" map_list = [] query = { 'lun.name': path, 'fields': 'igroup.name,logical_unit_number,svm.name', } response = self.send_request('/protocols/san/lun-maps', 'get', query=query) num_records = response.get('num_records') records = response.get('records', None) if records is None or num_records is None: return map_list for element in records: map_lun = {} map_lun['initiator-group'] = element['igroup']['name'] map_lun['lun-id'] = element['logical_unit_number'] map_lun['vserver'] = element['svm']['name'] map_list.append(map_lun) return map_list def get_fc_target_wwpns(self): """Gets the FC target details.""" wwpns = [] query = { 'fields': 'wwpn' } response = self.send_request('/network/fc/interfaces', 'get', query=query) records = response.get('records') for record in records: wwpn = record.get('wwpn').lower() wwpns.append(wwpn) return wwpns def unmap_lun(self, path, igroup_name): """Unmaps a LUN from given initiator.""" # get lun amd igroup uuids query_uuid = { 'igroup.name': igroup_name, 'lun.name': path, 'fields': 'lun.uuid,igroup.uuid' } response_uuid = self.send_request( '/protocols/san/lun-maps', 'get', query=query_uuid) if response_uuid['num_records'] > 0: lun_uuid = response_uuid['records'][0]['lun']['uuid'] igroup_uuid = response_uuid['records'][0]['igroup']['uuid'] try: self.send_request( f'/protocols/san/lun-maps/{lun_uuid}/{igroup_uuid}', 'delete') except netapp_api.NaApiError as e: LOG.warning("Error unmapping LUN. Code: %(code)s, Message: " "%(message)s", {'code': e.code, 'message': e.message}) # if the LUN is already unmapped if e.code == netapp_api.REST_NO_SUCH_LUN_MAP: pass else: raise e else: # Input is invalid or LUN may already be unmapped LOG.warning("Error unmapping LUN. 
Invalid input.") def has_luns_mapped_to_initiators(self, initiator_list): """Checks whether any LUNs are mapped to the given initiator(s).""" query = { 'initiators.name': ','.join(initiator_list), 'fields': 'lun_maps' } response = self.send_request('/protocols/san/igroups', 'get', query=query) records = response.get('records', []) if len(records) > 0: for record in records: lun_maps = record.get('lun_maps', []) if len(lun_maps) > 0: return True return False def get_iscsi_service_details(self): """Returns iscsi iqn.""" query = { 'fields': 'target.name' } response = self.send_request( '/protocols/san/iscsi/services', 'get', query=query) records = response.get('records') if records: return records[0]['target']['name'] LOG.debug('No iSCSI service found for vserver %s', self.vserver) return None def check_iscsi_initiator_exists(self, iqn): """Returns True if initiator exists.""" endpoint_url = '/protocols/san/iscsi/credentials' initiator_exists = True try: query = { 'initiator': iqn, } response = self.send_request(endpoint_url, 'get', query=query) records = response.get('records') if not records: initiator_exists = False except netapp_api.NaApiError: initiator_exists = False return initiator_exists def set_iscsi_chap_authentication(self, iqn, username, password): """Provides NetApp host's CHAP credentials to the backend.""" initiator_exists = self.check_iscsi_initiator_exists(iqn) command_template = ('iscsi security %(mode)s -vserver %(vserver)s ' '-initiator-name %(iqn)s -auth-type CHAP ' '-user-name %(username)s') if initiator_exists: LOG.debug('Updating CHAP authentication for %(iqn)s.', {'iqn': iqn}) command = command_template % { 'mode': 'modify', 'vserver': self.vserver, 'iqn': iqn, 'username': username, } else: LOG.debug('Adding initiator %(iqn)s with CHAP authentication.', {'iqn': iqn}) command = command_template % { 'mode': 'create', 'vserver': self.vserver, 'iqn': iqn, 'username': username, } try: with self.ssh_client.ssh_connect_semaphore: ssh_pool = self.ssh_client.ssh_pool with ssh_pool.item() as ssh: self.ssh_client.execute_command_with_prompt(ssh, command, 'Password:', password) except Exception as e: msg = _('Failed to set CHAP authentication for target IQN %(iqn)s.' ' Details: %(ex)s') % { 'iqn': iqn, 'ex': e, } LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) def get_iscsi_target_details(self): """Gets the iSCSI target portal details.""" query = { 'services': 'data_iscsi', 'fields': 'ip.address,enabled' } response = self.send_request('/network/ip/interfaces', 'get', query=query) target_list = [] records = response.get('records', []) for record in records: details = dict() details['address'] = record['ip']['address'] details['tpgroup-tag'] = None details['interface-enabled'] = record['enabled'] # NOTE(nahimsouza): from ONTAP documentation: # ONTAP does not support changing the port number for iSCSI. # Port number 3260 is registered as part of the iSCSI specification # and cannot be used by any other application or service. 
details['port'] = 3260 target_list.append(details) return target_list def move_lun(self, path, new_path): """Moves the LUN at path to new path.""" seg = path.split("/") new_seg = new_path.split("/") LOG.debug("Moving LUN %(name)s to %(new_name)s.", {'name': seg[-1], 'new_name': new_seg[-1]}) query = { 'svm.name': self.vserver, 'name': path } body = { 'name': new_path, } self.send_request('/storage/luns/', 'patch', query=query, body=body) def clone_file(self, flex_vol, src_path, dest_path, vserver, dest_exists=False, source_snapshot=None, is_snapshot=False): """Clones file on vserver.""" LOG.debug('Cloning file - volume %(flex_vol)s, src %(src_path)s, ' 'dest %(dest_path)s, vserver %(vserver)s,' 'source_snapshot %(source_snapshot)s', { 'flex_vol': flex_vol, 'src_path': src_path, 'dest_path': dest_path, 'vserver': vserver, 'source_snapshot': source_snapshot, }) volume = self._get_volume_by_args(flex_vol) body = { 'volume': { 'uuid': volume['uuid'], 'name': volume['name'] }, 'source_path': src_path, 'destination_path': dest_path, } if is_snapshot and self.features.BACKUP_CLONE_PARAM: body['is_backup'] = True if dest_exists: body['overwrite_destination'] = True self.send_request('/storage/file/clone', 'post', body=body) def clone_lun(self, volume, name, new_name, space_reserved='true', qos_policy_group_name=None, src_block=0, dest_block=0, block_count=0, source_snapshot=None, is_snapshot=False, qos_policy_group_is_adaptive=False): """Clones lun on vserver.""" LOG.debug('Cloning lun - volume: %(volume)s, name: %(name)s, ' 'new_name: %(new_name)s, space_reserved: %(space_reserved)s,' ' qos_policy_group_name: %(qos_policy_group_name)s', { 'volume': volume, 'name': name, 'new_name': new_name, 'space_reserved': space_reserved, 'qos_policy_group_name': qos_policy_group_name, }) # NOTE(nahimsouza): some parameters are not available on REST API, # but they are in the header just to keep compatilbility with ZAPI: # src_block, dest_block, block_count, is_snapshot self._validate_qos_policy_group(qos_policy_group_is_adaptive) source_path = f'/vol/{volume}' if source_snapshot: source_path += f'/.snapshot/{source_snapshot}' source_path += f'/{name}' body = { 'svm': { 'name': self.vserver }, 'name': f'/vol/{volume}/{new_name}', 'clone': { 'source': { 'name': source_path, } }, 'space': { 'guarantee': { 'requested': space_reserved == 'true', } } } if qos_policy_group_name: body['qos_policy'] = {'name': qos_policy_group_name} self.send_request('/storage/luns', 'post', body=body) def destroy_lun(self, path, force=True): """Destroys the LUN at the path.""" query = {} query['name'] = path query['svm'] = self.vserver if force: query['allow_delete_while_mapped'] = 'true' self.send_request('/storage/luns/', 'delete', query=query) def get_flexvol_capacity(self, flexvol_path=None, flexvol_name=None): """Gets total capacity and free capacity, in bytes, of the flexvol.""" fields = 'name,space.available,space.afs_total' try: volume = self._get_volume_by_args( vol_name=flexvol_name, vol_path=flexvol_path, fields=fields) capacity = { 'size-total': float(volume['space']['afs_total']), 'size-available': float(volume['space']['available']), } return capacity except exception.VolumeBackendAPIException: msg = _('Volume %s not found.') msg_args = flexvol_path or flexvol_name raise na_utils.NetAppDriverException(msg % msg_args) def get_provisioning_options_from_flexvol(self, flexvol_name): """Get a dict of provisioning options matching existing flexvol.""" flexvol_info = self.get_flexvol(flexvol_name=flexvol_name) dedupe_info = 
self.get_flexvol_dedupe_info(flexvol_name) provisioning_opts = { 'aggregate': flexvol_info['aggregate'], # space-guarantee can be 'none', 'file', 'volume' 'space_guarantee_type': flexvol_info.get('space-guarantee'), 'snapshot_policy': flexvol_info['snapshot-policy'], 'language': flexvol_info['language'], 'dedupe_enabled': dedupe_info['dedupe'], 'compression_enabled': dedupe_info['compression'], 'snapshot_reserve': flexvol_info['percentage-snapshot-reserve'], 'volume_type': flexvol_info['type'], 'size': int(math.ceil(float(flexvol_info['size']) / units.Gi)), 'is_flexgroup': flexvol_info['style-extended'] == 'flexgroup', } return provisioning_opts def flexvol_exists(self, volume_name): """Checks if a flexvol exists on the storage array.""" LOG.debug('Checking if volume %s exists', volume_name) query = { 'name': volume_name, 'return_records': 'false' } response = self.send_request('/storage/volumes/', 'get', query=query) return response['num_records'] > 0 def create_volume_async(self, name, aggregate_list, size_gb, space_guarantee_type=None, snapshot_policy=None, language=None, dedupe_enabled=False, compression_enabled=False, snapshot_reserve=None, volume_type='rw'): """Creates a volume asynchronously.""" body = { 'name': name, 'size': size_gb * units.Gi, 'type': volume_type, } if isinstance(aggregate_list, list): body['style'] = 'flexgroup' body['aggregates'] = [{'name': aggr} for aggr in aggregate_list] else: body['style'] = 'flexvol' body['aggregates'] = [{'name': aggregate_list}] if volume_type == 'dp': snapshot_policy = None else: body['nas'] = {'path': '/%s' % name} if snapshot_policy is not None: body['snapshot_policy'] = {'name': snapshot_policy} if space_guarantee_type: body['guarantee'] = {'type': space_guarantee_type} if language is not None: body['language'] = language if snapshot_reserve is not None: body['space'] = { 'snapshot': { 'reserve_percent': str(snapshot_reserve) } } # cDOT compression requires that deduplication be enabled. 
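# Illustrative sketch (not part of the driver): when both flags are set,
# the request body built below gains an efficiency section roughly like
#     body['efficiency'] = {'dedupe': 'background', 'compression': 'background'}
# i.e. compression is only ever requested together with deduplication,
# matching the cDOT constraint noted above.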
if dedupe_enabled or compression_enabled: body['efficiency'] = {'dedupe': 'background'} if compression_enabled: body['efficiency']['compression'] = 'background' response = self.send_request('/storage/volumes/', 'post', body=body, wait_on_accepted=False) job_info = { 'status': None, 'jobid': response["job"]["uuid"], 'error-code': None, 'error-message': None, } return job_info def create_flexvol(self, flexvol_name, aggregate_name, size_gb, space_guarantee_type=None, snapshot_policy=None, language=None, dedupe_enabled=False, compression_enabled=False, snapshot_reserve=None, volume_type='rw'): """Creates a flexvol asynchronously and return the job info.""" return self.create_volume_async( flexvol_name, aggregate_name, size_gb, space_guarantee_type=space_guarantee_type, snapshot_policy=snapshot_policy, language=language, dedupe_enabled=dedupe_enabled, compression_enabled=compression_enabled, snapshot_reserve=snapshot_reserve, volume_type=volume_type) def enable_volume_dedupe_async(self, volume_name): """Enable deduplication on FlexVol/FlexGroup volume asynchronously.""" query = { 'name': volume_name, 'fields': 'uuid,style', } body = { 'efficiency': {'dedupe': 'background'} } self.send_request('/storage/volumes/', 'patch', body=body, query=query, wait_on_accepted=False) def enable_volume_compression_async(self, volume_name): """Enable compression on FlexVol/FlexGroup volume asynchronously.""" query = { 'name': volume_name } body = { 'efficiency': {'compression': 'background'} } self.send_request('/storage/volumes/', 'patch', body=body, query=query, wait_on_accepted=False) def _parse_lagtime(self, time_str): """Parse lagtime string (ISO 8601) into a number of seconds.""" fmt_str = 'PT' if 'H' in time_str: fmt_str += '%HH' if 'M' in time_str: fmt_str += '%MM' if 'S' in time_str: fmt_str += '%SS' t = None try: t = datetime.strptime(time_str, fmt_str) except Exception: LOG.debug("Failed to parse lagtime: %s", time_str) raise # convert to timedelta to get the total seconds td = timedelta(hours=t.hour, minutes=t.minute, seconds=t.second) return td.total_seconds() def _get_snapmirrors(self, source_vserver=None, source_volume=None, destination_vserver=None, destination_volume=None): fields = ['state', 'source.svm.name', 'source.path', 'destination.svm.name', 'destination.path', 'transfer.state', 'transfer.end_time', 'lag_time', 'healthy', 'uuid'] query = {} query['fields'] = '{}'.format(','.join(f for f in fields)) query_src_vol = source_volume if source_volume else '*' query_src_vserver = source_vserver if source_vserver else '*' query['source.path'] = query_src_vserver + ':' + query_src_vol query_dst_vol = destination_volume if destination_volume else '*' query_dst_vserver = destination_vserver if destination_vserver else '*' query['destination.path'] = query_dst_vserver + ':' + query_dst_vol response = self.send_request( '/snapmirror/relationships', 'get', query=query) snapmirrors = [] for record in response.get('records', []): snapmirrors.append({ 'relationship-status': ( 'idle' if record.get('state') == 'snapmirrored' else record.get('state')), 'transferring-state': record.get('transfer', {}).get('state'), 'mirror-state': record['state'], 'source-vserver': record['source']['svm']['name'], 'source-volume': (record['source']['path'].split(':')[1] if record.get('source') else None), 'destination-vserver': record['destination']['svm']['name'], 'destination-volume': ( record['destination']['path'].split(':')[1] if record.get('destination') else None), 'last-transfer-end-timestamp': 
(record['transfer']['end_time'] if record.get('transfer', {}).get('end_time') else None), 'lag-time': (self._parse_lagtime(record['lag_time']) if record.get('lag_time') else None), 'is-healthy': record['healthy'], 'uuid': record['uuid'] }) return snapmirrors def get_snapmirrors(self, source_vserver, source_volume, destination_vserver, destination_volume, desired_attributes=None): """Gets one or more SnapMirror relationships. Either the source or destination info may be omitted. The desired_attributes argument exists only to keep the signature consistent with the ZAPI client and has no effect on the output. """ snapmirrors = self._get_snapmirrors( source_vserver=source_vserver, source_volume=source_volume, destination_vserver=destination_vserver, destination_volume=destination_volume) return snapmirrors def create_ontap_consistency_group(self, source_vserver, source_volume, source_cg): """Creates an ONTAP consistency group.""" body = { 'svm': { 'name': source_vserver }, 'name': source_cg, 'volumes': [{ 'name': source_volume, "provisioning_options": {"action": "add"} }] } try: self.send_request('/application/consistency-groups/', 'post', body=body) except netapp_api.NaApiError as e: if e.code != netapp_api.REST_ERELATION_EXISTS: raise e def create_snapmirror(self, source_vserver, source_volume, destination_vserver, destination_volume, source_cg=None, destination_cg=None, schedule=None, policy=None, relationship_type='data_protection'): """Creates a SnapMirror relationship. The schedule and relationship_type arguments are kept to avoid breaking the API used by data_motion, but they are not used by the REST API. The schedule is part of the policy associated with the relationship, and the relationship_type is ignored because XDP is the only type supported through the REST API. """ if source_cg is not None: body = { 'source': { 'path': source_vserver + ':/cg/' + source_cg, 'consistency_group_volumes': [{'name': source_volume}] }, 'destination': { 'path': destination_vserver + ':/cg/' + destination_cg, 'consistency_group_volumes': [{'name': destination_volume}] } } else: body = { 'source': { 'path': source_vserver + ':' + source_volume }, 'destination': { 'path': destination_vserver + ':' + destination_volume } } if policy: body['policy'] = {'name': policy} try: self.send_request('/snapmirror/relationships/', 'post', body=body) except netapp_api.NaApiError as e: if e.code != netapp_api.REST_ERELATION_EXISTS: raise e def _set_snapmirror_state(self, state, source_vserver, source_volume, destination_vserver, destination_volume, wait_result=True): """Changes the SnapMirror state between two volumes.""" snapmirror = self.get_snapmirrors(source_vserver, source_volume, destination_vserver, destination_volume) if not snapmirror: msg = _('Failed to get information about relationship between ' 'source %(src_vserver)s:%(src_volume)s and ' 'destination %(dst_vserver)s:%(dst_volume)s.') % { 'src_vserver': source_vserver, 'src_volume': source_volume, 'dst_vserver': destination_vserver, 'dst_volume': destination_volume} raise na_utils.NetAppDriverException(msg) uuid = snapmirror[0]['uuid'] body = {'state': state} result = self.send_request('/snapmirror/relationships/' + uuid, 'patch', body=body, wait_on_accepted=wait_result) job_info = { 'operation-id': None, 'status': None, 'jobid': result.get('job', {}).get('uuid'), 'error-code': None, 'error-message': None, 'relationship-uuid': uuid, } return job_info def initialize_snapmirror(self, source_vserver, source_volume, destination_vserver, destination_volume, active_sync_policy=False, source_snapshot=None,
transfer_priority=None): """Initializes a SnapMirror relationship.""" # TODO: Trigger a geometry exception to be caught by data_motion. # This error is raised when using ZAPI with different volume component # numbers, but in REST, the job must be checked sometimes before that # error occurs. state = 'snapmirrored' if active_sync_policy: state = 'in_sync' return self._set_snapmirror_state( state, source_vserver, source_volume, destination_vserver, destination_volume, wait_result=False) def abort_snapmirror(self, source_vserver, source_volume, destination_vserver, destination_volume, clear_checkpoint=False): """Stops ongoing transfers for a SnapMirror relationship.""" snapmirror = self.get_snapmirrors(source_vserver, source_volume, destination_vserver, destination_volume) if not snapmirror: msg = _('Failed to get information about relationship between ' 'source %(src_vserver)s:%(src_volume)s and ' 'destination %(dst_vserver)s:%(dst_volume)s.') % { 'src_vserver': source_vserver, 'src_volume': source_volume, 'dst_vserver': destination_vserver, 'dst_volume': destination_volume} raise na_utils.NetAppDriverException(msg) snapmirror_uuid = snapmirror[0]['uuid'] query = {'state': 'transferring'} transfers = self.send_request('/snapmirror/relationships/' + snapmirror_uuid + '/transfers/', 'get', query=query) if not transfers.get('records'): raise netapp_api.NaApiError( code=netapp_api.ENOTRANSFER_IN_PROGRESS) body = {'state': 'hard_aborted' if clear_checkpoint else 'aborted'} for transfer in transfers['records']: transfer_uuid = transfer['uuid'] self.send_request('/snapmirror/relationships/' + snapmirror_uuid + '/transfers/' + transfer_uuid, 'patch', body=body) def delete_snapmirror(self, source_vserver, source_volume, destination_vserver, destination_volume): """Deletes an SnapMirror relationship on destination.""" query_uuid = {} query_uuid['source.path'] = source_vserver + ':' + source_volume query_uuid['destination.path'] = (destination_vserver + ':' + destination_volume) query_uuid['fields'] = 'uuid' response = self.send_request('/snapmirror/relationships/', 'get', query=query_uuid) records = response.get('records') if not records: raise netapp_api.NaApiError(code=netapp_api.EOBJECTNOTFOUND) # 'destination_only' deletes the snapmirror on destination but does not # release it on source. 
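# Illustrative sketch (not part of the driver): for the relationship UUID
# looked up above, the request issued below is equivalent to
#     DELETE /snapmirror/relationships/<uuid>?destination_only=true
# which removes only the destination-side relationship and leaves the
# source-side relationship metadata untouched.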
query_delete = {"destination_only": "true"} snapmirror_uuid = records[0].get('uuid') self.send_request('/snapmirror/relationships/' + snapmirror_uuid, 'delete', query=query_delete) def resume_snapmirror(self, source_vserver, source_volume, destination_vserver, destination_volume): """Resume a SnapMirror relationship.""" query_uuid = {} query_uuid['source.path'] = source_vserver + ':' + source_volume query_uuid['destination.path'] = (destination_vserver + ':' + destination_volume) query_uuid['fields'] = 'uuid,policy.type' response_snapmirrors = self.send_request('/snapmirror/relationships/', 'get', query=query_uuid) records = response_snapmirrors.get('records') if not records: raise netapp_api.NaApiError(code=netapp_api.EOBJECTNOTFOUND) snapmirror_uuid = records[0]['uuid'] snapmirror_policy = records[0]['policy']['type'] body_resync = {} if snapmirror_policy == 'async': body_resync['state'] = 'snapmirrored' elif snapmirror_policy == 'sync': body_resync['state'] = 'in_sync' self.send_request('/snapmirror/relationships/' + snapmirror_uuid, 'patch', body=body_resync) def release_snapmirror(self, source_vserver, source_volume, destination_vserver, destination_volume, relationship_info_only=False): """Removes a SnapMirror relationship on the source endpoint.""" query_uuid = {} query_uuid['list_destinations_only'] = 'true' query_uuid['source.path'] = source_vserver + ':' + source_volume query_uuid['destination.path'] = (destination_vserver + ':' + destination_volume) query_uuid['fields'] = 'uuid' response_snapmirrors = self.send_request('/snapmirror/relationships/', 'get', query=query_uuid) records = response_snapmirrors.get('records') if not records: raise netapp_api.NaApiError(code=netapp_api.EOBJECTNOTFOUND) query_release = {} if relationship_info_only: # release without removing related snapshots query_release['source_info_only'] = 'true' else: # release and removing all related snapshots query_release['source_only'] = 'true' snapmirror_uuid = records[0].get('uuid') self.send_request('/snapmirror/relationships/' + snapmirror_uuid, 'delete', query=query_release) def resync_snapmirror(self, source_vserver, source_volume, destination_vserver, destination_volume): """Resync a SnapMirror relationship.""" # We reuse the resume operation for resync since both are handled in # the same way in the REST API, by setting the snapmirror relationship # to the snapmirrored state. 
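# Illustrative sketch (not part of the driver): resume_snapmirror() above
# looks up the relationship UUID and policy type, then issues a PATCH with
# a body of
#     {'state': 'snapmirrored'}   # async policies
# or
#     {'state': 'in_sync'}        # sync policies
# which is the same state change a resync requires.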
self.resume_snapmirror(source_vserver, source_volume, destination_vserver, destination_volume) def quiesce_snapmirror(self, source_vserver, source_volume, destination_vserver, destination_volume): """Disables future transfers to a SnapMirror destination.""" return self._set_snapmirror_state( 'paused', source_vserver, source_volume, destination_vserver, destination_volume) def break_snapmirror(self, source_vserver, source_volume, destination_vserver, destination_volume): """Breaks a data protection SnapMirror relationship.""" interval = 2 retries = (10 / interval) @utils.retry(netapp_api.NaRetryableError, interval=interval, retries=retries, backoff_rate=1) def _waiter(): snapmirror = self.get_snapmirrors( source_vserver=source_vserver, source_volume=source_volume, destination_vserver=destination_vserver, destination_volume=destination_volume) snapmirror_state = None if snapmirror: snapmirror_state = snapmirror[0].get('transferring-state') if snapmirror_state == 'success': uuid = snapmirror[0]['uuid'] body = {'state': 'broken_off'} self.send_request(f'/snapmirror/relationships/{uuid}', 'patch', body=body) return else: message = 'Waiting for transfer state to be SUCCESS.' code = '' raise netapp_api.NaRetryableError(message=message, code=code) try: return _waiter() except netapp_api.NaRetryableError: msg = _("Transfer state did not reach the expected state. Retries " "exhausted. Aborting.") raise na_utils.NetAppDriverException(msg) def update_snapmirror(self, source_vserver, source_volume, destination_vserver, destination_volume): """Schedules a SnapMirror update.""" snapmirror = self.get_snapmirrors(source_vserver, source_volume, destination_vserver, destination_volume) if not snapmirror: msg = _('Failed to get information about relationship between ' 'source %(src_vserver)s:%(src_volume)s and ' 'destination %(dst_vserver)s:%(dst_volume)s.') % { 'src_vserver': source_vserver, 'src_volume': source_volume, 'dst_vserver': destination_vserver, 'dst_volume': destination_volume} raise na_utils.NetAppDriverException(msg) snapmirror_uuid = snapmirror[0]['uuid'] # NOTE(nahimsouza): A POST with an empty body starts the update # snapmirror operation. try: self.send_request('/snapmirror/relationships/' + snapmirror_uuid + '/transfers/', 'post', wait_on_accepted=False) except netapp_api.NaApiError as e: if (e.code != netapp_api.REST_UPDATE_SNAPMIRROR_FAILED): LOG.warning('Unexpected failure during snapmirror update.' 
'Code: %(code)s, Message: %(message)s', {'code': e.code, 'message': e.message}) raise def mount_flexvol(self, flexvol_name, junction_path=None): """Mounts a volume on a junction path.""" query = {'name': flexvol_name} body = {'nas.path': ( junction_path if junction_path else '/%s' % flexvol_name)} self.send_request('/storage/volumes', 'patch', query=query, body=body) def get_cluster_name(self): """Gets cluster name.""" query = {'fields': 'name'} response = self.send_request('/cluster', 'get', query=query, enable_tunneling=False) return response['name'] def get_vserver_peers(self, vserver_name=None, peer_vserver_name=None): """Gets one or more Vserver peer relationships.""" query = { 'fields': 'svm.name,state,peer.svm.name,peer.cluster.name,' 'applications' } if peer_vserver_name: query['name'] = peer_vserver_name if vserver_name: query['svm.name'] = vserver_name response = self.send_request('/svm/peers', 'get', query=query, enable_tunneling=False) records = response.get('records', []) vserver_peers = [] for vserver_info in records: vserver_peer = { 'vserver': vserver_info['svm']['name'], 'peer-vserver': vserver_info['peer']['svm']['name'], 'peer-state': vserver_info['state'], 'peer-cluster': vserver_info['peer']['cluster']['name'], 'applications': vserver_info['applications'], } vserver_peers.append(vserver_peer) return vserver_peers def create_vserver_peer(self, vserver_name, peer_vserver_name, vserver_peer_application=None): """Creates a Vserver peer relationship.""" # default peering application to `snapmirror` if none is specified. if not vserver_peer_application: vserver_peer_application = ['snapmirror'] body = { 'svm.name': vserver_name, 'name': peer_vserver_name, 'applications': vserver_peer_application } self.send_request('/svm/peers', 'post', body=body, enable_tunneling=False) def start_lun_move(self, lun_name, dest_ontap_volume, src_ontap_volume=None, dest_lun_name=None): """Starts a lun move operation between ONTAP volumes.""" if dest_lun_name is None: dest_lun_name = lun_name if src_ontap_volume is None: src_ontap_volume = dest_ontap_volume src_path = f'/vol/{src_ontap_volume}/{lun_name}' dest_path = f'/vol/{dest_ontap_volume}/{dest_lun_name}' body = {'name': dest_path} self._lun_update_by_path(src_path, body) return dest_path def get_lun_move_status(self, dest_path): """Get lun move job status from a given dest_path.""" lun = self._get_first_lun_by_path( dest_path, fields='movement.progress') if not lun: return None move_progress = lun['movement']['progress'] move_status = { 'job-status': move_progress['state'], 'last-failure-reason': (move_progress .get('failure', {}) .get('message', None)) } return move_status def start_lun_copy(self, lun_name, dest_ontap_volume, dest_vserver, src_ontap_volume=None, src_vserver=None, dest_lun_name=None): """Starts a lun copy operation between ONTAP volumes.""" if src_ontap_volume is None: src_ontap_volume = dest_ontap_volume if src_vserver is None: src_vserver = dest_vserver if dest_lun_name is None: dest_lun_name = lun_name src_path = f'/vol/{src_ontap_volume}/{lun_name}' dest_path = f'/vol/{dest_ontap_volume}/{dest_lun_name}' body = { 'name': dest_path, 'copy.source.name': src_path, 'svm.name': dest_vserver } self.send_request('/storage/luns', 'post', body=body, enable_tunneling=False) return dest_path def get_lun_copy_status(self, dest_path): """Get lun copy job status from a given dest_path.""" lun = self._get_first_lun_by_path( dest_path, fields='copy.source.progress') if not lun: return None copy_progress = 
lun['copy']['source']['progress'] copy_status = { 'job-status': copy_progress['state'], 'last-failure-reason': (copy_progress .get('failure', {}) .get('message', None)) } return copy_status def cancel_lun_copy(self, dest_path): """Cancel an in-progress lun copy by deleting the lun.""" query = { 'name': dest_path, 'svm.name': self.vserver } try: self.send_request('/storage/luns/', 'delete', query=query) except netapp_api.NaApiError as e: msg = (_('Could not cancel lun copy by deleting lun at %s. %s')) raise na_utils.NetAppDriverException(msg % (dest_path, e)) def start_file_copy(self, file_name, dest_ontap_volume, src_ontap_volume=None, dest_file_name=None): """Starts a file copy operation between ONTAP volumes.""" if src_ontap_volume is None: src_ontap_volume = dest_ontap_volume if dest_file_name is None: dest_file_name = file_name source_vol = self._get_volume_by_args(src_ontap_volume) dest_vol = source_vol if dest_ontap_volume != src_ontap_volume: dest_vol = self._get_volume_by_args(dest_ontap_volume) body = { 'files_to_copy': [ { 'source': { 'path': f'{src_ontap_volume}/{file_name}', 'volume': { 'uuid': source_vol['uuid'] } }, 'destination': { 'path': f'{dest_ontap_volume}/{dest_file_name}', 'volume': { 'uuid': dest_vol['uuid'] } } } ] } result = self.send_request('/storage/file/copy', 'post', body=body, enable_tunneling=False) return result['job']['uuid'] def get_file_copy_status(self, job_uuid): """Get file copy job status from a given job's UUID.""" # TODO(rfluisa): Select only the fields that are needed here. query = {} query['fields'] = '*' result = self.send_request( f'/cluster/jobs/{job_uuid}', 'get', query=query, enable_tunneling=False) if not result or not result.get('state', None): return None state = result.get('state') if state == 'success': state = 'complete' elif state == 'failure': state = 'destroyed' copy_status = { 'job-status': state, 'last-failure-reason': result.get('error', {}).get('message', None) } return copy_status def rename_file(self, orig_file_name, new_file_name): """Rename a volume file.""" LOG.debug("Renaming the file %(original)s to %(new)s.", {'original': orig_file_name, 'new': new_file_name}) unique_volume = self._get_volume_by_args( vol_name=orig_file_name.split('/')[2]) # Get the relative path orig_file_name = '/'.join(orig_file_name.split('/')[3:]) new_file_name = '/'.join(new_file_name.split('/')[3:]) # Path requires "%2E" to represent "." and "%2F" to represent "/". orig_file_name = orig_file_name.replace('.', '%2E').replace('/', '%2F') new_file_name = new_file_name.replace('.', '%2E').replace('/', '%2F') body = {'path': new_file_name} self.send_request( f'/storage/volumes/{unique_volume["uuid"]}/files/{orig_file_name}', 'patch', body=body) def get_namespace_list(self): """Gets the list of namespaces on filer. Gets the namespaces from cluster with vserver. 
""" query = { 'svm.name': self.vserver, 'fields': 'svm.name,location.volume.name,space.size,' 'location.qtree.name,name,os_type,' 'space.guarantee.requested,uuid' } response = self.send_request( '/storage/namespaces/', 'get', query=query) namespace_list = [] for namespace in response.get('records', []): namespace_info = {} namespace_info['Vserver'] = namespace['svm']['name'] namespace_info['Volume'] = namespace['location']['volume']['name'] namespace_info['Size'] = namespace['space']['size'] namespace_info['Qtree'] = ( namespace['location'].get('qtree', {}).get('name', '')) namespace_info['Path'] = namespace['name'] namespace_info['OsType'] = namespace['os_type'] namespace_info['SpaceReserved'] = ( namespace['space']['guarantee']['requested']) namespace_info['UUID'] = namespace['uuid'] namespace_list.append(namespace_info) return namespace_list def create_namespace(self, volume_name, namespace_name, size, metadata): """Issues API request for creating namespace on volume.""" path = f'/vol/{volume_name}/{namespace_name}' initial_size = size body = { 'name': path, 'space.size': str(initial_size), 'os_type': metadata['OsType'], } try: self.send_request('/storage/namespaces', 'post', body=body) except netapp_api.NaApiError as ex: with excutils.save_and_reraise_exception(): LOG.error('Error provisioning volume %(namespace_name)s on ' '%(volume_name)s. Details: %(ex)s', { 'namespace_name': namespace_name, 'volume_name': volume_name, 'ex': ex, }) def destroy_namespace(self, path, force=True): """Destroys the namespace at the path.""" query = { 'name': path, 'svm': self.vserver } if force: query['allow_delete_while_mapped'] = 'true' self.send_request('/storage/namespaces', 'delete', query=query) def clone_namespace(self, volume, name, new_name): """Clones namespace on vserver.""" LOG.debug('Cloning namespace - volume: %(volume)s, name: %(name)s, ' 'new_name: %(new_name)s', { 'volume': volume, 'name': name, 'new_name': new_name, }) source_path = f'/vol/{volume}/{name}' body = { 'svm': { 'name': self.vserver }, 'name': f'/vol/{volume}/{new_name}', 'clone': { 'source': { 'name': source_path, } } } self.send_request('/storage/namespaces', 'post', body=body) def get_namespace_by_args(self, **namespace_info_args): """Retrieves namespace with specified args.""" query = { 'fields': 'svm.name,location.volume.name,space.size,' 'location.qtree.name,name,os_type,' 'space.guarantee.requested,uuid,space.block_size' } if namespace_info_args: if 'vserver' in namespace_info_args: query['svm.name'] = namespace_info_args['vserver'] if 'path' in namespace_info_args: query['name'] = namespace_info_args['path'] if 'uuid' in namespace_info_args: query['uuid'] = namespace_info_args['uuid'] response = self.send_request('/storage/namespaces', 'get', query=query) namespace_list = [] for namespace in response.get('records', []): namespace_info = {} namespace_info['Vserver'] = namespace['svm']['name'] namespace_info['Volume'] = namespace['location']['volume']['name'] namespace_info['Size'] = namespace['space']['size'] namespace_info['Qtree'] = ( namespace['location'].get('qtree', {}).get('name', '')) namespace_info['Path'] = namespace['name'] namespace_info['OsType'] = namespace['os_type'] namespace_info['SpaceReserved'] = ( namespace['space']['guarantee']['requested']) namespace_info['UUID'] = namespace['uuid'] namespace_info['BlockSize'] = namespace['space']['block_size'] namespace_list.append(namespace_info) return namespace_list def namespace_resize(self, path, new_size_bytes): """Resize the namespace.""" seg = 
path.split("/") LOG.info('Resizing namespace %s to new size.', seg[-1]) body = {'space.size': new_size_bytes} query = {'name': path} self.send_request('/storage/namespaces', 'patch', body=body, query=query) def get_namespace_sizes_by_volume(self, volume_name): """"Gets the list of namespace and their sizes from a given volume.""" query = { 'location.volume.name': volume_name, 'svm.name': self.vserver, 'fields': 'space.size,name' } response = self.send_request('/storage/namespaces', 'get', query=query) namespaces = [] for namespace_info in response.get('records', []): namespaces.append({ 'path': namespace_info.get('name', ''), 'size': float(namespace_info.get('space', {}).get('size', 0)) }) return namespaces def get_subsystem_by_host(self, host_nqn): """Get subsystem exactly matching the initiator host.""" query = { 'svm.name': self.vserver, 'hosts.nqn': host_nqn, 'fields': 'name,os_type', 'name': f'{na_utils.OPENSTACK_PREFIX}*', } response = self.send_request('/protocols/nvme/subsystems', 'get', query=query) records = response.get('records', []) return [{'name': subsystem['name'], 'os_type': subsystem['os_type']} for subsystem in records] def get_subsystem_by_path(self, path): """Get subsystem by its namespace path.""" query = { 'svm.name': self.vserver, 'subsystem_maps.namespace.name': path, 'fields': 'name,os_type', 'name': f'{na_utils.OPENSTACK_PREFIX}*', } response = self.send_request('/protocols/nvme/subsystems', 'get', query=query) records = response.get('records', []) return [{'name': subsystem['name'], 'os_type': subsystem['os_type']} for subsystem in records] def create_subsystem(self, subsystem_name, os_type, host_nqn): """Creates subsystem with specified args.""" body = { 'svm.name': self.vserver, 'name': subsystem_name, 'os_type': os_type, 'hosts': [{'nqn': host_nqn}] } self.send_request('/protocols/nvme/subsystems', 'post', body=body) def get_namespace_map(self, path): """Gets the namespace map using its path.""" query = { 'namespace.name': path, 'fields': 'subsystem.name,namespace.uuid,svm.name,subsystem.uuid', } response = self.send_request('/protocols/nvme/subsystem-maps', 'get', query=query) records = response.get('records', []) map_list = [] for map in records: map_subsystem = {} map_subsystem['subsystem'] = map['subsystem']['name'] map_subsystem['subsystem_uuid'] = map['subsystem']['uuid'] map_subsystem['uuid'] = map['namespace']['uuid'] map_subsystem['vserver'] = map['svm']['name'] map_list.append(map_subsystem) return map_list def map_namespace(self, path, subsystem_name): """Maps namespace to the host nqn and returns namespace uuid.""" body_post = { 'namespace.name': path, 'subsystem.name': subsystem_name } try: result = self.send_request('/protocols/nvme/subsystem-maps', 'post', body=body_post, query={'return_records': 'true'}) records = result.get('records') namespace_uuid = records[0]['namespace']['uuid'] return namespace_uuid except netapp_api.NaApiError as e: code = e.code message = e.message LOG.warning('Error mapping namespace. 
Code :%(code)s, Message: ' '%(message)s', {'code': code, 'message': message}) raise def get_nvme_subsystem_nqn(self, subsystem): """Returns target subsystem nqn.""" query = { 'fields': 'target_nqn', 'name': subsystem, 'svm.name': self.vserver } response = self.send_request( '/protocols/nvme/subsystems', 'get', query=query) records = response.get('records', []) if records: return records[0]['target_nqn'] LOG.debug('No %(subsystem)s NVMe subsystem found for vserver ' '%(vserver)s', {'subsystem': subsystem, 'vserver': self.vserver}) return None def get_nvme_target_portals(self): """Gets the NVMe target portal details.""" query = { 'services': 'data_nvme_tcp', 'fields': 'ip.address', 'enabled': 'true', } response = self.send_request('/network/ip/interfaces', 'get', query=query) interfaces = response.get('records', []) return [record['ip']['address'] for record in interfaces] def unmap_namespace(self, path, subsystem): """Unmaps a namespace from given subsystem.""" query = { 'subsystem.name': subsystem, 'namespace.name': path } self.send_request('/protocols/nvme/subsystem-maps', 'delete', query=query) def unmap_host_with_subsystem(self, host_nqn, subsystem_uuid): """Unmaps a host from given subsystem. In multiattach and live migration scenarios,it is possible that the host is attached to single namespace from different subsystems and repeated unmapping to subsystem to host is possible. Errors are logged but not propagated. Calling code will proceed even if unmapping fails. """ url = f'/protocols/nvme/subsystems/{subsystem_uuid}/hosts/{host_nqn}' try: self.send_request(url, 'delete') except netapp_api.NaApiError as e: LOG.warning( "Failed to unmap host from subsystem. " "Host NQN: %(host_nqn)s, Subsystem UUID: %(subsystem_uuid)s, " "Error Code: %(code)s, Error Message: %(message)s", {'host_nqn': host_nqn, 'subsystem_uuid': subsystem_uuid, 'code': e.code, 'message': e.message} ) def map_host_with_subsystem(self, host_nqn, subsystem_uuid): """Add host nqn to the subsystem""" body_post = { 'nqn': host_nqn, } try: self.send_request( f'/protocols/nvme/subsystems/{subsystem_uuid}/hosts', 'post', body=body_post ) except netapp_api.NaApiError as e: code = e.code message = e.message if e.code == netapp_api.REST_HOST_ALREADY_MAPPED_TO_SUBSYSTEM: LOG.info( 'Host %(host_nqn)s is already mapped to subsystem ' '%(subsystem_uuid)s ', {'host_nqn': host_nqn, 'subsystem_uuid': subsystem_uuid } ) else: LOG.error( 'Error mapping host to subsystem. Code :' '%(code)s, Message: %(message)s', {'code': code, 'message': message} ) raise ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/drivers/netapp/dataontap/client/client_cmode_rest_asar2.py0000664000175000017500000003110100000000000031160 0ustar00zuulzuul00000000000000# Copyright (c) 2025 NetApp, Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ NetApp ASA r2 REST client for Data ONTAP. 
This module provides the ASA r2 specific REST client that inherits from the base REST client and overrides methods to implement ASA r2 specific workflows when needed. """ from oslo_log import log as logging from oslo_utils import excutils from cinder.i18n import _ from cinder.volume.drivers.netapp.dataontap.client import api as netapp_api from cinder.volume.drivers.netapp.dataontap.client import client_cmode_rest from cinder.volume.drivers.netapp import utils as netapp_utils from cinder.volume import volume_utils LOG = logging.getLogger(__name__) class RestClientASAr2(client_cmode_rest.RestClient, metaclass=volume_utils.TraceWrapperMetaclass): """NetApp ASA r2 REST client for Data ONTAP. This client inherits from the base REST client and provides ASA r2 specific functionality for disaggregated platform workflows. By default, all methods from the parent RestClient are called. Override methods only when ASA r2 specific functionality is required. The __getattr__ method automatically routes any missing methods to the parent class, eliminating the need to explicitly define every method. """ def __init__(self, **kwargs): """Initialize the ASA r2 REST client. :param kwargs: Same parameters as the parent RestClient """ LOG.info("Initializing NetApp ASA r2 REST client") super(RestClientASAr2, self).__init__(**kwargs) self._init_asar2_features() def _init_asar2_features(self): """Initialize ASA r2 specific features. This method can be used to set up ASA r2 specific features and capabilities that are different from the standard ONTAP. """ LOG.debug("Initializing ASA r2 specific features") # Remove features not supported in ASA r2 by setting them to False self.features.add_feature('SYSTEM_CONSTITUENT_METRICS', supported=False) self.features.add_feature('SYSTEM_METRICS', supported=False) # Add ASA r2 specific features here # For example, you might want to enable specific features # that are only available in ASA r2 environments # Example of adding ASA r2 specific features: # self.features.add_feature('ASA_R2_SPECIFIC_FEATURE', supported=True) # self.features.add_feature('ASA_R2_ENHANCED_CLONING', supported=True) LOG.debug("ASA r2 specific features initialized successfully") def __getattr__(self, name): """Log missing method call and return None.""" LOG.error("Method '%s' not found in ASA r2 client", name) return None def get_performance_counter_info(self, object_name, counter_name): """ASA r2 doesn't support performance counter APIs as of now. TODO: Performance counter support will be added in upcoming releases. 
""" msg = _('Performance counter APIs are not supported on ASA r2.') raise netapp_utils.NetAppDriverException(msg) def get_performance_instance_uuids(self, object_name, node_name): """ASA r2 doesn't support performance counter APIs.""" msg = _('Performance counter APIs are not supported on ASA r2.') raise netapp_utils.NetAppDriverException(msg) def get_performance_counters(self, object_name, instance_uuids, counter_names): """ASA r2 doesn't support performance counter APIs.""" msg = _('Performance counter APIs are not supported on ASA r2.') raise netapp_utils.NetAppDriverException(msg) # ASA r2 does not support ONTAPI, so we raise NotImplementedError def get_ontapi_version(self, cached=True): """ASA r2 doesn't support ONTAPI.""" return (0, 0) def get_cluster_info(self): """Get cluster information for ASA r2.""" query_args = { 'fields': 'name,disaggregated', } try: response = self.send_request('/cluster', 'get', query=query_args, enable_tunneling=False) return response except Exception as e: LOG.exception('Failed to get cluster information: %s', e) return None def get_cluster_capacity(self): """Get cluster capacity information for ASA r2.""" query = { 'fields': 'block_storage.size,block_storage.available' } try: response = self.send_request('/storage/cluster', 'get', query=query, enable_tunneling=False) if not response: LOG.error('No response received from cluster capacity API') return {} block_storage = response.get('block_storage', {}) size_total = block_storage.get('size', 0) size_available = block_storage.get('available', 0) capacity = { 'size-total': float(size_total), 'size-available': float(size_available) } LOG.debug('Cluster total size %s:', capacity['size-total']) LOG.debug('Cluster available size %s:', capacity['size-available']) return capacity except Exception as e: LOG.exception('Failed to get cluster capacity: %s', e) msg = _('Failed to get cluster capacity: %s') raise netapp_utils.NetAppDriverException(msg % e) def get_aggregate_disk_types(self): """Get storage_types as array from all aggregates.""" query = { 'fields': 'name,block_storage.storage_type' } try: response = self.send_request('/storage/aggregates', 'get', query=query, enable_tunneling=False) if not response or 'records' not in response: LOG.error('No records received from aggregate API') return None # Collect storage types from all aggregates storage_types = [] if response['records']: for record in response['records']: storage_type = ( record.get('block_storage', {}).get('storage_type')) if storage_type: storage_types.append(storage_type) LOG.debug('Aggregate storage types: %s', storage_types) return storage_types LOG.warning('No aggregate records found') return None except Exception as e: LOG.exception('Failed to get aggregate storage types: %s', e) msg = _('Failed to get aggregate storage types: %s') raise netapp_utils.NetAppDriverException(msg % e) def create_lun(self, volume_name, lun_name, size, metadata, qos_policy_group_name=None, qos_policy_group_is_adaptive=False): """Issues API request for creating LUN on volume.""" initial_size = size lun_name = lun_name.replace("-", "_") body = { 'name': lun_name, 'space.size': str(initial_size), 'os_type': metadata['OsType'], } if qos_policy_group_name: body['qos_policy.name'] = qos_policy_group_name try: self.send_request('/storage/luns', 'post', body=body) except netapp_api.NaApiError as ex: with excutils.save_and_reraise_exception(): LOG.error('Error provisioning volume %(lun_name)s on cluster.' 
' Details: %(ex)s', { 'lun_name': lun_name, 'ex': ex, }) def destroy_lun(self, path, force=True): """Destroys the LUN at the path.""" query = {} lun_name = self._get_backend_lun_or_namespace(path) query['name'] = lun_name if force: query['allow_delete_while_mapped'] = 'true' self.send_request('/storage/luns/', 'delete', query=query) def create_namespace(self, volume_name, namespace_name, size, metadata): """Issues API request for creating namespace""" initial_size = size namespace_name = namespace_name.replace("-", "_") body = { 'name': namespace_name, 'space.size': str(initial_size), 'os_type': metadata['OsType'], } try: self.send_request('/storage/namespaces', 'post', body=body) except netapp_api.NaApiError as ex: with excutils.save_and_reraise_exception(): LOG.error('Error provisioning namespace %(namespace_name)s' ' on cluster Details: %(ex)s', { 'namespace_name': namespace_name, 'ex': ex, }) def destroy_namespace(self, path, force=True): """Destroys the namespace at the path.""" lun_name = self._get_backend_lun_or_namespace(path) query = { 'name': lun_name, 'svm': self.vserver } if force: query['allow_delete_while_mapped'] = 'true' self.send_request('/storage/namespaces', 'delete', query=query) def get_lun_map(self, path): """Gets the LUN map by LUN path.""" lun_name = self._get_backend_lun_or_namespace(path) return super().get_lun_map(lun_name) def map_lun(self, path, igroup_name, lun_id=None): """Maps LUN to the initiator and returns LUN id assigned.""" lun_name = self._get_backend_lun_or_namespace(path) return super().map_lun(lun_name, igroup_name, lun_id) def get_lun_by_args(self, path=None): """Retrieves LUN with specified args.""" if path: if 'path' in path: lun_name = self._get_backend_lun_or_namespace(path) path['path'] = lun_name return super().get_lun_by_args(path=path) def unmap_lun(self, path, igroup_name): """Unmaps a LUN from given initiator.""" lun_name = self._get_backend_lun_or_namespace(path) super().unmap_lun(lun_name, igroup_name) def map_namespace(self, path, subsystem_name): """Maps namespace to the host nqn and returns namespace uuid.""" namespace_name = self._get_backend_lun_or_namespace(path) return super().map_namespace(namespace_name, subsystem_name) def unmap_namespace(self, path, subsystem): """Unmaps a namespace from given subsystem.""" namespace_name = self._get_backend_lun_or_namespace(path) super().unmap_namespace(namespace_name, subsystem) def get_namespace_map(self, path): """Gets the namespace map using its path.""" namespace_name = self._get_backend_lun_or_namespace(path) return super().get_namespace_map(namespace_name) def do_direct_resize(self, path, new_size_bytes, force=True): """Resize the LUN.""" lun_name = self._get_backend_lun_or_namespace(path) if lun_name is not None: LOG.info('Resizing LUN %s directly to new size.', lun_name) body = {'name': lun_name, 'space.size': new_size_bytes} self._lun_update_by_path(lun_name, body) def namespace_resize(self, path, new_size_bytes): """Resize the namespace.""" namespace_name = self._get_backend_lun_or_namespace(path) if namespace_name is not None: body = {'space.size': new_size_bytes} query = {'name': namespace_name} self.send_request('/storage/namespaces', 'patch', body=body, query=query ) def _get_backend_lun_or_namespace(self, path): """Get the backend LUN or namespace""" paths = path.split("/") lun_name = paths[3].replace("-", "_") return lun_name ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 
cinder-27.0.0/cinder/volume/drivers/netapp/dataontap/fc_cmode.py0000664000175000017500000001461300000000000024700 0ustar00zuulzuul00000000000000# Copyright (c) - 2014, Clinton Knight. All rights reserved. # Copyright (c) - 2016 Mike Rooney. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Volume driver for NetApp Data ONTAP FibreChannel storage systems. """ from cinder import interface from cinder.volume import driver from cinder.volume.drivers.netapp.dataontap import block_cmode from cinder.volume.drivers.netapp import options as na_opts from cinder.zonemanager import utils as fczm_utils @interface.volumedriver class NetAppCmodeFibreChannelDriver(driver.BaseVD, driver.ManageableVD): """NetApp C-mode FibreChannel volume driver. Version history: .. code-block:: none 1.0.0 - Driver development before Wallaby 2.0.0 - Wallaby driver version bump 3.0.0 - Add support for Intra-cluster Storage assisted volume migration Add support for revert to snapshot 4.0.0 - Add Cinder Active/Active support (High Availability) Implement Active/Active replication support """ VERSION = "4.0.0" DRIVER_NAME = 'NetApp_FibreChannel_Cluster_direct' SUPPORTS_ACTIVE_ACTIVE = True # ThirdPartySystems wiki page CI_WIKI_NAME = "NetApp_CI" VERSION = block_cmode.NetAppBlockStorageCmodeLibrary.VERSION def __init__(self, *args, **kwargs): super(NetAppCmodeFibreChannelDriver, self).__init__(*args, **kwargs) self.library = block_cmode.NetAppBlockStorageCmodeLibrary( self.DRIVER_NAME, 'FC', **kwargs) @staticmethod def get_driver_options(): return na_opts.netapp_cluster_opts def do_setup(self, context): self.library.do_setup(context) def check_for_setup_error(self): self.library.check_for_setup_error() def create_volume(self, volume): return self.library.create_volume(volume) def create_volume_from_snapshot(self, volume, snapshot): return self.library.create_volume_from_snapshot(volume, snapshot) def create_cloned_volume(self, volume, src_vref): return self.library.create_cloned_volume(volume, src_vref) def delete_volume(self, volume): self.library.delete_volume(volume) def create_snapshot(self, snapshot): self.library.create_snapshot(snapshot) def delete_snapshot(self, snapshot): self.library.delete_snapshot(snapshot) def get_volume_stats(self, refresh=False): return self.library.get_volume_stats(refresh, self.get_filter_function(), self.get_goodness_function()) def get_default_filter_function(self): return self.library.get_default_filter_function() def get_default_goodness_function(self): return self.library.get_default_goodness_function() def extend_volume(self, volume, new_size): self.library.extend_volume(volume, new_size) def ensure_export(self, context, volume): return self.library.ensure_export(context, volume) def create_export(self, context, volume, connector): return self.library.create_export(context, volume) def remove_export(self, context, volume): self.library.remove_export(context, volume) def manage_existing(self, volume, existing_ref): return self.library.manage_existing(volume, existing_ref) def 
manage_existing_get_size(self, volume, existing_ref): return self.library.manage_existing_get_size(volume, existing_ref) def unmanage(self, volume): return self.library.unmanage(volume) def initialize_connection(self, volume, connector): conn_info = self.library.initialize_connection_fc(volume, connector) fczm_utils.add_fc_zone(conn_info) return conn_info def terminate_connection(self, volume, connector, **kwargs): conn_info = self.library.terminate_connection_fc(volume, connector, **kwargs) fczm_utils.remove_fc_zone(conn_info) return conn_info def get_pool(self, volume): return self.library.get_pool(volume) def create_group(self, context, group): return self.library.create_group(group) def delete_group(self, context, group, volumes): return self.library.delete_group(group, volumes) def update_group(self, context, group, add_volumes=None, remove_volumes=None): return self.library.update_group(group, add_volumes=None, remove_volumes=None) def create_group_snapshot(self, context, group_snapshot, snapshots): return self.library.create_group_snapshot(group_snapshot, snapshots) def delete_group_snapshot(self, context, group_snapshot, snapshots): return self.library.delete_group_snapshot(group_snapshot, snapshots) def create_group_from_src(self, context, group, volumes, group_snapshot=None, snapshots=None, source_group=None, source_vols=None): return self.library.create_group_from_src( group, volumes, group_snapshot=group_snapshot, snapshots=snapshots, source_group=source_group, source_vols=source_vols) def failover_host(self, context, volumes, secondary_id=None, groups=None): return self.library.failover_host( context, volumes, secondary_id=secondary_id) def failover(self, context, volumes, secondary_id=None, groups=None): return self.library.failover(context, volumes, secondary_id) def failover_completed(self, context, secondary_id=None): return self.library.failover_completed(context, secondary_id) def migrate_volume(self, context, volume, host): return self.library.migrate_volume(context, volume, host) def revert_to_snapshot(self, context, volume, snapshot): return self.library.revert_to_snapshot(volume, snapshot) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/drivers/netapp/dataontap/iscsi_cmode.py0000664000175000017500000001342300000000000025420 0ustar00zuulzuul00000000000000# Copyright (c) 2014 Clinton Knight. All rights reserved. # Copyright (c) 2016 Mike Rooney. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Volume driver for NetApp Data ONTAP (C-mode) iSCSI storage systems. 
""" from cinder import interface from cinder.volume import driver from cinder.volume.drivers.netapp.dataontap import block_cmode from cinder.volume.drivers.netapp import options as na_opts @interface.volumedriver class NetAppCmodeISCSIDriver(driver.BaseVD, driver.ManageableVD): """NetApp C-mode iSCSI volume driver.""" DRIVER_NAME = 'NetApp_iSCSI_Cluster_direct' # ThirdPartySystems wiki page CI_WIKI_NAME = "NetApp_CI" VERSION = block_cmode.NetAppBlockStorageCmodeLibrary.VERSION SUPPORTS_ACTIVE_ACTIVE = True def __init__(self, *args, **kwargs): super(NetAppCmodeISCSIDriver, self).__init__(*args, **kwargs) self.library = block_cmode.NetAppBlockStorageCmodeLibrary( self.DRIVER_NAME, 'iSCSI', **kwargs) @staticmethod def get_driver_options(): return na_opts.netapp_cluster_opts def do_setup(self, context): self.library.do_setup(context) def check_for_setup_error(self): self.library.check_for_setup_error() def create_volume(self, volume): return self.library.create_volume(volume) def create_volume_from_snapshot(self, volume, snapshot): return self.library.create_volume_from_snapshot(volume, snapshot) def create_cloned_volume(self, volume, src_vref): return self.library.create_cloned_volume(volume, src_vref) def delete_volume(self, volume): self.library.delete_volume(volume) def create_snapshot(self, snapshot): self.library.create_snapshot(snapshot) def delete_snapshot(self, snapshot): self.library.delete_snapshot(snapshot) def get_volume_stats(self, refresh=False): return self.library.get_volume_stats(refresh, self.get_filter_function(), self.get_goodness_function()) def get_default_filter_function(self): return self.library.get_default_filter_function() def get_default_goodness_function(self): return self.library.get_default_goodness_function() def extend_volume(self, volume, new_size): self.library.extend_volume(volume, new_size) def ensure_export(self, context, volume): return self.library.ensure_export(context, volume) def create_export(self, context, volume, connector): return self.library.create_export(context, volume) def remove_export(self, context, volume): self.library.remove_export(context, volume) def manage_existing(self, volume, existing_ref): return self.library.manage_existing(volume, existing_ref) def manage_existing_get_size(self, volume, existing_ref): return self.library.manage_existing_get_size(volume, existing_ref) def unmanage(self, volume): return self.library.unmanage(volume) def initialize_connection(self, volume, connector): return self.library.initialize_connection_iscsi(volume, connector) def terminate_connection(self, volume, connector, **kwargs): return self.library.terminate_connection_iscsi(volume, connector, **kwargs) def get_pool(self, volume): return self.library.get_pool(volume) def create_group(self, context, group): return self.library.create_group(group) def delete_group(self, context, group, volumes): return self.library.delete_group(group, volumes) def update_group(self, context, group, add_volumes=None, remove_volumes=None): return self.library.update_group(group, add_volumes=None, remove_volumes=None) def create_group_snapshot(self, context, group_snapshot, snapshots): return self.library.create_group_snapshot(group_snapshot, snapshots) def delete_group_snapshot(self, context, group_snapshot, snapshots): return self.library.delete_group_snapshot(group_snapshot, snapshots) def create_group_from_src(self, context, group, volumes, group_snapshot=None, snapshots=None, source_group=None, source_vols=None): return self.library.create_group_from_src( group, 
volumes, group_snapshot=group_snapshot, snapshots=snapshots, source_group=source_group, source_vols=source_vols) def failover_host(self, context, volumes, secondary_id=None, groups=None): return self.library.failover_host(context, volumes, secondary_id=secondary_id) def failover(self, context, volumes, secondary_id=None, groups=None): return self.library.failover(context, volumes, secondary_id) def failover_completed(self, context, secondary_id=None): return self.library.failover_completed(context, secondary_id) def migrate_volume(self, context, volume, host): return self.library.migrate_volume(context, volume, host) def revert_to_snapshot(self, context, volume, snapshot): return self.library.revert_to_snapshot(volume, snapshot) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/drivers/netapp/dataontap/nfs_base.py0000664000175000017500000015407200000000000024725 0ustar00zuulzuul00000000000000# Copyright (c) 2012 NetApp, Inc. All rights reserved. # Copyright (c) 2014 Ben Swartzlander. All rights reserved. # Copyright (c) 2014 Navneet Singh. All rights reserved. # Copyright (c) 2014 Clinton Knight. All rights reserved. # Copyright (c) 2014 Alex Meade. All rights reserved. # Copyright (c) 2014 Bob Callaway. All rights reserved. # Copyright (c) 2015 Tom Barron. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Volume driver for NetApp NFS storage. 
""" import copy import math import os import re import time import urllib from oslo_concurrency import processutils from oslo_config import cfg from oslo_log import log as logging from oslo_log import versionutils from oslo_utils import netutils from oslo_utils import units from cinder import context from cinder import exception from cinder.i18n import _ from cinder.image import image_utils from cinder import objects import cinder.privsep.path from cinder import utils from cinder.volume import driver from cinder.volume.drivers.netapp.dataontap.utils import loopingcalls from cinder.volume.drivers.netapp import options as na_opts from cinder.volume.drivers.netapp import utils as na_utils from cinder.volume.drivers import nfs from cinder.volume import volume_utils LOG = logging.getLogger(__name__) CONF = cfg.CONF HOUSEKEEPING_INTERVAL_SECONDS = 600 # ten minutes class NetAppNfsDriver(driver.ManageableVD, driver.CloneableImageVD, nfs.NfsDriver, metaclass=volume_utils.TraceWrapperWithABCMetaclass): """Base class for NetApp NFS driver for Data ONTAP.""" # do not increment this as it may be used in volume type definitions VERSION = "1.0.0" # ThirdPartySystems wiki page CI_WIKI_NAME = "NetApp_CI" REQUIRED_FLAGS_BASIC = ['netapp_login', 'netapp_password', 'netapp_server_hostname'] REQUIRED_FLAGS_CERT = ['netapp_private_key_file', 'netapp_certificate_file'] DEFAULT_FILTER_FUNCTION = 'capabilities.utilization < 70' DEFAULT_GOODNESS_FUNCTION = '100 - capabilities.utilization' def __init__(self, *args, **kwargs): na_utils.validate_instantiation(**kwargs) self._execute = None self._context = None self.app_version = kwargs.pop("app_version", "unknown") kwargs['supports_auto_mosr'] = True super(NetAppNfsDriver, self).__init__(*args, **kwargs) self.configuration.append_config_values(na_opts.netapp_connection_opts) self.configuration.append_config_values(na_opts.netapp_basicauth_opts) self.configuration.append_config_values( na_opts.netapp_certificateauth_opts) self.configuration.append_config_values(na_opts.netapp_transport_opts) self.configuration.append_config_values(na_opts.netapp_img_cache_opts) self.configuration.append_config_values(na_opts.netapp_nfs_extra_opts) self.backend_name = self.host.split('@')[1] self.loopingcalls = loopingcalls.LoopingCalls() def do_setup(self, context): super(NetAppNfsDriver, self).do_setup(context) self._context = context if self.configuration.netapp_private_key_file or\ self.configuration.netapp_certificate_file: na_utils.check_flags(self.REQUIRED_FLAGS_CERT, self.configuration) else: na_utils.check_flags(self.REQUIRED_FLAGS_BASIC, self.configuration) self.zapi_client = None def check_for_setup_error(self): """Returns an error if prerequisites aren't met.""" super(NetAppNfsDriver, self).check_for_setup_error() self.loopingcalls.start_tasks() def _add_looping_tasks(self): """Add tasks that need to be executed at a fixed interval. Inheriting class overrides and then explicitly calls this method. """ # Add the task that deletes snapshots marked for deletion. self.loopingcalls.add_task( self._delete_snapshots_marked_for_deletion, loopingcalls.ONE_MINUTE, loopingcalls.ONE_MINUTE) # Add the task that logs EMS messages self.loopingcalls.add_task( self._handle_ems_logging, loopingcalls.ONE_HOUR) # Add the task that periodically cleanup old expired internal # image caching. 
self.loopingcalls.add_task( self._clean_image_cache, self.configuration.netapp_nfs_image_cache_cleanup_interval ) def _delete_snapshots_marked_for_deletion(self): snapshots = self.zapi_client.get_snapshots_marked_for_deletion() for snapshot in snapshots: self.zapi_client.delete_snapshot( snapshot['volume_name'], snapshot['name']) def _handle_ems_logging(self): """Log autosupport messages.""" raise NotImplementedError() def get_pool(self, volume): """Return pool name where volume resides. :param volume: The volume hosted by the driver. :return: Name of the pool where given volume is hosted. """ return volume['provider_location'] def create_volume(self, volume): """Creates a volume. :param volume: volume reference """ LOG.debug('create_volume on %s', volume['host']) self._ensure_flexgroup_not_in_cg(volume) self._ensure_shares_mounted() # get share as pool name pool_name = volume_utils.extract_host(volume['host'], level='pool') if pool_name is None: msg = _("Pool is not available in the volume host field.") raise exception.InvalidHost(reason=msg) extra_specs = na_utils.get_volume_extra_specs(volume) try: volume['provider_location'] = pool_name LOG.debug('Using pool %s.', pool_name) self._do_create_volume(volume) self._do_qos_for_volume(volume, extra_specs) model_update = self._get_volume_model_update(volume) or {} model_update['provider_location'] = volume['provider_location'] return model_update except Exception: LOG.exception("Exception creating vol %(name)s on " "pool %(pool)s.", {'name': volume['name'], 'pool': volume['provider_location']}) # We need to set this for the model update in order for the # manager to behave correctly. volume['provider_location'] = None msg = _("Volume %(vol)s could not be created in pool %(pool)s.") raise exception.VolumeBackendAPIException(data=msg % { 'vol': volume['name'], 'pool': pool_name}) def create_volume_from_snapshot(self, volume, snapshot): """Creates a volume from a snapshot. For a FlexGroup pool, the operation relies on the NFS generic driver because the ONTAP clone file is not supported by FlexGroup yet. """ self._ensure_flexgroup_not_in_cg(volume) if (self._is_flexgroup(vol_id=snapshot['volume_id']) and not self._is_flexgroup_clone_file_supported()): model = super(NetAppNfsDriver, self).create_volume_from_snapshot( volume, snapshot) return self._do_qos_for_file_flexgroup(volume, model) else: source = { 'name': snapshot['name'], 'size': snapshot['volume_size'], 'id': snapshot['volume_id'], } return self._clone_source_to_destination_volume(source, volume) def create_cloned_volume(self, volume, src_vref): """Creates a clone of the specified volume. For a FlexGroup pool, the operation relies on the NFS generic driver because the ONTAP clone file is not supported by FlexGroup yet. 
""" self._ensure_flexgroup_not_in_cg(volume) if (self._is_flexgroup(vol_id=src_vref['id']) and not self._is_flexgroup_clone_file_supported()): model = super(NetAppNfsDriver, self).create_cloned_volume( volume, src_vref) return self._do_qos_for_file_flexgroup(volume, model) else: source = {'name': src_vref['name'], 'size': src_vref['size'], 'id': src_vref['id']} return self._clone_source_to_destination_volume(source, volume) def _do_qos_for_file_flexgroup(self, volume, model): """Creates the QoS for a file inside the FlexGroup.""" try: extra_specs = na_utils.get_volume_extra_specs(volume) volume['provider_location'] = model['provider_location'] self._do_qos_for_volume(volume, extra_specs) model_update = ( self._get_volume_model_update(volume) or {}) model_update['provider_location'] = model[ 'provider_location'] return model_update except Exception as e: LOG.exception('Exception while setting the QoS for the %(vol_id)s' ' volume inside a FlexGroup pool. Exception: ' ' %(exc)s', {'vol_id': volume['id'], 'exc': e}) msg = _("Volume %s could not set QoS.") raise exception.VolumeBackendAPIException(data=msg % volume['id']) def _clone_source_to_destination_volume(self, source, destination_volume): share = self._get_volume_location(source['id']) extra_specs = na_utils.get_volume_extra_specs(destination_volume) try: destination_volume['provider_location'] = share self._clone_with_extension_check( source, destination_volume) self._do_qos_for_volume(destination_volume, extra_specs) model_update = ( self._get_volume_model_update(destination_volume) or {}) model_update['provider_location'] = destination_volume[ 'provider_location'] return model_update except Exception: LOG.exception("Exception creating volume %(name)s from source " "%(source)s on share %(share)s.", {'name': destination_volume['id'], 'source': source['name'], 'share': destination_volume['provider_location']}) msg = _("Volume %s could not be created on shares.") raise exception.VolumeBackendAPIException(data=msg % ( destination_volume['id'])) def _clone_with_extension_check(self, source, destination_volume): source_size = source['size'] source_id = source['id'] source_name = source['name'] destination_volume_size = destination_volume['size'] self._clone_backing_file_for_volume(source_name, destination_volume['name'], source_id) path = self.local_path(destination_volume) if self._discover_file_till_timeout(path): self._set_rw_permissions(path) if destination_volume_size != source_size: try: self.extend_volume(destination_volume, destination_volume_size) except Exception: LOG.error("Resizing %s failed. 
Cleaning " "volume.", destination_volume['name']) self._cleanup_volume_on_failure(destination_volume) raise exception.CinderException( _("Resizing clone %s failed.") % destination_volume['name']) else: raise exception.CinderException(_("NFS file %s not discovered.") % destination_volume['name']) def _cleanup_volume_on_failure(self, volume): LOG.debug('Cleaning up, failed operation on %s', volume['name']) vol_path = self.local_path(volume) if os.path.exists(vol_path): LOG.debug('Found %s, deleting ...', vol_path) self._delete_file_at_path(vol_path) else: LOG.debug('Could not find %s, continuing ...', vol_path) def _do_qos_for_volume(self, volume, extra_specs, cleanup=False): """Set QoS policy on backend from volume type information.""" raise NotImplementedError() def _get_volume_model_update(self, volume): """Provide any updates necessary for a volume being created/managed.""" raise NotImplementedError() def create_snapshot(self, snapshot): """Creates a snapshot. For a FlexGroup pool, the ZAPI operation uses the NFS generic driver. When it comes to REST, if the ONTAP version is below 9.14, the operation depends on the NFS generic driver. However, for ONTAP versions 9.14 and above, it relies on the ONTAP file clone API. """ if (self._is_flexgroup(vol_id=snapshot['volume_id']) and (self.configuration.safe_get('netapp_use_legacy_client') or not self._is_flexgroup_clone_file_supported())): self._create_snapshot_for_flexgroup(snapshot) else: self._clone_backing_file_for_volume(snapshot['volume_name'], snapshot['name'], snapshot['volume_id'], is_snapshot=True) def _create_snapshot_for_flexgroup(self, snapshot): """Creates the snapshot falling back to the Generic NFS driver. The generic NFS driver snapshot creates a new file which is gonna be the active one (used to attach). So, it must assign the QoS to this new file too. It does not require to create the policy group, though, only reusing the created one for the source volume. """ try: super(NetAppNfsDriver, self).create_snapshot(snapshot) source_vol = { 'id': snapshot['volume_id'], 'name': snapshot['volume_name'], 'volume_type_id': snapshot['volume_type_id'], } extra_specs = na_utils.get_volume_extra_specs(source_vol) qos_policy_group_is_adaptive = volume_utils.is_boolean_str( extra_specs.get('netapp:qos_policy_group_is_adaptive')) qos_policy_group_info = na_utils.get_valid_qos_policy_group_info( source_vol, extra_specs) snap_vol = { 'name': '%s.%s' % (snapshot['volume_name'], snapshot['id']), 'host': self._get_volume_host(source_vol['id']) } self._set_qos_policy_group_on_volume(snap_vol, qos_policy_group_info, qos_policy_group_is_adaptive) except Exception as e: LOG.exception('Exception while creating the %(snap_id)s snapshot' ' of the %(vol_id)s volume inside a FlexGroup pool.' 
' Exception: %(exc)s', {'snap_id': snapshot['id'], 'vol_id': snapshot['volume_id'], 'exc': e}) msg = _("Snapshot could not be created on shares.") raise exception.VolumeBackendAPIException(data=msg) def _set_qos_policy_group_on_volume(self, volume, qos_policy_group_info, qos_policy_group_is_adaptive): """Set the qos policy group for a volume""" raise NotImplementedError() def delete_snapshot(self, snapshot): """Deletes a snapshot.""" if (self._is_flexgroup(vol_id=snapshot.volume_id) and not self._is_flexgroup_clone_file_supported()): super(NetAppNfsDriver, self).delete_snapshot(snapshot) else: self._delete_file(snapshot.volume_id, snapshot.name) def _delete_file(self, file_id, file_name): nfs_share = self._get_provider_location(file_id) if self._volume_not_present(nfs_share, file_name): LOG.debug('File %(file_name)s not found when attempting to delete ' 'from share %(share)s', {'file_name': file_name, 'share': nfs_share}) return path = self._get_volume_path(nfs_share, file_name) self._delete(path) def _get_volume_location(self, volume_id): """Returns NFS mount address as :.""" provider_location = self._get_provider_location(volume_id) nfs_server_ip, export_path = na_utils.get_export_host_junction_path( provider_location) if netutils.is_valid_ipv6(nfs_server_ip): nfs_server_ip = netutils.escape_ipv6(nfs_server_ip) return nfs_server_ip + ':' + export_path def revert_to_snapshot(self, context, volume, snapshot): """Revert a volume to a given snapshot. For a FlexGroup pool, the operation relies on the NFS generic driver because the ONTAP clone file is not supported by FlexGroup yet. """ if (self._is_flexgroup(vol_id=snapshot['volume_id']) and not self._is_flexgroup_clone_file_supported()): super(NetAppNfsDriver, self).revert_to_snapshot(context, volume, snapshot) else: self._revert_to_snapshot(volume, snapshot) def _clone_backing_file_for_volume(self, volume_name, clone_name, volume_id, share=None, is_snapshot=False, source_snapshot=None): """Clone backing file for Cinder volume.""" raise NotImplementedError() def _revert_to_snapshot(self, volume, snapshot): raise NotImplementedError() def _is_flexgroup(self, vol_id=None, host=None): """Discover if a given volume is a FlexGroup or not""" raise NotImplementedError() def _get_backing_flexvol_names(self): """Returns backing flexvol names.""" raise NotImplementedError() def _get_flexvol_names_from_hosts(self, hosts): """Returns a set of flexvol names.""" raise NotImplementedError() def _get_provider_location(self, volume_id): """Returns provider location for given volume.""" volume = self.db.volume_get(self._context, volume_id) return volume.provider_location def _get_volume_host(self, volume_id): """Returns volume host for given volume.""" volume = self.db.volume_get(self._context, volume_id) return volume.host def _volume_not_present(self, nfs_mount, volume_name): """Check if volume exists.""" try: self._try_execute('ls', self._get_volume_path(nfs_mount, volume_name)) except processutils.ProcessExecutionError: # If the volume isn't present return True return False def _try_execute(self, *command, **kwargs): # NOTE(vish): Volume commands can partially fail due to timing, but # running them a second time on failure will usually # recover nicely. tries = 0 while True: try: self._execute(*command, **kwargs) return True except processutils.ProcessExecutionError: tries += 1 if tries >= self.configuration.num_shell_tries: raise LOG.exception("Recovering from a failed execute. 
" "Try number %s", tries) time.sleep(tries ** 2) def _get_volume_path(self, nfs_share, volume_name): """Get volume path. Get volume path (local fs path) for given volume name on given nfs share. :param nfs_share: string, example 172.18.194.100:/var/nfs :param volume_name: string, example volume-91ee65ec-c473-4391-8c09-162b00c68a8c """ return os.path.join(self._get_mount_point_for_share(nfs_share), volume_name) def _update_volume_stats(self): """Retrieve stats info from volume group.""" raise NotImplementedError() def get_default_filter_function(self): """Get the default filter_function string.""" return self.DEFAULT_FILTER_FUNCTION def get_default_goodness_function(self): """Get the default goodness_function string.""" return self.DEFAULT_GOODNESS_FUNCTION def copy_image_to_volume(self, context, volume, image_service, image_id, disable_sparse=False): """Fetch the image from image_service and write it to the volume.""" self._ensure_flexgroup_not_in_cg(volume) super(NetAppNfsDriver, self).copy_image_to_volume( context, volume, image_service, image_id, disable_sparse=disable_sparse) LOG.info('Copied image to volume %s using regular download.', volume['id']) if (not self._is_flexgroup(host=volume['host']) or self._is_flexgroup_clone_file_supported()): # NOTE(felipe_rodrigues): NetApp image cache relies on the # FlexClone file, which is only available for the earliest # versions of FlexGroup. self._register_image_in_cache(volume, image_id) def _register_image_in_cache(self, volume, image_id): """Stores image in the cache.""" file_name = 'img-cache-%s' % image_id LOG.info("Registering image in cache %s", file_name) try: self._do_clone_rel_img_cache( volume['name'], file_name, volume['provider_location'], file_name) except Exception as e: LOG.warning('Exception while registering image %(image_id)s' ' in cache. 
Exception: %(exc)s', {'image_id': image_id, 'exc': e}) def _find_image_in_cache(self, image_id): """Finds image in cache and returns list of shares with file name.""" result = [] if getattr(self, '_mounted_shares', None): for share in self._mounted_shares: dir = self._get_mount_point_for_share(share) file_name = 'img-cache-%s' % image_id file_path = '%s/%s' % (dir, file_name) if os.path.isfile(file_path): LOG.debug('Found cache file for image %(image_id)s' ' on share %(share)s', {'image_id': image_id, 'share': share}) result.append((share, file_name)) return result def _do_clone_rel_img_cache(self, src, dst, share, cache_file): """Do clone operation w.r.t image cache file.""" @utils.synchronized(cache_file, external=True) def _do_clone(): dir = self._get_mount_point_for_share(share) file_path = '%s/%s' % (dir, dst) if not os.path.exists(file_path): LOG.info('Cloning from cache to destination %s', dst) self._clone_backing_file_for_volume(src, dst, volume_id=None, share=share) src_path = '%s/%s' % (dir, src) os.utime(src_path, None) _do_clone() def _clean_image_cache(self): """Clean the image cache files in cache of space crunch.""" LOG.debug('Image cache cleaning in progress.') thres_size_perc_start = ( self.configuration.thres_avl_size_perc_start) thres_size_perc_stop = self.configuration.thres_avl_size_perc_stop for share in self._mounted_shares: try: total_size, total_avl = self._get_capacity_info(share) avl_percent = int((float(total_avl) / total_size) * 100) if avl_percent <= thres_size_perc_start: LOG.info('Cleaning cache for share %s.', share) eligible_files = self._find_old_cache_files(share) threshold_size = int( (thres_size_perc_stop * total_size) / 100) bytes_to_free = int(threshold_size - total_avl) LOG.debug('Files to be queued for deletion %s', eligible_files) self._delete_files_till_bytes_free( eligible_files, share, bytes_to_free) else: continue except Exception as e: LOG.warning('Exception during cache cleaning' ' %(share)s. 
Message - %(ex)s', {'share': share, 'ex': e}) def _shortlist_del_eligible_files(self, share, old_files): """Prepares list of eligible files to be deleted from cache.""" raise NotImplementedError() def _find_old_cache_files(self, share): """Finds the old files in cache.""" mount_fs = self._get_mount_point_for_share(share) threshold_minutes = self.configuration.expiry_thres_minutes cmd = ['find', mount_fs, '-maxdepth', '1', '-name', 'img-cache*', '-amin', '+%s' % threshold_minutes] res, _err = self._execute(*cmd, run_as_root=self._execute_as_root) if res: old_file_paths = res.strip('\n').split('\n') mount_fs_len = len(mount_fs) old_files = [x[mount_fs_len + 1:] for x in old_file_paths] eligible_files = self._shortlist_del_eligible_files( share, old_files) return eligible_files return [] def _delete_files_till_bytes_free(self, file_list, share, bytes_to_free=0): """Delete files from disk till bytes are freed or list exhausted.""" LOG.debug('Bytes to free %s', bytes_to_free) if file_list and bytes_to_free > 0: sorted_files = sorted(file_list, key=lambda x: x[1], reverse=True) mount_fs = self._get_mount_point_for_share(share) for f in sorted_files: if f: file_path = '%s/%s' % (mount_fs, f[0]) LOG.debug('Delete file path %s', file_path) @utils.synchronized(f[0], external=True) def _do_delete(): if self._delete_file_at_path(file_path): return True return False if _do_delete(): bytes_to_free -= int(f[1]) if bytes_to_free <= 0: return def _delete_file_at_path(self, path): """Delete file from disk and return result as boolean.""" try: LOG.debug('Deleting file at path %s', path) cmd = ['rm', '-f', path] self._execute(*cmd, run_as_root=self._execute_as_root) return True except Exception as ex: LOG.warning('Exception during deleting %s', ex) return False def _copy_from_cache(self, volume, image_id, cache_result): """Try copying image file_name from cached file""" raise NotImplementedError() def _copy_from_img_service(self, context, volume, image_service, image_id, use_copyoffload_tool=False): raise NotImplementedError() def clone_image(self, context, volume, image_location, image_meta, image_service): """Create a volume efficiently from an existing image. image_location is a string whose format depends on the image service backend in use. The driver should use it to determine whether cloning is possible. Returns a dict of volume properties eg. provider_location, boolean indicating whether cloning occurred. """ if (self._is_flexgroup(host=volume['host']) and not self._is_flexgroup_clone_file_supported()): return None, False image_id = image_meta['id'] cloned = False post_clone = False extra_specs = na_utils.get_volume_extra_specs(volume) major, minor = self.zapi_client.get_ontapi_version() col_path = self.configuration.netapp_copyoffload_tool_path if col_path: msg = ('The "netapp_copyoffload_tool_path" configuration option ' 'is deprecated and will be removed soon. Please, do not ' 'set it.') versionutils.report_deprecated_feature(LOG, msg) try: cache_result = self._find_image_in_cache(image_id) if cache_result: cloned = self._copy_from_cache(volume, image_id, cache_result) else: cloned = self._direct_nfs_clone(volume, image_location, image_id) # Try to use the deprecated copy offload tool or file copy. 
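            # NOTE: get_ontapi_version() returns the (major, minor) ONTAPI
            # protocol version pair, so the check below only enables the
            # deprecated copy offload tool when a tool path is configured
            # and the backend speaks ONTAPI 1.20 or later.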
if not cloned: # We will use copy offload tool if the copy offload tool # path exists and the version is greater than or equal to # 1.20 use_tool = bool(col_path) and (major == 1 and minor >= 20) cloned = self._copy_from_img_service( context, volume, image_service, image_id, use_copyoffload_tool=use_tool) if cloned: self._do_qos_for_volume(volume, extra_specs) post_clone = self._post_clone_image(volume) except Exception as e: msg = e.msg if getattr(e, 'msg', None) else e LOG.info('Image cloning unsuccessful for image' ' %(image_id)s. Message: %(msg)s', {'image_id': image_id, 'msg': msg}) finally: cloned = cloned and post_clone share = (volume_utils.extract_host(volume['host'], level='pool') if cloned else None) bootable = True if cloned else False return {'provider_location': share, 'bootable': bootable}, cloned def _clone_from_cache(self, volume, image_id, cache_result): """Clones a copy from image cache.""" cloned = False LOG.info('Cloning image %s from cache', image_id) path = volume.host.split('#')[1] for res in cache_result: # Repeat tries in other shares if failed in some (share, file_name) = res if path == share: LOG.debug('Cache share: %s', share) if (share and self._is_share_clone_compatible(volume, share)): try: self._do_clone_rel_img_cache( file_name, volume['name'], share, file_name) cloned = True volume['provider_location'] = share break except Exception: LOG.warning('Unexpected exception during' ' image cloning in share %s', share) return cloned def _direct_nfs_clone(self, volume, image_location, image_id): """Clone directly in nfs share.""" LOG.info('Checking image clone %s from glance share.', image_id) cloned = False image_locations = self._construct_image_nfs_url(image_location) run_as_root = self._execute_as_root for loc in image_locations: share = self._is_cloneable_share(loc) if share and self._is_share_clone_compatible(volume, share): LOG.debug('Share is cloneable %s', share) (__, ___, img_file) = loc.rpartition('/') dir_path = self._get_mount_point_for_share(share) img_path = '%s/%s' % (dir_path, img_file) img_info = image_utils.qemu_img_info(img_path, run_as_root=run_as_root) if img_info.file_format == 'raw': LOG.debug('Image is raw %s', image_id) self._clone_backing_file_for_volume( img_file, volume['name'], volume_id=None, share=share) cloned = True break else: LOG.info( 'Image will locally be converted to raw %s', image_id) dst = '%s/%s' % (dir_path, volume['name']) image_utils.convert_image(img_path, dst, 'raw', run_as_root=run_as_root) data = image_utils.qemu_img_info(dst, run_as_root=run_as_root) if data.file_format != "raw": raise exception.InvalidResults( _("Converted to raw, but" " format is now %s") % data.file_format) else: cloned = True self._register_image_in_cache( volume, image_id) break return cloned def _post_clone_image(self, volume): """Do operations post image cloning.""" LOG.info('Performing post clone for %s', volume['name']) share = volume_utils.extract_host(volume['host'], level='pool') vol_path = self._get_volume_path(share, volume['name']) if self._discover_file_till_timeout(vol_path): self._set_rw_permissions(vol_path) self._resize_image_file(vol_path, volume['size']) return True raise exception.InvalidResults( _("NFS file could not be discovered.")) def _resize_image_file(self, path, new_size, file_format=None): """Resize the image file on share to new size.""" LOG.debug('Checking file for resize') if self._is_file_size_equal(path, new_size): return else: LOG.info('Resizing file to %sG', new_size) image_utils.resize_image(path, new_size, 
run_as_root=self._execute_as_root, file_format=file_format) if file_format == 'qcow2' and not self._is_file_size_equal( path, new_size): raise exception.InvalidResults( _('Resizing image file failed.')) def _is_file_size_equal(self, path, size): """Checks if file size at path is equal to size.""" data = image_utils.qemu_img_info(path, run_as_root=self._execute_as_root) virt_size = data.virtual_size / units.Gi if virt_size == size: return True else: return False def _touch_path_to_refresh(self, path): # Touching parent directory forces NFS client to flush its cache. cinder.privsep.path.touch(path) def _discover_file_till_timeout(self, path, timeout=75): """Checks if file size at path is equal to size.""" # Sometimes nfs takes time to discover file # Retrying in case any unexpected situation occurs # # The NFS client by default has a 60 second maximum # cache time before it refreshes directory information. # (See man 5 nfs acdirmax.) Allow 75 seconds for # retries to ensure that this cache has refreshed. retry_seconds = timeout sleep_interval = 2 base_path = os.path.dirname(path) self._touch_path_to_refresh(base_path) while True: if os.path.exists(path): return True else: if retry_seconds <= 0: LOG.warning('Discover file retries exhausted.') return False else: time.sleep(sleep_interval) retry_seconds -= sleep_interval self._touch_path_to_refresh(base_path) def _is_cloneable_share(self, image_location): """Finds if the image at location is cloneable.""" conn, dr = self._check_get_nfs_path_segs(image_location) return self._check_share_in_use(conn, dr) def _check_get_nfs_path_segs(self, image_location): """Checks if the nfs path format is matched. WebNFS url format with relative-path is supported. Accepting all characters in path-names and checking against the mounted shares which will contain only allowed path segments. Returns connection and dir details. """ conn, dr = None, None if image_location: nfs_loc_pattern = \ (r'^nfs://(([\w\-\.]+:{1}[\d]+|[\w\-\.]+)(/[^\/].*)' r'*(/[^\/\\\\]+)$)') matched = re.match(nfs_loc_pattern, image_location, flags=0) if not matched: LOG.debug('Image location not in the' ' expected format %s', image_location) else: conn = matched.group(2) dr = matched.group(3) or '/' return conn, dr def _share_match_for_ip(self, ip, shares): """Returns the share that is served by ip. Multiple shares can have same dir path but can be served using different ips. It finds the share which is served by ip on same nfs server. """ raise NotImplementedError() def _check_share_in_use(self, conn, dir): """Checks if share is cinder mounted and returns it.""" try: if conn: host = conn.split(':')[0] ip = volume_utils.resolve_hostname(host) share_candidates = [] for sh in self._mounted_shares: sh_exp = sh.split(':')[-1] if sh_exp == dir: share_candidates.append(sh) if share_candidates: LOG.debug('Found possible share matches %s', share_candidates) return self._share_match_for_ip(ip, share_candidates) except Exception: LOG.warning("Unexpected exception while " "short listing used share.") return None def _construct_image_nfs_url(self, image_location): """Construct direct url for nfs backend. It creates direct url from image_location which is a tuple with direct_url and locations. Returns array of urls with nfs scheme if nfs store else returns url. It needs to be verified by backend before use. 
""" direct_url, locations = image_location if not direct_url and not locations: raise exception.NotFound(_('Image location not present.')) urls = [] if not locations: urls.append(direct_url) else: for location in locations: if not location['metadata']: continue location_type = location['metadata'].get('type') if not location_type or location_type.lower() != "nfs": continue share_location = location['metadata'].get('share_location') mountpoint = location['metadata'].get('mountpoint') if not share_location or not mountpoint: continue url = location['url'] url_parse = urllib.parse.urlparse(url) abs_path = os.path.join(url_parse.netloc, url_parse.path) rel_path = os.path.relpath(abs_path, mountpoint) direct_url = "%s/%s" % (share_location, rel_path) urls.append(direct_url) return urls def extend_volume(self, volume, new_size): """Extend an existing volume to the new size.""" LOG.info('Extending volume %s.', volume['name']) try: path = self.local_path(volume) file_format = None admin_metadata = objects.Volume.get_by_id( context.get_admin_context(), volume.id).admin_metadata if admin_metadata and 'format' in admin_metadata: file_format = admin_metadata['format'] self._resize_image_file( path, new_size, file_format=file_format) except Exception as err: exception_msg = (_("Failed to extend volume " "%(name)s, Error msg: %(msg)s.") % {'name': volume['name'], 'msg': str(err)}) raise exception.VolumeBackendAPIException(data=exception_msg) try: extra_specs = na_utils.get_volume_extra_specs(volume) # Create volume copy with new size for size-dependent QOS specs volume_copy = copy.copy(volume) volume_copy['size'] = new_size self._do_qos_for_volume(volume_copy, extra_specs, cleanup=False) except Exception as err: exception_msg = (_("Failed to set QoS for existing volume " "%(name)s, Error msg: %(msg)s.") % {'name': volume['name'], 'msg': str(err)}) raise exception.VolumeBackendAPIException(data=exception_msg) def _is_share_clone_compatible(self, volume, share): """Checks if share is compatible with volume to host its clone.""" raise NotImplementedError() def _check_share_can_hold_size(self, share, size): """Checks if volume can hold image with size.""" _tot_size, tot_available = self._get_capacity_info( share) if tot_available < size: msg = _("Container size smaller than required file size.") raise exception.VolumeDriverException(msg) def _move_nfs_file(self, source_path, dest_path): """Moves source to destination.""" @utils.synchronized(dest_path, external=True) def _move_file(src, dst): if os.path.exists(dst): LOG.warning("Destination %s already exists.", dst) return False self._execute('mv', src, dst, run_as_root=self._execute_as_root) return True try: return _move_file(source_path, dest_path) except Exception as e: LOG.warning('Exception moving file %(src)s. Message - %(e)s', {'src': source_path, 'e': e}) return False def _get_export_ip_path(self, volume_id=None, share=None): """Returns export ip and path. One of volume id or share is used to return the values. 
""" if volume_id: provider_location = self._get_provider_location(volume_id) host_ip, export_path = na_utils.get_export_host_junction_path( provider_location) elif share: host_ip, export_path = na_utils.get_export_host_junction_path( share) else: raise exception.InvalidInput( 'A volume ID or share was not specified.') return host_ip, export_path def _get_share_capacity_info(self, nfs_share): """Returns the share capacity metrics needed by the scheduler.""" capacity = dict() capacity['reserved_percentage'] = self.reserved_percentage capacity['max_over_subscription_ratio'] = ( self.max_over_subscription_ratio) total_size, total_available = self._get_capacity_info(nfs_share) capacity['total_capacity_gb'] = na_utils.round_down( total_size / units.Gi) capacity['free_capacity_gb'] = na_utils.round_down( total_available / units.Gi) return capacity def _get_capacity_info(self, nfs_share): """Get total capacity and free capacity in bytes for an nfs share.""" export_path = nfs_share.rsplit(':', 1)[1] capacity = self.zapi_client.get_flexvol_capacity( flexvol_path=export_path) return capacity['size-total'], capacity['size-available'] def _check_volume_type(self, volume, share, file_name, extra_specs): """Match volume type for share file.""" def _convert_vol_ref_share_name_to_share_ip(self, vol_ref): """Converts the share point name to an IP address The volume reference may have a DNS name portion in the share name. Convert that to an IP address and then restore the entire path. :param vol_ref: Driver-specific information used to identify a volume :return: A volume reference where share is in IP format. """ # First strip out share and convert to IP format. share_split = vol_ref.rsplit(':', 1) vol_ref_share_ip = volume_utils.resolve_hostname(share_split[0]) # Now place back into volume reference. vol_ref_share = vol_ref_share_ip + ':' + share_split[1] return vol_ref_share def _get_share_mount_and_vol_from_vol_ref(self, vol_ref): """Get the NFS share, the NFS mount, and the volume from reference Determine the NFS share point, the NFS mount point, and the volume (with possible path) from the given volume reference. Raise exception if unsuccessful. :param vol_ref: Driver-specific information used to identify a volume :return: NFS Share, NFS mount, volume path or raise error """ # Check that the reference is valid. if 'source-name' not in vol_ref: reason = _('Reference must contain source-name element.') raise exception.ManageExistingInvalidReference( existing_ref=vol_ref, reason=reason) vol_ref_name = vol_ref['source-name'] self._ensure_shares_mounted() # If a share was declared as '1.2.3.4:/a/b/c' in the nfs_shares_config # file, but the admin tries to manage the file located at # 'my.hostname.com:/a/b/c/d.vol', this might cause a lookup miss below # when searching self._mounted_shares to see if we have an existing # mount that would work to access the volume-to-be-managed (a string # comparison is done instead of IP comparison). 
vol_ref_share = self._convert_vol_ref_share_name_to_share_ip( vol_ref_name) for nfs_share in self._mounted_shares: cfg_share = self._convert_vol_ref_share_name_to_share_ip(nfs_share) (orig_share, work_share, file_path) = \ vol_ref_share.partition(cfg_share) if work_share == cfg_share: file_path = file_path[1:] # strip off leading path divider LOG.debug("Found possible share %s; checking mount.", work_share) nfs_mount = self._get_mount_point_for_share(nfs_share) vol_full_path = os.path.join(nfs_mount, file_path) if os.path.isfile(vol_full_path): LOG.debug("Found share %(share)s and vol %(path)s on " "mount %(mnt)s", {'share': nfs_share, 'path': file_path, 'mnt': nfs_mount}) return nfs_share, nfs_mount, file_path else: LOG.debug("vol_ref %(ref)s not on share %(share)s.", {'ref': vol_ref_share, 'share': nfs_share}) raise exception.ManageExistingInvalidReference( existing_ref=vol_ref, reason=_('Volume not found on configured storage backend.')) def manage_existing(self, volume, existing_vol_ref): """Manages an existing volume. The specified Cinder volume is to be taken into Cinder management. The driver will verify its existence and then rename it to the new Cinder volume name. It is expected that the existing volume reference is an NFS share point and some [/path]/volume; e.g., 10.10.32.1:/openstack/vol_to_manage or 10.10.32.1:/openstack/some_directory/vol_to_manage :param volume: Cinder volume to manage :param existing_vol_ref: Driver-specific information used to identify a volume """ # Attempt to find NFS share, NFS mount, and volume path from vol_ref. (nfs_share, nfs_mount, vol_path) = \ self._get_share_mount_and_vol_from_vol_ref(existing_vol_ref) LOG.debug("Asked to manage NFS volume %(vol)s, with vol ref %(ref)s", {'vol': volume['id'], 'ref': existing_vol_ref['source-name']}) extra_specs = na_utils.get_volume_extra_specs(volume) self._check_volume_type(volume, nfs_share, vol_path, extra_specs) if vol_path == volume['name']: LOG.debug("New Cinder volume %s name matches reference name: " "no need to rename.", volume['name']) else: src_vol = os.path.join(nfs_mount, vol_path) dst_vol = os.path.join(nfs_mount, volume['name']) try: self._execute("mv", src_vol, dst_vol, run_as_root=self._execute_as_root, check_exit_code=True) LOG.debug("Setting newly managed Cinder volume name to %s", volume['name']) self._set_rw_permissions_for_all(dst_vol) except processutils.ProcessExecutionError as err: exception_msg = (_("Failed to manage existing volume %(name)s," " because rename operation failed:" " Error msg: %(msg)s."), {'name': existing_vol_ref['source-name'], 'msg': err}) raise exception.VolumeBackendAPIException(data=exception_msg) try: self._do_qos_for_volume(volume, extra_specs, cleanup=False) except Exception as err: exception_msg = (_("Failed to set QoS for existing volume " "%(name)s, Error msg: %(msg)s.") % {'name': existing_vol_ref['source-name'], 'msg': str(err)}) raise exception.VolumeBackendAPIException(data=exception_msg) model_update = self._get_volume_model_update(volume) or {} model_update['provider_location'] = nfs_share return model_update def manage_existing_get_size(self, volume, existing_vol_ref): """Returns the size of volume to be managed by manage_existing. When calculating the size, round up to the next GB. :param volume: Cinder volume to manage :param existing_vol_ref: Existing volume to take under management """ # Attempt to find NFS share, NFS mount, and volume path from vol_ref. 
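        # NOTE: the size computed below is rounded up to whole GiB, e.g. an
        # existing 1.2 GiB backing file is reported as 2 GB, so the managed
        # Cinder volume is never smaller than the file that backs it.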
(nfs_share, nfs_mount, vol_path) = \ self._get_share_mount_and_vol_from_vol_ref(existing_vol_ref) try: LOG.debug("Asked to get size of NFS vol_ref %s.", existing_vol_ref['source-name']) file_path = os.path.join(nfs_mount, vol_path) file_size = float(utils.get_file_size(file_path)) / units.Gi vol_size = int(math.ceil(file_size)) except (OSError, ValueError): exception_message = (_("Failed to manage existing volume " "%(name)s, because of error in getting " "volume size.") % {'name': existing_vol_ref['source-name']}) raise exception.VolumeBackendAPIException(data=exception_message) LOG.debug("Reporting size of NFS volume ref %(ref)s as %(size)d GB.", {'ref': existing_vol_ref['source-name'], 'size': vol_size}) return vol_size def unmanage(self, volume): """Removes the specified volume from Cinder management. Does not delete the underlying backend storage object. A log entry will be made to notify the Admin that the volume is no longer being managed. :param volume: Cinder volume to unmanage """ vol_str = CONF.volume_name_template % volume['id'] vol_path = os.path.join(volume['provider_location'], vol_str) LOG.info('Cinder NFS volume with current path "%(cr)s" is ' 'no longer being managed.', {'cr': vol_path}) def _find_share(self, volume): """Returns the NFS share for the created volume. The method is used by base class to determine the provider_location share of the new volume. :param volume: the volume to be created. """ pool_name = volume_utils.extract_host(volume['host'], level='pool') if pool_name is None: msg = _("Pool is not available in the volume host field.") raise exception.InvalidHost(reason=msg) return pool_name def _ensure_flexgroup_not_in_cg(self, volume): if (self._is_flexgroup(host=volume['host']) and volume['group'] and volume_utils.is_group_a_cg_snapshot_type(volume['group'])): msg = _("Cannot create %s volume on FlexGroup pool with " "consistency group.") raise na_utils.NetAppDriverException(msg % volume['id']) def _is_flexgroup_clone_file_supported(self): """Check whether storage can perform clone file for FlexGroup""" raise NotImplementedError() def update_migrated_volume(self, ctxt, volume, new_volume, original_volume_status): # Implemented to prevent NFSDriver's implementation renaming the file # and breaking volume's backend QoS. msg = _("The method update_migrated_volume is not implemented.") raise NotImplementedError(msg) cinder-27.0.0/cinder/volume/drivers/netapp/dataontap/nfs_cmode.py # Copyright (c) 2012 NetApp, Inc. All rights reserved. # Copyright (c) 2014 Ben Swartzlander. All rights reserved. # Copyright (c) 2014 Navneet Singh. All rights reserved. # Copyright (c) 2014 Clinton Knight. All rights reserved. # Copyright (c) 2014 Alex Meade. All rights reserved. # Copyright (c) 2014 Bob Callaway. All rights reserved. # Copyright (c) 2015 Tom Barron. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the # License for the specific language governing permissions and limitations # under the License. """ Volume driver for NetApp NFS storage. """ import os import uuid from oslo_log import log as logging from oslo_service import loopingcall from oslo_utils import excutils from oslo_utils import units from cinder.common import constants from cinder import exception from cinder.i18n import _ from cinder.image import image_utils from cinder import interface from cinder.objects import fields from cinder.volume.drivers.netapp.dataontap import nfs_base from cinder.volume.drivers.netapp.dataontap.performance import perf_cmode from cinder.volume.drivers.netapp.dataontap.utils import capabilities from cinder.volume.drivers.netapp.dataontap.utils import data_motion from cinder.volume.drivers.netapp.dataontap.utils import loopingcalls from cinder.volume.drivers.netapp.dataontap.utils import utils as dot_utils from cinder.volume.drivers.netapp import options as na_opts from cinder.volume.drivers.netapp import utils as na_utils from cinder.volume import volume_utils LOG = logging.getLogger(__name__) @interface.volumedriver class NetAppCmodeNfsDriver( nfs_base.NetAppNfsDriver, data_motion.DataMotionMixin, metaclass=volume_utils.TraceWrapperWithABCMetaclass): """NetApp NFS driver for Data ONTAP (Cluster-mode). Version history: .. code-block:: none 1.0.0 - Driver development before Wallaby 2.0.0 - Add support for QoS minimums specs Add support for dynamic Adaptive QoS policy group creation Implement FlexGroup pool 3.0.0 - Add support for Intra-cluster Storage assisted volume migration Add support for revert to snapshot 4.0.0 - Add Cinder Active/Active support (High Availability) Implement Active/Active replication support """ VERSION = "4.0.0" REQUIRED_CMODE_FLAGS = ['netapp_vserver'] SUPPORTS_ACTIVE_ACTIVE = True def __init__(self, *args, **kwargs): super(NetAppCmodeNfsDriver, self).__init__(*args, **kwargs) self.driver_name = 'NetApp_NFS_Cluster_direct' self.driver_mode = 'cluster' self.configuration.append_config_values(na_opts.netapp_cluster_opts) self.failed_over_backend_name = kwargs.get('active_backend_id') self.failed_over = self.failed_over_backend_name is not None self.replication_enabled = ( True if self.get_replication_backend_names( self.configuration) else False) def do_setup(self, context): """Do the customized set up on client for cluster mode.""" super(NetAppCmodeNfsDriver, self).do_setup(context) na_utils.check_flags(self.REQUIRED_CMODE_FLAGS, self.configuration) # cDOT API client self.zapi_client = dot_utils.get_client_for_backend( self.failed_over_backend_name or self.backend_name) self.vserver = self.zapi_client.vserver # Storage service catalog self.ssc_library = capabilities.CapabilitiesLibrary( 'nfs', self.vserver, self.zapi_client, self.configuration) self.ssc_library.check_api_permissions() self.using_cluster_credentials = ( self.ssc_library.cluster_user_supported()) # Performance monitoring library self.perf_library = perf_cmode.PerformanceCmodeLibrary( self.zapi_client) def _update_zapi_client(self, backend_name): """Set cDOT API client for the specified config backend stanza name.""" self.zapi_client = dot_utils.get_client_for_backend(backend_name) self.vserver = self.zapi_client.vserver self.ssc_library._update_for_failover(self.zapi_client, self._get_flexvol_to_pool_map()) ssc = self.ssc_library.get_ssc() self.perf_library._update_for_failover(self.zapi_client, ssc) def check_for_setup_error(self): """Check that the driver is working and can communicate.""" 
self._add_looping_tasks() if (self.ssc_library.contains_flexgroup_pool() and not self.zapi_client.features.FLEXGROUP): msg = _('FlexGroup pool requires Data ONTAP 9.8 or later.') raise na_utils.NetAppDriverException(msg) super(NetAppCmodeNfsDriver, self).check_for_setup_error() def _add_looping_tasks(self): """Add tasks that need to be executed at a fixed interval.""" # Note(cknight): Run the update once in the current thread to prevent a # race with the first invocation of _update_volume_stats. self._update_ssc() # Add the task that updates the slow-changing storage service catalog self.loopingcalls.add_task(self._update_ssc, loopingcalls.ONE_HOUR, loopingcalls.ONE_HOUR) # Add the task that runs other housekeeping tasks, such as deletion # of previously soft-deleted storage artifacts. self.loopingcalls.add_task( self._handle_housekeeping_tasks, loopingcalls.TEN_MINUTES, 0) super(NetAppCmodeNfsDriver, self)._add_looping_tasks() def _handle_ems_logging(self): """Log autosupport messages.""" base_ems_message = dot_utils.build_ems_log_message_0( self.driver_name, self.app_version) self.zapi_client.send_ems_log_message(base_ems_message) pool_ems_message = dot_utils.build_ems_log_message_1( self.driver_name, self.app_version, self.vserver, self._get_backing_flexvol_names(), []) self.zapi_client.send_ems_log_message(pool_ems_message) def _handle_housekeeping_tasks(self): """Handle various cleanup activities.""" active_backend = self.failed_over_backend_name or self.backend_name # Add the task that harvests soft-deleted QoS policy groups. if self.using_cluster_credentials: self.zapi_client.remove_unused_qos_policy_groups() LOG.debug("Current service state: Replication enabled: %(" "replication)s. Failed-Over: %(failed)s. Active Backend " "ID: %(active)s", { 'replication': self.replication_enabled, 'failed': self.failed_over, 'active': active_backend, }) # Create pool mirrors if whole-backend replication configured if self.replication_enabled and not self.failed_over: self.ensure_snapmirrors( self.configuration, self.backend_name, self.ssc_library.get_ssc_flexvol_names()) def _do_qos_for_volume(self, volume, extra_specs, cleanup=True): try: qos_policy_group_info = na_utils.get_valid_qos_policy_group_info( volume, extra_specs) pool = volume_utils.extract_host(volume['host'], level='pool') qos_min_support = self.ssc_library.is_qos_min_supported(pool) qos_policy_group_is_adaptive = (volume_utils.is_boolean_str( extra_specs.get('netapp:qos_policy_group_is_adaptive')) or na_utils.is_qos_policy_group_spec_adaptive( qos_policy_group_info)) self.zapi_client.provision_qos_policy_group(qos_policy_group_info, qos_min_support) self._set_qos_policy_group_on_volume(volume, qos_policy_group_info, qos_policy_group_is_adaptive) except Exception: with excutils.save_and_reraise_exception(): LOG.error("Setting QoS for %s failed", volume['id']) if cleanup: LOG.debug("Cleaning volume %s", volume['id']) self._cleanup_volume_on_failure(volume) def _get_volume_model_update(self, volume): """Provide model updates for a volume being created.""" if self.replication_enabled: return {'replication_status': fields.ReplicationStatus.ENABLED} def _set_qos_policy_group_on_volume(self, volume, qos_policy_group_info, qos_policy_group_is_adaptive): if qos_policy_group_info is None: return qos_policy_group_name = na_utils.get_qos_policy_group_name_from_info( qos_policy_group_info) if qos_policy_group_name is None: return target_path = '%s' % (volume['name']) share = volume_utils.extract_host(volume['host'], level='pool') __, 
export_path = na_utils.get_export_host_junction_path(share) flex_vol_name = self.zapi_client.get_vol_by_junc_vserver(self.vserver, export_path) self.zapi_client.file_assign_qos(flex_vol_name, qos_policy_group_name, qos_policy_group_is_adaptive, target_path) def _revert_to_snapshot(self, volume, snapshot): """Clone volume from snapshot to perform the file name swap.""" new_snap_name = 'new-%s' % snapshot['name'] self._clone_backing_file_for_volume(snapshot['name'], new_snap_name, snapshot['volume_id'], is_snapshot=False) (host_ip, junction_path) = self._get_export_ip_path( volume_id=volume['id']) vserver = self._get_vserver_for_ip(host_ip) flexvol_name = self.zapi_client.get_vol_by_junc_vserver(vserver, junction_path) try: self._swap_files(flexvol_name, volume['name'], new_snap_name) except Exception: LOG.error("Swapping temporary reverted volume name from %s to %s " "failed.", new_snap_name, volume['name']) with excutils.save_and_reraise_exception(): try: LOG.debug("Deleting temporary reverted volume file %s.", new_snap_name) file_path = '/vol/%s/%s' % (flexvol_name, new_snap_name) self.zapi_client.delete_file(file_path) except Exception: LOG.error("Could not delete temporary reverted volume %s. " "A manual deletion is required.", new_snap_name) def _swap_files(self, flexvol_name, original_file, new_file): """Swaps cloned and original files using a temporary file. Renames the original file path to a temporary path, then changes the cloned file path to the original path (if this fails, change the temporary file path back as original path) and finally deletes the file with temporary path. """ prefix_path_on_backend = '/vol/' + flexvol_name + '/' new_file_path = prefix_path_on_backend + new_file original_file_path = prefix_path_on_backend + original_file tmp_file_path = prefix_path_on_backend + 'tmp-%s' % original_file try: self.zapi_client.rename_file(original_file_path, tmp_file_path) except exception.VolumeBackendAPIException: msg = _("Could not rename original volume from %s to %s.") raise na_utils.NetAppDriverException(msg % (original_file_path, tmp_file_path)) try: self.zapi_client.rename_file(new_file_path, original_file_path) except exception.VolumeBackendAPIException: try: LOG.debug("Revert volume failed. Rolling back to its original" " name.") self.zapi_client.rename_file(tmp_file_path, original_file_path) except exception.VolumeBackendAPIException: LOG.error("Could not rollback original volume name from %s " "to %s. Cinder may lose the volume management. " "Please, you should rename it back manually.", tmp_file_path, original_file_path) msg = _("Could not rename temporary reverted volume from %s " "to original volume name %s.") raise na_utils.NetAppDriverException(msg % (new_file_path, original_file_path)) try: self.zapi_client.delete_file(tmp_file_path) except exception.VolumeBackendAPIException: LOG.error("Could not delete old volume %s. 
A manual deletion is " "required.", tmp_file_path) def _clone_backing_file_for_volume(self, volume_name, clone_name, volume_id, share=None, is_snapshot=False, source_snapshot=None): """Clone backing file for Cinder volume.""" (vserver, exp_volume) = self._get_vserver_and_exp_vol(volume_id, share) self.zapi_client.clone_file(exp_volume, volume_name, clone_name, vserver, is_snapshot=is_snapshot) def _get_vserver_and_exp_vol(self, volume_id=None, share=None): """Gets the vserver and export volume for share.""" (host_ip, export_path) = self._get_export_ip_path(volume_id, share) ifs = self.zapi_client.get_if_info_by_ip(host_ip) vserver = ifs[0].get('vserver') exp_volume = self.zapi_client.get_vol_by_junc_vserver(vserver, export_path) return vserver, exp_volume def _update_volume_stats(self): """Retrieve stats info from vserver.""" LOG.debug('Updating volume stats') data = {} backend_name = self.configuration.safe_get('volume_backend_name') data['volume_backend_name'] = backend_name or self.driver_name data['vendor_name'] = 'NetApp' data['driver_version'] = self.VERSION data['storage_protocol'] = constants.NFS_VARIANT data['pools'] = self._get_pool_stats( filter_function=self.get_filter_function(), goodness_function=self.get_goodness_function()) data['sparse_copy_volume'] = True # Used for service state report data['replication_enabled'] = self.replication_enabled self._stats = data def _get_pool_stats(self, filter_function=None, goodness_function=None): """Retrieve pool (Data ONTAP flexvol) stats. Pool statistics are assembled from static driver capabilities, the Storage Service Catalog of flexvol attributes, and real-time capacity and controller utilization metrics. The pool name is the NFS share path. """ pools = [] ssc = self.ssc_library.get_ssc() if not ssc: return pools # Utilization and performance metrics require cluster-scoped # credentials if self.using_cluster_credentials: # Get up-to-date node utilization metrics just once self.perf_library.update_performance_cache(ssc) # Get up-to-date aggregate capacities just once aggregates = self.ssc_library.get_ssc_aggregates() aggr_capacities = self.zapi_client.get_aggregate_capacities( aggregates) else: aggr_capacities = {} for ssc_vol_name, ssc_vol_info in ssc.items(): pool = dict() # Add storage service catalog data pool.update(ssc_vol_info) # Add driver capabilities and config info pool['QoS_support'] = self.using_cluster_credentials pool['consistencygroup_support'] = True pool['consistent_group_snapshot_enabled'] = True pool['multiattach'] = True pool['online_extend_support'] = False is_flexgroup = ssc_vol_info.get('netapp_is_flexgroup') == 'true' if is_flexgroup: pool['consistencygroup_support'] = False pool['consistent_group_snapshot_enabled'] = False pool['multiattach'] = False # Add up-to-date capacity info nfs_share = ssc_vol_info['pool_name'] capacity = self._get_share_capacity_info(nfs_share) pool.update(capacity) if self.configuration.netapp_driver_reports_provisioned_capacity: files = self.zapi_client.get_file_sizes_by_dir(ssc_vol_name) provisioned_cap = 0 for f in files: if volume_utils.extract_id_from_volume_name(f['name']): provisioned_cap = provisioned_cap + f['file-size'] pool['provisioned_capacity_gb'] = na_utils.round_down( float(provisioned_cap) / units.Gi) if self.using_cluster_credentials and not is_flexgroup: dedupe_used = self.zapi_client.get_flexvol_dedupe_used_percent( ssc_vol_name) else: dedupe_used = 0.0 pool['netapp_dedupe_used_percent'] = na_utils.round_down( dedupe_used) aggregate_name = 
ssc_vol_info.get('netapp_aggregate') aggr_used = 0 if isinstance(aggregate_name, list): # For FlexGroup, the aggregate percentage can be seen as the # average of all aggregates. aggr_used_total = 0 aggr_num = 0 for aggr in aggregate_name: aggr_capacity = aggr_capacities.get(aggr, {}) aggr_used_total += aggr_capacity.get('percent-used', 0) aggr_num += 1 if aggr_num: aggr_used = aggr_used_total / aggr_num else: aggr_capacity = aggr_capacities.get(aggregate_name, {}) aggr_used = aggr_capacity.get('percent-used', 0) pool['netapp_aggregate_used_percent'] = aggr_used # Add utilization data utilization = self.perf_library.get_node_utilization_for_pool( ssc_vol_name) pool['utilization'] = na_utils.round_down(utilization) pool['filter_function'] = filter_function pool['goodness_function'] = goodness_function # Add replication capabilities/stats pool.update( self.get_replication_backend_stats(self.configuration)) pools.append(pool) return pools def _update_ssc(self): """Refresh the storage service catalog with the latest set of pools.""" self._ensure_shares_mounted() self.ssc_library.update_ssc(self._get_flexvol_to_pool_map()) def _get_flexvol_to_pool_map(self): """Get the flexvols that back all mounted shares. The map is of the format suitable for seeding the storage service catalog: { : {'pool_name': }} """ pools = {} vserver_addresses = self.zapi_client.get_operational_lif_addresses() for share in self._mounted_shares: host, junction_path = na_utils.get_export_host_junction_path(share) address = volume_utils.resolve_hostname(host) if address not in vserver_addresses: LOG.warning('Address not found for NFS share %s.', share) continue try: flexvol = self.zapi_client.get_flexvol( flexvol_path=junction_path) pools[flexvol['name']] = {'pool_name': share} except exception.VolumeBackendAPIException: LOG.exception('Flexvol not found for NFS share %s.', share) return pools def _shortlist_del_eligible_files(self, share, old_files): """Prepares list of eligible files to be deleted from cache.""" file_list = [] (vserver, exp_volume) = self._get_vserver_and_exp_vol( volume_id=None, share=share) for old_file in old_files: path = '/vol/%s/%s' % (exp_volume, old_file) u_bytes = self.zapi_client.get_file_usage(path, vserver) file_list.append((old_file, u_bytes)) LOG.debug('Shortlisted files eligible for deletion: %s', file_list) return file_list def _share_match_for_ip(self, ip, shares): """Returns the share that is served by ip. Multiple shares can have same dir path but can be served using different ips. It finds the share which is served by ip on same nfs server. 
""" ip_vserver = self._get_vserver_for_ip(ip) if ip_vserver and shares: for share in shares: ip_sh, __ = na_utils.get_export_host_junction_path(share) sh_vserver = self._get_vserver_for_ip(ip_sh) if sh_vserver == ip_vserver: LOG.debug('Share match found for ip %s', ip) return share LOG.debug('No share match found for ip %s', ip) return None def _get_vserver_for_ip(self, ip): """Get vserver for the mentioned ip.""" try: ifs = self.zapi_client.get_if_info_by_ip(ip) vserver = ifs[0].get('vserver') return vserver except Exception: return None def _is_share_clone_compatible(self, volume, share): """Checks if share is compatible with volume to host its clone.""" flexvol_name = self._get_flexvol_name_for_share(share) return self._is_share_vol_type_match(volume, share, flexvol_name) def _is_share_vol_type_match(self, volume, share, flexvol_name): """Checks if share matches volume type.""" LOG.debug("Found volume %(vol)s for share %(share)s.", {'vol': flexvol_name, 'share': share}) extra_specs = na_utils.get_volume_extra_specs(volume) flexvol_names = self.ssc_library.get_matching_flexvols_for_extra_specs( extra_specs) return flexvol_name in flexvol_names def _get_flexvol_name_for_share(self, nfs_share): """Queries the SSC for the flexvol containing an NFS share.""" ssc = self.ssc_library.get_ssc() for ssc_vol_name, ssc_vol_info in ssc.items(): if nfs_share == ssc_vol_info.get('pool_name'): return ssc_vol_name return None def delete_volume(self, volume): """Deletes a logical volume.""" self._delete_backing_file_for_volume(volume) try: qos_policy_group_info = na_utils.get_valid_qos_policy_group_info( volume) is_adaptive = na_utils.is_qos_policy_group_spec_adaptive( qos_policy_group_info) self.zapi_client.mark_qos_policy_group_for_deletion( qos_policy_group_info, is_adaptive) except Exception: # Don't blow up here if something went wrong de-provisioning the # QoS policy for the volume. 
pass def _delete_backing_file_for_volume(self, volume): """Deletes file on nfs share that backs a cinder volume.""" is_flexgroup = self._is_flexgroup(host=volume['host']) try: LOG.debug('Deleting backing file for volume %s.', volume['id']) if (is_flexgroup and not self._is_flexgroup_clone_file_supported()): super(NetAppCmodeNfsDriver, self).delete_volume(volume) else: self._delete_file(volume['id'], volume['name']) except Exception: if (is_flexgroup and not self._is_flexgroup_clone_file_supported()): LOG.exception('Exec of "rm" command on backing file for ' '%s was unsuccessful.', volume['id']) else: LOG.exception('Could not delete volume %s on backend, ' 'falling back to exec of "rm" command.', volume['id']) try: super(NetAppCmodeNfsDriver, self).delete_volume(volume) except Exception: LOG.exception('Exec of "rm" command on backing file for ' '%s was unsuccessful.', volume['id']) def _delete_file(self, file_id, file_name): (host_ip, junction_path) = self._get_export_ip_path(volume_id=file_id) vserver = self._get_vserver_for_ip(host_ip) flexvol = self.zapi_client.get_vol_by_junc_vserver( vserver, junction_path) path_on_backend = '/vol/' + flexvol + '/' + file_name LOG.debug('Attempting to delete file %(path)s for ID %(file_id)s on ' 'backend.', {'path': path_on_backend, 'file_id': file_id}) self.zapi_client.delete_file(path_on_backend) def delete_snapshot(self, snapshot): """Deletes a snapshot.""" if (self._is_flexgroup(snapshot['volume_id']) and not self._is_flexgroup_clone_file_supported()): super(NetAppCmodeNfsDriver, self).delete_snapshot(snapshot) else: self._delete_backing_file_for_snapshot(snapshot) def _delete_backing_file_for_snapshot(self, snapshot): """Deletes file on nfs share that backs a cinder volume.""" try: LOG.debug('Deleting backing file for snapshot %s.', snapshot['id']) self._delete_file(snapshot['volume_id'], snapshot['name']) except Exception: LOG.exception('Could not delete snapshot %s on backend, ' 'falling back to exec of "rm" command.', snapshot['id']) try: # delete_file_from_share super(NetAppCmodeNfsDriver, self).delete_snapshot(snapshot) except Exception: LOG.exception('Exec of "rm" command on backing file for' ' %s was unsuccessful.', snapshot['id']) def _get_ip_verify_on_cluster(self, host): """Verifies if host on same cluster and returns ip.""" ip = volume_utils.resolve_hostname(host) vserver = self._get_vserver_for_ip(ip) if not vserver: raise exception.NotFound(_("Unable to locate an SVM that is " "managing the IP address '%s'") % ip) return ip, vserver def _copy_from_cache(self, volume, image_id, cache_result): """Try copying image file_name from cached file_name.""" copied = False cache_copy, found_local = self._find_image_location(cache_result, volume) try: if found_local: LOG.debug("Trying copy from cache using cloning.") (nfs_share, file_name) = cache_copy self._clone_file_dst_exists( nfs_share, file_name, volume['name'], dest_exists=True) LOG.debug("Copied image from cache to volume %s using " "cloning.", volume['id']) copied = True elif (cache_copy and self.configuration.netapp_copyoffload_tool_path): volume['provider_location'] = volume_utils.extract_host( volume['host'], level='pool') LOG.debug("Trying copy from cache using copy offload.") self._copy_from_remote_cache(volume, image_id, cache_copy) copied = True elif cache_copy: LOG.debug("Trying copy from cache using file copy.") self._copy_from_remote_cache(volume, image_id, cache_copy, use_copyoffload_tool=False) copied = True except Exception: LOG.exception('Error in workflow copy from 
cache.') return copied def _find_image_location(self, cache_result, volume): """Finds the location of a cached image. Returns image location local to the NFS share, that matches the volume_id, if it exists. Otherwise returns the last entry in cache_result or None if cache_result is empty. """ found_local_copy = False cache_copy = None provider_location = volume_utils.extract_host(volume['host'], level='pool') for res in cache_result: (share, file_name) = res if share == provider_location: cache_copy = res found_local_copy = True break else: cache_copy = res return cache_copy, found_local_copy def _copy_from_remote_cache(self, volume, image_id, cache_copy, use_copyoffload_tool=True): """Copies the remote cached image to the provided volume. Executes either the copy offload binary or the file copy operation, copying the cached image to the destination path of the provided volume. Also registers the new copy of the image as a cached image. """ (nfs_share, file_name) = cache_copy (src_ip, src_vserver, src_share_path, src_path) = ( self._get_source_ip_and_path(nfs_share, file_name)) (dest_ip, dest_vserver, dest_path) = ( self._get_destination_ip_and_path(volume)) # NOTE(felipe_rodrigues): the copy offload tool code will be removed in # the Antelope release. col_path = self.configuration.netapp_copyoffload_tool_path if use_copyoffload_tool and col_path: # Always run copy offload as regular user, it's sufficient # and rootwrap doesn't allow copy offload to run as root anyways. self._execute(col_path, src_ip, dest_ip, src_path, dest_path, run_as_root=False, check_exit_code=0) LOG.debug("Copied image from cache to volume %s using " "copy offload.", volume['id']) else: dest_share_path = dest_path.rsplit("/", 1)[0] self._copy_file(file_name, file_name, src_share_path, src_vserver, dest_share_path, dest_vserver, dest_backend_name=self.backend_name, dest_file_name=volume.name) LOG.debug("Copied image from cache to volume %s using " "file copy operation.", volume['id']) self._register_image_in_cache(volume, image_id) def _get_source_ip_and_path(self, nfs_share, file_name): host, share_path = na_utils.get_export_host_junction_path(nfs_share) (src_ip, src_vserver) = self._get_ip_verify_on_cluster(host) src_path = os.path.join(share_path, file_name) return src_ip, src_vserver, share_path, src_path def _get_destination_ip_and_path(self, volume): share = volume_utils.extract_host(volume['host'], level='pool') share_ip, share_path = na_utils.get_export_host_junction_path(share) (dest_ip, vserver) = self._get_ip_verify_on_cluster(share_ip) dest_path = os.path.join(share_path, volume['name']) return dest_ip, vserver, dest_path def _clone_file_dst_exists(self, share, src_name, dst_name, dest_exists=False): """Clone file even if dest exists.""" (vserver, exp_volume) = self._get_vserver_and_exp_vol(share=share) self.zapi_client.clone_file(exp_volume, src_name, dst_name, vserver, dest_exists=dest_exists) def _copy_from_img_service(self, context, volume, image_service, image_id, use_copyoffload_tool=True): """Copies from the image service using copy offload or file copy.""" image_loc = image_service.get_location(context, image_id) locations = self._construct_image_nfs_url(image_loc) src_ip = None src_vserver = None src_volume = None selected_loc = None cloned = False # this will match the first location that has a valid IP on cluster for location in locations: conn, src_volume = self._check_get_nfs_path_segs(location) if conn: try: (src_ip, src_vserver) = ( self._get_ip_verify_on_cluster(conn.split(':')[0])) 
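# Illustrative sketch (not part of the driver): the order of preference used by _copy_from_cache()
# above when seeding a new volume from the image cache. Cache entries are (nfs_share, file_name)
# tuples, as returned by _find_image_location(); the three callables are placeholders for
# _clone_file_dst_exists(), the copy offload tool and _copy_file() respectively.
def copy_image_from_cache(cache_entry, found_local, offload_tool_configured,
                          clone_local, copy_offload, file_copy):
    if found_local:
        return clone_local(cache_entry)        # same flexvol: file clone
    if cache_entry and offload_tool_configured:
        return copy_offload(cache_entry)       # remote share: copy offload binary
    if cache_entry:
        return file_copy(cache_entry)          # remote share: ONTAP file copy job
    return False                               # nothing cached anywhere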
selected_loc = location break except exception.NotFound: pass if src_ip is None or src_vserver is None: raise exception.NotFound(_("Source host details not found.")) (__, ___, img_file) = selected_loc.rpartition('/') (dst_ip, dest_vserver, vol_path) = ( self._get_destination_ip_and_path(volume)) dest_share_path = vol_path.rsplit("/", 1)[0] dst_share = dst_ip + ':' + dest_share_path # tmp file is required to deal with img formats tmp_img_file = str(uuid.uuid4()) img_info = image_service.show(context, image_id) self._check_share_can_hold_size(dst_share, img_info['size']) run_as_root = self._execute_as_root dst_dir = self._get_mount_point_for_share(dst_share) dst_img_local = os.path.join(dst_dir, tmp_img_file) try: # NOTE(felipe_rodrigues): the copy offload tool code will be # removed in the AA release. col_path = self.configuration.netapp_copyoffload_tool_path if col_path and use_copyoffload_tool: LOG.debug("Trying copy from image service using copy offload.") dst_img_serv_path = os.path.join(dest_share_path, tmp_img_file) src_path = os.path.join(src_volume, img_file) # Always run copy offload as regular user, it's sufficient # and rootwrap doesn't allow copy offload to run as root # anyways. self._execute(col_path, src_ip, dst_ip, src_path, dst_img_serv_path, run_as_root=False, check_exit_code=0) else: LOG.debug("Trying copy from image service using file copy.") src_volume = ''.join(src_volume.split("/", 1)) dest_share_path = ''.join(dest_share_path.split("/", 1)) self._copy_file(img_file, img_file, src_volume, src_vserver, dest_share_path, dest_vserver, dest_backend_name=self.backend_name, dest_file_name=tmp_img_file) self._discover_file_till_timeout(dst_img_local, timeout=120) LOG.debug('Copied image %(img)s to tmp file %(tmp)s.', {'img': image_id, 'tmp': tmp_img_file}) dst_img_cache_local = os.path.join(dst_dir, 'img-cache-%s' % image_id) if img_info['disk_format'] == 'raw': LOG.debug('Image is raw %s.', image_id) self._clone_file_dst_exists(dst_share, tmp_img_file, volume['name'], dest_exists=True) self._move_nfs_file(dst_img_local, dst_img_cache_local) LOG.debug('Copied raw image %(img)s to volume %(vol)s.', {'img': image_id, 'vol': volume['id']}) else: LOG.debug('Image will be converted to raw %s.', image_id) img_conv = str(uuid.uuid4()) dst_img_conv_local = os.path.join(dst_dir, img_conv) # Checking against image size which is approximate check self._check_share_can_hold_size(dst_share, img_info['size']) try: image_utils.convert_image(dst_img_local, dst_img_conv_local, 'raw', run_as_root=run_as_root) data = image_utils.qemu_img_info(dst_img_conv_local, run_as_root=run_as_root) if data.file_format != "raw": raise exception.InvalidResults( _("Converted to raw, but format is now %s.") % data.file_format) else: self._clone_file_dst_exists(dst_share, img_conv, volume['name'], dest_exists=True) self._move_nfs_file(dst_img_conv_local, dst_img_cache_local) LOG.debug('Copied locally converted raw image' ' %(img)s to volume %(vol)s.', {'img': image_id, 'vol': volume['id']}) finally: if os.path.exists(dst_img_conv_local): self._delete_file_at_path(dst_img_conv_local) cloned = True finally: if os.path.exists(dst_img_local): self._delete_file_at_path(dst_img_local) return cloned def unmanage(self, volume): """Removes the specified volume from Cinder management. Does not delete the underlying backend storage object. A log entry will be made to notify the Admin that the volume is no longer being managed. 
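# Illustrative sketch (not part of the driver): the convert-and-verify step used by
# _copy_from_img_service() above, reduced to a standalone helper. image_utils is
# cinder.image.image_utils, as imported by this module; the paths are hypothetical.
from cinder.image import image_utils

def ensure_raw(src_path, dst_path, run_as_root=False):
    # Convert the downloaded image to raw, then double-check the result before
    # cloning it into place.
    image_utils.convert_image(src_path, dst_path, 'raw', run_as_root=run_as_root)
    data = image_utils.qemu_img_info(dst_path, run_as_root=run_as_root)
    if data.file_format != 'raw':
        raise ValueError('Converted to raw, but format is now %s.' % data.file_format)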
:param volume: Cinder volume to unmanage """ try: qos_policy_group_info = na_utils.get_valid_qos_policy_group_info( volume) self.zapi_client.mark_qos_policy_group_for_deletion( qos_policy_group_info) except Exception: # Unmanage even if there was a problem deprovisioning the # associated qos policy group. pass super(NetAppCmodeNfsDriver, self).unmanage(volume) def failover_host(self, context, volumes, secondary_id=None, groups=None): """Failover a backend to a secondary replication target. This function combines failover() and failover_completed() to perform failover when Active/Active is not enabled. """ active_backend_name, volume_updates, group_updates = ( self._failover(context, volumes, secondary_id, groups)) self._failover_completed(context, active_backend_name) return active_backend_name, volume_updates, group_updates def failover(self, context, volumes, secondary_id=None, groups=None): """Failover to replication target.""" return self._failover(context, volumes, secondary_id, groups) def failover_completed(self, context, secondary_id=None): """Update volume node when `failover` is completed.""" return self._failover_completed(context, secondary_id) def _get_backing_flexvol_names(self): """Returns a list of backing flexvol names.""" ssc = self.ssc_library.get_ssc() return list(ssc.keys()) def _get_flexvol_names_from_hosts(self, hosts): """Returns a set of flexvol names.""" flexvols = set() ssc = self.ssc_library.get_ssc() for host in hosts: pool_name = volume_utils.extract_host(host, level='pool') for flexvol_name, ssc_volume_data in ssc.items(): if ssc_volume_data['pool_name'] == pool_name: flexvols.add(flexvol_name) return flexvols def delete_group_snapshot(self, context, group_snapshot, snapshots): """Delete files backing each snapshot in the group snapshot. :return: An implicit update of snapshot models that the manager will interpret and subsequently set the model state to deleted. """ for snapshot in snapshots: self._delete_backing_file_for_snapshot(snapshot) LOG.debug("Snapshot %s deletion successful", snapshot['name']) return None, None def create_group(self, context, group): """Driver entry point for creating a generic volume group. ONTAP does not maintain an actual group construct. As a result, no communication to the backend is necessary for generic volume group creation. :returns: Hard-coded model update for generic volume group model. """ model_update = {'status': fields.GroupStatus.AVAILABLE} if (self._is_flexgroup(host=group['host']) and volume_utils.is_group_a_cg_snapshot_type(group)): msg = _("Cannot create %s consistency group on FlexGroup pool.") raise na_utils.NetAppDriverException(msg % group['id']) return model_update def delete_group(self, context, group, volumes): """Driver entry point for deleting a generic volume group. :returns: Updated group model and list of volume models for the volumes that were deleted. """ model_update = {'status': fields.GroupStatus.DELETED} volumes_model_update = [] for volume in volumes: try: self.delete_volume(volume) volumes_model_update.append( {'id': volume['id'], 'status': 'deleted'}) except Exception: volumes_model_update.append( {'id': volume['id'], 'status': fields.GroupStatus.ERROR_DELETING}) LOG.exception("Volume %(vol)s in the group could not be " "deleted.", {'vol': volume}) return model_update, volumes_model_update def update_group(self, context, group, add_volumes=None, remove_volumes=None): """Driver entry point for updating a generic volume group. 
Since no actual group construct is ever created in ONTAP, it is not necessary to update any metadata on the backend. Since this is a NO-OP, there is guaranteed to be no change in any of the volumes' statuses. """ if volume_utils.is_group_a_cg_snapshot_type(group): for vol in add_volumes: if self._is_flexgroup(host=vol['host']): msg = _("Cannot add volume from FlexGroup pool to " "consistency group.") raise na_utils.NetAppDriverException(msg) return None, None, None def create_group_snapshot(self, context, group_snapshot, snapshots): """Creates a Cinder group snapshot object. The Cinder group snapshot object is created by making use of an ONTAP consistency group snapshot in order to provide write-order consistency for a set of flexvol snapshots. First, a list of the flexvols backing the given Cinder group must be gathered. An ONTAP group-snapshot of these flexvols will create a snapshot copy of all the Cinder volumes in the generic volume group. For each Cinder volume in the group, it is then necessary to clone its backing file from the ONTAP cg-snapshot. The naming convention used for the clones is what indicates the clone's role as a Cinder snapshot and its inclusion in a Cinder group. The ONTAP cg-snapshot of the flexvols is deleted after the cloning operation is completed. :returns: An implicit update for the group snapshot and snapshot models that is then used by the manager to set the models to available. """ try: if volume_utils.is_group_a_cg_snapshot_type(group_snapshot): # NOTE(felipe_rodrigues): ONTAP FlexGroup does not support # consistency group snapshot, so all members must be inside # a FlexVol pool. for snapshot in snapshots: if self._is_flexgroup(host=snapshot['volume']['host']): msg = _("Cannot create consistency group snapshot with" " volumes on a FlexGroup pool.") raise na_utils.NetAppDriverException(msg) self._create_consistent_group_snapshot(group_snapshot, snapshots) else: for snapshot in snapshots: self.create_snapshot(snapshot) except Exception as ex: err_msg = (_("Create group snapshot failed (%s).") % ex) LOG.exception(err_msg, resource=group_snapshot) raise na_utils.NetAppDriverException(err_msg) return None, None def _create_consistent_group_snapshot(self, group_snapshot, snapshots): hosts = [snapshot['volume']['host'] for snapshot in snapshots] flexvols = self._get_flexvol_names_from_hosts(hosts) # Create snapshot for backing flexvol self.zapi_client.create_cg_snapshot(flexvols, group_snapshot['id']) # Start clone process for snapshot files for snapshot in snapshots: self._clone_backing_file_for_volume( snapshot['volume']['name'], snapshot['name'], snapshot['volume']['id'], source_snapshot=group_snapshot['id']) # Delete backing flexvol snapshots for flexvol_name in flexvols: try: self.zapi_client.wait_for_busy_snapshot( flexvol_name, group_snapshot['id']) self.zapi_client.delete_snapshot( flexvol_name, group_snapshot['id']) except exception.SnapshotIsBusy: self.zapi_client.mark_snapshot_for_deletion( flexvol_name, group_snapshot['id']) def create_group_from_src(self, context, group, volumes, group_snapshot=None, sorted_snapshots=None, source_group=None, sorted_source_vols=None): """Creates a group from a group snapshot or a group of cinder vols. :returns: An implicit update for the volumes model that is interpreted by the manager as a successful operation. 
""" LOG.debug("VOLUMES %s ", ', '.join([vol['id'] for vol in volumes])) model_update = None volumes_model_update = [] if group_snapshot: vols = zip(volumes, sorted_snapshots) for volume, snapshot in vols: update = self.create_volume_from_snapshot( volume, snapshot) update['id'] = volume['id'] volumes_model_update.append(update) elif source_group and sorted_source_vols: hosts = [] for source_vol in sorted_source_vols: # NOTE(felipe_rodrigues): ONTAP FlexGroup does not support # consistency group snapshot, so if any source volume is on a # FlexGroup, the operation must be create from a not-cg, # falling back to the generic group support. if self._is_flexgroup(host=source_vol['host']): if volume_utils.is_group_a_cg_snapshot_type(group): msg = _("Cannot create consistency group with volume " "on a FlexGroup pool.") raise na_utils.NetAppDriverException(msg) else: # falls back to generic support raise NotImplementedError() hosts.append(source_vol['host']) flexvols = self._get_flexvol_names_from_hosts(hosts) # Create snapshot for backing flexvol snapshot_name = 'snapshot-temp-' + source_group['id'] self.zapi_client.create_cg_snapshot(flexvols, snapshot_name) # Start clone process for new volumes vols = zip(volumes, sorted_source_vols) for volume, source_vol in vols: self._clone_backing_file_for_volume( source_vol['name'], volume['name'], source_vol['id'], source_snapshot=snapshot_name) volume_model_update = ( self._get_volume_model_update(volume) or {}) volume_model_update.update({ 'id': volume['id'], 'provider_location': source_vol['provider_location'], }) volumes_model_update.append(volume_model_update) # Delete backing flexvol snapshots for flexvol_name in flexvols: self.zapi_client.wait_for_busy_snapshot( flexvol_name, snapshot_name) self.zapi_client.delete_snapshot(flexvol_name, snapshot_name) else: LOG.error("Unexpected set of parameters received when " "creating group from source.") model_update = {'status': fields.GroupStatus.ERROR} return model_update, volumes_model_update def _is_flexgroup(self, vol_id=None, host=None): """Discover if a volume is a FlexGroup or not""" if host is None: host = self._get_volume_host(vol_id) pool_name = volume_utils.extract_host(host, level='pool') return self.ssc_library.is_flexgroup(pool_name) def _is_flexgroup_clone_file_supported(self): """Check whether storage can perform clone file for FlexGroup""" return self.zapi_client.features.FLEXGROUP_CLONE_FILE def _cancel_file_copy(self, job_uuid, file_name, dest_pool, dest_backend_name=None): """Cancel an on-going file copy operation.""" try: # NOTE(sfernand): Another approach would be first checking if # the copy operation isn't in `destroying` or `destroyed` states # before issuing cancel. self.zapi_client.destroy_file_copy(job_uuid) except na_utils.NetAppDriverException: dest_client = dot_utils.get_client_for_backend(dest_backend_name) file_path = '%s/%s' % (dest_pool, file_name) try: dest_client.delete_file(file_path) except Exception: LOG.warning('Error cleaning up file %s in destination volume. 
' 'Verify if destination volume still exists in ' 'pool %s and delete it manually to avoid unused ' 'resources.', file_path, dest_pool) def _copy_file(self, file_name, volume_id, src_ontap_volume, src_vserver, dest_ontap_volume, dest_vserver, dest_file_name=None, dest_backend_name=None, cancel_on_error=False): """Copies file from an ONTAP volume to another.""" job_uuid = self.zapi_client.start_file_copy( file_name, dest_ontap_volume, src_ontap_volume=src_ontap_volume, dest_file_name=dest_file_name) LOG.debug('Start copying file %(file)s from ' '%(src_vserver)s:%(src_ontap_vol)s to ' '%(dest_vserver)s:%(dest_ontap_vol)s. Job UUID is %(job)s.', {'file': file_name, 'src_vserver': src_vserver, 'src_ontap_vol': src_ontap_volume, 'dest_vserver': dest_vserver, 'dest_ontap_vol': dest_ontap_volume, 'job': job_uuid}) def _wait_file_copy_complete(): copy_status = self.zapi_client.get_file_copy_status(job_uuid) if not copy_status: status_error_msg = (_("Error copying file %s. The " "corresponding Job UUID %s doesn't " "exist.")) raise na_utils.NetAppDriverException( status_error_msg % (file_name, job_uuid)) LOG.debug('Waiting for file copy job %s to complete. Current ' 'status is: %s.', job_uuid, copy_status['job-status']) if copy_status['job-status'] == 'destroyed': status_error_msg = (_('Error copying file %s. %s.')) raise na_utils.NetAppDriverException( status_error_msg % (file_name, copy_status['last-failure-reason'])) elif copy_status['job-status'] == 'complete': raise loopingcall.LoopingCallDone() try: timer = loopingcall.FixedIntervalWithTimeoutLoopingCall( _wait_file_copy_complete) timer.start( interval=10, timeout=self.configuration.netapp_migrate_volume_timeout ).wait() except Exception as e: with excutils.save_and_reraise_exception() as ctxt: if cancel_on_error: try: self._cancel_file_copy( job_uuid, file_name, dest_ontap_volume, dest_backend_name=dest_backend_name) except na_utils.NetAppDriverException as ex: LOG.error("Failed to cancel file copy operation. %s", ex) if isinstance(e, loopingcall.LoopingCallTimeOut): ctxt.reraise = False msg = (_('Timeout waiting for volume %s to complete ' 'migration.')) raise na_utils.NetAppDriverTimeout(msg % volume_id) def _finish_volume_migration(self, src_volume, dest_pool): """Finish volume migration to another ONTAP volume.""" # The source volume can be safely deleted after a successful migration. self.delete_volume(src_volume) # NFS driver requires the provider_location to be updated with the new # destination. 
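# Illustrative sketch (not part of the driver): the polling pattern used by _copy_file() above,
# reduced to a standalone example. check_job is a placeholder for get_file_copy_status(); the
# status strings mirror the ones handled above.
from oslo_service import loopingcall

def wait_for_file_copy(check_job, interval=10, timeout=300):
    def _poll():
        status = check_job()
        if status == 'complete':
            # Raising LoopingCallDone stops the loop without an error.
            raise loopingcall.LoopingCallDone()
        if status == 'destroyed':
            raise RuntimeError('file copy job failed')
    timer = loopingcall.FixedIntervalWithTimeoutLoopingCall(_poll)
    # start() returns an event; wait() raises loopingcall.LoopingCallTimeOut
    # if the job does not finish within `timeout` seconds.
    timer.start(interval=interval, timeout=timeout).wait()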
updates = {'provider_location': dest_pool} return updates def _migrate_volume_to_vserver(self, volume, src_pool, src_vserver, dest_pool, dest_vserver, dest_backend_name): """Migrate volume to another vserver within the same cluster.""" LOG.info('Migrating volume %(vol)s from ' '%(src_vserver)s:%(src_ontap_vol)s to ' '%(dest_vserver)s:%(dest_ontap_vol)s.', {'vol': volume.id, 'src_vserver': src_vserver, 'src_ontap_vol': src_pool, 'dest_vserver': dest_vserver, 'dest_ontap_vol': dest_pool}) vserver_peer_application = 'file_copy' self.create_vserver_peer(src_vserver, self.backend_name, dest_vserver, [vserver_peer_application]) src_ontap_volume_name = src_pool.split(':/')[1] dest_ontap_volume_name = dest_pool.split(':/')[1] self._copy_file(volume.name, volume.id, src_ontap_volume_name, src_vserver, dest_ontap_volume_name, dest_vserver, dest_backend_name=dest_backend_name, cancel_on_error=True) updates = self._finish_volume_migration(volume, dest_pool) LOG.info('Successfully migrated volume %(vol)s from ' '%(src_vserver)s:%(src_ontap_vol)s ' 'to %(dest_vserver)s:%(dest_ontap_vol)s.', {'vol': volume.id, 'src_vserver': src_vserver, 'src_ontap_vol': src_pool, 'dest_vserver': dest_vserver, 'dest_ontap_vol': dest_pool}) return updates def _migrate_volume_to_pool(self, volume, src_pool, dest_pool, vserver, dest_backend_name): """Migrate volume to another Cinder Pool within the same vserver.""" LOG.info('Migrating volume %(vol)s from pool %(src)s to ' '%(dest)s within vserver %(vserver)s.', {'vol': volume.id, 'src': src_pool, 'dest': dest_pool, 'vserver': vserver}) src_ontap_volume_name = src_pool.split(':/')[1] dest_ontap_volume_name = dest_pool.split(':/')[1] self._copy_file(volume.name, volume.id, src_ontap_volume_name, vserver, dest_ontap_volume_name, vserver, dest_backend_name=dest_backend_name, cancel_on_error=True) updates = self._finish_volume_migration(volume, dest_pool) LOG.info('Successfully migrated volume %(vol)s from pool %(src)s ' 'to %(dest)s within vserver %(vserver)s.', {'vol': volume.id, 'src': src_pool, 'dest': dest_pool, 'vserver': vserver}) return updates def migrate_volume(self, context, volume, host): """Migrate Cinder volume to the specified pool or vserver.""" # NOTE(sfernand): the NetApp NFS driver relies only on copying # operations for storage assisted migration which are always # disruptive, as it requires the destination volume to be added as a new # block device to the Nova instance. if volume.status != fields.VolumeStatus.AVAILABLE: LOG.info("Storage assisted migration requires volume to be in " "available status. Falling back to host assisted " "migration.") return False, {} return self.migrate_volume_ontap_assisted( volume, host, self.backend_name, self.configuration.netapp_vserver) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/drivers/netapp/dataontap/nvme_cmode.py0000664000175000017500000001123700000000000025254 0ustar00zuulzuul00000000000000# Copyright (c) 2023 NetApp, Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. """ Volume driver for NetApp Data ONTAP NVMe storage systems. """ from cinder import interface from cinder.volume import driver from cinder.volume.drivers.netapp.dataontap import nvme_library from cinder.volume.drivers.netapp import options as na_opts @interface.volumedriver class NetAppCmodeNVMeDriver(driver.BaseVD): """NetApp C-mode NVMe volume driver. Version history: .. code-block:: none 1.0.0 - Initial driver """ VERSION = "1.0.0" DRIVER_NAME = 'NetApp_NVMe_Cluster_direct' # ThirdPartySystems wiki page CI_WIKI_NAME = "NetApp_CI" def __init__(self, *args, **kwargs): super(NetAppCmodeNVMeDriver, self).__init__(*args, **kwargs) self.library = nvme_library.NetAppNVMeStorageLibrary( self.DRIVER_NAME, 'NVMe', **kwargs) @staticmethod def get_driver_options(): return na_opts.netapp_cluster_opts def do_setup(self, context): self.library.do_setup(context) def check_for_setup_error(self): self.library.check_for_setup_error() def create_volume(self, volume): return self.library.create_volume(volume) def create_volume_from_snapshot(self, volume, snapshot): return self.library.create_volume_from_snapshot(volume, snapshot) def create_cloned_volume(self, volume, src_vref): return self.library.create_cloned_volume(volume, src_vref) def delete_volume(self, volume): self.library.delete_volume(volume) def create_snapshot(self, snapshot): self.library.create_snapshot(snapshot) def delete_snapshot(self, snapshot): self.library.delete_snapshot(snapshot) def get_volume_stats(self, refresh=False): return self.library.get_volume_stats(refresh, self.get_filter_function(), self.get_goodness_function()) def get_default_filter_function(self): return self.library.get_default_filter_function() def get_default_goodness_function(self): return self.library.get_default_goodness_function() def extend_volume(self, volume, new_size): self.library.extend_volume(volume, new_size) def ensure_export(self, context, volume): return self.library.ensure_export(context, volume) def create_export(self, context, volume, connector): return self.library.create_export(context, volume) def remove_export(self, context, volume): self.library.remove_export(context, volume) def initialize_connection(self, volume, connector): conn_info = self.library.initialize_connection(volume, connector) return conn_info def terminate_connection(self, volume, connector, **kwargs): conn_info = self.library.terminate_connection(volume, connector, **kwargs) return conn_info def get_pool(self, volume): return self.library.get_pool(volume) def create_group(self, context, group): return self.library.create_group(group) def delete_group(self, context, group, volumes): return self.library.delete_group(group, volumes) def update_group(self, context, group, add_volumes=None, remove_volumes=None): return self.library.update_group(group, add_volumes=None, remove_volumes=None) def create_group_snapshot(self, context, group_snapshot, snapshots): return self.library.create_group_snapshot(group_snapshot, snapshots) def delete_group_snapshot(self, context, group_snapshot, snapshots): return self.library.delete_group_snapshot(group_snapshot, snapshots) def create_group_from_src(self, context, group, volumes, group_snapshot=None, snapshots=None, source_group=None, source_vols=None): return self.library.create_group_from_src( group, volumes, group_snapshot=group_snapshot, snapshots=snapshots, source_group=source_group, source_vols=source_vols) 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/drivers/netapp/dataontap/nvme_library.py0000664000175000017500000011347500000000000025640 0ustar00zuulzuul00000000000000# Copyright (c) 2023 NetApp, Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Volume driver library for NetApp C-mode NVMe storage systems. """ import uuid from oslo_log import log as logging from oslo_utils import excutils from oslo_utils import units from cinder import coordination from cinder import exception from cinder.i18n import _ from cinder.objects import fields from cinder.volume.drivers.netapp.dataontap.client import api as netapp_api from cinder.volume.drivers.netapp.dataontap.performance import perf_cmode from cinder.volume.drivers.netapp.dataontap.utils import capabilities from cinder.volume.drivers.netapp.dataontap.utils import loopingcalls from cinder.volume.drivers.netapp.dataontap.utils import utils as dot_utils from cinder.volume.drivers.netapp import options as na_opts from cinder.volume.drivers.netapp import utils as na_utils from cinder.volume import volume_utils LOG = logging.getLogger(__name__) class NetAppNamespace(object): """Represents a namespace on NetApp storage.""" def __init__(self, handle, name, size, metadata_dict): self.handle = handle self.name = name self.size = size self.metadata = metadata_dict or {} def get_metadata_property(self, prop): """Get the metadata property of a namespace.""" if prop in self.metadata: return self.metadata[prop] name = self.name LOG.debug("No metadata property %(prop)s defined for the namespace " "%(name)s", {'prop': prop, 'name': name}) def __str__(self, *args, **kwargs): return ('NetApp namespace [handle:%s, name:%s, size:%s, metadata:%s]' % (self.handle, self.name, self.size, self.metadata)) class NetAppNVMeStorageLibrary( object, metaclass=volume_utils.TraceWrapperMetaclass): """NetApp NVMe storage library for Data ONTAP.""" # do not increment this as it may be used in volume type definitions. 
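# Illustrative sketch (not part of the driver): how the NetAppNamespace cache entries defined
# above are built and queried. The handle format "<vserver>:<path>" and the metadata keys follow
# _create_namespace_handle() and create_volume() further down; the concrete values are made up.
ns = NetAppNamespace(
    handle='svm1:/vol/pool1/volume-abc',
    name='volume-abc',
    size=1 * 1024 ** 3,
    metadata_dict={'OsType': 'linux', 'Path': '/vol/pool1/volume-abc'})
assert ns.get_metadata_property('OsType') == 'linux'
# Missing properties are logged at debug level and returned as None.
assert ns.get_metadata_property('Qtree') is None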
VERSION = "1.0.0" REQUIRED_FLAGS_BASIC = ['netapp_login', 'netapp_password', 'netapp_server_hostname'] REQUIRED_FLAGS_CERT = ['netapp_private_key_file', 'netapp_certificate_file'] ALLOWED_NAMESPACE_OS_TYPES = ['aix', 'linux', 'vmware', 'windows'] ALLOWED_SUBSYSTEM_HOST_TYPES = ['aix', 'linux', 'vmware', 'windows'] DEFAULT_NAMESPACE_OS = 'linux' DEFAULT_HOST_TYPE = 'linux' DEFAULT_FILTER_FUNCTION = 'capabilities.utilization < 70 and ' \ 'capabilities.total_volumes < 1024' DEFAULT_GOODNESS_FUNCTION = '100 - capabilities.utilization' REQUIRED_CMODE_FLAGS = ['netapp_vserver'] NVME_PORT = 4420 NVME_TRANSPORT = "tcp" def __init__(self, driver_name, driver_protocol, **kwargs): na_utils.validate_instantiation(**kwargs) self.driver_name = driver_name self.driver_protocol = driver_protocol self.rest_client = None self._stats = {} self.namespace_table = {} self.namespace_ostype = None self.host_type = None self.app_version = kwargs.get("app_version", "unknown") self.host = kwargs.get('host') self.backend_name = self.host.split('@')[1] self.configuration = kwargs['configuration'] self.configuration.append_config_values(na_opts.netapp_connection_opts) self.configuration.append_config_values(na_opts.netapp_basicauth_opts) self.configuration.append_config_values( na_opts.netapp_certificateauth_opts) self.configuration.append_config_values(na_opts.netapp_transport_opts) self.configuration.append_config_values( na_opts.netapp_provisioning_opts) self.configuration.append_config_values(na_opts.netapp_san_opts) self.configuration.append_config_values(na_opts.netapp_cluster_opts) self.max_over_subscription_ratio = ( volume_utils.get_max_over_subscription_ratio( self.configuration.max_over_subscription_ratio, supports_auto=True)) self.reserved_percentage = self.configuration.reserved_percentage self.loopingcalls = loopingcalls.LoopingCalls() def do_setup(self, context): if self.configuration.netapp_private_key_file or\ self.configuration.netapp_certificate_file: na_utils.check_flags(self.REQUIRED_FLAGS_CERT, self.configuration) else: na_utils.check_flags(self.REQUIRED_FLAGS_BASIC, self.configuration) self.namespace_ostype = (self.configuration.netapp_namespace_ostype or self.DEFAULT_NAMESPACE_OS) self.host_type = (self.configuration.netapp_host_type or self.DEFAULT_HOST_TYPE) na_utils.check_flags(self.REQUIRED_CMODE_FLAGS, self.configuration) # NOTE(felipe_rodrigues): NVMe driver is only available with # REST client. self.client = dot_utils.get_client_for_backend( self.backend_name, force_rest=True) self.vserver = self.client.vserver # Storage service catalog. self.ssc_library = capabilities.CapabilitiesLibrary( self.driver_protocol, self.vserver, self.client, self.configuration) self.ssc_library.check_api_permissions() self.using_cluster_credentials = ( self.ssc_library.cluster_user_supported()) # Performance monitoring library. self.perf_library = perf_cmode.PerformanceCmodeLibrary( self.client) def _update_ssc(self): """Refresh the storage service catalog with the latest set of pools.""" """Refresh the storage service catalog with the latest set of pools.""" if self.configuration.netapp_disaggregated_platform: self.ssc_library.update_ssc_asa(self._get_cluster_to_pool_map()) else: self.ssc_library.update_ssc(self._get_flexvol_to_pool_map()) def _get_flexvol_to_pool_map(self): """Get the flexvols that match the pool name search pattern. 
The map is of the format suitable for seeding the storage service catalog: { : {'pool_name': }} """ pool_regex = na_utils.get_pool_name_filter_regex(self.configuration) pools = {} flexvol_names = self.client.list_flexvols() for flexvol_name in flexvol_names: msg_args = { 'flexvol': flexvol_name, 'vol_pattern': pool_regex.pattern, } if pool_regex.match(flexvol_name): msg = "Volume '%(flexvol)s' matches %(vol_pattern)s" LOG.debug(msg, msg_args) pools[flexvol_name] = {'pool_name': flexvol_name} else: msg = "Volume '%(flexvol)s' does not match %(vol_pattern)s" LOG.debug(msg, msg_args) return pools def _get_cluster_to_pool_map(self): return dot_utils.get_cluster_to_pool_map(self.client) def check_for_setup_error(self): """Check that the driver is working and can communicate. Discovers the namespaces on the NetApp server. """ if (not self.configuration.netapp_disaggregated_platform and not self._get_flexvol_to_pool_map()): msg = _('No pools are available for provisioning volumes. ' 'Ensure that the configuration option ' 'netapp_pool_name_search_pattern is set correctly.') raise na_utils.NetAppDriverException(msg) elif self.configuration.netapp_disaggregated_platform: if not self._get_cluster_to_pool_map(): msg = _('No pools are available for provisioning volumes. ' 'Ensure ASA r2 configuration option is set correctly.') raise na_utils.NetAppDriverException(msg) self._add_looping_tasks() if self.namespace_ostype not in self.ALLOWED_NAMESPACE_OS_TYPES: msg = _("Invalid value for NetApp configuration" " option netapp_namespace_ostype.") LOG.error(msg) raise na_utils.NetAppDriverException(msg) if self.host_type not in self.ALLOWED_SUBSYSTEM_HOST_TYPES: msg = _("Invalid value for NetApp configuration" " option netapp_host_type.") LOG.error(msg) raise na_utils.NetAppDriverException(msg) namespace_list = self.client.get_namespace_list() self._extract_and_populate_namespaces(namespace_list) LOG.debug("Success getting list of namespace from server.") self.loopingcalls.start_tasks() def _add_looping_tasks(self): """Add tasks that need to be executed at a fixed interval. Inheriting class overrides and then explicitly calls this method. """ # Note(cknight): Run the update once in the current thread to prevent a # race with the first invocation of _update_volume_stats. self._update_ssc() # Add the task that updates the slow-changing storage service catalog. self.loopingcalls.add_task(self._update_ssc, loopingcalls.ONE_HOUR, loopingcalls.ONE_HOUR) # Add the task that logs EMS messages. self.loopingcalls.add_task( self._handle_ems_logging, loopingcalls.ONE_HOUR) def _handle_ems_logging(self): """Log autosupport messages.""" base_ems_message = dot_utils.build_ems_log_message_0( self.driver_name, self.app_version) self.client.send_ems_log_message(base_ems_message) pool_ems_message = dot_utils.build_ems_log_message_1( self.driver_name, self.app_version, self.vserver, self.ssc_library.get_ssc_flexvol_names(), []) self.client.send_ems_log_message(pool_ems_message) def get_pool(self, volume): """Return pool name where volume resides. :param volume: The volume hosted by the driver. :return: Name of the pool where given volume is hosted. """ name = volume['name'] metadata = self._get_namespace_attr(name, 'metadata') or dict() return metadata.get('Volume', None) def create_volume(self, volume): """Driver entry point for creating a new volume (ONTAP namespace).""" LOG.debug('create_volume on %s', volume['host']) # get Data ONTAP volume name as pool name. 
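# Illustrative sketch (not part of the driver): how a netapp_pool_name_search_pattern value
# selects which flexvols become Cinder pools, mirroring _get_flexvol_to_pool_map() above. The
# pattern and flexvol names are made up for the example.
import re

pool_regex = re.compile(r'^cinder_.*')   # from netapp_pool_name_search_pattern
flexvol_names = ['cinder_ssd_01', 'cinder_sas_01', 'svm_root']
pools = {name: {'pool_name': name}
         for name in flexvol_names if pool_regex.match(name)}
# pools -> {'cinder_ssd_01': {...}, 'cinder_sas_01': {...}}; 'svm_root' is skipped
# because it does not match the pattern.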
pool_name = volume_utils.extract_host(volume['host'], level='pool') if pool_name is None: msg = _("Pool is not available in the volume host field.") raise exception.InvalidHost(reason=msg) namespace = volume.name size = int(volume['size']) * units.Gi metadata = {'OsType': self.namespace_ostype, 'Path': '/vol/%s/%s' % (pool_name, namespace)} try: self.client.create_namespace(pool_name, namespace, size, metadata) except Exception: LOG.exception("Exception creating namespace %(name)s in pool " "%(pool)s.", {'name': namespace, 'pool': pool_name}) msg = _("Volume %s could not be created.") raise exception.VolumeBackendAPIException(data=msg % namespace) LOG.debug('Created namespace with name %(name)s.', {'name': namespace}) metadata['Volume'] = pool_name metadata['Qtree'] = None handle = self._create_namespace_handle(metadata) self._add_namespace_to_table( NetAppNamespace(handle, namespace, size, metadata)) return def delete_volume(self, volume): """Driver entry point for destroying existing volumes.""" self._delete_namespace(volume['name']) def _delete_namespace(self, namespace_name): """Helper method to delete namespace backing a volume or snapshot.""" metadata = self._get_namespace_attr(namespace_name, 'metadata') if metadata: try: self.client.destroy_namespace(metadata['Path']) except netapp_api.NaApiError as e: if e.code in netapp_api.REST_NAMESPACE_EOBJECTNOTFOUND: LOG.warning("Failure deleting namespace %(name)s. " "%(message)s", {'name': namespace_name, 'message': e}) else: error_message = (_('A NetApp Api Error occurred: %s') % e) raise na_utils.NetAppDriverException(error_message) self.namespace_table.pop(namespace_name) else: LOG.warning("No entry in namespace table for volume/snapshot" " %(name)s.", {'name': namespace_name}) def ensure_export(self, context, volume): """Driver entry point to get the export info for an existing volume.""" handle = self._get_namespace_attr(volume['name'], 'handle') return {'provider_location': handle} def create_export(self, context, volume): """Driver entry point to get the export info for a new volume.""" handle = self._get_namespace_attr(volume['name'], 'handle') return {'provider_location': handle} def remove_export(self, context, volume): """Driver entry point to remove an export for a volume. Since exporting is idempotent in this driver, we have nothing to do for unexporting. """ pass def create_snapshot(self, snapshot): """Driver entry point for creating a snapshot. This driver implements snapshots by using efficient single-file (namespace) cloning. 
""" self._create_snapshot(snapshot) def _create_snapshot(self, snapshot): vol_name = snapshot['volume_name'] snapshot_name = snapshot['name'] namespace = self._get_namespace_from_table(vol_name) self._clone_namespace(namespace.name, snapshot_name) def _clone_namespace(self, name, new_name): """Clone namespace with the given handle to the new name.""" metadata = self._get_namespace_attr(name, 'metadata') volume = metadata['Volume'] self.client.clone_namespace(volume, name, new_name) LOG.debug("Cloned namespace with new name %s", new_name) namespace = self.client.get_namespace_by_args( vserver=self.vserver, path=f'/vol/{volume}/{new_name}') if len(namespace) == 0: msg = _("No cloned namespace named %s found on the filer.") raise exception.VolumeBackendAPIException(data=msg % new_name) cloned_namespace = namespace[0] self._add_namespace_to_table( NetAppNamespace( f"{cloned_namespace['Vserver']}:{cloned_namespace['Path']}", new_name, cloned_namespace['Size'], cloned_namespace)) def delete_snapshot(self, snapshot): """Driver entry point for deleting a snapshot.""" self._delete_namespace(snapshot['name']) LOG.debug("Snapshot %s deletion successful.", snapshot['name']) def create_volume_from_snapshot(self, volume, snapshot): source = {'name': snapshot['name'], 'size': snapshot['volume_size']} self._clone_source_to_destination(source, volume) def create_cloned_volume(self, volume, src_vref): src_namespace = self._get_namespace_from_table(src_vref['name']) source = {'name': src_namespace.name, 'size': src_vref['size']} self._clone_source_to_destination(source, volume) def _clone_source_to_destination(self, source, destination_volume): source_size = source['size'] destination_size = destination_volume['size'] source_name = source['name'] destination_name = destination_volume['name'] try: self._clone_namespace(source_name, destination_name) if destination_size != source_size: try: self._extend_volume(destination_volume, destination_size) except Exception: with excutils.save_and_reraise_exception(): LOG.error("Resizing %s failed. 
Cleaning volume.", destination_volume['id']) self.delete_volume(destination_volume) except Exception: LOG.exception("Exception cloning volume %(name)s from source " "volume %(source)s.", {'name': destination_name, 'source': source_name}) msg = _("Volume %s could not be created from source volume.") raise exception.VolumeBackendAPIException( data=msg % destination_name) def _create_namespace_handle(self, metadata): """Returns namespace handle based on filer type.""" return '%s:%s' % (self.vserver, metadata['Path']) def _extract_namespace_info(self, namespace): """Extracts the namespace from API and populates the table.""" path = namespace['Path'] if self.configuration.netapp_disaggregated_platform: name = path else: (_rest, _splitter, name) = path.rpartition('/') handle = self._create_namespace_handle(namespace) size = namespace['Size'] return NetAppNamespace(handle, name, size, namespace) def _extract_and_populate_namespaces(self, api_namespaces): """Extracts the namespaces from API and populates the table.""" for namespace in api_namespaces: discovered_namespace = self._extract_namespace_info(namespace) self._add_namespace_to_table(discovered_namespace) def _add_namespace_to_table(self, namespace): """Adds namespace to cache table.""" if not isinstance(namespace, NetAppNamespace): msg = _("Object is not a NetApp namespace.") raise exception.VolumeBackendAPIException(data=msg) self.namespace_table[namespace.name] = namespace def _get_namespace_from_table(self, name): """Gets namespace from cache table. Refreshes cache if namespace not found in cache. """ namespace = self.namespace_table.get(name) if namespace is None: namespace_list = self.client.get_namespace_list() self._extract_and_populate_namespaces(namespace_list) namespace = self.namespace_table.get(name) if namespace is None: raise exception.VolumeNotFound(volume_id=name) return namespace def _get_namespace_attr(self, name, attr): """Get the namespace attribute if found else None.""" try: attr = getattr(self._get_namespace_from_table(name), attr) return attr except exception.VolumeNotFound as e: LOG.error("Message: %s", e.msg) except Exception as e: LOG.error("Error getting namespace attribute. Exception: %s", e) return None def get_volume_stats(self, refresh=False, filter_function=None, goodness_function=None): """Get volume stats. If 'refresh' is True, update the stats first. """ if refresh: self._update_volume_stats(filter_function=filter_function, goodness_function=goodness_function) return self._stats def _update_volume_stats(self, filter_function=None, goodness_function=None): """Retrieve backend stats.""" LOG.debug('Updating volume stats') data = {} backend_name = self.configuration.safe_get('volume_backend_name') data['volume_backend_name'] = backend_name or self.driver_name data['vendor_name'] = 'NetApp' data['driver_version'] = self.VERSION data['storage_protocol'] = self.driver_protocol data['pools'] = self._get_pool_stats( filter_function=filter_function, goodness_function=goodness_function) data['sparse_copy_volume'] = True data['replication_enabled'] = False self._stats = data def _get_pool_stats(self, filter_function=None, goodness_function=None): """Retrieve pool (Data ONTAP flexvol) stats. Pool statistics are assembled from static driver capabilities, the Storage Service Catalog of flexvol attributes, and real-time capacity and controller utilization metrics. The pool name is the flexvol name. 
""" pools = [] ssc = self.ssc_library.get_ssc() if not ssc: return pools # Utilization and performance metrics require cluster-scoped # credentials if (self.using_cluster_credentials and not self.configuration.netapp_disaggregated_platform): # Get up-to-date node utilization metrics just once self.perf_library.update_performance_cache(ssc) # Get up-to-date aggregate capacities just once aggregates = self.ssc_library.get_ssc_aggregates() aggr_capacities = self.client.get_aggregate_capacities( aggregates) else: aggr_capacities = {} for ssc_vol_name, ssc_vol_info in ssc.items(): pool = dict() # Add storage service catalog data pool.update(ssc_vol_info) # Add driver capabilities and config info pool['QoS_support'] = False pool['multiattach'] = True pool['online_extend_support'] = True pool['consistencygroup_support'] = True pool['consistent_group_snapshot_enabled'] = True pool['reserved_percentage'] = self.reserved_percentage pool['max_over_subscription_ratio'] = ( self.max_over_subscription_ratio) # Add up-to-date capacity info if self.configuration.netapp_disaggregated_platform: capacity = self.client.get_cluster_capacity() else: capacity = self.client.get_flexvol_capacity( flexvol_name=ssc_vol_name) size_total_gb = capacity['size-total'] / units.Gi pool['total_capacity_gb'] = na_utils.round_down(size_total_gb) size_available_gb = capacity['size-available'] / units.Gi pool['free_capacity_gb'] = na_utils.round_down(size_available_gb) namespaces = self.client.get_namespace_sizes_by_volume( ssc_vol_name) pool['total_volumes'] = len(namespaces) if self.configuration.netapp_driver_reports_provisioned_capacity: provisioned_cap = 0 for namespace in namespaces: namespace_name = namespace['path'].split('/')[-1] # Filtering namespaces that matches the volume name # template to exclude snapshots. if volume_utils.extract_id_from_volume_name( namespace_name): provisioned_cap = provisioned_cap + namespace['size'] pool['provisioned_capacity_gb'] = na_utils.round_down( float(provisioned_cap) / units.Gi) if (self.using_cluster_credentials and not self.configuration.netapp_disaggregated_platform): dedupe_used = self.client.get_flexvol_dedupe_used_percent( ssc_vol_name) else: dedupe_used = 0.0 pool['netapp_dedupe_used_percent'] = na_utils.round_down( dedupe_used) aggregate_name = ssc_vol_info.get('netapp_aggregate') aggr_capacity = aggr_capacities.get(aggregate_name, {}) pool['netapp_aggregate_used_percent'] = aggr_capacity.get( 'percent-used', 0) # Add utilization data utilization = self.perf_library.get_node_utilization_for_pool( ssc_vol_name) pool['utilization'] = na_utils.round_down(utilization) pool['filter_function'] = filter_function pool['goodness_function'] = goodness_function pools.append(pool) return pools def get_default_filter_function(self): """Get the default filter_function string.""" return self.DEFAULT_FILTER_FUNCTION def get_default_goodness_function(self): """Get the default goodness_function string.""" return self.DEFAULT_GOODNESS_FUNCTION def extend_volume(self, volume, new_size): """Driver entry point to increase the size of a volume.""" self._extend_volume(volume, new_size) def _extend_volume(self, volume, new_size): """Extend an existing volume to the new size.""" name = volume['name'] namespace = self._get_namespace_from_table(name) path = namespace.metadata['Path'] curr_size_bytes = str(namespace.size) new_size_bytes = str(int(new_size) * units.Gi) # Reused by clone scenarios. # Hence comparing the stored size. 
if curr_size_bytes == new_size_bytes: LOG.info("No need to extend volume %s" " as it is already the requested new size.", name) return self.client.namespace_resize(path, new_size_bytes) self.namespace_table[name].size = new_size_bytes def _find_mapped_namespace_subsystem(self, path, host_nqn): """Find an subsystem for a namespace mapped to the given host.""" subsystems = [subsystem['name'] for subsystem in self.client.get_subsystem_by_host(host_nqn)] # Map subsystem name to namespace-id for the requested host. namespace_map = {v['uuid']: (v['subsystem_uuid'], v['subsystem']) for v in self.client.get_namespace_map(path) if v['subsystem'] in subsystems} subsystem_uuid = subsystem_name = n_uuid = None # Give preference to OpenStack subsystems, just use the last one if not # present to allow unmapping old mappings that used a custom subsystem. for n_uuid, (subsystem_uuid, subsystem_name) in namespace_map.items(): if subsystem_name.startswith(na_utils.OPENSTACK_PREFIX): break return subsystem_uuid, subsystem_name, n_uuid def _map_namespace(self, name, host_nqn): """Maps namespace to the host nqn and returns its ID assigned.""" metadata = self._get_namespace_attr(name, 'metadata') path = metadata['Path'] try: subsystems = self.client.get_namespace_map(path) ns_uuid = subsystem_uuid = None if subsystems: subsystem_name = subsystems[0]['subsystem'] subsystem_uuid = subsystems[0]['subsystem_uuid'] ns_uuid = subsystems[0]['uuid'] self.client.map_host_with_subsystem(host_nqn, subsystem_uuid) else: subsystem_name = na_utils.OPENSTACK_PREFIX + str(uuid.uuid4()) self.client.create_subsystem(subsystem_name, self.host_type, host_nqn) ns_uuid = self.client.map_namespace(path, subsystem_name, ) return subsystem_name, ns_uuid except netapp_api.NaApiError as e: (_, subsystem_name, ns_uuid) =\ self._find_mapped_namespace_subsystem( path, host_nqn) if ns_uuid is not None and subsystem_name: return subsystem_name, ns_uuid else: raise e def initialize_connection(self, volume, connector): """Initializes the connection and returns connection info. Assign any created volume to a compute node/host so that it can be used from that host. Example return values: .. 
code-block:: default { 'driver_volume_type': 'nvmeof', 'data': { 'target_nqn' 'nqn.1992-01.example.com:subsystem', 'host_nqn': 'nqn.1992-01.example.com:string', 'portals': [ ('10.10.10.10', '4420', 'tcp') ], 'uuid': 'a1129e6f-8497-4c0c-be01-3eab1ba684ed' } } """ host_nqn = connector.get("nqn") if not host_nqn: raise exception.VolumeBackendAPIException( data=_("Initialize connection error: no host nqn available!")) name = volume['name'] subsystem, namespace_uuid = self._map_namespace(name, host_nqn) LOG.debug("Mapped namespace %(name)s to the host NQN %(host_nqn)s", {'name': name, 'host_nqn': host_nqn}) target_nqn = self.client.get_nvme_subsystem_nqn(subsystem) if not target_nqn: msg = _('Failed to get subsystem %(subsystem)s target NQN for the ' 'namespace %(name)s') msg_args = {'subsystem': subsystem, 'name': name} raise exception.VolumeBackendAPIException(data=msg % msg_args) target_portals = self.client.get_nvme_target_portals() if not target_portals: msg = _('Failed to get target portals for the namespace %s') raise exception.VolumeBackendAPIException( data=msg % name) portal = (target_portals[0], self.NVME_PORT, self.NVME_TRANSPORT) data = { "target_nqn": str(target_nqn), "host_nqn": host_nqn, "portals": [portal], "vol_uuid": namespace_uuid } conn_info = {"driver_volume_type": "nvmeof", "data": data} LOG.debug("Initialize connection info: %s", conn_info) return conn_info def _unmap_namespace(self, path, host_nqn): """Unmaps a namespace from given host.""" if not host_nqn: LOG.warning("Nothing to unmap - host_nqn is missing: %s", path) return (subsystem_uuid, _, _) = self._find_mapped_namespace_subsystem( path, host_nqn) if subsystem_uuid: self.client.unmap_host_with_subsystem(host_nqn, subsystem_uuid) else: LOG.debug("No mapping exists between namespace: %s" " and host_nqn: %s", path, host_nqn) @coordination.synchronized('netapp-terminate-nvme-connection-{volume.id}') def terminate_connection(self, volume, connector, **kwargs): """Driver entry point to unattach a volume from an instance. Unmask the namespace on the storage system so the given initiator can no longer access it. """ if connector and na_utils.is_multiattach_to_host( volume, connector ): return name = volume['name'] host_nqn = None if connector is None: LOG.debug('Unmapping namespace %(name)s from all hosts.', {'name': name}) else: host_nqn = connector.get("nqn") LOG.debug("Unmapping namespace %(name)s from the host " "%(host_nqn)s", {'name': name, 'host_nqn': host_nqn}) metadata = self._get_namespace_attr(name, 'metadata') path = metadata['Path'] self._unmap_namespace(path, host_nqn) def create_group(self, group): """Driver entry point for creating a generic volume group. ONTAP does not maintain an actual Group construct. As a result, no communication to the backend is necessary for generic volume group creation. :returns: Hard-coded model update for generic volume group model. """ model_update = {'status': fields.GroupStatus.AVAILABLE} return model_update def delete_group(self, group, volumes): """Driver entry point for deleting a group. :returns: Updated group model and list of volume models for the volumes that were deleted. 
""" model_update = {'status': fields.GroupStatus.DELETED} volumes_model_update = [] for volume in volumes: try: self.delete_volume(volume) volumes_model_update.append( {'id': volume['id'], 'status': 'deleted'}) except Exception: volumes_model_update.append( {'id': volume['id'], 'status': 'error_deleting'}) LOG.exception("Volume %(vol)s in the group could not be " "deleted.", {'vol': volume}) return model_update, volumes_model_update def update_group(self, group, add_volumes=None, remove_volumes=None): """Driver entry point for updating a generic volume group. Since no actual group construct is ever created in ONTAP, it is not necessary to update any metadata on the backend. Since this is a NO-OP, there is guaranteed to be no change in any of the volumes' statuses. """ return None, None, None def create_group_snapshot(self, group_snapshot, snapshots): """Creates a Cinder group snapshot object. The Cinder group snapshot object is created by making use of an ephemeral ONTAP consistency group snapshot in order to provide write-order consistency for a set of flexvol snapshots. First, a list of the flexvols backing the given Cinder group must be gathered. An ONTAP group-snapshot of these flexvols will create a snapshot copy of all the Cinder volumes in the generic volume group. For each Cinder volume in the group, it is then necessary to clone its backing namespace from the ONTAP cg-snapshot. The naming convention used for the clones is what indicates the clone's role as a Cinder snapshot and its inclusion in a Cinder group. The ONTAP cg-snapshot of the flexvols is no longer required after having cloned the namespaces backing the Cinder volumes in the Cinder group. :returns: An implicit update for group snapshot and snapshots models that is interpreted by the manager to set their models to available. """ try: if volume_utils.is_group_a_cg_snapshot_type(group_snapshot): self._create_consistent_group_snapshot(group_snapshot, snapshots) else: for snapshot in snapshots: self._create_snapshot(snapshot) except Exception as ex: err_msg = (_("Create group snapshot failed (%s).") % ex) LOG.exception(err_msg, resource=group_snapshot) raise na_utils.NetAppDriverException(err_msg) return None, None def _create_consistent_group_snapshot(self, group_snapshot, snapshots): flexvols = set() for snapshot in snapshots: flexvols.add(volume_utils.extract_host( snapshot['volume']['host'], level='pool')) self.client.create_cg_snapshot(flexvols, group_snapshot['id']) for snapshot in snapshots: self._clone_namespace(snapshot['volume']['name'], snapshot['name']) for flexvol in flexvols: try: self.client.wait_for_busy_snapshot( flexvol, group_snapshot['id']) self.client.delete_snapshot( flexvol, group_snapshot['id']) except exception.SnapshotIsBusy: self.client.mark_snapshot_for_deletion( flexvol, group_snapshot['id']) def delete_group_snapshot(self, group_snapshot, snapshots): """Delete namespaces backing each snapshot in the group snapshot. :returns: An implicit update for snapshots models that is interpreted by the manager to set their models to delete. """ for snapshot in snapshots: self._delete_namespace(snapshot['name']) LOG.debug("Snapshot %s deletion successful", snapshot['name']) return None, None def create_group_from_src(self, group, volumes, group_snapshot=None, snapshots=None, source_group=None, source_vols=None): """Creates a group from a group snapshot or a group of cinder vols. :returns: An implicit update for the volumes model that is interpreted by the manager as a successful operation. 
""" LOG.debug("VOLUMES %s ", ', '.join([vol['id'] for vol in volumes])) volume_model_updates = [] if group_snapshot: vols = zip(volumes, snapshots) for volume, snapshot in vols: source = { 'name': snapshot['name'], 'size': snapshot['volume_size'], } self._clone_source_to_destination(source, volume) '''if volume_model_update is not None: volume_model_update['id'] = volume['id'] volume_model_updates.append(volume_model_update)''' else: vols = zip(volumes, source_vols) for volume, old_src_vref in vols: src_namespace = self._get_namespace_from_table( old_src_vref['name']) source = {'name': src_namespace.name, 'size': old_src_vref['size']} self._clone_source_to_destination(source, volume) '''if volume_model_update is not None: volume_model_update['id'] = volume['id'] volume_model_updates.append(volume_model_update)''' return None, volume_model_updates ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315577.3871212 cinder-27.0.0/cinder/volume/drivers/netapp/dataontap/performance/0000775000175000017500000000000000000000000025063 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/drivers/netapp/dataontap/performance/__init__.py0000664000175000017500000000000000000000000027162 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/drivers/netapp/dataontap/performance/perf_base.py0000664000175000017500000002247600000000000027376 0ustar00zuulzuul00000000000000# Copyright (c) 2016 Clinton Knight # All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Performance metrics functions and cache for NetApp systems. 
""" from oslo_log import log as logging from cinder import exception from cinder.i18n import _ LOG = logging.getLogger(__name__) DEFAULT_UTILIZATION = 50 class PerformanceLibrary(object): def __init__(self, zapi_client): self.zapi_client = zapi_client self._init_counter_info() def _init_counter_info(self): """Set a few counter names based on Data ONTAP version.""" self.system_object_name = None self.avg_processor_busy_base_counter_name = None def _get_node_utilization(self, counters_t1, counters_t2, node_name): """Get node utilization from two sets of performance counters.""" try: # Time spent in the single-threaded Kahuna domain kahuna_percent = self._get_kahuna_utilization(counters_t1, counters_t2) # If Kahuna is using >60% of the CPU, the controller is fully busy if kahuna_percent > 60: return 100.0 # Average CPU busyness across all processors avg_cpu_percent = 100.0 * self._get_average_cpu_utilization( counters_t1, counters_t2) # Total Consistency Point (CP) time total_cp_time_msec = self._get_total_consistency_point_time( counters_t1, counters_t2) # Time spent in CP Phase 2 (buffer flush) p2_flush_time_msec = self._get_consistency_point_p2_flush_time( counters_t1, counters_t2) # Wall-clock time between the two counter sets poll_time_msec = self._get_total_time(counters_t1, counters_t2, 'total_cp_msecs') # If two polls happened in quick succession, use CPU utilization if total_cp_time_msec == 0 or poll_time_msec == 0: return max(min(100.0, avg_cpu_percent), 0) # Adjusted Consistency Point time adjusted_cp_time_msec = self._get_adjusted_consistency_point_time( total_cp_time_msec, p2_flush_time_msec) adjusted_cp_percent = (100.0 * adjusted_cp_time_msec / poll_time_msec) # Utilization is the greater of CPU busyness & CP time node_utilization = max(avg_cpu_percent, adjusted_cp_percent) return max(min(100.0, node_utilization), 0) except Exception: LOG.exception('Could not calculate node utilization for ' 'node %s.', node_name) return DEFAULT_UTILIZATION def _get_kahuna_utilization(self, counters_t1, counters_t2): """Get time spent in the single-threaded Kahuna domain.""" # Note(cknight): Because Kahuna is single-threaded, running only on # one CPU at a time, we can safely sum the Kahuna CPU usage # percentages across all processors in a node. 
return sum(self._get_performance_counter_average_multi_instance( counters_t1, counters_t2, 'domain_busy:kahuna', 'processor_elapsed_time')) * 100.0 def _get_average_cpu_utilization(self, counters_t1, counters_t2): """Get average CPU busyness across all processors.""" return self._get_performance_counter_average( counters_t1, counters_t2, 'avg_processor_busy', self.avg_processor_busy_base_counter_name) def _get_total_consistency_point_time(self, counters_t1, counters_t2): """Get time spent in Consistency Points in msecs.""" return float(self._get_performance_counter_delta( counters_t1, counters_t2, 'total_cp_msecs')) def _get_consistency_point_p2_flush_time(self, counters_t1, counters_t2): """Get time spent in CP Phase 2 (buffer flush) in msecs.""" return float(self._get_performance_counter_delta( counters_t1, counters_t2, 'cp_phase_times:p2_flush')) def _get_total_time(self, counters_t1, counters_t2, counter_name): """Get wall clock time between two successive counters in msecs.""" timestamp_t1 = float(self._find_performance_counter_timestamp( counters_t1, counter_name)) timestamp_t2 = float(self._find_performance_counter_timestamp( counters_t2, counter_name)) return (timestamp_t2 - timestamp_t1) * 1000.0 def _get_adjusted_consistency_point_time(self, total_cp_time, p2_flush_time): """Get adjusted CP time by limiting CP phase 2 flush time to 20%.""" return (total_cp_time - p2_flush_time) * 1.20 def _get_performance_counter_delta(self, counters_t1, counters_t2, counter_name): """Calculate a delta value from two performance counters.""" counter_t1 = int( self._find_performance_counter_value(counters_t1, counter_name)) counter_t2 = int( self._find_performance_counter_value(counters_t2, counter_name)) return counter_t2 - counter_t1 def _get_performance_counter_average(self, counters_t1, counters_t2, counter_name, base_counter_name, instance_name=None): """Calculate an average value from two performance counters.""" counter_t1 = float(self._find_performance_counter_value( counters_t1, counter_name, instance_name)) counter_t2 = float(self._find_performance_counter_value( counters_t2, counter_name, instance_name)) base_counter_t1 = float(self._find_performance_counter_value( counters_t1, base_counter_name, instance_name)) base_counter_t2 = float(self._find_performance_counter_value( counters_t2, base_counter_name, instance_name)) return (counter_t2 - counter_t1) / (base_counter_t2 - base_counter_t1) def _get_performance_counter_average_multi_instance(self, counters_t1, counters_t2, counter_name, base_counter_name): """Calculate an average value from multiple counter instances.""" averages = [] instance_names = [] for counter in counters_t1: if counter_name in counter: instance_names.append(counter['instance-name']) for instance_name in instance_names: average = self._get_performance_counter_average( counters_t1, counters_t2, counter_name, base_counter_name, instance_name) averages.append(average) return averages def _find_performance_counter_value(self, counters, counter_name, instance_name=None): """Given a counter set, return the value of a named instance.""" for counter in counters: if counter_name in counter: if (instance_name is None or counter['instance-name'] == instance_name): return counter[counter_name] else: raise exception.NotFound(_('Counter %s not found') % counter_name) def _find_performance_counter_timestamp(self, counters, counter_name, instance_name=None): """Given a counter set, return the timestamp of a named instance.""" for counter in counters: if counter_name in counter: if 
(instance_name is None or counter['instance-name'] == instance_name): return counter['timestamp'] else: raise exception.NotFound(_('Counter %s not found') % counter_name) def _expand_performance_array(self, object_name, counter_name, counter): """Get array labels and expand counter data array.""" # Get array labels for counter value counter_info = self.zapi_client.get_performance_counter_info( object_name, counter_name) array_labels = [counter_name + ':' + label.lower() for label in counter_info['labels']] array_values = counter[counter_name].split(',') # Combine labels and values, and then mix into existing counter array_data = dict(zip(array_labels, array_values)) counter.update(array_data) def _get_base_counter_name(self, object_name, counter_name): """Get the name of the base counter for the specified counter.""" counter_info = self.zapi_client.get_performance_counter_info( object_name, counter_name) return counter_info['base-counter'] ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/drivers/netapp/dataontap/performance/perf_cmode.py0000664000175000017500000002131700000000000027544 0ustar00zuulzuul00000000000000# Copyright (c) 2016 Clinton Knight # All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Performance metrics functions and cache for NetApp cDOT systems. """ from oslo_log import log as logging from cinder.volume.drivers.netapp.dataontap.client import api as netapp_api from cinder.volume.drivers.netapp.dataontap.performance import perf_base LOG = logging.getLogger(__name__) class PerformanceCmodeLibrary(perf_base.PerformanceLibrary): def __init__(self, zapi_client): super(PerformanceCmodeLibrary, self).__init__(zapi_client) self.performance_counters = {} self.pool_utilization = {} def _init_counter_info(self): """Set a few counter names based on Data ONTAP version.""" super(PerformanceCmodeLibrary, self)._init_counter_info() try: if self.zapi_client.features.SYSTEM_CONSTITUENT_METRICS: self.system_object_name = 'system:constituent' self.avg_processor_busy_base_counter_name = ( self._get_base_counter_name('system:constituent', 'avg_processor_busy')) elif self.zapi_client.features.SYSTEM_METRICS: self.system_object_name = 'system' self.avg_processor_busy_base_counter_name = ( self._get_base_counter_name('system', 'avg_processor_busy')) except netapp_api.NaApiError: if self.zapi_client.features.SYSTEM_CONSTITUENT_METRICS: self.avg_processor_busy_base_counter_name = 'cpu_elapsed_time' else: self.avg_processor_busy_base_counter_name = 'cpu_elapsed_time1' LOG.warning('Could not get performance base counter ' 'name. 
Performance-based scheduler ' 'functions may not be available.') def update_performance_cache(self, ssc_pools): """Called periodically to update per-pool node utilization metrics.""" # Nothing to do on older systems if not (self.zapi_client.features.SYSTEM_METRICS or self.zapi_client.features.SYSTEM_CONSTITUENT_METRICS): return # Get aggregates and nodes for all known pools aggr_names = self._get_aggregates_for_pools(ssc_pools) node_names, aggr_node_map = self._get_nodes_for_aggregates(aggr_names) # Update performance counter cache for each node node_utilization = {} for node_name in node_names: if node_name not in self.performance_counters: self.performance_counters[node_name] = [] # Get new performance counters and save only the last 10 counters = self._get_node_utilization_counters(node_name) if not counters: continue self.performance_counters[node_name].append(counters) self.performance_counters[node_name] = ( self.performance_counters[node_name][-10:]) # Update utilization for each node using newest & oldest sample counters = self.performance_counters[node_name] if len(counters) < 2: node_utilization[node_name] = perf_base.DEFAULT_UTILIZATION else: node_utilization[node_name] = self._get_node_utilization( counters[0], counters[-1], node_name) # Update pool utilization map atomically pool_utilization = {} for pool_name, pool_info in ssc_pools.items(): aggr_name = pool_info.get('netapp_aggregate', 'unknown') if isinstance(aggr_name, list): # NOTE(felipe_rodrigues): for FlexGroup pool, the utilization # is not calculate. pool_utilization[pool_name] = perf_base.DEFAULT_UTILIZATION else: node_name = aggr_node_map.get(aggr_name) pool_utilization[pool_name] = node_utilization.get( node_name, perf_base.DEFAULT_UTILIZATION) self.pool_utilization = pool_utilization def get_node_utilization_for_pool(self, pool_name): """Get the node utilization for the specified pool, if available.""" return self.pool_utilization.get(pool_name, perf_base.DEFAULT_UTILIZATION) def _update_for_failover(self, zapi_client, ssc_pools): self.zapi_client = zapi_client self.update_performance_cache(ssc_pools) def _get_aggregates_for_pools(self, ssc_pools): """Get the set of aggregates that contain the specified pools.""" aggr_names = set() for pool_name, pool_info in ssc_pools.items(): aggr = pool_info.get('netapp_aggregate') if isinstance(aggr, list): # NOTE(felipe_rodrigues): for FlexGroup pool, the utilization # is not calculate continue aggr_names.add(aggr) return aggr_names def _get_nodes_for_aggregates(self, aggr_names): """Get the cluster nodes that own the specified aggregates.""" node_names = set() aggr_node_map = {} for aggr_name in aggr_names: node_name = self.zapi_client.get_node_for_aggregate(aggr_name) if node_name: node_names.add(node_name) aggr_node_map[aggr_name] = node_name return node_names, aggr_node_map def _get_node_utilization_counters(self, node_name): """Get all performance counters for calculating node utilization.""" try: return (self._get_node_utilization_system_counters(node_name) + self._get_node_utilization_wafl_counters(node_name) + self._get_node_utilization_processor_counters(node_name)) except netapp_api.NaApiError: LOG.exception('Could not get utilization counters from node %s', node_name) return None def _get_node_utilization_system_counters(self, node_name): """Get the system counters for calculating node utilization.""" system_instance_uuids = ( self.zapi_client.get_performance_instance_uuids( self.system_object_name, node_name)) system_counter_names = [ 'avg_processor_busy', 
self.avg_processor_busy_base_counter_name, ] if 'cpu_elapsed_time1' in system_counter_names: system_counter_names.append('cpu_elapsed_time') system_counters = self.zapi_client.get_performance_counters( self.system_object_name, system_instance_uuids, system_counter_names) return system_counters def _get_node_utilization_wafl_counters(self, node_name): """Get the WAFL counters for calculating node utilization.""" wafl_instance_uuids = self.zapi_client.get_performance_instance_uuids( 'wafl', node_name) wafl_counter_names = ['total_cp_msecs', 'cp_phase_times'] wafl_counters = self.zapi_client.get_performance_counters( 'wafl', wafl_instance_uuids, wafl_counter_names) # Expand array data so we can use wafl:cp_phase_times[P2_FLUSH] for counter in wafl_counters: if 'cp_phase_times' in counter: self._expand_performance_array( 'wafl', 'cp_phase_times', counter) return wafl_counters def _get_node_utilization_processor_counters(self, node_name): """Get the processor counters for calculating node utilization.""" processor_instance_uuids = ( self.zapi_client.get_performance_instance_uuids('processor', node_name)) processor_counter_names = ['domain_busy', 'processor_elapsed_time'] processor_counters = self.zapi_client.get_performance_counters( 'processor', processor_instance_uuids, processor_counter_names) # Expand array data so we can use processor:domain_busy[kahuna] for counter in processor_counters: if 'domain_busy' in counter: self._expand_performance_array( 'processor', 'domain_busy', counter) return processor_counters ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315577.3871212 cinder-27.0.0/cinder/volume/drivers/netapp/dataontap/utils/0000775000175000017500000000000000000000000023722 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/drivers/netapp/dataontap/utils/__init__.py0000664000175000017500000000000000000000000026021 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/drivers/netapp/dataontap/utils/capabilities.py0000664000175000017500000003572700000000000026743 0ustar00zuulzuul00000000000000# Copyright (c) 2016 Clinton Knight. All rights reserved. # Copyright (c) 2017 Jose Porrua. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Storage service catalog (SSC) functions and classes for NetApp cDOT systems. 
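# --- Illustrative sketch (not part of the driver) ---
# The SSC kept by the CapabilitiesLibrary below maps each FlexVol (pool)
# name to a dict of capability flags. The keys shown here are the ones
# assembled by update_ssc() and its _get_ssc_* helpers; the pool,
# aggregate, and value choices are made up for illustration only.
EXAMPLE_SSC = {
    'vol_cinder_01': {
        'pool_name': 'vol_cinder_01',
        'netapp_aggregate': 'aggr1',
        'netapp_is_flexgroup': 'false',
        'netapp_thin_provisioned': 'true',
        'thick_provisioning_support': False,
        'thin_provisioning_support': True,
        'netapp_dedup': 'true',
        'netapp_compression': 'false',
        'netapp_mirrored': 'false',
        'netapp_flexvol_encryption': 'false',
        'netapp_raid_type': 'raid_dp',
        'netapp_hybrid_aggregate': 'false',
        'netapp_disk_type': ['SSD'],
        'netapp_qos_min_support': 'true',
    },
}
# --- End of illustrative sketch ---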
""" import copy import re from oslo_log import log as logging LOG = logging.getLogger(__name__) class CapabilitiesLibrary(object): def __init__(self, protocol, vserver_name, zapi_client, configuration): self.protocol = protocol.lower() self.vserver_name = vserver_name self.zapi_client = zapi_client self.configuration = configuration self.backend_name = self.configuration.safe_get('volume_backend_name') self.ssc = {} self.invalid_extra_specs = [] def check_api_permissions(self): self.invalid_extra_specs = self.zapi_client.check_api_permissions() def cluster_user_supported(self): return not self.invalid_extra_specs def get_ssc(self): """Get a copy of the Storage Service Catalog.""" return copy.deepcopy(self.ssc) def get_ssc_flexvol_names(self): """Get the names of the FlexVols in the Storage Service Catalog.""" ssc = self.get_ssc() return list(ssc.keys()) def get_ssc_for_flexvol(self, flexvol_name): """Get map of Storage Service Catalog entries for a single flexvol.""" return copy.deepcopy(self.ssc.get(flexvol_name, {})) def get_ssc_aggregates(self): """Get a list of aggregates for all SSC flexvols.""" aggregates = set() for __, flexvol_info in self.ssc.items(): if 'netapp_aggregate' in flexvol_info: aggr = flexvol_info['netapp_aggregate'] if isinstance(aggr, list): aggregates.update(aggr) else: aggregates.add(aggr) return list(aggregates) def is_qos_min_supported(self, pool_name): for __, flexvol_info in self.ssc.items(): if ('netapp_qos_min_support' in flexvol_info and 'pool_name' in flexvol_info and flexvol_info['pool_name'] == pool_name): return flexvol_info['netapp_qos_min_support'] == 'true' return False def update_ssc(self, flexvol_map): """Periodically runs to update Storage Service Catalog data. The self.ssc attribute is updated with the following format. { : {: }} """ LOG.info("Updating storage service catalog information for " "backend '%s'", self.backend_name) ssc = {} for flexvol_name, flexvol_info in flexvol_map.items(): ssc_volume = {} # Add metadata passed from the driver, including pool name ssc_volume.update(flexvol_info) # Get volume info ssc_volume.update(self._get_ssc_flexvol_info(flexvol_name)) ssc_volume.update(self._get_ssc_dedupe_info(flexvol_name)) ssc_volume.update(self._get_ssc_mirror_info(flexvol_name)) ssc_volume.update(self._get_ssc_encryption_info(flexvol_name)) # Get aggregate info aggregate_name = ssc_volume.get('netapp_aggregate') is_flexgroup = isinstance(aggregate_name, list) aggr_info = self._get_ssc_aggregate_info( aggregate_name, is_flexgroup=is_flexgroup) node_name = aggr_info.pop('netapp_node_name') ssc_volume.update(aggr_info) ssc_volume.update(self._get_ssc_qos_min_info(node_name)) if self.protocol.casefold() != 'nfs': ssc_volume.update (self._get_ssc_volume_count_info(flexvol_name)) ssc[flexvol_name] = ssc_volume self.ssc = ssc def update_ssc_asa(self, cluster_map): """Periodically runs to update Storage Service Catalog data. The self.ssc attribute is updated with the following format. 
{ : {'pool_name': }} """ ssc = {} for cluster_name, cluster_info in cluster_map.items(): ssc_cluster = {} # Add metadata passed from the driver, including pool name ssc_cluster.update(cluster_info) # Add ASA r2 default cluster attributes ssc_cluster.update({ 'netapp_thin_provisioned': True, 'thick_provisioning_support': False, 'thin_provisioning_support': True, 'netapp_aggregate': None, 'netapp_is_flexgroup': False, 'netapp_dedup': True, 'netapp_compression': True, 'netapp_mirrored': False, 'netapp_flexvol_encryption': False, }) # ASA r2 is disaggregated aggregate info is not available disk_types = self.zapi_client.get_aggregate_disk_types() aggr_disk_info = { 'netapp_raid_type': None, 'netapp_hybrid_aggregate': None, 'netapp_disk_type': disk_types, 'netapp_node_name': None } ssc_cluster.update(aggr_disk_info) # ASA r2 need min QoS support for all nodes ssc_cluster['netapp_qos_min_support'] = str('true').lower() ssc[cluster_name] = ssc_cluster LOG.debug("Storage Service Catalog: %s", ssc) self.ssc = ssc def _update_for_failover(self, zapi_client, flexvol_map): self.zapi_client = zapi_client self.update_ssc(flexvol_map) def _get_ssc_flexvol_info(self, flexvol_name): """Gather flexvol info and recast into SSC-style volume stats.""" volume_info = self.zapi_client.get_flexvol(flexvol_name=flexvol_name) netapp_thick = (volume_info.get('space-guarantee-enabled') and (volume_info.get('space-guarantee') == 'file' or volume_info.get('space-guarantee') == 'volume')) thick = self._get_thick_provisioning_support(netapp_thick) is_flexgroup = volume_info.get('style-extended') == 'flexgroup' return { 'netapp_thin_provisioned': str(not netapp_thick).lower(), 'thick_provisioning_support': thick, 'thin_provisioning_support': not thick, 'netapp_aggregate': volume_info.get('aggregate')[0], 'netapp_is_flexgroup': str(is_flexgroup).lower(), } def _get_thick_provisioning_support(self, netapp_thick): """Get standard thick/thin values for a flexvol. The values reported for the standard thick_provisioning_support and thin_provisioning_support flags depend on both the flexvol state as well as protocol-specific configuration values. """ if self.protocol == 'nfs': return (netapp_thick and not self.configuration.nfs_sparsed_volumes) else: return (netapp_thick and (self.configuration.netapp_lun_space_reservation == 'enabled')) def _get_ssc_dedupe_info(self, flexvol_name): """Gather dedupe info and recast into SSC-style volume stats.""" if ('netapp_dedup' in self.invalid_extra_specs or 'netapp_compression' in self.invalid_extra_specs): dedupe = False compression = False else: dedupe_info = self.zapi_client.get_flexvol_dedupe_info( flexvol_name) dedupe = dedupe_info.get('dedupe') compression = dedupe_info.get('compression') return { 'netapp_dedup': str(dedupe).lower(), 'netapp_compression': str(compression).lower(), } def _get_ssc_encryption_info(self, flexvol_name): """Gather flexvol encryption info and recast into SSC-style stats.""" encrypted = self.zapi_client.is_flexvol_encrypted( flexvol_name, self.vserver_name) return {'netapp_flexvol_encryption': str(encrypted).lower()} def _get_ssc_qos_min_info(self, node_name): """Gather Qos minimum info and recast into SSC-style stats.""" supported = True is_nfs = self.protocol == 'nfs' if isinstance(node_name, list): # NOTE(felipe_rodrigues): it cannot choose which node the volume # is created, so the pool must have all nodes as QoS min supported # for enabling this feature. 
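# --- Illustrative sketch (not part of the driver) ---
# For a FlexVol pool 'netapp_aggregate' is a single aggregate name, while
# for a FlexGroup pool it is a list of aggregates; the surrounding code
# branches on that shape. The loop that follows requires every backing
# node to support minimum QoS before advertising it for the pool, which
# can be restated compactly as below. Node names and the `is_supported`
# callable are hypothetical stand-ins for zapi_client.is_qos_min_supported().
def qos_min_supported(node_names, is_supported):
    if isinstance(node_names, list):
        return all(is_supported(n) for n in node_names)
    return is_supported(node_names)

print(qos_min_supported(['node-a', 'node-b'], lambda n: n == 'node-a'))
# -> False, because one of the two backing nodes lacks support.
# --- End of illustrative sketch ---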
for n_name in node_name: if not self.zapi_client.is_qos_min_supported(is_nfs, n_name): supported = False break else: supported = self.zapi_client.is_qos_min_supported(is_nfs, node_name) return {'netapp_qos_min_support': str(supported).lower()} def _get_ssc_mirror_info(self, flexvol_name): """Gather SnapMirror info and recast into SSC-style volume stats.""" mirrored = self.zapi_client.is_flexvol_mirrored( flexvol_name, self.vserver_name) return {'netapp_mirrored': str(mirrored).lower()} def _get_ssc_aggregate_info(self, aggregate_name, is_flexgroup=False): """Gather aggregate info and recast into SSC-style volume stats. :param aggregate_name: a list of aggregate names for FlexGroup or a single aggregate name for FlexVol :param is_flexgroup: bool informing the type of aggregate_name param """ if 'netapp_raid_type' in self.invalid_extra_specs: raid_type = None hybrid = None disk_types = None node_name = None elif is_flexgroup: raid_type = set() hybrid = set() disk_types = set() node_name = set() for aggr in aggregate_name: aggregate = self.zapi_client.get_aggregate(aggr) node_name.add(aggregate.get('node-name')) raid_type.add(aggregate.get('raid-type')) hybrid.add(str(aggregate.get('is-hybrid')).lower() if 'is-hybrid' in aggregate else None) disks = set(self.zapi_client.get_aggregate_disk_types(aggr)) disk_types = disk_types.union(disks) node_name = list(node_name) raid_type = list(raid_type) hybrid = list(hybrid) disk_types = list(disk_types) else: aggregate = self.zapi_client.get_aggregate(aggregate_name) node_name = aggregate.get('node-name') raid_type = aggregate.get('raid-type') hybrid = (str(aggregate.get('is-hybrid')).lower() if 'is-hybrid' in aggregate else None) disk_types = self.zapi_client.get_aggregate_disk_types( aggregate_name) return { 'netapp_raid_type': raid_type, 'netapp_hybrid_aggregate': hybrid, 'netapp_disk_type': disk_types, 'netapp_node_name': node_name, } def _get_ssc_volume_count_info(self, flexvol_name): """Gather volume count info and recast into SSC-style volume stats.""" if self.protocol.casefold() == 'nvme': namespaces = self.zapi_client.get_namespace_sizes_by_volume( flexvol_name) volume_count = len(namespaces) else: luns = self.zapi_client.get_lun_sizes_by_volume(flexvol_name) volume_count = len(luns) return { 'total_volumes': volume_count, } def get_matching_flexvols_for_extra_specs(self, extra_specs): """Return a list of flexvol names that match a set of extra specs.""" extra_specs = self._modify_extra_specs_for_comparison(extra_specs) matching_flexvols = [] for flexvol_name, flexvol_info in self.get_ssc().items(): if self._flexvol_matches_extra_specs(flexvol_info, extra_specs): matching_flexvols.append(flexvol_name) return matching_flexvols def _flexvol_matches_extra_specs(self, flexvol_info, extra_specs): """Check whether the SSC data for a FlexVol matches extra specs. A set of extra specs is considered a match for a FlexVol if, for each extra spec, the value matches what is in SSC or the key is unknown to SSC. """ for extra_spec_key, extra_spec_value in extra_specs.items(): if extra_spec_key in flexvol_info: if not self._extra_spec_matches(extra_spec_value, flexvol_info[extra_spec_key]): return False return True def _extra_spec_matches(self, extra_spec_value, ssc_flexvol_value): """Check whether an extra spec value matches something in the SSC. The SSC values may be scalars or lists, so the extra spec value must be compared to the SSC value if it is a scalar, or it must be sought in the list. 
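# --- Illustrative sketch (not part of the driver) ---
# How the scalar-or-list comparison described above plays out in
# practice: an SSC value such as 'netapp_disk_type' may be a list, so an
# extra spec matches if it is contained in that list, while scalar SSC
# values must match exactly (mirroring the method body just below).
# Values are made up.
def extra_spec_matches(extra_spec_value, ssc_value):
    if isinstance(ssc_value, list):
        return extra_spec_value in ssc_value
    return extra_spec_value == ssc_value

print(extra_spec_matches('SSD', ['SSD', 'FCAL']))   # True
print(extra_spec_matches('raid_dp', 'raid_tec'))    # False
# --- End of illustrative sketch ---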
""" if isinstance(ssc_flexvol_value, list): return extra_spec_value in ssc_flexvol_value else: return extra_spec_value == ssc_flexvol_value def _modify_extra_specs_for_comparison(self, extra_specs): """Adjust extra spec values for simple comparison to SSC values. Most extra-spec key-value tuples may be directly compared. But the boolean values that take the form ' True' or ' False' must be modified to allow comparison with the values we keep in the SSC and report to the scheduler. """ modified_extra_specs = copy.deepcopy(extra_specs) for key, value in extra_specs.items(): if isinstance(value, str): if re.match(r'\s+True', value, re.I): modified_extra_specs[key] = True elif re.match(r'\s+False', value, re.I): modified_extra_specs[key] = False return modified_extra_specs def is_flexgroup(self, pool_name): for __, flexvol_info in self.ssc.items(): if ('netapp_is_flexgroup' in flexvol_info and 'pool_name' in flexvol_info and flexvol_info['pool_name'] == pool_name): return flexvol_info['netapp_is_flexgroup'] == 'true' return False def contains_flexgroup_pool(self): for __, flexvol_info in self.ssc.items(): if ('netapp_is_flexgroup' in flexvol_info and flexvol_info['netapp_is_flexgroup'] == 'true'): return True return False ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/drivers/netapp/dataontap/utils/data_motion.py0000664000175000017500000013153100000000000026576 0ustar00zuulzuul00000000000000# Copyright (c) 2016 Alex Meade. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ NetApp Data ONTAP data motion library. This library handles transferring data from a source to a destination. Its responsibility is to handle this as efficiently as possible given the location of the data's source and destination. This includes cloning, SnapMirror, and copy-offload as improvements to brute force data transfer. 
""" from oslo_log import log from oslo_service import loopingcall from oslo_utils import excutils from cinder import exception from cinder.i18n import _ from cinder.objects import fields from cinder import utils from cinder.volume.drivers.netapp.dataontap.client import api as netapp_api from cinder.volume.drivers.netapp.dataontap.utils import utils as config_utils from cinder.volume.drivers.netapp import utils as na_utils from cinder.volume import volume_utils LOG = log.getLogger(__name__) ENTRY_DOES_NOT_EXIST = "(entry doesn't exist)" GEOMETRY_HAS_BEEN_CHANGED = ( "Geometry of the destination", # This intends to be a Tuple "has been changed since the SnapMirror relationship was created") QUIESCE_RETRY_INTERVAL = 5 class DataMotionMixin(object): def get_replication_backend_names(self, config): """Get the backend names for all configured replication targets.""" backend_names = [] replication_devices = config.safe_get('replication_device') if replication_devices: for replication_device in replication_devices: backend_id = replication_device.get('backend_id') if backend_id: backend_names.append(backend_id) return backend_names def get_replication_backend_stats(self, config): """Get the driver replication info for merging into volume stats.""" backend_names = self.get_replication_backend_names(config) if len(backend_names) > 0: stats = { 'replication_enabled': True, 'replication_count': len(backend_names), 'replication_targets': backend_names, 'replication_type': 'async', } else: stats = {'replication_enabled': False} return stats def _get_replication_aggregate_map(self, src_backend_name, target_backend_name): """Get the aggregate mapping config between src and destination.""" aggregate_map = {} config = config_utils.get_backend_configuration(src_backend_name) all_replication_aggregate_maps = config.safe_get( 'netapp_replication_aggregate_map') if all_replication_aggregate_maps: for replication_aggregate_map in all_replication_aggregate_maps: if (replication_aggregate_map.get('backend_id') == target_backend_name): replication_aggregate_map.pop('backend_id') aggregate_map = replication_aggregate_map break return aggregate_map def get_replication_policy(self, config): """Get replication policy for the configured replication targets.""" return config.safe_get('netapp_replication_policy') or \ "MirrorAllSnapshots" def is_sync_mirror_policy(self, replication_policy): return "Sync" in replication_policy or "StrictSync" in \ replication_policy def is_active_sync_asymmetric_policy(self, replication_policy): return "AutomatedFailOver" in replication_policy def is_active_sync_configured(self, configuration): replication_enabled = ( True if self.get_replication_backend_names( configuration) else False) if replication_enabled: return self.get_replication_policy(configuration) == \ "AutomatedFailOver" return False def get_snapmirrors(self, src_backend_name, dest_backend_name, src_flexvol_name=None, dest_flexvol_name=None): """Get info regarding SnapMirror relationship/s for given params.""" dest_backend_config = config_utils.get_backend_configuration( dest_backend_name) dest_vserver = dest_backend_config.netapp_vserver dest_client = config_utils.get_client_for_backend( dest_backend_name, vserver_name=dest_vserver) src_backend_config = config_utils.get_backend_configuration( src_backend_name) src_vserver = src_backend_config.netapp_vserver snapmirrors = dest_client.get_snapmirrors( src_vserver, src_flexvol_name, dest_vserver, dest_flexvol_name, desired_attributes=[ 'relationship-status', 'mirror-state', 
'source-vserver', 'source-volume', 'destination-vserver', 'destination-volume', 'last-transfer-end-timestamp', 'lag-time', ]) return snapmirrors def create_snapmirror(self, src_backend_name, dest_backend_name, src_flexvol_name, dest_flexvol_name, replication_policy): """Set up a SnapMirror relationship b/w two FlexVols (cinder pools) 1. Create SnapMirror relationship 2. Initialize data transfer asynchronously If a SnapMirror relationship already exists and is broken off or quiesced, resume and re-sync the mirror. """ dest_backend_config = config_utils.get_backend_configuration( dest_backend_name) dest_vserver = dest_backend_config.netapp_vserver source_backend_config = config_utils.get_backend_configuration( src_backend_name) src_vserver = source_backend_config.netapp_vserver if replication_policy == "AutomatedFailOver": dest_client = config_utils.get_client_for_backend( dest_backend_name, vserver_name=dest_vserver, force_rest=True) src_client = config_utils.get_client_for_backend( src_backend_name, vserver_name=src_vserver, force_rest=True) else: dest_client = config_utils.get_client_for_backend( dest_backend_name, vserver_name=dest_vserver) src_client = config_utils.get_client_for_backend( src_backend_name, vserver_name=src_vserver) provisioning_options = ( src_client.get_provisioning_options_from_flexvol( src_flexvol_name) ) pool_is_flexgroup = provisioning_options.get('is_flexgroup', False) # 1. Create destination 'dp' FlexVol if it doesn't exist if not dest_client.flexvol_exists(dest_flexvol_name): self.create_destination_flexvol( src_backend_name, dest_backend_name, src_flexvol_name, dest_flexvol_name, pool_is_flexgroup=pool_is_flexgroup) sync_mirror_policy = self.is_sync_mirror_policy(replication_policy) active_sync_asymmetric_policy = self.is_active_sync_asymmetric_policy( replication_policy) src_cg = "cg_" + src_flexvol_name if active_sync_asymmetric_policy \ else "" dest_cg = "cg_" + dest_flexvol_name if active_sync_asymmetric_policy \ else "" src_cg_path = "/cg/" + str(src_cg) dest_cg_path = "/cg/" + str(dest_cg) # 2. Check if SnapMirror relationship exists if active_sync_asymmetric_policy: existing_mirrors = dest_client.get_snapmirrors( src_vserver, src_cg_path, dest_vserver, dest_cg_path) else: existing_mirrors = dest_client.get_snapmirrors( src_vserver, src_flexvol_name, dest_vserver, dest_flexvol_name) msg_payload = { 'src_vserver': src_vserver, 'src_volume': src_flexvol_name, 'dest_vserver': dest_vserver, 'dest_volume': dest_flexvol_name, } # 3. 
Create and initialize SnapMirror if it doesn't already exist if not existing_mirrors: # TODO(gouthamr): Change the schedule from hourly to config value msg = ("Creating a SnapMirror relationship between " "%(src_vserver)s:%(src_flexvol_name)s and %(dest_vserver)s:" "%(dest_volume)s.") LOG.debug(msg, msg_payload) try: if active_sync_asymmetric_policy: src_client.create_ontap_consistency_group( src_vserver, src_flexvol_name, src_cg) dest_client.create_snapmirror( src_vserver, src_flexvol_name, dest_vserver, dest_flexvol_name, src_cg, dest_cg, schedule=None if sync_mirror_policy or active_sync_asymmetric_policy else 'hourly', policy=replication_policy, relationship_type=( 'extended_data_protection' if pool_is_flexgroup or sync_mirror_policy else 'data_protection')) # Initialize async transfer of the initial data if active_sync_asymmetric_policy: src_flexvol_name = src_cg_path dest_flexvol_name = dest_cg_path if not sync_mirror_policy: msg = ("Initializing SnapMirror transfers between " "%(src_vserver)s:%(src_volume)s and " "%(dest_vserver)s:%(dest_volume)s.") LOG.debug(msg, msg_payload) dest_client.initialize_snapmirror( src_vserver, src_flexvol_name, dest_vserver, dest_flexvol_name, active_sync_asymmetric_policy) except netapp_api.NaApiError as e: with excutils.save_and_reraise_exception() as raise_ctxt: if (e.code == netapp_api.EAPIERROR and all(substr in e.message for substr in GEOMETRY_HAS_BEEN_CHANGED)): msg = _("Error creating SnapMirror. Geometry has " "changed on destination volume.") LOG.error(msg) self.delete_snapmirror(src_backend_name, dest_backend_name, src_flexvol_name, dest_flexvol_name) raise_ctxt.reraise = False raise na_utils.GeometryHasChangedOnDestination(msg) # 4. Try to repair SnapMirror if existing else: snapmirror = existing_mirrors[0] if active_sync_asymmetric_policy: src_flexvol_name = src_cg_path dest_flexvol_name = dest_cg_path if snapmirror.get('mirror-state') != 'snapmirrored' and \ snapmirror.get('mirror-state') != 'in_sync': try: msg = ("SnapMirror between %(src_vserver)s:%(src_volume)s " "and %(dest_vserver)s:%(dest_volume)s is in " "'%(state)s' state. Attempting to repair it.") msg_payload['state'] = snapmirror.get('mirror-state') LOG.debug(msg, msg_payload) dest_client.resume_snapmirror(src_vserver, src_flexvol_name, dest_vserver, dest_flexvol_name) dest_client.resync_snapmirror(src_vserver, src_flexvol_name, dest_vserver, dest_flexvol_name) except netapp_api.NaApiError: LOG.exception("Could not re-sync SnapMirror.") def delete_snapmirror(self, src_backend_name, dest_backend_name, src_flexvol_name, dest_flexvol_name, release=True): """Ensure all information about a SnapMirror relationship is removed. 1. Abort SnapMirror 2. Delete the SnapMirror 3. Release SnapMirror to cleanup SnapMirror metadata and snapshots """ dest_backend_config = config_utils.get_backend_configuration( dest_backend_name) dest_vserver = dest_backend_config.netapp_vserver dest_client = config_utils.get_client_for_backend( dest_backend_name, vserver_name=dest_vserver) source_backend_config = config_utils.get_backend_configuration( src_backend_name) src_vserver = source_backend_config.netapp_vserver # 1. Abort any ongoing transfers try: dest_client.abort_snapmirror(src_vserver, src_flexvol_name, dest_vserver, dest_flexvol_name, clear_checkpoint=False) except netapp_api.NaApiError: # Snapmirror is already deleted pass # 2. 
Delete SnapMirror Relationship and cleanup destination snapshots try: dest_client.delete_snapmirror(src_vserver, src_flexvol_name, dest_vserver, dest_flexvol_name) except netapp_api.NaApiError as e: with excutils.save_and_reraise_exception() as exc_context: if (e.code == netapp_api.EOBJECTNOTFOUND or e.code == netapp_api.ESOURCE_IS_DIFFERENT or ENTRY_DOES_NOT_EXIST in e.message): LOG.info('No SnapMirror relationship to delete.') exc_context.reraise = False if release: # If the source is unreachable, do not perform the release try: src_client = config_utils.get_client_for_backend( src_backend_name, vserver_name=src_vserver) except Exception: src_client = None # 3. Cleanup SnapMirror relationship on source try: if src_client: src_client.release_snapmirror(src_vserver, src_flexvol_name, dest_vserver, dest_flexvol_name) except netapp_api.NaApiError as e: with excutils.save_and_reraise_exception() as exc_context: if (e.code == netapp_api.EOBJECTNOTFOUND or e.code == netapp_api.ESOURCE_IS_DIFFERENT or ENTRY_DOES_NOT_EXIST in e.message): # Handle the case where the SnapMirror is already # cleaned up exc_context.reraise = False def update_snapmirror(self, src_backend_name, dest_backend_name, src_flexvol_name, dest_flexvol_name): """Schedule a SnapMirror update on the backend.""" dest_backend_config = config_utils.get_backend_configuration( dest_backend_name) dest_vserver = dest_backend_config.netapp_vserver dest_client = config_utils.get_client_for_backend( dest_backend_name, vserver_name=dest_vserver) source_backend_config = config_utils.get_backend_configuration( src_backend_name) src_vserver = source_backend_config.netapp_vserver # Update SnapMirror dest_client.update_snapmirror(src_vserver, src_flexvol_name, dest_vserver, dest_flexvol_name) def quiesce_then_abort(self, src_backend_name, dest_backend_name, src_flexvol_name, dest_flexvol_name): """Quiesce a SnapMirror and wait with retries before aborting.""" dest_backend_config = config_utils.get_backend_configuration( dest_backend_name) dest_vserver = dest_backend_config.netapp_vserver dest_client = config_utils.get_client_for_backend( dest_backend_name, vserver_name=dest_vserver) source_backend_config = config_utils.get_backend_configuration( src_backend_name) src_vserver = source_backend_config.netapp_vserver # 1. Attempt to quiesce, then abort dest_client.quiesce_snapmirror(src_vserver, src_flexvol_name, dest_vserver, dest_flexvol_name) retries = (source_backend_config.netapp_snapmirror_quiesce_timeout / QUIESCE_RETRY_INTERVAL) @utils.retry(na_utils.NetAppDriverException, interval=QUIESCE_RETRY_INTERVAL, retries=retries, backoff_rate=1) def wait_for_quiesced(): snapmirror = dest_client.get_snapmirrors( src_vserver, src_flexvol_name, dest_vserver, dest_flexvol_name, desired_attributes=['relationship-status', 'mirror-state'])[0] if (snapmirror.get('relationship-status') not in ['quiesced', 'paused']): msg = _("SnapMirror relationship is not quiesced.") raise na_utils.NetAppDriverException(msg) try: wait_for_quiesced() except na_utils.NetAppDriverException: dest_client.abort_snapmirror(src_vserver, src_flexvol_name, dest_vserver, dest_flexvol_name, clear_checkpoint=False) def break_snapmirror(self, src_backend_name, dest_backend_name, src_flexvol_name, dest_flexvol_name): """Break SnapMirror relationship. 1. Quiesce any ongoing SnapMirror transfers 2. Wait until SnapMirror finishes transfers and enters quiesced state 3. Break SnapMirror 4. 
Mount the destination volume so it is given a junction path """ dest_backend_config = config_utils.get_backend_configuration( dest_backend_name) dest_vserver = dest_backend_config.netapp_vserver dest_client = config_utils.get_client_for_backend( dest_backend_name, vserver_name=dest_vserver) source_backend_config = config_utils.get_backend_configuration( src_backend_name) src_vserver = source_backend_config.netapp_vserver # 1. Attempt to quiesce, then abort self.quiesce_then_abort(src_backend_name, dest_backend_name, src_flexvol_name, dest_flexvol_name) # 2. Break SnapMirror dest_client.break_snapmirror(src_vserver, src_flexvol_name, dest_vserver, dest_flexvol_name) # 3. Mount the destination volume and create a junction path dest_client.mount_flexvol(dest_flexvol_name) def resync_snapmirror(self, src_backend_name, dest_backend_name, src_flexvol_name, dest_flexvol_name): """Re-sync (repair / re-establish) SnapMirror relationship.""" dest_backend_config = config_utils.get_backend_configuration( dest_backend_name) dest_vserver = dest_backend_config.netapp_vserver dest_client = config_utils.get_client_for_backend( dest_backend_name, vserver_name=dest_vserver) source_backend_config = config_utils.get_backend_configuration( src_backend_name) src_vserver = source_backend_config.netapp_vserver dest_client.resync_snapmirror(src_vserver, src_flexvol_name, dest_vserver, dest_flexvol_name) def resume_snapmirror(self, src_backend_name, dest_backend_name, src_flexvol_name, dest_flexvol_name): """Resume SnapMirror relationship from a quiesced state.""" dest_backend_config = config_utils.get_backend_configuration( dest_backend_name) dest_vserver = dest_backend_config.netapp_vserver dest_client = config_utils.get_client_for_backend( dest_backend_name, vserver_name=dest_vserver) source_backend_config = config_utils.get_backend_configuration( src_backend_name) src_vserver = source_backend_config.netapp_vserver dest_client.resume_snapmirror(src_vserver, src_flexvol_name, dest_vserver, dest_flexvol_name) def create_destination_flexvol(self, src_backend_name, dest_backend_name, src_flexvol_name, dest_flexvol_name, pool_is_flexgroup=False): """Create a SnapMirror mirror target FlexVol for a given source.""" dest_backend_config = config_utils.get_backend_configuration( dest_backend_name) dest_vserver = dest_backend_config.netapp_vserver dest_client = config_utils.get_client_for_backend( dest_backend_name, vserver_name=dest_vserver) source_backend_config = config_utils.get_backend_configuration( src_backend_name) src_vserver = source_backend_config.netapp_vserver src_client = config_utils.get_client_for_backend( src_backend_name, vserver_name=src_vserver) provisioning_options = ( src_client.get_provisioning_options_from_flexvol( src_flexvol_name) ) provisioning_options.pop('is_flexgroup') # If the source is encrypted then the destination needs to be # encrypted too. Using is_flexvol_encrypted because it includes # a simple check to ensure that the NVE feature is supported. 
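# --- Illustrative sketch (not part of the driver) ---
# Further below, the source FlexVol's aggregates are translated to
# destination aggregates through the configured
# netapp_replication_aggregate_map, and a missing mapping aborts the
# operation. The same logic in isolation, with made-up aggregate names;
# ValueError stands in for the driver's NetAppDriverException to keep
# the sketch self-contained.
def map_aggregates(source_aggregates, aggregate_map):
    destination = []
    for src_aggr in source_aggregates:
        dst_aggr = aggregate_map.get(src_aggr)
        if dst_aggr is None:
            raise ValueError('No mapping for aggregate %s' % src_aggr)
        destination.append(dst_aggr)
    return destination

print(map_aggregates(['src_aggr_1'], {'src_aggr_1': 'dest_aggr_1'}))
# -> ['dest_aggr_1']
# --- End of illustrative sketch ---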
if src_client.is_flexvol_encrypted(src_flexvol_name, src_vserver): provisioning_options['encrypt'] = 'true' # Remove size and volume_type size = provisioning_options.pop('size', None) if not size: msg = _("Unable to read the size of the source FlexVol (%s) " "to create a SnapMirror destination.") raise na_utils.NetAppDriverException(msg % src_flexvol_name) provisioning_options.pop('volume_type', None) source_aggregate = provisioning_options.pop('aggregate') aggregate_map = self._get_replication_aggregate_map( src_backend_name, dest_backend_name) destination_aggregate = [] for src_aggr in source_aggregate: dst_aggr = aggregate_map.get(src_aggr, None) if dst_aggr: destination_aggregate.append(dst_aggr) else: msg = _("Unable to find configuration matching the source " "aggregate and the destination aggregate. Option " "netapp_replication_aggregate_map may be incorrect.") raise na_utils.NetAppDriverException(message=msg) # NOTE(gouthamr): The volume is intentionally created as a Data # Protection volume; junction-path will be added on breaking # the mirror. provisioning_options['volume_type'] = 'dp' if pool_is_flexgroup: compression_enabled = provisioning_options.pop( 'compression_enabled', False) # cDOT compression requires that deduplication be enabled. dedupe_enabled = provisioning_options.pop( 'dedupe_enabled', False) or compression_enabled dest_client.create_volume_async( dest_flexvol_name, destination_aggregate, size, **provisioning_options) timeout = self._get_replication_volume_online_timeout() def _wait_volume_is_online(): volume_state = dest_client.get_volume_state( name=dest_flexvol_name) if volume_state and volume_state == 'online': raise loopingcall.LoopingCallDone() try: wait_call = loopingcall.FixedIntervalWithTimeoutLoopingCall( _wait_volume_is_online) wait_call.start(interval=5, timeout=timeout).wait() if dedupe_enabled: dest_client.enable_volume_dedupe_async( dest_flexvol_name) if compression_enabled: dest_client.enable_volume_compression_async( dest_flexvol_name) except loopingcall.LoopingCallTimeOut: msg = _("Timeout waiting destination FlexGroup " "to come online.") raise na_utils.NetAppDriverException(msg) else: dest_client.create_flexvol(dest_flexvol_name, destination_aggregate[0], size, **provisioning_options) timeout = self._get_replication_volume_online_timeout() def _wait_volume_is_online(): volume_state = dest_client.get_volume_state( name=dest_flexvol_name) if volume_state and volume_state == 'online': raise loopingcall.LoopingCallDone() try: wait_call = loopingcall.FixedIntervalWithTimeoutLoopingCall( _wait_volume_is_online) wait_call.start(interval=5, timeout=timeout).wait() except loopingcall.LoopingCallTimeOut: msg = _("Timeout waiting destination FlexVol to to come " "online.") raise na_utils.NetAppDriverException(msg) def ensure_snapmirrors(self, config, src_backend_name, src_flexvol_names): """Ensure all the SnapMirrors needed for whole-backend replication.""" backend_names = self.get_replication_backend_names(config) replication_policy = self.get_replication_policy(config) for dest_backend_name in backend_names: for src_flexvol_name in src_flexvol_names: dest_flexvol_name = src_flexvol_name retry_exceptions = ( na_utils.GeometryHasChangedOnDestination, ) @utils.retry(retry_exceptions, interval=30, retries=6, backoff_rate=1) def _try_create_snapmirror(): self.create_snapmirror(src_backend_name, dest_backend_name, src_flexvol_name, dest_flexvol_name, replication_policy) try: _try_create_snapmirror() except na_utils.NetAppDriverException as e: with 
excutils.save_and_reraise_exception(): if isinstance(e, retry_exceptions): LOG.error("Number of tries exceeded " "while trying to create SnapMirror.") def break_snapmirrors(self, config, src_backend_name, src_flexvol_names, chosen_target): """Break all existing SnapMirror relationships for a given back end.""" failed_to_break = [] backend_names = self.get_replication_backend_names(config) for dest_backend_name in backend_names: for src_flexvol_name in src_flexvol_names: dest_flexvol_name = src_flexvol_name try: self.break_snapmirror(src_backend_name, dest_backend_name, src_flexvol_name, dest_flexvol_name) except netapp_api.NaApiError: msg = _("Unable to break SnapMirror between FlexVol " "%(src)s and Flexvol %(dest)s. Associated volumes " "will have their replication state set to error.") payload = { 'src': ':'.join([src_backend_name, src_flexvol_name]), 'dest': ':'.join([dest_backend_name, dest_flexvol_name]), } if dest_backend_name == chosen_target: failed_to_break.append(src_flexvol_name) LOG.exception(msg, payload) return failed_to_break def update_snapmirrors(self, config, src_backend_name, src_flexvol_names): """Update all existing SnapMirror relationships on a given back end.""" backend_names = self.get_replication_backend_names(config) for dest_backend_name in backend_names: for src_flexvol_name in src_flexvol_names: dest_flexvol_name = src_flexvol_name try: self.update_snapmirror(src_backend_name, dest_backend_name, src_flexvol_name, dest_flexvol_name) except netapp_api.NaApiError: # Ignore any errors since the current source may be # unreachable pass def create_vserver_peer(self, src_vserver, src_backend_name, dest_vserver, peer_applications): """Create a vserver peer relationship""" src_client = config_utils.get_client_for_backend( src_backend_name, vserver_name=src_vserver) vserver_peers = src_client.get_vserver_peers(src_vserver, dest_vserver) if not vserver_peers: src_client.create_vserver_peer( src_vserver, dest_vserver, vserver_peer_application=peer_applications) LOG.debug("Vserver peer relationship created between %(src)s " "and %(dest)s. Peering application set to %(app)s.", {'src': src_vserver, 'dest': dest_vserver, 'app': peer_applications}) return None for vserver_peer in vserver_peers: if all(app in vserver_peer['applications'] for app in peer_applications): LOG.debug("Found vserver peer relationship between %s and %s.", src_vserver, dest_vserver) return None msg = _("Vserver peer relationship found between %(src)s and %(dest)s " "but peering application %(app)s isn't defined.") raise na_utils.NetAppDriverException(msg % {'src': src_vserver, 'dest': dest_vserver, 'app': peer_applications}) def _choose_failover_target(self, backend_name, flexvols, replication_targets): target_lag_times = [] for target in replication_targets: all_target_mirrors = self.get_snapmirrors( backend_name, target, None, None) flexvol_mirrors = self._filter_and_sort_mirrors( all_target_mirrors, flexvols) if not flexvol_mirrors: msg = ("Ignoring replication target %(target)s because no " "SnapMirrors were found for any of the flexvols " "in (%(flexvols)s).") payload = { 'flexvols': ', '.join(flexvols), 'target': target, } LOG.debug(msg, payload) continue target_lag_times.append( { 'target': target, 'highest-lag-time': flexvol_mirrors[0]['lag-time'], } ) # The best target is one with the least 'worst' lag time. 
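# --- Illustrative sketch (not part of the driver) ---
# The loop above records, for each replication target, the highest lag
# time among its SnapMirrors; the selection just below then picks the
# target whose worst lag is smallest. A compact restatement with
# made-up target names and lag times (in seconds):
def choose_target(target_lag_times):
    if not target_lag_times:
        return None
    best = sorted(target_lag_times,
                  key=lambda x: int(x['highest-lag-time']))[0]
    return best['target']

print(choose_target([
    {'target': 'ontap_dr_1', 'highest-lag-time': '600'},
    {'target': 'ontap_dr_2', 'highest-lag-time': '120'},
]))
# -> 'ontap_dr_2': its most out-of-date mirror is still the freshest overall.
# --- End of illustrative sketch ---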
best_target = (sorted(target_lag_times, key=lambda x: int(x['highest-lag-time']))[0] if len(target_lag_times) > 0 else {}) return best_target.get('target') def _filter_and_sort_mirrors(self, mirrors, flexvols): """Return mirrors reverse-sorted by lag time. The 'slowest' mirror determines the best update that occurred on a given replication target. """ filtered_mirrors = [x for x in mirrors if x.get('destination-volume') in flexvols] sorted_mirrors = sorted(filtered_mirrors, key=lambda x: int(x.get('lag-time')), reverse=True) return sorted_mirrors def _complete_failover(self, source_backend_name, replication_targets, flexvols, volumes, failover_target=None): """Failover a backend to a secondary replication target.""" volume_updates = [] active_backend_name = failover_target or self._choose_failover_target( source_backend_name, flexvols, replication_targets) if active_backend_name is None: msg = _("No suitable host was found to failover.") raise na_utils.NetAppDriverException(msg) source_backend_config = config_utils.get_backend_configuration( source_backend_name) # 1. Start an update to try to get a last minute transfer before we # quiesce and break self.update_snapmirrors(source_backend_config, source_backend_name, flexvols) # 2. Break SnapMirrors failed_to_break = self.break_snapmirrors(source_backend_config, source_backend_name, flexvols, active_backend_name) # 3. Update cinder volumes within this host for volume in volumes: replication_status = fields.ReplicationStatus.FAILED_OVER volume_pool = volume_utils.extract_host(volume['host'], level='pool') if volume_pool in failed_to_break: replication_status = 'error' volume_update = { 'volume_id': volume['id'], 'updates': { 'replication_status': replication_status, }, } volume_updates.append(volume_update) return active_backend_name, volume_updates def _failover_host(self, volumes, secondary_id=None, groups=None): if secondary_id == self.backend_name: msg = _("Cannot failover to the same host as the primary.") raise exception.InvalidReplicationTarget(reason=msg) # Added logic to handle failback from the secondary to old primary # This condition is needed when the DR/replication conditions are # restored back to normal state if secondary_id == "default": LOG.debug('Fails back to primary') volume_updates = [] volume_update = [] # Update the ZAPI client to the backend we failed over to active_backend_name = self.backend_name self._update_zapi_client(active_backend_name) self.failed_over = False self.failed_over_backend_name = active_backend_name for volume in volumes: volume_update = [] replication_status = fields.ReplicationStatus.ENABLED volume_update = { 'volume_id': volume['id'], 'updates': {'replication_status': replication_status}, } volume_updates.append(volume_update) return active_backend_name, volume_updates, [] else: replication_targets = self.get_replication_backend_names( self.configuration) if not replication_targets: msg = _("No replication targets configured for backend " "%s. Cannot failover.") raise exception.InvalidReplicationTarget( reason=msg % self.host) if secondary_id and secondary_id not in replication_targets: msg = _("%(target)s is not among replication targets " "configured for back end %(host)s. 
Cannot failover.") payload = { 'target': secondary_id, 'host': self.host, } raise exception.InvalidReplicationTarget(reason=msg % payload) flexvols = self.ssc_library.get_ssc_flexvol_names() try: active_backend_name, volume_updates = self._complete_failover( self.backend_name, replication_targets, flexvols, volumes, failover_target=secondary_id) except na_utils.NetAppDriverException as e: msg = _("Could not complete failover: %s") % e raise exception.UnableToFailOver(reason=msg) # Update the ZAPI client to the backend we failed over to self._update_zapi_client(active_backend_name) self.failed_over = True self.failed_over_backend_name = active_backend_name return active_backend_name, volume_updates, [] def _failover(self, context, volumes, secondary_id=None, groups=None): """Failover to replication target.""" if secondary_id == self.backend_name: msg = _("Cannot failover to the same host as the primary.") raise exception.InvalidReplicationTarget(reason=msg) # Added logic to handle failback from the secondary to old primary # This condition is needed when the DR/replication conditions are # restored back to normal state if secondary_id == "default": LOG.debug('Fails back to primary inside _failover') volume_updates = [] volume_update = [] # Update the ZAPI client to the backend we failed over to active_backend_name = self.backend_name self._update_zapi_client(active_backend_name) self.failed_over = False self.failed_over_backend_name = active_backend_name for volume in volumes: replication_status = fields.ReplicationStatus.ENABLED volume_update = { 'volume_id': volume['id'], 'updates': {'replication_status': replication_status}, } volume_updates.append(volume_update) return active_backend_name, volume_updates, [] else: replication_targets = self.get_replication_backend_names( self.configuration) if not replication_targets: msg = _("No replication targets configured for backend " "%s. Cannot failover.") raise exception.InvalidReplicationTarget( reason=msg % self.host) if secondary_id and secondary_id not in replication_targets: msg = _("%(target)s is not among replication targets " "configured for back end %(host)s. Cannot failover.") payload = { 'target': secondary_id, 'host': self.host, } raise exception.InvalidReplicationTarget(reason=msg % payload) flexvols = self.ssc_library.get_ssc_flexvol_names() try: active_backend_name, volume_updates = self._complete_failover( self.backend_name, replication_targets, flexvols, volumes, failover_target=secondary_id) except na_utils.NetAppDriverException as e: msg = _("Could not complete failover: %s") % e raise exception.UnableToFailOver(reason=msg) return active_backend_name, volume_updates, [] def _failover_completed(self, context, secondary_id=None): """Update volume node when `failover` is completed.""" # Update the ZAPI client to the backend we failed over to self._update_zapi_client(secondary_id) self.failed_over = True self.failed_over_backend_name = secondary_id def _get_replication_volume_online_timeout(self): return self.configuration.netapp_replication_volume_online_timeout def migrate_volume_ontap_assisted(self, volume, host, src_backend_name, src_vserver): """Migrate Cinder volume using ONTAP capabilities""" _, src_pool = volume.host.split('#') dest_backend, dest_pool = host["host"].split('#') _, dest_backend_name = dest_backend.split('@') # Check if migration occurs in the same backend. If so, a migration # between Cinder pools in the same vserver will be performed. 
if src_backend_name == dest_backend_name: # We should skip the operation in case source and destination pools # are the same. if src_pool == dest_pool: LOG.info('Skipping volume migration as source and destination ' 'are the same.') return True, {} updates = self._migrate_volume_to_pool( volume, src_pool, dest_pool, src_vserver, dest_backend_name) else: if not self.using_cluster_credentials: LOG.info('Storage assisted volume migration across backends ' 'requires ONTAP cluster-wide credentials. Falling ' 'back to host assisted migration.') return False, {} dest_backend_config = config_utils.get_backend_configuration( dest_backend_name) dest_vserver = dest_backend_config.netapp_vserver dest_client = config_utils.get_client_for_backend( dest_backend_name) src_client = config_utils.get_client_for_backend( src_backend_name) # In case origin and destination backends are not pointing to the # same cluster, a host copy strategy using is required. Otherwise, # an intra-cluster operation can be done to complete the migration. src_cluster_name = src_client.get_cluster_name() dest_cluster_name = dest_client.get_cluster_name() if src_cluster_name != dest_cluster_name: LOG.info('Driver only supports storage assisted migration ' 'between pools in a same cluster. Falling back to ' 'host assisted migration.') return False, {} # if origin and destination vservers are the same, simply move # the cinder volume from one pool to the other. # Otherwise, an intra-cluster Vserver peer relationship # followed by a volume copy operation are required. # Both operations will copy data between ONTAP volumes # and won't finish in constant time as volume clones. if src_vserver == dest_vserver: # We should skip the operation in case source and # destination pools are the same if src_pool == dest_pool: LOG.info('Skipping volume migration as source and ' 'destination are the same.') return True, {} updates = self._migrate_volume_to_pool( volume, src_pool, dest_pool, src_vserver, dest_backend_name) else: updates = self._migrate_volume_to_vserver( volume, src_pool, src_vserver, dest_pool, dest_backend_config.netapp_vserver, dest_backend_name) LOG.info('Successfully migrated volume %s to host %s.', volume.id, host['host']) return True, updates ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/drivers/netapp/dataontap/utils/loopingcalls.py0000664000175000017500000000261100000000000026762 0ustar00zuulzuul00000000000000# Copyright (c) 2016 Chuck Fouts. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
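# --- Editor's illustrative sketch (not part of the upstream module) ---
# Example use of the LoopingCalls helper defined below: a driver registers
# periodic housekeeping tasks and starts them together. The task function here
# is a hypothetical stand-in; LoopingCalls, ONE_HOUR and ONE_MINUTE are the
# names defined in this module.
def _refresh_ssc():
    """Hypothetical periodic task."""
    pass

calls = LoopingCalls()
calls.add_task(_refresh_ssc, ONE_HOUR, initial_delay=ONE_MINUTE)
calls.start_tasks()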
"""Collects and starts tasks created from oslo_service.loopingcall.""" from collections import namedtuple from oslo_service import loopingcall LoopingTask = namedtuple('LoopingTask', ['looping_call', 'interval', 'initial_delay']) # Time intervals in seconds ONE_MINUTE = 60 TEN_MINUTES = 600 ONE_HOUR = 3600 class LoopingCalls(object): def __init__(self): self.tasks = [] def add_task(self, call_function, interval, initial_delay=0): looping_call = loopingcall.FixedIntervalLoopingCall(call_function) task = LoopingTask(looping_call, interval, initial_delay) self.tasks.append(task) def start_tasks(self): for task in self.tasks: task.looping_call.start(task.interval, task.initial_delay) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/drivers/netapp/dataontap/utils/utils.py0000664000175000017500000002060300000000000025435 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Utilities for NetApp FAS drivers. This module contains common utilities to be used by one or more NetApp FAS drivers to achieve the desired functionality. """ import json import socket from oslo_config import cfg from oslo_log import log as logging from cinder import exception from cinder.i18n import _ from cinder.volume import configuration from cinder.volume import driver from cinder.volume.drivers.netapp.dataontap.client import client_cmode from cinder.volume.drivers.netapp.dataontap.client import client_cmode_rest from cinder.volume.drivers.netapp.dataontap.client \ import client_cmode_rest_asar2 from cinder.volume.drivers.netapp import options as na_opts from cinder.volume import volume_utils CONF = cfg.CONF LOG = logging.getLogger(__name__) def get_backend_configuration(backend_name): """Get a cDOT configuration object for a specific backend.""" config_stanzas = CONF.list_all_sections() if backend_name not in config_stanzas: msg = _("Could not find backend stanza %(backend_name)s in " "configuration. 
Available stanzas are %(stanzas)s") params = { "stanzas": config_stanzas, "backend_name": backend_name, } raise exception.ConfigNotFound(message=msg % params) config = configuration.Configuration(driver.volume_opts, config_group=backend_name) config.append_config_values(na_opts.netapp_proxy_opts) config.append_config_values(na_opts.netapp_connection_opts) config.append_config_values(na_opts.netapp_transport_opts) config.append_config_values(na_opts.netapp_basicauth_opts) config.append_config_values(na_opts.netapp_certificateauth_opts) config.append_config_values(na_opts.netapp_provisioning_opts) config.append_config_values(na_opts.netapp_cluster_opts) config.append_config_values(na_opts.netapp_san_opts) config.append_config_values(na_opts.netapp_replication_opts) config.append_config_values(na_opts.netapp_support_opts) config.append_config_values(na_opts.netapp_migration_opts) return config def get_client_for_backend(backend_name, vserver_name=None, force_rest=False): """Get a cDOT API client for a specific backend.""" config = get_backend_configuration(backend_name) # Determine if disaggregated platform should be used # Parameter takes precedence over config setting is_disaggregated = config.netapp_disaggregated_platform # ZAPI clients are not supported for ASAr2 platform. # We are forcing the client to be REST client for ASAr2. if is_disaggregated: force_rest = True if config.netapp_use_legacy_client and not force_rest: client = client_cmode.Client( transport_type=config.netapp_transport_type, ssl_cert_path=config.netapp_ssl_cert_path, username=config.netapp_login, password=config.netapp_password, hostname=config.netapp_server_hostname, private_key_file=config.netapp_private_key_file, certificate_file=config.netapp_certificate_file, ca_certificate_file=config.netapp_ca_certificate_file, certificate_host_validation= config.netapp_certificate_host_validation, port=config.netapp_server_port, vserver=vserver_name or config.netapp_vserver, trace=volume_utils.TRACE_API, api_trace_pattern=config.netapp_api_trace_pattern) else: # Check if ASA r2 disaggregated platform is enabled if is_disaggregated: client = client_cmode_rest_asar2.RestClientASAr2( transport_type=config.netapp_transport_type, ssl_cert_path=config.netapp_ssl_cert_path, username=config.netapp_login, password=config.netapp_password, hostname=config.netapp_server_hostname, private_key_file=config.netapp_private_key_file, certificate_file=config.netapp_certificate_file, ca_certificate_file=config.netapp_ca_certificate_file, certificate_host_validation= config.netapp_certificate_host_validation, port=config.netapp_server_port, vserver=vserver_name or config.netapp_vserver, trace=volume_utils.TRACE_API, api_trace_pattern=config.netapp_api_trace_pattern, async_rest_timeout=config.netapp_async_rest_timeout, is_disaggregated=is_disaggregated) else: client = client_cmode_rest.RestClient( transport_type=config.netapp_transport_type, ssl_cert_path=config.netapp_ssl_cert_path, username=config.netapp_login, password=config.netapp_password, hostname=config.netapp_server_hostname, private_key_file=config.netapp_private_key_file, certificate_file=config.netapp_certificate_file, ca_certificate_file=config.netapp_ca_certificate_file, certificate_host_validation= config.netapp_certificate_host_validation, port=config.netapp_server_port, vserver=vserver_name or config.netapp_vserver, trace=volume_utils.TRACE_API, api_trace_pattern=config.netapp_api_trace_pattern, async_rest_timeout=config.netapp_async_rest_timeout) return client def 
_build_base_ems_log_message(driver_name, app_version): ems_log = { 'computer-name': socket.gethostname() or 'Cinder_node', 'event-source': 'Cinder driver %s' % driver_name, 'app-version': app_version, 'category': 'provisioning', 'log-level': '5', 'auto-support': 'false', } return ems_log def build_ems_log_message_0(driver_name, app_version): """Construct EMS Autosupport log message with deployment info.""" ems_log = _build_base_ems_log_message(driver_name, app_version) ems_log['event-id'] = '0' ems_log['event-description'] = 'OpenStack Cinder connected to cluster node' return ems_log def build_ems_log_message_1(driver_name, app_version, vserver, flexvol_pools, aggregate_pools): """Construct EMS Autosupport log message with storage pool info.""" message = { 'pools': { 'vserver': vserver, 'aggregates': aggregate_pools, 'flexvols': flexvol_pools, }, } ems_log = _build_base_ems_log_message(driver_name, app_version) ems_log['event-id'] = '1' ems_log['event-description'] = json.dumps(message) return ems_log def get_cluster_to_pool_map(client): """Get the cluster name for ASA r2 systems. For ASA r2 systems, instead of using flexvols, we use the cluster name as the pool. The map is of the format suitable for seeding the storage service catalog: { : {'pool_name': }} :param client: NetApp client instance to retrieve cluster information :returns: Dictionary mapping cluster names to pool information :raises: InvalidConfigurationValue if cluster is not disaggregated """ pools = {} cluster_info = client.get_cluster_info() # Check if cluster info is missing or cluster is not disaggregated (ASA r2) if not cluster_info.get('disaggregated', False): LOG.error("Cluster is not a disaggregated (ASA r2) platform. ") raise exception.InvalidConfigurationValue( option='disaggregated', value=cluster_info.get('disaggregated', None) ) cluster_name = cluster_info['name'] LOG.debug("Found ASA r2 cluster: %s", cluster_name) pools[cluster_name] = {'pool_name': cluster_name} msg_args = { 'cluster': cluster_name, } msg = "ASA r2 cluster '%(cluster)s' added as pool" LOG.debug(msg, msg_args) return pools ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/drivers/netapp/options.py0000664000175000017500000004474100000000000022666 0ustar00zuulzuul00000000000000# Copyright (c) 2012 NetApp, Inc. All rights reserved. # Copyright (c) 2014 Navneet Singh. All rights reserved. # Copyright (c) 2014 Bob Callaway. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Contains configuration options for NetApp drivers. Common place to hold configuration options for all NetApp drivers. Options need to be grouped into granular units to be able to be reused by different modules and classes. This does not restrict declaring options in individual modules. If options are not re usable then can be declared in individual modules. It is recommended to Keep options at a single place to ensure re usability and better management of configuration options. 
""" from oslo_config import cfg from oslo_config import types from cinder.volume import configuration as conf NETAPP_SIZE_MULTIPLIER_DEFAULT = 1.2 netapp_proxy_opts = [ cfg.StrOpt('netapp_storage_family', default='ontap_cluster', choices=['ontap_cluster'], help=('The storage family type used on the storage system; ' 'the only valid value is ontap_cluster for using ' 'clustered Data ONTAP.')), cfg.StrOpt('netapp_storage_protocol', choices=['iscsi', 'fc', 'nfs', 'nvme'], help=('The storage protocol to be used on the data path with ' 'the storage system.')), ] netapp_connection_opts = [ cfg.StrOpt('netapp_server_hostname', help='The hostname (or IP address) for the storage system or ' 'proxy server.'), cfg.IntOpt('netapp_server_port', help=('The TCP port to use for communication with the storage ' 'system or proxy server. If not specified, Data ONTAP ' 'drivers will use 80 for HTTP and 443 for HTTPS.')), cfg.BoolOpt('netapp_use_legacy_client', default=True, help=('Select which ONTAP client to use for retrieving and ' 'modifying data on the storage. The legacy client ' 'relies on ZAPI calls. If set to False, the new REST ' 'client is used, which runs REST calls if supported, ' 'otherwise falls back to the equivalent ZAPI call.')), cfg.IntOpt('netapp_async_rest_timeout', min=60, default=60, # One minute help='The maximum time in seconds to wait for completing a ' 'REST asynchronous operation.'), ] netapp_transport_opts = [ cfg.StrOpt('netapp_transport_type', default='http', choices=['http', 'https'], help=('The transport protocol used when communicating with ' 'the storage system or proxy server.')), cfg.StrOpt('netapp_ssl_cert_path', help=("The path to a CA_BUNDLE file or directory with " "certificates of trusted CA. If set to a directory, it " "must have been processed using the c_rehash utility " "supplied with OpenSSL. If not informed, it will use the " "Mozilla's carefully curated collection of Root " "Certificates for validating the trustworthiness of SSL " "certificates. Only applies with new REST client.")), ] netapp_basicauth_opts = [ cfg.StrOpt('netapp_login', help=('Administrative user account name used to access the ' 'storage system or proxy server.')), cfg.StrOpt('netapp_password', help=('Password for the administrative user account ' 'specified in the netapp_login option.'), secret=True), ] netapp_certificateauth_opts = [ cfg.StrOpt('netapp_private_key_file', sample_default='/path/to/private_key.key', help=(""" This option is applicable for both self signed and ca verified certificates. For self signed certificate: Absolute path to the file containing the private key associated with the self signed certificate. It is a sensitive file that should be kept secure and protected. The private key is used to sign the certificate and establish the authenticity and integrity of the certificate during the authentication process. For ca verified certificate: Absolute path to the file containing the private key associated with the certificate. It is generated when creating the certificate signingrequest (CSR) and should be kept secure and protected. The private key is used to sign the CSR and later used to establish secure connections and authenticate the entity. """), secret=True), cfg.StrOpt('netapp_certificate_file', sample_default='/path/to/certificate.pem', help=(""" This option is applicable for both self signed and ca verified certificates. For self signed certificate: Absolute path to the file containing the self-signed digital certificate itself. 
It includes information about the entity such as the common name (e.g., domain name), organization details, validity period, and public key. The certificate file is generated based on the private key and is used by clients or systems to verify the entity identity during the authentication process. For ca verified certificate: Absolute path to the file containing the digital certificate issued by the trusted third-party certificate authority (CA). It includes information about the entity identity, public key, and the CA that issued the certificate. The certificate file is used by clients or systems to verify the authenticity and integrity of the entity during the authentication process. """), secret=True), cfg.StrOpt('netapp_ca_certificate_file', sample_default='/path/to/ca_certificate.crt', help=(""" This option is applicable only for a ca verified certificate. Ca verified file: Absolute path to the file containing the public key certificate of the trusted third-party certificate authority (CA) that issued the certificate. It is used by clients or systems to validate the authenticity of the certificate presented by the entity. The CA certificate file is typically pre configured in the trust store of clients or systems to establish trust in certificates issued by that CA. """), secret=True), cfg.BoolOpt('netapp_certificate_host_validation', default=False, help=('This option is used only if netapp_private_key_file' ' and netapp_certificate_file files are passed in the' ' configuration.' ' By default certificate verification is disabled' ' and to verify the certificates please set the value' ' to True.')), ] netapp_provisioning_opts = [ cfg.FloatOpt('netapp_size_multiplier', default=NETAPP_SIZE_MULTIPLIER_DEFAULT, help=('The quantity to be multiplied by the requested ' 'volume size to ensure enough space is available on ' 'the virtual storage server (Vserver) to fulfill ' 'the volume creation request. Note: this option ' 'is deprecated and will be removed in favor of ' '"reserved_percentage" in the Mitaka release.')), cfg.StrOpt('netapp_lun_space_reservation', default='enabled', choices=['enabled', 'disabled'], help=('This option determines if storage space is reserved ' 'for LUN allocation. If enabled, LUNs are thick ' 'provisioned. If space reservation is disabled, ' 'storage space is allocated on demand.')), cfg.BoolOpt('netapp_driver_reports_provisioned_capacity', default=False, help=('Set to True for Cinder to query the storage system in ' 'order to calculate volumes provisioned size, otherwise ' 'provisioned_capacity_gb will corresponds to the ' 'value of allocated_capacity_gb (calculated by Cinder ' 'Core code). Enabling this feature increases ' 'the number of API calls to the storage and ' 'requires more processing on host, which may impact ' 'volume report overall performance.')), ] netapp_cluster_opts = [ cfg.StrOpt('netapp_vserver', help=('This option specifies the virtual storage server ' '(Vserver) name on the storage cluster on which ' 'provisioning of block storage volumes should occur.')), cfg.BoolOpt('netapp_disaggregated_platform', default=False, help=('This option specifies whether to enable ASA r2 ' 'workflows for NetApp disaggregated platform. 
' 'When set to True, the driver will use ASA r2 ' 'specific client and workflows for interacting ' 'with NetApp ONTAP.')), ] netapp_img_cache_opts = [ cfg.IntOpt('netapp_nfs_image_cache_cleanup_interval', default=600, min=60, help=('Sets time in seconds between NFS image cache ' 'cleanup tasks.')), cfg.IntOpt('thres_avl_size_perc_start', default=20, help=('If the percentage of available space for an NFS share ' 'has dropped below the value specified by this option, ' 'the NFS image cache will be cleaned.')), cfg.IntOpt('thres_avl_size_perc_stop', default=60, help=('When the percentage of available space on an NFS share ' 'has reached the percentage specified by this option, ' 'the driver will stop clearing files from the NFS image ' 'cache that have not been accessed in the last M ' 'minutes, where M is the value of the ' 'expiry_thres_minutes configuration option.')), cfg.IntOpt('expiry_thres_minutes', default=720, help=('This option specifies the threshold for last access ' 'time for images in the NFS image cache. When a cache ' 'cleaning cycle begins, images in the cache that have ' 'not been accessed in the last M minutes, where M is ' 'the value of this parameter, will be deleted from the ' 'cache to create free space on the NFS share.')), ] netapp_nfs_extra_opts = [ cfg.StrOpt('netapp_copyoffload_tool_path', help=('This option specifies the path of the NetApp copy ' 'offload tool binary. Ensure that the binary has execute ' 'permissions set which allow the effective user of the ' 'cinder-volume process to execute the file.'), deprecated_for_removal=True, deprecated_reason='The CopyOfflload tool is no longer ' 'available for downloading.'), ] netapp_san_opts = [ cfg.StrOpt('netapp_lun_ostype', help=('This option defines the type of operating system that' ' will access a LUN exported from Data ONTAP; it is' ' assigned to the LUN at the time it is created.')), cfg.StrOpt('netapp_namespace_ostype', help=('This option defines the type of operating system that' ' will access a namespace exported from Data ONTAP; it is' ' assigned to the namespace at the time it is created.')), cfg.StrOpt('netapp_host_type', help=('This option defines the type of operating system for' ' all initiators that can access a LUN. This information' ' is used when mapping LUNs to individual hosts or' ' groups of hosts.')), cfg.StrOpt('netapp_pool_name_search_pattern', deprecated_opts=[cfg.DeprecatedOpt(name='netapp_volume_list'), cfg.DeprecatedOpt(name='netapp_storage_pools') ], default="(.+)", help=('This option is used to restrict provisioning to the ' 'specified pools. Specify the value of ' 'this option to be a regular expression which will be ' 'applied to the names of objects from the storage ' 'backend which represent pools in Cinder. This option ' 'is only utilized when the storage protocol is ' 'configured to use iSCSI or FC.')), cfg.IntOpt('netapp_lun_clone_busy_timeout', min=0, default=30, help='Specifies the maximum time (in seconds) to retry' ' the LUN clone operation when an ONTAP "device busy"' ' error occurs.'), cfg.IntOpt('netapp_lun_clone_busy_interval', min=0, default=3, help='Specifies the time interval (in seconds) to retry' ' the LUN clone operation when an ONTAP "device busy"' ' error occurs.')] netapp_replication_opts = [ cfg.MultiOpt('netapp_replication_aggregate_map', item_type=types.Dict(), help="Multi opt of dictionaries to represent the aggregate " "mapping between source and destination back ends when " "using whole back end replication. 
For every " "source aggregate associated with a cinder pool (NetApp " "FlexVol/FlexGroup), you would need to specify the " "destination aggregate on the replication target " "device. " "A replication target device is configured with the " "configuration option replication_device. Specify this " "option as many times as you have replication devices. " "Each entry takes the standard dict config form: " "netapp_replication_aggregate_map = " "backend_id:," "src_aggr_name1:dest_aggr_name1," "src_aggr_name2:dest_aggr_name2,..."), cfg.IntOpt('netapp_snapmirror_quiesce_timeout', min=0, default=3600, # One Hour help='The maximum time in seconds to wait for existing ' 'SnapMirror transfers to complete before aborting ' 'during a failover.'), cfg.IntOpt('netapp_replication_volume_online_timeout', min=60, default=360, # Default to six minutes help='Sets time in seconds to wait for a replication volume ' 'create to complete and go online.'), cfg.StrOpt('netapp_replication_policy', default='MirrorAllSnapshots', help='This option defines the replication policy to be used ' 'while creating snapmirror relationship. Default is ' 'MirrorAllSnapshots which is based on async-mirror.' 'User can pass values like Sync, StrictSync for ' 'synchronous snapmirror relationship (SM-S) to achieve ' 'zero RPO')] netapp_support_opts = [ cfg.StrOpt('netapp_api_trace_pattern', default='(.*)', help=('A regular expression to limit the API tracing. This ' 'option is honored only if enabling ``api`` tracing ' 'with the ``trace_flags`` option. By default, ' 'all APIs will be traced.')), ] netapp_migration_opts = [ cfg.IntOpt('netapp_migrate_volume_timeout', default=3600, min=30, help='Sets time in seconds to wait for storage assisted volume ' 'migration to complete.'), ] CONF = cfg.CONF CONF.register_opts(netapp_proxy_opts, group=conf.SHARED_CONF_GROUP) CONF.register_opts(netapp_connection_opts, group=conf.SHARED_CONF_GROUP) CONF.register_opts(netapp_transport_opts, group=conf.SHARED_CONF_GROUP) CONF.register_opts(netapp_basicauth_opts, group=conf.SHARED_CONF_GROUP) CONF.register_opts(netapp_certificateauth_opts, group=conf.SHARED_CONF_GROUP) CONF.register_opts(netapp_cluster_opts, group=conf.SHARED_CONF_GROUP) CONF.register_opts(netapp_provisioning_opts, group=conf.SHARED_CONF_GROUP) CONF.register_opts(netapp_img_cache_opts, group=conf.SHARED_CONF_GROUP) CONF.register_opts(netapp_nfs_extra_opts, group=conf.SHARED_CONF_GROUP) CONF.register_opts(netapp_san_opts, group=conf.SHARED_CONF_GROUP) CONF.register_opts(netapp_replication_opts, group=conf.SHARED_CONF_GROUP) CONF.register_opts(netapp_support_opts, group=conf.SHARED_CONF_GROUP) CONF.register_opts(netapp_migration_opts, group=conf.SHARED_CONF_GROUP) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/drivers/netapp/utils.py0000664000175000017500000006435100000000000022332 0ustar00zuulzuul00000000000000# Copyright (c) 2012 NetApp, Inc. All rights reserved. # Copyright (c) 2014 Navneet Singh. All rights reserved. # Copyright (c) 2014 Clinton Knight. All rights reserved. # Copyright (c) 2015 Tom Barron. All rights reserved. # Copyright (c) 2016 Michael Price. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Utilities for NetApp drivers. This module contains common utilities to be used by one or more NetApp drivers to achieve the desired functionality. """ import decimal import platform import re from oslo_concurrency import processutils as putils from oslo_log import log as logging from oslo_utils import netutils from cinder import context from cinder import exception from cinder.i18n import _ from cinder.objects import fields from cinder import utils from cinder import version from cinder.volume import qos_specs from cinder.volume import volume_types LOG = logging.getLogger(__name__) OPENSTACK_PREFIX = 'openstack-' OBSOLETE_SSC_SPECS = {'netapp:raid_type': 'netapp_raid_type', 'netapp:disk_type': 'netapp_disk_type'} DEPRECATED_SSC_SPECS = {'netapp_unmirrored': 'netapp_mirrored', 'netapp_nodedup': 'netapp_dedup', 'netapp_nocompression': 'netapp_compression', 'netapp_thick_provisioned': 'netapp_thin_provisioned'} MIN_QOS_KEYS = frozenset([ 'minIOPS', 'minIOPSperGiB', ]) MAX_QOS_KEYS = frozenset([ 'maxIOPS', 'maxIOPSperGiB', 'maxBPS', 'maxBPSperGiB', ]) ADAPTIVE_QOS_KEYS = frozenset([ 'expectedIOPSperGiB', 'peakIOPSperGiB', 'expectedIOPSAllocation', 'peakIOPSAllocation', 'absoluteMinIOPS', 'blockSize', ]) QOS_ADAPTIVE_POLICY_GROUP_SPEC_KEYS = frozenset([ 'expected_iops', 'peak_iops', 'expected_iops_allocation', 'peak_iops_allocation', 'absolute_min_iops', 'block_size', 'policy_name', ]) BACKEND_QOS_CONSUMERS = frozenset(['back-end', 'both']) # Secret length cannot be less than 96 bits. http://tools.ietf.org/html/rfc3723 CHAP_SECRET_LENGTH = 16 DEFAULT_CHAP_USER_NAME = 'NetApp_iSCSI_CHAP_Username' API_TRACE_PATTERN = '(.*)' class NetAppDriverException(exception.VolumeDriverException): message = _("NetApp Cinder Driver exception.") class GeometryHasChangedOnDestination(NetAppDriverException): message = _("Geometry has changed on destination volume.") class NetAppDriverTimeout(NetAppDriverException): message = _("Timeout in NetApp Cinder Driver.") def validate_instantiation(**kwargs): """Checks if a driver is instantiated other than by the unified driver. Helps check direct instantiation of netapp drivers. Call this function in every netapp block driver constructor. """ if kwargs and kwargs.get('netapp_mode') == 'proxy': return LOG.warning("It is not the recommended way to use drivers by NetApp. " "Please use NetAppDriver to achieve the functionality.") def check_flags(required_flags, configuration): """Ensure that the flags we care about are set.""" for flag in required_flags: if not getattr(configuration, flag, None): msg = _('Configuration value %s is not set.') % flag raise exception.InvalidInput(reason=msg) def to_bool(val): """Converts true, yes, y, 1 to True, False otherwise.""" if val: strg = str(val).lower() if (strg == 'true' or strg == 'y' or strg == 'yes' or strg == 'enabled' or strg == '1'): return True else: return False else: return False @utils.synchronized("safe_set_attr") def set_safe_attr(instance, attr, val): """Sets the attribute in a thread safe manner. Returns if new val was set on attribute. If attr already had the value then False. 
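# --- Editor's illustrative sketch (not part of the upstream module) ---
# to_bool() above treats 'true', 'y', 'yes', 'enabled' and '1'
# (case-insensitively) as True; anything else, including None, is False.
to_bool('Yes')      # True
to_bool('enabled')  # True
to_bool('0')        # False
to_bool(None)       # False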
""" if not instance or not attr: return False old_val = getattr(instance, attr, None) if val is None and old_val is None: return False elif val == old_val: return False else: setattr(instance, attr, val) return True def get_volume_extra_specs(volume): """Provides extra specs associated with volume.""" ctxt = context.get_admin_context() type_id = volume.get('volume_type_id') if type_id is None: return {} volume_type = volume_types.get_volume_type(ctxt, type_id) if volume_type is None: return {} extra_specs = volume_type.get('extra_specs', {}) log_extra_spec_warnings(extra_specs) return extra_specs def setup_api_trace_pattern(api_trace_pattern): global API_TRACE_PATTERN try: re.compile(api_trace_pattern) except (re.error, TypeError): msg = _('Cannot parse the API trace pattern. %s is not a ' 'valid python regular expression.') % api_trace_pattern raise exception.InvalidConfigurationValue(msg) API_TRACE_PATTERN = api_trace_pattern def trace_filter_func_api(all_args): na_element = all_args.get('na_element') if na_element is None: return True api_name = na_element.get_name() return re.match(API_TRACE_PATTERN, api_name) is not None def trace_filter_func_rest_api(all_args): url = all_args.get('url') if url is None: return True return re.match(API_TRACE_PATTERN, url) is not None def round_down(value, precision='0.00'): return float(decimal.Decimal(str(value)).quantize( decimal.Decimal(precision), rounding=decimal.ROUND_DOWN)) def log_extra_spec_warnings(extra_specs): for spec in (set(extra_specs.keys() if extra_specs else []) & set(OBSOLETE_SSC_SPECS.keys())): LOG.warning('Extra spec %(old)s is obsolete. Use %(new)s ' 'instead.', {'old': spec, 'new': OBSOLETE_SSC_SPECS[spec]}) for spec in (set(extra_specs.keys() if extra_specs else []) & set(DEPRECATED_SSC_SPECS.keys())): LOG.warning('Extra spec %(old)s is deprecated. 
Use %(new)s ' 'instead.', {'old': spec, 'new': DEPRECATED_SSC_SPECS[spec]}) def get_iscsi_connection_properties(lun_id, volume, iqns, addresses, ports): # literal ipv6 address addresses = [netutils.escape_ipv6(a) if netutils.is_valid_ipv6(a) else a for a in addresses] lun_id = int(lun_id) if isinstance(iqns, str): iqns = [iqns] * len(addresses) target_portals = ['%s:%s' % (a, p) for a, p in zip(addresses, ports)] properties = {} properties['target_discovered'] = False properties['target_portal'] = target_portals[0] properties['target_iqn'] = iqns[0] properties['target_lun'] = lun_id properties['volume_id'] = volume['id'] if len(addresses) > 1: properties['target_portals'] = target_portals properties['target_iqns'] = iqns properties['target_luns'] = [lun_id] * len(addresses) auth = volume['provider_auth'] if auth: (auth_method, auth_username, auth_secret) = auth.split() properties['auth_method'] = auth_method properties['auth_username'] = auth_username properties['auth_password'] = auth_secret return { 'driver_volume_type': 'iscsi', 'data': properties, } def validate_qos_spec(qos_spec): """Check validity of Cinder qos spec for our backend.""" if qos_spec is None: return normalized_min_keys = [key.lower() for key in MIN_QOS_KEYS] normalized_max_keys = [key.lower() for key in MAX_QOS_KEYS] normalized_aqos_keys = [key.lower() for key in ADAPTIVE_QOS_KEYS] unrecognized_keys = [ k for k in qos_spec.keys() if k.lower() not in normalized_max_keys + normalized_min_keys + normalized_aqos_keys] if unrecognized_keys: msg = _('Unrecognized QOS keywords: "%s"') % unrecognized_keys raise exception.Invalid(msg) min_dict = {k: v for k, v in qos_spec.items() if k.lower() in normalized_min_keys} if len(min_dict) > 1: msg = _('Only one minimum limit can be set in a QoS spec.') raise exception.Invalid(msg) max_dict = {k: v for k, v in qos_spec.items() if k.lower() in normalized_max_keys} if len(max_dict) > 1: msg = _('Only one maximum limit can be set in a QoS spec.') raise exception.Invalid(msg) aqos_dict = {k: v for k, v in qos_spec.items() if k.lower() in normalized_aqos_keys} if aqos_dict and (min_dict or max_dict): msg = _('Adaptive QoS specs and non-adaptive QoS specs ' 'cannot be used together.') raise exception.Invalid(msg) def get_volume_type_from_volume(volume): """Provides volume type associated with volume.""" type_id = volume.get('volume_type_id') if type_id is None: return {} ctxt = context.get_admin_context() return volume_types.get_volume_type(ctxt, type_id) def _get_min_throughput_from_qos_spec(qos_spec, volume_size): """Returns the minimum QoS throughput. The QoS min specs are exclusive of one another and it accepts values in IOPS only. """ if 'miniops' in qos_spec: min_throughput = '%siops' % qos_spec['miniops'] elif 'miniopspergib' in qos_spec: min_throughput = '%siops' % str( int(qos_spec['miniopspergib']) * int(volume_size)) else: min_throughput = None return min_throughput def _get_max_throughput_from_qos_spec(qos_spec, volume_size): """Returns the maximum QoS throughput. The QoS max specs are exclusive of one another. 
""" if 'maxiops' in qos_spec: max_throughput = '%siops' % qos_spec['maxiops'] elif 'maxiopspergib' in qos_spec: max_throughput = '%siops' % str( int(qos_spec['maxiopspergib']) * int(volume_size)) elif 'maxbps' in qos_spec: max_throughput = '%sB/s' % qos_spec['maxbps'] elif 'maxbpspergib' in qos_spec: max_throughput = '%sB/s' % str( int(qos_spec['maxbpspergib']) * int(volume_size)) else: max_throughput = None return max_throughput def map_qos_spec(qos_spec, volume): """Map Cinder QOS spec to limit/throughput-value as used in client API.""" if qos_spec is None: return None spec = map_dict_to_lower(qos_spec) min_throughput = _get_min_throughput_from_qos_spec(spec, volume['size']) max_throughput = _get_max_throughput_from_qos_spec(spec, volume['size']) if min_throughput and max_throughput and max_throughput.endswith('B/s'): msg = _('Maximum limit should be in IOPS when minimum limit is ' 'specified.') raise exception.Invalid(msg) if min_throughput and max_throughput and max_throughput < min_throughput: msg = _('Maximum limit should be greater than or equal to the ' 'minimum limit.') raise exception.Invalid(msg) policy = dict(policy_name=get_qos_policy_group_name(volume)) if min_throughput: policy['min_throughput'] = min_throughput if max_throughput: policy['max_throughput'] = max_throughput return policy def map_aqos_spec(qos_spec, volume): """Map Cinder QOS spec to Adaptive QoS values.""" if qos_spec is None: return None qos_spec = map_dict_to_lower(qos_spec) spec = dict(policy_name=get_qos_policy_group_name(volume)) # Adaptive QoS specs if 'expectediopspergib' in qos_spec: spec['expected_iops'] = ( '%sIOPS/GB' % qos_spec['expectediopspergib']) if 'peakiopspergib' in qos_spec: spec['peak_iops'] = '%sIOPS/GB' % qos_spec['peakiopspergib'] if 'expectediopsallocation' in qos_spec: spec['expected_iops_allocation'] = qos_spec['expectediopsallocation'] if 'peakiopsallocation' in qos_spec: spec['peak_iops_allocation'] = qos_spec['peakiopsallocation'] if 'absoluteminiops' in qos_spec: spec['absolute_min_iops'] = '%sIOPS' % qos_spec['absoluteminiops'] if 'blocksize' in qos_spec: spec['block_size'] = qos_spec['blocksize'] if 'peak_iops' not in spec or 'expected_iops' not in spec: msg = _('Adaptive QoS requires the expected property and ' 'the peak property set together.') raise exception.Invalid(msg) if spec['peak_iops'] < spec['expected_iops']: msg = _('Adaptive maximum limit should be greater than or equal to ' 'the adaptive minimum limit.') raise exception.Invalid(msg) return spec def map_dict_to_lower(input_dict): """Return an equivalent to the input dictionary with lower-case keys.""" lower_case_dict = {} for key in input_dict: lower_case_dict[key.lower()] = input_dict[key] return lower_case_dict def get_qos_policy_group_name(volume): """Return the name of backend QOS policy group based on its volume id.""" if 'id' in volume: return OPENSTACK_PREFIX + volume.name_id return None def get_qos_policy_group_name_from_info(qos_policy_group_info): """Return the name of a QOS policy group given qos policy group info.""" if qos_policy_group_info is None: return None legacy = qos_policy_group_info.get('legacy') if legacy is not None: return legacy['policy_name'] spec = qos_policy_group_info.get('spec') if spec is not None: return spec['policy_name'] return None def get_pool_name_filter_regex(configuration): """Build the regex for filtering pools by name :param configuration: The volume driver configuration :raise InvalidConfigurationValue: if configured regex pattern is invalid :returns: A compiled regex for 
filtering pool names """ # If the configuration parameter is specified as an empty string # (interpreted as matching all pools), we replace it here with # (.+) to be explicit with CSV compatibility support implemented below. pool_patterns = configuration.netapp_pool_name_search_pattern or r'(.+)' # Strip whitespace from start/end and then 'or' all regex patterns pool_patterns = '|'.join(['^' + pool_pattern.strip('^$ \t') + '$' for pool_pattern in pool_patterns.split(',')]) try: return re.compile(pool_patterns) except re.error: raise exception.InvalidConfigurationValue( option='netapp_pool_name_search_pattern', value=configuration.netapp_pool_name_search_pattern) def get_valid_qos_policy_group_info(volume, extra_specs=None): """Given a volume, return information for QOS provisioning.""" info = dict(legacy=None, spec=None) try: volume_type = get_volume_type_from_volume(volume) except (KeyError, exception.NotFound): LOG.exception('Cannot get QoS spec for volume %s.', volume['id']) return info if volume_type is None: return info if extra_specs is None: extra_specs = volume_type.get('extra_specs', {}) info['legacy'] = get_legacy_qos_policy(extra_specs) info['spec'] = get_valid_backend_qos_spec_from_volume_type(volume, volume_type) msg = 'QoS policy group info for volume %(vol)s: %(info)s' LOG.debug(msg, {'vol': volume['name'], 'info': info}) check_for_invalid_qos_spec_combination(info, volume_type) return info def get_valid_backend_qos_spec_from_volume_type(volume, volume_type): """Given a volume type, return the associated Cinder QoS spec.""" spec_dict = get_backend_qos_spec_from_volume_type(volume_type) if spec_dict is None: return None validate_qos_spec(spec_dict) map_spec = (map_aqos_spec if is_qos_adaptive(spec_dict) else map_qos_spec) return map_spec(spec_dict, volume) def is_qos_adaptive(spec_dict): if not spec_dict: return False normalized_aqos_keys = [key.lower() for key in ADAPTIVE_QOS_KEYS] return all(key in normalized_aqos_keys for key in map_dict_to_lower(spec_dict).keys()) def is_qos_policy_group_spec_adaptive(policy): if not policy: return False spec = policy.get('spec') if not spec: return False return all(key in QOS_ADAPTIVE_POLICY_GROUP_SPEC_KEYS for key in map_dict_to_lower(spec).keys()) def get_backend_qos_spec_from_volume_type(volume_type): qos_specs_id = volume_type.get('qos_specs_id') if qos_specs_id is None: return None ctxt = context.get_admin_context() qos_spec = qos_specs.get_qos_specs(ctxt, qos_specs_id) if qos_spec is None: return None consumer = qos_spec['consumer'] # Front end QoS specs are handled by libvirt and we ignore them here. 
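# --- Editor's illustrative sketch (not part of the upstream module) ---
# How the throughput helpers above turn a Cinder QoS spec into the limit
# strings handed to ONTAP, assuming a hypothetical 10 GiB volume. Keys are
# matched case-insensitively once normalized by map_dict_to_lower().
spec = map_dict_to_lower({'minIOPS': '500', 'maxIOPSperGiB': '100'})
_get_min_throughput_from_qos_spec(spec, 10)  # -> '500iops'
_get_max_throughput_from_qos_spec(spec, 10)  # -> '1000iops'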
if consumer not in BACKEND_QOS_CONSUMERS: return None return qos_spec['specs'] def check_for_invalid_qos_spec_combination(info, volume_type): """Invalidate QOS spec if both legacy and non-legacy info is present.""" if info['legacy'] and info['spec']: msg = _('Conflicting QoS specifications in volume type ' '%s: when QoS spec is associated to volume ' 'type, legacy "netapp:qos_policy_group" is not allowed in ' 'the volume type extra specs.') % volume_type['id'] raise exception.Invalid(msg) def get_legacy_qos_policy(extra_specs): """Return legacy qos policy information if present in extra specs.""" external_policy_name = extra_specs.get('netapp:qos_policy_group') if external_policy_name is None: return None return dict(policy_name=external_policy_name) def get_export_host_junction_path(share): if '[' in share and ']' in share: try: # ipv6 host = re.search(r'\[(.*)\]', share).group(1) junction_path = share.split(':')[-1] except AttributeError: raise NetAppDriverException(_("Share '%s' is not in a valid " "format.") % share) else: # ipv4 path = share.split(':') if len(path) == 2: host = path[0] junction_path = path[1] else: raise NetAppDriverException(_("Share '%s' is not in a valid " "format.") % share) return host, junction_path def qos_min_feature_name(is_nfs, node_name): if node_name is None: node_name = '' if is_nfs: return 'QOS_MIN_NFS_' + node_name else: return 'QOS_MIN_BLOCK_' + node_name def is_multiattach_to_host(volume, connector): # With multi-attach enabled, a single volume can be attached to multiple # instances. If multiple instances are running on the same nova host, the # volume should remain attached to the nova host until it is detached # from the last instance on that host. if not volume.multiattach or not volume.volume_attachment: return False attachment = [ attach_info for attach_info in volume.volume_attachment if attach_info['attach_status'] == fields.VolumeAttachStatus.ATTACHED and attach_info['attached_host'] == connector.get('host') ] LOG.debug('is_multiattach_to_host: attachment %s.', attachment) return len(attachment) > 1 class hashabledict(dict): """A hashable dictionary that is comparable (i.e. in unit tests, etc.)""" def __hash__(self): return hash(tuple(sorted(self.items()))) class OpenStackInfo(object): """OS/distribution, release, and version. NetApp uses these fields as content for EMS log entry. 
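# --- Editor's illustrative sketch (not part of the upstream module) ---
# get_export_host_junction_path() above splits an NFS share string into its
# host and junction path, handling bracketed IPv6 literals. The share strings
# below are hypothetical.
get_export_host_junction_path('192.168.1.10:/vol/cinder')
# -> ('192.168.1.10', '/vol/cinder')
get_export_host_junction_path('[fd20:8b1e::45]:/vol/cinder')
# -> ('fd20:8b1e::45', '/vol/cinder')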
""" PACKAGE_NAME = 'python-cinder' def __init__(self): self._version = 'unknown version' self._release = 'unknown release' self._vendor = 'unknown vendor' self._platform = 'unknown platform' def _update_version_from_version_string(self): try: self._version = version.version_info.version_string() except Exception: pass def _update_release_from_release_string(self): try: self._release = version.version_info.release_string() except Exception: pass def _update_platform(self): try: self._platform = platform.platform() except Exception: pass @staticmethod def _get_version_info_version(): return version.version_info.version @staticmethod def _get_version_info_release(): return version.version_info.release def _update_info_from_version_info(self): try: ver = self._get_version_info_version() if ver: self._version = ver except Exception: pass try: rel = self._get_version_info_release() if rel: self._release = rel except Exception: pass # RDO, RHEL-OSP, Mirantis on Redhat, SUSE def _update_info_from_rpm(self): LOG.debug('Trying rpm command.') try: out, err = putils.execute("rpm", "-q", "--queryformat", "'%{version}\t%{release}\t%{vendor}'", self.PACKAGE_NAME) if not out: LOG.info('No rpm info found for %(pkg)s package.', { 'pkg': self.PACKAGE_NAME}) return False parts = out.split() self._version = parts[0] self._release = parts[1] self._vendor = ' '.join(parts[2::]) return True except Exception as e: LOG.info('Could not run rpm command: %(msg)s.', {'msg': e}) return False # ubuntu, mirantis on ubuntu def _update_info_from_dpkg(self): LOG.debug('Trying dpkg-query command.') try: _vendor = None out, err = putils.execute("dpkg-query", "-W", "-f='${Version}'", self.PACKAGE_NAME) if not out: LOG.info('No dpkg-query info found for %(pkg)s package.', {'pkg': self.PACKAGE_NAME}) return False # debian format: [epoch:]upstream_version[-debian_revision] deb_version = out # in case epoch or revision is missing, copy entire string _release = deb_version if ':' in deb_version: deb_epoch, upstream_version = deb_version.split(':') _release = upstream_version if '-' in deb_version: deb_revision = deb_version.split('-')[1] _vendor = deb_revision self._release = _release if _vendor: self._vendor = _vendor return True except Exception as e: LOG.info('Could not run dpkg-query command: %(msg)s.', { 'msg': e}) return False def _update_openstack_info(self): self._update_version_from_version_string() self._update_release_from_release_string() self._update_platform() # some distributions override with more meaningful information self._update_info_from_version_info() # see if we have still more targeted info from rpm or apt found_package = self._update_info_from_rpm() if not found_package: self._update_info_from_dpkg() def info(self): self._update_openstack_info() return '%(version)s|%(release)s|%(vendor)s|%(platform)s' % { 'version': self._version, 'release': self._release, 'vendor': self._vendor, 'platform': self._platform} class Features(object): def __init__(self): self.defined_features = set() def add_feature(self, name, supported=True, min_version=None): if not isinstance(supported, bool): raise TypeError("Feature value must be a bool type.") self.defined_features.add(name) setattr(self, name, FeatureState(supported, min_version)) def __getattr__(self, name): # NOTE(cknight): Needed to keep pylint happy. 
raise AttributeError class FeatureState(object): def __init__(self, supported=True, minimum_version=None): """Represents the current state of enablement for a Feature :param supported: True if supported, false otherwise :param minimum_version: The minimum version that this feature is supported at """ self.supported = supported self.minimum_version = minimum_version def __nonzero__(self): """Allow a FeatureState object to be tested for truth value :returns: True if the feature is supported, otherwise False """ return self.supported def __bool__(self): """py3 Allow a FeatureState object to be tested for truth value :returns: True if the feature is supported, otherwise False """ return self.supported class BitSet(object): def __init__(self, value=0): self._value = value def set(self, bit): self._value |= 1 << bit return self def unset(self, bit): self._value &= ~(1 << bit) return self def is_set(self, bit): return self._value & 1 << bit def __and__(self, other): self._value &= other return self def __or__(self, other): self._value |= other return self def __invert__(self): self._value = ~self._value return self def __xor__(self, other): self._value ^= other return self def __lshift__(self, other): self._value <<= other return self def __rshift__(self, other): self._value >>= other return self def __int__(self): return self._value def __str__(self): return bin(self._value) def __repr__(self): return str(self._value) def __eq__(self, other): return (isinstance(other, self.__class__) and self._value == other._value) or self._value == int(other) def __ne__(self, other): return not self.__eq__(other) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315577.3911211 cinder-27.0.0/cinder/volume/drivers/nexenta/0000775000175000017500000000000000000000000020762 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/drivers/nexenta/__init__.py0000664000175000017500000000000000000000000023061 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/drivers/nexenta/iscsi.py0000664000175000017500000007043700000000000022461 0ustar00zuulzuul00000000000000# Copyright 2016 Nexenta Systems, Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_log import log as logging from oslo_utils import excutils from cinder.common import constants from cinder import exception from cinder.i18n import _ from cinder import interface from cinder.volume import driver from cinder.volume.drivers.nexenta import jsonrpc from cinder.volume.drivers.nexenta import options from cinder.volume.drivers.nexenta import utils VERSION = '1.3.1' LOG = logging.getLogger(__name__) @interface.volumedriver class NexentaISCSIDriver(driver.ISCSIDriver): """Executes volume driver commands on Nexenta Appliance. Version history: .. 
code-block:: none 1.0.0 - Initial driver version. 1.0.1 - Fixed bug #1236626: catch "does not exist" exception of lu_exists. 1.1.0 - Changed class name to NexentaISCSIDriver. 1.1.1 - Ignore "does not exist" exception of nms.snapshot.destroy. 1.1.2 - Optimized create_cloned_volume, replaced zfs send recv with zfs clone. 1.1.3 - Extended volume stats provided by _update_volume_stats method. 1.2.0 - Added volume migration with storage assist method. 1.2.1 - Fixed bug #1263258: now migrate_volume update provider_location of migrated volume; after migrating volume migrate_volume destroy snapshot on migration destination. 1.3.0 - Added retype method. 1.3.0.1 - Target creation refactor. 1.3.1 - Added ZFS cleanup. """ VERSION = VERSION # ThirdPartySystems wiki page CI_WIKI_NAME = "Nexenta_CI" def __init__(self, *args, **kwargs): super(NexentaISCSIDriver, self).__init__(*args, **kwargs) self.nms = None self.targets = {} if self.configuration: self.configuration.append_config_values( options.NEXENTA_CONNECTION_OPTS) self.configuration.append_config_values( options.NEXENTA_ISCSI_OPTS) self.configuration.append_config_values( options.NEXENTA_DATASET_OPTS) self.configuration.append_config_values( options.NEXENTA_RRMGR_OPTS) self.nms_protocol = self.configuration.nexenta_rest_protocol self.nms_host = self.configuration.nexenta_host self.nms_port = self.configuration.nexenta_rest_port self.nms_user = self.configuration.nexenta_user self.nms_password = self.configuration.nexenta_password self.volume = self.configuration.nexenta_volume self.volume_compression = ( self.configuration.nexenta_dataset_compression) self.volume_deduplication = self.configuration.nexenta_dataset_dedup self.volume_description = ( self.configuration.nexenta_dataset_description) self.rrmgr_compression = self.configuration.nexenta_rrmgr_compression self.rrmgr_tcp_buf_size = self.configuration.nexenta_rrmgr_tcp_buf_size self.rrmgr_connections = self.configuration.nexenta_rrmgr_connections self.iscsi_target_portal_port = ( self.configuration.nexenta_iscsi_target_portal_port) self._needless_objects = set() @staticmethod def get_driver_options(): return (options.NEXENTA_CONNECTION_OPTS + options.NEXENTA_ISCSI_OPTS + options.NEXENTA_DATASET_OPTS + options.NEXENTA_RRMGR_OPTS) @property def backend_name(self): backend_name = None if self.configuration: backend_name = self.configuration.safe_get('volume_backend_name') if not backend_name: backend_name = self.__class__.__name__ return backend_name def do_setup(self, context): if self.nms_protocol == 'auto': protocol, auto = 'http', True else: protocol, auto = self.nms_protocol, False self.nms = jsonrpc.NexentaJSONProxy( protocol, self.nms_host, self.nms_port, '/rest/nms', self.nms_user, self.nms_password, auto=auto) def check_for_setup_error(self): """Verify that the volume for our zvols exists. 
:raise: :py:exc:`LookupError` """ if not self.nms.volume.object_exists(self.volume): raise LookupError(_("Volume %s does not exist in Nexenta SA") % self.volume) def _get_zvol_name(self, volume_name): """Return zvol name that corresponds given volume name.""" return '%s/%s' % (self.volume, volume_name) def _create_target(self, target_idx): target_name = '%s%s-%i' % ( self.configuration.nexenta_target_prefix, self.nms_host, target_idx ) target_group_name = self._get_target_group_name(target_name) if not self._target_exists(target_name): try: self.nms.iscsitarget.create_target({ 'target_name': target_name}) except utils.NexentaException as exc: if 'already' in exc.args[0]: LOG.info('Ignored target creation error "%s" while ' 'ensuring export.', exc) else: raise if not self._target_group_exists(target_group_name): try: self.nms.stmf.create_targetgroup(target_group_name) except utils.NexentaException as exc: if ('already' in exc.args[0]): LOG.info('Ignored target group creation error "%s" ' 'while ensuring export.', exc) else: raise if not self._target_member_in_target_group(target_group_name, target_name): try: self.nms.stmf.add_targetgroup_member(target_group_name, target_name) except utils.NexentaException as exc: if ('already' in exc.args[0]): LOG.info('Ignored target group member addition error ' '"%s" while ensuring export.', exc) else: raise self.targets[target_name] = [] return target_name def _get_target_name(self, volume): """Return iSCSI target name with least LUs.""" provider_location = volume.get('provider_location') target_names = self.targets.keys() if provider_location: target_name = provider_location.split(',1 ')[1].split(' ')[0] if not self.targets.get(target_name): self.targets[target_name] = [] if not volume['name'] in self.targets[target_name]: self.targets[target_name].append(volume['name']) elif not target_names: # create first target and target group target_name = self._create_target(0) self.targets[target_name].append(volume['name']) else: target_name = target_names[0] for target in target_names: if len(self.targets[target]) < len(self.targets[target_name]): target_name = target if len(self.targets[target_name]) >= 20: # create new target and target group target_name = self._create_target(len(target_names)) if volume['name'] not in self.targets[target_name]: self.targets[target_name].append(volume['name']) return target_name def _get_target_group_name(self, target_name): """Return Nexenta iSCSI target group name for volume.""" return target_name.replace( self.configuration.nexenta_target_prefix, self.configuration.nexenta_target_group_prefix ) @staticmethod def _get_clone_snapshot_name(volume): """Return name for snapshot that will be used to clone the volume.""" return 'cinder-clone-snapshot-%(id)s' % volume @staticmethod def _is_clone_snapshot_name(snapshot): """Check if snapshot is created for cloning.""" name = snapshot.split('@')[-1] return name.startswith('cinder-clone-snapshot-') def create_volume(self, volume): """Create a zvol on appliance. :param volume: volume reference :return: model update dict for volume reference """ self.nms.zvol.create( self._get_zvol_name(volume['name']), '%sG' % (volume['size'],), str(self.configuration.nexenta_blocksize), self.configuration.nexenta_sparse) def extend_volume(self, volume, new_size): """Extend an existing volume. 
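# --- Editor's illustrative sketch (not part of the upstream driver) ---
# Naming helpers above, assuming nexenta_volume = 'cinder' and a hypothetical
# Cinder volume dict:
volume = {'id': 'abc-123', 'name': 'volume-abc-123'}
NexentaISCSIDriver._get_clone_snapshot_name(volume)
# -> 'cinder-clone-snapshot-abc-123'
# self._get_zvol_name('volume-abc-123') on such a driver instance would
# return 'cinder/volume-abc-123'.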
:param volume: volume reference :param new_size: volume new size in GB """ LOG.info('Extending volume: %(id)s New size: %(size)s GB', {'id': volume['id'], 'size': new_size}) self.nms.zvol.set_child_prop(self._get_zvol_name(volume['name']), 'volsize', '%sG' % new_size) def delete_volume(self, volume): """Destroy a zvol on appliance. :param volume: volume reference """ volume_name = self._get_zvol_name(volume['name']) try: props = self.nms.zvol.get_child_props(volume_name, 'origin') or {} self.nms.zvol.destroy(volume_name, '') except utils.NexentaException as exc: if 'does not exist' in exc.args[0]: LOG.info('Volume %s does not exist, it ' 'seems it was already deleted.', volume_name) return if 'zvol has children' in exc.args[0]: self._mark_as_garbage(volume_name) LOG.info('Volume %s will be deleted later.', volume_name) return raise origin = props.get('origin') self._collect_garbage(origin) def create_cloned_volume(self, volume, src_vref): """Creates a clone of the specified volume. :param volume: new volume reference :param src_vref: source volume reference """ snapshot = {'volume_name': src_vref['name'], 'name': self._get_clone_snapshot_name(volume), 'volume_size': src_vref['size']} LOG.debug('Creating temp snapshot of the original volume: ' '%(volume_name)s@%(name)s', snapshot) # We don't delete this snapshot, because this snapshot will be origin # of new volume. This snapshot will be automatically promoted by NMS # when user will delete origin volume. But when cloned volume deleted # we check its origin property and delete source snapshot if needed. self.create_snapshot(snapshot) try: self.create_volume_from_snapshot(volume, snapshot) self._mark_as_garbage('@'.join( (self._get_zvol_name(src_vref['name']), snapshot['name']))) except utils.NexentaException: with excutils.save_and_reraise_exception(): LOG.exception( 'Volume creation failed, deleting created snapshot ' '%(volume_name)s@%(name)s', snapshot) try: self.delete_snapshot(snapshot) except (utils.NexentaException, exception.SnapshotIsBusy): LOG.warning('Failed to delete zfs snapshot ' '%(volume_name)s@%(name)s', snapshot) raise def _get_zfs_send_recv_cmd(self, src, dst): """Returns rrmgr command for source and destination.""" return utils.get_rrmgr_cmd(src, dst, compression=self.rrmgr_compression, tcp_buf_size=self.rrmgr_tcp_buf_size, connections=self.rrmgr_connections) @staticmethod def get_nms_for_url(url): """Returns initialized nms object for url.""" auto, scheme, user, password, host, port, path = ( utils.parse_nms_url(url)) return jsonrpc.NexentaJSONProxy(scheme, host, port, path, user, password, auto=auto) def migrate_volume(self, ctxt, volume, host): """Migrate if volume and host are managed by Nexenta appliance. 
:param ctxt: context :param volume: a dictionary describing the volume to migrate :param host: a dictionary describing the host to migrate to """ LOG.debug('Enter: migrate_volume: id=%(id)s, host=%(host)s', {'id': volume['id'], 'host': host}) false_ret = (False, None) if volume['status'] not in ('available', 'retyping'): return false_ret if 'capabilities' not in host: return false_ret capabilities = host['capabilities'] if ('location_info' not in capabilities or 'iscsi_target_portal_port' not in capabilities or 'nms_url' not in capabilities): return false_ret nms_url = capabilities['nms_url'] dst_parts = capabilities['location_info'].split(':') if (capabilities.get('vendor_name') != 'Nexenta' or dst_parts[0] != self.__class__.__name__ or capabilities['free_capacity_gb'] < volume['size']): return false_ret dst_host, dst_volume = dst_parts[1:] ssh_bound = False ssh_bindings = self.nms.appliance.ssh_list_bindings() for bind in ssh_bindings: if dst_host.startswith(ssh_bindings[bind][3]): ssh_bound = True break if not ssh_bound: LOG.warning("Remote NexentaStor appliance at %s should be " "SSH-bound.", dst_host) # Create temporary snapshot of volume on NexentaStor Appliance. snapshot = { 'volume_name': volume['name'], 'name': utils.get_migrate_snapshot_name(volume) } self.create_snapshot(snapshot) src = '%(volume)s/%(zvol)s@%(snapshot)s' % { 'volume': self.volume, 'zvol': volume['name'], 'snapshot': snapshot['name'] } dst = ':'.join([dst_host, dst_volume]) try: self.nms.appliance.execute(self._get_zfs_send_recv_cmd(src, dst)) except utils.NexentaException as exc: LOG.warning("Cannot send source snapshot %(src)s to " "destination %(dst)s. Reason: %(exc)s", {'src': src, 'dst': dst, 'exc': exc}) return false_ret finally: try: self.delete_snapshot(snapshot) except utils.NexentaException as exc: LOG.warning("Cannot delete temporary source snapshot " "%(src)s on NexentaStor Appliance: %(exc)s", {'src': src, 'exc': exc}) try: self.delete_volume(volume) except utils.NexentaException as exc: LOG.warning("Cannot delete source volume %(volume)s on " "NexentaStor Appliance: %(exc)s", {'volume': volume['name'], 'exc': exc}) dst_nms = self.get_nms_for_url(nms_url) dst_snapshot = '%s/%s@%s' % (dst_volume, volume['name'], snapshot['name']) try: dst_nms.snapshot.destroy(dst_snapshot, '') except utils.NexentaException as exc: LOG.warning("Cannot delete temporary destination snapshot " "%(dst)s on NexentaStor Appliance: %(exc)s", {'dst': dst_snapshot, 'exc': exc}) return True, None def retype(self, context, volume, new_type, diff, host): """Convert the volume to be of the new type. :param context: Context :param volume: A dictionary describing the volume to migrate :param new_type: A dictionary describing the volume type to convert to :param diff: A dictionary with the difference between the two types :param host: A dictionary describing the host to migrate to, where host['host'] is its name, and host['capabilities'] is a dictionary of its reported capabilities. 
""" LOG.debug('Retype volume request %(vol)s to be %(type)s ' '(host: %(host)s), diff %(diff)s.', {'vol': volume['name'], 'type': new_type, 'host': host, 'diff': diff}) options = dict( compression='compression', dedup='dedup', description='nms:description' ) retyped = False migrated = False capabilities = host['capabilities'] src_backend = self.__class__.__name__ dst_backend = capabilities['location_info'].split(':')[0] if src_backend != dst_backend: LOG.warning('Cannot retype from %(src_backend)s to ' '%(dst_backend)s.', {'src_backend': src_backend, 'dst_backend': dst_backend}) return False hosts = (volume['host'], host['host']) old, new = hosts if old != new: migrated, provider_location = self.migrate_volume( context, volume, host) if not migrated: nms = self.nms else: nms_url = capabilities['nms_url'] nms = self.get_nms_for_url(nms_url) zvol = '%s/%s' % ( capabilities['location_info'].split(':')[-1], volume['name']) for opt in options: old, new = diff.get('extra_specs').get(opt, (False, False)) if old != new: LOG.debug('Changing %(opt)s from %(old)s to %(new)s.', {'opt': opt, 'old': old, 'new': new}) try: nms.zvol.set_child_prop( zvol, options[opt], new) retyped = True except utils.NexentaException: LOG.error('Error trying to change %(opt)s' ' from %(old)s to %(new)s', {'opt': opt, 'old': old, 'new': new}) return False, None return retyped or migrated, None def create_snapshot(self, snapshot): """Create snapshot of existing zvol on appliance. :param snapshot: snapshot reference """ self.nms.zvol.create_snapshot( self._get_zvol_name(snapshot['volume_name']), snapshot['name'], '') def create_volume_from_snapshot(self, volume, snapshot): """Create new volume from other's snapshot on appliance. :param volume: reference of volume to be created :param snapshot: reference of source snapshot """ self.nms.zvol.clone( '%s@%s' % (self._get_zvol_name(snapshot['volume_name']), snapshot['name']), self._get_zvol_name(volume['name'])) if (('size' in volume) and ( volume['size'] > snapshot['volume_size'])): self.extend_volume(volume, volume['size']) def delete_snapshot(self, snapshot): """Delete volume's snapshot on appliance. :param snapshot: snapshot reference """ volume_name = self._get_zvol_name(snapshot['volume_name']) snapshot_name = '%s@%s' % (volume_name, snapshot['name']) try: self.nms.snapshot.destroy(snapshot_name, '') except utils.NexentaException as exc: if "does not exist" in exc.args[0]: LOG.info('Snapshot %s does not exist, it seems it was ' 'already deleted.', snapshot_name) return elif "snapshot has dependent clones" in exc.args[0]: self._mark_as_garbage(snapshot_name) LOG.info('Snapshot %s has dependent clones, will be ' 'deleted later.', snapshot_name) return raise self._collect_garbage(volume_name) def local_path(self, volume): """Return local path to existing local volume. We never have local volumes, so it raises NotImplementedError. :raise: :py:exc:`NotImplementedError` """ raise NotImplementedError def _target_exists(self, target): """Check if iSCSI target exist. :param target: target name :return: True if target exist, else False """ targets = self.nms.stmf.list_targets() if not targets: return False return (target in self.nms.stmf.list_targets()) def _target_group_exists(self, target_group): """Check if target group exist. 
:param target_group: target group :return: True if target group exist, else False """ groups = self.nms.stmf.list_targetgroups() if not groups: return False return target_group in groups def _target_member_in_target_group(self, target_group, target_member): """Check if target member in target group. :param target_group: target group :param target_member: target member :return: True if target member in target group, else False :raises NexentaException: if target group doesn't exist """ members = self.nms.stmf.list_targetgroup_members(target_group) if not members: return False return target_member in members def _lu_exists(self, zvol_name): """Check if LU exists on appliance. :param zvol_name: Zvol name :raises NexentaException: if zvol not exists :return: True if LU exists, else False """ try: return bool(self.nms.scsidisk.lu_exists(zvol_name)) except utils.NexentaException as exc: if 'does not exist' not in exc.args[0]: raise return False def _is_lu_shared(self, zvol_name): """Check if LU exists on appliance and shared. :param zvol_name: Zvol name :raises NexentaException: if Zvol not exist :return: True if LU exists and shared, else False """ try: shared = self.nms.scsidisk.lu_shared(zvol_name) > 0 except utils.NexentaException as exc: if 'does not exist for zvol' not in exc.args[0]: raise # Zvol does not exists shared = False # LU does not exist return shared def create_export(self, _ctx, volume, connector): """Create new export for zvol. :param volume: reference of volume to be exported :return: iscsiadm-formatted provider location string """ model_update = self._do_export(_ctx, volume) return model_update def ensure_export(self, _ctx, volume): self._do_export(_ctx, volume) def _do_export(self, _ctx, volume): """Recreate parts of export if necessary. :param volume: reference of volume to be exported """ zvol_name = self._get_zvol_name(volume['name']) target_name = self._get_target_name(volume) target_group_name = self._get_target_group_name(target_name) entry = None if not self._lu_exists(zvol_name): try: entry = self.nms.scsidisk.create_lu(zvol_name, {}) except utils.NexentaException as exc: if 'in use' not in exc.args[0]: raise LOG.info('Ignored LU creation error "%s" while ensuring ' 'export.', exc) if not self._is_lu_shared(zvol_name): try: entry = self.nms.scsidisk.add_lun_mapping_entry(zvol_name, { 'target_group': target_group_name}) except utils.NexentaException as exc: if 'view entry exists' not in exc.args[0]: raise LOG.info('Ignored LUN mapping entry addition error "%s" ' 'while ensuring export.', exc) model_update = {} if entry: provider_location = '%(host)s:%(port)s,1 %(name)s %(lun)s' % { 'host': self.nms_host, 'port': self.configuration.nexenta_iscsi_target_portal_port, 'name': target_name, 'lun': entry['lun'], } model_update = {'provider_location': provider_location} return model_update def remove_export(self, _ctx, volume): """Destroy all resources created to export zvol. 
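The volume is dropped from the driver's in-memory target mapping and the logical unit created for the zvol is deleted on the appliance.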
:param volume: reference of volume to be unexported """ target_name = self._get_target_name(volume) self.targets[target_name].remove(volume['name']) zvol_name = self._get_zvol_name(volume['name']) self.nms.scsidisk.delete_lu(zvol_name) def _update_volume_stats(self): """Retrieve stats info for NexentaStor appliance.""" LOG.debug('Updating volume stats') stats = self.nms.volume.get_child_props( self.configuration.nexenta_volume, 'health|size|used|available') total_amount = utils.str2gib_size(stats['size']) free_amount = utils.str2gib_size(stats['available']) location_info = '%(driver)s:%(host)s:%(volume)s' % { 'driver': self.__class__.__name__, 'host': self.nms_host, 'volume': self.volume } self._stats = { 'vendor_name': 'Nexenta', 'dedup': self.volume_deduplication, 'compression': self.volume_compression, 'description': self.volume_description, 'driver_version': self.VERSION, 'storage_protocol': constants.ISCSI, 'total_capacity_gb': total_amount, 'free_capacity_gb': free_amount, 'reserved_percentage': self.configuration.reserved_percentage, 'QoS_support': False, 'volume_backend_name': self.backend_name, 'location_info': location_info, 'iscsi_target_portal_port': self.iscsi_target_portal_port, 'nms_url': self.nms.url } def _collect_garbage(self, zfs_object): """Destroys ZFS parent objects Recursively destroys ZFS parent volumes and snapshots if they are marked as garbage :param zfs_object: full path to a volume or a snapshot """ if zfs_object and zfs_object in self._needless_objects: sp = zfs_object.split('/') path = '/'.join(sp[:-1]) name = sp[-1] if '@' in name: # it's a snapshot: volume, snap = name.split('@') parent = '/'.join((path, volume)) try: self.nms.snapshot.destroy(zfs_object, '') except utils.NexentaException as exc: LOG.debug('Error occurred while trying to delete a ' 'snapshot: %s', exc) return else: try: props = self.nms.zvol.get_child_props( zfs_object, 'origin') or {} except utils.NexentaException: props = {} parent = (props['origin'] if 'origin' in props and props['origin'] else '') try: self.nms.zvol.destroy(zfs_object, '') except utils.NexentaException as exc: LOG.debug('Error occurred while trying to delete a ' 'volume: %s', exc) return self._needless_objects.remove(zfs_object) self._collect_garbage(parent) def _mark_as_garbage(self, zfs_object): """Puts ZFS object into list for further removal :param zfs_object: full path to a volume or a snapshot """ self._needless_objects.add(zfs_object) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/drivers/nexenta/jsonrpc.py0000664000175000017500000000546200000000000023021 0ustar00zuulzuul00000000000000# Copyright 2016 Nexenta Systems, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
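# Illustrative usage sketch for the proxy class defined below. The endpoint,
# credentials and volume name are made-up examples, not defaults:
#
#     nms = NexentaJSONProxy('http', '10.3.3.3', 2000, '/rest/nms',
#                            'admin', 'secret', auto=True)
#     # Attribute access accumulates the object and method names; calling the
#     # resulting proxy POSTs a JSON body such as
#     #     {"object": "volume", "method": "object_exists", "params": ["tank"]}
#     # and returns the "result" field of the response.
#     exists = nms.volume.object_exists('tank')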
from oslo_log import log as logging from oslo_serialization import jsonutils import requests from cinder.utils import retry from cinder.volume.drivers.nexenta import utils LOG = logging.getLogger(__name__) TIMEOUT = 60 class NexentaJSONProxy(object): retry_exc_tuple = (requests.exceptions.ConnectionError,) def __init__(self, scheme, host, port, path, user, password, auto=False, obj=None, method=None, session=None): if session: self.session = session else: self.session = requests.Session() self.session.auth = (user, password) self.session.headers.update({'Content-Type': 'application/json'}) self.scheme = scheme.lower() self.host = host self.port = port self.path = path self.user = user self.password = password self.auto = auto self.obj = obj self.method = method def __getattr__(self, name): if not self.obj: obj, method = name, None elif not self.method: obj, method = self.obj, name else: obj, method = '%s.%s' % (self.obj, self.method), name return NexentaJSONProxy(self.scheme, self.host, self.port, self.path, self.user, self.password, self.auto, obj, method, self.session) @property def url(self): return '%s://%s:%s%s' % (self.scheme, self.host, self.port, self.path) def __hash__(self): return self.url.__hash__() def __repr__(self): return 'NMS proxy: %s' % self.url @retry(retry_exc_tuple, retries=6) def __call__(self, *args): data = jsonutils.dumps({ 'object': self.obj, 'method': self.method, 'params': args }) LOG.debug('Sending JSON data: %s', data) r = self.session.post(self.url, data=data, timeout=TIMEOUT) response = r.json() LOG.debug('Got response: %s', response) if response.get('error') is not None: message = response['error'].get('message', '') raise utils.NexentaException(message) return response.get('result') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/drivers/nexenta/nfs.py0000664000175000017500000010251000000000000022121 0ustar00zuulzuul00000000000000# Copyright 2016 Nexenta Systems, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import hashlib import os import re from eventlet import greenthread from oslo_log import log as logging from oslo_utils import fileutils from oslo_utils import units from cinder.common import constants from cinder import context from cinder import db from cinder import exception from cinder.i18n import _ from cinder import interface import cinder.privsep.fs from cinder.volume.drivers.nexenta import jsonrpc from cinder.volume.drivers.nexenta import options from cinder.volume.drivers.nexenta import utils from cinder.volume.drivers import nfs VERSION = '1.3.1' LOG = logging.getLogger(__name__) @interface.volumedriver class NexentaNfsDriver(nfs.NfsDriver): """Executes volume driver commands on Nexenta Appliance. Version history: .. code-block:: none 1.0.0 - Initial driver version. 1.1.0 - Auto sharing for enclosing folder. 1.1.1 - Added caching for NexentaStor appliance 'volroot' value. 
1.1.2 - Ignore "folder does not exist" error in delete_volume and delete_snapshot method. 1.1.3 - Redefined volume_backend_name attribute inherited from RemoteFsDriver. 1.2.0 - Added migrate and retype methods. 1.3.0 - Extend volume method. 1.3.1 - Cache capacity info and check shared folders on setup. """ driver_prefix = 'nexenta' volume_backend_name = 'NexentaNfsDriver' VERSION = VERSION VOLUME_FILE_NAME = 'volume' # ThirdPartySystems wiki page CI_WIKI_NAME = "Nexenta_CI" def __init__(self, *args, **kwargs): super(NexentaNfsDriver, self).__init__(*args, **kwargs) if self.configuration: self.configuration.append_config_values( options.NEXENTA_CONNECTION_OPTS) self.configuration.append_config_values( options.NEXENTA_NFS_OPTS) self.configuration.append_config_values( options.NEXENTA_DATASET_OPTS) self.configuration.append_config_values( options.NEXENTA_RRMGR_OPTS) self.nms_cache_volroot = self.configuration.nexenta_nms_cache_volroot self.rrmgr_compression = self.configuration.nexenta_rrmgr_compression self.rrmgr_tcp_buf_size = self.configuration.nexenta_rrmgr_tcp_buf_size self.rrmgr_connections = self.configuration.nexenta_rrmgr_connections self.nfs_mount_point_base = self.configuration.nexenta_mount_point_base self.volume_compression = ( self.configuration.nexenta_dataset_compression) self.volume_deduplication = self.configuration.nexenta_dataset_dedup self.volume_description = ( self.configuration.nexenta_dataset_description) self.sparsed_volumes = self.configuration.nexenta_sparsed_volumes self._nms2volroot = {} self.share2nms = {} self.nfs_versions = {} self.shares_with_capacities = {} @staticmethod def get_driver_options(): return ( options.NEXENTA_CONNECTION_OPTS + options.NEXENTA_NFS_OPTS + options.NEXENTA_DATASET_OPTS + options.NEXENTA_RRMGR_OPTS ) @property def backend_name(self): backend_name = None if self.configuration: backend_name = self.configuration.safe_get('volume_backend_name') if not backend_name: backend_name = self.__class__.__name__ return backend_name def do_setup(self, context): shares_config = getattr(self.configuration, self.driver_prefix + '_shares_config') if shares_config: self.configuration.nfs_shares_config = shares_config super(NexentaNfsDriver, self).do_setup(context) self._load_shares_config(shares_config) self._mount_subfolders() def check_for_setup_error(self): """Verify that the volume for our folder exists. :raise: :py:exc:`LookupError` """ if self.share2nms: for nfs_share in self.share2nms: nms = self.share2nms[nfs_share] volume_name, dataset = self._get_share_datasets(nfs_share) if not nms.volume.object_exists(volume_name): raise LookupError(_("Volume %s does not exist in Nexenta " "Store appliance"), volume_name) folder = '%s/%s' % (volume_name, dataset) if not nms.folder.object_exists(folder): raise LookupError(_("Folder %s does not exist in Nexenta " "Store appliance"), folder) if (folder not in nms.netstorsvc.get_shared_folders( 'svc:/network/nfs/server:default', '')): self._share_folder(nms, volume_name, dataset) self._get_capacity_info(nfs_share) def migrate_volume(self, ctxt, volume, host): """Migrate if volume and host are managed by Nexenta appliance. :param ctxt: context :param volume: a dictionary describing the volume to migrate :param host: a dictionary describing the host to migrate to """ LOG.debug('Enter: migrate_volume: id=%(id)s, host=%(host)s', {'id': volume['id'], 'host': host}) false_ret = (False, None) if volume['status'] not in ('available', 'retyping'): LOG.warning("Volume status must be 'available' or 'retyping'." 
" Current volume status: %s", volume['status']) return false_ret if 'capabilities' not in host: LOG.warning("Unsupported host. No capabilities found") return false_ret capabilities = host['capabilities'] ns_shares = capabilities['ns_shares'] dst_parts = capabilities['location_info'].split(':') dst_host, dst_volume = dst_parts[1:] if (capabilities.get('vendor_name') != 'Nexenta' or dst_parts[0] != self.__class__.__name__ or capabilities['free_capacity_gb'] < volume['size']): return false_ret nms = self.share2nms[volume['provider_location']] ssh_bindings = nms.appliance.ssh_list_bindings() shares = [] for bind in ssh_bindings: for share in ns_shares: if (share.startswith(ssh_bindings[bind][3]) and ns_shares[share] >= volume['size']): shares.append(share) if len(shares) == 0: LOG.warning("Remote NexentaStor appliance at %s should be " "SSH-bound.", share) return false_ret share = sorted(shares, key=ns_shares.get, reverse=True)[0] snapshot = { 'volume_name': volume['name'], 'volume_id': volume['id'], 'name': utils.get_migrate_snapshot_name(volume) } self.create_snapshot(snapshot) location = volume['provider_location'] src = '%(share)s/%(volume)s@%(snapshot)s' % { 'share': location.split(':')[1].split('volumes/')[1], 'volume': volume['name'], 'snapshot': snapshot['name'] } dst = ':'.join([dst_host, dst_volume.split('/volumes/')[1]]) try: nms.appliance.execute(self._get_zfs_send_recv_cmd(src, dst)) except utils.NexentaException as exc: LOG.warning("Cannot send source snapshot %(src)s to " "destination %(dst)s. Reason: %(exc)s", {'src': src, 'dst': dst, 'exc': exc}) return false_ret finally: try: self.delete_snapshot(snapshot) except utils.NexentaException as exc: LOG.warning("Cannot delete temporary source snapshot " "%(src)s on NexentaStor Appliance: %(exc)s", {'src': src, 'exc': exc}) try: self.delete_volume(volume) except utils.NexentaException as exc: LOG.warning("Cannot delete source volume %(volume)s on " "NexentaStor Appliance: %(exc)s", {'volume': volume['name'], 'exc': exc}) dst_nms = self._get_nms_for_url(capabilities['nms_url']) dst_snapshot = '%s/%s@%s' % (dst_volume.split('volumes/')[1], volume['name'], snapshot['name']) try: dst_nms.snapshot.destroy(dst_snapshot, '') except utils.NexentaException as exc: LOG.warning("Cannot delete temporary destination snapshot " "%(dst)s on NexentaStor Appliance: %(exc)s", {'dst': dst_snapshot, 'exc': exc}) return True, {'provider_location': share} def _get_zfs_send_recv_cmd(self, src, dst): """Returns rrmgr command for source and destination.""" return utils.get_rrmgr_cmd(src, dst, compression=self.rrmgr_compression, tcp_buf_size=self.rrmgr_tcp_buf_size, connections=self.rrmgr_connections) def initialize_connection(self, volume, connector): """Allow connection to connector and return connection info. :param volume: volume reference :param connector: connector reference """ export = '%s/%s' % (volume['provider_location'], volume['name']) data = {'export': export, 'name': 'volume'} if volume['provider_location'] in self.shares: data['options'] = self.shares[volume['provider_location']] return { 'driver_volume_type': self.driver_volume_type, 'data': data } def retype(self, context, volume, new_type, diff, host): """Convert the volume to be of the new type. 
:param context: Context :param volume: A dictionary describing the volume to migrate :param new_type: A dictionary describing the volume type to convert to :param diff: A dictionary with the difference between the two types :param host: A dictionary describing the host to migrate to, where host['host'] is its name, and host['capabilities'] is a dictionary of its reported capabilities. """ LOG.debug('Retype volume request %(vol)s to be %(type)s ' '(host: %(host)s), diff %(diff)s.', {'vol': volume['name'], 'type': new_type, 'host': host, 'diff': diff}) options = dict( compression='compression', dedup='dedup', description='nms:description' ) retyped = False migrated = False model_update = None src_backend = self.__class__.__name__ dst_backend = host['capabilities']['location_info'].split(':')[0] if src_backend != dst_backend: LOG.warning('Cannot retype from %(src_backend)s to ' '%(dst_backend)s.', {'src_backend': src_backend, 'dst_backend': dst_backend}) return False hosts = (volume['host'], host['host']) old, new = hosts if old != new: migrated, provider_location = self.migrate_volume( context, volume, host) if not migrated: provider_location = volume['provider_location'] nms = self.share2nms[provider_location] else: nms_url = host['capabilities']['nms_url'] nms = self._get_nms_for_url(nms_url) model_update = provider_location provider_location = provider_location['provider_location'] share = provider_location.split(':')[1].split('volumes/')[1] folder = '%(share)s/%(volume)s' % { 'share': share, 'volume': volume['name'] } for opt in options: old, new = diff.get('extra_specs').get(opt, (False, False)) if old != new: LOG.debug('Changing %(opt)s from %(old)s to %(new)s.', {'opt': opt, 'old': old, 'new': new}) try: nms.folder.set_child_prop( folder, options[opt], new) retyped = True except utils.NexentaException: LOG.error('Error trying to change %(opt)s' ' from %(old)s to %(new)s', {'opt': opt, 'old': old, 'new': new}) return False, None return retyped or migrated, model_update def _do_create_volume(self, volume): nfs_share = volume['provider_location'] nms = self.share2nms[nfs_share] vol, dataset = self._get_share_datasets(nfs_share) folder = '%s/%s' % (dataset, volume['name']) LOG.debug('Creating folder on Nexenta Store %s', folder) nms.folder.create_with_props( vol, folder, {'compression': self.configuration.nexenta_dataset_compression} ) volume_path = self.remote_path(volume) volume_size = volume['size'] try: self._share_folder(nms, vol, folder) if getattr(self.configuration, self.driver_prefix + '_sparsed_volumes'): self._create_sparsed_file(nms, volume_path, volume_size) else: folder_path = '%s/%s' % (vol, folder) compression = nms.folder.get_child_prop( folder_path, 'compression') if compression != 'off': # Disable compression, because otherwise will not use space # on disk. nms.folder.set_child_prop( folder_path, 'compression', 'off') try: self._create_regular_file(nms, volume_path, volume_size) finally: if compression != 'off': # Backup default compression value if it was changed. 
nms.folder.set_child_prop( folder_path, 'compression', compression) self._set_rw_permissions_for_all(nms, volume_path) if self._get_nfs_server_version(nfs_share) < 4: sub_share, mnt_path = self._get_subshare_mount_point(nfs_share, volume) self._ensure_share_mounted(sub_share, mnt_path) self._get_capacity_info(nfs_share) except utils.NexentaException: try: nms.folder.destroy('%s/%s' % (vol, folder)) except utils.NexentaException: LOG.warning("Cannot destroy created folder: " "%(vol)s/%(folder)s", {'vol': vol, 'folder': folder}) raise def create_volume_from_snapshot(self, volume, snapshot): """Create new volume from other's snapshot on appliance. :param volume: reference of volume to be created :param snapshot: reference of source snapshot """ self._ensure_shares_mounted() snapshot_vol = self._get_snapshot_volume(snapshot) nfs_share = snapshot_vol['provider_location'] volume['provider_location'] = nfs_share nms = self.share2nms[nfs_share] vol, dataset = self._get_share_datasets(nfs_share) snapshot_name = '%s/%s/%s@%s' % (vol, dataset, snapshot['volume_name'], snapshot['name']) folder = '%s/%s' % (dataset, volume['name']) nms.folder.clone(snapshot_name, '%s/%s' % (vol, folder)) try: self._share_folder(nms, vol, folder) except utils.NexentaException: try: nms.folder.destroy('%s/%s' % (vol, folder), '') except utils.NexentaException: LOG.warning("Cannot destroy cloned folder: " "%(vol)s/%(folder)s", {'vol': vol, 'folder': folder}) raise if self._get_nfs_server_version(nfs_share) < 4: sub_share, mnt_path = self._get_subshare_mount_point(nfs_share, volume) self._ensure_share_mounted(sub_share, mnt_path) if (('size' in volume) and ( volume['size'] > snapshot['volume_size'])): self.extend_volume(volume, volume['size']) return {'provider_location': volume['provider_location']} def create_cloned_volume(self, volume, src_vref): """Creates a clone of the specified volume. :param volume: new volume reference :param src_vref: source volume reference """ LOG.info('Creating clone of volume: %s', src_vref['id']) snapshot = {'volume_name': src_vref['name'], 'volume_id': src_vref['id'], 'volume_size': src_vref['size'], 'name': self._get_clone_snapshot_name(volume)} # We don't delete this snapshot, because this snapshot will be origin # of new volume. This snapshot will be automatically promoted by NMS # when user will delete its origin. self.create_snapshot(snapshot) try: return self.create_volume_from_snapshot(volume, snapshot) except utils.NexentaException: LOG.error('Volume creation failed, deleting created snapshot ' '%(volume_name)s@%(name)s', snapshot) try: self.delete_snapshot(snapshot) except (utils.NexentaException, exception.SnapshotIsBusy): LOG.warning('Failed to delete zfs snapshot ' '%(volume_name)s@%(name)s', snapshot) raise def delete_volume(self, volume): """Deletes a logical volume. 
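Unmounts the volume's subfolder if it is currently mounted, recursively destroys the backing folder and, when the folder originates from a cinder clone snapshot, destroys that snapshot as well. A folder that no longer exists is ignored.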
:param volume: volume reference """ nfs_share = volume.get('provider_location') if nfs_share: nms = self.share2nms[nfs_share] vol, parent_folder = self._get_share_datasets(nfs_share) folder = '%s/%s/%s' % (vol, parent_folder, volume['name']) mount_path = self.remote_path(volume).strip( '/%s' % self.VOLUME_FILE_NAME) if mount_path in self._remotefsclient._read_mounts(): cinder.privsep.fs.umount(mount_path) try: props = nms.folder.get_child_props(folder, 'origin') or {} nms.folder.destroy(folder, '-r') except utils.NexentaException as exc: if 'does not exist' in exc.args[0]: LOG.info('Folder %s does not exist, it was ' 'already deleted.', folder) return raise self._get_capacity_info(nfs_share) origin = props.get('origin') if origin and self._is_clone_snapshot_name(origin): try: nms.snapshot.destroy(origin, '') except utils.NexentaException as exc: if 'does not exist' in exc.args[0]: LOG.info('Snapshot %s does not exist, it was ' 'already deleted.', origin) return raise def extend_volume(self, volume, new_size): """Extend an existing volume. :param volume: volume reference :param new_size: volume new size in GB """ LOG.info('Extending volume: %(id)s New size: %(size)s GB', {'id': volume['id'], 'size': new_size}) nfs_share = volume['provider_location'] nms = self.share2nms[nfs_share] volume_path = self.remote_path(volume) if getattr(self.configuration, self.driver_prefix + '_sparsed_volumes'): self._create_sparsed_file(nms, volume_path, new_size) else: block_size_mb = 1 block_count = ((new_size - volume['size']) * units.Gi / (block_size_mb * units.Mi)) nms.appliance.execute( 'dd if=/dev/zero seek=%(seek)d of=%(path)s' ' bs=%(bs)dM count=%(count)d' % { 'seek': volume['size'] * units.Gi / block_size_mb, 'path': volume_path, 'bs': block_size_mb, 'count': block_count } ) def create_snapshot(self, snapshot): """Creates a snapshot. :param snapshot: snapshot reference """ volume = self._get_snapshot_volume(snapshot) nfs_share = volume['provider_location'] nms = self.share2nms[nfs_share] vol, dataset = self._get_share_datasets(nfs_share) folder = '%s/%s/%s' % (vol, dataset, volume['name']) nms.folder.create_snapshot(folder, snapshot['name'], '-r') def delete_snapshot(self, snapshot): """Deletes a snapshot. :param snapshot: snapshot reference """ volume = self._get_snapshot_volume(snapshot) nfs_share = volume['provider_location'] nms = self.share2nms[nfs_share] vol, dataset = self._get_share_datasets(nfs_share) folder = '%s/%s/%s' % (vol, dataset, volume['name']) try: nms.snapshot.destroy('%s@%s' % (folder, snapshot['name']), '') except utils.NexentaException as exc: if 'does not exist' in exc.args[0]: LOG.info('Snapshot %(folder)s@%(snapshot)s does not ' 'exist, it was already deleted.', {'folder': folder, 'snapshot': snapshot}) return elif 'has dependent clones' in exc.args[0]: LOG.info('Snapshot %(folder)s@%(snapshot)s has dependent ' 'clones, it will be deleted later.', {'folder': folder, 'snapshot': snapshot}) return def _create_sparsed_file(self, nms, path, size): """Creates file with 0 disk usage. :param nms: nms object :param path: path to new file :param size: size of file """ nms.appliance.execute( 'truncate --size %(size)dG %(path)s' % { 'path': path, 'size': size } ) def _create_regular_file(self, nms, path, size): """Creates regular file of given size. Takes a lot of time for large files. :param nms: nms object :param path: path to new file :param size: size of file """ block_size_mb = 1 block_count = size * units.Gi / (block_size_mb * units.Mi) LOG.info('Creating regular file: %s.' 
'This may take some time.', path) nms.appliance.execute( 'dd if=/dev/zero of=%(path)s bs=%(bs)dM count=%(count)d' % { 'path': path, 'bs': block_size_mb, 'count': block_count } ) LOG.info('Regular file: %s created.', path) def _set_rw_permissions_for_all(self, nms, path): """Sets 666 permissions for the path. :param nms: nms object :param path: path to file """ nms.appliance.execute('chmod ugo+rw %s' % path) def local_path(self, volume): """Get volume path (mounted locally fs path) for given volume. :param volume: volume reference """ nfs_share = volume['provider_location'] return os.path.join(self._get_mount_point_for_share(nfs_share), volume['name'], 'volume') def _get_mount_point_for_share(self, nfs_share): """Returns path to mount point NFS share. :param nfs_share: example 172.18.194.100:/var/nfs """ nfs_share = nfs_share.encode('utf-8') return os.path.join( self.configuration.nexenta_mount_point_base, hashlib.md5(nfs_share, usedforsecurity=False).hexdigest()) def remote_path(self, volume): """Get volume path (mounted remotely fs path) for given volume. :param volume: volume reference """ nfs_share = volume['provider_location'] share = nfs_share.split(':')[1].rstrip('/') return '%s/%s/volume' % (share, volume['name']) def _share_folder(self, nms, volume, folder): """Share NFS folder on NexentaStor Appliance. :param nms: nms object :param volume: volume name :param folder: folder name """ path = '%s/%s' % (volume, folder.lstrip('/')) share_opts = { 'read_write': '*', 'read_only': '', 'root': 'nobody', 'extra_options': 'anon=0', 'recursive': 'true', 'anonymous_rw': 'true', } LOG.debug('Sharing folder %s on Nexenta Store', folder) nms.netstorsvc.share_folder('svc:/network/nfs/server:default', path, share_opts) def _load_shares_config(self, share_file): self.shares = {} self.share2nms = {} for share in self._read_config_file(share_file): # A configuration line may be either: # host:/share_name http://user:pass@host:[port]/ # or # host:/share_name http://user:pass@host:[port]/ # -o options=123,rw --other if not share.strip(): continue if share.startswith('#'): continue share_info = re.split(r'\s+', share, 2) share_address = share_info[0].strip() nms_url = share_info[1].strip() share_opts = share_info[2].strip() if len(share_info) > 2 else None if not re.match(r'.+:/.+', share_address): LOG.warning("Share %s ignored due to invalid format. " "Must be of form address:/export.", share_address) continue self.shares[share_address] = share_opts self.share2nms[share_address] = self._get_nms_for_url(nms_url) LOG.debug('Shares loaded: %s', self.shares) def _get_subshare_mount_point(self, nfs_share, volume): mnt_path = '%s/%s' % ( self._get_mount_point_for_share(nfs_share), volume['name']) sub_share = '%s/%s' % (nfs_share, volume['name']) return sub_share, mnt_path def _ensure_share_mounted(self, nfs_share, mount_path=None): """Ensure that NFS share is mounted on the host. Unlike the parent method this one accepts mount_path as an optional parameter and uses it as a mount point if provided. 
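Mounting is retried up to nfs_mount_attempts times (at least once), sleeping one second between attempts.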
:param nfs_share: NFS share name :param mount_path: mount path on the host """ mnt_flags = [] if self.shares.get(nfs_share) is not None: mnt_flags = self.shares[nfs_share].split() num_attempts = max(1, self.configuration.nfs_mount_attempts) for attempt in range(num_attempts): try: if mount_path is None: self._remotefsclient.mount(nfs_share, mnt_flags) else: if mount_path in self._remotefsclient._read_mounts(): LOG.info('Already mounted: %s', mount_path) return fileutils.ensure_tree(mount_path) self._remotefsclient._mount_nfs(nfs_share, mount_path, mnt_flags) return except Exception as e: if attempt == (num_attempts - 1): LOG.error('Mount failure for %(share)s after ' '%(count)d attempts.', {'share': nfs_share, 'count': num_attempts}) raise exception.NfsException(str(e)) LOG.warning( 'Mount attempt %(attempt)d failed: %(error)s. ' 'Retrying mount ...', {'attempt': attempt, 'error': e}) greenthread.sleep(1) def _mount_subfolders(self): ctxt = context.get_admin_context() vol_entries = self.db.volume_get_all_by_host(ctxt, self.host) for vol in vol_entries: nfs_share = vol['provider_location'] if ((nfs_share in self.shares) and (self._get_nfs_server_version(nfs_share) < 4)): sub_share, mnt_path = self._get_subshare_mount_point( nfs_share, vol) self._ensure_share_mounted(sub_share, mnt_path) def _get_nfs_server_version(self, share): if not self.nfs_versions.get(share): nms = self.share2nms[share] nfs_opts = nms.netsvc.get_confopts( 'svc:/network/nfs/server:default', 'configure') try: self.nfs_versions[share] = int( nfs_opts['nfs_server_versmax']['current']) except KeyError: self.nfs_versions[share] = int( nfs_opts['server_versmax']['current']) return self.nfs_versions[share] def _get_capacity_info(self, nfs_share): """Calculate available space on the NFS share. 
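Reads the folder's 'used' and 'available' properties over NMS, caches the free and total sizes (in GiB) in shares_with_capacities and returns a (total, free, allocated) tuple.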
:param nfs_share: example 172.18.194.100:/var/nfs """ nms = self.share2nms[nfs_share] ns_volume, ns_folder = self._get_share_datasets(nfs_share) folder_props = nms.folder.get_child_props('%s/%s' % (ns_volume, ns_folder), 'used|available') free = utils.str2size(folder_props['available']) allocated = utils.str2size(folder_props['used']) self.shares_with_capacities[nfs_share] = { 'free': utils.str2gib_size(free), 'total': utils.str2gib_size(free + allocated)} return free + allocated, free, allocated def _get_nms_for_url(self, url): """Returns initialized nms object for url.""" auto, scheme, user, password, host, port, path = ( utils.parse_nms_url(url)) return jsonrpc.NexentaJSONProxy(scheme, host, port, path, user, password, auto=auto) def _get_snapshot_volume(self, snapshot): ctxt = context.get_admin_context() return db.volume_get(ctxt, snapshot['volume_id']) def _get_volroot(self, nms): """Returns volroot property value from NexentaStor appliance.""" if not self.nms_cache_volroot: return nms.server.get_prop('volroot') if nms not in self._nms2volroot: self._nms2volroot[nms] = nms.server.get_prop('volroot') return self._nms2volroot[nms] def _get_share_datasets(self, nfs_share): nms = self.share2nms[nfs_share] volroot = self._get_volroot(nms) path = nfs_share.split(':')[1][len(volroot):].strip('/') volume_name = path.split('/')[0] folder_name = '/'.join(path.split('/')[1:]) return volume_name, folder_name def _get_clone_snapshot_name(self, volume): """Return name for snapshot that will be used to clone the volume.""" return 'cinder-clone-snapshot-%(id)s' % volume def _is_clone_snapshot_name(self, snapshot): """Check if snapshot is created for cloning.""" name = snapshot.split('@')[-1] return name.startswith('cinder-clone-snapshot-') def _update_volume_stats(self): """Retrieve stats info for NexentaStor appliance.""" LOG.debug('Updating volume stats') total_space = 0 free_space = 0 share = None for _share in self._mounted_shares: if self.shares_with_capacities[_share]['free'] > free_space: free_space = self.shares_with_capacities[_share]['free'] total_space = self.shares_with_capacities[_share]['total'] share = _share location_info = '%(driver)s:%(share)s' % { 'driver': self.__class__.__name__, 'share': share } nms_url = self.share2nms[share].url self._stats = { 'vendor_name': 'Nexenta', 'dedup': self.volume_deduplication, 'compression': self.volume_compression, 'description': self.volume_description, 'nms_url': nms_url, 'ns_shares': self.shares_with_capacities, 'driver_version': self.VERSION, 'storage_protocol': constants.NFS, 'total_capacity_gb': total_space, 'free_capacity_gb': free_space, 'reserved_percentage': self.configuration.reserved_percentage, 'QoS_support': False, 'location_info': location_info, 'volume_backend_name': self.backend_name, 'nfs_mount_point_base': self.nfs_mount_point_base } ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315577.3911211 cinder-27.0.0/cinder/volume/drivers/nexenta/ns5/0000775000175000017500000000000000000000000021467 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/drivers/nexenta/ns5/__init__.py0000664000175000017500000000000000000000000023566 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/drivers/nexenta/ns5/iscsi.py0000664000175000017500000017145700000000000023172 
0ustar00zuulzuul00000000000000# Copyright 2019 Nexenta Systems, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import ipaddress import posixpath import random import uuid from oslo_log import log as logging from oslo_utils import units from cinder.common import constants from cinder import context from cinder import coordination from cinder.i18n import _ from cinder import interface from cinder import objects from cinder.volume import driver from cinder.volume.drivers.nexenta.ns5 import jsonrpc from cinder.volume.drivers.nexenta import options from cinder.volume import volume_utils LOG = logging.getLogger(__name__) @interface.volumedriver class NexentaISCSIDriver(driver.ISCSIDriver): """Executes volume driver commands on Nexenta Appliance. Version history: .. code-block:: none 1.0.0 - Initial driver version. 1.1.0 - Added HTTPS support. - Added use of sessions for REST calls. - Added abandoned volumes and snapshots cleanup. 1.2.0 - Failover support. 1.2.1 - Configurable luns per parget, target prefix. 1.3.0 - Removed target/TG caching, added support for target portals and host groups. 1.3.1 - Refactored _do_export to query exact lunMapping. 1.3.2 - Revert to snapshot support. 1.3.3 - Refactored LUN creation, use host group for LUN mappings. 1.3.4 - Adapted NexentaException for the latest Cinder. 1.3.5 - Added deferred deletion for snapshots. 1.3.6 - Fixed race between volume/clone deletion. 1.3.7 - Added consistency group support. 1.3.8 - Added volume multi-attach. 1.4.0 - Refactored iSCSI driver. - Added pagination support. - Added configuration parameters for REST API connect/read timeouts, connection retries and backoff factor. - Fixed HA failover. - Added retries on EBUSY errors. - Fixed HTTP authentication. - Added coordination for dataset operations. 1.4.1 - Support for NexentaStor tenants. 1.4.2 - Added manage/unmanage/manageable-list volume/snapshot support. 1.4.3 - Added consistency group capability to generic volume group. 
""" VERSION = '1.4.3' CI_WIKI_NAME = "Nexenta_CI" vendor_name = 'Nexenta' product_name = 'NexentaStor5' storage_protocol = constants.ISCSI driver_volume_type = 'iscsi' def __init__(self, *args, **kwargs): super(NexentaISCSIDriver, self).__init__(*args, **kwargs) if not self.configuration: message = (_('%(product_name)s %(storage_protocol)s ' 'backend configuration not found') % {'product_name': self.product_name, 'storage_protocol': self.storage_protocol}) raise jsonrpc.NefException(code='ENODATA', message=message) self.configuration.append_config_values( options.NEXENTA_CONNECTION_OPTS) self.configuration.append_config_values( options.NEXENTA_ISCSI_OPTS) self.configuration.append_config_values( options.NEXENTA_DATASET_OPTS) self.nef = None self.volume_backend_name = ( self.configuration.safe_get('volume_backend_name') or '%s_%s' % (self.product_name, self.storage_protocol)) self.target_prefix = self.configuration.nexenta_target_prefix self.target_group_prefix = ( self.configuration.nexenta_target_group_prefix) self.host_group_prefix = self.configuration.nexenta_host_group_prefix self.luns_per_target = self.configuration.nexenta_luns_per_target self.lu_writebackcache_disabled = ( self.configuration.nexenta_lu_writebackcache_disabled) self.iscsi_host = self.configuration.nexenta_host self.pool = self.configuration.nexenta_volume self.volume_group = self.configuration.nexenta_volume_group self.portal_port = self.configuration.nexenta_iscsi_target_portal_port self.portals = self.configuration.nexenta_iscsi_target_portals self.sparsed_volumes = self.configuration.nexenta_sparse self.deduplicated_volumes = self.configuration.nexenta_dataset_dedup self.compressed_volumes = ( self.configuration.nexenta_dataset_compression) self.dataset_description = ( self.configuration.nexenta_dataset_description) self.iscsi_target_portal_port = ( self.configuration.nexenta_iscsi_target_portal_port) self.root_path = posixpath.join(self.pool, self.volume_group) self.dataset_blocksize = self.configuration.nexenta_ns5_blocksize if not self.configuration.nexenta_ns5_blocksize > 128: self.dataset_blocksize *= units.Ki self.group_snapshot_template = ( self.configuration.nexenta_group_snapshot_template) self.origin_snapshot_template = ( self.configuration.nexenta_origin_snapshot_template) @staticmethod def get_driver_options(): return ( options.NEXENTA_CONNECTION_OPTS + options.NEXENTA_ISCSI_OPTS + options.NEXENTA_DATASET_OPTS ) def do_setup(self, context): self.nef = jsonrpc.NefProxy(self.driver_volume_type, self.root_path, self.configuration) def check_for_setup_error(self): """Check root volume group and iSCSI target service.""" try: self.nef.volumegroups.get(self.root_path) except jsonrpc.NefException as error: if error.code != 'ENOENT': raise payload = {'path': self.root_path, 'volumeBlockSize': self.dataset_blocksize} self.nef.volumegroups.create(payload) service = self.nef.services.get('iscsit') if service['state'] != 'online': message = (_('iSCSI target service is not online: %(state)s') % {'state': service['state']}) raise jsonrpc.NefException(code='ESRCH', message=message) def create_volume(self, volume): """Create a zfs volume on appliance. 
:param volume: volume reference :returns: model update dict for volume reference """ payload = { 'path': self._get_volume_path(volume), 'volumeSize': volume['size'] * units.Gi, 'volumeBlockSize': self.dataset_blocksize, 'compressionMode': self.compressed_volumes, 'sparseVolume': self.sparsed_volumes } self.nef.volumes.create(payload) @coordination.synchronized('{self.nef.lock}') def delete_volume(self, volume): """Deletes a logical volume. :param volume: volume reference """ volume_path = self._get_volume_path(volume) delete_payload = {'snapshots': True} try: self.nef.volumes.delete(volume_path, delete_payload) except jsonrpc.NefException as error: if error.code != 'EEXIST': raise snapshots_tree = {} snapshots_payload = {'parent': volume_path, 'fields': 'path'} snapshots = self.nef.snapshots.list(snapshots_payload) for snapshot in snapshots: clones_payload = {'fields': 'clones,creationTxg'} data = self.nef.snapshots.get(snapshot['path'], clones_payload) if data['clones']: snapshots_tree[data['creationTxg']] = data['clones'][0] if snapshots_tree: clone_path = snapshots_tree[max(snapshots_tree)] self.nef.volumes.promote(clone_path) self.nef.volumes.delete(volume_path, delete_payload) def extend_volume(self, volume, new_size): """Extend an existing volume. :param volume: volume reference :param new_size: volume new size in GB """ volume_path = self._get_volume_path(volume) payload = {'volumeSize': new_size * units.Gi} self.nef.volumes.set(volume_path, payload) @coordination.synchronized('{self.nef.lock}') def create_snapshot(self, snapshot): """Creates a snapshot. :param snapshot: snapshot reference """ snapshot_path = self._get_snapshot_path(snapshot) payload = {'path': snapshot_path} self.nef.snapshots.create(payload) @coordination.synchronized('{self.nef.lock}') def delete_snapshot(self, snapshot): """Deletes a snapshot. :param snapshot: snapshot reference """ snapshot_path = self._get_snapshot_path(snapshot) payload = {'defer': True} self.nef.snapshots.delete(snapshot_path, payload) def snapshot_revert_use_temp_snapshot(self): # Considering that NexentaStor based drivers use COW images # for storing snapshots, having chains of such images, # creating a backup snapshot when reverting one is not # actually helpful. return False def revert_to_snapshot(self, context, volume, snapshot): """Revert volume to snapshot.""" volume_path = self._get_volume_path(volume) payload = {'snapshot': snapshot['name']} self.nef.volumes.rollback(volume_path, payload) @coordination.synchronized('{self.nef.lock}') def create_volume_from_snapshot(self, volume, snapshot): """Create new volume from other's snapshot on appliance. :param volume: reference of volume to be created :param snapshot: reference of source snapshot """ snapshot_path = self._get_snapshot_path(snapshot) clone_path = self._get_volume_path(volume) payload = {'targetPath': clone_path} self.nef.snapshots.clone(snapshot_path, payload) if volume['size'] > snapshot['volume_size']: self.extend_volume(volume, volume['size']) def create_cloned_volume(self, volume, src_vref): """Creates a clone of the specified volume. 
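A temporary origin snapshot of the source volume is created and cloned into the new volume; the snapshot is then removed with deferred deletion, so the appliance drops it automatically once the clone no longer depends on it.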
:param volume: new volume reference :param src_vref: source volume reference """ snapshot = { 'name': self.origin_snapshot_template % volume['id'], 'volume_id': src_vref['id'], 'volume_name': src_vref['name'], 'volume_size': src_vref['size'] } self.create_snapshot(snapshot) try: self.create_volume_from_snapshot(volume, snapshot) except jsonrpc.NefException as error: LOG.debug('Failed to create clone %(clone)s ' 'from volume %(volume)s: %(error)s', {'clone': volume['name'], 'volume': src_vref['name'], 'error': error}) raise finally: try: self.delete_snapshot(snapshot) except jsonrpc.NefException as error: LOG.debug('Failed to delete temporary snapshot ' '%(volume)s@%(snapshot)s: %(error)s', {'volume': src_vref['name'], 'snapshot': snapshot['name'], 'error': error}) def create_export(self, context, volume, connector): """Export a volume.""" pass def ensure_export(self, context, volume): """Synchronously recreate an export for a volume.""" pass def remove_export(self, context, volume): """Remove an export for a volume.""" pass def terminate_connection(self, volume, connector, **kwargs): """Terminate a connection to a volume. :param volume: a volume object :param connector: a connector object :returns: dictionary of connection information """ info = {'driver_volume_type': self.driver_volume_type, 'data': {}} host_iqn = None host_groups = [] volume_path = self._get_volume_path(volume) if isinstance(connector, dict) and 'initiator' in connector: connectors = [] for attachment in volume['volume_attachment']: connectors.append(attachment.get('connector')) if connectors.count(connector) > 1: LOG.debug('Detected multiple connections on host ' '%(host_name)s [%(host_ip)s] for volume ' '%(volume)s, skip terminate volume connection', {'host_name': connector.get('host', 'unknown'), 'host_ip': connector.get('ip', 'unknown'), 'volume': volume['name']}) return True host_iqn = connector.get('initiator') host_groups.append(options.DEFAULT_HOST_GROUP) host_group = self._get_host_group(host_iqn) if host_group is not None: host_groups.append(host_group) LOG.debug('Terminate connection for volume %(volume)s ' 'and initiator %(initiator)s', {'volume': volume['name'], 'initiator': host_iqn}) else: LOG.debug('Terminate all connections for volume %(volume)s', {'volume': volume['name']}) payload = {'volume': volume_path} mappings = self.nef.mappings.list(payload) if not mappings: LOG.debug('There are no LUN mappings found for volume %(volume)s', {'volume': volume['name']}) return info for mapping in mappings: mapping_id = mapping.get('id') mapping_tg = mapping.get('targetGroup') mapping_hg = mapping.get('hostGroup') if host_iqn is None or mapping_hg in host_groups: LOG.debug('Delete LUN mapping %(id)s for volume %(volume)s, ' 'target group %(tg)s and host group %(hg)s', {'id': mapping_id, 'volume': volume['name'], 'tg': mapping_tg, 'hg': mapping_hg}) self._delete_lun_mapping(mapping_id) else: LOG.debug('Skip LUN mapping %(id)s for volume %(volume)s, ' 'target group %(tg)s and host group %(hg)s', {'id': mapping_id, 'volume': volume['name'], 'tg': mapping_tg, 'hg': mapping_hg}) return info def _update_volume_stats(self): """Retrieve stats info for NexentaStor appliance.""" LOG.debug('Updating volume backend %(volume_backend_name)s stats', {'volume_backend_name': self.volume_backend_name}) payload = {'fields': 'bytesAvailable,bytesUsed'} dataset = self.nef.volumegroups.get(self.root_path, payload) free = dataset['bytesAvailable'] // units.Gi used = dataset['bytesUsed'] // units.Gi total = free + used location_info = 
'%(driver)s:%(host)s:%(pool)s/%(group)s' % { 'driver': self.__class__.__name__, 'host': self.iscsi_host, 'pool': self.pool, 'group': self.volume_group, } self._stats = { 'vendor_name': self.vendor_name, 'dedup': self.deduplicated_volumes, 'compression': self.compressed_volumes, 'description': self.dataset_description, 'nef_url': self.nef.host, 'nef_port': self.nef.port, 'driver_version': self.VERSION, 'storage_protocol': self.storage_protocol, 'sparsed_volumes': self.sparsed_volumes, 'total_capacity_gb': total, 'free_capacity_gb': free, 'reserved_percentage': self.configuration.reserved_percentage, 'QoS_support': False, 'multiattach': True, 'consistencygroup_support': True, 'consistent_group_snapshot_enabled': True, 'location_info': location_info, 'volume_backend_name': self.volume_backend_name, 'iscsi_target_portal_port': self.iscsi_target_portal_port } def _get_volume_path(self, volume): """Return ZFS datset path for the volume.""" return posixpath.join(self.root_path, volume['name']) def _get_snapshot_path(self, snapshot): """Return ZFS snapshot path for the snapshot.""" volume_name = snapshot['volume_name'] snapshot_name = snapshot['name'] volume_path = posixpath.join(self.root_path, volume_name) return '%s@%s' % (volume_path, snapshot_name) def _get_target_group_name(self, target_name): """Return Nexenta iSCSI target group name for volume.""" return target_name.replace( self.configuration.nexenta_target_prefix, self.configuration.nexenta_target_group_prefix ) def _get_target_name(self, target_group_name): """Return Nexenta iSCSI target name for volume.""" return target_group_name.replace( self.configuration.nexenta_target_group_prefix, self.configuration.nexenta_target_prefix ) def _get_host_addresses(self): """Return Nexenta IP addresses list.""" host_addresses = [] items = self.nef.netaddrs.list() for item in items: ip_cidr = str(item['address']) ip_addr, ip_mask = ip_cidr.split('/') ip_obj = ipaddress.ip_address(ip_addr) if not ip_obj.is_loopback: host_addresses.append(ip_obj.exploded) LOG.debug('Configured IP addresses: %(addresses)s', {'addresses': host_addresses}) return host_addresses def _get_host_portals(self): """Return configured iSCSI portals list.""" host_portals = [] host_addresses = self._get_host_addresses() portal_host = self.iscsi_host if portal_host: if portal_host in host_addresses: if self.portal_port: portal_port = int(self.portal_port) else: portal_port = options.DEFAULT_ISCSI_PORT host_portal = '%s:%s' % (portal_host, portal_port) host_portals.append(host_portal) else: LOG.debug('Skip not a local portal IP address %(portal)s', {'portal': portal_host}) else: LOG.debug('Configuration parameter nexenta_host is not defined') for portal in self.portals.split(','): if not portal: continue host_port = portal.split(':') portal_host = host_port[0] if portal_host in host_addresses: if len(host_port) == 2: portal_port = int(host_port[1]) else: portal_port = options.DEFAULT_ISCSI_PORT host_portal = '%s:%s' % (portal_host, portal_port) if host_portal not in host_portals: host_portals.append(host_portal) else: LOG.debug('Skip not a local portal IP address %(portal)s', {'portal': portal_host}) LOG.debug('Configured iSCSI portals: %(portals)s', {'portals': host_portals}) return host_portals def _target_group_props(self, group_name, host_portals): """Check and update an existing targets/portals for given target group. 
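The result maps each member target IQN to its list of local portals, e.g. {'<target iqn>': ['192.168.1.1:3260']} (values illustrative); an empty dictionary means the group cannot be used (not a cinder-managed group, a missing member target, or a member bound to a non-local portal).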
:param group_name: target group name :param host_portals: configured host portals list :returns: dictionary of portals per target """ if not group_name.startswith(self.target_group_prefix): LOG.debug('Skip not a cinder target group %(group)s', {'group': group_name}) return {} group_props = {} payload = {'name': group_name} data = self.nef.targetgroups.list(payload) if not data: LOG.debug('Skip target group %(group)s: group not found', {'group': group_name}) return {} target_names = data[0]['members'] if not target_names: target_name = self._get_target_name(group_name) self._create_target(target_name, host_portals) self._update_target_group(group_name, [target_name]) group_props[target_name] = host_portals return group_props for target_name in target_names: group_props[target_name] = [] payload = {'name': target_name} data = self.nef.targets.list(payload) if not data: LOG.debug('Skip target group %(group)s: ' 'group member %(target)s not found', {'group': group_name, 'target': target_name}) return {} target_portals = data[0]['portals'] if not target_portals: LOG.debug('Skip target group %(group)s: ' 'group member %(target)s has no portals', {'group': group_name, 'target': target_name}) return {} for item in target_portals: target_portal = '%s:%s' % (item['address'], item['port']) if target_portal not in host_portals: LOG.debug('Skip target group %(group)s: ' 'group member %(target)s bind to a ' 'non local portal address %(portal)s', {'group': group_name, 'target': target_name, 'portal': target_portal}) return {} group_props[target_name].append(target_portal) return group_props def initialize_connection(self, volume, connector): """Do all steps to get zfs volume exported at separate target. :param volume: volume reference :param connector: connector reference :returns: dictionary of connection information """ volume_path = self._get_volume_path(volume) host_iqn = connector.get('initiator') LOG.debug('Initialize connection for volume: %(volume)s ' 'and initiator: %(initiator)s', {'volume': volume_path, 'initiator': host_iqn}) host_groups = [options.DEFAULT_HOST_GROUP] host_group = self._get_host_group(host_iqn) if host_group: host_groups.append(host_group) host_portals = self._get_host_portals() props_portals = [] props_iqns = [] props_luns = [] payload = {'volume': volume_path} mappings = self.nef.mappings.list(payload) for mapping in mappings: mapping_id = mapping['id'] mapping_lu = mapping['lun'] mapping_hg = mapping['hostGroup'] mapping_tg = mapping['targetGroup'] if mapping_tg == options.DEFAULT_TARGET_GROUP: LOG.debug('Delete LUN mapping %(id)s for target group %(tg)s', {'id': mapping_id, 'tg': mapping_tg}) self._delete_lun_mapping(mapping_id) continue if mapping_hg not in host_groups: LOG.debug('Skip LUN mapping %(id)s for host group %(hg)s', {'id': mapping_id, 'hg': mapping_hg}) continue group_props = self._target_group_props(mapping_tg, host_portals) if not group_props: LOG.debug('Skip LUN mapping %(id)s for target group %(tg)s', {'id': mapping_id, 'tg': mapping_tg}) continue for target_iqn in group_props: target_portals = group_props[target_iqn] props_portals += target_portals props_iqns += [target_iqn] * len(target_portals) props_luns += [mapping_lu] * len(target_portals) props = {} props['target_discovered'] = False props['encrypted'] = False props['qos_specs'] = None props['volume_id'] = volume['id'] props['access_mode'] = 'rw' multipath = connector.get('multipath', False) if props_luns: if multipath: props['target_portals'] = props_portals props['target_iqns'] = props_iqns 
props['target_luns'] = props_luns else: index = random.randrange(0, len(props_luns)) props['target_portal'] = props_portals[index] props['target_iqn'] = props_iqns[index] props['target_lun'] = props_luns[index] LOG.debug('Use existing LUN mapping(s) %(props)s', {'props': props}) return {'driver_volume_type': self.driver_volume_type, 'data': props} if host_group is None: host_group = '%s-%s' % (self.host_group_prefix, uuid.uuid4().hex) self._create_host_group(host_group, [host_iqn]) mappings_spread = {} targets_spread = {} data = self.nef.targetgroups.list() for item in data: target_group = item['name'] group_props = self._target_group_props(target_group, host_portals) members = len(group_props) if members == 0: LOG.debug('Skip unsuitable target group %(tg)s', {'tg': target_group}) continue payload = {'targetGroup': target_group} data = self.nef.mappings.list(payload) mappings = len(data) if not mappings < self.luns_per_target: LOG.debug('Skip target group %(tg)s: ' 'group members limit reached: %(limit)s', {'tg': target_group, 'limit': mappings}) continue targets_spread[target_group] = group_props mappings_spread[target_group] = mappings LOG.debug('Found target group %(tg)s with %(members)s ' 'members and %(mappings)s LUNs', {'tg': target_group, 'members': members, 'mappings': mappings}) if not mappings_spread: target = '%s-%s' % (self.target_prefix, uuid.uuid4().hex) target_group = self._get_target_group_name(target) self._create_target(target, host_portals) self._create_target_group(target_group, [target]) props_portals += host_portals props_iqns += [target] * len(host_portals) else: target_group = min(mappings_spread, key=mappings_spread.get) targets = targets_spread[target_group] members = targets.keys() mappings = mappings_spread[target_group] LOG.debug('Using existing target group %(tg)s ' 'with members %(members)s and %(mappings)s LUNs', {'tg': target_group, 'members': members, 'mappings': mappings}) for target in targets: portals = targets[target] props_portals += portals props_iqns += [target] * len(portals) payload = {'volume': volume_path, 'targetGroup': target_group, 'hostGroup': host_group} self.nef.mappings.create(payload) mapping = {} for attempt in range(0, self.nef.retries): mapping = self.nef.mappings.list(payload) if mapping: break self.nef.delay(attempt) if not mapping: message = (_('Failed to get LUN number for %(volume)s') % {'volume': volume_path}) raise jsonrpc.NefException(code='ENOTBLK', message=message) lun = mapping[0]['lun'] props_luns = [lun] * len(props_iqns) if multipath: props['target_portals'] = props_portals props['target_iqns'] = props_iqns props['target_luns'] = props_luns else: index = random.randrange(0, len(props_luns)) props['target_portal'] = props_portals[index] props['target_iqn'] = props_iqns[index] props['target_lun'] = props_luns[index] if not self.lu_writebackcache_disabled: LOG.debug('Get LUN guid for volume %(volume)s', {'volume': volume_path}) payload = {'fields': 'guid', 'volume': volume_path} data = self.nef.logicalunits.list(payload) guid = data[0]['guid'] payload = {'writebackCacheDisabled': False} self.nef.logicalunits.set(guid, payload) LOG.debug('Created new LUN mapping(s): %(props)s', {'props': props}) return {'driver_volume_type': self.driver_volume_type, 'data': props} def _create_target_group(self, name, members): """Create a new target group with members. 
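# --- Illustrative example (not part of the driver source) ---------------------
# A minimal sketch of how initialize_connection() above picks an existing
# target group when several are usable: the group with the fewest LUN mappings
# wins via min() over the mappings_spread counters. The group names and counts
# below are hypothetical.
mappings_spread = {
    'cinder-target-group-aaaa': 5,
    'cinder-target-group-bbbb': 2,
    'cinder-target-group-cccc': 7,
}
least_loaded = min(mappings_spread, key=mappings_spread.get)
print(least_loaded)   # cinder-target-group-bbbb
# ------------------------------------------------------------------------------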
:param name: group name :param members: group members list """ payload = {'name': name, 'members': members} self.nef.targetgroups.create(payload) def _update_target_group(self, name, members): """Update a existing target group with new members. :param name: group name :param members: group members list """ payload = {'members': members} self.nef.targetgroups.set(name, payload) def _delete_lun_mapping(self, name): """Delete an existing LUN mapping. :param name: LUN mapping ID """ self.nef.mappings.delete(name) def _create_target(self, name, portals): """Create a new target with portals. :param name: target name :param portals: target portals list """ payload = {'name': name, 'portals': self._s2d(portals)} self.nef.targets.create(payload) def _get_host_group(self, member): """Find existing host group by group member. :param member: host group member :returns: host group name """ host_groups = self.nef.hostgroups.list() for host_group in host_groups: members = host_group['members'] if member in members: name = host_group['name'] LOG.debug('Found host group %(name)s for member %(member)s', {'name': name, 'member': member}) return name return None def _create_host_group(self, name, members): """Create a new host group. :param name: host group name :param members: host group members list """ payload = {'name': name, 'members': members} self.nef.hostgroups.create(payload) @staticmethod def _s2d(css): """Parse list of colon-separated address and port to dictionary. :param css: list of colon-separated address and port :returns: dictionary """ result = [] for key_val in css: key, val = key_val.split(':') result.append({'address': key, 'port': int(val)}) return result @staticmethod def _d2s(kvp): """Parse dictionary to list of colon-separated address and port. :param kvp: dictionary :returns: list of colon-separated address and port """ result = [] for key_val in kvp: result.append('%s:%s' % (key_val['address'], key_val['port'])) return result def create_consistencygroup(self, context, group): """Creates a consistency group. :param context: the context of the caller. :param group: the dictionary of the consistency group to be created. :returns: group_model_update """ group_model_update = {} return group_model_update def create_group(self, context, group): """Creates a group. :param context: the context of the caller. :param group: the group object. :returns: model_update """ return self.create_consistencygroup(context, group) def delete_consistencygroup(self, context, group, volumes): """Deletes a consistency group. :param context: the context of the caller. :param group: the dictionary of the consistency group to be deleted. :param volumes: a list of volume dictionaries in the group. :returns: group_model_update, volumes_model_update """ group_model_update = {} volumes_model_update = [] for volume in volumes: self.delete_volume(volume) return group_model_update, volumes_model_update def delete_group(self, context, group, volumes): """Deletes a group. :param context: the context of the caller. :param group: the group object. :param volumes: a list of volume objects in the group. :returns: model_update, volumes_model_update """ return self.delete_consistencygroup(context, group, volumes) def update_consistencygroup(self, context, group, add_volumes=None, remove_volumes=None): """Updates a consistency group. :param context: the context of the caller. :param group: the dictionary of the consistency group to be updated. :param add_volumes: a list of volume dictionaries to be added. 
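# --- Illustrative example (not part of the driver source) ---------------------
# A minimal sketch of the _s2d()/_d2s() helpers above, which convert between
# 'address:port' strings and the {'address': ..., 'port': ...} dictionaries
# expected by the NEF API when creating iSCSI targets. Addresses are made up.
def s2d(css):
    """List of 'address:port' strings -> list of portal dictionaries."""
    result = []
    for key_val in css:
        key, val = key_val.split(':')
        result.append({'address': key, 'port': int(val)})
    return result

def d2s(kvp):
    """List of portal dictionaries -> list of 'address:port' strings."""
    return ['%s:%s' % (item['address'], item['port']) for item in kvp]

portals = ['10.0.0.1:3260', '10.0.0.2:3261']
print(s2d(portals))                   # [{'address': '10.0.0.1', 'port': 3260}, ...]
print(d2s(s2d(portals)) == portals)   # True
# ------------------------------------------------------------------------------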
:param remove_volumes: a list of volume dictionaries to be removed. :returns: group_model_update, add_volumes_update, remove_volumes_update """ group_model_update = {} add_volumes_update = [] remove_volumes_update = [] return group_model_update, add_volumes_update, remove_volumes_update def update_group(self, context, group, add_volumes=None, remove_volumes=None): """Updates a group. :param context: the context of the caller. :param group: the group object. :param add_volumes: a list of volume objects to be added. :param remove_volumes: a list of volume objects to be removed. :returns: model_update, add_volumes_update, remove_volumes_update """ return self.update_consistencygroup(context, group, add_volumes, remove_volumes) def create_cgsnapshot(self, context, cgsnapshot, snapshots): """Creates a consistency group snapshot. :param context: the context of the caller. :param cgsnapshot: the dictionary of the cgsnapshot to be created. :param snapshots: a list of snapshot dictionaries in the cgsnapshot. :returns: group_model_update, snapshots_model_update """ group_model_update = {} snapshots_model_update = [] cgsnapshot_name = self.group_snapshot_template % cgsnapshot['id'] cgsnapshot_path = '%s@%s' % (self.root_path, cgsnapshot_name) create_payload = {'path': cgsnapshot_path, 'recursive': True} self.nef.snapshots.create(create_payload) for snapshot in snapshots: volume_name = snapshot['volume_name'] volume_path = posixpath.join(self.root_path, volume_name) snapshot_name = snapshot['name'] snapshot_path = '%s@%s' % (volume_path, cgsnapshot_name) rename_payload = {'newName': snapshot_name} self.nef.snapshots.rename(snapshot_path, rename_payload) delete_payload = {'defer': True, 'recursive': True} self.nef.snapshots.delete(cgsnapshot_path, delete_payload) return group_model_update, snapshots_model_update def create_group_snapshot(self, context, group_snapshot, snapshots): """Creates a group_snapshot. :param context: the context of the caller. :param group_snapshot: the GroupSnapshot object to be created. :param snapshots: a list of Snapshot objects in the group_snapshot. :returns: model_update, snapshots_model_update """ return self.create_cgsnapshot(context, group_snapshot, snapshots) def delete_cgsnapshot(self, context, cgsnapshot, snapshots): """Deletes a consistency group snapshot. :param context: the context of the caller. :param cgsnapshot: the dictionary of the cgsnapshot to be created. :param snapshots: a list of snapshot dictionaries in the cgsnapshot. :returns: group_model_update, snapshots_model_update """ group_model_update = {} snapshots_model_update = [] for snapshot in snapshots: self.delete_snapshot(snapshot) return group_model_update, snapshots_model_update def delete_group_snapshot(self, context, group_snapshot, snapshots): """Deletes a group_snapshot. :param context: the context of the caller. :param group_snapshot: the GroupSnapshot object to be deleted. :param snapshots: a list of snapshot objects in the group_snapshot. :returns: model_update, snapshots_model_update """ return self.delete_cgsnapshot(context, group_snapshot, snapshots) def create_consistencygroup_from_src(self, context, group, volumes, cgsnapshot=None, snapshots=None, source_cg=None, source_vols=None): """Creates a consistency group from source. :param context: the context of the caller. :param group: the dictionary of the consistency group to be created. :param volumes: a list of volume dictionaries in the group. :param cgsnapshot: the dictionary of the cgsnapshot as source. 
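# --- Illustrative example (not part of the driver source) ---------------------
# A minimal sketch of the path arithmetic in create_cgsnapshot() above: one
# recursive snapshot is taken at the volume group root, every member volume
# then has a child snapshot with the same name, which is renamed to the Cinder
# snapshot name before the group-level snapshot is removed with deferred
# destroy. All paths and names below are hypothetical.
import posixpath

root_path = 'tank/cinder-group'             # assumed volume group path
cgsnapshot_name = 'group-snapshot-1111'     # assumed group snapshot name
cgsnapshot_path = '%s@%s' % (root_path, cgsnapshot_name)

volume_name = 'volume-2222'
volume_path = posixpath.join(root_path, volume_name)
child_snapshot_path = '%s@%s' % (volume_path, cgsnapshot_name)

print(cgsnapshot_path)        # tank/cinder-group@group-snapshot-1111
print(child_snapshot_path)    # tank/cinder-group/volume-2222@group-snapshot-1111
# The child snapshot is then renamed via {'newName': <cinder snapshot name>}.
# ------------------------------------------------------------------------------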
:param snapshots: a list of snapshot dictionaries in the cgsnapshot. :param source_cg: the dictionary of a consistency group as source. :param source_vols: a list of volume dictionaries in the source_cg. :returns: group_model_update, volumes_model_update """ group_model_update = {} volumes_model_update = [] if cgsnapshot and snapshots: for volume, snapshot in zip(volumes, snapshots): self.create_volume_from_snapshot(volume, snapshot) elif source_cg and source_vols: snapshot_name = self.origin_snapshot_template % group['id'] snapshot_path = '%s@%s' % (self.root_path, snapshot_name) create_payload = {'path': snapshot_path, 'recursive': True} self.nef.snapshots.create(create_payload) for volume, source_vol in zip(volumes, source_vols): snapshot = { 'name': snapshot_name, 'volume_id': source_vol['id'], 'volume_name': source_vol['name'], 'volume_size': source_vol['size'] } self.create_volume_from_snapshot(volume, snapshot) delete_payload = {'defer': True, 'recursive': True} self.nef.snapshots.delete(snapshot_path, delete_payload) return group_model_update, volumes_model_update def create_group_from_src(self, context, group, volumes, group_snapshot=None, snapshots=None, source_group=None, source_vols=None): """Creates a group from source. :param context: the context of the caller. :param group: the Group object to be created. :param volumes: a list of Volume objects in the group. :param group_snapshot: the GroupSnapshot object as source. :param snapshots: a list of snapshot objects in group_snapshot. :param source_group: the Group object as source. :param source_vols: a list of volume objects in the source_group. :returns: model_update, volumes_model_update """ return self.create_consistencygroup_from_src(context, group, volumes, group_snapshot, snapshots, source_group, source_vols) def _get_existing_volume(self, existing_ref): types = { 'source-name': 'name', 'source-guid': 'guid' } if not any(key in types for key in existing_ref): keys = ', '.join(types.keys()) message = (_('Manage existing volume failed ' 'due to invalid backend reference. ' 'Volume reference must contain ' 'at least one valid key: %(keys)s') % {'keys': keys}) raise jsonrpc.NefException(code='EINVAL', message=message) payload = { 'parent': self.root_path, 'fields': 'name,path,volumeSize' } for key, value in types.items(): if key in existing_ref: payload[value] = existing_ref[key] existing_volumes = self.nef.volumes.list(payload) if len(existing_volumes) == 1: volume_path = existing_volumes[0]['path'] volume_name = existing_volumes[0]['name'] volume_size = existing_volumes[0]['volumeSize'] // units.Gi existing_volume = { 'name': volume_name, 'path': volume_path, 'size': volume_size } vid = volume_utils.extract_id_from_volume_name(volume_name) if volume_utils.check_already_managed_volume(vid): message = (_('Volume %(name)s already managed') % {'name': volume_name}) raise jsonrpc.NefException(code='EBUSY', message=message) return existing_volume elif not existing_volumes: code = 'ENOENT' reason = _('no matching volumes were found') else: code = 'EINVAL' reason = _('too many volumes were found') message = (_('Unable to manage existing volume by ' 'reference %(reference)s: %(reason)s') % {'reference': existing_ref, 'reason': reason}) raise jsonrpc.NefException(code=code, message=message) def _check_already_managed_snapshot(self, snapshot_id): """Check cinder database for already managed snapshot. 
:param snapshot_id: snapshot id parameter :returns: return True, if database entry with specified snapshot id exists, otherwise return False """ if not isinstance(snapshot_id, str): return False try: uuid.UUID(snapshot_id, version=4) except ValueError: return False ctxt = context.get_admin_context() return objects.Snapshot.exists(ctxt, snapshot_id) def _get_existing_snapshot(self, snapshot, existing_ref): types = { 'source-name': 'name', 'source-guid': 'guid' } if not any(key in types for key in existing_ref): keys = ', '.join(types.keys()) message = (_('Manage existing snapshot failed ' 'due to invalid backend reference. ' 'Snapshot reference must contain ' 'at least one valid key: %(keys)s') % {'keys': keys}) raise jsonrpc.NefException(code='EINVAL', message=message) volume_name = snapshot['volume_name'] volume_size = snapshot['volume_size'] volume = {'name': volume_name} volume_path = self._get_volume_path(volume) payload = { 'parent': volume_path, 'fields': 'name,path', 'recursive': False } for key, value in types.items(): if key in existing_ref: payload[value] = existing_ref[key] existing_snapshots = self.nef.snapshots.list(payload) if len(existing_snapshots) == 1: name = existing_snapshots[0]['name'] path = existing_snapshots[0]['path'] existing_snapshot = { 'name': name, 'path': path, 'volume_name': volume_name, 'volume_size': volume_size } sid = volume_utils.extract_id_from_snapshot_name(name) if self._check_already_managed_snapshot(sid): message = (_('Snapshot %(name)s already managed') % {'name': name}) raise jsonrpc.NefException(code='EBUSY', message=message) return existing_snapshot elif not existing_snapshots: code = 'ENOENT' reason = _('no matching snapshots were found') else: code = 'EINVAL' reason = _('too many snapshots were found') message = (_('Unable to manage existing snapshot by ' 'reference %(reference)s: %(reason)s') % {'reference': existing_ref, 'reason': reason}) raise jsonrpc.NefException(code=code, message=message) @coordination.synchronized('{self.nef.lock}') def manage_existing(self, volume, existing_ref): """Brings an existing backend storage object under Cinder management. existing_ref is passed straight through from the API request's manage_existing_ref value, and it is up to the driver how this should be interpreted. It should be sufficient to identify a storage object that the driver should somehow associate with the newly-created cinder volume structure. There are two ways to do this: 1. Rename the backend storage object so that it matches the, volume['name'] which is how drivers traditionally map between a cinder volume and the associated backend storage object. 2. Place some metadata on the volume, or somewhere in the backend, that allows other driver requests (e.g. delete, clone, attach, detach...) to locate the backend storage object when required. If the existing_ref doesn't make sense, or doesn't refer to an existing backend storage object, raise a ManageExistingInvalidReference exception. The volume may have a volume_type, and the driver can inspect that and compare against the properties of the referenced backend storage object. If they are incompatible, raise a ManageExistingVolumeTypeMismatch, specifying a reason for the failure. 
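# --- Illustrative example (not part of the driver source) ---------------------
# A minimal sketch of the backend reference formats accepted by
# _get_existing_volume()/_get_existing_snapshot() above: at least one of
# 'source-name' or 'source-guid' must be present, and each key is translated
# to the corresponding NEF API field before querying the appliance. The
# reference values below are made up.
types = {'source-name': 'name', 'source-guid': 'guid'}

existing_ref = {'source-name': 'volume-0cba7f2c'}
if not any(key in types for key in existing_ref):
    raise ValueError('reference must contain source-name or source-guid')

payload = {field: existing_ref[key]
           for key, field in types.items() if key in existing_ref}
print(payload)   # {'name': 'volume-0cba7f2c'}
# ------------------------------------------------------------------------------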
:param volume: Cinder volume to manage :param existing_ref: Driver-specific information used to identify a volume """ existing_volume = self._get_existing_volume(existing_ref) existing_volume_path = existing_volume['path'] payload = {'volume': existing_volume_path} mappings = self.nef.mappings.list(payload) if mappings: message = (_('Failed to manage existing volume %(path)s ' 'due to existing LUN mappings: %(mappings)s') % {'path': existing_volume_path, 'mappings': mappings}) raise jsonrpc.NefException(code='EEXIST', message=message) if existing_volume['name'] != volume['name']: volume_path = self._get_volume_path(volume) payload = {'newPath': volume_path} self.nef.volumes.rename(existing_volume_path, payload) def manage_existing_get_size(self, volume, existing_ref): """Return size of volume to be managed by manage_existing. When calculating the size, round up to the next GB. :param volume: Cinder volume to manage :param existing_ref: Driver-specific information used to identify a volume :returns size: Volume size in GiB (integer) """ existing_volume = self._get_existing_volume(existing_ref) return existing_volume['size'] def get_manageable_volumes(self, cinder_volumes, marker, limit, offset, sort_keys, sort_dirs): """List volumes on the backend available for management by Cinder. Returns a list of dictionaries, each specifying a volume in the host, with the following keys: - reference (dictionary): The reference for a volume, which can be passed to "manage_existing". - size (int): The size of the volume according to the storage backend, rounded up to the nearest GB. - safe_to_manage (boolean): Whether or not this volume is safe to manage according to the storage backend. For example, is the volume in use or invalid for any reason. - reason_not_safe (string): If safe_to_manage is False, the reason why. - cinder_id (string): If already managed, provide the Cinder ID. - extra_info (string): Any extra information to return to the user :param cinder_volumes: A list of volumes in this host that Cinder currently manages, used to determine if a volume is manageable or not. 
:param marker: The last item of the previous page; we return the next results after this value (after sorting) :param limit: Maximum number of items to return :param offset: Number of items to skip after marker :param sort_keys: List of keys to sort results by (valid keys are 'identifier' and 'size') :param sort_dirs: List of directions to sort by, corresponding to sort_keys (valid directions are 'asc' and 'desc') """ manageable_volumes = [] cinder_volume_names = {} for cinder_volume in cinder_volumes: key = cinder_volume['name'] value = cinder_volume['id'] cinder_volume_names[key] = value payload = { 'parent': self.root_path, 'fields': 'name,guid,path,volumeSize', 'recursive': False } volumes = self.nef.volumes.list(payload) for volume in volumes: safe_to_manage = True reason_not_safe = None cinder_id = None extra_info = None path = volume['path'] guid = volume['guid'] size = volume['volumeSize'] // units.Gi name = volume['name'] if name in cinder_volume_names: cinder_id = cinder_volume_names[name] safe_to_manage = False reason_not_safe = _('Volume already managed') else: payload = { 'volume': path, 'fields': 'hostGroup' } mappings = self.nef.mappings.list(payload) members = [] for mapping in mappings: hostgroup = mapping['hostGroup'] if hostgroup == options.DEFAULT_HOST_GROUP: members.append(hostgroup) else: group = self.nef.hostgroups.get(hostgroup) members += group['members'] if members: safe_to_manage = False hosts = ', '.join(members) reason_not_safe = (_('Volume is connected ' 'to host(s) %(hosts)s') % {'hosts': hosts}) reference = { 'source-name': name, 'source-guid': guid } manageable_volumes.append({ 'reference': reference, 'size': size, 'safe_to_manage': safe_to_manage, 'reason_not_safe': reason_not_safe, 'cinder_id': cinder_id, 'extra_info': extra_info }) return volume_utils.paginate_entries_list(manageable_volumes, marker, limit, offset, sort_keys, sort_dirs) def unmanage(self, volume): """Removes the specified volume from Cinder management. Does not delete the underlying backend storage object. For most drivers, this will not need to do anything. However, some drivers might use this call as an opportunity to clean up any Cinder-specific configuration that they have associated with the backend storage object. :param volume: Cinder volume to unmanage """ pass @coordination.synchronized('{self.nef.lock}') def manage_existing_snapshot(self, snapshot, existing_ref): """Brings an existing backend storage object under Cinder management. existing_ref is passed straight through from the API request's manage_existing_ref value, and it is up to the driver how this should be interpreted. It should be sufficient to identify a storage object that the driver should somehow associate with the newly-created cinder snapshot structure. There are two ways to do this: 1. Rename the backend storage object so that it matches the snapshot['name'] which is how drivers traditionally map between a cinder snapshot and the associated backend storage object. 2. Place some metadata on the snapshot, or somewhere in the backend, that allows other driver requests (e.g. delete) to locate the backend storage object when required. If the existing_ref doesn't make sense, or doesn't refer to an existing backend storage object, raise a ManageExistingInvalidReference exception. 
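# --- Illustrative example (not part of the driver source) ---------------------
# A minimal sketch of a single entry built by get_manageable_volumes() above
# before the list is paginated with volume_utils.paginate_entries_list().
# Every value below is hypothetical.
entry = {
    'reference': {
        'source-name': 'volume-0cba7f2c',
        'source-guid': '1234567890abcdef',
    },
    'size': 10,                    # GiB, derived from the backend volumeSize
    'safe_to_manage': False,
    'reason_not_safe': 'Volume is connected to host(s) host-group-aaaa',
    'cinder_id': None,
    'extra_info': None,
}
print(entry['reference'])
# ------------------------------------------------------------------------------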
:param snapshot: Cinder volume snapshot to manage :param existing_ref: Driver-specific information used to identify a volume snapshot """ existing_snapshot = self._get_existing_snapshot(snapshot, existing_ref) existing_snapshot_path = existing_snapshot['path'] if existing_snapshot['name'] != snapshot['name']: payload = {'newName': snapshot['name']} self.nef.snapshots.rename(existing_snapshot_path, payload) def manage_existing_snapshot_get_size(self, snapshot, existing_ref): """Return size of snapshot to be managed by manage_existing. When calculating the size, round up to the next GB. :param snapshot: Cinder volume snapshot to manage :param existing_ref: Driver-specific information used to identify a volume snapshot :returns size: Volume snapshot size in GiB (integer) """ existing_snapshot = self._get_existing_snapshot(snapshot, existing_ref) return existing_snapshot['volume_size'] def get_manageable_snapshots(self, cinder_snapshots, marker, limit, offset, sort_keys, sort_dirs): """List snapshots on the backend available for management by Cinder. Returns a list of dictionaries, each specifying a snapshot in the host, with the following keys: - reference (dictionary): The reference for a snapshot, which can be passed to "manage_existing_snapshot". - size (int): The size of the snapshot according to the storage backend, rounded up to the nearest GB. - safe_to_manage (boolean): Whether or not this snapshot is safe to manage according to the storage backend. For example, is the snapshot in use or invalid for any reason. - reason_not_safe (string): If safe_to_manage is False, the reason why. - cinder_id (string): If already managed, provide the Cinder ID. - extra_info (string): Any extra information to return to the user - source_reference (string): Similar to "reference", but for the snapshot's source volume. :param cinder_snapshots: A list of snapshots in this host that Cinder currently manages, used to determine if a snapshot is manageable or not. 
:param marker: The last item of the previous page; we return the next results after this value (after sorting) :param limit: Maximum number of items to return :param offset: Number of items to skip after marker :param sort_keys: List of keys to sort results by (valid keys are 'identifier' and 'size') :param sort_dirs: List of directions to sort by, corresponding to sort_keys (valid directions are 'asc' and 'desc') """ manageable_snapshots = [] cinder_volume_names = {} cinder_snapshot_names = {} ctxt = context.get_admin_context() cinder_volumes = objects.VolumeList.get_all_by_host(ctxt, self.host) for cinder_volume in cinder_volumes: key = self._get_volume_path(cinder_volume) value = { 'name': cinder_volume['name'], 'size': cinder_volume['size'] } cinder_volume_names[key] = value for cinder_snapshot in cinder_snapshots: key = cinder_snapshot['name'] value = { 'id': cinder_snapshot['id'], 'size': cinder_snapshot['volume_size'], 'parent': cinder_snapshot['volume_name'] } cinder_snapshot_names[key] = value payload = { 'parent': self.root_path, 'fields': 'name,guid,path,parent,hprService,snaplistId', 'recursive': True } snapshots = self.nef.snapshots.list(payload) for snapshot in snapshots: safe_to_manage = True reason_not_safe = None cinder_id = None extra_info = None name = snapshot['name'] guid = snapshot['guid'] path = snapshot['path'] parent = snapshot['parent'] if parent not in cinder_volume_names: LOG.debug('Skip snapshot %(path)s: parent ' 'volume %(parent)s is unmanaged', {'path': path, 'parent': parent}) continue if name.startswith(self.origin_snapshot_template): LOG.debug('Skip temporary origin snapshot %(path)s', {'path': path}) continue if name.startswith(self.group_snapshot_template): LOG.debug('Skip temporary group snapshot %(path)s', {'path': path}) continue if snapshot['hprService'] or snapshot['snaplistId']: LOG.debug('Skip HPR/snapping snapshot %(path)s', {'path': path}) continue if name in cinder_snapshot_names: size = cinder_snapshot_names[name]['size'] cinder_id = cinder_snapshot_names[name]['id'] safe_to_manage = False reason_not_safe = _('Snapshot already managed') else: size = cinder_volume_names[parent]['size'] payload = {'fields': 'clones'} props = self.nef.snapshots.get(path) clones = props['clones'] unmanaged_clones = [] for clone in clones: if clone not in cinder_volume_names: unmanaged_clones.append(clone) if unmanaged_clones: safe_to_manage = False dependent_clones = ', '.join(unmanaged_clones) reason_not_safe = (_('Snapshot has unmanaged ' 'dependent clone(s) %(clones)s') % {'clones': dependent_clones}) reference = { 'source-name': name, 'source-guid': guid } source_reference = { 'name': cinder_volume_names[parent]['name'] } manageable_snapshots.append({ 'reference': reference, 'size': size, 'safe_to_manage': safe_to_manage, 'reason_not_safe': reason_not_safe, 'cinder_id': cinder_id, 'extra_info': extra_info, 'source_reference': source_reference }) return volume_utils.paginate_entries_list(manageable_snapshots, marker, limit, offset, sort_keys, sort_dirs) def unmanage_snapshot(self, snapshot): """Removes the specified snapshot from Cinder management. Does not delete the underlying backend storage object. For most drivers, this will not need to do anything. However, some drivers might use this call as an opportunity to clean up any Cinder-specific configuration that they have associated with the backend storage object. 
:param snapshot: Cinder volume snapshot to unmanage """ pass ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/drivers/nexenta/ns5/jsonrpc.py0000664000175000017500000005606700000000000023535 0ustar00zuulzuul00000000000000# Copyright 2019 Nexenta Systems, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import hashlib import json import posixpath import urllib from eventlet import greenthread from oslo_log import log as logging import requests from cinder import exception from cinder.i18n import _ LOG = logging.getLogger(__name__) class NefException(exception.VolumeDriverException): def __init__(self, data=None, **kwargs): defaults = { 'name': 'NexentaError', 'code': 'EBADMSG', 'source': 'CinderDriver', 'message': 'Unknown error' } if isinstance(data, dict): for key in defaults: if key in kwargs: continue if key in data: kwargs[key] = data[key] else: kwargs[key] = defaults[key] elif isinstance(data, str): if 'message' not in kwargs: kwargs['message'] = data for key in defaults: if key not in kwargs: kwargs[key] = defaults[key] message = (_('%(message)s (source: %(source)s, ' 'name: %(name)s, code: %(code)s)') % kwargs) self.code = kwargs['code'] del kwargs['message'] super(NefException, self).__init__(message, **kwargs) class NefRequest(object): def __init__(self, proxy, method): self.proxy = proxy self.method = method self.path = None self.lock = False self.time = 0 self.data = [] self.payload = {} self.stat = {} self.hooks = { 'response': self.hook } self.kwargs = { 'hooks': self.hooks, 'timeout': self.proxy.timeout } def __call__(self, path, payload=None): LOG.debug('NEF request start: %(method)s %(path)s %(payload)s', {'method': self.method, 'path': path, 'payload': payload}) if self.method not in ['get', 'delete', 'put', 'post']: message = (_('NEF API does not support %(method)s method') % {'method': self.method}) raise NefException(code='EINVAL', message=message) if not path: message = _('NEF API call requires collection path') raise NefException(code='EINVAL', message=message) self.path = path if payload: if not isinstance(payload, dict): message = _('NEF API call payload must be a dictionary') raise NefException(code='EINVAL', message=message) if self.method in ['get', 'delete']: self.payload = {'params': payload} elif self.method in ['put', 'post']: self.payload = {'data': json.dumps(payload)} try: response = self.request(self.method, self.path, **self.payload) except (requests.exceptions.ConnectionError, requests.exceptions.Timeout) as error: LOG.debug('Failed to %(method)s %(path)s %(payload)s: %(error)s', {'method': self.method, 'path': self.path, 'payload': self.payload, 'error': error}) if not self.failover(): raise LOG.debug('Retry initial request after failover: ' '%(method)s %(path)s %(payload)s', {'method': self.method, 'path': self.path, 'payload': self.payload}) response = self.request(self.method, self.path, **self.payload) LOG.debug('NEF request done: %(method)s %(path)s 
%(payload)s, ' 'total response time: %(time)s seconds, ' 'total requests count: %(count)s, ' 'requests statistics: %(stat)s', {'method': self.method, 'path': self.path, 'payload': self.payload, 'time': self.time, 'count': sum(self.stat.values()), 'stat': self.stat}) if response.ok and not response.content: return None content = json.loads(response.content) if not response.ok: raise NefException(content) if isinstance(content, dict) and 'data' in content: return self.data return content def request(self, method, path, **kwargs): url = self.proxy.url(path) LOG.debug('Perform session request: %(method)s %(url)s %(body)s', {'method': method, 'url': url, 'body': kwargs}) kwargs.update(self.kwargs) return self.proxy.session.request(method, url, **kwargs) def hook(self, response, **kwargs): initial_text = (_('initial request %(method)s %(path)s %(body)s') % {'method': self.method, 'path': self.path, 'body': self.payload}) request_text = (_('session request %(method)s %(url)s %(body)s') % {'method': response.request.method, 'url': response.request.url, 'body': response.request.body}) response_text = (_('session response %(code)s %(content)s') % {'code': response.status_code, 'content': response.content}) text = (_('%(request_text)s and %(response_text)s') % {'request_text': request_text, 'response_text': response_text}) LOG.debug('Hook start on %(text)s', {'text': text}) if response.status_code not in self.stat: self.stat[response.status_code] = 0 self.stat[response.status_code] += 1 self.time += response.elapsed.total_seconds() if response.ok and not response.content: LOG.debug('Hook done on %(text)s: ' 'empty response content', {'text': text}) return response if not response.content: message = (_('There is no response content ' 'is available for %(text)s') % {'text': text}) raise NefException(code='ENODATA', message=message) try: content = json.loads(response.content) except (TypeError, ValueError) as error: message = (_('Failed to decode JSON for %(text)s: %(error)s') % {'text': text, 'error': error}) raise NefException(code='ENOMSG', message=message) method = 'get' if response.status_code == requests.codes.unauthorized: if self.stat[response.status_code] > self.proxy.retries: raise NefException(content) self.auth() LOG.debug('Retry %(text)s after authentication', {'text': request_text}) request = response.request.copy() request.headers.update(self.proxy.session.headers) return self.proxy.session.send(request, **kwargs) elif response.status_code == requests.codes.not_found: if self.lock: LOG.debug('Hook done on %(text)s: ' 'nested failover is detected', {'text': text}) return response if self.stat[response.status_code] > self.proxy.retries: raise NefException(content) if not self.failover(): LOG.debug('Hook done on %(text)s: ' 'no valid hosts found', {'text': text}) return response LOG.debug('Retry %(text)s after failover', {'text': initial_text}) self.data = [] return self.request(self.method, self.path, **self.payload) elif response.status_code == requests.codes.server_error: if not (isinstance(content, dict) and 'code' in content and content['code'] == 'EBUSY'): raise NefException(content) if self.stat[response.status_code] > self.proxy.retries: raise NefException(content) self.proxy.delay(self.stat[response.status_code]) LOG.debug('Retry %(text)s after delay', {'text': initial_text}) self.data = [] return self.request(self.method, self.path, **self.payload) elif response.status_code == requests.codes.accepted: path = self.getpath(content, 'monitor') if not path: message = (_('There is no 
monitor path ' 'available for %(text)s') % {'text': text}) raise NefException(code='ENOMSG', message=message) self.proxy.delay(self.stat[response.status_code]) return self.request(method, path) elif response.status_code == requests.codes.ok: if not (isinstance(content, dict) and 'data' in content): LOG.debug('Hook done on %(text)s: there ' 'is no JSON data available', {'text': text}) return response LOG.debug('Append %(count)s data items to response', {'count': len(content['data'])}) self.data += content['data'] path = self.getpath(content, 'next') if not path: LOG.debug('Hook done on %(text)s: there ' 'is no next path available', {'text': text}) return response LOG.debug('Perform next session request %(method)s %(path)s', {'method': method, 'path': path}) return self.request(method, path) LOG.debug('Hook done on %(text)s and ' 'returned original response', {'text': text}) return response def auth(self): method = 'post' path = 'auth/login' payload = { 'username': self.proxy.username, 'password': self.proxy.password } data = json.dumps(payload) kwargs = {'data': data} self.proxy.delete_bearer() response = self.request(method, path, **kwargs) content = json.loads(response.content) if not (isinstance(content, dict) and 'token' in content): message = (_('There is no authentication token available ' 'for authentication request %(method)s %(url)s ' '%(body)s and response %(code)s %(content)s') % {'method': response.request.method, 'url': response.request.url, 'body': response.request.body, 'code': response.status_code, 'content': response.content}) raise NefException(code='ENODATA', message=message) token = content['token'] self.proxy.update_token(token) def failover(self): result = False self.lock = True method = 'get' root = self.proxy.root for host in self.proxy.hosts: self.proxy.update_host(host) LOG.debug('Try to failover path ' '%(root)s to host %(host)s', {'root': root, 'host': host}) try: response = self.request(method, root) except (requests.exceptions.ConnectionError, requests.exceptions.Timeout) as error: LOG.debug('Skip unavailable host %(host)s ' 'due to error: %(error)s', {'host': host, 'error': error}) continue LOG.debug('Failover result: %(code)s %(content)s', {'code': response.status_code, 'content': response.content}) if response.status_code == requests.codes.ok: LOG.debug('Successful failover path ' '%(root)s to host %(host)s', {'root': root, 'host': host}) self.proxy.update_lock() result = True break else: LOG.debug('Skip unsuitable host %(host)s: ' 'there is no %(root)s path found', {'host': host, 'root': root}) self.lock = False return result @staticmethod def getpath(content, name): if isinstance(content, dict) and 'links' in content: for link in content['links']: if not isinstance(link, dict): continue if 'rel' in link and 'href' in link: if link['rel'] == name: return link['href'] return None class NefCollections(object): subj = 'collection' root = '/collections' def __init__(self, proxy): self.proxy = proxy def path(self, name): quoted_name = urllib.parse.quote_plus(name) return posixpath.join(self.root, quoted_name) def get(self, name, payload=None): LOG.debug('Get properties of %(subj)s %(name)s: %(payload)s', {'subj': self.subj, 'name': name, 'payload': payload}) path = self.path(name) return self.proxy.get(path, payload) def set(self, name, payload=None): LOG.debug('Modify properties of %(subj)s %(name)s: %(payload)s', {'subj': self.subj, 'name': name, 'payload': payload}) path = self.path(name) return self.proxy.put(path, payload) def list(self, payload=None): 
LOG.debug('List of %(subj)ss: %(payload)s', {'subj': self.subj, 'payload': payload}) return self.proxy.get(self.root, payload) def create(self, payload=None): LOG.debug('Create %(subj)s: %(payload)s', {'subj': self.subj, 'payload': payload}) try: return self.proxy.post(self.root, payload) except NefException as error: if error.code != 'EEXIST': raise def delete(self, name, payload=None): LOG.debug('Delete %(subj)s %(name)s: %(payload)s', {'subj': self.subj, 'name': name, 'payload': payload}) path = self.path(name) try: return self.proxy.delete(path, payload) except NefException as error: if error.code != 'ENOENT': raise class NefSettings(NefCollections): subj = 'setting' root = '/settings/properties' def create(self, payload=None): return NotImplemented def delete(self, name, payload=None): return NotImplemented class NefDatasets(NefCollections): subj = 'dataset' root = '/storage/datasets' def rename(self, name, payload=None): LOG.debug('Rename %(subj)s %(name)s: %(payload)s', {'subj': self.subj, 'name': name, 'payload': payload}) path = posixpath.join(self.path(name), 'rename') return self.proxy.post(path, payload) class NefSnapshots(NefDatasets, NefCollections): subj = 'snapshot' root = '/storage/snapshots' def clone(self, name, payload=None): LOG.debug('Clone %(subj)s %(name)s: %(payload)s', {'subj': self.subj, 'name': name, 'payload': payload}) path = posixpath.join(self.path(name), 'clone') return self.proxy.post(path, payload) class NefVolumeGroups(NefDatasets, NefCollections): subj = 'volume group' root = 'storage/volumeGroups' def rollback(self, name, payload=None): LOG.debug('Rollback %(subj)s %(name)s: %(payload)s', {'subj': self.subj, 'name': name, 'payload': payload}) path = posixpath.join(self.path(name), 'rollback') return self.proxy.post(path, payload) class NefVolumes(NefVolumeGroups, NefDatasets, NefCollections): subj = 'volume' root = '/storage/volumes' def promote(self, name, payload=None): LOG.debug('Promote %(subj)s %(name)s: %(payload)s', {'subj': self.subj, 'name': name, 'payload': payload}) path = posixpath.join(self.path(name), 'promote') return self.proxy.post(path, payload) class NefFilesystems(NefVolumes, NefVolumeGroups, NefDatasets, NefCollections): subj = 'filesystem' root = '/storage/filesystems' def mount(self, name, payload=None): LOG.debug('Mount %(subj)s %(name)s: %(payload)s', {'subj': self.subj, 'name': name, 'payload': payload}) path = posixpath.join(self.path(name), 'mount') return self.proxy.post(path, payload) def unmount(self, name, payload=None): LOG.debug('Unmount %(subj)s %(name)s: %(payload)s', {'subj': self.subj, 'name': name, 'payload': payload}) path = posixpath.join(self.path(name), 'unmount') return self.proxy.post(path, payload) def acl(self, name, payload=None): LOG.debug('Set %(subj)s %(name)s ACL: %(payload)s', {'subj': self.subj, 'name': name, 'payload': payload}) path = posixpath.join(self.path(name), 'acl') return self.proxy.post(path, payload) class NefHpr(NefCollections): subj = 'HPR service' root = '/hpr' def activate(self, payload=None): LOG.debug('Activate %(payload)s', {'payload': payload}) path = posixpath.join(self.root, 'activate') return self.proxy.post(path, payload) def start(self, name, payload=None): LOG.debug('Start %(subj)s %(name)s: %(payload)s', {'subj': self.subj, 'name': name, 'payload': payload}) path = posixpath.join(self.path(name), 'start') return self.proxy.post(path, payload) class NefServices(NefCollections): subj = 'service' root = '/services' class NefNfs(NefCollections): subj = 'NFS' root = '/nas/nfs' 
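# --- Illustrative example (not part of the driver source) ---------------------
# A minimal sketch of how the NefCollections subclasses above turn a dataset
# name into a REST path: the name is URL-quoted with quote_plus(), so the '/'
# separators of a ZFS path end up inside a single URL path component under the
# collection root. The pool/dataset name below is made up.
import posixpath
import urllib.parse

root = '/storage/filesystems'          # NefFilesystems.root
name = 'tank/nfs/volume-0cba7f2c'
path = posixpath.join(root, urllib.parse.quote_plus(name))
print(path)   # /storage/filesystems/tank%2Fnfs%2Fvolume-0cba7f2c
# The request itself is dispatched through NefProxy.__getattr__ (defined later
# in this module), e.g. proxy.get(path) builds a NefRequest(proxy, 'get').
# ------------------------------------------------------------------------------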
class NefTargets(NefCollections): subj = 'iSCSI target' root = '/san/iscsi/targets' class NefHostGroups(NefCollections): subj = 'host group' root = '/san/hostgroups' class NefTargetsGroups(NefCollections): subj = 'target group' root = '/san/targetgroups' class NefLunMappings(NefCollections): subj = 'LUN mapping' root = '/san/lunMappings' class NefLogicalUnits(NefCollections): subj = 'LU' root = 'san/logicalUnits' class NefNetAddresses(NefCollections): subj = 'network address' root = '/network/addresses' class NefProxy(object): def __init__(self, proto, path, conf): self.session = requests.Session() self.settings = NefSettings(self) self.filesystems = NefFilesystems(self) self.volumegroups = NefVolumeGroups(self) self.volumes = NefVolumes(self) self.snapshots = NefSnapshots(self) self.services = NefServices(self) self.hpr = NefHpr(self) self.nfs = NefNfs(self) self.targets = NefTargets(self) self.hostgroups = NefHostGroups(self) self.targetgroups = NefTargetsGroups(self) self.mappings = NefLunMappings(self) self.logicalunits = NefLogicalUnits(self) self.netaddrs = NefNetAddresses(self) self.lock = None self.tokens = {} self.headers = { 'Content-Type': 'application/json', 'X-XSS-Protection': '1' } if conf.nexenta_use_https: self.scheme = 'https' else: self.scheme = 'http' self.username = conf.nexenta_user self.password = conf.nexenta_password self.hosts = [] if conf.nexenta_rest_address: for host in conf.nexenta_rest_address.split(','): self.hosts.append(host.strip()) if proto == 'nfs': self.root = self.filesystems.path(path) if not self.hosts: self.hosts.append(conf.nas_host) elif proto == 'iscsi': self.root = self.volumegroups.path(path) if not self.hosts: self.hosts.append(conf.nexenta_host) else: message = (_('Storage protocol %(proto)s not supported') % {'proto': proto}) raise NefException(code='EPROTO', message=message) self.host = self.hosts[0] if conf.nexenta_rest_port: self.port = conf.nexenta_rest_port else: if conf.nexenta_use_https: self.port = 8443 else: self.port = 8080 self.proto = proto self.path = path self.backoff_factor = conf.nexenta_rest_backoff_factor self.retries = len(self.hosts) * conf.nexenta_rest_retry_count self.timeout = requests.packages.urllib3.util.timeout.Timeout( connect=conf.nexenta_rest_connect_timeout, read=conf.nexenta_rest_read_timeout) max_retries = requests.packages.urllib3.util.retry.Retry( total=conf.nexenta_rest_retry_count, backoff_factor=conf.nexenta_rest_backoff_factor) adapter = requests.adapters.HTTPAdapter(max_retries=max_retries) self.session.verify = conf.driver_ssl_cert_verify self.session.headers.update(self.headers) self.session.mount('%s://' % self.scheme, adapter) if not conf.driver_ssl_cert_verify: requests.packages.urllib3.disable_warnings() self.update_lock() def __getattr__(self, name): return NefRequest(self, name) def delete_bearer(self): if 'Authorization' in self.session.headers: del self.session.headers['Authorization'] def update_bearer(self, token): bearer = 'Bearer %s' % token self.session.headers['Authorization'] = bearer def update_token(self, token): self.tokens[self.host] = token self.update_bearer(token) def update_host(self, host): self.host = host if host in self.tokens: token = self.tokens[host] self.update_bearer(token) def update_lock(self): prop = self.settings.get('system.guid') guid = prop.get('value') path = '%s:%s' % (guid, self.path) if isinstance(path, str): path = path.encode('utf-8') self.lock = hashlib.md5(path, usedforsecurity=False).hexdigest() def url(self, path): netloc = '%s:%d' % (self.host, 
int(self.port)) components = (self.scheme, netloc, str(path), None, None) url = urllib.parse.urlunsplit(components) return url def delay(self, attempt): interval = int(self.backoff_factor * (2 ** (attempt - 1))) LOG.debug('Waiting for %(interval)s seconds', {'interval': interval}) greenthread.sleep(interval) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/drivers/nexenta/ns5/nfs.py0000664000175000017500000016055600000000000022644 0ustar00zuulzuul00000000000000# Copyright 2019 Nexenta Systems, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import errno import hashlib import os import posixpath import uuid from oslo_log import log as logging from oslo_utils import units from cinder.common import constants from cinder import context from cinder import coordination from cinder.i18n import _ from cinder import interface from cinder import objects from cinder.privsep import fs from cinder.volume.drivers.nexenta.ns5 import jsonrpc from cinder.volume.drivers.nexenta import options from cinder.volume.drivers import nfs from cinder.volume import volume_utils LOG = logging.getLogger(__name__) @interface.volumedriver class NexentaNfsDriver(nfs.NfsDriver): """Executes volume driver commands on Nexenta Appliance. Version history: .. code-block:: none 1.0.0 - Initial driver version. 1.1.0 - Support for extend volume. 1.2.0 - Added HTTPS support. - Added use of sessions for REST calls. - Added abandoned volumes and snapshots cleanup. 1.3.0 - Failover support. 1.4.0 - Migrate volume support and new NEF API calls. 1.5.0 - Revert to snapshot support. 1.6.0 - Get mountPoint from API to support old style mount points. - Mount and umount shares on each operation to avoid mass mounts on controller. Clean up mount folders on delete. 1.6.1 - Fixed volume from image creation. 1.6.2 - Removed redundant share mount from initialize_connection. 1.6.3 - Adapted NexentaException for the latest Cinder. 1.6.4 - Fixed volume mount/unmount. 1.6.5 - Added driver_ssl_cert_verify for HA failover. 1.6.6 - Destroy unused snapshots after deletion of it's last clone. 1.6.7 - Fixed volume migration for HA environment. 1.6.8 - Added deferred deletion for snapshots. 1.6.9 - Fixed race between volume/clone deletion. 1.7.0 - Added consistency group support. 1.7.1 - Removed redundant hpr/activate call from initialize_connection. 1.7.2 - Merged upstream changes for umount. 1.8.0 - Refactored NFS driver. - Added pagination support. - Added configuration parameters for REST API connect/read timeouts, connection retries and backoff factor. - Fixed HA failover. - Added retries on EBUSY errors. - Fixed HTTP authentication. - Disabled non-blocking mandatory locks. - Added coordination for dataset operations. 1.8.1 - Support for NexentaStor tenants. 1.8.2 - Added manage/unmanage/manageable-list volume/snapshot support. 1.8.3 - Added consistency group capability to generic volume group. 1.8.4 - Disabled SmartCompression feature. 
""" VERSION = '1.8.3' CI_WIKI_NAME = "Nexenta_CI" vendor_name = 'Nexenta' product_name = 'NexentaStor5' storage_protocol = constants.NFS driver_volume_type = 'nfs' def __init__(self, *args, **kwargs): super(NexentaNfsDriver, self).__init__(*args, **kwargs) if not self.configuration: message = (_('%(product_name)s %(storage_protocol)s ' 'backend configuration not found') % {'product_name': self.product_name, 'storage_protocol': self.storage_protocol}) raise jsonrpc.NefException(code='ENODATA', message=message) self.configuration.append_config_values( options.NEXENTA_CONNECTION_OPTS) self.configuration.append_config_values( options.NEXENTA_NFS_OPTS) self.configuration.append_config_values( options.NEXENTA_DATASET_OPTS) self.nef = None self.volume_backend_name = ( self.configuration.safe_get('volume_backend_name') or '%s_%s' % (self.product_name, self.storage_protocol)) self.nas_host = self.configuration.nas_host self.root_path = self.configuration.nas_share_path self.sparsed_volumes = self.configuration.nexenta_sparsed_volumes self.deduplicated_volumes = self.configuration.nexenta_dataset_dedup self.compressed_volumes = ( self.configuration.nexenta_dataset_compression) self.dataset_description = ( self.configuration.nexenta_dataset_description) self.mount_point_base = self.configuration.nexenta_mount_point_base self.group_snapshot_template = ( self.configuration.nexenta_group_snapshot_template) self.origin_snapshot_template = ( self.configuration.nexenta_origin_snapshot_template) @staticmethod def get_driver_options(): return ( options.NEXENTA_CONNECTION_OPTS + options.NEXENTA_NFS_OPTS + options.NEXENTA_DATASET_OPTS ) def do_setup(self, context): self.nef = jsonrpc.NefProxy(self.driver_volume_type, self.root_path, self.configuration) def check_for_setup_error(self): """Check root filesystem, NFS service and NFS share.""" filesystem = self.nef.filesystems.get(self.root_path) if filesystem['mountPoint'] == 'none': message = (_('NFS root filesystem %(path)s is not writable') % {'path': filesystem['mountPoint']}) raise jsonrpc.NefException(code='ENOENT', message=message) if not filesystem['isMounted']: message = (_('NFS root filesystem %(path)s is not mounted') % {'path': filesystem['mountPoint']}) raise jsonrpc.NefException(code='ENOTDIR', message=message) payload = {} if filesystem['nonBlockingMandatoryMode']: payload['nonBlockingMandatoryMode'] = False if filesystem['smartCompression']: payload['smartCompression'] = False if payload: self.nef.filesystems.set(self.root_path, payload) service = self.nef.services.get('nfs') if service['state'] != 'online': message = (_('NFS server service is not online: %(state)s') % {'state': service['state']}) raise jsonrpc.NefException(code='ESRCH', message=message) share = self.nef.nfs.get(self.root_path) if share['shareState'] != 'online': message = (_('NFS share %(share)s is not online: %(state)s') % {'share': self.root_path, 'state': share['shareState']}) raise jsonrpc.NefException(code='ESRCH', message=message) def create_volume(self, volume): """Creates a volume. 
:param volume: volume reference """ volume_path = self._get_volume_path(volume) payload = {'path': volume_path, 'compressionMode': 'off'} self.nef.filesystems.create(payload) try: self._set_volume_acl(volume) self._mount_volume(volume) volume_file = self.local_path(volume) if self.sparsed_volumes: self._create_sparsed_file(volume_file, volume['size']) else: self._create_regular_file(volume_file, volume['size']) if self.compressed_volumes != 'off': payload = {'compressionMode': self.compressed_volumes} self.nef.filesystems.set(volume_path, payload) except jsonrpc.NefException as create_error: try: payload = {'force': True} self.nef.filesystems.delete(volume_path, payload) except jsonrpc.NefException as delete_error: LOG.debug('Failed to delete volume %(path)s: %(error)s', {'path': volume_path, 'error': delete_error}) raise create_error finally: self._unmount_volume(volume) def copy_image_to_volume(self, context, volume, image_service, image_id): LOG.debug('Copy image %(image)s to volume %(volume)s', {'image': image_id, 'volume': volume['name']}) self._mount_volume(volume) super(NexentaNfsDriver, self).copy_image_to_volume( context, volume, image_service, image_id) self._unmount_volume(volume) def copy_volume_to_image(self, context, volume, image_service, image_meta): LOG.debug('Copy volume %(volume)s to image %(image)s', {'volume': volume['name'], 'image': image_meta['id']}) self._mount_volume(volume) super(NexentaNfsDriver, self).copy_volume_to_image( context, volume, image_service, image_meta) self._unmount_volume(volume) def _ensure_share_unmounted(self, share): """Ensure that NFS share is unmounted on the host. :param share: share path """ attempts = max(1, self.configuration.nfs_mount_attempts) path = self._get_mount_point_for_share(share) if path not in self._remotefsclient._read_mounts(): LOG.debug('NFS share %(share)s is not mounted at %(path)s', {'share': share, 'path': path}) return for attempt in range(0, attempts): try: fs.umount(path) LOG.debug('NFS share %(share)s has been unmounted at %(path)s', {'share': share, 'path': path}) break except Exception as error: if attempt == (attempts - 1): LOG.error('Failed to unmount NFS share %(share)s ' 'after %(attempts)s attempts', {'share': share, 'attempts': attempts}) raise LOG.debug('Unmount attempt %(attempt)s failed: %(error)s, ' 'retrying unmount %(share)s from %(path)s', {'attempt': attempt, 'error': error, 'share': share, 'path': path}) self.nef.delay(attempt) self._delete(path) def _mount_volume(self, volume): """Ensure that volume is activated and mounted on the host.""" volume_path = self._get_volume_path(volume) payload = {'fields': 'mountPoint,isMounted'} filesystem = self.nef.filesystems.get(volume_path, payload) if filesystem['mountPoint'] == 'none': payload = {'datasetName': volume_path} self.nef.hpr.activate(payload) filesystem = self.nef.filesystems.get(volume_path) elif not filesystem['isMounted']: self.nef.filesystems.mount(volume_path) share = '%s:%s' % (self.nas_host, filesystem['mountPoint']) self._ensure_share_mounted(share) def _remount_volume(self, volume): """Workaround for NEX-16457.""" volume_path = self._get_volume_path(volume) self.nef.filesystems.unmount(volume_path) self.nef.filesystems.mount(volume_path) def _unmount_volume(self, volume): """Ensure that volume is unmounted on the host.""" share = self._get_volume_share(volume) self._ensure_share_unmounted(share) def _create_sparsed_file(self, path, size): """Creates file with 0 disk usage.""" if self.configuration.nexenta_qcow2_volumes: 
self._create_qcow2_file(path, size) else: super(NexentaNfsDriver, self)._create_sparsed_file(path, size) def migrate_volume(self, context, volume, host): """Migrate if volume and host are managed by Nexenta appliance. :param context: context :param volume: a dictionary describing the volume to migrate :param host: a dictionary describing the host to migrate to """ LOG.debug('Migrate volume %(volume)s to host %(host)s', {'volume': volume['name'], 'host': host}) false_ret = (False, None) if volume['status'] not in ('available', 'retyping'): LOG.error('Volume %(volume)s status must be available or ' 'retyping, current volume status is %(status)s', {'volume': volume['name'], 'status': volume['status']}) return false_ret if 'capabilities' not in host: LOG.error('Unsupported host %(host)s: no capabilities found', {'host': host}) return false_ret capabilities = host['capabilities'] if not ('location_info' in capabilities and 'vendor_name' in capabilities and 'free_capacity_gb' in capabilities): LOG.error('Unsupported host %(host)s: required NFS ' 'and vendor capabilities are not found', {'host': host}) return false_ret driver_name = capabilities['location_info'].split(':')[0] dst_root = capabilities['location_info'].split(':/')[1] if not (capabilities['vendor_name'] == 'Nexenta' and driver_name == self.__class__.__name__): LOG.error('Unsupported host %(host)s: incompatible ' 'vendor %(vendor)s or driver %(driver)s', {'host': host, 'vendor': capabilities['vendor_name'], 'driver': driver_name}) return false_ret if capabilities['free_capacity_gb'] < volume['size']: LOG.error('There is not enough space available on the ' 'host %(host)s to migrate volume %(volume)s, ' 'free space: %(free)d, required: %(size)d', {'host': host, 'volume': volume['name'], 'free': capabilities['free_capacity_gb'], 'size': volume['size']}) return false_ret src_path = self._get_volume_path(volume) dst_path = posixpath.join(dst_root, volume['name']) nef_ips = capabilities['nef_url'].split(',') nef_ips.append(None) svc = 'cinder-migrate-%s' % volume['name'] for nef_ip in nef_ips: payload = {'name': svc, 'sourceDataset': src_path, 'destinationDataset': dst_path, 'type': 'scheduled', 'sendShareNfs': True} if nef_ip is not None: payload['isSource'] = True payload['remoteNode'] = { 'host': nef_ip, 'port': capabilities['nef_port'] } try: self.nef.hpr.create(payload) break except jsonrpc.NefException as error: if nef_ip is None or error.code not in ('EINVAL', 'ENOENT'): LOG.error('Failed to create replication ' 'service %(payload)s: %(error)s', {'payload': payload, 'error': error}) return false_ret try: self.nef.hpr.start(svc) except jsonrpc.NefException as error: LOG.error('Failed to start replication ' 'service %(svc)s: %(error)s', {'svc': svc, 'error': error}) try: payload = {'force': True} self.nef.hpr.delete(svc, payload) except jsonrpc.NefException as error: LOG.error('Failed to delete replication ' 'service %(svc)s: %(error)s', {'svc': svc, 'error': error}) return false_ret payload = {'destroySourceSnapshots': True, 'destroyDestinationSnapshots': True} progress = True retry = 0 while progress: retry += 1 hpr = self.nef.hpr.get(svc) state = hpr['state'] if state == 'disabled': progress = False elif state == 'enabled': self.nef.delay(retry) else: self.nef.hpr.delete(svc, payload) return false_ret self.nef.hpr.delete(svc, payload) try: self.delete_volume(volume) except jsonrpc.NefException as error: LOG.debug('Failed to delete source volume %(volume)s: %(error)s', {'volume': volume['name'], 'error': error}) return True, None def 
terminate_connection(self, volume, connector, **kwargs): """Terminate a connection to a volume. :param volume: a volume object :param connector: a connector object """ LOG.debug('Terminate volume connection for %(volume)s', {'volume': volume['name']}) self._unmount_volume(volume) def initialize_connection(self, volume, connector): """Initialize a connection to a volume. :param volume: a volume object :param connector: a connector object :returns: dictionary of connection information """ LOG.debug('Initialize volume connection for %(volume)s', {'volume': volume['name']}) share = self._get_volume_share(volume) return { 'driver_volume_type': self.driver_volume_type, 'mount_point_base': self.mount_point_base, 'data': { 'export': share, 'name': 'volume' } } def ensure_export(self, context, volume): """Synchronously recreate an export for a volume.""" pass @coordination.synchronized('{self.nef.lock}') def delete_volume(self, volume): """Deletes a volume. :param volume: volume reference """ volume_path = self._get_volume_path(volume) self._unmount_volume(volume) delete_payload = {'force': True, 'snapshots': True} try: self.nef.filesystems.delete(volume_path, delete_payload) except jsonrpc.NefException as error: if error.code != 'EEXIST': raise snapshots_tree = {} snapshots_payload = {'parent': volume_path, 'fields': 'path'} snapshots = self.nef.snapshots.list(snapshots_payload) for snapshot in snapshots: clones_payload = {'fields': 'clones,creationTxg'} data = self.nef.snapshots.get(snapshot['path'], clones_payload) if data['clones']: snapshots_tree[data['creationTxg']] = data['clones'][0] if snapshots_tree: clone_path = snapshots_tree[max(snapshots_tree)] self.nef.filesystems.promote(clone_path) self.nef.filesystems.delete(volume_path, delete_payload) def _delete(self, path): """Override parent method for safe remove mountpoint.""" try: os.rmdir(path) LOG.debug('The mountpoint %(path)s has been successfully removed', {'path': path}) except OSError as error: LOG.debug('Failed to remove mountpoint %(path)s: %(error)s', {'path': path, 'error': error.strerror}) def extend_volume(self, volume, new_size): """Extend an existing volume. :param volume: volume reference :param new_size: volume new size in GB """ LOG.info('Extend volume %(volume)s, new size: %(size)sGB', {'volume': volume['name'], 'size': new_size}) self._mount_volume(volume) volume_file = self.local_path(volume) if self.sparsed_volumes: self._execute('truncate', '-s', '%dG' % new_size, volume_file, run_as_root=True) else: seek = volume['size'] * units.Ki count = (new_size - volume['size']) * units.Ki self._execute('dd', 'if=/dev/zero', 'of=%s' % volume_file, 'bs=%d' % units.Mi, 'seek=%d' % seek, 'count=%d' % count, run_as_root=True) self._unmount_volume(volume) @coordination.synchronized('{self.nef.lock}') def create_snapshot(self, snapshot): """Creates a snapshot. :param snapshot: snapshot reference """ snapshot_path = self._get_snapshot_path(snapshot) payload = {'path': snapshot_path} self.nef.snapshots.create(payload) @coordination.synchronized('{self.nef.lock}') def delete_snapshot(self, snapshot): """Deletes a snapshot. 
:param snapshot: snapshot reference """ snapshot_path = self._get_snapshot_path(snapshot) payload = {'defer': True} self.nef.snapshots.delete(snapshot_path, payload) def snapshot_revert_use_temp_snapshot(self): # Considering that NexentaStor based drivers use COW images # for storing snapshots, having chains of such images, # creating a backup snapshot when reverting one is not # actually helpful. return False def revert_to_snapshot(self, context, volume, snapshot): """Revert volume to snapshot.""" volume_path = self._get_volume_path(volume) payload = {'snapshot': snapshot['name']} self.nef.filesystems.rollback(volume_path, payload) @coordination.synchronized('{self.nef.lock}') def create_volume_from_snapshot(self, volume, snapshot): """Create new volume from other's snapshot on appliance. :param volume: reference of volume to be created :param snapshot: reference of source snapshot """ LOG.debug('Create volume %(volume)s from snapshot %(snapshot)s', {'volume': volume['name'], 'snapshot': snapshot['name']}) snapshot_path = self._get_snapshot_path(snapshot) clone_path = self._get_volume_path(volume) payload = {'targetPath': clone_path} self.nef.snapshots.clone(snapshot_path, payload) self._remount_volume(volume) self._set_volume_acl(volume) if volume['size'] > snapshot['volume_size']: new_size = volume['size'] volume['size'] = snapshot['volume_size'] self.extend_volume(volume, new_size) volume['size'] = new_size def create_cloned_volume(self, volume, src_vref): """Creates a clone of the specified volume. :param volume: new volume reference :param src_vref: source volume reference """ snapshot = { 'name': self.origin_snapshot_template % volume['id'], 'volume_id': src_vref['id'], 'volume_name': src_vref['name'], 'volume_size': src_vref['size'] } self.create_snapshot(snapshot) try: self.create_volume_from_snapshot(volume, snapshot) except jsonrpc.NefException as error: LOG.debug('Failed to create clone %(clone)s ' 'from volume %(volume)s: %(error)s', {'clone': volume['name'], 'volume': src_vref['name'], 'error': error}) raise finally: try: self.delete_snapshot(snapshot) except jsonrpc.NefException as error: LOG.debug('Failed to delete temporary snapshot ' '%(volume)s@%(snapshot)s: %(error)s', {'volume': src_vref['name'], 'snapshot': snapshot['name'], 'error': error}) def create_consistencygroup(self, context, group): """Creates a consistency group. :param context: the context of the caller. :param group: the dictionary of the consistency group to be created. :returns: group_model_update """ group_model_update = {} return group_model_update def create_group(self, context, group): """Creates a group. :param context: the context of the caller. :param group: the group object. :returns: model_update """ return self.create_consistencygroup(context, group) def delete_consistencygroup(self, context, group, volumes): """Deletes a consistency group. :param context: the context of the caller. :param group: the dictionary of the consistency group to be deleted. :param volumes: a list of volume dictionaries in the group. :returns: group_model_update, volumes_model_update """ group_model_update = {} volumes_model_update = [] for volume in volumes: self.delete_volume(volume) return group_model_update, volumes_model_update def delete_group(self, context, group, volumes): """Deletes a group. :param context: the context of the caller. :param group: the group object. :param volumes: a list of volume objects in the group. 
:returns: model_update, volumes_model_update """ return self.delete_consistencygroup(context, group, volumes) def update_consistencygroup(self, context, group, add_volumes=None, remove_volumes=None): """Updates a consistency group. :param context: the context of the caller. :param group: the dictionary of the consistency group to be updated. :param add_volumes: a list of volume dictionaries to be added. :param remove_volumes: a list of volume dictionaries to be removed. :returns: group_model_update, add_volumes_update, remove_volumes_update """ group_model_update = {} add_volumes_update = [] remove_volumes_update = [] return group_model_update, add_volumes_update, remove_volumes_update def update_group(self, context, group, add_volumes=None, remove_volumes=None): """Updates a group. :param context: the context of the caller. :param group: the group object. :param add_volumes: a list of volume objects to be added. :param remove_volumes: a list of volume objects to be removed. :returns: model_update, add_volumes_update, remove_volumes_update """ return self.update_consistencygroup(context, group, add_volumes, remove_volumes) def create_cgsnapshot(self, context, cgsnapshot, snapshots): """Creates a consistency group snapshot. :param context: the context of the caller. :param cgsnapshot: the dictionary of the cgsnapshot to be created. :param snapshots: a list of snapshot dictionaries in the cgsnapshot. :returns: group_model_update, snapshots_model_update """ group_model_update = {} snapshots_model_update = [] cgsnapshot_name = self.group_snapshot_template % cgsnapshot['id'] cgsnapshot_path = '%s@%s' % (self.root_path, cgsnapshot_name) create_payload = {'path': cgsnapshot_path, 'recursive': True} self.nef.snapshots.create(create_payload) for snapshot in snapshots: volume_name = snapshot['volume_name'] volume_path = posixpath.join(self.root_path, volume_name) snapshot_name = snapshot['name'] snapshot_path = '%s@%s' % (volume_path, cgsnapshot_name) rename_payload = {'newName': snapshot_name} self.nef.snapshots.rename(snapshot_path, rename_payload) delete_payload = {'defer': True, 'recursive': True} self.nef.snapshots.delete(cgsnapshot_path, delete_payload) return group_model_update, snapshots_model_update def create_group_snapshot(self, context, group_snapshot, snapshots): """Creates a group_snapshot. :param context: the context of the caller. :param group_snapshot: the GroupSnapshot object to be created. :param snapshots: a list of Snapshot objects in the group_snapshot. :returns: model_update, snapshots_model_update """ return self.create_cgsnapshot(context, group_snapshot, snapshots) def delete_cgsnapshot(self, context, cgsnapshot, snapshots): """Deletes a consistency group snapshot. :param context: the context of the caller. :param cgsnapshot: the dictionary of the cgsnapshot to be deleted. :param snapshots: a list of snapshot dictionaries in the cgsnapshot. :returns: group_model_update, snapshots_model_update """ group_model_update = {} snapshots_model_update = [] for snapshot in snapshots: self.delete_snapshot(snapshot) return group_model_update, snapshots_model_update def delete_group_snapshot(self, context, group_snapshot, snapshots): """Deletes a group_snapshot. :param context: the context of the caller. :param group_snapshot: the GroupSnapshot object to be deleted. :param snapshots: a list of snapshot objects in the group_snapshot. 
:returns: model_update, snapshots_model_update """ return self.delete_cgsnapshot(context, group_snapshot, snapshots) def create_consistencygroup_from_src(self, context, group, volumes, cgsnapshot=None, snapshots=None, source_cg=None, source_vols=None): """Creates a consistency group from source. :param context: the context of the caller. :param group: the dictionary of the consistency group to be created. :param volumes: a list of volume dictionaries in the group. :param cgsnapshot: the dictionary of the cgsnapshot as source. :param snapshots: a list of snapshot dictionaries in the cgsnapshot. :param source_cg: the dictionary of a consistency group as source. :param source_vols: a list of volume dictionaries in the source_cg. :returns: group_model_update, volumes_model_update """ group_model_update = {} volumes_model_update = [] if cgsnapshot and snapshots: for volume, snapshot in zip(volumes, snapshots): self.create_volume_from_snapshot(volume, snapshot) elif source_cg and source_vols: snapshot_name = self.origin_snapshot_template % group['id'] snapshot_path = '%s@%s' % (self.root_path, snapshot_name) create_payload = {'path': snapshot_path, 'recursive': True} self.nef.snapshots.create(create_payload) for volume, source_vol in zip(volumes, source_vols): snapshot = { 'name': snapshot_name, 'volume_id': source_vol['id'], 'volume_name': source_vol['name'], 'volume_size': source_vol['size'] } self.create_volume_from_snapshot(volume, snapshot) delete_payload = {'defer': True, 'recursive': True} self.nef.snapshots.delete(snapshot_path, delete_payload) return group_model_update, volumes_model_update def create_group_from_src(self, context, group, volumes, group_snapshot=None, snapshots=None, source_group=None, source_vols=None): """Creates a group from source. :param context: the context of the caller. :param group: the Group object to be created. :param volumes: a list of Volume objects in the group. :param group_snapshot: the GroupSnapshot object as source. :param snapshots: a list of snapshot objects in group_snapshot. :param source_group: the Group object as source. :param source_vols: a list of volume objects in the source_group. :returns: model_update, volumes_model_update """ return self.create_consistencygroup_from_src(context, group, volumes, group_snapshot, snapshots, source_group, source_vols) def _local_volume_dir(self, volume): """Get volume dir (mounted locally fs path) for given volume. :param volume: volume reference """ share = self._get_volume_share(volume) if isinstance(share, str): share = share.encode('utf-8') path = hashlib.md5(share, usedforsecurity=False).hexdigest() return os.path.join(self.mount_point_base, path) def local_path(self, volume): """Get volume path (mounted locally fs path) for given volume. :param volume: volume reference """ volume_dir = self._local_volume_dir(volume) return os.path.join(volume_dir, 'volume') def _set_volume_acl(self, volume): """Sets access permissions for given volume. 
:param volume: volume reference """ volume_path = self._get_volume_path(volume) payload = { 'type': 'allow', 'principal': 'everyone@', 'permissions': [ 'full_set' ], 'flags': [ 'file_inherit', 'dir_inherit' ] } self.nef.filesystems.acl(volume_path, payload) def _get_volume_share(self, volume): """Return NFS share path for the volume.""" volume_path = self._get_volume_path(volume) payload = {'fields': 'mountPoint'} filesystem = self.nef.filesystems.get(volume_path, payload) return '%s:%s' % (self.nas_host, filesystem['mountPoint']) def _get_volume_path(self, volume): """Return ZFS dataset path for the volume.""" return posixpath.join(self.root_path, volume['name']) def _get_snapshot_path(self, snapshot): """Return ZFS snapshot path for the snapshot.""" volume_name = snapshot['volume_name'] snapshot_name = snapshot['name'] volume_path = posixpath.join(self.root_path, volume_name) return '%s@%s' % (volume_path, snapshot_name) def _update_volume_stats(self): """Retrieve stats info for NexentaStor Appliance.""" LOG.debug('Updating volume backend %(volume_backend_name)s stats', {'volume_backend_name': self.volume_backend_name}) payload = {'fields': 'mountPoint,bytesAvailable,bytesUsed'} dataset = self.nef.filesystems.get(self.root_path, payload) free = dataset['bytesAvailable'] // units.Gi used = dataset['bytesUsed'] // units.Gi total = free + used share = '%s:%s' % (self.nas_host, dataset['mountPoint']) location_info = '%(driver)s:%(share)s' % { 'driver': self.__class__.__name__, 'share': share } self._stats = { 'vendor_name': self.vendor_name, 'dedup': self.deduplicated_volumes, 'compression': self.compressed_volumes, 'description': self.dataset_description, 'nef_url': self.nef.host, 'nef_port': self.nef.port, 'driver_version': self.VERSION, 'storage_protocol': self.storage_protocol, 'sparsed_volumes': self.sparsed_volumes, 'total_capacity_gb': total, 'free_capacity_gb': free, 'reserved_percentage': self.configuration.reserved_percentage, 'QoS_support': False, 'multiattach': True, 'consistencygroup_support': True, 'consistent_group_snapshot_enabled': True, 'location_info': location_info, 'volume_backend_name': self.volume_backend_name, 'nfs_mount_point_base': self.mount_point_base } def _get_existing_volume(self, existing_ref): types = { 'source-name': 'path', 'source-guid': 'guid' } if not any(key in types for key in existing_ref): keys = ', '.join(types.keys()) message = (_('Manage existing volume failed ' 'due to invalid backend reference. 
' 'Volume reference must contain ' 'at least one valid key: %(keys)s') % {'keys': keys}) raise jsonrpc.NefException(code='EINVAL', message=message) payload = { 'parent': self.root_path, 'fields': 'path', 'recursive': False } for key, value in types.items(): if key in existing_ref: if value == 'path': path = posixpath.join(self.root_path, existing_ref[key]) else: path = existing_ref[key] payload[value] = path existing_volumes = self.nef.filesystems.list(payload) if len(existing_volumes) == 1: volume_path = existing_volumes[0]['path'] volume_name = posixpath.basename(volume_path) existing_volume = { 'name': volume_name, 'path': volume_path } vid = volume_utils.extract_id_from_volume_name(volume_name) if volume_utils.check_already_managed_volume(vid): message = (_('Volume %(name)s already managed') % {'name': volume_name}) raise jsonrpc.NefException(code='EBUSY', message=message) return existing_volume elif not existing_volumes: code = 'ENOENT' reason = _('no matching volumes were found') else: code = 'EINVAL' reason = _('too many volumes were found') message = (_('Unable to manage existing volume by ' 'reference %(reference)s: %(reason)s') % {'reference': existing_ref, 'reason': reason}) raise jsonrpc.NefException(code=code, message=message) def _check_already_managed_snapshot(self, snapshot_id): """Check cinder database for already managed snapshot. :param snapshot_id: snapshot id parameter :returns: return True, if database entry with specified snapshot id exists, otherwise return False """ if not isinstance(snapshot_id, str): return False try: uuid.UUID(snapshot_id, version=4) except ValueError: return False ctxt = context.get_admin_context() return objects.Snapshot.exists(ctxt, snapshot_id) def _get_existing_snapshot(self, snapshot, existing_ref): types = { 'source-name': 'name', 'source-guid': 'guid' } if not any(key in types for key in existing_ref): keys = ', '.join(types.keys()) message = (_('Manage existing snapshot failed ' 'due to invalid backend reference. ' 'Snapshot reference must contain ' 'at least one valid key: %(keys)s') % {'keys': keys}) raise jsonrpc.NefException(code='EINVAL', message=message) volume_name = snapshot['volume_name'] volume_size = snapshot['volume_size'] volume = {'name': volume_name} volume_path = self._get_volume_path(volume) payload = { 'parent': volume_path, 'fields': 'name,path', 'recursive': False } for key, value in types.items(): if key in existing_ref: payload[value] = existing_ref[key] existing_snapshots = self.nef.snapshots.list(payload) if len(existing_snapshots) == 1: name = existing_snapshots[0]['name'] path = existing_snapshots[0]['path'] existing_snapshot = { 'name': name, 'path': path, 'volume_name': volume_name, 'volume_size': volume_size } sid = volume_utils.extract_id_from_snapshot_name(name) if self._check_already_managed_snapshot(sid): message = (_('Snapshot %(name)s already managed') % {'name': name}) raise jsonrpc.NefException(code='EBUSY', message=message) return existing_snapshot elif not existing_snapshots: code = 'ENOENT' reason = _('no matching snapshots were found') else: code = 'EINVAL' reason = _('too many snapshots were found') message = (_('Unable to manage existing snapshot by ' 'reference %(reference)s: %(reason)s') % {'reference': existing_ref, 'reason': reason}) raise jsonrpc.NefException(code=code, message=message) @coordination.synchronized('{self.nef.lock}') def manage_existing(self, volume, existing_ref): """Brings an existing backend storage object under Cinder management. 
existing_ref is passed straight through from the API request's manage_existing_ref value, and it is up to the driver how this should be interpreted. It should be sufficient to identify a storage object that the driver should somehow associate with the newly-created cinder volume structure. There are two ways to do this: 1. Rename the backend storage object so that it matches the, volume['name'] which is how drivers traditionally map between a cinder volume and the associated backend storage object. 2. Place some metadata on the volume, or somewhere in the backend, that allows other driver requests (e.g. delete, clone, attach, detach...) to locate the backend storage object when required. If the existing_ref doesn't make sense, or doesn't refer to an existing backend storage object, raise a ManageExistingInvalidReference exception. The volume may have a volume_type, and the driver can inspect that and compare against the properties of the referenced backend storage object. If they are incompatible, raise a ManageExistingVolumeTypeMismatch, specifying a reason for the failure. :param volume: Cinder volume to manage :param existing_ref: Driver-specific information used to identify a volume """ existing_volume = self._get_existing_volume(existing_ref) existing_volume_path = existing_volume['path'] if existing_volume['name'] != volume['name']: volume_path = self._get_volume_path(volume) payload = {'newPath': volume_path} self.nef.filesystems.rename(existing_volume_path, payload) def manage_existing_get_size(self, volume, existing_ref): """Return size of volume to be managed by manage_existing. When calculating the size, round up to the next GB. :param volume: Cinder volume to manage :param existing_ref: Driver-specific information used to identify a volume :returns size: Volume size in GiB (integer) """ existing_volume = self._get_existing_volume(existing_ref) self._set_volume_acl(existing_volume) self._mount_volume(existing_volume) local_path = self.local_path(existing_volume) try: volume_size = os.path.getsize(local_path) except OSError as error: code = errno.errorcode[error.errno] message = (_('Manage existing volume %(name)s failed: ' 'unable to get size of volume data file ' '%(file)s: %(error)s') % {'name': existing_volume['name'], 'file': local_path, 'error': error.strerror}) raise jsonrpc.NefException(code=code, message=message) finally: self._unmount_volume(existing_volume) return volume_size // units.Gi def get_manageable_volumes(self, cinder_volumes, marker, limit, offset, sort_keys, sort_dirs): """List volumes on the backend available for management by Cinder. Returns a list of dictionaries, each specifying a volume in the host, with the following keys: - reference (dictionary): The reference for a volume, which can be passed to "manage_existing". - size (int): The size of the volume according to the storage backend, rounded up to the nearest GB. - safe_to_manage (boolean): Whether or not this volume is safe to manage according to the storage backend. For example, is the volume in use or invalid for any reason. - reason_not_safe (string): If safe_to_manage is False, the reason why. - cinder_id (string): If already managed, provide the Cinder ID. - extra_info (string): Any extra information to return to the user :param cinder_volumes: A list of volumes in this host that Cinder currently manages, used to determine if a volume is manageable or not. 
:param marker: The last item of the previous page; we return the next results after this value (after sorting) :param limit: Maximum number of items to return :param offset: Number of items to skip after marker :param sort_keys: List of keys to sort results by (valid keys are 'identifier' and 'size') :param sort_dirs: List of directions to sort by, corresponding to sort_keys (valid directions are 'asc' and 'desc') """ manageable_volumes = [] cinder_volume_names = {} for cinder_volume in cinder_volumes: key = cinder_volume['name'] value = cinder_volume['id'] cinder_volume_names[key] = value payload = { 'parent': self.root_path, 'fields': 'guid,parent,path,bytesUsed', 'recursive': False } volumes = self.nef.filesystems.list(payload) for volume in volumes: safe_to_manage = True reason_not_safe = None cinder_id = None extra_info = None path = volume['path'] guid = volume['guid'] parent = volume['parent'] size = volume['bytesUsed'] // units.Gi name = posixpath.basename(path) if path == self.root_path: continue if parent != self.root_path: continue if name in cinder_volume_names: cinder_id = cinder_volume_names[name] safe_to_manage = False reason_not_safe = _('Volume already managed') reference = { 'source-name': name, 'source-guid': guid } manageable_volumes.append({ 'reference': reference, 'size': size, 'safe_to_manage': safe_to_manage, 'reason_not_safe': reason_not_safe, 'cinder_id': cinder_id, 'extra_info': extra_info }) return volume_utils.paginate_entries_list(manageable_volumes, marker, limit, offset, sort_keys, sort_dirs) def unmanage(self, volume): """Removes the specified volume from Cinder management. Does not delete the underlying backend storage object. For most drivers, this will not need to do anything. However, some drivers might use this call as an opportunity to clean up any Cinder-specific configuration that they have associated with the backend storage object. :param volume: Cinder volume to unmanage """ pass @coordination.synchronized('{self.nef.lock}') def manage_existing_snapshot(self, snapshot, existing_ref): """Brings an existing backend storage object under Cinder management. existing_ref is passed straight through from the API request's manage_existing_ref value, and it is up to the driver how this should be interpreted. It should be sufficient to identify a storage object that the driver should somehow associate with the newly-created cinder snapshot structure. There are two ways to do this: 1. Rename the backend storage object so that it matches the snapshot['name'] which is how drivers traditionally map between a cinder snapshot and the associated backend storage object. 2. Place some metadata on the snapshot, or somewhere in the backend, that allows other driver requests (e.g. delete) to locate the backend storage object when required. If the existing_ref doesn't make sense, or doesn't refer to an existing backend storage object, raise a ManageExistingInvalidReference exception. :param snapshot: Cinder volume snapshot to manage :param existing_ref: Driver-specific information used to identify a volume snapshot """ existing_snapshot = self._get_existing_snapshot(snapshot, existing_ref) existing_snapshot_path = existing_snapshot['path'] if existing_snapshot['name'] != snapshot['name']: payload = {'newName': snapshot['name']} self.nef.snapshots.rename(existing_snapshot_path, payload) def manage_existing_snapshot_get_size(self, snapshot, existing_ref): """Return size of snapshot to be managed by manage_existing. When calculating the size, round up to the next GB. 
:param snapshot: Cinder volume snapshot to manage :param existing_ref: Driver-specific information used to identify a volume snapshot :returns size: Volume snapshot size in GiB (integer) """ existing_snapshot = self._get_existing_snapshot(snapshot, existing_ref) return existing_snapshot['volume_size'] def get_manageable_snapshots(self, cinder_snapshots, marker, limit, offset, sort_keys, sort_dirs): """List snapshots on the backend available for management by Cinder. Returns a list of dictionaries, each specifying a snapshot in the host, with the following keys: - reference (dictionary): The reference for a snapshot, which can be passed to "manage_existing_snapshot". - size (int): The size of the snapshot according to the storage backend, rounded up to the nearest GB. - safe_to_manage (boolean): Whether or not this snapshot is safe to manage according to the storage backend. For example, is the snapshot in use or invalid for any reason. - reason_not_safe (string): If safe_to_manage is False, the reason why. - cinder_id (string): If already managed, provide the Cinder ID. - extra_info (string): Any extra information to return to the user - source_reference (string): Similar to "reference", but for the snapshot's source volume. :param cinder_snapshots: A list of snapshots in this host that Cinder currently manages, used to determine if a snapshot is manageable or not. :param marker: The last item of the previous page; we return the next results after this value (after sorting) :param limit: Maximum number of items to return :param offset: Number of items to skip after marker :param sort_keys: List of keys to sort results by (valid keys are 'identifier' and 'size') :param sort_dirs: List of directions to sort by, corresponding to sort_keys (valid directions are 'asc' and 'desc') """ manageable_snapshots = [] cinder_volume_names = {} cinder_snapshot_names = {} ctxt = context.get_admin_context() cinder_volumes = objects.VolumeList.get_all_by_host(ctxt, self.host) for cinder_volume in cinder_volumes: key = self._get_volume_path(cinder_volume) value = { 'name': cinder_volume['name'], 'size': cinder_volume['size'] } cinder_volume_names[key] = value for cinder_snapshot in cinder_snapshots: key = cinder_snapshot['name'] value = { 'id': cinder_snapshot['id'], 'size': cinder_snapshot['volume_size'], 'parent': cinder_snapshot['volume_name'] } cinder_snapshot_names[key] = value payload = { 'parent': self.root_path, 'fields': 'name,guid,path,parent,hprService,snaplistId', 'recursive': True } snapshots = self.nef.snapshots.list(payload) for snapshot in snapshots: safe_to_manage = True reason_not_safe = None cinder_id = None extra_info = None name = snapshot['name'] guid = snapshot['guid'] path = snapshot['path'] parent = snapshot['parent'] if parent not in cinder_volume_names: LOG.debug('Skip snapshot %(path)s: parent ' 'volume %(parent)s is unmanaged', {'path': path, 'parent': parent}) continue if name.startswith(self.origin_snapshot_template): LOG.debug('Skip temporary origin snapshot %(path)s', {'path': path}) continue if name.startswith(self.group_snapshot_template): LOG.debug('Skip temporary group snapshot %(path)s', {'path': path}) continue if snapshot['hprService'] or snapshot['snaplistId']: LOG.debug('Skip HPR/snapping snapshot %(path)s', {'path': path}) continue if name in cinder_snapshot_names: size = cinder_snapshot_names[name]['size'] cinder_id = cinder_snapshot_names[name]['id'] safe_to_manage = False reason_not_safe = _('Snapshot already managed') else: size = 
cinder_volume_names[parent]['size'] payload = {'fields': 'clones'} props = self.nef.snapshots.get(path) clones = props['clones'] unmanaged_clones = [] for clone in clones: if clone not in cinder_volume_names: unmanaged_clones.append(clone) if unmanaged_clones: safe_to_manage = False dependent_clones = ', '.join(unmanaged_clones) reason_not_safe = (_('Snapshot has unmanaged ' 'dependent clone(s) %(clones)s') % {'clones': dependent_clones}) reference = { 'source-name': name, 'source-guid': guid } source_reference = { 'name': cinder_volume_names[parent]['name'] } manageable_snapshots.append({ 'reference': reference, 'size': size, 'safe_to_manage': safe_to_manage, 'reason_not_safe': reason_not_safe, 'cinder_id': cinder_id, 'extra_info': extra_info, 'source_reference': source_reference }) return volume_utils.paginate_entries_list(manageable_snapshots, marker, limit, offset, sort_keys, sort_dirs) def unmanage_snapshot(self, snapshot): """Removes the specified snapshot from Cinder management. Does not delete the underlying backend storage object. For most drivers, this will not need to do anything. However, some drivers might use this call as an opportunity to clean up any Cinder-specific configuration that they have associated with the backend storage object. :param snapshot: Cinder volume snapshot to unmanage """ pass ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/drivers/nexenta/options.py0000664000175000017500000002607100000000000023035 0ustar00zuulzuul00000000000000# Copyright 2019 Nexenta Systems, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
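# The options registered in this module are read from per-backend sections of
# cinder.conf. A minimal, illustrative NexentaStor5 NFS backend stanza is
# sketched below; the section name, addresses and share path are placeholders,
# and the volume_driver path assumes the NexentaStor5 NFS driver shipped in
# this tree:
#
#   [nexenta-ns5-nfs]
#   volume_driver = cinder.volume.drivers.nexenta.ns5.nfs.NexentaNfsDriver
#   volume_backend_name = nexenta-ns5-nfs
#   nas_host = 203.0.113.10
#   nas_share_path = cinder/nfs
#   nexenta_use_https = True
#   nexenta_dataset_compression = lz4
#   nexenta_sparsed_volumes = True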
from oslo_config import cfg from cinder.volume import configuration as conf DEFAULT_ISCSI_PORT = 3260 DEFAULT_HOST_GROUP = 'all' DEFAULT_TARGET_GROUP = 'all' NEXENTA_EDGE_OPTS = [ cfg.StrOpt('nexenta_nbd_symlinks_dir', default='/dev/disk/by-path', help='NexentaEdge logical path of directory to store symbolic ' 'links to NBDs'), cfg.StrOpt('nexenta_rest_user', default='admin', help='User name to connect to NexentaEdge.'), cfg.StrOpt('nexenta_rest_password', default='nexenta', help='Password to connect to NexentaEdge.', secret=True), cfg.StrOpt('nexenta_lun_container', default='', help='NexentaEdge logical path of bucket for LUNs'), cfg.StrOpt('nexenta_iscsi_service', default='', help='NexentaEdge iSCSI service name'), cfg.StrOpt('nexenta_client_address', deprecated_for_removal=True, deprecated_reason='iSCSI target address should now be set using' ' the common param target_ip_address.', default='', help='NexentaEdge iSCSI Gateway client ' 'address for non-VIP service'), cfg.IntOpt('nexenta_iops_limit', default=0, help='NexentaEdge iSCSI LUN object IOPS limit'), cfg.IntOpt('nexenta_chunksize', default=32768, help='NexentaEdge iSCSI LUN object chunk size'), cfg.IntOpt('nexenta_replication_count', default=3, help='NexentaEdge iSCSI LUN object replication count.'), cfg.BoolOpt('nexenta_encryption', default=False, help='Defines whether NexentaEdge iSCSI LUN object ' 'has encryption enabled.', deprecated_for_removal=True, deprecated_since="2024.2", deprecated_reason="Unused option") ] NEXENTA_CONNECTION_OPTS = [ cfg.StrOpt('nexenta_host', default='', help='IP address of NexentaStor Appliance'), cfg.StrOpt('nexenta_rest_address', deprecated_for_removal=True, deprecated_reason='Rest address should now be set using ' 'the common param depending on driver type, ' 'san_ip or nas_host', default='', help='IP address of NexentaStor management REST API endpoint'), cfg.IntOpt('nexenta_rest_port', deprecated_for_removal=True, deprecated_reason='Rest address should now be set using ' 'the common param san_api_port.', default=0, help='HTTP(S) port to connect to NexentaStor management ' 'REST API server. 
If it is equal zero, 8443 for ' 'HTTPS and 8080 for HTTP is used'), cfg.StrOpt('nexenta_rest_protocol', default='auto', choices=['http', 'https', 'auto'], help='Use http or https for NexentaStor management ' 'REST API connection (default auto)'), cfg.FloatOpt('nexenta_rest_connect_timeout', default=30, help='Specifies the time limit (in seconds), within ' 'which the connection to NexentaStor management ' 'REST API server must be established'), cfg.FloatOpt('nexenta_rest_read_timeout', default=300, help='Specifies the time limit (in seconds), ' 'within which NexentaStor management ' 'REST API server must send a response'), cfg.FloatOpt('nexenta_rest_backoff_factor', default=0.5, help='Specifies the backoff factor to apply ' 'between connection attempts to NexentaStor ' 'management REST API server'), cfg.IntOpt('nexenta_rest_retry_count', default=3, help='Specifies the number of times to repeat NexentaStor ' 'management REST API call in case of connection errors ' 'and NexentaStor appliance EBUSY or ENOENT errors'), cfg.BoolOpt('nexenta_use_https', default=True, help='Use HTTP secure protocol for NexentaStor ' 'management REST API connections'), cfg.BoolOpt('nexenta_lu_writebackcache_disabled', default=False, help='Postponed write to backing store or not'), cfg.StrOpt('nexenta_user', deprecated_for_removal=True, deprecated_reason='Common user parameters should be used ' 'depending on the driver type: ' 'san_login or nas_login', default='admin', help='User name to connect to NexentaStor ' 'management REST API server'), cfg.StrOpt('nexenta_password', deprecated_for_removal=True, deprecated_reason='Common password parameters should be used ' 'depending on the driver type: ' 'san_password or nas_password', default='nexenta', help='Password to connect to NexentaStor ' 'management REST API server', secret=True) ] NEXENTA_ISCSI_OPTS = [ cfg.StrOpt('nexenta_iscsi_target_portal_groups', default='', help='NexentaStor target portal groups'), cfg.StrOpt('nexenta_iscsi_target_portals', default='', help='Comma separated list of portals for NexentaStor5, in ' 'format of IP1:port1,IP2:port2. Port is optional, ' 'default=3260. Example: 10.10.10.1:3267,10.10.1.2'), cfg.StrOpt('nexenta_iscsi_target_host_group', default='all', help='Group of hosts which are allowed to access volumes'), cfg.IntOpt('nexenta_iscsi_target_portal_port', default=3260, help='Nexenta appliance iSCSI target portal port'), cfg.IntOpt('nexenta_luns_per_target', default=100, help='Amount of LUNs per iSCSI target'), cfg.StrOpt('nexenta_volume', default='cinder', help='NexentaStor pool name that holds all volumes'), cfg.StrOpt('nexenta_target_prefix', default='iqn.1986-03.com.sun:02:cinder', help='iqn prefix for NexentaStor iSCSI targets'), cfg.StrOpt('nexenta_target_group_prefix', default='cinder', help='Prefix for iSCSI target groups on NexentaStor'), cfg.StrOpt('nexenta_host_group_prefix', default='cinder', help='Prefix for iSCSI host groups on NexentaStor'), cfg.StrOpt('nexenta_volume_group', default='iscsi', help='Volume group for NexentaStor5 iSCSI'), ] NEXENTA_NFS_OPTS = [ cfg.StrOpt('nexenta_shares_config', default='/etc/cinder/nfs_shares', help='File with the list of available nfs shares'), cfg.StrOpt('nexenta_mount_point_base', default='$state_path/mnt', help='Base directory that contains NFS share mount points'), cfg.BoolOpt('nexenta_sparsed_volumes', default=True, help='Enables or disables the creation of volumes as ' 'sparsed files that take no space. 
If disabled ' '(False), volume is created as a regular file, ' 'which takes a long time.'), cfg.BoolOpt('nexenta_qcow2_volumes', default=False, help='Create volumes as QCOW2 files rather than raw files'), cfg.BoolOpt('nexenta_nms_cache_volroot', default=True, help=('If set True cache NexentaStor appliance volroot option ' 'value.')) ] NEXENTA_DATASET_OPTS = [ cfg.StrOpt('nexenta_dataset_compression', default='on', choices=['on', 'off', 'gzip', 'gzip-1', 'gzip-2', 'gzip-3', 'gzip-4', 'gzip-5', 'gzip-6', 'gzip-7', 'gzip-8', 'gzip-9', 'lzjb', 'zle', 'lz4'], help='Compression value for new ZFS folders.'), cfg.StrOpt('nexenta_dataset_dedup', default='off', choices=['on', 'off', 'sha256', 'verify', 'sha256, verify'], help='Deduplication value for new ZFS folders.'), cfg.StrOpt('nexenta_folder', default='', help='A folder where cinder created datasets will reside.'), cfg.StrOpt('nexenta_dataset_description', default='', help='Human-readable description for the folder.'), cfg.IntOpt('nexenta_blocksize', default=4096, help='Block size for datasets'), cfg.IntOpt('nexenta_ns5_blocksize', default=32, help='Block size for datasets'), cfg.BoolOpt('nexenta_sparse', default=False, help='Enables or disables the creation of sparse datasets'), cfg.StrOpt('nexenta_origin_snapshot_template', default='origin-snapshot-%s', help='Template string to generate origin name of clone'), cfg.StrOpt('nexenta_group_snapshot_template', default='group-snapshot-%s', help='Template string to generate group snapshot name') ] NEXENTA_RRMGR_OPTS = [ cfg.IntOpt('nexenta_rrmgr_compression', default=0, help=('Enable stream compression, level 1..9. 1 - gives best ' 'speed; 9 - gives best compression.')), cfg.IntOpt('nexenta_rrmgr_tcp_buf_size', default=4096, help='TCP Buffer size in KiloBytes.'), cfg.IntOpt('nexenta_rrmgr_connections', default=2, help='Number of TCP connections.'), ] CONF = cfg.CONF CONF.register_opts(NEXENTA_CONNECTION_OPTS, group=conf.SHARED_CONF_GROUP) CONF.register_opts(NEXENTA_ISCSI_OPTS, group=conf.SHARED_CONF_GROUP) CONF.register_opts(NEXENTA_DATASET_OPTS, group=conf.SHARED_CONF_GROUP) CONF.register_opts(NEXENTA_NFS_OPTS, group=conf.SHARED_CONF_GROUP) CONF.register_opts(NEXENTA_RRMGR_OPTS, group=conf.SHARED_CONF_GROUP) CONF.register_opts(NEXENTA_EDGE_OPTS, group=conf.SHARED_CONF_GROUP) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/drivers/nexenta/utils.py0000664000175000017500000000725700000000000022507 0ustar00zuulzuul00000000000000# Copyright 2018 Nexenta Systems, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import re from urllib import parse as urlparse from oslo_utils import units from cinder import exception from cinder.i18n import _ class NexentaException(exception.VolumeDriverException): message = "%(reason)s" def str2size(s, scale=1024): """Convert size-string. String format: [:space:] to bytes. 
:param s: size-string :param scale: base size """ if not s: return 0 if isinstance(s, int): return s match = re.match(r'^([\.\d]+)\s*([BbKkMmGgTtPpEeZzYy]?)', s) if match is None: raise ValueError(_('Invalid value: %(value)s') % {'value': s}) groups = match.groups() value = float(groups[0]) suffix = groups[1].upper() if groups[1] else 'B' types = ('B', 'K', 'M', 'G', 'T', 'P', 'E', 'Z', 'Y') for i, t in enumerate(types): if suffix == t: return int(value * pow(scale, i)) def str2gib_size(s): """Covert size-string to size in gigabytes.""" size_in_bytes = str2size(s) return size_in_bytes // units.Gi def get_rrmgr_cmd(src, dst, compression=None, tcp_buf_size=None, connections=None): """Returns rrmgr command for source and destination.""" cmd = ['rrmgr', '-s', 'zfs'] if compression: cmd.extend(['-c', str(compression)]) cmd.append('-q') cmd.append('-e') if tcp_buf_size: cmd.extend(['-w', str(tcp_buf_size)]) if connections: cmd.extend(['-n', str(connections)]) cmd.extend([src, dst]) return ' '.join(cmd) def parse_nms_url(url): """Parse NMS url into normalized parts like scheme, user, host and others. Example NMS URL: auto://admin:nexenta@192.168.1.1:2000/ NMS URL parts: .. code-block:: none auto True if url starts with auto://, protocol will be automatically switched to https if http not supported; scheme (auto) connection protocol (http or https); user (admin) NMS user; password (nexenta) NMS password; host (192.168.1.1) NMS host; port (2000) NMS port. :param url: url string :return: tuple (auto, scheme, user, password, host, port, path) """ pr = urlparse.urlparse(url) scheme = pr.scheme auto = scheme == 'auto' if auto: scheme = 'http' user = 'admin' password = 'nexenta' if '@' not in pr.netloc: host_and_port = pr.netloc else: user_and_password, host_and_port = pr.netloc.split('@', 1) if ':' in user_and_password: user, password = user_and_password.split(':') else: user = user_and_password if ':' in host_and_port: host, port = host_and_port.split(':', 1) else: host, port = host_and_port, '2000' return auto, scheme, user, password, host, port, '/rest/nms/' def get_migrate_snapshot_name(volume): """Return name for snapshot that will be used to migrate the volume.""" return 'cinder-migrate-snapshot-%(id)s' % volume ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/drivers/nfs.py0000664000175000017500000007402000000000000020463 0ustar00zuulzuul00000000000000# Copyright (c) 2012 NetApp, Inc. # Copyright (c) 2016 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
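# The generic NFS driver defined in this module is configured through the
# nfs_* options registered below. A minimal, illustrative setup; the server
# address, export path and backend name are placeholders:
#
#   # /etc/cinder/nfs_shares
#   203.0.113.20:/export/cinder
#
#   # cinder.conf
#   [nfs-backend]
#   volume_driver = cinder.volume.drivers.nfs.NfsDriver
#   volume_backend_name = nfs-backend
#   nfs_shares_config = /etc/cinder/nfs_shares
#   nfs_mount_point_base = $state_path/mnt
#   nfs_sparsed_volumes = True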
import binascii import errno import os import tempfile import time import typing from castellan import key_manager from os_brick.remotefs import remotefs as remotefs_brick from oslo_concurrency import processutils as putils from oslo_config import cfg from oslo_log import log as logging from oslo_utils import units from cinder import context from cinder import coordination from cinder import exception from cinder.i18n import _ from cinder.image import image_utils from cinder import interface from cinder import objects from cinder import utils from cinder.volume import configuration from cinder.volume.drivers import remotefs from cinder.volume import volume_utils VERSION = '1.4.0' LOG = logging.getLogger(__name__) nfs_opts = [ cfg.StrOpt('nfs_shares_config', default='/etc/cinder/nfs_shares', help='File with the list of available NFS shares.'), cfg.BoolOpt('nfs_sparsed_volumes', default=True, help='Create volumes as sparsed files which take no space. ' 'If set to False volume is created as regular file. ' 'In such case volume creation takes a lot of time.'), cfg.BoolOpt('nfs_qcow2_volumes', default=False, help='Create volumes as QCOW2 files rather than raw files.'), cfg.StrOpt('nfs_mount_point_base', default='$state_path/mnt', help='Base dir containing mount points for NFS shares.'), cfg.StrOpt('nfs_mount_options', help='Mount options passed to the NFS client. See the NFS(5) ' 'man page for details.'), cfg.IntOpt('nfs_mount_attempts', default=3, help='The number of attempts to mount NFS shares before ' 'raising an error. At least one attempt will be ' 'made to mount an NFS share, regardless of the ' 'value specified.'), cfg.BoolOpt('nfs_snapshot_support', default=False, help='Enable support for snapshots on the NFS driver. ' 'Platforms using libvirt <1.2.7 will encounter issues ' 'with this feature.'), ] CONF = cfg.CONF CONF.register_opts(nfs_opts, group=configuration.SHARED_CONF_GROUP) @interface.volumedriver class NfsDriver(remotefs.RemoteFSSnapDriverDistributed): """NFS based cinder driver. Creates file on NFS share for using it as block device on hypervisor. """ driver_volume_type = 'nfs' driver_prefix = 'nfs' volume_backend_name = 'Generic_NFS' VERSION = VERSION # ThirdPartySystems wiki page CI_WIKI_NAME = "Cinder_Jenkins" def __init__(self, execute=putils.execute, *args, **kwargs): self._remotefsclient = None super(NfsDriver, self).__init__(*args, **kwargs) self.configuration.append_config_values(nfs_opts) root_helper = utils.get_root_helper() # base bound to instance is used in RemoteFsConnector. 
base = getattr(self.configuration, 'nfs_mount_point_base') self.base = os.path.realpath(base) opts = getattr(self.configuration, 'nfs_mount_options') nas_mount_options = getattr(self.configuration, 'nas_mount_options', None) if nas_mount_options is not None: LOG.debug('overriding nfs_mount_options with nas_mount_options') opts = nas_mount_options self._remotefsclient = remotefs_brick.RemoteFsClient( 'nfs', root_helper, execute=execute, nfs_mount_point_base=self.base, nfs_mount_options=opts) supports_auto_mosr = kwargs.get('supports_auto_mosr', False) self._sparse_copy_volume_data = True self.reserved_percentage = self.configuration.reserved_percentage self.max_over_subscription_ratio = ( volume_utils.get_max_over_subscription_ratio( self.configuration.max_over_subscription_ratio, supports_auto=supports_auto_mosr)) self._supports_encryption = True @staticmethod def get_driver_options(): return nfs_opts + remotefs.nas_opts def initialize_connection(self, volume, connector): LOG.debug('Initializing connection to volume %(vol)s. ' 'Connector: %(con)s', {'vol': volume.id, 'con': connector}) active_vol = self.get_active_image_from_info(volume) volume_dir = self._local_volume_dir(volume) path_to_vol = os.path.join(volume_dir, active_vol) info = self._qemu_img_info(path_to_vol, volume['name']) data = {'export': volume.provider_location, 'name': active_vol} if volume.provider_location in self.shares: data['options'] = self.shares[volume.provider_location] conn_info = { 'driver_volume_type': self.driver_volume_type, 'data': data, 'mount_point_base': self._get_mount_point_base() } # Test file for raw vs. qcow2 format if info.file_format not in ['raw', 'qcow2']: msg = _('nfs volume must be a valid raw or qcow2 image.') raise exception.InvalidVolume(reason=msg) # Test if the size is accurate or if something tried to modify it if info.virtual_size != volume.size * units.Gi: LOG.error('The volume virtual_size does not match the size in ' 'cinder, aborting as we suspect an exploit. ' 'Virtual Size is %(vsize)s and real size is %(size)s', {'vsize': info.virtual_size, 'size': volume.size}) msg = _('The volume virtual_size does not match the size in ' 'cinder, aborting as we suspect an exploit.') raise exception.InvalidVolume(reason=msg) conn_info['data']['format'] = info.file_format LOG.debug('NfsDriver: conn_info: %s', conn_info) return conn_info def do_setup(self, context): """Any initialization the volume driver does while starting.""" super(NfsDriver, self).do_setup(context) nas_host = getattr(self.configuration, 'nas_host', None) nas_share_path = getattr(self.configuration, 'nas_share_path', None) # If both nas_host and nas_share_path are set we are not # going to use the nfs_shares_config file. So, only check # for its existence if it is going to be used. if (not nas_host) or (not nas_share_path): config = self.configuration.nfs_shares_config if not config: msg = (_("There's no NFS config file configured (%s)") % 'nfs_shares_config') LOG.warning(msg) raise exception.NfsException(msg) if not os.path.exists(config): msg = (_("NFS config file at %(config)s doesn't exist") % {'config': config}) LOG.warning(msg) raise exception.NfsException(msg) self.shares = {} # address : options # Check if mount.nfs is installed on this system; note that we # need to be root, to also find mount.nfs on distributions, where # it is not located in an unprivileged users PATH (e.g. /sbin). 
package = 'mount.nfs' try: self._execute(package, check_exit_code=False, run_as_root=True) except OSError as exc: if exc.errno == errno.ENOENT: msg = _('%s is not installed') % package raise exception.NfsException(msg) else: raise # Now that all configuration data has been loaded (shares), # we can "set" our final NAS file security options. self.set_nas_security_options(self._is_voldb_empty_at_startup) self._check_snapshot_support(setup_checking=True) def _ensure_share_mounted(self, nfs_share): mnt_flags = [] if self.shares.get(nfs_share) is not None: mnt_flags = self.shares[nfs_share].split() num_attempts = max(1, self.configuration.nfs_mount_attempts) for attempt in range(num_attempts): try: self._remotefsclient.mount(nfs_share, mnt_flags) return except Exception as e: if attempt == (num_attempts - 1): LOG.error('Mount failure for %(share)s after ' '%(count)d attempts.', {'share': nfs_share, 'count': num_attempts}) raise exception.NfsException(str(e)) LOG.debug('Mount attempt %(attempt)d failed: %(exc)s.\n' 'Retrying mount ...', {'attempt': attempt, 'exc': e}) time.sleep(1) def _find_share(self, volume): """Choose NFS share among available ones for given volume size. For instances with more than one share that meets the criteria, the share with the least "allocated" space will be selected. :param volume: the volume to be created. """ if not self._mounted_shares: raise exception.NfsNoSharesMounted() target_share = None target_share_reserved = 0 for nfs_share in self._mounted_shares: total_size, total_available, total_allocated = ( self._get_capacity_info(nfs_share)) share_info = {'total_size': total_size, 'total_available': total_available, 'total_allocated': total_allocated, } if not self._is_share_eligible(nfs_share, volume.size, share_info): continue if target_share is not None: if target_share_reserved > total_allocated: target_share = nfs_share target_share_reserved = total_allocated else: target_share = nfs_share target_share_reserved = total_allocated if target_share is None: raise exception.NfsNoSuitableShareFound( volume_size=volume.size) LOG.debug('Selected %s as target NFS share.', target_share) return target_share def _is_share_eligible(self, nfs_share, volume_size_in_gib, share_info=None): """Verifies NFS share is eligible to host volume with given size. First validation step: ratio of actual space (used_space / total_space) is less than used_ratio. Second validation step: apparent space allocated (differs from actual space used when using sparse files) and compares the apparent available space (total_available * oversub_ratio) to ensure enough space is available for the new volume. :param nfs_share: NFS share :param volume_size_in_gib: int size in GB """ # Because the generic NFS driver aggregates over all shares # when reporting capacity and usage stats to the scheduler, # we still have to perform some scheduler-like capacity # checks here, and these have to take into account # configuration for reserved space and oversubscription. # It would be better to do all this in the scheduler, but # this requires either pool support for the generic NFS # driver or limiting each NFS backend driver to a single share. 
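# As a worked example with hypothetical numbers (expressed in GiB for
# readability): for a share with total_size=100, total_available=40 and
# total_allocated=150 (sparse files), reserved_percentage=10 and
# max_over_subscription_ratio=2.0, a 30GiB volume passes all three checks
# below: the actual used ratio (100-40)/100=0.6 is within the 0.9 limit,
# the apparent available space 100*2.0-150=50 exceeds the 30GiB request,
# and the allocation ratio 150/100=1.5 stays under 2.0.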
# derive used_ratio from reserved percentage if share_info is None: total_size, total_available, total_allocated = ( self._get_capacity_info(nfs_share)) share_info = {'total_size': total_size, 'total_available': total_available, 'total_allocated': total_allocated, } used_percentage = 100 - self.reserved_percentage used_ratio = used_percentage / 100.0 requested_volume_size = volume_size_in_gib * units.Gi apparent_size = max(0, share_info['total_size'] * self.max_over_subscription_ratio) apparent_available = max(0, apparent_size - share_info['total_allocated']) actual_used_ratio = ((share_info['total_size'] - share_info['total_available']) / float(share_info['total_size'])) if actual_used_ratio > used_ratio: # NOTE(morganfainberg): We check the used_ratio first since # with oversubscription it is possible to not have the actual # available space but be within our oversubscription limit # therefore allowing this share to still be selected as a valid # target. LOG.debug('%s is not eligible - used ratio exceeded.', nfs_share) return False if apparent_available <= requested_volume_size: LOG.debug('%s is not eligible - insufficient (apparent) available ' 'space.', nfs_share) return False if share_info['total_allocated'] / share_info['total_size'] >= ( self.max_over_subscription_ratio): LOG.debug('%s is not eligible - utilization exceeds max ' 'over subscription ratio.', nfs_share) return False return True def _get_mount_point_for_share(self, nfs_share): """Needed by parent class.""" return self._remotefsclient.get_mount_point(nfs_share) def _get_capacity_info(self, nfs_share): """Calculate available space on the NFS share. :param nfs_share: example 172.18.194.100:/var/nfs """ mount_point = self._get_mount_point_for_share(nfs_share) df, _ = self._execute('stat', '-f', '-c', '%S %b %a', mount_point, run_as_root=self._execute_as_root) block_size, blocks_total, blocks_avail = map(float, df.split()) total_available = block_size * blocks_avail total_size = block_size * blocks_total du, _ = self._execute('du', '-sb', '--apparent-size', '--exclude', '*snapshot*', mount_point, run_as_root=self._execute_as_root) total_allocated = float(du.split()[0]) return total_size, total_available, total_allocated def _get_mount_point_base(self): return self.base def extend_volume(self, volume, new_size): """Extend an existing volume to the new size.""" if self._is_volume_attached(volume): # NOTE(kaisers): no attached extensions until #1870367 is fixed msg = (_("Cannot extend volume %s while it is attached.") % volume['id']) raise exception.ExtendVolumeError(msg) LOG.info('Extending volume %s.', volume.id) extend_by = int(new_size) - volume.size if not self._is_share_eligible(volume.provider_location, extend_by): raise exception.ExtendVolumeError(reason='Insufficient space to' ' extend volume %s to %sG' % (volume.id, new_size)) # Use the active image file because this volume might have snapshot(s). 
active_file = self.get_active_image_from_info(volume) active_file_path = os.path.join(self._local_volume_dir(volume), active_file) LOG.info('Resizing file to %sG...', new_size) file_format = None admin_metadata = objects.Volume.get_by_id( context.get_admin_context(), volume.id).admin_metadata if admin_metadata and 'format' in admin_metadata: file_format = admin_metadata['format'] image_utils.resize_image(active_file_path, new_size, run_as_root=self._execute_as_root, file_format=file_format) if file_format == 'qcow2' and not self._is_file_size_equal( active_file_path, volume.name, new_size): raise exception.ExtendVolumeError( reason='Resizing image file failed.') def _is_file_size_equal(self, path, volume_name, size): """Checks if file size at path is equal to size.""" data = self._qemu_img_info(path, volume_name) virt_size = int(data.virtual_size / units.Gi) return virt_size == size def set_nas_security_options(self, is_new_cinder_install): """Determine the setting to use for Secure NAS options. Value of each NAS Security option is checked and updated. If the option is currently 'auto', then it is set to either true or false based upon if this is a new Cinder installation. The RemoteFS variable '_execute_as_root' will be updated for this driver. :param is_new_cinder_install: bool indication of new Cinder install """ doc_html = "https://docs.openstack.org/cinder/latest" \ "/admin/blockstorage-nfs-backend.html" self._ensure_shares_mounted() if not self._mounted_shares: raise exception.NfsNoSharesMounted() nfs_mount = self._get_mount_point_for_share(self._mounted_shares[0]) self.configuration.nas_secure_file_permissions = \ self._determine_nas_security_option_setting( self.configuration.nas_secure_file_permissions, nfs_mount, is_new_cinder_install) LOG.debug('NAS variable secure_file_permissions setting is: %s', self.configuration.nas_secure_file_permissions) if self.configuration.nas_secure_file_permissions == 'false': LOG.warning("The NAS file permissions mode will be 666 " "(allowing other/world read & write access). " "This is considered an insecure NAS environment. " "Please see %s for information on a secure " "NFS configuration.", doc_html) self.configuration.nas_secure_file_operations = \ self._determine_nas_security_option_setting( self.configuration.nas_secure_file_operations, nfs_mount, is_new_cinder_install) # If secure NAS, update the '_execute_as_root' flag to not # run as the root user; run as process' user ID. # TODO(eharney): need to separate secure NAS vs. execute as root. # There are requirements to run some commands as root even # when running in secure NAS mode. (i.e. read volume file # attached to an instance and owned by qemu:qemu) if self.configuration.nas_secure_file_operations == 'true': self._execute_as_root = False LOG.debug('NAS secure file operations setting is: %s', self.configuration.nas_secure_file_operations) if self.configuration.nas_secure_file_operations == 'false': LOG.warning("The NAS file operations will be run as " "root: allowing root level access at the storage " "backend. This is considered an insecure NAS " "environment. Please see %s " "for information on a secure NAS configuration.", doc_html) def update_migrated_volume(self, ctxt, volume, new_volume, original_volume_status): """Return the keys and values updated from NFS for migrated volume. This method should rename the back-end volume name(id) on the destination host back to its original name(id) on the source host. 
Due to this renaming, inheriting drivers may need to also re-associate other entities (such as backend QoS) with the new name. Alternatively they could overwrite this method and raise NotImplemented. In any case, driver's code should always use the volume OVO's 'name_id' field instead of 'id' to get the volume's real UUID, as the renaming could fail. :param ctxt: The context used to run the method update_migrated_volume :param volume: The original volume that was migrated to this backend :param new_volume: The migration volume object that was created on this backend as part of the migration process :param original_volume_status: The status of the original volume :returns: model_update to update DB with any needed changes """ name_id = None if (original_volume_status == 'available' and volume.provider_location != new_volume.provider_location): current_name = CONF.volume_name_template % new_volume.id original_volume_name = CONF.volume_name_template % volume.id current_path = self.local_path(new_volume) # Replace the volume name with the original volume name original_path = current_path.replace(current_name, original_volume_name) try: os.rename(current_path, original_path) except OSError: LOG.exception('Unable to rename the logical volume ' 'for volume: %s', volume.id) # If the rename fails, _name_id should be set to the new # volume id and provider_location should be set to the # one from the new volume as well. name_id = new_volume._name_id or new_volume.id else: # The back-end will not be renamed. name_id = new_volume._name_id or new_volume.id return {'_name_id': name_id, 'provider_location': new_volume.provider_location} def _update_volume_stats(self): """Retrieve stats info from volume group.""" super(NfsDriver, self)._update_volume_stats() self._stats['sparse_copy_volume'] = True data = self._stats global_capacity = data['total_capacity_gb'] global_capacity = typing.cast(float, global_capacity) global_free = data['free_capacity_gb'] global_free = typing.cast(float, global_free) thin_enabled = self.configuration.nfs_sparsed_volumes if thin_enabled: provisioned_capacity = self._get_provisioned_capacity() else: provisioned_capacity = round(global_capacity - global_free, 2) data['provisioned_capacity_gb'] = provisioned_capacity data['max_over_subscription_ratio'] = self.max_over_subscription_ratio data['reserved_percentage'] = self.reserved_percentage data['thin_provisioning_support'] = thin_enabled data['thick_provisioning_support'] = not thin_enabled self._stats = data @coordination.synchronized('{self.driver_prefix}-{volume.id}') def create_volume(self, volume): """Apply locking to the create volume operation.""" return super(NfsDriver, self).create_volume(volume) @coordination.synchronized('{self.driver_prefix}-{volume.id}') def delete_volume(self, volume): """Deletes a logical volume.""" LOG.debug('Deleting volume %(vol)s, provider_location: %(loc)s', {'vol': volume.id, 'loc': volume.provider_location}) if not volume.provider_location: LOG.warning('Volume %s does not have provider_location ' 'specified, skipping', volume.name) return info_path = self._local_path_volume_info(volume) info = self._read_info_file(info_path, empty_if_missing=True) if info: base_volume_path = os.path.join(self._local_volume_dir(volume), info['active']) self._delete(info_path) else: base_volume_path = self._local_path_volume(volume) self._delete(base_volume_path) def _qemu_img_info(self, path, volume_name): return super(NfsDriver, self)._qemu_img_info_base( path, volume_name, 
self.configuration.nfs_mount_point_base, force_share=True, run_as_root=True) def _check_snapshot_support(self, setup_checking=False): """Ensure snapshot support is enabled in config.""" if (not self.configuration.nfs_snapshot_support and not setup_checking): msg = _("NFS driver snapshot support is disabled in cinder.conf.") raise exception.VolumeDriverException(message=msg) if (self.configuration.nas_secure_file_operations == 'true' and self.configuration.nfs_snapshot_support): msg = _("Snapshots are not supported with " "nas_secure_file_operations enabled ('true' or 'auto'). " "Please set it to 'false' if you intend to have " "it enabled.") LOG.error(msg) raise exception.VolumeDriverException(message=msg) @coordination.synchronized('{self.driver_prefix}-{snapshot.volume.id}') def create_snapshot(self, snapshot): """Apply locking to the create snapshot operation.""" self._check_snapshot_support() return self._create_snapshot(snapshot) @coordination.synchronized('{self.driver_prefix}-{snapshot.volume.id}') def delete_snapshot(self, snapshot): """Apply locking to the delete snapshot operation.""" return self._delete_snapshot(snapshot) def _copy_volume_from_snapshot(self, snapshot, volume, volume_size, src_encryption_key_id=None, new_encryption_key_id=None): """Copy data from snapshot to destination volume. This is done with a qemu-img convert to raw/qcow2 from the snapshot qcow2. """ LOG.debug("Copying snapshot: %(snap)s -> volume: %(vol)s, " "volume_size: %(size)s GB", {'snap': snapshot.id, 'vol': volume.id, 'size': volume_size}) info_path = self._local_path_volume_info(snapshot.volume) snap_info = self._read_info_file(info_path) vol_path = self._local_volume_dir(snapshot.volume) forward_file = snap_info[snapshot.id] forward_path = os.path.join(vol_path, forward_file) # Find the file which backs this file, which represents the point # when this snapshot was created. img_info = self._qemu_img_info(forward_path, snapshot.volume.name) path_to_snap_img = os.path.join(vol_path, img_info.backing_file) snap_backing_file_img_info = self._qemu_img_info(path_to_snap_img, snapshot.volume.name) path_to_new_vol = self._local_path_volume(volume) LOG.debug("will copy from snapshot at %s", path_to_snap_img) if self.configuration.nfs_qcow2_volumes: out_format = 'qcow2' else: out_format = 'raw' if new_encryption_key_id is not None: if src_encryption_key_id is None: message = _("Can't create an encrypted volume %(format)s " "from an unencrypted source." 
) % {'format': out_format} LOG.error(message) # TODO(enriquetaso): handle unencrypted snap->encrypted vol raise exception.NfsException(message) keymgr = key_manager.API(CONF) new_key = keymgr.get(volume.obj_context, new_encryption_key_id) new_passphrase = \ binascii.hexlify(new_key.get_encoded()).decode('utf-8') # volume.obj_context is the owner of this request src_key = keymgr.get(volume.obj_context, src_encryption_key_id) src_passphrase = \ binascii.hexlify(src_key.get_encoded()).decode('utf-8') tmp_dir = volume_utils.image_conversion_dir() with tempfile.NamedTemporaryFile(prefix='luks_', dir=tmp_dir) as src_pass_file: with open(src_pass_file.name, 'w') as f: f.write(src_passphrase) with tempfile.NamedTemporaryFile(prefix='luks_', dir=tmp_dir) as new_pass_file: with open(new_pass_file.name, 'w') as f: f.write(new_passphrase) image_utils.convert_image( path_to_snap_img, path_to_new_vol, 'luks', passphrase_file=new_pass_file.name, src_passphrase_file=src_pass_file.name, run_as_root=self._execute_as_root, data=snap_backing_file_img_info) else: image_utils.convert_image(path_to_snap_img, path_to_new_vol, out_format, run_as_root=self._execute_as_root, data=snap_backing_file_img_info) self._set_rw_permissions_for_all(path_to_new_vol) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315577.3911211 cinder-27.0.0/cinder/volume/drivers/open_e/0000775000175000017500000000000000000000000020565 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/drivers/open_e/__init__.py0000664000175000017500000000000000000000000022664 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/drivers/open_e/iscsi.py0000664000175000017500000004774100000000000022266 0ustar00zuulzuul00000000000000# Copyright (c) 2020 Open-E, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """iSCSI volume driver for JovianDSS driver.""" import math import string from oslo_log import log as logging from oslo_utils import units as o_units from cinder.common import constants from cinder import exception from cinder.i18n import _ from cinder import interface from cinder.volume import driver from cinder.volume.drivers.open_e.jovian_common import driver as jdriver from cinder.volume.drivers.open_e.jovian_common import exception as jexc from cinder.volume.drivers.open_e.jovian_common import jdss_common as jcom from cinder.volume.drivers.open_e.jovian_common import rest from cinder.volume.drivers.open_e import options from cinder.volume.drivers.san import san from cinder.volume import volume_utils LOG = logging.getLogger(__name__) @interface.volumedriver class JovianISCSIDriver(driver.ISCSIDriver): """Executes volume driver commands on Open-E JovianDSS. Version history: .. 
code-block:: none 1.0.0 - Open-E JovianDSS driver with basic functionality 1.0.1 - Added certificate support Added revert to snapshot support 1.0.2 - Added multi-attach support Added 16K block support 1.0.3 - Driver rework and optimisation Abandon recursive volume deletion Removed revert to snapshot support """ # Third-party Systems wiki page CI_WIKI_NAME = "Open-E_JovianDSS_CI" VERSION = "1.0.3" def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self._stats = None self.jovian_iscsi_target_portal_port = "3260" self.jovian_target_prefix = 'iqn.2020-04.com.open-e.cinder:' self.jovian_chap_pass_len = 12 self.jovian_sparse = False self.jovian_ignore_tpath = None self.jovian_hosts = None self._pool = 'Pool-0' self.ra = None self.driver = None @property def backend_name(self): """Return backend name.""" backend_name = None if self.configuration: backend_name = self.configuration.get('volume_backend_name', 'Open-EJovianDSS') if not backend_name: backend_name = self.__class__.__name__ return backend_name def do_setup(self, context): """Any initialization the volume driver does while starting.""" self.configuration.append_config_values( options.jdss_connection_opts) self.configuration.append_config_values( options.jdss_iscsi_opts) self.configuration.append_config_values( options.jdss_volume_opts) self.configuration.append_config_values(san.san_opts) self.jovian_target_prefix = self.configuration.get( 'target_prefix', 'iqn.2020-04.com.open-e.cinder:') self.jovian_chap_pass_len = self.configuration.get( 'chap_password_len', 12) self.block_size = ( self.configuration.get('jovian_block_size', '64K')) self.jovian_sparse = ( self.configuration.get('san_thin_provision', True)) self.jovian_ignore_tpath = self.configuration.get( 'jovian_ignore_tpath', None) self.jovian_hosts = self.configuration.get( 'san_hosts', []) self.ra = rest.JovianRESTAPI(self.configuration) self.driver = jdriver.JovianDSSDriver(self.configuration) def check_for_setup_error(self): """Check for setup error.""" if len(self.jovian_hosts) == 0: msg = _("No hosts provided in configuration") raise exception.VolumeDriverException(msg) if not self.driver.rest_config_is_ok(): msg = (_("Unable to identify pool %s") % self._pool) raise exception.VolumeDriverException(msg) valid_bsize = ['16K', '32K', '64K', '128K', '256K', '512K', '1M'] if self.block_size not in valid_bsize: raise exception.InvalidConfigurationValue( value=self.block_size, option='jovian_block_size') def _get_target_name(self, volume_name): """Return iSCSI target name to access volume.""" return f'{self.jovian_target_prefix}{volume_name}' def _get_active_ifaces(self): """Return list of ip addresses for iSCSI connection""" return self.jovian_hosts def create_volume(self, volume): """Create a volume. :param volume: volume reference :return: model update dict for volume reference """ LOG.debug('creating volume %s.', volume.id) try: self.driver.create_volume(volume.id, volume.size, sparse=self.jovian_sparse, block_size=self.block_size) except jexc.JDSSException as jerr: LOG.error("Create volume error. 
Because %(err)s", {"err": jerr}) raise exception.VolumeBackendAPIException( _('Failed to create volume %s.') % volume.id) from jerr return self._get_provider_info(volume.id) def delete_volume(self, volume, cascade=False): """Delete volume :param volume: volume reference :param cascade: remove snapshots of a volume as well """ try: self.driver.delete_volume(volume.id, cascade=cascade) except jexc.JDSSException as jerr: raise exception.VolumeBackendAPIException(jerr) def extend_volume(self, volume, new_size): """Extend an existing volume. :param volume: volume reference :param new_size: volume new size in GB """ try: self.driver.resize_volume(volume.id, new_size) except jexc.JDSSException as jerr: msg = _('Failed to extend volume %s.') raise exception.VolumeBackendAPIException( data=msg % volume.id) from jerr def create_cloned_volume(self, volume, src_vref): """Create a clone of the specified volume. :param volume: new volume reference :param src_vref: source volume reference """ try: self.driver.create_cloned_volume(volume.id, src_vref.id, volume.size, sparse=self.jovian_sparse) except jexc.JDSSException as jerr: msg = _("Fail to clone volume %(vol)s to %(clone)s because of " "error %(err)s.") % { 'vol': src_vref.id, 'clone': volume.id, 'err': jerr} raise exception.VolumeBackendAPIException(msg) from jerr return self._get_provider_info(volume.id) def create_volume_from_snapshot(self, volume, snapshot): """Create a volume from a snapshot. """ LOG.debug('create volume %(vol)s from snapshot %(snap)s', { 'vol': volume.id, 'snap': snapshot.id}) try: self.driver.create_cloned_volume(volume.id, snapshot.volume_id, volume.size, snapshot_name=snapshot.id) except jexc.JDSSResourceExistsException as jerr: raise exception.Duplicate() from jerr except jexc.JDSSException as jerr: raise exception.VolumeBackendAPIException( _("Failed to create clone %(clone)s from snapshot %(snap)s " "of volume %(vol)s because of error %(err)s.") % { 'vol': snapshot.volume_id, 'clone': volume.id, 'snap': snapshot.id, 'err': jerr}) from jerr return self._get_provider_info(volume.id) def create_snapshot(self, snapshot): """Create snapshot of existing volume. :param snapshot: snapshot object """ LOG.debug('create snapshot %(snap)s for volume %(vol)s', { 'snap': snapshot.id, 'vol': snapshot.volume_id}) try: self.driver.create_snapshot(snapshot.id, snapshot.volume_id) except jexc.JDSSSnapshotExistsException as jexistserr: raise exception.Duplicate() from jexistserr except jexc.JDSSResourceNotFoundException as jerrnotfound: raise exception.VolumeNotFound( volume_id=jcom.idname(snapshot.volume_id)) from jerrnotfound except jexc.JDSSException as jerr: args = {'snapshot': snapshot.id, 'object': snapshot.volume_id, 'err': jerr} msg = (_('Failed to create tmp snapshot %(snapshot)s ' 'for object %(object)s: %(err)s') % args) raise exception.VolumeBackendAPIException(msg) from jerr return self._get_provider_info(snapshot.id) def delete_snapshot(self, snapshot): """Delete snapshot of existing volume. 
:param snapshot: snapshot reference """ try: self.driver.delete_snapshot(snapshot.volume_id, snapshot.id) except jexc.JDSSResourceNotFoundException: return except jexc.JDSSException as jerr: raise exception.VolumeBackendAPIException(jerr) def _get_provider_info(self, vid): '''returns provider info dict :param vid: volume id ''' info = {} try: info['provider_location'] = self.driver.get_provider_location(vid) except jexc.JDSSException as jerr: msg = _("Fail to identify critical properties of " "new volume %s.") % vid raise exception.VolumeBackendAPIException(data=msg) from jerr info['provider_auth'] = self._get_provider_auth() return info def _get_provider_auth(self): """Get provider authentication for the volume. :return: string of auth method and credentials """ chap_user = volume_utils.generate_password( length=8, symbolgroups=(string.ascii_lowercase + string.ascii_uppercase)) chap_password = volume_utils.generate_password( length=self.jovian_chap_pass_len, symbolgroups=(string.ascii_lowercase + string.ascii_uppercase + string.digits)) return 'CHAP %(user)s %(passwd)s' % { 'user': chap_user, 'passwd': chap_password} def create_export(self, _ctx, volume, connector): """Create new export for zvol. :param volume: reference of volume to be exported :return: iscsiadm-formatted provider location string """ LOG.debug("create export for volume: %s.", volume.id) provider_auth = volume.provider_auth ret = dict() if provider_auth is None: provider_auth = self._get_provider_auth() ret['provider_auth'] = provider_auth try: self.driver.ensure_export(volume.id, provider_auth) location = self.driver.get_provider_location(volume.id) ret['provider_location'] = location except jexc.JDSSException as jerr: raise exception.VolumeDriverException from jerr return ret def ensure_export(self, _ctx, volume): """Recreate parts of export if necessary. :param volume: reference of volume to be exported """ LOG.debug("ensure export for volume: %s.", volume.id) provider_auth = volume.provider_auth ret = dict() if provider_auth is None: provider_auth = self._get_provider_auth() ret['provider_auth'] = provider_auth try: self.driver.ensure_export(volume.id, provider_auth) location = self.driver.get_provider_location(volume.id) ret['provider_location'] = location except jexc.JDSSException as jerr: raise exception.VolumeDriverException from jerr return ret def create_export_snapshot(self, context, snapshot, connector): provider_auth = snapshot.provider_auth ret = dict() if provider_auth is None: provider_auth = self._get_provider_auth() ret['provider_auth'] = provider_auth try: ret = self.driver.create_export_snapshot(snapshot.id, snapshot.volume_id, provider_auth) except jexc.JDSSResourceExistsException as jres_err: raise exception.Duplicate() from jres_err except jexc.JDSSException as jerr: raise exception.VolumeDriverException from jerr return ret def remove_export(self, _ctx, volume): """Destroy all resources created to export zvol. 
:param volume: reference of volume to be unexposed """ LOG.debug("remove_export for volume: %s.", volume.id) try: self.driver.remove_export(volume.id) except jexc.JDSSException as jerr: raise exception.VolumeDriverException from jerr def remove_export_snapshot(self, context, snapshot): try: self.driver.remove_export_snapshot(snapshot.id, snapshot.volume_id) except jexc.JDSSException as jerr: raise exception.VolumeDriverException from jerr def _update_volume_stats(self): """Retrieve stats info.""" LOG.debug('Updating volume stats') pool_stats = self.ra.get_pool_stats() total_capacity = math.floor(int(pool_stats["size"]) / o_units.Gi) free_capacity = math.floor(int(pool_stats["available"]) / o_units.Gi) reserved_percentage = ( self.configuration.get('reserved_percentage', 0)) if total_capacity is None: total_capacity = 'unknown' if free_capacity is None: free_capacity = 'unknown' location_info = '%(driver)s:%(host)s:%(volume)s' % { 'driver': self.__class__.__name__, 'host': self.ra.get_active_host(), 'volume': self._pool } self._stats = { 'vendor_name': 'Open-E', 'driver_version': self.VERSION, 'storage_protocol': constants.ISCSI, 'total_capacity_gb': total_capacity, 'free_capacity_gb': free_capacity, 'reserved_percentage': int(reserved_percentage), 'volume_backend_name': self.backend_name, 'QoS_support': False, 'location_info': location_info, 'multiattach': True } LOG.debug('Total capacity: %d, ' 'Free %d.', self._stats['total_capacity_gb'], self._stats['free_capacity_gb']) def _get_iscsi_properties(self, volume_id, provider_auth, multipath=False): """Return dict according to cinder/driver.py implementation. :param volume_id: openstack volume UUID :param str provider_auth: space-separated triple ' ' :param bool multipath: use multipath flag :return: """ tname = self.jovian_target_prefix + volume_id iface_info = [] if multipath: iface_info = self._get_active_ifaces() if not iface_info: raise exception.InvalidConfigurationValue( _('No available interfaces ' 'or config excludes them')) iscsi_properties = {} if multipath: iscsi_properties['target_iqns'] = [] iscsi_properties['target_portals'] = [] iscsi_properties['target_luns'] = [] LOG.debug('tpaths %s.', iface_info) for iface in iface_info: iscsi_properties['target_iqns'].append( self.jovian_target_prefix + volume_id) iscsi_properties['target_portals'].append( iface + ":" + str(self.jovian_iscsi_target_portal_port)) iscsi_properties['target_luns'].append(0) else: iscsi_properties['target_iqn'] = tname iscsi_properties['target_portal'] = ( self.ra.get_active_host() + ":" + str(self.jovian_iscsi_target_portal_port)) iscsi_properties['target_discovered'] = False if provider_auth is None: provider_auth = self._get_provider_auth() (auth_method, auth_username, auth_secret) = provider_auth.split() iscsi_properties['auth_method'] = auth_method iscsi_properties['auth_username'] = auth_username iscsi_properties['auth_password'] = auth_secret iscsi_properties['target_lun'] = 0 return iscsi_properties def initialize_connection(self, volume, connector): """Initialize the connection and returns connection info. The iscsi driver returns a driver_volume_type of 'iscsi'. Format of the driver data is defined in _get_iscsi_properties. Example return value: .. 
code-block:: json { 'driver_volume_type': 'iscsi' 'data': { 'target_discovered': True, 'target_iqn': 'iqn.2010-10.org.openstack:volume-00000001', 'target_portal': '127.0.0.0.1:3260', 'volume_id': '12345678-1234-1234-1234-123456789012', } } """ multipath = connector.get("multipath", False) provider_auth = volume.provider_auth ret = { 'driver_volume_type': 'iscsi', 'data': None, } try: self.driver.initialize_connection(volume.id, provider_auth, multipath=multipath) ret['data'] = self._get_iscsi_properties(volume.id, provider_auth, multipath=multipath) except jexc.JDSSException as jerr: raise exception.VolumeDriverException from jerr return ret def terminate_connection(self, volume, connector, force=False, **kwargs): """terminate_connection """ LOG.debug("terminate connection for %(volume)s ", {'volume': volume.id}) def initialize_connection_snapshot(self, snapshot, connector, **kwargs): multipath = connector.get("multipath", False) provider_auth = snapshot.provider_auth ret = { 'driver_volume_type': 'iscsi', 'data': None, } try: self.driver.initialize_connection(snapshot.volume_id, provider_auth, snapshot_id=snapshot.id, multipath=multipath) ret['data'] = self._get_iscsi_properties(snapshot.id, provider_auth, multipath=multipath) except jexc.JDSSException as jerr: raise exception.VolumeDriverException from jerr return ret def terminate_connection_snapshot(self, snapshot, connector, **kwargs): pass ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1759315577.395121 cinder-27.0.0/cinder/volume/drivers/open_e/jovian_common/0000775000175000017500000000000000000000000023423 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/drivers/open_e/jovian_common/__init__.py0000664000175000017500000000000000000000000025522 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/drivers/open_e/jovian_common/driver.py0000664000175000017500000007322200000000000025276 0ustar00zuulzuul00000000000000# Copyright (c) 2023 Open-E, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
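# Illustrative examples of the strings this driver builds and parses (the
# values below are made up): get_provider_location() returns an
# iscsiadm-style location such as
#     '192.168.10.21:3260,1 iqn.2020-04.com.open-e.cinder:<volume id> 0'
# and provider_auth is the space-separated triple
#     'CHAP <chap user> <chap password>'
# which _ensure_target_volume() and _get_iscsi_properties() split into
# auth_method, auth_username and auth_secret.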
from oslo_log import log as logging from oslo_utils import units as o_units from cinder import exception from cinder.i18n import _ from cinder.volume.drivers.open_e.jovian_common import exception as jexc from cinder.volume.drivers.open_e.jovian_common import jdss_common as jcom from cinder.volume.drivers.open_e.jovian_common import rest LOG = logging.getLogger(__name__) class JovianDSSDriver(object): def __init__(self, config): self.configuration = config self._pool = self.configuration.get('jovian_pool', 'Pool-0') self.jovian_iscsi_target_portal_port = self.configuration.get( 'target_port', 3260) self.jovian_target_prefix = self.configuration.get( 'target_prefix', 'iqn.2020-04.com.open-e.cinder:') self.jovian_chap_pass_len = self.configuration.get( 'chap_password_len', 12) self.block_size = ( self.configuration.get('jovian_block_size', '64K')) self.jovian_sparse = ( self.configuration.get('san_thin_provision', True)) self.jovian_ignore_tpath = self.configuration.get( 'jovian_ignore_tpath', None) self.jovian_hosts = self.configuration.get( 'san_hosts', []) self.ra = rest.JovianRESTAPI(config) def rest_config_is_ok(self): """Check config correctness by checking pool availability""" return self.ra.is_pool_exists() def get_active_ifaces(self): """Return list of ip addresses for iSCSI connection""" return self.jovian_hosts def get_provider_location(self, volume_name): """Return volume iscsiadm-formatted provider location string.""" return '%(host)s:%(port)s,1 %(name)s 0' % { 'host': self.ra.get_active_host(), 'port': self.jovian_iscsi_target_portal_port, 'name': self._get_target_name(volume_name)} def create_volume(self, volume_id, volume_size, sparse=False, block_size=None): """Create a volume. :param str volume_id: volume id :param int volume_size: size in Gi :param bool sparse: thin or thick volume flag (default thin) :param int block_size: size of block (default None) :return: None """ vname = jcom.vname(volume_id) LOG.debug("Create volume:%(name)s with size:%(size)s", {'name': volume_id, 'size': volume_size}) self.ra.create_lun(vname, volume_size * o_units.Gi, sparse=sparse, block_size=block_size) return def _promote_newest_delete(self, vname, snapshots=None): '''Promotes and delete volume This function deletes volume. It will promote volume if needed before deletion. :param str vname: physical volume id :param list snapshots: snapshot data list (default None) :return: None ''' if snapshots is None: try: snapshots = self.ra.get_snapshots(vname) except jexc.JDSSResourceNotFoundException: LOG.debug('volume %s do not exists, it was already ' 'deleted', vname) return bsnaps = self._list_busy_snapshots(vname, snapshots) if len(bsnaps) != 0: promote_target = None sname = jcom.get_newest_snapshot_name(bsnaps) for snap in bsnaps: if snap['name'] == sname: cvnames = jcom.snapshot_clones(snap) for cvname in cvnames: if jcom.is_volume(cvname): promote_target = cvname if jcom.is_snapshot(cvname): self._promote_newest_delete(cvname) if jcom.is_hidden(cvname): self._promote_newest_delete(cvname) break if promote_target is None: self._promote_newest_delete(vname) return self.ra.promote(vname, sname, promote_target) self._delete_vol_with_source_snap(vname, recursive=True) def _delete_vol_with_source_snap(self, vname, recursive=False): '''Delete volume and its source snapshot if required This function deletes volume. If volume is a clone it will check its source snapshot if one is originates from volume to delete. 
:param str vname: physical volume id :param bool recursive: recursive flag (default False) :return: None ''' vol = None try: vol = self.ra.get_lun(vname) except jexc.JDSSResourceNotFoundException: LOG.debug('unable to get volume %s info, ' 'assume it was already deleted', vname) return try: self.ra.delete_lun(vname, force_umount=True, recursively_children=recursive) except jexc.JDSSResourceNotFoundException: LOG.debug('volume %s do not exists, it was already ' 'deleted', vname) return if vol is not None and \ 'origin' in vol and \ vol['origin'] is not None: if jcom.is_volume(jcom.origin_snapshot(vol)) or \ jcom.is_hidden(jcom.origin_snapshot(vol)) or \ (jcom.vid_from_sname(jcom.origin_snapshot(vol)) == jcom.idname(vname)): self.ra.delete_snapshot(jcom.origin_volume(vol), jcom.origin_snapshot(vol), recursively_children=True, force_umount=True) def _clean_garbage_resources(self, vname, snapshots=None): '''Removes resources that is not related to volume Goes through volume snapshots and it clones to identify one that is clearly not related to vname volume and therefore have to be deleted. :param str vname: physical volume id :param list snapshots: list of snapshot info dictionaries :return: updated list of snapshots ''' if snapshots is None: try: snapshots = self.ra.get_snapshots(vname) except jexc.JDSSResourceNotFoundException: LOG.debug('volume %s do not exists, it was already ' 'deleted', vname) return update = False for snap in snapshots: if jcom.is_volume(jcom.sname_from_snap(snap)): cvnames = jcom.snapshot_clones(snap) if len(cvnames) == 0: self._delete_snapshot(vname, jcom.sname_from_snap(snap)) update = True if jcom.is_snapshot(jcom.sname_from_snap(snap)): cvnames = jcom.snapshot_clones(snap) for cvname in cvnames: if jcom.is_hidden(cvname): self._promote_newest_delete(cvname) update = True if jcom.is_snapshot(cvname): if jcom.idname(vname) != jcom.vid_from_sname(cvname): self._promote_newest_delete(cvname) update = True if update: snapshots = self.ra.get_snapshots(vname) return snapshots def _list_busy_snapshots(self, vname, snapshots, exclude_dedicated_volumes=False) -> list: """List all volume snapshots with clones Goes through provided list of snapshots. If additional parameters are given, will filter list of snapshots accordingly. 
Keyword arguments: :param str vname: zvol id :param list snapshots: list of snapshots data dicts :param bool exclude_dedicated_volumes: list snapshots that has clones (default False) :return: filtered list of snapshot data dicts :rtype: list """ out = [] for snap in snapshots: clones = jcom.snapshot_clones(snap) add = False for cvname in clones: if exclude_dedicated_volumes and jcom.is_volume(cvname): continue add = True if add: out.append(snap) return out def _clean_volume_snapshots_mount_points(self, vname, snapshots): update = False for snap in snapshots: clones = jcom.snapshot_clones(snap) for cname in [c for c in clones if jcom.is_snapshot(c)]: update = True self._delete_volume(cname, cascade=True) if update: snapshots = self.ra.get_snapshots(vname) return snapshots def _delete_volume(self, vname, cascade=False): """_delete_volume delete routine containing delete logic :param str vname: physical volume id :param bool cascade: flag for cascade volume deletion with its snapshots :return: None """ try: self.ra.delete_lun(vname, force_umount=True, recursively_children=cascade) except jexc.JDSSResourceIsBusyException: LOG.debug('unable to conduct direct volume %s deletion', vname) except jexc.JDSSResourceNotFoundException: LOG.debug('volume %s do not exists, it was already ' 'deleted', vname) return except jexc.JDSSRESTException as jerr: LOG.debug( "Unable to delete physical volume %(volume)s " "with error %(err)s.", { "volume": vname, "err": jerr}) else: LOG.debug('in place deletion suceeded') return snapshots = None try: snapshots = self.ra.get_snapshots(vname) except jexc.JDSSResourceNotFoundException: LOG.debug('volume %s do not exists, it was already ' 'deleted', vname) return if cascade is False: bsnaps = self._list_busy_snapshots(vname, snapshots, exclude_dedicated_volumes=True) if len(bsnaps) > 0: raise exception.VolumeIsBusy('Volume has snapshots') snaps = self._clean_garbage_resources(vname, snapshots) snaps = self._clean_volume_snapshots_mount_points(vname, snapshots) self._promote_newest_delete(vname, snapshots=snaps) def delete_volume(self, volume_name, cascade=False): """Delete volume :param volume: volume reference :param cascade: remove snapshots of a volume as well """ vname = jcom.vname(volume_name) LOG.debug('deleting volume %s', vname) self._delete_volume(vname, cascade=cascade) def _clone_object(self, cvname, sname, ovname, sparse=None, create_snapshot=False): """Creates a clone of specified object Will create snapshot if it is not provided :param str cvname: clone volume name :param str sname: snapshot name :param str ovname: original volume name :param bool sparse: sparse property of new volume :param bool create_snapshot: """ LOG.debug('cloning %(ovname)s to %(coname)s', { "ovname": ovname, "coname": cvname}) if create_snapshot: self.ra.create_snapshot(ovname, sname) try: self.ra.create_volume_from_snapshot( cvname, sname, ovname, sparse=sparse) except jexc.JDSSException as jerr: # This is a garbage collecting section responsible for cleaning # all the mess of request failed if create_snapshot: try: self.ra.delete_snapshot(ovname, cvname, recursively_children=True, force_umount=True) except jexc.JDSSException as jerrd: LOG.warning("Because of %s physical snapshot %s of volume" " %s have to be removed manually", jerrd, sname, ovname) raise jerr def resize_volume(self, volume_name, new_size): """Extend an existing volume. 
:param str volume_name: volume id :param int new_size: volume new size in Gi """ LOG.debug("Extend volume:%(name)s to size:%(size)s", {'name': volume_name, 'size': new_size}) self.ra.extend_lun(jcom.vname(volume_name), int(new_size) * o_units.Gi) def create_cloned_volume(self, clone_name, volume_name, size, snapshot_name=None, sparse=False): """Create a clone of the specified volume. :param str clone_name: new volume id :param volume_name: original volume id :param int size: size in Gi :param str snapshot_name: openstack snapshot id to use for cloning :param bool sparse: sparse flag """ cvname = jcom.vname(clone_name) ovname = jcom.vname(volume_name) LOG.debug('clone volume %(id)s to %(id_clone)s', { "id": volume_name, "id_clone": clone_name}) if snapshot_name: sname = jcom.sname(snapshot_name, volume_name) self._clone_object(cvname, sname, ovname, create_snapshot=False, sparse=sparse) else: sname = jcom.vname(clone_name) self._clone_object(cvname, sname, ovname, create_snapshot=True, sparse=sparse) clone_size = 0 try: clone_size = int(self.ra.get_lun(cvname)['volsize']) except jexc.JDSSException as jerr: self.delete_volume(clone_name, cascade=False) raise exception.VolumeBackendAPIException( _("Fail in cloning volume %(vol)s to %(clone)s.") % { 'vol': volume_name, 'clone': clone_name}) from jerr try: if int(clone_size) < o_units.Gi * int(size): self.resize_volume(clone_name, int(size)) except jexc.JDSSException as jerr: # If volume can't be set to a proper size make sure to clean it # before failing try: self.delete_volume(clone_name, cascade=False) except jexc.JDSSException as jerrex: LOG.warning("Error %s during cleaning failed volume %s", jerrex, volume_name) raise jerr from jerrex def create_snapshot(self, snapshot_name, volume_name): """Create snapshot of existing volume. 
:param str snapshot_name: new snapshot id :param str volume_name: original volume id """ LOG.debug('create snapshot %(snap)s for volume %(vol)s', { 'snap': snapshot_name, 'vol': volume_name}) vname = jcom.vname(volume_name) sname = jcom.sname(snapshot_name, volume_name) self.ra.create_snapshot(vname, sname) def create_export_snapshot(self, snapshot_name, volume_name, provider_auth): """Creates iscsi resources needed to start using snapshot :param str snapshot_name: openstack snapshot id :param str volume_name: openstack volume id :param str provider_auth: space-separated triple ' ' """ sname = jcom.sname(snapshot_name, volume_name) ovname = jcom.vname(volume_name) self._clone_object(sname, sname, ovname, sparse=True, create_snapshot=False) try: self._ensure_target_volume(snapshot_name, sname, provider_auth, ro=True) except jexc.JDSSException as jerr: self._delete_volume(sname, cascade=True) raise jerr def remove_export(self, volume_name): """Remove iscsi target created to make volume attachable :param str volume_name: openstack volume id """ vname = jcom.vname(volume_name) try: self._remove_target_volume(volume_name, vname) except jexc.JDSSException as jerr: LOG.warning(jerr) def remove_export_snapshot(self, snapshot_name, volume_name): """Remove tmp vol and iscsi target created to make snap attachable :param str snapshot_name: openstack snapshot id :param str volume_name: openstack volume id """ sname = jcom.sname(snapshot_name, volume_name) try: self._remove_target_volume(snapshot_name, sname) except jexc.JDSSException as jerr: self._delete_volume(sname, cascade=True) raise jerr self._delete_volume(sname, cascade=True) def _delete_snapshot(self, vname, sname): """Delete snapshot This method will delete snapshot mount point and snapshot if possible :param str vname: zvol name :param dict snap: snapshot info dictionary :return: None """ try: self.ra.delete_snapshot(vname, sname, force_umount=True) except jexc.JDSSResourceIsBusyException: LOG.debug('Direct deletion of snapshot %s failed', vname) else: return snap = self.ra.get_snapshot(vname, sname) clones = jcom.snapshot_clones(snap) busy = False for cvname in clones: if jcom.is_snapshot(cvname): self._promote_newest_delete(cvname) if jcom.is_volume(cvname): LOG.debug('Will not delete snap %(snap)s,' 'becasue it is used by %(vol)s', {'snap': sname, 'vol': cvname}) busy = True if busy: return try: self.ra.delete_snapshot(vname, sname, force_umount=True) except jexc.JDSSResourceIsBusyException: LOG.debug('Unable to delete snap %(snap)s because it is busy', {'snap': jcom.sname_from_snap(snap)}) def delete_snapshot(self, volume_name, snapshot_name): """Delete snapshot of existing volume. 
:param str volume_name: volume id :param str snapshot_name: snapshot id """ vname = jcom.vname(volume_name) sname = jcom.sname(snapshot_name, volume_name) self._delete_snapshot(vname, sname) def _ensure_target_volume(self, id, vid, provider_auth, ro=False): """Checks if target configured properly and volume is attached to it :param str id: id that would be used for target naming :param str vname: physical volume id :param str provider_auth: space-separated triple ' ' """ LOG.debug("ensure volume %s assigned to a proper target", id) target_name = self._get_target_name(id) if not provider_auth: msg = _("volume %s is missing provider_auth") % jcom.idname(id) raise jexc.JDSSException(msg) if not self.ra.is_target(target_name): return self._create_target_volume(id, vid, provider_auth) if not self.ra.is_target_lun(target_name, vid): self._attach_target_volume(target_name, vid) (__, auth_username, auth_secret) = provider_auth.split() chap_cred = {"name": auth_username, "password": auth_secret} try: users = self.ra.get_target_user(target_name) if len(users) == 1: if users[0]['name'] == chap_cred['name']: return self.ra.delete_target_user( target_name, users[0]['name']) for user in users: self.ra.delete_target_user( target_name, user['name']) self._set_target_credentials(target_name, chap_cred) except jexc.JDSSException as jerr: self.ra.delete_target(target_name) raise exception.VolumeBackendAPIException(jerr) def _get_target_name(self, volume_id): """Return iSCSI target name to access volume.""" return f'{self.jovian_target_prefix}{volume_id}' def _get_iscsi_properties(self, volume_id, provider_auth, multipath=False): """Return dict according to cinder/driver.py implementation. :param volume_id: UUID of volume, might take snapshot UUID :param str provider_auth: space-separated triple ' ' :return: """ tname = self._get_target_name(volume_id) iface_info = [] if multipath: iface_info = self.get_active_ifaces() if not iface_info: raise exception.InvalidConfigurationValue( _('No available interfaces ' 'or config excludes them')) iscsi_properties = {} if multipath: iscsi_properties['target_iqns'] = [] iscsi_properties['target_portals'] = [] iscsi_properties['target_luns'] = [] LOG.debug('tpaths %s.', iface_info) for iface in iface_info: iscsi_properties['target_iqns'].append( self._get_target_name(volume_id)) iscsi_properties['target_portals'].append( iface + ":" + str(self.jovian_iscsi_target_portal_port)) iscsi_properties['target_luns'].append(0) else: iscsi_properties['target_iqn'] = tname iscsi_properties['target_portal'] = ( self.ra.get_active_host() + ":" + str(self.jovian_iscsi_target_portal_port)) iscsi_properties['target_discovered'] = False if provider_auth: (auth_method, auth_username, auth_secret) = provider_auth.split() iscsi_properties['auth_method'] = auth_method iscsi_properties['auth_username'] = auth_username iscsi_properties['auth_password'] = auth_secret iscsi_properties['target_lun'] = 0 return iscsi_properties def _remove_target_volume(self, id, vid): """_remove_target_volume Ensure that volume is not attached to target and target do not exists. 
""" target_name = self._get_target_name(id) LOG.debug("remove export") LOG.debug("detach volume:%(vol)s from target:%(targ)s.", { 'vol': id, 'targ': target_name}) try: self.ra.detach_target_vol(target_name, vid) except jexc.JDSSResourceNotFoundException as jerrrnf: LOG.debug('failed to remove resource %(t)s because of %(err)s', { 't': target_name, 'err': jerrrnf.args[0]}) except jexc.JDSSException as jerr: LOG.warning('failed to Terminate_connection for target %(targ)s ' 'because of: %(err)s', {'targ': target_name, 'err': jerr.args[0]}) raise jerr LOG.debug("delete target: %s", target_name) try: self.ra.delete_target(target_name) except jexc.JDSSResourceNotFoundException as jerrrnf: LOG.debug('failed to remove resource %(target)s because ' 'of %(err)s', {'target': target_name, 'err': jerrrnf.args[0]}) except jexc.JDSSException as jerr: LOG.warning('Failed to Terminate_connection for target %(targ)s ' 'because of: %(err)s ', {'targ': target_name, 'err': jerr.args[0]}) raise jerr def ensure_export(self, volume_id, provider_auth): vname = jcom.vname(volume_id) self._ensure_target_volume(volume_id, vname, provider_auth) def initialize_connection(self, volume_id, provider_auth, snapshot_id=None, multipath=False): """Ensures volume is ready for connection and return connection data Ensures that particular volume is ready to be used over iscsi with credentials provided in provider_auth If snapshot name is provided method will ensure that connection leads to read only volume object associated with particular snapshot :param str volume_id: Volume id string :param str provider_auth: space-separated triple ' ' :param str snapshot_id: id of snapshot that should be connected :param bool multipath: specifies if multipath should be used """ id_of_disk_to_attach = volume_id vid = jcom.vname(volume_id) if provider_auth is None: raise jexc.JDSSException(_("CHAP credentials missing")) if snapshot_id: id_of_disk_to_attach = snapshot_id vid = jcom.sname(snapshot_id, volume_id) iscsi_properties = self._get_iscsi_properties(id_of_disk_to_attach, provider_auth, multipath=multipath) if snapshot_id: self._ensure_target_volume(id_of_disk_to_attach, vid, provider_auth, mode='ro') else: self._ensure_target_volume(id_of_disk_to_attach, vid, provider_auth) LOG.debug( "initialize_connection for physical disk %(vid)s with %(id)s", {'vid': vid, 'id': id_of_disk_to_attach}) return { 'driver_volume_type': 'iscsi', 'data': iscsi_properties, } def _create_target_volume(self, id, vid, provider_auth): """Creates target and attach volume to it :param id: uuid of particular resource :param vid: physical volume id, might identify snapshot mount :param str provider_auth: space-separated triple ' ' :return: """ LOG.debug("create target and attach volume %s to it", vid) target_name = self._get_target_name(id) (__, auth_username, auth_secret) = provider_auth.split() chap_cred = {"name": auth_username, "password": auth_secret} # Create target self.ra.create_target(target_name, use_chap=True) # Attach volume self._attach_target_volume(target_name, vid) # Set credentials self._set_target_credentials(target_name, chap_cred) def _attach_target_volume(self, target_name, vname): """Attach target to volume and handles exceptions Attempts to set attach volume to specific target. In case of failure will remove target. 
:param target_name: name of target :param use_chap: flag for using chap """ try: self.ra.attach_target_vol(target_name, vname) except jexc.JDSSException as jerr: msg = ('Unable to attach volume {volume} to target {target} ' 'because of {error}.') LOG.warning(msg, {"volume": vname, "target": target_name, "error": jerr}) self.ra.delete_target(target_name) raise jerr def _set_target_credentials(self, target_name, cred): """Set CHAP configuration for target and handle exceptions Attempts to set CHAP credentials for specific target. In case of failure will remove target. :param target_name: name of target :param cred: CHAP user name and password """ try: self.ra.create_target_user(target_name, cred) except jexc.JDSSException as jerr: try: self.ra.delete_target(target_name) except jexc.JDSSException: pass err_msg = (('Unable to create user %(user)s ' 'for target %(target)s ' 'because of %(error)s.') % { 'target': target_name, 'user': cred['name'], 'error': jerr}) LOG.error(err_msg) raise jexc.JDSSException(_(err_msg)) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/drivers/open_e/jovian_common/exception.py0000664000175000017500000000465100000000000026001 0ustar00zuulzuul00000000000000# Copyright (c) 2020 Open-E, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
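# Usage sketch (illustrative values only): following the cinder.exception
# convention, the classes below interpolate keyword arguments into their
# message template, e.g.
#
#     raise JDSSResourceNotFoundException(res='v_f8e...')
#     # -> "JDSS resource v_f8e... DNE."
#
#     raise JDSSRESTException(request='/volumes/v1', reason='timeout')
#     # -> "JDSS REST request /volumes/v1 failed: timeout."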
from cinder import exception
from cinder.i18n import _


class JDSSException(exception.VolumeDriverException):
    """Unknown error"""
    message = _("%(reason)s")


class JDSSRESTException(JDSSException):
    """Unknown communication error"""
    message = _("JDSS REST request %(request)s failed: %(reason)s.")


class JDSSRESTProxyException(JDSSException):
    """Connection with host failed"""
    message = _("JDSS connection with %(host)s failed: %(reason)s.")


class JDSSResourceNotFoundException(JDSSException):
    """Resource does not exist"""
    message = _("JDSS resource %(res)s DNE.")


class JDSSVolumeNotFoundException(JDSSResourceNotFoundException):
    """Volume does not exist"""
    message = _("JDSS volume %(volume)s DNE.")


class JDSSSnapshotNotFoundException(JDSSResourceNotFoundException):
    """Snapshot does not exist"""
    message = _("JDSS snapshot %(snapshot)s DNE.")


class JDSSResourceExistsException(JDSSException):
    """Resource with the specified id exists"""
    message = _("JDSS resource with id %(res)s exists.")


class JDSSSnapshotExistsException(JDSSResourceExistsException):
    """Snapshot with the same id exists"""
    message = _("JDSS snapshot %(snapshot)s already exists.")


class JDSSVolumeExistsException(JDSSResourceExistsException):
    """Volume with the same id exists"""
    message = _("JDSS volume %(volume)s already exists.")


class JDSSResourceIsBusyException(JDSSException):
    """Resource has dependents"""
    message = _("JDSS resource %(res)s is busy.")


class JDSSSnapshotIsBusyException(JDSSResourceIsBusyException):
    """Snapshot has dependent clones"""
    message = _("JDSS snapshot %(snapshot)s is busy.")


class JDSSOSException(JDSSException):
    """Storage internal system error"""
    message = _("JDSS internal system error %(message)s.")
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/drivers/open_e/jovian_common/jdss_common.py0000664000175000017500000000725000000000000026314 0ustar00zuulzuul00000000000000# Copyright (c) 2020 Open-E, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
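# Naming scheme handled by the helpers below (examples are illustrative):
#
#     vname('9cbd...')           -> 'v_9cbd...'        (volume)
#     sname('snap-id', 'vol-id') -> 's_snap-id_vol-id' (snapshot)
#     hidden('v_9cbd...')        -> 't_9cbd...'        (marked for deletion)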
from datetime import datetime

from cinder import exception
from cinder.i18n import _


def is_volume(name):
    """Return True if the name belongs to a volume"""
    return name.startswith("v_")


def is_snapshot(name):
    """Return True if the name belongs to a snapshot"""
    return name.startswith("s_")


def idname(name):
    """Extract UUID from a physical volume or snapshot name"""
    if name.startswith(('v_', 't_')):
        return name[2:]

    if name.startswith(('s_')):
        return sid_from_sname(name)

    msg = _('Object name %s is incorrect') % name
    raise exception.VolumeDriverException(message=msg)


def vname(name):
    """Convert id into volume name"""
    if name.startswith("v_"):
        return name

    if name.startswith('s_'):
        msg = _('Attempt to use snapshot %s as a volume') % name
        raise exception.VolumeDriverException(message=msg)

    if name.startswith('t_'):
        msg = _('Attempt to use deleted object %s as a volume') % name
        raise exception.VolumeDriverException(message=msg)

    return f'v_{name}'


def sname_to_id(sname):
    spl = sname.split('_')

    if len(spl) == 2:
        return (spl[1], None)

    return (spl[1], spl[2])


def sid_from_sname(name):
    return sname_to_id(name)[0]


def vid_from_sname(name):
    return sname_to_id(name)[1]


def sname(sid, vid):
    """Convert id into snapshot name

    :param sid: snapshot id
    :param vid: volume id
    """
    if vid is None:
        return 's_%(sid)s' % {'sid': sid}
    return 's_%(sid)s_%(vid)s' % {'sid': sid, 'vid': vid}


def sname_from_snap(snapshot_struct):
    return snapshot_struct['name']


def is_hidden(name):
    """Return True if the object is hidden (marked for deletion)"""
    if len(name) < 2:
        return False
    if name.startswith('t_'):
        return True
    return False


def origin_snapshot(vol):
    """Extract the original physical snapshot name from a volume dict"""
    if 'origin' in vol and vol['origin'] is not None:
        return vol['origin'].split("@")[1]
    return None


def origin_volume(vol):
    """Extract the original physical volume name from a volume dict"""
    if 'origin' in vol and vol['origin'] is not None:
        return vol['origin'].split("@")[0].split("/")[1]
    return None


def snapshot_clones(snap):
    """Return list of clones associated with the snapshot, or an empty list"""
    out = []
    clones = []
    if 'clones' not in snap:
        return out
    else:
        clones = snap['clones'].split(',')

    for clone in clones:
        out.append(clone.split('/')[1])
    return out


def hidden(name):
    """Get hidden version of a name"""
    if len(name) < 2:
        raise exception.VolumeDriverException("Incorrect volume name")

    if name[:2] == 'v_' or name[:2] == 's_':
        return 't_' + name[2:]
    return 't_' + name


def get_newest_snapshot_name(snapshots):
    newest_date = None
    sname = None
    for snap in snapshots:
        current_date = datetime.strptime(snap['creation'],
                                         "%Y-%m-%d %H:%M:%S")
        if newest_date is None or current_date > newest_date:
            newest_date = current_date
            sname = snap['name']
    return sname
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/drivers/open_e/jovian_common/rest.py0000664000175000017500000007700500000000000024763 0ustar00zuulzuul00000000000000# Copyright (c) 2020 Open-E, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
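# The methods below all inspect the dict returned by the REST proxy, which,
# as used in this module, has roughly the shape
#
#     {'code': <HTTP status>, 'error': <dict or None>, 'data': <payload>}
#
# where a failure response carries 'class', 'message' and 'errno' keys
# inside 'error'.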
"""REST cmd interoperation class for Open-E JovianDSS driver.""" import re from oslo_log import log as logging from cinder.i18n import _ from cinder.volume.drivers.open_e.jovian_common import exception as jexc from cinder.volume.drivers.open_e.jovian_common import rest_proxy LOG = logging.getLogger(__name__) class JovianRESTAPI(object): """Jovian REST API""" def __init__(self, config): self.pool = config.get('jovian_pool', 'Pool-0') self.rproxy = rest_proxy.JovianDSSRESTProxy(config) self.resource_dne_msg = ( re.compile(r'^Zfs resource: .* not found in this collection\.$')) self.resource_has_clones_msg = ( re.compile(r'^In order to delete a zvol, you must delete all of ' 'its clones first.$')) self.resource_has_clones_class = ( re.compile(r'^opene.storage.zfs.ZfsOeError$')) self.resource_has_snapshots_msg = ( re.compile(r"^cannot destroy '.*/.*': volume has children\nuse " "'-r' to destroy the following datasets:\n.*")) self.resource_has_snapshots_class = ( re.compile(r'^zfslib.wrap.zfs.ZfsCmdError$')) def _general_error(self, url, resp): reason = "Request %s failure" % url LOG.debug("error resp %s", resp) if 'error' in resp: eclass = resp['error'].get('class', 'Unknown') code = resp['error'].get('code', 'Unknown') msg = resp['error'].get('message', 'Unknown') reason = _("Request to %(url)s failed with code: %(code)s " "of type:%(eclass)s reason:%(message)s") reason = (reason % {'url': url, 'code': code, 'eclass': eclass, 'message': msg}) raise jexc.JDSSException(reason=reason) def get_active_host(self): """Return address of currently used host.""" return self.rproxy.get_active_host() def is_pool_exists(self): """is_pool_exists. GET /pools/ :param pool_name: :return: Bool """ req = "" LOG.debug("check pool") resp = self.rproxy.pool_request('GET', req) if resp["code"] == 200 and not resp["error"]: return True return False def get_iface_info(self): """get_iface_info GET /network/interfaces :return list of internet ifaces """ req = '/network/interfaces' LOG.debug("get network interfaces") resp = self.rproxy.request('GET', req) if (resp['error'] is None) and (resp['code'] == 200): return resp['data'] raise jexc.JDSSRESTException(resp['error']['message']) def get_luns(self): """get_all_pool_volumes. GET /pools//volumes :param pool_name :return list of all pool volumes """ req = '/volumes' LOG.debug("get all volumes") resp = self.rproxy.pool_request('GET', req) if resp['error'] is None and resp['code'] == 200: return resp['data'] raise jexc.JDSSRESTException(resp['error']['message']) def create_lun(self, volume_name, volume_size, sparse=False, block_size=None): """create_volume. POST .../volumes :param volume_name: :param volume_size: :param sparse: thin or thick volume flag :param block_size: size of block :return: """ LOG.debug("create volume start") volume_size_str = str(volume_size) jbody = { 'name': volume_name, 'size': volume_size_str, 'sparse': sparse } if block_size: jbody['blocksize'] = block_size req = '/volumes' LOG.debug("create volume %s", str(jbody)) resp = self.rproxy.pool_request('POST', req, json_data=jbody) if not resp["error"] and resp["code"] in (200, 201): return if resp["error"] is not None: if resp["error"]["errno"] == str(5): msg = _('Failed to create volume %s.' % resp['error']['message']) raise jexc.JDSSRESTException(msg) raise jexc.JDSSRESTException('Failed to create volume.') def extend_lun(self, volume_name, volume_size): """create_volume. 
PUT /volumes/ """ req = '/volumes/' + volume_name volume_size_str = str(volume_size) jbody = { 'size': volume_size_str } LOG.debug("jdss extend volume %(volume)s to %(size)s", {"volume": volume_name, "size": volume_size_str}) resp = self.rproxy.pool_request('PUT', req, json_data=jbody) if not resp["error"] and resp["code"] == 201: return if resp["error"]: raise jexc.JDSSRESTException( _('Failed to extend volume %s' % resp['error']['message'])) raise jexc.JDSSRESTException('Failed to extend volume.') def is_lun(self, volume_name): """is_lun. GET /volumes/ Returns True if volume exists. Uses GET request. :param pool_name: :param volume_name: :return: """ req = '/volumes/' + volume_name LOG.debug("check volume %s", volume_name) ret = self.rproxy.pool_request('GET', req) if not ret["error"] and ret["code"] == 200: return True return False def get_lun(self, volume_name): """get_lun GET /volumes/ :param volume_name: zvol id :return: volume dict { "origin": null, "referenced": "65536", "primarycache": "all", "logbias": "latency", "creation": "1432730973", "sync": "always", "is_clone": false, "dedup": "off", "used": "1076101120", "full_name": "Pool-0/v1", "type": "volume", "written": "65536", "usedbyrefreservation": "1076035584", "compression": "lz4", "usedbysnapshots": "0", "copies": "1", "compressratio": "1.00x", "readonly": "off", "mlslabel": "none", "secondarycache": "all", "available": "976123452576", "resource_name": "Pool-0/v1", "volblocksize": "131072", "refcompressratio": "1.00x", "snapdev": "hidden", "volsize": "1073741824", "reservation": "0", "usedbychildren": "0", "usedbydataset": "65536", "name": "v1", "checksum": "on", "refreservation": "1076101120" } """ req = '/volumes/' + volume_name LOG.debug("get volume %s info", volume_name) resp = self.rproxy.pool_request('GET', req) if not resp['error'] and resp['code'] == 200: return resp['data'] if resp['error']: if 'message' in resp['error']: if self.resource_dne_msg.match(resp['error']['message']): raise jexc.JDSSResourceNotFoundException(res=volume_name) self._general_error(req, resp) def modify_lun(self, volume_name, prop=None): """Update volume properties :param volume_name: volume name :param prop: dictionary { : } """ req = '/volumes/' + volume_name resp = self.rproxy.pool_request('PUT', req, json_data=prop) if resp["code"] in (200, 201, 204): LOG.debug("volume %s updated", volume_name) return if resp["code"] == 500: if resp["error"] is not None: if resp["error"]["errno"] == 1: raise jexc.JDSSResourceNotFoundException( res=volume_name) self._general_error(req, resp) def make_readonly_lun(self, volume_name): """Set volume into read only mode :param: volume_name: volume name """ prop = {"property_name": "readonly", "property_value": "on"} self.modify_property_lun(volume_name, prop) def modify_property_lun(self, volume_name, prop=None): """Change volume properties :param volume_name: volume name :param prop: dictionary of volume properties in format { "property_name": "", "property_value":""} """ req = '/volumes/%s/properties' % volume_name resp = self.rproxy.pool_request('PUT', req, json_data=prop) if resp["code"] in (200, 201, 204): LOG.debug( "volume %s properties updated", volume_name) return if resp["code"] == 500: if resp["error"] is not None: if resp["error"]["errno"] == 1: raise jexc.JDSSResourceNotFoundException( res=volume_name) raise jexc.JDSSRESTException(request=req, reason=resp['error']['message']) raise jexc.JDSSRESTException(request=req, reason="unknown") def promote(self, volume_name, snapshot_name, clone_name): 
"""promote volume POST /volumes//snapshots//clones/ /promoteclone_promote :param volume_name: parent volume for the one that should be promoted :param snapshot_name: snapshot that is linking parent and clone :param clone_name: volume name that is going to be promoted :return: """ jbody = {} req = '/volumes/' + volume_name + \ '/snapshots/' + snapshot_name + \ '/clones/' + clone_name + '/promote' LOG.debug("promote clone %s", clone_name) resp = self.rproxy.pool_request('POST', req, json_data=jbody) if resp["code"] == 200: LOG.debug("clone %s promoted", clone_name) return self._general_error(req, resp) def delete_lun(self, volume_name, recursively_children=False, force_umount=False): """delete_volume. DELETE /volumes/ :param volume_name: :return: """ jbody = {} if recursively_children: jbody['recursively_children'] = True if force_umount: jbody['force_umount'] = True req = '/volumes/' + volume_name LOG.debug(("delete volume:%(vol)s " "args:%(args)s"), {'vol': volume_name, 'args': jbody}) if len(jbody) > 0: resp = self.rproxy.pool_request('DELETE', req, json_data=jbody) else: resp = self.rproxy.pool_request('DELETE', req) if resp["code"] == 204: LOG.debug( "volume %s deleted", volume_name) return # Handle DNE case if resp["code"] == 500: if 'message' in resp['error']: if self.resource_dne_msg.match(resp['error']['message']): LOG.debug("volume %s do not exists, delition success", volume_name) return # Handle volume busy if resp["code"] == 500 and resp["error"]: if 'message' in resp['error'] and \ 'class' in resp['error']: if self.resource_has_clones_msg.match( resp['error']['message']) and \ self.resource_has_clones_class.match( resp['error']['class']): LOG.warning("volume %s is busy", volume_name) raise jexc.JDSSResourceIsBusyException(res=volume_name) if self.resource_has_snapshots_msg.match( resp['error']['message']) and \ self.resource_has_snapshots_class.match( resp['error']['class']): LOG.warning("volume %s is busy", volume_name) raise jexc.JDSSResourceIsBusyException(res=volume_name) raise jexc.JDSSRESTException('Failed to delete volume.') def is_target(self, target_name): """is_target. GET /san/iscsi/targets/ target_name :param target_name: :return: Bool """ req = '/san/iscsi/targets/' + target_name LOG.debug("check if targe %s exists", target_name) resp = self.rproxy.pool_request('GET', req) if resp["error"] or resp["code"] not in (200, 201): return False if "name" in resp["data"]: if resp["data"]["name"] == target_name: LOG.debug( "target %s exists", target_name) return True return False def create_target(self, target_name, use_chap=True, allow_ip=None, deny_ip=None): """create_target. POST /san/iscsi/targets :param target_name: :param chap_cred: :param allow_ip: "allow_ip": [ "192.168.2.30/0", "192.168.3.45" ], :return: """ req = '/san/iscsi/targets' LOG.debug("create target %s", target_name) jdata = {"name": target_name, "active": True} jdata["incoming_users_active"] = use_chap if allow_ip: jdata["allow_ip"] = allow_ip if deny_ip: jdata["deny_ip"] = deny_ip resp = self.rproxy.pool_request('POST', req, json_data=jdata) if not resp["error"] and resp["code"] == 201: return if resp["code"] == 409: raise jexc.JDSSResourceExistsException(res=target_name) self._general_error(req, resp) def delete_target(self, target_name): """delete_target. 
DELETE /san/iscsi/targets/ :param pool_name: :param target_name: :return: """ req = '/san/iscsi/targets/' + target_name LOG.debug("delete target %s", target_name) resp = self.rproxy.pool_request('DELETE', req) if resp["code"] in (200, 201, 204): LOG.debug( "target %s deleted", target_name) return not_found_err = "opene.exceptions.ItemNotFoundError" if (resp["code"] == 404) or \ (resp["error"]["class"] == not_found_err): raise jexc.JDSSResourceNotFoundException(res=target_name) self._general_error(req, resp) def create_target_user(self, target_name, chap_cred): """Set CHAP credentials for accees specific target. POST /san/iscsi/targets//incoming-users :param target_name: :param chap_cred: { "name": "target_user", "password": "3e21ewqdsacxz" --- 12 chars min } :return: """ req = "/san/iscsi/targets/%s/incoming-users" % target_name LOG.debug("add credentails to target %s", target_name) resp = self.rproxy.pool_request('POST', req, json_data=chap_cred) if not resp["error"] and resp["code"] in (200, 201, 204): return if resp['code'] == 404: raise jexc.JDSSResourceNotFoundException(res=target_name) self._general_error(req, resp) def get_target_user(self, target_name): """Get name of CHAP user for accessing target GET /san/iscsi/targets//incoming-users :param target_name: """ req = "/san/iscsi/targets/%s/incoming-users" % target_name LOG.debug("get chap cred for target %s", target_name) resp = self.rproxy.pool_request('GET', req) if not resp["error"] and resp["code"] == 200: return resp['data'] if resp['code'] == 404: raise jexc.JDSSResourceNotFoundException(res=target_name) self._general_error(req, resp) def delete_target_user(self, target_name, user_name): """Delete CHAP user for target DELETE /san/iscsi/targets//incoming-users/ :param target_name: target name :param user_name: user name """ req = '/san/iscsi/targets/%(target)s/incoming-users/%(user)s' % { 'target': target_name, 'user': user_name} LOG.debug("remove credentails from target %s", target_name) resp = self.rproxy.pool_request('DELETE', req) if resp["error"] is None and resp["code"] == 204: return if resp['code'] == 404: raise jexc.JDSSResourceNotFoundException(res=target_name) self._general_error(req, resp) def is_target_lun(self, target_name, lun_name): """is_target_lun. GET /san/iscsi/targets//luns/ :param pool_name: :param target_name: :param lun_name: :return: Bool """ req = '/san/iscsi/targets/%(tar)s/luns/%(lun)s' % { 'tar': target_name, 'lun': lun_name} LOG.debug("check if volume %(vol)s is associated with %(tar)s", {'vol': lun_name, 'tar': target_name}) resp = self.rproxy.pool_request('GET', req) if not resp["error"] and resp["code"] == 200: LOG.debug("volume %(vol)s is associated with %(tar)s", {'vol': lun_name, 'tar': target_name}) return True if resp['code'] == 404: LOG.debug("volume %(vol)s is not associated with %(tar)s", {'vol': lun_name, 'tar': target_name}) return False self._general_error(req, resp) def attach_target_vol(self, target_name, lun_name, lun_id=0, mode=None): """attach_target_vol. 
POST /san/iscsi/targets//luns :param target_name: name of the target :param lun_name: phisical volume name to be attached :param lun_id: id that would be assigned to volume :param mode: one of "wt", "wb" or "ro" :return: """ req = '/san/iscsi/targets/%s/luns' % target_name jbody = {"name": lun_name, "lun": lun_id} if mode is not None: if mode in ['wt', 'wb', 'ro']: jbody['mode'] = mode else: raise jexc.JDSSException( _("Incoret mode for target %s" % mode)) LOG.debug("atach volume %(vol)s to target %(tar)s", {'vol': lun_name, 'tar': target_name}) resp = self.rproxy.pool_request('POST', req, json_data=jbody) if not resp["error"] and resp["code"] == 201: return if resp['code'] == 409: raise jexc.JDSSResourceExistsException(res=lun_name) if resp['code'] == 404: raise jexc.JDSSResourceNotFoundException(res=target_name) self._general_error(req, resp) def detach_target_vol(self, target_name, lun_name): """detach_target_vol. DELETE /san/iscsi/targets//luns/ :param target_name: :param lun_name: :return: """ req = '/san/iscsi/targets/%(tar)s/luns/%(lun)s' % { 'tar': target_name, 'lun': lun_name} LOG.debug("detach volume %(vol)s from target %(tar)s", {'vol': lun_name, 'tar': target_name}) resp = self.rproxy.pool_request('DELETE', req) if resp["code"] in (200, 201, 204): return if resp['code'] == 404: raise jexc.JDSSResourceNotFoundException(res=lun_name) self._general_error(req, resp) def create_snapshot(self, volume_name, snapshot_name): """create_snapshot. POST /pools//volumes//snapshots :param pool_name: :param volume_name: source volume :param snapshot_name: snapshot name :return: """ req = '/volumes/%s/snapshots' % volume_name jbody = { 'snapshot_name': snapshot_name } LOG.debug("create snapshot %s", snapshot_name) resp = self.rproxy.pool_request('POST', req, json_data=jbody) if not resp["error"] and resp["code"] in (200, 201, 204): return if resp["code"] == 500: if resp["error"]: if resp["error"]["errno"] == 5: raise jexc.JDSSSnapshotExistsException( snapshot=snapshot_name) if resp["error"]["errno"] == 1: raise jexc.JDSSVolumeNotFoundException( volume=volume_name) self._general_error(req, resp) def create_volume_from_snapshot(self, volume_name, snapshot_name, original_vol_name, **options): """create_volume_from_snapshot. 
POST /volumes//clone :param volume_name: volume that is going to be created :param snapshot_name: slice of original volume :param original_vol_name: sample copy :return: """ req = '/volumes/%s/clone' % original_vol_name jbody = { 'name': volume_name, 'snapshot': snapshot_name, 'sparse': False } if 'sparse' in options: jbody['sparse'] = options['sparse'] if 'ro' in options: jbody['ro'] = options['sparse'] LOG.debug("create volume %(vol)s from snapshot %(snap)s", {'vol': volume_name, 'snap': snapshot_name}) resp = self.rproxy.pool_request('POST', req, json_data=jbody) if not resp["error"] and resp["code"] in (200, 201, 204): return if resp["code"] == 500: if resp["error"]: if resp["error"]["errno"] == 100: raise jexc.JDSSVolumeExistsException( volume=volume_name) if resp["error"]["errno"] == 1: raise jexc.JDSSResourceNotFoundException( res="%(vol)s@%(snap)s" % {'vol': original_vol_name, 'snap': snapshot_name}) self._general_error(req, resp) def rollback_volume_to_snapshot(self, volume_name, snapshot_name): """Rollback volume to its snapshot POST /volumes//snapshots//rollback :param volume_name: volume that is going to be restored :param snapshot_name: snapshot of a volume above :return: """ req = ('/volumes/%(vol)s/snapshots/' '%(snap)s/rollback') % {'vol': volume_name, 'snap': snapshot_name} LOG.debug("rollback volume %(vol)s to snapshot %(snap)s", {'vol': volume_name, 'snap': snapshot_name}) resp = self.rproxy.pool_request('POST', req) if not resp["error"] and resp["code"] == 200: return if resp["code"] == 500: if resp["error"]: if resp["error"]["errno"] == 1: raise jexc.JDSSResourceNotFoundException( res="%(vol)s@%(snap)s" % {'vol': volume_name, 'snap': snapshot_name}) self._general_error(req, resp) def count_rollback_dependents(self, volume_name, snapshot_name): """Count volumes and snapshots affected by rollback GET /volumes//snapshots//rollback :param str volume_name: volume that is going to be reverted :param str snapshot_name: snapshot of a volume above :return: None """ req = ('/volumes/%(vol)s/snapshots/' '%(snap)s/rollback') % {'vol': volume_name, 'snap': snapshot_name} LOG.debug("get rollback count for volume %(vol)s to snapshot %(snap)s", {'vol': volume_name, 'snap': snapshot_name}) resp = self.rproxy.pool_request('GET', req) if not resp["error"] and resp["code"] == 200: return resp['data'] if resp["code"] == 500: if resp["error"]: if resp["error"]["errno"] == 1: raise jexc.JDSSResourceNotFoundException( res="%(vol)s@%(snap)s" % {'vol': volume_name, 'snap': snapshot_name}) self._general_error(req, resp) def delete_snapshot(self, volume_name, snapshot_name, recursively_children=False, force_umount=False): """delete_snapshot. DELETE /volumes//snapshots/ :param volume_name: volume that snapshot belongs to :param snapshot_name: snapshot name :param recursively_children: boolean indicating if zfs should recursively destroy all children of resource, in case of snapshot remove all snapshots in descendant file system (default false). :param recursively_dependents: boolean indicating if zfs should recursively destroy all dependents, including cloned file systems outside the target hierarchy (default false). :param force_umount: boolean indicating if volume should be forced to umount (defualt false). 
:return: """ req = '/volumes/%(vol)s/snapshots/%(snap)s' % { 'vol': volume_name, 'snap': snapshot_name} LOG.debug("delete snapshot %(snap)s of volume %(vol)s", {'snap': snapshot_name, 'vol': volume_name}) jbody = {} if recursively_children: jbody['recursively_children'] = True if force_umount: jbody['force_umount'] = True resp = self.rproxy.pool_request('DELETE', req, json_data=jbody) if resp["code"] in (200, 201, 204): LOG.debug("snapshot %s deleted", snapshot_name) return if resp["code"] == 500: if resp["error"]: if resp["error"]["errno"] == 1000: raise jexc.JDSSSnapshotIsBusyException( snapshot=snapshot_name) self._general_error(req, resp) def get_snapshot(self, volume_name, snapshot_name): req = (('/volumes/%(vol)s/snapshots/%(snap)s') % {'vol': volume_name, 'snap': snapshot_name}) LOG.debug("get snapshots for volume %s ", volume_name) resp = self.rproxy.pool_request('GET', req) if not resp["error"] and resp["code"] == 200: return resp["data"] if resp['code'] == 500: if 'message' in resp['error']: if self.resource_dne_msg.match(resp['error']['message']): raise jexc.JDSSResourceNotFoundException(volume_name) self._general_error(req, resp) def get_snapshots(self, volume_name): """get_snapshots. GET /volumes// snapshots :param volume_name: that snapshot belongs to :return: { "data": [ { "referenced": "65536", "name": "MySnapshot", "defer_destroy": "off", "userrefs": "0", "primarycache": "all", "type": "snapshot", "creation": "2015-5-27 16:8:35", "refcompressratio": "1.00x", "compressratio": "1.00x", "written": "65536", "used": "0", "clones": "", "mlslabel": "none", "secondarycache": "all" } ], "error": null } """ req = '/volumes/%s/snapshots' % volume_name LOG.debug("get snapshots for volume %s ", volume_name) resp = self.rproxy.pool_request('GET', req) if not resp["error"] and resp["code"] == 200: return resp["data"]["entries"] if resp['code'] == 500: if 'message' in resp['error']: if self.resource_dne_msg.match(resp['error']['message']): raise jexc.JDSSResourceNotFoundException(volume_name) self._general_error(req, resp) def get_pool_stats(self): """get_pool_stats. GET /pools/ :param pool_name: :return: { "data": { "available": "24433164288", "status": 24, "name": "Pool-0", "scan": { "errors": 0, "repaired": "0", "start_time": 1463476815, "state": "finished", "end_time": 1463476820, "type": "scrub" }, "iostats": { "read": "0", "write": "0", "chksum": "0" }, "vdevs": [ { "name": "scsi-SSCST_BIOoWKF6TM0qafySQBUd1bb392e", "iostats": { "read": "0", "write": "0", "chksum": "0" }, "disks": [ { "led": "off", "name": "sdb", "iostats": { "read": "0", "write": "0", "chksum": "0" }, "health": "ONLINE", "sn": "d1bb392e", "path": "pci-0000:04:00.0-scsi-0:0:0:0", "model": "oWKF6TM0qafySQBU", "id": "scsi-SSCST_BIOoWKF6TM0qafySQBUd1bb392e", "size": 30064771072 } ], "health": "ONLINE", "vdev_replacings": [], "vdev_spares": [], "type": "" } ], "health": "ONLINE", "operation": "none", "id": "11612982948930769833", "size": "29796335616" }, "error": null } """ req = "" LOG.debug("Get pool %s fsprops", self.pool) resp = self.rproxy.pool_request('GET', req) if not resp["error"] and resp["code"] == 200: return resp["data"] self._general_error(req, resp) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/drivers/open_e/jovian_common/rest_proxy.py0000664000175000017500000001537500000000000026226 0ustar00zuulzuul00000000000000# Copyright (c) 2020 Open-E, Inc. # All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Network connection handling class for JovianDSS driver.""" import json from oslo_log import log as logging from oslo_utils import netutils as o_netutils import requests import urllib3 from cinder import exception from cinder.i18n import _ from cinder.utils import retry from cinder.volume.drivers.open_e.jovian_common import exception as jexc LOG = logging.getLogger(__name__) class JovianDSSRESTProxy(object): """Jovian REST API proxy""" def __init__(self, config): """:param config: list of config values.""" self.proto = 'http' if config.get('driver_use_ssl', True): self.proto = 'https' self.hosts = config.get('san_hosts', []) self.port = str(config.get('san_api_port', 82)) for host in self.hosts: if o_netutils.is_valid_ip(host) is False: err_msg = ('Invalid value of jovian_host property: ' '%(addr)s, IP address expected.' % {'addr': host}) LOG.debug(err_msg) raise exception.InvalidConfigurationValue(err_msg) self.active_host = 0 self.delay = config.get('jovian_recovery_delay', 40) self.pool = config.get('jovian_pool', 'Pool-0') self.user = config.get('san_login', 'admin') self.password = config.get('san_password', 'admin') self.verify = config.get('driver_ssl_cert_verify', True) self.cert = config.get('driver_ssl_cert_path', None) urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning) self.session = self._get_session() def _get_session(self): """Create and init new session object""" session = requests.Session() session.auth = (self.user, self.password) session.headers.update({'Connection': 'keep-alive', 'Content-Type': 'application/json', 'Authorization': 'Basic'}) session.hooks['response'] = [JovianDSSRESTProxy._handle_500] session.verify = self.verify if self.verify and self.cert: session.verify = self.cert return session def _get_base_url(self): """Get url prefix with active host""" url = ('%(proto)s://%(host)s:%(port)s/api/v3' % { 'proto': self.proto, 'host': self.hosts[self.active_host], 'port': self.port}) return url def _next_host(self): """Set next host as active""" self.active_host = (self.active_host + 1) % len(self.hosts) def request(self, request_method, req, json_data=None): """Send request to the specific url. :param request_method: GET, POST, DELETE :param req: where to send :param json_data: data """ out = None for i in range(3): for i in range(len(self.hosts)): try: addr = "%(base)s%(req)s" % {'base': self._get_base_url(), 'req': req} LOG.debug("Sending %(t)s to %(addr)s data %(data)s", {'t': request_method, 'addr': addr, 'data': json_data}) r = None if json_data: r = requests.Request(request_method, addr, data=json.dumps(json_data)) else: r = requests.Request(request_method, addr) pr = self.session.prepare_request(r) out = self._send(pr) except requests.exceptions.ConnectionError: self._next_host() continue LOG.debug("Geting %(data)s from %(t)s to %(addr)s", {'data': out, 't': request_method, 'addr': addr}) return out def pool_request(self, request_method, req, json_data=None): """Send request to the specific url. 
:param request_method: GET, POST, DELETE :param url: where to send :param json_data: data """ req = "/pools/{pool}{req}".format(pool=self.pool, req=req) addr = "{base}{req}".format(base=self._get_base_url(), req=req) LOG.debug("Sending pool request %(t)s to %(addr)s", {'t': request_method, 'addr': addr}) return self.request(request_method, req, json_data=json_data) @retry(json.JSONDecodeError, retries=3) def _send(self, pr): """Send prepared request :param pr: prepared request """ ret = {} response_obj = self.session.send(pr) ret['code'] = response_obj.status_code if ret['code'] == 204: ret["data"] = None return ret data = json.loads(response_obj.text) ret["error"] = data.get("error") ret["data"] = data.get("data") return ret @staticmethod def _handle_500(resp, *args, **kwargs): """Handle OS error on a storage side""" error = None if resp.status_code == 500: try: data = json.loads(resp.text) error = data.get("error") except json.JSONDecodeError: return else: return if error: if "class" in error: if error["class"] == "opene.tools.scstadmin.ScstAdminError": LOG.debug("ScstAdminError %(code)d %(msg)s", {'code': error["errno"], 'msg': error["message"]}) raise jexc.JDSSOSException(_(error["message"])) if error["class"] == "exceptions.OSError": LOG.debug("OSError %(code)d %(msg)s", {'code': error["errno"], 'msg': error["message"]}) raise jexc.JDSSOSException(_(error["message"])) def get_active_host(self): """Return address of currently used host.""" return self.hosts[self.active_host] ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/drivers/open_e/options.py0000664000175000017500000000374000000000000022636 0ustar00zuulzuul00000000000000# Copyright (c) 2020 Open-E, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
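# ---------------------------------------------------------------------------
# Illustrative sketch only (not part of the driver): the host-failover pattern
# used by JovianDSSRESTProxy.request() above.  The proxy tracks the index of
# the active host and, on a connection error, rotates to the next host modulo
# the host list, retrying the whole list up to three times.  All names below
# (FailoverSketch, _send_stub) are hypothetical and exist only for this
# example; they are not part of the JovianDSS driver.

class FailoverSketch(object):
    def __init__(self, hosts):
        self.hosts = hosts
        self.active_host = 0

    def _next_host(self):
        # Same rotation rule as JovianDSSRESTProxy._next_host()
        self.active_host = (self.active_host + 1) % len(self.hosts)

    def request(self, send):
        # 'send' stands in for sending the prepared request to one host;
        # in this sketch it raises ConnectionError for unreachable hosts.
        for _attempt in range(3):
            for _ in range(len(self.hosts)):
                try:
                    return send(self.hosts[self.active_host])
                except ConnectionError:
                    self._next_host()
        raise ConnectionError("all hosts unreachable")


if __name__ == '__main__':
    # The first host is down; the request transparently fails over to the
    # second host and returns the usual {'code', 'error', 'data'} shape.
    def _send_stub(host):
        if host == '192.168.0.10':
            raise ConnectionError(host)
        return {'code': 200, 'error': None, 'data': {'host': host}}

    sketch = FailoverSketch(['192.168.0.10', '192.168.0.11'])
    print(sketch.request(_send_stub))
# ---------------------------------------------------------------------------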
from oslo_config import cfg jdss_connection_opts = [ cfg.ListOpt('san_hosts', default='', help='IP address of Open-E JovianDSS SA'), cfg.IntOpt('jovian_recovery_delay', default=60, help='Time before HA cluster failure.'), cfg.ListOpt('jovian_ignore_tpath', default=[], help='List of multipath ip addresses to ignore.'), ] jdss_iscsi_opts = [ cfg.IntOpt('chap_password_len', default=12, help='Length of the random string for CHAP password.'), cfg.StrOpt('jovian_pool', default='Pool-0', help='JovianDSS pool that holds all cinder volumes'), ] jdss_volume_opts = [ cfg.StrOpt('jovian_block_size', default='64K', choices=[('16K', 'Use 16K block size'), ('32K', 'Use 32K block size'), ('64K', 'Use 64K block size'), ('128K', 'Use 128K block size'), ('256K', 'Use 256K block size'), ('512K', 'Use 512K block size'), ('1M', 'Use 1M block size')], help='Block size for new volume') ] CONF = cfg.CONF CONF.register_opts(jdss_connection_opts) CONF.register_opts(jdss_iscsi_opts) CONF.register_opts(jdss_volume_opts) ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1759315577.395121 cinder-27.0.0/cinder/volume/drivers/prophetstor/0000775000175000017500000000000000000000000021711 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/drivers/prophetstor/__init__.py0000664000175000017500000000000000000000000024010 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/drivers/prophetstor/dpl_fc.py0000664000175000017500000004001700000000000023514 0ustar00zuulzuul00000000000000# Copyright (c) 2014 ProphetStor, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import errno from oslo_log import log as logging from cinder.common import constants from cinder import exception from cinder.i18n import _ from cinder import interface from cinder.volume import driver from cinder.volume.drivers.prophetstor import dplcommon from cinder.zonemanager import utils as fczm_utils LOG = logging.getLogger(__name__) @interface.volumedriver class DPLFCDriver(dplcommon.DPLCOMMONDriver, driver.FibreChannelDriver): def __init__(self, *args, **kwargs): super(DPLFCDriver, self).__init__(*args, **kwargs) def _get_fc_channel(self): """Get FibreChannel info. 
:returns: fcInfos[uuid] fcInfo[uuid]['display_name'] fcInfo[uuid]['display_description'] fcInfo[uuid]['hardware_address'] fcInfo[uuid]['type'] fcInfo[uuid]['speed'] fcInfo[uuid]['state'] """ output = None fcInfos = {} try: retCode, output = self.dpl.get_server_info() if retCode == 0 and output: fcUuids = output.get('metadata', {}).get('storage_adapter', {}).keys() for fcUuid in fcUuids: fcInfo = output.get('metadata', {}).get('storage_adapter', {}).get(fcUuid) if fcInfo['type'] == 'fc': fcInfos[fcUuid] = fcInfo except Exception as e: LOG.error("Failed to get fiber channel info from storage " "due to %(stat)s", {'stat': e}) return fcInfos def _get_targets(self): """Get targets. :returns: targetInfos[uuid] = targetInfo targetInfo['targetUuid'] targetInfo['targetName'] targetInfo['targetAddr'] """ output = None targetInfos = {} try: retCode, output = self.dpl.get_target_list('target') if retCode == 0 and output: for targetInfo in output.get('children', []): targetI = {} targetI['targetUuid'] = targetInfo[0] targetI['targetName'] = targetInfo[1] targetI['targetAddr'] = targetInfo[2] targetInfos[str(targetInfo[0])] = targetI except Exception as e: targetInfos = {} LOG.error("Failed to get fiber channel target from " "storage server due to %(stat)s", {'stat': e}) return targetInfos def _get_targetwpns(self, volumeid, initiatorWwpns): lstargetWwpns = [] try: ret, output = self.dpl.get_vdev(volumeid) if ret == 0 and output: exports = output.get('exports', {}) fc_infos = exports.get('Network/FC', {}) for fc_info in fc_infos: for p in fc_info.get('permissions', []): if p.get(initiatorWwpns, None): targetWwpns = fc_info.get('target_identifier', '') lstargetWwpns.append(targetWwpns) except Exception as e: LOG.error("Failed to get target wwpns from storage due " "to %(stat)s", {'stat': e}) lstargetWwpns = [] return lstargetWwpns def _convertHex2String(self, wwpns): szwwpns = '' if len(str(wwpns)) == 16: szwwpns = '%2s:%2s:%2s:%2s:%2s:%2s:%2s:%2s' % ( str(wwpns)[0:2], str(wwpns)[2:4], str(wwpns)[4:6], str(wwpns)[6:8], str(wwpns)[8:10], str(wwpns)[10:12], str(wwpns)[12:14], str(wwpns)[14:16]) return szwwpns def _export_fc(self, volumeid, targetwwpns, initiatorwwpns, volumename): ret = 0 output = '' LOG.debug('Export fc: %(volume)s, %(wwpns)s, %(iqn)s, %(volumename)s', {'volume': volumeid, 'wwpns': targetwwpns, 'iqn': initiatorwwpns, 'volumename': volumename}) try: ret, output = self.dpl.assign_vdev_fc( self._conver_uuid2hex(volumeid), targetwwpns, initiatorwwpns, volumename) except Exception: LOG.error('Volume %(volumeid)s failed to send assign command, ' 'ret: %(status)s output: %(output)s', {'volumeid': volumeid, 'status': ret, 'output': output}) ret = errno.EFAULT if ret == errno.EAGAIN: ret, event_uuid = self._get_event_uuid(output) if len(event_uuid): ret = 0 status = self._wait_event( self.dpl.get_vdev_status, self._conver_uuid2hex(volumeid), event_uuid) if status['state'] == 'error': ret = errno.EFAULT msg = _('Flexvisor failed to assign volume %(id)s: ' '%(status)s.') % {'id': volumeid, 'status': status} raise exception.VolumeBackendAPIException(data=msg) else: ret = errno.EFAULT msg = _('Flexvisor failed to assign volume %(id)s due to ' 'unable to query status by event ' 'id.') % {'id': volumeid} raise exception.VolumeBackendAPIException(data=msg) elif ret != 0: msg = _('Flexvisor assign volume failed:%(id)s:' '%(status)s.') % {'id': volumeid, 'status': ret} raise exception.VolumeBackendAPIException(data=msg) return ret def _delete_export_fc(self, volumeid, targetwwpns, initiatorwwpns): ret = 0 
output = '' ret, output = self.dpl.unassign_vdev_fc( self._conver_uuid2hex(volumeid), targetwwpns, initiatorwwpns) if ret == errno.EAGAIN: ret, event_uuid = self._get_event_uuid(output) if ret == 0 and len(event_uuid): status = self._wait_event( self.dpl.get_vdev_status, volumeid, event_uuid) if status['state'] == 'error': ret = errno.EFAULT msg = _('Flexvisor failed to unassign volume %(id)s:' ' %(status)s.') % {'id': volumeid, 'status': status} raise exception.VolumeBackendAPIException(data=msg) else: msg = _('Flexvisor failed to unassign volume (get event) ' '%(id)s.') % {'id': volumeid} raise exception.VolumeBackendAPIException(data=msg) elif ret != 0: msg = _('Flexvisor unassign volume failed:%(id)s:' '%(status)s.') % {'id': volumeid, 'status': ret} raise exception.VolumeBackendAPIException(data=msg) else: LOG.info('Flexvisor succeeded to unassign volume %(id)s.', {'id': volumeid}) return ret def _build_initiator_target_map(self, connector, tgtwwns): """Build the target_wwns and the initiator target map.""" init_targ_map = {} initiator_wwns = connector['wwpns'] for initiator in initiator_wwns: init_targ_map[initiator] = tgtwwns return init_targ_map def initialize_connection(self, volume, connector): """Allow connection to connector and return connection info.""" """ connector = {'ip': CONF.my_ip, 'host': CONF.host, 'initiator': self._initiator, 'wwnns': self._fc_wwnns, 'wwpns': self._fc_wwpns} """ dc_fc = {} dc_target = {} lsTargetWwpn = [] output = None properties = {} preferTargets = {} ret = 0 targetIdentifier = [] szwwpns = [] LOG.info('initialize_connection volume: %(volume)s, connector:' ' %(connector)s', {"volume": volume, "connector": connector}) # Get Storage Fiber channel controller dc_fc = self._get_fc_channel() # Get existed FC target list to decide target wwpn dc_target = self._get_targets() if len(dc_target) == 0: msg = _('Backend storage did not configure fiber channel ' 'target.') raise exception.VolumeBackendAPIException(data=msg) for keyFc in dc_fc: for targetuuid in dc_target: if dc_fc[keyFc]['hardware_address'] == \ dc_target[targetuuid]['targetAddr']: preferTargets[targetuuid] = dc_target[targetuuid] break # Confirm client wwpn is existed in sns table # Covert wwwpns to 'xx:xx:xx:xx:xx:xx:xx:xx' format for dwwpn in connector['wwpns']: szwwpn = self._convertHex2String(dwwpn) if len(szwwpn) == 0: msg = _('Invalid wwpns format %(wwpns)s') % \ {'wwpns': connector['wwpns']} raise exception.VolumeBackendAPIException(data=msg) szwwpns.append(szwwpn) if len(szwwpns): for targetUuid in preferTargets: targetWwpn = '' targetWwpn = preferTargets.get(targetUuid, {}).get('targetAddr', '') lsTargetWwpn.append(targetWwpn) # Use wwpns to assign volume. LOG.info('Prefer use target wwpn %(wwpn)s', {'wwpn': lsTargetWwpn}) # Start to create export in all FC target node. 
assignedTarget = [] for pTarget in lsTargetWwpn: try: ret = self._export_fc(volume['id'], str(pTarget), szwwpns, volume['name']) if ret: break else: assignedTarget.append(pTarget) except Exception as e: LOG.error('Failed to export fiber channel target ' 'due to %s', e) ret = errno.EFAULT break if ret == 0: ret, output = self.dpl.get_vdev(self._conver_uuid2hex( volume['id'])) nLun = -1 if ret == 0: try: for p in output['exports']['Network/FC']: # check initiator wwpn existed in target initiator list for initI in p.get('permissions', []): for szwpn in szwwpns: if initI.get(szwpn, None): nLun = initI[szwpn] break if nLun != -1: break if nLun != -1: targetIdentifier.append( str(p['target_identifier']).replace(':', '')) except Exception: msg = _('Invalid connection initialization response of ' 'volume %(name)s: ' '%(output)s') % {'name': volume['name'], 'output': output} raise exception.VolumeBackendAPIException(data=msg) if nLun != -1: init_targ_map = self._build_initiator_target_map(connector, targetIdentifier) properties['target_discovered'] = True properties['target_wwn'] = targetIdentifier properties['target_lun'] = int(nLun) properties['volume_id'] = volume['id'] properties['initiator_target_map'] = init_targ_map LOG.info('%(volume)s assign type fibre_channel, properties ' '%(properties)s', {'volume': volume['id'], 'properties': properties}) else: msg = _('Invalid connection initialization response of ' 'volume %(name)s') % {'name': volume['name']} raise exception.VolumeBackendAPIException(data=msg) LOG.info('Connect initialization info: ' '{driver_volume_type: fibre_channel, ' 'data: %(properties)s', {'properties': properties}) conn_info = {'driver_volume_type': 'fibre_channel', 'data': properties} fczm_utils.add_fc_zone(conn_info) return conn_info def terminate_connection(self, volume, connector, **kwargs): """Disallow connection from connector.""" """ connector = {'ip': CONF.my_ip, 'host': CONF.host, 'initiator': self._initiator, 'wwnns': self._fc_wwnns, 'wwpns': self._fc_wwpns} """ lstargetWwpns = [] lsTargets = [] szwwpns = [] ret = 0 info = {'driver_volume_type': 'fibre_channel', 'data': {}} LOG.info('terminate_connection volume: %(volume)s, ' 'connector: %(con)s', {'volume': volume, 'con': connector}) # Query targetwwpns. # Get all target list of volume. 
for dwwpn in connector['wwpns']: szwwpn = self._convertHex2String(dwwpn) if len(szwwpn) == 0: msg = _('Invalid wwpns format %(wwpns)s') % \ {'wwpns': connector['wwpns']} raise exception.VolumeBackendAPIException(data=msg) szwwpns.append(szwwpn) if len(szwwpns) == 0: ret = errno.EFAULT msg = _('Invalid wwpns format %(wwpns)s') % \ {'wwpns': connector['wwpns']} raise exception.VolumeBackendAPIException(data=msg) else: for szwwpn in szwwpns: lstargetWwpns = self._get_targetwpns( self._conver_uuid2hex(volume['id']), szwwpn) lsTargets = list(set(lsTargets + lstargetWwpns)) # Remove all export target try: for ptarget in lsTargets: ret = self._delete_export_fc(volume['id'], ptarget, szwwpns) if ret: break except Exception: ret = errno.EFAULT finally: if ret: msg = _('Faield to unassign %(volume)s') % (volume['id']) raise exception.VolumeBackendAPIException(data=msg) # Failed to delete export with fibre channel if ret: init_targ_map = self._build_initiator_target_map(connector, lsTargets) info['data'] = {'target_wwn': lsTargets, 'initiator_target_map': init_targ_map} fczm_utils.remove_fc_zone(info) return info def get_volume_stats(self, refresh=False): if refresh: data = super(DPLFCDriver, self).get_volume_stats(refresh) if data: data['storage_protocol'] = constants.FC backend_name = \ self.configuration.safe_get('volume_backend_name') data['volume_backend_name'] = (backend_name or 'DPLFCDriver') self._stats = data return self._stats ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/drivers/prophetstor/dpl_iscsi.py0000664000175000017500000001556500000000000024250 0ustar00zuulzuul00000000000000# Copyright (c) 2014 ProphetStor, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
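# ---------------------------------------------------------------------------
# Illustrative sketch only (not part of the driver): the behaviour of the two
# small helpers used by DPLFCDriver above.  _convertHex2String() reformats a
# bare 16-character WWPN into the colon-separated form expected by the
# backend, and _build_initiator_target_map() maps every initiator WWPN to the
# same list of target WWNs.  The standalone functions below only mirror that
# logic for demonstration; their names are invented for this example.

def convert_hex_to_string(wwpn):
    # Returns '' for anything that is not exactly 16 characters long,
    # matching the driver's behaviour for malformed WWPNs.
    wwpn = str(wwpn)
    if len(wwpn) != 16:
        return ''
    return ':'.join(wwpn[i:i + 2] for i in range(0, 16, 2))


def build_initiator_target_map(initiator_wwpns, target_wwns):
    # Every initiator sees the same set of target WWNs, as in
    # DPLFCDriver._build_initiator_target_map().
    return {initiator: target_wwns for initiator in initiator_wwpns}


if __name__ == '__main__':
    print(convert_hex_to_string('2100001b32a9c1d2'))
    # -> 21:00:00:1b:32:a9:c1:d2
    print(build_initiator_target_map(['2100001b32a9c1d2'],
                                     ['5000d310009f4300']))
    # -> {'2100001b32a9c1d2': ['5000d310009f4300']}
# ---------------------------------------------------------------------------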
import errno from oslo_log import log as logging from cinder.common import constants from cinder import exception from cinder.i18n import _ from cinder import interface import cinder.volume.driver from cinder.volume.drivers.prophetstor import dplcommon LOG = logging.getLogger(__name__) @interface.volumedriver class DPLISCSIDriver(dplcommon.DPLCOMMONDriver, cinder.volume.driver.ISCSIDriver): def __init__(self, *args, **kwargs): super(DPLISCSIDriver, self).__init__(*args, **kwargs) def initialize_connection(self, volume, connector): """Allow connection to connector and return connection info.""" properties = {} properties['target_lun'] = None properties['target_discovered'] = True properties['target_portal'] = '' properties['target_iqn'] = None properties['volume_id'] = volume['id'] dpl_server = self.configuration.san_ip dpl_iscsi_port = self.configuration.target_port ret, output = self.dpl.assign_vdev(self._conver_uuid2hex( volume['id']), connector['initiator'].lower(), volume['id'], '%s:%d' % (dpl_server, dpl_iscsi_port), 0) if ret == errno.EAGAIN: ret, event_uuid = self._get_event_uuid(output) if len(event_uuid): ret = 0 status = self._wait_event( self.dpl.get_vdev_status, self._conver_uuid2hex( volume['id']), event_uuid) if status['state'] == 'error': ret = errno.EFAULT msg = _('Flexvisor failed to assign volume %(id)s: ' '%(status)s.') % {'id': volume['id'], 'status': status} raise exception.VolumeBackendAPIException(data=msg) else: ret = errno.EFAULT msg = _('Flexvisor failed to assign volume %(id)s due to ' 'unable to query status by event ' 'id.') % {'id': volume['id']} raise exception.VolumeBackendAPIException(data=msg) elif ret != 0: msg = _('Flexvisor assign volume failed.:%(id)s:' '%(status)s.') % {'id': volume['id'], 'status': ret} raise exception.VolumeBackendAPIException(data=msg) if ret == 0: ret, output = self.dpl.get_vdev( self._conver_uuid2hex(volume['id'])) if ret == 0: for tgInfo in output['exports']['Network/iSCSI']: if tgInfo['permissions'] and \ isinstance(tgInfo['permissions'][0], dict): for assign in tgInfo['permissions']: if connector['initiator'].lower() in assign.keys(): for tgportal in tgInfo.get('portals', {}): properties['target_portal'] = tgportal break properties['target_lun'] = \ int(assign[connector['initiator'].lower()]) break if properties['target_portal'] != '': properties['target_iqn'] = tgInfo['target_identifier'] break else: if connector['initiator'].lower() in tgInfo['permissions']: for tgportal in tgInfo.get('portals', {}): properties['target_portal'] = tgportal break if properties['target_portal'] != '': properties['target_lun'] = int( tgInfo['logical_unit_number']) properties['target_iqn'] = tgInfo['target_identifier'] break if not (ret == 0 or properties['target_portal']): msg = _('Flexvisor failed to assign volume %(volume)s ' 'iqn %(iqn)s.') % {'volume': volume['id'], 'iqn': connector['initiator']} raise exception.VolumeBackendAPIException(data=msg) return {'driver_volume_type': 'iscsi', 'data': properties} def terminate_connection(self, volume, connector, **kwargs): """Disallow connection from connector.""" ret, output = self.dpl.unassign_vdev( self._conver_uuid2hex(volume['id']), connector['initiator']) if ret == errno.EAGAIN: ret, event_uuid = self._get_event_uuid(output) if ret == 0: status = self._wait_event( self.dpl.get_vdev_status, volume['id'], event_uuid) if status['state'] == 'error': ret = errno.EFAULT msg = _('Flexvisor failed to unassign volume %(id)s:' ' %(status)s.') % {'id': volume['id'], 'status': status} raise 
exception.VolumeBackendAPIException(data=msg) else: msg = _('Flexvisor failed to unassign volume (get event) ' '%(id)s.') % {'id': volume['id']} raise exception.VolumeBackendAPIException(data=msg) elif ret == errno.ENODATA: LOG.info('Flexvisor already unassigned volume %(id)s.', {'id': volume['id']}) elif ret != 0: msg = _('Flexvisor failed to unassign volume:%(id)s:' '%(status)s.') % {'id': volume['id'], 'status': ret} raise exception.VolumeBackendAPIException(data=msg) def get_volume_stats(self, refresh=False): if refresh: try: data = super(DPLISCSIDriver, self).get_volume_stats(refresh) if data: data['storage_protocol'] = constants.ISCSI backend_name = \ self.configuration.safe_get('volume_backend_name') data['volume_backend_name'] = \ (backend_name or 'DPLISCSIDriver') self._stats = data except Exception as exc: LOG.warning('Cannot get volume status %(exc)s.', {'exc': exc}) return self._stats ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/drivers/prophetstor/dplcommon.py0000664000175000017500000017201000000000000024254 0ustar00zuulzuul00000000000000# Copyright (c) 2014 ProphetStor, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Implementation of the class of ProphetStor DPL storage adapter of Federator. 
# v2.0.1 Consistency group support # v2.0.2 Pool aware scheduler # v2.0.3 Consistency group modification support # v2.0.4 Port ProphetStor driver to use new driver model # v2.0.5 Move from httplib to requests """ import base64 import errno from http import HTTPStatus import json import random import time from oslo_log import log as logging from oslo_service import loopingcall from oslo_utils import units import requests from cinder.common import constants from cinder import exception from cinder.i18n import _ from cinder import objects from cinder.objects import fields from cinder.volume import driver from cinder.volume.drivers.prophetstor import options from cinder.volume.drivers.san import san from cinder.volume import volume_utils LOG = logging.getLogger(__name__) CONNECTION_RETRY = 10 MAXSNAPSHOTS = 1024 DISCOVER_SERVER_TYPE = 'dpl' DPL_BLOCKSTOR = '/dpl_blockstor' DPL_SYSTEM = '/dpl_system' DPL_VER_V1 = 'v1' DPL_OBJ_POOL = 'dpl_pool' DPL_OBJ_DISK = 'dpl_disk' DPL_OBJ_VOLUME = 'dpl_volume' DPL_OBJ_VOLUMEGROUP = 'dpl_volgroup' DPL_OBJ_SNAPSHOT = 'cdmi_snapshots' DPL_OBJ_EXPORT = 'dpl_export' DPL_OBJ_REPLICATION = 'cdmi_replication' DPL_OBJ_TARGET = 'dpl_target' DPL_OBJ_SYSTEM = 'dpl_system' DPL_OBJ_SNS = 'sns_table' class DPLCommand(object): """DPL command interface.""" def __init__(self, ip, port, username, password, cert_verify=False, cert_path=None): self.ip = ip self.port = port self.username = username self.password = password self.cert_verify = cert_verify self.cert_path = cert_path def send_cmd(self, method, url, params, expected_status): """Send command to DPL.""" retcode = 0 data = {} header = {'Content-Type': 'application/cdmi-container', 'Accept': 'application/cdmi-container', 'x-cdmi-specification-version': '1.0.2'} # base64 encode the username and password auth = base64.encodebytes('%s:%s' % (self.username, self.password)).replace('\n', '') header['Authorization'] = 'Basic %s' % auth if not params: payload = None else: try: payload = json.dumps(params, ensure_ascii=False) payload.encode('utf-8') except Exception as e: LOG.error('JSON encode params %(param)s error:' ' %(status)s.', {'param': params, 'status': e}) retcode = errno.EINVAL retry = CONNECTION_RETRY func = getattr(requests, method.lower()) cert_path = False if self.cert_verify: cert_path = self.cert_path else: cert_path = False while (retry): try: r = func( url="https://%s:%s%s" % (self.ip, self.port, url), data=payload, headers=header, verify=cert_path) if r.status_code == HTTPStatus.SERVICE_UNAVAILABLE: LOG.error("The flexvisor service is unavailable.") continue else: break except Exception as e: msg = (_("failed to %(method)s due to %(error)s") % {"method": method, "error": str(e)}) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) if (r.status_code in expected_status and r.status_code == HTTPStatus.NOT_FOUND): retcode = errno.ENODATA elif r.status_code not in expected_status: LOG.error('%(method)s %(url)s unexpected response status: ' '%(response)s (expects: %(expects)s).', {'method': method, 'url': url, 'response': HTTPStatus.responses[r.status_code], 'expects': expected_status}) if r.status_code == HTTPStatus.UNAUTHORIZED: raise exception.NotAuthorized else: retcode = errno.EIO elif r.status_code == HTTPStatus.NOT_FOUND: retcode = errno.ENODATA elif r.status_code == HTTPStatus.ACCEPTED: retcode = errno.EAGAIN try: data = r.json() except (TypeError, ValueError) as e: LOG.error('Call to json.loads() raised an exception: %s.', e) retcode = errno.ENOEXEC except Exception as e: LOG.error('Read 
response raised an exception: %s.', e) retcode = errno.ENOEXEC elif (r.status_code in [HTTPStatus.OK, HTTPStatus.CREATED] and HTTPStatus.NO_CONTENT not in expected_status): try: data = r.json() except (TypeError, ValueError) as e: LOG.error('Call to json.loads() raised an exception: %s.', e) retcode = errno.ENOEXEC except Exception as e: LOG.error('Read response raised an exception: %s.', e) retcode = errno.ENOEXEC return retcode, data class DPLVolume(object): def __init__(self, dplServer, dplPort, dplUser, dplPassword, cert_verify=False, cert_path=None): self.objCmd = DPLCommand( dplServer, dplPort, dplUser, dplPassword, cert_verify=cert_verify, cert_path=cert_path) def _execute(self, method, url, params, expected_status): if self.objCmd: return self.objCmd.send_cmd(method, url, params, expected_status) else: return -1, None def _gen_snapshot_url(self, vdevid, snapshotid): snapshot_url = '/%s/%s/%s' % (vdevid, DPL_OBJ_SNAPSHOT, snapshotid) return snapshot_url def get_server_info(self): method = 'GET' url = ('/%s/%s/' % (DPL_VER_V1, DPL_OBJ_SYSTEM)) return self._execute(method, url, None, [HTTPStatus.OK, HTTPStatus.ACCEPTED]) def create_vdev(self, volumeID, volumeName, volumeDesc, poolID, volumeSize, fthinprovision=True, maximum_snapshot=MAXSNAPSHOTS, snapshot_quota=None): method = 'PUT' metadata = {} params = {} url = '/%s/%s/%s/' % (DPL_VER_V1, DPL_OBJ_VOLUME, volumeID) if volumeName is None or volumeName == '': metadata['display_name'] = volumeID else: metadata['display_name'] = volumeName metadata['display_description'] = volumeDesc metadata['pool_uuid'] = poolID metadata['total_capacity'] = volumeSize metadata['maximum_snapshot'] = maximum_snapshot if snapshot_quota is not None: metadata['snapshot_quota'] = int(snapshot_quota) metadata['properties'] = dict(thin_provision=fthinprovision) params['metadata'] = metadata return self._execute(method, url, params, [HTTPStatus.OK, HTTPStatus.ACCEPTED, HTTPStatus.CREATED]) def extend_vdev(self, volumeID, volumeName, volumeDesc, volumeSize, maximum_snapshot=MAXSNAPSHOTS, snapshot_quota=None): method = 'PUT' metadata = {} params = {} url = '/%s/%s/%s/' % (DPL_VER_V1, DPL_OBJ_VOLUME, volumeID) if volumeName is None or volumeName == '': metadata['display_name'] = volumeID else: metadata['display_name'] = volumeName metadata['display_description'] = volumeDesc metadata['total_capacity'] = int(volumeSize) metadata['maximum_snapshot'] = maximum_snapshot if snapshot_quota is not None: metadata['snapshot_quota'] = snapshot_quota params['metadata'] = metadata return self._execute(method, url, params, [HTTPStatus.OK, HTTPStatus.ACCEPTED, HTTPStatus.CREATED]) def delete_vdev(self, volumeID, force=True): method = 'DELETE' metadata = {} params = {} url = '/%s/%s/%s/' % (DPL_VER_V1, DPL_OBJ_VOLUME, volumeID) metadata['force'] = force params['metadata'] = metadata return self._execute(method, url, params, [HTTPStatus.OK, HTTPStatus.ACCEPTED, HTTPStatus.NOT_FOUND, HTTPStatus.NO_CONTENT]) def create_vdev_from_snapshot(self, vdevID, vdevDisplayName, vdevDesc, snapshotID, poolID, fthinprovision=True, maximum_snapshot=MAXSNAPSHOTS, snapshot_quota=None): method = 'PUT' metadata = {} params = {} url = '/%s/%s/%s/' % (DPL_VER_V1, DPL_OBJ_VOLUME, vdevID) metadata['snapshot_operation'] = 'copy' if vdevDisplayName is None or vdevDisplayName == "": metadata['display_name'] = vdevID else: metadata['display_name'] = vdevDisplayName metadata['display_description'] = vdevDesc metadata['pool_uuid'] = poolID metadata['properties'] = {} metadata['maximum_snapshot'] = 
maximum_snapshot if snapshot_quota: metadata['snapshot_quota'] = snapshot_quota metadata['properties'] = dict(thin_provision=fthinprovision) params['metadata'] = metadata params['copy'] = self._gen_snapshot_url(vdevID, snapshotID) return self._execute(method, url, params, [HTTPStatus.OK, HTTPStatus.ACCEPTED, HTTPStatus.CREATED]) def spawn_vdev_from_snapshot(self, new_vol_id, src_vol_id, vol_display_name, description, snap_id): method = 'PUT' params = {} metadata = {} url = '/%s/%s/%s/' % (DPL_VER_V1, DPL_OBJ_VOLUME, new_vol_id) metadata['snapshot_operation'] = 'spawn' if vol_display_name is None or vol_display_name == '': metadata['display_name'] = new_vol_id else: metadata['display_name'] = vol_display_name metadata['display_description'] = description params['metadata'] = metadata params['copy'] = self._gen_snapshot_url(src_vol_id, snap_id) return self._execute(method, url, params, [HTTPStatus.OK, HTTPStatus.ACCEPTED, HTTPStatus.CREATED]) def get_pools(self): method = 'GET' url = '/%s/%s/' % (DPL_VER_V1, DPL_OBJ_POOL) return self._execute(method, url, None, [HTTPStatus.OK]) def get_pool(self, poolid): method = 'GET' url = '/%s/%s/%s/' % (DPL_VER_V1, DPL_OBJ_POOL, poolid) return self._execute(method, url, None, [HTTPStatus.OK, HTTPStatus.ACCEPTED]) def clone_vdev(self, SourceVolumeID, NewVolumeID, poolID, volumeName, volumeDesc, volumeSize, fthinprovision=True, maximum_snapshot=MAXSNAPSHOTS, snapshot_quota=None): method = 'PUT' params = {} metadata = {} url = '/%s/%s/%s/' % (DPL_VER_V1, DPL_OBJ_VOLUME, NewVolumeID) metadata["snapshot_operation"] = "clone" if volumeName is None or volumeName == '': metadata["display_name"] = NewVolumeID else: metadata["display_name"] = volumeName metadata["display_description"] = volumeDesc metadata["pool_uuid"] = poolID metadata["total_capacity"] = volumeSize metadata["maximum_snapshot"] = maximum_snapshot if snapshot_quota: metadata["snapshot_quota"] = snapshot_quota metadata["properties"] = dict(thin_provision=fthinprovision) params["metadata"] = metadata params["copy"] = SourceVolumeID return self._execute(method, url, params, [HTTPStatus.OK, HTTPStatus.CREATED, HTTPStatus.ACCEPTED]) def create_vdev_snapshot(self, vdevid, snapshotid, snapshotname='', snapshotdes='', isgroup=False): method = 'PUT' metadata = {} params = {} if isgroup: url = '/%s/%s/%s/' % (DPL_VER_V1, DPL_OBJ_VOLUMEGROUP, vdevid) else: url = '/%s/%s/%s/' % (DPL_VER_V1, DPL_OBJ_VOLUME, vdevid) if not snapshotname: metadata['display_name'] = snapshotid else: metadata['display_name'] = snapshotname metadata['display_description'] = snapshotdes params['metadata'] = metadata params['snapshot'] = snapshotid return self._execute(method, url, params, [HTTPStatus.OK, HTTPStatus.CREATED, HTTPStatus.ACCEPTED]) def get_vdev(self, vdevid): method = 'GET' url = '/%s/%s/%s/' % (DPL_VER_V1, DPL_OBJ_VOLUME, vdevid) return self._execute(method, url, None, [HTTPStatus.OK, HTTPStatus.ACCEPTED, HTTPStatus.NOT_FOUND]) def get_vdev_status(self, vdevid, eventid): method = 'GET' url = ('/%s/%s/%s/?event_uuid=%s' % (DPL_VER_V1, DPL_OBJ_VOLUME, vdevid, eventid)) return self._execute(method, url, None, [HTTPStatus.OK, HTTPStatus.NOT_FOUND]) def get_pool_status(self, poolid, eventid): method = 'GET' url = ('/%s/%s/%s/?event_uuid=%s' % (DPL_VER_V1, DPL_OBJ_POOL, poolid, eventid)) return self._execute(method, url, None, [HTTPStatus.OK, HTTPStatus.NOT_FOUND]) def assign_vdev(self, vdevid, iqn, lunname, portal, lunid=0): method = 'PUT' metadata = {} exports = {} params = {} url = '/%s/%s/%s/' % (DPL_VER_V1, 
DPL_OBJ_VOLUME, vdevid) metadata['export_operation'] = 'assign' exports['Network/iSCSI'] = {} target_info = {} target_info['logical_unit_number'] = 0 target_info['logical_unit_name'] = lunname permissions = [] portals = [] portals.append(portal) permissions.append(iqn) target_info['permissions'] = permissions target_info['portals'] = portals exports['Network/iSCSI'] = target_info params['metadata'] = metadata params['exports'] = exports return self._execute(method, url, params, [HTTPStatus.OK, HTTPStatus.ACCEPTED, HTTPStatus.CREATED]) def assign_vdev_fc(self, vdevid, targetwwpn, initiatorwwpn, lunname, lunid=-1): method = 'PUT' metadata = {} exports = {} params = {} url = '/%s/%s/%s/' % (DPL_VER_V1, DPL_OBJ_VOLUME, vdevid) metadata['export_operation'] = 'assign' exports['Network/FC'] = {} target_info = {} target_info['target_identifier'] = targetwwpn target_info['logical_unit_number'] = lunid target_info['logical_unit_name'] = lunname target_info['permissions'] = initiatorwwpn exports['Network/FC'] = target_info params['metadata'] = metadata params['exports'] = exports return self._execute(method, url, params, [HTTPStatus.OK, HTTPStatus.ACCEPTED, HTTPStatus.CREATED]) def unassign_vdev(self, vdevid, initiatorIqn, targetIqn=''): method = 'PUT' metadata = {} exports = {} params = {} url = '/%s/%s/%s/' % (DPL_VER_V1, DPL_OBJ_VOLUME, vdevid) metadata['export_operation'] = 'unassign' params['metadata'] = metadata exports['Network/iSCSI'] = {} exports['Network/iSCSI']['target_identifier'] = targetIqn permissions = [] permissions.append(initiatorIqn) exports['Network/iSCSI']['permissions'] = permissions params['exports'] = exports return self._execute(method, url, params, [HTTPStatus.OK, HTTPStatus.ACCEPTED, HTTPStatus.NO_CONTENT, HTTPStatus.NOT_FOUND]) def unassign_vdev_fc(self, vdevid, targetwwpn, initiatorwwpns): method = 'PUT' metadata = {} exports = {} params = {} url = '/%s/%s/%s/' % (DPL_VER_V1, DPL_OBJ_VOLUME, vdevid) metadata['export_operation'] = 'unassign' params['metadata'] = metadata exports['Network/FC'] = {} exports['Network/FC']['target_identifier'] = targetwwpn permissions = initiatorwwpns exports['Network/FC']['permissions'] = permissions params['exports'] = exports return self._execute(method, url, params, [HTTPStatus.OK, HTTPStatus.ACCEPTED, HTTPStatus.NO_CONTENT, HTTPStatus.NOT_FOUND]) def delete_vdev_snapshot(self, objID, snapshotID, isGroup=False): method = 'DELETE' if isGroup: url = ('/%s/%s/%s/%s/%s/' % (DPL_VER_V1, DPL_OBJ_VOLUMEGROUP, objID, DPL_OBJ_SNAPSHOT, snapshotID)) else: url = ('/%s/%s/%s/%s/%s/' % (DPL_VER_V1, DPL_OBJ_VOLUME, objID, DPL_OBJ_SNAPSHOT, snapshotID)) return self._execute(method, url, None, [HTTPStatus.OK, HTTPStatus.ACCEPTED, HTTPStatus.NO_CONTENT, HTTPStatus.NOT_FOUND]) def rollback_vdev(self, vdevid, snapshotid): method = 'PUT' params = {} url = '/%s/%s/%s/' % (DPL_VER_V1, DPL_OBJ_VOLUME, vdevid) params['copy'] = self._gen_snapshot_url(vdevid, snapshotid) return self._execute(method, url, params, [HTTPStatus.OK, HTTPStatus.ACCEPTED]) def list_vdev_snapshots(self, vdevid, isGroup=False): method = 'GET' if isGroup: url = ('/%s/%s/%s/%s/' % (DPL_VER_V1, DPL_OBJ_VOLUMEGROUP, vdevid, DPL_OBJ_SNAPSHOT)) else: url = ('/%s/%s/%s/%s/' % (DPL_VER_V1, DPL_OBJ_VOLUME, vdevid, DPL_OBJ_SNAPSHOT)) return self._execute(method, url, None, [HTTPStatus.OK]) def query_vdev_snapshot(self, vdevid, snapshotID, isGroup=False): method = 'GET' if isGroup: url = ('/%s/%s/%s/%s/%s/' % (DPL_VER_V1, DPL_OBJ_VOLUMEGROUP, vdevid, DPL_OBJ_SNAPSHOT, snapshotID)) else: url = 
('/%s/%s/%s/%s/%s/' % (DPL_VER_V1, DPL_OBJ_VOLUME, vdevid, DPL_OBJ_SNAPSHOT, snapshotID)) return self._execute(method, url, None, [HTTPStatus.OK]) def create_target(self, targetID, protocol, displayName, targetAddress, description=''): method = 'PUT' params = {} url = '/%s/%s/%s/' % (DPL_VER_V1, DPL_OBJ_EXPORT, targetID) params['metadata'] = {} metadata = params['metadata'] metadata['type'] = 'target' metadata['protocol'] = protocol if displayName is None or displayName == '': metadata['display_name'] = targetID else: metadata['display_name'] = displayName metadata['display_description'] = description metadata['address'] = targetAddress return self._execute(method, url, params, [HTTPStatus.OK]) def get_target(self, targetID): method = 'GET' url = '/%s/%s/%s/' % (DPL_VER_V1, DPL_OBJ_EXPORT, targetID) return self._execute(method, url, None, [HTTPStatus.OK]) def delete_target(self, targetID): method = 'DELETE' url = '/%s/%s/%s/' % (DPL_VER_V1, DPL_OBJ_EXPORT, targetID) return self._execute(method, url, None, [HTTPStatus.OK, HTTPStatus.ACCEPTED, HTTPStatus.NOT_FOUND]) def get_target_list(self, type='target'): # type = target/initiator method = 'GET' if type is None: url = '/%s/%s/' % (DPL_VER_V1, DPL_OBJ_EXPORT) else: url = '/%s/%s/?type=%s' % (DPL_VER_V1, DPL_OBJ_EXPORT, type) return self._execute(method, url, None, [HTTPStatus.OK]) def get_sns_table(self, wwpn): method = 'PUT' params = {} url = '/%s/%s/%s/' % (DPL_VER_V1, DPL_OBJ_EXPORT, DPL_OBJ_SNS) params['metadata'] = {} params['metadata']['protocol'] = 'fc' params['metadata']['address'] = str(wwpn) return self._execute(method, url, params, [HTTPStatus.OK]) def create_vg(self, groupID, groupName, groupDesc='', listVolume=None, maxSnapshots=MAXSNAPSHOTS, rotationSnapshot=True): method = 'PUT' metadata = {} params = {} properties = {} url = '/%s/%s/' % (DPL_OBJ_VOLUMEGROUP, groupID) if listVolume: metadata['volume'] = listVolume else: metadata['volume'] = [] metadata['display_name'] = groupName metadata['display_description'] = groupDesc metadata['maximum_snapshot'] = maxSnapshots properties['snapshot_rotation'] = rotationSnapshot metadata['properties'] = properties params['metadata'] = metadata return self._execute(method, url, params, [HTTPStatus.OK, HTTPStatus.ACCEPTED, HTTPStatus.CREATED]) def get_vg(self, groupID): method = 'GET' url = '/%s/%s/' % (DPL_OBJ_VOLUMEGROUP, groupID) return self._execute(method, url, None, [HTTPStatus.OK]) def delete_vg(self, groupID, force=True): method = 'DELETE' metadata = {} params = {} url = '/%s/%s/' % (DPL_OBJ_VOLUMEGROUP, groupID) metadata['force'] = force params['metadata'] = metadata return self._execute(method, url, params, [HTTPStatus.NO_CONTENT, HTTPStatus.NOT_FOUND]) def join_vg(self, volumeID, groupID): method = 'PUT' metadata = {} params = {} url = '/%s/%s/' % (DPL_OBJ_VOLUMEGROUP, groupID) metadata['volume_group_operation'] = 'join' metadata['volume'] = [] metadata['volume'].append(volumeID) params['metadata'] = metadata return self._execute(method, url, params, [HTTPStatus.OK, HTTPStatus.ACCEPTED]) def leave_vg(self, volumeID, groupID): method = 'PUT' metadata = {} params = {} url = '/%s/%s/' % (DPL_OBJ_VOLUMEGROUP, groupID) metadata['volume_group_operation'] = 'leave' metadata['volume'] = [] metadata['volume'].append(volumeID) params['metadata'] = metadata return self._execute(method, url, params, [HTTPStatus.OK, HTTPStatus.ACCEPTED]) class DPLCOMMONDriver(driver.CloneableImageVD, driver.BaseVD): """Class of dpl storage adapter.""" VERSION = '2.0.5' # ThirdPartySystems wiki page 
CI_WIKI_NAME = "ProphetStor_CI" # TODO(jsbryant) Remove driver in the 'U' release if CI is not fixed. SUPPORTED = False def __init__(self, *args, **kwargs): cert_path = None cert_verify = False super(DPLCOMMONDriver, self).__init__(*args, **kwargs) if self.configuration: self.configuration.append_config_values(options.DPL_OPTS) self.configuration.append_config_values(san.san_opts) cert_verify = self.configuration.driver_ssl_cert_verify cert_path = self.configuration.driver_ssl_cert_path if cert_verify: if not cert_path: LOG.warning( "Flexvisor: cert_verify is enabled but required cert_path" " option is missing.") cert_path = None else: cert_path = None self.dpl = DPLVolume(self.configuration.san_ip, self.configuration.dpl_port, self.configuration.san_login, self.configuration.san_password, cert_verify=cert_verify, cert_path=cert_path) self._stats = {} @staticmethod def get_driver_options(): return options.DPL_OPTS def _convert_size_GB(self, size): s = round(float(size) / units.Gi, 2) if s > 0: return s else: return 0 def _conver_uuid2hex(self, strID): if strID: return strID.replace('-', '') else: return None def _get_event_uuid(self, output): ret = 0 event_uuid = "" if (type(output) is dict and output.get("metadata") and output["metadata"]): if (output["metadata"].get("event_uuid") and output["metadata"]["event_uuid"]): event_uuid = output["metadata"]["event_uuid"] else: ret = errno.EINVAL else: ret = errno.EINVAL return ret, event_uuid def _wait_event(self, callFun, objuuid, eventid=None): nRetry = 30 fExit = False status = {} status['state'] = 'error' status['output'] = {} while nRetry: try: if eventid: ret, output = callFun( self._conver_uuid2hex(objuuid), self._conver_uuid2hex(eventid)) else: ret, output = callFun(self._conver_uuid2hex(objuuid)) if ret == 0: if output['completionStatus'] == 'Complete': fExit = True status['state'] = 'available' status['output'] = output elif output['completionStatus'] == 'Error': fExit = True status['state'] = 'error' raise loopingcall.LoopingCallDone(retvalue=False) else: nsleep = random.randint(0, 10) value = round(float(nsleep) / 10, 2) time.sleep(value) elif ret == errno.ENODATA: status['state'] = 'deleted' fExit = True else: nRetry -= 1 time.sleep(3) continue except Exception as e: LOG.error('Flexvisor failed to get event %(volume)s ' '(%(status)s).', {'volume': eventid, 'status': e}) raise loopingcall.LoopingCallDone(retvalue=False) if fExit is True: break return status def _join_volume_group(self, volume, cgId): # Join volume group if consistency group id not empty msg = '' try: ret, output = self.dpl.join_vg( self._conver_uuid2hex(volume['id']), self._conver_uuid2hex(cgId)) except Exception as e: ret = errno.EFAULT msg = _('Fexvisor failed to add volume %(id)s ' 'due to %(reason)s.') % {"id": volume['id'], "reason": str(e)} if ret: if not msg: msg = _('Flexvisor failed to add volume %(id)s ' 'to group %(cgid)s.') % {'id': volume['id'], 'cgid': cgId} raise exception.VolumeBackendAPIException(data=msg) else: LOG.info('Flexvisor succeeded to add volume %(id)s to ' 'group %(cgid)s.', {'id': volume['id'], 'cgid': cgId}) def _leave_volume_group(self, volume, cgId): # Leave volume group if consistency group id not empty msg = '' try: ret, output = self.dpl.leave_vg( self._conver_uuid2hex(volume['id']), self._conver_uuid2hex(cgId)) except Exception as e: ret = errno.EFAULT msg = _('Fexvisor failed to remove volume %(id)s ' 'due to %(reason)s.') % {"id": volume['id'], "reason": str(e)} if ret: if not msg: msg = _('Flexvisor failed to remove volume 
%(id)s ' 'from group %(cgid)s.') % {'id': volume['id'], 'cgid': cgId} raise exception.VolumeBackendAPIException(data=msg) else: LOG.info('Flexvisor succeeded to remove volume %(id)s from ' 'group %(cgid)s.', {'id': volume['id'], 'cgid': cgId}) def _get_snapshotid_of_vgsnapshot(self, vgID, vgsnapshotID, volumeID): snapshotID = None ret, out = self.dpl.query_vdev_snapshot(vgID, vgsnapshotID, True) if ret == 0: volumes = out.get('metadata', {}).get('member', {}) if volumes: snapshotID = volumes.get(volumeID, None) else: msg = _('Flexvisor failed to get snapshot id of volume ' '%(id)s from group %(vgid)s.') % {'id': volumeID, 'vgid': vgID} raise exception.VolumeBackendAPIException(data=msg) if not snapshotID: msg = _('Flexvisor could not find volume %(id)s snapshot in' ' the group %(vgid)s snapshot ' '%(vgsid)s.') % {'id': volumeID, 'vgid': vgID, 'vgsid': vgsnapshotID} raise exception.VolumeBackendAPIException(data=msg) return snapshotID def create_export(self, context, volume, connector): pass def ensure_export(self, context, volume): pass def remove_export(self, context, volume): pass def _create_consistencygroup(self, context, group): """Creates a consistencygroup.""" LOG.info('Start to create consistency group: %(group_name)s ' 'id: %(id)s', {'group_name': group.name, 'id': group.id}) model_update = {'status': fields.GroupStatus.AVAILABLE} try: ret, output = self.dpl.create_vg( self._conver_uuid2hex(group.id), group.name, group.description) if ret: msg = _('Failed to create consistency group ' '%(id)s:%(ret)s.') % {'id': group.id, 'ret': ret} raise exception.VolumeBackendAPIException(data=msg) else: return model_update except Exception as e: msg = _('Failed to create consistency group ' '%(id)s due to %(reason)s.') % {'id': group.id, 'reason': str(e)} raise exception.VolumeBackendAPIException(data=msg) def _delete_consistencygroup(self, context, group, volumes): """Delete a consistency group.""" ret = 0 volumes = self.db.volume_get_all_by_group( context, group.id) model_update = {} model_update['status'] = group.status LOG.info('Start to delete consistency group: %(cg_name)s', {'cg_name': group.id}) try: self.dpl.delete_vg(self._conver_uuid2hex(group.id)) except Exception as e: msg = _('Failed to delete consistency group %(id)s ' 'due to %(reason)s.') % {'id': group.id, 'reason': str(e)} raise exception.VolumeBackendAPIException(data=msg) for volume_ref in volumes: try: self.dpl.delete_vdev(self._conver_uuid2hex(volume_ref['id'])) volume_ref['status'] = 'deleted' except Exception: ret = errno.EFAULT volume_ref['status'] = 'error_deleting' model_update['status'] = ( fields.GroupStatus.ERROR_DELETING) if ret == 0: model_update['status'] = fields.GroupStatus.DELETED return model_update, volumes def _create_cgsnapshot(self, context, cgsnapshot, snapshots): """Creates a cgsnapshot.""" snapshots = objects.SnapshotList().get_all_for_group_snapshot( context, cgsnapshot.id) model_update = {} LOG.info('Start to create cgsnapshot for consistency group' ': %(group_name)s', {'group_name': cgsnapshot.group_id}) try: self.dpl.create_vdev_snapshot( self._conver_uuid2hex(cgsnapshot.group_id), self._conver_uuid2hex(cgsnapshot.id), cgsnapshot.name, '', True) for snapshot in snapshots: snapshot.status = fields.SnapshotStatus.AVAILABLE except Exception as e: msg = _('Failed to create cg snapshot %(id)s ' 'due to %(reason)s.') % {'id': cgsnapshot.id, 'reason': str(e)} raise exception.VolumeBackendAPIException(data=msg) model_update['status'] = 'available' return model_update, snapshots def 
_delete_cgsnapshot(self, context, cgsnapshot, snapshots): """Deletes a cgsnapshot.""" snapshots = objects.SnapshotList().get_all_for_group_snapshot( context, cgsnapshot.id) model_update = {} model_update['status'] = cgsnapshot.status LOG.info('Delete cgsnapshot %(snap_name)s for consistency group: ' '%(group_name)s', {'snap_name': cgsnapshot.id, 'group_name': cgsnapshot.group_id}) try: self.dpl.delete_vdev_snapshot( self._conver_uuid2hex(cgsnapshot.group_id), self._conver_uuid2hex(cgsnapshot.id), True) for snapshot in snapshots: snapshot.status = fields.SnapshotStatus.DELETED except Exception as e: msg = _('Failed to delete cgsnapshot %(id)s due to ' '%(reason)s.') % {'id': cgsnapshot.id, 'reason': str(e)} raise exception.VolumeBackendAPIException(data=msg) model_update['status'] = 'deleted' return model_update, snapshots def update_group(self, context, group, add_volumes=None, remove_volumes=None): addvollist = [] removevollist = [] cgid = group.id vid = '' model_update = {'status': fields.GroupStatus.AVAILABLE} if not volume_utils.is_group_a_cg_snapshot_type(group): raise NotImplementedError() # Get current group info in backend storage. ret, output = self.dpl.get_vg(self._conver_uuid2hex(cgid)) if ret == 0: group_members = output.get('children', []) if add_volumes: addvollist = add_volumes if remove_volumes: removevollist = remove_volumes # Process join volumes. try: for volume in addvollist: vid = volume['id'] # Verify the volume exists in the group or not. if self._conver_uuid2hex(vid) in group_members: continue self._join_volume_group(volume, cgid) except Exception as e: msg = _("Fexvisor failed to join the volume %(vol)s in the " "group %(group)s due to " "%(ret)s.") % {"vol": vid, "group": cgid, "ret": str(e)} raise exception.VolumeBackendAPIException(data=msg) # Process leave volumes. 
try: for volume in removevollist: vid = volume['id'] if self._conver_uuid2hex(vid) in group_members: self._leave_volume_group(volume, cgid) except Exception as e: msg = _("Fexvisor failed to remove the volume %(vol)s in the " "group %(group)s due to " "%(ret)s.") % {"vol": vid, "group": cgid, "ret": str(e)} raise exception.VolumeBackendAPIException(data=msg) return model_update, None, None def create_group(self, context, group): if volume_utils.is_group_a_cg_snapshot_type(group): return self._create_consistencygroup(context, group) raise NotImplementedError() def delete_group(self, context, group, volumes): if volume_utils.is_group_a_cg_snapshot_type(group): return self._delete_consistencygroup(context, group, volumes) raise NotImplementedError() def create_group_snapshot(self, context, group_snapshot, snapshots): if volume_utils.is_group_a_cg_snapshot_type(group_snapshot): return self._create_cgsnapshot(context, group_snapshot, snapshots) raise NotImplementedError() def delete_group_snapshot(self, context, group_snapshot, snapshots): if volume_utils.is_group_a_cg_snapshot_type(group_snapshot): return self._delete_cgsnapshot(context, group_snapshot, snapshots) raise NotImplementedError() def create_group_from_src(self, context, group, volumes, group_snapshot=None, snapshots=None, source_group=None, source_vols=None): err_msg = _("Prophet Storage doesn't support create_group_from_src.") LOG.error(err_msg) raise exception.VolumeBackendAPIException(data=err_msg) def create_volume(self, volume): """Create a volume.""" pool = volume_utils.extract_host(volume['host'], level='pool') if not pool: if not self.configuration.dpl_pool: msg = _("Pool is not available in the volume host fields.") raise exception.InvalidHost(reason=msg) else: pool = self.configuration.dpl_pool ret, output = self.dpl.create_vdev( self._conver_uuid2hex(volume['id']), volume.get('display_name', ''), volume.get('display_description', ''), pool, int(volume['size']) * units.Gi, self.configuration.san_thin_provision) if ret == errno.EAGAIN: ret, event_uuid = self._get_event_uuid(output) if ret == 0: status = self._wait_event(self.dpl.get_vdev_status, volume['id'], event_uuid) if status['state'] != 'available': msg = _('Flexvisor failed to create volume %(volume)s: ' '%(status)s.') % {'volume': volume['id'], 'status': ret} raise exception.VolumeBackendAPIException(data=msg) else: msg = _('Flexvisor failed to create volume (get event) ' '%s.') % (volume['id']) raise exception.VolumeBackendAPIException( data=msg) elif ret != 0: msg = _('Flexvisor create volume failed.:%(volumeid)s:' '%(status)s.') % {'volumeid': volume['id'], 'status': ret} raise exception.VolumeBackendAPIException( data=msg) else: LOG.info('Flexvisor succeeded to create volume %(id)s.', {'id': volume['id']}) if volume.group_id: group = volume_utils.group_get_by_id(volume.group_id) if volume_utils.is_group_a_cg_snapshot_type(group): try: self._join_volume_group(volume, volume.group_id) except Exception: # Delete volume if volume failed to join group. self.dpl.delete_vdev(self._conver_uuid2hex(volume['id'])) msg = _('Flexvisor failed to create volume %(id)s in the ' 'group %(vgid)s.') % { 'id': volume['id'], 'vgid': volume.group_id} raise exception.VolumeBackendAPIException(data=msg) def create_volume_from_snapshot(self, volume, snapshot): """Creates a volume from a snapshot.""" src_volume = None vgID = None # Detect whether a member of the group. snapshotID = snapshot['id'] # Try to get cgid if volume belong in the group. 
src_volumeID = snapshot['volume_id'] cgsnapshotID = snapshot.get('group_snapshot_id', None) if cgsnapshotID: try: src_volume = self.db.volume_get(src_volumeID) except Exception: msg = _("Flexvisor unable to find the source volume " "%(id)s info.") % {'id': src_volumeID} raise exception.VolumeBackendAPIException(data=msg) if src_volume: vgID = src_volume.group_id # Get the volume origin snapshot id if the source snapshot is group # snapshot. if vgID: snapshotID = self._get_snapshotid_of_vgsnapshot( self._conver_uuid2hex(vgID), self._conver_uuid2hex(cgsnapshotID), self._conver_uuid2hex(src_volumeID)) pool = volume_utils.extract_host(volume['host'], level='pool') if not pool: if not self.configuration.dpl_pool: msg = _("Pool is not available in the volume host fields.") raise exception.InvalidHost(reason=msg) else: pool = self.configuration.dpl_pool ret, output = self.dpl.create_vdev_from_snapshot( self._conver_uuid2hex(volume['id']), volume.get('display_name', ''), volume.get('display_description', ''), self._conver_uuid2hex(snapshotID), pool, self.configuration.san_thin_provision) if ret == errno.EAGAIN: ret, event_uuid = self._get_event_uuid(output) if ret == 0: status = self._wait_event(self.dpl.get_vdev_status, volume['id'], event_uuid) if status['state'] != 'available': msg = _('Flexvisor failed to create volume from ' 'snapshot %(id)s:' '%(status)s.') % {'id': snapshot['id'], 'status': ret} raise exception.VolumeBackendAPIException( data=msg) else: msg = _('Flexvisor failed to create volume from snapshot ' '(failed to get event) ' '%(id)s.') % {'id': snapshot['id']} raise exception.VolumeBackendAPIException(data=msg) elif ret != 0: msg = _('Flexvisor failed to create volume from snapshot ' '%(id)s: %(status)s.') % {'id': snapshot['id'], 'status': ret} raise exception.VolumeBackendAPIException( data=msg) else: LOG.info('Flexvisor succeeded to create volume %(id)s ' 'from snapshot.', {'id': volume['id']}) if volume['size'] > snapshot['volume_size']: self.extend_volume(volume, volume['size']) if volume.group_id: group = volume_utils.group_get_by_id(volume.group_id) if volume_utils.is_group_a_cg_snapshot_type(group): try: self._join_volume_group(volume, volume.group_id) except Exception: # Delete volume if volume failed to join group. 
self.dpl.delete_vdev(self._conver_uuid2hex(volume['id'])) raise def spawn_volume_from_snapshot(self, volume, snapshot): """Spawn a REFERENCED volume from a snapshot.""" ret, output = self.dpl.spawn_vdev_from_snapshot( self._conver_uuid2hex(volume['id']), self._conver_uuid2hex(snapshot['volume_id']), volume.get('display_name', ''), volume.get('display_description', ''), self._conver_uuid2hex(snapshot['id'])) if ret == errno.EAGAIN: # its an async process ret, event_uuid = self._get_event_uuid(output) if ret == 0: status = self._wait_event(self.dpl.get_vdev_status, volume['id'], event_uuid) if status['state'] != 'available': msg = _('Flexvisor failed to spawn volume from snapshot ' '%(id)s:%(status)s.') % {'id': snapshot['id'], 'status': ret} raise exception.VolumeBackendAPIException(data=msg) else: msg = _('Flexvisor failed to spawn volume from snapshot ' '(failed to get event) ' '%(id)s.') % {'id': snapshot['id']} raise exception.VolumeBackendAPIException(data=msg) elif ret != 0: msg = _('Flexvisor failed to create volume from snapshot ' '%(id)s: %(status)s.') % {'id': snapshot['id'], 'status': ret} raise exception.VolumeBackendAPIException( data=msg) else: LOG.info('Flexvisor succeeded to create volume %(id)s ' 'from snapshot.', {'id': volume['id']}) def create_cloned_volume(self, volume, src_vref): """Creates a clone of the specified volume.""" pool = volume_utils.extract_host(volume['host'], level='pool') if not pool: if not self.configuration.dpl_pool: msg = _("Pool is not available in the volume host fields.") raise exception.InvalidHost(reason=msg) else: pool = self.configuration.dpl_pool ret, output = self.dpl.clone_vdev( self._conver_uuid2hex(src_vref['id']), self._conver_uuid2hex(volume['id']), pool, volume.get('display_name', ''), volume.get('display_description', ''), int(volume['size']) * units.Gi, self.configuration.san_thin_provision) if ret == errno.EAGAIN: ret, event_uuid = self._get_event_uuid(output) if ret == 0: status = self._wait_event(self.dpl.get_vdev_status, volume['id'], event_uuid) if status['state'] != 'available': msg = _('Flexvisor failed to clone volume %(id)s: ' '%(status)s.') % {'id': src_vref['id'], 'status': ret} raise exception.VolumeBackendAPIException(data=msg) else: msg = _('Flexvisor failed to clone volume (failed to' ' get event) %(id)s.') % {'id': src_vref['id']} raise exception.VolumeBackendAPIException( data=msg) elif ret != 0: msg = _('Flexvisor failed to clone volume %(id)s: ' '%(status)s.') % {'id': src_vref['id'], 'status': ret} raise exception.VolumeBackendAPIException( data=msg) else: LOG.info('Flexvisor succeeded to clone volume %(id)s.', {'id': volume['id']}) if volume.group_id: group = volume_utils.group_get_by_id(volume.group_id) if volume_utils.is_group_a_cg_snapshot_type(group): try: self._join_volume_group(volume, volume.group_id) except Exception: # Delete volume if volume failed to join group. 
self.dpl.delete_vdev(self._conver_uuid2hex(volume['id'])) msg = _('Flexvisor volume %(id)s failed to join group ' '%(vgid)s.') % {'id': volume['id'], 'vgid': volume.group_id} raise exception.VolumeBackendAPIException(data=msg) def delete_volume(self, volume): """Deletes a volume.""" ret = 0 if volume.group_id: group = volume_utils.group_get_by_id(volume.group_id) if group and volume_utils.is_group_a_cg_snapshot_type(group): msg = '' try: ret, out = self.dpl.leave_vg( self._conver_uuid2hex(volume['id']), self._conver_uuid2hex(volume.group_id)) if ret: LOG.warning('Flexvisor failed to delete volume ' '%(id)s from the group %(vgid)s.', {'id': volume['id'], 'vgid': volume.group_id}) except Exception as e: LOG.warning('Flexvisor failed to delete volume %(id)s ' 'from group %(vgid)s due to %(status)s.', {'id': volume['id'], 'vgid': volume.group_id, 'status': e}) if ret: ret = 0 ret, output = self.dpl.delete_vdev(self._conver_uuid2hex(volume['id'])) if ret == errno.EAGAIN: status = self._wait_event(self.dpl.get_vdev, volume['id']) if status['state'] == 'error': msg = _('Flexvisor failed deleting volume %(id)s: ' '%(status)s.') % {'id': volume['id'], 'status': ret} raise exception.VolumeBackendAPIException(data=msg) elif ret == errno.ENODATA: ret = 0 LOG.info('Flexvisor volume %(id)s does not ' 'exist.', {'id': volume['id']}) elif ret != 0: msg = _('Flexvisor failed to delete volume %(id)s: ' '%(status)s.') % {'id': volume['id'], 'status': ret} raise exception.VolumeBackendAPIException( data=msg) def extend_volume(self, volume, new_size): ret, output = self.dpl.extend_vdev(self._conver_uuid2hex(volume['id']), volume.get('display_name', ''), volume.get('display_description', ''), new_size * units.Gi) if ret == errno.EAGAIN: ret, event_uuid = self._get_event_uuid(output) if ret == 0: status = self._wait_event(self.dpl.get_vdev_status, volume['id'], event_uuid) if status['state'] != 'available': msg = _('Flexvisor failed to extend volume ' '%(id)s:%(status)s.') % {'id': volume, 'status': ret} raise exception.VolumeBackendAPIException( data=msg) else: msg = _('Flexvisor failed to extend volume ' '(failed to get event) ' '%(id)s.') % {'id': volume['id']} raise exception.VolumeBackendAPIException(data=msg) elif ret != 0: msg = _('Flexvisor failed to extend volume ' '%(id)s: %(status)s.') % {'id': volume['id'], 'status': ret} raise exception.VolumeBackendAPIException( data=msg) else: LOG.info('Flexvisor succeeded to extend volume' ' %(id)s.', {'id': volume['id']}) def create_snapshot(self, snapshot): """Creates a snapshot.""" ret, output = self.dpl.create_vdev_snapshot( self._conver_uuid2hex(snapshot['volume_id']), self._conver_uuid2hex(snapshot['id']), snapshot.get('display_name', ''), snapshot.get('display_description', '')) if ret == errno.EAGAIN: ret, event_uuid = self._get_event_uuid(output) if ret == 0: status = self._wait_event(self.dpl.get_vdev_status, snapshot['volume_id'], event_uuid) if status['state'] != 'available': msg = (_('Flexvisor failed to create snapshot for volume ' '%(id)s: %(status)s.') % {'id': snapshot['volume_id'], 'status': ret}) raise exception.VolumeBackendAPIException(data=msg) else: msg = (_('Flexvisor failed to create snapshot for volume ' '(failed to get event) %(id)s.') % {'id': snapshot['volume_id']}) raise exception.VolumeBackendAPIException(data=msg) elif ret != 0: msg = _('Flexvisor failed to create snapshot for volume %(id)s: ' '%(status)s.') % {'id': snapshot['volume_id'], 'status': ret} raise exception.VolumeBackendAPIException(data=msg) def delete_snapshot(self, 
snapshot): """Deletes a snapshot.""" ret, output = self.dpl.delete_vdev_snapshot( self._conver_uuid2hex(snapshot['volume_id']), self._conver_uuid2hex(snapshot['id'])) if ret == errno.EAGAIN: ret, event_uuid = self._get_event_uuid(output) if ret == 0: status = self._wait_event(self.dpl.get_vdev_status, snapshot['volume_id'], event_uuid) if status['state'] != 'available': msg = _('Flexvisor failed to delete snapshot %(id)s: ' '%(status)s.') % {'id': snapshot['id'], 'status': ret} raise exception.VolumeBackendAPIException(data=msg) else: msg = _('Flexvisor failed to delete snapshot (failed to ' 'get event) %(id)s.') % {'id': snapshot['id']} raise exception.VolumeBackendAPIException(data=msg) elif ret == errno.ENODATA: LOG.info('Flexvisor snapshot %(id)s not existed.', {'id': snapshot['id']}) elif ret != 0: msg = _('Flexvisor failed to delete snapshot %(id)s: ' '%(status)s.') % {'id': snapshot['id'], 'status': ret} raise exception.VolumeBackendAPIException(data=msg) else: LOG.info('Flexvisor succeeded to delete snapshot %(id)s.', {'id': snapshot['id']}) def _get_pools(self): pools = [] qpools = [] # Defined access pool by cinder configuration. defined_pool = self.configuration.dpl_pool if defined_pool: qpools.append(defined_pool) else: try: ret, output = self.dpl.get_pools() if ret == 0: for poolUuid, poolName in output.get('children', []): qpools.append(poolUuid) else: LOG.error("Flexvisor failed to get pool list." " (Error: %d)", ret) except Exception as e: LOG.error("Flexvisor failed to get pool list due to " "%s.", e) # Query pool detail information for poolid in qpools: ret, output = self._get_pool_info(poolid) if ret == 0: pool = {} pool['pool_name'] = output['metadata']['pool_uuid'] pool['total_capacity_gb'] = ( self._convert_size_GB( int(output['metadata']['total_capacity']))) pool['free_capacity_gb'] = ( self._convert_size_GB( int(output['metadata']['available_capacity']))) pool['QoS_support'] = False pool['reserved_percentage'] = 0 pools.append(pool) else: LOG.warning("Failed to query pool %(id)s status " "%(ret)d.", {'id': poolid, 'ret': ret}) continue return pools def _update_volume_stats(self, refresh=False): """Return the current state of the volume service. If 'refresh' is True, run the update first. 
""" data = {} pools = self._get_pools() data['volume_backend_name'] = ( self.configuration.safe_get('volume_backend_name')) location_info = '%(driver)s:%(host)s:%(volume)s' % { 'driver': self.__class__.__name__, 'host': self.configuration.san_ip, 'volume': self.configuration.dpl_pool } try: ret, output = self.dpl.get_server_info() if ret == 0: data['vendor_name'] = output['metadata']['vendor'] data['driver_version'] = output['metadata']['version'] data['storage_protocol'] = constants.ISCSI data['location_info'] = location_info data['consistencygroup_support'] = True data['consistent_group_snapshot_enabled'] = True data['pools'] = pools self._stats = data except Exception as e: LOG.error('Failed to get server info due to ' '%(state)s.', {'state': e}) return self._stats def do_setup(self, context): """Any initialization the volume driver does while starting.""" self.context = context LOG.info('Activate Flexvisor cinder volume driver.') def check_for_setup_error(self): """Check DPL can connect properly.""" pass def _get_pool_info(self, poolid): """Query pool information.""" ret, output = self.dpl.get_pool(poolid) if ret == errno.EAGAIN: ret, event_uuid = self._get_event_uuid(output) if ret == 0: status = self._wait_event(self.dpl.get_pool_status, poolid, event_uuid) if status['state'] != 'available': msg = _('Flexvisor failed to get pool info %(id)s: ' '%(status)s.') % {'id': poolid, 'status': ret} raise exception.VolumeBackendAPIException(data=msg) else: ret = 0 output = status.get('output', {}) else: LOG.error('Flexvisor failed to get pool %(id)s info.', {'id': poolid}) raise exception.VolumeBackendAPIException( data="failed to get event") elif ret != 0: msg = _('Flexvisor failed to get pool info %(id)s: ' '%(status)s.') % {'id': poolid, 'status': ret} raise exception.VolumeBackendAPIException(data=msg) else: LOG.debug('Flexvisor succeeded to get pool info.') return ret, output ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/drivers/prophetstor/options.py0000664000175000017500000000201100000000000023750 0ustar00zuulzuul00000000000000# Copyright (c) 2014 ProphetStor, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg from cinder.volume import configuration DPL_OPTS = [ cfg.StrOpt('dpl_pool', default='', help='DPL pool uuid in which DPL volumes are stored.'), cfg.PortOpt('dpl_port', default=8357, help='DPL port number.'), ] CONF = cfg.CONF CONF.register_opts(DPL_OPTS, group=configuration.SHARED_CONF_GROUP) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/drivers/pure.py0000664000175000017500000064130400000000000020655 0ustar00zuulzuul00000000000000# Copyright (c) 2024 Pure Storage, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Volume driver for Pure Storage FlashArray storage system. This driver requires Purity version 6.1.0 or higher. """ import functools import ipaddress import math import re import time import uuid import distro from os_brick import constants as brick_constants from oslo_config import cfg from oslo_log import log as logging from oslo_utils import excutils from oslo_utils import strutils from oslo_utils import units from packaging import version try: from pypureclient import flasharray except ImportError: flasharray = None from cinder.common import constants from cinder import context from cinder import exception from cinder.i18n import _ from cinder import interface from cinder import objects from cinder.objects import fields from cinder.objects import volume_type from cinder import utils from cinder.volume import configuration from cinder.volume import driver from cinder.volume.drivers.san import san from cinder.volume import qos_specs from cinder.volume import volume_types from cinder.volume import volume_utils from cinder.zonemanager import utils as fczm_utils LOG = logging.getLogger(__name__) _INSTANCE_SENTINEL = object() _VOLTYPE_SENTINEL = object() _PROJECT_SENTINEL = object() PURE_OPTS = [ cfg.StrOpt("pure_api_token", help="REST API authorization token."), cfg.BoolOpt("pure_automatic_max_oversubscription_ratio", default=True, help="Automatically determine an oversubscription ratio based " "on the current total data reduction values. If used " "this calculated value will override the " "max_over_subscription_ratio config option."), cfg.StrOpt("pure_host_personality", default=None, choices=['aix', 'esxi', 'hitachi-vsp', 'hpux', 'oracle-vm-server', 'solaris', 'vms', None], help="Determines how the Purity system tunes the protocol used " "between the array and the initiator."), # These are used as default settings. In future these can be overridden # by settings in volume-type. 
cfg.IntOpt("pure_replica_interval_default", default=3600, help="Snapshot replication interval in seconds."), cfg.IntOpt("pure_replica_retention_short_term_default", default=14400, help="Retain all snapshots on target for this " "time (in seconds.)"), cfg.IntOpt("pure_replica_retention_long_term_per_day_default", default=3, help="Retain how many snapshots for each day."), cfg.IntOpt("pure_replica_retention_long_term_default", default=7, help="Retain snapshots per day on target for this time " "(in days.)"), cfg.StrOpt("pure_replication_pg_name", default="cinder-group", help="Pure Protection Group name to use for async replication " "(will be created if it does not exist)."), cfg.StrOpt("pure_trisync_pg_name", default="cinder-trisync", help="Pure Protection Group name to use for trisync " "replication leg inside the sync replication pod " "(will be created if it does not exist)."), cfg.StrOpt("pure_replication_pod_name", default="cinder-pod", help="Pure Pod name to use for sync replication " "(will be created if it does not exist)."), cfg.StrOpt("pure_ghost_pod_name", default="cinder-ghost-pod", help="Pure Ghost Pod name to retype sync replication."), cfg.StrOpt("pure_iscsi_cidr", default="0.0.0.0/0", help="CIDR of FlashArray iSCSI targets hosts are allowed to " "connect to. Default will allow connection to any " "IPv4 address. This parameter now supports IPv6 subnets. " "Ignored when pure_iscsi_cidr_list is set."), cfg.ListOpt("pure_iscsi_cidr_list", default=None, help="Comma-separated list of CIDR of FlashArray iSCSI " "targets hosts are allowed to connect to. It supports " "IPv4 and IPv6 subnets. This parameter supersedes " "pure_iscsi_cidr."), cfg.StrOpt("pure_nvme_cidr", default="0.0.0.0/0", help="CIDR of FlashArray NVMe targets hosts are allowed to " "connect to. Default will allow connection to any " "IPv4 address. This parameter now supports IPv6 subnets. " "Ignored when pure_nvme_cidr_list is set."), cfg.ListOpt("pure_nvme_cidr_list", default=None, help="Comma-separated list of CIDR of FlashArray NVMe " "targets hosts are allowed to connect to. It supports " "IPv4 and IPv6 subnets. This parameter supersedes " "pure_nvme_cidr."), cfg.StrOpt("pure_nvme_transport", default="roce", choices=['roce', 'tcp'], help="The NVMe transport layer to be used by the NVMe driver."), cfg.BoolOpt("pure_eradicate_on_delete", default=False, help="When enabled, all Pure volumes, snapshots, and " "protection groups will be eradicated at the time of " "deletion in Cinder. Data will NOT be recoverable after " "a delete with this set to True! 
When disabled, volumes " "and snapshots will go into pending eradication state " "and can be recovered."), cfg.BoolOpt("pure_trisync_enabled", default=False, help="When enabled and two replication devices are provided, " "one each of types sync and async, this will enable " "the ability to create a volume that is sync replicated " "to one array and async replicated to a separate array.") ] CONF = cfg.CONF CONF.register_opts(PURE_OPTS, group=configuration.SHARED_CONF_GROUP) INVALID_CHARACTERS = re.compile(r"[^-a-zA-Z0-9]") GENERATED_NAME = re.compile(r".*-[a-f0-9]{32}-cinder$") REPLICATION_TYPE_SYNC = "sync" REPLICATION_TYPE_ASYNC = "async" REPLICATION_TYPE_TRISYNC = "trisync" REPLICATION_TYPES = [ REPLICATION_TYPE_SYNC, REPLICATION_TYPE_ASYNC, REPLICATION_TYPE_TRISYNC ] CHAP_SECRET_KEY = "PURE_TARGET_CHAP_SECRET" ERR_MSG_NOT_EXIST = "does not exist" ERR_MSG_HOST_NOT_EXIST = "Host " + ERR_MSG_NOT_EXIST ERR_MSG_NO_SUCH_SNAPSHOT = "No such volume or snapshot" ERR_MSG_PENDING_ERADICATION = "has been destroyed" ERR_MSG_ALREADY_EXISTS = "already exists" ERR_MSG_COULD_NOT_BE_FOUND = "could not be found" ERR_MSG_ALREADY_INCLUDES = "already includes" ERR_MSG_ALREADY_ALLOWED = "already allowed on" ERR_MSG_ALREADY_BELONGS = "already belongs to" ERR_MSG_EXISTING_CONNECTIONS = "cannot be deleted due to existing connections" ERR_MSG_ALREADY_IN_USE = "already in use" ERR_MSG_ARRAY_LIMIT = "limit reached" EXTRA_SPECS_REPL_ENABLED = "replication_enabled" EXTRA_SPECS_REPL_TYPE = "replication_type" MAX_VOL_LENGTH = 63 MAX_SNAP_LENGTH = 96 UNMANAGED_SUFFIX = '-unmanaged' NVME_PORT = 4420 REPL_SETTINGS_PROPAGATE_RETRY_INTERVAL = 5 # 5 seconds REPL_SETTINGS_PROPAGATE_MAX_RETRIES = 36 # 36 * 5 = 180 seconds HOST_CREATE_MAX_RETRIES = 5 USER_AGENT_BASE = 'OpenStack Cinder' MIN_IOPS = 100 MAX_IOPS = 100000000 # 100M MIN_BWS = 1048576 # 1 MB/s MAX_BWS = 549755813888 # 512 GB/s TAG_NAMESPACE = "openstack-integration.purestorage.com" class PureDriverException(exception.VolumeDriverException): message = _("Pure Storage Cinder driver failure: %(reason)s") class PureRetryableException(exception.VolumeBackendAPIException): message = _("Retryable Pure Storage Exception encountered") def pure_driver_debug_trace(f): """Log the method entrance and exit including active backend name. This should only be used on VolumeDriver class methods. It depends on having a 'self' argument that is a PureBaseVolumeDriver. 
""" @functools.wraps(f) def wrapper(*args, **kwargs): driver = args[0] # self cls_name = driver.__class__.__name__ method_name = "%(cls_name)s.%(method)s" % {"cls_name": cls_name, "method": f.__name__} backend_name = driver._get_current_array(True).backend_id LOG.debug("[%(backend_name)s] Enter %(method_name)s, args=%(args)s," " kwargs=%(kwargs)s", { "method_name": method_name, "backend_name": backend_name, "args": args, "kwargs": kwargs, }) result = f(*args, **kwargs) LOG.debug("[%(backend_name)s] Leave %(method_name)s, ret=%(result)s", { "method_name": method_name, "backend_name": backend_name, "result": result, }) return result return wrapper class PureBaseVolumeDriver(san.SanDriver): """Performs volume management on Pure Storage FlashArray.""" SUPPORTS_ACTIVE_ACTIVE = True PURE_QOS_KEYS = ['maxIOPS', 'maxBWS', 'maxIOPS_per_GB', 'maxBWS_per_GB'] # ThirdPartySystems wiki page CI_WIKI_NAME = "Pure_Storage_CI" def __init__(self, *args, **kwargs): execute = kwargs.pop("execute", utils.execute) super(PureBaseVolumeDriver, self).__init__(execute=execute, *args, **kwargs) self.configuration.append_config_values(PURE_OPTS) self._array = None self._storage_protocol = None self._backend_name = (self.configuration.volume_backend_name or self.__class__.__name__) self._replication_target_arrays = [] self._active_cluster_target_arrays = [] self._uniform_active_cluster_target_arrays = [] self._trisync_pg_name = None self._replication_pg_name = None self._trisync_name = None self._replication_pod_name = None self._replication_interval = None self._replication_retention_short_term = None self._replication_retention_long_term = None self._replication_retention_long_term_per_day = None self._async_replication_retention_policy = {} self._is_replication_enabled = False self._is_active_cluster_enabled = False self._is_trisync_enabled = False self._active_backend_id = kwargs.get('active_backend_id', None) self._failed_over_primary_array = None self._user_agent = '%(base)s %(class)s/%(version)s (%(platform)s)' % { 'base': USER_AGENT_BASE, 'class': self.__class__.__name__, 'version': self.VERSION, 'platform': distro.name(pretty=True) } @classmethod def get_driver_options(cls): additional_opts = cls._get_oslo_driver_opts( 'san_ip', 'driver_ssl_cert_verify', 'driver_ssl_cert_path', 'use_chap_auth', 'replication_device', 'reserved_percentage', 'max_over_subscription_ratio', 'pure_nvme_transport', 'pure_nvme_cidr_list', 'pure_nvme_cidr', 'pure_trisync_enabled', 'pure_trisync_pg_name') return PURE_OPTS + additional_opts def parse_replication_configs(self): self._trisync_pg_name = ( self.configuration.pure_trisync_pg_name) self._replication_pg_name = ( self.configuration.pure_replication_pg_name) self._replication_pod_name = ( self.configuration.pure_replication_pod_name) self._ghost_pod_name = ( self.configuration.pure_ghost_pod_name) self._replication_interval = ( self.configuration.pure_replica_interval_default * 1000) self._replication_retention_short_term = ( self.configuration.pure_replica_retention_short_term_default) self._replication_retention_long_term = ( self.configuration.pure_replica_retention_long_term_default) self._replication_retention_long_term_per_day = ( self.configuration. 
pure_replica_retention_long_term_per_day_default) self._async_replication_retention_policy = ( self._generate_replication_retention()) replication_devices = self.configuration.safe_get( 'replication_device') if replication_devices: for replication_device in replication_devices: backend_id = replication_device["backend_id"] san_ip = replication_device["san_ip"] api_token = replication_device["api_token"] verify_ssl = strutils.bool_from_string( replication_device.get("ssl_cert_verify", False)) ssl_cert_path = replication_device.get("ssl_cert_path", None) repl_type = replication_device.get("type", REPLICATION_TYPE_ASYNC) uniform = strutils.bool_from_string( replication_device.get("uniform", False)) target_array = self._get_flasharray( san_ip, api_token, verify_ssl=verify_ssl, ssl_cert_path=ssl_cert_path ) if target_array: target_array_info = list( target_array.get_arrays().items )[0] target_array.array_name = target_array_info.name target_array.array_id = target_array_info.id target_array.replication_type = repl_type target_array.backend_id = backend_id target_array.uniform = uniform LOG.info("Added secondary array: backend_id='%s'," " name='%s', id='%s', type='%s', uniform='%s'", target_array.backend_id, target_array.array_name, target_array.array_id, target_array.replication_type, target_array.uniform) else: LOG.warning("Failed to set up secondary array: %(ip)s", {"ip": san_ip}) continue self._replication_target_arrays.append(target_array) if repl_type == REPLICATION_TYPE_SYNC: self._active_cluster_target_arrays.append(target_array) if target_array.uniform: self._uniform_active_cluster_target_arrays.append( target_array) @pure_driver_debug_trace def set_qos(self, array, vol_name, vol_size, qos): # max_IOPS and max_BWS override the per GB IOPS and BW values if # both are provided. If only a per GB value is provided then # we must ensure, based on volume size, the IOPS or BW values # do not exceed the maximum limits for these values allowed per # volume. if qos['maxIOPS'] == 0 and qos['maxIOPS_per_GB']: qos['maxIOPS'] = min(MAX_IOPS, int(qos['maxIOPS_per_GB']) * vol_size) if qos['maxBWS'] == 0 and qos['maxBWS_per_GB']: qos['maxBWS'] = min(MAX_BWS, int(qos['maxBWS_per_GB']) * vol_size) if qos['maxIOPS'] == 0 and qos['maxBWS'] == 0: array.patch_volumes(names=[vol_name], volume=flasharray.VolumePatch( qos=flasharray.Qos( iops_limit=MAX_IOPS, bandwidth_limit=MAX_BWS))) elif qos['maxIOPS'] == 0: array.patch_volumes(names=[vol_name], volume=flasharray.VolumePatch( qos=flasharray.Qos( iops_limit=MAX_IOPS, bandwidth_limit=qos['maxBWS']))) elif qos['maxBWS'] == 0: array.patch_volumes(names=[vol_name], volume=flasharray.VolumePatch( qos=flasharray.Qos( iops_limit=qos['maxIOPS'], bandwidth_limit=MAX_BWS))) else: array.patch_volumes(names=[vol_name], volume=flasharray.VolumePatch( qos=flasharray.Qos( iops_limit=qos['maxIOPS'], bandwidth_limit=qos['maxBWS']))) return @pure_driver_debug_trace def create_from_snap_in_vgroup(self, array, vol_name, snap_name, vgroup, vg_iop, vg_bw): if not (MIN_IOPS <= int(vg_iop) <= MAX_IOPS): msg = (_('vg_maxIOPS QoS error. Must be more than ' '%(min_iops)s and less than %(max_iops)s') % {'min_iops': MIN_IOPS, 'max_iops': MAX_IOPS}) raise exception.InvalidQoSSpecs(message=msg) if not (MIN_BWS <= int(vg_bw) <= MAX_BWS): msg = (_('vg_maxBWS QoS error. 
Must be between ' '%(min_bws)s and %(max_bws)s') % {'min_bws': MIN_BWS, 'max_bws': MAX_BWS}) raise exception.InvalidQoSSpecs(message=msg) self._create_volume_group_if_not_exist(array, vgroup, int(vg_iop), int(vg_bw)) vg_volname = vgroup + "/" + vol_name if self._array.safemode: array.post_volumes(names=[vg_volname], with_default_protection=False, volume=flasharray.VolumePost( source=flasharray.Reference( name=snap_name))) else: array.post_volumes(names=[vg_volname], volume=flasharray.VolumePost( source=flasharray.Reference(name=snap_name))) return vg_volname @pure_driver_debug_trace def create_in_vgroup(self, array, vol_name, vol_size, vgroup, vg_iop, vg_bw): if not (MIN_IOPS <= int(vg_iop) <= MAX_IOPS): msg = (_('vg_maxIOPS QoS error. Must be more than ' '%(min_iops)s and less than %(max_iops)s') % {'min_iops': MIN_IOPS, 'max_iops': MAX_IOPS}) raise exception.InvalidQoSSpecs(message=msg) if not (MIN_BWS <= int(vg_bw) <= MAX_BWS): msg = (_('vg_maxBWS QoS error. Must be between ' '%(min_bws)s and %(max_bws)s') % {'min_bws': MIN_BWS, 'max_bws': MAX_BWS}) raise exception.InvalidQoSSpecs(message=msg) self._create_volume_group_if_not_exist(array, vgroup, int(vg_iop), int(vg_bw)) vg_volname = vgroup + "/" + vol_name if self._array.safemode: array.post_volumes(names=[vg_volname], with_default_protection=False, volume=flasharray.VolumePost( provisioned=vol_size)) else: array.post_volumes(names=[vg_volname], volume=flasharray.VolumePost( provisioned=vol_size)) return vg_volname @pure_driver_debug_trace def create_with_qos(self, array, vol_name, vol_size, qos): # max_IOPS and max_BWS override the per GB IOPS and BW values if # both are provided. Iif only a per GB value is provided then # we must ensure, based on volume size, the IOPS or BW values # do not exceed the maximum limits for these values allowed per # volume. 
gb_size = vol_size / units.Gi if qos['maxIOPS'] == 0 and qos['maxIOPS_per_GB']: qos['maxIOPS'] = min(MAX_IOPS, int(qos['maxIOPS_per_GB']) * gb_size) if qos['maxBWS'] == 0 and qos['maxBWS_per_GB']: qos['maxBWS'] = min(MAX_BWS, int(qos['maxBWS_per_GB']) * gb_size) if self._array.safemode: if qos['maxIOPS'] == 0 and qos['maxBWS'] == 0: array.post_volumes(names=[vol_name], with_default_protection=False, volume=flasharray.VolumePost( provisioned=vol_size)) elif qos['maxIOPS'] == 0: array.post_volumes(names=[vol_name], with_default_protection=False, volume=flasharray.VolumePost( provisioned=vol_size, qos=flasharray.Qos( bandwidth_limit=qos['maxBWS']))) elif qos['maxBWS'] == 0: array.post_volumes(names=[vol_name], with_default_protection=False, volume=flasharray.VolumePost( provisioned=vol_size, qos=flasharray.Qos( iops_limit=qos['maxIOPS']))) else: array.post_volumes(names=[vol_name], with_default_protection=False, volume=flasharray.VolumePost( provisioned=vol_size, qos=flasharray.Qos( iops_limit=qos['maxIOPS'], bandwidth_limit=qos['maxBWS']))) else: if qos['maxIOPS'] == 0 and qos['maxBWS'] == 0: array.post_volumes(names=[vol_name], volume=flasharray.VolumePost( provisioned=vol_size)) elif qos['maxIOPS'] == 0: array.post_volumes(names=[vol_name], volume=flasharray.VolumePost( provisioned=vol_size, qos=flasharray.Qos( bandwidth_limit=qos['maxBWS']))) elif qos['maxBWS'] == 0: array.post_volumes(names=[vol_name], volume=flasharray.VolumePost( provisioned=vol_size, qos=flasharray.Qos( iops_limit=qos['maxIOPS']))) else: array.post_volumes(names=[vol_name], volume=flasharray.VolumePost( provisioned=vol_size, qos=flasharray.Qos( iops_limit=qos['maxIOPS'], bandwidth_limit=qos['maxBWS']))) return def do_setup(self, context): """Performs driver initialization steps that could raise exceptions.""" if flasharray is None: msg = _("Missing 'py-pure-client' python module, ensure the" " library is installed and available.") raise PureDriverException(msg) # Raises PureDriverException if unable to connect and PureError # if unable to authenticate. self._array = self._get_flasharray( san_ip=self.configuration.san_ip, api_token=self.configuration.pure_api_token, verify_ssl=self.configuration.driver_ssl_cert_verify, ssl_cert_path=self.configuration.driver_ssl_cert_path ) if self._array: array_info = list(self._array.get_arrays().items)[0] if version.parse(array_info.version) < version.parse( '6.1.0' ): msg = _("FlashArray Purity version less than 6.1.0 " "unsupported. Please upgrade your backend to " "a supported version.") raise PureDriverException(msg) if version.parse(array_info.version) < version.parse( '6.4.2' ) and self._storage_protocol == constants.NVMEOF_TCP: msg = _("FlashArray Purity version less than 6.4.2 " "unsupported for NVMe-TCP. 
Please upgrade your " "backend to a supported version.") raise PureDriverException(msg) self._array.array_name = array_info.name self._array.array_id = array_info.id self._array.replication_type = None self._array.backend_id = self._backend_name self._array.preferred = True self._array.uniform = True self._array.version = array_info.version if version.parse(array_info.version) < version.parse( '6.3.4' ): self._array.safemode = False else: self._array.safemode = True LOG.info("Primary array: backend_id='%s', name='%s', id='%s'", self.configuration.config_group, self._array.array_name, self._array.array_id) else: LOG.warning("self.do_setup failed to set up primary array: %(ip)s", {"ip": self.configuration.san_ip}) self.do_setup_replication() if self.configuration.pure_trisync_enabled: # If trisync is enabled check that we have only 1 sync and 1 async # replication device set up and that the async target is not the # same as any of the sync targets. self.do_setup_trisync() # If we have failed over at some point we need to adjust our current # array based on the one that we have failed over to if (self._active_backend_id and self._active_backend_id != self._array.backend_id): secondary_array = self._get_secondary(self._active_backend_id) self._swap_replication_state(self._array, secondary_array) def do_setup_trisync(self): repl_device = {} async_target = [] count = 0 replication_devices = self.configuration.safe_get( 'replication_device') if not replication_devices or len(replication_devices) != 2: LOG.error("Unable to configure TriSync Replication. Incorrect " "number of replication devices enabled. " "Only 2 are supported.") else: for replication_device in replication_devices: san_ip = replication_device["san_ip"] api_token = replication_device["api_token"] repl_type = replication_device.get( "type", REPLICATION_TYPE_ASYNC) repl_device[count] = { "rep_type": repl_type, "token": api_token, "san_ip": san_ip, } count += 1 if (repl_device[0]["rep_type"] == repl_device[1]["rep_type"]) or ( (repl_device[0]["token"] == repl_device[1]["token"]) ): LOG.error("Replication devices provided must be one each " "of sync and async and targets must be different " "to enable TriSync Replication.") return for replication_device in replication_devices: repl_type = replication_device.get( "type", REPLICATION_TYPE_ASYNC) if repl_type == "async": san_ip = replication_device["san_ip"] api_token = replication_device["api_token"] verify_ssl = strutils.bool_from_string( replication_device.get("ssl_cert_verify", False)) ssl_cert_path = replication_device.get( "ssl_cert_path", None) target_array = self._get_flasharray( san_ip, api_token, verify_ssl=verify_ssl, ssl_cert_path=ssl_cert_path ) trisync_async_info = list( target_array.get_arrays().items)[0] target_array.array_name = trisync_async_info.name async_target.append(target_array) self._trisync_name = self._replication_pod_name + \ "::" + \ self._trisync_pg_name self._is_trisync_enabled = True self._setup_replicated_pgroups( self._get_current_array(), async_target, self._trisync_name, self._replication_interval, self._async_replication_retention_policy ) def do_setup_replication(self): replication_devices = self.configuration.safe_get( 'replication_device') if replication_devices: self.parse_replication_configs() self._is_replication_enabled = True if len(self._active_cluster_target_arrays) > 0: self._is_active_cluster_enabled = True # Only set this up on sync rep arrays self._setup_replicated_pods( self._get_current_array(True), self._active_cluster_target_arrays, 
self._replication_pod_name ) # Even if the array is configured for sync rep set it # up to handle async too self._setup_replicated_pgroups( self._get_current_array(True), self._replication_target_arrays, self._replication_pg_name, self._replication_interval, self._async_replication_retention_policy ) def check_for_setup_error(self): # Avoid inheriting check_for_setup_error from SanDriver, which checks # for san_password or san_private_key, not relevant to our driver. pass def update_provider_info(self, volumes, snapshots): """Ensure we have a provider_id set on volumes. If there is a provider_id already set then skip, if it is missing then we will update it based on the volume object. We can always compute the id if we have the full volume object, but not all driver API's give us that info. We don't care about snapshots, they just use the volume's provider_id. """ vol_updates = [] for vol in volumes: if not vol.provider_id: vol.provider_id = self._get_vol_name(vol) vol_name = self._generate_purity_vol_name(vol) if vol.metadata: vol_updates.append({ 'id': vol.id, 'provider_id': vol_name, 'metadata': {**vol.metadata, 'array_volume_name': vol_name, 'array_name': self._array.array_name}, }) else: vol_updates.append({ 'id': vol.id, 'provider_id': vol_name, 'metadata': {'array_volume_name': vol_name, 'array_name': self._array.array_name}, }) return vol_updates, None @pure_driver_debug_trace def revert_to_snapshot(self, context, volume, snapshot): """Is called to perform revert volume from snapshot. :param context: Our working context. :param volume: the volume to be reverted. :param snapshot: the snapshot data revert to volume. :return None """ vol_name = self._generate_purity_vol_name(volume) if snapshot['group_snapshot'] or snapshot['cgsnapshot']: snap_name = self._get_pgroup_snap_name_from_snapshot(snapshot) else: snap_name = self._get_snap_name(snapshot) LOG.debug("Reverting from snapshot %(snap)s to volume " "%(vol)s", {'vol': vol_name, 'snap': snap_name}) current_array = self._get_current_array() current_array.post_volumes(names=[snap_name], overwrite=True, volume=flasharray.VolumePost( source=flasharray.Reference( name=vol_name))) @pure_driver_debug_trace def create_volume(self, volume): """Creates a volume. Note that if a vgroup is specified in the volume type extra_spec then we do not apply volume level qos as this is incompatible with volume group qos settings. We will force a volume group to have the maximum qos settings if not specified in the volume type extra_spec as this can cause retyping issues in the future if not defined. 
""" qos = None vol_name = self._generate_purity_vol_name(volume) vol_size = volume["size"] * units.Gi ctxt = context.get_admin_context() type_id = volume.get('volume_type_id') current_array = self._get_current_array() if type_id is not None: volume_type = volume_types.get_volume_type(ctxt, type_id) vg_iops = self._get_volume_type_extra_spec(type_id, 'vg_maxIOPS', default_value=MAX_IOPS) vg_bws = self._get_volume_type_extra_spec(type_id, 'vg_maxBWS', default_value=MAX_BWS) vgroup = self._get_volume_type_extra_spec(type_id, 'vg_name') if vgroup: vgroup = INVALID_CHARACTERS.sub("-", vgroup) vg_volname = self.create_in_vgroup(current_array, vol_name, vol_size, vgroup, vg_iops, vg_bws) return self._setup_volume(current_array, volume, vg_volname) else: qos = self._get_qos_settings(volume_type) if qos is not None: self.create_with_qos(current_array, vol_name, vol_size, qos) else: if self._array.safemode: current_array.post_volumes(names=[vol_name], with_default_protection=False, volume=flasharray.VolumePost( provisioned=vol_size)) else: current_array.post_volumes(names=[vol_name], volume=flasharray.VolumePost( provisioned=vol_size)) return self._setup_volume(current_array, volume, vol_name) @pure_driver_debug_trace def create_volume_from_snapshot(self, volume, snapshot, cgsnapshot=False): """Creates a volume from a snapshot.""" qos = None vol_name = self._generate_purity_vol_name(volume) if cgsnapshot: snap_name = self._get_pgroup_snap_name_from_snapshot(snapshot) else: snap_name = self._get_snap_name(snapshot) current_array = self._get_current_array() ctxt = context.get_admin_context() type_id = volume.get('volume_type_id') if type_id is not None: volume_type = volume_types.get_volume_type(ctxt, type_id) vg_iops = self._get_volume_type_extra_spec(type_id, 'vg_maxIOPS', default_value=MAX_IOPS) vg_bws = self._get_volume_type_extra_spec(type_id, 'vg_maxBWS', default_value=MAX_BWS) vgroup = self._get_volume_type_extra_spec(type_id, 'vg_name') if vgroup: vgroup = INVALID_CHARACTERS.sub("-", vgroup) vg_volname = self.create_from_snap_in_vgroup(current_array, vol_name, snap_name, vgroup, vg_iops, vg_bws) return self._setup_volume(current_array, volume, vg_volname) else: qos = self._get_qos_settings(volume_type) if self._array.safemode: current_array.post_volumes(names=[vol_name], with_default_protection=False, volume=flasharray.VolumePost( source=flasharray.Reference( name=snap_name))) else: current_array.post_volume(names=[vol_name], volume=flasharray.VolumePost( source=flasharray.Reference( name=snap_name))) self._extend_if_needed(current_array, vol_name, snapshot["volume_size"], volume["size"]) if qos is not None: self.set_qos(current_array, vol_name, snapshot["volume_size"], qos) else: current_array.patch_volumes(names=[vol_name], volume=flasharray.VolumePatch( qos=flasharray.Qos( iops_limit=MAX_IOPS, bandwidth_limit=MAX_BWS))) return self._setup_volume(current_array, volume, vol_name) def _setup_volume(self, array, volume, purity_vol_name): # set provider_id early so other methods can use it even though # it wont be set in the cinder DB until we return from create_volume volume.provider_id = purity_vol_name async_enabled = False trisync_enabled = False self._add_to_group_if_needed(volume, purity_vol_name) async_enabled = self._enable_async_replication_if_needed( array, volume) trisync_enabled = self._enable_trisync_replication_if_needed( array, volume) repl_type = self._get_replication_type_from_vol_type( volume.volume_type) try: pgroup = array.get_protection_groups_volumes( 
member_names=[volume.provider_id]).items except AttributeError: # AttributeError from pypureclient SDK as volume # not in a protection group pgroup = None if (repl_type in [REPLICATION_TYPE_ASYNC, REPLICATION_TYPE_TRISYNC] and not pgroup): LOG.error("Failed to add volume %s to pgroup, removing volume") array.patch_volumes(names=[purity_vol_name], volume=flasharray.VolumePatch( destroyed=True)) array.delete_volumes(names=[purity_vol_name]) repl_status = fields.ReplicationStatus.DISABLED if (self._is_vol_in_pod(purity_vol_name) or (async_enabled or trisync_enabled)): repl_status = fields.ReplicationStatus.ENABLED result = self._tag_volume(volume_name=purity_vol_name, vol_type="Data", project=volume.project_id) LOG.debug("Volume tags added. %s", result) if not volume.metadata: model_update = { 'id': volume.id, 'provider_id': purity_vol_name, 'replication_status': repl_status, 'metadata': {'array_volume_name': purity_vol_name, 'array_name': self._array.array_name} } else: model_update = { 'id': volume.id, 'provider_id': purity_vol_name, 'replication_status': repl_status, 'metadata': {**volume.metadata, 'array_volume_name': purity_vol_name, 'array_name': self._array.array_name} } return model_update def _enable_async_replication_if_needed(self, array, volume): repl_type = self._get_replication_type_from_vol_type( volume.volume_type) if repl_type == REPLICATION_TYPE_ASYNC: self._enable_async_replication(array, volume) return True return False def _disable_async_replication_if_needed(self, array, volume): repl_type = self._get_replication_type_from_vol_type( volume.volume_type) if repl_type != REPLICATION_TYPE_ASYNC: self._disable_async_replication(volume) return True return False def _disable_sync_replication_if_needed(self, array, volume, refv): repl_type = self._get_replication_type_from_vol_type( volume.volume_type) if repl_type != REPLICATION_TYPE_SYNC: self._disable_sync_replication(array, volume, refv) return True return False def _enable_sync_replication_if_needed(self, array, volume, refv): repl_type = self._get_replication_type_from_vol_type( volume.volume_type) if repl_type == REPLICATION_TYPE_SYNC: self._enable_sync_replication(array, volume, refv) return True return False @pure_driver_debug_trace def _stretch_replica(self, array, volume, ghost_pod_name): vol_name = self._get_vol_name(volume) pgdata = list(array.get_protection_groups_volumes( member_names=[vol_name]).items) pgs = [item['group']['name'] for item in pgdata] or None pod = flasharray.Pod(name=ghost_pod_name) res = array.patch_volumes(names=[vol_name], volume=flasharray.VolumePatch(pod=pod), remove_from_protection_group_names=pgs) if res.status_code != 200: with excutils.save_and_reraise_exception() as ctxt: ctxt.reraise = True LOG.warning("Unable to add volume to Ghost Pod: %s", res.errors[0].message) else: self._setup_replicated_pods( self._get_current_array(True), self._active_cluster_target_arrays, ghost_pod_name ) return @pure_driver_debug_trace def _wait_for_stretch(self, array, ghost_pod_name): while True: pod_check = array.get_pods(names=[ghost_pod_name]) pod_stat = list(pod_check.items) status_list = [] for system in pod_stat[0].arrays: status_list.append(system.status) if all(status == 'online' for status in status_list): break return @pure_driver_debug_trace def _cleanup_ghostpod(self, array, ghost_pod_name): secondaries = [target_array.array_name for target_array in self._active_cluster_target_arrays] res = array.delete_pods_arrays(group_names=[ghost_pod_name], member_names=secondaries) if res.status_code == 
200: array.patch_pods(names=[ghost_pod_name], destroy_contents=True, pod=flasharray.PodPatch(destroyed=True)) array.delete_pods(names=[ghost_pod_name], eradicate_contents=True) else: with excutils.save_and_reraise_exception() as ctxt: ctxt.reraise = True LOG.warning("Unable to unstretch ghost pod for deletion: %s", res.errors[0].message) return @pure_driver_debug_trace def _get_pgroups(self, array, vol_name): pgdata = list(array.get_protection_groups_volumes( member_names=[vol_name]).items) pgs = [item['group']['name'] for item in pgdata] or None return pgs @pure_driver_debug_trace def _disable_sync_replication(self, array, volume, refname): vol_name = self._get_vol_name(volume) ghost_pod_name = self._ghost_pod_name + "-" + volume.id self._create_pod_if_not_exist(array, ghost_pod_name) self._setup_replicated_pods( self._get_current_array(True), self._active_cluster_target_arrays, ghost_pod_name ) self._wait_for_stretch(array, ghost_pod_name) ghost_ref = flasharray.Reference(name=ghost_pod_name) volmv = array.patch_volumes(names=[vol_name], volume=flasharray. VolumePatch(pod=ghost_ref)) if volmv.status_code == 200: secondaries = [target_array.array_name for target_array in self._active_cluster_target_arrays] array.delete_pods_arrays(group_names=[ghost_pod_name], member_names=secondaries) array.patch_volumes(names=[ghost_pod_name + '::' + vol_name.split('::')[-1]], volume=flasharray.VolumePatch (pod=flasharray.Reference(name=""))) array.patch_pods(names=[ghost_pod_name], destroy_contents=True, pod=flasharray.PodPatch(destroyed=True)) array.delete_pods(names=[ghost_pod_name], eradicate_contents=True) volume.provider_id = vol_name.split('::')[-1] return True else: return False @pure_driver_debug_trace def _enable_sync_replication(self, array, volume, refv): vol_name = self._get_vol_name(volume) cpod = flasharray.Pod(name=self._replication_pod_name) if "::" not in refv: ghost_pod_name = self._ghost_pod_name + "-" + volume.id self._create_pod_if_not_exist(array, ghost_pod_name) self._stretch_replica(array, volume, ghost_pod_name) self._wait_for_stretch(array, ghost_pod_name) volmv = array.patch_volumes(names=[ghost_pod_name + '::' + vol_name], volume=flasharray. VolumePatch(pod=cpod)) if volmv.status_code == 200: self._cleanup_ghostpod(array, ghost_pod_name) return True else: volmv = array.patch_volumes(names=[vol_name], volume=flasharray. 
VolumePatch(pod=cpod)) if volmv.status_code == 200: return True return False def _enable_trisync_replication_if_needed(self, array, volume): repl_type = self._get_replication_type_from_vol_type( volume.volume_type) if (self.configuration.pure_trisync_enabled and repl_type == REPLICATION_TYPE_TRISYNC): self._enable_trisync_replication(array, volume) return True return False def _enable_trisync_replication(self, array, volume): """Add volume to sync-replicated protection group""" array.post_protection_groups_volumes( group_names=[self._trisync_name], member_names=[self._get_vol_name(volume)]) def _disable_trisync_replication(self, array, volume): """Remove volume from sync-replicated protection group""" array.delete_protection_groups_volumes( group_names=[self._trisync_name], member_names=[self._get_vol_name(volume)]) def _enable_async_replication(self, array, volume): """Add volume to replicated protection group.""" array.post_protection_groups_volumes( group_names=[self._replication_pg_name], member_names=[self._get_vol_name(volume)]) @pure_driver_debug_trace def create_cloned_volume(self, volume, src_vref): """Creates a clone of the specified volume.""" vol_name = self._generate_purity_vol_name(volume) src_name = self._get_vol_name(src_vref) # Check which backend the source volume is on. In case of failover # the source volume may be on the secondary array. current_array = self._get_current_array() current_array.post_volumes(volume=flasharray.VolumePost( source=flasharray.Reference(name=src_name)), names=[vol_name]) self._extend_if_needed(current_array, vol_name, src_vref["size"], volume["size"]) type_id = volume.get('volume_type_id') ctxt = context.get_admin_context() if type_id is not None: volume_type = volume_types.get_volume_type(ctxt, type_id) # Check if the volume_type has QoS settings and if so # apply them to the newly created volume qos = self._get_qos_settings(volume_type) if qos is not None: self.set_qos(current_array, vol_name, volume["size"], qos) return self._setup_volume(current_array, volume, vol_name) def _extend_if_needed(self, array, vol_name, src_size, vol_size): """Extend the volume from size src_size to size vol_size.""" if vol_size > src_size: vol_size = vol_size * units.Gi array.patch_volumes(names=[vol_name], volume=flasharray.VolumePatch( provisioned=vol_size)) @pure_driver_debug_trace def delete_volume(self, volume): """Disconnect all hosts and delete the volume""" vol_name = self._get_vol_name(volume) current_array = self._get_current_array() # Do a pass over remaining connections on the current array, if # we can try and remove any remote connections too. con_data = current_array.get_connections( volume_names=[vol_name]) if con_data.status_code == 200: hosts = list(current_array.get_connections( volume_names=[vol_name]).items) for host_info in range(0, len(hosts)): host_name = hosts[host_info].host.name self._disconnect_host(current_array, host_name, vol_name) # Finally, it should be safe to delete the volume res = current_array.patch_volumes(names=[vol_name], volume=flasharray.VolumePatch( destroyed=True)) if res.status_code == 400: with excutils.save_and_reraise_exception() as ctxt: if ERR_MSG_NOT_EXIST in res.errors[0].message: # Happens if the volume does not exist. ctxt.reraise = False LOG.warning("Volume deletion failed with message: %s", res.errors[0].message) if self.configuration.pure_eradicate_on_delete: current_array.delete_volumes(names=[vol_name]) # Now check to see if deleting this volume left an empty volume # group. 
If so, we delete / eradicate the volume group if vol_name and "/" in vol_name: vgroup = vol_name.split("/")[0] self._delete_vgroup_if_empty(current_array, vgroup) @pure_driver_debug_trace def _delete_vgroup_if_empty(self, array, vgroup): """Delete volume group if empty""" vgroup_volumes = list(array.get_volume_groups( names=[vgroup]).items)[0].volume_count if vgroup_volumes == 0: # Delete the volume group array.patch_volume_groups( names=[vgroup], volume_group=flasharray.VolumeGroupPatch( destroyed=True)) if self.configuration.pure_eradicate_on_delete: # Eradciate the volume group res = array.delete_volume_groups(names=[vgroup]) if res.status_code == 400: with excutils.save_and_reraise_exception() as ctxt: ctxt.reraise = False LOG.warning("Volume group deletion failed " "with message: %s", res.errors[0].message) @pure_driver_debug_trace def create_snapshot(self, snapshot): """Creates a snapshot.""" # Get current array in case we have failed over via replication. current_array = self._get_current_array() vol_name, snap_suff = self._get_snap_name(snapshot).split(".") volume_snapshot = flasharray.VolumeSnapshotPost(suffix=snap_suff) current_array.post_volume_snapshots(source_names=[vol_name], volume_snapshot=volume_snapshot) if not snapshot.metadata: snapshot_update = { 'metadata': {'array_snapshot_name': self._get_snap_name( snapshot), 'array_name': self._array.array_name} } else: snapshot_update = { 'metadata': {**snapshot.metadata, 'array_snapshot_name': self._get_snap_name( snapshot), 'array_name': self._array.array_name} } return snapshot_update @pure_driver_debug_trace def delete_snapshot(self, snapshot): """Deletes a snapshot.""" # Get current array in case we have failed over via replication. current_array = self._get_current_array() snap_name = self._get_snap_name(snapshot) volume_snap = flasharray.VolumeSnapshotPatch(destroyed=True) res = current_array.patch_volume_snapshots(names=[snap_name], volume_snapshot=volume_snap) if self.configuration.pure_eradicate_on_delete: current_array.delete_volume_snapshots(names=[snap_name]) if res.status_code == 400: with excutils.save_and_reraise_exception() as ctxt: if (ERR_MSG_NOT_EXIST in res.errors[0].message or ERR_MSG_NO_SUCH_SNAPSHOT in res.errors[0].message or ERR_MSG_PENDING_ERADICATION in res.errors[0].message): # Happens if the snapshot does not exist. ctxt.reraise = False LOG.warning("Unable to delete snapshot, assuming " "already deleted. Error: %s", res.errors[0].message) def ensure_export(self, context, volume): pass def create_export(self, context, volume, connector): pass def initialize_connection(self, volume, connector): """Connect the volume to the specified initiator in Purity. This implementation is specific to the host type (iSCSI, FC, etc). """ raise NotImplementedError def _get_host(self, array, connector, remote=False): """Get a Purity Host that corresponds to the host in the connector. This implementation is specific to the host type (iSCSI, FC, etc). """ raise NotImplementedError def _is_multiattach_to_host(self, volume_attachment, host_name): # When multiattach is enabled a volume could be attached to multiple # instances which are hosted on the same Nova compute. 
# Because Purity cannot recognize the volume is attached more than # one instance we should keep the volume attached to the Nova compute # until the volume is detached from the last instance if not volume_attachment: return False attachment = [a for a in volume_attachment if a.attach_status == "attached" and a.attached_host == host_name] return len(attachment) > 1 @pure_driver_debug_trace def _disconnect(self, array, volume, connector, remove_remote_hosts=True, is_multiattach=False): """Disconnect the volume from the host described by the connector. If no connector is specified it will remove *all* attachments for the volume. Returns True if it was the hosts last connection. """ vol_name = self._get_vol_name(volume) self._tag_volume(volume_name=vol_name) LOG.debug("Volume instance tags deleted") if connector is None: # If no connector was provided it is a force-detach, remove all # host connections for the volume LOG.warning("Removing ALL host connections for volume %s", vol_name) connections = list(array.get_connections( volume_names=[vol_name]).items) for connection in range(0, len(connections)): self._disconnect_host(array, connections[connection]['host'], vol_name) return False else: # Normal case with a specific initiator to detach it from hosts = self._get_host(array, connector, remote=remove_remote_hosts) if hosts: any_in_use = False host_in_use = False for host in hosts: host_name = host.name if not is_multiattach: host_in_use = self._disconnect_host(array, host_name, vol_name) else: LOG.warning("Unable to disconnect host from volume. " "Volume is multi-attached.") any_in_use = any_in_use or host_in_use return any_in_use else: LOG.error("Unable to disconnect host from volume, could not " "determine Purity host on array %s", array.backend_id) return False @pure_driver_debug_trace def terminate_connection(self, volume, connector, **kwargs): """Terminate connection.""" vol_name = self._get_vol_name(volume) # None `connector` indicates force detach, then delete all even # if the volume is multi-attached. multiattach = (connector is not None and self._is_multiattach_to_host(volume.volume_attachment, connector["host"])) if self._is_vol_in_pod(vol_name): # Try to disconnect from each host, they may not be online though # so if they fail don't cause a problem. for array in self._uniform_active_cluster_target_arrays: res = self._disconnect(array, volume, connector, remove_remote_hosts=False, is_multiattach=multiattach) if not res: # Swallow any exception, just warn and continue LOG.warning("Disconnect on secondary array failed") # Now disconnect from the current array self._disconnect(self._get_current_array(), volume, connector, remove_remote_hosts=False, is_multiattach=multiattach) @pure_driver_debug_trace def _disconnect_host(self, array, host_name, vol_name): """Return value indicates if host should be cleaned up.""" res = array.delete_connections(host_names=[host_name], volume_names=[vol_name]) if res.status_code == 400: with excutils.save_and_reraise_exception() as ctxt: if (ERR_MSG_NOT_EXIST in res.errors[0].message or ERR_MSG_HOST_NOT_EXIST in res.errors[0].message): # Happens if the host and volume are not connected or # the host has already been deleted ctxt.reraise = False LOG.warning("Disconnection failed with message: " "%(msg)s.", {"msg": res.errors[0].message}) # If it is a remote host, call it quits here. We cannot delete a remote # host even if it should be cleaned up now. 
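# Remote hosts carry a ':' separator in their name, which is what the check below keys on.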
if ':' in host_name: return connections = None res = array.get_connections(host_names=[host_name]) connection_obj = getattr(res, "items", None) if connection_obj: connections = list(connection_obj) if res.status_code == 400: with excutils.save_and_reraise_exception() as ctxt: if ERR_MSG_NOT_EXIST in res.errors[0].message: ctxt.reraise = False # Assume still used if volumes are attached host_still_used = bool(connections) if GENERATED_NAME.match(host_name) and not host_still_used: LOG.info("Attempting to delete unneeded host %(host_name)r.", {"host_name": host_name}) res = array.delete_hosts(names=[host_name]) if res.status_code == 200: host_still_used = False else: with excutils.save_and_reraise_exception() as ctxt: if ERR_MSG_NOT_EXIST in res.errors[0].message: # Happens if the host is already deleted. # This is fine though, just log so we know what # happened. ctxt.reraise = False host_still_used = False LOG.debug("Purity host deletion failed: " "%(msg)s.", {"msg": res.errors[0].message}) if ERR_MSG_EXISTING_CONNECTIONS in res.errors[0].message: # If someone added a connection underneath us # that's ok, just keep going. ctxt.reraise = False host_still_used = True LOG.debug("Purity host deletion ignored: %(msg)s", {"msg": res.errors[0].message}) return not host_still_used @pure_driver_debug_trace def _update_volume_stats(self): """Set self._stats with relevant information.""" current_array = self._get_current_array() space_info = list(current_array.get_arrays_space().items)[0] perf_info = list(current_array.get_arrays_performance( end_time=int(time.time()) * 1000, start_time=(int(time.time()) * 1000) - 30000, resolution=30000 ).items)[0] hosts = list(current_array.get_hosts().items) volumes = list(current_array.get_volumes().items) snaps = list(current_array.get_volume_snapshots().items) pgroups = list(current_array.get_protection_groups().items) # Perform some translations and calculations total_capacity = float(space_info.capacity) / units.Gi used_space = float(space_info.space.total_physical) / units.Gi free_space = float(total_capacity - used_space) # If array uses Evergreen/One model then total_provisioned # is not reported so use the closest value avaible in that # consumption model try: provisioned_space = float(space_info.space. total_provisioned) / units.Gi except AttributeError: provisioned_space = float(space_info.space. 
used_provisioned) / units.Gi # If array uses Evergreen/One model then data reduction values # are not reported so we must force the driver to use the old # cinder non-dynamic oversubscription calculations try: total_reduction = float(space_info.space.total_reduction) except AttributeError: total_reduction = 999 total_vols = len(volumes) total_hosts = len(hosts) total_snaps = len(snaps) total_pgroups = len(pgroups) thin_provisioning = self._get_thin_provisioning(total_reduction) # Start with some required info data = dict( volume_backend_name=self._backend_name, vendor_name='Pure Storage', driver_version=self.VERSION, storage_protocol=self._storage_protocol, ) # Add flags for supported features data['consistencygroup_support'] = True data['thin_provisioning_support'] = True data['multiattach'] = True data['consistent_group_replication_enabled'] = True data['consistent_group_snapshot_enabled'] = True data['QoS_support'] = True # Add capacity info for scheduler data['total_capacity_gb'] = total_capacity data['free_capacity_gb'] = free_space data['reserved_percentage'] = self.configuration.reserved_percentage data['provisioned_capacity'] = provisioned_space data['max_over_subscription_ratio'] = thin_provisioning # Add the filtering/goodness functions data['filter_function'] = self.get_filter_function() data['goodness_function'] = self.get_goodness_function() # Add array metadata counts for filtering and weighing functions data['total_volumes'] = total_vols data['total_snapshots'] = total_snaps data['total_hosts'] = total_hosts data['total_pgroups'] = total_pgroups # Add performance stats for filtering and weighing functions # IOPS data['writes_per_sec'] = perf_info.writes_per_sec data['reads_per_sec'] = perf_info.reads_per_sec # Bandwidth data['input_per_sec'] = perf_info.write_bytes_per_sec data['output_per_sec'] = perf_info.read_bytes_per_sec # Latency data['usec_per_read_op'] = perf_info.usec_per_read_op data['usec_per_write_op'] = perf_info.usec_per_write_op # TODO: Queue depth - deprecated - remove in 2026.1 cycle data['queue_depth'] = getattr(perf_info, 'queue_depth', 0) # Detailed I/O queuieing information data['queue_usec_per_mirrored_write_op'] = ( perf_info.queue_usec_per_mirrored_write_op) data['queue_usec_per_read_op'] = perf_info.queue_usec_per_read_op data['queue_usec_per_write_op'] = perf_info.queue_usec_per_write_op # Replication data["replication_capability"] = self._get_replication_capability() data["replication_enabled"] = self._is_replication_enabled repl_types = [] if self._is_replication_enabled: repl_types = [REPLICATION_TYPE_ASYNC] if self._is_active_cluster_enabled: repl_types.append(REPLICATION_TYPE_SYNC) if self._is_trisync_enabled: repl_types.append(REPLICATION_TYPE_TRISYNC) data["replication_type"] = repl_types data["replication_count"] = len(self._replication_target_arrays) data["replication_targets"] = [array.backend_id for array in self._replication_target_arrays] self._stats = data def _get_replication_capability(self): """Discovered connected arrays status for replication""" connections = list( self._get_current_array().get_array_connections().items) is_sync, is_async, is_trisync = False, False, False for conn in range(0, len(connections)): # If connection status is connected, we can have # either sync or async replication if connections[conn].status == "connected": # check for async replication if connections[conn].type == "async-replication": is_async = True # check for sync replication elif connections[conn].type == "sync-replication": is_sync = True # If 
we've connections for both sync and async # replication, we can set trisync replication # and exit the loop if is_sync and is_async: is_trisync = True break # Check if it is a trisync replication if is_trisync: replication_type = "trisync" # If replication is not trisync, it will be either # sync or async elif is_sync: replication_type = "sync" elif is_async: replication_type = "async" else: replication_type = None return replication_type def _get_thin_provisioning(self, total_reduction): """Get the current value for the thin provisioning ratio. If pure_automatic_max_oversubscription_ratio is True we will calculate a value, if not we will respect the configuration option for the max_over_subscription_ratio. """ if (self.configuration.pure_automatic_max_oversubscription_ratio and total_reduction < 100): # If total_reduction is > 100 then this is a very under-utilized # array and therefore the oversubscription rate is effectively # meaningless. # In this case we look to the config option as a starting # point. Once some volumes are actually created and some data is # stored on the array a much more accurate number will be # presented based on current usage. thin_provisioning = total_reduction else: thin_provisioning = volume_utils.get_max_over_subscription_ratio( self.configuration.max_over_subscription_ratio, supports_auto=True) return thin_provisioning @pure_driver_debug_trace def extend_volume(self, volume, new_size_gb): """Extend volume to new_size.""" # Get current array in case we have failed over via replication. current_array = self._get_current_array() vol_name = self._get_vol_name(volume) new_size = new_size_gb * units.Gi current_array.patch_volumes(names=[vol_name], volume=flasharray.VolumePatch( provisioned=new_size)) ctxt = context.get_admin_context() type_id = volume.get('volume_type_id') if type_id is not None: volume_type = volume_types.get_volume_type(ctxt, type_id) LOG.debug("QOS volume type: '%s'", volume_type) qos = self._get_qos_settings(volume_type) if qos is not None: self.set_qos(current_array, vol_name, new_size, qos) def _add_volume_to_consistency_group(self, group, vol_name): pgroup_name = self._get_pgroup_name(group) current_array = self._get_current_array() current_array.post_protection_groups_volumes( group_names=[pgroup_name], member_names=[vol_name]) @pure_driver_debug_trace def create_consistencygroup(self, context, group, grp_type=None): """Creates a consistencygroup.""" current_array = self._get_current_array() group_name = self._get_pgroup_name(group) LOG.debug('Creating Consistency Group %(group_name)s', {'group_name': group_name}) current_array.post_protection_groups( names=[group_name]) if grp_type: current_array.patch_protection_groups( names=[group_name], protection_group=flasharray.ProtectionGroup( replication_schedule=flasharray.ReplicationSchedule( frequency=self._replication_interval))) for target_array in self._replication_target_arrays: # Configure PG to replicate to target_array. current_array.post_protection_groups_targets( group_names=[group_name], member_names=[target_array.array_name]) # Wait until "Target Group" setting propagates to target_array. pgroup_name_on_target = self._get_pgroup_name_on_target( current_array.array_name, group_name) if grp_type == REPLICATION_TYPE_TRISYNC: pgroup_name_on_target = group_name.replace("::", ":") target_array.patch_protection_groups_targets( group_names=[pgroup_name_on_target], target=flasharray.TargetProtectionGroupPostPatch( allowed=True)) # Wait until source array acknowledges previous operation. 
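# (The 'allowed' flag set on the target side has to propagate back to the source before replication can be enabled.)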
self._wait_until_source_array_allowed(current_array, group_name) # Start replication on the PG. current_array.patch_protection_groups( names=[group_name], protection_group=flasharray.ProtectionGroup( replication_schedule=flasharray.ReplicationSchedule( enabled=True))) model_update = {'status': fields.ConsistencyGroupStatus.AVAILABLE} return model_update def _create_cg_from_cgsnap(self, volumes, snapshots): """Creates a new consistency group from a cgsnapshot. The new volumes will be consistent with the snapshot. """ vol_models = [] for volume, snapshot in zip(volumes, snapshots): vol_models.append(self.create_volume_from_snapshot( volume, snapshot, cgsnapshot=True)) return vol_models def _create_cg_from_cg(self, group, source_group, volumes, source_vols): """Creates a new consistency group from an existing cg. The new volumes will be in a consistent state, but this requires taking a new temporary group snapshot and cloning from that. """ vol_models = [] pgroup_name = self._get_pgroup_name(source_group) tmp_suffix = '%s-tmp' % uuid.uuid4() tmp_pgsnap_name = '%(pgroup_name)s.%(pgsnap_suffix)s' % { 'pgroup_name': pgroup_name, 'pgsnap_suffix': tmp_suffix, } LOG.debug('Creating temporary Protection Group snapshot %(snap_name)s ' 'while cloning Consistency Group %(source_group)s.', {'snap_name': tmp_pgsnap_name, 'source_group': source_group.id}) current_array = self._get_current_array() suffix = flasharray.ProtectionGroupSnapshotPost(suffix=tmp_suffix) current_array.post_protection_group_snapshots( source_names=[pgroup_name], protection_group_snapshot=suffix) volumes, _ = self.update_provider_info(volumes, None) try: for source_vol, cloned_vol in zip(source_vols, volumes): vol_models.append(cloned_vol) source_snap_name = self._get_pgroup_vol_snap_name( pgroup_name, tmp_suffix, self._get_vol_name(source_vol) ) cloned_vol_name = self._get_vol_name(cloned_vol) current_array.post_volumes(names=[cloned_vol_name], volume=flasharray.VolumePost( source=flasharray.Reference( name=source_snap_name))) self._add_volume_to_consistency_group( group, cloned_vol_name ) repl_type = self._get_replication_type_from_vol_type( source_vol.volume_type) if (self.configuration.pure_trisync_enabled and repl_type == REPLICATION_TYPE_TRISYNC): self._enable_trisync_replication(current_array, cloned_vol) LOG.info('Trisync replication set for new cloned ' 'volume %s', cloned_vol_name) finally: self._delete_pgsnapshot(tmp_pgsnap_name) return vol_models @pure_driver_debug_trace def create_consistencygroup_from_src(self, context, group, volumes, cgsnapshot=None, snapshots=None, source_cg=None, source_vols=None, group_type=None): # let generic volume group support handle non-cgsnapshots if not volume_utils.is_group_a_cg_snapshot_type(group): raise NotImplementedError() model_update = self.create_consistencygroup(context, group, group_type) if cgsnapshot and snapshots: vol_models = self._create_cg_from_cgsnap(volumes, snapshots) elif source_cg: vol_models = self._create_cg_from_cg(group, source_cg, volumes, source_vols) return model_update, vol_models @pure_driver_debug_trace def delete_consistencygroup(self, context, group, volumes): """Deletes a consistency group.""" # let generic volume group support handle non-cgsnapshots if not volume_utils.is_group_a_cg_snapshot_type(group): raise NotImplementedError() pgroup_name = self._get_pgroup_name(group) current_array = self._get_current_array() pgres = current_array.patch_protection_groups( names=[pgroup_name], protection_group=flasharray.ProtectionGroup( destroyed=True)) if 
pgres.status_code == 200: if self.configuration.pure_eradicate_on_delete: current_array.delete_protection_groups( names=[pgroup_name]) else: with excutils.save_and_reraise_exception() as ctxt: if (ERR_MSG_PENDING_ERADICATION in pgres.errors[0].message or ERR_MSG_NOT_EXIST in pgres.errors[0].message): # Treat these as a "success" case since we are trying # to delete them anyway. ctxt.reraise = False LOG.warning("Unable to delete Protection Group: %s", pgres.errors[0].context) for volume in volumes: self.delete_volume(volume) return None, None @pure_driver_debug_trace def update_consistencygroup(self, context, group, add_volumes=None, remove_volumes=None): pgroup_name = self._get_pgroup_name(group) if add_volumes: addvollist = [self._get_vol_name(vol) for vol in add_volumes] else: addvollist = [] if remove_volumes: remvollist = [self._get_vol_name(vol) for vol in remove_volumes] else: remvollist = [] current_array = self._get_current_array() current_array.post_protection_groups_volumes( group_names=[pgroup_name], member_names=addvollist) current_array.delete_protection_groups_volumes( group_names=[pgroup_name], member_names=remvollist) return None, None, None @pure_driver_debug_trace def create_cgsnapshot(self, context, cgsnapshot, snapshots): """Creates a cgsnapshot.""" pgroup_name = self._get_pgroup_name(cgsnapshot.group) pgsnap_suffix = self._get_pgroup_snap_suffix(cgsnapshot) current_array = self._get_current_array() suffix = flasharray.ProtectionGroupSnapshotPost(suffix=pgsnap_suffix) current_array.post_protection_group_snapshots( source_names=[pgroup_name], protection_group_snapshot=suffix) return None, None def _delete_pgsnapshot(self, pgsnap_name): current_array = self._get_current_array() pg_snapshot = flasharray.ProtectionGroupSnapshotPatch(destroyed=True) res = current_array.patch_protection_group_snapshots( protection_group_snapshot=pg_snapshot, names=[pgsnap_name]) if self.configuration.pure_eradicate_on_delete: current_array.delete_protection_group_snapshots( names=[pgsnap_name]) if res.status_code == 400: with excutils.save_and_reraise_exception() as ctxt: if (ERR_MSG_PENDING_ERADICATION in res.errors[0].message or ERR_MSG_NOT_EXIST in res.errors[0].message): # Treat these as a "success" case since we are trying # to delete them anyway. ctxt.reraise = False LOG.warning("Unable to delete Protection Group " "Snapshot: %s", res.errors[0].message) @pure_driver_debug_trace def delete_cgsnapshot(self, context, cgsnapshot, snapshots): """Deletes a cgsnapshot.""" pgsnap_name = self._get_pgroup_snap_name(cgsnapshot) self._delete_pgsnapshot(pgsnap_name) return None, None def _validate_manage_existing_ref(self, existing_ref, is_snap=False): """Ensure that an existing_ref is valid and return volume info If the ref is not valid throw a ManageExistingInvalidReference exception with an appropriate error. Will return volume or snapshot information from the array for the object specified by existing_ref. """ if ("source-name" not in existing_ref or not existing_ref["source-name"]): raise exception.ManageExistingInvalidReference( existing_ref=existing_ref, reason=_("manage_existing requires a 'source-name'" " key to identify an existing volume.")) if is_snap: if existing_ref['source-name'].count("::") > 1: # Don't allow for managing snaphot in a realm raise exception.ManageExistingInvalidReference( _("Unable to manage snapshot in a Realm")) # Purity snapshot names are prefixed with the source volume name. 
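# e.g. a ref of 'volume-<id>-cinder.my-snap' (illustrative) splits into the Purity volume name and the snapshot suffix.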
ref_vol_name, ref_snap_suffix = existing_ref['source-name'].split( '.') else: ref_vol_name = existing_ref['source-name'] current_array = self._get_current_array() if not is_snap and self._realm_check(current_array, ref_vol_name): # Don't allow for managing volumes in a realm raise exception.ManageExistingInvalidReference( _("Unable to manage volume in a Realm")) if not is_snap and self._pod_check(current_array, ref_vol_name): # Don't allow for managing volumes in a replicated pod raise exception.ManageExistingInvalidReference( _("Unable to manage volume in a Replicated Pod")) volres = current_array.get_volumes(names=[ref_vol_name]) if volres.status_code == 200: volume_info = list(volres.items)[0] if volume_info: if is_snap: snapres = current_array.get_volume_snapshots( names=[existing_ref['source-name']]) if snapres.status_code == 200: snap = list(snapres.items)[0] return snap else: with excutils.save_and_reraise_exception() as ctxt: if ERR_MSG_NOT_EXIST in volres.errors[0].message: ctxt.reraise = False else: return volume_info else: with excutils.save_and_reraise_exception() as ctxt: if ERR_MSG_NOT_EXIST in volres.errors[0].message: ctxt.reraise = False # If volume information was unable to be retrieved we need # to throw an Invalid Reference exception. raise exception.ManageExistingInvalidReference( existing_ref=existing_ref, reason=(_("Unable to find Purity ref with source-name=%s") % ref_vol_name)) def _add_to_group_if_needed(self, volume, vol_name): if volume['group_id']: if volume_utils.is_group_a_cg_snapshot_type(volume.group): self._add_volume_to_consistency_group( volume.group, vol_name ) elif volume['consistencygroup_id']: self._add_volume_to_consistency_group( volume.consistencygroup, vol_name ) def create_group(self, ctxt, group): """Creates a group. :param ctxt: the context of the caller. :param group: the Group object of the group to be created. :returns: model_update """ cgr_type = None repl_type = None if volume_utils.is_group_a_cg_snapshot_type(group): if volume_utils.is_group_a_type( group, "consistent_group_replication_enabled"): if not self._is_replication_enabled: msg = _("Replication not properly configured on backend.") LOG.error(msg) raise PureDriverException(msg) for vol_type_id in group.volume_type_ids: vol_type = volume_type.VolumeType.get_by_name_or_id( ctxt, vol_type_id) repl_type = self._get_replication_type_from_vol_type( vol_type) if repl_type not in [REPLICATION_TYPE_ASYNC, REPLICATION_TYPE_TRISYNC]: # Unsupported configuration LOG.error("Unable to create group: create consistent " "replication group with non-replicated or " "sync replicated volume type is not " "supported.") model_update = {'status': fields.GroupStatus.ERROR} return model_update if not cgr_type: cgr_type = repl_type elif cgr_type != repl_type: LOG.error("Unable to create group: create consistent " "replication group with different " "replication types is not supported.") model_update = {'status': fields.GroupStatus.ERROR} return model_update return self.create_consistencygroup(ctxt, group, cgr_type) # If it wasn't a consistency group request ignore it and we'll rely on # the generic group implementation. raise NotImplementedError() def delete_group(self, ctxt, group, volumes): """Deletes a group. :param ctxt: the context of the caller. :param group: the Group object of the group to be deleted. :param volumes: a list of Volume objects in the group. 
:returns: model_update, volumes_model_update """ if volume_utils.is_group_a_cg_snapshot_type(group): return self.delete_consistencygroup(ctxt, group, volumes) # If it wasn't a consistency group request ignore it and we'll rely on # the generic group implementation. raise NotImplementedError() def update_group(self, ctxt, group, add_volumes=None, remove_volumes=None): """Updates a group. :param ctxt: the context of the caller. :param group: the Group object of the group to be updated. :param add_volumes: a list of Volume objects to be added. :param remove_volumes: a list of Volume objects to be removed. :returns: model_update, add_volumes_update, remove_volumes_update """ if volume_utils.is_group_a_cg_snapshot_type(group): return self.update_consistencygroup(ctxt, group, add_volumes, remove_volumes) # If it wasn't a consistency group request ignore it and we'll rely on # the generic group implementation. raise NotImplementedError() def create_group_from_src(self, ctxt, group, volumes, group_snapshot=None, snapshots=None, source_group=None, source_vols=None): """Creates a group from source. :param ctxt: the context of the caller. :param group: the Group object to be created. :param volumes: a list of Volume objects in the group. :param group_snapshot: the GroupSnapshot object as source. :param snapshots: a list of snapshot objects in group_snapshot. :param source_group: the Group object as source. :param source_vols: a list of volume objects in the source_group. :returns: model_update, volumes_model_update """ cgr_type = None if volume_utils.is_group_a_cg_snapshot_type(group): if volume_utils.is_group_a_type( group, "consistent_group_replication_enabled"): cgr_type = True return self.create_consistencygroup_from_src(ctxt, group, volumes, group_snapshot, snapshots, source_group, source_vols, cgr_type) # If it wasn't a consistency group request ignore it and we'll rely on # the generic group implementation. raise NotImplementedError() def create_group_snapshot(self, ctxt, group_snapshot, snapshots): """Creates a group_snapshot. :param ctxt: the context of the caller. :param group_snapshot: the GroupSnapshot object to be created. :param snapshots: a list of Snapshot objects in the group_snapshot. :returns: model_update, snapshots_model_update """ if volume_utils.is_group_a_cg_snapshot_type(group_snapshot): return self.create_cgsnapshot(ctxt, group_snapshot, snapshots) # If it wasn't a consistency group request ignore it and we'll rely on # the generic group implementation. raise NotImplementedError() def delete_group_snapshot(self, ctxt, group_snapshot, snapshots): """Deletes a group_snapshot. :param ctxt: the context of the caller. :param group_snapshot: the GroupSnapshot object to be deleted. :param snapshots: a list of snapshot objects in the group_snapshot. :returns: model_update, snapshots_model_update """ if volume_utils.is_group_a_cg_snapshot_type(group_snapshot): return self.delete_cgsnapshot(ctxt, group_snapshot, snapshots) # If it wasn't a consistency group request ignore it and we'll rely on # the generic group implementation. 
raise NotImplementedError() @pure_driver_debug_trace def _safemode_check(self, array, existing_ref): pgs = self._get_pgroups(array, existing_ref['source-name']) for pg in filter(None, pgs or []): res = array.get_protection_groups(names=[pg]) if res.status_code == 200: if list(res.items)[0].retention_lock == 'ratcheted': raise exception.ManageExistingInvalidReference( existing_ref=existing_ref, reason=_("%(driver)s manage_existing cannot manage" " a SafeMode protected volume as its not" " supported." ) % {'driver': self.__class__.__name__}) @pure_driver_debug_trace def manage_existing(self, volume, existing_ref): """Brings an existing backend storage object under Cinder management. We expect a volume name in the existing_ref that matches one in Purity. """ self._validate_manage_existing_ref(existing_ref) ref_vol_name = existing_ref['source-name'] current_array = self._get_current_array() self._safemode_check(current_array, existing_ref) ref_type = self._check_repl(current_array, ref_vol_name) volume_data = list(current_array.get_volumes( names=[ref_vol_name]).items)[0] connected_hosts = volume_data.connection_count if connected_hosts > 0: raise exception.ManageExistingInvalidReference( existing_ref=existing_ref, reason=_("%(driver)s manage_existing cannot manage a volume " "connected to hosts. Please disconnect this volume " "from existing hosts before importing" ) % {'driver': self.__class__.__name__}) orig_vol_name = self._generate_purity_vol_name(volume) new_vol_name = orig_vol_name.split('::')[-1] if "::" in ref_vol_name: ref_name = ref_vol_name.split('::')[0] new_vol_name = ref_name + '::' + new_vol_name LOG.info("Renaming existing volume %(ref_name)s to %(new_name)s", {"ref_name": ref_vol_name, "new_name": new_vol_name}) self._rename_volume_object(ref_vol_name, new_vol_name, raise_not_exist=True, manage=True) # If existing volume has QoS settings then clear these out vol_iops = getattr(volume_data.qos, "iops_limit", None) vol_bw = getattr(volume_data.qos, "bandwidth_limit", None) if vol_bw or vol_iops: LOG.info("Removing pre-existing QoS settings on managed volume.") current_array.patch_volumes( names=[new_vol_name], volume=flasharray.VolumePatch( qos=flasharray.Qos(iops_limit=MAX_IOPS, bandwidth_limit=MAX_BWS))) # If we are managing to a volume type that is a volume group # make sure that the target volume group exists with the # correct QoS settings. if self._get_volume_type_extra_spec(volume.volume_type['id'], 'vg_name'): target_vg = self._get_volume_type_extra_spec( volume.volume_type['id'], 'vg_name') target_vg = INVALID_CHARACTERS.sub("-", target_vg) vg_iops = self._get_volume_type_extra_spec( volume.volume_type['id'], 'vg_maxIOPS', default_value=MAX_IOPS) vg_bws = self._get_volume_type_extra_spec( volume.volume_type['id'], 'vg_maxBWS', default_value=MAX_BWS) if not (MIN_IOPS <= int(vg_iops) <= MAX_IOPS): msg = (_('vg_maxIOPS QoS error. Must be more than ' '%(min_iops)s and less than %(max_iops)s') % {'min_iops': MIN_IOPS, 'max_iops': MAX_IOPS}) raise exception.InvalidQoSSpecs(message=msg) if not (MIN_BWS <= int(vg_bws) <= MAX_BWS): msg = (_('vg_maxBWS QoS error. 
Must be between ' '%(min_bws)s and less than %(max_bws)s') % {'min_bws': MIN_BWS, 'max_bws': MAX_BWS}) raise exception.InvalidQoSSpecs(message=msg) self._create_volume_group_if_not_exist(current_array, target_vg, vg_iops, vg_bws) res = current_array.patch_volumes( names=[new_vol_name], volume=flasharray.VolumePatch( volume_group=flasharray.Reference( name=target_vg))) if res.status_code != 200: LOG.warning("Failed to move volume %(vol)s, to volume " "group %(vg)s. Error: %(mess)s", { "vol": new_vol_name, "vg": target_vg, "mess": res.errors[0].message}) new_vol_name = target_vg + "/" + new_vol_name if "/" in ref_vol_name: source_vg = ref_vol_name.split('/')[0] self._delete_vgroup_if_empty(current_array, source_vg) # Check if the volume_type has QoS settings and if so # apply them to the newly managed volume qos = None qos = self._get_qos_settings(volume.volume_type) if qos: vol_size = int(volume_data.provisioned / units.Gi) self.set_qos(current_array, new_vol_name, vol_size, qos) volume.provider_id = new_vol_name if ref_type == REPLICATION_TYPE_ASYNC: self._disable_async_replication_if_needed(current_array, volume) elif ref_type == REPLICATION_TYPE_SYNC: self._disable_sync_replication_if_needed(current_array, volume, ref_vol_name) async_enabled = self._enable_async_replication_if_needed(current_array, volume) sync_enabled = self._enable_sync_replication_if_needed(current_array, volume, ref_vol_name) repl_status = fields.ReplicationStatus.DISABLED volume.provider_id = orig_vol_name if async_enabled or sync_enabled: repl_status = fields.ReplicationStatus.ENABLED result = self._tag_volume(volume_name=new_vol_name, project=volume.project_id) LOG.debug("Volume tags added: %s", result) return { 'provider_id': orig_vol_name, 'replication_status': repl_status, 'metadata': {'array_volume_name': orig_vol_name, 'array_name': current_array.array_name}, } @pure_driver_debug_trace def _check_repl(self, array, ref_vol_name): repl_type = None if '::' in ref_vol_name: res = array.get_pods(names=[ref_vol_name.split('::')[0]]) if list(res.items)[0].array_count >= 2: repl_type = 'sync' else: pgs = self._get_pgroups(array, ref_vol_name) for pg in filter(None, pgs or []): res = array.get_protection_groups(names=[pg]) if list(res.items)[0].target_count >= 1: repl_type = 'async' break return repl_type @pure_driver_debug_trace def manage_existing_get_size(self, volume, existing_ref): """Return size of volume to be managed by manage_existing. We expect a volume name in the existing_ref that matches one in Purity. """ volume_info = self._validate_manage_existing_ref(existing_ref) size = self._round_bytes_to_gib(volume_info.provisioned) return size def _pod_check(self, array, volume): """Check if volume is in a replicated pod.""" if "::" in volume: if volume.count("::") != 1: # This is a special for a volume in a realm pod return False pod = volume.split("::")[0] pod_info = list(array.get_pods(names=[pod]).items)[0] if (pod_info.link_source_count == 0 and pod_info.link_target_count == 0 and pod_info.array_count == 1): return False else: return True else: return False def _realm_check(self, array, volume): """Check if volume is in a realm.""" if "::" in volume: if volume.count("::") > 1: return True else: return False else: return False def _rename_volume_object(self, old_name, new_name, raise_not_exist=False, snapshot=False, manage=False): """Rename a volume object (could be snapshot) in Purity. This will not raise an exception if the object does not exist. 
We need to ensure that if we are renaming to a different container in the backend, eg a pod, volume group, or just the main array container, we have to rename first and then move the object. """ current_array = self._get_current_array() if snapshot: res = current_array.patch_volume_snapshots( names=[old_name], volume_snapshot=flasharray.VolumePatch(name=new_name)) else: if not manage and "/" in old_name and "::" not in old_name: interim_name = old_name.split("/")[1] res = current_array.patch_volumes( names=[old_name], volume=flasharray.VolumePatch( volume_group=flasharray.Reference(name=""))) if res.status_code == 400: LOG.warning("Unable to move %(old_name)s, error " "message: %(error)s", {"old_name": old_name, "error": res.errors[0].message}) old_name = interim_name if not manage and "/" not in old_name and "::" in old_name: interim_name = old_name.split("::")[1] res = current_array.patch_volumes( names=[old_name], volume=flasharray.VolumePatch( pod=flasharray.Reference(name=""))) if res.status_code == 400: LOG.warning("Unable to move %(old_name)s, error " "message: %(error)s", {"old_name": old_name, "error": res.errors[0].message}) old_name = interim_name if not manage and "/" in old_name and "::" in old_name: # This is a VVOL which can't be moved, so have # to take a copy interim_name = old_name.split("/")[1] res = current_array.post_volumes( names=[interim_name], volume=flasharray.VolumePost( source=flasharray.Reference(name=old_name))) if res.status_code == 400: LOG.warning("Unable to copy %(old_name)s, error " "message: %(error)s", {"old_name": old_name, "error": res.errors[0].message}) old_name = interim_name res = current_array.patch_volumes( names=[old_name], volume=flasharray.VolumePatch(name=new_name)) if res.status_code == 400: with excutils.save_and_reraise_exception() as ctxt: if ERR_MSG_NOT_EXIST in res.errors[0].message: ctxt.reraise = raise_not_exist LOG.warning("Unable to rename %(old_name)s, error " "message: %(error)s", {"old_name": old_name, "error": res.errors[0].message}) return new_name @pure_driver_debug_trace def unmanage(self, volume): """Removes the specified volume from Cinder management. Does not delete the underlying backend storage object. The volume will be renamed with "-unmanaged" as a suffix """ vol_name = self._get_vol_name(volume) if len(vol_name + UNMANAGED_SUFFIX) > MAX_VOL_LENGTH: unmanaged_vol_name = vol_name[:-len(UNMANAGED_SUFFIX)] + \ UNMANAGED_SUFFIX else: unmanaged_vol_name = vol_name + UNMANAGED_SUFFIX LOG.info("Renaming existing volume %(ref_name)s to %(new_name)s", {"ref_name": vol_name, "new_name": unmanaged_vol_name}) self._untag_volume(vol_name) LOG.debug("Volume tags removed") self._rename_volume_object(vol_name, unmanaged_vol_name, manage=True) def manage_existing_snapshot(self, snapshot, existing_ref): """Brings an existing backend storage object under Cinder management. We expect a snapshot name in the existing_ref that matches one in Purity. 
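For example: {'source-name': '<purity-volume-name>.<snapshot-suffix>'}.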
""" self._validate_manage_existing_ref(existing_ref, is_snap=True) ref_snap_name = existing_ref['source-name'] new_snap_name = self._get_snap_name(snapshot) LOG.info("Renaming existing snapshot %(ref_name)s to " "%(new_name)s", {"ref_name": ref_snap_name, "new_name": new_snap_name}) self._rename_volume_object(ref_snap_name, new_snap_name, raise_not_exist=True, snapshot=True) return { 'metadata': {'array_snapshot_name': new_snap_name, 'array_name': self._array.array_name}, } def manage_existing_snapshot_get_size(self, snapshot, existing_ref): """Return size of snapshot to be managed by manage_existing. We expect a snapshot name in the existing_ref that matches one in Purity. """ snap_info = self._validate_manage_existing_ref(existing_ref, is_snap=True) size = self._round_bytes_to_gib(snap_info.provisioned) return size def unmanage_snapshot(self, snapshot): """Removes the specified snapshot from Cinder management. Does not delete the underlying backend storage object. We expect a snapshot name in the existing_ref that matches one in Purity. """ snap_name = self._get_snap_name(snapshot) if len(snap_name + UNMANAGED_SUFFIX) > MAX_SNAP_LENGTH: unmanaged_snap_name = snap_name[:-len(UNMANAGED_SUFFIX)] + \ UNMANAGED_SUFFIX else: unmanaged_snap_name = snap_name + UNMANAGED_SUFFIX LOG.info("Renaming existing snapshot %(ref_name)s to " "%(new_name)s", {"ref_name": snap_name, "new_name": unmanaged_snap_name}) self._rename_volume_object(snap_name, unmanaged_snap_name, snapshot=True) def get_manageable_volumes(self, cinder_volumes, marker, limit, offset, sort_keys, sort_dirs): """List volumes on the backend available for management by Cinder. Rule out volumes that are attached to a Purity host or that are already in the list of cinder_volumes. Also exclude any volumes that are in a pod, it is difficult to safely move in/out of pods from here without more context so we'll rely on the admin to move them before managing the volume. We return references of the volume names for any others. 
""" array = self._get_current_array() pure_vols = list(array.get_volumes().items) connections = list(array.get_connections().items) # Put together a map of volumes that are connected to hosts connected_vols = {} for connect in range(0, len(connections)): connected_vols[connections[connect].volume.name] = \ getattr(connections[connect].host, "name", None) # Put together a map of existing cinder volumes on the array # so we can lookup cinder id's by purity volume names existing_vols = {} for cinder_vol in cinder_volumes: existing_vols[self._get_vol_name(cinder_vol)] = cinder_vol.name_id manageable_vols = [] for pure_vol in range(0, len(pure_vols)): vol_name = pure_vols[pure_vol].name cinder_id = existing_vols.get(vol_name) not_safe_msgs = [] host = connected_vols.get(vol_name) in_realm = self._realm_check(array, vol_name) in_pod = self._pod_check(array, vol_name) is_deleted = pure_vols[pure_vol].destroyed if host: not_safe_msgs.append(_('Volume connected to host %s') % host) if cinder_id: not_safe_msgs.append(_('Volume already managed')) if in_realm: not_safe_msgs.append(_('Volume is in a Realm')) if in_pod: not_safe_msgs.append(_('Volume is in a Replicated Pod')) if is_deleted: not_safe_msgs.append(_('Volume is deleted')) is_safe = (len(not_safe_msgs) == 0) reason_not_safe = '' if not is_safe: for i, msg in enumerate(not_safe_msgs): if i > 0: reason_not_safe += ' && ' reason_not_safe += "%s" % msg manageable_vols.append({ 'reference': {'name': vol_name}, 'size': self._round_bytes_to_gib( pure_vols[pure_vol].provisioned), 'safe_to_manage': is_safe, 'reason_not_safe': reason_not_safe, 'cinder_id': cinder_id, 'extra_info': None, }) return volume_utils.paginate_entries_list( manageable_vols, marker, limit, offset, sort_keys, sort_dirs) def get_manageable_snapshots(self, cinder_snapshots, marker, limit, offset, sort_keys, sort_dirs): """List snapshots on the backend available for management by Cinder.""" array = self._get_current_array() pure_snapshots = list(array.get_volume_snapshots().items) # Put together a map of existing cinder snapshots on the array # so we can lookup cinder id's by purity snapshot names existing_snapshots = {} for cinder_snap in cinder_snapshots: name = self._get_snap_name(cinder_snap) existing_snapshots[name] = cinder_snap.id manageable_snaps = [] for pure_snap in range(0, len(pure_snapshots)): snap_name = pure_snapshots[pure_snap].name cinder_id = existing_snapshots.get(snap_name) is_safe = True reason_not_safe = None if cinder_id: is_safe = False reason_not_safe = _("Snapshot already managed.") if pure_snapshots[pure_snap].destroyed: is_safe = False reason_not_safe = _("Snapshot is deleted.") if snap_name.count("::") > 1: is_safe = False reason_not_safe = _("Snapshot is in a realm.") manageable_snaps.append({ 'reference': {'name': snap_name}, 'size': self._round_bytes_to_gib( pure_snapshots[pure_snap].provisioned), 'safe_to_manage': is_safe, 'reason_not_safe': reason_not_safe, 'cinder_id': cinder_id, 'extra_info': None, 'source_reference': { 'name': getattr(pure_snapshots[pure_snap].source, "name", None)}, }) return volume_utils.paginate_entries_list( manageable_snaps, marker, limit, offset, sort_keys, sort_dirs) @staticmethod def _round_bytes_to_gib(size): return int(math.ceil(float(size) / units.Gi)) def _get_flasharray(self, san_ip, api_token, rest_version=None, verify_ssl=None, ssl_cert_path=None): try: array = flasharray.Client(target=san_ip, api_token=api_token, verify_ssl=verify_ssl, ssl_cert=ssl_cert_path, user_agent=self._user_agent, ) except Exception: 
return None array_info = list(array.get_arrays().items)[0] array.array_name = array_info.name array.array_id = array_info.id array._rest_version = array.get_rest_version() # Configure some extra tracing on requests made to the array if hasattr(array, '_request'): def trace_request(fn): def wrapper(*args, **kwargs): request_id = uuid.uuid4().hex LOG.debug("Making HTTP Request [%(id)s]:" " 'args=%(args)s kwargs=%(kwargs)s'", { "id": request_id, "args": args, "kwargs": kwargs, }) ret = fn(*args, **kwargs) LOG.debug( "Response for HTTP request [%(id)s]: '%(response)s'", { "id": request_id, "response": ret, } ) return ret return wrapper array._request = trace_request(array._request) LOG.debug("connected to %(array_name)s with REST API %(api_version)s", {"array_name": array.array_name, "api_version": array._rest_version}) return array @staticmethod def _get_pod_for_volume(volume_name): """Return the Purity pod name for the given volume. This works on the assumption that volume names are always prefixed with the pod name followed by '::' """ if '::' not in volume_name: # Not in a pod return None parts = volume_name.split('::') if len(parts) != 2 or not parts[0]: # Can't parse this.. Should never happen though, would mean a # break to the API contract with Purity. raise PureDriverException( _("Unable to determine pod for volume %s") % volume_name) return parts[0] @classmethod def _is_vol_in_pod(cls, pure_vol_name): return bool(cls._get_pod_for_volume(pure_vol_name) is not None) @staticmethod def _get_replication_type_from_vol_type(volume_type): if volume_type and volume_type.is_replicated(): specs = volume_type.get("extra_specs") if specs and EXTRA_SPECS_REPL_TYPE in specs: replication_type_spec = specs[EXTRA_SPECS_REPL_TYPE] # Do not validate settings, ignore invalid. if replication_type_spec == " async": return REPLICATION_TYPE_ASYNC elif replication_type_spec == " sync": return REPLICATION_TYPE_SYNC elif replication_type_spec == " trisync": return REPLICATION_TYPE_TRISYNC else: # if no type was specified but replication is enabled assume # that async replication is enabled return REPLICATION_TYPE_ASYNC return None def _get_volume_type_extra_spec(self, type_id, spec_key, possible_values=None, default_value=None): """Get extra spec value. If the spec value is not present in the input possible_values, then default_value will be returned. If the type_id is None, then default_value is returned. The caller must not consider scope and the implementation adds/removes scope. the scope used here is 'flasharray' e.g. key 'flasharray:vg_name' and so the caller must pass vg_name as an input ignoring the scope. :param type_id: volume type id :param spec_key: extra spec key :param possible_values: permitted values for the extra spec if known :param default_value: default value for the extra spec incase of an invalid value or if the entry does not exist :return: extra spec value """ if not type_id: return default_value spec_key = ('flasharray:%s') % spec_key spec_value = volume_types.get_volume_type_extra_specs(type_id).get( spec_key, False) if not spec_value: LOG.debug("Returning default spec value: %s.", default_value) return default_value if possible_values is None: return spec_value if spec_value in possible_values: LOG.debug("Returning spec value %s", spec_value) return spec_value LOG.debug("Invalid spec value: %s specified.", spec_value) def _get_qos_settings(self, volume_type): """Get extra_specs and qos_specs of a volume_type. This fetches the keys from the volume type. 
Anything set from qos_specs will override keys set from extra_specs """ # Deal with volume with no type qos = {} qos_specs_id = volume_type.get('qos_specs_id') specs = volume_type.get('extra_specs') # We prefer QoS specs associations to override # any existing extra-specs settings if qos_specs_id is not None: ctxt = context.get_admin_context() kvs = qos_specs.get_qos_specs(ctxt, qos_specs_id)['specs'] else: kvs = specs for key, value in kvs.items(): if key in self.PURE_QOS_KEYS: qos[key] = value if qos == {}: return None else: # Check set values are within limits iops_qos = int(qos.get('maxIOPS', 0)) bw_qos = int(qos.get('maxBWS', 0)) * MIN_BWS if iops_qos != 0 and not (MIN_IOPS <= iops_qos <= MAX_IOPS): msg = (_('maxIOPS QoS error. Must be more than ' '%(min_iops)s and less than %(max_iops)s') % {'min_iops': MIN_IOPS, 'max_iops': MAX_IOPS}) raise exception.InvalidQoSSpecs(message=msg) if bw_qos != 0 and not (MIN_BWS <= bw_qos <= MAX_BWS): msg = (_('maxBWS QoS error. Must be between ' '%(min_bws)s and %(max_bws)s') % {'min_bws': MIN_BWS, 'max_bws': MAX_BWS}) raise exception.InvalidQoSSpecs(message=msg) qos['maxIOPS'] = iops_qos qos['maxBWS'] = bw_qos qos['maxIOPS_per_GB'] = int(qos.get('maxIOPS_per_GB', 0)) qos['maxBWS_per_GB'] = int(qos.get('maxBWS_per_GB', 0)) * MIN_BWS return qos def _generate_purity_vol_name(self, volume): """Return the name of the volume Purity will use. This expects to be given a Volume OVO and not a volume dictionary. """ base_name = volume.name # Some OpenStack deployments, eg PowerVC, create a volume.name that # when appended with our '-cinder' string will exceed the maximum # volume name length for Pure, so here we left truncate the true volume # name before the opennstack volume_name_template affected it and # then put back the template format if len(base_name) > 56: actual_name = base_name[(len(CONF.volume_name_template) - 2):] base_name = CONF.volume_name_template % \ actual_name[-(56 - len(CONF.volume_name_template)):] repl_type = self._get_replication_type_from_vol_type( volume.volume_type) vgroup_type = self._get_volume_type_extra_spec(volume.volume_type_id, 'vg_name') if repl_type in [REPLICATION_TYPE_SYNC, REPLICATION_TYPE_TRISYNC]: if vgroup_type: raise exception.InvalidVolumeType( reason=_("Synchronously replicated volume group volumes " "are not supported")) else: base_name = self._replication_pod_name + "::" + base_name return base_name + "-cinder" def _get_vol_name(self, volume): """Return the name of the volume Purity will use.""" # Use the dictionary access style for compatibility, this works for # db or OVO volume objects too. return volume['provider_id'] def _get_snap_name(self, snapshot): """Return the name of the snapshot that Purity will use.""" return "%s.%s" % (self._get_vol_name(snapshot.volume), snapshot["name"]) def _group_potential_repl_types(self, pgroup): repl_types = set() for type in pgroup.volume_types: repl_type = self._get_replication_type_from_vol_type(type) repl_types.add(repl_type) return repl_types def _get_pgroup_name(self, pgroup): # check if the pgroup has any volume types that are sync rep enabled, # if so, we need to use a group name accounting for the ActiveCluster # pod. 
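# e.g. '<replication pod>::consisgroup-<group id>-cinder' rather than just 'consisgroup-<group id>-cinder'.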
base_name = "" if ((REPLICATION_TYPE_SYNC in self._group_potential_repl_types(pgroup)) or (REPLICATION_TYPE_TRISYNC in self._group_potential_repl_types(pgroup))): base_name = self._replication_pod_name + "::" return "%(base)sconsisgroup-%(id)s-cinder" % { 'base': base_name, 'id': pgroup.id} @staticmethod def _get_pgroup_snap_suffix(group_snapshot): return "cgsnapshot-%s-cinder" % group_snapshot['id'] @staticmethod def _get_group_id_from_snap(group_snap): # We don't really care what kind of group it is, if we are calling # this look for a group_id and fall back to using a consistencygroup_id id = None try: id = group_snap['group_id'] except AttributeError: pass if id is None: try: id = group_snap['consistencygroup_id'] except AttributeError: pass return id def _get_pgroup_snap_name(self, group_snapshot): """Return the name of the pgroup snapshot that Purity will use""" return "%s.%s" % (self._get_pgroup_name(group_snapshot.group), self._get_pgroup_snap_suffix(group_snapshot)) @staticmethod def _get_pgroup_vol_snap_name(pg_name, pgsnap_suffix, volume_name): if "::" in volume_name: volume_name = volume_name.split("::")[1] return "%(pgroup_name)s.%(pgsnap_suffix)s.%(volume_name)s" % { 'pgroup_name': pg_name, 'pgsnap_suffix': pgsnap_suffix, 'volume_name': volume_name, } def _get_pgroup_snap_name_from_snapshot(self, snapshot): """Return the name of the snapshot that Purity will use.""" group_snap = None if snapshot.group_snapshot: group_snap = snapshot.group_snapshot elif snapshot.cgsnapshot: group_snap = snapshot.cgsnapshot volume_name = self._get_vol_name(snapshot.volume) if "::" in volume_name: volume_name = volume_name.split("::")[1] pg_vol_snap_name = "%(group_snap)s.%(volume_name)s" % { 'group_snap': self._get_pgroup_snap_name(group_snap), 'volume_name': volume_name } return pg_vol_snap_name @staticmethod def _generate_purity_host_name(connector): """Return a valid Purity host name based on the name passed in.""" name = connector["host"] if "system uuid" in connector: system_id = str(connector["system uuid"]).replace("-", "") else: system_id = uuid.uuid4().hex if len(name) > 23: name = name[0:23] name = INVALID_CHARACTERS.sub("-", name) name = name.lstrip("-") return "{name}-{uuid}-cinder".format(name=name, uuid=system_id) @staticmethod def _connect_host_to_vol(array, host_name, vol_name): connection = None LOG.debug("Connecting volume %(vol)s to host %(host)s.", {"vol": vol_name, "host": host_name}) res = array.post_connections( host_names=[host_name], volume_names=[vol_name]) connection_obj = getattr(res, "items", None) if connection_obj: connection = list(connection_obj) if res.status_code == 400: if ERR_MSG_HOST_NOT_EXIST in res.errors[0].message: LOG.debug( 'Unable to attach volume to host: %s', res.errors[0].context ) raise PureRetryableException() with excutils.save_and_reraise_exception() as ctxt: ctxt.reraise = False if (res.status_code == 400 and ERR_MSG_ALREADY_EXISTS in res.errors[0].message): # Happens if the volume is already connected to the host. # Treat this as a success. 
ctxt.reraise = False LOG.debug("Volume connection already exists for Purity " "host with message: %s", res.errors[0].message) vol_data = list(array.get_volumes(names=[vol_name]).items) vol_id = vol_data[0].id connected_host = list( array.get_connections( volume_names=[vol_name], host_names=[host_name] ).items )[0] connection = [ { "host": {"name": host_name}, "host_group": {}, 'protocol_endpoint': {}, "volume": {"name": vol_name, "id": vol_id}, "lun": getattr(connected_host, "lun", None), "nsid": getattr(connected_host, "nsid", None), } ] if not connection: raise PureDriverException( reason=_("Unable to connect or find connection to host")) return connection @pure_driver_debug_trace def _sync_retype_enable(self, volume): if self._active_cluster_target_arrays: self._enable_sync_replication(self._get_current_array(), volume, volume.name) volume.provider_id = self._replication_pod_name + '::' \ + volume.name + '-cinder' model_update = {"replication_status": fields.ReplicationStatus.ENABLED, "metadata": {**volume.metadata, "array_volume_name": volume.provider_id} } return model_update else: LOG.error("Sync replication is not enabled on the array") # flake8: noqa: C901 @pure_driver_debug_trace def retype(self, context, volume, new_type, diff, host): """Retype from one volume type to another on the same backend. For a Pure Array there is currently no differentiation between types of volumes other than some being part of a protection group to be replicated for async, or part of a pod for sync replication. """ qos = None # TODO: Can remove this once new_type is a VolumeType OVO new_type = volume_type.VolumeType.get_by_name_or_id(context, new_type['id']) previous_vol_replicated = volume.is_replicated() new_vol_replicated = (new_type and new_type.is_replicated()) prev_repl_type = None new_repl_type = None source_vg = False target_vg = False # See if the type specifies the replication type. If we know it is # replicated but doesn't specify a type assume that it is async rep # for backwards compatibility. This applies to both old and new types if previous_vol_replicated: prev_repl_type = self._get_replication_type_from_vol_type( volume.volume_type) if new_vol_replicated: new_repl_type = self._get_replication_type_from_vol_type(new_type) if new_repl_type is None: new_repl_type = REPLICATION_TYPE_ASYNC # There are a few cases we care about, going from non-replicated to # replicated, from replicated to non-replicated, and switching # replication types. model_update = None if previous_vol_replicated and not new_vol_replicated: if prev_repl_type == REPLICATION_TYPE_ASYNC: # Remove from protection group. self._disable_async_replication(volume) model_update = { "replication_status": fields.ReplicationStatus.DISABLED } elif prev_repl_type in [REPLICATION_TYPE_SYNC, REPLICATION_TYPE_TRISYNC]: if prev_repl_type == REPLICATION_TYPE_TRISYNC: self._disable_trisync_replication( self._get_current_array(), volume ) self._disable_sync_replication(self._get_current_array(), volume, volume.name) volume.provider_id = volume.name + '-cinder' model_update = {"replication_status": fields.ReplicationStatus.DISABLED, "metadata": {**volume.metadata, "array_volume_name": volume.provider_id}} elif not previous_vol_replicated and new_vol_replicated: if new_repl_type == REPLICATION_TYPE_ASYNC: # Add to protection group. 
self._enable_async_replication(self._get_current_array(), volume) model_update = { "replication_status": fields.ReplicationStatus.ENABLED } elif new_repl_type in [REPLICATION_TYPE_SYNC, REPLICATION_TYPE_TRISYNC]: model_update = self._sync_retype_enable(volume) if new_repl_type == REPLICATION_TYPE_TRISYNC: self._enable_trisync_replication( self._get_current_array(), volume ) elif previous_vol_replicated and new_vol_replicated: if prev_repl_type == REPLICATION_TYPE_ASYNC: if new_repl_type in [REPLICATION_TYPE_SYNC, REPLICATION_TYPE_TRISYNC]: model_update = self._sync_retype_enable(volume) if new_repl_type == REPLICATION_TYPE_TRISYNC: self._enable_trisync_replication( self._get_current_array(), volume ) if prev_repl_type == REPLICATION_TYPE_SYNC: if new_repl_type == REPLICATION_TYPE_ASYNC: self._disable_sync_replication(self._get_current_array(), volume, volume.name) self._enable_async_replication(self._get_current_array(), volume) volume.provider_id = volume.name + '-cinder' model_update = {"replication_status": fields.ReplicationStatus.ENABLED, "metadata": {**volume.metadata, "array_volume_name": volume.provider_id}} elif new_repl_type == REPLICATION_TYPE_TRISYNC: # Add to trisync protection group self._enable_trisync_replication(self._get_current_array(), volume) if prev_repl_type == REPLICATION_TYPE_TRISYNC: if new_repl_type == REPLICATION_TYPE_ASYNC: self._disable_trisync_replication( self._get_current_array(), volume ) self._disable_sync_replication(self._get_current_array(), volume, volume.name) self._enable_async_replication(self._get_current_array(), volume) volume.provider_id = volume.name + '-cinder' model_update = {"replication_status": fields.ReplicationStatus.ENABLED, "metadata": {**volume.metadata, "array_volume_name": volume.provider_id}} elif new_repl_type == REPLICATION_TYPE_SYNC: # Remove from trisync protection group self._disable_trisync_replication( self._get_current_array(), volume ) current_array = self._get_current_array() # Now check if we are retyping to/from a type with volume groups if "/" in self._get_vol_name(volume): source_vg = self._get_vol_name(volume).split('/')[0] if self._get_volume_type_extra_spec(new_type['id'], 'vg_name'): target_vg = self._get_volume_type_extra_spec(new_type['id'], 'vg_name') if source_vg or target_vg: if target_vg: target_vg = INVALID_CHARACTERS.sub("-", target_vg) vg_iops = self._get_volume_type_extra_spec( new_type['id'], 'vg_maxIOPS', default_value=MAX_IOPS) vg_bws = self._get_volume_type_extra_spec( new_type['id'], 'vg_maxBWS', default_value=MAX_BWS) if not (MIN_IOPS <= int(vg_iops) <= MAX_IOPS): msg = (_('vg_maxIOPS QoS error. Must be more than ' '%(min_iops)s and less than %(max_iops)s') % {'min_iops': MIN_IOPS, 'max_iops': MAX_IOPS}) raise exception.InvalidQoSSpecs(message=msg) if not (MIN_BWS <= int(vg_bws) <= MAX_BWS): msg = (_('vg_maxBWS QoS error. 
Must be more than ' '%(min_bws)s and less than %(max_bws)s') % {'min_bws': MIN_BWS, 'max_bws': MAX_BWS}) raise exception.InvalidQoSSpecs(message=msg) self._create_volume_group_if_not_exist(current_array, target_vg, vg_iops, vg_bws) current_array.patch_volumes( names=[self._get_vol_name(volume)], volume=flasharray.VolumePatch( volume_group=flasharray.Reference( name=target_vg))) vol_name = self._get_vol_name(volume) if source_vg: target_vol_name = (target_vg + "/" + vol_name.split('/')[1]) else: target_vol_name = (target_vg + "/" + vol_name) model_update = { 'id': volume.id, 'provider_id': target_vol_name, 'metadata': {**volume.metadata, 'array_volume_name': target_vol_name, 'array_name': self._array.array_name} } # If we have empied a VG by retyping out of it then delete VG if source_vg: self._delete_vgroup_if_empty(current_array, source_vg) else: current_array.patch_volumes( names=[self._get_vol_name(volume)], volume=flasharray.VolumePatch( volume_group=flasharray.Reference( name=""))) target_vol_name = self._get_vol_name(volume).split('/')[1] model_update = { 'id': volume.id, 'provider_id': target_vol_name, 'metadata': {**volume.metadata, 'array_volume_name': target_vol_name, 'array_name': self._array.array_name} } if source_vg: self._delete_vgroup_if_empty(current_array, source_vg) return True, model_update # If we are moving to a volume type with QoS settings then # make sure the volume gets the correct new QoS settings. # This could mean removing existing QoS settings. qos = self._get_qos_settings(new_type) vol_name = self._generate_purity_vol_name(volume) if qos is not None: self.set_qos(current_array, vol_name, volume["size"], qos) else: current_array.patch_volumes(names=[vol_name], volume=flasharray.VolumePatch( qos=flasharray.Qos( iops_limit=MAX_IOPS, bandwidth_limit=MAX_BWS))) return True, model_update @pure_driver_debug_trace def _disable_async_replication(self, volume): """Disable replication on the given volume.""" current_array = self._get_current_array() vol_name = self._get_vol_name(volume) LOG.debug("Disabling replication for volume %(id)s residing on " "array %(backend_id)s.", {"id": volume["id"], "backend_id": current_array.backend_id}) pgdata = list(current_array.get_protection_groups_volumes( member_names=[vol_name]).items) pgs = [item['group']['name'] for item in pgdata] or None res = current_array.delete_protection_groups_volumes( group_names=pgs, member_names=[self._get_vol_name(volume)]) if res.status_code == 400: with excutils.save_and_reraise_exception() as ctxt: if ERR_MSG_COULD_NOT_BE_FOUND in res.errors[0].message: ctxt.reraise = False LOG.warning("Disable replication on volume failed: " "already disabled: %s", res.errors[0].message) else: LOG.error("Disable replication on volume failed with " "message: %s", res.errors[0].message) @pure_driver_debug_trace def failover_host(self, context, volumes, secondary_id=None, groups=None): """Failover to replication target. This function combines calls to failover() and failover_completed() to perform failover when Active/Active is not enabled. 
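        Roughly, this is equivalent to the following sketch (argument and
        return names as used by the Cinder failover API)::

            backend_id, vol_updates, grp_updates = self.failover(
                context, volumes, secondary_id, groups)
            self.failover_completed(context, backend_id)
            return backend_id, vol_updates, grp_updates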
""" active_backend_id, volume_update_list, group_update_list = ( self.failover(context, volumes, secondary_id, groups)) self.failover_completed(context, active_backend_id) return active_backend_id, volume_update_list, group_update_list @pure_driver_debug_trace def failover_completed(self, context, active_backend_id=None): """Failover to replication target.""" LOG.info('Driver failover completion started.') current = self._get_current_array() # This should not happen unless we receive the same RPC message twice if active_backend_id == current.backend_id: LOG.info('No need to switch replication backend, already using it') # Manager sets the active_backend to '' when secondary_id was default, # but the driver failover_host method calls us with "default" elif not active_backend_id or active_backend_id == 'default': if self._failed_over_primary_array is not None: LOG.info('Failing back to %s', self._failed_over_primary_array) self._swap_replication_state(current, self._failed_over_primary_array, failback=True) else: LOG.info('Failover not occured - secondary array ' 'cannot be same as primary') else: secondary = self._get_secondary(active_backend_id) LOG.info('Failing over to %s', secondary.backend_id) self._swap_replication_state(current, secondary) LOG.info('Driver failover completion completed.') @pure_driver_debug_trace def failover(self, context, volumes, secondary_id=None, groups=None): """Failover backend to a secondary array This action will not affect the original volumes in any way and it will stay as is. If a subsequent failover is performed we will simply overwrite the original (now unmanaged) volumes. """ if secondary_id == 'default': # We are going back to the 'original' driver config, just put # our current array back to the primary. if self._failed_over_primary_array: # If the "default" and current host are in an ActiveCluster # with volumes stretched between the two then we can put # the sync rep enabled volumes into available states, anything # else will go into an error state pending an admin to check # them and adjust states as appropriate. current_array = self._get_current_array(True) repl_type = current_array.replication_type is_in_ac = bool(repl_type == REPLICATION_TYPE_SYNC) model_updates = [] # We are only given replicated volumes, but any non sync rep # volumes should go into error upon doing a failback as the # async replication is not bi-directional. 
for vol in volumes: repl_type = self._get_replication_type_from_vol_type( vol.volume_type) if not (is_in_ac and repl_type == REPLICATION_TYPE_SYNC): model_updates.append({ 'volume_id': vol['id'], 'updates': { 'status': 'error', } }) return secondary_id, model_updates, [] else: msg = _('Unable to failback to "default", this can only be ' 'done after a failover has completed.') raise exception.InvalidReplicationTarget(message=msg) current_array = self._get_current_array(True) LOG.debug("Failover replication for array %(primary)s to " "%(secondary)s.", {"primary": current_array.backend_id, "secondary": secondary_id}) if secondary_id == current_array.backend_id: raise exception.InvalidReplicationTarget( reason=_("Secondary id can not be the same as primary array, " "backend_id = %(secondary)s.") % {"secondary": secondary_id} ) secondary_array = None pg_snap = None # used for async only if secondary_id: secondary_array = self._get_secondary(secondary_id) if secondary_array.replication_type in [REPLICATION_TYPE_ASYNC, REPLICATION_TYPE_SYNC]: pg_snap = self._get_latest_replicated_pg_snap( secondary_array, self._get_current_array().array_name, self._replication_pg_name ) else: LOG.debug('No secondary array id specified, checking all targets.') # Favor sync-rep targets options secondary_array = self._find_sync_failover_target() if not secondary_array: # Now look for an async one secondary_array, pg_snap = self._find_async_failover_target() # If we *still* don't have a secondary array it means we couldn't # determine one to use. Stop now. if not secondary_array: raise PureDriverException( reason=_("Unable to find viable secondary array from " "configured targets: %(targets)s.") % {"targets": str(self._replication_target_arrays)} ) LOG.debug("Starting failover from %(primary)s to %(secondary)s", {"primary": current_array.array_name, "secondary": secondary_array.array_name}) model_updates = [] if secondary_array.replication_type == REPLICATION_TYPE_ASYNC: model_updates = self._async_failover_host( volumes, secondary_array, pg_snap) elif secondary_array.replication_type == REPLICATION_TYPE_SYNC: model_updates = self._sync_failover_host(volumes, secondary_array) current_array = self._get_current_array(True) return secondary_array.backend_id, model_updates, [] @pure_driver_debug_trace def set_personality(self, array, host_name, personality): res = array.patch_hosts(names=[host_name], host=flasharray.HostPatch( personality=personality)) if res.status_code == 400: if ERR_MSG_HOST_NOT_EXIST in res.errors[0].message: # If the host disappeared out from under us that's # ok, we will just retry and snag a new host. LOG.debug('Unable to set host personality: %s', res.errors[0].message) raise PureRetryableException() return def _swap_replication_state(self, current_array, secondary_array, failback=False): # After failover we want our current array to be swapped for the # secondary array we just failed over to. 
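        # Sketch with hypothetical backend ids A (current primary) and B (the
        # secondary we failed over to): afterwards self._array is B,
        # _failed_over_primary_array is A, B is dropped from the target lists
        # and, for sync rep, A takes its place as a target.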
self._failed_over_primary_array = current_array # Remove the new primary from our secondary targets if secondary_array in self._replication_target_arrays: self._replication_target_arrays.remove(secondary_array) # For async, if we're doing a failback then add the old primary back # into the replication list if failback: self._replication_target_arrays.append(current_array) self._is_replication_enabled = True self._failed_over_primary_array = None # If its sync rep then swap the two in their lists since it is a # bi-directional setup, if the primary is still OK or comes back # it can continue being used as a secondary target until a 'failback' # occurs. This is primarily important for "uniform" environments with # attachments to both arrays. We may need to adjust flags on the # primary array object to lock it into one type of replication. if secondary_array.replication_type == REPLICATION_TYPE_SYNC: self._is_active_cluster_enabled = True self._is_replication_enabled = True if secondary_array in self._active_cluster_target_arrays: self._active_cluster_target_arrays.remove(secondary_array) current_array.replication_type = REPLICATION_TYPE_SYNC self._replication_target_arrays.append(current_array) self._active_cluster_target_arrays.append(current_array) else: # If the target is not configured for sync rep it means it isn't # part of the ActiveCluster and we need to reflect this in our # capabilities. self._is_active_cluster_enabled = False self._is_replication_enabled = True if secondary_array.uniform: if secondary_array in self._uniform_active_cluster_target_arrays: self._uniform_active_cluster_target_arrays.remove( secondary_array) current_array.uniform = True self._uniform_active_cluster_target_arrays.append(current_array) self._set_current_array(secondary_array) def _does_pgroup_exist(self, array, pgroup_name): """Return True/False""" pgroupres = array.get_protection_groups( names=[pgroup_name]) if pgroupres.status_code == 200: return True else: with excutils.save_and_reraise_exception() as ctxt: if ERR_MSG_NOT_EXIST in pgroupres.errors[0].message: ctxt.reraise = False return False # Any unexpected exception to be handled by caller. @pure_driver_debug_trace @utils.retry(PureDriverException, REPL_SETTINGS_PROPAGATE_RETRY_INTERVAL, REPL_SETTINGS_PROPAGATE_MAX_RETRIES) def _wait_until_target_group_setting_propagates( self, target_array, pgroup_name_on_target): # Wait for pgroup to show up on target array. if self._does_pgroup_exist(target_array, pgroup_name_on_target): return else: raise PureDriverException(message=_('Protection Group not ready.')) @pure_driver_debug_trace @utils.retry(PureDriverException, REPL_SETTINGS_PROPAGATE_RETRY_INTERVAL, REPL_SETTINGS_PROPAGATE_MAX_RETRIES) def _wait_until_source_array_allowed(self, source_array, pgroup_name): result = list(source_array.get_protection_groups_targets( group_names=[pgroup_name]).items)[0] if result.allowed: return else: raise PureDriverException(message=_('Replication not ' 'allowed yet.')) def _get_pgroup_name_on_target(self, source_array_name, pgroup_name): return "%s:%s" % (source_array_name, pgroup_name) @pure_driver_debug_trace def _setup_replicated_pods(self, primary, ac_secondaries, pod_name): # Make sure the pod exists self._create_pod_if_not_exist(primary, pod_name) # Stretch it across arrays we have configured, assume all secondary # arrays given to this method are configured for sync rep with active # cluster enabled. 
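        # As a rough example, with a pod named 'cinder-pod' (the driver
        # default) and a hypothetical secondary 'array-b', each iteration
        # below issues post_pods_arrays(group_names=['cinder-pod'],
        # member_names=['array-b']).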
for target_array in ac_secondaries: res = primary.post_pods_arrays( group_names=[pod_name], member_names=[target_array.array_name]) if res.status_code == 400: with excutils.save_and_reraise_exception() as ctxt: if ( ERR_MSG_ALREADY_EXISTS in res.errors[0].message or ERR_MSG_ARRAY_LIMIT in res.errors[0].message ): ctxt.reraise = False LOG.info("Skipping add array %(target_array)s to pod" " %(pod_name)s since it's already added.", {"target_array": target_array.array_name, "pod_name": pod_name}) @pure_driver_debug_trace def _setup_replicated_pgroups(self, primary, secondaries, pg_name, replication_interval, retention_policy): self._create_protection_group_if_not_exist( primary, pg_name) # Apply retention policies to a protection group. # These retention policies will be applied on the replicated # snapshots on the target array. primary.patch_protection_groups( names=[pg_name], protection_group=flasharray.ProtectionGroup( target_retention=retention_policy)) # Configure replication propagation frequency on a # protection group. primary.patch_protection_groups( names=[pg_name], protection_group=flasharray.ProtectionGroup( replication_schedule=flasharray.ReplicationSchedule( frequency=replication_interval))) for target_array in secondaries: # Configure PG to replicate to target_array. res = primary.post_protection_groups_targets( group_names=[pg_name], member_names=[target_array.array_name]) if res.status_code == 400: with excutils.save_and_reraise_exception() as ctxt: if ERR_MSG_ALREADY_INCLUDES in res.errors[0].message: ctxt.reraise = False LOG.info("Skipping add target %(target_array)s" " to protection group %(pgname)s" " since it's already added.", {"target_array": target_array.array_name, "pgname": pg_name}) # Wait until "Target Group" setting propagates to target_array. pgroup_name_on_target = self._get_pgroup_name_on_target( primary.array_name, pg_name) if self._is_trisync_enabled: pgroup_name_on_target = pg_name.replace("::", ":") for target_array in secondaries: self._wait_until_target_group_setting_propagates( target_array, pgroup_name_on_target) # Configure the target_array to allow replication from the # PG on source_array. res = target_array.patch_protection_groups_targets( group_names=[pgroup_name_on_target], target=flasharray.TargetProtectionGroupPostPatch( allowed=True)) if res.status_code == 400: with excutils.save_and_reraise_exception() as ctxt: if ERR_MSG_ALREADY_ALLOWED in res.errors[0].message: ctxt.reraise = False LOG.info("Skipping allow pgroup %(pgname)s on " "target array %(target_array)s since " "it is already allowed.", {"pgname": pg_name, "target_array": target_array.array_name}) # Wait until source array acknowledges previous operation. self._wait_until_source_array_allowed(primary, pg_name) # Start replication on the PG. primary.patch_protection_groups( names=[pg_name], protection_group=flasharray.ProtectionGroup( replication_schedule=flasharray.ReplicationSchedule( enabled=True))) @pure_driver_debug_trace def _generate_replication_retention(self): """Generates replication retention settings in Purity compatible format An example of the settings: target_all_for = 14400 (i.e. 4 hours) target_per_day = 6 target_days = 4 The settings above configure the target array to retain 4 hours of the most recent snapshots. 
After the most recent 4 hours, the target will choose 4 snapshots per day from the previous 6 days for retention :return: a dictionary representing replication retention settings """ replication_retention = flasharray.RetentionPolicy( all_for_sec=self._replication_retention_short_term, per_day=self._replication_retention_long_term_per_day, days=self._replication_retention_long_term ) return replication_retention @pure_driver_debug_trace def _get_latest_replicated_pg_snap(self, target_array, source_array_name, pgroup_name): # Get all protection group snapshots where replication has completed. # Sort into reverse order to get the latest. snap_name = "%s:%s" % (source_array_name, pgroup_name) LOG.debug("Looking for snap %(snap)s on array id %(array_id)s", {"snap": snap_name, "array_id": target_array.array_id}) try: pg_snaps = list( target_array.get_protection_group_snapshots_transfer( names=[snap_name], destroyed=False, filter='progress="1.0"', sort=["started-"]).items) pg_snap = pg_snaps[0] if pg_snaps else None except AttributeError: pg_snap = None LOG.debug("Selecting snapshot %(pg_snap)s for failover.", {"pg_snap": pg_snap}) return pg_snap @pure_driver_debug_trace def _create_pod_if_not_exist(self, source_array, name): if not name: raise PureDriverException( reason=_("Empty string passed for Pod name.")) res = source_array.post_pods(names=[name], pod=flasharray.PodPost()) if res.status_code == 400: with excutils.save_and_reraise_exception() as ctxt: if ERR_MSG_ALREADY_EXISTS in res.errors[0].message: # Happens if the pod already exists ctxt.reraise = False LOG.warning("Skipping creation of pod %s since it " "already exists.", name) return if list(source_array.get_pods( names=[name]).items)[0].destroyed: ctxt.reraise = False LOG.warning("Pod %s is deleted but not" " eradicated - will recreate.", name) source_array.delete_pods(names=[name]) self._create_pod_if_not_exist(source_array, name) else: if self._array.safemode: # Now we check to ensure that the created pod does not have a # safemode protection group attached to it as this is not # supported by Cinder safemode_pg = list( source_array.get_container_default_protections( names=[name]).items)[0].default_protections if safemode_pg: pgname = safemode_pg[0].name res = source_array.patch_container_default_protections( names=[name], container_default_protection=( flasharray.ContainerDefaultProtection( default_protections=[]))) if res.status_code != 200: LOG.warning("Failed to remove Default Protection " "Container: %s", res.errors[0]) else: source_array.patch_protection_groups( names=[pgname], protection_group=flasharray.ProtectionGroup( destroyed=True)) source_array.delete_protection_groups( names=[pgname]) @pure_driver_debug_trace def _create_volume_group_if_not_exist(self, source_array, vgname, vg_iops, vg_bws): res = source_array.post_volume_groups( names=[vgname], volume_group=flasharray.VolumeGroupPost( qos=flasharray.Qos( bandwidth_limit=vg_bws, iops_limit=vg_iops))) if res.status_code == 400: with excutils.save_and_reraise_exception() as ctxt: if ERR_MSG_ALREADY_EXISTS in res.errors[0].message: # Happens if the vg already exists ctxt.reraise = False LOG.warning("Skipping creation of vg %s since it " "already exists. 
Resetting QoS", vgname) res = source_array.patch_volume_groups( names=[vgname], volume_group=flasharray.VolumeGroupPatch( qos=flasharray.Qos( bandwidth_limit=vg_bws, iops_limit=vg_iops))) if res.status_code == 400: with excutils.save_and_reraise_exception() as ctxt: if ERR_MSG_NOT_EXIST in res.errors[0].message: ctxt.reraise = False LOG.warning("Unable to change %(vgroup)s QoS, " "error message: %(error)s", {"vgroup": vgname, "error": res.errors[0].message}) return if list(source_array.get_volume_groups( names=[vgname]).items)[0].destroyed: ctxt.reraise = False LOG.warning("Volume group %s is deleted but not" " eradicated - will recreate.", vgname) source_array.delete_volume_groups(names=[vgname]) self._create_volume_group_if_not_exist(source_array, vgname, vg_iops, vg_bws) @pure_driver_debug_trace def _create_protection_group_if_not_exist(self, source_array, pgname): if not pgname: raise PureDriverException( reason=_("Empty string passed for PG name.")) res = source_array.post_protection_groups(names=[pgname]) if res.status_code == 400: with excutils.save_and_reraise_exception() as ctxt: if ERR_MSG_ALREADY_EXISTS in res.errors[0].message: # Happens if the PG already exists ctxt.reraise = False LOG.warning("Skipping creation of PG %s since it " "already exists.", pgname) # We assume PG has already been setup with correct # replication settings. return if list(source_array.get_protection_groups( names=[pgname]).items)[0].destroyed: ctxt.reraise = False LOG.warning("Protection group %s is deleted but not" " eradicated - will recreate.", pgname) source_array.delete_protection_groups(names=[pgname]) self._create_protection_group_if_not_exist(source_array, pgname) def _find_async_failover_target(self): if not self._replication_target_arrays: raise PureDriverException( reason=_("Unable to find failover target, no " "secondary targets configured.")) secondary_array = None pg_snap = None for array in self._replication_target_arrays: if array.replication_type != REPLICATION_TYPE_ASYNC: continue try: secondary_array = array pg_snap = self._get_latest_replicated_pg_snap( secondary_array, self._get_current_array().array_name, self._replication_pg_name ) if pg_snap: break except Exception: LOG.exception('Error finding replicated pg snapshot ' 'on %(secondary)s.', {'secondary': array.backend_id}) secondary_array = None if not pg_snap: raise PureDriverException( reason=_("Unable to find viable pg snapshot to use for " "failover on selected secondary array: %(id)s.") % {"id": secondary_array.backend_id if secondary_array else None} ) return secondary_array, pg_snap def _get_secondary(self, secondary_id): for array in self._replication_target_arrays: if array.backend_id == secondary_id: return array raise exception.InvalidReplicationTarget( reason=_("Unable to determine secondary_array from" " supplied secondary: %(secondary)s.") % {"secondary": secondary_id} ) def _find_sync_failover_target(self): secondary_array = None if not self._active_cluster_target_arrays: LOG.warning("Unable to find failover target, no " "sync rep secondary targets configured.") return secondary_array for array in self._active_cluster_target_arrays: secondary_array = array # Ensure the pod is in a good state on the array res = secondary_array.get_pods( names=[self._replication_pod_name]) if res.status_code == 200: pod_info = list(res.items)[0] for pod_array in range(0, len(pod_info.arrays)): # Compare against Purity ID's if pod_info.arrays[pod_array].id == \ secondary_array.array_id: if pod_info.arrays[pod_array].status == 
"online": # Success! Use this array. break else: secondary_array = None else: LOG.warning("Failed to get pod status for secondary array " "%(id)s: %(err)s", { "id": secondary_array.backend_id, "err": res.errors[0].message, }) secondary_array = None return secondary_array def _async_failover_host(self, volumes, secondary_array, pg_snap): # Try to copy the flasharray as close as we can. secondary_info = list(secondary_array.get_arrays().items)[0] if version.parse(secondary_info.version) < version.parse('6.3.4'): secondary_safemode = False else: secondary_safemode = True volume_snaps = list(secondary_array.get_volume_snapshots( filter="name='" + pg_snap.name + ".*'" ).items) # We only care about volumes that are in the list we are given. vol_names = set() for vol in volumes: vol_names.add(self._get_vol_name(vol)) for snap in range(0, len(volume_snaps)): vol_name = volume_snaps[snap].name.split('.')[-1] if vol_name in vol_names: vol_names.remove(vol_name) LOG.debug('Creating volume %(vol)s from replicated snapshot ' '%(snap)s', {'vol': vol_name, 'snap': volume_snaps[snap].name}) if "/" in vol_name: # We have to create the target vgroup with assosiated QoS vg_iops = self._get_volume_type_extra_spec( vol.volume_type_id, 'vg_maxIOPS', default_value=MAX_IOPS) vg_bws = self._get_volume_type_extra_spec( vol.volume_type_id, 'vg_maxBWS', default_value=MAX_BWS) self._create_volume_group_if_not_exist( secondary_array, vol_name.split("/")[0], int(vg_iops), int(vg_bws)) if secondary_safemode: secondary_array.post_volumes( with_default_protection=False, volume=flasharray.VolumePost( source=flasharray.Reference( name=volume_snaps[snap].name) ), names=[vol_name], overwrite=True) else: secondary_array.post_volumes( volume=flasharray.VolumePost( source=flasharray.Reference( name=volume_snaps[snap].name) ), names=[vol_name], overwrite=True) else: LOG.debug('Ignoring unmanaged volume %(vol)s from replicated ' 'snapshot %(snap)s.', {'vol': vol_name, 'snap': volume_snaps[snap].name}) # The only volumes remaining in the vol_names set have been left behind # on the array and should be considered as being in an error state. model_updates = [] for vol in volumes: if self._get_vol_name(vol) in vol_names: model_updates.append({ 'volume_id': vol['id'], 'updates': { 'status': 'error', } }) else: repl_status = fields.ReplicationStatus.FAILED_OVER model_updates.append({ 'volume_id': vol['id'], 'updates': { 'replication_status': repl_status, } }) return model_updates def _sync_failover_host(self, volumes, secondary_array): """Perform a failover for hosts in an ActiveCluster setup There isn't actually anything that needs to be changed, only update the volume status to distinguish the survivors.. """ array_volumes = list(secondary_array.get_volumes( filter="pod.name='" + self._replication_pod_name + "'").items) replicated_vol_names = set() for vol in array_volumes: replicated_vol_names.add(vol.name) model_updates = [] for vol in volumes: if self._get_vol_name(vol) not in replicated_vol_names: model_updates.append({ 'volume_id': vol['id'], 'updates': { 'status': fields.VolumeStatus.ERROR, } }) else: repl_status = fields.ReplicationStatus.FAILED_OVER model_updates.append({ 'volume_id': vol['id'], 'updates': { 'replication_status': repl_status, } }) return model_updates def _get_wwn(self, pure_vol_name): """Return the WWN based on the volume's serial number The WWN is composed of the constant '36', the OUI for Pure, followed by '0', and finally the serial number. 
""" array = self._get_current_array() volume_info = list(array.get_volumes(names=[pure_vol_name]).items)[0] wwn = '3624a9370' + volume_info.serial return wwn.lower() def _get_current_array(self, init=False): if (not init and self._is_active_cluster_enabled and not self._failed_over_primary_array): res = self._array.get_pods(names=[self._replication_pod_name]) if res.status_code == 200: pod_info = list(res.items)[0] for target_array in self._active_cluster_target_arrays: LOG.info("Checking target array %s...", target_array.array_name) status_ok = False for pod_array in range(0, len(pod_info.arrays)): if pod_info.arrays[pod_array].id == \ target_array.array_id: if pod_info.arrays[pod_array].status == \ 'online': status_ok = True break if not status_ok: LOG.warning("Target array is offline. Volume " "replication in unknown state. Check " "replication links and array state.") else: LOG.warning("self.get_pod failed with" " message: %(msg)s", {"msg": res.errors[0].message}) raise PureDriverException( reason=_("No functional arrays available")) return self._array def _set_current_array(self, array): self._array = array @pure_driver_debug_trace def _get_valid_ports(self, array): ports = [] res = array.get_controllers(filter="status='ready'") if res.status_code != 200: with excutils.save_and_reraise_exception() as ctxt: ctxt.reraise = False LOG.warning("No live controllers found: %s", res.errors[0]) return ports else: live_controllers = list(res.items) if len(live_controllers) != 0: controllers = [controller.name for controller in live_controllers] for controller in controllers: ports += list( array.get_ports(filter="name='" + controller + ".*'").items ) lacps = list( array.get_network_interfaces( filter=("eth.subtype='lacp_bond'" or "eth.subtype='vif'") ).items ) if lacps: for lacp in range(0, len(lacps)): ports += list( array.get_ports( names=[lacps[lacp].name.upper()] ).items ) return ports @pure_driver_debug_trace def _untag_volume(self, volume_name): array = self._get_current_array() array.delete_volumes_tags(namespace=[TAG_NAMESPACE], resource_names=[volume_name]) @pure_driver_debug_trace def _tag_volume( self, volume_name: str, instance=_INSTANCE_SENTINEL, vol_type=_VOLTYPE_SENTINEL, project=_PROJECT_SENTINEL, namespace: str = TAG_NAMESPACE, data_store: str = "Direct Access", ): """Attach a batch of tags to a volume. :param array: flasharray client/connection :param volume_name: name of volume to tag :param instance: VM ID value :param vol_type: Volume type value :param namespace: Tag namespace (default from constant) :param data_store: Value for "DataStoreType" tag :return: Response from put_volumes_tags_batch() Only include VolType if vol_type is explicitly provided. Passing vol_type=None will set the tag's value to None; omitting vol_type entirely leaves VolType unchanged. 
""" array = self._get_current_array() pairs = [] # Handle instance taif instance is not _instance_sentinel: if instance is not _INSTANCE_SENTINEL: pairs.append(("VmId", instance)) # Handle project tag if project is not _PROJECT_SENTINEL: pairs.append(("ProjectId", project)) # Handle vol_type tag if vol_type is not _VOLTYPE_SENTINEL: pairs.append(("VolType", vol_type)) # Always include DataStoreType pairs.append(("DataStoreType", data_store)) tags = [ flasharray.TagBatch(key=k, value=v, namespace=namespace, copyable=False) for k, v in pairs ] LOG.debug("tags: %s", tags) return array.put_volumes_tags_batch(tag=tags, resource_names=[volume_name]) def _get_attachments(self, volume): context = volume._context volume_obj = objects.Volume.get_by_id(context, volume.id) vol_type = "Boot" if volume_obj["bootable"] else "Data" attachments = volume_obj.volume_attachment instance_id = None for attachment in attachments: if attachment.get("instance_uuid"): instance_id = attachment["instance_uuid"] break return vol_type, instance_id @interface.volumedriver class PureISCSIDriver(PureBaseVolumeDriver, san.SanISCSIDriver): """OpenStack Volume Driver to support Pure Storage FlashArray. This version of the driver enables the use of iSCSI for the underlying storage connectivity with the FlashArray. """ VERSION = "21.0.iscsi" def __init__(self, *args, **kwargs): execute = kwargs.pop("execute", utils.execute) super(PureISCSIDriver, self).__init__(execute=execute, *args, **kwargs) self._storage_protocol = constants.ISCSI def _get_host(self, array, connector, remote=False): """Return dict describing existing Purity host object or None.""" if remote: hosts = list( getattr( array.get_hosts( filter="iqns='" + connector["initiator"] + "' and not is_local" ), "items", [] ) ) else: hosts = list( getattr( array.get_hosts( filter="iqns='" + connector["initiator"] + "' and is_local" ), "items", [] ) ) return hosts @pure_driver_debug_trace def initialize_connection(self, volume, connector): """Allow connection to connector and return connection info.""" pure_vol_name = self._get_vol_name(volume) target_arrays = [self._get_current_array()] if (self._is_vol_in_pod(pure_vol_name) and self._is_active_cluster_enabled and not self._failed_over_primary_array): target_arrays += self._uniform_active_cluster_target_arrays vol_type, instance_id = self._get_attachments(volume) if instance_id: tags = self._tag_volume(volume_name=pure_vol_name, vol_type=vol_type, instance=instance_id, project=volume.project_id) else: tags = self._tag_volume(volume_name=pure_vol_name, vol_type=vol_type, project=volume.project_id) LOG.debug("Volume tags added: %s", tags) chap_username = None chap_password = None if self.configuration.use_chap_auth: (chap_username, chap_password) = self._get_chap_credentials( connector['host'], connector["initiator"]) targets = [] for array in target_arrays: connection = self._connect(array, pure_vol_name, connector, chap_username, chap_password) if not connection[0]['lun']: # Swallow any exception, just warn and continue LOG.warning("self._connect failed.") continue target_ports = self._get_target_iscsi_ports(array) targets.append({ "connection": connection, "ports": target_ports, }) properties = self._build_connection_properties(targets) properties["data"]["wwn"] = self._get_wwn(pure_vol_name) if self.configuration.use_chap_auth: properties["data"]["auth_method"] = "CHAP" properties["data"]["auth_username"] = chap_username properties["data"]["auth_password"] = chap_password return properties def 
_build_connection_properties(self, targets): props = { "driver_volume_type": "iscsi", "data": { "target_discovered": False, "discard": True, "addressing_mode": brick_constants.SCSI_ADDRESSING_SAM2, }, } if self.configuration.pure_iscsi_cidr_list: iscsi_cidrs = self.configuration.pure_iscsi_cidr_list if self.configuration.pure_iscsi_cidr != "0.0.0.0/0": LOG.warning("pure_iscsi_cidr was ignored as " "pure_iscsi_cidr_list is set") else: iscsi_cidrs = [self.configuration.pure_iscsi_cidr] check_iscsi_cidrs = [ ipaddress.ip_network(item) for item in iscsi_cidrs ] target_luns = [] target_iqns = [] target_portals = [] # Aggregate all targets together if they're in the allowed CIDR. We may # end up with different LUNs for different target iqn/portal sets (ie. # it could be a unique LUN for each FlashArray) for target in range(0, len(targets)): port_iter = iter(targets[target]["ports"]) for port in port_iter: # Check to ensure that the portal IP is in the iSCSI target # CIDR before adding it target_portal = port.portal portal, p_port = target_portal.rsplit(':', 1) portal = portal.strip('[]') check_ip = ipaddress.ip_address(portal) for check_cidr in check_iscsi_cidrs: if check_ip in check_cidr: target_luns.append( targets[target]["connection"][0].lun) target_iqns.append(port.iqn) target_portals.append(target_portal) LOG.info("iSCSI target portals that match CIDR range: '%s'", target_portals) LOG.info("iSCSI target IQNs that match CIDR range: '%s'", target_iqns) # If we have multiple ports always report them. if target_luns and target_iqns and target_portals: props["data"]["target_luns"] = target_luns props["data"]["target_iqns"] = target_iqns props["data"]["target_portals"] = target_portals return props def _get_target_iscsi_ports(self, array): """Return list of iSCSI-enabled port descriptions.""" ports = self._get_valid_ports(array) iscsi_ports = [port for port in ports if getattr(port, "iqn", None)] if not iscsi_ports: raise PureDriverException( reason=_("No iSCSI-enabled ports on target array.")) return iscsi_ports @staticmethod def _generate_chap_secret(): return volume_utils.generate_password() def _get_chap_secret_from_init_data(self, initiator): data = self.driver_utils.get_driver_initiator_data(initiator) if data: for d in data: if d["key"] == CHAP_SECRET_KEY: return d["value"] return None def _get_chap_credentials(self, host, initiator): username = host password = self._get_chap_secret_from_init_data(initiator) if not password: password = self._generate_chap_secret() success = self.driver_utils.insert_driver_initiator_data( initiator, CHAP_SECRET_KEY, password) if not success: # The only reason the save would have failed is if someone # else (read: another thread/instance of the driver) set # one before we did. In that case just do another query. password = self._get_chap_secret_from_init_data(initiator) return username, password @utils.retry(PureRetryableException, retries=HOST_CREATE_MAX_RETRIES) def _connect(self, array, vol_name, connector, chap_username, chap_password): """Connect the host and volume; return dict describing connection.""" iqn = connector["initiator"] hosts = self._get_host(array, connector, remote=False) host = hosts[0] if len(hosts) > 0 else None if host: host_name = host.name LOG.info("Re-using existing purity host %(host_name)r", {"host_name": host_name}) if self.configuration.use_chap_auth: if not GENERATED_NAME.match(host_name): LOG.error("Purity host %(host_name)s is not managed " "by Cinder and can't have CHAP credentials " "modified. 
Remove IQN %(iqn)s from the host " "to resolve this issue.", {"host_name": host_name, "iqn": connector["initiator"]}) raise PureDriverException( reason=_("Unable to re-use a host that is not " "managed by Cinder with use_chap_auth=True,")) elif chap_username is None or chap_password is None: LOG.error("Purity host %(host_name)s is managed by " "Cinder but CHAP credentials could not be " "retrieved from the Cinder database.", {"host_name": host_name}) raise PureDriverException( reason=_("Unable to re-use host with unknown CHAP " "credentials configured.")) else: personality = self.configuration.safe_get('pure_host_personality') host_name = self._generate_purity_host_name(connector) LOG.info("Creating host object %(host_name)r with IQN:" " %(iqn)s.", {"host_name": host_name, "iqn": iqn}) res = array.post_hosts(names=[host_name], host=flasharray.HostPost(iqns=[iqn])) if res.status_code == 400: if (ERR_MSG_ALREADY_EXISTS in res.errors[0].message or ERR_MSG_ALREADY_IN_USE in res.errors[0].message): # If someone created it before we could just retry, we will # pick up the new host. LOG.debug('Unable to create host: %s', res.errors[0].message) raise PureRetryableException() if personality: self.set_personality(array, host_name, personality) if self.configuration.use_chap_auth: res = array.patch_hosts(names=[host_name], host=flasharray.HostPatch( chap=flasharray.Chap( host_user=chap_username, host_password=chap_password))) if (res.status_code == 400 and ERR_MSG_HOST_NOT_EXIST in res.errors[0].message): # If the host disappeared out from under us that's ok, # we will just retry and snag a new host. LOG.debug('Unable to set CHAP info: %s', res.errors[0].message) raise PureRetryableException() # TODO: Ensure that the host has the correct preferred # arrays configured for it. connection = self._connect_host_to_vol(array, host_name, vol_name) return connection @interface.volumedriver class PureFCDriver(PureBaseVolumeDriver, driver.FibreChannelDriver): """OpenStack Volume Driver to support Pure Storage FlashArray. This version of the driver enables the use of Fibre Channel for the underlying storage connectivity with the FlashArray. It fully supports the Cinder Fibre Channel Zone Manager. """ VERSION = "21.0.fc" def __init__(self, *args, **kwargs): execute = kwargs.pop("execute", utils.execute) super(PureFCDriver, self).__init__(execute=execute, *args, **kwargs) self._storage_protocol = constants.FC self._lookup_service = fczm_utils.create_lookup_service() def _get_host(self, array, connector, remote=False): """Return dict describing existing Purity host object or None.""" if remote: for wwn in connector["wwpns"]: hosts = list( getattr( array.get_hosts( filter="wwns='" + wwn.upper() + "' and not is_local" ), "items", [] ) ) else: for wwn in connector["wwpns"]: hosts = list( getattr( array.get_hosts( filter="wwns='" + wwn.upper() + "' and is_local" ), "items", [] ) ) return hosts def _get_array_wwns(self, array): """Return list of wwns from the array Ensure that only true scsi FC ports are selected and not any that are enabled for NVMe-based FC with an associated NQN. 
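        For example (hypothetical port records), a port with
        wwn='52:4A:93:7A:00:00:11:01' and no nqn is returned as
        '524A937A00001101', while a port that also carries an nqn is treated
        as NVMe/FC and skipped.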
""" ports = self._get_valid_ports(array) valid_ports = [port.wwn.replace(":", "") for port in ports if getattr( port, "wwn", None) and not getattr(port, "nqn", None)] return valid_ports @pure_driver_debug_trace def initialize_connection(self, volume, connector): """Allow connection to connector and return connection info.""" pure_vol_name = self._get_vol_name(volume) target_arrays = [self._get_current_array()] if (self._is_vol_in_pod(pure_vol_name) and self._is_active_cluster_enabled and not self._failed_over_primary_array): target_arrays += self._uniform_active_cluster_target_arrays vol_type, instance_id = self._get_attachments(volume) if instance_id: tags = self._tag_volume(volume_name=pure_vol_name, vol_type=vol_type, instance=instance_id, project=volume.project_id) else: tags = self._tag_volume(volume_name=pure_vol_name, vol_type=vol_type, project=volume.project_id) LOG.debug("Volume tags added: %s", tags) target_luns = [] target_wwns = [] for array in target_arrays: connection = self._connect(array, pure_vol_name, connector) if not connection[0].lun: # Swallow any exception, just warn and continue LOG.warning("self._connect failed.") continue array_wwns = self._get_array_wwns(array) for wwn in array_wwns: target_wwns.append(wwn) target_luns.append(connection[0].lun) # Build the zoning map based on *all* wwns, this could be multiple # arrays connecting to the same host with a stretched volume. init_targ_map = self._build_initiator_target_map(target_wwns, connector) properties = { "driver_volume_type": "fibre_channel", "data": { "target_discovered": True, "target_lun": target_luns[0], # For backwards compatibility "target_luns": target_luns, "target_wwn": target_wwns, "target_wwns": target_wwns, "initiator_target_map": init_targ_map, "discard": True, "addressing_mode": brick_constants.SCSI_ADDRESSING_SAM2, } } properties["data"]["wwn"] = self._get_wwn(pure_vol_name) fczm_utils.add_fc_zone(properties) return properties @utils.retry(PureRetryableException, retries=HOST_CREATE_MAX_RETRIES) def _connect(self, array, vol_name, connector): """Connect the host and volume; return dict describing connection.""" wwns = connector["wwpns"] hosts = self._get_host(array, connector, remote=False) host = hosts[0] if len(hosts) > 0 else None if host: host_name = host.name LOG.info("Re-using existing purity host %(host_name)r", {"host_name": host_name}) else: personality = self.configuration.safe_get('pure_host_personality') host_name = self._generate_purity_host_name(connector) LOG.info("Creating host object %(host_name)r with WWN:" " %(wwn)s.", {"host_name": host_name, "wwn": wwns}) res = array.post_hosts(names=[host_name], host=flasharray.HostPost(wwns=wwns)) if (res.status_code == 400 and (ERR_MSG_ALREADY_EXISTS in res.errors[0].message or ERR_MSG_ALREADY_IN_USE in res.errors[0].message)): # If someone created it before we could just retry, we will # pick up the new host. LOG.debug('Unable to create host: %s', res.errors[0].message) raise PureRetryableException() if personality: self.set_personality(array, host_name, personality) # TODO: Ensure that the host has the correct preferred # arrays configured for it. return self._connect_host_to_vol(array, host_name, vol_name) def _build_initiator_target_map(self, target_wwns, connector): """Build the target_wwns and the initiator target map.""" init_targ_map = {} if self._lookup_service: # use FC san lookup to determine which NSPs to use # for the new VLUN. 
dev_map = self._lookup_service.get_device_mapping_from_network( connector['wwpns'], target_wwns) for fabric_name in dev_map: fabric = dev_map[fabric_name] for initiator in fabric['initiator_port_wwn_list']: if initiator not in init_targ_map: init_targ_map[initiator] = [] init_targ_map[initiator] += fabric['target_port_wwn_list'] init_targ_map[initiator] = list(set( init_targ_map[initiator])) else: init_targ_map = dict.fromkeys(connector["wwpns"], target_wwns) return init_targ_map @pure_driver_debug_trace def terminate_connection(self, volume, connector, **kwargs): """Terminate connection.""" vol_name = self._get_vol_name(volume) # None `connector` indicates force detach, then delete all even # if the volume is multi-attached. multiattach = (connector is not None and self._is_multiattach_to_host(volume.volume_attachment, connector["host"])) unused_wwns = [] if self._is_vol_in_pod(vol_name): # Try to disconnect from each host, they may not be online though # so if they fail don't cause a problem. for array in self._uniform_active_cluster_target_arrays: no_more_connections = self._disconnect( array, volume, connector, remove_remote_hosts=True, is_multiattach=multiattach) if no_more_connections: unused_wwns += self._get_array_wwns(array) # Now disconnect from the current array, removing any left over # remote hosts that we maybe couldn't reach. current_array = self._get_current_array() no_more_connections = self._disconnect(current_array, volume, connector, remove_remote_hosts=False, is_multiattach=multiattach) if no_more_connections: unused_wwns += self._get_array_wwns(current_array) properties = {"driver_volume_type": "fibre_channel", "data": {}} if len(unused_wwns) > 0: init_targ_map = self._build_initiator_target_map(unused_wwns, connector) properties["data"] = {"target_wwn": unused_wwns, "initiator_target_map": init_targ_map} fczm_utils.remove_fc_zone(properties) return properties @interface.volumedriver class PureNVMEDriver(PureBaseVolumeDriver, driver.BaseVD): """OpenStack Volume Driver to support Pure Storage FlashArray. This version of the driver enables the use of NVMe over different transport types for the underlying storage connectivity with the FlashArray. 
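    A minimal backend configuration sketch (cinder.conf, values are
    placeholders)::

        volume_driver = cinder.volume.drivers.pure.PureNVMEDriver
        san_ip = <array management address>
        pure_api_token = <api token>
        pure_nvme_transport = tcp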
""" VERSION = "21.0.nvme" def __init__(self, *args, **kwargs): execute = kwargs.pop("execute", utils.execute) super(PureNVMEDriver, self).__init__(execute=execute, *args, **kwargs) if self.configuration.pure_nvme_transport == "roce": self.transport_type = "rdma" self._storage_protocol = constants.NVMEOF_ROCE else: self.transport_type = "tcp" self._storage_protocol = constants.NVMEOF_TCP def _get_nguid(self, pure_vol_name): """Return the NGUID based on the volume's serial number The NGUID is constructed from the volume serial number and 3 octet OUI // octet 0: padding // octets 1 - 7: first 7 octets of volume serial number // octets 8 - 10: 3 octet OUI (24a937) // octets 11 - 15: last 5 octets of volume serial number """ array = self._get_current_array() volume_info = list(array.get_volumes(names=[pure_vol_name]).items)[0] nguid = ("00" + volume_info.serial[0:14] + "24a937" + volume_info.serial[-10:]) return nguid.lower() def _get_host(self, array, connector, remote=False): """Return a list of dicts describing existing host objects or None.""" if remote: hosts = list( getattr( array.get_hosts( filter="nqns='" + connector["nqn"] + "' and not is_local" ), "items", [] ) ) else: hosts = list( getattr( array.get_hosts( filter="nqns='" + connector["nqn"] + "' and is_local" ), "items", [] ) ) return hosts @pure_driver_debug_trace def initialize_connection(self, volume, connector): """Allow connection to connector and return connection info.""" pure_vol_name = self._get_vol_name(volume) target_arrays = [self._get_current_array()] if ( self._is_vol_in_pod(pure_vol_name) and self._is_active_cluster_enabled and not self._failed_over_primary_array ): target_arrays += self._uniform_active_cluster_target_arrays vol_type, instance_id = self._get_attachments(volume) if instance_id: tags = self._tag_volume(volume_name=pure_vol_name, vol_type=vol_type, instance=instance_id, project=volume.project_id) else: tags = self._tag_volume(volume_name=pure_vol_name, vol_type=vol_type, project=volume.project_id) LOG.debug("Volume tags added: %s", tags) targets = [] for array in target_arrays: connection = self._connect(array, pure_vol_name, connector) array_info = list(self._array.get_arrays().items)[0] # Minimum NVMe-TCP support is 6.4.2, but at 6.6.0 Purity # changes from using LUN to NSID if version.parse(array_info.version) < version.parse( '6.6.0' ): if not connection[0].lun: # Swallow any exception, just warn and continue LOG.warning("self._connect failed.") continue else: if not connection[0].nsid: # Swallow any exception, just warn and continue LOG.warning("self._connect failed.") continue target_ports = self._get_target_nvme_ports(array) targets.append( { "connection": connection, "ports": target_ports, } ) properties = self._build_connection_properties(targets) properties["data"]["volume_nguid"] = self._get_nguid(pure_vol_name) return properties def _build_connection_properties(self, targets): props = { "driver_volume_type": "nvmeof", "data": { "discard": True, }, } if self.configuration.pure_nvme_cidr_list: nvme_cidrs = self.configuration.pure_nvme_cidr_list if self.configuration.pure_nvme_cidr != "0.0.0.0/0": LOG.warning( "pure_nvme_cidr was ignored as " "pure_nvme_cidr_list is set" ) else: nvme_cidrs = [self.configuration.pure_nvme_cidr] check_nvme_cidrs = [ ipaddress.ip_network(item) for item in nvme_cidrs ] target_luns = [] target_nqns = [] target_portals = [] array_info = list(self._array.get_arrays().items)[0] # Aggregate all targets together, we may end up with different # namespaces for different 
target nqn/subsys sets (ie. it could # be a unique namespace for each FlashArray) for target in range(0, len(targets)): for port in targets[target]["ports"]: # Check to ensure that the portal IP is in the NVMe target # CIDR before adding it target_portal = port.portal if target_portal and port.nqn: portal, p_port = target_portal.rsplit(':', 1) portal = portal.strip("[]") check_ip = ipaddress.ip_address(portal) for check_cidr in check_nvme_cidrs: if check_ip in check_cidr: # Minimum NVMe-TCP support is 6.4.2, # but at 6.6.0 Purity changes from using LUN to # NSID if version.parse( array_info.version ) < version.parse("6.6.0"): target_luns.append( targets[target]["connection"][0].lun) else: target_luns.append( targets[target]["connection"][0].nsid) target_nqns.append(port.nqn) target_portals.append( (portal, NVME_PORT, self.transport_type) ) LOG.debug( "NVMe target portals that match CIDR range: '%s'", target_portals ) # If we have multiple ports always report them. if target_luns and target_nqns: props["data"]["portals"] = target_portals props["data"]["target_nqn"] = target_nqns[0] else: raise PureDriverException( reason=_("No approrpiate nvme ports on target array.") ) return props def _get_target_nvme_ports(self, array): """Return list of correct nvme-enabled port descriptions.""" ports = self._get_valid_ports(array) valid_nvme_ports = [] nvme_ports = [port for port in ports if getattr(port, "nqn", None)] for port in range(0, len(nvme_ports)): port_detail = list(array.get_network_interfaces( names=[nvme_ports[port].name.lower()] ).items)[0] if hasattr(port_detail.eth, "address") and ( port_detail.services[0] == "nvme-" + self.configuration.pure_nvme_transport): valid_nvme_ports.append(nvme_ports[port]) if not nvme_ports: raise PureDriverException( reason=_("No %(type)s enabled ports on target array.") % {"type": self._storage_protocol} ) return valid_nvme_ports @utils.retry(PureRetryableException, retries=HOST_CREATE_MAX_RETRIES) def _connect(self, array, vol_name, connector): """Connect the host and volume; return dict describing connection.""" nqn = connector["nqn"] hosts = self._get_host(array, connector, remote=False) host = hosts[0] if len(hosts) > 0 else None if host: host_name = host.name LOG.info( "Re-using existing purity host %(host_name)r", {"host_name": host_name}, ) else: personality = self.configuration.safe_get('pure_host_personality') host_name = self._generate_purity_host_name(connector) LOG.info( "Creating host object %(host_name)r with NQN:" " %(nqn)s.", {"host_name": host_name, "nqn": connector["nqn"]}, ) res = array.post_hosts(names=[host_name], host=flasharray.HostPost(nqns=[nqn])) if res.status_code == 400 and ( ERR_MSG_ALREADY_EXISTS in res.errors[0].message or ERR_MSG_ALREADY_IN_USE in res.errors[0].message): # If someone created it before we could just retry, we will # pick up the new host. LOG.debug("Unable to create host: %s", res.errors[0].message) raise PureRetryableException() if personality: self.set_personality(array, host_name, personality) # TODO: Ensure that the host has the correct preferred # arrays configured for it. return self._connect_host_to_vol(array, host_name, vol_name) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/drivers/qnap.py0000664000175000017500000025254300000000000020644 0ustar00zuulzuul00000000000000# Copyright (c) 2016 QNAP Systems, Inc. # All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Volume driver for QNAP Storage. This driver supports QNAP Storage for iSCSI. """ import base64 from collections import OrderedDict import functools import re import threading import time import urllib.parse import eventlet from lxml import etree as ET from oslo_concurrency import lockutils from oslo_config import cfg from oslo_log import log as logging from oslo_utils import strutils from oslo_utils import timeutils from oslo_utils import units import requests from cinder.common import constants from cinder import exception from cinder.i18n import _ from cinder import interface from cinder import utils from cinder.volume import configuration from cinder.volume.drivers.san import san LOG = logging.getLogger(__name__) qnap_opts = [ cfg.URIOpt('qnap_management_url', help='The URL to management QNAP Storage. ' 'Driver does not support IPv6 address in URL.'), cfg.StrOpt('qnap_poolname', help='The pool name in the QNAP Storage'), cfg.StrOpt('qnap_storage_protocol', default=constants.ISCSI, help='Communication protocol to access QNAP storage') ] CONF = cfg.CONF CONF.register_opts(qnap_opts, group=configuration.SHARED_CONF_GROUP) @interface.volumedriver class QnapISCSIDriver(san.SanISCSIDriver): """QNAP iSCSI based cinder driver .. code-block:: none Version History: 1.0.0: Initial driver (Only iSCSI). 1.2.001: Add supports for Thin Provisioning, SSD Cache, Deduplication, Compression and CHAP. 1.2.002: Add support for QES fw 2.0.0. 1.2.003: Add support for QES fw 2.1.0. 1.2.004: Add support for QES fw on TDS series NAS model. 1.2.005: Add support for QTS fw 4.4.0. NOTE: Set driver_ssl_cert_verify as True under backend section to enable SSL verification. 
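A minimal example backend section for cinder.conf follows; the section
name and all values are illustrative, only the option names come from
this driver and the san/target base options it relies on:

.. code-block:: none

    [qnap-iscsi]
    volume_driver = cinder.volume.drivers.qnap.QnapISCSIDriver
    volume_backend_name = qnap-iscsi
    qnap_management_url = http://192.168.1.100:8080
    qnap_poolname = Storage Pool 1
    qnap_storage_protocol = iscsi
    san_login = admin
    san_password = admin_password
    target_ip_address = 192.168.1.101
    use_chap_auth = False
    driver_ssl_cert_verify = False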
""" # ThirdPartySystems wiki page CI_WIKI_NAME = "QNAP_CI" # Remove if third party CI requirements are met SUPPORTED = False VERSION = '1.2.005' TIME_INTERVAL = 3 def __init__(self, *args, **kwargs): """Initialize QnapISCSIDriver.""" super(QnapISCSIDriver, self).__init__(*args, **kwargs) self.api_executor = None self.group_stats = {} self.configuration.append_config_values(qnap_opts) self.cache_time = 0 self.initiator = '' self.iscsi_port = '' self.target_index = '' self.target_iqn = '' self.target_iqns = [] self.nasInfoCache = {} @classmethod def get_driver_options(cls): additional_opts = cls._get_oslo_driver_opts( 'target_ip_address', 'san_login', 'san_password', 'use_chap_auth', 'chap_username', 'chap_password', 'driver_ssl_cert_verify', 'reserved_percentage') return qnap_opts + additional_opts def _check_config(self): """Ensure that the flags we care about are set.""" LOG.debug('in _check_config') required_config = ['qnap_management_url', 'san_login', 'san_password', 'qnap_poolname', 'qnap_storage_protocol'] for attr in required_config: if not getattr(self.configuration, attr, None): raise exception.InvalidInput( reason=_('%s is not set.') % attr) if not self.configuration.use_chap_auth: self.configuration.chap_username = '' self.configuration.chap_password = '' else: if not str.isalnum(self.configuration.chap_username): # invalid chap_username LOG.error('Username must be single-byte alphabet or number.') raise exception.InvalidInput( reason=_('Username must be single-byte ' 'alphabet or number.')) if not 12 <= len(self.configuration.chap_password) <= 16: # invalid chap_password LOG.error('Password must contain 12-16 characters.') raise exception.InvalidInput( reason=_('Password must contain 12-16 characters.')) def do_setup(self, context): """Setup the QNAP Cinder volume driver.""" self._check_config() self.ctxt = context LOG.debug('context: %s', context) # Setup API Executor try: self.api_executor = self.create_api_executor() except Exception: LOG.error('Failed to create HTTP client. 
' 'Check ip, port, username, password' ' and make sure the array version is compatible') msg = _('Failed to create HTTP client.') raise exception.VolumeDriverException(message=msg) def check_for_setup_error(self): """Check the status of setup.""" pass def create_api_executor(self): """Create api executor by nas model.""" self.api_executor = QnapAPIExecutor( username=self.configuration.san_login, password=self.configuration.san_password, management_url=self.configuration.qnap_management_url, verify_ssl=self.configuration.driver_ssl_cert_verify) nas_model_name, internal_model_name, fw_version = ( self.api_executor.get_basic_info( self.configuration.qnap_management_url)) if (self.configuration.qnap_management_url not in self.nasInfoCache): self.nasInfoCache[self.configuration.qnap_management_url] = ( nas_model_name, internal_model_name, fw_version) pattern = re.compile(r"^([A-Z]+)-?[A-Z]{0,2}(\d+)\d{2}(U|[a-z]*)") matches = pattern.match(nas_model_name) if not matches: return None model_type = matches.group(1) ts_model_types = [ "TS", "SS", "IS", "TVS", "TBS" ] tes_model_types = [ "TES", "TDS" ] es_model_types = [ "ES" ] LOG.debug('fw_version: %s', fw_version) if model_type in ts_model_types: if (fw_version >= "4.2") and (fw_version <= "4.4.9999"): LOG.debug('Create TS API Executor') # modify the pool name to pool index self.configuration.qnap_poolname = ( self._get_ts_model_pool_id( self.configuration.qnap_poolname)) return (QnapAPIExecutorTS( username=self.configuration.san_login, password=self.configuration.san_password, management_url=self.configuration.qnap_management_url, verify_ssl=self.configuration.driver_ssl_cert_verify)) elif model_type in tes_model_types: if 'TS' in internal_model_name: if (fw_version >= "4.2") and (fw_version <= "4.4.9999"): LOG.debug('Create TS API Executor') # modify the pool name to poole index self.configuration.qnap_poolname = ( self._get_ts_model_pool_id( self.configuration.qnap_poolname)) return (QnapAPIExecutorTS( username=self.configuration.san_login, password=self.configuration.san_password, management_url=self.configuration.qnap_management_url, verify_ssl=self.configuration.driver_ssl_cert_verify)) elif "1.1.2" <= fw_version <= "2.1.9999": LOG.debug('Create TES API Executor') return (QnapAPIExecutorTES( username=self.configuration.san_login, password=self.configuration.san_password, management_url=self.configuration.qnap_management_url, verify_ssl=self.configuration.driver_ssl_cert_verify)) elif model_type in es_model_types: if "1.1.2" <= fw_version <= "2.1.9999": LOG.debug('Create ES API Executor') return (QnapAPIExecutor( username=self.configuration.san_login, password=self.configuration.san_password, management_url=self.configuration.qnap_management_url, verify_ssl=self.configuration.driver_ssl_cert_verify)) msg = _('Model not support') raise exception.VolumeDriverException(message=msg) def _get_ts_model_pool_id(self, pool_name): """Modify the pool name to poole index.""" pattern = re.compile(r"^(\d+)+|^Storage Pool (\d+)+") matches = pattern.match(pool_name) if matches.group(1): return matches.group(1) else: return matches.group(2) def _gen_random_name(self): return "cinder-{0}".format(timeutils. utcnow(). 
strftime('%Y%m%d%H%M%S%f')) def _get_volume_metadata(self, volume): volume_metadata = {} if 'volume_metadata' in volume: for metadata in volume['volume_metadata']: volume_metadata[metadata['key']] = metadata['value'] return volume_metadata def _gen_lun_name(self): create_lun_name = '' while True: create_lun_name = self._gen_random_name() # If lun name with the name exists, need to change to # a different name created_lun = self.api_executor.get_lun_info( LUNName=create_lun_name) if created_lun is None: break return create_lun_name def _parse_boolean_extra_spec(self, extra_spec_value): """Parse boolean value from extra spec. Parse extra spec values of the form ' True' , ' False', 'True' and 'False'. """ if not isinstance(extra_spec_value, str): extra_spec_value = str(extra_spec_value) match = re.match(r'^\s*(?PTrue|False)$', extra_spec_value.strip(), re.IGNORECASE) if match: extra_spec_value = match.group('value') return strutils.bool_from_string(extra_spec_value, strict=True) def create_volume(self, volume): """Create a new volume.""" start_time = time.time() LOG.debug('in create_volume') LOG.debug('volume: %s', volume.__dict__) try: extra_specs = volume["volume_type"]["extra_specs"] LOG.debug('extra_spec: %s', extra_specs) qnap_thin_provision = self._parse_boolean_extra_spec( extra_specs.get('qnap_thin_provision', 'true')) qnap_compression = self._parse_boolean_extra_spec( extra_specs.get('qnap_compression', 'true')) qnap_deduplication = self._parse_boolean_extra_spec( extra_specs.get('qnap_deduplication', 'false')) qnap_ssd_cache = self._parse_boolean_extra_spec( extra_specs.get('qnap_ssd_cache', 'false')) except TypeError: LOG.debug('Unable to retrieve extra specs info. ' 'Use default extra spec.') qnap_thin_provision = True qnap_compression = True qnap_deduplication = False qnap_ssd_cache = False LOG.debug('qnap_thin_provision: %(qnap_thin_provision)s ' 'qnap_compression: %(qnap_compression)s ' 'qnap_deduplication: %(qnap_deduplication)s ' 'qnap_ssd_cache: %(qnap_ssd_cache)s', {'qnap_thin_provision': qnap_thin_provision, 'qnap_compression': qnap_compression, 'qnap_deduplication': qnap_deduplication, 'qnap_ssd_cache': qnap_ssd_cache}) if (qnap_deduplication and not qnap_thin_provision): LOG.debug('Dedupe cannot be enabled without thin_provisioning.') raise exception.VolumeBackendAPIException( data=_('Dedupe cannot be enabled without thin_provisioning.')) # User could create two volume with the same name on horizon. # Therefore, We should not use display name to create lun on nas. 
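# Illustrative sketch, not part of the driver: the qnap_* extra specs read
# above accept loosely formatted booleans such as 'True', ' True' or
# 'false', which _parse_boolean_extra_spec() normalizes. A stdlib-only
# equivalent of that parsing (hypothetical helper name) would be:
#
#     import re
#
#     def parse_boolean_extra_spec(value):
#         match = re.match(r'^\s*(?P<value>true|false)\s*$', str(value),
#                          re.IGNORECASE)
#         if match is None:
#             raise ValueError('invalid boolean extra spec: %r' % value)
#         return match.group('value').lower() == 'true'
#
#     assert parse_boolean_extra_spec(' True') is True
#     assert parse_boolean_extra_spec('false') is False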
create_lun_name = self._gen_lun_name() create_lun_index = self.api_executor.create_lun( volume, self.configuration.qnap_poolname, create_lun_name, qnap_thin_provision, qnap_ssd_cache, qnap_compression, qnap_deduplication) max_wait_sec = 600 try_times = 0 lun_naa = "" while True: created_lun = self.api_executor.get_lun_info( LUNIndex=create_lun_index) if (created_lun is not None and created_lun.find('LUNNAA').text is not None): lun_naa = created_lun.find('LUNNAA').text try_times += 3 if try_times > max_wait_sec or lun_naa: break eventlet.sleep(self.TIME_INTERVAL) LOG.debug('LUNNAA: %s', lun_naa) _metadata = self._get_volume_metadata(volume) _metadata['LUNIndex'] = create_lun_index _metadata['LUNNAA'] = lun_naa _metadata['LunName'] = create_lun_name elapsed_time = time.time() - start_time LOG.debug('create_volume elapsed_time: %s', elapsed_time) LOG.debug('create_volume volid: %(volid)s, metadata: %(meta)s', {'volid': volume['id'], 'meta': _metadata}) return {'metadata': _metadata} @lockutils.synchronized('delete_volume', 'cinder-', True) def delete_volume(self, volume): """Delete the specified volume.""" start_time = time.time() LOG.debug('volume: %s', volume.__dict__) lun_naa = self._get_lun_naa_from_volume_metadata(volume) if lun_naa == '': LOG.debug('Volume %s does not exist.', volume.id) return lun_index = '' for metadata in volume['volume_metadata']: if metadata['key'] == 'LUNIndex': lun_index = metadata['value'] break LOG.debug('LUNIndex: %s', lun_index) internal_model_name = (self.nasInfoCache [self.configuration.qnap_management_url][1]) LOG.debug('internal_model_name: %s', internal_model_name) fw_version = self.nasInfoCache[self.configuration .qnap_management_url][2] LOG.debug('fw_version: %s', fw_version) if 'TS' in internal_model_name.upper(): LOG.debug('in TS FW: get_one_lun_info') ret = self.api_executor.get_one_lun_info(lun_index) del_lun = ET.fromstring(ret['data']).find('LUNInfo').find('row') elif 'ES' in internal_model_name.upper(): if fw_version >= "1.1.2" and fw_version <= "1.1.3": LOG.debug('in ES FW before 1.1.2/1.1.3: get_lun_info') del_lun = self.api_executor.get_lun_info( LUNIndex=lun_index) elif "1.1.4" <= fw_version <= "2.1.9999": LOG.debug('in ES FW after 1.1.4: get_one_lun_info') ret = self.api_executor.get_one_lun_info(lun_index) del_lun = (ET.fromstring(ret['data']).find('LUNInfo') .find('row')) if del_lun is None: LOG.debug('Volume %s does not exist.', lun_naa) return # if lun is mapping at target, the delete action will fail if del_lun.find('LUNStatus').text == '2': target_index = (del_lun.find('LUNTargetList') .find('row').find('targetIndex').text) LOG.debug('target_index: %s', target_index) self.api_executor.disable_lun(lun_index, target_index) self.api_executor.unmap_lun(lun_index, target_index) retry_delete = False while True: retry_delete = self.api_executor.delete_lun(lun_index) if not retry_delete: break elapsed_time = time.time() - start_time LOG.debug('delete_volume elapsed_time: %s', elapsed_time) def _get_lun_naa_from_volume_metadata(self, volume): lun_naa = '' for metadata in volume['volume_metadata']: if metadata['key'] == 'LUNNAA': lun_naa = metadata['value'] break return lun_naa def _extend_lun(self, volume, lun_naa): LOG.debug('volume: %s', volume.__dict__) if lun_naa == '': lun_naa = self._get_lun_naa_from_volume_metadata(volume) LOG.debug('lun_naa: %s', lun_naa) selected_lun = self.api_executor.get_lun_info( LUNNAA=lun_naa) lun_index = selected_lun.find('LUNIndex').text LOG.debug('LUNIndex: %s', lun_index) lun_name = 
selected_lun.find('LUNName').text LOG.debug('LUNName: %s', lun_name) lun_thin_allocate = selected_lun.find('LUNThinAllocate').text LOG.debug('LUNThinAllocate: %s', lun_thin_allocate) lun_path = '' if selected_lun.find('LUNPath') is not None: lun_path = selected_lun.find('LUNPath').text LOG.debug('LUNPath: %s', lun_path) lun_status = selected_lun.find('LUNStatus').text LOG.debug('LUNStatus: %s', lun_status) lun = {'LUNName': lun_name, 'LUNCapacity': volume['size'], 'LUNIndex': lun_index, 'LUNThinAllocate': lun_thin_allocate, 'LUNPath': lun_path, 'LUNStatus': lun_status} self.api_executor.edit_lun(lun) def _create_snapshot_name(self, lun_index): create_snapshot_name = '' while True: # If snapshot with the name exists, need to change to # a different name create_snapshot_name = 'Q%d' % int(time.time()) snapshot = self.api_executor.get_snapshot_info( lun_index=lun_index, snapshot_name=create_snapshot_name) if snapshot is None: break return create_snapshot_name def create_cloned_volume(self, volume, src_vref): """Create a clone of the specified volume.""" LOG.debug('Entering create_cloned_volume...') LOG.debug('volume: %s', volume.__dict__) LOG.debug('src_vref: %s', src_vref.__dict__) LOG.debug('volume_metadata: %s', volume['volume_metadata']) src_lun_naa = self._get_lun_naa_from_volume_metadata(src_vref) # Below is to clone a volume from a snapshot in the snapshot manager src_lun = self.api_executor.get_lun_info( LUNNAA=src_lun_naa) lun_index = src_lun.find('LUNIndex').text LOG.debug('LUNIndex: %s', lun_index) # User could create two snapshot with the same name on horizon. # Therefore, we should not use displayname to create snapshot on nas. create_snapshot_name = self._create_snapshot_name(lun_index) self.api_executor.create_snapshot_api(lun_index, create_snapshot_name) created_snapshot = self.api_executor.get_snapshot_info( lun_index=lun_index, snapshot_name=create_snapshot_name) snapshot_id = created_snapshot.find('snapshot_id').text LOG.debug('snapshot_id: %s', snapshot_id) # User could create two volume with the same name on horizon. # Therefore, We should not use displayname to create lun on nas. 
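# Illustrative sketch, not part of the driver: the wait loops in this
# method (and in create_volume/create_snapshot/create_export) share one
# pattern -- poll a get_*_info call every TIME_INTERVAL (3) seconds and
# count the slept seconds against max_wait_sec (600). A standalone
# equivalent with hypothetical names:
#
#     def wait_for(fetch, ready, interval=3, max_wait=600):
#         """Poll fetch() until ready(result) is true or max_wait elapses."""
#         waited = 0
#         while True:
#             result = fetch()
#             if ready(result) or waited > max_wait:
#                 return result
#             waited += interval
#             eventlet.sleep(interval)
#
# e.g. waiting until a cloned LUN reports its NAA identifier would be
# wait_for(lambda: api.get_lun_info(LUNName=name),
#          lambda lun: lun is not None and lun.find('LUNNAA') is not None)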
while True: cloned_lun_name = self._gen_random_name() # If lunname with the name exists, need to change to # a different name cloned_lun = self.api_executor.get_lun_info( LUNName=cloned_lun_name) if cloned_lun is None: break self.api_executor.clone_snapshot(snapshot_id, cloned_lun_name) max_wait_sec = 600 try_times = 0 lun_naa = "" lun_index = "" while True: created_lun = self.api_executor.get_lun_info( LUNName=cloned_lun_name) if (created_lun is not None and created_lun.find('LUNNAA') is not None): lun_naa = created_lun.find('LUNNAA').text lun_index = created_lun.find('LUNIndex').text LOG.debug('LUNIndex: %s', lun_index) try_times += 3 if try_times > max_wait_sec or lun_naa: break eventlet.sleep(self.TIME_INTERVAL) LOG.debug('LUNNAA: %s', lun_naa) if (volume['size'] > src_vref['size']): self._extend_lun(volume, lun_naa) internal_model_name = (self.nasInfoCache [self.configuration.qnap_management_url][1]) if 'TS' in internal_model_name.upper(): LOG.debug('in TS FW: delete_snapshot_api') self.api_executor.delete_snapshot_api(snapshot_id) elif 'ES' in internal_model_name.upper(): LOG.debug('in ES FW: do nothing') _metadata = self._get_volume_metadata(volume) _metadata['LUNIndex'] = lun_index _metadata['LUNNAA'] = lun_naa _metadata['LunName'] = cloned_lun_name return {'metadata': _metadata} def create_snapshot(self, snapshot): """Create a snapshot.""" LOG.debug('snapshot: %s', snapshot.__dict__) LOG.debug('snapshot id: %s', snapshot['id']) # Below is to create snapshot in the snapshot manager LOG.debug('volume_metadata: %s', snapshot.volume['metadata']) volume_metadata = snapshot.volume['metadata'] LOG.debug('lun_naa: %s', volume_metadata['LUNNAA']) lun_naa = volume_metadata['LUNNAA'] src_lun = self.api_executor.get_lun_info(LUNNAA=lun_naa) lun_index = src_lun.find('LUNIndex').text LOG.debug('LUNIndex: %s', lun_index) # User could create two snapshot with the same name on horizon. # Therefore, We should not use displayname to create snapshot on nas. 
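# Illustrative sketch, not part of the driver: NAS-side objects are tracked
# through Cinder metadata rather than display names. With hypothetical
# values, the metadata written by create_volume()/create_cloned_volume()
# and by this method looks like:
#
#     volume metadata:   {'LUNIndex': '5',
#                         'LUNNAA': '6e843b6...',
#                         'LunName': 'cinder-20240101010101000000'}
#     snapshot metadata: {'snapshot_id': '12',
#                         'SnapshotName': 'Q1700000000'}
#
# delete_snapshot() and create_volume_from_snapshot() later read
# snapshot['metadata']['snapshot_id'] to address the snapshot on the NAS,
# while delete_volume()/create_export() read the volume's LUNIndex/LUNNAA.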
create_snapshot_name = self._create_snapshot_name(lun_index) LOG.debug('create_snapshot_name: %s', create_snapshot_name) self.api_executor.create_snapshot_api(lun_index, create_snapshot_name) max_wait_sec = 600 try_times = 0 snapshot_id = "" while True: created_snapshot = self.api_executor.get_snapshot_info( lun_index=lun_index, snapshot_name=create_snapshot_name) if (created_snapshot is not None and created_snapshot.find('snapshot_id').text is not None): snapshot_id = created_snapshot.find('snapshot_id').text try_times += 3 if try_times > max_wait_sec or snapshot_id: break eventlet.sleep(self.TIME_INTERVAL) LOG.debug('created_snapshot: %s', created_snapshot) LOG.debug('snapshot_id: %s', snapshot_id) _metadata = snapshot['metadata'] _metadata['snapshot_id'] = snapshot_id _metadata['SnapshotName'] = create_snapshot_name return {'metadata': _metadata} def delete_snapshot(self, snapshot): """Delete a snapshot.""" LOG.debug('snapshot: %s', snapshot.__dict__) # Below is to delete snapshot in the snapshot manager snap_metadata = snapshot['metadata'] if 'snapshot_id' not in snap_metadata: return LOG.debug('snapshot_id: %s', snap_metadata['snapshot_id']) snapshot_id = snap_metadata['snapshot_id'] self.api_executor.delete_snapshot_api(snapshot_id) def create_volume_from_snapshot(self, volume, snapshot): """Create a volume from a snapshot.""" LOG.debug('in create_volume_from_snapshot') LOG.debug('volume: %s', volume.__dict__) LOG.debug('snapshot: %s', snapshot.__dict__) # Below is to clone a volume from a snapshot in the snapshot manager snap_metadata = snapshot['metadata'] if 'snapshot_id' not in snap_metadata: LOG.debug('Metadata of the snapshot is invalid') msg = _('Metadata of the snapshot is invalid') raise exception.VolumeDriverException(message=msg) LOG.debug('snapshot_id: %s', snap_metadata['snapshot_id']) snapshot_id = snap_metadata['snapshot_id'] # User could create two volume with the same name on horizon. # Therefore, We should not use displayname to create lun on nas. create_lun_name = self._gen_lun_name() self.api_executor.clone_snapshot( snapshot_id, create_lun_name) max_wait_sec = 600 try_times = 0 lun_naa = "" lun_index = "" while True: created_lun = self.api_executor.get_lun_info( LUNName=create_lun_name) if (created_lun is not None and created_lun.find('LUNNAA') is not None): lun_naa = created_lun.find('LUNNAA').text lun_index = created_lun.find('LUNIndex').text LOG.debug('LUNNAA: %s', lun_naa) LOG.debug('LUNIndex: %s', lun_index) try_times += 3 if try_times > max_wait_sec or lun_naa: break eventlet.sleep(self.TIME_INTERVAL) if (volume['size'] > snapshot['volume_size']): self._extend_lun(volume, lun_naa) _metadata = self._get_volume_metadata(volume) _metadata['LUNIndex'] = lun_index _metadata['LUNNAA'] = lun_naa _metadata['LunName'] = create_lun_name return {'metadata': _metadata} def get_volume_stats(self, refresh=False): """Get volume stats. 
This is more of getting group stats.""" LOG.debug('in get_volume_stats refresh: %s', refresh) if refresh: backend_name = (self.configuration.safe_get( 'volume_backend_name') or self.__class__.__name__) LOG.debug('backend_name=%(backend_name)s', {'backend_name': backend_name}) selected_pool = self.api_executor.get_specific_poolinfo( self.configuration.qnap_poolname) capacity_bytes = int(selected_pool.find('capacity_bytes').text) LOG.debug('capacity_bytes: %s GB', capacity_bytes / units.Gi) freesize_bytes = int(selected_pool.find('freesize_bytes').text) LOG.debug('freesize_bytes: %s GB', freesize_bytes / units.Gi) provisioned_bytes = int(selected_pool.find('allocated_bytes').text) driver_protocol = self.configuration.qnap_storage_protocol LOG.debug( 'provisioned_bytes: %s GB', provisioned_bytes / units.Gi) self.group_stats = {'volume_backend_name': backend_name, 'vendor_name': 'QNAP', 'driver_version': self.VERSION, 'storage_protocol': driver_protocol} # single pool now, need support multiple pools in the future single_pool = dict( pool_name=self.configuration.qnap_poolname, total_capacity_gb=capacity_bytes / units.Gi, free_capacity_gb=freesize_bytes / units.Gi, provisioned_capacity_gb=provisioned_bytes / units.Gi, reserved_percentage=self.configuration.reserved_percentage, QoS_support=False, qnap_thin_provision=["True", "False"], qnap_compression=["True", "False"], qnap_deduplication=["True", "False"], qnap_ssd_cache=["True", "False"]) self.group_stats['pools'] = [single_pool] return self.group_stats def extend_volume(self, volume, new_size): """Extend an existing volume.""" LOG.debug('Entering extend_volume volume=%(vol)s ' 'new_size=%(size)s', {'vol': volume['display_name'], 'size': new_size}) volume['size'] = new_size self._extend_lun(volume, '') def _get_portal_info(self, volume, connector, lun_slot_id, lun_owner): """Get portal info.""" # Cache portal info for twenty seconds # If connectors were the same then use the portal info which was cached LOG.debug('get into _get_portal_info') self.initiator = connector['initiator'] ret = self.api_executor.get_iscsi_portal_info() root = ET.fromstring(ret['data']) iscsi_port = root.find('iSCSIPortal').find('servicePort').text LOG.debug('iscsiPort: %s', iscsi_port) target_iqn_prefix = root.find( 'iSCSIPortal').find('targetIQNPrefix').text LOG.debug('targetIQNPrefix: %s', target_iqn_prefix) internal_model_name = (self.nasInfoCache [self.configuration.qnap_management_url][1]) LOG.debug('internal_model_name: %s', internal_model_name) fw_version = (self.nasInfoCache [self.configuration.qnap_management_url][2]) LOG.debug('fw_version: %s', fw_version) target_index = '' target_iqn = '' # create a new target if no target has ACL connector['initiator'] LOG.debug('exist target_index: %s', target_index) if not target_index: target_name = self._gen_random_name() LOG.debug('target_name: %s', target_name) target_index = self.api_executor.create_target( target_name, lun_owner) LOG.debug('targetIndex: %s', target_index) retryCount = 0 retrySleepTime = 2 while retryCount <= 5: target_info = self.api_executor.get_target_info(target_index) if target_info.find('targetIQN').text is not None: break eventlet.sleep(retrySleepTime) retrySleepTime = retrySleepTime + 2 retryCount = retryCount + 1 target_iqn = target_info.find('targetIQN').text LOG.debug('target_iqn: %s', target_iqn) # TS NAS have to remove default ACL default_acl = ( target_iqn_prefix[:target_iqn_prefix.find(":") + 1]) default_acl = default_acl + "all:iscsi.default.ffffff" LOG.debug('default_acl: %s', 
default_acl) self.api_executor.remove_target_init(target_iqn, default_acl) # add ACL self.api_executor.add_target_init( target_iqn, connector['initiator'], self.configuration.use_chap_auth, self.configuration.chap_username, self.configuration.chap_password) # Get information for multipath target_iqns = [] slotid_list = [] eth_list, slotid_list = Util.retriveFormCache( self.configuration.qnap_management_url, lambda: self.api_executor.get_ethernet_ip(type='data'), 30) LOG.debug('slotid_list: %s', slotid_list) target_portals = [] target_portals.append( self.configuration.target_ip_address + ':' + iscsi_port) # target_iqns.append(target_iqn) for index, eth in enumerate(eth_list): # TS NAS do not have slot_id if not slotid_list: target_iqns.append(target_iqn) else: # To support ALUA, target portal and target inq should # be consistent. # EX: 10.77.230.31:3260 at controller B and it should map # to the target at controller B target_iqns.append( target_iqn[:-2] + '.' + slotid_list[index]) if eth == self.configuration.target_ip_address: continue target_portals.append(eth + ':' + iscsi_port) self.iscsi_port = iscsi_port self.target_index = target_index self.target_iqn = target_iqn self.target_iqns = target_iqns self.target_portals = target_portals return (iscsi_port, target_index, target_iqn, target_iqns, target_portals) @lockutils.synchronized('create_export', 'cinder-', True) def create_export(self, context, volume, connector): start_time = time.time() LOG.debug('in create_export') LOG.debug('volume: %s', volume.__dict__) LOG.debug('connector: %s', connector) lun_naa = self._get_lun_naa_from_volume_metadata(volume) if lun_naa == '': msg = (_("Volume %s does not exist.") % volume.id) LOG.error(msg) raise exception.VolumeDriverException(message=msg) LOG.debug('volume[name]: %s', volume['name']) LOG.debug('volume[display_name]: %s', volume['display_name']) lun_index = '' for metadata in volume['volume_metadata']: if metadata['key'] == 'LUNIndex': lun_index = metadata['value'] break LOG.debug('LUNIndex: %s', lun_index) internal_model_name = (self.nasInfoCache [self.configuration.qnap_management_url][1]) LOG.debug('internal_model_name: %s', internal_model_name) fw_version = self.nasInfoCache[self.configuration .qnap_management_url][2] LOG.debug('fw_version: %s', fw_version) if 'TS' in internal_model_name.upper(): LOG.debug('in TS FW: get_one_lun_info') ret = self.api_executor.get_one_lun_info(lun_index) selected_lun = (ET.fromstring(ret['data']).find('LUNInfo') .find('row')) elif 'ES' in internal_model_name.upper(): if fw_version >= "1.1.2" and fw_version <= "1.1.3": LOG.debug('in ES FW before 1.1.2/1.1.3: get_lun_info') selected_lun = self.api_executor.get_lun_info( LUNNAA=lun_naa) elif "1.1.4" <= fw_version <= "2.1.9999": LOG.debug('in ES FW after 1.1.4: get_one_lun_info') ret = self.api_executor.get_one_lun_info(lun_index) selected_lun = (ET.fromstring(ret['data']).find('LUNInfo') .find('row')) lun_owner = '' lun_slot_id = '' if selected_lun.find('lun_owner') is not None: lun_owner = selected_lun.find('lun_owner').text LOG.debug('lun_owner: %s', lun_owner) lun_slot_id = '0' if (lun_owner == 'SCA') else '1' LOG.debug('lun_slot_id: %s', lun_slot_id) # LOG.debug('self.initiator: %s', self.initiator) LOG.debug('connector: %s', connector['initiator']) iscsi_port, target_index, target_iqn, target_iqns, target_portals = ( self._get_portal_info(volume, connector, lun_slot_id, lun_owner)) self.api_executor.map_lun(lun_index, target_index) max_wait_sec = 600 try_times = 0 LUNNumber = "" target_lun_id = -999 
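# Illustrative sketch, not part of the driver: -999 is only a "no LUN
# number reported yet" sentinel; the loop below polls the NAS until the
# freshly mapped LUN shows up under the target. An abridged, hypothetical
# reply that the ElementTree lookups below walk (the root element name is
# assumed):
#
#     <QDocRoot>
#       <LUNInfo>
#         <row>
#           <LUNIndex>5</LUNIndex>
#           <LUNStatus>2</LUNStatus>
#           <LUNTargetList>
#             <row>
#               <targetIndex>3</targetIndex>
#               <LUNNumber>0</LUNNumber>
#             </row>
#           </LUNTargetList>
#         </row>
#       </LUNInfo>
#     </QDocRoot>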
while True: if 'TS' in internal_model_name.upper(): LOG.debug('in TS FW: get_one_lun_info') ret = self.api_executor.get_one_lun_info(lun_index) root = ET.fromstring(ret['data']) target_lun_id = int(root.find('LUNInfo').find('row') .find('LUNTargetList').find('row') .find('LUNNumber').text) try_times += 3 if try_times > max_wait_sec or target_lun_id != -999: break eventlet.sleep(self.TIME_INTERVAL) elif 'ES' in internal_model_name.upper(): if fw_version >= "1.1.2" and fw_version <= "1.1.3": LOG.debug('in ES FW before 1.1.2/1.1.3: get_lun_info') root = self.api_executor.get_lun_info(LUNNAA=lun_naa) if len(list(root.find('LUNTargetList'))) != 0: LUNNumber = root.find('LUNTargetList').find( 'row').find('LUNNumber').text target_lun_id = int(LUNNumber) try_times += 3 if try_times > max_wait_sec or LUNNumber: break eventlet.sleep(self.TIME_INTERVAL) elif "1.1.4" <= fw_version <= "2.1.9999": LOG.debug('in ES FW after 1.1.4: get_one_lun_info') ret = self.api_executor.get_one_lun_info(lun_index) root = ET.fromstring(ret['data']) target_lun_id = int(root.find('LUNInfo') .find('row').find('LUNTargetList') .find('row').find('LUNNumber').text) try_times += 3 if try_times > max_wait_sec or target_lun_id != -999: break eventlet.sleep(self.TIME_INTERVAL) else: break else: break properties = {} properties['target_discovered'] = False properties['target_portal'] = (self.configuration.target_ip_address + ':' + iscsi_port) properties['target_iqn'] = target_iqn LOG.debug('properties[target_iqn]: %s', properties['target_iqn']) LOG.debug('target_lun_id: %s', target_lun_id) properties['target_lun'] = target_lun_id properties['volume_id'] = volume['id'] # used by xen currently multipath = connector.get('multipath', False) if multipath: """Below are settings for multipath""" properties['target_portals'] = target_portals properties['target_iqns'] = target_iqns properties['target_luns'] = ( [target_lun_id] * len(target_portals)) LOG.debug('properties: %s', properties) provider_location = '%(host)s:%(port)s,1 %(name)s %(tgt_lun)s' % { 'host': self.configuration.target_ip_address, 'port': iscsi_port, 'name': target_iqn, 'tgt_lun': target_lun_id, } if self.configuration.use_chap_auth: provider_auth = 'CHAP %s %s' % (self.configuration.chap_username, self.configuration.chap_password) else: provider_auth = None elapsed_time = time.time() - start_time LOG.debug('create_export elapsed_time: %s', elapsed_time) LOG.debug('create_export volid: %(volid)s, provider_location: %(loc)s', {'volid': volume['id'], 'loc': provider_location}) return ( {'provider_location': provider_location, 'provider_auth': provider_auth}) def initialize_connection(self, volume, connector): start_time = time.time() LOG.debug('in initialize_connection') if not volume['provider_location']: err = _("Param volume['provider_location'] is invalid.") raise exception.InvalidParameterValue(err=err) result = volume['provider_location'].split(' ') if len(result) < 2: raise exception.InvalidInput(reason=volume['provider_location']) data = result[0].split(',') if len(data) < 2: raise exception.InvalidInput(reason=volume['provider_location']) iqn = result[1] LOG.debug('iqn: %s', iqn) target_lun_id = int(result[2], 10) LOG.debug('target_lun_id: %d', target_lun_id) properties = {} properties['target_discovered'] = False properties['target_portal'] = (self.configuration.target_ip_address + ':' + self.iscsi_port) properties['target_iqn'] = iqn properties['target_lun'] = target_lun_id properties['volume_id'] = volume['id'] # used by xen currently if 
self.configuration.use_chap_auth: properties['auth_method'] = 'CHAP' properties['auth_username'] = self.configuration.chap_username properties['auth_password'] = self.configuration.chap_password elapsed_time = time.time() - start_time LOG.debug('initialize_connection elapsed_time: %s', elapsed_time) LOG.debug('initialize_connection volid:' ' %(volid)s, properties: %(prop)s', {'volid': volume['id'], 'prop': properties}) return { 'driver_volume_type': 'iscsi', 'data': properties, } def enum(self, *sequential, **named): """Enum method.""" enums = dict(zip(sequential, range(len(sequential))), **named) return type('Enum', (), enums) def terminate_connection(self, volume, connector, **kwargs): """Driver entry point to unattach a volume from an instance.""" start_time = time.time() LOG.debug('in terminate_connection') LOG.debug('volume: %s', volume.__dict__) LOG.debug('connector: %s', connector) # get lun index lun_naa = self._get_lun_naa_from_volume_metadata(volume) LOG.debug('lun_naa: %s', lun_naa) lun_index = '' for metadata in volume['volume_metadata']: if metadata['key'] == 'LUNIndex': lun_index = metadata['value'] break LOG.debug('LUNIndex: %s', lun_index) internal_model_name = (self.nasInfoCache [self.configuration.qnap_management_url][1]) LOG.debug('internal_model_name: %s', internal_model_name) fw_version = self.nasInfoCache[self.configuration .qnap_management_url][2] LOG.debug('fw_version: %s', fw_version) if 'TS' in internal_model_name.upper(): LOG.debug('in TS FW: get_one_lun_info') ret = self.api_executor.get_one_lun_info(lun_index) selected_lun = (ET.fromstring(ret['data']).find('LUNInfo') .find('row')) elif 'ES' in internal_model_name.upper(): if fw_version >= "1.1.2" and fw_version <= "1.1.3": LOG.debug('in ES FW before 1.1.2/1.1.3: get_lun_info') selected_lun = self.api_executor.get_lun_info( LUNIndex=lun_index) elif "1.1.4" <= fw_version <= "2.1.9999": LOG.debug('in ES FW after 1.1.4: get_one_lun_info') ret = self.api_executor.get_one_lun_info(lun_index) selected_lun = (ET.fromstring(ret['data']).find('LUNInfo') .find('row')) lun_status = self.enum('creating', 'unmapped', 'mapped') LOG.debug('LUNStatus: %s', selected_lun.find('LUNStatus').text) LOG.debug('lun_status.mapped: %s', str(lun_status.mapped)) # lun does not map to any target if (selected_lun.find('LUNStatus').text) != ( str(lun_status.mapped)): return target_index = (selected_lun.find('LUNTargetList') .find('row').find('targetIndex').text) LOG.debug('target_index: %s', target_index) start_time1 = time.time() self.api_executor.disable_lun(lun_index, target_index) elapsed_time1 = time.time() - start_time1 LOG.debug('terminate_connection disable_lun elapsed_time : %s', elapsed_time1) start_time2 = time.time() self.api_executor.unmap_lun(lun_index, target_index) elapsed_time2 = time.time() - start_time2 LOG.debug('terminate_connection unmap_lun elapsed_time : %s', elapsed_time2) elapsed_time = time.time() - start_time LOG.debug('terminate_connection elapsed_time : %s', elapsed_time) self.api_executor.delete_target(target_index) def update_migrated_volume( self, context, volume, new_volume, original_volume_status): """Return model update for migrated volume.""" LOG.debug('volume: %s', volume.__dict__) LOG.debug('new_volume: %s', new_volume.__dict__) LOG.debug('original_volume_status: %s', original_volume_status) _metadata = self._get_volume_metadata(new_volume) # metadata will not be swap after migration with liberty version # and the metadata of new volume is different with the metadata # of original volume. 
Therefore, we need to update the migrated volume. if not hasattr(new_volume, '_orig_metadata'): model_update = {'metadata': _metadata} return model_update @utils.synchronized('_attach_volume') def _detach_volume(self, context, attach_info, volume, properties, force=False, remote=False, ignore_errors=False): super(QnapISCSIDriver, self)._detach_volume( context, attach_info, volume, properties, force=force, remote=remote, ignore_errors=ignore_errors ) @utils.synchronized('_attach_volume') def _attach_volume(self, context, volume, properties, remote=False): return super(QnapISCSIDriver, self)._attach_volume(context, volume, properties, remote) def _connection_checker(func): """Decorator to check session has expired or not.""" @functools.wraps(func) def inner_connection_checker(self, *args, **kwargs): LOG.debug('in _connection_checker') for attempts in range(5): try: return func(self, *args, **kwargs) except exception.VolumeBackendAPIException as e: pattern = re.compile( r".*Session id expired$") matches = pattern.match(str(e)) if matches: if attempts < 4: LOG.debug('Session might have expired.' ' Trying to relogin') self._login() continue LOG.error('Re-throwing Exception %s', e) raise return inner_connection_checker class QnapAPIExecutor(object): """Makes QNAP API calls for ES NAS.""" es_create_lun_lock = threading.Lock() es_delete_lun_lock = threading.Lock() es_lun_locks = {} def __init__(self, *args, **kwargs): """Init function.""" self.sid = None self.username = kwargs['username'] self.password = kwargs['password'] self.ip, self.port, self.ssl = ( self._parse_management_url(kwargs['management_url'])) self.verify_ssl = kwargs['verify_ssl'] self._login() def _parse_management_url(self, management_url): # NOTE(Ibad): This parser isn't compatible with IPv6 address. # Typical IPv6 address will have : as delimiters and # URL is represented as https://[3ffe:2a00:100:7031::1]:8080 # since the regular expression below uses : to identify ip and port # it won't work with IPv6 address. 
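# Illustrative sketch, not part of the driver: for a typical management
# URL the parse below returns (ip, port, use_ssl) with the port kept as a
# string, e.g. (hypothetical address)
#
#     _parse_management_url('https://192.168.1.100:443')
#     # -> ('192.168.1.100', '443', True)
#
# A stdlib alternative that also accepts bracketed IPv6 literals would be
# urllib.parse.urlsplit:
#
#     from urllib.parse import urlsplit
#     parts = urlsplit('https://[3ffe:2a00:100:7031::1]:8080')
#     # parts.hostname == '3ffe:2a00:100:7031::1', parts.port == 8080
#
# The driver's own (IPv4-only) regular expression follows.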
pattern = re.compile(r"(http|https)\:\/\/(\S+)\:(\d+)") matches = pattern.match(management_url) if matches.group(1) == 'http': management_ssl = False else: management_ssl = True management_ip = matches.group(2) management_port = matches.group(3) return management_ip, management_port, management_ssl def get_basic_info(self, management_url): """Get the basic information of NAS.""" management_ip, management_port, management_ssl = ( self._parse_management_url(management_url)) response = self._get_response(management_ip, management_port, management_ssl, '/cgi-bin/authLogin.cgi') data = response.text root = ET.fromstring(data) nas_model_name = root.find('model/displayModelName').text internal_model_name = root.find('model/internalModelName').text fw_version = root.find('firmware/version').text return nas_model_name, internal_model_name, fw_version def _get_response(self, host_ip, host_port, use_ssl, action, body=None): """"Execute http request and return response.""" method = 'GET' headers = None protocol = 'https' if use_ssl else 'http' verify = self.verify_ssl if use_ssl else False # NOTE(ibad): URL formed here isn't IPv6 compatible # we should surround host ip with [] when IPv6 is supported # so the final URL can be like https://[3ffe:2a00:100:7031::1]:8080 url = '%s://%s:%s%s' % (protocol, host_ip, host_port, action) if body: method = 'POST' headers = { 'Content-Type': 'application/x-www-form-urlencoded', 'charset': 'utf-8' } response = requests.request(method, url, data=body, headers=headers, verify=verify) return response def _execute_and_get_response_details(self, nas_ip, url, post_parm=None): """Will prepare response after executing an http request.""" LOG.debug('_execute_and_get_response_details url: %s', url) LOG.debug('_execute_and_get_response_details post_parm: %s', post_parm) res_details = {} # Make the connection start_time2 = time.time() response = self._get_response( nas_ip, self.port, self.ssl, url, post_parm) elapsed_time2 = time.time() - start_time2 LOG.debug('request elapsed_time: %s', elapsed_time2) # Read the response data = response.text LOG.debug('response status: %s', response.status_code) # Extract http error msg if any error_details = None res_details['data'] = data res_details['error'] = error_details res_details['http_status'] = response.status_code return res_details def execute_login(self): """Login and return sid.""" params = OrderedDict( pwd=base64.b64encode(self.password.encode('utf-8')).decode(), serviceKey='1', user=self.username, ) encoded_params = urllib.parse.urlencode(params) url = ('/cgi-bin/authLogin.cgi?') res_details = self._execute_and_get_response_details( self.ip, url, encoded_params) root = ET.fromstring(res_details['data']) LOG.debug('execute_login data: %s', res_details['data']) session_id = root.find('authSid').text LOG.debug('execute_login session_id: %s', session_id) return session_id def _login(self): """Execute Https Login API.""" self.sid = self.execute_login() def _get_res_details(self, url, **kwargs): sanitized_params = OrderedDict() # Sort the dict of parameters params = utils.create_ordereddict(kwargs) for key, value in params.items(): if value is not None: sanitized_params[key] = str(value) encoded_params = urllib.parse.urlencode(sanitized_params) url = url + encoded_params res_details = self._execute_and_get_response_details(self.ip, url) return res_details @_connection_checker def create_lun(self, volume, pool_name, create_lun_name, reserve, ssd_cache, compress, dedup): """Create lun.""" self.es_create_lun_lock.acquire() 
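# Illustrative sketch, not part of the driver: every executor method goes
# through _get_res_details(), which drops None values, urlencodes the
# remaining keyword arguments and appends them to the CGI path, so the
# add_lun request built just below ends up roughly like (hypothetical
# values; the ordering shown here is only illustrative):
#
#     /cgi-bin/disk/iscsi_lun_setting.cgi?func=add_lun&FileIO=no
#         &LUNThinAllocate=1&LUNName=cinder-...&LUNPath=cinder-...
#         &poolID=1&lv_ifssd=no&compression=1&dedup=off
#         &LUNCapacity=1&lv_threshold=80&sid=<session id>
#
# In the XML reply, an <authPassed> of '0' means the session id expired
# (the _connection_checker decorator then re-logins and retries) and
# <result> carries the new LUN index on success or a negative code on
# failure.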
lun_thin_allocate = '' if reserve: lun_thin_allocate = '1' else: lun_thin_allocate = '0' try: res_details = self._get_res_details( '/cgi-bin/disk/iscsi_lun_setting.cgi?', func='add_lun', FileIO='no', LUNThinAllocate=lun_thin_allocate, LUNName=create_lun_name, LUNPath=create_lun_name, poolID=pool_name, lv_ifssd='yes' if ssd_cache else 'no', compression='1' if compress else '0', dedup='sha256' if dedup else 'off', LUNCapacity=volume['size'], lv_threshold='80', sid=self.sid) finally: self.es_create_lun_lock.release() root = ET.fromstring(res_details['data']) if root.find('authPassed').text == '0': raise exception.VolumeBackendAPIException( data=_('Session id expired')) if root.find('result').text < '0': raise exception.VolumeBackendAPIException( data=_('Create volume %s failed') % volume['display_name']) return root.find('result').text @_connection_checker def delete_lun(self, vol_id, *args, **kwargs): """Execute delete lun API.""" self.es_delete_lun_lock.acquire() try: res_details = self._get_res_details( '/cgi-bin/disk/iscsi_lun_setting.cgi?', func='remove_lun', run_background='1', ha_sync='1', LUNIndex=vol_id, sid=self.sid) finally: self.es_delete_lun_lock.release() data_set_is_busy = "-205041" root = ET.fromstring(res_details['data']) if root.find('authPassed').text == '0': raise exception.VolumeBackendAPIException( data=_('Session id expired')) # dataset is busy, retry to delete if root.find('result').text == data_set_is_busy: return True if root.find('result').text < '0': msg = (_('Volume %s delete failed') % vol_id) raise exception.VolumeBackendAPIException(data=msg) return False @_connection_checker def get_specific_poolinfo(self, pool_id): """Execute get specific poolinfo API.""" res_details = self._get_res_details( '/cgi-bin/disk/disk_manage.cgi?', store='poolInfo', func='extra_get', poolID=pool_id, Pool_Info='1', sid=self.sid) root = ET.fromstring(res_details['data']) if root.find('authPassed').text == '0': raise exception.VolumeBackendAPIException( data=_('Session id expired')) if root.find('result').text < '0': raise exception.VolumeBackendAPIException( data=_('get_specific_poolinfo failed')) pool_list = root.find('Pool_Index') pool_info_tree = pool_list.findall('row') for pool in pool_info_tree: if pool_id == pool.find('poolID').text: return pool @_connection_checker def create_target(self, target_name, controller_name): """Create target on nas and return target index.""" res_details = self._get_res_details( '/cgi-bin/disk/iscsi_target_setting.cgi?', func='add_target', targetName=target_name, targetAlias=target_name, bTargetDataDigest='0', bTargetHeaderDigest='0', bTargetClusterEnable='1', controller_name=controller_name, sid=self.sid) root = ET.fromstring(res_details['data']) if root.find('authPassed').text == '0': raise exception.VolumeBackendAPIException( data=_('Session id expired')) if root.find('result').text < '0': raise exception.VolumeBackendAPIException( data=_('Create target failed')) root = ET.fromstring(res_details['data']) targetIndex = root.find('result').text return targetIndex @_connection_checker def delete_target(self, target_index): """Delete target on nas.""" res_details = self._get_res_details( '/cgi-bin/disk/iscsi_target_setting.cgi?', func='remove_target', targetIndex=target_index, sid=self.sid) root = ET.fromstring(res_details['data']) if root.find('authPassed').text == '0': raise exception.VolumeBackendAPIException( data=_('Session id expired')) if root.find('result').text != '0': raise exception.VolumeBackendAPIException( data=_('Delete target 
failed')) @_connection_checker def add_target_init(self, target_iqn, init_iqn, use_chap_auth, chap_username, chap_password): """Add target acl.""" res_details = self._get_res_details( '/cgi-bin/disk/iscsi_target_setting.cgi?', func='add_init', targetIQN=target_iqn, initiatorIQN=init_iqn, initiatorAlias=init_iqn, bCHAPEnable='1' if use_chap_auth else '0', CHAPUserName=chap_username, CHAPPasswd=chap_password, bMutualCHAPEnable='0', mutualCHAPUserName='', mutualCHAPPasswd='', ha_sync='1', sid=self.sid) root = ET.fromstring(res_details['data']) if root.find('authPassed').text == '0': raise exception.VolumeBackendAPIException( data=_('Session id expired')) if root.find('result').text < '0': raise exception.VolumeBackendAPIException( data=_('Add target acl failed')) def remove_target_init(self, target_iqn, init_iqn): """Remote target acl.""" pass @_connection_checker def map_lun(self, lun_index, target_index): """Map lun to sepecific target.""" try: res_details = self._get_res_details( '/cgi-bin/disk/iscsi_target_setting.cgi?', func='add_lun', LUNIndex=lun_index, targetIndex=target_index, sid=self.sid) finally: pass root = ET.fromstring(res_details['data']) if root.find('authPassed').text == '0': raise exception.VolumeBackendAPIException( data=_('Session id expired')) if root.find('result').text < '0': raise exception.VolumeBackendAPIException(data=_( "Map lun %(lun_index)s to target %(target_index)s failed") % {'lun_index': str(lun_index), 'target_index': str(target_index)}) @_connection_checker def disable_lun(self, lun_index, target_index): """Disable lun from sepecific target.""" try: res_details = self._get_res_details( '/cgi-bin/disk/iscsi_target_setting.cgi?', func='edit_lun', LUNIndex=lun_index, targetIndex=target_index, LUNEnable=0, sid=self.sid) finally: pass root = ET.fromstring(res_details['data']) if root.find('authPassed').text == '0': raise exception.VolumeBackendAPIException( data=_('Session id expired')) if root.find('result').text < '0': raise exception.VolumeBackendAPIException(data=_( 'Disable lun %(lun_index)s from target %(target_index)s failed' ) % {'lun_index': lun_index, 'target_index': target_index}) @_connection_checker def unmap_lun(self, lun_index, target_index): """Unmap lun from sepecific target.""" try: res_details = self._get_res_details( '/cgi-bin/disk/iscsi_target_setting.cgi?', func='remove_lun', LUNIndex=lun_index, targetIndex=target_index, sid=self.sid) finally: pass root = ET.fromstring(res_details['data']) if root.find('authPassed').text == '0': raise exception.VolumeBackendAPIException( data=_('Session id expired')) if root.find('result').text < '0': raise exception.VolumeBackendAPIException(data=_( 'Unmap lun %(lun_index)s from target %(target_index)s failed') % {'lun_index': lun_index, 'target_index': target_index}) @_connection_checker def get_iscsi_portal_info(self): """Get iscsi portal info.""" res_details = self._get_res_details( '/cgi-bin/disk/iscsi_portal_setting.cgi?', func='extra_get', iSCSI_portal='1', sid=self.sid) root = ET.fromstring(res_details['data']) if root.find('authPassed').text == '0': raise exception.VolumeBackendAPIException( data=_('Session id expired')) else: return res_details @_connection_checker def get_lun_info(self, **kwargs): """Execute get_lun_info API.""" res_details = self._get_res_details( '/cgi-bin/disk/iscsi_portal_setting.cgi?', func='extra_get', lunList='1', sid=self.sid) root = ET.fromstring(res_details['data']) if root.find('authPassed').text == '0': raise exception.VolumeBackendAPIException( data=_('Session id 
expired')) if (('LUNIndex' in kwargs) or ('LUNName' in kwargs) or ('LUNNAA' in kwargs)): lun_list = root.find('iSCSILUNList') lun_info_tree = lun_list.findall('LUNInfo') for lun in lun_info_tree: if ('LUNIndex' in kwargs): if (kwargs['LUNIndex'] == lun.find('LUNIndex').text): return lun elif ('LUNName' in kwargs): if (kwargs['LUNName'] == lun.find('LUNName').text): return lun elif ('LUNNAA' in kwargs): if (kwargs['LUNNAA'] == lun.find('LUNNAA').text): return lun return None @_connection_checker def get_one_lun_info(self, lunID): """Execute get_one_lun_info API.""" res_details = self._get_res_details( '/cgi-bin/disk/iscsi_portal_setting.cgi?', func='extra_get', lun_info='1', lunID=lunID, sid=self.sid) root = ET.fromstring(res_details['data']) if root.find('authPassed').text == '0': raise exception.VolumeBackendAPIException( data=_('Session id expired')) else: return res_details @_connection_checker def get_snapshot_info(self, **kwargs): """Execute get_snapshot_info API.""" res_details = self._get_res_details( '/cgi-bin/disk/snapshot.cgi?', func='extra_get', LUNIndex=kwargs['lun_index'], snapshot_list='1', snap_start='0', snap_count='100', sid=self.sid) root = ET.fromstring(res_details['data']) if root.find('authPassed').text == '0': raise exception.VolumeBackendAPIException( data=_('Session id expired')) if root.find('result').text < '0': raise exception.VolumeBackendAPIException( data=_('Unexpected response from QNAP API')) snapshot_list = root.find('SnapshotList') if snapshot_list is None: return None snapshot_tree = snapshot_list.findall('row') for snapshot in snapshot_tree: if (kwargs['snapshot_name'] == snapshot.find('snapshot_name').text): return snapshot return None @_connection_checker def create_snapshot_api(self, lun_id, snapshot_name): """Execute CGI to create snapshot from source lun.""" res_details = self._get_res_details( '/cgi-bin/disk/snapshot.cgi?', func='create_snapshot', lunID=lun_id, snapshot_name=snapshot_name, expire_min='0', vital='1', snapshot_type='0', sid=self.sid) root = ET.fromstring(res_details['data']) if root.find('authPassed').text == '0': raise exception.VolumeBackendAPIException( data=_('Session id expired')) if root.find('result').text < '0': raise exception.VolumeBackendAPIException( data=_('create snapshot failed')) @_connection_checker def delete_snapshot_api(self, snapshot_id): """Execute CGI to delete snapshot by snapshot id.""" res_details = self._get_res_details( '/cgi-bin/disk/snapshot.cgi?', func='del_snapshots', snapshotID=snapshot_id, sid=self.sid) root = ET.fromstring(res_details['data']) if root.find('authPassed').text == '0': raise exception.VolumeBackendAPIException( data=_('Session id expired')) # snapshot not exist if root.find('result').text == '-206021': return # lun not exist if root.find('result').text == '-200005': return if root.find('result').text < '0': raise exception.VolumeBackendAPIException( data=_('delete snapshot %s failed') % snapshot_id) @_connection_checker def clone_snapshot(self, snapshot_id, new_lunname): """Execute CGI to clone snapshot as unmap lun.""" res_details = self._get_res_details( '/cgi-bin/disk/snapshot.cgi?', func='clone_qsnapshot', by_lun='1', snapshotID=snapshot_id, new_name=new_lunname, sid=self.sid) root = ET.fromstring(res_details['data']) if root.find('authPassed').text == '0': raise exception.VolumeBackendAPIException( data=_('Session id expired')) if root.find('result').text < '0': raise exception.VolumeBackendAPIException(data=_( 'Clone lun %(lunname)s from snapshot %(snapshot_id)s failed' ) % 
{'lunname': new_lunname, 'snapshot_id': snapshot_id}) @_connection_checker def edit_lun(self, lun): """Extend lun.""" res_details = self._get_res_details( '/cgi-bin/disk/iscsi_lun_setting.cgi?', func='edit_lun', LUNName=lun['LUNName'], LUNCapacity=lun['LUNCapacity'], LUNIndex=lun['LUNIndex'], LUNThinAllocate=lun['LUNThinAllocate'], LUNPath=lun['LUNPath'], LUNStatus=lun['LUNStatus'], sid=self.sid) root = ET.fromstring(res_details['data']) if root.find('authPassed').text == '0': raise exception.VolumeBackendAPIException( data=_('Session id expired')) if root.find('result').text < '0': raise exception.VolumeBackendAPIException( data=_('Extend lun %s failed') % lun['LUNIndex']) @_connection_checker def get_all_iscsi_portal_setting(self): """Execute get_all_iscsi_portal_setting API.""" res_details = self._get_res_details( '/cgi-bin/disk/iscsi_portal_setting.cgi?', func='get_all', sid=self.sid) return res_details @_connection_checker def get_ethernet_ip(self, **kwargs): """Execute get_ethernet_ip API.""" res_details = self._get_res_details( '/cgi-bin/sys/sysRequest.cgi?', subfunc='net_setting', sid=self.sid) root = ET.fromstring(res_details['data']) if root.find('authPassed').text == '0': raise exception.VolumeBackendAPIException( data=_('Session id expired')) if ('type' in kwargs): return_ip = [] return_slot_id = [] ip_list = root.find('func').find('ownContent') ip_list_tree = ip_list.findall('IPInfo') for IP in ip_list_tree: ipv4 = (IP.find('IP').find('IP1').text + '.' + IP.find('IP').find('IP2').text + '.' + IP.find('IP').find('IP3').text + '.' + IP.find('IP').find('IP4').text) if ((kwargs['type'] == 'data') and (IP.find('isManagePort').text != '1') and (IP.find('status').text == '1')): return_slot_id.append(IP.find('interfaceSlotid').text) return_ip.append(ipv4) elif ((kwargs['type'] == 'manage') and (IP.find('isManagePort').text == '1') and (IP.find('status').text == '1')): return_ip.append(ipv4) elif ((kwargs['type'] == 'all') and (IP.find('status').text == '1')): return_ip.append(ipv4) return return_ip, return_slot_id @_connection_checker def get_target_info(self, target_index): """Get target info.""" res_details = self._get_res_details( '/cgi-bin/disk/iscsi_portal_setting.cgi?', func='extra_get', targetInfo=1, targetIndex=target_index, sid=self.sid) root = ET.fromstring(res_details['data']) LOG.debug('ES get_target_info.authPassed: (%s)', root.find('authPassed').text) if root.find('authPassed').text == '0': raise exception.VolumeBackendAPIException( data=_('Session id expired')) if root.find('result').text < '0': raise exception.VolumeBackendAPIException( data=_('Get target info failed')) target_list = root.find('targetInfo') target_tree = target_list.findall('row') for target in target_tree: if target_index == target.find('targetIndex').text: return target @_connection_checker def get_target_info_by_initiator(self, initiatorIQN): """Get target info by initiatorIQN.""" res_details = self._get_res_details( '/cgi-bin/disk/iscsi_portal_setting.cgi?', func='extra_get', initiatorIQN=initiatorIQN, sid=self.sid) root = ET.fromstring(res_details['data']) if root.find('authPassed').text == '0': raise exception.VolumeBackendAPIException( data=_('Session id expired')) if root.find('result').text < '0': return "", "" target = root.find('targetACL').find('row') targetIndex = target.find('targetIndex').text targetIQN = target.find('targetIQN').text return targetIndex, targetIQN class QnapAPIExecutorTS(QnapAPIExecutor): """Makes QNAP API calls for TS NAS.""" create_lun_lock = threading.Lock() 
delete_lun_lock = threading.Lock() lun_locks = {} @_connection_checker def create_lun(self, volume, pool_name, create_lun_name, reserve, ssd_cache, compress, dedup): """Create lun.""" self.create_lun_lock.acquire() lun_thin_allocate = '' if reserve: lun_thin_allocate = '1' else: lun_thin_allocate = '0' try: res_details = self._get_res_details( '/cgi-bin/disk/iscsi_lun_setting.cgi?', func='add_lun', FileIO='no', LUNThinAllocate=lun_thin_allocate, LUNName=create_lun_name, LUNPath=create_lun_name, poolID=pool_name, lv_ifssd='yes' if ssd_cache else 'no', LUNCapacity=volume['size'], LUNSectorSize='512', lv_threshold='80', sid=self.sid) finally: self.create_lun_lock.release() root = ET.fromstring(res_details['data']) if root.find('authPassed').text == '0': raise exception.VolumeBackendAPIException( data=_('Session id expired')) if root.find('result').text < '0': raise exception.VolumeBackendAPIException( data=_('Create volume %s failed') % volume['display_name']) return root.find('result').text @_connection_checker def delete_lun(self, vol_id, *args, **kwargs): """Execute delete lun API.""" self.delete_lun_lock.acquire() try: res_details = self._get_res_details( '/cgi-bin/disk/iscsi_lun_setting.cgi?', func='remove_lun', run_background='1', ha_sync='1', LUNIndex=vol_id, sid=self.sid) finally: self.delete_lun_lock.release() data_set_is_busy = "-205041" root = ET.fromstring(res_details['data']) if root.find('authPassed').text == '0': raise exception.VolumeBackendAPIException( data=_('Session id expired')) # dataset is busy, retry to delete if root.find('result').text == data_set_is_busy: return True if root.find('result').text < '0': msg = (_('Volume %s delete failed') % vol_id) raise exception.VolumeBackendAPIException(data=msg) return False @lockutils.synchronized('map_unmap_lun_ts') @_connection_checker def map_lun(self, lun_index, target_index): """Map lun to sepecific target.""" try: res_details = self._get_res_details( '/cgi-bin/disk/iscsi_target_setting.cgi?', func='add_lun', LUNIndex=lun_index, targetIndex=target_index, sid=self.sid) finally: pass root = ET.fromstring(res_details['data']) if root.find('authPassed').text == '0': raise exception.VolumeBackendAPIException( data=_('Session id expired')) if root.find('result').text < '0': raise exception.VolumeBackendAPIException(data=_( "Map lun %(lun_index)s to target %(target_index)s failed") % {'lun_index': str(lun_index), 'target_index': str(target_index)}) return root.find('result').text @_connection_checker def disable_lun(self, lun_index, target_index): """Disable lun from sepecific target.""" try: res_details = self._get_res_details( '/cgi-bin/disk/iscsi_target_setting.cgi?', func='edit_lun', LUNIndex=lun_index, targetIndex=target_index, LUNEnable=0, sid=self.sid) finally: pass root = ET.fromstring(res_details['data']) if root.find('authPassed').text == '0': raise exception.VolumeBackendAPIException( data=_('Session id expired')) if root.find('result').text < '0': raise exception.VolumeBackendAPIException(data=_( 'Disable lun %(lun_index)s from target %(target_index)s failed' ) % {'lun_index': lun_index, 'target_index': target_index}) @_connection_checker def unmap_lun(self, lun_index, target_index): """Unmap lun from sepecific target.""" try: res_details = self._get_res_details( '/cgi-bin/disk/iscsi_target_setting.cgi?', func='remove_lun', LUNIndex=lun_index, targetIndex=target_index, sid=self.sid) finally: pass # self.lun_locks[lun_index].release() root = ET.fromstring(res_details['data']) if root.find('authPassed').text == '0': raise 
exception.VolumeBackendAPIException( data=_('Session id expired')) if root.find('result').text < '0': raise exception.VolumeBackendAPIException(data=_( 'Unmap lun %(lun_index)s from target %(target_index)s failed') % {'lun_index': lun_index, 'target_index': target_index}) @_connection_checker def remove_target_init(self, target_iqn, init_iqn): """Remove target acl.""" res_details = self._get_res_details( '/cgi-bin/disk/iscsi_target_setting.cgi?', func='remove_init', targetIQN=target_iqn, initiatorIQN=init_iqn, ha_sync='1', sid=self.sid) root = ET.fromstring(res_details['data']) if root.find('authPassed').text == '0': raise exception.VolumeBackendAPIException( data=_('Session id expired')) if root.find('result').text < '0': raise exception.VolumeBackendAPIException( data=_('Remove target acl failed')) @_connection_checker def get_target_info(self, target_index): """Get nas target info.""" res_details = self._get_res_details( '/cgi-bin/disk/iscsi_portal_setting.cgi?', func='extra_get', targetInfo=1, targetIndex=target_index, ha_sync='1', sid=self.sid) root = ET.fromstring(res_details['data']) LOG.debug('TS get_target_info.authPassed: (%s)', root.find('authPassed').text) if root.find('authPassed').text == '0': raise exception.VolumeBackendAPIException( data=_('Session id expired')) if root.find('result').text < '0': raise exception.VolumeBackendAPIException( data=_('Get target info failed')) target_list = root.find('targetInfo') target_tree = target_list.findall('row') for target in target_tree: if target_index == target.find('targetIndex').text: return target @_connection_checker def get_ethernet_ip(self, **kwargs): """Execute get_ethernet_ip API.""" res_details = self._get_res_details( '/cgi-bin/sys/sysRequest.cgi?', subfunc='net_setting', sid=self.sid) root = ET.fromstring(res_details['data']) if root.find('authPassed').text == '0': raise exception.VolumeBackendAPIException( data=_('Session id expired')) if ('type' in kwargs): return_ip = [] ip_list = root.find('func').find('ownContent') ip_list_tree = ip_list.findall('IPInfo') for IP in ip_list_tree: ipv4 = (IP.find('IP').find('IP1').text + '.' + IP.find('IP').find('IP2').text + '.' + IP.find('IP').find('IP3').text + '.' 
+ IP.find('IP').find('IP4').text) if (IP.find('status').text == '1'): return_ip.append(ipv4) return return_ip, None @_connection_checker def get_snapshot_info(self, **kwargs): """Execute get_snapshot_info API.""" res_details = self._get_res_details( '/cgi-bin/disk/snapshot.cgi?', func='extra_get', LUNIndex=kwargs['lun_index'], smb_snapshot_list='1', smb_snapshot='1', snapshot_list='1', sid=self.sid) root = ET.fromstring(res_details['data']) if root.find('authPassed').text == '0': raise exception.VolumeBackendAPIException( data=_('Session id expired')) if root.find('result').text < '0': raise exception.VolumeBackendAPIException( data=_('Unexpected response from QNAP API')) snapshot_list = root.find('SnapshotList') if snapshot_list is None: return None snapshot_tree = snapshot_list.findall('row') for snapshot in snapshot_tree: if (kwargs['snapshot_name'] == snapshot.find('snapshot_name').text): return snapshot return None @lockutils.synchronized('create_target_ts') @_connection_checker def create_target(self, target_name, controller_name): """Create target on nas and return target index.""" res_details = self._get_res_details( '/cgi-bin/disk/iscsi_target_setting.cgi?', func='add_target', targetName=target_name, targetAlias=target_name, bTargetDataDigest='0', bTargetHeaderDigest='0', bTargetClusterEnable='1', sid=self.sid) root = ET.fromstring(res_details['data']) if root.find('authPassed').text == '0': raise exception.VolumeBackendAPIException( data=_('Session id expired')) if root.find('result').text < '0': raise exception.VolumeBackendAPIException( data=_('Create target failed')) root = ET.fromstring(res_details['data']) targetIndex = root.find('result').text return targetIndex @_connection_checker def delete_target(self, target_index): """Delete target on nas.""" res_details = self._get_res_details( '/cgi-bin/disk/iscsi_target_setting.cgi?', func='remove_target', targetIndex=target_index, sid=self.sid) root = ET.fromstring(res_details['data']) if root.find('authPassed').text == '0': raise exception.VolumeBackendAPIException( data=_('Session id expired')) if root.find('result').text != target_index: raise exception.VolumeBackendAPIException( data=_('Delete target failed')) class QnapAPIExecutorTES(QnapAPIExecutor): """Makes QNAP API calls for TES NAS.""" tes_create_lun_lock = threading.Lock() @_connection_checker def create_lun(self, volume, pool_name, create_lun_name, reserve, ssd_cache, compress, dedup): """Create lun.""" self.tes_create_lun_lock.acquire() lun_thin_allocate = '' if reserve: lun_thin_allocate = '1' else: lun_thin_allocate = '0' try: res_details = self._get_res_details( '/cgi-bin/disk/iscsi_lun_setting.cgi?', func='add_lun', FileIO='no', LUNThinAllocate=lun_thin_allocate, LUNName=create_lun_name, LUNPath=create_lun_name, poolID=pool_name, lv_ifssd='yes' if ssd_cache else 'no', compression='1' if compress else '0', dedup='sha256' if dedup else 'off', sync='disabled', LUNCapacity=volume['size'], lv_threshold='80', sid=self.sid) finally: self.tes_create_lun_lock.release() root = ET.fromstring(res_details['data']) if root.find('authPassed').text == '0': raise exception.VolumeBackendAPIException( data=_('Session id expired')) if root.find('result').text < '0': raise exception.VolumeBackendAPIException( data=_('Create volume %s failed') % volume['display_name']) return root.find('result').text @_connection_checker def get_ethernet_ip(self, **kwargs): """Execute get_ethernet_ip API.""" res_details = self._get_res_details( '/cgi-bin/sys/sysRequest.cgi?', subfunc='net_setting', 
sid=self.sid) root = ET.fromstring(res_details['data']) if root.find('authPassed').text == '0': raise exception.VolumeBackendAPIException( data=_('Session id expired')) if ('type' in kwargs): return_ip = [] ip_list = root.find('func').find('ownContent') ip_list_tree = ip_list.findall('IPInfo') for IP in ip_list_tree: ipv4 = (IP.find('IP').find('IP1').text + '.' + IP.find('IP').find('IP2').text + '.' + IP.find('IP').find('IP3').text + '.' + IP.find('IP').find('IP4').text) if (IP.find('status').text == '1'): return_ip.append(ipv4) return return_ip, None class Util(object): _dictCondRetriveFormCache = {} _dictCacheRetriveFormCache = {} _condRetriveFormCache = threading.Condition() @classmethod def retriveFormCache(cls, lockKey, func, keepTime=0): cond = None cls._condRetriveFormCache.acquire() try: if (lockKey not in cls._dictCondRetriveFormCache): cls._dictCondRetriveFormCache[lockKey] = threading.Condition() cond = cls._dictCondRetriveFormCache[lockKey] finally: cls._condRetriveFormCache.release() cond.acquire() try: if (lockKey not in cls._dictCacheRetriveFormCache): # store (startTime, result) in cache. result = func() cls._dictCacheRetriveFormCache[lockKey] = (time.time(), result) startTime, result = cls._dictCacheRetriveFormCache[lockKey] # check if the cache is time-out if ((time.time() - startTime) > keepTime): result = func() cls._dictCacheRetriveFormCache[lockKey] = (time.time(), result) return result finally: cond.release() @classmethod def retry(cls, func, retry=0, retryTime=30): if (retry == 0): retry = 9999 # max is 9999 times if (retryTime == 0): retryTime = 9999 # max is 9999 seconds startTime = time.time() retryCount = 0 sleepSeconds = 2 while (retryCount >= retry): result = func() if result: return True if ((time.time() - startTime) <= retryTime): return False # more than retry times eventlet.sleep(sleepSeconds) sleepSeconds = sleepSeconds + 2 retryCount = retryCount + 1 return False # more than retryTime ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/drivers/quobyte.py0000664000175000017500000010155700000000000021373 0ustar00zuulzuul00000000000000# Copyright (c) 2014 Quobyte Inc. # Copyright (c) 2013 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import errno import fnmatch import os import shutil from oslo_concurrency import processutils from oslo_config import cfg from oslo_log import log as logging from oslo_utils import fileutils try: import psutil except ImportError: psutil = None from cinder import compute from cinder import coordination from cinder import exception from cinder.i18n import _ from cinder.image import image_utils from cinder import interface from cinder import utils from cinder.volume import configuration from cinder.volume.drivers import remotefs as remotefs_drv VERSION = '1.1.13' LOG = logging.getLogger(__name__) volume_opts = [ cfg.StrOpt('quobyte_volume_url', help=('Quobyte URL to the Quobyte volume using e.g. 
a DNS SRV' ' record (preferred) or a host list (alternatively) like' ' quobyte://, /')), cfg.StrOpt('quobyte_client_cfg', help=('Path to a Quobyte Client configuration file.')), cfg.BoolOpt('quobyte_sparsed_volumes', default=True, help=('Create volumes as sparse files which take no space.' ' If set to False, volume is created as regular file.')), cfg.BoolOpt('quobyte_qcow2_volumes', default=True, help=('Create volumes as QCOW2 files rather than raw files.')), cfg.StrOpt('quobyte_mount_point_base', default='$state_path/mnt', help=('Base dir containing the mount point' ' for the Quobyte volume.')), cfg.BoolOpt('quobyte_volume_from_snapshot_cache', default=False, help=('Create a cache of volumes from merged snapshots to ' 'speed up creation of multiple volumes from a single ' 'snapshot.')), cfg.BoolOpt('quobyte_overlay_volumes', default=False, help=('Create new volumes from the volume_from_snapshot_cache' ' by creating overlay files instead of full copies. This' ' speeds up the creation of volumes from this cache.' ' This feature requires the options' ' quobyte_qcow2_volumes and' ' quobyte_volume_from_snapshot_cache to be set to' ' True. If one of these is set to False this option is' ' ignored.')) ] CONF = cfg.CONF CONF.register_opts(volume_opts, group=configuration.SHARED_CONF_GROUP) @interface.volumedriver class QuobyteDriver(remotefs_drv.RemoteFSSnapDriverDistributed): """Cinder driver for Quobyte USP. Volumes are stored as files on the mounted Quobyte volume. The hypervisor will expose them as block devices. Unlike other similar drivers, this driver uses exactly one Quobyte volume because Quobyte USP is a distributed storage system. To add or remove capacity, administrators can add or remove storage servers to/from the volume. For different types of volumes e.g., SSD vs. rotating disks, use multiple backends in Cinder. Note: To be compliant with the inherited RemoteFSSnapDriver, Quobyte volumes are also referred to as shares. .. code-block:: none Version history: 1.0 - Initial driver. 1.1 - Adds optional insecure NAS settings 1.1.1 - Removes getfattr calls from driver 1.1.2 - Fixes a bug in the creation of cloned volumes 1.1.3 - Explicitely mounts Quobyte volumes w/o xattrs 1.1.4 - Fixes capability to configure redundancy in quobyte_volume_url 1.1.5 - Enables extension of volumes with snapshots 1.1.6 - Optimizes volume creation 1.1.7 - Support fuse subtype based Quobyte mount validation 1.1.8 - Adds optional snapshot merge caching 1.1.9 - Support for Qemu >= 2.10.0 1.1.10 - Adds overlay based volumes for snapshot merge caching 1.1.11 - NAS secure ownership & permissions are now False by default 1.1.12 - Ensure the currently configured volume url is always used 1.1.13 - Allow creating volumes from snapshots in state 'backing-up' """ driver_volume_type = 'quobyte' driver_prefix = 'quobyte' volume_backend_name = 'Quobyte' VERSION = VERSION # ThirdPartySystems wiki page CI_WIKI_NAME = "Quobyte_CI" # driver is subject to removal if CI is not fixed SUPPORTED = False QUOBYTE_VOLUME_SNAP_CACHE_DIR_NAME = "volume_from_snapshot_cache" def __init__(self, execute=processutils.execute, *args, **kwargs): super(QuobyteDriver, self).__init__(*args, **kwargs) self.configuration.append_config_values(volume_opts) # Used to manage snapshots which are currently attached to a VM. 
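# NOTE (editorial sketch, not part of the driver): the option help above and
# do_setup() further below document that quobyte_overlay_volumes only takes
# effect when quobyte_qcow2_volumes and quobyte_volume_from_snapshot_cache
# are both True; otherwise the overlay setting is ignored with a warning.
# The same rule as a minimal standalone check, assuming a namespace-style
# configuration object with those three boolean attributes:

def overlay_volumes_effective(conf):
    """Return True only if overlay volumes would actually be used (sketch)."""
    return bool(conf.quobyte_overlay_volumes
                and conf.quobyte_qcow2_volumes
                and conf.quobyte_volume_from_snapshot_cache)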
self._nova = None @staticmethod def get_driver_options(): return volume_opts def _create_regular_file(self, path, size): """Creates a regular file of given size in GiB using fallocate.""" self._fallocate_file(path, size) @coordination.synchronized('{self.driver_prefix}-{snapshot.id}') def _delete_snapshot(self, snapshot): cache_path = self._local_volume_from_snap_cache_path(snapshot) if os.access(cache_path, os.F_OK): self._remove_from_vol_cache( cache_path, ".parent-" + snapshot.id, snapshot.volume) super(QuobyteDriver, self)._delete_snapshot(snapshot) def _ensure_volume_from_snap_cache(self, mount_path): """This expects the Quobyte volume to be mounted & available""" cache_path = os.path.join(mount_path, self.QUOBYTE_VOLUME_SNAP_CACHE_DIR_NAME) if not os.access(cache_path, os.F_OK): LOG.info("Volume from snapshot cache directory does not exist, " "creating the directory %(volcache)s", {'volcache': cache_path}) fileutils.ensure_tree(cache_path) if not (os.access(cache_path, os.R_OK) and os.access(cache_path, os.W_OK) and os.access(cache_path, os.X_OK)): msg = _("Insufficient permissions for Quobyte volume from " "snapshot cache directory at %(cpath)s. Please update " "permissions.") % {'cpath': cache_path} raise exception.VolumeDriverException(msg) LOG.debug("Quobyte volume from snapshot cache directory validated ok") def _fallocate_file(self, path, size): """Calls fallocate on the given path with the given size in GiB.""" self._execute('fallocate', '-l', '%sGiB' % size, path, run_as_root=self._execute_as_root) def _get_backing_chain_for_path(self, volume, path): raw_chain = super(QuobyteDriver, self)._get_backing_chain_for_path( volume, path) # NOTE(kaisers): if the last element resides in the cache snip it off, # as the RemoteFS driver cannot handle it. if len(raw_chain) and (self.QUOBYTE_VOLUME_SNAP_CACHE_DIR_NAME in raw_chain[-1]['filename']): del raw_chain[-1] return raw_chain def _local_volume_from_snap_cache_path(self, snapshot): path_to_disk = os.path.join( self._local_volume_dir(snapshot.volume), self.QUOBYTE_VOLUME_SNAP_CACHE_DIR_NAME, snapshot.id) return path_to_disk def _qemu_img_info_base(self, path, volume_name, basedir, force_share=True, run_as_root=False): # NOTE(kaisers): This uses a specialized backing file template in # order to allow for backing files in the volume_from_snapshot_cache. backing_file_template = remotefs_drv.BackingFileTemplate( "(#basedir/[0-9a-f]+/)?(" "#volname(.(tmp-snap-)?[0-9a-f-]+)?#valid_ext|" "%(cache)s/(tmp-snap-)?[0-9a-f-]+(.(child-|parent-)" "[0-9a-f-]+)?)$" % { 'cache': self.QUOBYTE_VOLUME_SNAP_CACHE_DIR_NAME }) return super(QuobyteDriver, self)._qemu_img_info_base( path, volume_name, basedir, ext_bf_template=backing_file_template, force_share=True) def _remove_from_vol_cache(self, cache_file_path, ref_suffix, volume): """Removes a reference and possibly volume from the volume cache This method removes the ref_id reference (soft link) from the cache. If no other references exist the cached volume itself is removed, too. :param cache_file_path file path to the volume in the cache :param ref_suffix The id based suffix of the cache file reference :param volume The volume whose share defines the cache to address """ # NOTE(kaisers): As the cache_file_path may be a relative path we use # cache dir and file name to ensure absolute paths in all operations. 
cache_path = os.path.join(self._local_volume_dir(volume), self.QUOBYTE_VOLUME_SNAP_CACHE_DIR_NAME) cache_file_name = os.path.basename(cache_file_path) # delete the reference LOG.debug("Deleting cache reference %(cfp)s%(rs)s", {"cfp": cache_file_path, "rs": ref_suffix}) fileutils.delete_if_exists(os.path.join(cache_path, cache_file_name + ref_suffix)) # If no other reference exists, remove the cache entry. for file in os.listdir(cache_path): if fnmatch.fnmatch(file, cache_file_name + ".*"): # found another reference file, keep cache entry LOG.debug("Cached volume %(file)s still has at least one " "reference: %(ref)s", {"file": cache_file_name, "ref": file}) return # No other reference found, remove cache entry LOG.debug("Removing cached volume %(cvol)s as no more references for " "this cached volume exist.", {"cvol": os.path.join(cache_path, cache_file_name)}) fileutils.delete_if_exists(os.path.join(cache_path, cache_file_name)) def _strip_qb_protocol(self, url): # Strip quobyte:// from the URL protocol = self.driver_volume_type + "://" if url.startswith(protocol): return url[len(protocol):] return url def do_setup(self, context): """Any initialization the volume driver does while starting.""" super(QuobyteDriver, self).do_setup(context) self.set_nas_security_options(is_new_cinder_install=False) self.shares = {} # address : options self._nova = compute.API() self.base = self.configuration.quobyte_mount_point_base if self.configuration.quobyte_overlay_volumes: if not (self.configuration.quobyte_qcow2_volumes and self.configuration.quobyte_volume_from_snapshot_cache): self.configuration.quobyte_overlay_volumes = False LOG.warning("Configuration of quobyte_qcow2_volumes and " "quobyte_volume_from_snapshot_cache is " "incompatible with " "quobyte_overlay_volumes=True. " "quobyte_overlay_volumes " "setting will be ignored.") def check_for_setup_error(self): if psutil is None: msg = _("The python 'psutil' module is required by this driver.") LOG.error(msg) raise exception.VolumeDriverException(msg) if not self.configuration.quobyte_volume_url: msg = (_("There's no Quobyte volume configured (%s). Example:" " quobyte:///") % 'quobyte_volume_url') LOG.warning(msg) raise exception.VolumeDriverException(msg) # Check if mount.quobyte is installed try: self._execute('mount.quobyte', check_exit_code=False, run_as_root=False) except OSError as exc: if exc.errno == errno.ENOENT: raise exception.VolumeDriverException( 'mount.quobyte is not installed') else: raise def set_nas_security_options(self, is_new_cinder_install): self._execute_as_root = False LOG.debug("nas_secure_file_* settings are %(ops)s (ownership) and " "%(perm)s (permissions).", {'ops': self.configuration.nas_secure_file_operations, 'perm': self.configuration.nas_secure_file_permissions} ) if self.configuration.nas_secure_file_operations == 'auto': LOG.debug("Mapping 'auto' value to 'false' for" " nas_secure_file_operations.") self.configuration.nas_secure_file_operations = 'false' if self.configuration.nas_secure_file_permissions == 'auto': LOG.debug("Mapping 'auto' value to 'false' for" " nas_secure_file_permissions.") self.configuration.nas_secure_file_permissions = 'false' if self.configuration.nas_secure_file_operations == 'false': LOG.warning("The NAS file operations will be run as " "root, allowing root level access at the storage " "backend.") self._execute_as_root = True else: LOG.info("The NAS file operations will be run as" " non privileged user in secure mode. 
Please" " ensure your libvirtd settings have been configured" " accordingly (see section 'OpenStack' in the Quobyte" " Manual.") if self.configuration.nas_secure_file_permissions == 'false': LOG.warning("The NAS file permissions mode will be 666 " "(allowing other/world read & write access).") def _qemu_img_info(self, path, volume_name, force_share=True): return self._qemu_img_info_base( path, volume_name, self.configuration.quobyte_mount_point_base, force_share=True) @utils.synchronized('quobyte', external=False) def create_cloned_volume(self, volume, src_vref): """Creates a clone of the specified volume.""" return self._create_cloned_volume(volume, src_vref, src_vref.obj_context) @coordination.synchronized( '{self.driver_prefix}-{snapshot.id}-{volume.id}') def _create_volume_from_snapshot(self, volume, snapshot): """Creates a volume from a snapshot. Snapshot must not be the active snapshot. (offline) """ LOG.debug('Creating volume %(vol)s from snapshot %(snap)s', {'vol': volume.id, 'snap': snapshot.id}) if snapshot.status not in ['available', 'backing-up']: msg = _('Snapshot status must be "available" or "backing-up" to ' 'clone. But is: %(status)s') % {'status': snapshot.status} raise exception.InvalidSnapshot(msg) self._ensure_shares_mounted() volume.provider_location = self._find_share(volume) self._copy_volume_from_snapshot(snapshot, volume, volume.size) return {'provider_location': volume.provider_location} @utils.synchronized('quobyte', external=False) def create_volume(self, volume): return super(QuobyteDriver, self).create_volume(volume) @utils.synchronized('quobyte', external=False) def create_volume_from_snapshot(self, volume, snapshot): return self._create_volume_from_snapshot(volume, snapshot) @coordination.synchronized('{self.driver_prefix}-{volume.id}') def _copy_volume_from_snapshot(self, snapshot, volume, volume_size): """Copy data from snapshot to destination volume. This is done with a qemu-img convert to raw/qcow2 from the snapshot qcow2. If the quobyte_volume_from_snapshot_cache is active the result is written into the cache and all volumes created from this snapshot id are created directly from the cache. """ LOG.debug("snapshot: %(snap)s, volume: %(vol)s, ", {'snap': snapshot.id, 'vol': volume.id, 'size': volume_size}) info_path = self._local_path_volume_info(snapshot.volume) snap_info = self._read_info_file(info_path) vol_path = self._local_volume_dir(snapshot.volume) forward_file = snap_info[snapshot.id] forward_path = os.path.join(vol_path, forward_file) self._ensure_shares_mounted() # Find the file which backs this file, which represents the point # when this snapshot was created. 
img_info = self._qemu_img_info(forward_path, snapshot.volume.name) path_to_snap_img = os.path.join(vol_path, img_info.backing_file) path_to_new_vol = self._local_path_volume(volume) path_to_cached_vol = self._local_volume_from_snap_cache_path(snapshot) LOG.debug("will copy from snapshot at %s", path_to_snap_img) if self.configuration.quobyte_qcow2_volumes: out_format = 'qcow2' else: out_format = 'raw' if not self.configuration.quobyte_volume_from_snapshot_cache: LOG.debug("Creating direct copy from snapshot") image_utils.convert_image(path_to_snap_img, path_to_new_vol, out_format, run_as_root=self._execute_as_root) else: # create the volume via volume cache if not os.access(path_to_cached_vol, os.F_OK): LOG.debug("Caching volume %(volpath)s from snapshot.", {'volpath': path_to_cached_vol}) image_utils.convert_image(path_to_snap_img, path_to_cached_vol, out_format, run_as_root=self._execute_as_root) if self.configuration.quobyte_overlay_volumes: # NOTE(kaisers): Create a parent symlink to track the # existence of the parent os.symlink(path_to_snap_img, path_to_cached_vol + '.parent-' + snapshot.id) if self.configuration.quobyte_overlay_volumes: self._create_overlay_volume_from_snapshot(volume, snapshot, volume_size, out_format) else: # Copy volume from cache LOG.debug("Copying volume %(volpath)s from cache", {'volpath': path_to_new_vol}) shutil.copyfile(path_to_cached_vol, path_to_new_vol) # Note(kaisers): As writes beyond EOF are sequentialized with # FUSE we call fallocate here to optimize performance: self._fallocate_file(path_to_new_vol, volume_size) self._set_rw_permissions(path_to_new_vol) def _create_overlay_volume_from_snapshot(self, volume, snapshot, volume_size, out_format): """Creates an overlay volume based on a parent in the cache Besides the overlay volume this also creates a softlink in the cache that links to the child volume file of the cached volume. This can be used to track the cached volumes child volume and marks the fact that this child still exists. The softlink is deleted when the child is deleted. """ rel_path = os.path.join( self.QUOBYTE_VOLUME_SNAP_CACHE_DIR_NAME, snapshot.id) command = ['qemu-img', 'create', '-f', 'qcow2', '-o', 'backing_file=%s,backing_fmt=%s' % (rel_path, out_format), self._local_path_volume(volume), "%dG" % volume_size] self._execute(*command, run_as_root=self._execute_as_root) os.symlink(self._local_path_volume(volume), self._local_volume_from_snap_cache_path(snapshot) + '.child-' + volume.id) @coordination.synchronized('{self.driver_prefix}-{volume.id}') def delete_volume(self, volume): """Deletes a logical volume.""" if not volume.provider_location: LOG.warning('Volume %s does not have provider_location ' 'specified, skipping', volume.name) return self._ensure_share_mounted(volume.provider_location) volume_dir = self._local_volume_dir(volume) active_image = self.get_active_image_from_info(volume) mounted_path = os.path.join(volume_dir, active_image) if os.access(self.local_path(volume), os.F_OK): img_info = self._qemu_img_info(self.local_path(volume), volume.name) if (img_info.backing_file and (self.QUOBYTE_VOLUME_SNAP_CACHE_DIR_NAME in img_info.backing_file)): # This is an overlay volume, call cache cleanup self._remove_from_vol_cache(img_info.backing_file, ".child-" + volume.id, volume) self._execute('rm', '-f', mounted_path, run_as_root=self._execute_as_root) # If an exception (e.g. 
timeout) occurred during delete_snapshot, the # base volume may linger around, so just delete it if it exists base_volume_path = self._local_path_volume(volume) fileutils.delete_if_exists(base_volume_path) info_path = self._local_path_volume_info(volume) fileutils.delete_if_exists(info_path) @utils.synchronized('quobyte', external=False) def create_snapshot(self, snapshot): """Apply locking to the create snapshot operation.""" return self._create_snapshot(snapshot) @utils.synchronized('quobyte', external=False) def initialize_connection(self, volume, connector): """Allow connection to connector and return connection info.""" # Find active qcow2 file active_file = self.get_active_image_from_info(volume) path = '%s/%s/%s' % (self.configuration.quobyte_mount_point_base, self._get_hash_str(volume.provider_location), active_file) data = {'export': volume.provider_location, 'name': active_file} if volume.provider_location in self.shares: data['options'] = self.shares[volume.provider_location] # Test file for raw vs. qcow2 format info = self._qemu_img_info(path, volume.name) data['format'] = info.file_format if data['format'] not in ['raw', 'qcow2']: msg = _('%s must be a valid raw or qcow2 image.') % path raise exception.InvalidVolume(msg) return { 'driver_volume_type': 'quobyte', 'data': data, 'mount_point_base': self.configuration.quobyte_mount_point_base } @utils.synchronized('quobyte', external=False) def copy_volume_to_image(self, context, volume, image_service, image_meta): self._copy_volume_to_image(context, volume, image_service, image_meta) @utils.synchronized('quobyte', external=False) def extend_volume(self, volume, size_gb): if self._is_volume_attached(volume): # NOTE(kaisers): no attached extensions until #1870367 is fixed msg = (_("Cannot extend volume %s while it is attached.") % volume['id']) raise exception.ExtendVolumeError(msg) volume_path = self.local_path(volume) info = self._qemu_img_info(volume_path, volume.name) backing_fmt = info.file_format if backing_fmt not in ['raw', 'qcow2']: msg = _('Unrecognized backing format: %s') raise exception.InvalidVolume(msg % backing_fmt) # qemu-img can resize both raw and qcow2 files active_path = os.path.join( self._get_mount_point_for_share(volume.provider_location), self.get_active_image_from_info(volume)) image_utils.resize_image(active_path, size_gb) def _do_create_volume(self, volume): """Create a volume on given Quobyte volume. :param volume: volume reference """ volume_path = self.local_path(volume) volume_size = volume.size if self.configuration.quobyte_qcow2_volumes: self._create_qcow2_file(volume_path, volume_size) else: if self.configuration.quobyte_sparsed_volumes: self._create_sparsed_file(volume_path, volume_size) else: self._create_regular_file(volume_path, volume_size) self._set_rw_permissions_for_all(volume_path) def _load_shares_config(self, share_file=None): """Put 'quobyte_volume_url' into the 'shares' list. :param share_file: string, Not used because the user has to specify the Quobyte volume directly. """ self.shares = {} url = self._strip_qb_protocol(self.configuration.quobyte_volume_url) self.shares[url] = None # None = No extra mount options. LOG.debug("Quobyte Volume URL set to: %s", self.shares) def _ensure_share_mounted(self, quobyte_volume): """Mount Quobyte volume. 
:param quobyte_volume: string """ mount_path = self._get_mount_point_for_share(quobyte_volume) # NOTE(kaisers): Always use the currently configured volume url self._mount_quobyte( self._strip_qb_protocol(self.configuration.quobyte_volume_url), mount_path, ensure=True) @utils.synchronized('quobyte_ensure', external=False) def _ensure_shares_mounted(self): """Mount the Quobyte volume. Used for example by RemoteFsDriver._update_volume_stats """ self._mounted_shares = [] self._load_shares_config() for share in self.shares: try: self._ensure_share_mounted(share) self._mounted_shares.append(share) except Exception as exc: LOG.warning('Exception during mounting %s', exc) LOG.debug('Available shares %s', self._mounted_shares) def _find_share(self, volume): """Returns the mounted Quobyte volume. Multiple shares are not supported because the virtualization of multiple storage devices is taken care of at the level of Quobyte USP. For different types of volumes e.g., SSD vs. rotating disks, use multiple backends in Cinder. :param volume: the volume to be created. """ if not self._mounted_shares: raise exception.NotFound() assert len(self._mounted_shares) == 1, 'There must be exactly' \ ' one Quobyte volume.' target_volume = self._mounted_shares[0] LOG.debug('Selected %s as target Quobyte volume.', target_volume) return target_volume def _get_mount_point_for_share(self, quobyte_volume): """Return mount point for Quobyte volume. :param quobyte_volume: Example: storage-host/openstack-volumes """ return os.path.join(self.configuration.quobyte_mount_point_base, self._get_hash_str(quobyte_volume)) # open() wrapper to mock reading from /proc/mount. @staticmethod def read_proc_mount(): # pragma: no cover return open('/proc/mounts') def _mount_quobyte(self, quobyte_volume, mount_path, ensure=False): """Mount Quobyte volume to mount path.""" mounted = False with QuobyteDriver.read_proc_mount() as proc_mount: for line in proc_mount: if line.split()[1] == mount_path: mounted = True break if mounted: try: os.stat(mount_path) except OSError as exc: if exc.errno == errno.ENOTCONN: mounted = False try: LOG.info('Fixing previous mount %s which was not' ' unmounted correctly.', mount_path) self._execute('umount.quobyte', mount_path, run_as_root=self._execute_as_root) except processutils.ProcessExecutionError as exc: LOG.warning("Failed to unmount previous mount: " "%s", exc) else: # TODO(quobyte): Extend exc analysis in here? 
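# NOTE (editorial sketch, not part of the driver): _mount_quobyte() above
# detects an existing mount by comparing the second whitespace-separated
# field of each /proc/mounts line (the mount point) against mount_path.
# A hypothetical /proc/mounts entry for a mounted Quobyte volume and the
# comparison the loop performs (paths and volume name are made up):

proc_mounts_line = ('quobyte@storage-host/openstack-volumes '
                    '/var/lib/cinder/mnt/volhash fuse.quobyte '
                    'rw,nosuid,nodev 0 0')
mount_path = '/var/lib/cinder/mnt/volhash'
assert proc_mounts_line.split()[1] == mount_path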
LOG.warning("Unknown error occurred while checking " "mount point: %s Trying to continue.", exc) if not mounted: if not os.path.isdir(mount_path): fileutils.ensure_tree(mount_path) command = ['mount.quobyte', '--disable-xattrs', quobyte_volume, mount_path] if self.configuration.quobyte_client_cfg: command.extend(['-c', self.configuration.quobyte_client_cfg]) try: LOG.info('Mounting volume: %s ...', quobyte_volume) self._execute(*command, run_as_root=self._execute_as_root) LOG.info('Mounting volume: %s succeeded', quobyte_volume) mounted = True except processutils.ProcessExecutionError as exc: if ensure and 'already mounted' in exc.stderr: LOG.warning("%s is already mounted", quobyte_volume) mounted = True else: raise if mounted: self._validate_volume(mount_path) if self.configuration.quobyte_volume_from_snapshot_cache: self._ensure_volume_from_snap_cache(mount_path) def _validate_volume(self, mount_path): """Runs a number of tests on the expect Quobyte mount""" partitions = psutil.disk_partitions(all=True) for p in partitions: if mount_path == p.mountpoint: if (p.device.startswith("quobyte@") or (p.fstype == "fuse.quobyte")): try: statresult = os.stat(mount_path) if statresult.st_size == 0: # client looks healthy if not os.access(mount_path, os.W_OK | os.X_OK): LOG.warning("Volume is not writable. " "Please broaden the file" " permissions." " Mount: %s", mount_path) return # we're happy here else: msg = (_("The mount %(mount_path)s is not a " "valid Quobyte volume. Stale mount?") % {'mount_path': mount_path}) raise exception.VolumeDriverException(msg) except Exception as exc: msg = (_("The mount %(mount_path)s is not a valid" " Quobyte volume. Error: %(exc)s . " " Possibly a Quobyte client crash?") % {'mount_path': mount_path, 'exc': exc}) raise exception.VolumeDriverException(msg) else: msg = (_("The mount %(mount_path)s is not a valid" " Quobyte volume according to partition list.") % {'mount_path': mount_path}) raise exception.VolumeDriverException(msg) msg = (_("No matching Quobyte mount entry for %(mount_path)s" " could be found for validation in partition list.") % {'mount_path': mount_path}) raise exception.VolumeDriverException(msg) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/drivers/rbd.py0000664000175000017500000034076600000000000020461 0ustar00zuulzuul00000000000000# Copyright 2013 OpenStack Foundation # Copyright 2022 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""RADOS Block Device Driver""" import binascii import errno import json import math import os import tempfile import typing from typing import Any, Optional, Union import urllib.parse from castellan import key_manager from eventlet import tpool from os_brick.initiator import linuxrbd from oslo_config import cfg from oslo_log import log as logging from oslo_service import loopingcall from oslo_utils import encodeutils from oslo_utils import excutils from oslo_utils import fileutils from oslo_utils import units try: import rados import rbd except ImportError: rados = None rbd = None from cinder.common import constants from cinder import context from cinder import exception from cinder.i18n import _ from cinder.image import image_utils from cinder import interface from cinder import objects from cinder.objects.backup import Backup from cinder.objects import fields from cinder.objects.snapshot import Snapshot from cinder.objects.volume import Volume from cinder.objects.volume_type import VolumeType from cinder import utils from cinder.volume import configuration from cinder.volume import driver from cinder.volume import qos_specs from cinder.volume import volume_utils LOG = logging.getLogger(__name__) RBD_OPTS = [ cfg.StrOpt('rbd_cluster_name', default='ceph', help='The name of ceph cluster'), cfg.StrOpt('rbd_pool', default='rbd', help='The RADOS pool where RBD volumes are stored'), cfg.StrOpt('rbd_user', help='The RADOS client name for accessing RBD volumes ' '- only set when using cephx authentication'), cfg.StrOpt('rbd_ceph_conf', default='', # default determined by librados help='Path to the ceph configuration file'), cfg.BoolOpt('rbd_flatten_volume_from_snapshot', default=False, help='Flatten volumes created from snapshots to remove ' 'dependency from volume to snapshot'), cfg.StrOpt('rbd_secret_uuid', help='The libvirt uuid of the secret for the rbd_user ' 'volumes. Defaults to the cluster FSID.'), cfg.IntOpt('rbd_max_clone_depth', default=5, help='Maximum number of nested volume clones that are ' 'taken before a flatten occurs. Set to 0 to disable ' 'cloning. Note: lowering this value will not affect ' 'existing volumes whose clone depth exceeds the new ' 'value.'), cfg.IntOpt('rbd_store_chunk_size', default=4, help='Volumes will be chunked into objects of this size ' '(in megabytes).'), cfg.IntOpt('rados_connect_timeout', default=-1, help='Timeout value (in seconds) used when connecting to ' 'ceph cluster. If value < 0, no timeout is set and ' 'default librados value is used.'), cfg.IntOpt('rados_connection_retries', default=3, help='Number of retries if connection to ceph cluster ' 'failed.'), cfg.IntOpt('rados_connection_interval', default=5, help='Interval value (in seconds) between connection ' 'retries to ceph cluster.'), cfg.IntOpt('replication_connect_timeout', default=5, help='Timeout value (in seconds) used when connecting to ' 'ceph cluster to do a demotion/promotion of volumes. ' 'If value < 0, no timeout is set and default librados ' 'value is used.'), cfg.BoolOpt('report_dynamic_total_capacity', default=True, help='Set to True for driver to report total capacity as a ' 'dynamic value (used + current free) and to False to ' 'report a static value (quota max bytes if defined and ' 'global size of cluster if not).'), cfg.BoolOpt('rbd_exclusive_cinder_pool', default=True, help="Set to False if the pool is shared with other usages. 
" "On exclusive use driver won't query images' provisioned " "size as they will match the value calculated by the " "Cinder core code for allocated_capacity_gb. This " "reduces the load on the Ceph cluster as well as on the " "volume service. On non exclusive use driver will query " "the Ceph cluster for per image used disk, this is an " "intensive operation having an independent request for " "each image."), cfg.BoolOpt('enable_deferred_deletion', default=False, help='Enable deferred deletion. Upon deletion, volumes are ' 'tagged for deletion but will only be removed ' 'asynchronously at a later time.'), cfg.IntOpt('deferred_deletion_delay', default=0, help='Time delay in seconds before a volume is eligible ' 'for permanent removal after being tagged for deferred ' 'deletion.'), cfg.IntOpt('deferred_deletion_purge_interval', default=60, help='Number of seconds between runs of the periodic task ' 'to purge volumes tagged for deletion.'), cfg.IntOpt('rbd_concurrent_flatten_operations', default=3, min=0, help='Number of flatten operations that will run ' 'concurrently on this volume service.') ] CONF = cfg.CONF CONF.register_opts(RBD_OPTS, group=configuration.SHARED_CONF_GROUP) EXTRA_SPECS_REPL_ENABLED = "replication_enabled" EXTRA_SPECS_MULTIATTACH = "multiattach" QOS_KEY_MAP = { 'total_iops_sec': { 'ceph_key': 'rbd_qos_iops_limit', 'default': 0 }, 'read_iops_sec': { 'ceph_key': 'rbd_qos_read_iops_limit', 'default': 0 }, 'write_iops_sec': { 'ceph_key': 'rbd_qos_write_iops_limit', 'default': 0 }, 'total_bytes_sec': { 'ceph_key': 'rbd_qos_bps_limit', 'default': 0 }, 'read_bytes_sec': { 'ceph_key': 'rbd_qos_read_bps_limit', 'default': 0 }, 'write_bytes_sec': { 'ceph_key': 'rbd_qos_write_bps_limit', 'default': 0 }, 'total_iops_sec_max': { 'ceph_key': 'rbd_qos_bps_burst', 'default': 0 }, 'read_iops_sec_max': { 'ceph_key': 'rbd_qos_read_iops_burst', 'default': 0 }, 'write_iops_sec_max': { 'ceph_key': 'rbd_qos_write_iops_burst', 'default': 0 }, 'total_bytes_sec_max': { 'ceph_key': 'rbd_qos_bps_burst', 'default': 0 }, 'read_bytes_sec_max': { 'ceph_key': 'rbd_qos_read_bps_burst', 'default': 0 }, 'write_bytes_sec_max': { 'ceph_key': 'rbd_qos_write_bps_burst', 'default': 0 }} CEPH_QOS_SUPPORTED_VERSION = 15 # RBD class RBDDriverException(exception.VolumeDriverException): message = _("RBD Cinder driver failure: %(reason)s") class RBDVolumeProxy(object): """Context manager for dealing with an existing RBD volume. This handles connecting to rados and opening an ioctx automatically, and otherwise acts like a librbd Image object. Also this may reuse an external connection (client and ioctx args), but note, that caller will be responsible for opening/closing connection. Also `pool`, `remote`, `timeout` args will be ignored in that case. The underlying librados client and ioctx can be accessed as the attributes 'client' and 'ioctx'. 
""" def __init__(self, driver: 'RBDDriver', name: str, pool: Optional[str] = None, snapshot: Optional[str] = None, read_only: bool = False, remote: Optional[dict[str, str]] = None, timeout: Optional[int] = None, client: 'rados.Rados' = None, ioctx: 'rados.Ioctx' = None): self._close_conn = not (client and ioctx) rados_client, rados_ioctx = driver._connect_to_rados( pool, remote, timeout) if self._close_conn else (client, ioctx) try: self.volume = driver.rbd.Image(rados_ioctx, name, snapshot=snapshot, read_only=read_only) self.volume = tpool.Proxy(self.volume) except driver.rbd.Error: if self._close_conn: driver._disconnect_from_rados(rados_client, rados_ioctx) raise self.driver = driver self.client = rados_client self.ioctx = rados_ioctx def __enter__(self) -> 'RBDVolumeProxy': return self def __exit__(self, type_: Optional[Any], value: Optional[Any], traceback: Optional[Any]) -> None: try: self.volume.close() finally: if self._close_conn: self.driver._disconnect_from_rados(self.client, self.ioctx) def __getattr__(self, attrib: str): return getattr(self.volume, attrib) class RADOSClient(object): """Context manager to simplify error handling for connecting to ceph.""" def __init__(self, driver: 'RBDDriver', pool: Optional[str] = None) -> None: self.driver = driver self.cluster, self.ioctx = driver._connect_to_rados(pool) def __enter__(self) -> 'RADOSClient': return self def __exit__(self, type_, value, traceback) -> None: self.driver._disconnect_from_rados(self.cluster, self.ioctx) @property def features(self) -> int: features = self.cluster.conf_get('rbd_default_features') if ((features is None) or (int(features) == 0)): features = self.driver.RBD_FEATURE_LAYERING return int(features) @interface.volumedriver class RBDDriver(driver.CloneableImageVD, driver.MigrateVD, driver.ManageableVD, driver.ManageableSnapshotsVD, driver.BaseVD): """Implements RADOS block device (RBD) volume commands. Version history: .. code-block:: none 1.3.0 - Added QoS Support """ VERSION = '1.3.0' # ThirdPartySystems wiki page CI_WIKI_NAME = "Cinder_Jenkins" SUPPORTS_ACTIVE_ACTIVE = True SYSCONFDIR = '/etc/ceph/' RBD_FEATURE_LAYERING = 1 RBD_FEATURE_EXCLUSIVE_LOCK = 4 RBD_FEATURE_OBJECT_MAP = 8 RBD_FEATURE_FAST_DIFF = 16 RBD_FEATURE_JOURNALING = 64 STORAGE_PROTOCOL = constants.CEPH def __init__(self, active_backend_id: Optional[str] = None, *args, **kwargs) -> None: super(RBDDriver, self).__init__(*args, **kwargs) self.configuration.append_config_values(RBD_OPTS) self._stats: dict[str, Union[str, bool]] = {} # allow overrides for testing self.rados = kwargs.get('rados', rados) self.rbd = kwargs.get('rbd', rbd) # All string args used with librbd must be None or utf-8 otherwise # librbd will break. 
for attr in ['rbd_cluster_name', 'rbd_user', 'rbd_ceph_conf', 'rbd_pool']: val = getattr(self.configuration, attr) if val is not None: setattr(self.configuration, attr, utils.convert_str(val)) self._backend_name = (self.configuration.volume_backend_name or self.__class__.__name__) self._active_backend_id: Optional[str] = active_backend_id self._active_config: dict[str, str] = {} self._is_replication_enabled = False self._replication_targets: list = [] self._target_names: list[str] = [] self._clone_v2_api_checked: bool = False if self.rbd is not None: self.RBD_FEATURE_LAYERING = self.rbd.RBD_FEATURE_LAYERING self.RBD_FEATURE_EXCLUSIVE_LOCK = \ self.rbd.RBD_FEATURE_EXCLUSIVE_LOCK self.RBD_FEATURE_OBJECT_MAP = self.rbd.RBD_FEATURE_OBJECT_MAP self.RBD_FEATURE_FAST_DIFF = self.rbd.RBD_FEATURE_FAST_DIFF self.RBD_FEATURE_JOURNALING = self.rbd.RBD_FEATURE_JOURNALING self.MULTIATTACH_EXCLUSIONS = ( self.RBD_FEATURE_JOURNALING | self.RBD_FEATURE_FAST_DIFF | self.RBD_FEATURE_OBJECT_MAP | self.RBD_FEATURE_EXCLUSIVE_LOCK) self.keyring_data: Optional[str] = None self._set_keyring_attributes() self._semaphore = utils.semaphore_factory( limit=self.configuration.rbd_concurrent_flatten_operations, concurrent_processes=1) def _set_keyring_attributes(self) -> None: # The rbd_keyring_conf option is not available for OpenStack usage # for security reasons (OSSN-0085) and in OpenStack we use # rbd_secret_uuid or make sure that the keyring files are present on # the hosts (where os-brick will look for them). # For cinderlib usage this option is necessary (no security issue, as # in those cases the contents of the connection are not available to # users). By using getattr Oslo-conf won't read the option from the # file even if it's there (because we have removed the conf option # definition), but cinderlib will find it because it sets the option # directly as an attribute. 
self.keyring_file: Optional[str] = getattr(self.configuration, 'rbd_keyring_conf', None) self.keyring_data = None try: if self.keyring_file and os.path.isfile(self.keyring_file): with open(self.keyring_file, 'r') as k_file: self.keyring_data = k_file.read() except IOError: LOG.debug('Cannot read RBD keyring file: %s.', self.keyring_file) @classmethod def get_driver_options(cls) -> list: additional_opts = cls._get_oslo_driver_opts( 'replication_device', 'reserved_percentage', 'max_over_subscription_ratio', 'volume_dd_blocksize') return RBD_OPTS + additional_opts def _show_msg_check_clone_v2_api(self, volume_name: str) -> None: if not self._clone_v2_api_checked: self._clone_v2_api_checked = True with RBDVolumeProxy(self, volume_name, read_only=True) as volume: try: enabled = (volume.volume.op_features() & self.rbd.RBD_OPERATION_FEATURE_CLONE_PARENT) except Exception: enabled = False if enabled: LOG.info('Using v2 Clone API') else: LOG.warning('Not using v2 clone API, please upgrade to' ' mimic+ and set the OSD minimum client' ' compat version to mimic for better' ' performance, fewer deletion issues') def _get_target_config(self, target_id: Optional[str]) -> dict[str, str]: """Get a replication target from known replication targets.""" for target in self._replication_targets: if target['name'] == target_id: return target if not target_id or target_id == 'default': return { 'name': self.configuration.rbd_cluster_name, 'conf': self.configuration.rbd_ceph_conf, 'user': self.configuration.rbd_user, 'secret_uuid': self.configuration.rbd_secret_uuid } raise exception.InvalidReplicationTarget( reason=_('RBD: Unknown failover target host %s.') % target_id) def do_setup(self, context: context.RequestContext) -> None: """Performs initialization steps that could raise exceptions.""" self._do_setup_replication() self._active_config = self._get_target_config(self._active_backend_id) self._set_default_secret_uuid() def _set_default_secret_uuid(self): # Set secret_uuid to the cluster FSID if missing, should only happen # with the primary/default configuration if not self._active_config['secret_uuid']: # self._active_config must be set before this call fsid = self._get_fsid() self._active_config['secret_uuid'] = fsid LOG.info('Secret UUID defaulting to cluster FSID: %s', fsid) self.configuration.set_default('rbd_secret_uuid', fsid) def _do_setup_replication(self) -> None: replication_devices = self.configuration.safe_get( 'replication_device') if replication_devices: self._parse_replication_configs(replication_devices) self._is_replication_enabled = True self._target_names.append('default') def _parse_replication_configs(self, replication_devices: list[dict]) -> None: for replication_device in replication_devices: if 'backend_id' not in replication_device: msg = _('Missing backend_id in replication_device ' 'configuration.') raise exception.InvalidConfigurationValue(msg) name = replication_device['backend_id'] conf = replication_device.get('conf', self.SYSCONFDIR + name + '.conf') user = replication_device.get( 'user', self.configuration.rbd_user or 'cinder') secret_uuid = replication_device.get( 'secret_uuid', self.configuration.rbd_secret_uuid) # Pool has to be the same in all clusters replication_target = {'name': name, 'conf': utils.convert_str(conf), 'user': utils.convert_str(user), 'secret_uuid': secret_uuid} LOG.info('Adding replication target: %s.', name) self._replication_targets.append(replication_target) self._target_names.append(name) def _get_config_tuple( self, remote: Optional[dict[str, str]] 
= None) \ -> tuple[Optional[str], Optional[str], Optional[str], Optional[str]]: if not remote: remote = self._active_config return (remote.get('name'), remote.get('conf'), remote.get('user'), remote.get('secret_uuid', None)) def _trash_purge(self) -> None: LOG.info("Purging trash for backend '%s'", self._backend_name) def _err(vol_name: str, backend_name: str) -> None: LOG.exception("Error deleting %s from trash backend '%s'", vol_name, backend_name) with RADOSClient(self) as client: for vol in self.RBDProxy().trash_list(client.ioctx): try: self.RBDProxy().trash_remove(client.ioctx, vol.get('id')) except OSError as e: # NOTE(arne_wiebalck): trash_remove raises EPERM in case # the volume's deferral time has not expired yet, so we # want to explicitly handle this "normal" situation. # All other exceptions, e.g. ImageBusy, are not re-raised # so that the periodic purge retries on the next iteration # and leaves ERRORs in the logs in case the deletion fails # repeatedly. if (e.errno == errno.EPERM): LOG.debug("%s has not expired yet on backend '%s'", vol.get('name'), self._backend_name) else: _err(vol.get('name'), self._backend_name) except Exception: _err(vol.get('name'), self._backend_name) else: LOG.info("Deleted %s from trash for backend '%s'", vol.get('name'), self._backend_name) def _start_periodic_tasks(self) -> None: if self.configuration.enable_deferred_deletion: LOG.info("Starting periodic trash purge for backend '%s'", self._backend_name) deferred_deletion_ptask = loopingcall.FixedIntervalLoopingCall( self._trash_purge) deferred_deletion_ptask.start( interval=self.configuration.deferred_deletion_purge_interval) def check_for_setup_error(self) -> None: """Returns an error if prerequisites aren't met.""" if rados is None: msg = _('rados and rbd python libraries not found') raise exception.VolumeBackendAPIException(data=msg) for attr in ['rbd_cluster_name', 'rbd_pool']: val = getattr(self.configuration, attr) if not val: raise exception.InvalidConfigurationValue(option=attr, value=val) # NOTE: Checking connection to ceph # RADOSClient __init__ method invokes _connect_to_rados # so no need to check for self.rados.Error here. with RADOSClient(self): pass # NOTE(arne_wiebalck): If deferred deletion is enabled, check if the # local Ceph client has support for the trash API. if self.configuration.enable_deferred_deletion: if not hasattr(self.RBDProxy(), 'trash_list'): msg = _("Deferred deletion is enabled, but the local Ceph " "client has no support for the trash API. Support " "for this feature started with v12.2.0 Luminous.") LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) # If the keyring is defined (cinderlib usage), then the contents are # necessary. 
if self.keyring_file and not self.keyring_data: msg = _('No keyring data found') LOG.error(msg) raise exception.InvalidConfigurationValue( option='rbd_keyring_conf', value=self.keyring_file) self._start_periodic_tasks() def RBDProxy(self) -> tpool.Proxy: return tpool.Proxy(self.rbd.RBD()) def _ceph_args(self) -> list[str]: args = [] name, conf, user, secret_uuid = self._get_config_tuple() if user: args.extend(['--id', user]) if name: args.extend(['--cluster', name]) if conf: args.extend(['--conf', conf]) return args def _connect_to_rados(self, pool: Optional[str] = None, remote: Optional[dict] = None, timeout: Optional[int] = None) -> \ tuple['rados.Rados', 'rados.Ioctx']: @utils.retry(exception.VolumeBackendAPIException, self.configuration.rados_connection_interval, self.configuration.rados_connection_retries) def _do_conn(pool: Optional[str], remote: Optional[dict], timeout: Optional[int]) -> tuple['rados.Rados', 'rados.Ioctx']: name, conf, user, secret_uuid = self._get_config_tuple(remote) if pool is None: pool = self.configuration.rbd_pool if timeout is None: timeout = self.configuration.rados_connect_timeout LOG.debug("connecting to %(user)s@%(name)s (conf=%(conf)s, " "timeout=%(timeout)s).", {'user': user, 'name': name, 'conf': conf, 'timeout': timeout}) client = self.rados.Rados(rados_id=user, clustername=name, conffile=conf) client = tpool.Proxy(client) try: if timeout >= 0: t = str(timeout) client.conf_set('rados_osd_op_timeout', t) client.conf_set('rados_mon_op_timeout', t) client.conf_set('client_mount_timeout', t) client.connect() ioctx = client.open_ioctx(pool) return client, ioctx except self.rados.Error: msg = _("Error connecting to ceph cluster.") LOG.exception(msg) client.shutdown() raise exception.VolumeBackendAPIException(data=msg) return _do_conn(pool, remote, timeout) @staticmethod def _disconnect_from_rados(client: 'rados.Rados', ioctx: 'rados.Ioctx') -> None: # closing an ioctx cannot raise an exception ioctx.close() client.shutdown() def _supports_qos(self): return self.RBDProxy().version()[1] >= CEPH_QOS_SUPPORTED_VERSION @staticmethod def _get_backup_snaps(rbd_image) -> list: """Get list of any backup snapshots that exist on this volume. There should only ever be one but accept all since they need to be deleted before the volume can be. """ # NOTE(dosaboy): we do the import here otherwise we get import conflict # issues between the rbd driver and the ceph backup driver. These # issues only seem to occur when NOT using them together and are # triggered when the ceph backup driver imports the rbd volume driver. from cinder.backup.drivers import ceph return ceph.CephBackupDriver.get_backup_snaps(rbd_image) def _get_mon_addrs(self) -> tuple[list[str], list[str]]: args = ['ceph', 'mon', 'dump', '--format=json'] args.extend(self._ceph_args()) out, _ = self._execute(*args) lines = out.split('\n') if lines[0].startswith('dumped monmap epoch'): lines = lines[1:] monmap = json.loads('\n'.join(lines)) addrs: list[str] = [mon['addr'] for mon in monmap['mons']] hosts = [] ports = [] for addr in addrs: host_port = addr[:addr.rindex('/')] host, port = host_port.rsplit(':', 1) hosts.append(host.strip('[]')) ports.append(port) return hosts, ports def _get_usage_info(self) -> int: """Calculate provisioned volume space in GiB. Stats report should send provisioned size of volumes (snapshot must not be included) and not the physical size of those volumes. 
We must include all volumes, not only Cinder created volumes, because Cinder created volumes are reported by the Cinder core code as allocated_capacity_gb. """ total_provisioned = 0 with RADOSClient(self) as client: for t in self.RBDProxy().list(client.ioctx): try: with RBDVolumeProxy(self, t, read_only=True, client=client.cluster, ioctx=client.ioctx) as v: size = v.size() except (self.rbd.ImageNotFound, self.rbd.OSError): LOG.debug("Image %s is not found.", t) else: total_provisioned += size total_provisioned = math.ceil(float(total_provisioned) / units.Gi) return total_provisioned def _get_pool_stats(self) -> Union[tuple[str, str], tuple[float, float]]: """Gets pool free and total capacity in GiB. Calculate free and total capacity of the pool based on the pool's defined quota and pools stats. Returns a tuple with (free, total) where they are either unknown or a real number with a 2 digit precision. """ pool_name = self.configuration.rbd_pool with RADOSClient(self) as client: ret, df_outbuf, __ = client.cluster.mon_command( '{"prefix":"df", "format":"json"}', b'') if ret: LOG.warning('Unable to get rados pool stats.') return 'unknown', 'unknown' ret, quota_outbuf, __ = client.cluster.mon_command( '{"prefix":"osd pool get-quota", "pool": "%s",' ' "format":"json"}' % pool_name, b'') if ret: LOG.warning('Unable to get rados pool quotas.') return 'unknown', 'unknown' df_outbuf = encodeutils.safe_decode(df_outbuf) df_data = json.loads(df_outbuf) pool_stats = [pool for pool in df_data['pools'] if pool['name'] == pool_name][0]['stats'] total_capacity: float free_capacity: float # In Nautilus bytes_used was renamed to stored bytes_used = pool_stats.get('stored', pool_stats['bytes_used']) quota_outbuf = encodeutils.safe_decode(quota_outbuf) bytes_quota = json.loads(quota_outbuf)['quota_max_bytes'] # With quota the total is the quota limit and free is quota - used if bytes_quota: total_capacity = bytes_quota free_capacity = max(min(total_capacity - bytes_used, pool_stats['max_avail']), 0) # Without quota free is pools max available and total is global size else: total_capacity = df_data['stats']['total_bytes'] free_capacity = pool_stats['max_avail'] # If we want dynamic total capacity (default behavior) if self.configuration.safe_get('report_dynamic_total_capacity'): total_capacity = free_capacity + bytes_used free_capacity = round((float(free_capacity) / units.Gi), 2) total_capacity = round((float(total_capacity) / units.Gi), 2) return free_capacity, total_capacity def _update_volume_stats(self) -> None: location_info = '%s:%s:%s:%s:%s' % ( self.configuration.rbd_cluster_name, self.configuration.rbd_ceph_conf, self._get_fsid(), self.configuration.rbd_user, self.configuration.rbd_pool) stats = { 'vendor_name': 'Open Source', 'driver_version': self.VERSION, 'storage_protocol': self.STORAGE_PROTOCOL, 'total_capacity_gb': 'unknown', 'free_capacity_gb': 'unknown', 'reserved_percentage': ( self.configuration.safe_get('reserved_percentage')), 'multiattach': True, 'thin_provisioning_support': True, 'max_over_subscription_ratio': ( self.configuration.safe_get('max_over_subscription_ratio')), 'location_info': location_info, 'backend_state': 'down', 'qos_support': self._supports_qos(), } backend_name = self.configuration.safe_get('volume_backend_name') stats['volume_backend_name'] = backend_name or 'RBD' stats['replication_enabled'] = self._is_replication_enabled if self._is_replication_enabled: stats['replication_targets'] = self._target_names try: free_capacity, total_capacity = self._get_pool_stats() 
stats['free_capacity_gb'] = free_capacity stats['total_capacity_gb'] = total_capacity # For exclusive pools let scheduler set provisioned_capacity_gb to # allocated_capacity_gb, and for non exclusive query the value. if not self.configuration.safe_get('rbd_exclusive_cinder_pool'): total_gbi = self._get_usage_info() stats['provisioned_capacity_gb'] = total_gbi stats['backend_state'] = 'up' except self.rados.Error: # just log and return unknown capacities and let scheduler set # provisioned_capacity_gb = allocated_capacity_gb LOG.exception('error refreshing volume stats') self._stats = stats def _get_clone_depth(self, client: 'rados.Rados', volume_name: str, depth: int = 0) -> int: """Returns the number of ancestral clones of the given volume.""" parent_volume = self.rbd.Image(client.ioctx, volume_name, read_only=True) try: _pool, parent, _snap = self._get_clone_info(parent_volume, volume_name) finally: parent_volume.close() if not parent: return depth return self._get_clone_depth(client, parent, depth + 1) def _extend_if_required(self, volume: Volume, src_vref: Volume) -> None: """Extends a volume if required In case src_vref size is smaller than the size if the requested new volume call _resize(). """ if volume.size != src_vref.size: LOG.debug("resize volume '%(dst_vol)s' from %(src_size)d to " "%(dst_size)d", {'dst_vol': volume.name, 'src_size': src_vref.size, 'dst_size': volume.size}) self._resize(volume) def _flatten_volume( self, image_name: str, client: RADOSClient) -> None: # Flatten destination volume try: with RBDVolumeProxy(self, image_name, client=client, ioctx=client.ioctx) as dest_volume: LOG.debug("flattening volume %s", image_name) dest_volume.flatten() except Exception as e: msg = (_("Failed to flatten volume %(volume)s with " "error: %(error)s.") % {'volume': image_name, 'error': e}) LOG.exception(msg) raise exception.VolumeBackendAPIException(data=msg) def create_cloned_volume( self, volume: Volume, src_vref: Volume) -> Optional[dict[str, Optional[str]]]: """Create a cloned volume from another volume. Since we are cloning from a volume and not a snapshot, we must first create a snapshot of the source volume. The user has the option to limit how long a volume's clone chain can be by setting rbd_max_clone_depth. If a clone is made of another clone and that clone has rbd_max_clone_depth clones behind it, the dest volume will be flattened. """ src_name = src_vref.name dest_name = volume.name clone_snap = "%s.clone_snap" % dest_name # Do full copy if requested if self.configuration.rbd_max_clone_depth <= 0: with RBDVolumeProxy(self, src_name, read_only=True) as vol: vol.copy(vol.ioctx, dest_name) self._extend_if_required(volume, src_vref) return None # Otherwise do COW clone. 
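# ---------------------------------------------------------------------------
# NOTE: illustrative sketch only, not part of the driver. It condenses the
# cloning decision described above into a standalone helper: a full copy when
# rbd_max_clone_depth is disabled (<= 0), otherwise a copy-on-write clone,
# flattened afterwards when a quota-counted volume has already reached the
# configured chain depth. The helper name and return labels are hypothetical.
# ---------------------------------------------------------------------------
def _example_clone_strategy(max_clone_depth: int, current_depth: int,
                            counts_against_quota: bool = True) -> str:
    """Return the cloning strategy implied by the settings above."""
    if max_clone_depth <= 0:
        return 'full-copy'
    if counts_against_quota and current_depth >= max_clone_depth:
        return 'cow-clone-then-flatten'
    return 'cow-clone'


# _example_clone_strategy(0, 0)  -> 'full-copy'
# _example_clone_strategy(5, 2)  -> 'cow-clone'
# _example_clone_strategy(5, 5)  -> 'cow-clone-then-flatten'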
with RADOSClient(self) as client: src_volume = self.rbd.Image(client.ioctx, src_name) LOG.debug("creating snapshot='%s'", clone_snap) try: # Create new snapshot of source volume src_volume.create_snap(clone_snap) src_volume.protect_snap(clone_snap) # Now clone source volume snapshot LOG.debug("cloning '%(src_vol)s@%(src_snap)s' to " "'%(dest)s'", {'src_vol': src_name, 'src_snap': clone_snap, 'dest': dest_name}) self.RBDProxy().clone(client.ioctx, src_name, clone_snap, client.ioctx, dest_name, features=client.features) except Exception as e: src_volume.unprotect_snap(clone_snap) src_volume.remove_snap(clone_snap) src_volume.close() msg = (_("Failed to clone '%(src_vol)s@%(src_snap)s' to " "'%(dest)s', error: %(error)s") % {'src_vol': src_name, 'src_snap': clone_snap, 'dest': dest_name, 'error': e}) LOG.exception(msg) raise exception.VolumeBackendAPIException(data=msg) depth = self._get_clone_depth(client, src_name) # If dest volume is a clone and rbd_max_clone_depth reached, # flatten the dest after cloning. Zero rbd_max_clone_depth means # volumes are always flattened. if (volume.use_quota and depth >= self.configuration.rbd_max_clone_depth): LOG.info("maximum clone depth (%d) has been reached - " "flattening dest volume", self.configuration.rbd_max_clone_depth) self._flatten_volume(dest_name, client) try: # remove temporary snap LOG.debug("remove temporary snap %s", clone_snap) src_volume.unprotect_snap(clone_snap) src_volume.remove_snap(clone_snap) except Exception as e: msg = (_("Failed to remove temporary snap " "%(snap_name)s, error: %(error)s") % {'snap_name': clone_snap, 'error': e}) LOG.exception(msg) src_volume.close() raise exception.VolumeBackendAPIException(data=msg) try: volume_update = self._setup_volume(volume) except Exception: self.RBDProxy().remove(client.ioctx, dest_name) src_volume.unprotect_snap(clone_snap) src_volume.remove_snap(clone_snap) err_msg = (_('Failed to enable image replication')) raise exception.ReplicationError(reason=err_msg, volume_id=volume.id) finally: src_volume.close() self._extend_if_required(volume, src_vref) LOG.debug("clone created successfully") return volume_update def _enable_replication(self, volume: Volume) -> dict[str, str]: """Enable replication for a volume. Returns required volume update. 
""" vol_name = volume.name with RBDVolumeProxy(self, vol_name) as image: had_exclusive_lock = (image.features() & self.RBD_FEATURE_EXCLUSIVE_LOCK) had_journaling = image.features() & self.RBD_FEATURE_JOURNALING if not had_exclusive_lock: image.update_features(self.RBD_FEATURE_EXCLUSIVE_LOCK, True) if not had_journaling: image.update_features(self.RBD_FEATURE_JOURNALING, True) image.mirror_image_enable() driver_data = self._dumps({ 'had_journaling': bool(had_journaling), 'had_exclusive_lock': bool(had_exclusive_lock) }) return {'replication_status': fields.ReplicationStatus.ENABLED, 'replication_driver_data': driver_data} def _enable_multiattach(self, volume: Volume) -> dict[str, str]: vol_name = volume.name with RBDVolumeProxy(self, vol_name) as image: image_features = image.features() change_features = self.MULTIATTACH_EXCLUSIONS & image_features if change_features != 0: image.update_features(change_features, False) return {'provider_location': self._dumps({'saved_features': image_features})} def _disable_multiattach(self, volume: Volume) -> dict[str, None]: vol_name = volume.name with RBDVolumeProxy(self, vol_name) as image: try: provider_location = json.loads(volume.provider_location) image_features = provider_location['saved_features'] change_features = self.MULTIATTACH_EXCLUSIONS & image_features if change_features != 0: image.update_features(change_features, True) except IndexError: msg = "Could not find saved image features." raise RBDDriverException(reason=msg) except self.rbd.InvalidArgument: msg = "Failed to restore image features." raise RBDDriverException(reason=msg) return {'provider_location': None} def _is_replicated_type(self, volume_type: VolumeType) -> bool: try: extra_specs = volume_type.extra_specs LOG.debug('extra_specs: %s', extra_specs) return extra_specs.get(EXTRA_SPECS_REPL_ENABLED) == " True" except Exception: LOG.debug('Unable to retrieve extra specs info') return False def _is_multiattach_type(self, volume_type: VolumeType) -> bool: try: extra_specs = volume_type.extra_specs LOG.debug('extra_specs: %s', extra_specs) return extra_specs.get(EXTRA_SPECS_MULTIATTACH) == " True" except Exception: LOG.debug('Unable to retrieve extra specs info') return False def _qos_specs_from_volume_type(self, volume_type): if not volume_type: return None qos_specs_id = volume_type.get('qos_specs_id') if qos_specs_id is not None: ctxt = context.get_admin_context() vol_qos_specs = qos_specs.get_qos_specs(ctxt, qos_specs_id) LOG.debug('qos_specs: %s', vol_qos_specs) if vol_qos_specs['consumer'] in ('back-end', 'both'): return vol_qos_specs['specs'] return None def _setup_volume( self, volume: Volume, volume_type: Optional[VolumeType] = None) -> dict[str, Optional[str]]: if volume_type: had_replication = self._is_replicated_type(volume.volume_type) had_multiattach = self._is_multiattach_type(volume.volume_type) else: had_replication = False had_multiattach = False volume_type = volume.volume_type specs = self._qos_specs_from_volume_type(volume_type) if specs: if self._supports_qos(): self.update_rbd_image_qos(volume, specs) else: LOG.warning("Backend QOS policies for ceph not " "supported prior to librbd version %s", CEPH_QOS_SUPPORTED_VERSION) want_replication = self._is_replicated_type(volume_type) want_multiattach = self._is_multiattach_type(volume_type) if want_replication and want_multiattach: msg = _('Replication and Multiattach are mutually exclusive.') raise RBDDriverException(reason=msg) volume_update: dict = dict() if want_replication: if had_multiattach: 
volume_update.update(self._disable_multiattach(volume)) if not had_replication: try: volume_update.update(self._enable_replication(volume)) except Exception: err_msg = (_('Failed to enable image replication')) raise exception.ReplicationError(reason=err_msg, volume_id=volume.id) elif had_replication: try: volume_update.update(self._disable_replication(volume)) except Exception: err_msg = (_('Failed to disable image replication')) raise exception.ReplicationError(reason=err_msg, volume_id=volume.id) elif self._is_replication_enabled: volume_update.update({'replication_status': fields.ReplicationStatus.DISABLED}) if want_multiattach: volume_update.update(self._enable_multiattach(volume)) elif had_multiattach: volume_update.update(self._disable_multiattach(volume)) return volume_update def _create_encrypted_volume(self, volume: Volume, context: context.RequestContext) -> None: """Create an encrypted volume. This works by creating an encrypted image locally, and then uploading it to the volume. """ encryption = volume_utils.check_encryption_provider(volume, context) # Fetch the key associated with the volume and decode the passphrase keymgr = key_manager.API(CONF) key = keymgr.get(context, encryption['encryption_key_id']) passphrase = binascii.hexlify(key.get_encoded()).decode('utf-8') # create a file tmp_dir = volume_utils.image_conversion_dir() with tempfile.NamedTemporaryFile(dir=tmp_dir) as tmp_image: with tempfile.NamedTemporaryFile(dir=tmp_dir) as tmp_key: with open(tmp_key.name, 'w') as f: f.write(passphrase) cipher_spec = image_utils.decode_cipher(encryption['cipher'], encryption['key_size']) create_cmd = ( 'qemu-img', 'create', '-f', 'luks', '-o', 'cipher-alg=%(cipher_alg)s,' 'cipher-mode=%(cipher_mode)s,' 'ivgen-alg=%(ivgen_alg)s' % cipher_spec, '--object', 'secret,id=luks_sec,' 'format=raw,file=%(passfile)s' % {'passfile': tmp_key.name}, '-o', 'key-secret=luks_sec', tmp_image.name, '%sM' % (volume.size * 1024)) self._execute(*create_cmd) # Copy image into RBD chunk_size = self.configuration.rbd_store_chunk_size * units.Mi order = int(math.log(chunk_size, 2)) cmd = ['rbd', 'import', '--dest-pool', self.configuration.rbd_pool, '--order', order, tmp_image.name, volume.name] cmd.extend(self._ceph_args()) self._execute(*cmd) def create_volume(self, volume: Volume) -> dict[str, Any]: """Creates a logical volume.""" if volume.encryption_key_id: self._create_encrypted_volume(volume, volume.obj_context) return {} size = int(volume.size) * units.Gi LOG.debug("creating volume '%s'", volume.name) chunk_size = self.configuration.rbd_store_chunk_size * units.Mi order = int(math.log(chunk_size, 2)) vol_name = volume.name with RADOSClient(self) as client: self.RBDProxy().create(client.ioctx, vol_name, size, order, old_format=False, features=client.features) try: volume_update = self._setup_volume(volume) except Exception: with excutils.save_and_reraise_exception(): LOG.error('Error creating RBD image %(vol)s.', {'vol': vol_name}) self.RBDProxy().remove(client.ioctx, vol_name) return volume_update @utils.limit_operations def _do_flatten(self, volume_name: str, pool: str) -> None: LOG.debug('flattening %s/%s', pool, volume_name) try: with RBDVolumeProxy(self, volume_name, pool=pool) as vol: vol.flatten() LOG.debug('flattening of %s/%s has completed', pool, volume_name) except self.rbd.ImageNotFound: LOG.debug('image %s not found during flatten', volume_name) # do nothing def _flatten(self, pool: str, volume_name: str) -> None: image = pool + '/' + volume_name LOG.debug('Queueing %s for flattening', 
image) self._do_flatten(volume_name, pool) def _get_stripe_unit(self, ioctx: 'rados.Ioctx', volume_name: str) -> int: """Return the correct stripe unit for a cloned volume. A cloned volume must be created with a stripe unit at least as large as the source volume. We compute the desired stripe width from rbd_store_chunk_size and compare that to the incoming source volume's stripe width, selecting the larger to avoid error. """ default_stripe_unit = \ self.configuration.rbd_store_chunk_size * units.Mi image = self.rbd.Image(ioctx, volume_name, read_only=True) try: image_stripe_unit = image.stripe_unit() finally: image.close() return max(image_stripe_unit, default_stripe_unit) def _clone(self, volume: Volume, src_pool: str, src_image: str, src_snap: str) -> dict[str, Optional[str]]: LOG.debug('cloning %(pool)s/%(img)s@%(snap)s to %(dst)s', dict(pool=src_pool, img=src_image, snap=src_snap, dst=volume.name)) vol_name = volume.name with RADOSClient(self, src_pool) as src_client: stripe_unit = self._get_stripe_unit(src_client.ioctx, src_image) order = int(math.log(stripe_unit, 2)) with RADOSClient(self) as dest_client: self.RBDProxy().clone(src_client.ioctx, src_image, src_snap, dest_client.ioctx, vol_name, features=src_client.features, order=order) try: volume_update = self._setup_volume(volume) except Exception: self.RBDProxy().remove(dest_client.ioctx, vol_name) err_msg = (_('Failed to enable image replication')) raise exception.ReplicationError(reason=err_msg, volume_id=volume.id) return volume_update or {} def _resize(self, volume: Volume, **kwargs: Any) -> None: size = kwargs.get('size', None) if not size: size = int(volume.size) * units.Gi with RBDVolumeProxy(self, volume.name) as vol: vol.resize(size) def _calculate_new_size(self, size_diff: int, volume_name: str) -> int: with RBDVolumeProxy(self, volume_name) as vol: current_size_bytes = vol.volume.size() size_diff_bytes = size_diff * units.Gi new_size_bytes = current_size_bytes + size_diff_bytes return new_size_bytes def create_volume_from_snapshot( self, volume: Volume, snapshot: Snapshot) -> dict: """Creates a volume from a snapshot.""" volume_update = self._clone(volume, self.configuration.rbd_pool, snapshot.volume_name, snapshot.name) # Don't flatten temporary volumes if (volume.use_quota and self.configuration.rbd_flatten_volume_from_snapshot): self._flatten(self.configuration.rbd_pool, volume.name) snap_vol_size = snapshot.volume_size # In case the destination size is bigger than the snapshot size # we should resize. In particular when the destination volume # is encrypted we should consider the encryption header size. # Because of this, we need to calculate the difference size to # provide the size that the user is expecting. # Otherwise if the destination volume size is equal to the # source volume size we don't perform a resize. 
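# ---------------------------------------------------------------------------
# NOTE: illustrative sketch only, not part of the driver. It shows the
# arithmetic described in the comment above: for encrypted volumes the new
# size is derived from the *difference* between the requested size and the
# snapshot size, so the extra space the encryption header already occupies in
# the source image is preserved instead of being clipped off. The helper name
# and the example numbers (16 MiB header) are made up.
# ---------------------------------------------------------------------------
from oslo_utils import units


def _example_encrypted_resize(current_image_bytes: int,
                              requested_size_gb: int,
                              snapshot_size_gb: int) -> int:
    """Return the target size in bytes for an encrypted clone."""
    size_diff_gb = requested_size_gb - snapshot_size_gb
    return current_image_bytes + size_diff_gb * units.Gi


# A 2 GiB snapshot whose image is 2 GiB plus 16 MiB of header, cloned into a
# requested 5 GiB volume, ends up at 5 GiB + 16 MiB rather than a flat 5 GiB:
# _example_encrypted_resize(2 * units.Gi + 16 * units.Mi, 5, 2)
# == 5 * units.Gi + 16 * units.Mi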
if volume.size > snap_vol_size: new_size = None # In case the volume is encrypted we need to consider the # size of the encryption header when resizing the volume if volume.encryption_key_id: size_diff = volume.size - snap_vol_size new_size = self._calculate_new_size(size_diff, volume.name) self._resize(volume, size=new_size) self._show_msg_check_clone_v2_api(snapshot.volume_name) return volume_update def _delete_backup_snaps(self, rbd_image: 'rbd.Image') -> None: backup_snaps = self._get_backup_snaps(rbd_image) if backup_snaps: for snap in backup_snaps: rbd_image.remove_snap(snap['name']) else: LOG.debug("volume has no backup snaps") def _get_clone_info( self, volume: 'rbd.Image', volume_name: str, snap: Optional[str] = None) -> Union[tuple[str, str, str], tuple[None, None, None]]: """If volume is a clone, return its parent info. Returns a tuple of (pool, parent, snap). A snapshot may optionally be provided for the case where a cloned volume has been flattened but it's snapshot still depends on the parent. """ try: if snap: volume.set_snap(snap) pool, parent, parent_snap = tuple(volume.parent_info()) if snap: volume.set_snap(None) # Strip the tag off the end of the volume name since it will not be # in the snap name. if volume_name.endswith('.deleted'): volume_name = volume_name[:-len('.deleted')] # Now check the snap name matches. if parent_snap == "%s.clone_snap" % volume_name: return pool, parent, parent_snap except self.rbd.ImageNotFound: LOG.debug("Volume %s is not a clone.", volume_name) volume.set_snap(None) return (None, None, None) def _delete_clone_parent_refs(self, client: RADOSClient, parent_name: str, parent_snap: str) -> None: """Walk back up the clone chain and delete references. Deletes references i.e. deleted parent volumes and snapshots. """ parent_rbd = self.rbd.Image(client.ioctx, parent_name) parent_has_snaps = False try: # Check for grandparent _pool, g_parent, g_parent_snap = self._get_clone_info(parent_rbd, parent_name, parent_snap) LOG.debug("deleting parent snapshot %s", parent_snap) parent_rbd.unprotect_snap(parent_snap) parent_rbd.remove_snap(parent_snap) parent_has_snaps = bool(list(parent_rbd.list_snaps())) finally: parent_rbd.close() # If parent has been deleted in Cinder, delete the silent reference and # keep walking up the chain if it is itself a clone. 
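# ---------------------------------------------------------------------------
# NOTE: illustrative sketch only, not part of the driver. It isolates the
# name matching performed by _get_clone_info(): the '.deleted' tag carried by
# a silently-deleted image is stripped before comparing the parent snapshot
# name against the '<name>.clone_snap' convention used by
# create_cloned_volume(). The helper name is hypothetical.
# ---------------------------------------------------------------------------
def _example_is_clone_parent_snap(volume_name: str, parent_snap: str) -> bool:
    """Return True if parent_snap is the clone snapshot for volume_name."""
    if volume_name.endswith('.deleted'):
        volume_name = volume_name[:-len('.deleted')]
    return parent_snap == '%s.clone_snap' % volume_name


# _example_is_clone_parent_snap('volume-1234.deleted',
#                               'volume-1234.clone_snap')  -> True
# _example_is_clone_parent_snap('volume-1234',
#                               'volume-9999.clone_snap')  -> False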
if (not parent_has_snaps) and parent_name.endswith('.deleted'): LOG.debug("deleting parent %s", parent_name) if self.configuration.enable_deferred_deletion: LOG.debug("moving volume %s to trash", parent_name) delay = self.configuration.deferred_deletion_delay self.RBDProxy().trash_move(client.ioctx, parent_name, delay) else: self.RBDProxy().remove(client.ioctx, parent_name) # Now move up to grandparent if there is one if g_parent: g_parent_snap = typing.cast(str, g_parent_snap) self._delete_clone_parent_refs(client, g_parent, g_parent_snap) def _flatten_children(self, client_ioctx: 'rados.Ioctx', volume_name: str, snap_name: Optional[str] = None) -> None: with RBDVolumeProxy(self, volume_name, ioctx=client_ioctx) as rbd_image: if snap_name is not None: rbd_image.set_snap(snap_name) children_list = rbd_image.list_children() for (pool, child_name) in children_list: LOG.info('Image %(pool)s/%(image)s%(snap)s is dependent ' 'on the image %(volume_name)s.', {'pool': pool, 'image': child_name, 'volume_name': volume_name, 'snap': '@' + snap_name if snap_name else ''}) try: self._flatten(pool, child_name) except Exception as e: LOG.error(e) raise def _move_volume_to_trash(self, client_ioctx: 'rados.Ioctx', volume_name: str, delay: int) -> None: # trash_move() will succeed in some situations when a regular # remove() call will fail due to image dependencies LOG.debug("moving volume %s to trash", volume_name) try: self.RBDProxy().trash_move(client_ioctx, volume_name, delay) except self.rbd.ImageBusy: msg = _('ImageBusy error raised while trashing RBD ' 'volume.') LOG.warning(msg) raise exception.VolumeIsBusy(msg, volume_name=volume_name) def _try_remove_volume(self, client: 'rados', volume_name: str) -> bool: # Try a couple of times to delete the volume, rather than # stopping on the first error. # In the event of simultaneous Cinder delete operations, # this gives a window for other deletes of snapshots and images # to complete, freeing dependencies which allow this remove to # succeed. @utils.retry((self.rbd.ImageBusy, self.rbd.ImageHasSnapshots), self.configuration.rados_connection_interval, self.configuration.rados_connection_retries) def _do_try_remove_volume(self, client, volume_name: str) -> bool: try: LOG.debug('Trying to remove image %s', volume_name) self.RBDProxy().remove(client.ioctx, volume_name) return True except (self.rbd.ImageHasSnapshots, self.rbd.ImageBusy): with excutils.save_and_reraise_exception(): msg = _('deletion failed') LOG.info(msg) return False return _do_try_remove_volume(self, client, volume_name) @staticmethod def _find_clone_snap(rbd_image: RBDVolumeProxy) -> Optional[str]: snaps = rbd_image.list_snaps() for snap in snaps: if snap['name'].endswith('.clone_snap'): LOG.debug("volume has clone snapshot(s)") # We grab one of these and use it when fetching parent # info in case the volume has been flattened. 
clone_snap = snap['name'] return clone_snap return None def _delete_volume(self, volume: Volume, client: RADOSClient) -> None: clone_snap = None parent = None parent_snap = None try: with RBDVolumeProxy(self, volume.name, ioctx=client.ioctx) as rbd_image: # Ensure any backup snapshots are deleted self._delete_backup_snaps(rbd_image) clone_snap = self._find_clone_snap(rbd_image) # Determine if this volume is itself a clone _pool, parent, parent_snap = self._get_clone_info(rbd_image, volume.name, clone_snap) except self.rbd.ImageNotFound: LOG.info("volume %s no longer exists in backend", volume.name) return if clone_snap is not None: # If the volume has copy-on-write clones, keep it as a silent # volume which will be deleted when its snapshots and clones # are deleted. # TODO: only do this if it actually can't be deleted? new_name = "%s.deleted" % (volume.name) self.RBDProxy().rename(client.ioctx, volume.name, new_name) return LOG.debug("deleting RBD volume %s", volume.name) try: self.RBDProxy().remove(client.ioctx, volume.name) return # the fast path was successful except (self.rbd.ImageHasSnapshots, self.rbd.ImageBusy): self._flatten_children(client.ioctx, volume.name) except self.rbd.ImageNotFound: LOG.info("RBD volume %s not found, allowing delete " "operation to proceed.", volume.name) return try: if self._try_remove_volume(client, volume.name): return except self.rbd.ImageHasSnapshots: # perform trash instead, which can succeed when snapshots exist pass except self.rbd.ImageBusy: msg = _('ImageBusy error raised while deleting RBD volume') raise exception.VolumeIsBusy(msg, volume_name=volume.name) delay = 0 if self.configuration.enable_deferred_deletion: delay = self.configuration.deferred_deletion_delay # Since it failed to remove above, trash the volume here instead. # This covers the scenario of an image unable to be deleted because # a child snapshot of it has been trashed but not yet removed. # That snapshot is not visible but is still in the dependency # chain of RBD images. self._move_volume_to_trash(client.ioctx, volume.name, delay) # If it is a clone, walk back up the parent chain deleting # references. 
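# ---------------------------------------------------------------------------
# NOTE: illustrative sketch only, not part of the driver. It models the
# "walk back up the clone chain" idea from _delete_clone_parent_refs() with a
# plain dictionary instead of RBD images: starting from a deleted clone's
# parent, keep following parents while each one is itself a '.deleted'
# placeholder, collecting the names that could now be reclaimed. The names
# and the dict-based representation are made up, and the real method also
# checks that each parent has no remaining snapshots before removing it.
# ---------------------------------------------------------------------------
def _example_walk_clone_chain(parents: dict, name: str) -> list:
    """Return the chain of '.deleted' ancestors reachable from name."""
    reclaimable = []
    while name and name.endswith('.deleted'):
        reclaimable.append(name)
        name = parents.get(name)      # grandparent, or None at the top
    return reclaimable


# parents = {'volume-b.deleted': 'volume-a.deleted',
#            'volume-a.deleted': None}
# _example_walk_clone_chain(parents, 'volume-b.deleted')
# -> ['volume-b.deleted', 'volume-a.deleted']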
if parent: LOG.debug("volume is a clone so cleaning references") parent_snap = typing.cast(str, parent_snap) self._delete_clone_parent_refs(client, parent, parent_snap) def delete_volume(self, volume: Volume) -> None: """Deletes an RBD volume.""" with RADOSClient(self) as client: self._delete_volume(volume, client) def create_snapshot(self, snapshot: Snapshot) -> None: """Creates an RBD snapshot.""" with RBDVolumeProxy(self, snapshot.volume_name) as volume: snap = snapshot.name volume.create_snap(snap) volume.protect_snap(snap) def delete_snapshot(self, snapshot: Snapshot) -> None: """Deletes an RBD snapshot.""" volume_name = snapshot.volume_name snap_name = snapshot.name @utils.retry(self.rbd.ImageBusy, self.configuration.rados_connection_interval, self.configuration.rados_connection_retries) def do_unprotect_snap(self, volume_name, snap_name): try: with RBDVolumeProxy(self, volume_name) as volume: volume.unprotect_snap(snap_name) except self.rbd.InvalidArgument: LOG.info( "InvalidArgument: Unable to unprotect snapshot %s.", snap_name) except self.rbd.ImageNotFound as e: LOG.info("Snapshot %s does not exist in backend.", snap_name) raise e except self.rbd.ImageBusy as e: # flatten and then retry the operation with RADOSClient(self) as client: self._flatten_children(client.ioctx, volume_name, snap_name) raise e try: do_unprotect_snap(self, volume_name, snap_name) except self.rbd.ImageBusy: raise exception.SnapshotIsBusy(snapshot_name=snap_name) except self.rbd.ImageNotFound: return try: with RBDVolumeProxy(self, volume_name) as volume: volume.remove_snap(snap_name) except self.rbd.ImageNotFound: LOG.info("Snapshot %s does not exist in backend.", snap_name) def snapshot_revert_use_temp_snapshot(self) -> bool: """Disable the use of a temporary snapshot on revert.""" return False def revert_to_snapshot(self, context: context.RequestContext, volume: Volume, snapshot: Snapshot) -> None: """Revert a volume to a given snapshot.""" # NOTE(rosmaita): The Ceph documentation notes that this operation is # inefficient on the backend for large volumes, and that the preferred # method of returning to a pre-existing state in Ceph is to clone from # a snapshot. # So why don't we do something like that here? # (a) an end user can do the more efficient operation on their own if # they value speed over the convenience of reverting their existing # volume # (b) revert-to-snapshot is properly a backend operation, and should # be handled by the backend -- trying to "fake it" in this driver # is both dishonest and likely to cause subtle bugs # (c) the Ceph project undergoes continual improvement. It may be # the case that there are things an operator can do on the Ceph # side (for example, use BlueStore for the Ceph backend storage) # to improve the efficiency of this operation. # Thus, a motivated operator reading this is encouraged to consult # the Ceph documentation. with RBDVolumeProxy(self, volume.name) as image: image.rollback_to_snap(snapshot.name) def _disable_replication(self, volume: Volume) -> dict[str, Optional[str]]: """Disable replication on the given volume.""" vol_name = volume.name with RBDVolumeProxy(self, vol_name) as image: image.mirror_image_disable(False) driver_data = json.loads(volume.replication_driver_data) # If 'journaling' and/or 'exclusive-lock' have # been enabled in '_enable_replication', # they will be disabled here. If not, it will keep # what it was before. 
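# ---------------------------------------------------------------------------
# NOTE: illustrative sketch only, not part of the driver. RBD image features
# are a bitmask, so "did the image already have journaling/exclusive-lock?"
# is a simple AND, and those booleans (saved in replication_driver_data when
# replication was enabled) decide which features get switched back off here.
# The constant values below are the usual librbd ones but are shown purely
# for illustration.
# ---------------------------------------------------------------------------
EXAMPLE_FEATURE_EXCLUSIVE_LOCK = 4
EXAMPLE_FEATURE_JOURNALING = 64


def _example_features_to_restore(current_features: int,
                                 had_journaling: bool,
                                 had_exclusive_lock: bool) -> int:
    """Return the bitmask of features that should be disabled again."""
    to_disable = 0
    if (current_features & EXAMPLE_FEATURE_JOURNALING) and not had_journaling:
        to_disable |= EXAMPLE_FEATURE_JOURNALING
    if ((current_features & EXAMPLE_FEATURE_EXCLUSIVE_LOCK)
            and not had_exclusive_lock):
        to_disable |= EXAMPLE_FEATURE_EXCLUSIVE_LOCK
    return to_disable


# An image that only gained journaling for replication keeps exclusive-lock:
# _example_features_to_restore(4 | 64, had_journaling=False,
#                              had_exclusive_lock=True) == 64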
if not driver_data['had_journaling']: image.update_features(self.RBD_FEATURE_JOURNALING, False) if not driver_data['had_exclusive_lock']: image.update_features(self.RBD_FEATURE_EXCLUSIVE_LOCK, False) return {'replication_status': fields.ReplicationStatus.DISABLED, 'replication_driver_data': None} def retype(self, context: context.RequestContext, volume: Volume, new_type: VolumeType, diff: Union[dict[str, dict[str, str]], dict[str, dict], None], host: Optional[dict[str, str]]) -> tuple[bool, dict]: """Retype from one volume type to another on the same backend. Returns a tuple of (diff, equal), where 'equal' is a boolean indicating whether there is any difference, and 'diff' is a dictionary with the following format: .. code-block:: default { 'encryption': {}, 'extra_specs': {}, 'qos_specs': {'consumer': (u'front-end', u'back-end'), u'total_bytes_sec': (None, u'2048000'), u'total_iops_sec': (u'200', None) {...}} } """ # NOTE(rogeryu): If `diff` contains `qos_specs`, `qos_spec` must have # the `consumer` parameter, whether or not there is a difference.] # Remove qos keys present in RBD image that are no longer in cinder qos # spec, new keys are added in _setup_volume. if diff and diff.get('qos_specs') and self._supports_qos(): specs = diff.get('qos_specs', {}) if (specs.get('consumer') and specs['consumer'][1] == 'front-end' and specs['consumer'][0] != 'front-end'): del_qos_keys = [key for key in specs.keys() if key in QOS_KEY_MAP.keys()] else: del_qos_keys = [] existing_config = self.get_rbd_image_qos(volume) for k, v in QOS_KEY_MAP.items(): qos_val = specs.get(k, None) vol_val = int(existing_config.get(v['ceph_key'])) if not qos_val: if vol_val != v['default']: del_qos_keys.append(k) continue if qos_val[1] is None and vol_val != v['default']: del_qos_keys.append(k) self.delete_rbd_image_qos_keys(volume, del_qos_keys) return True, self._setup_volume(volume, new_type) @staticmethod def _dumps(obj: dict[str, Union[bool, int]]) -> str: return json.dumps(obj, separators=(',', ':'), sort_keys=True) def _exec_on_volume(self, volume_name: str, remote: dict[str, str], operation: str, *args: Any, **kwargs: Any): @utils.retry(rbd.ImageBusy, self.configuration.rados_connection_interval, self.configuration.rados_connection_retries) def _do_exec(): timeout = self.configuration.replication_connect_timeout with RBDVolumeProxy(self, volume_name, self.configuration.rbd_pool, remote=remote, timeout=timeout) as rbd_image: return getattr(rbd_image, operation)(*args, **kwargs) return _do_exec() def _failover_volume(self, volume: Volume, remote: dict[str, str], is_demoted: bool, replication_status: str) -> dict[str, Any]: """Process failover for a volume. There are 2 different cases that will return different update values for the volume: - Volume has replication enabled and failover succeeded: Set replication status to failed-over. - Volume has replication enabled and failover fails: Set status to error, replication status to failover-error, and store previous status in previous_status field. """ # Failover is allowed when volume has it enabled or it has already # failed over, because we may want to do a second failover. 
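# ---------------------------------------------------------------------------
# NOTE: illustrative sketch only, not part of the driver. It spells out the
# two per-volume payloads the docstring above describes, so the shape handed
# back to the volume manager is easy to see. '<volume.id>' and 'in-use' are
# placeholder values.
# ---------------------------------------------------------------------------
from cinder.objects import fields  # already imported by this module

# Promotion of the replica succeeded:
EXAMPLE_FAILOVER_OK_UPDATE = {
    'volume_id': '<volume.id>',
    'updates': {'replication_status': fields.ReplicationStatus.FAILED_OVER},
}

# Promotion failed: the volume is errored out and its previous status is
# preserved so it can be restored later.
EXAMPLE_FAILOVER_ERROR_UPDATE = {
    'volume_id': '<volume.id>',
    'updates': {
        'status': 'error',
        'previous_status': 'in-use',
        'replication_status': fields.ReplicationStatus.FAILOVER_ERROR,
    },
}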
vol_name = volume.name try: self._exec_on_volume(vol_name, remote, 'mirror_image_promote', not is_demoted) return {'volume_id': volume.id, 'updates': {'replication_status': replication_status}} except Exception as e: replication_status = fields.ReplicationStatus.FAILOVER_ERROR LOG.error('Failed to failover volume %(volume)s with ' 'error: %(error)s.', {'volume': volume.name, 'error': e}) # Failover failed error_result = { 'volume_id': volume.id, 'updates': { 'status': 'error', 'previous_status': volume.status, 'replication_status': replication_status } } return error_result def _demote_volumes(self, volumes: list[Volume], until_failure: bool = True) -> list[bool]: """Try to demote volumes on the current primary cluster.""" result = [] try_demoting = True for volume in volumes: demoted = False if try_demoting: vol_name = volume.name try: self._exec_on_volume(vol_name, self._active_config, 'mirror_image_demote') demoted = True except Exception as e: LOG.debug('Failed to demote %(volume)s with error: ' '%(error)s.', {'volume': volume.name, 'error': e}) try_demoting = not until_failure result.append(demoted) return result def _get_failover_target_config( self, secondary_id: Optional[str] = None) -> tuple[str, dict]: if not secondary_id: # In auto mode exclude failback and active candidates = set(self._target_names).difference( ('default', self._active_backend_id)) if not candidates: raise exception.InvalidReplicationTarget( reason=_('RBD: No available failover target host.')) secondary_id = candidates.pop() return secondary_id, self._get_target_config(secondary_id) def failover(self, context: context.RequestContext, volumes: list, secondary_id: Optional[str] = None, groups=None) -> tuple[str, list, list]: """Failover replicated volumes.""" LOG.info('RBD driver failover started.') if not self._is_replication_enabled: raise exception.UnableToFailOver( reason=_('RBD: Replication is not enabled.')) if secondary_id == 'default': replication_status = fields.ReplicationStatus.ENABLED else: replication_status = fields.ReplicationStatus.FAILED_OVER secondary_id, remote = self._get_failover_target_config(secondary_id) # Try to demote the volumes first demotion_results = self._demote_volumes(volumes) # Do the failover taking into consideration if they have been demoted updates = [self._failover_volume(volume, remote, is_demoted, replication_status) for volume, is_demoted in zip(volumes, demotion_results)] LOG.info('RBD driver failover completed.') return secondary_id, updates, [] def failover_completed(self, context: context.RequestContext, secondary_id: Optional[str] = None) -> None: """Failover to replication target.""" LOG.info('RBD driver failover completion started.') secondary_id, remote = self._get_failover_target_config(secondary_id) self._active_backend_id = secondary_id self._active_config = remote self._set_default_secret_uuid() LOG.info('RBD driver failover completion completed.') def failover_host(self, context: context.RequestContext, volumes: list[Volume], secondary_id: Optional[str] = None, groups: Optional[list] = None) -> tuple[str, list[Volume], list]: """Failover to replication target. This function combines calls to failover() and failover_completed() to perform failover when Active/Active is not enabled. 
""" active_backend_id, volume_update_list, group_update_list = ( self.failover(context, volumes, secondary_id, groups)) self.failover_completed(context, secondary_id) return active_backend_id, volume_update_list, group_update_list def ensure_export(self, context: context.RequestContext, volume: Volume): """Synchronously recreates an export for a logical volume.""" pass def create_export(self, context: context.RequestContext, volume: Volume, connector: dict): """Exports the volume.""" pass def remove_export(self, context: context.RequestContext, volume: Volume): """Removes an export for a logical volume.""" pass def initialize_connection(self, volume: Volume, connector: dict) -> dict[str, Any]: hosts, ports = self._get_mon_addrs() name, conf, user, secret_uuid = self._get_config_tuple() data = { 'driver_volume_type': 'rbd', 'data': { 'name': '%s/%s' % (self.configuration.rbd_pool, volume.name), 'hosts': hosts, 'ports': ports, 'cluster_name': name, 'auth_enabled': (user is not None), 'auth_username': user, 'secret_type': 'ceph', 'secret_uuid': secret_uuid, 'volume_id': volume.id, "discard": True, } } if self.keyring_data: data['data']['keyring'] = self.keyring_data # type: ignore LOG.debug('connection data: %s', data) return data def terminate_connection(self, volume: Volume, connector: dict, **kwargs) -> None: pass @staticmethod def _parse_location(location: str) -> list[str]: prefix = 'rbd://' if not location.startswith(prefix): reason = _('Not stored in RBD') raise exception.ImageUnacceptable(image_id=location, reason=reason) pieces = [urllib.parse.unquote(loc) for loc in location[len(prefix):].split('/')] if any(map(lambda p: p == '', pieces)): reason = _('Blank components') raise exception.ImageUnacceptable(image_id=location, reason=reason) if len(pieces) != 4: reason = _('Not an RBD snapshot') raise exception.ImageUnacceptable(image_id=location, reason=reason) return pieces def _get_fsid(self) -> str: with RADOSClient(self) as client: # Librados's get_fsid is represented as binary # in py3 instead of str as it is in py2. # This causes problems with cinder rbd # driver as we rely on get_fsid return value # which should be string, not bytes. # Decode binary to str fixes these issues. # Fix with encodeutils.safe_decode CAN BE REMOVED # after librados's fix will be in stable for some time. 
# # More informations: # https://bugs.launchpad.net/glance-store/+bug/1816721 # https://bugs.launchpad.net/cinder/+bug/1816468 # https://tracker.ceph.com/issues/38381 return encodeutils.safe_decode(client.cluster.get_fsid()) def _is_cloneable(self, image_location: str, image_meta: dict) -> bool: try: fsid, pool, image, snapshot = self._parse_location(image_location) except exception.ImageUnacceptable as e: LOG.debug('not cloneable: %s.', e) return False if self._get_fsid() != fsid: LOG.debug('%s is in a different ceph cluster.', image_location) return False if image_meta['disk_format'] != 'raw': LOG.debug("RBD image clone requires image format to be " "'raw' but image %(image)s is '%(format)s'", {"image": image_location, "format": image_meta['disk_format']}) return False # check that we can read the image try: with RBDVolumeProxy(self, image, pool=pool, snapshot=snapshot, read_only=True): return True except self.rbd.Error as e: LOG.debug('Unable to open image %(loc)s: %(err)s.', dict(loc=image_location, err=e)) return False def clone_image(self, context: context.RequestContext, volume: Volume, image_location: Optional[list], image_meta: dict, image_service) -> tuple[dict, bool]: if image_location: # Note: image_location[0] is glance image direct_url. # image_location[1] contains the list of all locations (including # direct_url) or None if show_multiple_locations is False in # glance configuration. if image_location[1]: url_locations = [location['url'] for location in image_location[1]] else: url_locations = [image_location[0]] # iterate all locations to look for a cloneable one. for url_location in url_locations: if url_location and self._is_cloneable( url_location, image_meta): _prefix, pool, image, snapshot = \ self._parse_location(url_location) volume_update = self._clone(volume, pool, image, snapshot) volume_update['provider_location'] = None self._resize(volume) return volume_update, True return ({}, False) def copy_image_to_encrypted_volume(self, context: context.RequestContext, volume: Volume, image_service, image_id: str, disable_sparse=False) -> None: self._copy_image_to_volume(context, volume, image_service, image_id, encrypted=True, disable_sparse=disable_sparse) def copy_image_to_volume(self, context: context.RequestContext, volume: Volume, image_service, image_id: str, disable_sparse: bool = False) -> None: self._copy_image_to_volume(context, volume, image_service, image_id, disable_sparse=disable_sparse) def _encrypt_image(self, context: context.RequestContext, volume: Volume, tmp_dir: str, src_image_path: Any) -> None: encryption = volume_utils.check_encryption_provider( volume, context) # Fetch the key associated with the volume and decode the passphrase keymgr = key_manager.API(CONF) key = keymgr.get(context, encryption['encryption_key_id']) passphrase = binascii.hexlify(key.get_encoded()).decode('utf-8') # Decode the dm-crypt style cipher spec into something qemu-img can use cipher_spec = image_utils.decode_cipher(encryption['cipher'], encryption['key_size']) tmp_dir = volume_utils.image_conversion_dir() with tempfile.NamedTemporaryFile(prefix='luks_', dir=tmp_dir) as pass_file: with open(pass_file.name, 'w') as f: f.write(passphrase) # Convert the raw image to luks dest_image_path = src_image_path + '.luks' try: image_utils.convert_image(src_image_path, dest_image_path, 'luks', src_format='raw', cipher_spec=cipher_spec, passphrase_file=pass_file.name) # Replace the original image with the now encrypted image os.rename(dest_image_path, src_image_path) finally: 
fileutils.delete_if_exists(dest_image_path) def _copy_image_to_volume(self, context: context.RequestContext, volume: Volume, image_service: Any, image_id: str, encrypted: bool = False, disable_sparse: bool = False) -> None: tmp_dir = volume_utils.image_conversion_dir() with tempfile.NamedTemporaryFile(dir=tmp_dir) as tmp: image_utils.fetch_to_raw(context, image_service, image_id, tmp.name, self.configuration.volume_dd_blocksize, size=volume.size, disable_sparse=disable_sparse) if encrypted: self._encrypt_image(context, volume, tmp_dir, tmp.name) @utils.retry(exception.VolumeIsBusy, self.configuration.rados_connection_interval, self.configuration.rados_connection_retries) def _delete_volume(volume: Volume) -> None: self.delete_volume(volume) _delete_volume(volume) chunk_size = self.configuration.rbd_store_chunk_size * units.Mi order = int(math.log(chunk_size, 2)) # keep using the command line import instead of librbd since it # detects zeroes to preserve sparseness in the image args = ['rbd', 'import', '--pool', self.configuration.rbd_pool, '--order', order, tmp.name, volume.name, '--new-format'] args.extend(self._ceph_args()) self._try_execute(*args) self._resize(volume) # We may need to re-enable replication because we have deleted the # original image and created a new one using the command line import. try: self._setup_volume(volume) except Exception: err_msg = (_('Failed to enable image replication')) raise exception.ReplicationError(reason=err_msg, volume_id=volume.id) def _get_rbd_handle(self, volume: Volume): conn = self.initialize_connection(volume, {}) connector = volume_utils.brick_get_connector('rbd') return connector._get_rbd_handle(conn['data']) def copy_volume_to_image(self, context, volume, image_service, image_meta): if image_meta.get('container_format') != 'compressed' and ( image_meta['disk_format'] == 'raw'): source_handle = self._get_rbd_handle(volume) volume_utils.upload_volume(context, image_service, image_meta, None, volume, volume_fd=source_handle) else: # When the image format is different from volume format, we will # fallback to the old workflow because of the following issues: # 1. Passing RBDVolumeIOWrapper to format inspector # We fail when calling privsep since RPC cannot serialize the # RBDVolumeIOWrapper object # 2. Handling in format inspector # Determine if it's RBD file descriptor and only open the volume # file if it's not # 3. Handling in qemu-img commands # Use rbd:{pool-name}/{image-name} format instead of file path # https://docs.ceph.com/en/latest/rbd/qemu-rbd/#running-qemu-with-rbd # noqa # # Even if above issues are addressed, qemu-img convert will create # a local copy of converted volume file so will need to determine # the performance vs this workflow. 
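# ---------------------------------------------------------------------------
# NOTE: illustrative sketch only, not part of the driver. It condenses the
# branch described above: only an uncompressed raw target can be streamed
# straight from the RBD handle; anything else goes through a local
# 'rbd export' copy first. The function name and labels are hypothetical.
# ---------------------------------------------------------------------------
def _example_upload_path(image_meta: dict) -> str:
    """Return which upload path copy_volume_to_image would take."""
    if (image_meta.get('container_format') != 'compressed'
            and image_meta.get('disk_format') == 'raw'):
        return 'stream-rbd-handle'
    return 'export-to-temp-file'


# _example_upload_path({'disk_format': 'raw', 'container_format': 'bare'})
# -> 'stream-rbd-handle'
# _example_upload_path({'disk_format': 'qcow2', 'container_format': 'bare'})
# -> 'export-to-temp-file'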
tmp_dir = volume_utils.image_conversion_dir() tmp_file = os.path.join(tmp_dir, volume.name + '-' + image_meta['id']) with fileutils.remove_path_on_error(tmp_file): args = ['rbd', 'export', '--pool', self.configuration.rbd_pool, volume.name, tmp_file] args.extend(self._ceph_args()) self._try_execute(*args) volume_utils.upload_volume(context, image_service, image_meta, tmp_file, volume) os.unlink(tmp_file) def extend_volume(self, volume: Volume, new_size: str) -> None: """Extend an existing volume.""" old_size = volume.size try: size = int(new_size) * units.Gi self._resize(volume, size=size) except Exception: msg = _('Failed to Extend Volume ' '%(volname)s') % {'volname': volume.name} LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) LOG.debug("Extend volume from %(old_size)s GB to %(new_size)s GB.", {'old_size': old_size, 'new_size': new_size}) def _is_valid_type(self, volume_type): want_replication = self._is_replicated_type(volume_type) want_multiattach = self._is_multiattach_type(volume_type) if want_replication and want_multiattach: return False return True def manage_existing(self, volume: Volume, existing_ref: dict[str, str]) -> dict[str, Any]: """Manages an existing image. Renames the image name to match the expected name for the volume. Error checking done by manage_existing_get_size is not repeated. :param volume: volume ref info to be set :param existing_ref: existing_ref is a dictionary of the form: {'source-name': } """ # check if the volume type is valid and fail fast if not if not self._is_valid_type(volume.volume_type): msg = _('Replication and Multiattach are mutually exclusive.') raise exception.ManageExistingVolumeTypeMismatch(reason=msg) # Raise an exception if we didn't find a suitable rbd image. with RADOSClient(self) as client: rbd_name = existing_ref['source-name'] self.RBDProxy().rename(client.ioctx, utils.convert_str(rbd_name), volume.name) return self._setup_volume(volume) def manage_existing_get_size(self, volume: Volume, existing_ref: dict[str, str]) -> int: """Return size of an existing image for manage_existing. :param volume: volume ref info to be set :param existing_ref: existing_ref is a dictionary of the form: {'source-name': } """ # Check that the reference is valid if 'source-name' not in existing_ref: reason = _('Reference must contain source-name element.') raise exception.ManageExistingInvalidReference( existing_ref=existing_ref, reason=reason) rbd_name = utils.convert_str(existing_ref['source-name']) with RADOSClient(self) as client: # Raise an exception if we didn't find a suitable rbd image. try: rbd_image = self.rbd.Image(client.ioctx, rbd_name, read_only=True) except self.rbd.ImageNotFound: kwargs = {'existing_ref': rbd_name, 'reason': 'Specified RBD image does not exist.'} raise exception.ManageExistingInvalidReference(**kwargs) image_size = rbd_image.size() rbd_image.close() # RBD image size is returned in bytes. Attempt to parse # size as a float and round up to the next integer. 
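# ---------------------------------------------------------------------------
# NOTE: illustrative sketch only, not part of the driver. It shows the
# rounding rule the comment above refers to: the raw byte count reported by
# RBD is converted to GiB and always rounded *up*, so an image even one byte
# past a GiB boundary is managed as the next full GiB.
# ---------------------------------------------------------------------------
import math

from oslo_utils import units


def _example_bytes_to_cinder_gb(image_size_bytes: int) -> int:
    """Round an RBD image size in bytes up to whole GiB."""
    return int(math.ceil(float(image_size_bytes) / units.Gi))


# _example_bytes_to_cinder_gb(5 * units.Gi)      -> 5
# _example_bytes_to_cinder_gb(5 * units.Gi + 1)  -> 6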
try: convert_size = int(math.ceil(float(image_size) / units.Gi)) return convert_size except ValueError: exception_message = (_("Failed to manage existing volume " "%(name)s, because reported size " "%(size)s was not a floating-point" " number.") % {'name': rbd_name, 'size': image_size}) raise exception.VolumeBackendAPIException( data=exception_message) def _get_image_status(self, image_name): args = ['rbd', 'status', '--pool', self.configuration.rbd_pool, '--format=json', image_name] args.extend(self._ceph_args()) out, _ = self._execute(*args) return json.loads(out) def get_manageable_volumes(self, cinder_volumes: list[dict[str, str]], marker: Optional[Any], limit: int, offset: int, sort_keys: list[str], sort_dirs: list[str]) -> list[dict[str, Any]]: manageable_volumes = [] cinder_ids = [resource['id'] for resource in cinder_volumes] with RADOSClient(self) as client: for image_name in self.RBDProxy().list(client.ioctx): image_id = volume_utils.extract_id_from_volume_name(image_name) try: with RBDVolumeProxy(self, image_name, read_only=True, client=client.cluster, ioctx=client.ioctx) as image: image_info = { 'reference': {'source-name': image_name}, 'size': int(math.ceil( float(image.size()) / units.Gi)), 'cinder_id': None, 'extra_info': None } if image_id in cinder_ids: image_info['cinder_id'] = image_id image_info['safe_to_manage'] = False image_info['reason_not_safe'] = 'already managed' elif len(self._get_image_status( image_name)['watchers']) > 0: # If the num of watchers of image is >= 1, then the # image is considered to be used by client(s). image_info['safe_to_manage'] = False image_info['reason_not_safe'] = 'volume in use' elif image_name.endswith('.deleted'): # parent of cloned volume which marked as deleted # should not be manageable. image_info['safe_to_manage'] = False image_info['reason_not_safe'] = ( 'volume marked as deleted') else: image_info['safe_to_manage'] = True image_info['reason_not_safe'] = None manageable_volumes.append(image_info) except self.rbd.ImageNotFound: LOG.debug("Image %s is not found.", image_name) except self.rbd.Error as error: LOG.debug("Cannot open image %(image)s. (%(error)s)", ({'image': image_name, 'error': error})) return volume_utils.paginate_entries_list( manageable_volumes, marker, limit, offset, sort_keys, sort_dirs) def unmanage(self, volume): pass def update_migrated_volume(self, ctxt: dict, volume: Volume, new_volume: Volume, original_volume_status: str) -> \ Union[dict[str, None], dict[str, Optional[str]]]: """Return model update from RBD for migrated volume. This method should rename the back-end volume name(id) on the destination host back to its original name(id) on the source host. :param ctxt: The context used to run the method update_migrated_volume :param volume: The original volume that was migrated to this backend :param new_volume: The migration volume object that was created on this backend as part of the migration process :param original_volume_status: The status of the original volume :returns: model_update to update DB with any needed changes """ name_id = None provider_location = None if original_volume_status == 'in-use': # The back-end will not be renamed. 
name_id = new_volume['_name_id'] or new_volume['id'] provider_location = new_volume['provider_location'] return {'_name_id': name_id, 'provider_location': provider_location} existing_name = CONF.volume_name_template % new_volume.id wanted_name = CONF.volume_name_template % volume.id with RADOSClient(self) as client: try: self.RBDProxy().rename(client.ioctx, existing_name, wanted_name) except (self.rbd.ImageNotFound, self.rbd.ImageExists): LOG.error('Unable to rename the logical volume ' 'for volume %s.', volume.id) # If the rename fails, _name_id should be set to the new # volume id and provider_location should be set to the # one from the new volume as well. name_id = new_volume._name_id or new_volume.id provider_location = new_volume['provider_location'] return {'_name_id': name_id, 'provider_location': provider_location} def migrate_volume(self, context: context.RequestContext, volume: Volume, host: dict[str, dict[str, str]]) -> tuple[bool, None]: refuse_to_migrate = (False, None) if volume.status not in ('available', 'retyping', 'maintenance', 'in-use'): LOG.debug('Only available or in-use volumes can be migrated using ' 'backend assisted migration. Falling back to generic ' 'migration.') return refuse_to_migrate if (host['capabilities']['storage_protocol'] != self.STORAGE_PROTOCOL): LOG.debug('Source and destination drivers need to be RBD ' 'to use backend assisted migration. Falling back to ' 'generic migration.') return refuse_to_migrate loc_info = host['capabilities'].get('location_info') LOG.debug('Attempting RBD assisted volume migration. volume: %(id)s, ' 'host: %(host)s, status=%(status)s.', {'id': volume.id, 'host': host, 'status': volume.status}) if not loc_info: LOG.debug('Could not find location_info in capabilities reported ' 'by the destination driver. Falling back to generic ' 'migration.') return refuse_to_migrate try: (rbd_cluster_name, rbd_ceph_conf, rbd_fsid, rbd_user, rbd_pool) = ( utils.convert_str(loc_info).split(':')) except ValueError: LOG.error('Location info needed for backend enabled volume ' 'migration not in correct format: %s. Falling back to ' 'generic volume migration.', loc_info) return refuse_to_migrate with linuxrbd.RBDClient(rbd_user, rbd_pool, conffile=rbd_ceph_conf, rbd_cluster_name=rbd_cluster_name) as target: if (rbd_fsid != self._get_fsid()) or \ (rbd_fsid != encodeutils.safe_decode( target.client.get_fsid())): LOG.info('Migration between clusters is not supported. ' 'Falling back to generic migration.') return refuse_to_migrate if rbd_pool == self.configuration.rbd_pool: LOG.debug('Migration in the same pool, just need to update ' "volume's host value to destination host.") return (True, None) if volume.status == 'in-use': LOG.debug('Migration in-use volume between different pools. ' 'Falling back to generic migration.') return refuse_to_migrate with RBDVolumeProxy(self, volume.name, read_only=True) as source: try: source.copy(target.ioctx, volume.name) except Exception: with excutils.save_and_reraise_exception(): LOG.error('Error copying RBD image %(vol)s to target ' 'pool %(pool)s.', {'vol': volume.name, 'pool': rbd_pool}) self.RBDProxy().remove(target.ioctx, volume.name) try: # If the source fails to delete for some reason, we want to leave # the target volume in place in case deleting it might cause a lose # of data. 
            self.delete_volume(volume)
        except Exception:
            reason = ('Failed to delete migration source volume %s.' %
                      volume.id)
            raise exception.VolumeMigrationFailed(reason=reason)

        LOG.info('Successful RBD assisted volume migration.')
        return (True, None)

    def manage_existing_snapshot_get_size(self, snapshot: Snapshot,
                                          existing_ref: dict[str, Any]) -> int:
        """Return size of an existing image for manage_existing.

        :param snapshot: snapshot ref info to be set
        :param existing_ref: existing_ref is a dictionary of the form:
            {'source-name': }
        """
        # Check that the reference is valid
        if not isinstance(existing_ref, dict):
            existing_ref = {"source-name": existing_ref}
        if 'source-name' not in existing_ref:
            reason = _('Reference must contain source-name element.')
            raise exception.ManageExistingInvalidReference(
                existing_ref=existing_ref, reason=reason)

        volume_name = snapshot.volume_name
        snapshot_name = utils.convert_str(existing_ref['source-name'])

        with RADOSClient(self) as client:
            # Raise an exception if we didn't find a suitable rbd image.
            try:
                rbd_snapshot = self.rbd.Image(client.ioctx, volume_name,
                                              snapshot=snapshot_name,
                                              read_only=True)
            except self.rbd.ImageNotFound:
                kwargs = {'existing_ref': snapshot_name,
                          'reason': 'Specified snapshot does not exist.'}
                raise exception.ManageExistingInvalidReference(**kwargs)

            snapshot_size = rbd_snapshot.size()
            rbd_snapshot.close()

            # RBD image size is returned in bytes. Attempt to parse
            # size as a float and round up to the next integer.
            try:
                convert_size = int(math.ceil(float(snapshot_size) / units.Gi))
                return convert_size
            except ValueError:
                exception_message = (_("Failed to manage existing snapshot "
                                       "%(name)s, because reported size "
                                       "%(size)s was not a floating-point"
                                       " number.")
                                     % {'name': snapshot_name,
                                        'size': snapshot_size})
                raise exception.VolumeBackendAPIException(
                    data=exception_message)

    def manage_existing_snapshot(self, snapshot: Snapshot,
                                 existing_ref: dict[str, Any]) -> None:
        """Manages an existing snapshot.

        Renames the snapshot name to match the expected name for the snapshot.
        Error checking done by manage_existing_get_size is not repeated.
:param snapshot: snapshot ref info to be set :param existing_ref: existing_ref is a dictionary of the form: {'source-name': } """ if not isinstance(existing_ref, dict): existing_ref = {"source-name": existing_ref} volume_name = snapshot.volume_name with RBDVolumeProxy(self, volume_name) as volume: snapshot_name = existing_ref['source-name'] volume.rename_snap(utils.convert_str(snapshot_name), snapshot.name) if not volume.is_protected_snap(snapshot.name): volume.protect_snap(snapshot.name) def get_manageable_snapshots(self, cinder_snapshots: list[dict[str, str]], marker: Optional[Any], limit: int, offset: int, sort_keys: list[str], sort_dirs: list[str]) -> list[dict[str, Any]]: """List manageable snapshots on RBD backend.""" manageable_snapshots = [] cinder_snapshot_ids = [resource['id'] for resource in cinder_snapshots] with RADOSClient(self) as client: for image_name in self.RBDProxy().list(client.ioctx): with RBDVolumeProxy(self, image_name, read_only=True, client=client.cluster, ioctx=client.ioctx) as image: try: for snapshot in image.list_snaps(): snapshot_id = ( volume_utils.extract_id_from_snapshot_name( snapshot['name'])) snapshot_info = { 'reference': {'source-name': snapshot['name']}, 'size': int(math.ceil( float(snapshot['size']) / units.Gi)), 'cinder_id': None, 'extra_info': None, 'safe_to_manage': False, 'reason_not_safe': None, 'source_reference': {'source-name': image_name} } if snapshot_id in cinder_snapshot_ids: # Exclude snapshots already managed. snapshot_info['reason_not_safe'] = ( 'already managed') snapshot_info['cinder_id'] = snapshot_id elif snapshot['name'].endswith('.clone_snap'): # Exclude clone snapshot. snapshot_info['reason_not_safe'] = ( 'used for clone snap') elif (snapshot['name'].startswith('backup') and '.snap.' in snapshot['name']): # Exclude intermediate snapshots created by the # Ceph backup driver. snapshot_info['reason_not_safe'] = ( 'used for volume backup') else: snapshot_info['safe_to_manage'] = True manageable_snapshots.append(snapshot_info) except self.rbd.ImageNotFound: LOG.debug("Image %s is not found.", image_name) return volume_utils.paginate_entries_list( manageable_snapshots, marker, limit, offset, sort_keys, sort_dirs) def unmanage_snapshot(self, snapshot: Snapshot) -> None: """Removes the specified snapshot from Cinder management.""" with RBDVolumeProxy(self, snapshot.volume_name) as volume: volume.set_snap(snapshot.name) children = volume.list_children() volume.set_snap(None) if not children and volume.is_protected_snap(snapshot.name): volume.unprotect_snap(snapshot.name) def get_backup_device(self, context: context.RequestContext, backup: Backup) -> tuple[Volume, bool]: """Get a backup device from an existing volume. To support incremental backups on Ceph to Ceph we don't clone the volume. 
""" if not ('backup.drivers.ceph' in backup.service) or backup.snapshot_id: return super(RBDDriver, self).get_backup_device(context, backup) volume = objects.Volume.get_by_id(context, backup.volume_id) return (volume, False) @utils.retry(exception.VolumeBackendAPIException) def get_rbd_image_qos(self, volume): try: with RBDVolumeProxy(self, volume.name) as rbd_image: current = {k['name']: k['value'] for k in rbd_image.config_list()} return current except Exception as e: msg = (_("Failed to get qos specs for RBD image " "%(rbd_image_name)s, due to " "%(error)s.") % {'rbd_image_name': volume.name, 'error': e}) raise exception.VolumeBackendAPIException( data=msg) @utils.retry(exception.VolumeBackendAPIException) def update_rbd_image_qos(self, volume, qos_specs): try: with RBDVolumeProxy(self, volume.name) as rbd_image: for qos_key, qos_val in qos_specs.items(): if qos_key in QOS_KEY_MAP: rbd_image.config_set(QOS_KEY_MAP[qos_key]['ceph_key'], str(qos_val)) LOG.debug('qos_specs: %(qos_key)s successfully set to' ' %(qos_value)s', {'qos_key': qos_key, 'qos_value': qos_val}) else: LOG.warning('qos_specs: the requested qos key ' '%(qos_key)s does not exist', {'qos_key': qos_key, 'qos_value': qos_val}) except Exception as e: msg = (_('Failed to set qos spec %(qos_key)s ' 'for RBD image %(rbd_image_name)s, ' 'due to %(error)s.') % {'qos_key': qos_key, 'rbd_image_name': volume.name, 'error': e}) raise exception.VolumeBackendAPIException(data=msg) @utils.retry(exception.VolumeBackendAPIException) def delete_rbd_image_qos_keys(self, volume, qos_keys): try: with RBDVolumeProxy(self, volume.name) as rbd_image: for key in qos_keys: rbd_image.config_remove(QOS_KEY_MAP[key]['ceph_key']) LOG.debug('qos_specs: %(qos_key)s was ' 'successfully unset', {'qos_key': key}) except Exception as e: msg = (_("Failed to delete qos keys %(qos_key)s " "for RBD image %(rbd_image_name)s, " "due to %(error)s.") % {'qos_key': key, 'rbd_image_name': volume.name, 'error': e}) raise exception.VolumeBackendAPIException(data=msg) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/drivers/remotefs.py0000664000175000017500000027730300000000000021532 0ustar00zuulzuul00000000000000# Copyright (c) 2012 NetApp, Inc. # Copyright (c) 2014 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import binascii import collections import errno import hashlib import inspect import json import math import os import re import shutil import string import tempfile import time import typing from typing import Callable, List, Optional, Tuple, Union from castellan import key_manager from os_brick.remotefs import remotefs as remotefs_brick from oslo_config import cfg from oslo_log import log as logging from oslo_serialization import jsonutils from oslo_utils import imageutils from oslo_utils import units from cinder import compute from cinder import context from cinder import coordination from cinder import db from cinder import exception from cinder.i18n import _ from cinder.image import image_utils from cinder import objects from cinder.objects import fields from cinder import utils from cinder.volume import configuration from cinder.volume import driver from cinder.volume import volume_utils LOG = logging.getLogger(__name__) nas_opts = [ cfg.StrOpt('nas_host', default='', help='IP address or Hostname of NAS system.'), cfg.StrOpt('nas_login', default='admin', help='User name to connect to NAS system.'), cfg.StrOpt('nas_password', default='', help='Password to connect to NAS system.', secret=True), cfg.PortOpt('nas_ssh_port', default=22, help='SSH port to use to connect to NAS system.'), cfg.StrOpt('nas_private_key', default='', help='Filename of private key to use for SSH authentication.'), cfg.StrOpt('nas_secure_file_operations', default='auto', help=('Allow network-attached storage systems to operate in a ' 'secure environment where root level access is not ' 'permitted. If set to False, access is as the root user ' 'and insecure. If set to True, access is not as root. ' 'If set to auto, a check is done to determine if this is ' 'a new installation: True is used if so, otherwise ' 'False. Default is auto.')), cfg.StrOpt('nas_secure_file_permissions', default='auto', help=('Set more secure file permissions on network-attached ' 'storage volume files to restrict broad other/world ' 'access. If set to False, volumes are created with open ' 'permissions. If set to True, volumes are created with ' 'permissions for the cinder user and group (660). If ' 'set to auto, a check is done to determine if ' 'this is a new installation: True is used if so, ' 'otherwise False. Default is auto.')), cfg.StrOpt('nas_share_path', default='', help=('Path to the share to use for storing Cinder volumes. ' 'For example: "/srv/export1" for an NFS server export ' 'available at 10.0.5.10:/srv/export1 .')), cfg.StrOpt('nas_mount_options', help=('Options used to mount the storage backend file system ' 'where Cinder volumes are stored.')), ] volume_opts = [ cfg.StrOpt('nas_volume_prov_type', default='thin', choices=['thin', 'thick'], help=('Provisioning type that will be used when ' 'creating volumes.')), ] CONF = cfg.CONF CONF.register_opts(nas_opts, group=configuration.SHARED_CONF_GROUP) CONF.register_opts(volume_opts, group=configuration.SHARED_CONF_GROUP) def locked_volume_id_operation(f: Callable) -> Callable: """Lock decorator for volume operations. Takes a named lock prior to executing the operation. The lock is named with the id of the volume. This lock can be used by driver methods to prevent conflicts with other operations modifying the same volume. May be applied to methods that take a 'volume' or 'snapshot' argument. 
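    A minimal usage sketch (mirroring how the RemoteFSSnapDriver methods
    later in this module apply the decorator):

        @locked_volume_id_operation
        def extend_volume(self, volume, size_gb):
            return self._extend_volume(volume, size_gb)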
""" def lvo_inner1(inst, *args, **kwargs): lock_tag = inst.driver_prefix call_args = inspect.getcallargs(f, inst, *args, **kwargs) if call_args.get('volume'): volume_id = call_args['volume'].id elif call_args.get('snapshot'): volume_id = call_args['snapshot'].volume.id else: err_msg = _('The decorated method must accept either a volume or ' 'a snapshot object') raise exception.VolumeBackendAPIException(data=err_msg) @utils.synchronized('%s-%s' % (lock_tag, volume_id), external=False) def lvo_inner2(): return f(inst, *args, **kwargs) return lvo_inner2() return lvo_inner1 class BackingFileTemplate(string.Template): """Custom Template for substitutions in backing files regex strings Changes the default delimiter from '$' to '#' in order to prevent clashing with the regex end of line marker '$'. """ delimiter = '#' idpattern = r'[a-z][_a-z0-9]*' class RemoteFSDriver(driver.BaseVD): """Common base for drivers that work like NFS.""" driver_volume_type: Optional[str] = None driver_prefix = 'remotefs' volume_backend_name: Optional[str] = None vendor_name = 'Open Source' SHARE_FORMAT_REGEX = r'.+:/.+' # We let the drivers inheriting this specify # whether thin provisioning is supported or not. _thin_provisioning_support = False _thick_provisioning_support = False def __init__(self, *args, **kwargs): super(RemoteFSDriver, self).__init__(*args, **kwargs) self.shares = {} self._mounted_shares: List[str] = [] self._execute_as_root = True self._is_voldb_empty_at_startup = kwargs.pop('is_vol_db_empty', None) self._supports_encryption = False self.format = 'raw' if self.configuration: self.configuration.append_config_values(nas_opts) self.configuration.append_config_values(volume_opts) def check_for_setup_error(self) -> None: """Just to override parent behavior.""" pass @volume_utils.trace def initialize_connection(self, volume: objects.Volume, connector: dict) -> dict: """Allow connection to connector and return connection info. :param volume: volume reference :param connector: connector reference """ data = {'export': volume.provider_location, 'name': volume.name} if volume.provider_location in self.shares: data['options'] = self.shares[volume.provider_location] return { 'driver_volume_type': self.driver_volume_type, 'data': data, 'mount_point_base': self._get_mount_point_base() } def do_setup(self, context: context.RequestContext) -> None: """Any initialization the volume driver does while starting.""" super(RemoteFSDriver, self).do_setup(context) # Validate the settings for our secure file options. self.configuration.nas_secure_file_permissions = \ self.configuration.nas_secure_file_permissions.lower() self.configuration.nas_secure_file_operations = \ self.configuration.nas_secure_file_operations.lower() valid_secure_opts = ['auto', 'true', 'false'] secure_options = {'nas_secure_file_permissions': self.configuration.nas_secure_file_permissions, 'nas_secure_file_operations': self.configuration.nas_secure_file_operations} LOG.debug('NAS config: %s', secure_options) for opt_name, opt_value in secure_options.items(): if opt_value not in valid_secure_opts: err_parms = {'name': opt_name, 'value': opt_value} msg = _("NAS config '%(name)s=%(value)s' invalid. Must be " "'auto', 'true', or 'false'") % err_parms LOG.error(msg) raise exception.InvalidConfigurationValue(msg) def _get_provisioned_capacity(self) -> float: """Returns the provisioned capacity. Get the sum of sizes of volumes, snapshots and any other files on the mountpoint. 
""" provisioned_size = 0.0 for share in self.shares.keys(): mount_path = self._get_mount_point_for_share(share) out, _ = self._execute('du', '--bytes', '-s', mount_path, run_as_root=self._execute_as_root) provisioned_size += int(out.split()[0]) return round(provisioned_size / units.Gi, 2) def _get_mount_point_base(self) -> Optional[str]: """Returns the mount point base for the remote fs. This method facilitates returning mount point base for the specific remote fs. Override this method in the respective driver to return the entry to be used while attach/detach using brick in cinder. If not overridden then it returns None without raising exception to continue working for cases when not used with brick. """ LOG.debug("Driver specific implementation needs to return" " mount_point_base.") return None @staticmethod def _validate_state(current_state: str, acceptable_states: Union[tuple, list], obj_description='volume', invalid_exc=exception.InvalidVolume) -> None: if current_state not in acceptable_states: message = _('Invalid %(obj_description)s state. ' 'Acceptable states for this operation: ' '%(acceptable_states)s. ' 'Current %(obj_description)s state: ' '%(current_state)s.') raise invalid_exc( message=message % dict(obj_description=obj_description, acceptable_states=acceptable_states, current_state=current_state)) @volume_utils.trace def create_volume(self, volume: objects.Volume) -> dict: """Creates a volume. :param volume: volume reference :returns: provider_location update dict for database """ if volume.encryption_key_id and not self._supports_encryption: message = _("Encryption is not yet supported.") raise exception.VolumeDriverException(message=message) LOG.debug('Creating volume %(vol)s', {'vol': volume.id}) self._ensure_shares_mounted() volume.provider_location = self._find_share(volume) LOG.info('casted to %s', volume.provider_location) self._do_create_volume(volume) return {'provider_location': volume.provider_location} def _do_create_volume(self, volume: objects.Volume) -> None: """Create a volume on given remote share. :param volume: volume reference """ volume_path = self.local_path(volume) volume_size = volume.size encrypted = volume.encryption_key_id is not None if encrypted: encryption = volume_utils.check_encryption_provider( volume, volume.obj_context) self._create_encrypted_volume_file(volume_path, volume_size, encryption, volume.obj_context) elif getattr(self.configuration, self.driver_prefix + '_qcow2_volumes', False): # QCOW2 volumes are inherently sparse, so this setting # will override the _sparsed_volumes setting. 
self._create_qcow2_file(volume_path, volume_size) self.format = 'qcow2' elif getattr(self.configuration, self.driver_prefix + '_sparsed_volumes', False): self._create_sparsed_file(volume_path, volume_size) else: self._create_regular_file(volume_path, volume_size) self._set_rw_permissions(volume_path) volume.admin_metadata['format'] = self.format # This is done here because when creating a volume from image, # while encountering other volume.save() method fails for # non-admins with volume.obj_as_admin(): volume.save() def _ensure_shares_mounted(self) -> None: """Look for remote shares in the flags and mount them locally.""" mounted_shares: List[str] = [] self._load_shares_config(getattr(self.configuration, self.driver_prefix + '_shares_config')) for share in self.shares: try: self._ensure_share_mounted(share) mounted_shares.append(share) except Exception as exc: LOG.error('Exception during mounting %s', exc) self._mounted_shares = mounted_shares LOG.debug('Available shares %s', self._mounted_shares) @volume_utils.trace def delete_volume(self, volume: objects.Volume) -> None: """Deletes a logical volume. :param volume: volume reference """ LOG.debug('Deleting volume %(vol)s, provider_location: %(loc)s', {'vol': volume.id, 'loc': volume.provider_location}) if not volume.provider_location: LOG.warning('Volume %s does not have ' 'provider_location specified, ' 'skipping', volume.name) return self._ensure_share_mounted(volume.provider_location) mounted_path = self.local_path(volume) self._delete(mounted_path) def ensure_export(self, ctx: context.RequestContext, volume: objects.Volume) -> None: """Synchronously recreates an export for a logical volume.""" self._ensure_share_mounted(volume.provider_location) def create_export(self, ctx: context.RequestContext, volume: objects.Volume, connector: dict) -> None: """Exports the volume. Can optionally return a dictionary of changes to the volume object to be persisted. """ pass def remove_export(self, ctx: context.RequestContext, volume: objects.Volume) -> None: """Removes an export for a logical volume.""" pass def delete_snapshot(self, snapshot: objects.Snapshot) -> None: """Delete snapshot. Do nothing for this driver, but allow manager to handle deletion of snapshot in error state. """ pass def _delete(self, path: str) -> None: # Note(lpetrut): this method is needed in order to provide # interoperability with Windows as it will be overridden. self._execute('rm', '-f', path, run_as_root=self._execute_as_root) def _create_sparsed_file(self, path: str, size: int) -> None: """Creates a sparse file of a given size in GiB.""" self._execute('truncate', '-s', '%sG' % size, path, run_as_root=self._execute_as_root) def _create_regular_file(self, path: str, size: int) -> None: """Creates a regular file of given size in GiB.""" block_size_mb = 1 block_count = size * units.Gi // (block_size_mb * units.Mi) self._execute('dd', 'if=/dev/zero', 'of=%s' % path, 'bs=%dM' % block_size_mb, 'count=%d' % block_count, run_as_root=self._execute_as_root) def _create_qcow2_file(self, path: str, size_gb: int) -> None: """Creates a QCOW2 file of a given size in GiB.""" self._execute('qemu-img', 'create', '-f', 'qcow2', '-o', 'preallocation=metadata', path, str(size_gb * units.Gi), run_as_root=self._execute_as_root) def _create_encrypted_volume_file(self, path: str, size_gb: int, encryption: dict, context: context.RequestContext) -> None: """Create an encrypted volume. This works by creating an encrypted image locally, and then uploading it to the volume. 
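        Roughly, the command assembled below looks like the following
        (editor's illustration only; the cipher values come from the
        volume type's encryption provider):

            qemu-img create -f qcow2 \
                -o encrypt.format=luks,encrypt.key-secret=sec1,... \
                --object secret,id=sec1,format=raw,file=<tmp passphrase file> \
                <volume path> <size in bytes>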
""" cipher_spec = image_utils.decode_cipher(encryption['cipher'], encryption['key_size']) # TODO(enriquetaso): share this code w/ the RBD driver # Fetch the key associated with the volume and decode the passphrase keymgr = key_manager.API(CONF) key = keymgr.get(context, encryption['encryption_key_id']) passphrase = binascii.hexlify(key.get_encoded()).decode('utf-8') # create a file tmp_dir = volume_utils.image_conversion_dir() with tempfile.NamedTemporaryFile(dir=tmp_dir) as tmp_key: # TODO(enriquetaso): encrypt w/ aes256 cipher text # (qemu-img feature) ? with open(tmp_key.name, 'w') as f: f.write(passphrase) self._execute( 'qemu-img', 'create', '-f', 'qcow2', '-o', 'encrypt.format=luks,' 'encrypt.key-secret=sec1,' 'encrypt.cipher-alg=%(cipher_alg)s,' 'encrypt.cipher-mode=%(cipher_mode)s,' 'encrypt.ivgen-alg=%(ivgen_alg)s' % cipher_spec, '--object', 'secret,id=sec1,format=raw,file=' + tmp_key.name, path, str(size_gb * units.Gi), run_as_root=self._execute_as_root) def _set_rw_permissions(self, path: str) -> None: """Sets access permissions for given NFS path. Volume file permissions are set based upon the value of secure_file_permissions: 'true' sets secure access permissions and 'false' sets more open (insecure) access permissions. :param path: the volume file path. """ if self.configuration.nas_secure_file_permissions == 'true': permissions = '660' LOG.debug('File path %(path)s is being set with permissions: ' '%(permissions)s', {'path': path, 'permissions': permissions}) else: permissions = 'ugo+rw' LOG.warning('%(path)s is being set with open permissions: ' '%(perm)s', {'path': path, 'perm': permissions}) self._execute('chmod', permissions, path, run_as_root=self._execute_as_root) def _set_rw_permissions_for_all(self, path: str) -> None: """Sets 666 permissions for the path.""" self._execute('chmod', 'ugo+rw', path, run_as_root=self._execute_as_root) def _set_rw_permissions_for_owner(self, path: str) -> None: """Sets read-write permissions to the owner for the path.""" self._execute('chmod', 'u+rw', path, run_as_root=self._execute_as_root) def local_path(self, volume: objects.Volume) -> str: """Get volume path (mounted locally fs path) for given volume. 
:param volume: volume reference """ remotefs_share = volume.provider_location return os.path.join(self._get_mount_point_for_share(remotefs_share), volume.name) def copy_image_to_volume(self, context: context.RequestContext, volume: objects.Volume, image_service, image_id: str, disable_sparse: bool = False) -> None: """Fetch the image from image_service and write it to the volume.""" image_utils.fetch_to_raw(context, image_service, image_id, self.local_path(volume), self.configuration.volume_dd_blocksize, size=volume.size, run_as_root=self._execute_as_root, disable_sparse=disable_sparse) # NOTE (leseb): Set the virtual size of the image # the raw conversion overwrote the destination file # (which had the correct size) # with the fetched glance image size, # thus the initial 'size' parameter is not honored # this sets the size to the one asked in the first place by the user # and then verify the final virtual size image_utils.resize_image(self.local_path(volume), volume.size, run_as_root=self._execute_as_root) data = image_utils.qemu_img_info(self.local_path(volume), run_as_root=self._execute_as_root) virt_size = data.virtual_size // units.Gi if virt_size != volume.size: raise exception.ImageUnacceptable( image_id=image_id, reason=(_("Expected volume size was %d") % volume.size) + (_(" but size is now %d") % virt_size)) def copy_volume_to_image(self, context: context.RequestContext, volume: objects.Volume, image_service, image_meta: dict) -> None: """Copy the volume to the specified image.""" volume_utils.upload_volume(context, image_service, image_meta, self.local_path(volume), volume, run_as_root=self._execute_as_root) def _read_config_file(self, config_file: str) -> List[str]: # Returns list of lines in file with open(config_file) as f: return f.readlines() def _load_shares_config(self, share_file: Optional[str] = None) -> None: self.shares = {} if all((self.configuration.nas_host, self.configuration.nas_share_path)): LOG.debug('Using nas_host and nas_share_path configuration.') nas_host = self.configuration.nas_host nas_share_path = self.configuration.nas_share_path share_address = '%s:%s' % (nas_host, nas_share_path) if not re.match(self.SHARE_FORMAT_REGEX, share_address): msg = (_("Share %s ignored due to invalid format. Must " "be of form address:/export. Please check the " "nas_host and nas_share_path settings."), share_address) raise exception.InvalidConfigurationValue(msg) self.shares[share_address] = self.configuration.nas_mount_options elif share_file is not None: LOG.debug('Loading shares from %s.', share_file) for share in self._read_config_file(share_file): # A configuration line may be either: # host:/vol_name # or # host:/vol_name -o options=123,rw --other if not share.strip(): # Skip blank or whitespace-only lines continue if share.startswith('#'): continue share_info = share.split(' ', 1) # results in share_info = # [ 'address:/vol', '-o options=123,rw --other' ] share_address = share_info[0].strip() # Replace \040 with a space, to support paths with spaces share_address = share_address.replace("\\040", " ") share_opts = None if len(share_info) > 1: share_opts = share_info[1].strip() if not re.match(self.SHARE_FORMAT_REGEX, share_address): LOG.error("Share %s ignored due to invalid format. 
" "Must be of form address:/export.", share_address) continue self.shares[share_address] = share_opts LOG.debug("shares loaded: %s", self.shares) def _get_mount_point_for_share(self, path: str) -> str: raise NotImplementedError() def terminate_connection(self, volume: objects.Volume, connector: dict, **kwargs) -> None: """Disallow connection from connector.""" pass def _update_volume_stats(self) -> None: """Retrieve stats info from volume group.""" data = {} backend_name = self.configuration.safe_get('volume_backend_name') data['volume_backend_name'] = backend_name or self.volume_backend_name data['vendor_name'] = 'Open Source' data['driver_version'] = self.get_version() data['storage_protocol'] = self.driver_volume_type self._ensure_shares_mounted() global_capacity = 0 global_free = 0 for share in self._mounted_shares: capacity, free, used = self._get_capacity_info(share) global_capacity += capacity global_free += free data['total_capacity_gb'] = global_capacity / float(units.Gi) data['free_capacity_gb'] = global_free / float(units.Gi) data['reserved_percentage'] = self.configuration.reserved_percentage data['QoS_support'] = False self._stats = data def _get_capacity_info(self, share: str): raise NotImplementedError() def _find_share(self, volume: objects.Volume): raise NotImplementedError() def _ensure_share_mounted(self, share: str): raise NotImplementedError() def secure_file_operations_enabled(self) -> bool: """Determine if driver is operating in Secure File Operations mode. The Cinder Volume driver needs to query if this driver is operating in a secure file mode; check our nas_secure_file_operations flag. """ if self.configuration.nas_secure_file_operations == 'true': return True return False def set_nas_security_options(self, is_new_cinder_install: bool) -> None: """Determine the setting to use for Secure NAS options. This method must be overridden by child wishing to use secure NAS file operations. This base method will set the NAS security options to false. """ doc_html = ("https://docs.openstack.org/cinder/latest/admin" "/blockstorage-nfs-backend.html") self.configuration.nas_secure_file_operations = 'false' LOG.warning("The NAS file operations will be run as root: " "allowing root level access at the storage backend. " "This is considered an insecure NAS environment. " "Please see %s for information on a secure NAS " "configuration.", doc_html) self.configuration.nas_secure_file_permissions = 'false' LOG.warning("The NAS file permissions mode will be 666 (allowing " "other/world read & write access). This is considered " "an insecure NAS environment. Please see %s for " "information on a secure NFS configuration.", doc_html) def _determine_nas_security_option_setting( self, nas_option: str, mount_point: str, is_new_cinder_install: bool) -> str: """Determine NAS security option setting when 'auto' is assigned. This method determines the final 'true'/'false' setting of an NAS security option when the default value of 'auto' has been detected. If the nas option isn't 'auto' then its current value is used. :param nas_option: The NAS security option value loaded from config. :param mount_point: Mount where indicator file is written. :param is_new_cinder_install: boolean for new Cinder installation. :return string: 'true' or 'false' for new option setting. """ if nas_option == 'auto': # For auto detection, we first check to see if we have been # through this process before by checking for the existence of # the Cinder secure environment indicator file. 
file_name = '.cinderSecureEnvIndicator' file_path = os.path.join(mount_point, file_name) if os.path.isfile(file_path): nas_option = 'true' LOG.info('Cinder secure environment ' 'indicator file exists.') else: # The indicator file does not exist. If it is a new # installation, set to 'true' and create the indicator file. if is_new_cinder_install: nas_option = 'true' try: with open(file_path, 'w') as fh: fh.write('Detector file for Cinder secure ' 'environment usage.\n') fh.write('Do not delete this file.\n') # Set the permissions on our special marker file to # protect from accidental removal (owner write only). self._execute('chmod', '640', file_path, run_as_root=self._execute_as_root) LOG.info('New Cinder secure environment indicator' ' file created at path %s.', file_path) except IOError as err: LOG.error('Failed to created Cinder secure ' 'environment indicator file: %s', err) if err.errno == errno.EACCES: LOG.warning('Reverting to non-secure mode. Adjust ' 'permissions at %s to allow the ' 'cinder volume service write access ' 'to use secure mode.', mount_point) nas_option = 'false' else: # For existing installs, we default to 'false'. The # admin can always set the option at the driver config. nas_option = 'false' return nas_option class RemoteFSSnapDriverBase(RemoteFSDriver): """Base class for remotefs drivers implementing qcow2 snapshots. Driver must implement: _local_volume_dir(self, volume) """ _VALID_IMAGE_EXTENSIONS: List[str] = [] # The following flag may be overridden by the concrete drivers in order # to avoid using temporary volume snapshots when creating volume clones, # when possible. _always_use_temp_snap_when_cloning = True def __init__(self, *args, **kwargs): self._remotefsclient: remotefs_brick.RemoteFsClient = None self.base: Optional[str] = None self._nova: Optional[db.base.Base] = None super(RemoteFSSnapDriverBase, self).__init__(*args, **kwargs) def do_setup(self, context: context.RequestContext) -> None: super(RemoteFSSnapDriverBase, self).do_setup(context) self._nova = compute.API() def snapshot_revert_use_temp_snapshot(self) -> bool: # Considering that RemoteFS based drivers use COW images # for storing snapshots, having chains of such images, # creating a backup snapshot when reverting one is not # actutally helpful. return False def _local_volume_dir(self, volume: objects.Volume) -> str: share = volume.provider_location local_dir = self._get_mount_point_for_share(share) return local_dir def _local_path_volume(self, volume: objects.Volume) -> str: path_to_disk = os.path.join( self._local_volume_dir(volume), volume.name) return path_to_disk def _get_new_snap_path(self, snapshot: objects.Snapshot) -> str: vol_path = self.local_path(snapshot.volume) snap_path = '%s.%s' % (vol_path, snapshot.id) return snap_path def _local_path_volume_info(self, volume: objects.Volume) -> str: return '%s%s' % (self.local_path(volume), '.info') def _read_file(self, filename: str) -> str: """This method is to make it easier to stub out code for testing. Returns a string representing the contents of the file. """ with open(filename, 'r') as f: return f.read() def _write_info_file(self, info_path: str, snap_info: dict) -> None: if 'active' not in snap_info.keys(): msg = _("'active' must be present when writing snap_info.") raise exception.RemoteFSException(msg) if not (os.path.exists(info_path) or os.name == 'nt'): # We're not managing file permissions on Windows. # Plus, 'truncate' is not available. 
self._execute('truncate', "-s0", info_path, run_as_root=self._execute_as_root) self._set_rw_permissions(info_path) with open(info_path, 'w') as f: json.dump(snap_info, f, indent=1, sort_keys=True) def _qemu_img_info_base(self, path: str, volume_name: str, basedir: str, ext_bf_template=None, force_share=False, run_as_root=False) -> imageutils.QemuImgInfo: """Sanitize image_utils' qemu_img_info. This code expects to deal only with relative filenames. :param path: Path to the image file whose info is fetched :param volume_name: Name of the volume :param basedir: Path to backing files directory :param ext_bf_template: Alt. string.Template for allowed backing files :type object: BackingFileTemplate :param force_share: Wether to force fetching img info for images in use :param run_as_root: Wether to run with privileged permissions or not """ run_as_root = run_as_root or self._execute_as_root info = image_utils.qemu_img_info(path, force_share=force_share, run_as_root=run_as_root, allow_qcow2_backing_file=True) if info.image: info.image = os.path.basename(info.image) if info.backing_file: if self._VALID_IMAGE_EXTENSIONS: valid_ext = r'(\.(%s))?' % '|'.join( self._VALID_IMAGE_EXTENSIONS) else: valid_ext = '' if ext_bf_template: backing_file_template = ext_bf_template.substitute( basedir=basedir, volname=volume_name, valid_ext=valid_ext ) LOG.debug("Fetching qemu-img info with special " "backing_file_template: %(bft)s", { "bft": backing_file_template }) else: backing_file_template = \ "(%(basedir)s/[0-9a-f]+/)?" \ "%(volname)s(.(tmp-snap-)?[0-9a-f-]+)?%(valid_ext)s$" % { 'basedir': basedir, 'volname': volume_name, 'valid_ext': valid_ext, } if not re.match(backing_file_template, info.backing_file, re.IGNORECASE): raise exception.RemoteFSInvalidBackingFile( path=path, backing_file=info.backing_file) info.backing_file = os.path.basename(info.backing_file) return info def _qemu_img_info(self, path: str, volume_name: str): raise NotImplementedError() def _img_commit(self, path: str, passphrase_file: Optional[str] = None, backing_file: Optional[str] = None) -> None: # TODO(eharney): this is not using the correct permissions for # NFS snapshots # It needs to run as root for volumes attached to instances, but # does not when in secure mode. cmd = ['qemu-img', 'commit'] if passphrase_file: obj = ['--object', 'secret,id=s0,format=raw,file=%s' % passphrase_file] image_opts = ['--image-opts'] src_opts = \ "file.filename=%(filename)s,encrypt.format=luks," \ "encrypt.key-secret=s0,backing.file.filename=%(backing)s," \ "backing.encrypt.key-secret=s0" % { 'filename': path, 'backing': backing_file, } path_no_to_delete = ['-d', src_opts] cmd += obj + image_opts + path_no_to_delete else: cmd += ['-d', path] self._execute(*cmd, run_as_root=self._execute_as_root) self._delete(path) def _rebase_img(self, image: str, backing_file: str, volume_format: str, passphrase_file: Optional[str] = None) -> None: # qemu-img create must run as root, because it reads from the # backing file, which will be owned by qemu:qemu if attached to an # instance. # TODO(erlon): Sanity check this. 
command = ['qemu-img', 'rebase', '-u'] # if encrypted if passphrase_file: objectdef = "secret,id=s0,file=%s" % passphrase_file filename = "encrypt.key-secret=s0,"\ "file.filename=%(filename)s" % {'filename': image} command += ['--object', objectdef, '-b', backing_file, '-F', volume_format, '--image-opts', filename] # not encrypted else: command += ['-b', backing_file, image, '-F', volume_format] self._execute(*command, run_as_root=self._execute_as_root) def _read_info_file(self, info_path: str, empty_if_missing: bool = False) -> dict: """Return dict of snapshot information. :param info_path: path to file :param empty_if_missing: True=return empty dict if no file """ if not os.path.exists(info_path): if empty_if_missing is True: return {} return json.loads(self._read_file(info_path)) def _get_higher_image_path(self, snapshot: objects.Snapshot) -> Optional[str]: volume = snapshot.volume info_path = self._local_path_volume_info(volume) snap_info = self._read_info_file(info_path) snapshot_file = snap_info[snapshot.id] active_file = self.get_active_image_from_info(volume) active_file_path = os.path.join(self._local_volume_dir(volume), active_file) backing_chain = self._get_backing_chain_for_path( volume, active_file_path) higher_file = next((os.path.basename(f['filename']) for f in backing_chain if utils.paths_normcase_equal( f.get('backing-filename', ''), snapshot_file)), None) return higher_file def _get_backing_chain_for_path(self, volume: objects.Volume, path: str) -> List[dict]: """Returns list of dicts containing backing-chain information. Includes 'filename', and 'backing-filename' for each applicable entry. Consider converting this to use --backing-chain and --output=json when environment supports qemu-img 1.5.0. :param volume: volume reference :param path: path to image file at top of chain """ output = [] info = self._qemu_img_info(path, volume.name) new_info = {} new_info['filename'] = os.path.basename(path) new_info['backing-filename'] = info.backing_file output.append(new_info) while new_info['backing-filename']: filename = new_info['backing-filename'] path = os.path.join(self._local_volume_dir(volume), filename) info = self._qemu_img_info(path, volume.name) backing_filename = info.backing_file new_info = {} new_info['filename'] = filename new_info['backing-filename'] = backing_filename output.append(new_info) return output def _get_hash_str(self, base_str) -> str: """Return a string that represents hash of base_str. Returns string in a hex format. """ if isinstance(base_str, str): base_str = base_str.encode('utf-8') return hashlib.md5(base_str, usedforsecurity=False).hexdigest() def _get_mount_point_for_share(self, share: str) -> str: """Return mount point for share. :param share: example 172.18.194.100:/var/fs """ return self._remotefsclient.get_mount_point(share) def _get_available_capacity(self, share: str) -> Tuple[int, int]: """Calculate available space on the share. 
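        Worked example (fabricated ``df`` output): given a second line such
        as ``host:/var/fs 1073741824 10485760 1063256064 1% /mnt/fs``,
        column 2 (1073741824) is taken as the total size and column 4
        (1063256064) as the available space, both in bytes.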
:param share: example 172.18.194.100:/var/fs """ mount_point = self._get_mount_point_for_share(share) out, _ = self._execute('df', '--portability', '--block-size', '1', mount_point, run_as_root=self._execute_as_root) out = out.splitlines()[1] size = int(out.split()[1]) available = int(out.split()[3]) return available, size def _get_capacity_info(self, remotefs_share: str) -> Tuple[int, int, int]: available, size = self._get_available_capacity(remotefs_share) return size, available, size - available def _get_mount_point_base(self) -> Optional[str]: return self.base def _copy_volume_to_image(self, context: context.RequestContext, volume: objects.Volume, image_service, image_meta: dict, store_id: Optional[str] = None) -> None: """Copy the volume to the specified image.""" # If snapshots exist, flatten to a temporary image, and upload it active_file = self.get_active_image_from_info(volume) active_file_path = os.path.join(self._local_volume_dir(volume), active_file) info = self._qemu_img_info(active_file_path, volume.name) backing_file = info.backing_file root_file_fmt = info.file_format tmp_params = { 'prefix': '%s.temp_image.%s' % (volume.id, image_meta['id']), 'suffix': '.img' } with image_utils.temporary_file(**tmp_params) as temp_path: if backing_file or (root_file_fmt != 'raw'): # Convert due to snapshots # or volume data not being stored in raw format # (upload_volume assumes raw format input) image_utils.convert_image(active_file_path, temp_path, 'raw', run_as_root=self._execute_as_root) upload_path = temp_path else: upload_path = active_file_path volume_utils.upload_volume(context, image_service, image_meta, upload_path, volume, run_as_root=self._execute_as_root) def get_active_image_from_info(self, volume: objects.Volume) -> str: """Returns filename of the active image from the info file.""" info_file = self._local_path_volume_info(volume) snap_info = self._read_info_file(info_file, empty_if_missing=True) if not snap_info: # No info file = no snapshots exist vol_path = os.path.basename(self.local_path(volume)) return vol_path return snap_info['active'] def _local_path_active_image(self, volume: objects.Volume) -> str: active_fname = self.get_active_image_from_info(volume) vol_dir = self._local_volume_dir(volume) active_fpath = os.path.join(vol_dir, active_fname) return active_fpath def _get_snapshot_backing_file(self, snapshot: objects.Snapshot) -> str: info_path = self._local_path_volume_info(snapshot.volume) snap_info = self._read_info_file(info_path) vol_dir = self._local_volume_dir(snapshot.volume) forward_file = snap_info[snapshot.id] forward_path = os.path.join(vol_dir, forward_file) # Find the file which backs this file, which represents the point # in which this snapshot was created. 
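        # (Editor's note) The call below passes only the path, although
        # _qemu_img_info() is declared with a volume_name parameter as well;
        # that mismatch is presumably what the TODO and the "type: ignore"
        # marker refer to.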
# TODO: something is wrong here img_info = self._qemu_img_info(forward_path) # type: ignore return img_info.backing_file def _snapshots_exist(self, volume: objects.Volume) -> bool: if not volume.provider_location: return False active_fpath = self._local_path_active_image(volume) base_vol_path = self.local_path(volume) return not utils.paths_normcase_equal(active_fpath, base_vol_path) def _is_volume_attached(self, volume: objects.Volume) -> bool: return volume.attach_status == fields.VolumeAttachStatus.ATTACHED def _create_cloned_volume(self, volume: objects.Volume, src_vref: objects.Volume, context: context.RequestContext) -> dict: LOG.info('Cloning volume %(src)s to volume %(dst)s', {'src': src_vref.id, 'dst': volume.id}) acceptable_states = ['available', 'backing-up', 'downloading'] self._validate_state(src_vref.status, acceptable_states, obj_description='source volume') volume_name = CONF.volume_name_template % volume.id # Create fake volume and snapshot objects Volume = collections.namedtuple('Volume', ('provider_location', 'size', 'id', 'name', 'status', 'volume_type', 'metadata', 'obj_context')) volume_info = Volume(provider_location=src_vref.provider_location, size=src_vref.size, id=volume.id, name=volume_name, status=src_vref.status, volume_type=src_vref.volume_type, metadata=src_vref.metadata, obj_context=volume.obj_context) if (self._always_use_temp_snap_when_cloning or self._snapshots_exist(src_vref)): kwargs = { 'volume_id': src_vref.id, 'user_id': context.user_id, 'project_id': context.project_id, 'status': fields.SnapshotStatus.CREATING, 'progress': '0%', 'volume_size': src_vref.size, 'display_name': 'tmp-snap-%s' % volume.id, 'display_description': None, 'volume_type_id': src_vref.volume_type_id, 'encryption_key_id': src_vref.encryption_key_id, } temp_snapshot = objects.Snapshot(context=context, **kwargs) temp_snapshot.create() self._create_snapshot(temp_snapshot) try: self._copy_volume_from_snapshot( temp_snapshot, volume_info, volume.size, src_encryption_key_id=src_vref.encryption_key_id, new_encryption_key_id=volume.encryption_key_id) # remove temp snapshot after the cloning is done temp_snapshot.status = fields.SnapshotStatus.DELETING temp_snapshot.context = context.elevated() temp_snapshot.save() finally: self._delete_snapshot(temp_snapshot) temp_snapshot.destroy() else: self._copy_volume_image(self.local_path(src_vref), self.local_path(volume_info)) self._extend_volume(volume_info, volume.size) if src_vref.admin_metadata and 'format' in src_vref.admin_metadata: volume.admin_metadata['format'] = ( src_vref.admin_metadata['format']) # This is done here because when cloning from a bootable volume, # while encountering other volume.save() method fails with volume.obj_as_admin(): volume.save() return {'provider_location': src_vref.provider_location} def _copy_volume_image(self, src_path: str, dest_path: str) -> None: shutil.copyfile(src_path, dest_path) self._set_rw_permissions(dest_path) def _delete_stale_snapshot(self, snapshot: objects.Snapshot) -> None: info_path = self._local_path_volume_info(snapshot.volume) snap_info = self._read_info_file(info_path) snapshot_file = snap_info[snapshot.id] active_file = self.get_active_image_from_info(snapshot.volume) snapshot_path = os.path.join( self._local_volume_dir(snapshot.volume), snapshot_file) if utils.paths_normcase_equal(snapshot_file, active_file): return LOG.info('Deleting stale snapshot: %s', snapshot.id) self._delete(snapshot_path) del snap_info[snapshot.id] self._write_info_file(info_path, snap_info) def 
_delete_snapshot(self, snapshot: objects.Snapshot) -> None: """Delete a snapshot. If volume status is 'available', delete snapshot here in Cinder using qemu-img. If volume status is 'in-use', calculate what qcow2 files need to merge, and call to Nova to perform this operation. :raises: InvalidVolume if status not acceptable :raises: RemoteFSException(msg) if operation fails :returns: None """ LOG.debug('Deleting %(type)s snapshot %(snap)s of volume %(vol)s', {'snap': snapshot.id, 'vol': snapshot.volume.id, 'type': ('online' if self._is_volume_attached(snapshot.volume) else 'offline')}) volume_status = snapshot.volume.status acceptable_states = ['available', 'in-use', 'backing-up', 'deleting', 'downloading'] self._validate_state(volume_status, acceptable_states) vol_path = self._local_volume_dir(snapshot.volume) volume_path = os.path.join(vol_path, snapshot.volume.name) # Determine the true snapshot file for this snapshot # based on the .info file info_path = self._local_path_volume_info(snapshot.volume) snap_info = self._read_info_file(info_path, empty_if_missing=True) if snapshot.id not in snap_info: # If snapshot info file is present, but snapshot record does not # exist, do not attempt to delete. # (This happens, for example, if snapshot_create failed due to lack # of permission to write to the share.) LOG.info('Snapshot record for %s is not present, allowing ' 'snapshot_delete to proceed.', snapshot.id) return snapshot_file = snap_info[snapshot.id] LOG.debug('snapshot_file for this snap is: %s', snapshot_file) snapshot_path = os.path.join( self._local_volume_dir(snapshot.volume), snapshot_file) snapshot_path_img_info = self._qemu_img_info( snapshot_path, snapshot.volume.name) base_file = snapshot_path_img_info.backing_file if base_file is None: # There should always be at least the original volume # file as base. 
LOG.warning('No backing file found for %s, allowing ' 'snapshot to be deleted.', snapshot_path) # Snapshot may be stale, so just delete it and update the # info file instead of blocking return self._delete_stale_snapshot(snapshot) base_path = os.path.join(vol_path, base_file) base_file_img_info = self._qemu_img_info(base_path, snapshot.volume.name) # Find what file has this as its backing file active_file = self.get_active_image_from_info(snapshot.volume) if self._is_volume_attached(snapshot.volume): # Online delete context = snapshot._context new_base_file = base_file_img_info.backing_file base_id = None for key, value in snap_info.items(): if utils.paths_normcase_equal(value, base_file) and key != 'active': base_id = key break if base_id is None: # This means we are deleting the oldest snapshot LOG.debug('No %(base_id)s found for %(file)s', {'base_id': 'base_id', 'file': snapshot_file}) online_delete_info = { 'active_file': active_file, 'snapshot_file': snapshot_file, 'base_file': base_file, 'base_id': base_id, 'new_base_file': new_base_file } return self._delete_snapshot_online(context, snapshot, online_delete_info) encrypted = snapshot.encryption_key_id is not None if encrypted: keymgr = key_manager.API(CONF) encryption_key = snapshot.encryption_key_id new_key = keymgr.get(snapshot.obj_context, encryption_key) src_passphrase = \ binascii.hexlify(new_key.get_encoded()).decode('utf-8') tmp_dir = volume_utils.image_conversion_dir() if utils.paths_normcase_equal(snapshot_file, active_file): # There is no top file # T0 | T1 | # base | snapshot_file | None # (guaranteed to| (being deleted, | # exist) | committed down) | if encrypted: with tempfile.NamedTemporaryFile(prefix='luks_', dir=tmp_dir) as src_file: with open(src_file.name, 'w') as f: f.write(src_passphrase) self._img_commit(snapshot_path, passphrase_file=src_file.name, backing_file=volume_path) else: self._img_commit(snapshot_path) # Active file has changed snap_info['active'] = base_file else: # T0 | T1 | T2 | T3 # base | snapshot_file | higher_file | highest_file # (guaranteed to | (being deleted, | (guaranteed to | (may exist) # exist, not | committed down) | exist, needs | # used here) | | ptr update) | # This file is guaranteed to exist since we aren't operating on # the active file. 
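            # (Editor's note on the branch below) The snapshot's qcow2 file
            # is committed down into the image backing it, and the file
            # directly above it ("higher_file") is then rebased so the
            # backing chain stays consistent once the middle element is gone.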
higher_file = self._get_higher_image_path(snapshot) if higher_file is None: msg = _('No file found with %s as backing file.') %\ snapshot_file raise exception.RemoteFSException(msg) higher_id = next((i for i in snap_info if utils.paths_normcase_equal(snap_info[i], higher_file) and i != 'active'), None) if higher_id is None: msg = _('No snap found with %s as backing file.') %\ higher_file raise exception.RemoteFSException(msg) if encrypted: with tempfile.NamedTemporaryFile(prefix='luks_', dir=tmp_dir) as src_file: with open(src_file.name, 'w') as f: f.write(src_passphrase) self._img_commit(snapshot_path, passphrase_file=src_file.name, backing_file=volume_path) higher_file_path = os.path.join(vol_path, higher_file) base_file_fmt = base_file_img_info.file_format self._rebase_img(higher_file_path, volume_path, base_file_fmt, src_file.name) else: self._img_commit(snapshot_path) higher_file_path = os.path.join(vol_path, higher_file) base_file_fmt = base_file_img_info.file_format self._rebase_img(higher_file_path, base_file, base_file_fmt) # Remove snapshot_file from info del snap_info[snapshot.id] self._write_info_file(info_path, snap_info) def _create_volume_from_snapshot(self, volume: objects.Volume, snapshot: objects.Snapshot) -> dict: """Creates a volume from a snapshot. Snapshot must not be the active snapshot. (offline) """ LOG.debug('Creating volume %(vol)s from snapshot %(snap)s', {'vol': volume.id, 'snap': snapshot.id}) status = snapshot.status acceptable_states = ['available', 'backing-up', 'restoring'] self._validate_state(status, acceptable_states, obj_description='snapshot', invalid_exc=exception.InvalidSnapshot) self._ensure_shares_mounted() volume.provider_location = self._find_share(volume) self._do_create_volume(volume) self._copy_volume_from_snapshot(snapshot, volume, volume.size, snapshot.volume.encryption_key_id, volume.encryption_key_id) return {'provider_location': volume.provider_location} def _copy_volume_from_snapshot( self, snapshot: objects.Snapshot, volume: objects.Volume, volume_size: int, src_encryption_key_id: Optional[str] = None, new_encryption_key_id: Optional[str] = None): raise NotImplementedError() def _do_create_snapshot(self, snapshot: objects.Snapshot, backing_filename: str, new_snap_path: str) -> None: """Create a QCOW2 file backed by another file. :param snapshot: snapshot reference :param backing_filename: filename of file that will back the new qcow2 file :param new_snap_path: filename of new qcow2 file """ backing_path_full_path = os.path.join( self._local_volume_dir(snapshot.volume), backing_filename) volume_path = os.path.join( self._local_volume_dir(snapshot.volume), snapshot.volume.name) info = self._qemu_img_info(backing_path_full_path, snapshot.volume.name) backing_fmt = info.file_format obj_context = snapshot.volume.obj_context # create new qcow2 file if snapshot.volume.encryption_key_id is None: command = ['qemu-img', 'create', '-f', 'qcow2', '-o', 'backing_file=%s,backing_fmt=%s' % (backing_path_full_path, backing_fmt), new_snap_path, "%dG" % snapshot.volume.size] self._execute(*command, run_as_root=self._execute_as_root) command = ['qemu-img', 'rebase', '-u', '-b', backing_filename, '-F', backing_fmt, new_snap_path] # qemu-img rebase must run as root for the same reasons as above self._execute(*command, run_as_root=self._execute_as_root) else: # encrypted keymgr = key_manager.API(CONF) # Get key for the source volume using the context of this request. 
key = keymgr.get(obj_context, snapshot.volume.encryption_key_id) passphrase = binascii.hexlify(key.get_encoded()).decode('utf-8') tmp_dir = volume_utils.image_conversion_dir() with tempfile.NamedTemporaryFile(dir=tmp_dir) as tmp_key: with open(tmp_key.name, 'w') as f: f.write(passphrase) file_json_dict = {"driver": "qcow2", "encrypt.key-secret": "s0", "backing.encrypt.key-secret": "s0", "backing.file.filename": volume_path, "file": {"driver": "file", "filename": backing_path_full_path, }} file_json = jsonutils.dumps(file_json_dict) encryption = volume_utils.check_encryption_provider( volume=snapshot.volume, context=obj_context) cipher_spec = image_utils.decode_cipher(encryption['cipher'], encryption['key_size']) command = ['qemu-img', 'create', '-f' 'qcow2', '-o', 'encrypt.format=luks,encrypt.key-secret=s1,' 'encrypt.cipher-alg=%(cipher_alg)s,' 'encrypt.cipher-mode=%(cipher_mode)s,' 'encrypt.ivgen-alg=%(ivgen_alg)s' % cipher_spec, '-b', 'json:' + file_json, '--object', 'secret,id=s0,file=' + tmp_key.name, '--object', 'secret,id=s1,file=' + tmp_key.name, new_snap_path] self._execute(*command, run_as_root=self._execute_as_root) command_path = 'encrypt.key-secret=s0,file.filename=' command = ['qemu-img', 'rebase', '--object', 'secret,id=s0,file=' + tmp_key.name, '--image-opts', command_path + new_snap_path, '-u', '-b', backing_filename, '-F', backing_fmt] # qemu-img rebase must run as root for the same reasons as # above self._execute(*command, run_as_root=self._execute_as_root) self._set_rw_permissions(new_snap_path) # if in secure mode, chown new file if self.secure_file_operations_enabled(): ref_file = backing_path_full_path log_msg = 'Setting permissions: %(file)s -> %(user)s:%(group)s' % { 'file': ref_file, 'user': os.stat(ref_file).st_uid, 'group': os.stat(ref_file).st_gid} LOG.debug(log_msg) command = ['chown', '--reference=%s' % ref_file, new_snap_path] self._execute(*command, run_as_root=self._execute_as_root) def _create_snapshot(self, snapshot: objects.Snapshot) -> None: """Create a snapshot. If volume is attached, call to Nova to create snapshot, providing a qcow2 file. Cinder creates and deletes qcow2 files, but Nova is responsible for transitioning the VM between them and handling live transfers of data between files as required. If volume is detached, create locally with qemu-img. Cinder handles manipulation of qcow2 files. A file named volume-.info is stored with the volume data and is a JSON table which contains a mapping between Cinder snapshot UUIDs and filenames, as these associations will change as snapshots are deleted. Basic snapshot operation: 1. Initial volume file: volume-1234 2. Snapshot created: volume-1234 <- volume-1234.aaaa volume-1234.aaaa becomes the new "active" disk image. If the volume is not attached, this filename will be used to attach the volume to a VM at volume-attach time. If the volume is attached, the VM will switch to this file as part of the snapshot process. Note that volume-1234.aaaa represents changes after snapshot 'aaaa' was created. So the data for snapshot 'aaaa' is actually in the backing file(s) of volume-1234.aaaa. This file has a qcow2 header recording the fact that volume-1234 is its backing file. Delta changes since the snapshot was created are stored in this file, and the backing file (volume-1234) does not change. info file: { 'active': 'volume-1234.aaaa', 'aaaa': 'volume-1234.aaaa' } 3. 
Second snapshot created: volume-1234 <- volume-1234.aaaa <- volume-1234.bbbb volume-1234.bbbb now becomes the "active" disk image, recording changes made to the volume. info file: { 'active': 'volume-1234.bbbb', (* changed!) 'aaaa': 'volume-1234.aaaa', 'bbbb': 'volume-1234.bbbb' } (* added!) 4. Snapshot deletion when volume is attached ('in-use' state): * When first snapshot is deleted, Cinder calls Nova for online snapshot deletion. Nova deletes snapshot with id "aaaa" and makes snapshot with id "bbbb" point to the base image. Snapshot with id "bbbb" is the active image. volume-1234 <- volume-1234.bbbb info file: { 'active': 'volume-1234.bbbb', 'bbbb': 'volume-1234.bbbb' } * When second snapshot is deleted, Cinder calls Nova for online snapshot deletion. Nova deletes snapshot with id "bbbb" by pulling volume-1234's data into volume-1234.bbbb. This (logically) removes snapshot with id "bbbb" and the active file remains the same. volume-1234.bbbb info file: { 'active': 'volume-1234.bbbb' } TODO (deepakcs): Change this once Nova supports blockCommit for in-use volumes. 5. Snapshot deletion when volume is detached ('available' state): * When first snapshot is deleted, Cinder does the snapshot deletion. volume-1234.aaaa is removed from the snapshot chain. The data from it is merged into its parent. volume-1234.bbbb is rebased, having volume-1234 as its new parent. volume-1234 <- volume-1234.bbbb info file: { 'active': 'volume-1234.bbbb', 'bbbb': 'volume-1234.bbbb' } * When second snapshot is deleted, Cinder does the snapshot deletion. volume-1234.aaaa is removed from the snapshot chain. The base image, volume-1234 becomes the active image for this volume again. volume-1234 info file: { 'active': 'volume-1234' } (* changed!) """ LOG.debug('Creating %(type)s snapshot %(snap)s of volume %(vol)s', {'snap': snapshot.id, 'vol': snapshot.volume.id, 'type': ('online' if self._is_volume_attached(snapshot.volume) else 'offline')}) status = snapshot.volume.status acceptable_states = ['available', 'in-use', 'backing-up'] if (snapshot.display_name and snapshot.display_name.startswith('tmp-snap-')): # This is an internal volume snapshot. In order to support # image caching, we'll allow creating/deleting such snapshots # while having volumes in 'downloading' state. acceptable_states.append('downloading') self._validate_state(status, acceptable_states) info_path = self._local_path_volume_info(snapshot.volume) snap_info = self._read_info_file(info_path, empty_if_missing=True) backing_filename = self.get_active_image_from_info( snapshot.volume) new_snap_path = self._get_new_snap_path(snapshot) active = os.path.basename(new_snap_path) if self._is_volume_attached(snapshot.volume): self._create_snapshot_online(snapshot, backing_filename, new_snap_path) # Update the format for the volume and the connection_info. The # connection_info needs to reflect the current volume format in # order for Nova to create the disk device correctly whenever the # instance is stopped/started or rebooted. 
new_format = 'qcow2' snapshot.volume.admin_metadata['format'] = new_format with snapshot.volume.obj_as_admin(): snapshot.volume.save() # Update reference in the only attachment (no multi-attach support) attachment = snapshot.volume.volume_attachment[0] attachment.connection_info['name'] = active attachment.connection_info['format'] = new_format # Let OVO know it has been updated attachment.connection_info = attachment.connection_info attachment.save() else: self._do_create_snapshot(snapshot, backing_filename, new_snap_path) snap_info['active'] = active snap_info[snapshot.id] = active self._write_info_file(info_path, snap_info) def _create_snapshot_online(self, snapshot: objects.Snapshot, backing_filename: str, new_snap_path: str) -> None: # Perform online snapshot via Nova self._do_create_snapshot(snapshot, backing_filename, new_snap_path) connection_info = { 'type': 'qcow2', 'new_file': os.path.basename(new_snap_path), 'snapshot_id': snapshot.id } try: assert self._nova is not None result = self._nova.create_volume_snapshot( snapshot.obj_context, snapshot.volume_id, connection_info) LOG.debug('nova call result: %s', result) except Exception: LOG.exception('Call to Nova to create snapshot failed') raise # Loop and wait for result # Nova will call Cinderclient to update the status in the database # An update of progress = '90%' means that Nova is done seconds_elapsed = 0 increment = 1 timeout = 600 while True: s = db.snapshot_get(snapshot.obj_context, snapshot.id) LOG.debug('Status of snapshot %(id)s is now %(status)s', {'id': snapshot['id'], 'status': s['status']}) if s['status'] == fields.SnapshotStatus.CREATING: if s['progress'] == '90%': # Nova tasks completed successfully break time.sleep(increment) seconds_elapsed += increment elif s['status'] == fields.SnapshotStatus.ERROR: msg = _('Nova returned "error" status ' 'while creating snapshot.') raise exception.RemoteFSException(msg) elif (s['status'] == fields.SnapshotStatus.DELETING or s['status'] == fields.SnapshotStatus.ERROR_DELETING): msg = _('Snapshot %(id)s has been asked to be deleted while ' 'waiting for it to become available. 
Perhaps a ' 'concurrent request was made.') % {'id': snapshot.id} raise exception.RemoteFSConcurrentRequest(msg) if 10 < seconds_elapsed <= 20: increment = 2 elif 20 < seconds_elapsed <= 60: increment = 5 elif 60 < seconds_elapsed: increment = 10 if seconds_elapsed > timeout: msg = _('Timed out while waiting for Nova update ' 'for creation of snapshot %s.') % snapshot.id raise exception.RemoteFSException(msg) def _delete_snapshot_online(self, context: context.RequestContext, snapshot: objects.Snapshot, info: dict) -> None: # Update info over the course of this method # active file never changes info_path = self._local_path_volume_info(snapshot.volume) snap_info = self._read_info_file(info_path) update_format = False if utils.paths_normcase_equal(info['active_file'], info['snapshot_file']): # blockRebase/Pull base into active # info['base'] => snapshot_file file_to_delete = info['base_file'] if info['base_id'] is None: # Passing base=none to blockRebase ensures that # libvirt blanks out the qcow2 backing file pointer new_base = None else: new_base = info['new_base_file'] snap_info[info['base_id']] = info['snapshot_file'] delete_info = {'file_to_merge': new_base, 'merge_target_file': None, # current 'type': 'qcow2', 'volume_id': snapshot.volume.id} del snap_info[snapshot.id] update_format = True else: # blockCommit snapshot into base # info['base'] <= snapshot_file # delete record of snapshot file_to_delete = info['snapshot_file'] delete_info = {'file_to_merge': info['snapshot_file'], 'merge_target_file': info['base_file'], 'type': 'qcow2', 'volume_id': snapshot.volume.id} del snap_info[snapshot.id] self._nova_assisted_vol_snap_delete(context, snapshot, delete_info) if update_format: snapshot.volume.admin_metadata['format'] = 'qcow2' with snapshot.volume.obj_as_admin(): snapshot.volume.save() # Write info file updated above self._write_info_file(info_path, snap_info) # Delete stale file path_to_delete = os.path.join( self._local_volume_dir(snapshot.volume), file_to_delete) self._delete(path_to_delete) def _nova_assisted_vol_snap_delete(self, context: context.RequestContext, snapshot: objects.Snapshot, delete_info: dict) -> None: try: assert self._nova is not None self._nova.delete_volume_snapshot( context, snapshot.id, delete_info) except Exception: LOG.exception('Call to Nova delete snapshot failed') raise # Loop and wait for result # Nova will call Cinderclient to update the status in the database # An update of progress = '90%' means that Nova is done seconds_elapsed = 0 increment = 1 timeout = 7200 while True: s = db.snapshot_get(context, snapshot.id) if s['status'] == fields.SnapshotStatus.DELETING: if s['progress'] == '90%': # Nova tasks completed successfully break else: LOG.debug('status of snapshot %s is still "deleting"... 
' 'waiting', snapshot.id) time.sleep(increment) seconds_elapsed += increment else: msg = _('Unable to delete snapshot %(id)s, ' 'status: %(status)s.') % {'id': snapshot.id, 'status': s['status']} raise exception.RemoteFSException(msg) if 10 < seconds_elapsed <= 20: increment = 2 elif 20 < seconds_elapsed <= 60: increment = 5 elif 60 < seconds_elapsed: increment = 10 if seconds_elapsed > timeout: msg = _('Timed out while waiting for Nova update ' 'for deletion of snapshot %(id)s.') %\ {'id': snapshot.id} raise exception.RemoteFSException(msg) def _extend_volume(self, volume: objects.Volume, size_gb: int): raise NotImplementedError() def _revert_to_snapshot(self, context: context.RequestContext, volume: objects.Volume, snapshot: objects.Snapshot): raise NotImplementedError() class RemoteFSSnapDriver(RemoteFSSnapDriverBase): @locked_volume_id_operation def create_snapshot(self, snapshot: objects.Snapshot) -> None: """Apply locking to the create snapshot operation.""" return self._create_snapshot(snapshot) @locked_volume_id_operation def delete_snapshot(self, snapshot: objects.Snapshot) -> None: """Apply locking to the delete snapshot operation.""" return self._delete_snapshot(snapshot) @locked_volume_id_operation def create_volume_from_snapshot(self, volume: objects.Volume, snapshot: objects.Snapshot) -> dict: return self._create_volume_from_snapshot(volume, snapshot) # TODO: should be locking on src_vref id -- bug #1852449 @locked_volume_id_operation def create_cloned_volume(self, volume: objects.Volume, src_vref: objects.Volume) -> dict: """Creates a clone of the specified volume.""" return self._create_cloned_volume(volume, src_vref, src_vref.obj_context) @locked_volume_id_operation def copy_volume_to_image(self, context: context.RequestContext, volume: objects.Volume, image_service, image_meta: dict) -> None: """Copy the volume to the specified image.""" return self._copy_volume_to_image(context, volume, image_service, image_meta) @locked_volume_id_operation def extend_volume(self, volume: objects.Volume, size_gb: int) -> None: return self._extend_volume(volume, size_gb) @locked_volume_id_operation def revert_to_snapshot(self, context: context.RequestContext, volume: objects.Volume, snapshot: objects.Snapshot) -> None: """Revert to specified snapshot.""" return self._revert_to_snapshot(context, volume, snapshot) class RemoteFSSnapDriverDistributed(RemoteFSSnapDriverBase): @coordination.synchronized('{self.driver_prefix}-{snapshot.volume.id}') def create_snapshot(self, snapshot: objects.Snapshot) -> None: """Apply locking to the create snapshot operation.""" return self._create_snapshot(snapshot) @coordination.synchronized('{self.driver_prefix}-{snapshot.volume.id}') def delete_snapshot(self, snapshot: objects.Snapshot) -> None: """Apply locking to the delete snapshot operation.""" return self._delete_snapshot(snapshot) @coordination.synchronized('{self.driver_prefix}-{volume.id}') def create_volume_from_snapshot(self, volume: objects.Volume, snapshot: objects.Snapshot) -> dict: return self._create_volume_from_snapshot(volume, snapshot) # lock the source volume id first @coordination.synchronized('{self.driver_prefix}-{src_vref.id}') @coordination.synchronized('{self.driver_prefix}-{volume.id}') def create_cloned_volume(self, volume: objects.Volume, src_vref: objects.Volume) -> dict: """Creates a clone of the specified volume.""" return self._create_cloned_volume(volume, src_vref, src_vref.obj_context) @coordination.synchronized('{self.driver_prefix}-{volume.id}') def 
copy_volume_to_image(self, context: context.RequestContext, volume: objects.Volume, image_service, image_meta: dict) -> None: """Copy the volume to the specified image.""" return self._copy_volume_to_image(context, volume, image_service, image_meta) @coordination.synchronized('{self.driver_prefix}-{volume.id}') def extend_volume(self, volume: objects.Volume, size_gb: int) -> None: return self._extend_volume(volume, size_gb) @coordination.synchronized('{self.driver_prefix}-{volume.id}') def revert_to_snapshot(self, context: context.RequestContext, volume: objects.Volume, snapshot: objects.Snapshot) -> None: """Revert to specified snapshot.""" return self._revert_to_snapshot(context, volume, snapshot) class RemoteFSPoolMixin(object): """Drivers inheriting this will report each share as a pool.""" def _find_share(self, volume: objects.Volume) -> Optional[str]: # We let the scheduler choose a pool for us. pool_name = self._get_pool_name_from_volume(volume) share = self._get_share_from_pool_name(pool_name) return share def _get_pool_name_from_volume(self, volume: objects.Volume) -> Optional[str]: pool_name = volume_utils.extract_host(volume['host'], level='pool') return pool_name def _get_pool_name_from_share(self, share: str): raise NotImplementedError() def _get_share_from_pool_name(self, pool_name: Optional[str]): # To be implemented by drivers using pools. raise NotImplementedError() @typing.no_type_check def _update_volume_stats(self): data = {} pools = [] backend_name = self.configuration.safe_get('volume_backend_name') data['volume_backend_name'] = backend_name or self.volume_backend_name data['vendor_name'] = self.vendor_name data['driver_version'] = self.get_version() data['storage_protocol'] = self.driver_volume_type self._ensure_shares_mounted() for share in self._mounted_shares: (share_capacity, share_free, total_allocated) = self._get_capacity_info(share) pool = {'pool_name': self._get_pool_name_from_share(share), 'total_capacity_gb': share_capacity / float(units.Gi), 'free_capacity_gb': share_free / float(units.Gi), 'provisioned_capacity_gb': ( total_allocated / float(units.Gi)), 'reserved_percentage': ( self.configuration.reserved_percentage), 'max_over_subscription_ratio': ( self.configuration.max_over_subscription_ratio), 'thin_provisioning_support': ( self._thin_provisioning_support), 'thick_provisioning_support': ( self._thick_provisioning_support), 'QoS_support': False, } pools.append(pool) data['total_capacity_gb'] = 0 data['free_capacity_gb'] = 0 data['pools'] = pools self._stats = data class RevertToSnapshotMixin(object): @typing.no_type_check def _revert_to_snapshot(self, context, volume, snapshot): """Revert a volume to specified snapshot The volume must not be attached. Only the latest snapshot can be used. """ status = snapshot.volume.status acceptable_states = ['available', 'reverting'] self._validate_state(status, acceptable_states) LOG.debug('Reverting volume %(vol)s to snapshot %(snap)s', {'vol': snapshot.volume.id, 'snap': snapshot.id}) info_path = self._local_path_volume_info(snapshot.volume) snap_info = self._read_info_file(info_path) snapshot_file = snap_info[snapshot.id] active_file = snap_info['active'] if not utils.paths_normcase_equal(snapshot_file, active_file): msg = _("Could not revert volume '%(volume_id)s' to snapshot " "'%(snapshot_id)s' as it does not " "appear to be the latest snapshot. 
Current active " "image: %(active_file)s.") raise exception.InvalidSnapshot( msg % dict(snapshot_id=snapshot.id, active_file=active_file, volume_id=volume.id)) snapshot_path = os.path.join( self._local_volume_dir(snapshot.volume), snapshot_file) backing_filename = self._qemu_img_info( snapshot_path, volume.name).backing_file # We revert the volume to the latest snapshot by recreating the top # image from the chain. # This workflow should work with most (if not all) drivers inheriting # this class. self._delete(snapshot_path) self._do_create_snapshot(snapshot, backing_filename, snapshot_path) class RemoteFSManageableVolumesMixin(object): _SUPPORTED_IMAGE_FORMATS = ['raw', 'qcow2'] _MANAGEABLE_IMAGE_RE = None @typing.no_type_check def _get_manageable_vol_location(self, existing_ref): if 'source-name' not in existing_ref: reason = _('The existing volume reference ' 'must contain "source-name".') raise exception.ManageExistingInvalidReference( existing_ref=existing_ref, reason=reason) vol_remote_path = os.path.normcase( os.path.normpath(existing_ref['source-name'])) for mounted_share in self._mounted_shares: # We don't currently attempt to resolve hostnames. This could # be troublesome for some distributed shares, which may have # hostnames resolving to multiple addresses. norm_share = os.path.normcase(os.path.normpath(mounted_share)) _head, match, share_rel_path = vol_remote_path.partition( norm_share) if not (match and share_rel_path.startswith(os.path.sep)): continue mountpoint = self._get_mount_point_for_share(mounted_share) vol_local_path = os.path.join(mountpoint, share_rel_path.lstrip(os.path.sep)) LOG.debug("Found mounted share referenced by %s.", vol_remote_path) if os.path.isfile(vol_local_path): LOG.debug("Found volume %(path)s on share %(share)s.", dict(path=vol_local_path, share=mounted_share)) return dict(share=mounted_share, mountpoint=mountpoint, vol_local_path=vol_local_path, vol_remote_path=vol_remote_path) else: LOG.error("Could not find volume %s on the " "specified share.", vol_remote_path) break raise exception.ManageExistingInvalidReference( existing_ref=existing_ref, reason=_('Volume not found.')) def _get_managed_vol_expected_path(self, volume, volume_location): # This may be overridden by the drivers. return os.path.join(volume_location['mountpoint'], volume.name) @typing.no_type_check def _is_volume_manageable(self, volume_path, already_managed=False): unmanageable_reason = None if already_managed: return False, _('Volume already managed.') try: img_info = self._qemu_img_info(volume_path, volume_name=None) except exception.RemoteFSInvalidBackingFile: return False, _("Backing file present.") except Exception: return False, _("Failed to open image.") # We're double checking as some drivers do not validate backing # files through '_qemu_img_info'. 
if img_info.backing_file: return False, _("Backing file present.") if img_info.file_format not in self._SUPPORTED_IMAGE_FORMATS: unmanageable_reason = _( "Unsupported image format: '%s'.") % img_info.file_format return False, unmanageable_reason return True, None @typing.no_type_check def manage_existing(self, volume, existing_ref): LOG.info('Managing volume %(volume_id)s with ref %(ref)s', {'volume_id': volume.id, 'ref': existing_ref}) vol_location = self._get_manageable_vol_location(existing_ref) vol_local_path = vol_location['vol_local_path'] manageable, unmanageable_reason = self._is_volume_manageable( vol_local_path) if not manageable: raise exception.ManageExistingInvalidReference( existing_ref=existing_ref, reason=unmanageable_reason) expected_vol_path = self._get_managed_vol_expected_path( volume, vol_location) self._set_rw_permissions(vol_local_path) # This should be the last thing we do. if expected_vol_path != vol_local_path: LOG.info("Renaming imported volume image %(src)s to %(dest)s", dict(src=vol_location['vol_local_path'], dest=expected_vol_path)) os.rename(vol_location['vol_local_path'], expected_vol_path) return {'provider_location': vol_location['share']} @typing.no_type_check def _get_rounded_manageable_image_size(self, image_path): image_size = image_utils.qemu_img_info( image_path, run_as_root=self._execute_as_root).virtual_size return int(math.ceil(float(image_size) / units.Gi)) def manage_existing_get_size(self, volume, existing_ref): vol_location = self._get_manageable_vol_location(existing_ref) volume_path = vol_location['vol_local_path'] return self._get_rounded_manageable_image_size(volume_path) def unmanage(self, volume): pass @typing.no_type_check def _get_manageable_volume(self, share, volume_path, managed_volume=None): manageable, unmanageable_reason = self._is_volume_manageable( volume_path, already_managed=managed_volume is not None) size_gb = None if managed_volume: # We may not be able to query in-use images. size_gb = managed_volume.size else: try: size_gb = self._get_rounded_manageable_image_size(volume_path) except Exception: manageable = False unmanageable_reason = (unmanageable_reason or _("Failed to get size.")) mountpoint = self._get_mount_point_for_share(share) norm_mountpoint = os.path.normcase(os.path.normpath(mountpoint)) norm_vol_path = os.path.normcase(os.path.normpath(volume_path)) ref = norm_vol_path.replace(norm_mountpoint, share).replace('\\', '/') manageable_volume = { 'reference': {'source-name': ref}, 'size': size_gb, 'safe_to_manage': manageable, 'reason_not_safe': unmanageable_reason, 'cinder_id': managed_volume.id if managed_volume else None, 'extra_info': None, } return manageable_volume @typing.no_type_check def _get_share_manageable_volumes(self, share, managed_volumes): manageable_volumes = [] mount_path = self._get_mount_point_for_share(share) for dir_path, _dir_names, file_names in os.walk(mount_path): for file_name in file_names: file_name = os.path.normcase(file_name) img_path = os.path.join(dir_path, file_name) # In the future, we may have the regex filtering images # as a config option. if (not self._MANAGEABLE_IMAGE_RE or self._MANAGEABLE_IMAGE_RE.match(file_name)): managed_volume = managed_volumes.get( os.path.splitext(file_name)[0]) try: manageable_volume = self._get_manageable_volume( share, img_path, managed_volume) manageable_volumes.append(manageable_volume) except Exception as exc: LOG.error( "Failed to get manageable volume info: " "'%(image_path)s'. 
Exception: %(exc)s.", dict(image_path=img_path, exc=exc)) return manageable_volumes @typing.no_type_check def get_manageable_volumes(self, cinder_volumes, marker, limit, offset, sort_keys, sort_dirs): manageable_volumes = [] managed_volumes = {vol.name: vol for vol in cinder_volumes} for share in self._mounted_shares: try: manageable_volumes += self._get_share_manageable_volumes( share, managed_volumes) except Exception as exc: LOG.error("Failed to get manageable volumes for " "share %(share)s. Exception: %(exc)s.", dict(share=share, exc=exc)) return volume_utils.paginate_entries_list( manageable_volumes, marker, limit, offset, sort_keys, sort_dirs) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/drivers/rsd.py0000664000175000017500000007114500000000000020472 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Driver for RackScale Design.""" import json from oslo_config import cfg from oslo_log import log as logging from oslo_utils import units from packaging import version try: from rsd_lib import RSDLib from sushy import exceptions as sushy_exceptions except ImportError: # Used for tests, when no rsd-lib is installed RSDLib = None sushy_exceptions = None from cinder.common import constants from cinder import exception from cinder.i18n import _ from cinder import interface from cinder import utils from cinder.volume import driver from cinder.volume import volume_utils LOG = logging.getLogger(__name__) RSD_OPTS = [ cfg.StrOpt('podm_url', default='', help='URL of PODM service'), cfg.StrOpt('podm_username', default='', help='Username of PODM service'), cfg.StrOpt('podm_password', default='', help='Password of PODM service', secret=True), ] class RSDRetryableException(exception.VolumeDriverException): message = _("RSD retryable exception: %(reason)s") def get_volume_metadata(volume): metadata = volume.get('volume_metadata') if metadata: ret = {data['key']: data['value'] for data in metadata} else: ret = volume.get('metadata', {}) return ret class RSDClient(object): def __init__(self, rsdlib): self.rsdlib = rsdlib @classmethod def initialize(cls, url, username, password, verify): if not RSDLib: raise exception.VolumeBackendAPIException( data=(_("RSDLib is not available, please install rsd-lib."))) try: rsdlib = RSDLib(url, username, password, verify=verify).factory() except Exception: # error credentials may throw unexpected exception LOG.exception("Cannot connect to RSD PODM") raise exception.VolumeBackendAPIException( data=_("initialize: Cannot connect to RSD PODM.")) rsd_api_version = version.parse(rsdlib._rsd_api_version) if rsd_api_version < version.parse("2.4.0"): raise exception.VolumeBackendAPIException( data=(_("initialize: Unsupported rsd_api version: " "%(current)s < %(expected)s.") % {'current': rsdlib._rsd_api_version, 'expected': "2.4.0"})) if version.parse(rsdlib._redfish_version) < version.parse("1.1.0"): raise exception.VolumeBackendAPIException( data=(_("initialize: Unsupported 
rsd_lib version: " "%(current)s < %(expected)s.") % {'current': rsdlib._redfish_version, 'expected': "1.1.0"})) LOG.info("initialize: Connected to %s at version %s.", url, rsdlib._rsd_api_version) return cls(rsdlib) def _get_storage(self, storage_url): ss_url = "/".join(storage_url.split("/", 5)[:5]) storage_service = self.rsdlib.get_storage_service(ss_url) return storage_service def _get_storages(self, filter_nvme=True): ret = [] for storage in (self.rsdlib .get_storage_service_collection().get_members()): if filter_nvme: drives = storage.drives.get_members() if drives and (any(map(lambda drive: False if not drive.protocol else 'nvme' in drive.protocol.lower(), drives))): ret.append(storage) else: ret.append(storage) return ret def _get_node(self, node_url): return self.rsdlib.get_node(node_url) def _get_volume(self, volume_url): ss = self._get_storage(volume_url) volume = ss.volumes.get_member(volume_url) return volume def _get_providing_pool(self, volume): len_cs = len(volume.capacity_sources) if len_cs != 1: raise exception.ValidationError( detail=(_("Volume %(vol)s has %(len_cs)d capacity_sources!") % {'vol': volume.path, 'len_cs': len_cs})) len_pp = len(volume.capacity_sources[0].providing_pools) if len_pp != 1: raise exception.ValidationError( detail=(_("Volume %(vol)s has %(len_pp)d providing_pools!") % {'vol': volume.path, 'len_pp': len_pp})) providing_pool = volume.capacity_sources[0].providing_pools[0] return providing_pool.get_members()[0].path def _create_vol_or_snap(self, storage, size_in_bytes, pool_url=None, source_snap=None, source_vol=None): capacity_sources = None if pool_url: capacity_sources = [{ "ProvidingPools": [{ "@odata.id": pool_url }] }] replica_infos = None if source_snap: replica_infos = [{ "ReplicaType": "Clone", "Replica": {"@odata.id": source_snap} }] if source_vol: raise exception.InvalidInput( reason=(_("Cannot specify both source_snap=%(snap)s and " "source_vol=%(vol)s!") % {'snap': source_snap, 'vol': source_vol})) elif source_vol: replica_infos = [{ "ReplicaType": "Snapshot", "Replica": {"@odata.id": source_vol} }] LOG.debug("Creating... with size_byte=%s, " "capacity_sources=%s, replica_infos=%s", size_in_bytes, capacity_sources, replica_infos) volume_url = storage.volumes.create_volume( size_in_bytes, capacity_sources=capacity_sources, replica_infos=replica_infos) LOG.debug("Created volume_url=%s", volume_url) return volume_url def create_volume(self, size_in_gb): size_in_bytes = size_in_gb * units.Gi try: for storage in self._get_storages(): try: volume_url = self._create_vol_or_snap( storage, size_in_bytes) LOG.info("RSD volume %s created, with size %s GiB", volume_url, size_in_gb) return volume_url # NOTE(Yingxin): Currently, we capture sushy_exception to # identify that volume creation is failed at RSD backend. 
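# Illustrative sketch (not part of the driver; resource paths are
# placeholders): the request body assembled by _create_vol_or_snap above for a
# clone-from-snapshot is roughly
#   capacity_sources = [{"ProvidingPools": [{"@odata.id": "<pool_url>"}]}]
#   replica_infos    = [{"ReplicaType": "Clone",
#                        "Replica": {"@odata.id": "<snapshot_url>"}}]
# while creating a snapshot of an existing volume uses
#   replica_infos    = [{"ReplicaType": "Snapshot",
#                        "Replica": {"@odata.id": "<volume_url>"}}]
# Both variants are passed to storage.volumes.create_volume() together with
# the requested size in bytes.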
except (sushy_exceptions.HTTPError, sushy_exceptions.ConnectionError) as e: LOG.warning("skipped storage %s for creation error %s", storage.path, e) except Exception: LOG.exception("Create volume failed") raise exception.VolumeBackendAPIException( data=(_('Unable to create new volume with %d GiB') % size_in_gb)) def create_snap(self, volume_url): try: ss = self._get_storage(volume_url) volume = self._get_volume(volume_url) pool_url = self._get_providing_pool(volume) snap_url = self._create_vol_or_snap( ss, volume.capacity_bytes, pool_url=pool_url, source_vol=volume_url) LOG.info("RSD snapshot %s created, from volume %s", snap_url, volume_url) return snap_url except Exception: LOG.exception("Create snapshot failed") raise exception.VolumeBackendAPIException( data=(_('Unable to create snapshot from volume %s') % volume_url)) def create_volume_from_snap(self, snap_url, size_in_gb=None): try: ss = self._get_storage(snap_url) snap = self._get_volume(snap_url) if not size_in_gb: size_in_bytes = snap.capacity_bytes else: size_in_bytes = size_in_gb * units.Gi pool_url = self._get_providing_pool(snap) volume_url = self._create_vol_or_snap( ss, size_in_bytes, pool_url=pool_url, source_snap=snap_url) LOG.info("RSD volume %s created, from snap %s, " "with size %s GiB.", volume_url, snap_url, size_in_bytes / units.Gi) return volume_url except Exception: LOG.exception("Create volume from snapshot failed") raise exception.VolumeBackendAPIException( data=(_('Unable to create volume from snapshot %s') % snap_url)) def clone_volume(self, volume_url, size_in_gb=None): try: ss = self._get_storage(volume_url) origin_volume = self._get_volume(volume_url) pool_url = self._get_providing_pool(origin_volume) snap_url = self._create_vol_or_snap( ss, origin_volume.capacity_bytes, pool_url=pool_url, source_vol=volume_url) except Exception: LOG.exception("Clone volume failed (create snapshot phase)") raise exception.VolumeBackendAPIException( data=(_('Unable to create volume from volume %s, snapshot ' 'creation failed.') % volume_url)) try: if not size_in_gb: size_in_bytes = origin_volume.capacity_bytes else: size_in_bytes = size_in_gb * units.Gi new_vol_url = self._create_vol_or_snap( ss, size_in_bytes, pool_url=pool_url, source_snap=snap_url) LOG.info("RSD volume %s created, from volume %s and snap %s, " "with size %s GiB.", new_vol_url, volume_url, snap_url, size_in_bytes / units.Gi) return new_vol_url, snap_url except Exception: LOG.exception("Clone volume failed (clone volume phase)") try: self.delete_vol_or_snap(snap_url) except Exception: LOG.exception("Clone volume failed (undo snapshot)") raise exception.VolumeBackendAPIException( data=(_('Unable to delete the temp snapshot %(snap)s, ' 'during a failure to clone volume %(vol)s.') % {'snap': snap_url, 'vol': volume_url})) raise exception.VolumeBackendAPIException( data=(_('Unable to create volume from volume %s, volume ' 'creation failed.') % volume_url)) def extend_volume(self, volume_url, size_in_gb): size_in_bytes = size_in_gb * units.Gi try: volume = self._get_volume(volume_url) volume.resize(size_in_bytes) LOG.info("RSD volume %s resized to %s Bytes", volume.path, size_in_bytes) except Exception: LOG.exception("Extend volume failed") raise exception.VolumeBackendAPIException( data=(_('Unable to extend volume %s.') % volume_url)) def delete_vol_or_snap(self, volume_url, volume_name='', ignore_non_exist=False): try: try: volume = self._get_volume(volume_url) except sushy_exceptions.ResourceNotFoundError: if ignore_non_exist: LOG.warning("Deleted non existent 
vol/snap %s", volume_url) else: raise if volume.links.endpoints: LOG.warning("Delete vol/snap failed, attached: %s", volume_url) raise exception.VolumeIsBusy(_("Volume is already attached"), volume_name=volume_name) volume.delete() except sushy_exceptions.BadRequestError as e: try: msg = e.body['@Message.ExtendedInfo'][0]['Message'] if (msg == "Cannot delete source snapshot volume when " "other clone volumes are based on this snapshot."): LOG.warning("Delete vol/snap failed, has-deps: %s", volume_url) raise exception.SnapshotIsBusy(snapshot_name=volume_name) except Exception: LOG.exception("Delete vol/snap failed") raise exception.VolumeBackendAPIException( data=(_('Unable to delete volume %s.') % volume_url)) except Exception: LOG.exception("Delete vol/snap failed") raise exception.VolumeBackendAPIException( data=(_('Unable to delete volume %s.') % volume_url)) LOG.info("RSD volume deleted: %s", volume_url) def get_node_url_by_uuid(self, uuid): uuid = uuid.upper() try: nodes = self.rsdlib.get_node_collection().get_members() for node in nodes: node_system = None if node: node_system = self.rsdlib.get_system( node.links.computer_system) if (node and node_system and node_system.uuid and node_system.uuid.upper() == uuid): return node.path except Exception: LOG.exception("Get node url failed") return "" def get_stats(self): free_capacity_gb = 0 total_capacity_gb = 0 allocated_capacity_gb = 0 total_volumes = 0 try: storages = self._get_storages() for storage in storages: for pool in storage.storage_pools.get_members(): total_capacity_gb += ( float(pool.capacity.allocated_bytes or 0) / units.Gi) allocated_capacity_gb += ( float(pool.capacity.consumed_bytes or 0) / units.Gi) total_volumes += len(storage.volumes.members_identities) free_capacity_gb = total_capacity_gb - allocated_capacity_gb LOG.info("Got RSD stats: free_gb:%s, total_gb:%s, " "allocated_gb:%s, volumes:%s", free_capacity_gb, total_capacity_gb, allocated_capacity_gb, total_volumes) except Exception: LOG.exception("Get stats failed") return (free_capacity_gb, total_capacity_gb, allocated_capacity_gb, total_volumes) def _get_nqn_endpoints(self, endpoint_urls): ret = [] for endpoint_url in endpoint_urls: endpoint_json = ( json.loads(self.rsdlib._conn.get(endpoint_url).text)) for ident in endpoint_json["Identifiers"]: if ident["DurableNameFormat"] == "NQN": nqn = ident["DurableName"] ret.append((nqn, endpoint_json)) break return ret @utils.retry(RSDRetryableException, interval=4, retries=5, backoff_rate=2) def attach_volume_to_node(self, volume_url, node_url): LOG.info('Trying attach from node %s to volume %s', node_url, volume_url) try: volume = self._get_volume(volume_url) node = self._get_node(node_url) if len(volume.links.endpoints) > 0: raise exception.ValidationError( detail=(_("Volume %s already attached") % volume_url)) node.attach_endpoint(volume.path) except sushy_exceptions.InvalidParameterValueError: LOG.exception("Attach volume failed (not allowable)") raise RSDRetryableException( reason=(_("Not allowed to attach from " "%(node)s to %(volume)s.") % {'node': node_url, 'volume': volume_url})) except Exception: LOG.exception("Attach volume failed (attach phase)") raise exception.VolumeBackendAPIException( data=(_("Attach failed from %(node)s to %(volume)s.") % {'node': node_url, 'volume': volume_url})) try: volume.refresh() node.refresh() v_endpoints = volume.links.endpoints v_endpoints = self._get_nqn_endpoints(v_endpoints) if len(v_endpoints) != 1: raise exception.ValidationError( detail=(_("Attach volume error: %d target 
nqns") % len(v_endpoints))) target_nqn, v_endpoint = v_endpoints[0] ip_transports = v_endpoint["IPTransportDetails"] if len(ip_transports) != 1: raise exception.ValidationError( detail=(_("Attach volume error: %d target ips") % len(ip_transports))) ip_transport = ip_transports[0] target_ip = ip_transport["IPv4Address"]["Address"] target_port = ip_transport["Port"] node_system = self.rsdlib.get_system(node.links.computer_system) n_endpoints = tuple( val["@odata.id"] for val in node_system.json["Links"]["Endpoints"]) n_endpoints = self._get_nqn_endpoints(n_endpoints) if len(n_endpoints) == 0: raise exception.ValidationError( detail=(_("Attach volume error: %d host nqns") % len(n_endpoints))) host_nqn, v_endpoint = n_endpoints[0] LOG.info('Attachment successful: Retrieved target IP %s, ' 'target Port %s, target NQN %s and initiator NQN %s', target_ip, target_port, target_nqn, host_nqn) return (target_ip, target_port, target_nqn, host_nqn) except Exception as e: LOG.exception("Attach volume failed (post-attach)") try: node.refresh() node.detach_endpoint(volume.path) LOG.info('Detached from node %s to volume %s', node_url, volume_url) except Exception: LOG.exception("Attach volume failed (undo attach)") raise exception.VolumeBackendAPIException( data=(_("Undo-attach failed from %(node)s to %(volume)s.") % {'node': node_url, 'volume': volume_url})) if isinstance(e, exception.ValidationError): raise RSDRetryableException( reason=(_("Validation error during post-attach from " "%(node)s to %(volume)s.") % {'node': node_url, 'volume': volume_url})) else: raise exception.VolumeBackendAPIException( data=(_("Post-attach failed from %(node)s to %(volume)s.") % {'node': node_url, 'volume': volume_url})) def detach_volume_from_node(self, volume_url, node_url): LOG.info('Trying detach from node %s for volume %s', node_url, volume_url) try: volume = self._get_volume(volume_url) node = self._get_node(node_url) node.detach_endpoint(volume.path) except Exception: LOG.exception("Detach volume failed") raise exception.VolumeBackendAPIException( data=(_("Detach failed from %(node)s for %(volume)s.") % {'node': node_url, 'volume': volume_url})) def detach_all_node_connections_for_volume(self, volume_url): try: volume = self._get_volume(volume_url) nodes = self.rsdlib.get_node_collection().get_members() for node in nodes: if node: if volume.path in node.get_allowed_detach_endpoints(): node.detach_endpoint(volume.path) except Exception: LOG.exception("Detach failed for volume from all host " "connections") raise exception.VolumeBackendAPIException( data=(_("Detach failed for %(volume)s from all host " "connections.") % {'volume': volume_url})) @interface.volumedriver class RSDDriver(driver.VolumeDriver): """Openstack driver to perform NVMe-oF volume management in RSD Solution .. 
code-block:: none Version History: 1.0.0: Initial driver """ VERSION = '1.0.0' CI_WIKI_NAME = 'INTEL-RSD-CI' def __init__(self, *args, **kwargs): super(RSDDriver, self).__init__(*args, **kwargs) self.configuration.append_config_values(RSD_OPTS) self.rsdClient = None @staticmethod def get_driver_options(): return RSD_OPTS @volume_utils.trace def do_setup(self, context): self.rsdClient = RSDClient.initialize( self.configuration.podm_url, self.configuration.podm_username, self.configuration.podm_password, self.configuration.suppress_requests_ssl_warnings) def check_for_setup_error(self): pass @volume_utils.trace def create_volume(self, volume): size_in_gb = int(volume['size']) volume_url = self.rsdClient.create_volume(size_in_gb) return {'provider_location': volume_url} @volume_utils.trace def delete_volume(self, volume): volume_url = volume['provider_location'] if not volume_url: return self.rsdClient.delete_vol_or_snap(volume_url, volume_name=volume.name, ignore_non_exist=True) provider_snap_url = volume.metadata.get("rsd_provider_snap") if provider_snap_url: self.rsdClient.delete_vol_or_snap(provider_snap_url, volume_name=volume.name, ignore_non_exist=True) @volume_utils.trace def _update_volume_stats(self): backend_name = ( self.configuration.safe_get('volume_backend_name') or 'RSD') ret = self.rsdClient.get_stats() (free_capacity_gb, total_capacity_gb, allocated_capacity_gb, total_volumes) = ret spool = {} spool['pool_name'] = backend_name spool['total_capacity_gb'] = total_capacity_gb spool['free_capacity_gb'] = free_capacity_gb spool['allocated_capacity_gb'] = allocated_capacity_gb spool['thin_provisioning_support'] = True spool['thick_provisioning_support'] = True spool['multiattach'] = False self._stats['volume_backend_name'] = backend_name self._stats['vendor_name'] = 'Intel' self._stats['driver_version'] = self.VERSION self._stats['storage_protocol'] = constants.NVMEOF_VARIANT_2 # SinglePool self._stats['pools'] = [spool] @volume_utils.trace def initialize_connection(self, volume, connector, **kwargs): uuid = connector.get("system uuid") if not uuid: msg = _("initialize_connection error: no uuid available!") LOG.exception(msg) raise exception.VolumeBackendAPIException(msg) node_url = self.rsdClient.get_node_url_by_uuid(uuid) if not node_url: msg = (_("initialize_connection error: no node_url from uuid %s!") % uuid) LOG.exception(msg) raise exception.VolumeBackendAPIException(msg) volume_url = volume['provider_location'] target_ip, target_port, target_nqn, initiator_nqn = ( self.rsdClient.attach_volume_to_node(volume_url, node_url)) conn_info = { 'driver_volume_type': 'nvmeof', 'data': { 'transport_type': 'rdma', 'host_nqn': initiator_nqn, 'nqn': target_nqn, 'target_port': target_port, 'target_portal': target_ip, } } return conn_info @volume_utils.trace def terminate_connection(self, volume, connector, **kwargs): if connector is None: # None connector means force-detach volume_url = volume['provider_location'] self.rsdClient.detach_all_node_connections_for_volume(volume_url) return uuid = connector.get("system uuid") if not uuid: msg = _("terminate_connection error: no uuid available!") LOG.exception(msg) raise exception.VolumeBackendAPIException(msg) node_url = self.rsdClient.get_node_url_by_uuid(uuid) if not node_url: msg = (_("terminate_connection error: no node_url from uuid %s!") % uuid) LOG.exception(msg) raise exception.VolumeBackendAPIException(msg) volume_url = volume['provider_location'] self.rsdClient.detach_volume_from_node(volume_url, node_url) def ensure_export(self, 
context, volume): pass def create_export(self, context, volume, connector): pass def remove_export(self, context, volume): pass @volume_utils.trace def create_volume_from_snapshot(self, volume, snapshot): snap_url = snapshot.provider_location old_size_in_gb = snapshot.volume_size size_in_gb = volume.size volume_url = self.rsdClient.create_volume_from_snap(snap_url) if size_in_gb != old_size_in_gb: try: self.rsdClient.extend_volume(volume_url, size_in_gb) except Exception: self.rsdClient.delete_vol_or_snap(volume_url, volume_name=volume.name) raise return {'provider_location': volume_url} @volume_utils.trace def create_snapshot(self, snapshot): volume_url = snapshot.volume.provider_location snap_url = self.rsdClient.create_snap(volume_url) snapshot.provider_location = snap_url snapshot.save() @volume_utils.trace def delete_snapshot(self, snapshot): snap_url = snapshot.provider_location if not snap_url: return self.rsdClient.delete_vol_or_snap(snap_url, volume_name=snapshot.name, ignore_non_exist=True) @volume_utils.trace def extend_volume(self, volume, new_size): volume_url = volume.provider_location self.rsdClient.extend_volume(volume_url, new_size) def clone_image(self, context, volume, image_location, image_meta, image_service): return None, False @volume_utils.trace def create_cloned_volume(self, volume, src_vref): volume_url = src_vref.provider_location old_size_in_gb = src_vref.size size_in_gb = volume.size new_vol_url, provider_snap_url = self.rsdClient.clone_volume( volume_url) metadata = get_volume_metadata(volume) metadata["rsd_provider_snap"] = provider_snap_url if size_in_gb != old_size_in_gb: try: self.rsdClient.extend_volume(new_vol_url, size_in_gb) except Exception: self.rsdClient.delete_vol_or_snap(new_vol_url, volume_name=volume.name) self.rsdClient.delete_vol_or_snap(provider_snap_url, volume_name=volume.name) raise return {'provider_location': new_vol_url, 'metadata': metadata} ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1759315577.395121 cinder-27.0.0/cinder/volume/drivers/san/0000775000175000017500000000000000000000000020101 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/drivers/san/__init__.py0000664000175000017500000000174500000000000022221 0ustar00zuulzuul00000000000000# Copyright (c) 2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ :mod:`cinder.volume.drivers.san` -- Cinder San Drivers ===================================================== .. automodule:: cinder.volume.drivers.san :platform: Unix :synopsis: Module containing all the Cinder San drivers. """ # Adding imports for backwards compatibility in loading volume_driver. 
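# Illustrative example (assumption, not from this package): the backwards
# compatible import below keeps an existing cinder.conf entry such as
#
#   volume_driver = cinder.volume.drivers.san.SanISCSIDriver
#
# resolvable, even though the class itself lives in
# cinder.volume.drivers.san.san.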
from cinder.volume.drivers.san.san import SanISCSIDriver # noqa ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315577.3991213 cinder-27.0.0/cinder/volume/drivers/san/hp/0000775000175000017500000000000000000000000020510 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/drivers/san/hp/__init__.py0000664000175000017500000000000000000000000022607 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/drivers/san/hp/hpmsa_client.py0000664000175000017500000000165000000000000023532 0ustar00zuulzuul00000000000000# Copyright 2014 Objectif Libre # Copyright 2015 DotHill Systems # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # import cinder.volume.drivers.stx.client as client class HPMSAClient(client.STXClient): def __init__(self, host, login, password, protocol, ssl_verify): super(HPMSAClient, self).__init__(host, login, password, protocol, ssl_verify) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/drivers/san/hp/hpmsa_common.py0000664000175000017500000000736000000000000023550 0ustar00zuulzuul00000000000000# Copyright 2014 Objectif Libre # Copyright 2015 DotHill Systems # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
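# Illustrative sketch (values are placeholders, not part of this module):
# HPMSAClient above simply forwards its arguments to the Seagate STXClient,
# so constructing one directly would look like
#
#   client = HPMSAClient('192.0.2.10', 'manage', 's3cret', 'https',
#                        ssl_verify=False)
#
# In practice HPMSACommon below builds the client from the san_ip, san_login
# and san_password options rather than hard-coded values.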
# from oslo_config import cfg from cinder.volume import configuration from cinder.volume import driver import cinder.volume.drivers.san.hp.hpmsa_client as hpmsa_client import cinder.volume.drivers.stx.common as common common_opts = [ cfg.StrOpt('hpmsa_pool_name', deprecated_name='hpmsa_backend_name', default='A', help="Pool or Vdisk name to use for volume creation."), cfg.StrOpt('hpmsa_pool_type', deprecated_name='hpmsa_backend_type', choices=['linear', 'virtual'], default='virtual', help="linear (for Vdisk) or virtual (for Pool)."), cfg.StrOpt('hpmsa_api_protocol', deprecated_for_removal=True, deprecated_reason='driver_use_ssl should be used instead.', choices=['http', 'https'], default='https', help="HPMSA API interface protocol."), cfg.BoolOpt('hpmsa_verify_certificate', deprecated_for_removal=True, deprecated_reason='Use driver_ssl_cert_verify instead.', default=False, help="Whether to verify HPMSA array SSL certificate."), cfg.StrOpt('hpmsa_verify_certificate_path', deprecated_for_removal=True, deprecated_reason='Use driver_ssl_cert_path instead.', help="HPMSA array SSL certificate path."), ] iscsi_opts = [ cfg.ListOpt('hpmsa_iscsi_ips', default=[], help="List of comma-separated target iSCSI IP addresses."), ] CONF = cfg.CONF CONF.register_opts(common_opts, group=configuration.SHARED_CONF_GROUP) CONF.register_opts(iscsi_opts, group=configuration.SHARED_CONF_GROUP) class HPMSACommon(common.STXCommon): VERSION = "2.0" def __init__(self, config): self.config = config self.vendor_name = "HPMSA" self.backend_name = self.config.hpmsa_pool_name self.backend_type = self.config.hpmsa_pool_type self.api_protocol = self.config.hpmsa_api_protocol ssl_verify = False # check deprecated vendor-specific options ... if (self.api_protocol == 'https' and self.config.hpmsa_verify_certificate): ssl_verify = self.config.hpmsa_verify_certificate_path or True # ... before newer common options if self.config.driver_use_ssl: self.api_protocol = 'https' if self.config.driver_ssl_cert_verify: ssl_verify = self.config.driver_ssl_cert_path or True self.client = hpmsa_client.HPMSAClient(self.config.san_ip, self.config.san_login, self.config.san_password, self.api_protocol, ssl_verify) @staticmethod def get_driver_options(): additional_opts = driver.BaseVD._get_oslo_driver_opts( 'san_ip', 'san_login', 'san_password', 'driver_use_ssl', 'driver_ssl_cert_verify', 'driver_ssl_cert_path') return common_opts + additional_opts ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/drivers/san/hp/hpmsa_fc.py0000664000175000017500000000330600000000000022644 0ustar00zuulzuul00000000000000# Copyright 2014 Objectif Libre # Copyright 2015 Dot Hill Systems Corp. # Copyright 2016-2019 Seagate Technology or one of its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
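# Illustrative configuration sketch (assumption, all values are placeholders):
# the deprecated hpmsa_api_protocol / hpmsa_verify_certificate* options above
# are superseded by the generic SSL options, so a backend section would
# typically look like
#
#   [hpmsa-fc]
#   volume_driver = cinder.volume.drivers.san.hp.hpmsa_fc.HPMSAFCDriver
#   san_ip = 192.0.2.20
#   san_login = manage
#   san_password = <password>
#   hpmsa_pool_name = A
#   driver_use_ssl = true
#   driver_ssl_cert_verify = true
#   driver_ssl_cert_path = /etc/ssl/certs/hpmsa.pem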
# from cinder import interface import cinder.volume.drivers.san.hp.hpmsa_common as hpmsa_common import cinder.volume.drivers.stx.fc as fc @interface.volumedriver class HPMSAFCDriver(fc.STXFCDriver): """OpenStack Fibre Channel cinder drivers for HPMSA arrays. .. code-block:: default Version history: 1.0 - Inheriting from DotHill cinder drivers. 1.6 - Add management path redundancy and reduce load placed on management controller. 2.0 - DotHill driver renamed to Seagate (STX) """ VERSION = "2.0" CI_WIKI_NAME = "HPMSA_CI" SUPPORTED = True def __init__(self, *args, **kwargs): super(HPMSAFCDriver, self).__init__(*args, **kwargs) self.configuration.append_config_values(hpmsa_common.common_opts) @staticmethod def get_driver_options(): return hpmsa_common.HPMSACommon.get_driver_options() def _init_common(self): return hpmsa_common.HPMSACommon(self.configuration) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/drivers/san/hp/hpmsa_iscsi.py0000664000175000017500000000360100000000000023364 0ustar00zuulzuul00000000000000# Copyright 2014 Objectif Libre # Copyright 2015 Dot Hill Systems Corp. # Copyright 2016-2019 Seagate Technology or one of its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # from cinder import interface import cinder.volume.drivers.san.hp.hpmsa_common as hpmsa_common import cinder.volume.drivers.stx.iscsi as iscsi @interface.volumedriver class HPMSAISCSIDriver(iscsi.STXISCSIDriver): """OpenStack iSCSI cinder drivers for HPMSA arrays. .. code-block:: default Version history: 1.0 - Inheriting from DotHill cinder drivers. 1.6 - Add management path redundancy and reduce load placed on management controller. 2.0 - DotHill driver renamed to Seagate (STX) """ VERSION = "2.0" CI_WIKI_NAME = "HPMSA_CI" SUPPORTED = True def __init__(self, *args, **kwargs): super(HPMSAISCSIDriver, self).__init__(*args, **kwargs) self.configuration.append_config_values(hpmsa_common.common_opts) self.configuration.append_config_values(hpmsa_common.iscsi_opts) self.iscsi_ips = self.configuration.hpmsa_iscsi_ips @staticmethod def get_driver_options(): return (hpmsa_common.HPMSACommon.get_driver_options() + hpmsa_common.iscsi_opts) def _init_common(self): return hpmsa_common.HPMSACommon(self.configuration) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/drivers/san/san.py0000664000175000017500000001535500000000000021245 0ustar00zuulzuul00000000000000# Copyright 2011 Justin Santa Barbara # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. """ Default Driver for san-stored volumes. The unique thing about a SAN is that we don't expect that we can run the volume controller on the SAN hardware. We expect to access it over SSH or some API. """ import random from eventlet import greenthread from oslo_concurrency import processutils from oslo_config import cfg from oslo_log import log as logging from oslo_utils import excutils from cinder import exception from cinder.i18n import _ from cinder import ssh_utils from cinder import utils from cinder.volume import configuration from cinder.volume import driver LOG = logging.getLogger(__name__) san_opts = [ cfg.BoolOpt('san_thin_provision', default=True, help='Use thin provisioning for SAN volumes?'), cfg.StrOpt('san_ip', default='', help='IP address of SAN controller'), cfg.StrOpt('san_login', default='admin', help='Username for SAN controller'), cfg.StrOpt('san_password', default='', help='Password for SAN controller', secret=True), cfg.StrOpt('san_private_key', default='', help='Filename of private key to use for SSH authentication'), cfg.StrOpt('san_clustername', default='', help='Cluster name to use for creating volumes'), cfg.PortOpt('san_ssh_port', default=22, help='SSH port to use with SAN'), cfg.PortOpt('san_api_port', help='Port to use to access the SAN API'), cfg.BoolOpt('san_is_local', default=False, help='Execute commands locally instead of over SSH; ' 'use if the volume service is running on the SAN device'), cfg.IntOpt('ssh_conn_timeout', default=30, help="SSH connection timeout in seconds"), cfg.IntOpt('ssh_min_pool_conn', default=1, help='Minimum ssh connections in the pool'), cfg.IntOpt('ssh_max_pool_conn', default=5, help='Maximum ssh connections in the pool'), ] CONF = cfg.CONF CONF.register_opts(san_opts, group=configuration.SHARED_CONF_GROUP) class SanDriver(driver.BaseVD): """Base class for SAN-style storage volumes A SAN-style storage value is 'different' because the volume controller probably won't run on it, so we need to access is over SSH or another remote protocol. """ def __init__(self, *args, **kwargs): execute = kwargs.pop('execute', self.san_execute) super(SanDriver, self).__init__(execute=execute, *args, **kwargs) self.configuration.append_config_values(san_opts) self.run_local = self.configuration.san_is_local self.sshpool = None def san_execute(self, *cmd, **kwargs): if self.run_local: return utils.execute(*cmd, **kwargs) else: check_exit_code = kwargs.pop('check_exit_code', None) return self._run_ssh(cmd, check_exit_code) def _run_ssh(self, cmd_list, check_exit_code=True, attempts=1): utils.check_ssh_injection(cmd_list) command = ' '. 
join(cmd_list) if not self.sshpool: password = self.configuration.san_password privatekey = self.configuration.san_private_key min_size = self.configuration.ssh_min_pool_conn max_size = self.configuration.ssh_max_pool_conn self.sshpool = ssh_utils.SSHPool( self.configuration.san_ip, self.configuration.san_ssh_port, self.configuration.ssh_conn_timeout, self.configuration.san_login, password=password, privatekey=privatekey, min_size=min_size, max_size=max_size) last_exception = None try: with self.sshpool.item() as ssh: while attempts > 0: attempts -= 1 try: return processutils.ssh_execute( ssh, command, check_exit_code=check_exit_code) except Exception as e: LOG.error(e) last_exception = e greenthread.sleep(random.randint(20, 500) / 100.0) try: raise processutils.ProcessExecutionError( exit_code=last_exception.exit_code, stdout=last_exception.stdout, stderr=last_exception.stderr, cmd=last_exception.cmd) except AttributeError: raise processutils.ProcessExecutionError( exit_code=-1, stdout="", stderr="Error running SSH command", cmd=command) except Exception: with excutils.save_and_reraise_exception(): LOG.error("Error running SSH command: %s", command) def ensure_export(self, context, volume): """Synchronously recreates an export for a logical volume.""" pass def create_export(self, context, volume, connector): """Exports the volume.""" pass def remove_export(self, context, volume): """Removes an export for a logical volume.""" pass def check_for_setup_error(self): """Returns an error if prerequisites aren't met.""" if not self.run_local: if not (self.configuration.san_password or self.configuration.san_private_key): raise exception.InvalidInput( reason=_('Specify san_password or san_private_key')) # The san_ip must always be set, because we use it for the target if not self.configuration.san_ip: raise exception.InvalidInput(reason=_("san_ip must be set")) class SanISCSIDriver(SanDriver, driver.ISCSIDriver): def __init__(self, *args, **kwargs): super(SanISCSIDriver, self).__init__(*args, **kwargs) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315577.3991213 cinder-27.0.0/cinder/volume/drivers/sandstone/0000775000175000017500000000000000000000000021316 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/drivers/sandstone/__init__.py0000664000175000017500000000000000000000000023415 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/drivers/sandstone/constants.py0000664000175000017500000000142300000000000023704 0ustar00zuulzuul00000000000000# Copyright (c) 2019 ShenZhen SandStone Data Technologies Co., Ltd. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
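# Illustrative note (assumption, not part of this module): for the SSH-based
# SanDriver above, check_for_setup_error requires san_ip plus either
# san_password or san_private_key, so a minimal backend section (placeholder
# values) is roughly
#
#   [san-ssh]
#   san_ip = 192.0.2.30
#   san_login = admin
#   san_private_key = /etc/cinder/san_rsa
#   san_ssh_port = 22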
"""SandStone iSCSI Driver Const.""" CONNECT_ERROR = 403 BASIC_URI = '/api/storage/' OM_URI = '/api/om/' PAGESIZE = 1000 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/drivers/sandstone/sds_client.py0000664000175000017500000007410000000000000024021 0ustar00zuulzuul00000000000000# Copyright (c) 2019 ShenZhen SandStone Data Technologies Co., Ltd. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """SandStone iSCSI Driver.""" import hashlib import json import re import time from oslo_log import log as logging import requests from cinder import exception from cinder.i18n import _ from cinder.volume.drivers.sandstone import constants LOG = logging.getLogger(__name__) class RestCmd(object): """Restful api class.""" def __init__(self, address, user, password, suppress_requests_ssl_warnings): """Init RestCmd class. :param address: Restapi uri. :param user: login web username. :param password: login web password. """ self.address = "https://%(address)s" % {"address": address} self.user = user self.password = password self.pagesize = constants.PAGESIZE self.session = None self.short_wait = 10 self.long_wait = 12000 self.debug = True self._init_http_header() def _init_http_header(self): self.session = requests.Session() self.session.headers.update({ "Content-Type": "application/json", "Connection": "keep-alive", "Accept-Encoding": "gzip, deflate", }) self.session.verify = False def run(self, url, method, data=None, json_flag=True, filter_flag=False, om_op_flag=False): """Run rest cmd function. :param url: rest api uri resource. :param data: rest api uri json parameter. :param filter_flag: controller whether filter log. (default 'No') :param om_op_flag: api op have basic and om, use different prefix uri. """ kwargs = {} if data: kwargs["data"] = json.dumps(data) if om_op_flag: rest_url = self.address + constants.OM_URI + url else: rest_url = self.address + constants.BASIC_URI + url func = getattr(self.session, method.lower()) try: result = func(rest_url, **kwargs) except requests.RequestException as err: msg = _('Bad response from server: %(url)s. ' 'Error: %(err)s') % {'url': rest_url, 'err': err} raise exception.VolumeBackendAPIException(msg) try: result.raise_for_status() except requests.HTTPError as exc: if exc.response.status_code == constants.CONNECT_ERROR: try: self.login() except requests.ConnectTimeout as err: msg = (_("Sandstone web server may be abnormal " "or storage may be poweroff. 
Error: %(err)s") % {'err': err}) raise exception.VolumeBackendAPIException(msg) else: return {"error": {"code": exc.response.status_code, "description": str(exc)}} if not filter_flag: LOG.info(''' Request URL: %(url)s, Call Method: %(method)s, Request Data: %(data)s, Response Data: %(res)s, Result Data: %(res_json)s.''', {'url': url, 'method': method, 'data': data, 'res': result, 'res_json': result.json()}) if json_flag: return result.json() return result def _assert_restapi_result(self, result, err): if result.get("success") != 1: msg = (_('%(err)s\nresult:%(res)s') % {"err": err, "res": result}) raise exception.VolumeBackendAPIException(data=msg) def login(self): """Login web get with token session.""" url = 'user/login' sha256 = hashlib.sha256() sha256.update(self.password.encode("utf8")) password = sha256.hexdigest() data = {"username": self.user, "password": password} result = self.run(url=url, data=data, method='POST', json_flag=False, om_op_flag=True) self._assert_restapi_result(result.json(), _('Login error.')) cookies = result.cookies set_cookie = result.headers['Set-Cookie'] self.session.headers['Cookie'] = ';'.join( ['XSRF-TOKEN={}'.format(cookies['XSRF-TOKEN']), ' username={}'.format(self.user), ' sdsom_sessionid={}'.format(self._find_sessionid(set_cookie))]) self.session.headers["Referer"] = self.address self.session.headers["X-XSRF-TOKEN"] = cookies["XSRF-TOKEN"] def _find_sessionid(self, headers): sessionid = re.findall("sdsom_sessionid=(\\w+);", headers) if sessionid: return sessionid[0] return "" def _check_special_result(self, result, contain): if result.get("success") == 0 and contain in result.get("data"): return True def logout(self): """Logout release resource.""" url = 'user/logout' data = {"username": self.user} result = self.run(url, 'POST', data=data, om_op_flag=True) self._assert_restapi_result(result, _("Logout out error.")) def query_capacity_info(self): """Query cluster capacity.""" url = 'capacity' capacity_info = {} result = self.run(url, 'POST', filter_flag=True) self._assert_restapi_result(result, _("Query capacity error.")) capacity_info["capacity_bytes"] = result["data"].get( "capacity_bytes", 0) capacity_info["free_bytes"] = result["data"].get("free_bytes", 0) return capacity_info def query_pool_info(self): """Query use pool status.""" url = 'pool/list' result = self.run(url, 'POST') self._assert_restapi_result(result, _("Query pool status error.")) return result["data"] def get_poolid_from_poolname(self): """Use poolname get poolid from pool/list maps.""" data = self.query_pool_info() poolname_map_poolid = {} if data: for pool in data: poolname_map_poolid[pool["realname"]] = pool["pool_id"] return poolname_map_poolid def create_initiator(self, initiator_name): """Create client iqn in storage cluster.""" url = 'resource/initiator/create' data = {"iqn": initiator_name, "type": "iSCSI", "remark": "Cinder iSCSI"} result = self.run(url, 'POST', data=data) # initiator exist, return no err. 
if self._check_special_result(result, "already exist"): return self._assert_restapi_result(result, _("Create initiator error.")) def _delaytask_list(self, pagesize=20): url = 'delaytask/list' data = {"pageno": 1, "pagesize": pagesize} return self.run(url, 'POST', data=data, om_op_flag=True) def _judge_delaytask_status(self, wait_time, func_name, *args): # wait 10 seconds for task func = getattr(self, func_name.lower()) for wait in range(1, wait_time + 1): try: task_status = func(*args) if self.debug: LOG.info(task_status) except exception.VolumeBackendAPIException as exc: msg = (_("Task: run %(task)s failed, " "err: %(err)s.") % {"task": func_name, "err": exc}) LOG.error(msg) if task_status.get('run_status') == "failed": msg = (_("Task : run %(task)s failed, " "parameter : %(parameter)s, " "progress is %(process)d.") % {"task": func_name, "process": task_status.get('progress'), "parameter": args}) raise exception.VolumeBackendAPIException(data=msg) elif task_status.get('run_status') != "completed": msg = (_("Task : running %(task)s , " "parameter : %(parameter)s, " "progress is %(process)d, " "waited for 1 second, " "total waited %(total)d second.") % {"task": func_name, "process": task_status.get('progress', 0), "parameter": args, "total": wait}) LOG.info(msg) time.sleep(1) elif task_status.get('run_status') == "completed": msg = (_("Task : running %(task)s successfully, " "parameter : %(parameter)s, " "progress is %(process)d, " "total spend %(total)d second.") % {"task": func_name, "process": task_status.get('progress'), "parameter": args, "total": wait}) LOG.info(msg) break def add_initiator_to_target(self, target_name, initiator_name): """Bind client iqn to storage target iqn.""" url = 'resource/target/add_initiator_to_target' data = {"targetName": target_name, "iqns": [{"ip": "", "iqn": initiator_name}]} result = self.run(url, 'POST', data=data) # wait 10 seconds to map initiator self._judge_delaytask_status(self.short_wait, "query_map_initiator_porcess", target_name, initiator_name) self._assert_restapi_result(result, _("Add initiator " "to target error.")) def query_map_initiator_porcess(self, target_name, initiator_name): """Query initiator add to target process.""" result = self._delaytask_list() self._assert_restapi_result(result, _("Query mapping " "initiator process error.")) result = result["data"].get("results", None) or [] expected_parameter = [{"target_name": target_name, "iqns": [{"ip": "", "iqn": initiator_name}]}] task = [map_initiator_task for map_initiator_task in result if map_initiator_task["executor"] == "MapInitiator" and map_initiator_task["parameter"] == expected_parameter] if task: return task[0] return {} def query_initiator_by_name(self, initiator_name): """Query initiator exist or not.""" url = 'resource/initiator/list' data = {"initiatorMark": "", "pageno": 1, "pagesize": self.pagesize, "type": "iSCSI"} result = self.run(url, 'POST', data=data) self._assert_restapi_result(result, _("Query initiator " "by name error.")) result = result["data"].get("results", None) or [] initiator_info = [initiator for initiator in result if initiator.get("iqn", None) == initiator_name] if initiator_info: return initiator_info[0] return None def query_target_initiatoracl(self, target_name, initiator_name): """Query target iqn bind client iqn info.""" url = 'resource/target/get_target_acl_list' data = {"pageno": 1, "pagesize": self.pagesize, "targetName": target_name} result = self.run(url, 'POST', data=data) self._assert_restapi_result(result, _("Query target " "initiatoracl 
error.")) results = result["data"].get("results", None) acl_info = [acl for acl in results or [] if acl.get("name", None) == initiator_name] return acl_info or None def query_node_by_targetips(self, target_ips): """Query target ip relation with node.""" url = 'block/gateway/server/list' result = self.run(url, 'POST') self._assert_restapi_result(result, _("Query node by " "targetips error.")) targetip_to_hostid = {} for node in result["data"]: for node_access_ip in node.get("networks"): goal_ip = node_access_ip.get("address") if goal_ip in target_ips: targetip_to_hostid[goal_ip] =\ node_access_ip.get("hostid", None) return targetip_to_hostid def query_target_by_name(self, target_name): """Query target iqn exist or not.""" url = 'resource/target/list' data = {"pageno": 1, "pagesize": self.pagesize, "thirdParty": [0, 1], "targetMark": ""} result = self.run(url, 'POST', data=data) self._assert_restapi_result(result, _("Query target by name error.")) result = result["data"].get("results", None) or [] target_info = [target for target in result if target.get("name", None) == target_name] if target_info: return target_info[0] return None def create_target(self, target_name, targetip_to_hostid): """Create target iqn.""" url = 'resource/target/create' data = {"type": "iSCSI", "readOnly": 0, "thirdParty": 1, "targetName": target_name, "networks": [{"hostid": host_id, "address": address} for address, host_id in targetip_to_hostid.items()]} result = self.run(url, 'POST', data=data) # target exist, return no err. if self._check_special_result(result, "already exist"): return self._assert_restapi_result(result, _("Create target error.")) def add_chap_by_target(self, target_name, username, password): """Add chap to target, only support forward.""" url = 'resource/target/add_chap' data = {"password": password, "user": username, "targetName": target_name} result = self.run(url, 'POST', data=data) self._assert_restapi_result(result, _("Add chap by target error.")) def query_chapinfo_by_target(self, target_name, username): """Query chapinfo by target, check chap add target or not.""" url = 'resource/target/get_chap_list' data = {"targetName": target_name} result = self.run(url, 'POST', data=data) self._assert_restapi_result(result, _("Query chapinfo " "by target error.")) result = result.get('data') or [] chapinfo = [c for c in result if c.get("user") == username] if chapinfo: return chapinfo[0] return None def create_lun(self, capacity_bytes, poolid, volume_name): """Create lun resource.""" url = 'resource/lun/add' data = {"capacity_bytes": capacity_bytes, "poolId": poolid, "priority": "normal", "qosSettings": {}, "volumeName": volume_name} result = self.run(url, 'POST', data=data) self._assert_restapi_result(result, _("Create lun error.")) def delete_lun(self, poolid, volume_name): """Delete lun resource.""" url = 'resource/lun/batch_delete' data = {"delayTime": 0, "volumeNameList": [{ "poolId": poolid, "volumeName": volume_name}]} result = self.run(url, 'POST', data=data) # lun deleted, return no err. 
if self._check_special_result(result, "not found"): return self._assert_restapi_result(result, _("Delete lun error.")) def extend_lun(self, capacity_bytes, poolid, volume_name): """Extend lun, only support enlarge.""" url = 'resource/lun/resize' data = {"capacity_bytes": capacity_bytes, "poolId": poolid, "volumeName": volume_name} result = self.run(url, 'POST', data=data) self._assert_restapi_result(result, _("Extend lun error.")) def unmap_lun(self, target_name, poolid, volume_name, pool_name): """Unbind lun from target iqn.""" url = 'resource/target/unmap_luns' volume_info = self.query_lun_by_name(volume_name, poolid) result = {"success": 0} if volume_info: uuid = volume_info.get("uuid", None) data = {"targetName": target_name, "targetLunList": [uuid], "targetSnapList": []} result = self.run(url, 'POST', data=data) # lun unmaped, return no err. if self._check_special_result(result, "not mapped"): return # wait for 10 seconds to unmap lun. self._judge_delaytask_status(self.short_wait, "query_unmapping_lun_porcess", target_name, volume_name, uuid, pool_name) self._assert_restapi_result(result, _("Unmap lun error.")) else: self._assert_restapi_result(result, _("Unmap lun error, uuid is None.")) def mapping_lun(self, target_name, poolid, volume_name, pool_name): """Bind lun to target iqn.""" url = 'resource/target/map_luns' volume_info = self.query_lun_by_name(volume_name, poolid) result = {"success": 0} if volume_info: uuid = volume_info.get("uuid", None) data = {"targetName": target_name, "targetLunList": [uuid], "targetSnapList": []} result = self.run(url, 'POST', data=data) # lun maped, return no err. if self._check_special_result(result, "already mapped"): return # wait for 10 seconds to map lun. self._judge_delaytask_status(self.short_wait, "query_mapping_lun_porcess", target_name, volume_name, uuid, pool_name) self._assert_restapi_result(result, _("Map lun error.")) else: self._assert_restapi_result(result, _("Map lun error, uuid is None.")) def query_mapping_lun_porcess(self, target_name, volume_name, uuid, pool_name): """Query mapping lun process.""" result = self._delaytask_list() self._assert_restapi_result(result, _("Query mapping " "lun process error.")) expected_parameter = {"target_name": target_name, "image_id": uuid, "target_realname": target_name, "meta_pool": pool_name, "image_realname": volume_name} result = result["data"].get("results", None) or [] task = [map_initiator_task for map_initiator_task in result if map_initiator_task["executor"] == "TargetMap" and map_initiator_task["parameter"] == expected_parameter] if task: return task[0] return {} def query_unmapping_lun_porcess(self, target_name, volume_name, uuid, pool_name): """Query mapping lun process.""" result = self._delaytask_list() self._assert_restapi_result(result, _("Query mapping " "lun process error.")) expected_parameter = {"target_name": target_name, "image_id": uuid, "target_realname": target_name, "meta_pool": pool_name, "image_name": volume_name} result = result["data"].get("results", None) or [] task = [map_initiator_task for map_initiator_task in result if map_initiator_task["executor"] == "TargetUnmap" and map_initiator_task["parameter"] == expected_parameter] if task: return task[0] return {} def query_target_lunacl(self, target_name, poolid, volume_name): """Query target iqn relation with lun.""" url = 'resource/target/get_luns' data = {"pageno": 1, "pagesize": self.pagesize, "pools": [poolid], "targetName": target_name} result = self.run(url, 'POST', data=data) self._assert_restapi_result(result, 
_("Query target lunacl error.")) # target get_luns use results result = result["data"].get("results", None) or [] lunid = [volume.get("lid", None) for volume in result if volume.get("name", None) == volume_name and volume.get("pool_id") == poolid] if lunid: return lunid[0] return None def query_lun_by_name(self, volume_name, poolid): """Query lun exist or not.""" url = 'resource/lun/list' data = {"pageno": 1, "pagesize": self.pagesize, "volumeMark": volume_name, "sortType": "time", "sortOrder": "desc", "pools": [poolid], "thirdParty": [0, 1]} result = self.run(url, 'POST', data=data) self._assert_restapi_result(result, _("Query lun by name error.")) result = result["data"].get("results", None) or [] volume_info = [volume for volume in result if volume.get("volumeName", None) == volume_name] if volume_info: return volume_info[0] return None def query_target_by_lun(self, volume_name, poolid): """Query lun already mapped target name.""" url = "resource/lun/targets" data = {"poolId": poolid, "volumeName": volume_name} result = self.run(url, 'POST', data=data) self._assert_restapi_result(result, _("Query target by lun error.")) data = result["data"] target_name = data[0].get("name", None) return target_name def create_snapshot(self, poolid, volume_name, snapshot_name): """Create lun snapshot.""" url = 'resource/snapshot/add' data = {"lunName": volume_name, "poolId": poolid, "remark": "Cinder iSCSI snapshot.", "snapName": snapshot_name} result = self.run(url, 'POST', data=data) # snapshot existed, return no err. if self._check_special_result(result, "has exists"): return # wait for 10 seconds to create snapshot self._judge_delaytask_status(self.short_wait, "query_create_snapshot_process", poolid, volume_name, snapshot_name) self._assert_restapi_result(result, _("Create snapshot error.")) def query_create_snapshot_process(self, poolid, volume_name, snapshot_name): """Query create snapshot process.""" result = self._delaytask_list() self._assert_restapi_result(result, _("Query flatten " "lun process error.")) result = result["data"].get("results", None) or [] task = [flatten_task for flatten_task in result if flatten_task["executor"] == "SnapCreate" and flatten_task["parameter"].get("pool_id", None) == poolid and flatten_task["parameter"].get("snap_name", None) == snapshot_name and flatten_task["parameter"].get("lun_name", None) == volume_name] if task: return task[0] return {} def delete_snapshot(self, poolid, volume_name, snapshot_name): """Delete lun snapshot.""" url = 'resource/snapshot/delete' data = {"lunName": volume_name, "poolId": poolid, "snapName": snapshot_name} result = self.run(url, 'POST', data=data) # snapshot deleted, need return no err. 
if self._check_special_result(result, "not found"): return # wait for 10 seconds to delete snapshot self._judge_delaytask_status(self.short_wait, "query_delete_snapshot_process", poolid, volume_name, snapshot_name) self._assert_restapi_result(result, _("Delete snapshot error.")) def query_delete_snapshot_process(self, poolid, volume_name, snapshot_name): """Query delete snapshot process.""" result = self._delaytask_list() self._assert_restapi_result(result, _("Query delete " "snapshot process error.")) result = result["data"].get("results", None) or [] task = [flatten_task for flatten_task in result if flatten_task["executor"] == "SnapDelete" and flatten_task["parameter"].get("pool_id", None) == poolid and flatten_task["parameter"].get("snap_name", None) == snapshot_name and flatten_task["parameter"].get("lun_name", None) == volume_name] if task: return task[0] return {} def create_lun_from_snapshot(self, snapshot_name, src_volume_name, poolid, dst_volume_name): """Create lun from source lun snapshot.""" url = 'resource/snapshot/clone' data = {"snapshot": {"poolId": poolid, "lunName": src_volume_name, "snapName": snapshot_name}, "cloneLun": {"lunName": dst_volume_name, "poolId": poolid}} result = self.run(url, 'POST', data=data) # clone volume exsited, return no err. if self._check_special_result(result, "already exists"): return # wait for 10 seconds to clone lun self._judge_delaytask_status(self.short_wait, "query_clone_lun_process", poolid, src_volume_name, snapshot_name) self._assert_restapi_result(result, _("Create lun " "from snapshot error.")) self.flatten_lun(dst_volume_name, poolid) def query_clone_lun_process(self, poolid, volume_name, snapshot_name): """Query clone lun process.""" result = self._delaytask_list() self._assert_restapi_result(result, _("Query flatten " "lun process error.")) result = result["data"].get("results", None) or [] task = [flatten_task for flatten_task in result if flatten_task["executor"] == "SnapClone" and flatten_task["parameter"].get("pool_id", None) == poolid and flatten_task["parameter"].get("snap_name", None) == snapshot_name and flatten_task["parameter"].get("lun_name", None) == volume_name] if task: return task[0] return {} def flatten_lun(self, volume_name, poolid): """Flatten lun.""" url = 'resource/lun/flatten' data = {"poolId": poolid, "volumeName": volume_name} result = self.run(url, 'POST', data=data) # volume flattened, return no err. 
if self._check_special_result(result, "not need flatten"): return # wait for longest 200 min to flatten self._judge_delaytask_status(self.long_wait, "query_flatten_lun_process", poolid, volume_name) self._assert_restapi_result(result, _("Flatten lun error.")) def query_flatten_lun_process(self, poolid, volume_name): """Query flatten lun process.""" result = self._delaytask_list() self._assert_restapi_result(result, _("Query flatten " "lun process error.")) result = result["data"].get("results", None) or [] task = [flatten_task for flatten_task in result if flatten_task["executor"] == "LunFlatten" and flatten_task["parameter"].get("pool_id", None) == poolid and flatten_task["parameter"].get("lun_name", None) == volume_name] if task: return task[0] return {} def create_lun_from_lun(self, dst_volume_name, poolid, src_volume_name): """Clone lun from source lun.""" tmp_snapshot_name = 'temp' + src_volume_name + 'clone' +\ dst_volume_name self.create_snapshot(poolid, src_volume_name, tmp_snapshot_name) self.create_lun_from_snapshot(tmp_snapshot_name, src_volume_name, poolid, dst_volume_name) self.flatten_lun(dst_volume_name, poolid) self.delete_snapshot(poolid, src_volume_name, tmp_snapshot_name) def query_snapshot_by_name(self, volume_name, poolid, snapshot_name): """Query snapshot exist or not.""" url = 'resource/snapshot/list' data = {"lunName": volume_name, "pageno": 1, "pagesize": self.pagesize, "poolId": poolid, "snapMark": ""} result = self.run(url, 'POST', data=data) self._assert_restapi_result(result, _("Query snapshot by name error.")) result = result["data"].get("results", None) or [] snapshot_info = [snapshot for snapshot in result if snapshot.get("snapName", None) == snapshot_name] return snapshot_info ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/drivers/sandstone/sds_driver.py0000664000175000017500000005106200000000000024040 0ustar00zuulzuul00000000000000# Copyright (c) 2019 ShenZhen SandStone Data Technologies Co., Ltd. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""Volume Drivers for SandStone distributed storage.""" from oslo_config import cfg from oslo_log import log as logging from oslo_utils import units from cinder.common import constants from cinder import exception from cinder.i18n import _ from cinder import interface from cinder.volume import driver from cinder.volume.drivers.san import san from cinder.volume.drivers.sandstone.sds_client import RestCmd LOG = logging.getLogger(__name__) sds_opts = [ cfg.ListOpt("default_sandstone_target_ips", default=[], help="SandStone default target ip."), cfg.StrOpt("sandstone_pool", default="", help="SandStone storage pool resource name."), cfg.DictOpt("initiator_assign_sandstone_target_ip", default={}, help="Support initiator assign target with assign ip.") ] CONF = cfg.CONF CONF.register_opts(sds_opts) class SdsBaseDriver(driver.VolumeDriver): """ISCSIDriver base class.""" # ThirdPartySytems wiki page VERSION = '1.0' CI_WIKI_NAME = "SandStone_Storage_CI" def __init__(self, *args, **kwargs): """Init configuration.""" super(SdsBaseDriver, self).__init__(*args, **kwargs) self.configuration.append_config_values(sds_opts) self.configuration.append_config_values(san.san_opts) def do_setup(self, context): """Instantiate common class and login storage system.""" if not self.configuration: msg = _('Configuration is not found.') raise exception.InvalidConfigurationValue(msg) self.address = self.configuration.san_ip self.user = self.configuration.san_login self.password = self.configuration.san_password self.pool = self.configuration.sandstone_pool self.iscsi_info = (self.configuration. initiator_assign_sandstone_target_ip) self.default_target_ips = (self.configuration. default_sandstone_target_ips) self.chap_username = self.configuration.chap_username self.chap_password = self.configuration.chap_password self.suppress_requests_ssl_warnings = (self.configuration. 
suppress_requests_ssl_warnings) self.client = RestCmd(self.address, self.user, self.password, self.suppress_requests_ssl_warnings) LOG.debug("Run sandstone driver setup.") def check_for_setup_error(self): """Check pool status and exist or not.""" self.client.login() self.poolname_map_poolid = self.client.get_poolid_from_poolname() all_pools = self.client.query_pool_info() all_pools_name = [p['pool_name'] for p in all_pools if p.get('pool_name')] if self.pool not in all_pools_name: msg = _('Storage pool %(pool)s does not exist ' 'in the cluster.') % {'pool': self.pool} LOG.error(msg) raise exception.InvalidInput(reason=msg) pool_status = [p['status'] for p in all_pools if p.get('pool_name') == self.pool] if pool_status: if ("health" not in pool_status[0].get('state') and pool_status[0].get("progress", 0) != 100): LOG.warning('Storage pool: %(poolName)s not healthy.', {"poolName": self.pool}) if not self.poolname_map_poolid: err_msg = _('poolname_map_poolid info is empty.') self._raise_exception(err_msg) self.poolid = self.poolname_map_poolid.get(self.pool) if not self.poolid: err_msg = _('poolid is None.') self._raise_exception(err_msg) def _update_volume_stats(self, pool_name): """Get cluster capability and capacity.""" data, pool = {}, {} data['pools'] = [] cluster_capacity = self.client.query_capacity_info() total_capacity_gb = (float(cluster_capacity.get("capacity_bytes", 0)) / units.Gi) free_capacity_gb = (float(cluster_capacity.get("free_bytes", 0)) / units.Gi) self._stats = pool.update(dict( pool_name = pool_name, vendor_name = 'SandStone USP', driver_version = self.VERSION, total_capacity_gb = total_capacity_gb, free_capacity_gb = free_capacity_gb, QoS_support=True, thin_provisioning_support=True, multiattach=False, )) data['pools'].append(pool) return data def _raise_exception(self, msg): LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) def create_volume(self, volume): """Create a volume.""" capacity_bytes = int(volume.size) * units.Gi self.client.create_lun(capacity_bytes, self.poolid, volume.name) def delete_volume(self, volume): """Delete a volume.""" LOG.debug("Delete volume %(volumeName)s from pool %(poolId)s", {"volumeName": volume.name, "poolId": self.poolid}) self.client.delete_lun(self.poolid, volume.name) def migrate_volume(self, ctxt, volume, host, new_type=None): """Migrate a volume within the same array.""" return (False, None) def create_volume_from_snapshot(self, volume, snapshot): """Create a volume from a snapshot. We use LUNcopy to copy a new volume from snapshot. The time needed increases as volume size does. 
""" if snapshot.volume: source_vol_name = snapshot.volume.name source_vol_size = snapshot.volume.size * units.Gi destination_vol_name = volume.name destination_vol_size = volume.size * units.Gi snapshot_name = snapshot.name self.client.create_lun_from_snapshot(snapshot_name, source_vol_name, self.poolid, destination_vol_name) if destination_vol_size > source_vol_size: self.client.extend_lun(destination_vol_size, self.poolid, volume.name) else: err_msg = _('No such snapshot volume.') self._raise_exception(err_msg) def create_cloned_volume(self, dst_volume, src_volume): """Clone a new volume from an existing volume.""" if not self._check_volume_exist(src_volume.name): msg = (_('Source volume: %(volume_name)s does not exist.') % {'volume_name': src_volume.name}) self._raise_exception(msg) self.client.create_lun_from_lun(dst_volume.name, self.poolid, src_volume.name) dst_vol_size = dst_volume.size * units.Gi src_vol_size = src_volume.size * units.Gi if dst_vol_size > src_vol_size: self.client.extend_lun(dst_vol_size, self.poolid, dst_volume.name) def _check_volume_exist(self, volume): return self.client.query_lun_by_name(volume, self.poolid) def extend_volume(self, volume, new_size): """Extend a volume.""" old_volume = self._check_volume_exist(volume.name) if not old_volume: msg = (_('Not exist volume: %(volumeName)s') % {"volumeName": volume.name}) self._raise_exception(msg) old_size = old_volume.get("capacity_bytes") new_size = new_size * units.Gi if new_size == old_size: LOG.info("New size is equal to the real size from backend " "storage, no need to extend. " "realsize: %(oldsize)s, newsize: %(newsize)s.", {"oldsize": old_size, "newsize": new_size}) return if new_size < old_size: msg = (_("New size should be bigger than the real size from " "backend storage. 
" "realsize: %(oldsize)s, newsize: %(newsize)s.") % {"oldsize": old_size, "newsize": new_size}) self._raise_exception(msg) LOG.info( 'Extend volume: %(volumename)s, ' 'oldsize: %(oldsize)s, newsize: %(newsize)s.', {"volumename": volume.name, "oldsize": old_size, "newsize": new_size}) self.client.extend_lun(new_size, self.poolid, volume.name) def create_snapshot(self, snapshot): """Create snapshot from volume.""" volume = snapshot.volume if not volume: msg = (_("Can't get volume id from snapshot, snapshot: %(id)s.") % {"id": snapshot.id}) self._raise_exception(msg) LOG.debug( "create snapshot from volumeName: %(volume)s, " "snap name: %(snapshot)s.", {"snapshot": snapshot.name, "volume": volume.name},) self.client.create_snapshot(self.poolid, volume.name, snapshot.name) def _check_snapshot_exist(self, snapshot): return self.client.query_snapshot_by_name(snapshot.volume.name, self.poolid, snapshot.name) def delete_snapshot(self, snapshot): """Delete volume's snapshot.""" snapshot_name = snapshot.name volume_name = snapshot.volume.name if not self._check_snapshot_exist(snapshot): LOG.debug("not exist snapshot: %(snapshotName)s", {"snapshotName": snapshot.name}) LOG.info( 'stop_snapshot: snapshot name: %(snapshot)s, ' 'volume name: %(volume)s.', {"snapshot": snapshot_name, "volume": volume_name},) self.client.delete_snapshot(self.poolid, volume_name, snapshot_name) def retype(self, ctxt, volume, new_type, diff, host): """Convert the volume to be of the new type.""" LOG.debug("Enter retype: id=%(id)s, new_type=%(new_type)s, " "diff=%(diff)s, host=%(host)s.", {'id': volume.id, 'new_type': new_type, 'diff': diff, 'host': host}) def create_export(self, context, volume, connector): """Export a volume.""" pass def ensure_export(self, context, volume): """Synchronously recreate an export for a volume.""" pass def remove_export(self, context, volume): """Remove an export for a volume.""" pass def create_export_snapshot(self, context, snapshot, connector): """Export a snapshot.""" pass def remove_export_snapshot(self, context, snapshot): """Remove an export for a snapshot.""" pass def backup_use_temp_snapshot(self): """The config option has a default to be False, So just return it.""" pass def unmanage(self, volume): """Export SandStone volume from Cinder.""" LOG.debug("Unmanage volume: %s.", volume.id) def unmanage_snapshot(self, snapshot): """Unmanage the specified snapshot from Cinder management.""" LOG.debug("Unmanage snapshot: %s.", snapshot.id) @interface.volumedriver class SdsISCSIDriver(SdsBaseDriver, driver.ISCSIDriver): """ISCSI driver for SandStone storage arrays. Version history: .. 
code-block:: none 1.0.0 - Initial driver Provide SandStone storage create volume support delete volume support create snapshot support delete snapshot support extend volume support create volume from snap support create cloned volume support nova volume-attach support nova volume-detach support """ VERSION = "1.0.0" def get_volume_stats(self, refresh): """Get volume status and capality.""" data = SdsBaseDriver.get_volume_stats(self, refresh) backend_name = self.configuration.safe_get('volume_backend_name') data['volume_backend_name'] = backend_name or self.__class__.__name__ data['storage_protocol'] = constants.ISCSI data['driver_version'] = self.VERSION data['vendor_name'] = 'SandStone USP' return data def _check_target_exist(self, target_name): """Check target name exist or not.""" return self.client.query_target_by_name(target_name) def _check_initiator_exist(self, initiator_name): """Check initiator name exist or not.""" return self.client.query_initiator_by_name(initiator_name) def _check_target_added_initiator(self, target_name, initiator_name): return self.client.query_target_initiatoracl(target_name, initiator_name) def _check_target_added_lun(self, target_name, poolid, volume_name): return self.client.query_target_lunacl(target_name, poolid, volume_name) def _check_target_added_chap(self, target_name, username): return self.client.query_chapinfo_by_target(target_name, username) def _get_target_ip(self, initiator): ini = self.iscsi_info.get(initiator) if ini: target_ips = [ip.strip() for ip in ini.split(',') if ip.strip()] else: target_ips = [] # If not specify target IP for some initiators, use default IP. if not target_ips: if self.default_target_ips: target_ips = self.default_target_ips else: msg = (_( 'get_iscsi_params: Failed to get target IP ' 'for initiator %(ini)s, please check config file.') % {'ini': initiator}) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) return target_ips def initialize_connection(self, volume, connector): """Map a volume to a host and return target iSCSI information.""" initiator_name = connector['initiator'] LOG.info( "initiator name: %(initiator_name)s, " "LUN ID: %(lun_id)s.", {"initiator_name": initiator_name, "lun_id": volume.name}) # Create target iqn_end = initiator_name.split(':')[-1] target_head = 'iqn.2014-10.com.szsandstone:storage:' target_name = target_head + iqn_end target_ips = self._get_target_ip(initiator_name) if not self._check_target_exist(iqn_end): targetip_to_hostid = (self.client. 
query_node_by_targetips(target_ips)) self.client.create_target(iqn_end, targetip_to_hostid) else: # Target is exist, get target_name and nodes LOG.info("target is exist, don't repeat to create, " "iscsi_iqn: %(iscsi_iqn)s.", {"iscsi_iqn": target_name}) LOG.info("initialize_connection, iscsi_iqn: %(iscsi_iqn)s, " 'target_ips: %(target_ips)s.', {'iscsi_iqn': target_name, 'target_ips': target_ips}) # Check initiator isn't exist if not self._check_initiator_exist(initiator_name): # Create initiator and add in storage self.client.create_initiator(initiator_name) else: LOG.info("initiator is exist, don't repeat to create " "initiator: %(initiator_name)s.", {"initiator_name": initiator_name}) # Check target added initiator or not if not self._check_target_added_initiator(iqn_end, initiator_name): # Add initiator to target self.client.add_initiator_to_target(iqn_end, initiator_name) else: LOG.info("initiator is added to target, no action needed, " "target: %(target_name)s, " "initiator: %(initiator_name)s.", {"initiator_name": initiator_name, "target_name": target_name}) lun_id = self._check_target_added_lun(iqn_end, self.poolid, volume.name) if not lun_id: # Mapping lun to target self.client.mapping_lun(iqn_end, self.poolid, volume.name, self.pool) lun_id = self._check_target_added_lun(iqn_end, self.poolid, volume.name) else: LOG.info("lun is added to target, don't repeat to add " "volume: %(volume_name)s, target: %(target_name)s.", {"volume_name": volume.name, "target_name": target_name}) # Mapping lungroup and hostgroup to view. LOG.info("initialize_connection, host lun id is: %(lun_id)d.", {"lun_id": lun_id}) # Return iSCSI properties. properties = {} properties['target_discovered'] = True properties['volume_id'] = volume.id multipath = connector.get('multipath', False) hostlun_id = lun_id if not multipath: properties['target_portal'] = ("%s:3260" % target_ips[0]) properties['target_iqn'] = target_name properties['target_lun'] = hostlun_id else: properties['target_iqns'] = [target_name for i in range(len(target_ips))] properties['target_portals'] = [ "%s:3260" % ip for ip in target_ips] properties['target_luns'] = [hostlun_id] * len(target_ips) # If use CHAP, return CHAP info. if self.chap_username and self.chap_password: if not self._check_target_added_chap(iqn_end, self.chap_username): self.client.add_chap_by_target(iqn_end, self.chap_username, self.chap_password) else: LOG.info("chap username: %(chap_username)s exist, don't " "repeat to create, iscsi_iqn: %(iscsi_iqn)s.", {"iscsi_iqn": target_name, "chap_username": self.chap_username}) properties['auth_method'] = 'CHAP' properties['auth_username'] = self.chap_username properties['auth_password'] = self.chap_password LOG.info("initialize_connection success. Return data: %(properties)s.", {"properties": properties}) return {'driver_volume_type': 'iscsi', 'data': properties} def terminate_connection(self, volume, connector, **kwargs): """Delete map between a volume and a host.""" if not connector: target_name = self.client.query_target_by_lun(volume.name, self.poolid) self.client.unmap_lun(target_name, self.poolid, volume.name, self.pool) return initiator_name = connector['initiator'] # Remove lun from target force. 
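        # Illustrative example of the name derivation performed below: an
        # initiator such as 'iqn.1993-08.org.debian:01:3cbbcd5aab48' yields
        # iqn_end '3cbbcd5aab48' (everything after the last ':'), and the
        # corresponding target name becomes
        # 'iqn.2014-10.com.szsandstone:storage:3cbbcd5aab48'.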
iqn_end = initiator_name.split(':')[-1] target_head = 'iqn.2014-10.com.szsandstone:storage:' target_name = target_head + iqn_end self.client.unmap_lun(iqn_end, self.poolid, volume.name, self.pool) LOG.info( "terminate_connection: initiator name: %(ini)s, " "LUN ID: %(lunid)s, " "Target Name: %(target_name)s.", {"ini": initiator_name, "lunid": volume.name, "target_name": target_name}) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/drivers/solidfire.py0000664000175000017500000036553500000000000021673 0ustar00zuulzuul00000000000000# All Rights Reserved. # Copyright 2013 SolidFire Inc # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import functools import inspect import json import math import re import socket import string import time import warnings from oslo_config import cfg from oslo_log import log as logging from oslo_service import loopingcall from oslo_utils import excutils from oslo_utils import timeutils from oslo_utils import units import requests from cinder.common import constants from cinder import context from cinder import exception from cinder.i18n import _ from cinder import interface from cinder.objects import fields from cinder import utils from cinder.volume import configuration from cinder.volume.drivers.san import san from cinder.volume import qos_specs from cinder.volume.targets import iscsi as iscsi_driver from cinder.volume import volume_types from cinder.volume import volume_utils LOG = logging.getLogger(__name__) sf_opts = [ cfg.BoolOpt('sf_emulate_512', default=True, help='Set 512 byte emulation on volume creation; '), cfg.BoolOpt('sf_allow_tenant_qos', default=False, help='Allow tenants to specify QOS on create'), cfg.StrOpt('sf_account_prefix', help='Create SolidFire accounts with this prefix. Any string ' 'can be used here, but the string \"hostname\" is special ' 'and will create a prefix using the cinder node hostname ' '(previous default behavior). The default is NO prefix.'), cfg.StrOpt('sf_volume_prefix', default='UUID-', help='Create SolidFire volumes with this prefix. Volume names ' 'are of the form . ' 'The default is to use a prefix of \'UUID-\'.'), cfg.StrOpt('sf_svip', help='Overrides default cluster SVIP with the one specified. ' 'This is required or deployments that have implemented ' 'the use of VLANs for iSCSI networks in their cloud.'), cfg.PortOpt('sf_api_port', default=443, help='SolidFire API port. Useful if the device api is behind ' 'a proxy on a different port.'), cfg.BoolOpt('sf_enable_vag', default=False, help='Utilize volume access groups on a per-tenant basis.'), cfg.StrOpt('sf_provisioning_calc', default='maxProvisionedSpace', choices=['maxProvisionedSpace', 'usedSpace'], help='Change how SolidFire reports used space and ' 'provisioning calculations. 
If this parameter is set to ' '\'usedSpace\', the driver will report correct ' 'values as expected by Cinder ' 'thin provisioning.'), cfg.IntOpt('sf_cluster_pairing_timeout', default=60, min=3, help='Sets time in seconds to wait for clusters to complete ' 'pairing.'), cfg.IntOpt('sf_volume_pairing_timeout', default=3600, min=30, help='Sets time in seconds to wait for a migrating volume to ' 'complete pairing and sync.'), cfg.IntOpt('sf_api_request_timeout', default=30, min=30, help='Sets time in seconds to wait for an api request to ' 'complete.'), cfg.IntOpt('sf_volume_clone_timeout', default=600, min=60, help='Sets time in seconds to wait for a clone of a volume or ' 'snapshot to complete.' ), cfg.IntOpt('sf_volume_create_timeout', default=60, min=30, help='Sets time in seconds to wait for a create volume ' 'operation to complete.')] CONF = cfg.CONF CONF.register_opts(sf_opts, group=configuration.SHARED_CONF_GROUP) # SolidFire API Error Constants xExceededLimit = 'xExceededLimit' xAlreadyInVolumeAccessGroup = 'xAlreadyInVolumeAccessGroup' xVolumeAccessGroupIDDoesNotExist = 'xVolumeAccessGroupIDDoesNotExist' xNotInVolumeAccessGroup = 'xNotInVolumeAccessGroup' class SolidFireAPIException(exception.VolumeBackendAPIException): message = _("Bad response from SolidFire API") class SolidFireDriverException(exception.VolumeDriverException): message = _("SolidFire Cinder Driver exception") class SolidFireAPIDataException(SolidFireAPIException): message = _("Error in SolidFire API response: data=%(data)s") class SolidFireAccountNotFound(SolidFireDriverException): message = _("Unable to locate account %(account_name)s in " "SolidFire cluster") class SolidFireVolumeNotFound(SolidFireDriverException): message = _("Unable to locate volume id %(volume_id)s in " "SolidFire cluster") class SolidFireRetryableException(exception.VolumeBackendAPIException): message = _("Retryable SolidFire Exception encountered") class SolidFireReplicationPairingError(exception.VolumeBackendAPIException): message = _("Error on SF Keys") class SolidFireDataSyncTimeoutError(exception.VolumeBackendAPIException): message = _("Data sync volumes timed out") class SolidFireDuplicateVolumeNames(SolidFireDriverException): message = _("Volume name [%(vol_name)s] already exists " "in the SolidFire backend.") def retry(exc_tuple, tries=5, delay=1, backoff=2): def retry_dec(f): @functools.wraps(f) def func_retry(*args, **kwargs): _tries, _delay = tries, delay while _tries > 1: try: return f(*args, **kwargs) except exc_tuple: time.sleep(_delay) _tries -= 1 _delay *= backoff LOG.debug('Retrying %(args)s, %(tries)s attempts ' 'remaining...', {'args': args, 'tries': _tries}) # NOTE(jdg): Don't log the params passed here # some cmds like createAccount will have sensitive # info in the params, grab only the second tuple # which should be the Method msg = (_('Retry count exceeded for command: %s') % (args[1],)) LOG.error(msg) raise SolidFireAPIException(message=msg) return func_retry return retry_dec def locked_image_id_operation(f, external=False): def lvo_inner1(inst, *args, **kwargs): lock_tag = inst.driver_prefix call_args = inspect.getcallargs(f, inst, *args, **kwargs) if call_args.get('image_meta'): image_id = call_args['image_meta']['id'] else: err_msg = _('The decorated method must accept image_meta.') raise exception.VolumeBackendAPIException(data=err_msg) @utils.synchronized('%s-%s' % (lock_tag, image_id), external=external) def lvo_inner2(): return f(inst, *args, **kwargs) return lvo_inner2() return lvo_inner1 def 
locked_source_id_operation(f, external=False): def lvo_inner1(inst, *args, **kwargs): lock_tag = inst.driver_prefix call_args = inspect.getcallargs(f, inst, *args, **kwargs) src_arg = call_args.get('source', None) if src_arg and src_arg.get('id', None): source_id = call_args['source']['id'] else: err_msg = _('The decorated method must accept src_uuid.') raise exception.VolumeBackendAPIException(message=err_msg) @utils.synchronized('%s-%s' % (lock_tag, source_id), external=external) def lvo_inner2(): return f(inst, *args, **kwargs) return lvo_inner2() return lvo_inner1 @interface.volumedriver class SolidFireDriver(san.SanISCSIDriver): """OpenStack driver to enable SolidFire cluster. .. code-block:: default Version history: 1.0 - Initial driver 1.1 - Refactor, clone support, qos by type and minor bug fixes 1.2 - Add xfr and retype support 1.2.1 - Add export/import support 1.2.2 - Catch VolumeNotFound on accept xfr 2.0.0 - Move from httplib to requests 2.0.1 - Implement SolidFire Snapshots 2.0.2 - Implement secondary account 2.0.3 - Implement cluster pairing 2.0.4 - Implement volume replication 2.0.5 - Try and deal with the stupid retry/clear issues from objects and tflow 2.0.6 - Add a lock decorator around the clone_image method 2.0.7 - Add scaled IOPS 2.0.8 - Add active status filter to get volume ops 2.0.9 - Always purge on delete volume 2.0.10 - Add response to debug on retryable errors 2.0.11 - Add ability to failback replicating volumes 2.0.12 - Fix bug #1744005 2.0.14 - Fix bug #1782588 qos settings on extend 2.0.15 - Fix bug #1834013 NetApp SolidFire replication errors 2.0.16 - Add options for replication mode (Async, Sync and SnapshotsOnly) 2.0.17 - Fix bug #1859653 SolidFire fails to failback when volume service is restarted 2.1.0 - Add Cinder Active/Active support - Enable Active/Active support flag - Implement Active/Active replication support 2.2.0 - Add storage assisted volume migration support 2.2.1 - Fix bug #1891914 fix error on cluster workload rebalancing by adding xNotPrimary to the retryable exception list 2.2.2 - Fix bug #1896112 SolidFire Driver creates duplicate volume when API response is lost 2.2.3 - Fix bug #1942090 SolidFire retype fails due to volume status as retyping. Fix bug #1932964 SolidFire duplicate volume name exception on migration and replication. 
2.2.4 - Fix bug #1934435 fix driver failing with multiple exceptions during Element OS upgrade by adding xDBOperationTimeout, xDBConnectionLoss, xNoHandler, xSnapshotFailed, xRecvTimeout, xDBNoSuchPath, xPermissionDenied to the retryable exception list 2.2.5 - Fix bug #1934459 SolidFire Driver gets into an infinite recursion on startup while OS Profiler is enabled """ VERSION = '2.2.5' SUPPORTS_ACTIVE_ACTIVE = True # ThirdPartySystems wiki page CI_WIKI_NAME = "NetApp_SolidFire_CI" driver_prefix = 'solidfire' sf_qos_dict = {'slow': {'minIOPS': 100, 'maxIOPS': 200, 'burstIOPS': 200}, 'medium': {'minIOPS': 200, 'maxIOPS': 400, 'burstIOPS': 400}, 'fast': {'minIOPS': 500, 'maxIOPS': 1000, 'burstIOPS': 1000}, 'performant': {'minIOPS': 2000, 'maxIOPS': 4000, 'burstIOPS': 4000}, 'off': None} sf_qos_keys = ['minIOPS', 'maxIOPS', 'burstIOPS'] sf_scale_qos_keys = ['scaledIOPS', 'scaleMin', 'scaleMax', 'scaleBurst'] sf_iops_lim_min = {'minIOPS': 100, 'maxIOPS': 100, 'burstIOPS': 100} sf_iops_lim_max = {'minIOPS': 15000, 'maxIOPS': 200000, 'burstIOPS': 200000} cluster_stats = {} retry_exc_tuple = (SolidFireRetryableException, requests.exceptions.ConnectionError) retryable_errors = ['xDBVersionMismatch', 'xMaxSnapshotsPerVolumeExceeded', 'xMaxClonesPerVolumeExceeded', 'xMaxSnapshotsPerNodeExceeded', 'xMaxClonesPerNodeExceeded', 'xSliceNotRegistered', 'xNotReadyForIO', 'xNotPrimary', 'xDBOperationTimeout', 'xDBConnectionLoss', 'xNoHandler', 'xSnapshotFailed', 'xRecvTimeout', 'xDBNoSuchPath', 'xPermissionDenied'] def __init__(self, *args, **kwargs): super(SolidFireDriver, self).__init__(*args, **kwargs) self.failed_over_id = kwargs.get('active_backend_id', None) self.replication_status = kwargs.get('replication_status', "na") self.configuration.append_config_values(sf_opts) self.template_account_id = None self.max_volumes_per_account = 1990 self.volume_map = {} self.cluster_pairs = [] self.replication_enabled = False self.failed_over = False self.verify_ssl = self.configuration.driver_ssl_cert_verify self.target_driver = SolidFireISCSI(solidfire_driver=self, configuration=self.configuration) self._check_replication_configs() # If we're failed over, we need to parse things out and set the active # cluster appropriately if self.failed_over_id: LOG.info("Running on failed-over mode. 
" "Active backend-id: %s", self.failed_over_id) repl_target = self.configuration.get('replication_device', []) if not repl_target: LOG.error('Failed to initialize SolidFire driver to ' 'a remote cluster specified at id: %s', self.failed_over_id) raise SolidFireDriverException remote_endpoint = self._build_repl_endpoint_info( **repl_target[0]) self.active_cluster = self._create_cluster_reference( remote_endpoint) self.failed_over = True self.replication_enabled = True else: self.active_cluster = self._create_cluster_reference() if self.configuration.replication_device: self._set_cluster_pairs() self.replication_enabled = True LOG.debug("Active cluster: %s", self.active_cluster) # NOTE(jdg): This works even in a failed over state, because what we # do is use self.active_cluster in issue_api_request so by default we # always use the currently active cluster, override that by providing # an endpoint to issue_api_request if needed try: self._update_cluster_status() except SolidFireAPIException: pass @classmethod def get_driver_options(cls): additional_opts = cls._get_oslo_driver_opts( 'san_ip', 'san_login', 'san_password', 'driver_ssl_cert_verify', 'replication_device', 'reserved_percentage', 'max_over_subscription_ratio') return sf_opts + additional_opts def _init_vendor_properties(self): properties = {} self._set_property( properties, "solidfire:replication_mode", "Replication mode", _("Specifies replication mode."), "string", enum=["Async", "Sync", "SnapshotsOnly"]) return properties, 'solidfire' def __getattr__(self, attr): if hasattr(self.target_driver, attr): return getattr(self.target_driver, attr) else: msg = _('Attribute: %s not found.') % attr raise NotImplementedError(msg) def _create_remote_pairing(self, remote_device): try: pairing_info = self._issue_api_request('StartClusterPairing', {}, version='8.0')['result'] pair_id = self._issue_api_request( 'CompleteClusterPairing', {'clusterPairingKey': pairing_info['clusterPairingKey']}, version='8.0', endpoint=remote_device['endpoint'])['result']['clusterPairID'] except SolidFireAPIException as ex: if 'xPairingAlreadyExists' in ex.msg: LOG.debug('Pairing already exists during init.') else: with excutils.save_and_reraise_exception(): LOG.error('Cluster pairing failed: %s', ex.msg) LOG.debug('Initialized Cluster pair with ID: %s', pair_id) return pair_id def _check_replication_configs(self): repl_configs = self.configuration.replication_device if not repl_configs: return # We only support one replication target. Checking if the user is # trying to add more than one; if len(repl_configs) > 1: msg = _("SolidFire driver only supports one replication target " "device.") LOG.error(msg) raise SolidFireDriverException(msg) repl_configs = repl_configs[0] # Check if the user is not using the same MVIP as source # and replication target. 
if repl_configs['mvip'] == self.configuration.san_ip: msg = _("Source mvip cannot be the same " "as the replication target.") LOG.error(msg) raise SolidFireDriverException(msg) def _set_cluster_pairs(self): repl_configs = self.configuration.replication_device[0] remote_endpoint = self._build_repl_endpoint_info(**repl_configs) remote_cluster = self._create_cluster_reference(remote_endpoint) remote_cluster['backend_id'] = repl_configs['backend_id'] cluster_pair = self._get_or_create_cluster_pairing( remote_cluster, check_connected=True) remote_cluster['clusterPairID'] = cluster_pair['clusterPairID'] if self.cluster_pairs: self.cluster_pairs.clear() self.cluster_pairs.append(remote_cluster) def _get_cluster_pair(self, remote_cluster): existing_pairs = self._issue_api_request( 'ListClusterPairs', {}, version='8.0')['result']['clusterPairs'] LOG.debug("Existing cluster pairs: %s", existing_pairs) remote_pair = None for ep in existing_pairs: if remote_cluster['mvip'] == ep['mvip']: remote_pair = ep LOG.debug("Found remote pair: %s", remote_pair) break return remote_pair def _get_or_create_cluster_pairing(self, remote_cluster, check_connected=False): # FIXME(sfernand): We check for pairs only in the remote cluster. # This is an issue if a pair exists only in destination cluster. remote_pair = self._get_cluster_pair(remote_cluster) if not remote_pair: LOG.debug("Setting up new cluster pairs.") self._create_remote_pairing(remote_cluster) remote_pair = self._get_cluster_pair(remote_cluster) if check_connected: if not remote_pair: msg = _("Cluster pair not found for cluster [%s]", remote_cluster['mvip']) raise SolidFireReplicationPairingError(message=msg) if remote_pair['status'] == 'Connected': return remote_pair def _wait_cluster_pairing_connected(): pair = self._get_cluster_pair(remote_cluster) if pair and pair['status'] == 'Connected': raise loopingcall.LoopingCallDone(pair) try: timer = loopingcall.FixedIntervalWithTimeoutLoopingCall( _wait_cluster_pairing_connected) remote_pair = timer.start( interval=3, timeout=self.configuration.sf_cluster_pairing_timeout) \ .wait() except loopingcall.LoopingCallTimeOut: msg = _("Cluster pair not found or in an invalid state.") raise SolidFireReplicationPairingError(message=msg) return remote_pair def _create_cluster_reference(self, endpoint=None): cluster_ref = {} cluster_ref['endpoint'] = endpoint if not endpoint: cluster_ref['endpoint'] = self._build_endpoint_info() cluster_info = (self._issue_api_request( 'GetClusterInfo', {}, endpoint=cluster_ref['endpoint']) ['result']['clusterInfo']) for k, v in cluster_info.items(): cluster_ref[k] = v # Add a couple extra things that are handy for us cluster_ref['clusterAPIVersion'] = ( self._issue_api_request('GetClusterVersionInfo', {}, endpoint=cluster_ref['endpoint']) ['result']['clusterAPIVersion']) # NOTE(sfernand): If a custom svip is configured, we update the # default storage ip to the configuration value. # Otherwise, we update endpoint info with the default storage ip # retrieved from GetClusterInfo API call. 
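        # Illustrative example of the normalization below: an svip of
        # '172.16.1.10' (no port) becomes '172.16.1.10:3260', the default
        # iSCSI port, while a value that already carries a port, e.g.
        # '172.16.1.10:3260', is used unchanged.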
svip = cluster_ref['endpoint'].get('svip') if not svip: svip = cluster_ref['svip'] if ':' not in svip: svip += ':3260' cluster_ref['svip'] = svip cluster_ref['endpoint']['svip'] = svip return cluster_ref def _create_provider_id_string(self, resource_id, account_or_vol_id): # NOTE(jdg): We use the same format, but in the case # of snapshots, we don't have an account id, we instead # swap that with the parent volume id return "%s %s %s" % (resource_id, account_or_vol_id, self.active_cluster['uuid']) def _init_snapshot_mappings(self, srefs): updates = [] sf_snaps = self._issue_api_request( 'ListSnapshots', {}, version='6.0')['result']['snapshots'] for s in srefs: seek_name = '%s%s' % (self.configuration.sf_volume_prefix, s['id']) sfsnap = next( (ss for ss in sf_snaps if ss['name'] == seek_name), None) if sfsnap: id_string = self._create_provider_id_string( sfsnap['snapshotID'], sfsnap['volumeID']) if s.get('provider_id') != id_string: updates.append( {'id': s['id'], 'provider_id': id_string}) return updates def _init_volume_mappings(self, vrefs): updates = [] sf_vols = self._issue_api_request('ListActiveVolumes', {})['result']['volumes'] self.volume_map = {} for v in vrefs: seek_name = '%s%s' % (self.configuration.sf_volume_prefix, v['id']) sfvol = next( (sv for sv in sf_vols if sv['name'] == seek_name), None) if sfvol: if v.get('provider_id', 'nil') != sfvol['volumeID']: updates.append( {'id': v['id'], 'provider_id': self._create_provider_id_string( sfvol['volumeID'], sfvol['accountID'])}) return updates def update_provider_info(self, vrefs, snaprefs): volume_updates = self._init_volume_mappings(vrefs) snapshot_updates = self._init_snapshot_mappings(snaprefs) return (volume_updates, snapshot_updates) def _build_repl_endpoint_info(self, **repl_device): endpoint = { 'mvip': repl_device.get('mvip'), 'login': repl_device.get('login'), 'passwd': repl_device.get('password'), 'port': repl_device.get('port', 443), 'url': 'https://%s:%s' % (repl_device.get('mvip'), repl_device.get('port', 443)), 'svip': repl_device.get('svip') } return endpoint def _build_endpoint_info(self, backend_conf=None, **kwargs): endpoint = {} if not backend_conf: backend_conf = self.configuration # NOTE(jdg): We default to the primary cluster config settings # but always check to see if desired settings were passed in # to handle things like replication targets with unique settings endpoint['mvip'] = ( kwargs.get('mvip', backend_conf.san_ip)) endpoint['login'] = ( kwargs.get('login', backend_conf.san_login)) endpoint['passwd'] = ( kwargs.get('password', backend_conf.san_password)) endpoint['port'] = ( kwargs.get(('port'), backend_conf.sf_api_port)) sanitized_mvip = volume_utils.sanitize_host(endpoint['mvip']) endpoint['url'] = 'https://%s:%s' % (sanitized_mvip, endpoint['port']) endpoint['svip'] = kwargs.get('svip', backend_conf.sf_svip) if not endpoint.get('mvip', None) and kwargs.get('backend_id', None): endpoint['mvip'] = kwargs.get('backend_id') return endpoint @retry(retry_exc_tuple, tries=6) def _issue_api_request(self, method, params, version='1.0', endpoint=None, timeout=None): if params is None: params = {} if endpoint is None: endpoint = self.active_cluster['endpoint'] if not timeout: timeout = self.configuration.sf_api_request_timeout payload = {'method': method, 'params': params} url = '%s/json-rpc/%s/' % (endpoint['url'], version) with warnings.catch_warnings(): warnings.simplefilter( "ignore", requests.packages.urllib3.exceptions.InsecureRequestWarning) req = requests.post(url, data=json.dumps(payload), 
auth=(endpoint['login'], endpoint['passwd']), verify=self.verify_ssl, timeout=timeout) response = req.json() req.close() if (('error' in response) and (response['error']['name'] in self.retryable_errors)): msg = ('Retryable error (%s) encountered during ' 'SolidFire API call.' % response['error']['name']) LOG.debug(msg) LOG.debug("API response: %s", response) raise SolidFireRetryableException(message=msg) if (('error' in response) and response['error']['name'] == 'xInvalidPairingKey'): LOG.debug("Error on volume pairing") raise SolidFireReplicationPairingError if 'error' in response: msg = _('API response: %s') % response raise SolidFireAPIException(msg) return response def _get_volumes_by_sfaccount(self, account_id, endpoint=None): """Get all volumes on cluster for specified account.""" params = {'accountID': account_id} return self._issue_api_request( 'ListVolumesForAccount', params, endpoint=endpoint)['result']['volumes'] def _get_volumes_for_account(self, sf_account_id, cinder_uuid=None, endpoint=None): # ListVolumesForAccount gives both Active and Deleted # we require the solidfire accountID, uuid of volume # is optional vols = self._get_volumes_by_sfaccount(sf_account_id, endpoint=endpoint) if cinder_uuid: vlist = [v for v in vols if cinder_uuid in v['name']] else: vlist = [v for v in vols] vlist = sorted(vlist, key=lambda k: k['volumeID']) return vlist def _get_sfvol_by_cinder_vref(self, vref): # sfvols is one or more element objects returned from a list call # sfvol is the single volume object that will be returned or it will # be None sfvols = None sfvol = None provider_id = vref.get('provider_id', None) if provider_id: try: sf_vid, sf_aid, sf_cluster_id = provider_id.split(' ') except ValueError: LOG.warning("Invalid provider_id entry for volume: %s", vref.id) else: # So there shouldn't be any clusters out in the field that are # running Element < 8.0, but just in case; we'll to a try # block here and fall back to the old methods just to be safe try: sfvol = self._issue_api_request( 'ListVolumes', {'startVolumeID': sf_vid, 'limit': 1}, version='8.0')['result']['volumes'][0] # Bug 1782373 validate the list returned has what we asked # for, check if there was no match if sfvol['volumeID'] != int(sf_vid): sfvol = None except Exception: pass if not sfvol: LOG.info("Failed to find volume by provider_id, " "attempting ListForAccount") for account in self._get_sfaccounts_for_tenant(vref.project_id): sfvols = self._issue_api_request( 'ListVolumesForAccount', {'accountID': account['accountID']})['result']['volumes'] # Bug 1782373 match single vref.id encase no provider as the # above call will return a list for the account for sfv in sfvols: if sfv['attributes'].get('uuid', None) == vref.id: sfvol = sfv break return sfvol def _get_sfaccount_by_name(self, sf_account_name, endpoint=None): """Get SolidFire account object by name.""" sfaccount = None params = {'username': sf_account_name} try: data = self._issue_api_request('GetAccountByName', params, endpoint=endpoint) if 'result' in data and 'account' in data['result']: LOG.debug('Found solidfire account: %s', sf_account_name) sfaccount = data['result']['account'] except SolidFireAPIException as ex: if 'xUnknownAccount' in ex.msg: return sfaccount else: raise return sfaccount def _get_sf_account_name(self, project_id): """Build the SolidFire account name to use.""" prefix = self.configuration.sf_account_prefix or '' if prefix == 'hostname': prefix = socket.gethostname() return '%s%s%s' % (prefix, '-' if prefix else '', project_id) def 
_get_sfaccount(self, project_id): sf_account_name = self._get_sf_account_name(project_id) sfaccount = self._get_sfaccount_by_name(sf_account_name) if sfaccount is None: raise SolidFireAccountNotFound( account_name=sf_account_name) return sfaccount def _create_sfaccount(self, sf_account_name, endpoint=None): """Create account on SolidFire device if it doesn't already exist. We're first going to check if the account already exists, if it does just return it. If not, then create it. """ sfaccount = self._get_sfaccount_by_name(sf_account_name, endpoint=endpoint) if sfaccount is None: LOG.debug('solidfire account: %s does not exist, create it...', sf_account_name) chap_secret = self._generate_random_string(12) params = {'username': sf_account_name, 'initiatorSecret': chap_secret, 'targetSecret': chap_secret, 'attributes': {}} self._issue_api_request('AddAccount', params, endpoint=endpoint) sfaccount = self._get_sfaccount_by_name(sf_account_name, endpoint=endpoint) return sfaccount def _generate_random_string(self, length): """Generates random_string to use for CHAP password.""" return volume_utils.generate_password( length=length, symbolgroups=(string.ascii_uppercase + string.digits)) def _build_connection_info(self, sfaccount, vol, endpoint=None): """Gets the connection info for specified account and volume.""" if endpoint: iscsi_portal = endpoint['svip'] else: iscsi_portal = self.active_cluster['svip'] if ':' not in iscsi_portal: iscsi_portal += ':3260' chap_secret = sfaccount['targetSecret'] vol_id = vol['volumeID'] iqn = vol['iqn'] conn_info = { # NOTE(john-griffith): SF volumes are always at lun 0 'provider_location': ('%s %s %s' % (iscsi_portal, iqn, 0)), 'provider_auth': ('CHAP %s %s' % (sfaccount['username'], chap_secret)) } if not self.configuration.sf_emulate_512: conn_info['provider_geometry'] = ('%s %s' % (4096, 4096)) conn_info['provider_id'] = ( self._create_provider_id_string(vol_id, sfaccount['accountID'])) return conn_info def _get_model_info(self, sfaccount, sf_volume_id, endpoint=None): volume = None volume_list = self._get_volumes_by_sfaccount( sfaccount['accountID'], endpoint=endpoint) for v in volume_list: if v['volumeID'] == sf_volume_id: volume = v break if not volume: LOG.error('Failed to retrieve volume SolidFire-' 'ID: %s in get_by_account!', sf_volume_id) raise exception.VolumeNotFound(volume_id=sf_volume_id) model_update = self._build_connection_info(sfaccount, volume, endpoint=endpoint) return model_update def _snapshot_discovery(self, src_uuid, params, vref): # NOTE(jdg): First check the SF snapshots # if we don't find a snap by the given name, just move on to check # volumes. 
This may be a running system that was updated from # before we did snapshots, so need to check both is_clone = False sf_vol = None snap_name = '%s%s' % (self.configuration.sf_volume_prefix, src_uuid) snaps = self._get_sf_snapshots() snap = next((s for s in snaps if s["name"] == snap_name), None) if snap: params['snapshotID'] = int(snap['snapshotID']) params['volumeID'] = int(snap['volumeID']) params['newSize'] = int(vref['size'] * units.Gi) else: sf_vol = self._get_sf_volume(src_uuid) if sf_vol is None: raise exception.VolumeNotFound(volume_id=src_uuid) params['volumeID'] = int(sf_vol['volumeID']) params['newSize'] = int(vref['size'] * units.Gi) is_clone = True return params, is_clone, sf_vol def _do_clone_volume(self, src_uuid, vref, sf_src_snap=None): """Create a clone of an existing volume or snapshot.""" LOG.debug("Creating cloned volume from vol %(src)s to %(dst)s.", {'src': src_uuid, 'dst': vref.id}) sf_account = self._get_create_account(vref['project_id']) params = {'name': '%(prefix)s%(id)s' % {'prefix': self.configuration.sf_volume_prefix, 'id': vref['id']}, 'newAccountID': sf_account['accountID']} is_clone = False if sf_src_snap: # In some scenarios we are passed the snapshot information that we # are supposed to clone. params['snapshotID'] = sf_src_snap['snapshotID'] params['volumeID'] = sf_src_snap['volumeID'] params['newSize'] = int(vref['size'] * units.Gi) else: params, is_clone, sf_src_vol = self._snapshot_discovery( src_uuid, params, vref) data = self._issue_api_request('CloneVolume', params, version='6.0') if (('result' not in data) or ('volumeID' not in data['result'])): msg = _("API response: %s") % data raise SolidFireAPIException(msg) sf_cloned_id = data['result']['volumeID'] # NOTE(jdg): all attributes are copied via clone, need to do an update # to set any that were provided params = self._get_default_volume_params(vref, is_clone=is_clone) params['volumeID'] = sf_cloned_id data = self._issue_api_request('ModifyVolume', params) def _wait_volume_is_active(): try: model_info = self._get_model_info(sf_account, sf_cloned_id) if model_info: raise loopingcall.LoopingCallDone(model_info) except exception.VolumeNotFound: LOG.debug('Waiting for cloned volume [%s] - [%s] to become ' 'active', sf_cloned_id, vref.id) pass try: timer = loopingcall.FixedIntervalWithTimeoutLoopingCall( _wait_volume_is_active) model_update = timer.start( interval=1, timeout=self.configuration.sf_volume_clone_timeout).wait() except loopingcall.LoopingCallTimeOut: msg = _('Failed to get model update from clone [%s] - [%s]' % (sf_cloned_id, vref.id)) LOG.error(msg) raise SolidFireAPIException(msg) rep_settings = self._retrieve_replication_settings(vref) if self.replication_enabled and rep_settings: try: vref['volumeID'] = sf_cloned_id rep_updates = self._replicate_volume( vref, params, sf_account, rep_settings) model_update.update(rep_updates) except SolidFireDriverException: with excutils.save_and_reraise_exception(): self._issue_api_request('DeleteVolume', {'volumeID': sf_cloned_id}) self._issue_api_request('PurgeDeletedVolume', {'volumeID': sf_cloned_id}) # Increment the usage count, just for data collection # We're only doing this for clones, not create_from snaps if is_clone: data = self._update_attributes(sf_src_vol) return (data, sf_account, model_update) def _update_attributes(self, sf_vol): cloned_count = sf_vol['attributes'].get('cloned_count', 0) cloned_count += 1 attributes = sf_vol['attributes'] attributes['cloned_count'] = cloned_count params = {'volumeID': int(sf_vol['volumeID'])} 
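# Illustrative values (not from the source): if SolidFire volume 42 has
# already been cloned twice, the ModifyVolume request assembled below is
# roughly
#   {'volumeID': 42, 'attributes': {..., 'cloned_count': 3}}
# The full attribute dict is re-sent rather than just the changed key,
# which suggests ModifyVolume replaces the attribute blob wholesale
# rather than merging it.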
params['attributes'] = attributes return self._issue_api_request('ModifyVolume', params) def _list_volumes_by_name(self, sf_volume_name, endpoint=None): params = {'volumeName': sf_volume_name} return self._issue_api_request('ListVolumes', params, version='8.0', endpoint=endpoint)['result']['volumes'] def _wait_volume_is_active(self, sf_volume_name): def _wait(): volumes = self._list_volumes_by_name(sf_volume_name) if volumes: LOG.debug("Found Volume [%s] in SolidFire backend. " "Current status is [%s].", sf_volume_name, volumes[0]['status']) if volumes[0]['status'] == 'active': raise loopingcall.LoopingCallDone(volumes[0]) try: timer = loopingcall.FixedIntervalWithTimeoutLoopingCall( _wait) sf_volume = (timer.start( interval=1, timeout=self.configuration.sf_volume_create_timeout).wait()) return sf_volume except loopingcall.LoopingCallTimeOut: msg = ("Timeout while waiting volume [%s] " "to be in active state." % sf_volume_name) LOG.error(msg) raise SolidFireAPIException(msg) def _do_volume_create(self, sf_account, params, endpoint=None): sf_volume_name = params['name'] volumes_found = self._list_volumes_by_name(sf_volume_name, endpoint=endpoint) if volumes_found: raise SolidFireDuplicateVolumeNames(vol_name=sf_volume_name) sf_volid = None try: params['accountID'] = sf_account['accountID'] response = self._issue_api_request( 'CreateVolume', params, endpoint=endpoint) sf_volid = response['result']['volumeID'] except requests.exceptions.ReadTimeout: LOG.debug("Read Timeout exception caught while creating " "volume [%s].", sf_volume_name) # Check if volume was created for the given name, # in case the backend has processed the request but failed # to deliver the response before api request timeout. volume_created = self._wait_volume_is_active(sf_volume_name) sf_volid = volume_created['volumeID'] return self._get_model_info(sf_account, sf_volid, endpoint=endpoint) def _do_snapshot_create(self, params): model_update = {} snapshot_id = self._issue_api_request( 'CreateSnapshot', params, version='6.0')['result']['snapshotID'] snaps = self._get_sf_snapshots() snap = ( next((s for s in snaps if int(s["snapshotID"]) == int(snapshot_id)), None)) model_update['provider_id'] = ( self._create_provider_id_string(snap['snapshotID'], snap['volumeID'])) return model_update def _set_qos_presets(self, volume): qos = {} valid_presets = self.sf_qos_dict.keys() # First look to see if they included a preset presets = [i.value for i in volume.get('volume_metadata') if i.key == 'sf-qos' and i.value in valid_presets] if len(presets) > 0: if len(presets) > 1: LOG.warning('More than one valid preset was ' 'detected, using %s', presets[0]) qos = self.sf_qos_dict[presets[0]] else: # look for explicit settings for i in volume.get('volume_metadata'): if i.key in self.sf_qos_keys: qos[i.key] = int(i.value) return qos def _extract_sf_attributes_from_extra_specs(self, type_id): # This will do a 1:1 copy of the extra spec keys that # include the SolidFire delimeter into a Volume attribute # K/V pair ctxt = context.get_admin_context() volume_type = volume_types.get_volume_type(ctxt, type_id) specs = volume_type.get('extra_specs') sf_keys = [] for key, value in specs.items(): if "SFAttribute:" in key: fields = key.split(':') sf_keys.append({fields[1]: value}) return sf_keys def _set_qos_by_volume_type(self, ctxt, type_id, vol_size): qos = {} scale_qos = {} volume_type = volume_types.get_volume_type(ctxt, type_id) qos_specs_id = volume_type.get('qos_specs_id') specs = volume_type.get('extra_specs') # NOTE(jdg): We prefer the 
qos_specs association # and over-ride any existing # extra-specs settings if present if qos_specs_id is not None: # Policy changes require admin context to get QoS specs # at the object layer (base:get_by_id), we can either # explicitly promote here, or pass in a context of None # and let the qos_specs api get an admin context for us # personally I prefer explicit, so here ya go. admin_ctxt = context.get_admin_context() kvs = qos_specs.get_qos_specs(admin_ctxt, qos_specs_id)['specs'] else: kvs = specs for key, value in kvs.items(): if ':' in key: fields = key.split(':') key = fields[1] if key in self.sf_qos_keys: qos[key] = int(value) if key in self.sf_scale_qos_keys: scale_qos[key] = value # look for the 'scaledIOPS' key and scale QoS if set if 'scaledIOPS' in scale_qos: scale_qos.pop('scaledIOPS') for key, value in scale_qos.items(): if key == 'scaleMin': qos['minIOPS'] = (qos['minIOPS'] + (int(value) * (vol_size - 1))) elif key == 'scaleMax': qos['maxIOPS'] = (qos['maxIOPS'] + (int(value) * (vol_size - 1))) elif key == 'scaleBurst': qos['burstIOPS'] = (qos['burstIOPS'] + (int(value) * (vol_size - 1))) # Cap the IOPS values at their limits capped = False for key, value in qos.items(): if value > self.sf_iops_lim_max[key]: qos[key] = self.sf_iops_lim_max[key] capped = True if value < self.sf_iops_lim_min[key]: qos[key] = self.sf_iops_lim_min[key] capped = True if capped: LOG.debug("A SolidFire QoS value was capped at the defined limits") # Check that minIOPS <= maxIOPS <= burstIOPS if (qos.get('minIOPS', 0) > qos.get('maxIOPS', 0) or qos.get('maxIOPS', 0) > qos.get('burstIOPS', 0)): msg = (_("Scaled QoS error. Must be minIOPS <= maxIOPS <= " "burstIOPS. Currently: Min: %(min)s, Max: " "%(max)s, Burst: %(burst)s.") % {"min": qos['minIOPS'], "max": qos['maxIOPS'], "burst": qos['burstIOPS']}) raise exception.InvalidQoSSpecs(reason=msg) return qos def _get_sf_volume(self, uuid, params=None, endpoint=None): if params: vols = [v for v in self._issue_api_request( 'ListVolumesForAccount', params)['result']['volumes'] if v['status'] == "active"] else: vols = self._issue_api_request( 'ListActiveVolumes', params, endpoint=endpoint)['result']['volumes'] found_count = 0 sf_volref = None for v in vols: # NOTE(jdg): In the case of "name" we can't # update that on manage/import, so we use # the uuid attribute meta = v.get('attributes') alt_id = '' if meta: alt_id = meta.get('uuid', '') if uuid in v['name'] or uuid in alt_id: found_count += 1 sf_volref = v LOG.debug("Mapped SolidFire volumeID %(volume_id)s " "to cinder ID %(uuid)s.", {'volume_id': v['volumeID'], 'uuid': uuid}) if found_count == 0: # NOTE(jdg): Previously we would raise here, but there are cases # where this might be a cleanup for a failed delete. 
# Until we get better states we'll just log an error LOG.error("Volume %s, not found on SF Cluster.", uuid) if found_count > 1: LOG.error("Found %(count)s volumes mapped to id: %(uuid)s.", {'count': found_count, 'uuid': uuid}) raise SolidFireDuplicateVolumeNames(vol_name=uuid) return sf_volref def _get_sf_snapshots(self, sf_volid=None): params = {} if sf_volid: params = {'volumeID': sf_volid} return self._issue_api_request( 'ListSnapshots', params, version='6.0')['result']['snapshots'] def _get_sfaccounts_for_tenant(self, cinder_project_id, endpoint=None): accounts = self._issue_api_request( 'ListAccounts', {}, endpoint=endpoint)['result']['accounts'] # Note(jdg): On SF we map account-name to OpenStack's tenant ID # we use tenantID in here to get secondaries that might exist # Also: we expect this to be sorted, so we get the primary first # in the list return sorted([acc for acc in accounts if self._get_sf_account_name(cinder_project_id) in acc['username']], key=lambda k: k['accountID']) def _get_all_active_volumes(self, cinder_uuid=None): params = {} volumes = self._issue_api_request('ListActiveVolumes', params)['result']['volumes'] if cinder_uuid: vols = ([v for v in volumes if cinder_uuid in v['name']]) else: vols = [v for v in volumes] return vols def _get_account_create_availability(self, accounts, endpoint=None): # we'll check both the primary and the secondary # if it exists and return whichever one has count # available. for acc in accounts: if len(self._get_volumes_for_account( acc['accountID'], endpoint=endpoint)) < self.max_volumes_per_account: return acc if len(accounts) == 1: sfaccount = self._create_sfaccount(accounts[0]['username'] + '_', endpoint=endpoint) return sfaccount return None def _get_create_account(self, proj_id, endpoint=None): # Retrieve SolidFire accountID to be used for creating volumes. sf_accounts = self._get_sfaccounts_for_tenant( proj_id, endpoint=endpoint) if not sf_accounts: sf_account_name = self._get_sf_account_name(proj_id) sf_account = self._create_sfaccount( sf_account_name, endpoint=endpoint) else: # Check availability for creates sf_account = self._get_account_create_availability( sf_accounts, endpoint=endpoint) if not sf_account: msg = _('Volumes/account exceeded on both primary and ' 'secondary SolidFire accounts.') raise SolidFireDriverException(msg) return sf_account def _create_vag(self, iqn, vol_id=None): """Create a volume access group(vag). Returns the vag_id. """ vag_name = re.sub('[^0-9a-zA-Z]+', '-', iqn) params = {'name': vag_name, 'initiators': [iqn], 'volumes': [vol_id], 'attributes': {'openstack': True}} try: result = self._issue_api_request('CreateVolumeAccessGroup', params, version='7.0') return result['result']['volumeAccessGroupID'] except SolidFireAPIException as error: if xExceededLimit in error.msg: if iqn in error.msg: # Initiator double registered. return self._safe_create_vag(iqn, vol_id) else: # VAG limit reached. Purge and start over. self._purge_vags() return self._safe_create_vag(iqn, vol_id) else: raise def _safe_create_vag(self, iqn, vol_id=None): # Potential race condition with simultaneous volume attaches to the # same host. To help avoid this, VAG creation makes a best attempt at # finding and using an existing VAG. vags = self._get_vags_by_name(iqn) if vags: # Filter through the vags and find the one with matching initiator vag = next((v for v in vags if iqn in v['initiators']), None) if vag: return vag['volumeAccessGroupID'] else: # No matches, use the first result, add initiator IQN.
vag_id = vags[0]['volumeAccessGroupID'] return self._add_initiator_to_vag(iqn, vag_id) return self._create_vag(iqn, vol_id) def _base_get_vags(self): params = {} vags = self._issue_api_request( 'ListVolumeAccessGroups', params, version='7.0')['result']['volumeAccessGroups'] return vags def _get_vags_by_name(self, iqn): """Retrieve SolidFire volume access group objects by name. Returns an array of vags with a matching name value. Returns an empty array if there are no matches. """ vags = self._base_get_vags() vag_name = re.sub('[^0-9a-zA-Z]+', '-', iqn) matching_vags = [vag for vag in vags if vag['name'] == vag_name] return matching_vags def _get_vags_by_volume(self, vol_id): params = {"volumeID": vol_id} vags = self._issue_api_request( 'GetVolumeStats', params)['result']['volumeStats']['volumeAccessGroups'] return vags def _add_initiator_to_vag(self, iqn, vag_id): # Added a vag_id return as there is a chance that we might have to # create a new VAG if our target VAG is deleted underneath us. params = {"initiators": [iqn], "volumeAccessGroupID": vag_id} try: self._issue_api_request('AddInitiatorsToVolumeAccessGroup', params, version='7.0') return vag_id except SolidFireAPIException as error: if xAlreadyInVolumeAccessGroup in error.msg: return vag_id elif xVolumeAccessGroupIDDoesNotExist in error.msg: # No locking means sometimes a VAG can be removed by a parallel # volume detach against the same host. return self._safe_create_vag(iqn) else: raise def _add_volume_to_vag(self, vol_id, iqn, vag_id): # Added a vag_id return to be consistent with add_initiator_to_vag. It # isn't necessary but may be helpful in the future. params = {"volumeAccessGroupID": vag_id, "volumes": [vol_id]} try: self._issue_api_request('AddVolumesToVolumeAccessGroup', params, version='7.0') return vag_id except SolidFireAPIException as error: if xAlreadyInVolumeAccessGroup in error.msg: return vag_id elif xVolumeAccessGroupIDDoesNotExist in error.msg: return self._safe_create_vag(iqn, vol_id) else: raise def _remove_volume_from_vag(self, vol_id, vag_id): params = {"volumeAccessGroupID": vag_id, "volumes": [vol_id]} try: self._issue_api_request('RemoveVolumesFromVolumeAccessGroup', params, version='7.0') except SolidFireAPIException as error: if xNotInVolumeAccessGroup in error.msg: pass elif xVolumeAccessGroupIDDoesNotExist in error.msg: pass else: raise def _remove_volume_from_vags(self, vol_id): # Due to all sorts of uncertainty around multiattach, on volume # deletion we make a best attempt at removing the vol_id from VAGs. vags = self._get_vags_by_volume(vol_id) for vag in vags: self._remove_volume_from_vag(vol_id, vag['volumeAccessGroupID']) def _remove_vag(self, vag_id): params = {"volumeAccessGroupID": vag_id} try: self._issue_api_request('DeleteVolumeAccessGroup', params, version='7.0') except SolidFireAPIException as error: if xVolumeAccessGroupIDDoesNotExist not in error.msg: raise def _purge_vags(self, limit=10): # Purge up to limit number of VAGs that have no active volumes, # initiators, and an OpenStack attribute. Purge oldest VAGs first. 
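# "Oldest first" is approximated below by sorting candidates on their
# volumeAccessGroupID, on the assumption that SolidFire assigns IDs in
# increasing order. A VAG only qualifies for purging when its volumes,
# deletedVolumes and initiators lists are all empty and it carries the
# 'openstack' attribute stamped on it by _create_vag().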
vags = self._base_get_vags() targets = [v for v in vags if v['volumes'] == [] and v['initiators'] == [] and v['deletedVolumes'] == [] and v['attributes'].get('openstack')] sorted_targets = sorted(targets, key=lambda k: k['volumeAccessGroupID']) for vag in sorted_targets[:limit]: self._remove_vag(vag['volumeAccessGroupID']) # extended_size > 0 when we are extending a volume def _retrieve_qos_setting(self, volume, extended_size=0): qos = {} if (self.configuration.sf_allow_tenant_qos and volume.get('volume_metadata') is not None): qos = self._set_qos_presets(volume) ctxt = context.get_admin_context() type_id = volume.get('volume_type_id', None) if type_id is not None: qos = self._set_qos_by_volume_type(ctxt, type_id, extended_size if extended_size > 0 else volume.get('size')) return qos def _get_default_volume_params(self, volume, sf_account=None, is_clone=False): if not sf_account: sf_account = self._get_create_account(volume.project_id) qos = self._retrieve_qos_setting(volume) create_time = volume.created_at.isoformat() attributes = { 'uuid': volume.id, 'is_clone': is_clone, 'created_at': create_time, 'cinder-name': volume.get('display_name', "") } if volume.volume_type_id: for attr in self._extract_sf_attributes_from_extra_specs( volume.volume_type_id): for k, v in attr.items(): attributes[k] = v vol_name = '%s%s' % (self.configuration.sf_volume_prefix, volume.id) params = {'name': vol_name, 'accountID': sf_account['accountID'], 'sliceCount': 1, 'totalSize': int(volume.size * units.Gi), 'enable512e': self.configuration.sf_emulate_512, 'attributes': attributes, 'qos': qos} return params def create_volume(self, volume): """Create volume on SolidFire device. The account is where CHAP settings are derived from, volume is created and exported. Note that the new volume is immediately ready for use. One caveat here is that an existing user account must be specified in the API call to create a new volume. We use a set algorithm to determine account info based on passed in cinder volume object. First we check to see if the account already exists (and use it), or if it does not already exist, we'll go ahead and create it. """ sf_account = self._get_create_account(volume['project_id']) params = self._get_default_volume_params(volume, sf_account) # NOTE(jdg): Check if we're a migration tgt, if so # use the old volume-id here for the SF Name migration_status = volume.get('migration_status', None) if migration_status and 'target' in migration_status: k, v = migration_status.split(':') vname = '%s%s' % (self.configuration.sf_volume_prefix, v) params['name'] = vname params['attributes']['migration_uuid'] = volume['id'] params['attributes']['uuid'] = v model_update = self._do_volume_create(sf_account, params) try: rep_settings = self._retrieve_replication_settings(volume) if self.replication_enabled and rep_settings: volume['volumeID'] = ( int(model_update['provider_id'].split()[0])) rep_updates = self._replicate_volume(volume, params, sf_account, rep_settings) if rep_updates: model_update.update(rep_updates) except SolidFireAPIException: # NOTE(jdg): Something went wrong after the source create, due to # the way TFLOW works and it's insistence on retrying the same # command over and over coupled with the fact that the introduction # of objects now sets host to None on failures we'll end up with an # orphaned volume on the backend for every one of these segments # that fail, for n-retries. Sad Sad Panda!! 
We'll just do it # ourselves until we can get a general fix in Cinder further up the # line with excutils.save_and_reraise_exception(): sf_volid = int(model_update['provider_id'].split()[0]) self._issue_api_request('DeleteVolume', {'volumeID': sf_volid}) self._issue_api_request('PurgeDeletedVolume', {'volumeID': sf_volid}) return model_update def _retrieve_replication_settings(self, volume): rep_data = "Async" ctxt = context.get_admin_context() type_id = volume.get('volume_type_id', None) if type_id is not None: rep_data = self._set_rep_by_volume_type(ctxt, type_id) return rep_data def _set_rep_by_volume_type(self, ctxt, type_id): rep_modes = ['Async', 'Sync', 'SnapshotsOnly'] rep_opts = {} type_ref = volume_types.get_volume_type(ctxt, type_id) specs = type_ref.get('extra_specs') if specs.get('replication_enabled', "") == "<is> True": if specs.get('solidfire:replication_mode') in rep_modes: rep_opts['rep_type'] = specs.get('solidfire:replication_mode') else: rep_opts['rep_type'] = 'Async' return rep_opts def _create_volume_pairing(self, volume, dst_volume, tgt_cluster): src_sf_volid = int(volume['provider_id'].split()[0]) dst_sf_volid = int(dst_volume['provider_id'].split()[0]) @retry(SolidFireReplicationPairingError, tries=6) def _pair_volumes(): rep_type = "Sync" # Enable volume pairing LOG.debug("Starting pairing source volume ID: %s", src_sf_volid) # Make sure we split any pair the volume has params = { 'volumeID': src_sf_volid, 'mode': rep_type } self._issue_api_request('RemoveVolumePair', params, '8.0') rep_key = self._issue_api_request( 'StartVolumePairing', params, '8.0')['result']['volumePairingKey'] LOG.debug("Volume pairing started on source: " "%(endpoint)s", {'endpoint': tgt_cluster['endpoint']['url']}) params = { 'volumeID': dst_sf_volid, 'volumePairingKey': rep_key } self._issue_api_request('CompleteVolumePairing', params, '8.0', endpoint=tgt_cluster['endpoint']) LOG.debug("Volume pairing completed on destination: " "%(endpoint)s", {'endpoint': tgt_cluster['endpoint']['url']}) _pair_volumes() def _replicate_volume(self, volume, params, parent_sfaccount, rep_info): updates = {} rep_success_status = fields.ReplicationStatus.ENABLED # NOTE(erlon): Right now we only support 1 remote target, so we always # get cluster_pairs[0] tgt_endpoint = self.cluster_pairs[0]['endpoint'] LOG.debug("Replicating volume on remote cluster: %(tgt)s\n params: " "%(params)s", {'tgt': tgt_endpoint, 'params': params}) params['username'] = self._get_sf_account_name(volume['project_id']) try: params['initiatorSecret'] = parent_sfaccount['initiatorSecret'] params['targetSecret'] = parent_sfaccount['targetSecret'] self._issue_api_request( 'AddAccount', params, endpoint=tgt_endpoint)['result']['accountID'] except SolidFireAPIException as ex: if 'xDuplicateUsername' not in ex.msg: raise remote_account = ( self._get_sfaccount_by_name(params['username'], endpoint=tgt_endpoint)) # Create the volume on the remote cluster w/same params as original params['accountID'] = remote_account['accountID'] LOG.debug("Create remote volume on: %(endpoint)s with account: " "%(account)s", {'endpoint': tgt_endpoint['url'], 'account': remote_account}) model_update = self._do_volume_create( remote_account, params, endpoint=tgt_endpoint) tgt_sfid = int(model_update['provider_id'].split()[0]) params = {'volumeID': tgt_sfid, 'access': 'replicationTarget'} self._issue_api_request('ModifyVolume', params, '8.0', endpoint=tgt_endpoint) # NOTE(erlon): For some reason the SF cluster randomly fails the # replication of volumes.
The generated keys are deemed invalid by the # target backend. When that happens, we re-start the volume pairing # process. @retry(SolidFireReplicationPairingError, tries=6) def _pair_volumes(): # Enable volume pairing LOG.debug("Start volume pairing on volume ID: %s", volume['volumeID']) # Make sure we split any pair the volume have params = {'volumeID': volume['volumeID'], 'mode': rep_info['rep_type']} self._issue_api_request('RemoveVolumePair', params, '8.0') rep_key = self._issue_api_request( 'StartVolumePairing', params, '8.0')['result']['volumePairingKey'] params = {'volumeID': tgt_sfid, 'volumePairingKey': rep_key} LOG.debug("Sending issue CompleteVolumePairing request on remote: " "%(endpoint)s, %(parameters)s", {'endpoint': tgt_endpoint['url'], 'parameters': params}) self._issue_api_request('CompleteVolumePairing', params, '8.0', endpoint=tgt_endpoint) try: _pair_volumes() except SolidFireAPIException: with excutils.save_and_reraise_exception(): params = {'volumeID': tgt_sfid} LOG.debug("Error pairing volume on remote cluster. Rolling " "back and deleting volume %(vol)s at cluster " "%(cluster)s.", {'vol': tgt_sfid, 'cluster': tgt_endpoint}) self._issue_api_request('DeleteVolume', params, endpoint=tgt_endpoint) self._issue_api_request('PurgeDeletedVolume', params, endpoint=tgt_endpoint) updates['replication_status'] = rep_success_status LOG.debug("Completed volume pairing.") return updates def _disable_replication(self, volume): updates = {} tgt_endpoint = self.cluster_pairs[0]['endpoint'] sfvol = self._get_sfvol_by_cinder_vref(volume) if len(sfvol['volumePairs']) != 1: LOG.warning("Trying to disable replication on volume %s but " "volume does not have pairs.", volume.id) updates['replication_status'] = fields.ReplicationStatus.DISABLED return updates params = {'volumeID': sfvol['volumeID']} self._issue_api_request('RemoveVolumePair', params, '8.0') remote_sfid = sfvol['volumePairs'][0]['remoteVolumeID'] params = {'volumeID': remote_sfid} self._issue_api_request('RemoveVolumePair', params, '8.0', endpoint=tgt_endpoint) self._issue_api_request('DeleteVolume', params, endpoint=tgt_endpoint) self._issue_api_request('PurgeDeletedVolume', params, endpoint=tgt_endpoint) updates['replication_status'] = fields.ReplicationStatus.DISABLED return updates @locked_source_id_operation def create_cloned_volume(self, volume, source): """Create a clone of an existing volume.""" (_data, _sfaccount, model) = self._do_clone_volume( source['id'], volume) return model def delete_volume(self, volume): """Delete SolidFire Volume from device. SolidFire allows multiple volumes with same name, volumeID is what's guaranteed unique. """ sf_vol = self._get_sfvol_by_cinder_vref(volume) if sf_vol is not None: for vp in sf_vol.get('volumePairs', []): LOG.debug("Deleting paired volume on remote cluster...") pair_id = vp['clusterPairID'] for cluster in self.cluster_pairs: if cluster['clusterPairID'] == pair_id: params = {'volumeID': vp['remoteVolumeID']} LOG.debug("Issue Delete request on cluster: " "%(remote)s with params: %(parameters)s", {'remote': cluster['endpoint']['url'], 'parameters': params}) self._issue_api_request('DeleteVolume', params, endpoint=cluster['endpoint']) self._issue_api_request('PurgeDeletedVolume', params, endpoint=cluster['endpoint']) # The multiattach volumes are only removed from the VAG on # deletion. 
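# This is a best-effort sweep: _remove_volume_from_vags() walks every VAG
# still referencing the volume, and the underlying removal call silently
# ignores VAGs that a concurrent detach has already cleaned up.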
if volume.get('multiattach'): self._remove_volume_from_vags(sf_vol['volumeID']) if sf_vol['status'] == 'active': params = {'volumeID': sf_vol['volumeID']} self._issue_api_request('DeleteVolume', params) self._issue_api_request('PurgeDeletedVolume', params) else: LOG.error("Volume ID %s was not found on " "the SolidFire Cluster while attempting " "delete_volume operation!", volume['id']) def delete_snapshot(self, snapshot): """Delete the specified snapshot from the SolidFire cluster.""" sf_snap_name = '%s%s' % (self.configuration.sf_volume_prefix, snapshot['id']) accounts = self._get_sfaccounts_for_tenant(snapshot['project_id']) snap = None for acct in accounts: params = {'accountID': acct['accountID']} sf_vol = self._get_sf_volume(snapshot['volume_id'], params) if sf_vol: sf_snaps = self._get_sf_snapshots(sf_vol['volumeID']) snap = next((s for s in sf_snaps if s["name"] == sf_snap_name), None) if snap: params = {'snapshotID': snap['snapshotID']} self._issue_api_request('DeleteSnapshot', params, version='6.0') return LOG.warning( "Snapshot %s not found, old style clones may not be deleted.", snapshot.id) def create_snapshot(self, snapshot): sfaccount = self._get_sfaccount(snapshot['project_id']) if sfaccount is None: LOG.error("Account for Volume ID %s was not found on " "the SolidFire Cluster while attempting " "create_snapshot operation!", snapshot['volume_id']) params = {'accountID': sfaccount['accountID']} sf_vol = self._get_sf_volume(snapshot['volume_id'], params) if sf_vol is None: raise exception.VolumeNotFound(volume_id=snapshot['volume_id']) params = {'volumeID': sf_vol['volumeID'], 'name': '%s%s' % (self.configuration.sf_volume_prefix, snapshot['id'])} rep_settings = self._retrieve_replication_settings(snapshot.volume) if self.replication_enabled and rep_settings: params['enableRemoteReplication'] = True return self._do_snapshot_create(params) @locked_source_id_operation def create_volume_from_snapshot(self, volume, source): """Create a volume from the specified snapshot.""" if source.get('group_snapshot_id'): # We're creating a volume from a snapshot that resulted from a # consistency group snapshot. Because of the way that SolidFire # creates cgsnaps, we have to search for the correct snapshot. group_snapshot_id = source.get('group_snapshot_id') snapshot_id = source.get('volume_id') sf_name = self.configuration.sf_volume_prefix + group_snapshot_id sf_group_snap = self._get_group_snapshot_by_name(sf_name) return self._create_clone_from_sf_snapshot(snapshot_id, group_snapshot_id, sf_group_snap, volume) (_data, _sfaccount, model) = self._do_clone_volume( source['id'], volume) return model # Consistency group helpers def _sf_create_group_snapshot(self, name, sf_volumes): # Group snapshot is our version of a consistency group snapshot. vol_ids = [vol['volumeID'] for vol in sf_volumes] params = {'name': name, 'volumes': vol_ids} snapshot_id = self._issue_api_request('CreateGroupSnapshot', params, version='7.0') return snapshot_id['result'] def _group_snapshot_creator(self, gsnap_name, src_vol_ids): # Common helper that takes in an array of OpenStack Volume UUIDs and # creates a SolidFire group snapshot with them. vol_names = [self.configuration.sf_volume_prefix + vol_id for vol_id in src_vol_ids] active_sf_vols = self._get_all_active_volumes() target_vols = [vol for vol in active_sf_vols if vol['name'] in vol_names] if len(src_vol_ids) != len(target_vols): msg = (_("Retrieved a different amount of SolidFire volumes for " "the provided Cinder volumes. 
Retrieved: %(ret)s " "Desired: %(des)s") % {"ret": len(target_vols), "des": len(src_vol_ids)}) raise SolidFireDriverException(msg) result = self._sf_create_group_snapshot(gsnap_name, target_vols) return result def _create_temp_group_snapshot(self, source_cg, source_vols): # Take a temporary snapshot to create the volumes for a new # consistency group. gsnap_name = ("%(prefix)s%(id)s-tmp" % {"prefix": self.configuration.sf_volume_prefix, "id": source_cg['id']}) vol_ids = [vol['id'] for vol in source_vols] self._group_snapshot_creator(gsnap_name, vol_ids) return gsnap_name def _list_group_snapshots(self): result = self._issue_api_request('ListGroupSnapshots', {}, version='7.0') return result['result']['groupSnapshots'] def _get_group_snapshot_by_name(self, name): target_snaps = self._list_group_snapshots() target = next((snap for snap in target_snaps if snap['name'] == name), None) return target def _delete_group_snapshot(self, gsnapid): params = {'groupSnapshotID': gsnapid} self._issue_api_request('DeleteGroupSnapshot', params, version='7.0') def _delete_cgsnapshot_by_name(self, snap_name): # Common function used to find and delete a snapshot. target = self._get_group_snapshot_by_name(snap_name) if not target: msg = _("Failed to find group snapshot named: %s") % snap_name raise SolidFireDriverException(msg) self._delete_group_snapshot(target['groupSnapshotID']) def _find_linked_snapshot(self, target_uuid, group_snap): # Because group snapshots name each individual snapshot the group # snapshot name, we have to trawl through the SolidFire snapshots to # find the SolidFire snapshot from the group that is linked with the # SolidFire volumeID that is linked to the Cinder snapshot source # volume. source_vol = self._get_sf_volume(target_uuid) target_snap = next((sn for sn in group_snap['members'] if sn['volumeID'] == source_vol['volumeID']), None) return target_snap def _create_clone_from_sf_snapshot(self, target_uuid, src_uuid, sf_group_snap, vol): # Find the correct SolidFire backing snapshot. sf_src_snap = self._find_linked_snapshot(target_uuid, sf_group_snap) _data, _sfaccount, model = self._do_clone_volume(src_uuid, vol, sf_src_snap) model['id'] = vol['id'] model['status'] = 'available' return model def _map_sf_volumes(self, cinder_volumes, endpoint=None): """Get a list of SolidFire volumes. Creates a list of SolidFire volumes based on matching a list of cinder volume ID's, also adds an 'cinder_id' key to match cinder. """ vols = self._issue_api_request( 'ListActiveVolumes', {}, endpoint=endpoint)['result']['volumes'] # FIXME(erlon): When we fetch only for the volume name, we miss # volumes that where brought to Cinder via cinder-manage. vlist = ( [sfvol for sfvol in vols for cv in cinder_volumes if cv['id'] in sfvol['name']]) for v in vlist: v['cinder_id'] = v['name'].split( self.configuration.sf_volume_prefix)[1] return vlist # Generic Volume Groups. def create_group(self, ctxt, group): # SolidFire does not have the concept of volume groups. We're going to # play along with the group song and dance. There will be a lot of # no-ops because of this. if volume_utils.is_group_a_cg_snapshot_type(group): return {'status': fields.GroupStatus.AVAILABLE} # Blatantly ripping off this pattern from other drivers. raise NotImplementedError() def create_group_from_src(self, ctxt, group, volumes, group_snapshots=None, snapshots=None, source_group=None, source_vols=None): # At this point this is just a pass-through. 
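# Only groups whose group type volume_utils.is_group_a_cg_snapshot_type()
# recognises as a consistent snapshot group take the consistency-group
# path below; any other group type raises NotImplementedError so the
# generic group handling further up the stack applies instead.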
if volume_utils.is_group_a_cg_snapshot_type(group): return self._create_consistencygroup_from_src( ctxt, group, volumes, group_snapshots, snapshots, source_group, source_vols) # Default implementation handles other scenarios. raise NotImplementedError() def create_group_snapshot(self, ctxt, group_snapshot, snapshots): # This is a pass-through to the old consistency group stuff. if volume_utils.is_group_a_cg_snapshot_type(group_snapshot): return self._create_cgsnapshot(ctxt, group_snapshot, snapshots) # Default implementation handles other scenarios. raise NotImplementedError() def delete_group(self, ctxt, group, volumes): # Delete a volume group. SolidFire does not track volume groups, # however we do need to actually remove the member volumes of the # group. Right now only consistent volume groups are supported. if volume_utils.is_group_a_cg_snapshot_type(group): return self._delete_consistencygroup(ctxt, group, volumes) # Default implementation handles other scenarios. raise NotImplementedError() def update_group(self, ctxt, group, add_volumes=None, remove_volumes=None): # Regarding consistency groups SolidFire does not track volumes, so # this is a no-op. In the future with replicated volume groups this # might actually do something. if volume_utils.is_group_a_cg_snapshot_type(group): return self._update_consistencygroup(ctxt, group, add_volumes, remove_volumes) # Default implementation handles other scenarios. raise NotImplementedError() def _create_consistencygroup_from_src(self, ctxt, group, volumes, cgsnapshot, snapshots, source_cg, source_vols): if cgsnapshot and snapshots: sf_name = self.configuration.sf_volume_prefix + cgsnapshot['id'] sf_group_snap = self._get_group_snapshot_by_name(sf_name) # Go about creating volumes from provided snaps. vol_models = [] for vol, snap in zip(volumes, snapshots): vol_models.append(self._create_clone_from_sf_snapshot( snap['volume_id'], snap['id'], sf_group_snap, vol)) return ({'status': fields.GroupStatus.AVAILABLE}, vol_models) elif source_cg and source_vols: # Create temporary group snapshot. gsnap_name = self._create_temp_group_snapshot(source_cg, source_vols) try: sf_group_snap = self._get_group_snapshot_by_name(gsnap_name) # For each temporary snapshot clone the volume. vol_models = [] for vol in volumes: vol_models.append(self._create_clone_from_sf_snapshot( vol['source_volid'], vol['source_volid'], sf_group_snap, vol)) finally: self._delete_cgsnapshot_by_name(gsnap_name) return {'status': fields.GroupStatus.AVAILABLE}, vol_models def _create_cgsnapshot(self, ctxt, cgsnapshot, snapshots): vol_ids = [snapshot['volume_id'] for snapshot in snapshots] vol_names = [self.configuration.sf_volume_prefix + vol_id for vol_id in vol_ids] active_sf_vols = self._get_all_active_volumes() target_vols = [vol for vol in active_sf_vols if vol['name'] in vol_names] if len(snapshots) != len(target_vols): msg = (_("Retrieved a different amount of SolidFire volumes for " "the provided Cinder snapshots. Retrieved: %(ret)s " "Desired: %(des)s") % {"ret": len(target_vols), "des": len(snapshots)}) raise SolidFireDriverException(msg) snap_name = self.configuration.sf_volume_prefix + cgsnapshot['id'] self._sf_create_group_snapshot(snap_name, target_vols) return None, None def _update_consistencygroup(self, context, group, add_volumes=None, remove_volumes=None): # Similar to create_consistencygroup, SolidFire's lack of a consistency # group object means there is nothing to update on the cluster. 
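# The three None values returned below stand in for (model_update,
# add_volumes_update, remove_volumes_update); returning None for each
# leaves the manager's own defaults untouched.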
return None, None, None def _delete_cgsnapshot(self, ctxt, cgsnapshot, snapshots): snap_name = self.configuration.sf_volume_prefix + cgsnapshot['id'] self._delete_cgsnapshot_by_name(snap_name) return None, None def delete_group_snapshot(self, context, group_snapshot, snapshots): if volume_utils.is_group_a_cg_snapshot_type(group_snapshot): return self._delete_cgsnapshot(context, group_snapshot, snapshots) # Default implementation handles other scenarios. raise NotImplementedError() def _delete_consistencygroup(self, ctxt, group, volumes): # TODO(chris_morrell): exception handling and return correctly updated # volume_models. for vol in volumes: self.delete_volume(vol) return None, None def get_volume_stats(self, refresh=False): """Get volume status. If 'refresh' is True, run update first. The name is a bit misleading as the majority of the data here is cluster data """ if refresh: try: self._update_cluster_status() except SolidFireAPIException: pass LOG.debug("SolidFire cluster_stats: %s", self.cluster_stats) return self.cluster_stats def extend_volume(self, volume, new_size): """Extend an existing volume.""" sfaccount = self._get_sfaccount(volume['project_id']) params = {'accountID': sfaccount['accountID']} sf_vol = self._get_sf_volume(volume['id'], params) if sf_vol is None: LOG.error("Volume ID %s was not found on " "the SolidFire Cluster while attempting " "extend_volume operation!", volume['id']) raise exception.VolumeNotFound(volume_id=volume['id']) qos = self._retrieve_qos_setting(volume, new_size) params = { 'volumeID': sf_vol['volumeID'], 'totalSize': int(new_size * units.Gi), 'qos': qos } self._issue_api_request('ModifyVolume', params, version='5.0') rep_settings = self._retrieve_replication_settings(volume) if self.replication_enabled and rep_settings: if len(sf_vol['volumePairs']) != 1: LOG.error("Can't find remote pair while extending the " "volume or multiple replication pairs found!") raise exception.VolumeNotFound(volume_id=volume['id']) tgt_endpoint = self.cluster_pairs[0]['endpoint'] target_vol_id = sf_vol['volumePairs'][0]['remoteVolumeID'] params2 = params.copy() params2['volumeID'] = target_vol_id self._issue_api_request('ModifyVolume', params2, version='5.0', endpoint=tgt_endpoint) def _get_provisioned_capacity_iops(self): response = self._issue_api_request('ListVolumes', {}, version='8.0') volumes = response['result']['volumes'] LOG.debug("%s volumes present in cluster", len(volumes)) provisioned_cap = 0 provisioned_iops = 0 for vol in volumes: provisioned_cap += vol['totalSize'] provisioned_iops += vol['qos']['minIOPS'] return provisioned_cap, provisioned_iops def _update_cluster_status(self): """Retrieve status info for the Cluster.""" params = {} data = {} backend_name = self.configuration.safe_get('volume_backend_name') data["volume_backend_name"] = backend_name or self.__class__.__name__ data["vendor_name"] = 'SolidFire Inc' data["driver_version"] = self.VERSION data["storage_protocol"] = constants.ISCSI data['consistencygroup_support'] = True data['consistent_group_snapshot_enabled'] = True data['replication_enabled'] = self.replication_enabled if self.replication_enabled: data['replication'] = 'enabled' data['active_cluster_mvip'] = self.active_cluster['mvip'] data['reserved_percentage'] = self.configuration.reserved_percentage data['QoS_support'] = True data['multiattach'] = True try: results = self._issue_api_request('GetClusterCapacity', params, version='8.0') except SolidFireAPIException: data['total_capacity_gb'] = 0 data['free_capacity_gb'] = 0 
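# GetClusterCapacity failed: publish zeroed capacity figures together
# with the static backend info gathered above rather than leaving stale
# numbers in self.cluster_stats.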
self.cluster_stats = data return results = results['result']['clusterCapacity'] prov_cap, prov_iops = self._get_provisioned_capacity_iops() if self.configuration.sf_provisioning_calc == 'usedSpace': free_capacity = ( results['maxUsedSpace'] - results['usedSpace']) data['total_capacity_gb'] = results['maxUsedSpace'] / units.Gi data['thin_provisioning_support'] = True data['provisioned_capacity_gb'] = prov_cap / units.Gi data['max_over_subscription_ratio'] = ( self.configuration.max_over_subscription_ratio ) else: free_capacity = ( results['maxProvisionedSpace'] - results['usedSpace']) data['total_capacity_gb'] = ( results['maxProvisionedSpace'] / units.Gi) data['free_capacity_gb'] = float(free_capacity / units.Gi) if (results['uniqueBlocksUsedSpace'] == 0 or results['uniqueBlocks'] == 0 or results['zeroBlocks'] == 0 or results['nonZeroBlocks'] == 0): data['compression_percent'] = 100 data['deduplication_percent'] = 100 data['thin_provision_percent'] = 100 else: data['compression_percent'] = ( (float(results['uniqueBlocks'] * 4096) / results['uniqueBlocksUsedSpace']) * 100) data['deduplication_percent'] = ( float(results['nonZeroBlocks'] / results['uniqueBlocks']) * 100) data['thin_provision_percent'] = ( (float(results['nonZeroBlocks'] + results['zeroBlocks']) / results['nonZeroBlocks']) * 100) data['provisioned_iops'] = prov_iops data['current_iops'] = results['currentIOPS'] data['average_iops'] = results['averageIOPS'] data['max_iops'] = results['maxIOPS'] data['peak_iops'] = results['peakIOPS'] data['shared_targets'] = False self.cluster_stats = data def initialize_connection(self, volume, connector): """Initialize the connection and return connection info. Optionally checks and utilizes volume access groups. """ properties = self._sf_initialize_connection(volume, connector) properties['data']['discard'] = True return properties def terminate_connection(self, volume, properties, force): return self._sf_terminate_connection(volume, properties, force) def accept_transfer(self, context, volume, new_user, new_project): sfaccount = self._get_sfaccount(volume['project_id']) params = {'accountID': sfaccount['accountID']} sf_vol = self._get_sf_volume(volume['id'], params) if sf_vol is None: LOG.error("Volume ID %s was not found on " "the SolidFire Cluster while attempting " "accept_transfer operation!", volume['id']) raise exception.VolumeNotFound(volume_id=volume['id']) if new_project != volume['project_id']: # do a create_sfaccount here as this tenant # may not exist on the cluster yet sfaccount = self._get_create_account(new_project) params = { 'volumeID': sf_vol['volumeID'], 'accountID': sfaccount['accountID'] } self._issue_api_request('ModifyVolume', params, version='5.0') volume['project_id'] = new_project volume['user_id'] = new_user return self.target_driver.ensure_export(context, volume, None) def _setup_intercluster_volume_migration(self, src_volume, dst_cluster_ref): LOG.info("Setting up cluster migration for volume [%s]", src_volume.name) # We should be able to rollback in case something went wrong def _do_migrate_setup_rollback(src_sf_volume_id, dst_sf_volume_id): # Removing volume pair in source cluster params = {'volumeID': src_sf_volume_id} self._issue_api_request('RemoveVolumePair', params, '8.0') # Removing volume pair in destination cluster params = {'volumeID': dst_sf_volume_id} self._issue_api_request('RemoveVolumePair', params, '8.0', endpoint=dst_cluster_ref["endpoint"]) # Destination volume should also be removed. 
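# DeleteVolume only marks the SolidFire volume as deleted; the
# PurgeDeletedVolume call that follows reclaims it immediately so the
# aborted migration leaves no half-paired volume on the destination.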
self._issue_api_request('DeleteVolume', params, endpoint=dst_cluster_ref["endpoint"]) self._issue_api_request('PurgeDeletedVolume', params, endpoint=dst_cluster_ref["endpoint"]) self._get_or_create_cluster_pairing( dst_cluster_ref, check_connected=True) dst_sf_account = self._get_create_account( src_volume['project_id'], endpoint=dst_cluster_ref['endpoint']) LOG.debug("Destination account is [%s]", dst_sf_account["username"]) params = self._get_default_volume_params(src_volume, dst_sf_account) dst_volume = self._do_volume_create( dst_sf_account, params, endpoint=dst_cluster_ref['endpoint']) try: self._create_volume_pairing( src_volume, dst_volume, dst_cluster_ref) except SolidFireReplicationPairingError: with excutils.save_and_reraise_exception(): dst_sf_volid = int(dst_volume['provider_id'].split()[0]) src_sf_volid = int(src_volume['provider_id'].split()[0]) LOG.debug("Error pairing volume on remote cluster. Rolling " "back and deleting volume %(vol)s at cluster " "%(cluster)s.", {'vol': dst_sf_volid, 'cluster': dst_cluster_ref['mvip']}) _do_migrate_setup_rollback(src_sf_volid, dst_sf_volid) return dst_volume def _do_intercluster_volume_migration_data_sync(self, src_volume, src_sf_account, dst_sf_volume_id, dst_cluster_ref): params = {'volumeID': dst_sf_volume_id, 'access': 'replicationTarget'} self._issue_api_request('ModifyVolume', params, '8.0', endpoint=dst_cluster_ref['endpoint']) def _wait_sync_completed(): vol_params = None if src_sf_account: vol_params = {'accountID': src_sf_account['accountID']} sf_vol = self._get_sf_volume(src_volume.id, vol_params) state = sf_vol['volumePairs'][0]['remoteReplication']['state'] if state == 'Active': raise loopingcall.LoopingCallDone(sf_vol) LOG.debug("Waiting volume data to sync. " "Replication state is [%s]", state) try: timer = loopingcall.FixedIntervalWithTimeoutLoopingCall( _wait_sync_completed) timer.start( interval=30, timeout=self.configuration.sf_volume_pairing_timeout).wait() except loopingcall.LoopingCallTimeOut: msg = _("Timeout waiting volumes to sync.") raise SolidFireDataSyncTimeoutError(reason=msg) self._do_intercluster_volume_migration_complete_data_sync( dst_sf_volume_id, dst_cluster_ref) def _do_intercluster_volume_migration_complete_data_sync(self, sf_volume_id, cluster_ref): params = {'volumeID': sf_volume_id, 'access': 'readWrite'} self._issue_api_request('ModifyVolume', params, '8.0', endpoint=cluster_ref['endpoint']) def _cleanup_intercluster_volume_migration(self, src_volume, dst_sf_volume_id, dst_cluster_ref): src_sf_volume_id = int(src_volume['provider_id'].split()[0]) # Removing volume pair in destination cluster params = {'volumeID': dst_sf_volume_id} self._issue_api_request('RemoveVolumePair', params, '8.0', endpoint=dst_cluster_ref["endpoint"]) # Removing volume pair in source cluster params = {'volumeID': src_sf_volume_id} self._issue_api_request('RemoveVolumePair', params, '8.0') # Destination volume should also be removed. self._issue_api_request('DeleteVolume', params) self._issue_api_request('PurgeDeletedVolume', params) def _do_intercluster_volume_migration(self, volume, host, dst_config): LOG.debug("Start migrating volume [%(name)s] to cluster [%(cluster)s]", {"name": volume.name, "cluster": host["host"]}) dst_endpoint = self._build_endpoint_info(backend_conf=dst_config) LOG.debug("Destination cluster mvip is [%s]", dst_endpoint["mvip"]) dst_cluster_ref = self._create_cluster_reference(dst_endpoint) LOG.debug("Destination cluster reference created. 
API version is [%s]", dst_cluster_ref["clusterAPIVersion"]) dst_volume = self._setup_intercluster_volume_migration( volume, dst_cluster_ref) dst_sf_volume_id = int(dst_volume["provider_id"].split()[0]) # FIXME(sfernand): should pass src account to improve performance self._do_intercluster_volume_migration_data_sync( volume, None, dst_sf_volume_id, dst_cluster_ref) self._cleanup_intercluster_volume_migration( volume, dst_sf_volume_id, dst_cluster_ref) return dst_volume def migrate_volume(self, ctxt, volume, host): """Migrate a SolidFire volume to the specified host/backend""" LOG.info("Migrate volume %(vol_id)s to %(host)s.", {"vol_id": volume.id, "host": host["host"]}) if (volume.status != fields.VolumeStatus.AVAILABLE and volume.status != fields.VolumeStatus.RETYPING): msg = _("Volume status must be 'available' or 'retyping' to " "execute storage assisted migration.") LOG.error(msg) raise exception.InvalidVolume(reason=msg) if volume.is_replicated(): msg = _("Migration of replicated volumes is not allowed.") LOG.error(msg) raise exception.InvalidVolume(reason=msg) src_backend = volume_utils.extract_host( volume.host, "backend").split("@")[1] dst_backend = volume_utils.extract_host( host["host"], "backend").split("@")[1] if src_backend == dst_backend: LOG.info("Same backend, nothing to do.") return True, {} try: dst_config = volume_utils.get_backend_configuration( dst_backend, self.get_driver_options()) except exception.ConfigNotFound: msg = _("Destination backend config not found. Check if " "destination backend stanza is properly configured in " "cinder.conf, or add parameter --force-host-copy True " "to perform host-assisted migration.") raise exception.VolumeMigrationFailed(reason=msg) if self.active_cluster['mvip'] == dst_config.san_ip: LOG.info("Same cluster, nothing to do.") return True, {} else: LOG.info("Source and destination clusters are different. " "A cluster migration will be performed.") LOG.debug("Active cluster: [%(active)s], " "Destination: [%(dst)s]", {"active": self.active_cluster['mvip'], "dst": dst_config.san_ip}) updates = self._do_intercluster_volume_migration(volume, host, dst_config) LOG.info("Successfully migrated volume %(vol_id)s to %(host)s.", {"vol_id": volume.id, "host": host["host"]}) return True, updates def retype(self, ctxt, volume, new_type, diff, host): """Convert the volume to be of the new type. Returns a boolean indicating whether the retype occurred and a dict with the updates on the volume. :param ctxt: Context :param volume: A dictionary describing the volume to migrate :param new_type: A dictionary describing the volume type to convert to :param diff: A dictionary with the difference between the two types :param host: A dictionary describing the host to migrate to, where host['host'] is its name, and host['capabilities'] is a dictionary of its reported capabilities (Not Used). 
""" model_update = {} LOG.debug("Retyping volume %(vol)s to new type %(type)s", {'vol': volume.id, 'type': new_type}) sfaccount = self._get_sfaccount(volume['project_id']) params = {'accountID': sfaccount['accountID']} sf_vol = self._get_sf_volume(volume['id'], params) if sf_vol is None: raise exception.VolumeNotFound(volume_id=volume['id']) if self.replication_enabled: ctxt = context.get_admin_context() src_rep_type = self._set_rep_by_volume_type( ctxt, volume.volume_type_id) dst_rep_type = self._set_rep_by_volume_type(ctxt, new_type['id']) if src_rep_type != dst_rep_type: if dst_rep_type: rep_settings = self._retrieve_replication_settings(volume) rep_params = self._get_default_volume_params(volume) volume['volumeID'] = ( int(volume.provider_id.split()[0])) rep_updates = self._replicate_volume(volume, rep_params, sfaccount, rep_settings) else: rep_updates = self._disable_replication(volume) if rep_updates: model_update.update(rep_updates) attributes = sf_vol['attributes'] attributes['retyped_at'] = timeutils.utcnow().isoformat() params = {'volumeID': sf_vol['volumeID'], 'attributes': attributes} qos = self._set_qos_by_volume_type(ctxt, new_type['id'], volume.get('size')) if qos: params['qos'] = qos self._issue_api_request('ModifyVolume', params) return True, model_update def manage_existing(self, volume, external_ref): """Manages an existing SolidFire Volume (import to Cinder). Renames the Volume to match the expected name for the volume. Also need to consider things like QoS, Emulation, account/tenant and replication settings. """ sfid = external_ref.get('source-id', None) sfname = external_ref.get('name', None) LOG.debug("Managing volume %(id)s to ref %(ref)s", {'id': volume.id, 'ref': external_ref}) if sfid is None: raise SolidFireAPIException(_("Manage existing volume " "requires 'source-id'.")) # First get the volume on the SF cluster (MUST be active) params = {'startVolumeID': sfid, 'limit': 1} vols = self._issue_api_request( 'ListActiveVolumes', params)['result']['volumes'] sf_ref = vols[0] sfaccount = self._get_create_account(volume['project_id']) import_time = volume['created_at'].isoformat() attributes = {'uuid': volume['id'], 'is_clone': 'False', 'os_imported_at': import_time, 'old_name': sfname} params = self._get_default_volume_params(volume) params['volumeID'] = sf_ref['volumeID'] params['attributes'] = attributes params.pop('totalSize') self._issue_api_request('ModifyVolume', params, version='5.0') try: rep_updates = {} rep_settings = self._retrieve_replication_settings(volume) if self.replication_enabled and rep_settings: if len(sf_ref['volumePairs']) != 0: msg = _("Not possible to manage a volume with " "replicated pair! Please split the volume pairs.") LOG.error(msg) raise SolidFireDriverException(msg) else: params = self._get_default_volume_params(volume) params['volumeID'] = sf_ref['volumeID'] volume['volumeID'] = sf_ref['volumeID'] params['totalSize'] = sf_ref['totalSize'] rep_updates = self._replicate_volume( volume, params, sfaccount, rep_settings) except Exception: with excutils.save_and_reraise_exception(): # When the replication fails in mid process, we need to # set the volume properties the way it was before. 
LOG.error("Error trying to replicate volume %s", volume.id) params = {'volumeID': sf_ref['volumeID']} params['attributes'] = sf_ref['attributes'] self._issue_api_request('ModifyVolume', params, version='5.0') model_update = self._get_model_info(sfaccount, sf_ref['volumeID']) model_update.update(rep_updates) return model_update def manage_existing_get_size(self, volume, external_ref): """Return size of an existing LV for manage_existing. existing_ref is a dictionary of the form: {'name': } """ sfid = external_ref.get('source-id', None) if sfid is None: raise SolidFireAPIException(_("Manage existing get size " "requires 'id'.")) params = {'startVolumeID': int(sfid), 'limit': 1} vols = self._issue_api_request( 'ListActiveVolumes', params)['result']['volumes'] if len(vols) != 1: msg = _("Provided volume id does not exist on SolidFire backend.") raise SolidFireDriverException(msg) return int(math.ceil(float(vols[0]['totalSize']) / units.Gi)) def unmanage(self, volume): """Mark SolidFire Volume as unmanaged (export from Cinder).""" sfaccount = self._get_sfaccount(volume['project_id']) if sfaccount is None: LOG.error("Account for Volume ID %s was not found on " "the SolidFire Cluster while attempting " "unmanage operation!", volume['id']) raise SolidFireAPIException(_("Failed to find account " "for volume.")) params = {'accountID': sfaccount['accountID']} sf_vol = self._get_sf_volume(volume['id'], params) if sf_vol is None: raise exception.VolumeNotFound(volume_id=volume['id']) export_time = timeutils.utcnow().isoformat() attributes = sf_vol['attributes'] attributes['os_exported_at'] = export_time params = {'volumeID': int(sf_vol['volumeID']), 'attributes': attributes} self._issue_api_request('ModifyVolume', params, version='5.0') def _failover_volume(self, tgt_vol, tgt_cluster, src_vol=None): """Modify remote volume to R/W mode.""" if src_vol: # Put the src in tgt mode assuming it's still available # catch the exception if the cluster isn't available and # continue on params = {'volumeID': src_vol['volumeID'], 'access': 'replicationTarget'} try: self._issue_api_request('ModifyVolume', params) except SolidFireAPIException: # FIXME pass # Now call out to the remote and make the tgt our new src params = {'volumeID': tgt_vol['volumeID'], 'access': 'readWrite'} self._issue_api_request('ModifyVolume', params, endpoint=tgt_cluster['endpoint']) def failover(self, context, volumes, secondary_id=None, groups=None): """Failover to replication target. In order to do failback, you MUST specify the original/default cluster using secondary_id option. You can do this simply by specifying: `secondary_id=default` """ remote = None failback = False volume_updates = [] if not self.replication_enabled: LOG.error("SolidFire driver received failover_host " "request, however replication is NOT " "enabled.") raise exception.UnableToFailOver(reason=_("Failover requested " "on non replicated " "backend.")) # NOTE(erlon): For now we only support one replication target device. # So, there are two cases we have to deal with here: # 1. Caller specified a backend target to fail-over to (this must be # the backend_id as defined in replication_device. Any other values # will raise an error. If the user does not specify anything, we # also fall in this case. # 2. Caller wants to failback and therefore sets backend_id=default. 
secondary_id = secondary_id.lower() if secondary_id else None if secondary_id == "default" and not self.failed_over: msg = _("SolidFire driver received failover_host " "specifying failback to default, the " "host however is not in `failed_over` " "state.") raise exception.InvalidReplicationTarget(msg) elif secondary_id == "default" and self.failed_over: LOG.info("Failing back to primary cluster.") remote = self._create_cluster_reference() failback = True else: repl_configs = self.configuration.replication_device[0] if secondary_id and repl_configs['backend_id'] != secondary_id: msg = _("Replication id (%s) does not match the configured " "one in cinder.conf.") % secondary_id raise exception.InvalidReplicationTarget(msg) LOG.info("Failing over to secondary cluster %s.", secondary_id) remote = self.cluster_pairs[0] LOG.debug("Target cluster to failover: %s.", {'name': remote['name'], 'mvip': remote['mvip'], 'clusterAPIVersion': remote['clusterAPIVersion']}) target_vols = self._map_sf_volumes(volumes, endpoint=remote['endpoint']) LOG.debug("Total Cinder volumes found in target: %d", len(target_vols)) primary_vols = None try: primary_vols = self._map_sf_volumes(volumes) LOG.debug("Total Cinder volumes found in primary cluster: %d", len(primary_vols)) except SolidFireAPIException: # API Request failed on source. Failover/failback will skip next # calls to it. pass for v in volumes: if v['status'] == "error": LOG.debug("Skipping operation for Volume %s as it is " "on error state.", v['id']) continue target_vlist = [sfv for sfv in target_vols if sfv['cinder_id'] == v['id']] if len(target_vlist) > 0: target_vol = target_vlist[0] if primary_vols: vols = [sfv for sfv in primary_vols if sfv['cinder_id'] == v['id']] if not vols: LOG.error("SolidFire driver cannot proceed. 
" "Could not find volume %s in " "back-end storage.", v['id']) raise exception.UnableToFailOver( reason=_("Cannot find cinder volume in " "back-end storage.")) # Have at least one cinder volume in storage primary_vol = vols[0] else: primary_vol = None LOG.info('Failing-over volume %s.', v.id) LOG.debug('Target vol: %s', {'access': target_vol['access'], 'accountID': target_vol['accountID'], 'name': target_vol['name'], 'status': target_vol['status'], 'volumeID': target_vol['volumeID']}) LOG.debug('Primary vol: %s', {'access': primary_vol['access'], 'accountID': primary_vol['accountID'], 'name': primary_vol['name'], 'status': primary_vol['status'], 'volumeID': primary_vol['volumeID']}) try: self._failover_volume(target_vol, remote, primary_vol) sf_account = self._get_create_account( v.project_id, endpoint=remote['endpoint']) LOG.debug("Target account: %s", sf_account['accountID']) conn_info = self._build_connection_info( sf_account, target_vol, endpoint=remote['endpoint']) # volume status defaults to failed-over replication_status = 'failed-over' # in case of a failback, volume status must be reset to its # original state if failback: replication_status = 'enabled' vol_updates = { 'volume_id': v['id'], 'updates': { 'replication_status': replication_status } } vol_updates['updates'].update(conn_info) volume_updates.append(vol_updates) except Exception: volume_updates.append({'volume_id': v['id'], 'updates': {'status': 'error', }}) LOG.exception("Error trying to failover volume %s.", v['id']) else: volume_updates.append({'volume_id': v['id'], 'updates': {'status': 'error', }}) return '' if failback else remote['backend_id'], volume_updates, [] def failover_completed(self, context, active_backend_id=None): """Update volume node when `failover` is completed. Expects the following scenarios: 1) active_backend_id='' when failing back 2) active_backend_id= when failing over 3) When `failover` raises an Exception, this will be called with the previous active_backend_id (Will be empty string in case backend wasn't in failed-over state). """ if not active_backend_id: LOG.info("Failback completed. " "Switching active cluster back to default.") self.active_cluster = self._create_cluster_reference() self.failed_over = False # Recreating cluster pairs after a successful failback if self.configuration.replication_device: self._set_cluster_pairs() self.replication_enabled = True else: LOG.info("Failover completed. 
" "Switching active cluster to %s.", active_backend_id) self.active_cluster = self.cluster_pairs[0] self.failed_over = True def failover_host(self, context, volumes, secondary_id=None, groups=None): """Failover to replication target in non-clustered deployment.""" active_cluster_id, volume_updates, group_updates = ( self.failover(context, volumes, secondary_id, groups)) self.failover_completed(context, active_cluster_id) return active_cluster_id, volume_updates, group_updates def freeze_backend(self, context): """Freeze backend notification.""" pass def thaw_backend(self, context): """Thaw backend notification.""" pass def revert_to_snapshot(self, context, volume, snapshot): """Revert a volume to a given snapshot.""" sfaccount = self._get_sfaccount(volume.project_id) params = {'accountID': sfaccount['accountID']} sf_vol = self._get_sf_volume(volume.id, params) if sf_vol is None: LOG.error("Volume ID %s was not found on " "the SolidFire Cluster while attempting " "revert_to_snapshot operation!", volume.id) raise exception.VolumeNotFound(volume_id=volume['id']) params['volumeID'] = sf_vol['volumeID'] sf_snap_name = '%s%s' % (self.configuration.sf_volume_prefix, snapshot.id) sf_snaps = self._get_sf_snapshots(sf_vol['volumeID']) snap = next((s for s in sf_snaps if s["name"] == sf_snap_name), None) if not snap: LOG.error("Snapshot ID %s was not found on " "the SolidFire Cluster while attempting " "revert_to_snapshot operation!", snapshot.id) raise exception.VolumeSnapshotNotFound(volume_id=volume.id) params['snapshotID'] = snap['snapshotID'] params['saveCurrentState'] = 'false' self._issue_api_request('RollbackToSnapshot', params, version='6.0') class SolidFireISCSI(iscsi_driver.SanISCSITarget): def __init__(self, *args, **kwargs): super(SolidFireISCSI, self).__init__(*args, **kwargs) self.sf_driver = kwargs.get('solidfire_driver') def _do_iscsi_export(self, volume): sfaccount = self.sf_driver._get_sfaccount(volume['project_id']) model_update = {} model_update['provider_auth'] = ('CHAP %s %s' % (sfaccount['username'], sfaccount['targetSecret'])) return model_update def create_export(self, context, volume, volume_path): return self._do_iscsi_export(volume) def ensure_export(self, context, volume, volume_path): try: return self._do_iscsi_export(volume) except SolidFireAPIException: return None # Following are abc's that we make sure are caught and # paid attention to. In our case we don't use them # so just stub them out here. def remove_export(self, context, volume): pass def terminate_connection(self, volume, connector, **kwargs): pass def _sf_initialize_connection(self, volume, connector): """Initialize the connection and return connection info. Optionally checks and utilizes volume access groups. """ if self.configuration.sf_enable_vag: iqn = connector['initiator'] provider_id = volume['provider_id'] vol_id = int(provider_id.split()[0]) # safe_create_vag may opt to reuse vs create a vag, so we need to # add our vol_id. vag_id = self.sf_driver._safe_create_vag(iqn, vol_id) self.sf_driver._add_volume_to_vag(vol_id, iqn, vag_id) # Continue along with default behavior return super(SolidFireISCSI, self).initialize_connection(volume, connector) def _sf_terminate_connection(self, volume, properties, force): """Terminate the volume connection. Optionally remove volume from volume access group. If the VAG is empty then the VAG is also removed. 
""" if self.configuration.sf_enable_vag: provider_id = volume['provider_id'] vol_id = int(provider_id.split()[0]) if properties: iqn = properties['initiator'] vag = self.sf_driver._get_vags_by_name(iqn) if vag and not volume['multiattach']: # Multiattach causes problems with removing volumes from # VAGs. # Compromise solution for now is to remove multiattach # volumes from VAGs during volume deletion. vag = vag[0] vag_id = vag['volumeAccessGroupID'] if [vol_id] == vag['volumes']: self.sf_driver._remove_vag(vag_id) elif vol_id in vag['volumes']: self.sf_driver._remove_volume_from_vag(vol_id, vag_id) else: self.sf_driver._remove_volume_from_vags(vol_id) return super(SolidFireISCSI, self).terminate_connection(volume, properties, force=force) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/drivers/spdk.py0000664000175000017500000003644600000000000020650 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import json from os_brick import initiator from os_brick.initiator import connector from oslo_log import log as logging from oslo_utils import importutils from oslo_utils import units import requests from cinder.common import constants from cinder import context from cinder import exception from cinder.i18n import _ from cinder.image import image_utils from cinder import interface from cinder import utils from cinder.volume import driver from cinder.volume import volume_utils LOG = logging.getLogger(__name__) @interface.volumedriver class SPDKDriver(driver.VolumeDriver): """Executes commands relating to Volumes.""" VERSION = '1.0.0' # ThirdPartySystems wiki page CI_WIKI_NAME = "Mellanox_CI" def __init__(self, *args, **kwargs): # Parent sets db, host, _execute and base config super(SPDKDriver, self).__init__(*args, **kwargs) self.lvs = [] self.ctxt = context.get_admin_context() target_driver = ( self.target_mapping[self.configuration.safe_get('target_helper')]) LOG.debug('SPDK attempting to initialize LVM driver with the ' 'following target_driver: %s', target_driver) self.target_driver = importutils.import_object( target_driver, configuration=self.configuration, executor=self._execute) @staticmethod def get_driver_options(): return [] def _rpc_call(self, method, params=None): payload = {} payload['jsonrpc'] = '2.0' payload['id'] = 1 payload['method'] = method if params is not None: payload['params'] = params req = requests.post(self.url, data=json.dumps(payload), auth=(self.configuration.spdk_rpc_username, self.configuration.spdk_rpc_password), verify=self.configuration.driver_ssl_cert_verify, timeout=30) if not req.ok: raise exception.VolumeBackendAPIException( data=_('SPDK target responded with error: %s') % req.text) return req.json()['result'] def _update_volume_stats(self): """Retrieve stats info from volume group.""" LOG.debug('SPDK Updating volume stats') status = {'volume_backend_name': 'SPDK', 'vendor_name': 'Open Source', 'driver_version': self.VERSION, 'storage_protocol': 
constants.NVMEOF} pools_status = [] self.lvs = [] output = self._rpc_call('bdev_lvol_get_lvstores') if output: for lvs in output: pool = {} lvs_entry = {} free_size = (lvs['free_clusters'] * lvs['cluster_size'] / units.Gi) total_size = (lvs['total_data_clusters'] * lvs['cluster_size'] / units.Gi) pool["volume_backend_name"] = 'SPDK' pool["vendor_name"] = 'Open Source' pool["driver_version"] = self.VERSION pool["storage_protocol"] = constants.NVMEOF pool["total_capacity_gb"] = total_size pool["free_capacity_gb"] = free_size pool["pool_name"] = lvs['name'] pools_status.append(pool) lvs_entry['name'] = lvs['name'] lvs_entry['uuid'] = lvs['uuid'] lvs_entry['free_size'] = free_size lvs_entry['total_size'] = total_size self.lvs.append(lvs_entry) status['pools'] = pools_status self._stats = status for lvs in self.lvs: LOG.debug('SPDK lvs name: %s, total space: %s, free space: %s', lvs['name'], lvs['total_size'], lvs['free_size']) def _get_spdk_volume_name(self, name): output = self._rpc_call('bdev_get_bdevs') for bdev in output: for alias in bdev['aliases']: if name in alias: return bdev['name'] def _get_spdk_lvs_uuid(self, spdk_name): output = self._rpc_call('bdev_get_bdevs') for bdev in output: if spdk_name in bdev['name']: return bdev['driver_specific']['lvol']['lvol_store_uuid'] def _get_spdk_lvs_free_space(self, lvs_uuid): self._update_volume_stats() for lvs in self.lvs: if lvs_uuid in lvs['uuid']: return lvs['free_size'] return 0 def _delete_bdev(self, name): spdk_name = self._get_spdk_volume_name(name) if spdk_name is not None: params = {'name': spdk_name} self._rpc_call('bdev_lvol_delete', params) LOG.debug('SPDK bdev %s deleted', spdk_name) else: LOG.debug('Could not find volume %s using SPDK driver', name) def _create_volume(self, volume, snapshot=None): output = self._rpc_call('bdev_lvol_get_lvstores') for lvs in output: free_size = (lvs['free_clusters'] * lvs['cluster_size']) if free_size / units.Gi >= volume.size: if snapshot is None: params = { 'lvol_name': volume.name, 'size': volume.size * units.Gi, 'uuid': lvs['uuid']} output2 = self._rpc_call('bdev_lvol_create', params) else: snapshot_spdk_name = ( self._get_spdk_volume_name(snapshot.name)) params = { 'clone_name': volume.name, 'snapshot_name': snapshot_spdk_name} output2 = self._rpc_call('bdev_lvol_clone', params) spdk_name = self._get_spdk_volume_name(volume.name) params = {'name': spdk_name} self._rpc_call('bdev_lvol_inflate', params) if volume.size > snapshot.volume_size: params = {'name': spdk_name, 'size': volume.size * units.Gi} self._rpc_call('bdev_lvol_resize', params) LOG.debug('SPDK created lvol: %s', output2) return LOG.error('Unable to create volume using SPDK - no resources found') raise exception.VolumeBackendAPIException( data=_('Unable to create volume using SPDK' ' - no resources found')) def do_setup(self, context): try: payload = {'method': 'bdev_get_bdevs', 'jsonrpc': '2.0', 'id': 1} self.url = ('%(protocol)s://%(ip)s:%(port)s/' % {'protocol': self.configuration.spdk_rpc_protocol, 'ip': self.configuration.spdk_rpc_ip, 'port': self.configuration.spdk_rpc_port}) requests.post(self.url, data=json.dumps(payload), auth=(self.configuration.spdk_rpc_username, self.configuration.spdk_rpc_password), verify=self.configuration.driver_ssl_cert_verify, timeout=30) except Exception as err: err_msg = ( _('Could not connect to SPDK target: %(err)s') % {'err': err}) LOG.error(err_msg) raise exception.VolumeBackendAPIException(data=err_msg) def check_for_setup_error(self): """Verify that requirements are in place to use LVM 
driver.""" # If configuration is incorrect we will get exception here self._rpc_call('bdev_get_bdevs') def create_volume(self, volume): """Creates a logical volume.""" LOG.debug('SPDK create volume') return self._create_volume(volume) def delete_volume(self, volume): """Deletes a logical volume.""" LOG.debug('SPDK deleting volume %s', volume.name) self._delete_bdev(volume.name) def create_volume_from_snapshot(self, volume, snapshot): """Creates a volume from a snapshot.""" free_size = self._get_spdk_lvs_free_space( self._get_spdk_lvs_uuid( self._get_spdk_volume_name(snapshot.name))) if free_size < volume.size: raise exception.VolumeBackendAPIException( data=_('Not enough space to create snapshot with SPDK')) return self._create_volume(volume, snapshot) def create_snapshot(self, snapshot): """Creates a snapshot.""" volume = snapshot['volume'] spdk_name = self._get_spdk_volume_name(volume.name) if spdk_name is None: raise exception.VolumeBackendAPIException( data=_('Could not create snapshot with SPDK driver')) free_size = self._get_spdk_lvs_free_space( self._get_spdk_lvs_uuid(spdk_name)) if free_size < volume.size: raise exception.VolumeBackendAPIException( data=_('Not enough space to create snapshot with SPDK')) params = { 'lvol_name': spdk_name, 'snapshot_name': snapshot['name']} self._rpc_call('bdev_lvol_snapshot', params) params = {'name': spdk_name} self._rpc_call('bdev_lvol_inflate', params) def delete_snapshot(self, snapshot): """Deletes a snapshot.""" spdk_name = self._get_spdk_volume_name(snapshot.name) if spdk_name is None: return params = {'name': spdk_name} bdev = self._rpc_call('bdev_get_bdevs', params) if 'clones' in bdev[0]['driver_specific']['lvol']: for clone in bdev[0]['driver_specific']['lvol']['clones']: spdk_name = self._get_spdk_volume_name(clone) params = {'name': spdk_name} self._rpc_call('bdev_lvol_inflate', params) self._delete_bdev(snapshot.name) def create_cloned_volume(self, volume, src_volume): spdk_name = self._get_spdk_volume_name(src_volume.name) free_size = self._get_spdk_lvs_free_space( self._get_spdk_lvs_uuid(spdk_name)) # We need additional space for snapshot that will be used here if free_size < 2 * src_volume.size + volume.size: raise exception.VolumeBackendAPIException( data=_('Not enough space to clone volume with SPDK')) snapshot_name = 'snp-' + src_volume.name params = { 'lvol_name': spdk_name, 'snapshot_name': snapshot_name} self._rpc_call('bdev_lvol_snapshot', params) params = {'name': spdk_name} self._rpc_call('bdev_lvol_inflate', params) snapshot_spdk_name = self._get_spdk_volume_name(snapshot_name) params = { 'clone_name': volume.name, 'snapshot_name': snapshot_spdk_name} self._rpc_call('bdev_lvol_clone', params) spdk_name = self._get_spdk_volume_name(volume.name) params = {'name': spdk_name} self._rpc_call('bdev_lvol_inflate', params) self._delete_bdev(snapshot_name) if volume.size > src_volume.size: self.extend_volume(volume, volume.size) def copy_image_to_volume(self, context, volume, image_service, image_id, disable_sparse=False): """Fetch the image from image_service and write it to the volume.""" volume['provider_location'] = ( self.create_export(context, volume, None)['provider_location']) connection_data = self.initialize_connection(volume, None)['data'] target_connector = ( connector.InitiatorConnector.factory(initiator.NVME, utils.get_root_helper())) try: device_info = target_connector.connect_volume(connection_data) except Exception: LOG.info('Could not connect SPDK target device') return connection_data['device_path'] = 
device_info['path'] try: image_utils.fetch_to_raw(context, image_service, image_id, device_info['path'], self.configuration.volume_dd_blocksize, size=volume['size'], disable_sparse=disable_sparse) finally: target_connector.disconnect_volume(connection_data, volume) def copy_volume_to_image(self, context, volume, image_service, image_meta): """Copy the volume to the specified image.""" volume['provider_location'] = ( self.create_export(context, volume, None)['provider_location']) connection_data = self.initialize_connection(volume, None)['data'] target_connector = ( connector.InitiatorConnector.factory(initiator.NVME, utils.get_root_helper())) try: device_info = target_connector.connect_volume(connection_data) except Exception: LOG.info('Could not connect SPDK target device') return connection_data['device_path'] = device_info['path'] try: volume_utils.upload_volume(context, image_service, image_meta, device_info['path'], volume) finally: target_connector.disconnect_volume(connection_data, volume) def extend_volume(self, volume, new_size): """Extend an existing volume's size.""" spdk_name = self._get_spdk_volume_name(volume.name) params = {'name': spdk_name, 'size': new_size * units.Gi} self._rpc_call('bdev_lvol_resize', params) # ####### Interface methods for DataPath (Target Driver) ######## def ensure_export(self, context, volume): pass def create_export(self, context, volume, connector, vg=None): export_info = self.target_driver.create_export( context, volume, None) return {'provider_location': export_info['location'], 'provider_auth': export_info['auth'], } def remove_export(self, context, volume): self.target_driver.remove_export(context, volume) def initialize_connection(self, volume, connector, **kwargs): return self.target_driver.initialize_connection(volume, connector) def validate_connector(self, connector): return self.target_driver.validate_connector(connector) def terminate_connection(self, volume, connector, **kwargs): pass ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/drivers/storpool.py0000664000175000017500000004646700000000000021574 0ustar00zuulzuul00000000000000# Copyright (c) 2014 - 2019 StorPool # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """StorPool block device driver""" import platform from os_brick.initiator import storpool_utils from oslo_config import cfg from oslo_log import log as logging from oslo_utils import excutils from oslo_utils import units from cinder.common import constants from cinder import context from cinder import exception from cinder.i18n import _ from cinder import interface from cinder.volume import configuration from cinder.volume import driver from cinder.volume import volume_types LOG = logging.getLogger(__name__) storpool_opts = [ cfg.StrOpt('storpool_template', default=None, help='The StorPool template for volumes with no type.'), cfg.IntOpt('storpool_replication', default=3, help='The default StorPool chain replication value. 
' 'Used when creating a volume with no specified type if ' 'storpool_template is not set. Also used for calculating ' 'the apparent free space reported in the stats.'), ] CONF = cfg.CONF CONF.register_opts(storpool_opts, group=configuration.SHARED_CONF_GROUP) class StorPoolConfigurationInvalid(exception.CinderException): message = _("Invalid parameter %(param)s in the %(section)s section " "of the /etc/storpool.conf file: %(error)s") @interface.volumedriver class StorPoolDriver(driver.VolumeDriver): """The StorPool block device driver. Version history: .. code-block:: none 0.1.0 - Initial driver 0.2.0 - Bring the driver up to date with Kilo and Liberty: - implement volume retyping and migrations - use the driver.*VD ABC metaclasses - bugfix: fall back to the configured StorPool template 1.0.0 - Imported into OpenStack Liberty with minor fixes 1.1.0 - Bring the driver up to date with Liberty and Mitaka: - drop the CloneableVD and RetypeVD base classes - enable faster volume copying by specifying sparse_volume_copy=true in the stats report 1.1.1 - Fix the internal _storpool_client_id() method to not break on an unknown host name or UUID; thus, remove the StorPoolConfigurationMissing exception. 1.1.2 - Bring the driver up to date with Pike: do not translate the error messages 1.2.0 - Inherit from VolumeDriver, implement get_pool() 1.2.1 - Implement interface.volumedriver, add CI_WIKI_NAME, fix the docstring formatting 1.2.2 - Reintroduce the driver into OpenStack Queens, add ignore_errors to the internal _detach_volume() method 1.2.3 - Advertise some more driver capabilities. 2.0.0 - Implement revert_to_snapshot(). 2.1.0 - Use the new API client in os-brick to communicate with the StorPool API instead of packages `storpool` and `storpool.spopenstack` """ VERSION = '2.1.0' CI_WIKI_NAME = 'StorPool_distributed_storage_CI' def __init__(self, *args, **kwargs): super(StorPoolDriver, self).__init__(*args, **kwargs) self.configuration.append_config_values(storpool_opts) self._sp_config = None self._ourId = None self._ourIdInt = None self._sp_api = None self._volume_prefix = None @staticmethod def get_driver_options(): return storpool_opts def _backendException(self, e): return exception.VolumeBackendAPIException(data=str(e)) def _template_from_volume(self, volume): default = self.configuration.storpool_template vtype = volume['volume_type'] if vtype is not None: specs = volume_types.get_volume_type_extra_specs(vtype['id']) if specs is not None: return specs.get('storpool_template', default) return default def get_pool(self, volume): template = self._template_from_volume(volume) if template is None: return 'default' else: return 'template_' + template def create_volume(self, volume): size = int(volume['size']) * units.Gi name = storpool_utils.os_to_sp_volume_name( self._volume_prefix, volume['id']) template = self._template_from_volume(volume) create_request = {'name': name, 'size': size} if template is not None: create_request['template'] = template else: create_request['replication'] = \ self.configuration.storpool_replication try: self._sp_api.volume_create(create_request) except storpool_utils.StorPoolAPIError as e: raise self._backendException(e) def _storpool_client_id(self, connector): hostname = connector['host'] if hostname == self.host or hostname == CONF.host: hostname = platform.node() try: cfg = storpool_utils.get_conf(section=hostname) return int(cfg['SP_OURID']) except KeyError: return 65 except Exception as e: raise StorPoolConfigurationInvalid( section=hostname, param='SP_OURID', 
error=e) def validate_connector(self, connector): return self._storpool_client_id(connector) >= 0 def initialize_connection(self, volume, connector): return {'driver_volume_type': 'storpool', 'data': { 'client_id': self._storpool_client_id(connector), 'volume': volume['id'], 'access_mode': 'rw', }} def terminate_connection(self, volume, connector, **kwargs): pass def create_snapshot(self, snapshot): volname = storpool_utils.os_to_sp_volume_name( self._volume_prefix, snapshot['volume_id']) name = storpool_utils.os_to_sp_snapshot_name( self._volume_prefix, 'snap', snapshot['id']) try: self._sp_api.snapshot_create(volname, {'name': name}) except storpool_utils.StorPoolAPIError as e: raise self._backendException(e) def create_volume_from_snapshot(self, volume, snapshot): size = int(volume['size']) * units.Gi volname = storpool_utils.os_to_sp_volume_name( self._volume_prefix, volume['id']) name = storpool_utils.os_to_sp_snapshot_name( self._volume_prefix, 'snap', snapshot['id']) try: self._sp_api.volume_create({ 'name': volname, 'size': size, 'parent': name }) except storpool_utils.StorPoolAPIError as e: raise self._backendException(e) def create_cloned_volume(self, volume, src_vref): refname = storpool_utils.os_to_sp_volume_name( self._volume_prefix, src_vref['id']) size = int(volume['size']) * units.Gi volname = storpool_utils.os_to_sp_volume_name( self._volume_prefix, volume['id']) src_volume = self.db.volume_get( context.get_admin_context(), src_vref['id'], ) src_template = self._template_from_volume(src_volume) template = self._template_from_volume(volume) LOG.debug('clone volume id %(vol_id)r template %(template)r', { 'vol_id': volume['id'], 'template': template, }) if template == src_template: LOG.info('Using baseOn to clone a volume into the same template') try: self._sp_api.volume_create({ 'name': volname, 'size': size, 'baseOn': refname, }) except storpool_utils.StorPoolAPIError as e: raise self._backendException(e) return None snapname = storpool_utils.os_to_sp_snapshot_name( self._volume_prefix, 'clone', volume['id']) LOG.info( 'A transient snapshot for a %(src)s -> %(dst)s template change', {'src': src_template, 'dst': template}) try: self._sp_api.snapshot_create(refname, {'name': snapname}) except storpool_utils.StorPoolAPIError as e: if e.name != 'objectExists': raise self._backendException(e) try: try: self._sp_api.snapshot_update( snapname, {'template': template}, ) except storpool_utils.StorPoolAPIError as e: raise self._backendException(e) try: self._sp_api.volume_create({ 'name': volname, 'size': size, 'parent': snapname }) except storpool_utils.StorPoolAPIError as e: raise self._backendException(e) try: self._sp_api.snapshot_update( snapname, {'tags': {'transient': '1.0'}}, ) except storpool_utils.StorPoolAPIError as e: raise self._backendException(e) except Exception: with excutils.save_and_reraise_exception(): try: LOG.warning( 'Something went wrong, removing the transient snapshot' ) self._sp_api.snapshot_delete(snapname) except storpool_utils.StorPoolAPIError as e: LOG.error( 'Could not delete the %(name)s snapshot: %(err)s', {'name': snapname, 'err': str(e)} ) def create_export(self, context, volume, connector): pass def remove_export(self, context, volume): pass def delete_volume(self, volume): name = storpool_utils.os_to_sp_volume_name( self._volume_prefix, volume['id']) try: self._sp_api.volumes_reassign([{"volume": name, "detach": "all"}]) self._sp_api.volume_delete(name) except storpool_utils.StorPoolAPIError as e: if e.name == 'objectDoesNotExist': pass else: 
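# Any other StorPool API error is re-raised as a Cinder
# VolumeBackendAPIException via _backendException().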
raise self._backendException(e) def delete_snapshot(self, snapshot): name = storpool_utils.os_to_sp_snapshot_name( self._volume_prefix, 'snap', snapshot['id']) try: self._sp_api.volumes_reassign( [{"snapshot": name, "detach": "all"}]) self._sp_api.snapshot_delete(name) except storpool_utils.StorPoolAPIError as e: if e.name == 'objectDoesNotExist': pass else: raise self._backendException(e) def check_for_setup_error(self): try: self._sp_config = storpool_utils.get_conf() self._sp_api = storpool_utils.StorPoolAPI( self._sp_config["SP_API_HTTP_HOST"], self._sp_config["SP_API_HTTP_PORT"], self._sp_config["SP_AUTH_TOKEN"]) self._volume_prefix = self._sp_config.get( "SP_OPENSTACK_VOLUME_PREFIX", "os") except Exception as e: LOG.error("StorPoolDriver API initialization failed: %s", e) raise def _update_volume_stats(self): try: dl = self._sp_api.disks_list() templates = self._sp_api.volume_templates_list() except storpool_utils.StorPoolAPIError as e: raise self._backendException(e) total = 0 used = 0 free = 0 agSize = 512 * units.Mi for (id, desc) in dl.items(): if desc['generationLeft'] != -1: continue total += desc['agCount'] * agSize used += desc['agAllocated'] * agSize free += desc['agFree'] * agSize * 4096 / (4096 + 128) # Report the free space as if all new volumes will be created # with StorPool replication 3; anything else is rare. free /= self.configuration.storpool_replication space = { 'total_capacity_gb': total / units.Gi, 'free_capacity_gb': free / units.Gi, 'reserved_percentage': 0, 'multiattach': True, 'QoS_support': False, 'thick_provisioning_support': False, 'thin_provisioning_support': True, } pools = [dict(space, pool_name='default')] pools += [dict(space, pool_name='template_' + t['name'], storpool_template=t['name'] ) for t in templates] self._stats = { # Basic driver properties 'volume_backend_name': self.configuration.safe_get( 'volume_backend_name') or 'storpool', 'vendor_name': 'StorPool', 'driver_version': self.VERSION, 'storage_protocol': constants.STORPOOL, # Driver capabilities 'clone_across_pools': True, 'sparse_copy_volume': True, # The actual pools data 'pools': pools } def extend_volume(self, volume, new_size): size = int(new_size) * units.Gi name = storpool_utils.os_to_sp_volume_name( self._volume_prefix, volume['id']) try: self._sp_api.volume_update(name, {'size': size}) except storpool_utils.StorPoolAPIError as e: raise self._backendException(e) def ensure_export(self, context, volume): # Already handled by Nova's AttachDB, we hope. # Maybe it should move here, but oh well. pass def retype(self, context, volume, new_type, diff, host): update = {} if diff['encryption']: LOG.error('Retype of encryption type not supported.') return False templ = self.configuration.storpool_template repl = self.configuration.storpool_replication if diff['extra_specs']: # Check for the StorPool extra specs. We intentionally ignore any # other extra_specs because the cinder scheduler should not even # call us if there's a serious mismatch between the volume types. if diff['extra_specs'].get('volume_backend_name'): v = diff['extra_specs'].get('volume_backend_name') if v[0] != v[1]: # Retype of a volume backend not supported yet, # the volume needs to be migrated. 
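# Returning False signals that the driver could not retype the volume in
# place; Cinder then typically falls back to migrating the volume, subject
# to the request's migration policy.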
return False if diff['extra_specs'].get('storpool_template'): v = diff['extra_specs'].get('storpool_template') if v[0] != v[1]: if v[1] is not None: update['template'] = v[1] elif templ is not None: update['template'] = templ else: update['replication'] = repl if update: name = storpool_utils.os_to_sp_volume_name( self._volume_prefix, volume['id']) try: self._sp_api.volume_update(name, **update) except storpool_utils.StorPoolAPIError as e: raise self._backendException(e) return True def update_migrated_volume(self, context, volume, new_volume, original_volume_status): orig_id = volume['id'] orig_name = storpool_utils.os_to_sp_volume_name( self._volume_prefix, orig_id) temp_id = new_volume['id'] temp_name = storpool_utils.os_to_sp_volume_name( self._volume_prefix, temp_id) vols = {v['name']: True for v in self._sp_api.volumes_list()} if temp_name not in vols: LOG.error('StorPool update_migrated_volume(): it seems ' 'that the StorPool volume "%(tid)s" was not ' 'created as part of the migration from ' '"%(oid)s".', {'tid': temp_id, 'oid': orig_id}) return {'_name_id': new_volume['_name_id'] or new_volume['id']} if orig_name in vols: LOG.debug('StorPool update_migrated_volume(): both ' 'the original volume "%(oid)s" and the migrated ' 'StorPool volume "%(tid)s" seem to exist on ' 'the StorPool cluster.', {'oid': orig_id, 'tid': temp_id}) int_name = temp_name + '--temp--mig' LOG.debug('Trying to swap volume names, intermediate "%(int)s"', {'int': int_name}) try: LOG.debug('- rename "%(orig)s" to "%(int)s"', {'orig': orig_name, 'int': int_name}) self._sp_api.volume_update(orig_name, {'rename': int_name}) LOG.debug('- rename "%(temp)s" to "%(orig)s"', {'temp': temp_name, 'orig': orig_name}) self._sp_api.volume_update(temp_name, {'rename': orig_name}) LOG.debug('- rename "%(int)s" to "%(temp)s"', {'int': int_name, 'temp': temp_name}) self._sp_api.volume_update(int_name, {'rename': temp_name}) return {'_name_id': None} except storpool_utils.StorPoolAPIError as e: LOG.error('StorPool update_migrated_volume(): ' 'could not rename a volume: ' '%(err)s', {'err': e}) return {'_name_id': new_volume['_name_id'] or new_volume['id']} try: self._sp_api.volume_update(temp_name, {'rename': orig_name}) return {'_name_id': None} except storpool_utils.StorPoolAPIError as e: LOG.error('StorPool update_migrated_volume(): ' 'could not rename %(tname)s to %(oname)s: ' '%(err)s', {'tname': temp_name, 'oname': orig_name, 'err': e}) return {'_name_id': new_volume['_name_id'] or new_volume['id']} def revert_to_snapshot(self, context, volume, snapshot): volname = storpool_utils.os_to_sp_volume_name( self._volume_prefix, volume['id']) snapname = storpool_utils.os_to_sp_snapshot_name( self._volume_prefix, 'snap', snapshot['id']) try: self._sp_api.volume_revert(volname, {'toSnapshot': snapname}) except storpool_utils.StorPoolAPIError as e: LOG.error('StorPool revert_to_snapshot(): could not revert ' 'the %(vol_id)s volume to the %(snap_id)s snapshot: ' '%(err)s', {'vol_id': volume['id'], 'snap_id': snapshot['id'], 'err': e}) raise self._backendException(e) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315577.3991213 cinder-27.0.0/cinder/volume/drivers/stx/0000775000175000017500000000000000000000000020136 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/drivers/stx/__init__.py0000664000175000017500000000000000000000000022235 
0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/drivers/stx/client.py0000664000175000017500000007266500000000000022006 0ustar00zuulzuul00000000000000# Copyright 2014 Objectif Libre # Copyright 2015 Dot Hill Systems Corp. # Copyright 2016-2019 Seagate Technology or one of its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # import hashlib import math import re import time from lxml import etree from oslo_log import log as logging from oslo_utils import strutils from oslo_utils import units import requests from cinder import coordination from cinder.i18n import _ import cinder.volume.drivers.stx.exception as stx_exception from cinder.volume import volume_utils LOG = logging.getLogger(__name__) class STXClient(object, metaclass=volume_utils.TraceWrapperMetaclass): def __init__(self, host, login, password, protocol, ssl_verify): self._mgmt_ip_addrs = list(map(str.strip, host.split(','))) self._login = login self._password = password self._protocol = protocol self._session_key = None self.ssl_verify = ssl_verify self._set_host(self._mgmt_ip_addrs[0]) self._fw_type = '' self._fw_rev = 0 self._driver_name = self.__class__.__name__.split('.')[0] self._array_name = 'unknown' self._luns_in_use_by_host = {} def _set_host(self, ip_addr): self._curr_ip_addr = ip_addr self._base_url = "%s://%s/api" % (self._protocol, ip_addr) def _get_auth_token(self, xml): """Parse an XML authentication reply to extract the session key.""" self._session_key = None try: tree = etree.XML(xml) # The 'return-code' property is not valid in this context, so we # we check value of 'response-type-numeric' (0 => Success) rtn = tree.findtext(".//PROPERTY[@name='response-type-numeric']") session_key = tree.findtext(".//PROPERTY[@name='response']") if rtn == '0': self._session_key = session_key except Exception as e: msg = _("Cannot parse session key: %s") % e.msg raise stx_exception.ConnectionError(message=msg) def login(self): if self._session_key is None: return self.session_login() def session_login(self): """Authenticates the service on the device. Tries all the IP addrs listed in the san_ip parameter until a working one is found or the list is exhausted. """ try: self._get_session_key() self.get_firmware_version() if not self._array_name or self._array_name == 'unknown': self._array_name = self.get_serial_number() LOG.debug("Logged in to array %s at %s (session %s)", self._array_name, self._base_url, self._session_key) return except stx_exception.ConnectionError: not_responding = self._curr_ip_addr LOG.exception('session_login failed to connect to %s', self._curr_ip_addr) # Loop through the remaining management addresses # to find one that's up. 
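# Each remaining management address is tried in configuration order; the
# first controller that returns a session key becomes the active endpoint.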
for host in self._mgmt_ip_addrs: if host is not_responding: continue self._set_host(host) try: self._get_session_key() return except stx_exception.ConnectionError: LOG.error('Failed to connect to %s', self._curr_ip_addr) continue raise stx_exception.ConnectionError( message=_("Failed to log in to management controller")) @coordination.synchronized('{self._driver_name}-{self._array_name}') def _get_session_key(self): """Retrieve a session key from the array.""" # TODO(alee): This appears to use md5 in a security related # context in providing a session key and hashing a login and # password. This should likely be replaced by a version that # does not use md5 here. self._session_key = None digest = hashlib.md5( # nosec ("%s_%s" % (self._login, self._password)).encode('utf-8') ).hexdigest() url = self._base_url + "/login/" + digest try: if self._protocol == 'https': xml = requests.get(url, verify=self.ssl_verify, timeout=30, auth=(self._login, self._password)) else: xml = requests.get(url, verify=self.ssl_verify, timeout=30) except requests.exceptions.RequestException: msg = _("Failed to obtain MC session key") LOG.exception(msg) raise stx_exception.ConnectionError(message=msg) self._get_auth_token(xml.text.encode('utf8')) LOG.debug("session key = %s", self._session_key) if self._session_key is None: raise stx_exception.AuthenticationError def _assert_response_ok(self, tree): """Parses the XML returned by the device to check the return code. Raises a RequestError error if the return code is not 0 or if the return code is None. """ # Get the return code for the operation, raising an exception # if it is not present. return_code = tree.findtext(".//PROPERTY[@name='return-code']") if not return_code: raise stx_exception.RequestError(message="No status found") # If no error occurred, just return. if return_code == '0': return # Format a message for the status code. msg = "%s (%s)" % (tree.findtext(".//PROPERTY[@name='response']"), return_code) raise stx_exception.RequestError(message=msg) def _build_request_url(self, path, *args, **kargs): url = self._base_url + path if kargs: url += '/' + '/'.join(["%s/%s" % (k.replace('_', '-'), v) for (k, v) in kargs.items()]) if args: url += '/' + '/'.join(args) return url def _request(self, path, *args, **kargs): """Performs an API request on the array, with retry. Propagates a ConnectionError if no valid response is received from the array, e.g. if the network is down. Propagates a RequestError if the device returned a response but the status is not 0. The device error message will be used in the exception message. If the status is OK, returns the XML data for further processing. """ tries_left = 2 while tries_left > 0: try: return self._api_request(path, *args, **kargs) except stx_exception.ConnectionError as e: if tries_left < 1: LOG.error("Array Connection error: " "%s (no more retries)", e.msg) raise # Retry on any network connection errors, SSL errors, etc LOG.error("Array Connection error: %s (retrying)", e.msg) except stx_exception.RequestError as e: if tries_left < 1: LOG.error("Array Request error: %s (no more retries)", e.msg) raise # Retry specific errors which may succeed if we log in again # -10027 => The user is not recognized on this system. if '(-10027)' in e.msg: LOG.error("Array Request error: %s (retrying)", e.msg) else: raise tries_left -= 1 self.session_login() @coordination.synchronized('{self._driver_name}-{self._array_name}') def _api_request(self, path, *args, **kargs): """Performs an HTTP request on the device, with locking. 
Raises a RequestError if the device returned but the status is not 0. The device error message will be used in the exception message. If the status is OK, returns the XML data for further processing. """ url = self._build_request_url(path, *args, **kargs) # Don't log the created URL since it may contain chap secret LOG.debug("Array Request path: %s, args: %s, kargs: %s (session %s)", path, args, strutils.mask_password(kargs), self._session_key) headers = {'dataType': 'api', 'sessionKey': self._session_key} try: xml = requests.get(url, headers=headers, verify=self.ssl_verify, timeout=60) tree = etree.XML(xml.text.encode('utf8')) except Exception as e: message = _("Exception handling URL %(url)s: %(msg)s") % { 'url': url, 'msg': e} raise stx_exception.ConnectionError(message=message) if path == "/show/volumecopy-status": return tree self._assert_response_ok(tree) return tree def logout(self): pass def is_titanium(self): """True for older array firmware.""" return self._fw_type == 'T' def is_g5_fw(self): """Identify firmware updated in/after 2020. Long-deprecated commands have or will be removed. """ if self._fw_type in ['I', 'V']: return True if self._fw_type == 'G' and self._fw_rev >= 280: return True return False def create_volume(self, name, size, backend_name, backend_type): # NOTE: size is in this format: [0-9]+GiB path_dict = {'size': size} if backend_type == "linear": path_dict['vdisk'] = backend_name else: path_dict['pool'] = backend_name try: self._request("/create/volume", name, **path_dict) except stx_exception.RequestError as e: # -10186 => The specified name is already in use. # This can occur during controller failover. if '(-10186)' in e.msg: LOG.warning("Ignoring error in create volume: %s", e.msg) return None raise return None def delete_volume(self, name): try: self._request("/delete/volumes", name) except stx_exception.RequestError as e: # -10075 => The specified volume was not found. # This can occur during controller failover. if '(-10075)' in e.msg: LOG.warning("Ignorning error while deleting %(volume)s:" " %(reason)s", {'volume': name, 'reason': e.msg}) return raise def extend_volume(self, name, added_size): self._request("/expand/volume", name, size=added_size) def create_snapshot(self, volume_name, snap_name): try: self._request("/create/snapshots", snap_name, volumes=volume_name) except stx_exception.RequestError as e: # -10186 => The specified name is already in use. # This can occur during controller failover. if '(-10186)' in e.msg: LOG.warning("Ignoring error attempting to create snapshot:" " %s", e.msg) return None def delete_snapshot(self, snap_name, backend_type): try: if backend_type == 'linear': self._request("/delete/snapshot", "cleanup", snap_name) else: self._request("/delete/snapshot", snap_name) except stx_exception.RequestError as e: # -10050 => The volume was not found on this system. # This can occur during controller failover. 
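# Array error messages embed the numeric status code in parentheses, so
# specific failures are matched on substrings such as '(-10050)' below.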
if '(-10050)' in e.msg: LOG.warning("Ignoring unmap error -10050: %s", e.msg) return None raise def backend_exists(self, backend_name, backend_type): try: if backend_type == "linear": path = "/show/vdisks" else: path = "/show/pools" self._request(path, backend_name) return True except stx_exception.RequestError: return False def _get_size(self, size): return int(math.ceil(float(size) * 512 / (units.Gi))) def backend_stats(self, backend_name, backend_type): stats = {'free_capacity_gb': 0, 'total_capacity_gb': 0} prop_list = [] if backend_type == "linear": path = "/show/vdisks" prop_list = ["size-numeric", "freespace-numeric"] else: path = "/show/pools" prop_list = ["total-size-numeric", "total-avail-numeric"] tree = self._request(path, backend_name) size = tree.findtext(".//PROPERTY[@name='%s']" % prop_list[0]) if size: stats['total_capacity_gb'] = self._get_size(size) size = tree.findtext(".//PROPERTY[@name='%s']" % prop_list[1]) if size: stats['free_capacity_gb'] = self._get_size(size) return stats def list_luns_for_host(self, host): if self.is_titanium(): tree = self._request("/show/host-maps", host) else: tree = self._request("/show/maps/initiator", host) return [int(prop.text) for prop in tree.xpath( "//PROPERTY[@name='lun']")] def _get_first_available_lun_for_host(self, host): """Find next available LUN number. Returns a lun number greater than 0 which is not known to be in use between the array and the specified host. """ luns = self.list_luns_for_host(host) self._luns_in_use_by_host[host] = luns lun = 1 while True: if lun not in luns: return lun lun += 1 def _get_next_available_lun_for_host(self, host, after=0): # host can be a comma-separated list of WWPNs; we only use the first. firsthost = host.split(',')[0] LOG.debug('get_next_available_lun: host=%s, firsthost=%s, after=%d', host, firsthost, after) if after == 0: return self._get_first_available_lun_for_host(firsthost) luns = self._luns_in_use_by_host[firsthost] lun = after + 1 while lun < 1024: LOG.debug('get_next_available_lun: host=%s, trying lun %d', firsthost, lun) if lun not in luns: LOG.debug('get_next_available_lun: host=%s, RETURNING lun %d', firsthost, lun) return lun lun += 1 raise stx_exception.RequestError( message=_("No LUNs available for mapping to host %s.") % host) def _is_mapped(self, volume_name, ids): if not isinstance(ids, list): ids = [ids] try: cmd = "/show/volume-maps" if self.is_titanium() else "/show/maps" xml = self._request(cmd, volume_name) for obj in xml.xpath("//OBJECT[@basetype='volume-view-mappings']"): lun = obj.findtext("PROPERTY[@name='lun']") iid = obj.findtext("PROPERTY[@name='identifier']") if iid in ids: LOG.debug("volume '%s' is already mapped to %s at lun %s", volume_name, iid, lun) return int(lun) except Exception: LOG.exception("failed to look up mappings for volume '%s'", volume_name) raise return None @coordination.synchronized('{self._driver_name}-{self._array_name}-map') def map_volume(self, volume_name, connector, connector_element): # If multiattach enabled, its possible the volume is already mapped lun = self._is_mapped(volume_name, connector[connector_element]) if lun: return lun if connector_element == 'wwpns': lun = self._get_first_available_lun_for_host(connector['wwpns'][0]) host = ",".join(connector['wwpns']) else: host = connector['initiator'] host_status = self._check_host(host) if host_status != 0: hostname = self._safe_hostname(connector['host']) try: if self.is_g5_fw(): self._request("/set/initiator", nickname=hostname, id=host) else: self._request("/create/host", 
hostname, id=host) except stx_exception.RequestError as e: # -10058: The host identifier or nickname is already in use if '(-10058)' in e.msg: LOG.error("While trying to create host nickname" " %(nickname)s: %(error_msg)s", {'nickname': hostname, 'error_msg': e.msg}) else: raise lun = self._get_first_available_lun_for_host(host) while lun < 255: try: if self.is_g5_fw(): self._request("/map/volume", volume_name, lun=str(lun), initiator=host, access="rw") else: self._request("/map/volume", volume_name, lun=str(lun), host=host, access="rw") return lun except stx_exception.RequestError as e: # -3177 => "The specified LUN overlaps a previously defined LUN if '(-3177)' in e.msg: LOG.info("Unable to map volume" " %(volume_name)s to lun %(lun)d:" " %(reason)s", {'volume_name': volume_name, 'lun': lun, 'reason': e.msg}) lun = self._get_next_available_lun_for_host(host, after=lun) continue raise except Exception as e: LOG.error("Error while mapping volume" " %(volume_name)s to lun %(lun)d:", {'volume_name': volume_name, 'lun': lun}, e) raise raise stx_exception.RequestError( message=_("Failed to find a free LUN for host %s") % host) def unmap_volume(self, volume_name, connector, connector_element): if connector_element == 'wwpns': host = ",".join(connector['wwpns']) else: host = connector['initiator'] try: if self.is_g5_fw(): self._request("/unmap/volume", volume_name, initiator=host) else: self._request("/unmap/volume", volume_name, host=host) except stx_exception.RequestError as e: # -10050 => The volume was not found on this system. # This can occur during controller failover. if '(-10050)' in e.msg: LOG.warning("Ignoring unmap error -10050: %s", e.msg) return None raise def get_active_target_ports(self): ports = [] tree = self._request("/show/ports") for obj in tree.xpath("//OBJECT[@basetype='port']"): port = {prop.get('name'): prop.text for prop in obj.iter("PROPERTY") if prop.get('name') in ["port-type", "target-id", "status"]} if port['status'] == 'Up': ports.append(port) return ports def get_active_fc_target_ports(self): return [port['target-id'] for port in self.get_active_target_ports() if port['port-type'] == "FC"] def get_active_iscsi_target_iqns(self): return [port['target-id'] for port in self.get_active_target_ports() if port['port-type'] == "iSCSI"] def linear_copy_volume(self, src_name, dest_name, dest_bknd_name): """Copy a linear volume.""" self._request("/volumecopy", dest_name, dest_vdisk=dest_bknd_name, source_volume=src_name, prompt='yes') # The copy has started; now monitor until the operation completes. count = 0 while True: tree = self._request("/show/volumecopy-status") return_code = tree.findtext(".//PROPERTY[@name='return-code']") if return_code == '0': status = tree.findtext(".//PROPERTY[@name='progress']") progress = False if status: progress = True LOG.debug("Volume copy is in progress: %s", status) if not progress: LOG.debug("Volume copy completed: %s", status) break else: if count >= 5: LOG.error('Error in copying volume: %s', src_name) raise stx_exception.RequestError time.sleep(1) count += 1 time.sleep(5) def copy_volume(self, src_name, dest_name, dest_bknd_name, backend_type='virtual'): """Copy a linear or virtual volume.""" if backend_type == 'linear': return self.linear_copy_volume(src_name, dest_name, dest_bknd_name) # Copy a virtual volume to another in the same pool. 
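# The virtual copy is asynchronous on the array: start it, poll
# volume_copy_in_progress() every five seconds until the job disappears
# from 'show volume-copies', then confirm the destination volume exists.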
self._request("/copy/volume", src_name, name=dest_name) LOG.debug("Volume copy of source_volume: %(src_name)s to " "destination_volume: %(dest_name)s started.", {'src_name': src_name, 'dest_name': dest_name, }) # Loop until this volume copy is no longer in progress. while self.volume_copy_in_progress(src_name): time.sleep(5) # Once the copy operation is finished, check to ensure that # the volume was not deleted because of a subsequent error. An # exception will be raised if the named volume is not present. self._request("/show/volumes", dest_name) LOG.debug("Volume copy of source_volume: %(src_name)s to " "destination_volume: %(dest_name)s completed.", {'src_name': src_name, 'dest_name': dest_name, }) def volume_copy_in_progress(self, src_name): """Check if a volume copy is in progress for the named volume.""" # 'show volume-copies' always succeeds, even if none in progress. tree = self._request("/show/volume-copies") # Find 0 or 1 job(s) with source volume we're interested in q = "OBJECT[PROPERTY[@name='source-volume']/text()='%s']" % src_name joblist = tree.xpath(q) if len(joblist) == 0: return False LOG.debug("Volume copy of volume: %(src_name)s is " "%(pc)s percent completed.", {'src_name': src_name, 'pc': joblist[0].findtext("PROPERTY[@name='progress']"), }) return True def _check_host(self, host): """Return 0 if initiator id found in the array's host table.""" if self.is_g5_fw(): tree = self._request("/show/initiators") for prop in tree.xpath("//PROPERTY[@name='id' and text()='%s']" % host): return 0 return -1 # Use older syntax for older firmware tree = self._request("/show/hosts") for prop in tree.xpath("//PROPERTY[@name='host-id' and text()='%s']" % host): return 0 return -1 def _safe_hostname(self, hostname): """Modify an initiator name to match firmware requirements. Initiator name cannot include certain characters and cannot exceed 15 bytes in 'T' firmware (31 bytes in 'G' firmware). 
""" for ch in [',', '"', '\\', '<', '>']: if ch in hostname: hostname = hostname.replace(ch, '') hostname = hostname.replace('.', '_') name_limit = 15 if self.is_titanium() else 31 index = len(hostname) if index > name_limit: index = name_limit return hostname[:index] def get_active_iscsi_target_portals(self): # This function returns {'ip': status,} portals = {} prop = 'ip-address' tree = self._request("/show/ports") for el in tree.xpath("//PROPERTY[@name='primary-ip-address']"): prop = 'primary-ip-address' break iscsi_ips = [ip.text for ip in tree.xpath( "//PROPERTY[@name='%s']" % prop)] if not iscsi_ips: return portals for index, port_type in enumerate(tree.xpath( "//PROPERTY[@name='port-type' and text()='iSCSI']")): status = port_type.getparent().findtext("PROPERTY[@name='status']") if status == 'Up': portals[iscsi_ips[index]] = status return portals def get_chap_record(self, initiator_name): tree = self._request("/show/chap-records") for prop in tree.xpath("//PROPERTY[@name='initiator-name' and " "text()='%s']" % initiator_name): chap_secret = prop.getparent().findtext("PROPERTY[@name='initiator" "-secret']") return chap_secret def create_chap_record(self, initiator_name, chap_secret): self._request("/create/chap-record", name=initiator_name, secret=chap_secret) def get_serial_number(self): tree = self._request("/show/system") return tree.findtext(".//PROPERTY[@name='midplane-serial-number']") def get_owner_info(self, backend_name, backend_type): if backend_type == 'linear': tree = self._request("/show/vdisks", backend_name) else: tree = self._request("/show/pools", backend_name) return tree.findtext(".//PROPERTY[@name='owner']") def modify_volume_name(self, old_name, new_name): self._request("/set/volume", old_name, name=new_name) def get_volume_size(self, volume_name): tree = self._request("/show/volumes", volume_name) size = tree.findtext(".//PROPERTY[@name='size-numeric']") return self._get_size(size) def get_firmware_version(self): """Get the array firmware version""" tree = self._request("/show/controllers") s = tree.xpath("//PROPERTY[@name='sc-fw']")[0].text if len(s): self._fw_type = s[0] fw_rev_match = re.match('^[^0-9]*([0-9]+).*', s) if not fw_rev_match: LOG.error('firmware revision not found in "%s"', s) return s self._fw_rev = int(fw_rev_match.groups()[0]) LOG.debug("Array firmware is %s (%s%d)\n", s, self._fw_type, self._fw_rev) return s def get_volumes(self, filter_type=None): """Get a list of volumes and snapshots""" serial_number_to_name = {} result = {} # first get volume mappings so we note which volumes are mapped mapping_list = self._request("/show/maps").xpath( "//OBJECT[@name='volume-view']") maps = {} for m in mapping_list: for el in m: if el.attrib['name'] == 'volume-name': maps[el.text] = 1 volume_list = self._request("/show/volumes").xpath( "//OBJECT[@name='volume']") for v in volume_list: vol = {} for el in v: key = el.attrib['name'] value = el.text vol[key] = value name = vol['volume-name'] type = vol['volume-type'] if type == 'base': type = 'volume' sn = vol['serial-number'] wwn = vol['wwn'] pool = vol['storage-pool-name'] size = int((int(vol['size-numeric']) * 512) / 2**30) mapped = name in maps parent_sn = vol['volume-parent'] serial_number_to_name[sn] = name if filter_type: if type != filter_type: continue result[name] = { 'name': name, # 32-byte array volume name 'type': type, # 'volume' or 'snapshot' 'size': size, # size in GiB (int) 'serial': sn, # serial number 'wwn': wwn, # world wide name 'pool': pool, # storage pool name 'mapped': mapped, # is 
mapped? 'parent_serial': parent_sn, # parent serial number, or None } # Now that we've seen all the volumes, we can map the parent serial # number to a name. for v in result.values(): v['parent'] = serial_number_to_name.get(v['parent_serial'], None) return result ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/drivers/stx/common.py0000664000175000017500000006053500000000000022011 0ustar00zuulzuul00000000000000# Copyright 2014 Objectif Libre # Copyright 2015 Dot Hill Systems Corp. # Copyright 2016-2019 Seagate Technology or one of its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # """Volume driver common utilities for Seagate storage arrays.""" import base64 import uuid from oslo_config import cfg from oslo_log import log as logging from cinder import exception from cinder.i18n import _ from cinder.objects import fields from cinder.volume import configuration from cinder.volume import driver import cinder.volume.drivers.stx.client as client import cinder.volume.drivers.stx.exception as stx_exception from cinder.volume import volume_utils LOG = logging.getLogger(__name__) common_opts = [ cfg.StrOpt('seagate_pool_name', default='A', help="Pool or vdisk name to use for volume creation."), cfg.StrOpt('seagate_pool_type', choices=['linear', 'virtual'], default='virtual', help="linear (for vdisk) or virtual (for virtual pool)."), ] iscsi_opts = [ cfg.ListOpt('seagate_iscsi_ips', default=[], help="List of comma-separated target iSCSI IP addresses."), ] CONF = cfg.CONF CONF.register_opts(common_opts, group=configuration.SHARED_CONF_GROUP) CONF.register_opts(iscsi_opts, group=configuration.SHARED_CONF_GROUP) class STXCommon(object, metaclass=volume_utils.TraceWrapperMetaclass): VERSION = "2.0" stats = {} def __init__(self, config): self.config = config self.vendor_name = "Seagate" self.backend_name = self.config.seagate_pool_name self.backend_type = self.config.seagate_pool_type self.api_protocol = 'http' if self.config.driver_use_ssl: self.api_protocol = 'https' ssl_verify = self.config.driver_ssl_cert_verify if ssl_verify and self.config.driver_ssl_cert_path: ssl_verify = self.config.driver_ssl_cert_path self.client = client.STXClient(self.config.san_ip, self.config.san_login, self.config.san_password, self.api_protocol, ssl_verify) def get_version(self): return self.VERSION def do_setup(self, context): self.client_login() self._validate_backend() self._get_owner_info() self._get_serial_number() self.client_logout() def client_login(self): try: self.client.login() except stx_exception.ConnectionError as ex: msg = _("Failed to connect to %(vendor_name)s Array %(host)s: " "%(err)s") % {'vendor_name': self.vendor_name, 'host': self.config.san_ip, 'err': str(ex)} LOG.error(msg) raise stx_exception.ConnectionError(message=msg) except stx_exception.AuthenticationError: msg = _("Failed to log on %s Array " "(invalid login?).") % self.vendor_name LOG.error(msg) raise stx_exception.AuthenticationError(message=msg) def 
_get_serial_number(self): self.serialNumber = self.client.get_serial_number() def _get_owner_info(self): self.owner = self.client.get_owner_info(self.backend_name, self.backend_type) def _validate_backend(self): if not self.client.backend_exists(self.backend_name, self.backend_type): self.client_logout() raise stx_exception.InvalidBackend(backend=self.backend_name) def client_logout(self): self.client.logout() def _get_vol_name(self, volume_id): volume_name = self._encode_name(volume_id) return "v%s" % volume_name def _get_snap_name(self, snapshot_id): snapshot_name = self._encode_name(snapshot_id) return "s%s" % snapshot_name def _get_backend_volume_name(self, id, type='volume'): name = self._encode_name(id) return "%s%s" % (type[0], name) def _encode_name(self, name): """Get converted array volume name. Converts the openstack volume id from fceec30e-98bc-4ce5-85ff-d7309cc17cc2 to v_O7DDpi8TOWF_9cwnMF We convert the 128(32*4) bits of the uuid into a 24 characters long base64 encoded string. This still exceeds the limit of 20 characters in some models so we return 19 characters because the _get_{vol,snap}_name functions prepend a character. """ uuid_str = name.replace("-", "") vol_uuid = uuid.UUID('urn:uuid:%s' % uuid_str) vol_encoded = base64.urlsafe_b64encode(vol_uuid.bytes).decode('ascii') return vol_encoded[:19] def check_flags(self, options, required_flags): for flag in required_flags: if not getattr(options, flag, None): msg = _('%s configuration option is not set.') % flag LOG.error(msg) raise exception.InvalidInput(reason=msg) def create_volume(self, volume): self.client_login() # Use base64 to encode the volume name (UUID is too long) volume_name = self._get_vol_name(volume['id']) volume_size = "%dGiB" % volume['size'] LOG.debug("Create Volume having display_name: %(display_name)s " "name: %(name)s id: %(id)s size: %(size)s", {'display_name': volume['display_name'], 'name': volume['name'], 'id': volume_name, 'size': volume_size, }) try: self.client.create_volume(volume_name, volume_size, self.backend_name, self.backend_type) except stx_exception.RequestError as ex: LOG.exception("Creation of volume %s failed.", volume['id']) raise exception.Invalid(ex) finally: self.client_logout() def _assert_enough_space_for_copy(self, volume_size): """The array creates a snap pool before trying to copy the volume. The pool is 5.27GB or 20% of the volume size, whichever is larger. Verify that we have enough space for the pool and then copy. 
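        For example, copying a 100 GiB volume needs a 20 GiB snap pool
        (20% of the volume size) plus 100 GiB for the copy itself, so the
        pool must report at least 120 GiB of free capacity.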
""" pool_size = max(volume_size * 0.2, 5.27) required_size = pool_size + volume_size if required_size > self.stats['pools'][0]['free_capacity_gb']: raise stx_exception.NotEnoughSpace(backend=self.backend_name) def _assert_source_detached(self, volume): """The array requires volume to be detached before cloning.""" if (volume['status'] != "available" or volume['attach_status'] == fields.VolumeAttachStatus.ATTACHED): LOG.error("Volume must be detached for clone operation.") raise exception.VolumeAttached(volume_id=volume['id']) def create_cloned_volume(self, volume, src_vref): self.get_volume_stats(True) self._assert_enough_space_for_copy(volume['size']) self._assert_source_detached(src_vref) LOG.debug("Cloning Volume %(source_id)s to (%(dest_id)s)", {'source_id': src_vref['id'], 'dest_id': volume['id'], }) if src_vref['name_id']: orig_name = self._get_vol_name(src_vref['name_id']) else: orig_name = self._get_vol_name(src_vref['id']) dest_name = self._get_vol_name(volume['id']) self.client_login() try: self.client.copy_volume(orig_name, dest_name, self.backend_name, self.backend_type) except stx_exception.RequestError as ex: LOG.exception("Cloning of volume %s failed.", src_vref['id']) raise exception.Invalid(ex) finally: self.client_logout() if volume['size'] > src_vref['size']: self.extend_volume(volume, volume['size']) def create_volume_from_snapshot(self, volume, snapshot): self.get_volume_stats(True) self._assert_enough_space_for_copy(volume['size']) LOG.debug("Creating Volume from snapshot %(source_id)s to " "(%(dest_id)s)", {'source_id': snapshot['id'], 'dest_id': volume['id'], }) orig_name = self._get_snap_name(snapshot['id']) dest_name = self._get_vol_name(volume['id']) self.client_login() try: self.client.copy_volume(orig_name, dest_name, self.backend_name, self.backend_type) except stx_exception.RequestError as ex: LOG.exception("Create volume failed from snapshot: %s", snapshot['id']) raise exception.Invalid(ex) finally: self.client_logout() if volume['size'] > snapshot['volume_size']: self.extend_volume(volume, volume['size']) def delete_volume(self, volume): LOG.debug("Deleting Volume: %s", volume['id']) if volume['name_id']: volume_name = self._get_vol_name(volume['name_id']) else: volume_name = self._get_vol_name(volume['id']) self.client_login() try: self.client.delete_volume(volume_name) except stx_exception.RequestError as ex: # if the volume wasn't found, ignore the error if 'The volume was not found on this system.' 
in ex.args: return LOG.exception("Deletion of volume %s failed.", volume['id']) raise exception.Invalid(ex) finally: self.client_logout() def get_volume_stats(self, refresh): if refresh: self.client_login() try: self._update_volume_stats() finally: self.client_logout() return self.stats def _update_volume_stats(self): # storage_protocol and volume_backend_name are # set in the child classes stats = {'driver_version': self.VERSION, 'storage_protocol': None, 'vendor_name': self.vendor_name, 'volume_backend_name': None, 'pools': []} pool = {'QoS_support': False, 'multiattach': True} try: src_type = "%sVolumeDriver" % self.vendor_name backend_stats = self.client.backend_stats(self.backend_name, self.backend_type) pool.update(backend_stats) pool['location_info'] = ('%s:%s:%s:%s' % (src_type, self.serialNumber, self.backend_name, self.owner)) pool['pool_name'] = self.backend_name except stx_exception.RequestError: err = (_("Unable to get stats for backend_name: %s") % self.backend_name) LOG.exception(err) raise exception.Invalid(err) stats['pools'].append(pool) self.stats = stats def _assert_connector_ok(self, connector, connector_element): if not connector[connector_element]: msg = _("Connector does not provide: %s") % connector_element LOG.error(msg) raise exception.InvalidInput(reason=msg) def map_volume(self, volume, connector, connector_element): self._assert_connector_ok(connector, connector_element) if volume['name_id']: volume_name = self._get_vol_name(volume['name_id']) else: volume_name = self._get_vol_name(volume['id']) try: data = self.client.map_volume(volume_name, connector, connector_element) return data except stx_exception.RequestError as ex: LOG.exception("Error mapping volume: %s", volume_name) raise exception.Invalid(ex) def unmap_volume(self, volume, connector, connector_element): self._assert_connector_ok(connector, connector_element) if volume['name_id']: volume_name = self._get_vol_name(volume['name_id']) else: volume_name = self._get_vol_name(volume['id']) self.client_login() try: self.client.unmap_volume(volume_name, connector, connector_element) except stx_exception.RequestError as ex: LOG.exception("Error unmapping volume: %s", volume_name) raise exception.Invalid(ex) finally: self.client_logout() def get_active_fc_target_ports(self): try: return self.client.get_active_fc_target_ports() except stx_exception.RequestError as ex: LOG.exception("Error getting active FC target ports.") raise exception.Invalid(ex) def get_active_iscsi_target_iqns(self): try: return self.client.get_active_iscsi_target_iqns() except stx_exception.RequestError as ex: LOG.exception("Error getting active ISCSI target iqns.") raise exception.Invalid(ex) def get_active_iscsi_target_portals(self): try: return self.client.get_active_iscsi_target_portals() except stx_exception.RequestError as ex: LOG.exception("Error getting active ISCSI target portals.") raise exception.Invalid(ex) def create_snapshot(self, snapshot): LOG.debug("Creating snapshot (%(snap_id)s) from %(volume_id)s)", {'snap_id': snapshot['id'], 'volume_id': snapshot['volume_id'], }) if snapshot['volume']['name_id']: vol_name = self._get_vol_name(snapshot['volume']['name_id']) else: vol_name = self._get_vol_name(snapshot['volume_id']) snap_name = self._get_snap_name(snapshot['id']) self.client_login() try: self.client.create_snapshot(vol_name, snap_name) except stx_exception.RequestError as ex: LOG.exception("Creation of snapshot failed for volume: %s", snapshot['volume_id']) raise exception.Invalid(ex) finally: self.client_logout() 
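    # For reference, the 'location_info' assembled in _update_volume_stats()
    # above has the form '<vendor>VolumeDriver:<serial>:<backend>:<owner>',
    # e.g. 'SeagateVolumeDriver:00C0FF0000000000:A:A' (hypothetical values);
    # migrate_volume() below splits the destination's copy of this string to
    # decide whether both volumes live on the same array.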
def delete_snapshot(self, snapshot): snap_name = self._get_snap_name(snapshot['id']) LOG.debug("Deleting snapshot (%s)", snapshot['id']) self.client_login() try: self.client.delete_snapshot(snap_name, self.backend_type) except stx_exception.RequestError as ex: # if the volume wasn't found, ignore the error if 'The volume was not found on this system.' in ex.args: return LOG.exception("Deleting snapshot %s failed", snapshot['id']) raise exception.Invalid(ex) finally: self.client_logout() def extend_volume(self, volume, new_size): if volume['name_id']: volume_name = self._get_vol_name(volume['name_id']) else: volume_name = self._get_vol_name(volume['id']) old_size = self.client.get_volume_size(volume_name) growth_size = int(new_size) - old_size LOG.debug("Extending Volume %(volume_name)s from %(old_size)s to " "%(new_size)s, by %(growth_size)s GiB.", {'volume_name': volume_name, 'old_size': old_size, 'new_size': new_size, 'growth_size': growth_size, }) if growth_size < 1: return self.client_login() try: self.client.extend_volume(volume_name, "%dGiB" % growth_size) except stx_exception.RequestError as ex: LOG.exception("Extension of volume %s failed.", volume['id']) raise exception.Invalid(ex) finally: self.client_logout() def get_chap_record(self, initiator_name): try: return self.client.get_chap_record(initiator_name) except stx_exception.RequestError as ex: LOG.exception("Error getting chap record.") raise exception.Invalid(ex) def create_chap_record(self, initiator_name, chap_secret): try: self.client.create_chap_record(initiator_name, chap_secret) except stx_exception.RequestError as ex: LOG.exception("Error creating chap record.") raise exception.Invalid(ex) def migrate_volume(self, volume, host): """Migrate directly if source and dest are managed by same storage. :param volume: A dictionary describing the volume to migrate :param host: A dictionary describing the host to migrate to, where host['host'] is its name, and host['capabilities'] is a dictionary of its reported capabilities. 
:returns: (False, None) if the driver does not support migration, (True, None) if successful """ false_ret = (False, None) if volume['attach_status'] == fields.VolumeAttachStatus.ATTACHED: return false_ret if 'location_info' not in host['capabilities']: return false_ret info = host['capabilities']['location_info'] try: (dest_type, dest_id, dest_back_name, dest_owner) = info.split(':') except ValueError: return false_ret reqd_dest_type = '%sVolumeDriver' % self.vendor_name if not (dest_type == reqd_dest_type and dest_id == self.serialNumber and dest_owner == self.owner): return false_ret if volume['name_id']: source_name = self._get_vol_name(volume['name_id']) else: source_name = self._get_vol_name(volume['id']) # the array does not support duplicate names dest_name = "m%s" % source_name[1:] self.client_login() try: self.client.copy_volume(source_name, dest_name, dest_back_name, self.backend_type) self.client.delete_volume(source_name) self.client.modify_volume_name(dest_name, source_name) return (True, None) except stx_exception.RequestError as ex: LOG.exception("Error migrating volume: %s", source_name) raise exception.Invalid(ex) finally: self.client_logout() def retype(self, volume, new_type, diff, host): ret = self.migrate_volume(volume, host) return ret[0] def manage_existing(self, volume, existing_ref): """Manage an existing non-openstack array volume existing_ref is a dictionary of the form: {'source-name': } """ target_vol_name = existing_ref['source-name'] modify_target_vol_name = self._get_vol_name(volume['id']) self.client_login() try: self.client.modify_volume_name(target_vol_name, modify_target_vol_name) except stx_exception.RequestError as ex: LOG.exception("Error manage existing volume.") raise exception.Invalid(ex) finally: self.client_logout() def manage_existing_snapshot(self, snapshot, existing_ref): """Import an existing snapshot into Cinder.""" old_snap_name = existing_ref['source-name'] new_snap_name = self._get_snap_name(snapshot.id) LOG.info("Renaming existing snapshot %(old_name)s to " "%(new_name)s", {"old_name": old_snap_name, "new_name": new_snap_name}) self.client_login() try: self.client.modify_volume_name(old_snap_name, new_snap_name) except stx_exception.RequestError as ex: LOG.exception("Error managing existing snapshot.") raise exception.Invalid(ex) finally: self.client_logout() return None def manage_existing_get_size(self, volume, existing_ref): """Return size of volume to be managed by manage_existing. existing_ref is a dictionary of the form: {'source-name': } """ target_vol_name = existing_ref['source-name'] self.client_login() try: size = self.client.get_volume_size(target_vol_name) return size except stx_exception.RequestError as ex: LOG.exception("Error manage existing get volume size.") raise exception.Invalid(ex) finally: self.client_logout() def manage_existing_snapshot_get_size(self, snapshot, existing_ref): """Return size of volume to be managed by manage_existing.""" return self.manage_existing_get_size(snapshot, existing_ref) def _get_manageable_vols(self, cinder_resources, resource_type, marker, limit, offset, sort_keys, sort_dirs): """List volumes or snapshots on the backend.""" # We can't translate a backend volume name into a Cinder id # directly, so we create a map to do it. 
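        # e.g. a volume already managed under Cinder id
        # 'fceec30e-98bc-4ce5-85ff-d7309cc17cc2' appears on the array as
        # 'v_O7DDpi8TOWF_9cwnMF' (see _encode_name), and this map lets us
        # translate that backend name back to the Cinder id.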
volume_name_to_id = {} for resource in cinder_resources: key = self._get_backend_volume_name(resource['id'], resource_type) value = resource['id'] volume_name_to_id[key] = value self.client_login() try: vols = self.client.get_volumes(filter_type=resource_type) except stx_exception.RequestError as ex: LOG.exception("Error getting manageable volumes.") raise exception.Invalid(ex) finally: self.client_logout() entries = [] for vol in vols.values(): vol_info = {'reference': {'source-name': vol['name']}, 'size': vol['size'], 'cinder_id': None, 'extra_info': None} potential_id = volume_name_to_id.get(vol['name']) if potential_id: vol_info['safe_to_manage'] = False vol_info['reason_not_safe'] = 'already managed' vol_info['cinder_id'] = potential_id elif vol['mapped']: vol_info['safe_to_manage'] = False vol_info['reason_not_safe'] = '%s in use' % resource_type else: vol_info['safe_to_manage'] = True vol_info['reason_not_safe'] = None if resource_type == 'snapshot': origin = vol['parent'] vol_info['source_reference'] = {'source-name': origin} entries.append(vol_info) return volume_utils.paginate_entries_list(entries, marker, limit, offset, sort_keys, sort_dirs) def get_manageable_volumes(self, cinder_volumes, marker, limit, offset, sort_keys, sort_dirs): return self._get_manageable_vols(cinder_volumes, 'volume', marker, limit, offset, sort_keys, sort_dirs) def get_manageable_snapshots(self, cinder_snapshots, marker, limit, offset, sort_keys, sort_dirs): return self._get_manageable_vols(cinder_snapshots, 'snapshot', marker, limit, offset, sort_keys, sort_dirs) @staticmethod def get_driver_options(): additional_opts = driver.BaseVD._get_oslo_driver_opts( 'san_ip', 'san_login', 'san_password', 'driver_use_ssl', 'driver_ssl_cert_verify', 'driver_ssl_cert_path') return common_opts + additional_opts ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/drivers/stx/exception.py0000664000175000017500000000233400000000000022510 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # from cinder import exception from cinder.i18n import _ class InvalidBackend(exception.VolumeDriverException): message = _("Backend doesn't exist (%(backend)s)") class ConnectionError(exception.VolumeDriverException): message = "%(message)s" class AuthenticationError(exception.VolumeDriverException): message = "%(message)s" class NotEnoughSpace(exception.VolumeDriverException): message = _("Not enough space on backend (%(backend)s)") class RequestError(exception.VolumeDriverException): message = "%(message)s" class NotTargetPortal(exception.VolumeDriverException): message = _("No active iSCSI portals with supplied iSCSI IPs") ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/drivers/stx/fc.py0000664000175000017500000002045500000000000021106 0ustar00zuulzuul00000000000000# Copyright 2014 Objectif Libre # Copyright 2015 Dot Hill Systems Corp. 
# Copyright 2016-2019 Seagate Technology or one of its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # from cinder.common import constants import cinder.volume.driver import cinder.volume.drivers.san.san as san import cinder.volume.drivers.stx.common as common from cinder.zonemanager import utils as fczm_utils class STXFCDriver(cinder.volume.driver.FibreChannelDriver): """OpenStack Fibre Channel cinder drivers for Seagate arrays. .. code:: text Version history: 0.1 - Base version developed for HPMSA FC drivers: "https://github.com/openstack/cinder/tree/stable/juno/ cinder/volume/drivers/san/hp" 1.0 - Version developed for DotHill arrays with the following modifications: - added support for v3 API(virtual pool feature) - added support for retype volume - added support for manage/unmanage volume - added initiator target mapping in FC zoning - added https support 1.6 - Add management path redundancy and reduce load placed on management controller. 1.7 - Modified so it can't be invoked except as a superclass 2.0 - Reworked to create a new Seagate (STX) array driver. """ VERSION = "2.0" CI_WIKI_NAME = 'Seagate_CI' def __init__(self, *args, **kwargs): super(STXFCDriver, self).__init__(*args, **kwargs) self.common = None self.configuration.append_config_values(san.san_opts) self.lookup_service = fczm_utils.create_lookup_service() if type(self) is not STXFCDriver: return self.configuration.append_config_values(common.common_opts) def _init_common(self): return common.STXCommon(self.configuration) def _check_flags(self): required_flags = ['san_ip', 'san_login', 'san_password'] self.common.check_flags(self.configuration, required_flags) def do_setup(self, context): self.common = self._init_common() self._check_flags() self.common.do_setup(context) def check_for_setup_error(self): self._check_flags() def create_volume(self, volume): self.common.create_volume(volume) def create_volume_from_snapshot(self, volume, src_vref): self.common.create_volume_from_snapshot(volume, src_vref) def create_cloned_volume(self, volume, src_vref): self.common.create_cloned_volume(volume, src_vref) def delete_volume(self, volume): self.common.delete_volume(volume) def initialize_connection(self, volume, connector): self.common.client_login() try: data = {} data['target_lun'] = self.common.map_volume(volume, connector, 'wwpns') ports, init_targ_map = self.get_init_targ_map(connector) data['target_discovered'] = True data['target_wwn'] = ports data['initiator_target_map'] = init_targ_map info = {'driver_volume_type': 'fibre_channel', 'data': data} fczm_utils.add_fc_zone(info) return info finally: self.common.client_logout() def terminate_connection(self, volume, connector, **kwargs): info = {'driver_volume_type': 'fibre_channel', 'data': {}} try: if not self.common.client.list_luns_for_host( connector['wwpns'][0]): ports, init_targ_map = self.get_init_targ_map(connector) info['data'] = {'target_wwn': ports, 'initiator_target_map': init_targ_map} # multiattach volumes cannot be unmapped here, but will # be implicity unmapped 
when the volume is deleted. if not volume.get('multiattach'): self.common.unmap_volume(volume, connector, 'wwpns') fczm_utils.remove_fc_zone(info) finally: return info def get_init_targ_map(self, connector): init_targ_map = {} target_wwns = [] ports = self.common.get_active_fc_target_ports() if self.lookup_service is not None: dev_map = self.lookup_service.get_device_mapping_from_network( connector['wwpns'], ports) for fabric_name in dev_map: fabric = dev_map[fabric_name] target_wwns += fabric['target_port_wwn_list'] for initiator in fabric['initiator_port_wwn_list']: if initiator not in init_targ_map: init_targ_map[initiator] = [] init_targ_map[initiator] += fabric['target_port_wwn_list'] init_targ_map[initiator] = list(set( init_targ_map[initiator])) target_wwns = list(set(target_wwns)) else: initiator_wwns = connector['wwpns'] target_wwns = ports for initiator in initiator_wwns: init_targ_map[initiator] = target_wwns return target_wwns, init_targ_map def get_volume_stats(self, refresh=False): stats = self.common.get_volume_stats(refresh) stats['storage_protocol'] = constants.FC stats['driver_version'] = self.VERSION backend_name = self.configuration.safe_get('volume_backend_name') stats['volume_backend_name'] = (backend_name or self.__class__.__name__) return stats def create_export(self, context, volume, connector=None): pass def ensure_export(self, context, volume): pass def remove_export(self, context, volume): pass def create_snapshot(self, snapshot): self.common.create_snapshot(snapshot) def delete_snapshot(self, snapshot): self.common.delete_snapshot(snapshot) def extend_volume(self, volume, new_size): self.common.extend_volume(volume, new_size) def retype(self, context, volume, new_type, diff, host): return self.common.retype(volume, new_type, diff, host) def manage_existing(self, volume, existing_ref): self.common.manage_existing(volume, existing_ref) def manage_existing_get_size(self, volume, existing_ref): return self.common.manage_existing_get_size(volume, existing_ref) def manage_existing_snapshot(self, snapshot, existing_ref): return self.common.manage_existing_snapshot(snapshot, existing_ref) def manage_existing_snapshot_get_size(self, snapshot, existing_ref): return self.common.manage_existing_snapshot_get_size(snapshot, existing_ref) def unmanage(self, volume): pass def unmanage_snapshot(self, snapshot): pass def get_manageable_volumes(self, cinder_volumes, marker, limit, offset, sort_keys, sort_dirs): return self.common.get_manageable_volumes(cinder_volumes, marker, limit, offset, sort_keys, sort_dirs) def get_manageable_snapshots(self, cinder_snapshots, marker, limit, offset, sort_keys, sort_dirs): return self.common.get_manageable_snapshots(cinder_snapshots, marker, limit, offset, sort_keys, sort_dirs) @staticmethod def get_driver_options(): return common.STXCommon.get_driver_options() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/drivers/stx/iscsi.py0000664000175000017500000002206000000000000021622 0ustar00zuulzuul00000000000000# Copyright 2014 Objectif Libre # Copyright 2015 Dot Hill Systems Corp. # Copyright 2016-2019 Seagate Technology or one of its affiliates # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # from oslo_log import log as logging from cinder.common import constants from cinder import exception from cinder.i18n import _ import cinder.volume.driver import cinder.volume.drivers.san.san as san import cinder.volume.drivers.stx.common as common import cinder.volume.drivers.stx.exception as stx_exception DEFAULT_ISCSI_PORT = "3260" LOG = logging.getLogger(__name__) class STXISCSIDriver(cinder.volume.driver.ISCSIDriver): """OpenStack iSCSI Cinder driver for Seagate storage arrays. .. code:: text Version history: 0.1 - Base structure for DotHill iSCSI drivers based on HPMSA FC drivers: "https://github.com/openstack/cinder/tree/stable/juno/ cinder/volume/drivers/san/hp" 1.0 - Version developed for DotHill arrays with the following modifications: - added iSCSI support - added CHAP support in iSCSI - added support for v3 API(virtual pool feature) - added support for retype volume - added support for manage/unmanage volume - added https support 1.6 - Add management path redundancy and reduce load placed on management controller. 1.7 - Modified so it can't be invoked except as a superclass 2.0 - Reworked to create a new Seagate (STX) array driver. """ VERSION = "2.0" CI_WIKI_NAME = 'Seagate_CI' def __init__(self, *args, **kwargs): super(STXISCSIDriver, self).__init__(*args, **kwargs) self.common = None self.configuration.append_config_values(san.san_opts) if type(self) is not STXISCSIDriver: return self.configuration.append_config_values(common.common_opts) self.configuration.append_config_values(common.iscsi_opts) self.iscsi_ips = self.configuration.seagate_iscsi_ips def _init_common(self): return common.STXCommon(self.configuration) def _check_flags(self): required_flags = ['san_ip', 'san_login', 'san_password'] self.common.check_flags(self.configuration, required_flags) def do_setup(self, context): self.common = self._init_common() self._check_flags() self.common.do_setup(context) self.initialize_iscsi_ports() def initialize_iscsi_ports(self): iscsi_ips = [] if self.iscsi_ips: for ip_addr in self.iscsi_ips: ip = ip_addr.split(':') if len(ip) == 1: iscsi_ips.append([ip_addr, DEFAULT_ISCSI_PORT]) elif len(ip) == 2: iscsi_ips.append([ip[0], ip[1]]) else: msg = _("Invalid IP address format: '%s'") % ip_addr LOG.error(msg) raise exception.InvalidInput(reason=(msg)) self.iscsi_ips = iscsi_ips else: msg = _('At least one valid iSCSI IP address must be set.') LOG.error(msg) raise exception.InvalidInput(reason=(msg)) def check_for_setup_error(self): self._check_flags() def create_volume(self, volume): self.common.create_volume(volume) def create_volume_from_snapshot(self, volume, src_vref): self.common.create_volume_from_snapshot(volume, src_vref) def create_cloned_volume(self, volume, src_vref): self.common.create_cloned_volume(volume, src_vref) def delete_volume(self, volume): self.common.delete_volume(volume) def initialize_connection(self, volume, connector): self.common.client_login() try: data = {} data['target_lun'] = self.common.map_volume(volume, connector, 'initiator') iqns = self.common.get_active_iscsi_target_iqns() data['target_discovered'] = True data['target_iqn'] = iqns[0] 
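            # get_active_iscsi_target_portals() returns {ip: 'Up'} for each
            # active iSCSI port; the first entry of seagate_iscsi_ips found
            # in that dict is used as the target portal, e.g.
            # '10.0.0.10:3260' (hypothetical address, default port 3260).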
iscsi_portals = self.common.get_active_iscsi_target_portals() for ip_port in self.iscsi_ips: if (ip_port[0] in iscsi_portals): data['target_portal'] = ":".join(ip_port) break if 'target_portal' not in data: raise stx_exception.NotTargetPortal() if self.configuration.use_chap_auth: chap_secret = self.common.get_chap_record( connector['initiator'] ) if not chap_secret: chap_secret = self.create_chap_record( connector['initiator'] ) data['auth_password'] = chap_secret data['auth_username'] = connector['initiator'] data['auth_method'] = 'CHAP' info = {'driver_volume_type': 'iscsi', 'data': data} return info finally: self.common.client_logout() def terminate_connection(self, volume, connector, **kwargs): if type(connector) is dict and 'initiator' in connector: # multiattach volumes cannot be unmapped here, but will # be implicity unmapped when the volume is deleted. if not volume.get('multiattach'): self.common.unmap_volume(volume, connector, 'initiator') def get_volume_stats(self, refresh=False): stats = self.common.get_volume_stats(refresh) stats['storage_protocol'] = constants.ISCSI stats['driver_version'] = self.VERSION backend_name = self.configuration.safe_get('volume_backend_name') stats['volume_backend_name'] = (backend_name or self.__class__.__name__) return stats def create_export(self, context, volume, connector=None): pass def ensure_export(self, context, volume): pass def remove_export(self, context, volume): pass def create_snapshot(self, snapshot): self.common.create_snapshot(snapshot) def delete_snapshot(self, snapshot): self.common.delete_snapshot(snapshot) def extend_volume(self, volume, new_size): self.common.extend_volume(volume, new_size) def create_chap_record(self, initiator_name): chap_secret = self.configuration.chap_password # Chap secret length should be 12 to 16 characters if 12 <= len(chap_secret) <= 16: self.common.create_chap_record(initiator_name, chap_secret) else: msg = _('CHAP secret should be 12-16 bytes.') LOG.error(msg) raise exception.InvalidInput(reason=(msg)) return chap_secret def retype(self, context, volume, new_type, diff, host): return self.common.retype(volume, new_type, diff, host) def manage_existing(self, volume, existing_ref): self.common.manage_existing(volume, existing_ref) def manage_existing_get_size(self, volume, existing_ref): return self.common.manage_existing_get_size(volume, existing_ref) def manage_existing_snapshot(self, snapshot, existing_ref): return self.common.manage_existing_snapshot(snapshot, existing_ref) def manage_existing_snapshot_get_size(self, snapshot, existing_ref): return self.common.manage_existing_snapshot_get_size(snapshot, existing_ref) def unmanage(self, volume): pass def unmanage_snapshot(self, snapshot): pass def get_manageable_volumes(self, cinder_volumes, marker, limit, offset, sort_keys, sort_dirs): return self.common.get_manageable_volumes(cinder_volumes, marker, limit, offset, sort_keys, sort_dirs) def get_manageable_snapshots(self, cinder_snapshots, marker, limit, offset, sort_keys, sort_dirs): return self.common.get_manageable_snapshots(cinder_snapshots, marker, limit, offset, sort_keys, sort_dirs) @staticmethod def get_driver_options(): return (common.STXCommon.get_driver_options() + common.iscsi_opts) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315577.3991213 cinder-27.0.0/cinder/volume/drivers/synology/0000775000175000017500000000000000000000000021203 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 
xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/drivers/synology/__init__.py0000664000175000017500000000000000000000000023302 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/drivers/synology/synology_common.py0000664000175000017500000014151000000000000025012 0ustar00zuulzuul00000000000000# Copyright (c) 2016 Synology Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import base64 import functools import hashlib import json import math from os import urandom from random import randint import re import urllib from cryptography.hazmat.backends import default_backend from cryptography.hazmat.primitives.asymmetric import padding from cryptography.hazmat.primitives.asymmetric import rsa from cryptography.hazmat.primitives.ciphers import algorithms from cryptography.hazmat.primitives.ciphers import Cipher from cryptography.hazmat.primitives.ciphers import modes import eventlet from oslo_config import cfg from oslo_log import log as logging from oslo_utils import excutils from oslo_utils import units import requests from cinder import exception from cinder.i18n import _ from cinder.objects import snapshot from cinder.objects import volume from cinder import utils from cinder.volume import configuration from cinder.volume import volume_utils cinder_opts = [ cfg.StrOpt('synology_pool_name', default='', help='Volume on Synology storage to be used for creating lun.'), cfg.PortOpt('synology_admin_port', default=5000, help='Management port for Synology storage.'), cfg.StrOpt('synology_username', default='admin', help='Administrator of Synology storage.'), cfg.StrOpt('synology_password', default='', help='Password of administrator for logging in ' 'Synology storage.', secret=True), cfg.BoolOpt('synology_ssl_verify', default=True, help='Do certificate validation or not if ' '$driver_use_ssl is True'), cfg.StrOpt('synology_one_time_pass', default=None, help='One time password of administrator for logging in ' 'Synology storage if OTP is enabled.', secret=True), cfg.StrOpt('synology_device_id', default=None, help='Device id for skip one time password check for ' 'logging in Synology storage if OTP is enabled.'), ] LOG = logging.getLogger(__name__) CONF = cfg.CONF CONF.register_opts(cinder_opts, group=configuration.SHARED_CONF_GROUP) class SynoAPIHTTPError(exception.VolumeDriverException): message = _("HTTP exit code: [%(code)s]") class SynoAuthError(exception.VolumeDriverException): message = _("Synology driver authentication failed: %(reason)s.") class SynoLUNNotExist(exception.VolumeDriverException): message = _("LUN not found by UUID: %(uuid)s.") class AESCipher(object): """Encrypt with OpenSSL-compatible way""" SALT_MAGIC = b'Salted__' def __init__(self, password, key_length=32): self._bs = 16 self._salt = urandom(self._bs - len(self.SALT_MAGIC)) self._key, self._iv = self._derive_key_and_iv(password, self._salt, key_length, self._bs) def _pad(self, s): bs 
= self._bs return (s + (bs - len(s) % bs) * chr(bs - len(s) % bs)).encode('utf-8') # TODO(alee): This probably needs to be replaced with a version that # does not use md5, as this will be disallowed on a FIPS enabled system def _derive_key_and_iv(self, password, salt, key_length, iv_length): d = d_i = b'' while len(d) < key_length + iv_length: md5_str = d_i + password + salt d_i = hashlib.md5(md5_str, usedforsecurity=True).digest() d += d_i return d[:key_length], d[key_length:key_length + iv_length] def encrypt(self, text): cipher = Cipher( algorithms.AES(self._key), modes.CBC(self._iv), backend=default_backend() ) encryptor = cipher.encryptor() ciphertext = encryptor.update(self._pad(text)) + encryptor.finalize() return self.SALT_MAGIC + self._salt + ciphertext class Session(object): def __init__(self, host, port, username, password, https=False, ssl_verify=True, one_time_pass=None, device_id=None): self._proto = 'https' if https else 'http' self._host = host self._port = port self._sess = 'dsm' self._https = https self._url_prefix = self._proto + '://' + host + ':' + str(port) self._url = self._url_prefix + '/webapi/auth.cgi' self._ssl_verify = ssl_verify self._sid = None self._did = device_id data = {'api': 'SYNO.API.Auth', 'method': 'login', 'version': 6} params = {'account': username, 'passwd': password, 'session': self._sess, 'format': 'sid'} if one_time_pass: if device_id: params.update(device_id=device_id) else: params.update(otp_code=one_time_pass, enable_device_token='yes') if not https: params = self._encrypt_params(params) data.update(params) resp = requests.post(self._url, data=data, verify=self._ssl_verify) result = resp.json() if result and result['success']: self._sid = result['data']['sid'] if one_time_pass and not device_id: self._did = result['data']['did'] else: raise SynoAuthError(reason=_('Login failed.')) def _random_AES_passphrase(self, length): available = ('0123456789' 'abcdefghijklmnopqrstuvwxyz' 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' '~!@#$%^&*()_+-/') key = b'' while length > 0: key += available[randint(0, len(available) - 1)].encode('utf-8') length -= 1 return key def _get_enc_info(self): url = self.url_prefix() + '/webapi/encryption.cgi' data = {"api": "SYNO.API.Encryption", "method": "getinfo", "version": 1, "format": "module"} resp = requests.post(url, data=data, verify=self._ssl_verify) result = resp.json() return result["data"] def _encrypt_RSA(self, modulus, passphrase, text): public_numbers = rsa.RSAPublicNumbers(passphrase, modulus) public_key = public_numbers.public_key(default_backend()) if isinstance(text, str): text = text.encode('utf-8') ciphertext = public_key.encrypt( text, padding.PKCS1v15() ) return ciphertext def _encrypt_AES(self, passphrase, text): cipher = AESCipher(passphrase) return cipher.encrypt(text) def _encrypt_params(self, params): enc_info = self._get_enc_info() public_key = enc_info["public_key"] cipher_key = enc_info["cipherkey"] cipher_token = enc_info["ciphertoken"] server_time = enc_info["server_time"] random_passphrase = self._random_AES_passphrase(501) params[cipher_token] = server_time encrypted_passphrase = self._encrypt_RSA(int(public_key, 16), int("10001", 16), random_passphrase) encrypted_params = self._encrypt_AES(random_passphrase, urllib.parse.urlencode(params)) enc_params = { "rsa": base64.b64encode(encrypted_passphrase).decode("ascii"), "aes": base64.b64encode(encrypted_params).decode("ascii") } return {cipher_key: json.dumps(enc_params)} def sid(self): return self._sid def did(self): return self._did def url_prefix(self): 
return self._url_prefix def query(self, api): url = self._url_prefix + '/webapi/query.cgi' data = {'api': 'SYNO.API.Info', 'version': 1, 'method': 'query', 'query': api} resp = requests.post(url, data=data, verify=self._ssl_verify) result = resp.json() if 'success' in result and result['success']: return result['data'][api] else: return None def __del__(self): if not hasattr(self, '_sid'): return data = {'api': 'SYNO.API.Auth', 'version': 1, 'method': 'logout', 'session': self._sess, '_sid': self._sid} requests.post(self._url, data=data, verify=self._ssl_verify) def _connection_checker(func): """Decorator to check session has expired or not.""" @functools.wraps(func) def inner_connection_checker(self, *args, **kwargs): LOG.debug('in _connection_checker') for attempts in range(2): try: return func(self, *args, **kwargs) except SynoAuthError as e: if attempts < 1: LOG.debug('Session might have expired.' ' Trying to relogin') self.new_session() continue else: LOG.error('Try to renew session: [%s]', e) raise return inner_connection_checker class APIRequest(object): def __init__(self, host, port, username, password, https=False, ssl_verify=True, one_time_pass=None, device_id=None): self._host = host self._port = port self._username = username self._password = password self._https = https self._ssl_verify = ssl_verify self._one_time_pass = one_time_pass self._device_id = device_id self.new_session() def new_session(self): self.__session = Session(self._host, self._port, self._username, self._password, self._https, self._ssl_verify, self._one_time_pass, self._device_id) if not self._device_id: self._device_id = self.__session.did() def _start(self, api, version): apiInfo = self.__session.query(api) self._jsonFormat = apiInfo['requestFormat'] == 'JSON' if (apiInfo and (apiInfo['minVersion'] <= version) and (apiInfo['maxVersion'] >= version)): return apiInfo['path'] else: raise exception.APIException(service=api) def _encode_param(self, params): # Json encode if self._jsonFormat: for key, value in params.items(): params[key] = json.dumps(value) # url encode return urllib.parse.urlencode(params) @utils.synchronized('Synology') @_connection_checker def request(self, api, method, version, **params): cgi_path = self._start(api, version) s = self.__session url = s.url_prefix() + '/webapi/' + cgi_path data = {'api': api, 'version': version, 'method': method, '_sid': s.sid() } data.update(params) LOG.debug('[%s]', url) LOG.debug('%s', json.dumps(data, indent=4)) # Send HTTP Post Request resp = requests.post(url, data=self._encode_param(data), verify=self._ssl_verify) http_status = resp.status_code result = resp.json() LOG.debug('%s', json.dumps(result, indent=4)) # Check for status code if (200 != http_status): result = {'http_status': http_status} elif 'success' not in result: reason = _("'success' not found") raise exception.MalformedResponse(cmd=json.dumps(data, indent=4), reason=reason) if ('error' in result and 'code' in result["error"] and result['error']['code'] in [105, 119]): raise SynoAuthError(reason=_('Session might have expired.')) return result class SynoCommon(object): """Manage Cinder volumes on Synology storage""" TARGET_NAME_PREFIX = 'Cinder-Target-' CINDER_LUN = 'CINDER' METADATA_DS_SNAPSHOT_UUID = 'ds_snapshot_UUID' def __init__(self, config, driver_type): if not config.safe_get('target_ip_address'): raise exception.InvalidConfigurationValue( option='target_ip_address', value='') if not config.safe_get('synology_pool_name'): raise exception.InvalidConfigurationValue( 
option='synology_pool_name', value='') self.config = config self.vendor_name = 'Synology' self.driver_type = driver_type self.volume_backend_name = self._get_backend_name() self.target_port = self.config.safe_get('target_port') api = APIRequest(self.config.target_ip_address, self.config.synology_admin_port, self.config.synology_username, self.config.synology_password, self.config.safe_get('driver_use_ssl'), self.config.safe_get('synology_ssl_verify'), self.config.safe_get('synology_one_time_pass'), self.config.safe_get('synology_device_id'),) self.synoexec = api.request self.host_uuid = self._get_node_uuid() def _get_node_uuid(self): try: out = self.exec_webapi('SYNO.Core.ISCSI.Node', 'list', 1) self.check_response(out) except Exception: with excutils.save_and_reraise_exception(): LOG.exception('Failed to _get_node_uuid.') if (not self.check_value_valid(out, ['data', 'nodes'], list) or 0 >= len(out['data']['nodes']) or not self.check_value_valid(out['data']['nodes'][0], ['uuid'], str)): msg = _('Failed to _get_node_uuid.') raise exception.VolumeDriverException(message=msg) return out['data']['nodes'][0]['uuid'] def _get_pool_info(self): pool_name = self.config.synology_pool_name if not pool_name: raise exception.InvalidConfigurationValue(option='pool_name', value='') try: out = self.exec_webapi('SYNO.Core.Storage.Volume', 'get', 1, volume_path='/' + pool_name) self.check_response(out) except Exception: with excutils.save_and_reraise_exception(): LOG.exception('Failed to _get_pool_status.') if not self.check_value_valid(out, ['data', 'volume'], object): raise exception.MalformedResponse(cmd='_get_pool_info', reason=_('no data found')) return out['data']['volume'] def _get_pool_size(self): info = self._get_pool_info() if 'size_free_byte' not in info or 'size_total_byte' not in info: raise exception.MalformedResponse(cmd='_get_pool_size', reason=_('size not found')) free_capacity_gb = int(int(info['size_free_byte']) / units.Gi) total_capacity_gb = int(int(info['size_total_byte']) / units.Gi) other_user_data_gb = int(math.ceil((float(info['size_total_byte']) - float(info['size_free_byte']) - float(info['eppool_used_byte'])) / units.Gi)) return free_capacity_gb, total_capacity_gb, other_user_data_gb def _get_pool_lun_provisioned_size(self): pool_name = self.config.synology_pool_name if not pool_name: raise exception.InvalidConfigurationValue(option='pool_name', value=pool_name) try: out = self.exec_webapi('SYNO.Core.ISCSI.LUN', 'list', 1, location='/' + pool_name) self.check_response(out) except Exception: with excutils.save_and_reraise_exception(): LOG.exception('Failed to _get_pool_lun_provisioned_size.') if not self.check_value_valid(out, ['data', 'luns'], list): raise exception.MalformedResponse( cmd='_get_pool_lun_provisioned_size', reason=_('no data found')) size = 0 for lun in out['data']['luns']: size += lun['size'] return int(math.ceil(float(size) / units.Gi)) def _get_lun_info(self, lun_name, additional=None): if not lun_name: err = _('Param [lun_name] is invalid.') raise exception.InvalidParameterValue(err=err) params = {'uuid': lun_name} if additional is not None: params['additional'] = additional try: out = self.exec_webapi('SYNO.Core.ISCSI.LUN', 'get', 1, **params) self.check_response(out, uuid=lun_name) except Exception: with excutils.save_and_reraise_exception(): LOG.exception('Failed to _get_lun_info. 
[%s]', lun_name) if not self.check_value_valid(out, ['data', 'lun'], object): raise exception.MalformedResponse(cmd='_get_lun_info', reason=_('lun info not found')) return out['data']['lun'] def _get_lun_uuid(self, lun_name): if not lun_name: err = _('Param [lun_name] is invalid.') raise exception.InvalidParameterValue(err=err) try: lun_info = self._get_lun_info(lun_name) except Exception: with excutils.save_and_reraise_exception(): LOG.exception('Failed to _get_lun_uuid. [%s]', lun_name) if not self.check_value_valid(lun_info, ['uuid'], str): raise exception.MalformedResponse(cmd='_get_lun_uuid', reason=_('uuid not found')) return lun_info['uuid'] def _get_lun_status(self, lun_name): if not lun_name: err = _('Param [lun_name] is invalid.') raise exception.InvalidParameterValue(err=err) try: lun_info = self._get_lun_info(lun_name, ['status', 'is_action_locked']) except Exception: with excutils.save_and_reraise_exception(): LOG.exception('Failed to _get_lun_status. [%s]', lun_name) if not self.check_value_valid(lun_info, ['status'], str): raise exception.MalformedResponse(cmd='_get_lun_status', reason=_('status not found')) if not self.check_value_valid(lun_info, ['is_action_locked'], bool): raise exception.MalformedResponse(cmd='_get_lun_status', reason=_('action_locked ' 'not found')) return lun_info['status'], lun_info['is_action_locked'] def _get_snapshot_info(self, snapshot_uuid, additional=None): if not snapshot_uuid: err = _('Param [snapshot_uuid] is invalid.') raise exception.InvalidParameterValue(err=err) params = {'snapshot_uuid': snapshot_uuid} if additional is not None: params['additional'] = additional try: out = self.exec_webapi('SYNO.Core.ISCSI.LUN', 'get_snapshot', 1, **params) self.check_response(out, snapshot_id=snapshot_uuid) except Exception: with excutils.save_and_reraise_exception(): LOG.exception('Failed to _get_snapshot_info. [%s]', snapshot_uuid) if not self.check_value_valid(out, ['data', 'snapshot'], object): raise exception.MalformedResponse(cmd='_get_snapshot_info', reason=_('snapshot info not ' 'found')) return out['data']['snapshot'] def _get_snapshot_status(self, snapshot_uuid): if not snapshot_uuid: err = _('Param [snapshot_uuid] is invalid.') raise exception.InvalidParameterValue(err=err) try: snapshot_info = self._get_snapshot_info(snapshot_uuid, ['status', 'is_action_locked']) except Exception: with excutils.save_and_reraise_exception(): LOG.exception('Failed to _get_snapshot_info. 
[%s]', snapshot_uuid) if not self.check_value_valid(snapshot_info, ['status'], str): raise exception.MalformedResponse(cmd='_get_snapshot_status', reason=_('status not found')) if not self.check_value_valid(snapshot_info, ['is_action_locked'], bool): raise exception.MalformedResponse(cmd='_get_snapshot_status', reason=_('action_locked ' 'not found')) return snapshot_info['status'], snapshot_info['is_action_locked'] def _get_metadata_value(self, obj, key): if key not in obj['metadata']: if isinstance(obj, volume.Volume): raise exception.VolumeMetadataNotFound( volume_id=obj['id'], metadata_key=key) elif isinstance(obj, snapshot.Snapshot): raise exception.SnapshotMetadataNotFound( snapshot_id=obj['id'], metadata_key=key) else: raise exception.MetadataAbsent() return obj['metadata'][key] def _get_backend_name(self): return self.config.safe_get('volume_backend_name') or 'Synology' def _target_create(self, identifier): if not identifier: err = _('Param [identifier] is invalid.') raise exception.InvalidParameterValue(err=err) # 0 for no auth, 1 for single chap, 2 for mutual chap auth_type = 0 chap_username = '' chap_password = '' provider_auth = '' if self.config.safe_get('use_chap_auth') and self.config.use_chap_auth: auth_type = 1 chap_username = (self.config.safe_get('chap_username') or volume_utils.generate_username(12)) chap_password = (self.config.safe_get('chap_password') or volume_utils.generate_password()) provider_auth = ' '.join(('CHAP', chap_username, chap_password)) trg_prefix = self.config.safe_get('target_prefix') trg_name = (self.TARGET_NAME_PREFIX + '%s') % identifier iqn = trg_prefix + trg_name try: out = self.exec_webapi('SYNO.Core.ISCSI.Target', 'create', 1, name=trg_name, iqn=iqn, auth_type=auth_type, user=chap_username, password=chap_password, max_sessions=0) self.check_response(out) except Exception: with excutils.save_and_reraise_exception(): LOG.exception('Failed to _target_create. [%s]', identifier) if not self.check_value_valid(out, ['data', 'target_id']): msg = _('Failed to get target_id of target [%s]') % trg_name raise exception.VolumeDriverException(message=msg) trg_id = out['data']['target_id'] return iqn, trg_id, provider_auth def _target_delete(self, trg_id): if 0 > trg_id: err = _('trg_id is invalid: %d.') % trg_id raise exception.InvalidParameterValue(err=err) try: out = self.exec_webapi('SYNO.Core.ISCSI.Target', 'delete', 1, target_id=('%d' % trg_id)) self.check_response(out) except Exception: with excutils.save_and_reraise_exception(): LOG.exception('Failed to _target_delete. [%d]', trg_id) # is_map True for map, False for ummap def _lun_map_unmap_target(self, volume_name, is_map, trg_id): if 0 > trg_id: err = _('trg_id is invalid: %d.') % trg_id raise exception.InvalidParameterValue(err=err) try: lun_uuid = self._get_lun_uuid(volume_name) out = self.exec_webapi('SYNO.Core.ISCSI.LUN', 'map_target' if is_map else 'unmap_target', 1, uuid=lun_uuid, target_ids=['%d' % trg_id]) self.check_response(out) except Exception: with excutils.save_and_reraise_exception(): LOG.exception('Failed to _lun_map_unmap_target. 
' '[%(action)s][%(vol)s].', {'action': ('map_target' if is_map else 'unmap_target'), 'vol': volume_name}) def _lun_map_target(self, volume_name, trg_id): self._lun_map_unmap_target(volume_name, True, trg_id) def _lun_unmap_target(self, volume_name, trg_id): self._lun_map_unmap_target(volume_name, False, trg_id) def _modify_lun_name(self, name, new_name): try: out = self.exec_webapi('SYNO.Core.ISCSI.LUN', 'set', 1, uuid=name, new_name=new_name) self.check_response(out) except Exception: with excutils.save_and_reraise_exception(): LOG.exception('Failed to _modify_lun_name [%s].', name) def _check_lun_status_normal(self, volume_name): status = '' try: while True: status, locked = self._get_lun_status(volume_name) if not locked: break eventlet.sleep(2) except Exception: with excutils.save_and_reraise_exception(): LOG.exception('Failed to get lun status. [%s]', volume_name) LOG.debug('Lun [%(vol)s], status [%(status)s].', {'vol': volume_name, 'status': status}) return status == 'normal' def _check_snapshot_status_healthy(self, snapshot_uuid): status = '' try: while True: status, locked = self._get_snapshot_status(snapshot_uuid) if not locked: break eventlet.sleep(2) except Exception: with excutils.save_and_reraise_exception(): LOG.exception('Failed to get snapshot status. [%s]', snapshot_uuid) LOG.debug('Lun [%(snapshot)s], status [%(status)s].', {'snapshot': snapshot_uuid, 'status': status}) return status == 'Healthy' def _check_storage_response(self, out, **kwargs): data = 'internal error' exc = exception.VolumeBackendAPIException(data=data) message = 'Internal error' return (message, exc) def _check_iscsi_response(self, out, **kwargs): LUN_BAD_LUN_UUID = 18990505 LUN_NO_SUCH_SNAPSHOT = 18990532 if not self.check_value_valid(out, ['error', 'code'], int): raise exception.MalformedResponse(cmd='_check_iscsi_response', reason=_('no error code found')) code = out['error']['code'] exc = None message = '' if code == LUN_BAD_LUN_UUID: exc = SynoLUNNotExist(**kwargs) message = 'Bad LUN UUID' elif code == LUN_NO_SUCH_SNAPSHOT: exc = exception.SnapshotNotFound(**kwargs) message = 'No such snapshot' else: data = 'internal error' exc = exception.VolumeBackendAPIException(data=data) message = 'Internal error' message = '%s [%d]' % (message, code) return (message, exc) def _check_ds_pool_status(self): pool_info = self._get_pool_info() if not self.check_value_valid(pool_info, ['readonly'], bool): raise exception.MalformedResponse(cmd='_check_ds_pool_status', reason=_('no readonly found')) if pool_info['readonly']: message = (_('pool [%s] is not writable') % self.config.synology_pool_name) raise exception.VolumeDriverException(message=message) def _check_ds_version(self): try: out = self.exec_webapi('SYNO.Core.System', 'info', 1, type='firmware') self.check_response(out) except Exception: with excutils.save_and_reraise_exception(): LOG.exception('Failed to _check_ds_version') if not self.check_value_valid(out, ['data', 'firmware_ver'], str): raise exception.MalformedResponse(cmd='_check_ds_version', reason=_('data not found')) firmware_version = out['data']['firmware_ver'] # e.g. 
'DSM 6.1-7610', 'DSM 6.0.1-7321 update 3', 'DSM UC 1.0-6789' pattern = re.compile(r"^(.*) (\d+)\.(\d+)(?:\.(\d+))?-(\d+)" r"(?: [uU]pdate (\d+))?$") matches = pattern.match(firmware_version) if not matches: m = (_('DS version %s is not supported') % firmware_version) raise exception.VolumeDriverException(message=m) os_name = matches.group(1) major = int(matches.group(2)) minor = int(matches.group(3)) hotfix = int(matches.group(4)) if matches.group(4) else 0 if os_name == 'DSM UC': return elif (os_name == 'DSM' and ((6 > major) or (major == 6 and minor == 0 and hotfix < 2))): m = (_('DS version %s is not supported') % firmware_version) raise exception.VolumeDriverException(message=m) def _check_ds_ability(self): try: out = self.exec_webapi('SYNO.Core.System', 'info', 1, type='define') self.check_response(out) except Exception: with excutils.save_and_reraise_exception(): LOG.exception('Failed to _check_ds_ability') if not self.check_value_valid(out, ['data'], dict): raise exception.MalformedResponse(cmd='_check_ds_ability', reason=_('data not found')) define = out['data'] if 'usbstation' in define and define['usbstation'] == 'yes': m = _('usbstation is not supported') raise exception.VolumeDriverException(message=m) if ('support_storage_mgr' not in define or define['support_storage_mgr'] != 'yes'): m = _('Storage Manager is not supported in DS') raise exception.VolumeDriverException(message=m) if ('support_iscsi_target' not in define or define['support_iscsi_target'] != 'yes'): m = _('iSCSI target feature is not supported in DS') raise exception.VolumeDriverException(message=m) if ('support_vaai' not in define or define['support_vaai'] != 'yes'): m = _('VAAI feature is not supported in DS') raise exception.VolumeDriverException(message=m) if ('supportsnapshot' not in define or define['supportsnapshot'] != 'yes'): m = _('Snapshot feature is not supported in DS') raise exception.VolumeDriverException(message=m) def check_response(self, out, **kwargs): if out['success']: return data = 'internal error' exc = exception.VolumeBackendAPIException(data=data) message = 'Internal error' api = out['api_info']['api'] if (api.startswith('SYNO.Core.ISCSI.')): message, exc = self._check_iscsi_response(out, **kwargs) elif (api.startswith('SYNO.Core.Storage.')): message, exc = self._check_storage_response(out, **kwargs) LOG.exception('%(message)s', {'message': message}) raise exc def exec_webapi(self, api, method, version, **kwargs): result = self.synoexec(api, method, version, **kwargs) if 'http_status' in result and 200 != result['http_status']: raise SynoAPIHTTPError(code=result['http_status']) result['api_info'] = {'api': api, 'method': method, 'version': version} return result def check_value_valid(self, obj, key_array, value_type=None): curr_obj = obj for key in key_array: if key not in curr_obj: LOG.error('key [%(key)s] is not in %(obj)s', {'key': key, 'obj': curr_obj}) return False curr_obj = curr_obj[key] if value_type and not isinstance(curr_obj, value_type): LOG.error('[%(obj)s] is %(type)s, not %(value_type)s', {'obj': curr_obj, 'type': type(curr_obj), 'value_type': value_type}) return False return True def get_ip(self): return self.config.target_ip_address def get_provider_location(self, iqn, trg_id): portals = ['%(ip)s:%(port)d' % {'ip': self.get_ip(), 'port': self.target_port}] sec_ips = self.config.safe_get('target_secondary_ip_addresses') for ip in sec_ips: portals.append('%(ip)s:%(port)d' % {'ip': ip, 'port': self.target_port}) return '%s,%d %s 0' % ( ';'.join(portals), trg_id, iqn) def 
is_lun_mapped(self, lun_name): if not lun_name: err = _('Param [lun_name] is invalid.') raise exception.InvalidParameterValue(err=err) try: lun_info = self._get_lun_info(lun_name, ['is_mapped']) except Exception: with excutils.save_and_reraise_exception(): LOG.exception('Failed to _is_lun_mapped. [%s]', lun_name) if not self.check_value_valid(lun_info, ['is_mapped'], bool): raise exception.MalformedResponse(cmd='_is_lun_mapped', reason=_('is_mapped not found')) return lun_info['is_mapped'] def check_for_setup_error(self): self._check_ds_pool_status() self._check_ds_version() self._check_ds_ability() def update_volume_stats(self): """Update volume statistics. Three kinds of data are stored on the Synology backend pool: 1. Thin volumes (LUNs on the pool), 2. Thick volumes (LUNs on the pool), 3. Other user data. other_user_data_gb is the size of the 3rd one. lun_provisioned_gb is the summation of all thin/thick volume provisioned size. Only thin type is available for Cinder volumes. """ free_gb, total_gb, other_user_data_gb = self._get_pool_size() lun_provisioned_gb = self._get_pool_lun_provisioned_size() data = {} data['volume_backend_name'] = self.volume_backend_name data['vendor_name'] = self.vendor_name data['storage_protocol'] = self.config.target_protocol data['consistencygroup_support'] = False data['QoS_support'] = False data['thin_provisioning_support'] = True data['thick_provisioning_support'] = False data['reserved_percentage'] = self.config.reserved_percentage data['free_capacity_gb'] = free_gb data['total_capacity_gb'] = total_gb data['provisioned_capacity_gb'] = (lun_provisioned_gb + other_user_data_gb) data['max_over_subscription_ratio'] = (self.config. max_over_subscription_ratio) data['target_ip_address'] = self.config.target_ip_address data['pool_name'] = self.config.synology_pool_name data['backend_info'] = ('%s:%s:%s' % (self.vendor_name, self.driver_type, self.host_uuid)) return data def create_volume(self, volume): try: out = self.exec_webapi('SYNO.Core.ISCSI.LUN', 'create', 1, name=volume['name'], type=self.CINDER_LUN, location=('/' + self.config.synology_pool_name), size=volume['size'] * units.Gi) self.check_response(out) except Exception: with excutils.save_and_reraise_exception(): LOG.exception('Failed to create_volume. [%s]', volume['name']) if not self._check_lun_status_normal(volume['name']): message = _('Lun [%s] status is not normal') % volume['name'] raise exception.VolumeDriverException(message=message) def delete_volume(self, volume): try: lun_uuid = self._get_lun_uuid(volume['name']) out = self.exec_webapi('SYNO.Core.ISCSI.LUN', 'delete', 1, uuid=lun_uuid) self.check_response(out) except SynoLUNNotExist: LOG.warning('LUN does not exist') except Exception: with excutils.save_and_reraise_exception(): LOG.exception('Failed to delete_volume. [%s]', volume['name']) def create_cloned_volume(self, volume, src_vref): try: src_lun_uuid = self._get_lun_uuid(src_vref['name']) out = self.exec_webapi('SYNO.Core.ISCSI.LUN', 'clone', 1, src_lun_uuid=src_lun_uuid, dst_lun_name=volume['name'], is_same_pool=True, clone_type='CINDER') self.check_response(out) except Exception: with excutils.save_and_reraise_exception(): LOG.exception('Failed to create_cloned_volume. 
[%s]', volume['name']) if not self._check_lun_status_normal(volume['name']): message = _('Lun [%s] status is not normal.') % volume['name'] raise exception.VolumeDriverException(message=message) if src_vref['size'] < volume['size']: self.extend_volume(volume, volume['size']) def extend_volume(self, volume, new_size): try: lun_uuid = self._get_lun_uuid(volume['name']) out = self.exec_webapi('SYNO.Core.ISCSI.LUN', 'set', 1, uuid=lun_uuid, new_size=new_size * units.Gi) self.check_response(out) except Exception as e: LOG.exception('Failed to extend_volume. [%s]', volume['name']) raise exception.ExtendVolumeError(reason=e.msg) def update_migrated_volume(self, volume, new_volume): try: self._modify_lun_name(new_volume['name'], volume['name']) except Exception: reason = _('Failed to _modify_lun_name [%s].') % new_volume['name'] raise exception.VolumeMigrationFailed(reason=reason) return {'_name_id': None} def create_snapshot(self, snapshot): desc = '(Cinder) ' + (snapshot['id'] or '') try: resp = self.exec_webapi('SYNO.Core.ISCSI.LUN', 'take_snapshot', 1, src_lun_uuid=snapshot['volume']['name'], is_app_consistent=False, is_locked=False, taken_by='Cinder', description=desc) self.check_response(resp) except Exception: with excutils.save_and_reraise_exception(): LOG.exception('Failed to create_snapshot. [%s]', snapshot['volume']['name']) if not self.check_value_valid(resp, ['data', 'snapshot_uuid'], str): raise exception.MalformedResponse(cmd='create_snapshot', reason=_('uuid not found')) snapshot_uuid = resp['data']['snapshot_uuid'] if not self._check_snapshot_status_healthy(snapshot_uuid): message = (_('Volume [%(vol)s] snapshot [%(snapshot)s] status ' 'is not healthy.') % {'vol': snapshot['volume']['name'], 'snapshot': snapshot_uuid}) raise exception.VolumeDriverException(message=message) metadata = snapshot['metadata'] metadata.update({ self.METADATA_DS_SNAPSHOT_UUID: snapshot_uuid }) return {'metadata': metadata} def delete_snapshot(self, snapshot): try: ds_snapshot_uuid = (self._get_metadata_value (snapshot, self.METADATA_DS_SNAPSHOT_UUID)) out = self.exec_webapi('SYNO.Core.ISCSI.LUN', 'delete_snapshot', 1, snapshot_uuid=ds_snapshot_uuid, deleted_by='Cinder') self.check_response(out, snapshot_id=snapshot['id']) except (exception.SnapshotNotFound, exception.SnapshotMetadataNotFound): return except Exception: with excutils.save_and_reraise_exception(): LOG.exception('Failed to delete_snapshot. [%s]', snapshot['id']) def create_volume_from_snapshot(self, volume, snapshot): try: ds_snapshot_uuid = (self._get_metadata_value (snapshot, self.METADATA_DS_SNAPSHOT_UUID)) out = self.exec_webapi('SYNO.Core.ISCSI.LUN', 'clone_snapshot', 1, src_lun_uuid=snapshot['volume']['name'], snapshot_uuid=ds_snapshot_uuid, cloned_lun_name=volume['name'], clone_type='CINDER') self.check_response(out) except exception.SnapshotMetadataNotFound: with excutils.save_and_reraise_exception(): LOG.exception('Failed to get snapshot UUID. [%s]', snapshot['id']) except Exception: with excutils.save_and_reraise_exception(): LOG.exception('Failed to create_volume_from_snapshot. 
[%s]', snapshot['id']) if not self._check_lun_status_normal(volume['name']): message = (_('Volume [%(vol)s] snapshot [%(snapshot)s] status ' 'is not healthy.') % {'vol': snapshot['volume']['name'], 'snapshot': ds_snapshot_uuid}) raise exception.VolumeDriverException(message=message) if snapshot['volume_size'] < volume['size']: self.extend_volume(volume, volume['size']) def get_iqn_and_trgid(self, location): if not location: err = _('Param [location] is invalid.') raise exception.InvalidParameterValue(err=err) result = location.split(' ') if len(result) < 2: raise exception.InvalidInput(reason=location) data = result[0].split(',') if len(data) < 2: raise exception.InvalidInput(reason=location) iqn = result[1] trg_id = data[1] return iqn, int(trg_id, 10) def get_iscsi_properties(self, volume): if not volume['provider_location']: err = _("Param volume['provider_location'] is invalid.") raise exception.InvalidParameterValue(err=err) iqn, trg_id = self.get_iqn_and_trgid(volume['provider_location']) iscsi_properties = { 'target_discovered': False, 'target_iqn': iqn, 'target_portal': '%(ip)s:%(port)d' % {'ip': self.get_ip(), 'port': self.target_port}, 'volume_id': volume['id'], 'access_mode': 'rw', 'discard': False } ips = self.config.safe_get('target_secondary_ip_addresses') if ips: target_portals = [iscsi_properties['target_portal']] for ip in ips: target_portals.append('%(ip)s:%(port)d' % {'ip': ip, 'port': self.target_port}) iscsi_properties.update(target_portals=target_portals) count = len(target_portals) iscsi_properties.update(target_iqns=[ iscsi_properties['target_iqn'] ] * count) iscsi_properties.update(target_lun=0) iscsi_properties.update(target_luns=[ iscsi_properties['target_lun'] ] * count) if 'provider_auth' in volume: auth = volume['provider_auth'] if auth: try: (auth_method, auth_username, auth_password) = auth.split() iscsi_properties['auth_method'] = auth_method iscsi_properties['auth_username'] = auth_username iscsi_properties['auth_password'] = auth_password except Exception: LOG.error('Invalid provider_auth: %s', auth) return iscsi_properties def create_iscsi_export(self, volume_name, identifier): iqn, trg_id, provider_auth = self._target_create(identifier) self._lun_map_target(volume_name, trg_id) return iqn, trg_id, provider_auth def remove_iscsi_export(self, volume_name, trg_id): self._lun_unmap_target(volume_name, trg_id) self._target_delete(trg_id) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/drivers/synology/synology_iscsi.py0000664000175000017500000001374600000000000024645 0ustar00zuulzuul00000000000000# Copyright (c) 2016 Synology Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
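# Editor's note: the sketch below is an illustration added for this document,
# not part of the driver source. It shows the provider_location round trip
# implemented by SynoCommon above: get_provider_location() joins the portal
# list, target id and IQN into one string, and get_iqn_and_trgid() later
# splits that string back apart. The portal addresses, target id and IQN are
# made-up sample values.
def _sample_provider_location_roundtrip():
    portals = ['10.0.0.1:3260', '10.0.0.2:3260']  # primary + secondary portal (hypothetical)
    trg_id = 1  # target id as returned by _target_create() (hypothetical)
    iqn = 'iqn.2000-01.com.synology:sample.Target-1'  # hypothetical IQN
    # Same format string as get_provider_location():
    location = '%s,%d %s 0' % (';'.join(portals), trg_id, iqn)
    # -> '10.0.0.1:3260;10.0.0.2:3260,1 iqn.2000-01.com.synology:sample.Target-1 0'
    # get_iqn_and_trgid() style parsing: fields are space separated, and the
    # first field holds the portals and the target id separated by a comma.
    first_field, parsed_iqn = location.split(' ')[0:2]
    parsed_trg_id = int(first_field.split(',')[1], 10)
    assert (parsed_iqn, parsed_trg_id) == (iqn, trg_id)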
from oslo_log import log as logging from oslo_utils import excutils from cinder import exception from cinder import interface from cinder.volume import driver from cinder.volume.drivers.synology import synology_common as common LOG = logging.getLogger(__name__) @interface.volumedriver class SynoISCSIDriver(driver.ISCSIDriver): """OpenStack Cinder drivers for Synology storage. .. code-block:: none Version history: 1.0.0 - Initial driver. Provide Cinder minimum features 1.0.1 - Add support for UC series model """ # ThirdPartySystems wiki page CI_WIKI_NAME = 'Synology_DSM_CI' VERSION = '1.0.1' def __init__(self, *args, **kwargs): super(SynoISCSIDriver, self).__init__(*args, **kwargs) self.common = None self.configuration.append_config_values(common.cinder_opts) self.stats = {} @classmethod def get_driver_options(cls): additional_opts = cls._get_oslo_driver_opts( 'target_ip_address', 'target_protocol', 'target_port', 'driver_use_ssl', 'use_chap_auth', 'chap_username', 'chap_password', 'target_secondary_ip_addresses', 'target_prefix', 'reserved_percentage', 'max_over_subscription_ratio') return common.cinder_opts + additional_opts def do_setup(self, context): self.common = common.SynoCommon(self.configuration, 'iscsi') def check_for_setup_error(self): self.common.check_for_setup_error() def create_volume(self, volume): """Creates a logical volume.""" self.common.create_volume(volume) def delete_volume(self, volume): """Deletes a logical volume.""" self.common.delete_volume(volume) def create_cloned_volume(self, volume, src_vref): """Creates a clone of the specified volume.""" self.common.create_cloned_volume(volume, src_vref) def extend_volume(self, volume, new_size): """Extend an existing volume's size.""" if volume['size'] >= new_size: LOG.error('New size is smaller than original size. ' 'New: [%(new)d] Old: [%(old)d]', {'new': new_size, 'old': volume['size']}) return self.common.extend_volume(volume, new_size) def create_volume_from_snapshot(self, volume, snapshot): """Creates a volume from a snapshot.""" self.common.create_volume_from_snapshot(volume, snapshot) def update_migrated_volume(self, ctxt, volume, new_volume, status): """Return model update for migrated volume.""" return self.common.update_migrated_volume(volume, new_volume) def create_snapshot(self, snapshot): """Creates a snapshot.""" return self.common.create_snapshot(snapshot) def delete_snapshot(self, snapshot): """Deletes a snapshot.""" self.common.delete_snapshot(snapshot) def get_volume_stats(self, refresh=False): """Get volume status. If 'refresh' is True, run update the stats first. 
""" try: if refresh or not self.stats: self.stats = self.common.update_volume_stats() self.stats['driver_version'] = self.VERSION except Exception: with excutils.save_and_reraise_exception(): LOG.exception('Failed to get_volume_stats.') return self.stats def ensure_export(self, context, volume): pass def create_export(self, context, volume, connector): model_update = {} try: if self.common.is_lun_mapped(volume['name']): return model_update iqn, trg_id, provider_auth = (self.common.create_iscsi_export (volume['name'], volume['id'])) except Exception as e: LOG.exception('Failed to remove_export.') raise exception.ExportFailure(reason=e) model_update['provider_location'] = (self.common.get_provider_location (iqn, trg_id)) model_update['provider_auth'] = provider_auth return model_update def remove_export(self, context, volume): try: if not self.common.is_lun_mapped(volume['name']): return except common.SynoLUNNotExist: LOG.warning("Volume not exist") return try: _, trg_id = (self.common.get_iqn_and_trgid (volume['provider_location'])) self.common.remove_iscsi_export(volume['name'], trg_id) except Exception as e: LOG.exception('Failed to remove_export.') raise exception.RemoveExportException(volume=volume, reason=e.msg) def initialize_connection(self, volume, connector): LOG.debug('iSCSI initiator: %s', connector['initiator']) try: iscsi_properties = self.common.get_iscsi_properties(volume) except Exception: with excutils.save_and_reraise_exception(): LOG.exception('Failed to initialize_connection.') volume_type = self.configuration.safe_get('target_protocol') or 'iscsi' return { 'driver_volume_type': volume_type, 'data': iscsi_properties } def terminate_connection(self, volume, connector, **kwargs): pass ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315577.3991213 cinder-27.0.0/cinder/volume/drivers/toyou/0000775000175000017500000000000000000000000020477 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/drivers/toyou/__init__.py0000664000175000017500000000000000000000000022576 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315577.4031212 cinder-27.0.0/cinder/volume/drivers/toyou/acs5000/0000775000175000017500000000000000000000000021552 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/drivers/toyou/acs5000/__init__.py0000664000175000017500000000000000000000000023651 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/drivers/toyou/acs5000/acs5000_common.py0000664000175000017500000012107500000000000024555 0ustar00zuulzuul00000000000000# Copyright 2020 toyou Corp. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
""" acs5000 san for common driver It will be called by iSCSI driver """ import json import math import random from eventlet import greenthread from oslo_concurrency import processutils from oslo_config import cfg from oslo_log import log as logging from oslo_utils import excutils from oslo_utils import units import paramiko from cinder.common import constants from cinder import coordination from cinder import exception from cinder.i18n import _ from cinder import ssh_utils from cinder import utils as cinder_utils from cinder.volume import driver from cinder.volume.drivers.san import san from cinder.volume import volume_utils VOLUME_PREFIX = 'cinder-' LOG = logging.getLogger(__name__) acs5000c_opts = [ cfg.ListOpt( 'acs5000_volpool_name', default=['pool01'], help='Comma separated list of storage system storage ' 'pools for volumes.'), cfg.IntOpt( 'acs5000_copy_interval', default=5, min=3, max=100, help='When volume copy task is going on,refresh volume ' 'status interval'), cfg.BoolOpt( 'acs5000_multiattach', default=False, help='Enable to allow volumes attaching to multiple ' 'hosts with no limit.'), ] CONF = cfg.CONF CONF.register_opts(acs5000c_opts) class Command(object): def __init__(self, run_ssh): self._ssh = run_ssh def _run_ssh(self, ssh_cmd): try: return self._ssh(ssh_cmd) except processutils.ProcessExecutionError as e: msg = (_('CLI Exception output:\n command: %(cmd)s\n ' 'stdout: %(out)s\n stderr: %(err)s.') % {'cmd': ssh_cmd, 'out': e.stdout, 'err': e.stderr}) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) def run_ssh_info(self, ssh_cmd, key=False): """Run an SSH command and return parsed output.""" ssh_cmd.insert(0, 'cinder') out, err = self._run_ssh(ssh_cmd) if len(err): msg = (_('Execute command %(cmd)s failed, ' 'out: %(out)s, err: %(err)s.') % {'cmd': ' '.join(ssh_cmd), 'out': str(out), 'err': str(err)}) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) try: info = json.loads(out) except json.JSONDecodeError as e: msg = (_('Parse response error from CLI command %(cmd)s, ' 'out: %(out)s, err: %(err)s') % {'cmd': ' '.join(ssh_cmd), 'out': str(out), 'err': e}) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) if not isinstance(info, dict): msg = (_('Unexpected format from CLI command %(cmd)s, ' 'result: %(info)s.') % {'cmd': ' '.join(ssh_cmd), 'info': str(info)}) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) require = ('key', 'msg', 'arr') require_valid = True for r in require: if r not in info.keys(): require_valid = False break if not require_valid: msg = (_('Unexpected response from CLI command %(cmd)s, ' 'require \'key\' \'msg\' \'arr\'. 
out: %(info)s.') % {'cmd': ' '.join(ssh_cmd), 'info': str(info)}) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) elif int(info['key']) != 0: msg = (_('Unexpected error output from CLI command %(cmd)s, ' 'key: %(key)s, msg: %(msg)s.') % {'cmd': ' '.join(ssh_cmd), 'msg': info['msg'], 'key': info['key']}) LOG.error(msg) if not key: raise exception.VolumeBackendAPIException(data=msg) if key: info['key'] = int(info['key']) return info else: return info['arr'] def get_system(self): ssh_cmd = ['get_system'] return self.run_ssh_info(ssh_cmd) def ls_iscsi(self): ssh_cmd = ['ls_iscsi'] ports = self.run_ssh_info(ssh_cmd) up_ports = [] for port in ports: if 'link' in port and port['link'] != 'Down': up_ports.append(up_ports) return up_ports def ls_fc(self): ssh_cmd = ['ls_fc'] ports = self.run_ssh_info(ssh_cmd) up_ports = [] for port in ports: if 'link' in port and port['link'] == 'Up': up_ports.append(port) return up_ports def get_pool(self, pool): ssh_cmd = ['get_pool', '--pool', pool] return self.run_ssh_info(ssh_cmd) def get_volume(self, volume): ssh_cmd = ['get_volume'] if not volume: return [] elif isinstance(volume, str): ssh_cmd.append('--volume') ssh_cmd.append(volume) elif isinstance(volume, list): for vol in volume: ssh_cmd.append('--volume') ssh_cmd.append(vol) result = self.run_ssh_info(ssh_cmd) if not result: return [] else: return result def ls_controller(self): ssh_cmd = ['ls_controller'] ctrs = self.run_ssh_info(ssh_cmd) nodes = {} for node_data in ctrs: nodes[node_data['id']] = { 'id': int(node_data['id']), 'name': node_data['name'], 'status': node_data['status'] } return nodes def create_volume(self, name, size, pool_name, type='0'): ssh_cmd = ['create_volume', '--size', size, '--volume', name, '--pool', pool_name, '--type', type] return self.run_ssh_info(ssh_cmd, key=True) def delete_volume(self, volume): ssh_cmd = ['delete_volume', '--volume', volume] return self.run_ssh_info(ssh_cmd) def extend_volume(self, volume, size): ssh_cmd = ['extend_volume', '--volume', volume, '--size', str(size)] return self.run_ssh_info(ssh_cmd, key=True) def create_clone(self, volume_name, clone_name): ssh_cmd = ['create_clone', '--volume', volume_name, '--clone', clone_name] return self.run_ssh_info(ssh_cmd, key=True) def start_clone(self, volume_name, snapshot=''): ssh_cmd = ['start_clone', '--volume', volume_name, '--snapshot', snapshot] return self.run_ssh_info(ssh_cmd, key=True) def delete_clone(self, volume_name, snapshot=''): ssh_cmd = ['delete_clone', '--volume', volume_name, '--snapshot', snapshot] return self.run_ssh_info(ssh_cmd, key=True) def create_lun_map(self, volume_name, protocol, host): """Map volume to host.""" LOG.debug('enter: create_lun_map volume %s.', volume_name) ssh_cmd = ['create_lun_map', '--volume', volume_name, '--protocol', protocol] if isinstance(host, list): for ht in host: ssh_cmd.append('--host') ssh_cmd.append(ht) else: ssh_cmd.append('--host') ssh_cmd.append(str(host)) return self.run_ssh_info(ssh_cmd, key=True) def delete_lun_map(self, volume_name, protocol, host): ssh_cmd = ['delete_lun_map', '--volume', volume_name, '--protocol', protocol] if isinstance(host, list): for ht in host: ssh_cmd.append('--host') ssh_cmd.append(ht) else: ssh_cmd.append('--host') ssh_cmd.append(str(host)) return self.run_ssh_info(ssh_cmd, key=True) def create_snapshot(self, volume_name, snapshot_name): ssh_cmd = ['create_snapshot', '--volume', volume_name, '--snapshot', snapshot_name] return self.run_ssh_info(ssh_cmd, key=True) def delete_snapshot(self, volume_name, 
snapshot_name): ssh_cmd = ['delete_snapshot', '--volume', volume_name, '--snapshot', snapshot_name] return self.run_ssh_info(ssh_cmd, key=True) def rollback_snapshot(self, snapshot_name, volume_name=''): ssh_cmd = ['rollback_snapshot', '--snapshot', snapshot_name, '--volume', volume_name] return self.run_ssh_info(ssh_cmd, key=True) def set_volume_property(self, name, setting): ssh_cmd = ['set_volume', '--volume', name] for key, value in setting.items(): ssh_cmd.extend(['--' + key, value]) return self.run_ssh_info(ssh_cmd, key=True) class Acs5000CommonDriver(san.SanDriver, driver.MigrateVD, driver.CloneableImageVD): """TOYOU ACS5000 storage abstract common class. .. code-block:: none Version history: 1.0.0 - Initial driver """ VENDOR = 'TOYOU' VERSION = '1.0.0' def __init__(self, *args, **kwargs): super(Acs5000CommonDriver, self).__init__(*args, **kwargs) self.configuration.append_config_values(acs5000c_opts) self._backend_name = self.configuration.safe_get('volume_backend_name') self.pools = self.configuration.acs5000_volpool_name self._cmd = Command(self._run_ssh) self.protocol = None self._state = {'controller': {}, 'enabled_protocols': set(), 'system_name': None, 'system_id': None, 'code_level': None, 'version': None} @staticmethod def get_driver_options(): additional_opts = driver.BaseVD._get_oslo_driver_opts( 'san_ip', 'san_ssh_port', 'san_login', 'san_password', 'ssh_conn_timeout', 'ssh_min_pool_conn', 'ssh_max_pool_conn') return acs5000c_opts + additional_opts @volume_utils.trace_method def do_setup(self, ctxt): """Check that we have all configuration details from the storage.""" self._validate_pools_exist() self._state.update(self._cmd.get_system()) self._state['controller'] = self._cmd.ls_controller() if self.protocol == constants.FC: ports = self._cmd.ls_fc() else: ports = self._cmd.ls_iscsi() if len(ports) > 0: self._state['enabled_protocols'].add(self.protocol) def _validate_pools_exist(self): LOG.debug('_validate_pools_exist. ' 'pools: %s', ' '.join(self.pools)) for pool in self.pools: pool_data = self._cmd.get_pool(pool) if not pool_data: msg = _('Failed getting details for pool %s.') % pool raise exception.InvalidInput(reason=msg) return True @staticmethod def _convert_name(name): if len(name) >= 12: suffix = name[-12:] elif len(name) > 0: suffix = str(name).zfill(12) else: suffix = str(random.randint(0, 999999)).zfill(12) return VOLUME_PREFIX + suffix @staticmethod def _check_multi_attached(volume, connector): # In the case of multi-attach, these VMs belong to the same host. # The mapping action only happens once. # If the only mapping relationship is cancelled, # volume on other VMs cannot be read or written. 
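# (Comment added for this document.) Concretely, the loop below counts how
# many of this volume's existing attachments were recorded with a connector
# whose 'uuid' equals the current connector's 'uuid' (per the comment above,
# attachments made from the same host). Callers such as terminate_connection()
# in the FC and iSCSI drivers keep the LUN mapping in place unless this count
# is below two.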
if not connector or 'uuid' not in connector: return 0 attached_count = 0 uuid = connector['uuid'] for ref in volume.volume_attachment: ref_connector = {} if 'connector' in ref and ref.connector: # ref.connector may be None ref_connector = ref.connector if 'uuid' in ref_connector and uuid == ref_connector['uuid']: attached_count += 1 return attached_count @volume_utils.trace_method def check_for_setup_error(self): """Ensure that the params are set properly.""" if self._state['system_name'] is None: exception_msg = _('Unable to determine system name.') raise exception.VolumeBackendAPIException(data=exception_msg) if self._state['system_id'] is None: exception_msg = _('Unable to determine system id.') raise exception.VolumeBackendAPIException(data=exception_msg) if len(self._state['controller']) != 2: msg = _('do_setup: The dual controller status is incorrect.') LOG.error(msg) raise exception.VolumeDriverException(message=msg) if self.protocol not in self._state['enabled_protocols']: raise exception.InvalidInput( reason=(_('The storage device does not support %(prot)s. ' 'Please configure the device to support %(prot)s ' 'or switch to a driver using a different ' 'protocol.') % {'prot': self.protocol})) required = ['san_ip', 'san_ssh_port', 'san_login', 'acs5000_volpool_name'] for param in required: if not self.configuration.safe_get(param): raise exception.InvalidInput( reason=_('%s is not set.') % param) if not self.configuration.san_password: raise exception.InvalidInput( reason='Password is required for authentication') return def _run_ssh(self, cmd_list, check_exit_code=True): cinder_utils.check_ssh_injection(cmd_list) command = ' '.join(cmd_list) if not self.sshpool: try: self.sshpool = self._set_up_sshpool(self.configuration.san_ip) except paramiko.SSHException as e: raise exception.VolumeDriverException(message=e) ssh_execute = self._ssh_execute( self.sshpool, command, check_exit_code) return ssh_execute def _set_up_sshpool(self, ip): port = self.configuration.get('san_ssh_port', 22) login = self.configuration.get('san_login') password = self.configuration.get('san_password') timeout = self.configuration.get('ssh_conn_timeout', 30) min_size = self.configuration.get('ssh_min_pool_conn', 1) max_size = self.configuration.get('ssh_max_pool_conn', 5) sshpool = ssh_utils.SSHPool(ip, port, timeout, login, password=password, min_size=min_size, max_size=max_size) return sshpool def _ssh_execute( self, sshpool, command, check_exit_code=True): # noinspection PyBroadException try: with sshpool.item() as ssh: try: return processutils.ssh_execute( ssh, command, check_exit_code=check_exit_code) except Exception as e: LOG.error('Error has occurred: %s', e) raise processutils.ProcessExecutionError( exit_code=e.exit_code, stdout=e.stdout, stderr=e.stderr, cmd=e.cmd) except Exception: with excutils.save_and_reraise_exception(): LOG.error('Error running SSH command: %s', command) def create_volume(self, volume): LOG.debug('create_volume, volume %s.', volume['id']) volume_name = self._convert_name(volume.name) pool_name = volume_utils.extract_host(volume['host'], 'pool') ret = self._cmd.create_volume( volume_name, str(volume['size']), pool_name) if ret['key'] == 310: msg = _('Volume: %s with same name ' 'already exists on the system.') % volume_name raise exception.VolumeBackendAPIException(data=msg) elif ret['key'] == 102: allow_size = 0 for p in self._stats['pools']: if p['pool_name'] == pool_name: allow_size = p['free_capacity_gb'] break raise exception.VolumeSizeExceedsLimit(size=int(volume['size']), 
limit=allow_size) elif ret['key'] == 307: raise exception.VolumeLimitExceeded(allowed=96, name=volume_name) elif ret['key'] == 308: raise exception.VolumeLimitExceeded(allowed=4096, name=volume_name) elif ret['key'] != 0: msg = (_('Failed to create_volume %(vol)s on pool %(pool)s, ' 'code=%(ret)s, error=%(msg)s.') % {'vol': volume_name, 'pool': pool_name, 'ret': ret['key'], 'msg': ret['msg']}) raise exception.VolumeBackendAPIException(data=msg) return None def delete_volume(self, volume): volume_name = self._convert_name(volume.name) self._cmd.delete_volume(volume_name) def create_snapshot(self, snapshot): volume_name = self._convert_name(snapshot.volume_name) snapshot_name = self._convert_name(snapshot.name) ret = self._cmd.create_snapshot(volume_name, snapshot_name) if ret['key'] == 303: raise exception.VolumeNotFound(volume_id=volume_name) elif ret['key'] == 503: raise exception.SnapshotLimitExceeded(allowed=4096) elif ret['key'] == 504: raise exception.SnapshotLimitExceeded(allowed=64) elif ret['key'] != 0: msg = (_('Failed to create_snapshot %(snap)s on volume %(vol)s ' 'code=%(ret)s, error=%(msg)s.') % {'vol': volume_name, 'snap': snapshot_name, 'ret': ret['key'], 'msg': ret['msg']}) raise exception.VolumeBackendAPIException(data=msg) def delete_snapshot(self, snapshot): volume_name = self._convert_name(snapshot.volume_name) snapshot_name = self._convert_name(snapshot.name) ret = self._cmd.delete_snapshot(volume_name, snapshot_name) if ret['key'] == 505: raise exception.SnapshotNotFound(snapshot_id=snapshot['id']) elif ret['key'] != 0: msg = (_('Failed to delete_snapshot %(snap)s on volume %(vol)s ' 'code=%(ret)s, error=%(msg)s.') % {'vol': volume_name, 'snap': snapshot_name, 'ret': ret['key'], 'msg': ret['msg']}) raise exception.VolumeBackendAPIException(data=msg) def create_volume_from_snapshot(self, volume, snapshot): snapshot_name = self._convert_name(snapshot.name) volume_name = self._convert_name(volume.name) source_volume = self._convert_name(snapshot.volume_name) pool = volume_utils.extract_host(volume['host'], 'pool') self._cmd.create_volume(volume_name, str(volume['size']), pool, '10') self._local_clone_copy(source_volume, volume_name, 'create_volume_from_snapshot', snapshot_name) def snapshot_revert_use_temp_snapshot(self): return False @volume_utils.trace def revert_to_snapshot(self, context, volume, snapshot): volume_name = self._convert_name(volume.name) snapshot_name = self._convert_name(snapshot.name) ret = self._cmd.rollback_snapshot(snapshot_name, volume_name) if ret['key'] == 303: raise exception.VolumeNotFound(volume_id=volume_name) elif ret['key'] == 505: raise exception.SnapshotNotFound(snapshot_id=snapshot_name) elif ret['key'] == 506: msg = (_('Snapshot %s is not the latest one.') % snapshot_name) raise exception.InvalidSnapshot(reason=msg) elif ret['key'] != 0: msg = (_('Failed to revert volume %(vol)s to snapshot %(snap)s, ' 'code=%(ret)s, error=%(msg)s.') % {'vol': volume_name, 'snap': snapshot_name, 'ret': ret['key'], 'msg': ret['msg']}) raise exception.VolumeBackendAPIException(data=msg) def create_cloned_volume(self, tgt_volume, src_volume): clone_name = self._convert_name(tgt_volume.name) volume_name = self._convert_name(src_volume.name) tgt_pool = volume_utils.extract_host(tgt_volume['host'], 'pool') try: self._cmd.create_volume(clone_name, str( tgt_volume['size']), tgt_pool, '10') self._local_clone_copy( volume_name, clone_name, 'create_cloned_volume') except exception.VolumeBackendAPIException: self._cmd.delete_volume(clone_name) raise 
exception.VolumeBackendAPIException( data='create_cloned_volume failed.') def extend_volume(self, volume, new_size): volume_name = self._convert_name(volume.name) ret = self._cmd.extend_volume(volume_name, int(new_size)) if ret['key'] == 303: raise exception.VolumeNotFound(volume_id=volume_name) elif ret['key'] == 321: msg = _('Volume capacity shall not be ' 'less than the current size %sG.') % volume['size'] raise exception.VolumeBackendAPIException(data=msg) elif ret['key'] == 102: pool_name = volume_utils.extract_host(volume['host'], 'pool') allow_size = 0 for p in self._stats['pools']: if p['pool_name'] == pool_name: allow_size = p['free_capacity_gb'] break raise exception.VolumeSizeExceedsLimit(size=int(new_size), limit=allow_size) elif ret['key'] != 0: msg = (_('Failed to extend_volume %(vol)s to size %(size)s, ' 'code=%(ret)s, error=%(msg)s.') % {'vol': volume_name, 'size': new_size, 'ret': ret['key'], 'msg': ret['msg']}) raise exception.VolumeBackendAPIException(data=msg) def update_migrated_volume(self, ctxt, volume, new_volume, original_volume_status): """Only for host copy.""" existing_name = self._convert_name(new_volume.name) wanted_name = self._convert_name(volume.name) LOG.debug('enter: update_migrated_volume: rename of %(new)s ' 'to original name %(wanted)s.', {'new': existing_name, 'wanted': wanted_name}) is_existed = self._cmd.get_volume(wanted_name) if len(is_existed) == 1: LOG.warning('volume name %(wanted)s is existed, The two volumes ' '%(wanted)s and %(new)s may be on the same system.', {'new': existing_name, 'wanted': wanted_name}) return {'_name_id': new_volume['_name_id'] or new_volume['id']} else: self._cmd.set_volume_property(existing_name, {'new_name': wanted_name}) return {'_name_id': None} def migrate_volume(self, ctxt, volume, host): LOG.debug('enter: migrate_volume id %(id)s, host %(host)s', {'id': volume['id'], 'host': host['host']}) pool = volume_utils.extract_host(volume['host'], 'pool') if 'system_id' not in host['capabilities']: LOG.error('Target host has no system_id') return (False, None) if host['capabilities']['system_id'] != self._state['system_id']: LOG.info('The target host does not belong to the same ' 'storage system as the current volume') return (False, None) if host['capabilities']['pool_name'] == pool: LOG.info('The target host belongs to the same storage system ' 'and pool as the current volume.') return (True, None) LOG.info('The target host belongs to the same storage system ' 'as the current but to a different pool. 
' 'The same storage system will clone volume into the new pool') volume_name = self._convert_name(volume.name) tmp_name = VOLUME_PREFIX + 'tmp' tmp_name += str(random.randint(0, 999999)).zfill(8) self._cmd.create_volume(tmp_name, str(volume['size']), host['capabilities']['pool_name'], '10') self._local_clone_copy( volume_name, tmp_name, 'migrate_volume') self._cmd.delete_volume(volume_name) self._cmd.set_volume_property(tmp_name, {'type': '"RAID Volume"', 'new_name': volume_name}) return (True, None) def _manage_get_volume(self, ref, pool_name=None): if 'source-name' in ref: manage_source = ref['source-name'] volumes = self._cmd.get_volume(manage_source) else: reason = _('Reference must contain source-name element ' 'and only support source-name.') raise exception.ManageExistingInvalidReference(existing_ref=ref, reason=reason) if not volumes: reason = (_('No volume by ref %s.') % manage_source) raise exception.ManageExistingInvalidReference(existing_ref=ref, reason=reason) volume = volumes[0] if pool_name and pool_name != volume['poolname']: reason = (_('Volume %(volume)s does not belong to pool name ' '%(pool)s.') % {'volume': manage_source, 'pool': pool_name}) raise exception.ManageExistingInvalidReference(existing_ref=ref, reason=reason) return volume @volume_utils.trace_method def manage_existing(self, volume, ref): """Manages an existing volume.""" volume_name = ref.get('source-name') if not volume_name: reason = _('Reference must contain source-name element ' 'and only support source-name.') raise exception.ManageExistingInvalidReference(existing_ref=ref, reason=reason) new_name = self._convert_name(volume.name) self._cmd.set_volume_property(volume_name, {'type': '2', 'new_name': new_name}) @volume_utils.trace_method def manage_existing_get_size(self, volume, ref): """Return size of an existing volume for manage_existing.""" pool_name = volume_utils.extract_host(volume['host'], 'pool') vol_backend = self._manage_get_volume(ref, pool_name) size = int(vol_backend.get('size_mb', 0)) size_gb = int(math.ceil(size / 1024)) if (size_gb * 1024) > size: LOG.warning('Volume %(vol)s capacity is %(mb)s MB, ' 'extend to %(gb)s GB.', {'vol': ref['source-name'], 'mb': size, 'gb': size_gb}) self._cmd.extend_volume(ref['source-name'], size_gb) return size_gb def get_volume_stats(self, refresh=False): """Get volume stats. If we haven't gotten stats yet or 'refresh' is True, run update the stats first. 
""" if not self._stats or refresh: self._update_volume_stats() return self._stats def _update_volume_stats(self): """Retrieve stats info from volume group.""" LOG.debug('Updating volume stats, ' 'pools: \'%(host)s#%(pool)s\'.', {'host': self.host, 'pool': ','.join(self.pools)}) data = {} data['vendor_name'] = self.VENDOR data['driver_version'] = self.VERSION data['storage_protocol'] = self.protocol backend_name = self.configuration.safe_get('volume_backend_name') data['volume_backend_name'] = (backend_name or self._state['system_name']) data['pools'] = [self._build_pool_stats(pool) for pool in self.pools] self._stats = data def _build_pool_stats(self, pool): """Build pool status""" pool_stats = {} try: pool_data = self._cmd.get_pool(pool) if pool_data: total_capacity_gb = float( pool_data['capacity']) / units.Gi free_capacity_gb = float( pool_data['free_capacity']) / units.Gi total_volumes = None if 'total_volumes' in pool_data.keys(): total_volumes = int(pool_data['total_volumes']) thin_provisioning = False if 'thin' in pool_data and pool_data['thin'] == 'Enabled': thin_provisioning = True pool_stats = { 'pool_name': pool_data['name'], 'total_capacity_gb': total_capacity_gb, 'free_capacity_gb': free_capacity_gb, 'compression_support': True, 'reserved_percentage': self.configuration.reserved_percentage, 'QoS_support': False, 'consistencygroup_support': False, 'multiattach': self.configuration.acs5000_multiattach, 'thin_provisioning_support': thin_provisioning, 'total_volumes': total_volumes, 'system_id': self._state['system_id']} else: msg = _('Backend storage pool "%s" not found.') % pool LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) except exception.VolumeBackendAPIException: msg = _('Failed getting details for pool %s.') % pool raise exception.VolumeBackendAPIException(data=msg) return pool_stats def _local_clone_copy(self, volume, clone, action=None, snapshot=''): LOG.debug('enter: copy volume %(vol)s to %(clone)s by %(action)s.', {'vol': volume, 'clone': clone, 'action': action}) if self._wait_volume_copy(volume, clone, action, 'wait'): LOG.info('start copy task.') ret = self._cmd.create_clone(volume, clone) if ret['key'] != 0: self._cmd.delete_volume(clone) if ret['key'] == 306: raise exception.VolumeBackendAPIException( data='The source volume must not be larger ' 'than the target volume in a clone relation. ') elif ret['key'] == 0: ret = self._cmd.start_clone(volume, snapshot) if ret['key'] == 505: raise exception.SnapshotNotFound(snapshot_id=snapshot) else: LOG.error('%(action)s failed.', {'action': action}) raise exception.VolumeBackendAPIException(data='clone failed!') if self._wait_volume_copy(volume, clone, action, 'copy'): self._cmd.delete_clone(volume, snapshot) LOG.info('%s successfully.', action) else: LOG.error('%(action)s failed.', {'action': action}) raise exception.VolumeBackendAPIException(data='clone failed!') LOG.debug('leave: copy volume %(vol)s to %(clone)s by %(action)s. 
', {'vol': volume, 'clone': clone, 'action': action}) @coordination.synchronized('acs5000-copy-{volume}-task') def _wait_volume_copy(self, volume, clone, function=None, action=None): LOG.debug('_wait_volume_copy, volume %s.', volume) if volume is None or clone is None: LOG.error('volume parameter error.') return False ret = False while_exit = False rescan = 0 interval = self.configuration.acs5000_copy_interval wait_status = ( 'Initiating', 'Rebuilding', 'Erasing', 'Delayed rebuilding') # All status # {"Offline", "Online", "Initiating", # ###"Rebuilding", "Migrating", "Parity chking", # ###"Cloning", "Rolling back", "Parity chking", # ###"Replicating", "Erasing", "Moving", "Replacing", # "Reclaiming", "Delayed rebuilding", "Relocation", "N/A"}; # All health # {"Optimal", "Degraded", "Deleted", "Missing", "Failed", # "Partially optimal", "N/A"} while True: rescan += 1 volume_info = self._cmd.get_volume([volume, clone]) if len(volume_info) == 2: for vol in volume_info: if vol['type'] == 'BACKUP': if vol['health'] == 'Optimal' and ( vol['status'] in wait_status): LOG.info('%(function)s %(action)s task: ' 'rescan %(scan)s times, clone %(clone)s ' 'need wait,status is %(status)s, ' 'health is %(health)s, ' 'process is %(process)s%%. ', {'function': function, 'action': action, 'scan': rescan, 'clone': vol['name'], 'status': vol['status'], 'health': vol['health'], 'process': vol['r']}) elif vol['status'] == 'Cloning': LOG.info('%(function)s %(action)s task: ' 'rescan %(scan)s times,volume %(volume)s ' 'copy process %(process)s%%. ', {'function': function, 'action': action, 'scan': rescan, 'volume': vol['name'], 'process': vol['r']}) elif vol['status'] == 'Queued': LOG.info('%(function)s %(action)s task: ' 'rescan %(scan)s times, ' 'volume %(volume)s is in the queue. ', {'function': function, 'action': action, 'scan': rescan, 'volume': vol['name']}) elif (vol['type'] == 'RAID Volume' and vol['status'] == 'Online'): ret = True while_exit = True LOG.info('%(function)s %(action)s task: ' 'rescan %(scan)s times,volume %(volume)s ' 'copy task completed,status is Online. ', {'function': function, 'action': action, 'scan': rescan, 'volume': vol['name']}) elif (vol['health'] == 'Optimal' and (vol['status'] in wait_status)): LOG.info('%(function)s %(action)s task: ' 'rescan %(scan)s times,volume %(volume)s ' 'need wait, ' 'status is %(status)s,health is %(health)s, ' 'process is %(process)s%%. ', {'function': function, 'action': action, 'scan': rescan, 'volume': vol['name'], 'status': vol['status'], 'health': vol['health'], 'process': vol['r']}) else: LOG.info('%(function)s %(action)s task: ' 'rescan %(scan)s times,volume %(volume)s ' 'is not normal, ' 'status %(status)s,health is %(health)s. ', {'function': function, 'action': action, 'scan': rescan, 'volume': vol['name'], 'status': vol['status'], 'health': vol['health']}) while_exit = True break elif len(volume_info) == 1: while_exit = True if volume_info[0]['name'] == volume: LOG.info('%(function)s %(action)s task: ' 'rescan %(scan)s times,clone %(clone)s ' 'does not exist! ', {'function': function, 'action': action, 'scan': rescan, 'clone': clone}) else: LOG.info('%(function)s %(action)s task: ' 'rescan %(scan)s times,volume %(volume)s ' 'does not exist! ', {'function': function, 'action': action, 'scan': rescan, 'volume': volume}) else: while_exit = True LOG.info('%(function)s %(action)s task: ' 'rescan %(scan)s times,volume %(volume)s ' 'clone %(clone)s does not exist! 
', {'function': function, 'action': action, 'scan': rescan, 'volume': volume, 'clone': clone}) if while_exit: break greenthread.sleep(interval) return ret ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/drivers/toyou/acs5000/acs5000_fc.py0000664000175000017500000001516500000000000023657 0ustar00zuulzuul00000000000000# Copyright 2021 toyou Corp. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ acs5000 FC driver """ from oslo_log import log as logging from cinder.common import constants from cinder import exception from cinder.i18n import _ from cinder import interface from cinder import utils from cinder.volume.drivers.toyou.acs5000 import acs5000_common from cinder.zonemanager import utils as zone_utils LOG = logging.getLogger(__name__) @interface.volumedriver class Acs5000FCDriver(acs5000_common.Acs5000CommonDriver): """TOYOU ACS5000 storage FC volume driver. .. code-block:: none Version history: 1.0.0 - Initial driver """ VENDOR = 'TOYOU' VERSION = '1.0.0' PROTOCOL = constants.FC # ThirdPartySystems wiki page CI_WIKI_NAME = 'TOYOU_ACS5000_CI' def __init__(self, *args, **kwargs): super(Acs5000FCDriver, self).__init__(*args, **kwargs) self.protocol = self.PROTOCOL @staticmethod def get_driver_options(): return acs5000_common.Acs5000CommonDriver.get_driver_options() def _get_connected_wwpns(self): fc_ports = self._cmd.ls_fc() connected_wwpns = [] for port in fc_ports: if 'wwpn' in port: connected_wwpns.append(port['wwpn']) elif 'WWPN' in port: connected_wwpns.append(port['WWPN']) return connected_wwpns def validate_connector(self, connector): """Check connector for at least one enabled FC protocol.""" if 'wwpns' not in connector: LOG.error('The connector does not ' 'contain the required information.') raise exception.InvalidConnectorException( missing='wwpns') @utils.synchronized('acs5000A-host', external=True) def initialize_connection(self, volume, connector): LOG.debug('enter: initialize_connection: volume ' '%(vol)s with connector %(conn)s', {'vol': volume.id, 'conn': connector}) volume_name = self._convert_name(volume.name) ret = self._cmd.create_lun_map(volume_name, self.protocol, connector['wwpns']) if ret['key'] == 0: if 'lun' in ret['arr']: lun_id = int(ret['arr']['lun']) else: msg = (_('_create_fc_lun: Lun id did not find ' 'when volume %s create lun map.') % volume['id']) raise exception.VolumeBackendAPIException(data=msg) target_wwpns = self._get_connected_wwpns() if len(target_wwpns) == 0: if self._check_multi_attached(volume, connector) < 1: self._cmd.delete_lun_map(volume_name, self.protocol, connector['wwpns']) msg = (_('_create_fc_lun: Did not find ' 'available fc wwpns when volume %s ' 'create lun map.') % volume['id']) raise exception.VolumeBackendAPIException(data=msg) initiator_target = {} for initiator_wwpn in connector['wwpns']: initiator_target[str(initiator_wwpn)] = target_wwpns properties = {'driver_volume_type': 'fibre_channel', 'data': {'target_wwn': target_wwpns, 
'target_discovered': False, 'target_lun': lun_id, 'volume_id': volume['id']}} properties['data']['initiator_target_map'] = initiator_target elif ret['key'] == 303: raise exception.VolumeNotFound(volume_id=volume_name) else: msg = (_('failed to map the volume %(vol)s to ' 'connector %(conn)s.') % {'vol': volume['id'], 'conn': connector}) raise exception.VolumeBackendAPIException(data=msg) zone_utils.add_fc_zone(properties) LOG.debug('leave: initialize_connection: volume ' '%(vol)s with connector %(conn)s', {'vol': volume.id, 'conn': connector}) return properties @utils.synchronized('acs5000A-host', external=True) def terminate_connection(self, volume, connector, **kwargs): LOG.debug('enter: terminate_connection: volume ' '%(vol)s with connector %(conn)s', {'vol': volume.id, 'conn': connector}) volume_name = self._convert_name(volume.name) properties = {'driver_volume_type': 'fibre_channel', 'data': {}} initiator_wwpns = [] target_wwpns = [] if connector and 'wwpns' in connector: initiator_wwpns = connector['wwpns'] target_wwpns = self._get_connected_wwpns() if len(target_wwpns) == 0: target_wwpns = [] LOG.warning('terminate_connection: Did not find ' 'available fc wwpns when volume %s ' 'delete lun map.', volume.id) initiator_target = {} for i_wwpn in initiator_wwpns: initiator_target[str(i_wwpn)] = target_wwpns properties['data'] = {'initiator_target_map': initiator_target} if self._check_multi_attached(volume, connector) < 2: if not initiator_wwpns: # -1 means all lun maps of this volume initiator_wwpns = -1 self._cmd.delete_lun_map(volume_name, self.protocol, initiator_wwpns) else: LOG.warning('volume %s has been mapped to multi VMs, and these ' 'VMs belong to the same host. The mapping ' 'cancellation request is aborted.', volume.id) zone_utils.remove_fc_zone(properties) LOG.debug('leave: terminate_connection: volume ' '%(vol)s with connector %(conn)s', {'vol': volume.id, 'conn': connector}) return properties ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/drivers/toyou/acs5000/acs5000_iscsi.py0000664000175000017500000001175500000000000024402 0ustar00zuulzuul00000000000000# Copyright 2020 toyou Corp. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ acs5000 iSCSI driver """ from oslo_log import log as logging from cinder.common import constants from cinder import exception from cinder.i18n import _ from cinder import interface from cinder import utils from cinder.volume.drivers.toyou.acs5000 import acs5000_common LOG = logging.getLogger(__name__) @interface.volumedriver class Acs5000ISCSIDriver(acs5000_common.Acs5000CommonDriver): """TOYOU ACS5000 storage iSCSI volume driver. .. 
code-block:: none Version history: 1.0.0 - Initial driver """ VENDOR = 'TOYOU' VERSION = '1.0.0' PROTOCOL = constants.ISCSI # ThirdPartySystems wiki page CI_WIKI_NAME = 'TOYOU_ACS5000_CI' def __init__(self, *args, **kwargs): super(Acs5000ISCSIDriver, self).__init__(*args, **kwargs) self.protocol = self.PROTOCOL @staticmethod def get_driver_options(): return acs5000_common.Acs5000CommonDriver.get_driver_options() def validate_connector(self, connector): """Check connector for at least one enabled iSCSI protocol.""" if 'initiator' not in connector: LOG.error('The connector does not ' 'contain the required information.') raise exception.InvalidConnectorException( missing='initiator') @utils.synchronized('acs5000A-host', external=True) def initialize_connection(self, volume, connector): LOG.debug('enter: initialize_connection: volume ' '%(vol)s with connector %(conn)s', {'vol': volume.id, 'conn': connector}) volume_name = self._convert_name(volume.name) ret = self._cmd.create_lun_map(volume_name, self.protocol, connector['initiator']) if ret['key'] == 0: lun_required = ['iscsi_name', 'portal', 'lun'] lun_info = ret['arr'] for param in lun_required: if param not in lun_info: msg = (_('initialize_connection: Param %(param)s ' 'was not returned correctly when volume ' '%(vol)s mapping.') % {'param': param, 'vol': volume.id}) raise exception.VolumeBackendAPIException(data=msg) data = {'target_discovered': False, 'target_iqns': lun_info['iscsi_name'], 'target_portals': lun_info['portal'], 'target_luns': lun_info['lun'], 'volume_id': volume.id} LOG.debug('leave: initialize_connection: volume ' '%(vol)s with connector %(conn)s', {'vol': volume.id, 'conn': connector}) return {'driver_volume_type': 'iscsi', 'data': data} if ret['key'] == 303: raise exception.VolumeNotFound(volume_id=volume_name) elif ret['key'] == 402: raise exception.ISCSITargetAttachFailed(volume_id=volume_name) else: msg = (_('failed to map the volume %(vol)s to ' 'connector %(conn)s.') % {'vol': volume['id'], 'conn': connector}) raise exception.VolumeBackendAPIException(data=msg) @utils.synchronized('acs5000A-host', external=True) def terminate_connection(self, volume, connector, **kwargs): LOG.debug('enter: terminate_connection: volume ' '%(vol)s with connector %(conn)s', {'vol': volume.id, 'conn': connector}) name = self._convert_name(volume.name) # -1 means all lun maps initiator = '-1' if connector and connector['initiator']: initiator = connector['initiator'] if self._check_multi_attached(volume, connector) < 2: self._cmd.delete_lun_map(name, self.protocol, initiator) else: LOG.warning('volume %s has been mapped to multi VMs, and these ' 'VMs belong to the same host. 
The mapping ' 'cancellation request is aborted.', volume.id) LOG.debug('leave: terminate_connection: volume ' '%(vol)s with connector %(conn)s', {'vol': volume.id, 'conn': connector}) return {'driver_volume_type': 'iscsi', 'data': {}} ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315577.4031212 cinder-27.0.0/cinder/volume/drivers/toyou/tyds/0000775000175000017500000000000000000000000021462 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/drivers/toyou/tyds/__init__.py0000664000175000017500000000000000000000000023561 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/drivers/toyou/tyds/tyds.py0000664000175000017500000007104000000000000023021 0ustar00zuulzuul00000000000000# Copyright 2023 toyou Corp. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Cinder driver for Toyou distributed storage. """ import re import time from oslo_config import cfg from oslo_log import log as logging from oslo_utils import units from cinder.common import constants from cinder import coordination from cinder import exception from cinder.i18n import _ from cinder import interface from cinder import utils as cinder_utils from cinder.volume import configuration from cinder.volume import driver from cinder.volume.drivers.san import san from cinder.volume.drivers.toyou.tyds import tyds_client from cinder.volume import volume_utils LOG = logging.getLogger(__name__) tyds_opts = [ cfg.ListOpt('tyds_pools', default=['pool01'], help='The pool name where volumes are stored.'), cfg.PortOpt('tyds_http_port', default=80, help='The port that connects to the http api.'), cfg.StrOpt('tyds_stripe_size', default='4M', help='Volume stripe size.'), cfg.IntOpt('tyds_clone_progress_interval', default=3, help='Interval (in seconds) for retrieving clone progress.'), cfg.IntOpt('tyds_copy_progress_interval', default=3, help='Interval (in seconds) for retrieving copy progress.') ] CONF = cfg.CONF CONF.register_opts(tyds_opts, group=configuration.SHARED_CONF_GROUP) class TYDSDriverException(exception.VolumeDriverException): message = _("TYDS Cinder toyou failure: %(reason)s") CREATE_VOLUME_SUCCESS = ('[Success] Cinder: Create Block Device, ' 'Block Name: %s, Size in MB: %s, Pool Name: %s, ' 'Stripe Size: %s.') CREATE_VOLUME_FAILED = ('[Failed] Cinder: Create Block Device, ' 'Block Name: %s, Size in MB: %s, Pool Name: %s, ' 'Stripe Size: %s.') DELETE_VOLUME_SUCCESS = ('[Success] Cinder: Delete Block Device, Block Name: ' '%s.') DELETE_VOLUME_FAILED = ('[Failed] Cinder: delete failed, the volume: %s ' 'has normal_snaps: %s, please delete ' 'normal_snaps first.') ATTACH_VOLUME_SUCCESS = ('[Success] Cinder: Attach Block Device, Block Name: ' '%s, IP Address: %s, Host: %s.') DETACH_VOLUME_SUCCESS = ('[Success] Cinder: Detach Block Device, Block Name: ' 
'%s, IP Address: %s, Host: %s.') EXTEND_VOLUME_SUCCESS = ('[Success] Cinder: Extend volume: %s from %sMB to ' '%sMB.') CREATE_SNAPSHOT_SUCCESS = '[Success] Cinder: Create snapshot: %s, volume: %s.' DELETE_SNAPSHOT_SUCCESS = '[Success] Cinder: Delete snapshot: %s, volume: %s.' CREATE_VOLUME_FROM_SNAPSHOT_SUCCESS = ('[Success] Cinder: Create volume: %s, ' 'pool name: %s; from snapshot: %s ' 'source volume: %s, source pool name: ' '%s.') CREATE_VOLUME_FROM_SNAPSHOT_DONE = ('[Success] Cinder: Create volume: %s ' 'done, pool name: %s; from snapshot:' ' %s source volume: %s, source pool ' 'name: %s.') COPY_VOLUME_DONE = ('[Success] Cinder: Copy volume done, ' 'pool_name: %s; block_name: %s ' 'target_pool_name: %s, target_block_name: %s.') COPY_VOLUME_FAILED = ('[Failed] Cinder: Copy volume failed, ' 'pool_name: %s; block_name: %s ' 'target_pool_name: %s, target_block_name: %s.') @interface.volumedriver class TYDSDriver(driver.MigrateVD, driver.BaseVD): """TOYOU distributed storage abstract common class. .. code-block:: none Version history: 1.0.0 - Initial TOYOU NetStor TYDS Driver """ VENDOR = 'TOYOU' VERSION = '1.0.0' CI_WIKI_NAME = 'TOYOU_TYDS_CI' def __init__(self, *args, **kwargs): super(TYDSDriver, self).__init__(*args, **kwargs) self.configuration.append_config_values(tyds_opts) self.configuration.append_config_values(san.san_opts) self.ip = self.configuration.san_ip self.port = self.configuration.tyds_http_port self.username = self.configuration.san_login self.password = self.configuration.san_password self.pools = self.configuration.tyds_pools self.client = None self.storage_protocol = constants.ISCSI @staticmethod def get_driver_options(): additional_opts = driver.BaseVD._get_oslo_driver_opts( 'san_ip', 'san_login', 'san_password' ) return tyds_opts + additional_opts def do_setup(self, context): LOG.debug("Start setup Tyds client") self.client = tyds_client.TydsClient(self.ip, self.port, self.username, self.password) LOG.info("Initialized Tyds Driver Client.") def check_for_setup_error(self): required = [ 'san_ip', 'san_login', 'san_password', 'tyds_pools' ] missing_params = [param for param in required if not self.configuration.safe_get(param)] if missing_params: missing_params_str = ', '.join(missing_params) msg = _("The following parameters are not set: %s" % missing_params_str) raise exception.InvalidInput( reason=msg) def _update_volume_stats(self): """Update the backend stats including TOYOU info and pools info.""" backend_name = self.configuration.safe_get('volume_backend_name') self._stats = { 'vendor_name': self.VENDOR, 'driver_version': self.VERSION, 'volume_backend_name': backend_name, 'pools': self._get_pools_stats(), 'storage_protocol': self.storage_protocol, } LOG.debug('Update volume stats: %s.', self._stats) def _get_pools_stats(self): """Get pools statistics.""" pools_data = self.client.get_pools() volumes_list = self.client.get_volumes() pools_stats = [] for pool_name in self.pools: pool_info = next( (data for data in pools_data if data['name'] == pool_name), None ) if pool_info: max_avail = int(pool_info['stats']['max_avail']) stored = int(pool_info['stats']['stored']) free_capacity = self._convert_gb(max_avail - stored, "B") total_capacity = self._convert_gb(max_avail, "B") allocated_capacity = 0 total_volumes = 0 for vol in volumes_list: if vol['poolName'] == pool_name: allocated_capacity += self._convert_gb( int(vol['sizeMB']), "MB") total_volumes += 1 pools_stats.append({ 'pool_name': pool_name, 'total_capacity_gb': total_capacity, 'free_capacity_gb': 
free_capacity, 'provisioned_capacity_gb': allocated_capacity, 'thin_provisioning_support': True, 'QoS_support': False, 'consistencygroup_support': False, 'total_volumes': total_volumes, 'multiattach': False }) else: raise TYDSDriverException( reason=_( 'Backend storage pool "%s" not found.') % pool_name ) return pools_stats def _get_volume_by_name(self, volume_name): """Get volume information by name.""" volume_list = self.client.get_volumes() for vol in volume_list: if vol.get('blockName') == volume_name: return vol # Returns an empty dictionary indicating that the volume with the # corresponding name was not found return {} def _get_snapshot_by_name(self, snapshot_name, volume_id=None): """Get snapshot information by name and optional volume ID.""" snapshot_list = self.client.get_snapshot(volume_id) for snap in snapshot_list: if snap.get('snapShotName') == snapshot_name: return snap # Returns an empty dictionary indicating that a snapshot with the # corresponding name was not found return {} @staticmethod def _convert_gb(size, unit): """Convert size from the given unit to GB.""" size_gb = 0 if unit in ['B', '']: size_gb = size / units.Gi elif unit in ['M', 'MB']: size_gb = size / units.Ki return float('%.0f' % size_gb) def _clone_volume(self, pool_name, block_name, block_id, target_pool_name, target_pool_id, target_block_name): self.client.create_clone_volume( pool_name, block_name, block_id, target_pool_name, target_pool_id, target_block_name ) @coordination.synchronized('tyds-copy-{lun_name}-progress') def _wait_copy_progress(lun_id, lun_name, target_block): try: ret = False while_exit = False rescan = 0 interval = self.configuration.tyds_copy_progress_interval while True: rescan += 1 progress_data = self.client.get_copy_progress( lun_id, lun_name, target_block) progress = progress_data.get('progress') # finished clone if progress == '100%': # check new volume existence target = self._get_volume_by_name(target_block) if not target: LOG.info( 'Clone rescan: %(rescan)s, target volume ' 'completed delayed, from %(block_name)s to ' '%(target_block_name)s.', {'rescan': rescan, 'block_name': lun_name, 'target_block_name': target_block}) time.sleep(interval) continue LOG.info( 'Clone rescan: %(rescan)s, task done from ' '%(block_name)s to %(target_block_name)s.', {'rescan': rescan, 'block_name': lun_name, 'target_block_name': target_block}) while_exit = True ret = True elif progress: LOG.info( "Clone rescan: %(rescan)s, progress: %(progress)s," " block_name: %(block_name)s, target_block_name: " "%(target_block_name)s", {"rescan": rescan, "progress": progress, "block_name": lun_name, "target_block_name": target_block}) else: LOG.error( 'Copy: rescan: %(rescan)s, task error from ' '%(block_name)s to %(target_block_name)s.', {'rescan': rescan, 'block_name': lun_name, 'target_block_name': target_block_name}) while_exit = True if while_exit: break time.sleep(interval) return ret except Exception as err: LOG.error('Copy volume failed reason: %s', err) return False if _wait_copy_progress(block_id, block_name, target_block_name): LOG.debug(COPY_VOLUME_DONE, pool_name, block_name, target_pool_name, target_block_name) else: self._delete_volume_if_clone_failed(target_block_name, pool_name, block_name, target_block_name) msg = _("copy volume failed from %s to %s") % ( block_name, target_block_name) raise TYDSDriverException(reason=msg) def _delete_volume_if_clone_failed(self, target_block_name, pool_name, block_name, target_pool_name): target_volume = self._get_volume_by_name(target_block_name) if 
target_volume: self.client.delete_volume(target_volume.get('id')) LOG.debug(COPY_VOLUME_FAILED, pool_name, block_name, target_pool_name, target_block_name) def create_export(self, context, volume, connector): pass def create_volume(self, volume): LOG.info("Creating volume '%s'", volume.name) vol_name = cinder_utils.convert_str(volume.name) size = int(volume.size) * 1024 pool_name = volume_utils.extract_host(volume.host, 'pool') stripe_size = self.configuration.tyds_stripe_size self.client.create_volume(vol_name, size, pool_name, stripe_size) LOG.debug(CREATE_VOLUME_SUCCESS, vol_name, size, pool_name, stripe_size) def retype(self, context, volume, new_type, diff, host): # success return True, None def delete_volume(self, volume): LOG.debug("deleting volume '%s'", volume.name) vol_name = cinder_utils.convert_str(volume.name) vol = self._get_volume_by_name(vol_name) if vol and vol.get('id'): self.client.delete_volume(vol.get('id')) LOG.debug(DELETE_VOLUME_SUCCESS, vol_name) else: LOG.info('Delete volume %s not found.', vol_name) def ensure_export(self, context, volume): pass def remove_export(self, context, volume): pass def initialize_connection(self, volume, connector): LOG.debug('initialize_connection: volume %(vol)s with connector ' '%(conn)s', {'vol': volume.name, 'conn': connector}) pool_name = volume_utils.extract_host(volume.host, 'pool') volume_name = cinder_utils.convert_str(volume.name) group_name = "initiator-group-" + cinder_utils.convert_str( connector['uuid']) vol_info = {"name": volume_name, "size": volume.size, "pool": pool_name} # Check initiator existence initiator_list = self.client.get_initiator_list() initiator_existence = False if initiator_list: initiator_existence = any( initiator['group_name'] == group_name for initiator in initiator_list ) if not initiator_existence: # Create initiator client = [{"ip": connector["ip"], "iqn": connector["initiator"]}] self.client.create_initiator_group(group_name=group_name, client=client) # Check Initiator-Target connection existence # add new volume to existing Initiator-Target connection it_list = self.client.get_initiator_target_connections() it_info = None if it_list: it_info = next((it for it in it_list if group_name in it['target_name']), None) if it_info: target_iqn = it_info['target_iqn'] lun_info = next((lun for lun in it_info['block'] if lun['name'] == volume_name), None) if not lun_info: # Add new volume to existing Initiator-Target connection target_name_list = it_info['hostName'] vols_info = it_info['block'] vols_info.append(vol_info) self.client.modify_target(target_iqn, target_name_list, vols_info) else: # Create new Initiator-Target connection target_node_list = self.client.get_target() target_name_list = [target['name'] for target in target_node_list] self.client.create_target(group_name, target_name_list, [vol_info]) it_list = self.client.get_initiator_target_connections() if it_list: it_info = next( (it for it in it_list if group_name in it['target_name']), None) if it_info: target_name = it_info['target_name'] target_iqn = it_info['target_iqn'] lun_info = next((lun for lun in it_info['block'] if lun['name'] == volume_name), None) lun_id = lun_info['lunid'] if lun_info else 0 # Generate config self.client.generate_config(target_iqn) # Generate return info target_node_list = self.client.get_target() target_node = target_node_list[0] target_ip = target_node['ipAddr'] target_portal = '[%s]:3260' % target_ip if ':' in target_ip \ else '%s:3260' % target_ip target_iqns = [target_name] * len(target_node_list) 
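# NOTE: one iSCSI portal is advertised per storage node below, while the target IQN and LUN ID stay the same for every node, so multipath-capable initiators can reach the volume through any portal.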
target_portals = ['[%s]:3260' % p['ipAddr'] if ':' in p['ipAddr'] else '%s:3260' % p['ipAddr'] for p in target_node_list] target_luns = [lun_id] * len(target_node_list) properties = { 'target_discovered': False, 'target_portal': target_portal, 'target_lun': lun_id, 'target_iqns': target_iqns, 'target_portals': target_portals, 'target_luns': target_luns } LOG.debug('connection properties: %s', properties) LOG.debug(ATTACH_VOLUME_SUCCESS, volume_name, connector.get('ip'), connector.get('host')) return {'driver_volume_type': 'iscsi', 'data': properties} else: raise exception.VolumeBackendAPIException( data=_('initialize_connection: Failed to create IT ' 'connection for volume %s') % volume_name) def terminate_connection(self, volume, connector, **kwargs): if not connector: # If the connector is empty, the info log is recorded and # returned directly, without subsequent separation operations LOG.info( "Connector is None. Skipping termination for volume %s.", volume.name) return volume_name = cinder_utils.convert_str(volume.name) group_name = "initiator-group-" + cinder_utils.convert_str( connector['uuid']) data = {} # Check Initiator-Target connection existence and remove volume from # Initiator-Target connection if it exists it_list = self.client.get_initiator_target_connections() it_info = next((it for it in it_list if group_name in it['target_name']), None) if it_info: target_iqn = it_info['target_iqn'] target_name_list = it_info['hostName'] vols_info = it_info['block'] vols_info = [vol for vol in vols_info if vol['name'] != volume_name] if not vols_info: self.client.delete_target(it_info['target_iqn']) initiator_list = self.client.get_initiator_list() initiator_to_delete = None if initiator_list: initiator_to_delete = next( (initiator for initiator in initiator_list if initiator['group_name'] == group_name), None) if initiator_to_delete: self.client.delete_initiator_group( initiator_to_delete['group_id']) self.client.restart_service(host_name=it_info['hostName']) else: self.client.modify_target(target_iqn, target_name_list, vols_info) # record log LOG.debug(DETACH_VOLUME_SUCCESS, volume_name, connector.get( 'ip'), connector.get('host')) LOG.info('Detach volume %s successfully', volume_name) target_node_list = self.client.get_target() target_portals = ['%s:3260' % p['ipAddr'] for p in target_node_list] data['ports'] = target_portals return {'driver_volume_type': 'iscsi', 'data': data} def migrate_volume(self): pass def extend_volume(self, volume, new_size): volume_name = cinder_utils.convert_str(volume.name) pool_name = volume_utils.extract_host(volume.host, 'pool') size_mb = int(new_size) * 1024 self.client.extend_volume(volume_name, pool_name, size_mb) LOG.debug(EXTEND_VOLUME_SUCCESS, volume_name, volume.size * 1024, size_mb) def create_cloned_volume(self, volume, src_vref): """Clone a volume.""" # find pool_id to create clone volume try: target_pool_name = volume_utils.extract_host(volume.host, 'pool') except Exception as err: msg = _('target_pool_name must be specified. 
' 'extra err msg was: %s') % err raise TYDSDriverException(reason=msg) target_pool_id = None pool_list = self.client.get_pools() for pool in pool_list: if target_pool_name == pool.get('name'): target_pool_id = pool.get('id') break if not target_pool_id: msg = _('target_pool_id: must be specified.') raise TYDSDriverException(reason=msg) # find volume id to create volume_list = self.client.get_volumes() block_name = cinder_utils.convert_str(src_vref.name) pool_name = None block_id = None for vol in volume_list: if block_name == vol.get('blockName'): pool_name = vol.get('poolName') block_id = vol.get('id') break if (not pool_name) or (not block_id): msg = _('block_name: %(block_name)s does not matched a ' 'pool_name or a block_id.') % {'block_name': block_name} raise TYDSDriverException(reason=msg) # get a name from new volume target_block_name = cinder_utils.convert_str(volume.name) # ready to create clone volume self._clone_volume(pool_name, block_name, block_id, target_pool_name, target_pool_id, target_block_name) # handle the case where the new volume size is larger than the source if volume['size'] > src_vref.get('size'): size_mb = int(volume['size']) * 1024 self.client.extend_volume(target_block_name, target_pool_name, size_mb) LOG.debug(EXTEND_VOLUME_SUCCESS, target_block_name, src_vref.get('size') * 1024, size_mb) def create_snapshot(self, snapshot): """Creates a snapshot.""" volume_name = cinder_utils.convert_str(snapshot.volume_name) snapshot_name = cinder_utils.convert_str(snapshot.name) vol = self._get_volume_by_name(volume_name) if vol and vol.get('id'): comment = '%s/%s' % (volume_name, snapshot_name) self.client.create_snapshot(snapshot_name, vol.get('id'), comment) LOG.debug(CREATE_SNAPSHOT_SUCCESS, snapshot_name, volume_name) else: msg = _('Volume "%s" not found.') % volume_name raise TYDSDriverException(reason=msg) def delete_snapshot(self, snapshot): """Deletes a snapshot.""" snapshot_name = cinder_utils.convert_str(snapshot.name) volume_name = cinder_utils.convert_str(snapshot.volume_name) snap = self._get_snapshot_by_name(snapshot_name) if snap and snap.get('id'): self.client.delete_snapshot(snap.get('id')) LOG.debug(DELETE_SNAPSHOT_SUCCESS, snapshot_name, volume_name) else: LOG.info('Delete snapshot %s not found.', snapshot_name) def create_volume_from_snapshot(self, volume, snapshot): """Creates a volume from a snapshot.""" snapshot_name = cinder_utils.convert_str(snapshot.name) volume_name = cinder_utils.convert_str(volume.name) pool_name = volume_utils.extract_host(volume.host, 'pool') source_volume = cinder_utils.convert_str(snapshot.volume_name) src_vol = self._get_volume_by_name(source_volume) if not src_vol: msg = _('Volume "%s" not found in ' 'create_volume_from_snapshot.') % volume_name raise TYDSDriverException(reason=msg) self.client.create_volume_from_snapshot(volume_name, pool_name, snapshot_name, source_volume, src_vol.get('poolName')) LOG.debug(CREATE_VOLUME_FROM_SNAPSHOT_SUCCESS, volume_name, pool_name, snapshot_name, source_volume, src_vol.get('poolName')) @coordination.synchronized('tyds-clone-{source_name}-progress') def _wait_clone_progress(task_id, source_name, target_name): ret = False while_exit = False rescan = 0 interval = self.configuration.tyds_clone_progress_interval while True: rescan += 1 progress = self.client.get_clone_progress( task_id, source_name).get('progress', '') if progress == '100%': target = self._get_volume_by_name(target_name) if not target: LOG.info('Clone: rescan: %(rescan)s, task not begin, ' 'from %(source)s to 
%(target)s.', {'rescan': rescan, 'source': source_name, 'target': target_name}) time.sleep(interval) continue LOG.info('Clone: rescan: %(rescan)s, task done from ' '%(source)s to %(target)s.', {'rescan': rescan, 'source': source_name, 'target': target_name}) while_exit = True ret = True elif re.fullmatch(r'^\d{1,2}%$', progress): LOG.info('Clone: rescan: %(rescan)s, task progress: ' '%(progress)s, from %(source)s to %(target)s.', {'rescan': rescan, 'progress': progress, 'source': source_name, 'target': target_name}) else: while_exit = True LOG.error('Clone: rescan: %(rescan)s, task error from ' '%(source)s to %(target)s.', {'rescan': rescan, 'source': source_name, 'target': target_name}) if while_exit: break time.sleep(interval) return ret if _wait_clone_progress(src_vol.get('id'), source_volume, volume_name): LOG.debug(CREATE_VOLUME_FROM_SNAPSHOT_DONE, volume_name, pool_name, snapshot_name, source_volume, src_vol.get('poolName')) # handle the case where the new volume size is larger than the source new_size = volume.size * 1024 old_size = int(src_vol['sizeMB']) if new_size > old_size: self.client.extend_volume(volume_name, pool_name, new_size) LOG.debug(EXTEND_VOLUME_SUCCESS, volume_name, old_size, new_size) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/drivers/toyou/tyds/tyds_client.py0000664000175000017500000004153700000000000024367 0ustar00zuulzuul00000000000000# Copyright 2023 toyou Corp. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import base64 import json import time from oslo_log import log as logging from oslo_utils import netutils import requests from cinder import exception from cinder.i18n import _ LOG = logging.getLogger(__name__) class TydsClient(object): def __init__(self, hostname, port, username, password): """Initializes a new instance of the TydsClient. :param hostname: IP address of the Toyou distributed storage system. :param port: The port to connect to the Toyou distributed storage system. :param username: The username for authentication. :param password: The password for authentication. """ self._username = username self._password = base64.standard_b64encode(password.encode('utf-8') ).decode('utf-8') self._baseurl = f"http://{hostname}:{port}/api" self._snapshot_count = 999 self._token = None self._token_expiration = 0 self._ip = self._get_local_ip() def get_token(self): if self._token and time.time() < self._token_expiration: # Token is not expired, directly return the existing Token return self._token # Token has expired or has not been obtained before, # retrieving the Token again self._token = self.login() self._token_expiration = time.time() + 710 * 60 return self._token def send_http_api(self, url, params=None, method='post'): """Send an HTTP API request to the storage. :param url: The URL for the API request. :param params: The parameters for the API request. :param method: The HTTP method for the API request. Default is 'post'. 
:return: The response from the API request. :raises VolumeBackendAPIException: If the API request fails. """ if params: params = json.dumps(params) url = f"{self._baseurl}/{url}" header = { 'Authorization': self.get_token(), 'Content-Type': 'application/json' } LOG.debug( "Toyou Cinder Driver Requests: http_process header: %(header)s " "url: %(url)s method: %(method)s", {'header': header, 'url': url, 'method': method} ) response = self.do_request(method, url, header, params) return response @staticmethod def _get_local_ip(): """Get the local IP address. :return: The local IP address. """ return netutils.get_my_ipv4() def login(self): """Perform login to obtain an authentication token. :return: The authentication token. :raises VolumeBackendAPIException: If the login request fails or the authentication token cannot be obtained. """ params = { 'REMOTE_ADDR': self._ip, 'username': self._username, 'password': self._password } data = json.dumps(params) url = f"{self._baseurl}/auth/login/" response = self.do_request(method='post', url=url, header={'Content-Type': 'application/json'}, data=data) self._token = response.get('token') return self._token @staticmethod def do_request(method, url, header, data): """Send request to the storage and handle the response. :param method: The HTTP method to use for the request. Valid methods are 'post', 'get', 'put', and 'delete'. :param url: The URL to send the request to. :param header: The headers to include in the request. :param data: The data to send in the request body. :return: The response data returned by the storage system. :raises VolumeBackendAPIException: If the request fails or the response from the storage system is not as expected. """ valid_methods = ['post', 'get', 'put', 'delete'] if method not in valid_methods: raise exception.VolumeBackendAPIException( data=_('Unsupported request type: %s.') % method ) try: req = getattr(requests, method)(url, data=data, headers=header) req.raise_for_status() response = req.json() except requests.exceptions.RequestException as e: msg = (_('Request to %(url)s failed: %(error)s') % {'url': url, 'error': str(e)}) raise exception.VolumeBackendAPIException(data=msg) except ValueError as e: msg = (_('Failed to parse response from %(url)s: %(error)s') % {'url': url, 'error': str(e)}) raise exception.VolumeBackendAPIException(data=msg) LOG.debug('URL: %(url)s, TYPE: %(type)s, CODE: %(code)s, ' 'RESPONSE: %(response)s.', {'url': req.url, 'type': method, 'code': req.status_code, 'response': response}) # Response Error if response.get('code') != '0000': msg = (_('ERROR RESPONSE: %(response)s URL: %(url)s PARAMS: ' '%(params)s.') % {'response': response, 'url': url, 'params': data}) raise exception.VolumeBackendAPIException(data=msg) # return result return response.get('data') def get_pools(self): """Query pool information. :return: A list of pool information. """ url = 'pool/pool/' response = self.send_http_api(url=url, method='get') pool_list = response.get('poolList', []) return pool_list def get_volumes(self): """Query volume information. :return: A list of volume information. """ url = 'block/blocks' vol_list = self.send_http_api(url=url, method='get').get('blockList') return vol_list def create_volume(self, vol_name, size, pool_name, stripe_size): """Create a volume. :param vol_name: The name of the volume. :param size: The size of the volume in MB. :param pool_name: The name of the pool to create the volume in. :param stripe_size: The stripe size of the volume. :return: The response from the API call. 
""" url = 'block/blocks/' params = {'blockName': vol_name, 'sizeMB': size, 'poolName': pool_name, 'stripSize': stripe_size} return self.send_http_api(url=url, method='post', params=params) def delete_volume(self, vol_id): """Delete a volume. :param vol_id: The ID of the volume to delete. """ url = 'block/recycle/forceCreate/' params = {'id': [vol_id]} self.send_http_api(url=url, method='post', params=params) def extend_volume(self, vol_name, pool_name, size_mb): """Extend the size of a volume. :param vol_name: The name of the volume to extend. :param pool_name: The name of the pool where the volume resides. :param size_mb: The new size of the volume in MB. """ url = 'block/blocks/%s/' % vol_name params = {'blockName': vol_name, 'sizeMB': size_mb, 'poolName': pool_name} self.send_http_api(url=url, method='put', params=params) def create_clone_volume(self, *args): """Create a clone of a volume. :param args: The arguments needed for cloning a volume. Args: - pool_name: The name of the source pool. - block_name: The name of the source block. - block_id: The ID of the source block. - target_pool_name: The name of the target pool. - target_pool_id: The ID of the target pool. - target_block_name: The name of the target block. """ pool_name, block_name, block_id, target_pool_name, target_pool_id, \ target_block_name = args params = { 'poolName': pool_name, 'blockName': block_name, 'blockId': block_id, 'copyType': 0, # 0 means shallow copy, currently copy volume first # default shallow copy, 1 means deep copy 'metapoolName': 'NULL', 'targetMetapoolName': 'NULL', 'targetPoolName': target_pool_name, 'targetPoolId': target_pool_id, 'targetBlockName': target_block_name } url = 'block/block/copy/' self.send_http_api(url=url, params=params) def get_snapshot(self, volume_id=None): """Get a list of snapshots. :param volume_id: The ID of the volume to filter snapshots (default: None). :return: The list of snapshots. """ url = 'block/snapshot?pageNumber=1' if volume_id: url += '&blockId=%s' % volume_id url += '&pageSize=%s' response = self.send_http_api( url=url % self._snapshot_count, method='get') if self._snapshot_count < int(response.get('total')): self._snapshot_count = int(response.get('total')) response = self.send_http_api( url=url % self._snapshot_count, method='get') snapshot_list = response.get('snapShotList') return snapshot_list def create_snapshot(self, name, volume_id, comment=''): """Create a snapshot of a volume. :param name: The name of the snapshot. :param volume_id: The ID of the volume to create a snapshot from. :param comment: The optional comment for the snapshot (default: ''). """ url = 'block/snapshot/' params = {'sourceBlock': volume_id, 'snapShotName': name, 'remark': comment} self.send_http_api(url=url, method='post', params=params) def delete_snapshot(self, snapshot_id): """Delete a snapshot. :param snapshot_id: The ID of the snapshot to delete. """ url = 'block/snapshot/%s/' % snapshot_id self.send_http_api(url=url, method='delete') def create_volume_from_snapshot(self, volume_name, pool_name, snapshot_name, source_volume_name, source_pool_name): """Create a volume from a snapshot. :param volume_name: The name of the new volume. :param pool_name: The name of the pool for the new volume. :param snapshot_name: The name of the snapshot to create the volume from. :param source_volume_name: The name of the source volume (snapshot's origin). :param source_pool_name: The name of the pool for the source volume. 
""" url = 'block/clone/' params = {'cloneBlockName': volume_name, 'targetPoolName': pool_name, 'snapName': snapshot_name, 'blockName': source_volume_name, 'poolName': source_pool_name, 'targetMetapoolName': 'NULL'} self.send_http_api(url=url, method='post', params=params) def get_clone_progress(self, volume_id, volume_name): """Get the progress of a volume clone operation. :param volume_id: The ID of the volume being cloned. :param volume_name: The name of the volume being cloned. :return: The progress of the clone operation. """ url = 'block/clone/progress/' params = {'blockId': volume_id, 'blockName': volume_name} progress = self.send_http_api(url=url, method='post', params=params) return progress def get_copy_progress(self, block_id, block_name, target_block_name): """Get the progress of a block copy operation. :param block_id: The ID of the block being copied. :param block_name: The name of the block being copied. :param target_block_name: The name of the target block. :return: The progress of the copy operation. """ url = 'block/block/copyprogress/' params = { 'blockId': block_id, 'blockName': block_name, 'targetBlockName': target_block_name } progress_data = self.send_http_api(url=url, params=params) return progress_data def create_initiator_group(self, group_name, client): """Create an initiator group. :param group_name: The name of the initiator group. :param client: The client information for the initiator group. """ url = 'iscsi/client-group/' params = { 'group_name': group_name, 'client': client, 'chap_auth': 0, 'mode': 'ISCSI' } self.send_http_api(url=url, params=params) def delete_initiator_group(self, group_id): """Delete an initiator group. :param group_id: The ID of the initiator group. :return: The response from the API call. """ url = 'iscsi/client-group/?group_id=%s' % group_id return self.send_http_api(url=url, method='delete') def get_initiator_list(self): """Get the list of initiators. :return: The list of initiators. """ url = 'iscsi/client-group/' res = self.send_http_api(url=url, method='get') initiator_list = res.get('client_group_list') return initiator_list def get_target(self): """Get the list of target hosts. :return: The list of target hosts. """ url = '/host/host/' res = self.send_http_api(url=url, method='get') target = res.get('hostList') return target def create_target(self, group_name, target_list, vols_info): """Create a target. :param group_name: The name of the initiator group. :param target_list: The list of target hosts. :param vols_info: The information of the volumes. :return: The response from the API call. """ url = 'iscsi/target/' params = {"group_name": group_name, "chap_auth": 0, "write_cache": 1, "hostName": ",".join(target_list), "block": vols_info} return self.send_http_api(url=url, params=params, method='post') def delete_target(self, target_name): """Delete a target. :param target_name: The name of the target. :return: The response from the API call. """ url = 'iscsi/target/?targetIqn=%s' % target_name return self.send_http_api(url=url, method='delete') def modify_target(self, target_name, target_list, vol_info): """Modify a target. :param target_name: The name of the target. :param target_list: The list of target hosts. :param vol_info: The information of the volumes. :return: The response from the API call. 
""" url = 'iscsi/target/' params = { "targetIqn": target_name, "chap_auth": 0, "hostName": target_list, "block": vol_info } return self.send_http_api(url=url, params=params, method='put') def get_initiator_target_connections(self): """Get the list of IT (Initiator-Target) connections. :return: The list of IT connections. """ url = 'iscsi/target/' res = self.send_http_api(url=url, method='get') target_list = res.get('target_list') return target_list def generate_config(self, target_name): """Generate configuration for a target. :param target_name: The name of the target. """ url = 'iscsi/target-config/' params = { 'targetName': target_name } self.send_http_api(url=url, params=params, method='post') def restart_service(self, host_name): """Restart the iSCSI service on a host. :param host_name: The name of the host. """ url = 'iscsi/service/restart/' params = { "hostName": host_name } self.send_http_api(url=url, params=params, method='post') ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315577.4031212 cinder-27.0.0/cinder/volume/drivers/veritas_access/0000775000175000017500000000000000000000000022316 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/drivers/veritas_access/__init__.py0000664000175000017500000000000000000000000024415 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/drivers/veritas_access/veritas_iscsi.py0000664000175000017500000010207000000000000025537 0ustar00zuulzuul00000000000000# Copyright 2017 Veritas Technologies LLC. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Veritas Access Driver for ISCSI. """ import ast import hashlib from http import HTTPStatus import json from random import randint from xml.dom import minidom from oslo_config import cfg from oslo_log import log as logging from oslo_service import loopingcall from oslo_utils import netutils from oslo_utils import strutils from oslo_utils import units import requests import requests.auth from cinder.common import constants from cinder import exception from cinder.i18n import _ from cinder import interface from cinder.volume import driver from cinder.volume.drivers.san import san LOG = logging.getLogger(__name__) VA_VOL_OPTS = [ cfg.BoolOpt('vrts_lun_sparse', default=True, help='Create sparse Lun.'), cfg.StrOpt('vrts_target_config', default='/etc/cinder/vrts_target.xml', help='VA config file.') ] CONF = cfg.CONF CONF.register_opts(VA_VOL_OPTS) class NoAuth(requests.auth.AuthBase): """This is a 'authentication' handler. It exists for use with custom authentication systems, such as the one for the Access API, it simply passes the Authorization header as-is. The default authentication handler for requests will clobber the Authorization header. 
""" def __call__(self, r): return r @interface.volumedriver class ACCESSIscsiDriver(driver.ISCSIDriver): """ACCESS Share Driver. Executes commands relating to ACCESS ISCSI. Supports creation of volumes on ACCESS. .. code-block:: none API version history: 1.0 - Initial version. """ VERSION = "1.0" # ThirdPartySytems wiki page CI_WIKI_NAME = "Veritas_Access_CI" LUN_FOUND_INTERVAL = 30 # seconds # TODO(jsbryant) Remove driver in the 'U' release if CI is not fixed. SUPPORTED = False def __init__(self, *args, **kwargs): # Parent sets db, host, _execute and base config super(ACCESSIscsiDriver, self).__init__(*args, **kwargs) self._va_ip = None self._port = None self._user = None self._pwd = None self.iscsi_port = None self._fs_list_str = '/fs' self._target_list_str = '/iscsi/target/list' self._target_status = '/iscsi/target/status' self._lun_create_str = '/iscsi/lun/create' self._lun_destroy_str = '/iscsi/lun/destroy' self._lun_list_str = '/iscsi/lun/list' self._lun_create_from_snap_str = '/iscsi/lun_from_snap/create' self._snapshot_create_str = '/iscsi/lun/snapshot/create' self._snapshot_destroy_str = '/iscsi/lun/snapshot/destroy' self._snapshot_list_str = '/iscsi/lun/snapshot/list' self._lun_clone_create_str = '/iscsi/lun/clone/create' self._lun_extend_str = '/iscsi/lun/growto' self._lun_shrink_str = '/iscsi/lun/shrinkto' self._lun_getid_str = '/iscsi/lun/getlunid' self._target_map_str = '/iscsi/target/map/add' self._target_list_status = '/iscsi/target/full_list' self.configuration.append_config_values(VA_VOL_OPTS) self.configuration.append_config_values(san.san_opts) self.backend_name = (self.configuration.safe_get('volume_' 'backend_name') or 'ACCESS_ISCSI') self.verify = (self.configuration. safe_get('driver_ssl_cert_verify') or False) if self.verify: verify_path = (self.configuration. 
safe_get('driver_ssl_cert_path') or None) if verify_path: self.verify = verify_path @staticmethod def get_driver_options(): return VA_VOL_OPTS def do_setup(self, context): """Any initialization the volume driver does while starting.""" super(ACCESSIscsiDriver, self).do_setup(context) required_config = ['san_ip', 'san_login', 'san_password', 'san_api_port'] for attr in required_config: if not getattr(self.configuration, attr, None): message = (_('config option %s is not set.') % attr) raise exception.InvalidInput(message=message) self._va_ip = self.configuration.san_ip self._user = self.configuration.san_login self._pwd = self.configuration.san_password self._port = self.configuration.san_api_port self._sparse_lun_support = self.configuration.vrts_lun_sparse self.target_info_file = self.configuration.vrts_target_config self.iscsi_port = self.configuration.target_port self.session = self._authenticate_access(self._va_ip, self._user, self._pwd) def _get_va_lun_name(self, name): length = len(name) index = int(length / 2) name1 = name[:index] name2 = name[index:] crc1 = hashlib.md5(name1.encode('utf-8'), usedforsecurity=False).hexdigest()[:5] crc2 = hashlib.md5(name2.encode('utf-8'), usedforsecurity=False).hexdigest()[:5] return 'cinder' + '-' + crc1 + '-' + crc2 def check_for_setup_error(self): """Check if veritas access target is online.""" target_list = self._vrts_parse_xml_file(self.target_info_file) if not self._vrts_get_online_targets(target_list): message = ('ACCESSIscsiDriver setup error as ' 'no target is online') raise exception.VolumeBackendAPIException(message=message) def create_export(self, context, volume, connector): """Driver entry point to get the export info for a new volume.""" pass def remove_export(self, context, volume): """Driver entry point to remove an export for a volume.""" pass def ensure_export(self, context, volume): """Driver entry point to get the export info for an existing volume.""" pass def _vrts_get_iscsi_properties(self, volume, target_name): """Get target and LUN details.""" lun_name = self._get_va_lun_name(volume.id) data = {} path = self._lun_getid_str provider = '%s:%s' % (self._va_ip, self._port) lun_id_list = self._access_api(self.session, provider, path, json.dumps(data), 'GET') if not lun_id_list: message = _('ACCESSIscsiDriver get LUN ID list ' 'operation failed') LOG.error(message) raise exception.VolumeBackendAPIException(message=message) for lun in ast.literal_eval(lun_id_list['output']): vrts_lun_name = lun['storage_object'].split('/')[3] if vrts_lun_name == lun_name: lun_id = int(lun['index']) target_list = self._vrts_parse_xml_file(self.target_info_file) authentication = False portal_ip = "" for target in target_list: if target_name == target['name']: portal_ip = target['portal_ip'] if target['auth'] == '1': auth_user = target['auth_user'] auth_password = target['auth_password'] authentication = True break if portal_ip == "": message = (_('ACCESSIscsiDriver initialize_connection ' 'failed for %s as no portal ip was found') % volume.id) LOG.error(message) raise exception.VolumeBackendAPIException(message=message) portal_list = portal_ip.split(',') target_portal_list = [] for ip in portal_list: if netutils.is_valid_ipv6(ip): target_portal_list.append('[%s]:%s' % (ip, str(self.iscsi_port))) else: target_portal_list.append('%s:%s' % (ip, str(self.iscsi_port))) iscsi_properties = {} iscsi_properties['target_discovered'] = True iscsi_properties['target_iqn'] = target_name iscsi_properties['target_portal'] = target_portal_list[0] if 
len(target_portal_list) > 1: iscsi_properties['target_portals'] = target_portal_list iscsi_properties['target_lun'] = lun_id iscsi_properties['volume_id'] = volume.id if authentication: iscsi_properties['auth_username'] = auth_user iscsi_properties['auth_password'] = auth_password iscsi_properties['auth_method'] = 'CHAP' return iscsi_properties def _get_vrts_lun_list(self): """Get Lun list.""" data = {} path = self._lun_list_str provider = '%s:%s' % (self._va_ip, self._port) lun_list = self._access_api(self.session, provider, path, json.dumps(data), 'GET') if not lun_list: message = _('ACCESSIscsiDriver get LUN list ' 'operation failed') LOG.error(message) raise exception.VolumeBackendAPIException(message=message) return lun_list def _vrts_target_initiator_mapping(self, target_name, initiator_name): """Map target to initiator.""" path = self._target_map_str provider = '%s:%s' % (self._va_ip, self._port) data = {} data["target_name"] = target_name data["initiator_name"] = initiator_name result = self._access_api(self.session, provider, path, json.dumps(data), 'POST') if not result: message = (_('ACCESSIscsiDriver target-initiator mapping ' 'failed for target %s') % target_name) LOG.error(message) raise exception.VolumeBackendAPIException(message=message) def initialize_connection(self, volume, connector, initiator_data=None): """Initializes the connection and returns connection info. The iscsi driver returns a driver_volume_type of 'iscsi'. the format of the driver data is defined in _vrts_get_iscsi_properties. Example return value:: { 'driver_volume_type': 'iscsi' 'data': { 'target_discovered': True, 'target_iqn': 'iqn.2010-10.org.openstack:volume-00000001', 'target_portal': '127.0.0.0.1:3260', 'target_lun': 1, 'volume_id': '12345678-1234-4321-1234-123456789012', } } """ lun_name = self._get_va_lun_name(volume.id) target = {'target_name': ''} def _inner(): lun_list = self._get_vrts_lun_list() for lun in lun_list['output']['output']['luns']: if lun['lun_name'] == lun_name: target['target_name'] = lun['target_name'] raise loopingcall.LoopingCallDone() timer = loopingcall.FixedIntervalWithTimeoutLoopingCall(_inner) try: timer.start(interval=5, timeout=self.LUN_FOUND_INTERVAL).wait() except loopingcall.LoopingCallTimeOut: message = (_('ACCESSIscsiDriver initialize_connection ' 'failed for %s as no target was found') % volume.id) LOG.error(message) raise exception.VolumeBackendAPIException(message=message) self._vrts_target_initiator_mapping(target['target_name'], connector['initiator']) iscsi_properties = self._vrts_get_iscsi_properties( volume, target['target_name']) return { 'driver_volume_type': 'iscsi', 'data': iscsi_properties } def terminate_connection(self, volume, connector, **kwargs): """Disallow connection from connector.""" pass def _vrts_parse_xml_file(self, filename): """VRTS target info. 
iqn.2017-02.com.veritas:target03 10.182.174.188 iqn.2017-02.com.veritas:target04 10.182.174.189 :param filename: the configuration file :returns: list """ myfile = open(filename, 'r') data = myfile.read() myfile.close() dom = minidom.parseString(data) mylist = [] target = {} try: for trg in dom.getElementsByTagName('Target'): target['name'] = (trg.getElementsByTagName('Name')[0] .childNodes[0].nodeValue) target['portal_ip'] = (trg.getElementsByTagName('PortalIP')[0] .childNodes[0].nodeValue) target['auth'] = (trg.getElementsByTagName('Authentication')[0] .childNodes[0].nodeValue) if target['auth'] == '1': target['auth_user'] = (trg.getElementsByTagName ('Auth_username')[0] .childNodes[0].nodeValue) target['auth_password'] = (trg.getElementsByTagName ('Auth_password')[0] .childNodes[0].nodeValue) mylist.append(target) target = {} except IndexError: pass return mylist def _vrts_get_fs_list(self): """Get FS list.""" path = self._fs_list_str provider = '%s:%s' % (self._va_ip, self._port) data = {} fs_list = self._access_api(self.session, provider, path, json.dumps(data), 'GET') if not fs_list: message = _('ACCESSIscsiDriver get FS list ' 'operation failed') LOG.error(message) raise exception.VolumeBackendAPIException(message=message) return fs_list def _vrts_get_online_targets(self, available_targets): """Out of available targets get list of targets which are online.""" online_targets = [] path = self._target_list_status provider = '%s:%s' % (self._va_ip, self._port) data = {} target_status_list = self._access_api(self.session, provider, path, json.dumps(data), 'GET') try: target_status_output = (ast. literal_eval(target_status_list['output'])) except KeyError: message = _('ACCESSIscsiDriver get online target list ' 'operation failed') LOG.error(message) raise exception.VolumeBackendAPIException(message=message) for target in available_targets: if target['name'] in target_status_output.keys(): if target_status_output[target['name']] == 'ONLINE': online_targets.append(target) return online_targets def _vrts_get_targets_store(self): """Get target and its store list.""" path = self._target_list_str provider = '%s:%s' % (self._va_ip, self._port) data = {} target_list = self._access_api(self.session, provider, path, json.dumps(data), 'GET') if not target_list: message = _('ACCESSIscsiDriver get target list ' 'operation failed') LOG.error(message) raise exception.VolumeBackendAPIException(message=message) return target_list['output']['output']['targets'] def _vrts_get_assigned_store(self, target, vrts_target_list): """Get the store mapped to given target.""" for vrts_target in vrts_target_list: if vrts_target['wwn'] == target: return vrts_target['fs_list'][0] def _vrts_is_space_available_in_store(self, vol_size, store_name, fs_list): """Check whether space is available on store.""" if self._sparse_lun_support: return True for fs in fs_list: if fs['name'] == store_name: fs_avilable_space = (int(fs['file_storage_capacity']) - int(fs['file_storage_used'])) free_space = fs_avilable_space / units.Gi if free_space > vol_size: return True break return False def _vrts_get_suitable_target(self, target_list, vol_size): """Get a suitable target for lun creation. Picking random target at first, if space is not available in first selected target then check each target one by one for suitable one. 
""" target_count = len(target_list) incrmnt_pointer = 0 target_index = randint(0, (target_count - 1)) fs_list = self._vrts_get_fs_list() vrts_target_list = self._vrts_get_targets_store() store_name = self._vrts_get_assigned_store( target_list[target_index]['name'], vrts_target_list ) if not self._vrts_is_space_available_in_store( vol_size, store_name, fs_list): while (incrmnt_pointer != target_count - 1): target_index = (target_index + 1) % target_count store_name = self._vrts_get_assigned_store( target_list[target_index]['name'], vrts_target_list ) if self._vrts_is_space_available_in_store( vol_size, store_name, fs_list): return target_list[target_index]['name'] incrmnt_pointer = incrmnt_pointer + 1 else: return target_list[target_index]['name'] return False def create_volume(self, volume): """Creates a Veritas Access Iscsi LUN.""" create_dense = False if 'dense' in volume.metadata.keys(): create_dense = strutils.bool_from_string( volume.metadata['dense']) lun_name = self._get_va_lun_name(volume.id) lun_size = '%sg' % volume.size path = self._lun_create_str provider = '%s:%s' % (self._va_ip, self._port) target_list = self._vrts_parse_xml_file(self.target_info_file) target_name = self._vrts_get_suitable_target(target_list, volume.size) if not target_name: message = (_('ACCESSIscsiDriver create volume failed %s ' 'as no space is available') % volume.id) LOG.error(message) raise exception.VolumeBackendAPIException(message=message) data = {} data["lun_name"] = lun_name data["target_name"] = target_name data["size"] = lun_size if not self._sparse_lun_support or create_dense: data["option"] = "option=dense" result = self._access_api(self.session, provider, path, json.dumps(data), 'POST') if not result: message = (_('ACCESSIscsiDriver create volume failed %s') % volume.id) LOG.error(message) raise exception.VolumeBackendAPIException(message=message) def delete_volume(self, volume): """Deletes a Veritas Access Iscsi LUN.""" lun_name = self._get_va_lun_name(volume.id) lun_list = self._get_vrts_lun_list() target_name = "" for lun in lun_list['output']['output']['luns']: if lun['lun_name'] == lun_name: target_name = lun['target_name'] path = self._lun_destroy_str provider = '%s:%s' % (self._va_ip, self._port) data = {} data["lun_name"] = lun_name data["target_name"] = target_name result = self._access_api(self.session, provider, path, json.dumps(data), 'POST') if not result: message = (_('ACCESSIscsiDriver delete volume failed %s') % volume.id) LOG.error(message) raise exception.VolumeBackendAPIException(message=message) def create_snapshot(self, snapshot): """Creates a snapshot of LUN.""" lun_name = self._get_va_lun_name(snapshot.volume_id) snap_name = self._get_va_lun_name(snapshot.id) path = self._snapshot_create_str provider = '%s:%s' % (self._va_ip, self._port) data = {} data["lun_name"] = lun_name data["snap_name"] = snap_name result = self._access_api(self.session, provider, path, json.dumps(data), 'POST') if not result: message = (_('ACCESSIscsiDriver create snapshot failed for %s') % snapshot.volume_id) LOG.error(message) raise exception.VolumeBackendAPIException(message=message) def delete_snapshot(self, snapshot): """Deletes a snapshot of LUN.""" lun_name = self._get_va_lun_name(snapshot.volume_id) snap_name = self._get_va_lun_name(snapshot.id) path = self._snapshot_destroy_str provider = '%s:%s' % (self._va_ip, self._port) data = {} data["lun_name"] = lun_name data["snap_name"] = snap_name result = self._access_api(self.session, provider, path, json.dumps(data), 'POST') if not result: 
message = (_('ACCESSIscsiDriver delete snapshot failed for %s') % snapshot.id) LOG.error(message) raise exception.VolumeBackendAPIException(message=message) def create_cloned_volume(self, volume, src_vref): """Create a clone of the volume.""" lun_name = self._get_va_lun_name(src_vref.id) cloned_lun_name = self._get_va_lun_name(volume.id) lun_found = False lun_list = self._get_vrts_lun_list() for lun in lun_list['output']['output']['luns']: if lun['lun_name'] == lun_name: store_name = lun['fs_name'] lun_found = True break if not lun_found: message = (_('ACCESSIscsiDriver create cloned volume ' 'failed %s as no source volume found') % volume.id) LOG.error(message) raise exception.VolumeBackendAPIException(message=message) fs_list = self._vrts_get_fs_list() if not self._vrts_is_space_available_in_store(volume.size, store_name, fs_list): message = (_('ACCESSIscsiDriver create cloned volume ' 'failed %s as no space is available') % volume.id) LOG.error(message) raise exception.VolumeBackendAPIException(message=message) path = self._lun_clone_create_str provider = '%s:%s' % (self._va_ip, self._port) data = {} data["lun_name"] = lun_name data["clone_name"] = cloned_lun_name result = self._access_api(self.session, provider, path, json.dumps(data), 'POST') if not result: message = (_('ACCESSIscsiDriver create cloned ' 'volume failed for %s') % src_vref.id) LOG.error(message) raise exception.VolumeBackendAPIException(message=message) if volume.size > src_vref.size: self._vrts_extend_lun(volume, volume.size) def create_volume_from_snapshot(self, volume, snapshot): """Creates a volume from snapshot.""" LOG.debug('ACCESSIscsiDriver create_volume_from_snapshot called') lun_name = self._get_va_lun_name(volume.id) snap_name = self._get_va_lun_name(snapshot.id) path = self._snapshot_list_str provider = '%s:%s' % (self._va_ip, self._port) data = {} data["snap_name"] = snap_name snap_info = self._access_api(self.session, provider, path, json.dumps(data), 'GET') target_name = "" if snap_info: for snap in snap_info['output']['output']['snapshots']: if snap['snapshot_name'] == snap_name: target_name = snap['target_name'] break if target_name == "": message = (_('ACCESSIscsiDriver create volume from snapshot ' 'failed for volume %s as failed to gather ' 'snapshot details') % volume.id) LOG.error(message) raise exception.VolumeBackendAPIException(message=message) vrts_target_list = self._vrts_get_targets_store() store_name = self._vrts_get_assigned_store( target_name, vrts_target_list) fs_list = self._vrts_get_fs_list() if not self._vrts_is_space_available_in_store(volume.size, store_name, fs_list): message = (_('ACCESSIscsiDriver create volume from snapshot ' 'failed %s as no space is available') % volume.id) LOG.error(message) raise exception.VolumeBackendAPIException(message=message) path = self._lun_create_from_snap_str provider = '%s:%s' % (self._va_ip, self._port) data = {} data["lun_name"] = lun_name data["snap_name"] = snap_name result = self._access_api(self.session, provider, path, json.dumps(data), 'POST') if not result: message = (_('ACCESSIscsiDriver create volume from snapshot ' 'failed for volume %s') % volume.id) LOG.error(message) raise exception.VolumeBackendAPIException(message=message) if volume.size > snapshot.volume_size: self._vrts_extend_lun(volume, volume.size) def _vrts_extend_lun(self, volume, size): """Extend vrts LUN to given size.""" lun_name = self._get_va_lun_name(volume.id) target = {'target_name': ''} def _inner(): lun_list = self._get_vrts_lun_list() for lun in 
lun_list['output']['output']['luns']: if lun['lun_name'] == lun_name: target['target_name'] = lun['target_name'] raise loopingcall.LoopingCallDone() timer = loopingcall.FixedIntervalWithTimeoutLoopingCall(_inner) try: timer.start(interval=5, timeout=self.LUN_FOUND_INTERVAL).wait() except loopingcall.LoopingCallTimeOut: return False lun_size = '%sg' % size path = self._lun_extend_str provider = '%s:%s' % (self._va_ip, self._port) data = {} data["lun_name"] = lun_name data["target_name"] = target['target_name'] data["size"] = lun_size result = self._access_api(self.session, provider, path, json.dumps(data), 'POST') return result def extend_volume(self, volume, size): """Extend the volume to new size""" lun_name = self._get_va_lun_name(volume.id) lun_found = False lun_list = self._get_vrts_lun_list() for lun in lun_list['output']['output']['luns']: if lun['lun_name'] == lun_name: store_name = lun['fs_name'] lun_found = True break if not lun_found: message = (_('ACCESSIscsiDriver extend volume ' 'failed %s as no volume found at backend') % volume.id) LOG.error(message) raise exception.VolumeBackendAPIException(message=message) fs_list = self._vrts_get_fs_list() if not self._vrts_is_space_available_in_store(size, store_name, fs_list): message = (_('ACCESSIscsiDriver extend volume ' 'failed %s as no space is available') % volume.id) LOG.error(message) raise exception.VolumeBackendAPIException(message=message) result = self._vrts_extend_lun(volume, size) if not result: message = (_('ACCESSIscsiDriver extend ' 'volume failed for %s') % volume.id) LOG.error(message) raise exception.VolumeBackendAPIException(message=message) def _get_api(self, provider, tail): api_root = 'https://%s/api/access' % (provider) if tail == self._fs_list_str: api_root = 'https://%s/api' % (provider) return api_root + tail def _access_api(self, session, provider, path, input_data, method): """Returns False if failure occurs.""" kwargs = {'data': input_data} if not isinstance(input_data, dict): kwargs['headers'] = {'Content-Type': 'application/json'} full_url = self._get_api(provider, path) response = session.request(method, full_url, **kwargs) if response.status_code == 401: LOG.debug('Generating new session.') self.session = self._authenticate_access(self._va_ip, self._user, self._pwd) response = self.session.request(method, full_url, **kwargs) if response.status_code != HTTPStatus.OK: LOG.error('Access API operation failed with HTTP error code %s.', str(response.status_code)) return False result = response.json() return result def _authenticate_access(self, address, username, password): session = requests.session() session.verify = self.verify session.auth = NoAuth() # Here 'address' will be only IPv4. 
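# NOTE: the authentication response carries a bearer token; it is stored in the session headers below so subsequent Access API requests reuse the same authenticated session.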
response = session.post('https://%s:%s/api/rest/authenticate' % (address, self._port), data={'username': username, 'password': password}) if response.status_code != HTTPStatus.OK: LOG.error('Failed to authenticate to remote cluster at %s as %s.', address, username) raise exception.NotAuthorized(_('Authentication failure.')) result = response.json() session.headers.update({'Authorization': 'Bearer {}' .format(result['token'])}) session.headers.update({'Content-Type': 'application/json'}) return session def _get_va_backend_capacity(self): """Get VA backend total and free capacity.""" target_list = self._vrts_parse_xml_file(self.target_info_file) fs_list = self._vrts_get_fs_list() vrts_target_list = self._vrts_get_targets_store() total_space = 0 free_space = 0 target_name = [] target_store = [] for target in target_list: target_name.append(target['name']) for target in vrts_target_list: if target['wwn'] in target_name: target_store.append(target['fs_list'][0]) for store in target_store: for fs in fs_list: if fs['name'] == store: total_space = total_space + fs['file_storage_capacity'] fs_free_space = (fs['file_storage_capacity'] - fs['file_storage_used']) if fs_free_space > free_space: free_space = fs_free_space total_capacity = int(total_space) / units.Gi free_capacity = int(free_space) / units.Gi return (total_capacity, free_capacity) def get_volume_stats(self, refresh=False): """Retrieve status info from share volume group.""" total_capacity, free_capacity = self._get_va_backend_capacity() backend_name = self.configuration.safe_get('volume_backend_name') res_percentage = self.configuration.safe_get('reserved_percentage') self._stats["volume_backend_name"] = backend_name or 'VeritasISCSI' self._stats["vendor_name"] = 'Veritas' self._stats["reserved_percentage"] = res_percentage or 0 self._stats["driver_version"] = self.VERSION self._stats["storage_protocol"] = constants.ISCSI self._stats['total_capacity_gb'] = total_capacity self._stats['free_capacity_gb'] = free_capacity self._stats['thin_provisioning_support'] = True return self._stats ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/drivers/veritas_cnfs.py0000664000175000017500000001731400000000000022366 0ustar00zuulzuul00000000000000# Copyright (c) 2017 Veritas Technologies LLC # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import os from oslo_log import log as logging from oslo_utils import excutils from cinder.common import constants from cinder import exception from cinder.i18n import _ from cinder import interface import cinder.privsep.path from cinder.volume.drivers import nfs LOG = logging.getLogger(__name__) @interface.volumedriver class VeritasCNFSDriver(nfs.NfsDriver): """Veritas Clustered NFS based cinder driver .. code-block:: default Version History: 1.0.0 - Initial driver implementations for Kilo. 1.0.1 - Liberty release driver not implemented. Place holder for Liberty release in case we need to support. 
1.0.2 - cinder.interface.volumedriver decorator. Mitaka/Newton/Okata Release 1.0.3 - Separate create_cloned_volume() and create_volume_from_snapshot () functionality. Pike Release Executes commands relating to Volumes. """ VERSION = "1.0.3" # ThirdPartySytems wiki page CI_WIKI_NAME = "Veritas_Access_CI" DRIVER_VOLUME_TYPE = constants.NFS_VARIANT # TODO(jsbryant) Remove driver in the 'V' release if CI is not fixed. SUPPORTED = False def __init__(self, *args, **kwargs): self._execute = None self._context = None super(VeritasCNFSDriver, self).__init__(*args, **kwargs) def do_setup(self, context): self._context = context super(VeritasCNFSDriver, self).do_setup(context) opts = self.configuration.nfs_mount_options if not opts or opts.find('vers=3') == -1 or ( opts.find('nfsvers=3')) == -1: msg = _("NFS is not configured to use NFSv3") LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) def create_volume_from_snapshot(self, volume, snapshot): """Creates a volume from snapshot.""" LOG.debug('VeritasNFSDriver create_volume_from_snapshot called ' 'volume_id = %(volume)s and snapshot_id = %(snapshot)s', {'volume': volume.id, 'snapshot': snapshot.id}) snap_name = snapshot.name vol_size = volume.size snap_size = snapshot.volume_size self._do_clone_volume(snapshot, snap_name, volume) volume.provider_location = snapshot.provider_location if vol_size != snap_size: try: self.extend_volume(volume, vol_size) except exception.ExtendVolumeError as ex: with excutils.save_and_reraise_exception(): LOG.error('Failed to extend Volume: %s', ex.msg) path = self.local_path(volume) self._delete_file(path) return {'provider_location': volume.provider_location} def _get_vol_by_id(self, volid): vol = self.db.volume_get(self._context, volid) return vol def _delete_file(self, path): """Deletes file from disk and return result as boolean.""" try: LOG.debug('Deleting file at path %s', path) self._execute('rm', '-f', path, run_as_root=True) except OSError as ex: LOG.warning('Exception during deleting %s', ex.strerror) def create_snapshot(self, snapshot): """Create a snapshot of the volume.""" src_vol_id = snapshot.volume_id src_vol_name = snapshot.volume_name src_vol = self._get_vol_by_id(src_vol_id) self._do_clone_volume(src_vol, src_vol_name, snapshot) snapshot.provider_location = src_vol.provider_location LOG.debug("VeritasNFSDriver create_snapshot %r", snapshot.provider_location) return {'provider_location': snapshot.provider_location} def delete_snapshot(self, snapshot): """Delete a snapshot.""" if not snapshot.provider_location: LOG.warning('Snapshot %s does not have provider_location ' 'specified, skipping', snapshot.name) return self._ensure_share_mounted(snapshot.provider_location) snap_path = self.local_path(snapshot) self._delete_file(snap_path) def create_cloned_volume(self, volume, src_vref): """Create a clone of the volume.""" LOG.debug('VeritasNFSDriver create_cloned_volume called ' 'volume_id = %(volume)s and src_vol_id = %(src_vol_id)s', {'volume': volume.id, 'src_vol_id': src_vref.id}) src_vol_name = src_vref.name vol_size = volume.size src_vol_size = src_vref.size self._do_clone_volume(src_vref, src_vol_name, volume) volume.provider_location = src_vref.provider_location if vol_size != src_vol_size: try: self.extend_volume(volume, vol_size) except exception.ExtendVolumeError as ex: with excutils.save_and_reraise_exception(): LOG.error('Failed to extend Volume: %s', ex.msg) path = self.local_path(volume) self._delete_file(path) return {'provider_location': volume.provider_location} def 
_get_local_volume_path(self, provider_loc, vol_name): mnt_path = self._get_mount_point_for_share(provider_loc) vol_path = os.path.join(mnt_path, vol_name) return vol_path def _do_clone_volume(self, src_vol, src_vol_name, tgt_vol): cnfs_share = src_vol.provider_location tgt_vol_name = tgt_vol.name tgt_vol_path = self._get_local_volume_path(cnfs_share, tgt_vol_name) src_vol_path = self._get_local_volume_path(cnfs_share, src_vol_name) tgt_vol_path_spl = tgt_vol_path + "::snap:vxfs:" cinder.privsep.path.symlink(src_vol_path, tgt_vol_path_spl) LOG.debug("VeritasNFSDriver: do_clone_volume %(src_vol_path)s " "%(tgt_vol_path)s %(tgt_vol_path_spl)s", {'src_vol_path': src_vol_path, 'tgt_vol_path_spl': tgt_vol_path_spl, 'tgt_vol_path': tgt_vol_path}) if not os.path.exists(tgt_vol_path): self._execute('rm', '-f', tgt_vol_path_spl, run_as_root=True) msg = _("Filesnap over NFS is not supported, " "removing the ::snap:vxfs: file") LOG.error(msg) raise exception.NfsException(msg) def extend_volume(self, volume, size): """Extend the volume to new size""" path = self.local_path(volume) self._execute('truncate', '-s', '%sG' % size, path, run_as_root=True) LOG.debug("VeritasNFSDriver: extend_volume volume_id = %s", volume.id) def _update_volume_stats(self): super(VeritasCNFSDriver, self)._update_volume_stats() backend_name = self.configuration.safe_get('volume_backend_name') res_percentage = self.configuration.safe_get('reserved_percentage') self._stats["volume_backend_name"] = backend_name or 'VeritasCNFS' self._stats["vendor_name"] = 'Veritas' self._stats["reserved_percentage"] = res_percentage or 0 self._stats["driver_version"] = self.VERSION self._stats["storage_protocol"] = self.DRIVER_VOLUME_TYPE ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315577.4031212 cinder-27.0.0/cinder/volume/drivers/vmware/0000775000175000017500000000000000000000000020621 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/drivers/vmware/__init__.py0000664000175000017500000000000000000000000022720 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/drivers/vmware/datastore.py0000664000175000017500000003121700000000000023165 0ustar00zuulzuul00000000000000# Copyright (c) 2014 VMware, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Classes and utility methods for datastore selection. 
""" import random from oslo_log import log as logging from oslo_vmware import pbm from oslo_vmware import vim_util from cinder import coordination from cinder.volume.drivers.vmware import exceptions as vmdk_exceptions LOG = logging.getLogger(__name__) class DatastoreType(object): """Supported datastore types.""" NFS = "nfs" VMFS = "vmfs" VSAN = "vsan" VVOL = "vvol" NFS41 = "nfs41" _ALL_TYPES = {NFS, VMFS, VSAN, VVOL, NFS41} @staticmethod def get_all_types(): return DatastoreType._ALL_TYPES class DatastoreSelector(object): """Class for selecting datastores which satisfy input requirements.""" HARD_AFFINITY_DS_TYPE = "hardAffinityDatastoreTypes" HARD_ANTI_AFFINITY_DS = "hardAntiAffinityDatastores" SIZE_BYTES = "sizeBytes" PROFILE_NAME = "storageProfileName" # TODO(vbala) Remove dependency on volumeops. def __init__(self, vops, session, max_objects, ds_regex=None): self._vops = vops self._session = session self._max_objects = max_objects self._ds_regex = ds_regex self._profile_id_cache = {} @coordination.synchronized('vmware-datastore-profile-{profile_name}') def get_profile_id(self, profile_name): """Get vCenter profile ID for the given profile name. :param profile_name: profile name :return: vCenter profile ID :raises ProfileNotFoundException: """ if profile_name in self._profile_id_cache: LOG.debug("Returning cached ID for profile: %s.", profile_name) return self._profile_id_cache[profile_name] profile_id = pbm.get_profile_id_by_name(self._session, profile_name) if profile_id is None: LOG.error("Storage profile: %s cannot be found in vCenter.", profile_name) raise vmdk_exceptions.ProfileNotFoundException( storage_profile=profile_name) self._profile_id_cache[profile_name] = profile_id LOG.debug("Storage profile: %(name)s resolved to vCenter profile ID: " "%(id)s.", {'name': profile_name, 'id': profile_id}) return profile_id def _filter_by_profile(self, datastores, profile_id): """Filter out input datastores that do not match the given profile.""" cf = self._session.pbm.client.factory hubs = pbm.convert_datastores_to_hubs(cf, datastores) hubs = pbm.filter_hubs_by_profile(self._session, hubs, profile_id) hub_ids = [hub.hubId for hub in hubs] return {k: v for k, v in datastores.items() if vim_util.get_moref_value(k) in hub_ids} def _filter_datastores(self, datastores, size_bytes, profile_id, hard_anti_affinity_ds, hard_affinity_ds_types, valid_host_refs=None): if not datastores: return def _is_valid_ds_type(summary): ds_type = summary.type.lower() return (ds_type in DatastoreType.get_all_types() and (hard_affinity_ds_types is None or ds_type in hard_affinity_ds_types)) def _is_ds_usable(summary): return summary.accessible and not self._vops._in_maintenance( summary) valid_host_refs = valid_host_refs or [] valid_hosts = [vim_util.get_moref_value(host_ref) for host_ref in valid_host_refs] def _is_ds_accessible_to_valid_host(host_mounts): for host_mount in host_mounts: if vim_util.get_moref_value(host_mount.key) in valid_hosts: return True def _is_ds_valid(ds_ref, ds_props): summary = ds_props.get('summary') host_mounts = ds_props.get('host') if (summary is None or host_mounts is None): return False if self._ds_regex and not self._ds_regex.match(summary.name): return False if (hard_anti_affinity_ds and vim_util.get_moref_value(ds_ref) in hard_anti_affinity_ds): return False if summary.capacity == 0 or summary.freeSpace < size_bytes: return False if (valid_hosts and not _is_ds_accessible_to_valid_host(host_mounts)): return False return _is_valid_ds_type(summary) and _is_ds_usable(summary) 
datastores = {k: v for k, v in datastores.items() if _is_ds_valid(k, v)} if datastores and profile_id: datastores = self._filter_by_profile(datastores, profile_id) return datastores def _get_object_properties(self, obj_content): props = {} if hasattr(obj_content, 'propSet'): prop_set = obj_content.propSet if prop_set: props = {prop.name: prop.val for prop in prop_set} return props def _get_datastores(self): datastores = {} retrieve_result = self._session.invoke_api( vim_util, 'get_objects', self._session.vim, 'Datastore', self._max_objects, properties_to_collect=['host', 'summary']) while retrieve_result: if retrieve_result.objects: for obj_content in retrieve_result.objects: props = self._get_object_properties(obj_content) if ('host' in props and hasattr(props['host'], 'DatastoreHostMount')): props['host'] = props['host'].DatastoreHostMount datastores[obj_content.obj] = props retrieve_result = self._session.invoke_api(vim_util, 'continue_retrieval', self._session.vim, retrieve_result) return datastores def _get_host_properties(self, host_ref): retrieve_result = self._session.invoke_api(vim_util, 'get_object_properties', self._session.vim, host_ref, ['runtime', 'parent']) if retrieve_result: return self._get_object_properties(retrieve_result[0]) def _get_resource_pool(self, cluster_ref): return self._session.invoke_api(vim_util, 'get_object_property', self._session.vim, cluster_ref, 'resourcePool') def _select_best_datastore(self, datastores, valid_host_refs=None): if not datastores: return def _sort_key(ds_props): host = ds_props.get('host') summary = ds_props.get('summary') space_utilization = (1.0 - (summary.freeSpace / float(summary.capacity))) return (-len(host), space_utilization) host_prop_map = {} def _is_host_usable(host_ref): props = host_prop_map.get(vim_util.get_moref_value(host_ref)) if props is None: props = self._get_host_properties(host_ref) host_prop_map[vim_util.get_moref_value(host_ref)] = props runtime = props.get('runtime') parent = props.get('parent') if runtime and parent: return (runtime.connectionState == 'connected' and not runtime.inMaintenanceMode) else: return False valid_host_refs = valid_host_refs or [] valid_hosts = [vim_util.get_moref_value(host_ref) for host_ref in valid_host_refs] def _select_host(host_mounts): random.shuffle(host_mounts) for host_mount in host_mounts: host_mount_key_value = vim_util.get_moref_value(host_mount.key) if valid_hosts and host_mount_key_value not in valid_hosts: continue if (self._vops._is_usable(host_mount.mountInfo) and _is_host_usable(host_mount.key)): return host_mount.key sorted_ds_props = sorted(datastores.values(), key=_sort_key) for ds_props in sorted_ds_props: host_ref = _select_host(ds_props['host']) if host_ref: host_ref_value = vim_util.get_moref_value(host_ref) rp = self._get_resource_pool( host_prop_map[host_ref_value]['parent']) return (host_ref, rp, ds_props['summary']) def select_datastore(self, req, hosts=None): """Selects a datastore satisfying the given requirements. A datastore which is connected to maximum number of hosts is selected. Ties if any are broken based on space utilization-- datastore with least space utilization is preferred. It returns the selected datastore's summary along with a host and resource pool where the volume can be created. 
:param req: selection requirements :param hosts: list of hosts to consider :return: (host, resourcePool, summary) """ LOG.debug("Using requirements: %s for datastore selection.", req) hard_affinity_ds_types = req.get( DatastoreSelector.HARD_AFFINITY_DS_TYPE) hard_anti_affinity_datastores = req.get( DatastoreSelector.HARD_ANTI_AFFINITY_DS) size_bytes = req[DatastoreSelector.SIZE_BYTES] profile_name = req.get(DatastoreSelector.PROFILE_NAME) profile_id = None if profile_name is not None: profile_id = self.get_profile_id(profile_name) datastores = self._get_datastores() datastores = self._filter_datastores(datastores, size_bytes, profile_id, hard_anti_affinity_datastores, hard_affinity_ds_types, valid_host_refs=hosts) res = self._select_best_datastore(datastores, valid_host_refs=hosts) LOG.debug("Selected (host, resourcepool, datastore): %s", res) return res def is_datastore_compliant(self, datastore, profile_name): """Check if the datastore is compliant with given profile. :param datastore: datastore to check the compliance :param profile_name: profile to check the compliance against :return: True if the datastore is compliant; False otherwise :raises ProfileNotFoundException: """ LOG.debug("Checking datastore: %(datastore)s compliance against " "profile: %(profile)s.", {'datastore': datastore, 'profile': profile_name}) if profile_name is None: # Any datastore is trivially compliant with a None profile. return True profile_id = self.get_profile_id(profile_name) # _filter_by_profile expects a map of datastore references to its # properties. It only uses the properties to construct a map of # filtered datastores to its properties. Here we don't care about # the datastore property, so pass it as None. is_compliant = bool(self._filter_by_profile({datastore: None}, profile_id)) LOG.debug("Compliance is %(is_compliant)s for datastore: " "%(datastore)s against profile: %(profile)s.", {'is_compliant': is_compliant, 'datastore': datastore, 'profile': profile_name}) return is_compliant ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/drivers/vmware/exceptions.py0000664000175000017500000000444200000000000023360 0ustar00zuulzuul00000000000000# Copyright (c) 2015 VMware, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Exception definitions. 
""" from oslo_vmware import exceptions from cinder.i18n import _ class InvalidAdapterTypeException(exceptions.VMwareDriverException): """Thrown when the disk adapter type is invalid.""" msg_fmt = _("Invalid disk adapter type: %(invalid_type)s.") class InvalidDiskTypeException(exceptions.VMwareDriverException): """Thrown when the disk type is invalid.""" msg_fmt = _("Invalid disk type: %(disk_type)s.") class VirtualDiskNotFoundException(exceptions.VMwareDriverException): """Thrown when virtual disk is not found.""" msg_fmt = _("There is no virtual disk device.") class ProfileNotFoundException(exceptions.VMwareDriverException): """Thrown when the given storage profile cannot be found.""" msg_fmt = _("Storage profile: %(storage_profile)s not found.") class NoValidDatastoreException(exceptions.VMwareDriverException): """Thrown when there are no valid datastores.""" msg_fmt = _("There are no valid datastores.") class ClusterNotFoundException(exceptions.VMwareDriverException): """Thrown when the given cluster cannot be found.""" msg_fmt = _("Compute cluster: %(cluster)s not found.") class NoValidHostException(exceptions.VMwareDriverException): """Thrown when there are no valid ESX hosts.""" msg_fmt = _("There are no valid ESX hosts.") class TemplateNotFoundException(exceptions.VMwareDriverException): """Thrown when template cannot be found.""" msg_fmt = _("Template cannot be found at path: %(path)s.") class SnapshotNotFoundException(exceptions.VMwareDriverException): """Thrown when the backend snapshot cannot be found.""" msg_fmt = _("Snapshot: %(name)s not found.") ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/drivers/vmware/fcd.py0000664000175000017500000004045400000000000021736 0ustar00zuulzuul00000000000000# Copyright (c) 2017 VMware, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ VMware VStorageObject driver Volume driver based on VMware VStorageObject aka First Class Disk (FCD). This driver requires a minimum vCenter version of 6.5. 
""" from oslo_log import log as logging from oslo_utils import units from oslo_utils import versionutils from oslo_vmware import image_transfer from oslo_vmware.objects import datastore from oslo_vmware import vim_util from cinder.common import constants from cinder import exception from cinder.i18n import _ from cinder import interface from cinder.volume.drivers.vmware import datastore as hub from cinder.volume.drivers.vmware import vmdk from cinder.volume.drivers.vmware import volumeops as vops from cinder.volume import volume_utils LOG = logging.getLogger(__name__) @interface.volumedriver class VMwareVStorageObjectDriver(vmdk.VMwareVcVmdkDriver): """Volume driver based on VMware VStorageObject""" # 1.0 - initial version based on vSphere 6.5 vStorageObject APIs # 1.1 - support for vStorageObject snapshot APIs # 1.2 - support for SPBM storage policies # 1.3 - support for retype VERSION = '1.3.0' # ThirdPartySystems wiki page CI_WIKI_NAME = "VMware_CI" # minimum supported vCenter version MIN_SUPPORTED_VC_VERSION = '6.5' STORAGE_TYPE = constants.VSTORAGE def do_setup(self, context): """Any initialization the volume driver needs to do while starting. :param context: The admin context. """ super(VMwareVStorageObjectDriver, self).do_setup(context) self.volumeops.set_vmx_version('vmx-13') vc_67_compatible = versionutils.is_compatible( '6.7.0', self._vc_version, same_major=False) self._use_fcd_snapshot = vc_67_compatible self._storage_policy_enabled = vc_67_compatible def get_volume_stats(self, refresh=False): """Collects volume backend stats. :param refresh: Whether to discard any cached values and force a full refresh of stats. :returns: dict of appropriate values. """ stats = super(VMwareVStorageObjectDriver, self).get_volume_stats( refresh=refresh) stats['storage_protocol'] = self.STORAGE_TYPE return stats def _select_ds_fcd(self, volume): req = {} req[hub.DatastoreSelector.SIZE_BYTES] = volume.size * units.Gi if self._storage_policy_enabled: req[hub.DatastoreSelector.PROFILE_NAME] = ( self._get_storage_profile(volume)) (_host_ref, _resource_pool, summary) = self._select_datastore(req) return summary.datastore def _get_temp_image_folder(self, size_bytes, preallocated=False): req = {} req[hub.DatastoreSelector.SIZE_BYTES] = size_bytes if preallocated: req[hub.DatastoreSelector.HARD_AFFINITY_DS_TYPE] = ( hub.DatastoreType.get_all_types() - {hub.DatastoreType.VSAN, hub.DatastoreType.VVOL}) (host_ref, _resource_pool, summary) = self._select_datastore(req) folder_path = vmdk.TMP_IMAGES_DATASTORE_FOLDER_PATH dc_ref = self.volumeops.get_dc(host_ref) self.volumeops.create_datastore_folder( summary.name, folder_path, dc_ref) return (dc_ref, summary, folder_path) def _get_disk_type(self, volume): extra_spec_disk_type = super( VMwareVStorageObjectDriver, self)._get_disk_type(volume) return vops.VirtualDiskType.get_virtual_disk_type(extra_spec_disk_type) def _get_storage_profile_id(self, volume): if self._storage_policy_enabled: return super( VMwareVStorageObjectDriver, self)._get_storage_profile_id( volume) def create_volume(self, volume): """Create a new volume on the backend. :param volume: Volume object containing specifics to create. :returns: (Optional) dict of database updates for the new volume. 
""" disk_type = self._get_disk_type(volume) ds_ref = self._select_ds_fcd(volume) profile_id = self._get_storage_profile_id(volume) fcd_loc = self.volumeops.create_fcd( volume.name, volume.size * units.Ki, ds_ref, disk_type, profile_id=profile_id) return {'provider_location': fcd_loc.provider_location()} def _delete_fcd(self, provider_loc): fcd_loc = vops.FcdLocation.from_provider_location(provider_loc) self.volumeops.delete_fcd(fcd_loc) def delete_volume(self, volume): """Delete a volume from the backend. :param volume: The volume to delete. """ if not volume.provider_location: LOG.warning("FCD provider location is empty for volume %s", volume.id) else: self._delete_fcd(volume.provider_location) def initialize_connection(self, volume, connector, initiator_data=None): """Allow connection to connector and return connection info. :param volume: The volume to be attached. :param connector: Dictionary containing information about what is being connected to. :param initiator_data: (Optional) A dictionary of driver_initiator_data objects with key-value pairs that have been saved for this initiator by a driver in previous initialize_connection calls. :returns: A dictionary of connection information. """ fcd_loc = vops.FcdLocation.from_provider_location( volume.provider_location) connection_info = {'driver_volume_type': self.STORAGE_TYPE} connection_info['data'] = { 'id': fcd_loc.fcd_id, 'ds_ref_val': fcd_loc.ds_ref_val, 'adapter_type': self._get_adapter_type(volume) } LOG.debug("Connection info for volume %(name)s: %(connection_info)s.", {'name': volume.name, 'connection_info': connection_info}) return connection_info def _validate_container_format(self, container_format, image_id): if container_format and container_format != 'bare': msg = _("Container format: %s is unsupported, only 'bare' " "is supported.") % container_format LOG.error(msg) raise exception.ImageUnacceptable(image_id=image_id, reason=msg) def copy_image_to_volume(self, context, volume, image_service, image_id, disable_sparse=False): """Fetch the image from image_service and write it to the volume. :param context: Security/policy info for the request. :param volume: The volume to create. :param image_service: The image service to use. :param image_id: The image identifier. :param disable_sparse: Enable or disable sparse copy. Default=False. This parameter is ignored by VMware driver. :returns: Model updates. 
""" metadata = image_service.show(context, image_id) self._validate_disk_format(metadata['disk_format']) self._validate_container_format( metadata.get('container_format'), image_id) properties = metadata['properties'] or {} disk_type = properties.get('vmware_disktype', vmdk.ImageDiskType.PREALLOCATED) vmdk.ImageDiskType.validate(disk_type) size_bytes = metadata['size'] dc_ref, summary, folder_path = self._get_temp_image_folder( volume.size * units.Gi) disk_name = volume.id if disk_type in [vmdk.ImageDiskType.SPARSE, vmdk.ImageDiskType.STREAM_OPTIMIZED]: vmdk_path = self._create_virtual_disk_from_sparse_image( context, image_service, image_id, size_bytes, dc_ref, summary.name, folder_path, disk_name) else: vmdk_path = self._create_virtual_disk_from_preallocated_image( context, image_service, image_id, size_bytes, dc_ref, summary.name, folder_path, disk_name, vops.VirtualDiskAdapterType.LSI_LOGIC) ds_path = datastore.DatastorePath.parse( vmdk_path.get_descriptor_ds_file_path()) dc_path = self.volumeops.get_inventory_path(dc_ref) vmdk_url = datastore.DatastoreURL( 'https', self.configuration.vmware_host_ip, ds_path.rel_path, dc_path, ds_path.datastore) fcd_loc = self.volumeops.register_disk( str(vmdk_url), volume.name, summary.datastore) profile_id = self._get_storage_profile_id(volume) if profile_id: self.volumeops.update_fcd_policy(fcd_loc, profile_id) return {'provider_location': fcd_loc.provider_location()} def copy_volume_to_image(self, context, volume, image_service, image_meta): """Copy the volume to the specified image. :param context: Security/policy info for the request. :param volume: The volume to copy. :param image_service: The image service to use. :param image_meta: Information about the image. :returns: Model updates. """ self._validate_disk_format(image_meta['disk_format']) fcd_loc = vops.FcdLocation.from_provider_location( volume.provider_location) hosts = self.volumeops.get_connected_hosts(fcd_loc.ds_ref()) host = vim_util.get_moref(hosts[0], 'HostSystem') LOG.debug("Selected host: %(host)s for downloading fcd: %(fcd_loc)s.", {'host': host, 'fcd_loc': fcd_loc}) attached = False try: create_params = {vmdk.CREATE_PARAM_DISK_LESS: True} backing = self._create_backing(volume, host, create_params) self.volumeops.attach_fcd(backing, fcd_loc) attached = True vmdk_file_path = self.volumeops.get_vmdk_path(backing) conf = self.configuration # retrieve store information from extra-specs store_id = volume.volume_type.extra_specs.get( 'image_service:store_id') # TODO (whoami-rajat): Remove store_id and base_image_ref # parameters when oslo.vmware calls volume_utils wrapper of # upload_volume instead of image_utils.upload_volume image_transfer.upload_image( context, conf.vmware_image_transfer_timeout_secs, image_service, image_meta['id'], volume.project_id, session=self.session, host=conf.vmware_host_ip, port=conf.vmware_host_port, vm=backing, vmdk_file_path=vmdk_file_path, vmdk_size=volume.size * units.Gi, image_name=image_meta['name'], store_id=store_id, base_image_ref=volume_utils.get_base_image_ref(volume)) finally: if attached: self.volumeops.detach_fcd(backing, fcd_loc) backing = self.volumeops.get_backing_by_uuid(volume.id) if backing: self._delete_temp_backing(backing) def extend_volume(self, volume, new_size): """Extend the size of a volume. :param volume: The volume to extend. :param new_size: The new desired size of the volume. 
""" fcd_loc = vops.FcdLocation.from_provider_location( volume.provider_location) self.volumeops.extend_fcd(fcd_loc, new_size * units.Ki) def _clone_fcd(self, provider_loc, name, dest_ds_ref, disk_type=vops.VirtualDiskType.THIN, profile_id=None): fcd_loc = vops.FcdLocation.from_provider_location(provider_loc) return self.volumeops.clone_fcd( name, fcd_loc, dest_ds_ref, disk_type, profile_id=profile_id) def create_snapshot(self, snapshot): """Creates a snapshot. :param snapshot: Information for the snapshot to be created. """ if self._use_fcd_snapshot: fcd_loc = vops.FcdLocation.from_provider_location( snapshot.volume.provider_location) description = "snapshot-%s" % snapshot.id fcd_snap_loc = self.volumeops.create_fcd_snapshot( fcd_loc, description=description) return {'provider_location': fcd_snap_loc.provider_location()} ds_ref = self._select_ds_fcd(snapshot.volume) cloned_fcd_loc = self._clone_fcd( snapshot.volume.provider_location, snapshot.name, ds_ref) return {'provider_location': cloned_fcd_loc.provider_location()} def delete_snapshot(self, snapshot): """Deletes a snapshot. :param snapshot: The snapshot to delete. """ if not snapshot.provider_location: LOG.debug("FCD snapshot location is empty.") return fcd_snap_loc = vops.FcdSnapshotLocation.from_provider_location( snapshot.provider_location) if fcd_snap_loc: self.volumeops.delete_fcd_snapshot(fcd_snap_loc) else: self._delete_fcd(snapshot.provider_location) def _extend_if_needed(self, fcd_loc, cur_size, new_size): if new_size > cur_size: self.volumeops.extend_fcd(fcd_loc, new_size * units.Ki) def _create_volume_from_fcd(self, provider_loc, cur_size, volume): ds_ref = self._select_ds_fcd(volume) disk_type = self._get_disk_type(volume) profile_id = self._get_storage_profile_id(volume) cloned_fcd_loc = self._clone_fcd( provider_loc, volume.name, ds_ref, disk_type=disk_type, profile_id=profile_id) self._extend_if_needed(cloned_fcd_loc, cur_size, volume.size) return {'provider_location': cloned_fcd_loc.provider_location()} def create_volume_from_snapshot(self, volume, snapshot): """Creates a volume from a snapshot. :param volume: The volume to be created. :param snapshot: The snapshot from which to create the volume. :returns: A dict of database updates for the new volume. """ fcd_snap_loc = vops.FcdSnapshotLocation.from_provider_location( snapshot.provider_location) if fcd_snap_loc: profile_id = self._get_storage_profile_id(volume) fcd_loc = self.volumeops.create_fcd_from_snapshot( fcd_snap_loc, volume.name, profile_id=profile_id) self._extend_if_needed(fcd_loc, snapshot.volume_size, volume.size) return {'provider_location': fcd_loc.provider_location()} else: return self._create_volume_from_fcd(snapshot.provider_location, snapshot.volume.size, volume) def create_cloned_volume(self, volume, src_vref): """Creates a clone of the specified volume. 
:param volume: New Volume object :param src_vref: Source Volume object """ return self._create_volume_from_fcd( src_vref.provider_location, src_vref.size, volume) def retype(self, context, volume, new_type, diff, host): if not self._storage_policy_enabled: return True profile = self._get_storage_profile(volume) new_profile = self._get_extra_spec_storage_profile(new_type['id']) if profile == new_profile: LOG.debug("Storage profile matches between new type and old type.") return True if self._in_use(volume): LOG.warning("Cannot change storage profile of attached FCD.") return False fcd_loc = vops.FcdLocation.from_provider_location( volume.provider_location) new_profile_id = self.ds_sel.get_profile_id(new_profile) self.volumeops.update_fcd_policy(fcd_loc, new_profile_id.uniqueId) return True ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/drivers/vmware/vmdk.py0000664000175000017500000032444600000000000022151 0ustar00zuulzuul00000000000000# Copyright (c) 2013 VMware, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Volume driver for VMware vCenter managed datastores. The volumes created by this driver are backed by VMDK (Virtual Machine Disk) files stored in datastores. For ease of managing the VMDKs, the driver creates a virtual machine for each of the volumes. This virtual machine is never powered on and is often referred as the shadow VM. 
""" import math import re from oslo_config import cfg from oslo_log import log as logging from oslo_utils import excutils from oslo_utils import units from oslo_utils import uuidutils from oslo_utils import versionutils from oslo_vmware import api from oslo_vmware import exceptions from oslo_vmware import image_transfer from oslo_vmware import pbm from oslo_vmware import vim_util from cinder.common import constants from cinder import exception from cinder.i18n import _ from cinder.image import image_utils from cinder import interface from cinder.volume import configuration from cinder.volume import driver from cinder.volume.drivers.vmware import datastore as hub from cinder.volume.drivers.vmware import exceptions as vmdk_exceptions from cinder.volume.drivers.vmware import volumeops from cinder.volume import volume_types from cinder.volume import volume_utils LOG = logging.getLogger(__name__) THIN_VMDK_TYPE = 'thin' THICK_VMDK_TYPE = 'thick' EAGER_ZEROED_THICK_VMDK_TYPE = 'eagerZeroedThick' CREATE_PARAM_ADAPTER_TYPE = 'adapter_type' CREATE_PARAM_DISK_LESS = 'disk_less' CREATE_PARAM_BACKING_NAME = 'name' CREATE_PARAM_DISK_SIZE = 'disk_size' CREATE_PARAM_TEMP_BACKING = 'temp_backing' TMP_IMAGES_DATASTORE_FOLDER_PATH = "cinder_temp/" EXTRA_CONFIG_VOLUME_ID_KEY = "cinder.volume.id" EXTENSION_KEY = 'org.openstack.storage' EXTENSION_TYPE = 'volume' vmdk_opts = [ cfg.StrOpt('vmware_host_ip', help='IP address for connecting to VMware vCenter server.'), cfg.PortOpt('vmware_host_port', default=443, help='Port number for connecting to VMware vCenter server.'), cfg.StrOpt('vmware_host_username', help='Username for authenticating with VMware vCenter ' 'server.'), cfg.StrOpt('vmware_host_password', help='Password for authenticating with VMware vCenter ' 'server.', secret=True), cfg.StrOpt('vmware_wsdl_location', help='Optional VIM service WSDL Location ' 'e.g http:///vimService.wsdl. Optional over-ride ' 'to default location for bug work-arounds.'), cfg.IntOpt('vmware_api_retry_count', default=10, help='Number of times VMware vCenter server API must be ' 'retried upon connection related issues.'), cfg.FloatOpt('vmware_task_poll_interval', default=2.0, help='The interval (in seconds) for polling remote tasks ' 'invoked on VMware vCenter server.'), cfg.StrOpt('vmware_volume_folder', default='Volumes', help='Name of the vCenter inventory folder that will ' 'contain Cinder volumes. This folder will be created ' 'under "OpenStack/", where project_folder ' 'is of format "Project ()".'), cfg.IntOpt('vmware_image_transfer_timeout_secs', default=7200, help='Timeout in seconds for VMDK volume transfer between ' 'Cinder and Glance.'), cfg.IntOpt('vmware_max_objects_retrieval', default=100, help='Max number of objects to be retrieved per batch. ' 'Query results will be obtained in batches from the ' 'server and not in one shot. Server may still limit the ' 'count to something less than the configured value.'), cfg.StrOpt('vmware_host_version', help='Optional string specifying the VMware vCenter server ' 'version. ' 'The driver attempts to retrieve the version from VMware ' 'vCenter server. 
Set this configuration only if you want ' 'to override the vCenter server version.'), cfg.StrOpt('vmware_tmp_dir', default='/tmp', help='Directory where virtual disks are stored during volume ' 'backup and restore.'), cfg.StrOpt('vmware_ca_file', help='CA bundle file to use in verifying the vCenter server ' 'certificate.'), cfg.BoolOpt('vmware_insecure', default=False, help='If true, the vCenter server certificate is not ' 'verified. If false, then the default CA truststore is ' 'used for verification. This option is ignored if ' '"vmware_ca_file" is set.'), cfg.MultiStrOpt('vmware_cluster_name', help='Name of a vCenter compute cluster where volumes ' 'should be created.'), cfg.MultiStrOpt('vmware_storage_profile', help='Names of storage profiles to be monitored. Only ' 'used when vmware_enable_volume_stats is True.'), cfg.IntOpt('vmware_connection_pool_size', default=10, help='Maximum number of connections in http connection pool.'), cfg.StrOpt('vmware_adapter_type', choices=[volumeops.VirtualDiskAdapterType.LSI_LOGIC, volumeops.VirtualDiskAdapterType.BUS_LOGIC, volumeops.VirtualDiskAdapterType.LSI_LOGIC_SAS, volumeops.VirtualDiskAdapterType.PARA_VIRTUAL, volumeops.VirtualDiskAdapterType.IDE], default=volumeops.VirtualDiskAdapterType.LSI_LOGIC, help='Default adapter type to be used for attaching volumes.'), cfg.StrOpt('vmware_snapshot_format', choices=['template', 'COW'], default='template', help='Volume snapshot format in vCenter server.'), cfg.BoolOpt('vmware_lazy_create', default=True, help='If true, the backend volume in vCenter server is created' ' lazily when the volume is created without any source. ' 'The backend volume is created when the volume is ' 'attached, uploaded to image service or during backup.'), cfg.StrOpt('vmware_datastore_regex', help='Regular expression pattern to match the name of ' 'datastores where backend volumes are created.'), cfg.BoolOpt('vmware_enable_volume_stats', default=False, help='If true, this enables the fetching of the volume stats ' 'from the backend. This has potential performance ' 'issues at scale. When False, the driver will not ' 'collect ANY stats about the backend.') ] CONF = cfg.CONF CONF.register_opts(vmdk_opts, group=configuration.SHARED_CONF_GROUP) def _get_volume_type_extra_spec(type_id, spec_key, possible_values=None, default_value=None): """Get extra spec value. If the spec value is not present in the input possible_values, then default_value will be returned. If the type_id is None, then default_value is returned. The caller must not consider scope and the implementation adds/removes scope. The scope used here is 'vmware' e.g. key 'vmware:vmdk_type' and so the caller must pass vmdk_type as an input ignoring the scope. 
:param type_id: Volume type ID :param spec_key: Extra spec key :param possible_values: Permitted values for the extra spec if known :param default_value: Default value for the extra spec incase of an invalid value or if the entry does not exist :return: extra spec value """ if not type_id: return default_value spec_key = ('vmware:%s') % spec_key spec_value = volume_types.get_volume_type_extra_specs(type_id).get( spec_key, False) if not spec_value: LOG.debug("Returning default spec value: %s.", default_value) return default_value if possible_values is None: return spec_value if spec_value in possible_values: LOG.debug("Returning spec value %s", spec_value) return spec_value LOG.debug("Invalid spec value: %s specified.", spec_value) class ImageDiskType(object): """Supported disk types in images.""" PREALLOCATED = "preallocated" SPARSE = "sparse" STREAM_OPTIMIZED = "streamOptimized" THIN = "thin" @staticmethod def is_valid(extra_spec_disk_type): """Check if the given disk type in extra_spec is valid. :param extra_spec_disk_type: disk type to check :return: True if valid """ return extra_spec_disk_type in [ImageDiskType.PREALLOCATED, ImageDiskType.SPARSE, ImageDiskType.STREAM_OPTIMIZED, ImageDiskType.THIN] @staticmethod def validate(extra_spec_disk_type): """Validate the given disk type in extra_spec. This method throws ImageUnacceptable if the disk type is not a supported one. :param extra_spec_disk_type: disk type :raises: ImageUnacceptable """ if not ImageDiskType.is_valid(extra_spec_disk_type): raise exception.ImageUnacceptable(_("Invalid disk type: %s.") % extra_spec_disk_type) @interface.volumedriver class VMwareVcVmdkDriver(driver.VolumeDriver): """Manage volumes on VMware vCenter server.""" # 1.0 - initial version of driver # 1.1.0 - selection of datastore based on number of host mounts # 1.2.0 - storage profile volume types based placement of volumes # 1.3.0 - support for volume backup/restore # 1.4.0 - support for volume retype # 1.5.0 - restrict volume placement to specific vCenter clusters # 1.6.0 - support for manage existing # 1.7.0 - new config option 'vmware_connection_pool_size' # 1.7.1 - enforce vCenter server version 5.5 # 2.0.0 - performance enhancements # - new config option 'vmware_adapter_type' # - new extra-spec option 'vmware:adapter_type' # 3.0.0 - vCenter storage profile ID caching # support for cloning attached volume # optimize volume creation from image for vCenter datastore based # glance backend # add 'managed by OpenStack Cinder' info to volumes in the backend # support for vSphere template as volume snapshot format # support for snapshot of attached volumes # add storage profile ID to connection info # support for revert-to-snapshot # improve scalability of querying volumes in backend (bug 1600754) # 3.1.0 - support adapter type change using retype # 3.2.0 - config option to disable lazy creation of backend volume # 3.3.0 - config option to specify datastore name regex # 3.4.0 - added NFS41 as a supported datastore type # 3.4.1 - volume capacity stats implemented # 3.4.2 - deprecated option vmware_storage_profile # 3.4.3 - un-deprecated option vmware_storage_profile and added new # option vmware_enable_volume_stats to optionally enable # real get_volume_stats for proper scheduling of this driver. # 3.4.4 - Ensure datastores exist for storage profiles during # get_volume_stats() VERSION = '3.4.4' # ThirdPartySystems wiki page CI_WIKI_NAME = "VMware_CI" # Minimum supported vCenter version. 
MIN_SUPPORTED_VC_VERSION = '5.5' NEXT_MIN_SUPPORTED_VC_VERSION = '5.5' # PBM is enabled only for vCenter versions 5.5 and above PBM_ENABLED_VC_VERSION = '5.5' def __init__(self, *args, **kwargs): super(VMwareVcVmdkDriver, self).__init__(*args, **kwargs) self.configuration.append_config_values(vmdk_opts) self._session = None self._stats = None self._volumeops = None self._storage_policy_enabled = False self._ds_sel = None self._clusters = None self._dc_cache = {} self._ds_regex = None @staticmethod def get_driver_options(): return vmdk_opts @property def volumeops(self): return self._volumeops @property def ds_sel(self): return self._ds_sel def _validate_params(self): # Throw error if required parameters are not set. required_params = ['vmware_host_ip', 'vmware_host_username', 'vmware_host_password'] for param in required_params: if not getattr(self.configuration, param, None): reason = _("%s not set.") % param raise exception.InvalidInput(reason=reason) def check_for_setup_error(self): pass def _update_volume_stats(self): if self.configuration.safe_get('vmware_enable_volume_stats'): self._stats = self._get_volume_stats() else: self._stats = self._get_fake_stats() def _get_fake_stats(self): """Provide fake stats to the scheduler. :param refresh: Whether to get refreshed information """ if not self._stats: backend_name = self.configuration.safe_get('volume_backend_name') if not backend_name: backend_name = self.__class__.__name__ data = {'volume_backend_name': backend_name, 'vendor_name': 'VMware', 'driver_version': self.VERSION, 'storage_protocol': 'vmdk', 'reserved_percentage': 0, 'total_capacity_gb': 'unknown', 'free_capacity_gb': 'unknown', 'shared_targets': False} self._stats = data return self._stats def _get_volume_stats(self): """Fetch the stats about the backend. This can be slow at scale, but allows properly provisioning scheduling. 
""" backend_name = self.configuration.safe_get('volume_backend_name') if not backend_name: backend_name = self.__class__.__name__ data = {'volume_backend_name': backend_name, 'vendor_name': 'VMware', 'driver_version': self.VERSION, 'storage_protocol': constants.VMDK, 'reserved_percentage': self.configuration.reserved_percentage, 'shared_targets': False} ds_summaries = self._get_datastore_summaries() available_hosts = self._get_hosts(self._clusters) global_capacity = 0 global_free = 0 if ds_summaries: while True: for ds in ds_summaries.objects: ds_props = self._get_object_properties(ds) summary = ds_props['summary'] if self._is_datastore_accessible(summary, ds_props['host'], available_hosts): global_capacity += summary.capacity global_free += summary.freeSpace if getattr(ds_summaries, 'token', None): ds_summaries = self.volumeops.continue_retrieval( ds_summaries) else: break data['total_capacity_gb'] = round(global_capacity / units.Gi) data['free_capacity_gb'] = round(global_free / units.Gi) self._stats = data return data def _get_datastore_summaries(self): client_factory = self.session.vim.client.factory object_specs = [] if (self._storage_policy_enabled and self.configuration.vmware_storage_profile): # Get all available storage profiles on the vCenter and extract the # IDs of those that we want to observe profiles_ids = [] for profile in pbm.get_all_profiles(self.session): if profile.name in self.configuration.vmware_storage_profile: profiles_ids.append(profile.profileId) # Get all matching Datastores for each profile datastores = {} for profile_id in profiles_ids: for pbm_hub in pbm.filter_hubs_by_profile(self.session, None, profile_id): if pbm_hub.hubType != "Datastore": # We are not interested in Datastore Clusters for now continue if pbm_hub.hubId not in datastores: # Reconstruct a managed object reference to datastore datastores[pbm_hub.hubId] = vim_util.get_moref( pbm_hub.hubId, "Datastore") # Build property collector object specs out of them for datastore_ref in datastores.values(): object_specs.append( vim_util.build_object_spec(client_factory, datastore_ref, [])) if not datastores: LOG.warning("No Datastores found for storage profile(s) " "''%s'", ', '.join( self.configuration.safe_get( 'vmware_storage_profile'))) else: # Build a catch-all object spec that would reach all datastores object_specs.append( vim_util.build_object_spec( client_factory, self.session.vim.service_content.rootFolder, [vim_util.build_recursive_traversal_spec(client_factory)])) # If there are no datastores, we won't have object_specs and will # fail when trying to get stats if not object_specs: return prop_spec = vim_util.build_property_spec(client_factory, 'Datastore', ['summary', 'host']) filter_spec = vim_util.build_property_filter_spec(client_factory, prop_spec, object_specs) options = client_factory.create('ns0:RetrieveOptions') options.maxObjects = self.configuration.vmware_max_objects_retrieval result = self.session.vim.RetrievePropertiesEx( self.session.vim.service_content.propertyCollector, specSet=[filter_spec], options=options) return result def _get_object_properties(self, obj_content): props = {} if hasattr(obj_content, 'propSet'): prop_set = obj_content.propSet if prop_set: props = {prop.name: prop.val for prop in prop_set} return props def _is_datastore_accessible(self, ds_summary, ds_host_mounts, available_hosts): # available_hosts empty => vmware_cluster_name not specified => don't # filter by hosts cluster_access_to_ds = not available_hosts for host_mount in 
ds_host_mounts.DatastoreHostMount: for avlbl_host in available_hosts: avlbl_host_value = vim_util.get_moref_value(avlbl_host) host_mount_key_value = vim_util.get_moref_value(host_mount.key) if avlbl_host_value == host_mount_key_value: cluster_access_to_ds = True return (ds_summary.accessible and not self.volumeops._in_maintenance(ds_summary) and cluster_access_to_ds) def _verify_volume_creation(self, volume): """Verify that the volume can be created. Verify the vmdk type and storage profile if the volume is associated with a volume type. :param volume: Volume object """ # validate disk type self._get_disk_type(volume) # validate storage profile profile_name = self._get_storage_profile(volume) if profile_name: self.ds_sel.get_profile_id(profile_name) # validate adapter type self._get_adapter_type(volume) LOG.debug("Verified disk type, adapter type and storage profile " "of volume: %s.", volume.name) def create_volume(self, volume): """Creates a volume. We do not create any backing. We do it only the first time it is being attached to a virtual machine. :param volume: Volume object """ if self.configuration.vmware_lazy_create: self._verify_volume_creation(volume) else: self._create_backing(volume) def _delete_volume(self, volume): """Delete the volume backing if it is present. :param volume: Volume object """ backing = self.volumeops.get_backing(volume['name'], volume['id']) if not backing: LOG.info("Backing not available, no operation " "to be performed.") return self.volumeops.delete_backing(backing) def delete_volume(self, volume): """Deletes volume backing. :param volume: Volume object """ self._delete_volume(volume) def _get_extra_spec_adapter_type(self, type_id): adapter_type = _get_volume_type_extra_spec( type_id, 'adapter_type', default_value=self.configuration.vmware_adapter_type) volumeops.VirtualDiskAdapterType.validate(adapter_type) return adapter_type def _get_adapter_type(self, volume): return self._get_extra_spec_adapter_type(volume['volume_type_id']) def _get_extra_spec_storage_profile(self, type_id): """Get storage profile name in the given volume type's extra spec. If there is no storage profile in the extra spec, default is None. """ return _get_volume_type_extra_spec(type_id, 'storage_profile') def _get_storage_profile(self, volume): """Get storage profile associated with the given volume's volume_type. :param volume: Volume whose storage profile should be queried :return: String value of storage profile if volume type is associated and contains storage_profile extra_spec option; None otherwise """ return self._get_extra_spec_storage_profile(volume['volume_type_id']) @staticmethod def _get_extra_spec_disk_type(type_id): """Get disk type from the given volume type's extra spec. If there is no disk type option, default is THIN_VMDK_TYPE. """ disk_type = _get_volume_type_extra_spec(type_id, 'vmdk_type', default_value=THIN_VMDK_TYPE) volumeops.VirtualDiskType.validate(disk_type) return disk_type @staticmethod def _get_disk_type(volume): """Get disk type from the given volume's volume type. 
:param volume: Volume object :return: Disk type """ return VMwareVcVmdkDriver._get_extra_spec_disk_type( volume['volume_type_id']) def _get_storage_profile_id(self, volume): storage_profile = self._get_storage_profile(volume) profile_id = None if self._storage_policy_enabled and storage_profile: profile = pbm.get_profile_id_by_name(self.session, storage_profile) if profile: profile_id = profile.uniqueId return profile_id def _get_extra_config(self, volume): return {EXTRA_CONFIG_VOLUME_ID_KEY: volume['id'], volumeops.BACKING_UUID_KEY: volume['id']} def _create_backing(self, volume, host=None, create_params=None): """Create volume backing under the given host. If host is unspecified, any suitable host is selected. :param volume: Volume object :param host: Reference of the host :param create_params: Dictionary specifying optional parameters for backing VM creation :return: Reference to the created backing """ create_params = create_params or {} (host_ref, resource_pool, folder, summary) = self._select_ds_for_volume(volume, host) # check if a storage profile needs to be associated with the backing VM profile_id = self._get_storage_profile_id(volume) # Use volume name as the default backing name. backing_name = create_params.get(CREATE_PARAM_BACKING_NAME, volume['name']) extra_config = self._get_extra_config(volume) # We shoudln't set backing UUID to volume UUID for temporary backing. if create_params.get(CREATE_PARAM_TEMP_BACKING): del extra_config[volumeops.BACKING_UUID_KEY] # default is a backing with single disk disk_less = create_params.get(CREATE_PARAM_DISK_LESS, False) if disk_less: # create a disk-less backing-- disk can be added later; for e.g., # by copying an image return self.volumeops.create_backing_disk_less( backing_name, folder, resource_pool, host_ref, summary.name, profileId=profile_id, extra_config=extra_config) # create a backing with single disk disk_type = VMwareVcVmdkDriver._get_disk_type(volume) size_kb = volume['size'] * units.Mi adapter_type = create_params.get(CREATE_PARAM_ADAPTER_TYPE, self._get_adapter_type(volume)) backing = self.volumeops.create_backing(backing_name, size_kb, disk_type, folder, resource_pool, host_ref, summary.name, profileId=profile_id, adapter_type=adapter_type, extra_config=extra_config) self.volumeops.update_backing_disk_uuid(backing, volume['id']) return backing def _get_hosts(self, clusters): hosts = [] if clusters: for cluster in clusters: cluster_hosts = self.volumeops.get_cluster_hosts(cluster) hosts.extend(cluster_hosts) return hosts def _select_datastore(self, req, host=None): """Selects datastore satisfying the given requirements. :return: (host, resource_pool, summary) """ hosts = None if host: hosts = [host] elif self._clusters: hosts = self._get_hosts(self._clusters) if not hosts: LOG.error("There are no valid hosts available in " "configured cluster(s): %s.", self._clusters) raise vmdk_exceptions.NoValidHostException() best_candidate = self.ds_sel.select_datastore(req, hosts=hosts) if not best_candidate: LOG.error("There is no valid datastore satisfying " "requirements: %s.", req) raise vmdk_exceptions.NoValidDatastoreException() return best_candidate def _get_dc(self, resource_pool): dc = self._dc_cache.get(vim_util.get_moref_value(resource_pool)) if not dc: dc = self.volumeops.get_dc(resource_pool) self._dc_cache[vim_util.get_moref_value(resource_pool)] = dc return dc def _select_ds_for_volume(self, volume, host=None, create_params=None): """Select datastore that can accommodate the given volume's backing. 
Returns the selected datastore summary along with a compute host and its resource pool and folder where the volume can be created :return: (host, resource_pool, folder, summary) """ # Form requirements for datastore selection. create_params = create_params or {} size = create_params.get(CREATE_PARAM_DISK_SIZE, volume['size']) req = {} req[hub.DatastoreSelector.SIZE_BYTES] = size * units.Gi req[hub.DatastoreSelector.PROFILE_NAME] = self._get_storage_profile( volume) (host_ref, resource_pool, summary) = self._select_datastore(req, host) dc = self._get_dc(resource_pool) folder = self._get_volume_group_folder(dc, volume['project_id']) return (host_ref, resource_pool, folder, summary) def _get_connection_info(self, volume, backing, connector): connection_info = {'driver_volume_type': 'vmdk'} connection_info['data'] = { 'volume': vim_util.get_moref_value(backing), 'volume_id': volume.id, 'name': volume.name, 'profile_id': self._get_storage_profile_id(volume) } # vmdk connector in os-brick needs additional connection info. if 'platform' in connector and 'os_type' in connector: connection_info['data']['vmdk_size'] = volume['size'] * units.Gi vmdk_path = self.volumeops.get_vmdk_path(backing) connection_info['data']['vmdk_path'] = vmdk_path datastore = self.volumeops.get_datastore(backing) connection_info['data']['datastore'] = \ vim_util.get_moref_value(datastore) datacenter = self.volumeops.get_dc(backing) connection_info['data']['datacenter'] = \ vim_util.get_moref_value(datacenter) config = self.configuration vmdk_connector_config = { 'vmware_host_ip': config.vmware_host_ip, 'vmware_host_port': config.vmware_host_port, 'vmware_host_username': config.vmware_host_username, 'vmware_host_password': config.vmware_host_password, 'vmware_api_retry_count': config.vmware_api_retry_count, 'vmware_task_poll_interval': config.vmware_task_poll_interval, 'vmware_ca_file': config.vmware_ca_file, 'vmware_insecure': config.vmware_insecure, 'vmware_tmp_dir': config.vmware_tmp_dir, 'vmware_image_transfer_timeout_secs': config.vmware_image_transfer_timeout_secs, } connection_info['data']['config'] = vmdk_connector_config LOG.debug("Returning connection_info (volume: '%(volume)s', volume_id:" " '%(volume_id)s'), profile_id: '%(profile_id)s' for " "connector: %(connector)s.", {'volume': connection_info['data']['volume'], 'volume_id': volume.id, 'profile_id': connection_info['data']['profile_id'], 'connector': connector}) return connection_info def _initialize_connection(self, volume, connector): """Get information of volume's backing. If the volume does not have a backing yet. It will be created. :param volume: Volume object :param connector: Connector information :return: Return connection information """ backing = self.volumeops.get_backing(volume.name, volume.id) if 'instance' in connector: # The instance exists instance = vim_util.get_moref(connector['instance'], 'VirtualMachine') LOG.debug("The instance: %s for which initialize connection " "is called, exists.", instance) # Get host managing the instance host = self.volumeops.get_host(instance) if not backing: # Create a backing in case it does not exist under the # host managing the instance. LOG.info("There is no backing for the volume: %s. 
" "Need to create one.", volume.name) backing = self._create_backing(volume, host) else: # Relocate volume is necessary self._relocate_backing(volume, backing, host) else: # The instance does not exist LOG.debug("The instance for which initialize connection " "is called, does not exist.") if not backing: # Create a backing in case it does not exist. It is a bad use # case to boot from an empty volume. LOG.warning("Trying to boot from an empty volume: %s.", volume.name) # Create backing backing = self._create_backing(volume) return self._get_connection_info(volume, backing, connector) def initialize_connection(self, volume, connector): """Allow connection to connector and return connection info. The implementation returns the following information: .. code-block:: default { 'driver_volume_type': 'vmdk', 'data': {'volume': $VOLUME_MOREF_VALUE, 'volume_id': $VOLUME_ID } } :param volume: Volume object :param connector: Connector information :return: Return connection information """ return self._initialize_connection(volume, connector) def terminate_connection(self, volume, connector, force=False, **kwargs): pass def create_export(self, context, volume, connector): pass def ensure_export(self, context, volume): pass def remove_export(self, context, volume): pass def _get_snapshot_group_folder(self, volume, backing): dc = self.volumeops.get_dc(backing) return self._get_volume_group_folder( dc, volume.project_id, snapshot=True) def _create_snapshot_template_format(self, snapshot, backing): volume = snapshot.volume folder = self._get_snapshot_group_folder(volume, backing) datastore = self.volumeops.get_datastore(backing) if self._in_use(volume): tmp_backing = self._create_temp_backing_from_attached_vmdk( volume, None, None, folder, datastore, tmp_name=snapshot.name) else: tmp_backing = self.volumeops.clone_backing( snapshot.name, backing, None, volumeops.FULL_CLONE_TYPE, datastore, folder=folder) try: self.volumeops.mark_backing_as_template(tmp_backing) except exceptions.VimException: with excutils.save_and_reraise_exception(): LOG.error("Error marking temporary backing as template.") self._delete_temp_backing(tmp_backing) return {'provider_location': self.volumeops.get_inventory_path(tmp_backing)} def _create_snapshot(self, snapshot): """Creates a snapshot. If the volume does not have a backing then simply pass, else create a snapshot. Snapshot of only available volume is supported. :param snapshot: Snapshot object """ volume = snapshot['volume'] snapshot_format = self.configuration.vmware_snapshot_format if self._in_use(volume) and snapshot_format == 'COW': msg = _("Snapshot of volume not supported in " "state: %s.") % volume['status'] LOG.error(msg) raise exception.InvalidVolume(msg) backing = self.volumeops.get_backing(snapshot['volume_name'], volume['id']) if not backing: LOG.info("There is no backing, so will not create " "snapshot: %s.", snapshot['name']) return model_update = None if snapshot_format == 'COW': self.volumeops.create_snapshot(backing, snapshot['name'], snapshot['display_description']) else: model_update = self._create_snapshot_template_format( snapshot, backing) LOG.info("Successfully created snapshot: %s.", snapshot['name']) return model_update def create_snapshot(self, snapshot): """Creates a snapshot. 
:param snapshot: Snapshot object """ return self._create_snapshot(snapshot) def _get_template_by_inv_path(self, inv_path): template = self.volumeops.get_entity_by_inventory_path(inv_path) if template is None: LOG.error("Template not found at path: %s.", inv_path) raise vmdk_exceptions.TemplateNotFoundException(path=inv_path) else: return template def _delete_snapshot_template_format(self, snapshot): template = self._get_template_by_inv_path(snapshot.provider_location) self.volumeops.delete_backing(template) def _delete_snapshot(self, snapshot): """Delete snapshot. If the volume does not have a backing or the snapshot does not exist then simply pass, else delete the snapshot. The volume must not be attached for deletion of snapshot in COW format. :param snapshot: Snapshot object """ inv_path = snapshot.provider_location is_template = inv_path is not None backing = self.volumeops.get_backing(snapshot.volume_name, snapshot.volume.id) if not backing: LOG.debug("Backing does not exist for volume.", resource=snapshot.volume) elif (not is_template and not self.volumeops.get_snapshot(backing, snapshot.name)): LOG.debug("Snapshot does not exist in backend.", resource=snapshot) elif self._in_use(snapshot.volume) and not is_template: msg = _("Delete snapshot of volume not supported in " "state: %s.") % snapshot.volume.status LOG.error(msg) raise exception.InvalidSnapshot(reason=msg) else: if is_template: self._delete_snapshot_template_format(snapshot) else: self.volumeops.delete_snapshot(backing, snapshot.name) def delete_snapshot(self, snapshot): """Delete snapshot. :param snapshot: Snapshot object """ self._delete_snapshot(snapshot) def _get_ds_name_folder_path(self, backing): """Get datastore name and folder path of the given backing. :param backing: Reference to the backing entity :return: datastore name and folder path of the backing """ vmdk_ds_file_path = self.volumeops.get_path_name(backing) (datastore_name, folder_path, _) = volumeops.split_datastore_path(vmdk_ds_file_path) return (datastore_name, folder_path) @staticmethod def _validate_disk_format(disk_format): """Verify vmdk as disk format. :param disk_format: Disk format of the image """ if disk_format and disk_format.lower() != 'vmdk': msg = _("Cannot create image of disk format: %s. Only vmdk " "disk format is accepted.") % disk_format LOG.error(msg) raise exception.ImageUnacceptable(msg) def _copy_image(self, context, dc_ref, image_service, image_id, image_size_in_bytes, ds_name, upload_file_path): """Copy image (flat extent or sparse vmdk) to datastore.""" timeout = self.configuration.vmware_image_transfer_timeout_secs host_ip = self.configuration.vmware_host_ip port = self.configuration.vmware_host_port ca_file = self.configuration.vmware_ca_file insecure = self.configuration.vmware_insecure cookies = self.session.vim.client.cookiejar dc_name = self.volumeops.get_entity_name(dc_ref) LOG.debug("Copying image: %(image_id)s to %(path)s.", {'image_id': image_id, 'path': upload_file_path}) # ca_file is used for verifying vCenter certificate if it is set. # If ca_file is unset and insecure is False, the default CA truststore # is used for verification. We should pass cacerts=True in this # case. If ca_file is unset and insecure is True, there is no # certificate verification, and we should pass cacerts=False. 
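        # For illustration, the resulting cacerts value (the example path is
        # hypothetical):
        #   vmware_ca_file='/etc/ssl/vc.pem', any insecure  -> cacerts='/etc/ssl/vc.pem'
        #   vmware_ca_file unset, insecure=False            -> cacerts=True  (default CA bundle)
        #   vmware_ca_file unset, insecure=True             -> cacerts=False (no verification)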
cacerts = ca_file if ca_file else not insecure tmp_images = image_utils.TemporaryImages.for_image_service( image_service) tmp_image = tmp_images.get(context, image_id) if tmp_image: LOG.debug("Using temporary image.") with open(tmp_image, 'rb') as read_handle: image_transfer.download_file(read_handle, host_ip, port, dc_name, ds_name, cookies, upload_file_path, image_size_in_bytes, cacerts, timeout) else: image_transfer.download_flat_image(context, timeout, image_service, image_id, image_size=image_size_in_bytes, host=host_ip, port=port, data_center_name=dc_name, datastore_name=ds_name, cookies=cookies, file_path=upload_file_path, cacerts=cacerts) LOG.debug("Image: %(image_id)s copied to %(path)s.", {'image_id': image_id, 'path': upload_file_path}) def _delete_temp_disk(self, descriptor_ds_file_path, dc_ref): """Deletes a temporary virtual disk.""" LOG.debug("Deleting temporary disk: %s.", descriptor_ds_file_path) try: self.volumeops.delete_vmdk_file( descriptor_ds_file_path, dc_ref) except exceptions.VimException: LOG.warning("Error occurred while deleting temporary disk: %s.", descriptor_ds_file_path, exc_info=True) def _copy_temp_virtual_disk(self, src_dc_ref, src_path, dest_dc_ref, dest_path): """Clones a temporary virtual disk and deletes it finally.""" try: self.volumeops.copy_vmdk_file( src_dc_ref, src_path.get_descriptor_ds_file_path(), dest_path.get_descriptor_ds_file_path(), dest_dc_ref) except exceptions.VimException: with excutils.save_and_reraise_exception(): LOG.exception("Error occurred while copying %(src)s to " "%(dst)s.", {'src': src_path.get_descriptor_ds_file_path(), 'dst': dest_path.get_descriptor_ds_file_path()}) finally: # Delete temporary disk. self._delete_temp_disk(src_path.get_descriptor_ds_file_path(), src_dc_ref) def _get_temp_image_folder(self, image_size_in_bytes): """Get datastore folder for downloading temporary images.""" # Form requirements for datastore selection. req = {} req[hub.DatastoreSelector.SIZE_BYTES] = image_size_in_bytes # vSAN/VVOL datastores don't support virtual disk with # flat extent; skip such datastores. req[hub.DatastoreSelector.HARD_AFFINITY_DS_TYPE] = ( hub.DatastoreType.get_all_types() - {hub.DatastoreType.VSAN, hub.DatastoreType.VVOL}) # Select datastore satisfying the requirements. (host_ref, _resource_pool, summary) = self._select_datastore(req) ds_name = summary.name dc_ref = self.volumeops.get_dc(host_ref) # Create temporary datastore folder. folder_path = TMP_IMAGES_DATASTORE_FOLDER_PATH self.volumeops.create_datastore_folder(ds_name, folder_path, dc_ref) return (dc_ref, ds_name, folder_path) def _get_vsphere_url(self, context, image_service, image_id): (direct_url, _locations) = image_service.get_location(context, image_id) if direct_url and direct_url.startswith('vsphere://'): return direct_url def _create_virtual_disk_from_sparse_image( self, context, image_service, image_id, image_size_in_bytes, dc_ref, ds_name, folder_path, disk_name): """Creates a flat extent virtual disk from sparse vmdk image.""" # Upload the image to a temporary virtual disk. 
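        # In outline: the sparse vmdk from Glance is first placed at a
        # temporary MonolithicSparse path (downloaded, or copied server-side
        # when a vsphere:// location is available), and
        # _copy_temp_virtual_disk() then converts it into the flat extent
        # destination disk and deletes the temporary source.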
src_disk_name = uuidutils.generate_uuid() src_path = volumeops.MonolithicSparseVirtualDiskPath(ds_name, folder_path, src_disk_name) LOG.debug("Creating temporary virtual disk: %(path)s from sparse vmdk " "image: %(image_id)s.", {'path': src_path.get_descriptor_ds_file_path(), 'image_id': image_id}) vsphere_url = self._get_vsphere_url(context, image_service, image_id) if vsphere_url: self.volumeops.copy_datastore_file( vsphere_url, dc_ref, src_path.get_descriptor_ds_file_path()) else: self._copy_image(context, dc_ref, image_service, image_id, image_size_in_bytes, ds_name, src_path.get_descriptor_file_path()) # Copy sparse disk to create a flat extent virtual disk. dest_path = volumeops.FlatExtentVirtualDiskPath(ds_name, folder_path, disk_name) self._copy_temp_virtual_disk(dc_ref, src_path, dc_ref, dest_path) LOG.debug("Created virtual disk: %s from sparse vmdk image.", dest_path.get_descriptor_ds_file_path()) return dest_path def _create_virtual_disk_from_preallocated_image( self, context, image_service, image_id, image_size_in_bytes, dest_dc_ref, dest_ds_name, dest_folder_path, dest_disk_name, adapter_type): """Creates virtual disk from an image which is a flat extent.""" # Upload the image and use it as a flat extent to create a virtual # disk. First, find the datastore folder to download the image. (dc_ref, ds_name, folder_path) = self._get_temp_image_folder(image_size_in_bytes) # pylint: disable=E1101 dc_ref_value = vim_util.get_moref_value(dc_ref) dest_dc_ref_value = vim_util.get_moref_value(dest_dc_ref) if ds_name == dest_ds_name and dc_ref_value == dest_dc_ref_value: # Temporary image folder and destination path are on the same # datastore. We can directly download the image to the destination # folder to save one virtual disk copy. path = volumeops.FlatExtentVirtualDiskPath(dest_ds_name, dest_folder_path, dest_disk_name) dest_path = path else: # Use the image to create a temporary virtual disk which is then # copied to the destination folder. disk_name = uuidutils.generate_uuid() path = volumeops.FlatExtentVirtualDiskPath(ds_name, folder_path, disk_name) dest_path = volumeops.FlatExtentVirtualDiskPath(dest_ds_name, dest_folder_path, dest_disk_name) LOG.debug("Creating virtual disk: %(path)s from (flat extent) image: " "%(image_id)s.", {'path': path.get_descriptor_ds_file_path(), 'image_id': image_id}) # We first create a descriptor with desired settings. self.volumeops.create_flat_extent_virtual_disk_descriptor( dc_ref, path, image_size_in_bytes // units.Ki, adapter_type, EAGER_ZEROED_THICK_VMDK_TYPE) # Upload the image and use it as the flat extent. try: vsphere_url = self._get_vsphere_url(context, image_service, image_id) if vsphere_url: self.volumeops.copy_datastore_file( vsphere_url, dc_ref, path.get_flat_extent_ds_file_path()) else: self._copy_image(context, dc_ref, image_service, image_id, image_size_in_bytes, ds_name, path.get_flat_extent_file_path()) except Exception: # Delete the descriptor. with excutils.save_and_reraise_exception(): LOG.exception("Error occurred while copying image: " "%(image_id)s to %(path)s.", {'path': path.get_descriptor_ds_file_path(), 'image_id': image_id}) LOG.debug("Deleting descriptor: %s.", path.get_descriptor_ds_file_path()) try: self.volumeops.delete_file( path.get_descriptor_ds_file_path(), dc_ref) except exceptions.VimException: LOG.warning("Error occurred while deleting " "descriptor: %s.", path.get_descriptor_ds_file_path(), exc_info=True) if dest_path != path: # Copy temporary disk to given destination. 
self._copy_temp_virtual_disk(dc_ref, path, dest_dc_ref, dest_path) LOG.debug("Created virtual disk: %s from flat extent image.", dest_path.get_descriptor_ds_file_path()) return dest_path def _check_disk_conversion(self, image_disk_type, extra_spec_disk_type): """Check if disk type conversion is needed.""" if image_disk_type == ImageDiskType.SPARSE: # We cannot reliably determine the destination disk type of a # virtual disk copied from a sparse image. return True # Virtual disk created from flat extent is always of type # eagerZeroedThick. return not (volumeops.VirtualDiskType.get_virtual_disk_type( extra_spec_disk_type) == volumeops.VirtualDiskType.EAGER_ZEROED_THICK) def _delete_temp_backing(self, backing): """Deletes temporary backing.""" LOG.debug("Deleting backing: %s.", backing) try: self.volumeops.delete_backing(backing) except exceptions.VimException: LOG.warning("Error occurred while deleting backing: %s.", backing, exc_info=True) def _create_volume_from_non_stream_optimized_image( self, context, volume, image_service, image_id, image_size_in_bytes, adapter_type, image_disk_type): """Creates backing VM from non-streamOptimized image. First, we create a disk-less backing. Then we create a virtual disk using the image which is then attached to the backing VM. Finally, the backing VM is cloned if disk type conversion is required. """ # We should use the disk type in volume type for backing's virtual # disk. disk_type = VMwareVcVmdkDriver._get_disk_type(volume) # First, create a disk-less backing. create_params = {CREATE_PARAM_DISK_LESS: True} disk_conversion = self._check_disk_conversion(image_disk_type, disk_type) if disk_conversion: # The initial backing is a temporary one and used as the source # for clone operation. disk_name = uuidutils.generate_uuid() create_params[CREATE_PARAM_BACKING_NAME] = disk_name create_params[CREATE_PARAM_TEMP_BACKING] = True else: disk_name = volume['name'] LOG.debug("Creating disk-less backing for volume: %(id)s with params: " "%(param)s.", {'id': volume['id'], 'param': create_params}) backing = self._create_backing(volume, create_params=create_params) attached = False try: # Find the backing's datacenter, host, datastore and folder. (ds_name, folder_path) = self._get_ds_name_folder_path(backing) host = self.volumeops.get_host(backing) dc_ref = self.volumeops.get_dc(host) vmdk_path = None # Create flat extent virtual disk from the image. if image_disk_type == ImageDiskType.SPARSE: # Monolithic sparse image has embedded descriptor. vmdk_path = self._create_virtual_disk_from_sparse_image( context, image_service, image_id, image_size_in_bytes, dc_ref, ds_name, folder_path, disk_name) else: # The image is just a flat extent. vmdk_path = self._create_virtual_disk_from_preallocated_image( context, image_service, image_id, image_size_in_bytes, dc_ref, ds_name, folder_path, disk_name, adapter_type) # Attach the virtual disk to the backing. LOG.debug("Attaching virtual disk: %(path)s to backing: " "%(backing)s.", {'path': vmdk_path.get_descriptor_ds_file_path(), 'backing': backing}) profile_id = self._get_storage_profile_id(volume) self.volumeops.attach_disk_to_backing( backing, image_size_in_bytes // units.Ki, disk_type, adapter_type, profile_id, vmdk_path.get_descriptor_ds_file_path()) attached = True if disk_conversion: # Clone the temporary backing for disk type conversion. 
(host, rp, folder, summary) = self._select_ds_for_volume( volume) datastore = summary.datastore LOG.debug("Cloning temporary backing: %s for disk type " "conversion.", backing) extra_config = self._get_extra_config(volume) clone = self.volumeops.clone_backing(volume['name'], backing, None, volumeops.FULL_CLONE_TYPE, datastore, disk_type=disk_type, host=host, resource_pool=rp, extra_config=extra_config, folder=folder) self._delete_temp_backing(backing) backing = clone self.volumeops.update_backing_disk_uuid(backing, volume['id']) except Exception: # Delete backing and virtual disk created from image. with excutils.save_and_reraise_exception(): LOG.exception("Error occurred while creating " "volume: %(id)s" " from image: %(image_id)s.", {'id': volume['id'], 'image_id': image_id}) self._delete_temp_backing(backing) # Delete virtual disk if exists and unattached. if vmdk_path is not None and not attached: self._delete_temp_disk( vmdk_path.get_descriptor_ds_file_path(), dc_ref) def _fetch_stream_optimized_image(self, context, volume, image_service, image_id, image_size, adapter_type): """Creates volume from image using HttpNfc VM import. Uses Nfc API to download the VMDK file from Glance. Nfc creates the backing VM that wraps the VMDK in the vCenter inventory. This method assumes glance image is VMDK disk format and its vmware_disktype is 'streamOptimized'. """ try: # find host in which to create the volume (_host, rp, folder, summary) = self._select_ds_for_volume(volume) except exceptions.VimException as excep: err_msg = (_("Exception in _select_ds_for_volume: " "%s."), excep) raise exception.VolumeBackendAPIException(data=err_msg) size_gb = volume['size'] LOG.debug("Selected datastore %(ds)s for new volume of size " "%(size)s GB.", {'ds': summary.name, 'size': size_gb}) # prepare create spec for backing vm profile_id = self._get_storage_profile_id(volume) disk_type = VMwareVcVmdkDriver._get_disk_type(volume) # The size of stream optimized glance image is often suspect, # so better let vCenter figure out the disk capacity during import. 
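        # A zero capacity is passed in the create spec below so that the
        # import derives the disk size from the streamOptimized vmdk itself
        # rather than from the Glance-reported size.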
dummy_disk_size = 0 extra_config = self._get_extra_config(volume) vm_create_spec = self.volumeops.get_create_spec( volume['name'], dummy_disk_size, disk_type, summary.name, profile_id=profile_id, adapter_type=adapter_type, extra_config=extra_config) # convert vm_create_spec to vm_import_spec cf = self.session.vim.client.factory vm_import_spec = cf.create('ns0:VirtualMachineImportSpec') vm_import_spec.configSpec = vm_create_spec try: # fetching image from glance will also create the backing timeout = self.configuration.vmware_image_transfer_timeout_secs host_ip = self.configuration.vmware_host_ip port = self.configuration.vmware_host_port LOG.debug("Fetching glance image: %(id)s to server: %(host)s.", {'id': image_id, 'host': host_ip}) backing = image_transfer.download_stream_optimized_image( context, timeout, image_service, image_id, session=self.session, host=host_ip, port=port, resource_pool=rp, vm_folder=folder, vm_import_spec=vm_import_spec, image_size=image_size, http_method='POST') self.volumeops.update_backing_disk_uuid(backing, volume['id']) except (exceptions.VimException, exceptions.VMwareDriverException): with excutils.save_and_reraise_exception(): LOG.exception("Error occurred while copying image: %(id)s " "to volume: %(vol)s.", {'id': image_id, 'vol': volume['name']}) backing = self.volumeops.get_backing(volume['name'], volume['id']) if backing: # delete the backing self.volumeops.delete_backing(backing) LOG.info("Done copying image: %(id)s to volume: %(vol)s.", {'id': image_id, 'vol': volume['name']}) def _extend_backing(self, backing, new_size_in_gb, disk_type): """Extend volume backing's virtual disk. :param backing: volume backing :param new_size_in_gb: new size of virtual disk """ root_vmdk_path = self.volumeops.get_vmdk_path(backing) datacenter = self.volumeops.get_dc(backing) eager_zero = disk_type == EAGER_ZEROED_THICK_VMDK_TYPE self.volumeops.extend_virtual_disk(new_size_in_gb, root_vmdk_path, datacenter, eager_zero) def clone_image(self, context, volume, image_location, image_meta, image_service): """Clone image directly to a volume.""" ret = self.copy_image_to_volume( context, volume, image_service, image_meta['id']) return (ret, True) def copy_image_to_volume(self, context, volume, image_service, image_id, disable_sparse=False): """Creates volume from image. This method only supports Glance image of VMDK disk format. Uses flat vmdk file copy for "sparse" and "preallocated" disk types Uses HttpNfc import API for "streamOptimized" disk types. This API creates a backing VM that wraps the VMDK in the vCenter inventory. :param context: context :param volume: Volume object :param image_service: Glance image service :param image_id: Glance image id :param disable_sparse: Enable or disable sparse copy. Default=False. This parameter is ignored by VMDK driver. """ LOG.debug("Copy glance image: %s to create new volume.", image_id) # Verify glance image is vmdk disk format metadata = image_service.show(context, image_id) VMwareVcVmdkDriver._validate_disk_format(metadata['disk_format']) # Validate container format; only 'bare' and 'ova' are supported. 
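        # Example of the Glance metadata consumed below (hypothetical values;
        # only disk_format, container_format, size and the vmware_* properties
        # are read by this method):
        #   {'disk_format': 'vmdk', 'container_format': 'bare',
        #    'size': 1073741824,
        #    'properties': {'vmware_disktype': 'streamOptimized',
        #                   'vmware_adaptertype': 'lsiLogic'}}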
container_format = metadata.get('container_format') if (container_format and container_format not in ['bare', 'ova']): msg = _("Container format: %s is unsupported, only 'bare' and " "'ova' are supported.") % container_format LOG.error(msg) raise exception.ImageUnacceptable(image_id=image_id, reason=msg) # Get the disk type, adapter type and size of vmdk image image_disk_type = ImageDiskType.PREALLOCATED image_adapter_type = self._get_adapter_type(volume) image_size_in_bytes = metadata['size'] properties = metadata['properties'] if properties: if 'vmware_disktype' in properties: image_disk_type = properties['vmware_disktype'] if 'vmware_adaptertype' in properties: image_adapter_type = properties['vmware_adaptertype'] try: # validate disk and adapter types in image meta-data volumeops.VirtualDiskAdapterType.validate(image_adapter_type) ImageDiskType.validate(image_disk_type) if image_disk_type == ImageDiskType.STREAM_OPTIMIZED: self._fetch_stream_optimized_image(context, volume, image_service, image_id, image_size_in_bytes, image_adapter_type) else: self._create_volume_from_non_stream_optimized_image( context, volume, image_service, image_id, image_size_in_bytes, image_adapter_type, image_disk_type) except (exceptions.VimException, exceptions.VMwareDriverException): with excutils.save_and_reraise_exception(): LOG.exception("Error occurred while copying image: %(id)s " "to volume: %(vol)s.", {'id': image_id, 'vol': volume['name']}) LOG.debug("Volume: %(id)s created from image: %(image_id)s.", {'id': volume['id'], 'image_id': image_id}) # If the user-specified volume size is greater than backing's # current disk size, we should extend the disk. volume_size = volume['size'] * units.Gi backing = self.volumeops.get_backing(volume['name'], volume['id']) disk_size = self.volumeops.get_disk_size(backing) if volume_size > disk_size: LOG.debug("Extending volume: %(name)s since the user specified " "volume size (bytes): %(vol_size)d is greater than " "backing's current disk size (bytes): %(disk_size)d.", {'name': volume['name'], 'vol_size': volume_size, 'disk_size': disk_size}) self._extend_backing(backing, volume['size'], VMwareVcVmdkDriver._get_disk_type(volume)) # TODO(vbala): handle volume_size < disk_size case. def copy_volume_to_image(self, context, volume, image_service, image_meta): """Creates glance image from volume. Upload of only available volume is supported. The uploaded glance image has a vmdk disk type of "streamOptimized" that can only be downloaded using the HttpNfc API. Steps followed are: 1. Get the name of the vmdk file which the volume points to right now. Can be a chain of snapshots, so we need to know the last in the chain. 2. Use Nfc APIs to upload the contents of the vmdk file to glance. 
""" # if volume is attached raise exception if self._in_use(volume): msg = _("Upload to glance of attached volume is not supported.") LOG.error(msg) raise exception.InvalidVolume(msg) # validate disk format is vmdk LOG.debug("Copy Volume: %s to new image.", volume['name']) VMwareVcVmdkDriver._validate_disk_format(image_meta['disk_format']) # get backing vm of volume and its vmdk path backing = self.volumeops.get_backing(volume['name'], volume['id']) if not backing: LOG.info("Backing not found, creating for volume: %s", volume['name']) backing = self._create_backing(volume) vmdk_file_path = self.volumeops.get_vmdk_path(backing) # Upload image from vmdk timeout = self.configuration.vmware_image_transfer_timeout_secs host_ip = self.configuration.vmware_host_ip port = self.configuration.vmware_host_port # retrieve store information from extra-specs store_id = volume.volume_type.extra_specs.get('image_service:store_id') # TODO (whoami-rajat): Remove store_id and base_image_ref # parameters when oslo.vmware calls volume_utils wrapper of # upload_volume instead of image_utils.upload_volume image_transfer.upload_image(context, timeout, image_service, image_meta['id'], volume['project_id'], session=self.session, host=host_ip, port=port, vm=backing, vmdk_file_path=vmdk_file_path, vmdk_size=volume['size'] * units.Gi, image_name=image_meta['name'], image_version=1, store_id=store_id, base_image_ref= volume_utils.get_base_image_ref(volume)) LOG.info("Done copying volume %(vol)s to a new image %(img)s", {'vol': volume['name'], 'img': image_meta['name']}) def _in_use(self, volume): """Check if the given volume is in use.""" return (volume['volume_attachment'] and len(volume['volume_attachment']) > 0) def retype(self, ctxt, volume, new_type, diff, host): """Convert the volume to be of the new type. The retype is performed only if the volume is not in use. Retype is NOP if the backing doesn't exist. If disk type conversion is needed, the volume is cloned. If disk type conversion is needed and the volume contains snapshots, the backing is relocated instead of cloning. The backing is also relocated if the current datastore is not compliant with the new storage profile (if any). Finally, the storage profile of the backing VM is updated. :param ctxt: Context :param volume: A dictionary describing the volume to retype :param new_type: A dictionary describing the volume type to convert to :param diff: A dictionary with the difference between the two types :param host: A dictionary describing the host to migrate to, where host['host'] is its name, and host['capabilities'] is a dictionary of its reported capabilities (unused) :returns: True if the retype occurred; False otherwise. """ # Can't attempt retype if the volume is in use. if self._in_use(volume): LOG.warning("Volume: %s is in use, can't retype.", volume['name']) return False # If the backing doesn't exist, retype is NOP. backing = self.volumeops.get_backing(volume['name'], volume['id']) if backing is None: LOG.debug("Backing for volume: %s doesn't exist; retype is NOP.", volume['name']) return True # Check whether we need disk type conversion. disk_type = VMwareVcVmdkDriver._get_disk_type(volume) new_disk_type = VMwareVcVmdkDriver._get_extra_spec_disk_type( new_type['id']) need_disk_type_conversion = disk_type != new_disk_type # Check whether we need to relocate the backing. If the backing # contains snapshots, relocate is the only way to achieve disk type # conversion. 
need_relocate = (need_disk_type_conversion and self.volumeops.snapshot_exists(backing)) datastore = self.volumeops.get_datastore(backing) # Check whether we need to change the storage profile. need_profile_change = False is_compliant = True new_profile = None if self._storage_policy_enabled: profile = self._get_storage_profile(volume) new_profile = self._get_extra_spec_storage_profile(new_type['id']) need_profile_change = profile != new_profile # The current datastore may be compliant with the new profile. is_compliant = self.ds_sel.is_datastore_compliant(datastore, new_profile) # No need to relocate or clone if there is no disk type conversion and # the current datastore is compliant with the new profile or storage # policy is disabled. if not need_disk_type_conversion and is_compliant: LOG.debug("Backing: %(backing)s for volume: %(name)s doesn't need " "disk type conversion.", {'backing': backing, 'name': volume['name']}) if self._storage_policy_enabled: LOG.debug("Backing: %(backing)s for volume: %(name)s is " "compliant with the new profile: %(new_profile)s.", {'backing': backing, 'name': volume['name'], 'new_profile': new_profile}) else: # Set requirements for datastore selection. req = {} req[hub.DatastoreSelector.SIZE_BYTES] = (volume['size'] * units.Gi) if need_relocate: LOG.debug("Backing: %s should be relocated.", backing) req[hub.DatastoreSelector.HARD_ANTI_AFFINITY_DS] = ( [vim_util.get_moref_value(datastore)]) if new_profile: req[hub.DatastoreSelector.PROFILE_NAME] = new_profile # Select datastore satisfying the requirements. try: best_candidate = self._select_datastore(req) except vmdk_exceptions.NoValidDatastoreException: # No candidate datastores; can't retype. LOG.warning("There are no datastores matching new " "requirements; can't retype volume: %s.", volume['name']) return False (host, rp, summary) = best_candidate dc = self._get_dc(rp) folder = self._get_volume_group_folder(dc, volume['project_id']) new_datastore = summary.datastore datastore_value = vim_util.get_moref_value(datastore) new_datastore_value = vim_util.get_moref_value(new_datastore) if datastore_value != new_datastore_value: # Datastore changed; relocate the backing. LOG.debug("Backing: %s needs to be relocated for retype.", backing) self.volumeops.relocate_backing( backing, new_datastore, rp, host, new_disk_type) self.volumeops.move_backing_to_folder(backing, folder) elif need_disk_type_conversion: # Same datastore, but clone is needed for disk type conversion. 
LOG.debug("Backing: %s needs to be cloned for retype.", backing) new_backing = None renamed = False tmp_name = uuidutils.generate_uuid() try: self.volumeops.rename_backing(backing, tmp_name) renamed = True new_backing = self.volumeops.clone_backing( volume['name'], backing, None, volumeops.FULL_CLONE_TYPE, datastore, disk_type=new_disk_type, host=host, resource_pool=rp, folder=folder) self._delete_temp_backing(backing) backing = new_backing self.volumeops.update_backing_uuid(backing, volume['id']) self.volumeops.update_backing_disk_uuid(backing, volume['id']) except exceptions.VimException: with excutils.save_and_reraise_exception(): LOG.exception("Error occurred while cloning backing: " "%s during retype.", backing) if renamed and not new_backing: LOG.debug("Undo rename of backing: %(backing)s; " "changing name from %(new_name)s to " "%(old_name)s.", {'backing': backing, 'new_name': tmp_name, 'old_name': volume['name']}) try: self.volumeops.rename_backing(backing, volume['name']) except exceptions.VimException: LOG.warning("Changing backing: " "%(backing)s name from " "%(new_name)s to %(old_name)s " "failed.", {'backing': backing, 'new_name': tmp_name, 'old_name': volume['name']}) adapter_type = self._get_adapter_type(volume) new_adapter_type = self._get_extra_spec_adapter_type(new_type['id']) if new_adapter_type != adapter_type: LOG.debug("Changing volume: %(name)s adapter type from " "%(adapter_type)s to %(new_adapter_type)s.", {'name': volume['name'], 'adapter_type': adapter_type, 'new_adapter_type': new_adapter_type}) disk_device = self.volumeops._get_disk_device(backing) self.volumeops.detach_disk_from_backing(backing, disk_device) self.volumeops.attach_disk_to_backing( backing, disk_device.capacityInKB, new_disk_type, new_adapter_type, None, disk_device.backing.fileName) # Update the backing's storage profile if needed. if need_profile_change: LOG.debug("Backing: %(backing)s needs a profile change to:" " %(profile)s.", {'backing': backing, 'profile': new_profile}) profile_id = None if new_profile is not None: profile_id = self.ds_sel.get_profile_id(new_profile) self.volumeops.change_backing_profile(backing, profile_id) # Retype is done. LOG.debug("Volume: %s retype is done.", volume['name']) return True def extend_volume(self, volume, new_size): """Extend volume to new size. Extends the volume backing's virtual disk to new size. First, try to extend in place on the same datastore. If that fails due to insufficient disk space, then try to relocate the volume to a different datastore that can accommodate the backing with new size and retry extend. :param volume: dictionary describing the existing 'available' volume :param new_size: new size in GB to extend this volume to """ vol_name = volume['name'] backing = self.volumeops.get_backing(vol_name, volume['id']) if not backing: LOG.info("There is no backing for volume: %s; no need to " "extend the virtual disk.", vol_name) return # try extending vmdk in place try: self._extend_backing(backing, new_size, VMwareVcVmdkDriver._get_disk_type(volume)) LOG.info("Successfully extended volume: %(vol)s to size: " "%(size)s GB.", {'vol': vol_name, 'size': new_size}) return except exceptions.NoDiskSpaceException: LOG.warning("Unable to extend volume: %(vol)s to size: " "%(size)s on current datastore due to insufficient" " space.", {'vol': vol_name, 'size': new_size}) # Insufficient disk space; relocate the volume to a different datastore # and retry extend. 
LOG.info("Relocating volume: %s to a different datastore due to " "insufficient disk space on current datastore.", vol_name) try: create_params = {CREATE_PARAM_DISK_SIZE: new_size} (host, rp, folder, summary) = self._select_ds_for_volume( volume, create_params=create_params) self.volumeops.relocate_backing(backing, summary.datastore, rp, host) self.volumeops.move_backing_to_folder(backing, folder) self._extend_backing(backing, new_size, VMwareVcVmdkDriver._get_disk_type(volume)) except exceptions.VMwareDriverException: with excutils.save_and_reraise_exception(): LOG.error("Failed to extend volume: %(vol)s to size: " "%(size)s GB.", {'vol': vol_name, 'size': new_size}) LOG.info("Successfully extended volume: %(vol)s to size: " "%(size)s GB.", {'vol': vol_name, 'size': new_size}) def _get_disk_device(self, vmdk_path, vm_inv_path): # Get the VM that corresponds to the given inventory path. vm = self.volumeops.get_entity_by_inventory_path(vm_inv_path) if vm: # Get the disk device that corresponds to the given vmdk path. disk_device = self.volumeops.get_disk_device(vm, vmdk_path) if disk_device: return (vm, disk_device) def _get_existing(self, existing_ref): src_name = existing_ref.get('source-name') if not src_name: raise exception.InvalidInput( reason=_("source-name cannot be empty.")) # source-name format: vmdk_path@vm_inventory_path parts = src_name.split('@') if len(parts) != 2: raise exception.InvalidInput( reason=_("source-name format should be: " "'vmdk_path@vm_inventory_path'.")) (vmdk_path, vm_inv_path) = parts existing = self._get_disk_device(vmdk_path, vm_inv_path) if not existing: reason = _("%s does not exist.") % src_name raise exception.ManageExistingInvalidReference( existing_ref=existing_ref, reason=reason) return existing def manage_existing_get_size(self, volume, existing_ref): """Return size of the volume to be managed by manage_existing. When calculating the size, round up to the next GB. :param volume: Cinder volume to manage :param existing_ref: Driver-specific information used to identify a volume """ (_vm, disk) = self._get_existing(existing_ref) return int(math.ceil(disk.capacityInKB * units.Ki / float(units.Gi))) def _manage_existing_int(self, volume, vm, disk): LOG.debug("Creating volume from disk: %(disk)s attached to %(vm)s.", {'disk': disk, 'vm': vm}) # Create a backing for the volume. create_params = {CREATE_PARAM_DISK_LESS: True} backing = self._create_backing(volume, create_params=create_params) # Detach the disk to be managed from the source VM. self.volumeops.detach_disk_from_backing(vm, disk) # Move the disk to the datastore folder of volume backing. src_dc = self.volumeops.get_dc(vm) dest_dc = self.volumeops.get_dc(backing) (ds_name, folder_path) = self._get_ds_name_folder_path(backing) dest_path = volumeops.VirtualDiskPath( ds_name, folder_path, volume['name']) self.volumeops.move_vmdk_file(src_dc, disk.backing.fileName, dest_path.get_descriptor_ds_file_path(), dest_dc_ref=dest_dc) # Attach the disk to be managed to volume backing. profile_id = self._get_storage_profile_id(volume) self.volumeops.attach_disk_to_backing( backing, disk.capacityInKB, VMwareVcVmdkDriver._get_disk_type(volume), self._get_adapter_type(volume), profile_id, dest_path.get_descriptor_ds_file_path()) self.volumeops.update_backing_disk_uuid(backing, volume['id']) return backing def manage_existing(self, volume, existing_ref): """Brings an existing virtual disk under Cinder management. Detaches the virtual disk identified by existing_ref and attaches it to a volume backing. 
:param volume: Cinder volume to manage :param existing_ref: Driver-specific information used to identify a volume """ (vm, disk) = self._get_existing(existing_ref) self._manage_existing_int(volume, vm, disk) def unmanage(self, volume): backing = self.volumeops.get_backing(volume['name'], volume['id']) if backing: extra_config = self._get_extra_config(volume) for key in extra_config: extra_config[key] = '' self.volumeops.update_backing_extra_config(backing, extra_config) @property def session(self): return self._session def _create_session(self): ip = self.configuration.vmware_host_ip port = self.configuration.vmware_host_port username = self.configuration.vmware_host_username password = self.configuration.vmware_host_password api_retry_count = self.configuration.vmware_api_retry_count task_poll_interval = self.configuration.vmware_task_poll_interval wsdl_loc = self.configuration.safe_get('vmware_wsdl_location') ca_file = self.configuration.vmware_ca_file insecure = self.configuration.vmware_insecure pool_size = self.configuration.vmware_connection_pool_size session = api.VMwareAPISession(ip, username, password, api_retry_count, task_poll_interval, wsdl_loc=wsdl_loc, port=port, cacert=ca_file, insecure=insecure, pool_size=pool_size, op_id_prefix='c-vol') return session def _get_vc_version(self): """Connect to vCenter server and fetch version. Can be over-ridden by setting 'vmware_host_version' config. :returns: vCenter version as a LooseVersion object """ version_str = self.configuration.vmware_host_version if version_str: LOG.info("Using overridden vmware_host_version from config: %s", version_str) else: version_str = vim_util.get_vc_version(self.session) LOG.info("Fetched vCenter server version: %s", version_str) return version_str def _validate_vcenter_version(self, vc_version): if not versionutils.is_compatible( self.MIN_SUPPORTED_VC_VERSION, vc_version, same_major=False): msg = _('Running Cinder with a VMware vCenter version less than ' '%s is not allowed.') % self.MIN_SUPPORTED_VC_VERSION LOG.error(msg) raise exceptions.VMwareDriverException(message=msg) elif not versionutils.is_compatible(self.NEXT_MIN_SUPPORTED_VC_VERSION, vc_version, same_major=False): LOG.warning('Running Cinder with a VMware vCenter version ' 'less than %(ver)s is deprecated. The minimum ' 'required version of vCenter server will be raised' ' to %(ver)s in a future release.', {'ver': self.NEXT_MIN_SUPPORTED_VC_VERSION}) def _register_extension(self): ext = vim_util.find_extension(self.session.vim, EXTENSION_KEY) if ext: LOG.debug('Extension %s already exists.', EXTENSION_KEY) else: try: vim_util.register_extension(self.session.vim, EXTENSION_KEY, EXTENSION_TYPE, label='OpenStack Cinder') LOG.info('Registered extension %s.', EXTENSION_KEY) except exceptions.VimFaultException as e: if 'InvalidArgument' in e.fault_list: LOG.debug('Extension %s is already registered.', EXTENSION_KEY) else: raise def do_setup(self, context): """Any initialization the volume driver does while starting.""" self._validate_params() regex_pattern = self.configuration.vmware_datastore_regex if regex_pattern: try: self._ds_regex = re.compile(regex_pattern) except re.error: raise exception.InvalidInput(reason=_( "Invalid regular expression: %s.") % regex_pattern) self._session = self._create_session() # Validate vCenter version. self._vc_version = self._get_vc_version() self._validate_vcenter_version(self._vc_version) # Enable pbm only if vCenter version is 5.5+. 
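        # "pbm" below refers to vCenter's Storage Policy Based Management
        # endpoint (via oslo.vmware); once it is configured,
        # _storage_policy_enabled is set and storage profiles from volume-type
        # extra specs are taken into account during datastore selection.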
if (self._vc_version and versionutils.is_compatible(self.PBM_ENABLED_VC_VERSION, self._vc_version, same_major=False)): pbm_wsdl_loc = pbm.get_pbm_wsdl_location(self._vc_version) if not pbm_wsdl_loc: LOG.error("Not able to configure PBM for vCenter server: %s", self._vc_version) raise exceptions.VMwareDriverException() self._storage_policy_enabled = True self._session.pbm_wsdl_loc_set(pbm_wsdl_loc) self._register_extension() max_objects = self.configuration.vmware_max_objects_retrieval self._volumeops = volumeops.VMwareVolumeOps( self.session, max_objects, EXTENSION_KEY, EXTENSION_TYPE) self._ds_sel = hub.DatastoreSelector( self.volumeops, self.session, max_objects, ds_regex=self._ds_regex) # Get clusters to be used for backing VM creation. cluster_names = self.configuration.vmware_cluster_name if cluster_names: self._clusters = self.volumeops.get_cluster_refs( cluster_names).values() LOG.info("Using compute cluster(s): %s.", cluster_names) self.volumeops.build_backing_ref_cache() LOG.info("Successfully setup driver: %(driver)s for server: " "%(ip)s.", {'driver': self.__class__.__name__, 'ip': self.configuration.vmware_host_ip}) def _get_volume_group_folder(self, datacenter, project_id, snapshot=False): """Get inventory folder for organizing volume backings and snapshots. The inventory folder for organizing volume backings has the following hierarchy: /OpenStack/Project ()/ where volume_folder is the vmdk driver config option "vmware_volume_folder". A sub-folder named 'Snapshots' under volume_folder is used for organizing snapshots in template format. :param datacenter: Reference to the datacenter :param project_id: OpenStack project ID :param snapshot: Return folder for snapshot if True :return: Reference to the inventory folder """ volume_folder_name = self.configuration.vmware_volume_folder project_folder_name = "Project (%s)" % project_id folder_names = ['OpenStack', project_folder_name, volume_folder_name] if snapshot: folder_names.append('Snapshots') return self.volumeops.create_vm_inventory_folder(datacenter, folder_names) def _relocate_backing(self, volume, backing, host): """Relocate volume backing to a datastore accessible to the given host. The backing is not relocated if the current datastore is already accessible to the host and compliant with the backing's storage profile. :param volume: Volume to be relocated :param backing: Reference to the backing :param host: Reference to the host """ # Check if the current datastore is visible to the host managing # the instance and compliant with the storage profile. datastore = self.volumeops.get_datastore(backing) backing_profile = None if self._storage_policy_enabled: backing_profile = self._get_storage_profile(volume) if (self.volumeops.is_datastore_accessible(datastore, host) and self.ds_sel.is_datastore_compliant(datastore, backing_profile)): LOG.debug("Datastore: %(datastore)s of backing: %(backing)s is " "already accessible to instance's host: %(host)s.", {'backing': backing, 'datastore': datastore, 'host': host}) if backing_profile: LOG.debug("Backing: %(backing)s is compliant with " "storage profile: %(profile)s.", {'backing': backing, 'profile': backing_profile}) return # We need to relocate the backing to an accessible and profile # compliant datastore. req = {} req[hub.DatastoreSelector.SIZE_BYTES] = (volume['size'] * units.Gi) req[hub.DatastoreSelector.PROFILE_NAME] = backing_profile # Select datastore satisfying the requirements. 
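        # For illustration, the selector input built above might look like
        # (hypothetical profile name; PROFILE_NAME may be None when storage
        # policy support is disabled):
        #   {hub.DatastoreSelector.SIZE_BYTES: 2 * units.Gi,
        #    hub.DatastoreSelector.PROFILE_NAME: 'Gold'}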
(host, resource_pool, summary) = self._select_datastore(req, host) dc = self._get_dc(resource_pool) folder = self._get_volume_group_folder(dc, volume['project_id']) self.volumeops.relocate_backing(backing, summary.datastore, resource_pool, host) self.volumeops.move_backing_to_folder(backing, folder) @staticmethod def _get_clone_type(volume): """Get clone type from volume type. :param volume: Volume object :return: Clone type from the extra spec if present, else return default 'full' clone type """ clone_type = _get_volume_type_extra_spec( volume['volume_type_id'], 'clone_type', default_value=volumeops.FULL_CLONE_TYPE) if (clone_type != volumeops.FULL_CLONE_TYPE and clone_type != volumeops.LINKED_CLONE_TYPE): msg = (_("Clone type '%(clone_type)s' is invalid; valid values" " are: '%(full_clone)s' and '%(linked_clone)s'.") % {'clone_type': clone_type, 'full_clone': volumeops.FULL_CLONE_TYPE, 'linked_clone': volumeops.LINKED_CLONE_TYPE}) LOG.error(msg) raise exception.Invalid(message=msg) return clone_type def _clone_backing(self, volume, backing, snapshot, clone_type, src_vsize): """Clone the backing. :param volume: New Volume object :param backing: Reference to the backing entity :param snapshot: Reference to the snapshot entity :param clone_type: type of the clone :param src_vsize: the size of the source volume """ if (clone_type == volumeops.LINKED_CLONE_TYPE and volume.size > src_vsize): # Volume extend will fail if the volume is a linked clone of # another volume. Use full clone if extend is needed after cloning. clone_type = volumeops.FULL_CLONE_TYPE LOG.debug("Linked cloning not possible for creating volume " "since volume needs to be extended after cloning.", resource=volume) datastore = None host = None rp = None folder = None if clone_type != volumeops.LINKED_CLONE_TYPE: # Pick a datastore where to create the full clone under any host (host, rp, folder, summary) = self._select_ds_for_volume(volume) datastore = summary.datastore extra_config = self._get_extra_config(volume) clone = self.volumeops.clone_backing(volume['name'], backing, snapshot, clone_type, datastore, host=host, resource_pool=rp, extra_config=extra_config, folder=folder) # vCenter 6.0+ does not allow changing the UUID of delta disk created # during linked cloning; skip setting UUID for vCenter 6.0+. if (clone_type == volumeops.LINKED_CLONE_TYPE and versionutils.is_compatible( '6.0', self._vc_version, same_major=False)): LOG.debug("Not setting vmdk UUID for volume: %s.", volume['id']) else: self.volumeops.update_backing_disk_uuid(clone, volume['id']) # If the volume size specified by the user is greater than # the size of the source volume, the newly created volume will # allocate the capacity to the size of the source volume in the backend # VMDK datastore, though the volume information indicates it has a # capacity of the volume size. If the volume size is greater, # we need to extend/resize the capacity of the vmdk virtual disk from # the size of the source volume to the volume size. if volume['size'] > src_vsize: self._extend_backing(clone, volume['size'], VMwareVcVmdkDriver._get_disk_type(volume)) LOG.info("Successfully created clone: %s.", clone) def _create_volume_from_template(self, volume, path): LOG.debug("Creating backing for volume: %(volume_id)s from template " "at path: %(path)s.", {'volume_id': volume.id, 'path': path}) template = self._get_template_by_inv_path(path) # Create temporary backing by cloning the template. 
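        # In outline: the template is full-cloned into a temporary backing
        # with a generated name, after which _create_volume_from_temp_backing()
        # detaches the clone's disk, attaches it to a fresh backing created for
        # the new volume and deletes the temporary clone.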
tmp_name = uuidutils.generate_uuid() (host, rp, folder, summary) = self._select_ds_for_volume(volume) datastore = summary.datastore disk_type = VMwareVcVmdkDriver._get_disk_type(volume) tmp_backing = self.volumeops.clone_backing(tmp_name, template, None, volumeops.FULL_CLONE_TYPE, datastore, disk_type=disk_type, host=host, resource_pool=rp, folder=folder) self._create_volume_from_temp_backing(volume, tmp_backing) def _create_volume_from_snapshot(self, volume, snapshot): """Creates a volume from a snapshot. If the snapshot does not exist or source volume's backing does not exist, then pass. :param volume: New Volume object :param snapshot: Reference to snapshot entity """ backing = self.volumeops.get_backing(snapshot['volume_name'], snapshot['volume']['id']) if not backing: LOG.info("There is no backing for the snapshotted volume: " "%(snap)s. Not creating any backing for the " "volume: %(vol)s.", {'snap': snapshot['name'], 'vol': volume['name']}) return inv_path = snapshot.get('provider_location') if inv_path: self._create_volume_from_template(volume, inv_path) else: snapshot_moref = self.volumeops.get_snapshot(backing, snapshot['name']) if not snapshot_moref: LOG.info("There is no snapshot point for the snapshotted " "volume: %(snap)s. Not creating any backing for " "the volume: %(vol)s.", {'snap': snapshot['name'], 'vol': volume['name']}) return clone_type = VMwareVcVmdkDriver._get_clone_type(volume) self._clone_backing(volume, backing, snapshot_moref, clone_type, snapshot['volume_size']) def create_volume_from_snapshot(self, volume, snapshot): """Creates a volume from a snapshot. :param volume: New Volume object :param snapshot: Reference to snapshot entity """ self._create_volume_from_snapshot(volume, snapshot) def _get_volume_device_uuid(self, instance, volume_id): prop = 'config.extraConfig["volume-%s"]' % volume_id opt_val = self.session.invoke_api(vim_util, 'get_object_property', self.session.vim, instance, prop) if opt_val is not None: return opt_val.value def _create_temp_backing_from_attached_vmdk( self, src_vref, host, rp, folder, datastore, tmp_name=None): instance = self.volumeops.get_backing_by_uuid( src_vref['volume_attachment'][0]['instance_uuid']) vol_dev_uuid = self._get_volume_device_uuid(instance, src_vref['id']) LOG.debug("Cloning volume device: %(dev)s attached to instance: " "%(instance)s.", {'dev': vol_dev_uuid, 'instance': instance}) tmp_name = tmp_name or uuidutils.generate_uuid() return self.volumeops.clone_backing( tmp_name, instance, None, volumeops.FULL_CLONE_TYPE, datastore, host=host, resource_pool=rp, folder=folder, disks_to_clone=[vol_dev_uuid]) def _extend_if_needed(self, volume, backing): volume_size = volume.size * units.Gi disk_size = self.volumeops.get_disk_size(backing) if volume_size > disk_size: self._extend_backing(backing, volume.size, VMwareVcVmdkDriver._get_disk_type(volume)) def _create_volume_from_temp_backing(self, volume, tmp_backing): try: disk_device = self.volumeops._get_disk_device(tmp_backing) backing = self._manage_existing_int( volume, tmp_backing, disk_device) self._extend_if_needed(volume, backing) finally: self._delete_temp_backing(tmp_backing) def _clone_attached_volume(self, src_vref, volume): # Clone the vmdk attached to the instance to create a temporary # backing. 
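        # An attached volume's vmdk belongs to the instance VM rather than to a
        # standalone backing, so the temporary copy is produced by cloning just
        # that disk from the instance (see
        # _create_temp_backing_from_attached_vmdk() and its disks_to_clone
        # argument) instead of cloning the volume backing itself.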
(host, rp, folder, summary) = self._select_ds_for_volume(volume) datastore = summary.datastore tmp_backing = self._create_temp_backing_from_attached_vmdk( src_vref, host, rp, folder, datastore) self._create_volume_from_temp_backing(volume, tmp_backing) def _create_cloned_volume(self, volume, src_vref): """Creates volume clone. If source volume's backing does not exist, then pass. Linked clone of attached volume is not supported. :param volume: New Volume object :param src_vref: Source Volume object """ backing = self.volumeops.get_backing(src_vref['name'], src_vref['id']) if not backing: LOG.info("There is no backing for the source volume: %(src)s. " "Not creating any backing for volume: %(vol)s.", {'src': src_vref['name'], 'vol': volume['name']}) return clone_type = VMwareVcVmdkDriver._get_clone_type(volume) snapshot = None if clone_type == volumeops.LINKED_CLONE_TYPE: if src_vref['status'] != 'available': msg = _("Linked clone of source volume not supported " "in state: %s.") % src_vref['status'] LOG.error(msg) raise exception.InvalidVolume(msg) # To create a linked clone, we create a temporary snapshot of the # source volume, and then create the clone off the temporary # snapshot. snap_name = 'temp-snapshot-%s' % volume['id'] snapshot = self.volumeops.create_snapshot(backing, snap_name, None) if self._in_use(src_vref): self._clone_attached_volume(src_vref, volume) else: try: self._clone_backing(volume, backing, snapshot, clone_type, src_vref['size']) finally: if snapshot: # Delete temporary snapshot. try: self.volumeops.delete_snapshot(backing, snap_name) except exceptions.VimException: LOG.debug("Unable to delete temporary snapshot: %s of " "volume backing.", snap_name, resource=volume, exc_info=True) def create_cloned_volume(self, volume, src_vref): """Creates volume clone. :param volume: New Volume object :param src_vref: Source Volume object """ self._create_cloned_volume(volume, src_vref) def accept_transfer(self, context, volume, new_user, new_project): """Accept the transfer of a volume for a new user/project.""" backing = self.volumeops.get_backing(volume.name, volume.id) if backing: dc = self.volumeops.get_dc(backing) new_folder = self._get_volume_group_folder(dc, new_project) self.volumeops.move_backing_to_folder(backing, new_folder) def revert_to_snapshot(self, context, volume, snapshot): inv_path = snapshot.provider_location is_template = inv_path is not None if is_template: LOG.error("Revert to template based snapshot is not supported.") raise exception.InvalidSnapshot("Cannot revert to template " "based snapshot") backing = self.volumeops.get_backing(volume.name, volume.id) if not backing: LOG.debug("Backing does not exist for volume.", resource=volume) else: self.volumeops.revert_to_snapshot(backing, snapshot.name) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/drivers/vmware/volumeops.py0000664000175000017500000025323500000000000023236 0ustar00zuulzuul00000000000000# Copyright (c) 2013 VMware, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. """ Implements operations on volumes residing on VMware datastores. """ import json import urllib from oslo_log import log as logging from oslo_utils import units from oslo_vmware import exceptions from oslo_vmware.objects import datastore as ds_obj from oslo_vmware import vim_util from cinder.i18n import _ from cinder.volume.drivers.vmware import exceptions as vmdk_exceptions LOG = logging.getLogger(__name__) LINKED_CLONE_TYPE = 'linked' FULL_CLONE_TYPE = 'full' BACKING_UUID_KEY = 'instanceUuid' def split_datastore_path(datastore_path): """Split the datastore path to components. return the datastore name, relative folder path and the file name E.g. datastore_path = [datastore1] my_volume/my_volume.vmdk, returns (datastore1, my_volume/, my_volume.vmdk) :param datastore_path: Datastore path of a file :return: Parsed datastore name, relative folder path and file name """ splits = datastore_path.split('[', 1)[1].split(']', 1) datastore_name = None folder_path = None file_name = None if len(splits) == 1: datastore_name = splits[0] else: datastore_name, path = splits # Path will be of form my_volume/my_volume.vmdk # we need into my_volumes/ and my_volume.vmdk splits = path.split('/') file_name = splits[len(splits) - 1] folder_path = path[:-len(file_name)] return (datastore_name.strip(), folder_path.strip(), file_name.strip()) class VirtualDiskPath(object): """Class representing paths of files comprising a virtual disk.""" def __init__(self, ds_name, folder_path, disk_name): """Creates path object for the given disk. :param ds_name: name of the datastore where disk is stored :param folder_path: absolute path of the folder containing the disk :param disk_name: name of the virtual disk """ self._descriptor_file_path = "%s%s.vmdk" % (folder_path, disk_name) self._descriptor_ds_file_path = self.get_datastore_file_path( ds_name, self._descriptor_file_path) def get_datastore_file_path(self, ds_name, file_path): """Get datastore path corresponding to the given file path. :param ds_name: name of the datastore containing the file represented by the given file path :param file_path: absolute path of the file :return: datastore file path """ return "[%s] %s" % (ds_name, file_path) def get_descriptor_file_path(self): """Get absolute file path of the virtual disk descriptor.""" return self._descriptor_file_path def get_descriptor_ds_file_path(self): """Get datastore file path of the virtual disk descriptor.""" return self._descriptor_ds_file_path class FlatExtentVirtualDiskPath(VirtualDiskPath): """Paths of files in a non-monolithic disk with a single flat extent.""" def __init__(self, ds_name, folder_path, disk_name): """Creates path object for the given disk. 
:param ds_name: name of the datastore where disk is stored :param folder_path: absolute path of the folder containing the disk :param disk_name: name of the virtual disk """ super(FlatExtentVirtualDiskPath, self).__init__( ds_name, folder_path, disk_name) self._flat_extent_file_path = "%s%s-flat.vmdk" % (folder_path, disk_name) self._flat_extent_ds_file_path = self.get_datastore_file_path( ds_name, self._flat_extent_file_path) def get_flat_extent_file_path(self): """Get absolute file path of the flat extent.""" return self._flat_extent_file_path def get_flat_extent_ds_file_path(self): """Get datastore file path of the flat extent.""" return self._flat_extent_ds_file_path class MonolithicSparseVirtualDiskPath(VirtualDiskPath): """Paths of file comprising a monolithic sparse disk.""" pass class VirtualDiskType(object): """Supported virtual disk types.""" EAGER_ZEROED_THICK = "eagerZeroedThick" PREALLOCATED = "preallocated" THIN = "thin" # thick in extra_spec means lazy-zeroed thick disk EXTRA_SPEC_DISK_TYPE_DICT = {'eagerZeroedThick': EAGER_ZEROED_THICK, 'thick': PREALLOCATED, 'thin': THIN } @staticmethod def is_valid(extra_spec_disk_type): """Check if the given disk type in extra_spec is valid. :param extra_spec_disk_type: disk type in extra_spec :return: True if valid """ return (extra_spec_disk_type in VirtualDiskType.EXTRA_SPEC_DISK_TYPE_DICT) @staticmethod def validate(extra_spec_disk_type): """Validate the given disk type in extra_spec. This method throws an instance of InvalidDiskTypeException if the given disk type is invalid. :param extra_spec_disk_type: disk type in extra_spec :raises: InvalidDiskTypeException """ if not VirtualDiskType.is_valid(extra_spec_disk_type): raise vmdk_exceptions.InvalidDiskTypeException( disk_type=extra_spec_disk_type) @staticmethod def get_virtual_disk_type(extra_spec_disk_type): """Return disk type corresponding to the extra_spec disk type. :param extra_spec_disk_type: disk type in extra_spec :return: virtual disk type :raises: InvalidDiskTypeException """ VirtualDiskType.validate(extra_spec_disk_type) return (VirtualDiskType.EXTRA_SPEC_DISK_TYPE_DICT[ extra_spec_disk_type]) class VirtualDiskAdapterType(object): """Supported virtual disk adapter types.""" LSI_LOGIC = "lsiLogic" BUS_LOGIC = "busLogic" LSI_LOGIC_SAS = "lsiLogicsas" PARA_VIRTUAL = "paraVirtual" IDE = "ide" @staticmethod def is_valid(adapter_type): """Check if the given adapter type is valid. :param adapter_type: adapter type to check :return: True if valid """ return adapter_type in [VirtualDiskAdapterType.LSI_LOGIC, VirtualDiskAdapterType.BUS_LOGIC, VirtualDiskAdapterType.LSI_LOGIC_SAS, VirtualDiskAdapterType.PARA_VIRTUAL, VirtualDiskAdapterType.IDE] @staticmethod def validate(extra_spec_adapter_type): """Validate the given adapter type in extra_spec. This method throws an instance of InvalidAdapterTypeException if the given adapter type is invalid. :param extra_spec_adapter_type: adapter type in extra_spec :raises: InvalidAdapterTypeException """ if not VirtualDiskAdapterType.is_valid(extra_spec_adapter_type): raise vmdk_exceptions.InvalidAdapterTypeException( invalid_type=extra_spec_adapter_type) @staticmethod def get_adapter_type(extra_spec_adapter): """Get the adapter type to be used in VirtualDiskSpec. 
:param extra_spec_adapter: adapter type in the extra_spec :return: adapter type to be used in VirtualDiskSpec """ VirtualDiskAdapterType.validate(extra_spec_adapter) # We set the adapter type as lsiLogic for lsiLogicsas/paraVirtual # since it is not supported by VirtualDiskManager APIs. This won't # be a problem because we attach the virtual disk to the correct # controller type and the disk adapter type is always resolved using # its controller key. if (extra_spec_adapter == VirtualDiskAdapterType.LSI_LOGIC_SAS or extra_spec_adapter == VirtualDiskAdapterType.PARA_VIRTUAL): return VirtualDiskAdapterType.LSI_LOGIC else: return extra_spec_adapter class ControllerType(object): """Encapsulate various controller types.""" LSI_LOGIC = 'VirtualLsiLogicController' BUS_LOGIC = 'VirtualBusLogicController' LSI_LOGIC_SAS = 'VirtualLsiLogicSASController' PARA_VIRTUAL = 'ParaVirtualSCSIController' IDE = 'VirtualIDEController' CONTROLLER_TYPE_DICT = { VirtualDiskAdapterType.LSI_LOGIC: LSI_LOGIC, VirtualDiskAdapterType.BUS_LOGIC: BUS_LOGIC, VirtualDiskAdapterType.LSI_LOGIC_SAS: LSI_LOGIC_SAS, VirtualDiskAdapterType.PARA_VIRTUAL: PARA_VIRTUAL, VirtualDiskAdapterType.IDE: IDE} @staticmethod def get_controller_type(adapter_type): """Get the disk controller type based on the given adapter type. :param adapter_type: disk adapter type :return: controller type corresponding to the given adapter type :raises: InvalidAdapterTypeException """ if adapter_type in ControllerType.CONTROLLER_TYPE_DICT: return ControllerType.CONTROLLER_TYPE_DICT[adapter_type] raise vmdk_exceptions.InvalidAdapterTypeException( invalid_type=adapter_type) @staticmethod def is_scsi_controller(controller_type): """Check if the given controller is a SCSI controller. :param controller_type: controller type :return: True if the controller is a SCSI controller """ return controller_type in [ControllerType.LSI_LOGIC, ControllerType.BUS_LOGIC, ControllerType.LSI_LOGIC_SAS, ControllerType.PARA_VIRTUAL] class VMwareVolumeOps(object): """Manages volume operations.""" def __init__(self, session, max_objects, extension_key, extension_type): self._session = session self._max_objects = max_objects self._extension_key = extension_key self._extension_type = extension_type self._folder_cache = {} self._backing_ref_cache = {} self._vmx_version = None def set_vmx_version(self, vmx_version): self._vmx_version = vmx_version def get_backing(self, name, backing_uuid): """Get the backing based on name or uuid. 
:param name: Name of the backing :param backing_uuid: UUID of the backing :return: Managed object reference to the backing """ ref = self.get_backing_by_uuid(backing_uuid) if not ref: # old version of the driver might have created this backing and # hence cannot be queried by uuid LOG.debug("Returning cached ref for %s.", name) ref = self._backing_ref_cache.get(name) LOG.debug("Backing (%(name)s, %(uuid)s) ref: %(ref)s.", {'name': name, 'uuid': backing_uuid, 'ref': ref}) return ref def get_backing_by_uuid(self, uuid): LOG.debug("Get ref by UUID: %s.", uuid) result = self._session.invoke_api( self._session.vim, 'FindAllByUuid', self._session.vim.service_content.searchIndex, uuid=uuid, vmSearch=True, instanceUuid=True) if result: return result[0] def build_backing_ref_cache(self, name_regex=None): LOG.debug("Building backing ref cache.") result = self._session.invoke_api( vim_util, 'get_objects', self._session.vim, 'VirtualMachine', self._max_objects, properties_to_collect=[ 'name', 'config.instanceUuid', 'config.extraConfig["cinder.volume.id"]']) while result: for backing in result.objects: instance_uuid = None vol_id = None for prop in backing.propSet: if prop.name == 'name': name = prop.val elif prop.name == 'config.instanceUuid': instance_uuid = prop.val else: vol_id = prop.val.value if name_regex and not name_regex.match(name): continue if instance_uuid and instance_uuid == vol_id: # no need to cache backing with UUID set to volume ID continue self._backing_ref_cache[name] = backing.obj result = self.continue_retrieval(result) LOG.debug("Backing ref cache size: %d.", len(self._backing_ref_cache)) def delete_backing(self, backing): """Delete the backing. :param backing: Managed object reference to the backing """ LOG.debug("Deleting the VM backing: %s.", backing) task = self._session.invoke_api(self._session.vim, 'Destroy_Task', backing) LOG.debug("Initiated deletion of VM backing: %s.", backing) self._session.wait_for_task(task) LOG.info("Deleted the VM backing: %s.", backing) # TODO(kartikaditya) Keep the methods not specific to volume in # a different file def get_host(self, instance): """Get host under which instance is present. :param instance: Managed object reference of the instance VM :return: Host managing the instance VM """ return self._session.invoke_api(vim_util, 'get_object_property', self._session.vim, instance, 'runtime.host') def get_hosts(self): """Get all host from the inventory. :return: All the hosts from the inventory """ return self._session.invoke_api(vim_util, 'get_objects', self._session.vim, 'HostSystem', self._max_objects) def continue_retrieval(self, retrieve_result): """Continue retrieval of results if necessary. :param retrieve_result: Result from RetrievePropertiesEx """ return self._session.invoke_api(vim_util, 'continue_retrieval', self._session.vim, retrieve_result) def cancel_retrieval(self, retrieve_result): """Cancel retrieval of results if necessary. :param retrieve_result: Result from RetrievePropertiesEx """ self._session.invoke_api(vim_util, 'cancel_retrieval', self._session.vim, retrieve_result) # TODO(vbala): move this method to datastore module def _is_usable(self, mount_info): """Check if a datastore is usable as per the given mount info. The datastore is considered to be usable for a host only if it is writable, mounted and accessible. 
:param mount_info: Host mount information :return: True if datastore is usable """ writable = mount_info.accessMode == 'readWrite' # If mounted attribute is not set, then default is True mounted = getattr(mount_info, 'mounted', True) # If accessible attribute is not set, then default is False accessible = getattr(mount_info, 'accessible', False) return writable and mounted and accessible def get_connected_hosts(self, datastore): """Get all the hosts to which the datastore is connected and usable. The datastore is considered to be usable for a host only if it is writable, mounted and accessible. :param datastore: Reference to the datastore entity :return: List of managed object references of all connected hosts """ summary = self.get_summary(datastore) if not summary.accessible: return [] host_mounts = self._session.invoke_api(vim_util, 'get_object_property', self._session.vim, datastore, 'host') if not hasattr(host_mounts, 'DatastoreHostMount'): return [] connected_hosts = [] for host_mount in host_mounts.DatastoreHostMount: if self._is_usable(host_mount.mountInfo): host_mount_key_value = vim_util.get_moref_value(host_mount.key) connected_hosts.append(host_mount_key_value) return connected_hosts def is_datastore_accessible(self, datastore, host): """Check if the datastore is accessible to the given host. :param datastore: datastore reference :return: True if the datastore is accessible """ hosts = self.get_connected_hosts(datastore) return vim_util.get_moref_value(host) in hosts # TODO(vbala): move this method to datastore module def _in_maintenance(self, summary): """Check if a datastore is entering maintenance or in maintenance. :param summary: Summary information about the datastore :return: True if the datastore is entering maintenance or in maintenance """ if hasattr(summary, 'maintenanceMode'): return summary.maintenanceMode in ['enteringMaintenance', 'inMaintenance'] return False def _get_parent(self, child, parent_type): """Get immediate parent of given type via 'parent' property. :param child: Child entity reference :param parent_type: Entity type of the parent :return: Immediate parent of specific type up the hierarchy via 'parent' property """ if not child: return None if child._type == parent_type: return child parent = self._session.invoke_api(vim_util, 'get_object_property', self._session.vim, child, 'parent') return self._get_parent(parent, parent_type) def get_dc(self, child): """Get parent datacenter up the hierarchy via 'parent' property. :param child: Reference of the child entity :return: Parent Datacenter of the param child entity """ return self._get_parent(child, 'Datacenter') def get_vmfolder(self, datacenter): """Get the vmFolder. 
:param datacenter: Reference to the datacenter entity :return: vmFolder property of the datacenter """ return self._session.invoke_api(vim_util, 'get_object_property', self._session.vim, datacenter, 'vmFolder') def _get_child_folder(self, parent_folder, child_folder_name): LOG.debug("Finding child folder: %s.", child_folder_name) # Get list of child entities for the parent folder prop_val = self._session.invoke_api(vim_util, 'get_object_property', self._session.vim, parent_folder, 'childEntity') if prop_val and hasattr(prop_val, 'ManagedObjectReference'): child_entities = prop_val.ManagedObjectReference # Return if the child folder with input name is already present for child_entity in child_entities: if child_entity._type != 'Folder': continue child_entity_name = self.get_entity_name(child_entity) if (child_entity_name and (urllib.parse.unquote(child_entity_name) == child_folder_name)): return child_entity def create_folder(self, parent_folder, child_folder_name): """Creates child folder under the given parent folder. :param parent_folder: Reference to the parent folder :param child_folder_name: Name of the child folder :return: Reference to the child folder """ LOG.debug("Creating folder: %(child_folder_name)s under parent " "folder: %(parent_folder)s.", {'child_folder_name': child_folder_name, 'parent_folder': parent_folder}) try: child_folder = self._session.invoke_api(self._session.vim, 'CreateFolder', parent_folder, name=child_folder_name) LOG.debug("Created child folder: %s.", child_folder) except exceptions.DuplicateName: # Another thread is trying to create the same folder, ignore # the exception. LOG.debug('Folder: %s already exists.', child_folder_name) child_folder = self._get_child_folder(parent_folder, child_folder_name) return child_folder def create_vm_inventory_folder(self, datacenter, path_comp): """Create and return a VM inventory folder. This method caches references to inventory folders returned. :param datacenter: Reference to datacenter :param path_comp: Path components as a list """ LOG.debug("Creating inventory folder: %(path_comp)s under VM folder " "of datacenter: %(datacenter)s.", {'path_comp': path_comp, 'datacenter': datacenter}) path = "/" + vim_util.get_moref_value(datacenter) parent = self._folder_cache.get(path) if not parent: parent = self.get_vmfolder(datacenter) self._folder_cache[path] = parent folder = None for folder_name in path_comp: path = "/".join([path, folder_name]) folder = self._folder_cache.get(path) if not folder: folder = self.create_folder(parent, folder_name) self._folder_cache[path] = folder parent = folder LOG.debug("Inventory folder for path: %(path)s is %(folder)s.", {'path': path, 'folder': folder}) return folder def extend_virtual_disk(self, requested_size_in_gb, path, dc_ref, eager_zero=False): """Extend the virtual disk to the requested size. :param requested_size_in_gb: Size of the volume in GB :param path: Datastore path of the virtual disk to extend :param dc_ref: Reference to datacenter :param eager_zero: Boolean determining if the free space is zeroed out """ LOG.debug("Extending virtual disk: %(path)s to %(size)s GB.", {'path': path, 'size': requested_size_in_gb}) diskMgr = self._session.vim.service_content.virtualDiskManager # VMware API needs the capacity unit to be in KB, so convert the # capacity unit from GB to KB. 
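# For example, a 2 GB extend request becomes 2 * units.Mi = 2097152 KB
# (oslo_utils.units.Mi == 1024 * 1024), which is the value passed below as newCapacityKb.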
size_in_kb = requested_size_in_gb * units.Mi task = self._session.invoke_api(self._session.vim, "ExtendVirtualDisk_Task", diskMgr, name=path, datacenter=dc_ref, newCapacityKb=size_in_kb, eagerZero=eager_zero) self._session.wait_for_task(task) LOG.info("Successfully extended virtual disk: %(path)s to " "%(size)s GB.", {'path': path, 'size': requested_size_in_gb}) def _create_controller_config_spec(self, adapter_type): """Returns config spec for adding a disk controller.""" cf = self._session.vim.client.factory controller_type = ControllerType.get_controller_type(adapter_type) controller_device = cf.create('ns0:%s' % controller_type) controller_device.key = -100 controller_device.busNumber = 0 if ControllerType.is_scsi_controller(controller_type): controller_device.sharedBus = 'noSharing' controller_spec = cf.create('ns0:VirtualDeviceConfigSpec') controller_spec.operation = 'add' controller_spec.device = controller_device return controller_spec def _create_disk_backing(self, disk_type, vmdk_ds_file_path): """Creates file backing for virtual disk.""" cf = self._session.vim.client.factory disk_device_bkng = cf.create('ns0:VirtualDiskFlatVer2BackingInfo') if disk_type == VirtualDiskType.EAGER_ZEROED_THICK: disk_device_bkng.eagerlyScrub = True elif disk_type == VirtualDiskType.THIN: disk_device_bkng.thinProvisioned = True disk_device_bkng.fileName = vmdk_ds_file_path or '' disk_device_bkng.diskMode = 'persistent' return disk_device_bkng def _create_virtual_disk_config_spec(self, size_kb, disk_type, controller_key, profile_id, vmdk_ds_file_path): """Returns config spec for adding a virtual disk.""" cf = self._session.vim.client.factory disk_device = cf.create('ns0:VirtualDisk') # disk size should be at least 1024KB disk_device.capacityInKB = max(units.Ki, int(size_kb)) if controller_key < 0: disk_device.key = controller_key - 1 else: disk_device.key = -101 disk_device.unitNumber = 0 disk_device.controllerKey = controller_key disk_device.backing = self._create_disk_backing(disk_type, vmdk_ds_file_path) disk_spec = cf.create('ns0:VirtualDeviceConfigSpec') disk_spec.operation = 'add' if vmdk_ds_file_path is None: disk_spec.fileOperation = 'create' disk_spec.device = disk_device if profile_id: disk_profile = cf.create('ns0:VirtualMachineDefinedProfileSpec') disk_profile.profileId = profile_id disk_spec.profile = [disk_profile] return disk_spec def _create_specs_for_disk_add(self, size_kb, disk_type, adapter_type, profile_id, vmdk_ds_file_path=None): """Create controller and disk config specs for adding a new disk. :param size_kb: disk size in KB :param disk_type: disk provisioning type :param adapter_type: disk adapter type :param profile_id: storage policy profile identification :param vmdk_ds_file_path: Optional datastore file path of an existing virtual disk. If specified, file backing is not created for the virtual disk. :return: list containing controller and disk config specs """ controller_spec = None if adapter_type == 'ide': # For IDE disks, use one of the default IDE controllers (with keys # 200 and 201) created as part of backing VM creation. 
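# Reusing a default IDE controller (key 200) means no new controller spec is
# needed here: controller_spec stays None and only the disk spec is returned.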
controller_key = 200 else: controller_spec = self._create_controller_config_spec(adapter_type) controller_key = controller_spec.device.key disk_spec = self._create_virtual_disk_config_spec(size_kb, disk_type, controller_key, profile_id, vmdk_ds_file_path) specs = [disk_spec] if controller_spec is not None: specs.append(controller_spec) return specs def _get_extra_config_option_values(self, extra_config): cf = self._session.vim.client.factory option_values = [] for key, value in extra_config.items(): opt = cf.create('ns0:OptionValue') opt.key = key opt.value = value option_values.append(opt) return option_values def _create_managed_by_info(self): managed_by = self._session.vim.client.factory.create( 'ns0:ManagedByInfo') managed_by.extensionKey = self._extension_key managed_by.type = self._extension_type return managed_by def _get_create_spec_disk_less(self, name, ds_name, profileId=None, extra_config=None): """Return spec for creating disk-less backing. :param name: Name of the backing :param ds_name: Datastore name where the disk is to be provisioned :param profileId: Storage profile ID for the backing :param extra_config: Key-value pairs to be written to backing's extra-config :return: Spec for creation """ cf = self._session.vim.client.factory vm_file_info = cf.create('ns0:VirtualMachineFileInfo') vm_file_info.vmPathName = '[%s]' % ds_name create_spec = cf.create('ns0:VirtualMachineConfigSpec') create_spec.name = name create_spec.guestId = 'otherGuest' create_spec.numCPUs = 1 create_spec.memoryMB = 128 create_spec.files = vm_file_info # Set the default hardware version to a compatible version supported by # vSphere 5.0. This will ensure that the backing VM can be migrated # without any incompatibility issues in a mixed cluster of ESX hosts # with versions 5.0 or above. create_spec.version = self._vmx_version or "vmx-08" if profileId: vmProfile = cf.create('ns0:VirtualMachineDefinedProfileSpec') vmProfile.profileId = profileId create_spec.vmProfile = [vmProfile] if extra_config: if BACKING_UUID_KEY in extra_config: create_spec.instanceUuid = extra_config.pop(BACKING_UUID_KEY) create_spec.extraConfig = self._get_extra_config_option_values( extra_config) create_spec.managedBy = self._create_managed_by_info() return create_spec def get_create_spec(self, name, size_kb, disk_type, ds_name, profile_id=None, adapter_type='lsiLogic', extra_config=None): """Return spec for creating backing with a single disk. 
:param name: name of the backing :param size_kb: disk size in KB :param disk_type: disk provisioning type :param ds_name: datastore name where the disk is to be provisioned :param profile_id: storage policy profile identification :param adapter_type: disk adapter type :param extra_config: key-value pairs to be written to backing's extra-config :return: spec for creation """ create_spec = self._get_create_spec_disk_less( name, ds_name, profileId=profile_id, extra_config=extra_config) create_spec.deviceChange = self._create_specs_for_disk_add( size_kb, disk_type, adapter_type, profile_id) return create_spec def _create_backing_int(self, folder, resource_pool, host, create_spec): """Helper for create backing methods.""" LOG.debug("Creating volume backing with spec: %s.", create_spec) task = self._session.invoke_api(self._session.vim, 'CreateVM_Task', folder, config=create_spec, pool=resource_pool, host=host) task_info = self._session.wait_for_task(task) backing = task_info.result LOG.info("Successfully created volume backing: %s.", backing) return backing def create_backing(self, name, size_kb, disk_type, folder, resource_pool, host, ds_name, profileId=None, adapter_type='lsiLogic', extra_config=None): """Create backing for the volume. Creates a VM with one VMDK based on the given inputs. :param name: Name of the backing :param size_kb: Size in KB of the backing :param disk_type: VMDK type for the disk :param folder: Folder, where to create the backing under :param resource_pool: Resource pool reference :param host: Host reference :param ds_name: Datastore name where the disk is to be provisioned :param profileId: Storage profile ID to be associated with backing :param adapter_type: Disk adapter type :param extra_config: Key-value pairs to be written to backing's extra-config :return: Reference to the created backing entity """ LOG.debug("Creating volume backing with name: %(name)s " "disk_type: %(disk_type)s size_kb: %(size_kb)s " "adapter_type: %(adapter_type)s profileId: %(profile)s at " "folder: %(folder)s resource_pool: %(resource_pool)s " "host: %(host)s datastore_name: %(ds_name)s.", {'name': name, 'disk_type': disk_type, 'size_kb': size_kb, 'folder': folder, 'resource_pool': resource_pool, 'ds_name': ds_name, 'profile': profileId, 'host': host, 'adapter_type': adapter_type}) create_spec = self.get_create_spec( name, size_kb, disk_type, ds_name, profile_id=profileId, adapter_type=adapter_type, extra_config=extra_config) return self._create_backing_int(folder, resource_pool, host, create_spec) def create_backing_disk_less(self, name, folder, resource_pool, host, ds_name, profileId=None, extra_config=None): """Create disk-less volume backing. This type of backing is useful for creating volume from image. The downloaded image from the image service can be copied to a virtual disk of desired provisioning type and added to the backing VM. 
:param name: Name of the backing :param folder: Folder where the backing is created :param resource_pool: Resource pool reference :param host: Host reference :param ds_name: Name of the datastore used for VM storage :param profileId: Storage profile ID to be associated with backing :param extra_config: Key-value pairs to be written to backing's extra-config :return: Reference to the created backing entity """ LOG.debug("Creating disk-less volume backing with name: %(name)s " "profileId: %(profile)s at folder: %(folder)s " "resource pool: %(resource_pool)s host: %(host)s " "datastore_name: %(ds_name)s.", {'name': name, 'profile': profileId, 'folder': folder, 'resource_pool': resource_pool, 'host': host, 'ds_name': ds_name}) create_spec = self._get_create_spec_disk_less( name, ds_name, profileId=profileId, extra_config=extra_config) return self._create_backing_int(folder, resource_pool, host, create_spec) def get_datastore(self, backing): """Get datastore where the backing resides. :param backing: Reference to the backing :return: Datastore reference to which the backing belongs """ return self._session.invoke_api(vim_util, 'get_object_property', self._session.vim, backing, 'datastore').ManagedObjectReference[0] def get_summary(self, datastore): """Get datastore summary. :param datastore: Reference to the datastore :return: 'summary' property of the datastore """ return self._session.invoke_api(vim_util, 'get_object_property', self._session.vim, datastore, 'summary') def _create_relocate_spec_disk_locator(self, datastore, disk_type, disk_device): """Creates spec for disk type conversion during relocate.""" cf = self._session.vim.client.factory disk_locator = cf.create("ns0:VirtualMachineRelocateSpecDiskLocator") disk_locator.datastore = datastore disk_locator.diskId = disk_device.key disk_locator.diskBackingInfo = self._create_disk_backing(disk_type, None) return disk_locator def _get_relocate_spec(self, datastore, resource_pool, host, disk_move_type, disk_type=None, disk_device=None): """Return spec for relocating volume backing. :param datastore: Reference to the datastore :param resource_pool: Reference to the resource pool :param host: Reference to the host :param disk_move_type: Disk move type option :param disk_type: Destination disk type :param disk_device: Virtual device corresponding to the disk :return: Spec for relocation """ cf = self._session.vim.client.factory relocate_spec = cf.create('ns0:VirtualMachineRelocateSpec') relocate_spec.datastore = datastore relocate_spec.pool = resource_pool relocate_spec.host = host relocate_spec.diskMoveType = disk_move_type if disk_type is not None and disk_device is not None: disk_locator = self._create_relocate_spec_disk_locator(datastore, disk_type, disk_device) relocate_spec.disk = [disk_locator] LOG.debug("Spec for relocating the backing: %s.", relocate_spec) return relocate_spec def relocate_backing( self, backing, datastore, resource_pool, host, disk_type=None): """Relocates backing to the input datastore and resource pool. The implementation uses moveAllDiskBackingsAndAllowSharing disk move type. 
:param backing: Reference to the backing :param datastore: Reference to the datastore :param resource_pool: Reference to the resource pool :param host: Reference to the host :param disk_type: destination disk type """ LOG.debug("Relocating backing: %(backing)s to datastore: %(ds)s " "and resource pool: %(rp)s with destination disk type: " "%(disk_type)s.", {'backing': backing, 'ds': datastore, 'rp': resource_pool, 'disk_type': disk_type}) # Relocate the volume backing disk_move_type = 'moveAllDiskBackingsAndAllowSharing' disk_device = None if disk_type is not None: disk_device = self._get_disk_device(backing) relocate_spec = self._get_relocate_spec(datastore, resource_pool, host, disk_move_type, disk_type, disk_device) task = self._session.invoke_api(self._session.vim, 'RelocateVM_Task', backing, spec=relocate_spec) LOG.debug("Initiated relocation of volume backing: %s.", backing) self._session.wait_for_task(task) LOG.info("Successfully relocated volume backing: %(backing)s " "to datastore: %(ds)s and resource pool: %(rp)s.", {'backing': backing, 'ds': datastore, 'rp': resource_pool}) def move_backing_to_folder(self, backing, folder): """Move the volume backing to the folder. :param backing: Reference to the backing :param folder: Reference to the folder """ LOG.debug("Moving backing: %(backing)s to folder: %(fol)s.", {'backing': backing, 'fol': folder}) task = self._session.invoke_api(self._session.vim, 'MoveIntoFolder_Task', folder, list=[backing]) LOG.debug("Initiated move of volume backing: %(backing)s into the " "folder: %(fol)s.", {'backing': backing, 'fol': folder}) self._session.wait_for_task(task) LOG.info("Successfully moved volume " "backing: %(backing)s into the " "folder: %(fol)s.", {'backing': backing, 'fol': folder}) def create_snapshot(self, backing, name, description, quiesce=False): """Create snapshot of the backing with given name and description. :param backing: Reference to the backing entity :param name: Snapshot name :param description: Snapshot description :param quiesce: Whether to quiesce the backing when taking snapshot :return: Created snapshot entity reference """ LOG.debug("Snapshoting backing: %(backing)s with name: %(name)s.", {'backing': backing, 'name': name}) task = self._session.invoke_api(self._session.vim, 'CreateSnapshot_Task', backing, name=name, description=description, memory=False, quiesce=quiesce) LOG.debug("Initiated snapshot of volume backing: %(backing)s " "named: %(name)s.", {'backing': backing, 'name': name}) task_info = self._session.wait_for_task(task) snapshot = task_info.result LOG.info("Successfully created snapshot: %(snap)s for volume " "backing: %(backing)s.", {'snap': snapshot, 'backing': backing}) return snapshot @staticmethod def _get_snapshot_from_tree(name, root): """Get snapshot by name from the snapshot tree root. :param name: Snapshot name :param root: Current root node in the snapshot tree :return: None in the snapshot tree with given snapshot name """ if not root: return None if root.name == name: return root.snapshot if (not hasattr(root, 'childSnapshotList') or not root.childSnapshotList): # When root does not have children, the childSnapshotList attr # is missing sometime. Adding an additional check. return None for node in root.childSnapshotList: snapshot = VMwareVolumeOps._get_snapshot_from_tree(name, node) if snapshot: return snapshot def get_snapshot(self, backing, name): """Get snapshot of the backing with given name. 
:param backing: Reference to the backing entity :param name: Snapshot name :return: Snapshot entity of the backing with given name """ snapshot = self._session.invoke_api(vim_util, 'get_object_property', self._session.vim, backing, 'snapshot') if not snapshot or not snapshot.rootSnapshotList: return None for root in snapshot.rootSnapshotList: return VMwareVolumeOps._get_snapshot_from_tree(name, root) def snapshot_exists(self, backing): """Check if the given backing contains snapshots.""" snapshot = self._session.invoke_api(vim_util, 'get_object_property', self._session.vim, backing, 'snapshot') if snapshot is None or snapshot.rootSnapshotList is None: return False return len(snapshot.rootSnapshotList) != 0 def delete_snapshot(self, backing, name): """Delete a given snapshot from volume backing. :param backing: Reference to the backing entity :param name: Snapshot name """ LOG.debug("Deleting the snapshot: %(name)s from backing: " "%(backing)s.", {'name': name, 'backing': backing}) snapshot = self.get_snapshot(backing, name) if not snapshot: LOG.info("Did not find the snapshot: %(name)s for backing: " "%(backing)s. Need not delete anything.", {'name': name, 'backing': backing}) return task = self._session.invoke_api(self._session.vim, 'RemoveSnapshot_Task', snapshot, removeChildren=False) LOG.debug("Initiated snapshot: %(name)s deletion for backing: " "%(backing)s.", {'name': name, 'backing': backing}) self._session.wait_for_task(task) LOG.info("Successfully deleted snapshot: %(name)s of backing: " "%(backing)s.", {'backing': backing, 'name': name}) def revert_to_snapshot(self, backing, name): LOG.debug("Revert to snapshot: %(name)s of backing: %(backing)s.", {'name': name, 'backing': backing}) snapshot = self.get_snapshot(backing, name) if not snapshot: raise vmdk_exceptions.SnapshotNotFoundException( name=name) task = self._session.invoke_api(self._session.vim, 'RevertToSnapshot_Task', snapshot) self._session.wait_for_task(task) def _get_folder(self, backing): """Get parent folder of the backing. :param backing: Reference to the backing entity :return: Reference to parent folder of the backing entity """ return self._get_parent(backing, 'Folder') def _get_clone_spec(self, datastore, disk_move_type, snapshot, backing, disk_type, host=None, resource_pool=None, extra_config=None, disks_to_clone=None): """Get the clone spec. 
:param datastore: Reference to datastore :param disk_move_type: Disk move type :param snapshot: Reference to snapshot :param backing: Source backing VM :param disk_type: Disk type of clone :param host: Target host :param resource_pool: Target resource pool :param extra_config: Key-value pairs to be written to backing's extra-config :param disks_to_clone: UUIDs of disks to clone :return: Clone spec """ if disk_type is not None: disk_device = self._get_disk_device(backing) else: disk_device = None relocate_spec = self._get_relocate_spec(datastore, resource_pool, host, disk_move_type, disk_type, disk_device) cf = self._session.vim.client.factory clone_spec = cf.create('ns0:VirtualMachineCloneSpec') clone_spec.location = relocate_spec clone_spec.powerOn = False clone_spec.template = False clone_spec.snapshot = snapshot config_spec = cf.create('ns0:VirtualMachineConfigSpec') config_spec.managedBy = self._create_managed_by_info() clone_spec.config = config_spec if extra_config: if BACKING_UUID_KEY in extra_config: config_spec.instanceUuid = extra_config.pop(BACKING_UUID_KEY) config_spec.extraConfig = self._get_extra_config_option_values( extra_config) if disks_to_clone: config_spec.deviceChange = ( self._create_device_change_for_disk_removal( backing, disks_to_clone)) LOG.debug("Spec for cloning the backing: %s.", clone_spec) return clone_spec def _create_device_change_for_disk_removal(self, backing, disks_to_clone): disk_devices = self._get_disk_devices(backing) device_change = [] for device in disk_devices: if device.backing.uuid not in disks_to_clone: device_change.append(self._create_spec_for_disk_remove(device)) return device_change def clone_backing(self, name, backing, snapshot, clone_type, datastore, disk_type=None, host=None, resource_pool=None, extra_config=None, folder=None, disks_to_clone=None): """Clone backing. If the clone_type is 'full', then a full clone of the source volume backing will be created. Else, if it is 'linked', then a linked clone of the source volume backing will be created. :param name: Name for the clone :param backing: Reference to the backing entity :param snapshot: Snapshot point from which the clone should be done :param clone_type: Whether a full clone or linked clone is to be made :param datastore: Reference to the datastore entity :param disk_type: Disk type of the clone :param host: Target host :param resource_pool: Target resource pool :param extra_config: Key-value pairs to be written to backing's extra-config :param folder: The location of the clone :param disks_to_clone: UUIDs of disks to clone """ LOG.debug("Creating a clone of backing: %(back)s, named: %(name)s, " "clone type: %(type)s from snapshot: %(snap)s on " "resource pool: %(resource_pool)s, host: %(host)s, " "datastore: %(ds)s with disk type: %(disk_type)s.", {'back': backing, 'name': name, 'type': clone_type, 'snap': snapshot, 'ds': datastore, 'disk_type': disk_type, 'host': host, 'resource_pool': resource_pool}) if folder is None: # Use source folder as the location of the clone. 
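# _get_folder() walks the 'parent' chain up to the first Folder entity, so by
# default the clone is created in the same inventory folder as the source backing.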
folder = self._get_folder(backing) if clone_type == LINKED_CLONE_TYPE: disk_move_type = 'createNewChildDiskBacking' else: disk_move_type = 'moveAllDiskBackingsAndDisallowSharing' clone_spec = self._get_clone_spec( datastore, disk_move_type, snapshot, backing, disk_type, host=host, resource_pool=resource_pool, extra_config=extra_config, disks_to_clone=disks_to_clone) task = self._session.invoke_api(self._session.vim, 'CloneVM_Task', backing, folder=folder, name=name, spec=clone_spec) LOG.debug("Initiated clone of backing: %s.", name) task_info = self._session.wait_for_task(task) new_backing = task_info.result LOG.info("Successfully created clone: %s.", new_backing) return new_backing def _reconfigure_backing(self, backing, reconfig_spec): """Reconfigure backing VM with the given spec.""" LOG.debug("Reconfiguring backing VM: %(backing)s with spec: %(spec)s.", {'backing': backing, 'spec': reconfig_spec}) reconfig_task = self._session.invoke_api(self._session.vim, "ReconfigVM_Task", backing, spec=reconfig_spec) LOG.debug("Task: %s created for reconfiguring backing VM.", reconfig_task) self._session.wait_for_task(reconfig_task) def _get_controller(self, backing, adapter_type): devices = self._session.invoke_api(vim_util, 'get_object_property', self._session.vim, backing, 'config.hardware.device') controller_type = ControllerType.get_controller_type(adapter_type) for device in devices: if device.__class__.__name__ == controller_type: return device def attach_disk_to_backing(self, backing, size_in_kb, disk_type, adapter_type, profile_id, vmdk_ds_file_path): """Attach an existing virtual disk to the backing VM. :param backing: reference to the backing VM :param size_in_kb: disk size in KB :param disk_type: virtual disk type :param adapter_type: disk adapter type :param profile_id: storage policy profile identification :param vmdk_ds_file_path: datastore file path of the virtual disk to be attached """ LOG.debug("Reconfiguring backing VM: %(backing)s to add new disk: " "%(path)s with size (KB): %(size)d and adapter type: " "%(adapter_type)s.", {'backing': backing, 'path': vmdk_ds_file_path, 'size': size_in_kb, 'adapter_type': adapter_type}) cf = self._session.vim.client.factory reconfig_spec = cf.create('ns0:VirtualMachineConfigSpec') controller = self._get_controller(backing, adapter_type) if controller: disk_spec = self._create_virtual_disk_config_spec( size_in_kb, disk_type, controller.key, profile_id, vmdk_ds_file_path) specs = [disk_spec] else: specs = self._create_specs_for_disk_add( size_in_kb, disk_type, adapter_type, profile_id, vmdk_ds_file_path=vmdk_ds_file_path) reconfig_spec.deviceChange = specs self._reconfigure_backing(backing, reconfig_spec) LOG.debug("Backing VM: %s reconfigured with new disk.", backing) def _create_spec_for_disk_remove(self, disk_device): cf = self._session.vim.client.factory disk_spec = cf.create('ns0:VirtualDeviceConfigSpec') disk_spec.operation = 'remove' disk_spec.device = disk_device return disk_spec def detach_disk_from_backing(self, backing, disk_device): """Detach the given disk from backing.""" LOG.debug("Reconfiguring backing VM: %(backing)s to remove disk: " "%(disk_device)s.", {'backing': backing, 'disk_device': disk_device}) cf = self._session.vim.client.factory reconfig_spec = cf.create('ns0:VirtualMachineConfigSpec') spec = self._create_spec_for_disk_remove(disk_device) reconfig_spec.deviceChange = [spec] self._reconfigure_backing(backing, reconfig_spec) def rename_backing(self, backing, new_name): """Rename backing VM. 
:param backing: VM to be renamed :param new_name: new VM name """ LOG.info("Renaming backing VM: %(backing)s to %(new_name)s.", {'backing': backing, 'new_name': new_name}) rename_task = self._session.invoke_api(self._session.vim, "Rename_Task", backing, newName=new_name) LOG.debug("Task: %s created for renaming VM.", rename_task) self._session.wait_for_task(rename_task) LOG.info("Backing VM: %(backing)s renamed to %(new_name)s.", {'backing': backing, 'new_name': new_name}) def change_backing_profile(self, backing, profile_id): """Change storage profile of the backing VM. The current profile is removed if the new profile is None. """ LOG.debug("Reconfiguring backing VM: %(backing)s to change profile to:" " %(profile)s.", {'backing': backing, 'profile': profile_id}) cf = self._session.vim.client.factory if profile_id is None: vm_profile = cf.create('ns0:VirtualMachineEmptyProfileSpec') else: vm_profile = cf.create('ns0:VirtualMachineDefinedProfileSpec') vm_profile.profileId = profile_id.uniqueId reconfig_spec = cf.create('ns0:VirtualMachineConfigSpec') reconfig_spec.vmProfile = [vm_profile] disk_device = self._get_disk_device(backing) disk_spec = cf.create('ns0:VirtualDeviceConfigSpec') disk_spec.device = disk_device disk_spec.operation = 'edit' disk_spec.profile = [vm_profile] reconfig_spec.deviceChange = [disk_spec] self._reconfigure_backing(backing, reconfig_spec) LOG.debug("Backing VM: %(backing)s reconfigured with new profile: " "%(profile)s.", {'backing': backing, 'profile': profile_id}) def update_backing_disk_uuid(self, backing, disk_uuid): """Update backing VM's disk UUID. :param backing: Reference to backing VM :param disk_uuid: New disk UUID """ LOG.debug("Reconfiguring backing VM: %(backing)s to change disk UUID " "to: %(disk_uuid)s.", {'backing': backing, 'disk_uuid': disk_uuid}) disk_device = self._get_disk_device(backing) disk_device.backing.uuid = disk_uuid cf = self._session.vim.client.factory disk_spec = cf.create('ns0:VirtualDeviceConfigSpec') disk_spec.device = disk_device disk_spec.operation = 'edit' reconfig_spec = cf.create('ns0:VirtualMachineConfigSpec') reconfig_spec.deviceChange = [disk_spec] self._reconfigure_backing(backing, reconfig_spec) LOG.debug("Backing VM: %(backing)s reconfigured with new disk UUID: " "%(disk_uuid)s.", {'backing': backing, 'disk_uuid': disk_uuid}) def update_backing_extra_config(self, backing, extra_config): cf = self._session.vim.client.factory reconfig_spec = cf.create('ns0:VirtualMachineConfigSpec') if BACKING_UUID_KEY in extra_config: reconfig_spec.instanceUuid = extra_config.pop(BACKING_UUID_KEY) reconfig_spec.extraConfig = self._get_extra_config_option_values( extra_config) self._reconfigure_backing(backing, reconfig_spec) LOG.debug("Backing: %(backing)s reconfigured with extra config: " "%(extra_config)s.", {'backing': backing, 'extra_config': extra_config}) def update_backing_uuid(self, backing, uuid): cf = self._session.vim.client.factory reconfig_spec = cf.create('ns0:VirtualMachineConfigSpec') reconfig_spec.instanceUuid = uuid self._reconfigure_backing(backing, reconfig_spec) LOG.debug("Backing: %(backing)s reconfigured with uuid: %(uuid)s.", {'backing': backing, 'uuid': uuid}) def delete_file(self, file_path, datacenter=None): """Delete file or folder on the datastore. 
:param file_path: Datastore path of the file or folder """ LOG.debug("Deleting file: %(file)s under datacenter: %(dc)s.", {'file': file_path, 'dc': datacenter}) fileManager = self._session.vim.service_content.fileManager task = self._session.invoke_api(self._session.vim, 'DeleteDatastoreFile_Task', fileManager, name=file_path, datacenter=datacenter) LOG.debug("Initiated deletion via task: %s.", task) self._session.wait_for_task(task) LOG.info("Successfully deleted file: %s.", file_path) def create_datastore_folder(self, ds_name, folder_path, datacenter): """Creates a datastore folder. This method returns silently if the folder already exists. :param ds_name: datastore name :param folder_path: path of folder to create :param datacenter: datacenter of target datastore """ fileManager = self._session.vim.service_content.fileManager ds_folder_path = "[%s] %s" % (ds_name, folder_path) LOG.debug("Creating datastore folder: %s.", ds_folder_path) try: self._session.invoke_api(self._session.vim, 'MakeDirectory', fileManager, name=ds_folder_path, datacenter=datacenter) LOG.info("Created datastore folder: %s.", folder_path) except exceptions.FileAlreadyExistsException: LOG.debug("Datastore folder: %s already exists.", folder_path) def get_path_name(self, backing): """Get path name of the backing. :param backing: Reference to the backing entity :return: Path name of the backing """ return self._session.invoke_api(vim_util, 'get_object_property', self._session.vim, backing, 'config.files').vmPathName def get_entity_name(self, entity): """Get name of the managed entity. :param entity: Reference to the entity :return: Name of the managed entity """ return self._session.invoke_api(vim_util, 'get_object_property', self._session.vim, entity, 'name') def _get_disk_device(self, backing): """Get the virtual device corresponding to disk.""" hardware_devices = self._session.invoke_api(vim_util, 'get_object_property', self._session.vim, backing, 'config.hardware.device') if hardware_devices.__class__.__name__ == "ArrayOfVirtualDevice": hardware_devices = hardware_devices.VirtualDevice for device in hardware_devices: if device.__class__.__name__ == "VirtualDisk": return device LOG.error("Virtual disk device of backing: %s not found.", backing) raise vmdk_exceptions.VirtualDiskNotFoundException() def get_vmdk_path(self, backing): """Get the vmdk file name of the backing. The vmdk file path of the backing returned is of the form: "[datastore1] my_folder/my_vm.vmdk" :param backing: Reference to the backing :return: VMDK file path of the backing """ disk_device = self._get_disk_device(backing) backing = disk_device.backing if backing.__class__.__name__ != "VirtualDiskFlatVer2BackingInfo": msg = _("Invalid disk backing: %s.") % backing.__class__.__name__ LOG.error(msg) raise AssertionError(msg) return backing.fileName def get_disk_size(self, backing): """Get disk size of the backing. 
:param backing: backing VM reference :return: disk size in bytes """ disk_device = self._get_disk_device(backing) return disk_device.capacityInKB * units.Ki def _get_virtual_disk_create_spec(self, size_in_kb, adapter_type, disk_type): """Return spec for file-backed virtual disk creation.""" cf = self._session.vim.client.factory spec = cf.create('ns0:FileBackedVirtualDiskSpec') spec.capacityKb = size_in_kb spec.adapterType = VirtualDiskAdapterType.get_adapter_type( adapter_type) spec.diskType = VirtualDiskType.get_virtual_disk_type(disk_type) return spec def create_virtual_disk(self, dc_ref, vmdk_ds_file_path, size_in_kb, adapter_type='busLogic', disk_type='preallocated'): """Create virtual disk with the given settings. :param dc_ref: datacenter reference :param vmdk_ds_file_path: datastore file path of the virtual disk :param size_in_kb: disk size in KB :param adapter_type: disk adapter type :param disk_type: vmdk type """ virtual_disk_spec = self._get_virtual_disk_create_spec(size_in_kb, adapter_type, disk_type) LOG.debug("Creating virtual disk with spec: %s.", virtual_disk_spec) disk_manager = self._session.vim.service_content.virtualDiskManager task = self._session.invoke_api(self._session.vim, 'CreateVirtualDisk_Task', disk_manager, name=vmdk_ds_file_path, datacenter=dc_ref, spec=virtual_disk_spec) LOG.debug("Task: %s created for virtual disk creation.", task) self._session.wait_for_task(task) LOG.debug("Created virtual disk with spec: %s.", virtual_disk_spec) def create_flat_extent_virtual_disk_descriptor( self, dc_ref, path, size_in_kb, adapter_type, disk_type): """Create descriptor for a single flat extent virtual disk. To create the descriptor, we create a virtual disk and delete its flat extent. :param dc_ref: reference to the datacenter :param path: descriptor datastore file path :param size_in_kb: size of the virtual disk in KB :param adapter_type: virtual disk adapter type :param disk_type: type of the virtual disk """ LOG.debug("Creating descriptor: %(path)s with size (KB): %(size)s, " "adapter_type: %(adapter_type)s and disk_type: " "%(disk_type)s.", {'path': path.get_descriptor_ds_file_path(), 'size': size_in_kb, 'adapter_type': adapter_type, 'disk_type': disk_type }) self.create_virtual_disk(dc_ref, path.get_descriptor_ds_file_path(), size_in_kb, adapter_type, disk_type) self.delete_file(path.get_flat_extent_ds_file_path(), dc_ref) LOG.debug("Created descriptor: %s.", path.get_descriptor_ds_file_path()) def copy_vmdk_file(self, src_dc_ref, src_vmdk_file_path, dest_vmdk_file_path, dest_dc_ref=None): """Copy contents of the src vmdk file to dest vmdk file. :param src_dc_ref: Reference to datacenter containing src datastore :param src_vmdk_file_path: Source vmdk file path :param dest_vmdk_file_path: Destination vmdk file path :param dest_dc_ref: Reference to datacenter of dest datastore. If unspecified, source datacenter is used. 
""" LOG.debug('Copying disk: %(src)s to %(dest)s.', {'src': src_vmdk_file_path, 'dest': dest_vmdk_file_path}) dest_dc_ref = dest_dc_ref or src_dc_ref diskMgr = self._session.vim.service_content.virtualDiskManager task = self._session.invoke_api(self._session.vim, 'CopyVirtualDisk_Task', diskMgr, sourceName=src_vmdk_file_path, sourceDatacenter=src_dc_ref, destName=dest_vmdk_file_path, destDatacenter=dest_dc_ref, force=True) LOG.debug("Initiated copying disk data via task: %s.", task) self._session.wait_for_task(task) LOG.info("Successfully copied disk at: %(src)s to: %(dest)s.", {'src': src_vmdk_file_path, 'dest': dest_vmdk_file_path}) def move_vmdk_file(self, src_dc_ref, src_vmdk_file_path, dest_vmdk_file_path, dest_dc_ref=None): """Move the given vmdk file to another datastore location. :param src_dc_ref: Reference to datacenter containing src datastore :param src_vmdk_file_path: Source vmdk file path :param dest_vmdk_file_path: Destination vmdk file path :param dest_dc_ref: Reference to datacenter of dest datastore. If unspecified, source datacenter is used. """ LOG.debug('Moving disk: %(src)s to %(dest)s.', {'src': src_vmdk_file_path, 'dest': dest_vmdk_file_path}) dest_dc_ref = dest_dc_ref or src_dc_ref diskMgr = self._session.vim.service_content.virtualDiskManager task = self._session.invoke_api(self._session.vim, 'MoveVirtualDisk_Task', diskMgr, sourceName=src_vmdk_file_path, sourceDatacenter=src_dc_ref, destName=dest_vmdk_file_path, destDatacenter=dest_dc_ref, force=True) self._session.wait_for_task(task) def copy_datastore_file(self, vsphere_url, dest_dc_ref, dest_ds_file_path): """Copy file to datastore location. :param vsphere_url: vsphere URL of the file :param dest_dc_ref: Reference to destination datacenter :param dest_file_path: Destination datastore file path """ LOG.debug("Copying file: %(vsphere_url)s to %(path)s.", {'vsphere_url': vsphere_url, 'path': dest_ds_file_path}) location_url = ds_obj.DatastoreURL.urlparse(vsphere_url) src_path = ds_obj.DatastorePath(location_url.datastore_name, location_url.path) src_dc_ref = self.get_entity_by_inventory_path( location_url.datacenter_path) task = self._session.invoke_api( self._session.vim, 'CopyDatastoreFile_Task', self._session.vim.service_content.fileManager, sourceName=str(src_path), sourceDatacenter=src_dc_ref, destinationName=dest_ds_file_path, destinationDatacenter=dest_dc_ref) self._session.wait_for_task(task) def delete_vmdk_file(self, vmdk_file_path, dc_ref): """Delete given vmdk files. :param vmdk_file_path: VMDK file path to be deleted :param dc_ref: Reference to datacenter that contains this VMDK file """ LOG.debug("Deleting vmdk file: %s.", vmdk_file_path) diskMgr = self._session.vim.service_content.virtualDiskManager task = self._session.invoke_api(self._session.vim, 'DeleteVirtualDisk_Task', diskMgr, name=vmdk_file_path, datacenter=dc_ref) LOG.debug("Initiated deleting vmdk file via task: %s.", task) self._session.wait_for_task(task) LOG.info("Deleted vmdk file: %s.", vmdk_file_path) def _get_all_clusters(self): clusters = {} retrieve_result = self._session.invoke_api(vim_util, 'get_objects', self._session.vim, 'ClusterComputeResource', self._max_objects) while retrieve_result: if retrieve_result.objects: for cluster in retrieve_result.objects: name = urllib.parse.unquote(cluster.propSet[0].val) clusters[name] = cluster.obj retrieve_result = self.continue_retrieval(retrieve_result) return clusters def get_cluster_refs(self, names): """Get references to given clusters. 
:param names: list of cluster names :return: Dictionary of cluster names to references """ clusters_ref = {} clusters = self._get_all_clusters() for name in names: if name not in clusters: LOG.error("Compute cluster: %s not found.", name) raise vmdk_exceptions.ClusterNotFoundException(cluster=name) clusters_ref[name] = clusters[name] return clusters_ref def get_cluster_hosts(self, cluster): """Get hosts in the given cluster. :param cluster: cluster reference :return: references to hosts in the cluster """ hosts = self._session.invoke_api(vim_util, 'get_object_property', self._session.vim, cluster, 'host') host_refs = [] if hosts and hosts.ManagedObjectReference: host_refs.extend(hosts.ManagedObjectReference) return host_refs def get_entity_by_inventory_path(self, path): """Returns the managed object identified by the given inventory path. :param path: Inventory path :return: Reference to the managed object """ return self._session.invoke_api( self._session.vim, "FindByInventoryPath", self._session.vim.service_content.searchIndex, inventoryPath=path) def get_inventory_path(self, entity): return self._session.invoke_api( vim_util, 'get_inventory_path', self._session.vim, entity) def _get_disk_devices(self, vm): disk_devices = [] hardware_devices = self._session.invoke_api(vim_util, 'get_object_property', self._session.vim, vm, 'config.hardware.device') if hardware_devices.__class__.__name__ == "ArrayOfVirtualDevice": hardware_devices = hardware_devices.VirtualDevice for device in hardware_devices: if device.__class__.__name__ == "VirtualDisk": disk_devices.append(device) return disk_devices def get_disk_device(self, vm, vmdk_path): """Get the disk device of the VM which corresponds to the given path. :param vm: VM reference :param vmdk_path: Datastore path of virtual disk :return: Matching disk device """ disk_devices = self._get_disk_devices(vm) for disk_device in disk_devices: backing = disk_device.backing if (backing.__class__.__name__ == "VirtualDiskFlatVer2BackingInfo" and backing.fileName == vmdk_path): return disk_device def mark_backing_as_template(self, backing): LOG.debug("Marking backing: %s as template.", backing) self._session.invoke_api(self._session.vim, 'MarkAsTemplate', backing) def _create_fcd_backing_spec(self, disk_type, ds_ref): backing_spec = self._session.vim.client.factory.create( 'ns0:VslmCreateSpecDiskFileBackingSpec') if disk_type == VirtualDiskType.PREALLOCATED: disk_type = 'lazyZeroedThick' backing_spec.provisioningType = disk_type backing_spec.datastore = ds_ref return backing_spec def _create_profile_spec(self, cf, profile_id): profile_spec = cf.create('ns0:VirtualMachineDefinedProfileSpec') profile_spec.profileId = profile_id return profile_spec def create_fcd(self, name, size_mb, ds_ref, disk_type, profile_id=None): cf = self._session.vim.client.factory spec = cf.create('ns0:VslmCreateSpec') spec.capacityInMB = size_mb spec.name = name spec.backingSpec = self._create_fcd_backing_spec(disk_type, ds_ref) if profile_id: profile_spec = self._create_profile_spec(cf, profile_id) spec.profile = [profile_spec] LOG.debug("Creating fcd with spec: %(spec)s on datastore: %(ds_ref)s.", {'spec': spec, 'ds_ref': ds_ref}) vstorage_mgr = self._session.vim.service_content.vStorageObjectManager task = self._session.invoke_api(self._session.vim, 'CreateDisk_Task', vstorage_mgr, spec=spec) task_info = self._session.wait_for_task(task) fcd_loc = FcdLocation.create(task_info.result.config.id, ds_ref) LOG.debug("Created fcd: %s.", fcd_loc) return fcd_loc def delete_fcd(self, 
fcd_location): cf = self._session.vim.client.factory vstorage_mgr = self._session.vim.service_content.vStorageObjectManager LOG.debug("Deleting fcd: %s.", fcd_location) task = self._session.invoke_api(self._session.vim, 'DeleteVStorageObject_Task', vstorage_mgr, id=fcd_location.id(cf), datastore=fcd_location.ds_ref()) self._session.wait_for_task(task) def clone_fcd( self, name, fcd_location, dest_ds_ref, disk_type, profile_id=None): cf = self._session.vim.client.factory spec = cf.create('ns0:VslmCloneSpec') spec.name = name spec.backingSpec = self._create_fcd_backing_spec(disk_type, dest_ds_ref) if profile_id: profile_spec = self._create_profile_spec(cf, profile_id) spec.profile = [profile_spec] LOG.debug("Copying fcd: %(fcd_loc)s to datastore: %(ds_ref)s with " "spec: %(spec)s.", {'fcd_loc': fcd_location, 'spec': spec, 'ds_ref': dest_ds_ref}) vstorage_mgr = self._session.vim.service_content.vStorageObjectManager task = self._session.invoke_api(self._session.vim, 'CloneVStorageObject_Task', vstorage_mgr, id=fcd_location.id(cf), datastore=fcd_location.ds_ref(), spec=spec) task_info = self._session.wait_for_task(task) dest_fcd_loc = FcdLocation.create(task_info.result.config.id, dest_ds_ref) LOG.debug("Clone fcd: %s.", dest_fcd_loc) return dest_fcd_loc def extend_fcd(self, fcd_location, new_size_mb): cf = self._session.vim.client.factory vstorage_mgr = self._session.vim.service_content.vStorageObjectManager LOG.debug("Extending fcd: %(fcd_loc)s to %(size)s.", {'fcd_loc': fcd_location, 'size': new_size_mb}) task = self._session.invoke_api(self._session.vim, 'ExtendDisk_Task', vstorage_mgr, id=fcd_location.id(cf), datastore=fcd_location.ds_ref(), newCapacityInMB=new_size_mb) self._session.wait_for_task(task) def register_disk(self, vmdk_url, name, ds_ref): vstorage_mgr = self._session.vim.service_content.vStorageObjectManager LOG.debug("Registering disk: %s as fcd.", vmdk_url) fcd = self._session.invoke_api(self._session.vim, 'RegisterDisk', vstorage_mgr, path=vmdk_url, name=name) fcd_loc = FcdLocation.create(fcd.config.id, ds_ref) LOG.debug("Created fcd: %s.", fcd_loc) return fcd_loc def attach_fcd(self, backing, fcd_location): cf = self._session.vim.client.factory reconfig_spec = cf.create('ns0:VirtualMachineConfigSpec') spec = self._create_controller_config_spec( VirtualDiskAdapterType.LSI_LOGIC) reconfig_spec.deviceChange = [spec] self._reconfigure_backing(backing, reconfig_spec) LOG.debug("Attaching fcd: %(fcd_loc)s to %(backing)s.", {'fcd_loc': fcd_location, 'backing': backing}) task = self._session.invoke_api(self._session.vim, "AttachDisk_Task", backing, diskId=fcd_location.id(cf), datastore=fcd_location.ds_ref()) self._session.wait_for_task(task) def detach_fcd(self, backing, fcd_location): cf = self._session.vim.client.factory LOG.debug("Detaching fcd: %(fcd_loc)s from %(backing)s.", {'fcd_loc': fcd_location, 'backing': backing}) task = self._session.invoke_api(self._session.vim, "DetachDisk_Task", backing, diskId=fcd_location.id(cf)) self._session.wait_for_task(task) def create_fcd_snapshot(self, fcd_location, description): LOG.debug("Creating fcd snapshot for %s.", fcd_location) vstorage_mgr = self._session.vim.service_content.vStorageObjectManager cf = self._session.vim.client.factory task = self._session.invoke_api(self._session.vim, 'VStorageObjectCreateSnapshot_Task', vstorage_mgr, id=fcd_location.id(cf), datastore=fcd_location.ds_ref(), description=description) task_info = self._session.wait_for_task(task) fcd_snap_loc = FcdSnapshotLocation(fcd_location, task_info.result.id) 
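# FcdSnapshotLocation.provider_location() (see class below) serializes this pair
# as JSON, e.g. {"fcd_location": "<fcd_id>@<datastore_moref>", "fcd_snapshot_id":
# "<snapshot_id>"}, where the fcd_location part uses FcdLocation's "<id>@<ds>" form.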
LOG.debug("Created fcd snapshot: %s.", fcd_snap_loc) return fcd_snap_loc def delete_fcd_snapshot(self, fcd_snap_loc): LOG.debug("Deleting fcd snapshot: %s.", fcd_snap_loc) vstorage_mgr = self._session.vim.service_content.vStorageObjectManager cf = self._session.vim.client.factory task = self._session.invoke_api( self._session.vim, 'DeleteSnapshot_Task', vstorage_mgr, id=fcd_snap_loc.fcd_loc.id(cf), datastore=fcd_snap_loc.fcd_loc.ds_ref(), snapshotId=fcd_snap_loc.id(cf)) self._session.wait_for_task(task) def create_fcd_from_snapshot(self, fcd_snap_loc, name, profile_id=None): LOG.debug("Creating fcd with name: %(name)s from fcd snapshot: " "%(snap)s.", {'name': name, 'snap': fcd_snap_loc}) vstorage_mgr = self._session.vim.service_content.vStorageObjectManager cf = self._session.vim.client.factory if profile_id: profile = [self._create_profile_spec(cf, profile_id)] else: profile = None task = self._session.invoke_api( self._session.vim, 'CreateDiskFromSnapshot_Task', vstorage_mgr, id=fcd_snap_loc.fcd_loc.id(cf), datastore=fcd_snap_loc.fcd_loc.ds_ref(), snapshotId=fcd_snap_loc.id(cf), name=name, profile=profile) task_info = self._session.wait_for_task(task) fcd_loc = FcdLocation.create(task_info.result.config.id, fcd_snap_loc.fcd_loc.ds_ref()) LOG.debug("Created fcd: %s.", fcd_loc) return fcd_loc def update_fcd_policy(self, fcd_location, profile_id): LOG.debug("Changing fcd: %(fcd_loc)s storage policy to %(policy)s.", {'fcd_loc': fcd_location, 'policy': profile_id}) vstorage_mgr = self._session.vim.service_content.vStorageObjectManager cf = self._session.vim.client.factory if profile_id is None: profile_spec = cf.create('ns0:VirtualMachineEmptyProfileSpec') else: profile_spec = self._create_profile_spec(cf, profile_id) task = self._session.invoke_api( self._session.vim, 'UpdateVStorageObjectPolicy_Task', vstorage_mgr, id=fcd_location.id(cf), datastore=fcd_location.ds_ref(), profile=[profile_spec]) self._session.wait_for_task(task) LOG.debug("Updated fcd storage policy to %s.", profile_id) class FcdLocation(object): def __init__(self, fcd_id, ds_ref_val): self.fcd_id = fcd_id self.ds_ref_val = ds_ref_val @classmethod def create(cls, fcd_id_obj, ds_ref): return cls(fcd_id_obj.id, vim_util.get_moref_value(ds_ref)) def provider_location(self): return "%s@%s" % (self.fcd_id, self.ds_ref_val) def ds_ref(self): return vim_util.get_moref(self.ds_ref_val, 'Datastore') def id(self, cf): id_obj = cf.create('ns0:ID') id_obj.id = self.fcd_id return id_obj @classmethod def from_provider_location(cls, provider_location): fcd_id, ds_ref_val = provider_location.split('@') return cls(fcd_id, ds_ref_val) def __str__(self): return self.provider_location() class FcdSnapshotLocation(object): def __init__(self, fcd_location, snapshot_id): self.fcd_loc = fcd_location self.snap_id = snapshot_id def provider_location(self): loc = {"fcd_location": self.fcd_loc.provider_location(), "fcd_snapshot_id": self.snap_id} return json.dumps(loc) def id(self, cf): id_obj = cf.create('ns0:ID') id_obj.id = self.snap_id return id_obj @classmethod def from_provider_location(cls, provider_location): try: loc = json.loads(provider_location) fcd_loc = FcdLocation.from_provider_location(loc['fcd_location']) return cls(fcd_loc, loc['fcd_snapshot_id']) except ValueError: pass def __str__(self): return self.provider_location() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/drivers/vzstorage.py0000664000175000017500000007233000000000000021723 
0ustar00zuulzuul00000000000000# Copyright (c) 2015 Parallels IP Holdings GmbH # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import collections import errno import json import os import re from os_brick.remotefs import remotefs from oslo_concurrency import processutils as putils from oslo_config import cfg from oslo_log import log as logging from oslo_utils import imageutils from oslo_utils import units from cinder import exception from cinder.i18n import _ from cinder.image import image_utils from cinder import interface from cinder import utils from cinder.volume import configuration from cinder.volume.drivers import remotefs as remotefs_drv from cinder.volume import volume_utils LOG = logging.getLogger(__name__) vzstorage_opts = [ cfg.StrOpt('vzstorage_shares_config', default='/etc/cinder/vzstorage_shares', help='File with the list of available vzstorage shares.'), cfg.BoolOpt('vzstorage_sparsed_volumes', default=True, help=('Create volumes as sparsed files which take no space ' 'rather than regular files when using raw format, ' 'in which case volume creation takes lot of time.')), cfg.FloatOpt('vzstorage_used_ratio', default=0.95, help=('Percent of ACTUAL usage of the underlying volume ' 'before no new volumes can be allocated to the volume ' 'destination.')), cfg.StrOpt('vzstorage_mount_point_base', default='$state_path/mnt', help=('Base dir containing mount points for ' 'vzstorage shares.')), cfg.ListOpt('vzstorage_mount_options', help=('Mount options passed to the vzstorage client. 
' 'See section of the pstorage-mount man page ' 'for details.')), cfg.StrOpt('vzstorage_default_volume_format', default='raw', help=('Default format that will be used when creating volumes ' 'if no volume format is specified.')), ] CONF = cfg.CONF CONF.register_opts(vzstorage_opts, group=configuration.SHARED_CONF_GROUP) PLOOP_BASE_DELTA_NAME = 'root.hds' DISK_FORMAT_RAW = 'raw' DISK_FORMAT_QCOW2 = 'qcow2' DISK_FORMAT_PLOOP = 'ploop' class VzStorageException(exception.RemoteFSException): message = _("Unknown Virtuozzo Storage exception") class VzStorageNoSharesMounted(exception.RemoteFSNoSharesMounted): message = _("No mounted Virtuozzo Storage shares found") class VzStorageNoSuitableShareFound(exception.RemoteFSNoSuitableShareFound): message = _("There is no share which can host %(volume_size)sG") class PloopDevice(object): """Setup a ploop device for ploop image This class is for mounting ploop devices using with statement: with PloopDevice('/vzt/private/my-ct/harddisk.hdd') as dev_path: # do something :param path: A path to ploop harddisk dir :param snapshot_id: Snapshot id to mount :param execute: execute helper """ def __init__(self, path, snapshot_id=None, read_only=True, execute=putils.execute): self.path = path self.snapshot_id = snapshot_id self.read_only = read_only self.execute = execute def __enter__(self): self.dd_path = os.path.join(self.path, 'DiskDescriptor.xml') cmd = ['ploop', 'mount', self.dd_path] if self.snapshot_id: cmd.append('-u') cmd.append(self.snapshot_id) if self.read_only: cmd.append('-r') out, err = self.execute(*cmd, run_as_root=True) m = re.search(r'dev=(\S+)', out) if not m: raise Exception('Invalid output from ploop mount: %s' % out) self.ploop_dev = m.group(1) return self.ploop_dev def _umount(self): self.execute('ploop', 'umount', self.dd_path, run_as_root=True) def __exit__(self, type, value, traceback): self._umount() @interface.volumedriver class VZStorageDriver(remotefs_drv.RemoteFSSnapDriver): """Cinder driver for Virtuozzo Storage. Creates volumes as files on the mounted vzstorage cluster. .. code-block:: none Version history: 1.0 - Initial driver. 1.1 - Supports vz:volume_format in vendor properties. """ VERSION = '1.1' CI_WIKI_NAME = "Virtuozzo_Storage_CI" # TODO(jsbryant) Remove driver in the 'U' release if CI is not fixed. SUPPORTED = False SHARE_FORMAT_REGEX = r'(?:(\S+):\/)?([a-zA-Z0-9_-]+)(?::(\S+))?' def __init__(self, execute=putils.execute, *args, **kwargs): self.driver_volume_type = 'vzstorage' self.driver_prefix = 'vzstorage' self.volume_backend_name = 'Virtuozzo_Storage' self._remotefsclient = None super(VZStorageDriver, self).__init__(*args, **kwargs) self.configuration.append_config_values(vzstorage_opts) self._execute_as_root = False root_helper = utils.get_root_helper() # base bound to instance is used in RemoteFsConnector. 
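# --- Illustrative sketch (not part of the driver) ---
# Shows how SHARE_FORMAT_REGEX (defined above) decomposes a share
# specification of the form [MDS1[,MDS2],...:/]CLUSTER[:PASSWORD].
# The helper name and the sample share strings below are made up.
import re

_SHARE_RE = r'(?:(\S+):\/)?([a-zA-Z0-9_-]+)(?::(\S+))?'

def parse_vzstorage_share(share):
    """Return (mds_list, cluster_name, password); unset parts are None."""
    m = re.search(_SHARE_RE, share)
    if not m:
        return None
    return m.group(1), m.group(2), m.group(3)

# parse_vzstorage_share('mds1,mds2:/cluster1:secret')
#     -> ('mds1,mds2', 'cluster1', 'secret')
# parse_vzstorage_share('cluster1')
#     -> (None, 'cluster1', None)
# --- end of sketch ---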
self.base = self.configuration.vzstorage_mount_point_base opts = self.configuration.vzstorage_mount_options self._remotefsclient = remotefs.VZStorageRemoteFSClient( 'vzstorage', root_helper, execute=execute, vzstorage_mount_point_base=self.base, vzstorage_mount_options=opts) @staticmethod def get_driver_options(): return vzstorage_opts def _update_volume_stats(self): super(VZStorageDriver, self)._update_volume_stats() self._stats['vendor_name'] = 'Virtuozzo' def _init_vendor_properties(self): namespace = 'vz' properties = {} self._set_property( properties, "%s:volume_format" % namespace, "Volume format", _("Specifies volume format."), "string", enum=["qcow2", "ploop", "raw"], default=self.configuration.vzstorage_default_volume_format) return properties, namespace def _qemu_img_info(self, path, volume_name): qemu_img_cache = path + ".qemu_img_info" is_cache_outdated = True if os.path.isdir(path): # Ploop disks stored along with metadata xml as directories # qemu-img should explore base data file inside path = os.path.join(path, PLOOP_BASE_DELTA_NAME) if os.path.isfile(qemu_img_cache): info_tm = os.stat(qemu_img_cache).st_mtime snap_tm = os.stat(path).st_mtime if info_tm >= snap_tm: is_cache_outdated = False if is_cache_outdated: LOG.debug("Cached qemu-img info %s not present or outdated," " refresh", qemu_img_cache) ret = super(VZStorageDriver, self)._qemu_img_info_base( path, volume_name, self.configuration.vzstorage_mount_point_base) # We need only backing_file and file_format d = {'file_format': ret.file_format, 'backing_file': ret.backing_file} with open(qemu_img_cache, "w") as f: json.dump(d, f) else: ret = imageutils.QemuImgInfo() with open(qemu_img_cache, "r") as f: cached_data = json.load(f) ret.file_format = cached_data['file_format'] ret.backing_file = cached_data['backing_file'] return ret @remotefs_drv.locked_volume_id_operation def initialize_connection(self, volume, connector): """Allow connection to connector and return connection info. :param volume: volume reference :param connector: connector reference """ # Find active image active_file = self.get_active_image_from_info(volume) data = {'export': volume.provider_location, 'format': self.get_volume_format(volume), 'name': active_file, } return { 'driver_volume_type': self.driver_volume_type, 'data': data, 'mount_point_base': self._get_mount_point_base(), } def do_setup(self, context): """Any initialization the volume driver does while starting.""" super(VZStorageDriver, self).do_setup(context) config = self.configuration.vzstorage_shares_config if not os.path.exists(config): msg = (_("VzStorage config file at %(config)s doesn't exist.") % {'config': config}) LOG.error(msg) raise VzStorageException(msg) if not os.path.isabs(self.base): msg = _("Invalid mount point base: %s.") % self.base LOG.error(msg) raise VzStorageException(msg) used_ratio = self.configuration.vzstorage_used_ratio if not ((used_ratio > 0) and (used_ratio <= 1)): msg = _("VzStorage config 'vzstorage_used_ratio' invalid. " "Must be > 0 and <= 1.0: %s.") % used_ratio LOG.error(msg) raise VzStorageException(msg) self.shares = {} # Check if mount.fuse.pstorage is installed on this system; # note that we don't need to be root to see if the package # is installed. 
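# --- Illustrative sketch (not part of the driver) ---
# Minimal restatement of the mtime-based cache used by _qemu_img_info()
# above: the cached JSON stored next to the image is trusted only while it
# is at least as new as the image it describes. The cache layout follows
# the driver; the helper names are made up.
import json
import os

def load_cached_img_info(image_path):
    cache_path = image_path + '.qemu_img_info'
    if not os.path.isfile(cache_path):
        return None
    if os.stat(cache_path).st_mtime < os.stat(image_path).st_mtime:
        return None  # the image changed after the cache was written
    with open(cache_path) as f:
        return json.load(f)  # {'file_format': ..., 'backing_file': ...}

def store_img_info(image_path, file_format, backing_file):
    with open(image_path + '.qemu_img_info', 'w') as f:
        json.dump({'file_format': file_format,
                   'backing_file': backing_file}, f)
# --- end of sketch ---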
package = 'mount.fuse.pstorage' try: self._execute(package, check_exit_code=False, run_as_root=False) except OSError as exc: if exc.errno == errno.ENOENT: msg = _('%s is not installed.') % package raise VzStorageException(msg) else: raise self.configuration.nas_secure_file_operations = 'true' self.configuration.nas_secure_file_permissions = 'true' def _ensure_share_mounted(self, share): m = re.search(self.SHARE_FORMAT_REGEX, share) if not m: msg = (_("Invalid Virtuozzo Storage share specification: %r. " "Must be: [MDS1[,MDS2],...:/][:PASSWORD].") % share) raise VzStorageException(msg) cluster_name = m.group(2) if share in self.shares: mnt_flags = json.loads(self.shares[share]) else: mnt_flags = [] if '-l' not in mnt_flags: # If logging path is not specified in shares config # set up logging to non-default path, so that it will # be possible to mount the same cluster to another mount # point by hand with default options. mnt_flags.extend([ '-l', '/var/log/vstorage/%s/cinder.log.gz' % cluster_name]) self._remotefsclient.mount(share, mnt_flags) def _find_share(self, volume): """Choose VzStorage share among available ones for given volume size. For instances with more than one share that meets the criteria, the first suitable share will be selected. :param volume: the volume to be created. """ if not self._mounted_shares: raise VzStorageNoSharesMounted() for share in self._mounted_shares: if self._is_share_eligible(share, volume.size): break else: raise VzStorageNoSuitableShareFound( volume_size=volume.size) LOG.debug('Selected %s as target VzStorage share.', share) return share def _is_share_eligible(self, vz_share, volume_size_in_gib): """Verifies VzStorage share is eligible to host volume with given size. :param vz_share: vzstorage share :param volume_size_in_gib: int size in GB """ used_ratio = self.configuration.vzstorage_used_ratio volume_size = volume_size_in_gib * units.Gi total_size, available, allocated = self._get_capacity_info(vz_share) if (allocated + volume_size) // total_size > used_ratio: LOG.debug('_is_share_eligible: %s is above ' 'vzstorage_used_ratio.', vz_share) return False return True def choose_volume_format(self, volume): volume_format = None volume_type = volume.volume_type # Retrieve volume format from volume metadata if 'volume_format' in volume.metadata: volume_format = volume.metadata['volume_format'] # If volume format wasn't found in metadata, use # volume type extra specs if not volume_format and volume_type: extra_specs = volume_type.extra_specs or {} if 'vz:volume_format' in extra_specs: volume_format = extra_specs['vz:volume_format'] # If volume format is still undefined, return default # volume format from backend configuration return (volume_format or self.configuration.vzstorage_default_volume_format) def get_volume_format(self, volume): active_file = self.get_active_image_from_info(volume) active_file_path = os.path.join(self._local_volume_dir(volume), active_file) img_info = self._qemu_img_info(active_file_path, volume.name) return image_utils.from_qemu_img_disk_format(img_info.file_format) def _create_ploop(self, volume_path, volume_size): os.mkdir(volume_path) try: self._execute('ploop', 'init', '-s', '%sG' % volume_size, os.path.join(volume_path, PLOOP_BASE_DELTA_NAME), run_as_root=True) except putils.ProcessExecutionError: os.rmdir(volume_path) raise def _do_create_volume(self, volume): """Create a volume on given vzstorage share. 
:param volume: volume reference """ volume_format = self.choose_volume_format(volume) volume_path = self.local_path(volume) volume_size = volume.size LOG.debug("Creating new volume at %s.", volume_path) if os.path.exists(volume_path): msg = _('File already exists at %s.') % volume_path LOG.error(msg) raise exception.InvalidVolume(reason=msg) if volume_format == DISK_FORMAT_PLOOP: self._create_ploop(volume_path, volume_size) elif volume_format == DISK_FORMAT_QCOW2: self._create_qcow2_file(volume_path, volume_size) elif self.configuration.vzstorage_sparsed_volumes: self._create_sparsed_file(volume_path, volume_size) else: self._create_regular_file(volume_path, volume_size) info_path = self._local_path_volume_info(volume) snap_info = {'active': os.path.basename(volume_path)} self._write_info_file(info_path, snap_info) # Query qemu-img info to cache the output self._qemu_img_info(volume_path, volume.name) def _delete(self, path): self._execute('rm', '-rf', path, run_as_root=True) @remotefs_drv.locked_volume_id_operation def extend_volume(self, volume, size_gb): LOG.info('Extending volume %s.', volume.id) volume_format = self.get_volume_format(volume) self._extend_volume(volume, size_gb, volume_format) def _extend_volume(self, volume, size_gb, volume_format): self._check_extend_volume_support(volume, size_gb) LOG.info('Resizing file to %sG...', size_gb) active_path = os.path.join( self._get_mount_point_for_share(volume.provider_location), self.get_active_image_from_info(volume)) self._do_extend_volume(active_path, size_gb, volume_format) def _do_extend_volume(self, volume_path, size_gb, volume_format): if volume_format == DISK_FORMAT_PLOOP: self._execute('ploop', 'resize', '-s', '%dG' % size_gb, os.path.join(volume_path, 'DiskDescriptor.xml'), run_as_root=True) else: image_utils.resize_image(volume_path, size_gb) if not self._is_file_size_equal(volume_path, size_gb): raise exception.ExtendVolumeError( reason='Resizing image file failed.') def _check_extend_volume_support(self, volume, size_gb): extend_by = int(size_gb) - volume.size if not self._is_share_eligible(volume.provider_location, extend_by): raise exception.ExtendVolumeError(reason='Insufficient space to ' 'extend volume %s to %sG.' 
% (volume.id, size_gb)) def _is_file_size_equal(self, path, size): """Checks if file size at path is equal to size.""" data = image_utils.qemu_img_info(path) virt_size = data.virtual_size / units.Gi return virt_size == size def _recreate_ploop_desc(self, image_dir, image_file): self._delete(os.path.join(image_dir, 'DiskDescriptor.xml')) self._execute('ploop', 'restore-descriptor', image_dir, image_file) def copy_image_to_volume(self, context, volume, image_service, image_id, disable_sparse=False): """Fetch the image from image_service and write it to the volume.""" volume_format = self.get_volume_format(volume) qemu_volume_format = image_utils.fixup_disk_format(volume_format) image_path = self.local_path(volume) if volume_format == DISK_FORMAT_PLOOP: image_path = os.path.join(image_path, PLOOP_BASE_DELTA_NAME) image_utils.fetch_to_volume_format( context, image_service, image_id, image_path, qemu_volume_format, self.configuration.volume_dd_blocksize, disable_sparse=disable_sparse) if volume_format == DISK_FORMAT_PLOOP: self._recreate_ploop_desc(self.local_path(volume), image_path) self._do_extend_volume(self.local_path(volume), volume.size, volume_format) # Query qemu-img info to cache the output self._qemu_img_info(self.local_path(volume), volume.name) def _copy_volume_from_snapshot(self, snapshot, volume, volume_size): """Copy data from snapshot to destination volume. This is done with a qemu-img convert to raw/qcow2 from the snapshot qcow2. """ info_path = self._local_path_volume_info(snapshot.volume) snap_info = self._read_info_file(info_path) vol_dir = self._local_volume_dir(snapshot.volume) out_format = self.choose_volume_format(volume) qemu_out_format = image_utils.fixup_disk_format(out_format) volume_format = self.get_volume_format(snapshot.volume) volume_path = self.local_path(volume) if volume_format in (DISK_FORMAT_QCOW2, DISK_FORMAT_RAW): forward_file = snap_info[snapshot.id] forward_path = os.path.join(vol_dir, forward_file) # Find the file which backs this file, which represents the point # when this snapshot was created. 
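# For a qcow2/raw backed volume the on-disk layout at this point is a
# backing chain (newest delta first, each file backed by the next):
#
#     <active delta> -> <snapshot delta(s)> -> volume-<id> (base image)
#
# snap_info[snapshot.id] names the delta created when the snapshot was
# taken, so that delta's backing file holds the volume state as of the
# snapshot; it is that backing image which gets converted into the new
# volume below.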
img_info = self._qemu_img_info(forward_path, snapshot.volume.name) path_to_snap_img = os.path.join(vol_dir, img_info.backing_file) LOG.debug("_copy_volume_from_snapshot: will copy " "from snapshot at %s.", path_to_snap_img) image_utils.convert_image(path_to_snap_img, volume_path, qemu_out_format) elif volume_format == DISK_FORMAT_PLOOP: with PloopDevice(self.local_path(snapshot.volume), snapshot.id, execute=self._execute) as dev: base_file = os.path.join(volume_path, 'root.hds') image_utils.convert_image(dev, base_file, qemu_out_format) else: msg = _("Unsupported volume format %s") % volume_format raise exception.InvalidVolume(msg) self._extend_volume(volume, volume_size, out_format) # Query qemu-img info to cache the output img_info = self._qemu_img_info(volume_path, volume.name) @remotefs_drv.locked_volume_id_operation def delete_volume(self, volume): """Deletes a logical volume.""" if not volume.provider_location: msg = (_('Volume %s does not have provider_location ' 'specified, skipping.') % volume.name) LOG.error(msg) return self._ensure_share_mounted(volume.provider_location) volume_dir = self._local_volume_dir(volume) mounted_path = os.path.join(volume_dir, self.get_active_image_from_info(volume)) if os.path.exists(mounted_path): self._delete(mounted_path) self._delete(mounted_path + ".qemu_img_info") else: LOG.info("Skipping deletion of volume %s " "as it does not exist.", mounted_path) info_path = self._local_path_volume_info(volume) self._delete(info_path) def _get_desc_path(self, volume): return os.path.join(self.local_path(volume), 'DiskDescriptor.xml') def _create_snapshot_ploop(self, snapshot): status = snapshot.volume.status if status != 'available': msg = (_('Volume status must be available for ' 'snapshot %(id)s. (is %(status)s)') % {'id': snapshot.id, 'status': status}) raise exception.InvalidVolume(msg) info_path = self._local_path_volume_info(snapshot.volume) snap_info = self._read_info_file(info_path) self._execute('ploop', 'snapshot', '-u', '{%s}' % snapshot.id, self._get_desc_path(snapshot.volume), run_as_root=True) snap_file = os.path.join('volume-%s' % snapshot.volume.id, snapshot.id) snap_info[snapshot.id] = snap_file self._write_info_file(info_path, snap_info) def _delete_snapshot_ploop(self, snapshot): status = snapshot.volume.status if status != 'available': msg = (_('Volume status must be available for ' 'snapshot %(id)s. 
(is %(status)s)') % {'id': snapshot.id, 'status': status}) raise exception.InvalidVolume(msg) info_path = self._local_path_volume_info(snapshot.volume) snap_info = self._read_info_file(info_path) self._execute('ploop', 'snapshot-delete', '-u', '{%s}' % snapshot.id, self._get_desc_path(snapshot.volume), run_as_root=True) snap_info.pop(snapshot.id, None) self._write_info_file(info_path, snap_info) def _create_snapshot(self, snapshot): volume_format = self.get_volume_format(snapshot.volume) if volume_format == DISK_FORMAT_PLOOP: self._create_snapshot_ploop(snapshot) else: super(VZStorageDriver, self)._create_snapshot(snapshot) def _do_create_snapshot(self, snapshot, backing_filename, new_snap_path): super(VZStorageDriver, self)._do_create_snapshot(snapshot, backing_filename, new_snap_path) # Cache qemu-img info for created snapshot self._qemu_img_info(new_snap_path, snapshot.volume.name) def _delete_snapshot_qcow2(self, snapshot): info_path = self._local_path_volume_info(snapshot.volume) snap_info = self._read_info_file(info_path, empty_if_missing=True) if snapshot.id not in snap_info: LOG.warning("Snapshot %s doesn't exist in snap_info", snapshot.id) return snap_file = os.path.join(self._local_volume_dir(snapshot.volume), snap_info[snapshot.id]) active_file = os.path.join(self._local_volume_dir(snapshot.volume), snap_info['active']) higher_file = self._get_higher_image_path(snapshot) if higher_file: higher_file = os.path.join(self._local_volume_dir(snapshot.volume), higher_file) elif active_file != snap_file: msg = (_("Expected higher file exists for snapshot %s") % snapshot.id) raise VzStorageException(msg) img_info = self._qemu_img_info(snap_file, snapshot.volume.name) base_file = os.path.join(self._local_volume_dir(snapshot.volume), img_info.backing_file) super(VZStorageDriver, self)._delete_snapshot(snapshot) def _qemu_info_cache(fn): return fn + ".qemu_img_info" def _update_backing_file(info_src, info_dst): with open(info_src, 'r') as fs, open(info_dst, 'r') as fd: src = json.load(fs) dst = json.load(fd) dst['backing_file'] = src['backing_file'] with open(info_dst, 'w') as fdw: json.dump(dst, fdw) if snap_file != active_file: # mv snap_file.info higher_file.info _update_backing_file( _qemu_info_cache(snap_file), _qemu_info_cache(higher_file)) self._delete(_qemu_info_cache(snap_file)) elif snapshot.volume.status == 'in-use': # mv base_file.info snap_file.info _update_backing_file( _qemu_info_cache(base_file), _qemu_info_cache(snap_file)) self._delete(_qemu_info_cache(base_file)) else: # rm snap_file.info self._delete(_qemu_info_cache(snap_file)) def _delete_snapshot(self, snapshot): volume_format = self.get_volume_format(snapshot.volume) if volume_format == DISK_FORMAT_PLOOP: self._delete_snapshot_ploop(snapshot) else: self._delete_snapshot_qcow2(snapshot) def _copy_volume_to_image(self, context, volume, image_service, image_meta): """Copy the volume to the specified image.""" volume_format = self.get_volume_format(volume) if volume_format == DISK_FORMAT_PLOOP: with PloopDevice(self.local_path(volume), execute=self._execute) as dev: volume_utils.upload_volume(context, image_service, image_meta, dev, volume) else: super(VZStorageDriver, self)._copy_volume_to_image(context, volume, image_service, image_meta) def _create_cloned_volume_ploop(self, volume, src_vref): LOG.info('Cloning volume %(src)s to volume %(dst)s', {'src': src_vref.id, 'dst': volume.id}) if src_vref.status != 'available': msg = _("Volume status must be 'available'.") raise exception.InvalidVolume(msg) volume_name = 
CONF.volume_name_template % volume.id # Create fake snapshot object snap_attrs = ['volume_name', 'size', 'volume_size', 'name', 'volume_id', 'id', 'volume'] Snapshot = collections.namedtuple('Snapshot', snap_attrs) temp_snapshot = Snapshot(id=src_vref.id, volume_name=volume_name, size=src_vref.size, volume_size=src_vref.size, name='clone-snap-%s' % src_vref.id, volume_id=src_vref.id, volume=src_vref) self._create_snapshot_ploop(temp_snapshot) try: volume.provider_location = src_vref.provider_location info_path = self._local_path_volume_info(volume) snap_info = {'active': 'volume-%s' % volume.id} self._write_info_file(info_path, snap_info) self._copy_volume_from_snapshot(temp_snapshot, volume, volume.size) finally: self.delete_snapshot(temp_snapshot) return {'provider_location': src_vref.provider_location} def _create_cloned_volume(self, volume, src_vref, context): """Creates a clone of the specified volume.""" volume_format = self.get_volume_format(src_vref) if volume_format == DISK_FORMAT_PLOOP: return self._create_cloned_volume_ploop(volume, src_vref) else: return super(VZStorageDriver, self)._create_cloned_volume( volume, src_vref, context) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315577.4071214 cinder-27.0.0/cinder/volume/drivers/windows/0000775000175000017500000000000000000000000021012 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/drivers/windows/__init__.py0000664000175000017500000000000000000000000023111 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/drivers/windows/constants.py0000664000175000017500000000125300000000000023401 0ustar00zuulzuul00000000000000# Copyright 2014 Cloudbase Solutions Srl # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. VHD_TYPE_FIXED = 2 VHD_TYPE_DYNAMIC = 3 VHD_TYPE_DIFFERENCING = 4 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/drivers/windows/iscsi.py0000664000175000017500000003367300000000000022512 0ustar00zuulzuul00000000000000# Copyright 2012 Pedro Navarro Perez # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
""" Volume driver for Windows Server 2012 This driver requires ISCSI target role installed """ import contextlib import os from os_win import utilsfactory from oslo_config import cfg from oslo_log import log as logging from oslo_utils import fileutils from oslo_utils import units from oslo_utils import uuidutils from cinder.common import constants from cinder import exception from cinder.image import image_utils from cinder import interface from cinder.volume import configuration from cinder.volume import driver from cinder.volume import volume_utils LOG = logging.getLogger(__name__) windows_opts = [ cfg.StrOpt('windows_iscsi_lun_path', default=r'C:\iSCSIVirtualDisks', help='Path to store VHD backed volumes'), ] CONF = cfg.CONF CONF.register_opts(windows_opts, group=configuration.SHARED_CONF_GROUP) @interface.volumedriver class WindowsISCSIDriver(driver.ISCSIDriver): """Executes volume driver commands on Windows Storage server.""" VERSION = '1.0.0' # ThirdPartySystems wiki page CI_WIKI_NAME = "Microsoft_iSCSI_CI" SUPPORTED = False def __init__(self, *args, **kwargs): super(WindowsISCSIDriver, self).__init__(*args, **kwargs) self.configuration = kwargs.get('configuration', None) if self.configuration: self.configuration.append_config_values(windows_opts) self._vhdutils = utilsfactory.get_vhdutils() self._tgt_utils = utilsfactory.get_iscsi_target_utils() self._hostutils = utilsfactory.get_hostutils() @staticmethod def get_driver_options(): return windows_opts def do_setup(self, context): """Setup the Windows Volume driver. Called one time by the manager after the driver is loaded. Validate the flags we care about """ fileutils.ensure_tree(self.configuration.windows_iscsi_lun_path) fileutils.ensure_tree(CONF.image_conversion_dir) def check_for_setup_error(self): """Check that the driver is working and can communicate.""" self._get_portals() def _get_portals(self): available_portals = set(self._tgt_utils.get_portal_locations( available_only=True, fail_if_none_found=True)) LOG.debug("Available iSCSI portals: %s", available_portals) iscsi_port = self.configuration.target_port iscsi_ips = ([self.configuration.target_ip_address] + self.configuration.target_secondary_ip_addresses) requested_portals = {':'.join([iscsi_ip, str(iscsi_port)]) for iscsi_ip in iscsi_ips} unavailable_portals = requested_portals - available_portals if unavailable_portals: LOG.warning("The following iSCSI portals were requested but " "are not available: %s.", unavailable_portals) selected_portals = requested_portals & available_portals if not selected_portals: err_msg = "None of the configured iSCSI portals are available." raise exception.VolumeDriverException(err_msg) return list(selected_portals) def _get_host_information(self, volume, multipath=False): """Getting the portal and port information.""" target_name = self._get_target_name(volume) available_portals = self._get_portals() properties = self._tgt_utils.get_target_information(target_name) # Note(lpetrut): the WT_Host CHAPSecret field cannot be accessed # for security reasons. 
auth = volume.provider_auth if auth: (auth_method, auth_username, auth_secret) = auth.split() properties['auth_method'] = auth_method properties['auth_username'] = auth_username properties['auth_password'] = auth_secret properties['target_portal'] = available_portals[0] properties['target_discovered'] = False properties['target_lun'] = 0 properties['volume_id'] = volume.id if multipath: properties['target_portals'] = available_portals properties['target_iqns'] = [properties['target_iqn'] for portal in available_portals] properties['target_luns'] = [properties['target_lun'] for portal in available_portals] return properties def initialize_connection(self, volume, connector): """Driver entry point to attach a volume to an instance.""" initiator_name = connector['initiator'] target_name = volume.provider_location self._tgt_utils.associate_initiator_with_iscsi_target(initiator_name, target_name) properties = self._get_host_information(volume, connector.get('multipath')) return { 'driver_volume_type': 'iscsi', 'data': properties, } def terminate_connection(self, volume, connector, **kwargs): """Driver entry point to unattach a volume from an instance. Unmask the LUN on the storage system so the given initiator can no longer access it. """ initiator_name = connector['initiator'] target_name = volume.provider_location self._tgt_utils.deassociate_initiator(initiator_name, target_name) def create_volume(self, volume): """Driver entry point for creating a new volume.""" vhd_path = self.local_path(volume) vol_name = volume.name vol_size_mb = volume.size * 1024 self._tgt_utils.create_wt_disk(vhd_path, vol_name, size_mb=vol_size_mb) def local_path(self, volume, disk_format=None): base_vhd_folder = self.configuration.windows_iscsi_lun_path if not disk_format: disk_format = self._tgt_utils.get_supported_disk_format() disk_fname = "%s.%s" % (volume.name, disk_format) return os.path.join(base_vhd_folder, disk_fname) def delete_volume(self, volume): """Driver entry point for destroying existing volumes.""" vol_name = volume.name vhd_path = self.local_path(volume) self._tgt_utils.remove_wt_disk(vol_name) fileutils.delete_if_exists(vhd_path) def create_snapshot(self, snapshot): """Driver entry point for creating a snapshot.""" # Getting WT_Snapshot class vol_name = snapshot.volume_name snapshot_name = snapshot.name self._tgt_utils.create_snapshot(vol_name, snapshot_name) def create_volume_from_snapshot(self, volume, snapshot): """Driver entry point for exporting snapshots as volumes.""" snapshot_name = snapshot.name vol_name = volume.name vhd_path = self.local_path(volume) self._tgt_utils.export_snapshot(snapshot_name, vhd_path) self._tgt_utils.import_wt_disk(vhd_path, vol_name) def delete_snapshot(self, snapshot): """Driver entry point for deleting a snapshot.""" snapshot_name = snapshot.name self._tgt_utils.delete_snapshot(snapshot_name) def ensure_export(self, context, volume): # iSCSI targets exported by WinTarget persist after host reboot. 
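# --- Illustrative sketch (not part of the driver) ---
# The volume's provider_auth field is a single space-separated string: it
# is assembled in create_export() below and unpacked by auth.split() in
# _get_host_information() above. The helper names and sample credentials
# are made up.
def pack_chap(username, password):
    return ' '.join(('CHAP', username, password))

def unpack_chap(provider_auth):
    auth_method, auth_username, auth_secret = provider_auth.split()
    return auth_method, auth_username, auth_secret

# pack_chap('cinder', 's3cr3t')      -> 'CHAP cinder s3cr3t'
# unpack_chap('CHAP cinder s3cr3t')  -> ('CHAP', 'cinder', 's3cr3t')
# --- end of sketch ---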
pass def _get_target_name(self, volume): return "%s%s" % (self.configuration.target_prefix, volume.name) def create_export(self, context, volume, connector): """Driver entry point to get the export info for a new volume.""" target_name = self._get_target_name(volume) updates = {} if not self._tgt_utils.iscsi_target_exists(target_name): self._tgt_utils.create_iscsi_target(target_name) updates['provider_location'] = target_name if self.configuration.use_chap_auth: chap_username = (self.configuration.chap_username or volume_utils.generate_username()) chap_password = (self.configuration.chap_password or volume_utils.generate_password()) self._tgt_utils.set_chap_credentials(target_name, chap_username, chap_password) updates['provider_auth'] = ' '.join(('CHAP', chap_username, chap_password)) # This operation is idempotent self._tgt_utils.add_disk_to_target(volume.name, target_name) return updates def remove_export(self, context, volume): """Driver entry point to remove an export for a volume.""" target_name = self._get_target_name(volume) self._tgt_utils.delete_iscsi_target(target_name) def copy_image_to_volume(self, context, volume, image_service, image_id, disable_sparse=False): """Fetch the image from image_service and create a volume using it.""" # Convert to VHD and file back to VHD vhd_type = self._tgt_utils.get_supported_vhd_type() with image_utils.temporary_file(suffix='.vhd') as tmp: volume_path = self.local_path(volume) image_utils.fetch_to_vhd(context, image_service, image_id, tmp, self.configuration.volume_dd_blocksize, disable_sparse=disable_sparse) # The vhd must be disabled and deleted before being replaced with # the desired image. self._tgt_utils.change_wt_disk_status(volume.name, enabled=False) os.unlink(volume_path) self._vhdutils.convert_vhd(tmp, volume_path, vhd_type) self._vhdutils.resize_vhd(volume_path, volume.size << 30, is_file_max_size=False) self._tgt_utils.change_wt_disk_status(volume.name, enabled=True) @contextlib.contextmanager def _temporary_snapshot(self, volume_name): try: snap_uuid = uuidutils.generate_uuid() snapshot_name = '%s-tmp-snapshot-%s' % (volume_name, snap_uuid) self._tgt_utils.create_snapshot(volume_name, snapshot_name) yield snapshot_name finally: self._tgt_utils.delete_snapshot(snapshot_name) def copy_volume_to_image(self, context, volume, image_service, image_meta): """Copy the volume to the specified image.""" disk_format = self._tgt_utils.get_supported_disk_format() temp_vhd_path = os.path.join(CONF.image_conversion_dir, str(image_meta['id']) + '.' + disk_format) try: with self._temporary_snapshot(volume.name) as tmp_snap_name: # qemu-img cannot access VSS snapshots, for which reason it # must be exported first. 
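# NOTE: volume sizes are handled in GiB while the VHD utilities take
# sizes in bytes, hence the "<< 30" shift used elsewhere in this driver:
#     10 << 30 == 10 * 2**30 == 10 * units.Gi == 10737418240 bytes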
self._tgt_utils.export_snapshot(tmp_snap_name, temp_vhd_path) volume_utils.upload_volume( context, image_service, image_meta, temp_vhd_path, volume, 'vhd') finally: fileutils.delete_if_exists(temp_vhd_path) def create_cloned_volume(self, volume, src_vref): """Creates a clone of the specified volume.""" src_vol_name = src_vref.name vol_name = volume.name vol_size = volume.size new_vhd_path = self.local_path(volume) with self._temporary_snapshot(src_vol_name) as tmp_snap_name: self._tgt_utils.export_snapshot(tmp_snap_name, new_vhd_path) self._vhdutils.resize_vhd(new_vhd_path, vol_size << 30, is_file_max_size=False) self._tgt_utils.import_wt_disk(new_vhd_path, vol_name) def _get_capacity_info(self): drive = os.path.splitdrive( self.configuration.windows_iscsi_lun_path)[0] (size, free_space) = self._hostutils.get_volume_info(drive) total_gb = size / units.Gi free_gb = free_space / units.Gi return (total_gb, free_gb) def _update_volume_stats(self): """Retrieve stats info for Windows device.""" LOG.debug("Updating volume stats") total_gb, free_gb = self._get_capacity_info() data = {} backend_name = self.configuration.safe_get('volume_backend_name') data["volume_backend_name"] = backend_name or self.__class__.__name__ data["vendor_name"] = 'Microsoft' data["driver_version"] = self.VERSION data["storage_protocol"] = constants.ISCSI data['total_capacity_gb'] = total_gb data['free_capacity_gb'] = free_gb data['reserved_percentage'] = self.configuration.reserved_percentage data['QoS_support'] = False self._stats = data def extend_volume(self, volume, new_size): """Extend an Existing Volume.""" old_size = volume.size LOG.debug("Extend volume from %(old_size)s GB to %(new_size)s GB.", {'old_size': old_size, 'new_size': new_size}) additional_size_mb = (new_size - old_size) * 1024 self._tgt_utils.extend_wt_disk(volume.name, additional_size_mb) def backup_use_temp_snapshot(self): return False ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/drivers/windows/smbfs.py0000664000175000017500000006773300000000000022516 0ustar00zuulzuul00000000000000# Copyright (c) 2014 Cloudbase Solutions SRL # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
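# --- Illustrative sketch (not part of the driver) ---
# Restates the capacity arithmetic used by the Windows iSCSI driver above:
# get_volume_info() reports bytes, the stats report expects GiB, and
# extend_wt_disk() is passed the *additional* size in MiB. The helper
# names and sample numbers are made up.
GIB = 2 ** 30

def capacity_gb(size_bytes, free_bytes):
    return size_bytes / GIB, free_bytes / GIB

def extend_delta_mb(old_size_gb, new_size_gb):
    return (new_size_gb - old_size_gb) * 1024

# capacity_gb(500 * GIB, 120 * GIB)  -> (500.0, 120.0)
# extend_delta_mb(10, 15)            -> 5120
# --- end of sketch ---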
import os import re import sys from os_brick.remotefs import windows_remotefs as remotefs_brick from os_win import constants as os_win_const from os_win import utilsfactory from oslo_config import cfg from oslo_log import log as logging from oslo_utils import fileutils from oslo_utils import units from cinder import context from cinder import coordination from cinder import exception from cinder.i18n import _ from cinder.image import image_utils from cinder import interface from cinder import objects from cinder import utils from cinder.volume import configuration from cinder.volume.drivers import remotefs as remotefs_drv from cinder.volume import volume_utils VERSION = '1.1.0' LOG = logging.getLogger(__name__) volume_opts = [ cfg.StrOpt('smbfs_shares_config', default=r'C:\OpenStack\smbfs_shares.txt', help='File with the list of available smbfs shares.'), cfg.StrOpt('smbfs_default_volume_format', default='vhd', choices=['vhd', 'vhdx'], help=('Default format that will be used when creating volumes ' 'if no volume format is specified.')), cfg.StrOpt('smbfs_mount_point_base', default=r'C:\OpenStack\_mnt', help=('Base dir containing mount points for smbfs shares.')), cfg.DictOpt('smbfs_pool_mappings', default={}, help=('Mappings between share locations and pool names. ' 'If not specified, the share names will be used as ' 'pool names. Example: ' '//addr/share:pool_name,//addr/share2:pool_name2')), ] CONF = cfg.CONF CONF.register_opts(volume_opts, group=configuration.SHARED_CONF_GROUP) class SmbfsException(exception.RemoteFSException): message = _("Unknown SMBFS exception.") @interface.volumedriver class WindowsSmbfsDriver(remotefs_drv.RevertToSnapshotMixin, remotefs_drv.RemoteFSPoolMixin, remotefs_drv.RemoteFSManageableVolumesMixin, remotefs_drv.RemoteFSSnapDriverDistributed): VERSION = VERSION driver_volume_type = 'smbfs' driver_prefix = 'smbfs' volume_backend_name = 'Generic_SMBFS' SHARE_FORMAT_REGEX = r'//.+/.+' VERSION = VERSION _DISK_FORMAT_VHD = 'vhd' _DISK_FORMAT_VHD_LEGACY = 'vpc' _DISK_FORMAT_VHDX = 'vhdx' # ThirdPartySystems wiki page CI_WIKI_NAME = "Cloudbase_Cinder_SMB3_CI" SUPPORTED = False _MINIMUM_QEMU_IMG_VERSION = '1.6' _SUPPORTED_IMAGE_FORMATS = [_DISK_FORMAT_VHD, _DISK_FORMAT_VHD_LEGACY, _DISK_FORMAT_VHDX] _VALID_IMAGE_EXTENSIONS = [_DISK_FORMAT_VHD, _DISK_FORMAT_VHDX] _MANAGEABLE_IMAGE_RE = re.compile( r'.*\.(?:%s)$' % '|'.join(_VALID_IMAGE_EXTENSIONS), re.IGNORECASE) _always_use_temp_snap_when_cloning = False _thin_provisioning_support = True _vhd_type_mapping = {'thin': os_win_const.VHD_TYPE_DYNAMIC, 'thick': os_win_const.VHD_TYPE_FIXED} _vhd_qemu_subformat_mapping = {'thin': 'dynamic', 'thick': 'fixed'} def __init__(self, *args, **kwargs): self._remotefsclient = None super(WindowsSmbfsDriver, self).__init__(*args, **kwargs) self.configuration.append_config_values(volume_opts) self.base = getattr(self.configuration, 'smbfs_mount_point_base') self._remotefsclient = remotefs_brick.WindowsRemoteFsClient( 'cifs', root_helper=None, smbfs_mount_point_base=self.base, local_path_for_loopback=True) self._vhdutils = utilsfactory.get_vhdutils() self._pathutils = utilsfactory.get_pathutils() self._smbutils = utilsfactory.get_smbutils() self._diskutils = utilsfactory.get_diskutils() thin_enabled = ( self.configuration.nas_volume_prov_type == 'thin') self._thin_provisioning_support = thin_enabled self._thick_provisioning_support = not thin_enabled @staticmethod def get_driver_options(): return volume_opts def do_setup(self, context): self._check_os_platform() super(WindowsSmbfsDriver, 
self).do_setup(context) image_utils.check_qemu_img_version(self._MINIMUM_QEMU_IMG_VERSION) config = self.configuration.smbfs_shares_config if not config: msg = (_("SMBFS config file not set (smbfs_shares_config).")) LOG.error(msg) raise SmbfsException(msg) if not os.path.exists(config): msg = (_("SMBFS config file at %(config)s doesn't exist.") % {'config': config}) LOG.error(msg) raise SmbfsException(msg) if not os.path.isabs(self.base): msg = _("Invalid mount point base: %s") % self.base LOG.error(msg) raise SmbfsException(msg) self.shares = {} # address : options self._ensure_shares_mounted() self._setup_pool_mappings() def _setup_pool_mappings(self): self._pool_mappings = self.configuration.smbfs_pool_mappings pools = list(self._pool_mappings.values()) duplicate_pools = set([pool for pool in pools if pools.count(pool) > 1]) if duplicate_pools: msg = _("Found multiple mappings for pools %(pools)s. " "Requested pool mappings: %(pool_mappings)s") raise SmbfsException( msg % dict(pools=duplicate_pools, pool_mappings=self._pool_mappings)) shares_missing_mappings = ( set(self.shares).difference(set(self._pool_mappings))) for share in shares_missing_mappings: msg = ("No pool name was requested for share %(share)s " "Using the share name instead.") LOG.warning(msg, dict(share=share)) self._pool_mappings[share] = self._get_share_name(share) @coordination.synchronized('{self.driver_prefix}-{volume.id}') def initialize_connection(self, volume, connector): """Allow connection to connector and return connection info. :param volume: volume reference :param connector: connector reference """ # Find active image active_file = self.get_active_image_from_info(volume) fmt = self.get_volume_format(volume) data = {'export': volume.provider_location, 'format': fmt, 'name': active_file} if volume.provider_location in self.shares: data['options'] = self.shares[volume.provider_location] return { 'driver_volume_type': self.driver_volume_type, 'data': data, 'mount_point_base': self._get_mount_point_base() } @coordination.synchronized('{self.driver_prefix}-{snapshot.volume.id}') def initialize_connection_snapshot(self, snapshot, connector): backing_file = self._get_snapshot_backing_file(snapshot) volume = snapshot.volume fmt = self.get_volume_format(volume) data = {'export': volume.provider_location, 'format': fmt, 'name': backing_file, 'access_mode': 'ro'} if volume.provider_location in self.shares: data['options'] = self.shares[volume.provider_location] return { 'driver_volume_type': self.driver_volume_type, 'data': data, 'mount_point_base': self._get_mount_point_base() } def _check_os_platform(self): if sys.platform != 'win32': _msg = _("This system platform (%s) is not supported. This " "driver supports only Win32 platforms.") % sys.platform raise SmbfsException(_msg) def _get_total_allocated(self, smbfs_share): pool_name = self._get_pool_name_from_share(smbfs_share) host = "#".join([self.host, pool_name]) vol_sz_sum = self.db.volume_data_get_for_host( context=context.get_admin_context(), host=host)[1] return float(vol_sz_sum * units.Gi) def local_path(self, volume): """Get volume path (mounted locally fs path) for given volume. :param volume: volume reference """ volume_path_template = self._get_local_volume_path_template(volume) volume_path = self._lookup_local_volume_path(volume_path_template) if volume_path: return volume_path # The image does not exist, so retrieve the volume format # in order to build the path. fmt = self.get_volume_format(volume) volume_path = volume_path_template + '.' 
+ fmt return volume_path def _get_local_volume_path_template(self, volume): local_dir = self._local_volume_dir(volume) local_path_template = os.path.join(local_dir, volume.name) return local_path_template def _lookup_local_volume_path(self, volume_path_template): for ext in self._VALID_IMAGE_EXTENSIONS: volume_path = (volume_path_template + '.' + ext if ext else volume_path_template) if os.path.exists(volume_path): return volume_path def _get_new_snap_path(self, snapshot): vol_path = self.local_path(snapshot.volume) snap_path, ext = os.path.splitext(vol_path) snap_path += '.' + snapshot.id + ext return snap_path def get_volume_format(self, volume, qemu_format=False): volume_path_template = self._get_local_volume_path_template(volume) volume_path = self._lookup_local_volume_path(volume_path_template) if volume_path: ext = os.path.splitext(volume_path)[1].strip('.').lower() if ext in self._VALID_IMAGE_EXTENSIONS: volume_format = ext else: # Hyper-V relies on file extensions so we're enforcing them. raise SmbfsException( _("Invalid image file extension: %s") % ext) else: volume_format = ( self._get_volume_format_spec(volume) or self.configuration.smbfs_default_volume_format) if qemu_format and volume_format == self._DISK_FORMAT_VHD: volume_format = self._DISK_FORMAT_VHD_LEGACY elif volume_format == self._DISK_FORMAT_VHD_LEGACY: volume_format = self._DISK_FORMAT_VHD return volume_format def _get_volume_format_spec(self, volume): vol_type = volume.volume_type extra_specs = {} if vol_type and vol_type.extra_specs: extra_specs = vol_type.extra_specs extra_specs.update(volume.metadata or {}) return (extra_specs.get('volume_format') or extra_specs.get('smbfs:volume_format') or self.configuration.smbfs_default_volume_format) @coordination.synchronized('{self.driver_prefix}-{volume.id}') def create_volume(self, volume): return super(WindowsSmbfsDriver, self).create_volume(volume) def _do_create_volume(self, volume): volume_path = self.local_path(volume) volume_format = self.get_volume_format(volume) volume_size_bytes = volume.size * units.Gi if os.path.exists(volume_path): err_msg = _('File already exists at: %s') % volume_path raise exception.InvalidVolume(err_msg) if volume_format not in self._SUPPORTED_IMAGE_FORMATS: err_msg = _("Unsupported volume format: %s ") % volume_format raise exception.InvalidVolume(err_msg) vhd_type = self._get_vhd_type() self._vhdutils.create_vhd(volume_path, vhd_type, max_internal_size=volume_size_bytes, guid=volume.id) def _ensure_share_mounted(self, smbfs_share): mnt_flags = None if self.shares.get(smbfs_share) is not None: mnt_flags = self.shares[smbfs_share] self._remotefsclient.mount(smbfs_share, mnt_flags) @coordination.synchronized('{self.driver_prefix}-{volume.id}') def delete_volume(self, volume): """Deletes a logical volume.""" if not volume.provider_location: LOG.warning('Volume %s does not have provider_location ' 'specified, skipping.', volume.name) return self._ensure_share_mounted(volume.provider_location) volume_dir = self._local_volume_dir(volume) mounted_path = os.path.join(volume_dir, self.get_active_image_from_info(volume)) if os.path.exists(mounted_path): self._delete(mounted_path) else: LOG.debug("Skipping deletion of volume %s as it does not exist.", mounted_path) info_path = self._local_path_volume_info(volume) self._delete(info_path) def _delete(self, path): fileutils.delete_if_exists(path) def _get_capacity_info(self, smbfs_share): """Calculate available space on the SMBFS share. 
:param smbfs_share: example //172.18.194.100/var/smbfs """ mount_point = self._get_mount_point_for_share(smbfs_share) total_size, total_available = self._diskutils.get_disk_capacity( mount_point) total_allocated = self._get_total_allocated(smbfs_share) return_value = [total_size, total_available, total_allocated] LOG.info('Smb share %(share)s Total size %(size)s ' 'Total allocated %(allocated)s', {'share': smbfs_share, 'size': total_size, 'allocated': total_allocated}) return [float(x) for x in return_value] def _img_commit(self, snapshot_path): self._vhdutils.merge_vhd(snapshot_path) def _rebase_img(self, image, backing_file, volume_format): # Relative path names are not supported in this case. image_dir = os.path.dirname(image) backing_file_path = os.path.join(image_dir, backing_file) self._vhdutils.reconnect_parent_vhd(image, backing_file_path) def _qemu_img_info(self, path, volume_name=None): # This code expects to deal only with relative filenames. # As this method is needed by the upper class and qemu-img does # not fully support vhdx images, for the moment we'll use Win32 API # for retrieving image information. parent_path = self._vhdutils.get_vhd_parent_path(path) file_format = os.path.splitext(path)[1][1:].lower() if parent_path: backing_file_name = os.path.split(parent_path)[1].lower() else: backing_file_name = None class ImageInfo(object): def __init__(self, image, backing_file): self.image = image self.backing_file = backing_file self.file_format = file_format return ImageInfo(os.path.basename(path), backing_file_name) def _do_create_snapshot(self, snapshot, backing_file, new_snap_path): if self._is_volume_attached(snapshot.volume): LOG.debug("Snapshot is in-use. Performing Nova " "assisted creation.") else: backing_file_full_path = os.path.join( self._local_volume_dir(snapshot.volume), backing_file) self._vhdutils.create_differencing_vhd(new_snap_path, backing_file_full_path) # We're setting the backing file information in the DB as we may not # be able to query the image while it's in use due to file locks. # # When dealing with temporary snapshots created by the driver, we # may not receive an actual snapshot VO. We currently need this check # in order to avoid breaking the volume clone operation. # # TODO(lpetrut): remove this check once we'll start using db entries # for such temporary snapshots, most probably when we'll add support # for cloning in-use volumes. if isinstance(snapshot, objects.Snapshot): snapshot.metadata['backing_file'] = backing_file snapshot.save() else: LOG.debug("Received a '%s' object, skipping setting the backing " "file in the DB.", type(snapshot)) def _extend_volume(self, volume, size_gb): self._check_extend_volume_support(volume, size_gb) volume_path = self._local_path_active_image(volume) LOG.info('Resizing file %(volume_path)s to %(size_gb)sGB.', dict(volume_path=volume_path, size_gb=size_gb)) self._vhdutils.resize_vhd(volume_path, size_gb * units.Gi, is_file_max_size=False) def _delete_snapshot(self, snapshot): # NOTE(lpetrut): We're slightly diverging from the super class # workflow. The reason is that we cannot query in-use vhd/x images, # nor can we add or remove images from a vhd/x chain in this case. 
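# --- Illustrative sketch (not part of the driver) ---
# Restates how the SMBFS driver's _qemu_img_info() above builds its
# lightweight image info without calling qemu-img: the format comes from
# the file extension (Hyper-V relies on extensions) and the backing file
# is the basename of the parent VHD/X path reported by os-win. The helper
# name and sample paths are made up.
import os

def describe_image(path, parent_path=None):
    file_format = os.path.splitext(path)[1][1:].lower()
    backing_file = (os.path.split(parent_path)[1].lower()
                    if parent_path else None)
    return {'image': os.path.basename(path),
            'file_format': file_format,
            'backing_file': backing_file}

# describe_image('/mnt/share/volume-1.abc123.vhdx',
#                '/mnt/share/volume-1.vhdx')
#     -> {'image': 'volume-1.abc123.vhdx', 'file_format': 'vhdx',
#         'backing_file': 'volume-1.vhdx'}
# --- end of sketch ---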
info_path = self._local_path_volume_info(snapshot.volume) snap_info = self._read_info_file(info_path, empty_if_missing=True) if snapshot.id not in snap_info: LOG.info('Snapshot record for %s is not present, allowing ' 'snapshot_delete to proceed.', snapshot.id) return file_to_merge = snap_info[snapshot.id] deleting_latest_snap = utils.paths_normcase_equal(snap_info['active'], file_to_merge) if not self._is_volume_attached(snapshot.volume): super(WindowsSmbfsDriver, self)._delete_snapshot(snapshot) else: delete_info = {'file_to_merge': file_to_merge, 'volume_id': snapshot.volume.id} self._nova_assisted_vol_snap_delete( snapshot._context, snapshot, delete_info) # At this point, the image file should no longer be in use, so we # may safely query it so that we can update the 'active' image # reference, if needed. merged_img_path = os.path.join( self._local_volume_dir(snapshot.volume), file_to_merge) if deleting_latest_snap: new_active_file_path = self._vhdutils.get_vhd_parent_path( merged_img_path).lower() snap_info['active'] = os.path.basename(new_active_file_path) self._delete(merged_img_path) # TODO(lpetrut): drop snapshot info file usage. del snap_info[snapshot.id] self._write_info_file(info_path, snap_info) if not isinstance(snapshot, objects.Snapshot): LOG.debug("Received a '%s' object, skipping setting the backing " "file in the DB.", type(snapshot)) elif not deleting_latest_snap: backing_file = snapshot['metadata'].get('backing_file') higher_snapshot = self._get_snapshot_by_backing_file( snapshot.volume, file_to_merge) # The snapshot objects should have a backing file set, unless # created before an upgrade. If the snapshot we're deleting # does not have a backing file set yet there is a newer one that # does, we're clearing it out so that it won't provide wrong info. if higher_snapshot: LOG.debug("Updating backing file reference (%(backing_file)s) " "for higher snapshot: %(higher_snapshot_id)s.", dict(backing_file=snapshot.metadata['backing_file'], higher_snapshot_id=higher_snapshot.id)) higher_snapshot.metadata['backing_file'] = ( snapshot.metadata['backing_file']) higher_snapshot.save() if not (higher_snapshot and backing_file): LOG.info( "The deleted snapshot is not latest one, yet we could not " "find snapshot backing file information in the DB. This " "may happen after an upgrade. Certain operations against " "this volume may be unavailable while it's in-use.") def _get_snapshot_by_backing_file(self, volume, backing_file): all_snapshots = objects.SnapshotList.get_all_for_volume( context.get_admin_context(), volume.id) for snapshot in all_snapshots: snap_backing_file = snapshot.metadata.get('backing_file') if utils.paths_normcase_equal(snap_backing_file or '', backing_file): return snapshot def _get_snapshot_backing_file(self, snapshot): backing_file = snapshot.metadata.get('backing_file') if not backing_file: LOG.info("Could not find the snapshot backing file in the DB. " "This may happen after an upgrade. Attempting to " "query the image as a fallback. This may fail if " "the image is in-use.") backing_file = super( WindowsSmbfsDriver, self)._get_snapshot_backing_file(snapshot) return backing_file def _check_extend_volume_support(self, volume, size_gb): snapshots_exist = self._snapshots_exist(volume) fmt = self.get_volume_format(volume) if snapshots_exist and fmt == self._DISK_FORMAT_VHD: msg = _('Extending volumes backed by VHD images is not supported ' 'when snapshots exist. 
Please use VHDX images.') raise exception.InvalidVolume(msg) @coordination.synchronized('{self.driver_prefix}-{volume.id}') def copy_volume_to_image(self, context, volume, image_service, image_meta): """Copy the volume to the specified image.""" # If snapshots exist, flatten to a temporary image, and upload it active_file = self.get_active_image_from_info(volume) active_file_path = os.path.join(self._local_volume_dir(volume), active_file) backing_file = self._vhdutils.get_vhd_parent_path(active_file_path) root_file_fmt = self.get_volume_format(volume) temp_path = None try: if backing_file: temp_file_name = '%s.temp_image.%s.%s' % ( volume.id, image_meta['id'], root_file_fmt) temp_path = os.path.join(self._local_volume_dir(volume), temp_file_name) self._vhdutils.convert_vhd(active_file_path, temp_path) upload_path = temp_path else: upload_path = active_file_path volume_utils.upload_volume(context, image_service, image_meta, upload_path, volume, root_file_fmt) finally: if temp_path: self._delete(temp_path) def copy_image_to_volume(self, context, volume, image_service, image_id, disable_sparse=False): """Fetch the image from image_service and write it to the volume.""" volume_path = self.local_path(volume) volume_format = self.get_volume_format(volume, qemu_format=True) volume_subformat = self._get_vhd_type(qemu_subformat=True) self._delete(volume_path) image_utils.fetch_to_volume_format( context, image_service, image_id, volume_path, volume_format, self.configuration.volume_dd_blocksize, volume_subformat, disable_sparse=disable_sparse) volume_path = self.local_path(volume) self._vhdutils.set_vhd_guid(volume_path, volume.id) self._vhdutils.resize_vhd(volume_path, volume.size * units.Gi, is_file_max_size=False) def _copy_volume_from_snapshot(self, snapshot, volume, volume_size, src_encryption_key_id=None, new_encryption_key_id=None): """Copy data from snapshot to destination volume.""" if new_encryption_key_id: msg = _("Encryption key %s was requested. Volume " "encryption is not currently supported.") raise exception.NotSupportedOperation( message=msg % new_encryption_key_id) LOG.debug("snapshot: %(snap)s, volume: %(vol)s, " "volume_size: %(size)s", {'snap': snapshot.id, 'vol': volume.id, 'size': snapshot.volume_size}) vol_dir = self._local_volume_dir(snapshot.volume) # Find the file which backs this file, which represents the point # when this snapshot was created. backing_file = self._get_snapshot_backing_file(snapshot) snapshot_path = os.path.join(vol_dir, backing_file) volume_path = self.local_path(volume) vhd_type = self._get_vhd_type() self._delete(volume_path) self._vhdutils.convert_vhd(snapshot_path, volume_path, vhd_type=vhd_type) self._vhdutils.set_vhd_guid(volume_path, volume.id) self._vhdutils.resize_vhd(volume_path, volume_size * units.Gi, is_file_max_size=False) def _copy_volume_image(self, src_path, dest_path): self._pathutils.copy(src_path, dest_path) def _get_share_name(self, share): return share.replace('/', '\\').lstrip('\\').split('\\', 1)[1] def _get_pool_name_from_share(self, share): return self._pool_mappings[share] def _get_share_from_pool_name(self, pool_name): mappings = {pool: share for share, pool in self._pool_mappings.items()} share = mappings.get(pool_name) if not share: msg = _("Could not find any share for pool %(pool_name)s. 
" "Pool mappings: %(pool_mappings)s.") raise SmbfsException( msg % dict(pool_name=pool_name, pool_mappings=self._pool_mappings)) return share def _get_vhd_type(self, qemu_subformat=False): prov_type = self.configuration.nas_volume_prov_type if qemu_subformat: vhd_type = self._vhd_qemu_subformat_mapping[prov_type] else: vhd_type = self._vhd_type_mapping[prov_type] return vhd_type def _get_managed_vol_expected_path(self, volume, volume_location): fmt = self._vhdutils.get_vhd_format(volume_location['vol_local_path']) return os.path.join(volume_location['mountpoint'], volume.name + ".%s" % fmt).lower() def manage_existing(self, volume, existing_ref): model_update = super(WindowsSmbfsDriver, self).manage_existing( volume, existing_ref) volume.provider_location = model_update['provider_location'] volume_path = self.local_path(volume) self._vhdutils.set_vhd_guid(volume_path, volume.id) return model_update def _set_rw_permissions(self, path): # The SMBFS driver does not manage file permissions. We chose # to let this up to the deployer. pass def backup_use_temp_snapshot(self): return True ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315577.4071214 cinder-27.0.0/cinder/volume/drivers/yadro/0000775000175000017500000000000000000000000020436 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/drivers/yadro/__init__.py0000664000175000017500000000000000000000000022535 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/drivers/yadro/tatlin_api.py0000664000175000017500000000233700000000000023141 0ustar00zuulzuul00000000000000# Copyright (C) 2021-2022 YADRO. # All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. HOST_GROUPS = 'personalities/v1/config/groups' HOSTS = 'personalities/v1/config/hosts' RESOURCE = 'personalities/v1/personalities/block/%s' RESOURCE_DETAIL = 'personalities/v1/personalities?id=%s' RESOURCE_HEALTH = 'health/v1/personalities?id=%s' RESOURCE_MAPPING = 'personalities/v1/config/resource_mapping' VOLUME_TO_HOST = 'personalities/v1/personalities/block/%s/hosts/%s' ALL_RESOURCES = 'personalities/v1/personalities' POOLS = 'health/v1/pools' STATISTICS = 'health/v1/statistics/current' IP_PORTS = 'osmgr/v1/ports/%s' RESOURCE_COUNT = 'personalities/v1/personalities/block/countPerPool' TATLIN_VERSION = 'upmgr/v1/version' ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/drivers/yadro/tatlin_client.py0000664000175000017500000006371100000000000023651 0ustar00zuulzuul00000000000000# Copyright (C) 2021-2022 YADRO. # All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import time from oslo_log import log as logging import requests from cinder import exception from cinder.i18n import _ from cinder.utils import retry from cinder.volume.drivers.yadro import tatlin_api from cinder.volume.drivers.yadro.tatlin_exception import TatlinAPIException LOG = logging.getLogger(__name__) retry_exc = (Exception,) def InitTatlinClient(ip, port, username, password, verify, api_retry_count, wait_interval, wait_retry_count): access_api = TatlinAccessAPI(ip, port, username, password, verify) tatlin_version = access_api.get_tatlin_version() if tatlin_version <= (2, 3): return TatlinClientV23(access_api, api_retry_count=api_retry_count, wait_interval=wait_interval, wait_retry_count=wait_retry_count) else: return TatlinClientV25(access_api, api_retry_count=api_retry_count, wait_interval=wait_interval, wait_retry_count=wait_retry_count) class TatlinAccessAPI: session = None ip = None port = None username = None password = None verify = False _api_version = None def __init__(self, ip, port, user, passwd, verify): self.ip = ip self.port = port self.username = user self.password = passwd self.verify = verify self._authenticate_access() def _authenticate_access(self): LOG.debug('Generating new Tatlin API session') self.session = requests.session() LOG.debug('SSL verification %s', self.session.verify) self.session.verify = self.verify if not self.verify: requests.packages.urllib3.disable_warnings() # Here 'address' will be only IPv4. response = self.session.post('https://%s:%d/auth/login' % (self.ip, self.port), data={'user': self.username, 'secret': self.password}, verify=self.verify) if response.status_code != requests.codes.ok: LOG.error('Failed to authenticate to remote cluster at %s for %s.', self.ip, self.username) raise exception.NotAuthorized(_('Authentication failure.')) result = response.json() self.session.headers.update({'X-Auth-Token': result['token']}) self.session.headers.update({'Content-Type': 'application/json'}) def send_request(self, path, input_data, method): full_url = self._get_api(path) resp = self.session.request( method, full_url, verify=self.verify, json=input_data) LOG.debug('Tatlin response for method %s URL %s %s', method, full_url, resp) if resp.status_code == requests.codes.unauthorized: LOG.info('Not authenticated. 
Logging in.') self._authenticate_access() resp = self.session.request( method, full_url, verify=self.verify, json=input_data) return resp def get_tatlin_version(self): if not self._api_version: responce = self.send_request(tatlin_api.TATLIN_VERSION, {}, 'GET') ver = responce.json()['build-version'].split('.') self._api_version = (int(ver[0]), int(ver[1])) LOG.debug('Tatlin version: %s', str(self._api_version)) return self._api_version def _get_api(self, tail): return ('https://%s:%d/' % (self.ip, self.port)) + tail class TatlinClientCommon: session = None _api = None access_api_retry_count = 1 def __init__(self, tatlin_rest_api, api_retry_count, wait_interval, wait_retry_count): self.session = None self._api = tatlin_rest_api self.access_api_retry_count = api_retry_count self.wait_interval = wait_interval self.wait_retry_count = wait_retry_count def add_vol_to_host(self, vol_id, host_id): LOG.debug('Adding volume %s to host %s', vol_id, host_id) if self._is_vol_on_host(vol_id, host_id): return path = tatlin_api.VOLUME_TO_HOST % (vol_id, host_id) try: self._access_api(path, {}, 'PUT', pass_codes=[requests.codes.bad_request]) except TatlinAPIException as exp: message = _('Unable to add volume %s to host %s error %s' % (vol_id, host_id, exp.message)) LOG.error(message) raise TatlinAPIException(500, message) if not self._is_vol_on_host(vol_id, host_id): raise exception.VolumeBackendAPIException( 'Unable to add volume %s to host %s' % (vol_id, host_id)) return def remove_vol_from_host(self, vol_id, host_id): if not self._is_vol_on_host(vol_id, host_id): return path = tatlin_api.VOLUME_TO_HOST % (vol_id, host_id) try: LOG.debug('Removing volume %s from host %s', vol_id, host_id) self._access_api(path, {}, 'DELETE', pass_codes=[requests.codes.not_found, requests.codes.bad_request]) except TatlinAPIException as exp: message = _('Unable to remove volume %s from host %s error %s' % (vol_id, host_id, exp.message)) LOG.error(message) raise TatlinAPIException(500, message) if self._is_vol_on_host(vol_id, host_id): raise exception.VolumeBackendAPIException( 'Volume %s still on host %s' % (vol_id, host_id)) return def create_volume(self, vol_id, name, size_in_byte, pool_id, lbaFormat='512e'): data = {"name": name, "size": size_in_byte, "poolId": pool_id, "deduplication": False, "compression": False, "alert_threshold": 0, "lbaFormat": lbaFormat } path = tatlin_api.RESOURCE % vol_id LOG.debug('Create volume: volume=%(v3)s path=%(v1)s body=%(v2)s', {'v1': path, 'v2': data, 'v3': vol_id},) try: self._access_api(path, data, 'PUT') except TatlinAPIException as exp: message = _('Create volume %s failed due to %s' % (id, exp.message)) LOG.error(message) raise exception.VolumeBackendAPIException(message=message) def delete_volume(self, vol_id): LOG.debug('Delete volume %s', vol_id) path = tatlin_api.RESOURCE % vol_id try: self._access_api(path, {}, 'DELETE', pass_codes=[requests.codes.not_found, requests.codes.bad_request]) except TatlinAPIException as exp: message = _('Delete volume %s failed due to %s' % (vol_id, exp.message)) LOG.error(message) raise def extend_volume(self, vol_id, new_size_in_byte): path = tatlin_api.RESOURCE % vol_id data = {"new_size": new_size_in_byte} LOG.debug('Extending volume to %s ', new_size_in_byte) try: self._access_api(path, data, 'POST') except TatlinAPIException as exp: message = _('Unable to extend volume %s due to %s' % (vol_id, exp.message)) LOG.error(message) raise exception.VolumeBackendAPIException(message=message) def get_resource_mapping(self): try: result, status = 
self._access_api(tatlin_api.RESOURCE_MAPPING) return result except TatlinAPIException as exp: message = _( 'TATLIN: Error getting resource mapping information %s' % exp.message) LOG.error(message) raise exception.VolumeBackendAPIException(message=message) def get_all_hosts(self): try: result, status = self._access_api(tatlin_api.HOSTS) return result except TatlinAPIException: message = _('Unable to get hosts configuration') raise exception.VolumeBackendAPIException(message=message) def get_host_info(self, host_id): try: result, stat = self._access_api(tatlin_api.HOSTS + '/' + host_id) LOG.debug('Host info for %s is %s', host_id, result) return result except TatlinAPIException as exp: message = _('Unable to get host info %s error %s' % (host_id, exp.message)) LOG.error(message) raise exception.VolumeBackendAPIException(message=message) def get_host_id(self, name): return self.get_host_id_by_name(name) def get_iscsi_cred(self): auth_path = tatlin_api.RESOURCE % 'auth' try: cred, status = self._access_api(auth_path) except TatlinAPIException as exp: message = _('Unable to get iscsi user cred due to %s' % exp.message) LOG.error(message) raise exception.VolumeBackendAPIException(message=message) return cred def get_host_group_info(self, group_id): try: result, status = self._access_api(tatlin_api.HOST_GROUPS + '/' + group_id) return result except TatlinAPIException as exp: message = _('Unable to get host group info %s error %s' % (group_id, exp.message)) LOG.error(message) raise exception.VolumeBackendAPIException(message=message) def get_host_group_id(self, name): try: result, status = self._access_api(tatlin_api.HOST_GROUPS) for h in result: LOG.debug('Host name: %s Host ID %s', h['name'], h['id']) if h['name'] == name: return h['id'] except TatlinAPIException as exp: message = (_('Unable to get id for host group %s error %s') % (name, exp.message)) LOG.error(message) raise exception.VolumeBackendAPIException( message='Unable to find host group id for %s' % name) def get_volume_ports(self, vol_id): if not self.is_volume_exists(vol_id): message = _('Unable to get volume info %s' % vol_id) LOG.error(message) return {} path = tatlin_api.RESOURCE % vol_id + '/ports' try: response, stat = self._access_api(path) except TatlinAPIException as exp: message = _('Unable to get ports for target %s ' 'with %s error code: %s' % (vol_id, exp.message, exp.code)) LOG.error(message) return {} return response def get_resource_ports_array(self, volume_id): ports = self.get_volume_ports(volume_id) if ports == {}: return [] res = [] for p in ports: res.append(p['port']) LOG.debug('Volume %s port list %s', volume_id, res) return res def get_port_portal(self, portal_type): path = tatlin_api.IP_PORTS % portal_type try: result, stat = self._access_api(path) except TatlinAPIException as exp: message = _('Failed to get ports info due to %s' % exp.message) LOG.error(message) raise exception.VolumeBackendAPIException(message=message) return result def is_volume_exists(self, vol_id): volume_path = tatlin_api.RESOURCE % vol_id LOG.debug('get personality statistic: volume_path=%(v1)s ', {'v1': volume_path}) try: volume_result, status = self._access_api( volume_path, {}, 'GET', pass_codes=[requests.codes.not_found]) if status == requests.codes.not_found: message = _('Volume %s does not exist' % vol_id) LOG.debug(message) return False except TatlinAPIException as exp: message = _('Exception Unable to get volume info %s ' 'due to %s stat: %s' % (vol_id, exp.message, exp.code)) LOG.error(message) raise 
exception.VolumeBackendAPIException(message=message) LOG.debug('Volume %s exists', vol_id) return True def get_volume(self, vol_id): volume_path = tatlin_api.RESOURCE % vol_id LOG.debug('get personality statistic: volume_path=%(v1)s', {'v1': volume_path}) try: volume_result, stat = self._access_api( volume_path, {}, 'GET', pass_codes=[requests.codes.not_found]) if stat == requests.codes.not_found: message = _('Unable to get volume info %s due to %s stat: %s' % (vol_id, 'Volume not found', '404')) LOG.error(message) raise exception.VolumeBackendAPIException(message=message) except TatlinAPIException as exp: message = _('Unable to get volume info %s due to %s stat: %s' % (vol_id, exp.message, exp.code)) LOG.error(message) raise exception.VolumeBackendAPIException(message=message) return volume_result def get_pool_id_by_name(self, pool_name): try: result, status = self._access_api(tatlin_api.POOLS) except TatlinAPIException as exp: message = _('Unable to get pool id for %s due to %s' % (pool_name, exp.message)) LOG.error(message) raise exception.VolumeBackendAPIException(message=message) for p in result: if p['name'] == pool_name: return p['id'] message = _('Pool "%s" not found' % pool_name) LOG.error(message) raise exception.VolumeBackendAPIException(message=message) def get_pool_detail(self, pool_id): if not pool_id: return {} path = tatlin_api.POOLS + "/" + pool_id try: result, status = self._access_api(path) except TatlinAPIException as exp: message = _('Unable to get pool information for %s due to %s' % (pool_id, exp.message)) LOG.error(message) return {} return result def get_sys_statistic(self): try: sys_stat, status = self._access_api(tatlin_api.STATISTICS) except TatlinAPIException as exp: message = _('Unable to get system statistic due to %s' % exp.message) LOG.error(message) raise return sys_stat def get_volume_info(self, vol_name): path = tatlin_api.RESOURCE_DETAIL % vol_name try: result, status = self._access_api(path) except TatlinAPIException as exp: message = _('Unable to get volume %s error %s' % (vol_name, exp.message)) LOG.error(message) raise exception.ManageExistingInvalidReference(message) return result def get_tatlin_version(self): return self._api.get_tatlin_version() def get_resource_count(self, p_id): raise NotImplementedError() def is_volume_ready(self, id): path = tatlin_api.RESOURCE_DETAIL % id try: result, status = self._access_api(path) except TatlinAPIException: return False for p in result: LOG.debug('Volume %s status: %s', id, p['status']) if p['status'] != 'ready': return False return True def get_volume_status(self, id): path = tatlin_api.RESOURCE_HEALTH % id try: result, status = self._access_api(path) except TatlinAPIException: return False for p in result: LOG.debug('Volume status: %s', p['status']) return p['status'] return '' def set_port(self, vol_id, port): path = tatlin_api.RESOURCE % vol_id + "/ports/" + port try: self._access_api(path, {}, 'PUT', pass_codes=[requests.codes.conflict]) except TatlinAPIException as e: message = _('Unable to link port %s for volume %s error %s' % (port, vol_id, e.message)) LOG.error(message) raise exception.VolumeBackendAPIException(message=message) def export_volume(self, vol_id, eth_ports): raise NotImplementedError() def export_vol_to_port_list(self, vol_id, port_list): path = tatlin_api.RESOURCE % vol_id + "/ports/list" try: self._access_api(path, port_list, 'PUT', pass_codes=[ requests.codes.conflict, requests.codes.bad_request]) except TatlinAPIException as e: message = _('Unable to link ports %s for volume %s 
error %s' % (port_list, vol_id, e.message)) LOG.error(message) raise exception.VolumeBackendAPIException(message=message) def _access_api(self, path, input_data = None, method = None, pass_codes=None): @retry(retry_exc, interval=1, retries=self.access_api_retry_count) def do_access_api(path, input_data, method, pass_codes): if input_data is None: input_data = {} if method is None: method = 'GET' if pass_codes is None: pass_codes = [] pass_codes = [requests.codes.ok] + pass_codes startTime = time.time() response = self._api.send_request(path, input_data, method) finishTime = time.time() duration = str((finishTime - startTime) * 1000) + ' ms' postfix = '[FAST]' if finishTime - startTime < 15 else '[SLOW]' try: result = response.json() except ValueError: result = {} if response.status_code not in pass_codes: message = _('Request: method: %s path: %s ' 'failed with status: %s message: %s in %s %s' % (method, path, str(response.status_code), result, duration, postfix)) LOG.debug(message) raise TatlinAPIException(response.status_code, message, path=path) LOG.debug( 'Request %s %s successfully finished with %s code in %s %s', method, path, str(response.status_code), duration, postfix) return result, response.status_code return do_access_api(path, input_data, method, pass_codes) def _is_vol_on_host(self, vol_id, host_id): LOG.debug('Check resource %s in host %s', vol_id, host_id) try: result, status = self._access_api(tatlin_api.RESOURCE_MAPPING) except TatlinAPIException as exp: raise exception.VolumeBackendAPIException( message=_('Tatlin API exception %s ' 'while getting resource mapping' % exp.message)) for entry in result: if 'host_id' in entry: if entry['resource_id'] == vol_id and \ entry['host_id'] == host_id: LOG.debug('Volume %s already on host %s', vol_id, host_id) return True LOG.debug('Volume %s not on host %s', vol_id, host_id) return False def get_unassigned_ports(self, volume_id, eth_ports): cur_ports = self.get_resource_ports_array(volume_id) LOG.debug('VOLUME %s: Port needed %s actual %s', volume_id, list(eth_ports.keys()), cur_ports) return list(set(eth_ports.keys()) - set(cur_ports)) def is_port_assigned(self, volume_id, port): LOG.debug('VOLUME %s: Checking port %s ', volume_id, port) cur_ports = self._get_ports(volume_id) res = port in cur_ports LOG.debug('VOLUME %s: port %s assigned %s', volume_id, port, str(res)) return res def _check_group_mapping(self, vol_id, group_id): LOG.debug('Check resource %s in group %s', vol_id, group_id) try: result, status = self._access_api(tatlin_api.RESOURCE_MAPPING) except TatlinAPIException as exp: raise exception.VolumeBackendAPIException( message=_('Tatlin API exception %s ' 'while getting resource mapping' % exp.message)) for entry in result: if entry['resource_id'] == vol_id and \ entry['host_group_id'] == group_id: return True return False def update_qos(self, vol_id, iops, bandwith): pass def get_host_id_by_name(self, host_name): try: result, status = self._access_api(tatlin_api.HOSTS) for h in result: LOG.debug('For host %s Host name: %s Host ID %s', host_name, h['name'], h['id']) if h['name'] == host_name: return h['id'] except TatlinAPIException as exp: message = _('Unable to get host information %s' % exp.message) LOG.error(message) raise exception.VolumeBackendAPIException(message=message) raise exception.VolumeBackendAPIException( message='Unable to get host_id for host %s' % host_name) class TatlinClientV25 (TatlinClientCommon): def update_qos(self, vol_id, iops, bandwith): path = tatlin_api.RESOURCE % vol_id data = 
{"limit_iops": int(iops), "limit_bw": int(bandwith), "tags": []} try: result, status = self._access_api(path, data, 'POST') LOG.debug('Responce %s stat %s', result, status) except TatlinAPIException as exp: message = (_('Unable to update QoS for volume %s due to %s') % (vol_id, exp.message)) LOG.error(message) raise exception.VolumeBackendAPIException(message=message) def export_volume(self, vol_id, eth_ports): LOG.debug('VOLUME %s: Export to ports %s started', vol_id, eth_ports) to_export = self.get_unassigned_ports(vol_id, eth_ports) if not to_export: LOG.debug('VOLUME %s: all ports already assigned', vol_id) return self.export_vol_to_port_list(vol_id, to_export) for i in range(self.wait_retry_count): if not self.get_unassigned_ports(vol_id, eth_ports): LOG.debug('VOLUME %s: Export ports %s finished', vol_id, eth_ports) return time.sleep(self.wait_interval) message = (_('VOLUME %s: Unable to export volume to %s') % (vol_id, eth_ports)) raise exception.VolumeBackendAPIException(message=message) def get_resource_count(self, p_id): try: result, status = self._access_api(tatlin_api.RESOURCE_COUNT) except TatlinAPIException: message = _('Unable to get resource count') LOG.error(message) raise exception.ManageExistingInvalidReference(message) poll_resource = 0 cluster_resources = 0 for key in result: if key == p_id: poll_resource = result[key] cluster_resources = cluster_resources + result[key] return poll_resource, cluster_resources class TatlinClientV23 (TatlinClientCommon): def export_volume(self, vol_id, eth_ports): LOG.debug('Export ports %s for volume %s started', eth_ports, vol_id) for port in eth_ports: LOG.debug('Check port %s for volume %s', port, vol_id) if not self.is_port_assigned(vol_id, port): try: self.set_port(vol_id, port) except TatlinAPIException as e: raise exception.VolumeBackendAPIException( message=e.message) LOG.debug('Export ports %s for volume %s finished', eth_ports, vol_id) for i in range(self.wait_retry_count): if not self.get_unassigned_ports(vol_id, eth_ports): LOG.debug('VOLUME %s: Export ports %s finished', vol_id, eth_ports) return time.sleep(self.wait_interval) message = (_('VOLUME %s: Unable to export volume to %s') % (vol_id, eth_ports)) raise exception.VolumeBackendAPIException(message=message) def get_resource_count(self, p_id): try: response, status = self._access_api(tatlin_api.ALL_RESOURCES) if response is not None: return 0, len(response) except TatlinAPIException: message = (_('Unable to get resource list')) LOG.error(message) return 0, 0 return 0, 0 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/drivers/yadro/tatlin_common.py0000664000175000017500000010027000000000000023653 0ustar00zuulzuul00000000000000# Copyright (C) 2021-2022 YADRO. # All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
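# This module defines TatlinCommonVolumeDriver, the protocol-agnostic base
# class shared by the Tatlin FC (tatlin_fc.py) and iSCSI (tatlin_iscsi.py)
# drivers. It covers the volume lifecycle (create/delete/extend/clone),
# QoS updates, pool statistics reporting and host mapping, all through the
# REST client returned by InitTatlinClient.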
import os import time from oslo_config import cfg from oslo_log import log as logging from oslo_utils import units from cinder import context as cinder_context from cinder import exception from cinder.i18n import _ from cinder import objects from cinder import utils from cinder.volume import configuration from cinder.volume import driver from cinder.volume.drivers.san import san from cinder.volume.drivers.yadro.tatlin_client import InitTatlinClient from cinder.volume.drivers.yadro.tatlin_exception import TatlinAPIException from cinder.volume.drivers.yadro.tatlin_utils import TatlinVolumeConnections from cinder.volume import qos_specs from cinder.volume import volume_types from cinder.volume import volume_utils from cinder.volume.volume_utils import brick_get_connector_properties from cinder.zonemanager import utils as fczm_utils LOG = logging.getLogger(__name__) tatlin_opts = [ cfg.StrOpt('pool_name', default='', help='storage pool name'), cfg.PortOpt('api_port', default=443, help='Port to use to access the Tatlin API'), cfg.StrOpt('export_ports', default='', help='Ports to export Tatlin resource through'), cfg.StrOpt('host_group', default='', help='Tatlin host group name'), cfg.IntOpt('max_resource_count', default=500, help='Max resource count allowed for Tatlin'), cfg.IntOpt('pool_max_resource_count', default=250, help='Max resource count allowed for single pool'), cfg.IntOpt('tat_api_retry_count', default=10, help='Number of retry on Tatlin API'), cfg.StrOpt('auth_method', default='CHAP', help='Authentication method for iSCSI (CHAP)'), cfg.StrOpt('lba_format', default='512e', help='LBA Format for new volume'), cfg.IntOpt('wait_retry_count', default=15, help='Number of checks for a lengthy operation to finish'), cfg.IntOpt('wait_interval', default=30, help='Wait number of seconds before re-checking'), ] CONF = cfg.CONF CONF.register_opts(tatlin_opts, group=configuration.SHARED_CONF_GROUP) class TatlinCommonVolumeDriver(driver.VolumeDriver, object): def __init__(self, *args, **kwargs): super(TatlinCommonVolumeDriver, self).__init__(*args, **kwargs) self._ip = None self._port = 443 self._user = None self._password = None self._pool_name = None self._pool_id = None self.configuration.append_config_values(san.san_opts) self.configuration.append_config_values(tatlin_opts) self._auth_method = 'CHAP' self._chap_username = '' self._chap_password = '' self.backend_name = None self.DRIVER_VOLUME_TYPE = None self._export_ports = None self._host_group = None self.verify = None self.DEFAULT_FILTER_FUNCTION = None self.DEFAULT_GOODNESS_FUNCTION = None self._use_multipath = True self._enforce_multipath = False self._lba_format = '512e' self._ssl_cert_path = None self._max_pool_resource_count = 250 def do_setup(self, context): """Initial driver setup""" required_config = ['san_ip', 'san_login', 'san_password', 'pool_name', 'host_group'] for attr in required_config: if not getattr(self.configuration, attr, None): message = (_('config option %s is not set.') % attr) raise exception.InvalidInput(message=message) self._ip = self.configuration.san_ip self._user = self.configuration.san_login self._password = self.configuration.san_password self._port = self.configuration.api_port self._pool_name = self.configuration.pool_name self._export_ports = self.configuration.export_ports self._host_group = self.configuration.host_group self._auth_method = self.configuration.auth_method self._chap_username = self.configuration.chap_username self._chap_password = self.configuration.chap_password self._wait_interval = 
self.configuration.wait_interval self._wait_retry_count = self.configuration.wait_retry_count self._ssl_cert_path = (self.configuration. safe_get('driver_ssl_cert_path') or None) self.verify = (self.configuration. safe_get('driver_ssl_cert_verify') or False) if self.verify and self._ssl_cert_path: self.verify = self._ssl_cert_path LOG.info('Tatlin driver version: %s', self.VERSION) self.tatlin_api = self._get_tatlin_client() self.ctx = context self.MAX_ALLOWED_RESOURCES = self.configuration.max_resource_count self._max_pool_resource_count = \ self.configuration.pool_max_resource_count self.DEFAULT_FILTER_FUNCTION = \ 'capabilities.pool_resource_count < ' +\ str(self._max_pool_resource_count) +\ ' and capabilities.overall_resource_count < ' +\ str(self.MAX_ALLOWED_RESOURCES) self.DEFAULT_GOODNESS_FUNCTION = '100 - capabilities.utilization' self._use_multipath = \ (self.configuration.safe_get( 'use_multipath_for_image_xfer') or False) self._enforce_multipath = \ (self.configuration.safe_get( 'enforce_multipath_for_image_xfer') or False) self._lba_format = self.configuration.lba_format self._wait_interval = self.configuration.wait_interval self._wait_retry_count = self.configuration.wait_retry_count self._connections = TatlinVolumeConnections( os.path.join(CONF.state_path, 'tatlin-volume-connections')) def check_for_setup_error(self): pass @volume_utils.trace def create_volume(self, volume): """Entry point for create new volume""" if not self.pool_id: raise exception.VolumeBackendAPIException( message='Wrong Tatlin pool configuration') pool_res_count, cluster_res_count = \ self.tatlin_api.get_resource_count(self.pool_id) LOG.debug('Current pool %(pool)s has %(pool_res)s res.' 'Whole cluster has %(cluster_res)s', {'pool': self.pool_id, 'pool_res': pool_res_count, 'cluster_res': cluster_res_count}) self._stats['pool_resource_count'] = pool_res_count self._stats['overall_resource_count'] = cluster_res_count if pool_res_count > 255: message = _('TatlinVolumeDriver create volume failed. ' 'Too many resources per pool created') LOG.error(message) raise exception.VolumeBackendAPIException(message=message) if cluster_res_count + 1 > self.MAX_ALLOWED_RESOURCES: message = _('TatlinVolumeDriver create volume failed. 
' 'Too many resources per cluster created') LOG.error(message) raise exception.VolumeBackendAPIException(message=message) LOG.debug('Create volume %(vol_id)s started', {'vol_id': volume.name_id}) self._create_volume_storage(volume) LOG.debug('Create volume %s finished', volume.name_id) def _create_volume_storage(self, volume): """Create a volume with a specific name in Tatlin""" size = volume.size * units.Gi vol_type = 'snapshot' if 'snapshot_volume' in volume.metadata \ else 'volume' name = 'cinder-%s-%s' % (vol_type, volume.name_id) LOG.debug('Creating Tatlin resource %(name)s ' 'with %(size)s size in pool %(pool)s', {'name': name, 'size': size, 'pool': self.pool_id}) self.tatlin_api.create_volume(volume.name_id, name, size, self.pool_id, lbaFormat=self._lba_format) self.wait_volume_ready(volume) self._update_qos(volume) def wait_volume_ready(self, volume): for counter in range(self._wait_retry_count): if self.tatlin_api.is_volume_ready(volume.name_id): return LOG.warning('Volume %s is not ready', volume.name_id) time.sleep(self._wait_interval) message = _('Volume %s still not ready') % volume.name_id LOG.error(message) raise exception.VolumeBackendAPIException(message=message) def wait_volume_online(self, volume): for counter in range(self._wait_retry_count): if self.tatlin_api.get_volume_status(volume.name_id) == 'online': return LOG.warning('Volume %s still not online', volume.name_id) time.sleep(self._wait_interval) message = _('Volume %s unable to become online' % volume.name_id) raise exception.VolumeBackendAPIException(message=message) @volume_utils.trace def delete_volume(self, volume): """Entry point for delete volume""" LOG.debug('Delete volume started for %s', volume.name_id) if not self.tatlin_api.is_volume_exists(volume.name_id): LOG.debug('Volume %s does not exist', volume.name_id) return try: self.tatlin_api.delete_volume(volume.name_id) except TatlinAPIException as e: message = _('Unable to delete volume %s due to %s' % (volume.name_id, e)) raise exception.VolumeBackendAPIException(message=message) for counter in range(self._wait_retry_count): if not self.tatlin_api.is_volume_exists(volume.name_id): LOG.debug('Delete volume finished for %s', volume.name_id) return LOG.debug('Volume %s still exists, waiting for delete...', volume.name_id) time.sleep(self._wait_interval) if self.tatlin_api.is_volume_exists(volume.name_id): message = _('Unable to delete volume %s' % volume.name_id) LOG.error(message) raise exception.VolumeBackendAPIException(message=message) @volume_utils.trace def extend_volume(self, volume, new_size): size = new_size * units.Gi LOG.debug('Extending volume %s to %s', volume.name_id, size) self.tatlin_api.extend_volume(volume.name_id, size) self.wait_volume_ready(volume) self._update_qos(volume) @volume_utils.trace def create_cloned_volume(self, volume, src_vol): """Entry point for clone existing volume""" LOG.debug('Create cloned volume %(target)s from %(source)s started', {'target': volume.name_id, 'source': src_vol.name_id}) self.create_volume(volume) self._clone_volume_data(volume, src_vol) LOG.debug('Create cloned volume %(target)s from %(source)s finished', {'target': volume.name_id, 'source': src_vol.name_id}) def _clone_volume_data(self, volume, src_vol): props = brick_get_connector_properties( self._use_multipath, self._enforce_multipath) LOG.debug('Volume %s Connection properties %s', volume.name_id, props) dest_attach_info = None src_attach_info = None size_in_mb = int(src_vol['size']) * units.Ki try: src_attach_info, volume_src = 
self._attach_volume( self.ctx, src_vol, props) LOG.debug('Source attach info: %s volume: %s', src_attach_info, volume_src) except Exception as e: LOG.error('Unable to attach src volume due to %s', e) raise try: dest_attach_info, volume_dest = self._attach_volume( self.ctx, volume, props) LOG.debug('Dst attach info: %s volume: %s', dest_attach_info, volume_dest) except Exception as e: LOG.error('Unable to attach dst volume due to %s', e) self._detach_volume(self.ctx, src_attach_info, src_vol, props) raise try: LOG.debug('Begin copy to %s from %s', volume.name_id, src_vol.name_id) volume_utils.copy_volume(src_attach_info['device']['path'], dest_attach_info['device']['path'], size_in_mb, self.configuration.volume_dd_blocksize, sparse=False) LOG.debug('End copy to %s from %s', volume.name_id, src_vol.name_id) except Exception as e: LOG.error('Unable to clone volume source: %s dst: %s due to %s', src_vol.name_id, volume.name_id, e) raise finally: try: self._detach_volume(self.ctx, src_attach_info, src_vol, props) finally: self._detach_volume(self.ctx, dest_attach_info, volume, props) @volume_utils.trace def _attach_volume(self, context, volume, properties, remote=False): @utils.synchronized('tatlin-volume-attachments-%s' % volume.name_id) def _do_attach_volume(): LOG.debug('Start Tatlin attach volume %s properties %s', volume.name_id, properties) return super(driver.VolumeDriver, self)._attach_volume( context, volume, properties, remote=remote) return _do_attach_volume() @volume_utils.trace def _detach_volume(self, context, attach_info, volume, properties, force=False, remote=False, ignore_errors=False): @utils.synchronized('tatlin-volume-attachments-%s' % volume.name_id) def _do_detach_volume(): LOG.debug('Start Tatlin detach for %s', volume.name_id) connection_count = self._connections.get(volume.name_id) if connection_count > 1: LOG.debug('There are still other connections to volume %s,' ' not detaching', volume.name_id) self._connections.decrement(volume.name_id) return # decrement of connections will happen in terminate_connection() super(driver.VolumeDriver, self).\ _detach_volume(context, attach_info, volume, properties, force=force, remote=remote, ignore_errors=ignore_errors) _do_detach_volume() @volume_utils.trace def initialize_connection(self, volume, connector): @utils.synchronized("tatlin-volume-connections-%s" % volume.name_id) def _initialize_connection(): LOG.debug('Init %s with connector %s', volume.name_id, connector) current_host = self.find_current_host(connector) self.add_volume_to_host(volume, current_host) if self._is_cinder_host_connection(connector): self._connections.increment(volume.name_id) connection_info = self._create_connection_info(volume, connector) fczm_utils.add_fc_zone(connection_info) return connection_info return _initialize_connection() @volume_utils.trace def terminate_connection(self, volume, connector, **kwargs): @utils.synchronized("tatlin-volume-connections-%s" % volume.name_id) def _terminate_connection(): LOG.debug('Terminate connection for %s with connector %s', volume.name_id, connector) connection_info = self._create_connection_info(volume, connector) if not connector: self.remove_volume_from_all_hosts(volume) return connection_info if self._is_cinder_host_connection(connector): connections = self._connections.decrement(volume.name_id) if connections > 0: LOG.debug('Not terminating connection: ' 'volume %s, existing connections: %s', volume.name_id, connections) return connection_info hostname = connector['host'] if 
self._is_nova_multiattached(volume, hostname): LOG.debug('Volume %s is attached on host %s to multiple VMs.' ' Not terminating connection', volume.name_id, hostname) return connection_info host_id = self.find_current_host(connector) self.remove_volume_from_host(volume, host_id) resources = [r for r in self.tatlin_api.get_resource_mapping() if r.get('host_id', '') == host_id] if not resources: fczm_utils.remove_fc_zone(connection_info) return connection_info _terminate_connection() def _is_cinder_host_connection(self, connector): # Check if attachment happens on this Cinder host properties = brick_get_connector_properties() return properties['initiator'] == connector['initiator'] def _is_nova_multiattached(self, volume, hostname): # Check if connection to the volume happens to multiple VMs # on the same Nova Compute host if not volume.volume_attachment: return False attachments = [a for a in volume.volume_attachment if a.attach_status == objects.fields.VolumeAttachStatus.ATTACHED and a.attached_host == hostname] return len(attachments) > 1 def _create_temp_volume_for_snapshot(self, snapshot): return self._create_temp_volume( self.ctx, snapshot.volume, { 'name_id': snapshot.id, 'display_name': 'snap-vol-%s' % snapshot.id, 'metadata': {'snapshot_volume': 'yes'}, }) @volume_utils.trace def create_snapshot(self, snapshot): LOG.debug('Create snapshot for volume %s, snap id %s', snapshot.volume.name_id, snapshot.id) temp_volume = self._create_temp_volume_for_snapshot(snapshot) try: self.create_cloned_volume(temp_volume, snapshot.volume) finally: temp_volume.destroy() @volume_utils.trace def create_volume_from_snapshot(self, volume, snapshot): LOG.debug('Create volume from snapshot %s', snapshot.id) temp_volume = self._create_temp_volume_for_snapshot(snapshot) try: self.create_volume(volume) self._clone_volume_data(volume, temp_volume) finally: temp_volume.destroy() @volume_utils.trace def delete_snapshot(self, snapshot): LOG.debug('Delete snapshot %s', snapshot.id) temp_volume = self._create_temp_volume_for_snapshot(snapshot) try: self.delete_volume(temp_volume) finally: temp_volume.destroy() @volume_utils.trace def get_volume_stats(self, refresh=False): if not self._stats or refresh: self._update_volume_stats() return self._stats def _update_qos(self, volume): type_id = volume.volume_type_id LOG.debug('VOL_TYPE %s', type_id) if type_id: ctx = cinder_context.get_admin_context() volume_type = volume_types.get_volume_type(ctx, type_id) qos_specs_id = volume_type.get('qos_specs_id') LOG.debug('VOL_TYPE %s QOS_SPEC %s', volume_type, qos_specs_id) specs = {} if qos_specs_id is not None: sp = qos_specs.get_qos_specs(ctx, qos_specs_id) if sp.get('consumer') != 'front-end': specs = qos_specs.get_qos_specs(ctx, qos_specs_id)['specs'] LOG.debug('QoS spec: %s', specs) param_specs = volume_type.get('extra_specs') LOG.debug('Param spec is: %s', param_specs) iops = specs["total_iops_sec_max"] \ if 'total_iops_sec_max' in specs \ else param_specs["YADRO:total_iops_sec_max"] \ if 'YADRO:total_iops_sec_max' in param_specs else '0' bandwidth = specs["total_bytes_sec_max"] \ if 'total_bytes_sec_max' in specs \ else param_specs["YADRO:total_bytes_sec_max"] \ if 'YADRO:total_bytes_sec_max' in param_specs else '0' LOG.debug('QOS spec IOPS: %s BANDWIDTH %s', iops, bandwidth) self.tatlin_api.update_qos( volume.name_id, int(iops), int(bandwidth)) @volume_utils.trace def _update_volume_stats(self): """Retrieve pool info""" LOG.debug('Update volume stats for pool: %s', self.pool_name) if not self.pool_id: LOG.error('Could 
not retrieve pool id for %s', self.pool_name) return try: pool_stat = self.tatlin_api.get_pool_detail(self.pool_id) except TatlinAPIException as exp: message = (_('TatlinVolumeDriver get volume stats ' 'failed %s due to %s') % (self.pool_name, exp.message)) LOG.error(message) return try: sys_stat = self.tatlin_api.get_sys_statistic() except TatlinAPIException as exp: message = (_('TatlinVolumeDriver get system stats detail ' 'failed %s due to %s') % (self.pool_name, exp.message)) LOG.error(message) return if sys_stat['iops_bandwidth'] is not None and \ len(sys_stat['iops_bandwidth']) > 0: self._stats['read_iops'] = \ sys_stat['iops_bandwidth'][0]['value']['read_iops'] self._stats['write_iops'] = \ sys_stat['iops_bandwidth'][0]['value']['write_iops'] self._stats['total_iops'] = \ sys_stat['iops_bandwidth'][0]['value']['total_iops'] self._stats['read_bytes_ps'] = \ sys_stat['iops_bandwidth'][0]['value']['read_bytes_ps'] self._stats['write_bytes_ps'] = \ sys_stat['iops_bandwidth'][0]['value']['write_bytes_ps'] self._stats['total_bytes_ps'] = \ sys_stat['iops_bandwidth'][0]['value']['total_bytes_ps'] self._stats["volume_backend_name"] = self.backend_name self._stats["vendor_name"] = 'YADRO' self._stats["driver_version"] = self.VERSION self._stats["storage_protocol"] = self.DRIVER_VOLUME_TYPE self._stats["thin_provisioning_support"] = pool_stat['thinProvision'] self._stats["consistencygroup_support"] = False self._stats["consistent_group_snapshot_enabled"] = False self._stats["QoS_support"] = True self._stats["multiattach"] = True self._stats['total_capacity_gb'] = \ (int(pool_stat['capacity']) - int(pool_stat['failed'])) / units.Gi self._stats['tatlin_pool'] = self.pool_name self._stats['tatlin_ip'] = self._ip pool_res_count, cluster_res_count = \ self.tatlin_api.get_resource_count(self.pool_id) self._stats['overall_resource_count'] = cluster_res_count self._stats['pool_resource_count'] = pool_res_count if pool_stat['thinProvision']: self._stats['provisioned_capacity_gb'] = \ (int(pool_stat['capacity']) - int(pool_stat['failed'])) / units.Gi self._stats['free_capacity_gb'] = \ self._stats['provisioned_capacity_gb'] else: self._stats['provisioned_capacity_gb'] = \ (int(pool_stat['available']) - int(pool_stat['failed'])) / units.Gi self._stats['free_capacity_gb'] = \ self._stats['provisioned_capacity_gb'] self._stats['utilization'] = \ (float(self._stats['total_capacity_gb']) - float(self._stats['free_capacity_gb'])) / \ float(self._stats['total_capacity_gb']) * 100 LOG.debug( 'Total capacity: %s Free capacity: %s ' 'Provisioned capacity: %s ' 'Thin provisioning: %s ' 'Resource count: %s ' 'Pool resource count %s ' 'Utilization %s', self._stats['total_capacity_gb'], self._stats['free_capacity_gb'], self._stats['provisioned_capacity_gb'], pool_stat['thinProvision'], self._stats['overall_resource_count'], self._stats['pool_resource_count'], self._stats['utilization']) def _init_vendor_properties(self): LOG.debug('Initializing YADRO vendor properties') properties = {} self._set_property( properties, "YADRO:total_bytes_sec_max", "YADRO QoS Max bytes Write", _("Max write iops setting for volume qos, " "use 0 for unlimited"), "integer", minimum=0, default=0) self._set_property( properties, "YADRO:total_iops_sec_max", "YADRO QoS Max IOPS Write", _("Max write iops setting for volume qos, " "use 0 for unlimited"), "integer", minimum=0, default=0) LOG.debug('YADRO vendor properties: %s', properties) return properties, 'YADRO' def migrate_volume(self, context, volume, host): """Migrate volume Method checks 
if target volume will be on the same Tatlin/Pool If not, re-type should be executed. """ if 'tatlin_pool' not in host['capabilities']: return False, None self._update_qos(volume) LOG.debug('Migrating volume from pool %s ip %s to pool %s ip %s', self.pool_name, self._ip, host['capabilities']['tatlin_pool'], host['capabilities']['tatlin_ip']) if host['capabilities']['tatlin_ip'] == self._ip and \ host['capabilities']['tatlin_pool'] == self.pool_name: return True, None return False, None def manage_existing(self, volume, external_ref): """Entry point to manage existing resource""" source_name = external_ref.get('source-name', None) if source_name is None: raise exception.ManageExistingInvalidReference( _('source_name should be provided')) try: result = self.tatlin_api.get_volume_info(source_name) except Exception: raise exception.ManageExistingInvalidReference( _('Unable to get resource with %s name' % source_name)) existing_vol = result[0] existing_vol['name'] = volume.name_id volume.name_id = existing_vol['id'] pool_id = existing_vol['poolId'] if pool_id != self.pool_id: raise exception.ManageExistingInvalidReference( _('Existing volume should be in %s pool' % self.pool_name)) self._update_qos(volume) def manage_existing_get_size(self, volume, external_ref): source_name = external_ref.get('source-name', None) if source_name is None: raise exception.ManageExistingInvalidReference( _('source_name should be provided')) try: result = self.tatlin_api.get_volume_info(source_name) except TatlinAPIException: raise exception.ManageExistingInvalidReference( _('Unable to get resource with %s name' % source_name)) size = int(result[0]['size']) / units.G return size def add_volume_to_host(self, volume, host_id): self.tatlin_api.add_vol_to_host(volume.name_id, host_id) self._update_qos(volume) def remove_volume_from_host(self, volume, host_id): self.tatlin_api.remove_vol_from_host(volume.name_id, host_id) def remove_volume_from_all_hosts(self, volume): mappings = self.tatlin_api.get_resource_mapping() hosts = [m['host_id'] for m in mappings if 'resource_id' in m and m['resource_id'] == volume.name_id] for host_id in hosts: self.tatlin_api.remove_vol_from_host(volume.name_id, host_id) def _is_port_assigned(self, volume_id, port): LOG.debug('VOLUME %s: Checking port %s ', volume_id, port) cur_ports = self.tatlin_api.get_resource_ports_array(volume_id) res = port in cur_ports LOG.debug('VOLUME %s: port %s assigned %s', volume_id, port, str(res)) return res def _get_ports_portals(self): return {} def _create_connection_info(self, volume, connector): return {} def _find_mapped_lun(self, volume_id, connector): host_id = self.find_current_host(connector) result = self.tatlin_api.get_resource_mapping() for r in result: if 'host_id' in r: if r['resource_id'] == volume_id and r['host_id'] == host_id: return r['mapped_lun_id'] mess = (_('Unable to get mapped lun for volume %s on host %s') % (volume_id, host_id)) LOG.error(mess) raise exception.VolumeBackendAPIException(message=mess) @staticmethod def get_driver_options(): return tatlin_opts @volume_utils.trace def ensure_export(self, context, volume): LOG.debug('Tatlin ensure export') ports = self._get_ports_portals() self.tatlin_api.export_volume(volume.name_id, ports) @volume_utils.trace def create_export(self, context, volume, connector): LOG.debug('Create export for %s started', volume.name_id) self.ensure_export(context, volume) LOG.debug('Create export for %s finished', volume.name_id) def remove_export(self, context, volume): return def 
_get_tatlin_client(self): return InitTatlinClient( self._ip, self._port, self._user, self._password, verify=self.verify, api_retry_count=self.configuration.tat_api_retry_count, wait_interval=self._wait_interval, wait_retry_count=self._wait_retry_count) def find_current_host(self, connector): return '' @property def pool_id(self): if not self._pool_id: try: self._pool_id = self.tatlin_api.get_pool_id_by_name( self.pool_name) except exception.VolumeBackendAPIException: LOG.error('Unable to get current Tatlin pool') return self._pool_id @pool_id.setter def pool_id(self, value): self._pool_id = value @property def pool_name(self): return self._pool_name @pool_name.setter def pool_name(self, value): self._pool_name = value def get_default_filter_function(self): return self.DEFAULT_FILTER_FUNCTION def get_default_goodness_function(self): return self.DEFAULT_GOODNESS_FUNCTION @volume_utils.trace def get_backup_device(self, context, backup): """Get a backup device from an existing volume. We currently return original device where possible due to absence of instant clones and snapshots """ if backup.snapshot_id: return super().get_backup_device(context, backup) volume = objects.Volume.get_by_id(context, backup.volume_id) return (volume, False) def backup_use_temp_snapshot(self): return False def snapshot_revert_use_temp_snapshot(self): return False ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/drivers/yadro/tatlin_exception.py0000664000175000017500000000160500000000000024363 0ustar00zuulzuul00000000000000# Copyright (C) 2021-2022 YADRO. # All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from cinder.exception import VolumeBackendAPIException class TatlinAPIException(VolumeBackendAPIException): path = '' code = 500 message = '' def __init__(self, code, message, path=''): self.code = code self.message = message self.path = path ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/drivers/yadro/tatlin_fc.py0000664000175000017500000001040500000000000022753 0ustar00zuulzuul00000000000000# Copyright, 2023, KNS Group LLC (YADRO) # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
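# This module defines TatlinFCVolumeDriver, the Fibre Channel variant of the
# common Tatlin driver. It resolves the current host by WWPN, collects target
# WWNs per exported port, and builds the initiator/target map either through
# the FC zone lookup service or, when none is configured, by mapping every
# initiator WWPN to all target WWNs.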
from oslo_log import log as logging from cinder.common import constants from cinder import exception from cinder.i18n import _ from cinder import interface from cinder.volume import driver from cinder.volume.drivers.yadro import tatlin_common from cinder.zonemanager import utils as fczm_utils LOG = logging.getLogger(__name__) @interface.volumedriver class TatlinFCVolumeDriver(tatlin_common.TatlinCommonVolumeDriver, driver.FibreChannelDriver): """ACCESS Tatlin FC Driver. Executes commands relating to FC. Supports creation of volumes. .. code-block:: none API version history: 1.0 - Initial version. """ VERSION = '1.0' SUPPORTS_ACTIVE_ACTIVE = True # ThirdPartySystems wiki CI_WIKI_NAME = "Yadro_Tatlin_Unified_CI" def __init__(self, *args, **kwargs): super(TatlinFCVolumeDriver, self).__init__(*args, **kwargs) self.backend_name = self.configuration.safe_get( 'volume_backend_name') or 'TatlinFC' self.DRIVER_VOLUME_TYPE = constants.FC self._lookup_service = fczm_utils.create_lookup_service() def _create_connection_info(self, volume, connector): info = { 'driver_volume_type': constants.FC_VARIANT_1, 'data': self._create_volume_data(volume, connector) } return info def _get_ports_portals(self): result = self.tatlin_api.get_port_portal("fc") ports = {} for p in result: iface = p['params']['ifname'] if self._export_ports and iface not in self._export_ports: continue ports.setdefault(iface, []).append(p['params']['wwpn']) return ports def _create_volume_data(self, volume, connector): if connector is None: return {} lun_id = self._find_mapped_lun(volume.name_id, connector) volume_ports = self.tatlin_api.get_volume_ports(volume.name_id) ports_portals = self._get_ports_portals() data = { 'target_discovered': True, 'target_lun': lun_id, 'discard': False, } target_wwns = [] for port in volume_ports: wwpns = ports_portals.get(port['port']) if not wwpns: continue target_wwns += [w.replace(':', '') for w in wwpns] data['target_wwn'] = target_wwns data['initiator_target_map'] = self._build_initiator_target_map( target_wwns, connector) return data def find_current_host(self, connector): wwns = connector['wwpns'] LOG.debug('Try to find host id for %s', wwns) result = self.tatlin_api.get_all_hosts() for h in result: for wwn in h['initiators']: if wwn.replace(':', '') in wwns: LOG.debug('Current host is %s', h['id']) return h['id'] message = _('Unable to get host information for wwns: %s') % str(wwns) LOG.error(message) raise exception.VolumeBackendAPIException(message=message) def _build_initiator_target_map(self, target_wwns, connector): result = {} if self._lookup_service: mapping = self._lookup_service.get_device_mapping_from_network( connector['wwpns'], target_wwns) for fabric in mapping.values(): for initiator in fabric['initiator_port_wwn_list']: result.setdefault(initiator, []).extend( fabric['target_port_wwn_list']) result = {i: list(set(t)) for i, t in result.items()} else: result = dict.fromkeys(connector['wwpns'], target_wwns) return result ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/drivers/yadro/tatlin_iscsi.py0000664000175000017500000001225400000000000023501 0ustar00zuulzuul00000000000000# Copyright (C) 2021-2022 YADRO. # All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_log import log as logging from cinder import exception from cinder.i18n import _ from cinder import interface from cinder.volume import driver from cinder.volume.drivers.yadro.tatlin_common import TatlinCommonVolumeDriver from cinder.volume.drivers.yadro.tatlin_exception import TatlinAPIException LOG = logging.getLogger(__name__) @interface.volumedriver class TatlinISCSIVolumeDriver(TatlinCommonVolumeDriver, driver.ISCSIDriver): """ACCESS Tatlin ISCSI Driver. Executes commands relating to ISCSI. Supports creation of volumes. .. code-block:: none API version history: 1.0 - Initial version. 1.1 - Common code sharing with FC driver """ VERSION = "1.1" SUPPORTS_ACTIVE_ACTIVE = True # ThirdPartySystems wiki CI_WIKI_NAME = "Yadro_Tatlin_Unified_CI" def __init__(self, vg_obj=None, *args, **kwargs): # Parent sets db, host, _execute and base config super(TatlinISCSIVolumeDriver, self).__init__(*args, **kwargs) if self.configuration: self.backend_name = (self.configuration.safe_get( 'volume_backend_name') or 'TatlinISCSI') self.DRIVER_VOLUME_TYPE = 'iSCSI' def _create_connection_info(self, volume, connector): info = { 'driver_volume_type': 'iscsi', 'data': self._create_volume_data(volume, connector) } return info def _get_ports_portals(self): try: result = self.tatlin_api.get_port_portal("ip") except TatlinAPIException as exp: message = (_('Failed to get ports info due to %s') % exp.message) LOG.error(message) raise exception.VolumeBackendAPIException(message=message) ports = {} for p in result: ipaddr = p['params']['ipaddress'] if not ipaddr: continue iface = p['params']['ifname'] if iface.startswith('p'): if self._export_ports and iface not in self._export_ports: continue if iface not in ports: ports[iface] = [] ports[iface].append(ipaddr + ':3260') return ports def _create_volume_data(self, volume, connector): if connector is None: return {} eth_ports = self._get_ports_portals() lun_id = self._find_mapped_lun(volume.name_id, connector) vol_ports = self.tatlin_api.get_volume_ports(volume.name_id) res = {'target_discovered': True, 'target_lun': lun_id} if self._auth_method == 'CHAP': res['auth_method'] = 'CHAP' res['auth_username'] = self._chap_username res['auth_password'] = self._chap_password target_luns = [] target_iqns = [] target_portals = [] for port in vol_ports: if port['port'] not in eth_ports.keys(): continue ips = eth_ports[port['port']] target_portals += ips luns = [lun_id for _ in ips] target_luns += luns if 'running' in port: target_iqns += port['wwn'] * len(port['running']) else: target_iqns += port['wwn'] if not target_portals or not target_iqns or not target_luns: message = (_('Not enough connection data, ' 'luns: %s, portals: %s, iqns: %s') % target_luns, target_portals, target_iqns) LOG.error(message) raise exception.VolumeBackendAPIException(message=message) res['target_lun'] = target_luns[0] res['target_luns'] = target_luns res['target_iqn'] = target_iqns[0] res['target_iqns'] = target_iqns res['target_portal'] = target_portals[0] res['target_portals'] = target_portals LOG.debug("Volume data = %s", res) return res def find_current_host(self, connector): iqn = 
connector['initiator'] LOG.debug('Try to find host id for %s', iqn) gr_id = self.tatlin_api.get_host_group_id(self._host_group) group_info = self.tatlin_api.get_host_group_info(gr_id) LOG.debug('Group info for %s is %s', self._host_group, group_info) for host_id in group_info['host_ids']: if iqn in self.tatlin_api.get_host_info(host_id)['initiators']: LOG.debug('Found host %s for initiator %s', host_id, iqn) return host_id message = _('Unable to find host for initiator %s' % iqn) LOG.error(message) raise exception.VolumeBackendAPIException(message=message) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/drivers/yadro/tatlin_utils.py0000664000175000017500000000574700000000000023540 0ustar00zuulzuul00000000000000# Copyright (C) 2021-2022 YADRO. # All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import os from oslo_log import log as logging from cinder.utils import synchronized from cinder.volume import volume_utils LOG = logging.getLogger(__name__) class TatlinVolumeConnections: """Auxiliary class to keep current host volume connections counted This class keeps connections of volumes to local host where this Cinder instance runs. It prevents disconnection of devices and termination of storage links in cases where two Cinder greenthreads use the same volume (e.g. creation of new volumes from image cache) or connection termination of Nova volume if Nova is collocated on the same host (e.g. with snapshots while volumes are attached). Once Tatlin implements clones and snaps this class should disappear. """ def __init__(self, path): LOG.debug('Initialize counters for volume connections') self.counters = path self.create_store() @synchronized('tatlin-connections-store', external=True) def create_store(self): if not os.path.isdir(self.counters): os.mkdir(self.counters) # We won't intersect with other backend processes # because a volume belongs to one backend. Hence # no external flag need. 
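    # Illustrative usage sketch (hypothetical store path and volume id, not
    # part of the driver code): callers bump the counter on every local
    # connect and drop it on disconnect, so the device is only torn down once
    # the count reaches zero, as described in the class docstring.
    #
    #   connections = TatlinVolumeConnections('/var/lib/cinder/tatlin-conn')
    #   connections.increment('volume-uuid')   # -> 1, first local attachment
    #   connections.increment('volume-uuid')   # -> 2, e.g. image-cache clone
    #   connections.decrement('volume-uuid')   # -> 1, device kept connected
    #   connections.decrement('volume-uuid')   # -> 0, counter file removed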
@synchronized('tatlin-connections-store') def increment(self, id): counter = os.path.join(self.counters, id) connections = 0 if os.path.exists(counter): with open(counter, 'r') as c: connections = int(c.read()) connections += 1 with open(counter, 'w') as c: c.write(str(connections)) return connections @volume_utils.trace @synchronized('tatlin-connections-store') def decrement(self, id): counter = os.path.join(self.counters, id) if not os.path.exists(counter): return 0 with open(counter, 'r') as c: connections = int(c.read()) if connections == 1: os.remove(counter) return 0 connections -= 1 with open(counter, 'w') as c: c.write(str(connections)) return connections @volume_utils.trace @synchronized('tatlin-connections-store') def get(self, id): counter = os.path.join(self.counters, id) if not os.path.exists(counter): return 0 with open(counter, 'r') as c: connections = int(c.read()) return connections ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315577.4071214 cinder-27.0.0/cinder/volume/drivers/zadara/0000775000175000017500000000000000000000000020562 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/drivers/zadara/__init__.py0000664000175000017500000000000000000000000022661 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/drivers/zadara/common.py0000664000175000017500000004603500000000000022434 0ustar00zuulzuul00000000000000# Copyright (c) 2020 Zadara Storage, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import json import re from oslo_config import cfg from oslo_log import log as logging import requests LOG = logging.getLogger(__name__) # Number of seconds the repsonse for the request sent to # vpsa is expected. Else the request will be timed out. # Setting it to 300 seconds initially. vpsa_timeout = 300 # Common exception class for all the exceptions that # are used to redirect to the driver specific exceptions. 
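# Illustrative sketch of that redirection (it mirrors vpsa_send_cmd() in
# zadara.py later in this tree; the surrounding names are shown only for
# illustration):
#
#   try:
#       response = self.vpsa.send_cmd(cmd, **kwargs)
#   except common.exception.SessionRequestException as e:
#       raise zadara_exception.ZadaraSessionRequestException(msg=e.msg)
#   except common.exception.ZadaraInvalidAccessKey:
#       raise zadara_exception.ZadaraCinderInvalidAccessKey()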
class CommonException(Exception): def __init__(self): pass class UnknownCmd(Exception): def __init__(self, cmd): self.cmd = cmd class BadHTTPResponseStatus(Exception): def __init__(self, status): self.status = status class FailedCmdWithDump(Exception): def __init__(self, status, data): self.status = status self.data = data class SessionRequestException(Exception): def __init__(self, msg): self.msg = msg class ZadaraInvalidAccessKey(Exception): pass exception = CommonException() zadara_opts = [ cfg.HostAddressOpt('zadara_vpsa_host', default=None, help='VPSA - Management Host name or IP address'), cfg.PortOpt('zadara_vpsa_port', default=None, help='VPSA - Port number'), cfg.BoolOpt('zadara_vpsa_use_ssl', default=False, help='VPSA - Use SSL connection'), cfg.BoolOpt('zadara_ssl_cert_verify', default=True, help='If set to True the http client will validate the SSL ' 'certificate of the VPSA endpoint.'), cfg.StrOpt('zadara_access_key', default=None, help='VPSA access key', secret=True), cfg.StrOpt('zadara_vpsa_poolname', default=None, help='VPSA - Storage Pool assigned for volumes'), cfg.BoolOpt('zadara_vol_encrypt', default=False, help='VPSA - Default encryption policy for volumes. ' 'If the option is neither configured nor provided ' 'as metadata, the VPSA will inherit the default value.'), cfg.BoolOpt('zadara_gen3_vol_dedupe', default=False, help='VPSA - Enable deduplication for volumes. ' 'If the option is neither configured nor provided ' 'as metadata, the VPSA will inherit the default value.'), cfg.BoolOpt('zadara_gen3_vol_compress', default=False, help='VPSA - Enable compression for volumes. ' 'If the option is neither configured nor provided ' 'as metadata, the VPSA will inherit the default value.'), cfg.BoolOpt('zadara_default_snap_policy', default=False, help="VPSA - Attach snapshot policy for volumes. " "If the option is neither configured nor provided " "as metadata, the VPSA will inherit the default value.")] # Class used to connect and execute the commands on # Zadara Virtual Private Storage Array (VPSA). class ZadaraVPSAConnection(object): """Executes driver commands on VPSA.""" def __init__(self, conf, driver_ssl_cert_path, block): self.conf = conf self.access_key = conf.zadara_access_key if not self.access_key: raise exception.ZadaraInvalidAccessKey() self.driver_ssl_cert_path = driver_ssl_cert_path # Choose the volume type of either block or file-type # that will help to filter volumes. 
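        # Illustrative effect, derived from the 'list_volumes' command defined
        # below: with block=True, volume listings are requested as
        #   GET /api/volumes.json?showonlyblock=YES
        # while a file-type (share) consumer would use showonlyfile=YES.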
self.vol_type_str = 'showonlyblock' if block else 'showonlyfile' # Dictionary of applicable VPSA commands in the following format: # 'command': (method, API_URL, {optional parameters}) self.vpsa_commands = { # Volume operations 'create_volume': lambda kwargs: ( 'POST', '/api/volumes.json', {'name': kwargs.get('name'), 'capacity': kwargs.get('size'), 'pool': self.conf.zadara_vpsa_poolname, 'block': 'YES' if self.vol_type_str == 'showonlyblock' else 'NO', 'thin': 'YES', 'crypt': 'YES' if self.conf.zadara_vol_encrypt else 'NO', 'compress': 'YES' if self.conf.zadara_gen3_vol_compress else 'NO', 'dedupe': 'YES' if self.conf.zadara_gen3_vol_dedupe else 'NO', 'attachpolicies': 'NO' if not self.conf.zadara_default_snap_policy else 'YES'}), 'delete_volume': lambda kwargs: ( 'DELETE', '/api/volumes/%s.json' % kwargs.get('vpsa_vol'), {'force': 'YES'}), 'expand_volume': lambda kwargs: ( 'POST', '/api/volumes/%s/expand.json' % kwargs.get('vpsa_vol'), {'capacity': kwargs.get('size')}), 'rename_volume': lambda kwargs: ( 'POST', '/api/volumes/%s/rename.json' % kwargs.get('vpsa_vol'), {'new_name': kwargs.get('new_name')}), # Snapshot operations # Snapshot request is triggered for a single volume though the # API call implies that snapshot is triggered for CG (legacy API). 'create_snapshot': lambda kwargs: ( 'POST', '/api/consistency_groups/%s/snapshots.json' % kwargs.get('cg_name'), {'display_name': kwargs.get('snap_name')}), 'delete_snapshot': lambda kwargs: ( 'DELETE', '/api/snapshots/%s.json' % kwargs.get('snap_id'), {}), 'rename_snapshot': lambda kwargs: ( 'POST', '/api/snapshots/%s/rename.json' % kwargs.get('snap_id'), {'newname': kwargs.get('new_name')}), 'create_clone_from_snap': lambda kwargs: ( 'POST', '/api/consistency_groups/%s/clone.json' % kwargs.get('cg_name'), {'name': kwargs.get('name'), 'snapshot': kwargs.get('snap_id')}), 'create_clone': lambda kwargs: ( 'POST', '/api/consistency_groups/%s/clone.json' % kwargs.get('cg_name'), {'name': kwargs.get('name')}), # Server operations 'create_server': lambda kwargs: ( 'POST', '/api/servers.json', {'iqn': kwargs.get('iqn'), 'iscsi': kwargs.get('iscsi_ip'), 'display_name': kwargs.get('iqn') if kwargs.get('iqn') else kwargs.get('iscsi_ip')}), # Attach/Detach operations 'attach_volume': lambda kwargs: ( 'POST', '/api/servers/%s/volumes.json' % kwargs.get('vpsa_srv'), {'volume_name[]': kwargs.get('vpsa_vol'), 'access_type': kwargs.get('share_proto'), 'readonly': kwargs.get('read_only'), 'force': 'YES'}), 'detach_volume': lambda kwargs: ( 'POST', '/api/volumes/%s/detach.json' % kwargs.get('vpsa_vol'), {'server_name[]': kwargs.get('vpsa_srv'), 'force': 'YES'}), # Update volume comment 'update_volume': lambda kwargs: ( 'POST', '/api/volumes/%s/update_comment.json' % kwargs.get('vpsa_vol'), {'new_comment': kwargs.get('new_comment')}), # Get operations 'list_volumes': lambda kwargs: ( 'GET', '/api/volumes.json?%s=YES' % self.vol_type_str, {}), 'get_volume': lambda kwargs: ( 'GET', '/api/volumes/%s.json' % kwargs.get('vpsa_vol'), {}), 'get_volume_by_name': lambda kwargs: ( 'GET', '/api/volumes.json?display_name=%s' % kwargs.get('display_name'), {}), 'get_pool': lambda kwargs: ( 'GET', '/api/pools/%s.json' % kwargs.get('pool_name'), {}), 'list_controllers': lambda kwargs: ( 'GET', '/api/vcontrollers.json', {}), 'list_servers': lambda kwargs: ( 'GET', '/api/servers.json', {}), 'list_vol_snapshots': lambda kwargs: ( 'GET', '/api/consistency_groups/%s/snapshots.json' % kwargs.get('cg_name'), {}), 'list_vol_attachments': lambda kwargs: ( 'GET', 
'/api/volumes/%s/servers.json' % kwargs.get('vpsa_vol'), {}), 'list_snapshots': lambda kwargs: ( 'GET', '/api/snapshots.json', {}), # Put operations 'change_export_name': lambda kwargs: ( 'PUT', '/api/volumes/%s/export_name.json' % kwargs.get('vpsa_vol'), {'exportname': kwargs.get('exportname')})} def _generate_vpsa_cmd(self, cmd, **kwargs): """Generate command to be sent to VPSA.""" try: method, url, params = self.vpsa_commands[cmd](kwargs) # Populate the metadata for the volume creation metadata = kwargs.get('metadata') if metadata: for key, value in metadata.items(): params[key] = value except KeyError: raise exception.UnknownCmd(cmd=cmd) if method == 'GET': params = dict(page=1, start=0, limit=0) body = None elif method in ['DELETE', 'POST', 'PUT']: body = params params = None else: msg = ('Method %(method)s is not defined' % {'method': method}) LOG.error(msg) raise AssertionError(msg) # 'access_key' was generated using username and password # or it was taken from the input file headers = {'X-Access-Key': self.access_key} return method, url, params, body, headers def send_cmd(self, cmd, **kwargs): """Send command to VPSA Controller.""" if not self.access_key: raise exception.ZadaraInvalidAccessKey() method, url, params, body, headers = self._generate_vpsa_cmd(cmd, **kwargs) LOG.debug('Invoking %(cmd)s using %(method)s request.', {'cmd': cmd, 'method': method}) host = self._get_target_host(self.conf.zadara_vpsa_host) port = int(self.conf.zadara_vpsa_port) protocol = "https" if self.conf.zadara_vpsa_use_ssl else "http" if protocol == "https": if not self.conf.zadara_ssl_cert_verify: verify = False else: verify = (self.driver_ssl_cert_path if self.driver_ssl_cert_path else True) else: verify = False if port: api_url = "%s://%s:%d%s" % (protocol, host, port, url) else: api_url = "%s://%s%s" % (protocol, host, url) try: with requests.Session() as session: session.headers.update(headers) response = session.request(method, api_url, params=params, data=body, headers=headers, verify=verify, timeout=vpsa_timeout) except requests.exceptions.RequestException as e: msg = ('Exception: %s') % e raise exception.SessionRequestException(msg=msg) if response.status_code != 200: raise exception.BadHTTPResponseStatus( status=response.status_code) data = response.content json_data = json.loads(data) response = json_data['response'] status = int(response['status']) if status == 5: # Invalid Credentials raise exception.ZadaraInvalidAccessKey() if status != 0: raise exception.FailedCmdWithDump(status=status, data=data) LOG.debug('Operation completed with status code %(status)s', {'status': status}) return response def _get_target_host(self, vpsa_host): """Helper for target host formatting.""" ipv6_without_brackets = ':' in vpsa_host and vpsa_host[-1] != ']' if ipv6_without_brackets: return ('[%s]' % vpsa_host) return ('%s' % vpsa_host) def _get_active_controller_details(self): """Return details of VPSA's active controller.""" data = self.send_cmd('list_controllers') ctrl = None vcontrollers = data.get('vcontrollers', []) for controller in vcontrollers: if controller['state'] == 'active': ctrl = controller break if ctrl is not None: target_ip = (ctrl['iscsi_ipv6'] if ctrl['iscsi_ipv6'] else ctrl['iscsi_ip']) return dict(target=ctrl['target'], ip=target_ip, chap_user=ctrl['vpsa_chap_user'], chap_passwd=ctrl['vpsa_chap_secret']) return None def _check_access_key_validity(self): """Check VPSA access key""" if not self.access_key: raise exception.ZadaraInvalidAccessKey() active_ctrl = 
self._get_active_controller_details() if active_ctrl is None: raise exception.ZadaraInvalidAccessKey() def _get_vpsa_volume(self, name): """Returns a single vpsa volume based on the display name""" volume = None display_name = name if re.search(r"\s", name): display_name = re.split(r"\s", name)[0] data = self.send_cmd('get_volume_by_name', display_name=display_name) if data['status'] != 0: return None volumes = data['volumes'] for vol in volumes: if vol['display_name'] == name: volume = vol break return volume def _get_vpsa_volume_by_id(self, vpsa_vol): """Returns a single vpsa volume based on name""" data = self.send_cmd('get_volume', vpsa_vol=vpsa_vol) return data['volume'] def _get_all_vpsa_snapshots(self): """Returns snapshots from all vpsa volumes""" data = self.send_cmd('list_snapshots') return data['snapshots'] def _get_all_vpsa_volumes(self): """Returns all vpsa block volumes from the configured pool""" data = self.send_cmd('list_volumes') # FIXME: Work around to filter volumes belonging to given pool # Remove this when we have the API fixed to filter based # on pools. This API today does not have virtual_capacity field volumes = [] for volume in data['volumes']: if volume['pool_name'] == self.conf.zadara_vpsa_poolname: volumes.append(volume) return volumes def _get_server_name(self, initiator, share): """Return VPSA's name for server object. 'share' will be true to search for filesystem volumes """ data = self.send_cmd('list_servers') servers = data.get('servers', []) for server in servers: if share: if server['iscsi_ip'] == initiator: return server['name'] else: if server['iqn'] == initiator: return server['name'] return None def _create_vpsa_server(self, iqn=None, iscsi_ip=None): """Create server object within VPSA (if doesn't exist).""" initiator = iscsi_ip if iscsi_ip else iqn share = True if iscsi_ip else False vpsa_srv = self._get_server_name(initiator, share) if not vpsa_srv: data = self.send_cmd('create_server', iqn=iqn, iscsi_ip=iscsi_ip) if data['status'] != 0: return None vpsa_srv = data['server_name'] return vpsa_srv def _get_servers_attached_to_volume(self, vpsa_vol): """Return all servers attached to volume.""" servers = vpsa_vol.get('server_ext_names') list_servers = [] if servers: list_servers = servers.split(',') return list_servers def _detach_vpsa_volume(self, vpsa_vol, vpsa_srv=None): """Detach volume from all attached servers.""" if vpsa_srv: list_servers_ids = [vpsa_srv] else: list_servers_ids = self._get_servers_attached_to_volume(vpsa_vol) for server_id in list_servers_ids: # Detach volume from server self.send_cmd('detach_volume', vpsa_srv=server_id, vpsa_vol=vpsa_vol['name']) def _get_volume_snapshots(self, cg_name): """Get snapshots in the consistency group""" data = self.send_cmd('list_vol_snapshots', cg_name=cg_name) snapshots = data.get('snapshots', []) return snapshots def _get_snap_id(self, cg_name, snap_name): """Return snapshot ID for particular volume.""" snapshots = self._get_volume_snapshots(cg_name) for snap_vol in snapshots: if snap_vol['display_name'] == snap_name: return snap_vol['name'] return None def _get_pool_capacity(self, pool_name): """Return pool's total and available capacities.""" data = self.send_cmd('get_pool', pool_name=pool_name) pool = data.get('pool') if pool is not None: total = int(pool['capacity']) free = int(pool['available_capacity']) provisioned = int(pool['provisioned_capacity']) LOG.debug('Pool %(name)s: %(total)sGB total, %(free)sGB free, ' '%(provisioned)sGB provisioned', {'name': pool_name, 'total': total, 'free': 
free, 'provisioned': provisioned}) return total, free, provisioned return 'unknown', 'unknown', 'unknown' ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/drivers/zadara/exception.py0000664000175000017500000000315500000000000023136 0ustar00zuulzuul00000000000000# Copyright (c) 2020 Zadara Storage, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Zadara Cinder driver exception handling. """ from cinder import exception from cinder.i18n import _ class ZadaraSessionRequestException(exception.VolumeDriverException): message = _("%(msg)s") class ZadaraCinderInvalidAccessKey(exception.VolumeDriverException): message = "Invalid VPSA access key" class ZadaraVPSANoActiveController(exception.VolumeDriverException): message = _("Unable to find any active VPSA controller") class ZadaraServerCreateFailure(exception.VolumeDriverException): message = _("Unable to create server object for initiator %(name)s") class ZadaraAttachmentsNotFound(exception.VolumeDriverException): message = _("Failed to retrieve attachments for volume %(name)s") class ZadaraInvalidAttachmentInfo(exception.VolumeDriverException): message = _("Invalid attachment info for volume %(name)s: %(reason)s") class ZadaraServerNotFound(exception.VolumeDriverException): message = _("Unable to find server object for initiator %(name)s") ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/drivers/zadara/zadara.py0000664000175000017500000007201000000000000022376 0ustar00zuulzuul00000000000000# Copyright (c) 2019 Zadara Storage, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Volume driver for Zadara Virtual Private Storage Array (VPSA). This driver requires VPSA with API version 15.07 or higher. 
""" from oslo_config import cfg from oslo_log import log as logging from oslo_utils import strutils from cinder.common import constants from cinder import exception as cinder_exception from cinder.i18n import _ from cinder import interface from cinder.objects import fields from cinder.volume import configuration from cinder.volume import driver from cinder.volume.drivers.zadara import common from cinder.volume.drivers.zadara import exception as zadara_exception from cinder.volume import volume_utils CONF = cfg.CONF CONF.register_opts(common.zadara_opts, group=configuration.SHARED_CONF_GROUP) LOG = logging.getLogger(__name__) cinder_opts = [ cfg.BoolOpt('zadara_use_iser', default=True, help='VPSA - Use ISER instead of iSCSI'), cfg.StrOpt('zadara_vol_name_template', default='OS_%s', help='VPSA - Default template for VPSA volume names')] @interface.volumedriver class ZadaraVPSAISCSIDriver(driver.ISCSIDriver): """Zadara VPSA iSCSI/iSER volume driver. .. code-block:: none Version history: 15.07 - Initial driver 16.05 - Move from httplib to requests 19.08 - Add API access key authentication option 20.01 - Move to json format from xml. Provide manage/unmanage volume/snapshot feature 20.12-01 - Merging with the common code for all the openstack drivers 20.12-02 - Common code changed as part of fixing Zadara github issue #18723 20.12-03 - Adding the metadata support while creating volume to configure vpsa. 20.12-20 - IPv6 connectivity support for Cinder driver 20.12-24 - Optimizing get manageable volumes and snapshots """ VERSION = '20.12-24' # ThirdPartySystems wiki page CI_WIKI_NAME = "ZadaraStorage_VPSA_CI" def __init__(self, *args, **kwargs): super(ZadaraVPSAISCSIDriver, self).__init__(*args, **kwargs) self.vpsa = None self.configuration.append_config_values(common.zadara_opts) self.configuration.append_config_values(cinder_opts) # The valid list of volume options that can be specified # as the metadata while creating cinder volume self.vol_options = ['crypt', 'compress', 'dedupe', 'attachpolicies'] @staticmethod def get_driver_options(): driver_opts = [] driver_opts.extend(common.zadara_opts) driver_opts.extend(cinder_opts) return driver_opts def _check_access_key_validity(self): try: self.vpsa._check_access_key_validity() except common.exception.ZadaraInvalidAccessKey: raise zadara_exception.ZadaraCinderInvalidAccessKey() def do_setup(self, context): """Any initialization the volume driver does while starting. Establishes initial connection with VPSA and retrieves access_key. Need to pass driver_ssl_cert_path here (and not fetch it from the config opts directly in common code), because this config option is different for different drivers and so cannot be figured in the common code. 
""" driver_ssl_cert_path = self.configuration.driver_ssl_cert_path self.vpsa = common.ZadaraVPSAConnection(self.configuration, driver_ssl_cert_path, True) self._check_access_key_validity() def check_for_setup_error(self): """Returns an error (exception) if prerequisites aren't met.""" self._check_access_key_validity() def local_path(self, volume): """Return local path to existing local volume.""" raise NotImplementedError() def _get_zadara_vol_template_name(self, vol_name): return self.configuration.zadara_vol_name_template % vol_name def _get_vpsa_volume(self, volume, raise_exception=True): vpsa_volume = None if volume.provider_location: vpsa_volume = (self.vpsa._get_vpsa_volume_by_id( volume.provider_location)) else: vol_name = self._get_zadara_vol_template_name(volume.name) vpsa_volume = self.vpsa._get_vpsa_volume(vol_name) if not vpsa_volume: vol_name = self._get_zadara_vol_template_name(volume.name) msg = (_('Backend Volume %(name)s not found') % {'name': vol_name}) if raise_exception: LOG.error(msg) raise cinder_exception.VolumeDriverException(message=msg) LOG.warning(msg) return vpsa_volume def vpsa_send_cmd(self, cmd, **kwargs): try: response = self.vpsa.send_cmd(cmd, **kwargs) except common.exception.UnknownCmd as e: raise cinder_exception.UnknownCmd(cmd=e.cmd) except common.exception.SessionRequestException as e: raise zadara_exception.ZadaraSessionRequestException(msg=e.msg) except common.exception.BadHTTPResponseStatus as e: raise cinder_exception.BadHTTPResponseStatus(status=e.status) except common.exception.FailedCmdWithDump as e: raise cinder_exception.FailedCmdWithDump(status=e.status, data=e.data) except common.exception.ZadaraInvalidAccessKey: raise zadara_exception.ZadaraCinderInvalidAccessKey() return response def _validate_existing_ref(self, existing_ref): """Validates existing ref""" if not existing_ref.get('name'): raise cinder_exception.ManageExistingInvalidReference( existing_ref=existing_ref, reason=_("manage_existing requires a 'name'" " key to identify an existing volume.")) def _get_volume_metadata(self, volume): if 'metadata' in volume: return volume.metadata if 'volume_metadata' in volume: metadata = volume.volume_metadata return {m['key']: m['value'] for m in metadata} return {} def is_valid_metadata(self, metadata): LOG.debug('Metadata while creating volume: %(metadata)s', {'metadata': metadata}) # Check the values allowed for provided metadata return all(value in ('YES', 'NO') for key, value in metadata.items() if key in self.vol_options) def create_volume(self, volume): """Create volume.""" vol_name = self._get_zadara_vol_template_name(volume.name) # Collect the volume metadata if any provided and validate it metadata = self._get_volume_metadata(volume) if not self.is_valid_metadata(metadata): msg = (_('Invalid metadata for Volume %s') % vol_name) LOG.error(msg) raise cinder_exception.VolumeDriverException(message=msg) data = self.vpsa_send_cmd('create_volume', name=vol_name, size=volume.size, metadata=metadata) return {'provider_location': data.get('vol_name')} def delete_volume(self, volume): """Delete volume. Return ok if doesn't exist. Auto detach from all servers. 
""" vpsa_volume = self._get_vpsa_volume(volume, False) if not vpsa_volume: return self.vpsa._detach_vpsa_volume(vpsa_vol=vpsa_volume) # Delete volume self.vpsa_send_cmd('delete_volume', vpsa_vol=vpsa_volume['name']) def create_snapshot(self, snapshot): """Creates a snapshot.""" LOG.debug('Create snapshot: %s', snapshot.name) vpsa_volume = self._get_vpsa_volume(snapshot.volume) # Retrieve the CG name for the base volume cg_name = vpsa_volume['cg_name'] data = self.vpsa_send_cmd('create_snapshot', cg_name=cg_name, snap_name=snapshot.name) return {'provider_location': data.get('snapshot_name')} def delete_snapshot(self, snapshot): """Deletes a snapshot.""" LOG.debug('Delete snapshot: %s', snapshot.name) vpsa_volume = self._get_vpsa_volume(snapshot.volume, False) if not vpsa_volume: # If the volume isn't present, then don't attempt to delete return # Retrieve the CG name for the base volume cg_name = vpsa_volume['cg_name'] snap_id = self.vpsa._get_snap_id(cg_name, snapshot.name) if not snap_id: # If the snapshot isn't present, then don't attempt to delete LOG.warning('snapshot: snapshot %s not found, ' 'skipping delete operation', snapshot.name) return self.vpsa_send_cmd('delete_snapshot', snap_id=snap_id) def create_volume_from_snapshot(self, volume, snapshot): """Creates a volume from a snapshot.""" LOG.debug('Creating volume from snapshot: %s', snapshot.name) vpsa_volume = self._get_vpsa_volume(snapshot.volume, False) if not vpsa_volume: LOG.error('Snapshot %(name)s not found.', {'name': snapshot.name}) raise cinder_exception.SnapshotNotFound(snapshot_id=snapshot.id) # Retrieve the CG name for the base volume cg_name = vpsa_volume['cg_name'] snap_id = self.vpsa._get_snap_id(cg_name, snapshot.name) if not snap_id: LOG.error('Snapshot %(name)s not found', {'name': snapshot.name}) raise cinder_exception.SnapshotNotFound(snapshot_id=snapshot.id) volume_name = self._get_zadara_vol_template_name(volume.name) self.vpsa_send_cmd('create_clone_from_snap', cg_name=cg_name, name=volume_name, snap_id=snap_id) vpsa_volume = self._get_vpsa_volume(volume) if volume.size > snapshot.volume_size: self.extend_volume(volume, volume.size) return {'provider_location': vpsa_volume.get('name')} def create_cloned_volume(self, volume, src_vref): """Creates a clone of the specified volume.""" LOG.debug('Creating clone of volume: %s', src_vref.name) vpsa_volume = self._get_vpsa_volume(src_vref) # Retrieve the CG name for the base volume cg_name = vpsa_volume['cg_name'] volume_name = self._get_zadara_vol_template_name(volume.name) self.vpsa_send_cmd('create_clone', cg_name=cg_name, name=volume_name) vpsa_volume = self._get_vpsa_volume(volume) if volume.size > src_vref.size: self.extend_volume(volume, volume.size) return {'provider_location': vpsa_volume.get('name')} def extend_volume(self, volume, new_size): """Extend an existing volume.""" # Get volume vpsa_volume = self._get_vpsa_volume(volume) size = vpsa_volume['virtual_capacity'] if new_size < size: raise cinder_exception.InvalidInput( reason=_('%(new_size)s < current size %(size)s') % {'new_size': new_size, 'size': size}) expand_size = new_size - size self.vpsa_send_cmd('expand_volume', vpsa_vol=vpsa_volume['name'], size=expand_size) def create_export(self, context, volume, vg=None): """Irrelevant for VPSA volumes. Export created during attachment.""" pass def ensure_export(self, context, volume): """Irrelevant for VPSA volumes. Export created during attachment.""" pass def remove_export(self, context, volume): """Irrelevant for VPSA volumes. 
Export removed during detach.""" pass def get_manageable_volumes(self, cinder_volumes, marker, limit, offset, sort_keys, sort_dirs): """List volumes on the backend available for management by Cinder""" # Get all vpsa volumes all_vpsa_volumes = self.vpsa._get_all_vpsa_volumes() # Create a dictionary of existing volumes existing_vols = {} for cinder_vol in cinder_volumes: if cinder_vol.provider_location: volumes = (list(filter(lambda volume: (volume['name'] == cinder_vol.provider_location), all_vpsa_volumes))) else: cinder_name = (self._get_zadara_vol_template_name( cinder_vol.name)) volumes = (list(filter(lambda volume: (volume['display_name'] == cinder_name), all_vpsa_volumes))) for volume in volumes: existing_vols[volume['name']] = cinder_vol.id # Filter out all volumes already attached to any server volumes_in_use = {} volumes_not_available = {} for volume in all_vpsa_volumes: if volume['name'] in existing_vols: continue if volume['status'] == 'In-use': volumes_in_use[volume['name']] =\ self.vpsa._get_servers_attached_to_volume(volume) continue if volume['status'] != 'Available': volumes_not_available[volume['name']] = volume['display_name'] continue manageable_vols = [] for vpsa_volume in all_vpsa_volumes: vol_name = vpsa_volume['name'] vol_display_name = vpsa_volume['display_name'] cinder_id = existing_vols.get(vol_name) not_safe_msgs = [] if vol_name in volumes_in_use: host_list = volumes_in_use[vol_name] not_safe_msgs.append(_('Volume connected to host(s) %s') % host_list) elif vol_name in volumes_not_available: not_safe_msgs.append(_('Volume not available')) if cinder_id: not_safe_msgs.append(_('Volume already managed')) is_safe = (len(not_safe_msgs) == 0) reason_not_safe = ' && '.join(not_safe_msgs) manageable_vols.append({ 'reference': {'name': vol_display_name}, 'size': vpsa_volume['virtual_capacity'], 'safe_to_manage': is_safe, 'reason_not_safe': reason_not_safe, 'cinder_id': cinder_id, }) return volume_utils.paginate_entries_list( manageable_vols, marker, limit, offset, sort_keys, sort_dirs) def manage_existing(self, volume, existing_ref): """Bring an existing volume into cinder management""" self._validate_existing_ref(existing_ref) # Check if the volume exists in vpsa name = existing_ref['name'] vpsa_volume = self.vpsa._get_vpsa_volume(name) if not vpsa_volume: msg = (_('Volume %(name)s could not be found. 
' 'It might be already deleted') % {'name': name}) LOG.error(msg) raise cinder_exception.ManageExistingInvalidReference( existing_ref=existing_ref, reason=msg) # Check if the volume is available if vpsa_volume['status'] != 'Available': msg = (_('Existing volume %(name)s is not available') % {'name': name}) LOG.error(msg) raise cinder_exception.ManageExistingInvalidReference( existing_ref=existing_ref, reason=msg) # Rename the volume to cinder specified name new_name = self._get_zadara_vol_template_name(volume.name) new_vpsa_volume = self.vpsa._get_vpsa_volume(new_name) if new_vpsa_volume: msg = (_('Volume %(new_name)s already exists') % {'new_name': new_name}) LOG.error(msg) raise cinder_exception.VolumeDriverException(message=msg) data = self.vpsa_send_cmd('rename_volume', vpsa_vol=vpsa_volume['name'], new_name=new_name) return {'provider_location': data.get('vol_name')} def manage_existing_get_size(self, volume, existing_ref): """Return size of volume to be managed by manage_existing""" # Check if the volume exists in vpsa self._validate_existing_ref(existing_ref) name = existing_ref['name'] vpsa_volume = self.vpsa._get_vpsa_volume(name) if not vpsa_volume: msg = (_('Volume %(name)s could not be found. ' 'It might be already deleted') % {'name': volume.name}) LOG.error(msg) raise cinder_exception.ManageExistingInvalidReference( existing_ref=existing_ref, reason=msg) # Return the size of the volume return vpsa_volume['virtual_capacity'] def unmanage(self, volume): """Removes the specified volume from Cinder management""" pass def get_manageable_snapshots(self, cinder_snapshots, marker, limit, offset, sort_keys, sort_dirs): """Interface to support listing manageable snapshots and volumes""" # Get all snapshots vpsa_snapshots = self.vpsa._get_all_vpsa_snapshots() # Get all snapshots of all volumes all_vpsa_snapshots = [] for vpsa_snap in vpsa_snapshots: if (vpsa_snap['pool_name'] == self.configuration.zadara_vpsa_poolname): vpsa_snap['volume_name'] = vpsa_snap['volume_display_name'] vpsa_snap['size'] = float(vpsa_snap['volume_capacity_mb'] / 1024) all_vpsa_snapshots.append(vpsa_snap) existing_snapshots = {} for cinder_snapshot in cinder_snapshots: if cinder_snapshot.provider_location: snapshots = (list(filter(lambda snapshot: ((snapshot['volume_ext_name'] == cinder_snapshot.volume.provider_location) and (snapshot['name'] == cinder_snapshot.provider_location)), all_vpsa_snapshots))) else: volume_name = (self._get_zadara_vol_template_name( cinder_snapshot.volume_name)) snapshots = (list(filter(lambda snapshot: ((snapshot['volume_display_name'] == volume_name) and (snapshot['display_name'] == cinder_snapshot.name)), all_vpsa_snapshots))) for snapshot in snapshots: existing_snapshots[snapshot['name']] = cinder_snapshot.id manageable_snapshots = [] try: unique_snapshots = [] for snapshot in all_vpsa_snapshots: snap_id = snapshot['name'] if snap_id in unique_snapshots: continue cinder_id = existing_snapshots.get(snap_id) is_safe = True reason_not_safe = None if cinder_id: is_safe = False reason_not_safe = _("Snapshot already managed.") manageable_snapshots.append({ 'reference': {'name': snapshot['display_name']}, 'size': snapshot['size'], 'safe_to_manage': is_safe, 'reason_not_safe': reason_not_safe, 'cinder_id': cinder_id, 'extra_info': None, 'source_reference': {'name': snapshot['volume_name']}, }) unique_snapshots.append(snap_id) return volume_utils.paginate_entries_list( manageable_snapshots, marker, limit, offset, sort_keys, sort_dirs) except Exception as e: msg = (_('Exception: %s') % 
str(e)) LOG.error(msg) raise def manage_existing_snapshot(self, snapshot, existing_ref): """Brings an existing backend storage object under Cinder management""" self._validate_existing_ref(existing_ref) snap_name = existing_ref['name'] volume = self._get_vpsa_volume(snapshot.volume, False) if not volume: msg = (_('Source volume of snapshot %s could not be found.' ' Invalid data') % snap_name) LOG.error(msg) raise cinder_exception.ManageExistingInvalidReference( existing_ref=existing_ref, reason=msg) # Check if the snapshot exists snap_id = self.vpsa._get_snap_id(volume['cg_name'], snap_name) if not snap_id: msg = (_('Snapshot %s could not be found. It might be' ' already deleted') % snap_name) LOG.error(msg) raise cinder_exception.ManageExistingInvalidReference( existing_ref=existing_ref, reason=msg) new_name = snapshot.name new_snap_id = self.vpsa._get_snap_id(volume['cg_name'], new_name) if new_snap_id: msg = (_('Snapshot with name %s already exists') % new_name) LOG.debug(msg) return data = self.vpsa_send_cmd('rename_snapshot', snap_id=snap_id, new_name=new_name) return {'provider_location': data.get('snapshot_name')} def manage_existing_snapshot_get_size(self, snapshot, existing_ref): """Return size of snapshot to be managed by manage_existing""" # We do not have any size field for a snapshot. # We only have it on volumes. So, here just figure # out the parent volume of this snapshot and return its size self._validate_existing_ref(existing_ref) snap_name = existing_ref['name'] volume = self._get_vpsa_volume(snapshot.volume, False) if not volume: msg = (_('Source volume of snapshot %s could not be found.' ' Invalid data') % snap_name) LOG.error(msg) raise cinder_exception.ManageExistingInvalidReference( existing_ref=existing_ref, reason=msg) snap_id = self.vpsa._get_snap_id(volume['cg_name'], snap_name) if not snap_id: msg = (_('Snapshot %s could not be found. It might be ' 'already deleted') % snap_name) LOG.error(msg) raise cinder_exception.ManageExistingInvalidReference( existing_ref=existing_ref, reason=msg) return volume['virtual_capacity'] def unmanage_snapshot(self, snapshot): """Removes the specified snapshot from Cinder management""" pass def initialize_connection(self, volume, connector): """Attach volume to initiator/host. During this call VPSA exposes volume to particular Initiator. It also creates a 'server' entity for Initiator (if it was not created before) All necessary connection information is returned, including auth data. Connection data (target, LUN) is not stored in the DB. 
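        Illustrative shape of the returned dictionary (values shown here are
        hypothetical; the real ones come from the active controller and the
        attachment record)::

            {'driver_volume_type': 'iscsi',   # or 'iser'
             'data': {'target_discovered': False,
                      'target_portal': '10.1.1.10:3260',
                      'target_iqn': 'iqn.2011-04.com.zadara:vsa-0001:1',
                      'target_lun': 1,
                      'volume_id': volume.id,
                      'auth_method': 'CHAP',
                      'auth_username': '<chap user>',
                      'auth_password': '<chap secret>'}}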
""" # First: Check Active controller: if not valid, raise exception ctrl = self.vpsa._get_active_controller_details() if not ctrl: raise zadara_exception.ZadaraVPSANoActiveController() # Get/Create server name for IQN initiator_name = connector['initiator'] vpsa_srv = self.vpsa._create_vpsa_server(iqn=initiator_name) if not vpsa_srv: raise zadara_exception.ZadaraServerCreateFailure( name=initiator_name) # Get volume vpsa_volume = self._get_vpsa_volume(volume) servers = self.vpsa._get_servers_attached_to_volume(vpsa_volume) attach = None for server in servers: if server == vpsa_srv: attach = server break # Attach volume to server if attach is None: self.vpsa_send_cmd('attach_volume', vpsa_srv=vpsa_srv, vpsa_vol=vpsa_volume['name']) data = self.vpsa_send_cmd('list_vol_attachments', vpsa_vol=vpsa_volume['name']) server = None servers = data.get('servers', []) for srv in servers: if srv['iqn'] == initiator_name: server = srv break if server is None: vol_name = (self._get_zadara_vol_template_name( volume.name)) raise zadara_exception.ZadaraAttachmentsNotFound( name=vol_name) target = server['target'] lun = int(server['lun']) if None in [target, lun]: vol_name = (self._get_zadara_vol_template_name( volume.name)) raise zadara_exception.ZadaraInvalidAttachmentInfo( name=vol_name, reason=_('target=%(target)s, lun=%(lun)s') % {'target': target, 'lun': lun}) ctrl_ip = self.vpsa._get_target_host(ctrl['ip']) properties = {'target_discovered': False, 'target_portal': (('%s:%s') % (ctrl_ip, '3260')), 'target_iqn': target, 'target_lun': lun, 'volume_id': volume.id, 'auth_method': 'CHAP', 'auth_username': ctrl['chap_user'], 'auth_password': ctrl['chap_passwd']} LOG.debug('Attach properties: %(properties)s', {'properties': strutils.mask_password(properties)}) return {'driver_volume_type': ('iser' if (self.configuration.safe_get('zadara_use_iser')) else 'iscsi'), 'data': properties} def terminate_connection(self, volume, connector, **kwargs): """Detach volume from the initiator.""" vpsa_volume = self._get_vpsa_volume(volume) if connector is None: # Detach volume from all servers # Get volume name self.vpsa._detach_vpsa_volume(vpsa_vol=vpsa_volume) return # Check if there are multiple attachments to the volume from the # same host. Terminate connection only for the last attachment from # the corresponding host. 
count = 0 host = connector.get('host') if connector else None if host and volume.get('multiattach'): attach_list = volume.volume_attachment for attachment in attach_list: if (attachment['attach_status'] != fields.VolumeAttachStatus.ATTACHED): continue if attachment.attached_host == host: count += 1 if count > 1: return # Get server name for IQN initiator_name = connector['initiator'] vpsa_srv = self.vpsa._get_server_name(initiator_name, False) if not vpsa_srv: raise zadara_exception.ZadaraServerNotFound(name=initiator_name) if not vpsa_volume: raise cinder_exception.VolumeNotFound(volume_id=volume.id) # Detach volume from server self.vpsa._detach_vpsa_volume(vpsa_vol=vpsa_volume, vpsa_srv=vpsa_srv) def _update_volume_stats(self): """Retrieve stats info from volume group.""" LOG.debug("Updating volume stats") backend_name = self.configuration.safe_get('volume_backend_name') storage_protocol = (constants.ISER if (self.configuration.safe_get('zadara_use_iser')) else constants.ISCSI) pool_name = self.configuration.zadara_vpsa_poolname (total, free, provisioned) = self.vpsa._get_pool_capacity(pool_name) data = dict( volume_backend_name=backend_name or self.__class__.__name__, vendor_name='Zadara Storage', driver_version=self.VERSION, storage_protocol=storage_protocol, reserved_percentage=self.configuration.reserved_percentage, QoS_support=False, multiattach=True, total_capacity_gb=total, free_capacity_gb=free ) self._stats = data ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315577.4071214 cinder-27.0.0/cinder/volume/flows/0000775000175000017500000000000000000000000016774 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/flows/__init__.py0000664000175000017500000000000000000000000021073 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315577.4111214 cinder-27.0.0/cinder/volume/flows/api/0000775000175000017500000000000000000000000017545 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/flows/api/__init__.py0000664000175000017500000000000000000000000021644 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/flows/api/create_volume.py0000664000175000017500000011561600000000000022763 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
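# Illustrative sketch only (hypothetical wiring, not code from this module):
# the tasks defined below are composed into a taskflow linear flow on the API
# side and executed roughly like:
#
#   flow = linear_flow.Flow('volume_create_api').add(
#       ExtractVolumeRequestTask(image_service, availability_zones))
#   taskflow.engines.load(flow, store=request_spec).run()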
from typing import Any, Optional, Type, Union from oslo_config import cfg from oslo_log import log as logging import taskflow.engines from taskflow.patterns import linear_flow from taskflow.types import failure as ft from cinder import context from cinder import exception from cinder import flow_utils from cinder.i18n import _ from cinder.image import glance from cinder import objects from cinder.objects import fields from cinder.policies import volumes as policy from cinder import quota from cinder import quota_utils from cinder import utils from cinder.volume.flows import common from cinder.volume import volume_types from cinder.volume import volume_utils LOG = logging.getLogger(__name__) ACTION = 'volume:create' CONF = cfg.CONF QUOTAS = quota.QUOTAS # Only in these 'sources' status can we attempt to create a volume from a # source volume or a source snapshot, other status states we can not create # from, 'error' being the common example. SNAPSHOT_PROCEED_STATUS = (fields.SnapshotStatus.AVAILABLE,) SRC_VOL_PROCEED_STATUS = ('available', 'in-use',) REPLICA_PROCEED_STATUS = ('active', 'active-stopped',) CG_PROCEED_STATUS = ('available', 'creating',) CGSNAPSHOT_PROCEED_STATUS = ('available',) GROUP_PROCEED_STATUS = ('available', 'creating',) BACKUP_PROCEED_STATUS = (fields.BackupStatus.AVAILABLE,) class ExtractVolumeRequestTask(flow_utils.CinderTask): """Processes an api request values into a validated set of values. This tasks responsibility is to take in a set of inputs that will form a potential volume request and validates those values against a set of conditions and/or translates those values into a valid set and then returns the validated/translated values for use by other tasks. Reversion strategy: N/A """ # This task will produce the following outputs (said outputs can be # saved to durable storage in the future so that the flow can be # reconstructed elsewhere and continued). default_provides = set(['size', 'snapshot_id', 'source_volid', 'volume_type', 'volume_type_id', 'encryption_key_id', 'consistencygroup_id', 'cgsnapshot_id', 'qos_specs', 'group_id', 'refresh_az', 'backup_id', 'availability_zones', 'multiattach']) def __init__(self, image_service: glance.GlanceImageService, availability_zones, **kwargs) -> None: super(ExtractVolumeRequestTask, self).__init__(addons=[ACTION], **kwargs) self.image_service = image_service self.availability_zones = availability_zones @staticmethod def _extract_resource(resource: Optional[dict], allowed_vals: tuple[tuple[str, ...]], exc: Type[exception.CinderException], resource_name: str, props: tuple[str] = ('status',)) -> Optional[str]: """Extracts the resource id from the provided resource. This method validates the input resource dict and checks that the properties which names are passed in `props` argument match corresponding lists in `allowed` argument. In case of mismatch exception of type exc is raised. :param resource: Resource dict. :param allowed_vals: Tuple of allowed values lists. :param exc: Exception type to raise. :param resource_name: Name of resource - used to construct log message. :param props: Tuple of resource properties names to validate. :return: Id of a resource. 
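        Illustrative example (hypothetical values): with props=('status',) and
        allowed_vals=(SNAPSHOT_PROCEED_STATUS,), a resource of
        {'id': 'snap-1', 'status': 'available'} returns 'snap-1', while a
        'status' of 'error' raises the supplied exc.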
""" resource_id = None if resource: for prop, allowed_states in zip(props, allowed_vals): if resource[prop] not in allowed_states: msg = _("Originating %(res)s %(prop)s must be one of " "'%(vals)s' values") msg = msg % {'res': resource_name, 'prop': prop, 'vals': ', '.join(allowed_states)} # TODO(harlowja): what happens if the status changes after # this initial resource status check occurs??? Seems like # someone could delete the resource after this check passes # but before the volume is officially created? raise exc(reason=msg) resource_id = resource['id'] return resource_id def _extract_consistencygroup( self, consistencygroup: Optional[dict]) -> Optional[str]: return self._extract_resource(consistencygroup, (CG_PROCEED_STATUS,), exception.InvalidConsistencyGroup, 'consistencygroup') def _extract_group( self, group: Optional[dict]) -> Optional[str]: return self._extract_resource(group, (GROUP_PROCEED_STATUS,), exception.InvalidGroup, 'group') def _extract_cgsnapshot( self, cgsnapshot: Optional[dict]) -> Optional[str]: return self._extract_resource(cgsnapshot, (CGSNAPSHOT_PROCEED_STATUS,), exception.InvalidCgSnapshot, 'CGSNAPSHOT') def _extract_snapshot( self, snapshot: Optional[dict]) -> Optional[str]: return self._extract_resource(snapshot, (SNAPSHOT_PROCEED_STATUS,), exception.InvalidSnapshot, 'snapshot') def _extract_source_volume( self, source_volume: Optional[dict]) -> Optional[str]: return self._extract_resource(source_volume, (SRC_VOL_PROCEED_STATUS,), exception.InvalidVolume, 'source volume') def _extract_backup( self, backup: Optional[dict]) -> Optional[str]: return self._extract_resource(backup, (BACKUP_PROCEED_STATUS,), exception.InvalidBackup, 'backup') @staticmethod def _extract_size(size: int, source_volume: Optional[objects.Volume], snapshot: Optional[objects.Snapshot], backup: Optional[objects.Backup]) -> int: """Extracts and validates the volume size. This function will validate or when not provided fill in the provided size variable from the source_volume or snapshot and then does validation on the size that is found and returns said validated size. """ def validate_snap_size(size: int) -> None: if snapshot and size < snapshot.volume_size: msg = _("Volume size '%(size)s'GB cannot be smaller than" " the snapshot size %(snap_size)sGB. " "They must be >= original snapshot size.") msg = msg % {'size': size, 'snap_size': snapshot.volume_size} raise exception.InvalidInput(reason=msg) def validate_source_size(size: int) -> None: if source_volume and size < source_volume['size']: msg = _("Volume size '%(size)s'GB cannot be smaller than " "original volume size %(source_size)sGB. " "They must be >= original volume size.") msg = msg % {'size': size, 'source_size': source_volume['size']} raise exception.InvalidInput(reason=msg) def validate_backup_size(size: int) -> None: if backup and size < backup['size']: msg = _("Volume size %(size)sGB cannot be smaller than " "the backup size %(backup_size)sGB. " "It must be >= backup size.") msg = msg % {'size': size, 'backup_size': backup['size']} raise exception.InvalidInput(reason=msg) def validate_int(size: int) -> None: if not isinstance(size, int) or size <= 0: msg = _("Volume size '%(size)s' must be an integer and" " greater than 0") % {'size': size} raise exception.InvalidInput(reason=msg) # Figure out which validation functions we should be applying # on the size value that we extract. 
validator_functors = [validate_int] if source_volume: validator_functors.append(validate_source_size) elif snapshot: validator_functors.append(validate_snap_size) elif backup: validator_functors.append(validate_backup_size) # If the size is not provided then try to provide it. if not size and source_volume: size = source_volume['size'] elif not size and snapshot: size = snapshot.volume_size elif not size and backup: size = backup['size'] size = utils.as_int(size) LOG.debug("Validating volume size '%(size)s' using %(functors)s", {'size': size, 'functors': ", ".join([common.make_pretty_name(func) for func in validator_functors])}) for func in validator_functors: func(size) return size def _get_image_metadata(self, context: context.RequestContext, image_id: Optional[str], size: int) -> Optional[dict[str, Any]]: """Checks image existence and validates the image metadata. Returns: image metadata or None """ # Check image existence if image_id is None: return None # NOTE(harlowja): this should raise an error if the image does not # exist, this is expected as it signals that the image_id is missing. image_meta = self.image_service.show(context, image_id) volume_utils.check_image_metadata(image_meta, size) return image_meta def _extract_availability_zones( self, availability_zone: Optional[str], snapshot, source_volume, group: Optional[dict], volume_type: Optional[dict[str, Any]] = None) -> tuple[list[str], bool]: """Extracts and returns a validated availability zone list. This function will extract the availability zone (if not provided) from the snapshot or source_volume and then performs a set of validation checks on the provided or extracted availability zone and then returns the validated availability zone. """ refresh_az = False type_azs = volume_utils.extract_availability_zones_from_volume_type( volume_type) type_az_configured = type_azs is not None if type_az_configured: assert type_azs is not None safe_azs = list( set(type_azs).intersection(self.availability_zones)) if not safe_azs: raise exception.InvalidTypeAvailabilityZones(az=type_azs) else: safe_azs = self.availability_zones # If the volume will be created in a group, it should be placed in # in same availability zone as the group. if group: try: availability_zone = group['availability_zone'] except (TypeError, KeyError): pass # Try to extract the availability zone from the corresponding snapshot # or source volume if either is valid so that we can be in the same # availability zone as the source. 
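        # Worked example (hypothetical AZ names): with availability_zone=None,
        # no type-level AZs and a snapshot whose volume lives in 'nova-2', the
        # code below selects 'nova-2'; if nothing is found it falls back to
        # CONF.default_availability_zone and then
        # CONF.storage_availability_zone.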
if availability_zone is None: if snapshot: try: availability_zone = snapshot['volume']['availability_zone'] except (TypeError, KeyError): pass if source_volume: try: availability_zone = source_volume['availability_zone'] except (TypeError, KeyError): pass if availability_zone is None and not type_az_configured: if CONF.default_availability_zone: availability_zone = CONF.default_availability_zone else: # For backwards compatibility use the storage_availability_zone availability_zone = CONF.storage_availability_zone if availability_zone and availability_zone not in safe_azs: refresh_az = True if CONF.allow_availability_zone_fallback: original_az = availability_zone availability_zone = ( CONF.default_availability_zone or CONF.storage_availability_zone) LOG.warning("Availability zone '%(s_az)s' " "not found, falling back to " "'%(s_fallback_az)s'.", {'s_az': original_az, 's_fallback_az': availability_zone}) else: raise exception.InvalidAvailabilityZone(az=availability_zone) # If the configuration only allows cloning to the same availability # zone then we need to enforce that. if availability_zone and CONF.cloned_volume_same_az: snap_az = None try: snap_az = snapshot['volume']['availability_zone'] except (TypeError, KeyError): pass if snap_az and snap_az != availability_zone: msg = _("Volume must be in the same " "availability zone as the snapshot") raise exception.InvalidInput(reason=msg) source_vol_az = None try: source_vol_az = source_volume['availability_zone'] except (TypeError, KeyError): pass if source_vol_az and source_vol_az != availability_zone: msg = _("Volume must be in the same " "availability zone as the source volume") raise exception.InvalidInput(reason=msg) if availability_zone: return [availability_zone], refresh_az else: return safe_azs, refresh_az def _get_encryption_key_id( self, key_manager, context: context.RequestContext, volume_type_id: Optional[str], snapshot: Optional[objects.Snapshot], source_volume: Optional[objects.Volume], image_metadata: Optional[dict[str, Any]]) -> Optional[str]: if volume_type_id and volume_types.is_encrypted( context, volume_type_id): encryption_key_id = None if snapshot is not None: # creating from snapshot encryption_key_id = snapshot['encryption_key_id'] elif source_volume is not None: # cloning volume encryption_key_id = source_volume['encryption_key_id'] elif image_metadata is not None: # creating from image encryption_key_id = image_metadata.get( 'cinder_encryption_key_id') # NOTE(joel-coffman): References to the encryption key should *not* # be copied because the key is deleted when the volume is deleted. # Clone the existing key and associate a separate -- but # identical -- key with each volume. new_encryption_key_id: Optional[str] if encryption_key_id is not None: new_encryption_key_id = volume_utils.clone_encryption_key( context, key_manager, encryption_key_id) else: new_encryption_key_id = volume_utils.create_encryption_key( context, key_manager, volume_type_id) return new_encryption_key_id else: return None @staticmethod def _get_volume_type( context: context.RequestContext, volume_type: Optional[Any], source_volume: Optional[objects.Volume], snapshot: Optional[objects.Snapshot], image_volume_type_id: Optional[str]) -> objects.VolumeType: """Returns a volume_type object or raises. 
Never returns None.""" if volume_type: return volume_type identifier = None if source_volume: identifier = {'source': 'volume', 'id': source_volume['volume_type_id']} elif snapshot: identifier = {'source': 'snapshot', 'id': snapshot['volume_type_id']} elif image_volume_type_id: identifier = {'source': 'image', 'id': image_volume_type_id} if identifier: try: return objects.VolumeType.get_by_name_or_id( context, identifier['id']) except (exception.VolumeTypeNotFound, exception.VolumeTypeNotFoundByName, exception.InvalidVolumeType): LOG.exception("Failed to find volume type from " "source %(source)s, identifier %(id)s", identifier) raise # otherwise, use the default volume type return volume_types.get_default_volume_type(context) def execute(self, context: context.RequestContext, size: int, snapshot: Optional[dict], image_id: Optional[str], source_volume: Optional[dict], availability_zone: Optional[str], volume_type, metadata, key_manager, consistencygroup, cgsnapshot, group, group_snapshot, backup: Optional[dict]) -> dict[str, Any]: utils.check_exclusive_options(snapshot=snapshot, imageRef=image_id, source_volume=source_volume, backup=backup) context.authorize(policy.CREATE_POLICY) # TODO(harlowja): what guarantee is there that the snapshot or source # volume will remain available after we do this initial verification?? snapshot_id = self._extract_snapshot(snapshot) source_volid = self._extract_source_volume(source_volume) backup_id = self._extract_backup(backup) size = self._extract_size(size, source_volume, snapshot, backup) consistencygroup_id = self._extract_consistencygroup(consistencygroup) cgsnapshot_id = self._extract_cgsnapshot(cgsnapshot) group_id = self._extract_group(group) image_meta = self._get_image_metadata(context, image_id, size) image_properties = image_meta.get( 'properties', {}) if image_meta else {} image_volume_type = image_properties.get( 'cinder_img_volume_type', None) if image_properties else None volume_type = self._get_volume_type( context, volume_type, source_volume, snapshot, image_volume_type) volume_type_id = volume_type.get('id') if volume_type else None availability_zones, refresh_az = self._extract_availability_zones( availability_zone, snapshot, source_volume, group, volume_type=volume_type) encryption_key_id = self._get_encryption_key_id( key_manager, context, volume_type_id, snapshot, source_volume, image_meta) # new key id that's been cloned already if volume_type_id: volume_type = objects.VolumeType.get_by_name_or_id( context, volume_type_id) extra_specs = volume_type.get('extra_specs', {}) multiattach = (extra_specs.get('multiattach', '') == ' True') if multiattach and encryption_key_id: msg = _('Multiattach cannot be used with encrypted volumes.') raise exception.InvalidVolume(reason=msg) if multiattach: context.authorize(policy.MULTIATTACH_POLICY) specs: Optional[dict] = {} if volume_type_id: qos_specs = volume_types.get_volume_type_qos_specs(volume_type_id) if qos_specs['qos_specs']: specs = qos_specs['qos_specs'].get('specs', {}) # Determine default replication status extra_specs = volume_types.get_volume_type_extra_specs( volume_type_id) if not specs: # to make sure we don't pass empty dict specs = None extra_specs = None if volume_utils.is_replicated_spec(extra_specs): replication_status = fields.ReplicationStatus.ENABLED else: replication_status = fields.ReplicationStatus.DISABLED return { 'size': size, 'snapshot_id': snapshot_id, 'source_volid': source_volid, 'volume_type': volume_type, 'volume_type_id': volume_type_id, 'encryption_key_id': 
encryption_key_id, 'qos_specs': specs, 'consistencygroup_id': consistencygroup_id, 'cgsnapshot_id': cgsnapshot_id, 'group_id': group_id, 'replication_status': replication_status, 'refresh_az': refresh_az, 'backup_id': backup_id, 'multiattach': multiattach, 'availability_zones': availability_zones } class EntryCreateTask(flow_utils.CinderTask): """Creates an entry for the given volume creation in the database. Reversion strategy: remove the volume_id created from the database. """ default_provides = set(['volume_properties', 'volume_id', 'volume']) def __init__(self) -> None: requires = ['description', 'metadata', 'name', 'reservations', 'size', 'snapshot_id', 'source_volid', 'volume_type_id', 'encryption_key_id', 'consistencygroup_id', 'cgsnapshot_id', 'multiattach', 'qos_specs', 'group_id', 'availability_zones'] super(EntryCreateTask, self).__init__(addons=[ACTION], requires=requires) def execute(self, context: context.RequestContext, optional_args: dict, **kwargs) -> dict[str, Any]: """Creates a database entry for the given inputs and returns details. Accesses the database and creates a new entry for the to be created volume using the given volume properties which are extracted from the input kwargs (and associated requirements this task needs). These requirements should be previously satisfied and validated by a pre-cursor task. """ src_volid = kwargs.get('source_volid') src_vol = None if src_volid is not None: src_vol = objects.Volume.get_by_id(context, src_volid) bootable = False if src_vol is not None: bootable = src_vol.bootable elif kwargs.get('snapshot_id'): snapshot = objects.Snapshot.get_by_id(context, kwargs.get('snapshot_id')) volume_id = snapshot.volume_id snp_vol = objects.Volume.get_by_id(context, volume_id) if snp_vol is not None: bootable = snp_vol.bootable availability_zones = kwargs.pop('availability_zones') volume_properties = { 'size': kwargs.pop('size'), 'user_id': context.user_id, 'project_id': context.project_id, 'status': 'creating', 'attach_status': fields.VolumeAttachStatus.DETACHED, 'encryption_key_id': kwargs.pop('encryption_key_id'), # Rename these to the internal name. 'display_description': kwargs.pop('description'), 'display_name': kwargs.pop('name'), 'multiattach': kwargs.pop('multiattach'), 'bootable': bootable, } if len(availability_zones) == 1: volume_properties['availability_zone'] = availability_zones[0] # Merge in the other required arguments which should provide the rest # of the volume property fields (if applicable). volume_properties.update(kwargs) volume = objects.Volume(context=context, **volume_properties) volume.create() # FIXME(dulek): We're passing this volume_properties dict through RPC # in request_spec. This shouldn't be needed, most data is replicated # in both volume and other places. We should make Newton read data # from just one correct place and leave just compatibility code. # # Right now - let's move it to versioned objects to be able to make # non-backward compatible changes. volume_properties = objects.VolumeProperties(**volume_properties) return { 'volume_id': volume['id'], 'volume_properties': volume_properties, # NOTE(harlowja): it appears like further usage of this volume # result actually depend on it being a sqlalchemy object and not # just a plain dictionary so that's why we are storing this here. 
# # In the future where this task results can be serialized and # restored automatically for continued running we will need to # resolve the serialization & recreation of this object since raw # sqlalchemy objects can't be serialized. 'volume': volume, } def revert(self, context: context.RequestContext, result: Union[dict, ft.Failure], optional_args: dict, **kwargs) -> None: if isinstance(result, ft.Failure): # We never produced a result and therefore can't destroy anything. return if optional_args['is_quota_committed']: # If quota got committed we shouldn't rollback as the volume has # already been created and the quota has already been absorbed. return volume = result['volume'] try: volume.destroy() except exception.CinderException: # We are already reverting, therefore we should silence this # exception since a second exception being active will be bad. # # NOTE(harlowja): Being unable to destroy a volume is pretty # bad though!! LOG.exception("Failed destroying volume entry %s", volume.id) class QuotaReserveTask(flow_utils.CinderTask): """Reserves a single volume with the given size & the given volume type. Reversion strategy: rollback the quota reservation. Warning Warning: if the process that is running this reserve and commit process fails (or is killed before the quota is rolled back or committed it does appear like the quota will never be rolled back). This makes software upgrades hard (inflight operations will need to be stopped or allowed to complete before the upgrade can occur). *In the future* when taskflow has persistence built-in this should be easier to correct via an automated or manual process. """ default_provides = set(['reservations']) def __init__(self): super(QuotaReserveTask, self).__init__(addons=[ACTION]) def execute(self, context: context.RequestContext, size: int, volume_type_id, group_snapshot: Optional[objects.Snapshot], optional_args: dict) -> Optional[dict]: try: values = {'per_volume_gigabytes': size} QUOTAS.limit_check(context, project_id=context.project_id, **values) except exception.OverQuota as e: quotas = e.kwargs['quotas'] raise exception.VolumeSizeExceedsLimit( size=size, limit=quotas['per_volume_gigabytes']) try: if group_snapshot: reserve_opts = {'volumes': 1} else: reserve_opts = {'volumes': 1, 'gigabytes': size} if ('update_size' in optional_args and optional_args['update_size']): reserve_opts.pop('volumes', None) QUOTAS.add_volume_type_opts(context, reserve_opts, volume_type_id) reservations = QUOTAS.reserve(context, **reserve_opts) return { 'reservations': reservations, } except exception.OverQuota as e: quota_utils.process_reserve_over_quota(context, e, resource='volumes', size=size) return None # TODO: is this correct? def revert(self, context: context.RequestContext, result: Union[dict, ft.Failure], optional_args: dict, **kwargs) -> None: # We never produced a result and therefore can't destroy anything. if isinstance(result, ft.Failure): return if optional_args['is_quota_committed']: # The reservations have already been committed and can not be # rolled back at this point. return # We actually produced an output that we can revert so lets attempt # to use said output to rollback the reservation. reservations = result['reservations'] try: QUOTAS.rollback(context, reservations) except exception.CinderException: # We are already reverting, therefore we should silence this # exception since a second exception being active will be bad. 
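# QuotaReserveTask and QuotaCommitTask split quota handling into a revertible
# reserve step and a commit step; the shared optional_args['is_quota_committed']
# flag tells the earlier tasks' revert() methods whether rollback is still safe.
# A rough standalone sketch of that handshake (hypothetical names, not the
# Cinder QUOTAS API):

class FakeQuotas:
    """Stand-in for a reserve/commit/rollback style quota engine."""
    def reserve(self, **deltas):
        return ['reservation-1']          # opaque reservation handles

    def commit(self, reservations):
        pass

    def rollback(self, reservations):
        pass


def create_with_quota(quotas, size):
    shared = {'is_quota_committed': False}
    reservations = quotas.reserve(volumes=1, gigabytes=size)
    try:
        # ... create the database record here ...
        quotas.commit(reservations)
        shared['is_quota_committed'] = True
    except Exception:
        if not shared['is_quota_committed']:
            quotas.rollback(reservations)   # safe: usage was never absorbed
        raise


create_with_quota(FakeQuotas(), size=10)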
LOG.exception("Failed rolling back quota for" " %s reservations", reservations) class QuotaCommitTask(flow_utils.CinderTask): """Commits the reservation. Reversion strategy: N/A (the rollback will be handled by the task that did the initial reservation (see: QuotaReserveTask). Warning Warning: if the process that is running this reserve and commit process fails (or is killed before the quota is rolled back or committed it does appear like the quota will never be rolled back). This makes software upgrades hard (inflight operations will need to be stopped or allowed to complete before the upgrade can occur). *In the future* when taskflow has persistence built-in this should be easier to correct via an automated or manual process. """ def __init__(self): super(QuotaCommitTask, self).__init__(addons=[ACTION]) def execute(self, context: context.RequestContext, reservations, volume_properties, optional_args: dict) -> dict: QUOTAS.commit(context, reservations) # updating is_quota_committed attribute of optional_args dictionary optional_args['is_quota_committed'] = True return {'volume_properties': volume_properties} def revert(self, context: context.RequestContext, result: Union[dict, ft.Failure], **kwargs) -> None: # We never produced a result and therefore can't destroy anything. if isinstance(result, ft.Failure): return volume = result['volume_properties'] try: reserve_opts = {'volumes': -1, 'gigabytes': -volume['size']} QUOTAS.add_volume_type_opts(context, reserve_opts, volume['volume_type_id']) reservations = QUOTAS.reserve(context, project_id=context.project_id, **reserve_opts) if reservations: QUOTAS.commit(context, reservations, project_id=context.project_id) except Exception: LOG.exception("Failed to update quota for deleting " "volume: %s", volume['id']) class VolumeCastTask(flow_utils.CinderTask): """Performs a volume create cast to the scheduler or to the volume manager. This will signal a transition of the api workflow to another child and/or related workflow on another component. Reversion strategy: rollback source volume status and error out newly created volume. """ def __init__(self, scheduler_rpcapi, volume_rpcapi, db) -> None: requires = ['image_id', 'scheduler_hints', 'snapshot_id', 'source_volid', 'volume_id', 'volume', 'volume_type', 'volume_properties', 'consistencygroup_id', 'cgsnapshot_id', 'group_id', 'backup_id', 'availability_zones'] super(VolumeCastTask, self).__init__(addons=[ACTION], requires=requires) self.volume_rpcapi = volume_rpcapi self.scheduler_rpcapi = scheduler_rpcapi self.db = db def _cast_create_volume(self, context: context.RequestContext, request_spec: dict[str, Any], filter_properties: dict) -> None: source_volid = request_spec['source_volid'] volume = request_spec['volume'] snapshot_id = request_spec['snapshot_id'] image_id = request_spec['image_id'] cgroup_id = request_spec['consistencygroup_id'] group_id = request_spec['group_id'] backup_id = request_spec['backup_id'] if cgroup_id: # If cgroup_id existed, we should cast volume to the scheduler # to choose a proper pool whose backend is same as CG's backend. cgroup = objects.ConsistencyGroup.get_by_id(context, cgroup_id) request_spec['resource_backend'] = volume_utils.extract_host( cgroup.resource_backend) elif group_id: # If group_id exists, we should cast volume to the scheduler # to choose a proper pool whose backend is same as group's backend. 
group = objects.Group.get_by_id(context, group_id) request_spec['resource_backend'] = volume_utils.extract_host( group.resource_backend) elif snapshot_id and CONF.snapshot_same_host: # NOTE(Rongze Zhu): A simple solution for bug 1008866. # # If snapshot_id is set and CONF.snapshot_same_host is True, make # the call create volume directly to the volume host where the # snapshot resides instead of passing it through the scheduler, so # snapshot can be copied to the new volume. # NOTE(tommylikehu): In order to check the backend's capacity # before creating volume, we schedule this request to scheduler # service with the desired backend information. snapshot = objects.Snapshot.get_by_id(context, snapshot_id) request_spec['resource_backend'] = snapshot.volume.resource_backend elif source_volid: source_volume_ref = objects.Volume.get_by_id(context, source_volid) request_spec['resource_backend'] = ( source_volume_ref.resource_backend) self.scheduler_rpcapi.create_volume( context, volume, snapshot_id=snapshot_id, image_id=image_id, request_spec=request_spec, filter_properties=filter_properties, backup_id=backup_id) def execute(self, context: context.RequestContext, **kwargs) -> None: scheduler_hints = kwargs.pop('scheduler_hints', None) db_vt = kwargs.pop('volume_type') kwargs['volume_type'] = None if db_vt: kwargs['volume_type'] = objects.VolumeType() objects.VolumeType()._from_db_object(context, kwargs['volume_type'], db_vt) request_spec = objects.RequestSpec(**kwargs) filter_properties = {} if scheduler_hints: filter_properties['scheduler_hints'] = scheduler_hints self._cast_create_volume(context, request_spec, filter_properties) def revert(self, context: context.RequestContext, result: Union[dict, ft.Failure], flow_failures, volume: objects.Volume, **kwargs) -> None: if isinstance(result, ft.Failure): return # Restore the source volume status and set the volume to error status. common.restore_source_status(context, self.db, kwargs) common.error_out(volume) LOG.error("Volume %s: create failed", volume.id) exc_info = False if all(flow_failures[-1].exc_info): exc_info = flow_failures[-1].exc_info LOG.error('Unexpected build error:', exc_info=exc_info) # noqa def get_flow(db_api, image_service_api, availability_zones, create_what, scheduler_rpcapi=None, volume_rpcapi=None): """Constructs and returns the api entrypoint flow. This flow will do the following: 1. Inject keys & values for dependent tasks. 2. Extracts and validates the input keys & values. 3. Reserves the quota (reverts quota on any failures). 4. Creates the database entry. 5. Commits the quota. 6. Casts to volume manager or scheduler for further processing. """ flow_name = ACTION.replace(":", "_") + "_api" api_flow = linear_flow.Flow(flow_name) api_flow.add(ExtractVolumeRequestTask( image_service_api, availability_zones, rebind={'size': 'raw_size', 'availability_zone': 'raw_availability_zone', 'volume_type': 'raw_volume_type'})) api_flow.add(QuotaReserveTask(), EntryCreateTask(), QuotaCommitTask()) if scheduler_rpcapi and volume_rpcapi: # This will cast it out to either the scheduler or volume manager via # the rpc apis provided. api_flow.add(VolumeCastTask(scheduler_rpcapi, volume_rpcapi, db_api)) # Now load (but do not run) the flow using the provided initial data. 
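# get_flow wires the tasks into a taskflow linear flow and hands it to
# taskflow.engines.load() together with a 'store' of initial inputs; the caller
# later runs the returned engine. A minimal, self-contained taskflow example of
# that shape (toy tasks, not the Cinder ones):

import taskflow.engines
from taskflow.patterns import linear_flow
from taskflow import task


class Reserve(task.Task):
    default_provides = 'reservation'

    def execute(self, size):
        return 'reserved-%d' % size


class Create(task.Task):
    def execute(self, reservation):
        print('creating volume under %s' % reservation)


flow = linear_flow.Flow('toy_create_volume')
flow.add(Reserve(), Create())
engine = taskflow.engines.load(flow, store={'size': 10})
engine.run()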
return taskflow.engines.load(api_flow, store=create_what) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/flows/api/manage_existing.py0000664000175000017500000001345200000000000023266 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_log import log as logging import taskflow.engines from taskflow.patterns import linear_flow from taskflow.types import failure as ft from cinder import exception from cinder import flow_utils from cinder import objects from cinder.objects import fields from cinder.volume.flows.api import create_volume as create_api from cinder.volume.flows import common LOG = logging.getLogger(__name__) ACTION = 'volume:manage_existing' class EntryCreateTask(flow_utils.CinderTask): """Creates an entry for the given volume creation in the database. Reversion strategy: remove the volume_id created from the database. """ default_provides = set(['volume_properties', 'volume']) def __init__(self, db): requires = ['availability_zone', 'description', 'metadata', 'name', 'host', 'cluster_name', 'bootable', 'volume_type', 'ref'] super(EntryCreateTask, self).__init__(addons=[ACTION], requires=requires) self.db = db def execute(self, context, **kwargs): """Creates a database entry for the given inputs and returns details. Accesses the database and creates a new entry for the to be created volume using the given volume properties which are extracted from the input kwargs. """ volume_type = kwargs.pop('volume_type') volume_type_id = volume_type['id'] if volume_type else None multiattach = False if volume_type and volume_type.get('extra_specs'): multiattach = volume_type['extra_specs'].get( 'multiattach', '') == ' True' volume_properties = { 'size': 0, 'user_id': context.user_id, 'project_id': context.project_id, 'status': 'managing', 'attach_status': fields.VolumeAttachStatus.DETACHED, # Rename these to the internal name. 'display_description': kwargs.pop('description'), 'display_name': kwargs.pop('name'), 'host': kwargs.pop('host'), 'cluster_name': kwargs.pop('cluster_name'), 'availability_zone': kwargs.pop('availability_zone'), 'volume_type_id': volume_type_id, 'metadata': kwargs.pop('metadata') or {}, 'bootable': kwargs.pop('bootable'), 'multiattach': multiattach, } volume = objects.Volume(context=context, **volume_properties) volume.create() return { 'volume_properties': volume_properties, 'volume': volume, } def revert(self, context, result, optional_args=None, **kwargs): # We never produced a result and therefore can't destroy anything. if isinstance(result, ft.Failure): return vol_id = result['volume_id'] try: self.db.volume_destroy(context.elevated(), vol_id) except exception.CinderException: LOG.exception("Failed destroying volume entry: %s.", vol_id) class ManageCastTask(flow_utils.CinderTask): """Performs a volume manage cast to the scheduler and the volume manager. This which will signal a transition of the api workflow to another child and/or related workflow. 
""" def __init__(self, scheduler_rpcapi, db): requires = ['volume', 'volume_properties', 'volume_type', 'ref'] super(ManageCastTask, self).__init__(addons=[ACTION], requires=requires) self.scheduler_rpcapi = scheduler_rpcapi self.db = db def execute(self, context, volume, **kwargs): request_spec = kwargs.copy() request_spec['volume_id'] = volume.id # Call the scheduler to ensure that the host exists and that it can # accept the volume self.scheduler_rpcapi.manage_existing(context, volume, request_spec=request_spec) def revert(self, context, result, flow_failures, volume, **kwargs): # Restore the source volume status and set the volume to error status. common.error_out(volume, status='error_managing') LOG.error("Volume %s: manage failed.", volume.id) exc_info = False if all(flow_failures[-1].exc_info): exc_info = flow_failures[-1].exc_info LOG.error('Unexpected build error:', exc_info=exc_info) # noqa def get_flow(scheduler_rpcapi, db_api, create_what): """Constructs and returns the api entrypoint flow. This flow will do the following: 1. Inject keys & values for dependent tasks. 2. Extracts and validates the input keys & values. 3. Creates the database entry. 4. Casts to volume manager and scheduler for further processing. """ flow_name = ACTION.replace(":", "_") + "_api" api_flow = linear_flow.Flow(flow_name) # This will cast it out to either the scheduler or volume manager via # the rpc apis provided. api_flow.add(create_api.QuotaReserveTask(), EntryCreateTask(db_api), create_api.QuotaCommitTask(), ManageCastTask(scheduler_rpcapi, db_api)) # Now load (but do not run) the flow using the provided initial data. return taskflow.engines.load(api_flow, store=create_what) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/flows/common.py0000664000175000017500000000730500000000000020643 0ustar00zuulzuul00000000000000# Copyright (C) 2013 Yahoo! Inc. All Rights Reserved. # Copyright (c) 2013 OpenStack Foundation # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from typing import Callable from oslo_log import log as logging from cinder import exception LOG = logging.getLogger(__name__) # When a volume errors out we have the ability to save a piece of the exception # that caused said failure, but we don't want to save the whole message since # that could be very large, just save up to this number of characters. 
REASON_LENGTH = 128 def make_pretty_name(method: Callable) -> str: """Makes a pretty name for a function/method.""" meth_pieces = [method.__name__] # If its an instance method attempt to tack on the class name if hasattr(method, '__self__') and method.__self__ is not None: try: meth_pieces.insert(0, method.__self__.__class__.__name__) except AttributeError: pass return ".".join(meth_pieces) def restore_source_status(context, db, volume_spec): # NOTE(harlowja): Only if the type of the volume that was being created is # the source volume type should we try to reset the source volume status # back to its original value. if not volume_spec or volume_spec.get('type') != 'source_vol': return source_volid = volume_spec['source_volid'] source_status = volume_spec['source_volstatus'] try: LOG.debug('Restoring source %(source_volid)s status to %(status)s', {'status': source_status, 'source_volid': source_volid}) db.volume_update(context, source_volid, {'status': source_status}) except exception.CinderException: # NOTE(harlowja): Don't let this cause further exceptions since this is # a non-critical failure. LOG.exception("Failed setting source " "volume %(source_volid)s back to" " its initial %(source_status)s status", {'source_status': source_status, 'source_volid': source_volid}) def _clean_reason(reason): if reason is None: return 'Unknown reason' reason = str(reason) if len(reason) <= REASON_LENGTH: return reason else: return reason[0:REASON_LENGTH] + '...' def error_out(resource, reason=None, status='error'): """Sets status to error for any persistent OVO.""" reason = _clean_reason(reason) try: LOG.debug('Setting %(object_type)s %(object_id)s to error due to: ' '%(reason)s', {'object_type': resource.obj_name(), 'object_id': resource.id, 'reason': reason}) resource.status = status resource.save() except Exception: # Don't let this cause further exceptions. LOG.exception("Failed setting %(object_type)s %(object_id)s to " " %(status)s status.", {'object_type': resource.obj_name(), 'object_id': resource.id, 'status': status}) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315577.4111214 cinder-27.0.0/cinder/volume/flows/manager/0000775000175000017500000000000000000000000020406 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/flows/manager/__init__.py0000664000175000017500000000000000000000000022505 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315528.0 cinder-27.0.0/cinder/volume/flows/manager/create_volume.py0000664000175000017500000017260700000000000023627 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
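# make_pretty_name above turns a bound method into 'ClassName.method_name' for
# readable log lines. A quick standalone restatement with a usage check
# (illustrative, mirrors the helper's behaviour):

def make_name(method):
    pieces = [method.__name__]
    if getattr(method, '__self__', None) is not None:
        pieces.insert(0, method.__self__.__class__.__name__)
    return '.'.join(pieces)


class Scheduler:
    def create_volume(self):
        pass


assert make_name(Scheduler().create_volume) == 'Scheduler.create_volume'
assert make_name(make_name) == 'make_name'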
import binascii import traceback import typing from typing import Any, Optional from castellan import key_manager import os_brick.initiator.connectors from oslo_concurrency import processutils from oslo_config import cfg from oslo_log import log as logging from oslo_utils import excutils from oslo_utils import fileutils from oslo_utils import netutils from oslo_utils import strutils from oslo_utils import timeutils from oslo_utils import uuidutils import taskflow.engines from taskflow.patterns import linear_flow from taskflow.types import failure as ft from cinder import backup as backup_api from cinder.backup import rpcapi as backup_rpcapi from cinder import context as cinder_context from cinder import coordination from cinder import exception from cinder import flow_utils from cinder.i18n import _ from cinder.image import glance from cinder.image import image_utils from cinder.message import api as message_api from cinder.message import message_field from cinder import objects from cinder.objects import fields from cinder import utils from cinder.volume.flows import common from cinder.volume import volume_utils LOG = logging.getLogger(__name__) ACTION = 'volume:create' CONF = cfg.CONF # These attributes we will attempt to save for the volume if they exist # in the source image metadata. IMAGE_ATTRIBUTES = ( 'checksum', 'container_format', 'disk_format', 'min_disk', 'min_ram', 'size', ) REKEY_SUPPORTED_CONNECTORS = ( os_brick.initiator.connectors.iscsi.ISCSIConnector, os_brick.initiator.connectors.fibre_channel.FibreChannelConnector, ) class OnFailureRescheduleTask(flow_utils.CinderTask): """Triggers a rescheduling request to be sent when reverting occurs. If rescheduling doesn't occur this task errors out the volume. Reversion strategy: Triggers the rescheduling mechanism whereby a cast gets sent to the scheduler rpc api to allow for an attempt X of Y for scheduling this volume elsewhere. """ def __init__(self, reschedule_context, db, manager, scheduler_rpcapi, do_reschedule): requires = ['filter_properties', 'request_spec', 'volume', 'context'] super(OnFailureRescheduleTask, self).__init__(addons=[ACTION], requires=requires) self.do_reschedule = do_reschedule self.scheduler_rpcapi = scheduler_rpcapi self.db = db self.manager = manager self.reschedule_context = reschedule_context # These exception types will trigger the volume to be set into error # status rather than being rescheduled. self.no_reschedule_types = [ # Image copying happens after volume creation so rescheduling due # to copy failure will mean the same volume will be created at # another place when it still exists locally. exception.ImageCopyFailure, # Metadata updates happen after the volume has been created so if # they fail, rescheduling will likely attempt to create the volume # on another machine when it still exists locally. exception.MetadataCopyFailure, exception.MetadataUpdateFailure, # The volume/snapshot has been removed from the database, that # can not be fixed by rescheduling. exception.VolumeNotFound, exception.SnapshotNotFound, exception.VolumeTypeNotFound, exception.ImageConversionNotAllowed, exception.ImageUnacceptable, exception.ImageTooBig, exception.InvalidSignatureImage, exception.ImageSignatureVerificationException ] def execute(self, **kwargs): pass def _pre_reschedule(self, volume): """Actions that happen before the rescheduling attempt occur here.""" try: # Update volume's timestamp and host. 
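# OnFailureRescheduleTask does nothing on the forward path; its value is in
# revert(), which only fires when a later task in the flow fails. A toy
# taskflow illustration of such a "revert-only" task (hypothetical task names):

import taskflow.engines
from taskflow.patterns import linear_flow
from taskflow import task


class RescheduleOnFailure(task.Task):
    def execute(self):
        pass                      # nothing to do going forward

    def revert(self, *args, **kwargs):
        print('later task failed -> would re-cast to the scheduler here')


class DoWork(task.Task):
    def execute(self):
        raise RuntimeError('backend exploded')


flow = linear_flow.Flow('toy_manager_flow')
flow.add(RescheduleOnFailure(), DoWork())
try:
    taskflow.engines.load(flow).run()
except RuntimeError:
    pass                          # the failure propagates after reverts run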
# # NOTE(harlowja): this is awkward to be done here, shouldn't # this happen at the scheduler itself and not before it gets # sent to the scheduler? (since what happens if it never gets # there??). It's almost like we need a status of 'on-the-way-to # scheduler' in the future. # We don't need to update the volume's status to creating, since # we haven't changed it to error. update = { 'scheduled_at': timeutils.utcnow(), 'host': None, } LOG.debug("Updating volume %(volume_id)s with %(update)s.", {'update': update, 'volume_id': volume.id}) volume.update(update) volume.save() except exception.CinderException: # Don't let updating the state cause the rescheduling to fail. LOG.exception("Volume %s: update volume state failed.", volume.id) def _reschedule(self, context, cause, request_spec, filter_properties, volume) -> None: """Actions that happen during the rescheduling attempt occur here.""" create_volume = self.scheduler_rpcapi.create_volume if not filter_properties: filter_properties = {} if 'retry' not in filter_properties: filter_properties['retry'] = {} retry_info = filter_properties['retry'] num_attempts = retry_info.get('num_attempts', 0) request_spec['volume_id'] = volume.id LOG.debug("Volume %(volume_id)s: re-scheduling %(method)s " "attempt %(num)d due to %(reason)s", {'volume_id': volume.id, 'method': common.make_pretty_name(create_volume), 'num': num_attempts, 'reason': cause.exception_str}) if all(cause.exc_info): # Stringify to avoid circular ref problem in json serialization retry_info['exc'] = traceback.format_exception(*cause.exc_info) create_volume(context, volume, request_spec=request_spec, filter_properties=filter_properties) def _post_reschedule(self, volume): """Actions that happen after the rescheduling attempt occur here.""" LOG.debug("Volume %s: re-scheduled", volume.id) # NOTE(dulek): Here we should be sure that rescheduling occurred and # host field will be erased. Just in case volume was already created at # the backend, we attempt to delete it. try: self.manager.driver_delete_volume(volume) except Exception: # Most likely the volume weren't created at the backend. We can # safely ignore this. pass def revert(self, context, result, flow_failures, volume, **kwargs): # NOTE(dulek): Revert is occurring and manager need to know if # rescheduling happened. We're returning boolean flag that will # indicate that. It which will be available in flow engine store # through get_revert_result method. # If do not want to be rescheduled, just set the volume's status to # error and return. if not self.do_reschedule: common.error_out(volume) LOG.error("Volume %s: create failed", volume.id) return False # Check if we have a cause which can tell us not to reschedule and # set the volume's status to error. for failure in flow_failures.values(): if failure.check(*self.no_reschedule_types): common.error_out(volume) LOG.error("Volume %s: create failed", volume.id) return False # Use a different context when rescheduling. 
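# _reschedule above threads retry bookkeeping through
# filter_properties['retry'] (an attempt count plus a stringified traceback,
# since raw exc_info does not serialize for RPC). A small sketch of that
# record-keeping; the structure here is illustrative:

import json
import traceback


def note_retry(filter_properties, exc):
    retry = filter_properties.setdefault('retry', {})
    retry['num_attempts'] = retry.get('num_attempts', 0) + 1
    # Stringify so the whole dict stays JSON/RPC friendly.
    retry['exc'] = traceback.format_exception(type(exc), exc,
                                              exc.__traceback__)
    return filter_properties


props = {}
try:
    raise RuntimeError('no valid host')
except RuntimeError as err:
    note_retry(props, err)
json.dumps(props)                 # serializes cleanly
print(props['retry']['num_attempts'])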
if self.reschedule_context: cause = list(flow_failures.values())[0] context = self.reschedule_context try: self._pre_reschedule(volume) self._reschedule(context, cause, volume=volume, **kwargs) self._post_reschedule(volume) return True except exception.CinderException: LOG.exception("Volume %s: rescheduling failed", volume.id) return False class ExtractVolumeRefTask(flow_utils.CinderTask): """Extracts volume reference for given volume id.""" default_provides = 'refreshed' def __init__(self, db, host, set_error=True): super(ExtractVolumeRefTask, self).__init__(addons=[ACTION]) self.db = db self.host = host self.set_error = set_error def execute(self, context, volume): # NOTE(harlowja): this will fetch the volume from the database, if # the volume has been deleted before we got here then this should fail. # # In the future we might want to have a lock on the volume_id so that # the volume can not be deleted while its still being created? volume.refresh() return volume def revert(self, context, volume, result, **kwargs): if isinstance(result, ft.Failure) or not self.set_error: return reason = _('Volume create failed while extracting volume ref.') common.error_out(volume, reason) LOG.error("Volume %s: create failed", volume.id) class ExtractVolumeSpecTask(flow_utils.CinderTask): """Extracts a spec of a volume to be created into a common structure. This task extracts and organizes the input requirements into a common and easier to analyze structure for later tasks to use. It will also attach the underlying database volume reference which can be used by other tasks to reference for further details about the volume to be. Reversion strategy: N/A """ default_provides = 'volume_spec' def __init__(self, db): requires = ['volume', 'request_spec'] super(ExtractVolumeSpecTask, self).__init__(addons=[ACTION], requires=requires) self.db = db def execute(self, context, volume, request_spec): get_remote_image_service = glance.get_remote_image_service volume_name = volume.name volume_size = utils.as_int(volume.size, quiet=False) # Create a dictionary that will represent the volume to be so that # later tasks can easily switch between the different types and create # the volume according to the volume types specifications (which are # represented in this dictionary). specs = { 'status': volume.status, 'type': 'raw', # This will have the type of the volume to be # created, which should be one of [raw, snap, # source_vol, image, backup] 'volume_id': volume.id, 'volume_name': volume_name, 'volume_size': volume_size, } if volume.snapshot_id: # We are making a snapshot based volume instead of a raw volume. specs.update({ 'type': 'snap', 'snapshot_id': volume.snapshot_id, }) elif volume.source_volid: # We are making a source based volume instead of a raw volume. # # NOTE(harlowja): This will likely fail if the source volume # disappeared by the time this call occurred. source_volid = volume.source_volid source_volume_ref = objects.Volume.get_by_id(context, source_volid) specs.update({ 'source_volid': source_volid, # This is captured incase we have to revert and we want to set # back the source volume status to its original status. This # may or may not be sketchy to do?? 'source_volstatus': source_volume_ref.status, 'type': 'source_vol', }) elif request_spec.get('image_id'): # We are making an image based volume instead of a raw volume. 
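# The spec dict built by ExtractVolumeSpecTask carries a 'type' discriminator
# (raw, snap, source_vol, image, backup) that the later create-from-spec step
# dispatches on. A compact sketch of that dispatch style; the handler names
# below are illustrative, not the module's methods:

def create_raw(spec):
    return 'created %s GB raw volume' % spec['volume_size']


def create_from_snapshot(spec):
    return 'cloned volume from snapshot %s' % spec['snapshot_id']


HANDLERS = {
    'raw': create_raw,
    'snap': create_from_snapshot,
}


def create_from_spec(spec):
    try:
        handler = HANDLERS[spec['type']]
    except KeyError:
        raise ValueError('unknown volume spec type: %r' % spec['type'])
    return handler(spec)


print(create_from_spec({'type': 'snap', 'snapshot_id': 'snap-1'}))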
image_href = request_spec['image_id'] image_service, image_id = get_remote_image_service(context, image_href) specs.update({ 'type': 'image', 'image_id': image_id, 'image_location': image_service.get_location(context, image_id), 'image_meta': image_service.show(context, image_id), # Instead of refetching the image service later just save it. # # NOTE(harlowja): if we have to later recover this tasks output # on another 'node' that this object won't be able to be # serialized, so we will have to recreate this object on # demand in the future. 'image_service': image_service, }) elif request_spec.get('backup_id'): # We are making a backup based volume instead of a raw volume. specs.update({ 'type': 'backup', 'backup_id': request_spec['backup_id'], # NOTE(luqitao): if the driver does not implement the method # `create_volume_from_backup`, cinder-backup will update the # volume's status, otherwise we need update it in the method # `CreateVolumeOnFinishTask`. 'need_update_volume': True, }) return specs def revert(self, context, result, **kwargs): if isinstance(result, ft.Failure): return volume_spec = result.get('volume_spec') # Restore the source volume status and set the volume to error status. common.restore_source_status(context, self.db, volume_spec) class NotifyVolumeActionTask(flow_utils.CinderTask): """Performs a notification about the given volume when called. Reversion strategy: N/A """ def __init__(self, db, event_suffix): super(NotifyVolumeActionTask, self).__init__(addons=[ACTION, event_suffix]) self.db = db self.event_suffix = event_suffix def execute(self, context, volume): if not self.event_suffix: return try: volume_utils.notify_about_volume_usage(context, volume, self.event_suffix, host=volume.host) except exception.CinderException: # If notification sending of volume database entry reading fails # then we shouldn't error out the whole workflow since this is # not always information that must be sent for volumes to operate LOG.exception("Failed notifying about the volume" " action %(event)s for volume %(volume_id)s", {'event': self.event_suffix, 'volume_id': volume.id}) class CreateVolumeFromSpecTask(flow_utils.CinderTask): """Creates a volume from a provided specification. Reversion strategy: N/A """ default_provides = 'volume_spec' def __init__(self, manager, db, driver, image_volume_cache=None) -> None: super(CreateVolumeFromSpecTask, self).__init__(addons=[ACTION]) self.manager = manager self.db = db self.driver = driver self.image_volume_cache = image_volume_cache self._message = None @property def message(self): if self._message is None: self._message = message_api.API() return self._message def _handle_bootable_volume_glance_meta(self, context, volume, **kwargs): """Enable bootable flag and properly handle glance metadata. Caller should provide one and only one of snapshot_id,source_volid and image_id. If an image_id specified, an image_meta should also be provided, otherwise will be treated as an empty dictionary. 
""" log_template = _("Copying metadata from %(src_type)s %(src_id)s to " "%(vol_id)s.") exception_template = _("Failed updating volume %(vol_id)s metadata" " using the provided %(src_type)s" " %(src_id)s metadata") src_type = None src_id = None volume_utils.enable_bootable_flag(volume) try: if kwargs.get('snapshot_id'): src_type = 'snapshot' src_id = kwargs['snapshot_id'] snapshot_id = src_id LOG.debug(log_template, {'src_type': src_type, 'src_id': src_id, 'vol_id': volume.id}) self.db.volume_glance_metadata_copy_to_volume( context, volume.id, snapshot_id) elif kwargs.get('source_volid'): src_type = 'source volume' src_id = kwargs['source_volid'] source_volid = src_id LOG.debug(log_template, {'src_type': src_type, 'src_id': src_id, 'vol_id': volume.id}) self.db.volume_glance_metadata_copy_from_volume_to_volume( context, source_volid, volume.id) elif kwargs.get('image_id'): src_type = 'image' src_id = kwargs['image_id'] image_id = src_id image_meta = kwargs.get('image_meta', {}) LOG.debug(log_template, {'src_type': src_type, 'src_id': src_id, 'vol_id': volume.id}) self._capture_volume_image_metadata(context, volume.id, image_id, image_meta) except exception.GlanceMetadataNotFound: # If volume is not created from image, No glance metadata # would be available for that volume in # volume glance metadata table pass except exception.CinderException as ex: LOG.exception(exception_template, {'src_type': src_type, 'src_id': src_id, 'vol_id': volume.id}) raise exception.MetadataCopyFailure(reason=ex) def _create_from_snapshot(self, context: cinder_context.RequestContext, volume: objects.Volume, snapshot_id: str, **kwargs: Any) -> dict: snapshot = objects.Snapshot.get_by_id(context, snapshot_id) try: model_update = self.driver.create_volume_from_snapshot(volume, snapshot) finally: self._cleanup_cg_in_volume(volume) # NOTE(harlowja): Subtasks would be useful here since after this # point the volume has already been created and further failures # will not destroy the volume (although they could in the future). make_bootable = False try: originating_vref = objects.Volume.get_by_id(context, snapshot.volume_id) make_bootable = originating_vref.bootable except exception.CinderException as ex: LOG.exception("Failed fetching snapshot %(snapshot_id)s bootable" " flag using the provided glance snapshot " "%(snapshot_ref_id)s volume reference", {'snapshot_id': snapshot_id, 'snapshot_ref_id': snapshot.volume_id}) raise exception.MetadataUpdateFailure(reason=ex) if make_bootable: self._handle_bootable_volume_glance_meta(context, volume, snapshot_id=snapshot_id) return model_update @staticmethod def _setup_encryption_keys(context, volume, encryption): """Return encryption keys in passphrase form for a clone operation. :param context: context :param volume: volume being cloned :param encryption: encryption info dict :returns: tuple (source_pass, new_pass, new_key_id) """ keymgr = key_manager.API(CONF) key = keymgr.get(context, encryption['encryption_key_id']) source_pass = binascii.hexlify(key.get_encoded()).decode('utf-8') new_key_id = volume_utils.create_encryption_key(context, keymgr, volume.volume_type_id) new_key = keymgr.get(context, new_key_id) new_pass = binascii.hexlify(new_key.get_encoded()).decode('utf-8') return (source_pass, new_pass, new_key_id) def _rekey_volume(self, context, volume): """Change encryption key on volume. 
:returns: model update dict """ LOG.debug('rekey volume %s', volume.name) # Rekeying writes very little data (KB), so doing a single pathed # connection is more efficient, but it is less robust, because the # driver could be returning a single path, and it happens to be down # then the attach would fail where the multipathed connection would # have succeeded. properties = volume_utils.brick_get_connector_properties( self.driver.configuration.use_multipath_for_image_xfer, self.driver.configuration.enforce_multipath_for_image_xfer) LOG.debug("properties: %s", properties) attach_info = None model_update = {} new_key_id = None original_key_id = volume.encryption_key_id key_mgr = key_manager.API(CONF) try: attach_info, volume = self.driver._attach_volume(context, volume, properties) if not any(c for c in REKEY_SUPPORTED_CONNECTORS if isinstance(attach_info['connector'], c)): LOG.debug('skipping rekey, connector: %s', attach_info['connector']) raise exception.RekeyNotSupported() LOG.debug("attempting attach for rekey, attach_info: %s", attach_info) if (isinstance(attach_info['device']['path'], str)): image_info = image_utils.qemu_img_info( attach_info['device']['path']) else: # Should not happen, just a safety check LOG.error('%s appears to not be encrypted', attach_info['device']['path']) raise exception.RekeyNotSupported() encryption = volume_utils.check_encryption_provider( volume, context) (source_pass, new_pass, new_key_id) = self._setup_encryption_keys( context, volume, encryption) # see Bug #1942682 and Change I949f07582a708 for why we do this if strutils.bool_from_string(image_info.encrypted): key_str = source_pass + "\n" + new_pass + "\n" del source_pass (out, err) = utils.execute( 'cryptsetup', 'luksChangeKey', attach_info['device']['path'], '--force-password', run_as_root=True, process_input=key_str, log_errors=processutils.LOG_ALL_ERRORS) del key_str model_update = {'encryption_key_id': new_key_id} else: # volume has not been written to yet, format with luks del source_pass if image_info.file_format != 'raw': # Something has gone wrong if the image is not encrypted # and is detected as another format. raise exception.Invalid() encryption_provider = encryption['provider'] if encryption_provider == 'luks': # Force ambiguous "luks" provider to luks1 for # compatibility with new versions of cryptsetup. encryption_provider = 'luks1' (out, err) = utils.execute( 'cryptsetup', '--batch-mode', 'luksFormat', '--force-password', '--type', encryption_provider, '--cipher', encryption['cipher'], '--key-size', str(encryption['key_size']), '--key-file=-', attach_info['device']['path'], run_as_root=True, process_input=new_pass) del new_pass model_update = {'encryption_key_id': new_key_id} # delete the original key that was cloned for this volume # earlier volume_utils.delete_encryption_key(context, key_mgr, original_key_id) except exception.RekeyNotSupported: pass except Exception: with excutils.save_and_reraise_exception(): if new_key_id is not None: # Remove newly cloned key since it will not be used. volume_utils.delete_encryption_key( context, key_mgr, new_key_id) finally: if attach_info: self.driver._detach_volume(context, attach_info, volume, properties, force=True) return model_update def _create_from_source_volume(self, context: cinder_context.RequestContext, volume: objects.Volume, source_volid: str, **kwargs): # NOTE(harlowja): if the source volume has disappeared this will be our # detection of that since this database call should fail. 
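# _setup_encryption_keys turns each key-manager secret into a printable
# passphrase by hex-encoding its raw bytes; the old and new passphrases are
# then fed to cryptsetup on stdin. The encoding step in isolation
# (os.urandom stands in for the key manager here):

import binascii
import os

key_bytes = os.urandom(32)                      # stand-in for keymgr.get(...)
passphrase = binascii.hexlify(key_bytes).decode('utf-8')
assert len(passphrase) == 64                    # 32 bytes -> 64 hex characters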
# # NOTE(harlowja): likely this is not the best place for this to happen # and we should have proper locks on the source volume while actions # that use the source volume are underway. srcvol_ref = objects.Volume.get_by_id(context, source_volid) try: model_update = self.driver.create_cloned_volume(volume, srcvol_ref) if model_update is None: model_update = {} if volume.encryption_key_id is not None: volume.update(model_update) rekey_model_update = self._rekey_volume(context, volume) model_update.update(rekey_model_update) finally: self._cleanup_cg_in_volume(volume) # NOTE(harlowja): Subtasks would be useful here since after this # point the volume has already been created and further failures # will not destroy the volume (although they could in the future). if srcvol_ref.bootable: self._handle_bootable_volume_glance_meta( context, volume, source_volid=srcvol_ref.id) return model_update def _capture_volume_image_metadata(self, context: cinder_context.RequestContext, volume_id: str, image_id: str, image_meta: dict) -> None: volume_metadata = volume_utils.get_volume_image_metadata( image_id, image_meta) LOG.debug("Creating volume glance metadata for volume %(volume_id)s" " backed by image %(image_id)s with: %(vol_metadata)s.", {'volume_id': volume_id, 'image_id': image_id, 'vol_metadata': volume_metadata}) self.db.volume_glance_metadata_bulk_create(context, volume_id, volume_metadata) @staticmethod def _extract_cinder_ids(urls: list[str]) -> list[str]: """Process a list of location URIs from glance :param urls: list of glance location URIs :return: list of IDs extracted from the 'cinder://' URIs """ ids = [] for url in urls: # The url can also be None and a TypeError is raised # TypeError: a bytes-like object is required, not 'str' if not url: continue parts = netutils.urlsplit(url) if parts.scheme == 'cinder': if parts.path: vol_id = parts.path.split('/')[-1] else: vol_id = parts.netloc if uuidutils.is_uuid_like(vol_id): ids.append(vol_id) else: LOG.debug("Ignoring malformed image location uri " "'%(url)s'", {'url': url}) return ids def _clone_image_volume(self, context: cinder_context.RequestContext, volume: objects.Volume, image_location, image_meta: dict[str, Any]) -> tuple[None, bool]: """Create a volume efficiently from an existing image. Returns a dict of volume properties eg. provider_location, boolean indicating whether cloning occurred """ # NOTE (lixiaoy1): currently can't create volume from source vol with # different encryptions, so just return. if not image_location or volume.encryption_key_id: return None, False if (image_meta.get('container_format') != 'bare' or image_meta.get('disk_format') != 'raw'): LOG.info("Requested image %(id)s is not in raw format.", {'id': image_meta.get('id')}) return None, False image_volume = None direct_url, locations = image_location urls = list(set([direct_url] + [loc.get('url') for loc in locations or []])) image_volume_ids = self._extract_cinder_ids(urls) filters = {'id': image_volume_ids} if volume.cluster_name: filters['cluster_name'] = volume.cluster_name else: filters['host'] = volume.host if self.driver.capabilities.get('clone_across_pools'): image_volumes = self.db.volume_get_all( context, filters=filters) else: image_volumes = self.db.volume_get_all_by_host( context, volume['host'], filters=filters) for image_volume in image_volumes: # For the case image volume is stored in the service tenant, # image_owner volume metadata should also be checked. 
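# _extract_cinder_ids pulls candidate volume UUIDs out of glance 'cinder://'
# location URIs, handling both cinder://<id> and cinder://<store>/<id> forms.
# A standalone parse of the same shapes using only the standard library:

import uuid
from urllib.parse import urlsplit


def cinder_ids(urls):
    ids = []
    for url in urls:
        if not url:
            continue
        parts = urlsplit(url)
        if parts.scheme != 'cinder':
            continue
        candidate = parts.path.split('/')[-1] if parts.path else parts.netloc
        try:
            uuid.UUID(candidate)
        except ValueError:
            continue
        ids.append(candidate)
    return ids


vol = '2c1ae4b7-3a3f-4a2f-9b2e-0a9c2c3d4e5f'
print(cinder_ids(['cinder://' + vol, 'cinder://fast-store/' + vol, None]))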
image_owner = None volume_metadata = image_volume.get('volume_metadata') or {} for m in volume_metadata: if m['key'] == 'image_owner': image_owner = m['value'] if (image_meta['owner'] != volume['project_id'] and image_meta['owner'] != image_owner): LOG.info("Skipping image volume %(id)s because " "it is not accessible by current Tenant.", {'id': image_volume.id}) continue LOG.info("Will clone a volume from the image volume " "%(id)s.", {'id': image_volume.id}) break else: LOG.debug("No accessible image volume for image %(id)s found.", {'id': image_meta['id']}) return None, False try: # Drivers like NFS expect OVO and not SQLAlchemy object image_volume = objects.Volume.get_by_id(context, image_volume.id) ret = self.driver.create_cloned_volume(volume, image_volume) self._cleanup_cg_in_volume(volume) return ret, True except (NotImplementedError, exception.CinderException): LOG.exception('Failed to clone image volume %(id)s.', {'id': image_volume['id']}) return None, False def _create_from_image_download(self, context: cinder_context.RequestContext, volume: objects.Volume, image_location, image_meta: dict[str, Any], image_service) -> dict: # TODO(harlowja): what needs to be rolled back in the clone if this # volume create fails?? Likely this should be a subflow or broken # out task in the future. That will bring up the question of how # do we make said subflow/task which is only triggered in the # clone image 'path' resumable and revertable in the correct # manner. model_update = self.driver.create_volume(volume) or {} self._cleanup_cg_in_volume(volume) model_update['status'] = 'downloading' try: volume.update(model_update) volume.save() except exception.CinderException: LOG.exception("Failed updating volume %(volume_id)s with " "%(updates)s", {'volume_id': volume.id, 'updates': model_update}) try: volume_utils.copy_image_to_volume(self.driver, context, volume, image_meta, image_location, image_service) except exception.ImageTooBig: with excutils.save_and_reraise_exception(): LOG.exception("Failed to copy image to volume " "%(volume_id)s due to insufficient space", {'volume_id': volume.id}) return model_update def _create_from_image_cache( self, context: cinder_context.RequestContext, internal_context: cinder_context.RequestContext, volume: objects.Volume, image_id: str, image_meta: dict[str, Any]) -> tuple[None, bool]: """Attempt to create the volume using the image cache. Best case this will simply clone the existing volume in the cache. Worst case the image is out of date and will be evicted. In that case a clone will not be created and the image must be downloaded again. """ assert self.image_volume_cache is not None LOG.debug('Attempting to retrieve cache entry for image = ' '%(image_id)s on host %(host)s.', {'image_id': image_id, 'host': volume.host}) # Currently can't create volume from source vol with different # encryptions, so just return if volume.encryption_key_id: return None, False try: cache_entry = self.image_volume_cache.get_entry(internal_context, volume, image_id, image_meta) if cache_entry: LOG.debug('Creating from source image-volume %(volume_id)s', {'volume_id': cache_entry['volume_id']}) model_update = self._create_from_source_volume( context, volume, cache_entry['volume_id'] ) return model_update, True except exception.SnapshotLimitReached: # If this exception occurred when cloning the image-volume, # it is because the image-volume reached its snapshot limit. # Delete current cache entry and create a "fresh" entry. 
with excutils.save_and_reraise_exception(): msg = ('Deleting image-volume cache entry that reached its' ' cloning snapshot limit') self.image_volume_cache.delete_cached_volume(context, cache_entry, msg) except NotImplementedError: LOG.warning('Backend does not support creating image-volume ' 'clone. Image will be downloaded from Glance.') return None, False @coordination.synchronized('{image_id}') def _prepare_image_cache_entry(self, context: cinder_context.RequestContext, volume: objects.Volume, image_location: str, image_id: str, image_meta: dict[str, Any], image_service) -> tuple[Optional[dict], bool]: assert self.image_volume_cache is not None internal_context = cinder_context.get_internal_tenant_context() if not internal_context: return None, False cache_entry = self.image_volume_cache.get_entry(internal_context, volume, image_id, image_meta) # If the entry is in the cache then return ASAP in order to minimize # the scope of the lock. If it isn't in the cache then do the work # that adds it. The work is done inside the locked region to ensure # only one cache entry is created. if cache_entry: LOG.debug('Found cache entry for image = ' '%(image_id)s on host %(host)s.', {'image_id': image_id, 'host': volume.host}) return None, False else: LOG.debug('Preparing cache entry for image = ' '%(image_id)s on host %(host)s.', {'image_id': image_id, 'host': volume.host}) model_update = self._create_from_image_cache_or_download( context, volume, image_location, image_id, image_meta, image_service, update_cache=True) return model_update, True def _create_from_image_cache_or_download( self, context: cinder_context.RequestContext, volume: objects.Volume, image_location, image_id: str, image_meta: dict[str, Any], image_service, update_cache: bool = False) -> Optional[dict]: # NOTE(e0ne): check for free space in image_conversion_dir before # image downloading. # NOTE(mnaser): This check *only* happens if the backend is not able # to clone volumes and we have to resort to downloading # the image from Glance and uploading it. if CONF.image_conversion_dir: fileutils.ensure_tree(CONF.image_conversion_dir) try: image_utils.check_available_space( CONF.image_conversion_dir, image_meta['size'], image_id) except exception.ImageTooBig as err: with excutils.save_and_reraise_exception(): self.message.create( context, message_field.Action.COPY_IMAGE_TO_VOLUME, resource_uuid=volume.id, detail=message_field.Detail.NOT_ENOUGH_SPACE_FOR_IMAGE, exception=err) # Try and use the image cache. should_create_cache_entry = False cloned = False model_update = None if self.image_volume_cache is not None: internal_context = cinder_context.get_internal_tenant_context() if not internal_context: LOG.info('Unable to get Cinder internal context, will ' 'not use image-volume cache.') else: try: model_update, cloned = self._create_from_image_cache( context, internal_context, volume, image_id, image_meta ) except exception.SnapshotLimitReached: # This exception will be handled by the caller's # (_create_from_image) retry decorator with excutils.save_and_reraise_exception(): LOG.debug("Snapshot limit reached. Creating new " "image-volume.") except exception.CinderException as e: LOG.warning('Failed to create volume from image-volume ' 'cache, image will be downloaded from Glance. ' 'Error: %(exception)s', {'exception': e}) # Don't cache unless directed. 
if not cloned and update_cache: should_create_cache_entry = True # cleanup consistencygroup field in the volume, # because when creating cache entry, it will need # to update volume object. self._cleanup_cg_in_volume(volume) # Fall back to default behavior of creating volume, # download the image data and copy it into the volume. original_size = volume.size backend_name = volume_utils.extract_host(volume.service_topic_queue) try: if not cloned: try: with image_utils.TemporaryImages.fetch( image_service, context, image_id, backend_name) as tmp_image: if CONF.verify_glance_signatures != 'disabled': # Verify image signature via reading content from # temp image, and store the verification flag if # required. verified = \ image_utils.verify_glance_image_signature( context, image_service, image_id, tmp_image) self.db.volume_glance_metadata_bulk_create( context, volume.id, {'signature_verified': verified}) # Try to create the volume as the minimal size, # then we can extend once the image has been # downloaded. data = image_utils.qemu_img_info(tmp_image) virtual_size = image_utils.check_virtual_size( data.virtual_size, volume.size, image_id) if should_create_cache_entry: if virtual_size and virtual_size != original_size: volume.size = virtual_size volume.save() model_update = self._create_from_image_download( context, volume, image_location, image_meta, image_service ) except exception.ImageTooBig as e: with excutils.save_and_reraise_exception(): self.message.create( context, message_field.Action.COPY_IMAGE_TO_VOLUME, resource_uuid=volume.id, detail= message_field.Detail.NOT_ENOUGH_SPACE_FOR_IMAGE, exception=e) except exception.ImageSignatureVerificationException as err: with excutils.save_and_reraise_exception(): self.message.create( context, message_field.Action.COPY_IMAGE_TO_VOLUME, resource_uuid=volume.id, detail= message_field.Detail.SIGNATURE_VERIFICATION_FAILED, exception=err) except exception.ImageConversionNotAllowed: with excutils.save_and_reraise_exception(): self.message.create( context, message_field.Action.COPY_IMAGE_TO_VOLUME, resource_uuid=volume.id, detail= message_field.Detail.IMAGE_FORMAT_UNACCEPTABLE) if should_create_cache_entry: # Update the newly created volume db entry before we clone it # for the image-volume creation. if model_update: volume.update(model_update) volume.save() self.manager._create_image_cache_volume_entry(internal_context, volume, image_id, image_meta) finally: # If we created the volume as the minimal size, extend it back to # what was originally requested. If an exception has occurred or # extending it back failed, we still need to put this back before # letting it be raised further up the stack. if volume.size != original_size: try: self.driver.extend_volume(volume, original_size) finally: volume.size = original_size volume.save() return model_update @utils.retry(exception.SnapshotLimitReached, retries=1) def _create_from_image(self, context: cinder_context.RequestContext, volume: objects.Volume, image_location, image_id: str, image_meta: dict[str, Any], image_service, **kwargs: Any) -> Optional[dict]: LOG.debug("Cloning %(volume_id)s from image %(image_id)s " " at location %(image_location)s.", {'volume_id': volume.id, 'image_location': image_location, 'image_id': image_id}) virtual_size = image_meta.get('virtual_size') if virtual_size: virtual_size = image_utils.check_virtual_size(virtual_size, volume.size, image_id) # Create the volume from an image. # # First see if the driver can clone the image directly. 
# # NOTE (singn): two params need to be returned # dict containing provider_location for cloned volume # and clone status. # NOTE (lixiaoy1): Currently all images are raw data, we can't # use clone_image to copy data if new volume is encrypted. volume_is_encrypted = volume.encryption_key_id is not None cloned = False model_update = None if not volume_is_encrypted: model_update, cloned = self.driver.clone_image(context, volume, image_location, image_meta, image_service) # Try and clone the image if we have it set as a glance location. if not cloned and 'cinder' in CONF.allowed_direct_url_schemes: model_update, cloned = self._clone_image_volume(context, volume, image_location, image_meta) # If we're going to try using the image cache then prepare the cache # entry. Note: encrypted volume images are not cached. if not cloned and self.image_volume_cache and not volume_is_encrypted: # If _prepare_image_cache_entry() has to create the cache entry # then it will also create the volume. But if the volume image # is already in the cache then it returns (None, False), and # _create_from_image_cache_or_download() will use the cache. model_update, cloned = self._prepare_image_cache_entry( context, volume, image_location, image_id, image_meta, image_service) # Try and use the image cache, and download if not cached. if not cloned: model_update = self._create_from_image_cache_or_download( context, volume, image_location, image_id, image_meta, image_service) self._handle_bootable_volume_glance_meta(context, volume, image_id=image_id, image_meta=image_meta) typing.cast(dict, model_update) return model_update def _create_from_backup(self, context: cinder_context.RequestContext, volume: objects.Volume, backup_id: str, **kwargs) -> tuple[dict, bool]: LOG.info("Creating volume %(volume_id)s from backup %(backup_id)s.", {'volume_id': volume.id, 'backup_id': backup_id}) ret = {} backup = objects.Backup.get_by_id(context, backup_id) try: ret = self.driver.create_volume_from_backup(volume, backup) need_update_volume = True LOG.info("Created volume %(volume_id)s from backup %(backup_id)s " "successfully.", {'volume_id': volume.id, 'backup_id': backup_id}) except NotImplementedError: LOG.info("Backend does not support creating volume from " "backup %(id)s. It will directly create the raw volume " "at the backend and then schedule the request to the " "backup service to restore the volume with backup.", {'id': backup_id}) model_update = self._create_raw_volume( context, volume, **kwargs) or {} volume.update(model_update) volume.save() backupapi = backup_api.API() backup_host = backupapi.get_available_backup_service_host( backup.host, backup.availability_zone) updates = {'status': fields.BackupStatus.RESTORING, 'restore_volume_id': volume.id, 'host': backup_host} backup.update(updates) backup.save() LOG.info("Raw volume %(volume_id)s created. 
Calling " "restore_backup %(backup_id)s to complete restoration.", {'volume_id': volume.id, 'backup_id': backup_id}) backuprpcapi = backup_rpcapi.BackupAPI() backuprpcapi.restore_backup(context, backup.host, backup, volume.id, volume_is_new=True) need_update_volume = False return ret, need_update_volume def _create_raw_volume(self, context: cinder_context.RequestContext, volume: objects.Volume, **kwargs: Any): try: ret = self.driver.create_volume(volume) except Exception as ex: with excutils.save_and_reraise_exception(): self.message.create( context, message_field.Action.CREATE_VOLUME_FROM_BACKEND, resource_uuid=volume.id, detail=message_field.Detail.DRIVER_FAILED_CREATE, exception=ex) finally: self._cleanup_cg_in_volume(volume) return ret def execute(self, context: cinder_context.RequestContext, volume: objects.Volume, volume_spec) -> dict: volume_spec = dict(volume_spec) volume_id = volume_spec.pop('volume_id', None) if not volume_id: volume_id = volume.id # we can't do anything if the driver didn't init if not self.driver.initialized: driver_name = self.driver.__class__.__name__ LOG.error("Unable to create volume. " "Volume driver %s not initialized", driver_name) raise exception.DriverNotInitialized() # For backward compatibilty volume.populate_consistencygroup() create_type = volume_spec.pop('type', None) LOG.info("Volume %(volume_id)s: being created as %(create_type)s " "with specification: %(volume_spec)s", {'volume_spec': volume_spec, 'volume_id': volume_id, 'create_type': create_type}) model_update: dict if create_type == 'raw': model_update = self._create_raw_volume( context, volume, **volume_spec) elif create_type == 'snap': model_update = self._create_from_snapshot(context, volume, **volume_spec) elif create_type == 'source_vol': model_update = self._create_from_source_volume( context, volume, **volume_spec) elif create_type == 'image': model_update = self._create_from_image(context, volume, **volume_spec) elif create_type == 'backup': model_update, need_update_volume = self._create_from_backup( context, volume, **volume_spec) volume_spec.update({'need_update_volume': need_update_volume}) else: raise exception.VolumeTypeNotFound(volume_type_id=create_type) # Persist any model information provided on creation. try: if model_update: with volume.obj_as_admin(): volume.update(model_update) volume.save() except exception.CinderException: # If somehow the update failed we want to ensure that the # failure is logged (but not try rescheduling since the volume at # this point has been created). LOG.exception("Failed updating model of volume %(volume_id)s " "with creation provided model %(model)s", {'volume_id': volume_id, 'model': model_update}) raise return volume_spec def _cleanup_cg_in_volume(self, volume: objects.Volume) -> None: # NOTE(xyang): Cannot have both group_id and consistencygroup_id. # consistencygroup_id needs to be removed to avoid DB reference # error because there isn't an entry in the consistencygroups table. if (('group_id' in volume and volume.group_id) and ('consistencygroup_id' in volume and volume.consistencygroup_id)): volume.consistencygroup_id = None if 'consistencygroup' in volume: volume.consistencygroup = None class CreateVolumeOnFinishTask(NotifyVolumeActionTask): """On successful volume creation this will perform final volume actions. When a volume is created successfully it is expected that MQ notifications and database updates will occur to 'signal' to others that the volume is now ready for usage. 
This task does those notifications and updates in a reliable manner (not re-raising exceptions if said actions can not be triggered). Reversion strategy: N/A """ def __init__(self, db, event_suffix): super(CreateVolumeOnFinishTask, self).__init__(db, event_suffix) self.status_translation = { 'migration_target_creating': 'migration_target', } @typing.no_type_check def execute(self, context, volume, volume_spec): need_update_volume = volume_spec.pop('need_update_volume', True) if not need_update_volume: super(CreateVolumeOnFinishTask, self).execute(context, volume) return new_status = self.status_translation.get(volume_spec.get('status'), 'available') update = { 'status': new_status, 'launched_at': timeutils.utcnow(), } try: # TODO(harlowja): is it acceptable to only log if this fails?? # or are there other side-effects that this will cause if the # status isn't updated correctly (aka it will likely be stuck in # 'creating' if this fails)?? volume.update(update) volume.save() # Now use the parent to notify. super(CreateVolumeOnFinishTask, self).execute(context, volume) except exception.CinderException: LOG.exception("Failed updating volume %(volume_id)s with " "%(update)s", {'volume_id': volume.id, 'update': update}) # Even if the update fails, the volume is ready. LOG.info("Volume %(volume_name)s (%(volume_id)s): " "created successfully", {'volume_name': volume_spec['volume_name'], 'volume_id': volume.id}) def get_flow(context, manager, db, driver, scheduler_rpcapi, host, volume, allow_reschedule, reschedule_context, request_spec, filter_properties, image_volume_cache=None): """Constructs and returns the manager entrypoint flow. This flow will do the following: 1. Determines if rescheduling is enabled (ahead of time). 2. Inject keys & values for dependent tasks. 3. Selects 1 of 2 activated only on *failure* tasks (one to update the db status & notify or one to update the db status & notify & *reschedule*). 4. Extracts a volume specification from the provided inputs. 5. Notifies that the volume has started to be created. 6. Creates a volume from the extracted volume specification. 7. Attaches an on-success *only* task that notifies that the volume creation has ended and performs further database status updates. """ flow_name = ACTION.replace(":", "_") + "_manager" volume_flow = linear_flow.Flow(flow_name) # This injects the initial starting flow values into the workflow so that # the dependency order of the tasks provides/requires can be correctly # determined. create_what = { 'context': context, 'filter_properties': filter_properties, 'request_spec': request_spec, 'volume': volume, } volume_flow.add(ExtractVolumeRefTask(db, host, set_error=False)) retry = filter_properties.get('retry', None) # Always add OnFailureRescheduleTask and we handle the change of volume's # status when reverting the flow. Meanwhile, no need to revert process of # ExtractVolumeRefTask. 
do_reschedule = allow_reschedule and request_spec and retry volume_flow.add(OnFailureRescheduleTask(reschedule_context, db, manager, scheduler_rpcapi, do_reschedule)) LOG.debug("Volume reschedule parameters: %(allow)s " "retry: %(retry)s", {'allow': allow_reschedule, 'retry': retry}) volume_flow.add(ExtractVolumeSpecTask(db)) # Temporary volumes created during migration should not be notified end_notify_suffix = None if volume.use_quota: volume_flow.add(NotifyVolumeActionTask(db, 'create.start')) end_notify_suffix = 'create.end' volume_flow.add(CreateVolumeFromSpecTask(manager, db, driver, image_volume_cache), CreateVolumeOnFinishTask(db, end_notify_suffix)) # Now load (but do not run) the flow using the provided initial data. return taskflow.engines.load(volume_flow, store=create_what) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/flows/manager/manage_existing.py0000664000175000017500000001231000000000000024117 0ustar00zuulzuul00000000000000# Copyright 2014 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_log import log as logging from oslo_utils import excutils import taskflow.engines from taskflow.patterns import linear_flow from cinder import exception from cinder import flow_utils from cinder.i18n import _ from cinder.volume.flows.api import create_volume as create_api from cinder.volume.flows import common as flow_common from cinder.volume.flows.manager import create_volume as create_mgr LOG = logging.getLogger(__name__) ACTION = 'volume:manage_existing' class PrepareForQuotaReservationTask(flow_utils.CinderTask): """Gets the volume size from the driver.""" default_provides = set(['size', 'volume_type_id', 'volume_properties', 'volume_spec']) def __init__(self, db, driver): super(PrepareForQuotaReservationTask, self).__init__(addons=[ACTION]) self.db = db self.driver = driver def execute(self, context, volume, manage_existing_ref): driver_name = self.driver.__class__.__name__ if not self.driver.initialized: LOG.error("Unable to manage existing volume. 
" "Volume driver %s not initialized.", driver_name) flow_common.error_out(volume, _("Volume driver %s not " "initialized.") % driver_name, status='error_managing') raise exception.DriverNotInitialized() size = 0 try: size = self.driver.manage_existing_get_size(volume, manage_existing_ref) except Exception: with excutils.save_and_reraise_exception(): reason = _("Volume driver %s get exception.") % driver_name flow_common.error_out(volume, reason, status='error_managing') return {'size': size, 'volume_type_id': volume.volume_type_id, 'volume_properties': volume, 'volume_spec': {'status': volume.status, 'volume_name': volume.name, 'volume_id': volume.id}} def revert(self, context, result, flow_failures, volume, **kwargs): reason = _('Volume manage failed.') flow_common.error_out(volume, reason=reason, status='error_managing') LOG.error("Volume %s: manage failed.", volume.id) class ManageExistingTask(flow_utils.CinderTask): """Brings an existing volume under Cinder management.""" default_provides = set(['volume']) def __init__(self, db, driver): super(ManageExistingTask, self).__init__(addons=[ACTION]) self.db = db self.driver = driver def execute(self, context, volume, manage_existing_ref, size): model_update = self.driver.manage_existing(volume, manage_existing_ref) if not model_update: model_update = {} model_update.update({'size': size}) try: volume.update(model_update) volume.save() except exception.CinderException: LOG.exception("Failed updating model of volume %(volume_id)s" " with creation provided model %(model)s", {'volume_id': volume.id, 'model': model_update}) raise return {'volume': volume} def get_flow(context, db, driver, host, volume, ref): """Constructs and returns the manager entrypoint flow.""" flow_name = ACTION.replace(":", "_") + "_manager" volume_flow = linear_flow.Flow(flow_name) # This injects the initial starting flow values into the workflow so that # the dependency order of the tasks provides/requires can be correctly # determined. create_what = { 'context': context, 'volume': volume, 'manage_existing_ref': ref, 'group_snapshot': None, 'optional_args': {'is_quota_committed': False, 'update_size': True} } volume_flow.add(create_mgr.NotifyVolumeActionTask(db, "manage_existing.start"), PrepareForQuotaReservationTask(db, driver), create_api.QuotaReserveTask(), ManageExistingTask(db, driver), create_api.QuotaCommitTask(), create_mgr.CreateVolumeOnFinishTask(db, "manage_existing.end")) # Now load (but do not run) the flow using the provided initial data. return taskflow.engines.load(volume_flow, store=create_what) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/flows/manager/manage_existing_snapshot.py0000664000175000017500000003453400000000000026052 0ustar00zuulzuul00000000000000# Copyright (c) 2015 Huawei Technologies Co., Ltd. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from oslo_config import cfg from oslo_log import log as logging import taskflow.engines from taskflow.patterns import linear_flow from taskflow.types import failure as ft from cinder import exception from cinder import flow_utils from cinder.i18n import _ from cinder import objects from cinder.objects import fields from cinder import quota from cinder import quota_utils from cinder.volume.flows import common as flow_common from cinder.volume import volume_utils CONF = cfg.CONF LOG = logging.getLogger(__name__) QUOTAS = quota.QUOTAS ACTION = 'snapshot:manage_existing' class ExtractSnapshotRefTask(flow_utils.CinderTask): """Extracts snapshot reference for given snapshot id.""" default_provides = 'snapshot_ref' def __init__(self, db): super(ExtractSnapshotRefTask, self).__init__(addons=[ACTION]) self.db = db def execute(self, context, snapshot_id): # NOTE(wanghao): this will fetch the snapshot from the database, if # the snapshot has been deleted before we got here then this should # fail. # # In the future we might want to have a lock on the snapshot_id so that # the snapshot can not be deleted while its still being created? snapshot_ref = objects.Snapshot.get_by_id(context, snapshot_id) LOG.debug("ExtractSnapshotRefTask return" " snapshot_ref: %s", snapshot_ref) return snapshot_ref def revert(self, context, snapshot_id, result, **kwargs): if isinstance(result, ft.Failure): return flow_common.error_out(result) LOG.error("Snapshot %s: create failed", result.id) class NotifySnapshotActionTask(flow_utils.CinderTask): """Performs a notification about the given snapshot when called. Reversion strategy: N/A """ def __init__(self, db, event_suffix, host): super(NotifySnapshotActionTask, self).__init__(addons=[ACTION, event_suffix]) self.db = db self.event_suffix = event_suffix self.host = host def execute(self, context, snapshot_ref): snapshot_id = snapshot_ref['id'] try: volume_utils.notify_about_snapshot_usage(context, snapshot_ref, self.event_suffix, host=self.host) except exception.CinderException: # If notification sending of snapshot database entry reading fails # then we shouldn't error out the whole workflow since this is # not always information that must be sent for snapshots to operate LOG.exception("Failed notifying about the snapshot " "action %(event)s for snapshot %(snp_id)s.", {'event': self.event_suffix, 'snp_id': snapshot_id}) class PrepareForQuotaReservationTask(flow_utils.CinderTask): """Gets the snapshot size from the driver.""" default_provides = set(['size', 'snapshot_properties']) def __init__(self, db, driver): super(PrepareForQuotaReservationTask, self).__init__(addons=[ACTION]) self.db = db self.driver = driver def execute(self, context, snapshot_ref, manage_existing_ref): if not self.driver.initialized: driver_name = (self.driver.configuration. safe_get('volume_backend_name')) LOG.error("Unable to manage existing snapshot. " "Volume driver %s not initialized.", driver_name) flow_common.error_out(snapshot_ref, reason=_("Volume driver %s " "not initialized.") % driver_name) raise exception.DriverNotInitialized() size = self.driver.manage_existing_snapshot_get_size( snapshot=snapshot_ref, existing_ref=manage_existing_ref) return {'size': size, 'snapshot_properties': snapshot_ref} class QuotaReserveTask(flow_utils.CinderTask): """Reserves a single snapshot with the given size. Reversion strategy: rollback the quota reservation. 
Warning Warning: if the process that is running this reserve and commit process fails (or is killed before the quota is rolled back or committed it does appear like the quota will never be rolled back). This makes software upgrades hard (inflight operations will need to be stopped or allowed to complete before the upgrade can occur). *In the future* when taskflow has persistence built-in this should be easier to correct via an automated or manual process. """ default_provides = set(['reservations']) def __init__(self): super(QuotaReserveTask, self).__init__(addons=[ACTION]) def execute(self, context, size, snapshot_ref, optional_args): try: if CONF.no_snapshot_gb_quota: reserve_opts = {'snapshots': 1} else: # NOTE(tommylikehu): We only use the difference of size here # as we already committed the original size at the API # service before and this reservation task is only used for # managing snapshots now. reserve_opts = {'snapshots': 1, 'gigabytes': int(size) - snapshot_ref.volume_size} if 'update_size' in optional_args and optional_args['update_size']: reserve_opts.pop('snapshots', None) volume = objects.Volume.get_by_id(context, snapshot_ref.volume_id) QUOTAS.add_volume_type_opts(context, reserve_opts, volume.volume_type_id) reservations = QUOTAS.reserve(context, **reserve_opts) return { 'reservations': reservations, } except exception.OverQuota as e: quota_utils.process_reserve_over_quota( context, e, resource='snapshots', size=size) def revert(self, context, result, optional_args, **kwargs): # We never produced a result and therefore can't destroy anything. if isinstance(result, ft.Failure): return if optional_args['is_quota_committed']: # The reservations have already been committed and can not be # rolled back at this point. return # We actually produced an output that we can revert so lets attempt # to use said output to rollback the reservation. reservations = result['reservations'] try: QUOTAS.rollback(context, reservations) except exception.CinderException: # We are already reverting, therefore we should silence this # exception since a second exception being active will be bad. LOG.exception("Failed rolling back quota for" " %s reservations.", reservations) class QuotaCommitTask(flow_utils.CinderTask): """Commits the reservation. Reversion strategy: N/A (the rollback will be handled by the task that did the initial reservation (see: QuotaReserveTask). Warning Warning: if the process that is running this reserve and commit process fails (or is killed before the quota is rolled back or committed it does appear like the quota will never be rolled back). This makes software upgrades hard (inflight operations will need to be stopped or allowed to complete before the upgrade can occur). *In the future* when taskflow has persistence built-in this should be easier to correct via an automated or manual process. """ def __init__(self): super(QuotaCommitTask, self).__init__(addons=[ACTION]) def execute(self, context, reservations, snapshot_properties, optional_args): QUOTAS.commit(context, reservations) # updating is_quota_committed attribute of optional_args dictionary optional_args['is_quota_committed'] = True return {'snapshot_properties': snapshot_properties} def revert(self, context, result, **kwargs): # We never produced a result and therefore can't destroy anything. 
if isinstance(result, ft.Failure): return snapshot = result['snapshot_properties'] try: reserve_opts = {'snapshots': -1, 'gigabytes': -snapshot['volume_size']} reservations = QUOTAS.reserve(context, project_id=context.project_id, **reserve_opts) if reservations: QUOTAS.commit(context, reservations, project_id=context.project_id) except Exception: LOG.exception("Failed to update quota while deleting " "snapshots: %s", snapshot['id']) class ManageExistingTask(flow_utils.CinderTask): """Brings an existing snapshot under Cinder management.""" default_provides = set(['snapshot', 'new_status']) def __init__(self, db, driver): super(ManageExistingTask, self).__init__(addons=[ACTION]) self.db = db self.driver = driver def execute(self, context, snapshot_ref, manage_existing_ref, size): model_update = self.driver.manage_existing_snapshot( snapshot=snapshot_ref, existing_ref=manage_existing_ref) if not model_update: model_update = {} model_update['volume_size'] = size try: snapshot_object = objects.Snapshot.get_by_id(context, snapshot_ref['id']) snapshot_object.update(model_update) snapshot_object.save() except exception.CinderException: LOG.exception("Failed updating model of snapshot " "%(snapshot_id)s with creation provided model " "%(model)s.", {'snapshot_id': snapshot_ref['id'], 'model': model_update}) raise return {'snapshot': snapshot_ref, 'new_status': fields.SnapshotStatus.AVAILABLE} class CreateSnapshotOnFinishTask(NotifySnapshotActionTask): """Perform final snapshot actions. When a snapshot is created successfully it is expected that MQ notifications and database updates will occur to 'signal' to others that the snapshot is now ready for usage. This task does those notifications and updates in a reliable manner (not re-raising exceptions if said actions can not be triggered). Reversion strategy: N/A """ def execute(self, context, snapshot, new_status): LOG.debug("Begin to call CreateSnapshotOnFinishTask execute.") snapshot_id = snapshot['id'] LOG.debug("New status: %s", new_status) update = { 'status': new_status } try: # TODO(harlowja): is it acceptable to only log if this fails?? # or are there other side-effects that this will cause if the # status isn't updated correctly (aka it will likely be stuck in # 'building' if this fails)?? snapshot_object = objects.Snapshot.get_by_id(context, snapshot_id) snapshot_object.update(update) snapshot_object.save() # Now use the parent to notify. super(CreateSnapshotOnFinishTask, self).execute(context, snapshot) except exception.CinderException: LOG.exception("Failed updating snapshot %(snapshot_id)s with " "%(update)s.", {'snapshot_id': snapshot_id, 'update': update}) # Even if the update fails, the snapshot is ready. LOG.info("Snapshot %s created successfully.", snapshot_id) def get_flow(context, db, driver, host, snapshot_id, ref): """Constructs and returns the manager entry point flow.""" LOG.debug("Input parameters: context=%(context)s, db=%(db)s," "driver=%(driver)s, host=%(host)s, " "snapshot_id=(snapshot_id)s, ref=%(ref)s.", {'context': context, 'db': db, 'driver': driver, 'host': host, 'snapshot_id': snapshot_id, 'ref': ref} ) flow_name = ACTION.replace(":", "_") + "_manager" snapshot_flow = linear_flow.Flow(flow_name) # This injects the initial starting flow values into the workflow so that # the dependency order of the tasks provides/requires can be correctly # determined. 
create_what = { 'context': context, 'snapshot_id': snapshot_id, 'manage_existing_ref': ref, 'optional_args': {'is_quota_committed': False, 'update_size': True} } notify_start_msg = "manage_existing_snapshot.start" notify_end_msg = "manage_existing_snapshot.end" snapshot_flow.add(ExtractSnapshotRefTask(db), NotifySnapshotActionTask(db, notify_start_msg, host=host), PrepareForQuotaReservationTask(db, driver), QuotaReserveTask(), ManageExistingTask(db, driver), QuotaCommitTask(), CreateSnapshotOnFinishTask(db, notify_end_msg, host=host)) LOG.debug("Begin to return taskflow.engines." "load(snapshot_flow,store=create_what).") # Now load (but do not run) the flow using the provided initial data. return taskflow.engines.load(snapshot_flow, store=create_what) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/group_types.py0000664000175000017500000001633000000000000020577 0ustar00zuulzuul00000000000000# Copyright (c) 2016 EMC Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Built-in group type properties.""" from oslo_config import cfg from oslo_db import exception as db_exc from oslo_log import log as logging import webob from cinder import context from cinder import db from cinder import exception from cinder.i18n import _ CONF = cfg.CONF LOG = logging.getLogger(__name__) DEFAULT_CGSNAPSHOT_TYPE = "default_cgsnapshot_type" def create(context, name, group_specs=None, is_public=True, projects=None, description=None): """Creates group types.""" group_specs = group_specs or {} projects = projects or [] elevated = context if context.is_admin else context.elevated() try: type_ref = db.group_type_create(elevated, dict(name=name, group_specs=group_specs, is_public=is_public, description=description), projects=projects) except db_exc.DBError: LOG.exception('DB error:') raise exception.GroupTypeCreateFailed(name=name, group_specs=group_specs) return type_ref def update(context, id, name, description, is_public=None): """Update group type by id.""" if id is None: msg = _("id cannot be None") raise exception.InvalidGroupType(reason=msg) elevated = context if context.is_admin else context.elevated() try: db.group_type_update(elevated, id, dict(name=name, description=description, is_public=is_public)) except db_exc.DBError: LOG.exception('DB error:') raise exception.GroupTypeUpdateFailed(id=id) def destroy(context, id): """Marks group types as deleted.""" if id is None: msg = _("id cannot be None") raise exception.InvalidGroupType(reason=msg) else: elevated = context if context.is_admin else context.elevated() try: db.group_type_destroy(elevated, id) except exception.GroupTypeInUse as e: msg = _('Target group type is still in use. %s') % e raise webob.exc.HTTPBadRequest(explanation=msg) def get_all_group_types(context, inactive=0, filters=None, marker=None, limit=None, sort_keys=None, sort_dirs=None, offset=None, list_result=False): """Get all non-deleted group_types. 
Pass true as argument if you want deleted group types returned also. """ grp_types = db.group_type_get_all(context, inactive, filters=filters, marker=marker, limit=limit, sort_keys=sort_keys, sort_dirs=sort_dirs, offset=offset, list_result=list_result) return grp_types def get_group_type(ctxt, id, expected_fields=None): """Retrieves single group type by id.""" if id is None: msg = _("id cannot be None") raise exception.InvalidGroupType(reason=msg) if ctxt is None: ctxt = context.get_admin_context() return db.group_type_get(ctxt, id, expected_fields=expected_fields) def get_group_type_by_name(context, name): """Retrieves single group type by name.""" if name is None: msg = _("name cannot be None") raise exception.InvalidGroupType(reason=msg) return db.group_type_get_by_name(context, name) def get_default_group_type(): """Get the default group type.""" name = CONF.default_group_type grp_type = {} if name is not None: ctxt = context.get_admin_context() try: grp_type = get_group_type_by_name(ctxt, name) except exception.GroupTypeNotFoundByName: # Couldn't find group type with the name in default_group_type # flag, record this issue and move on LOG.exception('Default group type is not found. ' 'Please check default_group_type config.') return grp_type def get_default_cgsnapshot_type(): """Get the default group type for migrating cgsnapshots. Get the default group type for migrating consistencygroups to groups and cgsnapshots to group_snapshots. """ grp_type = {} ctxt = context.get_admin_context() try: grp_type = get_group_type_by_name(ctxt, DEFAULT_CGSNAPSHOT_TYPE) except exception.GroupTypeNotFoundByName: # Couldn't find DEFAULT_CGSNAPSHOT_TYPE group type. # Record this issue and move on. LOG.exception('Default cgsnapshot type %s is not found.', DEFAULT_CGSNAPSHOT_TYPE) return grp_type def is_default_cgsnapshot_type(group_type_id): cgsnap_type = get_default_cgsnapshot_type() return cgsnap_type and group_type_id == cgsnap_type['id'] def get_group_type_specs(group_type_id, key=False): group_type = get_group_type(context.get_admin_context(), group_type_id) group_specs = group_type['group_specs'] if key: if group_specs.get(key): return group_specs.get(key) else: return False else: return group_specs def is_public_group_type(context, group_type_id): """Return is_public boolean value of group type""" group_type = db.group_type_get(context, group_type_id) return group_type['is_public'] def add_group_type_access(context, group_type_id, project_id): """Add access to group type for project_id.""" if group_type_id is None: msg = _("group_type_id cannot be None") raise exception.InvalidGroupType(reason=msg) elevated = context if context.is_admin else context.elevated() if is_public_group_type(elevated, group_type_id): msg = _("Type access modification is not applicable to public group " "type.") raise exception.InvalidGroupType(reason=msg) return db.group_type_access_add(elevated, group_type_id, project_id) def remove_group_type_access(context, group_type_id, project_id): """Remove access to group type for project_id.""" if group_type_id is None: msg = _("group_type_id cannot be None") raise exception.InvalidGroupType(reason=msg) elevated = context if context.is_admin else context.elevated() if is_public_group_type(elevated, group_type_id): msg = _("Type access modification is not applicable to public group " "type.") raise exception.InvalidGroupType(reason=msg) return db.group_type_access_remove(elevated, group_type_id, project_id) ././@PaxHeader0000000000000000000000000000002600000000000011453 
xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/manager.py0000664000175000017500000073511000000000000017635 0ustar00zuulzuul00000000000000# Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Volume manager manages creating, attaching, detaching, and persistent storage. Persistent storage volumes keep their state independent of instances. You can attach to an instance, terminate the instance, spawn a new instance (even one from a different image) and re-attach the volume with the same data intact. **Related Flags** :volume_manager: The module name of a class derived from :class:`manager.Manager` (default: :class:`cinder.volume.manager.Manager`). :volume_driver: Used by :class:`Manager`. Defaults to :class:`cinder.volume.drivers.lvm.LVMVolumeDriver`. :volume_group: Name of the group that will contain exported volumes (default: `cinder-volumes`) :num_shell_tries: Number of times to attempt to run commands (default: 3) """ import functools import time import typing from typing import Any, Optional, Union from castellan import key_manager from oslo_config import cfg from oslo_log import log as logging import oslo_messaging as messaging from oslo_serialization import jsonutils from oslo_service import periodic_task from oslo_utils import excutils from oslo_utils import importutils from oslo_utils import strutils from oslo_utils import timeutils from oslo_utils import units from oslo_utils import uuidutils from oslo_versionedobjects import fields as ovo_fields profiler = importutils.try_import('osprofiler.profiler') import requests from taskflow import exceptions as tfe from cinder.backup import rpcapi as backup_rpcapi from cinder.common import constants from cinder import compute from cinder import context from cinder import coordination from cinder import db from cinder import exception from cinder import flow_utils from cinder.i18n import _ from cinder.image import cache as image_cache from cinder.image import glance from cinder.image import image_utils from cinder.keymgr import migration as key_migration from cinder import manager from cinder.message import api as message_api from cinder.message import message_field from cinder import objects from cinder.objects import cgsnapshot from cinder.objects import consistencygroup from cinder.objects import fields from cinder import quota from cinder import utils from cinder import volume as cinder_volume from cinder.volume import configuration as config from cinder.volume.flows.manager import create_volume from cinder.volume.flows.manager import manage_existing from cinder.volume.flows.manager import manage_existing_snapshot from cinder.volume import group_types from cinder.volume import rpcapi as volume_rpcapi from cinder.volume import volume_migration from cinder.volume import volume_types from cinder.volume import volume_utils LOG = logging.getLogger(__name__) QUOTAS = quota.QUOTAS 
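# NOTE: QUOTAS (and GROUP_QUOTAS, below) follow the reserve -> commit /
# rollback cycle used by this manager and by the flow tasks. A rough,
# illustrative sketch of that cycle (argument names and values are examples
# only, not a specific call site in this file):
#
#     reservations = QUOTAS.reserve(context, snapshots=-1, gigabytes=-size)
#     try:
#         ...  # the DB/driver work that the reservation covers
#     except Exception:
#         QUOTAS.rollback(context, reservations)
#         raise
#     else:
#         QUOTAS.commit(context, reservations)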
GROUP_QUOTAS = quota.GROUP_QUOTAS VALID_REMOVE_VOL_FROM_GROUP_STATUS = ( 'available', 'in-use', 'error', 'error_deleting') VALID_ADD_VOL_TO_GROUP_STATUS = ( 'available', 'in-use') VALID_CREATE_GROUP_SRC_SNAP_STATUS = (fields.SnapshotStatus.AVAILABLE,) VALID_CREATE_GROUP_SRC_GROUP_STATUS = ('available',) VA_LIST = objects.VolumeAttachmentList volume_manager_opts = [ cfg.IntOpt('migration_create_volume_timeout_secs', default=300, help='Timeout for creating the volume to migrate to ' 'when performing volume migration (seconds)'), cfg.BoolOpt('volume_service_inithost_offload', default=False, help='Offload pending volume delete during ' 'volume service startup'), cfg.StrOpt('zoning_mode', help="FC Zoning mode configured, only 'fabric' is " "supported now."), cfg.IntOpt('reinit_driver_count', default=3, help='Maximum times to reintialize the driver ' 'if volume initialization fails. The interval of retry is ' 'exponentially backoff, and will be 1s, 2s, 4s etc.'), cfg.IntOpt('init_host_max_objects_retrieval', default=0, help='Max number of volumes and snapshots to be retrieved ' 'per batch during volume manager host initialization. ' 'Query results will be obtained in batches from the ' 'database and not in one shot to avoid extreme memory ' 'usage. Set 0 to turn off this functionality.'), cfg.IntOpt('backend_stats_polling_interval', default=60, min=3, help='Time in seconds between requests for usage statistics ' 'from the backend. Be aware that generating usage ' 'statistics is expensive for some backends, so setting ' 'this value too low may adversely affect performance.'), ] volume_backend_opts = [ cfg.StrOpt('volume_driver', default='cinder.volume.drivers.lvm.LVMVolumeDriver', help='Driver to use for volume creation'), cfg.StrOpt('extra_capabilities', default='{}', help='User defined capabilities, a JSON formatted string ' 'specifying key/value pairs. The key/value pairs can ' 'be used by the CapabilitiesFilter to select between ' 'backends when requests specify volume types. For ' 'example, specifying a service level or the geographical ' 'location of a backend, then creating a volume type to ' 'allow the user to select by these different ' 'properties.'), cfg.BoolOpt('suppress_requests_ssl_warnings', default=False, help='Suppress requests library SSL certificate warnings.'), cfg.IntOpt('backend_native_threads_pool_size', default=20, min=20, help='Size of the native threads pool for the backend. ' 'Increase for backends that heavily rely on this, like ' 'the RBD driver.'), ] CONF = cfg.CONF CONF.register_opts(volume_manager_opts) CONF.register_opts(volume_backend_opts, group=config.SHARED_CONF_GROUP) # MAPPING is used for driver renames to keep backwards compatibilty. When a # driver is renamed, add a mapping here from the old name (the dict key) to the # new name (the dict value) for at least a cycle to allow time for deployments # to transition. MAPPING = { 'cinder.volume.drivers.dell_emc.vmax.iscsi.VMAXISCSIDriver': 'cinder.volume.drivers.dell_emc.powermax.iscsi.PowerMaxISCSIDriver', 'cinder.volume.drivers.dell_emc.vmax.fc.VMAXFCDriver': 'cinder.volume.drivers.dell_emc.powermax.fc.PowerMaxFCDriver', 'cinder.volume.drivers.fujitsu.eternus_dx_fc.FJDXFCDriver': 'cinder.volume.drivers.fujitsu.eternus_dx.eternus_dx_fc.FJDXFCDriver', 'cinder.volume.drivers.fujitsu.eternus_dx_iscsi.FJDXISCSIDriver': 'cinder.volume.drivers.fujitsu.eternus_dx.eternus_dx_iscsi.' 
'FJDXISCSIDriver', 'cinder.volume.drivers.dell_emc.vxflexos.driver.VxFlexOSDriver': 'cinder.volume.drivers.dell_emc.powerflex.driver.PowerFlexDriver', 'cinder.volume.drivers.zadara.ZadaraVPSAISCSIDriver': 'cinder.volume.drivers.zadara.zadara.ZadaraVPSAISCSIDriver', 'cinder.volume.drivers.nimble.NimbleISCSIDriver': 'cinder.volume.drivers.hpe.nimble.NimbleISCSIDriver', 'cinder.volume.drivers.nimble.NimbleFCDriver': 'cinder.volume.drivers.hpe.nimble.NimbleFCDriver', } def clean_volume_locks(func): @functools.wraps(func) def wrapper(self, context, volume, *args, **kwargs): skip_clean = False try: skip_clean = func(self, context, volume, *args, **kwargs) except Exception: # On quota failure volume will have been deleted from the DB skip_clean = not volume.deleted raise finally: if not skip_clean: # Most TooZ drivers clean after themselves (like etcd3), so # we clean TooZ file locks that are the same as oslo's. utils.clean_volume_file_locks(volume.id, self.driver) return wrapper def clean_snapshot_locks(func): @functools.wraps(func) def wrapper(self, context, snapshot, *args, **kwargs): skip_clean = False try: skip_clean = func(self, context, snapshot, *args, **kwargs) except Exception: skip_clean = not snapshot.deleted raise finally: if not skip_clean: utils.clean_snapshot_file_locks(snapshot.id, self.driver) return wrapper class VolumeManager(manager.CleanableManager, manager.SchedulerDependentManager): """Manages attachable block storage devices.""" RPC_API_VERSION = volume_rpcapi.VolumeAPI.RPC_API_VERSION FAILBACK_SENTINEL = 'default' target = messaging.Target(version=RPC_API_VERSION) # On cloning a volume, we shouldn't copy volume_type, consistencygroup # and volume_attachment, because the db sets that according to [field]_id, # which we do copy. We also skip some other values that are set during # creation of Volume object. _VOLUME_CLONE_SKIP_PROPERTIES = { 'id', '_name_id', 'name_id', 'name', 'status', 'attach_status', 'migration_status', 'volume_type', 'consistencygroup', 'volume_attachment', 'group', 'snapshots', 'use_quota'} def _get_service(self, host: Optional[str] = None, binary: str = constants.VOLUME_BINARY) -> objects.Service: host = host or self.host ctxt = context.get_admin_context() svc_host = volume_utils.extract_host(host, 'backend') return objects.Service.get_by_args(ctxt, svc_host, binary) def __init__(self, volume_driver=None, service_name: Optional[str] = None, *args, **kwargs): """Load the driver from the one specified in args, or from flags.""" # update_service_capabilities needs service_name to be volume super(VolumeManager, self).__init__( # type: ignore service_name='volume', *args, **kwargs) # NOTE(dulek): service_name=None means we're running in unit tests. 
service_name = service_name or 'backend_defaults' self.configuration = config.Configuration(volume_backend_opts, config_group=service_name) self._set_tpool_size( self.configuration.backend_native_threads_pool_size) self.stats: dict = {} self.service_uuid = None self.cluster: str self.host: str self.image_volume_cache: Optional[image_cache.ImageVolumeCache] if not volume_driver: # Get from configuration, which will get the default # if its not using the multi backend volume_driver = self.configuration.volume_driver if volume_driver in MAPPING: LOG.warning("Driver path %s is deprecated, update your " "configuration to the new path.", volume_driver) volume_driver = MAPPING[volume_driver] vol_db_empty = self._set_voldb_empty_at_startup_indicator( context.get_admin_context()) LOG.debug("Cinder Volume DB check: vol_db_empty=%s", vol_db_empty) # We pass the current setting for service.active_backend_id to # the driver on init, in case there was a restart or something curr_active_backend_id = None try: service = self._get_service() except exception.ServiceNotFound: # NOTE(jdg): This is to solve problems with unit tests LOG.info("Service not found for updating " "active_backend_id, assuming default " "for driver init.") else: curr_active_backend_id = service.active_backend_id self.service_uuid = service.uuid if self.configuration.suppress_requests_ssl_warnings: LOG.warning("Suppressing requests library SSL Warnings") rpu = requests.packages.urllib3 # type: ignore rpu.disable_warnings(rpu.exceptions.InsecureRequestWarning) rpu.disable_warnings(rpu.exceptions.InsecurePlatformWarning) self.key_manager = key_manager.API(CONF) self.driver = importutils.import_object( volume_driver, configuration=self.configuration, host=self.host, cluster_name=self.cluster, is_vol_db_empty=vol_db_empty, active_backend_id=curr_active_backend_id) if self.cluster and not self.driver.SUPPORTS_ACTIVE_ACTIVE: msg = _('Active-Active configuration is not currently supported ' 'by driver %s.') % volume_driver LOG.error(msg) raise exception.VolumeDriverException(message=msg) self.message_api = message_api.API() if CONF.profiler.enabled and profiler is not None: self.driver = profiler.trace_cls("driver")(self.driver) try: self.extra_capabilities = jsonutils.loads( self.driver.configuration.extra_capabilities) except AttributeError: self.extra_capabilities = {} except Exception: with excutils.save_and_reraise_exception(): LOG.error("Invalid JSON: %s", self.driver.configuration.extra_capabilities) # Check if a per-backend AZ has been specified backend_zone = self.driver.configuration.safe_get( 'backend_availability_zone') if backend_zone: self.availability_zone = backend_zone if self.driver.configuration.safe_get( 'image_volume_cache_enabled'): max_cache_size = self.driver.configuration.safe_get( 'image_volume_cache_max_size_gb') max_cache_entries = self.driver.configuration.safe_get( 'image_volume_cache_max_count') self.image_volume_cache = image_cache.ImageVolumeCache( self.db, cinder_volume.API(), max_cache_size, max_cache_entries, self.driver.capabilities.get('clone_across_pools', False) ) LOG.info('Image-volume cache enabled for host %(host)s.', {'host': self.host}) else: LOG.info('Image-volume cache disabled for host %(host)s.', {'host': self.host}) self.image_volume_cache = None def _count_allocated_capacity(self, ctxt: context.RequestContext, volume: objects.Volume) -> None: pool = volume_utils.extract_host(volume['host'], 'pool') if pool is None: # No pool name encoded in host, so this is a legacy # volume created before 
pool is introduced, ask # driver to provide pool info if it has such # knowledge and update the DB. try: pool = self.driver.get_pool(volume) except Exception: LOG.exception('Fetch volume pool name failed.', resource=volume) return if pool: new_host = volume_utils.append_host(volume['host'], pool) self.db.volume_update(ctxt, volume['id'], {'host': new_host}) else: # Otherwise, put them into a special fixed pool with # volume_backend_name being the pool name, if # volume_backend_name is None, use default pool name. # This is only for counting purpose, doesn't update DB. pool = (self.driver.configuration.safe_get( 'volume_backend_name') or volume_utils.extract_host( volume['host'], 'pool', True)) try: pool_stat = self.stats['pools'][pool] except KeyError: # First volume in the pool self.stats['pools'][pool] = dict( allocated_capacity_gb=0) pool_stat = self.stats['pools'][pool] pool_sum = pool_stat['allocated_capacity_gb'] pool_sum += volume['size'] self.stats['pools'][pool]['allocated_capacity_gb'] = pool_sum self.stats['allocated_capacity_gb'] += volume['size'] def _set_voldb_empty_at_startup_indicator( self, ctxt: context.RequestContext) -> bool: """Determine if the Cinder volume DB is empty. A check of the volume DB is done to determine whether it is empty or not at this point. :param ctxt: our working context """ vol_entries = self.db.volume_get_all(ctxt, None, 1, filters=None) if len(vol_entries) == 0: LOG.info("Determined volume DB was empty at startup.") return True else: LOG.info("Determined volume DB was not empty at startup.") return False def _sync_provider_info(self, ctxt, volumes, snapshots) -> None: # NOTE(jdg): For now this just updates provider_id, we can add more # items to the update if they're relevant but we need to be safe in # what we allow and add a list of allowed keys. 
Things that make sense # are provider_*, replication_status etc updates, snapshot_updates = self.driver.update_provider_info( volumes, snapshots) update: Any if updates: for volume in volumes: # NOTE(JDG): Make sure returned item is in this hosts volumes update = ( [updt for updt in updates if updt['id'] == volume['id']]) if update: update = update[0] self.db.volume_update( ctxt, update['id'], {'provider_id': update['provider_id']}) if snapshot_updates: for snap in snapshots: # NOTE(jdg): For now we only update those that have no entry if not snap.get('provider_id', None): update = ( [updt for updt in snapshot_updates if updt['id'] == snap['id']][0]) if update: self.db.snapshot_update( ctxt, update['id'], {'provider_id': update['provider_id']}) def _include_resources_in_cluster(self, ctxt) -> None: LOG.info('Including all resources from host %(host)s in cluster ' '%(cluster)s.', {'host': self.host, 'cluster': self.cluster}) num_vols = objects.VolumeList.include_in_cluster( ctxt, self.cluster, host=self.host) num_cgs = objects.ConsistencyGroupList.include_in_cluster( ctxt, self.cluster, host=self.host) num_gs = objects.GroupList.include_in_cluster( ctxt, self.cluster, host=self.host) num_cache = db.image_volume_cache_include_in_cluster( ctxt, self.cluster, host=self.host) LOG.info('%(num_vols)s volumes, %(num_cgs)s consistency groups, ' '%(num_gs)s generic groups and %(num_cache)s image ' 'volume caches from host %(host)s have been included in ' 'cluster %(cluster)s.', {'num_vols': num_vols, 'num_cgs': num_cgs, 'num_gs': num_gs, 'host': self.host, 'cluster': self.cluster, 'num_cache': num_cache}) def init_host(self, # type: ignore added_to_cluster=None, **kwargs) -> None: """Perform any required initialization.""" if not self.driver.supported: volume_utils.log_unsupported_driver_warning(self.driver) if not self.configuration.enable_unsupported_driver: LOG.error("Unsupported drivers are disabled." " You can re-enable by adding " "enable_unsupported_driver=True to the " "driver section in cinder.conf", resource={'type': 'driver', 'id': self.__class__.__name__}) return self._init_host(added_to_cluster, **kwargs) if not self.driver.initialized: reinit_count = 0 while reinit_count < CONF.reinit_driver_count: time.sleep(2 ** reinit_count) self._init_host(added_to_cluster, **kwargs) if self.driver.initialized: return reinit_count += 1 def _init_host(self, added_to_cluster=None, **kwargs) -> None: ctxt = context.get_admin_context() # If we have just added this host to a cluster we have to include all # our resources in that cluster. if added_to_cluster: self._include_resources_in_cluster(ctxt) LOG.info("Starting volume driver %(driver_name)s (%(version)s)", {'driver_name': self.driver.__class__.__name__, 'version': self.driver.get_version()}) try: self.driver.do_setup(ctxt) self.driver.check_for_setup_error() except Exception: LOG.exception("Failed to initialize driver.", resource={'type': 'driver', 'id': self.__class__.__name__}) # we don't want to continue since we failed # to initialize the driver correctly. 
return # Initialize backend capabilities list self.driver.init_capabilities() # Zero stats self.stats['pools'] = {} self.stats.update({'allocated_capacity_gb': 0}) # Batch retrieval volumes and snapshots num_vols: int = 0 num_snaps: int = 0 max_objs_num: int = 0 req_range: Union[list[int], range] = [0] req_limit = CONF.init_host_max_objects_retrieval or 0 use_batch_objects_retrieval: bool = req_limit > 0 if use_batch_objects_retrieval: # Get total number of volumes num_vols, __, __ = self._get_my_volumes_summary(ctxt) # Get total number of snapshots num_snaps, __ = self._get_my_snapshots_summary(ctxt) # Calculate highest number of the objects (volumes or snapshots) max_objs_num = max(num_vols, num_snaps) max_objs_num = typing.cast(int, max_objs_num) # Make batch request loop counter req_range = range(0, max_objs_num, req_limit) volumes_to_migrate = volume_migration.VolumeMigrationList() req_offset: int for req_offset in req_range: # Retrieve 'req_limit' number of objects starting from # 'req_offset' position volumes, snapshots = [], [] if use_batch_objects_retrieval: if req_offset < num_vols: volumes = self._get_my_volumes(ctxt, limit=req_limit, offset=req_offset) else: volumes = objects.VolumeList() if req_offset < num_snaps: snapshots = self._get_my_snapshots(ctxt, limit=req_limit, offset=req_offset) else: snapshots = objects.SnapshotList() # or retrieve all volumes and snapshots per single request else: volumes = self._get_my_volumes(ctxt) snapshots = self._get_my_snapshots(ctxt) self._sync_provider_info(ctxt, volumes, snapshots) # FIXME volume count for exporting is wrong try: for volume in volumes: # Account for volumes that have been provisioned already. if volume['host']: # calculate allocated capacity for driver self._count_allocated_capacity(ctxt, volume) try: if volume['status'] in ['in-use']: self.driver.ensure_export(ctxt, volume) except Exception: LOG.exception("Failed to re-export volume, " "setting to ERROR.", resource=volume) volume.conditional_update({'status': 'error'}, {'status': 'in-use'}) # All other cleanups are processed by parent class - # CleanableManager except Exception: LOG.exception("Error during re-export on driver init.", resource=volume) return if len(volumes): volumes_to_migrate.append(volumes, ctxt) del volumes del snapshots self.driver.set_throttle() # at this point the driver is considered initialized. # NOTE(jdg): Careful though because that doesn't mean # that an entry exists in the service table self.driver.set_initialized() # Keep the image tmp file clean when init host. backend_name = volume_utils.extract_host(self.service_topic_queue) assert backend_name is not None image_utils.cleanup_temporary_file(backend_name) # Migrate any ConfKeyManager keys based on fixed_key to the currently # configured key manager. 
self._add_to_threadpool(key_migration.migrate_fixed_key, volumes=volumes_to_migrate) # collect and publish service capabilities self.publish_service_capabilities(ctxt) LOG.info("Driver initialization completed successfully.", resource={'type': 'driver', 'id': self.driver.__class__.__name__}) # Make sure to call CleanableManager to do the cleanup super(VolumeManager, self).init_host(added_to_cluster=added_to_cluster, **kwargs) def init_host_with_rpc(self) -> None: LOG.info("Initializing RPC dependent components of volume " "driver %(driver_name)s (%(version)s)", {'driver_name': self.driver.__class__.__name__, 'version': self.driver.get_version()}) try: # Make sure the driver is initialized first volume_utils.log_unsupported_driver_warning(self.driver) volume_utils.require_driver_initialized(self.driver) except exception.DriverNotInitialized: LOG.error("Cannot complete RPC initialization because " "driver isn't initialized properly.", resource={'type': 'driver', 'id': self.driver.__class__.__name__}) return stats = self.driver.get_volume_stats(refresh=True) try: service = self._get_service() except exception.ServiceNotFound: with excutils.save_and_reraise_exception(): LOG.error("Service not found for updating replication_status.") if service.replication_status != fields.ReplicationStatus.FAILED_OVER: if stats and stats.get('replication_enabled', False): replication_status = fields.ReplicationStatus.ENABLED else: replication_status = fields.ReplicationStatus.DISABLED if replication_status != service.replication_status: service.replication_status = replication_status service.save() # Update the cluster replication status if necessary cluster = service.cluster if (cluster and cluster.replication_status != service.replication_status): cluster.replication_status = service.replication_status cluster.save() LOG.info("Driver post RPC initialization completed successfully.", resource={'type': 'driver', 'id': self.driver.__class__.__name__}) def _do_cleanup(self, ctxt: context.RequestContext, vo_resource: 'objects.base.CinderObject') -> bool: if isinstance(vo_resource, objects.Volume): if vo_resource.status == 'downloading': self.driver.clear_download(ctxt, vo_resource) elif vo_resource.status == 'uploading': # Set volume status to available or in-use. self.db.volume_update_status_based_on_attachment( ctxt, vo_resource.id) elif vo_resource.status == 'deleting': if CONF.volume_service_inithost_offload: # Offload all the pending volume delete operations to the # threadpool to prevent the main volume service thread # from being blocked. self._add_to_threadpool(self.delete_volume, ctxt, vo_resource, cascade=True) else: # By default, delete volumes sequentially self.delete_volume(ctxt, vo_resource, cascade=True) # We signal that we take care of cleaning the worker ourselves # (with set_workers decorator in delete_volume method) so # do_cleanup method doesn't need to remove it. return True # For Volume creating and downloading and for Snapshot downloading # statuses we have to set status to error if vo_resource.status in ('creating', 'downloading'): vo_resource.status = 'error' vo_resource.save() return False def is_working(self) -> bool: """Return if Manager is ready to accept requests. This is to inform Service class that in case of volume driver initialization failure the manager is actually down and not ready to accept any requests. 
""" return self.driver.initialized def _set_resource_host(self, resource: Union[objects.Volume, objects.Group]) -> None: """Set the host field on the DB to our own when we are clustered.""" if (resource.is_clustered and not volume_utils.hosts_are_equivalent(resource.host, self.host)): pool = volume_utils.extract_host(resource.host, 'pool') resource.host = volume_utils.append_host(self.host, pool) resource.save() @objects.Volume.set_workers def create_volume(self, context, volume, request_spec=None, filter_properties=None, allow_reschedule=True) -> ovo_fields.UUIDField: """Creates the volume.""" # Log about unsupported drivers volume_utils.log_unsupported_driver_warning(self.driver) # Make sure the host in the DB matches our own when clustered self._set_resource_host(volume) # Update our allocated capacity counter early to minimize race # conditions with the scheduler. self._update_allocated_capacity(volume) # We lose the host value if we reschedule, so keep it here original_host = volume.host context_elevated = context.elevated() if filter_properties is None: filter_properties = {} if request_spec is None: request_spec = objects.RequestSpec() try: # NOTE(flaper87): Driver initialization is # verified by the task itself. flow_engine = create_volume.get_flow( context_elevated, self, self.db, self.driver, self.scheduler_rpcapi, self.host, volume, allow_reschedule, context, request_spec, filter_properties, image_volume_cache=self.image_volume_cache, ) except Exception: msg = _("Create manager volume flow failed.") LOG.exception(msg, resource={'type': 'volume', 'id': volume.id}) raise exception.CinderException(msg) snapshot_id = request_spec.get('snapshot_id') source_volid = request_spec.get('source_volid') locked_action: Optional[str] if snapshot_id is not None: # Make sure the snapshot is not deleted until we are done with it. locked_action = "%s-%s" % (snapshot_id, 'delete_snapshot') elif source_volid is not None: # Make sure the volume is not deleted until we are done with it. locked_action = "%s-%s" % (source_volid, 'delete_volume') else: locked_action = None def _run_flow() -> None: # This code executes create volume flow. If something goes wrong, # flow reverts all job that was done and reraises an exception. # Otherwise, all data that was generated by flow becomes available # in flow engine's storage. with flow_utils.DynamicLogListener(flow_engine, logger=LOG): flow_engine.run() # NOTE(dulek): Flag to indicate if volume was rescheduled. Used to # decide if allocated_capacity should be incremented. rescheduled = False try: if locked_action is None: _run_flow() else: with coordination.COORDINATOR.get_lock(locked_action): _run_flow() except exception.VolumeNotFound: with excutils.save_and_reraise_exception(): utils.clean_volume_file_locks(source_volid, self.driver) except exception.SnapshotNotFound: with excutils.save_and_reraise_exception(): utils.clean_snapshot_file_locks(snapshot_id, self.driver) finally: try: flow_engine.storage.fetch('refreshed') except tfe.NotFound: # If there's no vol_ref, then flow is reverted. Lets check out # if rescheduling occurred. try: rescheduled = flow_engine.storage.get_revert_result( create_volume.OnFailureRescheduleTask.make_name( [create_volume.ACTION])) except tfe.NotFound: pass if rescheduled: # NOTE(geguileo): Volume was rescheduled so we need to update # volume stats because the volume wasn't created here. # Volume.host is None now, so we pass the original host value. 
self._update_allocated_capacity(volume, decrement=True, host=original_host) # Shared targets are only relevant for some connections. volume.shared_targets = self._driver_shares_targets() # TODO(geguileo): service_uuid won't be enough on Active/Active # deployments. There can be 2 services handling volumes from the same # backend. volume.service_uuid = self.service_uuid volume.save() LOG.info("Created volume successfully.", resource=volume) return volume.id def _driver_shares_targets(self): """Report if driver shares targets and needs locking on the connecting side. This is currently only relevant for iSCSI and for NVMe-oF. Relevant for iSCSI, because older iSCSI initiators didn't support disabling automatic scans, allowing race conditions between os-brick and cinder. In the NVMe-oF case it's even worse, because not only scans are always automatic, but also in most cases devices/namespaces cannot be removed from the subsystem, and they are automatically removed when unmapped via an asynchronous message from the storage system. By exposing the shared_targets characteristic in the volume, nova (and other consumers) can use os-brick's specific context manager to prevent these race conditions. Can return 3 possible values:: True => iSCSI protocol and shared targets False => iSCSI protocol and not shared targets None => Force shared targets locking in os-brick ignoring ISCSI_SUPPORTS_MANUAL_SCAN. Used by NVMe-oF. Drivers can return in their capabilities shared_targets set to ``None`` to force locking on the host side regardless of the protocol """ capabilities = self.driver.capabilities # We default to True to be on the safe side. shared = capabilities.get('shared_targets', True) # If driver says no shared_targets, honor it if shared is False: return False protocol = capabilities.get('storage_protocol') if protocol in constants.NVMEOF_VARIANTS: return None # True must be changed to None for NVMe-oF drivers # Only iSCSI drivers would need to do locking for shared targets return protocol in constants.ISCSI_VARIANTS def _check_is_our_resource(self, resource) -> None: if resource.host: res_backend = volume_utils.extract_host( resource.service_topic_queue) backend = volume_utils.extract_host(self.service_topic_queue) if res_backend != backend: msg = (_('Invalid %(resource)s: %(resource)s %(id)s is not ' 'local to %(backend)s.') % {'resource': resource.obj_name(), 'id': resource.id, 'backend': backend}) raise exception.Invalid(msg) def driver_delete_volume(self, volume): self.driver.delete_volume(volume) # Most TooZ drivers clean after themselves (like etcd3), so we don't # worry about those locks, only about TooZ file locks that are the same # as oslo's. utils.clean_volume_file_locks(volume.id, self.driver) def driver_delete_snapshot(self, snapshot): self.driver.delete_snapshot(snapshot) utils.clean_snapshot_file_locks(snapshot.id, self.driver) @clean_volume_locks @coordination.synchronized('{volume.id}-delete_volume') @objects.Volume.set_workers def delete_volume(self, context: context.RequestContext, volume: objects.volume.Volume, unmanage_only=False, cascade=False) -> Optional[bool]: """Deletes and unexports volume. 1. Delete a volume (normal case) Delete a volume and update quotas. 2. Delete a migration volume If deleting the volume in a migration, we want to skip quotas but we need database updates for the volume. 3. Delete a temp volume for backup If deleting the temp volume for backup, we want to skip quotas but we need database updates for the volume.
""" context = context.elevated() try: volume.refresh() except exception.VolumeNotFound: # NOTE(thingee): It could be possible for a volume to # be deleted when resuming deletes from init_host(). LOG.debug("Attempted delete of non-existent volume: %s", volume.id) return None if context.project_id != volume.project_id: project_id = volume.project_id else: project_id = context.project_id if volume['attach_status'] == fields.VolumeAttachStatus.ATTACHED: # Volume is still attached, need to detach first raise exception.VolumeAttached(volume_id=volume.id) self._check_is_our_resource(volume) if unmanage_only and volume.encryption_key_id is not None: raise exception.Invalid( reason=_("Unmanaging encrypted volumes is not " "supported.")) if unmanage_only and cascade: # This could be done, but is ruled out for now just # for simplicity. raise exception.Invalid( reason=_("Unmanage and cascade delete options " "are mutually exclusive.")) # We have temporary volumes that did not modify the quota on creation # and should not modify it when deleted. These temporary volumes are # created for volume migration between backends and for backups (from # in-use volume or snapshot). if volume.use_quota: notification = 'unmanage.' if unmanage_only else 'delete.' self._notify_about_volume_usage(context, volume, notification + 'start') try: volume_utils.require_driver_initialized(self.driver) self.driver.remove_export(context, volume) if unmanage_only: self.driver.unmanage(volume) elif cascade: LOG.debug('Performing cascade delete.') snapshots = objects.SnapshotList.get_all_for_volume(context, volume.id) for s in snapshots: if s.status != fields.SnapshotStatus.DELETING: self._clear_db(volume, 'error_deleting') msg = (_("Snapshot %(id)s was found in state " "%(state)s rather than 'deleting' during " "cascade delete.") % {'id': s.id, 'state': s.status}) raise exception.InvalidSnapshot(reason=msg) self.delete_snapshot(context, s) LOG.debug('Snapshots deleted, issuing volume delete') self.driver.delete_volume(volume) else: self.driver.delete_volume(volume) except exception.VolumeIsBusy: LOG.error("Unable to delete busy volume.", resource=volume) # If this is a destination volume, we have to clear the database # record to avoid user confusion. self._clear_db(volume, 'available') return True # Let caller know we skipped deletion except Exception: with excutils.save_and_reraise_exception(): # If this is a destination volume, we have to clear the # database record to avoid user confusion. new_status = 'error_deleting' if unmanage_only is True: new_status = 'error_unmanaging' self._clear_db(volume, new_status) # If deleting source/destination volume in a migration or a temp # volume for backup, we should skip quotas. if volume.use_quota: # Get reservations try: reservations = None if volume.status != 'error_managing_deleting': reserve_opts = {'volumes': -1, 'gigabytes': -volume.size} QUOTAS.add_volume_type_opts(context, reserve_opts, volume.volume_type_id) reservations = QUOTAS.reserve(context, project_id=project_id, **reserve_opts) except Exception: LOG.exception("Failed to update usages deleting volume.", resource=volume) volume.destroy() # If deleting source/destination volume in a migration or a temp # volume for backup, we should skip quotas. 
if volume.use_quota: self._notify_about_volume_usage(context, volume, notification + 'end') # Commit the reservations if reservations: QUOTAS.commit(context, reservations, project_id=project_id) self._update_allocated_capacity(volume, decrement=True) self.publish_service_capabilities(context) msg = "Deleted volume successfully." if unmanage_only: msg = "Unmanaged volume successfully." LOG.info(msg, resource=volume) return None def _clear_db(self, volume_ref, status) -> None: # This method is called when driver.unmanage() or # driver.delete_volume() fails in delete_volume(), so it is already # in the exception handling part. if volume_ref.is_migration_target(): volume_ref.destroy() LOG.error("Unable to delete the destination volume " "during volume migration, (NOTE: database " "record needs to be deleted).", resource=volume_ref) else: volume_ref.status = status volume_ref.save() def _revert_to_snapshot_generic(self, ctxt: context.RequestContext, volume, snapshot) -> None: """Generic way to revert volume to a snapshot. the framework will use the generic way to implement the revert to snapshot feature: 1. create a temporary volume from snapshot 2. mount two volumes to host 3. copy data from temporary volume to original volume 4. detach and destroy temporary volume """ temp_vol = None try: v_options = {'display_name': '[revert] temporary volume created ' 'from snapshot %s' % snapshot.id} ctxt = context.get_internal_tenant_context() or ctxt temp_vol = self.driver._create_temp_volume_from_snapshot( ctxt, volume, snapshot, volume_options=v_options, status=fields.VolumeStatus.IN_USE) self._copy_volume_data(ctxt, temp_vol, volume) self.driver_delete_volume(temp_vol) temp_vol.destroy() except Exception: with excutils.save_and_reraise_exception(): LOG.exception( "Failed to use snapshot %(snapshot)s to create " "a temporary volume and copy data to volume " " %(volume)s.", {'snapshot': snapshot.id, 'volume': volume.id}) if temp_vol and temp_vol.status == fields.VolumeStatus.IN_USE: self.driver_delete_volume(temp_vol) temp_vol.destroy() def _revert_to_snapshot(self, context, volume, snapshot) -> None: """Use driver or generic method to rollback volume.""" try: self.driver.revert_to_snapshot(context, volume, snapshot) except (NotImplementedError, AttributeError): LOG.info("Driver's 'revert_to_snapshot' is not found. " "Try to use copy-snapshot-to-volume method.") self._revert_to_snapshot_generic(context, volume, snapshot) def _create_backup_snapshot(self, context, volume) -> objects.Snapshot: kwargs = { 'volume_id': volume.id, 'user_id': context.user_id, 'project_id': context.project_id, 'status': fields.SnapshotStatus.CREATING, 'progress': '0%', 'volume_size': volume.size, 'display_name': '[revert] volume %s backup snapshot' % volume.id, 'display_description': 'This is only used for backup when ' 'reverting. If the reverting process ' 'failed, you can restore you data by ' 'creating new volume with this snapshot.', 'volume_type_id': volume.volume_type_id, 'encryption_key_id': volume.encryption_key_id, 'use_quota': False, # Don't use quota for temporary snapshot 'metadata': {} } snapshot = objects.Snapshot(context=context, **kwargs) snapshot.create() self.create_snapshot(context, snapshot) return snapshot def revert_to_snapshot(self, context, volume, snapshot) -> None: """Revert a volume to a snapshot. The process of reverting to snapshot consists of several steps: 1. create a snapshot for backup (in case of data loss) 2.1. use driver's specific logic to revert volume 2.2. 
try the generic way to revert volume if driver's method is missing 3. delete the backup snapshot """ backup_snapshot = None try: LOG.info("Start to perform revert to snapshot process.") self._notify_about_volume_usage(context, volume, "revert.start") self._notify_about_snapshot_usage(context, snapshot, "revert.start") # Create a snapshot which can be used to restore the volume # data by hand if revert process failed. if self.driver.snapshot_revert_use_temp_snapshot(): backup_snapshot = self._create_backup_snapshot(context, volume) self._revert_to_snapshot(context, volume, snapshot) except Exception as error: with excutils.save_and_reraise_exception(): self._notify_about_volume_usage(context, volume, "revert.end") self._notify_about_snapshot_usage(context, snapshot, "revert.end") msg = ('Volume %(v_id)s revert to ' 'snapshot %(s_id)s failed with %(error)s.') msg_args = {'v_id': volume.id, 's_id': snapshot.id, 'error': error} v_res = volume.update_single_status_where( 'error', 'reverting') if not v_res: msg_args = {"id": volume.id, "status": 'error'} msg += ("Failed to reset volume %(id)s " "status to %(status)s.") % msg_args s_res = snapshot.update_single_status_where( fields.SnapshotStatus.AVAILABLE, fields.SnapshotStatus.RESTORING) if not s_res: msg_args = {"id": snapshot.id, "status": fields.SnapshotStatus.AVAILABLE} msg += ("Failed to reset snapshot %(id)s " "status to %(status)s." % msg_args) LOG.exception(msg, msg_args) v_res = volume.update_single_status_where( 'available', 'reverting') if not v_res: msg_args = {"id": volume.id, "status": 'available'} msg = _("Revert finished, but failed to reset " "volume %(id)s status to %(status)s, " "please manually reset it.") % msg_args raise exception.BadResetResourceStatus(reason=msg) s_res = snapshot.update_single_status_where( fields.SnapshotStatus.AVAILABLE, fields.SnapshotStatus.RESTORING) if not s_res: msg_args = {"id": snapshot.id, "status": fields.SnapshotStatus.AVAILABLE} msg = _("Revert finished, but failed to reset " "snapshot %(id)s status to %(status)s, " "please manually reset it.") % msg_args raise exception.BadResetResourceStatus(reason=msg) if backup_snapshot: self.delete_snapshot(context, backup_snapshot) msg = ('Volume %(v_id)s reverted to snapshot %(snap_id)s ' 'successfully.') msg_args = {'v_id': volume.id, 'snap_id': snapshot.id} LOG.info(msg, msg_args) self._notify_about_volume_usage(context, volume, "revert.end") self._notify_about_snapshot_usage(context, snapshot, "revert.end") @objects.Snapshot.set_workers def create_snapshot(self, context, snapshot) -> ovo_fields.UUIDField: """Creates and exports the snapshot.""" context = context.elevated() self._notify_about_snapshot_usage( context, snapshot, "create.start") try: volume_utils.require_driver_initialized(self.driver) # Pass context so that drivers that want to use it, can, # but it is not a requirement for all drivers. 
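# --- Illustrative sketch (editor's addition, not part of the manager). The
# generic revert path above boils down to "temporary volume from snapshot ->
# copy back -> drop the temporary volume". The callables are made-up
# stand-ins for the driver/manager methods involved.
from typing import Any, Callable


def generic_revert(create_temp_from_snap: Callable[[], Any],
                   copy_data: Callable[[Any], None],
                   delete_temp: Callable[[Any], None]) -> None:
    """Copy a snapshot's data back over its parent volume."""
    temp_vol = create_temp_from_snap()  # 1. temporary volume from the snapshot
    try:
        copy_data(temp_vol)             # 2. temporary volume -> original volume
    finally:
        delete_temp(temp_vol)           # 3. temporary volume is always discarded
# --- end sketch ---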
snapshot.context = context model_update = self.driver.create_snapshot(snapshot) if model_update: snapshot.update(model_update) snapshot.save() except Exception as create_error: with excutils.save_and_reraise_exception(): snapshot.status = fields.SnapshotStatus.ERROR snapshot.save() self.message_api.create( context, action=message_field.Action.SNAPSHOT_CREATE, resource_type=message_field.Resource.VOLUME_SNAPSHOT, resource_uuid=snapshot['id'], exception=create_error, detail=message_field.Detail.SNAPSHOT_CREATE_ERROR) vol_ref = self.db.volume_get(context, snapshot.volume_id) if vol_ref.bootable: try: self.db.volume_glance_metadata_copy_to_snapshot( context, snapshot.id, snapshot.volume_id) except exception.GlanceMetadataNotFound: # If volume is not created from image, No glance metadata # would be available for that volume in # volume glance metadata table pass except exception.CinderException as ex: LOG.exception("Failed updating snapshot" " metadata using the provided volumes" " %(volume_id)s metadata", {'volume_id': snapshot.volume_id}, resource=snapshot) snapshot.status = fields.SnapshotStatus.ERROR snapshot.save() self.message_api.create( context, action=message_field.Action.SNAPSHOT_CREATE, resource_type=message_field.Resource.VOLUME_SNAPSHOT, resource_uuid=snapshot['id'], exception=ex, detail=message_field.Detail.SNAPSHOT_UPDATE_METADATA_FAILED ) raise exception.MetadataCopyFailure(reason=str(ex)) snapshot.status = fields.SnapshotStatus.AVAILABLE snapshot.progress = '100%' # Resync with the volume's DB value. This addresses the case where # the snapshot creation was in flight just prior to when the volume's # fixed_key encryption key ID was migrated to Barbican. snapshot.encryption_key_id = vol_ref.encryption_key_id snapshot.save() self._notify_about_snapshot_usage(context, snapshot, "create.end") LOG.info("Create snapshot completed successfully", resource=snapshot) return snapshot.id @clean_snapshot_locks @coordination.synchronized('{snapshot.id}-delete_snapshot') def delete_snapshot(self, context: context.RequestContext, snapshot: objects.Snapshot, unmanage_only: bool = False) -> Optional[bool]: """Deletes and unexports snapshot.""" context = context.elevated() snapshot._context = context project_id = snapshot.project_id self._notify_about_snapshot_usage( context, snapshot, "delete.start") try: volume_utils.require_driver_initialized(self.driver) # Pass context so that drivers that want to use it, can, # but it is not a requirement for all drivers. 
snapshot.context = context snapshot.save() if unmanage_only: self.driver.unmanage_snapshot(snapshot) else: self.driver.delete_snapshot(snapshot) except exception.SnapshotIsBusy as busy_error: LOG.error("Delete snapshot failed, due to snapshot busy.", resource=snapshot) snapshot.status = fields.SnapshotStatus.AVAILABLE snapshot.save() self.message_api.create( context, action=message_field.Action.SNAPSHOT_DELETE, resource_type=message_field.Resource.VOLUME_SNAPSHOT, resource_uuid=snapshot['id'], exception=busy_error) return True # Let caller know we skipped deletion except Exception as delete_error: with excutils.save_and_reraise_exception(): snapshot.status = fields.SnapshotStatus.ERROR_DELETING snapshot.save() self.message_api.create( context, action=message_field.Action.SNAPSHOT_DELETE, resource_type=message_field.Resource.VOLUME_SNAPSHOT, resource_uuid=snapshot['id'], exception=delete_error, detail=message_field.Detail.SNAPSHOT_DELETE_ERROR) # Get reservations reservations = None try: if snapshot.use_quota: if CONF.no_snapshot_gb_quota: reserve_opts = {'snapshots': -1} else: reserve_opts = { 'snapshots': -1, 'gigabytes': -snapshot.volume_size, } volume_ref = self.db.volume_get(context, snapshot.volume_id) QUOTAS.add_volume_type_opts(context, reserve_opts, volume_ref.get('volume_type_id')) reservations = QUOTAS.reserve(context, project_id=project_id, **reserve_opts) except Exception: reservations = None LOG.exception("Update snapshot usages failed.", resource=snapshot) self.db.volume_glance_metadata_delete_by_snapshot(context, snapshot.id) snapshot.destroy() self._notify_about_snapshot_usage(context, snapshot, "delete.end") # Commit the reservations if reservations: QUOTAS.commit(context, reservations, project_id=project_id) msg = "Delete snapshot completed successfully." if unmanage_only: msg = "Unmanage snapshot completed successfully." LOG.info(msg, resource=snapshot) return None @coordination.synchronized('{volume_id}') def attach_volume(self, context, volume_id, instance_uuid, host_name, mountpoint, mode, volume=None) -> objects.VolumeAttachment: """Updates db to show volume is attached.""" # FIXME(lixiaoy1): Remove this in v4.0 of RPC API. if volume is None: # For older clients, mimic the old behavior and look # up the volume by its volume_id. volume = objects.Volume.get_by_id(context, volume_id) # Get admin_metadata. This needs admin context. 
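# --- Illustrative sketch (editor's addition, not part of the manager).
# Snapshot deletion above only releases gigabytes when snapshot space is
# counted against the quota (no_snapshot_gb_quota is False); volume_size_gb
# is the parent volume's size, e.g. a hypothetical 5 GiB snapshot.
def snapshot_delete_reserve_opts(volume_size_gb: int,
                                 no_snapshot_gb_quota: bool) -> dict:
    """Quota deltas released when a snapshot is deleted."""
    if no_snapshot_gb_quota:
        return {'snapshots': -1}
    return {'snapshots': -1, 'gigabytes': -volume_size_gb}


# snapshot_delete_reserve_opts(5, False) == {'snapshots': -1, 'gigabytes': -5}
# snapshot_delete_reserve_opts(5, True)  == {'snapshots': -1}
# --- end sketch ---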
with volume.obj_as_admin(): volume_metadata = volume.admin_metadata # check the volume status before attaching if volume.status == 'attaching': if (volume_metadata.get('attached_mode') and volume_metadata.get('attached_mode') != mode): raise exception.InvalidVolume( reason=_("being attached by different mode")) host_name_sanitized = volume_utils.sanitize_hostname( host_name) if host_name else None if instance_uuid: attachments = ( VA_LIST.get_all_by_instance_uuid( context, instance_uuid)) else: attachments = ( VA_LIST.get_all_by_host( context, host_name_sanitized)) if attachments: # check if volume<->instance mapping is already tracked in DB for attachment in attachments: if attachment['volume_id'] == volume_id: volume.status = 'in-use' volume.save() return attachment if (volume.status == 'in-use' and not volume.multiattach and not volume.migration_status): raise exception.InvalidVolume( reason=_("volume is already attached and multiple attachments " "are not enabled")) self._notify_about_volume_usage(context, volume, "attach.start") attachment = volume.begin_attach(mode) if instance_uuid and not uuidutils.is_uuid_like(instance_uuid): attachment.attach_status = ( fields.VolumeAttachStatus.ERROR_ATTACHING) attachment.save() raise exception.InvalidUUID(uuid=instance_uuid) try: if volume_metadata.get('readonly') == 'True' and mode != 'ro': raise exception.InvalidVolumeAttachMode(mode=mode, volume_id=volume.id) volume_utils.require_driver_initialized(self.driver) LOG.info('Attaching volume %(volume_id)s to instance ' '%(instance)s at mountpoint %(mount)s on host ' '%(host)s.', {'volume_id': volume_id, 'instance': instance_uuid, 'mount': mountpoint, 'host': host_name_sanitized}, resource=volume) except Exception as excep: with excutils.save_and_reraise_exception(): self.message_api.create( context, message_field.Action.ATTACH_VOLUME, resource_uuid=volume_id, exception=excep) attachment.attach_status = ( fields.VolumeAttachStatus.ERROR_ATTACHING) attachment.save() volume = attachment.finish_attach( instance_uuid, host_name_sanitized, mountpoint, mode) self._notify_about_volume_usage(context, volume, "attach.end") LOG.info("Attach volume completed successfully.", resource=volume) return attachment @coordination.synchronized('{volume_id}-{f_name}') def detach_volume(self, context, volume_id, attachment_id=None, volume=None) -> None: """Updates db to show volume is detached.""" # TODO(vish): refactor this into a more general "unreserve" # FIXME(lixiaoy1): Remove this in v4.0 of RPC API. if volume is None: # For older clients, mimic the old behavior and look up the volume # by its volume_id. volume = objects.Volume.get_by_id(context, volume_id) if attachment_id: try: attachment = objects.VolumeAttachment.get_by_id(context, attachment_id) except exception.VolumeAttachmentNotFound: LOG.info("Volume detach called, but volume not attached.", resource=volume) # We need to make sure the volume status is set to the correct # status. It could be in detaching status now, and we don't # want to leave it there. volume.finish_detach(attachment_id) return else: # We can try and degrade gracefully here by trying to detach # a volume without the attachment_id here if the volume only has # one attachment. This is for backwards compatibility. attachments = volume.volume_attachment if len(attachments) > 1: # There are more than 1 attachments for this volume # we have to have an attachment id. 
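# --- Illustrative sketch (editor's addition, not part of the manager). The
# two admin-metadata checks in attach_volume() above reduce to the following
# validation; ValueError stands in for the cinder exceptions and the string
# flags are the values read from volume.admin_metadata.
from typing import Optional


def validate_attach_mode(requested_mode: str,
                         attached_mode: Optional[str],
                         readonly_flag: Optional[str]) -> None:
    """Raise ValueError when the requested attach mode is not allowed."""
    # A volume in 'attaching' state must keep being attached in the same mode.
    if attached_mode and attached_mode != requested_mode:
        raise ValueError('being attached by different mode')
    # A read-only volume may only be attached with mode 'ro'.
    if readonly_flag == 'True' and requested_mode != 'ro':
        raise ValueError("read-only volume requires mode 'ro'")
# --- end sketch ---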
msg = _("Detach volume failed: More than one attachment, " "but no attachment_id provided.") LOG.error(msg, resource=volume) raise exception.InvalidVolume(reason=msg) elif len(attachments) == 1: attachment = attachments[0] else: # there aren't any attachments for this volume. # so set the status to available and move on. LOG.info("Volume detach called, but volume not attached.", resource=volume) volume.status = 'available' volume.attach_status = fields.VolumeAttachStatus.DETACHED volume.save() return self._notify_about_volume_usage(context, volume, "detach.start") try: volume_utils.require_driver_initialized(self.driver) LOG.info('Detaching volume %(volume_id)s from instance ' '%(instance)s.', {'volume_id': volume_id, 'instance': attachment.get('instance_uuid')}, resource=volume) except Exception: with excutils.save_and_reraise_exception(): self.db.volume_attachment_update( context, attachment.get('id'), { 'attach_status': fields.VolumeAttachStatus.ERROR_DETACHING}) # NOTE(jdg): We used to do an ensure export here to # catch upgrades while volumes were attached (E->F) # this was necessary to convert in-use volumes from # int ID's to UUID's. Don't need this any longer # We're going to remove the export here # (delete the iscsi target) try: volume_utils.require_driver_initialized(self.driver) self.driver.remove_export(context.elevated(), volume) except exception.DriverNotInitialized: with excutils.save_and_reraise_exception(): LOG.exception("Detach volume failed, due to " "uninitialized driver.", resource=volume) except Exception as ex: LOG.exception("Detach volume failed, due to " "remove-export failure.", resource=volume) raise exception.RemoveExportException(volume=volume_id, reason=str(ex)) volume.finish_detach(attachment.id) self._notify_about_volume_usage(context, volume, "detach.end") LOG.info("Detach volume completed successfully.", resource=volume) def _create_image_cache_volume_entry(self, ctx, volume_ref, image_id, image_meta) -> None: """Create a new image-volume and cache entry for it. This assumes that the image has already been downloaded and stored in the volume described by the volume_ref. """ assert self.image_volume_cache is not None cache_entry = self.image_volume_cache.get_entry(ctx, volume_ref, image_id, image_meta) if cache_entry: LOG.debug('Cache entry already exists with image ID %' '(image_id)s', {'image_id': image_id}) return image_volume = None try: if not self.image_volume_cache.ensure_space(ctx, volume_ref): LOG.warning('Unable to ensure space for image-volume in' ' cache. Will skip creating entry for image' ' %(image)s on %(service)s.', {'image': image_id, 'service': volume_ref.service_topic_queue}) return image_volume = self._clone_image_volume(ctx, volume_ref, image_meta) if not image_volume: LOG.warning('Unable to clone image_volume for image ' '%(image_id)s will not create cache entry.', {'image_id': image_id}) return self.image_volume_cache.create_cache_entry( ctx, image_volume, image_id, image_meta ) except exception.CinderException as e: LOG.warning('Failed to create new image-volume cache entry.' ' Error: %(exception)s', {'exception': e}) if image_volume: self.delete_volume(ctx, image_volume) def _clone_image_volume(self, ctx: context.RequestContext, volume, image_meta: dict) -> Optional[objects.Volume]: # TODO: should this return None? 
volume_type_id: str = volume.get('volume_type_id') reserve_opts: dict = {'volumes': 1, 'gigabytes': volume.size} QUOTAS.add_volume_type_opts(ctx, reserve_opts, volume_type_id) reservations = QUOTAS.reserve(ctx, **reserve_opts) # NOTE(yikun): Skip 'snapshot_id', 'source_volid' keys to avoid # creating tmp img vol from wrong snapshot or wrong source vol. skip: set[str] = {'snapshot_id', 'source_volid'} skip.update(self._VOLUME_CLONE_SKIP_PROPERTIES) try: new_vol_values = {k: volume[k] for k in set(volume.keys()) - skip} new_vol_values['volume_type_id'] = volume_type_id new_vol_values['attach_status'] = ( fields.VolumeAttachStatus.DETACHED) new_vol_values['status'] = 'creating' new_vol_values['project_id'] = ctx.project_id new_vol_values['display_name'] = 'image-%s' % image_meta['id'] new_vol_values['source_volid'] = volume.id LOG.debug('Creating image volume entry: %s.', new_vol_values) image_volume = objects.Volume(context=ctx, **new_vol_values) image_volume.create() except Exception as ex: LOG.exception('Create clone_image_volume: %(volume_id)s ' 'for image %(image_id)s, ' 'failed (Exception: %(except)s)', {'volume_id': volume.id, 'image_id': image_meta['id'], 'except': ex}) QUOTAS.rollback(ctx, reservations) return None QUOTAS.commit(ctx, reservations, project_id=new_vol_values['project_id']) try: self.create_volume(ctx, image_volume, allow_reschedule=False) image_volume.refresh() if image_volume.status != 'available': raise exception.InvalidVolume(_('Volume is not available.')) self.db.volume_admin_metadata_update(ctx.elevated(), image_volume.id, {'readonly': 'True'}, False) return image_volume except exception.CinderException: LOG.exception('Failed to clone volume %(volume_id)s for ' 'image %(image_id)s.', {'volume_id': volume.id, 'image_id': image_meta['id']}) try: self.delete_volume(ctx, image_volume) except exception.CinderException: LOG.exception('Could not delete the image volume %(id)s.', {'id': volume.id}) return None def _clone_image_volume_and_add_location(self, ctx, volume, image_service, image_meta) -> bool: """Create a cloned volume and register its location to the image.""" if (image_meta['disk_format'] != 'raw' or image_meta['container_format'] != 'bare'): return False image_volume_context = ctx if self.driver.configuration.image_upload_use_internal_tenant: internal_ctx = context.get_internal_tenant_context() if internal_ctx: image_volume_context = internal_ctx image_volume = self._clone_image_volume(image_volume_context, volume, image_meta) if not image_volume: return False # The image_owner metadata should be set before uri is added to # the image so glance cinder store can check its owner. 
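# --- Illustrative sketch (editor's addition, not part of the manager). The
# dict comprehension above copies every field of the source volume except a
# skip-set, so the temporary image volume never inherits keys such as
# snapshot_id or source_volid. The field names in the example are made up.
def filtered_clone_values(volume_fields: dict, skip: set) -> dict:
    """Copy volume fields for a clone, dropping the skipped keys."""
    return {k: v for k, v in volume_fields.items() if k not in skip}


# filtered_clone_values({'size': 1, 'snapshot_id': 'abc', 'display_name': 'x'},
#                       {'snapshot_id', 'source_volid'}) ==
#     {'size': 1, 'display_name': 'x'}
# --- end sketch ---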
image_volume_meta = {'image_owner': ctx.project_id} self.db.volume_metadata_update(image_volume_context, image_volume.id, image_volume_meta, False) image_registered = None # retrieve store information from extra-specs store_id = volume.volume_type.extra_specs.get('image_service:store_id') location_metadata = {} if store_id: location_metadata['store'] = store_id uri = 'cinder://%s/%s' % (store_id, image_volume.id) else: uri = 'cinder://%s' % image_volume.id try: image_registered = image_service.add_location( ctx, image_meta['id'], uri, location_metadata) except (exception.NotAuthorized, exception.Invalid, exception.NotFound): LOG.exception('Failed to register image volume location ' '%(uri)s.', {'uri': uri}) if not image_registered: LOG.warning('Registration of image volume URI %(uri)s ' 'to image %(image_id)s failed.', {'uri': uri, 'image_id': image_meta['id']}) try: self.delete_volume(image_volume_context, image_volume) except exception.CinderException: LOG.exception('Could not delete failed image volume ' '%(id)s.', {'id': image_volume.id}) return False image_volume_meta['glance_image_id'] = image_meta['id'] self.db.volume_metadata_update(image_volume_context, image_volume.id, image_volume_meta, False) return True def copy_volume_to_image(self, context: context.RequestContext, volume_id: str, image_meta: dict) -> None: """Uploads the specified volume to Glance. image_meta is a dictionary containing the following keys: 'id', 'container_format', 'disk_format' """ payload: dict = {'volume_id': volume_id, 'image_id': image_meta['id']} image_service = None try: volume = objects.Volume.get_by_id(context, volume_id) volume_utils.require_driver_initialized(self.driver) image_service, image_id = \ glance.get_remote_image_service(context, image_meta['id']) if (self.driver.configuration.image_upload_use_cinder_backend and self._clone_image_volume_and_add_location( context, volume, image_service, image_meta)): LOG.debug("Registered image volume location to glance " "image-id: %(image_id)s.", {'image_id': image_meta['id']}, resource=volume) else: self.driver.copy_volume_to_image(context, volume, image_service, image_meta) LOG.debug("Uploaded volume to glance image-id: %(image_id)s.", {'image_id': image_meta['id']}, resource=volume) except Exception as error: LOG.error("Upload volume to image encountered an error " "(image-id: %(image_id)s).", {'image_id': image_meta['id']}, resource=volume) self.message_api.create( context, message_field.Action.COPY_VOLUME_TO_IMAGE, resource_uuid=volume_id, exception=error, detail=message_field.Detail.FAILED_TO_UPLOAD_VOLUME) if image_service is not None: # Deletes the image if it is in queued or saving state self._delete_image(context, image_meta['id'], image_service) with excutils.save_and_reraise_exception(): payload['message'] = str(error) finally: self.db.volume_update_status_based_on_attachment(context, volume_id) LOG.info("Copy volume to image completed successfully.", resource=volume) def _delete_image(self, context, image_id, image_service) -> None: """Deletes an image stuck in queued or saving state.""" try: image_meta = image_service.show(context, image_id) image_status = image_meta.get('status') if image_status == 'queued' or image_status == 'saving': LOG.warning("Deleting image in unexpected status: " "%(image_status)s.", {'image_status': image_status}, resource={'type': 'image', 'id': image_id}) image_service.delete(context, image_id) except Exception: LOG.warning("Image delete encountered an error.", exc_info=True, resource={'type': 'image', 'id': 
image_id}) def _parse_connection_options(self, context, volume: objects.Volume, conn_info: dict) -> dict: # Add qos_specs to connection info typeid = volume.volume_type_id specs = None if typeid: res = volume_types.get_volume_type_qos_specs(typeid) qos = res['qos_specs'] # only pass qos_specs that is designated to be consumed by # front-end, or both front-end and back-end. if qos and qos.get('consumer') in ['front-end', 'both']: specs = qos.get('specs') # NOTE(mnaser): The following configures for per-GB QoS if specs is not None: volume_size = int(volume.size) tune_opts = ('read_iops_sec', 'read_bytes_sec', 'write_iops_sec', 'write_bytes_sec', 'total_iops_sec', 'total_bytes_sec') for option in tune_opts: option_per_gb = '%s_per_gb' % option option_per_gb_min = '%s_per_gb_min' % option option_max = '%s_max' % option if option_per_gb in specs: minimum_value = int(specs.pop(option_per_gb_min, 0)) value = int(specs[option_per_gb]) * volume_size per_gb_value = max(minimum_value, value) max_value = int(specs.pop(option_max, per_gb_value)) specs[option] = min(per_gb_value, max_value) specs.pop(option_per_gb) qos_spec = dict(qos_specs=specs) conn_info['data'].update(qos_spec) # Add access_mode to connection info volume_metadata = volume.admin_metadata access_mode = volume_metadata.get('attached_mode') if access_mode is None: # NOTE(zhiyan): client didn't call 'os-attach' before access_mode = ('ro' if volume_metadata.get('readonly') == 'True' else 'rw') conn_info['data']['access_mode'] = access_mode # Add encrypted flag to connection_info if not set in the driver. if conn_info['data'].get('encrypted') is None: encrypted = bool(volume.encryption_key_id) conn_info['data']['encrypted'] = encrypted # Add cacheable flag to connection_info if not set in the driver. if typeid: cacheable = volume_types.get_volume_type_extra_specs( typeid).get('cacheable') if conn_info['data'].get('cacheable') is not None: driver_setting = bool(conn_info['data']['cacheable']) # override a True driver_setting but respect False conn_info['data']['cacheable'] = (driver_setting and (cacheable == ' True')) else: conn_info['data']['cacheable'] = (cacheable == ' True') # Add discard flag to connection_info if not set in the driver and # configured to be reported. if conn_info['data'].get('discard') is None: discard_supported = (self.driver.configuration .safe_get('report_discard_supported')) if discard_supported: conn_info['data']['discard'] = True return conn_info def initialize_connection(self, context, volume: objects.Volume, connector: dict) -> dict: """Prepare volume for connection from host represented by connector. This method calls the driver initialize_connection and returns it to the caller. The connector parameter is a dictionary with information about the host that will connect to the volume in the following format: .. code:: json { "ip": "", "initiator": "" } ip: the ip address of the connecting machine initiator: the iscsi initiator name of the connecting machine. This can be None if the connecting machine does not support iscsi connections. driver is responsible for doing any necessary security setup and returning a connection_info dictionary in the following format: .. code:: json { "driver_volume_type": "", "data": "" } driver_volume_type: a string to identify the type of volume. This can be used by the calling code to determine the strategy for connecting to the volume. This could be 'iscsi', 'rbd', etc. data: this is the data that the calling code will use to connect to the volume. 
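For illustration only (this is an editor's example, not a format guaranteed by any driver), an iSCSI backend might return something like the following; all values are made up:

.. code:: json

    {
        "driver_volume_type": "iscsi",
        "data": {
            "target_iqn": "iqn.2010-10.org.openstack:volume-00000001",
            "target_portal": "192.0.2.10:3260",
            "target_lun": 1,
            "access_mode": "rw"
        }
    }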
Keep in mind that this will be serialized to json in various places, so it should not contain any non-json data types. """ # TODO(jdg): Add deprecation warning volume_utils.require_driver_initialized(self.driver) try: self.driver.validate_connector(connector) except exception.InvalidConnectorException as err: raise exception.InvalidInput(reason=str(err)) except Exception as err: err_msg = (_("Validate volume connection failed " "(error: %(err)s).") % {'err': err}) LOG.exception(err_msg, resource=volume) raise exception.VolumeBackendAPIException(data=err_msg) try: model_update = self.driver.create_export(context.elevated(), volume, connector) except exception.CinderException as ex: msg = _("Create export of volume failed (%s)") % ex.msg LOG.exception(msg, resource=volume) raise exception.VolumeBackendAPIException(data=msg) try: if model_update: volume.update(model_update) volume.save() except Exception as ex: LOG.exception("Model update failed.", resource=volume) try: self.driver.remove_export(context.elevated(), volume) except Exception: LOG.exception('Could not remove export after DB model failed.') raise exception.ExportFailure(reason=str(ex)) try: conn_info = self.driver.initialize_connection(volume, connector) except Exception as err: err_msg = (_("Driver initialize connection failed " "(error: %(err)s).") % {'err': err}) LOG.exception(err_msg, resource=volume) self.driver.remove_export(context.elevated(), volume) raise exception.VolumeBackendAPIException(data=err_msg) conn_info = self._parse_connection_options(context, volume, conn_info) conn_info['data']['enforce_multipath'] = connector.get( 'enforce_multipath', False) LOG.info("Initialize volume connection completed successfully.", resource=volume) return conn_info def initialize_connection_snapshot(self, ctxt, snapshot_id: ovo_fields.UUIDField, connector: dict) -> dict: volume_utils.require_driver_initialized(self.driver) snapshot = objects.Snapshot.get_by_id(ctxt, snapshot_id) try: self.driver.validate_connector(connector) except exception.InvalidConnectorException as err: raise exception.InvalidInput(reason=str(err)) except Exception as err: err_msg = (_("Validate snapshot connection failed " "(error: %(err)s).") % {'err': err}) LOG.exception(err_msg, resource=snapshot) raise exception.VolumeBackendAPIException(data=err_msg) model_update = None try: LOG.debug("Snapshot %s: creating export.", snapshot.id) model_update = self.driver.create_export_snapshot( ctxt.elevated(), snapshot, connector) if model_update: snapshot.provider_location = model_update.get( 'provider_location', None) snapshot.provider_auth = model_update.get( 'provider_auth', None) snapshot.save() except exception.CinderException as ex: msg = _("Create export of snapshot failed (%s)") % ex.msg LOG.exception(msg, resource=snapshot) raise exception.VolumeBackendAPIException(data=msg) try: if model_update: snapshot.update(model_update) snapshot.save() except exception.CinderException as ex: LOG.exception("Model update failed.", resource=snapshot) raise exception.ExportFailure(reason=str(ex)) try: conn = self.driver.initialize_connection_snapshot(snapshot, connector) except Exception as err: try: err_msg = (_('Unable to fetch connection information from ' 'backend: %(err)s') % {'err': err}) LOG.error(err_msg) LOG.debug("Cleaning up failed connect initialization.") self.driver.remove_export_snapshot(ctxt.elevated(), snapshot) except Exception as ex: ex_msg = (_('Error encountered during cleanup ' 'of a failed attach: %(ex)s') % {'ex': ex}) LOG.error(ex_msg) raise 
exception.VolumeBackendAPIException(data=ex_msg) raise exception.VolumeBackendAPIException(data=err_msg) conn['data']['enforce_multipath'] = connector.get( 'enforce_multipath', False) LOG.info("Initialize snapshot connection completed successfully.", resource=snapshot) return conn def terminate_connection(self, context, volume_id: ovo_fields.UUIDField, connector: dict, force=False) -> None: """Cleanup connection from host represented by connector. The format of connector is the same as for initialize_connection. """ volume_utils.require_driver_initialized(self.driver) volume_ref = objects.Volume.get_by_id(context, volume_id) try: self.driver.terminate_connection(volume_ref, connector, force=force) except Exception as err: err_msg = (_('Terminate volume connection failed: %(err)s') % {'err': err}) LOG.exception(err_msg, resource=volume_ref) raise exception.VolumeBackendAPIException(data=err_msg) LOG.info("Terminate volume connection completed successfully.", resource=volume_ref) def terminate_connection_snapshot(self, ctxt, snapshot_id: ovo_fields.UUIDField, connector: dict, force=False) -> None: volume_utils.require_driver_initialized(self.driver) snapshot = objects.Snapshot.get_by_id(ctxt, snapshot_id) try: self.driver.terminate_connection_snapshot(snapshot, connector, force=force) except Exception as err: err_msg = (_('Terminate snapshot connection failed: %(err)s') % {'err': err}) LOG.exception(err_msg, resource=snapshot) raise exception.VolumeBackendAPIException(data=err_msg) LOG.info("Terminate snapshot connection completed successfully.", resource=snapshot) def remove_export(self, context, volume_id: ovo_fields.UUIDField) -> None: """Removes an export for a volume.""" volume_utils.require_driver_initialized(self.driver) volume_ref = self.db.volume_get(context, volume_id) try: self.driver.remove_export(context, volume_ref) except Exception: msg = _("Remove volume export failed.") LOG.exception(msg, resource=volume_ref) raise exception.VolumeBackendAPIException(data=msg) LOG.info("Remove volume export completed successfully.", resource=volume_ref) def remove_export_snapshot(self, ctxt, snapshot_id: ovo_fields.UUIDField) -> None: """Removes an export for a snapshot.""" volume_utils.require_driver_initialized(self.driver) snapshot = objects.Snapshot.get_by_id(ctxt, snapshot_id) try: self.driver.remove_export_snapshot(ctxt, snapshot) except Exception: msg = _("Remove snapshot export failed.") LOG.exception(msg, resource=snapshot) raise exception.VolumeBackendAPIException(data=msg) LOG.info("Remove snapshot export completed successfully.", resource=snapshot) def accept_transfer(self, context, volume_id, new_user, new_project, no_snapshots=False) -> dict: volume_utils.require_driver_initialized(self.driver) # NOTE(jdg): need elevated context as we haven't "given" the vol # yet volume_ref = self.db.volume_get(context.elevated(), volume_id) # NOTE(jdg): Some drivers tie provider info (CHAP) to tenant # for those that do allow them to return updated model info model_update = self.driver.accept_transfer(context, volume_ref, new_user, new_project) if model_update: try: self.db.volume_update(context.elevated(), volume_id, model_update) except exception.CinderException: with excutils.save_and_reraise_exception(): LOG.exception("Update volume model for " "transfer operation failed.", resource=volume_ref) self.db.volume_update(context.elevated(), volume_id, {'status': 'error'}) LOG.info("Transfer volume completed successfully.", resource=volume_ref) return model_update def _connect_device(self, 
conn: dict) -> dict: use_multipath = self.configuration.use_multipath_for_image_xfer device_scan_attempts = self.configuration.num_volume_device_scan_tries protocol = conn['driver_volume_type'] connector = volume_utils.brick_get_connector( protocol, use_multipath=use_multipath, device_scan_attempts=device_scan_attempts, conn=conn) vol_handle = connector.connect_volume(conn['data']) root_access = True if not connector.check_valid_device(vol_handle['path'], root_access): if isinstance(vol_handle['path'], str): raise exception.DeviceUnavailable( path=vol_handle['path'], reason=(_("Unable to access the backend storage via the " "path %(path)s.") % {'path': vol_handle['path']})) else: raise exception.DeviceUnavailable( path=None, reason=(_("Unable to access the backend storage via file " "handle."))) return {'conn': conn, 'device': vol_handle, 'connector': connector} def _attach_volume(self, ctxt, volume, properties, remote=False, attach_encryptor=False) -> dict: """Attach a volume. Returns a dict of attachment info or raises an exception. """ status = volume['status'] if remote: rpcapi = volume_rpcapi.VolumeAPI() try: conn = rpcapi.initialize_connection(ctxt, volume, properties) except Exception: with excutils.save_and_reraise_exception(): LOG.error("Failed to attach volume %(vol)s.", {'vol': volume['id']}) self.db.volume_update(ctxt, volume['id'], {'status': status}) else: conn = self.initialize_connection(ctxt, volume, properties) attach_info = None try: attach_info = self._connect_device(conn) if attach_encryptor and ( volume_types.is_encrypted(ctxt, volume.volume_type_id)): encryption = self.db.volume_encryption_metadata_get( ctxt.elevated(), volume.id) if encryption: volume_utils.brick_attach_volume_encryptor(ctxt, attach_info, encryption) except Exception: with excutils.save_and_reraise_exception(): LOG.error("Failed to attach volume encryptor" " %(vol)s.", {'vol': volume['id']}) self._detach_volume(ctxt, attach_info, volume, properties, force=True, remote=remote) attach_info = typing.cast(dict, attach_info) return attach_info def _detach_volume(self, ctxt, attach_info, volume, properties, force=False, remote=False, attach_encryptor=False) -> None: if attach_info: connector = attach_info['connector'] if attach_encryptor and ( volume_types.is_encrypted(ctxt, volume.volume_type_id)): encryption = self.db.volume_encryption_metadata_get( ctxt.elevated(), volume.id) if encryption: volume_utils.brick_detach_volume_encryptor(attach_info, encryption) connector.disconnect_volume(attach_info['conn']['data'], attach_info['device'], force=force) if remote: rpcapi = volume_rpcapi.VolumeAPI() rpcapi.terminate_connection(ctxt, volume, properties, force=force) rpcapi.remove_export(ctxt, volume) else: try: self.terminate_connection(ctxt, volume['id'], properties, force=force) self.remove_export(ctxt, volume['id']) except Exception as err: with excutils.save_and_reraise_exception(): LOG.error('Unable to terminate volume connection: ' '%(err)s.', {'err': err}) def _copy_volume_data(self, ctxt, src_vol, dest_vol, remote=None) -> None: """Copy data from src_vol to dest_vol.""" LOG.debug('_copy_volume_data %(src)s -> %(dest)s.', {'src': src_vol['name'], 'dest': dest_vol['name']}) attach_encryptor = False # If the encryption method or key is changed, we have to # copy data through dm-crypt. 
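# --- Illustrative sketch (editor's addition, not part of the manager). Data
# is only routed through an encryptor when the source and destination volume
# types do not share the same encryption, which is what the
# volume_types_encryption_changed() check just below decides. Encryption is
# simplified here to a plain dict per volume type.
from typing import Optional


def needs_encryptor(src_encryption: Optional[dict],
                    dest_encryption: Optional[dict]) -> bool:
    """True when the copy must go through dm-crypt."""
    return src_encryption != dest_encryption


# Same (or no) encryption on both sides -> plain block copy:
#     needs_encryptor(None, None) is False
# Retype from an unencrypted to an encrypted type -> attach the encryptor:
#     needs_encryptor(None, {'provider': 'luks'}) is True
# --- end sketch ---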
if volume_types.volume_types_encryption_changed( ctxt, src_vol.volume_type_id, dest_vol.volume_type_id): attach_encryptor = True use_multipath = self.configuration.use_multipath_for_image_xfer enforce_multipath = self.configuration.enforce_multipath_for_image_xfer properties = volume_utils.brick_get_connector_properties( use_multipath, enforce_multipath) dest_remote = remote in ['dest', 'both'] dest_attach_info = self._attach_volume( ctxt, dest_vol, properties, remote=dest_remote, attach_encryptor=attach_encryptor) try: src_remote = remote in ['src', 'both'] src_attach_info = self._attach_volume( ctxt, src_vol, properties, remote=src_remote, attach_encryptor=attach_encryptor) except Exception: with excutils.save_and_reraise_exception(): LOG.error("Failed to attach source volume for copy.") self._detach_volume(ctxt, dest_attach_info, dest_vol, properties, remote=dest_remote, attach_encryptor=attach_encryptor, force=True) # Check the backend capabilities of migration destination host. rpcapi = volume_rpcapi.VolumeAPI() capabilities = rpcapi.get_capabilities(ctxt, dest_vol.service_topic_queue, False) sparse_copy_volume = bool(capabilities and capabilities.get('sparse_copy_volume', False)) try: size_in_mb = int(src_vol['size']) * units.Ki # vol size is in GB volume_utils.copy_volume(src_attach_info['device']['path'], dest_attach_info['device']['path'], size_in_mb, self.configuration.volume_dd_blocksize, sparse=sparse_copy_volume) except Exception: with excutils.save_and_reraise_exception(): LOG.error("Failed to copy volume %(src)s to %(dest)s.", {'src': src_vol['id'], 'dest': dest_vol['id']}) finally: try: self._detach_volume(ctxt, dest_attach_info, dest_vol, properties, force=True, remote=dest_remote, attach_encryptor=attach_encryptor) finally: self._detach_volume(ctxt, src_attach_info, src_vol, properties, force=True, remote=src_remote, attach_encryptor=attach_encryptor) def _migrate_volume_generic(self, ctxt: context.RequestContext, volume, backend, new_type_id) -> None: rpcapi = volume_rpcapi.VolumeAPI() # Create new volume on remote host tmp_skip = {'snapshot_id', 'source_volid'} skip = {'host', 'cluster_name', 'availability_zone'} skip.update(tmp_skip) skip.update(self._VOLUME_CLONE_SKIP_PROPERTIES) new_vol_values = {k: volume[k] for k in set(volume.keys()) - skip} if new_type_id: new_vol_values['volume_type_id'] = new_type_id if volume_types.volume_types_encryption_changed( ctxt, volume.volume_type_id, new_type_id): encryption_key_id = volume_utils.create_encryption_key( ctxt, self.key_manager, new_type_id) new_vol_values['encryption_key_id'] = encryption_key_id dst_service = self._get_service(backend['host']) new_volume = objects.Volume( context=ctxt, host=backend['host'], availability_zone=dst_service.availability_zone, cluster_name=backend.get('cluster_name'), status='creating', attach_status=fields.VolumeAttachStatus.DETACHED, migration_status='target:%s' % volume['id'], use_quota=False, # Don't use quota for temporary volume **new_vol_values ) new_volume.create() rpcapi.create_volume(ctxt, new_volume, None, None, allow_reschedule=False) # Wait for new_volume to become ready starttime = time.time() deadline = starttime + CONF.migration_create_volume_timeout_secs new_volume.refresh() tries = 0 while new_volume.status != 'available': tries += 1 now = time.time() if new_volume.status == 'error': msg = _("failed to create new_volume on destination") self._clean_temporary_volume(ctxt, volume, new_volume, clean_db_only=True) raise exception.VolumeMigrationFailed(reason=msg) elif now > 
deadline: msg = _("timeout creating new_volume on destination") self._clean_temporary_volume(ctxt, volume, new_volume, clean_db_only=True) raise exception.VolumeMigrationFailed(reason=msg) else: time.sleep(tries ** 2) new_volume.refresh() # Set skipped value to avoid calling # function except for _create_raw_volume tmp_skipped_values = {k: volume[k] for k in tmp_skip if volume.get(k)} if tmp_skipped_values: new_volume.update(tmp_skipped_values) new_volume.save() # Copy the source volume to the destination volume try: attachments = volume.volume_attachment if not attachments: # Pre- and post-copy driver-specific actions self.driver.before_volume_copy(ctxt, volume, new_volume, remote='dest') self._copy_volume_data(ctxt, volume, new_volume, remote='dest') self.driver.after_volume_copy(ctxt, volume, new_volume, remote='dest') # The above call is synchronous so we complete the migration self.migrate_volume_completion(ctxt, volume, new_volume, error=False) else: nova_api = compute.API() # This is an async call to Nova, which will call the completion # when it's done for attachment in attachments: instance_uuid = attachment['instance_uuid'] nova_api.update_server_volume(ctxt, instance_uuid, volume.id, new_volume.id) except Exception: with excutils.save_and_reraise_exception(): LOG.exception( "Failed to copy volume %(vol1)s to %(vol2)s", { 'vol1': volume.id, 'vol2': new_volume.id}) self._clean_temporary_volume(ctxt, volume, new_volume) def _clean_temporary_volume(self, ctxt, volume, new_volume, clean_db_only=False) -> None: # If we're in the migrating phase, we need to cleanup # destination volume because source volume is remaining if volume.migration_status == 'migrating': try: if clean_db_only: # The temporary volume is not created, only DB data # is created new_volume.destroy() else: # The temporary volume is already created rpcapi = volume_rpcapi.VolumeAPI() rpcapi.delete_volume(ctxt, new_volume) except exception.VolumeNotFound: LOG.info("Couldn't find the temporary volume " "%(vol)s in the database. There is no need " "to clean up this volume.", {'vol': new_volume.id}) else: # If we're in the completing phase don't delete the # destination because we may have already deleted the # source! But the migration_status in database should # be cleared to handle volume after migration failure try: new_volume.migration_status = None new_volume.save() except exception.VolumeNotFound: LOG.info("Couldn't find destination volume " "%(vol)s in the database. The entry might be " "successfully deleted during migration " "completion phase.", {'vol': new_volume.id}) LOG.warning("Failed to migrate volume. The destination " "volume %(vol)s is not deleted since the " "source volume may have been deleted.", {'vol': new_volume.id}) def migrate_volume_completion(self, ctxt: context.RequestContext, volume, new_volume, error=False) -> ovo_fields.UUIDField: try: volume_utils.require_driver_initialized(self.driver) except exception.DriverNotInitialized: with excutils.save_and_reraise_exception(): volume.migration_status = 'error' volume.save() # NOTE(jdg): Things get a little hairy in here and we do a lot of # things based on volume previous-status and current-status. 
        # At some point this should all be reworked, but for now we need to
        # maintain backward compatibility and NOT change the API, so we're
        # going to try and make this work the best we can.
        LOG.debug("migrate_volume_completion: completing migration for "
                  "volume %(vol1)s (temporary volume %(vol2)s)",
                  {'vol1': volume.id, 'vol2': new_volume.id})
        rpcapi = volume_rpcapi.VolumeAPI()

        orig_volume_status = volume.previous_status

        if error:
            LOG.info("migrate_volume_completion is cleaning up an error "
                     "for volume %(vol1)s (temporary volume %(vol2)s)",
                     {'vol1': volume['id'], 'vol2': new_volume.id})
            rpcapi.delete_volume(ctxt, new_volume)
            updates = {'migration_status': 'error',
                       'status': orig_volume_status}
            volume.update(updates)
            volume.save()
            return volume.id

        volume.migration_status = 'completing'
        volume.save()

        volume_attachments = []

        # NOTE(jdg): With the new attach flow, we deleted the attachment, so
        # the original volume should now be listed as available; we still
        # need to do the magic swappy thing of name.id etc., but we're done
        # with the original attachment record.

        # In the "old flow", at this point the orig_volume_status will be
        # in-use and the current status will be retyping.  This is sort of a
        # misleading deal, because Nova has already called terminate
        # connection.

        # In the new attach flow, Nova has gone ahead and deleted the
        # attachment; this is the source/original volume, we've already
        # migrated the data, and we're basically done with it at this point.
        # We don't need to issue the detach to toggle the status.
        if orig_volume_status == 'in-use' and volume.status != 'available':
            for attachment in volume.volume_attachment:
                # Save the attachments the volume currently has
                volume_attachments.append(attachment)
                try:
                    self.detach_volume(ctxt, volume.id, attachment.id)
                except Exception as ex:
                    LOG.error("Detach migration source volume "
                              "%(volume.id)s from attachment "
                              "%(attachment.id)s failed: %(err)s",
                              {'err': ex,
                               'volume.id': volume.id,
                               'attachment.id': attachment.id},
                              resource=volume)
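# ---------------------------------------------------------------------------
# Illustrative aside (not part of the manager flow): a minimal, standalone
# sketch of the wait pattern used in _migrate_volume_generic above -- poll a
# status callable until it reports 'available', fail fast on 'error' or on a
# deadline, and back off quadratically between retries.  The function and
# argument names here are hypothetical, not Cinder APIs.
import time


def _wait_until_available(get_status, timeout_secs):
    """Poll get_status() until 'available', 'error', or timeout."""
    deadline = time.time() + timeout_secs
    tries = 0
    while True:
        status = get_status()
        if status == 'available':
            return
        if status == 'error':
            raise RuntimeError('new volume entered error state')
        if time.time() > deadline:
            raise RuntimeError('timed out waiting for new volume')
        tries += 1
        # Quadratic backoff, mirroring the time.sleep(tries ** 2) used above.
        time.sleep(tries ** 2)
# ---------------------------------------------------------------------------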
        # Give the driver (new_volume) a chance to update things as needed
        # after a successful migration.
        # Note this needs to go through RPC to the host of the new volume;
        # the current host and driver object are for the "existing" volume.
        rpcapi.update_migrated_volume(ctxt, volume, new_volume,
                                      orig_volume_status)
        volume.refresh()
        new_volume.refresh()

        # Swap src and dest DB records so we can continue using the src id
        # and asynchronously delete the destination id.
        updated_new = volume.finish_volume_migration(new_volume)
        updates = {'status': orig_volume_status,
                   'previous_status': volume.status,
                   'migration_status': 'success'}

        # NOTE(jdg): With the new attachment APIs, Nova will delete the
        # attachment for the source volume for us before calling
        # migration-completion; now we just need to do the swapping on the
        # volume record, but don't jack with the attachments other than
        # updating volume_id.

        # In the old flow, at this point the volumes are in attaching and
        # deleting status (dest/new is deleting, but we've done our magic
        # swappy thing so it's a bit confusing, but it does unwind properly
        # when you step through it).

        # In the new flow we simplified this and we don't need it; instead
        # of doing a bunch of swapping we just do attachment-create/delete
        # on the Nova side, and then here we just do the ID swaps that are
        # necessary to maintain the old behavior.

        # Restore the attachments for the old flow use-case.
        if orig_volume_status == 'in-use' and volume.status in ['available',
                                                                'reserved',
                                                                'attaching']:
            for attachment in volume_attachments:
                LOG.debug('Re-attaching: %s', attachment)
                # This is just a DB state toggle; the volume is actually
                # already attached and in-use, and the new attachment flow
                # won't allow this.
                rpcapi.attach_volume(ctxt, volume,
                                     attachment.instance_uuid,
                                     attachment.attached_host,
                                     attachment.mountpoint,
                                     attachment.attach_mode or 'rw')
                # At this point we have now done almost all of our swapping
                # and state-changes.  The target volume is now marked back
                # to "in-use", the destination/worker volume is now in
                # deleting state, and the next steps will finish the
                # deletion steps.
        volume.update(updates)
        volume.save()

        # Asynchronous deletion of the source volume in the back-end (now
        # pointed to by the target volume id).
        try:
            rpcapi.delete_volume(ctxt, updated_new)
        except Exception as ex:
            LOG.error('Failed to request async delete of migration source '
                      'vol %(vol)s: %(err)s',
                      {'vol': volume.id, 'err': ex})

        # For the new flow this is really the key part: we just take the
        # attachments to the worker/destination volume that we created and
        # used for the libvirt migration and swap their volume_id entries to
        # correspond with the volume.id swap we did.
        for attachment in VA_LIST.get_all_by_volume_id(ctxt, updated_new.id):
            attachment.volume_id = volume.id
            attachment.save()

        # Phewww.. that was easy!  Once we get to a point where the old
        # attach flow can go away we really should rewrite all of this.
        LOG.info("Complete-Migrate volume completed successfully.",
                 resource=volume)
        return volume.id

    def _can_use_driver_migration(self, diff):
        """Return whether we can use driver-assisted migration on a retype."""
        # We can if there's no retype or there are no differences in the
        # types.
        if not diff:
            return True

        extra_specs = diff.get('extra_specs')
        qos = diff.get('qos_specs')
        enc = diff.get('encryption')

        # We can't if QoS or encryption changes, and we can if there are no
        # extra specs changes.
        if qos or enc or not extra_specs:
            return not (qos or enc)

        # We can use driver assisted migration if we only change the backend
        # name, and the AZ.
extra_specs = extra_specs.copy() extra_specs.pop('volume_backend_name', None) extra_specs.pop('RESKEY:availability_zones', None) return not extra_specs def migrate_volume(self, ctxt: context.RequestContext, volume, host, force_host_copy: bool = False, new_type_id=None, diff=None) -> None: """Migrate the volume to the specified host (called on source host).""" try: volume_utils.require_driver_initialized(self.driver) except exception.DriverNotInitialized: with excutils.save_and_reraise_exception(): volume.migration_status = 'error' volume.save() model_update = None moved = False status_update = None if volume.status in ('retyping', 'maintenance'): status_update = {'status': volume.previous_status} volume.migration_status = 'migrating' volume.save() # Do not attempt driver assisted migration when the volume has # attachments. Nova must be involved when migrating an attached # volume, and that's handled by the generic migration code. if (len(volume.volume_attachment) == 0 and not force_host_copy and self._can_use_driver_migration(diff)): try: LOG.debug("Issue driver.migrate_volume.", resource=volume) moved, model_update = self.driver.migrate_volume(ctxt, volume, host) if moved: dst_service = self._get_service(host['host']) updates = { 'host': host['host'], 'cluster_name': host.get('cluster_name'), 'migration_status': 'success', 'availability_zone': dst_service.availability_zone, 'previous_status': volume.status, } if status_update: updates.update(status_update) if model_update: updates.update(model_update) if new_type_id: updates['volume_type_id'] = new_type_id volume.update(updates) volume.save() except Exception: with excutils.save_and_reraise_exception(): updates = {'migration_status': 'error'} if status_update: updates.update(status_update) volume.update(updates) volume.save() if not moved: try: self._migrate_volume_generic(ctxt, volume, host, new_type_id) except Exception: with excutils.save_and_reraise_exception(): updates = {'migration_status': 'error'} if status_update: updates.update(status_update) volume.update(updates) volume.save() LOG.info("Migrate volume completed successfully.", resource=volume) def _report_driver_status(self, context: context.RequestContext) -> None: # It's possible during live db migration that the self.service_uuid # value isn't set (we didn't restart services), so we'll go ahead # and make this a part of the service periodic if not self.service_uuid: # We hack this with a try/except for unit tests temporarily try: service = self._get_service() self.service_uuid = service.uuid except exception.ServiceNotFound: LOG.warning("Attempt to update service_uuid " "resulted in a Service NotFound " "exception, service_uuid field on " "volumes will be NULL.") if not self.driver.initialized: if self.driver.configuration.config_group is None: config_group = '' else: config_group = ('(config name %s)' % self.driver.configuration.config_group) LOG.warning("Update driver status failed: %(config_group)s " "is uninitialized.", {'config_group': config_group}, resource={'type': 'driver', 'id': self.driver.__class__.__name__}) return slowmsg = "The " + self.driver.__class__.__name__ + " volume " \ "driver's get_volume_stats operation ran for " \ "%(seconds).1f seconds. This may indicate a " \ "performance problem with the backend which can lead " \ "to instability." 
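# ---------------------------------------------------------------------------
# Illustrative aside: a simplified, stdlib-only stand-in for the timing
# decorator applied to get_stats() below.  It warns when the wrapped call
# runs longer than a threshold, which is how slow driver get_volume_stats()
# calls are surfaced here.  This is a sketch, not the oslo.utils helper
# actually used by the manager.
import functools
import logging
import time

_LOG = logging.getLogger(__name__)


def warn_if_slow(threshold_secs, message):
    """Log a warning when the decorated call exceeds threshold_secs."""
    def decorator(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            start = time.monotonic()
            try:
                return func(*args, **kwargs)
            finally:
                elapsed = time.monotonic() - start
                if elapsed > threshold_secs:
                    # The message may use %(seconds).1f style formatting.
                    _LOG.warning(message, {'seconds': elapsed})
        return wrapper
    return decorator
# ---------------------------------------------------------------------------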
@timeutils.time_it( LOG, log_level=logging.WARN, message=slowmsg, min_duration=CONF.backend_stats_polling_interval / 2) def get_stats(): return self.driver.get_volume_stats(refresh=True) volume_stats = get_stats() if self.extra_capabilities: if "pools" in volume_stats: for pool in volume_stats["pools"]: pool.update(self.extra_capabilities) else: volume_stats.update(self.extra_capabilities) if volume_stats: # NOTE(xyang): If driver reports replication_status to be # 'error' in volume_stats, get model updates from driver # and update db if volume_stats.get('replication_status') == ( fields.ReplicationStatus.ERROR): filters = self._get_cluster_or_host_filters() groups = objects.GroupList.get_all_replicated( context, filters=filters) group_model_updates, volume_model_updates = ( self.driver.get_replication_error_status(context, groups)) for grp_update in group_model_updates: try: grp_obj = objects.Group.get_by_id( context, grp_update['group_id']) grp_obj.update(grp_update) grp_obj.save() except exception.GroupNotFound: # Group may be deleted already. Log a warning # and continue. LOG.warning("Group %(grp)s not found while " "updating driver status.", {'grp': grp_update['group_id']}, resource={ 'type': 'group', 'id': grp_update['group_id']}) for vol_update in volume_model_updates: try: vol_obj = objects.Volume.get_by_id( context, vol_update['volume_id']) vol_obj.update(vol_update) vol_obj.save() except exception.VolumeNotFound: # Volume may be deleted already. Log a warning # and continue. LOG.warning("Volume %(vol)s not found while " "updating driver status.", {'vol': vol_update['volume_id']}, resource={ 'type': 'volume', 'id': vol_update['volume_id']}) # Append volume stats with 'allocated_capacity_gb' self._append_volume_stats(volume_stats) # Append cacheable flag for iSCSI/FC/NVMe-oF and only when # cacheable is not set in driver level if (volume_stats.get('storage_protocol') in constants.CACHEABLE_PROTOCOLS): if volume_stats.get('pools'): for pool in volume_stats.get('pools'): if pool.get('cacheable') is None: pool['cacheable'] = True else: if volume_stats.get('cacheable') is None: volume_stats['cacheable'] = True # Append filter and goodness function if needed volume_stats = ( self._append_filter_goodness_functions(volume_stats)) # queue it to be sent to the Schedulers. self.update_service_capabilities(volume_stats) def _append_volume_stats(self, vol_stats) -> None: pools = vol_stats.get('pools', None) if pools: if isinstance(pools, list): for pool in pools: pool_name = pool['pool_name'] try: pool_stats = self.stats['pools'][pool_name] except KeyError: # Pool not found in volume manager pool_stats = dict(allocated_capacity_gb=0) pool.update(pool_stats) else: raise exception.ProgrammingError( reason='Pools stats reported by the driver are not ' 'reported in a list') # For drivers that are not reporting their stats by pool we will use # the data from the special fixed pool created by # _count_allocated_capacity. elif self.stats.get('pools'): vol_stats.update(next(iter(self.stats['pools'].values()))) # This is a special subcase of the above no pool case that happens when # we don't have any volumes yet. 
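# ---------------------------------------------------------------------------
# Illustrative aside: the per-pool accounting pattern used by
# _append_volume_stats and _update_allocated_capacity -- keep a running
# 'allocated_capacity_gb' per pool and create the pool entry on first use.
# The stats structure below is a simplified stand-in for self.stats.
def add_allocated_capacity(stats, pool_name, size_gb):
    """Add size_gb to the pool's allocated capacity, creating it if needed."""
    pools = stats.setdefault('pools', {})
    try:
        pools[pool_name]['allocated_capacity_gb'] += size_gb
    except KeyError:
        # Pool not seen before: start counting from this volume, never
        # letting the initial value go negative (mirrors max(vol_size, 0)).
        pools[pool_name] = {'allocated_capacity_gb': max(size_gb, 0)}


# Example: two volumes land in 'pool-a', one in 'pool-b'.
_stats = {}
add_allocated_capacity(_stats, 'pool-a', 10)
add_allocated_capacity(_stats, 'pool-a', 5)
add_allocated_capacity(_stats, 'pool-b', 20)
assert _stats['pools']['pool-a']['allocated_capacity_gb'] == 15
# ---------------------------------------------------------------------------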
else: vol_stats.update(self.stats) vol_stats.pop('pools', None) def _append_filter_goodness_functions(self, volume_stats: dict) -> dict: """Returns volume_stats updated as needed.""" # Append filter_function if needed if 'filter_function' not in volume_stats: volume_stats['filter_function'] = ( self.driver.get_filter_function()) # Append goodness_function if needed if 'goodness_function' not in volume_stats: volume_stats['goodness_function'] = ( self.driver.get_goodness_function()) return volume_stats @periodic_task.periodic_task(spacing=CONF.backend_stats_polling_interval) def publish_service_capabilities(self, context: context.RequestContext) -> None: """Collect driver status and then publish.""" self._report_driver_status(context) self._publish_service_capabilities(context) def _notify_about_volume_usage(self, context: context.RequestContext, volume: objects.Volume, event_suffix: str, extra_usage_info: Optional[dict] = None) \ -> None: volume_utils.notify_about_volume_usage( context, volume, event_suffix, extra_usage_info=extra_usage_info, host=self.host) def _notify_about_snapshot_usage(self, context: context.RequestContext, snapshot: objects.Snapshot, event_suffix: str, extra_usage_info: Optional[dict] = None) \ -> None: volume_utils.notify_about_snapshot_usage( context, snapshot, event_suffix, extra_usage_info=extra_usage_info, host=self.host) def _notify_about_group_usage(self, context: context.RequestContext, group: objects.Group, event_suffix: str, volumes=None, extra_usage_info=None) -> None: volume_utils.notify_about_group_usage( context, group, event_suffix, extra_usage_info=extra_usage_info, host=self.host) if not volumes: volumes = objects.VolumeList.get_all_by_generic_group( context, group.id) if volumes: for volume in volumes: volume_utils.notify_about_volume_usage( context, volume, event_suffix, extra_usage_info=extra_usage_info, host=self.host) def _notify_about_group_snapshot_usage( self, context: context.RequestContext, group_snapshot: objects.GroupSnapshot, event_suffix: str, snapshots: Optional[list] = None, extra_usage_info=None) -> None: volume_utils.notify_about_group_snapshot_usage( context, group_snapshot, event_suffix, extra_usage_info=extra_usage_info, host=self.host) if not snapshots: snapshots = objects.SnapshotList.get_all_for_group_snapshot( context, group_snapshot.id) if snapshots: for snapshot in snapshots: volume_utils.notify_about_snapshot_usage( context, snapshot, event_suffix, extra_usage_info=extra_usage_info, host=self.host) def extend_volume(self, context: context.RequestContext, volume: objects.Volume, new_size: int, reservations) -> None: try: volume_utils.require_driver_initialized(self.driver) except exception.DriverNotInitialized: with excutils.save_and_reraise_exception(): volume.status = 'error_extending' volume.save() self._notify_about_volume_usage(context, volume, "resize.start") try: self.driver.extend_volume(volume, new_size) except exception.TargetUpdateFailed: # We just want to log this but continue on with quota commit LOG.warning('Volume extended but failed to update target.') except Exception: LOG.exception("Extend volume failed.", resource=volume) self.extend_volume_completion(context, volume, new_size, reservations, error=True) return self.extend_volume_completion(context, volume, new_size, reservations, error=False) attachments = volume.volume_attachment or [] # If instance_uuid field is None on attachment, it means that the # volume is used by Glance Cinder store instance_uuids = [attachment.instance_uuid for attachment in 
attachments if attachment.instance_uuid] # If the volume is not attached to any instances, we should not send # external events to Nova if instance_uuids: nova_api = compute.API() nova_api.extend_volume(context, instance_uuids, volume.id) def extend_volume_completion(self, context: context.RequestContext, volume: objects.Volume, new_size: int, reservations: list[str], error: bool) -> None: project_id = volume.project_id size_increase = new_size - volume.size if error: LOG.error("Failed to extend volume.", resource=volume) self.message_api.create( context, message_field.Action.EXTEND_VOLUME, resource_uuid=volume.id, detail=message_field.Detail.DRIVER_FAILED_EXTEND) try: self.db.volume_update(context, volume.id, {'status': 'error_extending'}) raise exception.CinderException(_("Volume %s: Error trying " "to extend volume") % volume.id) finally: QUOTAS.rollback(context, reservations, project_id=project_id) return QUOTAS.commit(context, reservations, project_id=project_id) if not volume.volume_attachment: orig_volume_status = 'available' else: orig_volume_status = 'in-use' volume.update({'size': int(new_size), 'status': orig_volume_status}) volume.save() pool = volume_utils.extract_host(volume.host, 'pool') if pool is None: # Legacy volume, put them into default pool pool = self.driver.configuration.safe_get( 'volume_backend_name') or volume_utils.extract_host( volume.host, 'pool', True) try: self.stats['pools'][pool]['allocated_capacity_gb'] += size_increase except KeyError: self.stats['pools'][pool] = dict( allocated_capacity_gb=size_increase) self._notify_about_volume_usage( context, volume, "resize.end", extra_usage_info={'size': int(new_size)}) LOG.info("Extend volume completed successfully.", resource=volume) def _is_our_backend(self, host: str, cluster_name: str): return ((not cluster_name and volume_utils.hosts_are_equivalent(self.driver.host, host)) or (cluster_name and volume_utils.hosts_are_equivalent(self.driver.cluster_name, cluster_name))) def retype(self, context: context.RequestContext, volume: objects.Volume, new_type_id: str, host, migration_policy: str = 'never', reservations=None, old_reservations=None) -> None: def _retype_error(context, volume, old_reservations, new_reservations, status_update) -> None: try: volume.update(status_update) volume.save() finally: if old_reservations: QUOTAS.rollback(context, old_reservations) if new_reservations: QUOTAS.rollback(context, new_reservations) previous_status = ( volume.previous_status or volume.status) status_update = {'status': previous_status} if context.project_id != volume.project_id: project_id = volume.project_id else: project_id = context.project_id try: volume_utils.require_driver_initialized(self.driver) except exception.DriverNotInitialized: with excutils.save_and_reraise_exception(): # NOTE(flaper87): Other exceptions in this method don't # set the volume status to error. Should that be done # here? Setting the volume back to it's original status # for now. volume.update(status_update) volume.save() # We already got the new reservations new_reservations = reservations # If volume types have the same contents, no need to do anything. 
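# ---------------------------------------------------------------------------
# Illustrative aside: the reservation pattern used by extend_volume /
# extend_volume_completion above -- quota is reserved before the driver call,
# then either committed (size change accepted) or rolled back (error).
# 'quotas' below is a hypothetical object with reserve/commit/rollback, a
# simplified stand-in for the real QUOTAS engine.
def complete_extend(quotas, ctx, volume, new_size, reservations, error):
    if error:
        # Driver failed to grow the volume: give the reserved gigabytes back.
        quotas.rollback(ctx, reservations, project_id=volume['project_id'])
        volume['status'] = 'error_extending'
        return
    # Driver succeeded: make the reservation permanent and record the size.
    quotas.commit(ctx, reservations, project_id=volume['project_id'])
    volume['size'] = int(new_size)
    volume['status'] = 'in-use' if volume.get('attachments') else 'available'
# ---------------------------------------------------------------------------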
# Use the admin context to be able to access volume extra_specs retyped = False diff, all_equal = volume_types.volume_types_diff( context.elevated(), volume.volume_type_id, new_type_id) if all_equal: retyped = True # Call driver to try and change the type retype_model_update = None # NOTE(jdg): Check to see if the destination host or cluster (depending # if it's the volume is in a clustered backend or not) is the same as # the current. If it's not don't call the driver.retype method, # otherwise drivers that implement retype may report success, but it's # invalid in the case of a migrate. # We assume that those that support pools do this internally # so we strip off the pools designation if (not retyped and not diff.get('encryption') and self._is_our_backend(host['host'], host.get('cluster_name'))): try: new_type = objects.VolumeType.get_by_id( context.elevated(), new_type_id) with volume.obj_as_admin(): ret = self.driver.retype(context, volume, new_type, diff, host) # Check if the driver retype provided a model update or # just a retype indication if type(ret) is tuple: retyped, retype_model_update = ret else: retyped = ret if retyped: LOG.info("Volume %s: retyped successfully.", volume.id) except Exception: retyped = False LOG.exception("Volume %s: driver error when trying to " "retype, falling back to generic " "mechanism.", volume.id) # We could not change the type, so we need to migrate the volume, where # the destination volume will be of the new type if not retyped: if migration_policy == 'never': _retype_error(context, volume, old_reservations, new_reservations, status_update) msg = _("Retype requires migration but is not allowed.") raise exception.VolumeMigrationFailed(reason=msg) snaps = objects.SnapshotList.get_all_for_volume(context, volume.id) if snaps: _retype_error(context, volume, old_reservations, new_reservations, status_update) msg = _("Volume must not have snapshots.") LOG.error(msg) raise exception.InvalidVolume(reason=msg) # Don't allow volume with replicas to be migrated rep_status = volume.replication_status if (rep_status is not None and rep_status not in [fields.ReplicationStatus.DISABLED, fields.ReplicationStatus.NOT_CAPABLE]): _retype_error(context, volume, old_reservations, new_reservations, status_update) msg = _("Volume must not be replicated.") LOG.error(msg) raise exception.InvalidVolume(reason=msg) volume.migration_status = 'starting' volume.save() try: self.migrate_volume(context, volume, host, new_type_id=new_type_id, diff=diff) except Exception: with excutils.save_and_reraise_exception(): _retype_error(context, volume, old_reservations, new_reservations, status_update) else: model_update = {'volume_type_id': new_type_id, 'host': host['host'], 'cluster_name': host.get('cluster_name'), 'status': status_update['status']} if retype_model_update: model_update.update(retype_model_update) self._set_replication_status(diff, model_update) volume.update(model_update) volume.save() # We need to make the snapshots match the volume's type for snap in volume.snapshots: snap.volume_type_id = new_type_id snap.save() if old_reservations: QUOTAS.commit(context, old_reservations, project_id=project_id) if new_reservations: QUOTAS.commit(context, new_reservations, project_id=project_id) self._notify_about_volume_usage( context, volume, "retype", extra_usage_info={'volume_type': new_type_id}) self.publish_service_capabilities(context) LOG.info("Retype volume completed successfully.", resource=volume) @staticmethod def _set_replication_status(diff, model_update: dict) -> 
None: """Update replication_status in model_update if it has changed.""" if not diff or model_update.get('replication_status'): return diff_specs = diff.get('extra_specs', {}) replication_diff = diff_specs.get('replication_enabled') if replication_diff: is_replicated = volume_utils.is_boolean_str(replication_diff[1]) if is_replicated: replication_status = fields.ReplicationStatus.ENABLED else: replication_status = fields.ReplicationStatus.DISABLED model_update['replication_status'] = replication_status def manage_existing(self, ctxt: context.RequestContext, volume: objects.Volume, ref=None) -> ovo_fields.UUIDField: vol_ref = self._run_manage_existing_flow_engine( ctxt, volume, ref) self._update_stats_for_managed(vol_ref) LOG.info("Manage existing volume completed successfully.", resource=vol_ref) return vol_ref.id def _update_stats_for_managed(self, volume_reference: objects.Volume) -> None: # Update volume stats pool = volume_utils.extract_host(volume_reference.host, 'pool') if pool is None: # Legacy volume, put them into default pool pool = self.driver.configuration.safe_get( 'volume_backend_name') or volume_utils.extract_host( volume_reference.host, 'pool', True) try: self.stats['pools'][pool]['allocated_capacity_gb'] \ += volume_reference.size except KeyError: self.stats['pools'][pool] = dict( allocated_capacity_gb=volume_reference.size) def _run_manage_existing_flow_engine(self, ctxt: context.RequestContext, volume: objects.Volume, ref): try: flow_engine = manage_existing.get_flow( ctxt, self.db, self.driver, self.host, volume, ref, ) except Exception: msg = _("Failed to create manage_existing flow.") LOG.exception(msg, resource={'type': 'volume', 'id': volume.id}) raise exception.CinderException(msg) with flow_utils.DynamicLogListener(flow_engine, logger=LOG): flow_engine.run() # Fetch created volume from storage vol_ref = flow_engine.storage.fetch('volume') return vol_ref def _get_cluster_or_host_filters(self) -> dict[str, Any]: if self.cluster: filters = {'cluster_name': self.cluster} else: filters = {'host': self.host} return filters def _get_my_volumes_summary( self, ctxt: context.RequestContext) -> tuple: filters = self._get_cluster_or_host_filters() return objects.VolumeList.get_volume_summary(ctxt, False, filters) def _get_my_snapshots_summary( self, ctxt: context.RequestContext) -> tuple: filters = self._get_cluster_or_host_filters() return objects.SnapshotList.get_snapshot_summary(ctxt, False, filters) def _get_my_resources(self, ctxt: context.RequestContext, ovo_class_list, limit: Optional[int] = None, offset: Optional[int] = None) -> list: filters = self._get_cluster_or_host_filters() return getattr(ovo_class_list, 'get_all')(ctxt, filters=filters, limit=limit, offset=offset) def _get_my_volumes(self, ctxt: context.RequestContext, limit: Optional[int] = None, offset: Optional[int] = None) -> objects.VolumeList: return self._get_my_resources(ctxt, objects.VolumeList, limit, offset) def _get_my_snapshots( self, ctxt: context.RequestContext, limit: Optional[int] = None, offset: Optional[int] = None) -> objects.SnapshotList: return self._get_my_resources(ctxt, objects.SnapshotList, limit, offset) def get_manageable_volumes(self, ctxt: context.RequestContext, marker, limit: Optional[int], offset: Optional[int], sort_keys, sort_dirs, want_objects=False) -> list: try: volume_utils.require_driver_initialized(self.driver) except exception.DriverNotInitialized: with excutils.save_and_reraise_exception(): LOG.exception("Listing manageable volumes failed, due " "to uninitialized 
driver.") cinder_volumes = self._get_my_volumes(ctxt) try: driver_entries = self.driver.get_manageable_volumes( cinder_volumes, marker, limit, offset, sort_keys, sort_dirs) if want_objects: driver_entries = (objects.ManageableVolumeList. from_primitives(ctxt, driver_entries)) except AttributeError: LOG.debug('Driver does not support listing manageable volumes.') return [] except Exception: with excutils.save_and_reraise_exception(): LOG.exception("Listing manageable volumes failed, due " "to driver error.") return driver_entries def create_group(self, context: context.RequestContext, group) -> objects.Group: """Creates the group.""" context = context.elevated() # Make sure the host in the DB matches our own when clustered self._set_resource_host(group) model_update = None self._notify_about_group_usage(context, group, "create.start") try: volume_utils.require_driver_initialized(self.driver) LOG.info("Group %s: creating", group.name) try: model_update = self.driver.create_group(context, group) except NotImplementedError: if not group_types.is_default_cgsnapshot_type( group.group_type_id): model_update = self._create_group_generic(context, group) else: cg, __ = self._convert_group_to_cg(group, []) model_update = self.driver.create_consistencygroup( context, cg) if model_update: if (model_update['status'] == fields.GroupStatus.ERROR): msg = (_('Create group failed.')) LOG.error(msg, resource={'type': 'group', 'id': group.id}) raise exception.VolumeDriverException(message=msg) else: group.update(model_update) group.save() except Exception: with excutils.save_and_reraise_exception(): group.status = fields.GroupStatus.ERROR group.save() LOG.error("Group %s: create failed", group.name) group.status = fields.GroupStatus.AVAILABLE group.created_at = timeutils.utcnow() group.save() LOG.info("Group %s: created successfully", group.name) self._notify_about_group_usage(context, group, "create.end") LOG.info("Create group completed successfully.", resource={'type': 'group', 'id': group.id}) return group def create_group_from_src( self, context: context.RequestContext, group: objects.Group, group_snapshot: Optional[objects.GroupSnapshot] = None, source_group=None) -> objects.Group: """Creates the group from source. The source can be a group snapshot or a source group. """ source_name = None snapshots = None source_vols = None try: volumes = objects.VolumeList.get_all_by_generic_group(context, group.id) if group_snapshot: try: # Check if group_snapshot still exists group_snapshot.refresh() except exception.GroupSnapshotNotFound: LOG.error("Create group from snapshot-%(snap)s failed: " "SnapshotNotFound.", {'snap': group_snapshot.id}, resource={'type': 'group', 'id': group.id}) raise source_name = _("snapshot-%s") % group_snapshot.id snapshots = objects.SnapshotList.get_all_for_group_snapshot( context, group_snapshot.id) for snap in snapshots: if (snap.status not in VALID_CREATE_GROUP_SRC_SNAP_STATUS): msg = (_("Cannot create group " "%(group)s because snapshot %(snap)s is " "not in a valid state. 
Valid states are: " "%(valid)s.") % {'group': group.id, 'snap': snap['id'], 'valid': VALID_CREATE_GROUP_SRC_SNAP_STATUS}) raise exception.InvalidGroup(reason=msg) if source_group: try: source_group.refresh() except exception.GroupNotFound: LOG.error("Create group " "from source group-%(group)s failed: " "GroupNotFound.", {'group': source_group.id}, resource={'type': 'group', 'id': group.id}) raise source_name = _("group-%s") % source_group.id source_vols = objects.VolumeList.get_all_by_generic_group( context, source_group.id) for source_vol in source_vols: if (source_vol.status not in VALID_CREATE_GROUP_SRC_GROUP_STATUS): msg = (_("Cannot create group " "%(group)s because source volume " "%(source_vol)s is not in a valid " "state. Valid states are: " "%(valid)s.") % {'group': group.id, 'source_vol': source_vol.id, 'valid': VALID_CREATE_GROUP_SRC_GROUP_STATUS}) raise exception.InvalidGroup(reason=msg) # Sort source snapshots so that they are in the same order as their # corresponding target volumes. sorted_snapshots = None if group_snapshot and snapshots: sorted_snapshots = self._sort_snapshots(volumes, snapshots) # Sort source volumes so that they are in the same order as their # corresponding target volumes. sorted_source_vols = None if source_group and source_vols: sorted_source_vols = self._sort_source_vols(volumes, source_vols) self._notify_about_group_usage( context, group, "create.start") volume_utils.require_driver_initialized(self.driver) try: model_update, volumes_model_update = ( self.driver.create_group_from_src( context, group, volumes, group_snapshot, sorted_snapshots, source_group, sorted_source_vols)) except NotImplementedError: if not group_types.is_default_cgsnapshot_type( group.group_type_id): model_update, volumes_model_update = ( self._create_group_from_src_generic( context, group, volumes, group_snapshot, sorted_snapshots, source_group, sorted_source_vols)) else: cg, volumes = self._convert_group_to_cg( group, volumes) cgsnapshot, sorted_snapshots = ( self._convert_group_snapshot_to_cgsnapshot( group_snapshot, sorted_snapshots, context)) source_cg, sorted_source_vols = ( self._convert_group_to_cg(source_group, sorted_source_vols)) model_update, volumes_model_update = ( self.driver.create_consistencygroup_from_src( context, cg, volumes, cgsnapshot, sorted_snapshots, source_cg, sorted_source_vols)) self._remove_cgsnapshot_id_from_snapshots(sorted_snapshots) self._remove_consistencygroup_id_from_volumes(volumes) self._remove_consistencygroup_id_from_volumes( sorted_source_vols) if volumes_model_update: for update in volumes_model_update: self.db.volume_update(context, update['id'], update) if model_update: group.update(model_update) group.save() except Exception: with excutils.save_and_reraise_exception(): group.status = fields.GroupStatus.ERROR group.save() LOG.error("Create group " "from source %(source)s failed.", {'source': source_name}, resource={'type': 'group', 'id': group.id}) # Update volume status to 'error' as well. 
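# ---------------------------------------------------------------------------
# Illustrative aside: the dispatch pattern used throughout the group code
# above -- call the driver's group method first, and only if it raises
# NotImplementedError fall back to either the generic implementation or the
# legacy consistency-group path.  All names here are hypothetical.
def create_group_with_fallback(driver, ctx, group, generic_impl, cg_impl,
                               is_default_cg_type):
    try:
        return driver.create_group(ctx, group)
    except NotImplementedError:
        if not is_default_cg_type:
            # Plain generic groups: a DB-only status update is enough.
            return generic_impl(ctx, group)
        # Default cgsnapshot type: translate to the legacy CG driver call.
        return cg_impl(ctx, group)
# ---------------------------------------------------------------------------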
self._remove_consistencygroup_id_from_volumes(volumes) for vol in volumes: vol.status = 'error' vol.save() now = timeutils.utcnow() status = 'available' for vol in volumes: update = {'status': status, 'created_at': now} self._update_volume_from_src(context, vol, update, group=group) self._update_allocated_capacity(vol) group.status = status group.created_at = now group.save() self._notify_about_group_usage( context, group, "create.end") LOG.info("Create group " "from source-%(source)s completed successfully.", {'source': source_name}, resource={'type': 'group', 'id': group.id}) return group def _create_group_from_src_generic( self, context: context.RequestContext, group: objects.Group, volumes: list[objects.Volume], group_snapshot: Optional[objects.GroupSnapshot] = None, snapshots: Optional[list[objects.Snapshot]] = None, source_group: Optional[objects.Group] = None, source_vols: Optional[list[objects.Volume]] = None) \ -> tuple[dict[str, str], list[dict[str, str]]]: """Creates a group from source. :param context: the context of the caller. :param group: the Group object to be created. :param volumes: a list of volume objects in the group. :param group_snapshot: the GroupSnapshot object as source. :param snapshots: a list of snapshot objects in group_snapshot. :param source_group: the Group object as source. :param source_vols: a list of volume objects in the source_group. :returns: model_update, volumes_model_update """ model_update = {'status': 'available'} volumes_model_update: list[dict] = [] for vol in volumes: if snapshots: for snapshot in snapshots: if vol.snapshot_id == snapshot.id: vol_model_update = {'id': vol.id} try: driver_update = ( self.driver.create_volume_from_snapshot( vol, snapshot)) if driver_update: driver_update.pop('id', None) vol_model_update.update(driver_update) if 'status' not in vol_model_update: vol_model_update['status'] = 'available' except Exception: vol_model_update['status'] = 'error' model_update['status'] = 'error' volumes_model_update.append(vol_model_update) break elif source_vols: for source_vol in source_vols: if vol.source_volid == source_vol.id: vol_model_update = {'id': vol.id} try: driver_update = self.driver.create_cloned_volume( vol, source_vol) if driver_update: driver_update.pop('id', None) vol_model_update.update(driver_update) if 'status' not in vol_model_update: vol_model_update['status'] = 'available' except Exception: vol_model_update['status'] = 'error' model_update['status'] = 'error' volumes_model_update.append(vol_model_update) break return model_update, volumes_model_update def _sort_snapshots(self, volumes, snapshots) -> list: # Sort source snapshots so that they are in the same order as their # corresponding target volumes. Each source snapshot in the snapshots # list should have a corresponding target volume in the volumes list. 
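# ---------------------------------------------------------------------------
# Illustrative aside: the re-ordering performed by _sort_snapshots -- each
# target volume carries the id of the snapshot it was created from, so the
# source snapshots are re-ordered to line up one-to-one with the volumes.
# Plain dicts stand in for the volume and snapshot objects.
def sort_snapshots_like(volumes, snapshots):
    by_id = {snap['id']: snap for snap in snapshots}
    try:
        return [by_id[vol['snapshot_id']] for vol in volumes]
    except KeyError as exc:
        raise ValueError('no source snapshot for volume: %s' % exc)


_vols = [{'id': 'v1', 'snapshot_id': 's2'}, {'id': 'v2', 'snapshot_id': 's1'}]
_snaps = [{'id': 's1'}, {'id': 's2'}]
assert [s['id'] for s in sort_snapshots_like(_vols, _snaps)] == ['s2', 's1']
# ---------------------------------------------------------------------------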
if not volumes or not snapshots or len(volumes) != len(snapshots): msg = _("Input volumes or snapshots are invalid.") LOG.error(msg) raise exception.InvalidInput(reason=msg) sorted_snapshots: list = [] for vol in volumes: found_snaps = [snap for snap in snapshots if snap['id'] == vol['snapshot_id']] if not found_snaps: LOG.error("Source snapshot cannot be found for target " "volume %(volume_id)s.", {'volume_id': vol['id']}) raise exception.SnapshotNotFound( snapshot_id=vol['snapshot_id']) sorted_snapshots.extend(found_snaps) return sorted_snapshots def _sort_source_vols(self, volumes, source_vols: objects.VolumeList) -> list: # Sort source volumes so that they are in the same order as their # corresponding target volumes. Each source volume in the source_vols # list should have a corresponding target volume in the volumes list. if not volumes or not source_vols or len(volumes) != len(source_vols): msg = _("Input volumes or source volumes are invalid.") LOG.error(msg) raise exception.InvalidInput(reason=msg) sorted_source_vols: list = [] for vol in volumes: found_source_vols = [source_vol for source_vol in source_vols if source_vol['id'] == vol['source_volid']] if not found_source_vols: LOG.error("Source volumes cannot be found for target " "volume %(volume_id)s.", {'volume_id': vol['id']}) raise exception.VolumeNotFound( volume_id=vol['source_volid']) sorted_source_vols.extend(found_source_vols) return sorted_source_vols def _update_volume_from_src(self, context: context.RequestContext, vol, update, group=None) -> None: try: snapshot_id = vol.get('snapshot_id') source_volid = vol.get('source_volid') if snapshot_id: snapshot = objects.Snapshot.get_by_id(context, snapshot_id) orig_vref = self.db.volume_get(context, snapshot.volume_id) if orig_vref.bootable: update['bootable'] = True self.db.volume_glance_metadata_copy_to_volume( context, vol['id'], snapshot_id) if source_volid: source_vol = objects.Volume.get_by_id(context, source_volid) if source_vol.bootable: update['bootable'] = True self.db.volume_glance_metadata_copy_from_volume_to_volume( context, source_volid, vol['id']) if source_vol.multiattach: update['multiattach'] = True except exception.SnapshotNotFound: LOG.error("Source snapshot %(snapshot_id)s cannot be found.", {'snapshot_id': vol['snapshot_id']}) self.db.volume_update(context, vol['id'], {'status': 'error'}) if group: group.status = fields.GroupStatus.ERROR group.save() raise except exception.VolumeNotFound: LOG.error("The source volume %(volume_id)s " "cannot be found.", {'volume_id': snapshot.volume_id}) self.db.volume_update(context, vol['id'], {'status': 'error'}) if group: group.status = fields.GroupStatus.ERROR group.save() raise except exception.CinderException as ex: LOG.error("Failed to update %(volume_id)s" " metadata using the provided snapshot" " %(snapshot_id)s metadata.", {'volume_id': vol['id'], 'snapshot_id': vol['snapshot_id']}) self.db.volume_update(context, vol['id'], {'status': 'error'}) if group: group.status = fields.GroupStatus.ERROR group.save() raise exception.MetadataCopyFailure(reason=str(ex)) self.db.volume_update(context, vol['id'], update) def _update_allocated_capacity(self, vol: objects.Volume, decrement: bool = False, host: Optional[str] = None) -> None: # Update allocated capacity in volume stats host = host or vol['host'] pool = volume_utils.extract_host(host, 'pool') if pool is None: # Legacy volume, put them into default pool pool = self.driver.configuration.safe_get( 'volume_backend_name') or volume_utils.extract_host(host, 'pool', 
True) vol_size = -vol['size'] if decrement else vol['size'] try: self.stats['pools'][pool]['allocated_capacity_gb'] += vol_size except KeyError: self.stats['pools'][pool] = dict( allocated_capacity_gb=max(vol_size, 0)) def delete_group(self, context: context.RequestContext, group: objects.Group) -> None: """Deletes group and the volumes in the group.""" context = context.elevated() project_id = group.project_id if context.project_id != group.project_id: project_id = group.project_id else: project_id = context.project_id volumes = objects.VolumeList.get_all_by_generic_group( context, group.id) for vol_obj in volumes: if vol_obj.attach_status == "attached": # Volume is still attached, need to detach first raise exception.VolumeAttached(volume_id=vol_obj.id) self._check_is_our_resource(vol_obj) self._notify_about_group_usage( context, group, "delete.start") volumes_model_update = None model_update = None try: volume_utils.require_driver_initialized(self.driver) try: model_update, volumes_model_update = ( self.driver.delete_group(context, group, volumes)) except NotImplementedError: if not group_types.is_default_cgsnapshot_type( group.group_type_id): model_update, volumes_model_update = ( self._delete_group_generic(context, group, volumes)) else: cg, volumes = self._convert_group_to_cg( group, volumes) model_update, volumes_model_update = ( self.driver.delete_consistencygroup(context, cg, volumes)) self._remove_consistencygroup_id_from_volumes(volumes) if volumes_model_update: for update in volumes_model_update: # If we failed to delete a volume, make sure the # status for the group is set to error as well if (update['status'] in ['error_deleting', 'error'] and model_update['status'] not in ['error_deleting', 'error']): model_update['status'] = update['status'] self.db.volumes_update(context, volumes_model_update) if model_update: if model_update['status'] in ['error_deleting', 'error']: msg = (_('Delete group failed.')) LOG.error(msg, resource={'type': 'group', 'id': group.id}) raise exception.VolumeDriverException(message=msg) else: group.update(model_update) group.save() except Exception: with excutils.save_and_reraise_exception(): group.status = fields.GroupStatus.ERROR group.save() # Update volume status to 'error' if driver returns # None for volumes_model_update. 
if not volumes_model_update: self._remove_consistencygroup_id_from_volumes(volumes) for vol_obj in volumes: vol_obj.status = 'error' vol_obj.save() # Get reservations for group grpreservations: Optional[list] try: reserve_opts = {'groups': -1} grpreservations = GROUP_QUOTAS.reserve(context, project_id=project_id, **reserve_opts) except Exception: grpreservations = None LOG.exception("Delete group " "failed to update usages.", resource={'type': 'group', 'id': group.id}) for vol in volumes: # Get reservations for volume reservations: Optional[list] try: reserve_opts = {'volumes': -1, 'gigabytes': -vol.size} QUOTAS.add_volume_type_opts(context, reserve_opts, vol.volume_type_id) reservations = QUOTAS.reserve(context, project_id=project_id, **reserve_opts) except Exception: reservations = None LOG.exception("Delete group " "failed to update usages.", resource={'type': 'group', 'id': group.id}) vol.destroy() # Commit the reservations if reservations: QUOTAS.commit(context, reservations, project_id=project_id) self.stats['allocated_capacity_gb'] -= vol.size if grpreservations: GROUP_QUOTAS.commit(context, grpreservations, project_id=project_id) group.destroy() self._notify_about_group_usage( context, group, "delete.end") self.publish_service_capabilities(context) LOG.info("Delete group " "completed successfully.", resource={'type': 'group', 'id': group.id}) def _convert_group_to_cg( self, group: objects.Group, volumes: objects.VolumeList) -> tuple[objects.Group, objects.VolumeList]: if not group: return None, None cg = consistencygroup.ConsistencyGroup() cg.from_group(group) for vol in volumes: vol.consistencygroup_id = vol.group_id vol.consistencygroup = cg return cg, volumes def _remove_consistencygroup_id_from_volumes( self, volumes: Optional[list[objects.Volume]]) -> None: if not volumes: return for vol in volumes: vol.consistencygroup_id = None vol.consistencygroup = None def _convert_group_snapshot_to_cgsnapshot( self, group_snapshot: objects.GroupSnapshot, snapshots: objects.SnapshotList, ctxt) -> tuple[objects.CGSnapshot, objects.SnapshotList]: if not group_snapshot: return None, None cgsnap = cgsnapshot.CGSnapshot() cgsnap.from_group_snapshot(group_snapshot) # Populate consistencygroup object grp = objects.Group.get_by_id(ctxt, group_snapshot.group_id) cg, __ = self._convert_group_to_cg(grp, []) cgsnap.consistencygroup = cg for snap in snapshots: snap.cgsnapshot_id = snap.group_snapshot_id snap.cgsnapshot = cgsnap return cgsnap, snapshots def _remove_cgsnapshot_id_from_snapshots( self, snapshots: Optional[list]) -> None: if not snapshots: return for snap in snapshots: snap.cgsnapshot_id = None snap.cgsnapshot = None def _create_group_generic(self, context: context.RequestContext, group) -> dict: """Creates a group.""" # A group entry is already created in db. Just returns a status here. 
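# ---------------------------------------------------------------------------
# Illustrative aside: the quota-release pattern used when deleting a group
# above -- reserve negative deltas (one fewer volume, fewer gigabytes) and
# commit only if the reservation succeeded; a failed reservation is logged
# and skipped rather than blocking the delete.  'quotas' is a hypothetical
# stand-in for the real QUOTAS engine.
import logging

_QLOG = logging.getLogger(__name__)


def release_volume_quota(quotas, ctx, project_id, volume_size):
    try:
        reservations = quotas.reserve(ctx, project_id=project_id,
                                      volumes=-1, gigabytes=-volume_size)
    except Exception:
        _QLOG.exception("Failed to update usages deleting volume")
        return
    quotas.commit(ctx, reservations, project_id=project_id)
# ---------------------------------------------------------------------------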
model_update = {'status': fields.GroupStatus.AVAILABLE, 'created_at': timeutils.utcnow()} return model_update def _delete_group_generic(self, context: context.RequestContext, group: objects.Group, volumes) -> tuple: """Deletes a group and volumes in the group.""" model_update = {'status': group.status} volume_model_updates = [] for volume_ref in volumes: volume_model_update = {'id': volume_ref.id} try: self.driver.remove_export(context, volume_ref) self.driver_delete_volume(volume_ref) volume_model_update['status'] = 'deleted' except exception.VolumeIsBusy: volume_model_update['status'] = 'available' except Exception: volume_model_update['status'] = 'error' model_update['status'] = fields.GroupStatus.ERROR volume_model_updates.append(volume_model_update) return model_update, volume_model_updates def _update_group_generic( self, context: context.RequestContext, group, add_volumes=None, remove_volumes=None) -> tuple[None, None, None]: """Updates a group.""" # NOTE(xyang): The volume manager adds/removes the volume to/from the # group in the database. This default implementation does not do # anything in the backend storage. return None, None, None def _collect_volumes_for_group( self, context: context.RequestContext, group, volumes: Optional[str], add: bool = True) -> list: valid_status: tuple[str, ...] if add: valid_status = VALID_ADD_VOL_TO_GROUP_STATUS else: valid_status = VALID_REMOVE_VOL_FROM_GROUP_STATUS volumes_ref: list = [] if not volumes: return volumes_ref for add_vol in volumes.split(','): try: add_vol_ref = objects.Volume.get_by_id(context, add_vol) except exception.VolumeNotFound: LOG.error("Update group " "failed to %(op)s volume-%(volume_id)s: " "VolumeNotFound.", {'volume_id': add_vol, 'op': 'add' if add else 'remove'}, resource={'type': 'group', 'id': group.id}) raise if add_vol_ref.status not in valid_status: msg = (_("Can not %(op)s volume %(volume_id)s to " "group %(group_id)s because volume is in an invalid " "state: %(status)s. Valid states are: %(valid)s.") % {'volume_id': add_vol_ref.id, 'group_id': group.id, 'status': add_vol_ref.status, 'valid': valid_status, 'op': 'add' if add else 'remove'}) raise exception.InvalidVolume(reason=msg) if add: self._check_is_our_resource(add_vol_ref) volumes_ref.append(add_vol_ref) return volumes_ref def update_group(self, context: context.RequestContext, group, add_volumes: Optional[str] = None, remove_volumes: Optional[str] = None) -> None: """Updates group. Update group by adding volumes to the group, or removing volumes from the group. 
""" add_volumes_ref = self._collect_volumes_for_group(context, group, add_volumes, add=True) remove_volumes_ref = self._collect_volumes_for_group(context, group, remove_volumes, add=False) self._notify_about_group_usage( context, group, "update.start") try: volume_utils.require_driver_initialized(self.driver) try: model_update, add_volumes_update, remove_volumes_update = ( self.driver.update_group( context, group, add_volumes=add_volumes_ref, remove_volumes=remove_volumes_ref)) except NotImplementedError: if not group_types.is_default_cgsnapshot_type( group.group_type_id): model_update, add_volumes_update, remove_volumes_update = ( self._update_group_generic( context, group, add_volumes=add_volumes_ref, remove_volumes=remove_volumes_ref)) else: cg, remove_volumes_ref = self._convert_group_to_cg( group, remove_volumes_ref) model_update, add_volumes_update, remove_volumes_update = ( self.driver.update_consistencygroup( context, cg, add_volumes=add_volumes_ref, remove_volumes=remove_volumes_ref)) self._remove_consistencygroup_id_from_volumes( remove_volumes_ref) volumes_to_update: list = [] if add_volumes_update: volumes_to_update.extend(add_volumes_update) if remove_volumes_update: volumes_to_update.extend(remove_volumes_update) self.db.volumes_update(context, volumes_to_update) if model_update: if model_update['status'] in ( [fields.GroupStatus.ERROR]): msg = (_('Error occurred when updating group ' '%s.') % group.id) LOG.error(msg) raise exception.VolumeDriverException(message=msg) group.update(model_update) group.save() except Exception as e: with excutils.save_and_reraise_exception(): if isinstance(e, exception.VolumeDriverException): LOG.error("Error occurred in the volume driver when " "updating group %(group_id)s.", {'group_id': group.id}) else: LOG.error("Failed to update group %(group_id)s.", {'group_id': group.id}) group.status = fields.GroupStatus.ERROR group.save() for add_vol in add_volumes_ref: add_vol.status = 'error' add_vol.save() for rem_vol in remove_volumes_ref: if isinstance(e, exception.VolumeDriverException): rem_vol.consistencygroup_id = None rem_vol.consistencygroup = None rem_vol.status = 'error' rem_vol.save() for add_vol in add_volumes_ref: add_vol.group_id = group.id add_vol.save() for rem_vol in remove_volumes_ref: rem_vol.group_id = None rem_vol.save() group.status = fields.GroupStatus.AVAILABLE group.save() self._notify_about_group_usage( context, group, "update.end") LOG.info("Update group completed successfully.", resource={'type': 'group', 'id': group.id}) def create_group_snapshot( self, context: context.RequestContext, group_snapshot: objects.GroupSnapshot) -> objects.GroupSnapshot: """Creates the group_snapshot.""" caller_context = context context = context.elevated() LOG.info("GroupSnapshot %s: creating.", group_snapshot.id) snapshots = objects.SnapshotList.get_all_for_group_snapshot( context, group_snapshot.id) self._notify_about_group_snapshot_usage( context, group_snapshot, "create.start") snapshots_model_update = None model_update = None try: volume_utils.require_driver_initialized(self.driver) LOG.debug("Group snapshot %(grp_snap_id)s: creating.", {'grp_snap_id': group_snapshot.id}) # Pass context so that drivers that want to use it, can, # but it is not a requirement for all drivers. 
group_snapshot.context = caller_context for snapshot in snapshots: snapshot.context = caller_context try: model_update, snapshots_model_update = ( self.driver.create_group_snapshot(context, group_snapshot, snapshots)) except NotImplementedError: if not group_types.is_default_cgsnapshot_type( group_snapshot.group_type_id): model_update, snapshots_model_update = ( self._create_group_snapshot_generic( context, group_snapshot, snapshots)) else: cgsnapshot, snapshots = ( self._convert_group_snapshot_to_cgsnapshot( group_snapshot, snapshots, context)) model_update, snapshots_model_update = ( self.driver.create_cgsnapshot(context, cgsnapshot, snapshots)) self._remove_cgsnapshot_id_from_snapshots(snapshots) if snapshots_model_update: for snap_model in snapshots_model_update: # Update db for snapshot. # NOTE(xyang): snapshots is a list of snapshot objects. # snapshots_model_update should be a list of dicts. snap_id = snap_model.pop('id') snap_obj = objects.Snapshot.get_by_id(context, snap_id) snap_obj.update(snap_model) snap_obj.save() if (snap_model['status'] in [ fields.SnapshotStatus.ERROR_DELETING, fields.SnapshotStatus.ERROR] and model_update['status'] not in [fields.GroupSnapshotStatus.ERROR_DELETING, fields.GroupSnapshotStatus.ERROR]): model_update['status'] = snap_model['status'] if model_update: if model_update['status'] == fields.GroupSnapshotStatus.ERROR: msg = (_('Error occurred when creating group_snapshot ' '%s.') % group_snapshot.id) LOG.error(msg) raise exception.VolumeDriverException(message=msg) group_snapshot.update(model_update) group_snapshot.save() except exception.CinderException: with excutils.save_and_reraise_exception(): group_snapshot.status = fields.GroupSnapshotStatus.ERROR group_snapshot.save() # Update snapshot status to 'error' if driver returns # None for snapshots_model_update. 
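# ---------------------------------------------------------------------------
# Illustrative aside: the status-escalation pattern used just above when a
# driver returns per-snapshot model updates -- each snapshot is updated
# individually, and if any snapshot ended up in an error state the group
# snapshot as a whole is marked with that error status too.  Plain dicts
# stand in for the objects.
def apply_snapshot_updates(group_model, snapshots_by_id, snapshot_updates,
                           error_states=('error', 'error_deleting')):
    for update in snapshot_updates:
        snap_id = update.pop('id')
        snapshots_by_id[snap_id].update(update)
        if (update.get('status') in error_states
                and group_model.get('status') not in error_states):
            group_model['status'] = update['status']
    return group_model
# ---------------------------------------------------------------------------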
self._remove_cgsnapshot_id_from_snapshots(snapshots) if not snapshots_model_update: for snapshot in snapshots: snapshot.status = fields.SnapshotStatus.ERROR snapshot.save() for snapshot in snapshots: volume_id = snapshot.volume_id snapshot_id = snapshot.id vol_obj = objects.Volume.get_by_id(context, volume_id) if vol_obj.bootable: try: self.db.volume_glance_metadata_copy_to_snapshot( context, snapshot_id, volume_id) except exception.GlanceMetadataNotFound: # If volume is not created from image, No glance metadata # would be available for that volume in # volume glance metadata table pass except exception.CinderException as ex: LOG.error("Failed updating %(snapshot_id)s" " metadata using the provided volumes" " %(volume_id)s metadata.", {'volume_id': volume_id, 'snapshot_id': snapshot_id}) snapshot.status = fields.SnapshotStatus.ERROR snapshot.save() raise exception.MetadataCopyFailure( reason=str(ex)) snapshot.status = fields.SnapshotStatus.AVAILABLE snapshot.progress = '100%' snapshot.save() group_snapshot.status = fields.GroupSnapshotStatus.AVAILABLE group_snapshot.save() LOG.info("group_snapshot %s: created successfully", group_snapshot.id) self._notify_about_group_snapshot_usage( context, group_snapshot, "create.end") return group_snapshot def _create_group_snapshot_generic( self, context: context.RequestContext, group_snapshot: objects.GroupSnapshot, snapshots: list) -> tuple[dict, list[dict]]: """Creates a group_snapshot.""" model_update = {'status': 'available'} snapshot_model_updates = [] for snapshot in snapshots: snapshot_model_update = {'id': snapshot.id} try: driver_update = self.driver.create_snapshot(snapshot) if driver_update: driver_update.pop('id', None) snapshot_model_update.update(driver_update) if 'status' not in snapshot_model_update: snapshot_model_update['status'] = ( fields.SnapshotStatus.AVAILABLE) except Exception: snapshot_model_update['status'] = ( fields.SnapshotStatus.ERROR) model_update['status'] = 'error' snapshot_model_updates.append(snapshot_model_update) return model_update, snapshot_model_updates def _delete_group_snapshot_generic( self, context: context.RequestContext, group_snapshot: objects.GroupSnapshot, snapshots: list) -> tuple[dict, list[dict]]: """Deletes a group_snapshot.""" model_update = {'status': group_snapshot.status} snapshot_model_updates = [] for snapshot in snapshots: snapshot_model_update = {'id': snapshot.id} try: self.driver_delete_snapshot(snapshot) snapshot_model_update['status'] = ( fields.SnapshotStatus.DELETED) except exception.SnapshotIsBusy: snapshot_model_update['status'] = ( fields.SnapshotStatus.AVAILABLE) except Exception: snapshot_model_update['status'] = ( fields.SnapshotStatus.ERROR) model_update['status'] = 'error' snapshot_model_updates.append(snapshot_model_update) return model_update, snapshot_model_updates def delete_group_snapshot(self, context: context.RequestContext, group_snapshot: objects.GroupSnapshot) -> None: """Deletes group_snapshot.""" caller_context = context context = context.elevated() project_id = group_snapshot.project_id LOG.info("group_snapshot %s: deleting", group_snapshot.id) snapshots = objects.SnapshotList.get_all_for_group_snapshot( context, group_snapshot.id) self._notify_about_group_snapshot_usage( context, group_snapshot, "delete.start") snapshots_model_update = None model_update = None try: volume_utils.require_driver_initialized(self.driver) LOG.debug("group_snapshot %(grp_snap_id)s: deleting", {'grp_snap_id': group_snapshot.id}) # Pass context so that drivers that want to use it, can, # 
but it is not a requirement for all drivers. group_snapshot.context = caller_context for snapshot in snapshots: snapshot.context = caller_context try: model_update, snapshots_model_update = ( self.driver.delete_group_snapshot(context, group_snapshot, snapshots)) except NotImplementedError: if not group_types.is_default_cgsnapshot_type( group_snapshot.group_type_id): model_update, snapshots_model_update = ( self._delete_group_snapshot_generic( context, group_snapshot, snapshots)) else: cgsnapshot, snapshots = ( self._convert_group_snapshot_to_cgsnapshot( group_snapshot, snapshots, context)) model_update, snapshots_model_update = ( self.driver.delete_cgsnapshot(context, cgsnapshot, snapshots)) self._remove_cgsnapshot_id_from_snapshots(snapshots) if snapshots_model_update: for snap_model in snapshots_model_update: # NOTE(xyang): snapshots is a list of snapshot objects. # snapshots_model_update should be a list of dicts. snap = next((item for item in snapshots if item.id == snap_model['id']), None) if snap: snap_model.pop('id') snap.update(snap_model) snap.save() if (snap_model['status'] in [fields.SnapshotStatus.ERROR_DELETING, fields.SnapshotStatus.ERROR] and model_update['status'] not in ['error_deleting', 'error']): model_update['status'] = snap_model['status'] if model_update: if model_update['status'] in ['error_deleting', 'error']: msg = (_('Error occurred when deleting group_snapshot ' '%s.') % group_snapshot.id) LOG.error(msg) raise exception.VolumeDriverException(message=msg) else: group_snapshot.update(model_update) group_snapshot.save() except exception.CinderException: with excutils.save_and_reraise_exception(): group_snapshot.status = fields.GroupSnapshotStatus.ERROR group_snapshot.save() # Update snapshot status to 'error' if driver returns # None for snapshots_model_update. if not snapshots_model_update: self._remove_cgsnapshot_id_from_snapshots(snapshots) for snapshot in snapshots: snapshot.status = fields.SnapshotStatus.ERROR snapshot.save() for snapshot in snapshots: # Get reservations reservations: Optional[list] try: reserve_opts = {'snapshots': -1} if not CONF.no_snapshot_gb_quota: reserve_opts['gigabytes'] = -snapshot.volume_size volume_ref = objects.Volume.get_by_id(context, snapshot.volume_id) QUOTAS.add_volume_type_opts(context, reserve_opts, volume_ref.volume_type_id) reservations = QUOTAS.reserve(context, project_id=project_id, **reserve_opts) except Exception: reservations = None LOG.exception("Failed to update usages deleting snapshot") self.db.volume_glance_metadata_delete_by_snapshot(context, snapshot.id) snapshot.destroy() # Commit the reservations if reservations: QUOTAS.commit(context, reservations, project_id=project_id) group_snapshot.destroy() LOG.info("group_snapshot %s: deleted successfully", group_snapshot.id) self._notify_about_group_snapshot_usage(context, group_snapshot, "delete.end", snapshots) def update_migrated_volume(self, ctxt: context.RequestContext, volume: objects.Volume, new_volume: objects.Volume, volume_status) -> None: """Finalize migration process on backend device.""" model_update = None model_update_default = {'_name_id': new_volume.name_id, 'provider_location': new_volume.provider_location} try: model_update = self.driver.update_migrated_volume(ctxt, volume, new_volume, volume_status) except NotImplementedError: # If update_migrated_volume is not implemented for the driver, # _name_id and provider_location will be set with the values # from new_volume. 
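# ---------------------------------------------------------------------------
# Illustrative aside: the "driver hook with a default" pattern used by
# update_migrated_volume below -- start from a default update built from the
# new volume (_name_id and provider_location), let the driver override it if
# it implements the hook, and fall back to the default on
# NotImplementedError.  The driver object here is hypothetical.
def build_migrated_update(driver, ctx, volume, new_volume, volume_status):
    default = {'_name_id': new_volume['name_id'],
               'provider_location': new_volume['provider_location']}
    try:
        driver_update = driver.update_migrated_volume(ctx, volume,
                                                      new_volume,
                                                      volume_status)
    except NotImplementedError:
        driver_update = None
    if driver_update:
        default.update(driver_update)
    return default
# ---------------------------------------------------------------------------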
model_update = model_update_default if model_update: model_update_default.update(model_update) # Swap keys that were changed in the source so we keep their values # in the temporary volume's DB record. # Need to convert 'metadata' and 'admin_metadata' since # they are not keys of volume, their corresponding keys are # 'volume_metadata' and 'volume_admin_metadata'. model_update_new = dict() for key in model_update: if key == 'metadata': if volume.get('volume_metadata'): model_update_new[key] = { metadata['key']: metadata['value'] for metadata in volume.volume_metadata} elif key == 'admin_metadata': model_update_new[key] = { metadata['key']: metadata['value'] for metadata in volume.volume_admin_metadata} else: model_update_new[key] = volume[key] with new_volume.obj_as_admin(): new_volume.update(model_update_new) new_volume.save() with volume.obj_as_admin(): volume.update(model_update_default) volume.save() # Replication V2.1 and a/a method def failover(self, context: context.RequestContext, secondary_backend_id=None) -> None: """Failover a backend to a secondary replication target. Instructs a replication capable/configured backend to failover to one of its secondary replication targets. host=None is an acceptable input, and leaves it to the driver to failover to the only configured target, or to choose a target on its own. All of the host's volumes will be passed on to the driver in order for it to determine the replicated volumes on the host, if needed. :param context: security context :param secondary_backend_id: Specifies backend_id to fail over to """ updates = {} repl_status = fields.ReplicationStatus service = self._get_service() # TODO(geguileo): We should optimize these updates by doing them # directly on the DB with just 3 queries, one to change the volumes # another to change all the snapshots, and another to get replicated # volumes. # Change non replicated volumes and their snapshots to error if we are # failing over, leave them as they are for failback volumes = self._get_my_volumes(context) replicated_vols = [] for volume in volumes: if volume.replication_status not in (repl_status.DISABLED, repl_status.NOT_CAPABLE): replicated_vols.append(volume) elif secondary_backend_id != self.FAILBACK_SENTINEL: volume.previous_status = volume.status volume.status = 'error' volume.replication_status = repl_status.NOT_CAPABLE volume.save() for snapshot in volume.snapshots: snapshot.status = fields.SnapshotStatus.ERROR snapshot.save() volume_update_list = None group_update_list = None try: # For non clustered we can call v2.1 failover_host, but for # clustered we call a/a failover method. We know a/a method # exists because BaseVD class wouldn't have started if it didn't. 
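# The getattr below picks self.driver.failover for clustered services
# and the older self.driver.failover_host otherwise; either way the
# driver is expected to return an
# (active_backend_id, volume_update_list, group_update_list) tuple.
# Illustrative values (the backend name is hypothetical):
# ('backend-2', [...], []) when failing over, while a failback is
# requested by passing the FAILBACK_SENTINEL ('default') as
# secondary_backend_id.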
failover = getattr(self.driver, 'failover' if service.is_clustered else 'failover_host') # expected form of volume_update_list: # [{volume_id: , updates: {'provider_id': xxxx....}}, # {volume_id: , updates: {'provider_id': xxxx....}}] # It includes volumes in replication groups and those not in them # expected form of group_update_list: # [{group_id: , updates: {'xxxx': xxxx....}}, # {group_id: , updates: {'xxxx': xxxx....}}] filters = self._get_cluster_or_host_filters() groups = objects.GroupList.get_all_replicated(context, filters=filters) active_backend_id, volume_update_list, group_update_list = ( failover(context, replicated_vols, secondary_id=secondary_backend_id, groups=groups)) try: update_data = {u['volume_id']: u['updates'] for u in volume_update_list} except KeyError: msg = "Update list, doesn't include volume_id" raise exception.ProgrammingError(reason=msg) try: update_group_data = {g['group_id']: g['updates'] for g in group_update_list} except KeyError: msg = "Update list, doesn't include group_id" raise exception.ProgrammingError(reason=msg) except Exception as exc: # NOTE(jdg): Drivers need to be aware if they fail during # a failover sequence, we're expecting them to cleanup # and make sure the driver state is such that the original # backend is still set as primary as per driver memory # We don't want to log the exception trace invalid replication # target if isinstance(exc, exception.InvalidReplicationTarget): log_method = LOG.error # Preserve the replication_status: Status should be failed over # if we were failing back or if we were failing over from one # secondary to another secondary. In both cases # active_backend_id will be set. if service.active_backend_id: updates['replication_status'] = repl_status.FAILED_OVER else: updates['replication_status'] = repl_status.ENABLED else: log_method = LOG.exception updates.update(disabled=True, replication_status=repl_status.FAILOVER_ERROR) log_method("Error encountered during failover on host: %(host)s " "to %(backend_id)s: %(error)s", {'host': self.host, 'backend_id': secondary_backend_id, 'error': exc}) # We dump the update list for manual recovery LOG.error('Failed update_list is: %s', volume_update_list) self.finish_failover(context, service, updates) return if secondary_backend_id == "default": updates['replication_status'] = repl_status.ENABLED updates['active_backend_id'] = '' updates['disabled'] = service.frozen updates['disabled_reason'] = 'frozen' if service.frozen else '' else: updates['replication_status'] = repl_status.FAILED_OVER updates['active_backend_id'] = active_backend_id updates['disabled'] = True updates['disabled_reason'] = 'failed-over' self.finish_failover(context, service, updates) for volume in replicated_vols: update = update_data.get(volume.id, {}) if update.get('status', '') == 'error': update['replication_status'] = repl_status.FAILOVER_ERROR elif update.get('replication_status') in (None, repl_status.FAILED_OVER): update['replication_status'] = updates['replication_status'] if update['replication_status'] == repl_status.FAILOVER_ERROR: update.setdefault('status', 'error') # Set all volume snapshots to error for snapshot in volume.snapshots: snapshot.status = fields.SnapshotStatus.ERROR snapshot.save() if 'status' in update: update['previous_status'] = volume.status volume.update(update) volume.save() for grp in groups: update = update_group_data.get(grp.id, {}) if update.get('status', '') == 'error': update['replication_status'] = repl_status.FAILOVER_ERROR elif update.get('replication_status') 
in (None, repl_status.FAILED_OVER): update['replication_status'] = updates['replication_status'] if update['replication_status'] == repl_status.FAILOVER_ERROR: update.setdefault('status', 'error') grp.update(update) grp.save() LOG.info("Failed over to replication target successfully.") # TODO(geguileo): In P - remove this failover_host = failover def finish_failover(self, context: context.RequestContext, service, updates) -> None: """Completion of the failover locally or via RPC.""" # If the service is clustered, broadcast the service changes to all # volume services, including this one. if service.is_clustered: # We have to update the cluster with the same data, and we do it # before broadcasting the failover_completed RPC call to prevent # races with services that may be starting.. for key, value in updates.items(): setattr(service.cluster, key, value) service.cluster.save() rpcapi = volume_rpcapi.VolumeAPI() rpcapi.failover_completed(context, service, updates) else: service.update(updates) service.save() def failover_completed(self, context: context.RequestContext, updates) -> None: """Finalize failover of this backend. When a service is clustered and replicated the failover has 2 stages, one that does the failover of the volumes and another that finalizes the failover of the services themselves. This method takes care of the last part and is called from the service doing the failover of the volumes after finished processing the volumes. """ service = self._get_service() service.update(updates) try: self.driver.failover_completed(context, service.active_backend_id) except Exception: msg = _('Driver reported error during replication failover ' 'completion.') LOG.exception(msg) service.disabled = True service.disabled_reason = msg service.replication_status = ( fields.ReplicationStatus.ERROR) service.save() def freeze_host(self, context: context.RequestContext) -> bool: """Freeze management plane on this backend. Basically puts the control/management plane into a Read Only state. We should handle this in the scheduler, however this is provided to let the driver know in case it needs/wants to do something specific on the backend. :param context: security context """ # TODO(jdg): Return from driver? or catch? # Update status column in service entry try: self.driver.freeze_backend(context) except exception.VolumeDriverException: # NOTE(jdg): In the case of freeze, we don't really # need the backend's consent or anything, we'll just # disable the service, so we can just log this and # go about our business LOG.warning('Error encountered on Cinder backend during ' 'freeze operation, service is frozen, however ' 'notification to driver has failed.') service = self._get_service() service.disabled = True service.disabled_reason = "frozen" service.save() LOG.info("Set backend status to frozen successfully.") return True def thaw_host(self, context: context.RequestContext) -> bool: """UnFreeze management plane on this backend. Basically puts the control/management plane back into a normal state. We should handle this in the scheduler, however this is provided to let the driver know in case it needs/wants to do something specific on the backend. :param context: security context """ # TODO(jdg): Return from driver? or catch? 
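# Unlike freeze_host above, which disables the service even when the
# driver call fails, thaw only re-enables the service if thaw_backend()
# succeeds; on VolumeDriverException we return False and the backend
# stays frozen.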
# Update status column in service entry try: self.driver.thaw_backend(context) except exception.VolumeDriverException: # NOTE(jdg): Thaw actually matters, if this call # to the backend fails, we're stuck and can't re-enable LOG.error('Error encountered on Cinder backend during ' 'thaw operation, service will remain frozen.') return False service = self._get_service() service.disabled = False service.disabled_reason = "" service.save() LOG.info("Thawed backend successfully.") return True def manage_existing_snapshot(self, ctxt: context.RequestContext, snapshot: objects.Snapshot, ref=None) -> ovo_fields.UUIDField: LOG.debug('manage_existing_snapshot: managing %s.', ref) try: flow_engine = manage_existing_snapshot.get_flow( ctxt, self.db, self.driver, self.host, snapshot.id, ref) except Exception: LOG.exception("Failed to create manage_existing flow: " "%(object_type)s %(object_id)s.", {'object_type': 'snapshot', 'object_id': snapshot.id}) raise exception.CinderException( _("Failed to create manage existing flow.")) with flow_utils.DynamicLogListener(flow_engine, logger=LOG): flow_engine.run() return snapshot.id def get_manageable_snapshots(self, ctxt: context.RequestContext, marker, limit: Optional[int], offset: Optional[int], sort_keys, sort_dirs, want_objects=False): try: volume_utils.require_driver_initialized(self.driver) except exception.DriverNotInitialized: with excutils.save_and_reraise_exception(): LOG.exception("Listing manageable snapshots failed, due " "to uninitialized driver.") cinder_snapshots = self._get_my_snapshots(ctxt) try: driver_entries = self.driver.get_manageable_snapshots( cinder_snapshots, marker, limit, offset, sort_keys, sort_dirs) if want_objects: driver_entries = (objects.ManageableSnapshotList. from_primitives(ctxt, driver_entries)) except AttributeError: LOG.debug('Driver does not support listing manageable snapshots.') return [] except Exception: with excutils.save_and_reraise_exception(): LOG.exception("Listing manageable snapshots failed, due " "to driver error.") return driver_entries def get_capabilities(self, context: context.RequestContext, discover: bool): """Get capabilities of backend storage.""" if discover: self.driver.init_capabilities() capabilities = self.driver.capabilities LOG.debug("Obtained capabilities list: %s.", capabilities) return capabilities def get_backup_device(self, ctxt: context.RequestContext, backup: objects.Backup, want_objects: bool = False, async_call: bool = False): (backup_device, is_snapshot) = ( self.driver.get_backup_device(ctxt, backup)) secure_enabled = self.driver.secure_file_operations_enabled() backup_device_dict = {'backup_device': backup_device, 'secure_enabled': secure_enabled, 'is_snapshot': is_snapshot, } # TODO(sborkows): from_primitive method will be removed in O, so there # is a need to clean here then. backup_device = (objects.BackupDeviceInfo.from_primitive( backup_device_dict, ctxt) if want_objects else backup_device_dict) if async_call: # we have to use an rpc call back to the backup manager to # continue the backup LOG.info("Calling backup continue_backup for: %s", backup) rpcapi = backup_rpcapi.BackupAPI() rpcapi.continue_backup(ctxt, backup, backup_device) else: # The rpc api version doesn't support the async callback # so we fallback to returning the value itself. 
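# In that case the caller receives either a BackupDeviceInfo object or
# the plain backup_device_dict built above, depending on want_objects.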
return backup_device def secure_file_operations_enabled( self, ctxt: context.RequestContext, volume: Optional[objects.Volume]) -> bool: secure_enabled = self.driver.secure_file_operations_enabled() return secure_enabled def _connection_create(self, ctxt: context.RequestContext, volume: objects.Volume, attachment: objects.VolumeAttachment, connector: dict) -> dict[str, Any]: try: self.driver.validate_connector(connector) except exception.InvalidConnectorException as err: raise exception.InvalidInput(reason=str(err)) except Exception as err: err_msg = (_("Validate volume connection failed " "(error: %(err)s).") % {'err': err}) LOG.error(err_msg, resource=volume) raise exception.VolumeBackendAPIException(data=err_msg) try: model_update = self.driver.create_export(ctxt.elevated(), volume, connector) except exception.CinderException as ex: err_msg = (_("Create export for volume failed (%s).") % ex.msg) LOG.exception(err_msg, resource=volume) raise exception.VolumeBackendAPIException(data=err_msg) try: if model_update: volume.update(model_update) volume.save() except exception.CinderException as ex: LOG.exception("Model update failed.", resource=volume) raise exception.ExportFailure(reason=str(ex)) try: conn_info = self.driver.initialize_connection(volume, connector) except Exception as err: err_msg = (_("Driver initialize connection failed " "(error: %(err)s).") % {'err': err}) LOG.exception(err_msg, resource=volume) self.driver.remove_export(ctxt.elevated(), volume) raise exception.VolumeBackendAPIException(data=err_msg) conn_info = self._parse_connection_options(ctxt, volume, conn_info) # NOTE(jdg): Get rid of the nested dict (data key) conn_data = conn_info.pop('data', {}) connection_info = conn_data.copy() connection_info.update(conn_info) values = {'volume_id': volume.id, 'attach_status': 'attaching', 'connector': jsonutils.dumps(connector)} # TODO(mriedem): Use VolumeAttachment.save() here. self.db.volume_attachment_update(ctxt, attachment.id, values) connection_info['attachment_id'] = attachment.id # Append the enforce_multipath value if the connector has it connection_info['enforce_multipath'] = connector.get( 'enforce_multipath', False) LOG.debug("Connection info returned from driver %(connection_info)s", {'connection_info': strutils.mask_dict_password(connection_info)}) return connection_info def attachment_update(self, context: context.RequestContext, vref: objects.Volume, connector: dict, attachment_id: str) -> dict[str, Any]: """Update/Finalize an attachment. This call updates a valid attachment record to associate with a volume and provide the caller with the proper connection info. Note that this call requires an `attachment_ref`. It's expected that prior to this call that the volume and an attachment UUID has been reserved. param: vref: Volume object to create attachment for param: connector: Connector object to use for attachment creation param: attachment_ref: ID of the attachment record to update """ mode = connector.get('mode', 'rw') self._notify_about_volume_usage(context, vref, 'attach.start') attachment_ref = objects.VolumeAttachment.get_by_id(context, attachment_id) # Check to see if a mode parameter was set during attachment-create; # this seems kinda wonky, but it's how we're keeping back compatability # with the use of connector.mode for now. 
In other words, we're # making sure we still honor ro settings from the connector but # we override that if a value was specified in attachment-create if attachment_ref.attach_mode != 'null': mode = attachment_ref.attach_mode connector['mode'] = mode connection_info = self._connection_create(context, vref, attachment_ref, connector) try: volume_utils.require_driver_initialized(self.driver) except Exception as err: self.message_api.create( context, message_field.Action.UPDATE_ATTACHMENT, resource_uuid=vref.id, exception=err) with excutils.save_and_reraise_exception(): self.db.volume_attachment_update( context, attachment_ref.id, {'attach_status': fields.VolumeAttachStatus.ERROR_ATTACHING}) self.db.volume_attached(context.elevated(), attachment_ref.id, attachment_ref.instance_uuid, connector.get('host', ''), connector.get('mountpoint', 'na'), mode, False) vref.refresh() attachment_ref.refresh() LOG.info("attachment_update completed successfully.", resource=vref) return connection_info def _connection_terminate(self, context: context.RequestContext, volume, attachment, force: bool = False) -> Optional[bool]: """Remove a volume connection, but leave attachment. Exits early if the attachment does not have a connector and returns None to indicate shared connections are irrelevant. """ volume_utils.require_driver_initialized(self.driver) connector = attachment.connector if not connector and not force: # It's possible to attach a volume to a shelved offloaded server # in nova, and a shelved offloaded server is not on a compute host, # which means the attachment was made without a host connector, # so if we don't have a connector we can't terminate a connection # that was never actually made to the storage backend, so just # log a message and exit. LOG.debug('No connector for attachment %s; skipping storage ' 'backend terminate_connection call.', attachment.id) # None indicates we don't know and don't care. return None try: shared_connections = self.driver.terminate_connection(volume, connector, force=force) if not isinstance(shared_connections, bool): shared_connections = False except Exception as err: err_msg = (_('Terminate volume connection failed: %(err)s') % {'err': err}) LOG.exception(err_msg, resource=volume) raise exception.VolumeBackendAPIException(data=err_msg) LOG.info("Terminate volume connection completed successfully.", resource=volume) # NOTE(jdg): Return True/False if there are other outstanding # attachments that share this connection. If True should signify # caller to preserve the actual host connection (work should be # done in the brick connector as it has the knowledge of what's # going on here. return shared_connections def attachment_delete(self, context: context.RequestContext, attachment_id: str, vref: objects.Volume) -> None: """Delete/Detach the specified attachment. Notifies the backend device that we're detaching the specified attachment instance. 
param: attachment_id: Attachment id to remove param: vref: Volume object associated with the attachment """ volume_utils.require_driver_initialized(self.driver) attachment = objects.VolumeAttachment.get_by_id(context, attachment_id) self._notify_about_volume_usage(context, vref, "detach.start") has_shared_connection = self._connection_terminate(context, vref, attachment) try: LOG.debug('Deleting attachment %(attachment_id)s.', {'attachment_id': attachment.id}, resource=vref) if has_shared_connection is not None and not has_shared_connection: self.driver.remove_export(context.elevated(), vref) except Exception as exc: # Failures on detach_volume and remove_export are not considered # failures in terms of detaching the volume. LOG.warning('Failed to detach volume on the backend, ignoring ' 'failure %s', exc) # Replication group API (Tiramisu) def enable_replication(self, ctxt: context.RequestContext, group: objects.Group) -> None: """Enable replication.""" group.refresh() if group.replication_status != fields.ReplicationStatus.ENABLING: msg = _("Replication status in group %s is not " "enabling. Cannot enable replication.") % group.id LOG.error(msg) raise exception.InvalidGroup(reason=msg) volumes = group.volumes for vol in volumes: vol.refresh() if vol.replication_status != fields.ReplicationStatus.ENABLING: msg = _("Replication status in volume %s is not " "enabling. Cannot enable replication.") % vol.id LOG.error(msg) raise exception.InvalidVolume(reason=msg) self._notify_about_group_usage( ctxt, group, "enable_replication.start") volumes_model_update = None model_update = None try: volume_utils.require_driver_initialized(self.driver) model_update, volumes_model_update = ( self.driver.enable_replication(ctxt, group, volumes)) if volumes_model_update: for update in volumes_model_update: vol_obj = objects.Volume.get_by_id(ctxt, update['id']) vol_obj.update(update) vol_obj.save() # If we failed to enable a volume, make sure the status # for the group is set to error as well if (update.get('replication_status') == fields.ReplicationStatus.ERROR and model_update.get('replication_status') != fields.ReplicationStatus.ERROR): model_update['replication_status'] = update.get( 'replication_status') if model_update: if (model_update.get('replication_status') == fields.ReplicationStatus.ERROR): msg = _('Enable replication failed.') LOG.error(msg, resource={'type': 'group', 'id': group.id}) raise exception.VolumeDriverException(message=msg) else: group.update(model_update) group.save() except exception.CinderException as ex: group.status = fields.GroupStatus.ERROR group.replication_status = fields.ReplicationStatus.ERROR group.save() # Update volume status to 'error' if driver returns # None for volumes_model_update. 
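# volumes_model_update, when the driver provides it, is a list of dicts
# keyed by volume 'id', e.g.
# [{'id': vol.id, 'replication_status': fields.ReplicationStatus.ERROR}].
# When the driver returns None we cannot tell which volumes failed, so
# every volume in the group is marked as error below.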
if not volumes_model_update: for vol in volumes: vol.status = 'error' vol.replication_status = fields.ReplicationStatus.ERROR vol.save() err_msg = _("Enable replication group failed: %s.") % ex raise exception.ReplicationGroupError(reason=err_msg, group_id=group.id) for vol in volumes: vol.replication_status = fields.ReplicationStatus.ENABLED vol.save() group.replication_status = fields.ReplicationStatus.ENABLED group.save() self._notify_about_group_usage( ctxt, group, "enable_replication.end", volumes) LOG.info("Enable replication completed successfully.", resource={'type': 'group', 'id': group.id}) # Replication group API (Tiramisu) def disable_replication(self, ctxt: context.RequestContext, group: objects.Group) -> None: """Disable replication.""" group.refresh() if group.replication_status != fields.ReplicationStatus.DISABLING: msg = _("Replication status in group %s is not " "disabling. Cannot disable replication.") % group.id LOG.error(msg) raise exception.InvalidGroup(reason=msg) volumes = group.volumes for vol in volumes: vol.refresh() if (vol.replication_status != fields.ReplicationStatus.DISABLING): msg = _("Replication status in volume %s is not " "disabling. Cannot disable replication.") % vol.id LOG.error(msg) raise exception.InvalidVolume(reason=msg) self._notify_about_group_usage( ctxt, group, "disable_replication.start") volumes_model_update = None model_update = None try: volume_utils.require_driver_initialized(self.driver) model_update, volumes_model_update = ( self.driver.disable_replication(ctxt, group, volumes)) if volumes_model_update: for update in volumes_model_update: vol_obj = objects.Volume.get_by_id(ctxt, update['id']) vol_obj.update(update) vol_obj.save() # If we failed to enable a volume, make sure the status # for the group is set to error as well if (update.get('replication_status') == fields.ReplicationStatus.ERROR and model_update.get('replication_status') != fields.ReplicationStatus.ERROR): model_update['replication_status'] = update.get( 'replication_status') if model_update: if (model_update.get('replication_status') == fields.ReplicationStatus.ERROR): msg = _('Disable replication failed.') LOG.error(msg, resource={'type': 'group', 'id': group.id}) raise exception.VolumeDriverException(message=msg) else: group.update(model_update) group.save() except exception.CinderException as ex: group.status = fields.GroupStatus.ERROR group.replication_status = fields.ReplicationStatus.ERROR group.save() # Update volume status to 'error' if driver returns # None for volumes_model_update. if not volumes_model_update: for vol in volumes: vol.status = 'error' vol.replication_status = fields.ReplicationStatus.ERROR vol.save() err_msg = _("Disable replication group failed: %s.") % ex raise exception.ReplicationGroupError(reason=err_msg, group_id=group.id) for vol in volumes: vol.replication_status = fields.ReplicationStatus.DISABLED vol.save() group.replication_status = fields.ReplicationStatus.DISABLED group.save() self._notify_about_group_usage( ctxt, group, "disable_replication.end", volumes) LOG.info("Disable replication completed successfully.", resource={'type': 'group', 'id': group.id}) # Replication group API (Tiramisu) def failover_replication(self, ctxt: context.RequestContext, group: objects.Group, allow_attached_volume: bool = False, secondary_backend_id=None) -> None: """Failover replication.""" group.refresh() if group.replication_status != fields.ReplicationStatus.FAILING_OVER: msg = _("Replication status in group %s is not " "failing-over. 
Cannot failover replication.") % group.id LOG.error(msg) raise exception.InvalidGroup(reason=msg) volumes = group.volumes for vol in volumes: vol.refresh() if vol.status == 'in-use' and not allow_attached_volume: msg = _("Volume %s is attached but allow_attached_volume flag " "is False. Cannot failover replication.") % vol.id LOG.error(msg) raise exception.InvalidVolume(reason=msg) if (vol.replication_status != fields.ReplicationStatus.FAILING_OVER): msg = _("Replication status in volume %s is not " "failing-over. Cannot failover replication.") % vol.id LOG.error(msg) raise exception.InvalidVolume(reason=msg) self._notify_about_group_usage( ctxt, group, "failover_replication.start") volumes_model_update = None model_update = None try: volume_utils.require_driver_initialized(self.driver) model_update, volumes_model_update = ( self.driver.failover_replication( ctxt, group, volumes, secondary_backend_id)) if volumes_model_update: for update in volumes_model_update: vol_obj = objects.Volume.get_by_id(ctxt, update['id']) vol_obj.update(update) vol_obj.save() # If we failed to enable a volume, make sure the status # for the group is set to error as well if (update.get('replication_status') == fields.ReplicationStatus.ERROR and model_update.get('replication_status') != fields.ReplicationStatus.ERROR): model_update['replication_status'] = update.get( 'replication_status') if model_update: if (model_update.get('replication_status') == fields.ReplicationStatus.ERROR): msg = _('Failover replication failed.') LOG.error(msg, resource={'type': 'group', 'id': group.id}) raise exception.VolumeDriverException(message=msg) else: group.update(model_update) group.save() except exception.CinderException as ex: group.status = fields.GroupStatus.ERROR group.replication_status = fields.ReplicationStatus.ERROR group.save() # Update volume status to 'error' if driver returns # None for volumes_model_update. if not volumes_model_update: for vol in volumes: vol.status = 'error' vol.replication_status = fields.ReplicationStatus.ERROR vol.save() err_msg = _("Failover replication group failed: %s.") % ex raise exception.ReplicationGroupError(reason=err_msg, group_id=group.id) for vol in volumes: if secondary_backend_id == "default": vol.replication_status = fields.ReplicationStatus.ENABLED else: vol.replication_status = ( fields.ReplicationStatus.FAILED_OVER) vol.save() if secondary_backend_id == "default": group.replication_status = fields.ReplicationStatus.ENABLED else: group.replication_status = fields.ReplicationStatus.FAILED_OVER group.save() self._notify_about_group_usage( ctxt, group, "failover_replication.end", volumes) LOG.info("Failover replication completed successfully.", resource={'type': 'group', 'id': group.id}) def list_replication_targets(self, ctxt: context.RequestContext, group: objects.Group) -> dict[str, list]: """Provide a means to obtain replication targets for a group. This method is used to find the replication_device config info. 'backend_id' is a required key in 'replication_device'. Response Example for admin: .. code:: json { "replication_targets": [ { "backend_id": "vendor-id-1", "unique_key": "val1" }, { "backend_id": "vendor-id-2", "unique_key": "val2" } ] } Response example for non-admin: .. 
code:: json { "replication_targets": [ { "backend_id": "vendor-id-1" }, { "backend_id": "vendor-id-2" } ] } """ replication_targets = [] try: group.refresh() if self.configuration.replication_device: if ctxt.is_admin: for rep_dev in self.configuration.replication_device: keys = rep_dev.keys() dev = {} for k in keys: dev[k] = rep_dev[k] replication_targets.append(dev) else: for rep_dev in self.configuration.replication_device: dev = rep_dev.get('backend_id') if dev: replication_targets.append({'backend_id': dev}) except exception.GroupNotFound: err_msg = (_("Get replication targets failed. Group %s not " "found.") % group.id) LOG.exception(err_msg) raise exception.VolumeBackendAPIException(data=err_msg) return {'replication_targets': replication_targets} def _refresh_volume_glance_meta(self, context, volume, image_meta): volume_utils.enable_bootable_flag(volume) volume_meta = volume_utils.get_volume_image_metadata( image_meta['id'], image_meta) LOG.debug("Creating volume glance metadata for volume %(volume_id)s" " backed by image %(image_id)s with: %(vol_metadata)s.", {'volume_id': volume.id, 'image_id': image_meta['id'], 'vol_metadata': volume_meta}) self.db.volume_glance_metadata_delete_by_volume(context, volume.id) self.db.volume_glance_metadata_bulk_create(context, volume.id, volume_meta) def reimage(self, context, volume, image_meta, image_snap=None): """Reimage a volume with specific image.""" image_id = None try: if image_snap: # We are not calling the driver method here since the snapshot # could belong to a different volume. # Even if the snapshot belongs to a different volume, we are # doing generic revert where we create a volume out of the # snapshot and do a copy so we are safe here. # Size checks are already done on the API layer so we don't # need to worry about the image fitting into the volume. self._revert_to_snapshot_generic(context, volume, image_snap) else: image_id = image_meta['id'] image_service, _ = glance.get_remote_image_service( context, image_meta['id']) image_location = image_service.get_location(context, image_id) volume_utils.copy_image_to_volume(self.driver, context, volume, image_meta, image_location, image_service, disable_sparse=True) self._refresh_volume_glance_meta(context, volume, image_meta) volume.status = volume.previous_status volume.save() if volume.status in ['reserved']: nova_api = compute.API() attachments = volume.volume_attachment instance_uuids = [attachment.instance_uuid for attachment in attachments] nova_api.reimage_volume(context, instance_uuids, volume.id) LOG.debug("Re-imaged %(image_id)s" " to volume %(volume_id)s successfully.", {'image_id': image_id, 'volume_id': volume.id}) except Exception as err: with excutils.save_and_reraise_exception(): LOG.error('Failed to re-image volume %(volume_id)s with ' 'image %(image_id)s.', {'image_id': image_id, 'volume_id': volume.id}) volume.previous_status = volume.status volume.status = 'error' volume.save() if isinstance(err, exception.ImageConversionNotAllowed): self.message_api.create( context, message_field.Action.REIMAGE_VOLUME, resource_uuid=volume.id, detail= message_field.Detail.IMAGE_FORMAT_UNACCEPTABLE) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/qos_specs.py0000664000175000017500000002115400000000000020216 0ustar00zuulzuul00000000000000# Copyright (c) 2013 eBay Inc. 
# Copyright (c) 2013 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """The QoS Specs Implementation""" from oslo_db import exception as db_exc from oslo_log import log as logging from cinder import context from cinder import db from cinder import exception from cinder.i18n import _ from cinder import objects from cinder import utils from cinder.volume import volume_types LOG = logging.getLogger(__name__) CONTROL_LOCATION = ['front-end', 'back-end', 'both'] def create(context, name, specs=None): """Creates qos_specs. :param specs: Dictionary that contains specifications for QoS Expected format of the input parameter: .. code-block:: python { 'consumer': 'front-end', 'total_iops_sec': 1000, 'total_bytes_sec': 1024000 } """ # Validate the key-value pairs in the qos spec. utils.validate_dictionary_string_length(specs) consumer = specs.get('consumer') if consumer: # If we need to modify specs, copy so we don't cause unintended # consequences for the caller specs = specs.copy() del specs['consumer'] values = dict(name=name, consumer=consumer, specs=specs) LOG.debug("Dict for qos_specs: %s", values) qos_spec = objects.QualityOfServiceSpecs(context, **values) qos_spec.create() return qos_spec def update(context, qos_specs_id, specs): """Update qos specs. :param specs: dictionary that contains key/value pairs for updating existing specs. e.g. {'consumer': 'front-end', 'total_iops_sec': 500, 'total_bytes_sec': 512000,} """ LOG.debug('qos_specs.update(): specs %s', specs) try: utils.validate_dictionary_string_length(specs) qos_spec = objects.QualityOfServiceSpecs.get_by_id(context, qos_specs_id) if 'consumer' in specs: qos_spec.consumer = specs['consumer'] # If we need to modify specs, copy so we don't cause unintended # consequences for the caller specs = specs.copy() del specs['consumer'] # Update any values in specs dict qos_spec.specs.update(specs) qos_spec.save() except exception.InvalidInput as e: raise exception.InvalidQoSSpecs(reason=e) except db_exc.DBError: LOG.exception('DB error:') raise exception.QoSSpecsUpdateFailed(specs_id=qos_specs_id, qos_specs=specs) return qos_spec def delete(context, qos_specs_id, force=False): """Marks qos specs as deleted. 'force' parameter is a flag to determine whether should destroy should continue when there were entities associated with the qos specs. force=True indicates caller would like to mark qos specs as deleted even if there was entities associate with target qos specs. Trying to delete a qos specs still associated with entities will cause QoSSpecsInUse exception if force=False (default). 
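For example, ``delete(context, qos_specs_id, force=True)`` marks the specs as deleted even though volume types are still associated with them.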
""" if qos_specs_id is None: msg = _("id cannot be None") raise exception.InvalidQoSSpecs(reason=msg) qos_spec = objects.QualityOfServiceSpecs.get_by_id( context, qos_specs_id) qos_spec.destroy(force) def delete_keys(context, qos_specs_id, keys): """Marks specified key of target qos specs as deleted.""" if qos_specs_id is None: msg = _("id cannot be None") raise exception.InvalidQoSSpecs(reason=msg) qos_spec = objects.QualityOfServiceSpecs.get_by_id(context, qos_specs_id) # Previous behavior continued to delete keys until it hit first unset one, # so for now will mimic that. In the future it would be useful to have all # or nothing deletion of keys (or at least delete all set keys), # especially since order of keys from CLI to API is not preserved currently try: for key in keys: try: del qos_spec.specs[key] except KeyError: raise exception.QoSSpecsKeyNotFound( specs_key=key, specs_id=qos_specs_id) finally: qos_spec.save() def get_associations(context, qos_specs_id): """Get all associations of given qos specs.""" try: types = objects.VolumeTypeList.get_all_types_for_qos(context, qos_specs_id) except db_exc.DBError: LOG.exception('DB error:') msg = _('Failed to get all associations of ' 'qos specs %s') % qos_specs_id LOG.warning(msg) raise exception.CinderException(message=msg) result = [] for vol_type in types: result.append({ 'association_type': 'volume_type', 'name': vol_type.name, 'id': vol_type.id }) return result def associate_qos_with_type(context, specs_id, type_id): """Associate qos_specs with volume type. Associate target qos specs with specific volume type. :param specs_id: qos specs ID to associate with :param type_id: volume type ID to associate with :raises VolumeTypeNotFound: if volume type doesn't exist :raises QoSSpecsNotFound: if qos specs doesn't exist :raises InvalidVolumeType: if volume type is already associated with qos specs other than given one. 
:raises QoSSpecsAssociateFailed: if there was general DB error """ try: get_qos_specs(context, specs_id) res = volume_types.get_volume_type_qos_specs(type_id) if res.get('qos_specs', None): if res['qos_specs'].get('id') != specs_id: msg = (_("Type %(type_id)s is already associated with another " "qos specs: %(qos_specs_id)s") % {'type_id': type_id, 'qos_specs_id': res['qos_specs']['id']}) raise exception.InvalidVolumeType(reason=msg) else: db.qos_specs_associate(context, specs_id, type_id) except db_exc.DBError: LOG.exception('DB error:') LOG.warning('Failed to associate qos specs ' '%(id)s with type: %(vol_type_id)s', dict(id=specs_id, vol_type_id=type_id)) raise exception.QoSSpecsAssociateFailed(specs_id=specs_id, type_id=type_id) def disassociate_qos_specs(context, specs_id, type_id): """Disassociate qos_specs from volume type.""" try: get_qos_specs(context, specs_id) db.qos_specs_disassociate(context, specs_id, type_id) except db_exc.DBError: LOG.exception('DB error:') LOG.warning('Failed to disassociate qos specs ' '%(id)s with type: %(vol_type_id)s', dict(id=specs_id, vol_type_id=type_id)) raise exception.QoSSpecsDisassociateFailed(specs_id=specs_id, type_id=type_id) def disassociate_all(context, specs_id): """Disassociate qos_specs from all entities.""" try: get_qos_specs(context, specs_id) db.qos_specs_disassociate_all(context, specs_id) except db_exc.DBError: LOG.exception('DB error:') LOG.warning('Failed to disassociate qos specs %s.', specs_id) raise exception.QoSSpecsDisassociateFailed(specs_id=specs_id, type_id=None) def get_all_specs(context, filters=None, marker=None, limit=None, offset=None, sort_keys=None, sort_dirs=None): """Get all non-deleted qos specs.""" return objects.QualityOfServiceSpecsList.get_all( context, filters=filters, marker=marker, limit=limit, offset=offset, sort_keys=sort_keys, sort_dirs=sort_dirs) def get_qos_specs(ctxt, spec_id): """Retrieves single qos specs by id.""" if spec_id is None: msg = _("id cannot be None") raise exception.InvalidQoSSpecs(reason=msg) if ctxt is None: ctxt = context.get_admin_context() return objects.QualityOfServiceSpecs.get_by_id(ctxt, spec_id) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/rpcapi.py0000664000175000017500000006250700000000000017504 0ustar00zuulzuul00000000000000# Copyright 2012, Intel, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from typing import Optional, Union from cinder.common import constants from cinder import context from cinder import objects from cinder import quota from cinder import rpc from cinder.volume import volume_utils QUOTAS = quota.QUOTAS class VolumeAPI(rpc.RPCAPI): """Client side of the volume rpc API. API version history: .. code-block:: none 1.0 - Initial version. 1.1 - Adds clone volume option to create_volume. 1.2 - Add publish_service_capabilities() method. 1.3 - Pass all image metadata (not just ID) in copy_volume_to_image. 
1.4 - Add request_spec, filter_properties and allow_reschedule arguments to create_volume(). 1.5 - Add accept_transfer. 1.6 - Add extend_volume. 1.7 - Adds host_name parameter to attach_volume() to allow attaching to host rather than instance. 1.8 - Add migrate_volume, rename_volume. 1.9 - Add new_user and new_project to accept_transfer. 1.10 - Add migrate_volume_completion, remove rename_volume. 1.11 - Adds mode parameter to attach_volume() to support volume read-only attaching. 1.12 - Adds retype. 1.13 - Adds create_export. 1.14 - Adds reservation parameter to extend_volume(). 1.15 - Adds manage_existing and unmanage_only flag to delete_volume. 1.16 - Removes create_export. 1.17 - Add replica option to create_volume, promote_replica and sync_replica. 1.18 - Adds create_consistencygroup, delete_consistencygroup, create_cgsnapshot, and delete_cgsnapshot. Also adds the consistencygroup_id parameter in create_volume. 1.19 - Adds update_migrated_volume 1.20 - Adds support for sending objects over RPC in create_snapshot() and delete_snapshot() 1.21 - Adds update_consistencygroup. 1.22 - Adds create_consistencygroup_from_src. 1.23 - Adds attachment_id to detach_volume. 1.24 - Removed duplicated parameters: snapshot_id, image_id, source_volid, source_replicaid, consistencygroup_id and cgsnapshot_id from create_volume. All off them are already passed either in request_spec or available in the DB. 1.25 - Add source_cg to create_consistencygroup_from_src. 1.26 - Adds support for sending objects over RPC in create_consistencygroup(), create_consistencygroup_from_src(), update_consistencygroup() and delete_consistencygroup(). 1.27 - Adds support for replication V2 1.28 - Adds manage_existing_snapshot 1.29 - Adds get_capabilities. 1.30 - Adds remove_export 1.31 - Updated: create_consistencygroup_from_src(), create_cgsnapshot() and delete_cgsnapshot() to cast method only with necessary args. Forwarding CGSnapshot object instead of CGSnapshot_id. 1.32 - Adds support for sending objects over RPC in create_volume(). 1.33 - Adds support for sending objects over RPC in delete_volume(). 1.34 - Adds support for sending objects over RPC in retype(). 1.35 - Adds support for sending objects over RPC in extend_volume(). 1.36 - Adds support for sending objects over RPC in migrate_volume(), migrate_volume_completion(), and update_migrated_volume(). 1.37 - Adds old_reservations parameter to retype to support quota checks in the API. 1.38 - Scaling backup service, add get_backup_device() and secure_file_operations_enabled() 1.39 - Update replication methods to reflect new backend rep strategy 1.40 - Add cascade option to delete_volume(). ... Mitaka supports messaging version 1.40. Any changes to existing methods in 1.x after that point should be done so that they can handle the version_cap being set to 1.40. 2.0 - Remove 1.x compatibility 2.1 - Add get_manageable_volumes() and get_manageable_snapshots(). 2.2 - Adds support for sending objects over RPC in manage_existing(). 2.3 - Adds support for sending objects over RPC in initialize_connection(). 2.4 - Sends request_spec as object in create_volume(). 2.5 - Adds create_group, delete_group, and update_group 2.6 - Adds create_group_snapshot, delete_group_snapshot, and create_group_from_src(). ... Newton supports messaging version 2.6. Any changes to existing methods in 2.x after that point should be done so that they can handle the version_cap being set to 2.6. 3.0 - Drop 2.x compatibility 3.1 - Remove promote_replica and reenable_replication. 
This is non-backward compatible, but the user-facing API was removed back in Mitaka when introducing cheesecake replication. 3.2 - Adds support for sending objects over RPC in get_backup_device(). 3.3 - Adds support for sending objects over RPC in attach_volume(). 3.4 - Adds support for sending objects over RPC in detach_volume(). 3.5 - Adds support for cluster in retype and migrate_volume 3.6 - Switch to use oslo.messaging topics to indicate backends instead of @backend suffixes in server names. 3.7 - Adds do_cleanup method to do volume cleanups from other nodes that we were doing in init_host. 3.8 - Make failover_host cluster aware and add failover_completed. 3.9 - Adds new attach/detach methods 3.10 - Returning objects instead of raw dictionaries in get_manageable_volumes & get_manageable_snapshots 3.11 - Removes create_consistencygroup, delete_consistencygroup, create_cgsnapshot, delete_cgsnapshot, update_consistencygroup, and create_consistencygroup_from_src. 3.12 - Adds set_log_levels and get_log_levels 3.13 - Add initialize_connection_snapshot, terminate_connection_snapshot, and remove_export_snapshot. 3.14 - Adds enable_replication, disable_replication, failover_replication, and list_replication_targets. 3.15 - Add revert_to_snapshot method 3.16 - Add no_snapshots to accept_transfer method 3.17 - Make get_backup_device a cast (async) 3.18 - Add reimage method 3.19 - Add extend_volume_completion method 3.20 - Add image_snap parameter to reimage method """ RPC_API_VERSION = '3.20' RPC_DEFAULT_VERSION = '3.0' TOPIC = constants.VOLUME_TOPIC BINARY = constants.VOLUME_BINARY def _get_cctxt(self, host: Optional[str] = None, version: Optional[Union[str, tuple[str, ...]]] = None, **kwargs) -> rpc.RPCAPI: if host: server = volume_utils.extract_host(host) # TODO(dulek): If we're pinned before 3.6, we should send stuff the # old way - addressing server=host@backend, topic=cinder-volume. # Otherwise we're addressing server=host, # topic=cinder-volume.host@backend. This conditional can go away # when we stop supporting 3.x. 
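# For example (illustrative host value): with host='node1@lvm', a client
# pinned below 3.6 sends to server='node1@lvm' on topic='cinder-volume',
# while 3.6 and later sends to topic='cinder-volume.node1@lvm' with
# server='node1'.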
if self.client.can_send_version('3.6'): kwargs['topic'] = '%(topic)s.%(host)s' % {'topic': self.TOPIC, 'host': server} server = volume_utils.extract_host(server, 'host') kwargs['server'] = server return super(VolumeAPI, self)._get_cctxt(version=version, **kwargs) def create_volume(self, ctxt: context.RequestContext, volume: 'objects.Volume', request_spec: Optional[dict], filter_properties: Optional[dict], allow_reschedule: bool = True) -> None: cctxt = self._get_cctxt(volume.service_topic_queue) cctxt.cast(ctxt, 'create_volume', request_spec=request_spec, filter_properties=filter_properties, allow_reschedule=allow_reschedule, volume=volume) @rpc.assert_min_rpc_version('3.15') def revert_to_snapshot(self, ctxt, volume, snapshot): version = self._compat_ver('3.15') cctxt = self._get_cctxt(volume.service_topic_queue, version) cctxt.cast(ctxt, 'revert_to_snapshot', volume=volume, snapshot=snapshot) def delete_volume(self, ctxt: context.RequestContext, volume: 'objects.Volume', unmanage_only: bool = False, cascade: bool = False) -> None: volume.create_worker() cctxt = self._get_cctxt(volume.service_topic_queue) msg_args = { 'volume': volume, 'unmanage_only': unmanage_only, 'cascade': cascade, } cctxt.cast(ctxt, 'delete_volume', **msg_args) def create_snapshot(self, ctxt: context.RequestContext, volume: 'objects.Volume', snapshot: 'objects.Snapshot') -> None: snapshot.create_worker() cctxt = self._get_cctxt(volume.service_topic_queue) cctxt.cast(ctxt, 'create_snapshot', snapshot=snapshot) def delete_snapshot(self, ctxt, snapshot, unmanage_only=False): cctxt = self._get_cctxt(snapshot.service_topic_queue) cctxt.cast(ctxt, 'delete_snapshot', snapshot=snapshot, unmanage_only=unmanage_only) def attach_volume(self, ctxt, volume, instance_uuid, host_name, mountpoint, mode): msg_args = {'volume_id': volume.id, 'instance_uuid': instance_uuid, 'host_name': host_name, 'mountpoint': mountpoint, 'mode': mode, 'volume': volume} cctxt = self._get_cctxt(volume.service_topic_queue, ('3.3', '3.0')) if not cctxt.can_send_version('3.3'): msg_args.pop('volume') return cctxt.call(ctxt, 'attach_volume', **msg_args) def detach_volume(self, ctxt, volume, attachment_id): msg_args = {'volume_id': volume.id, 'attachment_id': attachment_id, 'volume': volume} cctxt = self._get_cctxt(volume.service_topic_queue, ('3.4', '3.0')) if not self.client.can_send_version('3.4'): msg_args.pop('volume') return cctxt.call(ctxt, 'detach_volume', **msg_args) def copy_volume_to_image(self, ctxt, volume, image_meta): cctxt = self._get_cctxt(volume.service_topic_queue) cctxt.cast(ctxt, 'copy_volume_to_image', volume_id=volume['id'], image_meta=image_meta) def initialize_connection(self, ctxt, volume, connector): cctxt = self._get_cctxt(volume.service_topic_queue) return cctxt.call(ctxt, 'initialize_connection', connector=connector, volume=volume) def terminate_connection(self, ctxt, volume, connector, force=False): cctxt = self._get_cctxt(volume.service_topic_queue) return cctxt.call(ctxt, 'terminate_connection', volume_id=volume['id'], connector=connector, force=force) def remove_export(self, ctxt, volume, sync=False): cctxt = self._get_cctxt(volume.service_topic_queue) if sync: cctxt.call(ctxt, 'remove_export', volume_id=volume.id) else: cctxt.cast(ctxt, 'remove_export', volume_id=volume.id) def publish_service_capabilities(self, ctxt): cctxt = self._get_cctxt(fanout=True) cctxt.cast(ctxt, 'publish_service_capabilities') def accept_transfer(self, ctxt, volume, new_user, new_project, no_snapshots=False): msg_args = {'volume_id': 
volume['id'], 'new_user': new_user, 'new_project': new_project, 'no_snapshots': no_snapshots } cctxt = self._get_cctxt(volume.service_topic_queue, ('3.16', '3.0')) if not self.client.can_send_version('3.16'): msg_args.pop('no_snapshots') return cctxt.call(ctxt, 'accept_transfer', **msg_args) def extend_volume(self, ctxt, volume, new_size, reservations): cctxt = self._get_cctxt(volume.service_topic_queue) cctxt.cast(ctxt, 'extend_volume', volume=volume, new_size=new_size, reservations=reservations) @rpc.assert_min_rpc_version('3.19') def extend_volume_completion(self, ctxt, volume, new_size, reservations, error): cctxt = self._get_cctxt(volume.service_topic_queue, version='3.19') cctxt.cast(ctxt, 'extend_volume_completion', volume=volume, new_size=new_size, reservations=reservations, error=error) def migrate_volume(self, ctxt, volume, dest_backend, force_host_copy): backend_p = {'host': dest_backend.host, 'cluster_name': dest_backend.cluster_name, 'capabilities': dest_backend.capabilities} version = '3.5' if not self.client.can_send_version(version): version = '3.0' del backend_p['cluster_name'] cctxt = self._get_cctxt(volume.service_topic_queue, version) cctxt.cast(ctxt, 'migrate_volume', volume=volume, host=backend_p, force_host_copy=force_host_copy) def migrate_volume_completion(self, ctxt, volume, new_volume, error): cctxt = self._get_cctxt(volume.service_topic_queue) return cctxt.call(ctxt, 'migrate_volume_completion', volume=volume, new_volume=new_volume, error=error,) def retype(self, ctxt, volume, new_type_id, dest_backend, migration_policy='never', reservations=None, old_reservations=None): backend_p = {'host': dest_backend.host, 'cluster_name': dest_backend.cluster_name, 'capabilities': dest_backend.capabilities} version = '3.5' if not self.client.can_send_version(version): version = '3.0' del backend_p['cluster_name'] cctxt = self._get_cctxt(volume.service_topic_queue, version) cctxt.cast(ctxt, 'retype', volume=volume, new_type_id=new_type_id, host=backend_p, migration_policy=migration_policy, reservations=reservations, old_reservations=old_reservations) def manage_existing(self, ctxt, volume, ref): cctxt = self._get_cctxt(volume.service_topic_queue) cctxt.cast(ctxt, 'manage_existing', ref=ref, volume=volume) def update_migrated_volume(self, ctxt, volume, new_volume, original_volume_status): cctxt = self._get_cctxt(new_volume.service_topic_queue) cctxt.call(ctxt, 'update_migrated_volume', volume=volume, new_volume=new_volume, volume_status=original_volume_status) def freeze_host(self, ctxt, service): """Set backend host to frozen.""" cctxt = self._get_cctxt(service.service_topic_queue) return cctxt.call(ctxt, 'freeze_host') def thaw_host(self, ctxt, service): """Clear the frozen setting on a backend host.""" cctxt = self._get_cctxt(service.service_topic_queue) return cctxt.call(ctxt, 'thaw_host') def failover(self, ctxt, service, secondary_backend_id=None): """Failover host to the specified backend_id (secondary). 
""" version = '3.8' method = 'failover' if not self.client.can_send_version(version): version = '3.0' method = 'failover_host' cctxt = self._get_cctxt(service.service_topic_queue, version) cctxt.cast(ctxt, method, secondary_backend_id=secondary_backend_id) def failover_completed(self, ctxt, service, updates): """Complete failover on all services of the cluster.""" cctxt = self._get_cctxt(service.service_topic_queue, '3.8', fanout=True) cctxt.cast(ctxt, 'failover_completed', updates=updates) def manage_existing_snapshot(self, ctxt, snapshot, ref, backend): cctxt = self._get_cctxt(backend) cctxt.cast(ctxt, 'manage_existing_snapshot', snapshot=snapshot, ref=ref) def get_capabilities(self, ctxt, backend_id, discover): cctxt = self._get_cctxt(backend_id) return cctxt.call(ctxt, 'get_capabilities', discover=discover) def get_backup_device(self, ctxt, backup, volume): cctxt = self._get_cctxt(volume.service_topic_queue, ('3.17', '3.2', '3.0')) if cctxt.can_send_version('3.17'): cctxt.cast(ctxt, 'get_backup_device', backup=backup, want_objects=True, async_call=True) backup_obj = None elif cctxt.can_send_version('3.2'): backup_obj = cctxt.call(ctxt, 'get_backup_device', backup=backup, want_objects=True) else: backup_dict = cctxt.call(ctxt, 'get_backup_device', backup=backup) backup_obj = objects.BackupDeviceInfo.from_primitive(backup_dict, ctxt) return backup_obj def secure_file_operations_enabled(self, ctxt, volume): cctxt = self._get_cctxt(volume.service_topic_queue) return cctxt.call(ctxt, 'secure_file_operations_enabled', volume=volume) def get_manageable_volumes(self, ctxt, service, marker, limit, offset, sort_keys, sort_dirs): version = ('3.10', '3.0') cctxt = self._get_cctxt(service.service_topic_queue, version=version) msg_args = {'marker': marker, 'limit': limit, 'offset': offset, 'sort_keys': sort_keys, 'sort_dirs': sort_dirs, } if cctxt.can_send_version('3.10'): msg_args['want_objects'] = True return cctxt.call(ctxt, 'get_manageable_volumes', **msg_args) def get_manageable_snapshots(self, ctxt, service, marker, limit, offset, sort_keys, sort_dirs): version = ('3.10', '3.0') cctxt = self._get_cctxt(service.service_topic_queue, version=version) msg_args = {'marker': marker, 'limit': limit, 'offset': offset, 'sort_keys': sort_keys, 'sort_dirs': sort_dirs, } if cctxt.can_send_version('3.10'): msg_args['want_objects'] = True return cctxt.call(ctxt, 'get_manageable_snapshots', **msg_args) def create_group(self, ctxt: context.RequestContext, group: 'objects.Group') -> None: cctxt = self._get_cctxt(group.service_topic_queue) cctxt.cast(ctxt, 'create_group', group=group) def delete_group(self, ctxt, group): cctxt = self._get_cctxt(group.service_topic_queue) cctxt.cast(ctxt, 'delete_group', group=group) def update_group(self, ctxt, group, add_volumes=None, remove_volumes=None): cctxt = self._get_cctxt(group.service_topic_queue) cctxt.cast(ctxt, 'update_group', group=group, add_volumes=add_volumes, remove_volumes=remove_volumes) def create_group_from_src(self, ctxt, group, group_snapshot=None, source_group=None): cctxt = self._get_cctxt(group.service_topic_queue) cctxt.cast(ctxt, 'create_group_from_src', group=group, group_snapshot=group_snapshot, source_group=source_group) def create_group_snapshot(self, ctxt, group_snapshot): cctxt = self._get_cctxt(group_snapshot.service_topic_queue) cctxt.cast(ctxt, 'create_group_snapshot', group_snapshot=group_snapshot) def delete_group_snapshot(self, ctxt, group_snapshot): cctxt = self._get_cctxt(group_snapshot.service_topic_queue) cctxt.cast(ctxt, 
'delete_group_snapshot', group_snapshot=group_snapshot) @rpc.assert_min_rpc_version('3.13') def initialize_connection_snapshot(self, ctxt, snapshot, connector): cctxt = self._get_cctxt(snapshot.service_topic_queue, version='3.13') return cctxt.call(ctxt, 'initialize_connection_snapshot', snapshot_id=snapshot.id, connector=connector) @rpc.assert_min_rpc_version('3.13') def terminate_connection_snapshot(self, ctxt, snapshot, connector, force=False): cctxt = self._get_cctxt(snapshot.service_topic_queue, version='3.13') return cctxt.call(ctxt, 'terminate_connection_snapshot', snapshot_id=snapshot.id, connector=connector, force=force) @rpc.assert_min_rpc_version('3.13') def remove_export_snapshot(self, ctxt, snapshot, sync=False): cctxt = self._get_cctxt(snapshot.service_topic_queue, version='3.13') if sync: cctxt.call(ctxt, 'remove_export_snapshot', snapshot_id=snapshot.id) else: cctxt.cast(ctxt, 'remove_export_snapshot', snapshot_id=snapshot.id) @rpc.assert_min_rpc_version('3.9') def attachment_update(self, ctxt, vref, connector, attachment_id): version = self._compat_ver('3.9') cctxt = self._get_cctxt(vref.service_topic_queue, version=version) return cctxt.call(ctxt, 'attachment_update', vref=vref, connector=connector, attachment_id=attachment_id) @rpc.assert_min_rpc_version('3.9') def attachment_delete(self, ctxt, attachment_id, vref): version = self._compat_ver('3.9') cctxt = self._get_cctxt(vref.service_topic_queue, version=version) return cctxt.call(ctxt, 'attachment_delete', attachment_id=attachment_id, vref=vref) @rpc.assert_min_rpc_version('3.7') def do_cleanup(self, ctxt, cleanup_request): """Perform this service/cluster resource cleanup as requested.""" destination = cleanup_request.service_topic_queue cctxt = self._get_cctxt(destination, '3.7') # NOTE(geguileo): This call goes to do_cleanup code in # cinder.manager.CleanableManager unless in the future we overwrite it # in cinder.volume.manager cctxt.cast(ctxt, 'do_cleanup', cleanup_request=cleanup_request) @rpc.assert_min_rpc_version('3.12') def set_log_levels(self, context, service, log_request): cctxt = self._get_cctxt(host=service.host, version='3.12') cctxt.cast(context, 'set_log_levels', log_request=log_request) @rpc.assert_min_rpc_version('3.12') def get_log_levels(self, context, service, log_request): cctxt = self._get_cctxt(host=service.host, version='3.12') return cctxt.call(context, 'get_log_levels', log_request=log_request) @rpc.assert_min_rpc_version('3.14') def enable_replication(self, ctxt, group): cctxt = self._get_cctxt(group.service_topic_queue, version='3.14') cctxt.cast(ctxt, 'enable_replication', group=group) @rpc.assert_min_rpc_version('3.14') def disable_replication(self, ctxt, group): cctxt = self._get_cctxt(group.service_topic_queue, version='3.14') cctxt.cast(ctxt, 'disable_replication', group=group) @rpc.assert_min_rpc_version('3.14') def failover_replication(self, ctxt, group, allow_attached_volume=False, secondary_backend_id=None): cctxt = self._get_cctxt(group.service_topic_queue, version='3.14') cctxt.cast(ctxt, 'failover_replication', group=group, allow_attached_volume=allow_attached_volume, secondary_backend_id=secondary_backend_id) @rpc.assert_min_rpc_version('3.14') def list_replication_targets(self, ctxt, group): cctxt = self._get_cctxt(group.service_topic_queue, version='3.14') return cctxt.call(ctxt, 'list_replication_targets', group=group) @rpc.assert_min_rpc_version('3.18') def reimage(self, ctxt, volume, image_meta, image_snap=None): cctxt = self._get_cctxt( volume.service_topic_queue, 
version=('3.20', '3.18')) if cctxt.can_send_version('3.20'): cctxt.cast(ctxt, 'reimage', volume=volume, image_meta=image_meta, image_snap=image_snap) else: cctxt.cast(ctxt, 'reimage', volume=volume, image_meta=image_meta) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315577.4151213 cinder-27.0.0/cinder/volume/targets/0000775000175000017500000000000000000000000017313 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/targets/__init__.py0000664000175000017500000000000000000000000021412 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/targets/cxt.py0000664000175000017500000002305100000000000020464 0ustar00zuulzuul00000000000000# Copyright 2015 Chelsio Communications Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import os from oslo_concurrency import processutils as putils from oslo_log import log as logging from oslo_utils import fileutils from oslo_utils import netutils from cinder import exception from cinder import utils from cinder.volume.targets import iscsi LOG = logging.getLogger(__name__) class CxtAdm(iscsi.ISCSITarget): """Chiscsi target configuration for block storage devices. This includes things like create targets, attach, detach etc. 
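As an illustration only (hypothetical volume name, device path and portal), the TARGET_FMT template below renders a chiscsi stanza such as::

    target:
        TargetName=iqn.2010-10.org.openstack:volume-0001
        TargetDevice=/dev/stack-volumes/volume-0001
        PortalGroup=1@10.0.0.5:3260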
""" TARGET_FMT = """ target: TargetName=%s TargetDevice=%s PortalGroup=1@%s """ TARGET_FMT_WITH_CHAP = """ target: TargetName=%s TargetDevice=%s PortalGroup=1@%s AuthMethod=CHAP Auth_CHAP_Policy=Oneway Auth_CHAP_Initiator=%s """ cxt_subdir = 'cxt' def __init__(self, *args, **kwargs): super(CxtAdm, self).__init__(*args, **kwargs) self.volumes_dir = self.configuration.safe_get('volumes_dir') self.volumes_dir = os.path.join(self.volumes_dir, self.cxt_subdir) self.config = self.configuration.safe_get('chiscsi_conf') def _get_volumes_dir(self): return self.volumes_dir def _get_target(self, iqn): # We can use target=iqn here, but iscsictl has no --brief mode, and # this way we save on a lot of unnecessary parsing (out, err) = utils.execute('iscsictl', '-c', 'target=ALL', run_as_root=True) lines = out.split('\n') for line in lines: if iqn in line: parsed = line.split() tid = parsed[2] return tid[3:].rstrip(',') return None def _get_iscsi_target(self, context, vol_id): return 0 def _get_target_and_lun(self, context, volume): lun = 0 # For chiscsi dev starts at lun 0 iscsi_target = 1 return iscsi_target, lun @staticmethod def _get_portal(ip, port=None): # ipv6 addresses use [ip]:port format, ipv4 use ip:port portal_port = ':%d' % port if port else '' if netutils.is_valid_ipv4(ip): portal_ip = ip else: portal_ip = '[' + ip + ']' return portal_ip + portal_port def create_iscsi_target(self, name, tid, lun, path, chap_auth=None, **kwargs): (out, err) = utils.execute('iscsictl', '-c', 'target=ALL', run_as_root=True) LOG.debug("Targets prior to update: %s", out) volumes_dir = self._get_volumes_dir() fileutils.ensure_tree(volumes_dir) vol_id = name.split(':')[1] cfg_port = kwargs.get('portals_port') cfg_ips = kwargs.get('portals_ips') portals = ','.join(map(lambda ip: self._get_portal(ip, cfg_port), cfg_ips)) if chap_auth is None: volume_conf = self.TARGET_FMT % (name, path, portals) else: volume_conf = self.TARGET_FMT_WITH_CHAP % (name, path, portals, '"%s":"%s"' % chap_auth) LOG.debug('Creating iscsi_target for: %s', vol_id) volume_path = os.path.join(volumes_dir, vol_id) if os.path.exists(volume_path): LOG.warning('Persistence file already exists for volume, ' 'found file at: %s', volume_path) utils.robust_file_write(volumes_dir, vol_id, volume_conf) LOG.debug('Created volume path %(vp)s,\n' 'content: %(vc)s', {'vp': volume_path, 'vc': volume_conf}) old_persist_file = None old_name = kwargs.get('old_name', None) if old_name: LOG.debug('Detected old persistence file for volume ' '%(vol)s at %(old_name)s', {'vol': vol_id, 'old_name': old_name}) old_persist_file = os.path.join(volumes_dir, old_name) try: # With the persistent tgts we create them # by creating the entry in the persist file # and then doing an update to get the target # created. 
(out, err) = utils.execute('iscsictl', '-S', 'target=%s' % name, '-f', volume_path, '-x', self.config, run_as_root=True) except putils.ProcessExecutionError as e: LOG.error("Failed to create iscsi target for volume " "id:%(vol_id)s: %(e)s", {'vol_id': vol_id, 'e': e}) # Don't forget to remove the persistent file we created os.unlink(volume_path) raise exception.ISCSITargetCreateFailed(volume_id=vol_id) finally: LOG.debug("StdOut from iscsictl -S: %s", out) LOG.debug("StdErr from iscsictl -S: %s", err) # Grab targets list for debug (out, err) = utils.execute('iscsictl', '-c', 'target=ALL', run_as_root=True) LOG.debug("Targets after update: %s", out) iqn = '%s%s' % (self.iscsi_target_prefix, vol_id) tid = self._get_target(iqn) if tid is None: LOG.error("Failed to create iscsi target for volume " "id:%(vol_id)s. Please verify your configuration " "in %(volumes_dir)s'", {'vol_id': vol_id, 'volumes_dir': volumes_dir, }) raise exception.NotFound() if old_persist_file is not None and os.path.exists(old_persist_file): os.unlink(old_persist_file) return tid def remove_iscsi_target(self, tid, lun, vol_id, vol_name, **kwargs): LOG.info('Removing iscsi_target for: %s', vol_id) vol_uuid_file = vol_name volume_path = os.path.join(self._get_volumes_dir(), vol_uuid_file) if not os.path.exists(volume_path): LOG.warning('Volume path %s does not exist, ' 'nothing to remove.', volume_path) return if os.path.isfile(volume_path): iqn = '%s%s' % (self.iscsi_target_prefix, vol_uuid_file) else: raise exception.ISCSITargetRemoveFailed(volume_id=vol_id) target_exists = False try: (out, err) = utils.execute('iscsictl', '-c', 'target=%s' % iqn, run_as_root=True) LOG.debug("StdOut from iscsictl -c: %s", out) LOG.debug("StdErr from iscsictl -c: %s", err) except putils.ProcessExecutionError as e: if "NOT found" in e.stdout: LOG.info("No iscsi target present for volume " "id:%(vol_id)s: %(e)s", {'vol_id': vol_id, 'e': e}) return else: raise exception.ISCSITargetRemoveFailed(volume_id=vol_id) else: target_exists = True try: utils.execute('iscsictl', '-s', 'target=%s' % iqn, run_as_root=True) except putils.ProcessExecutionError as e: # There exists a race condition where multiple calls to # remove_iscsi_target come in simultaneously. If we can poll # for a target successfully but it is gone before we can remove # it, fail silently if "is not found" in e.stderr and target_exists: LOG.info("No iscsi target present for volume " "id:%(vol_id)s: %(e)s", {'vol_id': vol_id, 'e': e}) return else: LOG.error("Failed to remove iscsi target for volume " "id:%(vol_id)s: %(e)s", {'vol_id': vol_id, 'e': e}) raise exception.ISCSITargetRemoveFailed(volume_id=vol_id) # Carried over from tgt # NOTE(jdg): This *should* be there still but incase # it's not we don't care, so just ignore it if was # somehow deleted between entry of this method # and here if os.path.exists(volume_path): os.unlink(volume_path) else: LOG.debug('Volume path %s not found at end, ' 'of remove_iscsi_target.', volume_path) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/targets/driver.py0000664000175000017500000000612600000000000021165 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import abc from oslo_config import cfg from cinder import db CONF = cfg.CONF class Target(object, metaclass=abc.ABCMeta): """Target object for block storage devices. Base class for target object, where target is data transport mechanism (target) specific calls. This includes things like create targets, attach, detach etc. Base class here does nothing more than set an executor and db as well as force implementation of required methods. """ storage_protocol = None SHARED_TARGET_SUPPORT = False SECONDARY_IP_SUPPORT = True def __init__(self, *args, **kwargs): # TODO(stephenfin): Drop this in favour of using 'db' directly self.db = db self.configuration = kwargs.get('configuration') self._root_helper = kwargs.get('root_helper', 'sudo cinder-rootwrap %s' % CONF.rootwrap_config) @abc.abstractmethod def ensure_export(self, context, volume, volume_path): """Synchronously recreates an export for a volume.""" pass @abc.abstractmethod def create_export(self, context, volume, volume_path): """Exports a Target/Volume. Can optionally return a Dict of changes to the volume object to be persisted. """ pass @abc.abstractmethod def remove_export(self, context, volume): """Removes an export for a Target/Volume.""" pass @abc.abstractmethod def initialize_connection(self, volume, connector): """Allow connection to connector and return connection info.""" pass @abc.abstractmethod def terminate_connection(self, volume, connector, **kwargs): """Disallow connection from connector.""" pass @staticmethod def are_same_connector(A, B): """Whether 2 connectors belong to the same host or not. This is used for multi attach volumes, to be able to know when there are no more attachments on a given host. This is the generic implementation, but specific targets may overwrite it. For example iSCSI would check the the "initiator" key instead, and NVMe-oF would check the "nqn" key. """ a_host = A.get('host') return a_host and (a_host == B.get('host')) def extend_target(self, volume): """Reinitializes a target after the volume has been extended. Most drivers don't need to do anything, but in other cases this may cause IO disruption. """ pass ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/targets/fake.py0000664000175000017500000000205000000000000020570 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
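# A hedged, minimal illustration (made-up connector dicts) of the
# are_same_connector() convention defined in driver.py above: the base
# Target compares the 'host' key, while iSCSI targets compare 'initiator'
# and NVMe-oF targets compare 'nqn':
#
#   a = {'host': 'compute-1', 'initiator': 'iqn.1994-05.com.redhat:c1'}
#   b = {'host': 'compute-1', 'initiator': 'iqn.1994-05.com.redhat:c2'}
#   driver.Target.are_same_connector(a, b)       # True, same host
#   iscsi.ISCSITarget.are_same_connector(a, b)   # False, initiators differ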
from cinder.volume.targets import iscsi class FakeTarget(iscsi.ISCSITarget): VERSION = '0.1' def _get_target_and_lun(self, context, volume): return (0, 0) def create_iscsi_target(self, name, tid, lun, path, chap_auth, **kwargs): pass def remove_iscsi_target(self, tid, lun, vol_id, vol_name, **kwargs): pass def _get_iscsi_target(self, context, vol_id): pass def _get_target(self, iqn): pass ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/targets/iscsi.py0000664000175000017500000003575700000000000021020 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import abc from oslo_concurrency import processutils from oslo_log import log as logging from cinder.common import constants from cinder import exception from cinder.i18n import _ from cinder import utils from cinder.volume.targets import driver from cinder.volume import volume_utils LOG = logging.getLogger(__name__) class ISCSITarget(driver.Target): """Target object for block storage devices. Base class for target object, where target is data transport mechanism (target) specific calls. This includes things like create targets, attach, detach etc. """ def __init__(self, *args, **kwargs): super(ISCSITarget, self).__init__(*args, **kwargs) self.iscsi_target_prefix = self.configuration.safe_get('target_prefix') self.iscsi_protocol = self.configuration.safe_get('target_protocol') self.protocol = constants.ISCSI self.volumes_dir = self.configuration.safe_get('volumes_dir') def _get_iscsi_properties(self, volume, multipath=False): """Gets iscsi configuration We ideally get saved information in the volume entity, but fall back to discovery if need be. Discovery may be completely removed in the future. The properties are: :target_discovered: boolean indicating whether discovery was used :target_iqn: the IQN of the iSCSI target :target_portal: the portal of the iSCSI target :target_lun: the lun of the iSCSI target :volume_id: the uuid of the volume :auth_method:, :auth_username:, :auth_password: the authentication details. Right now, either auth_method is not present meaning no authentication, or auth_method == `CHAP` meaning use CHAP with the specified credentials. :discard: boolean indicating if discard is supported In some of drivers that support multiple connections (for multipath and for single path with failover on connection failure), it returns :target_iqns, :target_portals, :target_luns, which contain lists of multiple values. The main portal information is also returned in :target_iqn, :target_portal, :target_lun for backward compatibility. Note that some of drivers don't return :target_portals even if they support multipath. Then the connector should use sendtargets discovery to find the other portals if it supports multipath. 
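A hedged example of the returned dictionary for a volume exported on two portals (all values hypothetical, CHAP keys omitted)::

    {
        'target_discovered': False,
        'target_iqn': 'iqn.2010-10.org.openstack:volume-0001',
        'target_portal': '10.0.0.5:3260',
        'target_lun': 1,
        'target_iqns': ['iqn.2010-10.org.openstack:volume-0001',
                        'iqn.2010-10.org.openstack:volume-0001'],
        'target_portals': ['10.0.0.5:3260', '10.0.0.6:3260'],
        'target_luns': [1, 1],
        'volume_id': '9a0d35d0-175a-11e4-8c21-0800200c9a66',
        'encrypted': False,
    }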
""" properties = {} location = volume['provider_location'] if location: # provider_location is the same format as iSCSI discovery output properties['target_discovered'] = False else: location = self._do_iscsi_discovery(volume) if not location: msg = (_("Could not find iSCSI export for volume %s") % (volume['name'])) raise exception.InvalidVolume(reason=msg) LOG.debug("ISCSI Discovery: Found %s", location) properties['target_discovered'] = True results = location.split(" ") portals = results[0].split(",")[0].split(";") iqn = results[1] nr_portals = len(portals) try: lun = int(results[2]) except (IndexError, ValueError): lun = 0 if nr_portals > 1 or multipath: properties['target_portals'] = portals properties['target_iqns'] = [iqn] * nr_portals properties['target_luns'] = [lun] * nr_portals properties['target_portal'] = portals[0] properties['target_iqn'] = iqn properties['target_lun'] = lun properties['volume_id'] = volume['id'] auth = volume['provider_auth'] if auth: (auth_method, auth_username, auth_secret) = auth.split() properties['auth_method'] = auth_method properties['auth_username'] = auth_username properties['auth_password'] = auth_secret geometry = volume.get('provider_geometry', None) if geometry: (physical_block_size, logical_block_size) = geometry.split() properties['physical_block_size'] = physical_block_size properties['logical_block_size'] = logical_block_size encryption_key_id = volume.get('encryption_key_id', None) properties['encrypted'] = encryption_key_id is not None return properties def _iscsi_authentication(self, chap, name, password): return "%s %s %s" % (chap, name, password) def _do_iscsi_discovery(self, volume): # TODO(justinsb): Deprecate discovery and use stored info # NOTE(justinsb): Discovery won't work with CHAP-secured targets (?) 
LOG.warning("ISCSI provider_location not stored, using discovery") volume_id = volume['id'] try: # NOTE(griff) We're doing the split straight away which should be # safe since using '@' in hostname is considered invalid (out, _err) = utils.execute('iscsiadm', '-m', 'discovery', '-t', 'sendtargets', '-p', volume['host'].split('@')[0], run_as_root=True) except processutils.ProcessExecutionError as ex: LOG.error("ISCSI discovery attempt failed for: %s", volume['host'].split('@')[0]) LOG.debug("Error from iscsiadm -m discovery: %s", ex.stderr) return None for target in out.splitlines(): if (self.configuration.safe_get('target_ip_address') in target and volume_id in target): return target return None def _get_portals_config(self): # Prepare portals configuration portals_ips = ([self.configuration.target_ip_address] + self.configuration.target_secondary_ip_addresses or []) return {'portals_ips': portals_ips, 'portals_port': self.configuration.target_port} def create_export(self, context, volume, volume_path): """Creates an export for a logical volume.""" # 'iscsi_name': 'iqn.2010-10.org.openstack:volume-00000001' iscsi_name = "%s%s" % (self.configuration.target_prefix, volume['name']) iscsi_target, lun = self._get_target_and_lun(context, volume) # Verify we haven't setup a CHAP creds file already # if DNE no big deal, we'll just create it chap_auth = self._get_target_chap_auth(context, volume) if not chap_auth: chap_auth = (volume_utils.generate_username(), volume_utils.generate_password()) # Get portals ips and port portals_config = self._get_portals_config() # NOTE(jdg): For TgtAdm case iscsi_name is the ONLY param we need # should clean this all up at some point in the future tid = self.create_iscsi_target(iscsi_name, iscsi_target, lun, volume_path, chap_auth, **portals_config) data = {} data['location'] = self._iscsi_location( self.configuration.target_ip_address, tid, iscsi_name, lun, self.configuration.target_secondary_ip_addresses) LOG.debug('Set provider_location to: %s', data['location']) data['auth'] = self._iscsi_authentication( 'CHAP', *chap_auth) return data def remove_export(self, context, volume): try: iscsi_target, lun = self._get_target_and_lun(context, volume) except exception.NotFound: LOG.info("Skipping remove_export. No iscsi_target " "provisioned for volume: %s", volume['id']) return try: # NOTE: provider_location may be unset if the volume hasn't # been exported location = volume['provider_location'].split(' ') iqn = location[1] # ietadm show will exit with an error # this export has already been removed self.show_target(iscsi_target, iqn=iqn) except Exception: LOG.info("Skipping remove_export. No iscsi_target " "is presently exported for volume: %s", volume['id']) return # NOTE: For TgtAdm case volume['id'] is the ONLY param we need self.remove_iscsi_target(iscsi_target, lun, volume['id'], volume['name']) def ensure_export(self, context, volume, volume_path): """Recreates an export for a logical volume.""" iscsi_name = "%s%s" % (self.configuration.target_prefix, volume['name']) chap_auth = self._get_target_chap_auth(context, volume) # Get portals ips and port portals_config = self._get_portals_config() iscsi_target, lun = self._get_target_and_lun(context, volume) self.create_iscsi_target( iscsi_name, iscsi_target, lun, volume_path, chap_auth, check_exit_code=False, old_name=None, **portals_config) def initialize_connection(self, volume, connector): """Initializes the connection and returns connection info. The iscsi driver returns a driver_volume_type of 'iscsi'. 
The format of the driver data is defined in _get_iscsi_properties. Example return value:: { 'driver_volume_type': 'iscsi' 'data': { 'target_discovered': True, 'target_iqn': 'iqn.2010-10.org.openstack:volume-00000001', 'target_portal': '127.0.0.0.1:3260', 'volume_id': '9a0d35d0-175a-11e4-8c21-0800200c9a66', 'discard': False, } } """ iscsi_properties = self._get_iscsi_properties(volume, connector.get( 'multipath')) return { 'driver_volume_type': self.iscsi_protocol, 'data': iscsi_properties } def terminate_connection(self, volume, connector, **kwargs): pass def validate_connector(self, connector): # NOTE(jdg): api passes in connector which is initiator info if 'initiator' not in connector: err_msg = ('The volume driver requires the iSCSI initiator ' 'name in the connector.') LOG.error(err_msg) raise exception.InvalidConnectorException(missing='initiator') return True def _iscsi_location(self, ip, target, iqn, lun=None, ip_secondary=None): ip_secondary = ip_secondary or [] port = self.configuration.target_port portals = map(lambda x: "%s:%s" % (volume_utils.sanitize_host(x), port), [ip] + ip_secondary) return ("%(portals)s,%(target)s %(iqn)s %(lun)s" % ({'portals': ";".join(portals), 'target': target, 'iqn': iqn, 'lun': lun})) def show_target(self, iscsi_target, iqn, **kwargs): if iqn is None: raise exception.InvalidParameterValue( err=_('valid iqn needed for show_target')) tid = self._get_target(iqn) if tid is None: raise exception.NotFound() def _get_target_chap_auth(self, context, volume): """Get the current chap auth username and password.""" try: # Query DB to get latest state of volume volume_info = self.db.volume_get(context, volume['id']) # 'provider_auth': 'CHAP user_id password' if volume_info['provider_auth']: return tuple(volume_info['provider_auth'].split(' ', 3)[1:]) except exception.NotFound: LOG.debug('Failed to get CHAP auth from DB for %s.', volume['id']) def extend_target(self, volume): """Reinitializes a target after the LV has been extended. Note: This will cause IO disruption in most cases. """ iscsi_name = "%s%s" % (self.configuration.target_prefix, volume['name']) if volume.volume_attachment: self._do_tgt_update(iscsi_name, force=True) @abc.abstractmethod def _get_target_and_lun(self, context, volume): """Get iscsi target and lun.""" pass @abc.abstractmethod def create_iscsi_target(self, name, tid, lun, path, chap_auth, **kwargs): pass @abc.abstractmethod def remove_iscsi_target(self, tid, lun, vol_id, vol_name, **kwargs): pass @abc.abstractmethod def _get_iscsi_target(self, context, vol_id): pass @abc.abstractmethod def _get_target(self, iqn): pass def _do_tgt_update(self, name, force=False): pass @staticmethod def are_same_connector(A, B): a_initiator = A.get('initiator') return a_initiator and (a_initiator == B.get('initiator')) class SanISCSITarget(ISCSITarget): """iSCSI target for san devices. San devices are slightly different, they don't need to implement all of the same things that we need to implement locally fro LVM and local block devices when we create and manage our own targets. 
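A minimal, hypothetical subclass therefore only fills in the export and connection handling, e.g.::

    class MySanTarget(SanISCSITarget):
        def create_export(self, context, volume, volume_path):
            ...

        def remove_export(self, context, volume):
            ...

        def ensure_export(self, context, volume, volume_path):
            ...

        def terminate_connection(self, volume, connector, **kwargs):
            ...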
""" @abc.abstractmethod def create_export(self, context, volume, volume_path): pass @abc.abstractmethod def remove_export(self, context, volume): pass @abc.abstractmethod def ensure_export(self, context, volume, volume_path): pass @abc.abstractmethod def terminate_connection(self, volume, connector, **kwargs): pass # NOTE(jdg): Items needed for local iSCSI target drivers, # but NOT sans Stub them out here to make abc happy # Use care when looking at these to make sure something # that's inheritted isn't dependent on one of # these. def _get_target_and_lun(self, context, volume): pass def _get_target_chap_auth(self, context, volume): pass def create_iscsi_target(self, name, tid, lun, path, chap_auth, **kwargs): pass def remove_iscsi_target(self, tid, lun, vol_id, vol_name, **kwargs): pass def _get_iscsi_target(self, context, vol_id): pass def _get_target(self, iqn): pass ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/targets/lio.py0000664000175000017500000001750700000000000020462 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_concurrency import processutils as putils from oslo_log import log as logging from cinder import exception from cinder import utils from cinder.volume.targets import iscsi LOG = logging.getLogger(__name__) class LioAdm(iscsi.ISCSITarget): """iSCSI target administration for LIO using python-rtslib.""" def __init__(self, *args, **kwargs): super(LioAdm, self).__init__(*args, **kwargs) # FIXME(jdg): modify executor to use the cinder-rtstool self.iscsi_target_prefix = self.configuration.safe_get('target_prefix') self._verify_rtstool() def _verify_rtstool(self): try: # This call doesn't need locking utils.execute('cinder-rtstool', 'verify') except (OSError, putils.ProcessExecutionError): LOG.error('cinder-rtstool is not installed correctly') raise @staticmethod @utils.synchronized('lioadm', external=True) def _execute(*args, **kwargs): """Locked execution to prevent racing issues. Racing issues are derived from a bug in RTSLib: https://github.com/agrover/rtslib-fb/issues/36 """ return utils.execute(*args, **kwargs) def _get_target(self, iqn): (out, err) = self._execute('cinder-rtstool', 'get-targets', run_as_root=True) lines = out.split('\n') for line in lines: if iqn in line: return line return None def _get_targets(self): (out, err) = self._execute('cinder-rtstool', 'get-targets', run_as_root=True) return out def _get_iscsi_target(self, context, vol_id): return 0 def _get_target_and_lun(self, context, volume): lun = 0 # For lio, the lun starts at lun 0. iscsi_target = 0 # NOTE: Not used by lio. return iscsi_target, lun def _persist_configuration(self, vol_id): try: self._execute('cinder-rtstool', 'save', run_as_root=True) # On persistence failure we don't raise an exception, as target has # been successfully created. 
except putils.ProcessExecutionError: LOG.warning("Failed to save iscsi LIO configuration when " "modifying volume id: %(vol_id)s.", {'vol_id': vol_id}) def _restore_configuration(self): try: self._execute('cinder-rtstool', 'restore', run_as_root=True) # On persistence failure we don't raise an exception, as target has # been successfully created. except putils.ProcessExecutionError: LOG.warning("Failed to restore iscsi LIO configuration.") def create_iscsi_target(self, name, tid, lun, path, chap_auth=None, **kwargs): # tid and lun are not used vol_id = name.split(':')[1] LOG.info('Creating iscsi_target for volume: %s', vol_id) chap_auth_userid = "" chap_auth_password = "" if chap_auth is not None: (chap_auth_userid, chap_auth_password) = chap_auth optional_args = [] if 'portals_port' in kwargs: optional_args.append('-p%s' % kwargs['portals_port']) if 'portals_ips' in kwargs: optional_args.append('-a' + ','.join(kwargs['portals_ips'])) try: command_args = ['cinder-rtstool', 'create', path, name, chap_auth_userid, chap_auth_password, self.iscsi_protocol == 'iser'] + optional_args self._execute(*command_args, run_as_root=True) except putils.ProcessExecutionError: LOG.exception("Failed to create iscsi target for volume " "id:%s.", vol_id) raise exception.ISCSITargetCreateFailed(volume_id=vol_id) iqn = '%s%s' % (self.iscsi_target_prefix, vol_id) tid = self._get_target(iqn) if tid is None: LOG.error("Failed to create iscsi target for volume id:%s.", vol_id) raise exception.NotFound() # We make changes persistent self._persist_configuration(vol_id) return tid def remove_iscsi_target(self, tid, lun, vol_id, vol_name, **kwargs): LOG.info('Removing iscsi_target: %s', vol_id) vol_uuid_name = vol_name iqn = '%s%s' % (self.iscsi_target_prefix, vol_uuid_name) try: self._execute('cinder-rtstool', 'delete', iqn, run_as_root=True) except putils.ProcessExecutionError: LOG.exception("Failed to remove iscsi target for volume id:%s.", vol_id) raise exception.ISCSITargetRemoveFailed(volume_id=vol_id) # We make changes persistent self._persist_configuration(vol_id) def initialize_connection(self, volume, connector): volume_iqn = volume['provider_location'].split(' ')[1] (auth_method, auth_user, auth_pass) = \ volume['provider_auth'].split(' ', 3) # Add initiator iqns to target ACL try: self._execute('cinder-rtstool', 'add-initiator', volume_iqn, auth_user, auth_pass, connector['initiator'], run_as_root=True) except putils.ProcessExecutionError: LOG.exception("Failed to add initiator iqn %s to target", connector['initiator']) raise exception.ISCSITargetAttachFailed( volume_id=volume['id']) # We make changes persistent self._persist_configuration(volume['id']) return super(LioAdm, self).initialize_connection(volume, connector) def terminate_connection(self, volume, connector, **kwargs): if volume['provider_location'] is None: LOG.debug('No provider_location for volume %s.', volume['id']) return volume_iqn = volume['provider_location'].split(' ')[1] # Delete initiator iqns from target ACL try: self._execute('cinder-rtstool', 'delete-initiator', volume_iqn, connector['initiator'], run_as_root=True) except putils.ProcessExecutionError: LOG.exception( "Failed to delete initiator iqn %s from target.", connector['initiator']) raise exception.ISCSITargetDetachFailed(volume_id=volume['id']) # We make changes persistent self._persist_configuration(volume['id']) def ensure_export(self, context, volume, volume_path): """Recreate exports for logical volumes.""" # Restore saved configuration file if no target exists. 
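        # The restore goes through `cinder-rtstool restore` (see
        # _restore_configuration above), reloading the LIO state previously
        # persisted with `cinder-rtstool save` in _persist_configuration.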
if not self._get_targets(): LOG.info('Restoring iSCSI target from configuration file') self._restore_configuration() return LOG.info("Skipping ensure_export. Found existing iSCSI target.") ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/targets/nvmeof.py0000664000175000017500000002156400000000000021167 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_log import log as logging from cinder.common import constants from cinder import exception from cinder.i18n import _ from cinder.volume.targets import driver LOG = logging.getLogger(__name__) class UnsupportedNVMETProtocol(exception.Invalid): message = _("An invalid 'target_protocol' " "value was provided: %(protocol)s") class NVMeOF(driver.Target): """Target object for block storage devices with RDMA transport.""" protocol = constants.NVMEOF_VARIANT_2 target_protocol_map = { 'nvmet_rdma': 'rdma', 'nvmet_tcp': 'tcp', } def __init__(self, *args, **kwargs): """Reads NVMeOF configurations.""" super(NVMeOF, self).__init__(*args, **kwargs) self.target_ips = ([self.configuration.target_ip_address] + self.configuration.target_secondary_ip_addresses) self.target_port = self.configuration.target_port self.nvmet_port_id = self.configuration.nvmet_port_id self.nvmet_ns_id = self.configuration.nvmet_ns_id self.nvmet_subsystem_name = self.configuration.target_prefix # Compatibility with non lvm drivers self.share_targets = getattr(self.configuration, 'lvm_share_target', False) target_protocol = self.configuration.target_protocol if target_protocol in self.target_protocol_map: self.nvme_transport_type = self.target_protocol_map[ target_protocol] else: raise UnsupportedNVMETProtocol( protocol=target_protocol ) # Secondary ip addresses only work with new connection info if (self.configuration.target_secondary_ip_addresses and self.configuration.nvmeof_conn_info_version == 1): raise exception.InvalidConfigurationValue( 'Secondary addresses need to use NVMe-oF connection properties' ' format version 2 or greater (nvmeof_conn_info_version).') def initialize_connection(self, volume, connector): """Returns the connection info. In NVMeOF driver, :driver_volume_type: is set to 'nvmeof', :data: is the driver data that has the value of _get_connection_properties_from_vol. Example return value: .. code-block:: json { "driver_volume_type": "nvmeof", "data": { "target_portal": "1.1.1.1", "target_port": 4420, "nqn": "nqn.volume-0001", "transport_type": "rdma", "ns_id": 10 } } """ return { 'driver_volume_type': self.protocol, 'data': self._get_connection_properties_from_vol(volume) } def _get_connection_properties_from_vol(self, volume): """Gets NVMeOF connection configuration. Returns the connection info based on the volume's provider_location and the _get_nvme_uuid method for the volume. For the specific data returned check the _get_connection_properties method. 
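A hedged example of a provider_location value this method can parse (addresses, port and nqn are hypothetical)::

    10.0.0.5,10.0.0.6:4420 rdma nqn.nvme-subsystem-1-volume-0001 10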
:return: dictionary with the connection properties using one of the 2 existing formats depending on the nvmeof_conn_info_version configuration option. """ location = volume['provider_location'] target_connection, nvme_transport_type, nqn, nvmet_ns_id = ( location.split(' ')) target_portals, target_port = target_connection.split(':') target_portals = target_portals.split(',') uuid = self._get_nvme_uuid(volume) return self._get_connection_properties(nqn, target_portals, target_port, nvme_transport_type, nvmet_ns_id, uuid) def _get_connection_properties(self, nqn, portals, port, transport, ns_id, uuid): """Get connection properties dictionary. For nvmeof_conn_info_version set to 1 (default) the old format will be sent: { 'target_portal': NVMe target IP address 'target_port': NVMe target port 'nqn': NQN of the NVMe target 'transport_type': Network fabric being used for an NVMe-oF network One of: tcp, rdma 'ns_id': namespace id associated with the subsystem } For nvmeof_conn_info_version set to 2 the new format will be sent: { 'target_nqn': NQN of the NVMe target 'vol_uuid': NVMe-oF UUID of the volume. May be different than Cinder volume id and may be None if ns_id is provided. 'portals': [(target_address, target_port, transport_type) ... ] 'ns_id': namespace id associated with the subsystem, in case target doesn't provide the volume_uuid. } Unlike the old format the transport_type can be one of RoCEv2 and tcp :return: dictionary with the connection properties using one of the 2 existing formats depending on the nvmeof_conn_info_version configuration option. """ # NVMe-oF Connection Information Version 2 if self.configuration.nvmeof_conn_info_version == 2: if transport == 'rdma': transport = 'RoCEv2' return { 'target_nqn': nqn, 'vol_uuid': uuid, 'portals': [(portal, port, transport) for portal in portals], 'ns_id': ns_id, } # NVMe-oF Connection Information Version 1 result = { 'target_portal': portals[0], 'target_port': port, 'nqn': nqn, 'transport_type': transport, 'ns_id': ns_id, } return result def _get_nvme_uuid(self, volume): """Return the NVMe uuid of a given volume. Targets that want to support the nvmeof_conn_info_version=2 option need to override this method and return the NVMe uuid of the given volume. 
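A hedged sketch of such an override; the nvmet target in this tree, for instance, simply returns the volume's name_id::

    def _get_nvme_uuid(self, volume):
        return volume.name_id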
""" return None def get_nvmeof_location(self, nqn, target_ips, target_port, nvme_transport_type, nvmet_ns_id): """Serializes driver data into single line string.""" return "%(ip)s:%(port)s %(transport)s %(nqn)s %(ns_id)s" % ( {'ip': ','.join(target_ips), 'port': target_port, 'transport': nvme_transport_type, 'nqn': nqn, 'ns_id': nvmet_ns_id}) def terminate_connection(self, volume, connector, **kwargs): pass @staticmethod def are_same_connector(A, B): a_nqn = A.get('nqn') return a_nqn and (a_nqn == B.get('nqn')) def create_export(self, context, volume, volume_path): """Creates export data for a logical volume.""" return self.create_nvmeof_target( volume['id'], self.configuration.target_prefix, self.target_ips, self.target_port, self.nvme_transport_type, self.nvmet_port_id, self.nvmet_ns_id, volume_path) def ensure_export(self, context, volume, volume_path): pass def remove_export(self, context, volume): return self.delete_nvmeof_target(volume) def validate_connector(self, connector): required = 'nqn' if required not in connector: LOG.error('Required information %(required)s not found in ' 'connector data.', {"required": required}) raise exception.InvalidConnectorException( missing=required) return True def create_nvmeof_target(self, volume_id, subsystem_name, target_ips, target_port, transport_type, nvmet_port_id, ns_id, volume_path): """Targets that don't override create_export must implement this.""" pass def delete_nvmeof_target(self, target_name): """Targets that don't override remove_export must implement this.""" pass ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/targets/nvmet.py0000664000175000017500000003056000000000000021022 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import os from oslo_log import log as logging from oslo_utils import uuidutils from cinder import exception from cinder.privsep.targets import nvmet from cinder import utils from cinder.volume.targets import nvmeof LOG = logging.getLogger(__name__) class NVMETTargetAddError(exception.CinderException): message = "Failed to add subsystem: %(subsystem)s" class NVMETTargetDeleteError(exception.CinderException): message = "Failed to delete subsystem: %(subsystem)s" class NVMET(nvmeof.NVMeOF): SHARED_TARGET_SUPPORT = True def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self._nvmet_root = nvmet.Root() # ####### Connection initiation methods ######## def initialize_connection(self, volume, connector): """Create an export & map if shared.""" # Non-shared connections was the original implementation where all the # export & mapping was done on export and the connection info was # stored in the volume, so let the original implementation handle it. 
if not self.share_targets: return super().initialize_connection(volume, connector) # For the shared case the export only stores the path of the volume volume_path = volume.provider_location if not os.path.exists(volume_path): raise exception.InvalidConfigurationValue( 'Target driver configured with shared targets, but volume ' 'exported as non shared.') nqn, ns_id = self._map_volume(volume, volume_path, connector) uuid = self._get_nvme_uuid(volume) return { 'driver_volume_type': self.protocol, 'data': self._get_connection_properties(nqn, self.target_ips, self.target_port, self.nvme_transport_type, ns_id, uuid), } def create_export(self, context, volume, volume_path): """Create an export & map if not shared.""" # For shared targets everything gets done on initialize_connection if self.share_targets: location = volume_path else: nqn, ns_id = self._map_volume(volume, volume_path) location = self.get_nvmeof_location(nqn, self.target_ips, self.target_port, self.nvme_transport_type, ns_id) return {'location': location, 'auth': ''} @utils.synchronized('nvmetcli', external=True) def _map_volume(self, volume, volume_path, connector=None): """Ensure a volume is exported and mapped in nvmet.""" # Create NVME subsystem for previously created LV nqn = self._get_target_nqn(volume.id, connector) try: uuid = self._get_nvme_uuid(volume) ns_id = self._ensure_subsystem_exists(nqn, volume_path, uuid) self._ensure_port_exports(nqn, self.target_ips, self.target_port, self.nvme_transport_type, self.nvmet_port_id) except Exception: LOG.error('Failed to add subsystem: %s', nqn) raise NVMETTargetAddError(subsystem=nqn) LOG.info('Subsystem %s now exported on port %s', nqn, self.target_port) return nqn, ns_id def _ensure_subsystem_exists(self, nqn, volume_path, uuid): """Ensure a subsystem and namespace exist in nvmet.""" # Assume if subsystem exists, it has the right configuration try: subsystem = nvmet.Subsystem(nqn) LOG.debug('Skip creating subsystem %s as it already exists.', nqn) ns_id = self._ensure_namespace_exists(subsystem, volume_path, uuid) return ns_id except nvmet.NotFound: LOG.debug('Creating subsystem %s.', nqn) ns_id = self.nvmet_ns_id subsystem_section = { "allowed_hosts": [], "attr": { "allow_any_host": "1" }, "namespaces": [self._namespace_dict(uuid, volume_path, ns_id)], "nqn": nqn} nvmet.Subsystem.setup(subsystem_section) # privsep LOG.debug('Added subsystem: %s', nqn) return ns_id def _namespace_dict(self, uuid, volume_path, ns_id): """Build the dict data for a new namespace in nvmet library format.""" if self.share_targets: nguid = uuid LOG.debug('Sharing subsystem, using nguid = uuid = %s', nguid) else: nguid = str(uuidutils.generate_uuid()) LOG.debug('Not sharing subsystem, using randmo nguid = %s', nguid) return { "device": { "nguid": nguid, "uuid": uuid, "path": volume_path, }, "enable": 1, "nsid": ns_id } def _ensure_namespace_exists(self, subsystem, volume_path, uuid): """Ensure the namespace exists in nvmet.""" for ns in subsystem.namespaces: if ns.get_attr('device', 'path') == volume_path: return ns.nsid ns_id = self._get_available_namespace_id(subsystem) ns_data = self._namespace_dict(uuid, volume_path, ns_id) nvmet.Namespace.setup(subsystem, ns_data) return ns_id def _get_available_namespace_id(self, subsystem): """Get the next available ns_id. Shared targets will have multiple namespaces under the same subsystem, so we cannot use self.nvmet_ns_id for them all. 
This method searches for an available namespace id in the provided subsystem considering all ids below self.nvmet_ns_id as reserved. We cannot let the nvmet library assign it automatically because it starts assigning from 1. For non shared the method returns configured nvmet_ns_id. """ minimum = self.nvmet_ns_id if not self.share_targets: return minimum used = [ns.nsid for ns in subsystem.namespaces if ns.nsid >= minimum] if not used: return minimum higher = max(used) # If there are no gaps return the next available id if len(used) > higher - minimum: if higher == nvmet.Namespace.MAX_NSID: raise Exception('Reached max namespaces in subsystem') return higher + 1 # Find an id in the gaps. Don't include higher, as we know it's used available = set(range(minimum, higher)).difference(used) return available.pop() def _get_nvme_uuid(self, volume): return volume.name_id def _ensure_port_exports(self, nqn, addrs, port, transport_type, port_id): for addr in addrs: # Assume if port exists, it has the right configuration try: nvme_port = nvmet.Port(port_id) LOG.debug('Skip creating port %s as it already exists.', port_id) except nvmet.NotFound: LOG.debug('Creating port %s.', port_id) # Port section port_section = { "addr": { "adrfam": "ipv4", "traddr": addr, "treq": "not specified", "trsvcid": port, "trtype": transport_type, }, "portid": port_id, "referrals": [], "subsystems": [nqn] } nvmet.Port.setup(self._nvmet_root, port_section) # privsep LOG.debug('Added port: %s', port_id) else: if nqn in nvme_port.subsystems: LOG.debug('%s already exported on port %s', nqn, port_id) else: nvme_port.add_subsystem(nqn) # privsep LOG.debug('Exported %s on port %s', nqn, port_id) port_id += 1 # ####### Connection termination methods ######## def terminate_connection(self, volume, connector, **kwargs): """Remove the mapping for shared.""" # TODO: Add support for force and other parameters if self.share_targets: self._locked_unmap_volume(volume, connector) LOG.info('Volume %s is no longer exported', volume.id) def remove_export(self, context, volume): """Remove the mapping for non shared.""" if not self.share_targets: self._locked_unmap_volume(volume) LOG.info('Volume %s is no longer exported', volume.id) @utils.synchronized('nvmetcli', external=True) def _locked_unmap_volume(self, volume, connector=None): """Remove volume's ns from subsystem and subsystem if empty.""" if connector or not self.share_targets: nqns = [self._get_target_nqn(volume.id, connector)] else: # We need to remove all existing maps (we are sharing) LOG.debug('Removing EVERYTHING for volume %s', volume.id) nqns = self._get_nqns_for_location(volume.provider_location) exceptions = [] for nqn in nqns: try: self._unmap_volume(volume, nqn) except Exception as exc: exceptions.append(exc) # TODO: Once we only support Python 3.11+ use ExceptionGroup to raise # all the exceptions. if exceptions: raise exceptions[0] def _unmap_volume(self, volume, nqn): try: subsystem = nvmet.Subsystem(nqn) except nvmet.NotFound: LOG.info('Skipping unmapping. 
No NVMe subsystem for volume: %s', volume.id) return if self.share_targets: volume_path = volume.provider_location for ns in subsystem.namespaces: if ns.get_attr('device', 'path') == volume_path: LOG.debug('Deleting namespace %s', ns.nsid) ns.delete() # privsep call break # If there are still namespaces we cannot remove the subsystem if any(s for s in subsystem.namespaces): return for port in self._nvmet_root.ports: if nqn in port.subsystems: LOG.debug('Removing %s from port %s', nqn, port.portid) port.remove_subsystem(nqn) # privsep call LOG.debug('Deleting %s', nqn) subsystem.delete() # privsep call LOG.info('Subsystem %s removed', nqn) # ####### General methods ######## def _get_target_nqn(self, volume_id, connector): # For shared targets the subsystem is named after the host if self.share_targets: postfix = connector['host'] else: postfix = volume_id return f'nqn.{self.nvmet_subsystem_name}-{postfix}' def _get_nqns_for_location(self, provider_location): """Get all subystem nqns for a give provider location. This also returns empty subsystems, since we don't know if those were created to try to use them for the volume of the provider_location and failed during the creation. This method needs to be called within the nvmetcli locked section. """ nqns = [] for subsys in self._nvmet_root.subsystems: empty = True # subsytems is an iterable, can check it with bool found = False for ns in subsys.namespaces: empty = False if ns.get_attr('device', 'path') == provider_location: found = True break if found or empty: nqns.append(subsys.nqn) return nqns ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/targets/scst.py0000664000175000017500000004247700000000000020657 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from oslo_concurrency import processutils as putils from oslo_log import log as logging from cinder import exception from cinder.i18n import _ import cinder.privsep.targets.scst from cinder.volume.targets import iscsi from cinder.volume import volume_utils LOG = logging.getLogger(__name__) class SCSTAdm(iscsi.ISCSITarget): def __init__(self, *args, **kwargs): super(SCSTAdm, self).__init__(*args, **kwargs) self.volumes_dir = self.configuration.safe_get('volumes_dir') self.iscsi_target_prefix = self.configuration.safe_get( 'target_prefix') self.target_name = self.configuration.safe_get('scst_target_iqn_name') self.target_driver = self.configuration.safe_get('scst_target_driver') self.chap_username = self.configuration.safe_get('chap_username') self.chap_password = self.configuration.safe_get('chap_password') self.initiator_iqn = None self.remove_initiator_iqn = None def scst_execute(self, *args): return cinder.privsep.targets.scst.run_scstadmin(*args) def validate_connector(self, connector): # iSCSI drivers require the initiator information if 'initiator' not in connector: err_msg = _('The volume driver requires the iSCSI initiator ' 'name in the connector.') LOG.error(err_msg) raise exception.VolumeBackendAPIException(data=err_msg) self.initiator_iqn = connector['initiator'] def terminate_connection(self, volume, connector, **kwargs): self.remove_initiator_iqn = connector['initiator'] def _get_target(self, iqn): (out, _err) = self.scst_execute('-list_target') if iqn in out: return self._target_attribute(iqn) return None def _target_attribute(self, iqn): (out, _err) = self.scst_execute('-list_tgt_attr', iqn, '-driver', self.target_driver) lines = out.split('\n') for line in lines: if "rel_tgt_id" in line: parsed = line.split() return parsed[1] def _get_group(self): scst_group = "%s%s" % (self.initiator_iqn, self.target_name) (out, _err) = self.scst_execute('-list_group') if scst_group in out: return out return None def _get_luns_info(self): scst_group = "%s%s" % (self.initiator_iqn, self.target_name) (out, _err) = self.scst_execute('-list_group', scst_group, '-driver', self.target_driver, '-target', self.target_name) first = "Assigned LUNs:" last = "Assigned Initiators:" start = out.index(first) + len(first) end = out.index(last, start) out = out[start:end] luns = [] for line in out.strip().split("\n")[2:]: luns.append(int(line.strip().split(" ")[0])) luns = sorted(set(luns)) return luns def _get_target_and_lun(self, context, volume): iscsi_target = 0 if not self.target_name or not self._get_group(): lun = 1 return iscsi_target, lun luns = self._get_luns_info() if (not luns) or (luns[0] != 1): lun = 1 return iscsi_target, lun else: for lun in luns: if (luns[-1] == lun) or (luns[lun - 1] + 1 != luns[lun]): return iscsi_target, (lun + 1) def create_iscsi_target(self, name, vol_id, tid, lun, path, chap_auth=None): scst_group = "%s%s" % (self.initiator_iqn, self.target_name) vol_name = path.split("/")[3] try: (out, _err) = self.scst_execute('-noprompt', '-set_drv_attr', self.target_driver, '-attributes', 'enabled=1') LOG.debug('StdOut from set driver attribute: %s', out) except putils.ProcessExecutionError as e: LOG.error("Failed to set attribute for enable target driver %s", e) raise exception.ISCSITargetHelperCommandFailed( error_message="Failed to enable SCST Target driver.") if self._get_target(name) is None: try: (out, _err) = self.scst_execute('-add_target', name, '-driver', self.target_driver) LOG.debug("StdOut from scstadmin create target: %s", out) except putils.ProcessExecutionError as 
e: LOG.error("Failed to create iscsi target for volume " "id:%(vol_id)s: %(e)s", {'vol_id': name, 'e': e}) raise exception.ISCSITargetCreateFailed(volume_id=vol_name) try: (out, _err) = self.scst_execute('-enable_target', name, '-driver', self.target_driver) LOG.debug("StdOut from scstadmin enable target: %s", out) except putils.ProcessExecutionError as e: LOG.error("Failed to set 'enable' attribute for " "SCST target %s", e) raise exception.ISCSITargetHelperCommandFailed( error_message="Failed to enable SCST Target.") if chap_auth and self.target_name: try: chap_string = self._iscsi_authentication('IncomingUser=', *chap_auth) (out, _err) = self.scst_execute('-noprompt', '-set_tgt_attr', name, '-driver', self.target_driver, '-attributes', chap_string) LOG.debug("StdOut from scstadmin set target attribute:" " %s.", out) except putils.ProcessExecutionError: msg = _("Failed to set attribute 'Incoming user' for " "SCST target.") LOG.exception(msg) raise exception.ISCSITargetHelperCommandFailed( error_message=msg) if self.target_name: if self._get_group() is None: try: (out, _err) = self.scst_execute('-add_group', scst_group, '-driver', self.target_driver, '-target', name) LOG.debug("StdOut from scstadmin create group: %s", out) except putils.ProcessExecutionError as e: LOG.error("Failed to create group to SCST target %s", e) raise exception.ISCSITargetHelperCommandFailed( error_message="Failed to create group to SCST target.") try: (out, _err) = self.scst_execute('-add_init', self.initiator_iqn, '-driver', self.target_driver, '-target', name, '-group', scst_group) LOG.debug("StdOut from scstadmin add initiator: %s", out) except putils.ProcessExecutionError as e: LOG.error("Failed to add initiator to group " " for SCST target %s", e) raise exception.ISCSITargetHelperCommandFailed( error_message="Failed to add Initiator to group for " "SCST target.") tid = self._get_target(name) if self.target_name is None: disk_id = "disk%s" % tid else: disk_id = "%s%s" % (lun, vol_id.split('-')[-1]) try: self.scst_execute('-open_dev', disk_id, '-handler', 'vdisk_fileio', '-attributes', 'filename=%s' % path) except putils.ProcessExecutionError as e: LOG.error("Failed to add device to handler %s", e) raise exception.ISCSITargetHelperCommandFailed( error_message="Failed to add device to SCST handler.") try: if self.target_name: self.scst_execute('-add_lun', lun, '-driver', self.target_driver, '-target', name, '-device', disk_id, '-group', scst_group) else: self.scst_execute('-add_lun', lun, '-driver', self.target_driver, '-target', name, '-device', disk_id) except putils.ProcessExecutionError as e: LOG.error("Failed to add lun to SCST target " "id:%(vol_id)s: %(e)s", {'vol_id': name, 'e': e}) raise exception.ISCSITargetHelperCommandFailed( error_message="Failed to add LUN to SCST Target for " "volume " + vol_name) # SCST uses /etc/scst.conf as the default configuration when it # starts try: self.scst_execute('-write_config', '/etc/scst.conf') except putils.ProcessExecutionError: LOG.error("Failed to write in /etc/scst.conf.") raise exception.ISCSITargetHelperCommandFailed( error_message="Failed to write in /etc/scst.conf.") return tid def _iscsi_location(self, ip, target, iqn, lun=None): return "%s:%s,%s %s %s" % (volume_utils.sanitize_host(ip), self.configuration.target_port, target, iqn, lun) def _get_iscsi_name(self, volume): if self.target_name is None: return "%s%s" % (self.configuration.target_prefix, volume['name']) else: return self.target_name def _get_iscsi_target(self, context, vol_id): # FIXME(jdg): 
Need to implement abc method pass def _get_target_chap_auth(self, context, volume): # FIXME(jdg): Need to implement abc method iscsi_name = self._get_iscsi_name(volume) if self._get_target(iscsi_name) is None: return None (out, _err) = self.scst_execute('-list_tgt_attr', iscsi_name, '-driver', self.target_driver) first = "KEY" last = "Dynamic attributes" start = out.index(first) + len(first) end = out.index(last, start) out = out[start:end] out = out.split("\n")[2] if "IncomingUser" in out: out = out.split(" ") out = [a for a in out if a != ""] return (out[1], out[2]) else: return None def ensure_export(self, context, volume, volume_path): iscsi_target, lun = self._get_target_and_lun(context, volume) iscsi_name = self._get_iscsi_name(volume) if self.chap_username and self.chap_password: chap_auth = (self.chap_username, self.chap_password) else: chap_auth = self._get_target_chap_auth(context, volume) self.create_iscsi_target(iscsi_name, volume['id'], iscsi_target, lun, volume_path, chap_auth) def create_export(self, context, volume, volume_path): """Creates an export for a logical volume.""" iscsi_target, lun = self._get_target_and_lun(context, volume) iscsi_name = self._get_iscsi_name(volume) if self.chap_username and self.chap_password: chap_auth = (self.chap_username, self.chap_password) else: chap_auth = self._get_target_chap_auth(context, volume) if not chap_auth: chap_auth = (volume_utils.generate_username(), volume_utils.generate_password()) tid = self.create_iscsi_target(iscsi_name, volume['id'], iscsi_target, lun, volume_path, chap_auth) data = {} data['location'] = self._iscsi_location( self.configuration.target_ip_address, tid, iscsi_name, lun) LOG.debug('Set provider_location to: %s', data['location']) data['auth'] = self._iscsi_authentication( 'CHAP', *chap_auth) return data def remove_export(self, context, volume): try: location = volume['provider_location'].split(' ') iqn = location[1] iscsi_target = self._get_target(iqn) self.show_target(iscsi_target, iqn) except Exception: LOG.error("Skipping remove_export. 
No iscsi_target is " "presently exported for volume: %s", volume['id']) return vol = self.db.volume_get(context, volume['id']) lun = "".join(vol['provider_location'].split(" ")[-1:]) self.remove_iscsi_target(iscsi_target, lun, volume['id'], volume['name']) def remove_iscsi_target(self, tid, lun, vol_id, vol_name, **kwargs): disk_id = "%s%s" % (lun, vol_id.split('-')[-1]) vol_uuid_file = vol_name if self.target_name is None: iqn = '%s%s' % (self.iscsi_target_prefix, vol_uuid_file) else: iqn = self.target_name if self.target_name is None: try: self.scst_execute('-noprompt', '-rem_target', iqn, '-driver', 'iscsi') except putils.ProcessExecutionError as e: LOG.error("Failed to remove iscsi target for volume " "id:%(vol_id)s: %(e)s", {'vol_id': vol_id, 'e': e}) raise exception.ISCSITargetRemoveFailed(volume_id=vol_id) try: self.scst_execute('-noprompt', '-close_dev', "disk%s" % tid, '-handler', 'vdisk_fileio') except putils.ProcessExecutionError as e: LOG.error("Failed to close disk device %s", e) raise exception.ISCSITargetHelperCommandFailed( error_message="Failed to close disk device for " "SCST handler.") if self._get_target(iqn): try: self.scst_execute('-noprompt', '-rem_target', iqn, '-driver', self.target_driver) except putils.ProcessExecutionError as e: LOG.error("Failed to remove iscsi target for " "volume id:%(vol_id)s: %(e)s", {'vol_id': vol_id, 'e': e}) raise exception.ISCSITargetRemoveFailed(volume_id=vol_id) else: if not int(lun) in self._get_luns_info(): raise exception.ISCSITargetRemoveFailed(volume_id=vol_id) try: scst_group = "%s%s" % (self.remove_initiator_iqn, self.target_name) self.scst_execute('-noprompt', '-rem_lun', lun, '-driver', self.target_driver, '-target', iqn, '-group', scst_group) except putils.ProcessExecutionError as e: LOG.error("Failed to remove LUN %s", e) raise exception.ISCSITargetHelperCommandFailed( error_message="Failed to remove LUN for SCST Target.") try: self.scst_execute('-noprompt', '-close_dev', disk_id, '-handler', 'vdisk_fileio') except putils.ProcessExecutionError as e: LOG.error("Failed to close disk device %s", e) raise exception.ISCSITargetHelperCommandFailed( error_message="Failed to close disk device for " "SCST handler.") self.scst_execute('-write_config', '/etc/scst.conf') def show_target(self, tid, iqn): if iqn is None: raise exception.InvalidParameterValue( err=_('valid iqn needed for show_target')) tid = self._get_target(iqn) if tid is None: raise exception.ISCSITargetHelperCommandFailed( error_message="Target not found") ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/targets/spdknvmf.py0000664000175000017500000001540200000000000021517 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
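# --- Illustrative note (not part of the original module) -------------------
# SpdkNvmf._rpc_call() below drives the SPDK NVMe-oF target through a
# JSON-RPC 2.0 proxy. This standalone sketch only shows the payload shape
# that gets POSTed; the printed output and helper name are illustrative
# assumptions, and no network call is made here.
import json


def _build_spdk_rpc_payload(method, params=None):
    # Same structure that _rpc_call() assembles before posting it with
    # HTTP basic auth to http://<spdk_rpc_ip>:<spdk_rpc_port>/.
    payload = {'jsonrpc': '2.0', 'id': 1, 'method': method}
    if params is not None:
        payload['params'] = params
    return payload


if __name__ == '__main__':
    # 'nvmf_get_subsystems' is one of the RPC methods used by the driver
    # below; a real deployment would POST this body via requests.post().
    print(json.dumps(_build_spdk_rpc_payload('nvmf_get_subsystems')))
# ---------------------------------------------------------------------------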
import json import string from oslo_config import cfg from oslo_log import log as logging import requests from cinder import exception from cinder.i18n import _ from cinder.volume import configuration from cinder.volume.targets import nvmeof from cinder.volume import volume_utils spdk_opts = [ cfg.StrOpt('spdk_rpc_ip', help='The NVMe target remote configuration IP address.'), cfg.PortOpt('spdk_rpc_port', default=8000, help='The NVMe target remote configuration port.'), cfg.StrOpt('spdk_rpc_username', help='The NVMe target remote configuration username.'), cfg.StrOpt('spdk_rpc_password', help='The NVMe target remote configuration password.', secret=True), cfg.StrOpt('spdk_rpc_protocol', choices=['http', 'https'], default='http', help='Protocol to be used with SPDK RPC proxy'), cfg.IntOpt('spdk_max_queue_depth', default=64, min=1, max=128, help='Queue depth for rdma transport.'), ] CONF = cfg.CONF CONF.register_opts(spdk_opts, group=configuration.SHARED_CONF_GROUP) LOG = logging.getLogger(__name__) class SpdkNvmf(nvmeof.NVMeOF): SECONDARY_IP_SUPPORT = False def __init__(self, *args, **kwargs): super(SpdkNvmf, self).__init__(*args, **kwargs) self.configuration.append_config_values(spdk_opts) self.url = ('%(protocol)s://%(ip)s:%(port)s/' % {'protocol': self.configuration.spdk_rpc_protocol, 'ip': self.configuration.spdk_rpc_ip, 'port': self.configuration.spdk_rpc_port}) # SPDK NVMe-oF Target application requires one time creation # of RDMA transport type each time it is started. It will # fail on second attempt which is expected behavior. try: params = { 'trtype': 'rdma', 'max_queue_depth': self.configuration.spdk_max_queue_depth } self._rpc_call('nvmf_create_transport', params) except Exception: pass def _rpc_call(self, method, params=None): payload = {} payload['jsonrpc'] = '2.0' payload['id'] = 1 payload['method'] = method if params is not None: payload['params'] = params req = requests.post(self.url, data=json.dumps(payload), auth=(self.configuration.spdk_rpc_username, self.configuration.spdk_rpc_password), verify=self.configuration.driver_ssl_cert_verify, timeout=30) if not req.ok: raise exception.VolumeBackendAPIException( data=_('SPDK target responded with error: %s') % req.text) return req.json()['result'] def _get_spdk_volume_name(self, name): output = self._rpc_call('bdev_get_bdevs') for bdev in output: for alias in bdev['aliases']: if name in alias: return bdev['name'] def _get_nqn_with_volume_name(self, name): output = self._rpc_call('nvmf_get_subsystems') spdk_name = self._get_spdk_volume_name(name) if spdk_name is not None: for subsystem in output[1:]: for namespace in subsystem['namespaces']: if spdk_name in namespace['bdev_name']: return subsystem['nqn'] def _get_first_free_node(self): cnode_num = [] output = self._rpc_call('nvmf_get_subsystems') # Get node numbers for nqn string like this: nqn.2016-06.io.spdk:cnode1 for subsystem in output[1:]: cnode_num.append(int(subsystem['nqn'].split("cnode")[1])) test_set = set(range(1, len(cnode_num) + 2)) return list(test_set.difference(cnode_num))[0] def create_nvmeof_target(self, volume_id, subsystem_name, target_ips, target_port, transport_type, nvmet_port_id, ns_id, volume_path): LOG.debug('SPDK create target') nqn = self._get_nqn_with_volume_name(volume_id) if nqn is None: node = self._get_first_free_node() nqn = '%s:cnode%s' % (subsystem_name, node) choice = string.ascii_uppercase + string.digits serial = ''.join( volume_utils.generate_password(length=12, symbolgroups=choice)) params = { 'nqn': nqn, 'allow_any_host': True, 
'serial_number': serial, } self._rpc_call('nvmf_create_subsystem', params) listen_address = { 'trtype': transport_type, 'traddr': target_ips[0], 'trsvcid': str(target_port), } params = { 'nqn': nqn, 'listen_address': listen_address, } self._rpc_call('nvmf_subsystem_add_listener', params) ns = { 'bdev_name': self._get_spdk_volume_name(volume_id), 'nsid': ns_id, } params = { 'nqn': nqn, 'namespace': ns, } self._rpc_call('nvmf_subsystem_add_ns', params) location = self.get_nvmeof_location( nqn, target_ips, target_port, transport_type, ns_id) return {'location': location, 'auth': '', 'provider_id': nqn} def delete_nvmeof_target(self, target_name): LOG.debug('SPDK delete target: %s', target_name) nqn = self._get_nqn_with_volume_name(target_name.name) if nqn is not None: try: params = {'nqn': nqn} self._rpc_call('nvmf_delete_subsystem', params) LOG.debug('SPDK subsystem %s deleted', nqn) except Exception as e: LOG.debug('SPDK ERROR: subsystem not deleted: %s', e) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/targets/tgt.py0000664000175000017500000002766600000000000020504 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import os import textwrap import time from oslo_concurrency import processutils as putils from oslo_log import log as logging from oslo_utils import fileutils from cinder import exception import cinder.privsep.targets.tgt from cinder import utils from cinder.volume.targets import iscsi LOG = logging.getLogger(__name__) class TgtAdm(iscsi.ISCSITarget): """Target object for block storage devices. Base class for target object, where target is data transport mechanism (target) specific calls. This includes things like create targets, attach, detach etc. """ VOLUME_CONF = textwrap.dedent(""" backing-store %(path)s driver %(driver)s %(chap_auth)s %(target_flags)s write-cache %(write_cache)s scsi_sn %(scsi_sn)s scsi_id %(scsi_id)s """) def _get_target(self, iqn): (out, err) = cinder.privsep.targets.tgt.tgtadmin_show() lines = out.split('\n') for line in lines: if iqn in line: parsed = line.split() tid = parsed[1] return tid[:-1] return None def _verify_backing_lun(self, iqn, tid): backing_lun = True capture = False target_info = [] (out, err) = cinder.privsep.targets.tgt.tgtadmin_show() lines = out.split('\n') for line in lines: if iqn in line and "Target %s" % tid in line: capture = True if capture: target_info.append(line) if iqn not in line and 'Target ' in line: capture = False if ' LUN: 1' not in target_info: backing_lun = False return backing_lun def _recreate_backing_lun(self, iqn, tid, name, path): LOG.warning('Attempting recreate of backing lun...') # Since we think the most common case of this is a dev busy # (create vol from snapshot) we're going to add a sleep here # this will hopefully give things enough time to stabilize # how long should we wait?? 
I have no idea, let's go big # and error on the side of caution time.sleep(10) (out, err) = (None, None) try: (out, err) = cinder.privsep.targets.tgt.tgtadm_create(tid, path) except putils.ProcessExecutionError as e: LOG.error("Failed recovery attempt to create " "iscsi backing lun for Volume " "ID:%(vol_id)s: %(e)s", {'vol_id': name, 'e': e}) finally: LOG.debug('StdOut from recreate backing lun: %s', out) LOG.debug('StdErr from recreate backing lun: %s', err) def _get_iscsi_target(self, context, vol_id): return 0 def _get_target_and_lun(self, context, volume): lun = 1 # For tgtadm the controller is lun 0, dev starts at lun 1 iscsi_target = 0 # NOTE(jdg): Not used by tgtadm return iscsi_target, lun @utils.retry(putils.ProcessExecutionError) def _do_tgt_update(self, name, force=False): (out, err) = cinder.privsep.targets.tgt.tgtadmin_update(name, force) LOG.debug("StdOut from tgt-admin --update: %s", out) LOG.debug("StdErr from tgt-admin --update: %s", err) @utils.retry(exception.NotFound) def create_iscsi_target(self, name, tid, lun, path, chap_auth=None, **kwargs): # Note(jdg) tid and lun aren't used by TgtAdm but remain for # compatibility # NOTE(jdg): Remove this when we get to the bottom of bug: #1398078 # for now, since we intermittently hit target already exists we're # adding some debug info to try and pinpoint what's going on (out, err) = cinder.privsep.targets.tgt.tgtadm_show() LOG.debug("Targets prior to update: %s", out) fileutils.ensure_tree(self.volumes_dir) vol_id = name.split(':')[1] write_cache = self.configuration.get('iscsi_write_cache', 'on') driver = self.iscsi_protocol chap_str = '' if chap_auth is not None: chap_str = 'incominguser %s %s' % chap_auth target_flags = self.configuration.get('iscsi_target_flags', '') if target_flags: target_flags = 'bsoflags ' + target_flags # Create unique scsi_sn and scsi_id fields based on the volume id scsi_sn = vol_id scsi_id = vol_id volume_conf = self.VOLUME_CONF % { 'name': name, 'path': path, 'driver': driver, 'chap_auth': chap_str, 'target_flags': target_flags, 'write_cache': write_cache, 'scsi_sn': scsi_sn, 'scsi_id': scsi_id} LOG.debug('Creating iscsi_target for Volume ID: %s', vol_id) volumes_dir = self.volumes_dir volume_path = os.path.join(volumes_dir, vol_id) if os.path.exists(volume_path): LOG.debug(('Persistence file already exists for volume, ' 'found file at: %s'), volume_path) utils.robust_file_write(volumes_dir, vol_id, volume_conf) LOG.debug(('Created volume path %(vp)s,\n' 'content: %(vc)s'), {'vp': volume_path, 'vc': volume_conf}) old_persist_file = None old_name = kwargs.get('old_name', None) if old_name is not None: LOG.debug('Detected old persistence file for volume ' '%(vol)s at %(old_name)s', {'vol': vol_id, 'old_name': old_name}) old_persist_file = os.path.join(volumes_dir, old_name) try: # With the persistent tgts we create them # by creating the entry in the persist file # and then doing an update to get the target # created. self._do_tgt_update(name) except putils.ProcessExecutionError as e: if "target already exists" in e.stderr: # Adding the additional Warning message below for a clear # ER marker (Ref bug: #1398078). 
LOG.warning('Could not create target because ' 'it already exists for volume: %s', vol_id) LOG.debug('Exception was: %s', e) else: LOG.error("Failed to create iscsi target for Volume " "ID: %(vol_id)s: %(e)s", {'vol_id': vol_id, 'e': e}) # Don't forget to remove the persistent file we created os.unlink(volume_path) raise exception.ISCSITargetCreateFailed(volume_id=vol_id) # Grab targets list for debug # Consider adding a check for lun 0 and 1 for tgtadm # before considering this as valid cinder.privsep.targets.tgt.tgtadm_show() LOG.debug("Targets after update: %s", out) iqn = '%s%s' % (self.iscsi_target_prefix, vol_id) tid = self._get_target(iqn) if tid is None: LOG.warning("Failed to create iscsi target for Volume " "ID: %(vol_id)s. It could be caused by problem " "with concurrency. " "Also please ensure your tgtd config " "file contains 'include %(volumes_dir)s/*'", {'vol_id': vol_id, 'volumes_dir': volumes_dir, }) raise exception.NotFound() # NOTE(jdg): Sometimes we have some issues with the backing lun # not being created, believe this is due to a device busy # or something related, so we're going to add some code # here that verifies the backing lun (lun 1) was created # and we'll try and recreate it if it's not there if not self._verify_backing_lun(iqn, tid): try: self._recreate_backing_lun(iqn, tid, name, path) except putils.ProcessExecutionError: os.unlink(volume_path) raise exception.ISCSITargetCreateFailed(volume_id=vol_id) # Finally check once more and if no go, fail and punt if not self._verify_backing_lun(iqn, tid): os.unlink(volume_path) raise exception.ISCSITargetCreateFailed(volume_id=vol_id) if old_persist_file is not None: fileutils.delete_if_exists(old_persist_file) return tid def remove_iscsi_target(self, tid, lun, vol_id, vol_name, **kwargs): LOG.info('Removing iscsi_target for Volume ID: %s', vol_id) vol_uuid_file = vol_name volume_path = os.path.join(self.volumes_dir, vol_uuid_file) if not os.path.exists(volume_path): LOG.warning('Volume path %s does not exist, ' 'nothing to remove.', volume_path) return if os.path.isfile(volume_path): iqn = '%s%s' % (self.iscsi_target_prefix, vol_uuid_file) else: raise exception.ISCSITargetRemoveFailed(volume_id=vol_id) try: # NOTE(vish): --force is a workaround for bug: # https://bugs.launchpad.net/cinder/+bug/1159948 cinder.privsep.targets.tgt.tgtadmin_delete(iqn, force=True) except putils.ProcessExecutionError as e: non_fatal_errors = ("can't find the target", "access control rule does not exist") if any(error in e.stderr for error in non_fatal_errors): LOG.warning("Failed target removal because target or " "ACL's couldn't be found for iqn: %s.", iqn) else: LOG.error("Failed to remove iscsi target for Volume " "ID: %(vol_id)s: %(e)s", {'vol_id': vol_id, 'e': e}) raise exception.ISCSITargetRemoveFailed(volume_id=vol_id) # NOTE(jdg): There's a bug in some versions of tgt that # will sometimes fail silently when using the force flag # https://bugs.launchpad.net/ubuntu/+source/tgt/+bug/1305343 # For now work-around by checking if the target was deleted, # if it wasn't, try again without the force. 
# This will NOT do any good for the case of mutliple sessions # which the force was aded for but it will however address # the cases pointed out in bug: # https://bugs.launchpad.net/cinder/+bug/1304122 if self._get_target(iqn): try: LOG.warning('Silent failure of target removal ' 'detected, retry....') cinder.privsep.targets.tgt.tgtadmin_delete(iqn) except putils.ProcessExecutionError as e: LOG.error("Failed to remove iscsi target for Volume " "ID: %(vol_id)s: %(e)s", {'vol_id': vol_id, 'e': e}) raise exception.ISCSITargetRemoveFailed(volume_id=vol_id) # NOTE(jdg): This *should* be there still but incase # it's not we don't care, so just ignore it if was # somehow deleted between entry of this method # and here if os.path.exists(volume_path): os.unlink(volume_path) else: LOG.debug('Volume path %s not found at end, ' 'of remove_iscsi_target.', volume_path) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/throttling.py0000664000175000017500000001020500000000000020410 0ustar00zuulzuul00000000000000# Copyright (c) 2015 Hitachi Data Systems, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Volume copy throttling helpers.""" import contextlib from oslo_concurrency import processutils from oslo_log import log as logging from cinder import exception import cinder.privsep.cgroup from cinder import utils LOG = logging.getLogger(__name__) class Throttle(object): """Base class for throttling disk I/O bandwidth""" DEFAULT = None @staticmethod def set_default(throttle): Throttle.DEFAULT = throttle @staticmethod def get_default(): return Throttle.DEFAULT or Throttle() def __init__(self, prefix=None): self.prefix = prefix or [] @contextlib.contextmanager def subcommand(self, srcpath, dstpath): """Sub-command that reads from srcpath and writes to dstpath. Throttle disk I/O bandwidth used by a sub-command, such as 'dd', that reads from srcpath and writes to dstpath. The sub-command must be executed with the generated prefix command. 
""" yield {'prefix': self.prefix} class BlkioCgroup(Throttle): """Throttle disk I/O bandwidth using blkio cgroups.""" def __init__(self, bps_limit, cgroup_name): self.bps_limit = bps_limit self.cgroup = cgroup_name self.srcdevs = {} self.dstdevs = {} try: cinder.privsep.cgroup.cgroup_create(self.cgroup) except processutils.ProcessExecutionError: LOG.error('Failed to create blkio cgroup \'%(name)s\'.', {'name': cgroup_name}) raise def _get_device_number(self, path): try: return utils.get_blkdev_major_minor(path) except exception.CinderException as e: LOG.error('Failed to get device number for throttling: ' '%(error)s', {'error': e}) def _limit_bps(self, rw, dev, bps): try: cinder.privsep.cgroup.cgroup_limit(self.cgroup, rw, dev, bps) except processutils.ProcessExecutionError: LOG.warning('Failed to setup blkio cgroup to throttle the ' 'device \'%(device)s\'.', {'device': dev}) def _set_limits(self, rw, devs): total = sum(devs.values()) for dev in sorted(devs): self._limit_bps(rw, dev, self.bps_limit * devs[dev] / total) @utils.synchronized('BlkioCgroup') def _inc_device(self, srcdev, dstdev): if srcdev: self.srcdevs[srcdev] = self.srcdevs.get(srcdev, 0) + 1 self._set_limits('read', self.srcdevs) if dstdev: self.dstdevs[dstdev] = self.dstdevs.get(dstdev, 0) + 1 self._set_limits('write', self.dstdevs) @utils.synchronized('BlkioCgroup') def _dec_device(self, srcdev, dstdev): if srcdev: self.srcdevs[srcdev] -= 1 if self.srcdevs[srcdev] == 0: del self.srcdevs[srcdev] self._set_limits('read', self.srcdevs) if dstdev: self.dstdevs[dstdev] -= 1 if self.dstdevs[dstdev] == 0: del self.dstdevs[dstdev] self._set_limits('write', self.dstdevs) @contextlib.contextmanager def subcommand(self, srcpath, dstpath): srcdev = self._get_device_number(srcpath) dstdev = self._get_device_number(dstpath) if srcdev is None and dstdev is None: yield {'prefix': []} return self._inc_device(srcdev, dstdev) try: yield {'prefix': ['cgexec', '-g', 'blkio:%s' % self.cgroup]} finally: self._dec_device(srcdev, dstdev) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/volume_migration.py0000664000175000017500000000441700000000000021602 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from cinder import db from cinder import objects class VolumeMigration(object): """Lightweight Volume Migration object. Will be used by KeyMigrator instead of regular Volume object to avoid extra memory usage. 
""" @staticmethod def from_volume(volume, context): volume_migration = VolumeMigration(volume.id, volume.user_id, volume.encryption_key_id) volume_migration._context = context return volume_migration def __init__(self, id, user_id, encryption_key_id): self.id = id self.user_id = user_id self.orig_encryption_key_id = encryption_key_id self.encryption_key_id = encryption_key_id def _get_updates(self): updates = {} if self.orig_encryption_key_id != self.encryption_key_id: updates['encryption_key_id'] = self.encryption_key_id return updates def _reset_changes(self): self.orig_encryption_key_id = self.encryption_key_id def save(self): updates = self._get_updates() if updates: db.volume_update(self._context, self.id, updates) self._reset_changes() def __str__(self): return 'id = {}'.format(self.id) def __repr__(self): return self.__str__() class VolumeMigrationList(list): def __init__(self): list.__init__(self) def append(self, volumes, context): if not isinstance(volumes, objects.volume.VolumeList): return for volume in volumes: volume_migration = VolumeMigration.from_volume(volume, context) super(VolumeMigrationList, self).append(volume_migration) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/volume_types.py0000664000175000017500000004714000000000000020755 0ustar00zuulzuul00000000000000# Copyright (c) 2011 Zadara Storage Inc. # Copyright (c) 2011 OpenStack Foundation # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # Copyright (c) 2010 Citrix Systems, Inc. # Copyright 2011 Ken Pepple # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""Built-in volume type properties.""" from typing import Any, Iterable, Optional, Union from oslo_config import cfg from oslo_db import exception as db_exc from oslo_log import log as logging from oslo_utils import uuidutils from cinder import context from cinder import db from cinder import exception from cinder.i18n import _ from cinder import quota from cinder import rpc from cinder import utils CONF = cfg.CONF LOG = logging.getLogger(__name__) QUOTAS = quota.QUOTAS ENCRYPTION_IGNORED_FIELDS = ('volume_type_id', 'created_at', 'updated_at', 'deleted_at', 'encryption_id') QOS_IGNORED_FIELDS = ('id', 'name', 'created_at', 'updated_at', 'deleted_at') DEFAULT_VOLUME_TYPE = "__DEFAULT__" MIN_SIZE_KEY = "provisioning:min_vol_size" MAX_SIZE_KEY = "provisioning:max_vol_size" def create(context: context.RequestContext, name: str, extra_specs: Optional[dict[str, Any]] = None, is_public: bool = True, projects: Optional[list[str]] = None, description: Optional[str] = None): """Creates volume types.""" extra_specs = extra_specs or {} projects = projects or [] elevated = context if context.is_admin else context.elevated() try: type_ref = db.volume_type_create(elevated, dict(name=name, extra_specs=extra_specs, is_public=is_public, description=description), projects=projects) except db_exc.DBError: LOG.exception('DB error:') raise exception.VolumeTypeCreateFailed(name=name, extra_specs=extra_specs) return type_ref def update(context: context.RequestContext, id: Optional[str], name: Optional[str], description: Optional[str], is_public: Optional[bool] = None) -> None: """Update volume type by id.""" if id is None: msg = _("id cannot be None") raise exception.InvalidVolumeType(reason=msg) elevated = context if context.is_admin else context.elevated() old_volume_type = get_volume_type(elevated, id) try: db.volume_type_update(elevated, id, dict(name=name, description=description, is_public=is_public)) # Rename resource in quota if volume type name is changed. if name: old_type_name = old_volume_type.get('name') if old_type_name != name: old_description = old_volume_type.get('description') old_public = old_volume_type.get('is_public') try: QUOTAS.update_quota_resource(elevated, old_type_name, name) # Rollback the updated information to the original except db_exc.DBError: db.volume_type_update(elevated, id, dict(name=old_type_name, description=old_description, is_public=old_public)) raise except db_exc.DBError: LOG.exception('DB error:') raise exception.VolumeTypeUpdateFailed(id=id) def destroy(context: context.RequestContext, id: str) -> dict[str, Any]: """Marks volume types as deleted. There must exist at least one volume type (i.e. the default type) in the deployment. 
This method achieves that by ensuring: 1) the default_volume_type is set and is a valid one 2) the type requested to delete isn't the default type :raises VolumeTypeDefaultDeletionError: when the type requested to delete is the default type """ if id is None: msg = _("id cannot be None") raise exception.InvalidVolumeType(reason=msg) projects_with_default_type = db.get_all_projects_with_default_type( context.elevated(), id) if len(projects_with_default_type) > 0: # don't allow delete if the type requested is a project default project_list = [p.project_id for p in projects_with_default_type] LOG.exception('Default type with %(volume_type_id)s is associated ' 'with projects %(projects)s', {'volume_type_id': id, 'projects': project_list}) raise exception.VolumeTypeDefaultDeletionError(volume_type_id=id) # Default type *must* be set in order to delete any volume type. # If the default isn't set, the following call will raise # VolumeTypeDefaultMisconfiguredError exception which will error out the # delete operation. default_type = get_default_volume_type() # don't allow delete if the type requested is the conf default type if id == default_type.get('id'): raise exception.VolumeTypeDefaultDeletionError(volume_type_id=id) elevated = context if context.is_admin else context.elevated() return db.volume_type_destroy(elevated, id) def get_all_types(context: context.RequestContext, inactive: int = 0, filters: Optional[dict] = None, marker: Optional[dict[str, Any]] = None, limit: Optional[int] = None, sort_keys: Optional[list[str]] = None, sort_dirs: Optional[list[str]] = None, offset: Optional[int] = None, list_result: bool = False) -> Union[dict[str, Any], list]: """Get all non-deleted volume_types. Pass true as argument if you want deleted volume types returned also. """ vol_types = db.volume_type_get_all(context, inactive, filters=filters, marker=marker, limit=limit, sort_keys=sort_keys, sort_dirs=sort_dirs, offset=offset, list_result=list_result) return vol_types def get_volume_type( ctxt: Optional[context.RequestContext], id: Optional[str], expected_fields: Optional[Iterable[str]] = None) -> dict[str, Any]: """Retrieves single volume type by id.""" if id is None: msg = _("id cannot be None") raise exception.InvalidVolumeType(reason=msg) if ctxt is None: ctxt = context.get_admin_context() return db.volume_type_get(ctxt, id, expected_fields=expected_fields) def get_by_name_or_id(context: context.RequestContext, identity: str) -> dict[str, Any]: """Retrieves volume type by id or name""" if uuidutils.is_uuid_like(identity): # both name and id can be in uuid format try: return get_volume_type(context, identity) except exception.VolumeTypeNotFound: try: # A user can create a type with the name in a UUID format, # so here we check for the uuid-like name. return get_volume_type_by_name(context, identity) except exception.VolumeTypeNotFoundByName: raise exception.VolumeTypeNotFound(volume_type_id=identity) return get_volume_type_by_name(context, identity) def get_volume_type_by_name(context: context.RequestContext, name: Optional[str]) -> dict[str, Any]: """Retrieves single volume type by name.""" if name is None: msg = _("name cannot be None") raise exception.InvalidVolumeType(reason=msg) return db.volume_type_get_by_name(context, name) def get_default_volume_type( contxt: Optional[context.RequestContext] = None) -> dict[str, Any]: """Get the default volume type. 
:raises VolumeTypeDefaultMisconfiguredError: when the configured default is not found """ if contxt: project_default = db.project_default_volume_type_get( contxt, contxt.project_id) if project_default: return get_volume_type(contxt, project_default.volume_type_id) name = CONF.default_volume_type ctxt = context.get_admin_context() vol_type = {} try: vol_type = get_volume_type_by_name(ctxt, name) except (exception.VolumeTypeNotFoundByName, exception.InvalidVolumeType): # Couldn't find volume type with the name in default_volume_type # flag, record this issue and raise exception LOG.exception('Default volume type is not found. ' 'Please check default_volume_type config:') raise exception.VolumeTypeDefaultMisconfiguredError( volume_type_name=name) return vol_type def get_volume_type_extra_specs( volume_type_id: str, ) -> dict: volume_type = get_volume_type(context.get_admin_context(), volume_type_id) return volume_type['extra_specs'] def is_public_volume_type(context: context.RequestContext, volume_type_id: str) -> bool: """Return is_public boolean value of volume type""" volume_type = db.volume_type_get(context, volume_type_id) return volume_type['is_public'] @utils.if_notifications_enabled def notify_about_volume_type_access_usage(context: context.RequestContext, volume_type_id: str, project_id: str, event_suffix: str, host: Optional[str] = None) -> None: """Notify about successful usage type-access-(add/remove) command. :param context: security context :param volume_type_id: volume type uuid :param project_id: tenant uuid :param event_suffix: name of called operation access-(add/remove) :param host: hostname """ notifier_info = {'volume_type_id': volume_type_id, 'project_id': project_id} if not host: host = CONF.host notifier = rpc.get_notifier("volume_type_project", host) notifier.info(context, 'volume_type_project.%s' % event_suffix, notifier_info) def add_volume_type_access(context: context.RequestContext, volume_type_id: Optional[str], project_id: str) -> None: """Add access to volume type for project_id.""" if volume_type_id is None: msg = _("volume_type_id cannot be None") raise exception.InvalidVolumeType(reason=msg) elevated = context if context.is_admin else context.elevated() if is_public_volume_type(elevated, volume_type_id): msg = _("Type access modification is not applicable to public volume " "type.") raise exception.InvalidVolumeType(reason=msg) db.volume_type_access_add(elevated, volume_type_id, project_id) notify_about_volume_type_access_usage(context, volume_type_id, project_id, 'access.add') def remove_volume_type_access(context: context.RequestContext, volume_type_id: Optional[str], project_id: str) -> None: """Remove access to volume type for project_id.""" if volume_type_id is None: msg = _("volume_type_id cannot be None") raise exception.InvalidVolumeType(reason=msg) elevated = context if context.is_admin else context.elevated() if is_public_volume_type(elevated, volume_type_id): msg = _("Type access modification is not applicable to public volume " "type.") raise exception.InvalidVolumeType(reason=msg) db.volume_type_access_remove(elevated, volume_type_id, project_id) notify_about_volume_type_access_usage(context, volume_type_id, project_id, 'access.remove') def is_encrypted(context: context.RequestContext, volume_type_id: Optional[str]) -> bool: return get_volume_type_encryption(context, volume_type_id) is not None def get_volume_type_encryption( context: context.RequestContext, volume_type_id: Optional[str]) -> Optional[dict]: if volume_type_id is None: return None 
encryption = db.volume_type_encryption_get(context, volume_type_id) return encryption def get_volume_type_qos_specs(volume_type_id: str) -> dict[str, Any]: """Get all qos specs for given volume type.""" ctxt = context.get_admin_context() res = db.volume_type_qos_specs_get(ctxt, volume_type_id) return res def volume_types_diff(context: context.RequestContext, vol_type_id1: str, vol_type_id2: str) -> tuple[dict[str, Any], bool]: """Returns a 'diff' of two volume types and whether they are equal. Returns a tuple of (diff, equal), where 'equal' is a boolean indicating whether there is any difference, and 'diff' is a dictionary with the following format: .. code-block:: default { 'extra_specs': {'key1': (value_in_1st_vol_type, value_in_2nd_vol_type), 'key2': (value_in_1st_vol_type, value_in_2nd_vol_type), {...}} 'qos_specs': {'key1': (value_in_1st_vol_type, value_in_2nd_vol_type), 'key2': (value_in_1st_vol_type, value_in_2nd_vol_type), {...}} 'encryption': {'cipher': (value_in_1st_vol_type, value_in_2nd_vol_type), {'key_size': (value_in_1st_vol_type, value_in_2nd_vol_type), {...}} } """ def _fix_qos_specs(qos_specs: Optional[dict]) -> None: if qos_specs: for key in QOS_IGNORED_FIELDS: qos_specs.pop(key, None) qos_specs.update(qos_specs.pop('specs', {})) def _fix_encryption_specs(encryption: Optional[dict]) -> Optional[dict]: if encryption: encryption = dict(encryption) for param in ENCRYPTION_IGNORED_FIELDS: encryption.pop(param, None) return encryption def _dict_diff(dict1: Optional[dict], dict2: Optional[dict]) -> tuple[dict[str, Any], bool]: res = {} equal = True if dict1 is None: dict1 = {} if dict2 is None: dict2 = {} for k, v in dict1.items(): res[k] = (v, dict2.get(k)) if k not in dict2 or res[k][0] != res[k][1]: equal = False for k, v in dict2.items(): res[k] = (dict1.get(k), v) if k not in dict1 or res[k][0] != res[k][1]: equal = False return (res, equal) all_equal = True diff = {} vol_type_data = [] for vol_type_id in (vol_type_id1, vol_type_id2): if vol_type_id is None: specs = {'extra_specs': None, 'qos_specs': None, 'encryption': None} else: specs = {} vol_type = get_volume_type(context, vol_type_id) specs['extra_specs'] = vol_type.get('extra_specs') qos_specs = get_volume_type_qos_specs(vol_type_id) specs['qos_specs'] = qos_specs.get('qos_specs') _fix_qos_specs(specs['qos_specs']) specs['encryption'] = get_volume_type_encryption(context, vol_type_id) specs['encryption'] = _fix_encryption_specs(specs['encryption']) vol_type_data.append(specs) diff['extra_specs'], equal = _dict_diff(vol_type_data[0]['extra_specs'], vol_type_data[1]['extra_specs']) if not equal: all_equal = False diff['qos_specs'], equal = _dict_diff(vol_type_data[0]['qos_specs'], vol_type_data[1]['qos_specs']) if not equal: all_equal = False diff['encryption'], equal = _dict_diff(vol_type_data[0]['encryption'], vol_type_data[1]['encryption']) if not equal: all_equal = False return (diff, all_equal) def volume_types_encryption_changed( context: context.RequestContext, vol_type_id1: Optional[str], vol_type_id2: Optional[str]) -> bool: """Return whether encryptions of two volume types are same.""" def _get_encryption(enc: dict) -> dict: enc = dict(enc) for param in ENCRYPTION_IGNORED_FIELDS: enc.pop(param, None) return enc enc1 = get_volume_type_encryption(context, vol_type_id1) enc2 = get_volume_type_encryption(context, vol_type_id2) enc1_filtered = _get_encryption(enc1) if enc1 else None enc2_filtered = _get_encryption(enc2) if enc2 else None return enc1_filtered != enc2_filtered def provision_filter_on_size(context: 
context.RequestContext, volume_type: Optional[dict[str, Any]], size: Union[str, int]) -> None: """This function filters volume provisioning requests on size limits. If a volume type has provisioning size min/max set, this filter will ensure that the volume size requested is within the size limits specified in the volume type. """ if not volume_type: volume_type = get_default_volume_type() if volume_type: size_int = int(size) extra_specs = volume_type.get('extra_specs', {}) min_size = extra_specs.get(MIN_SIZE_KEY) if min_size and size_int < int(min_size): msg = _("Specified volume size of '%(req_size)d' is less " "than the minimum required size of '%(min_size)s' " "for volume type '%(vol_type)s'.") % { 'req_size': size_int, 'min_size': min_size, 'vol_type': volume_type['name'] } raise exception.InvalidInput(reason=msg) max_size = extra_specs.get(MAX_SIZE_KEY) if max_size and size_int > int(max_size): msg = _("Specified volume size of '%(req_size)d' is " "greater than the maximum allowable size of " "'%(max_size)s' for volume type '%(vol_type)s'." ) % { 'req_size': size_int, 'max_size': max_size, 'vol_type': volume_type['name']} raise exception.InvalidInput(reason=msg) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/volume/volume_utils.py0000664000175000017500000016463700000000000020764 0ustar00zuulzuul00000000000000# Copyright (c) 2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
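# --- Illustrative note (not part of the original module) -------------------
# provision_filter_on_size() above rejects volume sizes outside the
# 'provisioning:min_vol_size' / 'provisioning:max_vol_size' extra specs of a
# volume type. This standalone sketch repeats just that size check using
# plain dicts; the helper name and sample values are assumptions for
# illustration, not cinder API.
_MIN_SIZE_KEY = 'provisioning:min_vol_size'
_MAX_SIZE_KEY = 'provisioning:max_vol_size'


def _size_within_type_limits(extra_specs, size):
    size = int(size)
    min_size = extra_specs.get(_MIN_SIZE_KEY)
    if min_size and size < int(min_size):
        return False
    max_size = extra_specs.get(_MAX_SIZE_KEY)
    if max_size and size > int(max_size):
        return False
    return True


if __name__ == '__main__':
    specs = {_MIN_SIZE_KEY: '10', _MAX_SIZE_KEY: '100'}
    assert _size_within_type_limits(specs, 50)
    assert not _size_within_type_limits(specs, 5)
    assert not _size_within_type_limits(specs, 500)
# ---------------------------------------------------------------------------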
"""Volume-related Utilities and helpers.""" import abc import ast import functools import inspect import json import logging as py_logging import math import operator import os from os import urandom from random import shuffle import re import socket import tempfile import time import types import typing from typing import Any, BinaryIO, Callable, IO from typing import Optional, Union import uuid from castellan.common.credentials import keystone_password from castellan.common import exception as castellan_exception from castellan import key_manager as castellan_key_manager import eventlet from eventlet import tpool from keystoneauth1 import loading as ks_loading from os_brick import encryptors from os_brick.initiator import connector from oslo_concurrency import processutils from oslo_config import cfg from oslo_log import log as logging from oslo_utils import excutils from oslo_utils import netutils from oslo_utils import strutils from oslo_utils import timeutils from oslo_utils import units from cinder.brick.local_dev import lvm as brick_lvm from cinder import context from cinder import db from cinder import exception from cinder.i18n import _ from cinder.image import image_utils from cinder import objects from cinder.objects import fields from cinder import rpc from cinder import utils from cinder.volume import configuration from cinder.volume import driver from cinder.volume import group_types from cinder.volume import throttling from cinder.volume import volume_types CONF = cfg.CONF LOG = logging.getLogger(__name__) GB: int = units.Gi # These attributes we will attempt to save for the volume if they exist # in the source image metadata. IMAGE_ATTRIBUTES = ( 'checksum', 'container_format', 'disk_format', 'min_disk', 'min_ram', 'size', ) VALID_TRACE_FLAGS = {'method', 'api'} TRACE_API = False TRACE_METHOD = False def null_safe_str(s: Optional[str]) -> str: return str(s) if s else '' def _usage_from_volume(context: context.RequestContext, volume_ref: 'objects.Volume', **kw) -> dict: now = timeutils.utcnow() launched_at = volume_ref['launched_at'] or now created_at = volume_ref['created_at'] or now volume_status = volume_ref['status'] if volume_status == 'error_managing_deleting': volume_status = 'deleting' usage_info = dict( tenant_id=volume_ref['project_id'], host=volume_ref['host'], user_id=volume_ref['user_id'], availability_zone=volume_ref['availability_zone'], volume_id=volume_ref['id'], volume_type=volume_ref['volume_type_id'], display_name=volume_ref['display_name'], launched_at=launched_at.isoformat(), created_at=created_at.isoformat(), status=volume_status, snapshot_id=volume_ref['snapshot_id'], size=volume_ref['size'], replication_status=volume_ref['replication_status'], replication_extended_status=volume_ref['replication_extended_status'], replication_driver_data=volume_ref['replication_driver_data'], metadata=volume_ref.get('volume_metadata'),) usage_info.update(kw) try: attachments = db.volume_attachment_get_all_by_volume_id( context, volume_ref['id']) usage_info['volume_attachment'] = attachments glance_meta = db.volume_glance_metadata_get(context, volume_ref['id']) if glance_meta: usage_info['glance_metadata'] = glance_meta except exception.GlanceMetadataNotFound: pass except exception.VolumeNotFound: LOG.debug("Can not find volume %s at notify usage", volume_ref['id']) return usage_info def _usage_from_backup(backup: 'objects.Backup', **kw) -> dict: num_dependent_backups = backup.num_dependent_backups usage_info = dict(tenant_id=backup.project_id, user_id=backup.user_id, 
availability_zone=backup.availability_zone, backup_id=backup.id, host=backup.host, display_name=backup.display_name, created_at=backup.created_at.isoformat(), status=backup.status, volume_id=backup.volume_id, size=backup.size, service_metadata=backup.service_metadata, service=backup.service, fail_reason=backup.fail_reason, parent_id=backup.parent_id, num_dependent_backups=num_dependent_backups, snapshot_id=backup.snapshot_id, ) usage_info.update(kw) return usage_info @utils.if_notifications_enabled def notify_about_volume_usage(context: context.RequestContext, volume: 'objects.Volume', event_suffix: str, extra_usage_info: Optional[dict] = None, host: Optional[str] = None) -> None: if not host: host = CONF.host if not extra_usage_info: extra_usage_info = {} usage_info = _usage_from_volume(context, volume, **extra_usage_info) rpc.get_notifier("volume", host).info(context, 'volume.%s' % event_suffix, usage_info) @utils.if_notifications_enabled def notify_about_backup_usage(context: context.RequestContext, backup: 'objects.Backup', event_suffix: str, extra_usage_info: Optional[dict] = None, host: Optional[str] = None) -> None: if not host: host = CONF.host if not extra_usage_info: extra_usage_info = {} usage_info = _usage_from_backup(backup, **extra_usage_info) rpc.get_notifier("backup", host).info(context, 'backup.%s' % event_suffix, usage_info) def _usage_from_snapshot(snapshot: 'objects.Snapshot', context: context.RequestContext, **extra_usage_info) -> dict: # (niedbalski) a snapshot might be related to a deleted # volume, if that's the case, the volume information is still # required for filling the usage_info, so we enforce to read # the volume data even if the volume has been deleted. context.read_deleted = "yes" volume = db.volume_get(context, snapshot.volume_id) usage_info = { 'tenant_id': snapshot.project_id, 'user_id': snapshot.user_id, 'availability_zone': volume['availability_zone'], 'volume_id': snapshot.volume_id, 'volume_size': snapshot.volume_size, 'snapshot_id': snapshot.id, 'display_name': snapshot.display_name, 'created_at': snapshot.created_at.isoformat(), 'status': snapshot.status, 'deleted': null_safe_str(snapshot.deleted), 'metadata': null_safe_str(snapshot.metadata), } usage_info.update(extra_usage_info) return usage_info @utils.if_notifications_enabled def notify_about_snapshot_usage(context: context.RequestContext, snapshot: 'objects.Snapshot', event_suffix: str, extra_usage_info: Optional[dict] = None, host: Optional[str] = None) -> None: if not host: host = CONF.host if not extra_usage_info: extra_usage_info = {} usage_info = _usage_from_snapshot(snapshot, context, **extra_usage_info) rpc.get_notifier('snapshot', host).info(context, 'snapshot.%s' % event_suffix, usage_info) def _usage_from_capacity(capacity: dict[str, Any], **extra_usage_info) -> dict[str, Any]: capacity_info = { 'name_to_id': capacity['name_to_id'], 'total': capacity['total'], 'free': capacity['free'], 'allocated': capacity['allocated'], 'provisioned': capacity['provisioned'], 'virtual_free': capacity['virtual_free'], 'reported_at': capacity['reported_at'] } capacity_info.update(extra_usage_info) return capacity_info @utils.if_notifications_enabled def notify_about_capacity_usage(context: context.RequestContext, capacity: dict, suffix: str, extra_usage_info: Optional[dict] = None, host: Optional[str] = None) -> None: if not host: host = CONF.host if not extra_usage_info: extra_usage_info = {} usage_info = _usage_from_capacity(capacity, **extra_usage_info) rpc.get_notifier('capacity', 
host).info(context, 'capacity.%s' % suffix, usage_info) def _usage_from_consistencygroup(group_ref: 'objects.Group', **kw) -> dict: usage_info = dict(tenant_id=group_ref.project_id, user_id=group_ref.user_id, availability_zone=group_ref.availability_zone, consistencygroup_id=group_ref.id, name=group_ref.name, created_at=group_ref.created_at.isoformat(), status=group_ref.status) usage_info.update(kw) return usage_info @utils.if_notifications_enabled def notify_about_consistencygroup_usage( context: context.RequestContext, group: 'objects.Group', event_suffix: str, extra_usage_info: Optional[dict] = None, host: Optional[str] = None) -> None: if not host: host = CONF.host if not extra_usage_info: extra_usage_info = {} usage_info = _usage_from_consistencygroup(group, **extra_usage_info) rpc.get_notifier("consistencygroup", host).info( context, 'consistencygroup.%s' % event_suffix, usage_info) def _usage_from_group(group_ref: 'objects.Group', **kw) -> dict: usage_info = dict(tenant_id=group_ref.project_id, user_id=group_ref.user_id, availability_zone=group_ref.availability_zone, group_id=group_ref.id, group_type=group_ref.group_type_id, name=group_ref.name, created_at=group_ref.created_at.isoformat(), status=group_ref.status) usage_info.update(kw) return usage_info @utils.if_notifications_enabled def notify_about_group_usage(context: context.RequestContext, group: 'objects.Group', event_suffix: str, extra_usage_info: Optional[dict] = None, host: Optional[str] = None) -> None: if not host: host = CONF.host if not extra_usage_info: extra_usage_info = {} usage_info = _usage_from_group(group, **extra_usage_info) rpc.get_notifier("group", host).info( context, 'group.%s' % event_suffix, usage_info) def _usage_from_cgsnapshot(cgsnapshot: 'objects.CGSnapshot', **kw) -> dict: usage_info = dict( tenant_id=cgsnapshot.project_id, user_id=cgsnapshot.user_id, cgsnapshot_id=cgsnapshot.id, name=cgsnapshot.name, consistencygroup_id=cgsnapshot.consistencygroup_id, created_at=cgsnapshot.created_at.isoformat(), status=cgsnapshot.status) usage_info.update(kw) return usage_info def _usage_from_group_snapshot(group_snapshot: 'objects.GroupSnapshot', **kw) -> dict: usage_info = dict( tenant_id=group_snapshot.project_id, user_id=group_snapshot.user_id, group_snapshot_id=group_snapshot.id, name=group_snapshot.name, group_id=group_snapshot.group_id, group_type=group_snapshot.group_type_id, created_at=group_snapshot.created_at.isoformat(), status=group_snapshot.status) usage_info.update(kw) return usage_info @utils.if_notifications_enabled def notify_about_cgsnapshot_usage(context: context.RequestContext, cgsnapshot: 'objects.CGSnapshot', event_suffix: str, extra_usage_info: Optional[dict] = None, host: Optional[str] = None) -> None: if not host: host = CONF.host if not extra_usage_info: extra_usage_info = {} usage_info = _usage_from_cgsnapshot(cgsnapshot, **extra_usage_info) rpc.get_notifier("cgsnapshot", host).info( context, 'cgsnapshot.%s' % event_suffix, usage_info) @utils.if_notifications_enabled def notify_about_group_snapshot_usage(context: context.RequestContext, group_snapshot: 'objects.GroupSnapshot', event_suffix: str, extra_usage_info=None, host: Optional[str] = None) -> None: if not host: host = CONF.host if not extra_usage_info: extra_usage_info = {} usage_info = _usage_from_group_snapshot(group_snapshot, **extra_usage_info) rpc.get_notifier("group_snapshot", host).info( context, 'group_snapshot.%s' % event_suffix, usage_info) def _check_blocksize(blocksize: Union[str, int]) -> Union[str, int]: # Check if 
volume_dd_blocksize is valid try: # Rule out zero-sized/negative/float dd blocksize which # cannot be caught by strutils if (blocksize.startswith(('-', '0')) or # type: ignore '.' in blocksize): # type: ignore raise ValueError strutils.string_to_bytes('%sB' % blocksize) except ValueError: LOG.warning("Incorrect value error: %(blocksize)s, " "it may indicate that \'volume_dd_blocksize\' " "was configured incorrectly. Fall back to default.", {'blocksize': blocksize}) # Fall back to default blocksize CONF.clear_override('volume_dd_blocksize') blocksize = CONF.volume_dd_blocksize return blocksize def check_for_odirect_support(src: str, dest: str, flag: str = 'oflag=direct') -> bool: # Check whether O_DIRECT is supported try: # iflag=direct and if=/dev/zero combination does not work # error: dd: failed to open '/dev/zero': Invalid argument if (src == '/dev/zero' and flag == 'iflag=direct'): return False else: utils.execute('dd', 'count=0', 'if=%s' % src, 'of=%s' % dest, flag, run_as_root=True) return True except processutils.ProcessExecutionError: return False def _copy_volume_with_path(prefix, srcstr: str, deststr: str, size_in_m: int, blocksize: Union[str, int], sync: bool = False, execute: Callable = utils.execute, ionice=None, sparse: bool = False) -> None: cmd = prefix[:] if ionice: cmd.extend(('ionice', ionice)) blocksize = _check_blocksize(blocksize) size_in_bytes = size_in_m * units.Mi cmd.extend(('dd', 'if=%s' % srcstr, 'of=%s' % deststr, 'count=%d' % size_in_bytes, 'bs=%s' % blocksize)) # Use O_DIRECT to avoid thrashing the system buffer cache odirect = check_for_odirect_support(srcstr, deststr, 'iflag=direct') cmd.append('iflag=count_bytes,direct' if odirect else 'iflag=count_bytes') if check_for_odirect_support(srcstr, deststr, 'oflag=direct'): cmd.append('oflag=direct') odirect = True # If the volume is being unprovisioned then # request the data is persisted before returning, # so that it's not discarded from the cache. conv = [] if sync and not odirect: conv.append('fdatasync') if sparse: conv.append('sparse') if conv: conv_options = 'conv=' + ",".join(conv) cmd.append(conv_options) # Perform the copy start_time = timeutils.utcnow() execute(*cmd, run_as_root=True) duration = timeutils.delta_seconds(start_time, timeutils.utcnow()) # NOTE(jdg): use a default of 1, mostly for unit test, but in # some incredible event this is 0 (cirros image?) don't barf if duration < 1: duration = 1 mbps = (size_in_m / duration) LOG.debug("Volume copy details: src %(src)s, dest %(dest)s, " "size %(sz).2f MB, duration %(duration).2f sec", {"src": srcstr, "dest": deststr, "sz": size_in_m, "duration": duration}) LOG.info("Volume copy %(size_in_m).2f MB at %(mbps).2f MB/s", {'size_in_m': size_in_m, 'mbps': mbps}) def _open_volume_with_path(path: str, mode: str) -> IO[Any]: try: with utils.temporary_chown(path): handle = open(path, mode) return handle except Exception: LOG.error("Failed to open volume from %(path)s.", {'path': path}) raise def _transfer_data(src: IO, dest: IO, length: int, chunk_size: int) -> None: """Transfer data between files (Python IO objects).""" chunks = int(math.ceil(length / chunk_size)) remaining_length = length LOG.debug("%(chunks)s chunks of %(bytes)s bytes to be transferred.", {'chunks': chunks, 'bytes': chunk_size}) for chunk in range(0, chunks): before = time.time() data = tpool.execute(src.read, min(chunk_size, remaining_length)) # If we have reached end of source, discard any extraneous bytes from # destination volume if trim is enabled and stop writing. 
if data == b'': break tpool.execute(dest.write, data) remaining_length -= len(data) delta = (time.time() - before) rate = (chunk_size / delta) / units.Ki LOG.debug("Transferred chunk %(chunk)s of %(chunks)s (%(rate)dK/s).", {'chunk': chunk + 1, 'chunks': chunks, 'rate': rate}) # yield to any other pending operations eventlet.sleep(0) tpool.execute(dest.flush) def _copy_volume_with_file(src: Union[str, IO], dest: Union[str, IO], size_in_m: int) -> None: src_handle = src if isinstance(src, str): src_handle = _open_volume_with_path(src, 'rb') src_handle = typing.cast(IO, src_handle) dest_handle = dest if isinstance(dest, str): dest_handle = _open_volume_with_path(dest, 'wb') dest_handle = typing.cast(IO, dest_handle) if not src_handle: raise exception.DeviceUnavailable( _("Failed to copy volume, source device unavailable.")) if not dest_handle: raise exception.DeviceUnavailable( _("Failed to copy volume, destination device unavailable.")) start_time = timeutils.utcnow() _transfer_data(src_handle, dest_handle, size_in_m * units.Mi, units.Mi * 4) duration = max(1, timeutils.delta_seconds(start_time, timeutils.utcnow())) if isinstance(src, str): src_handle.close() if isinstance(dest, str): dest_handle.close() mbps = (size_in_m / duration) LOG.info("Volume copy completed (%(size_in_m).2f MB at " "%(mbps).2f MB/s).", {'size_in_m': size_in_m, 'mbps': mbps}) def copy_volume(src: Union[str, BinaryIO], dest: Union[str, BinaryIO], size_in_m: int, blocksize: Union[str, int], sync=False, execute=utils.execute, ionice=None, throttle=None, sparse=False) -> None: """Copy data from the source volume to the destination volume. The parameters 'src' and 'dest' are both typically of type str, which represents the path to each volume on the filesystem. Connectors can optionally return a volume handle of type RawIOBase for volumes that are not available on the local filesystem for open/close operations. If either 'src' or 'dest' are not of type str, then they are assumed to be of type RawIOBase or any derivative that supports file operations such as read and write. In this case, the handles are treated as file handles instead of file paths and, at present moment, throttling is unavailable. """ if (isinstance(src, str) and isinstance(dest, str)): if not throttle: throttle = throttling.Throttle.get_default() with throttle.subcommand(src, dest) as throttle_cmd: _copy_volume_with_path(throttle_cmd['prefix'], src, dest, size_in_m, blocksize, sync=sync, execute=execute, ionice=ionice, sparse=sparse) else: _copy_volume_with_file(src, dest, size_in_m) def clear_volume(volume_size: int, volume_path: str, volume_clear: Optional[str] = None, volume_clear_size: Optional[int] = None, volume_clear_ionice: Optional[str] = None, throttle=None) -> None: """Unprovision old volumes to prevent data leaking between users.""" if volume_clear is None: volume_clear = CONF.volume_clear if volume_clear_size is None: volume_clear_size = CONF.volume_clear_size if volume_clear_size == 0: volume_clear_size = volume_size if volume_clear_ionice is None: volume_clear_ionice = CONF.volume_clear_ionice LOG.info("Performing secure delete on volume: %s", volume_path) # We pass sparse=False explicitly here so that zero blocks are not # skipped in order to clear the volume. 
if volume_clear == 'zero': return copy_volume('/dev/zero', volume_path, volume_clear_size, CONF.volume_dd_blocksize, sync=True, execute=utils.execute, ionice=volume_clear_ionice, throttle=throttle, sparse=False) else: raise exception.InvalidConfigurationValue( option='volume_clear', value=volume_clear) def supports_thin_provisioning() -> bool: return brick_lvm.LVM.supports_thin_provisioning( utils.get_root_helper()) def get_all_physical_volumes(vg_name=None) -> list: return brick_lvm.LVM.get_all_physical_volumes( utils.get_root_helper(), vg_name) def get_all_volume_groups(vg_name=None) -> list: return brick_lvm.LVM.get_all_volume_groups( utils.get_root_helper(), vg_name) def extract_availability_zones_from_volume_type( volume_type: Union['objects.VolumeType', dict]) \ -> Optional[list[str]]: if not volume_type: return None extra_specs = volume_type.get('extra_specs', {}) if 'RESKEY:availability_zones' not in extra_specs: return None azs = extra_specs.get('RESKEY:availability_zones', '').split(',') return [az.strip() for az in azs if az != ''] # Default symbols to use for passwords. Avoids visually confusing characters. # ~6 bits per symbol DEFAULT_PASSWORD_SYMBOLS = ('23456789', # Removed: 0,1 'ABCDEFGHJKLMNPQRSTUVWXYZ', # Removed: I, O 'abcdefghijkmnopqrstuvwxyz') # Removed: l def generate_password( length: int = 16, symbolgroups: tuple[str, ...] = DEFAULT_PASSWORD_SYMBOLS) -> str: """Generate a random password from the supplied symbol groups. At least one symbol from each group will be included. Unpredictable results if length is less than the number of symbol groups. Believed to be reasonably secure (with a reasonable password length!) """ # NOTE(jerdfelt): Some password policies require at least one character # from each group of symbols, so start off with one random character # from each symbol group bytes = 1 # Number of random bytes to generate for each choice password = [s[ord(urandom(bytes)) % len(s)] for s in symbolgroups] # If length < len(symbolgroups), the leading characters will only # be from the first length groups. Try our best to not be predictable # by shuffling and then truncating. shuffle(password) password = password[:length] length -= len(password) # then fill with random characters from all symbol groups symbols = ''.join(symbolgroups) password.extend( [symbols[ord(urandom(bytes)) % len(symbols)] for _i in range(length)]) # finally shuffle to ensure first x characters aren't from a # predictable group shuffle(password) return ''.join(password) def generate_username( length: int = 20, symbolgroups: tuple[str, ...] = DEFAULT_PASSWORD_SYMBOLS) -> str: # Use the same implementation as the password generation. return generate_password(length, symbolgroups) DEFAULT_POOL_NAME = '_pool0' def extract_host(host: Optional[str], level: str = 'backend', default_pool_name: bool = False) -> Optional[str]: """Extract Host, Backend or Pool information from host string. :param host: String for host, which could include host@backend#pool info :param level: Indicate which level of information should be extracted from host string. Level can be 'host', 'backend' or 'pool', default value is 'backend' :param default_pool_name: this flag specify what to do if level == 'pool' and there is no 'pool' info encoded in host string. default_pool_name=True will return DEFAULT_POOL_NAME, otherwise we return None. Default value of this parameter is False. 
:return: expected information, string or None :raises: exception.InvalidVolume For example: host = 'HostA@BackendB#PoolC' ret = extract_host(host, 'host') # ret is 'HostA' ret = extract_host(host, 'backend') # ret is 'HostA@BackendB' ret = extract_host(host, 'pool') # ret is 'PoolC' host = 'HostX@BackendY' ret = extract_host(host, 'pool') # ret is None ret = extract_host(host, 'pool', True) # ret is '_pool0' """ if host is None: msg = _("volume is not assigned to a host") raise exception.InvalidVolume(reason=msg) if level == 'host': # make sure pool is not included hst = host.split('#')[0] return hst.split('@')[0] elif level == 'backend': return host.split('#')[0] elif level == 'pool': lst = host.split('#') if len(lst) == 2: return lst[1] elif default_pool_name is True: return DEFAULT_POOL_NAME else: return None return None # not hit def append_host(host: Optional[str], pool: Optional[str]) -> Optional[str]: """Encode pool into host info.""" if not host or not pool: return host new_host = "#".join([host, pool]) return new_host def matching_backend_name(src_volume_type, volume_type) -> bool: if src_volume_type.get('volume_backend_name') and \ volume_type.get('volume_backend_name'): return src_volume_type.get('volume_backend_name') == \ volume_type.get('volume_backend_name') else: return False def hosts_are_equivalent(host_1: str, host_2: str) -> bool: # In case host_1 or host_2 are None if not (host_1 and host_2): return host_1 == host_2 return extract_host(host_1) == extract_host(host_2) def extract_id_from_volume_name(vol_name: str) -> Optional[str]: regex: typing.Pattern = re.compile( CONF.volume_name_template.replace('%s', r'(?P.+)')) match = regex.match(vol_name) return match.group('uuid') if match else None def check_already_managed_volume(vol_id: Optional[str]): """Check cinder db for already managed volume. :param vol_id: volume id parameter :returns: bool -- return True, if db entry with specified volume id exists, otherwise return False :raises: ValueError if vol_id is not a valid uuid string """ try: return (vol_id and isinstance(vol_id, str) and uuid.UUID(vol_id, version=4) and objects.Volume.exists(context.get_admin_context(), vol_id)) except ValueError: return False def extract_id_from_snapshot_name(snap_name: str) -> Optional[str]: """Return a snapshot's ID from its name on the backend.""" regex = re.compile( CONF.snapshot_name_template.replace('%s', r'(?P.+)')) match = regex.match(snap_name) return match.group('uuid') if match else None def paginate_entries_list(entries: list[dict], marker: Optional[Union[dict, str]], limit: int, offset: Optional[int], sort_keys: list[str], sort_dirs: list[str]) -> list: """Paginate a list of entries. :param entries: list of dictionaries :marker: The last element previously returned :limit: The maximum number of items to return :offset: The number of items to skip from the marker or from the first element. 
:sort_keys: A list of keys in the dictionaries to sort by :sort_dirs: A list of sort directions, where each is either 'asc' or 'dec' """ comparers = [(operator.itemgetter(key.strip()), multiplier) for (key, multiplier) in zip(sort_keys, sort_dirs)] def comparer(left, right) -> int: for fn, d in comparers: left_val = fn(left) right_val = fn(right) if isinstance(left_val, dict): left_val = sorted(left_val.values())[0] if isinstance(right_val, dict): right_val = sorted(right_val.values())[0] if left_val == right_val: continue if d == 'asc': return -1 if left_val < right_val else 1 else: return -1 if left_val > right_val else 1 else: return 0 sorted_entries = sorted(entries, key=functools.cmp_to_key(comparer)) start_index = 0 if offset is None: offset = 0 if marker: if not isinstance(marker, dict): try: marker = json.loads(marker) except ValueError: msg = _('marker %s can not be analysed, please use json like ' 'format') % marker raise exception.InvalidInput(reason=msg) start_index = -1 for i, entry in enumerate(sorted_entries): if entry['reference'] == marker: start_index = i + 1 break if start_index < 0: msg = _('marker not found: %s') % marker raise exception.InvalidInput(reason=msg) range_end = start_index + limit return sorted_entries[start_index + offset:range_end + offset] def convert_config_string_to_dict(config_string: str) -> dict: """Convert config file replication string to a dict. The only supported form is as follows: "{'key-1'='val-1' 'key-2'='val-2'...}" :param config_string: Properly formatted string to convert to dict. :response: dict of string values """ resultant_dict = {} try: st = config_string.replace("=", ":") st = st.replace(" ", ", ") resultant_dict = ast.literal_eval(st) except Exception: LOG.warning("Error encountered translating config_string: " "%(config_string)s to dict", {'config_string': config_string}) return resultant_dict def create_encryption_key(context: context.RequestContext, key_manager, volume_type_id: str) -> Optional[str]: encryption_key_id = None if volume_types.is_encrypted(context, volume_type_id): volume_type_encryption: db.sqlalchemy.models.Encryption = ( volume_types.get_volume_type_encryption(context, volume_type_id)) if volume_type_encryption is None: raise exception.Invalid(message="Volume type error") cipher = volume_type_encryption.cipher length = volume_type_encryption.key_size algorithm = cipher.split('-')[0] if cipher else None if algorithm is None: raise exception.InvalidVolumeType( message="Invalid encryption spec") try: encryption_key_id = key_manager.create_key( context, algorithm=algorithm, length=length) except castellan_exception.KeyManagerError: # The messaging back to the client here is # purposefully terse, so we don't leak any sensitive # details. 
LOG.exception("Key manager error") raise exception.Invalid(message="Key manager error") typing.cast(str, encryption_key_id) return encryption_key_id def delete_encryption_key(context: context.RequestContext, key_manager, encryption_key_id: str) -> None: try: key_manager.delete(context, encryption_key_id) except castellan_exception.ManagedObjectNotFoundError: pass except castellan_exception.KeyManagerError: LOG.info("First attempt to delete key id %s failed, retrying with " "cinder's service context.", encryption_key_id) conf = CONF ks_loading.register_auth_conf_options(conf, 'keystone_authtoken') ks_loading.load_auth_from_conf_options(conf, 'keystone_authtoken') service_context = keystone_password.KeystonePassword( password=conf.keystone_authtoken.password, auth_url=conf.keystone_authtoken.auth_url, username=conf.keystone_authtoken.username, user_domain_name=conf.keystone_authtoken.user_domain_name, project_name=conf.keystone_authtoken.project_name, project_domain_name=conf.keystone_authtoken.project_domain_name) try: castellan_key_manager.API(conf).delete(service_context, encryption_key_id) except castellan_exception.ManagedObjectNotFoundError: pass def clone_encryption_key(context: context.RequestContext, key_manager, encryption_key_id: str) -> str: clone_key_id = None if encryption_key_id is not None: clone_key_id = key_manager.store( context, key_manager.get(context, encryption_key_id)) return clone_key_id def is_boolean_str(str: Optional[str]) -> bool: spec = (str or '').split() return (len(spec) == 2 and spec[0] == '' and strutils.bool_from_string(spec[1])) def is_replicated_spec(extra_specs: dict) -> bool: return (bool(extra_specs) and is_boolean_str(extra_specs.get('replication_enabled'))) def is_multiattach_spec(extra_specs: dict) -> bool: return (bool(extra_specs) and is_boolean_str(extra_specs.get('multiattach'))) def group_get_by_id(group_id): ctxt = context.get_admin_context() group = db.group_get(ctxt, group_id) return group def is_group_a_cg_snapshot_type(group_or_snap) -> bool: LOG.debug("Checking if %s is a consistent snapshot group", group_or_snap) if group_or_snap["group_type_id"] is not None: spec = group_types.get_group_type_specs( group_or_snap["group_type_id"], key="consistent_group_snapshot_enabled" ) return spec == " True" return False def is_group_a_type(group: 'objects.Group', key: str) -> bool: if group.group_type_id is not None: spec = group_types.get_group_type_specs( group.group_type_id, key=key ) return spec == " True" return False def get_max_over_subscription_ratio( str_value: Union[str, float], supports_auto: bool = False) -> Union[str, float]: """Get the max_over_subscription_ratio from a string As some drivers need to do some calculations with the value and we are now receiving a string value in the conf, this converts the value to float when appropriate. :param str_value: Configuration object :param supports_auto: Tell if the calling driver supports auto MOSR. :response: value of mosr """ if not supports_auto and str_value == "auto": msg = _("This driver does not support automatic " "max_over_subscription_ratio calculation. 
Please use a " "valid float value.") LOG.error(msg) raise exception.VolumeDriverException(message=msg) if str_value == 'auto': str_value = typing.cast(str, str_value) return str_value mosr = float(str_value) if mosr < 1: msg = _("The value of max_over_subscription_ratio must be " "greater than 1.") LOG.error(msg) raise exception.InvalidParameterValue(message=msg) return mosr def check_image_metadata(image_meta: dict[str, Union[str, int]], vol_size: int) -> None: """Validates the image metadata.""" # Check whether image is active if image_meta['status'] != 'active': msg = _('Image %(image_id)s is not active.' ) % {'image_id': image_meta['id']} raise exception.InvalidInput(reason=msg) # Check image size is not larger than volume size. image_size = utils.as_int(image_meta['size'], quiet=False) image_size_in_gb = (image_size + GB - 1) // GB if image_size_in_gb > vol_size: msg = _('Size of specified image %(image_size)sGB' ' is larger than volume size %(volume_size)sGB.') msg = msg % {'image_size': image_size_in_gb, 'volume_size': vol_size} raise exception.InvalidInput(reason=msg) # Check image min_disk requirement is met for the particular volume min_disk = image_meta.get('min_disk', 0) min_disk = typing.cast(int, min_disk) if vol_size < min_disk: msg = _('Volume size %(volume_size)sGB cannot be smaller' ' than the image minDisk size %(min_disk)sGB.') msg = msg % {'volume_size': vol_size, 'min_disk': min_disk} raise exception.InvalidInput(reason=msg) # Check if virtual size is not greater than volume size virtual_size = image_meta.get('virtual_size') if virtual_size: virtual_size = utils.as_int(virtual_size) image_id = image_meta['id'] image_id = typing.cast(str, image_id) image_utils.check_virtual_size(virtual_size, vol_size, image_id) def enable_bootable_flag(volume: 'objects.Volume') -> None: try: LOG.debug('Marking volume %s as bootable.', volume.id) volume.bootable = True volume.save() except exception.CinderException as ex: LOG.exception("Failed updating volume %(volume_id)s bootable " "flag to true", {'volume_id': volume.id}) raise exception.MetadataUpdateFailure(reason=ex) def get_volume_image_metadata(image_id: str, image_meta: dict[str, Any]) -> dict: # Save some base attributes into the volume metadata base_metadata = { 'image_id': image_id, } name = image_meta.get('name', None) if name: base_metadata['image_name'] = name # Save some more attributes into the volume metadata from the image # metadata for key in IMAGE_ATTRIBUTES: if key not in image_meta: continue value = image_meta.get(key, None) if value is not None: base_metadata[key] = value # Save all the image metadata properties into the volume metadata property_metadata = {} image_properties = image_meta.get('properties', {}) image_properties = typing.cast(dict, image_properties) for (key, value) in image_properties.items(): if value is not None: property_metadata[key] = value volume_metadata = dict(property_metadata) volume_metadata.update(base_metadata) return volume_metadata def copy_image_to_volume(driver, context: context.RequestContext, volume: 'objects.Volume', image_meta: dict, image_location: Union[str, tuple[Optional[str], Any]], image_service, disable_sparse: bool = False) -> None: """Downloads Glance image to the specified volume.""" image_id = image_meta['id'] LOG.debug("Attempting download of %(image_id)s (%(image_location)s)" " to volume %(volume_id)s.", {'image_id': image_id, 'volume_id': volume.id, 'image_location': image_location}) try: image_encryption_key = image_meta.get('cinder_encryption_key_id') if 
volume.encryption_key_id and image_encryption_key: # If the image provided an encryption key, we have # already cloned it to the volume's key in # _get_encryption_key_id, so we can do a direct copy. driver.copy_image_to_volume( context, volume, image_service, image_id, disable_sparse=disable_sparse) elif volume.encryption_key_id: # Creating an encrypted volume from a normal, unencrypted, # image. driver.copy_image_to_encrypted_volume( context, volume, image_service, image_id, disable_sparse=disable_sparse) else: driver.copy_image_to_volume( context, volume, image_service, image_id, disable_sparse=disable_sparse) except processutils.ProcessExecutionError as ex: LOG.exception("Failed to copy image %(image_id)s to volume: " "%(volume_id)s", {'volume_id': volume.id, 'image_id': image_id}) raise exception.ImageCopyFailure(reason=ex.stderr) except (exception.ImageUnacceptable, exception.ImageTooBig, exception.ImageConversionNotAllowed): with excutils.save_and_reraise_exception(): LOG.exception("Failed to copy image %(image_id)s to volume: " "%(volume_id)s", {'volume_id': volume.id, 'image_id': image_id}) except Exception as ex: LOG.exception("Failed to copy image %(image_id)s to " "volume: %(volume_id)s", {'volume_id': volume.id, 'image_id': image_id}) if not isinstance(ex, exception.ImageCopyFailure): raise exception.ImageCopyFailure(reason=ex) else: raise LOG.debug("Downloaded image %(image_id)s (%(image_location)s)" " to volume %(volume_id)s successfully.", {'image_id': image_id, 'volume_id': volume.id, 'image_location': image_location}) def image_conversion_dir() -> str: tmpdir = (CONF.image_conversion_dir or tempfile.gettempdir()) # ensure temporary directory exists if not os.path.exists(tmpdir): os.makedirs(tmpdir) return tmpdir def check_encryption_provider( volume: 'objects.Volume', context: context.RequestContext, ) -> dict: """Check that this is a LUKS encryption provider. :returns: encryption dict """ encryption = db.volume_encryption_metadata_get(context, volume.id) if 'provider' not in encryption: message = _("Invalid encryption spec.") raise exception.VolumeDriverException(message=message) provider = encryption['provider'] if provider in encryptors.LEGACY_PROVIDER_CLASS_TO_FORMAT_MAP: provider = encryptors.LEGACY_PROVIDER_CLASS_TO_FORMAT_MAP[provider] encryption['provider'] = provider if provider != encryptors.LUKS: message = _("Provider %s not supported.") % provider raise exception.VolumeDriverException(message=message) if 'cipher' not in encryption or 'key_size' not in encryption: msg = _('encryption spec must contain "cipher" and ' '"key_size"') raise exception.VolumeDriverException(message=msg) return encryption def sanitize_host(host: str) -> str: """Ensure IPv6 addresses are enclosed in [] for iSCSI portals.""" if netutils.is_valid_ipv6(host): return '[%s]' % host return host def sanitize_hostname(hostname) -> str: """Return a hostname which conforms to RFC-952 and RFC-1123 specs.""" hostname = hostname.encode('latin-1', 'ignore') hostname = hostname.decode('latin-1') hostname = re.sub(r'[ _]', '-', hostname) hostname = re.sub(r'[^\w.-]+', '', hostname) hostname = hostname.lower() hostname = hostname.strip('.-') return hostname def resolve_hostname(hostname: str) -> str: """Resolves host name to IP address. Resolves a host name (my.data.point.com) to an IP address (10.12.143.11). This routine also works if the data passed in hostname is already an IP. In this case, the same IP address will be returned. :param hostname: Host name to resolve. 
:returns: IP Address for Host name. """ ip = socket.getaddrinfo(hostname, None)[0][4][0] LOG.debug('Asked to resolve hostname %(host)s and got IP %(ip)s.', {'host': hostname, 'ip': ip}) return str(ip) def update_backup_error(backup, err: str, status=fields.BackupStatus.ERROR) -> None: backup.status = status backup.fail_reason = err backup.save() # TODO (whoami-rajat): Remove this method when oslo.vmware calls volume_utils # wrapper of upload_volume instead of image_utils.upload_volume def get_base_image_ref(volume: 'objects.Volume'): # This method fetches the image_id from volume glance metadata and pass # it to the driver calling it during upload volume to image operation base_image_ref = None if volume.glance_metadata: base_image_ref = volume.glance_metadata.get('image_id') return base_image_ref def upload_volume(context: context.RequestContext, image_service, image_meta, volume_path, volume: 'objects.Volume', volume_format: str = 'raw', run_as_root: bool = True, compress: bool = True, volume_fd = None) -> None: # retrieve store information from extra-specs store_id = volume.volume_type.extra_specs.get('image_service:store_id') # This fetches the image_id from volume glance metadata and pass # it to the driver calling it during upload volume to image operation base_image_ref = None if volume.glance_metadata: base_image_ref = volume.glance_metadata.get('image_id') image_utils.upload_volume(context, image_service, image_meta, volume_path, volume_format=volume_format, run_as_root=run_as_root, compress=compress, store_id=store_id, base_image_ref=base_image_ref, volume_fd=volume_fd) def get_backend_configuration(backend_name, backend_opts=None): """Get a configuration object for a specific backend.""" config_stanzas = CONF.list_all_sections() if backend_name not in config_stanzas: msg = _("Could not find backend stanza %(backend_name)s in " "configuration. Available stanzas are %(stanzas)s") params = { "stanzas": config_stanzas, "backend_name": backend_name, } raise exception.ConfigNotFound(message=msg % params) config = configuration.Configuration(driver.volume_opts, config_group=backend_name) if backend_opts: config.append_config_values(backend_opts) return config def brick_get_connector_properties(multipath: bool = False, enforce_multipath: bool = False): """Wrapper to automatically set root_helper in brick calls. :param multipath: A boolean indicating whether the connector can support multipath. :param enforce_multipath: If True, it raises exception when multipath=True is specified but multipathd is not running. If False, it falls back to multipath=False when multipathd is not running. """ root_helper = utils.get_root_helper() return connector.get_connector_properties(root_helper, CONF.my_ip, multipath, enforce_multipath) def brick_get_connector(protocol: str, driver=None, use_multipath: bool = False, device_scan_attempts: int = 3, *args, **kwargs): """Wrapper to get a brick connector object. This automatically populates the required protocol as well as the root_helper needed to execute commands. 
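A typical call from a driver looks like (illustrative only)
brick_get_connector('iscsi', use_multipath=True); the returned os-brick
connector can then be used to attach the volume locally, e.g. via its
connect_volume() method.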
""" root_helper = utils.get_root_helper() return connector.InitiatorConnector.factory(protocol, root_helper, driver=driver, use_multipath=use_multipath, device_scan_attempts= device_scan_attempts, *args, **kwargs) def brick_get_encryptor(connection_info: dict, *args, **kwargs): """Wrapper to get a brick encryptor object.""" root_helper = utils.get_root_helper() km = castellan_key_manager.API(CONF) return encryptors.get_volume_encryptor(root_helper=root_helper, connection_info=connection_info, keymgr=km, *args, **kwargs) def brick_attach_volume_encryptor(context: context.RequestContext, attach_info: dict, encryption: dict) -> None: """Attach encryption layer.""" connection_info = attach_info['conn'] connection_info['data']['device_path'] = attach_info['device']['path'] encryptor = brick_get_encryptor(connection_info, **encryption) encryptor.attach_volume(context, **encryption) def brick_detach_volume_encryptor(attach_info: dict, encryption: dict) -> None: """Detach encryption layer.""" connection_info = attach_info['conn'] connection_info['data']['device_path'] = attach_info['device']['path'] encryptor = brick_get_encryptor(connection_info, **encryption) encryptor.detach_volume(**encryption) # NOTE: the trace methods are included in volume_utils because # they are currently only called by code in the volume area # of Cinder. These can be moved to a different file if they # are needed elsewhere. def trace(*dec_args, **dec_kwargs): """Trace calls to the decorated function. This decorator should always be defined as the outermost decorator so it is defined last. This is important so it does not interfere with other decorators. Using this decorator on a function will cause its execution to be logged at `DEBUG` level with arguments, return values, and exceptions. :returns: a function decorator """ def _decorator(f): func_name = f.__name__ @functools.wraps(f) def trace_logging_wrapper(*args, **kwargs): filter_function = dec_kwargs.get('filter_function') if len(args) > 0: maybe_self = args[0] else: maybe_self = kwargs.get('self', None) if maybe_self and hasattr(maybe_self, '__module__'): logger = logging.getLogger(maybe_self.__module__) else: logger = LOG # NOTE(ameade): Don't bother going any further if DEBUG log level # is not enabled for the logger. 
if not logger.isEnabledFor(py_logging.DEBUG): return f(*args, **kwargs) all_args = inspect.getcallargs(f, *args, **kwargs) pass_filter = filter_function is None or filter_function(all_args) if pass_filter: logger.debug('==> %(func)s: call %(all_args)r', {'func': func_name, 'all_args': strutils.mask_password( str(all_args))}) start_time = time.time() * 1000 try: result = f(*args, **kwargs) except Exception as exc: total_time = int(round(time.time() * 1000)) - start_time logger.debug('<== %(func)s: exception (%(time)dms) %(exc)r', {'func': func_name, 'time': total_time, 'exc': exc}) raise total_time = int(round(time.time() * 1000)) - start_time if isinstance(result, dict): mask_result = strutils.mask_dict_password(result) elif isinstance(result, str): mask_result = strutils.mask_password(result) else: mask_result = result if pass_filter: logger.debug('<== %(func)s: return (%(time)dms) %(result)r', {'func': func_name, 'time': total_time, 'result': mask_result}) return result return trace_logging_wrapper if len(dec_args) == 0: # filter_function is passed and args does not contain f return _decorator else: # filter_function is not passed return _decorator(dec_args[0]) def trace_api(*dec_args, **dec_kwargs): """Decorates a function if TRACE_API is true.""" def _decorator(f): @functools.wraps(f) def trace_api_logging_wrapper(*args, **kwargs): if TRACE_API: return trace(f, *dec_args, **dec_kwargs)(*args, **kwargs) return f(*args, **kwargs) return trace_api_logging_wrapper if len(dec_args) == 0: # filter_function is passed and args does not contain f return _decorator else: # filter_function is not passed return _decorator(dec_args[0]) def trace_method(f): """Decorates a function if TRACE_METHOD is true.""" @functools.wraps(f) def trace_method_logging_wrapper(*args, **kwargs): if TRACE_METHOD: return trace(f)(*args, **kwargs) return f(*args, **kwargs) return trace_method_logging_wrapper class TraceWrapperMetaclass(type): """Metaclass that wraps all methods of a class with trace_method. This metaclass will cause every function inside of the class to be decorated with the trace_method decorator. To use the metaclass you define a class like so: class MyClass(object, metaclass=utils.TraceWrapperMetaclass): """ def __new__(meta, classname, bases, classDict): newClassDict = {} for attributeName, attribute in classDict.items(): if isinstance(attribute, types.FunctionType): # replace it with a wrapped version attribute = functools.update_wrapper(trace_method(attribute), attribute) newClassDict[attributeName] = attribute return type.__new__(meta, classname, bases, newClassDict) class TraceWrapperWithABCMetaclass(abc.ABCMeta, TraceWrapperMetaclass): """Metaclass that wraps all methods of a class with trace.""" pass def setup_tracing(trace_flags): """Set global variables for each trace flag. Sets variables TRACE_METHOD and TRACE_API, which represent whether to log methods or api traces. :param trace_flags: a list of strings """ global TRACE_METHOD global TRACE_API try: trace_flags = [flag.strip() for flag in trace_flags] except TypeError: # Handle when trace_flags is None or a test mock trace_flags = [] for invalid_flag in (set(trace_flags) - VALID_TRACE_FLAGS): LOG.warning('Invalid trace flag: %s', invalid_flag) TRACE_METHOD = 'method' in trace_flags TRACE_API = 'api' in trace_flags def require_driver_initialized(driver): """Verifies if `driver` is initialized If the driver is not initialized, an exception will be raised. :params driver: The driver instance. 
:raises: `exception.DriverNotInitialized` """ # we can't do anything if the driver didn't init if not driver.initialized: driver_name = driver.__class__.__name__ LOG.error("Volume driver %s not initialized", driver_name) raise exception.DriverNotInitialized() else: log_unsupported_driver_warning(driver) def log_unsupported_driver_warning(driver): """Annoy the log about unsupported drivers.""" if not driver.supported: # Check to see if the driver is flagged as supported. LOG.warning("Volume driver (%(driver_name)s %(version)s) is " "currently unsupported and may be removed in the " "next release of OpenStack. Use at your own risk.", {'driver_name': driver.__class__.__name__, 'version': driver.get_version()}, resource={'type': 'driver', 'id': driver.__class__.__name__}) def is_all_zero(chunk: bytes) -> bool: """Return true if the chunk of bytes is all zeroes.""" return chunk == bytes(len(chunk)) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315577.4151213 cinder-27.0.0/cinder/wsgi/0000775000175000017500000000000000000000000015304 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/wsgi/__init__.py0000664000175000017500000000000000000000000017403 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/wsgi/api.py0000664000175000017500000000142300000000000016427 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """WSGI application entry-point for Cinder Volume API.""" import threading from cinder.wsgi import wsgi application = None lock = threading.Lock() with lock: if application is None: application = wsgi.initialize_application() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/wsgi/common.py0000664000175000017500000001147700000000000017160 0ustar00zuulzuul00000000000000# All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Utility methods for working with WSGI servers.""" import webob.dec import webob.exc from cinder.i18n import _ class Request(webob.Request): pass class Application(object): """Base WSGI application wrapper. Subclasses need to implement __call__.""" @classmethod def factory(cls, global_config, **local_config): """Used for paste app factories in paste.deploy config files. 
Any local configuration (that is, values under the [app:APPNAME] section of the paste config) will be passed into the `__init__` method as kwargs. A hypothetical configuration would look like: [app:wadl] latest_version = 1.3 paste.app_factory = cinder.api.fancy_api:Wadl.factory which would result in a call to the `Wadl` class as import cinder.api.fancy_api fancy_api.Wadl(latest_version='1.3') You could of course re-implement the `factory` method in subclasses, but using the kwarg passing it shouldn't be necessary. """ return cls(**local_config) def __call__(self, environ, start_response): r"""Subclasses will probably want to implement __call__ like this: @webob.dec.wsgify(RequestClass=Request) def __call__(self, req): # Any of the following objects work as responses: # Option 1: simple string res = 'message\n' # Option 2: a nicely formatted HTTP exception page res = exc.HTTPForbidden(explanation='Nice try') # Option 3: a webob Response object (in case you need to play with # headers, or you want to be treated like an iterable) res = Response(); res.app_iter = open('somefile') # Option 4: any wsgi app to be run next res = self.application # Option 5: you can get a Response object for a wsgi app, too, to # play with headers etc res = req.get_response(self.application) # You can then just return your response... return res # ... or set req.response and return None. req.response = res See the end of http://pythonpaste.org/webob/modules/dec.html for more info. """ raise NotImplementedError(_('You must implement __call__')) class Middleware(Application): """Base WSGI middleware. These classes require an application to be initialized that will be called next. By default the middleware will simply call its wrapped app, or you can override __call__ to customize its behavior. """ @classmethod def factory(cls, global_config, **local_config): """Used for paste app factories in paste.deploy config files. Any local configuration (that is, values under the [filter:APPNAME] section of the paste config) will be passed into the `__init__` method as kwargs. A hypothetical configuration would look like: [filter:analytics] redis_host = 127.0.0.1 paste.filter_factory = cinder.api.analytics:Analytics.factory which would result in a call to the `Analytics` class as import cinder.api.analytics analytics.Analytics(app_from_paste, redis_host='127.0.0.1') You could of course re-implement the `factory` method in subclasses, but using the kwarg passing it shouldn't be necessary. """ def _factory(app): return cls(app, **local_config) return _factory def __init__(self, application): self.application = application def process_request(self, req): """Called on each request. If this returns None, the next application down the stack will be executed. If it returns a response then that response will be returned and execution will stop here. """ return None def process_response(self, response): """Do whatever you'd like to the response.""" return response @webob.dec.wsgify(RequestClass=Request) def __call__(self, req): response = self.process_request(req) if response: return response response = req.get_response(self.application) return self.process_response(response) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/wsgi/eventlet_server.py0000664000175000017500000000375000000000000021077 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Methods for working with eventlet WSGI servers.""" import socket from oslo_config import cfg from oslo_service import wsgi from oslo_utils import netutils socket_opts = [ cfg.BoolOpt('tcp_keepalive', default=True, help="Sets the value of TCP_KEEPALIVE (True/False) for each " "server socket."), cfg.IntOpt('tcp_keepalive_interval', help="Sets the value of TCP_KEEPINTVL in seconds for each " "server socket. Not supported on OS X."), cfg.IntOpt('tcp_keepalive_count', help="Sets the value of TCP_KEEPCNT for each " "server socket. Not supported on OS X."), ] CONF = cfg.CONF CONF.register_opts(socket_opts) class Server(wsgi.Server): """Server class to manage a WSGI server, serving a WSGI application.""" def _set_socket_opts(self, _socket): _socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) # NOTE(praneshp): Call set_tcp_keepalive in oslo to set # tcp keepalive parameters. Sockets can hang around forever # without keepalive netutils.set_tcp_keepalive(_socket, self.conf.tcp_keepalive, self.conf.tcp_keepidle, self.conf.tcp_keepalive_count, self.conf.tcp_keepalive_interval) return _socket ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/wsgi/wsgi.py0000664000175000017500000000342300000000000016631 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
"""Cinder OS API WSGI application.""" import sys import warnings warnings.simplefilter('once', DeprecationWarning) from oslo_config import cfg from oslo_log import log as logging from oslo_reports import guru_meditation_report as gmr from oslo_reports import opts as gmr_opts from oslo_service import wsgi from cinder import objects # noqa from cinder import i18n # noqa i18n.enable_lazy() # Need to register global_opts from cinder.common import config from cinder.common import constants from cinder import coordination from cinder import rpc from cinder import service from cinder import version CONF = cfg.CONF def initialize_application(): objects.register_all() gmr_opts.set_defaults(CONF) CONF(sys.argv[1:], project='cinder', version=version.version_string()) logging.setup(CONF, "cinder") config.set_middleware_defaults() # NOTE(amorin): Do not register signal handers because it does not work # in wsgi applications gmr.TextGuruMeditation.setup_autorun( version, conf=CONF, setup_signal=False) coordination.COORDINATOR.start() rpc.init(CONF) service.setup_profiler(constants.API_BINARY, CONF.host) return wsgi.Loader(CONF).load_app(name='osapi_volume') ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315577.4151213 cinder-27.0.0/cinder/zonemanager/0000775000175000017500000000000000000000000016641 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/zonemanager/__init__.py0000664000175000017500000000000000000000000020740 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315577.4151213 cinder-27.0.0/cinder/zonemanager/drivers/0000775000175000017500000000000000000000000020317 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/zonemanager/drivers/__init__.py0000664000175000017500000000000000000000000022416 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315577.4191215 cinder-27.0.0/cinder/zonemanager/drivers/brocade/0000775000175000017500000000000000000000000021716 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/zonemanager/drivers/brocade/__init__.py0000664000175000017500000000000000000000000024015 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/zonemanager/drivers/brocade/brcd_fabric_opts.py0000664000175000017500000000473600000000000025567 0ustar00zuulzuul00000000000000# (c) Copyright 2019 Brocade, a Broadcom Company # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
# from oslo_config import cfg from oslo_log import log as logging from cinder.volume import configuration brcd_zone_opts = [ cfg.StrOpt('fc_southbound_protocol', default='REST_HTTP', choices=('SSH', 'HTTP', 'HTTPS', 'REST_HTTP', 'REST_HTTPS'), help='South bound connector for the fabric.'), cfg.StrOpt('fc_fabric_address', default='', help='Management IP of fabric.'), cfg.StrOpt('fc_fabric_user', default='', help='Fabric user ID.'), cfg.StrOpt('fc_fabric_password', default='', help='Password for user.', secret=True), cfg.PortOpt('fc_fabric_port', default=22, help='Connecting port'), cfg.StrOpt('fc_fabric_ssh_cert_path', default='', help='Local SSH certificate Path.'), cfg.StrOpt('zoning_policy', default='initiator-target', help='Overridden zoning policy.'), cfg.BoolOpt('zone_activate', default=True, help='Overridden zoning activation state.'), cfg.StrOpt('zone_name_prefix', default='openstack', help='Overridden zone name prefix.'), cfg.StrOpt('fc_virtual_fabric_id', default=None, help='Virtual Fabric ID.') ] CONF = cfg.CONF CONF.register_opts(brcd_zone_opts, group='BRCD_FABRIC_EXAMPLE') LOG = logging.getLogger(__name__) def load_fabric_configurations(fabric_names): fabric_configs = {} for fabric_name in fabric_names: config = configuration.Configuration(brcd_zone_opts, fabric_name) LOG.debug("Loaded FC fabric config %(fabricname)s", {'fabricname': fabric_name}) fabric_configs[fabric_name] = config return fabric_configs ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/zonemanager/drivers/brocade/brcd_fc_san_lookup_service.py0000664000175000017500000001762000000000000027632 0ustar00zuulzuul00000000000000# (c) Copyright 2019 Brocade, a Broadcom Company # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # from oslo_log import log as logging from oslo_utils import excutils from oslo_utils import importutils from cinder import exception from cinder.i18n import _ from cinder.zonemanager.drivers.brocade import brcd_fabric_opts as fabric_opts from cinder.zonemanager import fc_san_lookup_service as fc_service from cinder.zonemanager import utils as fczm_utils LOG = logging.getLogger(__name__) class BrcdFCSanLookupService(fc_service.FCSanLookupService): """The SAN lookup service that talks to Brocade switches. Version History: 1.0.0 - Initial version 1.1 - Add support to use config option for switch southbound protocol 1.2 - Fix open sessions issue """ VERSION = "1.2" def __init__(self, **kwargs): """Initializing the client.""" super(BrcdFCSanLookupService, self).__init__(**kwargs) self.configuration = kwargs.get('configuration', None) self.create_configuration() def create_configuration(self): """Configuration specific to SAN context values.""" config = self.configuration fabric_names = [x.strip() for x in config.fc_fabric_names.split(',')] LOG.debug('Fabric Names: %s', fabric_names) # There can be more than one SAN in the network and we need to # get credentials for each for SAN context lookup later. 
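# Illustrative cinder.conf layout (fabric names and address are examples):
#   [fc-zone-manager]
#   fc_fabric_names = BRCD_FABRIC_A, BRCD_FABRIC_B
#
#   [BRCD_FABRIC_A]
#   fc_fabric_address = 10.10.10.10
#   fc_fabric_user = admin
#   fc_southbound_protocol = REST_HTTP
# Each named stanza is loaded via brcd_fabric_opts.load_fabric_configurations()
# and kept in self.fabric_configs keyed by fabric name.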
if len(fabric_names) > 0: self.fabric_configs = fabric_opts.load_fabric_configurations( fabric_names) def get_device_mapping_from_network(self, initiator_wwn_list, target_wwn_list): """Provides the initiator/target map for available SAN contexts. Looks up nameserver of each fc SAN configured to find logged in devices and returns a map of initiator and target port WWNs for each fabric. :param initiator_wwn_list: List of initiator port WWN :param target_wwn_list: List of target port WWN :returns: List -- device wwn map in following format .. code-block:: default { : { 'initiator_port_wwn_list': ('200000051e55a100', '200000051e55a121'..) 'target_port_wwn_list': ('100000051e55a100', '100000051e55a121'..) } } :raises Exception: when connection to fabric is failed """ device_map = {} formatted_target_list = [] formatted_initiator_list = [] fabric_map = {} fabric_names = self.configuration.fc_fabric_names fabrics = None if not fabric_names: raise exception.InvalidParameterValue( err=_("Missing Fibre Channel SAN configuration " "param - fc_fabric_names")) fabrics = [x.strip() for x in fabric_names.split(',')] LOG.debug("FC Fabric List: %s", fabrics) if fabrics: for t in target_wwn_list: formatted_target_list.append(fczm_utils.get_formatted_wwn(t)) for i in initiator_wwn_list: formatted_initiator_list.append(fczm_utils. get_formatted_wwn(i)) for fabric_name in fabrics: fabric_ip = self.fabric_configs[fabric_name].safe_get( 'fc_fabric_address') # Get name server data from fabric and find the targets # logged in nsinfo = '' conn = None try: LOG.debug("Getting name server data for " "fabric %s", fabric_ip) conn = self._get_southbound_client(fabric_name) nsinfo = conn.get_nameserver_info() except exception.FCSanLookupServiceException: with excutils.save_and_reraise_exception(): LOG.error("Failed collecting name server info from" " fabric %s", fabric_ip) except Exception as e: msg = _("Connection failed " "for %(fabric)s with error: %(err)s" ) % {'fabric': fabric_ip, 'err': e} LOG.error(msg) raise exception.FCSanLookupServiceException(message=msg) finally: if conn: conn.cleanup() LOG.debug("Lookup service:nsinfo-%s", nsinfo) LOG.debug("Lookup service:initiator list from " "caller-%s", formatted_initiator_list) LOG.debug("Lookup service:target list from " "caller-%s", formatted_target_list) visible_targets = [x for x in nsinfo if x in formatted_target_list] visible_initiators = [x for x in nsinfo if x in formatted_initiator_list] if visible_targets: LOG.debug("Filtered targets is: %s", visible_targets) # getting rid of the : before returning for idx, elem in enumerate(visible_targets): elem = str(elem).replace(':', '') visible_targets[idx] = elem else: LOG.debug("No targets are in the nameserver for SAN %s", fabric_name) if visible_initiators: # getting rid of the : before returning ~sk for idx, elem in enumerate(visible_initiators): elem = str(elem).replace(':', '') visible_initiators[idx] = elem else: LOG.debug("No initiators are in the nameserver " "for SAN %s", fabric_name) fabric_map = { 'initiator_port_wwn_list': visible_initiators, 'target_port_wwn_list': visible_targets } device_map[fabric_name] = fabric_map LOG.debug("Device map for SAN context: %s", device_map) return device_map def _get_southbound_client(self, fabric): """Implementation to get SouthBound Connector. 
South bound connector will be dynamically selected based on the configuration :param fabric: fabric information """ fabric_info = self.fabric_configs[fabric] fc_ip = fabric_info.safe_get('fc_fabric_address') sb_connector = fabric_info.safe_get('fc_southbound_protocol') if sb_connector is None: sb_connector = self.configuration.brcd_sb_connector try: conn_factory = importutils.import_object( "cinder.zonemanager.drivers.brocade." "brcd_fc_zone_connector_factory." "BrcdFCZoneFactory") client = conn_factory.get_connector(fabric_info, sb_connector.upper()) except Exception: msg = _("Failed to create south bound connector for %s.") % fc_ip LOG.exception(msg) raise exception.FCZoneDriverException(msg) return client ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/zonemanager/drivers/brocade/brcd_fc_zone_client_cli.py0000664000175000017500000006127400000000000027104 0ustar00zuulzuul00000000000000# (c) Copyright 2019 Brocade, a Broadcom Company # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # """ Script to push the zone configuration to brocade SAN switches. """ import random import re from eventlet import greenthread from oslo_concurrency import processutils from oslo_log import log as logging from oslo_utils import excutils from cinder import exception from cinder.i18n import _ from cinder import ssh_utils from cinder import utils from cinder.zonemanager.drivers.brocade import exception as b_exception import cinder.zonemanager.drivers.brocade.fc_zone_constants as zone_constant LOG = logging.getLogger(__name__) class BrcdFCZoneClientCLI(object): switch_ip = None switch_port = '22' switch_user = 'admin' switch_pwd = 'none' switch_key = 'none' patrn = re.compile(r'[;\s]+') def __init__(self, ipaddress, username, password, port, key): """Initializing the client.""" self.switch_ip = ipaddress self.switch_port = port self.switch_user = username self.switch_pwd = password self.switch_key = key self.sshpool = None def get_active_zone_set(self): """Return the active zone configuration. Return active zoneset from fabric. When none of the configurations are active then it will return empty map. :returns: Map -- active zone set map in the following format .. 
code-block:: python { 'zones': {'openstack50060b0000c26604201900051ee8e329': ['50060b0000c26604', '201900051ee8e329'] }, 'active_zone_config': 'OpenStack_Cfg' } """ zone_set = {} zone = {} zone_member = None zone_name = None switch_data = None zone_set_name = None try: switch_data = self._get_switch_info( [zone_constant.GET_ACTIVE_ZONE_CFG]) except b_exception.BrocadeZoningCliException: with excutils.save_and_reraise_exception(): LOG.error("Failed getting active zone set " "from fabric %s", self.switch_ip) try: for line in switch_data: line_split = re.split('\\t', line) if len(line_split) > 2: line_split = [x.replace( '\n', '') for x in line_split] line_split = [x.replace( ' ', '') for x in line_split] if zone_constant.CFG_ZONESET in line_split: zone_set_name = line_split[1] continue if line_split[1]: zone_name = line_split[1] zone[zone_name] = list() if line_split[2]: zone_member = line_split[2] zone_member_list = zone.get(zone_name) zone_member_list.append(zone_member) zone_set[zone_constant.CFG_ZONES] = zone zone_set[zone_constant.ACTIVE_ZONE_CONFIG] = zone_set_name except Exception: # In case of parsing error here, it should be malformed cli output. msg = _("Malformed zone configuration: (switch=%(switch)s " "zone_config=%(zone_config)s)." ) % {'switch': self.switch_ip, 'zone_config': switch_data} LOG.exception(msg) raise exception.FCZoneDriverException(reason=msg) switch_data = None return zone_set def add_zones(self, zones, activate, active_zone_set=None): """Add zone configuration. This method will add the zone configuration passed by user. :param zones: zone names mapped to members. Zone members are colon separated but case-insensitive .. code-block:: python { zonename1:[zonememeber1, zonemember2,...], zonename2:[zonemember1, zonemember2,...]...} e.g: { 'openstack50060b0000c26604201900051ee8e329': ['50:06:0b:00:00:c2:66:04', '20:19:00:05:1e:e8:e3:29'] } :param activate: True/False :param active_zone_set: active zone set dict retrieved from get_active_zone_set method """ LOG.debug("Add Zones - Zones passed: %s", zones) cfg_name = None iterator_count = 0 zone_with_sep = '' if not active_zone_set: active_zone_set = self.get_active_zone_set() LOG.debug("Active zone set: %s", active_zone_set) zone_list = active_zone_set[zone_constant.CFG_ZONES] LOG.debug("zone list: %s", zone_list) for zone in zones.keys(): zone_members_with_sep = ';'.join(str(member) for member in zones[zone]) LOG.debug("Forming command for create zone") cmd = 'zonecreate "%(zone)s", "%(zone_members_with_sep)s"' % { 'zone': zone, 'zone_members_with_sep': zone_members_with_sep} LOG.debug("Creating zone, cmd to run %s", cmd) self.apply_zone_change(cmd.split()) if iterator_count > 0: zone_with_sep += ';' iterator_count += 1 zone_with_sep += zone if not zone_with_sep: return try: # If zone_list exists, there are active zones, # so add new zone to existing active config. # Otherwise, create the zone config. 
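# Illustrative command sequence for one new zone (zone name and WWNs taken
# from the docstring example above):
#   zonecreate "openstack50060b0000c26604201900051ee8e329", "50:06:0b:00:00:c2:66:04;20:19:00:05:1e:e8:e3:29"
#   cfgadd "OpenStack_Cfg", "openstack50060b0000c26604201900051ee8e329"    (an active config exists)
#   cfgcreate "OpenStack_Cfg", "openstack50060b0000c26604201900051ee8e329" (no active config yet)
# followed by activate_zoneset(cfg_name) when activate=True, otherwise
# _cfg_save().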
if zone_list: cfg_name = active_zone_set[zone_constant.ACTIVE_ZONE_CONFIG] else: cfg_name = None cmd = None if not cfg_name: cfg_name = zone_constant.OPENSTACK_CFG_NAME cmd = 'cfgcreate "%(zoneset)s", "%(zones)s"' \ % {'zoneset': cfg_name, 'zones': zone_with_sep} else: cmd = 'cfgadd "%(zoneset)s", "%(zones)s"' \ % {'zoneset': cfg_name, 'zones': zone_with_sep} LOG.debug("Zone config cmd to run %s", cmd) self.apply_zone_change(cmd.split()) if activate: self.activate_zoneset(cfg_name) else: self._cfg_save() except Exception as e: self._cfg_trans_abort() msg = _("Creating and activating zone set failed: " "(Zone set=%(cfg_name)s error=%(err)s)." ) % {'cfg_name': cfg_name, 'err': str(e)} LOG.error(msg) raise b_exception.BrocadeZoningCliException(reason=msg) def update_zones(self, zones, activate, operation, active_zone_set=None): """Update the zone configuration. This method will update the zone configuration passed by user. :param zones: zone names mapped to members. Zone members are colon separated but case-insensitive .. code-block:: python { zonename1:[zonememeber1, zonemember2,...], zonename2:[zonemember1, zonemember2,...]...} e.g: { 'openstack50060b0000c26604201900051ee8e329': ['50:06:0b:00:00:c2:66:04', '20:19:00:05:1e:e8:e3:29'] } :param activate: True/False :param operation: zone add or zone remove :param active_zone_set: active zone set dict retrieved from get_active_zone_set method """ LOG.debug("Update Zones - Zones passed: %s", zones) cfg_name = None iterator_count = 0 zone_with_sep = '' if not active_zone_set: active_zone_set = self.get_active_zone_set() LOG.debug("Active zone set: %s", active_zone_set) zone_list = active_zone_set[zone_constant.CFG_ZONES] LOG.debug("Active zone list: %s", zone_list) for zone in zones.keys(): zone_members_with_sep = ';'.join(str(member) for member in zones[zone]) cmd = '%(operation)s "%(zone)s", "%(zone_members_with_sep)s"' % { 'operation': operation, 'zone': zone, 'zone_members_with_sep': zone_members_with_sep} LOG.debug("Updating zone, cmd to run %s", cmd) self.apply_zone_change(cmd.split()) if iterator_count > 0: zone_with_sep += ';' iterator_count += 1 zone_with_sep += zone if not zone_with_sep: return try: cfg_name = active_zone_set[zone_constant.ACTIVE_ZONE_CONFIG] if activate: self.activate_zoneset(cfg_name) else: self._cfg_save() except Exception as e: self._cfg_trans_abort() msg = _("Activating zone set failed: " "(Zone set=%(cfg_name)s error=%(err)s)." ) % {'cfg_name': cfg_name, 'err': str(e)} LOG.error(msg) raise b_exception.BrocadeZoningCliException(reason=msg) def activate_zoneset(self, cfgname): """Method to Activate the zone config. Param cfgname - ZonesetName.""" cmd_list = [zone_constant.ACTIVATE_ZONESET, cfgname] return self._ssh_execute(cmd_list, True, 1) def deactivate_zoneset(self): """Method to deActivate the zone config.""" return self._ssh_execute([zone_constant.DEACTIVATE_ZONESET], True, 1) def delete_zones(self, zone_names, activate, active_zone_set=None): """Delete zones from fabric. 
Method to delete the active zone config zones :param zone_names: zoneNames separated by semicolon :param activate: True/False :param active_zone_set: the active zone set dict retrieved from get_active_zone_set method """ active_zoneset_name = None zone_list = [] if not active_zone_set: active_zone_set = self.get_active_zone_set() active_zoneset_name = active_zone_set[ zone_constant.ACTIVE_ZONE_CONFIG] zone_list = active_zone_set[zone_constant.CFG_ZONES] zones = self.patrn.split(''.join(zone_names)) cmd = None try: if len(zones) == len(zone_list): self.deactivate_zoneset() cmd = 'cfgdelete "%(active_zoneset_name)s"' \ % {'active_zoneset_name': active_zoneset_name} # Active zoneset is being deleted, hence reset activate flag activate = False else: cmd = 'cfgremove "%(active_zoneset_name)s", "%(zone_names)s"' \ % {'active_zoneset_name': active_zoneset_name, 'zone_names': zone_names } LOG.debug("Delete zones: Config cmd to run: %s", cmd) self.apply_zone_change(cmd.split()) for zone in zones: self._zone_delete(zone) if activate: self.activate_zoneset(active_zoneset_name) else: self._cfg_save() except Exception as e: msg = _("Deleting zones failed: (command=%(cmd)s error=%(err)s)." ) % {'cmd': cmd, 'err': str(e)} LOG.error(msg) self._cfg_trans_abort() raise b_exception.BrocadeZoningCliException(reason=msg) def get_nameserver_info(self): """Get name server data from fabric. This method will return the connected node port wwn list(local and remote) for the given switch fabric """ cli_output = None return_list = [] try: cmd = '%(nsshow)s;%(nscamshow)s' % { 'nsshow': zone_constant.NS_SHOW, 'nscamshow': zone_constant.NS_CAM_SHOW} cli_output = self._get_switch_info([cmd]) except b_exception.BrocadeZoningCliException: with excutils.save_and_reraise_exception(): LOG.error("Failed collecting nsshow " "info for fabric %s", self.switch_ip) if (cli_output): return_list = self._parse_ns_output(cli_output) cli_output = None return return_list def _cfg_save(self): self._ssh_execute([zone_constant.CFG_SAVE], True, 1) def _zone_delete(self, zone_name): cmd = 'zonedelete "%(zone_name)s"' % {'zone_name': zone_name} self.apply_zone_change(cmd.split()) def _cfg_trans_abort(self): if self._is_trans_abortable(): self.apply_zone_change([zone_constant.CFG_ZONE_TRANS_ABORT]) def _is_trans_abortable(self): stdout, stderr = None, None stdout, stderr = self._run_ssh( [zone_constant.CFG_SHOW_TRANS], True, 1) output = stdout.splitlines() is_abortable = False for line in output: if zone_constant.TRANS_ABORTABLE in line: is_abortable = True break if stderr: msg = _("Error while checking transaction status: %s") % stderr raise b_exception.BrocadeZoningCliException(reason=msg) else: return is_abortable def apply_zone_change(self, cmd_list): """Execute zoning cli with no status update. Executes CLI commands such as addZone where status return is not expected. """ stdout, stderr = None, None LOG.debug("Executing command via ssh: %s", cmd_list) stdout, stderr = self._run_ssh(cmd_list, True, 1) # no output expected, so output means there is an error if stdout: msg = _("Error while running zoning CLI: (command=%(cmd)s " "error=%(err)s).") % {'cmd': cmd_list, 'err': stdout} LOG.error(msg) self._cfg_trans_abort() raise b_exception.BrocadeZoningCliException(reason=msg) def is_supported_firmware(self): """Check firmware version is v6.4 or higher. This API checks if the firmware version per the plug-in support level. This only checks major and minor version. 
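For example (illustrative output), a line containing 'Fabric OS: v8.2.1'
parses to 82 and passes the > 63 check, while 'Fabric OS: v6.3.2' parses
to 63 and is reported as unsupported.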
""" cmd = ['version'] firmware = 0 try: stdout, stderr = self._execute_shell_cmd(cmd) if (stdout): for line in stdout: if 'Fabric OS: v' in line: LOG.debug("Firmware version string: %s", line) ver = line.split('Fabric OS: v')[1].split('.') if (ver): firmware = int(ver[0] + ver[1]) return firmware > 63 else: LOG.error("No CLI output for firmware version check") return False except processutils.ProcessExecutionError as e: msg = _("Error while getting data via ssh: (command=%(cmd)s " "error=%(err)s).") % {'cmd': cmd, 'err': str(e)} LOG.error(msg) raise b_exception.BrocadeZoningCliException(reason=msg) def _get_switch_info(self, cmd_list): stdout, stderr, sw_data = None, None, None try: stdout, stderr = self._run_ssh(cmd_list, True, 1) if (stdout): sw_data = stdout.splitlines() return sw_data except processutils.ProcessExecutionError as e: msg = _("Error while getting data via ssh: (command=%(cmd)s " "error=%(err)s).") % {'cmd': cmd_list, 'err': str(e)} LOG.error(msg) raise b_exception.BrocadeZoningCliException(reason=msg) def _parse_ns_output(self, switch_data): """Parses name server data. Parses nameserver raw data and adds the device port wwns to the list :returns: List -- list of device port wwn from ns info """ return_list = [] for line in switch_data: if not (" NL " in line or " N " in line): continue linesplit = line.split(';') if len(linesplit) > 2: node_port_wwn = linesplit[2] return_list.append(node_port_wwn) else: msg = _("Malformed nameserver string: %s") % line LOG.error(msg) raise exception.InvalidParameterValue(err=msg) return return_list def _run_ssh(self, cmd_list, check_exit_code=True, attempts=1): # TODO(skolathur): Need to implement ssh_injection check # currently, the check will fail for zonecreate command # as zone members are separated by ';'which is a danger char command = ' '. join(cmd_list) if not self.sshpool: self.sshpool = ssh_utils.SSHPool(self.switch_ip, self.switch_port, None, self.switch_user, self.switch_pwd, self.switch_key, min_size=1, max_size=5) last_exception = None try: with self.sshpool.item() as ssh: while attempts > 0: attempts -= 1 try: return processutils.ssh_execute( ssh, command, check_exit_code=check_exit_code) except Exception as e: LOG.exception('Error executing SSH command.') last_exception = e greenthread.sleep(random.randint(20, 500) / 100.0) try: raise processutils.ProcessExecutionError( exit_code=last_exception.exit_code, stdout=last_exception.stdout, stderr=last_exception.stderr, cmd=last_exception.cmd) except AttributeError: raise processutils.ProcessExecutionError( exit_code=-1, stdout="", stderr="Error running SSH command", cmd=command) except Exception: with excutils.save_and_reraise_exception(): LOG.error("Error running SSH command: %s", command) def _ssh_execute(self, cmd_list, check_exit_code=True, attempts=1): """Execute cli with status update. Executes CLI commands such as cfgsave where status return is expected. """ utils.check_ssh_injection(cmd_list) command = ' '. 
join(cmd_list) if not self.sshpool: self.sshpool = ssh_utils.SSHPool(self.switch_ip, self.switch_port, None, self.switch_user, self.switch_pwd, self.switch_key, min_size=1, max_size=5) stdin, stdout, stderr = None, None, None LOG.debug("Executing command via ssh: %s", command) last_exception = None try: with self.sshpool.item() as ssh: while attempts > 0: attempts -= 1 try: stdin, stdout, stderr = ssh.exec_command(command) stdin.write("%s\n" % zone_constant.YES) channel = stdout.channel exit_status = channel.recv_exit_status() LOG.debug("Exit Status from ssh: %s", exit_status) # exit_status == -1 if no exit code was returned if exit_status != -1: LOG.debug('Result was %s', exit_status) if check_exit_code and exit_status != 0: raise processutils.ProcessExecutionError( exit_code=exit_status, stdout=stdout, stderr=stderr, cmd=command) else: return True else: return True except Exception as e: LOG.exception('Error executing SSH command.') last_exception = e greenthread.sleep(random.randint(20, 500) / 100.0) LOG.debug("Handling error case after " "SSH: %s", last_exception) try: raise processutils.ProcessExecutionError( exit_code=last_exception.exit_code, stdout=last_exception.stdout, stderr=last_exception.stderr, cmd=last_exception.cmd) except AttributeError: raise processutils.ProcessExecutionError( exit_code=-1, stdout="", stderr="Error running SSH command", cmd=command) except Exception as e: with excutils.save_and_reraise_exception(): LOG.error("Error executing command via ssh: %s", e) finally: if stdin: stdin.flush() stdin.close() if stdout: stdout.close() if stderr: stderr.close() def _execute_shell_cmd(self, cmd): """Run command over shell for older firmware versions. Invokes shell and issue the command and return the output. This is primarily used for issuing read commands when we are not sure if the firmware supports exec_command. """ utils.check_ssh_injection(cmd) command = ' '. join(cmd) stdout, stderr = None, None if not self.sshpool: self.sshpool = ssh_utils.SSHPool(self.switch_ip, self.switch_port, None, self.switch_user, self.switch_pwd, self.switch_key, min_size=1, max_size=5) with self.sshpool.item() as ssh: LOG.debug('Running cmd (SSH): %s', command) channel = ssh.invoke_shell() stdin_stream = channel.makefile('wb') stdout_stream = channel.makefile('rb') stderr_stream = channel.makefile('rb') stdin_stream.write('''%s exit ''' % command) stdin_stream.flush() stdout = stdout_stream.readlines() stderr = stderr_stream.readlines() stdin_stream.close() stdout_stream.close() stderr_stream.close() exit_status = channel.recv_exit_status() # exit_status == -1 if no exit code was returned if exit_status != -1: LOG.debug('Result was %s', exit_status) if exit_status != 0: LOG.debug("command %s failed", command) raise processutils.ProcessExecutionError( exit_code=exit_status, stdout=stdout, stderr=stderr, cmd=command) try: channel.close() except Exception: LOG.exception('Error closing channel.') LOG.debug("_execute_cmd: stdout to return: %s", stdout) LOG.debug("_execute_cmd: stderr to return: %s", stderr) return (stdout, stderr) def cleanup(self): self.sshpool = None ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/zonemanager/drivers/brocade/brcd_fc_zone_connector_factory.py0000664000175000017500000000726600000000000030521 0ustar00zuulzuul00000000000000# (c) Copyright 2019 Brocade, a Broadcom Company # All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # """ Brocade Zone Connector Factory is responsible to dynamically create the connection object based on the configuration """ from oslo_log import log as logging from oslo_utils import importutils from cinder.zonemanager.drivers.brocade import fc_zone_constants LOG = logging.getLogger(__name__) class BrcdFCZoneFactory(object): def __init__(self): self.sb_conn_map = {} def get_connector(self, fabric, sb_connector): """Returns Device Connector. Factory method to create and return correct SB connector object based on the protocol """ fabric_ip = fabric.safe_get('fc_fabric_address') client = self.sb_conn_map.get(fabric_ip) if not client: fabric_user = fabric.safe_get('fc_fabric_user') fabric_pwd = fabric.safe_get('fc_fabric_password') fabric_port = fabric.safe_get('fc_fabric_port') fc_vfid = fabric.safe_get('fc_virtual_fabric_id') fabric_ssh_cert_path = fabric.safe_get('fc_fabric_ssh_cert_path') LOG.debug("Client not found. Creating connection client for" " %(ip)s with %(connector)s protocol " "for the user %(user)s at port %(port)s.", {'ip': fabric_ip, 'connector': sb_connector, 'user': fabric_user, 'port': fabric_port, 'vf_id': fc_vfid}) if sb_connector.lower() in (fc_zone_constants.REST_HTTP, fc_zone_constants.REST_HTTPS): client = importutils.import_object( "cinder.zonemanager.drivers.brocade." "brcd_rest_fc_zone_client.BrcdRestFCZoneClient", ipaddress=fabric_ip, username=fabric_user, password=fabric_pwd, port=fabric_port, vfid=fc_vfid, protocol=sb_connector ) elif sb_connector.lower() in (fc_zone_constants.HTTP, fc_zone_constants.HTTPS): client = importutils.import_object( "cinder.zonemanager.drivers.brocade." "brcd_http_fc_zone_client.BrcdHTTPFCZoneClient", ipaddress=fabric_ip, username=fabric_user, password=fabric_pwd, port=fabric_port, vfid=fc_vfid, protocol=sb_connector ) else: client = importutils.import_object( "cinder.zonemanager.drivers.brocade." "brcd_fc_zone_client_cli.BrcdFCZoneClientCLI", ipaddress=fabric_ip, username=fabric_user, password=fabric_pwd, key=fabric_ssh_cert_path, port=fabric_port ) self.sb_conn_map.update({fabric_ip: client}) return client ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/zonemanager/drivers/brocade/brcd_fc_zone_driver.py0000664000175000017500000005461700000000000026275 0ustar00zuulzuul00000000000000# (c) Copyright 2019 Brocade, a Broadcom Company # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
# """ Brocade Zone Driver is responsible to manage access control using FC zoning for Brocade FC fabrics. This is a concrete implementation of FCZoneDriver interface implementing add_connection and delete_connection interfaces. **Related Flags** :zone_activate: Used by: class: 'FCZoneDriver'. Defaults to True :zone_name_prefix: Used by: class: 'FCZoneDriver'. Defaults to 'openstack' """ import string from oslo_concurrency import lockutils from oslo_config import cfg from oslo_log import log as logging from oslo_utils import excutils from oslo_utils import importutils from cinder import exception from cinder.i18n import _ from cinder import interface from cinder.zonemanager.drivers.brocade import brcd_fabric_opts as fabric_opts from cinder.zonemanager.drivers.brocade import exception as b_exception from cinder.zonemanager.drivers.brocade import fc_zone_constants from cinder.zonemanager.drivers import driver_utils from cinder.zonemanager.drivers import fc_zone_driver from cinder.zonemanager import utils LOG = logging.getLogger(__name__) SUPPORTED_CHARS = string.ascii_letters + string.digits + '_' brcd_opts = [ cfg.StrOpt('brcd_sb_connector', default=fc_zone_constants.HTTP.upper(), help='South bound connector for zoning operation'), ] CONF = cfg.CONF CONF.register_opts(brcd_opts, group='fc-zone-manager') @interface.fczmdriver class BrcdFCZoneDriver(fc_zone_driver.FCZoneDriver): """Brocade FC zone driver implementation. OpenStack Fibre Channel zone driver to manage FC zoning in Brocade SAN fabrics. .. code-block:: none Version history: 1.0 - Initial Brocade FC zone driver 1.1 - Implements performance enhancements 1.2 - Added support for friendly zone name 1.3 - Added HTTP connector support 1.4 - Adds support to zone in Virtual Fabrics 1.5 - Initiator zoning updates through zoneadd/zoneremove 1.6 - Add REST connector """ VERSION = "1.6" # ThirdPartySystems wiki page CI_WIKI_NAME = "Brocade_OpenStack_CI" # TODO(smcginnis) Evaluate removing plans once we get to the V release SUPPORTED = False def __init__(self, **kwargs): super(BrcdFCZoneDriver, self).__init__(**kwargs) self.sb_conn_map = {} self.configuration = kwargs.get('configuration', None) if self.configuration: self.configuration.append_config_values(brcd_opts) # Adding a hack to handle parameters from super classes # in case configured with multiple back ends. fabric_names = self.configuration.safe_get('fc_fabric_names') base_san_opts = [] if not fabric_names: base_san_opts.append( cfg.StrOpt('fc_fabric_names', help='Comma separated list of fibre channel ' 'fabric names. This list of names is used to' ' retrieve other SAN credentials for connecting' ' to each SAN fabric' )) if len(base_san_opts) > 0: CONF.register_opts(base_san_opts) self.configuration.append_config_values(base_san_opts) fc_fabric_names = self.configuration.fc_fabric_names fabric_names = [x.strip() for x in fc_fabric_names.split(',')] # There can be more than one SAN in the network and we need to # get credentials for each SAN. if fabric_names: self.fabric_configs = fabric_opts.load_fabric_configurations( fabric_names) @staticmethod def get_driver_options(): return fabric_opts.brcd_zone_opts + brcd_opts @lockutils.synchronized('brcd', 'fcfabric-', True) def add_connection(self, fabric, initiator_target_map, host_name=None, storage_system=None): """Concrete implementation of add_connection. Based on zoning policy and state of each I-T pair, list of zone members are created and pushed to the fabric to add zones. 
The new zones created or zones updated are activated based on isActivate flag set in cinder.conf returned by volume driver after attach operation. :param fabric: Fabric name from cinder.conf file :param initiator_target_map: Mapping of initiator to list of targets """ LOG.info("BrcdFCZoneDriver - Add connection for fabric " "%(fabric)s for I-T map: %(i_t_map)s", {'fabric': fabric, 'i_t_map': initiator_target_map}) zoning_policy = self.configuration.zoning_policy zoning_policy_fab = self.fabric_configs[fabric].safe_get( 'zoning_policy') zone_name_prefix = self.fabric_configs[fabric].safe_get( 'zone_name_prefix') zone_activate = self.fabric_configs[fabric].safe_get( 'zone_activate') if zoning_policy_fab: zoning_policy = zoning_policy_fab LOG.info("Zoning policy for Fabric %(policy)s", {'policy': zoning_policy}) if (zoning_policy != 'initiator' and zoning_policy != 'initiator-target'): LOG.info("Zoning policy is not valid, " "no zoning will be performed.") return client = self._get_southbound_client(fabric) cfgmap_from_fabric = self._get_active_zone_set(client) zone_names = [] if cfgmap_from_fabric.get('zones'): zone_names = cfgmap_from_fabric['zones'].keys() # based on zoning policy, create zone member list and # push changes to fabric. for initiator_key in initiator_target_map.keys(): zone_map = {} zone_update_map = {} initiator = initiator_key.lower() target_list = initiator_target_map[initiator_key] if zoning_policy == 'initiator-target': for target in target_list: zone_members = [utils.get_formatted_wwn(initiator), utils.get_formatted_wwn(target)] zone_name = driver_utils.get_friendly_zone_name( zoning_policy, initiator, target, host_name, storage_system, zone_name_prefix, SUPPORTED_CHARS) if (len(cfgmap_from_fabric) == 0 or ( zone_name not in zone_names)): zone_map[zone_name] = zone_members else: # This is I-T zoning, skip if zone already exists. LOG.info("Zone exists in I-T mode. Skipping " "zone creation for %(zonename)s", {'zonename': zone_name}) elif zoning_policy == 'initiator': zone_members = [utils.get_formatted_wwn(initiator)] for target in target_list: zone_members.append(utils.get_formatted_wwn(target)) zone_name = driver_utils.get_friendly_zone_name( zoning_policy, initiator, target, host_name, storage_system, zone_name_prefix, SUPPORTED_CHARS) # If zone exists, then do a zoneadd to update # the zone members in the existing zone. Otherwise, # do a zonecreate to create a new zone. if len(zone_names) > 0 and (zone_name in zone_names): # Verify that the target WWNs are not already members # of the existing zone. If so, remove them from the # list of members to add, otherwise error will be # returned from the switch. 
for t in target_list: if t in cfgmap_from_fabric['zones'][zone_name]: zone_members.remove(utils.get_formatted_wwn(t)) if zone_members: zone_update_map[zone_name] = zone_members else: zone_map[zone_name] = zone_members LOG.info("Zone map to create: %(zonemap)s", {'zonemap': zone_map}) LOG.info("Zone map to update: %(zone_update_map)s", {'zone_update_map': zone_update_map}) try: if zone_map: client.add_zones(zone_map, zone_activate, cfgmap_from_fabric) LOG.debug("Zones created successfully: %(zonemap)s", {'zonemap': zone_map}) if zone_update_map: client.update_zones(zone_update_map, zone_activate, fc_zone_constants.ZONE_ADD, cfgmap_from_fabric) LOG.debug("Zones updated successfully: %(updatemap)s", {'updatemap': zone_update_map}) except (b_exception.BrocadeZoningCliException, b_exception.BrocadeZoningHttpException, b_exception.BrocadeZoningRestException) as brocade_ex: raise exception.FCZoneDriverException(brocade_ex) except Exception: msg = _("Failed to add or update zoning configuration.") LOG.exception(msg) raise exception.FCZoneDriverException(msg) finally: client.cleanup() @lockutils.synchronized('brcd', 'fcfabric-', True) def delete_connection(self, fabric, initiator_target_map, host_name=None, storage_system=None): """Concrete implementation of delete_connection. Based on zoning policy and state of each I-T pair, list of zones are created for deletion. The zones are either updated deleted based on the policy and attach/detach state of each I-T pair. :param fabric: Fabric name from cinder.conf file :param initiator_target_map: Mapping of initiator to list of targets """ LOG.info("BrcdFCZoneDriver - Delete connection for fabric " "%(fabric)s for I-T map: %(i_t_map)s", {'fabric': fabric, 'i_t_map': initiator_target_map}) zoning_policy = self.configuration.zoning_policy zoning_policy_fab = self.fabric_configs[fabric].safe_get( 'zoning_policy') zone_name_prefix = self.fabric_configs[fabric].safe_get( 'zone_name_prefix') zone_activate = self.fabric_configs[fabric].safe_get( 'zone_activate') if zoning_policy_fab: zoning_policy = zoning_policy_fab LOG.info("Zoning policy for fabric %(policy)s", {'policy': zoning_policy}) conn = self._get_southbound_client(fabric) cfgmap_from_fabric = self._get_active_zone_set(conn) zone_names = [] if cfgmap_from_fabric.get('zones'): zone_names = cfgmap_from_fabric['zones'].keys() # Based on zoning policy, get zone member list and push changes to # fabric. This operation could result in an update for zone config # with new member list or deleting zones from active cfg. LOG.debug("zone config from Fabric: %(cfgmap)s", {'cfgmap': cfgmap_from_fabric}) for initiator_key in initiator_target_map.keys(): initiator = initiator_key.lower() formatted_initiator = utils.get_formatted_wwn(initiator) zone_map = {} zones_to_delete = [] t_list = initiator_target_map[initiator_key] if zoning_policy == 'initiator-target': # In this case, zone needs to be deleted. for t in t_list: target = t.lower() zone_name = driver_utils.get_friendly_zone_name( zoning_policy, initiator, target, host_name, storage_system, zone_name_prefix, SUPPORTED_CHARS) LOG.debug("Zone name to delete: %(zonename)s", {'zonename': zone_name}) if len(zone_names) > 0 and (zone_name in zone_names): # delete zone. 
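# Note: with initiator-target zoning each I-T pair gets its own zone,
# so a detach removes the whole zone rather than editing its members.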
LOG.debug("Added zone to delete to list: %(zonename)s", {'zonename': zone_name}) zones_to_delete.append(zone_name) elif zoning_policy == 'initiator': zone_members = [formatted_initiator] for t in t_list: target = t.lower() zone_members.append(utils.get_formatted_wwn(target)) zone_name = driver_utils.get_friendly_zone_name( zoning_policy, initiator, target, host_name, storage_system, zone_name_prefix, SUPPORTED_CHARS) if (zone_names and (zone_name in zone_names)): # Check to see if there are other zone members # in the zone besides the initiator and # the targets being removed. has_members = any( x for x in cfgmap_from_fabric['zones'][zone_name] if x not in zone_members) # If there are other zone members, proceed with # zone update to remove the targets. Otherwise, # delete the zone. if has_members: zone_members.remove(formatted_initiator) # Verify that the zone members in target list # are listed in zone definition. If not, remove # the zone members from the list of members # to remove, otherwise switch will return error. zm_list = cfgmap_from_fabric['zones'][zone_name] for t in t_list: formatted_target = utils.get_formatted_wwn(t) if formatted_target not in zm_list: zone_members.remove(formatted_target) if zone_members: LOG.debug("Zone members to remove: " "%(members)s", {'members': zone_members}) zone_map[zone_name] = zone_members else: zones_to_delete.append(zone_name) else: LOG.warning("Zoning policy not recognized: %(policy)s", {'policy': zoning_policy}) LOG.debug("Zone map to update: %(zonemap)s", {'zonemap': zone_map}) LOG.debug("Zone list to delete: %(zones)s", {'zones': zones_to_delete}) try: # Update zone membership. if zone_map: conn.update_zones(zone_map, zone_activate, fc_zone_constants.ZONE_REMOVE, cfgmap_from_fabric) # Delete zones if zones_to_delete: zone_name_string = '' num_zones = len(zones_to_delete) for i in range(0, num_zones): if i == 0: zone_name_string = ( '%s%s' % ( zone_name_string, zones_to_delete[i])) else: zone_name_string = '%s;%s' % ( zone_name_string, zones_to_delete[i]) conn.delete_zones( zone_name_string, zone_activate, cfgmap_from_fabric) except (b_exception.BrocadeZoningCliException, b_exception.BrocadeZoningHttpException, b_exception.BrocadeZoningRestException) as brocade_ex: raise exception.FCZoneDriverException(brocade_ex) except Exception: msg = _("Failed to update or delete zoning " "configuration.") LOG.exception(msg) raise exception.FCZoneDriverException(msg) finally: conn.cleanup() def get_san_context(self, target_wwn_list): """Lookup SAN context for visible end devices. Look up each SAN configured and return a map of SAN (fabric IP) to list of target WWNs visible to the fabric. """ formatted_target_list = [] fabric_map = {} fc_fabric_names = self.configuration.fc_fabric_names fabrics = [x.strip() for x in fc_fabric_names.split(',')] LOG.debug("Fabric List: %(fabrics)s", {'fabrics': fabrics}) LOG.debug("Target WWN list: %(targetwwns)s", {'targetwwns': target_wwn_list}) if len(fabrics) > 0: for t in target_wwn_list: formatted_target_list.append(utils.get_formatted_wwn(t)) LOG.debug("Formatted target WWN list: %(targetlist)s", {'targetlist': formatted_target_list}) for fabric_name in fabrics: conn = self._get_southbound_client(fabric_name) # Get name server data from fabric and get the targets # logged in. 
nsinfo = None try: nsinfo = conn.get_nameserver_info() LOG.debug("Name server info from fabric: %(nsinfo)s", {'nsinfo': nsinfo}) except (b_exception.BrocadeZoningCliException, b_exception.BrocadeZoningHttpException): if not conn.is_supported_firmware(): msg = _("Unsupported firmware on switch %s. Make sure " "switch is running firmware v6.4 or higher" ) % conn.switch_ip LOG.exception(msg) raise exception.FCZoneDriverException(msg) with excutils.save_and_reraise_exception(): LOG.exception("Error getting name server info.") except Exception: msg = _("Failed to get name server info.") LOG.exception(msg) raise exception.FCZoneDriverException(msg) finally: conn.cleanup() visible_targets = [x for x in nsinfo if x in formatted_target_list] if visible_targets: LOG.info("Filtered targets for SAN is: %(targets)s", {'targets': visible_targets}) # getting rid of the ':' before returning for idx, elem in enumerate(visible_targets): visible_targets[idx] = str( visible_targets[idx]).replace(':', '') fabric_map[fabric_name] = visible_targets else: LOG.debug("No targets found in the nameserver " "for fabric: %(fabric)s", {'fabric': fabric_name}) LOG.debug("Return SAN context output: %(fabricmap)s", {'fabricmap': fabric_map}) return fabric_map def _get_active_zone_set(self, conn): cfgmap = None try: cfgmap = conn.get_active_zone_set() except (b_exception.BrocadeZoningCliException, b_exception.BrocadeZoningHttpException): if not conn.is_supported_firmware(): msg = _("Unsupported firmware on switch %s. Make sure " "switch is running firmware v6.4 or higher" ) % conn.switch_ip LOG.error(msg) raise exception.FCZoneDriverException(msg) with excutils.save_and_reraise_exception(): LOG.exception("Error getting name server info.") except Exception as e: msg = (_("Failed to retrieve active zoning configuration %s") % str(e)) LOG.error(msg) raise exception.FCZoneDriverException(msg) LOG.debug("Active zone set from fabric: %(cfgmap)s", {'cfgmap': cfgmap}) return cfgmap def _get_southbound_client(self, fabric): """Implementation to get SouthBound Connector. South bound connector will be dynamically selected based on the configuration :param fabric: fabric information """ fabric_info = self.fabric_configs[fabric] fc_ip = fabric_info.safe_get('fc_fabric_address') sb_connector = fabric_info.safe_get('fc_southbound_protocol') if sb_connector is None: sb_connector = self.configuration.brcd_sb_connector try: conn_factory = importutils.import_object( "cinder.zonemanager.drivers.brocade." "brcd_fc_zone_connector_factory." "BrcdFCZoneFactory") client = conn_factory.get_connector(fabric_info, sb_connector.upper()) except Exception: msg = _("Failed to create south bound connector for %s.") % fc_ip LOG.exception(msg) raise exception.FCZoneDriverException(msg) return client ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/zonemanager/drivers/brocade/brcd_http_fc_zone_client.py0000664000175000017500000012242600000000000027311 0ustar00zuulzuul00000000000000# (c) Copyright 2019 Brocade, a Broadcom Company # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # """ Brocade south bound connector to communicate with switch using HTTP or HTTPS protocol. """ import time from oslo_log import log as logging from oslo_serialization import base64 from oslo_utils import encodeutils import requests from cinder.i18n import _ from cinder.zonemanager.drivers.brocade import exception as b_exception import cinder.zonemanager.drivers.brocade.fc_zone_constants as zone_constant LOG = logging.getLogger(__name__) class BrcdHTTPFCZoneClient(object): def __init__(self, ipaddress, username, password, port, vfid, protocol): """Initializing the client with the parameters passed. Creates authentication token and authenticate with switch to ensure the credentials are correct and change the VF context. :param ipaddress: IP Address of the device. :param username: User id to login. :param password: User password. :param port: Device Communication port :param vfid: Virtual Fabric ID. :param protocol: Communication Protocol. """ self.switch_ip = ipaddress self.switch_user = username self.switch_pwd = password self.protocol = protocol self.vfid = vfid self.cfgs = {} self.zones = {} self.alias = {} self.qlps = {} self.ifas = {} self.active_cfg = '' self.parsed_raw_zoneinfo = "" self.random_no = '' self.auth_version = '' self.session = None # Create and assign the authentication header based on the credentials self.auth_header = self.create_auth_token() # Authenticate with the switch # If authenticated successfully, save the auth status and # create auth header for future communication with the device. self.is_auth, self.auth_header = self.authenticate() self.check_change_vf_context() def connect(self, requestType, requestURL, payload='', header=None): """Connect to the switch using HTTP/HTTPS protocol. 
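A minimal illustrative use (``GET_METHOD``, ``SWITCH_PAGE`` and
``AUTH_HEADER`` are the same constants used by other methods in this
class):

.. code-block:: python

    response = self.connect(zone_constant.GET_METHOD,
                            zone_constant.SWITCH_PAGE,
                            header={zone_constant.AUTH_HEADER:
                                    self.auth_header})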
:param requestType: Connection Request method :param requestURL: Connection URL :param payload: Data to send with POST request :param header: Request Headers :returns: HTTP response data :raises BrocadeZoningHttpException: """ try: if header is None: header = {} header.update({"User-Agent": "OpenStack Zone Driver"}) # Ensure only one connection is made throughout the life cycle protocol = zone_constant.HTTP if self.protocol == zone_constant.PROTOCOL_HTTPS: protocol = zone_constant.HTTPS if self.session is None: self.session = requests.Session() adapter = requests.adapters.HTTPAdapter(pool_connections=1, pool_maxsize=1) self.session.mount(protocol + '://', adapter) url = '%s://%s%s' % (protocol, self.switch_ip, requestURL) response = None if requestType == zone_constant.GET_METHOD: response = self.session.get(url, headers=(header), verify=False) elif requestType == zone_constant.POST_METHOD: response = self.session.post(url, payload, headers=(header), verify=False) # Throw exception when response status is not OK if response.status_code != zone_constant.STATUS_OK: msg = _("Error while querying page %(url)s on the switch, " "reason %(error)s.") % {'url': url, 'error': response.reason} raise b_exception.BrocadeZoningHttpException(msg) else: return response.text except requests.exceptions.ConnectionError as e: msg = (_("Error while connecting the switch %(switch_id)s " "with protocol %(protocol)s. Error: %(error)s.") % {'switch_id': self.switch_ip, 'protocol': self.protocol, 'error': str(e)}) LOG.error(msg) raise b_exception.BrocadeZoningHttpException(reason=msg) except b_exception.BrocadeZoningHttpException as ex: msg = (_("Unexpected status code from the switch %(switch_id)s " "with protocol %(protocol)s for url %(page)s. " "Error: %(error)s") % {'switch_id': self.switch_ip, 'protocol': self.protocol, 'page': requestURL, 'error': str(ex)}) LOG.error(msg) raise b_exception.BrocadeZoningHttpException(reason=msg) def create_auth_token(self): """Create the authentication token. Creates the authentication token to use in the authentication header return authentication header (Base64(username:password:random no)). :returns: Authentication Header :raises BrocadeZoningHttpException: """ try: # Send GET request to secinfo.html to get random number response = self.connect(zone_constant.GET_METHOD, zone_constant.SECINFO_PAGE) parsed_data = self.get_parsed_data(response, zone_constant.SECINFO_BEGIN, zone_constant.SECINFO_END) # Get the auth version for 8.1.0b+ switches self.auth_version = self.get_nvp_value(parsed_data, zone_constant.AUTHVERSION) if self.auth_version == "1": # Extract the random no from secinfo.html response self.random_no = self.get_nvp_value(parsed_data, zone_constant.RANDOM) # Form the authentication string auth_string = '%s:%s:%s' % (self.switch_user, self.switch_pwd, self.random_no) else: auth_string = '%s:%s' % (self.switch_user, self.switch_pwd) auth_token = base64.encode_as_text(auth_string).strip() auth_header = (zone_constant.AUTH_STRING + auth_token) # Build the proper header except Exception as e: msg = (_("Error while creating authentication token: %s") % str(e)) LOG.error(msg) raise b_exception.BrocadeZoningHttpException(reason=msg) return auth_header def authenticate(self): """Authenticate with the switch. Returns authentication status with modified authentication header (Base64(username:xxx:random no)). 
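Illustrative sketch of how the returned header is rebuilt on success
(mirrors the body of this method; actual values depend on the switch
response):

.. code-block:: python

    auth_string = '%s:xxx:%s' % (self.switch_user, self.random_no)
    auth_token = base64.encode_as_text(auth_string).strip()
    auth_header = zone_constant.AUTH_STRING + auth_token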
:returns: Authentication status :raises BrocadeZoningHttpException: """ headers = {zone_constant.AUTH_HEADER: self.auth_header} try: # GET Request to authenticate.html to verify the credentials response = self.connect(zone_constant.GET_METHOD, zone_constant.AUTHEN_PAGE, header=headers) parsed_data = self.get_parsed_data(response, zone_constant.AUTHEN_BEGIN, zone_constant.AUTHEN_END) isauthenticated = self.get_nvp_value( parsed_data, zone_constant.AUTHENTICATED) if isauthenticated == "yes": if self.auth_version == "3": auth_id = self.get_nvp_value(parsed_data, zone_constant.IDENTIFIER) auth_string = '%s:xxx:%s' % (self.switch_user, auth_id) else: # Replace password in the authentication string with xxx auth_string = '%s:xxx:%s' % (self.switch_user, self.random_no) auth_token = base64.encode_as_text(auth_string).strip() auth_header = zone_constant.AUTH_STRING + auth_token return True, auth_header else: auth_error_code = self.get_nvp_value(parsed_data, "errCode") msg = (_("Authentication failed, verify the switch " "credentials, error code %s.") % auth_error_code) LOG.error(msg) raise b_exception.BrocadeZoningHttpException(reason=msg) except Exception as e: msg = (_("Error while authenticating with switch: %s.") % str(e)) LOG.error(msg) raise b_exception.BrocadeZoningHttpException(reason=msg) def get_session_info(self): """Get the session information from the switch :returns: Connection status information. """ try: headers = {zone_constant.AUTH_HEADER: self.auth_header} # GET request to session.html response = self.connect(zone_constant.GET_METHOD, zone_constant.SESSION_PAGE_ACTION, header=headers) except Exception as e: msg = (_("Error while getting session information %s.") % str(e)) LOG.error(msg) raise b_exception.BrocadeZoningHttpException(reason=msg) return response def get_parsed_data(self, data, delim1, delim2): """Return the sub string between the delimiters. :param data: String to manipulate :param delim1: Delimiter 1 :param delim2: Delimiter 2 :returns: substring between the delimiters """ try: start = data.index(delim1) start = start + len(delim1) end = data.index(delim2) return data[start:end] except ValueError as e: msg = (_("Error while parsing the data: %s.") % str(e)) LOG.error(msg) raise b_exception.BrocadeZoningHttpException(reason=msg) def get_nvp_value(self, data, keyname): """Get the value for the key passed. :param data: NVP to manipulate :param keyname: Key name :returns: value for the NVP """ try: start = data.index(keyname) start = start + len(keyname) temp = data[start:] end = temp.index("\n") return (temp[:end].lstrip('= ')) except ValueError as e: msg = (_("Error while getting nvp value: %s.") % str(e)) LOG.error(msg) raise b_exception.BrocadeZoningHttpException(reason=msg) def get_managable_vf_list(self, session_info): """List of VFIDs that can be managed. :param session_info: Session information from the switch :returns: manageable VF list :raises BrocadeZoningHttpException: """ try: # Check the value of manageableLFList NVP, # throw exception as not supported if the nvp not available vf_list = self.get_nvp_value(session_info, zone_constant.MANAGEABLE_VF) if vf_list: vf_list = vf_list.split(",") # convert the string to list except b_exception.BrocadeZoningHttpException as e: msg = (_("Error while checking whether " "VF is available for management %s.") % str(e)) LOG.error(msg) raise b_exception.BrocadeZoningHttpException(reason=msg) return vf_list[:-1] def change_vf_context(self, vfid, session_data): """Change the VF context in the session. 
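Illustrative call, mirroring :meth:`check_change_vf_context` (the VF ID
comes from the fabric configuration):

.. code-block:: python

    vf_enabled, session_data = self.is_vf_enabled()
    if vf_enabled:
        self.change_vf_context(self.vfid, session_data)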
:param vfid: VFID to which context should be changed. :param session_data: Session information from the switch :raises BrocadeZoningHttpException: """ try: managable_vf_list = self.get_managable_vf_list(session_data) LOG.debug("Manageable VF IDs are %(vflist)s.", {'vflist': managable_vf_list}) # proceed changing the VF context # if VF id can be managed if not throw exception if vfid in managable_vf_list: headers = {zone_constant.AUTH_HEADER: self.auth_header} data = zone_constant.CHANGE_VF.format(vfid=vfid) response = self.connect(zone_constant.POST_METHOD, zone_constant.SESSION_PAGE, data, headers) parsed_info = self.get_parsed_data(response, zone_constant.SESSION_BEGIN, zone_constant.SESSION_END) session_LF_Id = self.get_nvp_value(parsed_info, zone_constant.SESSION_LF_ID) if session_LF_Id == vfid: LOG.info("VF context is changed in the session.") else: msg = _("Cannot change VF context in the session.") LOG.error(msg) raise b_exception.BrocadeZoningHttpException(reason=msg) else: msg = (_("Cannot change VF context, " "specified VF is not available " "in the manageable VF list %(vf_list)s.") % {'vf_list': managable_vf_list}) LOG.error(msg) raise b_exception.BrocadeZoningHttpException(reason=msg) except b_exception.BrocadeZoningHttpException as e: msg = (_("Error while changing VF context %s.") % str(e)) LOG.error(msg) raise b_exception.BrocadeZoningHttpException(reason=msg) def get_zone_info(self): """Parse all the zone information and store it in the dictionary.""" try: self.cfgs = {} self.zones = {} self.active_cfg = '' self.alias = {} self.qlps = {} self.ifas = {} headers = {zone_constant.AUTH_HEADER: self.auth_header} # GET request to gzoneinfo.htm response = self.connect(zone_constant.GET_METHOD, zone_constant.ZONE_PAGE, header=headers) # get the zone string from the response self.parsed_raw_zoneinfo = self.get_parsed_data( response, zone_constant.ZONEINFO_BEGIN, zone_constant.ZONEINFO_END).strip("\n") LOG.debug("Original zone string from the switch: %(zoneinfo)s", {'zoneinfo': self.parsed_raw_zoneinfo}) # convert the zone string to list zoneinfo = self.parsed_raw_zoneinfo.split() i = 0 while i < len(zoneinfo): info = zoneinfo[i] # check for the cfg delimiter if zone_constant.CFG_DELIM in info: # extract the cfg name cfg_name = info.lstrip(zone_constant.CFG_DELIM) # update the dict as # self.cfgs={cfg_name:zone_name1;zone_name2} self.cfgs.update({cfg_name: zoneinfo[i + 1]}) i = i + 2 # check for the zone delimiter elif zone_constant.ZONE_DELIM in info: # extract the zone name zone_name = info.lstrip(zone_constant.ZONE_DELIM) # update the dict as # self.zones={zone_name:members1;members2} self.zones.update({zone_name: zoneinfo[i + 1]}) i = i + 2 elif zone_constant.ALIAS_DELIM in info: alias_name = info.lstrip(zone_constant.ALIAS_DELIM) # update the dict as # self.alias={alias_name:members1;members2} self.alias.update({alias_name: zoneinfo[i + 1]}) i = i + 2 # check for quickloop zones elif zone_constant.QLP_DELIM in info: qlp_name = info.lstrip(zone_constant.QLP_DELIM) # update the map as self.qlps={qlp_name:members1;members2} self.qlps.update({qlp_name: zoneinfo[i + 1]}) i = i + 2 # check for fabric assist zones elif zone_constant.IFA_DELIM in info: ifa_name = info.lstrip(zone_constant.IFA_DELIM) # update the map as self.ifas={ifa_name:members1;members2} self.ifas.update({ifa_name: zoneinfo[i + 1]}) i = i + 2 elif zone_constant.ACTIVE_CFG_DELIM in info: # update the string self.active_cfg=cfg_name self.active_cfg = info.lstrip( zone_constant.ACTIVE_CFG_DELIM) if self.active_cfg == 
zone_constant.DEFAULT_CFG: self.active_cfg = "" i = i + 2 else: i = i + 1 except Exception as e: msg = (_("Error while changing VF context %s.") % str(e)) LOG.error(msg) raise b_exception.BrocadeZoningHttpException(reason=msg) def is_supported_firmware(self): """Check firmware version is v6.4 or higher. This API checks if the firmware version per the plug-in support level. This only checks major and minor version. :returns: True if firmware is supported else False. :raises BrocadeZoningHttpException: """ isfwsupported = False try: headers = {zone_constant.AUTH_HEADER: self.auth_header} # GET request to switch.html response = self.connect(zone_constant.GET_METHOD, zone_constant.SWITCH_PAGE, header=headers) parsed_data = self.get_parsed_data(response, zone_constant.SWITCHINFO_BEGIN, zone_constant.SWITCHINFO_END) # get the firmware version nvp value fwVersion = self.get_nvp_value( parsed_data, zone_constant.FIRMWARE_VERSION).lstrip('v') ver = fwVersion.split(".") LOG.debug("Firmware version: %(version)s.", {'version': ver}) if int(ver[0] + ver[1]) > 63: isfwsupported = True except Exception as e: msg = (_("Error while checking the firmware version %s.") % str(e)) LOG.error(msg) raise b_exception.BrocadeZoningHttpException(reason=msg) return isfwsupported def get_active_zone_set(self): """Return the active zone configuration. Return active zoneset from fabric. When none of the configurations are active then it will return empty map. :returns: Map -- active zone set map in the following format .. code-block:: python { 'zones': {'openstack50060b0000c26604201900051ee8e329': ['50060b0000c26604', '201900051ee8e329'] }, 'active_zone_config': 'OpenStack_Cfg' } :raises BrocadeZoningHttpException: """ active_zone_set = {} zones_map = {} try: self.get_zone_info() # get the zone information of the switch if self.active_cfg != '': # get the zones list of the active_Cfg zones_list = self.cfgs[self.active_cfg].split(";") for n in zones_list: # build the zones map zones_map.update( {n: self.zones[n].split(";")}) # Format map in the correct format active_zone_set = { "active_zone_config": self.active_cfg, "zones": zones_map} return active_zone_set except Exception as e: msg = (_("Failed getting active zone set from fabric %s.") % str(e)) LOG.error(msg) raise b_exception.BrocadeZoningHttpException(reason=msg) def add_zones(self, add_zones_info, activate, active_zone_set=None): """Add zone configuration. This method will add the zone configuration passed by user. :param add_zones_info: Zone names mapped to members. Zone members are colon separated but case-insensitive .. code-block:: python { zonename1:[zonememeber1,zonemember2,...], zonename2:[zonemember1, zonemember2,...]...} e.g: { 'openstack50060b0000c26604201900051ee8e329': ['50:06:0b:00:00:c2:66:04', '20:19:00:05:1e:e8:e3:29'] } :param activate: True will activate the zone config. 
:param active_zone_set: Active zone set dict retrieved from get_active_zone_set method :raises BrocadeZoningHttpException: """ LOG.debug("Add zones - zones passed: %(zones)s.", {'zones': add_zones_info}) cfg_name = zone_constant.CFG_NAME cfgs = self.cfgs zones = self.zones alias = self.alias qlps = self.qlps ifas = self.ifas active_cfg = self.active_cfg # update the active_cfg, zones and cfgs map with new information zones, cfgs, active_cfg = self.add_zones_cfgs(cfgs, zones, add_zones_info, active_cfg, cfg_name) # Build the zonestring with updated maps data = self.form_zone_string(cfgs, active_cfg, zones, alias, qlps, ifas, activate) LOG.debug("Add zones: final zone string after applying " "to the switch: %(zonestring)s", {'zonestring': data}) # Post the zone data to the switch error_code, error_msg = self.post_zone_data(data) if error_code != "0": msg = (_("Applying the zones and cfgs to the switch failed " "(error code=%(err_code)s error msg=%(err_msg)s.") % {'err_code': error_code, 'err_msg': error_msg}) LOG.error(msg) raise b_exception.BrocadeZoningHttpException(reason=msg) def update_zones(self, zone_info, activate, operation, active_zone_set=None): """Update zone configuration. This method will update the zone configuration passed by user. :param zone_info: Zone names mapped to members. Zone members are colon separated but case-insensitive .. code-block:: python { zonename1:[zonememeber1,zonemember2,...], zonename2:[zonemember1, zonemember2,...]...} e.g: { 'openstack50060b0000c26604201900051ee8e329': ['50:06:0b:00:00:c2:66:04', '20:19:00:05:1e:e8:e3:29'] } :param activate: True will activate the zone config. :param operation: ZONE_ADD or ZONE_REMOVE :param active_zone_set: Active zone set dict retrieved from get_active_zone_set method :raises BrocadeZoningHttpException: """ LOG.debug("Update zones - zones passed: %(zones)s.", {'zones': zone_info}) cfgs = self.cfgs zones = self.zones alias = self.alias qlps = self.qlps ifas = self.ifas active_cfg = self.active_cfg # update the zones with new information zones = self._update_zones(zones, zone_info, operation) # Build the zonestring with updated maps data = self.form_zone_string(cfgs, active_cfg, zones, alias, qlps, ifas, activate) LOG.debug("Update zones: final zone string after applying " "to the switch: %(zonestring)s", {'zonestring': data}) # Post the zone data to the switch error_code, error_msg = self.post_zone_data(data) if error_code != "0": msg = (_("Applying the zones and cfgs to the switch failed " "(error code=%(err_code)s error msg=%(err_msg)s.") % {'err_code': error_code, 'err_msg': error_msg}) LOG.error(msg) raise b_exception.BrocadeZoningHttpException(reason=msg) def form_zone_string(self, cfgs, active_cfg, zones, alias, qlps, ifas, activate): """Build the zone string in the required format. :param cfgs: cfgs map :param active_cfg: Active cfg string :param zones: zones map :param alias: alias map :param qlps: qlps map :param ifas: ifas map :param activate: True will activate config. 
:returns: zonestring in the required format :raises BrocadeZoningHttpException: """ try: zoneString = zone_constant.ZONE_STRING_PREFIX # based on the activate save only will be changed saveonly = "false" if activate is True else "true" # Form the zone string based on the dictionary of each items for cfg in sorted(cfgs.keys()): zoneString += (zone_constant.CFG_DELIM + cfg + " " + cfgs.get(cfg) + " ") for zone in sorted(zones.keys()): zoneString += (zone_constant.ZONE_DELIM + zone + " " + zones.get(zone) + " ") for al in sorted(alias.keys()): zoneString += (zone_constant.ALIAS_DELIM + al + " " + alias.get(al) + " ") for qlp in sorted(qlps.keys()): zoneString += (zone_constant.QLP_DELIM + qlp + " " + qlps.get(qlp) + " ") for ifa in sorted(ifas.keys()): zoneString += (zone_constant.IFA_DELIM + ifa + " " + ifas.get(ifa) + " ") # append the active_cfg string only if it is not null and activate # is true if active_cfg != "" and activate: zoneString += (zone_constant.ACTIVE_CFG_DELIM + active_cfg + " null ") # Build the final zone string zoneString += zone_constant.ZONE_END_DELIM + saveonly except Exception as e: msg = (_("Exception while forming the zone string: %s.") % str(e)) LOG.error(msg) raise b_exception.BrocadeZoningHttpException(reason=msg) # Reconstruct the zoneString to type base string for OpenSSL return encodeutils.safe_encode(zoneString) def add_zones_cfgs(self, cfgs, zones, add_zones_info, active_cfg, cfg_name): """Add the zones and cfgs map based on the new zones info. This method will return the updated zones,cfgs and active_cfg :param cfgs: Existing cfgs map :param active_cfg: Existing Active cfg string :param zones: Existing zones map :param add_zones_info: Zones map to add :param active_cfg: Existing active cfg :param cfg_name: New cfg name :returns: updated zones, zone configs map, and active_cfg """ cfg_string = "" delimiter = "" zones_in_active_cfg = "" try: if active_cfg: zones_in_active_cfg = cfgs.get(active_cfg) for zone_name, members in add_zones_info.items(): # if new zone is not active_cfg, build the cfg string with the # new zones if zone_name not in zones_in_active_cfg: cfg_string += delimiter + zone_name delimiter = ";" # add a new zone with the members zones.update({zone_name: ";".join(members)}) # update cfg string if active_cfg: if cfg_string: # update the existing active cfg map with cfgs string cfgs.update( {active_cfg: cfg_string + ";" + cfgs.get(active_cfg)}) else: # create new cfg and update that cfgs map with the new cfg active_cfg = cfg_name cfgs.update({cfg_name: cfg_string}) except Exception as e: msg = (_("Error while updating the new zones and cfgs " "in the zone string. Error %(description)s.") % {'description': str(e)}) LOG.error(msg) raise b_exception.BrocadeZoningHttpException(reason=msg) return zones, cfgs, active_cfg def _update_zones(self, zones, updated_zones_info, operation): """Update the zones based on the updated zones info. 
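Illustrative effect on a single zone entry (zone name and member WWNs
are example values):

.. code-block:: python

    zones = {'z1': '20:19:00:05:1e:e8:e3:29'}
    zones = self._update_zones(
        zones, {'z1': ['50:06:0b:00:00:c2:66:04']},
        zone_constant.ZONE_ADD)
    # zones is now
    # {'z1': '50:06:0b:00:00:c2:66:04;20:19:00:05:1e:e8:e3:29'}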
This method will return the updated zones :param zones: Existing zones map :param updated_zones_info: Zones map to update :param operation: ZONE_ADD or ZONE_REMOVE :returns: updated zones """ try: for zone_name in updated_zones_info: members = updated_zones_info[zone_name] # update the zone string # if zone name already exists and dont have the new members # already current_members = zones.get(zone_name).split(";") if operation == zone_constant.ZONE_ADD: new_members = set(members).difference(set(current_members)) if new_members: # update the existing zone with new members zones.update({zone_name: (";".join(new_members) + ";" + zones.get(zone_name))}) else: new_members = set(current_members).difference(set(members)) if new_members: zones.pop(zone_name) zones.update({zone_name: ";".join(new_members)}) except Exception as e: msg = (_("Error while updating the zones " "in the zone string. Error %(description)s.") % {'description': str(e)}) LOG.error(msg) raise b_exception.BrocadeZoningHttpException(reason=msg) return zones def is_vf_enabled(self): """To check whether VF is enabled or not. :returns: boolean to indicate VF enabled and session information """ session_info = self.get_session_info() parsed_data = self.get_parsed_data(session_info, zone_constant.SESSION_BEGIN, zone_constant.SESSION_END) try: is_vf_enabled = bool(self.get_nvp_value( parsed_data, zone_constant.VF_ENABLED)) except b_exception.BrocadeZoningHttpException: is_vf_enabled = False parsed_data = None return is_vf_enabled, parsed_data def get_nameserver_info(self): """Get name server data from fabric. Return the connected node port wwn list(local and remote) for the given switch fabric. :returns: name server information. """ nsinfo = [] headers = {zone_constant.AUTH_HEADER: self.auth_header} response = self.connect(zone_constant.GET_METHOD, zone_constant.NS_PAGE, header=headers) # GET request to nsinfo.html for line in response.splitlines(): if line.startswith(zone_constant.NS_DELIM): nsinfo.append(line.split('=')[-1]) return nsinfo def delete_zones_cfgs( self, cfgs, zones, delete_zones_info, active_cfg): """Delete the zones and cfgs map based on the new zones info. Return the updated zones, cfgs and active_cfg after deleting the required items. :param cfgs: Existing cfgs map :param active_cfg: Existing Active cfg string :param zones: Existing zones map :param delete_zones_info: Zones map to add :param active_cfg: Existing active cfg :returns: updated zones, zone config sets, and active zone config :raises BrocadeZoningHttpException: """ try: delete_zones_info = delete_zones_info.split(";") for zone in delete_zones_info: # remove the zones from the zone map zones.pop(zone) # iterated all the cfgs, but need to check since in SSH only # active cfg is iterated for k, v in list(cfgs.items()): v = v.split(";") if zone in v: # remove the zone from the cfg string v.remove(zone) # if all the zones are removed, remove the cfg from the # cfg map if not v: cfgs.pop(k) # update the original cfg with the updated string else: cfgs[k] = ";".join(v) # if all the zones are removed in the active_cfg, update it with # empty string if active_cfg not in cfgs: active_cfg = "" except KeyError as e: msg = (_("Error while removing the zones and cfgs " "in the zone string: %(description)s.") % {'description': str(e)}) LOG.error(msg) raise b_exception.BrocadeZoningHttpException(reason=msg) return zones, cfgs, active_cfg def delete_zones(self, delete_zones_info, activate, active_zone_set=None): """Delete zones from fabric. 
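Illustrative call, assuming ``client`` is an authenticated
``BrcdHTTPFCZoneClient`` (zone names are example values, joined into one
semicolon separated string):

.. code-block:: python

    client.delete_zones(
        'openstack50060b0000c26604201900051ee8e329;'
        'openstack50060b0000c26605201900051ee8e330',
        activate=True)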
Deletes zones in the active zone config. :param zone_names: zoneNames separated by semicolon :param activate: True/False :param active_zone_set: the active zone set dict retrieved from get_active_zone_set method """ cfgs = self.cfgs zones = self.zones alias = self.alias qlps = self.qlps ifas = self.ifas active_cfg = self.active_cfg # update the active_cfg, zones and cfgs map with required information # being removed zones, cfgs, active_cfg = self.delete_zones_cfgs( cfgs, zones, delete_zones_info, active_cfg) # Build the zonestring with updated maps data = self.form_zone_string(cfgs, active_cfg, zones, alias, qlps, ifas, activate) LOG.debug("Delete zones: final zone string after applying " "to the switch: %(zonestring)s", {'zonestring': data}) error_code, error_msg = self.post_zone_data(data) if error_code != "0": msg = (_("Applying the zones and cfgs to the switch failed " "(error code=%(err_code)s error msg=%(err_msg)s.") % {'err_code': error_code, 'err_msg': error_msg}) LOG.error(msg) raise b_exception.BrocadeZoningHttpException(reason=msg) def post_zone_data(self, data): """Send POST request to the switch with the payload. :param data: payload to be sent to switch """ status = "progress" parsed_data_txn = "" headers = {zone_constant.AUTH_HEADER: self.auth_header} LOG.debug("Requesting the switch with posting the zone string.") # POST request to gzoneinfo with zonestring as payload response = self.connect(zone_constant.POST_METHOD, zone_constant.ZONE_PAGE, data, headers) parsed_data = self.get_parsed_data(response, zone_constant.ZONE_TX_BEGIN, zone_constant.ZONE_TX_END) transID = self.get_nvp_value(parsed_data, zone_constant.ZONE_TX_ID) transURL = zone_constant.ZONE_TRAN_STATUS.format(txnId=transID) timeout = 360 sleep_time = 3 time_elapsed = 0 while status != "done": txn_response = self.connect( zone_constant.GET_METHOD, transURL, "", headers) parsed_data_txn = self.get_parsed_data(txn_response, zone_constant.ZONE_TX_BEGIN, zone_constant.ZONE_TX_END) status = self.get_nvp_value(parsed_data_txn, zone_constant.ZONE_TX_STATUS) time.sleep(sleep_time) time_elapsed += sleep_time if time_elapsed > timeout: break if status != "done": errorCode = -1 errorMessage = ("Timed out, waiting for zone transaction on " "the switch to complete") else: errorCode = self.get_nvp_value(parsed_data_txn, zone_constant.ZONE_ERROR_CODE) errorMessage = self.get_nvp_value(parsed_data_txn, zone_constant.ZONE_ERROR_MSG) return errorCode, errorMessage def check_change_vf_context(self): """Check whether VF related configurations is valid and proceed.""" vf_enabled, session_data = self.is_vf_enabled() # VF enabled will be false if vf is disable or not supported LOG.debug("VF enabled on switch: %(vfenabled)s.", {'vfenabled': vf_enabled}) # Change the VF context in the session if vf_enabled: if self.vfid is None: msg = _("No VF ID is defined in the configuration file.") LOG.error(msg) raise b_exception.BrocadeZoningHttpException(reason=msg) elif self.vfid != 128: self.change_vf_context(self.vfid, session_data) else: if self.vfid is not None: msg = _("VF is not enabled.") LOG.error(msg) raise b_exception.BrocadeZoningHttpException(reason=msg) def _disconnect(self): """Disconnect from the switch using HTTP/HTTPS protocol. 
:raises BrocadeZoningHttpException: """ try: headers = {zone_constant.AUTH_HEADER: self.auth_header} response = self.connect(zone_constant.GET_METHOD, zone_constant.LOGOUT_PAGE, header=headers) return response except requests.exceptions.ConnectionError as e: msg = (_("Error while connecting the switch %(switch_id)s " "with protocol %(protocol)s. Error: %(error)s.") % {'switch_id': self.switch_ip, 'protocol': self.protocol, 'error': str(e)}) LOG.error(msg) raise b_exception.BrocadeZoningHttpException(reason=msg) except b_exception.BrocadeZoningHttpException as ex: msg = (_("Unexpected status code from the switch %(switch_id)s " "with protocol %(protocol)s for url %(page)s. " "Error: %(error)s") % {'switch_id': self.switch_ip, 'protocol': self.protocol, 'page': zone_constant.LOGOUT_PAGE, 'error': str(ex)}) LOG.error(msg) raise b_exception.BrocadeZoningHttpException(reason=msg) def cleanup(self): """Close session.""" self._disconnect() self.session.close() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/zonemanager/drivers/brocade/brcd_rest_fc_zone_client.py0000664000175000017500000004361000000000000027304 0ustar00zuulzuul00000000000000# (c) Copyright 2019 Brocade, a Broadcom Company # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # """ Brocade south bound connector to communicate with switch using REST over HTTP or HTTPS protocol. """ import json from oslo_log import log as logging from oslo_serialization import base64 import requests from cinder.i18n import _ from cinder.zonemanager.drivers.brocade import exception from cinder.zonemanager.drivers.brocade import fc_zone_constants from cinder.zonemanager.drivers.brocade import rest_constants LOG = logging.getLogger(__name__) class BrcdRestFCZoneClient(object): def __init__(self, ipaddress, username, password, port, vfid, protocol): """Initializing the client with the parameters passed. :param ipaddress: IP Address of the device. :param username: User id to login. :param password: User password. :param port: Device Communication port :param vfid: Virtual Fabric ID. :param protocol: Communication Protocol. 
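Illustrative construction (all values are examples only; real values
come from the fabric configuration via the connector factory):

.. code-block:: python

    client = BrcdRestFCZoneClient(
        ipaddress='10.0.0.5', username='admin', password='password',
        port=None, vfid='128',
        protocol=fc_zone_constants.REST_HTTPS)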
""" self.sw_ip = ipaddress self.sw_user = username self.sw_pwd = password self.protocol = protocol self.vfid = vfid self.status_code = '' self.session = None self._login() def is_supported_firmware(self): is_supported_firmware = False fw_version = self._get_firmware_version() ver = fw_version.split(".") if len(ver[0]) > 1: major_ver = ver[0] ver[0] = major_ver[1] if len(ver[2]) > 1: patch_ver = ver[2] ver[2] = patch_ver[0] LOG.debug("Firmware version: %(version)s.", {'version': ver}) if int(ver[0] + ver[1] + ver[2]) > 820: is_supported_firmware = True return is_supported_firmware def get_active_zone_set(self): active_zone_set, checksum = self._get_effective_zone_set() return active_zone_set def get_nameserver_info(self): return self._get_name_server() def add_zones(self, add_zone_map, activate, active_zone_set=None): self._add_zones(add_zone_map, activate) def update_zones(self, update_zone_map, activate, operation, active_zone_set=None): self._update_zones(update_zone_map, activate, operation) def delete_zones(self, zone_names_to_delete, activate, active_zone_set=None): self._delete_zones(zone_names_to_delete, activate) def cleanup(self): self._logout() def _login(self): if self.protocol == fc_zone_constants.REST_HTTPS: self.protocol = fc_zone_constants.HTTPS else: self.protocol = fc_zone_constants.HTTP if self.session is None: self.session = requests.Session() adapter = requests.adapters.HTTPAdapter(pool_connections=1, pool_maxsize=1) self.session.mount(self.protocol + '://', adapter) credentials = base64.encode_as_text('%s:%s' % (self.sw_user, self.sw_pwd)).replace('\n', '') self.session.headers = {rest_constants.USER_AGENT: rest_constants.ZONE_DRIVER, rest_constants.ACCEPT: rest_constants.YANG, rest_constants.AUTHORIZATION: "Basic %s" % credentials} response = self.session.post(self._build_url(rest_constants.LOGIN)) if response.status_code == 200: auth = response.headers.get('Authorization') LOG.info("REST login success, setting auth: %s", auth) self.session.headers = {rest_constants.USER_AGENT: rest_constants.ZONE_DRIVER, rest_constants.ACCEPT: rest_constants.YANG, rest_constants.CONTENT_TYPE: rest_constants.YANG, rest_constants.AUTHORIZATION: auth} else: msg = (_("REST login failed: %s") % response.text) LOG.error(msg) raise exception.BrocadeZoningRestException(reason=msg) return response.status_code def _logout(self): response = self.session.post(self._build_url(rest_constants.LOGOUT)) if response.status_code == 204: LOG.info("REST logout success") else: msg = (_("REST logout failed: %s") % response.text) LOG.error(msg) raise exception.BrocadeZoningRestException(reason=msg) def _get_firmware_version(self): response = self.session.get(self._build_url(rest_constants.GET_SWITCH)) firmware_version = '' if response.status_code == 200: data = response.json() json_response = data[rest_constants.RESPONSE] switch = json_response[rest_constants.SWITCH] firmware_version = switch[rest_constants.FIRMWARE_VERSION] LOG.info("REST firmware version: %s", firmware_version) else: msg = (_("REST get switch fw version failed: %s") % response.text) LOG.error(msg) raise exception.BrocadeZoningRestException(reason=msg) return firmware_version def _get_name_server(self): port_names = [] url = self._build_url(rest_constants.GET_NAMESERVER) response = self.session.get(url) if response.status_code == 200: data = response.json() json_response = data[rest_constants.RESPONSE] nsinfos = json_response[rest_constants.FC_NAME_SERVER] i = 0 for nsinfo in nsinfos: port_names.append(nsinfos[i][rest_constants.PORT_NAME]) 
i = i + 1 else: msg = (_("REST get NS info failed: %s") % response.text) LOG.error(msg) raise exception.BrocadeZoningRestException(reason=msg) return port_names def _get_effective_zone_set(self): active_zone_set = {} zones_map = {} url = self._build_url(rest_constants.GET_ACTIVE_ZONE_CFG) response = self.session.get(url) checksum = '' active_cfg_name = '' if response.status_code == 200: data = response.json() json_response = data[rest_constants.RESPONSE] effective_cfg = json_response[rest_constants.EFFECTIVE_CFG] checksum = effective_cfg[rest_constants.CHECKSUM] try: active_cfg_name = effective_cfg[rest_constants.CFG_NAME] zones = effective_cfg[rest_constants.ENABLED_ZONE] if type(zones) is list: for i, zone in enumerate(zones): zones_map.update({zones[i][rest_constants.ZONE_NAME]: zones[i][rest_constants.MEMBER_ENTRY] [rest_constants.ENTRY_NAME]}) else: zones_map.update({zones[rest_constants.ZONE_NAME]: zones[rest_constants.MEMBER_ENTRY] [rest_constants.ENTRY_NAME]}) except Exception: active_cfg_name = '' LOG.info("REST get effective zoneset success: " "active cfg: %(cfg_name)s, checksum: %(chksum)s", {'cfg_name': active_cfg_name, 'chksum': checksum}) else: msg = (_("REST get effective zoneset failed: %s") % response.text) LOG.error(msg) raise exception.BrocadeZoningRestException(reason=msg) active_zone_set = {"active_zone_config": active_cfg_name, "zones": zones_map} return active_zone_set, checksum def _add_zones(self, add_zone_map, activate): active_zone_set, checksum = self._get_effective_zone_set() # if activate, get the zones already configured in the active cfg if activate: zones_in_active_cfg = active_zone_set.get("zones") # for each new zone, create a zone entry in defined zone db for zone_name, members in add_zone_map.items(): if zone_name not in zones_in_active_cfg: body = {rest_constants.MEMBER_ENTRY: {rest_constants.ENTRY_NAME: add_zone_map.get(zone_name)}} json_str = json.dumps(body) url = self._build_url(rest_constants.POST_ZONE + zone_name) response = self.session.post(url, data=json_str) if response.status_code == 201: LOG.info("REST create zone success: %s", zone_name) else: msg = (_("REST create zone failed: %s") % response.text) LOG.error(msg) raise exception.BrocadeZoningRestException(reason=msg) # update the cfg with the new zones active_cfg_name = active_zone_set.get("active_zone_config") active_zones = active_zone_set.get("zones") active_zone_names = active_zones.keys() active_zone_names.extend(add_zone_map.keys()) body = {rest_constants.MEMBER_ZONE: {rest_constants.ZONE_NAME: active_zone_names}} json_str = json.dumps(body) if active_cfg_name == '': active_cfg_name = fc_zone_constants.CFG_NAME url = self._build_url(rest_constants.POST_CFG + active_cfg_name) response = self.session.post(url, data=json_str) if response.status_code == 201: LOG.info("REST cfg create success: %s", active_cfg_name) self._save_and_activate_cfg(checksum, activate, active_cfg_name) else: msg = (_("REST cfg create failed: %s") % response.text) LOG.error(msg) raise exception.BrocadeZoningRestException(reason=msg) else: url = self._build_url(rest_constants.PATCH_CFG + active_cfg_name) response = self.session.patch(url, data=json_str) # if update successful, save the configuration changes if response.status_code == 204: LOG.info("REST cfg update success: %s", active_cfg_name) self._save_and_activate_cfg(checksum, activate, active_cfg_name) else: msg = (_("REST cfg update failed: %s") % response.text) LOG.error(msg) raise exception.BrocadeZoningRestException(reason=msg) def _update_zones(self, 
update_zone_map, activate, operation): active_zone_set, checksum = self._get_effective_zone_set() active_cfg_name = active_zone_set.get("active_zone_config") active_zones = active_zone_set.get("zones") # for each zone, update the zone members in defined zone db for zone_name, members in update_zone_map.items(): current_members = active_zones.get(zone_name) if operation == "ADD": new_members = set(members).difference(set(current_members)) if new_members: update_zone_map.update({zone_name: new_members}) elif operation == "REMOVE": new_members = set(current_members).difference(set(members)) if new_members: update_zone_map.update({zone_name: new_members}) # for each zone to be updated, make REST PATCH call to update for zone in update_zone_map.keys(): body = {rest_constants.MEMBER_ENTRY: {rest_constants.ENTRY_NAME: update_zone_map.get(zone)}} json_str = json.dumps(body) url = self._build_url(rest_constants.POST_ZONE + zone) response = self.session.patch(url, data=json_str) if response.status_code == 204: LOG.info("REST zone update success: %s", zone) else: msg = (_("REST zone update failed: %s") % response.text) LOG.error(msg) raise exception.BrocadeZoningRestException(reason=msg) # save and activate the config changes self._save_and_activate_cfg(checksum, activate, active_cfg_name) def _delete_zones(self, zone_names_to_delete, activate): zone_names_to_delete = zone_names_to_delete.split(";") active_zone_set, checksum = self._get_effective_zone_set() # for each zone name, make REST DELETE call for zone in zone_names_to_delete: url = self._build_url(rest_constants.DELETE_ZONE + zone) response = self.session.delete(url) if response.status_code == 204: LOG.info("REST delete zone success: %s", zone) else: msg = (_("REST delete zone failed: %s") % response.text) LOG.error(msg) raise exception.BrocadeZoningRestException(reason=msg) # update the cfg removing the deleted zones active_cfg_name = active_zone_set.get("active_zone_config") active_zones = active_zone_set.get("zones") active_zone_names = active_zones.keys() if len(active_zone_names) == len(zone_names_to_delete): # disable the cfg url = self._build_url(rest_constants.PATCH_CFG_DISABLE) body = {"checksum": checksum} json_str = json.dumps(body) response = self.session.patch(url, data=json_str) if response.status_code == 204: LOG.info("REST cfg disable success") else: msg = (_("REST cfg disable failed: %s") % response.text) LOG.error(msg) raise exception.BrocadeZoningRestException(reason=msg) # delete the cfg url = self._build_url(rest_constants.DELETE_CFG + active_cfg_name) response = self.session.delete(url) if response.status_code == 204: LOG.info("REST cfg delete success: %s", active_cfg_name) else: msg = (_("REST cfg delete failed: %s") % response.text) LOG.error(msg) raise exception.BrocadeZoningRestException(reason=msg) checksum = self._get_checksum() self._save_and_activate_cfg(checksum, False, active_cfg_name) else: # update the cfg by removing the deleted zones zone_names_in_cfg = list(set(active_zone_names) .difference(set(zone_names_to_delete))) body = {rest_constants.MEMBER_ZONE: {rest_constants.ZONE_NAME: zone_names_in_cfg}} json_str = json.dumps(body) url = self._build_url(rest_constants.PATCH_CFG + active_cfg_name) response = self.session.patch(url, data=json_str) # if update successful, save the configuration changes if response.status_code == 204: LOG.info("REST cfg update success: %s", active_cfg_name) self._save_and_activate_cfg(checksum, activate, active_cfg_name) else: msg = (_("REST cfg update failed: %s") % 
response.text) LOG.error(msg) raise exception.BrocadeZoningRestException(reason=msg) def _save_and_activate_cfg(self, checksum, activate, active_cfg_name): body = {"checksum": checksum} json_str = json.dumps(body) url = self._build_url(rest_constants.PATCH_CFG_SAVE) response = self.session.patch(url, data=json_str) if response.status_code == 204: LOG.info("REST cfg save success") else: msg = (_("REST cfg save failed: %s") % response.text) LOG.error(msg) raise exception.BrocadeZoningRestException(reason=msg) # if activate=true, then enable the cfg changes to effective cfg if activate: checksum = self._get_checksum() body = {"checksum": checksum} json_str = json.dumps(body) url = self._build_url(rest_constants.PATCH_CFG_ENABLE + active_cfg_name) response = self.session.patch(url, data=json_str) if response.status_code == 204: LOG.info("REST cfg activate success: %s", active_cfg_name) else: msg = (_("REST cfg activate failed: %s") % response.text) LOG.error(msg) raise exception.BrocadeZoningRestException(reason=msg) def _get_checksum(self): url = self._build_url(rest_constants.GET_CHECKSUM) response = self.session.get(url) checksum = '' if response.status_code == 200: data = response.json() json_response = data[rest_constants.RESPONSE] effective_cfg = json_response[rest_constants.EFFECTIVE_CFG] checksum = effective_cfg[rest_constants.CHECKSUM] LOG.info("REST get checksum success: %s", checksum) else: msg = (_("REST get checksum failed: %s") % response.text) LOG.error(msg) raise exception.BrocadeZoningRestException(reason=msg) return checksum def _build_url(self, path): url = '%s://%s%s' % (self.protocol, self.sw_ip, path) if self.vfid is not None: url = '%s?vf-id=%s' % (url, self.vfid) return url ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/zonemanager/drivers/brocade/exception.py0000664000175000017500000000200500000000000024263 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from cinder import exception from cinder.i18n import _ class BrocadeZoningCliException(exception.CinderException): message = _("Brocade Fibre Channel Zoning CLI error: %(reason)s") class BrocadeZoningHttpException(exception.CinderException): message = _("Brocade Fibre Channel Zoning HTTP error: %(reason)s") class BrocadeZoningRestException(exception.CinderException): message = _("Brocade Fibre Channel Zoning REST error: %(reason)s") ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/zonemanager/drivers/brocade/fc_zone_constants.py0000664000175000017500000000616400000000000026016 0ustar00zuulzuul00000000000000# (c) Copyright 2019 Brocade, a Broadcom Company # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
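# Illustrative sketch (separate from the driver code above): how the REST
# zone client's _build_url composes a Fabric OS REST endpoint.  The protocol
# and switch address are prepended to the resource path and, when a virtual
# fabric is configured, the vfid is appended as a "vf-id" query parameter.
# The switch address and vfid values below are hypothetical.
def _build_url_sketch(protocol, sw_ip, path, vfid=None):
    url = '%s://%s%s' % (protocol, sw_ip, path)
    if vfid is not None:
        url = '%s?vf-id=%s' % (url, vfid)
    return url

assert (_build_url_sketch('https', '10.0.0.1', '/rest/login', 128)
        == 'https://10.0.0.1/rest/login?vf-id=128')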
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # """ Common constants used by Brocade FC Zone Driver. """ YES = 'y' ACTIVE_ZONE_CONFIG = 'active_zone_config' CFG_ZONESET = 'cfg:' CFG_ZONES = 'zones' OPENSTACK_CFG_NAME = 'OpenStack_Cfg' SUCCESS = 'Success' TRANS_ABORTABLE = 'It is abortable' """ CLI Commands for FC zoning operations. """ GET_ACTIVE_ZONE_CFG = 'cfgactvshow' ZONE_CREATE = 'zonecreate ' ZONESET_CREATE = 'cfgcreate ' CFG_SAVE = 'cfgsave' CFG_ADD = 'cfgadd ' ACTIVATE_ZONESET = 'cfgenable ' DEACTIVATE_ZONESET = 'cfgdisable' CFG_DELETE = 'cfgdelete ' CFG_REMOVE = 'cfgremove ' ZONE_DELETE = 'zonedelete ' ZONE_ADD = 'zoneadd ' ZONE_REMOVE = 'zoneremove ' CFG_SHOW_TRANS = 'cfgtransshow' CFG_ZONE_TRANS_ABORT = 'cfgtransabort' NS_SHOW = 'nsshow' NS_CAM_SHOW = 'nscamshow' """ HTTPS connector constants """ AUTH_HEADER = "Authorization" PROTOCOL_HTTPS = "HTTPS" STATUS_OK = 200 SECINFO_PAGE = "/secinfo.html" AUTHEN_PAGE = "/authenticate.html" GET_METHOD = "GET" POST_METHOD = "POST" SECINFO_BEGIN = "--BEGIN SECINFO" SECINFO_END = "--END SECINFO" RANDOM = "RANDOM" AUTHVERSION = "AUTHVERSION" IDENTIFIER = "Identifier" AUTH_STRING = "Custom_Basic " # Trailing space is required, do not remove AUTHEN_BEGIN = "--BEGIN AUTHENTICATE" AUTHEN_END = "--END AUTHENTICATE" AUTHENTICATED = "authenticated" SESSION_PAGE_ACTION = "/session.html?action=query" SESSION_BEGIN = "--BEGIN SESSION" SESSION_END = "--END SESSION" SESSION_PAGE = "/session.html" LOGOUT_PAGE = "/logout.html" ZONEINFO_BEGIN = "--BEGIN ZONE INFO" ZONEINFO_END = "--END ZONE INFO" SWITCH_PAGE = "/switch.html" SWITCHINFO_BEGIN = "--BEGIN SWITCH INFORMATION" SWITCHINFO_END = "--END SWITCH INFORMATION" FIRMWARE_VERSION = "swFWVersion" VF_ENABLED = "vfEnabled" MANAGEABLE_VF = "manageableLFList" CHANGE_VF = ("Session=--BEGIN SESSION\n\taction=apply\n\tLFId= {vfid} " "\b\t--END SESSION") ZONE_TRAN_STATUS = "/gzoneinfo.htm?txnId={txnId}" CFG_DELIM = "\x01" ZONE_DELIM = "\x02" ALIAS_DELIM = "\x03" QLP_DELIM = "\x04" ZONE_END_DELIM = "\x05&saveonly=" IFA_DELIM = "\x06" ACTIVE_CFG_DELIM = "\x07" DEFAULT_CFG = "d__efault__Cfg" NS_PAGE = "/nsinfo.htm?format=1&type=all" NS_DELIM = "deviceport.portwwn=" ZONE_TX_BEGIN = "--BEGIN ZONE_TXN_INFO" ZONE_TX_END = "--END ZONE_TXN_INFO" ZONE_ERROR_CODE = "errorCode" ZONE_PAGE = "/gzoneinfo.htm" CFG_NAME = "openstack_cfg" ZONE_STRING_PREFIX = "zonecfginfo=" ZONE_ERROR_MSG = "errorMessage" ZONE_TX_ID = "txnId" ZONE_TX_STATUS = "status" SESSION_LF_ID = "sessionLFId" HTTP = "http" HTTPS = "https" REST_HTTP = "rest_http" REST_HTTPS = "rest_https" ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/zonemanager/drivers/brocade/rest_constants.py0000664000175000017500000000420500000000000025342 0ustar00zuulzuul00000000000000# (c) Copyright 2019 Brocade, a Broadcom Company # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
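# Illustrative sketch (dummy credentials, not part of the driver): the
# Authorization header the REST client's _login() sends on its first request.
# The user:password pair is base64 encoded for HTTP Basic auth; the switch
# answers with a session token in its own "Authorization" header, which then
# replaces the Basic credentials on subsequent calls.  The stdlib base64
# module stands in here for oslo_serialization's encode_as_text helper.
import base64

credentials = base64.b64encode(b'admin:password').decode().replace('\n', '')
login_headers = {'User-Agent': 'OpenStack Zone Driver',
                 'Accept': 'application/yang-data+json',
                 'Authorization': 'Basic %s' % credentials}
assert login_headers['Authorization'] == 'Basic YWRtaW46cGFzc3dvcmQ='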
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # YANG = "application/yang-data+json" ACCEPT = "Accept" CONTENT_TYPE = "Content-Type" AUTHORIZATION = "Authorization" USER_AGENT = "User-Agent" ZONE_DRIVER = "OpenStack Zone Driver" LOGIN = "/rest/login" LOGOUT = "/rest/logout" NAME_SERVER = "/rest/running/brocade-name-server" ZONING = "/rest/running/zoning" DEFINED_CFG = "/defined-configuration" EFFECTIVE_CFG = "/effective-configuration" GET_SWITCH = "/rest/running/switch/fibrechannel-switch" GET_NAMESERVER = NAME_SERVER + "/fibrechannel-name-server" GET_DEFINED_ZONE_CFG = ZONING + DEFINED_CFG GET_ACTIVE_ZONE_CFG = ZONING + EFFECTIVE_CFG GET_CHECKSUM = ZONING + EFFECTIVE_CFG + "/checksum" POST_ZONE = ZONING + DEFINED_CFG + "/zone/zone-name/" POST_CFG = ZONING + DEFINED_CFG + "/cfg/cfg-name/" PATCH_CFG = ZONING + DEFINED_CFG + "/cfg/cfg-name/" PATCH_CFG_SAVE = ZONING + EFFECTIVE_CFG + "/cfg-action/1" PATCH_CFG_DISABLE = ZONING + EFFECTIVE_CFG + "/cfg-action/2" PATCH_CFG_ENABLE = ZONING + EFFECTIVE_CFG + "/cfg-name/" DELETE_ZONE = POST_ZONE DELETE_CFG = POST_CFG RESPONSE = "Response" SWITCH = "fibrechannel-switch" FIRMWARE_VERSION = "firmware-version" FC_NAME_SERVER = "fibrechannel-name-server" PORT_NAME = "port-name" DEFINED_CFG = "defined-configuration" CFG = "cfg" CFG_NAME = "cfg-name" MEMBER_ZONE = "member-zone" ZONE_NAME = "zone-name" ZONE = "zone" MEMBER_ENTRY = "member-entry" ENTRY_NAME = "entry-name" ALIAS = "alias" ALIAS_ENTRY_NAME = "alias-entry-name" EFFECTIVE_CFG = "effective-configuration" CHECKSUM = "checksum" ENABLED_ZONE = "enabled-zone" ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315577.4191215 cinder-27.0.0/cinder/zonemanager/drivers/cisco/0000775000175000017500000000000000000000000021417 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/zonemanager/drivers/cisco/__init__.py0000664000175000017500000000000000000000000023516 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/zonemanager/drivers/cisco/cisco_fabric_opts.py0000664000175000017500000000367200000000000025454 0ustar00zuulzuul00000000000000# (c) Copyright 2014 Cisco Systems Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
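# Illustrative sketch (constant values copied from rest_constants above): the
# REST resource paths used by the Brocade zone client are plain
# concatenations of the zoning base path and a configuration sub-tree.
ZONING_SKETCH = "/rest/running/zoning"
DEFINED_CFG_SKETCH = "/defined-configuration"
EFFECTIVE_CFG_SKETCH = "/effective-configuration"

assert (ZONING_SKETCH + DEFINED_CFG_SKETCH + "/zone/zone-name/"
        == "/rest/running/zoning/defined-configuration/zone/zone-name/")
assert (ZONING_SKETCH + EFFECTIVE_CFG_SKETCH + "/checksum"
        == "/rest/running/zoning/effective-configuration/checksum")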
# from oslo_config import cfg from cinder.volume import configuration cisco_zone_opts = [ cfg.StrOpt('cisco_fc_fabric_address', default='', help='Management IP of fabric'), cfg.StrOpt('cisco_fc_fabric_user', default='', help='Fabric user ID'), cfg.StrOpt('cisco_fc_fabric_password', default='', help='Password for user', secret=True), cfg.PortOpt('cisco_fc_fabric_port', default=22, help='Connecting port'), cfg.StrOpt('cisco_zoning_policy', default='initiator-target', help='overridden zoning policy'), cfg.BoolOpt('cisco_zone_activate', default=True, help='overridden zoning activation state'), cfg.StrOpt('cisco_zone_name_prefix', help='overridden zone name prefix'), cfg.StrOpt('cisco_zoning_vsan', help='VSAN of the Fabric'), ] CONF = cfg.CONF CONF.register_opts(cisco_zone_opts, group='CISCO_FABRIC_EXAMPLE') def load_fabric_configurations(fabric_names): fabric_configs = {} for fabric_name in fabric_names: config = configuration.Configuration(cisco_zone_opts, fabric_name) fabric_configs[fabric_name] = config return fabric_configs ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/zonemanager/drivers/cisco/cisco_fc_san_lookup_service.py0000664000175000017500000003540600000000000027523 0ustar00zuulzuul00000000000000# (c) Copyright 2014 Cisco Systems Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # import random from eventlet import greenthread from oslo_concurrency import processutils from oslo_log import log as logging from oslo_utils import excutils from cinder import exception from cinder.i18n import _ from cinder import ssh_utils from cinder import utils from cinder.zonemanager.drivers.cisco import cisco_fabric_opts as fabric_opts from cinder.zonemanager.drivers.cisco import exception as c_exception import cinder.zonemanager.drivers.cisco.fc_zone_constants as zone_constant from cinder.zonemanager import fc_san_lookup_service as fc_service from cinder.zonemanager import utils as zm_utils LOG = logging.getLogger(__name__) class CiscoFCSanLookupService(fc_service.FCSanLookupService): """The SAN lookup service that talks to Cisco switches. Version History: 1.0.0 - Initial version """ VERSION = "1.0.0" def __init__(self, **kwargs): """Initializing the client.""" super(CiscoFCSanLookupService, self).__init__(**kwargs) self.configuration = kwargs.get('configuration', None) self.create_configuration() self.switch_user = "" self.switch_port = "" self.switch_pwd = "" self.switch_ip = "" self.sshpool = {} def create_configuration(self): """Configuration specific to SAN context values.""" config = self.configuration fabric_names = [x.strip() for x in config.fc_fabric_names.split(',')] LOG.debug('Fabric Names: %s', fabric_names) # There can be more than one SAN in the network and we need to # get credentials for each for SAN context lookup later. 
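# Illustrative sketch (hypothetical fabric names): the comma separated
# fc_fabric_names option parsed above becomes the list of per-fabric config
# groups from which load_fabric_configurations() later reads credentials such
# as cisco_fc_fabric_address and cisco_zoning_vsan.
fc_fabric_names_sketch = "CISCO_FAB_A, CISCO_FAB_B"
fabric_names_sketch = [x.strip() for x in fc_fabric_names_sketch.split(',')]
assert fabric_names_sketch == ['CISCO_FAB_A', 'CISCO_FAB_B']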
# Cisco Zonesets require VSANs if fabric_names: self.fabric_configs = fabric_opts.load_fabric_configurations( fabric_names) def get_device_mapping_from_network(self, initiator_wwn_list, target_wwn_list): """Provides the initiator/target map for available SAN contexts. Looks up fcns database of each fc SAN configured to find logged in devices and returns a map of initiator and target port WWNs for each fabric. :param initiator_wwn_list: List of initiator port WWN :param target_wwn_list: List of target port WWN :returns: List -- device wwn map in following format .. code-block:: python { : { 'initiator_port_wwn_list': ('200000051e55a100', '200000051e55a121'..) 'target_port_wwn_list': ('100000051e55a100', '100000051e55a121'..) } } :raises Exception: when connection to fabric is failed """ device_map = {} formatted_target_list = [] formatted_initiator_list = [] fabric_map = {} fabric_names = self.configuration.fc_fabric_names if not fabric_names: raise exception.InvalidParameterValue( err=_("Missing Fibre Channel SAN configuration " "param - fc_fabric_names")) fabrics = [x.strip() for x in fabric_names.split(',')] LOG.debug("FC Fabric List: %s", fabrics) if fabrics: for t in target_wwn_list: formatted_target_list.append(zm_utils.get_formatted_wwn(t)) for i in initiator_wwn_list: formatted_initiator_list.append(zm_utils.get_formatted_wwn(i)) for fabric_name in fabrics: self.switch_ip = self.fabric_configs[fabric_name].safe_get( 'cisco_fc_fabric_address') self.switch_user = self.fabric_configs[fabric_name].safe_get( 'cisco_fc_fabric_user') self.switch_pwd = self.fabric_configs[fabric_name].safe_get( 'cisco_fc_fabric_password') self.switch_port = self.fabric_configs[fabric_name].safe_get( 'cisco_fc_fabric_port') zoning_vsan = self.fabric_configs[fabric_name].safe_get( 'cisco_zoning_vsan') # Get name server data from fabric and find the targets # logged in nsinfo = '' LOG.debug("show fcns database for vsan %s", zoning_vsan) nsinfo = self.get_nameserver_info(zoning_vsan) self.cleanup() LOG.debug("Lookup service:fcnsdatabase-%s", nsinfo) LOG.debug("Lookup service:initiator list from caller-%s", formatted_initiator_list) LOG.debug("Lookup service:target list from caller-%s", formatted_target_list) visible_targets = [x for x in nsinfo if x in formatted_target_list] visible_initiators = [x for x in nsinfo if x in formatted_initiator_list] if visible_targets: LOG.debug("Filtered targets is: %s", visible_targets) # getting rid of the : before returning for idx, elem in enumerate(visible_targets): elem = str(elem).replace(':', '') visible_targets[idx] = elem else: LOG.debug("No targets are in the fcns database" " for vsan %s", zoning_vsan) if visible_initiators: # getting rid of the : before returning ~sk for idx, elem in enumerate(visible_initiators): elem = str(elem).replace(':', '') visible_initiators[idx] = elem else: LOG.debug("No initiators are in the fcns database" " for vsan %s", zoning_vsan) fabric_map = {'initiator_port_wwn_list': visible_initiators, 'target_port_wwn_list': visible_targets } device_map[fabric_name] = fabric_map LOG.debug("Device map for SAN context: %s", device_map) return device_map def get_nameserver_info(self, fabric_vsan): """Get fcns database info from fabric. 
This method will return the connected node port wwn list(local and remote) for the given switch fabric """ cli_output = None nsinfo_list = [] try: cmd = ([zone_constant.FCNS_SHOW, fabric_vsan, ' | no-more']) cli_output = self._get_switch_info(cmd) except exception.FCSanLookupServiceException: with excutils.save_and_reraise_exception(): LOG.error("Failed collecting show fcns database for fabric") if cli_output: nsinfo_list = self._parse_ns_output(cli_output) LOG.debug("Connector returning fcns info-%s", nsinfo_list) return nsinfo_list def _get_switch_info(self, cmd_list): stdout, stderr, sw_data = None, None, None try: stdout, stderr = self._run_ssh(cmd_list, True, 1) LOG.debug("CLI output from ssh - output: %s", stdout) if (stdout): sw_data = stdout.splitlines() return sw_data except processutils.ProcessExecutionError as e: msg = _("Error while getting data via ssh: (command=%(cmd)s " "error=%(err)s).") % {'cmd': cmd_list, 'err': str(e)} LOG.error(msg) raise c_exception.CiscoZoningCliException(reason=msg) def _parse_ns_output(self, switch_data): """Parses name server data. Parses nameserver raw data and adds the device port wwns to the list :returns: list of device port wwn from ns info """ nsinfo_list = [] for line in switch_data: if " N " not in line: continue linesplit = line.split() if len(linesplit) > 2: node_port_wwn = linesplit[2] nsinfo_list.append(node_port_wwn) else: msg = _("Malformed fcns output string: %s") % line LOG.error(msg) raise exception.InvalidParameterValue(err=msg) return nsinfo_list def _run_ssh(self, cmd_list, check_exit_code=True, attempts=1): command = ' '.join(cmd_list) if self.sshpool.get(self.switch_ip) is None: self.sshpool[self.switch_ip] = ssh_utils.SSHPool(self.switch_ip, self.switch_port, None, self.switch_user, self.switch_pwd, min_size=1, max_size=5) last_exception = None try: with self.sshpool.get(self.switch_ip).item() as ssh: while attempts > 0: attempts -= 1 try: return processutils.ssh_execute( ssh, command, check_exit_code=check_exit_code) except Exception as e: msg = _("Exception: %s") % str(e) LOG.error(msg) last_exception = e greenthread.sleep(random.randint(20, 500) / 100.0) try: raise processutils.ProcessExecutionError( exit_code=last_exception.exit_code, stdout=last_exception.stdout, stderr=last_exception.stderr, cmd=last_exception.cmd) except AttributeError: raise processutils.ProcessExecutionError( exit_code=-1, stdout="", stderr="Error running SSH command", cmd=command) except Exception: with excutils.save_and_reraise_exception(): LOG.error("Error running SSH command: %s", command) def _ssh_execute(self, cmd_list, check_exit_code=True, attempts=1): """Execute cli with status update. Executes CLI commands where status return is expected. cmd_list is a list of commands, where each command is itself a list of parameters. We use utils.check_ssh_injection to check each command, but then join then with " ; " to form a single command. """ # Check that each command is secure for cmd in cmd_list: utils.check_ssh_injection(cmd) # Combine into a single command. 
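# Illustrative sketch of the "show fcns database" parsing done by
# _parse_ns_output above: only rows describing N_Ports (containing " N ") are
# considered, and the third whitespace separated column is taken as the
# device port WWN.  The sample rows are made up for illustration.
sample_fcns_rows = [
    "FCID            TYPE  PWWN                     (VENDOR)     FC4-TYPE",
    "0x1e01c0         N    50:06:0b:00:00:c2:66:04  (Example)    scsi-fcp",
]
wwns_sketch = []
for row in sample_fcns_rows:
    if " N " not in row:
        continue
    fields = row.split()
    if len(fields) > 2:
        wwns_sketch.append(fields[2])
assert wwns_sketch == ["50:06:0b:00:00:c2:66:04"]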
command = ' ; '.join(map(lambda x: ' '.join(x), cmd_list)) if self.sshpool.get(self.switch_ip) is None: self.sshpool[self.switch_ip] = ssh_utils.SSHPool(self.switch_ip, self.switch_port, None, self.switch_user, self.switch_pwd, min_size=1, max_size=5) stdin, stdout, stderr = None, None, None LOG.debug("Executing command via ssh: %s", command) last_exception = None try: with self.sshpool.get(self.switch_ip).item() as ssh: while attempts > 0: attempts -= 1 try: stdin, stdout, stderr = ssh.exec_command(command) greenthread.sleep(random.randint(20, 500) / 100.0) channel = stdout.channel exit_status = channel.recv_exit_status() LOG.debug("Exit Status from ssh:%s", exit_status) # exit_status == -1 if no exit code was returned if exit_status != -1: LOG.debug('Result was %s', exit_status) if check_exit_code and exit_status != 0: raise processutils.ProcessExecutionError( exit_code=exit_status, stdout=stdout, stderr=stderr, cmd=command) else: return True else: return True except Exception as e: msg = _("Exception: %s") % str(e) LOG.error(msg) last_exception = e greenthread.sleep(random.randint(20, 500) / 100.0) LOG.debug("Handling error case after SSH:%s", last_exception) try: raise processutils.ProcessExecutionError( exit_code=last_exception.exit_code, stdout=last_exception.stdout, stderr=last_exception.stderr, cmd=last_exception.cmd) except AttributeError: raise processutils.ProcessExecutionError( exit_code=-1, stdout="", stderr="Error running SSH command", cmd=command) except Exception as e: with excutils.save_and_reraise_exception(): msg = (_("Error executing command via ssh: %s") % str(e)) LOG.error(msg) finally: if stdin: stdin.flush() stdin.close() if stdout: stdout.close() if stderr: stderr.close() def cleanup(self): self.sshpool = {} ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/zonemanager/drivers/cisco/cisco_fc_zone_client_cli.py0000664000175000017500000004706000000000000026770 0ustar00zuulzuul00000000000000# (c) Copyright 2014 Cisco Systems Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # """ Script to push the zone configuration to Cisco SAN switches. """ import random import re from eventlet import greenthread from oslo_concurrency import processutils from oslo_log import log as logging from oslo_utils import excutils from cinder import exception from cinder.i18n import _ from cinder import ssh_utils from cinder import utils from cinder.zonemanager.drivers.cisco import exception as c_exception import cinder.zonemanager.drivers.cisco.fc_zone_constants as ZoneConstant LOG = logging.getLogger(__name__) class CiscoFCZoneClientCLI(object): """Cisco FC zone client cli implementation. OpenStack Fibre Channel zone client cli connector to manage FC zoning in Cisco SAN fabrics. 
Version history: 1.0 - Initial Cisco FC zone client cli """ switch_ip = None switch_port = '22' switch_user = 'admin' switch_pwd = 'none' def __init__(self, ipaddress, username, password, port, vsan): """initializing the client.""" self.switch_ip = ipaddress self.switch_port = port self.switch_user = username self.switch_pwd = password self.fabric_vsan = vsan self.sshpool = None def get_active_zone_set(self): """Return the active zone configuration. Return active zoneset from fabric. When none of the configurations are active then it will return empty map. :returns: Map -- active zone set map in the following format .. code-block:: python { 'zones': {'openstack50060b0000c26604201900051ee8e329': ['50060b0000c26604', '201900051ee8e329'] }, 'active_zone_config': 'OpenStack_Cfg' } """ zone_set = {} zone = {} zone_member = None zone_name = None switch_data = None zone_set_name = None try: switch_data = self._get_switch_info( [ZoneConstant.GET_ACTIVE_ZONE_CFG, self.fabric_vsan, ' | no-more']) except c_exception.CiscoZoningCliException: with excutils.save_and_reraise_exception(): LOG.error("Failed getting active zone set " "from fabric %s", self.switch_ip) try: for line in switch_data: # Split on non-word characters, line_split = re.split(r'[\s\[\]]+', line) if ZoneConstant.CFG_ZONESET in line_split: # zoneset name [name] vsan [vsan] zone_set_name = \ line_split[line_split.index(ZoneConstant.CFG_ZONESET) + 2] continue if ZoneConstant.CFG_ZONE in line_split: # zone name [name] vsan [vsan] zone_name = \ line_split[line_split.index(ZoneConstant.CFG_ZONE) + 2] zone[zone_name] = list() continue if ZoneConstant.CFG_ZONE_MEMBER in line_split: # Examples: # pwwn c0:50:76:05:15:9f:00:12 # * fcid 0x1e01c0 [pwwn 50:05:07:68:02:20:48:04] [V7K_N1P2] zone_member = \ line_split[ line_split.index(ZoneConstant.CFG_ZONE_MEMBER) + 1] zone_member_list = zone.get(zone_name) zone_member_list.append(zone_member) zone_set[ZoneConstant.CFG_ZONES] = zone zone_set[ZoneConstant.ACTIVE_ZONE_CONFIG] = zone_set_name except Exception as ex: # In case of parsing error here, it should be malformed cli output. msg = _("Malformed zone configuration: (switch=%(switch)s " "zone_config=%(zone_config)s)." ) % {'switch': self.switch_ip, 'zone_config': switch_data} LOG.error(msg) exc_msg = _("Exception: %s") % str(ex) LOG.error(exc_msg) raise exception.FCZoneDriverException(reason=msg) return zone_set def add_zones(self, zones, activate, fabric_vsan, active_zone_set, zone_status): """Add zone configuration. This method will add the zone configuration passed by user. :param zones: Zone names mapped to members and VSANs Zone members are colon separated but case-insensitive .. code-block:: python { zonename1:[zonememeber1,zonemember2,...], zonename2:[zonemember1, zonemember2,...]...} e.g: { 'openstack50060b0000c26604201900051ee8e329': ['50:06:0b:00:00:c2:66:04', '20:19:00:05:1e:e8:e3:29'] } :param activate: True will activate the zone config. 
:param fabric_vsan: :param active_zone_set: Active zone set dict retrieved from get_active_zone_set method :param zone_status: Status of the zone :raises CiscoZoningCliException: """ LOG.debug("Add Zones - Zones passed: %s", zones) LOG.debug("Active zone set: %s", active_zone_set) zone_list = active_zone_set[ZoneConstant.CFG_ZONES] LOG.debug("zone list: %s", zone_list) LOG.debug("zone status: %s", zone_status) cfg_name = active_zone_set[ZoneConstant.ACTIVE_ZONE_CONFIG] zone_cmds = [['conf'], ['zoneset', 'name', cfg_name, 'vsan', fabric_vsan]] for zone in zones.keys(): zone_cmds.append(['zone', 'name', zone]) for member in zones[zone]: zone_cmds.append(['member', 'pwwn', member]) zone_cmds.append(['end']) try: LOG.debug("Add zones: Config cmd to run: %s", zone_cmds) self._ssh_execute(zone_cmds, True, 1) if activate: self.activate_zoneset(cfg_name, fabric_vsan, zone_status) self._cfg_save() except Exception as e: msg = _("Creating and activating zone set failed: " "(Zone set=%(zoneset)s error=%(err)s)." ) % {'zoneset': cfg_name, 'err': str(e)} LOG.error(msg) raise c_exception.CiscoZoningCliException(reason=msg) def update_zones(self, zones, activate, fabric_vsan, operation, active_zone_set, zone_status): """Update the zone configuration. This method will update the zone configuration passed by user. :param zones: zone names mapped to members. Zone members are colon separated but case-insensitive .. code-block:: python { zonename1:[zonememeber1, zonemember2,...], zonename2:[zonemember1, zonemember2,...]...} e.g: { 'openstack50060b0000c26604201900051ee8e329': ['50:06:0b:00:00:c2:66:04', '20:19:00:05:1e:e8:e3:29'] } :param activate: True will activate the zone config. :param operation: zone add or zone remove :param fabric_vsan: Virtual San # :param active_zone_set: Active zone set dict retrieved from get_active_zone_set method :param zone_status: Status of the zone :raises CiscoZoningCliException: """ LOG.debug("Update Zones - Operation: %(op)s - Zones " "passed: %(zones)s", {'op': operation, 'zones': zones}) cfg_name = active_zone_set[ZoneConstant.ACTIVE_ZONE_CONFIG] zone_cmds = [['conf'], ['zoneset', 'name', cfg_name, 'vsan', fabric_vsan]] zone_mod_cmd = [] if operation == ZoneConstant.ZONE_ADD: zone_mod_cmd = ['member', 'pwwn'] elif operation == ZoneConstant.ZONE_REMOVE: zone_mod_cmd = ['no', 'member', 'pwwn'] for zone, zone_members in zones.items(): zone_cmds.append(['zone', 'name', zone]) for member in zone_members: zone_cmds.append(zone_mod_cmd + [member]) zone_cmds.append(['end']) try: LOG.debug("Update zones: Config cmd to run: %s", zone_cmds) self._ssh_execute(zone_cmds, True, 1) if activate: self.activate_zoneset(cfg_name, fabric_vsan, zone_status) self._cfg_save() except Exception as e: msg = (_("Updating and activating zone set failed: " "(Zone set=%(zoneset)s error=%(err)s).") % {'zoneset': cfg_name, 'err': str(e)}) LOG.error(msg) raise c_exception.CiscoZoningCliException(reason=msg) def activate_zoneset(self, cfgname, fabric_vsan, zone_status): """Method to Activate the zone config. 
Param cfgname - ZonesetName.""" LOG.debug("zone status: %s", zone_status) cmd_list = [['conf'], ['zoneset', 'activate', 'name', cfgname, 'vsan', self.fabric_vsan]] if zone_status['mode'] == 'enhanced': cmd_list.append(['zone', 'commit', 'vsan', fabric_vsan]) cmd_list.append(['end']) return self._ssh_execute(cmd_list, True, 1) def get_zoning_status(self): """Return the zoning mode and session for a zoneset.""" zone_status = {} try: switch_data = self._get_switch_info( [ZoneConstant.GET_ZONE_STATUS, self.fabric_vsan]) except c_exception.CiscoZoningCliException: with excutils.save_and_reraise_exception(): LOG.error("Failed getting zone status " "from fabric %s", self.switch_ip) try: for line in switch_data: # Split on non-word characters, line_split = re.split(r'[\s\[\]]+', line) if 'mode:' in line_split: # mode: zone_status['mode'] = line_split[line_split.index('mode:') + 1] continue if 'session:' in line_split: # session: zone_status['session'] = \ line_split[line_split.index('session:') + 1] continue except Exception as ex: # In case of parsing error here, it should be malformed cli output. msg = _("Malformed zone status: (switch=%(switch)s " "zone_config=%(zone_config)s)." ) % {'switch': self.switch_ip, 'zone_status': switch_data} LOG.error(msg) exc_msg = _("Exception: %s") % str(ex) LOG.error(exc_msg) raise exception.FCZoneDriverException(reason=msg) return zone_status def delete_zones(self, zone_names, activate, fabric_vsan, active_zone_set, zone_status): """Delete zones from fabric. Method to delete the active zone config zones params zone_names: zoneNames separated by semicolon params activate: True/False """ LOG.debug("zone_names %s", zone_names) active_zoneset_name = active_zone_set[ZoneConstant.ACTIVE_ZONE_CONFIG] cmds = [['conf'], ['zoneset', 'name', active_zoneset_name, 'vsan', fabric_vsan]] try: for zone in set(zone_names.split(';')): cmds.append(['no', 'zone', 'name', zone]) cmds.append(['end']) LOG.debug("Delete zones: Config cmd to run: %s", cmds) self._ssh_execute(cmds, True, 1) if activate: self.activate_zoneset(active_zoneset_name, fabric_vsan, zone_status) self._cfg_save() except Exception as e: msg = _("Deleting zones failed: (command=%(cmd)s error=%(err)s)." ) % {'cmd': cmds, 'err': str(e)} LOG.error(msg) raise c_exception.CiscoZoningCliException(reason=msg) def get_nameserver_info(self): """Get name server data from fabric. 
This method will return the connected node port wwn list(local and remote) for the given switch fabric show fcns database """ cli_output = None return_list = [] try: cli_output = self._get_switch_info([ZoneConstant.FCNS_SHOW, self.fabric_vsan]) except c_exception.CiscoZoningCliException: with excutils.save_and_reraise_exception(): LOG.error("Failed collecting fcns database " "info for fabric %s", self.switch_ip) if (cli_output): return_list = self._parse_ns_output(cli_output) LOG.info("Connector returning fcnsinfo-%s", return_list) return return_list @utils.retry(processutils.ProcessExecutionError, retries=5) def _cfg_save(self): cmd = ['copy', 'running-config', 'startup-config'] self._run_ssh(cmd, True) def _get_switch_info(self, cmd_list): stdout, stderr, sw_data = None, None, None try: stdout, stderr = self._run_ssh(cmd_list, True) LOG.debug("CLI output from ssh - output: %s", stdout) if (stdout): sw_data = stdout.splitlines() return sw_data except processutils.ProcessExecutionError as e: msg = _("Error while getting data via ssh: (command=%(cmd)s " "error=%(err)s).") % {'cmd': cmd_list, 'err': str(e)} LOG.error(msg) raise c_exception.CiscoZoningCliException(reason=msg) def _parse_ns_output(self, switch_data): """Parses name server data. Parses nameserver raw data and adds the device port wwns to the list :returns: List -- list of device port wwn from ns info """ return_list = [] for line in switch_data: if " N " not in line: continue linesplit = line.split() if len(linesplit) > 2: node_port_wwn = linesplit[2] return_list.append(node_port_wwn) else: msg = _("Malformed show fcns database string: %s") % line LOG.error(msg) raise exception.InvalidParameterValue(err=msg) return return_list def _run_ssh(self, cmd_list, check_exit_code=True): command = ' '.join(cmd_list) if not self.sshpool: self.sshpool = ssh_utils.SSHPool(self.switch_ip, self.switch_port, None, self.switch_user, self.switch_pwd, min_size=1, max_size=5) try: with self.sshpool.item() as ssh: return processutils.ssh_execute( ssh, command, check_exit_code=check_exit_code) except Exception: with excutils.save_and_reraise_exception(): LOG.warning("Error running SSH command: %s", command) def _ssh_execute(self, cmd_list, check_exit_code=True, attempts=1): """Execute cli with status update. Executes CLI commands where status return is expected. cmd_list is a list of commands, where each command is itself a list of parameters. We use utils.check_ssh_injection to check each command, but then join then with " ; " to form a single command. """ # Check that each command is secure for cmd in cmd_list: utils.check_ssh_injection(cmd) # Combine into a single command. 
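# Illustrative sketch of the combination step performed just below: each
# inner command list (as built by add_zones and delete_zones above) is space
# joined, and the commands are chained with " ; " so a whole zoning
# transaction goes over a single SSH exec call.  The zoneset name, VSAN and
# WWN values here are hypothetical.
cmd_list_sketch = [['conf'],
                   ['zoneset', 'name', 'OpenStack_Cfg', 'vsan', '10'],
                   ['zone', 'name', 'openstack_zone_sketch'],
                   ['member', 'pwwn', '50:06:0b:00:00:c2:66:04'],
                   ['end']]
command_sketch = ' ; '.join(map(lambda x: ' '.join(x), cmd_list_sketch))
assert command_sketch == ('conf ; zoneset name OpenStack_Cfg vsan 10 ; '
                          'zone name openstack_zone_sketch ; '
                          'member pwwn 50:06:0b:00:00:c2:66:04 ; end')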
command = ' ; '.join(map(lambda x: ' '.join(x), cmd_list)) if not self.sshpool: self.sshpool = ssh_utils.SSHPool(self.switch_ip, self.switch_port, None, self.switch_user, self.switch_pwd, min_size=1, max_size=5) stdin, stdout, stderr = None, None, None LOG.debug("Executing command via ssh: %s", command) last_exception = None try: with self.sshpool.item() as ssh: while attempts > 0: attempts -= 1 try: stdin, stdout, stderr = ssh.exec_command(command) channel = stdout.channel exit_status = channel.recv_exit_status() LOG.debug("Exit Status from ssh: %s", exit_status) # exit_status == -1 if no exit code was returned if exit_status != -1: LOG.debug('Result was %s', exit_status) if check_exit_code and exit_status != 0: raise processutils.ProcessExecutionError( exit_code=exit_status, stdout=stdout, stderr=stderr, cmd=command) else: return True else: return True except Exception as e: LOG.exception('Error executing SSH command.') last_exception = e greenthread.sleep(random.randint(20, 500) / 100.0) LOG.debug("Handling error case after SSH: %s", last_exception) try: raise processutils.ProcessExecutionError( exit_code=last_exception.exit_code, stdout=last_exception.stdout, stderr=last_exception.stderr, cmd=last_exception.cmd) except AttributeError: raise processutils.ProcessExecutionError( exit_code=-1, stdout="", stderr="Error running SSH command", cmd=command) except Exception: with excutils.save_and_reraise_exception(): LOG.exception("Error executing command via ssh.") finally: if stdin: stdin.flush() stdin.close() if stdout: stdout.close() if stderr: stderr.close() def cleanup(self): self.sshpool = None ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/zonemanager/drivers/cisco/cisco_fc_zone_driver.py0000664000175000017500000006260100000000000026154 0ustar00zuulzuul00000000000000# (c) Copyright 2014 Cisco Systems Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # """ Cisco Zone Driver is responsible to manage access control using FC zoning for Cisco FC fabrics. This is a concrete implementation of FCZoneDriver interface implementing add_connection and delete_connection interfaces. **Related Flags** :zone_activate: Used by: class: 'FCZoneDriver'. Defaults to True :zone_name_prefix: Used by: class: 'FCZoneDriver'. 
Defaults to 'openstack' """ import re import string from oslo_concurrency import lockutils from oslo_config import cfg from oslo_log import log as logging from oslo_utils import excutils from oslo_utils import importutils from cinder import exception from cinder.i18n import _ from cinder import interface from cinder.zonemanager.drivers.cisco import cisco_fabric_opts as fabric_opts from cinder.zonemanager.drivers.cisco import exception as c_exception from cinder.zonemanager.drivers.cisco import fc_zone_constants as ZoneConstant from cinder.zonemanager.drivers import driver_utils from cinder.zonemanager.drivers import fc_zone_driver from cinder.zonemanager import utils as zm_utils LOG = logging.getLogger(__name__) SUPPORTED_CHARS = string.ascii_letters + string.digits + '$' + '-' + '^' + '_' cisco_opts = [ cfg.StrOpt('cisco_sb_connector', default='cinder.zonemanager.drivers.cisco' '.cisco_fc_zone_client_cli.CiscoFCZoneClientCLI', help='Southbound connector for zoning operation'), ] CONF = cfg.CONF CONF.register_opts(cisco_opts, group='fc-zone-manager') @interface.fczmdriver class CiscoFCZoneDriver(fc_zone_driver.FCZoneDriver): """Cisco FC zone driver implementation. OpenStack Fibre Channel zone driver to manage FC zoning in Cisco SAN fabrics. .. code-block:: none Version history: 1.0 - Initial Cisco FC zone driver 1.1 - Added friendly zone name support """ VERSION = "1.1.0" # ThirdPartySystems wiki name CI_WIKI_NAME = "Cisco_ZM_CI" def __init__(self, **kwargs): super(CiscoFCZoneDriver, self).__init__(**kwargs) self.configuration = kwargs.get('configuration', None) if self.configuration: self.configuration.append_config_values(cisco_opts) # Adding a hack to handle parameters from super classes # in case configured with multi backends. fabric_names = self.configuration.safe_get('fc_fabric_names') activate = self.configuration.safe_get('cisco_zone_activate') prefix = self.configuration.safe_get('cisco_zone_name_prefix') base_san_opts = [] if not fabric_names: base_san_opts.append( cfg.StrOpt('fc_fabric_names', help='Comma separated list of fibre channel ' 'fabric names. This list of names is used to' ' retrieve other SAN credentials for connecting' ' to each SAN fabric' )) if not activate: base_san_opts.append( cfg.BoolOpt('cisco_zone_activate', default=True, help='Indicates whether zone should ' 'be activated or not')) if not prefix: base_san_opts.append( cfg.StrOpt('cisco_zone_name_prefix', default="openstack", help="A prefix to be used when naming zone")) if len(base_san_opts) > 0: CONF.register_opts(base_san_opts) self.configuration.append_config_values(base_san_opts) fabric_names = [x.strip() for x in self. configuration.fc_fabric_names.split(',')] # There can be more than one SAN in the network and we need to # get credentials for each SAN. if fabric_names: self.fabric_configs = fabric_opts.load_fabric_configurations( fabric_names) @staticmethod def get_driver_options(): return cisco_opts @lockutils.synchronized('cisco', 'fcfabric-', True) def add_connection(self, fabric, initiator_target_map, host_name=None, storage_system=None): """Concrete implementation of add_connection. Based on zoning policy and state of each I-T pair, list of zone members are created and pushed to the fabric to add zones. The new zones created or zones updated are activated based on isActivate flag set in cinder.conf returned by volume driver after attach operation. 
:param fabric: Fabric name from cinder.conf file :param initiator_target_map: Mapping of initiator to list of targets """ LOG.debug("Add connection for Fabric: %s", fabric) LOG.info("CiscoFCZoneDriver - Add connection " "for I-T map: %s", initiator_target_map) fabric_ip = self.fabric_configs[fabric].safe_get( 'cisco_fc_fabric_address') fabric_user = self.fabric_configs[fabric].safe_get( 'cisco_fc_fabric_user') fabric_pwd = self.fabric_configs[fabric].safe_get( 'cisco_fc_fabric_password') fabric_port = self.fabric_configs[fabric].safe_get( 'cisco_fc_fabric_port') zoning_policy = self.configuration.zoning_policy zoning_policy_fab = self.fabric_configs[fabric].safe_get( 'cisco_zoning_policy') if zoning_policy_fab: zoning_policy = zoning_policy_fab zone_name_prefix = self.fabric_configs[fabric].safe_get( 'cisco_zone_name_prefix') if not zone_name_prefix: zone_name_prefix = self.configuration.cisco_zone_name_prefix zoning_vsan = self.fabric_configs[fabric].safe_get('cisco_zoning_vsan') LOG.info("Zoning policy for Fabric %s", zoning_policy) statusmap_from_fabric = self.get_zoning_status( fabric_ip, fabric_user, fabric_pwd, fabric_port, zoning_vsan) if statusmap_from_fabric.get('session') == 'none': cfgmap_from_fabric = self.get_active_zone_set( fabric_ip, fabric_user, fabric_pwd, fabric_port, zoning_vsan) zone_names = [] if cfgmap_from_fabric.get('zones'): zone_names = cfgmap_from_fabric['zones'].keys() # based on zoning policy, create zone member list and # push changes to fabric. for initiator_key in initiator_target_map.keys(): zone_map = {} zone_update_map = {} initiator = initiator_key.lower() t_list = initiator_target_map[initiator_key] if zoning_policy == 'initiator-target': for t in t_list: target = t.lower() zone_members = [ zm_utils.get_formatted_wwn(initiator), zm_utils.get_formatted_wwn(target)] zone_name = ( driver_utils.get_friendly_zone_name( zoning_policy, initiator, target, host_name, storage_system, zone_name_prefix, SUPPORTED_CHARS)) if (len(cfgmap_from_fabric) == 0 or ( zone_name not in zone_names)): zone_map[zone_name] = zone_members else: # This is I-T zoning, skip if zone exists. LOG.info("Zone exists in I-T mode. " "Skipping zone creation %s", zone_name) elif zoning_policy == 'initiator': zone_members = [ zm_utils.get_formatted_wwn(initiator)] for t in t_list: target = t.lower() zone_members.append( zm_utils.get_formatted_wwn(target)) zone_name = ( driver_utils.get_friendly_zone_name( zoning_policy, initiator, target, host_name, storage_system, zone_name_prefix, SUPPORTED_CHARS)) # If zone exists, then perform an update_zone and add # new members into existing zone. 
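# Illustrative sketch (hypothetical WWNs) of the two zoning policies handled
# in the block above: "initiator-target" builds one two-member zone per I-T
# pair, while "initiator" builds a single zone containing the initiator plus
# every target it attaches to.
initiator_sketch = '10:00:8c:7c:ff:52:3b:01'
targets_sketch = ['50:06:0b:00:00:c2:66:04', '20:19:00:05:1e:e8:e3:29']

it_policy_zones = [[initiator_sketch, t] for t in targets_sketch]
initiator_policy_zone = [initiator_sketch] + targets_sketch

assert len(it_policy_zones) == 2
assert len(initiator_policy_zone) == 3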
if zone_name and (zone_name in zone_names): zone_members = list(filter( lambda x: x not in cfgmap_from_fabric['zones'][zone_name], zone_members)) if zone_members: zone_update_map[zone_name] = zone_members else: zone_map[zone_name] = zone_members else: msg = _("Zoning Policy: %s, not" " recognized") % zoning_policy LOG.error(msg) raise exception.FCZoneDriverException(msg) LOG.info("Zone map to add: %(zone_map)s", {'zone_map': zone_map}) LOG.info("Zone map to update add: %(zone_update_map)s", {'zone_update_map': zone_update_map}) if zone_map or zone_update_map: conn = None try: conn = importutils.import_object( self.configuration.cisco_sb_connector, ipaddress=fabric_ip, username=fabric_user, password=fabric_pwd, port=fabric_port, vsan=zoning_vsan) if zone_map: conn.add_zones( zone_map, self.configuration.cisco_zone_activate, zoning_vsan, cfgmap_from_fabric, statusmap_from_fabric) if zone_update_map: conn.update_zones( zone_update_map, self.configuration.cisco_zone_activate, zoning_vsan, ZoneConstant.ZONE_ADD, cfgmap_from_fabric, statusmap_from_fabric) conn.cleanup() except c_exception.CiscoZoningCliException as cisco_ex: msg = _("Exception: %s") % str(cisco_ex) raise exception.FCZoneDriverException(msg) except Exception: msg = _("Failed to add zoning configuration.") LOG.exception(msg) raise exception.FCZoneDriverException(msg) LOG.debug("Zones added successfully: %s", zone_map) else: LOG.debug("Zones already exist - Initiator Target Map: %s", initiator_target_map) else: LOG.debug("Zoning session exists VSAN: %s", zoning_vsan) @lockutils.synchronized('cisco', 'fcfabric-', True) def delete_connection(self, fabric, initiator_target_map, host_name=None, storage_system=None): """Concrete implementation of delete_connection. Based on zoning policy and state of each I-T pair, list of zones are created for deletion. The zones are either updated deleted based on the policy and attach/detach state of each I-T pair. :param fabric: Fabric name from cinder.conf file :param initiator_target_map: Mapping of initiator to list of targets """ LOG.debug("Delete connection for fabric: %s", fabric) LOG.info("CiscoFCZoneDriver - Delete connection for I-T map: %s", initiator_target_map) fabric_ip = self.fabric_configs[fabric].safe_get( 'cisco_fc_fabric_address') fabric_user = self.fabric_configs[fabric].safe_get( 'cisco_fc_fabric_user') fabric_pwd = self.fabric_configs[fabric].safe_get( 'cisco_fc_fabric_password') fabric_port = self.fabric_configs[fabric].safe_get( 'cisco_fc_fabric_port') zoning_policy = self.configuration.zoning_policy zoning_policy_fab = self.fabric_configs[fabric].safe_get( 'cisco_zoning_policy') zone_name_prefix = self.fabric_configs[fabric].safe_get( 'cisco_zone_name_prefix') if not zone_name_prefix: zone_name_prefix = self.configuration.cisco_zone_name_prefix if zoning_policy_fab: zoning_policy = zoning_policy_fab zoning_vsan = self.fabric_configs[fabric].safe_get('cisco_zoning_vsan') LOG.info("Zoning policy for fabric %s", zoning_policy) statusmap_from_fabric = self.get_zoning_status( fabric_ip, fabric_user, fabric_pwd, fabric_port, zoning_vsan) if statusmap_from_fabric.get('session') == 'none': cfgmap_from_fabric = self.get_active_zone_set( fabric_ip, fabric_user, fabric_pwd, fabric_port, zoning_vsan) zone_names = [] if cfgmap_from_fabric.get('zones'): zone_names = cfgmap_from_fabric['zones'].keys() # Based on zoning policy, get zone member list and push # changes to fabric. 
This operation could result in an update # for zone config with new member list or deleting zones from # active cfg. LOG.debug("zone config from Fabric: %s", cfgmap_from_fabric) for initiator_key in initiator_target_map.keys(): initiator = initiator_key.lower() formatted_initiator = zm_utils.get_formatted_wwn(initiator) zone_update_map = {} zones_to_delete = [] t_list = initiator_target_map[initiator_key] if zoning_policy == 'initiator-target': # In this case, zone needs to be deleted. for t in t_list: target = t.lower() zone_name = ( driver_utils.get_friendly_zone_name( zoning_policy, initiator, target, host_name, storage_system, zone_name_prefix, SUPPORTED_CHARS)) LOG.debug("Zone name to del: %s", zone_name) if (len(zone_names) > 0 and (zone_name in zone_names)): # delete zone. LOG.debug("Added zone to delete to list: %s", zone_name) zones_to_delete.append(zone_name) elif zoning_policy == 'initiator': zone_members = [formatted_initiator] for t in t_list: target = t.lower() zone_members.append( zm_utils.get_formatted_wwn(target)) zone_name = driver_utils.get_friendly_zone_name( zoning_policy, initiator, target, host_name, storage_system, zone_name_prefix, SUPPORTED_CHARS) # Check if there are zone members leftover after removal if (zone_names and (zone_name in zone_names)): filtered_members = list(filter( lambda x: x not in zone_members, cfgmap_from_fabric['zones'][zone_name])) # The assumption here is that initiator is always # there in the zone as it is 'initiator' policy. # If filtered list is empty, we remove that zone. # If there are other members leftover, then perform # update_zone to remove targets LOG.debug("Zone delete - I mode: filtered targets: %s", filtered_members) if filtered_members: remove_members = list(filter( lambda x: x in cfgmap_from_fabric['zones'][zone_name], zone_members)) if remove_members: # Do not want to remove the initiator remove_members.remove(formatted_initiator) LOG.debug("Zone members to remove: %s", remove_members) zone_update_map[zone_name] = remove_members LOG.debug("Filtered zone Map to update: %s", zone_update_map) else: zones_to_delete.append(zone_name) else: LOG.info("Zoning Policy: %s, not recognized", zoning_policy) LOG.debug("Zone map to remove update: %s", zone_update_map) LOG.debug("Final Zone list to delete: %s", zones_to_delete) conn = None try: conn = importutils.import_object( self.configuration.cisco_sb_connector, ipaddress=fabric_ip, username=fabric_user, password=fabric_pwd, port=fabric_port, vsan=zoning_vsan) # Update zone membership. if zone_update_map: conn.update_zones( zone_update_map, self.configuration.cisco_zone_activate, zoning_vsan, ZoneConstant.ZONE_REMOVE, cfgmap_from_fabric, statusmap_from_fabric) # Delete zones ~sk. if zones_to_delete: zone_name_string = '' num_zones = len(zones_to_delete) for i in range(0, num_zones): if i == 0: zone_name_string = ('%s%s' % ( zone_name_string, zones_to_delete[i])) else: zone_name_string = ('%s%s%s' % ( zone_name_string, ';', zones_to_delete[i])) conn.delete_zones(zone_name_string, self.configuration. cisco_zone_activate, zoning_vsan, cfgmap_from_fabric, statusmap_from_fabric) conn.cleanup() except Exception: msg = _("Failed to update or delete zoning configuration") LOG.exception(msg) raise exception.FCZoneDriverException(msg) LOG.debug("Zones deleted successfully: %s", zone_update_map) else: LOG.debug("Zoning session exists VSAN: %s", zoning_vsan) def get_san_context(self, target_wwn_list): """Lookup SAN context for visible end devices. 
Look up each SAN configured and return a map of SAN (fabric IP) to list of target WWNs visible to the fabric. """ formatted_target_list = [] fabric_map = {} fabrics = [x.strip() for x in self. configuration.fc_fabric_names.split(',')] LOG.debug("Fabric List: %s", fabrics) LOG.debug("Target wwn List: %s", target_wwn_list) if len(fabrics) > 0: for t in target_wwn_list: formatted_target_list.append( zm_utils.get_formatted_wwn(t.lower())) LOG.debug("Formatted Target wwn List: %s", formatted_target_list) for fabric_name in fabrics: fabric_ip = self.fabric_configs[fabric_name].safe_get( 'cisco_fc_fabric_address') fabric_user = self.fabric_configs[fabric_name].safe_get( 'cisco_fc_fabric_user') fabric_pwd = self.fabric_configs[fabric_name].safe_get( 'cisco_fc_fabric_password') fabric_port = self.fabric_configs[fabric_name].safe_get( 'cisco_fc_fabric_port') zoning_vsan = self.fabric_configs[fabric_name].safe_get( 'cisco_zoning_vsan') # Get name server data from fabric and get the targets # logged in. nsinfo = None try: conn = importutils.import_object( self.configuration.cisco_sb_connector, ipaddress=fabric_ip, username=fabric_user, password=fabric_pwd, port=fabric_port, vsan=zoning_vsan) nsinfo = conn.get_nameserver_info() LOG.debug("show fcns database info from fabric: %s", nsinfo) conn.cleanup() except c_exception.CiscoZoningCliException: with excutils.save_and_reraise_exception(): LOG.exception("Error getting show fcns database info.") except Exception: msg = _("Failed to get show fcns database info.") LOG.exception(msg) raise exception.FCZoneDriverException(msg) visible_targets = list(filter( lambda x: x in formatted_target_list, nsinfo)) if visible_targets: LOG.info("Filtered targets for SAN is: %s", {fabric_name: visible_targets}) # getting rid of the ':' before returning fabric_map[fabric_name] = list(map( lambda x: re.sub(r':', '', x), visible_targets)) else: LOG.debug("No targets are in the fcns info for SAN %s", fabric_name) LOG.debug("Return SAN context output: %s", fabric_map) return fabric_map def get_active_zone_set(self, fabric_ip, fabric_user, fabric_pwd, fabric_port, zoning_vsan): """Gets active zoneset config for vsan.""" cfgmap = {} conn = None try: LOG.debug("Southbound connector: %s", self.configuration.cisco_sb_connector) conn = importutils.import_object( self.configuration.cisco_sb_connector, ipaddress=fabric_ip, username=fabric_user, password=fabric_pwd, port=fabric_port, vsan=zoning_vsan) cfgmap = conn.get_active_zone_set() conn.cleanup() except Exception: msg = _("Failed to access active zoning configuration.") LOG.exception(msg) raise exception.FCZoneDriverException(msg) LOG.debug("Active zone set from fabric: %s", cfgmap) return cfgmap def get_zoning_status(self, fabric_ip, fabric_user, fabric_pwd, fabric_port, zoning_vsan): """Gets zoneset status and mode.""" statusmap = {} conn = None try: LOG.debug("Southbound connector: %s", self.configuration.cisco_sb_connector) conn = importutils.import_object( self.configuration.cisco_sb_connector, ipaddress=fabric_ip, username=fabric_user, password=fabric_pwd, port=fabric_port, vsan=zoning_vsan) statusmap = conn.get_zoning_status() conn.cleanup() except Exception: msg = _("Failed to access zoneset status:%s") LOG.exception(msg) raise exception.FCZoneDriverException(msg) LOG.debug("Zoneset status from fabric: %s", statusmap) return statusmap ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 
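A minimal, self-contained sketch (assuming a cinder environment is importable) of the two naming helpers the driver above relies on: cinder.zonemanager.utils.get_formatted_wwn() and cinder.zonemanager.drivers.driver_utils.get_friendly_zone_name(), both of which appear later in this archive. The WWN values are the example pair used in the Zone Manager docstrings; the supported character set is an assumed placeholder, since the driver's real SUPPORTED_CHARS constant is defined elsewhere in its module.

from cinder.zonemanager.drivers import driver_utils
from cinder.zonemanager import utils as zm_utils

# 16-character WWNs are lowercased and split into colon-separated octet pairs.
initiator = zm_utils.get_formatted_wwn('10008C7CFF523B01')
target = zm_utils.get_formatted_wwn('20240002AC000A50')
assert initiator == '10:00:8c:7c:ff:52:3b:01'

# Assumed placeholder for the FC switch vendor's supported character set.
SUPPORTED_CHARS = ('abcdefghijklmnopqrstuvwxyz'
                   'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
                   '0123456789_')

# 'initiator' policy: one zone per initiator; with no host name the zone name
# falls back to the configured prefix plus the bare initiator WWN.
print(driver_utils.get_friendly_zone_name(
    'initiator', initiator, target, None, None,
    'openstack', SUPPORTED_CHARS))
# -> openstack10008c7cff523b01

# 'initiator-target' policy: one zone per I-T pair, embedding the (truncated)
# host and storage system names when both are supplied.
print(driver_utils.get_friendly_zone_name(
    'initiator-target', initiator, target, 'os_host', 'array1',
    'openstack', SUPPORTED_CHARS))
# -> os_host_10008c7cff523b01_array1_20240002ac000a50

These are the names the driver collects into zone_map / zone_update_map before handing them to the southbound connector's add_zones(), update_zones() and delete_zones() calls shown above.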
cinder-27.0.0/cinder/zonemanager/drivers/cisco/exception.py0000664000175000017500000000136500000000000023774 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from cinder import exception from cinder.i18n import _ class CiscoZoningCliException(exception.CinderException): message = _("Cisco Fibre Channel Zoning CLI error: %(reason)s") ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/zonemanager/drivers/cisco/fc_zone_constants.py0000664000175000017500000000204200000000000025506 0ustar00zuulzuul00000000000000# (c) Copyright 2014 Cisco Systems Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # """ Common constants used by Cisco FC Zone Driver. """ ACTIVE_ZONE_CONFIG = 'active_zone_config' CFG_ZONESET = 'zoneset' CFG_ZONE = 'zone' CFG_ZONE_MEMBER = 'pwwn' CFG_ZONES = 'zones' ZONE_ADD = 'zoneadd' ZONE_REMOVE = 'zoneremove' """ CLI Commands for FC zoning operations. """ GET_ACTIVE_ZONE_CFG = 'show zoneset active vsan ' FCNS_SHOW = 'show fcns database vsan ' GET_ZONE_STATUS = 'show zone status vsan ' ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/zonemanager/drivers/driver_utils.py0000664000175000017500000000605200000000000023407 0ustar00zuulzuul00000000000000# (c) Copyright 2014 Brocade Communications Systems Inc. # All Rights Reserved. # # Copyright 2014 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # import re from oslo_log import log LOG = log.getLogger(__name__) def get_friendly_zone_name(zoning_policy, initiator, target, host_name, storage_system, zone_name_prefix, supported_chars): """Utility function implementation of _get_friendly_zone_name. 
Get friendly zone name is used to form the zone name based on the details provided by the caller :param zoning_policy: determines the zoning policy is either initiator-target or initiator :param initiator: initiator WWN :param target: target WWN :param host_name: Host name returned from Volume Driver :param storage_system: Storage name returned from Volume Driver :param zone_name_prefix: user defined zone prefix configured in cinder.conf :param supported_chars: Supported character set of FC switch vendor. Example: `abc123_-$`. These are defined in the FC zone drivers. """ if host_name is None: host_name = '' if storage_system is None: storage_system = '' if zoning_policy == 'initiator-target': host_name = host_name[:14] storage_system = storage_system[:14] if len(host_name) > 0 and len(storage_system) > 0: zone_name = (host_name + "_" + initiator.replace(':', '') + "_" + storage_system + "_" + target.replace(':', '')) else: zone_name = (zone_name_prefix + initiator.replace(':', '') + target.replace(':', '')) LOG.info("Zone name created using prefix because either " "host name or storage system is none.") else: host_name = host_name[:47] if len(host_name) > 0: zone_name = (host_name + "_" + initiator.replace(':', '')) else: zone_name = (zone_name_prefix + initiator.replace(':', '')) LOG.info("Zone name created using prefix because host " "name is none.") LOG.info("Friendly zone name after forming: %(zonename)s", {'zonename': zone_name}) zone_name = re.sub('[^%s]' % supported_chars, '', zone_name) return zone_name ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/zonemanager/drivers/fc_zone_driver.py0000664000175000017500000000350700000000000023674 0ustar00zuulzuul00000000000000# (c) Copyright 2014 Brocade Communications Systems Inc. # All Rights Reserved. # # Copyright 2014 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # """ Base Zone Driver is responsible to manage access control using FC zoning Vendor specific implementations should extend this class to provide concrete implementation for add_connection and delete_connection interfaces. **Related Flags** :zoning_policy: Used by: class: 'FCZoneDriver'. Defaults to 'none' :zone_driver: Used by: class: 'FCZoneDriver'. Defaults to 'none' """ from oslo_log import log as logging from cinder.interface import fczm_driver from cinder.zonemanager import fc_common LOG = logging.getLogger(__name__) class FCZoneDriver( fc_common.FCCommon, fczm_driver.FibreChannelZoneManagerDriver): """Interface to manage Connection control during attach/detach.""" # If a driver hasn't maintained their CI system, this will get set # to False, which prevents the driver from starting. # Add enable_unsupported_driver = True in cinder.conf to get the # unsupported driver started. 
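# Illustrative cinder.conf snippet (the option is registered under the
# [fc-zone-manager] group by cinder.zonemanager.fc_zone_manager, shown later
# in this archive):
#
#     [fc-zone-manager]
#     enable_unsupported_driver = True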
SUPPORTED = True def __init__(self, **kwargs): super(FCZoneDriver, self).__init__(**kwargs) LOG.debug("Initializing FCZoneDriver") @property def supported(self): return self.SUPPORTED ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/zonemanager/fc_common.py0000664000175000017500000000160600000000000021156 0ustar00zuulzuul00000000000000# (c) Copyright 2014 Brocade Communications Systems Inc. # All Rights Reserved. # # Copyright 2014 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # class FCCommon(object): """Common interface for FC operations.""" VERSION = "1.0" def __init__(self, **kwargs): pass def get_version(self): return self.VERSION ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/zonemanager/fc_san_lookup_service.py0000664000175000017500000000700600000000000023560 0ustar00zuulzuul00000000000000# (c) Copyright 2014 Brocade Communications Systems Inc. # All Rights Reserved. # # Copyright 2014 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # """ Base Lookup Service for name server lookup to find the initiator to target port mapping for available SAN contexts. Vendor specific lookup classes are expected to implement the interfaces defined in this class. """ from oslo_log import log as logging from oslo_utils import importutils from cinder import exception from cinder.i18n import _ from cinder.volume import configuration as config from cinder.zonemanager import fc_common from cinder.zonemanager import fc_zone_manager LOG = logging.getLogger(__name__) class FCSanLookupService(fc_common.FCCommon): """Base Lookup Service. Base Lookup Service for name server lookup to find the initiator to target port mapping for available SAN contexts. """ lookup_service = None def __init__(self, **kwargs): super(FCSanLookupService, self).__init__(**kwargs) opts = fc_zone_manager.zone_manager_opts self.configuration = config.Configuration(opts, 'fc-zone-manager') def get_device_mapping_from_network(self, initiator_list, target_list): """Get device mapping from FC network. Gets a filtered list of initiator ports and target ports for each SAN available. :param initiator_list: list of initiator port WWN :param target_list: list of target port WWN :returns: device wwn map in following format .. code-block:: python { : { 'initiator_port_wwn_list': ('200000051E55A100', '200000051E55A121'..) 
'target_port_wwn_list': ('100000051E55A100', '100000051E55A121'..) } } :raises Exception: when a lookup service implementation is not specified in cinder.conf:fc_san_lookup_service """ # Initialize vendor specific implementation of FCZoneDriver if (self.configuration.fc_san_lookup_service): lookup_service = self.configuration.fc_san_lookup_service LOG.debug("Lookup service to invoke: " "%s", lookup_service) self.lookup_service = importutils.import_object( lookup_service, configuration=self.configuration) else: msg = _("Lookup service not configured. Config option for " "fc_san_lookup_service needs to specify a concrete " "implementation of the lookup service.") LOG.error(msg) raise exception.FCSanLookupServiceException(msg) try: device_map = self.lookup_service.get_device_mapping_from_network( initiator_list, target_list) except Exception as e: LOG.exception('Unable to get device mapping from network.') raise exception.FCSanLookupServiceException(e) return device_map ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/zonemanager/fc_zone_manager.py0000664000175000017500000003503000000000000022331 0ustar00zuulzuul00000000000000# (c) Copyright 2014 Brocade Communications Systems Inc. # All Rights Reserved. # # Copyright 2014 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # """ ZoneManager is responsible to manage access control using FC zoning when zoning mode is set as 'fabric'. ZoneManager provides interfaces to add connection and remove connection for given initiator and target list associated with a FC volume attach and detach operation. **Related Flags** :zone_driver: Used by:class:`ZoneManager`. Defaults to `cinder.zonemanager.drivers.brocade.brcd_fc_zone_driver.BrcdFCZoneDriver` :zoning_policy: Used by: class: 'ZoneManager'. Defaults to 'none' """ from oslo_config import cfg from oslo_log import log as logging from oslo_utils import importutils from cinder import exception from cinder.i18n import _ from cinder.volume import configuration as config from cinder.zonemanager import fc_common import cinder.zonemanager.fczm_constants as zone_constant LOG = logging.getLogger(__name__) zone_manager_opts = [ cfg.StrOpt('zone_driver', default='cinder.zonemanager.drivers.brocade.brcd_fc_zone_driver' '.BrcdFCZoneDriver', help='FC Zone Driver responsible for zone management'), cfg.StrOpt('zoning_policy', default='initiator-target', help='Zoning policy configured by user; valid values include ' '"initiator-target" or "initiator"'), cfg.StrOpt('fc_fabric_names', help='Comma separated list of Fibre Channel fabric names.' 
' This list of names is used to retrieve other SAN credentials' ' for connecting to each SAN fabric'), cfg.StrOpt('fc_san_lookup_service', default='cinder.zonemanager.drivers.brocade' '.brcd_fc_san_lookup_service.BrcdFCSanLookupService', help='FC SAN Lookup Service'), cfg.BoolOpt('enable_unsupported_driver', default=False, help="Set this to True when you want to allow an unsupported " "zone manager driver to start. Drivers that haven't " "maintained a working CI system and testing are marked " "as unsupported until CI is working again. This also " "marks a driver as deprecated and may be removed in the " "next release."), ] CONF = cfg.CONF CONF.register_opts(zone_manager_opts, group='fc-zone-manager') class ZoneManager(fc_common.FCCommon): """Manages Connection control during attach/detach. Version History: 1.0 - Initial version 1.0.1 - Added __new__ for singleton 1.0.2 - Added friendly zone name """ VERSION = "1.0.2" driver = None _initialized = False fabric_names = [] def __new__(class_, *args, **kwargs): if not hasattr(class_, "_instance"): class_._instance = object.__new__(class_) return class_._instance def __init__(self, **kwargs): """Load the driver from the one specified in args, or from flags.""" super(ZoneManager, self).__init__(**kwargs) self.configuration = config.Configuration(zone_manager_opts, 'fc-zone-manager') self.set_initialized(False) self._build_driver() def _build_driver(self): zone_driver = self.configuration.zone_driver LOG.debug("Zone driver from config: %(driver)s", {'driver': zone_driver}) zm_config = config.Configuration(zone_manager_opts, 'fc-zone-manager') # Initialize vendor specific implementation of FCZoneDriver self.driver = importutils.import_object( zone_driver, configuration=zm_config) if not self.driver.supported: self._log_unsupported_driver_warning() if not self.configuration.enable_unsupported_driver: LOG.error("Unsupported drivers are disabled." " You can re-enable by adding " "enable_unsupported_driver=True to the " "fc-zone-manager section in cinder.conf", resource={'type': 'zone_manager', 'id': self.__class__.__name__}) return self.set_initialized(True) @property def initialized(self): return self._initialized def set_initialized(self, value=True): self._initialized = value def _require_initialized(self): """Verifies that the zone manager has been properly initialized.""" if not self.initialized: LOG.error("Fibre Channel Zone Manager is not initialized.") raise exception.ZoneManagerNotInitialized() else: self._log_unsupported_driver_warning() def _log_unsupported_driver_warning(self): """Annoy the log about unsupported fczm drivers.""" if not self.driver.supported: LOG.warning("Zone Manager driver (%(driver_name)s %(version)s)" " is currently unsupported and may be removed in " "the next release of OpenStack. Use at your own " "risk.", {'driver_name': self.driver.__class__.__name__, 'version': self.driver.get_version()}, resource={'type': 'zone_manager', 'id': self.driver.__class__.__name__}) def get_zoning_state_ref_count(self, initiator_wwn, target_wwn): """Zone management state check. Performs state check for given I-T pair to return the current count of active attach for the pair. """ # TODO(sk): ref count state management count = 0 # check the state for I-T pair return count def add_connection(self, conn_info): """Add connection control. Adds connection control for the given initiator target map. initiator_target_map - each initiator WWN mapped to a list of one or more target WWN: .. 
code-block:: python e.g.: { '10008c7cff523b01': ['20240002ac000a50', '20240002ac000a40'] } """ connected_fabric = None host_name = None storage_system = None try: # Make sure the driver is loaded and we are initialized self._log_unsupported_driver_warning() self._require_initialized() except exception.ZoneManagerNotInitialized: LOG.error("Cannot add Fibre Channel Zone because the " "Zone Manager is not initialized properly.", resource={'type': 'zone_manager', 'id': self.__class__.__name__}) return try: initiator_target_map = ( conn_info[zone_constant.DATA][zone_constant.IT_MAP]) if zone_constant.HOST in conn_info[zone_constant.DATA]: host_name = conn_info[ zone_constant.DATA][ zone_constant.HOST].replace(" ", "_") if zone_constant.STORAGE in conn_info[zone_constant.DATA]: storage_system = ( conn_info[ zone_constant.DATA][ zone_constant.STORAGE].replace(" ", "_")) for initiator in initiator_target_map.keys(): target_list = initiator_target_map[initiator] LOG.debug("Target list : %(targets)s", {'targets': target_list}) # get SAN context for the target list fabric_map = self.get_san_context(target_list) LOG.debug("Fabric map after context lookup: %(fabricmap)s", {'fabricmap': fabric_map}) # iterate over each SAN and apply connection control for fabric in fabric_map.keys(): connected_fabric = fabric t_list = fabric_map[fabric] # get valid I-T map to add connection control i_t_map = {initiator: t_list} valid_i_t_map = self.get_valid_initiator_target_map( i_t_map, True) LOG.info("Final filtered map for fabric: %(i_t_map)s", {'i_t_map': valid_i_t_map}) # Call driver to add connection control self.driver.add_connection(fabric, valid_i_t_map, host_name, storage_system) LOG.info("Add connection: finished iterating " "over all target list") except Exception as e: msg = _("Failed adding connection for fabric=%(fabric)s: " "Error: %(err)s") % {'fabric': connected_fabric, 'err': str(e)} LOG.error(msg) raise exception.ZoneManagerException(reason=msg) def delete_connection(self, conn_info): """Delete connection. Updates/deletes connection control for the given initiator target map. initiator_target_map - each initiator WWN mapped to a list of one or more target WWN: .. 
code-block:: python e.g.: { '10008c7cff523b01': ['20240002ac000a50', '20240002ac000a40'] } """ connected_fabric = None host_name = None storage_system = None try: # Make sure the driver is loaded and we are initialized self._log_unsupported_driver_warning() self._require_initialized() except exception.ZoneManagerNotInitialized: LOG.error("Cannot delete fibre channel zone because the " "Zone Manager is not initialized properly.", resource={'type': 'zone_manager', 'id': self.__class__.__name__}) return try: initiator_target_map = ( conn_info[zone_constant.DATA][zone_constant.IT_MAP]) if zone_constant.HOST in conn_info[zone_constant.DATA]: host_name = conn_info[zone_constant.DATA][zone_constant.HOST] if zone_constant.STORAGE in conn_info[zone_constant.DATA]: storage_system = ( conn_info[ zone_constant.DATA][ zone_constant.STORAGE].replace(" ", "_")) for initiator in initiator_target_map.keys(): target_list = initiator_target_map[initiator] LOG.info("Delete connection target list: %(targets)s", {'targets': target_list}) # get SAN context for the target list fabric_map = self.get_san_context(target_list) LOG.debug("Delete connection fabric map from SAN " "context: %(fabricmap)s", {'fabricmap': fabric_map}) # iterate over each SAN and apply connection control for fabric in fabric_map.keys(): connected_fabric = fabric t_list = fabric_map[fabric] # get valid I-T map to add connection control i_t_map = {initiator: t_list} valid_i_t_map = self.get_valid_initiator_target_map( i_t_map, False) LOG.info("Final filtered map for delete connection: " "%(i_t_map)s", {'i_t_map': valid_i_t_map}) # Call driver to delete connection control if len(valid_i_t_map) > 0: self.driver.delete_connection(fabric, valid_i_t_map, host_name, storage_system) LOG.debug("Delete connection - finished iterating over all" " target list") except Exception as e: msg = _("Failed removing connection for fabric=%(fabric)s: " "Error: %(err)s") % {'fabric': connected_fabric, 'err': str(e)} LOG.error(msg) raise exception.ZoneManagerException(reason=msg) def get_san_context(self, target_wwn_list): """SAN lookup for end devices. Look up each SAN configured and return a map of SAN (fabric IP) to list of target WWNs visible to the fabric. """ fabric_map = self.driver.get_san_context(target_wwn_list) LOG.debug("Got SAN context: %(fabricmap)s", {'fabricmap': fabric_map}) return fabric_map def get_valid_initiator_target_map(self, initiator_target_map, add_control): """Reference count check for end devices. 
Looks up the reference count for each initiator-target pair from the map and returns a filtered list based on the operation type add_control - operation type can be true for add connection control and false for remove connection control """ filtered_i_t_map = {} for initiator in initiator_target_map.keys(): t_list = initiator_target_map[initiator] for target in t_list: count = self.get_zoning_state_ref_count(initiator, target) if add_control: if count > 0: t_list.remove(target) # update count = count + 1 else: if count > 1: t_list.remove(target) # update count = count - 1 if t_list: filtered_i_t_map[initiator] = t_list else: LOG.info("No targets to add or remove connection for " "initiator: %(init_wwn)s", {'init_wwn': initiator}) return filtered_i_t_map ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/zonemanager/fczm_constants.py0000664000175000017500000000134100000000000022245 0ustar00zuulzuul00000000000000# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # """ Common constants used by FC Zone Manager. """ IT_MAP = 'initiator_target_map' DATA = 'data' HOST = 'host_name' STORAGE = 'storage_system' SYSTEM = 'system' ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/cinder/zonemanager/utils.py0000664000175000017500000000734700000000000020366 0ustar00zuulzuul00000000000000# (c) Copyright 2012-2014 Hewlett-Packard Development Company, L.P. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # """ Utility functions related to the Zone Manager. 
""" from oslo_log import log from cinder.volume import configuration from cinder.volume import manager from cinder.zonemanager import fc_san_lookup_service from cinder.zonemanager import fc_zone_manager LOG = log.getLogger(__name__) def create_zone_manager(): """If zoning is enabled, build the Zone Manager.""" config = configuration.Configuration(manager.volume_manager_opts) LOG.debug("Zoning mode: %s.", config.safe_get('zoning_mode')) if config.safe_get('zoning_mode') == 'fabric': LOG.debug("FC Zone Manager enabled.") zm = fc_zone_manager.ZoneManager() if zm.initialized: LOG.info("Using FC Zone Manager %(zm_version)s," " Driver %(drv_name)s %(drv_version)s.", {'zm_version': zm.get_version(), 'drv_name': zm.driver.__class__.__name__, 'drv_version': zm.driver.get_version()}) return zm else: LOG.debug("FC Zone Manager %(zm_version)s disabled", {"zm_version": zm.get_version()}) return None else: LOG.debug("FC Zone Manager not enabled in cinder.conf.") return None def create_lookup_service(): config = configuration.Configuration(manager.volume_manager_opts) LOG.debug("Zoning mode: %s.", config.safe_get('zoning_mode')) if config.safe_get('zoning_mode') == 'fabric': LOG.debug("FC Lookup Service enabled.") lookup = fc_san_lookup_service.FCSanLookupService() LOG.info("Using FC lookup service %s.", lookup.lookup_service) return lookup else: LOG.debug("FC Lookup Service not enabled in cinder.conf.") return None def get_formatted_wwn(wwn_str): """Utility API that formats WWN to insert ':'.""" if (len(wwn_str) != 16): return wwn_str.lower() else: return (':'.join([wwn_str[i:i + 2] for i in range(0, len(wwn_str), 2)])).lower() def add_fc_zone(connection_info): """Utility function to add a FC Zone.""" if connection_info: vol_type = connection_info.get('driver_volume_type', None) if vol_type == 'fibre_channel': if connection_info['data'].get('initiator_target_map'): zm = create_zone_manager() if zm: LOG.debug("add_fc_zone connection info: %(conninfo)s.", {'conninfo': connection_info}) zm.add_connection(connection_info) def remove_fc_zone(connection_info): """Utility function for FC drivers to remove zone.""" if connection_info: vol_type = connection_info.get('driver_volume_type', None) if vol_type == 'fibre_channel': if connection_info['data'].get('initiator_target_map'): zm = create_zone_manager() if zm: LOG.debug("remove_fc_zone connection info: %(conninfo)s.", {'conninfo': connection_info}) zm.delete_connection(connection_info) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315577.8271253 cinder-27.0.0/cinder.egg-info/0000775000175000017500000000000000000000000016025 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315576.0 cinder-27.0.0/cinder.egg-info/PKG-INFO0000644000175000017500000001336000000000000017123 0ustar00zuulzuul00000000000000Metadata-Version: 2.1 Name: cinder Version: 27.0.0 Summary: OpenStack Block Storage Home-page: https://docs.openstack.org/cinder/latest/ Author: OpenStack Author-email: openstack-discuss@lists.openstack.org Project-URL: Source, https://opendev.org/openstack/cinder Project-URL: Tracker, https://bugs.launchpad.net/cinder Classifier: Environment :: OpenStack Classifier: Intended Audience :: Information Technology Classifier: Intended Audience :: System Administrators Classifier: License :: OSI Approved :: Apache Software License Classifier: Operating System :: POSIX :: Linux Classifier: Programming Language :: Python Classifier: 
Programming Language :: Python :: Implementation :: CPython Classifier: Programming Language :: Python :: 3 :: Only Classifier: Programming Language :: Python :: 3 Classifier: Programming Language :: Python :: 3.10 Classifier: Programming Language :: Python :: 3.11 Classifier: Programming Language :: Python :: 3.12 Requires-Python: >=3.10 License-File: LICENSE Requires-Dist: pbr>=5.8.0 Requires-Dist: decorator>=4.4.2 Requires-Dist: eventlet!=0.32.0,>=0.30.1 Requires-Dist: greenlet>=0.4.16 Requires-Dist: iso8601>=0.1.12 Requires-Dist: jsonschema>=3.2.0 Requires-Dist: keystoneauth1>=4.2.1 Requires-Dist: keystonemiddleware>=9.1.0 Requires-Dist: lxml>=4.5.2 Requires-Dist: oslo.config>=8.3.2 Requires-Dist: oslo.concurrency>=4.5.0 Requires-Dist: oslo.context>=3.4.0 Requires-Dist: oslo.db>=11.0.0 Requires-Dist: oslo.log>=4.6.1 Requires-Dist: oslo.messaging>=14.6.0 Requires-Dist: oslo.middleware>=4.1.1 Requires-Dist: oslo.policy>=4.5.0 Requires-Dist: oslo.privsep>=2.6.2 Requires-Dist: oslo.reports>=3.2.0 Requires-Dist: oslo.rootwrap>=6.2.0 Requires-Dist: oslo.serialization>=4.2.0 Requires-Dist: oslo.service>=2.8.0 Requires-Dist: oslo.upgradecheck>=1.1.1 Requires-Dist: oslo.utils>=6.0.0 Requires-Dist: oslo.versionedobjects>=2.4.0 Requires-Dist: osprofiler>=3.4.0 Requires-Dist: packaging>=20.4 Requires-Dist: paramiko>=2.7.2 Requires-Dist: Paste>=3.4.3 Requires-Dist: PasteDeploy>=2.1.0 Requires-Dist: pyparsing>=2.4.7 Requires-Dist: python-barbicanclient>=5.0.1 Requires-Dist: python-glanceclient>=3.2.2 Requires-Dist: python-keystoneclient>=4.1.1 Requires-Dist: python-novaclient>=18.2.0 Requires-Dist: python-swiftclient>=3.10.1 Requires-Dist: requests>=2.25.1 Requires-Dist: Routes>=2.4.1 Requires-Dist: taskflow>=4.5.0 Requires-Dist: rtslib-fb>=2.1.74 Requires-Dist: SQLAlchemy>=1.4.23 Requires-Dist: stevedore>=3.2.2 Requires-Dist: tabulate>=0.8.7 Requires-Dist: tenacity>=6.3.1 Requires-Dist: WebOb>=1.8.6 Requires-Dist: oslo.i18n>=5.1.0 Requires-Dist: oslo.vmware>=3.10.0 Requires-Dist: os-brick>=6.10.0 Requires-Dist: os-win>=5.5.0 Requires-Dist: tooz>=2.8.0 Requires-Dist: google-api-python-client>=1.11.0 Requires-Dist: castellan>=3.7.0 Requires-Dist: cryptography>=3.1 Requires-Dist: cursive>=0.2.2 Requires-Dist: zstd>=1.4.5.1 Requires-Dist: boto3>=1.18.49 Requires-Dist: distro>=1.8.0 Requires-Dist: tzdata>=2022.4 Provides-Extra: all Requires-Dist: websocket-client>=1.3.2; extra == "all" Requires-Dist: pyOpenSSL>=17.5.0; extra == "all" Requires-Dist: storops>=0.5.10; extra == "all" Requires-Dist: pywbem>=0.7.0; extra == "all" Requires-Dist: python-3parclient>=4.2.10; extra == "all" Requires-Dist: krest>=1.3.0; extra == "all" Requires-Dist: infinisdk>=103.0.1; extra == "all" Requires-Dist: py-pure-client>=1.47.0; extra == "all" Requires-Dist: rsd-lib>=1.1.0; extra == "all" Requires-Dist: storpool>=7.1.0; extra == "all" Requires-Dist: storpool.spopenstack>=2.2.1; extra == "all" Requires-Dist: dfs-sdk>=1.2.25; extra == "all" Requires-Dist: rbd-iscsi-client>=0.1.8; extra == "all" Requires-Dist: python-linstor>=1.7.0; extra == "all" Requires-Dist: psutil>=5.7.2; extra == "all" Provides-Extra: datacore Requires-Dist: websocket-client>=1.3.2; extra == "datacore" Provides-Extra: powermax Requires-Dist: pyOpenSSL>=17.5.0; extra == "powermax" Provides-Extra: vnx Requires-Dist: storops>=0.5.10; extra == "vnx" Provides-Extra: unity Requires-Dist: storops>=0.5.10; extra == "unity" Provides-Extra: fujitsu Requires-Dist: pywbem>=0.7.0; extra == "fujitsu" Provides-Extra: hpe3par Requires-Dist: python-3parclient>=4.2.10; 
extra == "hpe3par" Provides-Extra: kaminario Requires-Dist: krest>=1.3.0; extra == "kaminario" Provides-Extra: ds8k Requires-Dist: pyOpenSSL>=17.5.0; extra == "ds8k" Provides-Extra: infinidat Requires-Dist: infinisdk>=103.0.1; extra == "infinidat" Provides-Extra: pure Requires-Dist: py-pure-client>=1.47.0; extra == "pure" Provides-Extra: rsd Requires-Dist: rsd-lib>=1.1.0; extra == "rsd" Provides-Extra: storpool Requires-Dist: storpool>=7.1.0; extra == "storpool" Requires-Dist: storpool.spopenstack>=2.2.1; extra == "storpool" Provides-Extra: datera Requires-Dist: dfs-sdk>=1.2.25; extra == "datera" Provides-Extra: rbd-iscsi Requires-Dist: rbd-iscsi-client>=0.1.8; extra == "rbd-iscsi" Provides-Extra: linstor Requires-Dist: python-linstor>=1.7.0; extra == "linstor" Provides-Extra: quobyte Requires-Dist: psutil>=5.7.2; extra == "quobyte" Provides-Extra: test Requires-Dist: hacking<7.1.0,>=7.0.0; extra == "test" Requires-Dist: flake8-import-order<0.19.0; extra == "test" Requires-Dist: flake8-logging-format>=0.6.0; extra == "test" Requires-Dist: stestr>=3.2.1; extra == "test" Requires-Dist: coverage>=5.5; extra == "test" Requires-Dist: ddt>=1.4.4; extra == "test" Requires-Dist: fixtures>=3.0.0; extra == "test" Requires-Dist: oslotest>=4.5.0; extra == "test" Requires-Dist: PyMySQL>=0.10.0; extra == "test" Requires-Dist: psycopg2-binary>=2.8.5; extra == "test" Requires-Dist: SQLAlchemy-Utils>=0.37.8; extra == "test" Requires-Dist: testtools>=2.4.0; extra == "test" Requires-Dist: doc8>=0.8.1; extra == "test" Requires-Dist: mypy<1.18.0,>=1.7.0; extra == "test" Requires-Dist: moto>=5.0.0; extra == "test" Requires-Dist: distro>=1.8.0; extra == "test" file: README.rst ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315576.0 cinder-27.0.0/cinder.egg-info/SOURCES.txt0000664000175000017500000064142700000000000017727 0ustar00zuulzuul00000000000000.coveragerc .pylintrc .stestr.conf .zuul.yaml AUTHORS CONTRIBUTING.rst ChangeLog HACKING.rst LICENSE README.rst bindep.txt driver-requirements.txt mypy-files.txt pyproject.toml reno.yaml requirements.txt ruff.toml setup.cfg setup.py test-requirements.txt tox.ini api-ref/source/api_microversion_history.rst api-ref/source/conf.py api-ref/source/index.rst api-ref/source/status.yaml api-ref/source/v2/api-versions.inc api-ref/source/v2/availability-zones-v2.inc api-ref/source/v2/capabilities-v2.inc api-ref/source/v2/consistencygroups-v2.inc api-ref/source/v2/ext-backups-actions-v2.inc api-ref/source/v2/ext-backups.inc api-ref/source/v2/hosts.inc api-ref/source/v2/index.rst api-ref/source/v2/limits.inc api-ref/source/v2/os-cgsnapshots-v2.inc api-ref/source/v2/os-vol-pool-v2.inc api-ref/source/v2/os-vol-transfer-v2.inc api-ref/source/v2/parameters.yaml api-ref/source/v2/qos-specs-v2-qos-specs.inc api-ref/source/v2/quota-classes.inc api-ref/source/v2/quota-sets.inc api-ref/source/v2/volume-manage.inc api-ref/source/v2/volume-type-access.inc api-ref/source/v2/volumes-v2-extensions.inc api-ref/source/v2/volumes-v2-snapshots-actions.inc api-ref/source/v2/volumes-v2-snapshots.inc api-ref/source/v2/volumes-v2-types.inc api-ref/source/v2/volumes-v2-versions.inc api-ref/source/v2/volumes-v2-volumes-actions.inc api-ref/source/v2/volumes-v2-volumes.inc api-ref/source/v2/samples/availability-zone-list-response.json api-ref/source/v2/samples/backend-capabilities-response.json api-ref/source/v2/samples/backup-create-request.json api-ref/source/v2/samples/backup-create-response.json 
api-ref/source/v2/samples/backup-force-delete-request.json api-ref/source/v2/samples/backup-record-export-response.json api-ref/source/v2/samples/backup-record-import-request.json api-ref/source/v2/samples/backup-record-import-response.json api-ref/source/v2/samples/backup-reset-status-request.json api-ref/source/v2/samples/backup-restore-request.json api-ref/source/v2/samples/backup-restore-response.json api-ref/source/v2/samples/backup-show-response.json api-ref/source/v2/samples/backups-list-detailed-response.json api-ref/source/v2/samples/backups-list-response.json api-ref/source/v2/samples/cgsnapshots-create-request.json api-ref/source/v2/samples/cgsnapshots-create-response.json api-ref/source/v2/samples/cgsnapshots-list-detailed-response.json api-ref/source/v2/samples/cgsnapshots-list-response.json api-ref/source/v2/samples/cgsnapshots-show-response.json api-ref/source/v2/samples/consistency-group-create-from-src-request.json api-ref/source/v2/samples/consistency-group-create-request.json api-ref/source/v2/samples/consistency-group-create-response.json api-ref/source/v2/samples/consistency-group-delete-request.json api-ref/source/v2/samples/consistency-group-show-response.json api-ref/source/v2/samples/consistency-group-update-request.json api-ref/source/v2/samples/consistency-groups-list-detailed-response.json api-ref/source/v2/samples/consistency-groups-list-response.json api-ref/source/v2/samples/encryption-type-create-request.json api-ref/source/v2/samples/encryption-type-create-response.json api-ref/source/v2/samples/encryption-type-show-response.json api-ref/source/v2/samples/encryption-type-update-request.json api-ref/source/v2/samples/encryption-type-update-response.json api-ref/source/v2/samples/extensions-list-response.json api-ref/source/v2/samples/host-attach-request.json api-ref/source/v2/samples/hosts-get-response.json api-ref/source/v2/samples/hosts-list-response.json api-ref/source/v2/samples/image-metadata-show-request.json api-ref/source/v2/samples/image-metadata-show-response.json api-ref/source/v2/samples/limits-show-response.json api-ref/source/v2/samples/pools-list-detailed-response.json api-ref/source/v2/samples/qos-create-request.json api-ref/source/v2/samples/qos-create-response.json api-ref/source/v2/samples/qos-list-response.json api-ref/source/v2/samples/qos-show-response.json api-ref/source/v2/samples/qos-unset-request.json api-ref/source/v2/samples/qos-update-request.json api-ref/source/v2/samples/qos-update-response.json api-ref/source/v2/samples/qos_show_response.json api-ref/source/v2/samples/quota-classes-show-response.json api-ref/source/v2/samples/quota-classes-update-request.json api-ref/source/v2/samples/quota-classes-update-response.json api-ref/source/v2/samples/quotas-show-defaults-response.json api-ref/source/v2/samples/quotas-show-response.json api-ref/source/v2/samples/quotas-update-request.json api-ref/source/v2/samples/quotas-update-response.json api-ref/source/v2/samples/quotas-user-show-detailed-response.json api-ref/source/v2/samples/quotas-user-show-response.json api-ref/source/v2/samples/snapshot-create-request.json api-ref/source/v2/samples/snapshot-create-response.json api-ref/source/v2/samples/snapshot-metadata-create-request.json api-ref/source/v2/samples/snapshot-metadata-create-response.json api-ref/source/v2/samples/snapshot-metadata-show-response.json api-ref/source/v2/samples/snapshot-metadata-update-request.json api-ref/source/v2/samples/snapshot-metadata-update-response.json 
api-ref/source/v2/samples/snapshot-show-response.json api-ref/source/v2/samples/snapshot-status-reset-request.json api-ref/source/v2/samples/snapshot-update-request.json api-ref/source/v2/samples/snapshot-update-response.json api-ref/source/v2/samples/snapshots-list-detailed-response.json api-ref/source/v2/samples/snapshots-list-response.json api-ref/source/v2/samples/user-quotas-show-response.json api-ref/source/v2/samples/user-quotas-update-request.json api-ref/source/v2/samples/user-quotas-update-response.json api-ref/source/v2/samples/version-show-response.json api-ref/source/v2/samples/versions-response.json api-ref/source/v2/samples/volume-attach-request.json api-ref/source/v2/samples/volume-bootable-status-update-request.json api-ref/source/v2/samples/volume-create-request.json api-ref/source/v2/samples/volume-create-response.json api-ref/source/v2/samples/volume-detach-request.json api-ref/source/v2/samples/volume-extend-request.json api-ref/source/v2/samples/volume-force-delete-request.json api-ref/source/v2/samples/volume-force-detach-request.json api-ref/source/v2/samples/volume-image-metadata-set-request.json api-ref/source/v2/samples/volume-image-metadata-unset-request.json api-ref/source/v2/samples/volume-image-metadata-update-response.json api-ref/source/v2/samples/volume-manage-request.json api-ref/source/v2/samples/volume-manage-response.json api-ref/source/v2/samples/volume-metadata-create-request.json api-ref/source/v2/samples/volume-metadata-create-response.json api-ref/source/v2/samples/volume-metadata-show-key-response.json api-ref/source/v2/samples/volume-metadata-show-response.json api-ref/source/v2/samples/volume-metadata-update-key-request.json api-ref/source/v2/samples/volume-metadata-update-key-response.json api-ref/source/v2/samples/volume-metadata-update-request.json api-ref/source/v2/samples/volume-metadata-update-response.json api-ref/source/v2/samples/volume-os-migrate_volume-request.json api-ref/source/v2/samples/volume-os-migrate_volume_completion-request.json api-ref/source/v2/samples/volume-os-retype-request.json api-ref/source/v2/samples/volume-show-response.json api-ref/source/v2/samples/volume-status-reset-request.json api-ref/source/v2/samples/volume-transfer-accept-request.json api-ref/source/v2/samples/volume-transfer-accept-response.json api-ref/source/v2/samples/volume-transfer-create-request.json api-ref/source/v2/samples/volume-transfer-create-response.json api-ref/source/v2/samples/volume-transfer-show-response.json api-ref/source/v2/samples/volume-transfers-list-detailed-response.json api-ref/source/v2/samples/volume-transfers-list-response.json api-ref/source/v2/samples/volume-type-access-add-request.json api-ref/source/v2/samples/volume-type-access-delete-request.json api-ref/source/v2/samples/volume-type-access-list-response.json api-ref/source/v2/samples/volume-type-create-request.json api-ref/source/v2/samples/volume-type-show-request.json api-ref/source/v2/samples/volume-type-show-response.json api-ref/source/v2/samples/volume-type-update-request.json api-ref/source/v2/samples/volume-types-list-response.json api-ref/source/v2/samples/volume-unmanage-request.json api-ref/source/v2/samples/volume-update-request.json api-ref/source/v2/samples/volume-update-response.json api-ref/source/v2/samples/volumes-list-detailed-response.json api-ref/source/v2/samples/volumes-list-response.json api-ref/source/v3/api-versions.inc api-ref/source/v3/attachments.inc api-ref/source/v3/availability-zones-v3.inc api-ref/source/v3/capabilities-v3.inc 
api-ref/source/v3/clusters.inc api-ref/source/v3/consistencygroups-v3.inc api-ref/source/v3/default-types.inc api-ref/source/v3/ext-backups-actions-v3.inc api-ref/source/v3/ext-backups.inc api-ref/source/v3/group-replication.inc api-ref/source/v3/group-snapshots.inc api-ref/source/v3/group-type-specs.inc api-ref/source/v3/group-types.inc api-ref/source/v3/groups.inc api-ref/source/v3/hosts.inc api-ref/source/v3/index.rst api-ref/source/v3/limits.inc api-ref/source/v3/messages.inc api-ref/source/v3/os-cgsnapshots-v3.inc api-ref/source/v3/os-services.inc api-ref/source/v3/os-vol-pool-v3.inc api-ref/source/v3/os-vol-transfer-v3.inc api-ref/source/v3/parameters.yaml api-ref/source/v3/qos-specs-v3-qos-specs.inc api-ref/source/v3/quota-classes.inc api-ref/source/v3/quota-sets.inc api-ref/source/v3/resource-filters.inc api-ref/source/v3/snapshot-manage.inc api-ref/source/v3/valid-boolean-values.inc api-ref/source/v3/vol-transfer-v3.inc api-ref/source/v3/volume-manage.inc api-ref/source/v3/volume-type-access.inc api-ref/source/v3/volumes-v3-extensions.inc api-ref/source/v3/volumes-v3-snapshots-actions.inc api-ref/source/v3/volumes-v3-snapshots.inc api-ref/source/v3/volumes-v3-types.inc api-ref/source/v3/volumes-v3-versions.inc api-ref/source/v3/volumes-v3-volumes-actions.inc api-ref/source/v3/volumes-v3-volumes.inc api-ref/source/v3/worker-cleanup.inc api-ref/source/v3/samples/attachment-complete.json api-ref/source/v3/samples/attachment-create-request.json api-ref/source/v3/samples/attachment-create-response.json api-ref/source/v3/samples/attachment-list-detailed-response.json api-ref/source/v3/samples/attachment-list-response.json api-ref/source/v3/samples/attachment-show-response.json api-ref/source/v3/samples/attachment-update-request.json api-ref/source/v3/samples/attachment-update-response.json api-ref/source/v3/samples/availability-zone-list-response.json api-ref/source/v3/samples/backend-capabilities-response.json api-ref/source/v3/samples/backup-force-delete-request.json api-ref/source/v3/samples/backup-record-export-response.json api-ref/source/v3/samples/backup-record-import-request.json api-ref/source/v3/samples/backup-record-import-response.json api-ref/source/v3/samples/backup-reset-status-request.json api-ref/source/v3/samples/backup-restore-request.json api-ref/source/v3/samples/backup-restore-response.json api-ref/source/v3/samples/backups-list-response.json api-ref/source/v3/samples/cgsnapshots-create-request.json api-ref/source/v3/samples/cgsnapshots-create-response.json api-ref/source/v3/samples/cgsnapshots-list-detailed-response.json api-ref/source/v3/samples/cgsnapshots-list-response.json api-ref/source/v3/samples/cgsnapshots-show-response.json api-ref/source/v3/samples/consistency-group-create-from-src-request.json api-ref/source/v3/samples/consistency-group-create-request.json api-ref/source/v3/samples/consistency-group-create-response.json api-ref/source/v3/samples/consistency-group-delete-request.json api-ref/source/v3/samples/consistency-group-show-response.json api-ref/source/v3/samples/consistency-group-update-request.json api-ref/source/v3/samples/consistency-groups-list-detailed-response.json api-ref/source/v3/samples/consistency-groups-list-response.json api-ref/source/v3/samples/get-default-type-response.json api-ref/source/v3/samples/get-default-types-response.json api-ref/source/v3/samples/group-create-from-src-request.json api-ref/source/v3/samples/group-create-from-src-response.json api-ref/source/v3/samples/group-create-request.json 
api-ref/source/v3/samples/group-create-response.json api-ref/source/v3/samples/group-delete-request.json api-ref/source/v3/samples/group-replication-disable.json api-ref/source/v3/samples/group-replication-enable.json api-ref/source/v3/samples/group-replication-failover.json api-ref/source/v3/samples/group-replication-list-targets.json api-ref/source/v3/samples/group-replication-target.json api-ref/source/v3/samples/group-reset-status-request.json api-ref/source/v3/samples/group-show-response.json api-ref/source/v3/samples/group-snapshot-reset-status-request.json api-ref/source/v3/samples/group-snapshots-create-request.json api-ref/source/v3/samples/group-snapshots-create-response.json api-ref/source/v3/samples/group-snapshots-list-detailed-response.json api-ref/source/v3/samples/group-snapshots-list-response.json api-ref/source/v3/samples/group-snapshots-show-response.json api-ref/source/v3/samples/group-type-create-request.json api-ref/source/v3/samples/group-type-default-response.json api-ref/source/v3/samples/group-type-show-response.json api-ref/source/v3/samples/group-type-specs-create-request.json api-ref/source/v3/samples/group-type-specs-create-response.json api-ref/source/v3/samples/group-type-specs-list-response.json api-ref/source/v3/samples/group-type-specs-show-response.json api-ref/source/v3/samples/group-type-specs-update-request.json api-ref/source/v3/samples/group-type-specs-update-response.json api-ref/source/v3/samples/group-type-update-request.json api-ref/source/v3/samples/group-types-list-response.json api-ref/source/v3/samples/group-update-request.json api-ref/source/v3/samples/groups-list-detailed-response.json api-ref/source/v3/samples/groups-list-response.json api-ref/source/v3/samples/host-attach-request.json api-ref/source/v3/samples/hosts-get-response.json api-ref/source/v3/samples/hosts-list-response.json api-ref/source/v3/samples/image-metadata-show-request.json api-ref/source/v3/samples/image-metadata-show-response.json api-ref/source/v3/samples/messages-list-response.json api-ref/source/v3/samples/messages-show-response.json api-ref/source/v3/samples/pools-list-detailed-response.json api-ref/source/v3/samples/quota-classes-show-response.json api-ref/source/v3/samples/quota-classes-update-request.json api-ref/source/v3/samples/quota-classes-update-response.json api-ref/source/v3/samples/resource-filters-list-response.json api-ref/source/v3/samples/services-disable-log-request.json api-ref/source/v3/samples/services-disable-log-response.json api-ref/source/v3/samples/services-disable-request.json api-ref/source/v3/samples/services-disable-response.json api-ref/source/v3/samples/services-enable-request.json api-ref/source/v3/samples/services-enable-response.json api-ref/source/v3/samples/services-failover-host-request.json api-ref/source/v3/samples/services-freeze-request.json api-ref/source/v3/samples/services-get-log-request.json api-ref/source/v3/samples/services-get-log-response.json api-ref/source/v3/samples/services-list-response.json api-ref/source/v3/samples/services-set-log-request.json api-ref/source/v3/samples/services-thaw-request.json api-ref/source/v3/samples/set-default-type-request.json api-ref/source/v3/samples/set-default-type-response.json api-ref/source/v3/samples/snapshot-force-delete-request.json api-ref/source/v3/samples/snapshot-manage-list-detail-response.json api-ref/source/v3/samples/snapshot-manage-list-response.json api-ref/source/v3/samples/snapshot-status-reset-request.json 
api-ref/source/v3/samples/snapshot-status-update-request.json api-ref/source/v3/samples/user-quotas-show-response.json api-ref/source/v3/samples/user-quotas-update-request.json api-ref/source/v3/samples/user-quotas-update-response.json api-ref/source/v3/samples/volume-attach-request.json api-ref/source/v3/samples/volume-begin-detaching-request.json api-ref/source/v3/samples/volume-bootable-status-update-request.json api-ref/source/v3/samples/volume-detach-request.json api-ref/source/v3/samples/volume-extend-request.json api-ref/source/v3/samples/volume-force-delete-request.json api-ref/source/v3/samples/volume-force-detach-request.json api-ref/source/v3/samples/volume-image-metadata-set-request.json api-ref/source/v3/samples/volume-image-metadata-unset-request.json api-ref/source/v3/samples/volume-image-metadata-update-response.json api-ref/source/v3/samples/volume-initialize-connection-request.json api-ref/source/v3/samples/volume-manage-list-detail-response.json api-ref/source/v3/samples/volume-manage-list-response.json api-ref/source/v3/samples/volume-manage-request-cluster.json api-ref/source/v3/samples/volume-os-extend_volume_completion-request.json api-ref/source/v3/samples/volume-os-migrate_volume-request.json api-ref/source/v3/samples/volume-os-migrate_volume_completion-request.json api-ref/source/v3/samples/volume-os-reimage-request.json api-ref/source/v3/samples/volume-os-retype-request.json api-ref/source/v3/samples/volume-readonly-update-request.json api-ref/source/v3/samples/volume-reserve-request.json api-ref/source/v3/samples/volume-revert-to-snapshot-request.json api-ref/source/v3/samples/volume-roll-detaching-request.json api-ref/source/v3/samples/volume-status-reset-request.json api-ref/source/v3/samples/volume-terminate-connection-request.json api-ref/source/v3/samples/volume-type-access-delete-request.json api-ref/source/v3/samples/volume-unmanage-request.json api-ref/source/v3/samples/volume-unreserve-request.json api-ref/source/v3/samples/worker-cleanup-request.json api-ref/source/v3/samples/worker-cleanup-response.json api-ref/source/v3/samples/backups/backup-create-request.json api-ref/source/v3/samples/backups/backup-create-response.json api-ref/source/v3/samples/backups/backup-show-response.json api-ref/source/v3/samples/backups/backups-list-detailed-response.json api-ref/source/v3/samples/backups/backups-list-response.json api-ref/source/v3/samples/backups/v3.18/backup-create-response.json api-ref/source/v3/samples/backups/v3.18/backup-show-response.json api-ref/source/v3/samples/backups/v3.18/backups-list-detailed-response.json api-ref/source/v3/samples/backups/v3.18/backups-list-response.json api-ref/source/v3/samples/backups/v3.43/backup-create-response.json api-ref/source/v3/samples/backups/v3.43/backup-show-response.json api-ref/source/v3/samples/backups/v3.43/backups-list-detailed-response.json api-ref/source/v3/samples/backups/v3.43/backups-list-response.json api-ref/source/v3/samples/backups/v3.45/backup-create-response.json api-ref/source/v3/samples/backups/v3.45/backup-show-response.json api-ref/source/v3/samples/backups/v3.45/backups-list-detailed-response.json api-ref/source/v3/samples/backups/v3.45/backups-list-response.json api-ref/source/v3/samples/backups/v3.56/backup-create-response.json api-ref/source/v3/samples/backups/v3.56/backup-show-response.json api-ref/source/v3/samples/backups/v3.56/backups-list-detailed-response.json api-ref/source/v3/samples/backups/v3.56/backups-list-response.json 
api-ref/source/v3/samples/backups/v3.9/backup-create-response.json api-ref/source/v3/samples/backups/v3.9/backup-show-response.json api-ref/source/v3/samples/backups/v3.9/backup-update-request.json api-ref/source/v3/samples/backups/v3.9/backup-update-response.json api-ref/source/v3/samples/backups/v3.9/backups-list-detailed-response.json api-ref/source/v3/samples/backups/v3.9/backups-list-response.json api-ref/source/v3/samples/clusters/v3.7/cluster-disable-request.json api-ref/source/v3/samples/clusters/v3.7/cluster-disable-response.json api-ref/source/v3/samples/clusters/v3.7/cluster-enable-request.json api-ref/source/v3/samples/clusters/v3.7/cluster-enable-response.json api-ref/source/v3/samples/clusters/v3.7/cluster-show-response.json api-ref/source/v3/samples/clusters/v3.7/clusters-list-detailed-response.json api-ref/source/v3/samples/clusters/v3.7/clusters-list-response.json api-ref/source/v3/samples/extensions/extensions-list-response.json api-ref/source/v3/samples/limits/limits-show-response.json api-ref/source/v3/samples/qos/qos-create-request.json api-ref/source/v3/samples/qos/qos-create-response.json api-ref/source/v3/samples/qos/qos-list-response.json api-ref/source/v3/samples/qos/qos-show-response.json api-ref/source/v3/samples/qos/qos-unset-request.json api-ref/source/v3/samples/qos/qos-update-request.json api-ref/source/v3/samples/qos/qos-update-response.json api-ref/source/v3/samples/qos/qos_show_response.json api-ref/source/v3/samples/quota_classes/quota-classes-show-response.json api-ref/source/v3/samples/quota_classes/quota-classes-update-request.json api-ref/source/v3/samples/quota_classes/quota-classes-update-response.json api-ref/source/v3/samples/quota_sets/quotas-show-defaults-response.json api-ref/source/v3/samples/quota_sets/quotas-show-response.json api-ref/source/v3/samples/quota_sets/quotas-show-usage-response.json api-ref/source/v3/samples/quota_sets/quotas-update-request.json api-ref/source/v3/samples/quota_sets/quotas-update-response.json api-ref/source/v3/samples/snapshot_manage_extensions/snapshot-manage-request.json api-ref/source/v3/samples/snapshot_manage_extensions/snapshot-manage-response.json api-ref/source/v3/samples/snapshots/snapshot-create-request.json api-ref/source/v3/samples/snapshots/snapshot-create-response.json api-ref/source/v3/samples/snapshots/snapshot-metadata-create-request.json api-ref/source/v3/samples/snapshots/snapshot-metadata-create-response.json api-ref/source/v3/samples/snapshots/snapshot-metadata-show-key-response.json api-ref/source/v3/samples/snapshots/snapshot-metadata-show-response.json api-ref/source/v3/samples/snapshots/snapshot-metadata-update-key-request.json api-ref/source/v3/samples/snapshots/snapshot-metadata-update-key-response.json api-ref/source/v3/samples/snapshots/snapshot-metadata-update-request.json api-ref/source/v3/samples/snapshots/snapshot-metadata-update-response.json api-ref/source/v3/samples/snapshots/snapshot-show-response.json api-ref/source/v3/samples/snapshots/snapshot-update-request.json api-ref/source/v3/samples/snapshots/snapshot-update-response.json api-ref/source/v3/samples/snapshots/snapshots-list-detailed-response.json api-ref/source/v3/samples/snapshots/snapshots-list-response.json api-ref/source/v3/samples/snapshots/v3.14/snapshot-create-response.json api-ref/source/v3/samples/snapshots/v3.14/snapshot-show-response.json api-ref/source/v3/samples/snapshots/v3.14/snapshot-update-response.json api-ref/source/v3/samples/snapshots/v3.14/snapshots-list-detailed-response.json 
api-ref/source/v3/samples/snapshots/v3.41/snapshot-create-response.json api-ref/source/v3/samples/snapshots/v3.41/snapshot-show-response.json api-ref/source/v3/samples/snapshots/v3.41/snapshot-update-response.json api-ref/source/v3/samples/snapshots/v3.41/snapshots-list-detailed-response.json api-ref/source/v3/samples/snapshots/v3.65/snapshot-create-response.json api-ref/source/v3/samples/snapshots/v3.65/snapshot-show-response.json api-ref/source/v3/samples/snapshots/v3.65/snapshot-update-response.json api-ref/source/v3/samples/snapshots/v3.65/snapshots-list-detailed-response.json api-ref/source/v3/samples/versions/version-show-response.json api-ref/source/v3/samples/versions/versions-response.json api-ref/source/v3/samples/volume_actions/volume-upload-to-image-request.json api-ref/source/v3/samples/volume_actions/volume-upload-to-image-response.json api-ref/source/v3/samples/volume_manage_extensions/volume-manage-request.json api-ref/source/v3/samples/volume_manage_extensions/volume-manage-response.json api-ref/source/v3/samples/volume_transfer/volume-transfer-accept-request.json api-ref/source/v3/samples/volume_transfer/volume-transfer-accept-response.json api-ref/source/v3/samples/volume_transfer/volume-transfer-create-request.json api-ref/source/v3/samples/volume_transfer/volume-transfer-create-response.json api-ref/source/v3/samples/volume_transfer/volume-transfer-show-response.json api-ref/source/v3/samples/volume_transfer/volume-transfers-list-detailed-response.json api-ref/source/v3/samples/volume_transfer/volume-transfers-list-response.json api-ref/source/v3/samples/volume_transfers/volume-transfers-accept-request.json api-ref/source/v3/samples/volume_transfers/volume-transfers-accept-response.json api-ref/source/v3/samples/volume_transfers/v3.55/volume-transfers-create-request.json api-ref/source/v3/samples/volume_transfers/v3.55/volume-transfers-create-response.json api-ref/source/v3/samples/volume_transfers/v3.55/volume-transfers-show-response.json api-ref/source/v3/samples/volume_transfers/v3.57/volume-transfers-create-request.json api-ref/source/v3/samples/volume_transfers/v3.57/volume-transfers-create-response.json api-ref/source/v3/samples/volume_transfers/v3.57/volume-transfers-show-response.json api-ref/source/v3/samples/volume_type/encryption-type-create-request.json api-ref/source/v3/samples/volume_type/encryption-type-create-response.json api-ref/source/v3/samples/volume_type/encryption-type-show-response.json api-ref/source/v3/samples/volume_type/encryption-type-specific-specs-show-response.json api-ref/source/v3/samples/volume_type/encryption-type-update-request.json api-ref/source/v3/samples/volume_type/encryption-type-update-response.json api-ref/source/v3/samples/volume_type/volume-type-access-add-request.json api-ref/source/v3/samples/volume_type/volume-type-access-list-response.json api-ref/source/v3/samples/volume_type/volume-type-all-extra-specs-show-response.json api-ref/source/v3/samples/volume_type/volume-type-create-request.json api-ref/source/v3/samples/volume_type/volume-type-create-response.json api-ref/source/v3/samples/volume_type/volume-type-default-response.json api-ref/source/v3/samples/volume_type/volume-type-extra-specs-create-update-request.json api-ref/source/v3/samples/volume_type/volume-type-extra-specs-create-update-response.json api-ref/source/v3/samples/volume_type/volume-type-show-response.json api-ref/source/v3/samples/volume_type/volume-type-specific-extra-specs-show-response.json 
api-ref/source/v3/samples/volume_type/volume-type-specific-extra-specs-update-request.json api-ref/source/v3/samples/volume_type/volume-type-specific-extra-specs-update-response.json api-ref/source/v3/samples/volume_type/volume-type-update-request.json api-ref/source/v3/samples/volume_type/volume-type-update-response.json api-ref/source/v3/samples/volume_type/volume-types-list-response.json api-ref/source/v3/samples/volumes/volume-create-request.json api-ref/source/v3/samples/volumes/volume-create-response.json api-ref/source/v3/samples/volumes/volume-metadata-create-request.json api-ref/source/v3/samples/volumes/volume-metadata-create-response.json api-ref/source/v3/samples/volumes/volume-metadata-show-key-response.json api-ref/source/v3/samples/volumes/volume-metadata-show-response.json api-ref/source/v3/samples/volumes/volume-metadata-update-key-request.json api-ref/source/v3/samples/volumes/volume-metadata-update-key-response.json api-ref/source/v3/samples/volumes/volume-metadata-update-request.json api-ref/source/v3/samples/volumes/volume-metadata-update-response.json api-ref/source/v3/samples/volumes/volume-show-response.json api-ref/source/v3/samples/volumes/volume-update-request.json api-ref/source/v3/samples/volumes/volume-update-response.json api-ref/source/v3/samples/volumes/volumes-list-detailed-response.json api-ref/source/v3/samples/volumes/volumes-list-response.json api-ref/source/v3/samples/volumes/volumes-list-summary-response.json api-ref/source/v3/samples/volumes/v3.13/volume-create-response.json api-ref/source/v3/samples/volumes/v3.13/volume-show-response.json api-ref/source/v3/samples/volumes/v3.13/volume-update-response.json api-ref/source/v3/samples/volumes/v3.13/volumes-list-detailed-response.json api-ref/source/v3/samples/volumes/v3.21/volume-create-response.json api-ref/source/v3/samples/volumes/v3.21/volume-show-response.json api-ref/source/v3/samples/volumes/v3.21/volume-update-response.json api-ref/source/v3/samples/volumes/v3.21/volumes-list-detailed-response.json api-ref/source/v3/samples/volumes/v3.48/volume-create-response.json api-ref/source/v3/samples/volumes/v3.48/volume-show-response.json api-ref/source/v3/samples/volumes/v3.48/volume-update-response.json api-ref/source/v3/samples/volumes/v3.48/volumes-list-detailed-response.json api-ref/source/v3/samples/volumes/v3.61/volume-create-response.json api-ref/source/v3/samples/volumes/v3.61/volume-show-response.json api-ref/source/v3/samples/volumes/v3.61/volume-update-response.json api-ref/source/v3/samples/volumes/v3.61/volumes-list-detailed-response.json api-ref/source/v3/samples/volumes/v3.63/volume-create-response.json api-ref/source/v3/samples/volumes/v3.63/volume-show-response.json api-ref/source/v3/samples/volumes/v3.63/volume-update-response.json api-ref/source/v3/samples/volumes/v3.63/volumes-list-detailed-response.json api-ref/source/v3/samples/volumes/v3.65/volume-create-response.json api-ref/source/v3/samples/volumes/v3.65/volume-show-response.json api-ref/source/v3/samples/volumes/v3.65/volume-update-response.json api-ref/source/v3/samples/volumes/v3.65/volumes-list-detailed-response.json api-ref/source/v3/samples/volumes/v3.69/volume-create-response.json api-ref/source/v3/samples/volumes/v3.69/volume-show-response.json api-ref/source/v3/samples/volumes/v3.69/volume-update-response.json api-ref/source/v3/samples/volumes/v3.69/volumes-list-detailed-response.json cinder/__init__.py cinder/context.py cinder/coordination.py cinder/exception.py cinder/flow_utils.py cinder/i18n.py cinder/manager.py 
cinder/opts.py cinder/policy.py cinder/quota.py cinder/quota_utils.py cinder/rpc.py cinder/service.py cinder/service_auth.py cinder/ssh_utils.py cinder/utils.py cinder/version.py cinder.egg-info/PKG-INFO cinder.egg-info/SOURCES.txt cinder.egg-info/dependency_links.txt cinder.egg-info/entry_points.txt cinder.egg-info/not-zip-safe cinder.egg-info/pbr.json cinder.egg-info/requires.txt cinder.egg-info/top_level.txt cinder/api/__init__.py cinder/api/api_utils.py cinder/api/common.py cinder/api/extensions.py cinder/api/microversions.py cinder/api/urlmap.py cinder/api/versions.py cinder/api/contrib/__init__.py cinder/api/contrib/admin_actions.py cinder/api/contrib/availability_zones.py cinder/api/contrib/backups.py cinder/api/contrib/capabilities.py cinder/api/contrib/cgsnapshots.py cinder/api/contrib/consistencygroups.py cinder/api/contrib/extended_services.py cinder/api/contrib/extended_snapshot_attributes.py cinder/api/contrib/hosts.py cinder/api/contrib/qos_specs_manage.py cinder/api/contrib/quota_classes.py cinder/api/contrib/quotas.py cinder/api/contrib/resource_common_manage.py cinder/api/contrib/scheduler_hints.py cinder/api/contrib/scheduler_stats.py cinder/api/contrib/services.py cinder/api/contrib/snapshot_actions.py cinder/api/contrib/snapshot_manage.py cinder/api/contrib/snapshot_unmanage.py cinder/api/contrib/types_extra_specs.py cinder/api/contrib/types_manage.py cinder/api/contrib/used_limits.py cinder/api/contrib/volume_actions.py cinder/api/contrib/volume_encryption_metadata.py cinder/api/contrib/volume_host_attribute.py cinder/api/contrib/volume_image_metadata.py cinder/api/contrib/volume_manage.py cinder/api/contrib/volume_mig_status_attribute.py cinder/api/contrib/volume_tenant_attribute.py cinder/api/contrib/volume_transfer.py cinder/api/contrib/volume_type_access.py cinder/api/contrib/volume_type_encryption.py cinder/api/contrib/volume_unmanage.py cinder/api/middleware/__init__.py cinder/api/middleware/auth.py cinder/api/middleware/fault.py cinder/api/middleware/request_id.py cinder/api/openstack/__init__.py cinder/api/openstack/api_version_request.py cinder/api/openstack/rest_api_version_history.rst cinder/api/openstack/versioned_method.py cinder/api/openstack/wsgi.py cinder/api/schemas/__init__.py cinder/api/schemas/admin_actions.py cinder/api/schemas/attachments.py cinder/api/schemas/backups.py cinder/api/schemas/clusters.py cinder/api/schemas/default_types.py cinder/api/schemas/group_snapshots.py cinder/api/schemas/group_specs.py cinder/api/schemas/group_types.py cinder/api/schemas/groups.py cinder/api/schemas/qos_specs.py cinder/api/schemas/quota_classes.py cinder/api/schemas/quotas.py cinder/api/schemas/scheduler_hints.py cinder/api/schemas/services.py cinder/api/schemas/snapshot_actions.py cinder/api/schemas/snapshot_manage.py cinder/api/schemas/snapshots.py cinder/api/schemas/types_extra_specs.py cinder/api/schemas/volume_actions.py cinder/api/schemas/volume_image_metadata.py cinder/api/schemas/volume_manage.py cinder/api/schemas/volume_metadata.py cinder/api/schemas/volume_transfer.py cinder/api/schemas/volume_type_access.py cinder/api/schemas/volume_type_encryption.py cinder/api/schemas/volume_types.py cinder/api/schemas/volumes.py cinder/api/schemas/workers.py cinder/api/v2/__init__.py cinder/api/v2/limits.py cinder/api/v2/snapshots.py cinder/api/v2/volume_metadata.py cinder/api/v2/volumes.py cinder/api/v2/views/__init__.py cinder/api/v2/views/volumes.py cinder/api/v3/__init__.py cinder/api/v3/attachments.py cinder/api/v3/backups.py cinder/api/v3/clusters.py 
cinder/api/v3/consistencygroups.py cinder/api/v3/default_types.py cinder/api/v3/group_snapshots.py cinder/api/v3/group_specs.py cinder/api/v3/group_types.py cinder/api/v3/groups.py cinder/api/v3/limits.py cinder/api/v3/messages.py cinder/api/v3/resource_common_manage.py cinder/api/v3/resource_filters.py cinder/api/v3/router.py cinder/api/v3/snapshot_manage.py cinder/api/v3/snapshot_metadata.py cinder/api/v3/snapshots.py cinder/api/v3/types.py cinder/api/v3/volume_manage.py cinder/api/v3/volume_metadata.py cinder/api/v3/volume_transfer.py cinder/api/v3/volumes.py cinder/api/v3/workers.py cinder/api/v3/views/__init__.py cinder/api/v3/views/attachments.py cinder/api/v3/views/backups.py cinder/api/v3/views/clusters.py cinder/api/v3/views/default_types.py cinder/api/v3/views/group_snapshots.py cinder/api/v3/views/group_types.py cinder/api/v3/views/groups.py cinder/api/v3/views/messages.py cinder/api/v3/views/resource_filters.py cinder/api/v3/views/snapshots.py cinder/api/v3/views/types.py cinder/api/v3/views/volumes.py cinder/api/v3/views/workers.py cinder/api/validation/__init__.py cinder/api/validation/parameter_types.py cinder/api/validation/validators.py cinder/api/views/__init__.py cinder/api/views/availability_zones.py cinder/api/views/backups.py cinder/api/views/capabilities.py cinder/api/views/cgsnapshots.py cinder/api/views/consistencygroups.py cinder/api/views/limits.py cinder/api/views/manageable_snapshots.py cinder/api/views/manageable_volumes.py cinder/api/views/qos_specs.py cinder/api/views/scheduler_stats.py cinder/api/views/snapshots.py cinder/api/views/transfers.py cinder/api/views/types.py cinder/api/views/versions.py cinder/backup/__init__.py cinder/backup/api.py cinder/backup/chunkeddriver.py cinder/backup/driver.py cinder/backup/manager.py cinder/backup/rpcapi.py cinder/backup/drivers/__init__.py cinder/backup/drivers/ceph.py cinder/backup/drivers/gcs.py cinder/backup/drivers/glusterfs.py cinder/backup/drivers/nfs.py cinder/backup/drivers/posix.py cinder/backup/drivers/s3.py cinder/backup/drivers/swift.py cinder/brick/README.txt cinder/brick/__init__.py cinder/brick/local_dev/__init__.py cinder/brick/local_dev/lvm.py cinder/cmd/__init__.py cinder/cmd/api.py cinder/cmd/backup.py cinder/cmd/manage.py cinder/cmd/rtstool.py cinder/cmd/scheduler.py cinder/cmd/status.py cinder/cmd/volume.py cinder/cmd/volume_usage_audit.py cinder/common/__init__.py cinder/common/config.py cinder/common/constants.py cinder/common/sqlalchemyutils.py cinder/compute/__init__.py cinder/compute/nova.py cinder/db/__init__.py cinder/db/alembic.ini cinder/db/api.py cinder/db/base.py cinder/db/migration.py cinder/db/migrations/__init__.py cinder/db/migrations/env.py cinder/db/migrations/script.py.mako cinder/db/migrations/versions/89aa6f9639f9_drop_legacy_migrate_version_table.py cinder/db/migrations/versions/921e1a36b076_initial.py cinder/db/migrations/versions/9ab1b092a404_make_use_quota_non_nullable.py cinder/db/migrations/versions/9c74c1c6971f_quota_add_backup_defaults_in_quota_class.py cinder/db/migrations/versions/__init__.py cinder/db/migrations/versions/b7b88f50aab5_remove_quota_consistencygroups.py cinder/db/migrations/versions/b8660621f1b9_update_reservations_resource.py cinder/db/migrations/versions/c92a3e68beed_make_shared_targets_nullable.py cinder/db/migrations/versions/daa98075b90d_add_resource_indexes.py cinder/db/sqlalchemy/__init__.py cinder/db/sqlalchemy/api.py cinder/db/sqlalchemy/models.py cinder/group/__init__.py cinder/group/api.py cinder/image/__init__.py cinder/image/accelerator.py 
cinder/image/cache.py cinder/image/format_inspector.py cinder/image/glance.py cinder/image/image_utils.py cinder/image/accelerators/__init__.py cinder/image/accelerators/gzip.py cinder/image/accelerators/qat.py cinder/interface/__init__.py cinder/interface/backup_chunked_driver.py cinder/interface/backup_driver.py cinder/interface/base.py cinder/interface/fczm_driver.py cinder/interface/util.py cinder/interface/volume_consistencygroup_driver.py cinder/interface/volume_driver.py cinder/interface/volume_group_driver.py cinder/interface/volume_manageable_driver.py cinder/interface/volume_snapshot_revert.py cinder/interface/volume_snapshotmanagement_driver.py cinder/keymgr/__init__.py cinder/keymgr/conf_key_mgr.py cinder/keymgr/migration.py cinder/keymgr/transfer.py cinder/locale/ja/LC_MESSAGES/cinder.po cinder/locale/zh_CN/LC_MESSAGES/cinder.po cinder/message/__init__.py cinder/message/api.py cinder/message/defined_messages.py cinder/message/message_field.py cinder/objects/__init__.py cinder/objects/backup.py cinder/objects/base.py cinder/objects/cgsnapshot.py cinder/objects/cleanable.py cinder/objects/cleanup_request.py cinder/objects/cluster.py cinder/objects/consistencygroup.py cinder/objects/dynamic_log.py cinder/objects/fields.py cinder/objects/group.py cinder/objects/group_snapshot.py cinder/objects/group_type.py cinder/objects/manageableresources.py cinder/objects/qos_specs.py cinder/objects/request_spec.py cinder/objects/service.py cinder/objects/snapshot.py cinder/objects/volume.py cinder/objects/volume_attachment.py cinder/objects/volume_type.py cinder/policies/__init__.py cinder/policies/attachments.py cinder/policies/backup_actions.py cinder/policies/backups.py cinder/policies/base.py cinder/policies/capabilities.py cinder/policies/clusters.py cinder/policies/default_types.py cinder/policies/group_actions.py cinder/policies/group_snapshot_actions.py cinder/policies/group_snapshots.py cinder/policies/group_types.py cinder/policies/groups.py cinder/policies/hosts.py cinder/policies/limits.py cinder/policies/manageable_snapshots.py cinder/policies/manageable_volumes.py cinder/policies/messages.py cinder/policies/qos_specs.py cinder/policies/quota_class.py cinder/policies/quotas.py cinder/policies/scheduler_stats.py cinder/policies/services.py cinder/policies/snapshot_actions.py cinder/policies/snapshot_metadata.py cinder/policies/snapshots.py cinder/policies/type_extra_specs.py cinder/policies/volume_access.py cinder/policies/volume_actions.py cinder/policies/volume_metadata.py cinder/policies/volume_transfer.py cinder/policies/volume_type.py cinder/policies/volumes.py cinder/policies/workers.py cinder/privsep/__init__.py cinder/privsep/cgroup.py cinder/privsep/format_inspector.py cinder/privsep/fs.py cinder/privsep/lvm.py cinder/privsep/path.py cinder/privsep/targets/__init__.py cinder/privsep/targets/nvmet.py cinder/privsep/targets/scst.py cinder/privsep/targets/tgt.py cinder/scheduler/__init__.py cinder/scheduler/base_filter.py cinder/scheduler/base_handler.py cinder/scheduler/base_weight.py cinder/scheduler/driver.py cinder/scheduler/filter_scheduler.py cinder/scheduler/host_manager.py cinder/scheduler/manager.py cinder/scheduler/rpcapi.py cinder/scheduler/scheduler_options.py cinder/scheduler/evaluator/__init__.py cinder/scheduler/evaluator/evaluator.py cinder/scheduler/filters/__init__.py cinder/scheduler/filters/affinity_filter.py cinder/scheduler/filters/availability_zone_filter.py cinder/scheduler/filters/capabilities_filter.py cinder/scheduler/filters/capacity_filter.py 
cinder/scheduler/filters/driver_filter.py cinder/scheduler/filters/extra_specs_ops.py cinder/scheduler/filters/ignore_attempted_hosts_filter.py cinder/scheduler/filters/instance_locality_filter.py cinder/scheduler/filters/json_filter.py cinder/scheduler/flows/__init__.py cinder/scheduler/flows/create_volume.py cinder/scheduler/weights/__init__.py cinder/scheduler/weights/capacity.py cinder/scheduler/weights/chance.py cinder/scheduler/weights/goodness.py cinder/scheduler/weights/stochastic.py cinder/scheduler/weights/volume_number.py cinder/tests/README.rst cinder/tests/__init__.py cinder/tests/fake_driver.py cinder/tests/fixtures.py cinder/tests/compliance/__init__.py cinder/tests/compliance/test_backup_drivers.py cinder/tests/compliance/test_fczm_drivers.py cinder/tests/compliance/test_volume_drivers.py cinder/tests/functional/__init__.py cinder/tests/functional/api_samples_test_base.py cinder/tests/functional/functional_helpers.py cinder/tests/functional/test_default_types.py cinder/tests/functional/test_extensions.py cinder/tests/functional/test_group_replication.py cinder/tests/functional/test_group_snapshots.py cinder/tests/functional/test_groups.py cinder/tests/functional/test_login.py cinder/tests/functional/test_middleware.py cinder/tests/functional/test_volumes.py cinder/tests/functional/api/__init__.py cinder/tests/functional/api/client.py cinder/tests/functional/api/foxinsocks.py cinder/tests/functional/api_sample_tests/__init__.py cinder/tests/functional/api_sample_tests/fakes.py cinder/tests/functional/api_sample_tests/test_backups.py cinder/tests/functional/api_sample_tests/test_extensions.py cinder/tests/functional/api_sample_tests/test_limits.py cinder/tests/functional/api_sample_tests/test_qos.py cinder/tests/functional/api_sample_tests/test_quota_classes.py cinder/tests/functional/api_sample_tests/test_quota_sets.py cinder/tests/functional/api_sample_tests/test_snapshot_manage_extensions.py cinder/tests/functional/api_sample_tests/test_snapshots.py cinder/tests/functional/api_sample_tests/test_versions.py cinder/tests/functional/api_sample_tests/test_volume_actions.py cinder/tests/functional/api_sample_tests/test_volume_manage_extensions.py cinder/tests/functional/api_sample_tests/test_volume_transfer.py cinder/tests/functional/api_sample_tests/test_volume_transfers.py cinder/tests/functional/api_sample_tests/test_volume_types.py cinder/tests/functional/api_sample_tests/test_volumes.py cinder/tests/functional/api_sample_tests/samples/backups/backup-create-request.json.tpl cinder/tests/functional/api_sample_tests/samples/backups/backup-create-response.json.tpl cinder/tests/functional/api_sample_tests/samples/backups/backup-show-response.json.tpl cinder/tests/functional/api_sample_tests/samples/backups/backup-update-request.json.tpl cinder/tests/functional/api_sample_tests/samples/backups/backups-list-detailed-response.json.tpl cinder/tests/functional/api_sample_tests/samples/backups/backups-list-response.json.tpl cinder/tests/functional/api_sample_tests/samples/backups/v3.18/backup-create-response.json.tpl cinder/tests/functional/api_sample_tests/samples/backups/v3.18/backup-show-response.json.tpl cinder/tests/functional/api_sample_tests/samples/backups/v3.18/backups-list-detailed-response.json.tpl cinder/tests/functional/api_sample_tests/samples/backups/v3.18/backups-list-response.json.tpl cinder/tests/functional/api_sample_tests/samples/backups/v3.43/backup-create-response.json.tpl 
cinder/tests/functional/api_sample_tests/samples/backups/v3.43/backup-show-response.json.tpl cinder/tests/functional/api_sample_tests/samples/backups/v3.43/backups-list-detailed-response.json.tpl cinder/tests/functional/api_sample_tests/samples/backups/v3.43/backups-list-response.json.tpl cinder/tests/functional/api_sample_tests/samples/backups/v3.45/backup-create-response.json.tpl cinder/tests/functional/api_sample_tests/samples/backups/v3.45/backup-show-response.json.tpl cinder/tests/functional/api_sample_tests/samples/backups/v3.45/backups-list-detailed-response.json.tpl cinder/tests/functional/api_sample_tests/samples/backups/v3.45/backups-list-response.json.tpl cinder/tests/functional/api_sample_tests/samples/backups/v3.47/backup-restore-request.json.tpl cinder/tests/functional/api_sample_tests/samples/backups/v3.47/backup-restore-response.json.tpl cinder/tests/functional/api_sample_tests/samples/backups/v3.56/backup-create-response.json.tpl cinder/tests/functional/api_sample_tests/samples/backups/v3.56/backup-show-response.json.tpl cinder/tests/functional/api_sample_tests/samples/backups/v3.56/backups-list-detailed-response.json.tpl cinder/tests/functional/api_sample_tests/samples/backups/v3.56/backups-list-response.json.tpl cinder/tests/functional/api_sample_tests/samples/backups/v3.9/backup-create-response.json.tpl cinder/tests/functional/api_sample_tests/samples/backups/v3.9/backup-show-response.json.tpl cinder/tests/functional/api_sample_tests/samples/backups/v3.9/backup-update-request.json.tpl cinder/tests/functional/api_sample_tests/samples/backups/v3.9/backup-update-response.json.tpl cinder/tests/functional/api_sample_tests/samples/backups/v3.9/backups-list-detailed-response.json.tpl cinder/tests/functional/api_sample_tests/samples/backups/v3.9/backups-list-response.json.tpl cinder/tests/functional/api_sample_tests/samples/extensions/extensions-list-response.json.tpl cinder/tests/functional/api_sample_tests/samples/limits/limits-show-response.json.tpl cinder/tests/functional/api_sample_tests/samples/qos/qos-create-request.json.tpl cinder/tests/functional/api_sample_tests/samples/qos/qos-create-response.json.tpl cinder/tests/functional/api_sample_tests/samples/qos/qos-list-response.json.tpl cinder/tests/functional/api_sample_tests/samples/qos/qos-show-response.json.tpl cinder/tests/functional/api_sample_tests/samples/qos/qos-update-request.json.tpl cinder/tests/functional/api_sample_tests/samples/qos/qos-update-response.json.tpl cinder/tests/functional/api_sample_tests/samples/qos/qos_show_response.json.tpl cinder/tests/functional/api_sample_tests/samples/quota_classes/quota-classes-show-response.json.tpl cinder/tests/functional/api_sample_tests/samples/quota_classes/quota-classes-update-request.json.tpl cinder/tests/functional/api_sample_tests/samples/quota_classes/quota-classes-update-response.json.tpl cinder/tests/functional/api_sample_tests/samples/quota_sets/quotas-show-defaults-response.json.tpl cinder/tests/functional/api_sample_tests/samples/quota_sets/quotas-show-response.json.tpl cinder/tests/functional/api_sample_tests/samples/quota_sets/quotas-show-usage-response.json.tpl cinder/tests/functional/api_sample_tests/samples/quota_sets/quotas-update-request.json.tpl cinder/tests/functional/api_sample_tests/samples/quota_sets/quotas-update-response.json.tpl cinder/tests/functional/api_sample_tests/samples/snapshot_manage_extensions/snapshot-manage-request.json.tpl cinder/tests/functional/api_sample_tests/samples/snapshot_manage_extensions/snapshot-manage-response.json.tpl 
cinder/tests/functional/api_sample_tests/samples/snapshots/snapshot-create-request.json.tpl cinder/tests/functional/api_sample_tests/samples/snapshots/snapshot-create-response.json.tpl cinder/tests/functional/api_sample_tests/samples/snapshots/snapshot-metadata-create-request.json.tpl cinder/tests/functional/api_sample_tests/samples/snapshots/snapshot-metadata-create-response.json.tpl cinder/tests/functional/api_sample_tests/samples/snapshots/snapshot-metadata-show-key-response.json.tpl cinder/tests/functional/api_sample_tests/samples/snapshots/snapshot-metadata-show-response.json.tpl cinder/tests/functional/api_sample_tests/samples/snapshots/snapshot-metadata-update-key-request.json.tpl cinder/tests/functional/api_sample_tests/samples/snapshots/snapshot-metadata-update-key-response.json.tpl cinder/tests/functional/api_sample_tests/samples/snapshots/snapshot-metadata-update-request.json.tpl cinder/tests/functional/api_sample_tests/samples/snapshots/snapshot-metadata-update-response.json.tpl cinder/tests/functional/api_sample_tests/samples/snapshots/snapshot-show-response.json.tpl cinder/tests/functional/api_sample_tests/samples/snapshots/snapshot-update-request.json.tpl cinder/tests/functional/api_sample_tests/samples/snapshots/snapshot-update-response.json.tpl cinder/tests/functional/api_sample_tests/samples/snapshots/snapshots-list-detailed-response.json.tpl cinder/tests/functional/api_sample_tests/samples/snapshots/snapshots-list-response.json.tpl cinder/tests/functional/api_sample_tests/samples/snapshots/v3.14/snapshot-create-response.json.tpl cinder/tests/functional/api_sample_tests/samples/snapshots/v3.14/snapshot-show-response.json.tpl cinder/tests/functional/api_sample_tests/samples/snapshots/v3.14/snapshot-update-response.json.tpl cinder/tests/functional/api_sample_tests/samples/snapshots/v3.14/snapshots-list-detailed-response.json.tpl cinder/tests/functional/api_sample_tests/samples/snapshots/v3.41/snapshot-create-response.json.tpl cinder/tests/functional/api_sample_tests/samples/snapshots/v3.41/snapshot-show-response.json.tpl cinder/tests/functional/api_sample_tests/samples/snapshots/v3.41/snapshot-update-response.json.tpl cinder/tests/functional/api_sample_tests/samples/snapshots/v3.41/snapshots-list-detailed-response.json.tpl cinder/tests/functional/api_sample_tests/samples/snapshots/v3.65/snapshot-create-response.json.tpl cinder/tests/functional/api_sample_tests/samples/snapshots/v3.65/snapshot-show-response.json.tpl cinder/tests/functional/api_sample_tests/samples/snapshots/v3.65/snapshot-update-response.json.tpl cinder/tests/functional/api_sample_tests/samples/snapshots/v3.65/snapshots-list-detailed-response.json.tpl cinder/tests/functional/api_sample_tests/samples/versions/version-show-response.json.tpl cinder/tests/functional/api_sample_tests/samples/versions/versions-response.json.tpl cinder/tests/functional/api_sample_tests/samples/volume_actions/volume-upload-to-image-request.json.tpl cinder/tests/functional/api_sample_tests/samples/volume_actions/volume-upload-to-image-response.json.tpl cinder/tests/functional/api_sample_tests/samples/volume_manage_extensions/volume-manage-request.json.tpl cinder/tests/functional/api_sample_tests/samples/volume_manage_extensions/volume-manage-response.json.tpl cinder/tests/functional/api_sample_tests/samples/volume_transfer/volume-transfer-accept-request.json.tpl cinder/tests/functional/api_sample_tests/samples/volume_transfer/volume-transfer-accept-response.json.tpl 
cinder/tests/functional/api_sample_tests/samples/volume_transfer/volume-transfer-create-request.json.tpl cinder/tests/functional/api_sample_tests/samples/volume_transfer/volume-transfer-create-response.json.tpl cinder/tests/functional/api_sample_tests/samples/volume_transfer/volume-transfer-show-response.json.tpl cinder/tests/functional/api_sample_tests/samples/volume_transfer/volume-transfers-list-detailed-response.json.tpl cinder/tests/functional/api_sample_tests/samples/volume_transfer/volume-transfers-list-response.json.tpl cinder/tests/functional/api_sample_tests/samples/volume_transfers/volume-transfers-accept-request.json.tpl cinder/tests/functional/api_sample_tests/samples/volume_transfers/volume-transfers-accept-response.json.tpl cinder/tests/functional/api_sample_tests/samples/volume_transfers/v3.55/volume-transfers-create-request.json.tpl cinder/tests/functional/api_sample_tests/samples/volume_transfers/v3.55/volume-transfers-create-response.json.tpl cinder/tests/functional/api_sample_tests/samples/volume_transfers/v3.55/volume-transfers-show-response.json.tpl cinder/tests/functional/api_sample_tests/samples/volume_transfers/v3.57/volume-transfers-create-request.json.tpl cinder/tests/functional/api_sample_tests/samples/volume_transfers/v3.57/volume-transfers-create-response.json.tpl cinder/tests/functional/api_sample_tests/samples/volume_transfers/v3.57/volume-transfers-show-response.json.tpl cinder/tests/functional/api_sample_tests/samples/volume_type/encryption-type-create-request.json.tpl cinder/tests/functional/api_sample_tests/samples/volume_type/encryption-type-create-response.json.tpl cinder/tests/functional/api_sample_tests/samples/volume_type/encryption-type-show-response.json.tpl cinder/tests/functional/api_sample_tests/samples/volume_type/encryption-type-specific-specs-show-response.json.tpl cinder/tests/functional/api_sample_tests/samples/volume_type/encryption-type-update-request.json.tpl cinder/tests/functional/api_sample_tests/samples/volume_type/encryption-type-update-response.json.tpl cinder/tests/functional/api_sample_tests/samples/volume_type/volume-type-access-add-request.json.tpl cinder/tests/functional/api_sample_tests/samples/volume_type/volume-type-access-list-response.json.tpl cinder/tests/functional/api_sample_tests/samples/volume_type/volume-type-all-extra-specs-show-response.json.tpl cinder/tests/functional/api_sample_tests/samples/volume_type/volume-type-create-request.json.tpl cinder/tests/functional/api_sample_tests/samples/volume_type/volume-type-create-response.json.tpl cinder/tests/functional/api_sample_tests/samples/volume_type/volume-type-default-response.json.tpl cinder/tests/functional/api_sample_tests/samples/volume_type/volume-type-extra-specs-create-update-request.json.tpl cinder/tests/functional/api_sample_tests/samples/volume_type/volume-type-extra-specs-create-update-response.json.tpl cinder/tests/functional/api_sample_tests/samples/volume_type/volume-type-show-response.json.tpl cinder/tests/functional/api_sample_tests/samples/volume_type/volume-type-specific-extra-specs-show-response.json.tpl cinder/tests/functional/api_sample_tests/samples/volume_type/volume-type-update-request.json.tpl cinder/tests/functional/api_sample_tests/samples/volume_type/volume-type-update-response.json.tpl cinder/tests/functional/api_sample_tests/samples/volume_type/volume-types-list-response.json.tpl cinder/tests/functional/api_sample_tests/samples/volumes/volume-create-request.json.tpl 
cinder/tests/functional/api_sample_tests/samples/volumes/volume-create-response.json.tpl cinder/tests/functional/api_sample_tests/samples/volumes/volume-metadata-create-request.json.tpl cinder/tests/functional/api_sample_tests/samples/volumes/volume-metadata-create-response.json.tpl cinder/tests/functional/api_sample_tests/samples/volumes/volume-metadata-show-key-response.json.tpl cinder/tests/functional/api_sample_tests/samples/volumes/volume-metadata-show-response.json.tpl cinder/tests/functional/api_sample_tests/samples/volumes/volume-metadata-update-key-request.json.tpl cinder/tests/functional/api_sample_tests/samples/volumes/volume-metadata-update-key-response.json.tpl cinder/tests/functional/api_sample_tests/samples/volumes/volume-metadata-update-request.json.tpl cinder/tests/functional/api_sample_tests/samples/volumes/volume-metadata-update-response.json.tpl cinder/tests/functional/api_sample_tests/samples/volumes/volume-show-response.json.tpl cinder/tests/functional/api_sample_tests/samples/volumes/volume-update-request.json.tpl cinder/tests/functional/api_sample_tests/samples/volumes/volume-update-response.json.tpl cinder/tests/functional/api_sample_tests/samples/volumes/volumes-list-detailed-response.json.tpl cinder/tests/functional/api_sample_tests/samples/volumes/volumes-list-response.json.tpl cinder/tests/functional/api_sample_tests/samples/volumes/v3.13/volume-create-response.json.tpl cinder/tests/functional/api_sample_tests/samples/volumes/v3.13/volume-show-response.json.tpl cinder/tests/functional/api_sample_tests/samples/volumes/v3.13/volume-update-response.json.tpl cinder/tests/functional/api_sample_tests/samples/volumes/v3.13/volumes-list-detailed-response.json.tpl cinder/tests/functional/api_sample_tests/samples/volumes/v3.21/volume-create-response.json.tpl cinder/tests/functional/api_sample_tests/samples/volumes/v3.21/volume-show-response.json.tpl cinder/tests/functional/api_sample_tests/samples/volumes/v3.21/volume-update-response.json.tpl cinder/tests/functional/api_sample_tests/samples/volumes/v3.21/volumes-list-detailed-response.json.tpl cinder/tests/functional/api_sample_tests/samples/volumes/v3.48/volume-create-response.json.tpl cinder/tests/functional/api_sample_tests/samples/volumes/v3.48/volume-show-response.json.tpl cinder/tests/functional/api_sample_tests/samples/volumes/v3.48/volume-update-response.json.tpl cinder/tests/functional/api_sample_tests/samples/volumes/v3.48/volumes-list-detailed-response.json.tpl cinder/tests/functional/api_sample_tests/samples/volumes/v3.61/volume-create-response.json.tpl cinder/tests/functional/api_sample_tests/samples/volumes/v3.61/volume-show-response.json.tpl cinder/tests/functional/api_sample_tests/samples/volumes/v3.61/volume-update-response.json.tpl cinder/tests/functional/api_sample_tests/samples/volumes/v3.61/volumes-list-detailed-response.json.tpl cinder/tests/functional/api_sample_tests/samples/volumes/v3.63/volume-create-response.json.tpl cinder/tests/functional/api_sample_tests/samples/volumes/v3.63/volume-show-response.json.tpl cinder/tests/functional/api_sample_tests/samples/volumes/v3.63/volume-update-response.json.tpl cinder/tests/functional/api_sample_tests/samples/volumes/v3.63/volumes-list-detailed-response.json.tpl cinder/tests/functional/api_sample_tests/samples/volumes/v3.65/volume-create-response.json.tpl cinder/tests/functional/api_sample_tests/samples/volumes/v3.65/volume-show-response.json.tpl cinder/tests/functional/api_sample_tests/samples/volumes/v3.65/volume-update-response.json.tpl 
cinder/tests/functional/api_sample_tests/samples/volumes/v3.65/volumes-list-detailed-response.json.tpl cinder/tests/hacking/__init__.py cinder/tests/hacking/checks.py cinder/tests/stubs/oslo_i18n/__init__.pyi cinder/tests/stubs/oslo_i18n/_factory.pyi cinder/tests/stubs/oslo_i18n/_gettextutils.pyi cinder/tests/stubs/oslo_i18n/_i18n.pyi cinder/tests/stubs/oslo_i18n/_lazy.pyi cinder/tests/stubs/oslo_i18n/_locale.pyi cinder/tests/stubs/oslo_i18n/_message.pyi cinder/tests/stubs/oslo_i18n/_translate.pyi cinder/tests/stubs/oslo_i18n/fixture.pyi cinder/tests/stubs/oslo_i18n/log.pyi cinder/tests/unit/__init__.py cinder/tests/unit/cast_as_call.py cinder/tests/unit/conf_fixture.py cinder/tests/unit/fake_cluster.py cinder/tests/unit/fake_constants.py cinder/tests/unit/fake_group.py cinder/tests/unit/fake_group_snapshot.py cinder/tests/unit/fake_notifier.py cinder/tests/unit/fake_objects.py cinder/tests/unit/fake_service.py cinder/tests/unit/fake_snapshot.py cinder/tests/unit/fake_utils.py cinder/tests/unit/fake_volume.py cinder/tests/unit/known_issues.py cinder/tests/unit/policy.yaml cinder/tests/unit/test.py cinder/tests/unit/test_api.py cinder/tests/unit/test_api_urlmap.py cinder/tests/unit/test_cleanable_manager.py cinder/tests/unit/test_cmd.py cinder/tests/unit/test_context.py cinder/tests/unit/test_coordination.py cinder/tests/unit/test_db_api.py cinder/tests/unit/test_db_worker_api.py cinder/tests/unit/test_evaluator.py cinder/tests/unit/test_exception.py cinder/tests/unit/test_fixtures.py cinder/tests/unit/test_hacking.py cinder/tests/unit/test_image_utils.py cinder/tests/unit/test_interface.py cinder/tests/unit/test_manager.py cinder/tests/unit/test_paginate_query.py cinder/tests/unit/test_policy.py cinder/tests/unit/test_qos_specs.py cinder/tests/unit/test_quota.py cinder/tests/unit/test_quota_utils.py cinder/tests/unit/test_rpc.py cinder/tests/unit/test_service.py cinder/tests/unit/test_service_auth.py cinder/tests/unit/test_setup_profiler.py cinder/tests/unit/test_ssh_utils.py cinder/tests/unit/test_test.py cinder/tests/unit/test_test_utils.py cinder/tests/unit/test_utils.py cinder/tests/unit/test_volume_cleanup.py cinder/tests/unit/test_volume_configuration.py cinder/tests/unit/test_volume_glance_metadata.py cinder/tests/unit/test_volume_throttling.py cinder/tests/unit/test_volume_transfer.py cinder/tests/unit/test_volume_types.py cinder/tests/unit/test_volume_types_extra_specs.py cinder/tests/unit/test_volume_utils.py cinder/tests/unit/utils.py cinder/tests/unit/api/__init__.py cinder/tests/unit/api/fakes.py cinder/tests/unit/api/test_api_validation.py cinder/tests/unit/api/test_common.py cinder/tests/unit/api/test_versions.py cinder/tests/unit/api/contrib/__init__.py cinder/tests/unit/api/contrib/test_admin_actions.py cinder/tests/unit/api/contrib/test_availability_zones.py cinder/tests/unit/api/contrib/test_backup_project_attribute.py cinder/tests/unit/api/contrib/test_backups.py cinder/tests/unit/api/contrib/test_capabilities.py cinder/tests/unit/api/contrib/test_cgsnapshots.py cinder/tests/unit/api/contrib/test_consistencygroups.py cinder/tests/unit/api/contrib/test_extended_snapshot_attributes.py cinder/tests/unit/api/contrib/test_hosts.py cinder/tests/unit/api/contrib/test_qos_specs_manage.py cinder/tests/unit/api/contrib/test_quotas.py cinder/tests/unit/api/contrib/test_quotas_classes.py cinder/tests/unit/api/contrib/test_scheduler_hints.py cinder/tests/unit/api/contrib/test_scheduler_stats.py cinder/tests/unit/api/contrib/test_services.py 
cinder/tests/unit/api/contrib/test_snapshot_actions.py cinder/tests/unit/api/contrib/test_snapshot_manage.py cinder/tests/unit/api/contrib/test_snapshot_unmanage.py cinder/tests/unit/api/contrib/test_types_extra_specs.py cinder/tests/unit/api/contrib/test_types_manage.py cinder/tests/unit/api/contrib/test_used_limits.py cinder/tests/unit/api/contrib/test_volume_actions.py cinder/tests/unit/api/contrib/test_volume_encryption_metadata.py cinder/tests/unit/api/contrib/test_volume_host_attribute.py cinder/tests/unit/api/contrib/test_volume_image_metadata.py cinder/tests/unit/api/contrib/test_volume_manage.py cinder/tests/unit/api/contrib/test_volume_migration_status_attribute.py cinder/tests/unit/api/contrib/test_volume_tenant_attribute.py cinder/tests/unit/api/contrib/test_volume_transfer.py cinder/tests/unit/api/contrib/test_volume_type_access.py cinder/tests/unit/api/contrib/test_volume_type_encryption.py cinder/tests/unit/api/contrib/test_volume_unmanage.py cinder/tests/unit/api/middleware/__init__.py cinder/tests/unit/api/middleware/test_auth.py cinder/tests/unit/api/middleware/test_faults.py cinder/tests/unit/api/openstack/__init__.py cinder/tests/unit/api/openstack/test_api_version_request.py cinder/tests/unit/api/openstack/test_versioned_method.py cinder/tests/unit/api/openstack/test_wsgi.py cinder/tests/unit/api/v2/__init__.py cinder/tests/unit/api/v2/fakes.py cinder/tests/unit/api/v2/test_limits.py cinder/tests/unit/api/v2/test_snapshots.py cinder/tests/unit/api/v2/test_volume_metadata.py cinder/tests/unit/api/v2/test_volumes.py cinder/tests/unit/api/v3/__init__.py cinder/tests/unit/api/v3/fakes.py cinder/tests/unit/api/v3/stubs.py cinder/tests/unit/api/v3/test_attachments.py cinder/tests/unit/api/v3/test_backups.py cinder/tests/unit/api/v3/test_cluster.py cinder/tests/unit/api/v3/test_consistencygroups.py cinder/tests/unit/api/v3/test_default_types.py cinder/tests/unit/api/v3/test_group_snapshots.py cinder/tests/unit/api/v3/test_group_specs.py cinder/tests/unit/api/v3/test_group_types.py cinder/tests/unit/api/v3/test_groups.py cinder/tests/unit/api/v3/test_limits.py cinder/tests/unit/api/v3/test_messages.py cinder/tests/unit/api/v3/test_resource_filters.py cinder/tests/unit/api/v3/test_snapshot_manage.py cinder/tests/unit/api/v3/test_snapshot_metadata.py cinder/tests/unit/api/v3/test_snapshots.py cinder/tests/unit/api/v3/test_types.py cinder/tests/unit/api/v3/test_types_orig.py cinder/tests/unit/api/v3/test_volume_manage.py cinder/tests/unit/api/v3/test_volume_metadata.py cinder/tests/unit/api/v3/test_volume_transfer.py cinder/tests/unit/api/v3/test_volumes.py cinder/tests/unit/api/v3/test_workers.py cinder/tests/unit/api/views/__init__.py cinder/tests/unit/api/views/test_versions.py cinder/tests/unit/attachments/__init__.py cinder/tests/unit/attachments/test_attachments_api.py cinder/tests/unit/attachments/test_attachments_manager.py cinder/tests/unit/backup/__init__.py cinder/tests/unit/backup/fake_backup.py cinder/tests/unit/backup/fake_google_client.py cinder/tests/unit/backup/fake_google_client2.py cinder/tests/unit/backup/fake_s3_client.py cinder/tests/unit/backup/fake_service.py cinder/tests/unit/backup/fake_swift_client.py cinder/tests/unit/backup/fake_swift_client2.py cinder/tests/unit/backup/test_backup.py cinder/tests/unit/backup/test_backup_messages.py cinder/tests/unit/backup/test_chunkeddriver.py cinder/tests/unit/backup/test_rpcapi.py cinder/tests/unit/backup/drivers/__init__.py cinder/tests/unit/backup/drivers/test_backup_ceph.py 
cinder/tests/unit/backup/drivers/test_backup_driver_base.py cinder/tests/unit/backup/drivers/test_backup_glusterfs.py cinder/tests/unit/backup/drivers/test_backup_google.py cinder/tests/unit/backup/drivers/test_backup_nfs.py cinder/tests/unit/backup/drivers/test_backup_posix.py cinder/tests/unit/backup/drivers/test_backup_s3.py cinder/tests/unit/backup/drivers/test_backup_swift.py cinder/tests/unit/brick/__init__.py cinder/tests/unit/brick/fake_lvm.py cinder/tests/unit/brick/test_brick_lvm.py cinder/tests/unit/cmd/__init__.py cinder/tests/unit/cmd/test_status.py cinder/tests/unit/compute/__init__.py cinder/tests/unit/compute/test_nova.py cinder/tests/unit/consistencygroup/__init__.py cinder/tests/unit/consistencygroup/fake_cgsnapshot.py cinder/tests/unit/consistencygroup/fake_consistencygroup.py cinder/tests/unit/db/__init__.py cinder/tests/unit/db/test_cluster.py cinder/tests/unit/db/test_default_types.py cinder/tests/unit/db/test_migration.py cinder/tests/unit/db/test_migrations.py cinder/tests/unit/db/test_name_id.py cinder/tests/unit/db/test_orm_relationships.py cinder/tests/unit/db/test_purge.py cinder/tests/unit/db/test_qos_specs.py cinder/tests/unit/db/test_reset_backend.py cinder/tests/unit/db/test_transfers.py cinder/tests/unit/db/test_volume_type.py cinder/tests/unit/group/__init__.py cinder/tests/unit/group/test_groups_api.py cinder/tests/unit/group/test_groups_manager.py cinder/tests/unit/group/test_groups_manager_replication.py cinder/tests/unit/image/__init__.py cinder/tests/unit/image/fake.py cinder/tests/unit/image/glance_stubs.py cinder/tests/unit/image/test_accelerator.py cinder/tests/unit/image/test_cache.py cinder/tests/unit/image/test_format_inspector.py cinder/tests/unit/image/test_glance.py cinder/tests/unit/image/accelerators/__init__.py cinder/tests/unit/image/accelerators/test_qat_gzip.py cinder/tests/unit/keymgr/__init__.py cinder/tests/unit/keymgr/fake.py cinder/tests/unit/keymgr/test_conf_key_mgr.py cinder/tests/unit/keymgr/test_migration.py cinder/tests/unit/keymgr/test_transfer.py cinder/tests/unit/message/__init__.py cinder/tests/unit/message/test_api.py cinder/tests/unit/message/test_defined_messages.py cinder/tests/unit/message/test_message_field.py cinder/tests/unit/monkey_patch_example/__init__.py cinder/tests/unit/monkey_patch_example/example_a.py cinder/tests/unit/monkey_patch_example/example_b.py cinder/tests/unit/objects/__init__.py cinder/tests/unit/objects/test_backup.py cinder/tests/unit/objects/test_base.py cinder/tests/unit/objects/test_cgsnapshot.py cinder/tests/unit/objects/test_cleanable.py cinder/tests/unit/objects/test_cleanup_request.py cinder/tests/unit/objects/test_cluster.py cinder/tests/unit/objects/test_consistencygroup.py cinder/tests/unit/objects/test_fields.py cinder/tests/unit/objects/test_group.py cinder/tests/unit/objects/test_group_snapshot.py cinder/tests/unit/objects/test_group_type.py cinder/tests/unit/objects/test_manageable_volumes_snapshots.py cinder/tests/unit/objects/test_objects.py cinder/tests/unit/objects/test_qos.py cinder/tests/unit/objects/test_service.py cinder/tests/unit/objects/test_snapshot.py cinder/tests/unit/objects/test_volume.py cinder/tests/unit/objects/test_volume_attachment.py cinder/tests/unit/objects/test_volume_type.py cinder/tests/unit/policies/__init__.py cinder/tests/unit/policies/base.py cinder/tests/unit/policies/test_attachments.py cinder/tests/unit/policies/test_backups.py cinder/tests/unit/policies/test_base.py cinder/tests/unit/policies/test_default_volume_types.py 
cinder/tests/unit/policies/test_group_actions.py cinder/tests/unit/policies/test_group_snapshots.py cinder/tests/unit/policies/test_group_types.py cinder/tests/unit/policies/test_groups.py cinder/tests/unit/policies/test_limits.py cinder/tests/unit/policies/test_messages.py cinder/tests/unit/policies/test_quota_class.py cinder/tests/unit/policies/test_quotas.py cinder/tests/unit/policies/test_snapshot_actions.py cinder/tests/unit/policies/test_snapshot_metadata.py cinder/tests/unit/policies/test_snapshots.py cinder/tests/unit/policies/test_type_extra_specs.py cinder/tests/unit/policies/test_volume.py cinder/tests/unit/policies/test_volume_access.py cinder/tests/unit/policies/test_volume_actions.py cinder/tests/unit/policies/test_volume_metadata.py cinder/tests/unit/policies/test_volume_transfers.py cinder/tests/unit/policies/test_volume_type.py cinder/tests/unit/privsep/__init__.py cinder/tests/unit/privsep/test_format_inspector.py cinder/tests/unit/privsep/targets/__init__.py cinder/tests/unit/privsep/targets/fake_nvmet_lib.py cinder/tests/unit/privsep/targets/test_nvmet.py cinder/tests/unit/scheduler/__init__.py cinder/tests/unit/scheduler/fake_hosts.py cinder/tests/unit/scheduler/fakes.py cinder/tests/unit/scheduler/helpers.py cinder/tests/unit/scheduler/test_allocated_capacity_weigher.py cinder/tests/unit/scheduler/test_base_filter.py cinder/tests/unit/scheduler/test_capacity_weigher.py cinder/tests/unit/scheduler/test_chance_weigher.py cinder/tests/unit/scheduler/test_filter_scheduler.py cinder/tests/unit/scheduler/test_goodness_weigher.py cinder/tests/unit/scheduler/test_host_filters.py cinder/tests/unit/scheduler/test_host_manager.py cinder/tests/unit/scheduler/test_rpcapi.py cinder/tests/unit/scheduler/test_scheduler.py cinder/tests/unit/scheduler/test_scheduler_options.py cinder/tests/unit/scheduler/test_stochastic_weight_handler.py cinder/tests/unit/scheduler/test_volume_number_weigher.py cinder/tests/unit/scheduler/test_weights.py cinder/tests/unit/targets/__init__.py cinder/tests/unit/targets/targets_fixture.py cinder/tests/unit/targets/test_base_iscsi_driver.py cinder/tests/unit/targets/test_cxt_driver.py cinder/tests/unit/targets/test_iser_driver.py cinder/tests/unit/targets/test_lio_driver.py cinder/tests/unit/targets/test_nvmeof_driver.py cinder/tests/unit/targets/test_nvmet_driver.py cinder/tests/unit/targets/test_scst_driver.py cinder/tests/unit/targets/test_spdknvmf.py cinder/tests/unit/targets/test_tgt_driver.py cinder/tests/unit/volume/__init__.py cinder/tests/unit/volume/test_availability_zone.py cinder/tests/unit/volume/test_capabilities.py cinder/tests/unit/volume/test_connection.py cinder/tests/unit/volume/test_driver.py cinder/tests/unit/volume/test_image.py cinder/tests/unit/volume/test_init_host.py cinder/tests/unit/volume/test_manage_volume.py cinder/tests/unit/volume/test_replication_manager.py cinder/tests/unit/volume/test_rpcapi.py cinder/tests/unit/volume/test_snapshot.py cinder/tests/unit/volume/test_volume.py cinder/tests/unit/volume/test_volume_manager.py cinder/tests/unit/volume/test_volume_migration.py cinder/tests/unit/volume/test_volume_reimage.py cinder/tests/unit/volume/test_volume_retype.py cinder/tests/unit/volume/test_volume_usage_audit.py cinder/tests/unit/volume/drivers/__init__.py cinder/tests/unit/volume/drivers/test_datera.py cinder/tests/unit/volume/drivers/test_fujitsu_dx.py cinder/tests/unit/volume/drivers/test_gpfs.py cinder/tests/unit/volume/drivers/test_hedvig.py cinder/tests/unit/volume/drivers/test_infinidat.py 
cinder/tests/unit/volume/drivers/test_kaminario.py cinder/tests/unit/volume/drivers/test_kioxia.py cinder/tests/unit/volume/drivers/test_linstordrv.py cinder/tests/unit/volume/drivers/test_lvm_driver.py cinder/tests/unit/volume/drivers/test_macrosan_drivers.py cinder/tests/unit/volume/drivers/test_nfs.py cinder/tests/unit/volume/drivers/test_prophetstor_dpl.py cinder/tests/unit/volume/drivers/test_pure.py cinder/tests/unit/volume/drivers/test_qnap.py cinder/tests/unit/volume/drivers/test_quobyte.py cinder/tests/unit/volume/drivers/test_rbd.py cinder/tests/unit/volume/drivers/test_remotefs.py cinder/tests/unit/volume/drivers/test_rsd.py cinder/tests/unit/volume/drivers/test_san.py cinder/tests/unit/volume/drivers/test_seagate.py cinder/tests/unit/volume/drivers/test_spdk.py cinder/tests/unit/volume/drivers/test_storpool.py cinder/tests/unit/volume/drivers/test_veritas_cnfs.py cinder/tests/unit/volume/drivers/test_vzstorage.py cinder/tests/unit/volume/drivers/test_zadara.py cinder/tests/unit/volume/drivers/ceph/__init__.py cinder/tests/unit/volume/drivers/ceph/test_rbd_iscsi.py cinder/tests/unit/volume/drivers/datacore/__init__.py cinder/tests/unit/volume/drivers/datacore/test_datacore_api.py cinder/tests/unit/volume/drivers/datacore/test_datacore_driver.py cinder/tests/unit/volume/drivers/datacore/test_datacore_fc.py cinder/tests/unit/volume/drivers/datacore/test_datacore_iscsi.py cinder/tests/unit/volume/drivers/datacore/test_datacore_passwd.py cinder/tests/unit/volume/drivers/datacore/test_datacore_utils.py cinder/tests/unit/volume/drivers/dell_emc/__init__.py cinder/tests/unit/volume/drivers/dell_emc/test_xtremio.py cinder/tests/unit/volume/drivers/dell_emc/powerflex/__init__.py cinder/tests/unit/volume/drivers/dell_emc/powerflex/mocks.py cinder/tests/unit/volume/drivers/dell_emc/powerflex/test_attach_detach_volume.py cinder/tests/unit/volume/drivers/dell_emc/powerflex/test_create_cloned_volume.py cinder/tests/unit/volume/drivers/dell_emc/powerflex/test_create_snapshot.py cinder/tests/unit/volume/drivers/dell_emc/powerflex/test_create_volume.py cinder/tests/unit/volume/drivers/dell_emc/powerflex/test_create_volume_from_snapshot.py cinder/tests/unit/volume/drivers/dell_emc/powerflex/test_delete_snapshot.py cinder/tests/unit/volume/drivers/dell_emc/powerflex/test_delete_volume.py cinder/tests/unit/volume/drivers/dell_emc/powerflex/test_extend_volume.py cinder/tests/unit/volume/drivers/dell_emc/powerflex/test_get_manageable.py cinder/tests/unit/volume/drivers/dell_emc/powerflex/test_groups.py cinder/tests/unit/volume/drivers/dell_emc/powerflex/test_initialize_connection.py cinder/tests/unit/volume/drivers/dell_emc/powerflex/test_initialize_connection_snapshot.py cinder/tests/unit/volume/drivers/dell_emc/powerflex/test_manage_existing.py cinder/tests/unit/volume/drivers/dell_emc/powerflex/test_manage_existing_snapshot.py cinder/tests/unit/volume/drivers/dell_emc/powerflex/test_migrate_volume.py cinder/tests/unit/volume/drivers/dell_emc/powerflex/test_misc.py cinder/tests/unit/volume/drivers/dell_emc/powerflex/test_powerflex_client.py cinder/tests/unit/volume/drivers/dell_emc/powerflex/test_replication.py cinder/tests/unit/volume/drivers/dell_emc/powerflex/test_revert_volume_to_snapshot.py cinder/tests/unit/volume/drivers/dell_emc/powerflex/test_sdc.py cinder/tests/unit/volume/drivers/dell_emc/powerflex/test_versions.py cinder/tests/unit/volume/drivers/dell_emc/powerflex/mockup/query_sdc_by_id_response.json 
cinder/tests/unit/volume/drivers/dell_emc/powerflex/mockup/query_sdc_instances_response.json cinder/tests/unit/volume/drivers/dell_emc/powerflex/mockup/query_sdc_volumes_response.json cinder/tests/unit/volume/drivers/dell_emc/powermax/__init__.py cinder/tests/unit/volume/drivers/dell_emc/powermax/powermax_data.py cinder/tests/unit/volume/drivers/dell_emc/powermax/powermax_fake_objects.py cinder/tests/unit/volume/drivers/dell_emc/powermax/test_powermax_common.py cinder/tests/unit/volume/drivers/dell_emc/powermax/test_powermax_fc.py cinder/tests/unit/volume/drivers/dell_emc/powermax/test_powermax_iscsi.py cinder/tests/unit/volume/drivers/dell_emc/powermax/test_powermax_masking.py cinder/tests/unit/volume/drivers/dell_emc/powermax/test_powermax_metadata.py cinder/tests/unit/volume/drivers/dell_emc/powermax/test_powermax_migrate.py cinder/tests/unit/volume/drivers/dell_emc/powermax/test_powermax_nvme_tcp.py cinder/tests/unit/volume/drivers/dell_emc/powermax/test_powermax_performance.py cinder/tests/unit/volume/drivers/dell_emc/powermax/test_powermax_provision.py cinder/tests/unit/volume/drivers/dell_emc/powermax/test_powermax_replication.py cinder/tests/unit/volume/drivers/dell_emc/powermax/test_powermax_rest.py cinder/tests/unit/volume/drivers/dell_emc/powermax/test_powermax_utils.py cinder/tests/unit/volume/drivers/dell_emc/powerstore/__init__.py cinder/tests/unit/volume/drivers/dell_emc/powerstore/test_base.py cinder/tests/unit/volume/drivers/dell_emc/powerstore/test_client.py cinder/tests/unit/volume/drivers/dell_emc/powerstore/test_nfs.py cinder/tests/unit/volume/drivers/dell_emc/powerstore/test_replication.py cinder/tests/unit/volume/drivers/dell_emc/powerstore/test_snapshot_create_delete_revert.py cinder/tests/unit/volume/drivers/dell_emc/powerstore/test_volume_attach_detach.py cinder/tests/unit/volume/drivers/dell_emc/powerstore/test_volume_create_delete_extend.py cinder/tests/unit/volume/drivers/dell_emc/powerstore/test_volume_create_from_source.py cinder/tests/unit/volume/drivers/dell_emc/powerstore/test_volume_group_create_delete_update.py cinder/tests/unit/volume/drivers/dell_emc/powerstore/test_volume_group_create_from_source.py cinder/tests/unit/volume/drivers/dell_emc/powerstore/test_volume_group_snapshot_create_delete.py cinder/tests/unit/volume/drivers/dell_emc/sc/__init__.py cinder/tests/unit/volume/drivers/dell_emc/sc/test_fc.py cinder/tests/unit/volume/drivers/dell_emc/sc/test_sc.py cinder/tests/unit/volume/drivers/dell_emc/sc/test_scapi.py cinder/tests/unit/volume/drivers/dell_emc/unity/__init__.py cinder/tests/unit/volume/drivers/dell_emc/unity/fake_enum.py cinder/tests/unit/volume/drivers/dell_emc/unity/fake_exception.py cinder/tests/unit/volume/drivers/dell_emc/unity/test_adapter.py cinder/tests/unit/volume/drivers/dell_emc/unity/test_client.py cinder/tests/unit/volume/drivers/dell_emc/unity/test_driver.py cinder/tests/unit/volume/drivers/dell_emc/unity/test_replication.py cinder/tests/unit/volume/drivers/dell_emc/unity/test_utils.py cinder/tests/unit/volume/drivers/dell_emc/vnx/__init__.py cinder/tests/unit/volume/drivers/dell_emc/vnx/fake_enum.py cinder/tests/unit/volume/drivers/dell_emc/vnx/fake_exception.py cinder/tests/unit/volume/drivers/dell_emc/vnx/mocked_cinder.yaml cinder/tests/unit/volume/drivers/dell_emc/vnx/mocked_vnx.yaml cinder/tests/unit/volume/drivers/dell_emc/vnx/res_mock.py cinder/tests/unit/volume/drivers/dell_emc/vnx/test_adapter.py cinder/tests/unit/volume/drivers/dell_emc/vnx/test_base.py 
cinder/tests/unit/volume/drivers/dell_emc/vnx/test_client.py cinder/tests/unit/volume/drivers/dell_emc/vnx/test_common.py cinder/tests/unit/volume/drivers/dell_emc/vnx/test_driver.py cinder/tests/unit/volume/drivers/dell_emc/vnx/test_replication.py cinder/tests/unit/volume/drivers/dell_emc/vnx/test_res_mock.py cinder/tests/unit/volume/drivers/dell_emc/vnx/test_res_mock.yaml cinder/tests/unit/volume/drivers/dell_emc/vnx/test_taskflows.py cinder/tests/unit/volume/drivers/dell_emc/vnx/test_utils.py cinder/tests/unit/volume/drivers/dell_emc/vnx/utils.py cinder/tests/unit/volume/drivers/dell_emc/vnx/fake_storops/__init__.py cinder/tests/unit/volume/drivers/dell_emc/vnx/fake_storops/lib/__init__.py cinder/tests/unit/volume/drivers/dell_emc/vnx/fake_storops/lib/tasks.py cinder/tests/unit/volume/drivers/fungible/__init__.py cinder/tests/unit/volume/drivers/fungible/test_adapter.py cinder/tests/unit/volume/drivers/fungible/test_driver.py cinder/tests/unit/volume/drivers/fusionstorage/__init__.py cinder/tests/unit/volume/drivers/fusionstorage/test_dsware.py cinder/tests/unit/volume/drivers/fusionstorage/test_fs_client.py cinder/tests/unit/volume/drivers/fusionstorage/test_fs_conf.py cinder/tests/unit/volume/drivers/fusionstorage/test_utils.py cinder/tests/unit/volume/drivers/hitachi/__init__.py cinder/tests/unit/volume/drivers/hitachi/test_hitachi_hbsd_mirror_fc.py cinder/tests/unit/volume/drivers/hitachi/test_hitachi_hbsd_rest_fc.py cinder/tests/unit/volume/drivers/hitachi/test_hitachi_hbsd_rest_iscsi.py cinder/tests/unit/volume/drivers/hpe/__init__.py cinder/tests/unit/volume/drivers/hpe/fake_hpe_3par_client.py cinder/tests/unit/volume/drivers/hpe/fake_hpe_client_exceptions.py cinder/tests/unit/volume/drivers/hpe/test_hpe3par.py cinder/tests/unit/volume/drivers/hpe/test_nimble.py cinder/tests/unit/volume/drivers/hpe/xp/__init__.py cinder/tests/unit/volume/drivers/hpe/xp/test_hpe_xp_rest_fc.py cinder/tests/unit/volume/drivers/hpe/xp/test_hpe_xp_rest_iscsi.py cinder/tests/unit/volume/drivers/huawei/__init__.py cinder/tests/unit/volume/drivers/huawei/test_huawei_drivers.py cinder/tests/unit/volume/drivers/ibm/__init__.py cinder/tests/unit/volume/drivers/ibm/fake_pyxcli.py cinder/tests/unit/volume/drivers/ibm/fake_pyxcli_exceptions.py cinder/tests/unit/volume/drivers/ibm/test_ds8k_proxy.py cinder/tests/unit/volume/drivers/ibm/test_ibm_flashsystem.py cinder/tests/unit/volume/drivers/ibm/test_ibm_flashsystem_iscsi.py cinder/tests/unit/volume/drivers/ibm/test_ibm_storage.py cinder/tests/unit/volume/drivers/ibm/test_storwize_svc.py cinder/tests/unit/volume/drivers/ibm/test_xiv_proxy.py cinder/tests/unit/volume/drivers/infortrend/__init__.py cinder/tests/unit/volume/drivers/infortrend/test_infortrend_cli.py cinder/tests/unit/volume/drivers/infortrend/test_infortrend_common.py cinder/tests/unit/volume/drivers/inspur/__init__.py cinder/tests/unit/volume/drivers/inspur/as13000/__init__.py cinder/tests/unit/volume/drivers/inspur/as13000/test_as13000_driver.py cinder/tests/unit/volume/drivers/inspur/instorage/__init__.py cinder/tests/unit/volume/drivers/inspur/instorage/fakes.py cinder/tests/unit/volume/drivers/inspur/instorage/test_common.py cinder/tests/unit/volume/drivers/inspur/instorage/test_fc_driver.py cinder/tests/unit/volume/drivers/inspur/instorage/test_helper_routines.py cinder/tests/unit/volume/drivers/inspur/instorage/test_iscsi_driver.py cinder/tests/unit/volume/drivers/inspur/instorage/test_replication.py cinder/tests/unit/volume/drivers/lightos/__init__.py 
cinder/tests/unit/volume/drivers/lightos/test_lightos_storage.py cinder/tests/unit/volume/drivers/nec/__init__.py cinder/tests/unit/volume/drivers/nec/test_volume.py cinder/tests/unit/volume/drivers/nec/v/__init__.py cinder/tests/unit/volume/drivers/nec/v/test_internal_nec_rest_fc.py cinder/tests/unit/volume/drivers/nec/v/test_internal_nec_rest_iscsi.py cinder/tests/unit/volume/drivers/nec/v/test_nec_rest_fc.py cinder/tests/unit/volume/drivers/nec/v/test_nec_rest_iscsi.py cinder/tests/unit/volume/drivers/netapp/__init__.py cinder/tests/unit/volume/drivers/netapp/fakes.py cinder/tests/unit/volume/drivers/netapp/test_common.py cinder/tests/unit/volume/drivers/netapp/test_utils.py cinder/tests/unit/volume/drivers/netapp/dataontap/__init__.py cinder/tests/unit/volume/drivers/netapp/dataontap/fakes.py cinder/tests/unit/volume/drivers/netapp/dataontap/test_block_base.py cinder/tests/unit/volume/drivers/netapp/dataontap/test_block_cmode.py cinder/tests/unit/volume/drivers/netapp/dataontap/test_block_driver_interfaces.py cinder/tests/unit/volume/drivers/netapp/dataontap/test_fc_cmode.py cinder/tests/unit/volume/drivers/netapp/dataontap/test_iscsi_cmode.py cinder/tests/unit/volume/drivers/netapp/dataontap/test_nfs_base.py cinder/tests/unit/volume/drivers/netapp/dataontap/test_nfs_cmode.py cinder/tests/unit/volume/drivers/netapp/dataontap/test_nvme_cmode.py cinder/tests/unit/volume/drivers/netapp/dataontap/test_nvme_library.py cinder/tests/unit/volume/drivers/netapp/dataontap/client/__init__.py cinder/tests/unit/volume/drivers/netapp/dataontap/client/fakes.py cinder/tests/unit/volume/drivers/netapp/dataontap/client/test_api.py cinder/tests/unit/volume/drivers/netapp/dataontap/client/test_client_base.py cinder/tests/unit/volume/drivers/netapp/dataontap/client/test_client_cmode.py cinder/tests/unit/volume/drivers/netapp/dataontap/client/test_client_cmode_rest.py cinder/tests/unit/volume/drivers/netapp/dataontap/client/test_client_cmode_rest_asar2.py cinder/tests/unit/volume/drivers/netapp/dataontap/performance/__init__.py cinder/tests/unit/volume/drivers/netapp/dataontap/performance/fakes.py cinder/tests/unit/volume/drivers/netapp/dataontap/performance/test_perf_base.py cinder/tests/unit/volume/drivers/netapp/dataontap/performance/test_perf_cmode.py cinder/tests/unit/volume/drivers/netapp/dataontap/utils/__init__.py cinder/tests/unit/volume/drivers/netapp/dataontap/utils/fakes.py cinder/tests/unit/volume/drivers/netapp/dataontap/utils/test_capabilities.py cinder/tests/unit/volume/drivers/netapp/dataontap/utils/test_data_motion.py cinder/tests/unit/volume/drivers/netapp/dataontap/utils/test_loopingcalls.py cinder/tests/unit/volume/drivers/netapp/dataontap/utils/test_utils.py cinder/tests/unit/volume/drivers/nexenta/__init__.py cinder/tests/unit/volume/drivers/nexenta/test_nexenta.py cinder/tests/unit/volume/drivers/nexenta/test_nexenta5_iscsi.py cinder/tests/unit/volume/drivers/nexenta/test_nexenta5_jsonrpc.py cinder/tests/unit/volume/drivers/nexenta/test_nexenta5_nfs.py cinder/tests/unit/volume/drivers/open_e/__init__.py cinder/tests/unit/volume/drivers/open_e/test_common.py cinder/tests/unit/volume/drivers/open_e/test_driver.py cinder/tests/unit/volume/drivers/open_e/test_iscsi.py cinder/tests/unit/volume/drivers/open_e/test_rest.py cinder/tests/unit/volume/drivers/open_e/test_rest_proxy.py cinder/tests/unit/volume/drivers/sandstone/__init__.py cinder/tests/unit/volume/drivers/sandstone/test_sds_client.py cinder/tests/unit/volume/drivers/sandstone/test_sds_driver.py 
cinder/tests/unit/volume/drivers/sandstone/test_utils.py cinder/tests/unit/volume/drivers/solidfire/__init__.py cinder/tests/unit/volume/drivers/solidfire/scaled_iops_invalid_data.json cinder/tests/unit/volume/drivers/solidfire/scaled_iops_test_data.json cinder/tests/unit/volume/drivers/solidfire/test_solidfire.py cinder/tests/unit/volume/drivers/synology/__init__.py cinder/tests/unit/volume/drivers/synology/test_synology_common.py cinder/tests/unit/volume/drivers/synology/test_synology_iscsi.py cinder/tests/unit/volume/drivers/toyou/__init__.py cinder/tests/unit/volume/drivers/toyou/test_acs5000.py cinder/tests/unit/volume/drivers/toyou/test_tyds.py cinder/tests/unit/volume/drivers/veritas_access/__init__.py cinder/tests/unit/volume/drivers/veritas_access/test_veritas_iscsi.py cinder/tests/unit/volume/drivers/vmware/__init__.py cinder/tests/unit/volume/drivers/vmware/fake.py cinder/tests/unit/volume/drivers/vmware/test_fcd.py cinder/tests/unit/volume/drivers/vmware/test_vmware_datastore.py cinder/tests/unit/volume/drivers/vmware/test_vmware_vmdk.py cinder/tests/unit/volume/drivers/vmware/test_vmware_volumeops.py cinder/tests/unit/volume/drivers/yadro/__init__.py cinder/tests/unit/volume/drivers/yadro/test_tatlin_client.py cinder/tests/unit/volume/drivers/yadro/test_tatlin_common.py cinder/tests/unit/volume/drivers/yadro/test_tatlin_fc.py cinder/tests/unit/volume/drivers/yadro/test_tatlin_iscsi.py cinder/tests/unit/volume/drivers/yadro/test_tatlin_utils.py cinder/tests/unit/volume/flows/__init__.py cinder/tests/unit/volume/flows/fake_volume_api.py cinder/tests/unit/volume/flows/test_create_volume_flow.py cinder/tests/unit/volume/flows/test_manage_snapshot_flow.py cinder/tests/unit/volume/flows/test_manage_volume_flow.py cinder/tests/unit/volume/flows/api/__init__.py cinder/tests/unit/volume/flows/api/test_create_volume.py cinder/tests/unit/windows/__init__.py cinder/tests/unit/windows/db_fakes.py cinder/tests/unit/windows/test_iscsi.py cinder/tests/unit/windows/test_smbfs.py cinder/tests/unit/zonemanager/__init__.py cinder/tests/unit/zonemanager/test_brcd_fc_san_lookup_service.py cinder/tests/unit/zonemanager/test_brcd_fc_zone_client_cli.py cinder/tests/unit/zonemanager/test_brcd_fc_zone_driver.py cinder/tests/unit/zonemanager/test_brcd_http_fc_zone_client.py cinder/tests/unit/zonemanager/test_brcd_lookup_service.py cinder/tests/unit/zonemanager/test_brcd_rest_fc_zone_client.py cinder/tests/unit/zonemanager/test_cisco_fc_san_lookup_service.py cinder/tests/unit/zonemanager/test_cisco_fc_zone_client_cli.py cinder/tests/unit/zonemanager/test_cisco_fc_zone_driver.py cinder/tests/unit/zonemanager/test_cisco_lookup_service.py cinder/tests/unit/zonemanager/test_driverutils.py cinder/tests/unit/zonemanager/test_fc_zone_manager.py cinder/tests/unit/zonemanager/test_volume_driver.py cinder/transfer/__init__.py cinder/transfer/api.py cinder/volume/__init__.py cinder/volume/api.py cinder/volume/configuration.py cinder/volume/driver.py cinder/volume/driver_utils.py cinder/volume/group_types.py cinder/volume/manager.py cinder/volume/qos_specs.py cinder/volume/rpcapi.py cinder/volume/throttling.py cinder/volume/volume_migration.py cinder/volume/volume_types.py cinder/volume/volume_utils.py cinder/volume/drivers/__init__.py cinder/volume/drivers/infinidat.py cinder/volume/drivers/lightos.py cinder/volume/drivers/linstordrv.py cinder/volume/drivers/lvm.py cinder/volume/drivers/nfs.py cinder/volume/drivers/pure.py cinder/volume/drivers/qnap.py cinder/volume/drivers/quobyte.py cinder/volume/drivers/rbd.py 
cinder/volume/drivers/remotefs.py cinder/volume/drivers/rsd.py cinder/volume/drivers/solidfire.py cinder/volume/drivers/spdk.py cinder/volume/drivers/storpool.py cinder/volume/drivers/veritas_cnfs.py cinder/volume/drivers/vzstorage.py cinder/volume/drivers/ceph/__init__.py cinder/volume/drivers/ceph/rbd_iscsi.py cinder/volume/drivers/datacore/api.py cinder/volume/drivers/datacore/driver.py cinder/volume/drivers/datacore/exception.py cinder/volume/drivers/datacore/fc.py cinder/volume/drivers/datacore/iscsi.py cinder/volume/drivers/datacore/passwd.py cinder/volume/drivers/datacore/utils.py cinder/volume/drivers/datera/__init__.py cinder/volume/drivers/datera/datera_api21.py cinder/volume/drivers/datera/datera_api22.py cinder/volume/drivers/datera/datera_common.py cinder/volume/drivers/datera/datera_iscsi.py cinder/volume/drivers/dell_emc/__init__.py cinder/volume/drivers/dell_emc/xtremio.py cinder/volume/drivers/dell_emc/powerflex/__init__.py cinder/volume/drivers/dell_emc/powerflex/driver.py cinder/volume/drivers/dell_emc/powerflex/options.py cinder/volume/drivers/dell_emc/powerflex/rest_client.py cinder/volume/drivers/dell_emc/powerflex/simplecache.py cinder/volume/drivers/dell_emc/powerflex/utils.py cinder/volume/drivers/dell_emc/powermax/__init__.py cinder/volume/drivers/dell_emc/powermax/common.py cinder/volume/drivers/dell_emc/powermax/fc.py cinder/volume/drivers/dell_emc/powermax/iscsi.py cinder/volume/drivers/dell_emc/powermax/masking.py cinder/volume/drivers/dell_emc/powermax/metadata.py cinder/volume/drivers/dell_emc/powermax/migrate.py cinder/volume/drivers/dell_emc/powermax/nvme.py cinder/volume/drivers/dell_emc/powermax/nvme_tcp.py cinder/volume/drivers/dell_emc/powermax/performance.py cinder/volume/drivers/dell_emc/powermax/provision.py cinder/volume/drivers/dell_emc/powermax/rest.py cinder/volume/drivers/dell_emc/powermax/utils.py cinder/volume/drivers/dell_emc/powerstore/__init__.py cinder/volume/drivers/dell_emc/powerstore/adapter.py cinder/volume/drivers/dell_emc/powerstore/client.py cinder/volume/drivers/dell_emc/powerstore/driver.py cinder/volume/drivers/dell_emc/powerstore/exception.py cinder/volume/drivers/dell_emc/powerstore/nfs.py cinder/volume/drivers/dell_emc/powerstore/options.py cinder/volume/drivers/dell_emc/powerstore/utils.py cinder/volume/drivers/dell_emc/powervault/__init__.py cinder/volume/drivers/dell_emc/powervault/client.py cinder/volume/drivers/dell_emc/powervault/common.py cinder/volume/drivers/dell_emc/powervault/fc.py cinder/volume/drivers/dell_emc/powervault/iscsi.py cinder/volume/drivers/dell_emc/sc/__init__.py cinder/volume/drivers/dell_emc/sc/storagecenter_api.py cinder/volume/drivers/dell_emc/sc/storagecenter_common.py cinder/volume/drivers/dell_emc/sc/storagecenter_fc.py cinder/volume/drivers/dell_emc/sc/storagecenter_iscsi.py cinder/volume/drivers/dell_emc/unity/__init__.py cinder/volume/drivers/dell_emc/unity/adapter.py cinder/volume/drivers/dell_emc/unity/client.py cinder/volume/drivers/dell_emc/unity/driver.py cinder/volume/drivers/dell_emc/unity/replication.py cinder/volume/drivers/dell_emc/unity/utils.py cinder/volume/drivers/dell_emc/vnx/__init__.py cinder/volume/drivers/dell_emc/vnx/adapter.py cinder/volume/drivers/dell_emc/vnx/client.py cinder/volume/drivers/dell_emc/vnx/common.py cinder/volume/drivers/dell_emc/vnx/const.py cinder/volume/drivers/dell_emc/vnx/driver.py cinder/volume/drivers/dell_emc/vnx/replication.py cinder/volume/drivers/dell_emc/vnx/taskflows.py cinder/volume/drivers/dell_emc/vnx/utils.py 
cinder/volume/drivers/fujitsu/__init__.py cinder/volume/drivers/fujitsu/eternus_dx/__init__.py cinder/volume/drivers/fujitsu/eternus_dx/constants.py cinder/volume/drivers/fujitsu/eternus_dx/eternus_dx_cli.py cinder/volume/drivers/fujitsu/eternus_dx/eternus_dx_common.py cinder/volume/drivers/fujitsu/eternus_dx/eternus_dx_fc.py cinder/volume/drivers/fujitsu/eternus_dx/eternus_dx_iscsi.py cinder/volume/drivers/fungible/__init__.py cinder/volume/drivers/fungible/constants.py cinder/volume/drivers/fungible/driver.py cinder/volume/drivers/fungible/rest_client.py cinder/volume/drivers/fungible/swagger_api_client.py cinder/volume/drivers/fusionstorage/__init__.py cinder/volume/drivers/fusionstorage/constants.py cinder/volume/drivers/fusionstorage/dsware.py cinder/volume/drivers/fusionstorage/fs_client.py cinder/volume/drivers/fusionstorage/fs_conf.py cinder/volume/drivers/hedvig/__init__.py cinder/volume/drivers/hedvig/config.py cinder/volume/drivers/hedvig/hedvig_cinder.py cinder/volume/drivers/hedvig/rest_client.py cinder/volume/drivers/hitachi/__init__.py cinder/volume/drivers/hitachi/hbsd_common.py cinder/volume/drivers/hitachi/hbsd_fc.py cinder/volume/drivers/hitachi/hbsd_iscsi.py cinder/volume/drivers/hitachi/hbsd_replication.py cinder/volume/drivers/hitachi/hbsd_rest.py cinder/volume/drivers/hitachi/hbsd_rest_api.py cinder/volume/drivers/hitachi/hbsd_rest_fc.py cinder/volume/drivers/hitachi/hbsd_rest_iscsi.py cinder/volume/drivers/hitachi/hbsd_utils.py cinder/volume/drivers/hpe/__init__.py cinder/volume/drivers/hpe/hpe_3par_base.py cinder/volume/drivers/hpe/hpe_3par_common.py cinder/volume/drivers/hpe/hpe_3par_fc.py cinder/volume/drivers/hpe/hpe_3par_iscsi.py cinder/volume/drivers/hpe/nimble.py cinder/volume/drivers/hpe/xp/hpe_xp_fc.py cinder/volume/drivers/hpe/xp/hpe_xp_iscsi.py cinder/volume/drivers/hpe/xp/hpe_xp_rest.py cinder/volume/drivers/hpe/xp/hpe_xp_utils.py cinder/volume/drivers/huawei/__init__.py cinder/volume/drivers/huawei/common.py cinder/volume/drivers/huawei/constants.py cinder/volume/drivers/huawei/fc_zone_helper.py cinder/volume/drivers/huawei/huawei_conf.py cinder/volume/drivers/huawei/huawei_driver.py cinder/volume/drivers/huawei/huawei_utils.py cinder/volume/drivers/huawei/hypermetro.py cinder/volume/drivers/huawei/replication.py cinder/volume/drivers/huawei/rest_client.py cinder/volume/drivers/huawei/smartx.py cinder/volume/drivers/ibm/__init__.py cinder/volume/drivers/ibm/flashsystem_common.py cinder/volume/drivers/ibm/flashsystem_fc.py cinder/volume/drivers/ibm/flashsystem_iscsi.py cinder/volume/drivers/ibm/gpfs.py cinder/volume/drivers/ibm/ibm_storage/__init__.py cinder/volume/drivers/ibm/ibm_storage/certificate.py cinder/volume/drivers/ibm/ibm_storage/cryptish.py cinder/volume/drivers/ibm/ibm_storage/ds8k_connection.py cinder/volume/drivers/ibm/ibm_storage/ds8k_helper.py cinder/volume/drivers/ibm/ibm_storage/ds8k_proxy.py cinder/volume/drivers/ibm/ibm_storage/ds8k_replication.py cinder/volume/drivers/ibm/ibm_storage/ds8k_restclient.py cinder/volume/drivers/ibm/ibm_storage/ibm_storage.py cinder/volume/drivers/ibm/ibm_storage/proxy.py cinder/volume/drivers/ibm/ibm_storage/strings.py cinder/volume/drivers/ibm/ibm_storage/xiv_proxy.py cinder/volume/drivers/ibm/ibm_storage/xiv_replication.py cinder/volume/drivers/ibm/storwize_svc/__init__.py cinder/volume/drivers/ibm/storwize_svc/replication.py cinder/volume/drivers/ibm/storwize_svc/storwize_const.py cinder/volume/drivers/ibm/storwize_svc/storwize_svc_common.py cinder/volume/drivers/ibm/storwize_svc/storwize_svc_fc.py 
cinder/volume/drivers/ibm/storwize_svc/storwize_svc_iscsi.py cinder/volume/drivers/infortrend/__init__.py cinder/volume/drivers/infortrend/infortrend_fc_cli.py cinder/volume/drivers/infortrend/infortrend_iscsi_cli.py cinder/volume/drivers/infortrend/raidcmd_cli/__init__.py cinder/volume/drivers/infortrend/raidcmd_cli/cli_factory.py cinder/volume/drivers/infortrend/raidcmd_cli/common_cli.py cinder/volume/drivers/inspur/__init__.py cinder/volume/drivers/inspur/as13000/__init__.py cinder/volume/drivers/inspur/as13000/as13000_driver.py cinder/volume/drivers/inspur/instorage/__init__.py cinder/volume/drivers/inspur/instorage/instorage_common.py cinder/volume/drivers/inspur/instorage/instorage_const.py cinder/volume/drivers/inspur/instorage/instorage_fc.py cinder/volume/drivers/inspur/instorage/instorage_iscsi.py cinder/volume/drivers/inspur/instorage/replication.py cinder/volume/drivers/kaminario/__init__.py cinder/volume/drivers/kaminario/kaminario_common.py cinder/volume/drivers/kaminario/kaminario_fc.py cinder/volume/drivers/kaminario/kaminario_iscsi.py cinder/volume/drivers/kioxia/entities.py cinder/volume/drivers/kioxia/kumoscale.py cinder/volume/drivers/kioxia/rest_client.py cinder/volume/drivers/lenovo/__init__.py cinder/volume/drivers/lenovo/lenovo_client.py cinder/volume/drivers/lenovo/lenovo_common.py cinder/volume/drivers/lenovo/lenovo_fc.py cinder/volume/drivers/lenovo/lenovo_iscsi.py cinder/volume/drivers/macrosan/__init__.py cinder/volume/drivers/macrosan/config.py cinder/volume/drivers/macrosan/devop_client.py cinder/volume/drivers/macrosan/driver.py cinder/volume/drivers/nec/__init__.py cinder/volume/drivers/nec/cli.py cinder/volume/drivers/nec/product.xml cinder/volume/drivers/nec/volume.py cinder/volume/drivers/nec/volume_common.py cinder/volume/drivers/nec/volume_helper.py cinder/volume/drivers/nec/v/__init__.py cinder/volume/drivers/nec/v/nec_v_fc.py cinder/volume/drivers/nec/v/nec_v_iscsi.py cinder/volume/drivers/nec/v/nec_v_rest.py cinder/volume/drivers/nec/v/nec_v_utils.py cinder/volume/drivers/netapp/__init__.py cinder/volume/drivers/netapp/common.py cinder/volume/drivers/netapp/options.py cinder/volume/drivers/netapp/utils.py cinder/volume/drivers/netapp/dataontap/__init__.py cinder/volume/drivers/netapp/dataontap/block_base.py cinder/volume/drivers/netapp/dataontap/block_cmode.py cinder/volume/drivers/netapp/dataontap/fc_cmode.py cinder/volume/drivers/netapp/dataontap/iscsi_cmode.py cinder/volume/drivers/netapp/dataontap/nfs_base.py cinder/volume/drivers/netapp/dataontap/nfs_cmode.py cinder/volume/drivers/netapp/dataontap/nvme_cmode.py cinder/volume/drivers/netapp/dataontap/nvme_library.py cinder/volume/drivers/netapp/dataontap/client/__init__.py cinder/volume/drivers/netapp/dataontap/client/api.py cinder/volume/drivers/netapp/dataontap/client/client_base.py cinder/volume/drivers/netapp/dataontap/client/client_cmode.py cinder/volume/drivers/netapp/dataontap/client/client_cmode_rest.py cinder/volume/drivers/netapp/dataontap/client/client_cmode_rest_asar2.py cinder/volume/drivers/netapp/dataontap/performance/__init__.py cinder/volume/drivers/netapp/dataontap/performance/perf_base.py cinder/volume/drivers/netapp/dataontap/performance/perf_cmode.py cinder/volume/drivers/netapp/dataontap/utils/__init__.py cinder/volume/drivers/netapp/dataontap/utils/capabilities.py cinder/volume/drivers/netapp/dataontap/utils/data_motion.py cinder/volume/drivers/netapp/dataontap/utils/loopingcalls.py cinder/volume/drivers/netapp/dataontap/utils/utils.py 
cinder/volume/drivers/nexenta/__init__.py cinder/volume/drivers/nexenta/iscsi.py cinder/volume/drivers/nexenta/jsonrpc.py cinder/volume/drivers/nexenta/nfs.py cinder/volume/drivers/nexenta/options.py cinder/volume/drivers/nexenta/utils.py cinder/volume/drivers/nexenta/ns5/__init__.py cinder/volume/drivers/nexenta/ns5/iscsi.py cinder/volume/drivers/nexenta/ns5/jsonrpc.py cinder/volume/drivers/nexenta/ns5/nfs.py cinder/volume/drivers/open_e/__init__.py cinder/volume/drivers/open_e/iscsi.py cinder/volume/drivers/open_e/options.py cinder/volume/drivers/open_e/jovian_common/__init__.py cinder/volume/drivers/open_e/jovian_common/driver.py cinder/volume/drivers/open_e/jovian_common/exception.py cinder/volume/drivers/open_e/jovian_common/jdss_common.py cinder/volume/drivers/open_e/jovian_common/rest.py cinder/volume/drivers/open_e/jovian_common/rest_proxy.py cinder/volume/drivers/prophetstor/__init__.py cinder/volume/drivers/prophetstor/dpl_fc.py cinder/volume/drivers/prophetstor/dpl_iscsi.py cinder/volume/drivers/prophetstor/dplcommon.py cinder/volume/drivers/prophetstor/options.py cinder/volume/drivers/san/__init__.py cinder/volume/drivers/san/san.py cinder/volume/drivers/san/hp/__init__.py cinder/volume/drivers/san/hp/hpmsa_client.py cinder/volume/drivers/san/hp/hpmsa_common.py cinder/volume/drivers/san/hp/hpmsa_fc.py cinder/volume/drivers/san/hp/hpmsa_iscsi.py cinder/volume/drivers/sandstone/__init__.py cinder/volume/drivers/sandstone/constants.py cinder/volume/drivers/sandstone/sds_client.py cinder/volume/drivers/sandstone/sds_driver.py cinder/volume/drivers/stx/__init__.py cinder/volume/drivers/stx/client.py cinder/volume/drivers/stx/common.py cinder/volume/drivers/stx/exception.py cinder/volume/drivers/stx/fc.py cinder/volume/drivers/stx/iscsi.py cinder/volume/drivers/synology/__init__.py cinder/volume/drivers/synology/synology_common.py cinder/volume/drivers/synology/synology_iscsi.py cinder/volume/drivers/toyou/__init__.py cinder/volume/drivers/toyou/acs5000/__init__.py cinder/volume/drivers/toyou/acs5000/acs5000_common.py cinder/volume/drivers/toyou/acs5000/acs5000_fc.py cinder/volume/drivers/toyou/acs5000/acs5000_iscsi.py cinder/volume/drivers/toyou/tyds/__init__.py cinder/volume/drivers/toyou/tyds/tyds.py cinder/volume/drivers/toyou/tyds/tyds_client.py cinder/volume/drivers/veritas_access/__init__.py cinder/volume/drivers/veritas_access/veritas_iscsi.py cinder/volume/drivers/vmware/__init__.py cinder/volume/drivers/vmware/datastore.py cinder/volume/drivers/vmware/exceptions.py cinder/volume/drivers/vmware/fcd.py cinder/volume/drivers/vmware/vmdk.py cinder/volume/drivers/vmware/volumeops.py cinder/volume/drivers/windows/__init__.py cinder/volume/drivers/windows/constants.py cinder/volume/drivers/windows/iscsi.py cinder/volume/drivers/windows/smbfs.py cinder/volume/drivers/yadro/__init__.py cinder/volume/drivers/yadro/tatlin_api.py cinder/volume/drivers/yadro/tatlin_client.py cinder/volume/drivers/yadro/tatlin_common.py cinder/volume/drivers/yadro/tatlin_exception.py cinder/volume/drivers/yadro/tatlin_fc.py cinder/volume/drivers/yadro/tatlin_iscsi.py cinder/volume/drivers/yadro/tatlin_utils.py cinder/volume/drivers/zadara/__init__.py cinder/volume/drivers/zadara/common.py cinder/volume/drivers/zadara/exception.py cinder/volume/drivers/zadara/zadara.py cinder/volume/flows/__init__.py cinder/volume/flows/common.py cinder/volume/flows/api/__init__.py cinder/volume/flows/api/create_volume.py cinder/volume/flows/api/manage_existing.py cinder/volume/flows/manager/__init__.py 
cinder/volume/flows/manager/create_volume.py cinder/volume/flows/manager/manage_existing.py cinder/volume/flows/manager/manage_existing_snapshot.py cinder/volume/targets/__init__.py cinder/volume/targets/cxt.py cinder/volume/targets/driver.py cinder/volume/targets/fake.py cinder/volume/targets/iscsi.py cinder/volume/targets/lio.py cinder/volume/targets/nvmeof.py cinder/volume/targets/nvmet.py cinder/volume/targets/scst.py cinder/volume/targets/spdknvmf.py cinder/volume/targets/tgt.py cinder/wsgi/__init__.py cinder/wsgi/api.py cinder/wsgi/common.py cinder/wsgi/eventlet_server.py cinder/wsgi/wsgi.py cinder/zonemanager/__init__.py cinder/zonemanager/fc_common.py cinder/zonemanager/fc_san_lookup_service.py cinder/zonemanager/fc_zone_manager.py cinder/zonemanager/fczm_constants.py cinder/zonemanager/utils.py cinder/zonemanager/drivers/__init__.py cinder/zonemanager/drivers/driver_utils.py cinder/zonemanager/drivers/fc_zone_driver.py cinder/zonemanager/drivers/brocade/__init__.py cinder/zonemanager/drivers/brocade/brcd_fabric_opts.py cinder/zonemanager/drivers/brocade/brcd_fc_san_lookup_service.py cinder/zonemanager/drivers/brocade/brcd_fc_zone_client_cli.py cinder/zonemanager/drivers/brocade/brcd_fc_zone_connector_factory.py cinder/zonemanager/drivers/brocade/brcd_fc_zone_driver.py cinder/zonemanager/drivers/brocade/brcd_http_fc_zone_client.py cinder/zonemanager/drivers/brocade/brcd_rest_fc_zone_client.py cinder/zonemanager/drivers/brocade/exception.py cinder/zonemanager/drivers/brocade/fc_zone_constants.py cinder/zonemanager/drivers/brocade/rest_constants.py cinder/zonemanager/drivers/cisco/__init__.py cinder/zonemanager/drivers/cisco/cisco_fabric_opts.py cinder/zonemanager/drivers/cisco/cisco_fc_san_lookup_service.py cinder/zonemanager/drivers/cisco/cisco_fc_zone_client_cli.py cinder/zonemanager/drivers/cisco/cisco_fc_zone_driver.py cinder/zonemanager/drivers/cisco/exception.py cinder/zonemanager/drivers/cisco/fc_zone_constants.py doc/.gitignore doc/README.rst doc/requirements.txt doc/ext/__init__.py doc/ext/cinder_driverlist.py doc/ext/driver_opts.py doc/source/conf.py doc/source/drivers-all-about.rst doc/source/index.rst doc/source/_extra/.htaccess doc/source/_static/.placeholder doc/source/admin/README.rst doc/source/admin/accelerate-image-compression.rst doc/source/admin/api-configuration.rst doc/source/admin/api-throughput.rst doc/source/admin/availability-zone-type.rst doc/source/admin/backup-disks.rst doc/source/admin/basic-volume-qos.rst doc/source/admin/boot-from-volume.rst doc/source/admin/capacity-based-qos.rst doc/source/admin/consistency-groups.rst doc/source/admin/default-volume-types.rst doc/source/admin/driver-filter-weighing.rst doc/source/admin/generalized-filters.rst doc/source/admin/get-capabilities.rst doc/source/admin/groups.rst doc/source/admin/image-volume-cache.rst doc/source/admin/index.rst doc/source/admin/lio-iscsi-support.rst doc/source/admin/manage-volumes.rst doc/source/admin/multi-backend.rst doc/source/admin/nfs-backend.rst doc/source/admin/over-subscription.rst doc/source/admin/ratelimit-volume-copy-bandwidth.rst doc/source/admin/replication-in-openstack.rst doc/source/admin/security.rst doc/source/admin/troubleshoot.rst doc/source/admin/ts-HTTP-bad-req-in-cinder-vol-log.rst doc/source/admin/ts-cinder-config.rst doc/source/admin/ts-db-cpu-spikes.rst doc/source/admin/ts-duplicate-3par-host.rst doc/source/admin/ts-failed-attach-vol-after-detach.rst doc/source/admin/ts-failed-attach-vol-no-sysfsutils.rst doc/source/admin/ts-failed-connect-vol-FC-SAN.rst 
doc/source/admin/ts-multipath-warn.rst doc/source/admin/ts-no-emulator-x86-64.rst doc/source/admin/ts-non-existent-host.rst doc/source/admin/ts-non-existent-vlun.rst doc/source/admin/upgrades.rst doc/source/admin/user-visible-extra-specs.rst doc/source/admin/volume-backed-image.rst doc/source/admin/volume-backups-export-import.rst doc/source/admin/volume-backups.rst doc/source/admin/volume-migration.rst doc/source/admin/volume-multiattach.rst doc/source/admin/volume-number-weigher.rst doc/source/cli/README.rst doc/source/cli/cinder-manage.rst doc/source/cli/cinder-status.rst doc/source/cli/cli-cinder-quotas.rst doc/source/cli/cli-cinder-scheduling.rst doc/source/cli/cli-manage-volumes.rst doc/source/cli/cli-set-quotas.rst doc/source/cli/index.rst doc/source/common/glossary.rst doc/source/configuration/README.rst doc/source/configuration/index.rst doc/source/configuration/block-storage/backup-drivers.rst doc/source/configuration/block-storage/block-storage-overview.rst doc/source/configuration/block-storage/config-options.rst doc/source/configuration/block-storage/fc-zoning.rst doc/source/configuration/block-storage/logs.rst doc/source/configuration/block-storage/policy-config-HOWTO.rst doc/source/configuration/block-storage/policy-personas.rst doc/source/configuration/block-storage/policy.rst doc/source/configuration/block-storage/scheduler-filters.rst doc/source/configuration/block-storage/scheduler-weights.rst doc/source/configuration/block-storage/schedulers.rst doc/source/configuration/block-storage/service-token.rst doc/source/configuration/block-storage/volume-drivers.rst doc/source/configuration/block-storage/volume-encryption.rst doc/source/configuration/block-storage/backup/ceph-backup-driver.rst doc/source/configuration/block-storage/backup/gcs-backup-driver.rst doc/source/configuration/block-storage/backup/glusterfs-backup-driver.rst doc/source/configuration/block-storage/backup/nfs-backup-driver.rst doc/source/configuration/block-storage/backup/posix-backup-driver.rst doc/source/configuration/block-storage/backup/s3-backup-driver.rst doc/source/configuration/block-storage/backup/swift-backup-driver.rst doc/source/configuration/block-storage/drivers/ceph-rbd-volume-driver.rst doc/source/configuration/block-storage/drivers/datacore-volume-driver.rst doc/source/configuration/block-storage/drivers/datera-volume-driver.rst doc/source/configuration/block-storage/drivers/dell-emc-powerflex-driver.rst doc/source/configuration/block-storage/drivers/dell-emc-powermax-driver.rst doc/source/configuration/block-storage/drivers/dell-emc-powerstore-driver.rst doc/source/configuration/block-storage/drivers/dell-emc-powerstore-nfs.rst doc/source/configuration/block-storage/drivers/dell-emc-powervault-me.rst doc/source/configuration/block-storage/drivers/dell-emc-unity-driver.rst doc/source/configuration/block-storage/drivers/dell-emc-vnx-driver.rst doc/source/configuration/block-storage/drivers/dell-emc-xtremio-driver.rst doc/source/configuration/block-storage/drivers/dell-storagecenter-driver.rst doc/source/configuration/block-storage/drivers/fujitsu-eternus-dx-driver.rst doc/source/configuration/block-storage/drivers/fungible-storage-driver.rst doc/source/configuration/block-storage/drivers/hedvig-volume-driver.rst doc/source/configuration/block-storage/drivers/hitachi-vsp-driver.rst doc/source/configuration/block-storage/drivers/hp-msa-driver.rst doc/source/configuration/block-storage/drivers/hpe-3par-driver.rst doc/source/configuration/block-storage/drivers/hpe-xp-driver.rst 
doc/source/configuration/block-storage/drivers/huawei-storage-driver.rst doc/source/configuration/block-storage/drivers/ibm-flashsystem-volume-driver.rst doc/source/configuration/block-storage/drivers/ibm-gpfs-volume-driver.rst doc/source/configuration/block-storage/drivers/ibm-storage-volume-driver.rst doc/source/configuration/block-storage/drivers/ibm-storwize-svc-driver.rst doc/source/configuration/block-storage/drivers/infinidat-volume-driver.rst doc/source/configuration/block-storage/drivers/infortrend-volume-driver.rst doc/source/configuration/block-storage/drivers/inspur-as13000-driver.rst doc/source/configuration/block-storage/drivers/inspur-instorage-driver.rst doc/source/configuration/block-storage/drivers/intel-rsd-volume-driver.rst doc/source/configuration/block-storage/drivers/kaminario-driver.rst doc/source/configuration/block-storage/drivers/kioxia-kumoscale-driver.rst doc/source/configuration/block-storage/drivers/lenovo-driver.rst doc/source/configuration/block-storage/drivers/lightbits-lightos-driver.rst doc/source/configuration/block-storage/drivers/linstor-driver.rst doc/source/configuration/block-storage/drivers/lvm-volume-driver.rst doc/source/configuration/block-storage/drivers/macrosan-storage-driver.rst doc/source/configuration/block-storage/drivers/nec-storage-m-series-driver.rst doc/source/configuration/block-storage/drivers/nec-storage-v-series-driver.rst doc/source/configuration/block-storage/drivers/netapp-volume-driver.rst doc/source/configuration/block-storage/drivers/nexentastor4-driver.rst doc/source/configuration/block-storage/drivers/nexentastor5-driver.rst doc/source/configuration/block-storage/drivers/nfs-volume-driver.rst doc/source/configuration/block-storage/drivers/nimble-volume-driver.rst doc/source/configuration/block-storage/drivers/open-e-joviandss-driver.rst doc/source/configuration/block-storage/drivers/prophetstor-dpl-driver.rst doc/source/configuration/block-storage/drivers/pure-storage-driver.rst doc/source/configuration/block-storage/drivers/quobyte-driver.rst doc/source/configuration/block-storage/drivers/sandstone-storage-driver.rst doc/source/configuration/block-storage/drivers/seagate-driver.rst doc/source/configuration/block-storage/drivers/solidfire-volume-driver.rst doc/source/configuration/block-storage/drivers/spdk-volume-driver.rst doc/source/configuration/block-storage/drivers/storpool-volume-driver.rst doc/source/configuration/block-storage/drivers/synology-dsm-driver.rst doc/source/configuration/block-storage/drivers/toyou-netstor-driver.rst doc/source/configuration/block-storage/drivers/toyou-netstor-tyds-driver.rst doc/source/configuration/block-storage/drivers/veritas-access-iscsi-driver.rst doc/source/configuration/block-storage/drivers/vmware-vmdk-driver.rst doc/source/configuration/block-storage/drivers/vzstorage-driver.rst doc/source/configuration/block-storage/drivers/windows-iscsi-volume-driver.rst doc/source/configuration/block-storage/drivers/windows-smb-volume-driver.rst doc/source/configuration/block-storage/drivers/yadro-tatlin-volume-driver.rst doc/source/configuration/block-storage/drivers/zadara-volume-driver.rst doc/source/configuration/block-storage/samples/api-paste.ini.inc doc/source/configuration/block-storage/samples/api-paste.ini.rst doc/source/configuration/block-storage/samples/cinder.conf.rst doc/source/configuration/block-storage/samples/index.rst doc/source/configuration/block-storage/samples/policy.yaml.rst doc/source/configuration/block-storage/samples/rootwrap.conf.inc 
doc/source/configuration/block-storage/samples/rootwrap.conf.rst doc/source/configuration/figures/ceph-architecture.png doc/source/configuration/figures/emc-enabler.png doc/source/configuration/figures/ibm-storage-nova-concept.png doc/source/configuration/tables/cinder-ibm_gpfs.inc doc/source/configuration/tables/cinder-ibm_gpfs_nfs.inc doc/source/configuration/tables/cinder-ibm_gpfs_remote.inc doc/source/configuration/tables/cinder-ibm_storage.inc doc/source/configuration/tables/cinder-infortrend.inc doc/source/configuration/tables/cinder-netapp_cdot_iscsi.inc doc/source/configuration/tables/cinder-netapp_cdot_nfs.inc doc/source/configuration/tables/cinder-nexenta.inc doc/source/configuration/tables/cinder-nexenta5.inc doc/source/configuration/tables/cinder-prophetstor_dpl.inc doc/source/configuration/tables/cinder-storwize.inc doc/source/configuration/tables/cinder-vmware.inc doc/source/configuration/tables/manual/cinder-netapp_cdot_extraspecs.inc doc/source/configuration/tables/manual/cinder-pure_storage_extraspecs.inc doc/source/contributor/README.rst doc/source/contributor/addmethod.openstackapi.rst doc/source/contributor/api.apache.rst doc/source/contributor/api_conditional_updates.rst doc/source/contributor/api_microversion_dev.rst doc/source/contributor/api_microversion_history.rst doc/source/contributor/architecture.rst doc/source/contributor/attach_detach_conventions.rst doc/source/contributor/attach_detach_conventions_v2.rst doc/source/contributor/backporting.rst doc/source/contributor/cinder-groups.rst doc/source/contributor/contributing.rst doc/source/contributor/database-migrations.rst doc/source/contributor/development.environment.rst doc/source/contributor/documentation.rst doc/source/contributor/drivers.rst doc/source/contributor/drivers_locking_examples.rst doc/source/contributor/gerrit.rst doc/source/contributor/gmr.rst doc/source/contributor/groups.rst doc/source/contributor/high_availability.rst doc/source/contributor/i18n.rst doc/source/contributor/index.rst doc/source/contributor/launchpad.rst doc/source/contributor/migration.rst doc/source/contributor/new_driver_checklist.rst doc/source/contributor/releasecycle.rst doc/source/contributor/releasenotes.rst doc/source/contributor/releases.rst doc/source/contributor/replication.rst doc/source/contributor/rolling.upgrades.rst doc/source/contributor/rpc.rst doc/source/contributor/testing.rst doc/source/contributor/thin_provisioning.rst doc/source/contributor/threading.rst doc/source/contributor/user_messages.rst doc/source/contributor/zuul.rst doc/source/images/architecture.png doc/source/images/cinder.png doc/source/images/rpc/arch.png doc/source/images/rpc/arch.svg doc/source/images/rpc/flow1.png doc/source/images/rpc/flow1.svg doc/source/images/rpc/flow2.png doc/source/images/rpc/flow2.svg doc/source/images/rpc/rabt.png doc/source/images/rpc/rabt.svg doc/source/images/rpc/state.png doc/source/install/README.rst doc/source/install/cinder-backup-install-obs.rst doc/source/install/cinder-backup-install-rdo.rst doc/source/install/cinder-backup-install-ubuntu.rst doc/source/install/cinder-controller-install-obs.rst doc/source/install/cinder-controller-install-rdo.rst doc/source/install/cinder-controller-install-ubuntu.rst doc/source/install/cinder-storage-install-obs.rst doc/source/install/cinder-storage-install-rdo.rst doc/source/install/cinder-storage-install-ubuntu.rst doc/source/install/cinder-storage-install-windows.rst doc/source/install/cinder-verify.rst doc/source/install/get-started-block-storage.rst 
doc/source/install/index-obs.rst doc/source/install/index-rdo.rst doc/source/install/index-ubuntu.rst doc/source/install/index-windows.rst doc/source/install/index.rst doc/source/install/figures/hwreqs.png doc/source/install/figures/network1-services.png doc/source/install/figures/network2-services.png doc/source/reference/README.rst doc/source/reference/support-matrix.ini doc/source/reference/support-matrix.rst doc/source/user/README.rst doc/test/redirect-tests.txt etc/cinder/README-cinder.conf.sample etc/cinder/README-policy.generate.md etc/cinder/api-httpd.conf etc/cinder/api-paste.ini etc/cinder/logging_sample.conf etc/cinder/resource_filters.json etc/cinder/rootwrap.conf etc/cinder/rootwrap.d/volume.filters playbooks/cinder-multibackend-matrix.yaml playbooks/enable-fips.yaml playbooks/post-cinderlib.yaml playbooks/tempest-and-cinderlib-run.yaml rally-jobs/README.rst rally-jobs/cinder.yaml rally-jobs/extra/README.rst rally-jobs/plugins/README.rst rally-jobs/plugins/__init__.py releasenotes/README.rst releasenotes/notes/1220b8a67602b8e7-update_rootwrap_volume_filters.yaml releasenotes/notes/1884495-173f375dc5274fe6.yaml releasenotes/notes/1885946-17bc5c3dc0535044.yaml releasenotes/notes/1899512-7a872a2c19e53536.yaml releasenotes/notes/1918099-18b26dd9107f19c0.yaml releasenotes/notes/3par-create-cg-from-source-cg-5634dcf9feb813f6.yaml releasenotes/notes/3par-create-fc-vlun-match-set-type-babcf2cbce1ce317.yaml releasenotes/notes/3par-get-capability-de60c9bc7ae51c14.yaml releasenotes/notes/3par-license-check-51a16b5247675760.yaml releasenotes/notes/3par-manage-unmanage-snapshot-eb4e504e8782ba43.yaml releasenotes/notes/84-to-90-endpoints-831c28423d32cac5.yaml releasenotes/notes/Code-changes-to-handle-groups-with-replication_enabled-or-snapshot_enabled-72f669fe2719ce3d.yaml releasenotes/notes/Dell-SC-Driver-to-dell_emc-folder-e5d6fb1f1cf84149.yaml releasenotes/notes/Dell-SC-New-Extra-Specs-1de0d3f1ebc62881.yaml releasenotes/notes/Dell-SC-Retype-Limitations-74f4b5f6a94ffe4f.yaml releasenotes/notes/Dell-SC-ServerOS-Config-Option-bd0e018319758e03.yaml releasenotes/notes/Dell-SC-excluded_domain_ips_ListOpt-51bacddee199ce83.yaml releasenotes/notes/Dell-SC-live-volume-41bacddee199ce83.yaml releasenotes/notes/Dell-SC-replication-failover_host-failback-a9e9cbbd6a1be6c3.yaml releasenotes/notes/Dell-SC-thaw_backend-b9362d381fabd4c9.yaml releasenotes/notes/Dell-SC-v2.1-replication-ef6b1d6a4e2795a0.yaml releasenotes/notes/Ds8k-revert-to-snapshot-support-ea0e06e14a8710ee.yaml releasenotes/notes/Enable-HPE-3PAR-Compression-Feature-90e4de4b64a74a46.yaml releasenotes/notes/Fusionstorage_Cinder_Driver_Support_OceanStor_100D-d21a300fd27b2440.yaml releasenotes/notes/HPE-3par-Generic-Volume-Group-e048002e1c3469a3.yaml releasenotes/notes/Huawei-volume-driver-replication-v2.1-ada5bc3ad62dc633.yaml releasenotes/notes/Huawei_Cinder_Driver_Support_Dorado_V6-5289a3b0ef90e8b1.yaml releasenotes/notes/IET_iSCSI_target-dea5f68dc297510d.yaml releasenotes/notes/Lefthand-generic-volume-group-570d07b4786b93c2.yaml releasenotes/notes/MacroSAN-volume-driver-6477e4ec7c38f49d.yaml releasenotes/notes/NetApp-ONTAP-full-cg-support-cfdc91bf0acf9fe1.yaml releasenotes/notes/SC-included_domain_ips_ListOpt-61bacddee199ce83.yaml releasenotes/notes/SolidFire-generic-volume-group-1b1e55661cd83a43.yaml releasenotes/notes/VMEM-6000-drivers-removed-9b6675ff7ae5f960.yaml releasenotes/notes/Zadara-change-to-access-key-b16bdaa9d8460b57.yaml releasenotes/notes/Zadara-newlayout-support-features-ffa20694c008ba86.yaml 
releasenotes/notes/ZadaraStorage-13a5fff6f4fa1710.yaml releasenotes/notes/a7401ead26a7c83b-keystone-url.yaml releasenotes/notes/add-availability_zone-filter-for-snapshot-8e1494212276abde.yaml releasenotes/notes/add-backup-project-attribute-3f57051ef9159b08.yaml releasenotes/notes/add-backup-swift-container-storage-policy-8d4a268ed61b9fe2.yaml releasenotes/notes/add-cg-capability-to-groups-2eb3e71682a88600.yaml releasenotes/notes/add-cinder-wsgi-module-ae72ad42bfebbea8.yaml releasenotes/notes/add-cluster-name-to-volume-details-ce01dd828faafcde.yaml releasenotes/notes/add-coho-driver-b4472bff3f64aa41.yaml releasenotes/notes/add-configurable-img-conversion-param-1e7b545ae816dfe8.yaml releasenotes/notes/add-connection-info-to-attachment-84d4dg45uh41db15.yaml releasenotes/notes/add-count-info-in-list-api-e43wac44yu750c23.yaml releasenotes/notes/add-datacore-volume-driver-3775797b0515f538.yaml releasenotes/notes/add-datacore-volume-driver-5c1802798425acc1.yaml releasenotes/notes/add-del-volumeTypeAccess-b1c8cb14a9d14db3.yaml releasenotes/notes/add-encryption-key-id-to-details-e721977fba0f2b51.yaml releasenotes/notes/add-filter-to-group-snapshots-74sd8g138a289dh4.yaml releasenotes/notes/add-filters-support-to-get_pools-0852e9c0e42fbf98.yaml releasenotes/notes/add-glance-service-section-3e73daee0e995442.yaml releasenotes/notes/add-google-backup-driver-d1e7ac33d5780b79.yaml releasenotes/notes/add-io-ports-option-c751d1bd395dd614.yaml releasenotes/notes/add-like-filter-support-7d4r78d6de3984dv.yaml releasenotes/notes/add-operation-to-request-spec-7yt6ub75uy1284as.yaml releasenotes/notes/add-option-max_luns_per_storage_group-dfe3e1396b262bc8.yaml releasenotes/notes/add-periodic-task-to-clean-expired-messages-84f47gxc88hda035.yaml releasenotes/notes/add-powermax-live-migration-without-a-pool-name-7690fcd67b5f690c.yaml releasenotes/notes/add-project-id-to-group-groupsnapshot-response-512013e95a80784a.yaml releasenotes/notes/add-reset-group-snapshot-status-sd21a31cde5fa035.yaml releasenotes/notes/add-reset-group-status-sd21a31cde5fa034.yaml releasenotes/notes/add-resource-filters-api-8g3dub1700qaye98.yaml releasenotes/notes/add-revert-to-snapshot-support-2d21a3dv4f5fa087.yaml releasenotes/notes/add-split-logger-conf-option-0424e3bd91de3a5a.yaml releasenotes/notes/add-stochastic-scheduling-option-99e10eae023fbcca.yaml releasenotes/notes/add-suppress-lvm-fd-warnings-option.402bebc03b0a9f00.yaml releasenotes/notes/add-tegile-driver-b7919c5f30911998.yaml releasenotes/notes/add-transfer-pagination-support-7y33u7y68de3cb16.yaml releasenotes/notes/add-user-id-attribute-to-backup-response-ce27364680c895f7.yaml releasenotes/notes/add-vmax-replication-490202c15503ae03.yaml releasenotes/notes/add-volume-re-image-api-6f02dcefd4975a66.yaml releasenotes/notes/add-volume-type-filter-to_get-pools-c791132540921398.yaml releasenotes/notes/add-volume-upload-image-options-3a61a31c544fa034.yaml releasenotes/notes/add_ceph_custom_keyring_path-43a3b8c21a1ab3c4.yaml releasenotes/notes/add_manage_unmanage_itri_disco_driver-1c9ee31cc86b6eda.yaml releasenotes/notes/add_multiattach_policies-8e0b22505ed6cbd8.yaml releasenotes/notes/add_nvme_tcp_driver-558ff80aa2029e2b.yaml releasenotes/notes/add_replication_failback_to_solidfire-82668c071f4fa91d.yaml releasenotes/notes/added-virtual-size-check-42a84f6b24366e5d.yaml releasenotes/notes/added_ontap_libs_for_asar2_platform-6688b9f811645b96.yaml releasenotes/notes/allow-admin-quota-operations-c1c2236711224023.yaml releasenotes/notes/allow-deleting-__DEFAULT__-type-d35dfb5d89760b9b.yaml 
releasenotes/notes/allow-encrypted-rbd-volumes-35d3536505e6309b.yaml releasenotes/notes/allow-huawei-driver-lun-copy-speed-configurable-361a480e7b7e361d.yaml releasenotes/notes/allow-remove-name-and-description-for-consisgroup-408257a0a18bd530.yaml releasenotes/notes/allow_disable_image_conversion-ebf33ce9d5edf724.yaml releasenotes/notes/announce-ceph-min-version-4eddb0def1c39928.yaml releasenotes/notes/apply-limits-to-qemu-img-29f722a1bf4b91f8.yaml releasenotes/notes/attach-format-after-snapshot-9a1857456706aa72.yaml releasenotes/notes/backend-options-ed19e6c63b2b9090.yaml releasenotes/notes/backup-ceph-driver-journaling-exculsive-lock-features-6b6044138a288a83.yaml releasenotes/notes/backup-driver-configuration-36357733962dab03.yaml releasenotes/notes/backup-path-removal-c411bb6c0d3887f1.yaml releasenotes/notes/backup-snapshot-6e7447db930c31f6.yaml releasenotes/notes/backup-snapshots-2f547c8788bc11e1.yaml releasenotes/notes/backup-sparse-f396b35bfe17332e.yaml releasenotes/notes/backup-sparse-f685f4321f2994f5.yaml releasenotes/notes/backup-update-d0b0db6a7b1c2a5b.yaml releasenotes/notes/backup-user-messages-5ee0c7ead3def8f9.yaml releasenotes/notes/backup-volumenotfound-set-to-error-fa47b3631093a702.yaml releasenotes/notes/backup_driver_init_state-d4834fa927e502ab.yaml releasenotes/notes/backup_max_operations-27753c748ba1dc1a.yaml releasenotes/notes/backup_s3_driver-238e3612acd7cc06.yaml releasenotes/notes/balanced-fc-port-selection-fbf6b841fea99156.yaml releasenotes/notes/bdd-pools-stats-afb4398daa9248de.yaml releasenotes/notes/bp-add-volume-backup-id-e10d053638cb2e78.yaml releasenotes/notes/bp-datera-cinder-driver-update-2.1-5c6455b45563adc5.yaml releasenotes/notes/bp-dell-powerflex-aa-828facb25b1fde63.yaml releasenotes/notes/bp-dell-powermax-consistency-exempt.yaml releasenotes/notes/bp-dell-powermax-nvme-tcp-606b091620685c06.yaml releasenotes/notes/bp-dell-powerstore-aa-ca7b2e9355a1e5a5.yaml releasenotes/notes/bp-dell-powerstore-qos-1532737fa1bb2664.yaml releasenotes/notes/bp-ibm-gpfs-supported-26ae5381dd2a47ad.yaml releasenotes/notes/bp-infinidat-add-snapshot-revert-1bab97e85ff10780.yaml releasenotes/notes/bp-inspur-instorage-driver-40371862c9559238.yaml releasenotes/notes/bp-jdss-add-cert-and-snapshot-revert-b34f352754ad07de.yaml releasenotes/notes/bp-kumoscale-driver-3a01460f1aa83939.yaml releasenotes/notes/bp-lightbits-lightos-clustered-nvmetcp-driver-d1ef8f83263921f2.yaml releasenotes/notes/bp-netapp-flexgroup-support-c462fca33f0d8906.yaml releasenotes/notes/bp-netapp-ontap-adaptive-qos-45891585a91eab75.yaml releasenotes/notes/bp-netapp-ontap-min-throughput-qos-cd3812df5c7da8fd.yaml releasenotes/notes/bp-netapp-self-signed-https-support-cb30081d4465acd1.yaml releasenotes/notes/bp-netapp-solidfire-ipv6-on-management-ip-10187de7b732335a.yaml releasenotes/notes/bp-netapp-solidfire-revert-to-snapshot-741b7c204cc99546.yaml releasenotes/notes/bp-nfs-volume-encryption-3d8362843caeb39c.yaml releasenotes/notes/bp-nvmeof-lvm-target-b7771955b426abe7.yaml releasenotes/notes/bp-open-src-ibm-storage-driver-d17808e52aa4eacb.yaml releasenotes/notes/bp-powermax-protected-snap-82eb6731553356d9.yaml releasenotes/notes/bp-powerstore-cg-support-ac1842d2041dcbfd.yaml releasenotes/notes/bp-powerstore-cinder-driver-94f8c7f1371eafe7.yaml releasenotes/notes/bp-powerstore-nfs-cinder-driver-b743a8a89acafa35.yaml releasenotes/notes/bp-powerstore-replication-support-700016b83437602e.yaml releasenotes/notes/bp-provisioning-improvements-bb7e28896e2a2539.yaml 
releasenotes/notes/bp-remove-netapp-7mode-drivers-c38398e54662f2d4.yaml releasenotes/notes/bp-toyou-acs5000-driver-16449ca18280def3.yaml releasenotes/notes/bp-vmware-fcd-fbe19ee577d2e9e4.yaml releasenotes/notes/bp-yadro-tatlin-unified-driver-122218f077d70312.yaml releasenotes/notes/bp-yadro-tatlin-unified-fc-b6e1225ad99c6304.yaml releasenotes/notes/brcd_lookupservice_http_support-f6485b38a1feaa15.yaml releasenotes/notes/brocade_http_connector-0021e41dfa56e671.yaml releasenotes/notes/brocade_looup_fail_get_client-179151d449a34aa4.yaml releasenotes/notes/brocade_py3-15647dbe3981d44b.yaml releasenotes/notes/brocade_rest_client-202cfd474c96d3fe.yaml releasenotes/notes/brocade_virtual_fabrics_support-d2d0b95b19457c1d.yaml releasenotes/notes/bug-1518213-a5bf2ea0d008f329.yaml releasenotes/notes/bug-1560649-d4f3ff71fe4ddb89.yaml releasenotes/notes/bug-1560867-support-nova-specific-image-7yt6fd1173c4e3wd.yaml releasenotes/notes/bug-1570845-efdb0206718f4ca4.yaml releasenotes/notes/bug-1587376-fix-manage-resource-quota-issue-78f59f39b9fa4762.yaml releasenotes/notes/bug-1612763-report-multiattach-enabled-NetApp-backends-0fbf2cb621e4747d.yaml releasenotes/notes/bug-1614095-add-user_id-to-snapshot_show-4884fab825983c3a.yaml releasenotes/notes/bug-1615451-NetApp-cDOT-fix-reporting-replication-capability-dca29f39b9fa7651.yaml releasenotes/notes/bug-1622057-netapp-cdot-fix-replication-status-cheesecake-volumes-804dc8b0b1380e6b.yaml releasenotes/notes/bug-1632333-netapp-ontap-copyoffload-downloads-glance-image-twice-08801d8c7b9eed2c.yaml releasenotes/notes/bug-1634203-netapp-cdot-fix-clone-from-nfs-image-cache-2218fb402783bc20.yaml releasenotes/notes/bug-1660927-netapp-no-copyoffload-77fc3cf4f2cf2335.yaml releasenotes/notes/bug-1667071-dc6407f40a1f7d15.yaml releasenotes/notes/bug-1670260-fix-boolean-is_public-d16e1957c0f09d65.yaml releasenotes/notes/bug-1671220-4d521be71d0b8aa4.yaml releasenotes/notes/bug-1686745-e8f1569455f998ba.yaml releasenotes/notes/bug-1690954-40fc21683977e996.yaml releasenotes/notes/bug-1691771-fix-netapp-manage-volumes-62bec192a08b3ceb.yaml releasenotes/notes/bug-1693084-fix-az-cache-invalid-6td4q74q28uxcd68.yaml releasenotes/notes/bug-1699936-fix-host-show-incorrect-fg8698gu7y6r7d15.yaml releasenotes/notes/bug-1703405-53f09205024f2095.yaml releasenotes/notes/bug-1705375-prohibit-group-deletion-if-groupsnapshot-exists.yaml releasenotes/notes/bug-1706888-update-backend-when-extending-3e4a9831a0w29d68.yaml releasenotes/notes/bug-1712651-7bc90264eb5001ea.yaml releasenotes/notes/bug-1714209-netapp-ontap-drivers-oversubscription-issue-c4655b9c4858d7c6.yaml releasenotes/notes/bug-1718739-netapp-eseries-fix-provisioned-capacity-report-8c51fd1173c15dbf.yaml releasenotes/notes/bug-1723226-allow-purging-0day-4de8979db7215cf3.yaml releasenotes/notes/bug-1730933-1bb0272e3c51eed3.yaml releasenotes/notes/bug-1735337-remove-skip-quota-validation-flag-2ecb24143f1f1292.yaml releasenotes/notes/bug-1762424-f76af2f37fe408f1.yaml releasenotes/notes/bug-1765182-34fdc4bb8482f8a5.yaml releasenotes/notes/bug-1765182-bcafd577f4b81eb6.yaml releasenotes/notes/bug-1765182-de132ba52167800b.yaml releasenotes/notes/bug-1765610-qnap-fix-volume-snapshot-create-fail-2bb785eafdb87fb6.yaml releasenotes/notes/bug-1766768-qnap-fix-upload-volume-detach-fail-33cbee59f1381bda.yaml releasenotes/notes/bug-1773446-984d76ed29445c9b.yaml releasenotes/notes/bug-1773725-xtremio-remove-provisioning-factor-y7r5uy3489yd9pbf.yaml releasenotes/notes/bug-1782588-7e058b379da95309.yaml 
releasenotes/notes/bug-1783790-multiattach-none-when-manage-volume-yu7du8yth78i0e6b.yaml releasenotes/notes/bug-1784871-7f67402eb13abca7.yaml releasenotes/notes/bug-1790141-vmax-powermaxos-upgrade-fix-4c76186cfca66790.yaml releasenotes/notes/bug-1799221-fix-truncated-volumes-in-case-of-glance-errors-6cae19218249c3cf.yaml releasenotes/notes/bug-1805550-default-policy-file-db15eaa76fefa115.yaml releasenotes/notes/bug-1809323-fix-invalid-backup-4a341dc362ded88e.yaml releasenotes/notes/bug-1812685-powermax-replication-specs-fix-aa6b13b93b4059d6.yaml releasenotes/notes/bug-1813851-60a4f0ffe386d9b6.yaml releasenotes/notes/bug-1823200-victoria-ecd2d99c9223d84b.yaml releasenotes/notes/bug-1823445-c47c25870a98335a.yaml releasenotes/notes/bug-1828386-fix-retype-rbd-backend.yaml releasenotes/notes/bug-1833115-fix-netapp-ontap-python3-failures-dd869e602f9539e1.yaml releasenotes/notes/bug-1837524-strowize-create_consistency_group_failures-bb2a976dfe9454a4.yaml releasenotes/notes/bug-1859652-netapp-fix-retype-attached-volume-to-solidfire-1933f03673ff078d.yaml releasenotes/notes/bug-1859653-solidfire-fix-failover-after-service-restart-77e5e4da45c9c1aa.yaml releasenotes/notes/bug-1860100-8c542363def7d408.yaml releasenotes/notes/bug-1869746-cross-project-incremental-backup-error.yaml releasenotes/notes/bug-1870103-013e314e9a5b8e08.yaml releasenotes/notes/bug-1871524-5f6df9a61bf6b775.yaml releasenotes/notes/bug-1874134-netapp-ONTAP-fix-max-resize-size-ad2d88da8721560e.yaml releasenotes/notes/bug-1874541-netapp-fix-update-cluster-status-8331655904fb4fed.yaml releasenotes/notes/bug-1875570-nfs-image-volume-cache-c45e840a6ec2a702.yaml releasenotes/notes/bug-1879578-volume_type-regression-de82f4152c7b2f77.yaml releasenotes/notes/bug-1884030-ds8k_support_volume_name_template-91e1b70ece172ef8.yaml releasenotes/notes/bug-1887859-backup-manager-fb8dbf289eedc4b0.yaml releasenotes/notes/bug-1887885-nec-fix-snapshot-detach-error-fff3012e0e9a2d2b.yaml releasenotes/notes/bug-1887908-nec-live-migration-failure-withfc-3128fff7c48e739f.yaml releasenotes/notes/bug-1887962-643379faf20f01cf.yaml releasenotes/notes/bug-1888951-backup-from-nfs-snapshot-2e06235eb318b852.yaml releasenotes/notes/bug-1890241-strowize-delete_group_snapshot_fix-2e491e74e1f73ba7.yaml releasenotes/notes/bug-1890254-clone-fcmap-is-not-deleting-in-cleanup-f5bbb467be1b889d.yaml releasenotes/notes/bug-1890586-storwize-check_flashcopy_rate-fix-571e6e182b604725.yaml releasenotes/notes/bug-1890588-storwize-select_io_group-fix-7200f2e00140ab34.yaml releasenotes/notes/bug-1890589-create_flashcopy_to_consistgrp_fix-9eeea4aaceb8a191.yaml releasenotes/notes/bug-1890591-Pool-information-is-not-saved-in-stats-22f302d941cd9fe2.yaml releasenotes/notes/bug-1892034-Volume-name-is-not-validated-for-host-4ec0d1bd14281c77.yaml releasenotes/notes/bug-1894381-fix-cinder-manage-cluster-remove-raising-nosuchopterror.yaml releasenotes/notes/bug-1895035-rbd-restore-0cd94ccd467ae1e3.yaml releasenotes/notes/bug-1895510-REST-API-issue-to-get-bundle-198a3b89255759bb.yaml releasenotes/notes/bug-1896087-rollback-volume-status-bd04951f929bb88d.yaml releasenotes/notes/bug-1896214-Fix-in-change_vdisk_iogrp-during-retype-ef83ccf27d8829f5.yaml releasenotes/notes/bug-1897598-powerflex-volume-type-conversion.yaml releasenotes/notes/bug-1898746-ibm-svf-fix-host-failover-switch-relationship-9d3c58822a8c918c.yaml releasenotes/notes/bug-1898918-b24a93d7d5aff238.yaml releasenotes/notes/bug-1900979-powerstore-chap-support.yaml 
releasenotes/notes/bug-1900979-xtremio-ports-filtering-e68f90d47f17a7d9.yaml releasenotes/notes/bug-1901241-361b1b361bfa5152.yaml releasenotes/notes/bug-1903648-ds8k-ostype-compatability-support-a86f608d8c014a29.yaml releasenotes/notes/bug-1904440-clone-rekey-fd57a2b5f6224e0f.yaml releasenotes/notes/bug-1904892-ipv6-nfs-manage-391118115dfaaf54.yaml releasenotes/notes/bug-1905564-e7dcf28fd734d3b2.yaml releasenotes/notes/bug-1905988-ibm-svf-fix-volume-iops-throttling-issue-b2b89e31af5973b2.yaml releasenotes/notes/bug-1906528-ibm-svf-fix-host-failback-switch-relationship-b5b7320811688cda.yaml releasenotes/notes/bug-1907964-9277e5ddec2abeda.yaml releasenotes/notes/bug-1908315-020fea3e244d49bb.yaml releasenotes/notes/bug-1910767-00f20702f5fc96db.yaml releasenotes/notes/bug-1912451-ibm-svf-update-replication-properties-68c4f9ea56df212d.yaml releasenotes/notes/bug-1912564-strowize-hyperswap-volume-is-not-deleting-a94291248f8f59cd.yaml releasenotes/notes/bug-1912624-bakup-a-z-regression-452f4bc9dfd41871.yaml releasenotes/notes/bug-1913363-ibm-svf_Fix_multiple_lshost_calls_during_attach-528f92b44a0ff6b8.yaml releasenotes/notes/bug-1913449-4796b366ae7e871b.yaml releasenotes/notes/bug-1914639-fix-chap-auth-issue-in-netapp-driver-e92eaa431d6fcbac.yaml releasenotes/notes/bug-1917605-ibm-svf_Bulk_create_Hyperswap_volume_is_failing-79a9ec2108612240.yaml releasenotes/notes/bug-1918229-0aa9fd75c5e843d5.yaml releasenotes/notes/bug-1918889-xtremio-iscsi-ipv6-05c59b897da5c01b.yaml releasenotes/notes/bug-1920099-ibm-svf-fix_extend_to_clone_rep_volumes-015e030332f2e714.yaml releasenotes/notes/bug-1920237-backup-remove-export-race-941e2ab1f056e54c.yaml releasenotes/notes/bug-1920729-powerstore-iscsi-targets-filtering-9623ac03da5c6721.yaml releasenotes/notes/bug-1920870-ibm-svf-fix-extend-issue-for-mirroring-volumes-31b1a9119c49e112.yaml releasenotes/notes/bug-1920890-ibm-svf-Retype-in-use-hyperswap-volume-95a6c033e493ee59.yaml releasenotes/notes/bug-1920912-add_volumes_to_clone_group_fix-1cc9668ea077831e.yaml releasenotes/notes/bug-1922013-ibm-svf-fix_addvol_gmcv_grp-caa0bc2035747d99.yaml releasenotes/notes/bug-1922255-dell-powervault-manage-volumes.rst releasenotes/notes/bug-1922408-create-encryption-volume-from-snapshot-skip-resize-bb5d77c5e912b5c1.yaml releasenotes/notes/bug-1924568-ibm-svf-fix_drp_vol_create_issue-d1b75c4befb0e993.yaml releasenotes/notes/bug-1924602-ibm-svf_Storwize_HyperSwap_snapshot_clone_is_failing-c144e6b99d56de64.yaml releasenotes/notes/bug-1926286-ibm-svf-fix-volume-relationship-properties-fetch-issue-6b443f8521cbb15c.yaml releasenotes/notes/bug-1926491-ibm-svf-update-rccg-info-for-mirror-volumes-67fbec05d803745d.yaml releasenotes/notes/bug-1929223-powerflex-connector-certificate-validation-707b4f9f2077d4bc.yaml releasenotes/notes/bug-1931968-ibm-svf-HyperSwap_volume_service_status_update-293dea5d0f750824.yaml releasenotes/notes/bug-1934168-a61c71869742867d.yaml releasenotes/notes/bug-1935670-svc_update_rep_properties_for_empty_values_fix-a2faabbf2139195e.yaml releasenotes/notes/bug-1936848-6ecc78e0e970419a.yaml releasenotes/notes/bug-193688-bb045badcd5aecad.yaml releasenotes/notes/bug-1938212-ibm-svf-fix-to-add-replication-support-for-V5000E-e88df9c8eb22c2a8.yaml releasenotes/notes/bug-1939139-02ab552420813e70.yaml releasenotes/notes/bug-1939145-ibm-svf-fix-systemname-issue-with-multiple-partnership-c437ebbb511879b9.yaml releasenotes/notes/bug-1939241-storpool-attach-encrypted-volumes-783c723683b8f9a9.yaml 
releasenotes/notes/bug-1941694-svc_detach_second_instance_for_multi_attach_type_volume_fix-b9a882a7faa8eed6.yaml releasenotes/notes/bug-1941815-RBD-call-trash-operation-when-plain-deletion-fails-50cef4a8a8010ba9.yaml releasenotes/notes/bug-1942154-backup-availability-zone-object-fix-939f93fda2c539b8.yml releasenotes/notes/bug-1942210-show-msg-check-clone-v2-api-raise-attribute-error-40efd74bb92b9482.yaml releasenotes/notes/bug-1943682-ibm-svf-update-rccgname-to-metadata-for-clone-group-volumes-baa6bebcf8caacb8.yaml releasenotes/notes/bug-1944577-no-manage-to-encrypted-type-b5b5d7f8360f037f.yaml releasenotes/notes/bug-1946483-cryptic-error-in-backup-fe03939577867e0a.yaml releasenotes/notes/bug-1947518-rbd-open-readonly-ba523c4b0ddbba76.yaml releasenotes/notes/bug-1949061-ibm-svf-fix_retype_issue_of_mirror_volume-5f37c265bee89d97.yaml releasenotes/notes/bug-1951046-ds8k_fixed_detach_for_multi_attach_volumes-b86940efafa926f2.yaml releasenotes/notes/bug-1951250-storwize-fix-multiple-ssh-calls-for-retype-d3b56379b7d8b049.yaml releasenotes/notes/bug-1952805-cinder-schedules-incremental-backups-on-the-wrong-node-b20b0c137f33ef03.yaml releasenotes/notes/bug-1953168-fix-capacity-filter-message-456dea41fa8a4a1b.yaml releasenotes/notes/bug-1953185-ibm-svf-RevertToSnapshot_for_rep_volumes_in_group-e3ff6c87edd4de39.yaml releasenotes/notes/bug-1955057-fix-get-ontap-version-4d9fa1f6c5d2eaf3.yaml releasenotes/notes/bug-1957073-0d1307a8637a62b7.yaml releasenotes/notes/bug-1960314-ibm-svf-Resize_of_GMCV_volumes_in_group-f9a176153518204c.yaml releasenotes/notes/bug-1960315-ibm-svf-delete_and_resize_volume_issue_in_reverse_replication-952164a73b336a6d.yaml releasenotes/notes/bug-1961548-ibm-svf-Fix_multiple_SVC_CLI_calls_for_create_volume_operation-338b009bca72ee60.yaml releasenotes/notes/bug-1962824-ff0cac0d35021f84.yaml releasenotes/notes/bug-1965847-fix-backup-import-3b3ccdf740a13cff.yaml releasenotes/notes/bug-1966639-ibm-svf-resize_issue_in_reverse_replication_for_volume_part_of_group-59e3f5d652a4707c.yaml releasenotes/notes/bug-1968159-ibm-svf-Retype_failure_for_replication_volume-type-4e0671b299315f4b.yaml releasenotes/notes/bug-1968170-add-parameters-used-in-reimage-volume-48d5b8008ec82ea6.yaml releasenotes/notes/bug-1970768-temp-vol-delete-6586a13f08d7a5c1.yaml releasenotes/notes/bug-1976400-storwize-Fix_multiple_SVC_CLI_calls_for_rc-relationship_operations-24d15dfccc922cdd.yaml releasenotes/notes/bug-1976499-storwize-lsfcportsetmember_is_being_called_in_the_wrong_SVC_code_level-db06c4eca902f389.yaml releasenotes/notes/bug-1978020-glance-upload-uri-8fbc70c442ac620c.yaml releasenotes/notes/bug-1978290-ibm-svf-optimize_SSH_calls_in_creation_of_replicated_volumes-8fad7f54a4d3e73a.yaml releasenotes/notes/bug-1978729-cinder-backup-4cd87c4d71b7713e.yaml releasenotes/notes/bug-1981354-infinidat-iscsi-fix-multipath-3f8a0be5f541c66e.yaml releasenotes/notes/bug-1981420-dell-powermax-fix-for-force-flag-9320910dfbf998d2.yaml releasenotes/notes/bug-1981982-infinidat-fix-ssl-options-6ddd852c24b16760.yaml releasenotes/notes/bug-1982078-Driver_initialization_error_w.r.t_default_portset-3992a060cca2adcb.yaml releasenotes/notes/bug-1982350-infinidat-fix-multi-attach-19f62d182b675e59.yaml releasenotes/notes/bug-1982405-infinidat-fix-generic-volume-migration-da33a6fe980ac4eb.yaml releasenotes/notes/bug-1983287-infinidat-fix-backup-attached-volume-b28e5dd5c25a24ec.yaml releasenotes/notes/bug-1984000-infinidat-fix-consistency-groups-cf5b9c85dbf972ee.yaml 
releasenotes/notes/bug-1985065-storwize-mkhost-failure-592d8cb76e9feeb2.yaml releasenotes/notes/bug-1996188-vmdk-subformat-allow-list-93e6943d9a486d11.yaml releasenotes/notes/bug-2003300-ibm-svf-enable_support_for_replication_volume_with_mirror_pool_option-83563770463ebbca.yaml releasenotes/notes/bug-2008017-netapp-fix-native-threads-04d8f58f4c29b03d.yaml releasenotes/notes/bug-2008931-hpe-keyerror-on-migration-71d31e6c0a8ab0d9.yaml releasenotes/notes/bug-2012246-292d7d93260a1fe5.yaml releasenotes/notes/bug-2016138-56f07bc9376f55f7.yaml releasenotes/notes/bug-2017815-infinidat-fix-compression-setting-04eaf71933d55912.yaml releasenotes/notes/bug-2027532-volume-list-sort-by-boolean-fix-49972c69007d5ebc.yaml releasenotes/notes/bug-2028857-fix-netapp-replica-failover-error-a9cad94ae56af8d0.yaml releasenotes/notes/bug-2045230-dell-powermax-fix-snapvx-unlink-e27d67d6b217d706.yaml releasenotes/notes/bug-2051830-dell-powermax-rest-api-timeout-b70bd2754debf16a.yaml releasenotes/notes/bug-2052995-dell-powerflex-rest-api-timeout-3a05b6b5d5460176.yaml releasenotes/notes/bug-2055022-dell-powerstore-rest-api-timeout-51b3ae19266757f9.yaml releasenotes/notes/bug-2058596-3c676e7fdc642b3d.yaml releasenotes/notes/bug-2059809-disallow-qcow2-datafile-abc4e6d8be766710.yaml releasenotes/notes/bug-2077643-manage-quota-sync-no-args-7fe8dbc6e3069cfc.yaml releasenotes/notes/bug-2078968-fix-nvme-namespace-mapping-failed-during-live-migration-bbd26bb157b076bf.yaml releasenotes/notes/bug-2081742-dell-powermax-rest-api-hostlunid-ee22d0105c990ea0.yaml releasenotes/notes/bug-2082587-fix-type-passed-during-backup-restoration.yaml releasenotes/notes/bug-2089656-dell-powermax-fix-multi-detach-req-eb0f189841689ce8.yaml releasenotes/notes/bug-2092259-dell-powermax-volume-delete-failed-fix-active-snapshot-ccc3f9b6251d2634.yaml releasenotes/notes/bug-2103742-fix-fail-resize-nfs-volumes-with-snapshots-e861d69b1ae6f97d.yaml releasenotes/notes/bug-2105961-fix-nvmeof-fail-due-to-initiator-property-missing-db8315541f94447f.yaml releasenotes/notes/bug-2110274-fix-detach-issue-for-multiattached-volume-7202cecaeed5ecd0.yaml releasenotes/notes/bug-2111461-fix-db-purge-fails-due-to-foreign-key-constraint-errors-8a60db1f0158b36e.yaml releasenotes/notes/bug-2112245-bulk-vms-creation-device-issue-b9d82f7a826c9f2b.yaml releasenotes/notes/bug-2112403-delete-unusable-image-cache-volumes-f87144726a717d28.yaml releasenotes/notes/bug-2114879-dell-powerflex-improve-secret-handling-b1217791a9dceb1a.yaml releasenotes/notes/bug-2114993-iscsi-fc-detach-operation-fails-when-multiple-initiators-connected-b1069bab32d86027.yaml releasenotes/notes/bug-2116261-fix-consistency-group-support-for-nvme-driver-102c67c706afc25c.yaml releasenotes/notes/bug-2117263-adding-total-volumes-capability-for-netapp-iscsi-nvme-drivers-79da99111b086161.yaml releasenotes/notes/bug-2119644-enable-snapshot-creation-for-flexgroup-pool-zapi-4a6af85888a99a02.yaml releasenotes/notes/bug-b3c37df596c7a632.yaml releasenotes/notes/bug-fix-1866871-f9d61defc00f4007.yaml releasenotes/notes/bug-fix-1867163-27afa39ac77b9e15.yaml releasenotes/notes/bug-gpfs-fix-nfs-cow.yaml releasenotes/notes/bug-invalid-content-type-1715094-8yu8i9w425ua08f3.yaml releasenotes/notes/bug-reno-69539ecb9b0b5464.yaml releasenotes/notes/bug1929429-e749f5e5a242a599.yaml releasenotes/notes/bug1938488-a528893c103c03af.yaml releasenotes/notes/bug1945500-e4df056b8be2e0ef.yaml releasenotes/notes/bug2002995-e423f17eaddae22d.yaml releasenotes/notes/bug_1828993-8e78d7bbee16ca08.yaml 
releasenotes/notes/bug_1870367-49b74d10a9bfcf07.yaml releasenotes/notes/bug_1945824-7f8f238e274ddebd.yaml releasenotes/notes/bugfix-1744692-5aebd0c97ae66407.yaml releasenotes/notes/bugfix-netapp-driver-cinder-ipv6-c3c4d0d6a7d0de91.yaml releasenotes/notes/capacity-based-qos-9f5d174658a40bd5.yaml releasenotes/notes/capacity-based-qos-minimum-values-b24a5f49c986f11d.yaml releasenotes/notes/capacity-headroom-4b07701f1df9e5c4.yaml releasenotes/notes/castellan-backend-0c49591a54821c45.yaml releasenotes/notes/ceph-add-option-to-keep-only-last-n-snapshots-89dc532656f453f4.yaml releasenotes/notes/ceph-backup-no-flatten-36557727e9d73b2b.yaml releasenotes/notes/ceph-catch-more-failure-conditions-d2ec640f5ff8051c.yaml releasenotes/notes/ceph-iscsi-driver-b515bd7fb73ce13b.yaml releasenotes/notes/certificate-based-authentication-for-netapp-drivers-b06a62df620aebc3.yaml releasenotes/notes/cg-scheduler-change-180a36b77e8cc26b.yaml releasenotes/notes/cg_api_volume_type-7db1856776e707c7.yaml releasenotes/notes/cgroupsv2-75476a8e1ea88b5f.yaml releasenotes/notes/change-default-rbd_exclusive_cinder_pool-e59c528c7f728780.yaml releasenotes/notes/change-encryption-key-on-clone-3be7cdb0e27386e0.yaml releasenotes/notes/check-displayname-displaydescription-123sd5gef91acb12.yaml releasenotes/notes/check-snapshots-when-cascade-deleting-transferred-volume-575ef0b76bd7f334.yaml releasenotes/notes/cheesecake-promotion-30a3336fb911c3ad.yaml releasenotes/notes/cinder-api-microversions-d2082a095c322ce6.yaml releasenotes/notes/cinder-api-middleware-remove-deprecated-option-98912ab7e8b472e8.yaml releasenotes/notes/cinder-backup-swift-service-token-9b86e8e73ebd2a22.yaml releasenotes/notes/cinder-clone-encrypted-a28338e2b1838a63.yaml releasenotes/notes/cinder-coprhd-driver-11ebd149ea8610fd.yaml releasenotes/notes/cinder-manage-db-online-schema-migrations-d1c0d40f26d0f033.yaml releasenotes/notes/cinder-manage-online-migrations-exit-status-7c16edb7facc37bb.yaml releasenotes/notes/cinder-manage-quota-ed4ee17f7097d11f.yaml releasenotes/notes/cinder-status-check-backup_driver-fe009985df2bc32f.yaml releasenotes/notes/cinder-status-check-policyjson-ef61826eab95372b.yaml releasenotes/notes/cinder-status-check-stein_removed_drivers-8184abe8ce82f373.yaml releasenotes/notes/cinder-status-check-windows_iscsi_driver-5f4e0b93c7b92f53.yaml releasenotes/notes/cinder_backend_aa_glance-300c8e087c8cf192.yaml releasenotes/notes/clean-file-locks-on-remove-e5898012f4114d3c.yaml releasenotes/notes/clean-file-locks-tool-3a62ba05ef2d2239.yaml releasenotes/notes/cleanup-rbd-temp-file-during-convert-fail-3848e9dbe7e15fc6.yaml releasenotes/notes/cloudbyte-retype-support-4b9f79f351465279.yaml releasenotes/notes/cluster_job_distribution-f916dd2e4cce6c1b.yaml releasenotes/notes/compress-images-fed3e354d94b0845.yaml releasenotes/notes/consistency_group_manage-d30a2ad8917a7a86.yaml releasenotes/notes/coprhd-generic-volume-group-a1d41d439f94ae19.yaml releasenotes/notes/coprhd-mark-unsupported-aa48145873db1ab1.yaml releasenotes/notes/coprhd-remove-the-driver-00ef2c41f4c7dccd.yaml releasenotes/notes/create-update-rules-b46cf9c07c5a3966.yaml releasenotes/notes/create_volume_from_encrypted_image-9666e1ed7b4eab5f.yaml releasenotes/notes/datacore-mark-unsupported-2399bc19a789fb4c.yaml releasenotes/notes/datera-2.2-driver-update-28b97aa2aaf333b6.yaml releasenotes/notes/datera-2.3-driver-update-12d0221fd4bb9fb0.yaml releasenotes/notes/datera-2.4.0-driver-update-164bbc77e6b45eb7.yaml releasenotes/notes/datera-2019.12.10.0-driver-update-cadadf95e4725164.yaml 
releasenotes/notes/datera-driver-v2-update-930468e8259c8e86.yaml releasenotes/notes/datera-mark-unsupported-7b71d9124b3fded2.yaml releasenotes/notes/db-resource-indexes-8010c9a881277503.yaml releasenotes/notes/db-schema-from-kilo-e6e952744531caa2.yaml releasenotes/notes/db-schema-from-liberty-f5fa57d67441dece.yaml releasenotes/notes/db-schema-from-mitaka-168ac06161e9ca0d.yaml releasenotes/notes/db-schema-from-newton-79b18439bd15e4c4.yaml releasenotes/notes/db-schema-from-ocata-e1d7dd1dc4d3a0d9.yaml releasenotes/notes/db-schema-from-queens-de5025a780ff1d30.yaml releasenotes/notes/default-apiv1-disabled-9f6bb0c67b38e670.yaml releasenotes/notes/del_volume_with_fc-f024b9f2d6eaca0f.yaml releasenotes/notes/delete-tsm-backup-driver-725e33f7c213fd50.yaml releasenotes/notes/delete-volume-metadata-keys-3e19694401e13d00.yaml releasenotes/notes/delete-volume-with-snapshots-0b104e212d5d36b1.yaml releasenotes/notes/delete_parameters-6f44fece22a7787d.yaml releasenotes/notes/dell-emc-pvme-driver-9333594b2cc1e0b5.yaml releasenotes/notes/dell-emc-sc-api-timeouts-ce8d166e1847ea94.yaml releasenotes/notes/dell-emc-sc-bugfix-1756914-ffca3133273040f6.yaml releasenotes/notes/dell-emc-sc-mult-attach-d09cfd06ee8db8da.yaml releasenotes/notes/dell-emc-sc-support-generic-groups-98c7452d705b36f9.yaml releasenotes/notes/dell-emc-unity-driver-72cb901467b23b22.yaml releasenotes/notes/dell-powerflex-bugfix-1998136-self-signed-certificates-62e3cb444ab7ff2b.yaml releasenotes/notes/dell-powermax-unisphere-v101-7195af74d1c7671c.yaml releasenotes/notes/demc-trim-bb2165f74a5703a6.yaml releasenotes/notes/deprecate-api-v2-9f4543ab2e14b018.yaml releasenotes/notes/deprecate-backends-in-default-b9784a2333fe22f2.yaml releasenotes/notes/deprecate-backup-service-to-driver-mapping-a3afabd4f55eca01.yaml releasenotes/notes/deprecate-block-device-driver-d30232547a31fe1e.yaml releasenotes/notes/deprecate-cinder-linux-smb-driver-4aec58f15a963c54.yaml releasenotes/notes/deprecate-consistency-group-apis-0d9120d16f090781.yaml releasenotes/notes/deprecate-dell-emc-drivers-caracal-a575e95cd61ac1d8.yaml releasenotes/notes/deprecate-glusterfs-backup-drv-5581909c0cc83102.yaml releasenotes/notes/deprecate-iser-opts-b0af9a68b7b8578c.yaml releasenotes/notes/deprecate-json-formatted-policy-file-dc3441a7b1dbfb47.yaml releasenotes/notes/deprecate-linbit-drbdmanagedrv-75c41ce8e81cac80.yaml releasenotes/notes/deprecate-nested-quota-d1ad7e8f54492a87.yaml releasenotes/notes/deprecate-rbd_keyring_conf-432efbcd47e52c8a.yaml releasenotes/notes/deprecate-sf-allow-template-caching-b3a1ea32512cbb11.yaml releasenotes/notes/deprecate-tsm-backup-driver-8be0c78ec1a9d6dc.yaml releasenotes/notes/deprecate-windows-support-4667f38d71fa8ad6.yaml releasenotes/notes/deprecate-xml-api-bf3e4079f1dc5eae.yaml releasenotes/notes/deprecate_hosts_api_extension-fe0c042af10a20db.yaml releasenotes/notes/deprecate_logs_commands-a0d59cb7535a2138.yaml releasenotes/notes/deprecate_osapi_volume_base_url-b6984886a902a562.yaml releasenotes/notes/deprecate_san_rest_port-0d8610a872e92e09.yaml releasenotes/notes/deprecated-ibm-multipath-f06c0e907a6301de.yaml releasenotes/notes/deprecated-nas-ip-fd86a734c92f6fae.yaml releasenotes/notes/detach-notification-31ae15dafdef36c1.yaml releasenotes/notes/detach-race-delete-012820ad9c8dbe16.yaml releasenotes/notes/detachedinstanceerror-64be35894c624eae.yaml releasenotes/notes/diff-srps-674f2c0cc893db4b.yaml releasenotes/notes/discard-config-option-711a7fbf20685834.yaml releasenotes/notes/disco-cinder-driver-9dac5fb04511de1f.yaml 
releasenotes/notes/disco-mark-unsupported-f6eb8208c8c4eb3b.yaml releasenotes/notes/disco-options-94fe9eaad5e397a5.yaml releasenotes/notes/dothill-drivers-removed-da00a6b83865271a.yaml releasenotes/notes/downstream_genconfig-e50791497ce87ce3.yaml releasenotes/notes/drbd-linstor-rest-update-52fd52f6c09a4dd3.yaml releasenotes/notes/drbd-linstor-volume-driver-20273a9ad3783cf5.yaml releasenotes/notes/drbd-resource-options-88599c0a8fc5b8a3.yaml releasenotes/notes/driver_reinitialization-b26a8b3e665567ec.yaml releasenotes/notes/drop-db_driver-opt-b644963bf3b6aced.yaml releasenotes/notes/drop-mysql-5-5-support-fe3ececc3c9b9915.yaml releasenotes/notes/drop-py2-377a91a5b66165ab.yaml releasenotes/notes/drop-python-3-6-and-3-7-fa2dda5d6be0cad6.yaml releasenotes/notes/ds8k-allow-multi-attach-41fa7bddbbd719ec.yaml releasenotes/notes/ds8k-replication-group-3f2e8cd3c2e291a3.yaml releasenotes/notes/ds8k-report-backend-state-in-service-list-f0898950a0f4b122.yaml releasenotes/notes/ds8k_async_clone_volume-25232c55da921202.yaml releasenotes/notes/ds8k_specify_pool_lss-5329489c263951ba.yaml releasenotes/notes/enable-force-delete-lun-d0e05b5d669e40f7.yaml releasenotes/notes/enable-multiattach-iscsi-fcp-netapp-driver-98ad2d75fbbf333f.yaml releasenotes/notes/enable-multiattach-nfs-netapp-driver-406b9b285d85c989.yaml releasenotes/notes/enforce_min_vmware-a080055111b04692.yaml releasenotes/notes/extend-volume-completion-action-9bf6b0ed551a8e32.yaml releasenotes/notes/fail-detach-lun-when-auto-zone-enabled-9c87b18a3acac9d1.yaml releasenotes/notes/falconstor-cinder-driver-dcb61441cd7601c5.yaml releasenotes/notes/falconstor-extend-driver-to-utilize-multiple-fss-pools-dc6f2bc84432a672.yaml releasenotes/notes/feature-abort-restore-fe1252288c59e105.yaml releasenotes/notes/feature-clone-across-pools-63021bc853e9161a.yaml releasenotes/notes/feature-cross-az-backups-6b68c4c4456f2fd7.yaml releasenotes/notes/feature-multi-process-backup-8cf5ad5a0cf9b2d5.yaml releasenotes/notes/feature-netapp-iscsi-aa-support-eff8ed19a30e87c0.yaml releasenotes/notes/feature-rbd-exclusive-pool-a9bdebdeb1f0bf37.yaml releasenotes/notes/fix-3par-live-migration-0065bd2626fdb4a1.yaml releasenotes/notes/fix-3par-migrate-rename-662d984e070a1de2.yaml releasenotes/notes/fix-500-http-error-on-resource-conflict.yaml releasenotes/notes/fix-abort-backup-df196e9dcb992586.yaml releasenotes/notes/fix-backup-handling-of-encryption-key-id-f2fa56cadd80d582.yaml releasenotes/notes/fix-cacheable-capability-f893520d79c3db60.yaml releasenotes/notes/fix-cinder-manage-groups-quota-bug-421ae9c9eb99b22f.yaml releasenotes/notes/fix-cross-az-migration-ce97eff61280e1c7.yaml releasenotes/notes/fix-ensure-export-3cccf107a82b35a0.yaml releasenotes/notes/fix-extend-volume-939e30f2e9e516bc.yaml releasenotes/notes/fix-extend-volume-in-thin-pools-57a3d53be4d47704.yaml releasenotes/notes/fix-groups-actions-in-a-a-mode-5d554b30a26da22c.yaml releasenotes/notes/fix-hnas-clone-with-different-volume-type-b969897cba2610cc.yaml releasenotes/notes/fix-hnas-stats-reporting-1335e582e46ff440.yaml releasenotes/notes/fix-host-info-in-volume-details-1759280bd236421e.yaml releasenotes/notes/fix-import-backup-quota-issue-8yh69hd19u7tuu23.yaml releasenotes/notes/fix-kaminario-unique_fqdn_network-ecde36f614c30733.yaml releasenotes/notes/fix-leave-mapped-volume-ef0bd683d415f7b1.yaml releasenotes/notes/fix-list-volume-filtering-3f2bf93ab9b98974.yaml releasenotes/notes/fix-manage-no-action-46b023476e8cd938.yaml releasenotes/notes/fix-manage-replicated-multiattach-9bc258d349e0f5a6.yaml 
releasenotes/notes/fix-multiattach-deletion-b3990acf1f5fd378.yaml releasenotes/notes/fix-netapp-cg-da4fd6c396e5bedb.yaml releasenotes/notes/fix-netapp-custom-igroup-e049b4f3b341dd54.yaml releasenotes/notes/fix-netapp-force_detach-36bdf75dd2c9a030.yaml releasenotes/notes/fix-nfs-optimized-create-vol-9eb43f2050bba74a.yaml releasenotes/notes/fix-nfs-revert-to-snap-adc04204b3661d66.yaml releasenotes/notes/fix-nfs-vol-from-snapshot-654a07d25a33bf7d.yaml releasenotes/notes/fix-powerflex-volume-cache-da3fa1769ef78ae8.yaml releasenotes/notes/fix-quota-deleting-temporary-volume-274e371b425e92cc.yaml releasenotes/notes/fix-rbd-upload-diff-format-38fc4ef24d7145ba.yaml releasenotes/notes/fix-reimage-image-snap-15ecd5fce9973d5d.yaml releasenotes/notes/fix-reimage-sparse-copy-d346e8f55afa6280.yaml releasenotes/notes/fix-reimage-status-rollback-eb2aa8f82a8caabc.yaml releasenotes/notes/fix-remotefs-clone-volume-locking-385e740d4a5a813b.yaml releasenotes/notes/fix-request_id-6f6972b2c12d1a18.yaml releasenotes/notes/fix-reserve-volume-policy-31790a8d865ee0a1.yaml releasenotes/notes/fix-reserved-image-properties-9519ddc080e7ed1a.yaml releasenotes/notes/fix-resource-size-76e8ff25f07925f2.yaml releasenotes/notes/fix-retype-with-az-e048123d982f213d.yaml releasenotes/notes/fix-schema-validation-attachment-create-3488914cb52d44d2.yaml releasenotes/notes/fix-show-transfer-for-non-admins-be001d79975b325d.yaml releasenotes/notes/fix-show-volume-non-admins-1bc5238398e73981.yaml releasenotes/notes/fix-size-reporting-seagate-driver.rst releasenotes/notes/fix-solidfire-provisioning-report-880141e64c1ea52f.yaml releasenotes/notes/fix-solidfire-python3-support-ee02ff2c1ec920f2.yaml releasenotes/notes/fix-solidfire-replication-dcb3e59b29950933.yaml releasenotes/notes/fix-storage_protocol-6baf55e13249463c.yaml releasenotes/notes/fix-sub-clone-operation-f42a84ab17930f24.yaml releasenotes/notes/fix-transfer-accept-policy-7594806372b14284.yaml releasenotes/notes/fix-unnecessary-migration-on-retype-67cedb1bd8e4c4b2.yaml releasenotes/notes/fix-vol-image-metadata-endpoints-returning-none-ba0590e6c6757b0c.yaml releasenotes/notes/force-delete-mv-a53924f09c475386.yaml releasenotes/notes/friendly-zone-names-d5e131d356040de0.yaml releasenotes/notes/fujitsu-add-cli-copy-1647fb54970a186d.yaml releasenotes/notes/fujitsu-add-cli-extend-e94b887dac8a45b3.yaml releasenotes/notes/fujitsu-eternus-dx-fc-741319960195215c.yaml releasenotes/notes/fujitsu-eternus-dx-iscsi-e796beffb740db89.yaml releasenotes/notes/fujitsu-eternus-dx-update-4755ec446030d263.yaml releasenotes/notes/fujitsu-improve-cli-function-6cabf36ffc6d44d6.yaml releasenotes/notes/fujitsu-improve-create-snapshot-cd796e66eea43c90.yaml releasenotes/notes/fujitsu-improve-delete-volume-8fa509f0424deb8e.yaml releasenotes/notes/fujitsu-multiple-pools-a0dd9197b16b3122.yaml releasenotes/notes/fujitsu-pool-infomation-modified-7ebcbbc11a2e6f28.yaml releasenotes/notes/fujitsu-qos-support-1c1528da06d0b38a.yaml releasenotes/notes/fujitsu-support-revert-to-snapshot-5d40dbe8b918e68e.yaml releasenotes/notes/fujitsu-update-migrated-volume-1d205cdbd7e65a28.yaml releasenotes/notes/fungible-cinder-driver-af8aeb57846c8ecc.yaml releasenotes/notes/fusionstorage-cinder-driver-8f3bca98f6e2065a.yaml releasenotes/notes/fusionstorage-driver-cf13b8d95ae97319.yaml releasenotes/notes/general-upgrades-notes-120f022aa5bfa1ea.yaml releasenotes/notes/generalized-resource-filter-hg598uyvuh119008.yaml releasenotes/notes/generic-group-quota-manage-support-559629ad07a406f4.yaml 
releasenotes/notes/generic-groups-in-gpfs-00bb093945a02642.yaml releasenotes/notes/generic-groups-in-vnx-cbbe1346e889b5c2.yaml releasenotes/notes/generic-volume-groups-69f998ce44f42737.yaml releasenotes/notes/get-driver-opts-924f72346ca1e459.yaml releasenotes/notes/glance-v1-removed-5121af3bef285324.yaml releasenotes/notes/glance_v2_upload-939c5693bcc25483.yaml releasenotes/notes/gmr-ca97ba4602ce0831.yaml releasenotes/notes/google-auth-for-gcs-backup-1642cd0e741fbdf9.yaml releasenotes/notes/group-snapshots-36264409bbb8850c.yaml releasenotes/notes/group-type-group-specs-531e33ee0ae9f822.yaml releasenotes/notes/group-update-d423eaa18dbcecc1.yaml releasenotes/notes/handle-external-events-in-extend-6ae53b822baf0004.yaml releasenotes/notes/hbsd-driver-deletion-d81f7c4513f45d7b.yaml releasenotes/notes/healthcheck-449ed4292e6bfa22.yaml releasenotes/notes/hedvig-cinder-driver-e7b98f4bc214bc49.yaml releasenotes/notes/hgst-mark-unsupported-b2886de36421c8b0.yaml releasenotes/notes/hitachi-fix-delete-volume-issues-e648525e597505fd.yaml releasenotes/notes/hitachi-fix-gad-vol-compatibility-e9c62c18f7a12bc7.yaml releasenotes/notes/hitachi-fix-output-token-c9eb15423e05c5b9.yaml releasenotes/notes/hitachi-fix-unlock-f4e044807985e60b.yaml releasenotes/notes/hitachi-generic-volume-groups-434a27b290d51bf3.yaml releasenotes/notes/hitachi-prevent-data-loss-9ec3569d7d5b1e7d.yaml releasenotes/notes/hitachi-storage-driver-d38dbd990730388d.yaml releasenotes/notes/hitachi-unsupported-drivers-37601e5bfabcdb8f.yaml releasenotes/notes/hitachi-vsp-add-gad-volume-514edf8ebeb2e983.yaml releasenotes/notes/hitachi-vsp-add-hostgroup-name-format-option-4c8e4a5ddd69b9bd.yaml releasenotes/notes/hitachi-vsp-add-multi-pool-4c4589b93399e641.yaml releasenotes/notes/hitachi-vsp-aix-os-type-23bf7cc3b98dff3a.yaml releasenotes/notes/hitachi-vsp-driver-87659bb496bb459b.yaml releasenotes/notes/hitachi-vsp-fix-except-in-del-vol-ca8b4c5d40d69531.yaml releasenotes/notes/hitachi-vsp-fix-keyerr-when-backend-down-a5a35b15dc8f1132.yaml releasenotes/notes/hitachi-vsp-fix-resource-lock-msg-5a119426e6c65998.yaml releasenotes/notes/hitachi-vsp-fix-to-use-correct-HGname-78c3c47dcf984ddf.yaml releasenotes/notes/hitachi-vsp-fix-to-use-correct-pool-in-GAD-9413a343dcc98029.yaml releasenotes/notes/hitachi-vsp-iscsi-driver-cac31d7c54d7718d.yaml releasenotes/notes/hitachi-vsp-port-scheduler-207e01b3cd13350b.yaml releasenotes/notes/hitachi-vsp-ports-option-7147289e6529d7fe.yaml releasenotes/notes/hitachi-vsp-support-dedup-comp-4e27d95b34681f66.yaml releasenotes/notes/hitachi-vsp-support-new-storages-d8e8a527462dba24.yaml releasenotes/notes/hitachi-vsp-support-qos-667ca4f8ae8c2ba2.yaml releasenotes/notes/hitachi-vsp-tgt-port-asgn-7536da008990824a.yaml releasenotes/notes/hitachi-vsp-update-retype-483a9fb48dc667d9.yaml releasenotes/notes/hitachi_fix-ldevnickname-0a0756449e7448d9.yaml releasenotes/notes/hitachi_fix-testscripts-e4490f9f99994fb8.yaml releasenotes/notes/hnas-change-snapshot-names-8153b043eb7e99fc.yaml releasenotes/notes/hnas-deprecate-iscsi-driver-cd521b3a2ba948f3.yaml releasenotes/notes/hnas-deprecate-nfs-driver-0d114bbe141b5d90.yaml releasenotes/notes/hnas-deprecated-svc-volume-type-77768f27946aadf4.yaml releasenotes/notes/hnas-drivers-refactoring-9dbe297ffecced21.yaml releasenotes/notes/hnas-list-manageable-9329866618fa9a9c.yaml releasenotes/notes/hnas-manage-unmanage-snapshot-support-40c8888cc594a7be.yaml releasenotes/notes/hnas-remove-iscsi-driver-419e9c08133f9f0a.yaml releasenotes/notes/hnas_deprecate_xml-16840b5a8c25d15e.yaml 
releasenotes/notes/hpe-3par-add-alletra-9k-info-5e1d09e083d3faa9.yaml releasenotes/notes/hpe-3par-add-alletra-mp-info-3ed7f5160bf58dbe.yaml releasenotes/notes/hpe-3par-add-get-manageable-2926f21116c98599.yaml releasenotes/notes/hpe-3par-add-ipv6-support-a9f453a66c51e6d3.yaml releasenotes/notes/hpe-3par-calculate-free-capacity-926b60b70bba18b7.yaml releasenotes/notes/hpe-3par-clone-of-repl-vol-914a6e0e105996b4.yaml releasenotes/notes/hpe-3par-code-changes-for-new-wsapi-25865a65a428ce46.yaml releasenotes/notes/hpe-3par-code-changes-for-wsapi-2025-75a9fda5d994504c.yaml releasenotes/notes/hpe-3par-comment-for-cloned-volume-ef16dccf7639452b.yaml releasenotes/notes/hpe-3par-convert-to-base-vol-delete-snap-a460a4b1c419804a.yaml releasenotes/notes/hpe-3par-fix-multi-detach-in-multi-host-env-3f2211f29a336b6e.yaml releasenotes/notes/hpe-3par-ignore-duplicate-ip-7e67260ee1cab40e.yaml releasenotes/notes/hpe-3par-login-getWsApiVersion-0252d655844ae054.yaml releasenotes/notes/hpe-3par-peer-persistence.yaml-91cc84bf89dbb462.yaml releasenotes/notes/hpe-3par-pp-primera-a3442d004545b3a9.yaml releasenotes/notes/hpe-3par-pp-rcg-policy-options-53271f38c315779f.yaml releasenotes/notes/hpe-3par-primera-add-iscsi-5af339643dfa0928.yaml releasenotes/notes/hpe-3par-return-lun-ids-6bc973ef74d0bf9c.yaml releasenotes/notes/hpe-3par-retype-migrate-2383ddaf92c87f9e.yaml releasenotes/notes/hpe-3par-retype-thin-deco-2263063d847db454.yaml releasenotes/notes/hpe-3par-retype-vol-without-comment-a44c9be1ed76e7bb.yaml releasenotes/notes/hpe-3par-reuse-session-4439cb07b9118867.yaml releasenotes/notes/hpe-3par-small-qos-latency-values-d5fa70a605b04335.yaml releasenotes/notes/hpe-3par-specify-nsp-for-fc-bootable-volume-f372879e1b625b4d.yaml releasenotes/notes/hpe-3par-support-duplicated-fqdn-751ad1dbcd137fbb.yaml releasenotes/notes/hpe-3par-use-vlan-iscsi-ips-f75787b0d281030b.yaml releasenotes/notes/hpe-xp-fc-iscsi-cinder-driver-75e04febff42c9ba.yaml releasenotes/notes/hpe3par-replication-group-a18a28d18de09e95.yaml releasenotes/notes/hpe_3par_multiattach-bf98a9e5c2208902.yaml releasenotes/notes/hpe_3par_multiattach_detach_fix-d2d3785d656fba90.yaml releasenotes/notes/hpelh-deprecations-eb7716a0b02f145e.yaml releasenotes/notes/hpmsa-driver-updates-train-4fcbe71f3e2bb2da.yaml releasenotes/notes/huawei-backend-capabilities-report-optimization-d1c18d9f62ef71aa.yaml releasenotes/notes/huawei-fusionstorage-driver-readd-70fecc39eeaa0f5f.yaml releasenotes/notes/huawei-fusionstorage-unsupported-4be766dd2ba8f980.yaml releasenotes/notes/huawei-generic-group-bc3fb7236efc58e7.yaml releasenotes/notes/huawei-iscsi-multipath-support-a056201883909287.yaml releasenotes/notes/huawei-manage-unmanage-snapshot-e35ff844d72fedfb.yaml releasenotes/notes/huawei-pool-disktype-support-7c1f64639b42a48a.yaml releasenotes/notes/huawei-support-iscsi-configuration-in-replication-7ec53737b95ffa54.yaml releasenotes/notes/huawei-support-manage-volume-2a746cd05621423d.yaml releasenotes/notes/hybrid-aggregates-in-netapp-cdot-drivers-f6afa9884cac4e86.yaml releasenotes/notes/ibm-flashsystem-manage-unmanage-88e56837102f838c.yaml releasenotes/notes/ibm-flashsystem-unsupported-28f9aaf11b56fb2f.yaml releasenotes/notes/ibm-gpfs-unsupported-b95274829573835d.yaml releasenotes/notes/ibm-storage-supported-a373a54777333929.yaml releasenotes/notes/ibm-storage-unsupported-9e8ea6ce9cea503d.yaml releasenotes/notes/ibm-storwize-removehostmappings-e7eeaf898786c6bf.yaml releasenotes/notes/ibm-storwize-supported-6518628fb78d58a4.yaml 
releasenotes/notes/ibm-storwize-unsupported-e79cfd27523f013c.yaml releasenotes/notes/ibm-storwzie-mirror-volume-ffe4c9bde78cdf1d.yaml releasenotes/notes/ibm-svf-add-cleanrate-support-e246a8f218d2f22e.yaml releasenotes/notes/ibm-svf-delete-volume-flag-support-for-deletevolumegroup-4224db1ca798a3bf.yaml releasenotes/notes/ibm-svf-manage-gmcv-change-volumes-on-childpools-9d2217e1e6f07a0e.yaml releasenotes/notes/ibm-svf-manage-host-attachment-using-portsets-0003c54b185f0eb2.yaml releasenotes/notes/ibm-svf-provide-IOPs-based-storage-offering-1b7532f42fd6d76e.yaml releasenotes/notes/ibm-svf-support-for-temporary-volumegroup-3db871245b864a52.yaml releasenotes/notes/ibm-svf-support-hyperswap-volume-extend-f578efa02314faff.yaml releasenotes/notes/ibm-svf-volumegroup-configuration-parameter-44fe67bebe284191.yaml releasenotes/notes/ibm-svf-volumegroup-snapshot-support-0a16d8a065501d66.yaml releasenotes/notes/ibm-svf-volumegroup-support-134fc2194ad092bd.yaml releasenotes/notes/ibmsvciogrpselection-e607739b6f655a27.yaml releasenotes/notes/idempotent-glance-metadata-aa78e09736cf57d9.yaml releasenotes/notes/iet-deprecation-f8059417c6adbb78.yaml releasenotes/notes/image-metadata-size-increase-323812970dc0e513.yaml releasenotes/notes/image-volume-type-c91b7cff3cb41c13.yaml releasenotes/notes/improve-volume-transfer-records-5599e82ade4d302c.yaml releasenotes/notes/improvement-to-get-group-detail-0e8b68114e79a8a2.yaml releasenotes/notes/improvement-to-query-consistency-group-detail-84a906d45383e067.yaml releasenotes/notes/incorrect-host-config-option-347e60f957458d54_new.yaml releasenotes/notes/increase_glance_num_retries-66b455a0729c4535.yaml releasenotes/notes/infinidat-add-infinibox-driver-67cc33fc3fbff1bb.yaml releasenotes/notes/infinidat-add-storage-assisted-migration-4e12f24ee297ef65.yaml releasenotes/notes/infinidat-compression-a828904aaba90da2.yaml releasenotes/notes/infinidat-group-support-44cd0715de1ea502.yaml releasenotes/notes/infinidat-infinisdk-04f0edc0d0a597e3.yaml releasenotes/notes/infinidat-iscsi-support-78e0d34d9e7e08c4.yaml releasenotes/notes/infinidat-manage-unmanage-ccc42b79d741369f.yaml releasenotes/notes/infinidat-max-osr-2d9fd2d0f9424657.yaml releasenotes/notes/infinidat-multi-attach-support-533b3e559c15801f.yaml releasenotes/notes/infinidat-qos-50d743591543db98.yaml releasenotes/notes/inspur-as13000-cinder-driver-bfa5cc17683d87a9.yaml releasenotes/notes/inspur-instorage-fc-cinder-driver-70c13e4a64d785d5.yaml releasenotes/notes/itri-disco-driver-removal-11e14fbf431ea876.yaml releasenotes/notes/joviandss-iscsi-driver-0becc6ee6a0b3c0a.yaml releasenotes/notes/json-schema-validation-0d22576bd556f4e0.yaml releasenotes/notes/k2-disable-discovery-bca0d65b5672ec7b.yaml releasenotes/notes/k2-non-unique-fqdns-b62a269a26fd53d5.yaml releasenotes/notes/kaminario-cinder-driver-bug-1646692-7aad3b7496689aa7.yaml releasenotes/notes/kaminario-cinder-driver-bug-1646766-fe810f5801d24f2f.yaml releasenotes/notes/kaminario-cinder-driver-bug-44c728f026394a85.yaml releasenotes/notes/kaminario-cinder-driver-remove-deprecate-option-831920f4d25e2979.yaml releasenotes/notes/kaminario-fc-cinder-driver-8266641036281a44.yaml releasenotes/notes/kaminario-iscsi-cinder-driver-c34fadf63cd253de.yaml releasenotes/notes/lefthand-consistency-groups-d73f8e418884fcc6.yaml releasenotes/notes/lefthand-manage-unmanage-snapshot-04de39d268d51169.yaml releasenotes/notes/lenovo-driver-updates-train-f2ff96ca4a2885db.yaml releasenotes/notes/leverage-compression-accelerator-579c7032290cd1e9.yaml 
releasenotes/notes/lightbits-qos-support-1a44a9fcdfd1ee93.yaml releasenotes/notes/lightbits-snapshot-timeout-6b25dbd15a650d52.yaml releasenotes/notes/lightbits-volume-ipacl-23da3aa469689817.yaml releasenotes/notes/list-manageable-86c77fc39c5b2cc9.yaml releasenotes/notes/live_migration_v3-ae98c0d00e64c954.yaml releasenotes/notes/lock_path-940af881b2112bbe.yaml releasenotes/notes/lvm-delete-error-f12da00c1b3859dc.yaml releasenotes/notes/lvm-ipv6-fix-e8d418726c92bbd5.yaml releasenotes/notes/lvm-minimum-version-2-02-107-df4551d088f8b5a3.yaml releasenotes/notes/lvm-nvmet-fixes-fc5e867abc699633.yaml releasenotes/notes/lvm-nvmet-new-conn_props-25320e34d6ca6ac7.yaml releasenotes/notes/lvm-nvmet-tcp-72a41be1a1fe0fbd.yaml releasenotes/notes/lvm-thin-overprovision-1d279f66ee2252ff.yaml releasenotes/notes/lvm-type-default-to-auto-a2ad554fc8bb25f2.yaml releasenotes/notes/manage-resources-v3-c06096f75927fd3b.yaml releasenotes/notes/mark-blockbridge-unsupported-c9e55df0eb2e3c9f.yaml releasenotes/notes/mark-brocade-unsupported-d5760b4bb8173652.yaml releasenotes/notes/mark-cisco-zm-unsupported-57e5612f57e2407b.yaml releasenotes/notes/mark-cloudbyte-unsupported-8615a127439ed262.yaml releasenotes/notes/mark-coho-unsupported-989db9d88ed7fff8.yaml releasenotes/notes/mark-dothill-unsupported-7f95115b7b24e53c.yaml releasenotes/notes/mark-falconstor-unsupported-3b065556a4cd94de.yaml releasenotes/notes/mark-hpe-xp-unsupported-c9ce6cfbab622e46.yaml releasenotes/notes/mark-infortrend-deprecated-553de89f8dd58aa8.yaml releasenotes/notes/mark-nexenta-edge-unsupported-76c500738f0b3c61.yaml releasenotes/notes/mark-nexentaedge-unsupported-56d184fdccc6eaac.yaml releasenotes/notes/mark-nimble-deprecated-9f7d1c178b48fa39.yaml releasenotes/notes/mark-qnap-unsupported-79bd8ece9a2bfcd2.yaml releasenotes/notes/mark-reduxio-deprecated-b435032a8fdb16f2.yaml releasenotes/notes/mark-scality-unsupported-530370e034a6f488.yaml releasenotes/notes/mark-synology-deprecated-134ba9764e14af67.yaml releasenotes/notes/mark-tegile-deprecated-1effb23010ea997c.yaml releasenotes/notes/mark-unsupported-gnap-739f90232c60ab3e.yaml releasenotes/notes/mark-unsupported-lefthand-312f8c34df6ed119.yaml releasenotes/notes/mark-unsupported-tintri-driver-a71b09b7c001c75e.yaml releasenotes/notes/mark-unsupported-vrtshyperscale-2bd42e2af8b803d7.yaml releasenotes/notes/mark-violin-unsupported-fdf6b34cf9847359.yaml releasenotes/notes/mark-xio-deprecated-18c914e15695d793.yaml releasenotes/notes/mark-zte-unsupported-3c048e419264eca2.yaml releasenotes/notes/metadata-for-volume-summary-729ba648db4e4e54.yaml releasenotes/notes/migrate-backup-encryption-keys-to-barbican-6f07fd48d4937b2a.yaml releasenotes/notes/migrate-cg-to-generic-volume-groups-f82ad3658f3e567c.yaml releasenotes/notes/migrate-fixed-key-to-barbican-91dfcb829efd4bb6.yaml releasenotes/notes/min-max-vol-size-on-type-bc7c75ea73a74d02.yaml releasenotes/notes/minimum-u4p-version-4c66d69d1b873796.yaml releasenotes/notes/modify-ensure-export-1d56a40f5e762aa8.yaml releasenotes/notes/move-scaleio-driver-to-dell-emc-dir-c195374ca6b7e98d.yaml releasenotes/notes/move-xtremio-driver-to-dell-emc-dir-f7e07a502cafd78f.yaml releasenotes/notes/msa-multiattach-5407eb60093de8f1.yaml releasenotes/notes/msa2060-99150398a9c416f6.yaml releasenotes/notes/multiple_clone-82bd7f80ae439080.yaml releasenotes/notes/mv-use_quota-b8e010f8f68a1eaa.yaml releasenotes/notes/mv-volume-type-name-and-id-5f4fd8480874fe9b.yaml releasenotes/notes/nec-allow-more-than-4iSCSI-portals-8342defe64491f81.yaml 
releasenotes/notes/nec-auto-accesscontrol-55f4b090e8128f5e.yaml releasenotes/notes/nec-delete-unused-parameter-367bc9447acbb03e.yaml releasenotes/notes/nec-delete-volume-per-limit-d10b9df86f64b80e.yaml releasenotes/notes/nec-manage-unmanage-06f9beb3004fc227.yaml releasenotes/notes/nec-nondisruptive-backup-471284d07cd806ce.yaml releasenotes/notes/nec-storage-assist-revert-to-sanpshot-58cddebfbf06d222.yaml releasenotes/notes/nec-support-multi-attach-8aae5100f513656c.yaml releasenotes/notes/nec_storage_volume_driver-57663f9ecce1ae19.yaml releasenotes/notes/nec_v_storage_volume_driver-e3cb7e3c496ab066.yaml releasenotes/notes/netapp-add-generic-group-support-cdot-9bebd13356694e13.yaml releasenotes/notes/netapp-add-support-for-adaptive-qos-0b76dadf7c044cd8.yaml releasenotes/notes/netapp-asar2-disaggregated-platform-support-a1b2c3d4e5f6g7h8.yaml releasenotes/notes/netapp-cDOT-whole-backend-replication-support-59d7537fe3d0eb05.yaml releasenotes/notes/netapp-chap-iscsi-auth-264cd942b2a76094.yaml releasenotes/notes/netapp-data-ontap-deprecate-7mode-drivers-a39bfcb3afefc9a5.yaml releasenotes/notes/netapp-deprecate-eseries-drivers-bc4f552d277c07b9.yaml releasenotes/notes/netapp-eseries-consistency-groups-4f6b2af2d20c94e9.yaml releasenotes/notes/netapp-fix-issue-while-ensuring-snapmirror-creation-bea36a69d443e86f.yaml releasenotes/notes/netapp-log-filter-f3256f55c3ac3faa.yaml releasenotes/notes/netapp-migrated-qos-c0c8aae50d010c75.yaml releasenotes/notes/netapp-nfs-aa-support-477ddf585c5aa578.yaml releasenotes/notes/netapp-nfs-consistency-group-support-83eccc2da91ee19b.yaml releasenotes/notes/netapp-nfs-copy-offload-image-812c7152d9fe4aae.yaml releasenotes/notes/netapp-nfs-deprecate-copy-offload-option-f9d6fe8e3dfafb04.yaml releasenotes/notes/netapp-non-discovery-19af4e10f7b190ea.yaml releasenotes/notes/netapp-nvme-in-use-expansion-d11f03fb64050145.yaml releasenotes/notes/netapp-ontap-add-revert-to-snapshot-ce20810bcf094fce.yaml releasenotes/notes/netapp-ontap-fix-detach-multiattach-d99d33dff2fefb4c.yaml releasenotes/notes/netapp-ontap-fix-flexvol-replica-create-c7772837df20021f.yaml releasenotes/notes/netapp-ontap-fix-force-detach-55be3f4ac962b493.yaml releasenotes/notes/netapp-ontap-fix-qos-min-support-svm-scoped-account-a8458445d459023c.yaml releasenotes/notes/netapp-ontap-rest-api-client-d889cfa895f01249.yaml releasenotes/notes/netapp-ontap-use_exact_size-d03c90efbb8a30ac.yaml releasenotes/notes/netapp-solidfire-add-replication-mode-eb26535d0ec78cb4.yaml releasenotes/notes/netapp-solidfire-fix-exceptions-while-eos-upgrade-1e3df89b5fb79165.yaml releasenotes/notes/netapp-solidfire-fix-osprofiler-infinite-recursion-ec3d4794c89b2f83.yaml releasenotes/notes/netapp-solidfire-stats-improving-57207f313d7faf42.yaml releasenotes/notes/netapp-space-allocation-support-36a26aecc8fe1500.yaml releasenotes/notes/netapp_cdot_report_shared_blocks_exhaustion-073a73e05daf09d4.yaml releasenotes/notes/netapp_fix_svm_scoped_permissions.yaml releasenotes/notes/new-config-opts-for-periodic_interval-d0cb17a2d72e0cd0.yaml releasenotes/notes/new-nova-config-section-2a7a51a0572e7064.yaml releasenotes/notes/new-osprofiler-call-0bb1a305c8e8f9cc.yaml releasenotes/notes/nexenta-edge-driver-removal-5626d542d75f3d43.yaml releasenotes/notes/nexenta-edge-iscsi-b3f12c7a719e8b8c.yaml releasenotes/notes/nexenta-ns5-5d223f3b60f58aad.yaml releasenotes/notes/nexentaedge-iscsi-driver-302529c56cdbbf38.yaml releasenotes/notes/nexentaedge-iscsi-ee5d6c05d65f97af.yaml releasenotes/notes/nexentaedge-nbd-eb48268723141f12.yaml 
releasenotes/notes/nexentastor5-driver-update-937d2a1ba76a504a.yaml releasenotes/notes/nexentastor5-https-6d58004838cfab30.yaml releasenotes/notes/nexentastor5-smartcompression-disabled-9c6ca7c758b6de69.yaml releasenotes/notes/nexentastor5_iscsi-e1d88b07d15c660b.yaml releasenotes/notes/nexentastor5_nfs-bcc8848716daea63.yaml releasenotes/notes/nfs-online-snapshot-c05e6c8113bbded6.yaml releasenotes/notes/nfs-snapshots-21b641300341cba1.yaml releasenotes/notes/nfs_backup_no_overwrite-be7b545453baf7a3.yaml releasenotes/notes/nimble-add-alletra-6k-info-8d242a809e6044a5.yaml releasenotes/notes/nimble-add-fc-support-0007fdbd647be947.yaml releasenotes/notes/nimble-add-force-backup-539e1e5c72f84e61.yaml releasenotes/notes/nimble-change-location-277b7fc0c39fd91d.yaml releasenotes/notes/nimble-consistency-groups-support-7c932d5557fa725e.yaml releasenotes/notes/nimble-manage-unmanage-1d6d5fc23cbe59a1.yaml releasenotes/notes/nimble-mark-supported-0c8e5e21c8d1179d.yaml releasenotes/notes/nimble-mark-usnsupported-0c8e5e21c8d1179d.yaml releasenotes/notes/nimble-max-oversubscription-ratio-1d9812954f824fcf.yaml releasenotes/notes/nimble-qos-specs-8cd006777c66a64e.yaml releasenotes/notes/nimble-replication-a5f757f7d7047065.yaml releasenotes/notes/nimble-rest-api-support-75c2324ee462d026.yaml releasenotes/notes/nimble-retype-support-18f717072948ba6d.yaml releasenotes/notes/nimble-thin-provision-by-default-c5ac66120b2361ef.yaml releasenotes/notes/nvmeof-premature-terminate-conn-63e3cc1fd1832874.yaml releasenotes/notes/nvmet-multipath-d35f55286f263e72.yaml releasenotes/notes/nvmet-shared-targets-20ed7279ef29f002.yaml releasenotes/notes/online-migration-checks-64b0d1732901e78e.yaml releasenotes/notes/ontap-add-provisioned-capacity-option-2f8122663eec51ae.yaml releasenotes/notes/ontap-add-storage-assisted-migration-70f6fb95dbb7e580.yaml releasenotes/notes/open-e-joviandss-disable-revert-to-snapshot-359a2e0317e618ec.yaml releasenotes/notes/open-e-joviandss-enable-multiattach-b1d38ffcc53bf59c.yaml releasenotes/notes/operate-migrated-groups-with-cp-apis-e5835c6673191805.yaml releasenotes/notes/oracle-zfssa-unsupported-4ce035213fa0e097.yaml releasenotes/notes/orphaned_unmanaged_volume-db63ec0509b70b8f.yaml releasenotes/notes/os-brick-lock-dir-35bdd8ec0c0ef46d.yaml releasenotes/notes/per-backend-az-28727aca360a1cc8.yaml releasenotes/notes/period-task-clean-reservation-0e0617a7905df923.yaml releasenotes/notes/permit_volume_type_operations-b2e130fd7088f335.yaml releasenotes/notes/policy-for-type-list-and-show-apis-rt56uy78crt5e378.yaml releasenotes/notes/policy-in-code-226f71562ab28195.yaml releasenotes/notes/powermax-90-to-91-endpoints-a92c4d158cb63fe4.yaml releasenotes/notes/powermax-91-to-92-endpoints-bb467c8aca0165dd.yaml releasenotes/notes/powermax-active-active-support-bec2d96480046d82.yaml releasenotes/notes/powermax-auto-migration-5cc57773c23fef02.yaml releasenotes/notes/powermax-bug-1853589-f6c7164177da0496.yaml releasenotes/notes/powermax-bug-1875478-8c9072ad9a87b83d.yaml releasenotes/notes/powermax-bug-1894086-iterator-expiration-674a28d8b9e13b34.yaml releasenotes/notes/powermax-bug-1930290-4f598329a6ced006.yaml releasenotes/notes/powermax-create-grp-source-560139c0850e60ce.yaml releasenotes/notes/powermax-deadlock-5fdcacb63ca87159.yaml releasenotes/notes/powermax-delete-replication-group-76656e96262201d5.yaml releasenotes/notes/powermax-disable-inuse-metro-89e9f398ec9e2672.yaml releasenotes/notes/powermax-existing-host-092f7daf29053d82.yaml 
releasenotes/notes/powermax-expand-replicated-volume-124c62ea78b1c347.yaml releasenotes/notes/powermax-failover-abilities-1fa0a23128f1c00b.yaml releasenotes/notes/powermax-inuse-retype-support-64bd35adab17420d.yaml releasenotes/notes/powermax-load-balance-9cd152e53ecb34fd.yaml releasenotes/notes/powermax-multiple-replication-devices-0cc532ae621ea9a5.yaml releasenotes/notes/powermax-ode-metro-support-ed50bb20f932548b.yaml releasenotes/notes/powermax-snapvx-link-mode-0050ac6b4a16c739.yaml releasenotes/notes/powermax-srdf-enhancement-56b0a2817c4d310d.yaml releasenotes/notes/powermax-storage-group-tagging-d2281e9b35994bec.yaml releasenotes/notes/powermax-tdev-deallocation-90bda0f95ab0b271.yaml releasenotes/notes/powermax-train-tag-removal-1dfa77df7440e5f5.yaml releasenotes/notes/powermax-user-defined-hostname-portgroup-0b01aaaa730dfaaf.yaml releasenotes/notes/powermax-vol-metadata-acd2555818d25b72.yaml releasenotes/notes/powermax_initiator_check-249279d30e3f8322.yaml releasenotes/notes/powermax_legacy_generation_fix-09e437f955cd9d70.yaml releasenotes/notes/powermax_port-check_enhancement-c95dd94328f31524.yaml releasenotes/notes/powerstore-nvme-tcp-support-ee37cf4fdbce1621.yaml releasenotes/notes/powerstore-request-data-validation-6268f2ed07b7bf40.yaml releasenotes/notes/privsep-rocky-35bdfe70ed62a826.yaml releasenotes/notes/project-default-types-3a14ad0d653e604e.yaml releasenotes/notes/project-id-optional-in-urls-db97e2c447167853.yaml releasenotes/notes/promotion_offline_r1_fix-f7a008d0d13a3eff.yaml releasenotes/notes/promotion_rdfg_num_fix-65a5838277ac8edf.yaml releasenotes/notes/prophetstor-generic-groups-c7136c32b2f75c0a.yaml releasenotes/notes/ps-removedriver-5ba447c50f2474e7.yaml releasenotes/notes/pure-active-active-support-dbd0d3da3ab64e64.yaml releasenotes/notes/pure-active-cluster-edf8e7e80739b0f8.yaml releasenotes/notes/pure-check-nvmefc-ports-cf2aec3952d8192f.yaml releasenotes/notes/pure-cinder-manage-aa40d62bf2bd0d33.yaml releasenotes/notes/pure-configure-pg-pod-names-525a4ce8e2f46b69.yaml releasenotes/notes/pure-custom-user-agent-dcca4cb44b69e763.yaml releasenotes/notes/pure-default-replica-interval-07de0a56f61c7c1e.yaml releasenotes/notes/pure-enhanced-stats-42a684fe4546d1b1.yaml releasenotes/notes/pure-eradicate-on-delete-1e15e1440d5cd4d6.yaml releasenotes/notes/pure-fc-wwpn-case-c1d97f3fa7663acf.yaml releasenotes/notes/pure-generic-volume-groups-2b0941103f7c01cb.yaml releasenotes/notes/pure-host-personality-3512f7ccd961d4ad.yaml releasenotes/notes/pure-iscsi-cidr-cbc1afb3850a9217.yaml releasenotes/notes/pure-iscsi-cidrs-7195eda9f7214fce.yaml releasenotes/notes/pure-list-mangeable-fed4a1b23212f545.yaml releasenotes/notes/pure-replicated-cg-03016fa79bcd51c1.yaml releasenotes/notes/pure-report-addressing-91963e29fbed32a4.yaml releasenotes/notes/pure-storage-add-qos-37958a90beff12d6.yaml releasenotes/notes/pure-storage-change-purity-support-b94057d3842a80a8.yaml releasenotes/notes/pure-storage-driver-typo-a24d19021f25a4f8.yaml releasenotes/notes/pure-storage-fix-failover-fe6260a112409742.yaml releasenotes/notes/pure-storage-multiattach-support-994da363e181d627.yaml releasenotes/notes/pure-storage-nvme-driver-f4217c00379c4827.yaml releasenotes/notes/pure-storage-revert-snapshot-b7e0ec4f958418c4.yaml releasenotes/notes/pure-v2.1-replication-0246223caaa8a9b5.yaml releasenotes/notes/pure-validate-replica-arrays-a76630cab9435770.yaml releasenotes/notes/pure-verify-https-requests-464320c97ba77a1f.yaml releasenotes/notes/pure_aa_replication-e3f5f6666f0b0c43.yaml 
releasenotes/notes/pure_consistent_hostname-419f9c31cb77e16d.yaml releasenotes/notes/pure_eg1_dr-f08544454cfd105e.yaml releasenotes/notes/pure_evergreen_one_model-0533b91fb096c468.yaml releasenotes/notes/pure_failover_sync-86814167598af2f8.yaml releasenotes/notes/pure_fc_personality-3cada97fc940e498.yaml releasenotes/notes/pure_fix_clone_cg-cfdf7d16b63882f8.yaml releasenotes/notes/pure_fix_clone_qos-4b80be464b506e4c.yaml releasenotes/notes/pure_fix_replica_interval-917cd01f23ac45cc.yaml releasenotes/notes/pure_lacp_iscsi-34678bdb98fa6bab.yaml releasenotes/notes/pure_manage_quota_delete-dd24495e883498e7.yaml releasenotes/notes/pure_nvme_tcp-a00efa8966a74f77.yaml releasenotes/notes/pure_per_gb_qos-0b96279d615b81a1.yaml releasenotes/notes/pure_pod_safemode-d64b0705828529e5.yaml releasenotes/notes/pure_realm_manage_fix-eb5fe76e7c55297d.yaml releasenotes/notes/pure_reconnect_failure-7bbc135eecc77695.yaml releasenotes/notes/pure_replication_capability-f9fa78aa96501a69.yaml releasenotes/notes/pure_retype_sync_support-fe72a726f83cb063.yaml releasenotes/notes/pure_sdk_version_checks-257cb8387ed6f5f8.yaml releasenotes/notes/pure_storage_add_volume_metadata-89f1e23573efcf83.yaml releasenotes/notes/pure_storage_fix_clone_provider_id_powervc.yaml-e794f05b0cd90f45.yaml releasenotes/notes/pure_storage_multiattach-f4aee3576757b2ff.yaml releasenotes/notes/pure_storage_scheduler_data-9b28bb309b17e8aa.yaml releasenotes/notes/pure_tempest_cg_fix-913d405f7487de00.yaml releasenotes/notes/pure_trisync_replication-d236bba76a1ebea5.yaml releasenotes/notes/pure_uniform_disconnect_bug-357c849bf12e8832.yaml releasenotes/notes/pure_version_bump-4877df33faa27efa.yaml releasenotes/notes/pure_vlan_lacp-d58f141282efb723.yaml releasenotes/notes/pure_vol_tags-a2efbd9909697771.yaml releasenotes/notes/pure_volume_group_support-303a4585277b4e1f.yaml releasenotes/notes/purestorage_rest_upgrade-b830122e37d2958a.yaml releasenotes/notes/qb-backup-5b1f2161d160648a.yaml releasenotes/notes/qb-overlay-from-snap-cache-dc102acb4820e368.yaml releasenotes/notes/qb-switch-nas-sec-opts-635c6ef1205e4f3f.yaml releasenotes/notes/qnap-enhance-support-4ab5cbb110b3303b.yaml releasenotes/notes/qnap-support-qes-200-2a3dda49afe14103.yaml releasenotes/notes/qnap-support-qes-210-de75892f684cb9c3.yaml releasenotes/notes/qnap-support-qts-440-c482f109694cb77f.yaml releasenotes/notes/qnap-tds-support-qes-5e5d766cded3a26d.yaml releasenotes/notes/queens-driver-removal-72a1a36689b6d890.yaml releasenotes/notes/quobyte-extra-requires-8dc1761859da923a.yaml releasenotes/notes/quobyte-unsupported-96c8f109eecb88c7.yaml releasenotes/notes/quobyte_vol-snap-cache-baf607f14d916ec7.yaml releasenotes/notes/quota-backup-resources-fc4e0795f520c4ab.yaml releasenotes/notes/quota-on-retype-with-snapshots-2d9fc7b2c75f899d.yaml releasenotes/notes/quota-sync-migrating-2c99e134e117a945.yaml releasenotes/notes/quota-sync-temporary-b4103ebc2c484c89.yaml releasenotes/notes/quota-temp-snapshots-9d032f97f80050c5.yaml releasenotes/notes/quota-until_refresh-updated-d35e8530f30c5522.yaml releasenotes/notes/quota-usage-duplicates-c00725089da7bbd8.yaml releasenotes/notes/quota-volume-transfer-abd1f418c6c63db0.yaml releasenotes/notes/rbd-active-active-replication-b230367912fe4a23.yaml releasenotes/notes/rbd-backend-qos-implementation-0e141b742e277d26.yaml releasenotes/notes/rbd-bug-2065713-driver-exc-handling-f8de823cd9acd767.yaml releasenotes/notes/rbd-choose-correct-stripe-unit-9d317f4717533fb4.yaml releasenotes/notes/rbd-driver-assisted-migration-2d29788243060f77.yaml 
releasenotes/notes/rbd-flatten-child-volumes-4cb0b7fcf3a1df5e.yaml releasenotes/notes/rbd-multiattach-exceptions-43066312f3b527f5.yaml releasenotes/notes/rbd-multiattach-support-2900ce0245af0239.yaml releasenotes/notes/rbd-optimize-image-upload-836c9df06674a665.yaml releasenotes/notes/rbd-rbd_secret_uuid-fsid-95daee128f59c8e4.yaml releasenotes/notes/rbd-stats-report-0c7e803bb0b1aedb.yaml releasenotes/notes/rbd-support-list-manageable-snapshots-3474c62ed83fb788.yaml releasenotes/notes/rbd-support-list-manageable-volumes-8a088a44e01d227f.yaml releasenotes/notes/rbd-support-managing-existing-snapshot-fb871a3ea98dc572.yaml releasenotes/notes/rbd-support-report-backend-state-4e124eb9efd36724.yaml releasenotes/notes/rbd-support-revert-to-snapshot-c9ca62c9efbabf5f.yaml releasenotes/notes/rbd-thin-provisioning-c98522d6fe7b71ff.yaml releasenotes/notes/rbd-total_capacity-60f10b45e3a8c8ea.yaml releasenotes/notes/rbd-update-features-bugfix-df97b50864ce9712.yaml releasenotes/notes/rbd-v2.1-replication-64a9d0bec5987faf.yaml releasenotes/notes/rbd_replication_add_secret_uuid_config-c74d65e6d3d610c6.yaml releasenotes/notes/re-add-nexenta-driver-d3af97e33551a485.yaml releasenotes/notes/readd-infortrend-driver-d9b399b53a4355f8.yaml releasenotes/notes/readd-qnap-driver-e1dc6b0c3fabe30e.yaml releasenotes/notes/rebranded-hpe-drivers-caf1dcef1afe37ba.yaml releasenotes/notes/rebranded-vnx-driver-2fb7424ddc9c41df.yaml releasenotes/notes/redirect-detach-nova-4b7b7902d7d182e0.yaml releasenotes/notes/redundancy-in-volume-url-4282087232e6e6f1.yaml releasenotes/notes/reduxio-iscsci-driver-5827c32a0c498949.yaml releasenotes/notes/refactor-disco-volume-driver-3ff0145707ec0f3e.yaml releasenotes/notes/reject-volume_clear_size-settings-larger-than-1024MiB-30b38811da048948.yaml releasenotes/notes/remove-api-v2-dadd877ee5457f79.yaml releasenotes/notes/remove-backup-service-to-driver-mapping-4d2ed6f868a64175.yaml releasenotes/notes/remove-block-device-driver-14f76dca2ee9bd38.yaml releasenotes/notes/remove-cinder-manage-logs-cmds-40fb8f475b37fb2f.yaml releasenotes/notes/remove-cinder-manage-shell-6d6f42e5a4ee8c5c.yaml releasenotes/notes/remove-configurable-migration-backend-aaee5a2f808c9b36.yaml releasenotes/notes/remove-datacore-300c667e9f504590.yaml releasenotes/notes/remove-deprecated-driver-mappings-b927d8ef9fc3b713.yaml releasenotes/notes/remove-deprecated-keymgr-d11a25c620862ed6.yaml releasenotes/notes/remove-deprecated-nova-opts-b1ec66fe3a9bb3b9.yaml releasenotes/notes/remove-deprecated-option-9ad954726ed4d8c2.yaml releasenotes/notes/remove-drbdmanage-driver-4edd1e1e43b6ba39.yaml releasenotes/notes/remove-flashsystem-multipath-0a3ee133ebe35d1e.yaml releasenotes/notes/remove-hgst-flash-driver-a930927de333329a.yaml releasenotes/notes/remove-hitachi-57d0b37cb9cc7e13.yaml releasenotes/notes/remove-hp-cliq-41f47fd61e47d13f.yaml releasenotes/notes/remove-hp3par-config-options-3cf0d865beff9018.yaml releasenotes/notes/remove-hpe-lefthand-driver-57b03ca9ada2654c.yaml releasenotes/notes/remove-ibm-nas-driver-0ed204ed0a2dcf55.yaml releasenotes/notes/remove-iscsi-target-config-options-d23e424eb8f82042.yaml releasenotes/notes/remove-lvm-over-sub-3c8addbf47827045.yaml releasenotes/notes/remove-mirrorpolicy-parameter-from-huawei-driver-d32257a60d32fd90.yaml releasenotes/notes/remove-multiattach-request-param-4444e02533f919da.yaml releasenotes/notes/remove-nas-ip-config-option-8d56c14f1f4614fc.yaml releasenotes/notes/remove-nested-quota-driver-8b56f03694e3a694.yaml 
releasenotes/notes/remove-netapp-teseries-thost-type-config-option-908941dc7d2a1d59.yaml releasenotes/notes/remove-py39-7c8a2397befd6ecc.yaml releasenotes/notes/remove-pybasedir-config-option-572604d26a57ba5e.yaml releasenotes/notes/remove-query-volume-filters-b59958fc68d3abb4.yaml releasenotes/notes/remove-rbd_keyring_conf-2d54a4de634c255c.yaml releasenotes/notes/remove-scality-fa209aae9748a1f3.yaml releasenotes/notes/remove-single-backend-7bf02e525bbbdd3a.yaml releasenotes/notes/remove-sqlalchemy-migrate-c62b541fd5f4ab10.yaml releasenotes/notes/remove-vol-in-error-from-cg-1ed0fde04ab2b5be.yaml releasenotes/notes/remove-volume-clear-shred-bde9f7f9ff430feb.yaml releasenotes/notes/remove-windows-mapping-51a004f466470a2b.yaml releasenotes/notes/remove-xml-api-392b41f387e60eb1.yaml releasenotes/notes/remove_deprecated_xml-4065b893d781f65c.yaml releasenotes/notes/remove_eseries-bb1bc134645aee50.yaml releasenotes/notes/remove_export_failure_leaves_attachment-24e0c648269b0177.yaml releasenotes/notes/remove_glusterfs_volume_driver-d8fd2cf5f38e754b.yaml releasenotes/notes/remove_lvmdriver-9c35f83132cd2ac8.yaml releasenotes/notes/remove_osapi_volume_base_url-33fed24c4ad1b2b6.yaml releasenotes/notes/remove_service_filter-380e7990bfdbddc8.yaml releasenotes/notes/remove_storwize_npiv-b704ff2d97207666.yaml releasenotes/notes/remove_veritas_hyperscale_driver-988ad62d2417124f.yaml releasenotes/notes/remove_volume_tmp_dir_option-c83c5341e5a42378.yaml releasenotes/notes/removed-apiv1-616b1b76a15521cf.yaml releasenotes/notes/removed-isertgtadm-7ccefab5d3e89c59.yaml releasenotes/notes/removed-rpc-topic-config-options-21c2b3f0e64f884c.yaml releasenotes/notes/removed-scality-7151638fdac3ed9d.yaml releasenotes/notes/removing-cinder-all-9f5c3d1eb230f9e6.yaml releasenotes/notes/removing-middleware-sizelimit-ba86907acbda83de.yaml releasenotes/notes/rename-backup-driver-status-check-interval-option-6b27c1e29cb863e9.yaml releasenotes/notes/rename-huawei-driver-092025e46b65cd48.yaml releasenotes/notes/rename-iscsi-target-config-options-24913d7452c4a58e.yaml releasenotes/notes/rename-windows-iscsi-a7b0ca62a48c1371.yaml releasenotes/notes/rename_xiv_ds8k_to_ibm_storage-154eca69c44b3f95.yaml releasenotes/notes/replication-group-7c6c8a153460ca58.yaml releasenotes/notes/replication-v2.1-3par-b3f780a109f9195c.yaml releasenotes/notes/replication-v2.1-lefthand-745b72b64e5944c3.yaml releasenotes/notes/replication-v2.1-storwize-2df7bfd8c253090b.yaml releasenotes/notes/report-backend-state-in-service-list-1e4ee5a2c623671e.yaml releasenotes/notes/report-backend-state-in-service-list-739a5398eec4a6b7.yaml releasenotes/notes/report-backend-state-in-service-list-93e9f2b204b735c0.yaml releasenotes/notes/reset-status-notification-update-4a80a8b5feb821ef.yaml releasenotes/notes/reset-status-notification-update-b655615871db4659.yaml releasenotes/notes/retype-assisted-migration-6cdc7f9b21beb859.yaml releasenotes/notes/retype-encrypted-volume-49b66d3e8e65f9a5.yaml releasenotes/notes/revert-snapshot-non-admin-8485be55060eab0d.yaml releasenotes/notes/revert-volume-to-snapshot-6aa0dffb010265e5.yaml releasenotes/notes/rpc-apis-3.0-b745f429c11d8198.yaml releasenotes/notes/rpc-update-50bef83f48d4f96f.yaml releasenotes/notes/rpc_compatibility-375be8ac3158981d.yaml releasenotes/notes/rsd-cinder-driver-d71b88292536bfea.yaml releasenotes/notes/sandstone-iscsi-driver-31ed72d5657a4307.yaml releasenotes/notes/sc-handle-multiattach-onterminate-6ab1f96f21bb284d.yaml releasenotes/notes/scaleio-backup-via-snapshot-8e75aa3f4570e17c.yaml 
releasenotes/notes/scaleio-consistency-groups-707f9b4ffcb3c14c.yaml releasenotes/notes/scaleio-default-volume-provisioning-c648118fcc8f297f.yaml releasenotes/notes/scaleio-deprecate-1.32-32033134fec181bb.yaml releasenotes/notes/scaleio-deprecate-config-1aa300d0c78ac81c.yaml releasenotes/notes/scaleio-enable-multiattach-e7d84ffa282842e9.yaml releasenotes/notes/scaleio-generic-volume-group-ee36e4dba8893422.yaml releasenotes/notes/scaleio-get-manageable-volumes-dda1e7b8e22be59e.yaml releasenotes/notes/scaleio-manage-existing-32217f6d1c295193.yaml releasenotes/notes/scaleio-manage-existing-snapshot-5bbd1818654c0776.yaml releasenotes/notes/scaleio-qos-support-2ba20be58150f251.yaml releasenotes/notes/scaleio-rebranding-d2d113c5d8e5c118.yaml releasenotes/notes/scaleio-remove-force-delete-config-48fae029e3622d6d.yaml releasenotes/notes/scaleio-scaling-qos-50c58e43d4b54247.yaml releasenotes/notes/scaleio-thin-provisioning-support-9c3b9203567771dd.yaml releasenotes/notes/scaleio-vxflexos-rebrand-27dfe2b82d35b6a2.yaml releasenotes/notes/scaleio-zeropadding-a0273c56c4d14fca.yaml releasenotes/notes/scaling-backup-service-7e5058802d2fb3dc.yaml releasenotes/notes/seagate-new-driver-d420fad549e9045f.yaml releasenotes/notes/service_dynamic_log_change-55147d288be903f1.yaml releasenotes/notes/sf-add-migration-support-691ace064d7576e9.yaml releasenotes/notes/sf-fix-clone-and-request-timeout-issues-56f7a7659c7ec775.yaml releasenotes/notes/sf-fix-duplicate-volume-request-lost-adefacda1298dc62.yaml releasenotes/notes/sf-fix-error-on-cluster-rebalancing-515bf41104cd181a.yaml releasenotes/notes/shared-backend-config-d841b806354ad5be.yaml releasenotes/notes/sheepdog-driver-removal-b63d12460e886c33.yaml releasenotes/notes/sheepdog-mark-unsupported-648b2458d4a198de.yaml releasenotes/notes/show-provider-id-for-admin-ff4fd5a2518a4bfa.yaml releasenotes/notes/slow-get-volume-stats-91b84c6e661dc605.yaml releasenotes/notes/slug-b6a0fc3db0a2dd45.yaml releasenotes/notes/slug-qnap-driver-d4465ea6009c66df.yaml releasenotes/notes/smbfs-drop-alloc-data-file-8b94da952a3b1548.yaml releasenotes/notes/smbfs-fixed-image-9b642b63fcb79c18.yaml releasenotes/notes/smbfs-manage-unmanage-f1502781dd5f82cb.yaml releasenotes/notes/smbfs-pools-support-bc43c653cfb1a34f.yaml releasenotes/notes/smbfs-removed-options-2c86101340273252.yaml releasenotes/notes/smbfs-revert-snapshot-5b265ed5ded951dc.yaml releasenotes/notes/smbfs-snapshot-attach-14742fe8f5864ac6.yaml releasenotes/notes/snapshot-in-use-without-force-86c6d74ebc9c0d60.yaml releasenotes/notes/snapshot_backing_up_status_support-164fbbb2a564e137.yaml releasenotes/notes/solidfire-active-active-replication-support-f77e0e12320f8b21.yaml releasenotes/notes/solidfire-fix-retype-and-name-exception-on-migration-2af26f095b7cb345.yaml releasenotes/notes/solidfire-no-attach-metadata-b17729ebd34703db.yaml releasenotes/notes/solidfire-scaled-qos-9b8632453909e2db.yaml releasenotes/notes/solidfire-v2.1-replication-570a1f12f70e67b4.yaml releasenotes/notes/spdk-add-config-parameter-39a880ec22956fd2.yaml releasenotes/notes/spdk-nvmf-target-31e4d4dd5e2f2114.yaml releasenotes/notes/spdk-volume-081f6e72396b30e8.yaml releasenotes/notes/speed-up-starting-cinder-backup-76c1618b4cdb9d6e.yaml releasenotes/notes/split-out-nested-quota-driver-e9493f478d2b8be5.yaml releasenotes/notes/ssl-cert-fix-42e8f263c15d5343.yaml releasenotes/notes/store-volume-format-info-1e17e029a9a9e578.yaml releasenotes/notes/storpool-clone-across-pools-b3f7923dee35503a.yaml releasenotes/notes/storpool-clone-better-dca90f40c9273de9.yaml 
releasenotes/notes/storpool-move-api-and-config-code-in-tree-92cfe30690b78ef1.yaml releasenotes/notes/storpool-multiattach-f9b7edccd4db7e02.yaml releasenotes/notes/storpool-revert-to-snapshot-a202358ee16ecb62.yaml releasenotes/notes/storpool-volume-driver-4d5f16ad9c2f373a.yaml releasenotes/notes/storwize-backup-snapshot-support-728e18dfa0d42943.yaml releasenotes/notes/storwize-cg-replication-b038ff0d39fe909f.yaml releasenotes/notes/storwize-disable-create-volume-with-non-cgsnap-group-6cba8073e3d6cadd.yaml releasenotes/notes/storwize-dr-pool-support-52db3a95e54aef88.yaml releasenotes/notes/storwize-generic-volume-group-74495fa23e059bf9.yaml releasenotes/notes/storwize-gmcv-support-8aceee3f40eddb9f.yaml releasenotes/notes/storwize-hyperswap-host-site-update-621e763768fab9ee.yaml releasenotes/notes/storwize-hyperswap-support-b830182e1058cb4f.yaml releasenotes/notes/storwize-multiple-management-ip-1cd364d63879d9b8.yaml releasenotes/notes/storwize-pool-aware-support-7a40c9934642b202.yaml releasenotes/notes/storwize-retain-aux-volme-f90fa6fde657d64f.yaml releasenotes/notes/storwize-revert-snapshot-681c76d68676558a.yaml releasenotes/notes/storwize-split-up-__init__-153fa8f097a81e37.yaml releasenotes/notes/storwize_iscsi_multipath_enhance-9de9dc29661869cd.yaml releasenotes/notes/support-az-in-volumetype-8yt6fg67de3976ty.yaml releasenotes/notes/support-cg-2b55da0bd9f69c7d.yaml releasenotes/notes/support-create-volume-from-backup-d363e2b502a76dc2.yaml releasenotes/notes/support-extend-inuse-volume-9e4atf8912qaye99.yaml releasenotes/notes/support-glance-multiple-stores-79d11c5344f41446.yaml releasenotes/notes/support-huawei-consistency-group-b666f8f6c6cddd8f.yaml releasenotes/notes/support-image-signature-verification-yu8qub7286et9dh4.yaml releasenotes/notes/support-images-api-2.11-3699b20670db1843.yaml releasenotes/notes/support-incremental-backup-completion-in-rbd-1f2165fefcc470d1.yaml releasenotes/notes/support-metadata-based-snapshot-list-filtering-6e6df68a7ce981f5.yaml releasenotes/notes/support-metadata-for-backup-3d8753f67e2934fa.yaml releasenotes/notes/support-modern-compression-9984f77bb12e97e0.yaml releasenotes/notes/support-project-id-filter-for-limit-bc5d49e239baee2a.yaml releasenotes/notes/support-retype-operation-for-global-mirror-volumes-e7091ac130e41cbd.yaml releasenotes/notes/support-tenants-project-in-attachment-list-3edd8g138a28s4r8.yaml releasenotes/notes/support-to-query-cinder-resources-filter-by-update-at-and-created-at-32ae9aaea131d598.yaml releasenotes/notes/support-volume-glance-metadata-query-866b9e3beda2cd55.yaml releasenotes/notes/support-zeromq-messaging-driver-d26a1141290f5548.yaml releasenotes/notes/support-zmq-messaging-41085787156fbda1.yaml releasenotes/notes/support_deferred_deletion_in_RBD-0c5d96f8eac5b50a.yaml releasenotes/notes/support_sort_backup_by_name-0b080bcb60c0eaa0.yaml releasenotes/notes/support_sort_snapshot_with_name-7b66a2d8e587275d.yaml releasenotes/notes/supported-drivers-9c95dd2378cd308d.yaml releasenotes/notes/svf-revert-to-snapshot-globalmirror-volume-e70fdb9115020283.yaml releasenotes/notes/switch-to-alembic-2bbe27749fde70ff.yaml releasenotes/notes/sync-bump-versions-a1e6f6359173892e.yaml releasenotes/notes/synchronous-mirror-support-for-netapp-backends-3cece6d56fec332c.yaml releasenotes/notes/synology-support-uc-model-9cda442828c2eb32.yaml releasenotes/notes/synology-volume-driver-c5e0f655b04390ce.yaml releasenotes/notes/tintri-driver-removal-91a0931c417481d2.yaml releasenotes/notes/tintri_image_direct_clone-f73e561985aad867.yaml 
releasenotes/notes/tooz-coordination-heartbeat-cfac1064fd7878be.yaml releasenotes/notes/tooz-locks-0f9f2cc15f8dad5a.yaml releasenotes/notes/toyou-netstor-storage-acs5000-fc-driver-f0d7428924bfeda1.yaml releasenotes/notes/toyou-netstor-storage-tyds-iscsi-driver-798da24653d8cd0d.yaml releasenotes/notes/tpool-size-11121f78df24db39.yaml releasenotes/notes/transfer-encrypted-volume-2f040a6993435e79.yaml releasenotes/notes/transfer-snapshots-with-volume-a7763570a807c742.yaml releasenotes/notes/type-extra-spec-policies-b7742b0ac2732864.yaml releasenotes/notes/unisphere-for-powermax-10-support-637dfde0f8fa9862.yaml releasenotes/notes/unity-backup-via-snapshot-81a2d5a118c97042.yaml releasenotes/notes/unity-compressed-volume-support-4998dee84534a324.yaml releasenotes/notes/unity-enable-ssl-14db2497225c4395.yaml releasenotes/notes/unity-fast-clone-02ae88ba8fdef145.yaml releasenotes/notes/unity-force-detach-7c89e72105f9de61.yaml releasenotes/notes/unity-group-replication-support-97d74275a84b06af.yaml releasenotes/notes/unity-multiattach-support-993b997e522d9e84.yaml releasenotes/notes/unity-remove-empty-host-17d567dbb6738e4e.yaml releasenotes/notes/unity-replication-support-2ab121a5ea5a2ade.yaml releasenotes/notes/unity-return-logged-out-initiator-6ab1f96f21bb284c.yaml releasenotes/notes/unity-retype-volume-support-773ae17b8811fb3f.yaml releasenotes/notes/unity-storage-assisted-migration-support-145fce87f36f1ecc.yaml releasenotes/notes/unity-thick-support-fdbef833f2b4d54f.yaml releasenotes/notes/unity-volume-tiering-policy-support-c6d0aaff4b141bd3.yaml releasenotes/notes/unsupport-prophetstor-bfbc674fd86303db.yaml releasenotes/notes/unsupport-veritas-access-ecfb4122770d93f9.yaml releasenotes/notes/untyped_to_default_type-0068e6bc8000986c.yaml releasenotes/notes/update-service-uuid-f25dbb05efd45d87.yaml releasenotes/notes/update_config_options_disco_volume_driver-07e52aa43e83c243.yaml releasenotes/notes/updated-at-list-0f899098f7258331.yaml releasenotes/notes/upgrade-checks-e58c4a81c857847d.yaml releasenotes/notes/upload-volume-to-multiple-stores-ab130774897e41c3.yaml releasenotes/notes/use-castellan-key-manager-4911c3c4908ca633.yaml releasenotes/notes/use-glance-v2-api-and-deprecate-glance_api_version-1a3b698429cb754e.yaml releasenotes/notes/use-multipath-everywhere-3707593eebdaf9eb.yaml releasenotes/notes/use-oslo_middleware_sizelimit-5f171cf1c44444f8.yaml releasenotes/notes/user-visible-extra-specs-6cf7e49c6be57a01.yaml releasenotes/notes/ussuri-release-prelude-ceeb11bc7fe46191.yaml releasenotes/notes/ussuri-unsupported-drivers-not-removed-8cfb3e01b720e9d1.yaml releasenotes/notes/validate-expired-user-tokens-40b15322197653ae.yaml releasenotes/notes/validate_vol_create_uuids-4f08b4ef201385f6.yaml releasenotes/notes/verbose-online-migrations-94fb7e8a85cdbc10.yaml releasenotes/notes/verify-dorado-luntype-for-huawei-driver-4fc2f4cca3141bb3.yaml releasenotes/notes/veritas_access_driver-c73b2320ba9f46a8.yaml releasenotes/notes/veritas_access_iscsi_driver-de642dad9e7d0890.yaml releasenotes/notes/veritas_cluster_nfs_unsupported-88ab3ea5cbb6cd88.yaml releasenotes/notes/vhd-disk-format-upload-to-image-5851f9d35f4ee447.yaml releasenotes/notes/victoria-release-prelude-0fbae229ebe8f9b7.yaml releasenotes/notes/virtuozzo-unsupported-645b47dceb0ddbd2.yaml releasenotes/notes/vmax-attach-snapshot-3137e59ab4ff39a4.yaml releasenotes/notes/vmax-clone-cg-09fce492931c957f.yaml releasenotes/notes/vmax-compression-support-1dfe463328b56d7f.yaml releasenotes/notes/vmax-deprecate-backend-xml-708a41919bcc55a8.yaml 
releasenotes/notes/vmax-driver-multiattach-support-43a7f99cd2d742ee.yaml releasenotes/notes/vmax-failover-unisphere-2de78d1f76b5f836.yaml releasenotes/notes/vmax-generic-volume-group-28b3b2674c492bbc.yaml releasenotes/notes/vmax-iscsi-chap-authentication-e47fcfe310b85f7b.yaml releasenotes/notes/vmax-iscsi-multipath-76cc09bacf4fdfbf.yaml releasenotes/notes/vmax-list-manageable-vols-snaps-6a7f5aa114fae8f3.yaml releasenotes/notes/vmax-manage-unmanage-snapshot-3805c4ac64b8133a.yaml releasenotes/notes/vmax-metadata-ac9bdd31e7e561c3.yaml releasenotes/notes/vmax-oversubscription-d61d0e3b1df2487a.yaml releasenotes/notes/vmax-powermax-rebrand-70569fc8cdf40a8c.yaml releasenotes/notes/vmax-qos-eb40ed35bd2f457d.yaml releasenotes/notes/vmax-rename-dell-emc-f9ebfb9eb567f427.yaml releasenotes/notes/vmax-replication-enhancements-c3bec80a3abb6d2e.yaml releasenotes/notes/vmax-replication-enhancements2-0ba03224cfca9959.yaml releasenotes/notes/vmax-replication-group-2f65ed92d761f90d.yaml releasenotes/notes/vmax-rest-94e48bed6f9c134c.yaml releasenotes/notes/vmax-rest-compression-10c2590052a9465e.yaml releasenotes/notes/vmax-rest-livemigration-885dd8731d5a8a88.yaml releasenotes/notes/vmax-rest-qos-6bb4073b92c932c6.yaml releasenotes/notes/vmax-rest-replication-612fcfd136cc076e.yaml releasenotes/notes/vmax-rest-retype-ceba5e8d04f637b4.yaml releasenotes/notes/vmax-retype-replicated-volumes-325be6e5fd626819.yaml releasenotes/notes/vmax-revert-volume-to-snapshot-b4a837d84a8b2a85.yaml releasenotes/notes/vmax-volume-migration-992c8c68e2207bbc.yaml releasenotes/notes/vmdk_backup_restore-41f807b7bc8e0ae8.yaml releasenotes/notes/vmdk_config_conn_pool_size-0658c497e118533f.yaml releasenotes/notes/vmdk_default_task_poll_interval-665f032bebfca39e.yaml releasenotes/notes/vmdk_image_ova-d3b3a0e72221110c.yaml releasenotes/notes/vmdk_vc_51-df29eeb5fc93fbb1.yaml releasenotes/notes/vmem-7000-iscsi-3c8683dcc1f0b9b4.yaml releasenotes/notes/vmware-vmdk-config-eb70892e4ccf8f3c.yaml releasenotes/notes/vmware-vmdk-manage-existing-0edc20d9d4d19172.yaml releasenotes/notes/vmware-vmdk-removed-bfb04eed77b95fdf.yaml releasenotes/notes/vmware-vmdk-revert-to-snapshot-ee3d638565649f44.yaml releasenotes/notes/vmware-vmdk-snapshot-template-d3dcfc0906c02edd.yaml releasenotes/notes/vmware_adapter_type-66164bc3857f244f.yaml releasenotes/notes/vmware_enable_volume_stats-1ef84e170187f0fa.yaml releasenotes/notes/vmware_fcd_retype-979418c39fd5d59d.yaml releasenotes/notes/vmware_fcd_snapshot-b702f7e950dfbe7a.yaml releasenotes/notes/vmware_fcd_storage_policy-636d6a95f1c44b6e.yaml releasenotes/notes/vmware_lazy_create-52f52f71105d2067.yaml releasenotes/notes/vmware_retype_adapter_type-dbd8935b8d3bcb1b.yaml releasenotes/notes/vmware_vmdk_datastore_regex-fe7b68ad69ef7384.yaml releasenotes/notes/vmware_vmdk_default_adapter_type-8e247bce5b229c7a.yaml releasenotes/notes/vmware_vmdk_enforce_vc_55-7e1b3ede9bf2129b.yaml releasenotes/notes/vmware_vmdk_managed_by-3de05504d0f9a65a.yaml releasenotes/notes/vmware_vmdk_nfs41-450908bbbc9eea6d.yaml releasenotes/notes/vmware_vmdk_paravirtual-3d5eeef96dcbcfb7.yaml releasenotes/notes/vmware_vmdk_storage_profile_config-fa3784f1ed50df9e.yaml releasenotes/notes/vnx-add-async-migrate-option-0734164feeaecadc.yaml releasenotes/notes/vnx-add-force-detach-support-26f215e6f70cc03b.yaml releasenotes/notes/vnx-async-migration-support-3c449139bb264004.yaml releasenotes/notes/vnx-configurable-migration-rate-5e0a2235777c314f.yaml releasenotes/notes/vnx-fail-delete-lun-due-to-tmp-snapshot-edd3cdd85e28be60.yaml 
releasenotes/notes/vnx-new-driver-7e96934c2d3a6edc.yaml releasenotes/notes/vnx-perf-optimize-bd55dc3ef7584228.yaml releasenotes/notes/vnx-qos-support-7057196782e2c388.yaml releasenotes/notes/vnx-replication-group-2ebf04c80e2171f7.yaml releasenotes/notes/vnx-replication-v2.1-4d89935547183cc9.yaml releasenotes/notes/vnx-repv2.1-config-update-cc2f60c20aec88dd.yaml releasenotes/notes/vnx-revert-to-snapshot-e5494b6fb5ad5a1e.yaml releasenotes/notes/vnx-update-sg-in-cache-3ecb673727bea79b.yaml releasenotes/notes/vnx_clone_cg-db74ee2ea71bedcb.yaml releasenotes/notes/volume-filtering-for-quoted-display-name-7f5e8ac888a73001.yaml releasenotes/notes/volume-migrate-create-delete-notification-f567cae5522852ec.yaml releasenotes/notes/volume-type-encryption-api-policy-base-4334ca94d73df238.yaml releasenotes/notes/volume-type-encryption-api-policy-granularity-7071e45f4c7894c5.yaml releasenotes/notes/volume_init_max_objects_retrieval-966f607c46190946.yaml releasenotes/notes/volumes-summary-6b2485f339c88a91.yaml releasenotes/notes/vrts_hyperscale_driver-5b63ab706ea8ae89.yaml releasenotes/notes/vxflexos-3.5.x-support-403427dc65a7a4f6.yaml releasenotes/notes/vxflexos-migration-support-a04a73cda323b382.yaml releasenotes/notes/vxflexos-powerflex-rebrand-37dfe2b82d35b6a2.yaml releasenotes/notes/vxflexos-replication-support-f43e62df35e16e3a.yaml releasenotes/notes/vxflexos-revert-to-snapshot-a90c40ec476cc2bd.yaml releasenotes/notes/vxflexos-support-compression-9139e556677ac093.yaml releasenotes/notes/vxflexos_drop_deprecated_opt-3231a222e458fa92.yaml releasenotes/notes/vzstorage-log-path-7539342e562a2e4a.yaml releasenotes/notes/vzstorage-volume-format-cde85d3ad02f6bb4.yaml releasenotes/notes/win-iscsi-config-portals-51895294228d7883.yaml releasenotes/notes/windows-multiple-backends-9aa83631ad3d42f2.yaml releasenotes/notes/windows-volume-backup-b328858a20f5a499.yaml releasenotes/notes/xena-policy-changes-7a563020337f6be9.yaml releasenotes/notes/xena-release-prelude-2190d8c515dbedc1.yaml releasenotes/notes/xiv-ds8k-replication-2.1-996c871391152e31.yaml releasenotes/notes/xiv-generic-volume-group-4609cdc86d6aaf81.yaml releasenotes/notes/xiv-new-qos-independent-type-58885c77efe24798.yaml releasenotes/notes/xiv-replication-group-7ca437c90f2474a7.yaml releasenotes/notes/xtremio-cg-from-cg-e05cf286e3a1e943.yaml releasenotes/notes/xtremio-generic-groups-912e11525573e970.yaml releasenotes/notes/xtremio-ig-cleanup-bbb4bee1f1e3611c.yaml releasenotes/notes/xtremio-manage-snapshot-5737d3ad37df81d1.yaml releasenotes/notes/xtremio-support-multiattache-20b1882a1216a8b2.yaml releasenotes/notes/yoga-release-prelude-9fc369cf00df1a48.yaml releasenotes/notes/zfssa-iscsi-get-manageable-volumes-eb23a11570c813d7.yaml releasenotes/notes/zfssa-iscsi-multi-connect-3be99ee84660a280.yaml releasenotes/notes/zfssa-volume-manage-unmanage-ccd80807103b69c8.yaml releasenotes/notes/zte_cinder_driver-76ba6d034e1b6f65.yaml releasenotes/source/2023.1.rst releasenotes/source/2023.2.rst releasenotes/source/2024.1.rst releasenotes/source/2024.2.rst releasenotes/source/2025.1.rst releasenotes/source/README.txt releasenotes/source/conf.py releasenotes/source/index.rst releasenotes/source/liberty.rst releasenotes/source/mitaka.rst releasenotes/source/newton.rst releasenotes/source/ocata.rst releasenotes/source/pike.rst releasenotes/source/queens.rst releasenotes/source/rocky.rst releasenotes/source/stein.rst releasenotes/source/train.rst releasenotes/source/unreleased.rst releasenotes/source/ussuri.rst releasenotes/source/victoria.rst 
releasenotes/source/wallaby.rst releasenotes/source/xena.rst releasenotes/source/yoga.rst releasenotes/source/zed.rst releasenotes/source/_static/.placeholder releasenotes/source/_templates/.placeholder releasenotes/source/locale/en_GB/LC_MESSAGES/releasenotes.po releasenotes/source/locale/ja/LC_MESSAGES/releasenotes.po roles/configure-run-migration-tests/defaults/main.yaml roles/configure-run-migration-tests/tasks/main.yaml roles/save-cinder-migration-results/defaults/main.yaml roles/save-cinder-migration-results/tasks/main.yaml roles/save-cinder-migration-results/templates/migration_results_reporter.py.j2 tools/check_exec.py tools/coding-checks.sh tools/fast8.sh tools/generate_driver_list.py tools/mypywrap.sh tools/test-setup.sh tools/config/__init__.py tools/config/check_uptodate.sh tools/config/cinder-config-generator.conf tools/config/cinder-policy-generator.conf tools/config/generate_cinder_opts.py././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315576.0 cinder-27.0.0/cinder.egg-info/dependency_links.txt0000664000175000017500000000000100000000000022073 0ustar00zuulzuul00000000000000 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315576.0 cinder-27.0.0/cinder.egg-info/entry_points.txt0000664000175000017500000000354600000000000021333 0ustar00zuulzuul00000000000000[cinder.scheduler.filters] AvailabilityZoneFilter = cinder.scheduler.filters.availability_zone_filter:AvailabilityZoneFilter CapabilitiesFilter = cinder.scheduler.filters.capabilities_filter:CapabilitiesFilter CapacityFilter = cinder.scheduler.filters.capacity_filter:CapacityFilter DifferentBackendFilter = cinder.scheduler.filters.affinity_filter:DifferentBackendFilter DriverFilter = cinder.scheduler.filters.driver_filter:DriverFilter InstanceLocalityFilter = cinder.scheduler.filters.instance_locality_filter:InstanceLocalityFilter JsonFilter = cinder.scheduler.filters.json_filter:JsonFilter RetryFilter = cinder.scheduler.filters.ignore_attempted_hosts_filter:IgnoreAttemptedHostsFilter SameBackendFilter = cinder.scheduler.filters.affinity_filter:SameBackendFilter [cinder.scheduler.weights] AllocatedCapacityWeigher = cinder.scheduler.weights.capacity:AllocatedCapacityWeigher CapacityWeigher = cinder.scheduler.weights.capacity:CapacityWeigher ChanceWeigher = cinder.scheduler.weights.chance:ChanceWeigher GoodnessWeigher = cinder.scheduler.weights.goodness:GoodnessWeigher VolumeNumberWeigher = cinder.scheduler.weights.volume_number:VolumeNumberWeigher [console_scripts] cinder-api = cinder.cmd.api:main cinder-backup = cinder.cmd.backup:main cinder-manage = cinder.cmd.manage:main cinder-rootwrap = oslo_rootwrap.cmd:main cinder-rtstool = cinder.cmd.rtstool:main cinder-scheduler = cinder.cmd.scheduler:main cinder-status = cinder.cmd.status:main cinder-volume = cinder.cmd.volume:main cinder-volume-usage-audit = cinder.cmd.volume_usage_audit:main [oslo.config.opts] cinder = cinder.opts:list_opts [oslo.config.opts.defaults] cinder = cinder.common.config:set_external_library_defaults [oslo.policy.enforcer] cinder = cinder.policy:get_enforcer [oslo.policy.policies] cinder = cinder.policies:list_rules [wsgi_scripts] cinder-wsgi = cinder.wsgi.wsgi:initialize_application ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315576.0 cinder-27.0.0/cinder.egg-info/not-zip-safe0000664000175000017500000000000100000000000020253 0ustar00zuulzuul00000000000000 
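The ``[cinder.scheduler.filters]`` and ``[cinder.scheduler.weights]`` entry points listed in ``entry_points.txt`` above are the class names that the Cinder scheduler loads by name at runtime. As a hedged illustration of how an operator selects among them (the ``scheduler_default_filters`` and ``scheduler_default_weighers`` option names are assumptions based on the Cinder scheduler configuration, and the values shown are only an example, not necessarily the shipped defaults):

.. code-block:: ini

   [DEFAULT]
   # Filters are applied in order to discard backends that cannot host the volume.
   scheduler_default_filters = AvailabilityZoneFilter,CapacityFilter,CapabilitiesFilter
   # Weighers rank the remaining backends; CapacityWeigher prefers the most free space.
   scheduler_default_weighers = CapacityWeigher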
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315576.0 cinder-27.0.0/cinder.egg-info/pbr.json0000664000175000017500000000006000000000000017477 0ustar00zuulzuul00000000000000{"git_version": "c36f40684", "is_release": true}././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315576.0 cinder-27.0.0/cinder.egg-info/requires.txt0000664000175000017500000000421400000000000020426 0ustar00zuulzuul00000000000000pbr>=5.8.0 decorator>=4.4.2 eventlet!=0.32.0,>=0.30.1 greenlet>=0.4.16 iso8601>=0.1.12 jsonschema>=3.2.0 keystoneauth1>=4.2.1 keystonemiddleware>=9.1.0 lxml>=4.5.2 oslo.config>=8.3.2 oslo.concurrency>=4.5.0 oslo.context>=3.4.0 oslo.db>=11.0.0 oslo.log>=4.6.1 oslo.messaging>=14.6.0 oslo.middleware>=4.1.1 oslo.policy>=4.5.0 oslo.privsep>=2.6.2 oslo.reports>=3.2.0 oslo.rootwrap>=6.2.0 oslo.serialization>=4.2.0 oslo.service>=2.8.0 oslo.upgradecheck>=1.1.1 oslo.utils>=6.0.0 oslo.versionedobjects>=2.4.0 osprofiler>=3.4.0 packaging>=20.4 paramiko>=2.7.2 Paste>=3.4.3 PasteDeploy>=2.1.0 pyparsing>=2.4.7 python-barbicanclient>=5.0.1 python-glanceclient>=3.2.2 python-keystoneclient>=4.1.1 python-novaclient>=18.2.0 python-swiftclient>=3.10.1 requests>=2.25.1 Routes>=2.4.1 taskflow>=4.5.0 rtslib-fb>=2.1.74 SQLAlchemy>=1.4.23 stevedore>=3.2.2 tabulate>=0.8.7 tenacity>=6.3.1 WebOb>=1.8.6 oslo.i18n>=5.1.0 oslo.vmware>=3.10.0 os-brick>=6.10.0 os-win>=5.5.0 tooz>=2.8.0 google-api-python-client>=1.11.0 castellan>=3.7.0 cryptography>=3.1 cursive>=0.2.2 zstd>=1.4.5.1 boto3>=1.18.49 distro>=1.8.0 tzdata>=2022.4 [all] websocket-client>=1.3.2 pyOpenSSL>=17.5.0 storops>=0.5.10 pywbem>=0.7.0 python-3parclient>=4.2.10 krest>=1.3.0 infinisdk>=103.0.1 py-pure-client>=1.47.0 rsd-lib>=1.1.0 storpool>=7.1.0 storpool.spopenstack>=2.2.1 dfs-sdk>=1.2.25 rbd-iscsi-client>=0.1.8 python-linstor>=1.7.0 psutil>=5.7.2 [datacore] websocket-client>=1.3.2 [datera] dfs-sdk>=1.2.25 [ds8k] pyOpenSSL>=17.5.0 [fujitsu] pywbem>=0.7.0 [hpe3par] python-3parclient>=4.2.10 [infinidat] infinisdk>=103.0.1 [kaminario] krest>=1.3.0 [linstor] python-linstor>=1.7.0 [powermax] pyOpenSSL>=17.5.0 [pure] py-pure-client>=1.47.0 [quobyte] psutil>=5.7.2 [rbd_iscsi] rbd-iscsi-client>=0.1.8 [rsd] rsd-lib>=1.1.0 [storpool] storpool>=7.1.0 storpool.spopenstack>=2.2.1 [test] hacking<7.1.0,>=7.0.0 flake8-import-order<0.19.0 flake8-logging-format>=0.6.0 stestr>=3.2.1 coverage>=5.5 ddt>=1.4.4 fixtures>=3.0.0 oslotest>=4.5.0 PyMySQL>=0.10.0 psycopg2-binary>=2.8.5 SQLAlchemy-Utils>=0.37.8 testtools>=2.4.0 doc8>=0.8.1 mypy<1.18.0,>=1.7.0 moto>=5.0.0 distro>=1.8.0 [unity] storops>=0.5.10 [vnx] storops>=0.5.10 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315576.0 cinder-27.0.0/cinder.egg-info/top_level.txt0000664000175000017500000000000700000000000020554 0ustar00zuulzuul00000000000000cinder ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315577.4231215 cinder-27.0.0/doc/0000775000175000017500000000000000000000000013634 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/doc/.gitignore0000664000175000017500000000006100000000000015621 0ustar00zuulzuul00000000000000_build/* source/contributor/api/* .autogenerated ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 
cinder-27.0.0/doc/README.rst0000664000175000017500000000154100000000000015324 0ustar00zuulzuul00000000000000======================= Cinder Development Docs ======================= Files under this directory tree are used for generating the documentation for the Cinder source code. Developer documentation is built to: https://docs.openstack.org/cinder/latest/ Tools ===== Sphinx The Python Sphinx package is used to generate the documentation output. Information on Sphinx, including formatting information for RST source files, can be found in the `Sphinx online documentation `_. Graphviz Some of the diagrams are generated using the ``dot`` language from Graphviz. See the `Graphviz documentation `_ for Graphviz and dot language usage information. Building Documentation ====================== Doc builds are performed using tox with the ``docs`` target:: % cd .. % tox -e docs ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315577.4231215 cinder-27.0.0/doc/ext/0000775000175000017500000000000000000000000014434 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/doc/ext/__init__.py0000664000175000017500000000000000000000000016533 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/doc/ext/cinder_driverlist.py0000664000175000017500000000162500000000000020525 0ustar00zuulzuul00000000000000# Copyright 2016 Dell Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # from cinder import utils import sys def setup(app): print('** Generating driver list...') rv = utils.execute(sys.executable, './tools/generate_driver_list.py', ['docs']) print(rv[0]) return { 'parallel_read_safe': True, 'parallel_write_safe': True, } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/doc/ext/driver_opts.py0000664000175000017500000001226500000000000017354 0ustar00zuulzuul00000000000000# Copyright (c) 2018 Huawei Technologies Co., Ltd. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""Sphinx extension to be able to extract driver config options from code.""" import importlib from docutils import nodes from docutils.parsers import rst from docutils.parsers.rst import directives from docutils import statemachine as sm from sphinx.util import logging from oslo_config import cfg LOG = logging.getLogger(__name__) class ConfigTableDirective(rst.Directive): """Directive to extract config options into docs output.""" option_spec = { 'table-title': directives.unchanged, 'config-target': directives.unchanged, 'exclude-list': directives.unchanged, 'exclusive-list': directives.unchanged, } has_content = True def _doc_module(self, module, filters, exclusive): """Extract config options from module.""" options = [] try: mod = importlib.import_module(module) for prop in dir(mod): # exclusive-list overrides others if exclusive and prop not in exclusive: continue if prop in filters: continue thing = getattr(mod, prop) if isinstance(thing, cfg.Opt) and thing not in options: # An individual config option options.append(thing) elif (isinstance(thing, list) and len(thing) > 0 and isinstance(thing[0], cfg.Opt)): # A list of config opts options.extend(thing) except Exception as e: self.error('Unable to import {}: {}'.format(module, e)) return options def _get_default(self, opt): """Tries to pick the best text to use as the default.""" if hasattr(opt, 'sample_default') and opt.sample_default: return opt.sample_default if type(opt.default) == list: return "[%s]" % ', '.join(str(item) for item in opt.default) result = str(opt.default) if not result: result = '<>' return result def run(self): """Load and find config options to document.""" modules = [c.strip() for c in self.content if c.strip()] if not modules: raise self.error('No modules provided to document.') env = self.state.document.settings.env app = env.app result = sm.ViewList() source = '<{}>'.format(__name__) target = self.options.get('config-target', '') title = self.options.get( 'table-title', 'Description of {} configuration options'.format(target)) # See if there are option sets that need to be ignored exclude = self.options.get('exclude-list', '') exclude_list = [e.strip() for e in exclude.split(',') if e.strip()] exclusive = self.options.get('exclusive-list', '') exclusive_list = [e.strip() for e in exclusive.split(',') if e.strip()] result.append('.. _{}:'.format(title.replace(' ', '-')), source) result.append('', source) result.append('.. 
list-table:: {}'.format(title), source) result.append(' :header-rows: 1', source) result.append(' :class: config-ref-table', source) result.append('', source) result.append(' * - Configuration option = Default value', source) result.append(' - Description', source) options = [] for module in modules: retval = self._doc_module(module, exclude_list, exclusive_list) if retval: options.extend(retval) else: LOG.info('[config-table] No options found in {}'.format( module)) # Get options sorted alphabetically but with deprecated options last list.sort(options, key=lambda opt: opt.name) list.sort(options, key=lambda opt: opt.deprecated_for_removal) for opt in options: result.append( ' * - ``{}`` = ``{}``'.format( opt.name, self._get_default(opt)), source) result.append( ' - ({}) {}{}'.format( opt.type, opt.help, ' **DEPRECATED**' if opt.deprecated_for_removal else ''), source) node = nodes.section() node.document = self.state.document self.state.nested_parse(result, 0, node) return node.children def setup(app): app.add_directive('config-table', ConfigTableDirective) return { 'parallel_read_safe': True, 'parallel_write_safe': True, } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/doc/requirements.txt0000664000175000017500000000075600000000000017130 0ustar00zuulzuul00000000000000# The order of packages is significant, because pip processes them in the order # of appearance. Changing the order has an impact on the overall integration # process, which may cause wedges in the gate later. openstackdocstheme>=2.2.7 # Apache-2.0 reno>=3.2.0 # Apache-2.0 sphinx>=3.5.1 # BSD Pygments>=2.6.1 # BSD license os-api-ref>=2.1.0 # Apache-2.0 sphinxcontrib-apidoc>=0.3.0 # BSD sphinx-feature-classification>=1.1.0 # Apache 2.0 # redirect tests in docs whereto>=0.4.0 # Apache-2.0 ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315577.4231215 cinder-27.0.0/doc/source/0000775000175000017500000000000000000000000015134 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315577.4231215 cinder-27.0.0/doc/source/_extra/0000775000175000017500000000000000000000000016416 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/doc/source/_extra/.htaccess0000664000175000017500000000056700000000000020224 0ustar00zuulzuul00000000000000redirectmatch 301 ^/cinder/([^/]+)/man/cinder-manage.html$ /cinder/$1/cli/cinder-manage.html redirectmatch 301 ^/cinder/([^/]+)/upgrade.html$ /cinder/$1/admin/upgrades.html redirectmatch 301 ^/cinder/([^/]+)/admin/blockstorage-([^/]+).html$ /cinder/$1/admin/$2.html redirectmatch 301 ^/cinder/([^/]+)/admin/generalized_filters.html$ /cinder/$1/admin/generalized-filters.html ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315577.4231215 cinder-27.0.0/doc/source/_static/0000775000175000017500000000000000000000000016562 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/doc/source/_static/.placeholder0000664000175000017500000000000000000000000021033 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315577.4351215 
cinder-27.0.0/doc/source/admin/0000775000175000017500000000000000000000000016224 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/doc/source/admin/README.rst0000664000175000017500000000104200000000000017710 0ustar00zuulzuul00000000000000================================================== Cinder Administration Documentation (source/admin) ================================================== Introduction: ------------- This directory is intended to hold any documentation that relates to how to run or operate Cinder. Previously, this content was in the admin-guide section of openstack-manuals. The full spec for organization of documentation may be seen in the `OS Manuals Migration Spec `. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/doc/source/admin/accelerate-image-compression.rst0000664000175000017500000000444300000000000024472 0ustar00zuulzuul00000000000000.. _accelerate_image_compression: ============================ Accelerate image compression ============================ A general framework to accommodate hardware compression accelerators for compression of volumes uploaded to the Image service (Glance) as images and decompression of compressed images used to create volumes was introduced in the Train release. The only accelerator supported in this release is Intel QuickAssist Technology (QAT), which produces a compressed file in gzip format. Additionally, the framework provides software-based compression using the GUNzip tool if a suitable hardware accelerator is not available. Because this software fallback could cause performance problems if the Cinder services are not deployed on sufficiently powerful nodes, the default setting is *not* to enable compression on image upload or download. The compressed image of a volume will be stored in the Image service (Glance) with the ``container_format`` image property of ``compressed``. See the `Image service documentation `_ for more information about this image container format. Configure image compression ~~~~~~~~~~~~~~~~~~~~~~~~~~~ To enable the image compression feature, set the following configuration option in the ``cinder.conf`` file: .. code-block:: ini allow_compression_on_image_upload = True By default it is set to False, which means image compression is disabled. .. code-block:: ini compression_format = gzip This specifies the image compression format. The only supported format in the Train release is ``gzip``. System requirement ~~~~~~~~~~~~~~~~~~ To use this feature, a hardware accelerator should be present in the system; without one, the feature provides no benefit. For the two supported accelerators, the system should be configured as follows: - ``Intel QuickAssist Technology (QAT)`` - This is the hardware accelerator from Intel. The QAT driver should be installed; refer to https://01.org/intel-quickassist-technology. The compression library QATzip should also be installed; refer to https://github.com/intel/QATzip. - ``GUNzip`` - The relevant ``GUNzip`` package should be installed and the ``gzip`` command should be available. This is used as a fallback when a hardware accelerator is not available.
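Putting the two options together, a minimal ``cinder.conf`` fragment that enables the feature might look like the following (a sketch only: the option names and values are taken from the text above, and placing them in the ``[DEFAULT]`` section is an assumption):

.. code-block:: ini

   [DEFAULT]
   # Compress images when uploading volumes to Glance and decompress
   # compressed images when creating volumes from them.
   allow_compression_on_image_upload = True
   # gzip is the only supported compression format in the Train release.
   compression_format = gzip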
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/doc/source/admin/api-configuration.rst0000664000175000017500000000544500000000000022404 0ustar00zuulzuul00000000000000================= API Configuration ================= .. todo:: This needs to be expanded to include information on e.g. PasteDeploy. Rate limiting ------------- .. warning:: This is legacy functionality that is poorly tested and may be removed in the future. You may wish to enforce rate limiting through a proxy server instead. Cinder supports admin-configured API limits. These are disabled by default but can be configured by modifying :file:`api-paste.ini` to enabled the ``RateLimitingMiddleware`` middleware. For example, given the following composite application definitions in e.g. ``/etc/cinder/api-paste.ini``: .. code-block:: ini [composite:openstack_volume_api_v2] use = call:cinder.api.middleware.auth:pipeline_factory noauth = cors ... apiv2 keystone = cors ... apiv2 keystone_nolimit = cors ... apiv2 [composite:openstack_volume_api_v3] use = call:cinder.api.middleware.auth:pipeline_factory noauth = cors ... apiv3 keystone = cors ... apiv3 keystone_nolimit = cors ... apiv3 You can configure rate limiting by adding a new filter to call ``RateLimitingMiddleware`` and configure the composite applications to use this filter: .. code-block:: ini [composite:openstack_volume_api_v2] use = call:cinder.api.middleware.auth:pipeline_factory noauth = cors ... ratelimit apiv2 keystone = cors ... ratelimit apiv2 keystone_nolimit = cors ... ratelimit apiv2 [composite:openstack_volume_api_v3] use = call:cinder.api.middleware.auth:pipeline_factory noauth = cors ... ratelimit apiv3 keystone = cors ... ratelimit apiv3 keystone_nolimit = cors ... ratelimit apiv3 [filter:ratelimit] paste.filter_factory = cinder.api.v2.limits:RateLimitingMiddleware.factory Once configured, restart the :program:`cinder-api` service. Users can then view API limits using the ``openstack limits show --rate`` command. For example: .. code-block:: bash $ openstack limits show --rate +--------+-----------------+-------+--------+--------+---------------------+ | Verb | URI | Value | Remain | Unit | Next Available | +--------+-----------------+-------+--------+--------+---------------------+ | POST | * | 10 | 10 | MINUTE | 2021-03-23T12:36:09 | | PUT | * | 10 | 10 | MINUTE | 2021-03-23T12:36:09 | | DELETE | * | 100 | 100 | MINUTE | 2021-03-23T12:36:09 | | POST | */servers | 50 | 50 | DAY | 2021-03-23T12:36:09 | | GET | *changes-since* | 3 | 3 | MINUTE | 2021-03-23T12:36:09 | +--------+-----------------+-------+--------+--------+---------------------+ .. note:: Rate limits are entirely separate from absolute limits, which track resource utilization and can be seen using the ``openstack limits show --absolute`` command. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/doc/source/admin/api-throughput.rst0000664000175000017500000000271100000000000021737 0ustar00zuulzuul00000000000000============================================= Increase Block Storage API service throughput ============================================= By default, the Block Storage API service runs in one process. This limits the number of API requests that the Block Storage service can process at any given time. 
In a production environment, you should increase the Block Storage API throughput by allowing the Block Storage API service to run in as many processes as the machine capacity allows. .. note:: The Block Storage API service is named ``openstack-cinder-api`` on the following distributions: CentOS, Fedora, openSUSE, Red Hat Enterprise Linux, and SUSE Linux Enterprise. In Ubuntu and Debian distributions, the Block Storage API service is named ``cinder-api``. To do so, use the Block Storage API service option ``osapi_volume_workers``. This option allows you to specify the number of API service workers (or OS processes) to launch for the Block Storage API service. To configure this option, open the ``/etc/cinder/cinder.conf`` configuration file and set the ``osapi_volume_workers`` configuration key to the number of CPU cores/threads on a machine. On distributions that include ``openstack-config``, you can configure this by running the following command instead: .. code-block:: console # openstack-config --set /etc/cinder/cinder.conf \ DEFAULT osapi_volume_workers CORES Replace ``CORES`` with the number of CPU cores/threads on a machine. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/doc/source/admin/availability-zone-type.rst0000664000175000017500000000441400000000000023363 0ustar00zuulzuul00000000000000======================= Availability-zone types ======================= Background ---------- In a newly deployed region environment, the volume types (SSD, HDD or others) may only exist on part of the AZs, but end users have no idea which AZ is allowed for one specific volume type and they can't realize that only when the volume failed to be scheduled to backend. In this case, we have supported availability zone volume type in Rocky cycle which administrators can take advantage of to fix that. How to config availability zone types? -------------------------------------- We decided to use type's extra-specs to store this additional info, administrators can turn it on by updating volume type's key ``RESKEY:availability_zones`` as below:: "RESKEY:availability_zones": "az1,az2,az3" It's an array list whose items are separated by comma and stored in string. Once the availability zone type is configured, any UI component or client can filter out invalid volume types based on their choice of availability zone:: Request example: /v3/{project_id}/types?extra_specs={'RESKEY:availability_zones':'az1'} Remember, Cinder will always try inexact match for this spec value, for instance, when extra spec ``RESKEY:availability_zones`` is configured with value ``az1,az2``, both ``az1`` and ``az2`` are valid inputs for query, also this spec will not be used during performing capability filter, instead it will be only used for choosing suitable availability zones in these two cases below. 1. Create volume, within this feature, now we can specify availability zone via parameter ``availability_zone``, volume source (volume, snapshot, group), configuration option ``default_availability_zone`` and ``storage_availability_zone``. When creating new volume, Cinder will try to read the AZ(s) in the priority of:: source group > parameter availability_zone > source snapshot (or volume) > volume type > configuration default_availability_zone > storage_availability_zone If there is a conflict between any of them, 400 BadRequest will be raised, also now a AZ list instead of single AZ will be delivered to ``AvailabilityZoneFilter``. 2. 
Retype volume, this flow also has been updated, if new type has configured ``RESKEY:availability_zones`` Cinder scheduler will validate this as well. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/doc/source/admin/backup-disks.rst0000664000175000017500000002312500000000000021341 0ustar00zuulzuul00000000000000=================================== Back up Block Storage service disks =================================== While you can use the LVM snapshot to create snapshots, you can also use it to back up your volumes. By using LVM snapshot, you reduce the size of the backup; only existing data is backed up instead of the entire volume. To back up a volume, you must create a snapshot of it. An LVM snapshot is the exact copy of a logical volume, which contains data in a frozen state. This prevents data corruption because data cannot be manipulated during the volume creation process. Remember that the volumes created through an :command:`openstack volume create` command exist in an LVM logical volume. You must also make sure that the operating system is not using the volume and that all data has been flushed on the guest file systems. This usually means that those file systems have to be unmounted during the snapshot creation. They can be mounted again as soon as the logical volume snapshot has been created. Before you create the snapshot you must have enough space to save it. As a precaution, you should have at least twice as much space as the potential snapshot size. If insufficient space is available, the snapshot might become corrupted. For this example assume that a 100 GB volume named ``volume-00000001`` was created for an instance while only 4 GB are used. This example uses these commands to back up only those 4 GB: * :command:`lvm2` command. Directly manipulates the volumes. * :command:`kpartx` command. Discovers the partition table created inside the instance. * :command:`tar` command. Creates a minimum-sized backup. * :command:`sha1sum` command. Calculates the backup checksum to check its consistency. You can apply this process to volumes of any size. **To back up Block Storage service disks** #. Create a snapshot of a used volume * Use this command to list all volumes .. code-block:: console # lvdisplay * Create the snapshot; you can do this while the volume is attached to an instance: .. code-block:: console # lvcreate --size 10G --snapshot --name volume-00000001-snapshot \ /dev/cinder-volumes/volume-00000001 Use the ``--snapshot`` configuration option to tell LVM that you want a snapshot of an already existing volume. The command includes the size of the space reserved for the snapshot volume, the name of the snapshot, and the path of an already existing volume. Generally, this path is ``/dev/cinder-volumes/VOLUME_NAME``. The size does not have to be the same as the volume of the snapshot. The ``--size`` parameter defines the space that LVM reserves for the snapshot volume. As a precaution, the size should be the same as that of the original volume, even if the whole space is not currently used by the snapshot. * Run the :command:`lvdisplay` command again to verify the snapshot: .. 
code-block:: console --- Logical volume --- LV Name /dev/cinder-volumes/volume-00000001 VG Name cinder-volumes LV UUID gI8hta-p21U-IW2q-hRN1-nTzN-UC2G-dKbdKr LV Write Access read/write LV snapshot status source of /dev/cinder-volumes/volume-00000026-snap [active] LV Status available # open 1 LV Size 15,00 GiB Current LE 3840 Segments 1 Allocation inherit Read ahead sectors auto - currently set to 256 Block device 251:13 --- Logical volume --- LV Name /dev/cinder-volumes/volume-00000001-snap VG Name cinder-volumes LV UUID HlW3Ep-g5I8-KGQb-IRvi-IRYU-lIKe-wE9zYr LV Write Access read/write LV snapshot status active destination for /dev/cinder-volumes/volume-00000026 LV Status available # open 0 LV Size 15,00 GiB Current LE 3840 COW-table size 10,00 GiB COW-table LE 2560 Allocated to snapshot 0,00% Snapshot chunk size 4,00 KiB Segments 1 Allocation inherit Read ahead sectors auto - currently set to 256 Block device 251:14 #. Partition table discovery * To exploit the snapshot with the :command:`tar` command, mount your partition on the Block Storage service server. The :command:`kpartx` utility discovers and maps table partitions. You can use it to view partitions that are created inside the instance. Without using the partitions created inside instances, you cannot see its content and create efficient backups. .. code-block:: console # kpartx -av /dev/cinder-volumes/volume-00000001-snapshot .. note:: On a Debian-based distribution, you can use the :command:`apt-get install kpartx` command to install :command:`kpartx`. If the tools successfully find and map the partition table, no errors are returned. * To check the partition table map, run this command: .. code-block:: console $ ls /dev/mapper/nova* You can see the ``cinder--volumes-volume--00000001--snapshot1`` partition. If you created more than one partition on that volume, you see several partitions; for example: ``cinder--volumes-volume--00000001--snapshot2``, ``cinder--volumes-volume--00000001--snapshot3``, and so on. * Mount your partition .. code-block:: console # mount /dev/mapper/cinder--volumes-volume--volume--00000001--snapshot1 /mnt If the partition mounts successfully, no errors are returned. You can directly access the data inside the instance. If a message prompts you for a partition or you cannot mount it, determine whether enough space was allocated for the snapshot or the :command:`kpartx` command failed to discover the partition table. Allocate more space to the snapshot and try the process again. #. Use the :command:`tar` command to create archives Create a backup of the volume: .. code-block:: console $ tar --exclude="lost+found" --exclude="some/data/to/exclude" -czf \ volume-00000001.tar.gz -C /mnt/ /backup/destination This command creates a ``tar.gz`` file that contains the data, *and data only*. This ensures that you do not waste space by backing up empty sectors. #. Checksum calculation You should always have the checksum for your backup files. When you transfer the same file over the network, you can run a checksum calculation to ensure that your file was not corrupted during its transfer. The checksum is a unique ID for a file. If the checksums are different, the file is corrupted. Run this command to run a checksum for your file and save the result to a file: .. code-block:: console $ sha1sum volume-00000001.tar.gz > volume-00000001.checksum .. note:: Use the :command:`sha1sum` command carefully because the time it takes to complete the calculation is directly proportional to the size of the file. 
Depending on your CPU, the process might take a long time for files larger than around 4 to 6 GB. #. After work cleaning Now that you have an efficient and consistent backup, use this command to clean up the file system: * Unmount the volume. .. code-block:: console $ umount /mnt * Delete the partition table. .. code-block:: console $ kpartx -dv /dev/cinder-volumes/volume-00000001-snapshot * Remove the snapshot. .. code-block:: console $ lvremove -f /dev/cinder-volumes/volume-00000001-snapshot Repeat these steps for all your volumes. #. Automate your backups Because more and more volumes might be allocated to your Block Storage service, you might want to automate your backups. The `SCR_5005_V01_NUAC-OPENSTACK-EBS-volumes-backup.sh`_ script assists you with this task. The script performs the operations from the previous example, but also provides a mail report and runs the backup based on the ``backups_retention_days`` setting. Launch this script from the server that runs the Block Storage service. This example shows a mail report: .. code-block:: console Backup Start Time - 07/10 at 01:00:01 Current retention - 7 days The backup volume is mounted. Proceed... Removing old backups... : /BACKUPS/EBS-VOL/volume-00000019/volume-00000019_28_09_2011.tar.gz /BACKUPS/EBS-VOL/volume-00000019 - 0 h 1 m and 21 seconds. Size - 3,5G The backup volume is mounted. Proceed... Removing old backups... : /BACKUPS/EBS-VOL/volume-0000001a/volume-0000001a_28_09_2011.tar.gz /BACKUPS/EBS-VOL/volume-0000001a - 0 h 4 m and 15 seconds. Size - 6,9G --------------------------------------- Total backups size - 267G - Used space : 35% Total execution time - 1 h 75 m and 35 seconds The script also enables you to SSH to your instances and run a :command:`mysqldump` command into them. To make this work, enable the connection to the Compute project keys. If you do not want to run the :command:`mysqldump` command, you can add ``enable_mysql_dump=0`` to the script to turn off this functionality. .. Links .. _`SCR_5005_V01_NUAC-OPENSTACK-EBS-volumes-backup.sh`: https://github.com/Razique/BashStuff/blob/master/SYSTEMS/OpenStack/SCR_5005_V01_NUAC-OPENSTACK-EBS-volumes-backup.sh ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/doc/source/admin/basic-volume-qos.rst0000664000175000017500000000527600000000000022156 0ustar00zuulzuul00000000000000=============================== Basic volume quality of service =============================== Basic volume QoS allows you to define hard performance limits for volumes on a per-volume basis. Performance parameters for attached volumes are controlled using volume types and associated extra-specs. As of the 13.0.0 Rocky release, Cinder supports the following options to control volume quality of service, the values of which should be fairly self-explanatory: For Fixed IOPS per volume. * `read_iops_sec` * `write_iops_sec` * `total_iops_sec` For Burst IOPS per volume. * `read_iops_sec_max` * `write_iops_sec_max` * `total_iops_sec_max` For Fixed bandwidth per volume. * `read_bytes_sec` * `write_bytes_sec` * `total_bytes_sec` For Burst bandwidth per volume. * `read_bytes_sec_max` * `write_bytes_sec_max` * `total_bytes_sec_max` For burst bucket size. * `size_iops_sec` Note that the `total_*` and `total_*_max` options for both iops and bytes cannot be used with the equivalent `read` and `write` values. 
For example, in order to create a QoS extra-spec with 20000 read IOPs and 10000 write IOPs, you might use the Cinder client in the following way: .. code-block:: console $ cinder qos-create high-iops consumer="front-end" \ read_iops_sec=20000 write_iops_sec=10000 +----------+--------------------------------------+ | Property | Value | +----------+--------------------------------------+ | consumer | front-end | | id | f448f61c-4238-4eef-a93a-2024253b8f75 | | name | high-iops | | specs | read_iops_sec : 20000 | | | write_iops_sec : 10000 | +----------+--------------------------------------+ The equivalent OpenStack client command would be: .. code-block:: console $ openstack volume qos create --consumer "front-end" \ --property "read_iops_sec=20000" \ --property "write_iops_sec=10000" \ high-iops Once this is done, you can associate this QoS with a volume type by using the `qos-associate` Cinder client command. .. code-block:: console $ cinder qos-associate QOS_ID VOLUME_TYPE_ID or using the `openstack volume qos associate` OpenStack client command. .. code-block:: console $ openstack volume qos associate QOS_ID VOLUME_TYPE_ID You can now create a new volume and attempt to attach it to a consumer such as Nova. If you login to the Nova compute host, you'll be able to see the assigned limits when checking the XML definition of the virtual machine with `virsh dumpxml`. .. note:: As of the Nova 18.0.0 Rocky release, front end QoS settings are only supported when using the libvirt driver. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/doc/source/admin/boot-from-volume.rst0000664000175000017500000000045000000000000022166 0ustar00zuulzuul00000000000000================ Boot from volume ================ In some cases, you can store and run instances from inside volumes. For information, see `Launch an instance from a volume`_. .. _`Launch an instance from a volume`: https://docs.openstack.org/nova/latest/user/launch-instance-from-volume.html ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/doc/source/admin/capacity-based-qos.rst0000664000175000017500000000677000000000000022441 0ustar00zuulzuul00000000000000================================= Capacity based quality of service ================================= In many environments, the performance of the storage system which Cinder manages scales with the storage space in the cluster. For example, a Ceph RBD cluster could have a capacity of 10,000 IOPs and 1000 GB storage. However, as the RBD cluster scales to 2000 GB, the IOPs scale to 20,000 IOPs. Basic QoS allows you to define hard limits for volumes, however, if you have a limit of 1000 IOPs for a volume and you have a user which creates 10x 1GB volumes with 1000 IOPs (in a cluster with 1000GB storage and 10,000 IOPs), you're not able to guarantee the quality of service without having to add extra capacity (which will go un-used). The inverse can be problematic, if a user creates a 1000GB volume with 1000 IOPs, leaving 9000 un-used IOPs. Capacity based quality of service allows you to multiply the quality of service values by the size of the volume, which will allow you to efficiently use the storage managed by Cinder. In some cases, it will 'force' the user to provision a larger volume than they need to get the IOPs they need, but that extra space would have gone un-used if they didn't use it in order to deliver the quality of service. 
There are currently 6 options to control capacity based quality of service which values should be fairly self explanatory: For dynamic IOPS per volume. * ``read_iops_sec_per_gb`` * ``write_iops_sec_per_gb`` * ``total_iops_sec_per_gb`` For dynamic bandwidth per volume. * ``read_bytes_sec_per_gb`` * ``write_bytes_sec_per_gb`` * ``total_bytes_sec_per_gb`` In addition, there are 6 more options which allow you to control the minimum possible value. This can be useful in cases where a user creates a volume that is very small and ends up with an unusable volume because of performance. For minimum IOPS per volume. * ``read_iops_sec_per_gb_min`` * ``write_iops_sec_per_gb_min`` * ``total_iops_sec_per_gb_min`` For minimum bandwidth per volume. * ``read_bytes_sec_per_gb_min`` * ``write_bytes_sec_per_gb_min`` * ``total_bytes_sec_per_gb_min`` Capacity based options might be used in conjunction with basic options, like ``*_sec_max``, in order to set upper limits for volumes. This may be useful for large volumes, which may consume all storage performance. For example, in order to create a QoS with 30 IOPs total writes per GB and a throughput of 1MB per GB, you might use the Cinder client in the following way: .. code-block:: console $ cinder qos-create high-iops consumer="front-end" \ total_iops_sec_per_gb=30 total_bytes_sec_per_gb=1048576 +----------+--------------------------------------+ | Property | Value | +----------+--------------------------------------+ | consumer | front-end | | id | f448f61c-4238-4eef-a93a-2024253b8f75 | | name | high-iops | | specs | total_iops_sec_per_gb : 30 | | | total_bytes_sec_per_gb : 1048576 | +----------+--------------------------------------+ Once this is done, you can associate this QoS with a volume type by using the ``qos-associate`` Cinder client command. .. code-block:: console $ cinder qos-associate You can now create a new volume and attempt to attach it to a consumer such as Nova. If you login to a Nova compute host, you'll be able to see the new calculated limits when checking the XML definition of the virtual machine with ``virsh dumpxml``. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/doc/source/admin/consistency-groups.rst0000664000175000017500000002726400000000000022647 0ustar00zuulzuul00000000000000================== Consistency groups ================== Consistency group support is available in OpenStack Block Storage. The support is added for creating snapshots of consistency groups. This feature leverages the storage level consistency technology. It allows snapshots of multiple volumes in the same consistency group to be taken at the same point-in-time to ensure data consistency. The consistency group operations can be performed using the Block Storage command line. .. note:: The Consistency Group APIs have been deprecated since the Queens release. Use the Generic Volume Group APIs instead. The Consistency Group APIs are governed by the same policies as the Generic Volume Group APIs. For information about configuring cinder policies, see :ref:`policy-configuration`. Before using consistency groups, make sure the Block Storage driver that you are running has consistency group support by reading the Block Storage manual or consulting the driver maintainer. There are a small number of drivers that have implemented this feature. The default LVM driver does not support consistency groups yet because the consistency technology is not available at the storage level. 
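To make the scaling concrete, here is a minimal, purely illustrative Python sketch of how a ``*_per_gb`` value combines with its optional ``*_per_gb_min`` floor (the helper is hypothetical and not part of Cinder, and the exact arithmetic Cinder applies may differ in detail):

.. code-block:: python

   def effective_limit(per_gb, size_gb, per_gb_min=None):
       """Scale a *_per_gb QoS value by volume size, honouring the optional floor."""
       value = per_gb * size_gb
       if per_gb_min is not None:
           value = max(value, per_gb_min)
       return value

   # Using the 'high-iops' QoS above (total_iops_sec_per_gb=30):
   print(effective_limit(30, 50))                  # 50 GB volume   -> 1500 IOPS
   print(effective_limit(30, 1000))                # 1000 GB volume -> 30000 IOPS
   print(effective_limit(30, 10, per_gb_min=500))  # 10 GB volume   -> 500 IOPS (floor applies)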
The following consistency group operations are supported: - Create a consistency group, given volume types. .. note:: A consistency group can support more than one volume type. The scheduler is responsible for finding a back end that can support all given volume types. A consistency group can only contain volumes hosted by the same back end. A consistency group is empty upon its creation. Volumes need to be created and added to it later. - Show a consistency group. - List consistency groups. - Create a volume and add it to a consistency group, given volume type and consistency group id. - Create a snapshot for a consistency group. - Show a snapshot of a consistency group. - List consistency group snapshots. - Delete a snapshot of a consistency group. - Delete a consistency group. - Modify a consistency group. - Create a consistency group from the snapshot of another consistency group. - Create a consistency group from a source consistency group. The following operations are not allowed if a volume is in a consistency group: - Volume migration. - Volume retype. - Volume deletion. .. note:: A consistency group has to be deleted as a whole with all the volumes. The following operations are not allowed if a volume snapshot is in a consistency group snapshot: - Volume snapshot deletion. .. note:: A consistency group snapshot has to be deleted as a whole with all the volume snapshots. The details of consistency group operations are shown in the following. .. note:: Currently, no OpenStack client command is available to run in place of the cinder consistency group creation commands. Use the cinder commands detailed in the following examples. **Create a consistency group**: .. code-block:: console cinder consisgroup-create [--name name] [--description description] [--availability-zone availability-zone] volume-types .. note:: The parameter ``volume-types`` is required. It can be a list of names or UUIDs of volume types separated by commas without spaces in between. For example, ``volumetype1,volumetype2,volumetype3.``. .. code-block:: console $ cinder consisgroup-create --name bronzeCG2 volume_type_1 +-------------------+--------------------------------------+ | Property | Value | +-------------------+--------------------------------------+ | availability_zone | nova | | created_at | 2014-12-29T12:59:08.000000 | | description | None | | id | 1de80c27-3b2f-47a6-91a7-e867cbe36462 | | name | bronzeCG2 | | status | creating | +-------------------+--------------------------------------+ **Show a consistency group**: .. code-block:: console $ cinder consisgroup-show 1de80c27-3b2f-47a6-91a7-e867cbe36462 +-------------------+--------------------------------------+ | Property | Value | +-------------------+--------------------------------------+ | availability_zone | nova | | created_at | 2014-12-29T12:59:08.000000 | | description | None | | id | 2a6b2bda-1f43-42ce-9de8-249fa5cbae9a | | name | bronzeCG2 | | status | available | | volume_types | volume_type_1 | +-------------------+--------------------------------------+ **List consistency groups**: .. code-block:: console $ cinder consisgroup-list +--------------------------------------+-----------+-----------+ | ID | Status | Name | +--------------------------------------+-----------+-----------+ | 1de80c27-3b2f-47a6-91a7-e867cbe36462 | available | bronzeCG2 | | 3a2b3c42-b612-479a-91eb-1ed45b7f2ad5 | error | bronzeCG | +--------------------------------------+-----------+-----------+ **Create a volume and add it to a consistency group**: .. 
note:: When creating a volume and adding it to a consistency group, a volume type and a consistency group id must be provided. This is because a consistency group can support more than one volume type. .. code-block:: console $ openstack volume create --type volume_type_1 --consistency-group \ 1de80c27-3b2f-47a6-91a7-e867cbe36462 --size 1 cgBronzeVol +---------------------------------------+--------------------------------------+ | Field | Value | +---------------------------------------+--------------------------------------+ | attachments | [] | | availability_zone | nova | | bootable | false | | consistencygroup_id | 1de80c27-3b2f-47a6-91a7-e867cbe36462 | | created_at | 2014-12-29T13:16:47.000000 | | description | None | | encrypted | False | | id | 5e6d1386-4592-489f-a56b-9394a81145fe | | metadata | {} | | name | cgBronzeVol | | os-vol-host-attr:host | server-1@backend-1#pool-1 | | os-vol-mig-status-attr:migstat | None | | os-vol-mig-status-attr:name_id | None | | os-vol-tenant-attr:tenant_id | 1349b21da2a046d8aa5379f0ed447bed | | os-volume-replication:driver_data | None | | os-volume-replication:extended_status | None | | replication_status | disabled | | size | 1 | | snapshot_id | None | | source_volid | None | | status | creating | | user_id | 93bdea12d3e04c4b86f9a9f172359859 | | volume_type | volume_type_1 | +---------------------------------------+--------------------------------------+ **Create a snapshot for a consistency group**: .. code-block:: console $ cinder cgsnapshot-create 1de80c27-3b2f-47a6-91a7-e867cbe36462 +---------------------+--------------------------------------+ | Property | Value | +---------------------+--------------------------------------+ | consistencygroup_id | 1de80c27-3b2f-47a6-91a7-e867cbe36462 | | created_at | 2014-12-29T13:19:44.000000 | | description | None | | id | d4aff465-f50c-40b3-b088-83feb9b349e9 | | name | None | | status | creating | +---------------------+-------------------------------------+ **Show a snapshot of a consistency group**: .. code-block:: console $ cinder cgsnapshot-show d4aff465-f50c-40b3-b088-83feb9b349e9 **List consistency group snapshots**: .. code-block:: console $ cinder cgsnapshot-list +--------------------------------------+--------+----------+ | ID | Status | Name | +--------------------------------------+--------+----------+ | 6d9dfb7d-079a-471e-b75a-6e9185ba0c38 | available | None | | aa129f4d-d37c-4b97-9e2d-7efffda29de0 | available | None | | bb5b5d82-f380-4a32-b469-3ba2e299712c | available | None | | d4aff465-f50c-40b3-b088-83feb9b349e9 | available | None | +--------------------------------------+--------+----------+ **Delete a snapshot of a consistency group**: .. code-block:: console $ cinder cgsnapshot-delete d4aff465-f50c-40b3-b088-83feb9b349e9 **Delete a consistency group**: .. note:: The force flag is needed when there are volumes in the consistency group: .. code-block:: console $ cinder consisgroup-delete --force 1de80c27-3b2f-47a6-91a7-e867cbe36462 **Modify a consistency group**: .. code-block:: console cinder consisgroup-update [--name NAME] [--description DESCRIPTION] [--add-volumes UUID1,UUID2,......] [--remove-volumes UUID3,UUID4,......] CG The parameter ``CG`` is required. It can be a name or UUID of a consistency group. UUID1,UUID2,...... are UUIDs of one or more volumes to be added to the consistency group, separated by commas. Default is None. UUID3,UUID4,...... are UUIDs of one or more volumes to be removed from the consistency group, separated by commas. Default is None. .. 
code-block:: console $ cinder consisgroup-update --name 'new name' \ --description 'new description' \ --add-volumes 0b3923f5-95a4-4596-a536-914c2c84e2db,1c02528b-3781-4e32-929c-618d81f52cf3 \ --remove-volumes 8c0f6ae4-efb1-458f-a8fc-9da2afcc5fb1,a245423f-bb99-4f94-8c8c-02806f9246d8 \ 1de80c27-3b2f-47a6-91a7-e867cbe36462 **Create a consistency group from the snapshot of another consistency group**: .. code-block:: console $ cinder consisgroup-create-from-src [--cgsnapshot CGSNAPSHOT] [--name NAME] [--description DESCRIPTION] The parameter ``CGSNAPSHOT`` is a name or UUID of a snapshot of a consistency group: .. code-block:: console $ cinder consisgroup-create-from-src \ --cgsnapshot 6d9dfb7d-079a-471e-b75a-6e9185ba0c38 \ --name 'new cg' --description 'new cg from cgsnapshot' **Create a consistency group from a source consistency group**: .. code-block:: console $ cinder consisgroup-create-from-src [--source-cg SOURCECG] [--name NAME] [--description DESCRIPTION] The parameter ``SOURCECG`` is a name or UUID of a source consistency group: .. code-block:: console $ cinder consisgroup-create-from-src \ --source-cg 6d9dfb7d-079a-471e-b75a-6e9185ba0c38 \ --name 'new cg' --description 'new cloned cg' ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/doc/source/admin/default-volume-types.rst0000664000175000017500000000530600000000000023055 0ustar00zuulzuul00000000000000==================== Default Volume Types ==================== Beginning with the Train release, untyped volumes (that is, volumes with no volume-type) have been disallowed. To facilitate this, a ``__DEFAULT__`` volume-type was included as part of the Train database migration. Since the Train release, handling of the default volume-type has been improved: - The default_volume_type configuration option is required to have a value. The default value is ``__DEFAULT__``. - A request to delete the currently configured default_volume_type will fail. (You can delete that volume-type, but you cannot do it while it is the value of the configuration option.) - There must always be at least one volume-type defined in a Cinder installation. This is enforced by the type-delete call. - If the default_volume_type is misconfigured (that is, if the value refers to a non-existent volume-type), requests that rely on the default volume-type (for example, a volume-create request that does not specify a volume-type) will result in a HTTP 500 response. Default types per project ------------------------- We have overriden the existing Cinder default Volume Type on a per project basis to make it easier to manage complex deployments. With the introduction of this new default volume type support, we’ll now have 2 different default volume types. From more specific to more generic these are: - Per project - Defined in cinder.conf (defaults to ``__DEFAULT__`` type) So when a user creates a new volume that has no defined volume type (explicit or in the source), Cinder will look for the appropriate default first by checking if there’s one defined in the DB for the specific project and use it, if there isn’t one, it will continue like it does today, using the default type from cinder.conf. 
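As a reminder of what that fallback looks like, the deployment-wide default is
simply the ``default_volume_type`` option in ``cinder.conf``; a minimal sketch,
using the ``__DEFAULT__`` type described above, is:

.. code-block:: ini

   [DEFAULT]
   # Used only when neither the request nor the source provides a volume type
   # and the project has no per-project default registered in the database.
   default_volume_type = __DEFAULT__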
Administrators and users must still be careful with the normal Cinder behavior when creating volumes, as Cinder will still only resort to using the default volume type if the user doesn’t select one on the request or if there’s no volume type in the source, which means that Cinder will not use any of those defaults if we: - Create a volume providing a volume type - Create a volume from a snapshot - Clone a volume - Create a volume from an image that has cinder_img_volume_type defined in its metadata. There is a new set of commands in the python-cinderclient to match the new REST API endpoints: - Set default: ``cinder default-type-set `` - Unset default: ``cinder default-type-unset `` - List defaults: ``cinder default-type-list [--project ]`` By default the policy restricting access to set, unset, get or list all project default volume type is set to admins only. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/doc/source/admin/driver-filter-weighing.rst0000664000175000017500000003123000000000000023332 0ustar00zuulzuul00000000000000.. _filter_weigh_scheduler: ========================================================== Configure and use driver filter and weighing for scheduler ========================================================== OpenStack Block Storage enables you to choose a volume back end based on back-end specific properties by using the DriverFilter and GoodnessWeigher for the scheduler. The driver filter and weigher scheduling can help ensure that the scheduler chooses the best back end based on requested volume properties as well as various back-end specific properties. What is driver filter and weigher and when to use it ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The driver filter and weigher gives you the ability to more finely control how the OpenStack Block Storage scheduler chooses the best back end to use when handling a volume request. One example scenario where using the driver filter and weigher can be if a back end that utilizes thin-provisioning is used. The default filters use the ``free capacity`` property to determine the best back end, but that is not always perfect. If a back end has the ability to provide a more accurate back-end specific value you can use that as part of the weighing. Another example of when the driver filter and weigher can prove useful is if a back end exists where there is a hard limit of 1000 volumes. The maximum volume size is 500 GB. Once 75% of the total space is occupied the performance of the back end degrades. The driver filter and weigher can provide a way for these limits to be checked for. Enable driver filter and weighing ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ To enable the driver filter, set the ``scheduler_default_filters`` option in the ``cinder.conf`` file to ``DriverFilter``. The DriverFilter can also be used along with other filters by adding it to the list if other filters are already present. To enable the goodness filter as a weigher, set the ``scheduler_default_weighers`` option in the ``cinder.conf`` file to ``GoodnessWeigher`` or add it to the list if other weighers are already present. You can choose to use the ``DriverFilter`` without the ``GoodnessWeigher`` or vice-versa. The filter and weigher working together, however, create the most benefits when helping the scheduler choose an ideal back end. .. 
important:: The GoodnessWeigher can be used along with CapacityWeigher and others, but must be used with caution as it might obfuscate the CapacityWeigher. Example ``cinder.conf`` configuration file: .. code-block:: ini scheduler_default_filters = DriverFilter scheduler_default_weighers = GoodnessWeigher .. note:: It is useful to use the other filters and weighers available in OpenStack in combination with these custom ones. For example, the ``CapacityFilter`` and ``CapacityWeigher`` can be combined with these. Using them together should be done with caution as depending on the defined logic, one might obfuscate the other. Defining your own filter and goodness functions ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ You can define your own filter and goodness functions through the use of various properties that OpenStack Block Storage has exposed. Properties exposed include information about the volume request being made, ``volume_type`` settings, and back-end specific information about drivers. All of these allow for a lot of control over how the ideal back end for a volume request will be decided. The ``filter_function`` option is a string defining an equation that will determine whether a back end should be considered as a potential candidate in the scheduler. The ``goodness_function`` option is a string defining an equation that will rate the quality of the potential host (0 to 100, 0 lowest, 100 highest). .. important:: The drive filter and weigher will use default values for filter and goodness functions for each back end if you do not define them yourself. If complete control is desired then a filter and goodness function should be defined for each of the back ends in the ``cinder.conf`` file. Supported operations in filter and goodness functions ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Below is a table of all the operations currently usable in custom filter and goodness functions created by you: +--------------------------------+-------------------------+ | Operations | Type | +================================+=========================+ | +, -, \*, /, ^ | standard math | +--------------------------------+-------------------------+ | not, and, or, &, \|, ! | logic | +--------------------------------+-------------------------+ | >, >=, <, <=, ==, <>, != | equality | +--------------------------------+-------------------------+ | +, - | sign | +--------------------------------+-------------------------+ | x ? a : b | ternary | +--------------------------------+-------------------------+ | abs(x), max(x, y), min(x, y) | math helper functions | +--------------------------------+-------------------------+ .. caution:: Syntax errors you define in filter or goodness strings are thrown at a volume request time. Available properties when creating custom functions ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ There are various properties that can be used in either the ``filter_function`` or the ``goodness_function`` strings. The properties allow access to volume info, qos settings, extra specs, and so on. 
The following properties and their sub-properties are currently available for use: Host stats for a back end ------------------------- In order to access these properties, use the following format: ``stats.`` host The host's name volume\_backend\_name The volume back end name vendor\_name The vendor name driver\_version The driver version storage\_protocol The storage protocol QoS\_support Boolean signifying whether QoS is supported total\_capacity\_gb The total capacity in GB allocated\_capacity\_gb The allocated capacity in GB free\_capacity\_gb The free capacity in GB reserved\_percentage The reserved storage percentage Capabilities specific to a back end ----------------------------------- These properties are determined by the specific back end you are creating filter and goodness functions for. Some back ends may not have any properties available here. Once the capabilities vary too much according to the backend, it is better to check its properties reported on the scheduler log. The scheduler reports these capabilities constantly. In order to access these properties, use the following format: ``capabilities.`` Requested volume properties --------------------------- In order to access the volume properties, use the following format: ``volume.`` status Status for the requested volume volume\_type\_id The volume type ID display\_name The display name of the volume volume\_metadata Any metadata the volume has reservations Any reservations the volume has user\_id The volume's user ID attach\_status The attach status for the volume display\_description The volume's display description id The volume's ID replication\_status The volume's replication status snapshot\_id The volume's snapshot ID encryption\_key\_id The volume's encryption key ID source\_volid The source volume ID volume\_admin\_metadata Any admin metadata for this volume source\_replicaid The source replication ID consistencygroup\_id The consistency group ID size The size of the volume in GB metadata General metadata The property most used from here will most likely be the ``size`` sub-property. Extra specs for the requested volume type ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ View the available properties for volume types by running: .. code-block:: console $ cinder extra-specs-list Current QoS specs for the requested volume type ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ View the available properties for volume types by running: .. code-block:: console $ openstack volume qos list In order to access these properties in a custom string use the following format: ``.`` Driver filter and weigher usage examples ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Below are examples for using the filter and weigher separately, together, and using driver-specific properties. Example ``cinder.conf`` file configuration for customizing the filter function: .. code-block:: ini [default] scheduler_default_filters = DriverFilter enabled_backends = lvm-1, lvm-2 [lvm-1] volume_driver = cinder.volume.drivers.lvm.LVMVolumeDriver volume_backend_name = sample_LVM01 filter_function = "volume.size < 10" [lvm-2] volume_driver = cinder.volume.drivers.lvm.LVMVolumeDriver volume_backend_name = sample_LVM02 filter_function = "volume.size >= 10" The above example will filter volumes to different back ends depending on the size of the requested volume. Default OpenStack Block Storage scheduler weighing is done. Volumes with a size less than 10 GB are sent to lvm-1 and volumes with a size greater than or equal to 10 GB are sent to lvm-2. 
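To sanity-check such a configuration, you can create a small test volume and,
as an administrator, inspect its ``os-vol-host-attr:host`` field; with the
filter functions above, a volume smaller than 10 GB should land on the
``lvm-1`` back end. The volume type name below is only an illustrative
assumption for a type that maps to these back ends:

.. code-block:: console

   $ openstack volume create --size 5 --type lvm test_small_volume
   $ openstack volume show test_small_volume | grep "os-vol-host-attr:host"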
Example ``cinder.conf`` file configuration for customizing the goodness function: .. code-block:: ini [default] scheduler_default_weighers = GoodnessWeigher enabled_backends = lvm-1, lvm-2 [lvm-1] volume_driver = cinder.volume.drivers.lvm.LVMVolumeDriver volume_backend_name = sample_LVM01 goodness_function = "(volume.size < 5) ? 100 : 50" [lvm-2] volume_driver = cinder.volume.drivers.lvm.LVMVolumeDriver volume_backend_name = sample_LVM02 goodness_function = "(volume.size >= 5) ? 100 : 25" The above example will determine the goodness rating of a back end based off of the requested volume's size. Default OpenStack Block Storage scheduler filtering is done. The example shows how the ternary if statement can be used in a filter or goodness function. If a requested volume is of size 10 GB then lvm-1 is rated as 50 and lvm-2 is rated as 100. In this case lvm-2 wins. If a requested volume is of size 3 GB then lvm-1 is rated 100 and lvm-2 is rated 25. In this case lvm-1 would win. Example ``cinder.conf`` file configuration for customizing both the filter and goodness functions: .. code-block:: ini [default] scheduler_default_filters = DriverFilter scheduler_default_weighers = GoodnessWeigher enabled_backends = lvm-1, lvm-2 [lvm-1] volume_driver = cinder.volume.drivers.lvm.LVMVolumeDriver volume_backend_name = sample_LVM01 filter_function = "stats.total_capacity_gb < 500" goodness_function = "(volume.size < 25) ? 100 : 50" [lvm-2] volume_driver = cinder.volume.drivers.lvm.LVMVolumeDriver volume_backend_name = sample_LVM02 filter_function = "stats.total_capacity_gb >= 500" goodness_function = "(volume.size >= 25) ? 100 : 75" The above example combines the techniques from the first two examples. The best back end is now decided based off of the total capacity of the back end and the requested volume's size. Example ``cinder.conf`` file configuration for accessing driver specific properties: .. code-block:: ini [default] scheduler_default_filters = DriverFilter scheduler_default_weighers = GoodnessWeigher enabled_backends = lvm-1,lvm-2,lvm-3 [lvm-1] volume_group = stack-volumes-lvmdriver-1 volume_driver = cinder.volume.drivers.lvm.LVMVolumeDriver volume_backend_name = lvmdriver-1 filter_function = "volume.size < 5" goodness_function = "(capabilities.total_volumes < 3) ? 100 : 50" [lvm-2] volume_group = stack-volumes-lvmdriver-2 volume_driver = cinder.volume.drivers.lvm.LVMVolumeDriver volume_backend_name = lvmdriver-2 filter_function = "volume.size < 5" goodness_function = "(capabilities.total_volumes < 8) ? 100 : 50" [lvm-3] volume_group = stack-volumes-lvmdriver-3 volume_driver = cinder.volume.drivers.LVMVolumeDriver volume_backend_name = lvmdriver-3 goodness_function = "55" The above is an example of how back-end specific properties can be used in the filter and goodness functions. In this example the LVM driver's ``total_volumes`` capability is being used to determine which host gets used during a volume request. In the above example, lvm-1 and lvm-2 will handle volume requests for all volumes with a size less than 5 GB. Both lvm-1 and lvm-2 will have the same priority while lvm-1 contains 3 or less volumes. After that lvm-2 will have priority while it contains 8 or less volumes. The lvm-3 will collect all volumes greater or equal to 5 GB as well as all volumes once lvm-1 and lvm-2 lose priority. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/doc/source/admin/generalized-filters.rst0000664000175000017500000000556000000000000022723 0ustar00zuulzuul00000000000000=================== Generalized filters =================== Background ---------- Cinder introduced generalized resource filters since Pike. Administrator can control the allowed filter keys for **non-admin** user by editing the filter configuration file. Also since this feature, cinder will raise ``400 BadRequest`` if any invalid query filter is specified. How do I configure the filter keys? ----------------------------------- ``resource_query_filters_file`` is introduced to cinder to represent the filter config file path, and the config file accepts the valid filter keys for **non-admin** user with json format: .. code-block:: json { "volume": ["name", "status", "metadata"] } the key ``volume`` (singular) here stands for the resource you want to apply and the value accepts an list which contains the allowed filters collection, once the configuration file is changed and API service is restarted, cinder will only recognize this filter keys, **NOTE**: the default configuration file will include all the filters we already enabled. Which filter keys are supported? -------------------------------- Not all the attributes are supported at present, so we add this table below to indicate which filter keys are valid and can be used in the configuration. Since v3.34 we could use '~' to indicate supporting querying resource by inexact match, for example, if we have a configuration file as below: .. code-block:: json { "volume": ["name~"] } User can query volume both by ``name=volume`` and ``name~=volume``, and the volumes named ``volume123`` and ``a_volume123`` are both valid for second input while neither are valid for first. The supported APIs are marked with "*" below in the table. .. list-table:: :header-rows: 1 * - API - Valid filter keys * - list volume* - id, group_id, name, status, bootable, migration_status, metadata, host, image_metadata, availability_zone, user_id, volume_type_id, project_id, size, description, replication_status, multiattach * - list snapshot* - id, volume_id, user_id, project_id, status, volume_size, name, description, volume_type_id, group_snapshot_id, metadata, availability_zone * - list backup* - id, name, status, container, availability_zone, description, volume_id, is_incremental, size, host, parent_id * - list group* - id, user_id, status, availability_zone, group_type, name, description, host * - list g-snapshot* - id, name, description, group_id, group_type_id, status * - list attachment* - id, volume_id, instance_id, attach_status, attach_mode, connection_info, mountpoint, attached_host * - list message* - id, event_id, resource_uuid, resource_type, request_id, message_level, project_id * - get pools - name, volume_type * - list types (3.52) - is_public, extra_specs ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/doc/source/admin/get-capabilities.rst0000664000175000017500000003060300000000000022166 0ustar00zuulzuul00000000000000.. _get_capabilities: ================ Get capabilities ================ When an administrator configures ``volume type`` and ``extra specs`` of storage on the back end, the administrator has to read the right documentation that corresponds to the version of the storage back end. Deep knowledge of storage is also required. 
OpenStack Block Storage enables administrators to configure ``volume type`` and ``extra specs`` without specific knowledge of the storage back end. .. note:: * ``Volume Type``: A group of volume policies. * ``Extra Specs``: The definition of a volume type. This is a group of policies. For example, provision type, QOS that will be used to define a volume at creation time. * ``Capabilities``: What the current deployed back end in Cinder is able to do. These correspond to extra specs. Usage of cinder client ~~~~~~~~~~~~~~~~~~~~~~ When an administrator wants to define new volume types for their OpenStack cloud, the administrator would fetch a list of ``capabilities`` for a particular back end using the cinder client. First, get a list of the services: .. code-block:: console $ openstack volume service list +------------------+-------------------+------+---------+-------+----------------------------+ | Binary | Host | Zone | Status | State | Updated At | +------------------+-------------------+------+---------+-------+----------------------------+ | cinder-scheduler | controller | nova | enabled | up | 2016-10-24T13:53:35.000000 | | cinder-volume | block1@ABC-driver | nova | enabled | up | 2016-10-24T13:53:35.000000 | | cinder-backup | controller | nova | enabled | up | 2016-10-24T13:53:35.000000 | +------------------+-------------------+------+---------+-------+----------------------------+ With one of the listed hosts, pass that to ``get-capabilities``, then the administrator can obtain volume stats and also back end ``capabilities`` as listed below. .. code-block:: console $ cinder get-capabilities block1@ABC-driver +---------------------+----------------------------------------------+ | Volume stats | Value | +---------------------+----------------------------------------------+ | description | None | | display_name | Capabilities of Cinder Vendor ABC driver | | driver_version | 2.0.0 | | namespace | OS::Storage::Capabilities::block1@ABC-driver | | pool_name | None | | replication_targets | [] | | storage_protocol | iSCSI | | vendor_name | Vendor ABC | | visibility | pool | | volume_backend_name | ABC-driver | +---------------------+----------------------------------------------+ +----------------------+-----------------------------------------------------+ | Backend properties | Value | +----------------------+-----------------------------------------------------+ | compression | {u'type':u'boolean', u'title':u'Compression', ...} | | ABC:compression_type | {u'enum':u'['lossy', 'lossless', 'special']', ...} | | qos | {u'type':u'boolean', u'title':u'QoS', ...} | | replication | {u'type':u'boolean', u'title':u'Replication', ...} | | thin_provisioning | {u'type':u'boolean', u'title':u'Thin Provisioning'} | | ABC:minIOPS | {u'type':u'integer', u'title':u'Minimum IOPS QoS',} | | ABC:maxIOPS | {u'type':u'integer', u'title':u'Maximum IOPS QoS',} | | ABC:burstIOPS | {u'type':u'integer', u'title':u'Burst IOPS QoS',..} | +----------------------+-----------------------------------------------------+ Disable a service ~~~~~~~~~~~~~~~~~ When an administrator wants to disable a service, identify the Binary and the Host of the service. Use the :command:` openstack volume service set` command combined with the Binary and Host to disable the service: #. Determine the binary and host of the service you want to remove initially. .. 
code-block:: console $ openstack volume service list +------------------+----------------------+------+---------+-------+----------------------------+ | Binary | Host | Zone | Status | State | Updated At | +------------------+----------------------+------+---------+-------+----------------------------+ | cinder-scheduler | devstack | nova | enabled | up | 2016-10-24T13:53:35.000000 | | cinder-volume | devstack@lvmdriver-1 | nova | enabled | up | 2016-10-24T13:53:35.000000 | | cinder-backup | devstack | nova | enabled | up | 2016-10-24T13:53:35.000000 | +------------------+----------------------+------+---------+-------+----------------------------+ #. Disable the service using the Binary and Host name, placing the Host before the Binary name. .. code-block:: console $ openstack volume service set --disable HOST_NAME BINARY_NAME #. Remove the service from the database. .. code-block:: console $ cinder-manage service remove BINARY_NAME HOST_NAME Usage of REST API ~~~~~~~~~~~~~~~~~ New endpoint to ``get capabilities`` list for specific storage back end is also available. For more details, refer to the Block Storage API reference. API request: .. code-block:: console GET /v3/{tenant_id}/capabilities/{hostname} Example of return value: .. code-block:: json { "namespace": "OS::Storage::Capabilities::block1@ABC-driver", "volume_backend_name": "ABC-driver", "pool_name": "pool", "driver_version": "2.0.0", "storage_protocol": "iSCSI", "display_name": "Capabilities of Cinder Vendor ABC driver", "description": "None", "visibility": "public", "properties": { "thin_provisioning": { "title": "Thin Provisioning", "description": "Sets thin provisioning.", "type": "boolean" }, "compression": { "title": "Compression", "description": "Enables compression.", "type": "boolean" }, "ABC:compression_type": { "title": "Compression type", "description": "Specifies compression type.", "type": "string", "enum": [ "lossy", "lossless", "special" ] }, "replication": { "title": "Replication", "description": "Enables replication.", "type": "boolean" }, "qos": { "title": "QoS", "description": "Enables QoS.", "type": "boolean" }, "ABC:minIOPS": { "title": "Minimum IOPS QoS", "description": "Sets minimum IOPS if QoS is enabled.", "type": "integer" }, "ABC:maxIOPS": { "title": "Maximum IOPS QoS", "description": "Sets maximum IOPS if QoS is enabled.", "type": "integer" }, "ABC:burstIOPS": { "title": "Burst IOPS QoS", "description": "Sets burst IOPS if QoS is enabled.", "type": "integer" }, } } Usage of volume type access extension ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Some volume types should be restricted only. For example, test volume types where you are testing a new technology or ultra high performance volumes (for special cases) where you do not want most users to be able to select these volumes. An administrator/operator can then define private volume types using cinder client. Volume type access extension adds the ability to manage volume type access. Volume types are public by default. Private volume types can be created by setting the ``--private`` parameter at creation time. Access to a private volume type can be controlled by adding or removing a project from it. Private volume types without projects are only visible by users with the admin role/context. Create a public volume type by setting ``--public`` parameter: .. 
code-block:: console $ openstack volume type create vol_Type1 --description test1 --public +-------------+--------------------------------------+ | Field | Value | +-------------+--------------------------------------+ | description | test1 | | id | b7dbed9e-de78-49f8-a840-651ae7308592 | | is_public | True | | name | vol_Type1 | +-------------+--------------------------------------+ Create a private volume type by setting ``--private`` parameter: .. code-block:: console $ openstack volume type create vol_Type2 --description test2 --private +-------------+--------------------------------------+ | Field | Value | +-------------+--------------------------------------+ | description | test2 | | id | 154baa73-d2c4-462f-8258-a2df251b0d39 | | is_public | False | | name | vol_Type2 | +-------------+--------------------------------------+ Get a list of the volume types: .. code-block:: console $ openstack volume type list +--------------------------------------+-------------+ | ID | Name | +--------------------------------------+-------------+ | 0a948c84-bad5-4fba-88a2-c062006e4f6b | vol_Type1 | | 87e5be6f-9491-4ea5-9906-9ac56494bb91 | lvmdriver-1 | | fd508846-213f-4a07-aaf2-40518fb9a23f | vol_Type2 | +--------------------------------------+-------------+ Get a list of the projects: .. code-block:: console $ openstack project list +----------------------------------+--------------------+ | ID | Name | +----------------------------------+--------------------+ | 4105ead90a854100ab6b121266707f2b | alt_demo | | 4a22a545cedd4fcfa9836eb75e558277 | admin | | 71f9cdb1a3ab4b8e8d07d347a2e146bb | service | | c4860af62ffe465e99ed1bc08ef6082e | demo | | e4b648ba5108415cb9e75bff65fa8068 | invisible_to_admin | +----------------------------------+--------------------+ Add volume type access for the given demo project, using its project-id: .. code-block:: console $ openstack volume type set --project c4860af62ffe465e99ed1bc08ef6082e \ vol_Type2 List the access information about the given volume type: .. code-block:: console $ openstack volume type show vol_Type2 +--------------------+--------------------------------------+ | Field | Value | +--------------------+--------------------------------------+ | access_project_ids | c4860af62ffe465e99ed1bc08ef6082e | | description | | | id | fd508846-213f-4a07-aaf2-40518fb9a23f | | is_public | False | | name | vol_Type2 | | properties | | | qos_specs_id | None | +--------------------+--------------------------------------+ Remove volume type access for the given project: .. code-block:: console $ openstack volume type unset --project c4860af62ffe465e99ed1bc08ef6082e \ vol_Type2 $ openstack volume type show vol_Type2 +--------------------+--------------------------------------+ | Field | Value | +--------------------+--------------------------------------+ | access_project_ids | | | description | | | id | fd508846-213f-4a07-aaf2-40518fb9a23f | | is_public | False | | name | vol_Type2 | | properties | | | qos_specs_id | None | +--------------------+--------------------------------------+ ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/doc/source/admin/groups.rst0000664000175000017500000002402000000000000020273 0ustar00zuulzuul00000000000000===================== Generic volume groups ===================== Generic volume group support is available in OpenStack Block Storage (cinder) since the Newton release. 
The support is added for creating group types and group specs, creating groups of volumes, and creating snapshots of groups. The group operations can be performed using the Block Storage command line. A group type is a type for a group just like a volume type for a volume. A group type can also have associated group specs similar to extra specs for a volume type. In cinder, there is a group construct called `consistency group`. Consistency groups only support consistent group snapshots and only a small number of drivers can support it. The following is a list of drivers that support consistency groups and the release when the support was added: - Juno: EMC VNX - Kilo: EMC VMAX, IBM (GPFS, Storwize, SVC, and XIV), ProphetStor, Pure - Liberty: Dell Storage Center, EMC XtremIO, HPE 3Par and LeftHand - Mitaka: EMC ScaleIO, NetApp Data ONTAP, SolidFire - Newton: CoprHD, FalconStor, Huawei Consistency group cannot be extended easily to serve other purposes. A tenant may want to put volumes used in the same application together in a group so that it is easier to manage them together, and this group of volumes may or may not support consistent group snapshot. Generic volume group is introduced to solve this problem. There is a plan to migrate existing consistency group operations to use generic volume group operations in future releases. More information can be found in `Cinder specs `_. .. note:: Only Block Storage V3 API supports groups. You can specify ``--os-volume-api-version 3.x`` when using the `cinder` command line for group operations where `3.x` contains a microversion value for that command. The generic volume group feature was completed in several patches. As a result, the minimum required microversion is different for group types, groups, and group snapshots APIs. The following group type operations are supported: - Create a group type. - Delete a group type. - Set group spec for a group type. - Unset group spec for a group type. - List group types. - Show a group type details. - Update a group. - List group types and group specs. The following group and group snapshot operations are supported: - Create a group, given group type and volume types. .. note:: A group must have one group type. A group can support more than one volume type. The scheduler is responsible for finding a back end that can support the given group type and volume types. A group can only contain volumes hosted by the same back end. A group is empty upon its creation. Volumes need to be created and added to it later. - Show a group. - List groups. - Delete a group. - Modify a group. - Create a volume and add it to a group. - Create a snapshot for a group. - Show a group snapshot. - List group snapshots. - Delete a group snapshot. - Create a group from a group snapshot. - Create a group from a source group. The following operations are not allowed if a volume is in a group: - Volume migration. - Volume retype. - Volume deletion. .. note:: A group has to be deleted as a whole with all the volumes. The following operations are not allowed if a volume snapshot is in a group snapshot: - Volume snapshot deletion. .. note:: A group snapshot has to be deleted as a whole with all the volume snapshots. The details of group type operations are shown in the following. The minimum microversion to support group type and group specs is 3.11: **Create a group type**: .. code-block:: console cinder --os-volume-api-version 3.11 group-type-create [--description DESCRIPTION] [--is-public IS_PUBLIC] NAME .. 
note:: The parameter ``NAME`` is required. The ``--is-public IS_PUBLIC`` determines whether the group type is accessible to the public. It is ``True`` by default. By default, the policy on privileges for creating a group type is admin-only. **Show a group type**: .. code-block:: console cinder --os-volume-api-version 3.11 group-type-show GROUP_TYPE .. note:: The parameter ``GROUP_TYPE`` is the name or UUID of a group type. **List group types**: .. code-block:: console cinder --os-volume-api-version 3.11 group-type-list .. note:: Only admin can see private group types. **Update a group type**: .. code-block:: console cinder --os-volume-api-version 3.11 group-type-update [--name NAME] [--description DESCRIPTION] [--is-public IS_PUBLIC] GROUP_TYPE_ID .. note:: The parameter ``GROUP_TYPE_ID`` is the UUID of a group type. By default, the policy on privileges for updating a group type is admin-only. **Delete group type or types**: .. code-block:: console cinder --os-volume-api-version 3.11 group-type-delete GROUP_TYPE [GROUP_TYPE ...] .. note:: The parameter ``GROUP_TYPE`` is name or UUID of the group type or group types to be deleted. By default, the policy on privileges for deleting a group type is admin-only. **Set or unset group spec for a group type**: .. code-block:: console cinder --os-volume-api-version 3.11 group-type-key GROUP_TYPE ACTION KEY=VALUE [KEY=VALUE ...] .. note:: The parameter ``GROUP_TYPE`` is the name or UUID of a group type. Valid values for the parameter ``ACTION`` are ``set`` or ``unset``. ``KEY=VALUE`` is the group specs key and value pair to set or unset. For unset, specify only the key. By default, the policy on privileges for setting or unsetting group specs key is admin-only. **List group types and group specs**: .. code-block:: console cinder --os-volume-api-version 3.11 group-specs-list .. note:: By default, the policy on privileges for seeing group specs is admin-only. The details of group operations are shown in the following. The minimum microversion to support groups operations is 3.13. **Create a group**: .. code-block:: console cinder --os-volume-api-version 3.13 group-create [--name NAME] [--description DESCRIPTION] [--availability-zone AVAILABILITY_ZONE] GROUP_TYPE VOLUME_TYPES .. note:: The parameters ``GROUP_TYPE`` and ``VOLUME_TYPES`` are required. ``GROUP_TYPE`` is the name or UUID of a group type. ``VOLUME_TYPES`` can be a list of names or UUIDs of volume types separated by commas without spaces in between. For example, ``volumetype1,volumetype2,volumetype3.``. **Show a group**: .. code-block:: console cinder --os-volume-api-version 3.13 group-show GROUP .. note:: The parameter ``GROUP`` is the name or UUID of a group. **List groups**: .. code-block:: console cinder --os-volume-api-version 3.13 group-list [--all-tenants [<0|1>]] .. note:: ``--all-tenants`` specifies whether to list groups for all tenants. Only admin can use this option. **Create a volume and add it to a group**: .. code-block:: console cinder --os-volume-api-version 3.13 create --volume-type VOLUME_TYPE --group-id GROUP_ID SIZE .. note:: When creating a volume and adding it to a group, the parameters ``VOLUME_TYPE`` and ``GROUP_ID`` must be provided. This is because a group can support more than one volume type. **Delete a group**: .. code-block:: console cinder --os-volume-api-version 3.13 group-delete [--delete-volumes] GROUP [GROUP ...] .. note:: ``--delete-volumes`` allows or disallows groups to be deleted if they are not empty. 
If the group is empty, it can be deleted without ``--delete-volumes``. If the group is not empty, the flag is required for it to be deleted. When the flag is specified, the group and all volumes in the group will be deleted. **Modify a group**: .. code-block:: console cinder --os-volume-api-version 3.13 group-update [--name NAME] [--description DESCRIPTION] [--add-volumes UUID1,UUID2,......] [--remove-volumes UUID3,UUID4,......] GROUP .. note:: The parameter ``UUID1,UUID2,......`` is the UUID of one or more volumes to be added to the group, separated by commas. Similarly the parameter ``UUID3,UUID4,......`` is the UUID of one or more volumes to be removed from the group, separated by commas. The details of group snapshots operations are shown in the following. The minimum microversion to support group snapshots operations is 3.14. **Create a snapshot for a group**: .. code-block:: console cinder --os-volume-api-version 3.14 group-snapshot-create [--name NAME] [--description DESCRIPTION] GROUP .. note:: The parameter ``GROUP`` is the name or UUID of a group. **Show a group snapshot**: .. code-block:: console cinder --os-volume-api-version 3.14 group-snapshot-show GROUP_SNAPSHOT .. note:: The parameter ``GROUP_SNAPSHOT`` is the name or UUID of a group snapshot. **List group snapshots**: .. code-block:: console cinder --os-volume-api-version 3.14 group-snapshot-list [--all-tenants [<0|1>]] [--status STATUS] [--group-id GROUP_ID] .. note:: ``--all-tenants`` specifies whether to list group snapshots for all tenants. Only admin can use this option. ``--status STATUS`` filters results by a status. ``--group-id GROUP_ID`` filters results by a group id. **Delete group snapshot**: .. code-block:: console cinder --os-volume-api-version 3.14 group-snapshot-delete GROUP_SNAPSHOT [GROUP_SNAPSHOT ...] .. note:: The parameter ``GROUP_SNAPSHOT`` specifies the name or UUID of one or more group snapshots to be deleted. **Create a group from a group snapshot or a source group**: .. code-block:: console $ cinder --os-volume-api-version 3.14 group-create-from-src [--group-snapshot GROUP_SNAPSHOT] [--source-group SOURCE_GROUP] [--name NAME] [--description DESCRIPTION] .. note:: The parameter ``GROUP_SNAPSHOT`` is a name or UUID of a group snapshot. The parameter ``SOURCE_GROUP`` is a name or UUID of a source group. Either ``GROUP_SNAPSHOT`` or ``SOURCE_GROUP`` must be specified, but not both. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/doc/source/admin/image-volume-cache.rst0000664000175000017500000001055400000000000022413 0ustar00zuulzuul00000000000000.. _image_volume_cache: ================== Image-Volume cache ================== OpenStack Block Storage has an optional Image cache which can dramatically improve the performance of creating a volume from an image. The improvement depends on many factors, primarily how quickly the configured back end can clone a volume. When a volume is first created from an image, a new cached image-volume will be created that is owned by the Block Storage Internal Tenant. Subsequent requests to create volumes from that image will clone the cached version instead of downloading the image contents and copying data to the volume. The cache itself is configurable per back end and will contain the most recently used images. .. _internal-tenant: Configure the Internal Tenant ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The Image-Volume cache requires that the Internal Tenant be configured for the Block Storage services. 
This project will own the cached image-volumes so they can be managed like normal users including tools like volume quotas. This protects normal users from having to see the cached image-volumes, but does not make them globally hidden. To enable the Block Storage services to have access to an Internal Tenant, set the following options in the ``cinder.conf`` file: .. code-block:: ini cinder_internal_tenant_project_id = PROJECT_ID cinder_internal_tenant_user_id = USER_ID An example ``cinder.conf`` configuration file: .. code-block:: ini cinder_internal_tenant_project_id = b7455b8974bb4064ad247c8f375eae6c cinder_internal_tenant_user_id = f46924c112a14c80ab0a24a613d95eef .. note:: The actual user and project that are configured for the Internal Tenant do not require any special privileges. They can be the Block Storage service project or can be any normal project and user. Configure the Image-Volume cache ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ To enable the Image-Volume cache, set the following configuration option in the ``cinder.conf`` file: .. code-block:: ini image_volume_cache_enabled = True .. note:: If you use Ceph as a back end, set the following configuration option in the ``cinder.conf`` file: .. code-block:: ini [ceph] image_volume_cache_enabled = True This can be scoped per back end definition or in the default options. There are optional configuration settings that can limit the size of the cache. These can also be scoped per back end or in the default options in the ``cinder.conf`` file: .. code-block:: ini image_volume_cache_max_size_gb = SIZE_GB image_volume_cache_max_count = MAX_COUNT By default they will be set to 0, which means unlimited. For example, a configuration which would limit the max size to 200 GB and 50 cache entries will be configured as: .. code-block:: ini image_volume_cache_max_size_gb = 200 image_volume_cache_max_count = 50 .. note:: As mentioned above, the :ref:`internal tenant` configured as the cache owner does not require any special permissions and is subject to quotas like any other user. Hence, it is possible that the quotas for the internal tenant may need to be adjusted to allow the internal tenant to hold at least ``image_volume_cache_max_count`` volumes not exceeding ``image_volume_cache_max_size_gb`` total size. Thus, although the default value for these image volume cache settings is ``0`` (unlimited), in practice, these will be limited by the quotas that apply to the internal tenant. See :doc:`../cli/cli-cinder-quotas` for more information. Notifications ~~~~~~~~~~~~~ Cache actions will trigger Telemetry messages. There are several that will be sent. - ``image_volume_cache.miss`` - A volume is being created from an image which was not found in the cache. Typically this will mean a new cache entry would be created for it. - ``image_volume_cache.hit`` - A volume is being created from an image which was found in the cache and the fast path can be taken. - ``image_volume_cache.evict`` - A cached image-volume has been deleted from the cache. Managing cached Image-Volumes ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ In normal usage there should be no need for manual intervention with the cache. The entries and their backing Image-Volumes are managed automatically. If needed, you can delete these volumes manually to clear the cache. By using the standard volume deletion APIs, the Block Storage service will clean up correctly. 
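For example, assuming the cached image-volumes follow the usual
``image-<glance-image-id>`` naming and are owned by the configured internal
tenant, an administrator could locate an entry and remove it with the ordinary
volume commands:

.. code-block:: console

   $ openstack volume list --all-projects --long | grep image-
   $ openstack volume delete CACHED_VOLUME_ID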
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/doc/source/admin/index.rst0000664000175000017500000000351600000000000020072 0ustar00zuulzuul00000000000000.. _block_storage: ===================== Cinder Administration ===================== The OpenStack Block Storage service works through the interaction of a series of daemon processes named ``cinder-*`` that reside persistently on the host machine or machines. You can run all the binaries from a single node, or spread across multiple nodes. You can also run them on the same node as other OpenStack services. To administer the OpenStack Block Storage service, it is helpful to understand a number of concepts. You must make certain choices when you configure the Block Storage service in OpenStack. The bulk of the options come down to two choices - single node or multi-node install. You can read a longer discussion about `Storage Decisions`_ in the `OpenStack Operations Guide`_. OpenStack Block Storage enables you to add extra block-level storage to your OpenStack Compute instances. This service is similar to the Amazon EC2 Elastic Block Storage (EBS) offering. .. toctree:: :maxdepth: 1 security accelerate-image-compression api-throughput manage-volumes troubleshoot availability-zone-type generalized-filters backup-disks boot-from-volume basic-volume-qos capacity-based-qos consistency-groups driver-filter-weighing get-capabilities user-visible-extra-specs groups image-volume-cache lio-iscsi-support multi-backend nfs-backend over-subscription ratelimit-volume-copy-bandwidth volume-backed-image volume-backups-export-import volume-backups volume-migration volume-multiattach volume-number-weigher default-volume-types api-configuration upgrades replication-in-openstack .. _`Storage Decisions`: https://docs.openstack.org/arch-design/design-storage/design-storage-arch.html .. _`OpenStack Operations Guide`: https://wiki.openstack.org/wiki/OpsGuide ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/doc/source/admin/lio-iscsi-support.rst0000664000175000017500000000072100000000000022363 0ustar00zuulzuul00000000000000===================== Use LIO iSCSI support ===================== The default mode for the ``target_helper`` tool is ``tgtadm``. To use LIO iSCSI, install the ``python-rtslib`` package, and set ``target_helper=lioadm`` in the ``cinder.conf`` file. Once configured, you can use the :command:`cinder-rtstool` command to manage the volumes. This command enables you to create, delete, and verify volumes and determine targets and add iSCSI initiators to the system. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/doc/source/admin/manage-volumes.rst0000664000175000017500000000527300000000000021705 0ustar00zuulzuul00000000000000============== Manage volumes ============== The default OpenStack Block Storage service implementation is an iSCSI solution that uses :term:`Logical Volume Manager (LVM)` for Linux. .. note:: The OpenStack Block Storage service also provides drivers that enable you to use several vendors' back-end storage devices in addition to the base LVM implementation. These storage devices can also be used instead of the base LVM installation. This high-level procedure shows you how to create and attach a volume to a server instance. **To create and attach a volume to an instance** #. 
Configure the OpenStack Compute and the OpenStack Block Storage services through the ``/etc/cinder/cinder.conf`` file. #. Use the :command:`openstack volume create` command to create a volume. This command creates an LV into the volume group (VG) ``cinder-volumes``. #. Use the :command:`openstack server add volume` command to attach the volume to an instance. This command creates a unique :term:`IQN ` that is exposed to the compute node. * The compute node, which runs the instance, now has an active iSCSI session and new local storage (usually a ``/dev/sdX`` disk). * Libvirt uses that local storage as storage for the instance. The instance gets a new disk (usually a ``/dev/vdX`` disk). For this particular walkthrough, one cloud controller runs ``nova-api``, ``nova-scheduler``, ``nova-conductor`` and ``cinder-*`` services. Two additional compute nodes run ``nova-compute``. The walkthrough uses a custom partitioning scheme that carves out 60 GB of space and labels it as LVM. The network uses the ``FlatManager`` and ``NetworkManager`` settings for OpenStack Compute. The network mode does not interfere with OpenStack Block Storage operations, but you must set up networking for Block Storage to work. For details, see `networking`_. .. _networking: https://docs.openstack.org/neutron/latest/ To set up Compute to use volumes, ensure that Block Storage is installed along with ``lvm2``. This guide describes how to troubleshoot your installation and back up your Compute volumes. .. toctree:: boot-from-volume nfs-backend multi-backend backup-disks volume-migration volume-backups volume-backups-export-import lio-iscsi-support volume-number-weigher capacity-based-qos consistency-groups driver-filter-weighing ratelimit-volume-copy-bandwidth over-subscription image-volume-cache volume-backed-image get-capabilities user-visible-extra-specs groups .. note:: To enable the use of encrypted volumes, see the setup instructions in :ref:`Create an encrypted volume type `. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/doc/source/admin/multi-backend.rst0000664000175000017500000001675600000000000021514 0ustar00zuulzuul00000000000000.. _multi_backend: ==================================== Configure multiple-storage back ends ==================================== When you configure multiple-storage back ends, you can create several back-end storage solutions that serve the same OpenStack Compute configuration and one ``cinder-volume`` is launched for each back-end storage or back-end storage pool. In a multiple-storage back-end configuration, each back end has a name (``volume_backend_name``). Several back ends can have the same name. In that case, the scheduler properly decides which back end the volume has to be created in. The name of the back end is declared as an extra-specification of a volume type (such as, ``volume_backend_name=LVM``). When a volume is created, the scheduler chooses an appropriate back end to handle the request, according to the volume type specified by the user. Enable multiple-storage back ends ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ To enable a multiple-storage back ends, you must set the `enabled_backends` flag in the ``cinder.conf`` file. This flag defines the names (separated by a comma) of the configuration groups for the different back ends: one name is associated to one configuration group for a back end (such as, ``[lvmdriver-1]``). .. note:: The configuration group name is not related to the ``volume_backend_name``. 
.. note:: After setting the ``enabled_backends`` flag on an existing cinder service, and restarting the Block Storage services, the original ``host`` service is replaced with a new host service. The new service appears with a name like ``host@backend``. Use: .. code-block:: console $ cinder-manage volume update_host --currenthost CURRENTHOST --newhost CURRENTHOST@BACKEND to convert current block devices to the new host name. The options for a configuration group must be defined in the group (or default options are used). All the standard Block Storage configuration options (``volume_group``, ``volume_driver``, and so on) might be used in a configuration group. Configuration values in the ``[DEFAULT]`` configuration group are not used. These examples show three back ends: .. code-block:: ini enabled_backends=lvmdriver-1,lvmdriver-2,lvmdriver-3 [lvmdriver-1] volume_group=cinder-volumes-1 volume_driver=cinder.volume.drivers.lvm.LVMVolumeDriver volume_backend_name=LVM [lvmdriver-2] volume_group=cinder-volumes-2 volume_driver=cinder.volume.drivers.lvm.LVMVolumeDriver volume_backend_name=LVM [lvmdriver-3] volume_group=cinder-volumes-3 volume_driver=cinder.volume.drivers.lvm.LVMVolumeDriver volume_backend_name=LVM_b In this configuration, ``lvmdriver-1`` and ``lvmdriver-2`` have the same ``volume_backend_name``. If a volume creation requests the ``LVM`` back end name, the scheduler uses the capacity filter scheduler to choose the most suitable driver, which is either ``lvmdriver-1`` or ``lvmdriver-2``. The capacity filter scheduler is enabled by default. The next section provides more information. In addition, this example presents a ``lvmdriver-3`` back end. .. note:: For Fiber Channel drivers that support multipath, the configuration group requires the ``use_multipath_for_image_xfer=true`` option. In the example below, you can see details for HPE 3PAR and EMC Fiber Channel drivers. .. code-block:: ini [3par] use_multipath_for_image_xfer = true volume_driver = cinder.volume.drivers.hpe.hpe_3par_fc.HPE3PARFCDriver volume_backend_name = 3parfc [emc] use_multipath_for_image_xfer = true volume_driver = cinder.volume.drivers.emc.emc_smis_fc.EMCSMISFCDriver volume_backend_name = emcfc Configure shared volume driver backends ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ When configuring multiple volume backends, common configuration parameters can be shared using the `[backend_defaults]` section. As an example: .. code-block:: ini [DEFAULT] enabled_backends=backend1,backend2,backend3 [backend_defaults] image_volume_cache_enabled = True volume_clear = none target_helper = tgtadm volume_driver = cinder.volume.drivers.lvm.LVMVolumeDriver [backend1] volume_group = cinder-volume-1 image_volume_cache_enabled = False [backend2] volume_group = cinder-volume-2 [backend3] volume_group = cinder-volume-3 In this configuration, ``backend2`` and ``backend3`` have the same ``image_volume_cache_enabled`` as it is defined in the ``backend_defaults`` section. In other words, ``backend2`` and ``backend3`` have enabled the image cache features. ``image_volume_cache_enabled`` in ``backend1`` is False, that means any overwritten configuration in a volume backend will ignore the original value in ``backend_defaults``. .. note:: The ``backend_defaults`` section should be configured according to your cloud environment or your backend driver information. Configure Block Storage scheduler multi back end ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ You must enable the `filter_scheduler` option to use multiple-storage back ends. 
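In most deployments nothing extra is needed, because the filter scheduler is
the default scheduler driver; the sketch below only shows what the explicit
setting in ``cinder.conf`` would look like:

.. code-block:: ini

   [DEFAULT]
   scheduler_driver = cinder.scheduler.filter_scheduler.FilterScheduler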
The filter scheduler: #. Filters the available back ends. By default, ``AvailabilityZoneFilter``, ``CapacityFilter`` and ``CapabilitiesFilter`` are enabled. #. Weights the previously filtered back ends. By default, the `CapacityWeigher` option is enabled. When this option is enabled, the filter scheduler assigns the highest weight to back ends with the most available capacity. The scheduler uses filters and weights to pick the best back end to handle the request. The scheduler uses volume types to explicitly create volumes on specific back ends. For more information about filter and weighing, see :ref:`filter_weigh_scheduler`. Volume type ~~~~~~~~~~~ Before using it, a volume type has to be declared to Block Storage. This can be done by the following command: .. code-block:: console $ openstack --os-username admin --os-tenant-name admin volume type create lvm Then, an extra-specification has to be created to link the volume type to a back end name. Run this command: .. code-block:: console $ openstack --os-username admin --os-tenant-name admin volume type set lvm \ --property volume_backend_name=LVM_iSCSI This example creates a ``lvm`` volume type with ``volume_backend_name=LVM_iSCSI`` as extra-specifications. Create another volume type: .. code-block:: console $ openstack --os-username admin --os-tenant-name admin volume type create lvm_gold $ openstack --os-username admin --os-tenant-name admin volume type set lvm_gold \ --property volume_backend_name=LVM_iSCSI_b This second volume type is named ``lvm_gold`` and has ``LVM_iSCSI_b`` as back end name. .. note:: To list the extra-specifications, use this command: .. code-block:: console $ openstack --os-username admin --os-tenant-name admin volume type list --long .. note:: If a volume type points to a ``volume_backend_name`` that does not exist in the Block Storage configuration, the ``filter_scheduler`` returns an error that it cannot find a valid host with the suitable back end. Usage ~~~~~ When you create a volume, you must specify the volume type. The extra-specifications of the volume type are used to determine which back end has to be used. .. code-block:: console $ openstack volume create --size 1 --type lvm test_multi_backend Considering the ``cinder.conf`` described previously, the scheduler creates this volume on ``lvmdriver-1`` or ``lvmdriver-2``. .. code-block:: console $ openstack volume create --size 1 --type lvm_gold test_multi_backend This second volume is created on ``lvmdriver-3``. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/doc/source/admin/nfs-backend.rst0000664000175000017500000001165200000000000021136 0ustar00zuulzuul00000000000000================================= Configure an NFS storage back end ================================= This section explains how to configure OpenStack Block Storage to use NFS storage. You must be able to access the NFS shares from the server that hosts the ``cinder`` volume service. .. note:: The ``cinder`` volume service is named ``openstack-cinder-volume`` on the following distributions: * CentOS * Fedora * openSUSE * Red Hat Enterprise Linux * SUSE Linux Enterprise In Ubuntu and Debian distributions, the ``cinder`` volume service is named ``cinder-volume``. **Configure Block Storage to use an NFS storage back end** #. Log in as ``root`` to the system hosting the ``cinder`` volume service. #. Create a text file named ``nfs_shares`` in the ``/etc/cinder/`` directory. #. 
Add an entry to ``/etc/cinder/nfs_shares`` for each NFS share that the ``cinder`` volume service should use for back end storage. Each entry should be a separate line, and should use the following format: .. code-block:: bash HOST:SHARE Where: * HOST is the IP address or host name of the NFS server. * SHARE is the absolute path to an existing and accessible NFS share. | #. Set ``/etc/cinder/nfs_shares`` to be owned by the ``root`` user and the ``cinder`` group: .. code-block:: console # chown root:cinder /etc/cinder/nfs_shares #. Set ``/etc/cinder/nfs_shares`` to be readable by members of the cinder group: .. code-block:: console # chmod 0640 /etc/cinder/nfs_shares #. Configure the ``cinder`` volume service to use the ``/etc/cinder/nfs_shares`` file created earlier. To do so, open the ``/etc/cinder/cinder.conf`` configuration file and set the ``nfs_shares_config`` configuration key to ``/etc/cinder/nfs_shares``. On distributions that include ``openstack-config``, you can configure this by running the following command instead: .. code-block:: console # openstack-config --set /etc/cinder/cinder.conf \ DEFAULT nfs_shares_config /etc/cinder/nfs_shares The following distributions include openstack-config: * CentOS * Fedora * openSUSE * Red Hat Enterprise Linux * SUSE Linux Enterprise #. Optionally, provide any additional NFS mount options required in your environment in the ``nfs_mount_options`` configuration key of ``/etc/cinder/cinder.conf``. If your NFS shares do not require any additional mount options (or if you are unsure), skip this step. On distributions that include ``openstack-config``, you can configure this by running the following command instead: .. code-block:: console # openstack-config --set /etc/cinder/cinder.conf \ DEFAULT nfs_mount_options OPTIONS Replace OPTIONS with the mount options to be used when accessing NFS shares. See the manual page for NFS for more information on available mount options (:command:`man nfs`). #. Configure the ``cinder`` volume service to use the correct volume driver, namely ``cinder.volume.drivers.nfs.NfsDriver``. To do so, open the ``/etc/cinder/cinder.conf`` configuration file and set the volume_driver configuration key to ``cinder.volume.drivers.nfs.NfsDriver``. On distributions that include ``openstack-config``, you can configure this by running the following command instead: .. code-block:: console # openstack-config --set /etc/cinder/cinder.conf \ DEFAULT volume_driver cinder.volume.drivers.nfs.NfsDriver #. You can now restart the service to apply the configuration. .. note:: The ``nfs_sparsed_volumes`` configuration key determines whether volumes are created as sparse files and grown as needed or fully allocated up front. The default and recommended value is ``true``, which ensures volumes are initially created as sparse files. Setting ``nfs_sparsed_volumes`` to ``false`` will result in volumes being fully allocated at the time of creation. This leads to increased delays in volume creation. However, should you choose to set ``nfs_sparsed_volumes`` to ``false``, you can do so directly in ``/etc/cinder/cinder.conf``. On distributions that include ``openstack-config``, you can configure this by running the following command instead: .. code-block:: console # openstack-config --set /etc/cinder/cinder.conf \ DEFAULT nfs_sparsed_volumes false .. warning:: If a client host has SELinux enabled, the ``virt_use_nfs`` boolean should also be enabled if the host requires access to NFS volumes on an instance. 
To enable this boolean, run the following command as the ``root`` user: .. code-block:: console # setsebool -P virt_use_nfs on This command also makes the boolean persistent across reboots. Run this command on all client hosts that require access to NFS volumes on an instance. This includes all compute nodes. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/doc/source/admin/over-subscription.rst0000664000175000017500000001354700000000000022465 0ustar00zuulzuul00000000000000.. _over_subscription: ===================================== Oversubscription in thin provisioning ===================================== OpenStack Block Storage enables you to choose a volume back end based on virtual capacities for thin provisioning using the oversubscription ratio. A reference implementation is provided for the default LVM driver. The illustration below uses the LVM driver as an example. Configure oversubscription settings ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ To support oversubscription in thin provisioning, a flag ``max_over_subscription_ratio`` is introduced into ``cinder.conf``. This is a float representation of the oversubscription ratio when thin provisioning is involved. The default ratio is 20.0, meaning provisioned capacity can be 20 times the total physical capacity. A ratio of 10.5 means provisioned capacity can be 10.5 times the total physical capacity. A ratio of 1.0 means provisioned capacity cannot exceed the total physical capacity. A ratio lower than 1.0 is ignored and the default value is used instead. This parameter can also be set as ``max_over_subscription_ratio=auto``. When using auto, Cinder automatically calculates the ``max_over_subscription_ratio`` based on the provisioned capacity and the used space. This allows the creation of a larger number of volumes at the beginning of the pool's life, and then restricts creation as the free space approaches 0 or the reserved limit. .. note:: ``max_over_subscription_ratio`` can be configured for each back end when multiple-storage back ends are enabled. It is provided as a reference implementation and is used by the LVM driver. However, it is not a requirement for a driver to use this option from ``cinder.conf``. ``max_over_subscription_ratio`` is for configuring a back end. For a driver that supports multiple pools per back end, it can report this ratio for each pool. The LVM driver does not support multiple pools. When this value is set to ``auto``, the values calculated by Cinder can dynamically vary according to the pool's provisioned capacity and consumed space. The existing ``reserved_percentage`` flag is used to prevent over provisioning. This flag represents the percentage of the back-end capacity that is reserved. .. note:: There is a change in how ``reserved_percentage`` is used. It was measured against the free capacity in the past. Now it is measured against the total capacity. Capabilities ~~~~~~~~~~~~ Drivers can report the following capabilities for a back end or a pool: .. code-block:: ini thin_provisioning_support = True(or False) thick_provisioning_support = True(or False) provisioned_capacity_gb = PROVISIONED_CAPACITY max_over_subscription_ratio = MAX_RATIO Where ``PROVISIONED_CAPACITY`` is the apparent allocated space indicating how much capacity has been provisioned and ``MAX_RATIO`` is the maximum oversubscription ratio. For the LVM driver, it is ``max_over_subscription_ratio`` in ``cinder.conf``.
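As an illustration, a thin-provisioned LVM back-end section might combine these options as follows; the group name and values are an example only, not recommended settings:

.. code-block:: ini

   [lvmdriver-1]
   volume_driver = cinder.volume.drivers.lvm.LVMVolumeDriver
   volume_group = cinder-volumes-1
   # Thin LVM is required for the oversubscription ratio to take effect
   lvm_type = thin
   # Allow provisioned capacity up to 10 times the physical capacity
   max_over_subscription_ratio = 10.0
   # Reserve 5% of the total back-end capacity
   reserved_percentage = 5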
Two capabilities are added here to allow a back end or pool to claim support for thin provisioning, or thick provisioning, or both. The LVM driver reports ``thin_provisioning_support=True`` and ``thick_provisioning_support=False`` if the ``lvm_type`` flag in ``cinder.conf`` is ``thin``. Otherwise it reports ``thin_provisioning_support=False`` and ``thick_provisioning_support=True``. Volume type extra specs ~~~~~~~~~~~~~~~~~~~~~~~ If volume type is provided as part of the volume creation request, it can have the following extra specs defined: .. code-block:: python 'capabilities:thin_provisioning_support': ' True' or ' False' 'capabilities:thick_provisioning_support': ' True' or ' False' .. note:: ``capabilities`` scope key before ``thin_provisioning_support`` and ``thick_provisioning_support`` is not required. So the following works too: .. code-block:: python 'thin_provisioning_support': ' True' or ' False' 'thick_provisioning_support': ' True' or ' False' The above extra specs are used by the scheduler to find a back end that supports thin provisioning, thick provisioning, or both to match the needs of a specific volume type. Volume replication extra specs ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ OpenStack Block Storage has the ability to create volume replicas. Administrators can define a storage policy that includes replication by adjusting the cinder volume driver. Volume replication for OpenStack Block Storage helps safeguard OpenStack environments from data loss during disaster recovery. To enable replication when creating volume types, configure the cinder volume with ``capabilities:replication=" True"``. Each volume created with the replication capability set to ``True`` generates a copy of the volume on a storage back end. One use case for replication involves an OpenStack cloud environment installed across two data centers located nearby each other. The distance between the two data centers in this use case is the length of a city. At each data center, a cinder host supports the Block Storage service. Both data centers include storage back ends. Depending on the storage requirements, there can be one or two cinder hosts. The administrator accesses the ``/etc/cinder/cinder.conf`` configuration file and sets ``capabilities:replication=" True"``. If one data center experiences a service failure, administrators can redeploy the VM. The VM will run using a replicated, backed up volume on a host in the second data center. Capacity filter ~~~~~~~~~~~~~~~ In the capacity filter, ``max_over_subscription_ratio`` is used when choosing a back end if ``thin_provisioning_support`` is True and ``max_over_subscription_ratio`` is greater than 1.0. Capacity weigher ~~~~~~~~~~~~~~~~ In the capacity weigher, virtual free capacity is used for ranking if ``thin_provisioning_support`` is True. Otherwise, real free capacity will be used as before. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/doc/source/admin/ratelimit-volume-copy-bandwidth.rst0000664000175000017500000000324200000000000025170 0ustar00zuulzuul00000000000000.. _ratelimit_volume_copy_bandwidth: ================================ Rate-limit volume copy bandwidth ================================ When you create a new volume from an image or an existing volume, or when you upload a volume image to the Image service, large data copy may stress disk and network bandwidth. 
To mitigate slowdown of data access from the instances, OpenStack Block Storage supports rate-limiting of volume data copy bandwidth. Configure volume copy bandwidth limit ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ To configure the volume copy bandwidth limit, set the ``volume_copy_bps_limit`` option in the configuration groups for each back end in the ``cinder.conf`` file. This option takes an integer specifying the maximum bandwidth allowed for volume data copy, in bytes per second. If this option is set to ``0``, the rate-limit is disabled. While multiple volume data copy operations are running in the same back end, the specified bandwidth is divided among the copies. Example ``cinder.conf`` configuration file to limit volume copy bandwidth of ``lvmdriver-1`` up to 100 MiB/s: .. code-block:: ini [lvmdriver-1] volume_group=cinder-volumes-1 volume_driver=cinder.volume.drivers.lvm.LVMVolumeDriver volume_backend_name=LVM volume_copy_bps_limit=104857600 .. note:: This feature requires libcgroup to set up blkio cgroup for disk I/O bandwidth limit. libcgroup is provided by the cgroup-tools package in Debian and Ubuntu, or by the libcgroup-tools package in Fedora, Red Hat Enterprise Linux, CentOS, openSUSE, and SUSE Linux Enterprise. .. note:: Some back ends which use remote file systems such as NFS are not supported by this feature. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/doc/source/admin/replication-in-openstack.rst0000664000175000017500000003166700000000000023671 0ustar00zuulzuul00000000000000======================== Replication in OpenStack ======================== Replication provides a Disaster Recovery (DR) solution for mission-critical workloads. This guide provides a step-by-step procedure for configuring and using the Cinder replication feature in your own deployment. There are two parts to the feature: the Cinder side and the driver side. The Cinder side steps should be common, but the driver side steps may differ. This guide uses RBD as the reference driver for the procedure. Prerequisites ------------- - You should have two back-end clusters - The Cinder driver should support replication See :doc:`../reference/support-matrix` to find out which back ends support replication. Enable Replication ------------------ CEPH ^^^^ Reference: https://docs.ceph.com/en/latest/rbd/rbd-mirroring *NOTE*: These steps are Ceph specific and are tested against the Pacific release of Ceph. Make sure that: - A pool with the same name exists on both storage clusters. - A pool contains journal-enabled images you want to mirror. STEPS """"" * Get shell access for the primary and secondary Ceph clusters .. code-block:: console site-a # sudo cephadm shell --fsid -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring site-b # sudo cephadm shell --fsid -c /etc/ceph2/ceph.conf -k /etc/ceph2/ceph.client.admin.keyring * Enable RBD mirroring on both hosts .. code-block:: console site-a # ceph orch apply rbd-mirror --placement= site-b # ceph orch apply rbd-mirror --placement= * Enable image level mirroring .. code-block:: console site-a # rbd mirror pool enable volumes image site-b # rbd mirror pool enable volumes image * Bootstrap Peers *NOTE*: These commands need to be executed outside the cephadm shell. ..
code-block:: console site-a # sudo cephadm shell --fsid -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring -- rbd mirror pool peer bootstrap create --site-name | awk 'END{print}' > "$HOME/token_file" site-b # sudo cephadm shell --fsid -c /etc/ceph2/ceph.conf -k /etc/ceph2/ceph.client.admin.keyring -- rbd mirror pool peer bootstrap import --site-name - < "$HOME/token_file" Verification """""""""""" Verify that **Mode: image** and **Direction: rx-tx** are set in the below output. .. code-block:: console site-a # rbd mirror pool info volumes Mode: image Site Name: 55b6325e-e6b3-4b7c-91fd-64b5720c1685 Peer Sites: UUID: 544777e2-4418-4dba-8f10-03238f63990d Name: 69cc3310-8dd4-4656-a75b-64d4890b0ca6 Mirror UUID: Direction: rx-tx Client: client.rbd-mirror-peer .. code-block:: console site-b # rbd mirror pool info volumes Mode: image Site Name: 69cc3310-8dd4-4656-a75b-64d4890b0ca6 Peer Sites: UUID: a102dd15-cc37-4df6-acf1-266ec0248a37 Name: 55b6325e-e6b3-4b7c-91fd-64b5720c1685 Mirror UUID: Direction: rx-tx Client: client.rbd-mirror-peer CINDER ^^^^^^ STEPS """"" * Set the ``replication_device`` values in ``cinder.conf`` file. .. code-block:: console replication_device = backend_id:,conf:,user:,secret_uuid: * Create a replicated volume type. Note that we've used the ``volume_backend_name=ceph`` here which can be different for your deployment. .. code-block:: console openstack volume type create --property replication_enabled=' True' --property volume_backend_name='ceph' ceph Verification """""""""""" - Create a volume with replicated volume type .. code-block:: console openstack volume create --type ceph --size 1 replicated-volume - Confirm on RBD side that a replica is created On site-a, you will see **mirroring primary: true** .. code-block:: console site-a # rbd info volumes/volume-d217e292-0a98-4572-ae68-a4c40b73a278 rbd image 'volume-d217e292-0a98-4572-ae68-a4c40b73a278': size 1 GiB in 256 objects order 22 (4 MiB objects) snapshot_count: 0 id: a9ebeef62570 block_name_prefix: rbd_data.a9ebeef62570 format: 2 features: layering, exclusive-lock, object-map, fast-diff, deep-flatten, journaling op_features: flags: create_timestamp: Thu May 15 14:15:04 2025 access_timestamp: Thu May 15 14:15:04 2025 modify_timestamp: Thu May 15 14:15:04 2025 journal: a9ebeef62570 mirroring state: enabled mirroring mode: journal mirroring global id: e8f583ed-abab-489c-b9d5-ef68c0a1b56f mirroring primary: true On site-b, you will see **mirroring primary: false** .. code-block:: console rbd ls volumes volume-d217e292-0a98-4572-ae68-a4c40b73a278 rbd info volumes/volume-d217e292-0a98-4572-ae68-a4c40b73a278 rbd image 'volume-d217e292-0a98-4572-ae68-a4c40b73a278': size 1 GiB in 256 objects order 22 (4 MiB objects) snapshot_count: 0 id: 6a993924cde block_name_prefix: rbd_data.6a993924cde format: 2 features: layering, exclusive-lock, object-map, fast-diff, deep-flatten, journaling op_features: flags: create_timestamp: Thu May 15 14:15:06 2025 access_timestamp: Thu May 15 14:15:06 2025 modify_timestamp: Thu May 15 14:15:06 2025 journal: 6a993924cde mirroring state: enabled mirroring mode: journal mirroring global id: e8f583ed-abab-489c-b9d5-ef68c0a1b56f mirroring primary: false Failover of a Boot From Volume (BFV) Server ------------------------------------------- * Create a bootable replicated volume .. code-block:: console openstack volume create --type ceph --image --size 1 test-bootable-replicated * Launch a server from the volume .. 
code-block:: console openstack server create --flavor c1 --nic=none --volume test-bfv-server * Create a file to write data to the VM disk .. code-block:: console $ cat > failover-dr < # Before failover > this should be consistent before/after failover > EOF * Failover the replicated cinder backend .. code-block:: console cinder failover-host @ * Shelve/unshelve the server. (This is required to remove the connection from the volume in primary backend and create a new connection to the volume replica in secondary backend) .. code-block:: console openstack server shelve openstack server unshelve Verification ^^^^^^^^^^^^ * Verify that the connection is now made from secondary cluster .. code-block:: console # In cinder-volume logs, we can see the ``hosts``, ``cluster_name`` and ``auth_username`` fields will point to secondary cluster Connection info returned from driver {'name': 'volumes/volume-e310359c-6587-4454-9a9c-a590b50dd4a5', 'hosts': ['127.0.0.1'], 'ports': ['6789'], 'cluster_name': 'ceph2', 'auth_enabled': True, 'auth_username': 'cinder2', 'secret_type': '***', 'secret_uuid': '***', 'volume_id': 'e310359c-6587-4454-9a9c-a590b50dd4a5', 'discard': True, 'qos_specs': None, 'access_mode': 'rw', 'encrypted': False, 'cacheable': False, 'driver_volume_type': 'rbd', 'attachment_id': 'b691cd50-83a1-4484-8081-7120a5cad054', 'enforce_multipath': True} * Confirm that the data written before failover is persistent. .. code-block:: console $ cat failover-dr # Before failover this should be consistent before/after failover Failback of a Boot From Volume (BFV) Server ------------------------------------------- * Create a file and write data to the VM disk. (Note that the volume backend is in failover mode and we are writing to the replica disk in secondary backend.) .. code-block:: console $ cat > failover-dr < # Before Failback > this should be consistent before/after failback > EOF * Failback to primary backend .. code-block:: console cinder failover-host @ --backend_id default * Shelve/Unshelve the server (This is required to remove the connection from the replica volume in secondary backend and create a new connection to the original volume in primary backend) .. code-block:: console openstack server shelve openstack server unshelve Verification ^^^^^^^^^^^^ * Verify that the connection is now made from primary cluster .. code-block:: console # In cinder-volume logs, we can see the ``hosts``, ``cluster_name`` and ``auth_username`` fields will point to primary cluster Connection info returned from driver {'name': 'volumes/volume-e310359c-6587-4454-9a9c-a590b50dd4a5', 'hosts': ['10.0.79.218'], 'ports': ['6789'], 'cluster_name': 'ceph', 'auth_enabled': True, 'auth_username': 'cinder', 'secret_type': '***', 'secret_uuid': '***', 'volume_id': 'e310359c-6587-4454-9a9c-a590b50dd4a5', 'discard': True, 'qos_specs': None, 'access_mode': 'rw', 'encrypted': False, 'cacheable': False, 'driver_volume_type': 'rbd', 'attachment_id': '2c8bb96b-5d5c-444c-aba5-13272b673b34', 'enforce_multipath': True} * Confirm that the data written before failback is persistent. .. code-block:: console $ cat failback-dr # Before Failback this should be consistent before/after failback Failover of a External Data Volume ---------------------------------- * Create a test server .. code-block:: console openstack server create --flavor c1 --nic=none --image test-server * Create and attach data volume to it .. 
code-block:: console openstack volume create --type ceph --size 1 replicated-vol openstack server add volume * Write data to the volume. Note that creating a filesystem and mounting the device are implied here. .. code-block:: console $ cat > failover-dr < # Before failover > this should be consistent before/after failover > EOF * Detach and attach the external data volume .. code-block:: console openstack server remove volume openstack server add volume Verification ^^^^^^^^^^^^ * Verify that the connection is now made from secondary cluster .. code-block:: console # In cinder-volume logs, we can see the ``hosts``, ``cluster_name`` and ``auth_username`` fields will point to secondary cluster Connection info returned from driver {'name': 'volumes/volume-437573fd-08e2-42c9-b658-2f982bc0cdd2', 'hosts': ['127.0.0.1'], 'ports': ['6789'], 'cluster_name': 'ceph2', 'auth_enabled': True, 'auth_username': 'cinder2', 'secret_type': '***', 'secret_uuid': '***', 'volume_id': '437573fd-08e2-42c9-b658-2f982bc0cdd2', 'discard': True, 'qos_specs': None, 'access_mode': 'rw', 'encrypted': False, 'cacheable': False, 'driver_volume_type': 'rbd', 'attachment_id': '595bd265-4212-4d9a-8d48-ba6fb59d19fe', 'enforce_multipath': True} * Verify that the data exists after failover. NOTE that in some cases, the data might/might not be persistent depending on the type of replication i.e. async or sync. .. code-block:: console $ cat failover-dr # Before failover this should be consistent before/after failover Failback of a External Data Volume ---------------------------------- * Create a file and write data to the external data volume. (Note that the volume backend is in failover mode and we are writing to the replica disk in secondary backend.) .. code-block:: console $ cat > failback-dr < # Before Failback > this should be consistent before/after failback > EOF * Failback to primary backend .. code-block:: console cinder failover-host @ --backend_id default * Detach and attach the external data volume .. code-block:: console openstack server remove volume openstack server add volume Verification ^^^^^^^^^^^^ * Verify that the connection is now made from primary cluster .. code-block:: console # In cinder-volume logs, we can see the ``hosts``, ``cluster_name`` and ``auth_username`` fields will point to primary cluster Connection info returned from driver {'name': 'volumes/volume-437573fd-08e2-42c9-b658-2f982bc0cdd2', 'hosts': ['10.0.79.218'], 'ports': ['6789'], 'cluster_name': 'ceph', 'auth_enabled': True, 'auth_username': 'cinder', 'secret_type': '***', 'secret_uuid': '***', 'volume_id': '437573fd-08e2-42c9-b658-2f982bc0cdd2', 'discard': True, 'qos_specs': None, 'access_mode': 'rw', 'encrypted': False, 'cacheable': False, 'driver_volume_type': 'rbd', 'attachment_id': 'b4e0c0a6-50b6-4ff3-83a5-a3da7be0e18c', 'enforce_multipath': True} * Confirm that the data written before failback is persistent. NOTE that in some cases, the data might/might not be persistent depending on the type of replication i.e. async or sync. .. code-block:: console $ cat failback-dr # Before Failback this should be consistent before/after failback ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/doc/source/admin/security.rst0000664000175000017500000000604300000000000020630 0ustar00zuulzuul00000000000000======== Security ======== Network traffic ~~~~~~~~~~~~~~~ Depending on your deployment's security requirements, you might be required to encrypt network traffic. 
This can be accomplished with TLS. There are multiple deployment options, with the most common and recommended ones being: - Only encrypt traffic between clients and public endpoints. This approach results in fewer certificates to manage, and we refer to it as public TLS. Public endpoints, in this sense, are endpoints only exposed to end-users. Traffic between internal endpoints is not encrypted. - Leverages TLS for all endpoints in the entire deployment, including internal endpoints of the OpenStack services and with auxiliary services such as the database and the message broker. You can look at `TripleO's documentation on TLS`_ for examples on how to do this. Cinder drivers should support secure TLS/SSL communication between the cinder volume service and the backend, as configured by the ``driver_ssl_cert_verify`` and ``driver_ssl_cert_path`` options in ``cinder.conf``. If unsure whether your driver supports TLS/SSL, please check the driver's specific page in the :ref:`volume-drivers` page or contact the vendor. Data at rest ~~~~~~~~~~~~ Volumes' data can be secured at rest using Cinder's volume encryption feature. For encryption keys Cinder uses a Key management service, with Barbican being the recommended service. More information on encryption can be found on the :ref:`volume-encryption` section. Data leakage ~~~~~~~~~~~~ Some users and admins worry about data leakage between OpenStack projects or users caused by a new volume containing partial or full data from a previously deleted volume. These concerns are sometimes instigated by the ``volume_clear`` and ``volume_clear_size`` configuration options, but these options are only relevant to the LVM driver, and only when using thick volumes (which are not the default, thin volumes are). Writing data on a Cinder volume as a generic mechanism to prevent data leakage is not implemented for other drivers because it does not ensure that the data will be actually erased on the physical disks, as the storage solution could be doing copy-on-write or other optimizations. Thin provisioned volumes return zeros for unallocated blocks, so we don't have to worry about data leakage. As for thick volumes, each of the individual Cinder drivers must ensure that data from a deleted volume can never leak to a newly created volume. This prevents other OpenStack projects and users from being able to get data from deleted volumes, but since the data may still be present on the physical disks, somebody with physical access to the disks may still be able to retrieve that data. For those concerned with this, we recommend using encrypted volumes or read your storage solution's documentation or contact your vendor to see if they have some kind of clear policy option available on their storage solution. .. _TripleO's documentation on TLS: https://docs.openstack.org/project-deploy-guide/tripleo-docs/latest/features/tls-introduction.html ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/doc/source/admin/troubleshoot.rst0000664000175000017500000000106500000000000021511 0ustar00zuulzuul00000000000000============================== Troubleshoot your installation ============================== This section provides useful tips to help you troubleshoot your Block Storage installation. .. 
toctree:: :maxdepth: 1 ts-cinder-config.rst ts-multipath-warn.rst ts-HTTP-bad-req-in-cinder-vol-log.rst ts-duplicate-3par-host.rst ts-failed-attach-vol-after-detach.rst ts-failed-attach-vol-no-sysfsutils.rst ts-failed-connect-vol-FC-SAN.rst ts-no-emulator-x86-64.rst ts-non-existent-host.rst ts-non-existent-vlun.rst ts-db-cpu-spikes.rst ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/doc/source/admin/ts-HTTP-bad-req-in-cinder-vol-log.rst0000664000175000017500000000530600000000000024721 0ustar00zuulzuul00000000000000===================================== HTTP bad request in cinder volume log ===================================== Problem ~~~~~~~ These errors appear in the ``cinder-volume.log`` file: .. code-block:: console 2013-05-03 15:16:33 INFO [cinder.volume.manager] Updating volume status 2013-05-03 15:16:33 DEBUG [hp3parclient.http] REQ: curl -i https://10.10.22.241:8080/api/v1/cpgs -X GET -H "X-Hp3Par-Wsapi-Sessionkey: 48dc-b69ed2e5 f259c58e26df9a4c85df110c-8d1e8451" -H "Accept: application/json" -H "User-Agent: python-3parclient" 2013-05-03 15:16:33 DEBUG [hp3parclient.http] RESP:{'content-length': 311, 'content-type': 'text/plain', 'status': '400'} 2013-05-03 15:16:33 DEBUG [hp3parclient.http] RESP BODY:Second simultaneous read on fileno 13 detected. Unless you really know what you're doing, make sure that only one greenthread can read any particular socket. Consider using a pools.Pool. If you do know what you're doing and want to disable this error, call eventlet.debug.hub_multiple_reader_prevention(False) 2013-05-03 15:16:33 ERROR [cinder.manager] Error during VolumeManager._report_driver_status: Bad request (HTTP 400) Traceback (most recent call last): File "/usr/lib/python2.7/dist-packages/cinder/manager.py", line 167, in periodic_tasks task(self, context) File "/usr/lib/python2.7/dist-packages/cinder/volume/manager.py", line 690, in _report_driver_status volume_stats = self.driver.get_volume_stats(refresh=True) File "/usr/lib/python2.7/dist-packages/cinder/volume/drivers/san/hp/hp_3par_fc.py", line 77, in get_volume_stats stats = self.common.get_volume_stats(refresh, self.client) File "/usr/lib/python2.7/dist-packages/cinder/volume/drivers/san/hp/hp_3par_common.py", line 421, in get_volume_stats cpg = client.getCPG(self.config.hp3par_cpg) File "/usr/lib/python2.7/dist-packages/hp3parclient/client.py", line 231, in getCPG cpgs = self.getCPGs() File "/usr/lib/python2.7/dist-packages/hp3parclient/client.py", line 217, in getCPGs response, body = self.http.get('/cpgs') File "/usr/lib/python2.7/dist-packages/hp3parclient/http.py", line 255, in get return self._cs_request(url, 'GET', **kwargs) File "/usr/lib/python2.7/dist-packages/hp3parclient/http.py", line 224, in _cs_request **kwargs) File "/usr/lib/python2.7/dist-packages/hp3parclient/http.py", line 198, in _time_request resp, body = self.request(url, method, **kwargs) File "/usr/lib/python2.7/dist-packages/hp3parclient/http.py", line 192, in request raise exceptions.from_response(resp, body) HTTPBadRequest: Bad request (HTTP 400) Solution ~~~~~~~~ You need to update your copy of the ``hp_3par_fc.py`` driver which contains the synchronization code. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/doc/source/admin/ts-cinder-config.rst0000664000175000017500000001431600000000000022116 0ustar00zuulzuul00000000000000============================================ Troubleshoot the Block Storage configuration ============================================ Most Block Storage errors are caused by incorrect volume configurations that result in volume creation failures. To resolve these failures, review these logs: - ``cinder-api`` log (``/var/log/cinder/api.log``) - ``cinder-volume`` log (``/var/log/cinder/volume.log``) The ``cinder-api`` log is useful for determining if you have endpoint or connectivity issues. If you send a request to create a volume and it fails, review the ``cinder-api`` log to determine whether the request made it to the Block Storage service. If the request is logged and you see no errors or tracebacks, check the ``cinder-volume`` log for errors or tracebacks. .. note:: Create commands are listed in the ``cinder-api`` log. These entries in the ``cinder.conf`` file can be used to assist in troubleshooting your Block Storage configuration. .. code-block:: console # Print debugging output (set logging level to DEBUG instead # of default WARNING level). (boolean value) # debug=false # Log output to standard error (boolean value) # use_stderr=true # Default file mode used when creating log files (string # value) # logfile_mode=0644 # format string to use for log messages with context (string # value) # logging_context_format_string=%(asctime)s.%(msecs)03d %(levelname)s # %(name)s [%(request_id)s %(user)s %(tenant)s] %(instance)s%(message)s # format string to use for log mes #logging_default_format_string=%(asctime)s. # %(msecs)03d %(process)d %(levelname)s %(name)s [-] %(instance)s%(message)s # data to append to log format when level is DEBUG (string # value) # logging_debug_format_suffix=%(funcName)s %(pathname)s:%(lineno)d # prefix each line of exception output with this format # (string value) # logging_exception_prefix=%(asctime)s.%(msecs)03d %(process)d TRACE %(name)s # %(instance)s # list of logger=LEVEL pairs (list value) # default_log_levels=amqplib=WARN,sqlalchemy=WARN,boto=WARN,suds=INFO, # keystone=INFO,eventlet.wsgi.server=WARNsages without context # (string value) # If an instance is passed with the log message, format it # like this (string value) # instance_format="[instance: %(uuid)s]" # If an instance UUID is passed with the log message, format # it like this (string value) #instance_uuid_format="[instance: %(uuid)s] " # Format string for %%(asctime)s in log records. Default: # %(default)s (string value) # log_date_format=%Y-%m-%d %H:%M:%S # (Optional) Name of log file to output to. If not set, # logging will go to stdout. (string value) # log_file= # (Optional) The directory to keep log files in (will be # prepended to --log-file) (string value) # log_dir= # instance_uuid_format="[instance: %(uuid)s]" # If this option is specified, the logging configuration file # specified is used and overrides any other logging options # specified. Please see the Python logging module # documentation for details on logging configuration files. # (string value) # Use syslog for logging. (boolean value) # use_syslog=false # syslog facility to receive log lines (string value) # syslog_log_facility=LOG_USER # log_config= These common issues might occur during configuration, and the following potential solutions describe how to address the issues. 
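As a first step for any of the issues below, it usually helps to raise the logging level so that the relevant errors and tracebacks are captured; a minimal sketch using the options shown above (the values are suggestions only):

.. code-block:: ini

   [DEFAULT]
   # Log DEBUG output instead of the default WARNING level
   debug = true
   # Directory to keep log files in
   log_dir = /var/log/cinder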
Issues with ``state_path`` and ``volumes_dir`` settings ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Problem ------- The OpenStack Block Storage uses ``tgtd`` as the default iSCSI helper and implements persistent targets. This means that in the case of a ``tgt`` restart, or even a node reboot, your existing volumes on that node will be restored automatically with their original :term:`IQN `. By default, Block Storage uses a ``state_path`` variable, which if installing with Yum or APT should be set to ``/var/lib/cinder/``. The next part is the ``volumes_dir`` variable, by default this appends a ``volumes`` directory to the ``state_path``. The result is a file-tree: ``/var/lib/cinder/volumes/``. Solution -------- In order to ensure nodes are restored to their original IQN, the iSCSI target information needs to be stored in a file on creation that can be queried in case of restart of the ``tgt daemon``. While the installer should handle all this, it can go wrong. If you have trouble creating volumes and this directory does not exist you should see an error message in the ``cinder-volume`` log indicating that the ``volumes_dir`` does not exist, and it should provide information about which path it was looking for. The persistent tgt include file ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Problem ------- The Block Storage service may have issues locating the persistent ``tgt include`` file. Along with the ``volumes_dir`` option, the iSCSI target driver also needs to be configured to look in the correct place for the persistent ``tgt include `` file. This is an entry in the ``/etc/tgt/conf.d`` file that should have been set during the OpenStack installation. Solution -------- If issues occur, verify that you have a ``/etc/tgt/conf.d/cinder.conf`` file. If the file is not present, create it with: .. code-block:: console # echo 'include /var/lib/cinder/volumes/ *' >> /etc/tgt/conf.d/cinder.conf Failed to create iscsi target error in the ``cinder-volume.log`` file ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Problem ------- .. code-block:: console 2013-03-12 01:35:43 1248 TRACE cinder.openstack.common.rpc.amqp \ ISCSITargetCreateFailed: \ Failed to create iscsi target for volume \ volume-137641b2-af72-4a2f-b243-65fdccd38780. You might see this error in ``cinder-volume.log`` after trying to create a volume that is 1 GB. Solution -------- To fix this issue, change the content of the ``/etc/tgt/targets.conf`` file from ``include /etc/tgt/conf.d/*.conf`` to ``include /etc/tgt/conf.d/cinder_tgt.conf``, as follows: .. code-block:: shell include /etc/tgt/conf.d/cinder_tgt.conf include /etc/tgt/conf.d/cinder.conf default-driver iscsi Restart ``tgt`` and ``cinder-*`` services, so they pick up the new configuration. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/doc/source/admin/ts-db-cpu-spikes.rst0000664000175000017500000000343200000000000022052 0ustar00zuulzuul00000000000000===================================== Database CPU spikes during operations ===================================== Query load upon the database can become a bottleneck that cascades across a deployment and ultimately degrades not only the Cinder service but also the whole OpenStack deployment. 
Often, depending on load, query patterns, periodic tasks, and so on, additional indexes may be needed to help provide hints to the database so it can most efficiently attempt to reduce the number of rows which need to be examined in order to return a result set. Adding indexes -------------- In older releases, before 2023.1 (Antelope), there were some tables that performed poorly in the presence of a large number of deleted resources (volumes, snapshots, backups, etc.) which resulted in high CPU loads on the DB servers not only when listing those resources, but also when doing some operations on them. This was resolved by adding appropriate indexes to them. The example below is specific to MariaDB/MySQL, but the syntax should be easy to modify for operators using PostgreSQL, and it represents the changes that older releases could add to resolve these DB server CPU spikes in such a way that they would not conflict with the ones that Cinder introduced in 2023.1 (Antelope). .. code-block:: sql use cinder; create index groups_deleted_project_id_idx on groups (deleted, project_id); create index group_snapshots_deleted_project_id_idx on groups (deleted, project_id); create index volumes_deleted_project_id_idx on volumes (deleted, project_id); create index volumes_deleted_host_idx on volumes (deleted, host); create index snapshots_deleted_project_id_idx on snapshots (deleted, project_id); create index backups_deleted_project_id_idx on backups (deleted, project_id); ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/doc/source/admin/ts-duplicate-3par-host.rst0000664000175000017500000000164600000000000023175 0ustar00zuulzuul00000000000000=================== Duplicate 3PAR host =================== Problem ~~~~~~~ This error may be caused by a volume being exported outside of OpenStack using a host name different from the system name that OpenStack expects. This error could be displayed with the :term:`IQN ` if the host was exported using iSCSI: .. code-block:: console Duplicate3PARHost: 3PAR Host already exists: Host wwn 50014380242B9750 \ already used by host cld4b5ubuntuW(id = 68. The hostname must be called\ 'cld4b5ubuntu'. Solution ~~~~~~~~ Change the 3PAR host name to match the one that OpenStack expects. The 3PAR host constructed by the driver uses just the local host name, not the fully qualified domain name (FQDN) of the compute host. For example, if the FQDN was *myhost.example.com*, just *myhost* would be used as the 3PAR host name. IP addresses are not allowed as host names on the 3PAR storage server. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/doc/source/admin/ts-failed-attach-vol-after-detach.rst0000664000175000017500000000311100000000000025211 0ustar00zuulzuul00000000000000======================================= Failed to attach volume after detaching ======================================= Problem ~~~~~~~ Failed to attach a volume after detaching the same volume. Solution ~~~~~~~~ You must change the device name on the :command:`nova-attach` command. The VM might not clean up after a :command:`nova-detach` command runs. This example shows how the :command:`nova-attach` command fails when you use the ``vdb``, ``vdc``, or ``vdd`` device names: .. code-block:: console # ls -al /dev/disk/by-path/ total 0 drwxr-xr-x 2 root root 200 2012-08-29 17:33 . drwxr-xr-x 5 root root 100 2012-08-29 17:33 ..
lrwxrwxrwx 1 root root 9 2012-08-29 17:33 pci-0000:00:04.0-virtio-pci-virtio0 -> ../../vda lrwxrwxrwx 1 root root 10 2012-08-29 17:33 pci-0000:00:04.0-virtio-pci-virtio0-part1 -> ../../vda1 lrwxrwxrwx 1 root root 10 2012-08-29 17:33 pci-0000:00:04.0-virtio-pci-virtio0-part2 -> ../../vda2 lrwxrwxrwx 1 root root 10 2012-08-29 17:33 pci-0000:00:04.0-virtio-pci-virtio0-part5 -> ../../vda5 lrwxrwxrwx 1 root root 9 2012-08-29 17:33 pci-0000:00:06.0-virtio-pci-virtio2 -> ../../vdb lrwxrwxrwx 1 root root 9 2012-08-29 17:33 pci-0000:00:08.0-virtio-pci-virtio3 -> ../../vdc lrwxrwxrwx 1 root root 9 2012-08-29 17:33 pci-0000:00:09.0-virtio-pci-virtio4 -> ../../vdd lrwxrwxrwx 1 root root 10 2012-08-29 17:33 pci-0000:00:09.0-virtio-pci-virtio4-part1 -> ../../vdd1 You might also have this problem after attaching and detaching the same volume from the same VM with the same mount point multiple times. In this case, restart the KVM host. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/doc/source/admin/ts-failed-attach-vol-no-sysfsutils.rst0000664000175000017500000000171500000000000025532 0ustar00zuulzuul00000000000000================================================= Failed to attach volume, systool is not installed ================================================= Problem ~~~~~~~ This warning and error occurs if you do not have the required ``sysfsutils`` package installed on the compute node: .. code-block:: console WARNING nova.virt.libvirt.utils [req-1200f887-c82b-4e7c-a891-fac2e3735dbb\ admin admin|req-1200f887-c82b-4e7c-a891-fac2e3735dbb admin admin] systool\ is not installed ERROR nova.compute.manager [req-1200f887-c82b-4e7c-a891-fac2e3735dbb admin\ admin|req-1200f887-c82b-4e7c-a891-fac2e3735dbb admin admin] [instance: df834b5a-8c3f-477a-be9b-47c97626555c|instance: df834b5a-8c3f-47\ 7a-be9b-47c97626555c] Failed to attach volume 13d5c633-903a-4764-a5a0-3336945b1db1 at /dev/vdk. Solution ~~~~~~~~ Run the following command on the compute node to install the ``sysfsutils`` packages: .. code-block:: console # apt-get install sysfsutils ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/doc/source/admin/ts-failed-connect-vol-FC-SAN.rst0000664000175000017500000000224400000000000023762 0ustar00zuulzuul00000000000000================================== Failed to connect volume in FC SAN ================================== Problem ~~~~~~~ The compute node failed to connect to a volume in a Fibre Channel (FC) SAN configuration. The WWN may not be zoned correctly in your FC SAN that links the compute host to the storage array: .. code-block:: console ERROR nova.compute.manager [req-2ddd5297-e405-44ab-aed3-152cd2cfb8c2 admin\ demo|req-2ddd5297-e405-44ab-aed3-152cd2cfb8c2 admin demo] [instance: 60ebd\ 6c7-c1e3-4bf0-8ef0-f07aa4c3d5f3|instance: 60ebd6c7-c1e3-4bf0-8ef0-f07aa4c3\ d5f3] Failed to connect to volume 6f6a6a9c-dfcf-4c8d-b1a8-4445ff883200 while\ attaching at /dev/vdjTRACE nova.compute.manager [instance: 60ebd6c7-c1e3-4\ bf0-8ef0-f07aa4c3d5f3|instance: 60ebd6c7-c1e3-4bf0-8ef0-f07aa4c3d5f3] Traceback (most recent call last):…f07aa4c3d5f3\] ClientException: The\ server has either erred or is incapable of performing the requested\ operation.(HTTP 500)(Request-ID: req-71e5132b-21aa-46ee-b3cc-19b5b4ab2f00) Solution ~~~~~~~~ The network administrator must configure the FC SAN fabric by correctly zoning the WWN (port names) from your compute node HBAs. 
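To find the WWNs (port names) that must be zoned, you can read them from sysfs on the compute node; the values shown below are illustrative:

.. code-block:: console

   # cat /sys/class/fc_host/host*/port_name
   0x10000090fa1b2c3d
   0x10000090fa1b2c3e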
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/doc/source/admin/ts-multipath-warn.rst0000664000175000017500000000162100000000000022356 0ustar00zuulzuul00000000000000========================== Multipath call failed exit ========================== Problem ~~~~~~~ Multipath call failed exit. This warning occurs in the Compute log if you do not have the optional ``multipath-tools`` package installed on the compute node. This is an optional package and the volume attachment does work without the multipath tools installed. If the ``multipath-tools`` package is installed on the compute node, it is used to perform the volume attachment. The IDs in your message are unique to your system. .. code-block:: console WARNING nova.storage.linuxscsi [req-cac861e3-8b29-4143-8f1b-705d0084e571 admin admin|req-cac861e3-8b29-4143-8f1b-705d0084e571 admin admin] Multipath call failed exit (96) Solution ~~~~~~~~ Run the following command on the compute node to install the ``multipath-tools`` packages. .. code-block:: console # apt-get install multipath-tools ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/doc/source/admin/ts-no-emulator-x86-64.rst0000664000175000017500000000113300000000000022514 0ustar00zuulzuul00000000000000========================================= Cannot find suitable emulator for x86_64 ========================================= Problem ~~~~~~~ When you attempt to create a VM, the error shows the VM is in the ``BUILD`` then ``ERROR`` state. Solution ~~~~~~~~ On the KVM host, run :command:`cat /proc/cpuinfo`. Make sure the ``vmx`` or ``svm`` flags are set. Follow the instructions in the `Enable KVM `__ section in the OpenStack Configuration Reference to enable hardware virtualization support in your BIOS. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/doc/source/admin/ts-non-existent-host.rst0000664000175000017500000000154400000000000023014 0ustar00zuulzuul00000000000000================= Non-existent host ================= Problem ~~~~~~~ This error could be caused by a volume being exported outside of OpenStack using a host name different from the system name that OpenStack expects. This error could be displayed with the :term:`IQN ` if the host was exported using iSCSI. .. code-block:: console 2013-04-19 04:02:02.336 2814 ERROR cinder.openstack.common.rpc.common [-] Returning exception Not found (HTTP 404) NON_EXISTENT_HOST - HOST '10' was not found to caller. Solution ~~~~~~~~ Host names constructed by the driver use just the local host name, not the fully qualified domain name (FQDN) of the Compute host. For example, if the FQDN was **myhost.example.com**, just **myhost** would be used as the 3PAR host name. IP addresses are not allowed as host names on the 3PAR storage server. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/doc/source/admin/ts-non-existent-vlun.rst0000664000175000017500000000112500000000000023016 0ustar00zuulzuul00000000000000================= Non-existent VLUN ================= Problem ~~~~~~~ This error occurs if the 3PAR host exists with the correct host name that the OpenStack Block Storage drivers expect but the volume was created in a different domain. .. 
code-block:: console HTTPNotFound: Not found (HTTP 404) NON_EXISTENT_VLUN - VLUN 'osv-DqT7CE3mSrWi4gZJmHAP-Q' was not found. Solution ~~~~~~~~ The ``hpe3par_domain`` configuration items either need to be updated to use the domain the 3PAR host currently resides in, or the 3PAR host needs to be moved to the domain that the volume was created in. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/doc/source/admin/upgrades.rst0000664000175000017500000003133400000000000020574 0ustar00zuulzuul00000000000000======== Upgrades ======== Cinder aims to provide upgrades with minimal downtime. This should be achieved for both data and control plane. As Cinder doesn't interfere with data plane, its upgrade shouldn't affect any volumes being accessed by virtual machines. Keeping the control plane running during an upgrade is more difficult. This document's goal is to provide preliminaries and a detailed procedure of such upgrade. Concepts -------- Here are the key concepts you need to know before reading the section on the upgrade process: RPC version pinning ~~~~~~~~~~~~~~~~~~~ Through careful RPC versioning, newer services are able to talk to older services (and vice-versa). The versions are autodetected using information reported in ``services`` table. In case of receiving ``CappedVersionUnknown`` or ``ServiceTooOld`` exceptions on service start, you're probably having some old orphaned records in that table. Graceful service shutdown ~~~~~~~~~~~~~~~~~~~~~~~~~ Many cinder services are python processes listening for messages on an AMQP queue. When the operator sends SIGTERM signal to the process, it stops getting new work from its queue, completes any outstanding work and then terminates. During this process, messages can be left on the queue for when the python process starts back up. This gives us a way to shutdown a service using older code, and start up a service using newer code with minimal impact. .. note:: Waiting for completion of long-running operations (e.g. slow volume copy operation) may take a while. .. note:: This was tested with RabbitMQ messaging backend and may vary with other backends. Database upgrades ~~~~~~~~~~~~~~~~~ Cinder has two types of database upgrades in use: - Schema migrations - Data migrations Schema migrations are defined in ``cinder/db/migrations/versions``. They are the routines that transform our database structure, which should be additive and able to be applied to a running system before service code has been upgraded. Data migrations are banned from schema migration scripts and are instead defined in ``cinder/db/api.py``. They are kept separate to make DB schema migrations less painful to execute. Instead, the migrations are executed by a background process in a manner that doesn't interrupt running services (you can also execute online data migrations with services turned off if you're doing a cold upgrade). The ``cinder-manage db online_data_migrations`` utility can be used for this purpose. Before upgrading N to N+1, you need to run this tool in the background until it tells you no more migrations are needed. Note that you won't be able to apply N+1's schema migrations before completing N's online data migrations. For information on developing your own schema or data migrations as part of a feature or bugfix, refer to :doc:`/contributor/database-migrations`. .. 
note:: Occasionally we receive reports of database issues during upgrades due to an old version of database software being used (see, for example, `Bug #1968746 `_). Upgrades are tested in the gate using the mysql or mariadb version packaged with any of the Linux distributions supported for that release. Thus, if you are using an earlier version of mysql or mariadb, you may want to do additional research before upgrading to make sure you won't run into an issue caused by us using a newer feature than your database version supports. To assist you in this, the OpenStack Technical Committee maintains a list of the Linux distributions supported for each release on the `Release based Runtimes `_ page. API load balancer draining ~~~~~~~~~~~~~~~~~~~~~~~~~~ When upgrading API nodes, you can make your load balancer only send new connections to the newer API nodes, allowing for a seamless update of your API nodes. DB prune deleted rows ~~~~~~~~~~~~~~~~~~~~~ Currently resources are soft deleted in the database, so users are able to track instances in the DB that are created and destroyed in production. However, most people have a data retention policy, of say 30 days or 90 days after which they will want to delete those entries. Not deleting those entries affects DB performance as indices grow very large and data migrations take longer as there is more data to migrate. To make pruning easier there's a ``cinder-manage db purge `` command that permanently deletes records older than specified age. Versioned object backports ~~~~~~~~~~~~~~~~~~~~~~~~~~ RPC pinning ensures new services can talk to the older service's method signatures. But many of the parameters are objects that may well be too new for the old service to understand. Cinder makes sure to backport an object to a version that it is pinned to before sending. Minimal Downtime Upgrade Procedure ---------------------------------- Plan your upgrade ~~~~~~~~~~~~~~~~~ * Read and ensure you understand the release notes for the next release. * Make a backup of your database. Cinder does not support downgrading of the database. Hence, in case of upgrade failure, restoring database from backup is the only choice. * To avoid dependency hell it is advised to have your Cinder services deployed separately in containers or Python venvs. .. note:: Cinder is basing version detection on what is reported in the ``services`` table in the DB. Before upgrade make sure you don't have any orphaned old records there, because these can block starting newer services. You can clean them up using ``cinder-manage service remove `` command. Note that there's an assumption that live upgrade can be performed only between subsequent releases. This means that you cannot upgrade N directly to N+2, you need to upgrade to N+1 first. The assumed service upgrade order is ``cinder-scheduler``, ``cinder-volume``, ``cinder-backup`` and finally ``cinder-api``. Rolling upgrade process ~~~~~~~~~~~~~~~~~~~~~~~ To reduce downtime, the services can be upgraded in a rolling fashion. It means upgrading a few services at a time. To minimise downtime you need to have HA Cinder deployment, so at the moment a service is upgraded, you'll keep other service instances running. .. rubric:: Before maintenance window * First you should execute required DB schema migrations. To achieve that without interrupting your existing installation, install new Cinder code in new venv or a container and run the DB sync (``cinder-manage db sync``). 
These schema change operations should have minimal or no effect on performance, and should not cause any operations to fail. * At this point, new columns and tables may exist in the database. These DB schema changes are done in a way that both the N and N+1 release can perform operations against the same schema. .. rubric:: During maintenance window 1. The first service is cinder-scheduler. It is load-balanced by the message queue, so the only thing you need to worry about is to shut it down gracefully (using ``SIGTERM`` signal) to make sure it will finish all the requests being processed before shutting down. Then you should upgrade the code and restart the service. 2. Repeat first step for all of your cinder-scheduler services. 3. Then you proceed to upgrade cinder-volume services. The problem here is that due to Active/Passive character of this service, you're unable to run multiple instances of cinder-volume managing a single volume backend. This means that there will be a moment when you won't have any cinder-volume in your deployment and you want that disruption to be as short as possible. .. note:: The downtime here is non-disruptive as long as it doesn't exceed the service heartbeat timeout. If you don't exceed that, then cinder-schedulers will not notice that cinder-volume is gone and the message queue will take care of queuing any RPC messages until cinder-volume is back. To make sure it's achieved, you can either lengthen the timeout by tweaking ``service_down_time`` value in ``cinder.conf``, or prepare upgraded cinder-volume on another node and do a very quick switch by shutting down older service and starting the new one just after that. Also note that in case of A/P HA configuration you need to make sure both primary and secondary c-vol have the same hostname set (you can override it using ``host`` option in ``cinder.conf``), so both will be listening on the same message queue and will accept the same messages. 4. Repeat third step for all cinder-volume services. 5. Now we should proceed with (optional) cinder-backup services. You should upgrade them in the same manner like cinder-scheduler. .. note:: Backup operations are time consuming, so shutting down a c-bak service without interrupting ongoing requests can take time. It may be useful to disable the service first using ``cinder service-disable`` command, so it won't accept new requests, and wait a reasonable amount of time until all the in-progress jobs are completed. Then you can proceed with the upgrade. To make sure the backup service finished all the ongoing requests, you can check the service logs. .. note:: Until Liberty cinder-backup was tightly coupled with cinder-volume service and needed to coexist on the same physical node. This is not true starting with Mitaka version. If you're still keeping that coupling, then your upgrade strategy for cinder-backup should be more similar to how cinder-volume is upgraded. 6. cinder-api services should go last. In HA deployment you're typically running them behind a load balancer (e.g. HAProxy), so you need to take one service instance out of the balancer, shut it down, upgrade the code and dependencies, and start the service again. Then you can plug it back into the load balancer. .. note:: You may want to start another instance of older c-api to handle the load while you're upgrading your original services. 7. Then you should repeat step 6 for all of the cinder-api services. .. 
rubric:: After maintenance window * Once all services are running the new code, double check in the DB that there are no old orphaned records in ``services`` table (Cinder doesn't remove the records when service is gone or service hostname is changed, so you need to take care of that manually; you should be able to distinguish dead records by looking at when the record was updated). Cinder is basing its RPC version detection on that, so stale records can prevent you from going forward. * Now all services are upgraded, we need to send the ``SIGHUP`` signal, so all the services clear any cached service version data. When a new service starts, it automatically detects which version of the service's RPC protocol to use, and will downgrade any communication to that version. Be advised that cinder-api service doesn't handle ``SIGHUP`` so it needs to be restarted. It's best to restart your cinder-api services as last ones, as that way you make sure API will fail fast when user requests new features on a deployment that's not fully upgraded (new features can fail when RPC messages are backported to lowest common denominator). Order of the rest of the services shouldn't matter. * Now all the services are upgraded, the system is able to use the latest version of the RPC protocol and able to access all the features of the new release. * At this point, you must also ensure you update the configuration, to stop using any deprecated features or options, and perform any required work to transition to alternative features. All the deprecated options should be supported for one cycle, but should be removed before your next upgrade is performed. * Since Ocata, you also need to run ``cinder-manage db online_data_migrations`` command to make sure data migrations are applied. The tool lets you limit the impact of the data migrations by using ``--max_count`` option to limit number of migrations executed in one run. If this option is used, the exit status will be 1 if any migrations were successful (even if others generated errors, which could be due to dependencies between migrations). The command should be rerun while the exit status is 1. If no further migrations are possible, the exit status will be 2 if some migrations are still generating errors, which requires intervention to resolve. The command should be considered completed successfully only when the exit status is 0. You need to complete all of the migrations before starting upgrade to the next version (e.g. you need to complete Ocata's data migrations before proceeding with upgrade to Pike; you won't be able to execute Pike's DB schema migrations before completing Ocata's data migrations). ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/doc/source/admin/user-visible-extra-specs.rst0000664000175000017500000002016300000000000023625 0ustar00zuulzuul00000000000000.. _user_visible_extra_specs: ======================== User visible extra specs ======================== Starting in Xena, certain volume type ``extra specs`` (i.e. properties) are considered user visible, meaning their visibility is not restricted to only cloud administrators. This feature provides regular users with more information about the volume types available to them, and lets them make more informed decisions on which volume type to choose when creating volumes. The following ``extra spec`` keys are treated as user visible: - ``RESKEY:availability_zones`` - ``multiattach`` - ``replication_enabled`` .. 
note:: * The set of user visible ``extra specs`` is a fixed list that is not configurable. * The feature is entirely policy based, and does not require a new microversion. Behavior using openstack client ------------------------------- Consider the following volume type, as viewed from an administrator's perspective. In this example, ``multiattach`` is a user visible ``extra spec`` and ``volume_backend_name`` is not. .. code-block:: console # Administrator behavior [admin@host]$ openstack volume type show vol_type +--------------------+-------------------------------------------------------+ | Field | Value | +--------------------+-------------------------------------------------------+ | access_project_ids | None | | description | None | | id | d03a0f33-e695-4f5c-b712-7d92abbf72be | | is_public | True | | name | vol_type | | properties | multiattach=' True', volume_backend_name='secret' | | qos_specs_id | None | +--------------------+-------------------------------------------------------+ Here is the output when a regular user executes the same command. Notice only the user visible ``multiattach`` property is listed. .. code-block:: console # Regular user behavior [user@host]$ openstack volume type show vol_type +--------------------+--------------------------------------+ | Field | Value | +--------------------+--------------------------------------+ | access_project_ids | None | | description | None | | id | d03a0f33-e695-4f5c-b712-7d92abbf72be | | is_public | True | | name | vol_type | | properties | multiattach=' True' | +--------------------+--------------------------------------+ The behavior for listing volume types is similar. Administrators will see all ``extra specs`` but regular users will see only user visible ``extra specs``. .. code-block:: console # Administrator behavior [admin@host]$ openstack volume type list --long +--------------------------------------+-------------+-----------+---------------------+-------------------------------------------------------+ | ID | Name | Is Public | Description | Properties | +--------------------------------------+-------------+-----------+---------------------+-------------------------------------------------------+ | d03a0f33-e695-4f5c-b712-7d92abbf72be | vol_type | True | None | multiattach=' True', volume_backend_name='secret' | | 80f38273-f4b9-4862-a4e6-87692eb66a96 | __DEFAULT__ | True | Default Volume Type | | +--------------------------------------+-------------+-----------+---------------------+-------------------------------------------------------+ # Regular user behavior [user@host]$ openstack volume type list --long +--------------------------------------+-------------+-----------+---------------------+-------------------------+ | ID | Name | Is Public | Description | Properties | +--------------------------------------+-------------+-----------+---------------------+-------------------------+ | d03a0f33-e695-4f5c-b712-7d92abbf72be | vol_type | True | None | multiattach=' True' | | 80f38273-f4b9-4862-a4e6-87692eb66a96 | __DEFAULT__ | True | Default Volume Type | | +--------------------------------------+-------------+-----------+---------------------+-------------------------+ Regular users may view these properties, but they may not modify them. Attempts to modify a user visible property by a non-administrator will fail. .. 
code-block:: console [user@host]$ openstack volume type set --property multiattach=' False' vol_type Failed to set volume type property: Policy doesn't allow volume_extension:types_extra_specs:create to be performed. (HTTP 403) Filtering with extra specs -------------------------- API microversion 3.52 adds support for using ``extra specs`` to filter the list of volume types. Regular users are able to use that feature to filter for user visible ``extra specs``. If a regular user attempts to filter on a non-user visible ``extra spec`` then an empty list is returned. .. code-block:: console # Administrator behavior [admin@host]$ cinder --os-volume-api-version 3.52 type-list \ > --filters extra_specs={"multiattach":" True"} +--------------------------------------+----------+-------------+-----------+ | ID | Name | Description | Is_Public | +--------------------------------------+----------+-------------+-----------+ | d03a0f33-e695-4f5c-b712-7d92abbf72be | vol_type | - | True | +--------------------------------------+----------+-------------+-----------+ [admin@host]$ cinder --os-volume-api-version 3.52 type-list \ > --filters extra_specs={"volume_backend_name":"secret"} +--------------------------------------+----------+-------------+-----------+ | ID | Name | Description | Is_Public | +--------------------------------------+----------+-------------+-----------+ | d03a0f33-e695-4f5c-b712-7d92abbf72be | vol_type | - | True | +--------------------------------------+----------+-------------+-----------+ # Regular user behavior [user@host]$ cinder --os-volume-api-version 3.52 type-list \ > --filters extra_specs={"multiattach":" True"} +--------------------------------------+----------+-------------+-----------+ | ID | Name | Description | Is_Public | +--------------------------------------+----------+-------------+-----------+ | d03a0f33-e695-4f5c-b712-7d92abbf72be | vol_type | - | True | +--------------------------------------+----------+-------------+-----------+ [user@host]$ cinder --os-volume-api-version 3.52 type-list \ > --filters extra_specs={"volume_backend_name":"secret"} +----+------+-------------+-----------+ | ID | Name | Description | Is_Public | +----+------+-------------+-----------+ +----+------+-------------+-----------+ Security considerations ----------------------- Cloud administrators who do not wish to expose any ``extra specs`` to regular users may restore the previous behavior by setting the following policies to their pre-Xena default values. .. code-block:: console "volume_extension:access_types_extra_specs": "rule:admin_api" "volume_extension:types_extra_specs:index": "rule:admin_api" "volume_extension:types_extra_specs:show": "rule:admin_api" To restrict regular users from using ``extra specs`` to filter the list of volume types, modify /etc/cinder/resource_filters.json to restore the *"volume_type"* entry to its pre-Xena default value. .. code-block:: console "volume_type": ["is_public"] ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/doc/source/admin/volume-backed-image.rst0000664000175000017500000000560600000000000022563 0ustar00zuulzuul00000000000000.. _volume_backed_image: =================== Volume-backed image =================== OpenStack Block Storage can quickly create a volume from an image that refers to a volume storing image data (Image-Volume). 
Compared to the other stores such as file and swift, creating a volume from a Volume-backed image performs better when the block storage driver supports efficient volume cloning. If the image is set to public in the Image service, the volume data can be shared among projects. Configure the Volume-backed image ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Volume-backed image feature requires locations information from the cinder store of the Image service. To enable the Image service to use the cinder store, add ``cinder`` to the ``stores`` option in the ``glance_store`` section of the ``glance-api.conf`` file: .. code-block:: ini stores = file, http, swift, cinder To expose locations information, set the following options in the ``DEFAULT`` section of the ``glance-api.conf`` file: .. code-block:: ini show_multiple_locations = True To enable the Block Storage services to create a new volume by cloning Image- Volume, set the following options in the ``DEFAULT`` section of the ``cinder.conf`` file. For example: .. code-block:: ini allowed_direct_url_schemes = cinder To enable the :command:`openstack image create --volume ` command to create an image that refers an ``Image-Volume``, set the following options in each back-end section of the ``cinder.conf`` file: .. code-block:: ini image_upload_use_cinder_backend = True By default, the :command:`openstack image create --volume ` command creates the Image-Volume in the current project. To store the Image-Volume into the internal project, set the following options in each back-end section of the ``cinder.conf`` file: .. code-block:: ini image_upload_use_internal_tenant = True To make the Image-Volume in the internal project accessible from the Image service, set the following options in the ``glance_store`` section of the ``glance-api.conf`` file: - ``cinder_store_auth_address`` - ``cinder_store_user_name`` - ``cinder_store_password`` - ``cinder_store_project_name`` Creating a Volume-backed image ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ To register an existing volume as a new Volume-backed image, use the following commands: .. code-block:: console $ openstack image create --disk-format raw --container-format bare IMAGE_NAME $ glance location-add --url cinder:// If the ``image_upload_use_cinder_backend`` option is enabled, the following command creates a new Image-Volume by cloning the specified volume and then registers its location to a new image. The disk format and the container format must be raw and bare (default). Otherwise, the image is uploaded to the default store of the Image service. .. code-block:: console $ openstack image create --volume SOURCE_VOLUME IMAGE_NAME ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/doc/source/admin/volume-backups-export-import.rst0000664000175000017500000000433500000000000024547 0ustar00zuulzuul00000000000000.. _volume_backups_export_import: ================================= Export and import backup metadata ================================= A volume backup can only be restored on the same Block Storage service. This is because restoring a volume from a backup requires metadata available on the database used by the Block Storage service. .. note:: For information about how to back up and restore a volume, see the section called :ref:`volume_backups`. You can, however, export the metadata of a volume backup. To do so, run this command as an OpenStack ``admin`` user (presumably, after creating a volume backup): .. 
code-block:: console $ cinder backup-export BACKUP_ID Where ``BACKUP_ID`` is the volume backup's ID. This command should return the backup's corresponding database information as encoded string metadata. Exporting and storing this encoded string metadata allows you to completely restore the backup, even in the event of a catastrophic database failure. This will preclude the need to back up the entire Block Storage database, particularly if you only need to keep complete backups of a small subset of volumes. If you have placed encryption on your volumes, the encryption will still be in place when you restore the volume if a UUID encryption key is specified when creating volumes. Using backup metadata support, UUID keys set up for a volume (or volumes) will remain valid when you restore a backed-up volume. The restored volume will remain encrypted, and will be accessible with your credentials. In addition, having a volume backup and its backup metadata also provides volume portability. Specifically, backing up a volume and exporting its metadata will allow you to restore the volume on a completely different Block Storage database, or even on a different cloud service. To do so, first import the backup metadata to the Block Storage database and then restore the backup. To import backup metadata, run the following command as an OpenStack ``admin``: .. code-block:: console $ cinder backup-import METADATA Where ``METADATA`` is the backup metadata exported earlier. Once you have imported the backup metadata into a Block Storage database, restore the volume (see the section called :ref:`volume_backups`). ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/doc/source/admin/volume-backups.rst0000664000175000017500000002522500000000000021721 0ustar00zuulzuul00000000000000.. _volume_backups: ========================================= Back up and restore volumes and snapshots ========================================= The ``openstack`` command-line interface provides the tools for creating a volume backup. You can restore a volume from a backup as long as the backup's associated database information (or backup metadata) is intact in the Block Storage database. Run this command to create a backup of a volume: .. code-block:: console $ openstack volume backup create [--incremental] [--force] VOLUME Where ``VOLUME`` is the name or ID of the volume, ``incremental`` is a flag that indicates whether an incremental backup should be performed, and ``force`` is a flag that allows or disallows backup of a volume when the volume is attached to an instance. Without the ``incremental`` flag, a full backup is created by default. With the ``incremental`` flag, an incremental backup is created. Without the ``force`` flag, the volume will be backed up only if its status is ``available``. With the ``force`` flag, the volume will be backed up whether its status is ``available`` or ``in-use``. A volume is ``in-use`` when it is attached to an instance. The backup of an ``in-use`` volume means your data is crash consistent. The ``force`` flag is False by default. .. note:: The ``force`` flag is new in OpenStack Liberty. The incremental backup is based on a parent backup which is an existing backup with the latest timestamp. The parent backup can be a full backup or an incremental backup depending on the timestamp. .. note:: The first backup of a volume has to be a full backup. Attempting to do an incremental backup without any existing backups will fail. 
There is an ``is_incremental`` flag that indicates whether a backup is incremental when showing details on the backup. Another flag, ``has_dependent_backups``, returned when showing backup details, will indicate whether the backup has dependent backups. If it is ``true``, attempting to delete this backup will fail. A new configure option ``backup_swift_block_size`` is introduced into ``cinder.conf`` for the default Swift backup driver. This is the size in bytes that changes are tracked for incremental backups. The existing ``backup_swift_object_size`` option, the size in bytes of Swift backup objects, has to be a multiple of ``backup_swift_block_size``. The default is 32768 for ``backup_swift_block_size``, and the default is 52428800 for ``backup_swift_object_size``. The configuration option ``backup_swift_enable_progress_timer`` in ``cinder.conf`` is used when backing up the volume to Object Storage back end. This option enables or disables the timer. It is enabled by default to send the periodic progress notifications to the Telemetry service. This command also returns a backup ID. Use this backup ID when restoring the volume: .. code-block:: console $ openstack volume backup restore BACKUP_ID VOLUME_ID When restoring from a full backup, it is a full restore. When restoring from an incremental backup, a list of backups is built based on the IDs of the parent backups. A full restore is performed based on the full backup first, then restore is done based on the incremental backup, laying on top of it in order. You can view a backup list with the :command:`openstack volume backup list` command. Optional arguments to clarify the status of your backups include: running ``--name``, ``--status``, and ``--volume`` to filter through backups by the specified name, status, or volume-id. Search with ``--all-projects`` for details of the projects associated with the listed backups. Because volume backups are dependent on the Block Storage database, you must also back up your Block Storage database regularly to ensure data recovery. .. note:: Alternatively, you can export and save the metadata of selected volume backups. Doing so precludes the need to back up the entire Block Storage database. This is useful if you need only a small subset of volumes to survive a catastrophic database failure. If you specify a UUID encryption key when setting up the volume specifications, the backup metadata ensures that the key will remain valid when you back up and restore the volume. For more information about how to export and import volume backup metadata, see the section called :ref:`volume_backups_export_import`. By default, the swift object store is used for the backup repository. If instead you want to use an NFS export as the backup repository, add the following configuration options to the ``[DEFAULT]`` section of the ``cinder.conf`` file and restart the Block Storage services: .. code-block:: ini backup_driver = cinder.backup.drivers.nfs backup_share = HOST:EXPORT_PATH For the ``backup_share`` option, replace ``HOST`` with the DNS resolvable host name or the IP address of the storage server for the NFS share, and ``EXPORT_PATH`` with the path to that share. If your environment requires that non-default mount options be specified for the share, set these as follows: .. code-block:: ini backup_mount_options = MOUNT_OPTIONS ``MOUNT_OPTIONS`` is a comma-separated string of NFS mount options as detailed in the NFS man page. 
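For example, if your NFS server requires NFSv4.1 with system authentication, the setting might look like this (a minimal sketch only; the exact options depend entirely on your NFS server and are described in the NFS man page):

.. code-block:: ini

   backup_mount_options = vers=4.1,sec=sys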
There are several other options whose default values may be overridden as appropriate for your environment: .. code-block:: ini backup_compression_algorithm = zlib backup_sha_block_size_bytes = 32768 backup_file_size = 1999994880 The option ``backup_compression_algorithm`` can be set to ``zlib``, ``bz2``, ``zstd`` or ``none``. The value ``none`` can be a useful setting when the server providing the share for the backup repository itself performs deduplication or compression on the backup data. The option ``backup_file_size`` must be a multiple of ``backup_sha_block_size_bytes``. It is effectively the maximum file size to be used, given your environment, to hold backup data. Volumes larger than this will be stored in multiple files in the backup repository. ``backup_file_size`` also determines the buffer size used to produce backup files; on smaller hosts it may need to be scaled down to avoid OOM issues. The ``backup_sha_block_size_bytes`` option determines the size of blocks from the cinder volume being backed up on which digital signatures are calculated in order to enable incremental backup capability. You also have the option of resetting the state of a backup. When creating or restoring a backup, sometimes it may get stuck in the creating or restoring states due to problems like the database or rabbitmq being down. In situations like these resetting the state of the backup can restore it to a functional status. Run this command to restore the state of a backup: .. code-block:: console $ openstack volume backup set --state BACKUP Run this command to create a backup of a snapshot: .. code-block:: console $ openstack volume backup create [--incremental] [--force] \ [--snapshot SNAPSHOT_ID] VOLUME Where ``VOLUME`` is the name or ID of the volume, ``SNAPSHOT_ID`` is the ID of the volume's snapshot. Cancelling ---------- Since Liberty it is possible to cancel an ongoing backup operation on any of the Chunked Backup type of drivers such as Swift, NFS, Google, GlusterFS, and Posix. To issue a backup cancellation on a backup we must request a force delete on the backup. .. code-block:: console $ openstack volume backup delete --force BACKUP_ID .. note:: The policy on force delete defaults to admin only. Even if the backup is immediately deleted, and therefore no longer appears in the listings, the cancellation may take a little bit longer, so please check the status of the source resource to see when it stops being "backing-up". .. note:: Before Pike the "backing-up" status would always be stored in the volume, even when backing up a snapshot, so when backing up a snapshot any delete operation on the snapshot that followed a cancellation could result in an error if the snapshot was still mapped. Polling on the volume to stop being "backing-up" prior to the deletion is required to ensure success. Since Rocky it is also possible to cancel an ongoing restoring operation on any of the Chunked Backup type of drivers. To issue a backup restoration cancellation we need to alter its status to anything other than `restoring`. We strongly recommend using the "error" state to avoid any confusion on whether the restore was successful or not. .. code-block:: console $ openstack volume backup set --state error BACKUP_ID .. warning:: After a restore operation has started, if it is then cancelled, the destination volume is useless, as there is no way of knowing how much data, or if any, was actually restored, hence our recommendation of using the "error" state. 
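Whether you cancel a backup or a restore, it is worth confirming that the operation has really stopped before acting on the affected resources. For a cancelled backup you can poll the source volume until its status is no longer "backing-up"; for example (the volume name and the output shown are illustrative):

.. code-block:: console

   $ openstack volume show SOURCE_VOLUME -c status
   +--------+-----------+
   | Field  | Value     |
   +--------+-----------+
   | status | available |
   +--------+-----------+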
backup_max_operations --------------------- This configuration option lets us select the maximum number of operations, backup and restore, that can be performed concurrently. This option has a default value of 15, which means that we can have 15 concurrent backups, or 15 concurrent restores, or any combination of backups and restores as long as the sum of the two operations doesn't exceed 15. The concurrency limitation of this configuration option is also enforced when we run multiple processes for the same backup service using the ``backup_workers`` configuration option. It is not a per-process restriction, but global to the service, so the ``backup_max_operations`` limit is not applied to each process individually, but to all the running processes of the same backup service combined. Backup and restore operations are both CPU and memory intensive, but thanks to this option we can limit the concurrency and prevent DoS attacks or just service disruptions caused by many concurrent requests that lead to Out of Memory (OOM) kills. The amount of memory (RAM) used during the operation depends on the configured chunk size as well as the compression ratio achieved on the data during the operation. Example: Let's have a look at how much memory would be needed if we use the default backup chunk size (~1.86 GB) while doing a restore to an RBD volume from a non-Ceph backend (Swift, NFS, etc.). In a restore operation the worst case scenario, from the memory point of view, is when the compression ratio is close to 0% (the compressed data chunk is almost the same size as the uncompressed data). In this case the memory usage would be ~5.58 GB of data for each chunk: ~5.58 GB = read buffer + decompressed buffer + write buffer used by the librbd library = ~1.86 GB + 1.86 GB + 1.86 GB For 15 concurrent restore operations, the cinder-backup service will require ~83.7 GB of memory. Similar calculations can be done for environment-specific scenarios and this config option can be set accordingly. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/doc/source/admin/volume-migration.rst0000664000175000017500000002341200000000000022256 0ustar00zuulzuul00000000000000.. _volume_migration.rst: =============== Migrate volumes =============== OpenStack has the ability to migrate volumes between back ends which support its volume-type. Migrating a volume transparently moves its data from the current back end for the volume to a new one. This is an administrator function, and can be used for tasks including storage evacuation (for maintenance or decommissioning), or manual optimizations (for example, performance, reliability, or cost). These workflows are possible for a migration: #. If the storage can migrate the volume on its own, it is given the opportunity to do so. This allows the Block Storage driver to enable optimizations that the storage might be able to perform. If the back end is not able to perform the migration, the Block Storage uses one of two generic flows, as follows. #. If the volume is not attached, the Block Storage service creates a volume and copies the data from the original to the new volume. .. note:: While most back ends support this function, not all do. See the :doc:`driver documentation ` for more details. #. If the volume is attached to a VM instance, the Block Storage creates a volume, and calls Compute to copy the data from the original to the new volume. Currently this is supported only by the Compute libvirt driver.
As an example, this scenario shows two LVM back ends and migrates an attached volume from one to the other. This scenario uses the third migration flow. First, list the available back ends: .. code-block:: console # cinder get-pools +----------+----------------------------------------------------+ | Property | Value | +----------+----------------------------------------------------+ | name | server1@lvmstorage-1#lvmstorage-1 | +----------+----------------------------------------------------+ +----------+----------------------------------------------------+ | Property | Value | +----------+----------------------------------------------------+ | name | server2@lvmstorage-2#lvmstorage-2 | +----------+----------------------------------------------------+ .. note:: Block Storage API supports :command:`cinder get-pools` since V2 version. You can also get available back ends like following: .. code-block:: console # cinder-manage host list server1@lvmstorage-1 zone1 server2@lvmstorage-2 zone1 But it needs to add pool name in the end. For example, ``server1@lvmstorage-1#zone1``. Next, as the admin user, you can see the current status of the volume (replace the example ID with your own): .. code-block:: console $ openstack volume show 6088f80a-f116-4331-ad48-9afb0dfb196c +--------------------------------+--------------------------------------+ | Field | Value | +--------------------------------+--------------------------------------+ | attachments | [] | | availability_zone | zone1 | | bootable | false | | consistencygroup_id | None | | created_at | 2013-09-01T14:53:22.000000 | | description | test | | encrypted | False | | id | 6088f80a-f116-4331-ad48-9afb0dfb196c | | migration_status | None | | multiattach | False | | name | test | | os-vol-host-attr:host | server1@lvmstorage-1#lvmstorage-1 | | os-vol-mig-status-attr:migstat | None | | os-vol-mig-status-attr:name_id | None | | os-vol-tenant-attr:tenant_id | d88310717a8e4ebcae84ed075f82c51e | | properties | readonly='False' | | replication_status | disabled | | size | 1 | | snapshot_id | None | | source_volid | None | | status | in-use | | type | None | | updated_at | 2016-07-31T07:22:19.000000 | | user_id | d8e5e5727f3a4ce1886ac8ecec058e83 | +--------------------------------+--------------------------------------+ Note these attributes: * ``os-vol-host-attr:host`` - the volume's current back end. * ``os-vol-mig-status-attr:migstat`` - the status of this volume's migration (None means that a migration is not currently in progress). * ``os-vol-mig-status-attr:name_id`` - the volume ID that this volume's name on the back end is based on. Before a volume is ever migrated, its name on the back end storage may be based on the volume's ID (see the ``volume_name_template`` configuration parameter). For example, if ``volume_name_template`` is kept as the default value (``volume-%s``), your first LVM back end has a logical volume named ``volume-6088f80a-f116-4331-ad48-9afb0dfb196c``. During the course of a migration, if you create a volume and copy over the data, the volume get the new name but keeps its original ID. This is exposed by the ``name_id`` attribute. .. note:: If you plan to decommission a block storage node, you must stop the ``cinder`` volume service on the node after performing the migration. On nodes that run CentOS, Fedora, openSUSE, Red Hat Enterprise Linux, or SUSE Linux Enterprise, run: .. code-block:: console # service openstack-cinder-volume stop # chkconfig openstack-cinder-volume off On nodes that run Ubuntu or Debian, run: .. 
code-block:: console # service cinder-volume stop # chkconfig cinder-volume off Stopping the cinder volume service will prevent volumes from being allocated to the node. Migrate this volume to the second LVM back end: .. code-block:: console $ openstack volume migrate 6088f80a-f116-4331-ad48-9afb0dfb196c \ --host server2@lvmstorage-2#lvmstorage-2 You can use the :command:`openstack volume show` command to see the status of the migration. While migrating, the ``migstat`` attribute shows states such as ``migrating`` or ``completing``. On error, ``migstat`` is set to None and the host attribute shows the original ``host``. On success, in this example, the output looks like: .. code-block:: console $ openstack volume show 6088f80a-f116-4331-ad48-9afb0dfb196c +--------------------------------+--------------------------------------+ | Field | Value | +--------------------------------+--------------------------------------+ | attachments | [] | | availability_zone | zone1 | | bootable | false | | consistencygroup_id | None | | created_at | 2013-09-01T14:53:22.000000 | | description | test | | encrypted | False | | id | 6088f80a-f116-4331-ad48-9afb0dfb196c | | migration_status | None | | multiattach | False | | name | test | | os-vol-host-attr:host | server2@lvmstorage-2#lvmstorage-2 | | os-vol-mig-status-attr:migstat | completing | | os-vol-mig-status-attr:name_id | None | | os-vol-tenant-attr:tenant_id | d88310717a8e4ebcae84ed075f82c51e | | properties | readonly='False' | | replication_status | disabled | | size | 1 | | snapshot_id | None | | source_volid | None | | status | in-use | | type | None | | updated_at | 2017-02-22T02:35:03.000000 | | user_id | d8e5e5727f3a4ce1886ac8ecec058e83 | +--------------------------------+--------------------------------------+ Note that ``migstat`` is None, host is the new host, and ``name_id`` holds the ID of the volume created by the migration. If you look at the second LVM back end, you find the logical volume ``volume-133d1f56-9ffc-4f57-8798-d5217d851862``. .. note:: The migration is not visible to non-admin users (for example, through the volume ``status``). However, some operations are not allowed while a migration is taking place, such as attaching/detaching a volume and deleting a volume. If a user performs such an action during a migration, an error is returned. .. note:: Migrating volumes that have snapshots are currently not allowed. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/doc/source/admin/volume-multiattach.rst0000664000175000017500000001304000000000000022600 0ustar00zuulzuul00000000000000.. _volume_multiattach: ================================================================== Volume multi-attach: Enable attaching a volume to multiple servers ================================================================== The ability to attach a volume to multiple hosts/servers simultaneously is a use case desired for active/active or active/standby scenarios. Support was added in both `Cinder`_ and `Nova`_ in the Queens release to volume multi-attach with read/write (RW) mode. .. warning:: It is the responsibility of the user to ensure that a multiattach or clustered file system is used on the volumes. Otherwise there may be a high probability of data corruption. In Cinder the functionality is available from microversion '3.50' or higher. As a prerequisite `new Attach/Detach APIs were added to Cinder`_ in Ocata to overcome earlier limitations towards achieving volume multi-attach. 
In case you use Cinder together with Nova, compute API calls were switched to using the new block storage volume attachment APIs in Queens, if the required block storage API microversion is available. For more information on using multiattach volumes with the compute service, refer to the corresponding `compute admin guide section `_. How to create a 'multiattach' volume ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ In order to be able to attach a volume to multiple server instances you need to have the 'multiattach' flag set to 'True' in the volume details. Please ensure you have the right role and policy settings before performing the operation. The only way to create a multiattach volume is by creating a multiattach volume type and using it to create the volume. .. note:: For information on back ends that provide the functionality see `Back end support`. Multiattach volume type ----------------------- Starting from the Queens release the ability to attach a volume to multiple hosts/servers requires that the volume is of a special type that includes an extra-spec capability setting of ``multiattach= True``. You can create the volume type the following way: .. code-block:: console $ openstack volume type create multiattach $ openstack volume type set --property multiattach=" True" multiattach .. note:: Creating a new volume type is an admin-only operation by default. You can change the settings in the cinder policy file if needed. For more information about configuring cinder policies, see :ref:`policy-configuration`. To create the volume you need to use the volume type you created earlier, like this: .. code-block:: console $ openstack volume create --size 10 --type multiattach my-volume In addition, it is possible to retype a volume to be (or not to be) multiattach capable. Currently however we only allow retyping a volume if its status is ``available``. The reasoning behind the limitation is that some consumers/hypervisors need to make special considerations at attach-time for multiattach volumes (like disable caching) and there's no mechanism currently to update a currently attached volume in a safe way while keeping it attached the whole time. RO / RW caveats (the secondary RW attachment issue) ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ By default, secondary volume attachments are made in read/write mode which can be problematic, especially for operations like volume migration. There might be improvements to provide support to specify the attach-mode for the secondary attachments, for the latest information please take a look into `Cinder's specs list`_ for the current release. Back end support ~~~~~~~~~~~~~~~~ In order to have the feature available, multi-attach needs to be supported by the chosen back end which is indicated through capabilities in the corresponding volume driver. The reference implementation is available on LVM in the Queens release. You can check the :ref:`Driver Support Matrix ` for further information on which back end provides the functionality. Policy rules ~~~~~~~~~~~~ You can control the availability of volume multi-attach through policies that you can configure in the cinder policy file. For more information about the cinder policy file, including how to generate a sample file so you can view the default policy settings, see :ref:`policy-configuration`. Multiattach policy ------------------ The general policy rule to allow the creation or retyping of multiattach volumes is named ``volume:multiattach``. 
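For example, a cloud that wants to reserve the creation of multiattach volumes for administrators could override that rule in the cinder policy file. The value below is only an illustration; check the policy defaults of your release before changing them:

.. code-block:: console

   "volume:multiattach": "rule:admin_api"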
Multiattach policy for bootable volumes --------------------------------------- This is a policy to disallow the ability to create multiple attachments on a volume that is marked as bootable with the name ``volume:multiattach_bootable_volume``. Known issues and limitations ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - Retyping an in-use volume from a multiattach-capable type to a non-multiattach-capable type, or vice-versa, is not supported. - It is not recommended to retype an in-use multiattach volume if that volume has more than one active read/write attachment. - Encryption is not supported with multiattach-capable volumes. .. _`Cinder`: https://specs.openstack.org/openstack/cinder-specs/specs/queens/enable-multiattach.html .. _`Nova`: https://specs.openstack.org/openstack/nova-specs/specs/queens/approved/cinder-volume-multi-attach.html .. _`new Attach/Detach APIs were added to Cinder`: http://specs.openstack.org/openstack/cinder-specs/specs/ocata/add-new-attach-apis.html .. _`Cinder's specs list`: https://specs.openstack.org/openstack/cinder-specs/index.html ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/doc/source/admin/volume-number-weigher.rst0000664000175000017500000000547300000000000023214 0ustar00zuulzuul00000000000000.. _volume_number_weigher: ======================================= Configure and use volume number weigher ======================================= OpenStack Block Storage enables you to choose a volume back end according to ``free_capacity`` and ``allocated_capacity``. The volume number weigher feature lets the scheduler choose a volume back end based on its volume number in the volume back end. This can provide another means to improve the volume back ends' I/O balance and the volumes' I/O performance. Enable volume number weigher ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ To enable a volume number weigher, set the ``scheduler_default_weighers`` to ``VolumeNumberWeigher`` flag in the ``cinder.conf`` file to define ``VolumeNumberWeigher`` as the selected weigher. Configure multiple-storage back ends ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ To configure ``VolumeNumberWeigher``, use ``LVMVolumeDriver`` as the volume driver. This configuration defines two LVM volume groups: ``stack-volumes`` with 10 GB capacity and ``stack-volumes-1`` with 60 GB capacity. This example configuration defines two back ends: .. code-block:: ini scheduler_default_weighers=VolumeNumberWeigher enabled_backends=lvmdriver-1,lvmdriver-2 [lvmdriver-1] volume_group=stack-volumes volume_driver=cinder.volume.drivers.lvm.LVMVolumeDriver volume_backend_name=LVM [lvmdriver-2] volume_group=stack-volumes-1 volume_driver=cinder.volume.drivers.lvm.LVMVolumeDriver volume_backend_name=LVM Volume type ~~~~~~~~~~~ Define a volume type in Block Storage: .. code-block:: console $ openstack volume type create lvm Create an extra specification that links the volume type to a back-end name: .. code-block:: console $ openstack volume type set lvm --property volume_backend_name=LVM This example creates a lvm volume type with ``volume_backend_name=LVM`` as extra specifications. Usage ~~~~~ To create six 1-GB volumes, run the :command:`openstack volume create --size 1 --type lvm volume1` command six times: .. code-block:: console $ openstack volume create --size 1 --type lvm volume1 This command creates three volumes in ``stack-volumes`` and three volumes in ``stack-volumes-1``. List the available volumes: .. 
code-block:: console # lvs LV VG Attr LSize Pool Origin Data% Move Log Copy% Convert volume-3814f055-5294-4796-b5e6-1b7816806e5d stack-volumes -wi-a---- 1.00g volume-72cf5e79-99d2-4d23-b84e-1c35d3a293be stack-volumes -wi-a---- 1.00g volume-96832554-0273-4e9d-902b-ad421dfb39d1 stack-volumes -wi-a---- 1.00g volume-169386ef-3d3e-4a90-8439-58ceb46889d9 stack-volumes-1 -wi-a---- 1.00g volume-460b0bbb-d8a0-4bc3-9882-a129a5fe8652 stack-volumes-1 -wi-a---- 1.00g volume-9a08413b-0dbc-47c9-afb8-41032ab05a41 stack-volumes-1 -wi-a---- 1.00g ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315577.4351215 cinder-27.0.0/doc/source/cli/0000775000175000017500000000000000000000000015703 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/doc/source/cli/README.rst0000664000175000017500000000126500000000000017376 0ustar00zuulzuul00000000000000===================================== Cinder CLI Documentation (source/cli) ===================================== Introduction: ------------- This directory is intended to hold any documentation that relates to Cinder's Command Line Interface. Note that this directory is intended for basic descriptions of the commands supported, similar to what you would find with a 'man page'. Tutorials or step-by-step guides should go into 'doc/source/admin' or 'doc/source/user' depending on the target audience. The full spec for organization of documentation may be seen in the `OS Manuals Migration Spec `. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/doc/source/cli/cinder-manage.rst0000664000175000017500000002150300000000000021130 0ustar00zuulzuul00000000000000============= cinder-manage ============= ------------------------------------------ Control and manage OpenStack block storage ------------------------------------------ :Author: openstack-discuss@lists.openstack.org :Copyright: OpenStack Foundation :Manual section: 1 :Manual group: cloud computing SYNOPSIS ======== cinder-manage [] DESCRIPTION =========== :command:`cinder-manage` provides control of cinder database migration, and provides an interface to get information about the current state of cinder. More information about OpenStack Cinder is available at `OpenStack Cinder `_. OPTIONS ======= The standard pattern for executing a cinder-manage command is: ``cinder-manage []`` For example, to obtain a list of the cinder services currently running: ``cinder-manage service list`` Run without arguments to see a list of available command categories: ``cinder-manage`` The categories are listed below, along with detailed descriptions. You can also run with a category argument such as 'db' to see a list of all commands in that category: ``cinder-manage db`` These sections describe the available categories and arguments for cinder-manage. Cinder Quota ~~~~~~~~~~~~ Cinder quotas sometimes run out of sync, and while there are some mechanisms in place in Cinder that, with the proper configuration, try to do a resync of the quotas, they are not perfect and are susceptible to race conditions, so they may result in less than perfect accuracy in refreshed quotas. The cinder-manage quota commands are meant to help manage these issues while allowing a finer control of when and what quotas are fixed. 
**Checking if quotas and reservations are correct.** ``cinder-manage quota check [-h] [--project-id PROJECT_ID]`` Accepted arguments are: .. code-block:: console --project-id PROJECT_ID The ID of the project where we want to sync the quotas (defaults to all projects). This command checks quotas and reservations, for a specific project (passing ``--project-id``) or for all projects, to see if they are out of sync. The check will also look for duplicated entries. One way to use this action in combination with the sync action is to run the check for all projects, take note of those that are out of sync, and the sync them one by one at intervals to allow cinder to operate semi-normally. **Fixing quotas and reservations** ``cinder-manage quota sync [-h] [--project-id PROJECT_ID]`` Accepted arguments are: .. code-block:: console --project-id PROJECT_ID The ID of the project where we want to sync the quotas (defaults to all projects). This command refreshes existing quota usage and reservation count for a specific project or for all projects. The refresh will also remove duplicated entries. This operation is best executed when Cinder is not running, but it can be run with cinder services running as well. A different transaction is used for each project's quota sync, so an action failure will only rollback the current project's changes. Cinder Db ~~~~~~~~~ ``cinder-manage db version`` Print the current database version. ``cinder-manage db sync [--bump-versions] [version]`` Sync the database up to the most recent version. This is the standard way to create the db as well. This command interprets the following options when it is invoked: version Database version --bump-versions Update RPC and Objects versions when doing offline upgrades, with this we no longer need to restart the services twice after the upgrade to prevent ServiceTooOld exceptions. ``cinder-manage db purge []`` Purge database entries that are marked as deleted, that are older than the number of days specified. ``cinder-manage db online_data_migrations [--max_count ]`` Perform online data migrations for database upgrade between releases in batches. This command interprets the following options when it is invoked: .. code-block:: console --max_count Maximum number of objects to migrate. If not specified, all possible migrations will be completed, in batches of 50 at a time. Returns exit status 0 if no (further) updates are possible, 1 if the ``--max_count`` option was used and some updates were completed successfully (even if others generated errors), 2 if some updates generated errors and no other migrations were able to take effect in the last batch attempted, or 127 if invalid input is provided (e.g. non-numeric max-count). This command should be run after upgrading the database schema. If it exits with partial updates (exit status 1) it should be called again, even if some updates initially generated errors, because some updates may depend on others having completed. If it exits with status 2, intervention is required to resolve the issue causing remaining updates to fail. It should be considered successfully completed only when the exit status is 0. Cinder Volume ~~~~~~~~~~~~~ ``cinder-manage volume delete `` Delete a volume without first checking that the volume is available. ``cinder-manage volume update_host --currenthost --newhost `` Updates the host name of all volumes currently associated with a specified host. 
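For example, after moving all volumes from one backend host to another, the invocation might look like the following (the host names are hypothetical):

.. code-block:: console

   cinder-manage volume update_host --currenthost server1@lvmdriver-1 --newhost server2@lvmdriver-1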
``cinder-manage volume update_service`` When upgrading cinder, new service entries are created in the database as the existing cinder-volume host(s) are upgraded. In some cases, rows in the volumes table keep references to the old service, which can prevent the old services from being deleted when the database is purged. This command makes sure that all volumes have updated service references for all volumes on all cinder-volume hosts. Cinder Host ~~~~~~~~~~~ ``cinder-manage host list []`` Displays a list of all physical hosts and their zone. The optional zone argument allows the list to be filtered on the requested zone. Cinder Service ~~~~~~~~~~~~~~ ``cinder-manage service list`` Displays a list of all cinder services and their host, zone, status, state and when the information was last updated. ``cinder-manage service remove `` Removes a specified cinder service from a specified host. Cinder Backup ~~~~~~~~~~~~~ ``cinder-manage backup list`` Displays a list of all backups (including ones in progress) and the host on which the backup operation is running. ``cinder-manage backup update_backup_host --currenthost --newhost `` Updates the host name of all backups currently associated with a specified host. Cinder Version ~~~~~~~~~~~~~~ ``cinder-manage version list`` Displays the codebase version cinder is running upon. Cinder Config ~~~~~~~~~~~~~ ``cinder-manage config list []`` Displays the current configuration parameters (options) for Cinder. The optional flag parameter may be used to display the configuration of one parameter. Cinder Util ~~~~~~~~~~~ ``cinder-manage util clean_locks [-h] [--services-offline]`` Clean file locks on the current host that were created and are used by drivers and cinder services for volumes, snapshots, and the backup service on the current host. Should be run on any host where we are running a Cinder service (API, Scheduler, Volume, Backup) and can be run with the Cinder services running or stopped. If the services are running it will check existing resources in the Cinder database in order to only remove resources that are no longer present (it's safe to delete the files). For backups, the way to know if we can remove the startup lock is by checking if the PGRP in the file name is currently running cinder-backup. Deleting locks while the services are offline is faster as there's no need to check the database or the running processes. Default assumes that services are online, must pass ``--services-offline`` to specify that they are offline. The common use case for running the command with ``--services-offline`` is to be called on startup as a service unit before any cinder service is started. Command will be usually called without the ``--services-offline`` parameter manually or from a cron job. .. warning:: Passing ``--services-offline`` when the Cinder services are still running breaks the locking mechanism and can lead to undesired behavior in ongoing Cinder operations. .. note:: This command doesn't clean DLM locks (except when using file locks), as those don't leave lock leftovers. FILES ===== The cinder.conf file contains configuration information in the form of python-gflags. The cinder-manage.log file logs output from cinder-manage. 
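A typical run prints a table with one entry per check. The output below is only a sketch of the format; the actual checks performed depend on the release you are upgrading to::

    cinder-status upgrade check
    +----------------------------------------+
    | Upgrade Check Results                  |
    +----------------------------------------+
    | Check: <name of the individual check>  |
    | Result: Success                        |
    | Details: None                          |
    +----------------------------------------+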
SEE ALSO ======== * `OpenStack Cinder `__ BUGS ==== * Cinder is hosted on Launchpad so you can view current bugs at `Bugs : Cinder `__ ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/doc/source/cli/cinder-status.rst0000664000175000017500000000631500000000000021227 0ustar00zuulzuul00000000000000============= cinder-status ============= ---------------------------------------- CLI interface for cinder status commands ---------------------------------------- :Author: openstack-discuss@lists.openstack.org :Copyright: OpenStack Foundation :Manual section: 1 :Manual group: cloud computing Synopsis ======== :: cinder-status [] Description =========== :program:`cinder-status` is a tool that provides routines for checking the status of a Cinder deployment. Options ======= The standard pattern for executing a :program:`cinder-status` command is:: cinder-status [] Run without arguments to see a list of available command categories:: cinder-status Categories are: * ``upgrade`` Detailed descriptions are below. You can also run with a category argument such as ``upgrade`` to see a list of all commands in that category:: cinder-status upgrade These sections describe the available categories and arguments for :program:`cinder-status`. Upgrade ~~~~~~~ .. _cinder-status-checks: ``cinder-status upgrade check`` Performs a release-specific readiness check before restarting services with new code. This command expects to have complete configuration and access to the database. It may also make requests to other services' REST API via the Keystone service catalog. **Return Codes** .. list-table:: :widths: 20 80 :header-rows: 1 * - Return code - Description * - 0 - All upgrade readiness checks passed successfully and there is nothing to do. * - 1 - At least one check encountered an issue and requires further investigation. This is considered a warning but the upgrade may be OK. * - 2 - There was an upgrade status check failure that needs to be investigated. This should be considered something that stops an upgrade. * - 255 - An unexpected error occurred. **History of Checks** **14.0.0 (Stein)** * Check added to ensure the backup_driver setting is using the full driver class path and not just the module path. * Checks for the presence of a **policy.json** file have been added to warn if policy changes should be present in a **policy.yaml** file. * Ensure that correct volume_driver path is used for Windows iSCSI driver. * Ensure that none of the volume drivers removed in Stein are enabled. Please note that if a driver is in **cinder.conf** but not in the ``enabled_drivers`` config option this check will not catch the problem. If you have used the CoprHD, ITRI Disco or HGST drivers in the past you should ensure that any data from these backends is transferred to a supported storage array before upgrade. **15.0.0 (Train)** * Check added to make operators aware of new finer-grained configuration options affecting the periodicity of various Cinder tasks. Triggered when the ``periodic_interval`` option is not set to its default value. * Added check for use of deprecated ``cinder.quota.NestedDbQuotaDriver``. 
See Also ======== * `OpenStack Cinder `_ Bugs ==== * Cinder bugs are managed at `Launchpad `_ ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/doc/source/cli/cli-cinder-quotas.rst0000664000175000017500000002302400000000000021761 0ustar00zuulzuul00000000000000=================================== Manage Block Storage service quotas =================================== As an administrative user, you can update the OpenStack Block Storage service quotas for a project. You can also update the quota defaults for a new project. **Block Storage quotas** =================== ============================================= Property name Defines the number of =================== ============================================= gigabytes Volume gigabytes allowed for each project. snapshots Volume snapshots allowed for each project. volumes Volumes allowed for each project. =================== ============================================= View Block Storage quotas ~~~~~~~~~~~~~~~~~~~~~~~~~ Administrative users can view Block Storage service quotas. #. Obtain the project ID: .. code-block:: console $ PROJECT_ID=$(openstack project show -f value -c id PROJECT_NAME) #. List the default quotas for a project: .. code-block:: console $ openstack quota show --default $PROJECT_ID +-----------------------+-------+ | Field | Value | +-----------------------+-------+ | backup-gigabytes | 1000 | | backups | 10 | | cores | 20 | | fixed-ips | -1 | | floating-ips | 50 | | gigabytes | 1000 | | gigabytes_lvmdriver-1 | -1 | | health_monitors | None | | injected-file-size | 10240 | | injected-files | 5 | | injected-path-size | 255 | | instances | 10 | | key-pairs | 100 | | l7_policies | None | | listeners | None | | load_balancers | None | | location | None | | name | None | | networks | 10 | | per-volume-gigabytes | -1 | | pools | None | | ports | 50 | | project | None | | project_id | None | | properties | 128 | | ram | 51200 | | rbac_policies | 10 | | routers | 10 | | secgroup-rules | 100 | | secgroups | 10 | | server-group-members | 10 | | server-groups | 10 | | snapshots | 10 | | snapshots_lvmdriver-1 | -1 | | subnet_pools | -1 | | subnets | 10 | | volumes | 10 | | volumes_lvmdriver-1 | -1 | +-----------------------+-------+ .. note:: Listing default quotas with the OpenStack command line client will provide all quotas for storage and network services. Previously, the :command:`cinder quota-defaults` command would list only storage quotas. You can use `$PROJECT_ID` or `$PROJECT_NAME` arguments to show Block Storage service quotas. If the `$PROJECT_ID` argument returns errors in locating resources, use `$PROJECT_NAME`. #. View Block Storage service quotas for a project: .. code-block:: console $ openstack quota show --volume $PROJECT_ID +-----------------------+-------+ | Resource | Limit | +-----------------------+-------+ | volumes | 10 | | snapshots | 10 | | gigabytes | 1000 | | backups | 10 | | volumes_lvmdriver-1 | -1 | | gigabytes_lvmdriver-1 | -1 | | snapshots_lvmdriver-1 | -1 | | volumes___DEFAULT__ | -1 | | gigabytes___DEFAULT__ | -1 | | snapshots___DEFAULT__ | -1 | | groups | 10 | | backup-gigabytes | 1000 | | per-volume-gigabytes | -1 | +-----------------------+-------+ #. Show the current usage of a per-project quota: .. 
code-block:: console $ openstack quota show --volume --usage $PROJECT_ID +-----------------------+-------+--------+----------+ | Resource | Limit | In Use | Reserved | +-----------------------+-------+--------+----------+ | volumes | 10 | 1 | 0 | | snapshots | 10 | 0 | 0 | | gigabytes | 1000 | 1 | 0 | | backups | 10 | 0 | 0 | | volumes_lvmdriver-1 | -1 | 1 | 0 | | gigabytes_lvmdriver-1 | -1 | 1 | 0 | | snapshots_lvmdriver-1 | -1 | 0 | 0 | | volumes___DEFAULT__ | -1 | 0 | 0 | | gigabytes___DEFAULT__ | -1 | 0 | 0 | | snapshots___DEFAULT__ | -1 | 0 | 0 | | groups | 10 | 0 | 0 | | backup-gigabytes | 1000 | 0 | 0 | | per-volume-gigabytes | -1 | 0 | 0 | +-----------------------+-------+--------+----------+ Edit and update Block Storage service quotas ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Administrative users can edit and update Block Storage service quotas. #. To update the default quota values for the initial deployment, update the values of the :guilabel:`quota_*` config options in the ``/etc/cinder/cinder.conf`` file. For more information, see the :doc:`Block Storage service configuration `. .. note:: The values of the :guilabel:`quota_*` config options are only used at the initial database sync in the initial deployment. If you want to change a default value for a new project, see the following. To update a default value for a new project, set ``use_default_quota_class = True`` (which is the default setting) in the :guilabel:`DEFAULT` section of the ``/etc/cinder/cinder.conf`` file, and run the command as the following. .. code-block:: console $ openstack quota set --class default --QUOTA_NAME QUOTA_VALUE Replace ``QUOTA_NAME`` with the quota that is to be updated, ``QUOTA_VALUE`` with the required new value. #. To update Block Storage service quotas for an existing project .. code-block:: console $ openstack quota set --QUOTA_NAME QUOTA_VALUE PROJECT_ID Replace ``QUOTA_NAME`` with the quota that is to be updated, ``QUOTA_VALUE`` with the required new value. Use the :command:`openstack quota show` command with ``PROJECT_ID``, which is the required project ID. For example: .. code-block:: console $ openstack quota set --volumes 15 $PROJECT_ID $ openstack quota show $PROJECT_ID +-----------------------+----------------------------------+ | Field | Value | +-----------------------+----------------------------------+ | backup-gigabytes | 1000 | | backups | 10 | | cores | 20 | | fixed-ips | -1 | | floating-ips | 29 | | gigabytes | 1000 | | gigabytes_lvmdriver-1 | -1 | | health_monitors | None | | injected-file-size | 10240 | | injected-files | 5 | | injected-path-size | 255 | | instances | 10 | | key-pairs | 100 | | l7_policies | None | | listeners | None | | load_balancers | None | | location | None | | name | None | | networks | 10 | | per-volume-gigabytes | -1 | | pools | None | | ports | 50 | | project | e436339c7f9c476cb3120cf3b9667377 | | project_id | None | | properties | 128 | | ram | 51200 | | rbac_policies | 10 | | routers | 10 | | secgroup-rules | 100 | | secgroups | 10 | | server-group-members | 10 | | server-groups | 10 | | snapshots | 10 | | snapshots_lvmdriver-1 | -1 | | subnet_pools | -1 | | subnets | 10 | | volumes | 15 | | volumes_lvmdriver-1 | -1 | +-----------------------+----------------------------------+ #. To clear per-project quota limits: .. 
code-block:: console $ openstack quota delete --volume $PROJECT_ID ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/doc/source/cli/cli-cinder-scheduling.rst0000664000175000017500000000324600000000000022576 0ustar00zuulzuul00000000000000=============================== Manage Block Storage scheduling =============================== As an administrative user, you have some control over which volume back end your volumes reside on. You can specify affinity or anti-affinity between two volumes. Affinity between volumes means that they are stored on the same back end, whereas anti-affinity means that they are stored on different back ends. For information on how to set up multiple back ends for Cinder, refer to :ref:`multi_backend`. Example Usages ~~~~~~~~~~~~~~ #. Create a new volume on the same back end as Volume_A: .. code-block:: console $ openstack volume create --hint same_host=Volume_A-UUID \ --size SIZE VOLUME_NAME #. Create a new volume on a different back end than Volume_A: .. code-block:: console $ openstack volume create --hint different_host=Volume_A-UUID \ --size SIZE VOLUME_NAME #. Create a new volume on the same back end as Volume_A and Volume_B: .. code-block:: console $ openstack volume create --hint same_host=Volume_A-UUID \ --hint same_host=Volume_B-UUID --size SIZE VOLUME_NAME Or: .. code-block:: console $ openstack volume create --hint same_host="[Volume_A-UUID, \ Volume_B-UUID]" --size SIZE VOLUME_NAME #. Create a new volume on a different back end than both Volume_A and Volume_B: .. code-block:: console $ openstack volume create --hint different_host=Volume_A-UUID \ --hint different_host=Volume_B-UUID --size SIZE VOLUME_NAME Or: .. code-block:: console $ openstack volume create --hint different_host="[Volume_A-UUID, \ Volume_B-UUID]" --size SIZE VOLUME_NAME ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/doc/source/cli/cli-manage-volumes.rst0000664000175000017500000007760300000000000022137 0ustar00zuulzuul00000000000000.. _volume: ============== Manage volumes ============== A volume is a detachable block storage device, similar to a USB hard drive. You can attach a volume to only one instance. Use the ``openstack`` client commands to create and manage volumes. Create a volume ~~~~~~~~~~~~~~~ This example creates a ``my-new-volume`` volume based on an image. #. List images, and note the ID of the image that you want to use for your volume: .. code-block:: console $ openstack image list +--------------------------------------+---------------------------------+ | ID | Name | +--------------------------------------+---------------------------------+ | 8bf4dc2a-bf78-4dd1-aefa-f3347cf638c8 | cirros-0.3.5-x86_64-uec | | 9ff9bb2e-3a1d-4d98-acb5-b1d3225aca6c | cirros-0.3.5-x86_64-uec-kernel | | 4b227119-68a1-4b28-8505-f94c6ea4c6dc | cirros-0.3.5-x86_64-uec-ramdisk | +--------------------------------------+---------------------------------+ #. List the availability zones, and note the ID of the availability zone in which you want to create your volume: .. code-block:: console $ openstack availability zone list --volume +-----------+-------------+ | Zone Name | Zone Status | +-----------+-------------+ | nova | available | +-----------+-------------+ #. Create a volume with 8 gibibytes (GiB) of space, and specify the availability zone and image: .. 
code-block:: console $ openstack volume create --image 8bf4dc2a-bf78-4dd1-aefa-f3347cf638c8 \ --size 8 --availability-zone nova my-new-volume +------------------------------+--------------------------------------+ | Property | Value | +------------------------------+--------------------------------------+ | attachments | [] | | availability_zone | nova | | bootable | false | | consistencygroup_id | None | | created_at | 2016-09-23T07:52:42.000000 | | description | None | | encrypted | False | | id | bab4b0e0-ce3d-4d57-bf57-3c51319f5202 | | metadata | {} | | multiattach | False | | name | my-new-volume | | os-vol-tenant-attr:tenant_id | 3f670abbe9b34ca5b81db6e7b540b8d8 | | replication_status | disabled | | size | 8 | | snapshot_id | None | | source_volid | None | | status | creating | | updated_at | None | | user_id | fe19e3a9f63f4a14bd4697789247bbc5 | | volume_type | lvmdriver-1 | +------------------------------+--------------------------------------+ #. To verify that your volume was created successfully, list the available volumes: .. code-block:: console $ openstack volume list +--------------------------------------+---------------+-----------+------+-------------+ | ID | Name | Status | Size | Attached to | +--------------------------------------+---------------+-----------+------+-------------+ | bab4b0e0-ce3d-4d57-bf57-3c51319f5202 | my-new-volume | available | 8 | | +--------------------------------------+---------------+-----------+------+-------------+ If your volume was created successfully, its status is ``available``. If its status is ``error``, you might have exceeded your quota. .. _Create_a_volume_from_specified_volume_type: Volume Types ------------ Cinder supports these three ways to specify ``volume type`` during volume creation. #. volume_type #. cinder_img_volume_type (via glance image metadata) #. default volume type (via project defaults or cinder.conf) volume-type ^^^^^^^^^^^ User can specify `volume type` when creating a volume. .. code-block:: console $ openstack volume create --type ... cinder_img_volume_type ^^^^^^^^^^^^^^^^^^^^^^ If glance image has ``cinder_img_volume_type`` property, Cinder uses this parameter to specify ``volume type`` when creating a volume. Choose glance image which has ``cinder_img_volume_type`` property and create a volume from the image. .. 
code-block:: console $ openstack image list +--------------------------------------+---------------------------------+--------+ | ID | Name | Status | +--------------------------------------+---------------------------------+--------+ | 376bd633-c9c9-4c5d-a588-342f4f66d086 | cirros-0.3.5-x86_64-uec | active | | 2c20fce7-2e68-45ee-ba8d-beba27a91ab5 | cirros-0.3.5-x86_64-uec-ramdisk | active | | a5752de4-9faf-4c47-acbc-78a5efa7cc6e | cirros-0.3.5-x86_64-uec-kernel | active | +--------------------------------------+---------------------------------+--------+ $ openstack image show 376bd633-c9c9-4c5d-a588-342f4f66d086 +------------------------+------------------------------------------------------+ | Field | Value | +------------------------+------------------------------------------------------+ | checksum | eb9139e4942121f22bbc2afc0400b2a | | cinder_img_volume_type | nfstype | | container_format | ami | | created_at | 2016-10-13T03:28:55Z | | disk_format | ami | | file | /v2/images/376bd633-c9c9-4c5d-a588-342f4f66d086/file | | id | 376bd633-c9c9-4c5d-a588-342f4f66d086 | | min_disk | 0 | | min_ram | 0 | | name | cirros-0.3.5-x86_64-uec | | owner | 88ba456e3a884c318394737765e0ef4d | | properties | kernel_id='a5752de4-9faf-4c47-acbc-78a5efa7cc6e', | | | ramdisk_id='2c20fce7-2e68-45ee-ba8d-beba27a91ab5' | | protected | False | | schema | /v2/schemas/image | | size | 25165824 | | status | active | | tags | | | updated_at | 2016-10-13T03:28:55Z | | virtual_size | None | | visibility | public | +------------------------+------------------------------------------------------+ $ openstack volume create --image 376bd633-c9c9-4c5d-a588-342f4f66d086 \ --size 1 --availability-zone nova test +---------------------+--------------------------------------+ | Field | Value | +---------------------+--------------------------------------+ | attachments | [] | | availability_zone | nova | | bootable | false | | consistencygroup_id | None | | created_at | 2016-10-13T06:29:53.688599 | | description | None | | encrypted | False | | id | e6e6a72d-cda7-442c-830f-f306ea6a03d5 | | multiattach | False | | name | test | | properties | | | replication_status | disabled | | size | 1 | | snapshot_id | None | | source_volid | None | | status | creating | | type | nfstype | | updated_at | None | | user_id | 33fdc37314914796883706b33e587d51 | +---------------------+--------------------------------------+ default volume type ^^^^^^^^^^^^^^^^^^^ If above parameters are not set, cinder uses default volume type during volume creation. The effective default volume type (whether it be project default or default_volume_type) can be checked with the following command: .. code-block:: console $ openstack volume type list --default There are two ways to set the default volume type: 1) Project specific defaults 2) default_volume_type defined in cinder.conf Project specific defaults (available since mv 3.62 or higher) """"""""""""""""""""""""""""""""""""""""""""""""""""""""""""" Project specific defaults can be managed using the `Default Volume Types API `_ It is set on a per project basis and has a higher priority over default_volume_type defined in cinder.conf default_volume_type """"""""""""""""""" If the project specific default is not set then default_volume_type configured in cinder.conf is used to create volumes. Example cinder.conf file configuration. .. code-block:: ini [default] default_volume_type = lvmdriver-1 .. _Attach_a_volume_to_an_instance: Attach a volume to an instance ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #. 
Attach your volume to a server, specifying the server ID and the volume ID: .. code-block:: console $ openstack server add volume 84c6e57d-a6b1-44b6-81eb-fcb36afd31b5 \ 573e024d-5235-49ce-8332-be1576d323f8 --device /dev/vdb #. Show information for your volume: .. code-block:: console $ openstack volume show 573e024d-5235-49ce-8332-be1576d323f8 The output shows that the volume is attached to the server with ID ``84c6e57d-a6b1-44b6-81eb-fcb36afd31b5``, is in the nova availability zone, and is bootable. .. code-block:: console +------------------------------+-----------------------------------------------+ | Field | Value | +------------------------------+-----------------------------------------------+ | attachments | [{u'device': u'/dev/vdb', | | | u'server_id': u'84c6e57d-a | | | u'id': u'573e024d-... | | | u'volume_id': u'573e024d... | | availability_zone | nova | | bootable | true | | consistencygroup_id | None | | created_at | 2016-10-13T06:08:07.000000 | | description | None | | encrypted | False | | id | 573e024d-5235-49ce-8332-be1576d323f8 | | multiattach | False | | name | my-new-volume | | properties | | | replication_status | disabled | | size | 8 | | snapshot_id | None | | source_volid | None | | status | in-use | | type | lvmdriver-1 | | updated_at | 2016-10-13T06:08:11.000000 | | user_id | 33fdc37314914796883706b33e587d51 | +------------------------------+-----------------------------------------------+ Detach a volume from an instance ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #. Detach your volume from a server, specifying the server ID and the volume ID: .. code-block:: console $ openstack server remove volume 84c6e57d-a6b1-44b6-81eb-fcb36afd31b5 \ 573e024d-5235-49ce-8332-be1576d323f8 #. Show information for your volume: .. code-block:: console $ openstack volume show 573e024d-5235-49ce-8332-be1576d323f8 The output shows that the volume is no longer attached to the server: .. code-block:: console +------------------------------+-----------------------------------------------+ | Field | Value | +------------------------------+-----------------------------------------------+ | attachments | [] | | availability_zone | nova | | bootable | true | | consistencygroup_id | None | | created_at | 2016-10-13T06:08:07.000000 | | description | None | | encrypted | False | | id | 573e024d-5235-49ce-8332-be1576d323f8 | | multiattach | False | | name | my-new-volume | | properties | | | replication_status | disabled | | size | 8 | | snapshot_id | None | | source_volid | None | | status | in-use | | type | lvmdriver-1 | | updated_at | 2016-10-13T06:08:11.000000 | | user_id | 33fdc37314914796883706b33e587d51 | +------------------------------+-----------------------------------------------+ Delete a volume ~~~~~~~~~~~~~~~ #. To delete your volume, you must first detach it from the server. To detach the volume from your server and check for the list of existing volumes, see steps 1 and 2 in Resize_a_volume_. Delete the volume using either the volume name or ID: .. code-block:: console $ openstack volume delete my-new-volume This command does not provide any output. #. List the volumes again, and note that the status of your volume is ``deleting``: .. code-block:: console $ openstack volume list +----------------+-----------------+-----------+------+-------------+ | ID | Name | Status | Size | Attached to | +----------------+-----------------+-----------+------+-------------+ | 573e024d-52... | my-new-volume | deleting | 8 | | | bd7cf584-45... 
| my-bootable-vol | available | 8 | | +----------------+-----------------+-----------+------+-------------+ When the volume is fully deleted, it disappears from the list of volumes: .. code-block:: console $ openstack volume list +----------------+-----------------+-----------+------+-------------+ | ID | Name | Status | Size | Attached to | +----------------+-----------------+-----------+------+-------------+ | bd7cf584-45... | my-bootable-vol | available | 8 | | +----------------+-----------------+-----------+------+-------------+ .. _Resize_a_volume: Resize a volume ~~~~~~~~~~~~~~~ #. To resize your volume, you must first detach it from the server if the volume driver does not support in-use extend. (See Extend_attached_volume_.) To detach the volume from your server, pass the server ID and volume ID to the following command: .. code-block:: console $ openstack server remove volume 84c6e57d-a6b1-44b6-81eb-fcb36afd31b5 573e024d-5235-49ce-8332-be1576d323f8 This command does not provide any output. #. List volumes: .. code-block:: console $ openstack volume list +----------------+-----------------+-----------+------+-------------+ | ID | Name | Status | Size | Attached to | +----------------+-----------------+-----------+------+-------------+ | 573e024d-52... | my-new-volume | available | 8 | | | bd7cf584-45... | my-bootable-vol | available | 8 | | +----------------+-----------------+-----------+------+-------------+ Note that the volume is now available. #. Resize the volume by passing the volume ID and the new size (a value greater than the old one) as parameters: .. code-block:: console $ openstack volume set 573e024d-5235-49ce-8332-be1576d323f8 --size 10 This command does not provide any output. Note: The volume status ``reserved`` is not a valid state for an extend operation. .. note:: When extending an LVM volume with a snapshot, the volume will be deactivated. The reactivation is automatic unless ``auto_activation_volume_list`` is defined in ``lvm.conf``. See ``lvm.conf`` for more information. .. _Extend_attached_volume: Extend attached volume ~~~~~~~~~~~~~~~~~~~~~~ Starting from microversion 3.42, it is also possible to extend an attached volume with status ``in-use``, depending upon policy settings and the capabilities of the backend storage. Sufficient amount of storage must exist to extend the volume. #. Resize the volume by passing the microversion,the volume ID, and the new size (a value greater than the old one) as parameters: .. code-block:: console $ openstack --os-volume-api-version 3.42 volume set 573e024d-5235-49ce-8332-be1576d323f8 --size 10 This command does not provide any output. Migrate a volume ~~~~~~~~~~~~~~~~ As an administrator, you can migrate a volume with its data from one location to another in a manner that is transparent to users and workloads. You can migrate only detached volumes with no snapshots. Possible use cases for data migration include: * Bring down a physical storage device for maintenance without disrupting workloads. * Modify the properties of a volume. * Free up space in a thinly-provisioned back end. Migrate a volume with the :command:`openstack volume migrate` command, as shown in the following example: .. code-block:: console $ openstack volume migrate [-h] --host [--force-host-copy] [--lock-volume] The arguments for this command are: host The destination host in the format `host@backend-name#pool`. volume The ID of the volume to migrate. *force-host-copy* Disables any driver optimizations and forces the data to be copied by the host. 
*lock-volume* Prevents other processes from aborting the migration. .. note:: If the volume has snapshots, the specified host destination cannot accept the volume. If the user is not an administrator, the migration fails. Transfer a volume ~~~~~~~~~~~~~~~~~ You can transfer a volume from one owner to another by using the :command:`openstack volume transfer request create` command. The volume donor, or original owner, creates a transfer request and sends the created transfer ID and authorization key to the volume recipient. The volume recipient, or new owner, accepts the transfer by using the ID and key. Starting with the Rocky release, Cinder changes the API behavior for the v2 and v3 API up to microversion 3.55. Snapshots will be transferred with the volume by default. That means if the volume has some snapshots, when a user transfers a volume from one owner to another, then those snapshots will be transferred with the volume as well. Starting with microversion 3.55 and later, Cinder supports the ability to transfer volume without snapshots. If users don't want to transfer snapshots, they need to specify the new optional argument `--no-snapshots`. Starting with microversion 3.70 and later, Cinder supports the ability to transfer encrypted volumes. Snapshots must be transferred with the volume. .. note:: The procedure for volume transfer is intended for projects (both the volume donor and recipient) within the same cloud. Use cases include: * Create a custom bootable volume or a volume with a large data set and transfer it to a customer. * For bulk import of data to the cloud, the data ingress system creates a new Block Storage volume, copies data from the physical device, and transfers device ownership to the end user. Create a volume transfer request -------------------------------- #. While logged in as the volume donor, list the available volumes: .. code-block:: console $ openstack volume list +-----------------+-----------------+-----------+------+-------------+ | ID | Name | Status | Size | Attached to | +-----------------+-----------------+-----------+------+-------------+ | 72bfce9f-cac... | None | error | 1 | | | a1cdace0-08e... | None | available | 1 | | +-----------------+-----------------+-----------+------+-------------+ #. As the volume donor, request a volume transfer authorization code for a specific volume: .. code-block:: console $ openstack volume transfer request create [--no-snapshots] The arguments to be passed are: ```` Name or ID of volume to transfer. ``--no-snapshots`` Transfer the volume without snapshots. The volume must be in an ``available`` state or the request will be denied. If the transfer request is valid in the database (that is, it has not expired or been deleted), the volume is placed in an ``awaiting-transfer`` state. For example: .. code-block:: console $ openstack volume transfer request create a1cdace0-08e4-4dc7-b9dc-457e9bcfe25f The output shows the volume transfer ID in the ``id`` row and the authorization key. .. code-block:: console +------------+--------------------------------------+ | Field | Value | +------------+--------------------------------------+ | auth_key | 0a59e53630f051e2 | | created_at | 2016-11-03T11:49:40.346181 | | id | 34e29364-142b-4c7b-8d98-88f765bf176f | | name | None | | volume_id | a1cdace0-08e4-4dc7-b9dc-457e9bcfe25f | +------------+--------------------------------------+ .. note:: Optionally, you can specify a name for the transfer by using the ``--name transferName`` parameter. .. 
note:: While the ``auth_key`` property is visible in the output of ``openstack volume transfer request create VOLUME_ID``, it will not be available in subsequent ``openstack volume transfer request show TRANSFER_ID`` command. #. Send the volume transfer ID and authorization key to the new owner (for example, by email). #. View pending transfers: .. code-block:: console $ openstack volume transfer request list +--------------------------------------+--------------------------------------+------+ | ID | Volume | Name | +--------------------------------------+--------------------------------------+------+ | 6e4e9aa4-bed5-4f94-8f76-df43232f44dc | a1cdace0-08e4-4dc7-b9dc-457e9bcfe25f | None | +--------------------------------------+--------------------------------------+------+ #. After the volume recipient, or new owner, accepts the transfer, you can see that the transfer is no longer available: .. code-block:: console $ openstack volume transfer request list +----+-----------+------+ | ID | Volume ID | Name | +----+-----------+------+ +----+-----------+------+ Accept a volume transfer request -------------------------------- #. As the volume recipient, you must first obtain the transfer ID and authorization key from the original owner. #. Accept the request: .. code-block:: console $ openstack volume transfer request accept transferID authKey For example: .. code-block:: console $ openstack volume transfer request accept 6e4e9aa4-bed5-4f94-8f76-df43232f44dc b2c8e585cbc68a80 +-----------+--------------------------------------+ | Property | Value | +-----------+--------------------------------------+ | id | 6e4e9aa4-bed5-4f94-8f76-df43232f44dc | | name | None | | volume_id | a1cdace0-08e4-4dc7-b9dc-457e9bcfe25f | +-----------+--------------------------------------+ .. note:: If you do not have a sufficient quota for the transfer, the transfer is refused. Delete a volume transfer ------------------------ #. List available volumes and their statuses: .. code-block:: console $ openstack volume list +-----------------+-----------------+-----------------+------+-------------+ | ID | Name | Status | Size | Attached to | +-----------------+-----------------+-----------------+------+-------------+ | 72bfce9f-cac... | None | error | 1 | | | a1cdace0-08e... | None |awaiting-transfer| 1 | | +-----------------+-----------------+-----------------+------+-------------+ #. Find the matching transfer ID: .. code-block:: console $ openstack volume transfer request list +--------------------------------------+--------------------------------------+------+ | ID | VolumeID | Name | +--------------------------------------+--------------------------------------+------+ | a6da6888-7cdf-4291-9c08-8c1f22426b8a | a1cdace0-08e4-4dc7-b9dc-457e9bcfe25f | None | +--------------------------------------+--------------------------------------+------+ #. Delete the volume: .. code-block:: console $ openstack volume transfer request delete Name or ID of transfer to delete. For example: .. code-block:: console $ openstack volume transfer request delete a6da6888-7cdf-4291-9c08-8c1f22426b8a #. Verify that transfer list is now empty and that the volume is again available for transfer: .. code-block:: console $ openstack volume transfer request list +----+-----------+------+ | ID | Volume ID | Name | +----+-----------+------+ +----+-----------+------+ .. 
code-block:: console $ openstack volume list +-----------------+-----------------+-----------------+------+-------------+ | ID | Name | Status | Size | Attached to | +-----------------+-----------------+-----------------+------+-------------+ | 72bfce9f-cac... | None | error | 1 | | | a1cdace0-08e... | None | available | 1 | | +-----------------+-----------------+-----------------+------+-------------+ Manage and unmanage a snapshot ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ A snapshot is a point in time version of a volume. As an administrator, you can manage and unmanage snapshots. Manage a snapshot ----------------- Manage a snapshot with the :command:`openstack volume snapshot set` command: .. code-block:: console $ openstack volume snapshot set [-h] [--name ] [--description ] [--no-property] [--property ] [--state ] The arguments to be passed are: ``--name `` New snapshot name ``--description `` New snapshot description ``--no-property`` Remove all properties from (specify both --no-property and --property to remove the current properties before setting new properties.) ``--property `` Property to add or modify for this snapshot (repeat option to set multiple properties) ``--state `` New snapshot state. ("available", "error", "creating", "deleting", or "error_deleting") (admin only) (This option simply changes the state of the snapshot in the database with no regard to actual status, exercise caution when using) ```` Snapshot to modify (name or ID) .. code-block:: console $ openstack volume snapshot set my-snapshot-id Unmanage a snapshot ------------------- Unmanage a snapshot with the :command:`openstack volume snapshot unset` command: .. code-block:: console $ openstack volume snapshot unset [-h] [--property ] The arguments to be passed are: ``--property `` Property to remove from snapshot (repeat option to remove multiple properties) ```` Snapshot to modify (name or ID). The following example unmanages the ``my-snapshot-id`` image: .. code-block:: console $ openstack volume snapshot unset my-snapshot-id Report backend state in service list ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Each of the Cinder services report a Status and a State. These are the administrative state and the runtime state, respectively. To get a listing of all Cinder services and their states, run the command: .. code-block:: console $ openstack volume service list +------------------+-------------------+------+---------+-------+----------------------------+ | Binary | Host | Zone | Status | State | Updated At | +------------------+-------------------+------+---------+-------+----------------------------+ | cinder-scheduler | tower | nova | enabled | up | 2018-03-30T21:16:11.000000 | | cinder-volume | tower@lvmdriver-1 | nova | enabled | up | 2018-03-30T21:16:15.000000 | | cinder-backup | tower | nova | enabled | up | 2018-03-30T21:16:14.000000 | +------------------+-------------------+------+---------+-------+----------------------------+ ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/doc/source/cli/cli-set-quotas.rst0000664000175000017500000000352500000000000021314 0ustar00zuulzuul00000000000000.. _manage-quotas: ============= Manage quotas ============= To prevent system capacities from being exhausted without notification, you can set up quotas. Quotas are operational limits. For example, the number of gigabytes allowed for each project can be controlled so that cloud resources are optimized. 
Quotas can be enforced at both the project and the project-user level. Using the command-line interface, you can manage quotas for the OpenStack Compute service, the OpenStack Block Storage service, and the OpenStack Networking service. The cloud operator typically changes default values because a project requires more than ten volumes or 1 TB on a compute node. .. note:: To view all projects, run: .. code-block:: console $ openstack project list +----------------------------------+----------+ | ID | Name | +----------------------------------+----------+ | e66d97ac1b704897853412fc8450f7b9 | admin | | bf4a37b885fe46bd86e999e50adad1d3 | services | | 21bd1c7c95234fd28f589b60903606fa | tenant01 | | f599c5cd1cba4125ae3d7caed08e288c | tenant02 | +----------------------------------+----------+ To display all current users for a project, run: .. code-block:: console $ openstack user list --project PROJECT_NAME +----------------------------------+--------+ | ID | Name | +----------------------------------+--------+ | ea30aa434ab24a139b0e85125ec8a217 | demo00 | | 4f8113c1d838467cad0c2f337b3dfded | demo01 | +----------------------------------+--------+ Use :samp:`openstack quota show {PROJECT_NAME}` to list all quotas for a project. Use :samp:`openstack quota set {PROJECT_NAME} {--parameters}` to set quota values. .. toctree:: :maxdepth: 2 cli-cinder-quotas.rst ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/doc/source/cli/index.rst0000664000175000017500000000121600000000000017544 0ustar00zuulzuul00000000000000================================ Command-Line Interface Reference ================================ In this section you will find information on Cinder's command line utilities. Cinder Management Commands -------------------------- These commands are used to manage existing installations. They are designed to be run by operators in an environment where they have direct access to the Cinder database. .. toctree:: :maxdepth: 1 cinder-manage cinder-status Additional Tools and Information -------------------------------- .. toctree:: :maxdepth: 1 cli-manage-volumes cli-set-quotas cli-cinder-quotas cli-cinder-scheduling ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315577.4351215 cinder-27.0.0/doc/source/common/0000775000175000017500000000000000000000000016424 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/doc/source/common/glossary.rst0000664000175000017500000000400500000000000021020 0ustar00zuulzuul00000000000000======== Glossary ======== This glossary offers a list of terms and definitions to define a vocabulary for Cinder concepts. .. glossary:: Logical Volume Manager (LVM) Provides a method of allocating space on mass-storage devices that is more flexible than conventional partitioning schemes. iSCSI Qualified Name (IQN) IQN is the format most commonly used for iSCSI names, which uniquely identify nodes in an iSCSI network. All IQNs follow the pattern ``iqn.yyyy-mm.domain:identifier``, where ``yyyy-mm`` is the year and month in which the domain was registered, ``domain`` is the reversed domain name of the issuing organization, and ``identifier`` is an optional string which makes each IQN under the same domain unique. 
For example: ``iqn.2015-10.org.openstack.408ae959bce1`` NVMe Qualified Name (NQN) NQN is the format most commonly used for NVMe names, which uniquely identify hosts or NVM subsystems in a network. NQNs can follow one of two supported formats. The first format, used by organizations that own a domain, is ``nqn.yyyy-mm.domain:identifier``, where ``yyyy-mm`` is the year and month in which the domain was registered, ``domain`` is the reversed domain name of the issuing organization, and ``identifier`` is an optional string which makes each NQN unique under the same domain name. For example: ``nqn.2014-08.com.example:nvme:nvm-subsystem-sn-d78432`` The second format is used to create unique identifiers when there is not a naming authority or there is not a requirement for a human interpretable string. This format is ``nqn.2014-08.org.nvmexpress:uuid:identifier``, where only the ``identifier`` is variable and consists of a 128-bit UUID based on the definition in RFC 4122 represented as a string. For example: ``nqn.2014-08.org.nvmexpress:uuid:f81d4fae-7dec-11d0-a765-00a0c91e6bf6`` ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/doc/source/conf.py0000664000175000017500000002102000000000000016426 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # This file is execfile()d with the current directory set # to its containing dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. import os import sys # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. sys.path.insert(0, os.path.abspath('../../')) sys.path.insert(0, os.path.abspath('../')) sys.path.insert(0, os.path.abspath('./')) # -- General configuration ---------------------------------------------------- # Add any Sphinx extension module names here, as strings. # They can be extensions coming with Sphinx (named 'sphinx.ext.*') # or your custom ones. 
extensions = ['sphinx.ext.coverage', 'sphinx.ext.todo', 'sphinx.ext.graphviz', 'sphinx_feature_classification.support_matrix', 'openstackdocstheme', 'stevedore.sphinxext', 'oslo_config.sphinxconfiggen', 'ext.cinder_driverlist', 'ext.driver_opts', 'oslo_policy.sphinxext', 'oslo_policy.sphinxpolicygen', 'sphinxcontrib.apidoc', ] # sphinxcontrib.apidoc options apidoc_module_dir = '../../cinder' apidoc_output_dir = 'contributor/api' apidoc_excluded_paths = [ 'tests/*', 'tests', 'db/migrations/*', 'db/migrations', 'db/sqlalchemy/*', 'db/sqlalchemy', 'volume/drivers/*', 'volume/drivers', 'zonemanager/drivers/*', 'zonemanager/drivers', 'backup/drivers/*', 'backup/drivers', 'wsgi/api.py', ] apidoc_separate_modules = True config_generator_config_file = ( '../../tools/config/cinder-config-generator.conf') sample_config_basename = '_static/cinder' # These are driver specific libraries that are not always present autodoc_mock_imports = ['nvmet'] policy_generator_config_file = ( '../../tools/config/cinder-policy-generator.conf') sample_policy_basename = '_static/cinder' # Add any paths that contain templates here, relative to this directory. templates_path = [] # The suffix of source filenames. source_suffix = '.rst' # The encoding of source files. # source_encoding = 'utf-8' # The master toctree document. master_doc = 'index' # General information about the project. openstackdocs_repo_name = 'openstack/cinder' openstackdocs_bug_project = 'cinder' openstackdocs_bug_tag = 'doc' openstackdocs_pdf_link = True copyright = '2010-present, OpenStack Foundation' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. # language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: # today = '' # Else, today_fmt is used as the format for a strftime call. # today_fmt = '%B %d, %Y' # List of directories, relative to source directory, that shouldn't be searched # for source files. exclude_trees = [] # The reST default role (used for this markup: `text`) to use # for all documents. # default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. # add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). add_module_names = False # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. show_authors = False # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'native' # A list of ignored prefixes for module index sorting. modindex_common_prefix = ['cinder.'] # -- Options for man page output ---------------------------------------------- # Grouping the document tree for man pages. # List of tuples 'sourcefile', 'target', 'title', 'Authors name', 'manual' man_pages = [ ('cli/cinder-manage', 'cinder-manage', 'Cloud controller fabric', ['OpenStack'], 1), ('cli/cinder-status', 'cinder-status', 'Upgrade checking utility', ['OpenStack'], 1), ] # -- Options for HTML output -------------------------------------------------- # The theme to use for HTML and HTML Help pages. Major themes that come with # Sphinx are currently 'default' and 'sphinxdoc'. # html_theme_path = ["."] html_theme = 'openstackdocs' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. 
# html_theme_options = {} # Add any paths that contain custom themes here, relative to this directory. # html_theme_path = [] # The name for this set of Sphinx documents. If None, it defaults to # " v documentation". # html_title = None # A shorter title for the navigation bar. Default is the same as html_title. # html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. # html_logo = None # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. # html_favicon = None # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ['_static'] # Add any paths that contain "extra" files, such as .htaccess. html_extra_path = ['_extra'] # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. # html_last_updated_fmt = '%Y-%m-%d %H:%M' # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. # html_use_smartypants = True # Custom sidebar templates, maps document names to template names. # html_sidebars = {} # Additional templates that should be rendered to pages, maps page names to # template names. # html_additional_pages = {} # If false, no module index is generated. # html_use_modindex = True # If false, no index is generated. # html_use_index = True # If true, the index is split into individual pages for each letter. # html_split_index = False # If true, links to the reST sources are added to the pages. # html_show_sourcelink = True # If true, an OpenSearch description file will be output, and all pages will # contain a tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. # html_use_opensearch = '' # If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml"). # html_file_suffix = '' # Output file base name for HTML help builder. htmlhelp_basename = 'cinderdoc' # -- Options for LaTeX output ------------------------------------------------- # The paper size ('letter' or 'a4'). # latex_paper_size = 'letter' # The font size ('10pt', '11pt' or '12pt'). # latex_font_size = '10pt' # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, author, documentclass # [howto/manual]). latex_documents = [ ('index', 'doc-cinder.tex', 'Cinder Documentation', 'Cinder Contributors', 'manual'), ] # The name of an image file (relative to this directory) to place at the top of # the title page. # latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. # latex_use_parts = False # Additional stuff for the LaTeX preamble. # latex_preamble = '' # Documents to append as an appendix to all manuals. # latex_appendices = [] # If false, no module index is generated. 
# latex_use_modindex = True # Disable usage of xindy https://bugzilla.redhat.com/show_bug.cgi?id=1643664 latex_use_xindy = False latex_domain_indices = False latex_elements = { 'makeindex': '', 'printindex': '', 'preamble': r'\setcounter{tocdepth}{3}', 'maxlistdepth': 10, } ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315577.4351215 cinder-27.0.0/doc/source/configuration/0000775000175000017500000000000000000000000020003 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/doc/source/configuration/README.rst0000664000175000017500000000152000000000000021470 0ustar00zuulzuul00000000000000========================================================= Cinder Configuration Documentation (source/configuration) ========================================================= Introduction: ------------- This directory is intended to hold any documentation that relates to how to configure Cinder. It is intended that some of this content be automatically generated in the future. At the moment, however, it is not. If you would like to work on this, please use Launchpad Bug `#1847600 `_ for tracking purposes. Changes to configuration options for Cinder or its drivers needs to be put under this directory. The full spec for organization of documentation may be seen in the `OS Manuals Migration Spec `_. ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315577.4391217 cinder-27.0.0/doc/source/configuration/block-storage/0000775000175000017500000000000000000000000022537 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315577.4431217 cinder-27.0.0/doc/source/configuration/block-storage/backup/0000775000175000017500000000000000000000000024004 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/doc/source/configuration/block-storage/backup/ceph-backup-driver.rst0000664000175000017500000000600400000000000030211 0ustar00zuulzuul00000000000000================== Ceph backup driver ================== The Ceph backup driver backs up volumes of any type to a Ceph back-end store. The driver can also detect whether the volume to be backed up is a Ceph RBD volume, and if so, it tries to perform incremental and differential backups. For source Ceph RBD volumes, you can perform backups within the same Ceph pool (not recommended). You can also perform backups between different Ceph pools and between different Ceph clusters. At the time of writing, differential backup support in Ceph/librbd was quite new. This driver attempts a differential backup in the first instance. If the differential backup fails, the driver falls back to full backup/copy. If incremental backups are used, multiple backups of the same volume are stored as snapshots so that minimal space is consumed in the backup store. It takes far less time to restore a volume than to take a full copy. By default, all incremental backups are held on the source volume storage, which can take up much disk space on the usually more expensive primary storage compared to backup storage. Enabling the option ``backup_ceph_max_snapshots`` can save disk space on the source volume storage by only keeping a limited number of snapshots per backup volume. 
After every successful creation of a new incremental backup, the Ceph backup driver will then ensure that excess snapshots of the corresponding backup volume are deleted so that only the ``backup_ceph_max_snapshots`` most recent snapshots are kept on the primary storage. However, this can cause incremental backups to automatically become full backups instead if a user manually deleted at least ``backup_ceph_max_snapshots`` incremental backups. In that case the next snapshot, being a full backup, will require more disk space on the backup storage and will take longer to complete than an incremental backup would have. Thus, the option allows to configure a tradeoff between required space on the source volume storage and required space on the backup storage as well as a longer backup process under the above conditions. .. note:: Block Storage enables you to: - Restore to a new volume, which is the default and recommended action. - Restore to the original volume from which the backup was taken. The restore action takes a full copy because this is the safest action. To enable the Ceph backup driver, include the following option in the ``cinder.conf`` file: .. code-block:: ini backup_driver = cinder.backup.drivers.ceph.CephBackupDriver The following configuration options are available for the Ceph backup driver. .. config-table:: :config-target: Ceph backup driver cinder.backup.drivers.ceph This example shows the default options for the Ceph backup driver. .. code-block:: ini backup_ceph_conf=/etc/ceph/ceph.conf backup_ceph_user = cinder-backup backup_ceph_chunk_size = 134217728 backup_ceph_pool = backups backup_ceph_stripe_unit = 0 backup_ceph_stripe_count = 0 backup_ceph_max_snapshots = 0 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/doc/source/configuration/block-storage/backup/gcs-backup-driver.rst0000664000175000017500000000105600000000000030050 0ustar00zuulzuul00000000000000======================================= Google Cloud Storage backup driver ======================================= The Google Cloud Storage (GCS) backup driver backs up volumes of any type to Google Cloud Storage. To enable the GCS backup driver, include the following option in the ``cinder.conf`` file: .. code-block:: ini backup_driver = cinder.backup.drivers.gcs.GoogleBackupDriver The following configuration options are available for the GCS backup driver. .. config-table:: :config-target: GCS backup driver cinder.backup.drivers.gcs ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/doc/source/configuration/block-storage/backup/glusterfs-backup-driver.rst0000664000175000017500000000101000000000000031300 0ustar00zuulzuul00000000000000======================= GlusterFS backup driver ======================= The GlusterFS backup driver backs up volumes of any type to GlusterFS. To enable the GlusterFS backup driver, include the following option in the ``cinder.conf`` file: .. code-block:: ini backup_driver = cinder.backup.drivers.glusterfs.GlusterfsBackupDriver The following configuration options are available for the GlusterFS backup driver. .. 
config-table:: :config-target: GlusterFS backup driver cinder.backup.drivers.glusterfs ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/doc/source/configuration/block-storage/backup/nfs-backup-driver.rst0000664000175000017500000000107200000000000030060 0ustar00zuulzuul00000000000000================= NFS backup driver ================= The backup driver for the NFS back end backs up volumes of any type to an NFS exported backup repository. To enable the NFS backup driver, include the following option in the ``[DEFAULT]`` section of the ``cinder.conf`` file: .. code-block:: ini backup_driver = cinder.backup.drivers.nfs.NFSBackupDriver The following configuration options are available for the NFS back-end backup driver. .. config-table:: :config-target: NFS backup driver cinder.backup.drivers.nfs cinder.backup.drivers.posix ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/doc/source/configuration/block-storage/backup/posix-backup-driver.rst0000664000175000017500000000106700000000000030440 0ustar00zuulzuul00000000000000================================ POSIX file systems backup driver ================================ The POSIX file systems backup driver backs up volumes of any type to POSIX file systems. To enable the POSIX file systems backup driver, include the following option in the ``cinder.conf`` file: .. code-block:: ini backup_driver = cinder.backup.drivers.posix.PosixBackupDriver The following configuration options are available for the POSIX file systems backup driver. .. config-table:: :config-target: POSIX backup driver cinder.backup.drivers.posix ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/doc/source/configuration/block-storage/backup/s3-backup-driver.rst0000664000175000017500000000077400000000000027627 0ustar00zuulzuul00000000000000======================== S3 Storage backup driver ======================== The S3 backup driver backs up volumes to any type of Amazon S3 and S3 compatible object storages. To enable the S3 backup driver, include the following option in the ``cinder.conf`` file: .. code-block:: ini backup_driver = cinder.backup.drivers.s3.S3BackupDriver The following configuration options are available for the S3 backup driver. .. config-table:: :config-target: S3 backup driver cinder.backup.drivers.s3 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/doc/source/configuration/block-storage/backup/swift-backup-driver.rst0000664000175000017500000000276000000000000030433 0ustar00zuulzuul00000000000000=================== Swift backup driver =================== The backup driver for the swift back end performs a volume backup to an object storage system. To enable the swift backup driver, include the following option in the ``cinder.conf`` file: .. code-block:: ini backup_driver = cinder.backup.drivers.swift.SwiftBackupDriver The following configuration options are available for the Swift back-end backup driver. .. config-table:: :config-target: Swift backup driver cinder.backup.drivers.swift To enable the swift backup driver for 1.0, 2.0, or 3.0 authentication version, specify ``1``, ``2``, or ``3`` correspondingly. For example: .. 
code-block:: ini backup_swift_auth_version = 2 In addition, the 2.0 authentication system requires the definition of the ``backup_swift_tenant`` setting: .. code-block:: ini backup_swift_tenant = This example shows the default options for the Swift back-end backup driver. .. code-block:: ini backup_swift_url = http://localhost:8080/v1/AUTH_ backup_swift_auth_url = http://localhost:5000/v3 backup_swift_auth = per_user backup_swift_auth_version = 1 backup_swift_user = backup_swift_user_domain = backup_swift_key = backup_swift_container = volumebackups backup_swift_object_size = 52428800 backup_swift_project = backup_swift_project_domain = backup_swift_retry_attempts = 3 backup_swift_retry_backoff = 2 backup_compression_algorithm = zlib ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/doc/source/configuration/block-storage/backup-drivers.rst0000664000175000017500000000125400000000000026214 0ustar00zuulzuul00000000000000============== Backup drivers ============== .. sort by the drivers by open source software .. and the drivers for proprietary components .. toctree:: backup/ceph-backup-driver.rst backup/glusterfs-backup-driver.rst backup/nfs-backup-driver.rst backup/posix-backup-driver.rst backup/swift-backup-driver.rst backup/gcs-backup-driver.rst backup/s3-backup-driver.rst This section describes how to configure the cinder-backup service and its drivers. The volume drivers are included with the `Block Storage repository `_. To set a backup driver, use the ``backup_driver`` flag. By default there is no backup driver enabled. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/doc/source/configuration/block-storage/block-storage-overview.rst0000664000175000017500000001047700000000000027702 0ustar00zuulzuul00000000000000========================================= Introduction to the Block Storage service ========================================= The Block Storage service provides persistent block storage resources that Compute instances can consume. This includes secondary attached storage similar to the Amazon Elastic Block Storage (EBS) offering. In addition, you can write images to a Block Storage device for Compute to use as a bootable persistent instance. The Block Storage service differs slightly from the Amazon EBS offering. The Block Storage service does not provide a shared storage solution like NFS. With the Block Storage service, you can attach a device to only one instance. The Block Storage service provides: - ``cinder-api`` - a WSGI app that authenticates and routes requests throughout the Block Storage service. It supports the OpenStack APIs only, although there is a translation that can be done through Compute's EC2 interface, which calls in to the Block Storage client. - ``cinder-scheduler`` - schedules and routes requests to the appropriate volume service. Depending upon your configuration, this may be simple round-robin scheduling to the running volume services, or it can be more sophisticated through the use of the Filter Scheduler. The Filter Scheduler is the default and enables filters on things like Capacity, Availability Zone, Volume Types, and Capabilities as well as custom filters. - ``cinder-volume`` - manages Block Storage devices, specifically the back-end devices themselves. - ``cinder-backup`` - provides a means to back up a Block Storage volume to OpenStack Object Storage (swift). 
The Block Storage service contains the following components: - **Back-end Storage Devices** - the Block Storage service requires some form of back-end storage that the service is built on. The default implementation is to use LVM on a local volume group named "cinder-volumes." In addition to the base driver implementation, the Block Storage service also provides the means to add support for other storage devices to be utilized such as external Raid Arrays or other storage appliances. These back-end storage devices may have custom block sizes when using KVM or QEMU as the hypervisor. - **Users and Tenants (Projects)** - the Block Storage service can be used by many different cloud computing consumers or customers (tenants on a shared system), using role-based access assignments. Roles control the actions that a user is allowed to perform. In the default configuration, most actions do not require a particular role, but this can be configured by the system administrator in the cinder policy file that maintains the rules. .. note:: For more information about configuring cinder policies, see :ref:`policy-configuration`. A user's access to particular volumes is limited by tenant, but the user name and password are assigned per user. Key pairs granting access to a volume are enabled per user, but quotas to control resource consumption across available hardware resources are per tenant. For tenants, quota controls are available to limit: - The number of volumes that can be created. - The number of snapshots that can be created. - The total number of GBs allowed per tenant (shared between snapshots and volumes). You can revise the default quota values with the Block Storage CLI, so the limits placed by quotas are editable by admin users. - **Volumes, Snapshots, and Backups** - the basic resources offered by the Block Storage service are volumes and snapshots which are derived from volumes and volume backups: - **Volumes** - allocated block storage resources that can be attached to instances as secondary storage or they can be used as the root store to boot instances. Volumes are persistent R/W block storage devices most commonly attached to the compute node through iSCSI. - **Snapshots** - a read-only point in time copy of a volume. The snapshot can be created from a volume that is currently in use (through the use of ``--force True``) or in an available state. The snapshot can then be used to create a new volume through create from snapshot. - **Backups** - an archived copy of a volume currently stored in Object Storage (swift). ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/doc/source/configuration/block-storage/config-options.rst0000664000175000017500000000455600000000000026241 0ustar00zuulzuul00000000000000================== Additional options ================== These options can also be set in the ``cinder.conf`` file. .. config-table:: :config-target: API :exclusive-list: api_opts,compute_opts,socket_opts cinder.api.common cinder.common.config cinder.compute cinder.service cinder.wsgi.eventlet_server .. config-table:: :config-target: [oslo_middleware] oslo_middleware.http_proxy_to_wsgi oslo_middleware.sizelimit oslo_middleware.ssl .. config-table:: :config-target: authorization :exclusive-list: auth_opts cinder.common.config .. config-table:: :config-target: Volume Manager cinder.volume.manager .. 
config-table:: :config-target: Volume Scheduler cinder.scheduler.manager cinder.scheduler.host_manager cinder.scheduler.driver cinder.scheduler.weights.volume_number cinder.scheduler.weights.capacity .. config-table:: :config-target: backup :exclusive-list: backup_opts,backup_manager_opts cinder.common.config cinder.backup.api cinder.backup.chunkeddriver cinder.backup.driver cinder.backup.manager cinder.db.api .. config-table:: :config-target: [nova] cinder.compute.nova .. config-table:: :config-target: images :exclusive-list: image_opts,glance_core_properties_opts cinder.image.glance cinder.image.image_utils cinder.volume.driver cinder.common.config .. config-table:: :config-target: NAS cinder.volume.drivers.remotefs .. config-table:: :config-target: common driver :exclusive-list: volume_opts cinder.volume.driver .. _cinder-storage: .. config-table:: :config-target: common :exclusive-list: global_opts,db_opts cinder.common.config cinder.db.api .. config-table:: :config-target: [profiler] osprofiler.opts .. config-table:: :config-target: quota cinder.quota .. config-table:: :config-target: SAN cinder.volume.drivers.san.san .. config-table:: :config-target: iSER volume driver :exclusive-list: iser_opts cinder.volume.driver .. config-table:: :config-target: NVMET volume driver :exclusive-list: nvmet_opts cinder.volume.driver .. config-table:: :config-target: SCST volume driver :exclusive-list: scst_opts cinder.volume.driver .. config-table:: :config-target: zones :exclude-list: allow_force_upload_opt,volume_host_opt,az_cache_time_opt cinder.volume.api ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315577.4551218 cinder-27.0.0/doc/source/configuration/block-storage/drivers/0000775000175000017500000000000000000000000024215 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/doc/source/configuration/block-storage/drivers/ceph-rbd-volume-driver.rst0000664000175000017500000001642300000000000031237 0ustar00zuulzuul00000000000000============================= Ceph RADOS Block Device (RBD) ============================= If you use KVM, QEMU or Hyper-V as your hypervisor, you can configure the Compute service to use `Ceph RADOS block devices (RBD) `__ for volumes. Ceph is a massively scalable, open source, distributed storage system. It is comprised of an object store, block store, and a POSIX-compliant distributed file system. The platform can auto-scale to the exabyte level and beyond. It runs on commodity hardware, is self-healing and self-managing, and has no single point of failure. Due to its open-source nature, you can install and use this portable storage platform in public or private clouds. .. figure:: ../../figures/ceph-architecture.png Ceph architecture .. note:: **Supported Ceph versions** The current `release cycle model for Ceph `_ targets a new release yearly on 1 March, with there being at most two active stable releases at any time. For a given OpenStack release, *Cinder supports the current Ceph active stable releases plus the two prior releases.* For example, at the time of the OpenStack Wallaby release in April 2021, the Ceph active supported releases are Pacific and Octopus. The Cinder Wallaby release therefore supports Ceph Pacific, Octopus, Nautilus, and Mimic. 
Additionally, it is expected that the version of the Ceph client available to Cinder or any of its associated libraries (os-brick, cinderlib) is aligned with the Ceph server version. Mixing server and client versions is *unsupported* and may lead to anomalous behavior. The minimum requirements for using Ceph with Hyper-V are Ceph Pacific and Windows Server 2016. RADOS ~~~~~ Ceph is based on Reliable Autonomic Distributed Object Store (RADOS). RADOS distributes objects across the storage cluster and replicates objects for fault tolerance. RADOS contains the following major components: *Object Storage Device (OSD) Daemon* The storage daemon for the RADOS service, which interacts with the OSD (physical or logical storage unit for your data). You must run this daemon on each server in your cluster. For each OSD, you can have an associated hard drive disk. For performance purposes, pool your hard drive disk with raid arrays, or logical volume management (LVM). By default, the following pools are created: data, metadata, and RBD. *Meta-Data Server (MDS)* Stores metadata. MDSs build a POSIX file system on top of objects for Ceph clients. However, if you do not use the Ceph file system, you do not need a metadata server. *Monitor (MON)* A lightweight daemon that handles all communications with external applications and clients. It also provides a consensus for distributed decision making in a Ceph/RADOS cluster. For instance, when you mount a Ceph shared on a client, you point to the address of a MON server. It checks the state and the consistency of the data. In an ideal setup, you must run at least three ``ceph-mon`` daemons on separate servers. Ways to store, use, and expose data ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ To store and access your data, you can use the following storage systems: *RADOS* Use as an object, default storage mechanism. *RBD* Use as a block device. The Linux kernel RBD (RADOS block device) driver allows striping a Linux block device over multiple distributed object store data objects. It is compatible with the KVM RBD image. *CephFS* Use as a file, POSIX-compliant file system. Ceph exposes RADOS; you can access it through the following interfaces: *RADOS Gateway* OpenStack Object Storage and Amazon-S3 compatible RESTful interface (see `RADOS_Gateway `__). *librados* and its related C/C++ bindings *RBD and QEMU-RBD* Linux kernel and QEMU block devices that stripe data across multiple objects. RBD pool ~~~~~~~~ The RBD pool used by the Cinder backend is configured with option ``rbd_pool``, and by default the driver expects exclusive management access to that pool, as in being the only system creating and deleting resources in it, since that's the recommended deployment choice. Pool sharing is strongly discouraged, and if we were to share the pool with other services, within OpenStack (Nova, Glance, another Cinder backend) or outside of OpenStack (oVirt), then the stats returned by the driver to the scheduler would not be entirely accurate. The inaccuracy would be that the actual size in use by the cinder volumes would be lower than the reported one, since it would be also including the used space by the other services. We can set the ``rbd_exclusive_cinder_pool`` configuration option to ``false`` to fix this inaccuracy, but this has a performance impact. .. 
warning:: Setting ``rbd_exclusive_cinder_pool`` to ``false`` will increase the burden on the Cinder driver and the Ceph cluster, since a request will be made for each existing image, to retrieve its size, during the stats gathering process. For deployments with large amount of volumes it is recommended to leave the default value of ``true``, and accept the inaccuracy, as it should not be particularly problematic. Driver options ~~~~~~~~~~~~~~ The following table contains the configuration options supported by the Ceph RADOS Block Device driver. .. config-table:: :config-target: Ceph storage cinder.volume.drivers.rbd RBD Mirroring ~~~~~~~~~~~~~ The cinder RBD driver supports mirroring between multiple clusters. You can configure it on the cinder side with the usual replication configuration. Refer to the :doc:`documentation ` for more information. You will also have to configure replication on the Ceph side. To do so you may refer to the `Ceph documentation `_. Note that with the RBD driver in cinder you need to configure the pool replication option in image mode. For instance, if your pool is named ``volumes``, the command would be: ``rbd mirror pool enable volumes image``. RBD QoS ~~~~~~~~~~~~~ Currently, the Cinder RBD driver supports the following QoS options compatible with Ceph Octopus release and above: .. list-table:: :header-rows: 1 * - Cinder Value - Ceph Mapping * - ``total_iops_sec`` - ``rbd_qos_iops_limit`` * - - * - ``read_iops_sec`` - ``rbd_qos_read_iops_limit`` * - - * - ``write_iops_sec`` - ``rbd_qos_write_iops_limit`` * - - * - ``total_bytes_sec`` - ``rbd_qos_bps_limit`` * - - * - ``read_bytes_sec`` - ``rbd_qos_read_bps_limit`` * - - * - ``write_bytes_sec`` - ``rbd_qos_write_bps_limit`` * - - * - ``total_iops_sec_max`` - ``rbd_qos_bps_burst`` * - - * - ``read_iops_sec_max`` - ``rbd_qos_read_iops_burst`` * - - * - ``write_iops_sec_max`` - ``rbd_qos_write_iops_burst`` * - - * - ``total_bytes_sec_max`` - ``rbd_qos_bps_burst`` * - - * - ``read_bytes_sec_max`` - ``rbd_qos_read_bps_burst`` * - - * - ``write_bytes_sec_max`` - ``rbd_qos_write_bps_burst`` * - - For more information on QoS settings you may refer to `Ceph QoS documentation `_. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/doc/source/configuration/block-storage/drivers/datacore-volume-driver.rst0000664000175000017500000003074000000000000031333 0ustar00zuulzuul00000000000000================================== DataCore SANsymphony volume driver ================================== DataCore SANsymphony volume driver provides OpenStack Compute instances with access to the SANsymphony(TM) Software-defined Storage Platform. When volumes are created in OpenStack, the driver creates corresponding virtual disks in the SANsymphony server group. When a volume is attached to an instance in OpenStack, a Linux host is registered and the corresponding virtual disk is served to the host in the SANsymphony server group. Requirements ------------- * DataCore server group running SANsymphony software version 10 PSP6 or later. * OpenStack Integration has been tested with the OpenStack environment installed on Ubuntu 20.04. For the list of qualified Linux host operating system types, refer to the `Linux Host Configuration Guide `_ on the `DataCore Technical Support Web page `_. * If using multipath I/O, ensure that iSCSI ports are logged in on all OpenStack Compute nodes. (All Fibre Channel ports will be logged in automatically.) 
Python dependencies ~~~~~~~~~~~~~~~~~~~ * ``websocket-client>=0.32.0`` Install this package using pip: .. code-block:: console $ sudo pip install "websocket-client>=0.32.0" Configuration ------------- The volume driver can be configured by editing the ``cinder.conf`` file. The options below can be configured either per server group or as extra specifications in a volume type configuration. Configuration options and default values: * ``datacore_disk_pools = None`` Sets the pools to use for the DataCore OpenStack Cinder Volume Driver. This option acts like a filter and any number of pools may be specified. The list of specified pools will be used to select the storage sources needed for virtual disks; one for single or two for mirrored. Selection is based on the pools with the most free space. This option may also be specified as an extra specification of a volume type. * ``datacore_disk_type = single`` Sets the SANsymphony virtual disk type (single or mirrored). **Single** virtual disks are created by default. Specify **mirrored** to override this behavior. Mirrored virtual disks require two DataCore Servers in the server group. This option may also be specified as an extra specification of a volume type. * ``datacore_storage_profile = Normal`` Sets the storage profile of the virtual disk. The default setting is Normal. Other valid values include the standard storage profiles (Critical, High, Low, and Archive) and the names of custom profiles that have been created. This option may also be specified as an extra specification of a volume type. * ``datacore_api_timeout = 300`` Sets the number of seconds to wait for a response from a DataCore API call. This option is used in the server group back-end configuration only. * ``datacore_disk_failed_delay = 300`` Sets the number of seconds to wait for the SANsymphony virtual disk to come out of the "Failed" state. This option is used in the server group back-end configuration only. * ``datacore_iscsi_unallowed_targets = []`` Sets a list of iSCSI targets that cannot be used to attach to the volume. By default, the DataCore iSCSI volume driver attaches a volume through all target ports with the Front-end role enabled, unlike the DataCore Fibre Channel volume driver that attaches a volume only through target ports connected to initiator. To prevent the DataCore iSCSI volume driver from using some front-end targets in volume attachment, specify this option and list the iqn and target machine for each target as the value, such as ``, , ``. For example, ````. This option is used in the server group back-end configuration only. * ``use_chap_auth = False`` Sets the CHAP authentication for the iSCSI targets that are used to serve the volume. This option is disabled by default and will allow hosts (OpenStack Compute nodes) to connect to iSCSI storage back-ends without authentication. To enable CHAP authentication, which will prevent hosts (OpenStack Compute nodes) from connecting to back-ends without authentication, set this option to **True**. In addition, specify the location where the DataCore volume driver will store dynamically created CHAP secrets by setting the **datacore_iscsi_chap_storage** option. This option is used in the server group back-end configuration only. The driver will enable CHAP only for involved target ports, therefore, not all DataCore Servers may have CHAP configured. 
*Before enabling CHAP, ensure that there are no SANsymphony volumes attached to any instances.* * ``datacore_iscsi_chap_storage = /var/lib/cinder/.datacore_chap`` Sets the path to the iSCSI CHAP authentication password storage file. **datacore_iscsi_chap_storage** is only used when **use_chap_auth = True** and **chap_password** is not set. Default **datacore_iscsi_chap_storage** value is $state_path/.datacore_chap. *CHAP secrets are passed from OpenStack Block Storage to compute in clear text. This communication should be secured to ensure that CHAP secrets are not compromised. This can be done by setting up file permissions. Before changing the CHAP configuration, ensure that there are no SANsymphony volumes attached to any instances.* This option is used in the server group back-end configuration only. Configuration Examples ~~~~~~~~~~~~~~~~~~~~~~ Examples of option configuration in the ``cinder.conf`` file. * An example using **datacore_disk_pools**, **datacore_disk_type**, and **datacore_storage_profile** to create a mirrored virtual disk with a High priority storage profile using specific pools: .. code-block:: ini volume_driver = cinder.volume.drivers.datacore.iscsi.ISCSIVolumeDriver san_ip = san_login = san_password = datacore_disk_type = mirrored datacore_disk_pools = Disk pool 1, Disk pool 2 datacore_storage_profile = High * An example using **datacore_iscsi_unallowed_targets** to prevent the volume from using the specified targets: .. code-block:: ini volume_driver = cinder.volume.drivers.datacore.iscsi.ISCSIVolumeDriver san_ip = san_login = san_password = datacore_iscsi_unallowed_targets = iqn.2000-08.com.datacore:mns-ssv-10-1,iqn.2000-08.com.datacore:mns-ssvdev-01-1 * An example using **use_chap_auth** and **chap_username** and **chap_password** to enable CHAP authentication: .. code-block:: ini volume_driver = cinder.volume.drivers.datacore.iscsi.ISCSIVolumeDriver use_chap_auth = True chap_username = user1 chap_password = user1_password * An example using **use_chap_auth** and **datacore_iscsi_chap_storage** to enable CHAP authentication and provide the path to the CHAP password storage file: .. code-block:: ini volume_driver = cinder.volume.drivers.datacore.iscsi.ISCSIVolumeDriver use_chap_auth = True datacore_iscsi_chap_storage = /var/lib/cinder/.datacore_chap DataCore volume driver stores CHAP secrets in clear text, and the password file must be secured by setting up file permissions. The following example shows how to create a password file and set up permissions. It assumes that the cinder-volume service is running under the user `cinder`. Please note that following steps are only required if the user wants to change the default **datacore_iscsi_chap_storage** location. .. code-block:: console $ sudo mkdir /opt/user_dir/cinder -p $ sudo /bin/sh -c "> /opt/user_dir/cinder/.datacore_chap" $ sudo chown cinder:cinder /opt/user_dir/cinder $ sudo chown cinder:cinder /opt/user_dir/cinder/.datacore_chap $ sudo chmod -v 600 /opt/user_dir/cinder/.datacore_chap CHAP will be enabled in SANsymphony after setting **use_chap_auth = True**. **chap_username** and **chap_password** will be used if mentioned, if not iSCSI initiator PortName will be used as chap_username with a random password, and the credentials will be stored in **datacore_iscsi_chap_storage** location. Creating Volume Types --------------------- Volume types can be created with the DataCore disk type specified in the datacore:disk_type extra specification. 
In the following example, a volume type named mirrored_disk is created and the disk type is set to mirrored. .. code-block:: console $ cinder type-create mirrored_disk $ cinder type-key mirrored_disk set datacore:disk_type=mirrored In addition, volume specifications can also be declared as extra specifications for volume types. The example below sets additional configuration options for the volume type mirrored_disk; storage profile will be set to High and virtual disks will be created from Disk pool 1, Disk pool 2, or Disk pool 3. .. code-block:: console $ cinder type-key mirrored_disk set datacore:storage_profile=High $ cinder type-key mirrored_disk set "datacore:disk_pools=Disk pool 1, Disk pool 2, Disk pool 3" Configuring Multiple Storage Back Ends -------------------------------------- OpenStack Block Storage can be configured to use several back-end storage solutions. Multiple back-end configuration allows you to configure different storage configurations for SANsymphony server groups. The configuration options for a group must be defined in the group. To enable multiple back ends: 1. In the ``cinder.conf`` file, set the **enabled_backends** option to identify the groups. One name is associated with each server group back-end configuration. In the example below there are two groups, ``datacore-1`` and ``datacore-2``: .. code-block:: ini [DEFAULT] enabled_backends = datacore-1, datacore-2 2. Define the back-end storage used by each server group in a separate section (for example ``[datacore-1]``): .. code-block:: ini [datacore-1] volume_driver = cinder.volume.drivers.datacore.iscsi.ISCSIVolumeDriver volume_backend_name = DataCore_iSCSI san_ip = san_login = san_password = use_chap_auth = True chap_username = chap_password = datacore_iscsi_chap_storage = /var/lib/cinder/.datacore_chap datacore_iscsi_unallowed_targets = iqn.2000-08.com.datacore:mns-ssv-10-1 datacore_disk_type = mirrored [datacore-2] volume_driver = cinder.volume.drivers.datacore.fc.FibreChannelVolumeDriver volume_backend_name = DataCore_FibreChannel san_ip = san_login = san_password = datacore_disk_type = mirrored datacore_disk_pools = Disk pool 1, Disk pool 2 datacore_storage_profile = High 3. Create the volume types .. code-block:: ini $ cinder type-create datacore_iscsi $ cinder type-create datacore_fc 4. Add an extra specification to link the volume type to a back-end name: .. code-block:: ini $ cinder type-key datacore_iscsi set volume_backend_name=DataCore_iSCSI $ cinder type-key datacore_fc set volume_backend_name=DataCore_FibreChannel See `Configure multiple-storage back ends `__ for additional information. Detaching Volumes and Terminating Instances ------------------------------------------- Notes about the expected behavior of SANsymphony software when detaching volumes and terminating instances in OpenStack: 1. When a volume is detached from a host in OpenStack, the virtual disk will be unserved from the host in SANsymphony, but the virtual disk will not be deleted. 2. If all volumes are detached from a host in OpenStack, the host will remain registered and all virtual disks will be unserved from that host in SANsymphony. The virtual disks will not be deleted. 3. If an instance is terminated in OpenStack, the virtual disk for the instance will be unserved from the host and either be deleted or remain as unserved virtual disk depending on the option selected when terminating. 
Support ------- In the event that a support bundle is needed, the administrator should save the files from the ``/var/log`` folder on the Linux host and attach to DataCore Technical Support incident manually. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/doc/source/configuration/block-storage/drivers/datera-volume-driver.rst0000664000175000017500000000776700000000000031026 0ustar00zuulzuul00000000000000============== Datera drivers ============== Datera iSCSI driver ------------------- The Datera Data Services Platform (DSP) is a scale-out storage software that turns standard, commodity hardware into a RESTful API-driven, intent-based policy controlled storage fabric for large-scale clouds. The Datera DSP integrates seamlessly with the Block Storage service. It provides storage through the iSCSI block protocol framework over the iSCSI block protocol. Datera supports all of the Block Storage services. System requirements, prerequisites, and recommendations ------------------------------------------------------- Prerequisites ~~~~~~~~~~~~~ * All nodes must have access to Datera DSP through the iSCSI block protocol. * All nodes accessing the Datera DSP must have the following packages installed: * Linux I/O (LIO) * open-iscsi * open-iscsi-utils * wget .. config-table:: :config-target: Datera cinder.volume.drivers.datera.datera_iscsi Configuring the Datera volume driver ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Modify the ``/etc/cinder/cinder.conf`` file for Block Storage service. * Enable the Datera volume driver: .. code-block:: ini [DEFAULT] # ... enabled_backends = datera # ... * Optional. Designate Datera as the default back-end: .. code-block:: ini default_volume_type = datera * Create a new section for the Datera back-end definition. The ``VIP`` can be either the Datera Management Network VIP or one of the Datera iSCSI Access Network VIPs depending on the network segregation requirements. For a complete list of parameters that can be configured, please see the section `Volume Driver Cinder.conf Options `_ .. code-block:: ini [datera] volume_driver = cinder.volume.drivers.datera.datera_iscsi.DateraDriver san_ip = san_login = admin san_password = password datera_tenant_id = volume_backend_name = datera datera_volume_type_defaults=replica_count:3 Enable the Datera volume driver ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ * Verify the OpenStack control node can reach the Datera ``VIP``: .. code-block:: bash $ ping -c 4 * Start the Block Storage service on all nodes running the ``cinder-volume`` services: .. code-block:: bash $ service cinder-volume restart Configuring one (or more) Datera specific volume types ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ There are extra volume type parameters that can be used to define Datera volume types with specific QoS policies (R/W IOPS, R/W bandwidth) and/or placement policies (replica count, type of media, IP pool to use, etc.) For a full list of supported options please see the `Volume-Type ExtraSpecs `_ section in the driver documentation. See more examples in the `Usage `_ section. .. 
 code-block:: bash # Create 2 replica volume type $ openstack volume type create datera_2way --property volume_backend_name=datera --property DF:replica_count=2 # Create volume type with limited write IOPS $ openstack volume type create datera_iops --property volume_backend_name=datera --property DF:write_iops_max=5000 Supported operations ~~~~~~~~~~~~~~~~~~~~ * Create, delete, attach, detach, manage, unmanage, and list volumes. * Create, list, and delete volume snapshots. * Create a volume from a snapshot. * Copy an image to a volume. * Copy a volume to an image. * Clone a volume. * Extend a volume. * Support for naming convention changes. Configuring multipathing ~~~~~~~~~~~~~~~~~~~~~~~~ Enabling multipathing is strongly recommended for reliability and availability reasons. Please refer to the following `file `_ for an example of configuring multipathing in Linux 3.x kernels. Some parameters in different Linux distributions may be different. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/doc/source/configuration/block-storage/drivers/dell-emc-powerflex-driver.rst0000664000175000017500000004346700000000000031745 0ustar00zuulzuul00000000000000================================= Dell PowerFlex Storage driver ================================= Overview -------- Dell PowerFlex (formerly named Dell ScaleIO/VxFlex OS) is a software-only solution that uses existing servers' local disks and LAN to create a virtual SAN that has all of the benefits of external storage, but at a fraction of the cost and complexity. Using the driver, Block Storage hosts can connect to a PowerFlex Storage cluster. The Dell PowerFlex Cinder driver is designed and tested to work with both PowerFlex and ScaleIO. The :ref:`configuration options ` are identical for both PowerFlex and ScaleIO. .. _powerflex_docs: Official PowerFlex documentation ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ To find the PowerFlex documentation: #. Go to the `PowerFlex product documentation page `_. #. On the page, search for the relevant PowerFlex version. Supported PowerFlex or VxFlex OS Versions ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The Dell PowerFlex Block Storage driver has been tested against the following versions of VxFlex OS and PowerFlex and found to be compatible: * PowerFlex 3.6.0 * PowerFlex 4.x At the time of publication, the Dell PowerFlex Block Storage driver has been tested against the following version series of VxFlex OS and PowerFlex and found to be compatible: 3.6, 4.0, 4.5, and 4.6. You can find the current list of compatible versions at https://elabnavigator.dell.com/eln/elnhome. Please consult the :ref:`powerflex_docs` to determine supported operating systems for each version of PowerFlex or VxFlex OS. Deployment prerequisites ~~~~~~~~~~~~~~~~~~~~~~~~ * The PowerFlex Gateway must be installed and accessible in the network. For installation steps, refer to the Preparing the installation Manager and the Gateway section in PowerFlex Deployment Guide. See :ref:`powerflex_docs`. * PowerFlex Storage Data Client (SDC) must be installed on all OpenStack nodes. .. note:: Ubuntu users must follow the specific instructions in the PowerFlex OS Deployment Guide for Ubuntu environments. See the ``Deploying on Ubuntu Servers`` section in PowerFlex Deployment Guide. See :ref:`powerflex_docs`. 
Supported operations ~~~~~~~~~~~~~~~~~~~~ * Create, delete, clone, attach, detach, migrate, manage, and unmanage volumes * Create, delete, manage, and unmanage volume snapshots * Create a volume from a snapshot * Revert a volume to a snapshot * Copy an image to a volume * Copy a volume to an image * Extend a volume * Get volume statistics * Create, list, update, and delete consistency groups * Create, list, update, and delete consistency group snapshots * OpenStack replication v2.1 support * Cinder volume active/active support PowerFlex Block Storage driver configuration -------------------------------------------- This section explains how to configure and connect the block storage nodes to a PowerFlex storage cluster. Edit the ``cinder.conf`` file by adding the configuration below under a new section (for example, ``[powerflex]``) and change the ``enable_backends`` setting (in the ``[DEFAULT]`` section) to include this new back end. The configuration file is usually located at ``/etc/cinder/cinder.conf``. For a configuration example, refer to the example :ref:`cinder.conf `. PowerFlex driver name ~~~~~~~~~~~~~~~~~~~~~ Configure the driver name by adding the following parameter: .. code-block:: ini volume_driver = cinder.volume.drivers.dell_emc.powerflex.driver.PowerFlexDriver PowerFlex Gateway server IP ~~~~~~~~~~~~~~~~~~~~~~~~~~~ The PowerFlex Gateway provides a REST interface to PowerFlex. Configure the Gateway server IP address by adding the following parameter: .. code-block:: ini san_ip = PowerFlex Storage Pools ~~~~~~~~~~~~~~~~~~~~~~~ Multiple Storage Pools and Protection Domains can be listed for use by the virtual machines. The list should include every Protection Domain and Storage Pool pair that you would like Cinder to utilize. To retrieve the available Storage Pools, use the command :command:`scli --query_all` and search for available Storage Pools. Configure the available Storage Pools by adding the following parameter: .. code-block:: ini powerflex_storage_pools = PowerFlex user credentials ~~~~~~~~~~~~~~~~~~~~~~~~~~ Block Storage requires a PowerFlex user with administrative privileges. Dell recommends creating a dedicated OpenStack user account that has an administrative user role. Refer to the PowerFlex User Guide for details on user account management. Configure the user credentials by adding the following parameters: .. code-block:: ini san_login = san_password = Oversubscription ~~~~~~~~~~~~~~~~ Configure the oversubscription ratio by adding the following parameter under the separate section for PowerFlex: .. code-block:: ini powerflex_max_over_subscription_ratio = .. note:: The default value for ``powerflex_max_over_subscription_ratio`` is 10.0. Oversubscription is calculated correctly by the Block Storage service only if the extra specification ``provisioning:type`` appears in the volume type regardless of the default provisioning type. Maximum oversubscription value supported for PowerFlex is 10.0. Default provisioning type ~~~~~~~~~~~~~~~~~~~~~~~~~ If provisioning type settings are not specified in the volume type, the default value is set according to the ``san_thin_provision`` option in the configuration file. The default provisioning type will be ``thin`` if the option is not specified in the configuration file. To set the default provisioning type ``thick``, set the ``san_thin_provision`` option to ``false`` in the configuration file, as follows: .. 
code-block:: ini san_thin_provision = false The configuration file is usually located in ``/etc/cinder/cinder.conf``. For a configuration example, see: :ref:`cinder.conf `. .. _cg_configuration_example_dellemc: Configuration example ~~~~~~~~~~~~~~~~~~~~~ **cinder.conf example file** You can update the ``cinder.conf`` file by editing the necessary parameters as follows: .. code-block:: ini [DEFAULT] enabled_backends = powerflex [powerflex] volume_driver = cinder.volume.drivers.dell_emc.powerflex.driver.PowerFlexDriver volume_backend_name = powerflex san_ip = GATEWAY_IP powerflex_storage_pools = Domain1:Pool1,Domain2:Pool2 san_login = POWERFLEX_USER san_password = POWERFLEX_PASSWD san_thin_provision = false Connector configuration ~~~~~~~~~~~~~~~~~~~~~~~ .. note:: Since 2025.2 release, users do not need to create connector configuration. Before using attach/detach volume operations PowerFlex connector must be properly configured. On each node where PowerFlex SDC is installed do the following: #. Create ``/opt/emc/scaleio/openstack/connector.conf`` if it does not exist. .. code-block:: console $ mkdir -p /opt/emc/scaleio/openstack $ touch /opt/emc/scaleio/openstack/connector.conf #. For each PowerFlex section in the ``cinder.conf`` create the same section in the ``/opt/emc/scaleio/openstack/connector.conf`` and populate it with passwords. Example: .. code-block:: ini [powerflex] san_password = POWERFLEX_PASSWD replicating_san_password = REPLICATION_SYSTEM_POWERFLEX_PASSWD # if applicable [powerflex-new] san_password = SIO2_PASSWD replicating_san_password = REPLICATION_SYSTEM_SIO2_PASSWD # if applicable .. _cg_configuration_options_dellemc: Configuration options ~~~~~~~~~~~~~~~~~~~~~ The PowerFlex driver supports these configuration options: .. config-table:: :config-target: PowerFlex cinder.volume.drivers.dell_emc.powerflex.driver Volume Types ------------ Volume types can be used to specify characteristics of volumes allocated via the PowerFlex Driver. These characteristics are defined as ``Extra Specs`` within ``Volume Types``. .. _powerflex_pd_sp: PowerFlex Protection Domain and Storage Pool ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ When multiple storage pools are specified in the Cinder configuration, users can specify which pool should be utilized by adding the ``pool_name`` Extra Spec to the volume type extra-specs and setting the value to the requested protection_domain:storage_pool. .. code-block:: console $ openstack volume type create powerflex_type_1 $ openstack volume type set --property volume_backend_name=powerflex powerflex_type_1 $ openstack volume type set --property pool_name=Domain2:Pool2 powerflex_type_1 PowerFlex thin provisioning support ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The Block Storage driver supports creation of thin-provisioned and thick-provisioned volumes. The provisioning type settings can be added as an extra specification of the volume type, as follows: .. code-block:: console $ openstack volume type create powerflex_type_thick $ openstack volume type set --property provisioning:type=thick powerflex_type_thick PowerFlex QoS support ~~~~~~~~~~~~~~~~~~~~~ QoS support for the PowerFlex driver includes the ability to set the following capabilities: ``maxIOPS`` The QoS I/O rate limit. If not set, the I/O rate will be unlimited. The setting must be larger than 10. ``maxIOPSperGB`` The QoS I/O rate limit. The limit will be calculated by the specified value multiplied by the volume size. The setting must be larger than 10. 
``maxBWS`` The QoS I/O bandwidth rate limit in KBs. If not set, the I/O bandwidth rate will be unlimited. The setting must be a multiple of 1024. ``maxBWSperGB`` The QoS I/O bandwidth rate limit in KBs. The limit will be calculated by the specified value multiplied by the volume size. The setting must be a multiple of 1024. The QoS keys above must be created and associated with a volume type. For example: .. code-block:: console $ openstack volume qos create qos-limit-iops --consumer back-end --property maxIOPS=5000 $ openstack volume type create powerflex_limit_iops $ openstack volume qos associate qos-limit-iops powerflex_limit_iops The driver always chooses the minimum between the QoS keys value and the relevant calculated value of ``maxIOPSperGB`` or ``maxBWSperGB``. Since the limits are per SDC, they will be applied after the volume is attached to an instance, and thus to a compute node/SDC. PowerFlex compression support ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Starting from version 3.0, PowerFlex supports volume compression. By default driver will create volumes without compression. In order to create a compressed volume, a volume type which enables compression support needs to be created first: .. code-block:: console $ openstack volume type create powerflex_compressed $ openstack volume type set --property provisioning:type=compressed powerflex_compressed If a volume with this type is scheduled to a storage pool which doesn't support compression, then ``thin`` provisioning will be used. See table below for details. +-------------------+----------------------------+--------------------+ | provisioning:type | storage pool supports compression | | +----------------------------+--------------------+ | | yes (PowerFlex 3.0 FG pool)| no (other pools) | +===================+============================+====================+ | compressed | thin with compression | thin | +-------------------+----------------------------+--------------------+ | thin | thin | thin | +-------------------+----------------------------+--------------------+ | thick | thin | thick | +-------------------+----------------------------+--------------------+ | not set | thin | thin | +-------------------+----------------------------+--------------------+ .. note:: PowerFlex 3.0 Fine Granularity storage pools don't support thick provisioned volumes. You can add property ``compression_support=' True'`` to volume type to limit volumes allocation only to data pools which supports compression. .. code-block:: console $ openstack volume type set --property compression_support=' True' powerflex_compressed PowerFlex replication support ----------------------------- Starting from version 3.5, PowerFlex supports volume replication. Prerequisites ~~~~~~~~~~~~~ * PowerFlex replication components must be installed on source and destination systems. * Source and destination systems must have the same configuration for Protection Domains and their Storage Pools (i.e. names, zero padding, etc.). * Source and destination systems must be paired and have at least one Replication Consistency Group created. See :ref:`powerflex_docs` for instructions. Configure replication ~~~~~~~~~~~~~~~~~~~~~ #. Enable replication in ``cinder.conf`` file. To enable replication feature for storage backend ``replication_device`` must be set as below: .. 
code-block:: ini [DEFAULT] enabled_backends = powerflex [powerflex] volume_driver = cinder.volume.drivers.dell_emc.powerflex.driver.PowerFlexDriver volume_backend_name = powerflex san_ip = GATEWAY_IP powerflex_storage_pools = Domain1:Pool1,Domain2:Pool2 san_login = POWERFLEX_USER san_password = POWERFLEX_PASSWD san_thin_provision = false replication_device = backend_id:powerflex_repl, san_ip: REPLICATION_SYSTEM_GATEWAY_IP, san_login: REPLICATION_SYSTEM_POWERFLEX_USER, san_password: REPLICATION_SYSTEM_POWERFLEX_PASSWD * Only one replication device is supported for storage backend. * The following parameters are optional for replication device: * REST API port - ``powerflex_rest_server_port``. * SSL certificate verification - ``driver_ssl_cert_verify`` and ``driver_ssl_cert_path``. For more information see :ref:`cg_configuration_options_dellemc`. #. Create volume type for volumes with replication enabled. .. code-block:: console $ openstack volume type create powerflex_replicated $ openstack volume type set --property replication_enabled=' True' powerflex_replicated #. Set PowerFlex Replication Consistency Group name for volume type. .. code-block:: console $ openstack volume type set --property powerflex:replication_cg= \ powerflex_replicated #. Set Protection Domain and Storage Pool if multiple Protection Domains are specified. PowerFlex Replication Consistency Group is created between source and destination Protection Domains. If more than one Protection Domain is specified in ``cinder.conf`` you should set ``pool_name`` property for volume type with appropriate Protection Domain and Storage Pool. See :ref:`powerflex_pd_sp`. Failover host ~~~~~~~~~~~~~ In the event of a disaster, or where there is a required downtime the administrator can issue the failover host command: .. code-block:: console $ cinder failover-host cinder_host@powerflex --backend_id powerflex_repl After issuing Cinder failover-host command Cinder will switch to configured replication device, however to get existing instances to use this target and new paths to volumes it is necessary to first shelve Nova instances and then unshelve them, this will effectively restart the Nova instance and re-establish data paths between Nova instances and the volumes. .. code-block:: console $ nova shelve $ nova unshelve [--availability-zone ] If the primary system becomes available, the administrator can initiate failback operation using ``--backend_id default``: .. code-block:: console $ cinder failover-host cinder_host@powerflex --backend_id default PowerFlex storage-assisted volume migration ------------------------------------------- Starting from version 3.0, PowerFlex supports storage-assisted volume migration. Known limitations ~~~~~~~~~~~~~~~~~ * Migration between different backends is not supported. * For migration from Medium Granularity (MG) to Fine Granularity (FG) storage pool zero padding must be enabled on the MG pool. * For migration from MG to MG pool zero padding must be either enabled or disabled on both pools. In the above cases host-assisted migration will be perfomed. Migrate volume ~~~~~~~~~~~~~~ Volume migration is performed by issuing the following command: .. code-block:: console $ cinder migrate .. note:: Volume migration has a timeout of 3600 seconds (1 hour). It is done to prevent from endless waiting for migration to complete if something unexpected happened. 
If volume still is in migration after timeout has expired, volume status will be changed to ``maintenance`` to prevent future operations with this volume. The corresponding warning will be logged. In this situation the status of the volume should be checked on the storage side. If volume migration succeeded, its status can be changed manually: .. code-block:: console $ cinder reset-state --state available Using PowerFlex Storage with a containerized overcloud ------------------------------------------------------ #. Create a file with below contents: .. code-block:: yaml parameter_defaults: NovaComputeOptVolumes: - /opt/emc/scaleio:/opt/emc/scaleio CinderVolumeOptVolumes: - /opt/emc/scaleio:/opt/emc/scaleio GlanceApiOptVolumes: - /opt/emc/scaleio:/opt/emc/scaleio Name it whatever you like, e.g. ``powerflex_volumes.yml``. #. Use ``-e`` to include this customization file to deploy command. #. Install the Storage Data Client (SDC) on all nodes after deploying the overcloud. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/doc/source/configuration/block-storage/drivers/dell-emc-powermax-driver.rst0000664000175000017500000044545500000000000031603 0ustar00zuulzuul00000000000000============================================ Dell PowerMax iSCSI, FC and NVMe-TCP drivers ============================================ The Dell PowerMax drivers, ``PowerMaxISCSIDriver``, ``PowerMaxFCDriver`` and ``PowerMaxNVMETCPDriver``, support the use of Dell PowerMax and VMAX storage arrays with the Cinder Block Storage project. They both provide equivalent functions and differ only in support for their respective host attachment methods. The drivers perform volume operations by communicating with the back-end PowerMax storage management software. They use the Requests HTTP library to communicate with a Unisphere for PowerMax instance, using a RESTAPI interface in the backend to perform PowerMax and VMAX storage operations. .. _VMAX-Hybrid: .. note:: DEPRECATION NOTICE: The VMAX Hybrid series will not be supported from the ``Z`` release of OpenStack. Also, any All Flash array running HyperMaxOS 5977 will no longer be supported from the ``Z`` release onwards. .. note:: While ``PowerMax`` will be used throughout this document, it will be used to collectively categorize the following supported arrays, PowerMax 2000, 8000, 2500, 8500, VMAX All Flash 250F, 450F, 850F and 950F and VMAX-Hybrid_. System requirements and licensing ================================= The Dell PowerMax Cinder driver supports the VMAX-Hybrid_ series, VMAX All-Flash series and the PowerMax v3 and v4 arrays. Download Solutions Enabler and Unisphere from the Dell's support web site (login is required). See the `Dell Solutions Enabler Installation and Configuration Guide` and `Dell Unisphere for PowerMax Installation Guide` at the `Dell Support`_ site. .. note:: At the time each OpenStack release, support-matrix-table_ was the recommended PowerMax management software and OS combinations. Please reach out your local PowerMax representative to see if these versions are still valid. Starting with Antelope, the PowerMax OS version is now aligned with the Unisphere version scheme. .. _support-matrix-table: .. 
table:: PowerMax Management software and OS for OpenStack release +-----------+--------------+-------------+--------------------------------+ | OpenStack | Unisphere | PowerMax OS | Supported Arrays | | release | for PowerMax | | | +===========+==============+=============+================================+ | Flamingo | 10.2.0 | 10.2.0 | PowerMax 2500,8500 | | | | (6079.275) | | | +--------------+-------------+--------------------------------+ | | 10.1.0 | 10.1.0 | PowerMax 2500,8500 | | | | (6079.225) | | | +--------------+-------------+--------------------------------+ | | | 5978.711 | PowerMax 2000,8000 | | | | | VMAX 250F, 450F, 850F, 950F | +-----------+--------------+-------------+--------------------------------+ | Epoxy | 10.2.0 | 10.2.0 | PowerMax 2500,8500 | | | | (6079.275) | | | +--------------+-------------+--------------------------------+ | | 10.1.0 | 10.1.0 | PowerMax 2500,8500 | | | | (6079.225) | | | +--------------+-------------+--------------------------------+ | | | 5978.711 | PowerMax 2000,8000 | | | | | VMAX 250F, 450F, 850F, 950F | +-----------+--------------+-------------+--------------------------------+ | Dalmatian | 10.2.0 | 10.2.0 | PowerMax 2500,8500 | | | | (6079.275) | | | +--------------+-------------+--------------------------------+ | | 10.1.0 | 10.1.0 | PowerMax 2500,8500 | | | | (6079.225) | | | +--------------+-------------+--------------------------------+ | | | 5978.711 | PowerMax 2000,8000 | | | | | VMAX 250F, 450F, 850F, 950F | +-----------+--------------+-------------+--------------------------------+ | Caracal | 10.2.0 | 10.2.0 | PowerMax 2500,8500 | | | | (6079.275) | | | +--------------+-------------+--------------------------------+ | | 10.1.0 | 10.1.0 | PowerMax 2500,8500 | | | | (6079.225) | | | +--------------+-------------+--------------------------------+ | | | 5978.711 | PowerMax 2000,8000 | | | | | VMAX 250F, 450F, 850F, 950F | +-----------+--------------+-------------+--------------------------------+ | Bobcat | 10.0.1 | 10.0.1 | PowerMax 2500,8500 | | | | (6079.175) | | | | +-------------+--------------------------------+ | | | 5978.711 | PowerMax 2000,8000 | | | | | VMAX 250F, 450F, 850F, 950F | +-----------+--------------+-------------+--------------------------------+ | Antelope | 10.0.1 | 10.0.1 | PowerMax 2500,8500 | | | | (6079.175) | | | | +-------------+--------------------------------+ | | | 5978.711 | PowerMax 2000,8000 | | | | | VMAX 250F, 450F, 850F, 950F | +-----------+--------------+-------------+--------------------------------+ | Zed | 9.2.2 | 5978.711 | PowerMax 2000,8000 | | | | | VMAX 250F, 450F, 850F, 950F | +-----------+--------------+-------------+--------------------------------+ | Yoga | 9.2.2 | 5978.711 | PowerMax 2000,8000 | | | | | VMAX 250F, 450F, 850F, 950F | | | | | VMAX 100K, 200K, 400K (Hybrid) | +-----------+--------------+-------------+--------------------------------+ | Xena | 9.2.2 | 5978.711 | PowerMax 2000,8000 | | | | | VMAX 250F, 450F, 850F, 950F | | | | | VMAX 100K, 200K, 400K (Hybrid) | +-----------+--------------+-------------+--------------------------------+ | Wallaby | 9.2.1 | 5978.711 | PowerMax 2000,8000 | | | | | VMAX 250F, 450F, 850F, 950F | | | | | VMAX 100K, 200K, 400K (Hybrid) | +-----------+--------------+-------------+--------------------------------+ | Victoria | 9.2.0 | 5978.669 | PowerMax 2000,8000 | | | | | VMAX 250F, 450F, 850F, 950F | | | | | VMAX 100K, 200K, 400K (Hybrid) | +-----------+--------------+-------------+--------------------------------+ | Ussuri | 9.1.x | 
5978.479 | PowerMax 2000,8000 | | | | | VMAX 250F, 450F, 850F, 950F | | | | | VMAX 100K, 200K, 400K (Hybrid) | +-----------+--------------+-------------+--------------------------------+ | Train | 9.1.x | 5978.444 | PowerMax 2000,8000 | | | | | VMAX 250F, 450F, 850F, 950F | | | | | VMAX 100K, 200K, 400K (Hybrid) | +-----------+--------------+-------------+--------------------------------+ | Stein | 9.0.x | 5978.221 | PowerMax 2000,8000 | | | | | VMAX 250F, 450F, 850F, 950F | | | | | VMAX 100K, 200K, 400K (Hybrid) | +-----------+--------------+-------------+--------------------------------+ .. note:: A Hybrid array can only run HyperMax OS 5977, and is still supported until the ``Z`` release of OpenStack. Some functionality will not be available in older versions of the OS. If in any doubt, please contact your local PowerMax representative. .. note:: Newer versions of Unisphere for PowerMax and PowerMax OS are not retrospectively tested on older versions of OpenStack. If it is necessary to upgrade, the older REST endpoints will be used. For example, in Ussuri, if upgrading to Unisphere for PowerMax 9.2, the older ``91`` endpoints will be used. .. note:: - PowerMax NVMe-TCP support was introduced starting with PowerMax v4 (models like 2500 and 8500) and requires Unisphere 10.x or later. - Additionally, the installation of the nvme-cli utility is required. Required PowerMax software suites for OpenStack ----------------------------------------------- The storage system requires a Unisphere for PowerMax (SMC) eLicense. PowerMax ~~~~~~~~ There are two licenses for the PowerMax 2000 and 8000: - Essentials software package - Pro software package The Dell PowerMax cinder driver requires the Pro software package. All Flash ~~~~~~~~~ For full functionality including SRDF for the VMAX All Flash, the FX package, or the F package plus the SRDF a la carte add on is required. Hybrid ~~~~~~ There are five Dell Software Suites sold with the VMAX-Hybrid_ arrays: - Base Suite - Advanced Suite - Local Replication Suite - Remote Replication Suite - Total Productivity Pack The Dell PowerMax Cinder driver requires the Advanced Suite and the Local Replication Suite or the Total Productivity Pack (it includes the Advanced Suite and the Local Replication Suite) for the VMAX Hybrid. Using PowerMax Remote Replication functionality will also require the Remote Replication Suite. .. note:: Each are licensed separately. For further details on how to get the relevant license(s), reference eLicensing Support below. eLicensing support ------------------ To activate your entitlements and obtain your PowerMax license files, visit the Service Center on `Dell Support`_, as directed on your License Authorization Code (LAC) letter emailed to you. - For help with missing or incorrect entitlements after activation (that is, expected functionality remains unavailable because it is not licensed), contact your EMC account representative or authorized reseller. - For help with any errors applying license files through Solutions Enabler, contact the Dell Customer Support Center. - If you are missing a LAC letter or require further instructions on activating your licenses through the Online Support site, contact EMC's worldwide Licensing team at ``licensing@emc.com`` or call: North America, Latin America, APJK, Australia, New Zealand: SVC4EMC (800-782-4362) and follow the voice prompts. EMEA: +353 (0) 21 4879862 and follow the voice prompts. 
PowerMax for OpenStack Cinder customer support ---------------------------------------------- If you require help or assistance with PowerMax and Cinder please open a Service Request (SR) through standard support channels at `Dell Support`_. When opening a SR please include the following information: - Array Model & uCode level - Unisphere for PowerMax version - Solutions Enabler Version - OpenStack host Operating System (Ubuntu, RHEL, etc.) - OpenStack version (Usurri, Train, etc.) - PowerMax for Cinder driver version, this can be located in the comments in the PowerMax driver file: ``{cinder_install_dir}/cinder/volume/drivers/dell_emc/powermax/fc.py`` - Cinder logs - Detailed description of the issue you are encountering Supported operations ==================== PowerMax drivers support these operations: - Create, list, delete, attach, and detach volumes - Create, list, and delete volume snapshots - Copy an image to a volume - Copy a volume to an image - Clone a volume - Extend a volume - Retype a volume (Host and storage assisted volume migration) - Create a volume from a snapshot - Create and delete generic volume group - Create and delete generic volume group snapshot - Modify generic volume group (add and remove volumes) - Create generic volume group from source - Live Migration - Volume replication SRDF/S, SRDF/A and SRDF Metro - Quality of service (QoS) - Manage and unmanage volumes and snapshots - List Manageable Volumes/Snapshots - Backup create, delete, list, restore and show PowerMax drivers also support the following features: - Dynamic masking view creation - Dynamic determination of the target iSCSI IP address - iSCSI multipath support - Oversubscription - Service Level support - SnapVX support - Compression support(All Flash and PowerMax) - Deduplication support(PowerMax) - CHAP Authentication - Multi-attach support - Volume Metadata in logs - Encrypted Volume support - Extending attached volume - Replicated volume retype support - Retyping attached(in-use) volume - Unisphere High Availability(HA) support - Online device expansion of a metro device - Rapid TDEV deallocation of deletes - Multiple replication devices - PowerMax array and storage group tagging - Short host name and port group templates - Snap id support - Seamless Live Migration from SMI-S support - Port group & port performance load balancing - Cinder volume active/active support .. note:: In certain cases, when creating a volume from a source snapshot or source volume, subsequent operations using the volumes may fail due to a missing snap_name exception. A manual refresh on the connected Unisphere instance or waiting until another operation automatically refreshes the connected Unisphere instance, will alleviate this issue. PowerMax naming conventions =========================== .. note:: ``shortHostName`` will be altered using the following formula, if its length exceeds 16 characters. This is because the storage group and masking view names cannot exceed 64 characters: .. code-block:: text if len(shortHostName) > 16: 1. Perform md5 hash on the shortHostName 2. Convert output of 1. to hex 3. Take last 6 characters of shortHostName and append output of 2. 4. If the length of output of 3. exceeds 16 characters, join the first 8 characters and last 8 characters. .. note:: ``portgroup_name`` will be altered using the following formula, if its length exceeds 12 characters. This is because the storage group and masking view names cannot exceed 64 characters: .. code-block:: text if len(portgroup_name) > 12: 1. 
Masking view names ------------------ Masking views are dynamically created by the PowerMax FC and iSCSI drivers using the following naming conventions. ``[protocol]`` is either ``I`` for volumes attached over iSCSI or ``F`` for volumes attached over Fibre Channel or ``NT`` for volumes attached over NVMe-TCP. .. code-block:: text OS-[shortHostName]-[protocol]-[portgroup_name]-MV Initiator group names --------------------- For each host that is attached to PowerMax volumes using the drivers, an initiator group is created or re-used (per attachment type). All initiators of the appropriate type known for that host are included in the group. At each new attach volume operation, the PowerMax driver retrieves the initiators (either WWNNs or IQNs) from OpenStack and adds or updates the contents of the Initiator Group as required. Names are of the following format. ``[protocol]`` is either ``I`` for volumes attached over iSCSI or ``F`` for volumes attached over Fibre Channel or ``NT`` for volumes attached over NVMe-TCP. .. code-block:: console OS-[shortHostName]-[protocol]-IG .. note:: Hosts attaching to OpenStack managed PowerMax storage cannot also attach to storage on the same PowerMax that is not managed by OpenStack. FA port groups -------------- PowerMax array FA ports to be used in a new masking view are retrieved from the port group provided as the extra spec on the volume type, or chosen from the list provided in the Dell configuration file. Storage group names ------------------- As volumes are attached to a host, they are either added to an existing storage group (if it exists) or a new storage group is created and the volume is then added. Storage groups contain volumes created from a pool, attached to a single host, over a single connection type (iSCSI or FC). ``[protocol]`` is either ``I`` for volumes attached over iSCSI or ``F`` for volumes attached over Fibre Channel or ``NT`` for volumes attached over NVMe-TCP. The PowerMax Cinder driver utilizes cascaded storage groups - a ``parent`` storage group which is associated with the masking view, and which contains ``child`` storage groups for each configured SRP/SLO/workload/compression-enabled or disabled/replication-enabled or disabled combination. PowerMax, VMAX All Flash and VMAX-Hybrid_ Parent storage group: .. code-block:: text OS-[shortHostName]-[protocol]-[portgroup_name]-SG Child storage groups: .. code-block:: text OS-[shortHostName]-[SRP]-[ServiceLevel/Workload]-[portgroup_name]-CD-RE .. note:: CD and RE are only set if compression is explicitly disabled or replication explicitly enabled. See the compression `11. All Flash compression support`_ and replication `Volume replication support`_ sections below. .. note:: For VMAX All Flash with PowerMax OS (5978) or greater, the workload, if set, will be ignored and set to NONE. .. _my-table: ..
table:: Replication storage group naming conventions +----------------------------+---------------------------------+--------------------------------+--------------------+ | Default storage group | Attached child storage group | Management Group | Replication Type | +============================+=================================+================================+====================+ | OS-[SRP]-[SL]-[WL]-SG | OS-[HOST]-[SRP]-[SL/WL]-[PG] | N/A | None | +----------------------------+---------------------------------+--------------------------------+--------------------+ | OS-[SRP]-[SL]-[WL]-RE-SG | OS-[HOST]-[SRP]-[SL/WL]-[PG]-RE | N/A | Synchronous | +----------------------------+---------------------------------+--------------------------------+--------------------+ | OS-[SRP]-[SL]-[WL]-RA-SG | OS-[HOST]-[SRP]-[SL/WL]-[PG]-RA | OS-[RDFG]-Asynchronous-rdf-sg | Asynchronous | +----------------------------+---------------------------------+--------------------------------+--------------------+ | OS-[SRP]-[SL]-[WL]-RM-SG | OS-[HOST]-[SRP]-[SL/WL]-[PG]-RM | OS-[RDFG]-Metro-rdf-sg | Metro | +----------------------------+---------------------------------+--------------------------------+--------------------+ PowerMax driver integration =========================== 1. Prerequisites ---------------- #. Download Solutions Enabler from `Dell Support`_ and install it. You can install Solutions Enabler on a non-OpenStack host. Supported platforms include different flavors of Windows, Red Hat, and SUSE Linux. Solutions Enabler can be installed on a physical server, or as a Virtual Appliance (a VMware ESX server VM). Additionally, starting with HYPERMAX OS Q3 2015, you can manage VMAX3 arrays using the Embedded Management (eManagement) container application. See the ``Dell Solutions Enabler 9.2.1 Installation and Configuration Guide`` on `Dell Support`_ for more details. .. note:: You must discover storage arrays before you can use the PowerMax drivers. Follow instructions in ``Dell Solutions Enabler 9.2.1 Installation and Configuration Guide`` on `Dell Support`_ for more details. #. Download Unisphere from `Dell Support`_ and install it. Unisphere can be installed in local, remote, or embedded configurations - i.e., on the same server running Solutions Enabler; on a server connected to the Solutions Enabler server; or using the eManagement container application (containing Solutions Enabler and Unisphere for PowerMax). See ``Dell Solutions Enabler 9.2.1 Installation and Configuration Guide`` at `Dell Support`_. #. Pay attention to the number of Gatekeepers device to have in your environment. It may vary depending on simultaneous call to Unisphere. 2. FC zoning with PowerMax -------------------------- Zone Manager is required when there is a fabric between the host and array. This is necessary for larger configurations where pre-zoning would be too complex and open-zoning would raise security concerns. 3. iSCSI with PowerMax ---------------------- - Make sure the ``open-iscsi`` package (or distro equivalent) is installed on all Compute nodes. .. note:: You can only ping the PowerMax iSCSI target ports when there is a valid masking view. An attach operation creates this masking view. 4. Configure block storage in cinder.conf ----------------------------------------- .. config-table:: :config-target: PowerMax cinder.volume.drivers.dell_emc.powermax.common .. note:: ``san_api_port`` is ``8443`` by default but can be changed if necessary. 
For the purposes of this documentation the default is assumed so the tag will not appear in any of the ``cinder.conf`` extracts below. .. note:: PowerMax ``PortGroups`` must be pre-configured to expose volumes managed by the array. Port groups can be supplied in ``cinder.conf``, or can be specified as an extra spec ``storagetype:portgroupname`` on a volume type. If a port group is set on a volume type as an extra specification it takes precedence over any port groups set in ``cinder.conf``. For more information on port and port group selection please see the section ``port group & port load balancing``. .. note:: PowerMax ``SRP`` cannot be changed once configured and in-use. SRP renaming on the PowerMax array is not supported. .. note:: Service Level can be added to ``cinder.conf`` when the backend is the default case and there is no associated volume type. This is not a recommended configuration as it is too restrictive. Workload is ``NONE`` for PowerMax and any All Flash with PowerMax OS (5978) or greater. +--------------------+----------------------------+----------+----------+ | PowerMax parameter | cinder.conf parameter | Default | Required | +====================+============================+==========+==========+ | ``ServiceLevel`` | ``powermax_service_level`` | ``None`` | No | +--------------------+----------------------------+----------+----------+ To configure PowerMax block storage, add the following entries to ``/etc/cinder/cinder.conf``: .. code-block:: ini enabled_backends = CONF_GROUP_ISCSI, CONF_GROUP_FC, CONF_GROUP_NVME_TCP [CONF_GROUP_ISCSI] volume_driver = cinder.volume.drivers.dell_emc.powermax.iscsi.PowerMaxISCSIDriver volume_backend_name = POWERMAX_ISCSI powermax_port_groups = [OS-ISCSI-PG] san_ip = 10.10.10.10 san_login = my_username san_password = my_password powermax_array = 000123456789 powermax_srp = SRP_1 [CONF_GROUP_FC] volume_driver = cinder.volume.drivers.dell_emc.powermax.fc.PowerMaxFCDriver volume_backend_name = POWERMAX_FC powermax_port_groups = [OS-FC-PG] san_ip = 10.10.10.10 san_login = my_username san_password = my_password powermax_array = 000123456789 powermax_srp = SRP_1 [CONF_GROUP_NVME_TCP] volume_driver = cinder.volume.drivers.dell_emc.powermax.nvme_tcp.PowerMaxNVMETCPDriver volume_backend_name = POWERMAX_NVME_TCP powermax_port_groups = [OS-NVME-TCP-PG] san_ip = 10.10.10.10 san_login = my_username san_password = my_password powermax_array = 000123456789 powermax_srp = SRP_1 In this example, three back-end configuration groups are enabled: ``CONF_GROUP_ISCSI``, ``CONF_GROUP_FC`` and ``CONF_GROUP_NVME_TCP``. Each configuration group has a section describing unique parameters for connections, drivers and the ``volume_backend_name``. 5. SSL support -------------- #. Get the CA certificate of the Unisphere server. This pulls the CA cert file and saves it as a ``.pem`` file: .. code-block:: console # openssl s_client -showcerts \ -connect my_unisphere_host:8443 \ </dev/null 2>/dev/null \ | openssl x509 -outform PEM > my_unisphere_host.pem Where ``my_unisphere_host`` is the hostname of the Unisphere instance and ``my_unisphere_host.pem`` is the name of the ``.pem`` file. #. Add this path to ``cinder.conf`` under the PowerMax backend stanza and set SSL verify to ``True``: .. code-block:: console driver_ssl_cert_verify = True driver_ssl_cert_path = /path/to/my_unisphere_host.pem ``OR`` follow steps 3-6 below if you would like to add the CA cert to the system certificate bundle instead of specifying the path to cert: #.
OPTIONAL: Copy the ``.pem`` cert to the system certificate directory and convert to ``.crt``: .. code-block:: console # cp my_unisphere_host.pem /usr/share/ca-certificates/ca_cert.crt #. OPTIONAL: Update the CA certificate database with the following command. Ensure you select to enable the cert from step 3 when prompted: .. code-block:: console # sudo dpkg-reconfigure ca-certificates #. OPTIONAL: Set a system environment variable to tell the Requests library to use the system cert bundle instead of the default Certifi bundle: .. code-block:: console # export REQUESTS_CA_BUNDLE=/etc/ssl/certs/ca-certificates.crt #. OPTIONAL: Set cert verification to ``True`` under the PowerMax backend stanza in ``cinder.conf``: .. code-block:: console # driver_ssl_cert_verify = True #. Ensure ``driver_ssl_cert_verify`` is set to ``True`` in ``cinder.conf`` backend stanzas if steps 3-6 are followed, otherwise ensure both ``driver_ssl_cert_path`` and ``driver_ssl_cert_verify`` are set in ``cinder.conf`` backend stanzas. 6. Create volume types ---------------------- Once ``cinder.conf`` has been updated, `Openstack CLI`_ commands need to be issued in order to create and associate OpenStack volume types with the declared ``volume_backend_names``. Additionally, each volume type will need an associated ``pool_name`` - an extra specification indicating the service level/workload combination to be used for that volume type. .. note:: The ``pool_name`` is an additional property which has to be set and is of the format: ``<ServiceLevel>+<SRP>+<Array ID>``. This can be obtained from the output of the ``cinder get-pools --detail`` command. Workload is NONE for PowerMax or any All Flash with PowerMax OS (5978) or greater. There is also the option to assign a port group to a volume type by setting the ``storagetype:portgroupname`` extra specification. .. code-block:: console $ openstack volume type create POWERMAX_ISCSI_SILVER $ openstack volume type set --property volume_backend_name=ISCSI_backend \ --property pool_name=Silver+SRP_1+000123456789 \ --property storagetype:portgroupname=OS-PG2 \ POWERMAX_ISCSI_SILVER $ openstack volume type create POWERMAX_FC_DIAMOND $ openstack volume type set --property volume_backend_name=FC_backend \ --property pool_name=Diamond+SRP_1+000123456789 \ --property storagetype:portgroupname=OS-PG1 \ POWERMAX_FC_DIAMOND $ openstack volume type create POWERMAX_NVME_TCP_DIAMOND $ openstack volume type set --property volume_backend_name=NVME_backend \ --property pool_name=Diamond+SRP_1+000123456789 \ --property storagetype:portgroupname=OS-PG3 \ POWERMAX_NVME_TCP_DIAMOND By issuing these commands, the Block Storage volume type ``POWERMAX_ISCSI_SILVER`` is associated with the ``ISCSI_backend``, a Silver Service Level. The type ``POWERMAX_FC_DIAMOND`` is associated with the ``FC_backend``, a Diamond Service Level. The type ``POWERMAX_NVME_TCP_DIAMOND`` is associated with the ``NVME_backend``, a Diamond Service Level. The ``ServiceLevel`` manages the underlying storage to provide expected performance. Setting the ``ServiceLevel`` to ``None`` means that non-FAST managed storage groups will be created instead (storage groups not associated with any service level). .. code-block:: console openstack volume type set --property pool_name=None+SRP_1+000123456789 .. note:: PowerMax and VMAX-Hybrid_ support ``Diamond``, ``Platinum``, ``Gold``, ``Silver``, ``Bronze``, ``Optimized``, and ``None`` service levels. VMAX All Flash running HyperMax OS (5977) supports ``Diamond`` and ``None``.
VMAX-Hybrid_ and All Flash support ``DSS_REP``, ``DSS``, ``OLTP_REP``, ``OLTP``, and ``None`` workloads, the latter up until uCode 5977. Please refer to the Stein PowerMax online documentation if you wish to use ``workload``. There is no support for workloads in PowerMax OS (5978) or greater. These will be silently ignored if set for VMAX All Flash arrays which have been upgraded to PowerMax OS (5978). 7. Interval and retries ----------------------- By default, ``interval`` and ``retries`` are ``3`` seconds and ``200`` retries respectively. These determine how long (``interval``) and how many times (``retries``) a user is willing to wait for a single REST call, ``3*200=600 seconds``. Depending on usage, these may need to be overridden by the user in ``cinder.conf``. For example, if performance is a factor, then the ``interval`` should be decreased to check the job status more frequently, and if multiple concurrent provisioning requests are issued then ``retries`` should be increased so calls will not time out prematurely. In the example below, the driver checks every 1 second for the status of the job and will continue checking for 700 retries before it times out. Add the following lines to the PowerMax backend in ``cinder.conf``: .. code-block:: console [CONF_GROUP_ISCSI] volume_driver = cinder.volume.drivers.dell_emc.powermax.iscsi.PowerMaxISCSIDriver volume_backend_name = POWERMAX_ISCSI powermax_port_groups = [OS-ISCSI-PG] san_ip = 10.10.10.10 san_login = my_username san_password = my_password powermax_array = 000123456789 powermax_srp = SRP_1 interval = 1 retries = 700 8. CHAP authentication support ------------------------------ This feature supports one-way initiator CHAP authentication to the PowerMax backend. With CHAP one-way authentication, the storage array challenges the host during the initial link negotiation process and expects to receive a valid credential and CHAP secret in response. When challenged, the host transmits a CHAP credential and CHAP secret to the storage array. The storage array looks for this credential and CHAP secret, which is stored in the host initiator's initiator group (IG) information in the ACLX database. Once a positive authentication occurs, the storage array sends an acceptance message to the host. However, if the storage array fails to find any record of the credential/secret pair, it sends a rejection message, and the link is closed. Assumptions, restrictions and prerequisites ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #. The host initiator IQN is required along with the credentials the host initiator will use to log into the storage array. The same credentials should be used in a multi-node system if connecting to the same array. #. Enable one-way CHAP authentication for the iSCSI initiator on the storage array using ``SYMCLI``. A template and example are shown below. For the purpose of this setup, the credential/secret used would be ``my_username/my_password`` with an iSCSI initiator of ``iqn.1991-05.com.company.lcseb130``. .. code-block:: console # symaccess -sid <SymID> -iscsi <initiator_iqn> \ {enable chap | disable chap | set chap} \ -cred <Credential> -secret <Secret> # symaccess -sid 128 \ -iscsi iqn.1991-05.com.company.lcseb130 \ set chap -cred my_username -secret my_password Settings and configuration ~~~~~~~~~~~~~~~~~~~~~~~~~~ #. Set the configuration in the PowerMax backend group in ``cinder.conf`` using the following parameters and restart cinder.
+-----------------------+-------------------------+-------------------+ | Configuration options | Value required for CHAP | Required for CHAP | +=======================+=========================+===================+ | ``use_chap_auth`` | ``True`` | Yes | +-----------------------+-------------------------+-------------------+ | ``chap_username`` | ``my_username`` | Yes | +-----------------------+-------------------------+-------------------+ | ``chap_password`` | ``my_password`` | Yes | +-----------------------+-------------------------+-------------------+ .. code-block:: ini [POWERMAX_ISCSI] volume_driver = cinder.volume.drivers.dell_emc.powermax.iscsi.PowerMaxISCSIDriver volume_backend_name = POWERMAX_ISCSI san_ip = 10.10.10.10 san_login = my_u4v_username san_password = my_u4v_password powermax_srp = SRP_1 powermax_array = 000123456789 powermax_port_groups = [OS-ISCSI-PG] use_chap_auth = True chap_username = my_username chap_password = my_password Usage ~~~~~ #. Using ``SYMCLI``, enable CHAP authentication for a host initiator as described above, but do not set ``use_chap_auth``, ``chap_username`` or ``chap_password`` in ``cinder.conf``. Create a bootable volume. .. code-block:: console openstack volume create --size 1 \ --image \ --type \ test #. Boot instance named ``test_server`` using the volume created above: .. code-block:: console openstack server create --volume test \ --flavor m1.small \ --nic net-id=private \ test_server #. Verify the volume operation succeeds but the boot instance fails as CHAP authentication fails. #. Update ``cinder.conf`` with ``use_chap_auth`` set to true and ``chap_username`` and ``chap_password`` set with the correct credentials. #. Rerun ``openstack server create`` #. Verify that the boot instance operation ran correctly and the volume is accessible. #. Verify that both the volume and boot instance operations ran successfully and the user is able to access the volume. 9. QoS (Quality of Service) support ----------------------------------- Quality of service (QoS) has traditionally been associated with network bandwidth usage. Network administrators set limitations on certain networks in terms of bandwidth usage for clients. This enables them to provide a tiered level of service based on cost. The Nova/Cinder QoS offer similar functionality based on volume type setting limits on host storage bandwidth per service offering. Each volume type is tied to specific QoS attributes some of which are unique to each storage vendor. In the hypervisor, the QoS limits the following: - Limit by throughput - Total bytes/sec, read bytes/sec, write bytes/sec - Limit by IOPS - Total IOPS/sec, read IOPS/sec, write IOPS/sec QoS enforcement in Cinder is done either at the hyper-visor (front-end), the storage subsystem (back-end), or both. This section focuses on QoS limits that are enforced by either the PowerMax backend and the hyper-visor front end interchangeably or just back end (Vendor Specific). The PowerMax driver offers support for Total bytes/sec limit in throughput and Total IOPS/sec limit of IOPS. The PowerMax driver supports the following attributes that are front end/back end agnostic - ``total_iops_sec`` - Maximum IOPs (in I/Os per second). Valid values range from 100 IO/Sec to 100000 IO/sec. - ``total_bytes_sec`` - Maximum bandwidth (throughput) in bytes per second. 
Valid values range from 1048576 bytes (1MB) to 104857600000 bytes (100,000MB) The PowerMax driver offers the following attribute that is vendor specific to the PowerMax and dependent on the ``total_iops_sec`` and/or ``total_bytes_sec`` being set. - ``Dynamic Distribution`` - Enables/Disables dynamic distribution of host I/O limits. Possible values are: - ``Always`` - Enables full dynamic distribution mode. When enabled, the configured host I/O limits will be dynamically distributed across the configured ports, thereby allowing the limits on each individual port to adjust to fluctuating demand. - ``OnFailure`` - Enables port failure capability. When enabled, the fraction of configured host I/O limits available to a configured port will adjust based on the number of ports currently online. - ``Never`` - Disables this feature (Default). USE CASE 1 - Default values ~~~~~~~~~~~~~~~~~~~~~~~~~~~ Prerequisites - PowerMax - Host I/O Limit (MB/Sec) - No Limit - Host I/O Limit (IO/Sec) - No Limit - Set Dynamic Distribution - N/A .. table:: **Prerequisites - Block Storage (Cinder) back-end (storage group)** +-----------------------+-----------------------+ | Key | Value | +=======================+=======================+ | ``total_iops_sec`` | ``500`` | +-----------------------+-----------------------+ | ``total_bytes_sec`` | ``104857600`` (100MB) | +-----------------------+-----------------------+ | ``DistributionType`` | ``Always`` | +-----------------------+-----------------------+ #. Create QoS Specs with the prerequisite values above: .. code-block:: console $ openstack volume qos create --consumer back-end \ --property total_iops_sec=500 \ --property total_bytes_sec=104857600 \ --property DistributionType=Always \ my_qos #. Associate QoS specs with specified volume type: .. code-block:: console $ openstack volume qos associate my_qos my_volume_type #. Create volume with the volume type indicated above: .. code-block:: console $ openstack volume create --size 1 --type my_volume_type my_volume **Outcome - PowerMax (storage group)** - Host I/O Limit (MB/Sec) - ``100`` - Host I/O Limit (IO/Sec) - ``500`` - Set Dynamic Distribution - ``Always`` **Outcome - Block Storage (Cinder)** Volume is created against volume type and QoS is enforced with the parameters above. USE CASE 2 - Pre-set limits ~~~~~~~~~~~~~~~~~~~~~~~~~~~ Prerequisites - PowerMax - Host I/O Limit (MB/Sec) - ``2000`` - Host I/O Limit (IO/Sec) - ``2000`` - Set Dynamic Distribution - ``Never`` .. table:: **Prerequisites - Block Storage (Cinder) back-end (storage group)** +-----------------------+-----------------------+ | Key | Value | +=======================+=======================+ | ``total_iops_sec`` | ``500`` | +-----------------------+-----------------------+ | ``total_bytes_sec`` | ``104857600`` (100MB) | +-----------------------+-----------------------+ | ``DistributionType`` | ``Always`` | +-----------------------+-----------------------+ #. Create QoS specifications with the prerequisite values above. The consumer in this use case is both for front-end and back-end: .. code-block:: console $ openstack volume qos create --consumer back-end \ --property total_iops_sec=500 \ --property total_bytes_sec=104857600 \ --property DistributionType=Always \ my_qos #. Associate QoS specifications with specified volume type: .. code-block:: console $ openstack volume qos associate my_qos my_volume_type #. Create volume with the volume type indicated above: .. 
code-block:: console $ openstack volume create --size 1 --type my_volume_type my_volume #. Attach the volume created in step 3 to an instance .. code-block:: console $ openstack server add volume my_instance my_volume **Outcome - PowerMax (storage group)** - Host I/O Limit (MB/Sec) - ``100`` - Host I/O Limit (IO/Sec) - ``500`` - Set Dynamic Distribution - ``Always`` **Outcome - Block Storage (Cinder)** Volume is created against volume type and QoS is enforced with the parameters above. **Outcome - Hypervisor (Nova)** ``Libvirt`` includes an extra ``xml`` flag within the ```` section called ``iotune`` that is responsible for rate limitation. To confirm that, first get the ``OS-EXT-SRV-ATTR:instance_name`` value of the server instance, for example ``instance-00000003``. .. code-block:: console $ openstack server show +-------------------------------------+-----------------------------------------------------------------+ | Field | Value | +-------------------------------------+-----------------------------------------------------------------+ | OS-DCF:diskConfig | AUTO | | OS-EXT-AZ:availability_zone | nova | | OS-EXT-SRV-ATTR:host | myhost | | OS-EXT-SRV-ATTR:hypervisor_hostname | myhost | | OS-EXT-SRV-ATTR:instance_name | instance-00000003 | | OS-EXT-STS:power_state | Running | | OS-EXT-STS:task_state | None | | OS-EXT-STS:vm_state | active | | OS-SRV-USG:launched_at | 2017-11-02T08:15:42.000000 | | OS-SRV-USG:terminated_at | None | | accessIPv4 | | | accessIPv6 | | | addresses | private=fd21:99c2:73f3:0:f816:3eff:febe:30ed, 10.0.0.3 | | config_drive | | | created | 2017-11-02T08:15:34Z | | flavor | m1.tiny (1) | | hostId | e7b8312581f9fbb8508587d45c0b6fb4dc86102c632ed1f3a6a49d42 | | id | 0ef0ff4c-dbda-4dc7-b8ed-45d2fc2f31db | | image | cirros-0.3.5-x86_64-disk (b7c220f5-2408-4296-9e58-fc5a41cb7e9d) | | key_name | myhostname | | name | myhosthame | | progress | 0 | | project_id | bae4b97a0d8b42c28a5add483981e5db | | properties | | | security_groups | name='default' | | status | ACTIVE | | updated | 2017-11-02T08:15:42Z | | user_id | 7bccf456740546799a7e20457f13c38b | | volumes_attached | | +-------------------------------------+-----------------------------------------------------------------+ We then run the following command using the ``OS-EXT-SRV-ATTR:instance_name`` retrieved above. .. code-block:: console $ virsh dumpxml instance-00000003 | grep -1 "total_bytes_sec\|total_iops_sec" The output of the command contains the XML below. It is found between the ```` start and end tag. .. code-block:: xml 104857600 500 USE CASE 3 - Pre-set limits ~~~~~~~~~~~~~~~~~~~~~~~~~~~ Prerequisites - PowerMax - Host I/O Limit (MB/Sec) - ``100`` - Host I/O Limit (IO/Sec) - ``500`` - Set Dynamic Distribution - ``Always`` .. table:: **Prerequisites - Block Storage (Cinder) back end (storage group)** +-----------------------+-----------------------+ | Key | Value | +=======================+=======================+ | ``total_iops_sec`` | ``500`` | +-----------------------+-----------------------+ | ``total_bytes_sec`` | ``104857600`` (100MB) | +-----------------------+-----------------------+ | ``DistributionType`` | ``OnFailure`` | +-----------------------+-----------------------+ #. Create QoS specifications with the prerequisite values above: .. code-block:: console $ openstack volume qos create --consumer back-end \ --property total_iops_sec=500 \ --property total_bytes_sec=104857600 \ --property DistributionType=OnFailure \ my_qos #. Associate QoS specifications with specified volume type: .. 
code-block:: console $ openstack volume qos associate my_qos my_volume_type #. Create volume with the volume type indicated above: .. code-block:: console $ openstack volume create --size 1 --type my_volume_type my_volume **Outcome - PowerMax (storage group)** - Host I/O Limit (MB/Sec) - ``100`` - Host I/O Limit (IO/Sec) - ``500`` - Set Dynamic Distribution - ``OnFailure`` **Outcome - Block Storage (Cinder)** Volume is created against volume type and QoS is enforced with the parameters above. USE CASE 4 - Default values ~~~~~~~~~~~~~~~~~~~~~~~~~~~ Prerequisites - PowerMax - Host I/O Limit (MB/Sec) - ``No Limit`` - Host I/O Limit (IO/Sec) - ``No Limit`` - Set Dynamic Distribution - ``N/A`` .. table:: **Prerequisites - Block Storage (Cinder) back end (storage group)** +-----------------------+---------------+ | Key | Value | +=======================+===============+ | ``DistributionType`` | ``Always`` | +-----------------------+---------------+ #. Create QoS specifications with the prerequisite values above: .. code-block:: console $ openstack volume qos create --consumer back-end \ --property DistributionType=Always \ my_qos #. Associate QoS specifications with specified volume type: .. code-block:: console $ openstack volume qos associate my_qos my_volume_type #. Create volume with the volume type indicated above: .. code-block:: console $ openstack volume create --size 1 --type my_volume_type my_volume **Outcome - PowerMax (storage group)** - Host I/O Limit (MB/Sec) - ``No Limit`` - Host I/O Limit (IO/Sec) - ``No Limit`` - Set Dynamic Distribution - ``N/A`` **Outcome - Block Storage (Cinder)** Volume is created against volume type and there is no QoS change. 10. Multi-pathing support ------------------------- - Install ``open-iscsi`` on all nodes on your system if on an iSCSI setup. - Do not install EMC PowerPath as it cannot co-exist with native multi-path software. - Multi-path tools must be installed on all Nova compute nodes. On Ubuntu: .. code-block:: console # apt-get install multipath-tools #multipath modules # apt-get install sysfsutils sg3-utils #file system utilities # apt-get install scsitools #SCSI tools On openSUSE and SUSE Linux Enterprise Server: .. code-block:: console # zypper install multipath-tools #multipath modules # zypper install sysfsutils sg3-utils #file system utilities # zypper install scsitools #SCSI tools On Red Hat Enterprise Linux and CentOS: .. code-block:: console # yum install iscsi-initiator-utils #ensure iSCSI is installed # yum install device-mapper-multipath #multipath modules # yum install sysfsutils sg3-utils #file system utilities Multipath configuration file ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The multi-path configuration file may be edited for better management and performance. Log in as a privileged user and make the following changes to ``/etc/multipath.conf`` on the Compute (Nova) node(s). .. code-block:: vim devices { # Device attributes for EMC PowerMax device { vendor "EMC" product "SYMMETRIX" path_grouping_policy multibus getuid_callout "/lib/udev/scsi_id --page=pre-spc3-83 --whitelisted --device=/dev/%n" path_selector "round-robin 0" path_checker tur features "0" hardware_handler "0" prio const rr_weight uniform no_path_retry 6 rr_min_io 1000 rr_min_io_rq 1 } } You may need to reboot the host after installing the MPIO tools or restart the iSCSI and multi-path services. On Ubuntu iSCSI: .. code-block:: console # service open-iscsi restart # service multipath-tools restart On Ubuntu FC: ..
code-block:: console # service multipath-tools restart On openSUSE, SUSE Linux Enterprise Server, Red Hat Enterprise Linux, and CentOS iSCSI: .. code-block:: console # systemctl restart open-iscsi # systemctl restart multipath-tools On openSUSE, SUSE Linux Enterprise Server, Red Hat Enterprise Linux, and CentOS FC: .. code-block:: console # systemctl restart multipath-tools .. code-block:: console $ lsblk NAME MAJ:MIN RM SIZE RO TYPE MOUNTPOINT sda 8:0 0 1G 0 disk ..360000970000196701868533030303235 (dm-6) 252:6 0 1G 0 mpath sdb 8:16 0 1G 0 disk ..360000970000196701868533030303235 (dm-6) 252:6 0 1G 0 mpath vda 253:0 0 1T 0 disk OpenStack configurations ~~~~~~~~~~~~~~~~~~~~~~~~ On Compute (Nova) node, add the following flag in the ``[libvirt]`` section of ``nova.conf`` and ``nova-cpu.conf``: .. code-block:: ini volume_use_multipath = True On Cinder controller node, multi-path for image transfer can be enabled in ``cinder.conf`` for each backend section or in ``[backend_defaults]`` section as a common configuration for all backends. .. code-block:: ini use_multipath_for_image_xfer = True Restart ``nova-compute`` and ``cinder-volume`` services after the change. Verify you have multiple initiators available on the compute node for I/O ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #. Create a 3GB PowerMax volume. #. Create an instance from image out of native LVM storage or from PowerMax storage, for example, from a bootable volume #. Attach the 3GB volume to the new instance: .. code-block:: console # multipath -ll mpath102 (360000970000196700531533030383039) dm-3 EMC,SYMMETRIX size=3G features='1 queue_if_no_path' hwhandler='0' wp=rw '-+- policy='round-robin 0' prio=1 status=active 33:0:0:1 sdb 8:16 active ready running '- 34:0:0:1 sdc 8:32 active ready running #. Use the ``lsblk`` command to see the multi-path device: .. code-block:: console # lsblk NAME MAJ:MIN RM SIZE RO TYPE sdb 8:0 0 3G 0 disk ..360000970000196700531533030383039 (dm-6) 252:6 0 3G 0 mpath sdc 8:16 0 3G 0 disk ..360000970000196700531533030383039 (dm-6) 252:6 0 3G 0 mpath vda 11. All Flash compression support --------------------------------- On an All Flash array, the creation of any storage group has a compressed attribute by default. Setting compression on a storage group does not mean that all the devices will be immediately compressed. It means that for all incoming writes compression will be considered. Setting compression ``off`` on a storage group does not mean that all the devices will be uncompressed. It means all the writes to compressed tracks will make these tracks uncompressed. .. note:: This feature is only applicable for All Flash arrays, 250F, 450F, 850F and 950F and PowerMax 2000 and 8000. It was first introduced Solutions Enabler 8.3.0.11 or later and is enabled by default when associated with a Service Level. This means volumes added to any newly created storage groups will be compressed. Use case 1 - Compression disabled create, attach, detach, and delete volume ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #. Create a new volume type called ``POWERMAX_COMPRESSION_DISABLED``. #. Set an extra spec ``volume_backend_name``. #. Set a new extra spec ``storagetype:disablecompression = True``. #. Create a new volume. #. Check in Unisphere or SYMCLI to see if the volume exists in storage group ``OS----CD-SG``, and compression is disabled on that storage group. #. Attach the volume to an instance. 
Check in Unisphere or SYMCLI to see if the volume exists in storage group ``OS-----CD``, and compression is disabled on that storage group. #. Detach volume from instance. Check in Unisphere or symcli to see if the volume exists in storage group ``OS----CD-SG``, and compression is disabled on that storage group. #. Delete the volume. If this was the last volume in the ``OS----CD-SG`` storage group, it should also be deleted. Use case 2 - Retype from compression disabled to compression enabled ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #. Repeat steps 1-4 of Use case 1. #. Create a new volume type. For example ``POWERMAX_COMPRESSION_ENABLED``. #. Set extra spec ``volume_backend_name`` as before. #. Set the new extra spec's compression as ``storagetype:disablecompression = False`` or DO NOT set this extra spec. #. Retype from volume type ``POWERMAX_COMPRESSION_DISABLED`` to ``POWERMAX_COMPRESSION_ENABLED``. #. Check in Unisphere or symcli to see if the volume exists in storage group ``OS----SG``, and compression is enabled on that storage group. .. note:: If extra spec ``storagetype:disablecompression`` is set on a VMAX-Hybrid_, it is ignored because compression is not an available feature on a VMAX-Hybrid_. 12. Oversubscription support ---------------------------- Please refer to the official OpenStack `over-subscription documentation`_ for further information on using over-subscription with PowerMax. 13. Live migration support -------------------------- **Non-live migration** (sometimes referred to simply as 'migration'). The instance is shut down for a period of time to be moved to another hyper-visor. In this case, the instance recognizes that it was rebooted. **Live migration** (or 'true live migration'). Almost no instance downtime. Useful when the instances must be kept running during the migration. The different types of live migration are: - **Shared storage-based live migration** Both hyper-visors have access to shared storage. - **Block live migration** No shared storage is required. Incompatible with read-only devices such as CD-ROMs and Configuration Drive (config_drive). - **Volume-backed live migration** Instances are backed by volumes rather than ephemeral disk. For PowerMax volume-backed live migration, shared storage is required. The PowerMax driver supports shared volume-backed live migration. Architecture ~~~~~~~~~~~~ In PowerMax, A volume cannot belong to two or more FAST storage groups at the same time. To get around this limitation we leverage both cascaded storage groups and a temporary non-FAST storage group. A volume can remain 'live' if moved between masking views that have the same initiator group and port groups which preserves the host path. During live migration, the following steps are performed by the PowerMax driver on the volume: #. Within the originating masking view, the volume is moved from the FAST storage group to the non-FAST storage group within the parent storage group. #. The volume is added to the FAST storage group within the destination parent storage group of the destination masking view. At this point the volume belongs to two storage groups. #. One of two things happen: - If the connection to the destination instance is successful, the volume is removed from the non-FAST storage group in the originating masking view, deleting the storage group if it contains no other volumes. 
- If the connection to the destination instance fails, the volume is removed from the destination storage group, deleting the storage group, if empty. The volume is reverted back to the original storage group. Live migration configuration ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Please refer to the official OpenStack documentation on `configuring migrations`_ and `live migration usage`_ for more information. .. note:: OpenStack Oslo uses an open standard for messaging middleware known as ``AMQP``. This messaging middleware (the RPC messaging system) enables the OpenStack services that run on multiple servers to talk to each other. By default, the RPC messaging client is set to time out after 60 seconds, meaning that if any operation takes longer than 60 seconds to complete, the operation will time out and fail with the ERROR message ``Messaging Timeout: Timed out waiting for a reply to message ID`` ``[message_id]``. If this occurs, increase the ``rpc_response_timeout`` flag value in ``cinder.conf`` and ``nova.conf`` on all Cinder and Nova nodes and restart the services. What to change this value to will depend entirely on your own environment; you might only need to increase it slightly, or if your environment is under heavy network load it could need a bit more time than normal. Fine tuning is required here: change the value and run intensive operations to determine if your timeout value matches your environment requirements. At a minimum please set ``rpc_response_timeout`` to ``240``, but this will need to be raised if high concurrency is a factor. This should be sufficient for all Cinder backup commands also. System configuration ~~~~~~~~~~~~~~~~~~~~ ``NOVA-INST-DIR/instances/`` (for example, ``/opt/stack/data/nova/instances``) has to be mounted on shared storage. Ensure that ``NOVA-INST-DIR`` (set with ``state_path`` in the ``nova.conf`` file) is the same on all hosts. #. Configure your DNS or ``/etc/hosts`` and ensure it is consistent across all hosts. Make sure that the three hosts can perform name resolution with each other. As a test, use the ping command to ping each host from one another. .. code-block:: console $ ping HostA $ ping HostB $ ping HostC #. Export ``NOVA-INST-DIR/instances`` from ``HostA``, and ensure it is readable and writable by the Compute user on ``HostB`` and ``HostC``. Please refer to the relevant OS documentation for further details, for example the `Ubuntu NFS Documentation`_. #. On all compute nodes, enable the ``execute/search`` bit on your shared directory to allow ``qemu`` to be able to use the images within the directories. On all hosts, run the following command: .. code-block:: console $ chmod o+x NOVA-INST-DIR/instances .. note:: If migrating from compute to controller, make sure to run step two above on the controller node to export the instance directory. Use case ~~~~~~~~ For our use case shown below, we have three hosts with host names ``HostA``, ``HostB`` and ``HostC``. ``HostA`` is the controller node while ``HostB`` and ``HostC`` are the compute nodes. The following were also used in live migration: - 2GB bootable volume using the CirrOS image. - Instance created using the 2GB volume above with a flavor ``m1.small`` using 2048 MB RAM, 20GB of disk and 1 vCPU. #. Create a bootable volume. .. code-block:: console $ openstack volume create --size 2 \ --image cirros-0.3.5-x86_64-disk \ volume_lm_1 #. Launch an instance using the volume created above on ``HostB``. ..
code-block:: console $ openstack server create --volume volume_lm_1 \ --flavor m1.small \ --nic net-id=private \ --security-group default \ --availability-zone nova:HostB \ server_lm_1 #. Confirm on ``HostB`` has the instance created by running: .. code-block:: console $ openstack server show server_lm_1 | grep "hypervisor_hostname\|instance_name" | OS-EXT-SRV-ATTR:hypervisor_hostname | HostB | OS-EXT-SRV-ATTR:instance_name | instance-00000006 #. Confirm, through ``virsh`` using the instance_name returned in step 3 (``instance-00000006``), on ``HostB`` that the instance is created using: .. code-block:: console $ virsh list --all Id Name State -------------------------------- 1 instance-00000006 Running #. Migrate the instance from ``HostB`` to ``HostA`` with: .. code-block:: console $ openstack server migrate --os-compute-api-version 2.30 \ --live-migration --host HostA \ server_lm_1 #. Run the command on step 3 above when the instance is back in available status. The hypervisor should be on Host A. #. Run the command on Step 4 on Host A to confirm that the instance is created through ``virsh``. 14. Multi-attach support ------------------------ PowerMax cinder driver supports the ability to attach a volume to multiple hosts/servers simultaneously. Please see the official OpenStack `multi-attach documentation`_ for configuration information. Multi-attach architecture ~~~~~~~~~~~~~~~~~~~~~~~~~ In PowerMax, a volume cannot belong to two or more FAST storage groups at the same time. This can cause issues when we are attaching a volume to multiple instances on different hosts. To get around this limitation, we leverage both cascaded storage groups and non-FAST storage groups (i.e. a storage group with no service level, workload, or SRP specified). .. note:: If no service level is assigned to the volume type, no extra work on the backend is required – the volume is attached to and detached from each host as normal. Example use case ~~~~~~~~~~~~~~~~ Volume ``Multi-attach-Vol-1`` (with a multi-attach capable volume type, and associated with a Diamond Service Level) is attached to Instance ``Multi-attach-Instance-A`` on HostA. We then issue the command to attach ``Multi-attach-Vol-1`` to ``Multi-attach-Instance-B`` on HostB: #. In the ``HostA`` masking view, the volume is moved from the FAST managed storage group to the non-FAST managed storage group within the parent storage group. #. The volume is attached as normal on ``HostB`` – i.e., it is added to a FAST managed storage group within the parent storage group of the ``HostB`` masking view. The volume now belongs to two masking views, and is exposed to both ``HostA`` and ``HostB``. We then decide to detach the volume from ``Multi-attach-Instance-B`` on ``HostB``: #. The volume is detached as normal from ``HostB`` – i.e., it is removed from the FAST managed storage group within the parent storage group of the ``HostB`` masking view – this includes cleanup of the associated elements if required. The volume now belongs to one masking view, and is no longer exposed to ``HostB``. #. In the ``HostA`` masking view, the volume is returned to the FAST managed storage group from the non-FAST managed storage group within the parent storage group. The non-FAST managed storage group is cleaned up, if required. 15. Volume encryption support ----------------------------- Encryption is supported through the use of OpenStack Barbican. 
Only front-end encryption is supported, back-end encryption is handled at the hardware level with `Data at Rest Encryption`_ (D@RE). For further information on OpenStack Barbican including setup and configuration please refer to the following `official Barbican documentation`_. 16. Volume metadata ------------------- Volume metadata is returned to the user in both the Cinder Volume logs and with volumes and snapshots created in Cinder via the UI or CLI. 16.1 Volume metadata in logs ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ If debug is enabled in the default section of ``cinder.conf``, PowerMax Cinder driver will log additional volume information in the Cinder volume log, on each successful operation. The facilitates bridging the gap between OpenStack and the Array by tracing and describing the volume from a VMAX/ PowerMax view point. .. code-block:: console +------------------------------------+---------------------------------------------------------+ | Key | Value | +------------------------------------+---------------------------------------------------------+ | service_level | Gold | | is_compression_disabled | no | | powermax_cinder_driver_version | 3.2.0 | | identifier_name | OS-819470ab-a6d4-49cc-b4db-6f85e82822b7 | | openstack_release | 13.0.0.0b3.dev3 | | volume_id | 819470ab-a6d4-49cc-b4db-6f85e82822b7 | | storage_model | PowerMax_8000 | | successful_operation | delete | | default_sg_name | OS-DEFAULT_SRP-Gold-NONE-SG | | device_id | 01C03 | | unisphere_for_powermax_version | V9.0.0.9 | | workload | NONE | | openstack_version | 13.0.0 | | volume_updated_time | 2018-08-03 03:13:53 | | platform | Linux-4.4.0-127-generic-x86_64-with-Ubuntu-16.04-xenial | | python_version | 2.7.12 | | volume_size | 20 | | srp | DEFAULT_SRP | | openstack_name | 90_Test_Vol56 | | storage_firmware_version | 5978.143.144 | | serial_number | 000123456789 | +------------------------------------+---------------------------------------------------------+ 16.2 Metadata in the UI and CLI ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ By default metadata will be set on all volume and snapshot objects created in Cinder. This information represents the state of the object on the backend PowerMax and will change when volume attributes are changed by performing actions on them such as re-type or attaching to an instance. .. code-block:: console demo@openstack-controller:~$ cinder show powermax-volume +--------------------------------+------------------------------------------------------------+ | Property | Value | +--------------------------------+------------------------------------------------------------+ | metadata | ArrayID : 000123456789 | | | ArrayModel : PowerMax_8000 | | | CompressionDisabled : False | | | Configuration : TDEV | | | DeviceID : 0012F | | | DeviceLabel : OS-d87edb98-60fd-49dd-bb0f-cc388cf6f3f4 | | | Emulation : FBA | | | ReplicationEnabled : False | | | ServiceLevel : Diamond | | | Workload : None | | name | powermax-volume | +--------------------------------+------------------------------------------------------------+ 17. Unisphere High Availability (HA) support -------------------------------------------- This feature facilitates high availability of Unisphere for PowerMax servers, allowing for one or more backup unisphere instances in the event of a loss in connection to the primary Unisphere instance. The PowerMax driver will cycle through the list of failover instances, trying each until a successful connection is made. 
The ordering is first in, first out (FIFO), so the first ``u4p_failover_target`` specified in ``cinder.conf`` will be the first selected, the second ``u4p_failover_target`` in ``cinder.conf`` will be the second selected, and so on until all failover targets are exhausted. Requirements ~~~~~~~~~~~~ - All required instances of Unisphere for PowerMax are set up and configured for the array(s) - Array(s) are locally registered with the instance of Unisphere that will be used as a failover instance. There are two failover types, local and remote: - `Local failover` - Primary Unisphere is unreachable, failover to secondary local instance of Unisphere to resume normal operations at primary site. - `Remote failover` - Complete loss of primary site so primary instance of Unisphere is unreachable, failover to secondary instance of Unisphere at remote site to resume operations with the R2 array. .. note:: Replication must be configured in advance for remote failover to work successfully. Human intervention will also be required to failover from R1 array to R2 array in Cinder using ``cinder failover-host`` command (see `Volume replication support`_ for replication setup details). .. note:: The remote target array must be registered as local to the remote instance of Unisphere Configuration ~~~~~~~~~~~~~ The following configuration changes need to be made in ``cinder.conf`` under the PowerMax backend stanza in order to support the failover to secondary Unisphere. Cinder services will need to be restarted for changes to take effect. .. code-block:: console [POWERMAX_1] ... u4p_failover_timeout = 30 u4p_failover_retries = 3 u4p_failover_backoff_factor = 1 u4p_failover_autofailback = True u4p_failover_target = san_ip:10.10.10.12, san_api_port: 8443, san_login:my_username, san_password:my_password, driver_ssl_cert_verify: False, u4p_failover_target = san_ip:10.10.10.13, san_api_port: 8443 san_login:my_username, san_password:my_password, driver_ssl_cert_verify: True, driver_ssl_cert_path: /path/to/my_unisphere_host.pem .. note:: ``u4p_failover_target`` key value pairs will need to be on the same line (separated by commas) in ``cinder.conf``. They are displayed on separated lines above for readability. .. note:: To add more than one Unisphere failover target create additional ``u4p_failover_target`` details for the Unisphere instance. These will be cycled through in a first-in, first-out (FIFO) basis, the first failover target in ``cinder.conf`` will be the first backup instance of Unisphere used by the PowerMax driver. 18. Rapid TDEV deallocation --------------------------- The PowerMax driver can now leverage the enhanced volume delete feature-set made available in the PowerMax 5978 Foxtail uCode release. These enhancements allow volume deallocation & deletion to be combined into a single call. Previously, volume deallocation & deletion were split into separate tasks; now a single REST call is dispatched and a response code on the projected outcome of their request is issued rapidly allowing other task execution to proceed without the delay. No additional configuration is necessary, the system will automatically determine when to use either the rapid or legacy compliant volume deletion sequence based on the connected PowerMax array’s metadata. 19. PowerMax online (in-use) device expansion --------------------------------------------- .. 
table:: +---------------------------------+-------------------------------------------+ | uCode Level | Supported In-Use Volume Extend Operations | +----------------+----------------+--------------+--------------+-------------+ | R1 uCode Level | R2 uCode Level | Sync | Async | Metro | +================+================+==============+==============+=============+ | 5978.711 | 5978.711 | Y | Y | Y | +----------------+----------------+--------------+--------------+-------------+ | 5978.711 | 5978.669 | Y | Y | Y | +----------------+----------------+--------------+--------------+-------------+ | 5978.711 | 5978.444 | Y | Y | Y | +----------------+----------------+--------------+--------------+-------------+ | 5978.711 | 5978.221 | Y | Y | N | +----------------+----------------+--------------+--------------+-------------+ | 5978.669 | 5978.669 | Y | Y | Y | +----------------+----------------+--------------+--------------+-------------+ | 5978.669 | 5978.444 | Y | Y | Y | +----------------+----------------+--------------+--------------+-------------+ | 5978.669 | 5978.221 | Y | Y | N | +----------------+----------------+--------------+--------------+-------------+ | 5978.444 | 5978.444 | Y | Y | Y | +----------------+----------------+--------------+--------------+-------------+ | 5978.444 | 5978.221 | Y | Y | N | +----------------+----------------+--------------+--------------+-------------+ | 5978.221 | 5978.221 | Y | Y | N | +----------------+----------------+--------------+--------------+-------------+ Assumptions, restrictions and prerequisites ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - ODE in the context of this document refers to extending a volume where it is in-use, that is, attached to an instance. - The ``allow_extend`` is only applicable on VMAX-Hybrid_ arrays or All Flash arrays with HyperMax OS. If included elsewhere, it is ignored. - Where one array is a lower uCode than the other, the environment is limited to functionality of that of the lowest uCode level, i.e. if R1 is 5978.444 and R2 is 5978.221, expanding a metro volume is not supported, both R1 and R2 need to be on 5978.444 uCode at a minimum. 20. PowerMax array and storage group tagging -------------------------------------------- Unisphere for PowerMax 9.1 and later supports tagging of storage groups and arrays, so the user can give their own 'tag' for ease of searching and/or grouping. Assumptions, restrictions and prerequisites ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - The storage group tag(s) is associated with a volume type extra spec key ``storagetype:storagegrouptags``. - The array tag is associated with the backend stanza using key ``powermax_array_tag_list``. It expects a list of one or more comma separated values, for example ``powermax_array_tag_list=[value1,value2, value3]`` - They can be one or more values in a comma separated list. - There is a 64 characters limit of letters, numbers, - and _. - 8 tags are allowed per storage group and array. - Tags cannot be modified once a volume has been created with that volume type. This is an OpenStack constraint. - Tags can be modified on the backend stanza, but none will ever be removed, only added. - There is no restriction on creating or deleting tags of OpenStack storage groups or arrays outside of OpenStack, for example Unisphere for PowerMax UI. The max number of 8 tags will apply however, as this is a Unisphere for PowerMax limit. Set a storage group tag on a volume type: .. 
code-block:: console $ openstack volume type set --property storagetype:storagegrouptags=myStorageGroupTag1,myStorageGroupTag2 Set an array tag on the PowerMax backend: .. code-block:: console [POWERMAX_ISCSI] volume_driver = cinder.volume.drivers.dell_emc.powermax.iscsi.PowerMaxISCSIDriver volume_backend_name = POWERMAX_ISCSI san_ip = 10.10.10.10 san_login = my_u4v_username san_password = my_u4v_password powermax_srp = SRP_1 powermax_array = 000123456789 powermax_port_groups = [OS-ISCSI-PG] powermax_array_tag_list = [openstack1, openstack2] 21. PowerMax short host name and port group name override --------------------------------------------------------- This functionality allows the user to customize the short host name and port group name that are contained in the PowerMax driver storage groups and masking views names. For current functionality please refer to `PowerMax naming conventions`_ for more details. As the storage group name and masking view name are limited to 64 characters the short host name needs to be truncated to 16 characters or less and port group needs to be truncated to 12 characters or less. This functionality offers a little bit more flexibility to determine how these truncated components should look. .. note:: Once the port group and short host name have been overridden with any new format, it is not possible to return to the default format or change to another format if any volumes are in an attached state. This is because there is no way to determine the overridden format once ``powermax_short_host_name_template` or ``powermax_port_group_name_template`` have been removed or changed. Assumptions, restrictions, and prerequisites ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - Backward compatibility with old format is preserved. - ``cinder.conf`` will have 2 new configuration options, ``short_host_name_template`` and ``port_group_name_template``. - If a storage group, masking view or initiator group in the old naming convention already exists, this remains and any new attaches will use the new naming convention where the label for the short host name and/or port group has been customized by the user. - Only the short host name and port group name components can be renamed within the storage group, initiator group and masking view names. - If the ``powermax_short_host_name_template`` and ``powermax_port_group_name_template`` do not adhere to the rules, then the operation will fail early and gracefully with a clear description as to the problem. - The templates cannot be changed once volumes have been attached using the new configuration. - If only one of the templates are configured, then the other will revert to the default option. - The UUID is generated from the MD5 hash of the full short host name and port group name - If ``userdef`` is used, the onus is on the user to make sure it will be unique among all short host names (controller and compute nodes) and unique among port groups. .. 
table:: Short host name templates +-----------------------------------+-------------------------------------+------------------------------------+ | powermax_short_host_name_template | Description | Rule | +===================================+=====================================+====================================+ | shortHostName | This is the default option | Existing functionality, if over 16 | | | | characters then see | | | | `PowerMax naming conventions`_, | | | | otherwise short host name | +-----------------------------------+-------------------------------------+------------------------------------+ | shortHostName[:x])uuid[:x] | First x characters of the short | Must be less than 16 characters | | e.g. | host name and x uuid | | | shortHostName[:6]uuid[:9] | characters created from md5 | | | | hash of short host name | | +-----------------------------------+-------------------------------------+------------------------------------+ | shortHostName[:x]userdef | First x characters of the short | Must be less than 16 characters | | e.g. | host name and a user defined x char | | | shortHostName[:6]-testHost | name. NB - the responsibility is on | | | | the user for uniqueness | | +-----------------------------------+-------------------------------------+------------------------------------+ | shortHostName[-x:]uuid[:x] | Last x characters of the short | Must be less than 16 characters | | e.g. | host name and x uuid | | | shortHostName[-6:]uuid[:9] | characters created from md5 | | | | hash of short host name | | +-----------------------------------+-------------------------------------+------------------------------------+ | shortHostName[-x:]userdef | Last x characters of the short | Must be less than 16 characters | | e.g. | host name and a user defined x char | | | shortHostName[-6:]-testHost | name. NB - the responsibility is on | | | | the user for uniqueness | | +-----------------------------------+-------------------------------------+------------------------------------+ .. table:: Port group name templates +-----------------------------------+-------------------------------------+------------------------------------+ | powermax_port_group_name_template | Description | Rule | +===================================+=====================================+====================================+ | portGroupName | This is the default option | Existing functionality, if over 12 | | | | characters then see | | | | `PowerMax naming conventions`_, | | | | otherwise port group name | +-----------------------------------+-------------------------------------+------------------------------------+ | portGroupName[:x])uuid[:x] | First x characters of the port | Must be less than 12 characters | | e.g. | group name and x uuid | | | portGroupName[:6]uuid[:5] | characters created from md5 | | | | hash of port group name | | +-----------------------------------+-------------------------------------+------------------------------------+ | portGroupName[:x]userdef | First x characters of the port | Must be less than 12 characters | | e.g. | group name and a user defined x char| | | portGroupName[:6]-test | name. NB - the responsibility is on | | | | the user for uniqueness | | +-----------------------------------+-------------------------------------+------------------------------------+ | portGroupName[-x:]uuid[:x] | Last x characters of the port | Must be less than 12 characters | | e.g. 
| group name and x uuid | | | portGroupName[-6:]uuid[:5] | characters created from md5 | | | | hash of port group name | | +-----------------------------------+-------------------------------------+------------------------------------+ | portGroupName[-x:]userdef | Last x characters of the port | Must be less than 12 characters | | e.g. | group name and a user defined x char| | | portGroupName[-6:]-test | name. NB - the responsibility is on | | | | the user for uniqueness | | +-----------------------------------+-------------------------------------+------------------------------------+ 22. Snap ids replacing generations ---------------------------------- Snap ids were introduced to the PowerMax in microcode 5978.669.669 and Unisphere for PowerMax 9.2. Generations existed previously and could cause stale data if deleted out of sequence, even though we locked against this occurrence. This happened when the newer generation(s) inherited its deleted predecessor's generation number. So in a series of 0, 1, 2 and 3 generations, if generation 1 gets deleted, generation 2 now becomes generation 1, generation 3 becomes generation 2, and so on down the line. Snap ids are unique to each SnapVX snapshot and will not change once assigned at creation, so out of sequence deletions are no longer an issue. Generations will remain for arrays with microcode less than 5978.669.669. Cinder supported operations =========================== Volume replication support -------------------------- .. note:: A mix of ``RDF1+TDEV`` and ``TDEV`` volumes should not exist in the same storage group. This can happen on a cleanup operation after breaking the pair when a ``TDEV`` remains in the storage group on either the local or remote array. If this happens, remove the volume from the storage group so that further replicated volume operations can continue. For example, remove the ``TDEV`` from ``OS-[SRP]-[SL]-[WL]-RA-SG``. .. note:: Replication storage groups should exist on both the local and remote array but never on just one. For example, if OS-[SRP]-[SL]-[WL]-RA-SG exists on local array A it must also exist on remote array B. If this condition does not hold, further replication operations will fail. This also applies to management storage groups in the case of ``Asynchronous`` and ``Metro`` modes. See :ref:`my-table`. .. note:: The number of devices in replication storage groups on both the local and remote arrays should be the same. This also applies to management storage groups in ``Asynchronous`` and ``Metro`` modes. See :ref:`my-table`. Configure a single replication target ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #. Configure an SRDF group between the chosen source and target arrays for the PowerMax Cinder driver to use. The source array must correspond with the ``powermax_array`` entry in ``cinder.conf``. #. Select both the director and the ports for the SRDF emulation to use on both sides. Bear in mind that network topology is important when choosing director endpoints. Supported modes are ``Synchronous``, ``Asynchronous``, and ``Metro``. .. note:: If the source and target arrays are not managed by the same Unisphere server (that is, the target array is remotely connected to the server - for example, if you are using embedded management), in the event of a full disaster scenario (i.e. the primary array is completely lost and all connectivity to it is gone), the Unisphere server would no longer be able to contact the target array.
In this scenario, the volumes would be automatically failed over to the target array, but administrator intervention would be required to either; configure the target (remote) array as local to the current Unisphere server (if it is a stand-alone server), or enter the details of a second Unisphere server to the ``cinder.conf``, which is locally connected to the target array (for example, the embedded management Unisphere server of the target array), and restart the Cinder volume service. .. note:: If you are setting up an SRDF/Metro configuration, it is recommended that you configure a Witness or vWitness for bias management. Please see the `SRDF Metro Overview & Best Practices`_ guide for more information. .. note:: The PowerMax Cinder drivers do not support Cascaded SRDF. .. note:: The transmit idle functionality must be disabled on the R2 array for Asynchronous rdf groups. If this is not disabled it will prevent failover promotion in the event of access to the R1 array being lost. .. code-block:: console # symrdf -sid -rdfg set rdfa -transmit_idle off .. note:: When creating RDF enabled volumes, if there are existing volumes in the target storage group, all rdf pairs related to that storage group must have the same rdf state i.e. rdf pair states must be consistent across all volumes in a storage group when attempting to create a new replication enabled volume. If mixed rdf pair states are found during a volume creation attempt, an error will be raised by the rdf state validation checks. In this event, please wait until all volumes in the storage group have reached a consistent state. #. Enable replication in ``/etc/cinder/cinder.conf``. To enable the replication functionality in PowerMax Cinder driver, it is necessary to create a replication volume-type. The corresponding back-end stanza in ``cinder.conf`` for this volume-type must then include a ``replication_device`` parameter. This parameter defines a single replication target array and takes the form of a list of key value pairs. .. code-block:: console enabled_backends = POWERMAX_FC_REPLICATION [POWERMAX_FC_REPLICATION] volume_driver = cinder.volume.drivers.dell_emc.powermax.fc.PowerMaxFCDriver san_ip = 10.10.10.10 san_login = my_u4v_username san_password = my_u4v_password powermax_srp = SRP_1 powermax_array = 000123456789 powermax_port_groups = [OS-FC-PG] volume_backend_name = POWERMAX_FC_REPLICATION replication_device = target_device_id:000197811111, remote_port_group:os-failover-pg, remote_pool:SRP_1, rdf_group_label: 28_11_07, mode:Metro, metro_use_bias:False, sync_interval:3, sync_retries:200 .. note:: ``replication_device`` key value pairs will need to be on the same line (separated by commas) in ``cinder.conf``. They are displayed here on separate lines above for improved readability. * ``target_device_id`` The unique PowerMax array serial number of the target array. For full failover functionality, the source and target PowerMax arrays must be discovered and managed by the same U4V server. * ``remote_port_group`` The name of a PowerMax port group that has been pre-configured to expose volumes managed by this backend in the event of a failover. Make sure that this port group contains either all FC or all iSCSI or all NVMe-TCP port groups (for a given back end), as appropriate for the configured driver (iSCSI or FC or NVME-TCP). * ``remote_pool`` The unique pool name for the given target array. * ``rdf_group_label`` The name of a PowerMax SRDF group that has been pre-configured between the source and target arrays. 
* ``mode`` The SRDF replication mode. Options are ``Synchronous``, ``Asynchronous``, and ``Metro``. This defaults to ``Synchronous`` if not set. * ``metro_use_bias`` Flag to indicate if 'bias' protection should be used instead of Witness. This defaults to False. * ``sync_interval`` How long in seconds to wait between intervals for SRDF sync checks during Cinder PowerMax SRDF operations. Default is 3 seconds. * ``sync_retries`` How many times to retry RDF sync checks during Cinder PowerMax SRDF operations. Default is 200 retries. * ``allow_extend`` Only applicable to VMAX-Hybrid_ arrays or All Flash arrays running HyperMax OS (5977). It is a flag for allowing the extension of replicated volumes. To extend a volume in an SRDF relationship, this relationship must first be broken, the R1 device extended, and a new device pair established. If not explicitly set, this flag defaults to ``False``. .. note:: As the SRDF link must be severed, due caution should be exercised when performing this operation. If absolutely necessary, only one source and target pair should be extended at a time (only applicable to VMAX-Hybrid_ arrays or All Flash arrays with HyperMax OS). #. Create a ``replication-enabled`` volume type. Once the ``replication_device`` parameter has been entered in the PowerMax backend entry in ``cinder.conf``, a corresponding volume type needs to be created with the ``replication_enabled`` property set. See `Create volume types`_ above for details. .. code-block:: console # openstack volume type set --property replication_enabled=" True" \ POWERMAX_FC_REPLICATION .. note:: Service Level and Workload: An attempt will be made to create a storage group on the target array with the same service level and workload combination as the primary. However, if this combination is unavailable on the target (for example, in a situation where the source array is a VMAX-Hybrid_, the target array is an All Flash, and an All Flash incompatible service level like Bronze is configured), no service level will be applied. Configure multiple replication targets ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Setting multiple replication devices in cinder.conf allows the use of all the supported replication modes simultaneously. Up to three replication devices can be set, one for each of the replication modes available. An additional volume type extra spec (``storagetype:replication_device_backend_id``) is then used to determine which replication device should be used when attempting to perform an operation on a volume which is replication enabled. All details, guidelines and recommendations set out in the `Configure a single replication target`_ section also apply in a multiple replication device scenario. Multiple replication targets limitations and restrictions: #. There can only be one of each replication mode present across all of the replication devices set in ``cinder.conf``. #. Details for ``target_device_id``, ``remote_port_group`` and ``remote_pool`` should be identical across replication devices. #. The ``backend_id`` and ``rdf_group_label`` values must be unique across all replication devices. Adding additional ``replication_device`` entries to cinder.conf: #. Open ``cinder.conf`` for editing. #. If a replication device is already present, add the ``backend_id`` key with a value of ``backend_id_legacy_rep``. If this key is already defined, its value must be updated to ``backend_id_legacy_rep``. #. Add the additional replication devices to the backend stanza.
Any additional replication devices must have a ``backend_id`` key set. The value of these must ``not`` be ``backend_id_legacy_rep``. Example existing backend stanza pre-multiple replication: .. code-block:: console enabled_backends = POWERMAX_FC_REPLICATION [POWERMAX_FC_REPLICATION] volume_driver = cinder.volume.drivers.dell_emc.powermax.fc.PowerMaxFCDriver san_ip = 10.10.10.10 san_login = my_u4v_username san_password = my_u4v_password powermax_srp = SRP_1 powermax_array = 000123456789 powermax_port_groups = [OS-FC-PG] volume_backend_name = POWERMAX_FC_REPLICATION replication_device = backend_id:id, target_device_id:000197811111, remote_port_group:os-failover-pg, remote_pool:SRP_1, rdf_group_label: 28_11_07, mode:Metro, metro_use_bias:False, sync_interval:3, sync_retries:200 Example updated backend stanza: .. code-block:: console enabled_backends = POWERMAX_FC_REPLICATION [POWERMAX_FC_REPLICATION] volume_driver = cinder.volume.drivers.dell_emc.powermax.fc.PowerMaxFCDriver san_ip = 10.10.10.10 san_login = my_u4v_username san_password = my_u4v_password powermax_srp = SRP_1 powermax_array = 000123456789 powermax_port_groups = [OS-FC-PG] volume_backend_name = POWERMAX_FC_REPLICATION replication_device = backend_id:backend_id_legacy_rep target_device_id:000197811111, remote_port_group:os-failover-pg, remote_pool:SRP_1, rdf_group_label: 28_11_07, mode:Metro, metro_use_bias:False, sync_interval:3, sync_retries:200 replication_device = backend_id:sync-rep-id target_device_id:000197811111, remote_port_group:os-failover-pg, remote_pool:SRP_1, rdf_group_label: 29_12_08, mode:Synchronous, sync_interval:3, sync_retries:200 replication_device = backend_id:async-rep-id target_device_id:000197811111, remote_port_group:os-failover-pg, remote_pool:SRP_1, rdf_group_label: 30_13_09, mode:Asynchronous, sync_interval:3, sync_retries:200 .. note:: For environments without existing replication devices. The ``backend_id`` values can be set to any value for all replication devices. The ``backend_id_legacy_rep`` value is only needed when updating a legacy system with an existing replication device to use multiple replication devices. The additional replication devices defined in ``cinder.conf`` will be detected after restarting the cinder volume service. To specify which ``replication_device`` a volume type should use an additional property named ``storagetype:replication_device_backend_id`` must be added to the extra specs of the volume type. The id value assigned to the ``storagetype:replication_device_backend_id`` key in the volume type must match the ``backend_id`` assigned to the ``replication_device`` in ``cinder.conf``. .. code-block:: console # openstack volume type set \ --property storagetype:replication_device_backend_id="" \ .. note:: Specifying which replication device to use is done in addition to the basic replication setup for a volume type seen in `Configure a single replication target`_ .. note:: In a legacy system where volume types are present that were replication enabled before adding multiple replication devices, the ``storagetype:replication_device_backend_id`` should be omitted from any volume type that does/will use the legacy ``replication_device`` i.e. when ``storagetype:replication_device_backend_id`` is omitted the replication_device with a ``backend_id`` of ``backend_id_legacy_rep`` will be used. 
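As a concrete illustration of the extra spec described above, and assuming the example multi-replication stanza shown earlier, a volume type can be associated with the Asynchronous replication device by matching its ``backend_id`` value of ``async-rep-id``. This is a sketch only; the volume type name ``POWERMAX_FC_ASYNC_REP`` is hypothetical and not part of the driver defaults:

.. code-block:: console

   # openstack volume type create POWERMAX_FC_ASYNC_REP
   # openstack volume type set \
       --property replication_enabled=" True" \
       --property volume_backend_name=POWERMAX_FC_REPLICATION \
       --property storagetype:replication_device_backend_id="async-rep-id" \
       POWERMAX_FC_ASYNC_REP

Volumes created with this hypothetical type would then use the ``rdf_group_label`` and mode defined for the ``async-rep-id`` replication device.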
Volume replication interoperability with other features ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Most features are supported, except for the following: * Replication Group operations are available for volumes in Synchronous mode only. * The Ussuri release of OpenStack supports retyping in-use volumes to and from replication enabled volume types with limited exception of volumes with Metro replication enabled. To retype to a volume-type that is Metro enabled the volume **must** first be detached then retyped. The reason for this is so the paths from the Nova instance to the Metro R1 & R2 volumes must be initialised, this is not possible on the R2 device whilst a volume is attached. * The image volume cache functionality is supported (enabled by setting ``image_volume_cache_enabled = True``), but one of two actions must be taken when creating the cached volume: * The first boot volume created on a backend (which will trigger the cached volume to be created) should be the smallest necessary size. For example, if the minimum size disk to hold an image is 5GB, create the first boot volume as 5GB. All subsequent boot volumes are extended to the user specific size. * Alternatively, ensure that the ``allow_extend`` option in the ``replication_device parameter`` is set to ``True``. This is only applicable to VMAX-Hybrid_ arrays or All Flash array with HyperMax OS. Failover host ~~~~~~~~~~~~~ .. note:: Failover and failback operations are not applicable in Metro configurations. In the event of a disaster, or where there is required downtime, upgrade of the primary array for example, the administrator can issue the failover host command to failover to the configured target: .. code-block:: console # cinder failover-host cinder_host@POWERMAX_FC_REPLICATION After issuing ``cinder failover-host`` Cinder will set the R2 array as the target array for Cinder, however, to get existing instances to use this new array and paths to volumes it is necessary to first shelve Nova instances and then unshelve them, this will effectively restart the Nova instance and re-establish data paths between Nova instances and the volumes on the R2 array. .. code-block:: console # nova shelve # nova unshelve [--availability-zone ] When a host is in failover mode performing normal volume or snapshot provisioning will not be possible, failover host mode simply provides access to replicated volumes to minimise environment down-time. The primary objective whilst in failover mode should be to get the R1 array back online. When the primary array becomes available again, you can initiate a fail-back using the same failover command and specifying ``--backend_id default``: .. code-block:: console # cinder failover-host cinder_host@POWERMAX_FC_REPLICATION --backend_id default After issuing the failover command to revert to the default backend host it is necessary to re-issue the Nova shelve and unshelve commands to restore the data paths between Nova instances and their corresponding back end volumes. Once reverted to the default backend volume and snapshot provisioning operations can continue as normal. Failover promotion ~~~~~~~~~~~~~~~~~~ Failover promotion can be used to transfer all existing RDF enabled volumes to the R2 array and overwrite any references to the original R1 array. This can be used in the event of total R1 array failure or in other cases where an array transfer is warranted. 
If the R1 array is online and working and the RDF links are still enabled the failover promotion will automatically delete rdf pairs as necessary. If the R1 array or the link to the R1 array is down, a half deletepair must be issued manually for those volumes during the failover promotion. 1. Issue failover command: .. code-block:: console # cinder failover-host 2. Enable array promotion: .. code-block:: console # cinder failover-host --backend_id=pmax_failover_start_array_promotion 3. View and re-enable the cinder service .. code-block:: console # cinder service-list # cinder service-enable .. note:: With Cinder volume active/active deployment, use the following commands to view and enable the cluster as well. .. code-block:: console # cinder --os-volume-api-version 3.17 cluster-list # cinder --os-volume-api-version 3.17 cluster-enable [] 4. Remove all volumes from volume groups .. code-block:: console # cinder --os-volume-api-version 3.13 group-update --remove-volumes 5. Detach all volumes that are attached to instances .. code-block:: console # openstack server remove volume .. note:: Deleting the instance will call a detach volume for each attached volume. A terminate connection can be issued manually using the following command for volumes that are stuck in the attached state without an instance. .. code-block:: console # cinder --os-volume-api-version 3.50 attachment-delete 6. Delete all remaining instances .. code-block:: console # nova delete 7. Create new volume types New volume types must be created with references to the remote array. All new volume types must adhere to the following guidelines: .. code-block:: text 1. Uses the same workload, SLO & compression setting as the previous R1 volume type. 2. Uses the remote array instead of the primary for its pool name. 3. Uses the same volume_backend_name as the previous volume type. 4. Must not have replication enabled. Example existing volume type extra specs. .. code-block:: text pool_name='Gold+None+SRP_1+000297900330', replication_enabled=' True', storagetype:replication_device_backend_id='async-rep-1', volume_backend_name='POWERMAX_ISCSI_NONE' Example new volume type extra specs. .. code-block:: text pool_name='Gold+None+SRP_1+000197900049', volume_backend_name='POWERMAX_ISCSI_NONE' 8. Retype volumes to new volume types Additional checks will be performed during failover promotion retype to ensure workload, compression and slo settings meet the criteria specified above when creating the new volume types. .. code-block:: console # cinder retype --migration-policy on-demand .. note:: If the volumes RDF links are offline during this retype then a half deletepair must be performed manually after retype. Please reference section 8.a. below for guidance on this process. 8.a. Retype and RDF half deletepair In instances where the rdf links are offline and rdf pairs have been set to partitioned state there are additional requirements. In that scenario the following order should be adhered to: .. code-block:: text 1. Retype all Synchronous volumes. 2. Half_deletepair all Synchronous volumes using the default storage group. 3. Retype all Asynchronous volumes. 4. Half_deletepair all Asynchronous volumes using their management storage group. 5. Retype all Metro volumes. 6. Half_deletepair all Metro volumes using their management storage group. 7. Delete the Asynchronous and Metro management storage groups. .. 
note:: A half deletepair cannot be performed on Metro enabled volumes unless the symforce option has been enabled in the symapi options. In symapi/config/options uncomment and set 'SYMAPI_ALLOW_RDF_SYMFORCE = True'. .. code-block:: console # symrdf -sid -sg -rdfg -force -symforce half_deletepair 9. Issue failback Issuing the failback command will disable both the failover and promotion flags. Please ensure all volumes have been retyped and all replication pairs have been deleted before issuing this command. .. code-block:: console # cinder failover-host --backend_id default 10. Update cinder.conf Update the cinder.conf file to include details for the new primary array. For more information please see the Configure block storage in cinder.conf section of this documentation. 11. Restart the cinder services Restart the cinder volume service to allow it to detect the changes made to the cinder.conf file. 12. Set Metro volumes to ready state Metro volumes will be set to a Not Ready state after performing rdf pair cleanup. Set these volumes back to Ready state to allow them to be attached to instances. The U4P instance must be restarted for this change to be detected. .. code-block:: console # symdev -sid ready -devs Asynchronous and metro replication management groups ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Asynchronous and metro volumes in an RDF session, i.e. belonging to an SRDF group, must be managed together for RDF operations (although there is a ``consistency exempt`` option for creating and deleting pairs in an Async group). To facilitate this management, we create an internal RDF management storage group on the backend. This RDF management storage group will use the following naming convention: .. code-block:: text OS-[rdf_group_label]-[replication_mode]-rdf-sg It is crucial for correct management that the volumes in this storage group directly correspond to the volumes in the RDF group. For this reason, it is imperative that the RDF group specified in the ``cinder.conf`` is for the exclusive use by this Cinder backend. If there are any issues with the state of your RDF enabled volumes prior to performing additional operations in Cinder you will be notified in the Cinder volume logs. Metro support ~~~~~~~~~~~~~ SRDF/Metro is a high availability solution. It works by masking both sides of the RDF relationship to the host, and presenting all paths to the host, appearing that they all point to the one device. In order to do this, there needs to be multi-path software running to manage writing to the multiple paths. .. note:: The metro issue around formatting volumes when they are added to existing metro RDF groups has been fixed in Unisphere for PowerMax 9.1, however, it has only been addressed on arrays with PowerMax OS and will not be available on arrays running a HyperMax OS. Volume retype - storage assisted volume migration -------------------------------------------------- Volume retype with storage assisted migration is supported now for PowerMax arrays. Cinder requires that for storage assisted migration, a volume cannot be retyped across backends. For using storage assisted volume retype, follow these steps: .. note:: From the Ussuri release of OpenStack the PowerMax driver supports retyping in-use volumes to and from replication enabled volume types with limited exception of volumes with Metro replication enabled. To retype to a volume-type that is Metro enabled the volume **must** first be detached then retyped. 
The reason for this is so the paths from the instance to the Metro R1 & R2 volumes must be initialised, this is not possible on the R2 device whilst a volume is attached. .. note:: When multiple replication devices are configured. If retyping from one replication mode to another the R1 device ID is preserved and a new R2 side device is created. As a result, the device ID on the R2 array may be different after the retype operation has completed. .. note:: Retyping an in-use volume to a metro enabled volume type is not currently supported via storage-assisted migration. This retype can still be performed using host-assisted migration by setting the migration-policy to ``on-demand``. .. code-block:: console cinder retype --migration-policy on-demand #. For migrating a volume from one Service Level or Workload combination to another, use volume retype with the migration-policy to on-demand. The target volume type should have the same volume_backend_name configured and should have the desired pool_name to which you are trying to retype to (please refer to `Create volume types`_ for details). .. code-block:: console $ cinder retype --migration-policy on-demand Generic volume group support ---------------------------- Generic volume group operations are performed through the CLI using API version 3.1x of the Cinder API. Generic volume groups are multi-purpose groups which can be used for various features. The PowerMax driver supports consistent group snapshots and replication groups. Consistent group snapshots allows the user to take group snapshots which are consistent based on the group specs. Replication groups allow for tenant facing APIs to enable and disable replication, and to failover and failback, a group of volumes. Generic volume groups have replaced the deprecated consistency groups. Consistent group snapshot ~~~~~~~~~~~~~~~~~~~~~~~~~ To create a consistent group snapshot, set a group-spec, having the key ``consistent_group_snapshot_enabled`` set to `` True`` on the group. .. code-block:: console # cinder --os-volume-api-version 3.11 group-type-key GROUP_TYPE set consistent_group_snapshot_enabled=" True" Similarly the same key should be set on any volume type which is specified while creating the group. .. code-block:: console # openstack volume type set --property consistent_group_snapshot_enabled=" True" POWERMAX_GROUP If this key is not set on the group-spec or volume type, then the generic volume group will be created/managed by Cinder (not the PowerMax driver). .. note:: The consistent group snapshot should not be confused with the PowerMax consistency group which is an SRDF construct. Replication groups ~~~~~~~~~~~~~~~~~~ As with Consistent group snapshot ``consistent_group_snapshot_enabled`` should be set to true on the group and the volume type for replication groups. Only Synchronous replication is supported for use with Replication Groups. When a volume is created into a replication group, replication is on by default. The ``disable_replication`` api suspends I/O traffic on the devices, but does NOT remove replication for the group. The ``enable_replication`` api resumes I/O traffic on the RDF links. The ``failover_group`` api allows a group to be failed over and back without failing over the entire host. See below for usage. .. note:: A generic volume group can be both consistent group snapshot enabled and consistent group replication enabled. Storage group names ~~~~~~~~~~~~~~~~~~~ Storage groups are created on the PowerMax as a result of creation of generic volume groups. 
These storage groups follow a different naming convention and are of the following format depending upon whether the groups have a name. .. code-block:: text TruncatedGroupName_GroupUUID or GroupUUID Group type, group, and group snapshot operations ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Please refer to the official OpenStack `block-storage groups`_ documentation for the most up to date group operations Group replication operations ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Generic volume group operations no longer require the user to specify the Cinder CLI version, however, performing generic volume group replication operations still require this setting. When running generic volume group commands set the value ``--os-volume-api-version`` to ``3.38``. These commands are not listed in the latest Cinder CLI documentation so will remain here until added to the latest Cinder CLI version or deprecated from Cinder. This is how to create a replication group. Please refer to the official OpenStack `block-storage groups`_ documentation for the most up to date group operations. - Make sure there is a `replication_device` for Synchronous in `cinder.conf` .. code-block:: console replication_device = backend_id:backend_id_legacy_rep,target_device_id:0001234567890,remote_port_group:PG1,remote_pool:SRP_1,rdf_group_label:os-sync,mode:Synchronous - Create a volume type with property `replication_enabled=’ True’`. .. code-block:: console $ openstack volume type create --property replication_enabled=' True' SYNC_REP_VT - Create a Generic group type with extra specs `consistent_group_snapshot_enabled=’ True’` and `consistent_group_replication_enabled=’ True’`. .. code-block:: console $ cinder --os-volume-api-version 3.38 group-type-create GROUP_REP_VT $ cinder --os-volume-api-version 3.38 group-type-key GROUP_REP_VT set \ consistent_group_snapshot_enabled=' True' \ consistent_group_replication_enabled=' True' - Create a Generic group with synchronous volume type SYNC_REP_VT .. code-block:: console $ cinder --os-volume-api-version 3.13 group-create --name GROUP_REP GROUP_REP_VT GROUP_REP_VT - Create a volume in the Generic group .. code-block:: console $ cinder --os-volume-api-version 3.38 create --volume-type SYNC_REP_VT --group-id GROUP_REP \ --name VOL_REP_GROUP 1 - Enable group replication .. code-block:: console $ cinder --os-volume-api-version 3.38 group-enable-replication GROUP_REP - Disable group replication .. code-block:: console $ cinder --os-volume-api-version 3.38 group-disable-replication GROUP_REP - Failover group .. code-block:: console $ cinder --os-volume-api-version 3.38 group-failover-replication GROUP_REP - Failback group .. code-block:: console $ cinder --os-volume-api-version 3.38 group-failover-replication GROUP_REP \ --secondary-backend-id default Manage and unmanage Volumes --------------------------- Managing volumes in OpenStack is the process whereby a volume which exists on the storage device is imported into OpenStack to be made available for use in the OpenStack environment. For a volume to be valid for managing into OpenStack, the following prerequisites must be met: - The volume exists in a Cinder managed pool - The volume is not part of a Masking View - The volume is not part of an SRDF relationship - The volume is configured as a TDEV (thin device) - The volume is set to FBA emulation - The volume must a whole GB e.g. 
5.5GB is not a valid size - The volume cannot be a SnapVX target For a volume to exist in a Cinder managed pool, it must reside in the same Storage Resource Pool (SRP) as the backend which is configured for use in OpenStack. Specifying the pool correctly can be entered manually as it follows the same format: .. code-block:: console Pool format: ++ Pool example: Diamond+SRP_1+111111111111 .. list-table:: Pool values :header-rows: 1 * - Key - Value * - ``service_level`` - The service level of the volume to be managed * - ``srp`` - The Storage Resource Pool configured for use by the backend * - ``array_id`` - The PowerMax serial number (12 digit numerical) Manage volumes ~~~~~~~~~~~~~~ With your pool name defined you can now manage the volume into OpenStack, this is possible with the CLI command ``cinder manage``. The ``bootable`` parameter is optional in the command, if the volume to be managed into OpenStack is not bootable leave this parameter out. OpenStack will also determine the size of the value when it is managed so there is no need to specify the volume size. Command format: .. code-block:: console $ cinder manage --name --volume-type \ --availability-zone <--bootable> Command Example: .. code-block:: console $ cinder manage --name powermax_managed_volume --volume-type POWERMAX_ISCSI_DIAMOND \ --availability-zone nova demo@POWERMAX_ISCSI_DIAMOND#Diamond+SRP_1+111111111111 031D8 After the above command has been run, the volume will be available for use in the same way as any other OpenStack PowerMax volume. .. note:: An unmanaged volume with a prefix of ``OS-`` in its identifier name cannot be managed into OpenStack, as this is a reserved keyword for managed volumes. If the identifier name has this prefix, an exception will be thrown by the PowerMax driver on a manage operation. Managing volumes with replication enabled ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Whilst it is not possible to manage volumes into OpenStack that are part of a SRDF relationship, it is possible to manage a volume into OpenStack and enable replication at the same time. This is done by having a replication enabled PowerMax volume type (for more information see section Volume Replication) during the manage volume process you specify the replication volume type as the chosen volume type. Once managed, replication will be enabled for that volume. .. note:: It is not possible to manage into OpenStack SnapVX linked target volumes, only volumes which are a SnapVX source are permitted. We do not want a scenario where a snapshot source can exist outside of OpenStack management. Unmanage volume ~~~~~~~~~~~~~~~ Unmanaging a volume is not the same as deleting a volume. When a volume is deleted from OpenStack, it is also deleted from the PowerMax at the same time. Unmanaging a volume is the process whereby a volume is removed from OpenStack but it remains for further use on the PowerMax. The volume can also be managed back into OpenStack at a later date using the process discussed in the previous section. Unmanaging volume is carried out using the Cinder unmanage CLI command: Command format: .. code-block:: console $ cinder unmanage Command example: .. code-block:: console $ cinder unmanage powermax_test_vol Once unmanaged from OpenStack, the volume can still be retrieved using its device ID or OpenStack volume ID. Within Unisphere you will also notice that the ``OS-`` prefix has been removed, this is another visual indication that the volume is no longer managed by OpenStack. 
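As a hedged illustration of the "Managing volumes with replication enabled" process described above, the same ``cinder manage`` command is used but with a replication-enabled volume type selected; the host and pool are taken from the earlier example, while the volume type name ``POWERMAX_ISCSI_DIAMOND_REP`` and device ID ``031D9`` are hypothetical:

.. code-block:: console

   $ cinder manage --name powermax_managed_replicated_volume \
       --volume-type POWERMAX_ISCSI_DIAMOND_REP \
       --availability-zone nova \
       demo@POWERMAX_ISCSI_DIAMOND#Diamond+SRP_1+111111111111 031D9

Once the manage operation completes, replication is enabled for the volume as described in the previous subsection.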
Manage/unmanage snapshots ------------------------- Users can manage PowerMax SnapVX snapshots into OpenStack if the source volume already exists in Cinder. Similarly, users will be able to unmanage OpenStack snapshots to remove them from Cinder but keep them on the storage backend. Set-up, restrictions and requirements: #. No additional settings or configuration is required to support this functionality. #. Manage/Unmanage snapshots requires SnapVX functionality support on PowerMax. #. Manage/Unmanage Snapshots in OpenStack Cinder is only supported at present through Cinder CLI commands. #. It is only possible to manage or unmanage one snapshot at a time in Cinder. Manage SnapVX snapshot ~~~~~~~~~~~~~~~~~~~~~~ It is possible to manage PowerMax SnapVX snapshots into OpenStack, where the source volume from which the snapshot is taken already exists in, and is managed by OpenStack Cinder. The source volume may have been created in OpenStack Cinder, or it may have been managed in to OpenStack Cinder also. With the support of managing SnapVX snapshots included in OpenStack Queens, the restriction around managing SnapVX source volumes has been removed. .. note:: It is not possible to manage into OpenStack SnapVX linked target volumes, only volumes which are a SnapVX source are permitted. We do not want a scenario where a snapshot source can exist outside of OpenStack management. Requirements/restrictions: #. The SnapVX source volume must be present in and managed by Cinder. #. The SnapVX snapshot name must not begin with ``OS-``. #. The SnapVX snapshot source volume must not be in a failed-over state. #. Managing a SnapVX snapshot will only be allowed if the snapshot has no linked target volumes. Command structure: #. Identify your SnapVX snapshot for management on the PowerMax, note the name. #. Ensure the source volume is already managed into OpenStack Cinder, note the device ID. #. Using the Cinder CLI, use the following command structure to manage a Snapshot into OpenStack Cinder: .. code-block:: console $ cinder snapshot-manage --id-type source-name [--name ] [--description ] [--metadata [ [ ...]]] Positional arguments: - ```` Source OpenStack volume name - ```` Name of existing snapshot on PowerMax backend Optional arguments: - ``--name `` Snapshot name (Default=``None``) - ``--description `` Snapshot description (Default=``None``) - ``--metadata [ [ ...]]`` Metadata ``key=value`` pairs (Default=``None``) Example: .. code-block:: console $ cinder snapshot-manage --name SnapshotManaged \ --description "Managed Queens Feb18" \ powermax-vol-1 PowerMaxSnapshot Where: - The name in OpenStack after managing the SnapVX snapshot will be ``SnapshotManaged``. - The snapshot will have the description ``Managed Queens Feb18``. - The Cinder volume name is ``powermax-vol-1``. - The name of the SnapVX snapshot on the PowerMax backend is ``PowerMaxSnapshot``. Outcome: After the process of managing the Snapshot has completed, the SnapVX snapshot on the PowerMax backend will be prefixed by the letters ``OS-``, leaving the snapshot in this example named ``OS-PowerMaxSnapshot``. The associated snapshot managed by Cinder will be present for use under the name ``SnapshotManaged``. Unmanage cinder snapshot ~~~~~~~~~~~~~~~~~~~~~~~~ Unmanaging a snapshot in Cinder is the process whereby the snapshot is removed from and no longer managed by Cinder, but it still exists on the storage backend. 
Unmanaging a SnapVX snapshot in OpenStack Cinder follows this behaviour, whereby after unmanaging a PowerMax SnapVX snapshot from Cinder, the snapshot is removed from OpenStack but is still present for use on the PowerMax backend. Requirements/Restrictions: - The SnapVX source volume must not be in a failed-over state. Command Structure: Identify the SnapVX snapshot you want to unmanage from OpenStack Cinder, noting the snapshot name or ID as specified by Cinder. Using the Cinder CLI, use the following command structure to unmanage the SnapVX snapshot from Cinder: .. code-block:: console $ cinder snapshot-unmanage Positional arguments: - ```` Cinder snapshot name or ID. Example: .. code-block:: console $ cinder snapshot-unmanage SnapshotManaged Where: - The SnapVX snapshot name in OpenStack Cinder is SnapshotManaged. After the process of unmanaging the SnapVX snapshot in Cinder, the snapshot on the PowerMax backend will have the ``OS-`` prefix removed to indicate it is no longer OpenStack managed. In the example above, the snapshot after unmanaging from OpenStack will be named ``PowerMaxSnapshot`` on the storage backend. List manageable volumes and snapshots ------------------------------------- Manageable volumes ~~~~~~~~~~~~~~~~~~ Volumes that can be managed by and imported into OpenStack. List manageable volumes is filtered by: - Volume size should be 1026MB or greater (1GB PowerMax Cinder Vol = 1026 MB) - Volume size should be a whole integer GB capacity - Volume should not be a part of a masking view. - Volume status should be ``Ready`` - Volume service state should be ``Normal`` - Volume emulation type should be ``FBA`` - Volume configuration should be ``TDEV`` - Volume should not be a system resource. - Volume should not be ``private`` - Volume should not be ``encapsulated`` - Volume should not be ``reserved`` - Volume should not be a part of an RDF session - Volume should not be a SnapVX target - Volume identifier should not begin with ``OS-``. - Volume should not be in more than one storage group. Manageable snapshots ~~~~~~~~~~~~~~~~~~~~ Snapshots that can be managed by and imported into OpenStack. List manageable snapshots is filtered by: - The source volume should be marked as a SnapVX source. - The source volume should be 1026MB or greater. - The source volume should be a whole integer GB capacity. - The source volume emulation type should be ``FBA``. - The source volume configuration should be ``TDEV``. - The source volume should not be ``private``. - The source volume should not be a system resource. - The snapshot identifier should not start with ``OS-`` or ``temp-``. - The snapshot should not be expired. - The snapshot generation number should not be greater than 0. .. note:: There is some delay in the syncing of the Unisphere for PowerMax database when the state/properties of a volume are modified using ``symcli``. To prevent this it is preferable to modify the state/properties of volumes within Unisphere. Cinder backup support --------------------- The PowerMax Cinder driver supports Cinder backup functionality. For further information on setup, configuration and usage please see the official OpenStack `volume backup`_ documentation and related `volume backup CLI`_ guide. .. note:: ``rpc_response_timeout`` may need to be increased significantly in volume backup operations, especially in replication scenarios where the creation operation will take longer.
For more information on ``rpc_response_timeout`` please refer to `Live migration configuration`_ Port group & port load balancing -------------------------------- By default port groups are selected at random from ``cinder.conf`` when connections are initialised between volumes on the backend array and compute instances in Nova. If a port group is set in the volume type extra specifications this will take precedence over any port groups configured in ``cinder.conf``. Port selection within the chosen port group is also selected at random by default. With port group and port load balancing in the PowerMax for Cinder driver users can now select the port group and port load by determining which has the lowest load. The load metric is defined by the user in both instances so the selection process can better match the needs of the user and their environment. Available metrics are detailed in the ``performance metrics`` section. Port Groups are reported on at five minute time deltas (diagnostic), and FE Ports are reported on at one minute time deltas (real-time) if real-time metrics are enabled, else default five minute time delta (diagnostic). The window at which performance metrics are analysed is a user-configured option in ``cinder.conf``, this is detailed in the ``configuration`` section. Calculating load ~~~~~~~~~~~~~~~~ The process by which Port Group or Port load is calculated is the same for both. The user specifies the look back window which determines how many performance intervals to measure, 60 minutes will give 12 intervals of 5 minutes each for example. If no lookback window is specified or is set to 0 only the most recent performance metric will be analysed. This will give a slight performance improvement but with the improvements made to the performance REST endpoints for load this improvement is negligible. For real-time stats a minimum of 1 minute is required. Once a call is made to the performance REST endpoints, the performance data for that PG or port is extracted. Then the metric values are summed and divided by the count of intervals to get the average for the look back window. The performance metric average value for each asset is added to a Python heap. Once all assets have been measured the lowest value will always be at position 0 in the heap so there is no extra time penalty requirement for search. Pre-requisites ~~~~~~~~~~~~~~ Before load balancing can be enabled in the PowerMax for Cinder driver performance metrics collection must be enabled in Unisphere. Real-time performance metrics collection is enabled separately from diagnostic metrics collection. Performance metric collection is only available for local arrays in Unisphere. After performance metrics registration there is a time delay before Unisphere records performance metrics, adequate time must be given before enabling load balancing in Cinder else default random selection method will be used. It is recommended to wait 4 hours after performance registration before enabling load balancing in Cinder. Configuration ~~~~~~~~~~~~~ A number of configuration options are available for users so load balancing can be set to better suit the needs of the environment. These configuration options are detailed in the table below. .. 
table:: Load balance cinder.conf configuration options +-----------------------------+----------------+-----------------+----------------------------------------+ | ``cinder.conf parameter`` | options | Default | Description | +=============================+================+=================+========================================+ | ``load_balance`` | ``True/False`` | ``False`` | | Enable/disable load balancing for | | | | | | a PowerMax backend. | +-----------------------------+----------------+-----------------+----------------------------------------+ | ``load_balance_real_time`` | ``True/False`` | ``False`` | | Enable/disable real-time performance | | | | | | metrics for Port level metrics | | | | | | (not available for Port Group). | +-----------------------------+----------------+-----------------+----------------------------------------+ | ``load_data_format`` | ``Avg/Max`` | ``Avg`` | | Performance data format, not | | | | | | applicable for real-time. | +-----------------------------+----------------+-----------------+----------------------------------------+ | ``load_lookback`` | ``int`` | ``60`` | | How far in minutes to look back for | | | | | | diagnostic performance metrics in | | | | | | load calculation, minimum of 0 | | | | | | maximum of 1440 (24 hours). | +-----------------------------+----------------+-----------------+----------------------------------------+ | ``load_real_time_lookback`` | ``int`` | ``1`` | | How far in minutes to look back for | | | | | | real-time performance metrics in | | | | | | load calculation, minimum of 1 | | | | | | maximum of 60 (24 hours). | +-----------------------------+----------------+-----------------+----------------------------------------+ | ``snapvx_unlink_symforce`` | ``True/False`` | ``False`` | | Enable/disable symforce | | | | | | for SnapVx unlink. | +-----------------------------+----------------+-----------------+----------------------------------------+ | ``port_group_load_metric`` | See below | ``PercentBusy`` | | Metric used for port group load | | | | | | calculation. | +-----------------------------+----------------+-----------------+----------------------------------------+ | ``port_load_metric`` | See below | ``PercentBusy`` | | Metric used for port group load | | | | | | calculation. | +-----------------------------+----------------+-----------------+----------------------------------------+ Port-Group Metrics ~~~~~~~~~~~~~~~~~~ .. table:: Port-group performance metrics +-------------------+--------------------+-----------------------------------------------------------+ | Metric | cinder.conf option | Description | +===================+====================+===========================================================+ | % Busy | ``PercentBusy`` | The percent of time the port group is busy. | +-------------------+--------------------+-----------------------------------------------------------+ | Avg IO Size (KB) | ``AvgIOSize`` | | Calculated value: (HA Kbytes transferred per sec / | | | | | total IOs per sec) | +-------------------+--------------------+-----------------------------------------------------------+ | Host IOs/sec | ``IOs`` | | The number of host IO operations performed each second, | | | | | including writes and random and sequential reads. | +-------------------+--------------------+-----------------------------------------------------------+ | Host MBs/sec | ``MBs`` | The number of host MBs read each second. 
| +-------------------+--------------------+-----------------------------------------------------------+ | MBs Read/sec | ``MBRead`` | The number of reads per second in MBs. | +-------------------+--------------------+-----------------------------------------------------------+ | MBs Written/sec | ``MBWritten`` | The number of writes per second in MBs. | +-------------------+--------------------+-----------------------------------------------------------+ | Reads/sec | ``Reads`` | The average number of host reads performed per second. | +-------------------+--------------------+-----------------------------------------------------------+ | Writes/sec | ``Writes`` | The average number of host writes performed per second. | +-------------------+--------------------+-----------------------------------------------------------+ Port Metrics ~~~~~~~~~~~~ .. table:: Port performance metrics +---------------------+-----------------------+---------------------+------------------------------------------------------------+ | Metric | cinder.conf option | Real-Time Supported | Description | +=====================+=======================+=====================+============================================================+ | % Busy | ``PercentBusy`` | Yes | The percent of time the port is busy. | +---------------------+-----------------------+---------------------+------------------------------------------------------------+ | Avg IO Size (KB) | ``AvgIOSize`` | Yes | | Calculated value: (HA Kbytes transferred per sec / | | | | | | total IOs per sec) | +---------------------+-----------------------+---------------------+------------------------------------------------------------+ | Host IOs/sec | ``IOs`` | Yes | | The number of host IO operations performed each second, | | | | | | including writes and random and sequential reads. | +---------------------+-----------------------+---------------------+------------------------------------------------------------+ | Host MBs/sec | ``MBs`` | Yes | The number of host MBs read each second. | +---------------------+-----------------------+---------------------+------------------------------------------------------------+ | MBs Read/sec | ``MBRead`` | Yes | The number of reads per second in MBs. | +---------------------+-----------------------+---------------------+------------------------------------------------------------+ | MBs Written/sec | ``MBWritten`` | Yes | The number of writes per second in MBs. | +---------------------+-----------------------+---------------------+------------------------------------------------------------+ | Reads/sec | ``Reads`` | Yes | The number of read operations performed by the port per | | | | | second. | +---------------------+-----------------------+---------------------+------------------------------------------------------------+ | Writes/sec | ``Writes`` | Yes | The number of write operations performed each second by | | | | | the port. | +---------------------+-----------------------+---------------------+------------------------------------------------------------+ | Speed Gb/sec | ``SpeedGBs`` | No | Speed. | +---------------------+-----------------------+---------------------+------------------------------------------------------------+ | Response Time (ms) | ``ResponseTime`` | No | The average response time for the reads and writes. 
| +---------------------+-----------------------+---------------------+------------------------------------------------------------+ | Read RT (ms) | ``ReadResponseTime`` | No | The average time it takes to serve one read IO. | +---------------------+-----------------------+---------------------+------------------------------------------------------------+ | Write RT (ms) | ``WriteResponseTime`` | No | The average time it takes to serve one write IO. | +---------------------+-----------------------+---------------------+------------------------------------------------------------+ Upgrading from SMI-S based driver to REST API based driver ========================================================== Seamless upgrades from an SMI-S based driver to REST API based driver, following the setup instructions above, are supported with a few exceptions: #. Seamless upgrade from SMI-S(Ocata and earlier) to REST(Pike and later) is now available on all functionality including Live Migration. #. Consistency groups are deprecated in Pike. Generic Volume Groups are supported from Pike onwards. Known issues ============ These known issues exist in the current release of OpenStack: - `Launchpad #1951977`_ Cannot create backups for metro volumes with multipath enabled. .. Document Hyperlinks .. _Dell Support: https://www.dell.com/support .. _Openstack CLI: https://docs.openstack.org/cinder/latest/cli/cli-manage-volumes.html#volume-types .. _over-subscription documentation: https://docs.openstack.org/cinder/latest/admin/over-subscription.html .. _configuring migrations: https://docs.openstack.org/nova/latest/admin/configuring-migrations.html .. _live migration usage: https://docs.openstack.org/nova/latest/admin/live-migration-usage.html .. _Ubuntu NFS Documentation: https://help.ubuntu.com/lts/serverguide/network-file-system.html .. _multi-attach documentation: https://docs.openstack.org/cinder/latest/admin/volume-multiattach.html .. _Data at Rest Encryption: https://www.dellemc.com/resources/en-us/asset/white-papers/products/storage/h13936-dell-emc-powermax-vmax-all-flash-data-rest-encryption.pdf .. _official Barbican documentation: https://docs.openstack.org/cinder/latest/configuration/block-storage/volume-encryption.html .. _SRDF Metro Overview & Best Practices: https://www.emc.com/collateral/technical-documentation/h14556-vmax3-srdf-metro-overview-and-best-practices-tech-note.pdf .. _block-storage groups: https://docs.openstack.org/cinder/latest/admin/groups.html .. _volume backup: https://docs.openstack.org/cinder/latest/configuration/block-storage/backup-drivers.html .. _volume backup CLI: https://docs.openstack.org/python-openstackclient/latest/cli/command-objects/volume-backup.html .. _PyU4V: https://pyu4v.readthedocs.io/en/latest/ .. _Create volume types: `6. Create volume types`_ .. _Launchpad #1951977: https://bugs.launchpad.net/cinder/+bug/1951977 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/doc/source/configuration/block-storage/drivers/dell-emc-powerstore-driver.rst0000664000175000017500000002347500000000000032144 0ustar00zuulzuul00000000000000========================== Dell PowerStore driver ========================== This section explains how to configure and connect the block storage nodes to an PowerStore storage cluster. Supported operations ~~~~~~~~~~~~~~~~~~~~ - Create, delete, attach and detach volumes. - Create, delete volume snapshots. - Create a volume from a snapshot. - Copy an image to a volume. 
- Copy a volume to an image. - Clone a volume. - Extend a volume. - Get volume statistics. - Attach a volume to multiple servers simultaneously (multiattach). - Revert a volume to a snapshot. - OpenStack replication v2.1 support. - Create, delete, update Consistency Groups. - Create, delete Consistency Groups snapshots. - Clone a Consistency Group. - Create a Consistency Group from a Consistency Group snapshot. - Quality of Service (QoS) - Cinder volume active/active support. Driver configuration ~~~~~~~~~~~~~~~~~~~~ Add the following content into ``/etc/cinder/cinder.conf``: .. code-block:: ini [DEFAULT] enabled_backends = powerstore [powerstore] # PowerStore REST IP san_ip = # PowerStore REST username and password san_login = san_password = # Storage protocol storage_protocol = # FC or iSCSI # Volume driver name volume_driver = cinder.volume.drivers.dell_emc.powerstore.driver.PowerStoreDriver # Backend name volume_backend_name = # PowerStore allowed ports powerstore_ports = # Ex. 58:cc:f0:98:49:22:07:02,58:cc:f0:98:49:23:07:02 Driver configuration to use NVMe-OF ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ NVMe-OF support was added in PowerStore starting from version 2.1. .. note:: Currently the driver supports only NVMe over TCP. To configure NVMe-OF driver add the following content into ``/etc/cinder/cinder.conf``: .. code-block:: ini [DEFAULT] enabled_backends = powerstore [powerstore] # PowerStore REST IP san_ip = # PowerStore REST username and password san_login = san_password = # Volume driver name volume_driver = cinder.volume.drivers.dell_emc.powerstore.driver.PowerStoreDriver # Backend name volume_backend_name = powerstore_nvme = True Driver options ~~~~~~~~~~~~~~ The driver supports the following configuration options: .. config-table:: :config-target: PowerStore cinder.volume.drivers.dell_emc.powerstore.driver SSL support ~~~~~~~~~~~ To enable the SSL certificate verification, modify the following options in the ``cinder.conf`` file: .. code-block:: ini driver_ssl_cert_verify = True driver_ssl_cert_path = By default, the SSL certificate validation is disabled. If the ``driver_ssl_cert_path`` option is omitted, the system default CA will be used. Image Volume Caching support ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The image volume cache functionality is supported. To enable it, modify the following options in the ``cinder.conf`` file: .. code-block:: ini image_volume_cache_enabled = True By default, Image Volume Caching is disabled. Thin provisioning and compression ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The driver creates thin provisioned compressed volumes by default. Thick provisioning is not supported. CHAP authentication support ~~~~~~~~~~~~~~~~~~~~~~~~~~~ The driver supports one-way (Single mode) CHAP authentication. To use CHAP authentication CHAP Single mode has to be enabled on the storage side. .. note:: When enabling CHAP, any previously added hosts will need to be updated with CHAP configuration since there will be I/O disruption for those hosts. It is recommended that before adding hosts to the cluster, decide what type of CHAP configuration is required, if any. CHAP configuration is retrieved from the storage during driver initialization, no additional configuration is needed. Secrets are generated automatically. Replication support ~~~~~~~~~~~~~~~~~~~ Configure replication ^^^^^^^^^^^^^^^^^^^^^ #. Pair source and destination PowerStore systems. #. Create Protection policy and Replication rule with desired RPO. #. Enable replication in ``cinder.conf`` file. 
To enable replication feature for storage backend set ``replication_device`` as below: .. code-block:: ini ... replication_device = backend_id:powerstore_repl_1, san_ip: , san_login: , san_password: * Only one replication device is supported for storage backend. * Replication device supports the same options as the main storage backend. #. Create volume type for volumes with replication enabled. .. code-block:: console $ openstack volume type create powerstore_replicated $ openstack volume type set --property replication_enabled=' True' powerstore_replicated #. Set Protection policy name for volume type. .. code-block:: console $ openstack volume type set --property powerstore:protection_policy= \ powerstore_replicated Failover host ^^^^^^^^^^^^^ In the event of a disaster, or where there is a required downtime the administrator can issue the failover host command: .. code-block:: console $ cinder failover-host cinder_host@powerstore --backend_id powerstore_repl_1 After issuing Cinder failover-host command Cinder will switch to configured replication device, however to get existing instances to use this target and new paths to volumes it is necessary to first shelve Nova instances and then unshelve them, this will effectively restart the Nova instance and re-establish data paths between Nova instances and the volumes. .. code-block:: console $ nova shelve $ nova unshelve [--availability-zone ] If the primary system becomes available, the administrator can initiate failback operation using ``--backend_id default``: .. code-block:: console $ cinder failover-host cinder_host@powerstore --backend_id default Consistency Groups support ~~~~~~~~~~~~~~~~~~~~~~~~~~ To use PowerStore Volume Groups create Group Type with consistent group snapshot enabled. .. code-block:: console $ cinder --os-volume-api-version 3.11 group-type-create powerstore_vg $ cinder --os-volume-api-version 3.11 group-type-key powerstore_vg set consistent_group_snapshot_enabled=" True" .. note:: Currently driver does not support Consistency Groups replication. Adding volume to Consistency Group and creating volume in Consistency Group will fail if volume is replicated. QoS (Quality of Service) support ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. note:: QoS is supported in PowerStore version 4.0 or later. The PowerStore driver supports Quality of Service (QoS) by enabling the following capabilities: ``bandwidth_limit_type`` The QoS bandwidth limit type. This type setting determines how the max_iops and max_bw attributes are used. This has the following two values: 1. ``Absolute`` - Limits are absolute values specified, either I/O operations per second or bandwidth. 2. ``Density`` - Limits specified are per GB, e.g. I/O operations per second per GB. .. note:: This (bandwidth_limit_type) property is mandatory when creating QoS. ``max_iops`` Maximum I/O operations in either I/O operations per second (IOPS) or I/O operations per second per GB. The specification of the type attribute determines which metric is used. If type is set to absolute, max_iops is specified in IOPS. If type is set to density, max_iops is specified in IOPS per GB. If both max_iops and max_bw are specified, the system will limit I/O if either value is exceeded. The value must be within the range of 1 to 2147483646. ``max_bw`` Maximum I/O bandwidth measured in either Kilobytes per second or Kilobytes per second / per GB. The specification of the type attribute determines which measurement is used. If type is set to absolute, max_bw is specified in Kilobytes per second. 
If type is set to density max_bw is specified in Kilobytes per second / per GB. If both max_iops and max_bw are specified, the system will limit I/O if either value is exceeded. The value must be within the range of 2000 to 2147483646. ``burst_percentage`` Percentage indicating by how much the limit may be exceeded. If I/O normally runs below the specified limit, then the volume or volume_group will accumulate burst credits that can be used to exceed the limit for a short period (a few seconds, but will not exceed the burst limit). This burst percentage applies to both max_iops and max_bw and is independent of the type setting. The value must be within the range of 0 to 100. If this property is not specified during QoS creation, a default value of 0 will be used. .. note:: When creating QoS, you must define either ``max_iops`` or ``max_bw``, or you can define both. .. code-block:: console $ openstack volume qos create --consumer back-end --property max_iops=100 --property max_bw=50000 --property bandwidth_limit_type=Absolute --property burst_percentage=80 powerstore_qos $ openstack volume type create --property volume_backend_name=powerstore powerstore $ openstack volume qos associate powerstore_qos powerstore .. note:: There are two approaches for updating QoS properties in PowerStore: #. ``Retype the Volume``: This involves retyping the volume with the different QoS settings and migrating the volume to the new type. #. ``Modify Existing QoS Properties`` (Recommended): This method entails changing the existing QoS properties and creating a new instance or image volume to update the QoS policy in PowerStore. This will also update the QoS properties of existing attached volumes, created with the same volume type. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/doc/source/configuration/block-storage/drivers/dell-emc-powerstore-nfs.rst0000664000175000017500000000330300000000000031423 0ustar00zuulzuul00000000000000========================== Dell PowerStore NFS Driver ========================== PowerStore NFS driver enables storing Block Storage service volumes on a PowerStore storage back end. Supported operations ~~~~~~~~~~~~~~~~~~~~ - Create, delete, attach and detach volumes. - Create, delete volume snapshots. - Create a volume from a snapshot. - Copy an image to a volume. - Copy a volume to an image. - Clone a volume. - Extend a volume. - Get volume statistics. - Attach a volume to multiple servers simultaneously (multiattach). - Revert a volume to a snapshot. Driver configuration ~~~~~~~~~~~~~~~~~~~~ Add the following content into ``/etc/cinder/cinder.conf``: .. code-block:: ini [DEFAULT] enabled_backends = powerstore-nfs [powerstore-nfs] volume_driver = cinder.volume.drivers.dell_emc.powerstore.nfs.PowerStoreNFSDriver nfs_qcow2_volumes = True nfs_snapshot_support = True nfs_sparsed_volumes = False nas_host = nas_share_path = /nfs-export nas_secure_file_operations = False nas_secure_file_permissions = False volume_backend_name = powerstore-nfs Dell PowerStore NFS Copy Offload API ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ A feature for effective creation of a volume from snapshot/volume was added in PowerStore NFS Driver. The dellfcopy utility provides the ability to copy a file very quickly on a Dell SDNAS filesystem mounted by a client. To download it, contact your local Dell representative. The dellfcopy tool is used in the following operations: - Create a volume from a snapshot. - Clone a volume. 
To use PowerStore NFS driver with this feature, you must install the tool with the following command: .. code-block:: console # sudo dpkg -i ./dellfcopy_1.3-1_amd64.deb ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/doc/source/configuration/block-storage/drivers/dell-emc-powervault-me.rst0000664000175000017500000001344100000000000031241 0ustar00zuulzuul00000000000000============================================================== Dell PowerVault ME Series Fibre Channel and iSCSI drivers ============================================================== The ``PVMEFCDriver`` and ``PVMEISCSIDriver`` Cinder drivers allow the Dell PowerVault ME Series storage arrays to be used for Block Storage in OpenStack deployments. System requirements ~~~~~~~~~~~~~~~~~~~ To use the PowerVault ME Series drivers, the following are required: - PowerVault ME5 Series storage array with I200 or later firmware, or - PowerVault ME4 Series storage array with G280 or later firmware - iSCSI or Fibre Channel host interfaces - Network connectivity between the OpenStack hosts and the array's embedded management interface - The HTTPS protocol must be enabled on the array Supported operations ~~~~~~~~~~~~~~~~~~~~ - Create, delete, attach, and detach volumes. - Create, list, and delete volume snapshots. - Create a volume from a snapshot. - Copy an image to a volume. - Copy a volume to an image. - Clone a volume. - Extend a volume. - Migrate a volume with back-end assistance. - Retype a volume. - Manage and unmanage a volume. Configuring the array ~~~~~~~~~~~~~~~~~~~~~ #. Verify that the array can be managed via an HTTPS connection. HTTP can also be used if ``driver_use_ssl`` is set to False in the ``cinder.conf`` file. Confirm that virtual pools A and B are already present on the array. If they are missing, create them. #. Edit the ``cinder.conf`` file to define a storage back-end entry for each storage pool on the array that will be managed by OpenStack. Each entry consists of a unique section name, surrounded by square brackets, followed by options specified in a ``key=value`` format. * The ``pvme_pool_name`` value specifies the name of the storage pool or vdisk on the array. * The ``volume_backend_name`` option value can be a unique value, if you wish to be able to assign volumes to a specific storage pool on the array, or a name that is shared among multiple storage pools to let the volume scheduler choose where new volumes are allocated. #. The following ``cinder.conf`` options generally have identical values for each backend section on the array: * ``volume_driver`` specifies the Cinder driver name. * ``san_ip`` specifies the IP addresses or host names of the array's management controllers. * ``san_login`` and ``san_password`` specify the username and password of an array user account with ``manage`` privileges * ``driver_use_ssl`` must be set to True to enable use of the HTTPS protocol. * ``pvme_iscsi_ips`` specifies the iSCSI IP addresses for the array if using the iSCSI transport protocol In the examples below, two back ends are defined, one for pool A and one for pool B, and a common ``volume_backend_name`` is used so that a single volume type definition can be used to allocate volumes from both pools. **iSCSI example back-end entries** .. 
code-block:: ini [pool-a] pvme_pool_name = A volume_backend_name = pvme-array volume_driver = cinder.volume.drivers.dell_emc.powervault.iscsi.PVMEISCSIDriver san_ip = 10.1.2.3,10.1.2.4 san_login = manage san_password = !manage pvme_iscsi_ips = 10.2.3.4,10.2.3.5 driver_use_ssl = true [pool-b] pvme_pool_name = B volume_backend_name = pvme-array volume_driver = cinder.volume.drivers.dell_emc.powervault.iscsi.PVMEISCSIDriver san_ip = 10.1.2.3,10.1.2.4 san_login = manage san_password = !manage pvme_iscsi_ips = 10.2.3.4,10.2.3.5 driver_use_ssl = true **Fibre Channel example back-end entries** .. code-block:: ini [pool-a] pvme_pool_name = A volume_backend_name = pvme-array volume_driver = cinder.volume.drivers.dell_emc.powervault.fc.PVMEFCDriver san_ip = 10.1.2.3,10.1.2.4 san_login = manage san_password = !manage driver_use_ssl = true [pool-b] pvme_pool_name = B volume_backend_name = pvme-array volume_driver = cinder.volume.drivers.dell_emc.powervault.fc.PVMEFCDriver san_ip = 10.1.2.3,10.1.2.4 san_login = manage san_password = !manage driver_use_ssl = true #. If HTTPS is enabled, you can enable certificate verification with the option ``driver_ssl_cert_verify = True``. You may also use the ``driver_ssl_cert_path`` parameter to specify the path to a CA\_BUNDLE file containing CAs other than those in the default list. #. Modify the ``[DEFAULT]`` section of the ``cinder.conf`` file to add an ``enabled_backends`` parameter specifying the backend entries you added, and a ``default_volume_type`` parameter specifying the name of a volume type that you will create in the next step. **Example of [DEFAULT] section changes** .. code-block:: ini [DEFAULT] enabled_backends = pool-a,pool-b default_volume_type = pvme #. Create a new volume type for each distinct ``volume_backend_name`` value that you added in the ``cinder.conf`` file. The example below assumes that the same ``volume_backend_name=pvme-array`` option was specified in all of the entries, and specifies that the volume type ``pvme`` can be used to allocate volumes from any of them. **Example of creating a volume type** .. code-block:: console $ openstack volume type create pvme $ openstack volume type set --property volume_backend_name=pvme-array pvme #. After modifying the ``cinder.conf`` file, restart the ``cinder-volume`` service. Driver-specific options ~~~~~~~~~~~~~~~~~~~~~~~ The following table contains the configuration options that are specific to the PowerVault ME Series drivers. .. config-table:: :config-target: PowerVault ME Series cinder.volume.drivers.dell_emc.powervault.common ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/doc/source/configuration/block-storage/drivers/dell-emc-unity-driver.rst0000664000175000017500000005210400000000000031072 0ustar00zuulzuul00000000000000===================== Dell Unity driver ===================== Unity driver has been integrated in the OpenStack Block Storage project since the Ocata release. The driver is built on the top of Block Storage framework and a Dell distributed Python package `storops `_. Prerequisites ~~~~~~~~~~~~~ +-------------------+-----------------+ | Software | Version | +===================+=================+ | Unity OE | 4.1.X or newer | +-------------------+-----------------+ | storops | 1.2.3 or newer | +-------------------+-----------------+ Supported operations ~~~~~~~~~~~~~~~~~~~~ - Create, delete, attach, and detach volumes. - Create, delete, attach, and detach compressed volumes. 
- Create, list, and delete volume snapshots. - Create a volume from a snapshot. - Copy an image to a volume. - Create an image from a volume. - Clone a volume. - Extend a volume. - Migrate a volume. - Get volume statistics. - Efficient non-disruptive volume backup. - Revert a volume to a snapshot. - Create thick volumes. - Create volume with tiering policy. - Create and delete consistent groups. - Add/remove volumes to/from a consistent group. - Create and delete consistent group snapshots. - Clone a consistent group. - Create a consistent group from a snapshot. - Attach a volume to multiple servers simultaneously (multiattach). - Volume replications. - Consistency group replications. Driver configuration ~~~~~~~~~~~~~~~~~~~~ .. note:: The following instructions should all be performed on cinder-volume container. #. Install `storops` from pypi: .. code-block:: console # pip install storops #. Add the following content into ``/etc/cinder/cinder.conf``: .. code-block:: ini [DEFAULT] enabled_backends = unity [unity] # Storage protocol storage_protocol = iSCSI # Unisphere IP san_ip = # Unisphere username and password san_login = san_password = # Volume driver name volume_driver = cinder.volume.drivers.dell_emc.unity.Driver # backend's name volume_backend_name = Storage_ISCSI_01 .. note:: These are minimal options for Unity driver, for more options, see `Driver options`_. .. note:: (**Optional**) If you require multipath based data access, perform below steps on both Block Storage and Compute nodes. #. Install ``sysfsutils``, ``sg3-utils`` and ``multipath-tools``: .. code-block:: console # apt-get install multipath-tools sg3-utils sysfsutils #. (Required for FC driver in case `Auto-zoning Support`_ is disabled) Zone the FC ports of Compute nodes with Unity FC target ports. #. Enable Unity storage optimized multipath configuration: Add the following content into ``/etc/multipath.conf`` .. code-block:: vim blacklist { # Skip the files uner /dev that are definitely not FC/iSCSI devices # Different system may need different customization devnode "^(ram|raw|loop|fd|md|dm-|sr|scd|st)[0-9]*" devnode "^hd[a-z][0-9]*" devnode "^cciss!c[0-9]d[0-9]*[p[0-9]*]" # Skip LUNZ device from VNX/Unity device { vendor "DGC" product "LUNZ" } } defaults { user_friendly_names no flush_on_last_del yes } devices { # Device attributed for EMC CLARiiON and VNX/Unity series ALUA device { vendor "DGC" product ".*" product_blacklist "LUNZ" path_grouping_policy group_by_prio path_selector "round-robin 0" path_checker emc_clariion features "0" no_path_retry 12 hardware_handler "1 alua" prio alua failback immediate } } #. Restart the multipath service: .. code-block:: console # service multipath-tools restart #. Enable multipath for image transfer in ``/etc/cinder/cinder.conf`` for each backend or in ``[backend_defaults]`` section as a common configuration for all backends. .. code-block:: ini use_multipath_for_image_xfer = True Restart the ``cinder-volume`` service to load the change. #. Enable multipath for volume attache/detach in ``/etc/nova/nova.conf``. .. code-block:: ini [libvirt] ... volume_use_multipath = True ... #. Restart the ``nova-compute`` service. Driver options ~~~~~~~~~~~~~~ .. config-table:: :config-target: Unity cinder.volume.drivers.dell_emc.unity.driver FC or iSCSI ports option ------------------------ Specify the list of FC or iSCSI ports to be used to perform the IO. Wild card character is supported. For iSCSI ports, use the following format: .. 
code-block:: ini unity_io_ports = spa_eth2, spb_eth2, *_eth3 For FC ports, use the following format: .. code-block:: ini unity_io_ports = spa_iom_0_fc0, spb_iom_0_fc0, *_iom_0_fc1 List the port ID with the :command:`uemcli` command: .. code-block:: console $ uemcli /net/port/eth show -output csv ... "spa_eth2","SP A Ethernet Port 2","spa","file, net, iscsi", ... "spb_eth2","SP B Ethernet Port 2","spb","file, net, iscsi", ... ... $ uemcli /net/port/fc show -output csv ... "spa_iom_0_fc0","SP A I/O Module 0 FC Port 0","spa", ... "spb_iom_0_fc0","SP B I/O Module 0 FC Port 0","spb", ... ... Live migration integration ~~~~~~~~~~~~~~~~~~~~~~~~~~ It is suggested to have multipath configured on Compute nodes for robust data access in VM instances live migration scenario. Once ``user_friendly_names no`` is set in defaults section of ``/etc/multipath.conf``, Compute nodes will use the WWID as the alias for the multipath devices. To enable multipath in live migration: .. note:: Make sure `Driver configuration`_ steps are performed before following steps. #. Set multipath in ``/etc/nova/nova.conf``: .. code-block:: ini [libvirt] ... volume_use_multipath = True ... Restart `nova-compute` service. #. Set ``user_friendly_names no`` in ``/etc/multipath.conf`` .. code-block:: text ... defaults { user_friendly_names no } ... #. Restart the ``multipath-tools`` service. Thin and thick provisioning ~~~~~~~~~~~~~~~~~~~~~~~~~~~ By default, the volume created by Unity driver is thin provisioned. Run the following commands to create a thick volume. .. code-block:: console # openstack volume type create --property provisioning:type=thick \ --property thick_provisioning_support=' True' thick_volume_type # openstack volume create --type thick_volume_type thick_volume Compressed volume support ~~~~~~~~~~~~~~~~~~~~~~~~~ Unity driver supports ``compressed volume`` creation, modification and deletion. In order to create a compressed volume, a volume type which enables compression support needs to be created first: .. code-block:: console $ openstack volume type create CompressedVolumeType $ openstack volume type set --property provisioning:type=compressed --property compression_support=' True' CompressedVolumeType Then create volume and specify the new created volume type. .. note:: In Unity, only All-Flash pools support compressed volume, for the other type of pools, "'compression_support': False" will be returned when getting pool stats. Storage-assisted volume migration support ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Unity driver supports storage-assisted volume migration, when the user starts migrating with ``cinder migrate --force-host-copy False `` or ``cinder migrate ``, cinder will try to leverage the Unity's native volume migration functionality. If Unity fails to migrate the volume, host-assisted migration will be triggered. In the following scenarios, Unity storage-assisted volume migration will not be triggered. Instead, host-assisted volume migration will be triggered: - Volume is to be migrated across backends. - Migration of cloned volume. For example, if vol_2 was cloned from vol_1, the storage-assisted volume migration of vol_2 will not be triggered. Retype volume support ~~~~~~~~~~~~~~~~~~~~~ Unity driver supports to change a volume's type after its creation. .. code-block:: console $ cinder retype [--migration-policy ] The --migration-policy is not enabled by default. Some retype operations will require migration based on back-end support. 
In these cases, the storage-assisted migration will be triggered regardless the --migration-policy. For examples: retype between 'thin' and 'thick', retype between 'thick' and 'compressed', retype to type(s) current host doesn't support. QoS support ~~~~~~~~~~~ Unity driver supports ``maxBWS`` and ``maxIOPS`` specs for the back-end consumer type. ``maxBWS`` represents the ``Maximum Bandwidth (KBPS)`` absolute limit, ``maxIOPS`` represents the ``Maximum IO/S`` absolute limit on the Unity respectively. Storage tiering support ~~~~~~~~~~~~~~~~~~~~~~~ Unity supports fully automated storage tiering which requires the FAST VP license activated on the Unity. The OpenStack administrator can use the extra spec key ``storagetype:tiering`` to set the tiering policy of a volume and use the key ``fast_support=' True'`` to let Block Storage scheduler find a volume back end which manages a Unity with FAST VP license activated. There are four supported values for the extra spec key ``storagetype:tiering`` when creating volume. - Key: ``storagetype:tiering`` - Possible values: - ``StartHighThenAuto`` - ``Auto`` - ``HighestAvailable`` - ``LowestAvailable`` - Default: ``StartHighThenAuto`` Run the following commands to create a volume type with tiering policy: .. code-block:: console $ openstack volume type create VolumeOnAutoTier $ openstack volume type set --property storagetype:tiering=Auto --property fast_support=' True' VolumeOnAutoTier Auto-zoning support ~~~~~~~~~~~~~~~~~~~ Unity volume driver supports auto-zoning, and share the same configuration guide for other vendors. Refer to :ref:`fc_zone_manager` for detailed configuration steps. Solution for LUNZ device ~~~~~~~~~~~~~~~~~~~~~~~~ The EMC host team also found LUNZ on all of the hosts, EMC best practice is to present a LUN with HLU 0 to clear any LUNZ devices as they can cause issues on the host. See KB `LUNZ Device `_. To workaround this issue, Unity driver creates a `Dummy LUN` (if not present), and adds it to each host to occupy the `HLU 0` during volume attachment. .. note:: This `Dummy LUN` is shared among all hosts connected to the Unity. Efficient non-disruptive volume backup ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The default implementation in Block Storage for non-disruptive volume backup is not efficient since a cloned volume will be created during backup. An effective approach to backups is to create a snapshot for the volume and connect this snapshot to the Block Storage host for volume backup. SSL support ~~~~~~~~~~~ Admin is able to enable the SSL verification for any communication against Unity REST API. By default, the SSL verification is disabled, user can enable it by following steps: #. Setup the Unity array certificate and import it to the Unity, see section `Storage system certificate` of `Security Configuration Guide `_. #. Import the CA certificate to the Cinder nodes on which the driver is running. #. Enable the changes on cinder nodes and restart the cinder services. .. code-block:: ini [unity] ... driver_ssl_cert_verify = True driver_ssl_cert_path = ... If `driver_ssl_cert_path` is omitted, the system default CA will be used for CA verification. IPv6 support ~~~~~~~~~~~~ This driver can support IPv6-based control path and data path. For control path, please follow below steps: - Enable Unity's Unipshere IPv6 address. - Configure the IPv6 network to make sure that cinder node can access Unishpere via IPv6 address. - Change Cinder config file ``/etc/cinder/cinder.conf``. Make the ``san_ip`` as Unisphere IPv6 address. 
For example, ``san_ip = [fd99:f17b:37d0::100]``. - Restart the Cinder service to make new configuration take effect. **Note**: The IPv6 support on control path depends on the fix of cpython `bug 32185 `__. Please make sure your Python's version includes this bug's fix. For data path, please follow below steps: - On Unity, Create iSCSI interface with IPv6 address. - Configure the IPv6 network to make sure that you can ``ping`` the Unity's iSCSI IPv6 address from the Cinder node. - If you create a volume using Cinder and attach it to a VM, the connection between this VM and volume will be IPv6-based iSCSI. Force detach volume from all hosts ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The user could use `os-force_detach` action to detach a volume from all its attached hosts. For more detail, please refer to https://docs.openstack.org/api-ref/block-storage/v3/?expanded=force-detach-a-volume-detail#force-detach-a-volume Consistent group support ~~~~~~~~~~~~~~~~~~~~~~~~ For a group to support consistent group snapshot, the group specs in the corresponding group type should have the following entry: .. code-block:: ini {'consistent_group_snapshot_enabled': True} Similarly, for a volume to be in a group that supports consistent group snapshots, the volume type extra specs would also have the following entry: .. code-block:: ini {'consistent_group_snapshot_enabled': True} Refer to :doc:`/admin/groups` for command lines detail. Volume replications ~~~~~~~~~~~~~~~~~~~ To enable volume replications, follow below steps: 1. On Unisphere, configure remote system and interfaces for replications. The way could be different depending on the type of replications - sync or async. Refer to `Unity Replication White Paper `_ for more detail. 2. Add `replication_device` to storage backend settings in `cinder.conf`, then restart Cinder Volume service. Example of `cinder.conf` for volume replications: .. code-block:: ini [unity-primary] san_ip = xxx.xxx.xxx.xxx ... replication_device = backend_id:unity-secondary,san_ip:yyy.yyy.yyy.yyy,san_login:username,san_password:****,max_time_out_of_sync:60 - Only one `replication_device` can be configured for each primary backend. - Keys `backend_id`, `san_ip`, `san_password`, and `max_time_out_of_sync` are supported in `replication_device`, while `backend_id` and `san_ip` are required. - `san_password` uses the same one as primary backend's if it is omitted. - `max_time_out_of_sync` is the max time in minutes replications are out of sync. It must be equal or greater than `0`. `0` means sync replications of volumes will be created. Note that remote systems for sync replications need to be created on Unity first. `60` will be used if it is omitted. #. Create a volume type with property `replication_enabled=' True'`. .. code-block:: console $ openstack volume type create --property replication_enabled=' True' type-replication #. Any volumes with volume type of step #3 will failover to secondary backend after `failover_host` is executed. .. code-block:: console $ cinder failover-host --backend_id unity-secondary stein@unity-primary #. Later, they could be failed back. .. code-block:: console $ cinder failover-host --backend_id default stein@unity-primary .. note:: The volume can be deleted even when it is participating in a replication. The replication session will be deleted from Unity before the LUN is deleted. Consistency group replications ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ To enable consistency group replications, follow below steps: 1. 
On Unisphere, configure remote system and interfaces for replications. The way could be different depending on the type of replications - sync or async. Refer to `Unity Replication White Paper `_ for more detail. 2. Add `replication_device` to storage backend settings in `cinder.conf`, then restart Cinder Volume service. Example of `cinder.conf` for volume replications: .. code-block:: ini [unity-primary] san_ip = xxx.xxx.xxx.xxx ... replication_device = backend_id:unity-secondary,san_ip:yyy.yyy.yyy.yyy,san_login:username,san_password:****,max_time_out_of_sync:60 - Only one `replication_device` can be configured for each primary backend. - Keys `backend_id`, `san_ip`, `san_password`, and `max_time_out_of_sync` are supported in `replication_device`, while `backend_id` and `san_ip` are required. - `san_password` uses the same one as primary backend's if it is omitted. - `max_time_out_of_sync` is the max time in minutes replications are out of sync. It must be equal or greater than `0`. `0` means sync replications of volumes will be created. Note that remote systems for sync replications need to be created on Unity first. `60` will be used if it is omitted. 3. Create a volume type with property `replication_enabled=' True'`. .. code-block:: console $ openstack volume type create --property replication_enabled=' True' type-replication 4. Create a consistency group type with properties `consistent_group_snapshot_enabled=' True'` and `consistent_group_replication_enabled=' True'`. .. code-block:: console $ cinder --os-volume-api-version 3.38 group-type-create type-cg-replication $ cinder --os-volume-api-version 3.38 group-type-key type-cg-replication set consistent_group_snapshot_enabled=' True' consistent_group_replication_enabled=' True' 5. Create a group type with volume types support replication. .. code-block:: console $ cinder --os-volume-api-version 3.38 group-create --name test-cg {type-cg-replication-id} type-replication 6. Create volume in the consistency group. .. code-block:: console $ cinder --os-volume-api-version 3.38 create --volume-type type-replication --group-id {test-cg-id} --name {volume-name} {size} 7. Enable consistency group replication. .. code-block:: console $ cinder --os-volume-api-version 3.38 group-enable-replication test-cg 8. Disable consistency group replication. .. code-block:: console $ cinder --os-volume-api-version 3.38 group-disable-replication test-cg 9. Failover consistency group replication. .. code-block:: console $ cinder --os-volume-api-version 3.38 group-failover-replication test-cg 10. Failback consistency group replication. .. code-block:: console $ cinder --os-volume-api-version 3.38 group-failover-replication test-cg --secondary-backend-id default .. note:: Only support group replication of consistency group, see step 4 and 5 to create consistency group support replication. Troubleshooting ~~~~~~~~~~~~~~~ To troubleshoot a failure in OpenStack deployment, the best way is to enable verbose and debug log, at the same time, leverage the build-in `Return request ID to caller `_ to track specific Block Storage command logs. #. Enable verbose log, set following in ``/etc/cinder/cinder.conf`` and restart all Block Storage services: .. code-block:: ini [DEFAULT] ... debug = True verbose = True ... If other projects (usually Compute) are also involved, set `debug` and ``verbose`` to ``True``. #. use ``--debug`` to trigger any problematic Block Storage operation: .. 
code-block:: console # cinder --debug create --name unity_vol1 100 You will see the request ID from the console, for example: .. code-block:: console DEBUG:keystoneauth:REQ: curl -g -i -X POST http://192.168.1.9:8776/v2/e50d22bdb5a34078a8bfe7be89324078/volumes -H "User-Agent: python-cinderclient" -H "Content-Type: application/json" -H "Accept: application/json" -H "X-Auth-Token: {SHA1}bf4a85ad64302b67a39ad7c6f695a9630f39ab0e" -d '{"volume": {"status": "creating", "user_id": null, "name": "unity_vol1", "imageRef": null, "availability_zone": null, "description": null, "multiattach": false, "attach_status": "detached", "volume_type": null, "metadata": {}, "consistencygroup_id": null, "source_volid": null, "snapshot_id": null, "project_id": null, "source_replica": null, "size": 10}}' DEBUG:keystoneauth:RESP: [202] X-Compute-Request-Id: req-3a459e0e-871a-49f9-9796-b63cc48b5015 Content-Type: application/json Content-Length: 804 X-Openstack-Request-Id: req-3a459e0e-871a-49f9-9796-b63cc48b5015 Date: Mon, 12 Dec 2016 09:31:44 GMT Connection: keep-alive #. Use commands like ``grep``, ``awk`` to find the error related to the Block Storage operations. .. code-block:: console # grep "req-3a459e0e-871a-49f9-9796-b63cc48b5015" cinder-volume.log ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/doc/source/configuration/block-storage/drivers/dell-emc-vnx-driver.rst0000664000175000017500000011541100000000000030536 0ustar00zuulzuul00000000000000=================== Dell VNX driver =================== EMC VNX driver interacts with configured VNX array. It supports both iSCSI and FC protocol. The VNX cinder driver performs the volume operations by executing Navisphere CLI (NaviSecCLI) which is a command-line interface used for management, diagnostics, and reporting functions for VNX. It also supports both iSCSI and FC protocol. System requirements ~~~~~~~~~~~~~~~~~~~ - VNX Operational Environment for Block version 5.32 or higher. - VNX Snapshot and Thin Provisioning license should be activated for VNX. - Python library ``storops`` version 0.5.7 or higher to interact with VNX. - Navisphere CLI v7.32 or higher is installed along with the driver. Supported operations ~~~~~~~~~~~~~~~~~~~~ - Create, delete, attach, and detach volumes. - Create, list, and delete volume snapshots. - Create a volume from a snapshot. - Copy an image to a volume. - Clone a volume. - Extend a volume. - Migrate a volume. - Retype a volume. - Get volume statistics. - Create and delete consistency groups. - Create, list, and delete consistency group snapshots. - Modify consistency groups. - Efficient non-disruptive volume backup. - Create a cloned consistency group. - Create a consistency group from consistency group snapshots. - Replication v2.1 support. - Generic Group support. - Revert a volume to a snapshot. Preparation ~~~~~~~~~~~ This section contains instructions to prepare the Block Storage nodes to use the EMC VNX driver. You should install the Navisphere CLI and ensure you have correct zoning configurations. Install Navisphere CLI ---------------------- Navisphere CLI needs to be installed on all Block Storage nodes within an OpenStack deployment. You need to download different versions for different platforms: - For Ubuntu x64, DEB is available at `EMC OpenStack Github `_. - For all other variants of Linux, Navisphere CLI is available at `Downloads for VNX2 Series `_ or `Downloads for VNX1 Series `_. 
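Once Navisphere CLI is installed, it can be useful to confirm that the Block Storage node can reach the array before configuring the back end. The following is only a sketch: the SP IP address and credentials are placeholders for your own environment, and ``-scope 0`` assumes a ``global`` account.

.. code-block:: console

   $ /opt/Navisphere/bin/naviseccli -h <SP A or SP B IP> \
     -user <username> -password <password> -scope 0 getagent

If the command returns the array agent information, the same CLI path and credentials can be reused for the ``naviseccli_path``, ``san_ip``, ``san_login`` and ``san_password`` options in ``cinder.conf``.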
Install Python library storops ------------------------------ ``storops`` is a Python library that interacts with VNX array through Navisphere CLI. Use the following command to install the ``storops`` library: .. code-block:: console $ pip install storops Check array software -------------------- Make sure your have the following software installed for certain features: +--------------------------------------------+---------------------+ | Feature | Software Required | +============================================+=====================+ | All | ThinProvisioning | +--------------------------------------------+---------------------+ | All | VNXSnapshots | +--------------------------------------------+---------------------+ | FAST cache support | FASTCache | +--------------------------------------------+---------------------+ | Create volume with type ``compressed`` | Compression | +--------------------------------------------+---------------------+ | Create volume with type ``deduplicated`` | Deduplication | +--------------------------------------------+---------------------+ **Required software** You can check the status of your array software in the :guilabel:`Software` page of :guilabel:`Storage System Properties`. Here is how it looks like: .. figure:: ../../figures/emc-enabler.png Network configuration --------------------- For the FC Driver, FC zoning is properly configured between the hosts and the VNX. Check :ref:`register-fc-port-with-vnx` for reference. For the iSCSI Driver, make sure your VNX iSCSI port is accessible by your hosts. Check :ref:`register-iscsi-port-with-vnx` for reference. You can use ``initiator_auto_registration = True`` configuration to avoid registering the ports manually. Check the detail of the configuration in :ref:`emc-vnx-conf` for reference. If you are trying to setup multipath, refer to :ref:`multipath-setup`. .. _emc-vnx-conf: Back-end configuration ~~~~~~~~~~~~~~~~~~~~~~ Make the following changes in the ``/etc/cinder/cinder.conf`` file. Minimum configuration --------------------- Here is a sample of minimum back-end configuration. See the following sections for the detail of each option. Set ``storage_protocol = iscsi`` if iSCSI protocol is used. .. code-block:: ini [DEFAULT] enabled_backends = vnx_array1 [vnx_array1] san_ip = 10.10.72.41 san_login = sysadmin san_password = sysadmin naviseccli_path = /opt/Navisphere/bin/naviseccli volume_driver = cinder.volume.drivers.dell_emc.vnx.driver.VNXDriver initiator_auto_registration = True storage_protocol = fc Multiple back-end configuration ------------------------------- Here is a sample of a minimum back-end configuration. See following sections for the detail of each option. Set ``storage_protocol = iscsi`` if iSCSI protocol is used. .. code-block:: ini [DEFAULT] enabled_backends = backendA, backendB [backendA] storage_vnx_pool_names = Pool_01_SAS, Pool_02_FLASH san_ip = 10.10.72.41 storage_vnx_security_file_dir = /etc/secfile/array1 naviseccli_path = /opt/Navisphere/bin/naviseccli volume_driver = cinder.volume.drivers.dell_emc.vnx.driver.VNXDriver initiator_auto_registration = True storage_protocol = fc [backendB] storage_vnx_pool_names = Pool_02_SAS san_ip = 10.10.26.101 san_login = username san_password = password naviseccli_path = /opt/Navisphere/bin/naviseccli volume_driver = cinder.volume.drivers.dell_emc.vnx.driver.VNXDriver initiator_auto_registration = True storage_protocol = fc The value of the option ``storage_protocol`` can be either ``fc`` or ``iscsi``, which is case insensitive. 
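With multiple back ends enabled, the scheduler decides where each volume is created. To steer volumes to a specific back end, a ``volume_backend_name`` can be set in each back-end section and referenced from a volume type. The snippet below is only a sketch: the ``volume_backend_name`` value and the volume type name are hypothetical and are not part of the sample configuration above.

.. code-block:: ini

   [backendA]
   # options from the sample above, plus:
   volume_backend_name = vnx_backend_a

.. code-block:: console

   $ openstack volume type create vnx_type_a
   $ openstack volume type set --property volume_backend_name=vnx_backend_a vnx_type_a

Volumes created with the ``vnx_type_a`` type will then be scheduled only to back ends that report ``vnx_backend_a``.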
For more details on multiple back ends, see :doc:`Configure multiple-storage back ends `. Required configurations ----------------------- **IP of the VNX Storage Processors** Specify SP A or SP B IP to connect: .. code-block:: ini san_ip = **VNX login credentials** There are two ways to specify the credentials. - Use plain text username and password. Supply for plain username and password: .. code-block:: ini san_login = san_password = storage_vnx_authentication_type = global Valid values for ``storage_vnx_authentication_type`` are: ``global`` (default), ``local``, and ``ldap``. - Use Security file. This approach avoids the plain text password in your cinder configuration file. Supply a security file as below: .. code-block:: ini storage_vnx_security_file_dir = Check Unisphere CLI user guide or :ref:`authenticate-by-security-file` for how to create a security file. **Path to your Unisphere CLI** Specify the absolute path to your naviseccli: .. code-block:: ini naviseccli_path = /opt/Navisphere/bin/naviseccli **Driver's storage protocol** - For the FC Driver, add the following option: .. code-block:: ini volume_driver = cinder.volume.drivers.dell_emc.vnx.driver.VNXDriver storage_protocol = fc - For iSCSI Driver, add the following option: .. code-block:: ini volume_driver = cinder.volume.drivers.dell_emc.vnx.driver.VNXDriver storage_protocol = iscsi Optional configurations ~~~~~~~~~~~~~~~~~~~~~~~ VNX pool names -------------- Specify the list of pools to be managed, separated by commas. They should already exist in VNX. .. code-block:: ini storage_vnx_pool_names = pool 1, pool 2 If this value is not specified, all pools of the array will be used. **Initiator auto registration** When ``initiator_auto_registration`` is set to ``True``, the driver will automatically register initiators to all working target ports of the VNX array during volume attaching (The driver will skip those initiators that have already been registered) if the option ``io_port_list`` is not specified in the ``cinder.conf`` file. If the user wants to register the initiators with some specific ports but not register with the other ports, this functionality should be disabled. When a comma-separated list is given to ``io_port_list``, the driver will only register the initiator to the ports specified in the list and only return target port(s) which belong to the target ports in the ``io_port_list`` instead of all target ports. - Example for FC ports: .. code-block:: ini io_port_list = a-1,B-3 ``a`` or ``B`` is *Storage Processor*, number ``1`` and ``3`` are *Port ID*. - Example for iSCSI ports: .. code-block:: ini io_port_list = a-1-0,B-3-0 ``a`` or ``B`` is *Storage Processor*, the first numbers ``1`` and ``3`` are *Port ID* and the second number ``0`` is *Virtual Port ID* .. note:: - Rather than de-registered, the registered ports will be simply bypassed whatever they are in ``io_port_list`` or not. - The driver will raise an exception if ports in ``io_port_list`` do not exist in VNX during startup. Force delete volumes in storage group ------------------------------------- Some ``available`` volumes may remain in storage group on the VNX array due to some OpenStack timeout issue. But the VNX array do not allow the user to delete the volumes which are in storage group. Option ``force_delete_lun_in_storagegroup`` is introduced to allow the user to delete the ``available`` volumes in this tricky situation. 
When ``force_delete_lun_in_storagegroup`` is set to ``True`` in the back-end section, the driver will move the volumes out of the storage groups and then delete them if the user tries to delete the volumes that remain in the storage group on the VNX array. The default value of ``force_delete_lun_in_storagegroup`` is ``True``. Over subscription in thin provisioning -------------------------------------- Over subscription allows that the sum of all volume's capacity (provisioned capacity) to be larger than the pool's total capacity. ``max_over_subscription_ratio`` in the back-end section is the ratio of provisioned capacity over total capacity. The default value of ``max_over_subscription_ratio`` is 20.0, which means the provisioned capacity can be 20 times of the total capacity. If the value of this ratio is set larger than 1.0, the provisioned capacity can exceed the total capacity. Storage group automatic deletion -------------------------------- For volume attaching, the driver has a storage group on VNX for each compute node hosting the vm instances which are going to consume VNX Block Storage (using compute node's host name as storage group's name). All the volumes attached to the VM instances in a compute node will be put into the storage group. If ``destroy_empty_storage_group`` is set to ``True``, the driver will remove the empty storage group after its last volume is detached. For data safety, it does not suggest to set ``destroy_empty_storage_group=True`` unless the VNX is exclusively managed by one Block Storage node because consistent ``lock_path`` is required for operation synchronization for this behavior. Initiator auto deregistration ----------------------------- Enabling storage group automatic deletion is the precondition of this function. If ``initiator_auto_deregistration`` is set to ``True`` is set, the driver will deregister all FC and iSCSI initiators of the host after its storage group is deleted. FC SAN auto zoning ------------------ The EMC VNX driver supports FC SAN auto zoning when ``ZoneManager`` is configured and ``zoning_mode`` is set to ``fabric`` in ``cinder.conf``. For ZoneManager configuration, refer to :doc:`../fc-zoning`. Volume number threshold ----------------------- In VNX, there is a limitation on the number of pool volumes that can be created in the system. When the limitation is reached, no more pool volumes can be created even if there is remaining capacity in the storage pool. In other words, if the scheduler dispatches a volume creation request to a back end that has free capacity but reaches the volume limitation, the creation fails. The default value of ``check_max_pool_luns_threshold`` is ``False``. When ``check_max_pool_luns_threshold=True``, the pool-based back end will check the limit and will report 0 free capacity to the scheduler if the limit is reached. So the scheduler will be able to skip this kind of pool-based back end that runs out of the pool volume number. .. note:: From Queens, ``check_max_pool_luns_threshold`` is obsolete. And the behavior is like where ``check_max_pool_luns_threshold`` is set to ``True``. iSCSI initiators ---------------- ``iscsi_initiators`` is a dictionary of IP addresses of the iSCSI initiator ports on OpenStack compute and block storage nodes which want to connect to VNX via iSCSI. If this option is configured, the driver will leverage this information to find an accessible iSCSI target portal for the initiator when attaching volume. 
Otherwise, the iSCSI target portal will be chosen in a relative random way. .. note:: This option is only valid for iSCSI driver. Here is an example. VNX will connect ``host1`` with ``10.0.0.1`` and ``10.0.0.2``. And it will connect ``host2`` with ``10.0.0.3``. The key name (``host1`` in the example) should be the output of :command:`hostname` command. .. code-block:: ini iscsi_initiators = {"host1":["10.0.0.1", "10.0.0.2"],"host2":["10.0.0.3"]} Default timeout --------------- Specify the timeout in minutes for operations like LUN migration, LUN creation, etc. For example, LUN migration is a typical long running operation, which depends on the LUN size and the load of the array. An upper bound in the specific deployment can be set to avoid unnecessary long wait. The default value for this option is ``infinite``. .. code-block:: ini default_timeout = 60 Max LUNs per storage group -------------------------- The ``max_luns_per_storage_group`` specify the maximum number of LUNs in a storage group. Default value is 255. It is also the maximum value supported by VNX. Ignore pool full threshold -------------------------- If ``ignore_pool_full_threshold`` is set to ``True``, driver will force LUN creation even if the full threshold of pool is reached. Default to ``False``. Default value for async migration --------------------------------- Option ``vnx_async_migrate`` is used to set the default value of async migration for the backend. The default value of this option is `True` if it isn't set in ``cinder.conf`` to preserve compatibility. If ``async_migrate`` is not set in metadata of volume, the value of this option will be used. Otherwise, ``async_migrate`` value in metadata will override the value of this option. For more detail, refer to `asynchronous migration support`_. Extra spec options ~~~~~~~~~~~~~~~~~~ Extra specs are used in volume types created in Block Storage as the preferred property of the volume. The Block Storage scheduler will use extra specs to find the suitable back end for the volume and the Block Storage driver will create the volume based on the properties specified by the extra spec. Use the following command to create a volume type: .. code-block:: console $ openstack volume type create demoVolumeType Use the following command to update the extra spec of a volume type: .. code-block:: console $ openstack volume type set --property provisioning:type=thin --property thick_provisioning_support=' True' demoVolumeType The following sections describe the VNX extra keys. Provisioning type ----------------- - Key: ``provisioning:type`` - Possible Values: - ``thick`` Volume is fully provisioned. Run the following commands to create a ``thick`` volume type: .. code-block:: console $ openstack volume type create ThickVolumeType $ openstack volume type set --property provisioning:type=thick --property thick_provisioning_support=' True' ThickVolumeType - ``thin`` Volume is virtually provisioned. Run the following commands to create a ``thin`` volume type: .. code-block:: console $ openstack volume type create ThinVolumeType $ openstack volume type set --property provisioning:type=thin --property thin_provisioning_support=' True' ThinVolumeType - ``deduplicated`` Volume is ``thin`` and deduplication is enabled. The administrator shall go to VNX to configure the system level deduplication settings. 
To create a deduplicated volume, the VNX Deduplication license must be activated on VNX, and specify ``deduplication_support=True`` to let Block Storage scheduler find the proper volume back end. Run the following commands to create a ``deduplicated`` volume type: .. code-block:: console $ openstack volume type create DeduplicatedVolumeType $ openstack volume type set --property provisioning:type=deduplicated --property deduplicated_support=' True' DeduplicatedVolumeType - ``compressed`` Volume is ``thin`` and compression is enabled. The administrator shall go to the VNX to configure the system level compression settings. To create a compressed volume, the VNX Compression license must be activated on VNX, and use ``compression_support=True`` to let Block Storage scheduler find a volume back end. VNX does not support creating snapshots on a compressed volume. Run the following commands to create a ``compressed`` volume type: .. code-block:: console $ openstack volume type create CompressedVolumeType $ openstack volume type set --property provisioning:type=compressed --property compression_support=' True' CompressedVolumeType - Default: ``thick`` .. note:: ``provisioning:type`` replaces the old spec key ``storagetype:provisioning``. The latter one is obsolete since the *Mitaka* release. Storage tiering support ----------------------- - Key: ``storagetype:tiering`` - Possible values: - ``StartHighThenAuto`` - ``Auto`` - ``HighestAvailable`` - ``LowestAvailable`` - ``NoMovement`` - Default: ``StartHighThenAuto`` VNX supports fully automated storage tiering which requires the FAST license activated on the VNX. The OpenStack administrator can use the extra spec key ``storagetype:tiering`` to set the tiering policy of a volume and use the key ``fast_support=' True'`` to let Block Storage scheduler find a volume back end which manages a VNX with FAST license activated. Here are the five supported values for the extra spec key ``storagetype:tiering``: Run the following commands to create a volume type with tiering policy: .. code-block:: console $ openstack volume type create ThinVolumeOnAutoTier $ openstack volume type set --property provisioning:type=thin --property storagetype:tiering=Auto --property fast_support=' True' ThinVolumeOnAutoTier .. note:: The tiering policy cannot be applied to a deduplicated volume. Tiering policy of the deduplicated LUN align with the settings of the pool. FAST cache support ------------------ - Key: ``fast_cache_enabled`` - Possible values: - ``True`` - ``False`` - Default: ``False`` VNX has FAST Cache feature which requires the FAST Cache license activated on the VNX. Volume will be created on the backend with FAST cache enabled when `` True`` is specified. Pool name --------- - Key: ``pool_name`` - Possible values: name of the storage pool managed by cinder - Default: None If the user wants to create a volume on a certain storage pool in a back end that manages multiple pools, a volume type with a extra spec specified storage pool should be created first, then the user can use this volume type to create the volume. Run the following commands to create the volume type: .. code-block:: console $ openstack volume type create HighPerf $ openstack volume type set --property pool_name=Pool_02_SASFLASH --property volume_backend_name=vnx_41 HighPerf Obsolete extra specs -------------------- .. 
note:: *DO NOT* use the following obsolete extra spec keys: - ``storagetype:provisioning`` - ``storagetype:pool`` Force detach ------------ The user could use `os-force_detach` action to detach a volume from all its attached hosts. For more detail, please refer to https://docs.openstack.org/api-ref/block-storage/v3/?expanded=force-detach-a-volume-detail#force-detach-a-volume Advanced features ~~~~~~~~~~~~~~~~~ Snap copy --------- - Metadata Key: ``snapcopy`` - Possible Values: - ``True`` or ``true`` - ``False`` or ``false`` - Default: `False` VNX driver supports snap copy which accelerates the process for creating a copied volume. By default, the driver will use `asynchronous migration support`_, which will start a VNX migration session. When snap copy is used, driver creates a snapshot and mounts it as a volume for the 2 kinds of operations which will be instant even for large volumes. To enable this functionality, append ``--metadata snapcopy=True`` when creating cloned volume or creating volume from snapshot. .. code-block:: console $ cinder create --source-volid --name "cloned_volume" --metadata snapcopy=True Or .. code-block:: console $ cinder create --snapshot-id --name "vol_from_snapshot" --metadata snapcopy=True The newly created volume is a snap copy instead of a full copy. If a full copy is needed, retype or migrate can be used to convert the snap-copy volume to a full-copy volume which may be time-consuming. You can determine whether the volume is a snap-copy volume or not by showing its metadata. If the ``snapcopy`` in metadata is ``True`` or ``true``, the volume is a snap-copy volume. Otherwise, it is a full-copy volume. .. code-block:: console $ cinder metadata-show **Constraints** - The number of snap-copy volumes created from a single source volume is limited to 255 at one point in time. - The source volume which has snap-copy volume can not be deleted or migrated. - snapcopy volume will be change to full-copy volume after host-assisted or storage-assisted migration. - snapcopy volume can not be added to consisgroup because of VNX limitation. Efficient non-disruptive volume backup -------------------------------------- The default implementation in Block Storage for non-disruptive volume backup is not efficient since a cloned volume will be created during backup. The approach of efficient backup is to create a snapshot for the volume and connect this snapshot (a mount point in VNX) to the Block Storage host for volume backup. This eliminates migration time involved in volume clone. **Constraints** - Backup creation for a snap-copy volume is not allowed if the volume status is ``in-use`` since snapshot cannot be taken from this volume. Configurable migration rate --------------------------- VNX cinder driver is leveraging the LUN migration from the VNX. LUN migration is involved in cloning, migrating, retyping, and creating volume from snapshot. When admin set ``migrate_rate`` in volume's ``metadata``, VNX driver can start migration with specified rate. The available values for the ``migrate_rate`` are ``high``, ``asap``, ``low`` and ``medium``. The following is an example to set ``migrate_rate`` to ``asap``: .. code-block:: console $ cinder metadata set migrate_rate=asap After set, any cinder volume operations involving VNX LUN migration will take the value as the migration rate. To restore the migration rate to default, unset the metadata as following: .. code-block:: console $ cinder metadata unset migrate_rate .. 
note:: Do not use the ``asap`` migration rate when the system is in production, as the normal host I/O may be interrupted. Use asap only when the system is offline (free of any host-level I/O). Replication v2.1 support ------------------------ Cinder introduces Replication v2.1 support in Mitaka, it supports fail-over and fail-back replication for specific back end. In VNX cinder driver, **MirrorView** is used to set up replication for the volume. To enable this feature, you need to set configuration in ``cinder.conf`` as below: .. code-block:: ini replication_device = backend_id:, san_ip:192.168.1.2, san_login:admin, san_password:admin, naviseccli_path:/opt/Navisphere/bin/naviseccli, storage_vnx_authentication_type:global, storage_vnx_security_file_dir: Currently, only synchronized mode **MirrorView** is supported, and one volume can only have 1 secondary storage system. Therefore, you can have only one ``replication_device`` presented in driver configuration section. To create a replication enabled volume, you need to create a volume type: .. code-block:: console $ openstack volume type create replication-type $ openstack volume type set --property replication_enabled=" True" replication-type And then create volume with above volume type: .. code-block:: console $ openstack volume create replication-volume --type replication-type --size 1 **Supported operations** - Create volume - Create cloned volume - Create volume from snapshot - Fail-over volume: .. code-block:: console $ cinder failover-host --backend_id - Fail-back volume: .. code-block:: console $ cinder failover-host --backend_id default **Requirements** - 2 VNX systems must be in same domain. - For iSCSI MirrorView, user needs to setup iSCSI connection before enable replication in Cinder. - For FC MirrorView, user needs to zone specific FC ports from 2 VNX system together. - MirrorView Sync enabler( **MirrorView/S** ) installed on both systems. - Write intent log enabled on both VNX systems. For more information on how to configure, please refer to: `MirrorView-Knowledgebook:-Releases-30-–-33 `_ Asynchronous migration support ------------------------------ VNX Cinder driver now supports asynchronous migration during volume cloning. The driver now using asynchronous migration when creating a volume from source as the default cloning method. The driver will return immediately after the migration session starts on the VNX, which dramatically reduces the time before a volume is available for use. To disable this feature, user needs to do any one of below actions: - Configure ``vnx_async_migrate = False`` for the backend in ``cinder.conf``, then restart Cinder services. - Add ``--metadata async_migrate=False`` when creating new volume from source. Be aware, ``async_migrate`` in metadata overrides the option ``vnx_async_migrate`` when both are set. **Constraints** - Before the migration finishes, snapshots cannot be created from the source volume, which could affect subsequent clones from the same source volume. The typical affected use case is that creating volume-2 via cloning volume-1 immediately after creating volume-1 via cloning volume-0. To achieve so, users are advised to take any one of below actions: 1) wait for the first clone finishing, or 2) create volume-2 via cloning volume-0 instead of volume-1, or 3) create volume-1 with ``--metadata async_migrate=False``. Best practice ~~~~~~~~~~~~~ .. _multipath-setup: Multipath setup --------------- Enabling multipath volume access is recommended for robust data access. 
The major configuration includes: #. Install ``multipath-tools``, ``sysfsutils`` and ``sg3-utils`` on the nodes hosting compute and ``cinder-volume`` services. Check the operating system manual for the system distribution for specific installation steps. For Red Hat based distributions, they should be ``device-mapper-multipath``, ``sysfsutils`` and ``sg3_utils``. #. Specify ``use_multipath_for_image_xfer=true`` in the ``cinder.conf`` file for each FC/iSCSI back end. #. Specify ``volume_use_multipath=True`` in ``libvirt`` section of the ``nova.conf`` file. This option is valid for both iSCSI and FC driver. In versions prior to Newton, the option was called ``iscsi_use_multipath``. For multipath-tools, here is an EMC recommended sample of ``/etc/multipath.conf`` file. ``user_friendly_names`` is not specified in the configuration and thus it will take the default value ``no``. It is not recommended to set it to ``yes`` because it may fail operations such as VM live migration. .. code-block:: vim blacklist { # Skip the files under /dev that are definitely not FC/iSCSI devices # Different system may need different customization devnode "^(ram|raw|loop|fd|md|dm-|sr|scd|st)[0-9]*" devnode "^hd[a-z][0-9]*" devnode "^cciss!c[0-9]d[0-9]*[p[0-9]*]" # Skip LUNZ device from VNX device { vendor "DGC" product "LUNZ" } } defaults { user_friendly_names no flush_on_last_del yes } devices { # Device attributed for EMC CLARiiON and VNX series ALUA device { vendor "DGC" product ".*" product_blacklist "LUNZ" path_grouping_policy group_by_prio path_selector "round-robin 0" path_checker emc_clariion features "1 queue_if_no_path" hardware_handler "1 alua" prio alua failback immediate } } .. note:: When multipath is used in OpenStack, multipath faulty devices may come out in Nova-Compute nodes due to different issues (`Bug 1336683 `_ is a typical example). A solution to completely avoid faulty devices has not been found yet. ``faulty_device_cleanup.py`` mitigates this issue when VNX iSCSI storage is used. Cloud administrators can deploy the script in all Nova-Compute nodes and use a CRON job to run the script on each Nova-Compute node periodically so that faulty devices will not stay too long. Refer to: `VNX faulty device cleanup `_ for detailed usage and the script. Restrictions and limitations ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ iSCSI port cache ---------------- EMC VNX iSCSI driver caches the iSCSI ports information, so that the user should restart the ``cinder-volume`` service or wait for seconds (which is configured by ``periodic_interval`` in the ``cinder.conf`` file) before any volume attachment operation after changing the iSCSI port configurations. Otherwise the attachment may fail because the old iSCSI port configurations were used. No extending for volume with snapshots -------------------------------------- VNX does not support extending the thick volume which has a snapshot. If the user tries to extend a volume which has a snapshot, the status of the volume would change to ``error_extending``. Limitations for deploying cinder on computer node ------------------------------------------------- It is not recommended to deploy the driver on a compute node if ``cinder upload-to-image --force True`` is used against an in-use volume. Otherwise, ``cinder upload-to-image --force True`` will terminate the data access of the vm instance to the volume. 
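For quick reference, the two multipath options described in the Multipath setup section above map to the following configuration snippets. This is only a sketch: the back-end section name is illustrative, and the options must be added to the files and sections named in that section.

.. code-block:: ini

   # cinder.conf -- add to each FC/iSCSI back-end section
   # (the section name below is an example)
   [vnx_backend]
   use_multipath_for_image_xfer = true

   # nova.conf -- libvirt section on the compute nodes
   [libvirt]
   volume_use_multipath = True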
Storage group with host names in VNX ------------------------------------ When the driver notices that there is no existing storage group that has the host name as the storage group name, it will create the storage group and also add the compute node's or Block Storage node's registered initiators into the storage group. If the driver notices that the storage group already exists, it will assume that the registered initiators have also been put into it and skip the operations above for better performance. It is recommended that the storage administrator does not create the storage group manually and instead relies on the driver for the preparation. If the storage administrator needs to create the storage group manually for some special requirements, the correct registered initiators should be put into the storage group as well (otherwise the following volume attaching operations will fail). EMC storage-assisted volume migration ------------------------------------- EMC VNX driver supports storage-assisted volume migration, when the user starts migrating with ``cinder migrate --force-host-copy False `` or ``cinder migrate ``, cinder will try to leverage the VNX's native volume migration functionality. In following scenarios, VNX storage-assisted volume migration will not be triggered: - ``in-use`` volume migration between back ends with different storage protocol, for example, FC and iSCSI. - Volume is to be migrated across arrays. Appendix ~~~~~~~~ .. _authenticate-by-security-file: Authenticate by security file ----------------------------- VNX credentials are necessary when the driver connects to the VNX system. Credentials in ``global``, ``local`` and ``ldap`` scopes are supported. There are two approaches to provide the credentials. The recommended one is using the Navisphere CLI security file to provide the credentials which can get rid of providing the plain text credentials in the configuration file. Following is the instruction on how to do this. #. Find out the Linux user id of the ``cinder-volume`` processes. Assuming the ``cinder-volume`` service is running by the account ``cinder``. #. Run ``su`` as root user. #. In ``/etc/passwd`` file, change ``cinder:x:113:120::/var/lib/cinder:/bin/false`` to ``cinder:x:113:120::/var/lib/cinder:/bin/bash`` (This temporary change is to make step 4 work.) #. Save the credentials on behalf of ``cinder`` user to a security file (assuming the array credentials are ``admin/admin`` in ``global`` scope). In the command below, the ``-secfilepath`` switch is used to specify the location to save the security file. .. code-block:: console # su -l cinder -c \ '/opt/Navisphere/bin/naviseccli -AddUserSecurity -user admin -password admin -scope 0 -secfilepath ' #. Change ``cinder:x:113:120::/var/lib/cinder:/bin/bash`` back to ``cinder:x:113:120::/var/lib/cinder:/bin/false`` in ``/etc/passwd`` file. #. Remove the credentials options ``san_login``, ``san_password`` and ``storage_vnx_authentication_type`` from ``cinder.conf`` file. (normally it is ``/etc/cinder/cinder.conf`` file). Add option ``storage_vnx_security_file_dir`` and set its value to the directory path of your security file generated in the above step. Omit this option if ``-secfilepath`` is not used in the above step. #. Restart the ``cinder-volume`` service to validate the change. .. _register-fc-port-with-vnx: Register FC port with VNX ------------------------- This configuration is only required when ``initiator_auto_registration=False``. 
To access VNX storage, the Compute nodes should be registered on VNX first if initiator auto registration is not enabled. To perform ``Copy Image to Volume`` and ``Copy Volume to Image`` operations, the nodes running the ``cinder-volume`` service (Block Storage nodes) must be registered with the VNX as well. The steps mentioned below are for the compute nodes. Follow the same steps for the Block Storage nodes also (The steps can be skipped if initiator auto registration is enabled). #. Assume ``20:00:00:24:FF:48:BA:C2:21:00:00:24:FF:48:BA:C2`` is the WWN of a FC initiator port name of the compute node whose host name and IP are ``myhost1`` and ``10.10.61.1``. Register ``20:00:00:24:FF:48:BA:C2:21:00:00:24:FF:48:BA:C2`` in Unisphere: #. Log in to :guilabel:`Unisphere`, go to :menuselection:`FNM0000000000 > Hosts > Initiators`. #. Refresh and wait until the initiator ``20:00:00:24:FF:48:BA:C2:21:00:00:24:FF:48:BA:C2`` with SP Port ``A-1`` appears. #. Click the :guilabel:`Register` button, select :guilabel:`CLARiiON/VNX` and enter the host name (which is the output of the :command:`hostname` command) and IP address: - Hostname: ``myhost1`` - IP: ``10.10.61.1`` - Click :guilabel:`Register`. #. Then host ``10.10.61.1`` will appear under :menuselection:`Hosts > Host List` as well. #. Register the ``wwn`` with more ports if needed. .. _register-iscsi-port-with-vnx: Register iSCSI port with VNX ---------------------------- This configuration is only required when ``initiator_auto_registration=False``. To access VNX storage, the compute nodes should be registered on VNX first if initiator auto registration is not enabled. To perform ``Copy Image to Volume`` and ``Copy Volume to Image`` operations, the nodes running the ``cinder-volume`` service (Block Storage nodes) must be registered with the VNX as well. The steps mentioned below are for the compute nodes. Follow the same steps for the Block Storage nodes also (The steps can be skipped if initiator auto registration is enabled). #. On the compute node with IP address ``10.10.61.1`` and host name ``myhost1``, execute the following commands (assuming ``10.10.61.35`` is the iSCSI target): #. Start the iSCSI initiator service on the node: .. code-block:: console # /etc/init.d/open-iscsi start #. Discover the iSCSI target portals on VNX: .. code-block:: console # iscsiadm -m discovery -t st -p 10.10.61.35 #. Change directory to ``/etc/iscsi`` : .. code-block:: console # cd /etc/iscsi #. Find out the ``iqn`` of the node: .. code-block:: console # more initiatorname.iscsi #. Log in to :guilabel:`VNX` from the compute node using the target corresponding to the SPA port: .. code-block:: console # iscsiadm -m node -T iqn.1992-04.com.emc:cx.apm01234567890.a0 -p 10.10.61.35 -l #. Assume ``iqn.1993-08.org.debian:01:1a2b3c4d5f6g`` is the initiator name of the compute node. Register ``iqn.1993-08.org.debian:01:1a2b3c4d5f6g`` in Unisphere: #. Log in to :guilabel:`Unisphere`, go to :menuselection:`FNM0000000000 > Hosts > Initiators`. #. Refresh and wait until the initiator ``iqn.1993-08.org.debian:01:1a2b3c4d5f6g`` with SP Port ``A-8v0`` appears. #. Click the :guilabel:`Register` button, select :guilabel:`CLARiiON/VNX` and enter the host name (which is the output of the :command:`hostname` command) and IP address: - Hostname: ``myhost1`` - IP: ``10.10.61.1`` - Click :guilabel:`Register`. #. Then host ``10.10.61.1`` will appear under :menuselection:`Hosts > Host List` as well. #. Log out :guilabel:`iSCSI` on the node: .. 
code-block:: console # iscsiadm -m node -u #. Log in to :guilabel:`VNX` from the compute node using the target corresponding to the SPB port: .. code-block:: console # iscsiadm -m node -T iqn.1992-04.com.emc:cx.apm01234567890.b8 -p 10.10.61.36 -l #. In ``Unisphere``, register the initiator with the SPB port. #. Log out :guilabel:`iSCSI` on the node: .. code-block:: console # iscsiadm -m node -u #. Register the ``iqn`` with more ports if needed. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/doc/source/configuration/block-storage/drivers/dell-emc-xtremio-driver.rst0000664000175000017500000001612700000000000031416 0ustar00zuulzuul00000000000000===================================== Dell XtremIO Block Storage driver ===================================== The high performance XtremIO All Flash Array (AFA) offers Block Storage services to OpenStack. Using the driver, OpenStack Block Storage hosts can connect to an XtremIO Storage cluster. This section explains how to configure and connect the block storage nodes to an XtremIO storage cluster. Support matrix ~~~~~~~~~~~~~~ XtremIO version 4.x is supported. Supported operations ~~~~~~~~~~~~~~~~~~~~ - Create, delete, clone, attach, and detach volumes. - Create and delete volume snapshots. - Create a volume from a snapshot. - Copy an image to a volume. - Copy a volume to an image. - Extend a volume. - Manage and unmanage a volume. - Manage and unmanage a snapshot. - Get volume statistics. - Create, modify, delete, and list consistency groups. - Create, modify, delete, and list snapshots of consistency groups. - Create consistency group from consistency group or consistency group snapshot. - Volume Migration (host assisted) XtremIO Block Storage driver configuration ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Edit the ``cinder.conf`` file by adding the configuration below under the [DEFAULT] section of the file in case of a single back end or under a separate section in case of multiple back ends (for example [XTREMIO]). The configuration file is usually located under the following path ``/etc/cinder/cinder.conf``. .. config-table:: :config-target: XtremIO cinder.volume.drivers.dell_emc.xtremio For a configuration example, refer to the configuration :ref:`emc_extremio_configuration_example`. XtremIO driver name ------------------- Configure the driver name by setting the following parameter in the ``cinder.conf`` file: - For iSCSI: .. code-block:: ini volume_driver = cinder.volume.drivers.dell_emc.xtremio.XtremIOISCSIDriver - For Fibre Channel: .. code-block:: ini volume_driver = cinder.volume.drivers.dell_emc.xtremio.XtremIOFibreChannelDriver XtremIO management server (XMS) IP ---------------------------------- To retrieve the management IP, use the :command:`show-xms` CLI command. Configure the management IP by adding the following parameter: .. code-block:: ini san_ip = XMS Management IP XtremIO cluster name -------------------- In XtremIO version 4.0, a single XMS can manage multiple cluster back ends. In such setups, the administrator is required to specify the cluster name (in addition to the XMS IP). Each cluster must be defined as a separate back end. To retrieve the cluster name, run the :command:`show-clusters` CLI command. Configure the cluster name by adding the following parameter: .. code-block:: ini xtremio_cluster_name = Cluster-Name .. note:: When a single cluster is managed in XtremIO version 4.0, the cluster name is not required. 
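To illustrate the multi-cluster case described above, each cluster managed by the same XMS is defined as its own back end. The sketch below is illustrative only: the back-end section names, cluster names, and ``XMS_IP`` placeholder are examples, and the credential options described in the following sections still need to be added.

.. code-block:: ini

   [DEFAULT]
   enabled_backends = XtremIO-Cluster01, XtremIO-Cluster02

   [XtremIO-Cluster01]
   volume_driver = cinder.volume.drivers.dell_emc.xtremio.XtremIOFibreChannelDriver
   san_ip = XMS_IP
   xtremio_cluster_name = Cluster01

   [XtremIO-Cluster02]
   volume_driver = cinder.volume.drivers.dell_emc.xtremio.XtremIOFibreChannelDriver
   san_ip = XMS_IP
   xtremio_cluster_name = Cluster02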
XtremIO user credentials ------------------------ OpenStack Block Storage requires an XtremIO XMS user with administrative privileges. XtremIO recommends creating a dedicated OpenStack user account that holds an administrative user role. Refer to the XtremIO User Guide for details on user account management. Create an XMS account using either the XMS GUI or the :command:`add-user-account` CLI command. Configure the user credentials by adding the following parameters: .. code-block:: ini san_login = XMS username san_password = XMS username password Multiple back ends ~~~~~~~~~~~~~~~~~~ Configuring multiple storage back ends enables you to create several back-end storage solutions that serve the same OpenStack Compute resources. When a volume is created, the scheduler selects the appropriate back end to handle the request, according to the specified volume type. Setting thin provisioning and multipathing parameters ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ To support thin provisioning and multipathing in the XtremIO Array, the following parameters from the Nova and Cinder configuration files should be modified as follows: - Thin Provisioning All XtremIO volumes are thin provisioned. The default value of 20 should be maintained for the ``max_over_subscription_ratio`` parameter. The ``use_cow_images`` parameter in the ``nova.conf`` file should be set to ``False`` as follows: .. code-block:: ini use_cow_images = False - Multipathing The ``use_multipath_for_image_xfer`` parameter in the ``cinder.conf`` file should be set to ``True`` for each backend or in ``[backend_defaults]`` section as a common configuration for all backends. .. code-block:: ini use_multipath_for_image_xfer = True Image service optimization ~~~~~~~~~~~~~~~~~~~~~~~~~~ Limit the number of copies (XtremIO snapshots) taken from each image cache. .. code-block:: ini xtremio_volumes_per_glance_cache = 100 The default value is ``100``. A value of ``0`` ignores the limit and defers to the array maximum as the effective limit. SSL certification ~~~~~~~~~~~~~~~~~ To enable SSL certificate validation, modify the following option in the ``cinder.conf`` file: .. code-block:: ini driver_ssl_cert_verify = true By default, SSL certificate validation is disabled. To specify a non-default path to ``CA_Bundle`` file or directory with certificates of trusted CAs: .. code-block:: ini driver_ssl_cert_path = Certificate path Configuring CHAP ~~~~~~~~~~~~~~~~ The XtremIO Block Storage driver supports CHAP initiator authentication and discovery. If CHAP initiator authentication is required, set the CHAP Authentication mode to initiator. To set the CHAP initiator mode using CLI, run the following XMCLI command: .. code-block:: console $ modify-chap chap-authentication-mode=initiator If CHAP initiator discovery is required, set the CHAP discovery mode to initiator. To set the CHAP initiator discovery mode using CLI, run the following XMCLI command: .. code-block:: console $ modify-chap chap-discovery-mode=initiator The CHAP initiator modes can also be set via the XMS GUI. Refer to XtremIO User Guide for details on CHAP configuration via GUI and CLI. The CHAP initiator authentication and discovery credentials (username and password) are generated automatically by the Block Storage driver. Therefore, there is no need to configure the initial CHAP credentials manually in XMS. 
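Building on the Multiple back ends section above, a volume type is what routes a new volume to a specific XtremIO back end. The commands below are an illustrative sketch: the type name is an example, and ``volume_backend_name`` must match the value configured for the back end in ``cinder.conf``.

.. code-block:: console

   $ openstack volume type create XtremIOAFA
   $ openstack volume type set --property volume_backend_name=XtremIOAFA XtremIOAFA
   $ openstack volume create --type XtremIOAFA --size 10 demo-volume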
Configuring ports filtering ~~~~~~~~~~~~~~~~~~~~~~~~~~~ The XtremIO Block Storage driver supports ports filtering to define a list of iSCSI IP-addresses or FC WWNs which will be used to attach volumes. If option is not set all ports are allowed. .. code-block:: ini xtremio_ports = iSCSI IPs or FC WWNs .. _emc_extremio_configuration_example: Configuration example ~~~~~~~~~~~~~~~~~~~~~ You can update the ``cinder.conf`` file by editing the necessary parameters as follows: .. code-block:: ini [Default] enabled_backends = XtremIO [XtremIO] volume_driver = cinder.volume.drivers.dell_emc.xtremio.XtremIOFibreChannelDriver san_ip = XMS_IP xtremio_cluster_name = Cluster01 xtremio_ports = 21:00:00:24:ff:57:b2:36,21:00:00:24:ff:57:b2:55 san_login = XMS_USER san_password = XMS_PASSWD volume_backend_name = XtremIOAFA ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/doc/source/configuration/block-storage/drivers/dell-storagecenter-driver.rst0000664000175000017500000003704700000000000032036 0ustar00zuulzuul00000000000000================================================== Dell SC Series Fibre Channel and iSCSI drivers ================================================== The Dell Storage Center volume driver interacts with configured Storage Center arrays. The Dell Storage Center driver manages a Storage Center array via the Dell Storage Manager (DSM) Data Collector or by directly connecting to the Storage Center at the cost of replication and Live Volume functionality. Also note that the directly connecting to the Storage Center is only supported with Storage Center OS 7.1.1 or later. Any version of Storage Center OS supported by DSM is supported if connecting via the Data Collector. Driver configuration settings and Storage Center options are defined in the ``cinder.conf`` file. Prerequisites: - Storage Center OS version 7.1.1 or later and OpenStack Ocata or later must be used if connecting directly to the Storage Center. - Dell Storage Manager 2015 R1 or later if connecting through DSM. Supported operations ~~~~~~~~~~~~~~~~~~~~ The Dell Storage Center volume driver provides the following Cinder volume operations: - Create, delete, attach (map), and detach (unmap) volumes. - Create, list, and delete volume snapshots. - Create a volume from a snapshot. - Copy an image to a volume. - Copy a volume to an image. - Clone a volume. - Extend a volume. - Create, delete, list and update a consistency group. - Create, delete, and list consistency group snapshots. - Manage an existing volume. - Replication (Requires DSM.) - Failover-host for replicated back ends. (Requires DSM.) - Create a replication using Live Volume. (Requires DSM.) Extra spec options ~~~~~~~~~~~~~~~~~~ Volume type extra specs can be used to enable a variety of Dell Storage Center options. Selecting Storage Profiles, Replay Profiles, enabling replication, replication options including Live Volume and Active Replay replication. (Replication options are available when connected via DSM.) Storage Profiles control how Storage Center manages volume data. For a given volume, the selected Storage Profile dictates which disk tier accepts initial writes, as well as how data progression moves data between tiers to balance performance and cost. Predefined Storage Profiles are the most effective way to manage data in Storage Center. 
By default, if no Storage Profile is specified in the volume extra specs, the default Storage Profile for the user account configured for the Block Storage driver is used. The extra spec key ``storagetype:storageprofile`` with the value of the name of the Storage Profile on the Storage Center can be set to allow to use Storage Profiles other than the default. For ease of use from the command line, spaces in Storage Profile names are ignored. As an example, here is how to define two volume types using the ``High Priority`` and ``Low Priority`` Storage Profiles: .. code-block:: console $ openstack volume type create "GoldVolumeType" $ openstack volume type set --property storagetype:storageprofile=highpriority "GoldVolumeType" $ openstack volume type create "BronzeVolumeType" $ openstack volume type set --property storagetype:storageprofile=lowpriority "BronzeVolumeType" Replay Profiles control how often the Storage Center takes a replay of a given volume and how long those replays are kept. The default profile is the ``daily`` profile that sets the replay to occur once a day and to persist for one week. The extra spec key ``storagetype:replayprofiles`` with the value of the name of the Replay Profile or profiles on the Storage Center can be set to allow to use Replay Profiles other than the default ``daily`` profile. As an example, here is how to define a volume type using the ``hourly`` Replay Profile and another specifying both ``hourly`` and the default ``daily`` profile: .. code-block:: console $ openstack volume type create "HourlyType" $ openstack volume type set --property storagetype:replayprofile=hourly "HourlyType" $ openstack volume type create "HourlyAndDailyType" $ openstack volume type set --property storagetype:replayprofiles=hourly,daily "HourlyAndDailyType" Note the comma separated string for the ``HourlyAndDailyType``. Replication for a given volume type is enabled via the extra spec ``replication_enabled``. To create a volume type that specifies only replication enabled back ends: .. code-block:: console $ openstack volume type create "ReplicationType" $ openstack volume type set --property replication_enabled=' True' "ReplicationType" Extra specs can be used to configure replication. In addition to the Replay Profiles above, ``replication:activereplay`` can be set to enable replication of the volume's active replay. And the replication type can be changed to synchronous via the ``replication_type`` extra spec can be set. To create a volume type that enables replication of the active replay: .. code-block:: console $ openstack volume type create "ReplicationType" $ openstack volume type key --property replication_enabled=' True' "ReplicationType" $ openstack volume type key --property replication:activereplay=' True' "ReplicationType" To create a volume type that enables synchronous replication : .. code-block:: console $ openstack volume type create "ReplicationType" $ openstack volume type key --property replication_enabled=' True' "ReplicationType" $ openstack volume type key --property replication_type=' sync' "ReplicationType" To create a volume type that enables replication using Live Volume: .. code-block:: console $ openstack volume type create "ReplicationType" $ openstack volume type key --property replication_enabled=' True' "ReplicationType" $ openstack volume type key --property replication:livevolume=' True' "ReplicationType" If QOS options are enabled on the Storage Center they can be enabled via extra specs. 
The name of the Volume QOS can be specified via the ``storagetype:volumeqos`` extra spec. Likewise the name of the Group QOS to use can be specified via the ``storagetype:groupqos`` extra spec. Volumes created with these extra specs set will be added to the specified QOS groups. To create a volume type that sets both Volume and Group QOS: .. code-block:: console $ openstack volume type create "StorageCenterQOS" $ openstack volume type key --property 'storagetype:volumeqos'='unlimited' "StorageCenterQOS" $ openstack volume type key --property 'storagetype:groupqos'='limited' "StorageCenterQOS" Data reduction profiles can be specified in the ``storagetype:datareductionprofile`` extra spec. Available options are None, Compression, and Deduplication. Note that not all options are available on every Storage Center. To create volume types that support no compression, compression, and deduplication and compression respectively: .. code-block:: console $ openstack volume type create "NoCompressionType" $ openstack volume type key --property 'storagetype:datareductionprofile'='None' "NoCompressionType" $ openstack volume type create "CompressedType" $ openstack volume type key --property 'storagetype:datareductionprofile'='Compression' "CompressedType" $ openstack volume type create "DedupType" $ openstack volume type key --property 'storagetype:datareductionprofile'='Deduplication' "DedupType" Note: The default is no compression. iSCSI configuration ~~~~~~~~~~~~~~~~~~~ Use the following instructions to update the configuration file for iSCSI: .. code-block:: ini default_volume_type = delliscsi enabled_backends = delliscsi [delliscsi] # Name to give this storage back-end volume_backend_name = delliscsi # The iSCSI driver to load volume_driver = cinder.volume.drivers.dell_emc.sc.storagecenter_iscsi.SCISCSIDriver # IP address of the DSM or the Storage Center if attaching directly. san_ip = 172.23.8.101 # DSM user name san_login = Admin # DSM password san_password = secret # The Storage Center serial number to use dell_sc_ssn = 64702 # ==Optional settings== # The DSM API port dell_sc_api_port = 3033 # Server folder to place new server definitions dell_sc_server_folder = devstacksrv # Volume folder to place created volumes dell_sc_volume_folder = devstackvol/Cinder Fibre Channel configuration ~~~~~~~~~~~~~~~~~~~~~~~~~~~ Use the following instructions to update the configuration file for fibre channel: .. code-block:: ini default_volume_type = dellfc enabled_backends = dellfc [dellfc] # Name to give this storage back-end volume_backend_name = dellfc # The FC driver to load volume_driver = cinder.volume.drivers.dell_emc.sc.storagecenter_fc.SCFCDriver # IP address of the DSM or the Storage Center if attaching directly. san_ip = 172.23.8.101 # DSM user name san_login = Admin # DSM password san_password = secret # The Storage Center serial number to use dell_sc_ssn = 64702 # ==Optional settings== # The DSM API port dell_sc_api_port = 3033 # Server folder to place new server definitions dell_sc_server_folder = devstacksrv # Volume folder to place created volumes dell_sc_volume_folder = devstackvol/Cinder Dual DSM ~~~~~~~~ It is possible to specify a secondary DSM to use in case the primary DSM fails. Configuration is done through the cinder.conf. Both DSMs have to be configured to manage the same set of Storage Centers for this backend. That means the dell_sc_ssn and any Storage Centers used for replication or Live Volume. Add network and credential information to the backend to enable Dual DSM. .. 
code-block:: ini [dell] # The IP address and port of the secondary DSM. secondary_san_ip = 192.168.0.102 secondary_sc_api_port = 3033 # Specify credentials for the secondary DSM. secondary_san_login = Admin secondary_san_password = secret The driver will use the primary until a failure. At that point it will attempt to use the secondary. It will continue to use the secondary until the volume service is restarted or the secondary fails at which point it will attempt to use the primary. Note: Requires two DSM Data Collectors. Replication configuration ~~~~~~~~~~~~~~~~~~~~~~~~~ Add the following to the back-end specification to specify another Storage Center to replicate to. .. code-block:: ini [dell] replication_device = target_device_id: 65495, qosnode: cinderqos The ``target_device_id`` is the SSN of the remote Storage Center and the ``qosnode`` is the QoS Node setup between the two Storage Centers. Note that more than one ``replication_device`` line can be added. This will slow things down, however. A volume is only replicated if the volume is of a volume-type that has the extra spec ``replication_enabled`` set to `` True``. Warning: replication_device requires DSM. If this is on a backend that is directly connected to the Storage Center the driver will not load as it is unable to meet the replication requirement. Replication notes ~~~~~~~~~~~~~~~~~ This driver supports both standard replication and Live Volume (if supported and licensed). The main difference is that a VM attached to a Live Volume is mapped to both Storage Centers. In the case of a failure of the primary Live Volume still requires a failover-host to move control of the volume to the second controller. Existing mappings should work and not require the instance to be remapped but it might need to be rebooted. Live Volume is more resource intensive than replication. One should be sure to plan accordingly. Failback ~~~~~~~~ The failover-host command is designed for the case where the primary system is not coming back. If it has been executed and the primary has been restored it is possible to attempt a failback. Simply specify default as the backend_id. .. code-block:: console $ cinder failover-host cinder@delliscsi --backend_id default Non trivial heavy lifting is done by this command. It attempts to recover as best it can but if things have diverged too far it can only do so much. It is also a one time only command so do not reboot or restart the service in the middle of it. Failover and failback are significant operations under OpenStack Cinder. Be sure to consult with support before attempting. Server type configuration ~~~~~~~~~~~~~~~~~~~~~~~~~ This option allows one to set a default Server OS type to use when creating a server definition on the Dell Storage Center. When attaching a volume to a node the Dell Storage Center driver creates a server definition on the storage array. This definition includes a Server OS type. The type used by the Dell Storage Center cinder driver is "Red Hat Linux 6.x". This is a modern operating system definition that supports all the features of an OpenStack node. Add the following to the back-end specification to specify the Server OS to use when creating a server definition. The server type used must come from the drop down list in the DSM. .. code-block:: ini [dell] dell_server_os = 'Red Hat Linux 7.x' Note that this server definition is created once. Changing this setting after the fact will not change an existing definition. 
The selected Server OS does not have to match the actual OS used on the node. Excluding a domain ~~~~~~~~~~~~~~~~~~ This option excludes a list of Storage Center ISCSI fault domains from the ISCSI properties returned by the initialize_connection call. This only applies to the ISCSI driver. Add the excluded_domain_ips option into the backend config for several fault domains to be excluded. This option takes a comma separated list of Target IP addresses listed under the fault domain. Older versions of DSM (EM) may list this as the Well Known IP Address. Note that the ``included_domain_ips`` takes precedance over ``excluded_domain_ips``. When ``included_domain_ips`` is not an empty list, the option ``excluded_domain_ips`` is ignored. Add the following to the back-end specification to exclude the domains at 172.20.25.15 and 172.20.26.15. .. code-block:: ini [dell] excluded_domain_ips=172.20.25.15, 172.20.26.15, 0:0:0:0:0:ffff:c0a8:15 Including domains ~~~~~~~~~~~~~~~~~~ This option includes or will whitelist a list of Storage Center ISCSI fault domains from the ISCSI properties returned by the initialize_connection call. This only applies to the ISCSI driver. Add the ``included_domain_ips`` option into the backend config for several default domains to be included or whitelisted. This option takes a comma separated list of Target IP addresses listed under the fault domain. Older versions of DSM (EM) may list this as the Well Known IP Address. Note that the ``included_domain_ips`` takes precedance over ``excluded_domain_ips``. When ``included_domain_ips`` is not an empty list, the option ``excluded_domain_ips`` is ignored. Add the following to the back-end specification to include or whitelist the domains at 172.20.25.15 and 172.20.26.15. .. code-block:: ini [dell] included_domain_ips=172.20.25.15, 172.20.26.15, 0:0:0:0:0:ffff:c0a8:15 Setting Dell SC REST API timeouts ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The user can specify timeouts for Dell SC REST API calls. To set the timeout for ASYNC REST API calls in seconds. .. code-block:: ini [dell] dell_api_async_rest_timeout=15 To set the timeout for SYNC REST API calls in seconds. .. code-block:: ini [dell] dell_api_sync_rest_timeout=30 Generally these should not be set without guidance from Dell support. Driver options ~~~~~~~~~~~~~~ The following table contains the configuration options specific to the Dell Storage Center volume driver. .. config-table:: :config-target: SC Series cinder.volume.drivers.dell_emc.sc.storagecenter_common ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/doc/source/configuration/block-storage/drivers/fujitsu-eternus-dx-driver.rst0000664000175000017500000004317200000000000032034 0ustar00zuulzuul00000000000000========================= Fujitsu ETERNUS DX driver ========================= Fujitsu ETERNUS DX driver provides FC and iSCSI support for ETERNUS DX series. The driver performs volume operations by communicating with ETERNUS DX. It uses a CIM client in Python called PyWBEM to perform CIM operations over HTTP. You can specify RAID Group and Thin Provisioning Pool (TPP) in ETERNUS DX as a storage pool. 
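The PyWBEM client mentioned above must be installed on the node running the ``cinder-volume`` service (see also the Package installation step later in this section). The package name varies by distribution; the commands below are examples only and may differ on your platform.

.. code-block:: console

   # apt-get install python3-pywbem

   $ pip install pywbem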
System requirements ~~~~~~~~~~~~~~~~~~~ Supported storages: * ETERNUS AF150 S3 * ETERNUS AF250 S3/AF250 S2/AF250 * ETERNUS AF650 S3/AF650 S2/AF650 * ETERNUS DX200F * ETERNUS DX60 S5/S4/S3 * ETERNUS DX100 S5/S4/S3 * ETERNUS DX200 S5/S4/S3 * ETERNUS DX500 S5/S4/S3 * ETERNUS DX600 S5/S4/S3 * ETERNUS DX8700 S3/DX8900 S4/S3 Requirements: * Firmware version V10L30 or later is required. * The multipath environment with ETERNUS Multipath Driver is unsupported. * An Advanced Copy Feature license is required to create snapshots, create volume from snapshots, or clone volumes. Supported operations ~~~~~~~~~~~~~~~~~~~~ * Create, delete, attach, and detach volumes. * Create, list, and delete volume snapshots. * Create a volume from a snapshot. * Copy an image to a volume. * Copy a volume to an image. * Clone a volume. * Extend a volume. * Get volume statistics. * Migrate Volume. * Revert a volume to snapshot. Preparation ~~~~~~~~~~~ Package installation -------------------- Install the ``python-pywbem`` package for your distribution. ETERNUS DX setup ---------------- Perform the following steps using ETERNUS Web GUI or ETERNUS CLI. .. note:: * These following operations require an account that has the ``Admin`` role. * For detailed operations, refer to ETERNUS Web GUI User's Guide or ETERNUS CLI User's Guide for ETERNUS DX series. #. Create an account with software role for communication with cinder controller. #. Enable the SMI-S of ETERNUS DX. #. Register an Advanced Copy Feature license and configure copy table size. #. Create a storage pool for volumes. #. (Optional) If you want to create snapshots on a different storage pool for volumes, create a storage pool for snapshots. #. Create Snap Data Pool Volume (SDPV) to enable Snap Data Pool (SDP) for ``create a snapshot``. #. Configure storage ports to be used by the Block Storage service. * Set those storage ports to CA mode. * Enable the host-affinity settings of those storage ports. (ETERNUS CLI command for enabling host-affinity settings): .. code-block:: console CLI> set fc-parameters -host-affinity enable -port CLI> set iscsi-parameters -host-affinity enable -port .. note:: * Replace and with the name of the controller enclosure where the port is located. * Replace with the port number. #. Ensure LAN connection between cinder controller and MNT port of ETERNUS DX and SAN connection between Compute nodes and CA ports of ETERNUS DX. #. (Optional) If you want to use a public key to SSH to the ETERNUS DX storage, generate the SSH key, and upload the ``eternus.ietf`` file to the ETERNUS storage. For information about how to set the public key, refer to the ETERNUS Web GUI manuals. .. code-block:: console $ ssh-keygen -t rsa -N "" -f ./eternus -m PEM $ ssh-keygen -e -f ./eternus.pub > ./eternus.ietf If the public key(eternus.ietf) that was created is deleted by mistake, use the following command to recreate the key. .. code-block:: console $ ssh-keygen -e -f /root/.ssh/eternus.pub > ./eternus.ietf Configuration ~~~~~~~~~~~~~ #. Add the following entries to ``/etc/cinder/cinder.conf``: FC entries: .. code-block:: ini volume_driver = cinder.volume.drivers.fujitsu.eternus_dx.eternus_dx_fc.FJDXFCDriver cinder_eternus_config_file = /etc/cinder/eternus_dx.xml iSCSI entries: .. 
code-block:: ini volume_driver = cinder.volume.drivers.fujitsu.eternus_dx.eternus_dx_iscsi.FJDXISCSIDriver cinder_eternus_config_file = /etc/cinder/eternus_dx.xml If there is no description about ``cinder_eternus_config_file``, then the parameter is set to default value ``/etc/cinder/cinder_fujitsu_eternus_dx.xml``. #. Create a driver configuration file. Create a driver configuration file in the file path specified as ``cinder_eternus_config_file`` in ``cinder.conf``, and add parameters to the file as below: FC configuration: .. code-block:: xml 0.0.0.0 5988 smisuser smispassword raid5_0001 tpp_0001 raid_0002 raid5_0001 iSCSI configuration: .. code-block:: xml 0.0.0.0 5988 smisuser smispassword raid5_0001 tpp_0001 raid_0002 raid5_0001 1.1.1.1 1.1.1.2 1.1.1.3 1.1.1.4 Where: ``EternusIP`` IP address of the SMI-S connection of the ETRENUS device. Use the IP address of the MNT port of device. ``EternusPort`` Port number for the SMI-S connection port of the ETERNUS device. ``EternusUser`` User name of ``sofware`` role for the connection ``EternusIP``. ``EternusPassword`` Corresponding password of ``EternusUser`` on ``EternusIP``. ``EternusPool`` (Multiple setting allowed) Name of the storage pool for the volumes from ``ETERNUS DX setup``. Use the pool RAID Group pool name or TPP pool name in the ETERNUS device. ``EternusSnapPool`` (Multiple setting allowed) Name of the storage pool for the snapshots from ``ETERNUS DX setup``. Use the pool RAID Group pool name or TPP pool name in the ETERNUS device. If you did not create a different pool for snapshots, use the same value as ``EternusPool``. ``EternusISCSIIP`` (Multiple setting allowed) iSCSI connection IP address of the ETERNUS DX. .. note:: * You can specify the same RAID Group pool name or TPP pool name for ``EternusPool`` and ``EternusSnapPool`` if you create volumes and snapshots on a same storage pool. * For ``EternusPool``, when multiple pools are specified, cinder-scheduler will select one from multiple pools to create the volume. Configuration example ~~~~~~~~~~~~~~~~~~~~~ #. Edit ``cinder.conf``: .. code-block:: ini [DEFAULT] enabled_backends = DXFC, DXISCSI [DXFC] volume_driver = cinder.volume.drivers.fujitsu.eternus_dx.eternus_dx_fc.FJDXFCDriver cinder_eternus_config_file = /etc/cinder/fc.xml volume_backend_name = FC fujitsu_passwordless = False [DXISCSI] volume_driver = cinder.volume.drivers.fujitsu.eternus_dx.eternus_dx_iscsi.FJDXISCSIDriver cinder_eternus_config_file = /etc/cinder/iscsi.xml volume_backend_name = ISCSI fujitsu_passwordless = True fujitsu_private_key_path = /etc/cinder/eternus #. Create the driver configuration files ``fc.xml`` and ``iscsi.xml``. #. Create a volume type and set extra specs to the type: .. code-block:: console $ cinder type-create DX_FC $ cinder type-key DX_FX set volume_backend_name=FC $ cinder type-create DX_ISCSI $ cinder type-key DX_ISCSI set volume_backend_name=ISCSI By issuing these commands, the volume type ``DX_FC`` is associated with the ``FC``, and the type ``DX_ISCSI`` is associated with the ``ISCSI``. Supported Functions of the ETERNUS OpenStack VolumeDriver ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Migrate Volume -------------- Moves volumes to a different storage pool. #. ETERNUS AF/DX functions * Creates migration destination volumes / deletes migration source volumes. * Sets access paths to migration volumes / deletes migration access paths to migration source volumes. * Uses Create Volume, Delete Volume, Attach Volume and Detach Volume. #. 
Cinder operation * Copies data in the migration source volume to the migration destination volume. .. note:: Host information must be specified in Migrated Volume. The input format is as follows: ``Host-Name@Backend-Name#Pool-Name`` For the following environment or settings, specify ``test.localhost@Backend1#PoolA`` for the host. * PoolA is a pool specified in ``/etc/cinder/cinder_fujitsu_eternus_dx.xml``. .. code-block:: console $ hostname test.localhost $ cat /etc/cinder/cinder.conf (snip) [Backend1] volume_driver=cinder.volume.drivers.fujitsu.eternus_dx.eternus_dx_fc.FJDXFCDriver cinder_eternus_config_file = /etc/cinder/cinder_fujitsu_eternus_dx.xml volume_backend_name=volume_backend_name1 .. warning:: There are some restrictions for volume migration: #. You cannot migrate a volume that has snapshots. #. You cannot use driver-assisted migration to move a volume to or from a backend that does not use the ETERNUS OpenStack volume driver. Supplementary Information for the Supported Functions ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ QoS Settings ------------ The QoS settings that are linked with the volume QoS function of the ETERNUS AF/DX are available. An upper limit value of the bandwidth(BWS) can be set for each volume. A lower limit value can not be set. The upper limit is set if the firmware version of the ETERNUS AF/DX is earlier than V11L30, and the IOPS/Throughput of Total/Read/Write for the volume is set separately for V11L30 and later. The following procedure shows how to set the QoS. #. Create a QoS definition. * The firmware version of the ETERNUS AF/DX is earlier than V11L30 .. code-block:: ini $ cinder qos-create maxBWS=xx For , specify the name of the definition that is to be created. For maxBWS, specify a value in MB. * The firmware version of the ETERNUS AF/DX is V11L30 or later .. code-block:: console $ cinder qos-create read_iops_sec=15000 write_iops_sec=12600 total_iops_sec=15000 read_bytes_sec=800 write_bytes_sec=700 total_bytes_sec=800 #. When not using the existing volume type, create a new volume type. .. code-block:: console $ cinder type-create For , specify the name of the volume type that is to be created. #. Associate the QoS definition with the volume type. .. code-block:: console $ cinder qos-associate For , specify the ID of the QoS definition that was created. For , specify the ID of the volume type that was created. **Cautions** #. For the procedure to cancel the QoS settings, refer to "OpenStack Command-Line Interface Reference". #. The QoS mode of the ETERNUS AF/DX must be enabled in advance. For details, refer to the ETERNUS Web GUI manuals. #. When the firmware version of the ETERNUS AF/DX is earlier than V11L30, for the volume QoS settings of the ETERNUS AF/DX, upper limits are set using the predefined options. Therefore, set the upper limit of the ETERNUS AF/DX side to a maximum value that does not exceed the specified maxBWS. The following table shows the upper limits that can be set on the ETERNUS AF/DX side and example settings. For details about the volume QoS settings of the ETERNUS AF/DX, refer to the ETERNUS Web GUI manuals. 
+--------------------------------+ | Settings for the ETERNUS AF/DX | +================================+ | Unlimited | +--------------------------------+ | 15000 IOPS (800MB/s) | +--------------------------------+ | 12600 IOPS (700MB/s) | +--------------------------------+ | 10020 IOPS (600MB/s) | +--------------------------------+ | 7500 IOPS (500MB/s) | +--------------------------------+ | 5040 IOPS (400MB/s) | +--------------------------------+ | 3000 IOPS (300MB/s) | +--------------------------------+ | 1020 IOPS (200MB/s) | +--------------------------------+ | 780 IOPS (100MB/s) | +--------------------------------+ | 600 IOPS (70MB/s) | +--------------------------------+ | 420 IOPS (40MB/s) | +--------------------------------+ | 300 IOPS (25MB/s) | +--------------------------------+ | 240 IOPS (20MB/s) | +--------------------------------+ | 180 IOPS (15MB/s) | +--------------------------------+ | 120 IOPS (10MB/s) | +--------------------------------+ | 60 IOPS (5MB/s) | +--------------------------------+ * When specified maxBWS=750 "12600 IOPS (700MB/s)" is set on the ETERNUS AF/DX side. * When specified maxBWS=900 "15000 IOPS (800MB/s)" is set on the ETERNUS AF/DX side. #. While a QoS definition is being created, if an option other than maxBWS/read_iops_sec/write_iops_sec/total_iops_sec/read_bytes_sec /write_bytes_sec/total_bytes_sec is specified, a warning log is output and the QoS information setting is continued. #. For an ETERNUS AF/DX wth a firmware version of before V11L30, if a QoS definition volume type that is set with read_iops_sec/ write_iops_sec/total_iops_sec/read_bytes_sec/write_bytes_sec/total_bytes_sec is specified for Create Volume, a warning log is output and the process is terminated. #. For an ETERNUS AF/DX with a firmware version of V11L30 or later, if a QoS definition volume type that is set with maxBWS is specified for Create Volume, a warning log is output and the process is terminated. #. After the firmware of the ETERNUS AF/DX is upgraded from V11L10/V11L2x to a newer version, the volume types related to the QoS definition created before the firmware upgrade can no longer be used. Set a QoS definition and create a new volume type. #. When the firmware of the ETERNUS AF/DX is downgraded to V11L10/V11L2x, do not use a volume type linked to a pre-firmware downgrade QoS definition, because the QoS definition may work differently from ones post-firmware downgrade. For the volume, create and link a volume type not associated with any QoS definition and after the downgrade, create and link a volume type associated with a QoS definition. #. If Create Volume terminates with an error, Cinder may not invoke Delete Volume. If volumes are created but the QoS settings fail, the ETERNUS OpenStack VolumeDriver ends the process to prevent the created volumes from being left in the ETERNUS AF/DX. If volumes fail to be created, the process terminates with an error. Specification of the Snapshot Creation Destination Pool ------------------------------------------------------- A RAID Group or a Thin Provisioning Pool (TPP) can be specified as the snapshot creation destination pool. In an ETERNUS AF/DX with a firmware version earlier than or equal to V10L60, Thin Provisioning Pools(TPPs) cannot be used as the snapshot creation destination pool. Multiple snapshot creation destination pools can be specified. A pool where snapshots can be created is searched in the order written in the driver configuration file and if one is found, snapshots are created in that pool. **Cautions** #. 
If the creation destination pool is a RAID Group, more than 128 snapshots cannot be created. Therefore, to create more than 128 snapshots in a RAID Group, multiple RAID Groups must be specified as snapshot creation destination pools. #. When creating a snapshot, Cinder Scheduler checks the capacity of the pool where the source volume is located. This may lead to the failure of snapshot creation fail to be created if this pool has insufficient capacity, even if the snapshot pool specified by ``EternusSnapPool`` has sufficient capacity. #. If multiple snapshot creation destination pools are specified, a different pool must be specified for the volume creation destination pool (``EternusPool`` and ``EternusSnapPool`` can be specified multiple times but the same pool name cannot be specified). If the same pool name is specified and instructions to create multiple volumes and multiple snapshots are issued at the same time, the number of logical volumes in a RAID Group will reach 128 and the operation may fail. #. To address the issue that a volume with snapshot cannot be extended, a parameter ``fujitsu_use_cli_copy`` has been introduced. The default value of ``fujitsu_use_cli_copy`` is ``False``. If ``fujitsu_use_cli_copy`` is set to ``True``, create a Snapshot using the CLI method instead of SMI-S method, allowing volume extension of the source volume. .. code-block:: console $ cat /etc/cinder/cinder.conf (snip) [Backend1] volume_driver=cinder.volume.drivers.fujitsu.eternus_dx.eternus_dx_fc.FJDXFCDriver cinder_eternus_config_file = /etc/cinder/cinder_fujitsu_eternus_dx.xml volume_backend_name = volume_backend_name1 fujitsu_use_cli_copy = True Note that ``fujitsu_use_cli_copy`` cannot be set to True when the type of target pool is RAID Group. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/doc/source/configuration/block-storage/drivers/fungible-storage-driver.rst0000664000175000017500000000414400000000000031500 0ustar00zuulzuul00000000000000============================== Fungible Storage Driver ============================== Fungible Storage volume driver provides OpenStack Compute instances with access to Fungible Storage Cluster. This documentation explains how to configure Cinder for use with the Fungible Storage Cluster. Driver requirements ~~~~~~~~~~~~~~~~~~~ - Fungible Storage Cluster - FSC version >= 4.0 - nvme cli version >= v1.13 - The Block Storage Node should also have a data path to the Fungible Storage Cluster for the following operations: - Copy volume to image - Copy image to volume Driver options ~~~~~~~~~~~~~~ The following table contains the configuration options supported by the Fungible Storage driver. .. config-table:: :config-target: Fungible Storage Cluster cinder.volume.drivers.fungible.driver Supported operations ~~~~~~~~~~~~~~~~~~~~ - Create, list, delete, attach and detach volumes - Create, list and delete volume snapshots - Copy image to volume - Copy volume to image - Create volume from snapshot - Clone volume - Extend volume Configure Fungible Storage Cluster backend ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ This section details the steps required to configure the Fungible Storage cinder driver. #. In the ``cinder.conf`` configuration file under the ``[DEFAULT]`` section, set the enabled_backends parameter. .. code-block:: ini [DEFAULT] enabled_backends = fungible #. Add a backend group section for the backend group specified in the enabled_backends parameter. #. 
In the newly created backend group section, set the following configuration options: .. code-block:: ini [fungible] # Backend name volume_backend_name=fungible # The driver path volume_driver=cinder.volume.drivers.fungible.driver.FungibleDriver # Fungible composer details san_ip = san_login = san_password = # List below are optional nvme_connect_port = api_enable_ssl = True/False iops_for_image_migration = ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/doc/source/configuration/block-storage/drivers/hedvig-volume-driver.rst0000664000175000017500000001037700000000000031023 0ustar00zuulzuul00000000000000==================== Hedvig Volume Driver ==================== Hedvig provides software-defined storage for enterprises building private, hybrid, or multi-cloud environments. Hedvig's patented Universal Data Plane technology forms a distributed, scale-out cluster that transforms commodity servers or cloud computing into a unified data fabric. The Hedvig Cinder Driver interacts with a configured backend Hedvig Cluster using REST APIs. Using the Hedvig Volume Driver ------------------------------ With the Hedvig Volume Driver for OpenStack, you can : - Integrate public and private clouds: Build a unified hybrid environment to easily migrate to or from your data center and public clouds. - Set granular virtual disk policies: Assign enterprise-class features on a per volume basis to best fit your application requirements. - Connect to any compute environment: Use with any hypervisor, application, or bare-metal system. - Grow seamlessly with an elastic cluster: Scale storage performance and capacity on-the-fly with off-the-shelf x86 servers. - Deliver predictable performance: Receive consistent high-IOPS performance for demanding applications through massive parallelism, dedicated flash, and edge cache configurations. Requirement ----------- Hedvig Volume Driver, version 1.0.0 and later, supports Hedvig release 3.0 and later. Supported operations -------------------- Hedvig supports the core features of OpenStack Cinder: - Create and delete volumes - Attach and detach volumes - Create and delete snapshots - Create volume from snapshot - Get volume stats - Copy image to volume - Copy volume to image - Clone volume - Extend volume - Enable deduplication, encryption, cache, compression, custom replication policy on a volume level using volume-type extra-specs Hedvig Volume Driver configuration ----------------------------------- The Hedvig Volume Driver can be configured by editing the cinder.conf file located in the /etc/cinder/ directory. .. code-block:: ini [DEFAULT] enabled_backends=hedvig [HEDVIG_BACKEND_NAME] volume_driver=cinder.volume.drivers.hedvig.hedvig_cinder.HedvigISCSIDriver san_ip= san_login=HEDVIG_USER san_password=HEDVIG_PASSWORD san_clustername=HEDVIG_CLUSTER Run the following commands on the OpenStack Cinder Node to create a Volume Type for Hedvig: .. code-block:: console cinder type-create HEDVIG_VOLUME_TYPE cinder type-key HEDVIG_VOLUME_TYPE set volume_backend_name=HEDVIG_BACKEND_NAME This section contains definitions of the terms used above. HEDVIG_IP/HOSTNAME The IP address or hostnames of the Hedvig Storage Cluster Nodes HEDVIG_USER Username to login to the Hedvig Cluster with minimum ``super user`` (admin) privilege HEDVIG_PASSWORD Password to login to the Hedvig Cluster HEDVIG_CLUSTER Name of the Hedvig Cluster .. 
note:: Restart the ``cinder-volume`` service after updating the ``cinder.conf`` file to apply the changes and to initialize the Hedvig Volume Driver. Hedvig QoS Spec parameters and values ------------------------------------- - dedup_enable – true/false - compressed_enable – true/false - cache_enable – true/false - replication_factor – 1-6 - replication_policy – Agnostic/RackAware/DataCenterAware - replication_policy_info – comma-separated list of data center names (applies only to a replication_policy of DataCenterAware) - disk_residence – Flash/HDD - encryption – true/false Creating a Hedvig Cinder Volume with custom attributes (QoS Specs) ------------------------------------------------------------------ 1. Create a QoS Spec with the list of attributes that you want to associate with a volume. For example, to create a Cinder Volume with deduplication enabled, create a QoS Spec called dedup_enable with dedup_enable=true #. Create a new volume type and associate this QoS Spec with it, OR associate the QoS Spec with an existing volume type. #. Every Cinder Volume that you create of the above volume type will have deduplication enabled. #. If you do create a new volume type, make sure to add the key volume_backend_name so OpenStack knows that the Hedvig Volume Driver handles all requests for this volume. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/doc/source/configuration/block-storage/drivers/hitachi-vsp-driver.rst0000664000175000017500000006254100000000000030467 0ustar00zuulzuul00000000000000============================ Hitachi block storage driver ============================ Hitachi block storage driver provides Fibre Channel and iSCSI support for Hitachi VSP storages. System requirements ~~~~~~~~~~~~~~~~~~~ Supported storages: +-----------------+------------------------+ | Storage model | Firmware version | +=================+========================+ | VSP E590, | 93-03-22 or later | | E790 | | +-----------------+------------------------+ | VSP E990 | 93-01-01 or later | +-----------------+------------------------+ | VSP E1090, | 93-06-2x or later | | E1090H | | +-----------------+------------------------+ | VSP F350, | 88-01-04 or later | | F370, | | | F700, | | | F900 | | | | | | VSP G350, | | | G370, | | | G700, | | | G900 | | +-----------------+------------------------+ | VSP F400, | 83-04-43 or later | | F600, | | | F800 | | | | | | VSP G200, | | | G400, | | | G600, | | | G800 | | +-----------------+------------------------+ | VSP N400, | 83-06-01 or later | | N600, | | | N800 | | +-----------------+------------------------+ | VSP 5100, | 90-01-41 or later | | 5500, | | | 5100H, | | | 5500H | | +-----------------+------------------------+ | VSP 5200, | 90-08-0x or later | | 5600, | | | 5200H, | | | 5600H | | +-----------------+------------------------+ | VSP F1500 | 80-05-43 or later | | | | | VSP G1000, | | | VSP G1500 | | +-----------------+------------------------+ Required storage licenses: * Hitachi Storage Virtualization Operating System (SVOS) - Hitachi LUN Manager - Hitachi Dynamic Provisioning * Hitachi Local Replication (Hitachi Thin Image) Optional storage licenses: * Deduplication and compression * Global-Active Device Supported operations ~~~~~~~~~~~~~~~~~~~~ * Create, delete, attach, and detach volumes. * Create, list, and delete volume snapshots. * Create a volume from a snapshot. * Create, list, update, and delete consistency groups. 
* Create, list, and delete consistency group snapshots. * Copy a volume to an image. * Copy an image to a volume. * Clone a volume. * Extend a volume. * Migrate a volume (host assisted). * Migrate a volume (storage assisted). * Get volume statistics. * Efficient non-disruptive volume backup. * Manage and unmanage a volume. * Attach a volume to multiple instances at once (multi-attach). * Revert a volume to a snapshot. Hitachi block storage driver also supports the following additional features: * Global-Active Device * Maximum number of copy pairs and consistency groups * Data deduplication and compression * Port scheduler * Port assignment using extra spec * Configuring Quality of Service (QoS) settings .. note:: * A volume having snapshots cannot be extended with this driver. * Storage assisted volume migration is only supported between same storage. Configuration ~~~~~~~~~~~~~ Set up Hitachi storage ---------------------- You need to specify settings as described below for storage systems. For details about each setting, see the user's guide of the storage systems. Common resources: 1. ``All resources`` The name of any storage resource, such as a DP pool or a host group, cannot contain any whitespace characters or else it will be unusable by the driver. 2. ``User accounts`` Create a storage device account belonging to the Administrator User Group. 3. ``DP Pool`` Create a DP pool that is used by the driver. 4. ``Resource group`` If using a new resource group for exclusive use by an OpenStack system, create a new resource group, and assign the necessary resources, such as LDEVs, port, and host group (iSCSI target) to the created resource. 5. ``Ports`` Enable Port Security for the ports used by the driver. If you use iSCSI: 1. ``Ports`` Assign an IP address and a TCP port number to the port. .. note:: * Do not change LDEV nickname for the LDEVs created by Hitachi block storage driver. The nickname is referred when deleting a volume or a snapshot, to avoid data-loss risk. See details in `bug #2072317`_. Set up Hitachi storage volume driver and volume operations ---------------------------------------------------------- Set the volume driver to Hitachi block storage driver by setting the volume_driver option in the cinder.conf file as follows: If you use Fibre Channel: .. code-block:: ini [hitachi_vsp] volume_driver = cinder.volume.drivers.hitachi.hbsd_fc.HBSDFCDriver volume_backend_name = hitachi_vsp san_ip = 1.2.3.4 san_login = hitachiuser san_password = password hitachi_storage_id = 123456789012 hitachi_pools = pool0 If you use iSCSI: .. code-block:: ini [hitachi_vsp] volume_driver = cinder.volume.drivers.hitachi.hbsd_iscsi.HBSDISCSIDriver volume_backend_name = hitachi_vsp san_ip = 1.2.3.4 san_login = hitachiuser san_password = password hitachi_storage_id = 123456789012 hitachi_pools = pool0, pool1 Configuration options ~~~~~~~~~~~~~~~~~~~~~ This table shows configuration options for Hitachi block storage driver. .. config-table:: :config-target: Hitachi block storage driver cinder.volume.drivers.hitachi.hbsd_common cinder.volume.drivers.hitachi.hbsd_rest cinder.volume.drivers.hitachi.hbsd_rest_fc cinder.volume.drivers.hitachi.hbsd_replication Required options ---------------- - ``san_ip`` IP address of SAN controller - ``san_login`` Username for SAN controller - ``san_password`` Password for SAN controller - ``hitachi_storage_id`` Product number of the storage system. - ``hitachi_pools`` Pool number(s) or pool name(s) of the DP pool. 
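As a minimal, illustrative sketch (the backend section name ``hitachi_vsp`` matches the examples above; the volume type name is an assumption), the backend is typically also listed in ``enabled_backends`` and mapped to a volume type so that the scheduler can place volumes on it:

.. code-block:: ini

   [DEFAULT]
   # Assumes the [hitachi_vsp] backend section shown in the examples above
   enabled_backends = hitachi_vsp

.. code-block:: console

   $ openstack volume type create hitachi_vsp
   $ openstack volume type set --property volume_backend_name=hitachi_vsp hitachi_vsp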
Set up and operation for additional features ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Set up Global-Active Device and volume operation ------------------------------------------------ Beginning with the 2023.1 release, if you use Global-Active Device (GAD), you can make the data of individual volumes redundant between two storage systems, thereby improving the availability of the storage systems. For details, see the `Global-Active Device User Guide`_. .. note:: * You cannot apply Global-Active Device configuration and remote replication configuration to the same backend. * You cannot use Asymmetric Logical Unit Access (ALUA). Storage firmware versions for GAD <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<< If you are using a VSP F350, F370, F700, F900 storage system or a VSP G350, G370, G700, G900 storage system in a Global-Active Device configuration, make sure the firmware version is 88-03-21 or later. Creating a Global-Active Device environment <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<< Before using Global-Active Device, create the prerequisite environment, such as connecting remote paths, configuring a quorum disk, and creating a virtual storage machine (VSM), by using other storage system management tools. Hitachi block storage driver supports the following configurations. * Configuration where the P-VOL is not registered to a VSM * Configuration where the P-VOL is registered to a VSM For details, see the Workflow for creating a GAD environment in the `Global-Active Device User Guide`_. Hitachi block storage driver automatically performs the following procedures, which are described in the section `Workflow for creating a GAD environment`_: * The following steps of Setting up the secondary storage system: - Setting the GAD reserve attribute on the S-VOL - Creating a host group (Only if the configuration option ``hitachi_group_create`` is True) - Creating the S-VOL - Adding an LU path to the S-VOL * Updating the CCI configuration definition files * Creating the GAD pair * Adding an alternate path to the S-VOL You must register the information about the secondary storage system to the REST API server in the primary site and register the information about the primary storage system to the REST API server in the secondary site. For details about how to register the information, see the `Hitachi Command Suite Configuration Manager REST API Reference Guide`_ or the `Hitachi Ops Center API Configuration Manager REST API Reference Guide`_. .. note:: * The users specified for both configuration options ``san_login`` and ``hitachi_mirror_rest_user`` must have the following roles: * Storage Administrator (View & Modify) * Storage Administrator (Remote Copy) * Reserve unused host group IDs (iSCSI target IDs) for the resource groups related to the VSM. Reserve the IDs in ascending order. The number of IDs you need to reserve is 1 plus the sum of the number of controller nodes and the number of compute nodes. For details on how to reserve a host group ID (iSCSI target ID), see the `Global-Active Device User Guide`_. * The LUNs of the host groups (iSCSI targets) of the specified ports on the primary storage system must match the LUNs of the host groups (iSCSI targets) of the specified ports on the secondary storage system. If they do not match, match the LUNs for the primary storage system with those for the secondary storage system.
* When you use the same storage system both as the secondary storage system for a Global-Active Device configuration and as a backend storage system for general use at the same time, you cannot use the same ports between the different backend storage systems. Specify different ports in the configuration options ``hitachi_target_ports``, ``hitachi_compute_target_ports``, or ``hitachi_rest_pair_target_ports`` for each backend storage system. Create volume in a Global-Active Device configuration <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<< If you create a Cinder volume in a Global-Active Device configuration, each Global-Active Device pair is mapped to a Cinder volume. In order to create volumes with the Global-Active Device attribute specified, you must first create a volume type that contains the ``hbsd:topology=active_active_mirror_volume`` extra-spec. You can do this as follows, replacing the ``<...>`` placeholders with your own values: .. code-block:: console $ openstack volume type create <volume_type_name> $ openstack volume type set --property \ hbsd:topology=active_active_mirror_volume <volume_type_name> You can then create GAD volumes as follows: .. code-block:: console $ openstack volume create --type <volume_type_name> --size <size> <volume_name> .. note:: * In this case, the following restrictions apply: * You cannot create a volume for which the deduplication and compression function is enabled; otherwise, volume creation will fail with the error ``MSGID0753-E: Failed to create a volume in a GAD environment because deduplication is enabled for the volume type.``. * Note the following if the configuration is "P-VOL registered to a VSM": * Do not create volumes whose volume types do not have the ``hbsd:topology=active_active_mirror_volume`` extra-spec. * While setting up the environment, set a virtual LDEV ID for every LDEV specified by the configuration option ``hitachi_ldev_range`` on the primary storage system using storage management software, because virtual LDEV IDs are necessary for GAD pair creation. Unavailable Cinder functions <<<<<<<<<<<<<<<<<<<<<<<<<<<< The following Cinder functions are unavailable in a Global-Active Device configuration: * Migrate a volume (storage assisted) * Manage Volume * Unmanage Volume .. note:: In addition, if the configuration is "P-VOL registered to a VSM", the backup creation command of the Backup Volume functions cannot be run with the ``--snapshot`` or the ``--force`` option specified.
* Priority level of the I/O processing You can set priority levels for the I/O processing of multiple volumes. I/O is adjusted for faster I/O response, starting with high-priority volumes. **System requirements for a QoS** <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<< **Storage firmware versions** +-----------------+------------------------+ | Storage model | Firmware version | +=================+========================+ | VSP F350, | 88-06-01 or later | | F370, | | | F700, | | | F900 | | | | | | VSP G350, | | | G370, | | | G700, | | | G900 | | +-----------------+------------------------+ | VSP 5100, | 90-04-01 or later | | 5500, | | | 5100H, | | | 5500H | | +-----------------+------------------------+ **Storage management software** Configuration Manager REST API version 10.2.0-00 or later is required. **Configuring QoS settings and creating volumes** <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<< Create QoS specs that define QoS settings, and then associate the QoS specs with a volume type. You can configure QoS settings for a volume by running the following functions with this volume type specified. * Create Volume * Create Snapshot * Create Volume from Snapshot * Create Volume from Volume (Clone) * Consistency Group * Generic volume group The following example describes the procedure for configuring QoS settings when creating a new volume using the Create Volume function. Before you begin, Check the following information. * QoS settings - Upper or lower limit on throughput (IOPS, amount of data transferred in MB/s) - Priority level of I/O processing * ID and name of the volume type A volume type is needed in order to associate it with the QoS specs. If no volume types exist, create one in advance. **Procedure** 1. Create the QoS specs a. If you use the cinder command: .. code-block:: console $ cinder qos-create [consumer=back-end] \ = \ [= ...] \ b. If you use the openstack command: .. code-block:: console $ openstack volume qos create [--consumer back-end] \ --property \ = \ [--property \ = ...] \ \ Specify a name for ````. Specify ```` and ```` as follows. For details on the range of values you can specify, see the overview of QoS operations in the `Performance Guide`_. +--------------------+------------------------------------------+ | QoS specs property | Description | +====================+==========================================+ | upperIops | The upper limit on IOPS. | +--------------------+------------------------------------------+ | upperTransferRate | The upper limit on the amount of data | | | transferred in MB/s. | +--------------------+------------------------------------------+ | lowerIops | The lower limit on IOPS. | +--------------------+------------------------------------------+ | lowerTransferRate | The lower limit on the amount of data | | | transferred in MB/s. | +--------------------+------------------------------------------+ | responsePriority | The priority level of the I/O processing.| +--------------------+------------------------------------------+ The following is an example of running the command. \ a. If you use the cinder command: .. code-block:: console $ cinder qos-create test_qos consumer=back-end upperIops=2000 \ b. If you use the openstack command: .. code-block:: console $ openstack volume qos create --consumer back-end \ --property upperIops=2000 test_qos \ When you run this command, the ID of the created QoS specs is also output. Record this ID, because you will need it in a later step. \ 2. Associate the QoS specs with a volume type. a. 
If you use the cinder command: .. code-block:: console $ cinder qos-associate \ b. If you use the openstack command: .. code-block:: console $ openstack volume qos associate \ 3. Specify the volume type that is associated with the QoS specs, and then create a volume. a. If you use the cinder command: .. code-block:: console $ cinder create --volume-type \ b. If you use the openstack command: .. code-block:: console $ openstack volume create --size --type \ **Changing QoS settings** To change the QoS settings, use the Retype function to change the volume type to one that has different QoS specs. You can also change a volume type for which no QoS specs are set to a volume type for which QoS specs are set, and vice versa. **Clearing QoS settings** To clear the QoS settings, clear the association between the volume type and QoS specs, and then delete the QoS specs. Data deduplication and compression ---------------------------------- Use deduplication and compression to improve storage utilization using data reduction. For details, see `Capacity saving function: data deduplication and compression`_ in the `Provisioning Guide`_. **Enabling deduplication and compression** To use the deduplication and compression on the storage models, your storage administrator must first enable the deduplication and compression for the DP pool. For details about how to enable this setting, see the description of pool management in the `Hitachi Command Suite Configuration Manager REST API Reference Guide`_ or the `Hitachi Ops Center API Configuration Manager REST API Reference Guide`_. .. note:: * Do not set a subscription limit (virtualVolumeCapacityRate) for the DP pool. Creating a volume with deduplication and compression enabled <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<< To create a volume with the deduplication and compression setting enabled, enable deduplication and compression for the relevant volume type. **Procedure** 1. To enable the deduplication and compression setting, specify the value ``deduplication_compression`` for ``hbsd:capacity_saving`` in the extra specs for the volume type. 2. When creating a volume of the volume type created in the previous step, you can create a volume with the deduplication and compression function enabled. Deleting a volume with deduplication and compression enabled <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<< The cinder delete command finishes when the storage system starts the LDEV deletion process. The LDEV cannot be reused until the LDEV deletion process is completed on the storage system. Port scheduler -------------- You can use the port scheduler function to reduce the number of WWNs, which are storage system resource. In Hitachi block storage driver, if host groups are created automatically, host groups are created for each compute node or VM (in an environment that has a WWN for each VM). If you do not use the port scheduler function, host groups are created and the same WWNs are registered in all of the ports that are specified for the configuration option ``hitachi_compute_target_ports`` or for the configuration option ``hitachi_target_ports``. For Hitachi storage devices, a maximum of 255 host groups and 255 WWNs can be registered for one port. When volumes are attached, the upper limit on the number of WWNs that can be registered might be unexpectedly exceeded. For the port scheduler function, when the cinder-volume service starts, the Fibre Channel Zone Manager obtains the WWNs of active compute nodes and of active VMs. 
When volumes are attached, the WWNs are registered in a round-robin procedure, in the same order as the order of ports specified for the configuration option ``hitachi_compute_target_ports`` or for the configuration option ``hitachi_target_ports``. If you want to use the port scheduler function, set the configuration option ``hitachi_port_scheduler``. .. note:: * Only Fibre Channel is supported. For details about ports, see Fibre Channel connectivity. * If a host group already exists in any of the ports specified for the configuration option ``hitachi_compute_target_ports`` or for the configuration option ``hitachi_target_ports``, no new host group will be created on those ports. * Restarting the cinder-volume service re-initializes the round robin scheduling determined by the configuration option ``hitachi_compute_target_ports`` or the configuration option ``hitachi_target_ports``. * The port scheduler function divides up the active WWNs from each fabric controller and registers them to each port. For this reason, the number of WWNs registered may vary from port to port. Port assignment using extra specs --------------------------------- Defining particular ports in the Hitachi-supported extra spec ``hbsd:target_ports`` determines which of the ports specified by the configuration options ``hitachi_target_ports`` or the configuration option ``hitachi_compute_target_ports`` are used to create LUN paths during volume attach operations for each volume type. .. note:: * Use a comma to separate multiple ports. * In a Global-Active Device configuration, use the extra spec ``hbsd:target_ports`` for the primary storage system and the extra spec ``hbsd:remote_target_ports`` for the secondary storage system. * In a Global-Active Device configuration, the ports specified for the extra spec ``hbsd:target_ports`` must be specified for both the configuration options for the primary storage system (``hitachi_target_ports`` or ``hitachi_compute_target_ports``) and for the secondary storage system (``hitachi_mirror_target_ports`` or ``hitachi_mirror_compute_target_ports``). .. Document Hyperlinks .. _Global-Active Device User Guide: https://knowledge.hitachivantara.com/ Documents/Management_Software/SVOS/9.8.7/Global-Active_Device .. _Hitachi Command Suite Configuration Manager REST API Reference Guide: https://knowledge.hitachivantara.com/Documents/Management_Software/ Ops_Center/API_Configuration_Manager/10.5.x/REST_API_Reference_Guide .. _Hitachi Ops Center API Configuration Manager REST API Reference Guide: https://knowledge.hitachivantara.com/Documents/Management_Software/ Ops_Center/10.9.x/API_Configuration_Manager .. _Hitachi Thin Image User Guide: https://knowledge.hitachivantara.com/ Documents/Management_Software/SVOS/7.3.1/Administration_Guides/ Thin_Image_User_Guide .. _Workflow for creating a GAD environment: https://knowledge.hitachivantara.com/Documents/Management_Software/SVOS/ 9.8.7/Global-Active_Device/04_Configuration_and_pair_management_using_CCI .. _Provisioning Guide: https://docs.hitachivantara.com/r/en-us/svos/9.8.7/mk-97hm85026/ introduction-to-provisioning .. _Capacity saving function\: data deduplication and compression: https://docs.hitachivantara.com/r/en-us/svos/9.8.7/mk-97hm85026/ about-adaptive-data-reduction/capacity-saving/ capacity-saving-function-data-deduplication-and-compression .. _bug #2072317: https://bugs.launchpad.net/cinder/+bug/2072317 ..
_Performance Guide: https://docs.hitachivantara.com/r/en-us/svos/9.6.0/mk-98rd9019/ hitachi-performance-monitor-operations ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/doc/source/configuration/block-storage/drivers/hp-msa-driver.rst0000664000175000017500000001441300000000000027430 0ustar00zuulzuul00000000000000======================================= HPE MSA Fibre Channel and iSCSI drivers ======================================= The ``HPMSAFCDriver`` and ``HPMSAISCSIDriver`` Cinder drivers allow the HPE MSA 2060, 1060, 2050, 1050, 2040, and 1040 arrays to be used for Block Storage in OpenStack deployments. System requirements ~~~~~~~~~~~~~~~~~~~ To use the HPMSA drivers, the following are required: - HPE MSA 2060, 1060, 2050, 1050, 2040 or 1040 array with: - iSCSI or FC host interfaces - G22x, V270 or I100 firmware or later - Network connectivity between the OpenStack host and the array management interfaces - HTTPS or HTTP must be enabled on the array Supported operations ~~~~~~~~~~~~~~~~~~~~ - Create, delete, attach, and detach volumes. - Create, list, and delete volume snapshots. - Create a volume from a snapshot. - Copy an image to a volume. - Copy a volume to an image. - Clone a volume. - Extend a volume. - Migrate a volume with back-end assistance. - Retype a volume. - Manage and unmanage a volume. Configuring the array ~~~~~~~~~~~~~~~~~~~~~ #. Verify that the array can be managed using an HTTPS connection. HTTP can also be used if ``hpmsa_api_protocol=http`` is placed into the appropriate sections of the ``cinder.conf`` file, but this option is deprecated and will be removed in a future release. Confirm that virtual pools A and B are present if you plan to use virtual pools for OpenStack storage. If you plan to use vdisks instead of virtual pools, create or identify one or more vdisks to be used for OpenStack storage; typically this will mean creating or setting aside one disk group for each of the A and B controllers. #. Edit the ``cinder.conf`` file to define a storage back-end entry for each storage pool on the array that will be managed by OpenStack. Each entry consists of a unique section name, surrounded by square brackets, followed by options specified in ``key=value`` format. * The ``hpmsa_pool_name`` value specifies the name of the storage pool or vdisk on the array. * The ``volume_backend_name`` option value can be a unique value, if you wish to be able to assign volumes to a specific storage pool on the array, or a name that is shared among multiple storage pools to let the volume scheduler choose where new volumes are allocated. * The rest of the options will be repeated for each storage pool in a given array: * ``volume_driver`` specifies the Cinder driver name. * ``san_ip`` specifies the IP addresses or host names of the array's management controllers. * ``san_login`` and ``san_password`` specify the username and password of an array user account with ``manage`` privileges. * ``driver_use_ssl`` should be set to ``true`` to enable use of the HTTPS protocol. * ``hpmsa_iscsi_ips`` specifies the iSCSI IP addresses for the array if using the iSCSI transport protocol. In the examples below, two back ends are defined, one for pool A and one for pool B, and a common ``volume_backend_name`` is used so that a single volume type definition can be used to allocate volumes from both pools. **Example: iSCSI example back-end entries** .. 
code-block:: ini [pool-a] hpmsa_pool_name = A volume_backend_name = hpmsa-array volume_driver = cinder.volume.drivers.san.hp.hpmsa_iscsi.HPMSAISCSIDriver san_ip = 10.1.2.3,10.1.2.4 san_login = manage san_password = !manage hpmsa_iscsi_ips = 10.2.3.4,10.2.3.5 driver_use_ssl = true [pool-b] hpmsa_pool_name = B volume_backend_name = hpmsa-array volume_driver = cinder.volume.drivers.san.hp.hpmsa_iscsi.HPMSAISCSIDriver san_ip = 10.1.2.3,10.1.2.4 san_login = manage san_password = !manage hpmsa_iscsi_ips = 10.2.3.4,10.2.3.5 driver_use_ssl = true **Example: Fibre Channel example back-end entries** .. code-block:: ini [pool-a] hpmsa_pool_name = A volume_backend_name = hpmsa-array volume_driver = cinder.volume.drivers.san.hp.hpmsa_fc.HPMSAFCDriver san_ip = 10.1.2.3,10.1.2.4 san_login = manage san_password = !manage driver_use_ssl = true [pool-b] hpmsa_pool_name = B volume_backend_name = hpmsa-array volume_driver = cinder.volume.drivers.san.hp.hpmsa_fc.HPMSAFCDriver san_ip = 10.1.2.3,10.1.2.4 san_login = manage san_password = !manage driver_use_ssl = true #. If any ``volume_backend_name`` value refers to a vdisk rather than a virtual pool, add an additional statement ``hpmsa_pool_type = linear`` to that back end entry. #. If HTTPS is not enabled in the array, include ``hpmsa_api_protocol = http`` in each of the back-end definitions. #. If HTTPS is enabled, you can enable certificate verification with the option ``driver_ssl_cert_verify = True``. You may also use the ``driver_ssl_cert_path`` option to specify the path to a CA_BUNDLE file containing CAs other than those in the default list. #. Modify the ``[DEFAULT]`` section of the ``cinder.conf`` file to add an ``enabled_backends`` parameter specifying the back-end entries you added, and a ``default_volume_type`` parameter specifying the name of a volume type that you will create in the next step. **Example: [DEFAULT] section changes** .. code-block:: ini [DEFAULT] # ... enabled_backends = pool-a,pool-b default_volume_type = hpmsa #. Create a new volume type for each distinct ``volume_backend_name`` value that you added to the ``cinder.conf`` file. The example below assumes that the same ``volume_backend_name=hpmsa-array`` option was specified in all of the entries, and specifies that the volume type ``hpmsa`` can be used to allocate volumes from any of them. **Example: Creating a volume type** .. code-block:: console $ openstack volume type create hpmsa $ openstack volume type set --property volume_backend_name=hpmsa-array hpmsa #. After modifying the ``cinder.conf`` file, restart the ``cinder-volume`` service. Driver-specific options ~~~~~~~~~~~~~~~~~~~~~~~ The following table contains the configuration options that are specific to the HPMSA drivers. .. 
config-table:: :config-target: HPE MSA cinder.volume.drivers.san.hp.hpmsa_common ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/doc/source/configuration/block-storage/drivers/hpe-3par-driver.rst0000664000175000017500000004650700000000000027673 0ustar00zuulzuul00000000000000=============================================================== HPE 3PAR, HPE Primera, HPE Alletra 9k and HPE Alletra MP Driver =============================================================== The ``HPE3PARFCDriver`` and ``HPE3PARISCSIDriver`` drivers, which are based on the Block Storage service (Cinder) plug-in architecture, run volume operations by communicating with the HPE 3PAR, HPE Primera and HPE Alletra 9k storage systems over HTTP, HTTPS, and SSH connections. The HTTP & HTTPS communications use ``python-3parclient``, which is part of PyPi. For information on HPE storage systems, refer to `the Alletra Storage product page `_. System requirements ~~~~~~~~~~~~~~~~~~~ To use the HPE 3PAR, HPE Primera, HPE Alletra 9k and HPE Alletra MP drivers, install the following software and components on the HPE 3PAR storage system: * HPE 3PAR Operating System software version 3.1.3 MU1 or higher. * Deduplication provisioning requires SSD disks and HPE 3PAR Operating System software version 3.2.1 MU1 or higher. * Enabling Flash Cache Policy requires the following: * Array must contain SSD disks. * HPE 3PAR Operating System software version 3.2.1 MU2 or higher. * python-3parclient version 4.2.0 or newer. * Flash Cache must be enabled on the array with the CLI command :command:`createflashcache SIZE`, where size must be in 16 GB increments. For example, :command:`createflashcache 128g` will create 128 GB of Flash Cache for each node pair in the array. * The Dynamic Optimization is required to support any feature that results in a volume changing provisioning type or CPG. This may apply to the volume :command:`migrate`, :command:`retype` and :command:`manage` commands. * The Virtual Copy feature supports any operation that involves volume snapshots. This applies to the volume :command:`snapshot-*` commands. * Enabling Volume Compression requires the following: * Array must contain SSD disks. * HPE 3PAR Operating System software version 3.3.1 MU1 or higher. * HPE 3PAR Storage System with 8k or 20k series * HPE 3PAR Web Services API Server must be enabled and running. * One Common Provisioning Group (CPG). * Additionally, you must install the ``python-3parclient`` version 4.2.0 or newer from PyPi on the system with the enabled Block Storage service volume drivers. To use the HPE Primera, Alletra 9k and Alletra MP backends, install the following software and components on the HPE storage system: * Operating System software: * HPE Primera: version 4.4.0 or higher. * HPE Alletra 9k: version 9.4.0 or higher. * HPE Alletra MP: version 10.4.2.23 or higher. * On HPE Primera/Alletra 9k/Alletra MP storage system, Dedup & Compression is combined as single option 'deco'. Due to this, only either 'thin' volume or 'deco' volume can be created. * Also, port number 443 is used instead of 8080. This only affects cinder configuration. * Additionally, you must install the ``python-3parclient`` version 4.2.14 or newer from PyPi on the system with the enabled Block Storage service volume drivers. Supported operations ~~~~~~~~~~~~~~~~~~~~ * Create, delete, attach, and detach volumes. * Create, list, and delete volume snapshots. * Create a volume from a snapshot. 
* Copy an image to a volume. * Copy a volume to an image. * Clone a volume. * Extend a volume. * Migrate a volume with back-end assistance. * Retype a volume. * Manage and unmanage a volume. * Manage and unmanage a snapshot. * Replicate host volumes. * Fail-over host volumes. * Fail-back host volumes. * Retype a replicated volume. * Create, delete, update, snapshot, and clone generic volume groups. * Create and delete generic volume group snapshots. * Create a generic volume group from a group snapshot or another group. * Volume Compression. * Group Replication with More Granularity (Tiramisu). * Volume Revert to Snapshot. * Additional Backend Capabilities. * Report Backend State in Service List. * Attach a volume to multiple servers simultaneously (multiattach). * Peer Persistence. Volume type support for both HPE 3PAR drivers includes the ability to set the following capabilities in the OpenStack Block Storage API ``cinder.api.contrib.types_extra_specs`` volume type extra specs extension module: * ``hpe3par:snap_cpg`` * ``hpe3par:provisioning`` * ``hpe3par:persona`` * ``hpe3par:vvs`` * ``hpe3par:flash_cache`` * ``hpe3par:compression`` To work with the default filter scheduler, the key values are case sensitive and scoped with ``hpe3par:``. For information about how to set the key-value pairs and associate them with a volume type, run the following command: .. code-block:: console $ openstack help volume type .. note:: Volumes that are cloned only support the extra specs keys cpg, snap_cpg, provisioning and vvs. The others are ignored. In addition the comments section of the cloned volume in the HPE 3PAR / Primera / Alletra 9k / Alletra MP array is not populated. If volume types are not used or a particular key is not set for a volume type, the following defaults are used: * ``hpe3par:cpg`` - Defaults to the ``hpe3par_cpg`` setting in the ``cinder.conf`` file. * ``hpe3par:snap_cpg`` - Defaults to the ``hpe3par_snap`` setting in the ``cinder.conf`` file. If ``hpe3par_snap`` is not set, it defaults to the ``hpe3par_cpg`` setting. * ``hpe3par:provisioning`` - Defaults to ``thin`` provisioning, the valid values are ``thin``, ``full``, and ``dedup``. * ``hpe3par:persona`` - Defaults to the ``2 - Generic-ALUA`` persona. The valid values are: * ``1 - Generic`` * ``2 - Generic-ALUA`` * ``3 - Generic-legacy`` * ``4 - HPUX-legacy`` * ``5 - AIX-legacy`` * ``6 - EGENERA`` * ``7 - ONTAP-legacy`` * ``8 - VMware`` * ``9 - OpenVMS`` * ``10 - HPUX`` * ``11 - WindowsServer`` * ``hpe3par:flash_cache`` - Defaults to ``false``, the valid values are ``true`` and ``false``. QoS support for both HPE 3PAR drivers includes the ability to set the following capabilities in the OpenStack Block Storage API ``cinder.api.contrib.qos_specs_manage`` qos specs extension module: * ``minBWS`` * ``maxBWS`` * ``minIOPS`` * ``maxIOPS`` * ``latency`` * ``priority`` The qos keys above no longer require to be scoped but must be created and associated to a volume type. For information about how to set the key-value pairs and associate them with a volume type, run the following commands: .. code-block:: console $ openstack help volume qos The following keys require that the HPE 3PAR/Primera/Alletra 9k/ Alletra MP array has a Priority Optimization enabled. ``hpe3par:vvs`` The virtual volume set name that has been predefined by the Administrator with quality of service (QoS) rules associated to it. If you specify extra_specs ``hpe3par:vvs``, the qos_specs ``minIOPS``, ``maxIOPS``, ``minBWS``, and ``maxBWS`` settings are ignored. 
``minBWS`` The QoS I/O issue bandwidth minimum goal in MBs. If not set, the I/O issue bandwidth rate has no minimum goal. ``maxBWS`` The QoS I/O issue bandwidth rate limit in MBs. If not set, the I/O issue bandwidth rate has no limit. ``minIOPS`` The QoS I/O issue count minimum goal. If not set, the I/O issue count has no minimum goal. ``maxIOPS`` The QoS I/O issue count rate limit. If not set, the I/O issue count rate has no limit. ``latency`` The latency goal in milliseconds. ``priority`` The priority of the QoS rule over other rules. If not set, the priority is ``normal``, valid values are ``low``, ``normal`` and ``high``. .. note:: Since the Icehouse release, minIOPS and maxIOPS must be used together to set I/O limits. Similarly, minBWS and maxBWS must be used together. If only one is set the other will be set to the same value. The following key requires that the HPE 3PAR/Primera/Alletra 9k/Alletra MP array has an Adaptive Flash Cache enabled. * ``hpe3par:flash_cache`` - The flash-cache policy, which can be turned on and off by setting the value to ``true`` or ``false``. * ``hpe3par:compression`` - The volume compression, which can be turned on and off by setting the value to ``true`` or ``false``. Other restrictions and considerations for ``hpe3par:compression``: * For a compressed volume, minimum volume size needed is 16 GB; otherwise resulting volume will be created successfully but will not be a compressed volume. * A full provisioned volume cannot be compressed, if a compression is enabled and provisioning type requested is full, the resulting volume defaults to thinly provisioned compressed volume. * While creating volume on HPE Primera/Alletra 9k/Alletra MP storage system, only below two combinations are supported. If any other combination is used, then volume is not created. * thin volume: provisioning = ``thin`` and compression = ``false`` * deco volume: provisioning = ``dedup`` and compression = ``true`` LDAP and AD authentication is now supported in the HPE 3PAR driver. The 3PAR back end must be properly configured for LDAP and AD authentication prior to configuring the volume driver. For details on setting up LDAP with 3PAR, see the 3PAR user guide. Once configured, ``hpe3par_username`` and ``hpe3par_password`` parameters in ``cinder.conf`` can be used with LDAP and AD credentials. Enable the HPE 3PAR Fibre Channel and iSCSI drivers ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The ``HPE3PARFCDriver`` and ``HPE3PARISCSIDriver`` are installed with the OpenStack software. #. Install the ``python-3parclient`` Python package on the OpenStack Block Storage system. .. code-block:: console $ pip install 'python-3parclient>=4.0,<5.0' #. Verify that the HPE 3PAR Web Services API server is enabled and running on the HPE 3PAR / Primera / Alletra 9k / Alletra MP storage system. a. Log onto the HPE 3PAR / Primera / Alletra 9k / Alletra MP storage system with administrator access. .. code-block:: console $ ssh 3paradm@ b. View the current state of the Web Services API Server. .. code-block:: console $ showwsapi -Service- -State- -HTTP_State- HTTP_Port -HTTPS_State- HTTPS_Port -Version- Enabled Active Enabled 8008 Enabled 8080 1.1 c. If the Web Services API Server is disabled, start it. .. code-block:: console $ startwsapi #. If the HTTP or HTTPS state is disabled, enable one of them. .. code-block:: console $ setwsapi -http enable or .. code-block:: console $ setwsapi -https enable .. note:: To stop the Web Services API Server, use the :command:`stopwsapi` command. 
For other options run the :command:`setwsapi -h` command. #. If you are not using an existing CPG, create a CPG on the HPE 3PAR / Primera / Alletra 9k / Alletra MP storage system to be used as the default location for creating volumes. #. Make the following changes in the ``/etc/cinder/cinder.conf`` file. .. code-block:: ini # WSAPI Server URL. # This setting applies to all: 3PAR, Primera, Alletra 9k and Alletra MP. # Example 1: for 3PAR, URL is: https://<3par ip>:8080/api/v1 # Example 2: for Primera/Alletra 9k/Alletra MP, URL is: https://:443/api/v1 # 3PAR / Primera / Alletra 9k / Alletra MP username with the 'edit' role hpe3par_username=edit3par # 3PAR / Primera / Alletra 9k / Alletra MP password for the user specified in hpe3par_username hpe3par_password=3parpass # 3PAR / Primera / Alletra 9k / Alletra MP CPG to use for volume creation hpe3par_cpg=OpenStackCPG_RAID5_NL # IP address of SAN controller for SSH access to the array san_ip=10.10.22.241 # Username for SAN controller for SSH access to the array san_login=3paradm # Password for SAN controller for SSH access to the array san_password=3parpass # FIBRE CHANNEL DRIVER # (uncomment the next line to enable the FC driver) #volume_driver=cinder.volume.drivers.hpe.hpe_3par_fc.HPE3PARFCDriver # iSCSI DRIVER # If you enable the iSCSI driver, you must also set values # for hpe3par_iscsi_ips or iscsi_ip_address in this file. # Note: The iSCSI driver is supported with 3PAR (all versions) # and Primera (version 4.2 or higher). If you configure iSCSI # with Primera 4.0 or 4.1, the driver will fail to start. # (uncomment the next line to enable the iSCSI driver) #volume_driver=cinder.volume.drivers.hpe.hpe_3par_iscsi.HPE3PARISCSIDriver # iSCSI multiple port configuration # hpe3par_iscsi_ips=10.10.220.253:3261,10.10.222.234 # Still available for single port iSCSI configuration #iscsi_ip_address=10.10.220.253 # Enable HTTP debugging to 3PAR / Primera / Alletra 9k / Alletra MP hpe3par_debug=False # Enable CHAP authentication for iSCSI connections. hpe3par_iscsi_chap_enabled=false # The CPG to use for Snapshots for volumes. If empty hpe3par_cpg will be # used. hpe3par_cpg_snap=OpenStackSNAP_CPG # Time in hours to retain a snapshot. You can't delete it before this # expires. hpe3par_snapshot_retention=48 # Time in hours when a snapshot expires and is deleted. This must be # larger than retention. hpe3par_snapshot_expiration=72 # The ratio of oversubscription when thin provisioned volumes are # involved. Default ratio is 20.0, this means that a provisioned # capacity can be 20 times of the total physical capacity. max_over_subscription_ratio=20.0 # This flag represents the percentage of reserved back-end capacity. reserved_percentage=15 .. note:: You can enable only one driver on each cinder instance unless you enable multiple back-end support. See the Cinder multiple back-end support instructions to enable this feature. .. note:: You can configure one or more iSCSI addresses by using the ``hpe3par_iscsi_ips`` option. Separate multiple IP addresses with a comma (``,``). When you configure multiple addresses, the driver selects the iSCSI port with the fewest active volumes at attach time. The 3PAR array does not allow the default port 3260 to be changed, so IP ports need not be specified. #. Save the changes to the ``cinder.conf`` file and restart the cinder-volume service. The HPE 3PAR Fibre Channel and iSCSI drivers are now enabled on your OpenStack system. If you experience problems, review the Block Storage service log files for errors. 
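Once the driver is enabled, a volume type can be mapped to this backend and given driver extra specs. The following is a minimal, illustrative sketch (the type name ``3par-thin`` and backend name ``3par_backend`` are assumptions) using the ``hpe3par:provisioning`` extra spec described earlier:

.. code-block:: console

   $ openstack volume type create 3par-thin
   $ openstack volume type set --property volume_backend_name=3par_backend 3par-thin
   $ openstack volume type set --property hpe3par:provisioning=thin 3par-thin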
The following table contains all the configuration options supported by the HPE 3PAR Fibre Channel and iSCSI drivers. .. config-table:: :config-target: 3PAR cinder.volume.drivers.hpe.hpe_3par_common Specify NSP for FC Bootable Volume ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Consider a system connected to HPE 3PAR via FC where the multipath setting is NOT used in cinder.conf. When the user tries to create a bootable volume, it intermittently fails with the following error: ``Fibre Channel volume device not found`` This happens when a zone is created using the second or a later target from the 3PAR backend. In this case, the HPE 3PAR client code picks up the first target to form the initiator target map. This is illustrated by the example below. Sample output of the showport command: ``$ showport -sortcol 6`` .. code-block:: console N:S:P Mode State ----Node_WWN---- -Port_WWN/HW_Addr- Type Protocol Partner FailoverState 0:1:1 target ready 2FF70002AC002DB6 20110002AC002DB6 host FC - - 0:1:2 target ready 2FF70002AC002DB6 20120002AC002DB6 host FC 1:1:2 none 1:1:1 initiator ready 2FF70002AC002DB6 21110002AC002DB6 rcfc FC - - 1:1:2 target ready 2FF70002AC002DB6 21120002AC002DB6 host FC 0:1:2 none 2:1:1 initiator ready 2FF70002AC002DB6 22110002AC002DB6 rcfc FC - - 2:1:2 target ready 2FF70002AC002DB6 22120002AC002DB6 host FC 3:1:2 none 3:1:1 target ready 2FF70002AC002DB6 23110002AC002DB6 host FC - - 3:1:2 target ready 2FF70002AC002DB6 23120002AC002DB6 host FC 2:1:2 none Suppose a zone is created using targets "2:1:2" and "3:1:2" from the above output. Then the initiator target map is created using target "0:1:1" only. In such a case, the path is not found and bootable volume creation fails. To avoid the failure mentioned above, the user can specify the target in the 3PAR backend section of cinder.conf as follows: ``hpe3par_target_nsp = 3:1:2`` Using the specified NSP, the corresponding WWN information is fetched, the initiator target map is created from that WWN information, and the bootable volume is created successfully. Note: If the option (NSP) mentioned above is not specified in cinder.conf, the original flow is executed, that is, the first target is picked and bootable volume creation may fail. Peer Persistence support ~~~~~~~~~~~~~~~~~~~~~~~~ For a 3PAR/Primera backend configured with replication, currently only Active/Passive replication is supported by 3PAR/Primera in OpenStack. When a failover happens, nova does not support force-detaching the volume from the dead primary backend and re-attaching it to the secondary backend; a storage engineer's manual intervention is required. To overcome this scenario, support for Peer Persistence was added. On a system with Peer Persistence configured, when a replicated volume is attached to an instance, a vlun is created automatically on the secondary backend in addition to the primary backend, so that when a failover happens, it is seamless. For Peer Persistence support, perform the following steps: 1] enable multipath 2] set the replication mode to "sync" 3] configure a quorum witness server Specify the IP address of the quorum witness server in ``/etc/cinder/cinder.conf`` [within the backend section] as given below: .. code-block:: console [3pariscsirep] hpe3par_api_url = http://10.50.3.7:8008/api/v1 hpe3par_username = hpe3par_password = ... ... replication_device = backend_id:CSIM-EOS12_1611702, replication_mode:sync, quorum_witness_ip:10.50.3.192, hpe3par_api_url:http://10.50.3.22:8008/api/v1, ... ...
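A replicated volume is then requested through a volume type. The following is a minimal sketch that assumes the standard Cinder ``replication_enabled`` extra spec and an illustrative type name; adjust it to your own backend configuration:

.. code-block:: console

   $ openstack volume type create 3par-replicated
   $ openstack volume type set --property replication_enabled='<is> True' 3par-replicated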
Support duplicated FQDN in network ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The 3PAR driver uses the FQDN of the node that is doing the attach as an unique identifier to map the volume. The problem is that the FQDN is not always unique, there are environments where the same FQDN can be found in different systems, and in those cases if both try to attach volumes the second system will fail. One example of this happening would be on a QA environment where you are creating VMs and they all have names like controller-0.localdomain and compute-0.localdomain. To support these kind of environments, the user can specify below flag in backend_defaults section or the specific cinder driver section of cinder.conf as follows: ``unique_fqdn_network = False`` When this flag is used, then during attach volume to instance, iscsi initiator name is used instead of FQDN. If above mentioned flag is not specified in cinder.conf, then its value is considered as True (by default) and FQDN is used (existing behavior). ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/doc/source/configuration/block-storage/drivers/hpe-xp-driver.rst0000664000175000017500000000573500000000000027453 0ustar00zuulzuul00000000000000============================ HPE XP block storage driver ============================ HPE XP block storage driver provides Fibre Channel and iSCSI support for HPE XP storages. System requirements ~~~~~~~~~~~~~~~~~~~ Supported storages: +-----------------+------------------------+ | Storage model | Firmware version | +=================+========================+ | XP8 | 90-01-41 or later | +-----------------+------------------------+ | XP7 | 80-05-43 or later | +-----------------+------------------------+ Required storage licenses: * Thin Provisioning * Fast Snap Supported operations ~~~~~~~~~~~~~~~~~~~~ * Create, delete, attach, and detach volumes. * Create, list, and delete volume snapshots. * Create a volume from a snapshot. * Create, list, update, and delete consistency groups. * Create, list, and delete consistency group snapshots. * Copy a volume to an image. * Copy an image to a volume. * Clone a volume. * Extend a volume. * Migrate a volume. * Get volume statistics. * Efficient non-disruptive volume backup. * Manage and unmanage a volume. * Attach a volume to multiple instances at once (multi-attach). * Revert a volume to a snapshot. .. note:: The volume having snapshots cannot be extended in this driver. Configuration ~~~~~~~~~~~~~ Set up HPE XP storage ---------------------- You need to specify settings as described below for storage systems. For details about each setting, see the user's guide of the storage systems. #. User accounts Create a storage device account belonging to the Administrator User Group. #. THP pool Create a THP pool that is used by the driver. #. Ports Enable Port Security for the ports used by the driver. Set up HPE XP storage volume driver ------------------------------------ Set the volume driver to HPE XP block storage driver by setting the volume_driver option in the cinder.conf file as follows: If you use Fibre Channel: .. code-block:: ini [hpe_xp] volume_driver = cinder.volume.drivers.hpe.xp.hpe_xp_fc.HPEXPFCDriver volume_backend_name = hpexp_fc san_ip = 1.2.3.4 san_login = hpexpuser san_password = password hpexp_storage_id = 123456789012 hpexp_pools = pool0 If you use iSCSI: .. 
code-block:: ini [hpe_xp] volume_driver = cinder.volume.drivers.hpe.xp.hpe_xp_iscsi.HPEXPISCSIDriver volume_backend_name = hpexp_iscsi san_ip = 1.2.3.4 san_login = hpexpuser san_password = password hpexp_storage_id = 123456789012 hpexp_pools = pool0 This table shows configuration options for HPE XP block storage driver. .. config-table:: :config-target: HPE XP block storage driver cinder.volume.drivers.hpe.xp.hpe_xp_rest Required options ---------------- - ``san_ip`` IP address of SAN controller - ``san_login`` Username for SAN controller - ``san_password`` Password for SAN controller - ``hpexp_storage_id`` Product number of the storage system. - ``hpexp_pools`` Pool number(s) or pool name(s) of the THP pool. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/doc/source/configuration/block-storage/drivers/huawei-storage-driver.rst0000664000175000017500000003500700000000000031171 0ustar00zuulzuul00000000000000==================== Huawei volume driver ==================== Huawei volume driver can be used to provide functions such as the logical volume and snapshot for virtual machines (VMs) in the OpenStack Block Storage driver that supports iSCSI and Fibre Channel protocols. Version mappings ~~~~~~~~~~~~~~~~ The following table describes the version mappings among the Block Storage driver, Huawei storage system and OpenStack: .. list-table:: **Version mappings among the Block Storage driver and Huawei storage system** :widths: 30 35 :header-rows: 1 * - Description - Storage System Version * - Create, delete, expand, attach, detach, manage and unmanage volumes Create volumes with assigned storage pools Create volumes with assigned disk types Create, delete and update a consistency group Copy an image to a volume Copy a volume to an image Auto Zoning SmartThin Volume Migration Replication V2.1 Create, delete, manage, unmanage and backup snapshots Create and delete a cgsnapshot - OceanStor T series V2R2 C00/C20/C30 OceanStor V3 V3R1C10/C20 V3R2C10 V3R3C00/C10/C20 OceanStor 2200V3 V300R005C00 OceanStor 2600V3 V300R005C00 OceanStor 18500/18800 V1R1C00/C20/C30 V3R3C00 OceanStor Dorado V300R001C00 OceanStor V3 V300R006C00 OceanStor 2200V3 V300R006C00 OceanStor 2600V3 V300R006C00 * - Clone a volume Create volume from snapshot Retype SmartQoS SmartTier SmartCache Thick - OceanStor T series V2R2 C00/C20/C30 OceanStor V3 V3R1C10/C20 V3R2C10 V3R3C00/C10/C20 OceanStor 2200V3 V300R005C00 OceanStor 2600V3 V300R005C00 OceanStor 18500/18800V1R1C00/C20/C30 OceanStor V3 V300R006C00 OceanStor 2200V3 V300R006C00 OceanStor 2600V3 V300R006C00 * - SmartPartition - OceanStor T series V2R2 C00/C20/C30 OceanStor V3 V3R1C10/C20 V3R2C10 V3R3C00/C10/C20 OceanStor 2600V3 V300R005C00 OceanStor 18500/18800V1R1C00/C20/C30 OceanStor V3 V300R006C00 OceanStor 2600V3 V300R006C00 * - Hypermetro Hypermetro consistency group - OceanStor V3 V3R3C00/C10/C20 OceanStor 2600V3 V3R5C00 OceanStor 18500/18800 V3R3C00 OceanStor Dorado V300R001C00 OceanStor V3 V300R006C00 OceanStor 2600V3 V300R006C00 Volume driver configuration ~~~~~~~~~~~~~~~~~~~~~~~~~~~ This section describes how to configure the Huawei volume driver for either iSCSI storage or Fibre Channel storage. **Pre-requisites** When creating a volume from image, install the ``multipath`` tool and add the following configuration keys for each backend section or in ``[backend_defaults]`` section as a common configuration for all backends in ``/etc/cinder/cinder.conf`` file: .. 
code-block:: ini use_multipath_for_image_xfer = True enforce_multipath_for_image_xfer = True To configure the volume driver, follow the steps below: #. In ``/etc/cinder``, create a Huawei-customized driver configuration file. The file format is XML. #. Change the name of the driver configuration file based on the site requirements, for example, ``cinder_huawei_conf.xml``. #. Configure parameters in the driver configuration file. Each product has its own value for the ``Product`` parameter under the ``Storage`` xml block. The full xml file with the appropriate ``Product`` parameter is as below: .. code-block:: xml PRODUCT PROTOCOL xxxxxxxx xxxxxxxx https://x.x.x.x:8088/deviceManager/rest/ xxx xxx xxx x.x.x.x The corresponding ``Product`` values for each product are as below: * **For T series V2** .. code-block:: xml TV2 * **For V3** .. code-block:: xml V3 * **For OceanStor 18000 series** .. code-block:: xml 18000 * **For OceanStor Dorado series** .. code-block:: xml Dorado The ``Protocol`` value to be used is ``iSCSI`` for iSCSI and ``FC`` for Fibre Channel as shown below: .. code-block:: xml # For iSCSI iSCSI # For Fibre channel FC .. note:: For details about the parameters in the configuration file, see the `Configuration file parameters`_ section. #. Configure the ``cinder.conf`` file. In the ``[default]`` block of ``/etc/cinder/cinder.conf``, enable the ``VOLUME_BACKEND``: .. code-block:: ini enabled_backends = VOLUME_BACKEND Add a new block ``[VOLUME_BACKEND]``, and add the following contents: .. code-block:: ini [VOLUME_BACKEND] volume_driver = VOLUME_DRIVER cinder_huawei_conf_file = /etc/cinder/cinder_huawei_conf.xml volume_backend_name = Huawei_Storage * ``volume_driver`` indicates the loaded driver. * ``cinder_huawei_conf_file`` indicates the specified Huawei-customized configuration file. * ``volume_backend_name`` indicates the name of the backend. Add information about remote devices in ``/etc/cinder/cinder.conf`` in target backend block for ``Hypermetro``. .. code-block:: ini [VOLUME_BACKEND] volume_driver = VOLUME_DRIVER cinder_huawei_conf_file = /etc/cinder/cinder_huawei_conf.xml volume_backend_name = Huawei_Storage metro_san_user = xxx metro_san_password = xxx metro_domain_name = xxx metro_san_address = https://x.x.x.x:8088/deviceManager/rest/ metro_storage_pools = xxx Add information about remote devices in ``/etc/cinder/cinder.conf`` in target backend block for ``Replication``. .. code-block:: ini [VOLUME_BACKEND] volume_driver = VOLUME_DRIVER cinder_huawei_conf_file = /etc/cinder/cinder_huawei_conf.xml volume_backend_name = Huawei_Storage replication_device = backend_id: xxx, storage_pool :xxx, san_address: https://x.x.x.x:8088/deviceManager/rest/, san_user: xxx, san_password: xxx, iscsi_default_target_ip: x.x.x.x .. note:: By default, the value for ``Hypermetro`` and ``Replication`` is ``None``. For details about the parameters in the configuration file, see the `Configuration file parameters`_ section. The ``volume-driver`` value for every product is as below: .. code-block:: ini # For iSCSI volume_driver = cinder.volume.drivers.huawei.huawei_driver.HuaweiISCSIDriver # For FC volume_driver = cinder.volume.drivers.huawei.huawei_driver.HuaweiFCDriver #. Run the :command:`service cinder-volume restart` command to restart the Block Storage service. Configuring iSCSI Multipathing ------------------------------ To configure iSCSI Multipathing, follow the steps below: #. 
Add the port group settings in the Huawei-customized driver configuration file and configure the port group name needed by an initiator. .. code-block:: xml x.x.x.x #. Enable the multipathing switch of the Compute service module. Add ``volume_use_multipath = True`` in ``[libvirt]`` of ``/etc/nova/nova.conf``. #. Run the :command:`service nova-compute restart` command to restart the ``nova-compute`` service. Configuring FC Multipathing ------------------------------ To configure FC Multipathing, follow the steps below: #. Enable the multipathing switch of the Compute service module. Add ``volume_use_multipath = True`` in ``[libvirt]`` of ``/etc/nova/nova.conf``. #. Run the :command:`service nova-compute restart` command to restart the ``nova-compute`` service. Configuring CHAP and ALUA ------------------------- On a public network, any application server whose IP address resides on the same network segment as that of the storage system's iSCSI host port can access the storage system and perform read and write operations on it. This poses risks to the data security of the storage system. To ensure the storage system's access security, you can configure ``CHAP`` authentication to control application servers' access to the storage system. Adjust the driver configuration file as follows: .. code-block:: xml ``ALUA`` indicates a multipathing mode. 0 indicates that ``ALUA`` is disabled. 1 indicates that ``ALUA`` is enabled. ``CHAPinfo`` indicates the user name and password authenticated by ``CHAP``. The format is ``mmuser; mm-user@storage``. The user name and password are separated by semicolons (``;``). Configuring multiple storage ---------------------------- Multiple storage systems configuration example: .. code-block:: ini enabled_backends = v3_fc, 18000_fc [v3_fc] volume_driver = cinder.volume.drivers.huawei.huawei_driver.HuaweiFCDriver cinder_huawei_conf_file = /etc/cinder/cinder_huawei_conf_v3_fc.xml volume_backend_name = huawei_v3_fc [18000_fc] volume_driver = cinder.volume.drivers.huawei.huawei_driver.HuaweiFCDriver cinder_huawei_conf_file = /etc/cinder/cinder_huawei_conf_18000_fc.xml volume_backend_name = huawei_18000_fc Configuration file parameters ----------------------------- This section describes mandatory and optional configuration file parameters of the Huawei volume driver. .. list-table:: **Mandatory parameters** :widths: 10 10 50 10 :header-rows: 1 * - Parameter - Default value - Description - Applicable to * - Product - ``-`` - Type of a storage product. Possible values are ``TV2``, ``18000`` and ``V3``. - All * - Protocol - ``-`` - Type of a connection protocol. The possible value is either ``'iSCSI'`` or ``'FC'``. - All * - RestURL - ``-`` - Access address of the REST interface, ``https://x.x.x.x/devicemanager/rest/``. The value ``x.x.x.x`` indicates the management IP address. OceanStor 18000 uses the preceding setting, and V2 and V3 require you to add the port number ``8088``, for example, ``https://x.x.x.x:8088/deviceManager/rest/``. If you need to configure multiple RestURLs, separate them by semicolons (;). - All * - UserName - ``-`` - User name of a storage administrator. - All * - UserPassword - ``-`` - Password of a storage administrator. - All * - StoragePool - ``-`` - Name of a storage pool to be used. If you need to configure multiple storage pools, separate them by semicolons (``;``). - All .. note:: The value of ``StoragePool`` cannot contain Chinese characters. ..
list-table:: **Optional parameters** :widths: 20 10 50 15 :header-rows: 1 * - Parameter - Default value - Description - Applicable to * - LUNType - Thick - Type of the LUNs to be created. The value can be ``Thick`` or ``Thin``. Dorado series only support ``Thin`` LUNs. - All * - WriteType - 1 - Cache write type, possible values are: ``1`` (write back), ``2`` (write through), and ``3`` (mandatory write back). - All * - LUNcopyWaitInterval - 5 - After LUN copy is enabled, the plug-in frequently queries the copy progress. You can set a value to specify the query interval. - All * - Timeout - 432000 - Timeout interval for waiting LUN copy of a storage device to complete. The unit is second. - All * - Initiator Name - ``-`` - Name of a compute node initiator. - All * - Initiator TargetIP - ``-`` - IP address of the iSCSI port provided for compute nodes. - All * - Initiator TargetPortGroup - ``-`` - IP address of the iSCSI target port that is provided for compute nodes. - All * - DefaultTargetIP - ``-`` - Default IP address of the iSCSI target port that is provided for compute nodes. - All * - OSType - Linux - Operating system of the Nova compute node's host. - All * - HostIP - ``-`` - IP address of the Nova compute node's host. - All * - metro_san_user - ``-`` - User name of a storage administrator of hypermetro remote device. - V3R3/2600 V3R5/18000 V3R3 * - metro_san_password - ``-`` - Password of a storage administrator of hypermetro remote device. - V3R3/2600 V3R5/18000 V3R3 * - metro_domain_name - ``-`` - Hypermetro domain name configured on ISM. - V3R3/2600 V3R5/18000 V3R3 * - metro_san_address - ``-`` - Access address of the REST interface, https://x.x.x.x/devicemanager/rest/. The value x.x.x.x indicates the management IP address. - V3R3/2600 V3R5/18000 V3R3 * - metro_storage_pools - ``-`` - Remote storage pool for hypermetro. - V3R3/2600 V3R5/18000 V3R3 * - backend_id - ``-`` - Target device ID. - All * - storage_pool - ``-`` - Pool name of target backend when failover for replication. - All * - san_address - ``-`` - Access address of the REST interface, https://x.x.x.x/devicemanager/rest/. The value x.x.x.x indicates the management IP address. - All * - san_user - ``-`` - User name of a storage administrator of replication remote device. - All * - san_password - ``-`` - Password of a storage administrator of replication remote device. - All * - iscsi_default_target_ip - ``-`` - Remote transaction port IP. - All .. important:: The ``Initiator Name``, ``Initiator TargetIP``, and ``Initiator TargetPortGroup`` are ``ISCSI`` parameters and therefore not applicable to ``FC``. The following are the Huawei driver specific options that may be set in `cinder.conf`: .. config-table:: :config-target: Huawei cinder.volume.drivers.huawei.common ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/doc/source/configuration/block-storage/drivers/ibm-flashsystem-volume-driver.rst0000664000175000017500000001340300000000000032655 0ustar00zuulzuul00000000000000============================== IBM FlashSystem 840/900 driver ============================== The volume driver for FlashSystem provides OpenStack Block Storage hosts with access to IBM FlashSystems. This driver is to be used with IBM FlashSystem 840/900 systems only. For any other FlashSystem storage systems (including 5xxx, 7xxx, and 9xxx platforms) see the :doc:`IBM Storage Virtualize family volume driver documentation `. 
Supported operations ~~~~~~~~~~~~~~~~~~~~ These operations are supported: - Create, delete, attach, and detach volumes. - Create, list, and delete volume snapshots. - Create a volume from a snapshot. - Copy an image to a volume. - Copy a volume to an image. - Clone a volume. - Extend a volume. - Get volume statistics. - Manage and unmanage a volume. Configure FlashSystem ~~~~~~~~~~~~~~~~~~~~~ Configure storage array ----------------------- The volume driver requires a pre-defined array. You must create an array on the FlashSystem before using the volume driver. An existing array can also be used and existing data will not be deleted. .. note:: FlashSystem can only create one array, so no configuration option is needed for the IBM FlashSystem driver to assign it. Configure user authentication for the driver -------------------------------------------- The driver requires access to the FlashSystem management interface using SSH. It should be provided with the FlashSystem management IP using the ``san_ip`` flag, and the management port should be provided by the ``san_ssh_port`` flag. By default, the port value is configured to be port 22 (SSH). .. note:: Make sure the compute node running the ``cinder-volume`` driver has SSH network access to the storage system. Using password authentication, assign a password to the user on the FlashSystem. For more detail, see the driver configuration flags for the user and password here: :ref:`config_fc_flags` or :ref:`config_iscsi_flags`. There are some common configuration options for either driver: .. list-table:: List of common configuration options for IBM FlashSystem drivers :header-rows: 1 * - Flag name - Type - Default - Description * - ``san_ip`` - Required - - Management IP or host name * - ``san_ssh_port`` - Optional - 22 - Management port * - ``san_login`` - Required - - Management login user name * - ``san_password`` - Required - - Management login password IBM FlashSystem FC driver ~~~~~~~~~~~~~~~~~~~~~~~~~ Data Path configuration ----------------------- Using Fiber Channel (FC), each FlashSystem node should have at least one WWPN port configured. If the ``flashsystem_multipath_enabled`` flag is set to ``True`` in the Block Storage service configuration file, the driver uses all available WWPNs to attach the volume to the instance. If the flag is not set, the driver uses the WWPN associated with the volume's preferred node (if available). Otherwise, it uses the first available WWPN of the system. The driver obtains the WWPNs directly from the storage system. You do not need to provide these WWPNs to the driver. .. note:: Using FC, ensure that the block storage hosts have FC connectivity to the FlashSystem. .. _config_fc_flags: Enable IBM FlashSystem FC driver -------------------------------- Set the volume driver to the FlashSystem driver by setting the ``volume_driver`` option in the ``cinder.conf`` configuration file, as follows: .. code-block:: ini volume_driver = cinder.volume.drivers.ibm.flashsystem_fc.FlashSystemFCDriver To enable the IBM FlashSystem FC driver, configure the following options in the ``cinder.conf`` configuration file: .. config-table:: :config-target: IBM FlashSystem FC cinder.volume.drivers.ibm.flashsystem_common cinder.volume.drivers.ibm.flashsystem_fc IBM FlashSystem iSCSI driver ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Network configuration --------------------- Using iSCSI, each FlashSystem node should have at least one iSCSI port configured. iSCSI IP addresses of IBM FlashSystem can be obtained by FlashSystem GUI or CLI. 
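For example, assuming SSH management access with the credentials listed in the common options table above, the port information can be listed from the FlashSystem CLI; the user name and address below are placeholders only:

.. code-block:: console

   $ ssh superuser@1.2.3.4 lsportip

The iSCSI IP addresses appear in the ``IP_address`` column of the output, as noted later in this section.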
For more information, see the appropriate IBM Redbook for the FlashSystem. .. note:: Using iSCSI, ensure that the compute nodes have iSCSI network access to the IBM FlashSystem. .. _config_iscsi_flags: Enable IBM FlashSystem iSCSI driver ----------------------------------- Set the volume driver to the FlashSystem driver by setting the ``volume_driver`` option in the ``cinder.conf`` configuration file, as follows: .. code-block:: ini volume_driver = cinder.volume.drivers.ibm.flashsystem_iscsi.FlashSystemISCSIDriver To enable IBM FlashSystem iSCSI driver, configure the following options in the ``cinder.conf`` configuration file: .. config-table:: :config-target: IBM FlashSystem iSCSI cinder.volume.drivers.ibm.flashsystem_common cinder.volume.drivers.ibm.flashsystem_iscsi .. note:: On the cluster of the FlashSystem, the ``iscsi_ip_address`` column is the seventh column ``IP_address`` of the output of ``lsportip``. .. note:: On the cluster of the FlashSystem, port ID column is the first column ``id`` of the output of ``lsportip``, not the sixth column ``port_id``. Limitations and known issues ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ IBM FlashSystem only works when: .. code-block:: ini open_access_enabled=off .. note:: The ``flashsystem_multihost_enabled`` setting allows the driver to map a vdisk to more than one host at a time. This scenario occurs during migration of a virtual machine with an attached volume; the volume is simultaneously mapped to both the source and destination compute hosts. If your deployment does not require attaching vdisks to multiple hosts, setting this flag to ``False`` will provide added safety. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/doc/source/configuration/block-storage/drivers/ibm-gpfs-volume-driver.rst0000664000175000017500000002413700000000000031260 0ustar00zuulzuul00000000000000================================ IBM Spectrum Scale volume driver ================================ IBM Spectrum Scale is a flexible software-defined storage that can be deployed as high performance file storage or a cost optimized large-scale content repository. IBM Spectrum Scale, previously known as IBM General Parallel File System (GPFS), is designed to scale performance and capacity with no bottlenecks. IBM Spectrum Scale is a cluster file system that provides concurrent access to file systems from multiple nodes. The storage provided by these nodes can be direct attached, network attached, SAN attached, or a combination of these methods. Spectrum Scale provides many features beyond common data access, including data replication, policy based storage management, and space efficient file snapshot and clone operations. How the Spectrum Scale volume driver works ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The Spectrum Scale volume driver, named ``gpfs.py``, enables the use of Spectrum Scale in a fashion similar to that of the NFS driver. With the Spectrum Scale driver, instances do not actually access a storage device at the block level. Instead, volume backing files are created in a Spectrum Scale file system and mapped to instances, which emulate a block device. .. note:: Spectrum Scale must be installed and cluster has to be created on the storage nodes in the OpenStack environment. A file system must also be created and mounted on these nodes before configuring the cinder service to use Spectrum Scale storage. For more details, please refer to `Spectrum Scale product documentation `_. 
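As a minimal sketch of what this looks like in ``cinder.conf`` (the back-end name and mount path below are placeholders; the deployment modes described later in this section cover the full set of options):

.. code-block:: ini

   [spectrum_scale]
   volume_driver = cinder.volume.drivers.ibm.gpfs.GPFSDriver
   gpfs_mount_point_base = /gpfs/fs1/cinder-volumes
   volume_backend_name = spectrum_scale

Volumes created through this back end appear as backing files under the ``gpfs_mount_point_base`` directory.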
Optionally, the Image service can be configured to store glance images in a Spectrum Scale file system. When a Block Storage volume is created from an image, if both image data and volume data reside in the same Spectrum Scale file system, the data from image file is moved efficiently to the volume file using copy-on-write optimization strategy. Supported operations ~~~~~~~~~~~~~~~~~~~~ - Create, delete, attach, and detach volumes. - Create, delete volume snapshots. - Create a volume from a snapshot. - Create cloned volumes. - Extend a volume. - Migrate a volume. - Retype a volume. - Create, delete consistency groups. - Create, delete consistency group snapshots. - Copy an image to a volume. - Copy a volume to an image. - Backup and restore volumes. Driver configurations ~~~~~~~~~~~~~~~~~~~~~ The Spectrum Scale volume driver supports three modes of deployment. Mode 1 – Pervasive Spectrum Scale Client ---------------------------------------- When Spectrum Scale is running on compute nodes as well as on the cinder node. For example, Spectrum Scale filesystem is available to both Compute and Block Storage services as a local filesystem. To use Spectrum Scale driver in this deployment mode, set the ``volume_driver`` in the ``cinder.conf`` as: .. code-block:: ini volume_driver = cinder.volume.drivers.ibm.gpfs.GPFSDriver The following table contains the configuration options supported by the Spectrum Scale driver in this deployment mode. .. include:: ../../tables/cinder-ibm_gpfs.inc .. note:: The ``gpfs_images_share_mode`` flag is only valid if the Image Service is configured to use Spectrum Scale with the ``gpfs_images_dir`` flag. When the value of this flag is ``copy_on_write``, the paths specified by the ``gpfs_mount_point_base`` and ``gpfs_images_dir`` flags must both reside in the same GPFS file system and in the same GPFS file set. Mode 2 – Remote Spectrum Scale Driver with Local Compute Access --------------------------------------------------------------- When Spectrum Scale is running on compute nodes, but not on the Block Storage node. For example, Spectrum Scale filesystem is only available to Compute service as Local filesystem where as Block Storage service accesses Spectrum Scale remotely. In this case, ``cinder-volume`` service running Spectrum Scale driver access storage system over SSH and creates volume backing files to make them available on the compute nodes. This mode is typically deployed when the cinder and glance services are running inside a Linux container. The container host should have Spectrum Scale client running and GPFS filesystem mount path should be bind mounted into the Linux containers. .. note:: Note that the user IDs present in the containers should match as that in the host machines. For example, the containers running cinder and glance services should be privileged containers. To use Spectrum Scale driver in this deployment mode, set the ``volume_driver`` in the ``cinder.conf`` as: .. code-block:: ini volume_driver = cinder.volume.drivers.ibm.gpfs.GPFSRemoteDriver The following table contains the configuration options supported by the Spectrum Scale driver in this deployment mode. .. include:: ../../tables/cinder-ibm_gpfs_remote.inc .. note:: The ``gpfs_images_share_mode`` flag is only valid if the Image Service is configured to use Spectrum Scale with the ``gpfs_images_dir`` flag. 
When the value of this flag is ``copy_on_write``, the paths specified by the ``gpfs_mount_point_base`` and ``gpfs_images_dir`` flags must both reside in the same GPFS file system and in the same GPFS file set. Mode 3 – Remote Spectrum Scale Access ------------------------------------- When both Compute and Block Storage nodes are not running Spectrum Scale software and do not have access to Spectrum Scale file system directly as local filesystem. In this case, we create an NFS export on the volume path and make it available on the cinder node and on compute nodes. Optionally, if one wants to use the copy-on-write optimization to create bootable volumes from glance images, one need to also export the glance images path and mount it on the nodes where glance and cinder services are running. The cinder and glance services will access the GPFS filesystem through NFS. To use Spectrum Scale driver in this deployment mode, set the ``volume_driver`` in the ``cinder.conf`` as: .. code-block:: ini volume_driver = cinder.volume.drivers.ibm.gpfs.GPFSNFSDriver The following table contains the configuration options supported by the Spectrum Scale driver in this deployment mode. .. include:: ../../tables/cinder-ibm_gpfs_nfs.inc Additionally, all the options of the base NFS driver are applicable for GPFSNFSDriver. The above table lists the basic configuration options which are needed for initialization of the driver. .. note:: The ``gpfs_images_share_mode`` flag is only valid if the Image Service is configured to use Spectrum Scale with the ``gpfs_images_dir`` flag. When the value of this flag is ``copy_on_write``, the paths specified by the ``gpfs_mount_point_base`` and ``gpfs_images_dir`` flags must both reside in the same GPFS file system and in the same GPFS file set. Volume creation options ~~~~~~~~~~~~~~~~~~~~~~~ It is possible to specify additional volume configuration options on a per-volume basis by specifying volume metadata. The volume is created using the specified options. Changing the metadata after the volume is created has no effect. The following table lists the volume creation options supported by the GPFS volume driver. .. list-table:: **Volume Create Options for Spectrum Scale Volume Drivers** :widths: 10 25 :header-rows: 1 * - Metadata Item Name - Description * - fstype - Specifies whether to create a file system or a swap area on the new volume. If fstype=swap is specified, the mkswap command is used to create a swap area. Otherwise the mkfs command is passed the specified file system type, for example ext3, ext4 or ntfs. * - fslabel - Sets the file system label for the file system specified by fstype option. This value is only used if fstype is specified. * - data_pool_name - Specifies the GPFS storage pool to which the volume is to be assigned. Note: The GPFS storage pool must already have been created. * - replicas - Specifies how many copies of the volume file to create. Valid values are 1, 2, and, for Spectrum Scale V3.5.0.7 and later, 3. This value cannot be greater than the value of the MaxDataReplicasattribute of the file system. * - dio - Enables or disables the Direct I/O caching policy for the volume file. Valid values are yes and no. * - write_affinity_depth - Specifies the allocation policy to be used for the volume file. Note: This option only works if allow-write-affinity is set for the GPFS data pool. * - block_group_factor - Specifies how many blocks are laid out sequentially in the volume file to behave as a single large block. 
Note: This option only works if allow-write-affinity is set for the GPFS data pool. * - write_affinity_failure_group - Specifies the range of nodes (in GPFS shared nothing architecture) where replicas of blocks in the volume file are to be written. See Spectrum Scale documentation for more details about this option. This example shows the creation of a 50GB volume with an ``ext4`` file system labeled ``newfs`` and direct IO enabled: .. code-block:: console $ openstack volume create --property fstype=ext4 fslabel=newfs dio=yes \ --size 50 VOLUME Note that if the metadata for the volume is changed later, the changes do not reflect in the backend. User will have to manually change the volume attributes corresponding to metadata on Spectrum Scale filesystem. Operational notes for GPFS driver ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Volume snapshots are implemented using the GPFS file clone feature. Whenever a new snapshot is created, the snapshot file is efficiently created as a read-only clone parent of the volume, and the volume file uses copy-on-write optimization strategy to minimize data movement. Similarly when a new volume is created from a snapshot or from an existing volume, the same approach is taken. The same approach is also used when a new volume is created from an Image service image, if the source image is in raw format, and ``gpfs_images_share_mode`` is set to ``copy_on_write``. The Spectrum Scale driver supports encrypted volume back end feature. To encrypt a volume at rest, specify the extra specification ``gpfs_encryption_rest = True``. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/doc/source/configuration/block-storage/drivers/ibm-storage-volume-driver.rst0000664000175000017500000004742000000000000031765 0ustar00zuulzuul00000000000000================================ IBM Storage Driver for OpenStack ================================ Introduction ~~~~~~~~~~~~ The IBM Storage Driver for OpenStack is a software component of the OpenStack cloud environment that enables utilization of storage resources provided by supported IBM storage systems. The driver was validated on storage systems, as detailed in the Supported storage systems section below. After the driver is configured on the OpenStack Cinder nodes, storage volumes can be allocated by the Cinder nodes to the Nova nodes. Virtual machines on the Nova nodes can then utilize these storage resources. Concept diagram --------------- This figure illustrates how an IBM storage system is connected to the OpenStack cloud environment and provides storage resources when the IBM Storage Driver for OpenStack is configured on the OpenStack Cinder nodes. The OpenStack cloud is connected to the IBM storage system over Fibre Channel. Remote cloud users can issue requests for storage resources from the OpenStack cloud. These requests are transparently handled by the IBM Storage Driver, which communicates with the IBM storage system and controls the storage volumes on it. The IBM storage resources are then provided to the Nova nodes in the OpenStack cloud. .. figure:: ../../figures/ibm-storage-nova-concept.png Compatibility and requirements ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ This section specifies the compatibility and requirements of the IBM Storage Driver for OpenStack. Supported storage systems ------------------------- The IBM Storage Driver for OpenStack supports the IBM storage systems, as detailed in the following table. 
+-----------------+--------------------+--------------------+ | Storage system | Microcode version | Connectivity | +=================+====================+====================+ | IBM DS8870 | 7.5 SP4 or later, | Fibre Channel (FC) | | | 7.5 with RESTful | | | | API patch | | +-----------------+--------------------+--------------------+ | IBM DS8880 | 8.1 or later | Fibre Channel (FC) | +-----------------+--------------------+--------------------+ Copy Services license --------------------- Copy Services features help you implement storage solutions to keep your business running 24 hours a day, 7 days a week by providing image caching, replication and cloning functions. The Copy Services license is based on usable capacity of the volumes involved in Copy Services functionality. The Copy Services license is available for the following license scopes: FB and ALL (both FB and CKD). The Copy Services license includes the following features: * Global Mirror * Metro Mirror * Metro/Global Mirror * Point-in-Time Copy/FlashCopy® * z/OS® Global Mirror * z/OS Metro/Global Mirror Incremental Resync (RMZ) The Copy Services license feature codes are ordered in increments up to a specific capacity. For example, if you require 160 TB of capacity, order 10 of feature code 8251 (10 TB each up to 100 TB capacity), and 4 of feature code 8252 (15 TB each, for an extra 60 TB). The Copy Services license includes the following feature codes. +--------------+-----------------------------------------------------+ | Feature Code | Feature code for licensed function indicator | +==============+=====================================================+ | 8250 | CS - inactive | +--------------+-----------------------------------------------------+ | 8251 | CS - 10 TB (up to 100 TB capacity) | +--------------+-----------------------------------------------------+ | 8252 | CS - 15 TB (from 100.1 TB to 250 TB capacity) | +--------------+-----------------------------------------------------+ | 8253 | CS - 25 TB (from 250.1 TB to 500 TB capacity) | +--------------+-----------------------------------------------------+ | 8254 | CS - 75 TB (from 500.1 to 1250 TB capacity) | +--------------+-----------------------------------------------------+ | 8255 | CS - 175 TB (from 1250.1 TB to 3000 TB capacity) | +--------------+-----------------------------------------------------+ | 8256 | CS - 300 TB (from 3000.1 TB to 6000 TB capacity) | +--------------+-----------------------------------------------------+ | 8260 | CS - 500 TB (from 6000.1 TB to 10,000 TB capacity) | +--------------+-----------------------------------------------------+ The following ordering rules apply when you order the Copy Services license: * The Copy Services license should be ordered based on the total usable capacity of all volumes involved in one or more Copy Services relationships. * The licensed authorization must be equal to or less that the total usable capacity allocated to the volumes that participate in Copy Services operations. * You must purchase features for both the source (primary) and target (secondary) storage system. Required software on the OpenStack Cinder and Nova nodes -------------------------------------------------------- The IBM Storage Driver makes use of the following software on the OpenStack Cinder and Nova-compute nodes. 
+------------------------+----------------------------------+ | Software | Installed on | +========================+==================================+ | Ubuntu Server (16.04), | All OpenStack Cinder nodes | | x64 | | | | | | Red Hat Enterprise | | | Linux (RHEL) 7.x, x64 | | | | | | CentOS Linux 7.x, x64 | | | | | | KVM for IBM z Systems | | +------------------------+----------------------------------+ | IBM Storage Host | All OpenStack Cinder and Nova | | Attachment Kit for | compute nodes that connect to | | Linux | storage systems and use RHEL 7.x | | | or CentOS Linux 7.x | +------------------------+----------------------------------+ | Linux patch package | All OpenStack Cinder nodes | +------------------------+----------------------------------+ | sysfsutils utility | All OpenStack Cinder nodes on FC | | | network | +------------------------+----------------------------------+ Configuration ~~~~~~~~~~~~~ Configure the driver manually by changing the ``cinder.conf`` file as follows: .. code-block:: ini volume_driver = cinder.volume.drivers.ibm.ibm_storage.IBMStorageDriver Configuration Description for DS8000 ------------------------------------ .. include:: ../../tables/cinder-ibm_storage.inc Replication parameters ---------------------- +-----------------+------------------------------+---------------+ | Parameter | Description | Applicable to | +=================+==============================+===============+ | replication | Volume replication | DS8000 | | _device | parameters | | +-----------------+------------------------------+---------------+ | backend_id | IP address or host name of | DS8000 | | | the target storage system | | +-----------------+------------------------------+---------------+ | san_login | User name to be used during | DS8000 | | | replication procedure | | +-----------------+------------------------------+---------------+ | san_password | Password to be used during | DS8000 | | | replication procedure | | | | (base64-encoded) | | +-----------------+------------------------------+---------------+ | san_clustername | Pool name on the target | DS8000 | | | storage system | | +-----------------+------------------------------+---------------+ | port_pairs | ID pairs of IO ports, | DS8000 | | | participating in | | | | replication | | +-----------------+------------------------------+---------------+ | lss_range_for | LSS range to reserve for | DS8000 | | _cg | consistency groups | | +-----------------+------------------------------+---------------+ Security ~~~~~~~~ The following information provides an overview of security for the IBM Storage Driver for OpenStack. Configuring Cinder nodes for trusted communication ------------------------------------------------------------------ The IBM Storage Driver for OpenStack communicates with DS8000 over HTTPS, using self-signed certificate or certificate signed by a certificate authority (CA). Configure a trusted communication link to ensure a successful attachment of a Cinder node to a DS8000 storage system, as detailed in the following sections. Configuring trusted communication link -------------------------------------- Before configuring a DS8000 backend, complete the following steps to establish the chain of trust. #. In your operating system shell, run this command to obtain the certificate: ``openssl x509 -in <(openssl s_client -connect :8452 -prexit 2>/dev/null) -text -out .pem`` If the certificate is self-signed, the following information is displayed: .. 
code-block:: ini --- Certificate chain 0 s:/CN=ds8000.ibm.com i:/CN=ds8000.ibm.com --- #. Create an exception by moving the certificate ``.pem to the /opt/ibm/ds8k_certs/.pem`` file. #. Verify that the is the same as configured in san_ip. #. If the certificate subject and issuer are different, the certificate is signed by a CA, as illustrated below: .. code-block:: ini --- Certificate chain 0 s:/C=US/ST=New York/L=Armonk/O=IBM/OU=EI/CN=www.ibm.com i:/C=US/O=GeoTrust Inc./CN=GeoTrust SSL CA - G3 1 s:/C=US/O=GeoTrust Inc./CN=GeoTrust SSL CA - G3 i:/C=US/O=GeoTrust Inc./CN=GeoTrust Global CA --- #. Add a public certificate to trusted CA certificate store to complete the chain of trust, as explained below. #. Verify trusted communication link, as explained below. Adding a public certificate to trusted CA certificate store ----------------------------------------------------------- Add the CA public certificate to the trusted CA certificates store on the Cinder node, according to procedures for the operating system in use. #. For RHEL 7.x or CentOS 7.x, place the certificate to be trusted (in PEM format) into the /etc/pki/ca-trust/source/anchors/ directory. Then, run the ``sudo update-ca-trust`` command. #. For Ubuntu 18.04, place the certificate to be trusted (in PEM format) into the /usr/local/share/ca-certificates/ directory. Rename the file, using the ``*.crt`` extension. Then, run the ``sudo update-ca-certificates`` command. #. For Python requests library with certifi, run the ``cat ca_public_certificate.pem`` command to append the certificate to the location of the certifi trust store file. For example: .. code-block:: ini cat ca_public_certificate.pem >> /usr/local/lib/python3.6/ dist-packages/certifi/cacert.pem. Verifying trusted communication link ------------------------------------ Verify the chain of trust has been established successfully. #. Obtain the location of the Python library requests trust store, according to the installation type. #. RHEL 7.x or CentOS 7.x: .. code-block:: console # python3 Python 3.6.8 (default, Aug 7 2019, 17:28:10) [GCC 4.8.5 20150623 (Red Hat 4.8.5-39)] on linux Type "help", "copyright", "credits" or "license" for more information. >>> import requests >>> print(requests.certs.where()) /etc/pki/ca-trust/extracted/openssl/ ca-bundle.trust.crt #. Ubuntu 18.04: .. code-block:: console # python3 Python 3.6.9 (default, Nov 7 2019, 10:44:02) [GCC 8.3.0] on linux Type "help", "copyright", "credits" or "license" for more information. >>> import requests >>> print(requests.certs.where()) /etc/ssl/certs/ca-certificates.crt #. Python requests library with certifi: .. code-block:: console # python3 Python 3.6.9 (default, Nov 7 2019, 10:44:02) [GCC 8.3.0] on linux Type "help", "copyright", "credits" or "license" for more information. >>> import requests >>> print(requests.certs.where()) /usr/local/lib/python3.6/dist-packages/ certifi/cacert.pem #. Run the ``openssl s_client -CAfile -connect :8452 `_. .. note:: IBM Storage Virtualize family is formerly known as IBM Storwize. As a result, the product code contains 'Storwize' terminology and prefixes. Supported operations ~~~~~~~~~~~~~~~~~~~~ The IBM Storage Virtualize family volume driver supports the following block storage service volume operations: - Create, list, delete, attach (map), and detach (unmap) volumes. - Create, list, and delete volume snapshots. - Copy an image to a volume. - Copy a volume to an image. - Clone a volume. - Extend a volume. - Retype a volume. - Create a volume from a snapshot. 
- Create, list, and delete consistency group. - Create, list, and delete consistency group snapshot. - Modify consistency group (add or remove volumes). - Create consistency group from source (source can be a CG or CG snapshot) - Manage an existing volume. - Failover-host for replicated back ends. - Failback-host for replicated back ends. - Create, list, and delete replication group. - Enable, disable replication group. - Failover, failback replication group. Configure the Storage Virtualize family system ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Network configuration --------------------- The Storage Virtualize family system must be configured for iSCSI, Fibre Channel, or both. If using iSCSI, each Storage Virtualize family node should have at least one iSCSI IP address. The Storage Virtualize family driver uses an iSCSI IP address associated with the volume's preferred node (if available) to attach the volume to the instance, otherwise it uses the first available iSCSI IP address of the system. The driver obtains the iSCSI IP address directly from the storage system. You do not need to provide these iSCSI IP addresses directly to the driver. .. note:: If using iSCSI, ensure that the compute nodes have iSCSI network access to the Storage Virtualize family system. If using Fibre Channel (FC), each Storage Virtualize family node should have at least one WWPN port configured. The driver uses all available WWPNs to attach the volume to the instance. The driver obtains the WWPNs directly from the storage system. You do not need to provide these WWPNs directly to the driver. .. note:: If using FC, ensure that the compute nodes have FC connectivity to the Storage Virtualize family system. iSCSI CHAP authentication ------------------------- If using iSCSI for data access and the ``storwize_svc_iscsi_chap_enabled`` is set to ``True``, the driver will associate randomly-generated CHAP secrets with all hosts on the Storage Virtualize family. The compute nodes use these secrets when creating iSCSI connections. .. warning:: CHAP secrets are added to existing hosts as well as newly-created ones. If the CHAP option is enabled, hosts will not be able to access the storage without the generated secrets. .. note:: Not all OpenStack Compute drivers support CHAP authentication. Please check compatibility before using. .. note:: CHAP secrets are passed from OpenStack Block Storage to Compute in clear text. This communication should be secured to ensure that CHAP secrets are not discovered. Configure storage pools ----------------------- The IBM Storage Virtualize family driver can allocate volumes in multiple pools. The pools should be created in advance and be provided to the driver using the ``storwize_svc_volpool_name`` configuration flag in the form of a comma-separated list. For the complete list of configuration flags, see :ref:`config_flags`. Configure user authentication for the driver -------------------------------------------- The driver requires access to the Storage Virtualize family system management interface. The driver communicates with the management using SSH. The driver should be provided with the Storage Virtualize family management IP using the ``san_ip`` flag, and the management port should be provided by the ``san_ssh_port`` flag. By default, the port value is configured to be port 22 (SSH). Also, you can set the secondary management IP using the ``storwize_san_secondary_ip`` flag. .. 
note:: Make sure the compute node running the cinder-volume management driver has SSH network access to the storage system. To allow the driver to communicate with the Storage Virtualize family system, you must provide the driver with a user on the storage system. The driver has two authentication methods: password-based authentication and SSH key pair authentication. The user should have an Administrator role. It is suggested to create a new user for the management driver. Please consult with your storage and security administrator regarding the preferred authentication method and how passwords or SSH keys should be stored in a secure manner. .. note:: When creating a new user on the Storage Virtualize family system, make sure the user belongs to the Administrator group or to another group that has an Administrator role. If using password authentication, assign a password to the user on the Storage Virtualize family system. The driver configuration flags for the user and password are ``san_login`` and ``san_password``, respectively. If you are using the SSH key pair authentication, create SSH private and public keys using the instructions below or by any other method. Associate the public key with the user by uploading the public key: select the :guilabel:`choose file` option in the Storage Virtualize family management GUI under :guilabel:`SSH public key`. Alternatively, you may associate the SSH public key using the command-line interface; details can be found in the Storage Virtualize family documentation. The private key should be provided to the driver using the ``san_private_key`` configuration flag. Create a SSH key pair with OpenSSH ---------------------------------- You can create an SSH key pair using OpenSSH, by running: .. code-block:: console $ ssh-keygen -t rsa The command prompts for a file to save the key pair. For example, if you select ``key`` as the filename, two files are created: ``key`` and ``key.pub``. The ``key`` file holds the private SSH key and ``key.pub`` holds the public SSH key. The command also prompts for a pass phrase, which should be empty. The private key file should be provided to the driver using the ``san_private_key`` configuration flag. The public key should be uploaded to the Storage Virtualize family system using the storage management GUI or command-line interface. .. note:: Ensure that Cinder has read permissions on the private key file. Configure the Storage Virtualize family driver ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Enable the Storage Virtualize family driver -------------------------------------------- Set the volume driver to the Storage Virtualize family driver by setting the ``volume_driver`` option in the ``cinder.conf`` file as follows: iSCSI: .. code-block:: ini [svc1234] volume_driver = cinder.volume.drivers.ibm.storwize_svc.storwize_svc_iscsi.StorwizeSVCISCSIDriver san_ip = 1.2.3.4 san_login = superuser san_password = passw0rd storwize_svc_volpool_name = cinder_pool1 volume_backend_name = svc1234 FC: .. code-block:: ini [svc1234] volume_driver = cinder.volume.drivers.ibm.storwize_svc.storwize_svc_fc.StorwizeSVCFCDriver san_ip = 1.2.3.4 san_login = superuser san_password = passw0rd storwize_svc_volpool_name = cinder_pool1 volume_backend_name = svc1234 Replication configuration ------------------------- Add the following to the back-end specification to specify another storage to replicate to: .. 
code-block:: ini replication_device = backend_id:rep_svc, san_ip:1.2.3.5, san_login:superuser, san_password:passw0rd, pool_name:cinder_pool1 The ``backend_id`` is a unique name of the remote storage, the ``san_ip``, ``san_login``, and ``san_password`` is authentication information for the remote storage. The ``pool_name`` is the pool name for the replication target volume. .. note:: Only one ``replication_device`` can be configured for one back end storage since only one replication target is supported now. .. _config_flags: Storage Virtualize family driver options in cinder.conf -------------------------------------------------------- The following options specify default values for all volumes. Some can be over-ridden using volume types, which are described below. .. note:: IBM Storage Virtualize family is formerly known as IBM Storwize. As a result, the product code contains 'Storwize' terminology and prefixes. .. include:: ../../tables/cinder-storwize.inc Note the following: * The authentication requires either a password (``san_password``) or SSH private key (``san_private_key``). One must be specified. If both are specified, the driver uses only the SSH private key. * The driver creates thin-provisioned volumes by default. The ``storwize_svc_vol_rsize`` flag defines the initial physical allocation percentage for thin-provisioned volumes, or if set to ``-1``, the driver creates full allocated volumes. More details about the available options are available in the Storage Virtualize family documentation. Placement with volume types --------------------------- The IBM Storage Virtualize family exposes capabilities that can be added to the ``extra specs`` of volume types, and used by the filter scheduler to determine placement of new volumes. Make sure to prefix these keys with ``capabilities:`` to indicate that the scheduler should use them. The following ``extra specs`` are supported: - ``capabilities:volume_backend_name`` - Specify a specific back-end where the volume should be created. The back-end name is a concatenation of the name of the Storage Virtualize family storage system as shown in ``lssystem``, an underscore, and the name of the pool (mdisk group). For example: .. code-block:: ini capabilities:volume_backend_name=myV7000_openstackpool - ``capabilities:compression_support`` - Specify a back-end according to compression support. A value of ``True`` should be used to request a back-end that supports compression, and a value of ``False`` will request a back-end that does not support compression. If you do not have constraints on compression support, do not set this key. Note that specifying ``True`` does not enable compression; it only requests that the volume be placed on a back-end that supports compression. Example syntax: .. code-block:: ini capabilities:compression_support=' True' .. note:: Currently, the compression_enabled() API that indicates compression_license support is not fully functional. It does not work on all storage types. Additional functionalities will be added in a later release. - ``capabilities:easytier_support`` - Similar semantics as the ``compression_support`` key, but for specifying according to support of the Easy Tier feature. Example syntax: .. code-block:: ini capabilities:easytier_support=' True' - ``capabilities:pool_name`` - Specify a specific pool to create volume if only multiple pools are configured. pool_name should be one value configured in storwize_svc_volpool_name flag. Example syntax: .. 
code-block:: ini capabilities:pool_name=cinder_pool2 Configure per-volume creation options ------------------------------------- Volume types can also be used to pass options to the IBM Storage Virtualize family driver, which over-ride the default values set in the configuration file. Contrary to the previous examples where the ``capabilities`` scope was used to pass parameters to the Cinder scheduler, options can be passed to the Storage Virtualize family driver with the ``drivers`` scope. The following ``extra specs`` keys are supported by the Storage Virtualize family driver: - rsize - warning - autoexpand - grainsize - compression - easytier - multipath - iogrp - mirror_pool - volume_topology - peer_pool - flashcopy_rate - clean_rate - cycle_period_seconds These keys have the same semantics as their counterparts in the configuration file. They are set similarly; for example, ``rsize=2`` or ``compression=False``. Example: Volume types --------------------- In the following example, we create a volume type to specify a controller that supports compression, and enable compression: .. code-block:: console $ openstack volume type create compressed $ openstack volume type set --property capabilities:compression_support=' True' --property drivers:compression=True compressed We can then create a 50GB volume using this type: .. code-block:: console $ openstack volume create "compressed volume" --type compressed --size 50 In the following example, create a volume type that enables synchronous replication (metro mirror): .. code-block:: console $ openstack volume type create ReplicationType $ openstack volume type set --property replication_type=" metro" \ --property replication_enabled=' True' --property volume_backend_name=svc234 ReplicationType In the following example, we create a volume type to support stretch cluster volume or mirror volume: .. code-block:: console $ openstack volume type create mirror_vol_type $ openstack volume type set --property volume_backend_name=svc1 \ --property drivers:mirror_pool=pool2 mirror_vol_type Volume types can be used, for example, to provide users with different - performance levels (such as, allocating entirely on an HDD tier, using Easy Tier for an HDD-SDD mix, or allocating entirely on an SSD tier) - resiliency levels (such as, allocating volumes in pools with different RAID levels) - features (such as, enabling/disabling Real-time Compression, replication volume creation) QOS --- The Storage Virtualize family driver provides QOS support for storage volumes by controlling the I/O amount. QOS is enabled by editing the ``etc/cinder/cinder.conf`` file and setting the ``storwize_svc_allow_tenant_qos`` to ``True``. There are three ways to set the Storage Virtualize family ``IOThrotting`` parameter for storage volumes: - Add the ``qos:IOThrottling`` key into a QOS specification and associate it with a volume type. - Add the ``qos:IOThrottling`` key into an extra specification with a volume type. - Add the ``qos:IOThrottling`` key to the storage volume metadata. .. note:: If you are changing a volume type with QOS to a new volume type without QOS, the QOS configuration settings will be removed. Operational notes for the Storage Virtualize family driver ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Migrate volumes --------------- In the context of OpenStack block storage's volume migration feature, the IBM Storage Virtualize family driver enables the storage's virtualization technology. 
When migrating a volume from one pool to another, the volume will appear in the destination pool almost immediately, while the storage moves the data in the background. .. note:: To enable this feature, both pools involved in a given volume migration must have the same values for ``extent_size``. If the pools have different values for ``extent_size``, the data will still be moved directly between the pools (not host-side copy), but the operation will be synchronous. Extend volumes -------------- The IBM Storage Virtualize family driver allows for extending a volume's size, but only for volumes without snapshots. Snapshots and clones -------------------- Snapshots are implemented using FlashCopy with no background copy (space-efficient). Volume clones (volumes created from existing volumes) are implemented with FlashCopy, but with background copy enabled. This means that volume clones are independent, full copies. While this background copy is taking place, attempting to delete or extend the source volume will result in that operation waiting for the copy to complete. Volume retype ------------- The IBM Storage Virtualize family driver enables you to modify volume types. When you modify volume types, you can also change these extra specs properties: - rsize - warning - autoexpand - grainsize - compression - easytier - iogrp - nofmtdisk - mirror_pool - volume_topology - peer_pool - flashcopy_rate - cycle_period_seconds .. note:: When you change the ``rsize``, ``grainsize`` or ``compression`` properties, volume copies are asynchronously synchronized on the array. .. note:: To change the ``iogrp`` property, IBM Storage Virtualize family firmware version 6.4.0 or later is required. Replication operation --------------------- Configure replication in volume type <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<< A volume is only replicated if the volume is created with a volume-type that has the extra spec ``replication_enabled`` set to `` True``. Three types of replication are supported now, global mirror(async), global mirror with change volume(async) and metro mirror(sync). It can be specified by a volume-type that has the extra spec ``replication_type`` set to `` global``, `` gmcv`` or `` metro``. If no ``replication_type`` is specified, global mirror will be created for replication. If ``replication_type`` set to `` gmcv``, cycle_period_seconds can be set as the cycling time perform global mirror relationship with multi cycling mode. Default value is 300. Example syntax: .. code-block:: console $ cinder type-create gmcv_type $ cinder type-key gmcv_type set replication_enabled=' True' \ replication_type=" gmcv" drivers:cycle_period_seconds=500 .. note:: It is better to establish the partnership relationship between the replication source storage and the replication target storage manually on the storage back end before replication volume creation. Failover host <<<<<<<<<<<<< The ``failover-host`` command is designed for the case where the primary storage is down. .. code-block:: console $ cinder failover-host cinder@svciscsi --backend_id target_svc_id If a failover command has been executed and the primary storage has been restored, it is possible to do a failback by simply specifying default as the ``backend_id``: .. code-block:: console $ cinder failover-host cinder@svciscsi --backend_id default .. 
note:: Before you perform a failback operation, synchronize the data from the replication target volume to the primary one on the storage back end manually, and do the failback only after the synchronization is done since the synchronization may take a long time. If the synchronization is not done manually, Storage Virtualize family block storage service driver will perform the synchronization and do the failback after the synchronization is finished. Replication group <<<<<<<<<<<<<<<<< Before creating replication group, a group-spec which key ``consistent_group_replication_enabled`` set to `` True`` should be set in group type. Volume type used to create group must be replication enabled, and its ``replication_type`` should be set either `` global`` or `` metro``. The "failover_group" api allows group to be failed over and back without failing over the entire host. Example syntax: - Create replication group .. code-block:: console $ cinder group-type-create rep-group-type-example $ cinder group-type-key rep-group-type-example set consistent_group_replication_enabled=' True' $ cinder type-create type-global $ cinder type-key type-global set replication_enabled=' True' replication_type=' global' $ cinder group-create rep-group-type-example type-global --name global-group - Failover replication group .. code-block:: console $ cinder group-failover-replication --secondary-backend-id target_svc_id group_id - Failback replication group .. code-block:: console $ cinder group-failover-replication --secondary-backend-id default group_id .. note:: Optionally, allow-attached-volume can be used to failover the in-use volume, but fail over/back an in-use volume is not recommended. If the user does failover operation to an in-use volume, the volume status remains in-use after failover. But the in-use replication volume would change to read-only since the primary volume is changed to auxiliary side and the instance is still attached to the master volume. As a result please detach the replication volume first and attach again if user want to reuse the in-use replication volume as read-write. HyperSwap Volumes ----------------- A HyperSwap volume is created with a volume-type that has the extra spec ``drivers:volume_topology`` set to ``hyperswap``. To support HyperSwap volumes, IBM Storage Virtualize family firmware version 7.6.0 or later is required. Add the following to the back-end configuration to specify the host preferred site for HyperSwap volume. FC: .. code-block:: ini storwize_preferred_host_site = site1:20000090fa17311e&ff00000000000001, site2:20000089762sedce&ff00000000000000 iSCSI: .. code-block:: ini storwize_preferred_host_site = site1:iqn.1993-08.org.debian:01:eac5ccc1aaa&iqn.1993-08.org.debian:01:be53b7e236be, site2:iqn.1993-08.org.debian:01:eac5ccc1bbb&iqn.1993-08.org.debian:01:abcdefg9876w The site1 and site2 are names of the two host sites used in Storage Virtualize family storage systems. The WWPNs and IQNs are the connectors used for host mapping in the Storage Virtualize family. .. code-block:: console $ cinder type-create hyper_type $ cinder type-key hyper_type set drivers:volume_topology=hyperswap \ drivers:peer_pool=Pool_site2 .. note:: The property ``rsize`` is considered as ``buffersize`` for the HyperSwap volume. The HyperSwap property ``iogrp`` is selected by storage. A group is created as a HyperSwap group with a group-type that has the group spec ``hyperswap_group_enabled`` set to `` True``. 
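A sketch of the corresponding commands, following the same pattern as the replication group example above (the group type and group names are placeholders):

.. code-block:: console

   $ cinder group-type-create hyperswap_group_type
   $ cinder group-type-key hyperswap_group_type set hyperswap_group_enabled=' True'
   $ cinder group-create hyperswap_group_type hyper_type --name hyperswap_group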
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/doc/source/configuration/block-storage/drivers/infinidat-volume-driver.rst0000664000175000017500000001710500000000000031516 0ustar00zuulzuul00000000000000======================================== INFINIDAT InfiniBox Block Storage driver ======================================== The INFINIDAT Block Storage volume driver provides iSCSI and Fibre Channel support for INFINIDAT InfiniBox storage systems. This section explains how to configure the INFINIDAT driver. Supported operations ~~~~~~~~~~~~~~~~~~~~ * Create, delete, attach, and detach volumes. * Create, list, and delete volume snapshots. * Create a volume from a snapshot. * Copy a volume to an image. * Copy an image to a volume. * Clone a volume. * Extend a volume. * Get volume statistics. * Create, modify, delete, and list consistency groups. * Create, modify, delete, and list snapshots of consistency groups. * Create consistency group from consistency group or consistency group snapshot. * Revert a volume to a snapshot. * Manage and unmanage volumes and snapshots. * List manageable volumes and snapshots. * Attach a volume to multiple instances at once (multi-attach). * Host and storage assisted volume migration. * Efficient non-disruptive volume backup. External package installation ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The driver requires the ``infinisdk`` package for communicating with InfiniBox systems. Install the package from PyPI using the following command: .. code-block:: console $ pip install infinisdk Setting up the storage array ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Create a storage pool object on the InfiniBox array in advance. The storage pool will contain volumes managed by OpenStack. Mixing OpenStack APIs and non-OpenStack methods are not supported when used to attach the same hosts via the same protocol. For example, it is not possible to create boot-from-SAN volumes and OpenStack volumes for the same host with Fibre Channel. Instead, use a different protocol for one of the volumes. Refer to the InfiniBox manuals for details on pool management. Driver configuration ~~~~~~~~~~~~~~~~~~~~ Edit the ``cinder.conf`` file, which is usually located under the following path ``/etc/cinder/cinder.conf``. * Add a section for the INFINIDAT driver back end. * Under the ``[DEFAULT]`` section, set the ``enabled_backends`` parameter with the name of the new back-end section. Configure the driver back-end section with the parameters below. * Configure the driver name by setting the following parameter: .. code-block:: ini volume_driver = cinder.volume.drivers.infinidat.InfiniboxVolumeDriver * Configure the management IP of the InfiniBox array by adding the following parameter: .. code-block:: ini san_ip = InfiniBox management IP * Verify that the InfiniBox array can be managed via an HTTPS connection. And the ``driver_use_ssl`` parameter should be set to ``true`` to enable use of the HTTPS protocol. HTTP can also be used if ``driver_use_ssl`` is set to (or defaults to) ``false``. To suppress requests library SSL certificate warnings, set the ``suppress_requests_ssl_warnings`` parameter to ``true``. .. code-block:: ini driver_use_ssl = true/false suppress_requests_ssl_warnings = true/false These parameters defaults to ``false``. * Configure user credentials. The driver requires an InfiniBox user with administrative privileges. We recommend creating a dedicated OpenStack user account that holds a pool admin user role. 
Refer to the InfiniBox manuals for details on user account management. Configure the user credentials by adding the following parameters: .. code-block:: ini san_login = infinibox_username san_password = infinibox_password * Configure the name of the InfiniBox pool by adding the following parameter: .. code-block:: ini infinidat_pool_name = Pool defined in InfiniBox * The back-end name is an identifier for the back end. We recommend using the same name as the name of the section. Configure the back-end name by adding the following parameter: .. code-block:: ini volume_backend_name = back-end name * Thin provisioning. The INFINIDAT driver supports creating thin or thick provisioned volumes. Configure thin or thick provisioning by adding the following parameter: .. code-block:: ini san_thin_provision = true/false This parameter defaults to ``true``. * Configure the connectivity protocol. The InfiniBox driver supports connection to the InfiniBox system in both the fibre channel and iSCSI protocols. Configure the desired protocol by adding the following parameter: .. code-block:: ini infinidat_storage_protocol = iscsi/fc This parameter defaults to ``fc``. * Configure iSCSI netspaces. When using the iSCSI protocol to connect to InfiniBox systems, you must configure one or more iSCSI network spaces in the InfiniBox storage array. Refer to the InfiniBox manuals for details on network space management. Configure the names of the iSCSI network spaces to connect to by adding the following parameter: .. code-block:: ini infinidat_iscsi_netspaces = iscsi_netspace Multiple network spaces can be specified by a comma separated string. This parameter is ignored when using the FC protocol. * Configure CHAP InfiniBox supports CHAP authentication when using the iSCSI protocol. To enable CHAP authentication, add the following parameter: .. code-block:: ini use_chap_auth = true To manually define the username and password, add the following parameters: .. code-block:: ini chap_username = username chap_password = password If the CHAP username or password are not defined, they will be auto-generated by the driver. The CHAP parameters are ignored when using the FC protocol. * Volume compression Volume compression is available for all supported InfiniBox versions. By default, compression for all newly created volumes is inherited from its parent pool at creation time. All pools are created by default with compression enabled. To explicitly enable or disable compression for all newly created volumes, add the following configuration parameter: .. code-block:: ini infinidat_use_compression = true/false Or leave this configuration parameter unset (commented out) for all created volumes to inherit their compression setting from their parent pool at creation time. The default value is unset. After modifying the ``cinder.conf`` file, restart the ``cinder-volume`` service. Create a new volume type for each distinct ``volume_backend_name`` value that you added in the ``cinder.conf`` file. The example below assumes that the same ``volume_backend_name=infinidat-pool-a`` option was specified in all of the entries, and specifies that the volume type ``infinidat`` can be used to allocate volumes from any of them. Example of creating a volume type: .. code-block:: console $ openstack volume type create infinidat $ openstack volume type set --property volume_backend_name=infinidat-pool-a infinidat Configuration example ~~~~~~~~~~~~~~~~~~~~~ .. 
code-block:: ini [DEFAULT] enabled_backends = infinidat-pool-a [infinidat-pool-a] volume_driver = cinder.volume.drivers.infinidat.InfiniboxVolumeDriver volume_backend_name = infinidat-pool-a driver_use_ssl = true suppress_requests_ssl_warnings = true san_ip = 10.1.2.3 san_login = openstackuser san_password = openstackpass san_thin_provision = true infinidat_pool_name = pool-a infinidat_storage_protocol = iscsi infinidat_iscsi_netspaces = default_iscsi_space Driver-specific options ~~~~~~~~~~~~~~~~~~~~~~~ The following table contains the configuration options that are specific to the INFINIDAT driver. .. config-table:: :config-target: INFINIDAT InfiniBox cinder.volume.drivers.infinidat ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/doc/source/configuration/block-storage/drivers/infortrend-volume-driver.rst0000664000175000017500000000731200000000000031722 0ustar00zuulzuul00000000000000======================== Infortrend volume driver ======================== The `Infortrend `__ volume driver is a Block Storage driver providing iSCSI and Fibre Channel support for Infortrend storages. Supported operations ~~~~~~~~~~~~~~~~~~~~ The Infortrend volume driver supports the following volume operations: * Create, delete, attach, and detach volumes. * Create and delete a snapshot. * Create a volume from a snapshot. * Copy an image to a volume. * Copy a volume to an image. * Clone a volume. * Extend a volume * Retype a volume. * Manage and unmanage a volume. * Migrate a volume with back-end assistance. * Live migrate an instance with volumes hosted on an Infortrend backend. System requirements ~~~~~~~~~~~~~~~~~~~ To use the Infortrend volume driver, the following settings are required: Set up Infortrend storage ------------------------- * Create logical volumes in advance. * Host side setting ``Peripheral device type`` should be ``No Device Present (Type=0x7f)``. Set up cinder-volume node ------------------------- * Install JRE 7 or later. * Download the Infortrend storage CLI from the `release page `__. Choose the raidcmd_ESDS10.jar file, which's under v2.1.3 on the github releases page, and assign it to the default path ``/opt/bin/Infortrend/``. Driver configuration ~~~~~~~~~~~~~~~~~~~~ On ``cinder-volume`` nodes, set the following in your ``/etc/cinder/cinder.conf``, and use the following options to configure it: Driver options -------------- .. include:: ../../tables/cinder-infortrend.inc iSCSI configuration example --------------------------- .. code-block:: ini [DEFAULT] default_volume_type = IFT-ISCSI enabled_backends = IFT-ISCSI [IFT-ISCSI] volume_driver = cinder.volume.drivers.infortrend.infortrend_iscsi_cli.InfortrendCLIISCSIDriver volume_backend_name = IFT-ISCSI infortrend_pools_name = POOL-1,POOL-2 san_ip = MANAGEMENT_PORT_IP san_password = MANAGEMENT_PASSWORD infortrend_slots_a_channels_id = 0,1,2,3 infortrend_slots_b_channels_id = 0,1,2,3 Fibre Channel configuration example ----------------------------------- .. 
code-block:: ini [DEFAULT] default_volume_type = IFT-FC enabled_backends = IFT-FC [IFT-FC] volume_driver = cinder.volume.drivers.infortrend.infortrend_fc_cli.InfortrendCLIFCDriver volume_backend_name = IFT-FC infortrend_pools_name = POOL-1,POOL-2,POOL-3 san_ip = MANAGEMENT_PORT_IP san_password = MANAGEMENT_PASSWORD infortrend_slots_a_channels_id = 4,5 Multipath configuration ----------------------- * Enable multipath for image transfer in ``/etc/cinder/cinder.conf`` for each back end or in ``[backend_defaults]`` section as a common configuration for all backends. .. code-block:: ini use_multipath_for_image_xfer = True Restart the ``cinder-volume`` service. * Enable multipath for volume attach and detach in ``/etc/nova/nova.conf``. .. code-block:: ini [libvirt] ... volume_use_multipath = True ... Restart the ``nova-compute`` service. Extra spec usage ---------------- * ``infortrend:provisioning`` - Defaults to ``full`` provisioning, the valid values are thin and full. * ``infortrend:tiering`` - Defaults to use ``all`` tiering, the valid values are subsets of 0, 1, 2, 3. If multi-pools are configured in ``cinder.conf``, it can be specified for each pool, separated by semicolon. For example: ``infortrend:provisioning``: ``POOL-1:thin; POOL-2:full`` ``infortrend:tiering``: ``POOL-1:all; POOL-2:0; POOL-3:0,1,3`` For more details, see `Infortrend documents `_. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/doc/source/configuration/block-storage/drivers/inspur-as13000-driver.rst0000664000175000017500000000440600000000000030551 0ustar00zuulzuul00000000000000=================================== Inspur AS13000 series volume driver =================================== Inspur AS13000 series volume driver provides OpenStack Compute instances with access to Inspur AS13000 series storage system. Inspur AS13000 storage can be used with iSCSI connection. This documentation explains how to configure and connect the block storage nodes to Inspur AS13000 series storage. Driver options ~~~~~~~~~~~~~~ The following table contains the configuration options supported by the Inspur AS13000 iSCSI driver. .. config-table:: :config-target: Inspur AS13000 cinder.volume.drivers.inspur.as13000.as13000_driver Supported operations ~~~~~~~~~~~~~~~~~~~~ - Create, list, delete, attach (map), and detach (unmap) volumes. - Create, list and delete volume snapshots. - Create a volume from a snapshot. - Copy an image to a volume. - Copy a volume to an image. - Clone a volume. - Extend a volume. Configure Inspur AS13000 iSCSI backend ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ This section details the steps required to configure the Inspur AS13000 storage cinder driver. #. In the ``cinder.conf`` configuration file under the ``[DEFAULT]`` section, set the enabled_backends parameter. .. code-block:: ini [DEFAULT] enabled_backends = AS13000-1 #. Add a backend group section for backend group specified in the enabled_backends parameter. #. In the newly created backend group section, set the following configuration options: .. 
code-block:: ini [AS13000-1] # The driver path volume_driver = cinder.volume.drivers.inspur.as13000.as13000_driver.AS13000Driver # Management IP of Inspur AS13000 storage array san_ip = 10.0.0.10 # The Rest API port san_api_port = 8088 # Management username of Inspur AS13000 storage array san_login = root # Management password of Inspur AS13000 storage array san_password = passw0rd # The Pool used to allocated volumes as13000_ipsan_pools = Pool0 # The Meta Pool to use, should be a replication Pool as13000_meta_pool = Pool_Rep # Backend name volume_backend_name = AS13000 #. Save the changes to the ``/etc/cinder/cinder.conf`` file and restart the ``cinder-volume`` service. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/doc/source/configuration/block-storage/drivers/inspur-instorage-driver.rst0000664000175000017500000000726300000000000031561 0ustar00zuulzuul00000000000000===================================== Inspur InStorage family volume driver ===================================== Inspur InStorage family volume driver provides OpenStack Compute instances with access to Inspur Instorage family storage system. Inspur InStorage storage system can be used with FC or iSCSI connection. This documentation explains how to configure and connect the block storage nodes to Inspur InStorage family storage system. Supported operations ~~~~~~~~~~~~~~~~~~~~ - Create, list, delete, attach (map), and detach (unmap) volumes. - Create, list and delete volume snapshots. - Create a volume from a snapshot. - Copy an image to a volume. - Copy a volume to an image. - Clone a volume. - Extend a volume. - Retype a volume. - Manage and unmanage a volume. - Create, list, and delete consistency group. - Create, list, and delete consistency group snapshot. - Modify consistency group (add or remove volumes). - Create consistency group from source. - Failover and Failback support. Configure Inspur InStorage iSCSI/FC backend ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ This section details the steps required to configure the Inspur InStorage Cinder Driver for single FC or iSCSI backend. #. In the ``cinder.conf`` configuration file under the ``[DEFAULT]`` section, set the enabled_backends parameter with the iSCSI or FC back-end group - For Fibre Channel: .. code-block:: ini [DEFAULT] enabled_backends = instorage-fc-1 - For iSCSI: .. code-block:: ini [DEFAULT] enabled_backends = instorage-iscsi-1 #. Add a back-end group section for back-end group specified in the enabled_backends parameter #. In the newly created back-end group section, set the following configuration options: - For Fibre Channel: .. code-block:: ini [instorage-fc-1] # Management IP of Inspur InStorage storage array san_ip = 10.0.0.10 # Management Port of Inspur InStorage storage array, by default set to 22 san_ssh_port = 22 # Management username of Inspur InStorage storage array san_login = username # Management password of Inspur InStorage storage array san_password = password # Private key for Inspur InStorage storage array san_private_key = path/to/the/private/key # The Pool used to allocated volumes instorage_mcs_volpool_name = Pool0 # The driver path volume_driver = cinder.volume.drivers.inspur.instorage.instorage_fc.InStorageMCSFCDriver # Backend name volume_backend_name = instorage_fc - For iSCSI: .. 
code-block:: ini [instorage-iscsi-1] # Management IP of Inspur InStorage storage array san_ip = 10.0.0.10 # Management Port of Inspur InStorage storage array, by default set to 22 san_ssh_port = 22 # Management username of Inspur InStorage storage array san_login = username # Management password of Inspur InStorage storage array san_password = password # Private key for Inspur InStorage storage array san_private_key = path/to/the/private/key # The pool used to allocate volumes instorage_mcs_volpool_name = Pool0 # The driver path volume_driver = cinder.volume.drivers.inspur.instorage.instorage_iscsi.InStorageMCSISCSIDriver # Backend name volume_backend_name = instorage_iscsi .. note:: When both ``san_password`` and ``san_private_key`` are provided, the driver will prefer the private key over the password. #. Save the changes to the ``/etc/cinder/cinder.conf`` file and restart the ``cinder-volume`` service. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/doc/source/configuration/block-storage/drivers/intel-rsd-volume-driver.rst0000664000175000017500000000261100000000000031446 0ustar00zuulzuul00000000000000==================================== Intel Rack Scale Design (RSD) driver ==================================== The Intel Rack Scale Design volume driver is a block storage driver providing NVMe-oF support for RSD storage. System requirements ~~~~~~~~~~~~~~~~~~~ To use the RSD driver, the following requirements must be met: * The driver only supports RSD API version 2.4 or later. * The driver requires rsd-lib. * ``cinder-volume`` should be running on one of the composed nodes in RSD and have access to the PODM URL. * All the ``nova-compute`` services should be running on the composed nodes in RSD. * All the ``cinder-volume`` and ``nova-compute`` nodes should have ``dmidecode`` and the latest ``nvme-cli`` (with connect/disconnect subcommands) installed. Supported operations ~~~~~~~~~~~~~~~~~~~~ * Create, delete volumes. * Attach, detach volumes. * Copy an image to a volume. * Copy a volume to an image. * Create, delete snapshots. * Create a volume from a snapshot. * Clone a volume. * Extend a volume. * Get volume statistics. Configuration ~~~~~~~~~~~~~ On ``cinder-volume`` nodes, use the following configuration in your ``/etc/cinder/cinder.conf``: .. code-block:: ini volume_driver = cinder.volume.drivers.rsd.RSDDriver The following table contains the configuration options supported by the RSD driver: .. config-table:: :config-target: RSD cinder.volume.drivers.rsd ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/doc/source/configuration/block-storage/drivers/kaminario-driver.rst0000664000175000017500000002205000000000000030211 0ustar00zuulzuul00000000000000======================================================== Kaminario K2 all-flash array iSCSI and FC volume drivers ======================================================== Kaminario's K2 all-flash array leverages a unique software-defined architecture that delivers highly valued predictable performance, scalability and cost-efficiency. Kaminario's K2 all-flash iSCSI and FC arrays can be used in OpenStack Block Storage for providing block storage using the ``KaminarioISCSIDriver`` class and the ``KaminarioFCDriver`` class, respectively. This documentation explains how to configure and connect the block storage nodes to one or more K2 all-flash arrays.
Driver requirements ~~~~~~~~~~~~~~~~~~~ - Kaminario's K2 all-flash iSCSI and/or FC array - K2 REST API version >= 2.2.0 - K2 version 5.8 or later are supported - ``krest`` python library(version 1.3.1 or later) should be installed on the Block Storage node using :command:`sudo pip install krest` - The Block Storage Node should also have a data path to the K2 array for the following operations: - Create a volume from snapshot - Clone a volume - Copy volume to image - Copy image to volume - Retype 'dedup without replication'<->'nodedup without replication' Supported operations ~~~~~~~~~~~~~~~~~~~~~ - Create, delete, attach, and detach volumes. - Create and delete volume snapshots. - Create a volume from a snapshot. - Copy an image to a volume. - Copy a volume to an image. - Clone a volume. - Extend a volume. - Retype a volume. - Manage and unmanage a volume. - Replicate volume with failover and failback support to K2 array. Limitations and known issues ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ If your OpenStack deployment is not setup to use multipath, the network connectivity of the K2 all-flash array will use a single physical port. This may significantly limit the following benefits provided by K2: - available bandwidth - high-availability - non disruptive-upgrade The following steps are required to setup multipath access on the Compute and the Block Storage nodes #. Install multipath software on both Compute and Block Storage nodes. For example: .. code-block:: console # apt-get install sg3-utils multipath-tools #. In the ``[libvirt]`` section of the ``nova.conf`` configuration file, specify ``volume_use_multipath=True``. This option is valid for both iSCSI and FC drivers. In versions prior to Newton, the option was called ``iscsi_use_multipath``. Additional resources: Kaminario Host Configuration Guide for Linux (for configuring multipath) #. Restart the compute service for the changes to take effect. .. code-block:: console # service nova-compute restart Configure single Kaminario iSCSI/FC back end ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ This section details the steps required to configure the Kaminario Cinder Driver for single FC or iSCSI backend. #. In the ``cinder.conf`` configuration file under the ``[DEFAULT]`` section, set the ``scheduler_default_filters`` parameter: .. code-block:: ini [DEFAULT] scheduler_default_filters = DriverFilter,CapabilitiesFilter See following documents for more information: :ref:`cinder_scheduler_filters` and :ref:`filter_weigh_scheduler`. #. Under the ``[DEFAULT]`` section, set the enabled_backends parameter with the iSCSI or FC back-end group .. code-block:: ini [DEFAULT] # For iSCSI enabled_backends = kaminario-iscsi-1 # For FC # enabled_backends = kaminario-fc-1 #. Add a back-end group section for back-end group specified in the enabled_backends parameter #. In the newly created back-end group section, set the following configuration options: .. 
code-block:: ini [kaminario-iscsi-1] # Management IP of Kaminario K2 All-Flash iSCSI/FC array san_ip = 10.0.0.10 # Management username of Kaminario K2 All-Flash iSCSI/FC array san_login = username # Management password of Kaminario K2 All-Flash iSCSI/FC array san_password = password # Enable Kaminario K2 iSCSI/FC driver volume_driver = cinder.volume.drivers.kaminario.kaminario_iscsi.KaminarioISCSIDriver # volume_driver = cinder.volume.drivers.kaminario.kaminario_fc.KaminarioFCDriver # Backend name # volume_backend_name = kaminario_fc_1 volume_backend_name = kaminario_iscsi_1 # K2 driver calculates max_oversubscription_ratio on setting below # option as True. Default value is False # auto_calc_max_oversubscription_ratio = False # Set a limit on total number of volumes to be created on K2 array, for example: # filter_function = "capabilities.total_volumes < 250" # For replication, replication_device must be set and the replication peer must be configured # on the primary and the secondary K2 arrays # Syntax: # replication_device = backend_id:,login:,password:,rpo: # where: # s-array-ip is the secondary K2 array IP # rpo must be either 60(1 min) or multiple of 300(5 min) # Example: # replication_device = backend_id:10.0.0.50,login:kaminario,password:kaminario,rpo:300 # Suppress requests library SSL certificate warnings on setting this option as True # Default value is 'False' # suppress_requests_ssl_warnings = False #. Restart the Block Storage services for the changes to take effect: .. code-block:: console # service cinder-api restart # service cinder-scheduler restart # service cinder-volume restart Setting multiple Kaminario iSCSI/FC back ends ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The following steps are required to configure multiple K2 iSCSI/FC backends: #. In the :file:`cinder.conf` file under the [DEFAULT] section, set the enabled_backends parameter with the comma-separated iSCSI/FC back-end groups. .. code-block:: ini [DEFAULT] enabled_backends = kaminario-iscsi-1, kaminario-iscsi-2, kaminario-iscsi-3 #. Add a back-end group section for each back-end group specified in the enabled_backends parameter #. For each back-end group section, enter the configuration options as described in the above section ``Configure single Kaminario iSCSI/FC back end`` See :doc:`Configure multiple-storage back ends ` for additional information. #. Restart the cinder volume service for the changes to take effect. .. code-block:: console # service cinder-volume restart Creating volume types ~~~~~~~~~~~~~~~~~~~~~ Create volume types for supporting volume creation on the multiple K2 iSCSI/FC backends. Set following extras-specs in the volume types: - volume_backend_name : Set value of this spec according to the value of ``volume_backend_name`` in the back-end group sections. If only this spec is set, then dedup Kaminario cinder volumes will be created without replication support .. code-block:: console $ openstack volume type create kaminario_iscsi_dedup_noreplication $ openstack volume type set --property volume_backend_name=kaminario_iscsi_1 \ kaminario_iscsi_dedup_noreplication - kaminario:thin_prov_type : Set this spec in the volume type for creating nodedup Kaminario cinder volumes. If this spec is not set, dedup Kaminario cinder volumes will be created. - kaminario:replication : Set this spec in the volume type for creating replication supported Kaminario cinder volumes. If this spec is not set, then Kaminario cinder volumes will be created without replication support. .. 
code-block:: console $ openstack volume type create kaminario_iscsi_dedup_replication $ openstack volume type set --property volume_backend_name=kaminario_iscsi_1 \ kaminario:replication=enabled kaminario_iscsi_dedup_replication $ openstack volume type create kaminario_iscsi_nodedup_replication $ openstack volume type set --property volume_backend_name=kaminario_iscsi_1 \ kaminario:replication=enabled kaminario:thin_prov_type=nodedup \ kaminario_iscsi_nodedup_replication $ openstack volume type create kaminario_iscsi_nodedup_noreplication $ openstack volume type set --property volume_backend_name=kaminario_iscsi_1 \ kaminario:thin_prov_type=nodedup kaminario_iscsi_nodedup_noreplication Supported retype cases ~~~~~~~~~~~~~~~~~~~~~~ The following are the supported retypes for Kaminario cinder volumes: - Nodedup-noreplication <--> Nodedup-replication .. code-block:: console $ cinder retype volume-id new-type - Dedup-noreplication <--> Dedup-replication .. code-block:: console $ cinder retype volume-id new-type - Dedup-noreplication <--> Nodedup-noreplication .. code-block:: console $ cinder retype --migration-policy on-demand volume-id new-type For non-supported cases, try combinations of the :command:`cinder retype` command. Driver options ~~~~~~~~~~~~~~ The following table contains the configuration options that are specific to the Kaminario K2 FC and iSCSI Block Storage drivers. .. config-table:: :config-target: Kaminario cinder.volume.drivers.kaminario.kaminario_common ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/doc/source/configuration/block-storage/drivers/kioxia-kumoscale-driver.rst0000664000175000017500000000354300000000000031512 0ustar00zuulzuul00000000000000============================== KIOXIA Kumoscale NVMeOF Driver ============================== KIOXIA Kumoscale volume driver provides OpenStack Compute instances with access to KIOXIA Kumoscale NVMeOF storage systems. This documentation explains how to configure Cinder for use with the KIOXIA Kumoscale storage backend system. Driver options ~~~~~~~~~~~~~~ The following table contains the configuration options supported by the KIOXIA Kumoscale NVMeOF driver. .. config-table:: :config-target: KIOXIA Kumoscale cinder.volume.drivers.kioxia.kumoscale Supported operations ~~~~~~~~~~~~~~~~~~~~ - Create, list, delete, attach and detach volumes - Create, list and delete volume snapshots - Create a volume from a snapshot - Copy an image to a volume. - Copy a volume to an image. - Create volume from snapshot - Clone a volume - Extend a volume Configure KIOXIA Kumoscale NVMeOF backend ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ This section details the steps required to configure the KIOXIA Kumoscale storage cinder driver. #. In the ``cinder.conf`` configuration file under the ``[DEFAULT]`` section, set the enabled_backends parameter. .. code-block:: ini [DEFAULT] enabled_backends = kumoscale-1 #. Add a backend group section for the backend group specified in the enabled_backends parameter. #. In the newly created backend group section, set the following configuration options: .. code-block:: ini [kumoscale-1] # Backend name volume_backend_name=kumoscale-1 # The driver path volume_driver=cinder.volume.drivers.kioxia.kumoscale.KumoScaleBaseVolumeDriver # Kumoscale provisioner URL kioxia_url=https://70.0.0.13:30100 # Kumoscale provisioner cert file kioxia_cafile=/etc/kioxia/ssdtoolbox.pem # Kumoscale provisioner token token=eyJhbGciOiJIUzI1NiJ9... 
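As with the other back ends described in this guide, a volume type is typically created to point at the configured back end. The following is a minimal sketch that assumes the ``volume_backend_name=kumoscale-1`` value from the example above; the volume type name is only illustrative:

.. code-block:: console

   $ openstack volume type create kumoscale
   $ openstack volume type set --property volume_backend_name=kumoscale-1 kumoscale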
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/doc/source/configuration/block-storage/drivers/lenovo-driver.rst0000664000175000017500000001402600000000000027545 0ustar00zuulzuul00000000000000====================================== Lenovo Fibre Channel and iSCSI drivers ====================================== The ``LenovoFCDriver`` and ``LenovoISCSIDriver`` Cinder drivers allow Lenovo S-Series arrays to be used for block storage in OpenStack deployments. System requirements ~~~~~~~~~~~~~~~~~~~ To use the Lenovo drivers, the following are required: - Lenovo S2200, S3200, DS2200, DS4200 or DS6200 array with: - iSCSI or FC host interfaces - G22x firmware or later - Network connectivity between the OpenStack host and the array management interfaces - HTTPS or HTTP must be enabled on the array Supported operations ~~~~~~~~~~~~~~~~~~~~ - Create, delete, attach, and detach volumes. - Create, list, and delete volume snapshots. - Create a volume from a snapshot. - Copy an image to a volume. - Copy a volume to an image. - Clone a volume. - Extend a volume. - Migrate a volume with back-end assistance. - Retype a volume. - Manage and unmanage a volume. .. note:: The generic grouping functionality supported in the G265 and later firmware is not supported by OpenStack Cinder due to differences in the grouping models used in Cinder and the S-Series firmware. Configuring the array ~~~~~~~~~~~~~~~~~~~~~ #. Verify that the array can be managed using an HTTPS connection. HTTP can also be used if ``hpmsa_api_protocol=http`` is placed into the appropriate sections of the ``cinder.conf`` file, but this option is deprecated and will be removed in a future release. Confirm that virtual pools A and B are present if you plan to use virtual pools for OpenStack storage. #. Edit the ``cinder.conf`` file to define a storage back-end entry for each storage pool on the array that will be managed by OpenStack. Each entry consists of a unique section name, surrounded by square brackets, followed by options specified in ``key=value`` format. - The ``lenovo_pool_name`` value specifies the name of the storage pool on the array. - The ``volume_backend_name`` option value can be a unique value, if you wish to be able to assign volumes to a specific storage pool on the array, or a name that is shared among multiple storage pools to let the volume scheduler choose where new volumes are allocated. - The rest of the options will be repeated for each storage pool in a given array: * ``volume_driver`` specifies the Cinder driver name. * ``san_ip`` specifies the IP addresses or host names of the array's management controllers. * ``san_login`` and ``san_password`` specify the username and password of an array user account with ``manage`` privileges. * ``driver_use_ssl`` should be set to ``true`` to enable use of the HTTPS protocol. * ``lenovo_iscsi_ips`` specifies the iSCSI IP addresses for the array if using the iSCSI transport protocol. In the examples below, two back ends are defined, one for pool A and one for pool B, and a common ``volume_backend_name`` is used so that a single volume type definition can be used to allocate volumes from both pools. **Example: iSCSI example back-end entries** .. 
code-block:: ini [pool-a] lenovo_pool_name = A volume_backend_name = lenovo-array volume_driver = cinder.volume.drivers.lenovo.lenovo_iscsi.LenovoISCSIDriver san_ip = 10.1.2.3 san_login = manage san_password = !manage lenovo_iscsi_ips = 10.2.3.4,10.2.3.5 driver_use_ssl = true [pool-b] lenovo_pool_name = B volume_backend_name = lenovo-array volume_driver = cinder.volume.drivers.lenovo.lenovo_iscsi.LenovoISCSIDriver san_ip = 10.1.2.3 san_login = manage san_password = !manage lenovo_iscsi_ips = 10.2.3.4,10.2.3.5 driver_use_ssl = true **Example: Fibre Channel example back-end entries** .. code-block:: ini [pool-a] lenovo_pool_name = A volume_backend_name = lenovo-array volume_driver = cinder.volume.drivers.lenovo.lenovo_fc.LenovoFCDriver san_ip = 10.1.2.3 san_login = manage san_password = !manage driver_use_ssl = true [pool-b] lenovo_pool_name = B volume_backend_name = lenovo-array volume_driver = cinder.volume.drivers.lenovo.lenovo_fc.LenovoFCDriver san_ip = 10.1.2.3 san_login = manage san_password = !manage driver_use_ssl = true #. If HTTPS is not enabled in the array, add ``lenovo_api_protocol = http`` in each of the back-end definitions. #. If HTTPS is enabled, you can enable certificate verification with the option ``driver_ssl_cert_verify = True``. You may also use the ``driver_ssl_cert_path`` option to specify the path to a CA_BUNDLE file containing CAs other than those in the default list. #. Modify the ``[DEFAULT]`` section of the ``cinder.conf`` file to add an ``enabled_backends`` parameter specifying the back-end entries you added, and a ``default_volume_type`` parameter specifying the name of a volume type that you will create in the next step. **Example: [DEFAULT] section changes** .. code-block:: ini [DEFAULT] # ... enabled_backends = pool-a,pool-b default_volume_type = lenovo #. Create a new volume type for each distinct ``volume_backend_name`` value that you added to the ``cinder.conf`` file. The example below assumes that the same ``volume_backend_name=lenovo-array`` option was specified in all of the entries, and specifies that the volume type ``lenovo`` can be used to allocate volumes from any of them. **Example: Creating a volume type** .. code-block:: console $ openstack volume type create lenovo $ openstack volume type set --property volume_backend_name=lenovo-array lenovo #. After modifying the ``cinder.conf`` file, restart the ``cinder-volume`` service. Driver-specific options ~~~~~~~~~~~~~~~~~~~~~~~ The following table contains the configuration options that are specific to the Lenovo drivers. .. config-table:: :config-target: Lenovo cinder.volume.drivers.lenovo.lenovo_common ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/doc/source/configuration/block-storage/drivers/lightbits-lightos-driver.rst0000664000175000017500000001425200000000000031704 0ustar00zuulzuul00000000000000======================= Lightbits Cinder Driver ======================= The Lightbits(TM) OpenStack driver enables OpenStack clusters to use Lightbits clustered storage servers. This documentation explains how to configure Cinder for use with the Lightbits storage backend system. 
Supported operations ~~~~~~~~~~~~~~~~~~~~ - Create volume - Delete volume - Attach volume - Detach volume - Create image from volume - Live migration - Volume replication - Thin provisioning - Multi-attach - Supported vendor driver - Extend volume - Create snapshot - Delete snapshot - Create volume from snapshot - Create volume from volume (clone) - Active active deployment support - Volume retype (host assisted) - Multi Tenancy support Lightbits OpenStack Driver Components ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The Lightbits OpenStack driver has three components: - Cinder driver - Nova libvirt volume driver - os_brick initiator connector In addition, it requires the Lightbits ``discovery-client``, provided with the product. The os_brick connector uses the Lightbits ``discovery-client`` to communicate with Lightbits NVMe/TCP discovery services. The Cinder Driver ~~~~~~~~~~~~~~~~~ The Cinder driver integrates with Cinder and performs REST operations against the Lightbits storage cluster. To enable the driver, add the following to Cinder's configuration file: .. code-block:: ini enabled_backends = lightos, and .. code-block:: ini [lightos] volume_driver = cinder.volume.drivers.lightos.LightOSVolumeDriver volume_backend_name = lightos lightos_api_address = <TARGET_ACCESS_IPS> lightos_api_port = 443 lightos_jwt = <LIGHTOS_JWT> lightos_default_num_replicas = 3 lightos_default_compression_enabled = False lightos_api_service_timeout = 30 - ``TARGET_ACCESS_IPS`` are the Lightbits cluster nodes' access IPs. Multiple nodes should be separated by commas. For example: ``lightos_api_address = 192.168.67.78,192.168.34.56,192.168.12.17``. These IPs are where the driver looks for the Lightbits cluster's REST API servers. - ``LIGHTOS_JWT`` is the JWT (JSON Web Token) that is located on the Lightbits installation controller. You can find the JWT at ``~/lightos-default-admin-jwt``. - The default number of replicas for volumes is 3, and valid values for ``lightos_default_num_replicas`` are 1, 2, or 3. - The default compression setting is False (i.e., data is uncompressed). The default compression setting can also be True to indicate that new volumes should be created compressed, assuming no other compression setting is specified via the volume type. To control compression on a per-volume basis, create volume types for compressed and uncompressed, and use them as appropriate. - The default time to wait for an API service response is 30 seconds per API endpoint. Creating volumes with non-default compression, number of replicas, and multi tenancy can be done through the volume types mechanism. To create a new volume type with compression or multi tenancy enabled: .. code-block:: console $ openstack volume type create --property compression='<is> True' volume-with-compression To create a new volume type with one replica: .. code-block:: console $ openstack volume type create --property lightos:num_replicas=1 volume-with-one-replica To create a new type for a compressed volume with three replicas: .. code-block:: console $ openstack volume type create --property compression='<is> True' --property lightos:num_replicas=3 volume-with-three-replicas-and-compression Then create a new volume with one of these volume types: .. code-block:: console $ openstack volume create --size <size> --type <volume_type> <volume_name> Create a volume type for multi tenancy: ..
code-block:: console $ openstack volume type create --property lightos:project_name=project-01 type-with-mt Quality of Service (QoS) Support -------------------------------- The Lightbits driver allows administrators to better manage and optimize storage performance by associating QoS policies with volume types. - Administrators must first create the required QoS policy on the Lightbits cluster. - Once the QoS policy is created, it can be linked to a volume type in the system using the policy's unique UUID. Example: .. code-block:: bash openstack volume type create LightbitsWithQos --property volume_backend_name=<volume_backend_name> --property=lightos:qos_policy=<qos_policy_uuid> NVMe/TCP and Asymmetric Namespace Access (ANA) ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The Lightbits clusters expose their volumes using NVMe/TCP Asymmetric Namespace Access (ANA). ANA is a relatively new feature in the NVMe/TCP stack in Linux, but it is fully supported in Ubuntu 20.04. Each compute host in the OpenStack cluster needs to be ANA-capable to provide OpenStack VMs with Lightbits volumes over NVMe/TCP. For more information on how to set up the compute nodes to use ANA, see the CentOS Linux Cluster Client Software Installation section of the Lightbits(TM) Cluster Installation and Initial Configuration Guide. Note ~~~~ In the current version, if any of the cluster nodes changes its access IPs, the Cinder driver's configuration file should be updated with the new cluster node access IPs and the service restarted. As long as the Cinder driver can access at least one cluster access IP it will work, but it will be susceptible to cluster node failures. Driver options ~~~~~~~~~~~~~~ The following table contains the configuration options supported by the Lightbits Cinder driver. .. config-table:: :config-target: Lightbits cluster cinder.volume.drivers.lightos Active active deployment support ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ To enable active-active deployment, follow these steps: 1. Activate the active-active mode by setting the "cluster" option in the "DEFAULT" section. 2. Configure the Distributed Lock Manager (DLM), such as Redis or etcd, in the "coordination" section. These options should be added to the cinder.conf file: .. code-block:: ini [DEFAULT] cluster = <cluster_name> [coordination] backend_url = <coordination_backend_url> For more detailed instructions, please refer to the guidelines at:: https://docs.openstack.org/cinder/latest/contributor/high_availability.html ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/doc/source/configuration/block-storage/drivers/linstor-driver.rst0000664000175000017500000000161100000000000027731 0ustar00zuulzuul00000000000000============== LINSTOR driver ============== The LINSTOR driver allows Cinder to use DRBD/LINSTOR instances. External package installation ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The driver requires the ``python-linstor`` package for communication with the LINSTOR Controller. Install the package from PyPI using the following command: .. code-block:: console $ python -m pip install python-linstor Configuration ~~~~~~~~~~~~~ Set the following option in the ``cinder.conf`` file for the DRBD transport: .. code-block:: ini volume_driver = cinder.volume.drivers.linstordrv.LinstorDrbdDriver Or use the following for iSCSI transport: .. code-block:: ini volume_driver = cinder.volume.drivers.linstordrv.LinstorIscsiDriver The following table contains the configuration options supported by the LINSTOR driver: ..
config-table:: :config-target: LINSTOR cinder.volume.drivers.linstordrv ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/doc/source/configuration/block-storage/drivers/lvm-volume-driver.rst0000664000175000017500000000241700000000000030347 0ustar00zuulzuul00000000000000=== LVM === The default volume back end uses local volumes managed by LVM. This driver supports different transport protocols to attach volumes, currently iSCSI and iSER. Set the following in your ``cinder.conf`` configuration file, and use the following options to configure for iSCSI transport: .. code-block:: ini volume_driver = cinder.volume.drivers.lvm.LVMVolumeDriver target_protocol = iscsi Use the following options to configure for the iSER transport: .. code-block:: ini volume_driver = cinder.volume.drivers.lvm.LVMVolumeDriver target_protocol = iser .. config-table:: :config-target: LVM cinder.volume.drivers.lvm .. caution:: When extending an existing volume which has a linked snapshot, the related logical volume is deactivated. This logical volume is automatically reactivated unless ``auto_activation_volume_list`` is defined in LVM configuration file ``lvm.conf``. See the ``lvm.conf`` file for more information. If auto activated volumes are restricted, then include the cinder volume group into this list: .. code-block:: ini auto_activation_volume_list = [ "existingVG", "cinder-volumes" ] This note does not apply for thinly provisioned volumes because they do not need to be deactivated. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/doc/source/configuration/block-storage/drivers/macrosan-storage-driver.rst0000664000175000017500000003426400000000000031516 0ustar00zuulzuul00000000000000========================================== MacroSAN Fibre Channel and iSCSI drivers ========================================== The ``MacroSANFCDriver`` and ``MacroSANISCSIDriver`` Cinder drivers allow the MacroSAN Storage arrays to be used for Block Storage in OpenStack deployments. System requirements ~~~~~~~~~~~~~~~~~~~ To use the MacroSAN drivers, the following are required: - MacroSAN Storage arrays with: - iSCSI or FC host interfaces - Enable RESTful service on the MacroSAN Storage Appliance. (The service is automatically turned on in the device. You can check if `python /odsp/scripts/devop/devop.py` is available via `ps -aux|grep python`. ) - Network connectivity between the OpenStack host and the array management interfaces - HTTPS or HTTP must be enabled on the array When creating a volume from image, install the ``multipath`` tool and add the following configuration keys for each backend section or in ``[backend_defaults]`` section as a common configuration for all backends in ``/etc/cinder/cinder.conf`` file: .. code-block:: ini [cinder-iscsi-a] use_multipath_for_image_xfer = True When creating a instance from image, install the ``multipath`` tool and add the following configuration keys in the ``[libvirt]`` configuration group of the ``/etc/nova/nova.conf`` file: .. code-block:: ini iscsi_use_multipath = True Supported operations ~~~~~~~~~~~~~~~~~~~~ - Create, delete, attach, and detach volumes. - Create, list, and delete volume snapshots. - Create a volume from a snapshot. - Copy an image to a volume. - Copy a volume to an image. - Clone a volume. - Extend a volume. - Volume Migration (Host Assisted). - Volume Migration (Storage Assisted). - Retype a volume. 
- Manage and unmanage a volume. - Manage and unmanage a snapshot. - Volume Replication. - Thin Provisioning. Configuring the array ~~~~~~~~~~~~~~~~~~~~~ #. Verify that the array can be managed via an HTTPS connection. Confirm that virtual pools A and B are present if you plan to use virtual pools for OpenStack storage. #. Edit the ``cinder.conf`` file to define a storage backend entry for each storage pool on the array that will be managed by OpenStack. Each entry consists of a unique section name, surrounded by square brackets, followed by options specified in a ``key=value`` format. * The ``volume_backend_name`` option value can be a unique value, if you wish to be able to assign volumes to a specific storage pool on the array, or a name that is shared among multiple storage pools to let the volume scheduler choose where new volumes are allocated. In the examples below, two back ends are defined, one for pool A and one for pool B. * Add the following configuration keys in the configuration group of enabled_backends of the ``/etc/cinder/cinder.conf`` file: **iSCSI example back-end entries** .. code-block:: ini [DEFAULT] enabled_backends = cinder-iscsi-a, cinder-iscsi-b rpc_response_timeout = 300 [cinder-iscsi-a] # Storage protocol. iscsi_protocol = iscsi #iSCSI target user-land tool. iscsi_helper = tgtadm # The iSCSI driver to load volume_driver = cinder.volume.drivers.macrosan.driver.MacroSANISCSIDriver. # Name to give this storage back-end. volume_backend_name = macrosan #Choose attach/detach volumes in cinder using multipath for volume to image and image to volume transfers. use_multipath_for_image_xfer = True # IP address of the Storage if attaching directly. san_ip = 172.17.251.142, 172.17.251.143 # Storage user name. san_login = openstack # Storage user password. san_password = openstack #Choose using thin-lun or thick lun. When set san_thin_provision to True,you must set #macrosan_thin_lun_extent_size, macrosan_thin_lun_low_watermark, macrosan_thin_lun_high_watermark. san_thin_provision = False #The name of Pool in the Storage. macrosan_pool = Pool-a #The default ports used for initializing connection. #Separate the controller by semicolons (``;``) #Separate the ports by comma (``,``) macrosan_client_default = eth-1:0:0, eth-1:0:1; eth-2:0:0, eth-2:0:1 #The switch to force detach volume when deleting macrosan_force_unmap_itl = True #Set snapshot's resource ratio macrosan_snapshot_resource_ratio = 1 #Calculate the time spent on the operation in the log file. macrosan_log_timing = True # =============Optional settings============= #Set the thin lun's extent size when the san_thin_provision is True. macrosan_thin_lun_extent_size = 8 #Set the thin lun's low watermark when the san_thin_provision is True. #macrosan_thin_lun_low_watermark = 8 #Set the thin lun's high watermark when the san_thin_provision is True. macrosan_thin_lun_high_watermark = 40 #The setting of Symmetrical Dual Active Storage macrosan_sdas_ipaddrs = 172.17.251.142, 172.17.251.143 macrosan_sdas_username = openstack macrosan_sdas_password = openstack #The setting of Replication Storage. When you set ip, you must set #the macrosan_replication_destination_ports parameter. macrosan_replication_ipaddrs = 172.17.251.142, 172.17.251.143 macrosan_replication_username = openstack macrosan_replication_password = openstack ##The ports used for the Replication Storage. 
#Separate the controller by semicolons (``,``) #Separate the ports by semicolons (``/``) macrosan_replication_destination_ports = eth-1:0:0/eth-1:0:1, eth-2:0:0/eth-2:0:1 #Macrosan iscsi_clients list. You can configure multiple clients. Separate the ports by semicolons (``/``) macrosan_client = (devstack; controller1name; eth-1:0:0/eth-1:0:1; eth-2:0:0/eth-2:0:1), (dev; controller2name; eth-1:0:0/eth-1:0:1; eth-2:0:0/eth-2:0:1) [cinder-iscsi-b] iscsi_protocol = iscsi iscsi_helper = tgtadm volume_driver = cinder.volume.drivers.macrosan.driver.MacroSANISCSIDriver volume_backend_name = macrosan use_multipath_for_image_xfer = True san_ip = 172.17.251.142, 172.17.251.143 san_login = openstack san_password = openstack macrosan_pool = Pool-b san_thin_provision = False macrosan_force_unmap_itl = True macrosan_snapshot_resource_ratio = 1 macrosan_log_timing = True macrosan_client_default = eth-1:0:0, eth-1:0:1; eth-2:0:0, eth-2:0:1 macrosan_thin_lun_extent_size = 8 macrosan_thin_lun_low_watermark = 8 macrosan_thin_lun_high_watermark = 40 macrosan_sdas_ipaddrs = 172.17.251.142, 172.17.251.143 macrosan_sdas_username = openstack macrosan_sdas_password = openstack macrosan_replication_ipaddrs = 172.17.251.142, 172.17.251.143 macrosan_replication_username = openstack macrosan_replication_password = openstack macrosan_replication_destination_ports = eth-1:0:0, eth-2:0:0 macrosan_client = (devstack; controller1name; eth-1:0:0; eth-2:0:0), (dev; controller2name; eth-1:0:0; eth-2:0:0) **Fibre Channel example backend entries** .. code-block:: ini [DEFAULT] enabled_backends = cinder-fc-a, cinder-fc-b rpc_response_timeout = 300 [cinder-fc-a] volume_driver = cinder.volume.drivers.macrosan.driver.MacroSANFCDriver volume_backend_name = macrosan use_multipath_for_image_xfer = True san_ip = 172.17.251.142, 172.17.251.143 san_login = openstack san_password = openstack macrosan_pool = Pool-a san_thin_provision = False macrosan_force_unmap_itl = True macrosan_snapshot_resource_ratio = 1 macrosan_log_timing = True #FC Zoning mode configured. zoning_mode = fabric #The number of ports used for initializing connection. macrosan_fc_use_sp_port_nr = 1 #In the case of an FC connection, the configuration item associated with the port is maintained. 
macrosan_fc_keep_mapped_ports = True # =============Optional settings============= macrosan_thin_lun_extent_size = 8 macrosan_thin_lun_low_watermark = 8 macrosan_thin_lun_high_watermark = 40 macrosan_sdas_ipaddrs = 172.17.251.142, 172.17.251.143 macrosan_sdas_username = openstack macrosan_sdas_password = openstack macrosan_replication_ipaddrs = 172.17.251.142, 172.17.251.143 macrosan_replication_username = openstack macrosan_replication_password = openstack macrosan_replication_destination_ports = eth-1:0:0, eth-2:0:0 [cinder-fc-b] volume_driver = cinder.volume.drivers.macrosan.driver.MacroSANFCDriver volume_backend_name = macrosan use_multipath_for_image_xfer = True san_ip = 172.17.251.142, 172.17.251.143 san_login = openstack san_password = openstack macrosan_pool = Pool-b san_thin_provision = False macrosan_force_unmap_itl = True macrosan_snapshot_resource_ratio = 1 macrosan_log_timing = True zoning_mode = fabric macrosan_fc_use_sp_port_nr = 1 macrosan_fc_keep_mapped_ports = True macrosan_thin_lun_extent_size = 8 macrosan_thin_lun_low_watermark = 8 macrosan_thin_lun_high_watermark = 40 macrosan_sdas_ipaddrs = 172.17.251.142, 172.17.251.143 macrosan_sdas_username = openstack macrosan_sdas_password = openstack macrosan_replication_ipaddrs = 172.17.251.142, 172.17.251.143 macrosan_replication_username = openstack macrosan_replication_password = openstack macrosan_replication_destination_ports = eth-1:0:0, eth-2:0:0 #. After modifying the ``cinder.conf`` file, restart the ``cinder-volume`` service. #. Create and use volume types. **Create and use sdas volume types** .. code-block:: console $ openstack volume type create sdas $ openstack volume type set --property sdas=True sdas **Create and use replication volume types** .. code-block:: console $ openstack volume type create replication $ openstack volume type set --property replication_enabled=True replication Configuration file parameters ----------------------------- This section describes mandatory and optional configuration file parameters of the MacroSAN volume driver. .. list-table:: **Mandatory parameters** :widths: 10 10 50 10 :header-rows: 1 * - Parameter - Default value - Description - Applicable to * - volume_backend_name - ``-`` - indicates the name of the backend - All * - volume_driver - ``cinder.volume.drivers.lvm.LVMVolumeDriver`` - indicates the loaded driver - All * - use_multipath_for_image_xfer - ``False`` - Chose attach/detach volumes in cinder using multipath for volume to image and image to volume transfers. - All * - san_thin_provision - ``True`` - Default volume type setting, True is thin lun, and False is thick lun. - All * - macrosan_force_unmap_itl - ``True`` - Force detach volume when deleting - All * - macrosan_log_timing - ``True`` - Calculate the time spent on the operation in the log file. - All * - macrosan_snapshot_resource_ratio - ``1`` - Set snapshot's resource ratio". - All * - iscsi_helper - ``tgtadm`` - iSCSI target user-land tool to use. - iSCSI * - iscsi_protocol - ``iscsi`` - Determines the iSCSI protocol for new iSCSI volumes, created with tgtadm. - iSCSI * - macrosan_client_default - ``None`` - This is the default connection information for iscsi. This default configuration is used when no host related information is obtained. - iSCSI * - zoning_mode - ``True`` - FC Zoning mode configured. 
- Fibre channel * - macrosan_fc_use_sp_port_nr - ``1`` - The use_sp_port_nr parameter is the number of online FC ports used by the single-ended memory when the FC connection is established in the switch non-all-pass mode. The maximum is 4. - Fibre channel * - macrosan_fc_keep_mapped_ports - ``True`` - In the case of an FC connection, the configuration item associated with the port is maintained. - Fibre channel .. list-table:: **Optional parameters** :widths: 20 10 50 15 :header-rows: 1 * - Parameter - Default value - Description - Applicable to * - macrosan_sdas_ipaddrs - ``-`` - The ip of Symmetrical Dual Active Storage - All * - macrosan_sdas_username - ``-`` - The username of Symmetrical Dual Active Storage - All * - macrosan_sdas_password - ``-`` - The password of Symmetrical Dual Active Storage - All * - macrosan_replication_ipaddrs - ``-`` - The ip of replication Storage. When you set ip, you must set the macrosan_replication_destination_ports parameter. - All * - macrosan_replication_username - ``-`` - The username of replication Storage - All * - macrosan_replication_password - ``-`` - The password of replication Storage - All * - macrosan_replication_destination_ports - ``-`` - The ports of replication storage when using replication storage. - All * - macrosan_thin_lun_extent_size - ``8`` - Set the thin lun's extent size when the san_thin_provision is True. - All * - macrosan_thin_lun_low_watermark - ``5`` - Set the thin lun's low watermark when the san_thin_provision is True. - All * - macrosan_thin_lun_high_watermark - ``20`` - Set the thin lun's high watermark when the san_thin_provision is True. - All * - macrosan_client - ``True`` - Macrosan iscsi_clients list. You can configure multiple clients. You can configure it in this format: (hostname; client_name; sp1_iscsi_port; sp2_iscsi_port), E.g: (controller1; decive1; eth-1:0:0; eth-2:0:0),(controller2; decive2; eth-1:0:0/ eth-1:0:1; eth-2:0:0/ eth-2:0:1) - All .. important:: Client_name has the following requirements: [a-zA-Z0-9.-_:], the maximum number of characters is 31 The following are the MacroSAN driver specific options that may be set in `cinder.conf`: .. config-table:: :config-target: MacroSAN cinder.volume.drivers.macrosan.config ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/doc/source/configuration/block-storage/drivers/nec-storage-m-series-driver.rst0000664000175000017500000002074500000000000032201 0ustar00zuulzuul00000000000000=========================== NEC Storage M series driver =========================== NEC Storage M series are dual-controller disk arrays which support online maintenance. This driver supports both iSCSI and Fibre Channel. 
System requirements ~~~~~~~~~~~~~~~~~~~ Supported models: +-----------------+------------------------+-----------------+ | Storage model | Storage control | Disk type | | | software (firmware) | | +=================+========================+=================+ | M110, | 0979 or later | SSD/HDD hybrid | | M310, | | | | M510, | | | | M710 | | | +-----------------+------------------------+-----------------+ | M310F, | 0979 or later | all flash | | M710F | | | +-----------------+------------------------+-----------------+ | M120, | 1028 or later | SSD/HDD hybrid | | M320 | | | +-----------------+------------------------+-----------------+ | M320F | 1028 or later | all flash | +-----------------+------------------------+-----------------+ Requirements: - NEC Storage M series requires firmware revision 1028 or later to create more than 1024 volumes in a pool. - NEC Storage DynamicDataReplication license. - (Optional) NEC Storage IO Load Manager license for QoS. Supported operations ~~~~~~~~~~~~~~~~~~~~ - Create, delete, attach, and detach volumes. - Create, list, and delete volume snapshots. - Create a volume from a snapshot. - Copy an image to a volume. - Copy a volume to an image. - Clone a volume. - Extend a volume. - Migrate a volume. - Get volume statistics. - Efficient non-disruptive volume backup. - Manage and unmanage a volume. - Manage and unmanage a snapshot. - Attach a volume to multiple instances at once (multi-attach). - Revert a volume to a snapshot. Preparation ~~~~~~~~~~~ Below is minimum preparation to a disk array. For details of each command, see the NEC Storage Manager Command Reference (IS052). - Common (iSCSI and Fibre Channel) #. Initial setup * Set IP addresses for management and BMC with the network configuration tool. * Enter license keys. (iSMcfg licenserelease) #. Create pools * Create pools for volumes. (iSMcfg poolbind) * Create pools for snapshots. (iSMcfg poolbind) #. Create system volumes * Create a Replication Reserved Volume (RSV) in one of pools. (iSMcfg ldbind) * Create Snapshot Reserve Areas (SRAs) in each snapshot pool. (iSMcfg srabind) #. (Optional) Register SSH public key - iSCSI only #. Set IP addresses of each iSCSI port. (iSMcfg setiscsiport) #. Create LD Sets for each node. (iSMcfg addldset) #. Register initiator names of each node to the corresponding LD Set. (iSMcfg addldsetinitiator) - Fibre Channel only #. Start access control. (iSMcfg startacc) #. Create LD Sets for each node. (iSMcfg addldset) #. Register WWPNs of each node to the corresponding LD Set. (iSMcfg addldsetpath) Configuration ~~~~~~~~~~~~~ Set the following in your ``cinder.conf``, and use the following options to configure it. If you use Fibre Channel: .. code-block:: ini [Storage1] volume_driver = cinder.volume.drivers.nec.volume.MStorageFCDriver .. end If you use iSCSI: .. code-block:: ini [Storage1] volume_driver = cinder.volume.drivers.nec.volume.MStorageISCSIDriver .. end Also, set ``volume_backend_name``. .. code-block:: ini [DEFAULT] volume_backend_name = Storage1 .. end This table shows configuration options for NEC Storage M series driver. .. config-table:: :config-target: NEC Storage M Series cinder.volume.drivers.nec.volume_common Required options ---------------- - ``nec_ismcli_fip`` FIP address of M-Series Storage. - ``nec_ismcli_user`` User name for M-Series Storage iSMCLI. - ``nec_ismcli_password`` Password for M-Series Storage iSMCLI. - ``nec_ismcli_privkey`` RSA secret key file name for iSMCLI (for public key authentication only). 
Encrypted RSA secret key file cannot be specified. - ``nec_diskarray_name`` Diskarray name of M-Series Storage. This parameter must be specified to configure multiple groups (multi back end) by using the same storage device (storage device that has the same ``nec_ismcli_fip``). Specify the disk array name targeted by the relevant config-group for this parameter. - ``nec_backup_pools`` Specify one pool number where snapshots are created. Multiple pools are not supported. Timeout configuration --------------------- - ``rpc_response_timeout`` Set the timeout value in seconds. If three or more volumes can be created at the same time, the reference value is 30 seconds multiplied by the number of volumes created at the same time. Also, Specify nova parameters below in ``nova.conf`` file. .. code-block:: ini [DEFAULT] block_device_allocate_retries = 120 block_device_allocate_retries_interval = 10 .. end - ``timeout server (HAProxy configuration)`` In addition, you need to edit the following value in the HAProxy configuration file (``/etc/haproxy/haproxy.cfg``) in an environment where HAProxy is used. .. code-block:: ini timeout server = 600 #Specify a value greater than rpc_response_timeout. .. end Run the :command:`service haproxy reload` command after editing the value to reload the HAProxy settings. .. note:: The OpenStack environment set up using Red Hat OpenStack Platform Director may be set to use HAProxy. Configuration example for /etc/cinder/cinder.conf ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ When using one config-group --------------------------- - When using ``nec_ismcli_password`` to authenticate iSMCLI (Password authentication): .. code-block:: ini [DEFAULT] enabled_backends = Storage1 [Storage1] volume_driver = cinder.volume.drivers.nec.volume.MStorageISCSIDriver volume_backend_name = Storage1 nec_ismcli_fip = 192.168.1.10 nec_ismcli_user = sysadmin nec_ismcli_password = sys123 nec_pools = 0 nec_backup_pools = 1 .. end - When using ``nec_ismcli_privkey`` to authenticate iSMCLI (Public key authentication): .. code-block:: ini [DEFAULT] enabled_backends = Storage1 [Storage1] volume_driver = cinder.volume.drivers.nec.volume.MStorageISCSIDriver volume_backend_name = Storage1 nec_ismcli_fip = 192.168.1.10 nec_ismcli_user = sysadmin nec_ismcli_privkey = /etc/cinder/id_rsa nec_pools = 0 nec_backup_pools = 1 .. end When using multi config-group (multi-backend) --------------------------------------------- - Four config-groups (backends) Storage1, Storage2, Storage3, Storage4 - Two disk arrays 200000255C3A21CC(192.168.1.10) Example for using config-group, Storage1 and Storage2 2000000991000316(192.168.1.20) Example for using config-group, Storage3 and Storage4 .. 
code-block:: ini [DEFAULT] enabled_backends = Storage1,Storage2,Storage3,Storage4 [Storage1] volume_driver = cinder.volume.drivers.nec.volume.MStorageISCSIDriver volume_backend_name = Gold nec_ismcli_fip = 192.168.1.10 nec_ismcli_user = sysadmin nec_ismcli_password = sys123 nec_pools = 0 nec_backup_pools = 2 nec_diskarray_name = 200000255C3A21CC [Storage2] volume_driver = cinder.volume.drivers.nec.volume.MStorageISCSIDriver volume_backend_name = Silver nec_ismcli_fip = 192.168.1.10 nec_ismcli_user = sysadmin nec_ismcli_password = sys123 nec_pools = 1 nec_backup_pools = 3 nec_diskarray_name = 200000255C3A21CC [Storage3] volume_driver = cinder.volume.drivers.nec.volume.MStorageISCSIDriver volume_backend_name = Gold nec_ismcli_fip = 192.168.1.20 nec_ismcli_user = sysadmin nec_ismcli_password = sys123 nec_pools = 0 nec_backup_pools = 2 nec_diskarray_name = 2000000991000316 [Storage4] volume_driver = cinder.volume.drivers.nec.volume.MStorageISCSIDriver volume_backend_name = Silver nec_ismcli_fip = 192.168.1.20 nec_ismcli_user = sysadmin nec_ismcli_password = sys123 nec_pools = 1 nec_backup_pools = 3 nec_diskarray_name = 2000000991000316 .. end ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/doc/source/configuration/block-storage/drivers/nec-storage-v-series-driver.rst0000664000175000017500000000703100000000000032203 0ustar00zuulzuul00000000000000=========================== NEC Storage V series driver =========================== NEC Storage V series driver provides Fibre Channel and iSCSI support for NEC V series storages. System requirements ~~~~~~~~~~~~~~~~~~~ Supported models: +-----------------+------------------------+ | Storage model | Firmware version | +=================+========================+ | V100, | 93-04-21 or later | | V300 | | +-----------------+------------------------+ Required storage licenses: * iStorage Local Replication Local Replication Software Supported operations ~~~~~~~~~~~~~~~~~~~~ * Create, delete, attach, and detach volumes. * Create, list, and delete volume snapshots. * Create a volume from a snapshot. * Create, list, update, and delete consistency groups. * Create, list, and delete consistency group snapshots. * Copy a volume to an image. * Copy an image to a volume. * Clone a volume. * Extend a volume. * Migrate a volume. * Get volume statistics. * Efficient non-disruptive volume backup. * Manage and unmanage a volume. * Attach a volume to multiple instances at once (multi-attach). * Revert a volume to a snapshot. .. note:: A volume with snapshots cannot be extended in this driver. Configuration ~~~~~~~~~~~~~ Set up NEC V series storage --------------------------- You need to specify settings as described below for storage systems. For details about each setting, see the user's guide of the storage systems. Common resources: - ``All resources`` All storage resources, such as DP pools and host groups, can not have a name including blank space in order for the driver to use them. - ``User accounts`` Create a storage device account belonging to the Administrator User Group. - ``DP Pool`` Create a DP pool that is used by the driver. - ``Resource group`` If using a new resource group for exclusive use by an OpenStack system, create a new resource group, and assign the necessary resources, such as LDEVs, port, and host group (iSCSI target) to the created resource. - ``Ports`` Enable Port Security for the ports used by the driver. 
If you use iSCSI: - ``Ports`` Assign an IP address and a TCP port number to the port. Set up NEC V series storage volume driver ----------------------------------------- Set the volume driver to NEC V series storage driver by setting the volume_driver option in the cinder.conf file as follows: If you use Fibre Channel: .. code-block:: ini [Storage1] volume_driver = cinder.volume.drivers.nec.v.nec_v_fc.VStorageFCDriver volume_backend_name = Storage1 san_ip = 1.2.3.4 san_api_port = 23451 san_login = userid san_password = password nec_v_storage_id = 123456789012 nec_v_pools = pool0 If you use iSCSI: .. code-block:: ini [Storage1] volume_driver = cinder.volume.drivers.nec.v.nec_v_iscsi.VStorageISCSIDriver volume_backend_name = Storage1 san_ip = 1.2.3.4 san_api_port = 23451 san_login = userid san_password = password nec_v_storage_id = 123456789012 nec_v_pools = pool0 This table shows configuration options for NEC V series storage driver. .. config-table:: :config-target: NEC V series storage driver cinder.volume.drivers.nec.v.nec_v_rest Required options ---------------- - ``san_ip`` IP address of SAN controller - ``san_login`` Username for SAN controller - ``san_password`` Password for SAN controller - ``nec_v_storage_id`` Product number of the storage system. - ``nec_v_pools`` Pool number(s) or pool name(s) of the DP pool. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/doc/source/configuration/block-storage/drivers/netapp-volume-driver.rst0000664000175000017500000003134600000000000031043 0ustar00zuulzuul00000000000000===================== NetApp unified driver ===================== The NetApp unified driver is a Block Storage driver that supports multiple storage families and protocols. Currently, the only storage family supported by this driver is the clustered Data ONTAP. The storage protocol refers to the protocol used to initiate data storage and access operations on those storage systems like NVMe, iSCSI and NFS. The NetApp unified driver can be configured to provision and manage OpenStack volumes on a given storage family using a specified storage protocol. Also, the NetApp unified driver supports over subscription or over provisioning when thin provisioned Block Storage volumes are in use. The OpenStack volumes can then be used for accessing and storing data using the storage protocol on the storage family system. The NetApp unified driver is an extensible interface that can support new storage families and protocols. .. note:: With the Juno release of OpenStack, Block Storage has introduced the concept of storage pools, in which a single Block Storage back end may present one or more logical storage resource pools from which Block Storage will select a storage location when provisioning volumes. In releases prior to Juno, the NetApp unified driver contained some scheduling logic that determined which NetApp storage container (namely, a FlexVol volume for Data ONTAP) that a new Block Storage volume would be placed into. With the introduction of pools, all scheduling logic is performed completely within the Block Storage scheduler, as each NetApp storage container is directly exposed to the Block Storage scheduler as a storage pool. Previously, the NetApp unified driver presented an aggregated view to the scheduler and made a final placement decision as to which NetApp storage container the Block Storage volume would be provisioned into. 
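Under this pool model, the pools the scheduler sees can be inspected directly once a NetApp back end is running; each FlexVol volume is reported as a pool named ``host@backend#FlexVol``. The host, back-end, and FlexVol names in the sketch below are illustrative only:

.. code-block:: console

   $ cinder get-pools
   +----------+----------------------------------+
   | Property | Value                            |
   +----------+----------------------------------+
   | name     | openstack@ontap-iscsi#flexvol_01 |
   +----------+----------------------------------+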
NetApp clustered Data ONTAP storage family ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The NetApp clustered Data ONTAP storage family represents a configuration group which provides Compute instances access to clustered Data ONTAP storage systems. At present it can be configured in Block Storage to work with NVme, iSCSI and NFS storage protocols. NetApp iSCSI configuration for clustered Data ONTAP --------------------------------------------------- The NetApp iSCSI configuration for clustered Data ONTAP is an interface from OpenStack to clustered Data ONTAP storage systems. It provisions and manages the SAN block storage entity, which is a NetApp LUN that can be accessed using the iSCSI protocol. The iSCSI configuration for clustered Data ONTAP is a direct interface from Block Storage to the clustered Data ONTAP instance and as such does not require additional management software to achieve the desired functionality. It uses NetApp APIs to interact with the clustered Data ONTAP instance. **Configuration options** Configure the volume driver, storage family, and storage protocol to the NetApp unified driver, clustered Data ONTAP, and iSCSI respectively by setting the ``volume_driver``, ``netapp_storage_family`` and ``netapp_storage_protocol`` options in the ``cinder.conf`` file as follows: .. code-block:: ini volume_driver = cinder.volume.drivers.netapp.common.NetAppDriver netapp_storage_family = ontap_cluster netapp_storage_protocol = iscsi netapp_vserver = openstack-vserver netapp_server_hostname = myhostname netapp_server_port = port netapp_login = username netapp_password = password .. note:: To use the iSCSI protocol, you must override the default value of ``netapp_storage_protocol`` with ``iscsi``. Note that this is not the same value that is reported by the driver to the scheduler as `storage_protocol`, which is always ``iSCSI`` (case sensitive). .. include:: ../../tables/cinder-netapp_cdot_iscsi.inc .. note:: If you specify an account in the ``netapp_login`` that only has virtual storage server (Vserver) administration privileges (rather than cluster-wide administration privileges), some advanced features of the NetApp unified driver will not work and you may see warnings in the Block Storage logs. .. note:: The driver supports iSCSI CHAP uni-directional authentication. To enable it, set the ``use_chap_auth`` option to ``True``. .. tip:: For more information on these options and other deployment and operational scenarios, visit the `NetApp OpenStack website `_. NetApp NVMe/TCP configuration for clustered Data ONTAP ------------------------------------------------------ The NetApp NVMe/TCP configuration for clustered Data ONTAP is an interface from OpenStack to clustered Data ONTAP storage systems. It provisions and manages the SAN block storage entity, which is a NetApp namespace that can be accessed using the NVMe/TCP protocol. The NVMe/TCP configuration for clustered Data ONTAP is a direct interface from Block Storage to the clustered Data ONTAP instance and as such does not require additional management software to achieve the desired functionality. It uses NetApp APIs to interact with the clustered Data ONTAP instance. **Configuration options** Configure the volume driver, storage family, and storage protocol to the NetApp unified driver, clustered Data ONTAP, and NVMe respectively by setting the ``volume_driver``, ``netapp_storage_family`` and ``netapp_storage_protocol`` options in the ``cinder.conf`` file as follows: .. 
code-block:: ini volume_driver = cinder.volume.drivers.netapp.common.NetAppDriver netapp_storage_family = ontap_cluster netapp_storage_protocol = nvme netapp_vserver = openstack-vserver netapp_server_hostname = myhostname netapp_server_port = port netapp_login = username netapp_password = password .. note:: To use the NVMe/TCP protocol, you must override the default value of ``netapp_storage_protocol`` with ``nvme``. Note that this is not the same value that is reported by the driver to the scheduler as `storage_protocol`, which is always ``NVMe`` (case sensitive). .. note:: If you specify an account in the ``netapp_login`` that only has virtual storage server (Vserver) administration privileges (rather than cluster-wide administration privileges), some advanced features of the NetApp unified driver will not work and you may see warnings in the Block Storage logs. .. note:: The driver only supports the minimal Cinder driver features: create/delete volume and snapshots, extend volume, attach/detach volume, create volume from volume and create volume from image/snapshot. .. tip:: For more information on these options and other deployment and operational scenarios, visit the `NetApp OpenStack website `_. NetApp NFS configuration for clustered Data ONTAP ------------------------------------------------- The NetApp NFS configuration for clustered Data ONTAP is an interface from OpenStack to a clustered Data ONTAP system for provisioning and managing OpenStack volumes on NFS exports provided by the clustered Data ONTAP system that are accessed using the NFS protocol. The NFS configuration for clustered Data ONTAP is a direct interface from Block Storage to the clustered Data ONTAP instance and as such does not require any additional management software to achieve the desired functionality. It uses NetApp APIs to interact with the clustered Data ONTAP instance. **Configuration options** Configure the volume driver, storage family, and storage protocol to the NetApp unified driver, clustered Data ONTAP, and NFS respectively by setting the ``volume_driver``, ``netapp_storage_family``, and ``netapp_storage_protocol`` options in the ``cinder.conf`` file as follows: .. code-block:: ini volume_driver = cinder.volume.drivers.netapp.common.NetAppDriver netapp_storage_family = ontap_cluster netapp_storage_protocol = nfs netapp_vserver = openstack-vserver netapp_server_hostname = myhostname netapp_server_port = port netapp_login = username netapp_password = password nfs_shares_config = /etc/cinder/nfs_shares .. include:: ../../tables/cinder-netapp_cdot_nfs.inc .. note:: Additional NetApp NFS configuration options are shared with the generic NFS driver. These options can be found here: :ref:`cinder-storage_nfs`. .. note:: If you specify an account in the ``netapp_login`` that only has virtual storage server (Vserver) administration privileges (rather than cluster-wide administration privileges), some advanced features of the NetApp unified driver will not work and you may see warnings in the Block Storage logs. NetApp NFS Copy Offload client ------------------------------ A feature was added in the Icehouse release of the NetApp unified driver that enables Image service images to be efficiently copied to a destination Block Storage volume. When the Block Storage and Image service are configured to use the NetApp NFS Copy Offload client, a controller-side copy will be attempted before reverting to downloading the image from the Image service.
This improves image provisioning times while reducing the consumption of bandwidth and CPU cycles on the host(s) running the Image and Block Storage services. This is due to the copy operation being performed completely within the storage cluster. The NetApp NFS Copy Offload client can be used in either of the following scenarios: - The Image service is configured to store images in an NFS share that is exported from a NetApp FlexVol volume *and* the destination for the new Block Storage volume will be on an NFS share exported from a different FlexVol volume than the one used by the Image service. Both FlexVols must be located within the same cluster. - The source image from the Image service has already been cached in an NFS image cache within a Block Storage back end. The cached image resides on a different FlexVol volume than the destination for the new Block Storage volume. Both FlexVols must be located within the same cluster. To use this feature, you must configure the Image service, as follows: - Set the ``default_store`` configuration option to ``file``. - Set the ``filesystem_store_datadir`` configuration option to the path to the Image service NFS export. - Set the ``show_image_direct_url`` configuration option to ``True``. - Set the ``show_multiple_locations`` configuration option to ``True``. - Set the ``filesystem_store_metadata_file`` configuration option to a metadata file. The metadata file should contain a JSON object that contains the correct information about the NFS export used by the Image service. To use this feature, you must configure the Block Storage service, as follows: - Set the ``netapp_copyoffload_tool_path`` configuration option to the path to the NetApp Copy Offload binary. .. important:: This feature requires that: - The storage system must have Data ONTAP v8.2 or greater installed. - The vStorage feature must be enabled on each storage virtual machine (SVM, also known as a Vserver) that is permitted to interact with the copy offload client. - To configure the copy offload workflow, enable NFS v4.0 or greater and export it from the SVM. .. tip:: To download the NetApp copy offload binary to be utilized in conjunction with the ``netapp_copyoffload_tool_path`` configuration option, please visit the Utility Toolchest page at the `NetApp Support portal `__ (login is required). .. tip:: For more information on these options and other deployment and operational scenarios, visit the `NetApp OpenStack website `_. NetApp-supported extra specs for clustered Data ONTAP ----------------------------------------------------- Extra specs enable vendors to specify extra filter criteria. The Block Storage scheduler uses the specs when the scheduler determines which volume node should fulfill a volume provisioning request. When you use the NetApp unified driver with a clustered Data ONTAP storage system, you can leverage extra specs with Block Storage volume types to ensure that Block Storage volumes are created on storage back ends that have certain properties. An example of this is when you configure QoS, mirroring, or compression for a storage back end. Extra specs are associated with Block Storage volume types. When users request volumes of a particular volume type, the volumes are created on storage back ends that meet the list of requirements. An example of this is the back ends that have the available space or extra specs. Use the specs in the following table to configure volumes. 
Define Block Storage volume types by using the :command:`openstack volume type set` command. .. include:: ../../tables/manual/cinder-netapp_cdot_extraspecs.inc ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/doc/source/configuration/block-storage/drivers/nexentastor4-driver.rst0000664000175000017500000001125500000000000030702 0ustar00zuulzuul00000000000000===================================== NexentaStor 4.x NFS and iSCSI drivers ===================================== NexentaStor is an Open Source-driven Software-Defined Storage (OpenSDS) platform delivering unified file (NFS and SMB) and block (FC and iSCSI) storage services, runs on industry standard hardware, scales from tens of terabytes to petabyte configurations, and includes all data management functionality by default. For NexentaStor 4.x user documentation, visit https://nexenta.com/products/downloads/nexentastor. Supported operations ~~~~~~~~~~~~~~~~~~~~ * Create, delete, attach, and detach volumes. * Create, list, and delete volume snapshots. * Create a volume from a snapshot. * Copy an image to a volume. * Copy a volume to an image. * Clone a volume. * Extend a volume. * Migrate a volume. * Change volume type. Nexenta iSCSI driver ~~~~~~~~~~~~~~~~~~~~ The Nexenta iSCSI driver allows you to use a NexentaStor appliance to store Compute volumes. Every Compute volume is represented by a single zvol in a predefined Nexenta namespace. The Nexenta iSCSI volume driver should work with all versions of NexentaStor. The NexentaStor appliance must be installed and configured according to the relevant Nexenta documentation. A volume and an enclosing namespace must be created for all iSCSI volumes to be accessed through the volume driver. This should be done as specified in the release-specific NexentaStor documentation. The NexentaStor Appliance iSCSI driver is selected using the normal procedures for one or multiple backend volume drivers. You must configure these items for each NexentaStor appliance that the iSCSI volume driver controls: #. Make the following changes on the volume node ``/etc/cinder/cinder.conf`` file. .. code-block:: ini # Enable Nexenta iSCSI driver volume_driver=cinder.volume.drivers.nexenta.iscsi.NexentaISCSIDriver # IP address of NexentaStor host (string value) nexenta_host=HOST-IP # Username for NexentaStor REST (string value) nexenta_user=USERNAME # Port for Rest API (integer value) nexenta_rest_port=8457 # Password for NexentaStor REST (string value) nexenta_password=PASSWORD # Volume on NexentaStor appliance (string value) nexenta_volume=volume_name .. note:: nexenta_volume represents a zpool which is called volume on NS appliance. It must be pre-created before enabling the driver. #. Save the changes to the ``/etc/cinder/cinder.conf`` file and restart the ``cinder-volume`` service. Nexenta NFS driver ~~~~~~~~~~~~~~~~~~ The Nexenta NFS driver allows you to use NexentaStor appliance to store Compute volumes via NFS. Every Compute volume is represented by a single NFS file within a shared directory. While the NFS protocols standardize file access for users, they do not standardize administrative actions such as taking snapshots or replicating file systems. The OpenStack Volume Drivers bring a common interface to these operations. The Nexenta NFS driver implements these standard actions using the ZFS management plane that is already deployed on NexentaStor appliances. The Nexenta NFS volume driver should work with all versions of NexentaStor. 
The NexentaStor appliance must be installed and configured according to the relevant Nexenta documentation. A single-parent file system must be created for all virtual disk directories supported for OpenStack. This directory must be created and exported on each NexentaStor appliance. This should be done as specified in the release- specific NexentaStor documentation. You must configure these items for each NexentaStor appliance that the NFS volume driver controls: #. Make the following changes on the volume node ``/etc/cinder/cinder.conf`` file. .. code-block:: ini # Enable Nexenta NFS driver volume_driver=cinder.volume.drivers.nexenta.nfs.NexentaNfsDriver # Path to shares config file nexenta_shares_config=/home/ubuntu/shares.cfg .. note:: Add your list of Nexenta NFS servers to the file you specified with the ``nexenta_shares_config`` option. For example, this is how this file should look: .. code-block:: bash 192.168.1.200:/volumes/VOLUME_NAME/NFS_SHARE http://USER:PASSWORD@192.168.1.200:8457 192.168.1.201:/volumes/VOLUME_NAME/NFS_SHARE http://USER:PASSWORD@192.168.1.201:8457 192.168.1.202:/volumes/VOLUME_NAME/NFS_SHARE http://USER:PASSWORD@192.168.1.202:8457 Each line in this file represents an NFS share. The first part of the line is the NFS share URL, the second line is the connection URL to the NexentaStor Appliance. Driver options ~~~~~~~~~~~~~~ Nexenta Driver supports these options: .. include:: ../../tables/cinder-nexenta.inc ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/doc/source/configuration/block-storage/drivers/nexentastor5-driver.rst0000664000175000017500000001252600000000000030705 0ustar00zuulzuul00000000000000===================================== NexentaStor 5.x NFS and iSCSI drivers ===================================== NexentaStor is an Open Source-driven Software-Defined Storage (OpenSDS) platform delivering unified file (NFS and SMB) and block (FC and iSCSI) storage services. NexentaStor runs on industry standard hardware, scales from tens of terabytes to petabyte configurations, and includes all data management functionality by default. For user documentation, see the `Nexenta Documentation Center `__. Supported operations ~~~~~~~~~~~~~~~~~~~~ * Create, delete, attach, and detach volumes. * Create, list, and delete volume snapshots. * Create a volume from a snapshot. * Copy an image to a volume. * Copy a volume to an image. * Clone a volume. * Extend a volume. * Migrate a volume. * Change volume type. * Get volume statistics. * Revert a volume to a snapshot. * Manage and unmanage volumes and snapshots. * List manageable volumes and snapshots. * Create, modify, delete, and list consistency groups. * Create, modify, delete, and list snapshots of consistency groups. * Create consistency group from consistency group or consistency group snapshot. * Support consistency groups capability to generic volume groups. * Attach a volume to multiple servers simultaneously (multiattach). iSCSI driver ~~~~~~~~~~~~ The NexentaStor appliance must be installed and configured according to the relevant Nexenta documentation. A pool and an enclosing namespace must be created for all iSCSI volumes to be accessed through the volume driver. This should be done as specified in the release-specific NexentaStor documentation. The NexentaStor Appliance iSCSI driver is selected using the normal procedures for one or multiple back-end volume drivers. 
You must configure these items for each NexentaStor appliance that the iSCSI volume driver controls: #. Make the following changes on the volume node ``/etc/cinder/cinder.conf`` file. .. code-block:: ini # Enable Nexenta iSCSI driver volume_driver=cinder.volume.drivers.nexenta.ns5.iscsi.NexentaISCSIDriver # IP address of NexentaStor host (string value) nexenta_host=HOST-IP # Port for Rest API (integer value) nexenta_rest_port=8443 # Username for NexentaStor Rest (string value) nexenta_user=USERNAME # Password for NexentaStor Rest (string value) nexenta_password=PASSWORD # Pool on NexentaStor appliance (string value) nexenta_volume=volume_name # Name of a parent Volume group where cinder created zvols will reside (string value) nexenta_volume_group = iscsi .. note:: nexenta_volume represents a zpool, which is called pool on NS 5.x appliance. It must be pre-created before enabling the driver. Volume group does not need to be pre-created, the driver will create it if does not exist. #. Save the changes to the ``/etc/cinder/cinder.conf`` file and restart the ``cinder-volume`` service. NFS driver ~~~~~~~~~~ The Nexenta NFS driver allows you to use NexentaStor appliance to store Compute volumes via NFS. Every Compute volume is represented by a single NFS file within a shared directory. While the NFS protocols standardize file access for users, they do not standardize administrative actions such as taking snapshots or replicating file systems. The OpenStack Volume Drivers bring a common interface to these operations. The Nexenta NFS driver implements these standard actions using the ZFS management plane that already is deployed on NexentaStor appliances. The NexentaStor appliance must be installed and configured according to the relevant Nexenta documentation. A single-parent file system must be created for all virtual disk directories supported for OpenStack. Create and export the directory on each NexentaStor appliance. You must configure these items for each NexentaStor appliance that the NFS volume driver controls: #. Make the following changes on the volume node ``/etc/cinder/cinder.conf`` file. .. code-block:: ini # Enable Nexenta NFS driver volume_driver=cinder.volume.drivers.nexenta.ns5.nfs.NexentaNfsDriver # IP address or Hostname of NexentaStor host (string value) nas_host=HOST-IP # Port for Rest API (integer value) nexenta_rest_port=8443 # Path to parent filesystem (string value) nas_share_path=POOL/FILESYSTEM # Recommended NFS options nas_mount_options=vers=3,minorversion=0,timeo=100,nolock #. Create filesystem on appliance and share via NFS. For example: .. code-block:: vim "securityContexts": [ {"readWriteList": [{"allow": true, "etype": "fqnip", "entity": "1.1.1.1"}], "root": [{"allow": true, "etype": "fqnip", "entity": "1.1.1.1"}], "securityModes": ["sys"]}] #. Create ACL for the filesystem. For example: .. code-block:: json {"type": "allow", "principal": "everyone@", "permissions": ["list_directory","read_data","add_file","write_data", "add_subdirectory","append_data","read_xattr","write_xattr","execute", "delete_child","read_attributes","write_attributes","delete","read_acl", "write_acl","write_owner","synchronize"], "flags": ["file_inherit","dir_inherit"]} Driver options ~~~~~~~~~~~~~~ Nexenta Driver supports these options: .. 
include:: ../../tables/cinder-nexenta5.inc ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/doc/source/configuration/block-storage/drivers/nfs-volume-driver.rst0000664000175000017500000001320700000000000030336 0ustar00zuulzuul00000000000000========== NFS driver ========== The Network File System (NFS) is a distributed file system protocol originally developed by Sun Microsystems in 1984. An NFS server ``exports`` one or more of its file systems, known as ``shares``. An NFS client can mount these exported shares on its own file system. You can perform file actions on this mounted remote file system as if the file system were local. How the NFS driver works ~~~~~~~~~~~~~~~~~~~~~~~~ The NFS driver, and other drivers based on it, work quite differently than a traditional block storage driver. The NFS driver does not actually allow an instance to access a storage device at the block level. Instead, files are created on an NFS share and mapped to instances, which emulates a block device. This works in a similar way to QEMU, which stores instances in the ``/var/lib/nova/instances`` directory. Enable the NFS driver and related options ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ To use Cinder with the NFS driver, first set the ``volume_driver`` in the ``cinder.conf`` configuration file: .. code-block:: ini volume_driver=cinder.volume.drivers.nfs.NfsDriver The following table contains the options supported by the NFS driver. .. _cinder-storage_nfs: .. config-table:: :config-target: NFS storage cinder.volume.drivers.nfs .. note:: As of the Icehouse release, the NFS driver (and other drivers based off it) will attempt to mount shares using version 4.1 of the NFS protocol (including pNFS). If the mount attempt is unsuccessful due to a lack of client or server support, a subsequent mount attempt that requests the default behavior of the :command:`mount.nfs` command will be performed. On most distributions, the default behavior is to attempt mounting first with NFS v4.0, then silently fall back to NFS v3.0 if necessary. If the ``nfs_mount_options`` configuration option contains a request for a specific version of NFS to be used, or if specific options are specified in the shares configuration file specified by the ``nfs_shares_config`` configuration option, the mount will be attempted as requested with no subsequent attempts. How to use the NFS driver ~~~~~~~~~~~~~~~~~~~~~~~~~ Creating an NFS server is outside the scope of this document. Configure with one NFS server ----------------------------- This example assumes access to the following NFS server and mount point: * 192.168.1.200:/storage This example demonstrates the usage of this driver with one NFS server. Set the ``nas_host`` option to the IP address or host name of your NFS server, and the ``nas_share_path`` option to the NFS export path: .. code-block:: ini nas_host = 192.168.1.200 nas_share_path = /storage Configure with multiple NFS servers ----------------------------------- .. note:: You can use the multiple NFS servers with `cinder multi back ends `_ feature. Configure the :ref:`enabled_backends ` option with multiple values, and use the ``nas_host`` and ``nas_share`` options for each back end as described above. The below example is another method to use multiple NFS servers, and demonstrates the usage of this driver with multiple NFS servers. Multiple servers are not required. One is usually enough. 
This example assumes access to the following NFS servers and mount points: * 192.168.1.200:/storage * 192.168.1.201:/storage * 192.168.1.202:/storage #. Add your list of NFS servers to the file you specified with the ``nfs_shares_config`` option. For example, if the value of this option was set to ``/etc/cinder/shares.txt`` file, then: .. code-block:: console # cat /etc/cinder/shares.txt 192.168.1.200:/storage 192.168.1.201:/storage 192.168.1.202:/storage Comments are allowed in this file. They begin with a ``#``. #. Configure the ``nfs_mount_point_base`` option. This is a directory where ``cinder-volume`` mounts all NFS shares stored in the ``shares.txt`` file. For this example, ``/var/lib/cinder/nfs`` is used. You can, of course, use the default value of ``$state_path/mnt``. #. Start the ``cinder-volume`` service. ``/var/lib/cinder/nfs`` should now contain a directory for each NFS share specified in the ``shares.txt`` file. The name of each directory is a hashed name: .. code-block:: console # ls /var/lib/cinder/nfs/ ... 46c5db75dc3a3a50a10bfd1a456a9f3f ... #. You can now create volumes as you normally would: .. code-block:: console $ openstack volume create --size 5 # ls /var/lib/cinder/nfs/46c5db75dc3a3a50a10bfd1a456a9f3f volume-a8862558-e6d6-4648-b5df-bb84f31c8935 This volume can also be attached and deleted just like other volumes. NFS driver notes ~~~~~~~~~~~~~~~~ * ``cinder-volume`` manages the mounting of the NFS shares as well as volume creation on the shares. Keep this in mind when planning your OpenStack architecture. If you have one master NFS server, it might make sense to only have one ``cinder-volume`` service to handle all requests to that NFS server. However, if that single server is unable to handle all requests, more than one ``cinder-volume`` service is needed as well as potentially more than one NFS server. * Because data is stored in a file and not actually on a block storage device, you might not see the same IO performance as you would with a traditional block storage driver. Please test accordingly. * Despite possible IO performance loss, having volume data stored in a file might be beneficial. For example, backing up volumes can be as easy as copying the volume files. .. note:: Regular IO flushing and syncing still stands. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/doc/source/configuration/block-storage/drivers/nimble-volume-driver.rst0000664000175000017500000002607000000000000031020 0ustar00zuulzuul00000000000000========================================= Nimble & Alletra 6k Storage volume driver ========================================= Nimble Storage fully integrates with the OpenStack platform through the Nimble Cinder driver, allowing a host to configure and manage Nimble and Alletra 6k Storage array features through Block Storage interfaces. Support for iSCSI storage protocol is available with NimbleISCSIDriver Volume Driver class and Fibre Channel with NimbleFCDriver. Support for the Liberty release and above is available from Nimble OS 2.3.8 or later. Support for the Ocata release and above is available from Nimble OS 3.6 or later. For Xena release, Nimble OS 5.3 or later is used and Alletra OS 6.0 or later is used. Nimble and Alletra 6k Storage Cinder driver does not support port binding with multiple interfaces on the same subnet due to existing limitation in os-brick. 
This is partially referenced in the bug https://bugs.launchpad.net/os-brick/+bug/1722432 but does not resolve for multiple software iscsi ifaces. Supported operations ~~~~~~~~~~~~~~~~~~~~ * Create, delete, clone, attach, and detach volumes * Create and delete volume snapshots * Create a volume from a snapshot * Copy an image to a volume * Copy a volume to an image * Extend a volume * Get volume statistics * Manage and unmanage a volume * Enable encryption and default performance policy for a volume-type extra-specs * Force backup of an in-use volume * Retype a volume * Create a Thinly Provisioned Volume * Attach a volume to multiple servers simultaneously (multiattach) * Volume Revert to Snapshot * Create, list, update, and delete consistency groups * Create, list, and delete consistency group snapshots * Consistency group replication Nimble and Alletra 6k Storage driver configuration ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Update the file ``/etc/cinder/cinder.conf`` with the given configuration. Note: These parameters apply to Alletra 6k Storage as well. In case of a basic (single back-end) configuration, add the parameters within the ``[default]`` section as follows. .. code-block:: ini [default] san_ip = NIMBLE_MGMT_IP san_login = NIMBLE_USER san_password = NIMBLE_PASSWORD use_multipath_for_image_xfer = True volume_driver = NIMBLE_VOLUME_DRIVER san_thin_provision = True In case of multiple back-end configuration, for example, configuration which supports multiple Nimble Storage arrays or a single Nimble Storage array with arrays from other vendors, use the following parameters. .. code-block:: ini [default] enabled_backends = Nimble-Cinder [Nimble-Cinder] san_ip = NIMBLE_MGMT_IP san_login = NIMBLE_USER san_password = NIMBLE_PASSWORD use_multipath_for_image_xfer = True volume_driver = NIMBLE_VOLUME_DRIVER volume_backend_name = NIMBLE_BACKEND_NAME In case of multiple back-end configuration, Nimble Storage volume type is created and associated with a back-end name as follows. .. note:: Single back-end configuration users do not need to create the volume type. .. code-block:: console $ openstack volume type create NIMBLE_VOLUME_TYPE $ openstack volume type set --property volume_backend_name=NIMBLE_BACKEND_NAME NIMBLE_VOLUME_TYPE This section explains the variables used above: NIMBLE_MGMT_IP Management IP address of Nimble/Alletra 6k Storage array/group. NIMBLE_USER Nimble/Alletra 6k Storage account login with minimum ``power user`` (admin) privilege if RBAC is used. NIMBLE_PASSWORD Password of the admin account for Nimble/Alletra 6k array. NIMBLE_VOLUME_DRIVER Use either cinder.volume.drivers.hpe.nimble.NimbleISCSIDriver for iSCSI or cinder.volume.drivers.hpe.nimble.NimbleFCDriver for Fibre Channel. NIMBLE_BACKEND_NAME A volume back-end name which is specified in the ``cinder.conf`` file. This is also used while assigning a back-end name to the Nimble volume type. NIMBLE_VOLUME_TYPE The Nimble volume-type which is created from the CLI and associated with ``NIMBLE_BACKEND_NAME``. .. note:: Restart the ``cinder-api``, ``cinder-scheduler``, and ``cinder-volume`` services after updating the ``cinder.conf`` file. Nimble driver extra spec options ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The Nimble volume driver also supports the following extra spec options: 'nimble:encryption'='yes' Used to enable encryption for a volume-type. 
'nimble:perfpol-name'=PERF_POL_NAME PERF_POL_NAME is the name of a performance policy which exists on the Nimble/Alletra 6k array and should be enabled for every volume in a volume type. .. note:: When upgrading to OpenStack deployment to Victoria or later, do unset ``nimble:multi-initiator`` extra-spec and set ``multiattach=' True'``. nimble:dedupe'='true' Used to enable dedupe support for a volume-type. 'nimble:iops-limit'=IOPS_LIMIT Used to set the IOPS_LIMIT between 256 and 4294967294 for all volumes created for this volume-type. 'nimble:folder'=FOLDER_NAME FOLDER_NAME is the name of the folder which exists on the Nimble/Alletra 6k array and should be enabled for every volume in a volume type These extra-specs can be enabled by using the following command: .. code-block:: console $ openstack volume type set --property KEY=VALUE VOLUME_TYPE ``VOLUME_TYPE`` is the Nimble volume type and ``KEY`` and ``VALUE`` are the options mentioned above. Configuration options ~~~~~~~~~~~~~~~~~~~~~ The Nimble/Alletra 6k storage driver supports these configuration options: .. config-table:: :config-target: Nimble cinder.volume.drivers.hpe.nimble Multipathing ~~~~~~~~~~~~ In OpenStack environments where Cinder block device multipathing is desired there are a few things to consider. Configuring mulitpathing varies by system depending on the environment. In a scenario where solely Nimble devices are being created by Cinder, the following ``/etc/multipath.conf`` file may be used: .. code-block:: text defaults { user_friendly_names yes find_multipaths no } blacklist { devnode "^(ram|raw|loop|fd|md|dm-|sr|scd|st)[0-9]*" devnode "^hd[a-z]" device { vendor ".*" product ".*" } } blacklist_exceptions { device { vendor "Nimble" product "Server" } } devices { device { vendor "Nimble" product "Server" path_grouping_policy group_by_prio prio "alua" hardware_handler "1 alua" path_selector "service-time 0" path_checker tur features "1 queue_if_no_path" no_path_retry 30 failback immediate fast_io_fail_tmo 5 dev_loss_tmo infinity rr_min_io_rq 1 rr_weight uniform } } After making changes to ``/etc/multipath.conf``, the multipath subsystem needs to be reconfigured: .. code-block:: console # multipathd reconfigure .. tip:: The latest best practices for Nimble devices can be found in the HPE Nimble Storage Linux Integration Guide found on https://infosight.hpe.com .. important:: OpenStack Cinder is currently not compatible with the HPE Nimble Storage Linux Toolkit (NLT) Nova needs to be configured to pickup the actual multipath device created on the host. In ``/etc/nova/nova.conf``, add the following to the ``[libvirt]`` section: .. code-block:: ini [libvirt] volume_use_multipath = True .. note:: In versions prior to Newton, the option was called ``iscsi_use_multipath`` After editing the Nova configuration file, the ``nova-conductor`` service needs to be restarted. .. tip:: Depending on which particular OpenStack distribution is being used, Nova may use a different configuration file than the default. To validate that instances get properly connected to the multipath device, inspect the instance devices: .. code-block:: console # virsh dumpxml Consistency group replication ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ To enable consistency group replication, follow below steps: 1. Add `replication_device` to storage backend settings in `cinder.conf`, then restart Cinder Volume service. Example of `cinder.conf` for volume replications: .. code-block:: ini [nimble] san_ip = xxx.xxx.xxx.xxx ... 
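# The options above are the usual single-array back-end settings.
# The replication_device entry that follows (group name, address,
# credentials, and schedule are example values) pairs this back end
# with the downstream array used as the replication target.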
replication_device = backend_id:nimblevsagroup2, san_ip:10.132.239.66, san_login:admin, san_password:admin, schedule_name:sched-one, downstream_partner:nimblevsagroup2, period:15, period_unit:minutes - Only one `replication_device` can be configured for each primary backend. - Keys `backend_id`, `san_ip`, `san_login`, `san_password`, `schedule_name` and `downstream_partner` are mandatory. - Other parameters are optional (if not given, then default values will be used): period:1 period_unit:days num_retain:10 num_retain_replica:1 at_time:'00:00' until_time:'23:59' days='all' replicate_every:1 alert_threshold:'24:00' 2. Create a volume type with properties `replication_enabled=' True'` and `consistent_group_snapshot_enabled=' True'` .. code-block:: console $ cinder type-create nimble $ cinder type-key nimble set volume_backend_name='nimble' $ cinder type-key nimble set replication_enabled=' True' $ cinder type-key nimble set consistent_group_snapshot_enabled=' True' 3. Create a consistency group type with properties `consistent_group_snapshot_enabled=' True'` and `consistent_group_replication_enabled=' True'`. .. code-block:: console $ cinder --os-volume-api-version 3.38 group-type-create repl_type $ cinder --os-volume-api-version 3.38 group-type-key repl_type set consistent_group_snapshot_enabled=' True' consistent_group_replication_enabled=' True' 4. Create a group type with volume types support replication. .. code-block:: console $ cinder --os-volume-api-version 3.38 group-create --name grp_1 repl_type nimble 5. Create volume in the consistency group. .. code-block:: console $ cinder --os-volume-api-version 3.38 create --volume-type nimble --group-id {grp_1-id} --name {volume-name} {size} 6. Enable consistency group replication. .. code-block:: console $ cinder --os-volume-api-version 3.38 group-enable-replication grp_1 7. Disable consistency group replication. .. code-block:: console $ cinder --os-volume-api-version 3.38 group-disable-replication grp_1 8. Failover consistency group replication. .. code-block:: console $ cinder --os-volume-api-version 3.38 group-failover-replication --secondary-backend-id nimblevsagroup2 grp_1 9. Failback consistency group replication. .. code-block:: console $ cinder --os-volume-api-version 3.38 group-failover-replication --secondary-backend-id default grp_1 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/doc/source/configuration/block-storage/drivers/open-e-joviandss-driver.rst0000664000175000017500000001244400000000000031426 0ustar00zuulzuul00000000000000============================= Open-E JovianDSS iSCSI driver ============================= The ``JovianISCSIDriver`` allows usage of Open-E JovianDSS Data Storage Solution to be used as Block Storage in OpenStack deployments. Supported operations ~~~~~~~~~~~~~~~~~~~~ - Create, delete, attach, and detach volumes. - Create, list, and delete volume snapshots. - Create a volume from a snapshot. - Copy an image to a volume. - Copy a volume to an image. - Clone a volume. - Extend a volume. - Migrate a volume with back-end assistance. Configuring ~~~~~~~~~~~ Edit with your favourite editor Cinder config file. 
It can be found at /etc/cinder/cinder.conf Add the field enabled\_backends with value open-e-jdss-0: :: enabled_backends = open-e-jdss-0 Provide settings to Open-E JovianDSS driver by adding 'open-e-jdss-0' description: :: [open-e-jdss-0] backend_name = Open-EJovianDSS chap_password_len = 14 driver_use_ssl = True driver_ssl_cert_verify = True driver_ssl_cert_path = /etc/cinder/jdss.crt iscsi_target_prefix = iqn.2016-04.com.open-e.cinder: jovian_pool = Pool-0 jovian_block_size = 64K san_api_port = 82 target_port = 3260 volume_driver = cinder.volume.drivers.open_e.iscsi.JovianISCSIDriver san_hosts = 192.168.0.40 san_login = admin san_password = admin san_thin_provision = True .. list-table:: **Open-E JovianDSS configuration options** :header-rows: 1 * - Option - Default value - Description * - ``backend_name`` - Open-EJovianDSS - Name of the back end * - ``chap_password_len`` - 12 - Length of the unique generated CHAP password. * - ``driver_use_ssl`` - True - Use SSL to send requests to Open-E JovianDSS[1] * - ``driver_ssl_cert_verify`` - True - Verify authenticity of Open-E JovianDSS[1] certificate * - ``driver_ssl_cert_path`` - None - Path to the Open-E JovianDSS[1] certificate for verification * - ``iscsi_target_prefix`` - iqn.2016-04.com.open-e:01:cinder- - Prefix that will be used to form target name for volume * - ``jovian_pool`` - Pool-0 - Pool name that is going to be used. Must be created in [2] * - ``jovian_block_size`` - 64K - Block size for newly created volumes * - ``san_api_port`` - 82 - Rest port according to the settings in [1] * - ``target_port`` - 3260 - Port for iSCSI connections * - ``volume_driver`` - - Location of the driver source code * - ``san_hosts`` - - Comma separated list of IP address of the Open-E JovianDSS * - ``san_login`` - admin - Must be set according to the settings in [1] * - ``san_password`` - admin - Open-E Jovian DSS password [1], **should be changed** * - ``san_thin_provision`` - False - Using thin provisioning for new volumes 1. Open-E JovianDSS Web interface/System Settings/REST Access 2. Pool can be created by going to Open-E JovianDSS Web interface/Storage .. _interface/Storage: `More info about Open-E JovianDSS `__ Multiple Pools ~~~~~~~~~~~~~~ In order to add another Open-E JovianDSS Pool, create a copy of Open-E JovianDSS config in cinder.conf file. For instance if you want to add ``Pool-1`` located on the same host as ``Pool-0``. You extend ``cinder.conf`` file like: :: enabled_backends = open-e-jdss-0, open-e-jdss-1 [open-e-jdss-0] backend_name = open-e-jdss-0 chap_password_len = 14 driver_use_ssl = True driver_ssl_cert_verify = False iscsi_target_prefix = iqn.2016-04.com.open-e.cinder: jovian_pool = Pool-0 jovian_block_size = 64K san_api_port = 82 target_port = 3260 volume_driver = cinder.volume.drivers.open_e.iscsi.JovianISCSIDriver san_hosts = 192.168.0.40 san_login = admin san_password = admin san_thin_provision = True [open-e-jdss-1] backend_name = open-e-jdss-1 chap_password_len = 14 driver_use_ssl = True driver_ssl_cert_verify = False iscsi_target_prefix = iqn.2016-04.com.open-e.cinder: jovian_pool = Pool-1 jovian_block_size = 64K san_api_port = 82 target_port = 3260 volume_driver = cinder.volume.drivers.open_e.iscsi.JovianISCSIDriver san_hosts = 192.168.0.50 san_login = admin san_password = admin san_thin_provision = True HA Cluster ~~~~~~~~~~ To utilize High Availability feature of Open-E JovianDSS: 1. `Guide`_ on configuring Pool to high availability cluster .. _Guide: https://www.youtube.com/watch?v=juWIQT_bAfM 2. 
Set ``jovian_hosts`` with list of ``virtual IPs`` associated with this Pool For instance if you have ``Pool-2`` with 2 virtual IPs 192.168.21.100 and 192.168.31.100 the configuration file will look like: :: [open-e-jdss-2] backend_name = open-e-jdss-2 chap_password_len = 14 driver_use_ssl = True driver_ssl_cert_verify = False iscsi_target_prefix = iqn.2016-04.com.open-e.cinder: jovian_pool = Pool-0 jovian_block_size = 64K san_api_port = 82 target_port = 3260 volume_driver = cinder.volume.drivers.open_e.iscsi.JovianISCSIDriver san_hosts = 192.168.21.100, 192.168.31.100 san_login = admin san_password = admin san_thin_provision = True Feedback -------- Please address problems and proposals to andrei.perepiolkin@open-e.com ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/doc/source/configuration/block-storage/drivers/prophetstor-dpl-driver.rst0000664000175000017500000000625600000000000031417 0ustar00zuulzuul00000000000000=========================================== ProphetStor Fibre Channel and iSCSI drivers =========================================== ProhetStor Fibre Channel and iSCSI drivers add support for ProphetStor Flexvisor through the Block Storage service. ProphetStor Flexvisor enables commodity x86 hardware as software-defined storage leveraging well-proven ZFS for disk management to provide enterprise grade storage services such as snapshots, data protection with different RAID levels, replication, and deduplication. The ``DPLFCDriver`` and ``DPLISCSIDriver`` drivers run volume operations by communicating with the ProphetStor storage system over HTTPS. Supported operations ~~~~~~~~~~~~~~~~~~~~ * Create, delete, attach, and detach volumes. * Create, list, and delete volume snapshots. * Create a volume from a snapshot. * Copy an image to a volume. * Copy a volume to an image. * Clone a volume. * Extend a volume. Enable the Fibre Channel or iSCSI drivers ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The ``DPLFCDriver`` and ``DPLISCSIDriver`` are installed with the OpenStack software. #. Query storage pool id to configure ``dpl_pool`` of the ``cinder.conf`` file. a. Log on to the storage system with administrator access. .. code-block:: console $ ssh root@STORAGE_IP_ADDRESS b. View the current usable pool id. .. code-block:: console $ flvcli show pool list - d5bd40b58ea84e9da09dcf25a01fdc07 : default_pool_dc07 c. Use ``d5bd40b58ea84e9da09dcf25a01fdc07`` to configure the ``dpl_pool`` of ``/etc/cinder/cinder.conf`` file. .. note:: Other management commands can be referenced with the help command :command:`flvcli -h`. #. Make the following changes on the volume node ``/etc/cinder/cinder.conf`` file. .. code-block:: ini # IP address of SAN controller (string value) san_ip=STORAGE IP ADDRESS # Username for SAN controller (string value) san_login=USERNAME # Password for SAN controller (string value) san_password=PASSWORD # Use thin provisioning for SAN volumes? (boolean value) san_thin_provision=true # The port that the iSCSI daemon is listening on. (integer value) iscsi_port=3260 # DPL pool uuid in which DPL volumes are stored. (string value) dpl_pool=d5bd40b58ea84e9da09dcf25a01fdc07 # DPL port number. 
(integer value) dpl_port=8357 # Uncomment one of the next two option to enable Fibre channel or iSCSI # FIBRE CHANNEL(uncomment the next line to enable the FC driver) #volume_driver=cinder.volume.drivers.prophetstor.dpl_fc.DPLFCDriver # iSCSI (uncomment the next line to enable the iSCSI driver) #volume_driver=cinder.volume.drivers.prophetstor.dpl_iscsi.DPLISCSIDriver #. Save the changes to the ``/etc/cinder/cinder.conf`` file and restart the ``cinder-volume`` service. The ProphetStor Fibre Channel or iSCSI drivers are now enabled on your OpenStack system. If you experience problems, review the Block Storage service log files for errors. The following table contains the options supported by the ProphetStor storage driver. .. include:: ../../tables/cinder-prophetstor_dpl.inc ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/doc/source/configuration/block-storage/drivers/pure-storage-driver.rst0000664000175000017500000003542700000000000030670 0ustar00zuulzuul00000000000000========================================================= Pure Storage iSCSI, Fibre Channel and NVMe volume drivers ========================================================= The Pure Storage FlashArray volume drivers for OpenStack Block Storage interact with configured Pure Storage arrays and support various operations. Support for iSCSI storage protocol is available with the PureISCSIDriver Volume Driver class, Fibre Channel with the PureFCDriver and NVMe-ROCE or NVMe-TCP with the PureNVMEDriver. iSCSI, Fibre Channel and NVMe-RoCE drivers are compatible with FlashArrays that support the REST API version 2.4 and higher (Purity 6.1.0 and newer). The NVMe-TCP driver is compatible with FlashArrays that are running Purity 6.4.2 and higher. Some features may require newer versions of Purity. Limitations and known issues ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ If you do not set up the nodes hosting instances to use multipathing, all network connectivity will use a single physical port on the array. In addition to significantly limiting the available bandwidth, this means you do not have the high-availability and non-disruptive upgrade benefits provided by FlashArray. Multipathing must be used to take advantage of these benefits. Supported operations ~~~~~~~~~~~~~~~~~~~~ * Create, delete, attach, detach, retype, clone, and extend volumes. * Create a volume from snapshot. * Create, list, and delete volume snapshots. * Create, list, update, and delete consistency groups. * Create, list, and delete consistency group snapshots. * Revert a volume to a snapshot. * Manage and unmanage a volume. * Manage and unmanage a snapshot. * Get volume statistics. * Create a thin provisioned volume. * Replicate volumes to remote Pure Storage array(s) QoS support for the Pure Storage drivers include the ability to set the following capabilities in the OpenStack Block Storage API ``cinder.api.contrib.qos_spec_manage`` qos specs extension module: * **maxIOPS** - Maximum number of IOPs allowed for volume. Range: 100 - 100M * **maxBWS** - Maximum bandwidth limit in MB/s. Range: 1 - 524288 (512GB/s) * **maxIOPS_per_GB** - Maximum number of IOPs allowed for volume based on capacity. Range: 100 - 100M * **maxBWS_per_GB** - Maximum bandwidth limit in MB/s based on capacity. Range: 1 - 524288 (512GB/s) If both max and per_GB values are provided for a QoS type, the max value will take precedence. 
If the calculated per_GB value for a volume based on capacity is greater than the maximum allowed value, the maximum allowed values will be applied. The QoS keys above must be created and associated with a volume type. For information on how to set the key-value pairs and associate them with a volume type, see the `volume qos `_ section in the OpenStack Client command list. QoS settings are not applied to any volume in a volume group as these are controlled at the volume group level. Configure OpenStack and Purity ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ You need to configure both your Purity array and your OpenStack cluster. .. note:: These instructions assume that the ``cinder-api`` and ``cinder-scheduler`` services are installed and configured in your OpenStack cluster. Configure the OpenStack Block Storage service --------------------------------------------- In these steps, you will edit the ``cinder.conf`` file to configure the OpenStack Block Storage service to enable multipathing and to use the Pure Storage FlashArray as back-end storage. #. Install the Pure Storage PyPI module. A requirement for the Pure Storage driver is the installation of the Pure Storage Python SDK version 1.47.0 or later from PyPI. .. code-block:: console $ pip install py-pure-client #. Retrieve an API token from Purity. The OpenStack Block Storage service configuration requires an API token from Purity. Actions performed by the volume driver use this token for authorization. Also, Purity logs the volume driver's actions as being performed by the user who owns this API token. If you created a Purity user account that is dedicated to managing your OpenStack Block Storage volumes, copy the API token from that user account. Use the appropriate create or list command below to display and copy the Purity API token: * To create a new API token: .. code-block:: console $ pureadmin create --api-token USER The following is an example output: .. code-block:: console $ pureadmin create --api-token pureuser Name API Token Created pureuser 902fdca3-7e3f-d2e4-d6a6-24c2285fe1d9 2014-08-04 14:50:30 * To list an existing API token: .. code-block:: console $ pureadmin list --api-token --expose USER The following is an example output: .. code-block:: console $ pureadmin list --api-token --expose pureuser Name API Token Created pureuser 902fdca3-7e3f-d2e4-d6a6-24c2285fe1d9 2014-08-04 14:50:30 #. Copy the API token retrieved (``902fdca3-7e3f-d2e4-d6a6-24c2285fe1d9`` from the examples above) to use in the next step. #. Edit the OpenStack Block Storage service configuration file. The following sample ``/etc/cinder/cinder.conf`` configuration lists the relevant settings for a typical Block Storage service using a single Pure Storage array: .. code-block:: ini [DEFAULT] enabled_backends = puredriver-1 default_volume_type = puredriver-1 [puredriver-1] volume_backend_name = puredriver-1 volume_driver = PURE_VOLUME_DRIVER san_ip = IP_PURE_MGMT pure_api_token = PURE_API_TOKEN use_multipath_for_image_xfer = True Replace the following variables accordingly: PURE_VOLUME_DRIVER Use ``cinder.volume.drivers.pure.PureISCSIDriver`` for iSCSI, ``cinder.volume.drivers.pure.PureFCDriver`` for Fibre Channel or ``cinder.volume.drivers.pure.PureNVMEDriver`` for NVMe connectivity. If using the NVMe driver, specify the ``pure_nvme_transport`` value. Supported values are ``roce`` or ``tcp``. IP_PURE_MGMT The IP address of the Pure Storage array's management interface or a domain name that resolves to that IP address.
PURE_API_TOKEN The Purity Authorization token that the volume driver uses to perform volume management on the Pure Storage array. .. note:: The volume driver automatically creates Purity host objects for initiators as needed. If CHAP authentication is enabled via the ``use_chap_auth`` setting, you must ensure there are no manually created host objects with IQN's that will be used by the OpenStack Block Storage service. The driver will only modify credentials on hosts that it manages. .. note:: If using the PureFCDriver it is recommended to use the OpenStack Block Storage Fibre Channel Zone Manager. Volume auto-eradication ~~~~~~~~~~~~~~~~~~~~~~~ To enable auto-eradication of deleted volumes, snapshots, and consistency groups on deletion, modify the following option in the ``cinder.conf`` file: .. code-block:: ini pure_eradicate_on_delete = true By default, auto-eradication is disabled and all deleted volumes, snapshots, and consistency groups are retained on the Pure Storage array in a recoverable state for 24 hours from time of deletion. Setting host personality ~~~~~~~~~~~~~~~~~~~~~~~~ The host personality determines how the Purity system tunes the protocol used between the array and the initiator. To ensure the array works optimally with the host, set the personality to the name of the host operating or virtual memory system. Valid values are aix, esxi, hitachi-vsp, hpux, oracle-vm-server, solaris, and vms. If your system is not listed as one of the valid host personalities, do not set the option. By default, the host personality is not set. To set the host personality, modify the following option in the ``cinder.conf`` file: .. code-block:: ini pure_host_personality = .. note:: ``pure_host_personality`` is available from Purity REST API version 1.14, and affects only newly-created hosts. SSL certification ~~~~~~~~~~~~~~~~~ To enable SSL certificate validation, modify the following option in the ``cinder.conf`` file: .. code-block:: ini driver_ssl_cert_verify = true By default, SSL certificate validation is disabled. To specify a non-default path to ``CA_Bundle`` file or directory with certificates of trusted CAs: .. code-block:: ini driver_ssl_cert_path = Certificate path Replication configuration ~~~~~~~~~~~~~~~~~~~~~~~~~ Add the following to the back-end specification to specify another Flash Array to replicate to: .. code-block:: ini [puredriver-1] replication_device = backend_id:PURE2_NAME,san_ip:IP_PURE2_MGMT,api_token:PURE2_API_TOKEN,type:REPLICATION_TYPE Where ``PURE2_NAME`` is the name of the remote Pure Storage system, ``IP_PURE2_MGMT`` is the management IP address of the remote array, and ``PURE2_API_TOKEN`` is the Purity Authorization token of the remote array. The ``REPLICATION_TYPE`` value for the ``type`` key can be either ``sync`` or ``async`` If the ``type`` is ``sync`` volumes will be created in a stretched Pod. This requires two arrays pre-configured with Active Cluster enabled. You can optionally specify ``uniform`` as ``true`` or ``false``, this will instruct the driver that data paths are uniform between arrays in the cluster and data connections should be made to both upon attaching. Note that more than one ``replication_device`` line can be added to allow for multi-target device replication. To enable 3-site replication, ie. a volume that is synchronously replicated to one array and also asynchronously replicated to another then you must supply two, and only two, ``replication_device`` lines, where one has ``type`` of ``sync`` and one where ``type`` is ``async``. 
Additionally, the parameter ``pure_trisync_enabled`` must be set to ``True``. A volume is only replicated if the volume is of a volume-type that has the extra spec ``replication_enabled`` set to ``<is> True``. You can optionally specify the ``replication_type`` key to specify ``<in> sync`` or ``<in> async`` or ``<in> trisync`` to choose the type of replication for that volume. If not specified, it will default to ``async``. To create a volume type that specifies replication to remote back ends with async replication: .. code-block:: console $ openstack volume type create ReplicationType $ openstack volume type set --property replication_enabled='<is> True' ReplicationType $ openstack volume type set --property replication_type='<in> async' ReplicationType The following table contains the optional configuration parameters available for async replication configuration with the Pure Storage array. .. list-table:: Pure Storage replication configuration options :header-rows: 1 * - Option - Description - Default * - ``pure_replica_interval_default`` - Snapshot replication interval in seconds. - ``3600`` * - ``pure_replica_retention_short_term_default`` - Retain all snapshots on target for this time (in seconds). - ``14400`` * - ``pure_replica_retention_long_term_per_day_default`` - Retain how many snapshots for each day. - ``3`` * - ``pure_replica_retention_long_term_default`` - Retain snapshots per day on target for this time (in days). - ``7`` * - ``pure_replication_pg_name`` - Pure Protection Group name to use for async replication (will be created if it does not exist). - ``cinder-group`` * - ``pure_replication_pod_name`` - Pure Pod name to use for sync replication (will be created if it does not exist). - ``cinder-pod`` .. note:: ``failover-host`` is only supported from the primary array to any of the multiple secondary arrays, but subsequent ``failover-host`` is only supported back to the original primary array. .. note:: ``pure_replication_pg_name`` and ``pure_replication_pod_name`` should not be changed after volumes have been created in the Cinder backend, as this could have unexpected results in both replication and failover. Automatic thin-provisioning/oversubscription ratio ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ This feature allows the driver to calculate the array oversubscription ratio as (total provisioned/actual used). By default this feature is enabled. To disable this feature and honor the hard-coded configuration option ``max_over_subscription_ratio``, add the following option in the ``cinder.conf`` file: .. code-block:: ini [puredriver-1] pure_automatic_max_oversubscription_ratio = False .. note:: Arrays with very good data reduction rates (compression/data deduplication/thin provisioning) can get *very* large oversubscription rates applied. Scheduling metrics ~~~~~~~~~~~~~~~~~~ A large number of metrics are reported by the volume driver, which can be useful in implementing more control over volume placement in multi-backend environments using the driver filter and weigher methods. Performance metrics are provided based on an average over the previous 30 seconds. Metrics reported include, but are not limited to: .. code-block:: text total_capacity_gb free_capacity_gb provisioned_capacity total_volumes total_snapshots total_hosts total_pgroups writes_per_sec reads_per_sec input_per_sec output_per_sec usec_per_read_op usec_per_write_op queue_usec_per_mirrored_write_op queue_usec_per_read_op queue_usec_per_write_op replication_type ..
note:: All total metrics include non-OpenStack managed objects on the array. In conjunction with QOS extra-specs, you can create very complex algorithms to manage volume placement. More detailed documentation on this is available in other external documentation. Configuration Options ~~~~~~~~~~~~~~~~~~~~~ The following list all Pure driver specific configuration options that can be set in `cinder.conf`: .. config-table:: :config-target: Pure cinder.volume.drivers.pure Pure Storage-supported extra specs ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Extra specs are associated with Block Storage volume types. When users request volumes of a particular volume type, the volumes are created on storage backends that meet the list of requirements. In the case of Pure Storage, these vendor-specific extra specs can be used to bring all volumes of a specific volume type into a construct known as a volume group. Additionally, the storage quality of service limits can be applied to the volume group. Use the specs in the following table to configure volume groups and associate with a volume type. Define Block Storage volume types by using the :command:`openstack volume type set` command. .. include:: ../../tables/manual/cinder-pure_storage_extraspecs.inc ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/doc/source/configuration/block-storage/drivers/quobyte-driver.rst0000664000175000017500000000333000000000000027727 0ustar00zuulzuul00000000000000============== Quobyte driver ============== The `Quobyte `__ volume driver enables storing Block Storage service volumes on a Quobyte storage back end. Block Storage service back ends are mapped to Quobyte volumes and individual Block Storage service volumes are stored as files on a Quobyte volume. Selection of the appropriate Quobyte volume is done by the aforementioned back end configuration that specifies the Quobyte volume explicitly. .. note:: Note the dual use of the term ``volume`` in the context of Block Storage service volumes and in the context of Quobyte volumes. For more information see `the Quobyte support webpage `__. Supported operations ~~~~~~~~~~~~~~~~~~~~ The Quobyte volume driver supports the following volume operations: - Create, delete, attach, and detach volumes - Secure NAS operation (Starting with Mitaka release secure NAS operation is optional but still default) - Create and delete a snapshot - Create a volume from a snapshot - Extend a volume - Clone a volume - Copy a volume to image - Generic volume migration (no back end optimization) .. note:: When running VM instances off Quobyte volumes, ensure that the `Quobyte Compute service driver `__ has been configured in your OpenStack cloud. Configuration ~~~~~~~~~~~~~ To activate the Quobyte volume driver, configure the corresponding ``volume_driver`` parameter: .. code-block:: ini volume_driver = cinder.volume.drivers.quobyte.QuobyteDriver The following table contains the configuration options supported by the Quobyte driver: .. 
config-table:: :config-target: Quobyte USP cinder.volume.drivers.quobyte ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/doc/source/configuration/block-storage/drivers/sandstone-storage-driver.rst0000664000175000017500000000704600000000000031707 0ustar00zuulzuul00000000000000====================== SandStone iSCSI Driver ====================== SandStone USP volume can be used as a block storage resource in the OpenStack Block Storage driver that supports iSCSI protocols. Before to go, you should have installed `SandStoneUSP `_. System requirements ~~~~~~~~~~~~~~~~~~~ +-----------------+--------------------+ | Cluster | version | +=================+====================+ | SandStone USP | 3.2.3+ | +-----------------+--------------------+ To use the SandStone driver, the following are required: - Network connectivity between the OpenStack host and the SandStone USP management interfaces. - HTTPS or HTTP must be enabled on the array. When creating a volume from image, add the following configuration keys in the ``[DEFAULT]`` configuration group of the ``/etc/cinder/cinder.conf`` file: Configuration example ~~~~~~~~~~~~~~~~~~~~~ The following table contains the configuration options supported by the SandStone driver. .. code-block:: ini [DEFAULT] enabled_backends = sds-iscsi [sds-iscsi] volume_driver = cinder.volume.drivers.sandstone.sds_driver.SdsISCSIDriver volume_backend_name = sds-iscsi san_ip = 10.10.16.21 san_login = admin san_password = admin default_sandstone_target_ips = 10.10.16.21,10.10.16.22,10.10.16.23 chap_username = 123456789123 chap_password = 1234567891234 sandstone_pool = vms initiator_assign_sandstone_target_ip = {"iqn.1993-08.org.debian:01:3a9cd5c484a": "10.10.16.21"} General parameters ~~~~~~~~~~~~~~~~~~ +----------------------+-------------------------------------+ | Parameter | Description | +======================+=====================================+ | volume_driver | Indicates the loaded driver | +----------------------+-------------------------------------+ | volume_backend_name | Indicates the name of the backend | +----------------------+-------------------------------------+ | san_ip | IP addresses of the management | | | interfaces of SandStone USP | +----------------------+-------------------------------------+ | san_login | Storage system user name | +----------------------+-------------------------------------+ | san_password | Storage system password | +----------------------+-------------------------------------+ | default_sandstone | Default IP address of the iSCSI | | _target_ips | target port that is provided for | | | compute nodes | +----------------------+-------------------------------------+ | chap_username | CHAP authentication username | +----------------------+-------------------------------------+ | chap_password | CHAP authentication password | +----------------------+-------------------------------------+ | sandstone_pool | SandStone storage pool resource name| +----------------------+-------------------------------------+ | initiator_assign | Initiator assign target with assign | | _sandstone_target_ip | ip | +----------------------+-------------------------------------+ #. After modifying the ``cinder.conf`` file, restart the ``cinder-volume`` service. #. Create and use volume types. **Create and use sds-iscsi volume types** .. 
code-block:: console $ openstack volume type create sandstone $ openstack volume type set --property volume_backend_name=sds-iscsi sandstone ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/doc/source/configuration/block-storage/drivers/seagate-driver.rst0000664000175000017500000001401300000000000027650 0ustar00zuulzuul00000000000000============================================= Seagate Array Fibre Channel and iSCSI drivers ============================================= The ``STXFCDriver`` and ``STXISCSIDriver`` Cinder drivers allow the Seagate Technology (STX) storage arrays to be used for Block Storage in OpenStack deployments. System requirements ~~~~~~~~~~~~~~~~~~~ To use the Seagate drivers, the following are required: - Seagate storage array with: - iSCSI or FC host interfaces - G28x firmware or later - Network connectivity between the OpenStack host and the array management interfaces - The HTTPS or HTTP protocol must be enabled on the array Supported operations ~~~~~~~~~~~~~~~~~~~~ - Create, delete, attach, and detach volumes. - Create, list, and delete volume snapshots. - Create a volume from a snapshot. - Copy an image to a volume. - Copy a volume to an image. - Clone a volume. - Extend a volume. - Migrate a volume with back-end assistance. - Retype a volume. - Manage and unmanage a volume. Configuring the array ~~~~~~~~~~~~~~~~~~~~~ #. Verify that the array can be managed via an HTTPS connection. HTTP can also be used if ``driver_use_ssl`` is set to (or defaults to) False in the ``cinder.conf`` file. Confirm that virtual pools A and B are present if you plan to use virtual pools for OpenStack storage. If you plan to use vdisks instead of virtual pools, create or identify one or more vdisks to be used for OpenStack storage; typically this will mean creating or setting aside one disk group for each of the A and B controllers. #. Edit the ``cinder.conf`` file to define a storage back-end entry for each storage pool on the array that will be managed by OpenStack. Each entry consists of a unique section name, surrounded by square brackets, followed by options specified in a ``key=value`` format. * The ``seagate_pool_name`` value specifies the name of the storage pool or vdisk on the array. * The ``volume_backend_name`` option value can be a unique value, if you wish to be able to assign volumes to a specific storage pool on the array, or a name that is shared among multiple storage pools to let the volume scheduler choose where new volumes are allocated. #. The following ``cinder.conf`` options generally have identical values for each backend section on the array: * ``volume_driver`` specifies the Cinder driver name. * ``san_ip`` specifies the IP addresses or host names of the array's management controllers. * ``san_login`` and ``san_password`` specify the username and password of an array user account with ``manage`` privileges * ``driver_use_ssl`` must be set to True to enable use of the HTTPS protocol. * ``seagate_iscsi_ips`` specifies the iSCSI IP addresses for the array if using the iSCSI transport protocol In the examples below, two back ends are defined, one for pool A and one for pool B, and a common ``volume_backend_name`` is used so that a single volume type definition can be used to allocate volumes from both pools. **iSCSI example back-end entries** .. 
code-block:: ini [pool-a] seagate_pool_name = A volume_backend_name = seagate-array volume_driver = cinder.volume.drivers.stx.iscsi.STXISCSIDriver san_ip = 10.1.2.3,10.1.2.4 san_login = manage san_password = !manage seagate_iscsi_ips = 10.2.3.4,10.2.3.5 driver_use_ssl = true [pool-b] seagate_backend_name = B volume_backend_name = seagate-array volume_driver = cinder.volume.drivers.stx.iscsi.STXISCSIDriver san_ip = 10.1.2.3,10.1.2.4 san_login = manage san_password = !manage seagate_iscsi_ips = 10.2.3.4,10.2.3.5 driver_use_ssl = true **Fibre Channel example back-end entries** .. code-block:: ini [pool-a] seagate_backend_name = A volume_backend_name = seagate-array volume_driver = cinder.volume.drivers.stx.fc.STXFCDriver san_ip = 10.1.2.3,10.1.2.4 san_login = manage san_password = !manage driver_use_ssl = true [pool-b] seagate_backend_name = B volume_backend_name = seagate-array volume_driver = cinder.volume.drivers.stx.fc.STXFCDriver san_ip = 10.1.2.3,10.1.2.4 san_login = manage san_password = !manage driver_use_ssl = true #. If any ``volume_backend_name`` value refers to a vdisk rather than a virtual pool, add an additional statement ``seagate_backend_type = linear`` to that back-end entry. #. If HTTPS is enabled, you can enable certificate verification with the option ``driver_ssl_cert_verify = True``. You may also use the ``driver_ssl_cert_path`` parameter to specify the path to a CA\_BUNDLE file containing CAs other than those in the default list. #. Modify the ``[DEFAULT]`` section of the ``cinder.conf`` file to add an ``enabled_backends`` parameter specifying the backend entries you added, and a ``default_volume_type`` parameter specifying the name of a volume type that you will create in the next step. **Example of [DEFAULT] section changes** .. code-block:: ini [DEFAULT] enabled_backends = pool-a,pool-b default_volume_type = seagate #. Create a new volume type for each distinct ``volume_backend_name`` value that you added in the ``cinder.conf`` file. The example below assumes that the same ``volume_backend_name=seagate-array`` option was specified in all of the entries, and specifies that the volume type ``seagate`` can be used to allocate volumes from any of them. **Example of creating a volume type** .. code-block:: console $ openstack volume type create seagate $ openstack volume type set --property volume_backend_name=seagate-array seagate #. After modifying the ``cinder.conf`` file, restart the ``cinder-volume`` service. Driver-specific options ~~~~~~~~~~~~~~~~~~~~~~~ The following table contains the configuration options that are specific to the Seagate drivers. .. config-table:: :config-target: Seagate cinder.volume.drivers.stx.common ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/doc/source/configuration/block-storage/drivers/solidfire-volume-driver.rst0000664000175000017500000001063700000000000031534 0ustar00zuulzuul00000000000000========= SolidFire ========= The SolidFire Cluster is a high performance all SSD iSCSI storage device that provides massive scale out capability and extreme fault tolerance. A key feature of the SolidFire cluster is the ability to set and modify during operation specific QoS levels on a volume for volume basis. The SolidFire cluster offers this along with de-duplication, compression, and an architecture that takes full advantage of SSDs. To configure the use of a SolidFire cluster with Block Storage, modify your ``cinder.conf`` file as follows: .. 
code-block:: ini volume_driver = cinder.volume.drivers.solidfire.SolidFireDriver san_ip = 172.17.1.182 # the address of your MVIP san_login = sfadmin # your cluster admin login san_password = sfpassword # your cluster admin password sf_account_prefix = '' # prefix for tenant account creation on solidfire cluster .. warning:: Older versions of the SolidFire driver (prior to Icehouse) created a unique account prefixed with ``$cinder-volume-service-hostname-$tenant-id`` on the SolidFire cluster for each tenant. Unfortunately, this account formation resulted in issues for High Availability (HA) installations and installations where the ``cinder-volume`` service can move to a new node. The current default implementation does not experience this issue as no prefix is used. For installations created on a prior release, the OLD default behavior can be configured by using the keyword ``hostname`` in sf_account_prefix. .. note:: The SolidFire driver creates names for volumes on the back end using the format UUID-. This works well, but there is a possibility of a UUID collision for users running multiple clouds against the same cluster. In Mitaka the ability was added to eliminate the possibility of collisions by introducing the **sf_volume_prefix** configuration variable. On the SolidFire cluster each volume will be labeled with the prefix, providing the ability to configure unique volume names for each cloud. The default prefix is 'UUID-'. Changing the setting on an existing deployment will result in the existing volumes being inaccessible. To introduce this change to an existing deployment it is recommended to add the Cluster as if it were a second backend and disable new deployments to the current back end. .. config-table:: :config-target: SolidFire cinder.volume.drivers.solidfire Supported operations ~~~~~~~~~~~~~~~~~~~~ * Create, delete, attach, and detach volumes. * Create, list, and delete volume snapshots. * Create a volume from a snapshot. * Copy an image to a volume. * Copy a volume to an image. * Clone a volume. * Extend a volume. * Retype a volume. * Manage and unmanage a volume. * Consistency group snapshots. QoS support for the SolidFire drivers includes the ability to set the following capabilities in the OpenStack Block Storage API ``cinder.api.contrib.qos_specs_manage`` qos specs extension module: * **minIOPS** - The minimum number of IOPS guaranteed for this volume. Default = 100. * **maxIOPS** - The maximum number of IOPS allowed for this volume. Default = 15,000. * **burstIOPS** - The maximum number of IOPS allowed over a short period of time. Default = 15,000. * **scaledIOPS** - The presence of this key is a flag indicating that the above IOPS should be scaled by the following scale values. It is recommended to set the value of scaledIOPS to True, but any value will work. The absence of this key implies false. * **scaleMin** - The amount to scale the minIOPS by for every 1GB of additional volume size. The value must be an integer. * **scaleMax** - The amount to scale the maxIOPS by for every 1GB of additional volume size. The value must be an integer. * **scaleBurst** - The amount to scale the burstIOPS by for every 1GB of additional volume size. The value must be an integer. The QoS keys above no longer require to be scoped but must be created and associated to a volume type. For information about how to set the key-value pairs and associate them with a volume type, see the `volume qos `_ section in the OpenStackClient command list. .. 
note:: When using scaledIOPS, the scale values must be chosen such that the constraint minIOPS <= maxIOPS <= burstIOPS is always true. The driver will enforce this constraint. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/doc/source/configuration/block-storage/drivers/spdk-volume-driver.rst0000664000175000017500000000426100000000000030511 0ustar00zuulzuul00000000000000========================================== Storage Performance Development Kit driver ========================================== Storage Performance Development Kit (SPDK) is a user space, polled-mode, asynchronous, lockless NVMe driver. It provides zero-copy, highly parallel access directly to an SSD from a user space application. SPDK provides NVMe-oF target that is capable of serving disks over the network or to other processes. Preparation ~~~~~~~~~~~ SPDK NVMe-oF target installation -------------------------------- Follow instructions available on https://spdk.io/doc/nvmf.html to install and configure environment with SPDK NVMe-oF target application. Starting from Ussuri release SPDK release v19.10 or higher is required. Storage pools configuration --------------------------- SPDK Cinder driver requires storage pools to be configured upfront in SPDK NVMe-oF target application. SPDK driver uses Logical Volume Stores (LVS) as storage pools. Details on configuring LVS are available on https://spdk.io/doc/logical_volumes.html. After storage pools are configured remote access has to be enabled. Launch ``scripts/rpc_http_proxy.py`` script from SPDK directory to start an http server that will manage requests from volume driver. Supported operations ~~~~~~~~~~~~~~~~~~~~ * Create, delete, attach, and detach volumes. * Create, list, and delete volume snapshots. * Create a volume from a snapshot. * Copy an image to a volume. * Copy a volume to an image. * Clone a volume. * Extend a volume. * Get volume statistics. Configuration ~~~~~~~~~~~~~ Use the following options to configure for the SPDK NVMe-oF transport: .. code-block:: ini volume_driver = cinder.volume.drivers.spdk.SPDKDriver target_protocol = nvmet_rdma # SPDK driver supports only nvmet_rdma target protocol target_helper = spdk-nvmeof # SPDK volume driver requires SPDK NVMe-oF target driver target_ip_address = 192.168.0.1 # NVMe-oF target IP address target_port = 4260 # NVMe-oF target port target_prefix = nqn.2014-08.org.spdk # NVMe-oF target nqn prefix .. config-table:: :config-target: SPDK cinder.volume.targets.spdknvmf ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/doc/source/configuration/block-storage/drivers/storpool-volume-driver.rst0000664000175000017500000000555600000000000031441 0ustar00zuulzuul00000000000000====================== StorPool volume driver ====================== StorPool is distributed data storage software running on standard x86 servers. StorPool aggregates the performance and capacity of all drives into a shared pool of storage distributed among the servers. Within this storage pool the user creates thin-provisioned volumes that are exposed to the clients as block devices. StorPool consists of two parts wrapped in one package - a server and a client. The StorPool server allows a hypervisor to act as a storage node, while the StorPool client allows a hypervisor node to access the storage pool and act as a compute node. 
In OpenStack terms the StorPool solution allows each hypervisor node to be both a storage and a compute node simultaneously. Prerequisites ------------- * The controller and all the compute nodes must have access to the StorPool API service. * All nodes where StorPool-backed volumes will be attached must have access to the StorPool data network and run the ``storpool_block`` service. * If StorPool-backed Cinder volumes need to be created directly from Glance images, then the node running the ``cinder-volume`` service must also have access to the StorPool data network and run the ``storpool_block`` service. Configuring the StorPool volume driver -------------------------------------- A valid ``/etc/storpool.conf`` file is required; please contact the StorPool support team for assistance. The StorPool Cinder volume driver has two configuration options that may be specified both in the global configuration (e.g. in a ``cinder.conf`` volume backend definition) and per volume type: - ``storpool_template``: specifies the StorPool template (replication, placement, etc. specifications defined once and used for multiple volumes and snapshots) to use for the Cinder volume type or, if specified globally, as a default value for Cinder volumes. There is no default value for this option, see ``storpool_replication``. - ``storpool_replication``: if ``storpool_template`` is not set, the volume will be created with the specified chain replication and with the default placement constraints for the StorPool cluster. The default value for the chain replication is 3. Using the StorPool volume driver -------------------------------- The most common use for the Cinder StorPool volume driver is probably attaching volumes to Nova instances. For this to work, the ``nova-compute`` service and the ``os-brick`` library must recognize the "storpool" volume attachment driver; please contact the StorPool support team for more information. Currently there is no StorPool driver for Nova ephemeral volumes; to run Nova instances with a StorPool-backed volume as a root device, create a Cinder volume with the root filesystem image, make a snapshot, and let Nova create the instance with a root device as a new volume created from that snapshot. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/doc/source/configuration/block-storage/drivers/synology-dsm-driver.rst0000664000175000017500000000661600000000000030715 0ustar00zuulzuul00000000000000========================== Synology DSM volume driver ========================== The ``SynoISCSIDriver`` volume driver allows Synology NAS to be used for Block Storage (cinder) in OpenStack deployments. Information on OpenStack Block Storage volumes is available in the DSM Storage Manager. System requirements ~~~~~~~~~~~~~~~~~~~ The Synology driver has the following requirements: * DSM version 6.0.2 or later. * Your Synology NAS model must support advanced file LUN, iSCSI Target, and snapshot features. Refer to the `Support List for applied models `_. .. note:: The DSM driver is available in the OpenStack Newton release. Supported operations ~~~~~~~~~~~~~~~~~~~~ * Create, delete, clone, attach, and detach volumes. * Create and delete volume snapshots. * Create a volume from a snapshot. * Copy an image to a volume. * Copy a volume to an image. * Extend a volume. * Get volume statistics. Driver configuration ~~~~~~~~~~~~~~~~~~~~ Edit the ``/etc/cinder/cinder.conf`` file on your volume driver host. 
Synology driver uses a volume in Synology NAS as the back end of Block Storage. Every time you create a new Block Storage volume, the system will create an advanced file LUN in your Synology volume to be used for this new Block Storage volume. The following example shows how to use different Synology NAS servers as the back end. If you want to use all volumes on your Synology NAS, add another section with the volume number to differentiate between volumes within the same Synology NAS. .. code-block:: ini [default] enabled_backends = ds1515pV1, ds1515pV2, rs3017xsV3, others [ds1515pV1] # configuration for volume 1 in DS1515+ [ds1515pV2] # configuration for volume 2 in DS1515+ [rs3017xsV1] # configuration for volume 1 in RS3017xs Each section indicates the volume number and the way in which the connection is established. Below is an example of a basic configuration: .. code-block:: ini [Your_Section_Name] # Required settings volume_driver = cinder.volume.drivers.synology.synology_iscsi.SynoISCSIDriver target_protocol = iscsi target_ip_address = DS_IP synology_admin_port = DS_PORT synology_username = DS_USER synology_password = DS_PW synology_pool_name = DS_VOLUME # Optional settings volume_backend_name = VOLUME_BACKEND_NAME iscsi_secondary_ip_addresses = IP_ADDRESSES driver_use_ssl = True use_chap_auth = True chap_username = CHAP_USER_NAME chap_password = CHAP_PASSWORD ``DS_PORT`` This is the port for DSM management. The default value for DSM is 5000 (HTTP) and 5001 (HTTPS). To use HTTPS connections, you must set ``driver_use_ssl = True``. ``DS_IP`` This is the IP address of your Synology NAS. ``DS_USER`` This is the account of any DSM administrator. ``DS_PW`` This is the password for ``DS_USER``. ``DS_VOLUME`` This is the volume you want to use as the storage pool for the Block Storage service. The format is ``volume[0-9]+``, and the number is the same as the volume number in DSM. .. note:: If you set ``driver_use_ssl`` as ``True``, ``synology_admin_port`` must be an HTTPS port. Configuration options ~~~~~~~~~~~~~~~~~~~~~ The Synology DSM driver supports the following configuration options: .. config-table: :config-target: Synology cinder.volume.drivers.synology.synology_common ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/doc/source/configuration/block-storage/drivers/toyou-netstor-driver.rst0000664000175000017500000000575500000000000031127 0ustar00zuulzuul00000000000000=========================== TOYOU NetStor Cinder driver =========================== TOYOU NetStor series volume driver provides OpenStack Compute instances with access to TOYOU NetStor series storage systems. TOYOU NetStor storage can be used with iSCSI or FC connection. This documentation explains how to configure and connect the block storage nodes to TOYOU NetStor series storage. Driver options ~~~~~~~~~~~~~~ The following table contains the configuration options supported by the TOYOU NetStor iSCSI/FC driver. .. config-table:: :config-target: TOYOU NetStor cinder.volume.drivers.toyou.acs5000.acs5000_common Supported operations ~~~~~~~~~~~~~~~~~~~~ - Create, list, delete, attach (map), and detach (unmap) volumes. - Create, list and delete volume snapshots. - Create a volume from a snapshot. - Copy an image to a volume. - Copy a volume to an image. - Clone a volume. - Extend a volume. - Migrate a volume. - Manage/Unmanage volume. - Revert to Snapshot. - Multi-attach. - Thin Provisioning. - Extend Attached Volume. 
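Capabilities such as multi-attach are requested through standard Block Storage volume types rather than through driver options. The following is an illustrative sketch only: the volume type name is arbitrary, and the ``volume_backend_name`` property must match the backend name you define in the configuration section below (``toyou-fc`` in the Fibre Channel example).

.. code-block:: console

   $ openstack volume type create toyou-multiattach
   $ openstack volume type set --property volume_backend_name=toyou-fc toyou-multiattach
   $ openstack volume type set --property multiattach="<is> True" toyou-multiattach
   $ openstack volume create --size 10 --type toyou-multiattach volume1
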
Configure TOYOU NetStor iSCSI/FC backend ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ This section details the steps required to configure the TOYOU NetStor storage cinder driver. #. In the ``cinder.conf`` configuration file under the ``[DEFAULT]`` section, set the enabled_backends parameter with the iSCSI or FC back-end group. - For Fibre Channel: .. code-block:: ini [DEFAULT] enabled_backends = toyou-fc-1 - For iSCSI: .. code-block:: ini [DEFAULT] enabled_backends = toyou-iscsi-1 #. Add a backend group section for the backend group specified in the enabled_backends parameter. #. In the newly created backend group section, set the following configuration options: - For Fibre Channel: .. code-block:: ini [toyou-fc-1] # The TOYOU NetStor driver path volume_driver = cinder.volume.drivers.toyou.acs5000.acs5000_fc.Acs5000FCDriver # Management IP of TOYOU NetStor storage array san_ip = 10.0.0.10 # Management username of TOYOU NetStor storage array san_login = cliuser # Management password of TOYOU NetStor storage array san_password = clipassword # The Pool used to allocated volumes acs5000_volpool_name = pool01 # Backend name volume_backend_name = toyou-fc - For iSCSI: .. code-block:: ini [toyou-iscsi-1] # The TOYOU NetStor driver path volume_driver = cinder.volume.drivers.toyou.acs5000.acs5000_iscsi.Acs5000ISCSIDriver # Management IP of TOYOU NetStor storage array san_ip = 10.0.0.10 # Management username of TOYOU NetStor storage array san_login = cliuser # Management password of TOYOU NetStor storage array san_password = clipassword # The Pool used to allocated volumes acs5000_volpool_name = pool01 # Backend name volume_backend_name = toyou-iscsi ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/doc/source/configuration/block-storage/drivers/toyou-netstor-tyds-driver.rst0000664000175000017500000000415000000000000032074 0ustar00zuulzuul00000000000000================================ TOYOU NetStor TYDS Cinder driver ================================ TOYOU NetStor TYDS series volume driver provides OpenStack Compute instances with access to TOYOU NetStor TYDS series storage systems. TOYOU NetStor TYDS storage can be used with iSCSI connection. This documentation explains how to configure and connect the block storage nodes to TOYOU NetStor TYDS series storage. Driver options ~~~~~~~~~~~~~~ The following table contains the configuration options supported by the TOYOU NetStor TYDS iSCSI driver. .. config-table:: :config-target: TOYOU NetStor TYDS cinder.volume.drivers.toyou.tyds.tyds Supported operations ~~~~~~~~~~~~~~~~~~~~ - Create Volume. - Delete Volume. - Attach Volume. - Detach Volume. - Extend Volume - Create Snapshot. - Delete Snapshot. - Create Volume from Snapshot. - Create Volume from Volume (clone). - Create lmage from Volume. - Volume Migration (host assisted). Configure TOYOU NetStor TOYOU TYDS iSCSI backend ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ This section details the steps required to configure the TOYOU NetStor TYDS storage cinder driver. #. In the ``cinder.conf`` configuration file under the ``[DEFAULT]`` section, set the enabled_backends parameter with the iSCSI back-end group. .. code-block:: ini [DEFAULT] enabled_backends = toyou-tyds-iscsi-1 #. Add a backend group section for the backend group specified in the enabled_backends parameter. #. In the newly created backend group section, set the following configuration options: .. 
code-block:: ini [toyou-tyds-iscsi-1] # The TOYOU NetStor TYDS driver path volume_driver = cinder.volume.drivers.toyou.tyds.tyds.TYDSDriver # Management http ip of TOYOU NetStor TYDS storage san_ip = 10.0.0.10 # Management http username of TOYOU NetStor TYDS storage san_login = superuser # Management http password of TOYOU NetStor TYDS storage san_password = Toyou@123 # The Pool used to allocated volumes tyds_pools = pool01 # Backend name volume_backend_name = toyou-tyds-iscsi-1 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/doc/source/configuration/block-storage/drivers/veritas-access-iscsi-driver.rst0000664000175000017500000000531700000000000032272 0ustar00zuulzuul00000000000000=========================== Veritas ACCESS iSCSI driver =========================== Veritas Access is a software-defined scale-out network-attached storage (NAS) solution for unstructured data that works on commodity hardware and takes advantage of placing data on premise or in the cloud based on intelligent policies. Through Veritas Access iSCSI Driver, OpenStack Block Storage can use Veritas Access backend as a block storage resource. The driver enables you to create iSCSI volumes that an OpenStack Block Storage server can allocate to any virtual machine running on a compute host. Requirements ~~~~~~~~~~~~ The Veritas ACCESS iSCSI Driver, version ``1.0.0`` and later, supports Veritas ACCESS release ``7.4`` and later. Supported operations ~~~~~~~~~~~~~~~~~~~~ - Create and delete volumes. - Create and delete snapshots. - Create volume from snapshot. - Extend a volume. - Attach and detach volumes. - Clone volumes. Configuration ~~~~~~~~~~~~~ #. Enable RESTful service on the Veritas Access Backend. #. Create Veritas Access iSCSI target, add store and portal IP to it. You can create target and add portal IP, store to it as follows: .. code-block:: console Target> iscsi target create iqn.2018-02.com.veritas:target02 Target> iscsi target store add target_fs iqn.2018-02.com.veritas:target02 Target> iscsi target portal add iqn.2018-02.com.veritas:target02 10.10.10.1 ... You can add authentication to target as follows: .. code-block:: console Target> iscsi target auth incominguser add iqn.2018-02.com.veritas:target02 user1 ... #. Ensure that the Veritas Access iSCSI target service is online. If the Veritas Access iSCSI target service is not online, enable the service by using the CLI or REST API. .. code-block:: console Target> iscsi service start Target> iscsi service status ... Define the following required properties in the ``cinder.conf`` file: .. code-block:: ini volume_driver = cinder.volume.drivers.veritas_access.veritas_iscsi.ACCESSIscsiDriver san_ip = va_console_ip san_api_port = 14161 san_login = master san_password = password target_port = 3260 vrts_lun_sparse = True vrts_target_config = /etc/cinder/vrts_target.xml #. Define Veritas Access Target details in ``/etc/cinder/vrts_target.xml``: .. code-block:: console iqn.2018-02.com.veritas:target02 10.10.10.1 0 ... ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/doc/source/configuration/block-storage/drivers/vmware-vmdk-driver.rst0000664000175000017500000003103100000000000030476 0ustar00zuulzuul00000000000000.. _block_storage_vmdk_driver: ================== VMware VMDK driver ================== Use the VMware VMDK driver to enable management of the OpenStack Block Storage volumes on vCenter-managed data stores. 
Volumes are backed by VMDK files on data stores that use any VMware-compatible storage technology such as NFS, iSCSI, FiberChannel, and vSAN. .. note:: The VMware VMDK driver requires vCenter version 5.1 at minimum. Functional context ~~~~~~~~~~~~~~~~~~ The VMware VMDK driver connects to vCenter, through which it can dynamically access all the data stores visible from the ESX hosts in the managed cluster. When you create a volume, the VMDK driver creates a VMDK file on demand. The VMDK file creation completes only when the volume is subsequently attached to an instance. The reason for this requirement is that data stores visible to the instance determine where to place the volume. Before the service creates the VMDK file, attach a volume to the target instance. The running vSphere VM is automatically reconfigured to attach the VMDK file as an extra disk. Once attached, you can log in to the running vSphere VM to rescan and discover this extra disk. With the update to ESX version 6.0, the VMDK driver now supports NFS version 4.1. Configuration ~~~~~~~~~~~~~ The recommended volume driver for OpenStack Block Storage is the VMware vCenter VMDK driver. When you configure the driver, you must match it with the appropriate OpenStack Compute driver from VMware and both drivers must point to the same server. In the ``nova.conf`` file, use this option to define the Compute driver: .. code-block:: ini compute_driver = vmwareapi.VMwareVCDriver In the ``cinder.conf`` file, use this option to define the volume driver: .. code-block:: ini volume_driver = cinder.volume.drivers.vmware.vmdk.VMwareVcVmdkDriver The following table lists various options that the drivers support for the OpenStack Block Storage configuration (``cinder.conf``): .. include:: ../../tables/cinder-vmware.inc VMDK disk type ~~~~~~~~~~~~~~ The VMware VMDK drivers support the creation of VMDK disk file types ``thin``, ``lazyZeroedThick`` (sometimes called thick or flat), or ``eagerZeroedThick``. A thin virtual disk is allocated and zeroed on demand as the space is used. Unused space on a Thin disk is available to other users. A lazy zeroed thick virtual disk will have all space allocated at disk creation. This reserves the entire disk space, so it is not available to other users at any time. An eager zeroed thick virtual disk is similar to a lazy zeroed thick disk, in that the entire disk is allocated at creation. However, in this type, any previous data will be wiped clean on the disk before the write. This can mean that the disk will take longer to create, but can also prevent issues with stale data on physical media. Use the ``vmware:vmdk_type`` extra spec key with the appropriate value to specify the VMDK disk file type. This table shows the mapping between the extra spec entry and the VMDK disk file type: .. list-table:: Extra spec entry to VMDK disk file type mapping :header-rows: 1 * - Disk file type - Extra spec key - Extra spec value * - thin - ``vmware:vmdk_type`` - ``thin`` * - lazyZeroedThick - ``vmware:vmdk_type`` - ``thick`` * - eagerZeroedThick - ``vmware:vmdk_type`` - ``eagerZeroedThick`` If you do not specify a ``vmdk_type`` extra spec entry, the disk file type will default to ``thin``. The following example shows how to create a ``lazyZeroedThick`` VMDK volume by using the appropriate ``vmdk_type``: .. 
code-block:: console $ openstack volume type create THICK_VOLUME $ openstack volume type set --property vmware:vmdk_type=thick THICK_VOLUME $ openstack volume create --size 1 --type THICK_VOLUME VOLUME1 Clone type ~~~~~~~~~~ With the VMware VMDK drivers, you can create a volume from another source volume or a snapshot point. The VMware vCenter VMDK driver supports the ``full`` and ``linked/fast`` clone types. Use the ``vmware:clone_type`` extra spec key to specify the clone type. The following table captures the mapping for clone types: .. list-table:: Extra spec entry to clone type mapping :header-rows: 1 * - Clone type - Extra spec key - Extra spec value * - full - ``vmware:clone_type`` - ``full`` * - linked/fast - ``vmware:clone_type`` - ``linked`` If you do not specify the clone type, the default is ``full``. The following example shows linked cloning from a source volume, which is created from an image: .. code-block:: console $ openstack volume type create FAST_CLONE $ openstack volume type set --property vmware:clone_type=linked FAST_CLONE $ openstack volume create --size 1 --type FAST_CLONE --image MYIMAGE SOURCE_VOL $ openstack volume create --size 1 --source SOURCE_VOL DEST_VOL Adapter type ~~~~~~~~~~~~ The VMware vCenter VMDK driver supports the adapter types ``LSI Logic Parallel``, ``BusLogic Parallel``, ``LSI Logic SAS``, ``VMware Paravirtual`` and ``IDE`` for volumes. Use the ``vmware:adapter_type`` extra spec key to specify the adapter type. The following table captures the mapping for adapter types: .. list-table:: Extra spec entry to adapter type mapping :header-rows: 1 * - Adapter type - Extra spec key - Extra spec value * - BusLogic Parallel - ``vmware:adapter_type`` - ``busLogic`` * - IDE - ``vmware:adapter_type`` - ``ide`` * - LSI Logic Parallel - ``vmware:adapter_type`` - ``lsiLogic`` * - LSI Logic SAS - ``vmware:adapter_type`` - ``lsiLogicsas`` * - VMware Paravirtual - ``vmware:adapter_type`` - ``paraVirtual`` If you do not specify the adapter type, the default is the value specified by the config option ``vmware_adapter_type``. Use vCenter storage policies to specify back-end data stores ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ This section describes how to configure back-end data stores using storage policies. In vCenter 5.5 and greater, you can create one or more storage policies and expose them as a Block Storage volume-type to a vmdk volume. The storage policies are exposed to the vmdk driver through the extra spec property with the ``vmware:storage_profile`` key. For example, assume a storage policy in vCenter named ``gold_policy.`` and a Block Storage volume type named ``vol1`` with the extra spec key ``vmware:storage_profile`` set to the value ``gold_policy``. Any Block Storage volume creation that uses the ``vol1`` volume type places the volume only in data stores that match the ``gold_policy`` storage policy. The Block Storage back-end configuration for vSphere data stores is automatically determined based on the vCenter configuration. If you configure a connection to connect to vCenter version 5.5 or later in the ``cinder.conf`` file, the use of storage policies to configure back-end data stores is automatically supported. .. note:: You must configure any data stores that you configure for the Block Storage service for the Compute service. **To configure back-end data stores by using storage policies** #. In vCenter, tag the data stores to be used for the back end. 
OpenStack also supports policies that are created by using vendor-specific capabilities; for example vSAN-specific storage policies. .. note:: The tag value serves as the policy. For details, see :ref:`vmware-spbm`. #. Set the extra spec key ``vmware:storage_profile`` in the desired Block Storage volume types to the policy name that you created in the previous step. #. Optionally, for the ``vmware_host_version`` parameter, enter the version number of your vSphere platform. For example, ``5.5``. This setting overrides the default location for the corresponding WSDL file. Among other scenarios, you can use this setting to prevent WSDL error messages during the development phase or to work with a newer version of vCenter. #. Complete the other vCenter configuration parameters as appropriate. .. note:: Any volume that is created without an associated policy (that is to say, without an associated volume type that specifies ``vmware:storage_profile`` extra spec), there is no policy-based placement for that volume. Supported operations ~~~~~~~~~~~~~~~~~~~~ The VMware vCenter VMDK driver supports these operations: - Create, delete, attach, and detach volumes. .. note:: When a volume is attached to an instance, a reconfigure operation is performed on the instance to add the volume's VMDK to it. The user must manually rescan and mount the device from within the guest operating system. - Create, list, and delete volume snapshots. .. note:: Allowed only if volume is not attached to an instance. - Create a volume from a snapshot. .. note:: The vmdk UUID in vCenter will not be set to the volume UUID if the vCenter version is 6.0 or above and the extra spec key ``vmware:clone_type`` in the destination volume type is set to ``linked``. - Copy an image to a volume. .. note:: Only images in ``vmdk`` disk format with ``bare`` container format are supported. The ``vmware_disktype`` property of the image can be ``preallocated``, ``sparse``, ``streamOptimized`` or ``thin``. - Copy a volume to an image. .. note:: - Allowed only if the volume is not attached to an instance. - This operation creates a ``streamOptimized`` disk image. - Clone a volume. .. note:: - Supported only if the source volume is not attached to an instance. - The vmdk UUID in vCenter will not be set to the volume UUID if the vCenter version is 6.0 or above and the extra spec key ``vmware:clone_type`` in the destination volume type is set to ``linked``. - Backup a volume. .. note:: This operation creates a backup of the volume in ``streamOptimized`` disk format. - Restore backup to new or existing volume. .. note:: Supported only if the existing volume doesn't contain snapshots. - Change the type of a volume. .. note:: This operation is supported only if the volume state is ``available``. - Extend a volume. .. _vmware-spbm: Storage policy-based configuration in vCenter ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ You can configure Storage Policy-Based Management (SPBM) profiles for vCenter data stores supporting the Compute, Image service, and Block Storage components of an OpenStack implementation. In a vSphere OpenStack deployment, SPBM enables you to delegate several data stores for storage, which reduces the risk of running out of storage space. The policy logic selects the data store based on accessibility and available storage space. Prerequisites ~~~~~~~~~~~~~ - Determine the data stores to be used by the SPBM policy. - Determine the tag that identifies the data stores in the OpenStack component configuration. 
- Create separate policies or sets of data stores for separate OpenStack components. Create storage policies in vCenter ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #. In vCenter, create the tag that identifies the data stores: #. From the :guilabel:`Home` screen, click :guilabel:`Tags`. #. Specify a name for the tag. #. Specify a tag category. For example, ``spbm-cinder``. #. Apply the tag to the data stores to be used by the SPBM policy. .. note:: For details about creating tags in vSphere, see the `vSphere documentation `__. #. In vCenter, create a tag-based storage policy that uses one or more tags to identify a set of data stores. .. note:: For details about creating storage policies in vSphere, see the `vSphere documentation `__. Data store selection ~~~~~~~~~~~~~~~~~~~~ If storage policy is enabled, the driver initially selects all the data stores that match the associated storage policy. If two or more data stores match the storage policy, the driver chooses a data store that is connected to the maximum number of hosts. In case of ties, the driver chooses the data store with lowest space utilization, where space utilization is defined by the ``(1-freespace/totalspace)`` meters. These actions reduce the number of volume migrations while attaching the volume to instances. The volume must be migrated if the ESX host for the instance cannot access the data store that contains the volume. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/doc/source/configuration/block-storage/drivers/vzstorage-driver.rst0000664000175000017500000000073400000000000030270 0ustar00zuulzuul00000000000000======================== Virtuozzo Storage driver ======================== The Virtuozzo Storage driver is a fault-tolerant distributed storage system that is optimized for virtualization workloads. Set the following in your ``cinder.conf`` file, and use the following options to configure it. .. code-block:: ini volume_driver = cinder.volume.drivers.vzstorage.VZStorageDriver .. config-table:: :config-target: Virtuozzo Storage cinder.volume.drivers.vzstorage ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/doc/source/configuration/block-storage/drivers/windows-iscsi-volume-driver.rst0000664000175000017500000000446200000000000032355 0ustar00zuulzuul00000000000000.. _windows_iscsi_volume_driver: =========================== Windows iSCSI volume driver =========================== Windows Server offers an integrated iSCSI Target service that can be used with OpenStack Block Storage in your stack. Being entirely a software solution, consider it in particular for mid-sized networks where the costs of a SAN might be excessive. The Windows iSCSI Block Storage driver works with OpenStack Compute on any hypervisor. This driver creates volumes backed by fixed-type VHD images on Windows Server 2012 and dynamic-type VHDX on Windows Server 2012 R2 and onwards, stored locally on a user-specified path. The system uses those images as iSCSI disks and exports them through iSCSI targets. Each volume has its own iSCSI target. The ``cinder-volume`` service as well as the required Python components will be installed directly onto the Windows node. Prerequisites ~~~~~~~~~~~~~ The Windows iSCSI volume driver depends on the ``wintarget`` Windows service. This will require the ``iSCSI Target Server`` Windows feature to be installed. .. 
note:: The Cinder MSI will automatically enable this feature, if available (some minimal Windows versions do not provide it). You may check the availability of this feature by running the following: .. code-block:: powershell Get-WindowsFeature FS-iSCSITarget-Server .. end .. end The Windows Server installation requires at least 16 GB of disk space. The volumes hosted by this node will need extra space. Configuring cinder-volume ~~~~~~~~~~~~~~~~~~~~~~~~~ Below is a configuration sample for using the Windows iSCSI Driver. Append those options to your already existing ``cinder.conf`` file, described at :ref:`cinder_storage_install_windows`. .. code-block:: ini [DEFAULT] enabled_backends = winiscsi [winiscsi] volume_driver = cinder.volume.drivers.windows.iscsi.WindowsISCSIDriver windows_iscsi_lun_path = C:\iSCSIVirtualDisks volume_backend_name = winiscsi # The following config options are optional # # use_chap_auth = true # target_port = 3260 # target_ip_addres = # iscsi_secondary_ip_addresses = # reserved_percentage = 5 .. end The ``windows_iscsi_lun_path`` config option specifies the directory in which VHD backed volumes will be stored. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/doc/source/configuration/block-storage/drivers/windows-smb-volume-driver.rst0000664000175000017500000002061000000000000032015 0ustar00zuulzuul00000000000000.. _windows_smb_volume_driver: ========================= Windows SMB volume driver ========================= Description ~~~~~~~~~~~ The Windows SMB volume driver leverages pre-existing SMB shares, used to store volumes as virtual disk images. The main reasons to use the Windows SMB driver are: * ease of management and use * great integration with other Microsoft technologies (e.g. Hyper-V Failover Cluster) * suitable for a various range of deployment types and sizes The ``cinder-volume`` service as well as the required Python components will be installed directly onto designated Windows nodes (preferably the ones exposing the shares). Common deployment scenarios --------------------------- The SMB driver is designed to support a variety of scenarios, such as: * Scale-Out File Servers (``SoFS``), providing highly available SMB shares. * standalone Windows or Samba shares * any other SMB 3.0 capable device By using SoFS shares, the virtual disk images are stored on Cluster Shared Volumes (``CSV``). A common practice involves deploying CSVs on top of SAN backed LUNs (exposed to all the nodes of the cluster through iSCSI or Fibre Channel). In absence of a SAN, Storage Spaces/Storage Spaces Direct (``S2D``) may be used for the underlying storage. .. note:: S2D is commonly used in hyper-converged deployments. .. end Features -------- ``VHD`` and ``VHDX`` are the currently supported image formats and may be consumed by Hyper-V and KVM compute nodes. By default, dynamic (thinly provisioned) images will be used, unless configured otherwise. The driver accepts one or more shares that will be reported to the Cinder scheduler as storage pools. This can provide means of tiering, allowing specific shares (pools) to be requested through volume types. .. code-block:: console openstack volume type set $volume_type --property pool_name=$pool_name .. end Frontend QoS specs may be associated with the volume types and enforced on the consumer side (e.g. Hyper-V). .. 
code-block:: console openstack volume qos create $rule_name --property consumer=front-end --property total_bytes_sec=20971520 openstack volume qos associate $rule_name $volume_type_id openstack volume create $volume_name --type $volume_type_id --size $size .. end The ``Cinder Backup Service`` can be run on Windows. This driver stores the volumes using vhdx images stored on SMB shares which can be attached in order to retrieve the volume data and send it to the backup service. Prerequisites: * All physical disks must be in byte mode * rb+ must be used when writing backups to disk Clustering support ------------------ Active-Active Cinder clustering is currently experimental and should not be used in production. This implies having multiple Cinder Volume services handling the same share simultaneously. On the other hand, Active-Passive clustering can easily be achieved, configuring the Cinder Volume service as clustered using Microsoft Failover Cluster. By using SoFS, you can provide high availability of the shares used by Cinder. This can be used in conjunction with the Nova Hyper-V cluster driver, which allows clustering virtual machines. This ensures that when a compute node is compromised, the virtual machines are transparently migrated to a healthy node, preserving volume connectivity. .. note:: The Windows SMB driver is the only Cinder driver that may be used along with the Nova Hyper-V cluster driver. The reason is that during an unexpected failover, the volumes need to be available on the destination compute node side. .. _windows_smb_volume_driver_prerequisites: Prerequisites ~~~~~~~~~~~~~ Before setting up the SMB driver, you will need to create and configure one or more SMB shares that will be used for storing virtual disk images. .. note:: The driver does not manage share permissions. You will have to make sure that Cinder as well as share consumers (e.g. Nova, Hyper-V) have access. Note that Hyper-V VMs are run using a built-in user group: ``NT VIRTUAL MACHINE\Virtual Machines``. .. end The easiest way to provide share access is by using Active Directory accounts. You may grant share access to the users running OpenStack services, as well as the compute nodes (and optionally storage nodes), using per computer account access rules. One of the main advantages is that by doing so, you don't need to pass share credentials to Cinder (and implicitly volume consumers). By granting access to a computer account, you're basically granting access to the LocalSystem account of that node, and thus to the VMs running on that host. .. note:: By default, OpenStack services deployed using the MSIs are run as LocalSystem. Once you've granted share access to a specific account, don't forget to also configure file system level permissions on the directory exported by the share. Configuring cinder-volume ~~~~~~~~~~~~~~~~~~~~~~~~~ Below is a configuration sample for using the Windows SMB Driver. Append those options to your already existing ``cinder.conf`` file, described at :ref:`cinder_storage_install_windows`. .. 
code-block:: ini [DEFAULT] enabled_backends = winsmb [winsmb] volume_backend_name = myWindowsSMBBackend volume_driver = cinder.volume.drivers.windows.smbfs.WindowsSmbfsDriver smbfs_mount_point_base = C:\OpenStack\mnt\ smbfs_shares_config = C:\Program Files\Cloudbase Solutions\OpenStack\etc\cinder\smbfs_shares_list # The following config options are optional # # image_volume_cache_enabled = true # image_volume_cache_max_size_gb = 100 # image_volume_cache_max_count = 10 # # nas_volume_prov_type = thin # smbfs_default_volume_format = vhdx # max_over_subscription_ratio = 1.5 # reserved_percentage = 5 # smbfs_pool_mappings = //addr/share:pool_name,//addr/share2:pool_name2 .. end The ``smbfs_mount_point_base`` config option allows you to specify where the shares will be *mounted*. This directory will contain symlinks pointing to the shares used by Cinder. Each symlink name will be a hash of the actual share path. Configuring the list of available shares ---------------------------------------- In addition to ``cinder.conf``, you will need to have another config file, providing a list of shares that will be used by Cinder for storing disk images. In the above sample, this file is referenced by the ``smbfs_shares_config`` option. The share list config file must contain one share per line, optionally including mount options. You may also add comments, using a '#' at the beginning of the line. Below is a sample of the share list config file: .. code-block:: ini # Cinder Volume shares //sofs-cluster/share //10.0.0.10/volumes -o username=user,password=mypassword .. end Keep in mind that Linux hosts can also consume those volumes. For this reason, the mount options resemble the ones used by mount.cifs (in fact, those will actually be passed to mount.cifs by the Nova Linux nodes). In case of Windows nodes, only the share location, username and password will be used when mounting the shares. The share address must use slashes instead of backslashes (as opposed to what Windows admins may expect) because of the above-mentioned reason. Depending on the configured share access rules, you may skip including share credentials in the config file, as described in the :ref:`windows_smb_volume_driver_prerequisites` section. Configuring Nova credentials ---------------------------- The SMB volume driver relies on the ``nova assisted volume snapshots`` feature when snapshotting in-use volumes, as do other similar drivers using shared filesystems. By default, the Nova policy requires admin rights for this operation. You may provide Cinder-specific credentials to be used when requesting Nova assisted volume snapshots, as shown below: .. code-block:: ini [nova] region_name=RegionOne auth_strategy=keystone auth_type=password auth_url=http://keystone_host/identity project_name=service username=nova password=password project_domain_name=Default user_domain_name=Default .. end Configuring storage pools ------------------------- Each share is reported to the Cinder scheduler as a storage pool. By default, the share name will be the name of the pool. If needed, you may provide pool name mappings, specifying a custom pool name for each share, as shown below: .. code-block:: ini smbfs_pool_mappings = //addr/share:pool0 .. end In the above sample, the ``//addr/share`` share will be reported as ``pool0``.
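As an illustrative follow-up (the pool and volume type names below are hypothetical), you can check that each mapped share is reported as a scheduler pool and then steer new volumes to a specific share by reusing the ``pool_name`` extra spec shown in the Features section. The ``openstack volume backend pool list`` command may not be available in older clients; ``cinder get-pools`` is an alternative.

.. code-block:: console

   # List the storage pools reported to the Cinder scheduler
   openstack volume backend pool list

   # Create a volume type bound to the share mapped to ``pool0``
   openstack volume type create smb-pool0
   openstack volume type set smb-pool0 --property pool_name=pool0

   # Volumes of this type will be placed on the ``//addr/share`` share
   openstack volume create my-volume --type smb-pool0 --size 10

.. end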
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/doc/source/configuration/block-storage/drivers/yadro-tatlin-volume-driver.rst0000664000175000017500000000607200000000000032161 0ustar00zuulzuul00000000000000============================ YADRO Cinder Driver ============================ YADRO Cinder driver provides iSCSI and FC support for TATLIN.UNIFIED storages. Supported Functions ~~~~~~~~~~~~~~~~~~~~ Basic Functions --------------- * Create Volume * Delete Volume * Attach Volume * Detach Volume * Extend Volume * Create Volume from Volume (clone) * Create Image from Volume * Volume Migration (host assisted) Additional Functions -------------------- * Extend an Attached Volume * Thin Provisioning * Manage/Unmanage Volume * Image Cache * Multiattach * High Availability Configuration ~~~~~~~~~~~~~ Set up TATLIN.UNIFIED storage ----------------------------- You need to specify settings as described below for storage systems. For details about each setting, see the user's guide of the storage system. #. User account Create a storage account belonging to the admin user group. #. Pool Create a storage pool that is used by the driver. #. Ports Setup Ethernet or FC ports you want to export volumes to. #. Hosts Create storage hosts and set ports of the initiators. One host must correspond to one initiator. #. Host Group Create storage host group and add hosts created on the previous step to the host group. #. CHAP Authentication Set up CHAP credentials for iSCSI storage hosts (if CHAP is used). Set up YADRO Cinder Driver ------------------------------------ Add the following configuration to ``/etc/cinder/cinder.conf``: .. code-block:: ini [iscsi-1] volume_driver=cinder.volume.drivers.yadro.tatlin_iscsi.TatlinISCSIVolumeDriver san_ip= san_login= san_password= tat_api_retry_count= api_port= pool_name= export_ports=, host_group= max_resource_count= auth_method= chap_username= chap_password= or .. code-block:: ini [fc-1] volume_driver=cinder.volume.drivers.yadro.tatlin_fc.TatlinFCVolumeDriver san_ip= san_login= san_password= tat_api_retry_count= api_port= pool_name= export_ports=, host_group= max_resource_count= ``volume_driver`` Volume driver name. ``san_ip`` TATLIN.UNIFIED management IP address or FQDN. ``san_login`` TATLIN.UNIFIED user name. ``san_password`` TATLIN.UNIFIED user password. ``tat_api_retry_count`` Number of repeated requests to TATLIN.UNIFIED. ``api_port`` TATLIN.UNIFIED management port. Default: 443. ``pool_name`` TATLIN.UNIFIED name of pool for Cinder Volumes. ``export_ports`` Comma-separated data ports for volumes to be exported to. ``host_group`` TATLIN.UNIFIED host group name. ``max_resource_count`` Limit on the number of resources for TATLIN.UNIFIED. Default: 150 ``auth_method`` (only iSCSI) Authentication method: * ``CHAP`` — use CHAP authentication (default) ``chap_username``, ``chap_password`` (if ``auth_method=CHAP``) CHAP credentials to validate the initiator. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/doc/source/configuration/block-storage/drivers/zadara-volume-driver.rst0000664000175000017500000000501000000000000031003 0ustar00zuulzuul00000000000000================================= Zadara Storage VPSA volume driver ================================= Zadara Storage, Virtual Private Storage Array (VPSA) is the first software defined, Enterprise-Storage-as-a-Service. 
It is an elastic and private block and file storage system which, provides enterprise-grade data protection and data management storage services. The ``ZadaraVPSAISCSIDriver`` volume driver allows the Zadara Storage VPSA to be used as a volume back end storage in OpenStack deployments. System requirements ~~~~~~~~~~~~~~~~~~~ To use Zadara Storage VPSA Volume Driver you will require: - Zadara Storage VPSA version 15.07 and above - iSCSI or iSER host interfaces Supported operations ~~~~~~~~~~~~~~~~~~~~~ - Create, delete, attach, and detach volumes - Create, list, and delete volume snapshots - Create a volume from a snapshot - Copy an image to a volume - Copy a volume to an image - Clone a volume - Extend a volume - Migrate a volume with back end assistance - Manage and unmanage a volume - Manage and unmanage volume snapshots - Multiattach a volume Configuration ~~~~~~~~~~~~~ #. Create a VPSA pool(s) or make sure you have an existing pool(s) that will be used for volume services. The VPSA pool(s) will be identified by its ID (pool-xxxxxxxx). For further details, see the `VPSA's user guide `_. #. Adjust the ``cinder.conf`` configuration file to define the volume driver name along with a storage back end entry for each VPSA pool that will be managed by the block storage service. Each back end entry requires a unique section name, surrounded by square brackets (or parentheses), followed by options in ``key=value`` format. .. note:: Restart cinder-volume service after modifying ``cinder.conf``. Sample minimum back end configuration .. code-block:: ini [DEFAULT] enabled_backends = vpsa [vpsa] zadara_vpsa_host = 172.31.250.10 zadara_vpsa_port = 80 zadara_user = vpsauser zadara_password = mysecretpassword zadara_use_iser = false zadara_vpsa_poolname = pool-00000001 volume_driver = cinder.volume.drivers.zadara.zadara.ZadaraVPSAISCSIDriver volume_backend_name = vpsa Driver-specific options ~~~~~~~~~~~~~~~~~~~~~~~ This section contains the configuration options that are specific to the Zadara Storage VPSA driver. .. config-table:: :config-target: Zadara cinder.volume.drivers.zadara.common cinder.volume.drivers.zadara.zadara .. note:: By design, all volumes created within the VPSA are thin provisioned. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/doc/source/configuration/block-storage/fc-zoning.rst0000664000175000017500000001232400000000000025165 0ustar00zuulzuul00000000000000 .. _fc_zone_manager: ========================== Fibre Channel Zone Manager ========================== The Fibre Channel Zone Manager allows FC SAN Zone/Access control management in conjunction with Fibre Channel block storage. The configuration of Fibre Channel Zone Manager and various zone drivers are described in this section. Configure Block Storage to use Fibre Channel Zone Manager ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ If Block Storage is configured to use a Fibre Channel volume driver that supports Zone Manager, update ``cinder.conf`` to add the following configuration options to enable Fibre Channel Zone Manager. Make the following changes in the ``/etc/cinder/cinder.conf`` file under a ``[fc-zone-manager]`` section. .. config-table:: :config-target: zoning cinder.zonemanager.fc_zone_manager To use different Fibre Channel Zone Drivers, use the parameters described in this section. .. 
note:: When multi backend configuration is used, provide the ``zoning_mode`` configuration option as part of the volume driver configuration where ``volume_driver`` option is specified. .. note:: Default value of ``zoning_mode`` is ``None`` and this needs to be changed to ``fabric`` to allow fabric zoning. .. note:: ``zoning_policy`` can be configured as ``initiator-target`` or ``initiator`` Brocade Fibre Channel Zone Driver ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Brocade Fibre Channel Zone Driver performs zoning operations through HTTP, HTTPS, or SSH. .. warning:: The Brocade Fibre Channel Zone Driver is being supported by the Cinder community on a best-effort basis. While it is tested with the first Release Candidate of each release, be aware that it is not continually tested by a third-party CI system. The driver was deprecated and marked as 'unsupported' in the Ussuri release, and is subject to immediate removal if the maintenance burden exceeds the community's capacity. Set the following options in the ``cinder.conf`` configuration file under the ``[fc-zone-manager]`` section. .. config-table:: :config-target: Brocade zoning manager cinder.zonemanager.drivers.brocade.brcd_fc_zone_driver Configure SAN fabric parameters under a section matching the name used in ``fc_fabric_names`` as described in the example below: .. config-table:: :config-target: Brocade zoning fabrics cinder.zonemanager.drivers.brocade.brcd_fabric_opts .. note:: Define a fabric group for each fabric using the fabric names used in ``fc_fabric_names`` configuration option as group name. .. note:: To define a fabric group for a switch which has Virtual Fabrics enabled, include the ``fc_virtual_fabric_id`` configuration option and ``fc_southbound_protocol`` configuration option set to ``HTTP``, ``HTTPS``, ``REST_HTTP`` or ``REST_HTTPS`` in the fabric group. Zoning on VF enabled fabric using ``SSH`` southbound protocol is not supported. .. note:: On switches running Fabric OS v8.2.1 or greater, the use of the REST interface is recommended for southbound communication. Set the ``fc_southbound_protocol`` configuration option to ``REST_HTTP`` or ``REST_HTTPS`` in the fabric group. System requirements ------------------- Brocade Fibre Channel Zone Driver requires firmware version FOS v6.4 or higher. As a best practice for zone management, use a user account with ``zoneadmin`` role. Users with ``admin`` role (including the default ``admin`` user account) are limited to a maximum of two concurrent SSH sessions. For information about how to manage Brocade Fibre Channel switches, see the Brocade Fabric OS user documentation. Cisco Fibre Channel Zone Driver ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Cisco Fibre Channel Zone Driver automates the zoning operations through SSH. Configure Cisco Zone Driver, Cisco Southbound connector, FC SAN lookup service and Fabric name. Set the following options in the ``cinder.conf`` configuration file. .. code-block:: ini [fc-zone-manager] zone_driver = cinder.zonemanager.drivers.cisco.cisco_fc_zone_driver.CiscoFCZoneDriver fc_san_lookup_service = cinder.zonemanager.drivers.cisco.cisco_fc_san_lookup_service.CiscoFCSanLookupService fc_fabric_names = CISCO_FABRIC_EXAMPLE cisco_sb_connector = cinder.zonemanager.drivers.cisco.cisco_fc_zone_client_cli.CiscoFCZoneClientCLI .. config-table:: :config-target: Cisco zoning manager cinder.zonemanager.drivers.cisco.cisco_fc_zone_driver Configure SAN fabric parameters under a section matching the name used in ``fc_fabric_names`` as described in the example below: .. 
config-table:: :config-target: Cisco zoning fabrics cinder.zonemanager.drivers.cisco.cisco_fabric_opts .. note:: Define a fabric group for each fabric using the fabric names used in ``fc_fabric_names`` configuration option as group name. The Cisco Fibre Channel Zone Driver supports basic and enhanced zoning modes.The zoning VSAN must exist with an active zone set name which is same as the ``fc_fabric_names`` option. System requirements ------------------- Cisco MDS 9000 Family Switches. Cisco MDS NX-OS Release 6.2(9) or later. For information about how to manage Cisco Fibre Channel switches, see the Cisco MDS 9000 user documentation. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/doc/source/configuration/block-storage/logs.rst0000664000175000017500000000143500000000000024240 0ustar00zuulzuul00000000000000=============================== Log files used by Block Storage =============================== The corresponding log file of each Block Storage service is stored in the ``/var/log/cinder/`` directory of the host on which each service runs. .. list-table:: **Log files used by Block Storage services** :header-rows: 1 :widths: 10 20 10 * - Log file - Service/interface (for CentOS, Fedora, openSUSE, Red Hat Enterprise Linux, and SUSE Linux Enterprise) - Service/interface (for Ubuntu and Debian) * - api.log - openstack-cinder-api - cinder-api * - cinder-manage.log - cinder-manage - cinder-manage * - scheduler.log - openstack-cinder-scheduler - cinder-scheduler * - volume.log - openstack-cinder-volume - cinder-volume ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/doc/source/configuration/block-storage/policy-config-HOWTO.rst0000664000175000017500000005236500000000000026744 0ustar00zuulzuul00000000000000.. Copyright (c) 2018 Red Hat Inc. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ========================== Policy configuration HowTo ========================== You can use Cinder policies to control how your users and administrators interact with the Block Storage Service. In this HowTo, we'll discuss the user model Cinder employs and how it can be modified by adjusting policies. * Like most OpenStack services, Cinder uses the OpenStack ``oslo.policy`` library as a base for its policy-related code. For a discussion of "rules" and "roles", other vocabulary, and general information about OpenStack policies and the policy configuration file, see `Administering Applications that use oslo.policy `_. * See :doc:`policy` for the list of policy targets recognized by Cinder. * Since the Queens release, the default way to run Cinder is without a policy file. This is because sensible default values are defined in the code. To run Cinder with a custom policy configuration, however, you'll need to write your changes into a policy file. .. 
only:: html * Elsewhere in this documentation, you can find a copy of the :doc:`sample policy file <./samples/policy.yaml>` that contains all the default settings. * Instructions for generating a sample ``policy.yaml`` file directly from the Cinder source code can be found in the file ``README-policy.generate.md`` in the ``etc/cinder`` directory in the Cinder `source code repository `_ (or its `github mirror `_). * OpenStack has deprecated the use of a JSON policy file since the Wallaby release (Cinder 18.0.0). If you are still using the JSON format, there is a `oslopolicy-convert-json-to-yaml`__ tool that will migrate your existing JSON-formatted policy file to YAML in a backward-compatible way. .. __: https://docs.openstack.org/oslo.policy/latest/cli/oslopolicy-convert-json-to-yaml.html Vocabulary Note ~~~~~~~~~~~~~~~ We need to clarify some terms we'll be using below. Project This is an administrative grouping of users into a unit that can own cloud resources. (This is what used to be called a "tenant".) Service This is an OpenStack component that users interact with through an API it provides. For example, "Cinder" is the OpenStack code name for the service that provides the Block Storage API versions 2 and 3. Cinder is also known as the OpenStack Block Storage Service. The point of making this distinction is that there's another use of the term 'project' that is relevant to the discussion, but that we're **not** going to use. Each OpenStack service is produced and maintained by a "project team". *We will not be using the term 'project' in that sense in this document. We'll always use the term 'service'.* (If you are new to OpenStack, this won't be a problem. But if you're discussing this content with someone who's been around OpenStack for a while, you'll want to be clear about this so that you're not talking past each other.) .. _cinder-user-model: The User Model ~~~~~~~~~~~~~~ The Cinder code is written with the expectation that there are two kinds of users. End users These are users who consume resources and (possibly) pay the bills. End users are restricted to acting within a specific project and cannot perform operations on resources that are not owned by the project(s) they are in. Administrative users ("admins") These are users who keep the lights on. They have the ability to view all resources controlled by Cinder and can perform most operations on them. They also have access to other operations (for example, setting quotas) that cannot be performed by end users. Additionally, admins can view resource properties that cannot be seen by end users (for example, the migration status of a volume). The technical term to describe this is that when a volume-show call is made in an *administrative context* it will contain additional properties than when the call is *not* made in an administrative context. Similarly, when a volume-list call is made in an administrative context, the response may include volumes that are not owned by the project of the person making the call; this never happens when a call is *not* made in an administrative context. Policies ~~~~~~~~ Broadly speaking, an operator can accomplish two things with policies: 1. The policy file can define the criteria for what users are granted the privilege to act in an administrative context. 2. The policy file can specify for specific *actions* (or *policy targets*), which users can perform those actions. 
In general, while an operator can define *who* can make calls in an administrative context, an operator cannot affect *what* can be done in an administrative context (because that's already been decided when the code was implemented). For example, the boundaries between projects are strictly enforced in Cinder, and only an admin can view resources across projects. There is no way to grant a user the ability to "see" into another project (at least not by policy configuration--this could be done by using the Identity Service to add the user to the other project, but note that at that point, the user is no longer *not* a member of the project owning the now visible resources.) Pre-Defined Policy Rules ~~~~~~~~~~~~~~~~~~~~~~~~ The default Cinder policy file contains three rules that are used as the basis of policy file configuration. "context_is_admin" This defines the administrative context in Cinder. You'll notice that it's defined once at the beginning of the sample policy file and isn't referred to anywhere else in that file. To understand what this does, it's helpful to know something about the API implementation. A user's API request must be accompanied by an authentication token from the Identity Service. (If you are using client software, for example, the python-cinderclient or python-openstack client, the token is being requested for you under the hood.) The Block Storage API confirms that the token is unexpired and obtains other information about the requestor, for example, what roles the Identity Service recognizes the user to have. Cinder uses this information to create an internal context object that will be passed around the code as various functions and services are called to satisfy the user's request. When the request context object is created, Cinder uses the "context_is_admin" rule to decide whether this context object will be recognized as providing an administrative context. It does this by setting the "is_admin" property to True on the context object. Cinder code later in the call chain simply checks whether the "is_admin" property is true on the context object to determine whether the call is taking place in an administrative context. Similarly, policies will refer to "is_admin:True" (either directly or indirectly) to require an administrative context. All of this is a long-winded way to say that in a Cinder policy file, you'll only see "context_is_admin" at the top; after that, you'll see "is_admin:True" whenever you want to refer to an administrative context. "admin_or_owner" This is the default rule for most non-admin API calls. As the name indicates, it allows an administrator or an owner to make the call. "admin_api" This is the default rule for API calls that only administrators should be allowed to make. .. note:: For some API calls, there are checks way down in the code to ensure that a call is being made in an administrative context before the request is allowed to succeed. Thus it is not always the case that simply changing a policy target whose value is "rule:admin_api" to "rule:admin_or_owner" (or "rule:admin_api or role:some-special-role") will give a non-admin user the ability to successfully make the call. Unfortunately, you can't tell which calls these are without experimenting with a policy file (or looking at the source code). A good rule of thumb, however, is that API calls governed by policies marked as "rule:admin_api" in the default policy configuration fall into this category. 
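To make the preceding discussion concrete, here is roughly how these three rules appear in a sample ``policy.yaml`` generated from the Cinder source code. The ``admin_or_owner`` definition is quoted again in Step 4A below; the other two strings are shown here only as an illustrative sketch and may differ slightly between releases, so generate your own sample file rather than copying them::

    "context_is_admin": "role:admin"
    "admin_api": "is_admin:True or (role:admin and is_admin_project:True)"
    "admin_or_owner": "is_admin:True or (role:admin and is_admin_project:True) or project_id:%(project_id)s"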
Example: Configuring a Read-Only Administrator ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ A fairly common configuration request is to create a special category of administrator who has only an *observer* ("look but don't touch") function. The idea is that for security and stability reasons, it's a good idea to allow all users, including administrators, the least amount of privileges they need to successfully perform their job. Someone whose job is to audit information about Cinder (for example, to see what the current quota settings are) doesn't need the ability to change these settings. In this section, we'll discuss one way to configure the Cinder policy file to accomplish this. .. note:: To keep the discussion focused, this example assumes that you're working from the default policy file. Hopefully the general strategy will be clear enough to be applied to clouds already using non-default configurations. Additionally, there are other logically equivalent ways to configure the policy file to introduce a read-only administrator; this is not by any means the only way to do it. Given the job requirements, the observer administrator (who we'll refer to as the "observer-admin" for short) needs to operate in the administrative context. Thus, we'll have to adjust the "context_is_admin" definition in the policy file to include such a person. Note that this will make such a person a **full administrator** if we make no other changes to the policy file. Thus the strategy we'll use is to first make the observer-admin a full administrator, and then block the observer-admin's access to those API calls that aren't read-only. .. warning:: Metaphorically, what we are doing is opening the floodgates and then plugging up the holes one by one. That sounds alarming, and it should. We cannot emphasize strongly enough that any policy file changes should be **well-contained** (that is, you know exactly who has the new role or roles) and **tested** (you should have some kind of tests in place to determine that your changes have only the effects you intend). This is probably as good a place as any to remind you that the suggestions that follow are provided without warranty of any kind, either expressed or implied. Like the OpenStack source code, they are covered by the `Apache License, version 2.0 `_. In particular, we direct your attention to sections 7-9. Step 0: Testing ``````````````` We mention testing first (even though you haven't made any changes yet) because if we wait to mention it until after we've made the configuration changes, you might get the impression that it's the last thing to do (or the least important). It will make your life much easier if you come up with a plan for how you will test these changes before you start modifiying the policy configuration. We advise setting up automated tests because the Block Storage API has a lot of API calls and you'll want to test each of them against an admin user, an observer-admin user, and a "regular" end user. Further, if you anticipate that you may require finer-grained access than outlined in this example (for example, you would like a "creator" role that can create and read, but not delete), your configuration will be all the more complex and hence require more extensive testing that you won't want to do by hand. Step 1: Create a new role ````````````````````````` In the Identity Service, create a new role. It's a good idea to make this a new, never before assigned role so that you can easily track who it's been assigned to. 
As you recall from the discussion above, this person will have **full administrative powers** for any functions that are missed when we do the "block up the holes" stage. For this example, we'll use a role named ``cinder:reader-admin``. There is nothing special about this role name; you may use any name that makes sense to the administrators who will be assigning the role and configuring the policies. (The 'cinder:' part is to remind you that this role applies to the Block Storage Service, the 'reader' part is from the role name that OpenStack has converged upon for this type of observer role, and the '-admin' part is to remind you that whoever has this role will be able to observe admin-type stuff.) .. note:: Beginning with the Rocky release, the Identity Service (Keystone) creates three roles when the service is initiated: ``member``, ``reader``, and ``admin``. By default, the ``reader`` role is not assigned to any users. Work is underway during the Stein cycle so that the Identity API will recognize users with the ``reader`` role as having read-only access to the Identity API. See the Keystone spec `Basic Default Roles `_ for more information. We mention this so that you are aware that if you use a role named ``reader`` when doing the policy configuration described in this document, at some point users assigned the ``reader`` role may have read-only access to services other than the Block Storage Service. The desirability of this outcome depends upon your particular use case. Step 2: Open the floodgates ``````````````````````````` If your installation doesn't have an ``/etc/cinder/policy.yaml`` file, you can generate one from the source code (see the introductory section of this document). .. note:: The default file is *completely commented out*. For any of the changes you make below to be effective, don't forget to *uncomment* the line in which they occur. To extend the administrative context to include the new role, change:: "context_is_admin": "role:admin" to:: "context_is_admin": "role:admin or role:cinder:reader-admin" Step 3: Plug the holes in the Admin API ``````````````````````````````````````` Now we make adjustments to the policy configuration so that the observer-admin will in fact have only read-only access to Cinder resources. 3A: New Policy Rule ------------------- First, we create a new policy rule for Admin API access that specifically excludes the new role. Find the line in the policy file that has ``"admin_api"`` on the left hand side. Immediately after it, introduce a new rule:: "strict_admin_api": "not role:cinder:reader-admin and rule:admin_api" 3B: Plugging Holes ------------------ Now, plug up the holes we've opened in the Admin API by using this new rule. Find each of the lines in the remainder of the policy file that look like:: "target": "rule:admin_api" and for each line, decide whether the observer-admin needs access to this action or not. For example, the target ``"volume_extension:services:index"`` specifies a read-only action, so it's appropriate for the observer-admin to perform. We'll leave that one in its default configuration of:: "volume_extension:services:index": "rule:admin_api" On the other hand, if the target is something that allows modification, we most likely don't want to allow the observer-admin to perform it. For such actions we need to use the "strict" form of the admin rule. For example, consider the action ``"volume_extension:quotas:delete"``. 
To exclude the observer-admin from performing it, change the default setting of:: "volume_extension:quotas:delete": "rule:admin_api" to:: "volume_extension:quotas:delete": "rule:strict_admin_api" Do this on a case-by-case basis for the other policy targets that by default are governed by the ``rule:admin_api``. 3C: Other Changes ----------------- You've probably figured this out already, but there may be some other changes that are implied by, but not explicitly mentioned in, the above instructions. For example, you'll find the following policies in the sample file:: "volume_extension:volume_type_encryption": "rule:admin_api" "volume_extension:volume_type_encryption:create": "rule:volume_extension:volume_type_encryption" "volume_extension:volume_type_encryption:get": "rule:volume_extension:volume_type_encryption" "volume_extension:volume_type_encryption:update": "rule:volume_extension:volume_type_encryption" "volume_extension:volume_type_encryption:delete": "rule:volume_extension:volume_type_encryption" The first policy covers all of create/read/update/delete (and is deprecated for removal during the Stein development cycle). However, if you set it to ``"rule:strict_admin_api"``, the observer-admin won't be able to read the volume type encryption. So it should be left at ``"rule:admin_api"`` and the create/update/delete policies should be changed to ``"rule:strict_admin_api"``. Additionally, in preparation for the deprecated policy target's removal, it's a good idea to change the value of the ``get`` policy to ``"rule:admin_api"``. Step 4: Plug the holes in the "Regular" API ``````````````````````````````````````````` As stated earlier, a user with the role ``cinder:reader-admin`` is elevated to full administrative powers. That implies that such a user can perform administrative functions on end-user resources. Hence, we have another set of holes to plug up. 4A: New Policy Rule ------------------- As we did for the Admin API, we'll create a strict version of the "admin_or_owner" rule so we can specifically exclude the observer-admin from executing that action. Find the line in the policy file where ``"admin_or_owner"`` appears on the left hand side. It probably looks something like this:: "admin_or_owner": "is_admin:True or (role:admin and is_admin_project:True) or project_id:%(project_id)s" Immediately following it, introduce a new rule:: "strict_admin_or_owner": "(not role:cinder:reader-admin and (is_admin:True or (role:admin and is_admin_project:True))) or project_id:%(project_id)s" .. note:: To understand what this change does, note that the "admin_or_owner" rule definition has the general structure:: or To construct the strict version, we need to make sure that the ``not cinder:reader-admin`` part applies only the left-hand side (the ). The easiest way to do that is to structure the new rule as follows:: (not role:cinder:reader-admin and ()) or .. note:: If you don't need a user with the role ``cinder:reader-admin`` to manage resources in their own project, you could simplify this rule to:: "strict_admin_or_owner": "not role:cinder:reader-admin and rule:admin_or_owner" 4B: Plugging Holes ------------------ Find each line in the policy file that looks like:: "target": "rule:admin_or_owner" and decide whether it represents an action that the observer-admin needs to perform. 
For those actions you *don't* want the observer-admin to do, change the policy to:: "target": "rule:strict_admin_or_owner" 4C: Unrestricted Policies ------------------------- There are some policies in the default file that look like this:: "target": "" These are called *unrestricted policies* because the requirements are empty, and hence can be satisfied by any authenticated user. (Recall from the earlier discussion of :ref:`cinder-user-model`, however, that this does *not* mean that any user can see any other user's resources.) Unrestricted policies may be found on GET calls that don't have a particular resource to refer to (for example, the call to get all volumes) or a POST call that creates a completely new resource (for example, the call to create a volume). You don't see them much in the Cinder policy file because the code implementing the Block Storage API v2 and v3 always make sure there's a target object containing at least the ``project_id`` and ``user_id`` that can be used in evaluating whether the policy should allow the action or not. Thus, obvious read-only targets (for example, ``volume_extension:type_get``) can be left unrestricted. Policy targets that are not read only (for example, ``volume:accept_transfer``), can be changed to ``rule:strict_admin_or_owner``. Step 5: Testing ``````````````` We emphasized above that because of the nature of this change, it is extremely important to test it carefully. One thing to watch out for: because we're using a clause like ``not role:cinder:reader-admin``, a typographical error in the role name will cause problems. (For example, if you enter it into the file as ``not role:cinder_reader-admin``, it won't exclude the user we're worried about, who has the role ``cinder:reader-admin``.) As mentioned earlier, we advise setting up automated tests so that you can prevent regressions if you have to modify your policy files at some point. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/doc/source/configuration/block-storage/policy-personas.rst0000664000175000017500000016254100000000000026431 0ustar00zuulzuul00000000000000=============================== Policy Personas and Permissions =============================== Beginning with the Xena release, the Block Storage service API v3 takes advantage of the default authentication and authorization apparatus supplied by the Keystone project to give operators a rich set of default policies to control how users interact with the Block Storage service API. This document describes Cinder's part in an effort across OpenStack services to provide a consistent and useful default RBAC configuration. (This effort is referred to as "secure RBAC" for short.) Vocabulary Note --------------- We need to clarify some terms we'll be using below. Project This is a grouping of users into a unit that can own cloud resources. (This is what used to be called a "tenant", but you should never call it that.) Users, projects, and their associations are created in Keystone. Service This is an OpenStack component that users interact with through an API it provides. For example, "Cinder" is the OpenStack code name for the service that provides the Block Storage API version 3. Cinder is also known as the OpenStack Block Storage service. The point of making this distinction is that there's another use of the term 'project' that is relevant to the discussion, but that we're **not** going to use. 
Each OpenStack service is produced and maintained by a "project team". *We will not be using the term 'project' in that sense in this document. We'll always use the term 'service'.* (If you are new to OpenStack, this won't be a problem. But if you're discussing this content with someone who's been around OpenStack for a while, you'll want to be clear about this so that you're not talking past each other.) .. _cinder-personas: The Cinder Personas ------------------- This is easiest to explain if we introduce the five "personas" Cinder recognizes. In the list below, a "system" refers to the deployed system (that is, Cinder and all its services), and a "project" refers to a container or namespace for resources. * In order to consume resources, a user must be assigned to a project by being given a role (for example, 'member') in that project. That's done in Keystone; it's not a Cinder concern. See `Default Roles `_ in the Keystone documentation for more information. .. list-table:: The Five Personas :header-rows: 1 * - who - what * - project-reader - Has access to the API for read-only requests that affect only project-specific resources (that is, cannot create, update, or delete resources within a project) * - project-member - A normal user in a project. * - project-admin - All the normal stuff plus some minor administrative abilities in a particular project, for example, able to set the default volume type for a project. (The administrative abilities are "minor" in the sense that they have no impact on the Cinder system, they only allow the project-admin to make system-safe changes isolated to that project.) * - system-reader - Has read only access to the API; like the project-reader, but can read any project recognized by cinder. * - system-admin - Has the highest level of authorization on the system and can perform any action in Cinder. In most deployments, only the operator, deployer, or other highly trusted person will be assigned this persona. This is a Cinder super-user who can do *everything*, both with respect to the Cinder system and all individual projects. .. note:: The Keystone project provides the ability to describe additional personas, but Cinder does not currently recognize them. In particular: * Cinder does not recognize the ``domain`` scope at all. So even if you successfully request a "domain-scoped" token from the Identity service, you won't be able to use it with Cinder. Instead, request a "project-scoped" token for the particular project in your domain that you want to act upon. * Cinder does not recognize a "system-member" persona, that is, a user with the ``member`` role on a ``system``. The default Cinder policy configuration treats such a user as identical to the *system-reader* persona described above. More information about roles and scope is available in the `Keystone Administrator Guides `__. .. note:: **Privacy Expectations** Cinder's model of resources (volumes, backups, snapshots, etc.) is that they are owned by the *project*. Thus, they are shared by all users who have a role assignment on that project, no matter what persona that user has been assigned. For example, if Alice and Bob are in Project P, and Alice has persona project-member while Bob has persona project-reader, if Alice creates volume V in Project P, Bob can see volume V in the volume-list response, and Bob can read all the volume metadata on volume V that Alice can read--even volume metadata that Alice may have added to the volume. 
The key point here is that even though Alice created volume V, *it's not her volume*. The volume is "owned" by Project P and is available to all users who have authorization on that project via role assignments in keystone. What a user can do with volume V depends on whether that user has an admin, member, or reader role in project P. With respect to Project P, the personas with system scope (system-admin and system-reader) have access to the project in the sense that a cinder system-admin can do anything in Project P that the project-admin can do plus some additional powers. A cinder system-reader has read-only access to everything in Project P that the system-admin can access. The above describe the default policy configuration for Cinder. It is possible to modify policies to obtain different behavior, but that is beyond the scope of this document. .. _cinder-s-rbac-schedule: Implementation Schedule ----------------------- For reasons that will become clear in this section, the secure RBAC effort is being implemented in Cinder in two phases. In Xena, there are three personas. .. list-table:: The 3 Xena Personas :header-rows: 1 * - who - Keystone technical info * - project-reader - ``reader`` role on a ``project``, resulting in project-scope * - project-member - ``member`` role on a ``project``, resulting in project-scope * - system-admin - ``admin`` role on a ``project``, but recognized by Cinder as having permission to act on the cinder *system* Note that you *cannot* create a project-admin persona on your own simply by assigning the ``admin`` role to a user. Such assignment results in that user becoming a system-admin. In the Yoga release, we plan to implement the full set of Cinder personas: .. list-table:: The 5 Yoga Personas :header-rows: 1 * - who - Keystone technical info * - project-reader - ``reader`` role on a ``project``, resulting in project-scope * - project-member - ``member`` role on a ``project``, resulting in project-scope * - project-admin - ``admin`` role on a ``project``, resulting in project-scope * - system-reader - ``reader`` role on a ``system``, resulting in system-scope * - system-admin - ``admin`` role on a ``system``, resulting in system-scope Note that although the underlying technical information changes for the system-admin, the range of actions performable by that persona does not change. .. _cinder-permissions-matrix: Cinder Permissions Matrix ------------------------- Now that you know who the personas are, here's what they can do with respect to the policies that are recognized by Cinder. Keep in mind that only three of the personas (project-reader, project-member, and system-admin) are implemented in the Xena release. NOTE: the columns in () will be deleted; they are here for comparison as the matrix is validated by human beings. .. 
list-table:: Attachments (Microversion 3.27) :header-rows: 1 * - functionality - API call - policy name - (old rule) - project-reader - project-member - project-admin - system-reader - system-admin - (old "owner") - (old "admin") * - Create attachment - ``POST /attachments`` - volume:attachment_create - empty - no - yes - yes - no - yes - yes - yes * - Update attachment - ``PUT /attachments/{attachment_id}`` - volume:attachment_update - rule:admin_or_owner - no - yes - yes - no - yes - yes - yes * - Delete attachment - ``DELETE /attachments/{attachment_id}`` - volume:attachment_delete - rule:admin_or_owner - no - yes - yes - no - yes - yes - yes * - Mark a volume attachment process as completed (in-use) - | Microversion 3.44 | ``POST /attachments/{attachment_id}/action`` (os-complete) - volume:attachment_complete - rule:admin_or_owner - no - yes - yes - no - yes - yes - yes * - Allow multiattach of bootable volumes - | This is a secondary check on | ``POST /attachments`` | which is governed by another policy - volume:multiattach_bootable_volume - rule:admin_or_owner - no - yes - yes - no - yes - yes - yes .. list-table:: User Messages (Microversion 3.3) :header-rows: 1 * - functionality - API call - policy name - (old rule) - project-reader - project-member - project-admin - system-reader - system-admin - (old "owner") - (old "admin") * - List messages - ``GET /messages`` - message:get_all - rule:admin_or_owner - yes - yes - yes - yes - yes - yes - yes * - Show message - ``GET /messages/{message_id}`` - message:get - rule:admin_or_owner - yes - yes - yes - yes - yes - yes - yes * - Delete message - ``DELETE /messages/{message_id}`` - message:delete - rule:admin_or_owner - no - yes - yes - no - yes - yes - yes .. list-table:: Clusters (Microversion 3.7) :header-rows: 1 * - functionality - API call - policy name - (old rule) - project-reader - project-member - project-admin - system-reader - system-admin - (old "owner") - (old "admin") * - List clusters - | ``GET /clusters`` | ``GET /clusters/detail`` - clusters:get_all - rule:admin_api - no - no - no - no - yes - no - yes * - Show cluster - ``GET /clusters/{cluster_id}`` - clusters:get - rule:admin_api - no - no - no - no - yes - no - yes * - Update cluster - ``PUT /clusters/{cluster_id}`` - clusters:update - rule:admin_api - no - no - no - no - yes - no - yes .. list-table:: Workers (Microversion 3.24) :header-rows: 1 * - functionality - API call - policy name - (old rule) - project-reader - project-member - project-admin - system-reader - system-admin - (old "owner") - (old "admin") * - Clean up workers - ``POST /workers/cleanup`` - workers:cleanup - rule:admin_api - no - no - no - no - yes - no - yes .. 
list-table:: Snapshots :header-rows: 1 * - functionality - API call - policy name - (old rule) - project-reader - project-member - project-admin - system-reader - system-admin - (old "owner") - (old "admin") * - List snapshots - | ``GET /snapshots`` | ``GET /snapshots/detail`` - volume:get_all_snapshots - rule:admin_or_owner - yes - yes - yes - yes - yes - yes - yes * - List or show snapshots with extended attributes - | ``GET /snapshots/{snapshot_id}`` | ``GET /snapshots/detail`` - volume_extension:extended_snapshot_attributes - rule:admin_or_owner - yes - yes - yes - yes - yes - yes - yes * - Create snapshot - ``POST /snapshots`` - volume:create_snapshot - rule:admin_or_owner - no - yes - yes - no - yes - yes - yes * - Show snapshot - ``GET /snapshots/{snapshot_id}`` - volume:get_snapshot - rule:admin_or_owner - yes - yes - yes - yes - yes - yes - yes * - Update snapshot - ``PUT /snapshots/{snapshot_id}`` - volume:update_snapshot - rule:admin_or_owner - no - yes - yes - no - yes - yes - yes * - Delete snapshot - ``DELETE /snapshots/{snapshot_id}`` - volume:delete_snapshot - rule:admin_or_owner - no - yes - yes - no - yes - yes - yes * - Reset status of a snapshot. - ``POST /snapshots/{snapshot_id}/action`` (os-reset_status) - volume_extension:snapshot_admin_actions:reset_status - rule:admin_api - no - no - no - no - yes - no - yes * - Update status (and optionally progress) of snapshot - ``POST /snapshots/{snapshot_id}/action`` (os-update_snapshot_status) - snapshot_extension:snapshot_actions:update_snapshot_status - empty - no - yes - yes - no - yes - yes - yes * - Force delete a snapshot - ``POST /snapshots/{snapshot_id}/action`` (os-force_delete) - volume_extension:snapshot_admin_actions:force_delete - rule:admin_api - no - no - no - no - yes - no - yes * - List (in detail) of snapshots which are available to manage - | ``GET /manageable_snapshots`` | ``GET /manageable_snapshots/detail`` - snapshot_extension:list_manageable - rule:admin_api - no - no - no - no - yes - no - yes * - Manage an existing snapshot - ``POST /manageable_snapshots`` - snapshot_extension:snapshot_manage - rule:admin_api - no - no - no - no - yes - no - yes * - Unmanage a snapshot - ``POST /snapshots/{snapshot_id}/action`` (os-unmanage) - snapshot_extension:snapshot_unmanage - rule:admin_api - no - no - no - no - yes - no - yes .. list-table:: Snapshot Metadata :header-rows: 1 * - functionality - API call - policy name - (old rule) - project-reader - project-member - project-admin - system-reader - system-admin - (old "owner") - (old "admin") * - Show snapshot's metadata or one specified metadata with a given key - | ``GET /snapshots/{snapshot_id}/metadata`` | ``GET /snapshots/{snapshot_id}/metadata/{key}`` - volume:get_snapshot_metadata - rule:admin_or_owner - yes - yes - yes - yes - yes - yes - yes * - Update snapshot's metadata or one specified metadata with a given key - | ``PUT /snapshots/{snapshot_id}/metadata`` | ``PUT /snapshots/{snapshot_id}/metadata/{key}`` - volume:update_snapshot_metadata - rule:admin_or_owner - no - yes - yes - no - yes - yes - yes * - Delete snapshot's specified metadata with a given key - ``DELETE /snapshots/{snapshot_id}/metadata/{key}`` - volume:delete_snapshot_metadata - rule:admin_or_owner - no - yes - yes - no - yes - yes - yes .. Backups: most of these are enforced in cinder/backup/api.py .. 
list-table:: Backups :header-rows: 1 * - functionality - API call - policy name - (old rule) - project-reader - project-member - project-admin - system-reader - system-admin - (old "owner") - (old "admin") * - List backups - | ``GET /backups`` | ``GET /backups/detail`` - backup:get_all - rule:admin_or_owner - yes - yes - yes - yes - yes - yes - yes * - Include project attributes in the list backups, show backup responses - | Microversion 3.18 | Adds ``os-backup-project-attr:project_id`` to the following responses: | ``GET /backups/detail`` | ``GET /backups/{backup_id}`` | The ability to make these API calls is governed by other policies. - backup:backup_project_attribute - rule:admin_api - no - no - no - no - yes - no - yes * - Create backup - ``POST /backups`` - backup:create - empty - no - yes - yes - no - yes - yes - yes * - Show backup - ``GET /backups/{backup_id}`` - backup:get - rule:admin_or_owner - yes - yes - yes - yes - yes - yes - yes * - Update backup - | Microversion 3.9 | ``PUT /backups/{backup_id}`` - backup:update - rule:admin_or_owner - no - yes - yes - no - yes - yes - yes * - Delete backup - ``DELETE /backups/{backup_id}`` - backup:delete - rule:admin_or_owner - no - yes - yes - no - yes - yes - yes * - Restore backup - ``POST /backups/{backup_id}/restore`` - backup:restore - rule:admin_or_owner - no - yes - yes - no - yes - yes - yes * - Import backup - ``POST /backups/{backup_id}/import_record`` - backup:backup-import - rule:admin_api - no - no - no - no - yes - no - yes * - Export backup - ``POST /backups/{backup_id}/export_record`` - backup:export-import - rule:admin_api - no - no - no - no - yes - no - yes * - Reset status of a backup - ``POST /backups/{backup_id}/action`` (os-reset_status) - volume_extension:backup_admin_actions:reset_status - rule:admin_api - no - no - no - no - yes - no - yes * - Force delete a backup - ``POST /backups/{backup_id}/action`` (os-force_delete) - volume_extension:backup_admin_actions:force_delete - rule:admin_api - no - no - no - no - yes - no - yes .. list-table:: Groups (Microversion 3.13) :header-rows: 1 * - functionality - API call - policy name - (old rule) - project-reader - project-member - project-admin - system-reader - system-admin - (old "owner") - (old "admin") * - List groups - | ``GET /groups`` | ``GET /groups/detail`` - group:get_all - rule:admin_or_owner - yes - yes - yes - yes - yes - yes - yes * - Create group, create group from src - | ``POST /groups`` | Microversion 3.14: | ``POST /groups/action`` (create-from-src) - group:create - empty - no - yes - yes - no - yes - yes - yes * - Show group - ``GET /groups/{group_id}`` - group:get - rule:admin_or_owner - yes - yes - yes - yes - yes - yes - yes * - Update group - ``PUT /groups/{group_id}`` - group:update - rule:admin_or_owner - no - yes - yes - no - yes - yes - yes * - Include project attributes in the list groups, show group responses - | Microversion 3.58 | Adds ``project_id`` to the following responses: | ``GET /groups/detail`` | ``GET /groups/{group_id}`` | The ability to make these API calls is governed by other policies. - group:group_project_attribute - rule:admin_api - no - no - no - no - yes - no - yes .. 
list-table:: Group Types (Microversion 3.11) :header-rows: 1 * - functionality - API call - policy name - (old rule) - project-reader - project-member - project-admin - system-reader - system-admin - (old "owner") - (old "admin") * - | **DEPRECATED** | Create, update or delete a group type - | (NOTE: new policies split POST, PUT, DELETE) | ``POST /group_types/`` | ``PUT /group_types/{group_type_id}`` | ``DELETE /group_types/{group_type_id}`` - group:group_types_manage - rule:admin_api - no - no - no - no - yes - no - yes * - | **NEW** | Create a group type - ``POST /group_types/`` - group:group_types:create - (new policy) - no - no - no - no - yes - n/a - n/a * - | **NEW** | Update a group type - ``PUT /group_types/{group_type_id}`` - group:group_types:update - (new policy) - no - no - no - no - yes - n/a - n/a * - | **NEW** | Delete a group type - ``DELETE /group_types/{group_type_id}`` - group:group_types:delete - (new policy) - no - no - no - no - yes - n/a - n/a * - Show group type with type specs attributes - | Adds ``group_specs`` to the following responses: | ``GET /group_types`` | ``GET /group_types/default`` | ``GET /group_types/{group_type_id}`` | These calls are not governed by a policy. - group:access_group_types_specs - rule:admin_api - no - no - no - no - yes - no - yes * - | **DEPRECATED** | Create, show, update and delete group type spec - | (NOTE: new policies split GET, POST, PUT, DELETE) | ``GET /group_types/{group_type_id}/group_specs`` | ``GET /group_types/{group_type_id}/group_specs/{g_spec_id}`` | ``POST /group_types/{group_type_id}/group_specs`` | ``PUT /group_types/{group_type_id}/group_specs/{g_spec_id}`` | ``DELETE /group_types/{group_type_id}/group_specs/{g_spec_id}`` - group:group_types_specs - rule:admin_api - no - no - no - no - yes - no - yes * - | **NEW** | Create group type spec - ``POST /group_types/{group_type_id}/group_specs`` - group:group_types_specs:create - (new policy) - no - no - no - no - yes - n/a - n/a * - | **NEW** | List group type specs - ``GET /group_types/{group_type_id}/group_specs`` - group:group_types_specs:get_all - (new policy) - no - no - no - no - yes - n/a - n/a * - | **NEW** | Show detail for a group type spec - ``GET /group_types/{group_type_id}/group_specs/{g_spec_id}`` - group:group_types_specs:get - (new policy) - no - no - no - no - yes - n/a - n/a * - | **NEW** | Update group type spec - ``PUT /group_types/{group_type_id}/group_specs/{g_spec_id}`` - group:group_types_specs:update - (new policy) - no - no - no - no - yes - n/a - n/a * - | **NEW** | Delete group type spec - ``DELETE /group_types/{group_type_id}/group_specs/{g_spec_id}`` - group:group_types_specs:delete - (new policy) - no - no - no - no - yes - n/a - n/a .. 
list-table:: Group Snapshots (Microversion 3.14) :header-rows: 1 * - functionality - API call - policy name - (old rule) - project-reader - project-member - project-admin - system-reader - system-admin - (old "owner") - (old "admin") * - List group snapshots - | ``GET /group_snapshots`` | ``GET /group_snapshots/detail`` - group:get_all_group_snapshots - rule:admin_or_owner - yes - yes - yes - yes - yes - yes - yes * - Create group snapshot - ``POST /group_snapshots`` - group:create_group_snapshot - empty - no - yes - yes - no - yes - yes - yes * - Show group snapshot - ``GET /group_snapshots/{group_snapshot_id}`` - group:get_group_snapshot - rule:admin_or_owner - yes - yes - yes - yes - yes - yes - yes * - Delete group snapshot - ``DELETE /group_snapshots/{group_snapshot_id}`` - group:delete_group_snapshot - rule:admin_or_owner - no - yes - yes - no - yes - yes - yes * - Update group snapshot - | ``PUT /group_snapshots/{group_snapshot_id}`` | Note: even though the policy is defined, this call is not implemented in the Block Storage API. - group:update_group_snapshot - rule:admin_or_owner - no - yes - yes - no - yes - yes - yes * - Reset status of group snapshot - | Microversion 3.19 | ``POST /group_snapshots/{group_snapshot_id}/action`` (reset_status) - group:reset_group_snapshot_status - rule:admin_api - no - no - no - no - yes - no - yes * - Include project attributes in the list group snapshots, show group snapshot responses - | Microversion 3.58 | Adds ``project_id`` to the following responses: | ``GET /group_snapshots/detail`` | ``GET /group_snapshots/{group_snapshot_id}`` | The ability to make these API calls is governed by other policies. - group:group_snapshot_project_attribute - rule:admin_api - no - no - no - no - yes - no - yes .. list-table:: Group Actions :header-rows: 1 * - functionality - API call - policy name - (old rule) - project-reader - project-member - project-admin - system-reader - system-admin - (old "owner") - (old "admin") * - Delete group - ``POST /groups/{group_id}/action`` (delete) - group:delete - rule:admin_or_owner - no - yes - yes - no - yes - yes - yes * - Reset status of group - | Microversion 3.20 | ``POST /groups/{group_id}/action`` (reset_status) - group:reset_status - rule:admin_api - no - no - no - no - yes - no - yes * - Enable replication - | Microversion 3.38 | ``POST /groups/{group_id}/action`` (enable_replication) - group:enable_replication - rule:admin_or_owner - no - yes - yes - no - yes - yes - yes * - Disable replication - | Microversion 3.38 | ``POST /groups/{group_id}/action`` (disable_replication) - group:disable_replication - rule:admin_or_owner - no - yes - yes - no - yes - yes - yes * - Fail over replication - | Microversion 3.38 | ``POST /groups/{group_id}/action`` (failover_replication) - group:failover_replication - rule:admin_or_owner - no - yes - yes - no - yes - yes - yes * - List failover replication - | Microversion 3.38 | ``POST /groups/{group_id}/action`` (list_replication_targets) - group:list_replication_targets - rule:admin_or_owner - no - yes - yes - no - yes - yes - yes .. 
list-table:: QOS specs :header-rows: 1 * - functionality - API call - policy name - (old rule) - project-reader - project-member - project-admin - system-reader - system-admin - (old "owner") - (old "admin") * - List qos specs or list all associations - | ``GET /qos-specs`` | ``GET /qos-specs/{qos_id}/associations`` - volume_extension:qos_specs_manage:get_all - rule:admin_api - no - no - no - no - yes - no - yes * - Show qos specs - ``GET /qos-specs/{qos_id}`` - volume_extension:qos_specs_manage:get - rule:admin_api - no - no - no - no - yes - no - yes * - Create qos specs - ``POST /qos-specs`` - volume_extension:qos_specs_manage:create - rule:admin_api - no - no - no - no - yes - no - yes * - Update qos specs: update key/values in the qos-spec or update the volume-types associated with the qos-spec - | ``PUT /qos-specs/{qos_id}`` | ``GET /qos-specs/{qos_id}/associate?vol_type_id={volume_id}`` | ``GET /qos-specs/{qos_id}/disassociate?vol_type_id={volume_id}`` | ``GET /qos-specs/{qos_id}/disassociate_all`` | (yes, these GETs are really updates) - volume_extension:qos_specs_manage:update - rule:admin_api - no - no - no - no - yes - no - yes * - Delete a qos-spec, or remove a list of keys from the qos-spec - | ``DELETE /qos-specs/{qos_id}`` | ``PUT /qos-specs/{qos_id}/delete_keys`` - volume_extension:qos_specs_manage:delete - rule:admin_api - no - no - no - no - yes - no - yes .. list-table:: Quotas :header-rows: 1 * - functionality - API call - policy name - (old rule) - project-reader - project-member - project-admin - system-reader - system-admin - (old "owner") - (old "admin") * - | **DEPRECATED** | Show or update project quota class - | (NOTE: new policies split GET and PUT) | ``GET /os-quota-class-sets/{project_id}`` | ``PUT /os-quota-class-sets/{project_id}`` - volume_extension:quota_classes - rule:admin_api - no - no - no - no - yes - no - yes * - | **NEW** | Show project quota class - ``GET /os-quota-class-sets/{project_id}`` - volume_extension:quota_classes:get - (new policy) - no - no - no - no - yes - n/a - n/a * - | **NEW** | Update project quota class - ``PUT /os-quota-class-sets/{project_id}`` - volume_extension:quota_classes:update - (new policy) - no - no - no - no - yes - n/a - n/a * - Show project quota (including usage and default) - | ``GET /os-quota-sets/{project_id}`` | ``GET /os-quota-sets/{project_id}/default`` | ``GET /os-quota-sets/{project_id}?usage=True`` - volume_extension:quotas:show - rule:admin_or_owner - yes - yes - yes - yes - yes - yes - yes * - Update project quota - ``PUT /os-quota-sets/{project_id}`` - volume_extension:quotas:update - rule:admin_api - no - no - no - no - yes - no - yes * - Delete project quota - ``DELETE /os-quota-sets/{project_id}`` - volume_extension:quotas:delete - rule:admin_api - no - no - no - no - yes - no - yes .. list-table:: Capabilities :header-rows: 1 * - functionality - API call - policy name - (old rule) - project-reader - project-member - project-admin - system-reader - system-admin - (old "owner") - (old "admin") * - Show backend capabilities - ``GET /capabilities/{host_name}`` - volume_extension:capabilities - rule:admin_api - no - no - no - no - yes - no - yes .. 
list-table:: Services :header-rows: 1 * - functionality - API call - policy name - (old rule) - project-reader - project-member - project-admin - system-reader - system-admin - (old "owner") - (old "admin") * - List all services - ``GET /os-services`` - volume_extension:services:index - rule:admin_api - no - no - no - no - yes - no - yes * - Update service - | ``PUT /os-services/enable`` | ``PUT /os-services/disable`` | ``PUT /os-services/disable-log-reason`` | ``PUT /os-services/freeze`` | ``PUT /os-services/thaw`` | ``PUT /os-services/failover_host`` | ``PUT /os-services/failover`` (microversion 3.26) | ``PUT /os-services/set-log`` | ``PUT /os-services/get-log`` - volume_extension:services:update - rule:admin_api - no - no - no - no - yes - no - yes * - Freeze a backend host. Secondary check; must also satisfy volume_extension:services:update to make this call. - ``PUT /os-services/freeze`` - volume:freeze_host - rule:admin_api - no - no - no - no - yes - no - yes * - Thaw a backend host. Secondary check; must also satisfy volume_extension:services:update to make this call. - ``PUT /os-services/thaw`` - volume:thaw_host - rule:admin_api - no - no - no - no - yes - no - yes * - Failover a backend host. Secondary check; must also satisfy volume_extension:services:update to make this call. - | ``PUT /os-services/failover_host`` | ``PUT /os-services/failover`` (microversion 3.26) - volume:failover_host - rule:admin_api - no - no - no - no - yes - no - yes * - List all backend pools - ``GET /scheduler-stats/get_pools`` - scheduler_extension:scheduler_stats:get_pools - rule:admin_api - no - no - no - no - yes - no - yes * - | List, update or show hosts for a project | (NOTE: will be deprecated in Yoga and new policies introduced | for GETs and PUT) - | ``GET /os-hosts`` | ``PUT /os-hosts/{host_name}`` | ``GET /os-hosts/{host_id}`` - volume_extension:hosts - rule:admin_api - no - no - no - no - yes - no - yes * - Show limits with used limit attributes - ``GET /limits`` - limits_extension:used_limits - rule:admin_or_owner - yes - yes - yes - yes - yes - yes - yes * - List (in detail) of volumes which are available to manage - | ``GET /manageable_volumes`` | ``GET /manageable_volumes/detail`` - volume_extension:list_manageable - rule:admin_api - no - no - no - no - yes - no - yes * - Manage existing volumes - ``POST /manageable_volumes`` - volume_extension:volume_manage - rule:admin_api - no - no - no - no - yes - no - yes * - Unmanage a volume - ``POST /volumes/{volume_id}/action`` (os-unmanage) - volume_extension:volume_unmanage - rule:admin_api - no - no - no - no - yes - no - yes .. 
list-table:: Volume Types :header-rows: 1 * - functionality - API call - policy name - (old rule) - project-reader - project-member - project-admin - system-reader - system-admin - (old "owner") - (old "admin") * - | **DEPRECATED** | Create, update and delete volume type | (new policies for create/update/delete) - | ``POST /types`` | ``PUT /types/{type_id}`` | ``DELETE /types`` - volume_extension:types_manage - rule:admin_api - no - no - no - no - yes - no - yes * - | **NEW** | Create a volume type - ``POST /types`` - volume_extension:type_create - (new policy) - no - no - no - no - yes - no - yes * - | **NEW** | Update a volume type - ``PUT /types/{type_id}`` - volume_extension:type_update - (new policy) - no - no - no - no - yes - no - yes * - | **NEW** | Delete a volume type - ``DELETE /types/{type_id}`` - volume_extension:type_delete - (new policy) - no - no - no - no - yes - no - yes * - Show a specific volume type - ``GET /types/{type_id}`` - volume_extension:type_get - empty - yes - yes - yes - yes - yes - yes - yes * - List volume types - ``GET /types`` - volume_extension:type_get_all - empty - yes - yes - yes - yes - yes - yes - yes * - | **DEPRECATED** | Base policy for all volume type encryption type operations | (NOTE: can't use this anymore, because it gives GET and POST same permissions) - Convenience default policy for the situation where you don't want to configure all the ``volume_type_encryption`` policies separately - volume_extension:volume_type_encryption - rule:admin_api - - - - - - no - yes * - Create volume type encryption - ``POST /types/{type_id}/encryption`` - volume_extension:volume_type_encryption:create - rule:volume_extension:volume_type_encryption - no - no - no - no - yes - no - yes * - Show a volume type's encryption type, show an encryption specs item - | ``GET /types/{type_id}/encryption`` | ``GET /types/{type_id}/encryption/{key}`` - volume_extension:volume_type_encryption:get - rule:volume_extension:volume_type_encryption - no - no - no - no - yes - no - yes * - Update volume type encryption - ``PUT /types/{type_id}/encryption/{encryption_id}`` - volume_extension:volume_type_encryption:update - rule:volume_extension:volume_type_encryption - no - no - no - no - yes - no - yes * - Delete volume type encryption - ``DELETE /types/{type_id}/encryption/{encryption_id}`` - volume_extension:volume_type_encryption:delete - rule:volume_extension:volume_type_encryption - no - no - no - no - yes - no - yes * - List or show volume type with extra specs attribute - | Adds ``extra_specs`` to the following responses: | ``GET /types/{type_id}`` | ``GET /types`` | The ability to make these API calls is governed by other policies. - volume_extension:access_types_extra_specs - empty - yes - yes - yes - yes - yes - yes - yes * - List or show volume type with access type qos specs id attribute - | Adds ``qos_specs_id`` to the following responses: | ``GET /types/{type_id}`` | ``GET /types`` | The ability to make these API calls is governed by other policies. - volume_extension:access_types_qos_specs_id - rule:admin_api - no - no - no - no - yes - no - yes * - Show whether a volume type is public in the type response - | Adds ``os-volume-type-access:is_public`` to the following responses: | ``GET /types`` | ``GET /types/{type_id}`` | ``POST /types`` | The ability to make these API calls is governed by other policies. 
- volume_extension:volume_type_access - rule:admin_or_owner - no - yes - yes - no - yes - no - yes * - | **NEW** | List private volume type access detail, that is, list the projects that have access to this type | (was formerly controlled by volume_extension:volume_type_access) - ``GET /types/{type_id}/os-volume-type-access`` - volume_extension:volume_type_access:get_all_for_type - (new policy) - no - no - no - no - yes - n/a - n/a * - Add volume type access for project - ``POST /types/{type_id}/action`` (addProjectAccess) - volume_extension:volume_type_access:addProjectAccess - rule:admin_api - no - no - no - no - yes - no - yes * - Remove volume type access for project - ``POST /types/{type_id}/action`` (removeProjectAccess) - volume_extension:volume_type_access:removeProjectAccess - rule:admin_api - no - no - no - no - yes - no - yes .. list-table:: Volume Actions :header-rows: 1 * - functionality - API call - policy name - (old rule) - project-reader - project-member - project-admin - system-reader - system-admin - (old "owner") - (old "admin") * - Extend a volume - ``POST /volumes/{volume_id}/action`` (os-extend) - volume:extend - rule:admin_or_owner - no - yes - yes - no - yes - yes - yes * - Extend an attached volume - | Microversion 3.42 | ``POST /volumes/{volume_id}/action`` (os-extend) - volume:extend_attached_volume - rule:admin_or_owner - no - yes - yes - no - yes - yes - yes * - Revert a volume to a snapshot - | Microversion 3.40 | ``POST /volumes/{volume_id}/action`` (revert) - volume:revert_to_snapshot - rule:admin_or_owner - no - yes - yes - no - yes - yes - yes * - Reset status of a volume - ``POST /volumes/{volume_id}/action`` (os-reset_status) - volume_extension:volume_admin_actions:reset_status - rule:admin_api - no - no - no - no - yes - no - yes * - Retype a volume - ``POST /volumes/{volume_id}/action`` (os-retype) - volume:retype - rule:admin_or_owner - no - yes - yes - no - yes - yes - yes * - Update a volume's readonly flag - ``POST /volumes/{volume_id}/action`` (os-update_readonly_flag) - volume:update_readonly_flag - rule:admin_or_owner - no - yes - yes - no - yes - yes - yes * - Force delete a volume - ``POST /volumes/{volume_id}/action`` (os-force_delete) - volume_extension:volume_admin_actions:force_delete - rule:admin_api - no - no - no - no - yes - no - yes * - Upload a volume to image with public visibility - ``POST /volumes/{volume_id}/action`` (os-volume_upload_image) - volume_extension:volume_actions:upload_public - rule:admin_api - no - no - no - no - yes - no - yes * - Upload a volume to image - ``POST /volumes/{volume_id}/action`` (os-volume_upload_image) - volume_extension:volume_actions:upload_image - rule:admin_or_owner - no - yes - yes - no - yes - yes - yes * - Force detach a volume. 
- ``POST /volumes/{volume_id}/action`` (os-force_detach) - volume_extension:volume_admin_actions:force_detach - rule:admin_api - no - no - no - no - yes - no - yes * - Migrate a volume to a specified host - ``POST /volumes/{volume_id}/action`` (os-migrate_volume) - volume_extension:volume_admin_actions:migrate_volume - rule:admin_api - no - no - no - no - yes - no - yes * - Complete a volume migration - ``POST /volumes/{volume_id}/action`` (os-migrate_volume_completion) - volume_extension:volume_admin_actions:migrate_volume_completion - rule:admin_api - no - no - no - no - yes - no - yes * - Initialize volume attachment - ``POST /volumes/{volume_id}/action`` (os-initialize_connection) - volume_extension:volume_actions:initialize_connection - rule:admin_or_owner - no - yes - yes - no - yes - yes - yes * - Terminate volume attachment - ``POST /volumes/{volume_id}/action`` (os-terminate_connection) - volume_extension:volume_actions:terminate_connection - rule:admin_or_owner - no - yes - yes - no - yes - yes - yes * - Roll back volume status to 'in-use' - ``POST /volumes/{volume_id}/action`` (os-roll_detaching) - volume_extension:volume_actions:roll_detaching - rule:admin_or_owner - no - yes - yes - no - yes - yes - yes * - Mark volume as reserved - ``POST /volumes/{volume_id}/action`` (os-reserve) - volume_extension:volume_actions:reserve - rule:admin_or_owner - no - yes - yes - no - yes - yes - yes * - Unmark volume as reserved - ``POST /volumes/{volume_id}/action`` (os-unreserve) - volume_extension:volume_actions:unreserve - rule:admin_or_owner - no - yes - yes - no - yes - yes - yes * - Begin detach volumes - ``POST /volumes/{volume_id}/action`` (os-begin_detaching) - volume_extension:volume_actions:begin_detaching - rule:admin_or_owner - no - yes - yes - no - yes - yes - yes * - Add attachment metadata - ``POST /volumes/{volume_id}/action`` (os-attach) - volume_extension:volume_actions:attach - rule:admin_or_owner - no - yes - yes - no - yes - yes - yes * - Clear attachment metadata - ``POST /volumes/{volume_id}/action`` (os-detach) - volume_extension:volume_actions:detach - rule:admin_or_owner - no - yes - yes - no - yes - yes - yes .. list-table:: Volume Transfers :header-rows: 1 * - functionality - API call - policy name - (old rule) - project-reader - project-member - project-admin - system-reader - system-admin - (old "owner") - (old "admin") * - List volume transfer - | ``GET /os-volume-transfer`` | ``GET /os-volume-transfer/detail`` | ``GET /volume-transfers`` | ``GET /volume-transfers/detail`` - volume:get_all_transfers - rule:admin_or_owner - yes - yes - yes - yes - yes - yes - yes * - Create a volume transfer - | ``POST /os-volume-transfer`` | ``POST /volume-transfers`` - volume:create_transfer - rule:admin_or_owner - no - yes - yes - no - yes - yes - yes * - Show one specified volume transfer - | ``GET /os-volume-transfer/{transfer_id}`` | ``GET /volume-transfers/{transfer_id}`` - volume:get_transfer - rule:admin_or_owner - yes - yes - yes - yes - yes - yes - yes * - Accept a volume transfer - | ``POST /os-volume-transfer/{transfer_id}/accept`` | ``POST /volume-transfers/{transfer_id}/accept`` - volume:accept_transfer - empty - no - yes - yes - no - yes - yes - yes * - Delete volume transfer - | ``DELETE /os-volume-transfer/{transfer_id}`` | ``DELETE /volume-transfers/{transfer_id}`` - volume:delete_transfer - rule:admin_or_owner - no - yes - yes - no - yes - yes - yes .. 
list-table:: Volume Metadata :header-rows: 1 * - functionality - API call - policy name - (old rule) - project-reader - project-member - project-admin - system-reader - system-admin - (old "owner") - (old "admin") * - Show volume's metadata or one specified metadata with a given key. - | ``GET /volumes/{volume_id}/metadata`` | ``GET /volumes/{volume_id}/metadata/{key}`` | ``POST /volumes/{volume_id}/action`` (os-show_image_metadata) - volume:get_volume_metadata - rule:admin_or_owner - yes - yes - yes - yes - yes - yes - yes * - Create volume metadata - ``POST /volumes/{volume_id}/metadata`` - volume:create_volume_metadata - rule:admin_or_owner - no - yes - yes - no - yes - yes - yes * - Update volume's metadata or one specified metadata with a given key - | ``PUT /volumes/{volume_id}/metadata`` | ``PUT /volumes/{volume_id}/metadata/{key}`` - volume:update_volume_metadata - rule:admin_or_owner - no - yes - yes - no - yes - yes - yes * - Delete volume's specified metadata with a given key - ``DELETE /volumes/{volume_id}/metadata/{key}`` - volume:delete_volume_metadata - rule:admin_or_owner - no - yes - yes - no - yes - yes - yes * - | **DEPRECATED** | Volume's image metadata related operation, create, delete, show and list - | (NOTE: new policies are introduced below to split GET and POST) | Microversion 3.4 | ``GET /volumes/detail`` | ``GET /volumes/{volume_id}`` | ``POST /volumes/{volume_id}/action`` (os-set_image_metadata) | ``POST /volumes/{volume_id}/action`` (os-unset_image_metadata) | (NOTE: ``POST /volumes/{volume_id}/action`` (os-show_image_metadata) is governed by volume:get_volume_metadata - volume_extension:volume_image_metadata - rule:admin_or_owner - no - yes - yes - no - yes - yes - yes * - | **NEW** | Include volume's image metadata in volume detail responses - | Microversion 3.4 | ``GET /volumes/detail`` | ``GET /volumes/{volume_id}`` | The ability to make these API calls is governed by other policies. - volume_extension:volume_image_metadata:show - (new policy) - yes - yes - yes - yes - yes - yes - yes * - | **NEW** | Set image metadata for a volume - | Microversion 3.4 | ``POST /volumes/{volume_id}/action`` (os-set_image_metadata) - volume_extension:volume_image_metadata:set - (new policy) - no - yes - yes - no - yes - yes - yes * - | **NEW** | Remove specific image metadata from a volume - | Microversion 3.4 | ``POST /volumes/{volume_id}/action`` (os-unset_image_metadata) - volume_extension:volume_image_metadata:remove - (new policy) - no - yes - yes - no - yes - yes - yes * - Update volume admin metadata. - | This permission is required to complete the following operations: | ``POST /volumes/{volume_id}/action`` (os-update_readonly_flag) | ``POST /volumes/{volume_id}/action`` (os-attach) | The ability to make these API calls is governed by other policies. - volume:update_volume_admin_metadata - rule:admin_api - no - no - no - no - yes - no - yes .. 
list-table:: Volume Type Extra-Specs :header-rows: 1 * - functionality - API call - policy name - (old rule) - project-reader - project-member - project-admin - system-reader - system-admin - (old "owner") - (old "admin") * - List type extra specs - ``GET /types/{type_id}/extra_specs`` - volume_extension:types_extra_specs:index - empty - yes - yes - yes - yes - yes - yes - yes * - Create type extra specs - ``POST /types/{type_id}/extra_specs`` - volume_extension:types_extra_specs:create - rule:admin_api - no - no - no - no - yes - no - yes * - Show one specified type extra specs - ``GET /types/{type_id}/extra_specs/{extra_spec_key}`` - volume_extension:types_extra_specs:show - empty - yes - yes - yes - yes - yes - yes - yes * - Update type extra specs - ``PUT /types/{type_id}/extra_specs/{extra_spec_key}`` - volume_extension:types_extra_specs:update - rule:admin_api - no - no - no - no - yes - no - yes * - Delete type extra specs - ``DELETE /types/{type_id}/extra_specs/{extra_spec_key}`` - volume_extension:types_extra_specs:delete - rule:admin_api - no - no - no - no - yes - no - yes * - Include extra_specs fields that may reveal sensitive information about the deployment that should not be exposed to end users in various volume-type responses that show extra_specs. - | ``GET /types`` | ``GET /types/{type_id}`` | ``GET /types/{type_id}/extra_specs`` | ``GET /types/{type_id}/extra_specs/{extra_spec_key}`` | The ability to make these API calls is governed by other policies. - volume_extension:types_extra_specs:read_sensitive - rule:admin_api - no - no - no - no - yes - no - yes .. list-table:: Volumes :header-rows: 1 * - functionality - API call - policy name - (old rule) - project-reader - project-member - project-admin - system-reader - system-admin - (old "owner") - (old "admin") * - Create volume - ``POST /volumes`` - volume:create - empty - no - yes - yes - no - yes - yes - yes * - Create volume from image - ``POST /volumes`` - volume:create_from_image - empty - no - yes - yes - no - yes - yes - yes * - Show volume - ``GET /volumes/{volume_id}`` - volume:get - rule:admin_or_owner - yes - yes - yes - yes - yes - yes - yes * - List volumes or get summary of volumes - | ``GET /volumes`` | ``GET /volumes/detail`` | ``GET /volumes/summary`` - volume:get_all - rule:admin_or_owner - yes - yes - yes - yes - yes - yes - yes * - Update volume or update a volume's bootable status - | ``PUT /volumes`` | ``POST /volumes/{volume_id}/action`` (os-set_bootable) - volume:update - rule:admin_or_owner - no - yes - yes - no - yes - yes - yes * - Delete volume - ``DELETE /volumes/{volume_id}`` - volume:delete - rule:admin_or_owner - no - yes - yes - no - yes - yes - yes * - Force Delete a volume (Microversion 3.23) - ``DELETE /volumes/{volume_id}?force=true`` - volume:force_delete - rule:admin_api - no - no - no - no - yes - no - yes * - List or show volume with host attribute - | Adds ``os-vol-host-attr:host`` to the following responses: | ``GET /volumes/{volume_id}`` | ``GET /volumes/detail`` | The ability to make these API calls is governed by other policies. - volume_extension:volume_host_attribute - rule:admin_api - no - no - no - no - yes - no - yes * - List or show volume with "tenant attribute" (actually, the project ID) - | Adds ``os-vol-tenant-attr:tenant_id`` to the following responses: | ``GET /volumes/{volume_id}`` | ``GET /volumes/detail`` | The ability to make these API calls is governed by other policies. 
- volume_extension:volume_tenant_attribute - rule:admin_or_owner - yes - yes - yes - yes - yes - yes - yes * - List or show volume with migration status attribute - | Adds ``os-vol-mig-status-attr:migstat`` to the following responses: | ``GET /volumes/{volume_id}`` | ``GET /volumes/detail`` | The ability to make these API calls is governed by other policies. - volume_extension:volume_mig_status_attribute - rule:admin_api - no - no - no - no - yes - no - yes * - Show volume's encryption metadata - | ``GET /volumes/{volume_id}/encryption`` | ``GET /volumes/{volume_id}/encryption/{encryption_key}`` - volume_extension:volume_encryption_metadata - rule:admin_or_owner - yes - yes - yes - yes - yes - yes - yes * - Create multiattach capable volume - | Indirectly affects the success of these API calls: | ``POST /volumes`` | ``POST /volumes/{volume_id}/action`` (os-retype) | The ability to make these API calls is governed by other policies. - volume:multiattach - rule:admin_or_owner - no - yes - yes - no - yes - yes - yes .. list-table:: Default Volume Types (Microversion 3.62) :header-rows: 1 * - functionality - API call - policy name - (old rule) - project-reader - project-member - project-admin - system-reader - system-admin - (old "owner") - (old "admin") * - Set or update default volume type for a project - ``PUT /default-types`` - volume_extension:default_set_or_update - rule:system_or_domain_or_project_admin - no - no - yes - no - yes - no - yes * - Get default type for a project - | ``GET /default-types/{project-id}`` | (Note: a project-\* persona can always determine their effective default-type by making the ``GET /v3/{project_id}/types/default`` call, which is governed by the volume_extension:type_get policy.) - volume_extension:default_get - rule:system_or_domain_or_project_admin - no - no - yes - no - yes - no - yes * - Get all default types - ``GET /default-types/`` - volume_extension:default_get_all - role:admin and system_scope:all - no - no - no - no - yes - no - yes * - Unset default type for a project - ``DELETE /default-types/{project-id}`` - volume_extension:default_unset - rule:system_or_domain_or_project_admin - no - no - yes - no - yes - no - yes ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/doc/source/configuration/block-storage/policy.rst0000664000175000017500000000063100000000000024570 0ustar00zuulzuul00000000000000.. _policy-configuration: ==================== Policy configuration ==================== Configuration ~~~~~~~~~~~~~ The following is an overview of all available policies in Cinder. For information on how to write a custom policy file to modify these policies, see :ref:`policy-file` in the Cinder configuration documentation. .. 
show-policy:: :config-file: tools/config/cinder-policy-generator.conf ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315577.4591217 cinder-27.0.0/doc/source/configuration/block-storage/samples/0000775000175000017500000000000000000000000024203 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/doc/source/configuration/block-storage/samples/api-paste.ini.inc0000664000175000017500000000363600000000000027347 0ustar00zuulzuul00000000000000############# # OpenStack # ############# [composite:osapi_volume] use = call:cinder.api:root_app_factory /: apiversions /v3: openstack_volume_api_v3 [composite:openstack_volume_api_v3] use = call:cinder.api.middleware.auth:pipeline_factory noauth = cors http_proxy_to_wsgi request_id faultwrap sizelimit osprofiler noauth apiv3 noauth_include_project_id = cors http_proxy_to_wsgi request_id faultwrap sizelimit osprofiler noauth_include_project_id apiv3 keystone = cors http_proxy_to_wsgi request_id faultwrap sizelimit osprofiler authtoken keystonecontext apiv3 keystone_nolimit = cors http_proxy_to_wsgi request_id faultwrap sizelimit osprofiler authtoken keystonecontext apiv3 [filter:request_id] paste.filter_factory = oslo_middleware.request_id:RequestId.factory [filter:http_proxy_to_wsgi] paste.filter_factory = oslo_middleware.http_proxy_to_wsgi:HTTPProxyToWSGI.factory [filter:cors] paste.filter_factory = oslo_middleware.cors:filter_factory oslo_config_project = cinder [filter:faultwrap] paste.filter_factory = cinder.api.middleware.fault:FaultWrapper.factory [filter:osprofiler] paste.filter_factory = osprofiler.web:WsgiMiddleware.factory [filter:noauth] paste.filter_factory = cinder.api.middleware.auth:NoAuthMiddleware.factory [filter:noauth_include_project_id] paste.filter_factory = cinder.api.middleware.auth:NoAuthMiddlewareIncludeProjectID.factory [filter:sizelimit] paste.filter_factory = oslo_middleware.sizelimit:RequestBodySizeLimiter.factory [app:apiv3] paste.app_factory = cinder.api.v3.router:APIRouter.factory [pipeline:apiversions] pipeline = cors http_proxy_to_wsgi faultwrap osvolumeversionapp [app:osvolumeversionapp] paste.app_factory = cinder.api.versions:Versions.factory ########## # Shared # ########## [filter:keystonecontext] paste.filter_factory = cinder.api.middleware.auth:CinderKeystoneContext.factory [filter:authtoken] paste.filter_factory = keystonemiddleware.auth_token:filter_factory ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/doc/source/configuration/block-storage/samples/api-paste.ini.rst0000664000175000017500000000023500000000000027376 0ustar00zuulzuul00000000000000============= api-paste.ini ============= Use the ``api-paste.ini`` file to configure the Block Storage API service. .. literalinclude:: api-paste.ini.inc ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/doc/source/configuration/block-storage/samples/cinder.conf.rst0000664000175000017500000000103200000000000027121 0ustar00zuulzuul00000000000000=========== cinder.conf =========== The ``cinder.conf`` file is installed in ``/etc/cinder`` by default. When you manually install the Block Storage service, the options in the ``cinder.conf`` file are set to default values. .. only:: html The sample configuration file can also be viewed in `file form <../../../_static/cinder.conf.sample>`_. 
.. literalinclude:: ../../../_static/cinder.conf.sample :language: ini .. only:: latex See the on-line version of this documentation for the full example config file. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/doc/source/configuration/block-storage/samples/index.rst0000664000175000017500000000056000000000000026045 0ustar00zuulzuul00000000000000.. _block-storage-sample-configuration-file: ================================================ Block Storage service sample configuration files ================================================ All the files in this section can be found in ``/etc/cinder``. .. toctree:: :maxdepth: 2 cinder.conf.rst api-paste.ini.rst policy.yaml.rst rootwrap.conf.rst 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/doc/source/configuration/block-storage/samples/policy.yaml.rst0000664000175000017500000000515300000000000027201 0ustar00zuulzuul00000000000000.. _policy-file: =========== policy.yaml =========== The ``policy.yaml`` file defines additional access controls that apply to the Block Storage service. Prior to Cinder 12.0.0 (the Queens release), a JSON policy file was required to run Cinder. From the Queens release onward, the following hold: * It is possible to run Cinder safely without a policy file, as sensible default values are defined in the code. * If you wish to run Cinder with policies different from the default, you may write a policy file. * Given that JSON does not allow comments, we recommend using YAML to write a custom policy file. (Also, see next item.) * OpenStack has deprecated the use of a JSON policy file since the Wallaby release (Cinder 18.0.0). If you are still using the JSON format, there is an `oslopolicy-convert-json-to-yaml`__ tool that will migrate your existing JSON-formatted policy file to YAML in a backward-compatible way. .. __: https://docs.openstack.org/oslo.policy/latest/cli/oslopolicy-convert-json-to-yaml.html * If you supply a custom policy file, you only need to supply entries for the policies you wish to change from their default values. For instance, if you want to change the default value of "volume:create", you only need to keep this single rule in your policy config file. * The default policy file location is ``/etc/cinder/policy.yaml``. You may override this by specifying a different file location as the value of the ``policy_file`` configuration option in the ``[oslo_policy]`` section of the Cinder configuration file. * Instructions for generating a sample ``policy.yaml`` file directly from the Cinder source code can be found in the file ``README-policy.generate.md`` in the ``etc/cinder`` directory in the Cinder `source code repository `_ (or its `github mirror `_). .. only:: html The following provides a listing of the default policies. It is not recommended to copy this file into ``/etc/cinder`` unless you are planning on providing a different policy for an operation that is not the default. The sample policy file can also be viewed in `file form <../../../_static/cinder.policy.yaml.sample>`_. .. literalinclude:: ../../../_static/cinder.policy.yaml.sample :language: ini .. only:: latex A sample policy file is available in the online version of this documentation. Make sure you are looking at the sample file for the OpenStack release you are running, as the available policy rules and their default values may change from release to release. 
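To make the guidance above concrete, the following is a minimal sketch of a custom ``/etc/cinder/policy.yaml`` that overrides a single policy; the rule string is an illustrative assumption, not the shipped default, and every policy not listed keeps its in-code default:

.. code-block:: yaml

   # Hypothetical override: restrict volume creation to project administrators.
   "volume:create": "role:admin and project_id:%(project_id)s"

If you are migrating from a JSON policy file, the conversion tool mentioned above is typically invoked along the following lines (the options shown are the standard oslo.policy ones; check ``--help`` on your installed version):

.. code-block:: console

   $ oslopolicy-convert-json-to-yaml --namespace cinder \
       --policy-file /etc/cinder/policy.json \
       --output-file /etc/cinder/policy.yaml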
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/doc/source/configuration/block-storage/samples/rootwrap.conf.inc0000664000175000017500000000174000000000000027501 0ustar00zuulzuul00000000000000# Configuration for cinder-rootwrap # This file should be owned by (and only-writeable by) the root user [DEFAULT] # List of directories to load filter definitions from (separated by ','). # These directories MUST all be only writeable by root ! filters_path=/etc/cinder/rootwrap.d,/usr/share/cinder/rootwrap # List of directories to search executables in, in case filters do not # explicitely specify a full path (separated by ',') # If not specified, defaults to system PATH environment variable. # These directories MUST all be only writeable by root ! exec_dirs=/sbin,/usr/sbin,/bin,/usr/bin,/usr/local/bin,/usr/local/sbin,/usr/lpp/mmfs/bin # Enable logging to syslog # Default value is False use_syslog=False # Which syslog facility to use. # Valid values include auth, authpriv, syslog, local0, local1... # Default value is 'syslog' syslog_log_facility=syslog # Which messages to log. # INFO means log all usage # ERROR means only log unsuccessful attempts syslog_log_level=ERROR ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/doc/source/configuration/block-storage/samples/rootwrap.conf.rst0000664000175000017500000000040100000000000027531 0ustar00zuulzuul00000000000000============= rootwrap.conf ============= The ``rootwrap.conf`` file defines configuration values used by the ``rootwrap`` script when the Block Storage service must escalate its privileges to those of the root user. .. literalinclude:: rootwrap.conf.inc ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/doc/source/configuration/block-storage/scheduler-filters.rst0000664000175000017500000000024500000000000026716 0ustar00zuulzuul00000000000000.. _cinder_scheduler_filters: ======================== Cinder Scheduler Filters ======================== .. list-plugins:: cinder.scheduler.filters :detailed: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/doc/source/configuration/block-storage/scheduler-weights.rst0000664000175000017500000000021300000000000026713 0ustar00zuulzuul00000000000000========================== Cinder Scheduler Weights ========================== .. list-plugins:: cinder.scheduler.weights :detailed: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/doc/source/configuration/block-storage/schedulers.rst0000664000175000017500000000053000000000000025430 0ustar00zuulzuul00000000000000======================== Block Storage schedulers ======================== Block Storage service uses the ``cinder-scheduler`` service to determine how to dispatch block storage requests. For more information, see: .. toctree:: :maxdepth: 1 Cinder Scheduler Filters Cinder Scheduler Weights ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/doc/source/configuration/block-storage/service-token.rst0000664000175000017500000002402000000000000026045 0ustar00zuulzuul00000000000000==================== Using service tokens ==================== .. 
warning:: For all OpenStack releases after 2023-05-10, it is **required** that Nova be configured to send a service token to Cinder and Cinder to receive it. This is required by the fix for `CVE-2023-2088 `_. See `OSSA-2023-003 `_ for details. When a user initiates a request whose processing involves multiple services (for example, a boot-from-volume request to the Compute Service will require processing by the Block Storage Service, and may require processing by the Image Service), the user's token is handed from service to service. This ensures that the requestor is tracked correctly for audit purposes and also guarantees that the requestor has the appropriate permissions to do what needs to be done by the other services. There are several instances where we want to differentiate between a request coming from the user to one coming from another OpenStack service on behalf of the user: - **For security reasons** There are some operations in the Block Storage service, required for normal operations, that could be exploited by a malicious user to gain access to resources belonging to other users. By differentiating when the request comes directly from a user and when from another OpenStack service the Cinder service can protect the deployment. - To prevent long-running job failures: If the chain of operations takes a long time, the user's token may expire before the action is completed, leading to the failure of the user's original request. One way to deal with this is to set a long token life in Keystone, and this may be what you are currently doing. But this can be problematic for installations whose security policies prefer short user token lives. Beginning with the Queens release, an alternative solution is available. You have the ability to configure some services (particularly Nova and Cinder) to send a "service token" along with the user's token. When properly configured, the Identity Service will validate an expired user token *when it is accompanied by a valid service token*. Thus if the user's token expires somewhere during a long running chain of operations among various OpenStack services, the operations can continue. .. note:: There's nothing special about a service token. It's a regular token that has been requested by a service user. And there's nothing special about a service user, it's just a user that has been configured in the Identity Service to have specific roles that identify that user as a service. The key point here is that the "service token" doesn't need to have an extra long life -- it can have the same short life as all the other tokens because it will be a **fresh** (and hence valid) token accompanying the (possibly expired) user's token. .. _service-token-configuration: Configuration ~~~~~~~~~~~~~ To configure an OpenStack service that supports Service Tokens, like Nova and Cinder, to send a "service token" along with the user's token when it makes a request to another service, you must do the following: 1. Configure the "sender" services to send the token when calling other OpenStack services. 2. Configure each service's user to have a service role in Keystone. 3. Configure the "receiver" services to expect the token and validate it appropriately on reception. Send service token ^^^^^^^^^^^^^^^^^^ To send the token we need to add to our configuration file the ``[service_user]`` section and fill it in with the appropriate configuration for your service user (``username``, ``project_name``, etc.) 
and set the ``send_service_user_token`` option to ``true`` to tell the service to send the token. The configuration for the service user is basically the normal keystone user configuration like we would have in the ``[keystone_authtoken]`` section, but without the 2 configuration options we'll see in one of the next subsections to configure the reception of service tokens. In most cases we would use the same user we do in ``[keystone_authtoken]``, for example, for the nova configuration we would have something like this: .. code-block:: ini [service_user] send_service_user_token = True # Copy following options from [keystone_authtoken] section project_domain_name = Default project_name = service user_domain_name = Default password = abc123 username = nova auth_url = http://192.168.121.66/identity auth_type = password 
Service role ^^^^^^^^^^^^ A service role is nothing more than a Keystone role that allows a deployment to identify a service without the need to make them admins. That way there is no change in the privileges, but we are able to identify that the request is coming from another service and not a user. The default service role is ``service``, but we can use a different name or even have multiple service roles. For simplicity's sake we recommend having just one, ``service``. We need to make sure that the user configured in the ``[service_user]`` section for a project has a service role. Assuming our users are ``nova`` and ``cinder`` from the ``service`` project and the service role is going to be the default ``service``, we first check `if the role exists or not `_: .. code-block:: bash $ openstack role show service If it doesn't, we need `to create it `_: .. code-block:: bash $ openstack role create service Check if the users have the roles assigned or not: .. code-block:: bash $ openstack role assignment list --user cinder --project service --names $ openstack role assignment list --user nova --project service --names And if they are not, we `assign the role to those users `_: .. code-block:: bash $ openstack role add --user cinder --project service service $ openstack role add --user nova --project service service More information on creating service users can be found in `the Keystone documentation `_. 
Receive service token ^^^^^^^^^^^^^^^^^^^^^ Now we need to make the services validate the service token on reception; this part is crucial. The 2 configuration options in ``[keystone_authtoken]`` related to receiving service tokens are ``service_token_roles`` and ``service_token_roles_required``. The ``service_token_roles`` option contains a list of roles that we consider to belong to services. The service user must belong to at least one of them for its token to be considered a valid service token. The value defaults to ``service``, so we don't need to set it if that's the value we are using. Now we need to tell the keystone middleware to actually validate the service token and confirm that it's not only a valid token, but that it has one of the roles set in ``service_token_roles``. We do this by setting ``service_token_roles_required`` to ``true``. So we would have something like this in our ``[keystone_authtoken]`` section: .. code-block:: ini [keystone_authtoken] service_token_roles = service service_token_roles_required = true
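Because Cinder both receives service tokens (for example, from Nova) and sends its own on the calls it makes to other services, the two halves described above usually end up in the same ``cinder.conf``. The following sketch combines them; the ``cinder`` service user, its password and the ``auth_url`` are placeholder assumptions mirroring the Nova example above:

.. code-block:: ini

   [service_user]
   # Send a service token along with the user token on outgoing requests.
   send_service_user_token = True
   project_domain_name = Default
   project_name = service
   user_domain_name = Default
   password = abc123
   username = cinder
   auth_url = http://192.168.121.66/identity
   auth_type = password

   [keystone_authtoken]
   # ... the usual keystone_authtoken options, plus:
   # validate incoming service tokens and require a service role on them.
   service_token_roles = service
   service_token_roles_required = true

.. 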
_service-token-troubleshooting: Troubleshooting ~~~~~~~~~~~~~~~ If you've configured this feature and are still having long-running job failures, there are basically three degrees of freedom to take into account: (1) each source service, (2) each receiving service, and (3) the Identity Service (Keystone). 1. Each source service (basically, Nova and Cinder) must have the ``[service_user]`` section in the **source service** configuration file filled in as described in the :ref:`service-token-configuration` section above. .. note:: As of the 2023.1 release, Glance does not have the ability to pass service tokens. It can receive them, though. The place where you may still see a long running failure is when Glance is using a backend that requires Keystone validation (for example, the Swift backend) and the user token has expired. 2. There are several things to pay attention to in Keystone: * When ``service_token_roles_required`` is enabled you must make sure that any service user who will be contacting that receiving service (and for whom you want to enable "service token" usage) has one of the roles specified in the receiving services's ``service_token_roles`` setting. (This is a matter of creating and assigning roles using the Identity Service API, it's not a configuration file issue.) * Even with a service token, an expired user token cannot be used indefinitely. There's a Keystone configuration setting that controls this: ``[token]/allow_expired_window`` in the **Keystone** configuration file. The default setting is 2 days, so some security teams may want to lower this just on general principles. You need to make sure it's not set too low to be completely ineffective. * If you are using Fernet tokens, you need to be careful with your Fernet key rotation period. Whoever sets up the key rotation has to pay attention to the ``[token]/allow_expired_window`` setting as well as the obvious ``[token]/expiration`` setting. If keys get rotated faster than ``expiration`` + ``allow_expired_window`` seconds, an expired user token might not be decryptable, even though the request using it is being made within ``allow_expired_window`` seconds. To summarize, you need to be aware of: * Keystone: must allow a decent sized ``allow_expired_window`` (default is 2 days) * Each source service: must be configured to be able to create and send service tokens (default is OFF) * Each receiving service: has to be configured to accept service tokens (default is ON) and require role verification (default is OFF) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/doc/source/configuration/block-storage/volume-drivers.rst0000664000175000017500000000161500000000000026257 0ustar00zuulzuul00000000000000.. _volume-drivers: ============== Volume drivers ============== To use different volume drivers for the cinder-volume service, use the parameters described in these sections. These volume drivers are included in the `Block Storage repository `_. To set a volume driver, use the ``volume_driver`` flag. The default is: .. code-block:: ini volume_driver = cinder.volume.drivers.lvm.LVMVolumeDriver Note that some third party storage systems may maintain more detailed configuration documentation elsewhere. Contact your vendor for more information if needed. Driver Configuration Reference ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. sort by the drivers by open source software .. and the drivers for proprietary components .. 
toctree:: :glob: :maxdepth: 1 drivers/ceph-rbd-volume-driver drivers/lvm-volume-driver drivers/nfs-volume-driver drivers/* ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/doc/source/configuration/block-storage/volume-encryption.rst0000664000175000017500000001752100000000000026776 0ustar00zuulzuul00000000000000.. _volume-encryption: ============================================== Volume encryption supported by the key manager ============================================== We recommend the Key management service (barbican) for storing encryption keys used by the OpenStack volume encryption feature. It can be enabled by updating ``cinder.conf`` and ``nova.conf``. Initial configuration ~~~~~~~~~~~~~~~~~~~~~ Configuration changes need to be made to any nodes running the ``cinder-api`` or ``nova-compute`` server. Steps to update ``cinder-api`` servers: #. Edit the ``/etc/cinder/cinder.conf`` file to use Key management service as follows: * Look for the ``[key_manager]`` section. * Enter a new line directly below ``[key_manager]`` with the following: .. code-block:: ini backend = barbican #. Restart ``cinder-api``, ``cinder-volume`` and ``cinder-backup``. Update ``nova-compute`` servers: #. Install the ``python-barbicanclient`` Python package. #. Set up the Key Manager service by editing ``/etc/nova/nova.conf``: .. code-block:: ini [key_manager] backend = barbican .. note:: Use a '#' prefix to comment out the line in this section that begins with 'fixed_key'. #. Restart ``nova-compute``. Key management access control ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Special privileges can be assigned on behalf of an end user to allow them to manage their own encryption keys, which are required when creating the encrypted volumes. The Barbican `Default Policy `_ for access control specifies that only users with an ``admin`` or ``creator`` role can create keys. The policy is very flexible and can be modified. To assign the ``creator`` role, the admin must know the user ID, project ID, and creator role ID. See `Assign a role `_ for more information. An admin can list existing roles and associated IDs using the ``openstack role list`` command. If the creator role does not exist, the admin can `create the role `_. .. _create__encrypted_volume_type: Create an encrypted volume type ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Block Storage volume type assignment provides scheduling to a specific back-end, and can be used to specify actionable information for a back-end storage device. This example creates a volume type called LUKS and provides configuration information for the storage system to encrypt or decrypt the volume. #. Source your admin credentials: .. code-block:: console $ . admin-openrc.sh #. Create the volume type, marking the volume type as encrypted and providing the necessary details. Use ``--encryption-control-location`` to specify where encryption is performed: ``front-end`` (default) or ``back-end``. .. 
code-block:: console $ openstack volume type create --encryption-provider luks \ --encryption-cipher aes-xts-plain64 --encryption-key-size 256 --encryption-control-location front-end LUKS +-------------+----------------------------------------------------------------+ | Field | Value | +-------------+----------------------------------------------------------------+ | description | None | | encryption | cipher='aes-xts-plain64', control_location='front-end', | | | encryption_id='8584c43f-1666-43d1-a348-45cfcef72898', | | | key_size='256', | | | provider='luks' | | id | b9a8cff5-2f60-40d1-8562-d33f3bf18312 | | is_public | True | | name | LUKS | +-------------+----------------------------------------------------------------+ The OpenStack dashboard (horizon) supports creating the encrypted volume type as of the Kilo release. For instructions, see `Create an encrypted volume type `_. Create an encrypted volume ~~~~~~~~~~~~~~~~~~~~~~~~~~ Use the OpenStack dashboard (horizon), or :command:`openstack volume create` command to create volumes just as you normally would. For an encrypted volume, pass the ``--type LUKS`` flag, which specifies that the volume type will be ``LUKS`` (Linux Unified Key Setup). If that argument is left out, the default volume type, ``unencrypted``, is used. #. Source your admin credentials: .. code-block:: console $ . admin-openrc.sh #. Create an unencrypted 1GB test volume: .. code-block:: console $ openstack volume create --size 1 'unencrypted volume' #. Create an encrypted 1GB test volume: .. code-block:: console $ openstack volume create --size 1 --type LUKS 'encrypted volume' Notice the encrypted parameter; it will show ``True`` or ``False``. The option ``volume_type`` is also shown for easy review. Non-admin users need the ``creator`` role to store secrets in Barbican and to create encrypted volumes. As an administrator, you can give a user the creator role in the following way: .. code-block:: console $ openstack role add --project PROJECT --user USER creator For details, see the `Barbican Access Control page `_. Testing volume encryption ~~~~~~~~~~~~~~~~~~~~~~~~~ This is a simple test scenario to help validate your encryption. It assumes an LVM based Block Storage server. Perform these steps after completing the volume encryption setup and creating the volume-type for LUKS as described in the preceding sections. #. Create a VM: .. code-block:: console $ openstack server create --image cirros-0.3.1-x86_64-disk --flavor m1.tiny TESTVM #. Create two volumes, one encrypted and one not encrypted then attach them to your VM: .. code-block:: console $ openstack volume create --size 1 'unencrypted volume' $ openstack volume create --size 1 --type LUKS 'encrypted volume' $ openstack volume list $ openstack server add volume --device /dev/vdb TESTVM 'unencrypted volume' $ openstack server add volume --device /dev/vdc TESTVM 'encrypted volume' .. note:: The ``--device`` option to specify the mountpoint for the attached volume may not be where the block device is actually attached in the guest VM, it is used here for illustration purposes. #. On the VM, send some text to the newly attached volumes and synchronize them: .. code-block:: console # echo "Hello, world (unencrypted /dev/vdb)" >> /dev/vdb # echo "Hello, world (encrypted /dev/vdc)" >> /dev/vdc # sync && sleep 2 # sync && sleep 2 #. On the system hosting cinder volume services, synchronize to flush the I/O cache then test to see if your strings can be found: .. 
Testing volume encryption
~~~~~~~~~~~~~~~~~~~~~~~~~

This is a simple test scenario to help validate your encryption. It assumes
an LVM based Block Storage server.

Perform these steps after completing the volume encryption setup and creating
the volume-type for LUKS as described in the preceding sections.

#. Create a VM:

   .. code-block:: console

      $ openstack server create --image cirros-0.3.1-x86_64-disk --flavor m1.tiny TESTVM

#. Create two volumes, one encrypted and one not encrypted, then attach them
   to your VM:

   .. code-block:: console

      $ openstack volume create --size 1 'unencrypted volume'
      $ openstack volume create --size 1 --type LUKS 'encrypted volume'
      $ openstack volume list
      $ openstack server add volume --device /dev/vdb TESTVM 'unencrypted volume'
      $ openstack server add volume --device /dev/vdc TESTVM 'encrypted volume'

   .. note::

      The ``--device`` option used to specify the mountpoint for the attached
      volume may not be where the block device is actually attached in the
      guest VM; it is used here for illustration purposes.

#. On the VM, send some text to the newly attached volumes and synchronize
   them:

   .. code-block:: console

      # echo "Hello, world (unencrypted /dev/vdb)" >> /dev/vdb
      # echo "Hello, world (encrypted /dev/vdc)" >> /dev/vdc
      # sync && sleep 2
      # sync && sleep 2

#. On the system hosting cinder volume services, synchronize to flush the I/O
   cache, then test to see if your strings can be found:

   .. code-block:: console

      # sync && sleep 2
      # sync && sleep 2
      # strings /dev/stack-volumes/volume-* | grep "Hello"
      Hello, world (unencrypted /dev/vdb)

In the above example, the search returns the string written to the
unencrypted volume, but not the one written to the encrypted volume.

Known Issues
~~~~~~~~~~~~

Retyping an unencrypted volume to an encrypted volume of the same size will
most likely fail. Even though the volume is the same size as the source
volume, the encrypted volume needs extra space to store its encryption
metadata, so the new volume is not large enough to hold all of the data.
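The retype operation this caveat refers to is normally issued as shown below.
This is only a sketch of the command for context, assuming an existing volume
named ``unencrypted volume`` and the ``LUKS`` type created earlier; whether
it succeeds depends on the size overhead described above:

.. code-block:: console

   $ openstack volume set --type LUKS --retype-policy on-demand 'unencrypted volume'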
cinder-27.0.0/doc/source/configuration/figures/ (directory)
cinder-27.0.0/doc/source/configuration/figures/ceph-architecture.png (binary PNG image data omitted)
cinder-27.0.0/doc/source/configuration/figures/emc-enabler.png (binary PNG image data omitted)
݃U&04+Z?0DsAAA7nUO8w h޸q䵙HQQ"-oˆW]߲y/_~w=zw>}`߾}ּcb|y(CڴiʩyUR9,5B!SL47uUKB#|{"ƨ 7qج=+7E>./..nԩ 48qҸ9ܥYXXxk:vD͛5kVfJ`:(TfրG!)`&j*׼bVv5]/ӿ11?;cѢE} Jo٩3&ҟU-_駟+Q#¨d``if yB!fA͛Yv$=ޕ[U>x}氰0RHHWng~ᇣ*y(FJ";:tw{:ow,op0*[uYj!BinꪸQ\;qY24a/M\B@ O_ew{Sj^#!<N9 y=윮FO`w ȕM5o( <̙3 WPׯ^jdF]fրG!)`&jZ';nimw̋.M7?π7Z_{՝#%m6kWƼ;}w}3Z`w]iިQ{|gFD2|uYj!Binjaݛ={CZ,1pqސBJyr#l}x1Ͽ.?zzǒCׯ}ai,{kl5wwزiH :ͬ5B!SL47f$ooox]{5Cg-b!eѼ=^G;ֆv{sgcm}Wt}iy׷o_(#T@^~eH)[n/֭Br6eQP5B!x VrW9X)d7']]ǯ~7עU]gxx,r~?ѡ7E=&ӧOcTRD̙b빗W`@õP5B!x ߬]gl<t(w\t4zеʹO;' m7k~pnsw;DݻN)9s;H#IJe:w,?sĘhӈ1\8"4~y=l G4@%[:ּ^zʅwW ֭CzŊ)))ڵCԩ}ʅMcrـMn;"u'^L TN3k@#B05MkbEe5ﲁJv(Cz칵8yUW]Eۻa`p$I,no&jBBBuHͫ|BuYj!BinZscYGnjxwy*י ̓G{qMB'SD r42{jf 294m.%j^ݿk^64i&}!SO;! 9*Rr'r]6N@*UX${ Qlh AI] -"dQM`o&FӮ2(JfրG!)`&jM|16pwԴx`+}e5/""V1y邕HBE~)cC/BP@;JȔ4.!h!t6gZ#^gTGL5(hxK.AfրG!)`&jYWݫ~uH׭[7i P-['+QyNUUgDoϑkhFdua$$ic3k0ݔcFzC9 kMiܔG!Bj gNkxky`x"piy6]3hެu JvJ`Ҽ_|;]:u0u _EmM(%71JZ(Zdu▅i !*K[uQSeZF-,V. @r0:ͬ5B!S_L$H߽Q|RC*@uZtoW/\7@ԼAÇ=V˻A̙i%Кꫯڼ5o[)1~mJr7#EGDvdQ\ݒ4EBr@j6UCT-A3ҥCdz1IcRX8i}_KU1Hv]ȢMif yB!B&2#ד`f̶YpU4ZbYW_m:{LyfK 0]1lFՑJ+͊+*PoFBUr,@ঢ:@  98^Z`V~4NaբrO%3׸V9z/4Ye 4V{qi@.9XQe[P!B<DMsSG>sC1_Eh͓6]q^7כֿ}}XdPS$n oַzkVdVK.A;cjp!2#&^x`Ѿ$xXEP *0ЭsG$cHX #a,iv<im07q)[I˗i8!?ixHL))qکN3k@#BLlCwrC k2F͓UD̓f)QRV/VJa7^);7~XM_G<<r?k|b>7@g̙wyxk#$k/a:hT5B!x 榎 BεX-smdr'c6ɗ+[-S˨yr͖YiE*]˥ET2R7mp~;>k֬R@եxmJ%>cG9ϴ#:ͬ5B!SL47u< Wp*ɩZmrdǣ{T n%-m ~JdwZxcvVZ>/V*^.*gwหP!B<DMsS);ީS+u떙߾tg N3k@#B05M%Ywy]l/ތp_7l֬_o٦z P!B<DMsSF%뮻JNNϐ^b۵kwWb{t>O:ͬ5B!SL47e\6 ׿C +=zSױ};>@àN3k@#B05Mwsڄ>j׺? 2dxl#N3k@#B05M+?ݳ)s%ҔtVȋ:esZu% E1:gi-ÃG!B\CjXaLaʗMi@{JbX<@Å:ͬ5B!SL47p4jps%j+yCռ{l`X-T5B!x o&HWXi+4˽DB?'5_T >4}7총`_hɖ-[$`sITͥ*I:t(:+%,XES r"""o5UVTh޼y^^Z3aICTd/s}  XvZl0^`@Ȓ,ʆHJ@IY (]# .Sʫy(iHcw7f Oֽ@SQfСh:ör8 @ldԠD/yRrZ^ 7X Sc#&';Ba(cܵGqgB!gyH<tLD E]}% $"UFPX+ J&4&#,.$ShҠt"oLJki픴 HB\ cc4eDF%(6zLPHfqv0vM%v8HPXec4"j**tcP ؋^[L112#*]ش;w-jZTVضexiC5-V c61cJ;jFU(G|l!Bq|=Y7rudS!xJ.P6(U@#-_[ b| TG*-\H\36 }dw!]$*J3cwl[|sl2*tJll.;C&`-.:'hlFL4*c;X0=EB!B"2}|=Y7M5}E@6$_\6(zۃ\m+cKlP;1=ٗ_ǢQMu"_HKkQ@QugHVXJP5)cΆ%RFM鋧qgB!gI\k(U"0CrPnj_nF k,,#KU[Iol`ڠ{al9ڃV"*BHw &UH`ne;^ZNv!`%09Sc?ޭ5$i9@B!8ӔX hVҜ/~a"B!wˁkn{>+ 4^ss[7σG!B\ ,wPڽGU5jԨU {`. See https://bugs.launchpad.net/nova/+bug/2004555 for details. .. note:: The examples of common configurations for shared service and libraries, such as database connections and RPC messaging, can be seen in Cinder's sample configuration file: `cinder.conf.sample <../_static/cinder.conf.sample>`_. The Block Storage service works with many different storage drivers that you can configure by using these instructions. ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1759315577.463122 cinder-27.0.0/doc/source/configuration/tables/0000775000175000017500000000000000000000000021255 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/doc/source/configuration/tables/cinder-ibm_gpfs.inc0000664000175000017500000000345500000000000025007 0ustar00zuulzuul00000000000000.. _cinder-ibm_gpfs: .. list-table:: Description of Spectrum Scale volume driver configuration options :header-rows: 1 :class: config-ref-table * - Configuration option = Default value - Description * - **[DEFAULT]** - * - ``gpfs_images_dir`` = ``None`` - (String) Specifies the path of the Image service repository in GPFS. Leave undefined if not storing images in GPFS. * - ``gpfs_images_share_mode`` = ``None`` - (String) Specifies the type of image copy to be used. 
Set this when the Image service repository also uses GPFS so that image files can be transferred efficiently from the Image service to the Block Storage service. There are two valid values: "copy" specifies that a full copy of the image is made; "copy_on_write" specifies that copy-on-write optimization strategy is used and unmodified blocks of the image file are shared efficiently. * - ``gpfs_max_clone_depth`` = ``0`` - (Integer) Specifies an upper limit on the number of indirections required to reach a specific block due to snapshots or clones. A lengthy chain of copy-on-write snapshots or clones can have a negative impact on performance, but improves space utilization. 0 indicates unlimited clone depth. * - ``gpfs_mount_point_base`` = ``None`` - (String) Specifies the path of the GPFS directory where Block Storage volume and snapshot files are stored. * - ``gpfs_sparse_volumes`` = ``True`` - (Boolean) Specifies that volumes are created as sparse files which initially consume no space. If set to False, the volume is created as a fully allocated file, in which case, creation may take a significantly longer time. * - ``gpfs_storage_pool`` = ``system`` - (String) Specifies the storage pool that volumes are assigned to. By default, the system storage pool is used. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/doc/source/configuration/tables/cinder-ibm_gpfs_nfs.inc0000664000175000017500000000474200000000000025655 0ustar00zuulzuul00000000000000.. _cinder-ibm_gpfs_nfs: .. list-table:: Description of Spectrum Scale NFS volume driver configuration options :header-rows: 1 :class: config-ref-table * - Configuration option = Default value - Description * - **[DEFAULT]** - * - ``gpfs_images_dir`` = ``None`` - (String) Specifies the path of the Image service repository in GPFS. Leave undefined if not storing images in GPFS. * - ``gpfs_images_share_mode`` = ``None`` - (String) Specifies the type of image copy to be used. Set this when the Image service repository also uses GPFS so that image files can be transferred efficiently from the Image service to the Block Storage service. There are two valid values: "copy" specifies that a full copy of the image is made; "copy_on_write" specifies that copy-on-write optimization strategy is used and unmodified blocks of the image file are shared efficiently. * - ``gpfs_max_clone_depth`` = ``0`` - (Integer) Specifies an upper limit on the number of indirections required to reach a specific block due to snapshots or clones. A lengthy chain of copy-on-write snapshots or clones can have a negative impact on performance, but improves space utilization. 0 indicates unlimited clone depth. * - ``gpfs_mount_point_base`` = ``None`` - (String) Specifies the path of the GPFS directory where Block Storage volume and snapshot files are stored. * - ``gpfs_sparse_volumes`` = ``True`` - (Boolean) Specifies that volumes are created as sparse files which initially consume no space. If set to False, the volume is created as a fully allocated file, in which case, creation may take a significantly longer time. * - ``gpfs_storage_pool`` = ``system`` - (String) Specifies the storage pool that volumes are assigned to. By default, the system storage pool is used. * - ``nas_host`` = - (String) IP address or Hostname of NAS system. * - ``nas_login`` = ``admin`` - (String) User name to connect to NAS system. * - ``nas_password`` = - (String) Password to connect to NAS system. 
* - ``nas_private_key`` = - (String) Filename of private key to use for SSH authentication. * - ``nas_ssh_port`` = ``22`` - (Port number) SSH port to use to connect to NAS system. * - ``nfs_mount_point_base`` = ``$state_path/mnt`` - (String) Base dir containing mount points for NFS shares. * - ``nfs_shares_config`` = ``/etc/cinder/nfs_shares`` - (String) File with the list of available NFS shares. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/doc/source/configuration/tables/cinder-ibm_gpfs_remote.inc0000664000175000017500000000514600000000000026361 0ustar00zuulzuul00000000000000.. _cinder-ibm_gpfs_remote: .. list-table:: Description of Spectrum Scale Remote volume driver configuration options :header-rows: 1 :class: config-ref-table * - Configuration option = Default value - Description * - **[DEFAULT]** - * - ``gpfs_hosts`` = - (List) Comma-separated list of IP address or hostnames of GPFS nodes. * - ``gpfs_hosts_key_file`` = ``$state_path/ssh_known_hosts`` - (String) File containing SSH host keys for the gpfs nodes with which driver needs to communicate. Default=$state_path/ssh_known_hosts * - ``gpfs_images_dir`` = ``None`` - (String) Specifies the path of the Image service repository in GPFS. Leave undefined if not storing images in GPFS. * - ``gpfs_images_share_mode`` = ``None`` - (String) Specifies the type of image copy to be used. Set this when the Image service repository also uses GPFS so that image files can be transferred efficiently from the Image service to the Block Storage service. There are two valid values: "copy" specifies that a full copy of the image is made; "copy_on_write" specifies that copy-on-write optimization strategy is used and unmodified blocks of the image file are shared efficiently. * - ``gpfs_max_clone_depth`` = ``0`` - (Integer) Specifies an upper limit on the number of indirections required to reach a specific block due to snapshots or clones. A lengthy chain of copy-on-write snapshots or clones can have a negative impact on performance, but improves space utilization. 0 indicates unlimited clone depth. * - ``gpfs_mount_point_base`` = ``None`` - (String) Specifies the path of the GPFS directory where Block Storage volume and snapshot files are stored. * - ``gpfs_private_key`` = - (String) Filename of private key to use for SSH authentication. * - ``gpfs_sparse_volumes`` = ``True`` - (Boolean) Specifies that volumes are created as sparse files which initially consume no space. If set to False, the volume is created as a fully allocated file, in which case, creation may take a significantly longer time. * - ``gpfs_ssh_port`` = ``22`` - (Port number) SSH port to use. * - ``gpfs_storage_pool`` = ``system`` - (String) Specifies the storage pool that volumes are assigned to. By default, the system storage pool is used. * - ``gpfs_strict_host_key_policy`` = ``False`` - (Boolean) Option to enable strict gpfs host key checking while connecting to gpfs nodes. Default=False * - ``gpfs_user_login`` = ``root`` - (String) Username for GPFS nodes. * - ``gpfs_user_password`` = - (String) Password for GPFS node user. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/doc/source/configuration/tables/cinder-ibm_storage.inc0000664000175000017500000000262000000000000025505 0ustar00zuulzuul00000000000000.. _cinder-ibm_storage: .. 
list-table:: Description of IBM Storage driver configuration options :header-rows: 1 :class: config-ref-table * - Configuration option = Default value - Description * - **[DEFAULT]** - * - ``ds8k_devadd_unitadd_mapping`` = - (String) Mapping between IODevice address and unit address. * - ``ds8k_host_type`` = ``auto`` - (String) Set to zLinux if your OpenStack version is prior to Liberty and you're connecting to zLinux systems. Otherwise set to auto. Valid values for this parameter are: 'auto', 'AMDLinuxRHEL', 'AMDLinuxSuse', 'AppleOSX', 'Fujitsu', 'Hp', 'HpTru64', 'HpVms', 'LinuxDT', 'LinuxRF', 'LinuxRHEL', 'LinuxSuse', 'Novell', 'SGI', 'SVC', 'SanFsAIX', 'SanFsLinux', 'Sun', 'VMWare', 'Win2000', 'Win2003', 'Win2008', 'Win2012', 'iLinux', 'nSeries', 'pLinux', 'pSeries', 'pSeriesPowerswap', 'zLinux', 'iSeries'. * - ``ds8k_ssid_prefix`` = ``FF`` - (String) Set the first two digits of SSID * - ``proxy`` = ``cinder.volume.drivers.ibm.ibm_storage.proxy.IBMStorageProxy`` - (String) Proxy driver that connects to the IBM Storage Array * - ``san_clustername`` = - (String) Cluster name to use for creating volumes * - ``san_ip`` = - (String) IP address of SAN controller * - ``san_login`` = ``admin`` - (String) Username for SAN controller * - ``san_password`` = - (String) Password for SAN controller ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/doc/source/configuration/tables/cinder-infortrend.inc0000664000175000017500000000272300000000000025370 0ustar00zuulzuul00000000000000.. _cinder-infortrend: .. list-table:: Description of Infortrend volume driver configuration options :header-rows: 1 :class: config-ref-table * - Configuration option = Default value - Description * - **[DEFAULT]** - * - ``infortrend_cli_max_retries`` = ``5`` - (Integer) The maximum retry times if a command fails. * - ``infortrend_cli_path`` = ``/opt/bin/Infortrend/raidcmd_ESDS10.jar`` - (String) The Infortrend CLI absolute path. * - ``infortrend_cli_timeout`` = ``60`` - (Integer) The timeout for CLI in seconds. * - ``infortrend_cli_cache`` = ``False`` - (Boolean) The Infortrend CLI cache. Make sure the array is only managed by OpenStack, and it is only used by one cinder-volume node. Otherwise, never enable it! The data might be asynchronous if there were any other operations. * - ``infortrend_pools_name`` = ``None`` - (String) The Infortrend logical volumes name list. It is separated with comma. * - ``infortrend_iqn_prefix`` = ``iqn.2002-10.com.infortrend`` - (String) Infortrend iqn prefix for iSCSI. * - ``infortrend_slots_a_channels_id`` = ``None`` - (String) Infortrend raid channel ID list on Slot A for OpenStack usage. It is separated with comma. * - ``infortrend_slots_b_channels_id`` = ``None`` - (String) Infortrend raid channel ID list on Slot B for OpenStack usage. It is separated with comma. * - ``java_path`` = ``/usr/bin/java`` - (String) The Java absolute path. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/doc/source/configuration/tables/cinder-netapp_cdot_iscsi.inc0000664000175000017500000000712700000000000026713 0ustar00zuulzuul00000000000000.. _cinder-netapp_cdot_iscsi: .. 
list-table:: Description of NetApp cDOT iSCSI driver configuration options :header-rows: 1 :class: config-ref-table * - Configuration option = Default value - Description * - **[DEFAULT]** - * - ``netapp_login`` = ``None`` - (String) Administrative user account name used to access the storage system or proxy server. * - ``netapp_lun_ostype`` = ``None`` - (String) This option defines the type of operating system that will access a LUN exported from Data ONTAP; it is assigned to the LUN at the time it is created. * - ``netapp_lun_space_reservation`` = ``enabled`` - (String) This option determines if storage space is reserved for LUN allocation. If enabled, LUNs are thick provisioned. If space reservation is disabled, storage space is allocated on demand. * - ``netapp_password`` = ``None`` - (String) Password for the administrative user account specified in the netapp_login option. * - ``netapp_pool_name_search_pattern`` = ``(.+)`` - (String) This option is used to restrict provisioning to the specified pools. Specify the value of this option to be a regular expression which will be applied to the names of objects from the storage backend which represent pools in Cinder. This option is only utilized when the storage protocol is configured to use NVMe, iSCSI or FC. * - ``netapp_replication_aggregate_map`` = ``None`` - (Unknown) Multi opt of dictionaries to represent the aggregate mapping between source and destination back ends when using whole back end replication. For every source aggregate associated with a cinder pool (NetApp FlexVol), you would need to specify the destination aggregate on the replication target device. A replication target device is configured with the configuration option replication_device. Specify this option as many times as you have replication devices. Each entry takes the standard dict config form: netapp_replication_aggregate_map = backend_id:,src_aggr_name1:dest_aggr_name1,src_aggr_name2:dest_aggr_name2,... * - ``netapp_server_hostname`` = ``None`` - (String) The hostname (or IP address) for the storage system or proxy server. * - ``netapp_server_port`` = ``None`` - (Integer) The TCP port to use for communication with the storage system or proxy server. If not specified, Data ONTAP drivers will use 80 for HTTP and 443 for HTTPS. * - ``netapp_size_multiplier`` = ``1.2`` - (Floating point) The quantity to be multiplied by the requested volume size to ensure enough space is available on the virtual storage server (Vserver) to fulfill the volume creation request. Note: this option is deprecated and will be removed in favor of "reserved_percentage" in the Mitaka release. * - ``netapp_snapmirror_quiesce_timeout`` = ``3600`` - (Integer) The maximum time in seconds to wait for existing SnapMirror transfers to complete before aborting during a failover. * - ``netapp_storage_family`` = ``ontap_cluster`` - (String) The storage family type used on the storage system; the only valid value is ontap_cluster for using clustered Data ONTAP. * - ``netapp_storage_protocol`` = ``None`` - (String) The storage protocol to be used on the data path with the storage system. * - ``netapp_transport_type`` = ``http`` - (String) The transport protocol used when communicating with the storage system or proxy server. * - ``netapp_vserver`` = ``None`` - (String) This option specifies the virtual storage server (Vserver) name on the storage cluster on which provisioning of block storage volumes should occur. 
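The options above are typically grouped into a backend section of ``cinder.conf`` that is referenced from ``enabled_backends``. The following is only an illustrative sketch, not a recommended configuration: the section name, hostname, credentials and Vserver are made-up placeholders, and only a subset of the options documented above is shown.

.. code:: ini

   [DEFAULT]
   enabled_backends = netapp-iscsi-1

   [netapp-iscsi-1]
   # Unified NetApp driver; the protocol is selected with
   # netapp_storage_protocol rather than a per-protocol driver class.
   volume_driver = cinder.volume.drivers.netapp.common.NetAppDriver
   volume_backend_name = netapp-iscsi-1
   netapp_storage_family = ontap_cluster
   netapp_storage_protocol = iscsi
   # Management endpoint and credentials below are placeholders.
   netapp_server_hostname = cluster-mgmt.example.com
   netapp_server_port = 443
   netapp_transport_type = https
   netapp_login = openstack
   netapp_password = secret
   netapp_vserver = cinder-svm
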
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/doc/source/configuration/tables/cinder-netapp_cdot_nfs.inc0000664000175000017500000001060300000000000026360 0ustar00zuulzuul00000000000000.. _cinder-netapp_cdot_nfs: .. list-table:: Description of NetApp cDOT NFS driver configuration options :header-rows: 1 :class: config-ref-table * - Configuration option = Default value - Description * - **[DEFAULT]** - * - ``expiry_thres_minutes`` = ``720`` - (Integer) This option specifies the threshold for last access time for images in the NFS image cache. When a cache cleaning cycle begins, images in the cache that have not been accessed in the last M minutes, where M is the value of this parameter, will be deleted from the cache to create free space on the NFS share. * - ``netapp_copyoffload_tool_path`` = ``None`` - (String) This option specifies the path of the NetApp copy offload tool binary. Ensure that the binary has execute permissions set which allow the effective user of the cinder-volume process to execute the file. * - ``netapp_host_type`` = ``None`` - (String) This option defines the type of operating system for all initiators that can access a LUN. This information is used when mapping LUNs to individual hosts or groups of hosts. * - ``netapp_login`` = ``None`` - (String) Administrative user account name used to access the storage system or proxy server. * - ``netapp_lun_ostype`` = ``None`` - (String) This option defines the type of operating system that will access a LUN exported from Data ONTAP; it is assigned to the LUN at the time it is created. * - ``netapp_password`` = ``None`` - (String) Password for the administrative user account specified in the netapp_login option. * - ``netapp_pool_name_search_pattern`` = ``(.+)`` - (String) This option is used to restrict provisioning to the specified pools. Specify the value of this option to be a regular expression which will be applied to the names of objects from the storage backend which represent pools in Cinder. This option is only utilized when the storage protocol is configured to use iSCSI or FC. * - ``netapp_replication_aggregate_map`` = ``None`` - (Unknown) Multi opt of dictionaries to represent the aggregate mapping between source and destination back ends when using whole back end replication. For every source aggregate associated with a cinder pool (NetApp FlexVol), you would need to specify the destination aggregate on the replication target device. A replication target device is configured with the configuration option replication_device. Specify this option as many times as you have replication devices. Each entry takes the standard dict config form: netapp_replication_aggregate_map = backend_id:,src_aggr_name1:dest_aggr_name1,src_aggr_name2:dest_aggr_name2,... * - ``netapp_server_hostname`` = ``None`` - (String) The hostname (or IP address) for the storage system or proxy server. * - ``netapp_server_port`` = ``None`` - (Integer) The TCP port to use for communication with the storage system or proxy server. If not specified, Data ONTAP drivers will use 80 for HTTP and 443 for HTTPS. * - ``netapp_snapmirror_quiesce_timeout`` = ``3600`` - (Integer) The maximum time in seconds to wait for existing SnapMirror transfers to complete before aborting during a failover. * - ``netapp_storage_family`` = ``ontap_cluster`` - (String) The storage family type used on the storage system; the only valid value is ontap_cluster for using clustered Data ONTAP. 
* - ``netapp_storage_protocol`` = ``None`` - (String) The storage protocol to be used on the data path with the storage system. * - ``netapp_transport_type`` = ``http`` - (String) The transport protocol used when communicating with the storage system or proxy server. * - ``netapp_vserver`` = ``None`` - (String) This option specifies the virtual storage server (Vserver) name on the storage cluster on which provisioning of block storage volumes should occur. * - ``thres_avl_size_perc_start`` = ``20`` - (Integer) If the percentage of available space for an NFS share has dropped below the value specified by this option, the NFS image cache will be cleaned. * - ``thres_avl_size_perc_stop`` = ``60`` - (Integer) When the percentage of available space on an NFS share has reached the percentage specified by this option, the driver will stop clearing files from the NFS image cache that have not been accessed in the last M minutes, where M is the value of the expiry_thres_minutes configuration option. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/doc/source/configuration/tables/cinder-nexenta.inc0000664000175000017500000000575600000000000024671 0ustar00zuulzuul00000000000000.. _cinder-nexenta: .. list-table:: Description of Nexenta driver configuration options :header-rows: 1 :class: config-ref-table * - Configuration option = Default value - Description * - **[DEFAULT]** - * - ``nexenta_blocksize`` = ``4096`` - (Integer) Block size for datasets * - ``nexenta_chunksize`` = ``32768`` - (Integer) NexentaEdge iSCSI LUN object chunk size * - ``nexenta_client_address`` = - (String) NexentaEdge iSCSI Gateway client address for non-VIP service * - ``nexenta_dataset_compression`` = ``on`` - (String) Compression value for new ZFS folders. * - ``nexenta_dataset_dedup`` = ``off`` - (String) Deduplication value for new ZFS folders. * - ``nexenta_dataset_description`` = - (String) Human-readable description for the folder. * - ``nexenta_host`` = - (String) IP address of Nexenta SA * - ``nexenta_iscsi_target_portal_port`` = ``3260`` - (Integer) Nexenta target portal port * - ``nexenta_mount_point_base`` = ``$state_path/mnt`` - (String) Base directory that contains NFS share mount points * - ``nexenta_nbd_symlinks_dir`` = ``/dev/disk/by-path`` - (String) NexentaEdge logical path of directory to store symbolic links to NBDs * - ``nexenta_nms_cache_volroot`` = ``True`` - (Boolean) If set True cache NexentaStor appliance volroot option value. * - ``nexenta_password`` = ``nexenta`` - (String) Password to connect to Nexenta SA * - ``nexenta_rest_port`` = ``0`` - (Integer) HTTP(S) port to connect to Nexenta REST API server. If it is equal zero, 8443 for HTTPS and 8080 for HTTP is used * - ``nexenta_rest_protocol`` = ``auto`` - (String) Use http or https for REST connection (default auto) * - ``nexenta_rrmgr_compression`` = ``0`` - (Integer) Enable stream compression, level 1..9. 1 - gives best speed; 9 - gives best compression. * - ``nexenta_rrmgr_connections`` = ``2`` - (Integer) Number of TCP connections. * - ``nexenta_rrmgr_tcp_buf_size`` = ``4096`` - (Integer) TCP Buffer size in KiloBytes. * - ``nexenta_shares_config`` = ``/etc/cinder/nfs_shares`` - (String) File with the list of available nfs shares * - ``nexenta_sparse`` = ``False`` - (Boolean) Enables or disables the creation of sparse datasets * - ``nexenta_sparsed_volumes`` = ``True`` - (Boolean) Enables or disables the creation of volumes as sparsed files that take no space. 
If disabled (False), volume is created as a regular file, which takes a long time. * - ``nexenta_target_group_prefix`` = ``cinder/`` - (String) Prefix for iSCSI target groups on SA * - ``nexenta_target_prefix`` = ``iqn.1986-03.com.sun:02:cinder-`` - (String) IQN prefix for iSCSI targets * - ``nexenta_use_https`` = ``True`` - (Boolean) Use secure HTTP for REST connection (default True) * - ``nexenta_user`` = ``admin`` - (String) User name to connect to Nexenta SA * - ``nexenta_volume`` = ``cinder`` - (String) SA Pool that holds all volumes ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/doc/source/configuration/tables/cinder-nexenta5.inc0000664000175000017500000000354200000000000024745 0ustar00zuulzuul00000000000000.. _cinder-nexenta5: .. list-table:: Description of NexentaStor 5 driver configuration options :header-rows: 1 :class: config-ref-table * - Configuration option = Default value - Description * - **[DEFAULT]** - * - ``nexenta_dataset_compression`` = ``on`` - (String) Compression value for new ZFS folders. * - ``nexenta_dataset_dedup`` = ``off`` - (String) Deduplication value for new ZFS folders. * - ``nexenta_dataset_description`` = - (String) Human-readable description for the folder. * - ``nexenta_host`` = - (String) IP address of Nexenta SA * - ``nexenta_iscsi_target_portal_port`` = ``3260`` - (Integer) Nexenta target portal port * - ``nexenta_mount_point_base`` = ``$state_path/mnt`` - (String) Base directory that contains NFS share mount points * - ``nexenta_ns5_blocksize`` = ``32`` - (Integer) Block size for datasets * - ``nexenta_rest_port`` = ``0`` - (Integer) HTTP(S) port to connect to Nexenta REST API server. If it is equal zero, 8443 for HTTPS and 8080 for HTTP is used * - ``nexenta_rest_protocol`` = ``auto`` - (String) Use http or https for REST connection (default auto) * - ``nexenta_sparse`` = ``False`` - (Boolean) Enables or disables the creation of sparse datasets * - ``nexenta_sparsed_volumes`` = ``True`` - (Boolean) Enables or disables the creation of volumes as sparsed files that take no space. If disabled (False), volume is created as a regular file, which takes a long time. * - ``nexenta_use_https`` = ``True`` - (Boolean) Use secure HTTP for REST connection (default True) * - ``nexenta_user`` = ``admin`` - (String) User name to connect to Nexenta SA * - ``nexenta_volume`` = ``cinder`` - (String) SA Pool that holds all volumes * - ``nexenta_volume_group`` = ``iscsi`` - (String) Volume group for ns5 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/doc/source/configuration/tables/cinder-prophetstor_dpl.inc0000664000175000017500000000147500000000000026451 0ustar00zuulzuul00000000000000.. _cinder-prophetstor_dpl: .. list-table:: Description of ProphetStor Fibre Channel and iSCSi drivers configuration options :header-rows: 1 :class: config-ref-table * - Configuration option = Default value - Description * - **[DEFAULT]** - * - ``dpl_pool`` = - (String) DPL pool uuid in which DPL volumes are stored. * - ``dpl_port`` = ``8357`` - (Port number) DPL port number. * - ``iscsi_port`` = ``3260`` - (Port number) The port that the iSCSI daemon is listening on * - ``san_ip`` = - (String) IP address of SAN controller * - ``san_login`` = ``admin`` - (String) Username for SAN controller * - ``san_password`` = - (String) Password for SAN controller * - ``san_thin_provision`` = ``True`` - (Boolean) Use thin provisioning for SAN volumes? 
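Note that although the tables in this reference list the driver options under **[DEFAULT]**, in a multi-backend deployment they are normally set in a named backend section selected through ``enabled_backends``. A minimal, purely illustrative sketch using the ProphetStor options above (the section name, addresses, credentials and pool UUID are placeholders, and the ``volume_driver`` line for the chosen iSCSI or Fibre Channel driver class is intentionally left as a comment):

.. code:: ini

   [DEFAULT]
   enabled_backends = dpl-backend-1

   [dpl-backend-1]
   # Set volume_driver to the ProphetStor iSCSI or FC driver class
   # documented in the driver reference; omitted here.
   volume_backend_name = dpl-backend-1
   san_ip = 203.0.113.10
   san_login = admin
   san_password = secret
   san_thin_provision = True
   dpl_pool = <pool-uuid>
   dpl_port = 8357
   iscsi_port = 3260
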
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/doc/source/configuration/tables/cinder-storwize.inc0000664000175000017500000001126300000000000025103 0ustar00zuulzuul00000000000000.. _cinder-storwize: .. list-table:: Description of IBM Storage Virtualize family driver configuration options :header-rows: 1 :class: config-ref-table * - Configuration option = Default value - Description * - **[DEFAULT]** - * - ``san_ip`` = - (String) IP address of SAN controller. * - ``san_login`` = ``admin`` - (String) Username for SAN controller. * - ``san_password`` = - (String) Password for SAN controller. * - ``san_private_key`` = - (String) Filename of private key to use for SSH authentication. * - ``san_ssh_port`` = ``22`` - (Port number) SSH port to use with SAN. * - ``ssh_conn_timeout`` = ``30`` - (Integer) SSH connection timeout in seconds. * - ``ssh_min_pool_conn`` = ``1`` - (Integer) Minimum SSH connections in the pool. * - ``ssh_max_pool_conn`` = ``5`` - (Integer) Maximum SSH connections in the pool. * - ``storwize_san_secondary_ip`` = ``None`` - (String) Specifies secondary management IP or hostname to be used if san_ip is invalid or becomes inaccessible. * - ``storwize_svc_allow_tenant_qos`` = ``False`` - (Boolean) Allow tenants to specify QoS on create. * - ``storwize_svc_flashcopy_rate`` = ``50`` - (Integer) Specifies the Storage Virtualize family FlashCopy copy rate to be used when creating a full volume copy. The default rate is 50, and the valid rates are 1-100. * - ``storwize_svc_clean_rate`` = ``50`` - (Integer) Specifies the Storwize cleaning rate for the mapping. The default rate is 50, and the valid rates are 0-150. * - ``storwize_svc_flashcopy_timeout`` = ``120`` - (Integer) Maximum number of seconds to wait for FlashCopy to be prepared. * - ``storwize_svc_iscsi_chap_enabled`` = ``True`` - (Boolean) Configure CHAP authentication for iSCSI connections. (Default: Enabled) * - ``storwize_svc_multihostmap_enabled`` = ``True`` - (Boolean) DEPRECATED: This option no longer has any effect. It is deprecated and will be removed in the next release. * - ``storwize_svc_multipath_enabled`` = ``False`` - (Boolean) Connect with multipath (FC only; iSCSI multipath is controlled by Nova). * - ``storwize_svc_stretched_cluster_partner`` = ``None`` - (String) If operating in stretched cluster mode, specify the name of the pool in which mirrored copies are stored. For example: "pool2" * - ``storwize_svc_vol_autoexpand`` = ``True`` - (Boolean) Storage system autoexpand parameter for volumes (True/False). * - ``storwize_svc_vol_compression`` = ``False`` - (Boolean) Storage system compression option for volumes. * - ``storwize_svc_vol_easytier`` = ``True`` - (Boolean) Enable Easy Tier for volumes. * - ``storwize_svc_vol_grainsize`` = ``256`` - (Integer) Storage system grain size parameter for volumes (32/64/128/256) * - ``storwize_svc_vol_iogrp`` = ``0`` - (Integer) The I/O group in which to allocate volumes * - ``storwize_svc_vol_nofmtdisk`` = ``False`` - (Boolean) Specifies that the volume not be formatted during creation. * - ``storwize_svc_vol_rsize`` = ``2`` - (Integer) Storage system space-efficiency parameter for volumes (percentage). * - ``storwize_svc_vol_warning`` = ``0`` - (Integer) Storage system threshold for volume capacity warnings (percentage). * - ``storwize_svc_volpool_name`` = ``volpool`` - (List) Comma separated list of storage system storage pools for volumes. 
* - ``storwize_svc_mirror_pool`` = ``None`` - (String) Specifies the name of the pool in which mirrored copy is stored. For example: "pool2" * - ``storwize_svc_retain_aux_volume`` = ``False`` - (Boolean) Defines an optional parameter to retain an auxiliary volume in a mirror relationship upon deletion of the primary volume or moving it to a non-mirror relationship. * - ``storwize_peer_pool`` = ``None`` - (String) Specifies the name of the peer pool for a HyperSwap volume. The peer pool must exist on the other site. * - ``storwize_preferred_host_site`` = ``{}`` - (Dictionary) Specifies the site information for host. One WWPN or multi-WWPNs used in the host can be specified. For example: storwize_preferred_host_site=site1:wwpn1,site2:wwpn2&wwpn3 or storwize_preferred_host_site=site1:iqn1,site2:iqn2 * - ``cycle_period_seconds`` = ``300`` - (Integer) Defines an optional cycle period that applies to Global Mirror relationships with a cycling mode of multi. A Global Mirror relationship using the multi cycling_mode performs a complete cycle at most once each period. The default is 300 seconds, and the valid seconds are 60-86400. * - ``storwize_portset`` = ``None`` - (String) Specifies the name of the portset in which the host is to be created. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/doc/source/configuration/tables/cinder-vmware.inc0000664000175000017500000000562500000000000024523 0ustar00zuulzuul00000000000000.. _cinder-vmware: .. list-table:: Description of VMware configuration options :header-rows: 1 :class: config-ref-table * - Configuration option = Default value - Description * - **[DEFAULT]** - * - ``vmware_adapter_type`` = ``lsiLogic`` - (String) Default adapter type to be used for attaching volumes. * - ``vmware_api_retry_count`` = ``10`` - (Integer) Number of times VMware vCenter server API must be retried upon connection related issues. * - ``vmware_ca_file`` = ``None`` - (String) CA bundle file to use in verifying the vCenter server certificate. * - ``vmware_cluster_name`` = ``None`` - (Multi-valued) Name of a vCenter compute cluster where volumes should be created. * - ``vmware_connection_pool_size`` = ``10`` - (Integer) Maximum number of connections in http connection pool. * - ``vmware_host_ip`` = ``None`` - (String) IP address for connecting to VMware vCenter server. * - ``vmware_host_password`` = ``None`` - (String) Password for authenticating with VMware vCenter server. * - ``vmware_host_port`` = ``443`` - (Port number) Port number for connecting to VMware vCenter server. * - ``vmware_host_username`` = ``None`` - (String) Username for authenticating with VMware vCenter server. * - ``vmware_host_version`` = ``None`` - (String) Optional string specifying the VMware vCenter server version. The driver attempts to retrieve the version from VMware vCenter server. Set this configuration only if you want to override the vCenter server version. * - ``vmware_image_transfer_timeout_secs`` = ``7200`` - (Integer) Timeout in seconds for VMDK volume transfer between Cinder and Glance. * - ``vmware_insecure`` = ``False`` - (Boolean) If true, the vCenter server certificate is not verified. If false, then the default CA truststore is used for verification. This option is ignored if "vmware_ca_file" is set. * - ``vmware_max_objects_retrieval`` = ``100`` - (Integer) Max number of objects to be retrieved per batch. Query results will be obtained in batches from the server and not in one shot. 
Server may still limit the count to something less than the configured value. * - ``vmware_task_poll_interval`` = ``2.0`` - (Floating point) The interval (in seconds) for polling remote tasks invoked on VMware vCenter server. * - ``vmware_tmp_dir`` = ``/tmp`` - (String) Directory where virtual disks are stored during volume backup and restore. * - ``vmware_volume_folder`` = ``Volumes`` - (String) Name of the vCenter inventory folder that will contain Cinder volumes. This folder will be created under "OpenStack/", where project_folder is of format "Project ()". * - ``vmware_wsdl_location`` = ``None`` - (String) Optional VIM service WSDL Location e.g http:///vimService.wsdl. Optional over-ride to default location for bug work-arounds. ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1759315577.463122 cinder-27.0.0/doc/source/configuration/tables/manual/0000775000175000017500000000000000000000000022532 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/doc/source/configuration/tables/manual/cinder-netapp_cdot_extraspecs.inc0000664000175000017500000000632400000000000031235 0ustar00zuulzuul00000000000000.. list-table:: Description of extra specs options for NetApp Unified Driver with Clustered Data ONTAP :header-rows: 1 * - Extra spec - Type - Description * - ``netapp_raid_type`` - String - Limit the candidate volume list based on one of the following raid types: ``raid4, raid_dp``. * - ``netapp_disk_type`` - String - Limit the candidate volume list based on one of the following disk types: ``ATA, BSAS, EATA, FCAL, FSAS, LUN, MSATA, SAS, SATA, SCSI, XATA, XSAS, or SSD.`` * - ``netapp:qos_policy_group`` [1]_ - String - Specify the name of a QoS policy group, which defines measurable Service Level Objectives, that should be applied to the OpenStack Block Storage volume at the time of volume creation. Ensure that the QoS policy group object within Data ONTAP should be defined before an OpenStack Block Storage volume is created, and that the QoS policy group is not associated with the destination FlexVol volume. * - ``netapp:qos_policy_group_is_adaptive`` - Boolean - Set to " True" in order to instruct the driver to use an Adaptive QoS policy group for the netapp:qos_policy_group setting. Leave this unset or set to " False" in order to use a standard QoS policy group for the netapp:qos_policy_group setting. * - ``netapp_mirrored`` - Boolean - Limit the candidate volume list to only the ones that are mirrored on the storage controller. * - ``netapp_unmirrored`` [2]_ - Boolean - Limit the candidate volume list to only the ones that are not mirrored on the storage controller. * - ``netapp_dedup`` - Boolean - Limit the candidate volume list to only the ones that have deduplication enabled on the storage controller. * - ``netapp_nodedup`` - Boolean - Limit the candidate volume list to only the ones that have deduplication disabled on the storage controller. * - ``netapp_compression`` - Boolean - Limit the candidate volume list to only the ones that have compression enabled on the storage controller. * - ``netapp_nocompression`` - Boolean - Limit the candidate volume list to only the ones that have compression disabled on the storage controller. * - ``netapp_thin_provisioned`` - Boolean - Limit the candidate volume list to only the ones that support thin provisioning on the storage controller. 
* - ``netapp_thick_provisioned`` - Boolean - Limit the candidate volume list to only the ones that support thick provisioning on the storage controller. .. [1] Please note that this extra spec has a colon (``:``) in its name because it is used by the driver to assign the QoS policy group to the OpenStack Block Storage volume after it has been provisioned. .. [2] In the Juno release, these negative-assertion extra specs are formally deprecated by the NetApp unified driver. Instead of using the deprecated negative-assertion extra specs (for example, ``netapp_unmirrored``) with a value of ``true``, use the corresponding positive-assertion extra spec (for example, ``netapp_mirrored``) with a value of ``false``. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/doc/source/configuration/tables/manual/cinder-pure_storage_extraspecs.inc0000664000175000017500000000105500000000000031430 0ustar00zuulzuul00000000000000.. list-table:: Description of extra specs options for Pure Storage FlashArray :header-rows: 1 * - Extra spec - Type - Description * - ``flasharray:vg_name`` - String - Specify the name of the volume group in which all volumes using this volume type will be created. * - ``flasharray:vg_maxIOPS`` - String - Maximum number of IOPs allowed for the volume group. Range 100 - 100M * - ``flasharray:vg_maxBWS`` - String - Maximum bandwidth limit for the volume group. Range 1024 - 524288 (512GB/s) ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1759315577.471122 cinder-27.0.0/doc/source/contributor/0000775000175000017500000000000000000000000017506 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/doc/source/contributor/README.rst0000664000175000017500000000131700000000000021177 0ustar00zuulzuul00000000000000===================================================== Cinder Contributor Documentation (source/contributor) ===================================================== Introduction: ------------- This directory is intended to hold any documentation that relates to how to contribute to Cinder or how the project is managed. Some of this content was previous under 'developer' in openstack-manuals. The content of the documentation, however, goes beyond just developers to anyone contributing to the project, thus the change in naming. The full spec for organization of documentation may be seen in the `OS Manuals Migration Spec `. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/doc/source/contributor/addmethod.openstackapi.rst0000664000175000017500000000655600000000000024665 0ustar00zuulzuul00000000000000.. Copyright 2010-2011 OpenStack Foundation All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Adding a Method to the OpenStack API ==================================== The interface is a mostly RESTful API. 
REST stands for Representational State Transfer and provides an architecture "style" for distributed systems using HTTP for transport. Figure out a way to express your request and response in terms of resources that are being created, modified, read, or destroyed. Routing ------- To map URLs to controllers+actions, OpenStack uses the Routes package, a clone of Rails routes for Python implementations. See http://routes.groovie.org/ for more information. URLs are mapped to "action" methods on "controller" classes in ``cinder/api/openstack/__init__/ApiRouter.__init__`` . See http://routes.readthedocs.io/en/latest/ for all syntax, but you'll probably just need these two: - mapper.connect() lets you map a single URL to a single action on a controller. - mapper.resource() connects many standard URLs to actions on a controller. Controllers and actions ----------------------- Controllers live in ``cinder/api/openstack``, and inherit from cinder.wsgi.Controller. See ``cinder/api/v3/volumes.py`` for an example. Action methods take parameters that are sucked out of the URL by mapper.connect() or .resource(). The first two parameters are self and the WebOb request, from which you can get the req.environ, req.body, req.headers, etc. Serialization ------------- Actions return a dictionary, and wsgi.Controller serializes that to JSON or XML based on the request's content-type. Errors ------ There will be occasions when you will want to return a REST error response to the caller and there are multiple valid ways to do this: - If you are at the controller level you can use a ``faults.Fault`` instance to indicate the error. You can either return the ``Fault`` instance as the result of the action, or raise it, depending on what's more convenient: ``raise faults.Fault(webob.exc.HTTPBadRequest(explanation=msg))``. - If you are raising an exception our WSGI middleware exception handler is smart enough to recognize webob exceptions as well, so you don't really need to wrap the exceptions in a ``Fault`` class and you can just let the middleware add it for you: ``raise webob.exc.HTTPBadRequest(explanation=msg)``. - While most errors require an explicit webob exception there are some Cinder exceptions (``NotFound`` and ``Invalid``) that are so common that they are directly handled by the middleware and don't need us to convert them, we can just raise them at any point in the API service and they will return the appropriate REST error to the caller. So any ``NotFound`` exception, or child class, will return a 404 error, and any ``Invalid`` exception, or child class, will return a 400 error. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/doc/source/contributor/api.apache.rst0000664000175000017500000000326500000000000022237 0ustar00zuulzuul00000000000000.. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
Running Cinder API under Apache =============================== Files ----- Copy the file etc/cinder/api-httpd.conf to the appropriate location for your Apache server, most likely: ``/etc/httpd/conf.d/cinder_wsgi.conf`` Update this file to match your system configuration (for example, some distributions put httpd logs in the apache2 directory and some in the httpd directory). Create the directory /var/www/cgi-bin/cinder/. You can either hard or soft link the file cinder/wsgi/wsgi.py to be osapi_volume under the /var/www/cgi-bin/cinder/ directory. For a distribution-appropriate place, it should probably be copied to: ``/usr/share/openstack/cinder/httpd/cinder.py`` Cinder's primary configuration file (etc/cinder.conf) and the PasteDeploy configuration file (etc/cinder-paste.ini) must be readable to httpd in one of the default locations described in Configuring Cinder. Access Control -------------- If you are running with a Linux kernel security module enabled (for example SELinux or AppArmor), make sure that the configuration file has the appropriate context to access the linked file. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/doc/source/contributor/api_conditional_updates.rst0000664000175000017500000004437200000000000025131 0ustar00zuulzuul00000000000000API Races - Conditional Updates =============================== Background ---------- On Cinder API nodes we have to check that the requested action can be performed by checking request arguments and involved resources, and only if everything matches the required criteria will we proceed with the RPC call to any of the other nodes. Checking the conditions must be done in a non-racy way to ensure that already checked requirements don't change while we check remaining conditions. This is of utter importance, as Cinder uses resource status as a lock to prevent concurrent operations on a resource. A simple example of this would be extending a volume, where we first check the status: .. code:: python if volume['status'] != 'available': Then update the status: .. code:: python self.update(context, volume, {'status': 'extending'}) And finally make the RPC call: .. code:: python self.volume_rpcapi.extend_volume(context, volume, new_size, reservations) The problem is that this code would allow races, as another request could have already changed the volume status between us getting the value and updating the DB. There are multiple ways to fix this, such as: - Using a Distributed Locking Mechanism - Using DB isolation level - Using SQL SELECT ... FOR UPDATE - Using a compare and swap mechanism in SQL query Our tests showed that the best alternative was compare and swap and we decided to call this mechanism "Conditional Update" as it seemed more appropriate. Conditional Update ------------------ Conditional Update is the mechanism we use in Cinder to prevent races when updating the DB. In essence it is the SQL equivalent of an ``UPDATE ... FROM ... WHERE;`` clause. It is implemented as an abstraction layer on top of SQLAlchemy ORM engine in our DB api layer and exposed for consumption in Cinder's Persistent Versioned Objects through the ``conditional_update`` method so it can be used from any Versioned Object instance that has persistence (Volume, Snapshot, Backup...). Method signature is: .. 
code:: python def conditional_update(self, values, expected_values=None, filters=(), save_all=False, session=None, reflect_changes=True, order=None): :values: Dictionary of key-value pairs with changes that we want to make to the resource in the DB. :expected_values: Dictionary with conditions that must be met for the update to be executed. Condition ``field.id == resource.id`` is implicit and there is no need to add it to the conditions. If no ``expected_values`` argument is provided update will only go through if no field in the DB has changed. Dirty fields from the Versioned Object are excluded as we don't know their original value. :filters: Additional SQLAlchemy filters can be provided for more complex conditions. :save_all: By default we will only be updating the DB with values provided in the ``values`` argument, but we can explicitly say that we also want to save object's current dirty fields. :session: A SQLAlchemy session can be provided, although it is unlikely to be needed. :reflect_changes: On a successful update we will also update Versioned Object instance to reflect these changes, but we can prevent this instance update passing False on this argument. :order: Specific order of fields in which to update the values. :Return Value: We'll return the number of changed rows. So we'll get a 0 value if the conditional update has not been successful instead of an exception. Basic Usage ----------- - **Simple match** The most basic example is doing a simple match, for example for a ``volume`` variable that contains a Versioned Object Volume class instance we may want to change the ``status`` to "deleting" and update the ``terminated_at`` field with current UTC time only if current ``status`` is "available" and the volume is not in a consistency group. .. code:: python values={'status': 'deleting', 'terminated_at': timeutils.utcnow()} expected_values = {'status': 'available', 'consistencygroup_id': None} volume.conditional_update(values, expected_values) - **Iterable match** Conditions can contain not only single values, but also iterables, and the conditional update mechanism will correctly handle the presence of None values in the range, unlike SQL ``IN`` clause that doesn't support ``NULL`` values. .. code:: python values={'status': 'deleting', 'terminated_at': timeutils.utcnow()} expected_values={ 'status': ('available', 'error', 'error_restoring' 'error_extending'), 'migration_status': (None, 'deleting', 'error', 'success'), 'consistencygroup_id': None } volume.conditional_update(values, expected_values) - **Exclusion** In some cases we'll need to set conditions on what is *not* in the DB record instead of what is in, for that we will use the exclusion mechanism provided by the ``Not`` class in all persistent objects. This class accepts single values as well as iterables. .. code:: python values={'status': 'deleting', 'terminated_at': timeutils.utcnow()} expected_values={ 'attach_status': volume.Not('attached'), 'status': ('available', 'error', 'error_restoring' 'error_extending'), 'migration_status': (None, 'deleting', 'error', 'success'), 'consistencygroup_id': None } volume.conditional_update(values, expected_values) - **Filters** We can use complex filters in the conditions, but these must be SQLAlchemy queries/conditions and as the rest of the DB methods must be properly abstracted from the API. Therefore we will create the method in cinder/db/sqlalchemy/api.py: .. 
code:: python def volume_has_snapshots_filter(): return sql.exists().where( and_(models.Volume.id == models.Snapshot.volume_id, ~models.Snapshot.deleted)) Then expose this filter through the cinder/db/api.py: .. code:: python def volume_has_snapshots_filter(): return IMPL.volume_has_snapshots_filter() And finally used in the API (notice how we are negating the filter at the API): .. code:: python filters = [~db.volume_has_snapshots_filter()] values={'status': 'deleting', 'terminated_at': timeutils.utcnow()} expected_values={ 'attach_status': volume.Not('attached'), 'status': ('available', 'error', 'error_restoring' 'error_extending'), 'migration_status': (None, 'deleting', 'error', 'success'), 'consistencygroup_id': None } volume.conditional_update(values, expected_values, filters) Returning Errors ---------------- The most important downside of using conditional updates to remove API races is the inherent uncertainty of the cause of failure resulting in more generic error messages. When we use the `conditional_update` method we'll use returned value to determine the success of the operation, as a value of 0 indicates that no rows have been updated and the conditions were not met. But we don't know which one, or which ones, were the cause of the failure. There are 2 approaches to this issue: - On failure we go one by one checking the conditions and return the first one that fails. - We return a generic error message indicating all conditions that must be met for the operation to succeed. It was decided that we would go with the second approach, because even though the first approach was closer to what we already had and would give a better user experience, it had considerable implications such as: - More code was needed to do individual checks making operations considerable longer and less readable. This was greatly alleviated using helper methods to return the errors. - Higher number of DB queries required to determine failure cause. - Since there could be races because DB contents could be changed between the failed update and the follow up queries that checked the values for the specific error, a loop would be needed to make sure that either the conditional update succeeds or one of the condition checks fails. - Having such a loop means that a small error in the code could lead to an endless loop in a production environment. This coding error could be an incorrect conditional update filter that would always fail or a missing or incorrect condition that checked for the specific issue to return the error. A simple example of a generic error can be found in `begin_detaching` code: .. code:: python @wrap_check_policy def begin_detaching(self, context, volume): # If we are in the middle of a volume migration, we don't want the # user to see that the volume is 'detaching'. Having # 'migration_status' set will have the same effect internally. expected = {'status': 'in-use', 'attach_status': 'attached', 'migration_status': self.AVAILABLE_MIGRATION_STATUS} result = volume.conditional_update({'status': 'detaching'}, expected) if not (result or self._is_volume_migrating(volume)): msg = _("Unable to detach volume. 
Volume status must be 'in-use' " "and attach_status must be 'attached' to detach.") LOG.error(msg) raise exception.InvalidVolume(reason=msg) Building filters on the API --------------------------- SQLAlchemy filters created as mentioned above can create very powerful and complex conditions, but sometimes we may require a condition that, while more complex than the basic match and not match on the resource fields, it's still quite simple. For those cases we can create filters directly on the API using the ``model`` field provided in Versioned Objects. This ``model`` field is a reference to the ORM model that allows us to reference ORM fields. We'll use as an example changing the ``status`` field of a backup to "restoring" if the backup status is "available" and the volume where we are going to restore the backup is also in "available" state. Joining of tables is implicit when using a model different from the one used for the Versioned Object instance. - **As expected_values** Since this is a matching case we can use ``expected_values`` argument to make the condition: .. code:: python values = {'status': 'restoring'} expected_values={'status': 'available', objects.Volume.model.id: volume.id, objects.Volume.model.status: 'available'} - **As filters** We can also use the ``filters`` argument to achieve the same results: .. code:: python filters = [objects.Volume.model.id == volume.id, objects.Volume.model.status == 'available'] - **Other filters** If we are not doing a match for the condition the only available option will be to use ``filters`` argument. For example if we want to do a check on the volume size against the backup size: .. code:: python filters = [objects.Volume.model.id == volume.id, objects.Volume.model.size >= backup.model.size] Using DB fields for assignment ------------------------------ - **Using non modified fields** Similar to the way we use the fields to specify conditions, we can also use them to set values in the DB. For example when we disable a service we want to keep existing ``updated_at`` field value: .. code:: python values = {'disabled': True, 'updated_at': service.model.updated_at} - **Using modified field** In some cases we may need to use a DB field that we are also updating, for example when we are updating the ``status`` but we also want to keep the old value in the ``previous_status`` field. .. code:: python values = {'status': 'retyping', 'previous_status': volume.model.status} Conditional update mechanism takes into account that MySQL does not follow SQL language specs and adjusts the query creation accordingly. - **Together with filters** Using DB fields for assignment together with using them for values can give us advanced functionality like for example increasing a quota value based on current value and making sure we don't exceed our quota limits. .. code:: python values = {'in_use': quota.model.in_use + volume.size} filters = [quota.model.in_use <= max_usage - volume.size] Conditional value setting ------------------------- Under certain circumstances you may not know what value should be set in the DB because it depends on another field or on another condition. For those cases we can use the ``Case`` class present in our persistent Versioned Objects which implements the SQL CASE clause. The idea is simple, using ``Case`` class we can say which values to set in a field based on conditions and also set a default value if none of the conditions are True. 
Conditions must be SQLAlchemy conditions, so we'll need to use fields from the ``model`` attribute. For example setting the status to "maintenance" during migration if current status is "available" and leaving it as it was if it's not can be done using the following: .. code:: python values = { 'status': volume.Case( [ (volume.model.status == 'available', 'maintenance') ], else_=volume.model.status) } reflect_changes considerations ------------------------------ As we've already mentioned ``conditional_update`` method will update Versioned Object instance with provided values if the row in the DB has been updated, and in most cases this is OK since we can set the values directly because we are using simple values, but there are cases where we don't know what value we should set in the instance, and is in those cases where the default ``reflect_changes`` value of True has performance implications. There are 2 cases where Versioned Object ``conditional_update`` method doesn't know the value it has to set on the Versioned Object instance, and they are when we use a field for assignment and when we are using the ``Case`` class, since in both cases the DB is the one deciding the value that will be set. In those cases ``conditional_update`` will have to retrieve the value from the DB using ``get_by_id`` method, and this has a performance impact and therefore should be avoided when possible. So the recommendation is to set ``reflect_changes`` to False when using ``Case`` class or using fields in the ``values`` argument if we don't care about the stored value. Limitations ----------- We can only use functionality that works on **all** supported DBs, and that's why we don't allow multi table updates and will raise ProgrammingError exception even when the code is running against a DB engine that supports this functionality. This way we make sure that we don't inadvertently add a multi table update that works on MySQL but will surely fail on PostgreSQL. MySQL DB engine also has some limitations that we should be aware of when creating our filters. One that is very common is when we are trying to check if there is a row that matches a specific criteria in the same table that we are updating. For example, when deleting a Consistency Group we want to check that it is not being used as the source for a Consistency Group that is in the process of being created. The straightforward way of doing this is using the core exists expression and use an alias to differentiate general query fields and the exists subquery. Code would look like this: .. code:: python def cg_creating_from_src(cg_id): model = aliased(models.ConsistencyGroup) return sql.exists().where(and_( ~model.deleted, model.status == 'creating', conditions.append(model.source_cgid == cg_id))) While this will work in SQLite and PostgreSQL, it will not work on MySQL and an error will be raised when the query is executed: "You can't specify target table 'consistencygroups' for update in FROM clause". To solve this we have 2 options: - Create a specific query for MySQL engines using an update with a left self join, which is a feature only available in MySQL. - Use a trick -using a select subquery- that will work on all DBs. Considering that it's always better to have only 1 way of doing things and that SQLAlchemy doesn't support MySQL's non standard behavior we should generate these filters using the select subquery method like this: .. 
.. code:: python

    def cg_creating_from_src(cg_id):
        subq = sql.select(models.ConsistencyGroup).where(
            and_(
                ~models.ConsistencyGroup.deleted,
                models.ConsistencyGroup.status == 'creating'
            )
        ).alias('cg2')

        return sql.exists([subq]).where(subq.c.source_cgid == cg_id)

Considerations for new ORM & Versioned Objects
----------------------------------------------

The conditional update mechanism works using generic methods for getting an object from the DB, as well as for determining the model for a specific Versioned Object instance for field binding.

These generic methods rely on some naming rules for Versioned Object classes, ORM classes, and get methods, so when we are creating a new ORM class and adding the matching Versioned Object and access methods we must be careful to follow these rules, or at least specify exceptions if we have a good reason not to follow these conventions.

Rules:

- Versioned Object class name must be the same as the ORM class.
- Get method name must be the ORM class name converted to snake format with the postfix "_get". For example, for the ``Volume`` ORM class the expected method is ``volume_get``, and for an imaginary ``MyORMClass`` it would be ``my_orm_class_get``.
- Get method must receive the ``context`` as the first argument and the ``id`` as the second one, although it may accept more optional arguments.

We should avoid diverging from these rules whenever possible, but there are cases where this is not possible, for example the ``BackupImport`` Versioned Object, which really uses the ``Backup`` ORM class. For cases such as this we have a way to set exceptions both for the generic get method and for the model of a Versioned Object.

To add exceptions for the get method we have to add a new entry to the ``GET_EXCEPTIONS`` dictionary mapping in ``cinder.db.sqlalchemy.api._get_get_method``.

And for determining the model for the Versioned Object we have to add a new entry to the ``VO_TO_MODEL_EXCEPTIONS`` dictionary mapping in ``cinder.db.sqlalchemy.api.get_model_for_versioned_object``.

././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/doc/source/contributor/api_microversion_dev.rst0000664000175000017500000003320000000000000024444 0ustar00zuulzuul00000000000000API Microversions
=================

Background
----------

Cinder uses a framework we call 'API Microversions' for allowing changes to the API while preserving backward compatibility. The basic idea is that a user has to explicitly ask for their request to be treated with a particular version of the API, so breaking changes can be added to the API without breaking users who don't specifically ask for them.

This is done with an HTTP header ``OpenStack-API-Version`` which is a monotonically increasing semantic version number starting from ``3.0``.

Each OpenStack service that uses microversions will share this header, so the Volume service will need to prefix the semantic version number with the word ``volume``::

    OpenStack-API-Version: volume 3.0

If a user makes a request without specifying a version, they will get the ``_MIN_API_VERSION`` as defined in ``cinder/api/openstack/api_version_request.py``. This value is currently ``3.0`` and is expected to remain so for quite a long time.

The Nova project was the first to implement microversions. For full details please read Nova's `Kilo spec for microversions `_

When do I need a new Microversion?
----------------------------------

A microversion is needed when the contract to the user is changed.
The user contract covers many kinds of information such as: - the Request - the list of resource URLs which exist on the server Example: adding a new shares/{ID}/foo which didn't exist in a previous version of the code - the list of query parameters that are valid on URLs Example: adding a new parameter ``is_yellow`` servers/{ID}?is_yellow=True - the list of query parameter values for non free form fields Example: parameter filter_by takes a small set of constants/enums "A", "B", "C". Adding support for new enum "D". - new headers accepted on a request - the Response - the list of attributes and data structures returned Example: adding a new attribute 'locked': True/False to the output of shares/{ID} - the allowed values of non free form fields Example: adding a new allowed ``status`` to shares/{ID} - the list of status codes allowed for a particular request Example: an API previously could return 200, 400, 403, 404 and the change would make the API now also be allowed to return 409. - changing a status code on a particular response Example: changing the return code of an API from 501 to 400. - new headers returned on a response The following flow chart attempts to walk through the process of "do we need a microversion". .. graphviz:: digraph states { label="Do I need a microversion?" silent_fail[shape="diamond", style="", label="Did we silently fail to do what is asked?"]; ret_500[shape="diamond", style="", label="Did we return a 500 before?"]; new_error[shape="diamond", style="", label="Are we changing what status code is returned?"]; new_attr[shape="diamond", style="", label="Did we add or remove an attribute to a payload?"]; new_param[shape="diamond", style="", label="Did we add or remove an accepted query string parameter or value?"]; new_resource[shape="diamond", style="", label="Did we add or remove a resource URL?"]; no[shape="box", style=rounded, label="No microversion needed"]; yes[shape="box", style=rounded, label="Yes, you need a microversion"]; no2[shape="box", style=rounded, label="No microversion needed, it's a bug"]; silent_fail -> ret_500[label="no"]; silent_fail -> no2[label="yes"]; ret_500 -> no2[label="yes [1]"]; ret_500 -> new_error[label="no"]; new_error -> new_attr[label="no"]; new_error -> yes[label="yes"]; new_attr -> new_param[label="no"]; new_attr -> yes[label="yes"]; new_param -> new_resource[label="no"]; new_param -> yes[label="yes"]; new_resource -> no[label="no"]; new_resource -> yes[label="yes"]; {rank=same; yes new_attr} {rank=same; no2 ret_500} {rank=min; silent_fail} } If a patch that will require a microversion increment is proposed having similar intention and code with a previously merged patch given the previous merged patch hasn't been released, then the previously merged patch could be modified to include the new patch code under the same microversion. **Footnotes** [1] - When fixing 500 errors that previously caused stack traces, try to map the new error into the existing set of errors that API call could previously return (400 if nothing else is appropriate). Changing the set of allowed status codes from a request is changing the contract, and should be part of a microversion. The reason why we are so strict on contract is that we'd like application writers to be able to know, for sure, what the contract is at every microversion in Cinder. If they do not, they will need to write conditional code in their application to handle ambiguities. When in doubt, consider application authors. 
If it would work with no client side changes on both Cinder versions, you probably don't need a microversion. If, on the other hand, there is any ambiguity, a microversion is probably needed. In Code ------- In ``cinder/api/openstack/wsgi.py`` we define an ``@api_version`` decorator which is intended to be used on top-level Controller methods. It is not appropriate for lower-level methods. Some examples: Adding a new API method ~~~~~~~~~~~~~~~~~~~~~~~ In the controller class:: @wsgi.Controller.api_version("3.4") def my_api_method(self, req, id): .... This method would only be available if the caller had specified an ``OpenStack-API-Version`` of >= ``3.4``. If they had specified a lower version (or not specified it and received the default of ``3.1``) the server would respond with ``HTTP/404``. Removing an API method ~~~~~~~~~~~~~~~~~~~~~~ In the controller class:: @wsgi.Controller.api_version("3.1", "3.4") def my_api_method(self, req, id): .... This method would only be available if the caller had specified an ``OpenStack-API-Version`` of <= ``3.4``, and >= ``3.1``. If ``3.5`` or later is specified or if ``3.0`` or earlier (/v2 or /v1 endpoint), the server will respond with ``HTTP/404`` Changing a method's behaviour ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ In the controller class:: @wsgi.Controller.api_version("3.1", "3.3") def my_api_method(self, req, id): .... method_1 ... @my_api_method.api_version("3.4") def my_api_method(self, req, id): .... method_2 ... If a caller specified ``3.1``, ``3.2`` or ``3.3`` (or received the default of ``3.1``) they would see the result from ``method_1``, ``3.4`` or later ``method_2``. We could use ``wsgi.Controller.api_version`` decorator on the second ``my_api_method`` as well, but then we would have to add ``# noqa`` to that line to avoid failing flake8's ``F811`` rule. So the recommended approach is to use the ``api_version`` decorator from the first method that is defined, as illustrated by the example above, and then use ``my_api_method`` decorator for subsequent api versions of the same method. The two methods may be different in any kind of semantics (schema validation, return values, response codes, etc.). A method with only small changes between versions ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ A method may have only small changes between microversions, in which case you can decorate a private method:: @wsgi.Controller.api_version("3.1", "3.4") def _version_specific_func(self, req, arg1): pass @_version_specific_func.api_version(min_ver="3.5") def _version_specific_func(self, req, arg1): pass def show(self, req, id): .... common stuff .... self._version_specific_func(req, "foo") .... common stuff .... When not using decorators ~~~~~~~~~~~~~~~~~~~~~~~~~ When you don't want to use the ``@api_version`` decorator on a method or you want to change behaviour within a method (say it leads to simpler or simply a lot less code) you can directly test for the requested version with a method as long as you have access to the api request object (commonly called ``req``). Every API method has an api_version_request object attached to the req object and that can be used to modify behaviour based on its value:: def index(self, req): req_version = req.api_version_request if req_version.matches("3.1", "3.5"): ....stuff.... elif req_version.matches("3.6", "3.10"): ....other stuff.... elif req_version > api_version_request.APIVersionRequest("3.10"): ....more stuff..... 
The first argument to the matches method is the minimum acceptable version and the second is the maximum acceptable version. A specified version can be null::

    null_version = APIVersionRequest()

If the minimum version specified is null then there is no restriction on the minimum version, and likewise if the maximum version is null there is no restriction on the maximum version. Alternatively, a one-sided comparison can be used as in the example above.

Other necessary changes
-----------------------

If you are adding a patch which adds a new microversion, it is necessary to add changes to other places which describe your change:

* Update ``REST_API_VERSION_HISTORY`` in ``cinder/api/openstack/api_version_request.py``

* Update ``_MAX_API_VERSION`` in ``cinder/api/openstack/api_version_request.py``

* Add a verbose description to ``cinder/api/openstack/rest_api_version_history.rst``. There should be enough information that it could be used by the docs team for release notes.

* Constants should be used in the code to minimize errors on microversion merge conflicts. Define a constant for the new microversion in the ``cinder/api/microversions.py`` file and use that in the rest of the code.

* Update the expected versions in affected tests.

* API changes should almost always include a release note announcing the availability of the new API functionality. The description of the API change should indicate which microversion is required for the change, and it should refer to the numerical value of the microversion and not its constant name.

* Update the ``version`` parameter in api-ref responses here ``cinder/api-ref/source/v3/samples/versions/version-show-response.json`` and here ``cinder/api-ref/source/v3/samples/versions/versions-response.json`` to the latest microversion to avoid functional test failure.

* If the API microversion has changed an endpoint's accepted parameters or the values it returns, we need to create the appropriate API samples within the ``api-ref/source/v3/samples`` tree, creating a new ``vX.Y`` directory with our request and/or response json.

* Update the functional API tests in the ``cinder/tests/functional/api_sample_tests`` tree to make requests and validate responses with the new microversion. There are multiple convenience methods provided for testing, such as the ``use_versions`` class decorator that allows us to run the same tests with different microversions (each will use their respective json and templates), the ``override_mv`` method decorator to change the microversion in a single test, and the ``common_api_sample`` context manager to use the base sample instead of a microversion specific one.

* Update the documentation, adding any new parameter to ``api-ref/source/v3/parameters.yaml`` (remember to add the ``min_version``) and then making appropriate changes to the ``.inc`` file in ``api-ref/source/v3/`` to reflect new possible return codes, new accepted parameters and their ``Request Example (vX.Y)`` title and include file, and returned values and their ``Response Example (vX.Y)`` title and include file. The Cinder project's policy is that the sample requests and responses should always reflect the *most recent* microversion.

Allocating a microversion
-------------------------

If you are adding a patch which adds a new microversion, it is necessary to allocate the next microversion number. Except under extremely unusual circumstances (and this would have been mentioned in the blueprint for the change), the minor number of ``_MAX_API_VERSION`` will be incremented.
This will also be the new microversion number for the API change. It is possible that multiple microversion patches would be proposed in parallel and the microversions would conflict between patches. This will cause a merge conflict. We don't reserve a microversion for each patch in advance as we don't know the final merge order. Developers may need over time to rebase their patch calculating a new version number as above based on the updated value of ``_MAX_API_VERSION``. Testing Microversioned API Methods ---------------------------------- Unit tests for microversions should be put in cinder/tests/unit/api/v3/ . Since all existing functionality is tested in cinder/tests/unit/api/v2, these unit tests are not replicated in .../v3, and only new functionality needs to be place in the .../v3/directory. Testing a microversioned API method is very similar to a normal controller method test, you just need to add the ``OpenStack-API-Version`` header, for example:: req = fakes.HTTPRequest.blank('/testable/url/endpoint') req.headers['OpenStack-API-Version'] = 'volume 3.6' req.api_version_request = api_version.APIVersionRequest('3.6') controller = controller.TestableController() res = controller.index(req) ... assertions about the response ... REST API Version History ------------------------ Details for each existing microversion change can be found in the :doc:`REST API Version History ` documentation. .. toctree:: :hidden: api_microversion_history ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/doc/source/contributor/api_microversion_history.rst0000664000175000017500000000011000000000000025361 0ustar00zuulzuul00000000000000.. include:: ../../../cinder/api/openstack/rest_api_version_history.rst ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/doc/source/contributor/architecture.rst0000664000175000017500000000407100000000000022724 0ustar00zuulzuul00000000000000.. Copyright 2010-2011 United States Government as represented by the Administrator of the National Aeronautics and Space Administration. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Cinder System Architecture ========================== The Cinder Block Storage Service is intended to be ran on one or more nodes. Cinder uses a sql-based central database that is shared by all Cinder services in the system. The amount and depth of the data fits into a sql database quite well. For small deployments this seems like an optimal solution. For larger deployments, and especially if security is a concern, cinder will be moving towards multiple data stores with some kind of aggregation system. Components ---------- Below you will find a brief explanation of the different components. .. figure:: ../images/architecture.png :alt: Cinder architecture :align: center :width: 110% .. * DB: sql database for data storage. Used by all components (LINKS NOT SHOWN). * Web Dashboard: potential external component that talks to the api. 
* api: component that receives http requests, converts commands and communicates with other components via the queue or http. * Auth Manager: component responsible for users/projects/and roles. Can backend to DB or LDAP. This is not a separate binary, but rather a python class that is used by most components in the system. * scheduler: decides which host gets each volume. * volume: manages dynamically attachable block devices. * backup: manages backups of block storage devices. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/doc/source/contributor/attach_detach_conventions.rst0000664000175000017500000001704400000000000025447 0ustar00zuulzuul00000000000000.. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ============================= Volume Attach/Detach workflow ============================= There are six API calls associated with attach/detach of volumes in Cinder (3 calls for each operation). This can lead to some confusion for developers trying to work on Cinder. The convention is actually quite simple, although it may be difficult to decipher from the code. Attach/Detach Operations are multi-part commands ================================================ There are three things that happen in the workflow for an attach or detach call. 1. Update the status of the volume in the DB (ie attaching/detaching) - For Attach, this is the cinder.volume.api.reserve_volume method - For Detach, the analogous call is cinder.volume.api.begin_detaching 2. Handle the connection operations that need to be done on the Volume - For Attach, this is the cinder.volume.api.initialize_connection method - For Detach, the analogous call is cinder.volume.api.terminate_connection 3. Finalize the status of the volume and release the resource - For attach, this is the cinder.volume.api.attach method - For detach, the analogous call is cinder.volume.api.detach Attach workflow =============== reserve_volume(self, context, volume) ------------------------------------- Probably the most simple call in to Cinder. This method simply checks that the specified volume is in an "available" state and can be attached. Any other state results in an Error response notifying Nova that the volume is NOT available. The only valid state for this call to succeed is "available". NOTE: multi-attach will add "in-use" to the above acceptable states. If the volume is in fact available, we immediately issue an update to the Cinder database and mark the status of the volume to "attaching" thereby reserving the volume so that it won't be used by another API call anywhere else. initialize_connection(self, context, volume, connector) ------------------------------------------------------- This is the only attach related API call that should be doing any significant work. This method is responsible for building and returning all of the info needed by the caller (Nova) to actually attach the specified volume to the remote node. 
This method returns vital information to the caller that includes things like the CHAP credentials, iqn and lun information. An example response is shown here:

::

    {
        'driver_volume_type': 'iscsi',
        'data': {
            'auth_password': 'YZ2Hceyh7VySh5HY',
            'target_discovered': False,
            'encrypted': False,
            'qos_specs': None,
            'target_iqn': 'iqn.2010-10.org.openstack:volume-8b1ec3fe-8c57-45ca-a1cf-a481bfc8fce2',
            'target_portal': '11.0.0.8:3260',
            'volume_id': '8b1ec3fe-8c57-45ca-a1cf-a481bfc8fce2',
            'target_lun': 1,
            'access_mode': 'rw',
            'auth_username': 'nE9PY8juynmmZ95F7Xb7',
            'auth_method': 'CHAP'
        }
    }

In the process of building this data structure, the Cinder Volume Manager makes a number of calls to the backend driver, and builds a volume_attachment entry in the database to store the connection information passed in via the connector object.

driver.validate_connector
*************************

Simply verifies that the initiator data is included in the passed-in connector (there are some drivers that utilize pieces of this connector data, but in the case of the reference, it just verifies it's there).

driver.create_export
********************

This is the target specific, persistent data associated with a volume. This method is responsible for building an actual iSCSI target, and providing the "location" and "auth" information which will be used to form the response data in the parent request. We call this info the model_update, and it's used to update vital target information associated with the volume in the Cinder database.

driver.initialize_connection
****************************

Now that we've actually built a target and persisted the important bits of information associated with it, we're ready to actually assign the target to a volume and form the needed info to pass back out to our caller. This is where we finally put everything together and form the example data structure response shown earlier.

This method is sort of deceptive: it does a whole lot of formatting of the data we've put together in the create_export call, but it doesn't really offer any new info. It's completely dependent on the information that was gathered in the create_export call and put into the database. At this point, all we're doing is taking all the various entries from the database and putting them together into the desired format/structure.

The key method call for updating and obtaining all of this info was done by the create_export call. This formatted data is then passed back up to the API and returned as the response back out to Nova.

At this point, we return attach info to the caller that provides everything needed to make the remote iSCSI connection.

attach(self, context, volume, instance_uuid, host_name, mountpoint, mode)
--------------------------------------------------------------------------

This is the last call that *should* be pretty simple. The intent is that this is simply used to finalize the attach process. In other words, we simply update the status on the Volume in the database, and provide a mechanism to notify the driver that the attachment has completed successfully.

There's some additional information that has been added to this finalize call over time, like instance_uuid, host_name, etc. Some of these are only provided during the actual attach call and may be desired by some drivers for one reason or another.

Detach workflow
===============

begin_detaching(self, context, volume)
--------------------------------------

Analogous to the Attach workflow's ``reserve_volume`` method.
Performs a simple conditional update of Volume status to ``detaching``. terminate_connection(self, context, volume, connector, force=False) ------------------------------------------------------------------- Analogous to the Attach workflows ``initialize_connection`` method. Used to send calls down to drivers/target-drivers to do any sort of cleanup they might require. For most this is a noop, as connections and **iscsi session management is the responsibility of the initiator**. HOWEVER, there are a number of special cases here, particularly for target-drivers like LIO that use access-groups, in those cases they remove the initiator from the access list during this call which effectively closes sessions from the target side. detach(self, context, volume, attachment_id) ------------------------------------------------------------------- The final update to the DB and yet another opportunity to pass something down to the volume-driver. Initially a simple call-back that now has quite a bit of cruft built up in the volume-manager. For drivers like LVM this again is a noop and just updates the db entry to mark things as complete and set the volume to available again. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/doc/source/contributor/attach_detach_conventions_v2.rst0000664000175000017500000001622300000000000026054 0ustar00zuulzuul00000000000000.. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ================================== Volume Attach/Detach workflow - V2 ================================== Previously there were six API calls associated with attach/detach of volumes in Cinder (3 calls for each operation). As the projects grew and the functionality of *simple* things like attach/detach evolved things have become a bit vague and we have a number of race issues during the calls that continually cause us some problems. Additionally, the existing code path makes things like multi-attach extremely difficult to implement due to no real good tracking mechanism of attachment info. To try and improve this we've proposed a new Attachments Object and API. Now we keep an Attachment record for each attachment that we want to perform as opposed to trying to infer the information from the Volume Object. Attachment Object ================= We actually already had a VolumeAttachment Table in the db, however we weren't really using it, or at least using it efficiently. For V2 of attach implementation (V3 API) flow we'll use the Attachment Table (object) as the primary handle for managing attachment(s) for a volume. In addition, we also introduce the AttachmentSpecs Table which will store the connector information for an Attachment so we no longer have the problem of lost connector info, or trying to reassemble it. New API and Flow ================ attachment-create ----------------- ``` cinder --os-volume-api-version 3.27 attachment-create ``` The attachment_create call simply creates an empty Attachment record for the specified Volume with an Instance UUID field set. 
This is particularly useful for cases like Nova Boot from Volume where Nova hasn't sent the job to the actual Compute host yet, but needs to make initial preparations to reserve the volume for use, so here we can reserve the volume and indicate that we will be attaching it to in the future. Alternatively, the caller may provide a connector in which case the Cinder API will create the attachment and perform the update on the attachment to set the connector info and return the connection data needed to make a connection. The attachment_create call can be used in one of two ways: 1. Create an empty Attachment object (reserve). In this case the attachment_create call requires an instance_uuid and a volume_uuid, and just creates an empty Attachment object and returns the UUID of Attachment to the caller. 2. Create and complete the Attachment process in one call. The reserve process is only needed in certain cases, in many cases Nova actually has enough information to do everything in a single call. Also, non-nova consumers typically don't require the granularity of a separate reserve at all. To perform the complete operation, include the connector data in the attachment_create call and the Cinder API will perform the reserve and initialize the connection in the single request. This full usage of attachment-create would be:: usage: cinder --os-volume-api-version 3.27 attachment-create ... Positional arguments: Name or ID of volume or volumes to attach. ID of instance attaching to. Optional arguments: --connect Make an active connection using provided connector info (True or False). --initiator iqn of the initiator attaching to. Default=None. --ip ip of the system attaching to. Default=None. --host Name of the host attaching to. Default=None. --platform Platform type. Default=x86_64. --ostype OS type. Default=linux2. --multipath Use multipath. Default=False. --mountpoint Mountpoint volume will be attached at. Default=None. Returns the connection information for the attachment:: +-------------------+-----------------------------------------------------------------------+ | Property | Value | +-------------------+-----------------------------------------------------------------------+ | access_mode | rw | | attachment_id | 6ab061ad-5c45-48f3-ad9c-bbd3b6275bf2 | | auth_method | CHAP | | auth_password | kystSioDKHSV2j9y | | auth_username | hxGUgiWvsS4GqAQcfA78 | | encrypted | False | | qos_specs | None | | target_discovered | False | | target_iqn | iqn.2010-10.org.openstack:volume-23212c97-5ed7-42d7-b433-dbf8fc38ec35 | | target_lun | 0 | | target_portal | 192.168.0.9:3260 | | volume_id | 23212c97-5ed7-42d7-b433-dbf8fc38ec35 | +-------------------+-----------------------------------------------------------------------+ attachment-update ----------------- ``` cinder --os-volume-api-version 3.27 attachment-update ``` Once we have a reserved volume, this CLI can be used to update an attachment for a cinder volume. This call is designed to be more of an attachment completion than anything else. It expects the value of a connector object to notify the driver that the volume is going to be connected and where it's being connected to. The usage is the following:: usage: cinder --os-volume-api-version 3.27 attachment-update ... Positional arguments: ID of attachment. Optional arguments: --initiator iqn of the initiator attaching to. Default=None. --ip ip of the system attaching to. Default=None. --host Name of the host attaching to. Default=None. --platform Platform type. Default=x86_64. --ostype OS type. 
Default=linux2. --multipath Use multipath. Default=False. --mountpoint Mountpoint volume will be attached at. Default=None. attachment-delete ----------------- ``` cinder --os-volume-api-version 3.27 attachment-delete ``` ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/doc/source/contributor/backporting.rst0000664000175000017500000001112000000000000022536 0ustar00zuulzuul00000000000000================= Backporting a Fix ================= **tl;dr:** Only propose a cherry pick from a *merged* commit, even if you want to backport the patch to multiple stable branches. Doing them all at once doesn't speed anything up, because the cinder-stable-maint team will **not** approve a backport to branch *n*-1 until the patch has been merged into branch *n*. From time to time, you may find a bug that's been fixed in master, and you'd like to have that fix in the release you're currently using (for example, Wallaby). What you want to do is propose a **backport** of the fix. .. note:: The Cinder project observes the OpenStack `Stable Branch Policy `_. Thus, not every change in master is backportable to the stable branches. In particular, features are *never* backportable. A really complicated bugfix may not be backportable if what it fixes is low-occurrence and there's a high risk that it may cause a regression elsewhere in the software. How can you tell? Ask in the ``#openstack-cinder`` channel on IRC or during the open discussion part of the weekly Cinder team meeting. Since we use git for source code version control, backporting is done by *cherry-picking* a change that has already been merged into one branch into another branch. The gerrit web interface makes it really easy to do this. In fact, maybe *too* easy. Here are some guidelines: * Before you cherry-pick a change, make sure it has already **merged** to master. If the change hasn't merged yet, it may require further revision, and the commit you've cherry-picked won't be the correct commit to backport. * Backports must be done in *reverse chronological order*. Since OpenStack releases are named alphabetically, this means reverse alphabetical order: ``stable/yoga``, ``stable/xena``, ``stable/wallaby``, etc. * The cherry-pick must have **merged** into the closest most recent branch before it will be considered for a branch, that is, a cherry-pick to ``stable/xena`` will **not** be considered until it has merged into ``stable/yoga`` first. * This is because sometimes a backport requires revision along the way. For example, different OpenStack releases support different versions of Python. So if a fix uses a language feature introduced in Python 3.8, it will merge just fine into current master (during zed development), but it will not pass unit tests in ``stable/yoga`` (which supports Python 3.6). Likewise, if you already cherry-picked the patch from master directly to ``stable/xena``, it won't pass tests there either (because xena also supports Python 3.6). So it's better to follow the policy and wait until the patch is merged into ``stable/yoga`` *before* you propose a backport to ``stable/xena``. * You can propose backports directly from git instead of using the gerrit web interface, but if you do, you must include the fact that it's a cherry-pick in the commit message. 
Gerrit does this automatically for you *if you cherry-pick from a merged commit* (which is the only kind of commit you should cherry-pick from in Gerrit); git will do it for you if you use the ``-x`` flag when you do a manual cherry-pick. This will keep the history of this backport intact as it goes from branch to branch. We want this information to be in the commit message and to be accurate, because if the fix causes a regression (which is always possible), it will be helpful to the poor sucker who has to fix it to know where this code came from without digging through a bunch of git history. If you have questions about any of this, or if you have a bug to fix that is only present in one of the stable branches, ask for advice in ``#openstack-cinder`` on IRC. Backport CI Testing ------------------- Like all code changes, backports should undergo continuous integration testing. This is done automatically by Zuul for changes that affect the main cinder code. When a vendor driver patch backport is proposed, we would like to see a clear statement on the gerrit review that the patch has been tested in an appropriate environment. This shouldn't be a big deal because presumably you've done local testing with your backend to ensure that the code works as expected in a stable branch; we're simply asking that this be documented on the backport. A good example of how to document this can be found on `https://review.opendev.org/c/openstack/cinder/+/821893/ `_. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/doc/source/contributor/cinder-groups.rst0000664000175000017500000001337100000000000023026 0ustar00zuulzuul00000000000000.. _cinder-groups: ===================================== Cinder Groups in Gerrit and Launchpad ===================================== Cinder-related groups in Launchpad ================================== .. list-table:: :header-rows: 1 * - group - what - who - where * - "Cinder" team - not sure, exactly - an "open" team, anyone with a Launchpad account can join - https://launchpad.net/~cinder * - "Cinder Bug Team" team - can triage (change status fields) on bugs - an "open" team, people self-nominate - https://launchpad.net/~cinder-bugs * - "Cinder Drivers" team - Maintains the Launchpad space for Cinder, os-brick, cinderlib, python-cinderclient, and cinder-tempest-plugin - Anyone who is interested in doing some work, has a Launchpad account, and is approved by the current members - https://launchpad.net/~cinder-drivers * - "Cinder Core security contacts" team - can see and work on private security bugs while they are under embargo - subset of cinder-core (the OpenStack Vulnerablity Management Team likes to keep this team small), so even though the PTL can add people, you should propose them on the mailing list first - https://launchpad.net/~cinder-coresec Cinder-related groups in Gerrit =============================== The Cinder project has total control over the membership of these groups. .. 
list-table:: :header-rows: 1 * - group - what - who - where * - cinder-core - +2 powers in Cinder project code repositories - cinder core reviewers - https://review.opendev.org/#/admin/groups/83,members * - cinder-specs-core - +2 powers in cinder-specs repository - cinder-core plus other appropriate people - https://review.opendev.org/#/admin/groups/344,members * - cinder-tempest-plugin-core - +2 powers on the cinder-tempest-plugin repository - cinder-core plus other appropriate people - https://review.opendev.org/#/admin/groups/2088,members * - rbd-iscsi-client-core - +2 powers on the rbd-iscsi-client repository - cinder-core (plus others if appropriate; currently only cinder-core) - https://review.opendev.org/admin/groups/b25813f5baef62b9449371c91f7dbacbcf7bc6d6,members The Cinder project shares control over the membership of these groups. If you want to add someone to one of these groups who doesn't already have membership by being in an included group, be sure to include the other groups or individual members in your proposal email. .. list-table:: :header-rows: 1 * - group - what - who - where * - cinder-stable-maint - +2 powers on backports to stable branches - subset of cinder-core (selected in consultation with stable-maint-core) plus the stable-maint-core team - https://review.opendev.org/#/admin/groups/534,members * - devstack-plugin-ceph-core - +2 powers on the code repo for the Ceph devstack plugin - cinder-core, devstack-core, manila-core, qa-release, other appropriate people - https://review.opendev.org/#/admin/groups/1196,members * - devstack-plugin-nfs-core - +2 powers on the code repo for the NFS devstack plugin - cinder-core, devstack-core, other appropriate people - https://review.opendev.org/#/admin/groups/1330,members * - devstack-plugin-open-cas-core - +2 powers on the code repo for the Open CAS devstack plugin - cinder-core, devstack-core, other appropriate people - https://review.opendev.org/#/admin/groups/2082,members NOTE: The following groups exist, but I don't think they are used for anything anymore. .. list-table:: :header-rows: 1 * - group - where * - cinder-ci - https://review.opendev.org/#/admin/groups/508,members * - cinder-milestone - https://review.opendev.org/#/admin/groups/82,members * - cinder-release - https://review.opendev.org/#/admin/groups/144,members * - cinder-release-branch - https://review.opendev.org/#/admin/groups/1507,members How Gerrit groups are connected to project repositories ------------------------------------------------------- The connection between the groups defined in gerrit and what they can do is defined in the project-config repository: https://opendev.org/openstack/project-config * ``gerrit/projects.yaml`` sets the config file for a project * ``gerrit/acls/openstack`` contains the config files The Special Relationship with OpenStack Command Line Client and SDK ------------------------------------------------------------------- The OpenStack Command Line Client (aka OSC) and the OpenStack SDK provide unified interfaces across most of the OpenStack APIs. To facilitate this, they make use of two kinds of core teams: - "service cores": people very familiar with the particular API that's being given an interface in the OSC or SDK. For example, cinder cores can be service cores for implementations in OSC or SDK that provide an interface to the Block Storage API. 
- additionally, the OSC and SDK projects have their own core teams whose members have a broader vision over the OSC and SDK, and therefore can enforce consistency across all the service code. This way, end users will be provided with a consistent and predictable interface to OpenStack as a whole. The cinder-core gerrit group acts as "service cores" for the OSC and SDK. This means that the cinder-core members have +2 powers on OSC or SDK reviews but do not have permission to approve patches. The connection between cinder-core and the python-openstackclient and openstacksdk code repositories is made directly in their ACL files in the ``project-config`` repository: - ``gerrit/acls/openstack/openstacksdk.config`` - ``gerrit/acls/openstack/python-openstackclient.config`` ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/doc/source/contributor/contributing.rst0000664000175000017500000004153400000000000022756 0ustar00zuulzuul00000000000000============================ So You Want to Contribute... ============================ For general information on contributing to OpenStack, please check out the `contributor guide `_ to get started. It covers all the basics that are common to all OpenStack projects: the accounts you need, the basics of interacting with our Gerrit review system, how we communicate as a community, etc. Below will cover the more project specific information you need to get started with the Cinder project, which is responsible for the following OpenStack deliverables: cinder | The OpenStack Block Storage service. | code: https://opendev.org/openstack/cinder | docs: https://cinder.openstack.org | api-ref: https://docs.openstack.org/api-ref/block-storage | Launchpad: https://launchpad.net/cinder os-brick | Shared library for managing local volume attaches. | code: https://opendev.org/openstack/os-brick | docs: https://docs.openstack.org/os-brick | Launchpad: https://launchpad.net/os-brick python-cinderclient | Python client library for the OpenStack Block Storage API; includes a CLI shell. | code: https://opendev.org/openstack/python-cinderclient | docs: https://docs.openstack.org/python-cinderclient | Launchpad: https://launchpad.net/python-cinderclient python-brick-cinderclient-ext | Extends the python-cinderclient library so that it can handle local volume attaches. | code: https://opendev.org/openstack/python-brick-cinderclient-ext | docs: https://docs.openstack.org/python-brick-cinderclient-ext | Launchpad: (doesn't have its own space, uses python-cinderclient's) cinderlib | Library that allows direct usage of Cinder backend drivers without cinder services. | code: https://opendev.org/openstack/cinderlib | docs: https://docs.openstack.org/cinderlib | Launchpad: https://launchpad.net/cinderlib rbd-iscsi-client | Library that provides a REST client that talks to ceph-isci's rbd-target-api to export rbd images/volumes to an iSCSI initiator. | code: https://opendev.org/openstack/rbd-iscsi-client | docs: https://docs.openstack.org/rbd-iscsi-client | Launchpad: https://launchpad.net/rbd-iscsi-client cinder-tempest-plugin | Contains additional Cinder tempest-based tests beyond those in the main OpenStack Integration Test Suite (tempest). | code: https://opendev.org/openstack/cinder-tempest-plugin | Launchpad: https://launchpad.net/cinder-tempest-plugin See the ``CONTRIBUTING.rst`` file in each code repository for more information about contributing to that specific deliverable. 
Additionally, you should look over the docs links above; most components have helpful developer information specific to that deliverable. (The main cinder documentation is especially thorough in this regard and you should read through it, particularly :ref:`background-concepts` and :ref:`programming-howtos`.) Communication ~~~~~~~~~~~~~ IRC We use IRC *a lot*. You will, too. You can find infomation about what IRC network OpenStack uses for communication (and tips for using IRC) in the `Setup IRC `_ section of the main `OpenStack Contributor Guide`. People working on the Cinder project may be found in the ``#openstack-cinder`` IRC channel during working hours in their timezone. The channel is logged, so if you ask a question when no one is around, you can check the log to see if it's been answered: http://eavesdrop.openstack.org/irclogs/%23openstack-cinder/ weekly meeting Wednesdays at 14:00 UTC in the ``#openstack-meeting-alt`` IRC channel. Meetings are logged: http://eavesdrop.openstack.org/meetings/cinder/ More information (including some pointers on meeting etiquette and an ICS file to put the meeting on your calendar) can be found at: http://eavesdrop.openstack.org/#Cinder_Team_Meeting The meeting agenda for a particular development cycle is kept on an etherpad. You can find a link to the current agenda from the Cinder Meetings wiki page: https://wiki.openstack.org/wiki/CinderMeetings The last meeting of each month is held simultaneously in videoconference and IRC. Connection information is posted on the meeting agenda. weekly bug squad meeting This is a half-hour meeting on Wednesdays at 15:00 UTC (right after the Cinder weekly meeting) in the ``#openstack-cinder`` IRC channel. At this meeting, led by the Cinder Bug Deputy, we discuss new bugs that have been filed against Cinder project deliverables (and, if there's time, discuss the relevance of old bugs that haven't seen any action recently). Info about the meeting is here: http://eavesdrop.openstack.org/#Cinder_Bug_Squad_Meeting mailing list We use the openstack-discuss@lists.openstack.org mailing list for asynchronous discussions or to communicate with other OpenStack teams. Use the prefix ``[cinder]`` in your subject line (it's a high-volume list, so most people use email filters). More information about the mailing list, including how to subscribe and read the archives, can be found at: http://lists.openstack.org/cgi-bin/mailman/listinfo/openstack-discuss virtual meet-ups From time to time, the Cinder project will have video meetings to address topics not easily covered by the above methods. These are announced well in advance at the weekly meeting and on the mailing list. Additionally, the Cinder project has been holding two virtual mid-cycle meetings during each development cycle, roughly at weeks R-18 and R-9. These are used to discuss follow-up issues from the PTG before the spec freeze, and to assess the development status of features and priorities roughly one month before the feature freeze. The exact dates of these are announced at the weekly meeting and on the mailing list. cinder festival of XS reviews This is a standing video meeting held the third Friday of each month from 14:00-16:00 UTC in meetpad to review very small patches that haven't yet been merged. It's held in video so we can quickly discuss issues and hand reviews back and forth. It is not recorded. 
Info about the meeting is here: http://eavesdrop.openstack.org/#Cinder_Festival_of_XS_Reviews physical meet-ups The Cinder project usually has a presence at the OpenDev/OpenStack Project Team Gathering that takes place at the beginning of each development cycle. Planning happens on an etherpad whose URL is announced at the weekly meetings and on the mailing list. Contacting the Core Team ~~~~~~~~~~~~~~~~~~~~~~~~ The cinder-core team is an active group of contributors who are responsible for directing and maintaining the Cinder project. As a new contributor, your interaction with this group will be mostly through code reviews, because only members of cinder-core can approve a code change to be merged into the code repository. You can learn more about the role of core reviewers in the OpenStack governance documentation: https://docs.openstack.org/contributors/common/governance.html#core-reviewer The membership list of cinder-core is maintained in gerrit: https://review.opendev.org/#/admin/groups/83,members You can also find the members of the cinder-core team at the Cinder weekly meetings. Project Team Lead ~~~~~~~~~~~~~~~~~ For each development cycle, Cinder project Active Technical Contributors (ATCs) elect a Project Team Lead who is responsible for running the weekly meetings, midcycles, and Cinder sessions at the Project Team Gathering for that cycle (and who is also ultimately responsible for everything else the project does). * You automatically become an ATC by making a commit to one of the cinder deliverables. Other people who haven't made a commit, but have contributed to the project in other ways (for example, making good bug reports) may be recognized as "extra-ATCs" and obtain voting privileges. If you are such a person, contact the current PTL before the "Extra-ATC freeze" indicated on the current development cycle schedule (which you can find from the `OpenStack Releases homepage `_ . The current Cinder project Project Team Lead (PTL) is listed in the `Cinder project reference `_ maintained by the OpenStack Technical Committee. All common PTL duties are enumerated in the `PTL guide `_. Additional responsibilities for the Cinder PTL can be found by reading through the :ref:`managing-development` section of the Cinder documentation. New Feature Planning ~~~~~~~~~~~~~~~~~~~~ The Cinder project uses both "specs" and "blueprints" to track new features. Here's a quick rundown of what they are and how the Cinder project uses them. specs | Exist in the cinder-specs repository. Each spec must have a Launchpad blueprint (see below) associated with it for tracking purposes. | A spec is required for any new Cinder core feature, anything that changes the Block Storage API, or anything that entails a mass change to existing drivers. | The specs repository is: https://opendev.org/openstack/cinder-specs | It contains a ``README.rst`` file explaining how to file a spec. | You can read rendered specs docs at: | https://specs.openstack.org/openstack/cinder-specs/ blueprints | Exist in Launchpad, where they can be targeted to release milestones. 
| You file one at https://blueprints.launchpad.net/cinder | Examples of changes that can be covered by a blueprint only are: * adding a new volume, backup, or target driver; or * adding support for a defined capability that already exists in the base volume, backup, or target drivers Feel free to ask in ``#openstack-cinder`` or at the weekly meeting if you have an idea you want to develop and you're not sure whether it requires a blueprint *and* a spec or simply a blueprint. The Cinder project observes the following deadlines. For the current development cycle, the dates of each (and a more detailed description) may be found on the release schedule, which you can find from: https://releases.openstack.org/ * spec freeze (all specs must be approved by this date) * new driver merge deadline * new target driver merge deadline * new feature status checkpoint * driver features declaration * third-party CI compliance checkpoint Additionally, the Cinder project observes the OpenStack-wide deadlines, for example, final release of non-client libraries (os-brick), final release for client libraries (python-cinderclient), feature freeze, etc. These are also noted and explained on the release schedule for the current development cycle. Task Tracking ~~~~~~~~~~~~~ We track our tasks in Launchpad. See the top of the page for the URL of each Cinder project deliverable. If you're looking for some smaller, easier work item to pick up and get started on, search for the 'low-hanging-fruit' tag in the Bugs section. When you start working on a bug, make sure you assign it to yourself. Otherwise someone else may also start working on it, and we don't want to duplicate efforts. Also, if you find a bug in the code and want to post a fix, make sure you file a bug (and assign it to yourself!) just in case someone else comes across the problem in the meantime. Reporting a Bug ~~~~~~~~~~~~~~~ You found an issue and want to make sure we are aware of it? You can do so in the Launchpad space for the affected deliverable: * cinder: https://bugs.launchpad.net/cinder * os-brick: https://bugs.launchpad.net/os-brick * python-cinderclient: https://bugs.launchpad.net/python-cinderclient * python-brick-cinderclient-ext: same as for python-cinderclient, but tag the bug with 'brick-cinderclient-ext' * cinderlib: https://bugs.launchpad.net/cinderlib * cinder-tempest-plugin: https://bugs.launchpad.net/cinder-tempest-plugin Getting Your Patch Merged ~~~~~~~~~~~~~~~~~~~~~~~~~ Before your patch can be merged, it must be *reviewed* and *approved*. The Cinder project policy is that a patch must have two +2s before it can be merged. (Exceptions are documentation changes, which require only a single +2, and specs, for which the PTL may require more than two +2s, depending on the complexity of the proposal.) Only members of the cinder-core team can vote +2 (or -2) on a patch, or approve it. .. note:: Although your contribution will require reviews by members of cinder-core, these aren't the only people whose reviews matter. Anyone with a gerrit account can post reviews, so you can ask other developers you know to review your code ... and you can review theirs. (A good way to learn your way around the codebase is to review other people's patches.) If you're thinking, "I'm new at this, how can I possibly provide a helpful review?", take a look at `How to Review Changes the OpenStack Way `_. There are also some Cinder project specific reviewing guidelines in the :ref:`reviewing-cinder` section of the Cinder Contributor Guide. 
Patches lacking unit tests are unlikely to be approved. Check out the :ref:`testing-cinder` section of the Cinder Contributors Guide for a discussion of the kinds of testing we do with cinder. In addition, some changes may require a release note. Any patch that changes functionality, adds functionality, or addresses a significant bug should have a release note. You can find more information about how to write a release note in the :ref:`release-notes` section of the Cinder Contributors Guide. Keep in mind that the best way to make sure your patches are reviewed in a timely manner is to review other people's patches. We're engaged in a cooperative enterprise here. If your patch has a -1 from Zuul, you should fix it right away, because people are unlikely to review a patch that is failing the CI system. * If it's a pep8 issue, the job leaves sufficient information for you to fix the problems yourself. * If you are failing unit or functional tests, you should look at the failures carefully. These tests guard against regressions, so if your patch causing failures, you need to figure out exactly what is going on. * The unit, functional, and pep8 tests can all be run locally before you submit your patch for review. By doing so, you can help conserve gate resources. * Other test failures: we also run integration tests in the gate that run your changes in the context of an OpenStack deployment, where cinder and os-brick interact with users, admins, and other services. Sometimes these tests will fail, and it may not obviously be your patch's fault. Keep in mind, however, that the failure could still be a cinder issue, for which the cinder project (which includes you, as a contributor) is responsible. So please take a few minutes to look over the logs from the failing test job to see if you can identify the issue. * If you're not sure how to do this, ask in the ``#openstack-cinder`` channel (or during open discussion at the weekly cinder meeting), and someone will walk you through the basic process. * You can tell Zuul to do a recheck, but first: * Make sure you look at the job's build history, because if the job is failing consistently, it's probably due to some particular issue that must be fixed before the job will start passing again. So a recheck in this situation will just waste resources. Check the mailing list or ask in IRC or look at the comments on your patch (sometimes a reviewer will leave a note saying not to recheck until some other patch has merged). * When you think a recheck is appropriate, make sure you follow the OpenStack community guidelines for `How to Handle Test Failures `_. How long it may take for your review to get attention will depend on the current project priorities. For example, the feature freeze is at the third milestone of each development cycle, so feature patches have the highest priority just before M-3. Likewise, once the new driver freeze is in effect, new driver patches are unlikely to receive timely reviews until after the stable branch has been cut (this happens three weeks before release). Similarly, os-brick patches have review priority before the nonclient library release deadline, and cinderclient patches have priority before the client library release each cycle. 
These dates are clearly noted on the release schedule for the current release, which you can find from https://releases.openstack.org/ You can see who's been doing what with Cinder recently in Stackalytics: https://www.stackalytics.io/report/activity?module=cinder-group ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/doc/source/contributor/database-migrations.rst0000664000175000017500000001157500000000000024167 0ustar00zuulzuul00000000000000=================== Database migrations =================== .. note:: This document details how to generate database migrations as part of a new feature or bugfix. For info on how to apply existing database migrations, refer to the documentation for the :program:`cinder-manage db sync` command in :doc:`/cli/cinder-manage`. For info on the general upgrade process for a cinder deployment, refer to :doc:`/admin/upgrades`. Occasionally the databases used in cinder will require schema or data migrations. Schema migrations ----------------- .. versionchanged:: 19.0.0 (Xena) The database migration engine was changed from ``sqlalchemy-migrate`` to ``alembic``. .. versionchanged:: 22.0.0 (Antelope) The legacy ``sqlalchemy-migrate``-based database migrations were removed. The `alembic`__ database migration tool is used to manage schema migrations in cinder. The migration files and related metadata can be found in ``cinder/db/migrations``. As discussed in :doc:`/admin/upgrades`, these can be run by end users using the :program:`cinder-manage db sync` command. .. __: https://alembic.sqlalchemy.org/en/latest/ .. note:: There were also legacy migrations provided in the ``cinder/db/legacy_migrations`` directory. These were provided to facilitate upgrades from pre-Xena (19.0.0) deployments. They were removed in the 22.0.0 (Antelope) release. The best reference for alembic is the `alembic documentation`__, but a small example is provided here. You can create the migration either manually or automatically. Manual generation might be necessary for some corner cases such as renamed tables, but auto-generation will typically handle most cases. Examples of both are provided below. In both examples, we're going to demonstrate how you could add a new model, ``Foo``, to the main database. .. __: https://alembic.sqlalchemy.org/en/latest/ .. code-block:: diff diff --git cinder/db/sqlalchemy/models.py cinder/db/sqlalchemy/models.py index 7eab643e14..8f70bcdaca 100644 --- cinder/db/sqlalchemy/models.py +++ cinder/db/sqlalchemy/models.py @@ -73,6 +73,16 @@ def MediumText(): sqlalchemy.dialects.mysql.MEDIUMTEXT(), 'mysql') +class Foo(BASE, models.SoftDeleteMixin): + """A test-only model.""" + + __tablename__ = 'foo' + + id = sa.Column(sa.Integer, primary_key=True) + uuid = sa.Column(sa.String(36), nullable=True) + bar = sa.Column(sa.String(255)) + + class Service(BASE, models.SoftDeleteMixin): """Represents a running service on a host.""" (you might not be able to apply the diff above cleanly - this is just a demo). .. rubric:: Auto-generating migration scripts In order for alembic to compare the migrations with the underlying models, it requires a database that it can inspect and compare the models against. As such, we first need to create a working database. We'll bypass ``cinder-manage`` for this and go straight to the :program:`alembic` CLI. The ``alembic.ini`` file provided in the ``cinder/db`` directory is helpfully configured to use an SQLite database by default (``cinder.db``).
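Whichever approach you take below, the end result is a new revision file under ``cinder/db/migrations/versions``. For orientation, a rough hand-written sketch of what such a file might contain for the ``Foo`` model above is shown here; the revision identifiers and the file name are illustrative placeholders rather than values from cinder's real migration chain, and in practice alembic normally generates this file for you:

.. code-block:: python

   # Illustrative name: cinder/db/migrations/versions/abc123_add_foo_model.py
   """Add foo model"""
   import sqlalchemy as sa
   from alembic import op

   # alembic fills these in when it generates the file; placeholders here.
   revision = 'abc123'
   down_revision = 'def456'


   def upgrade():
       # Table backing the new Foo model; deleted/deleted_at come from the
       # SoftDeleteMixin used by the model above.
       op.create_table(
           'foo',
           sa.Column('deleted_at', sa.DateTime),
           sa.Column('deleted', sa.Integer),
           sa.Column('id', sa.Integer, primary_key=True),
           sa.Column('uuid', sa.String(36), nullable=True),
           sa.Column('bar', sa.String(255)),
       )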
Create this database and apply the current schema, as dictated by the current migration scripts: .. code-block:: bash $ tox -e venv -- alembic -c cinder/db/alembic.ini \ upgrade head Once done, you should notice the new ``cinder.db`` file in the root of the repo. Now, let's generate the new revision: .. code-block:: bash $ tox -e venv -- alembic -c cinder/db/alembic.ini \ revision -m "Add foo model" --autogenerate This will create a new file in ``cinder/db/migrations/versions`` with ``add_foo_model`` in the name including (hopefully!) the necessary changes to add the new ``Foo`` model. You **must** inspect this file once created, since there's a chance you'll be missing imports or something else which will need to be manually corrected. Once you've inspected this file and made any required changes, you can apply the migration and make sure it works: .. code-block:: bash $ tox -e venv -- alembic -c cinder/db/alembic.ini \ upgrade head .. rubric:: Manually generating migration scripts For trickier migrations or things that alembic doesn't understand, you may need to manually create a migration script. This is very similar to the auto-generation step, with the exception being that you don't need to have a database in place beforehand. As such, you can simply run: .. code-block:: bash $ tox -e venv -- alembic -c cinder/db/alembic.ini \ revision -m "Add foo model" As before, this will create a new file in ``cinder/db/migrations/versions`` with ``add_foo_model`` in the name. You can simply modify this to make whatever changes are necessary. Once done, you can apply the migration and make sure it works: .. code-block:: bash $ tox -e venv -- alembic -c cinder/db/alembic.ini \ upgrade head Data migrations --------------- .. todo: Populate this. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/doc/source/contributor/development.environment.rst0000664000175000017500000001163100000000000025127 0ustar00zuulzuul00000000000000.. Copyright 2010-2011 United States Government as represented by the Administrator of the National Aeronautics and Space Administration. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Setting Up a Development Environment ==================================== This page describes how to setup a working Python development environment that can be used in developing cinder on Ubuntu, Fedora or macOS. These instructions assume you're already familiar with git. Refer to GettingTheCode_ for additional information. .. _GettingTheCode: https://wiki.openstack.org/wiki/Getting_The_Code Following these instructions will allow you to run the cinder unit tests. Running cinder is currently only supported on Linux. Some jobs can be run on macOS, but unfortunately due to some differences in system packages there are known issues with running unit tests. Virtual environments -------------------- Cinder development uses `virtualenv `__ to track and manage Python dependencies while in development and testing. 
This allows you to install all of the Python package dependencies in a virtual environment or "virtualenv" (a special subdirectory of your cinder directory), instead of installing the packages at the system level. .. note:: Virtualenv is useful for running the unit tests, but is not typically used for full integration testing or production usage. Linux Systems ------------- .. note:: If you have Ansible and git installed on your system, you may be able to get a working development environment quickly set up by running the following: .. code:: sudo ansible-pull -U https://github.com/stmcginnis/cinder-dev-setup If that does not work for your system, continue on with the manual steps below. Install the prerequisite packages. On Ubuntu20.04-64:: sudo apt-get install libssl-dev python3-pip libmysqlclient-dev libpq-dev libffi-dev To get a full python3 development environment, the two python3 packages need to be added to the list above:: python3-dev python3-pip On Red Hat-based distributions e.g., Fedora/RHEL/CentOS/Scientific Linux (tested on CentOS 6.5 and CentOS 7.3):: sudo yum install python-virtualenv openssl-devel python-pip git gcc libffi-devel libxslt-devel mysql-devel postgresql-devel On openSUSE-based distributions (SLES 12, openSUSE 13.1, Factory or Tumbleweed):: sudo zypper install gcc git libmysqlclient-devel libopenssl-devel postgresql-devel python-devel python-pip macOS Systems ------------- Install virtualenv:: sudo pip install virtualenv Check the version of OpenSSL you have installed:: openssl version If you have installed OpenSSL 1.0.0a, which can happen when installing a MacPorts package for OpenSSL, you will see an error when running ``cinder.tests.auth_unittest.AuthTestCase.test_209_can_generate_x509``. The stock version of OpenSSL that ships with Mac OS X 10.6 (OpenSSL 0.9.8l) or later should work fine with cinder. Getting the code ---------------- Grab the code:: git clone https://opendev.org/openstack/cinder.git cd cinder Running unit tests ------------------ The preferred way to run the unit tests is using ``tox``. It executes tests in isolated environment, by creating separate virtualenv and installing dependencies from the ``requirements.txt`` and ``test-requirements.txt`` files, so the only package you install is ``tox`` itself:: sudo pip install tox Run the unit tests by doing:: tox -e py3 See :doc:`testing` for more details. .. _virtualenv: Manually installing and using the virtualenv -------------------------------------------- You can also manually install the virtual environment:: tox -e py3 --notest This will install all of the Python packages listed in the ``requirements.txt`` file into your virtualenv. To activate the Cinder virtualenv you can run:: $ source .tox/py3/bin/activate To exit your virtualenv, just type:: $ deactivate Or, if you prefer, you can run commands in the virtualenv on a case by case basis by running:: $ tox -e venv -- Contributing Your Work ---------------------- Once your work is complete you may wish to contribute it to the project. Cinder uses the Gerrit code review system. For information on how to submit your branch to Gerrit, see GerritWorkflow_. .. _GerritWorkflow: https://docs.openstack.org/infra/manual/developers.html#development-workflow ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/doc/source/contributor/documentation.rst0000664000175000017500000001376400000000000023124 0ustar00zuulzuul00000000000000.. 
Copyright 2010-2011 United States Government as represented by the Administrator of the National Aeronautics and Space Administration. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Contributing Documentation to Cinder ==================================== Starting with the Pike release, Cinder's documentation has been moved from the openstack-manuals repository to the ``docs`` directory in the Cinder repository. This makes it even more important that Cinder add and maintain good documentation. .. note:: Documentation for python-cinderclient and os-brick has undergone the same transition. The information here can be applied for those projects as well. This page provides guidance on how to provide documentation for those who may not have previously been active writing documentation for OpenStack. Documentation Content --------------------- To keep the documentation consistent across projects, and to maintain quality, please follow the OpenStack `Writing style `_ guide. Using RST --------- OpenStack documentation uses reStructuredText to write documentation. The files end with a ``.rst`` extension. The ``.rst`` files are then processed by Sphinx to build HTML based on the RST files. .. note:: Files that are to be included using the ``.. include::`` directive in an RST file should use the ``.inc`` extension. If you instead use the ``.rst`` this will result in the RST file being processed twice during the build and cause Sphinx to generate a warning during the build. reStructuredText is a powerful language for generating web pages. The documentation team has put together an `RST conventions`_ page with information and links related to RST. .. _RST conventions: https://docs.openstack.org/doc-contrib-guide/rst-conv.html Building Cinder's Documentation ------------------------------- To build documentation the following command should be used: .. code-block:: console tox -e docs,pep8 When building documentation it is important to also run pep8 as it is easy to introduce pep8 failures when adding documentation. (The tox pep8 job also runs doc8, but currently we do not run doc8 as part of the tox docs job.) .. note:: The tox documentation jobs (docs, releasenotes, api-ref) are set up to treat Sphinx warnings as errors. This is because many Sphinx warnings result in improperly formatted pages being generated, so we prefer to fix those right now, instead of waiting for someone to report a docs bug. During the documentation build a number of things happen: * All of the RST files under ``doc/source`` are processed and built. * The openstackdocs theme is applied to all of the files so that they will look consistent with all the other OpenStack documentation. * The resulting HTML is put into ``doc/build/html``. * Sample files like cinder.conf.sample are generated and put into ``doc/source/_static``. 
* All of Cinder's ``.py`` files are processed and the docstrings are used to generate the files under ``doc/source/contributor/api``. After the build completes, the results may be accessed via a web browser in the ``doc/build/html`` directory structure. Review and Release Process -------------------------- Documentation changes go through the same review process as all other changes. .. note:: Reviewers can see the resulting web page output by clicking on ``openstack-tox-docs`` in the "Zuul check" table on the review, and then look for "Artifacts" > "Docs preview site". This is also true for the ``build-openstack-api-ref`` and ``build-openstack-releasenotes`` check jobs. Once a patch is approved, it is immediately released to the docs.openstack.org website and can be seen under Cinder's Documentation Page at https://docs.openstack.org/cinder/latest\ . When a new release is cut, a snapshot of that documentation will be kept at ``https://docs.openstack.org/cinder/``. Changes from master can be backported to previous branches if necessary. Doc Directory Structure ----------------------- The main location for Cinder's documentation is the ``doc/source`` directory. The top level index file that is seen at `https://docs.openstack.org/cinder/latest`_ resides here, as well as the ``conf.py`` file which is used to set a number of parameters for the build of OpenStack's documentation. Each of the directories under ``source`` is for a specific kind of documentation, as documented in the ``README`` in each directory: .. toctree:: :maxdepth: 1 ../admin/README ../cli/README ../configuration/README ../contributor/README ../install/README ../reference/README ../user/README .. _https://docs.openstack.org/cinder/latest: https://docs.openstack.org/cinder/latest Finding something to contribute ------------------------------- If you are reading the documentation and notice something incorrect or undocumented, you can directly submit a patch following the advice set out below. There are also documentation bugs that other people have noticed that you could address: * https://bugs.launchpad.net/cinder/+bugs?field.tag=doc * https://bugs.launchpad.net/python-cinderclient/+bugs?field.tag=doc * https://bugs.launchpad.net/os-brick/+bugs?field.tag=doc * https://bugs.launchpad.net/cinderlib/+bugs?field.tag=doc .. note:: If you don't see a bug listed, you can also try the tag 'docs' or 'documentation'. We tend to use 'doc' as the appropriate tag, but occasionally a bug gets tagged with a variant. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/doc/source/contributor/drivers.rst0000664000175000017500000002256600000000000021725 0ustar00zuulzuul00000000000000.. Copyright (c) 2013 OpenStack Foundation All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Drivers ======= Cinder exposes an API to users to interact with different storage backend solutions. The following are standards across all drivers for Cinder services to properly interact with a driver.
Basic attributes ---------------- There are some basic attributes that all driver classes should have: * VERSION: Driver version in string format. No naming convention is imposed, although semantic versioning is recommended. * CI_WIKI_NAME: Must be the exact name of the `ThirdPartySystems wiki page `_. This is used by our tooling system to associate jobs to drivers and track their CI reporting status correctly. The tooling system will also use the name and docstring of the driver class. Configuration options --------------------- Each driver requires different configuration options set in the cinder.conf file to operate, and, due to the complexities of Object Oriented programming mechanisms (inheritance, composition, overriding, etc.), once your driver defines its parameters in the code, Cinder has no automated way of telling which configuration options are relevant to your driver. In order to assist operators and installation tools, we recommend reporting the relevant options: * For operators: In the documentation under ``doc/source/configuration/block-storage``. * For operators and installers: Through the ``get_driver_options`` static method, which returns a list of all the Oslo Config parameters. .. _drivers_minimum_features: Minimum Features ---------------- Minimum features are enforced to avoid having a grid of what features are supported by which drivers and which releases. Cinder Core requires that all drivers implement the following minimum features. Core Functionality ------------------ * Volume Create/Delete * Volume Attach/Detach * Snapshot Create/Delete * Create Volume from Snapshot * Get Volume Stats * Copy Image to Volume * Copy Volume to Image * Clone Volume * Extend Volume Security Requirements --------------------- * Drivers must delete volumes in a way where volumes deleted from the backend will not leak data into new volumes when they are created. Cinder operates in multi-tenant environments and this is critical to ensure data safety. * Drivers should support secure TLS/SSL communication between the cinder volume service and the backend as configured by the "driver_ssl_cert_verify" and "driver_ssl_cert_path" options in cinder.conf. * Drivers should use standard Python libraries to handle encryption-related functionality, and not contain custom implementations of encryption code. .. _drivers_volume_stats: Volume Stats ------------ Volume stats are used by the different schedulers and are how a driver reports the current state of its backend. The following should be provided by a driver. * driver_version * free_capacity_gb * storage_protocol * total_capacity_gb * vendor_name * volume_backend_name **NOTE:** If the driver is unable to provide a value for free_capacity_gb or total_capacity_gb, keywords can be provided instead. Please use 'unknown' if the backend cannot report the value or 'infinite' if the backend has no upper limit. However, it is recommended to report real values, as the Cinder scheduler assigns the lowest weight to any storage backend reporting 'unknown' or 'infinite'. **NOTE:** By default, Cinder assumes that the driver supports attached volume extending. If it doesn't, it should report 'online_extend_support=False'. Otherwise the scheduler will attempt to perform the operation, and may leave the volume in 'error_extending' state. The value of ``storage_protocol`` is a single string representing the transport protocol used by the storage.
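To make this reporting concrete, here is a minimal, hypothetical sketch of a stats update; the driver class, the CI wiki name, and the ``_query_backend`` helper are made up for illustration, and the method name simply follows the convention used by the in-tree drivers:

.. code-block:: python

   from cinder.common import constants
   from cinder.volume import driver


   class ExampleVolumeDriver(driver.VolumeDriver):
       """Hypothetical driver, shown only to illustrate stats reporting."""

       VERSION = '1.0.0'
       CI_WIKI_NAME = 'Example_Storage_CI'

       def _update_volume_stats(self):
           backend = self._query_backend()  # placeholder for a real backend call
           self._stats = {
               'volume_backend_name': self.configuration.safe_get(
                   'volume_backend_name') or 'example_backend',
               'vendor_name': 'Example Vendor',
               'driver_version': self.VERSION,
               # Use the constants rather than bare strings like 'iSCSI'.
               'storage_protocol': constants.ISCSI,
               # Report real numbers whenever possible; 'unknown' and
               # 'infinite' get the lowest scheduler weight.
               'total_capacity_gb': backend.get('total_gb', 'unknown'),
               'free_capacity_gb': backend.get('free_gb', 'unknown'),
           }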
Existing protocols are present in ``cinder.common.constants`` and should be used by drivers instead of string literals. Variant values only exist for older drivers that were already reporting those values. New drivers must use non variant versions. The ``storage_protocol`` can be used by operators using the ``cinder get-pools --detail`` command, by volume types in their extra specs, and by the filter and goodness functions. We must not mistake the value of the ``storage_protocol`` with the identifier of the os-brick connector, which is returned by the ``initialize_connection`` driver method in the ``driver_volume_type`` dictionary key. In some cases they may have the same value, but they are different things. Feature Enforcement ------------------- All concrete driver implementations should use the ``cinder.interface.volumedriver`` decorator on the driver class:: @interface.volumedriver class LVMVolumeDriver(driver.VolumeDriver): This will register the driver and allow automated compliance tests to run against and verify the compliance of the driver against the required interface to support the `Core Functionality`_ listed above. Running ``tox -e compliance`` will verify all registered drivers comply to this interface. This can be used during development to perform self checks along the way. Any missing method calls will be identified by the compliance tests. The details for the required volume driver interfaces can be found in the ``cinder/interface/volume_*_driver.py`` source. New Driver Review Checklist --------------------------- There are some common issues caught during the review of new driver patches that can easily be avoided. New driver maintainers should review the :doc:`new_driver_checklist` for some things to watch out for. .. toctree:: :hidden: new_driver_checklist Driver Development Documentations --------------------------------- The LVM driver is our reference for all new driver implementations. The information below can provide additional documentation for the methods that volume drivers need to implement. Volume ID ````````` Drivers should always get a volume's ID using the ``name_id`` attribute instead of the ``id`` attribute. A Cinder volume may have two different UUIDs, a user facing one, and one the driver should use. When a volume is created these two are the same, but when doing a generic migration (create new volume, then copying data) they will be different if we were unable to rename the new volume in the final migration steps. So the volume will have been created using the new volume's UUID and the driver will have to look for it using that UUID, but the user on the other hand will keep referencing the volume with the original UUID. Base Driver Interface ````````````````````` The methods documented below are the minimum required interface for a volume driver to support. All methods from this interface must be implemented in order to be an official Cinder volume driver. .. automodule:: cinder.interface.volume_driver :members: :noindex: Manage/Unmanage Support ``````````````````````` An optional feature a volume backend can support is the ability to manage existing volumes or unmanage volumes - keep the volume on the storage backend but no longer manage it through Cinder. To support this functionality, volume drivers must implement these methods: .. 
automodule:: cinder.interface.volume_manageable_driver :members: :noindex: Manage/Unmanage Snapshot Support ```````````````````````````````` In addition to the ability to manage and unmanage volumes, Cinder backend drivers may also support managing and unmanaging volume snapshots. These additional methods must be implemented to support these operations. .. automodule:: cinder.interface.volume_snapshotmanagement_driver :members: :noindex: Volume Consistency Groups ````````````````````````` Some storage backends support the ability to group volumes and create write consistent snapshots across the group. In order to support these operations, the following interface must be implemented by the driver. .. automodule:: cinder.interface.volume_consistencygroup_driver :members: :noindex: Generic Volume Groups ````````````````````` The generic volume groups feature provides the ability to manage a group of volumes together. Because this feature is implemented at the manager level, every driver gets this feature by default. If a driver wants to override the default behavior to support additional functionalities such as consistent group snapshot, the following interface must be implemented by the driver. Once every driver supporting volume consistency groups has added the consistent group snapshot capability to generic volume groups, we no longer need the volume consistency groups interface listed above. .. automodule:: cinder.interface.volume_group_driver :members: :noindex: Revert To Snapshot `````````````````` Some storage backends support the ability to revert a volume to the last snapshot. To support snapshot revert, the following interface must be implemented by the driver. .. automodule:: cinder.interface.volume_snapshot_revert :members: :noindex: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/doc/source/contributor/drivers_locking_examples.rst0000664000175000017500000001471700000000000025334 0ustar00zuulzuul00000000000000.. Using orphan, as document is explicitly imported and not part of the toctree :orphan: Drivers Locking Examples ======================== This document presents an incomplete list of locks being currently used in driver related code (main driver code, helper classes and method, etc.), to serve as a reference to other driver developers. .. note:: Please keep in mind that Cinder drivers may support different deployment options. Some may only support running one backend on each node. Others may support running multiple backends in a single node. And some may even support Active-Active deployments. Therefore these references are not necessarily examples of how drivers achieve Active-Active. LIO target - Lock scope: Node. - Critical section: Calls to `cinder-rtstool` CLI. - Lock name: `'lioadm'`. - Where: `_execute` method. - File: `cinder/volume/targets/lio.py` NVMET target - Lock scope: Node. - Critical section: Creating or deleting NVMeOF targets operations. - Lock name: `'nvmetcli'`. - Where: `delete_nvmeof_target` and `create_nvmeof_target` methods. - File: `cinder/volume/targets/nvmet.py`. HGST driver: - Lock scope: Process. - Critical section: Create volume operation. - Lock name: `'hgst'devices'`. - Where: `create_volume` method. - File: `cinder/volume/drivers/hgst.py`. Solidfire driver: - Lock scope: Process - Critical section: Creating volume from an image, cloning volume, creating volume from a snapshot. - Lock name: `solidfire-{resource_id}`. 
- Where: `locked_image_id_operation` and `locked_source_id_operation` decorators. - File: `cinder/volume/drivers/solidfire.py`. Infinidat driver: - Lock scope: Global. - Critical section: Initialize and terminate connections operations. - Lock name: `infinidat-{management_address}-lock`. - Where: `initialize_connection` and `terminate_connection` methods. - File: `cinder/volume/drivers/infinidat.py`. Kaminario FC driver: - Lock scope: Global. - Critical section: Initialize and terminate connections operations. - Lock name: `kaminario-{san_ip}`. - Where: `initialize_connection` and `terminate_connection` methods. - File: `cinder/volume/drivers/kaminario/kaminario_fc.py` Kaminario iSCSI driver: - Lock scope: Global. - Critical section: Initialize and terminate connections operations. - Lock name: `kaminario-{san_ip}`. - Where: `initialize_connection` and `terminate_connection` methods. - File: `cinder/volume/drivers/kaminario/kaminario_iscsi.py` Dell Unity: - Lock scope: Global. - Critical section: Create or get a host on the backend. - Lock name: `{self.host}-{name}` - Where: `create_host` method. - File: `cinder/volume/drivers/dell_emc/unity/client.py` Dell Unity: - Lock scope: Global. - Critical section: Create host and attach. - Lock name: `{client.host}-{host_name}` - Where: `_create_host_and_attach` method. - File: `cinder/volume/drivers/dell_emc/unity/adapter.py` Dell Unity: - Lock scope: Global. - Critical section: Create host and attach as part of the `initialize_connection` process, and also detach and delete host as part of the `terminate_connection` process. - Lock name: `{client.host}-{host_name}` - Where: `_create_host_and_attach` and `_detach_and_delete_host` methods. - File: `cinder/volume/drivers/dell_emc/unity/adapter.py` Dothill: - Lock scope: Global - Critical section: Retrieving a session key from the array. Perform HTTP requests on the device. - Lock name: `{driver_name}-{array_name}` - Where: `_get_session_key` and `_api_request` methods. - File: `cinder/volume/drivers/dothill/dothill_client.py`. Dothill: - Lock scope: Global - Critical section: Mapping a volume as part of the `initialize_connection` process. - Lock name: `{driver_name}-{array_name}-map` - Where: `map_volume` method. - File: `cinder/volume/drivers/dothill/dothill_client.py`. 
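As a rough illustration of how entries like the ones above appear in driver code, the hypothetical sketch below guards one method with a coordinator-based lock and another with a node-local lock; the class, lock names, and helper calls are made up, and it assumes the ``cinder.coordination.synchronized`` and ``cinder.utils.synchronized`` helpers used by the in-tree drivers:

.. code-block:: python

   from cinder import coordination
   from cinder import utils


   class ExampleDriver(object):
       """Hypothetical driver, shown only to illustrate the lock helpers."""

       def __init__(self, san_ip):
           self.san_ip = san_ip  # backend management address (illustrative)

       @coordination.synchronized('example-{self.san_ip}')
       def initialize_connection(self, volume, connector):
           # Serializes connection changes per array; the effective scope
           # depends on the configured coordination backend (file locks by
           # default, a DLM such as etcd for Active-Active deployments).
           return self._do_export(volume, connector)  # placeholder

       @utils.synchronized('example-cli', external=True)
       def _run_cli(self, *cmd):
           # Node-scoped lock serializing CLI calls on this host.
           return self._execute(*cmd)  # placeholder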
Other files ----------- Other files that also make use of the locking mechanisms, and can be useful as reference, are: - `cinder/volume/drivers/dell_emc/vmax/common.py` - `cinder/volume/drivers/dell_emc/vmax/masking.py` - `cinder/volume/drivers/dell_emc/vmax/provision.py` - `cinder/volume/drivers/dell_emc/vmax/rest.py` - `cinder/volume/drivers/dell_emc/vmax/utils.py` - `cinder/volume/drivers/fujitsu/eternus_dx_common.py` - `cinder/volume/drivers/hpe/hpe_3par_common.py` - `cinder/volume/drivers/hpe/hpe_lefthand_iscsi.py` - `cinder/volume/drivers/huawei/huawei_driver.py` - `cinder/volume/drivers/huawei/rest_client.py` - `cinder/volume/drivers/huawei/smartx.py` - `cinder/volume/drivers/ibm/flashsystem_common.py` - `cinder/volume/drivers/ibm/flashsystem_fc.py` - `cinder/volume/drivers/ibm/flashsystem_iscsi.py` - `cinder/volume/drivers/ibm/ibm_storage/ds8k_helper.py` - `cinder/volume/drivers/ibm/ibm_storage/ds8k_proxy.py` - `cinder/volume/drivers/ibm/ibm_storage/ds8k_replication.py` - `cinder/volume/drivers/ibm/ibm_storage/xiv_proxy.py` - `cinder/volume/drivers/ibm/storwize_svc/storwize_const.py` - `cinder/volume/drivers/ibm/storwize_svc/storwize_svc_fc.py` - `cinder/volume/drivers/ibm/storwize_svc/storwize_svc_iscsi.py` - `cinder/volume/drivers/inspur/instorage/instorage_const.py` - `cinder/volume/drivers/inspur/instorage/instorage_fc.py` - `cinder/volume/drivers/inspur/instorage/instorage_iscsi.py` - `cinder/volume/drivers/nec/cli.py` - `cinder/volume/drivers/nec/volume_helper.py` - `cinder/volume/drivers/netapp/dataontap/nfs_base.py` Notes on Driver Locking ----------------------- From the volume manager flow, create_cloned_volume() happens to be called with a lock that prevents concurrent calls to clone from the same volume at the same time. This is done by the cinder/volume/manager.py create_volume() code:: elif source_volid is not None: locked_action = "%s-%s" % (source_volid, 'delete_volume') and subsequent COORDINATOR.get_lock() call. This seems to have been intended to prevent a volume from being deleted while being used as the source of a volume clone, the fact that it prevents concurrent clone operations is a side effect. This means that a driver that cannot correctly handle concurrent clone operations from the same volume will work for normal clone operations, but then fail when a clone operation is performed as part of cloning from the image-volume cache, or cloning from a Cinder backend of Glance. (See https://bugs.launchpad.net/cinder/+bug/1851512 for an example.) It should be assumed that, at some point, this locking behavior will be changed to allow concurrent clone calls. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/doc/source/contributor/gerrit.rst0000664000175000017500000003572400000000000021547 0ustar00zuulzuul00000000000000.. _reviewing-cinder: Code Reviews ============ Cinder follows the same `Review guidelines`_ outlined by the OpenStack community. This page provides additional information that is helpful for reviewers of patches to Cinder. Gerrit ------ Cinder uses the `Gerrit`_ tool to review proposed code changes. The review site is https://review.opendev.org Gerrit is a complete replacement for Github pull requests. `All Github pull requests to the Cinder repository will be ignored`. See `Quick Reference`_ for information on quick reference for developers. See `Getting Started`_ for information on how to get started using Gerrit. 
See `Development Workflow`_ for more detailed information on how to work with Gerrit. The Great Change ---------------- With the demise of Python 2.7 in January 2020, beginning with the Ussuri development cycle, Cinder only needs to support Python 3 runtimes (in particular, 3.6 and 3.7). Thus we can begin to incorporate Python 3 language features and remove Python 2 compatibility code. At the same time, however, we are still supporting stable branches that must support Python 2. Our biggest interaction with the stable branches is backporting bugfixes, where in the ideal case, we're just doing a simple cherry-pick of a commit from master to the stable branches. You can see that there's some tension here. With that in mind, here are some guidelines for reviewers and developers that the Cinder community has agreed on during this phase where we want to write pure Python 3 but still must support Python 2 code. .. _transition-guidelines: Python 2 to Python 3 transition guidelines ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ * We need to be checking the code coverage of test cases very carefully so that new code has excellent coverage. The idea is that we want these tests to fail when a backport is proposed to a stable branch and the tests are run under Python 2 (if the code is using any Python-3-only language features). * New features can use Python-3-only language constructs, but bugfixes likely to be backported should be more conservative and write for Python 2 compatibilty. * The code for drivers may continue to use the six compatibility library at their discretion. * We will not remove six from mainline Cinder code that impacts the drivers (for example, classes they inherit from). * We can remove six from code that doesn't impact drivers, keeping in mind that backports may be more problematic, and hence making sure that we have really good test coverage. Targeting Milestones -------------------- In an effort to guide team review priorities the Cinder team has adopted the process of adding comments to reviews to target a milestone for a particular patch. This process is not required for all patches but is beneficial for patches that may be time sensitive. For example patches that need to land earlier in the release cycle so as to get additional test time or because later development activities are dependent upon that functionality merging. To target a patch to a milestone a reviewer should add a comment using the following format: ``target--`` Release should be used to indicate the release to which the patch should be targeted, all lower case. The milestone is a single number, 1 to 3, indicating the milestone number. So, to target a patch to land in Milestone 2 of the Rocky release a comment like the following would be added: ``target-rocky-2`` Adding this tag allows reviewers to search for these tags and use them as a guide in review priorities. Targeting patches should be done by Cinder Core Review Team members. If a patch developer feels that a patch should be targeted to a milestone the developer should bring the request up to the Cinder team in a weekly meeting or on the ``#openstack-cinder`` IRC channel. Reviewing Vendor Patches ------------------------ It is important to consider, when reviewing patches to a vendor's Cinder driver, whether the patch passes the vendor's CI process. CI reports are the only tool we have to ensure that a patch works with the Vendor's driver. A patch to a vendor's driver that does not pass that vendor's CI should not be merged. 
If a patch is submitted by a person that does not work with the vendor that owns the driver, a +1 review from someone at that vendor is also required. Finally, a patch should not be merged before the Vendor's CI has run against the patch. .. note:: Patches which have passed vendor CI and have merged in master are exempt from this requirement upon backport to stable and/or driverfixes branches as vendors are not required to run CI on those branches. If the vendor, however, is running CI on stable and/or driverfix branches failures should not be ignored unless otherwise verified by a developer from the vendor. Unit Tests ---------- Cinder requires unit tests with all patches that introduce a new branch or function in the code. Changes that do not come with a unit test change should be considered closely and usually returned to the submitter with a request for the addition of unit test. .. note:: Unit test changes are not validated in any way by vendor's CI. Vendor CI's run the tempest volume tests against a change which does not include a unit test execution. CI Job rechecks --------------- CI job runs may result in false negatives for a considerable number of causes: - Network failures. - Not enough resources on the job runner. - Storage timeouts caused by the array running nightly maintenance jobs. - External service failure: pypi, package repositories, etc. - Non cinder components spurious bugs. And the list goes on and on. When we detect one of these cases the normal procedure is to run a recheck writing a comment with ``recheck`` for core Zuul jobs, or the specific third party CI recheck command, for example ``run-DellEMC PowerStore CI``. These false negative have periods of time where they spike, for example when there are spurious failures, and a lot of rechecks are necessary until a valid result is posted by the CI job. And it's in these periods of time where people acquire the tendency to blindly issue rechecks without looking at the errors reported by the jobs. When these blind checks happen on real patch failures or with external services that are going to be out for a while, they lead to wasted resources as well as longer result times for patches in other projects. The Cinder community has noticed this tendency and wants to fix it, so now it is strongly encouraged to avoid issuing naked rechecks and instead issue them with additional information to indicate that we have looked at the failure and confirmed it is unrelated to the patch. Here are some real examples of proper rechecks: - Spurious issue in other component: ``recheck tempest-integrated-storage : intermittent failure nova bug #1836754`` - Deployment issue on the job: ``recheck cinder-plugin-ceph-tempest timed out, errors all over the place`` - External service failure: ``Third party recheck grenade : Failed to retrieve .deb packages`` Another common case for blindly rechecking a patch is when it is only changing a specific driver but there are failures on jobs that don't use that driver. In such cases we still have to look at the failures, because they can be failures that are going to take a while to fix, and issuing a recheck will be futile at that time and we should wait for a couple of hours, or maybe even a day, before issuing a recheck that can yield the desired result. 
Efficient Review Guidelines --------------------------- This section will guide you through the best practices you can follow to do quality code reviews: * **Failing Gate**: You can check for jobs like pep8, py36, py38, functional etc that are generic to all the patches and look for possible failures in linting, unit test, functional test etc and provide feedback on fixing it. Usually it's the author's responsibility to do a local run of tox and ensure they don't fail upstream but if something is failing on gate and the author is not be aware about how to fix it then we can provide valuable guidance on it. There are also jobs specific to particular area of code (for example, ``cinder-plugin-ceph-tempest`` for the RBD volume driver, ``devstack-plugin-nfs-tempest-full`` for the generic NFS driver etc) so look for issues in the jobs if they are related to the code changes proposed. There is a past example on why we should check these jobs, the ``devstack-plugin-nfs-tempest-full`` is a non-voting job and was failing on one of the FS drivers related `patch`_ which got merged and started failing the ``NetApp CI`` blocking the netapp features during that time. * **Documentation**: Check whether the patch proposed requires documentation or not and ensure the proper documentation is added. If the proper documentation is added then the next step is to check the status of docs job if it's failing or passing. If it passes, you can check how it looks in HTML as follows: Go to ``openstack-tox-docs job`` link -> ``View Log`` -> ``docs`` and go to the appropriate section for which the documentation is added. Rendering: We do have a job for checking failures related to document changes proposed (openstack-tox-docs) but we need to be aware that even if a document change passes all the syntactical rules, it still might not be logically correct i.e. after rendering it could be possible that the bullet points are not under the desired section or the spacing and indentation is not as desired. It is always good to check the final document after rendering in the docs job which might yield possible logical errors. * **Readability**: In a large codebase (like Cinder), Readability is a big factor as remembering the logic of every code path is not feasible and contributors change from time to time. We should adapt to writing readable code which is easy to follow and can be understood by anyone having knowledge about Python constructs and working of Cinder. Sometimes it happens that a logic can only be written in a complex way, in that case, it's always good practice to add a comment describing the functionality. So, if a logic proposed is not readable, do ask/suggest a more readable version of it and if that's not feasible then asking for a comment that would explain it is also a valid review point. * **Type Annotations**: There has been an ongoing effort to implement type annotations all across Cinder with the help of mypy tooling. Certain areas of code already adapt to mypy coding style and it's good practice that new code merging into Cinder should also adapt to it. We, as reviewers, should ensure that new code proposed should include mypy constructs. * **Microversions**: Cinder uses the microversion framework for implementing new feature that causes a change in the API behavior (request/response) while maintaining backward compatibility at the same time. 
There have been examples in the past where a patch adding a new microversion misses file(s) where the microversion changes are necessary so it's a good practice for the author and reviewer to ensure that all files associated with a microversion change should be updated. You can find the list of files and changes required in our `Microversion Doc`_. * **Downvoting reason**: It often happens that the reviewer adds a bunch of comments some of which they would like to be addressed (blocking) and some of them are good to have but not a hard requirement (non-blocking). It's a good practice for the reviewer to mention for which comments is the -1 valid so to make sure they are always addressed. * **Testing**: Always check if the patch adds the associated unit, functional and tempest tests depending on the change. * **Commit Message**: There are few things that we should make sure the commit message includes: 1) Make sure the author clearly explains in the commit message why the code changes are necessary and how exactly the code changes fix the issue. 2) It should have the appropriate tags (Eg: Closes-Bug, Related-Bug, Blueprint, Depends-On etc). For detailed information refer to `external references in commit message`_. 3) It should follow the guidelines of commit message length i.e. 50 characters for the summary line and 72 characters for the description. More information can be found at `Summary of Git commit message structure`_. 4) Sometimes it happens that the author updates the code but forgets to update the commit message leaving the commit describing the old changes. Verify that the commit message is updated as per code changes. * **Release Notes**: There are different cases where a releasenote is required like fixing a bug, adding a feature, changing areas affecting upgrade etc. You can refer to the `Release notes`_ section in our contributor docs for more information. * **Ways of reviewing**: There are various ways you can go about reviewing a patch, following are some of the standard ways you can follow to provide valuable feedback on the patch: 1) Testing it in local environment: The easiest way to check the correctness of a code change proposed is to reproduce the issue (steps should be in launchpad bug) and try the same steps after applying the patch to your environment and see if the provided code changes fix the issue. You can also go a little further to think of possible corner cases where an end user might possibly face issues again and provide the same feedback to cover those cases in the original change proposed. 2) Optimization: If you're not aware about the code path the patch is fixing, you can still go ahead and provide valuable feedback about the python code if that can be optimized to improve maintainability or performance. 3) Perform Dry Run: Sometimes the code changes are on code paths that we don't have or can't create environment for (like vendor driver changes or optional service changes like cinder-backup) so we can read through the code or use some example values to perform a dry run of the code and see if it fails in that scenario. .. _Review guidelines: https://docs.openstack.org/doc-contrib-guide/docs-review-guidelines.html .. _Gerrit: https://review.opendev.org/#/q/project:openstack/cinder+status:open .. _Quick Reference: https://docs.openstack.org/infra/manual/developers.html#quick-reference .. _Getting Started: https://docs.openstack.org/infra/manual/developers.html#getting-started .. 
_Development Workflow: https://docs.openstack.org/infra/manual/developers.html#development-workflow .. _patch: https://review.opendev.org/c/openstack/cinder/+/761152 .. _Microversion Doc: https://opendev.org/openstack/cinder/src/branch/master/doc/source/contributor/api_microversion_dev.rst#other-necessary-changes .. _external references in commit message: https://wiki.openstack.org/wiki/GitCommitMessages#Including_external_references .. _Summary of Git commit message structure: https://wiki.openstack.org/wiki/GitCommitMessages#Summary_of_Git_commit_message_structure .. _Release notes: https://docs.openstack.org/cinder/latest/contributor/releasenotes.html ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/doc/source/contributor/gmr.rst0000664000175000017500000000776600000000000021045 0ustar00zuulzuul00000000000000.. Copyright (c) 2013 OpenStack Foundation All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Guru Meditation Reports ======================= Cinder contains a mechanism whereby developers and system administrators can generate a report about the state of a running Cinder executable. This report is called a *Guru Meditation Report* (*GMR* for short). Generating a GMR ---------------- A *GMR* can be generated by sending the *USR2* signal to any Cinder process with support (see below). The *GMR* will then output to standard error for that particular process. For example, suppose that ``cinder-api`` has process id ``8675``, and was run with ``2>/var/log/cinder/cinder-api-err.log``. Then, ``kill -USR2 8675`` will trigger the Guru Meditation report to be printed to ``/var/log/cinder/cinder-api-err.log``. There is other way to trigger a generation of report, user should add a configuration in Cinder's conf file:: [oslo_reports] file_event_handler=['The path to a file to watch for changes to trigger ' 'the reports, instead of signals. Setting this option ' 'disables the signal trigger for the reports.'] file_event_handler_interval=['How many seconds to wait between polls when ' 'file_event_handler is set, default value ' 'is 1'] a *GMR* can be generated by "touch"ing the file which was specified in file_event_handler. The *GMR* will then output to standard error for that particular process. For example, suppose that ``cinder-api`` was run with ``2>/var/log/cinder/cinder-api-err.log``, and the file path is ``/tmp/guru_report``. Then, ``touch /tmp/guru_report`` will trigger the Guru Meditation report to be printed to ``/var/log/cinder/cinder-api-err.log``. Structure of a GMR ------------------ The *GMR* is designed to be extensible; any particular executable may add its own sections. 
However, the base *GMR* consists of several sections: Package Shows information about the package to which this process belongs, including version information Threads Shows stack traces and thread ids for each of the threads within this process Green Threads Shows stack traces for each of the green threads within this process (green threads don't have thread ids) Configuration Lists all the configuration options currently accessible via the CONF object for the current process Adding Support for GMRs to New Executables ------------------------------------------ Adding support for a *GMR* to a given executable is fairly easy. First import the module (currently residing in oslo-incubator), as well as the Cinder version module: .. code-block:: python from oslo_reports import guru_meditation_report as gmr from cinder import version Then, register any additional sections (optional): .. code-block:: python TextGuruMeditation.register_section('Some Special Section', some_section_generator) Finally (under main), before running the "main loop" of the executable (usually ``service.server(server)`` or something similar), register the *GMR* hook: .. code-block:: python TextGuruMeditation.setup_autorun(version) Extending the GMR ----------------- As mentioned above, additional sections can be added to the GMR for a particular executable. For more information, see the inline documentation about oslo.reports: `oslo.reports `_ ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/doc/source/contributor/groups.rst0000664000175000017500000003557500000000000021576 0ustar00zuulzuul00000000000000Generic Volume Groups ===================== Introduction to generic volume groups ------------------------------------- Generic volume group support was added in cinder in the Newton release. There is support for creating group types and group specs, creating groups of volumes, and creating snapshots of groups. Detailed information on how to create a group type, a group, and a group snapshot can be found in :doc:`block storage admin guide `. How is generic volume groups different from consistency groups in cinder? The consistency group feature was introduced in cinder in Juno and are supported by a few drivers. Currently consistency groups in cinder only support consistent group snapshot. It cannot be extended easily to serve other purposes. A tenant may want to put volumes used in the same application together in a group so that it is easier to manage them together, and this group of volumes may or may not support consistent group snapshot. Generic volume group is introduced to solve this problem. By decoupling the tight relationship between the group construct and the consistency concept, generic volume groups can be extended to support other features in the future. Action items for drivers supporting consistency groups ------------------------------------------------------ Drivers currently supporting consistency groups are in the following: - Juno: EMC VNX - Kilo: EMC VMAX, IBM (GPFS, Storwize, SVC, and XIV), ProphetStor, Pure - Liberty: Dell Storage Center, EMC XtremIO, HPE 3Par and LeftHand - Mitaka: EMC ScaleIO, NetApp Data ONTAP, SolidFire - Newton: CoprHD, FalconStor, Huawei Since the addition of generic volume groups, there is plan to migrate consistency groups to generic volume groups. A migration command and changes in CG APIs to support migrating CGs to groups are developed and merged in Ocata [1][2]. 
In order to support rolling upgrade, it will take a couple of releases before consistency groups can be deprecated. For drivers planning to add consistency groups support, the new generic volume group driver interfaces should be implemented instead of the CG interfaces. For drivers already supporting consistency groups, the new generic volume group driver interfaces should be implemented to include the CG support. For drivers wanting generic volume groups but not consistent group snapshot support, no code changes are necessary. By default, every cinder volume driver already supports generic volume groups since Newton because the support was added to the common code. Testing should be done for every driver to make sure this feature works properly. Drivers already supporting CG are expected to add CG support to generic volume groups by Pike-1. This is a deadline discussed and agreed upon at the Ocata summit in Barcelona. Group Type and Group Specs / Volume Types and Extra Specs --------------------------------------------------------- The driver interfaces for consistency groups and generic volume groups are very similar. One new concept introduced for generic volume groups is the group type. Group type is used to categorize a group just like a volume type is used to describe a volume. Similar to extra specs for a volume type, group specs are also introduced to be associated with a group type. Group types allow a user to create different types of groups. A group can support multiple volume types and volume types are required as input parameters when creating a group. In addition to volume types, a group type is also required when creating a group. Group types and volume types are created by the Cloud Administrator. A tenant uses the group types and volume types to create groups and volumes. A driver can support both consistent group snapshot and a group of snapshots that do not maintain the write order consistency by using different group types. In other words, a group supporting consistent group snapshot is a special type of generic volume group. For a group to support consistent group snapshot, the group specs in the corresponding group type should have the following entry:: {'consistent_group_snapshot_enabled': True} Similarly, for a volume to be in a group that supports consistent group snapshots, the volume type extra specs would also have the following entry:: {'consistent_group_snapshot_enabled': True} By requiring the above entry to be in both group specs and volume type extra specs, we can make sure the scheduler will choose a backend that supports the group type and volume types for a group. It is up to the driver to parse the group type info when creating a group, parse the volume type info when creating a volume, and set things up as requested. 
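For example, a driver's group methods commonly begin by checking whether the group type asks for consistent group snapshots; the sketch below is hypothetical, assumes the ``is_group_a_cg_snapshot_type`` helper from ``cinder.volume.volume_utils``, and uses a placeholder backend call:

.. code-block:: python

   from cinder.volume import volume_utils


   class ExampleDriver(object):
       """Hypothetical driver fragment, shown only for illustration."""

       def create_group(self, context, group):
           if not volume_utils.is_group_a_cg_snapshot_type(group):
               # Plain generic group: defer to cinder's default handling,
               # which every driver gets for free at the manager level.
               raise NotImplementedError()
           # consistent_group_snapshot_enabled=True was set in the group
           # specs, so build a write-order consistent group on the backend.
           self._create_backend_consistency_group(group.id)  # placeholder
           # Returning None lets the manager mark the group 'available'.
           return None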
Capabilities reporting ---------------------- The following entry is expected to be added to the stats/capabilities update for drivers supporting consistent group snapshot:: stats["consistent_group_snapshot_enabled"] = True Driver methods -------------- The following driver methods should to be implemented for the driver to support consistent group snapshot: - create_group(context, group) - delete_group(context, group, volumes) - update_group(context, group, add_volumes=None, remove_volumes=None) - create_group_from_src(context, group, volumes, group_snapshot=None, snapshots=None, source_group=None, source_vols=None) - create_group_snapshot(context, group_snapshot, snapshots) - delete_group_snapshot(context, group_snapshot, snapshots) Here is an example that add CG capability to generic volume groups [3]. Details of driver interfaces are as follows. **create_group** This method creates a group. It has context and group object as input parameters. A group object has volume_types and group_type_id that can be used by the driver. create_group returns model_update. model_update will be in this format: {'status': xxx, ......}. If the status in model_update is 'error', the manager will throw an exception and it will be caught in the try-except block in the manager. If the driver throws an exception, the manager will also catch it in the try-except block. The group status in the db will be changed to 'error'. For a successful operation, the driver can either build the model_update and return it or return None. The group status will be set to 'available'. **delete_group** This method deletes a group. It has context, group object, and a list of volume objects as input parameters. It returns model_update and volumes_model_update. volumes_model_update is a list of volume dictionaries. It has to be built by the driver. An entry will be in this format: {'id': xxx, 'status': xxx, ......}. model_update will be in this format: {'status': xxx, ......}. The driver should populate volumes_model_update and model_update and return them. The manager will check volumes_model_update and update db accordingly for each volume. If the driver successfully deleted some volumes but failed to delete others, it should set statuses of the volumes accordingly so that the manager can update db correctly. If the status in any entry of volumes_model_update is 'error_deleting' or 'error', the status in model_update will be set to the same if it is not already 'error_deleting' or 'error'. If the status in model_update is 'error_deleting' or 'error', the manager will raise an exception and the status of the group will be set to 'error' in the db. If volumes_model_update is not returned by the driver, the manager will set the status of every volume in the group to 'error' in the except block. If the driver raises an exception during the operation, it will be caught by the try-except block in the manager. The statuses of the group and all volumes in it will be set to 'error'. For a successful operation, the driver can either build the model_update and volumes_model_update and return them or return None, None. The statuses of the group and all volumes will be set to 'deleted' after the manager deletes them from db. **update_group** This method adds existing volumes to a group or removes volumes from a group. It has context, group object, a list of volume objects to be added to the group, and a list of a volume objects to be removed from the group. It returns model_update, add_volumes_update, and remove_volumes_update. 
model_update is a dictionary that the driver wants the manager to update upon a successful return. If None is returned, the manager will set the status to 'available'.

add_volumes_update and remove_volumes_update are lists of dictionaries that the driver wants the manager to update upon a successful return. Note that each entry requires an {'id': xxx} so that the correct volume entry can be updated. If None is returned, the volume will retain its original status. If the driver throws an exception, the status of the group as well as those of the volumes to be added/removed will be set to 'error'.

**create_group_from_src**

This method creates a group from source. The source can be a group_snapshot or a source group. create_group_from_src has context, group object, a list of volume objects, group_snapshot object, a list of snapshot objects, source group object, and a list of source volume objects as input parameters. It returns model_update and volumes_model_update.

volumes_model_update is a list of dictionaries. It has to be built by the driver. An entry will be in this format: {'id': xxx, 'status': xxx, ......}. model_update will be in this format: {'status': xxx, ......}.

To be consistent with other volume operations, the manager will assume the operation is successful if no exception is thrown by the driver. For a successful operation, the driver can either build the model_update and volumes_model_update and return them or return None, None.

**create_group_snapshot**

This method creates a group_snapshot. It has context, group_snapshot object, and a list of snapshot objects as input parameters. It returns model_update and snapshots_model_update.

snapshots_model_update is a list of dictionaries. It has to be built by the driver. An entry will be in this format: {'id': xxx, 'status': xxx, ......}. model_update will be in this format: {'status': xxx, ......}.

The driver should populate snapshots_model_update and model_update and return them. The manager will check snapshots_model_update and update db accordingly for each snapshot. If the driver successfully created some snapshots but failed to create others, it should set statuses of the snapshots accordingly so that the manager can update db correctly.

If the status in any entry of snapshots_model_update is 'error', the status in model_update will be set to the same if it is not already 'error'. If the status in model_update is 'error', the manager will raise an exception and the status of group_snapshot will be set to 'error' in the db. If snapshots_model_update is not returned by the driver, the manager will set the status of every snapshot to 'error' in the except block.

If the driver raises an exception during the operation, it will be caught by the try-except block in the manager and the statuses of group_snapshot and all snapshots will be set to 'error'.

For a successful operation, the driver can either build the model_update and snapshots_model_update and return them or return None, None. The statuses of group_snapshot and all snapshots will be set to 'available' at the end of the manager function.

**delete_group_snapshot**

This method deletes a group_snapshot. It has context, group_snapshot object, and a list of snapshot objects. It returns model_update and snapshots_model_update.

snapshots_model_update is a list of dictionaries. It has to be built by the driver. An entry will be in this format: {'id': xxx, 'status': xxx, ......}. model_update will be in this format: {'status': xxx, ......}.
The driver should populate snapshots_model_update and model_update and return them. The manager will check snapshots_model_update and update db accordingly for each snapshot. If the driver successfully deleted some snapshots but failed to delete others, it should set statuses of the snapshots accordingly so that the manager can update db correctly.

If the status in any entry of snapshots_model_update is 'error_deleting' or 'error', the status in model_update will be set to the same if it is not already 'error_deleting' or 'error'. If the status in model_update is 'error_deleting' or 'error', the manager will raise an exception and the status of group_snapshot will be set to 'error' in the db. If snapshots_model_update is not returned by the driver, the manager will set the status of every snapshot to 'error' in the except block.

If the driver raises an exception during the operation, it will be caught by the try-except block in the manager and the statuses of group_snapshot and all snapshots will be set to 'error'.

For a successful operation, the driver can either build the model_update and snapshots_model_update and return them or return None, None. The statuses of group_snapshot and all snapshots will be set to 'deleted' after the manager deletes them from db.

Migrate CGs to Generic Volume Groups
------------------------------------

This section only affects drivers already supporting CGs by the Newton release. Drivers planning to add CG support after Newton are not affected.

A group type named default_cgsnapshot_type will be created by the migration script. The following command needs to be run to migrate data and copy data from consistency groups to groups and from cgsnapshots to group_snapshots. Migrated consistency groups and cgsnapshots will be removed from the database::

    cinder-manage db online_data_migrations --max_count

max_count is optional. Default is 50.

After running the above migration command to migrate CGs to generic volume groups, CG and group APIs work as follows:

* Create CG only creates in the groups table.
* Modify CG modifies in the CG table if the CG is in the CG table, otherwise it modifies in the groups table.
* Delete CG deletes from the CG or the groups table depending on where the CG is.
* List CG checks both CG and groups tables.
* List CG Snapshots checks both the CG and the groups tables.
* Show CG checks both tables.
* Show CG Snapshot checks both tables.
* Create CG Snapshot creates either in the CG or the groups table depending on where the CG is.
* Create CG from Source creates in either the CG or the groups table depending on the source.
* Create Volume adds the volume either to the CG or the group.
* default_cgsnapshot_type is reserved for migrating CGs.
* Group APIs will only write/read in/from the groups table.
* Group APIs will not work on groups with default_cgsnapshot_type.
* Groups with default_cgsnapshot_type can only be operated by CG APIs.
* After CG tables are removed, we will allow default_cgsnapshot_type to be used by group APIs.
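Tying the driver interface descriptions above together, the following is a minimal, hedged sketch of what ``delete_group_snapshot`` typically looks like; the ``_delete_backend_snapshot`` helper is a placeholder for real backend calls, not an existing Cinder API:

.. code-block:: python

    from oslo_log import log as logging

    LOG = logging.getLogger(__name__)

    def delete_group_snapshot(self, context, group_snapshot, snapshots):
        # Build one entry per snapshot so the manager can update the db even
        # when only some of the deletions fail.
        model_update = {'status': 'deleted'}
        snapshots_model_update = []

        for snapshot in snapshots:
            try:
                self._delete_backend_snapshot(snapshot)  # placeholder
                snapshots_model_update.append(
                    {'id': snapshot.id, 'status': 'deleted'})
            except Exception:
                LOG.exception('Failed to delete snapshot %s', snapshot.id)
                snapshots_model_update.append(
                    {'id': snapshot.id, 'status': 'error_deleting'})
                # The manager would propagate this status anyway; setting it
                # explicitly keeps the returned model_update consistent.
                model_update['status'] = 'error_deleting'

        return model_update, snapshots_model_update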
References ---------- [1] Migration script https://review.openstack.org/#/c/350350/ [2] CG APIs changes for migrating CGs https://review.openstack.org/#/c/401839/ [3] Example adding CG capability to generic volume groups https://review.openstack.org/#/c/413927/ ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/doc/source/contributor/high_availability.rst0000664000175000017500000014605500000000000023724 0ustar00zuulzuul00000000000000High Availability ================= In this guide we'll go over design and programming considerations related to high availability in Cinder. The document aims to provide a single point of truth in all matters related to Cinder's high availability. Cinder developers must always have these aspects present during the design and programming of the Cinder core code, as well as the drivers' code. Most topics will focus on Active-Active deployments. Some topics covering node and process concurrency will also apply to Active-Passive deployments. Overview -------- There are 4 services that must be considered when looking at a highly available Cinder deployment: API, Scheduler, Volume, Backup. Each of these services has its own challenges and mechanisms to support concurrent and multi node code execution. This document provides a general overview of Cinder aspects related to high availability, together with implementation details. Given the breadth and depth required to properly explain them all, it will fall short in some places. It will provide external references to expand on some of the topics hoping to help better understand them. Some of the topics that will be covered are: - Job distribution. - Message queues. - Threading model. - Versioned Objects used for rolling upgrades. - Heartbeat system. - Mechanism used to clean up out of service cluster nodes. - Mutual exclusion mechanisms used in Cinder. It's good to keep in mind that Cinder threading model is based on eventlet's green threads. Some Cinder and driver code may use native threads to prevent thread blocking, but that's not the general rule. Throughout the document we'll be referring to clustered and non clustered Volume services. This distinction is not based on the number of services running, but on their configurations. A non clustered Volume service is one that will be deployed as Active-Passive and has not been included in a Cinder cluster. On the other hand, a clustered Volume service is one that can be deployed as Active-Active because it is part of a Cinder cluster. We consider a Volume service to be clustered even when there is only one node in the cluster. Job distribution ---------------- Cinder uses RPC calls to pass jobs to Scheduler, Volume, and Backup services. A message broker is used for the transport layer on the RPC calls and parameters. Job distribution is handled by the message broker using message queues. The different services, except the API, listen on specific message queues for RPC calls. Based on the maximum number of nodes that will connect, we can differentiate two types of message queues: those with a single listener and those with multiple listeners. We use single listener queues to send RPC calls to a specific service in a node. For example, when the API calls a non clustered Volume service to create a snapshot. Message queues having multiple listeners are used in operations such as: - Creating any volume. Call made from the API to the Scheduler. - Creating a volume in a clustered Volume service. 
Call made from the Scheduler to the Volume service. - Attaching a volume in a clustered Volume service. Call made from the API to the Volume service. Regardless of the number of listeners, all the above mentioned RPC calls are unicast calls. The caller will place the request in a queue in the message broker and a single node will retrieve it and execute the call. There are other kinds of RPC calls, those where we broadcast a single RPC call to multiple nodes. The best example of this type of call is the Volume service capabilities report sent to all the Schedulers. Message queues are fair queues and are used to distribute jobs in a round robin fashion. Single target RPC calls made to message queues with multiple listeners are distributed in round robin. So sending three request to a cluster of 3 Schedulers will send one request to each one. Distribution is content and workload agnostic. A node could be receiving all the quick and easy jobs while another one gets all the heavy lifting and its ongoing workload keeps increasing. Cinder's job distribution mechanism allows fine grained control over who to send RPC calls. Even on clustered Volume services we can still access individual nodes within the cluster. So developers must pay attention to where they want to send RPC calls and ask themselves: Is the target a clustered service? Is the RPC call intended for *any* node running the service? Is it for a *specific* node? For *all* nodes? The code in charge of deciding the target message queue, therefore the recipient, is in the `rpcapi.py` files. Each service has its own file with the RPC calls: `volume/rpcapi.py`, `scheduler/rpcapi.py`, and `backup/rpcapi.py`. For RPC calls the different `rcpapi.py` files ultimately use the `_get_cctxt` method from the `cinder.rpc.RPCAPI` class. For a detailed description on the issue, ramifications, and solutions, please refer to the `Cinder Volume Job Distribution`_. The `RabbitMQ tutorials`_ are a good way to understand message brokers general topics. Heartbeats ---------- Cinder services, with the exception of API services, have a periodic heartbeat to indicate they are up and running. When services are having health issues, they may decide to stop reporting heartbeats, even if they are running. This happens during initialization if the driver cannot be setup correctly. The database is used to report service heartbeats. Fields `report_count` and `updated_at`, in the `services` table, keep a heartbeat counter and the last time the counter was updated. There will be multiple database entries for Cinder Volume services running multiple backends. One per backend. Using a date-time to mark the moment of the last heartbeat makes the system time relevant for Cinder's operation. A significant difference in system times on our nodes could cause issues in a Cinder deployment. All services report and expect the `updated_at` field to be UTC. To determine if a service is up, we check the time of the last heartbeat to confirm that it's not older than `service_down_time` seconds. Default value for `service_down_time` configuration option is 60 seconds. Cinder uses method `is_up`, from the `Service` and `Cluster` Versioned Object, to ensure consistency in the calculations across the whole code base. Heartbeat frequency in Cinder services is determined by the `report_interval` configuration option. The default is 10 seconds, allowing network and database interruptions. Cinder protects itself against some incorrect configurations. 
If `report_interval` is greater or equal than `service_down_time`, Cinder will log a warning and use a service down time of two and a half times the configured `report_interval`. .. note:: It is of utter importance having the same `service_down_time` and `report_interval` configuration options in all your nodes. In each service's section we'll expand this topic with specific information only relevant to that service. Cleanup ------- Power outages, hardware failures, unintended reboots, and software errors. These are all events that could make a Cinder service unexpectedly halt its execution. A running Cinder service is usually carrying out actions on resources. So when the service dies unexpectedly, it will abruptly stop those operations. Stopped operations in this way leaves resources in transitioning states. For example a volume could be left in a `deleting` or `creating` status. If left alone resources will remain in this state forever, as the service in charge of transitioning them to a rest status (`available`, `error`, `deleted`) is no longer running. Existing reset-status operations allow operators to forcefully change the state of a resource. But these state resets are not recommended except in very specific cases and when we really know what we are doing. Cleanup mechanisms are tasked with service's recovery after an abrupt stop of the service. They are the recommended way to resolve stuck transitioning states caused by sudden service stop. There are multiple cleanup mechanisms in Cinder, but in essence they all follow the same logic. Based on the resource type and its status the mechanism determines the best cleanup action that will transition the state to a rest state. Some actions require a resource going through several services. In this case deciding the cleanup action may also require taking into account where the resource was being processed. Cinder has two types of cleanup mechanisms: - On node startup: Happen on Scheduler, Volume, and Backup services. - Upon user request. User requested cleanups can only be triggered on Scheduler and Volume nodes. When a node starts it will do a cleanup, but only for the resources that were left in a transitioning state when the service stopped. It will never touch resources from other services in the cluster. Node startup cleanup is slightly different on services supporting user requested cleanups -Scheduler and Volume- than on Backup services. Backup cleanups will be covered in the service's section. For services supporting user requested cleanups we can differentiate the following tasks: - Tracking transitioning resources: Using workers table and Cleanable Versioned Objects methods. - Defining when a resource must be cleaned if service dies: Done in Cleanable Versioned Objects. - Defining how a resource must be cleaned: Done in the service manager. .. note:: All Volume services can accept cleanup requests, doesn't matter if they are clustered or not. This will provide a better alternative to the reset-state mechanism to handle resources stuck in a transitioning state. Workers table ~~~~~~~~~~~~~ For Cinder Volume managed resources -Volumes and Snapshots- we used to establish a one-to-one relationship between a resource and the volume service managing it. A resource would belong to a node if the resource's `host` field matched that of the running Cinder Volume service. Snapshots must always be managed by the same service as the volume they originate from, so they don't have a `host` field in the database. 
In this case the parent volume's `host` is used to determine who owns the resource. Cinder-Volume services can be clustered, so we no longer have a one-to-one owner relationship. On clustered services we use the `cluster_name` database field instead of the `host` to determine ownership. Now we have a one-to-many ownership relationship. When a clustered service abruptly stops running, any of the nodes from the same cluster can cleanup the resources it was working on. There is no longer a need to restart the service to get the resources cleaned by the node startup cleanup process. We keep track of the resources our Cinder services are working on in the `workers` table. Only resources that can be cleaned are tracked. This table stores the resource type and id, the status that should be cleared on service failure, the service that is working on it, etc. And we'll be updating this table as the resources move from service to service. `Worker` entries are not passed as RPC parameters, so we don't need a Versioned Object class to represent them. We only have the `Worker` ORM class to represent database entries. Following subsections will cover implementation details required to develop new cleanup resources and states. For a detailed description on the issue, ramifications, and overall solution, please refer to the `Cleanup spec`_. Tracking resources ~~~~~~~~~~~~~~~~~~ Resources supporting cleanup using the workers table must inherit from the `CinderCleanableObject` Versioned Object class. This class provides helper methods and the general interface used by Cinder for the cleanup mechanism. This interface is conceptually split in three tasks: - Manage workers table on the database. - Defining what states must be cleaned. - Defining how to clean resources. Among methods provided by the `CinderCleanableObject` class the most important ones are: - `is_cleanable`: Checks if the resource, given its current status, is cleanable. - `create_worker`: Create a worker entry on the API service. - `set_worker`: Create or update worker entry. - `unset_worker`: Remove an entry from the database. This is a real delete, not a soft-delete. - `set_workers`: Function decorator to create or update worker entries. Inheriting classes must define `_is_cleanable` method to define which resource states can be cleaned up. Earlier we mentioned how cleanup depends on a resource's current state. But it also depends under what version the services are running. With rolling updates we can have a service running under an earlier pinned version for compatibility purposes. A version X service could have a resource that it would consider cleanable, but it's pinned to version X-1, where it was not considered cleanable. To avoid breaking things, the resource should be considered as non cleanable until the service version is unpinned. Implementation of `_is_cleanable` method must take them both into account. The state, and the version. Volume's implementation is a good example, as workers table was not supported before version 1.6: .. code-block:: python @staticmethod def _is_cleanable(status, obj_version): if obj_version and obj_version < 1.6: return False return status in ('creating', 'deleting', 'uploading', 'downloading') Tracking states in the workers table starts by calling the `create_worker` method on the API node. This is best done on the different `rpcapi.py` files. For example, a create volume operation will go from the API service to the Scheduler service, so we'll add it in `cinder/scheduler/rpcapi.py`: .. 
code-block:: python def create_volume(self, ctxt, volume, snapshot_id=None, image_id=None, request_spec=None, filter_properties=None, backup_id=None): volume.create_worker() But if we are deleting a volume or creating a snapshot the API will call the Volume service directly, so changes should go in `cinder/volume/rpcapi.py`: .. code-block:: python def delete_volume(self, ctxt, volume, unmanage_only=False, cascade=False): volume.create_worker() Once we receive the call on the other side's manager we have to call the `set_worker` method. To facilitate this task we have the `set_workers` decorator that will automatically call `set_worker` for any cleanable versioned object that is in a cleanable state. For the create volume on the Scheduler service: .. code-block:: python @objects.Volume.set_workers @append_operation_type() def create_volume(self, context, volume, snapshot_id=None, image_id=None, request_spec=None, filter_properties=None, backup_id=None): And then again for the create volume on the Volume service: .. code-block:: python @objects.Volume.set_workers def create_volume(self, context, volume, request_spec=None, filter_properties=None, allow_reschedule=True): In these examples we are using the `set_workers` method from the `Volume` Versioned Object class. But we could be using it from any other class as it is a `staticmethod` that is not overwritten by any of the classes. Using the `set_workers` decorator will cover most of our use cases, but sometimes we may have to call the `set_worker` method ourselves. That's the case when transitioning from `creating` state to `downloading`. The `worker` database entry was created with the `creating` state and the working service was updated when the Volume service received the RPC call. But once we change the status to `downloading` the worker and the resource status don't match, so the cleanup mechanism will ignore the resource. To solve this we add another worker update in the `save` method from the `Volume` Versioned Object class: .. code-block:: python def save(self): ... if updates.get('status') == 'downloading': self.set_worker() Actions on resource cleanup ~~~~~~~~~~~~~~~~~~~~~~~~~~~ We've seen how to track cleanable resources in the `workers` table. Now we'll cover how to define the actions used to cleanup a resource. Services using the `workers` table inherit from the `CleanableManager` class and must implement the `_do_cleanup` method. This method receives a versioned object to clean and indicates whether we should keep the `workers` table entry. On asynchronous cleanup tasks the method must return `True` and take care of removing the worker entry on completion. Simplified version of the cleanup of the Volume service, illustrating synchronous and asynchronous cleanups and how we can do a synchronous cleanup and take care ourselves of the `workers` entry: .. code-block:: python def _do_cleanup(self, ctxt, vo_resource): if isinstance(vo_resource, objects.Volume): if vo_resource.status == 'downloading': self.driver.clear_download(ctxt, vo_resource) elif vo_resource.status == 'deleting': if CONF.volume_service_inithost_offload: self._add_to_threadpool(self.delete_volume, ctxt, vo_resource, cascade=True) else: self.delete_volume(ctxt, vo_resource, cascade=True) return True if vo_resource.status in ('creating', 'downloading'): vo_resource.status = 'error' vo_resource.save() When the volume is `downloading` we don't return anything, so the caller receives `None`, which evaluates to not keep the row entry.
When the status is `deleting` we call `delete_volume` synchronously or asynchronously. The `delete_volume` has the `set_workers` decorator, that calls `unset_worker` once the decorated method has successfully finished. So when calling `delete_volume` we must ask the caller of `_do_cleanup` to not try to remove the `workers` entry. Cleaning resources ~~~~~~~~~~~~~~~~~~ We may not have a `Worker` Versioned Object because we didn't need it, but we have a `CleanupRequest` Versioned Object to specify resources for cleanup. Resources will be cleaned when a node starts up and on user request. In both cases we'll use the `CleanupRequest` that contains a filtering of what needs to be cleaned up. The `CleanupRequest` can be considered as a filter on the `workers` table to determine what needs to be cleaned. Managers for services using the `workers` table must support the startup cleanup mechanism. Support for this mechanism is provided via the `init_host` method in the `CleanableManager` class. So managers inheriting from `CleanableManager` must make sure they call this `init_host` method. This can be done using `CleanableManager` as the first inherited class and using `super` to call the parent's `init_host` method, or by calling the class method directly: `cleanableManager.init_host(self, ...)`. `CleanableManager`'s `init_host` method will create a `CleanupRequest` for the current service before calling its `do_cleanup` method with it before returning. Thus cleaning up all transitioning resources from the service. For user requested cleanups, the API generates a `CleanupRequest` object using the request's parameters and calls the scheduler's `work_cleanup` RPC with it. The Scheduler receives the `work_cleanup` RPC call and uses the `CleanupRequest` to filter services that match the request. With this list of services the Scheduler sends an individual cleanup request for each of the services. This way we can spread the cleanup work if we have multiple services to cleanup. The Scheduler checks the service to clean to know where it must send the clean request. Scheduler service cleanup can be performed by any Scheduler, so we send it to the scheduler queue where all Schedulers are listening. In the worst case it will come back to us if there is no other Scheduler running at the time. For the Volume service we'll be sending it to the cluster message queue if it's a clustered service, or to a single node if it's non clustered. But unlike with the Scheduler, we can't be sure that there is a service to do the cleanup, so we check if the service or cluster is up before sending the request. After sending all the cleanup requests, the Scheduler will return a list of services that have received a cleanup request, and all the services that didn't because they were down. Mutual exclusion ---------------- In Cinder, as many other concurrent and parallel systems, there are "critical sections". Code sections that share a common resource that can only be accessed by one of them at a time. Resources can be anything, not only Cinder resources such as Volumes and Snapshots, and they can be local or remote. Examples of resources are libraries, command line tools, storage target groups, etc. Exclusion scopes can be per process, per node, or global. We have four mutual exclusion mechanisms available during Cinder development: - Database locking using resource states. - Process locks. - Node locks. - Global locks. For performance reasons we must always try to avoid using any mutual exclusion mechanism. 
If avoiding them is not possible, we should try to use the narrowest scope possible and reduce the critical section as much as possible. Locks by decreasing order of preference are: process locks, node locks, global locks, database locks. Status based locking ~~~~~~~~~~~~~~~~~~~~ Many Cinder operations are inherently exclusive and the Cinder core code ensures that drivers will not receive contradictory or incompatible calls. For example, you cannot clone a volume if it's being created. And you shouldn't delete the source volume of an ongoing snapshot. To prevent these from happening Cinder API services use resource status fields to check for incompatibilities preventing operations from getting through. There are exceptions to this rule, for example the force delete operation that ignores the status of a resource. We should also be aware that administrators can forcefully change the status of a resource and then call the API, bypassing the check that prevents multiple operations from being requested to the drivers. Resource locking using states is expanded upon in the `Race prevention`_ subsection in the `Cinder-API`_ section. Process locks ~~~~~~~~~~~~~ Cinder services are multi-threaded -not really since we use greenthreads-, so the narrowest possible scope of locking is among the threads of a single process. Some cases where we may want to use this type of locking are when we share arrays or dictionaries between the different threads within the process, and when we use a Python or C library that doesn't properly handle concurrency and we have to be careful with how we call its methods. To use this locking in Cinder we must use the `synchronized` method in `cinder.utils`. This method in turn uses the `synchronized` method from `oslo_concurrency.lockutils` with the `cinder-` prefix for all the locks to avoid conflict with other OpenStack services. The only required parameter for this usage is the name of the lock. The name parameter provided for these locks must be a literal string value. There is no kind of templating support. Example from `cinder/volume/throttling.py`: .. code-block:: python @utils.synchronized('BlkioCgroup') def _inc_device(self, srcdev, dstdev): .. note:: When developing a driver, and considering which type of lock to use, we must remember that Cinder is a multi backend service. So the same driver can be running multiple times on different processes in the same node. Node locks ~~~~~~~~~~ Sometimes we want to define the whole node as the scope of the lock. Our critical section requires that only one thread in the whole node is using the resource. This inter process lock ensures that no matter how many processes and backends want to access the same resource, only one will access it at a time. All others will have to wait. These locks are useful when: - We want to ensure there's only one ongoing call to a command line program. That's the case of the `cinder-rtstool` command in `cinder/volume/targets/lio.py`, and the `nvmetcli` command in `cinder/volume/targets/nvmet.py`. - Common initialization in all processes in the node. This is the case of the backup service cleanup code. The backup service can run multiple processes simultaneously for the same backend, but only one of them can run the cleanup code on start. - Drivers not supporting Active-Active configurations. Any operation that should only be performed by one driver at a time. For example creating target groups for a node. 
This type of lock uses the same method as the `Process locks`_, the `synchronized` method from `cinder.utils`. Here we need to pass two parameters, the name of the lock, and `external=True` to make sure that file locks are being used. The name parameter provided for these locks must be a literal string value. There is no kind of templating support. Example from `cinder/volume/targets/lio.py`: .. code-block:: python @staticmethod @utils.synchronized('lioadm', external=True) def _execute(*args, **kwargs): Example from `cinder/backup/manager.py`: .. code-block:: python @utils.synchronized('backup-pgid-%s' % os.getpgrp(), external=True, delay=0.1) def _cleanup_incomplete_backup_operations(self, ctxt): .. warning:: These are not fair locks. Order in which the lock is acquired by callers may differ from request order. Starvation is possible, so don't choose a generic lock name for all your locks and try to create a unique name for each locking domain. Drivers that use node locks based on volumes should implement method ``clean_volume_file_locks`` and if they use locks based on the snapshots they should also implement ``clean_snapshot_file_locks`` and use method ``synchronized_remove`` from ``cinder.utils``. Example for a driver that used ``cinder.utils.synchronized``: .. code-block:: python def my_operation(self, volume): @utils.synchronized('my-driver-lock-' + volume.id) def method(): pass method() @classmethod def clean_volume_file_locks(cls, volume_id): utils.synchronized_remove('my-driver-lock-' + volume_id) Global locks ~~~~~~~~~~~~ Global locks, also known as distributed locks in Cinder, provide mutual exclusion in the global scope of the Cinder services. They allow you to have a lock regardless of the backend, for example to prevent deleting a volume that is being cloned, or making sure that your driver is only creating a Target group at a time, in the whole Cinder deployment, to avoid race conditions. Global locking functionality is provided by the `synchronized` decorator from `cinder.coordination`. .. attention:: Optional `blocking` and `coordinator` arguments to the `synchronized` decorator are **keyword** arguments only and cannot be passed as positional arguments. This method is more advanced than the one used for the `Process locks`_ and the `Node locks`_, as the name supports templates. For the template we have all the method parameters as well as `f_name` that represents the name of the method being decorated. Templates must use Python's `Format Specification Mini-Language`_. Using brackets we can access the function name `'{f_name}'`, an attribute of a parameter `'{volume.id}'`, a key in a dictionary `{snapshot['name']}`, etc. Up to date information on the method can be found in the `synchronized method's documentation`_. Example from the delete volume operation in `cinder/volume/manager.py`. We use the `id` attribute of the `volume` parameter, and the function name to form the lock name: .. code-block:: python @coordination.synchronized('{volume.id}-{f_name}') @objects.Volume.set_workers def delete_volume(self, context, volume, unmanage_only=False, cascade=False): Example from create snapshot in `cinder/volume/drivers/nfs.py`, where we use an attribute from `self`, and a recursive reference in the `snapshot` parameter. .. code-block:: python @coordination.synchronized('{self.driver_prefix}-{snapshot.volume.id}') def create_snapshot(self, snapshot): Some drivers may require multiple locks for a critical section, which could potentially create deadlocks.
Like in the following example, where `PowerMax` method `move_volume_between_storage_groups` creates 2 locks: .. code-block:: python @coordination.synchronized( "emc-sg-{source_storagegroup_name}-{serial_number}") @coordination.synchronized( "emc-sg-{target_storagegroup_name}-{serial_number}") def move_volume_between_storage_groups( self, serial_number, device_id, source_storagegroup_name, target_storagegroup_name, extra_specs, force=False, parent_sg=None): That code can result in a deadlock if 2 opposite requests come in concurrently and their first lock acquisition interleaves. The solution is calling the `synchronized` decorator with both lock names and let it resolve the acquire ordering issue for us. The right code would be: .. code-block:: python @coordination.synchronized( "emc-sg-{source_storagegroup_name}-{serial_number}", "emc-sg-{target_storagegroup_name}-{serial_number}") def move_volume_between_storage_groups( self, serial_number, device_id, source_storagegroup_name, target_storagegroup_name, extra_specs, force=False, parent_sg=None): Internally Cinder uses the `Tooz library`_ to provide the distributed locking. By default, this library is configured for Active-Passive deployments, where it uses file locks equivalent to those used for `Node locks`_. To support Active-Active deployments a specific driver will need to be configured using the `backend_url` configuration option in the `coordination` section. For a detailed description of the requirement for global locks in cinder please refer to the `replacing local locks with Tooz`_ and `manager local locks`_ specs. Drivers that use global locks based on volumes should implement method ``clean_volume_file_locks`` and if they use locks based on the snapshots they should also implement ``clean_snapshot_file_locks`` and use method ``synchronized_remove`` from ``cinder.coordination``. Example for the 3PAR driver: .. code-block:: python @classmethod def clean_volume_file_locks(cls, volume_id): coordination.synchronized_remove('3par-' + volume_id) Cinder locking ~~~~~~~~~~~~~~ Cinder uses the different locking mechanisms covered in this section to assure mutual exclusion on some actions. Here's an *incomplete* list: Barbican keys - Lock scope: Global. - Critical section: Migrate Barbican encryption keys. - Lock name: `{id}-_migrate_encryption_key`. - Where: `_migrate_encryption_key` method. - File: `cinder/keymgr/migration.py`. Backup service - Lock scope: Node. - Critical section: Cleaning up resources at startup. - Lock name: `backup-pgid-{process-group-id}`. - Where: `_cleanup_incomplete_backup_operations` method. - File: `cinder/backup/manager.py`. Image cache - Lock scope: Global. - Critical section: Create a new image cache entry. - Lock name: `{image_id}`. - Where: `_prepare_image_cache_entry` method. - File: `cinder/volume/flows/manager/create_volume.py`. Throttling: - Lock scope: Process. - Critical section: Set parameters of a cgroup using `cgset` CLI. - Lock name: `''BlkioCgroup'`. - Where: `_inc_device` and `_dec_device` methods. - File: `cinder/volume/throttling.py`. Volume deletion: - Lock scope: Global. - Critical section: Volume deletion operation. - Lock name: `{volume.id}-delete_volume`. - Where: `delete_volume` method. - File: `cinder/volume/manager.py`. Volume deletion request: - Lock scope: Status based. - Critical section: Volume delete RPC call. - Status requirements: attach_status != 'attached' && not migrating - Where: `delete` method. - File: `cinder/volume/api.py`. Snapshot deletion: - Lock scope: Global. 
- Critical section: Snapshot deletion operation. - Lock name: `{snapshot.id}-delete_snapshot`. - Where: `delete_snapshot` method. - File: `cinder/volume/manager.py`. Volume creation: - Lock scope: Global. - Critical section: Protect source of volume creation from deletion. Volume or Snapshot. - Lock name: `{snapshot-id}-delete_snapshot` or `{volume-id}-delete_volume}`. - Where: Inside `create_volume` method as context manager for calling `_fun_flow`. - File: `cinder/volume/manager.py`. Attach volume: - Lock scope: Global. - Critical section: Updating DB to show volume is attached. - Lock name: `{volume_id}`. - Where: `attach_volume` method. - File: `cinder/volume/manager.py`. Detach volume: - Lock scope: Global. - Critical section: Updating DB to show volume is detached. - Lock name: `{volume_id}-detach_volume`. - Where: `detach_volume` method. - File: `cinder/volume/manager.py`. Volume upload image: - Lock scope: Status based. - Critical section: `copy_volume_to_image` RPC call. - Status requirements: status = 'available' or (force && status = 'in-use') - Where: `copy_volume_to_image` method. - File: `cinder/volume/api.py`. Volume extend: - Lock scope: Status based. - Critical section: `extend_volume` RPC call. - Status requirements: status in ('in-use', 'available') - Where: `_extend` method. - File: `cinder/volume/api.py`. Volume migration: - Lock scope: Status based. - Critical section: `migrate_volume` RPC call. - Status requirements: status in ('in-use', 'available') && not migrating - Where: `migrate_volume` method. - File: `cinder/volume/api.py`. Volume retype: - Lock scope: Status based. - Critical section: `retype` RPC call. - Status requirements: status in ('in-use', 'available') && not migrating - Where: `retype` method. - File: `cinder/volume/api.py`. Driver locking ~~~~~~~~~~~~~~ There is no general rule on where drivers should use locks. Each driver has its own requirements and limitations determined by the storage backend and the tools and mechanisms used to manage it. Even if they are all different, commonalities may exist between drivers. Providing a list of where some drivers are using locks, even if the list is incomplete, may prove useful to other developers. To contain the length of this document and keep it readable, the list with the :doc:`drivers_locking_examples` has its own document. Cinder-API ---------- The API service is the public face of Cinder. Its REST API makes it possible for anyone to manage and consume block storage resources. So requests from clients can, and usually do, come from multiple sources. Each Cinder API service by default will run multiple workers. Each worker is run in a separate subprocess and will run a predefined maximum number of green threads. The number of API workers is defined by the `osapi_volume_workers` configuration option. Defaults to the number of CPUs available. Number of green threads per worker is defined by the `wsgi_default_pool_size` configuration option. Defaults to 100 green threads. The service takes care of validating request parameters. Any detected error is reported immediately to the user. Once the request has been validated, the database is changed to reflect the request. This can result in adding a new entry to the database and/or modifying an existing entry. For create volume and create snapshot operations the API service will create a new database entry for the new resource. 
And the new information for the resource will be returned to the caller right after the service passes the request to the next Cinder service via RPC. Operations like retype and delete will change the database entry referenced by the request, before making the RPC call to the next Cinder service. Create backup and restore backup are two of the operations that will create a new entry in the database, and modify an existing one. These database changes are very relevant to the high availability operation. Cinder core code uses resource states extensively to control exclusive access to resources. Race prevention ~~~~~~~~~~~~~~~ The API service checks that resources referenced in requests are in a valid state. Unlike allowed resource states, valid states are those that allow an operation to proceed. Validation usually requires checking multiple conditions. Careless coding leaves Cinder open to race conditions. Patterns in the form of DB data read, data check, and database entry modification, must be avoided in the Cinder API service. Cinder has implemented a custom mechanism, called conditional updates, to prevent race conditions. Leverages the SQLAlchemy ORM library to abstract the equivalent ``UPDATE ... FROM ... WHERE;`` SQL query. Complete reference information on the conditional updates mechanism is available on the :doc:`api_conditional_updates` development document. For a detailed description on the issue, ramifications, and solution, please refer to the `API Race removal spec`_. Cinder-Volume ------------- The most common deployment option for Cinder-Volume is as Active-Passive. This requires a common storage backend, the same Cinder backend configuration in all nodes, having the `backend_host` set on the backend sections, and using a high-availability cluster resource manager like Pacemaker. .. attention:: Having the same `host` value configured on more than one Cinder node is highly discouraged. Using `backend_host` in the backend section is the recommended way to set Active-Passive configurations. Setting the same `host` field will make Scheduler and Backup services report using the same database entry in the `services` table. This may create a good number of issues: We cannot tell when the service in a node is down, backups services will break other running services operation on start, etc. For Active-Active configurations we need to include the Volume services that will be managing the same backends on the cluster. To include a node in a cluster, we need to define its name in the `[DEFAULT]` section using the `cluster` configuration option, and start or restart the service. .. note:: We can create a cluster with a single volume node. Having a single node cluster allows us to later on add new nodes to the cluster without restarting the existing node. .. warning:: The name of the cluster must be unique and cannot match any of the `host` or `backend_host` values. Non unique values will generate duplicated names for message queues. When a Volume service is configured to be part of a cluster, and the service is restarted, the manager detects the change in configuration and moves existing resources to the cluster. Resources are added to the cluster in the `_include_resources_in_cluster` method setting the `cluster_name` field in the database. Volumes, groups, consistency groups, and image cache elements are added to the cluster. Clustered Volume services are different than normal services. 
To determine if a backend is up, it is no longer enough checking `service.is_up`, as that will only give us the status of a specific service. In a clustered deployment there could be other services that are able to service the same backend. That's why we'll have to check if a service is clustered using `cinder.is_clustered` and if it is, check the cluster's `is_up` property instead: `service.cluster.is_up`. In the code, to detect if a cluster is up, the `is_up` property from the `Cluster` Versioned Object uses the `last_heartbeat` field from the same object. The `last_heartbeat` is a *column property* from the SQLAlchemy ORM model resulting from getting the latest `updated_at` field from all the services in the same cluster. RPC calls ~~~~~~~~~ When we discussed the `Job distribution`_ we mentioned message queues having multiple listeners and how they were used to distribute jobs in a round robin fashion to multiple nodes. For clustered Volume services we have the same queues used for broadcasting and to address a specific node, but we also have queues to broadcast to the cluster and to send jobs to the cluster. Volume services will be listening in all these queues and they can receive requests from any of them. Which they'll have to do to process RPC calls addressed to the cluster or to themselves. Deciding the target message queue for requests to the Volume service is done in the `volume/rpcapi.py` file. We use method `_get_cctxt`, from the `VolumeAPI` class, to prepare the client context to make RPC calls. This method accepts a `host` parameter to indicate where we want to make the RPC. This `host` parameter refers to both hosts and clusters, and is used to determine the server and the topic. When calling the `_get_cctxt` method, we would need to pass the resource's `host` field if it's not clustered, and `cluster_name` if it is. To facilitate this, clustered resources implement the `service_topic_queue` property that automatically gives you the right value to pass to `_get_cctxt`. An example for the create volume: .. code-block:: python def create_volume(self, ctxt, volume, request_spec, filter_properties, allow_reschedule=True): cctxt = self._get_cctxt(volume.service_topic_queue) cctxt.cast(ctxt, 'create_volume', request_spec=request_spec, filter_properties=filter_properties, allow_reschedule=allow_reschedule, volume=volume) As we know, snapshots don't have `host` or `cluster_name` fields, but we can still use the `service_topic_queue` property from the `Snapshot` Versioned Object to get the right value. The `Snapshot` internally checks these values from the `Volume` Versioned Object linked to that `Snapshot` to determine the right value. Here's an example for deleting a snapshot: .. code-block:: python def delete_snapshot(self, ctxt, snapshot, unmanage_only=False): cctxt = self._get_cctxt(snapshot.service_topic_queue) cctxt.cast(ctxt, 'delete_snapshot', snapshot=snapshot, unmanage_only=unmanage_only) Replication ~~~~~~~~~~~ Replication v2.1 failover is requested on a per node basis, so when a failover request is received by the API it is then redirected to a specific Volume service. Only one of the services that form the cluster for the storage backend will receive the request, and the others will be oblivious to this change and will continue using the same replication site they had been using before. To support the replication feature on clustered Volume services, drivers need to implement the `Active-Active replication spec`_.
In this spec the `failover_host` method is split in two, `failover` and `failover_completed`. On a backend supporting replication on Active-Active deployments, `failover_host` would end up being a call to `failover` followed by a call to `failover_completed`. Code extract from the RBD driver: .. code-block:: python def failover_host(self, context, volumes, secondary_id=None, groups=None): active_backend_id, volume_update_list, group_update_list = ( self.failover(context, volumes, secondary_id, groups)) self.failover_completed(context, secondary_id) return active_backend_id, volume_update_list, group_update_list Enabling Active-Active on Drivers ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Supporting Active-Active configurations is driver dependent, so they have to opt in. By default drivers are not expected to support Active-Active configurations and will fail on startup if we try to deploy them as such. Drivers can indicate they support Active-Active setting the class attribute `SUPPORTS_ACTIVE_ACTIVE` to `True`. If a single driver supports multiple storage solutions, it can leave the class attribute as it is, and set it as an overriding instance attribute on `__init__`. There is no well defined procedure required to allow driver maintainers to set `SUPPORTS_ACTIVE_ACTIVE` to `True`. Though there is an ongoing effort to write a spec on `testing Active-Active`_. So for now, we could say that it's "self-certification". Vendors must do their own testing until they are satisfied with their testing. Real testing of Active-Active deployments requires multiple Cinder Volume nodes on different hosts, as well as a properly configured Tooz DLM. Driver maintainers can use Devstack to catch the rough edges on their initial testing. Running 2 Cinder Volume services on an All-In-One DevStack installation makes it easy to deploy and debug. Running 2 Cinder Volume services on the same node simulating different nodes can be easily done: - Creating a new directory for local locks: Since we are running both services on the same node, a file lock could make us believe that the code would work on different nodes. Having a different lock directory, default is `/opt/stack/data/cinder`, will prevent this. - Creating a layover cinder configuration file: Cinder supports having different configurations files where each new files overrides the common parts of the old ones. We can use the same base cinder configuration provided by DevStack and write a different file with a `[DEFAULT]` section that configures `host` (to anything different than the one used in the first service), and `lock_path` (to the new directory we created). For example we could create `/etc/cinder/cinder2.conf`. - Create a new service unit: This service unit should be identical to the existing `devstack@c-vol` except replace the `ExecStart` that should have the postfix `--config-file /etc/cinder/cinder2.conf`. Once we have tested it in DevStack way we should deploy Cinder in a new Node, and continue with the testings. It is not necessary to do the DevStack step first, we can jump to having Cinder in multiple nodes right from the start. Whatever way we decide to test this, we'll have to change `cinder.conf` and add the `cluster` configuration option and restart the Cinder service. We also need to modify the driver under test to include the `SUPPORTS_ACTIVE_ACTIVE = True` class attribute. Cinder-Scheduler ---------------- Unlike the Volume service, the Cinder Scheduler has supported Active-Active deployments for a long time. 
Unfortunately, current support is not perfect, scheduling on Active-Active deployments has some issues. The root cause of these issues is that the scheduler services don't have a reliable single source of truth for the information they rely on to make the scheduling. Volume nodes periodically send a broadcast with the backend stats to all the schedulers. The stats include total storage space, free space, configured maximum over provisioning, etc. All the backends' information is stored in memory at the Schedulers, and used to decide where to create new volumes, migrate them on a retype, and so on. For additional information on the stats, please refer to the :ref:`volume stats ` section of the Contributor/Developer docs. Trying to keep updated stats, schedulers reduce available free space on backends in their internal dictionary. These updates are not shared between schedulers, so there is not a single source of truth, and other schedulers don't operate with the same information. Until the next stat reports is sent, schedulers will not get in sync. This may create unexpected behavior on scheduling. There are ongoing efforts to fix this problem. Multiple solutions are being discussed: using the database as a single source of truth, or using an external placement service. When we added Active-Active support to the Cinder Volume service we had to update the scheduler to understand it. This mostly entailed 3 things: - Setting the `cluster_name` field on Versioned Objects once a backend has been chosen. - Grouping stats for all clustered hosts. We don't want to have individual entries for the stats of each host that manages a cluster, as there should be only one up to date value. We stopped using the `host` field as the id for each host, and created a new property called `backend_id` that takes into account if the service is clustered and returns the host or the cluster as the identifier. - Prevent race conditions on stats reports. Due to the concurrency on the multiple Volume services in a cluster, and the threading in the Schedulers, we could receive stat reports out of order (more up to date stats last). To prevent this we started time stamping the stats on the Volume services. Using the timestamps schedulers can discard older stats. Heartbeats ~~~~~~~~~~ Like any other non API service, schedulers also send heartbeats using the database. The difference is that, unlike other services, the purpose of these heartbeats is merely informative. Admins can easily know whether schedulers are running or not with a Cinder command. Using the same `host` configuration in all nodes defeats the whole purpose of reporting heartbeats in the schedulers, as they will all report on the same database entry. Cinder-Backups -------------- Originally, the Backup service was not only limited to Active-Passive deployments, but it was also tightly coupled to the Volume service. This coupling meant that the Backup service could only backup volumes created by the Volume service running on the same node. In the Mitaka cycle, the `Scalable Backup Service spec`_ was implemented. This added support for Active-Active deployments to the backup service. The Active-Active implementation for the backup service is different than the one we explained for the Volume Service. The reason lays not only on the fact that the Backup service supported it first, but also on it not supporting multiple backends, and not using the Scheduler for any operations. 
Scheduling ~~~~~~~~~~ For backups, it's the API the one selecting the host that will do the backup, using methods `_get_available_backup_service_host`, `_is_backup_service_enabled`, and `_get_any_available_backup_service`. These methods use the Backup services' heartbeats to determine which hosts are up to handle requests. Cleaning ~~~~~~~~ Cleanup on Backup services is only performed on start up. To know what resources each node is working on, they set the `host` field in the backup Versioned Object when they receive the RPC call. That way they can select them for cleanup on start. The method in charge of doing the cleanup for the backups is called `_cleanup_incomplete_backup_operations`. Unlike with the Volume service we cannot have a backup node clean up after another node's. .. _API Race removal spec: https://specs.openstack.org/openstack/cinder-specs/specs/mitaka/cinder-volume-active-active-support.html .. _Cinder Volume Job Distribution: https://specs.openstack.org/openstack/cinder-specs/specs/ocata/ha-aa-job-distribution.html .. _RabbitMQ tutorials: https://www.rabbitmq.com/getstarted.html .. _Cleanup spec: https://specs.openstack.org/openstack/cinder-specs/specs/newton/ha-aa-cleanup.html .. _synchronized method's documentation: https://docs.openstack.org/cinder/latest/contributor/api/cinder.coordination.html#module-cinder.coordination .. _Format Specification Mini-Language: https://docs.python.org/2.7/library/string.html#formatspec .. _Tooz library: https://opendev.org/openstack/tooz .. _replacing local locks with Tooz: https://specs.openstack.org/openstack/cinder-specs/specs/mitaka/ha-aa-tooz-locks.html .. _manager local locks: https://specs.openstack.org/openstack/cinder-specs/specs/newton/ha-aa-manager_locks.html .. _Active-Active replication spec: https://specs.openstack.org/openstack/cinder-specs/specs/ocata/ha-aa-replication.html .. _testing Active-Active: https://review.openstack.org/#/c/443504 .. _Scalable Backup Service spec: https://specs.openstack.org/openstack/cinder-specs/specs/mitaka/scalable-backup-service.html ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/doc/source/contributor/i18n.rst0000664000175000017500000000452100000000000021021 0ustar00zuulzuul00000000000000Internationalization ==================== For internationalization guidelines, see the `oslo.i18n documentation `_. The information below can be used to get started. Cinder uses `gettext `_ so that user-facing strings such as log messages appear in the appropriate language in different locales. To use gettext, make sure that the strings passed to the logger are wrapped in a ``_Lx()`` function call. For example:: LOG.info(_LI("block_device_mapping %s"), block_device_mapping) There are a few different _() translation markers, depending on the logging level of the text: - _LI() - Used for INFO level log messages - _LW() - Used for WARNING level log messages - _LE() - Used for ERROR level log messages (this includes LOG.exception) - _() - Used for any exception messages, including strings used for both logging and exceptions. .. note:: Starting with the Pike series, OpenStack no longer supports log translation markers like ``_Lx()``, only ``_()`` should still be used for exceptions that could be user facing. It is not necessary to add ``_Lx()`` translation instructions to new code, and the instructions can be removed from old code. 
Refer to the email thread `understanding log domain change `_ on the openstack-dev mailing list for more details. Do not use ``locals()`` for formatting messages because: 1. It is not as clear as using explicit dicts. 2. It could produce hidden errors during refactoring. 3. Changing the name of a variable causes a change in the message. 4. It creates a lot of otherwise unused variables. If you do not follow the project conventions, your code may cause pep8 hacking check failures. For translation to work properly, the top level scripts for Cinder need to first do the following before any Cinder modules are imported:: from cinder import i18n i18n.enable_lazy() Note: this should *only* be called from top level scripts - no library code or common modules should call this method. Any files that use the _() for translation then must have the following lines:: from cinder.i18n import _ If the above code is missing, it may result in an error that looks like:: NameError: name '_' is not defined ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/doc/source/contributor/index.rst0000664000175000017500000000500200000000000021344 0ustar00zuulzuul00000000000000.. Copyright 2010-2011 United States Government as represented by the Administrator of the National Aeronautics and Space Administration. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Contributor Guide ================= In this section you will find information on how to contribute to Cinder. Content includes architectural overviews, tips and tricks for setting up a development environment, and information on Cinder's lower level programming APIs. Getting Started --------------- .. toctree:: :maxdepth: 2 contributing backporting releases documentation Writing Release Notes --------------------- Please follow the format, it will make everyone's life easier. There's even a special section on writing release notes for Cinder drivers. .. toctree:: :maxdepth: 2 releasenotes .. _programming-howtos: Programming HowTos and Tutorials -------------------------------- .. toctree:: :maxdepth: 2 development.environment testing api_microversion_dev api_conditional_updates addmethod.openstackapi drivers high_availability gmr replication user_messages migration api.apache rolling.upgrades groups database-migrations .. _managing-development: Managing the Development Cycle ------------------------------ .. toctree:: :maxdepth: 1 releasecycle cinder-groups Documentation Contribution -------------------------- .. toctree:: :maxdepth: 2 documentation .. _background-concepts: Background Concepts for Cinder ------------------------------ .. toctree:: :maxdepth: 3 architecture attach_detach_conventions attach_detach_conventions_v2 thin_provisioning threading i18n rpc Other Resources --------------- .. toctree:: :maxdepth: 3 launchpad gerrit zuul releasenotes Module Reference .. 
only:: html Indices and tables ------------------ * :ref:`genindex` * :ref:`modindex` * :ref:`search` ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/doc/source/contributor/launchpad.rst0000664000175000017500000000313700000000000022203 0ustar00zuulzuul00000000000000Project hosting with Launchpad ============================== `Launchpad`_ hosts the Cinder project. The Cinder project homepage on Launchpad is https://launchpad.net/cinder. Launchpad credentials --------------------- Creating a login on Launchpad is important even if you don't use the Launchpad site itself, since Launchpad credentials are used for logging in on several OpenStack-related sites. These sites include: * `Wiki`_ * Gerrit (see :doc:`gerrit`) * Zuul (see :doc:`zuul`) Mailing list ------------ The mailing list email is ``openstack-discuss@lists.openstack.org``. This is a common mailing list across the OpenStack projects. To participate in the mailing list: #. Subscribe to the list at https://lists.openstack.org/cgi-bin/mailman/listinfo/openstack-discuss The mailing list archives are at https://lists.openstack.org/pipermail/openstack-discuss/. Bug tracking ------------ Report Cinder bugs at https://bugs.launchpad.net/cinder Feature requests (Blueprints) ----------------------------- Cinder uses Launchpad Blueprints to track feature requests. Blueprints are at https://blueprints.launchpad.net/cinder. Technical support (Answers) --------------------------- Cinder no longer uses Launchpad Answers to track Cinder technical support questions. Note that `Ask OpenStack`_ (which is not hosted on Launchpad) can be used for technical support requests. .. _Launchpad: https://launchpad.net .. _Wiki: https://wiki.openstack.org/wiki/Main_Page .. _Cinder Team: https://launchpad.net/~cinder .. _OpenStack Team: https://launchpad.net/~openstack .. _Ask OpenStack: https://ask.openstack.org ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/doc/source/contributor/migration.rst0000664000175000017500000003165300000000000022241 0ustar00zuulzuul00000000000000.. Copyright (c) 2015 OpenStack Foundation All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Migration ========= Introduction to volume migration -------------------------------- Cinder provides the volume migration support within the same deployment, which means the node of cinder volume service, c-vol node where the source volume is located, is able to access the c-vol node where the destination volume is located, and both of them share the same Cinder API service, scheduler service, message queue service, etc. As a general rule migration is possible for volumes in 'available' or 'in-use' status, for the driver which has implemented volume migration. So far, we are confident that migration will succeed for 'available' volumes, whose drivers implement the migration routines. However, the migration of 'in-use' volumes is driver dependent. 
It depends on different drivers involved in the operation. It may fail depending on the source or destination driver of the volume. For example, for older releases (before Ussuri), the migration of 'in-use' volume from RBD to LVM will succeed, but from LVM to RBD, it will fail. Currently, migration of 'in-use' volumes will not succeed for any backend when they are attached to an instance in any of these states: SHUTOFF, SUSPENDED, or SOFT-DELETED. There are two major scenarios, which volume migration supports in Cinder: Scenario 1: Migration between two back-ends with the same volume type, regardless if they are located on the same c-vol node or not. Scenario 2: Migration between two back-ends with different volume types, regardless if the back-ends are located on the same c-vol node or not. .. note:: Retyping an unencrypted volume to the same size encrypted volume will most likely fail. Even though the volume is the same size as the source volume, the encrypted volume needs to store additional encryption information overhead. This results in the new volume not being large enough to hold all data. Please do not try this in older releases. How to do volume migration via CLI ---------------------------------- Scenario 1 of volume migration is done via the following command from the CLI:: cinder migrate [--force-host-copy []] [--lock-volume []] Mandatory arguments: ID of volume to migrate. Destination host. The format of host is host@backend#POOL, while 'host' is the host name of the volume node, 'backend' is the back-end name and 'POOL' is a logical concept to describe a set of storage resource, residing in the back-end. If the back-end does not have specified pools, 'POOL' needs to be set with the same name as 'backend'. Optional arguments: --force-host-copy [] Enables or disables generic host-based force- migration, which bypasses the driver optimization. Default=False. --lock-volume [] Enables or disables the termination of volume migration caused by other commands. This option applies to the available volume. True means it locks the volume state and does not allow the migration to be aborted. The volume status will be in maintenance during the migration. False means it allows the volume migration to be aborted. The volume status is still in the original status. Default=False. Important note: Currently, error handling for failed migration operations is under development in Cinder. If we would like the volume migration to finish without any interruption, please set --lock-volume to True. If it is set to False, we cannot predict what will happen, if other actions like attach, detach, extend, etc, are issued on the volume during the migration. It all depends on which stage the volume migration has reached and when the request of another action comes. Scenario 2 of volume migration can be done via the following command from the CLI:: cinder retype --migration-policy on-demand Mandatory arguments: Name or ID of volume for which to modify type. New volume type. Source volume type and destination volume type must be different and they must refer to different back-ends. Configurations -------------- To set up an environment to try the volume migration, we need to configure at least two different back-ends on the same node of cinder volume service, c-vol node or two back-ends on two different volume nodes of cinder volume service, c-vol nodes. Which command to use, 'cinder migrate' or 'cinder retype', depends on which type of volume we would like to test. 
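For example, assuming an available volume and a destination back-end pool similar to those described above (the volume ID, node name, back-end name and pool name below are made up purely for illustration), a Scenario 1 migration could be requested with::

    cinder migrate --lock-volume True 26df0ea4-1dcc-4a63-8c3c-6b5a1f8f0d1d node2@driver-biz#pool-a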
**Scenario 1 for migration** To configure the environment for Scenario 1 migration, e.g. a volume is migrated from a back-end on Node 1 to a back-end on Node 2, cinder.conf needs to contain the following entries for the same back-end on both the source and the destination nodes: For Node 1: ... [] volume_driver=xxxx volume_backend_name= ... For Node 2: ... [] volume_driver=xxxx volume_backend_name= ... If a volume with a predefined volume type is going to migrate, the back-end drivers from Node 1 and Node 2 should have the same value for volume_backend_name, which means the value of volume_backend_name should be the same for Node 1 and Node 2. The volume type can be created with the extra specs {volume_backend_name: driver-biz}. If we are going to migrate a volume with a volume type of none, it is not necessary to set the same value to volume_backend_name for both Node 1 and Node 2. **Scenario 2 for migration** To configure the environment for Scenario 2 migration: For example, a volume is migrated from the driver-biz back-end on Node 1 to the driver-net back-end on Node 2, cinder.conf needs to contain the following entries: For Node 1: ... [driver-biz] volume_driver=xxxx volume_backend_name=driver-biz ... For Node 2: ... [driver-net] volume_driver=xxxx volume_backend_name=driver-net ... For example, a volume is migrated from the driver-biz back-end on Node 1 to the driver-net back-end on the same node, cinder.conf needs to contain the following entries: ... [driver-biz] volume_driver=xxxx volume_backend_name=driver-biz ... ... [driver-net] volume_driver=xxxx volume_backend_name=driver-net ... Two volume types need to be created. One is with the extra specs: {volume_backend_name: driver-biz}. The other is with the extra specs: {volume_backend_name: driver-net}. What can be tracked during volume migration ------------------------------------------- Volume migration is an administrator-only action and it may take a relatively long time to finish. The property 'migration status' will indicate the stage of the migration process for the volume. The administrator can check the 'migration status' via the 'cinder list' or 'cinder show ' command. The 'cinder list' command presents a list of all the volumes with some properties displayed, including the migration status, only to the administrator. However, the migration status is not included if 'cinder list' is issued by an ordinary user. The 'cinder show ' command will present all the detailed information of a specific volume, including the migration status, only to the administrator. If the migration status of a volume shows 'starting', 'migrating' or 'completing', it means the volume is in the process of a migration. If the migration status is 'success', it means the migration has finished and the previous migration of this volume succeeded. If the migration status is 'error', it means the migration has finished and the previous migration of this volume failed. How to implement volume migration for a back-end driver ------------------------------------------------------- There are two kinds of implementations for volume migration currently in Cinder. The first is the generic host-assisted migration, which consists of two different transfer modes, block-based and file-based. This implementation is based on the volume attachment to the node of the cinder volume service, the c-vol node. Any back-end driver supporting iSCSI will be able to support the generic host-assisted migration. A back-end driver without iSCSI support needs to be tested to decide if it supports this kind of migration.
The block-based transfer mode is done by the 'dd' command, applying to drivers like LVM, Storwize, etc., and the file-based transfer mode is done by file copy, typically applying to the RBD driver. The second is the driver-specific migration. Since some storage back-ends have their own special commands to copy the volume, Cinder also provides a way for them to implement the migration in terms of their own internal commands. If the volume is migrated between two nodes configured with the same storage back-end, the migration will be optimized by calling the method migrate_volume in the driver, if the driver provides an implementation for it to migrate the volume within the same back-end, and will fall back to the generic host-assisted migration provided in the manager, if no such implementation is found or this implementation is not applicable for this migration. If your storage driver in Cinder provides iSCSI support, it should naturally work under the generic host-assisted migration, when --force-host-copy is set to True from the API request. Normally you do not need to change any code, unless you need to transfer the volume from your driver via a way different from the block-based transfer or the file-based transfer. If your driver uses a network connection to communicate the block data itself, you can use file I/O to participate in migration. Please take the RBD driver as a reference for this implementation. If you would like to implement a driver-specific volume migration for your driver, the API method associated with the driver-specific migration is the following admin-only method:: migrate_volume(self, ctxt, volume, host) If your driver is taken as the destination back-end for a generic host-assisted migration and your driver needs to update the volume model after a successful migration, you need to implement the following method for your driver:: update_migrated_volume(self, ctxt, volume, new_volume, original_volume_status) Required methods ---------------- There is one mandatory method that needs to be implemented for the driver to support the driver-specific volume migration. **migrate_volume** Used to migrate the volume directly if source and destination are managed by the same storage. There is one optional method that could be implemented for the driver to support the generic host-assisted migration. **update_migrated_volume** Used to return the key-value pairs to update the volume model after a successful migration. The key-value pairs returned are supposed to be the final values your driver would like to be in the volume model, if a migration is completed. This method can be used in a generally wide range, but the most common use case covered in this method is to rename the back-end name to the original volume id in your driver to make sure that the back-end still keeps the same id or name as it had before the volume migration. For this use case, there are two important fields: _name_id and provider_location. The field _name_id is used to map the cinder volume id and the back-end id or name. The default value is None, which means the cinder volume id is the same as the back-end id or name. If they are different, _name_id is used to save the back-end id or name. The field provider_location is used to save the export information, created by the volume attach. This field is optional, since some drivers support the export creation and some do not. It is the driver maintainer's responsibility to decide what this field needs to be.
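As a rough illustration of the renaming use case described above, an ``update_migrated_volume`` implementation could look like the following sketch, where ``_rename_backend_volume`` is a hypothetical back-end call and not a real Cinder or driver API::

    def update_migrated_volume(self, ctxt, volume, new_volume,
                               original_volume_status):
        # Rename the back-end object backing new_volume so that it keeps
        # using the original Cinder volume id as its name.
        self._rename_backend_volume(new_volume, volume.id)
        # After the rename there is no id mapping or export information
        # that needs to be kept in the volume model.
        return {'_name_id': None, 'provider_location': None}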
If the back-end id or name is renamed successfully, this method can return {'_name_id': None, 'provider_location': None}. It is the choice for your driver to implement this method and decide what use cases should be covered. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/doc/source/contributor/new_driver_checklist.rst0000664000175000017500000002067300000000000024445 0ustar00zuulzuul00000000000000=========================== New Driver Review Checklist =========================== Reviewers can use this list for some common things to watch for when doing new driver reviews. This list is by no means exhaustive, but does try to capture some of things that have been found in past reviews. .. note:: Feel free to propose additional items to help make this a more complete list. Review Checklist ---------------- * Driver Code * Passing all gate tests * Driver keeps all configuration in ``cinder.conf`` and not in separate vendor specific config file. * xml files for configs are forbidden * Common gotchas * Code should use ``volume.name_id`` instead of ``volume.id``. * Handles detach where ``connector == None`` for force detach * Create from snapshot and clone properly account for new volume size being larger than original volume size * Volume not found in delete calls should return success * Ensure proper code format w/ pep8 (``tox -e pep8``), but start here first: https://docs.openstack.org/hacking/latest/user/hacking.html * ``tox -e fast8`` can be used as a quick check only against modified files * Unit tests included for all but trivial code in driver * Make sure there's an ``__init__.py`` file in the directory containing the test files or they won't be discovered by stestr when running the generic ``tox -e pyXX`` command to run unit tests. * Use the results of the ``cinder-code-coverage`` job or run ``tox -e cover`` locally to see a test coverage report. * All source code files contain Apache 2 copyright header * Stating copyright for vendor is optional * Don't attribute copyright to the OpenStack Foundation * Run ``tox -e compliance`` to make sure all required interfaces are implemented. * Required in driver: * Concrete driver implementation has decorator ``@interface.volumedriver`` * ``VERSION`` constant defined in driver class * ``CI_WIKI_NAME`` constant defined in driver class * well documented version history in the comment block for the main driver class. * Support :ref:`minimum driver features `. * Meet release deadline(s) * By Milestone 2 of the current development cycle, the driver should have working third party CI and no code review issues. * You can find the exact date on the current release schedule, which you can find from https://releases.openstack.org/index.html * Driver does not add unnecessary new config options * For example, adding vendor_username instead of using the common san_login * Driver reports all options it uses in get_driver_options() method * This is necessary for cinderlib/emberCSI use of the driver * The response should include any common config options (see above) in addition to driver-specific options * See https://review.opendev.org/c/openstack/cinder/+/770807/ for an example of how to do this * If the driver is a subclass of an existing driver, verify that it implements its own ``_update_volume_stats()`` function to override any capabilities of the parent driver that the child driver may not have. 
For example, the parent driver may support multiattach, while this may not be the case (or may not yet be verified) for the child driver. * Driver specific exceptions inherit from ``VolumeDriverException`` or ``VolumeBackendAPIException`` * Exceptions should be defined with driver code * Logging level is appropriate for content * General tracing should be at debug level * Things operators should be aware of should be at Info level * Issues that are of concern but may not have an impact on actual operation should be warning * Issues operators need to take action on or should definitely know about should be ERROR * Messages about a failure should include the snapshot or volume in question. * All exception messages that could be raised to users should be marked for translation with _() * Cryptography * Drivers must not use md5 for any security-related purpose. (In fact, drivers should avoid using it at all, because some security audits only allow a "yes"/"no" checkbox for md5 use ... but that's up to the vendor.) * If md5 *is* being used for a non security-related purpose, the code must use oslo.utils and not call hashlib directly to access md5. Here's an example of how to do this: https://review.opendev.org/c/openstack/os-brick/+/756151 * Any cryptography done by a driver should be implemented by using a well-respected cryptographic library. *Under no circumstances should a driver implement its own cryptographic functions.* If the library is already in OpenStack global requirements, then it is well-respected; otherwise, you will find out if it's well-respected when you apply for it to be added to global requirements (see next item). * Any additional libraries needed for a driver must be added to the global requirements. * https://wiki.openstack.org/wiki/Requirements#Adding_a_Requirement_to_an_OpenStack_Project * Pypi installable libraries should be added to driver section in setup.cfg * Binary dependencies need to be OSI licensed and added to bindep.txt * Third Party CI checks * Responds correctly to recheck from "run-" * Tempest run console log available * ``cinder.conf`` and all cinder service logs available * LVM driver is not being configured in ``local.conf/cinder.conf`` * Only the driver in question should be in ``cinder.conf`` and enabled * ``default_volume_type`` and ``enabled_backends`` in ``cinder.conf``, OR * ``CINDER_DEFAULT_VOLUME_TYPE`` and ``CINDER_ENABLED_BACKENDS`` in ``local.conf``, OR * ``TEMPEST_VOLUME_DRIVER`` and ``TEMPEST_VOLUME_VENDER`` in ``local.conf`` * specify correct patch for each CI run * ``CINDER_BRANCH`` in ``local.conf``, OR * ``git fetch https://review.opendev.org/openstack/cinder refs/changes/56/657856/2 && git checkout cherry-pick`` (https://wiki.openstack.org/wiki/Cinder/tested-3rdParty-drivers ) * CI runs ``tox -e all -- *volume*`` * Any skipped tests need to be clearly documented why they are being skipped including the plan for getting rid of the need to skip them. * https://opendev.org/openstack/cinder-tempest-plugin needs to be installed so those tempest tests run as well. * ``tox`` | ``tempest`` with ``--subunit`` helps generate HTML output (https://docs.openstack.org/os-testr/latest/user/subunit2html.html ) * ``tox`` | ``tempest`` with ``--concurrency=`` for specifying ```` number of test runners * CI must run Cinder services using Python 3. 
More specifically: * At the Ussuri Virtual Mid-Cycle meeting (session 2, 16 March 2020), the Cinder team agreed that new Third-Party CI systems should: * ideally, test using *all* of the cycle Python runtimes * otherwise, test using at least one of the cycle runtimes * The current Python runtimes are determined by the OpenStack Technical Committee. See `Tested Runtimes `_ in the OpenStack governance documents. * CI does not report failures or exception due to the CI operation and not due to test failures due to code changes. * *optional, but highly recommended:* CI only runs on third party CI recheck trigger or on successful +1 from Zuul. * CI only runs on patches to the master branch unless they are intentionally set up to be able to properly run stable branch testing. * Included with driver patch * Release note stating something like "New volume driver added for Blah blah blah storage" * See Reno usage information here: https://docs.openstack.org/reno/latest/user/usage.html * Make sure that the release note is in the correct subdirectory, namely, ``releasenotes/notes/`` in the repository root directory. It should *not* be located in the driver's section of the code tree. * Driver added to ``doc/source/reference/support-matrix.ini`` and ``doc/source/reference/support-matrix.rst`` * Driver configuration information added under ``doc/source/configuration/block-storage/drivers`` * Update ``cinder/opts.py`` including the new driver library options using the command ``tox -e genopts`` ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/doc/source/contributor/releasecycle.rst0000664000175000017500000003272300000000000022707 0ustar00zuulzuul00000000000000=================== Release Cycle Tasks =================== This document describes the relative ordering and rough timeline for all of the steps related to tasks that need to be completed during a release cycle for Cinder. Before PTG (after closing previous release) =========================================== #. Collect topics and prepare notes for PTG discussions in an etherpad. The PTGbot will generate a list of etherpads at some point that will be named according to the convention:: https://etherpad.openstack.org/p/-ptg-cinder (You can use a different name, but following the convention makes it easy to locate the etherpad for any project for any release. Something we've done in the past is to do the planning on an etherpad named:: https://etherpad.openstack.org/p/-ptg-cinder-planning and then move the topics over to the "real" etherpad when the team has decided on what to include and the ordering. Do whatever works for you. Just make sure the team knows where the planning etherpad is and give everyone plenty of reminders to add topics. #. Add any Cinder-specific schedule information to the release calendar as soon as it's available. Example patch: https://review.opendev.org/c/openstack/releases/+/754484 * We used to wait to do this until after proposed deadlines were discussed at the PTG, but recently people have been getting antsy about what the deadlines are as soon as the stable branch for the previous release is cut (which is roughly a month before the PTG). So you may want to go ahead and post the patch early and announce the dates at a Cinder meeting so that people can point out conflicts. Or do it the old-fashioned way and work it out at the PTG. Either way, the point is to make sure you don't forget to add Cinder-specific dates to the main release schedule. #. 
Review the :ref:`cinder-groups`. Between PTG and Milestone-1 =========================== #. Review output from the PTG and set Review-Priority on any high priority items identified from those discussions. Send out recap to the mailing list. #. Review information about standing Cinder meetings posted at https://meetings.opendev.org/ in case any changes were discussed at the PTG. You make changes by proposing a patch to https://opendev.org/opendev/irc-meetings Example patch: https://review.opendev.org/c/opendev/irc-meetings/+/695339 #. **Supported Python versions** * The supported Python runtimes for the cycle may have changed from the previous cycle. You can find them at https://governance.openstack.org/tc/reference/runtimes/ * Review the tox testenvs defined in ``tox.ini`` and make sure there are functional testenvs for each. You don't have to worry about unit tests--tox is smart enough to know what to do for those--but if you specify ``tox -e functional-py312`` tox will bomb unless there's a 'functional-py312' testenv defined. * The OpenStack required check and gate tests are defined in a template in `zuul.d/project-templates.yaml`_ in the `openstack/openstack-zuul-jobs repo`_. The template is maintained by the OpenStack QA team. It should have an easily recognizable name, for example, ``openstack-python3-zed-jobs``. Usually there will be autogenerated patches for each cinder project repo to change the template from the previous cycle's to the current cycle's, so watch for those. Or you can proactively make the changes yourself as soon as the template is available. Example new template patch: https://review.opendev.org/c/openstack/openstack-zuul-jobs/+/831826 .. _zuul.d/project-templates.yaml: https://opendev.org/openstack/openstack-zuul-jobs/src/branch/master/zuul.d/project-templates.yaml .. _openstack/openstack-zuul-jobs repo: https://opendev.org/openstack/openstack-zuul-jobs * Check the ``setup.cfg`` file in each cinder deliverable to make sure that the claimed supported Python versions line up with the cycle's supported Python versions. #. Focus on spec reviews to get them approved and updated early in the cycle to allow enough time for implementation. #. Review new driver submissions and give early feedback so there isn't a rush at the new driver deadline. Check for status of third party CI and any missing steps they need to know about. #. Review community-wide goals and decide a plan or response to them. Milestone-1 =========== #. Propose library releases for os-brick or python-cinderclient if there are merged commits ready to be released. Watch for any releases proposed by the release team. #. Check progress on new drivers and specs and warn contributors if it looks like they are at risk for making it in this cycle. Between Milestone-1 and Milestone-2 =================================== #. cinderlib is a "trailing" deliverable type on a "cycle-with-intermediary" release model. That means that its release for the *previous* cycle hasn't happened yet. The release must happen no later than 3 months after the main release, which will put it roughly one week before Milestone-2 (check the current release schedule for the exact deadline). Example patch: https://review.opendev.org/c/openstack/releases/+/742503 #. Review stable backports and release status. #. The Cinder Spec Freeze usually occurs sometime within this window. After all the approved specs have merged, propose a patch that adds a directory for the next release. 
(You may have to wait until the release name has been determined by the TC.) Example patch: https://review.opendev.org/c/openstack/cinder-specs/+/778436 #. Watch for and respond to updates to new driver patches. Milestone-2 =========== #. Propose library releases for os-brick or python-cinderclient if there are merged commits ready to be released. Watch for any releases proposed by the release team. Between Milestone-2 and Milestone-3 =================================== #. Review stable backports and release status. #. Set Review-Priority for any os-brick changes that are needed for feature work to make sure they are ready by the library freeze prior to Milestone-3. #. Make sure any new feature work that needs client changes are proposed and on track to land before the client library freeze at Milestone-3. Ensure microversion bumps are reflected in cinderclient/api_versions.py MAX_VERSION. #. The week before Milestone-3, propose releases for unreleased changes in os-brick. (The release team may have already proposed an auto- generated patch 1-2 weeks earlier; make sure you -1 it if there are still changes that need to land in os-brick before release.) Include branch request for stable/$series creation. Example patch: https://review.opendev.org/c/openstack/releases/+/804670 Milestone-3 =========== #. Propose releases for unreleased changes in python-cinderclient and python-brick-cinderclient-ext. These will be the official cycle releases for these deliverables. Watch for a release patch proposed by the release team; it may need to be updated to include all the appropriate changes. Include branch request for stable/$series creation. Example patches: | https://review.opendev.org/c/openstack/releases/+/806583 | https://review.opendev.org/c/openstack/releases/+/807167 #. Set Review-Priority -1 for any feature work not complete in time for inclusion in this cycle. Remind contributors that FFE will need to be requested to still allow it in this cycle. #. Complete the responses to community-wide goals if not already done. #. Add cycle-highlights in the releases deliverable file. The deadline for this has been moved up (since wallaby) to the Friday of M-3 week. (There should be an entry on the cycle release schedule, and a reminder email with subject "[PTLs][release] xxx Cycle Highlights" to the ML.) The Foundation people use the info to start preparing press releases for the cycle coordinated release, so it's good to have key features mentioned. (If something has an FFE and you're not sure if it will land, you can always update the cycle-highlights later and shoot an email to whoever sent out the reminder so they know to look for it.) Example patch: https://review.opendev.org/c/openstack/releases/+/807398 Between Milestone-3 and RC1 =========================== #. Make sure the maximum microversion is up-to-date in the version history file ``cinder/api/openstack/rest_api_version_history.rst`` * Any patch that bumped the microversion should have already included an entry in this file; you need to add "(Maximum in )" to the last (highest) entry. * This file is pulled into the api-ref by the documentation build process. #. Prepare "prelude" release notes as summaries of the content of the release so that those are merged before their first release candidate. #. Check the "Driver Removal History" section (bottom) of ``doc/source/reference/support-matrix.rst`` to make sure any drivers removed during the cycle are mentioned there. #. 
Check the upgrade check tool ``cmd/status.py`` to make sure the removed drivers list is up to date. RC1 week ======== #. Propose RC1 release for cinder or watch for proposal from the release team. Include ``stable/$series`` branching request with the release. #. Update any cycle-highlights for the release cycle if there was something you weren't sure about at M-3. #. Remind contributors that ``master`` is now the next cycle but focus should be on wrapping up the current cycle. #. Watch for translation and new stable branch patches and merge them quickly. Between RC1 and Final ===================== #. The release team has started adding a 'release-notes' field to the deliverables' yaml files. You can watch for the patch and vote on it if you see it. Example patch: https://review.opendev.org/c/openstack/releases/+/810236 #. Related to the previous point: at this time in the cycle, the release notes for all the cinder cycle deliverables (cinder, os-brick, python-cinderclient, and python-brick-cinderclient-ext) should have been published automatically at https://docs.openstack.org/releasenotes/. Sometimes the promotion job fails, though, so it's good to check that the release notes for the current cycle are actually there. #. Propose additional RC releases as needed. .. note:: Try to avoid creating more than 3 release candidates so we are not creating candidates that consumers are then trained to ignore. Each release candidate should be kept for at least 1 day, so if there is a proposal to create RCx but clearly a reason to create another one, delay RCX to include the additional patches. #. Watch for translation patches and merge them quickly. #. Make sure final RC request is done one week before the final release date. #. | Watch for the final release proposal from the release team to review and +1 so team approval is included in the metadata that goes onto the signed tag. Example patch: https://review.opendev.org/c/openstack/releases/+/785754 | Here's what it looks like when people forget to check for this patch: https://review.opendev.org/c/openstack/releases/+/812251 Final Release ============= #. Start planning for next release cycle. #. Check for bugfixes that would be good to backport to older stable branches. #. Propose any bugfix releases for things that did not make the freeze for final library or service releases. Post-Final Release ================== #. Make sure at least three SQLAlchemy-Migrate migrations are reserved for potential backports. Example patch: https://review.opendev.org/c/openstack/cinder/+/649436 #. Unblock any new driver submission patches that missed the previous release cycle's deadline. #. Review approved cinder-specs that were merged to the previous cycle folder that did not get implemented. Revert or move those specs to the next cycles's folder. #. The oldest active stable branch (that is, the oldest one you can still release from) will go to Extended Maintenance mode shortly after the coordinated release. Watch for an email notification from the release team about the projected date, which you can also find in the "Next Phase" column for that release series on https://releases.openstack.org * Prioritize any open reviews that should get into the final stable release from this branch for all relevant cinder deliverables and motivate the cinder-stable-maint cores to review them. * Propose a final release for any deliverable that needs one. 
Example patch: https://review.opendev.org/c/openstack/releases/+/761929 * The release team will probably propose a placeholder patch to tag the stable branch for each deliverable as -em (or if they haven't gotten around to it yet, you can propose it yourself). Verify that the hash is at the current HEAD for each deliverable (it may have changed if some last-minute stuff was merged). Example patch: https://review.opendev.org/c/openstack/releases/+/762372 * After the "transition to EM" patch has merged, update the zuul jobs for the cinder-tempest-plugin. We always have 3 jobs for the active stable branches plus jobs for master. Add a new job for the most recent release and remove the job for the stable branch that just went to EM. Example patch: https://review.opendev.org/c/openstack/cinder-tempest-plugin/+/756330 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/doc/source/contributor/releasenotes.rst0000664000175000017500000001346600000000000022743 0ustar00zuulzuul00000000000000.. Copyright 2015 Intel Corporation All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. .. _release-notes: Release notes ============= The release notes for a patch should be included in the patch. If the following applies to the patch, a release note is required: * Upgrades * The deployer needs to take an action when upgrading * A new config option is added that the deployer should consider changing from the default * A configuration option is deprecated or removed * Features * A new feature or driver is implemented * Feature is deprecated or removed * Current behavior is changed * Bugs * A security bug is fixed * A long-standing or important bug is fixed * APIs * REST API changes Reviewing release note content ------------------------------ Release notes are user facing. We expect operators to read them (and other people interested in seeing what's in a new release may read them, too). This makes a release note different from a commit message, which is aimed at other developers. Keep this in mind as you review a release note. Also, since it's user facing, something you would think of as a nit in a code comment (for example, bad punctuation or a misspelled word) is not really a nit in a release note--it's something that needs to be corrected. This also applies to the format of the release note, which should follow the standards set out later in this document. In summary, don't feel bad about giving a -1 for a nit in a release note. We don't want to have to go back and fix typos later, especially for a bugfix that's likely to be backported, which would require squashing the typo fix into the backport patch (which is something that's easy to forget). Thus we really want to get release notes right the first time. Fixing a release note --------------------- Of course, even with careful writing and reviewing, a mistake can slip through that isn't noticed until after a release. 
If that happens, the patch to correct a release note must be proposed *directly to the stable branch in which the release note was introduced*. (Yes, this is completely different from how we handle bugs.) This is because of how reno scans release notes and determines what release they go with. See `Updating Stable Branch Release Notes `_ in the `reno User Guide` for more information. Bugs ---- For bug fixes, release notes must include the bug number in Launchpad with a link to it as a RST link like in the following example: .. code-block:: yaml --- fixes: - | `Bug #1889758 `_: Fixed revert to snapshot not working for non admin users when using the snapshot's name. Note the use of the past tense ("Fixed") instead of the present tense ("Fix"). This is because although you are fixing the bug right now in the present, operators will be reading the release notes in the future (at the time of the release), at which time your bug fix will be a thing of the past. Additionally, keep in mind that when your release note is published, it is mixed in with all the other release notes and won't obviously be connected to your patch. Thus, in order for it to make sense, you may need to repeat information that you already have in your commit message. That's OK. Drivers ------- For release notes related to a specific driver -be it volume, backup, or zone manager- the release note line must start with `` driver:``. For example: .. code-block:: yaml --- features: - | RBD driver: Added support for volume manage and unmanage operations. When fixing a driver bug we must not only have the driver name prefix but also the bug number and link: .. code-block:: yaml --- fixes: - | Brocade driver `bug #1866860 `_: Fixed ``AttributeError`` when using ``REST_HTTP`` or ``REST_HTTPS`` as the ``fc_southbound_protocol`` option and an exception is raised by the client. There are times when a bug affects multiple drivers. In such a cases we must list each of the driver as an independent item following above rules: .. code-block:: yaml --- fixes: - | Unity driver `bug #1881108 `_: Fixed leaving leftover devices on the host when validation of the attached volume fails on some cloning cases and create volume from snapshot. - | Kaminario driver `bug #1881108 `_: Fixed leaving leftover devices on the host when validation of the attached volume fails on some cloning cases and create volume from snapshot. Creating the note ----------------- Cinder uses `reno `_ to generate release notes. Please read the docs for details. In summary, use .. code-block:: bash $ tox -e venv -- reno new Then edit the sample file that was created and push it with your change. To see the results: .. code-block:: bash $ git commit # Commit the change because reno scans git log. $ tox -e releasenotes Then look at the generated release notes files in releasenotes/build/html in your favorite browser. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/doc/source/contributor/releases.rst0000664000175000017500000001064700000000000022053 0ustar00zuulzuul00000000000000Cinder Project Releases ======================= The Cinder project follows the OpenStack 6 month development cycle, at the end of which a new stable branch is created from master, and master becomes the development branch for the next development cycle. 
Because many OpenStack consumers don't move as quickly as OpenStack development, we backport appropriate bugfixes from master into the stable branches and create new releases for consumers to use ... for a while. See the `Stable Branches `_ section of the `OpenStack Project Team Guide `_ for details about the timelines. What follows is information about the Cinder project and its releases. Where Stuff Is ~~~~~~~~~~~~~~ The Cinder Project Deliverables ------------------------------- https://governance.openstack.org/tc/reference/projects/cinder.html#deliverables The Code Repositories --------------------- * https://opendev.org/openstack/cinder * https://opendev.org/openstack/cinderlib * https://opendev.org/openstack/os-brick * https://opendev.org/openstack/python-cinderclient * https://opendev.org/openstack/python-brick-cinderclient-ext * https://opendev.org/openstack/rbd-iscsi-client * https://opendev.org/openstack/cinder-tempest-plugin * https://opendev.org/openstack/cinder-specs (no releases) Review Dashboards for Releases ------------------------------ * Patches for releasable stable branches: http://tiny.cc/cinder-releasable-stable * Patches for nonreleasable stable branches: http://tiny.cc/cinder-em-branches * Cinder project release patches: http://tiny.cc/cinder-release-patches All Cinder Project Releases --------------------------- https://releases.openstack.org/teams/cinder.html How Stuff Works ~~~~~~~~~~~~~~~ Releases from Master -------------------- Releases from **master** for *cinder* follow the 'cycle-with-rc' release model. * The 'cycle-with-rc' model describes projects that produce a single release at the end of the cycle, with one or more release candidates (RC) close to the end of the cycle and optional development milestone betas published on a per-project need. Releases from **master** for *os-brick, cinderlib, and the clients* follow the 'cycle-with-intermediary' release model. * The 'cycle-with-intermediary' model describes projects that produce multiple full releases during the development cycle, with a final release to match the end of the cycle. * os-brick has a deliverable type of 'library' * python-cinderclient and python-brick-cinderclient-ext have a deliverable type of 'client-library' * cinderlib has a deliverable type of 'trailing' * The final cinderlib release for a cycle must occur no later than 3 months after the coordinated OpenStack release of cinder. Releases from **master** for *cinder-tempest-plugin* follow the 'cycle-automatic' scheme. * No stable branches are created. * Released automatically at the end of each cycle, or on-demand. Releases from **master** for *rbd-iscsi-client* follow the 'independent' scheme. * No stable branches are created. * Released on demand whenever necessary because it has to track ceph development more than openstack development. For more information about the release models and deliverable types: https://releases.openstack.org/reference/release_models.html Branching --------- All Cinder project deliverables (except cinder-tempest-plugin and rbd-iscsi-client) follow the `OpenStack stable branch policy `_. Briefly, * The stable branches are intended to be a safe source of fixes for high impact bugs and security issues which have been fixed on master since a given release. * Stable branches are cut from the last release of a given deliverable, at the end of the common 6-month development cycle. Only members of the `cinder-stable-maint `_ gerrit group have +2 powers on patches proposed to stable branches. 
This is a subset of `cinder-core `_ plus the OpenStack-wide `stable-maint-core `_ team. While anyone may propose a release, releases must be approved by the `OpenStack Release Managers `_. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/doc/source/contributor/replication.rst0000664000175000017500000005207200000000000022557 0ustar00zuulzuul00000000000000Replication =========== For backend devices that offer replication features, Cinder provides a common mechanism for exposing that functionality on a per volume basis while still trying to allow flexibility for the varying implementation and requirements of all the different backend devices. There are 2 sides to Cinder's replication feature, the core mechanism and the driver specific functionality, and in this document we'll only be covering the driver side of things aimed at helping vendors implement this functionality in their drivers in a way consistent with all other drivers. Although we'll be focusing on the driver implementation there will also be some mentions on deployment configurations to provide a clear picture to developers and help them avoid implementing custom solutions to solve things that were meant to be done via the cloud configuration. Overview -------- As a general rule replication is enabled and configured via the cinder.conf file under the driver's section, and volume replication is requested through the use of volume types. *NOTE*: Current replication implementation is v2.1 and it's meant to solve a very specific use case, the "smoking hole" scenario. It's critical that you read the Use Cases section of the spec here: https://specs.openstack.org/openstack/cinder-specs/specs/mitaka/cheesecake.html From a user's perspective volumes will be created using specific volume types, even if it is the default volume type, and they will either be replicated or not, which will be reflected on the ``replication_status`` field of the volume. So in order to know if a snapshot is replicated we'll have to check its volume. After the loss of the primary storage site all operations on the resources will fail and VMs will no longer have access to the data. It is then when the Cloud Administrator will issue the ``failover-host`` command to make the cinder-volume service perform the failover. After the failover is completed, the Cinder volume service will start using the failed-over secondary storage site for all operations and the user will once again be able to perform actions on all resources that were replicated, while all other resources will be in error status since they are no longer available. Storage Device configuration ---------------------------- Most storage devices will require configuration changes to enable the replication functionality, and this configuration process is vendor and storage device specific so it is not contemplated by the Cinder core replication functionality. It is up to the vendors whether they want to handle this device configuration in the Cinder driver or as a manual process, but the most common approach is to avoid including this configuration logic into Cinder and having the Cloud Administrators do a manual process following a specific guide to enable replication on the storage device before configuring the cinder volume service. 
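As a point of reference for the overview above, the failover itself is requested by the Cloud Administrator with the ``cinder failover-host`` command. For example, for a backend section named ``driver-foo`` running on host ``node1`` and a replication target identified as ``vendor-id-1`` (all of these names are illustrative only)::

    cinder failover-host node1@driver-foo --backend_id vendor-id-1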
Service configuration --------------------- The way to enable and configure replication is common to all drivers and it is done via the ``replication_device`` configuration option that goes in the driver's specific section in the ``cinder.conf`` configuration file. ``replication_device`` is a multi dictionary option, that should be specified for each replication target device the admin wants to configure. While it is true that all drivers use the same ``replication_device`` configuration option this doesn't mean that they will all have the same data, as there is only one standardized and **REQUIRED** key in the configuration entry, all others are vendor specific: - backend_id: Values of ``backend_id`` keys are used to uniquely identify within the driver each of the secondary sites, although they can be reused on different driver sections. These unique identifiers will be used by the failover mechanism as well as in the driver initialization process, and the only requirement is that is must never have the value "default". An example driver configuration for a device with multiple replication targets is show below:: ..... [driver-biz] volume_driver=xxxx volume_backend_name=biz [driver-baz] volume_driver=xxxx volume_backend_name=baz [driver-foo] volume_driver=xxxx volume_backend_name=foo replication_device = backend_id:vendor-id-1,unique_key:val.... replication_device = backend_id:vendor-id-2,unique_key:val.... In this example the result of calling ``self.configuration.safe_get('replication_device')`` within the driver is the following list:: [{backend_id: vendor-id-1, unique_key: val1}, {backend_id: vendor-id-2, unique_key: val2}] It is expected that if a driver is configured with multiple replication targets, that replicated volumes are actually replicated on **all targets**. Besides specific replication device keys defined in the ``replication_device``, a driver may also have additional normal configuration options in the driver section related with the replication to allow Cloud Administrators to configure things like timeouts. Capabilities reporting ---------------------- There are 2 new replication stats/capability keys that drivers supporting replication v2.1 should be reporting: ``replication_enabled`` and ``replication_targets``:: stats["replication_enabled"] = True|False stats["replication_targets"] = [...] If a driver is behaving correctly we can expect the ``replication_targets`` field to be populated whenever ``replication_enabled`` is set to ``True``, and it is expected to either be set to ``[]`` or be missing altogether when ``replication_enabled`` is set to ``False``. The purpose of the ``replication_enabled`` field is to be used by the scheduler in volume types for creation and migrations. As for the ``replication_targets`` field it is only provided for informational purposes so it can be retrieved through the ``get_capabilities`` using the admin REST API, but it will not be used for validation at the API layer. That way Cloud Administrators will be able to know available secondary sites where they can failover. Volume Types / Extra Specs --------------------------- The way to control the creation of volumes on a cloud with backends that have replication enabled is, like with many other features, through the use of volume types. 
We won't go into the details of volume type creation, but suffice to say that you will most likely want to use volume types to discriminate between replicated and non replicated volumes and be explicit about it so that non replicated volumes won't end up in a replicated backend. Since the driver is reporting the ``replication_enabled`` key, we just need to require it for replication volume types adding ``replication_enabled=' True'`` and also specifying it for all non replicated volume types ``replication_enabled=' False'``. It's up to the driver to parse the volume type info on create and set things up as requested. While the scoping key can be anything, it's strongly recommended that all backends utilize the same key (replication) for consistency and to make things easier for the Cloud Administrator. Additional replication parameters can be supplied to the driver using vendor specific properties through the volume type's extra-specs so they can be used by the driver at volume creation time, or retype. It is up to the driver to parse the volume type info on create and retype to set things up as requested. A good pattern to get a custom parameter from a given volume instance is this:: extra_specs = getattr(volume.volume_type, 'extra_specs', {}) custom_param = extra_specs.get('custom_param', 'default_value') It may seem convoluted, but we must be careful when retrieving the ``extra_specs`` from the ``volume_type`` field as it could be ``None``. Vendors should try to avoid obfuscating their custom properties and expose them using the ``_init_vendor_properties`` method so they can be checked by the Cloud Administrator using the ``get_capabilities`` REST API. *NOTE*: For storage devices doing per backend/pool replication the use of volume types is also recommended. Volume creation --------------- Drivers are expected to honor the replication parameters set in the volume type during creation, retyping, or migration. When implementing the replication feature there are some driver methods that will most likely need modifications -if they are implemented in the driver (since some are optional)- to make sure that the backend is replicating volumes that need to be replicated and not replicating those that don't need to be: - ``create_volume`` - ``create_volume_from_snapshot`` - ``create_cloned_volume`` - ``retype`` - ``clone_image`` - ``migrate_volume`` In these methods the driver will have to check the volume type to see if the volumes need to be replicated, we could use the same pattern described in the `Volume Types / Extra Specs`_ section:: def _is_replicated(self, volume): specs = getattr(volume.volume_type, 'extra_specs', {}) return specs.get('replication_enabled') == ' True' But it is **not** the recommended mechanism, and the ``is_replicated`` method available in volumes and volume types versioned objects instances should be used instead. Drivers are expected to keep the ``replication_status`` field up to date and in sync with reality, usually as specified in the volume type. To do so in above mentioned methods' implementation they should use the update model mechanism provided for each one of those methods. One must be careful since the update mechanism may be different from one method to another. What this means is that most of these methods should be returning a ``replication_status`` key with the value set to ``enabled`` in the model update dictionary if the volume type is enabling replication. 
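For instance, a clone operation honoring the volume type could look roughly like the following sketch, where ``_clone_on_backend`` and ``_enable_backend_replication`` are made-up helpers used only for illustration::

    def create_cloned_volume(self, volume, src_vref):
        # 'fields' is cinder.objects.fields, which defines the
        # ReplicationStatus values described below.
        model_update = self._clone_on_backend(volume, src_vref) or {}
        if volume.is_replicated():
            self._enable_backend_replication(volume)
            model_update['replication_status'] = (
                fields.ReplicationStatus.ENABLED)
        return model_update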
There is no need to return the key with the value of ``disabled`` if it is not enabled, since that is the default value.

In the case of the ``create_volume`` and ``retype`` methods there is no need to return the ``replication_status`` in the model update, since it has already been set by the scheduler on creation using the extra spec from the volume type. And on ``migrate_volume`` there is no need either, since there is no change to the ``replication_status``.

*NOTE*: For storage devices doing per backend/pool replication it is not necessary to check the volume type for the ``replication_enabled`` key, since all created volumes will be replicated. They are, however, expected to return the ``replication_status`` in all those methods, including the ``create_volume`` method, because the driver may receive a volume creation request without the replication enabled extra spec, in which case the ``replication_status`` will not have been set to the right value and the driver needs to correct this.

Besides the ``replication_status`` field that drivers need to update, there are other fields in the database related to the replication mechanism that the drivers can use:

- ``replication_extended_status``
- ``replication_driver_data``

These fields are string type fields with a maximum size of 255 characters and they are available for drivers to use internally as they see fit for their normal replication operation. So they can be assigned in the model update and later on used by the driver, for example during the failover.

To avoid using magic strings drivers must use the values defined by the ``ReplicationStatus`` class in the ``cinder/objects/fields.py`` file, and these are:

- ``ERROR``: When setting up replication failed on creation, retype, or migration. This should be accompanied by the volume status ``error``.
- ``ENABLED``: When the volume is being replicated.
- ``DISABLED``: When the volume is not being replicated.
- ``FAILED_OVER``: After a volume has been successfully failed over.
- ``FAILOVER_ERROR``: When there was an error during the failover of this volume.
- ``NOT_CAPABLE``: When we failed over but the volume was not replicated.

The first 3 statuses revolve around the volume creation and the last 3 around the failover mechanism. The only status that should not be used for the volume's ``replication_status`` is the ``FAILING_OVER`` status.

Whenever we are referring to values of the ``replication_status`` in this document we will be referring to the ``ReplicationStatus`` attributes and not a literal string, so ``ERROR`` means ``cinder.objects.fields.ReplicationStatus.ERROR`` and not the string "ERROR".

Failover
--------

This is the mechanism used to instruct the cinder volume service to fail over to a secondary/target device.

Keep in mind the use case is that the primary backend has died a horrible death and is no longer valid, so any volumes that were on the primary and were not being replicated will no longer be available.

The method definition required from the driver to implement the failover mechanism is as follows::

    def failover_host(self, context, volumes, secondary_id=None):

There are several things that are expected of this method:

- Promotion of a secondary storage device to primary
- Generating the model updates
- Changing internally to access the secondary storage device for all future requests.

If no secondary storage device is provided to the driver via the ``secondary_id`` argument (it is equal to ``None``), then it is up to the driver to choose which storage device to fail over to.
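A hedged sketch of one way such a selection could look; ``self._replication_targets`` and ``self._active_backend_id`` are illustrative attributes that the driver itself would maintain, not part of the base driver::

    from cinder import exception

    def _choose_failover_target(self, secondary_id):
        # Targets parsed from the replication_device entries, minus the one
        # we are currently pointing at (we may already be failed over).
        candidates = [t['backend_id'] for t in self._replication_targets
                      if t['backend_id'] != self._active_backend_id]
        if secondary_id is not None:
            if secondary_id not in candidates:
                raise exception.InvalidReplicationTarget(reason=secondary_id)
            return secondary_id
        if not candidates:
            raise exception.UnableToFailOver(reason='no usable failover target')
        return candidates[0]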
In this regard it is important that the driver takes into consideration that it could be failing over from a secondary (there was a prior failover request), so it should discard the current target from the selection.

If the ``secondary_id`` is not a valid one the driver is expected to raise ``InvalidReplicationTarget``; for any other non recoverable errors during a failover the driver should raise ``UnableToFailOver`` or any child of the ``VolumeDriverException`` class and revert to a state where the previous backend is in use.

The failover method in the driver will receive a list of replicated volumes that need to be failed over. Replicated volumes passed to the driver may have diverse ``replication_status`` values, but they will always be one of: ``ENABLED``, ``FAILED_OVER``, or ``FAILOVER_ERROR``.

The driver must return a 2-tuple with the new storage device target id as the first element and, as the second element, a list of dictionaries with the model updates required for the volumes, so that the driver can perform future actions on those volumes now that they need to be accessed on a different location.

It's not a requirement for the driver to return model updates for all the volumes, or for any of them for that matter, as it can return ``None`` or an empty list if there's no update necessary. But if elements are returned in the model update list, then it is a requirement that each of the dictionaries contains 2 key-value pairs, ``volume_id`` and ``updates``, like this::

    [{'volume_id': volumes[0].id,
      'updates': {'provider_id': new_provider_id1,
                  ...}},
     {'volume_id': volumes[1].id,
      'updates': {'provider_id': new_provider_id2,
                  'replication_status': fields.ReplicationStatus.FAILOVER_ERROR,
                  ...}}]

In these updates there is no need to set the ``replication_status`` to ``FAILED_OVER`` if the failover was successful, as this will be performed by the manager by default, but it won't create additional DB queries if it is returned. It is however necessary to set it to ``FAILOVER_ERROR`` for those volumes that had errors during the failover.

Drivers don't have to worry about snapshots or non replicated volumes, since the manager will take care of those in the following manner:

- All non replicated volumes will have their current ``status`` field saved in the ``previous_status`` field, the ``status`` field changed to ``error``, and their ``replication_status`` set to ``NOT_CAPABLE``.
- All snapshots from non replicated volumes will have their statuses changed to ``error``.
- All replicated volumes that failed on the failover will get their ``status`` changed to ``error``, their current ``status`` preserved in ``previous_status``, and their ``replication_status`` set to ``FAILOVER_ERROR``.
- All snapshots from volumes that had errors during the failover will have their statuses set to ``error``.

Any model update request from the driver that changes the ``status`` field will trigger a change in the ``previous_status`` field to preserve the current status.

Once the failover is completed the driver should be pointing to the secondary and should be able to create and destroy volumes and snapshots as usual, and it is left to the Cloud Administrator's discretion whether resource modifying operations are allowed or not.

Failback
--------

Drivers are not required to support failback, but they are required to raise an ``InvalidReplicationTarget`` exception if the failback is requested but not supported.

The way to request the failback is quite simple: the driver will receive the argument ``secondary_id`` with the value of ``default``.
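A hedged skeleton of how ``failover_host`` might tell a failback request apart from a regular failover; everything except the method signature and the ``'default'`` sentinel value is illustrative::

    def failover_host(self, context, volumes, secondary_id=None):
        if secondary_id == 'default':
            # Failback request: point back at the original primary device.
            self._switch_to_primary()                      # driver specific
            active_backend_id = 'default'
        else:
            active_backend_id = self._choose_failover_target(secondary_id)
            self._switch_to_secondary(active_backend_id)   # driver specific

        # Build the per volume updates as described in the Failover section;
        # an empty list is acceptable when no updates are needed.
        model_updates = []
        return active_backend_id, model_updates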
That is why it is forbidden to use ``default`` as a ``backend_id`` in the target configuration in the cinder configuration file.

Expected driver behavior is the same as the one explained in the `Failover`_ section:

- Promotion of the original primary to primary
- Generating the model updates
- Changing internally to access the original primary storage device for all future requests.

If the failback of any of the volumes fails, the driver must return ``replication_status`` set to ``ERROR`` in the volume updates for those volumes. If they succeed it is not necessary to change the ``replication_status``, since the default behavior will be to set them to ``ENABLED``, but it won't create additional DB queries if it is set.

The manager will update resources in a slightly different way than in the failover case:

- All non replicated volumes will not have any model modifications.
- All snapshots from non replicated volumes will not have any model modifications.
- All replicated volumes that failed on the failback will get their ``status`` changed to ``error``, have their current ``status`` preserved in the ``previous_status`` field, and their ``replication_status`` set to ``FAILOVER_ERROR``.
- All snapshots from volumes that had errors during the failback will have their statuses set to ``error``.

We can avoid using the "default" magic string by using the ``FAILBACK_SENTINEL`` class attribute from the ``VolumeManager`` class.

Initialization
--------------

It stands to reason that a failed over Cinder volume service may be restarted, so there needs to be a way for a driver to know on start which storage device should be used to access the resources.

So, to let drivers know which storage device they should use, the manager passes drivers the ``active_backend_id`` argument to their ``__init__`` method during the initialization phase of the driver. The default value is ``None``, which means that the default (primary) storage device should be used.

Drivers should store this value if they will need it later, as the base driver is not storing it, for example to determine the current storage device when a failover is requested and we are already in a failover state, as mentioned above.

Freeze / Thaw
-------------

In many cases, after a failover has been completed we'll want to allow changes to the data in the volumes as well as some operations like attach and detach, while other operations that modify the number of existing resources, like delete or create, are not allowed.

And that is where the freezing mechanism comes in: freezing a backend puts the control plane of the specific Cinder volume service into a read only state, or at least most of it, while allowing the data plane to proceed as usual.

While this will mostly be handled by the Cinder core code, drivers are informed when the freezing mechanism is enabled or disabled via these 2 calls::

    freeze_backend(self, context)
    thaw_backend(self, context)

In most cases the driver may not need to do anything, and then it doesn't need to define any of these methods as long as it's a child class of the ``BaseVD`` class, which already implements them as noops.

Raising a ``VolumeDriverException`` exception in any of these methods will result in a 500 status code response being returned to the caller and the manager will not log the exception, so it's up to the driver to log the error if it is appropriate.

If the driver wants to give a more meaningful error response, then it can raise other exceptions that have different status codes.
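If a driver does want to act on these calls, for example to remember the state or to tell its backend about it, a minimal illustrative sketch could be as small as the following; the ``_frozen`` attribute is an assumption of the sketch and nothing in the manager looks at it::

    def freeze_backend(self, context):
        # Remember that resource-modifying operations should be rejected.
        self._frozen = True

    def thaw_backend(self, context):
        self._frozen = False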
When creating the `freeze_backend` and `thaw_backend` driver methods we must remember that this is a Cloud Administrator operation, so we can return errors that reveal internals of the cloud, for example the type of storage device, and we must use the appropriate internationalization translation methods when raising exceptions; for `VolumeDriverException` no translation is necessary since the manager doesn't log it or return to the user in any way, but any other exception should use the ``_()`` translation method since it will be returned to the REST API caller. For example, if a storage device doesn't support the thaw operation when failed over, then it should raise an `Invalid` exception:: def thaw_backend(self, context): if self.failed_over: msg = _('Thaw is not supported by driver XYZ.') raise exception.Invalid(msg) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/doc/source/contributor/rolling.upgrades.rst0000664000175000017500000005452600000000000023533 0ustar00zuulzuul00000000000000.. Copyright (c) 2016 Intel Corporation All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Upgrades ======== Starting from Mitaka release Cinder gained the ability to be upgraded without introducing downtime of control plane services. Operator can simply upgrade Cinder services instances one-by-one. To achieve that, developers need to make sure that any introduced change doesn't break older services running in the same Cinder deployment. In general there is a requirement that release N will keep backward compatibility with release N-1 and in a deployment N's and N-1's services can safely coexist. This means that when performing a live upgrade you cannot skip any release (e.g. you cannot upgrade N to N+2 without upgrading it to N+1 first). Further in the document N will denote the current release, N-1 a previous one, N+1 the next one, etc. Having in mind that we only support compatibility with N-1, most of the compatibility code written in N needs to exist just for one release and can be removed in the beginning of N+1. A good practice here is to mark them with :code:`TODO` or :code:`FIXME` comments to make them easy to find in the future. Please note that proper upgrades solution should support both release-to-release upgrades as well as upgrades of deployments following the Cinder master more closely. We cannot just merge patches implementing compatibility at the end of the release - we should keep things compatible through the whole release. To achieve compatibility, discipline is required from the developers. There are several planes on which incompatibility may occur: * **REST API changes** - these are prohibited by definition and this document will not describe the subject. For further information one may use `API Working Group guidelines `_ for reference. * **Database schema migrations** - e.g. if N-1 was relying on some column in the DB being present, N's migrations cannot remove it. N+1's however can (assuming N has no notion of the column). 
* **Database data migrations** - if a migration requires big amount of data to be transferred between columns or tables or converted, it will most likely lock the tables. This may cause services to be unresponsive, causing the downtime. * **RPC API changes** - adding or removing RPC method parameter, or the method itself, may lead to incompatibilities. * **RPC payload changes** - adding, renaming or removing a field from the dict passed over RPC may lead to incompatibilities. Next sections of this document will focus on explaining last four points and provide means to tackle required changes in these matters while maintaining backward compatibility. Database schema and data migrations ----------------------------------- In general incompatible database schema migrations can be tracked to ALTER and DROP SQL commands instruction issued either against a column or table. This is why a unit test that blocks such migrations was introduced. We should try to keep our DB modifications additive. Moreover we should aim not to introduce migrations that cause the database tables to lock for a long period. Long lock on whole table can block other queries and may make real requests to fail. Adding a column ............... This is the simplest case - we don't have any requirements when adding a new column apart from the fact that it should be added as the last one in the table. If that's covered, the DB engine will make sure the migration won't be disruptive. Dropping a column not referenced in SQLAlchemy code ................................................... When we want to remove a column that wasn't present in any SQLAlchemy model or it was in the model, but model was not referenced anywhere in our code (this basically means that N-1 wasn't depending on the presence of that column in the DB), then the situation is simple. We should be able to safely drop the column in N release. Removal of unnecessary column ............................. When we want to remove a used column without migrating any data out of it (for example because what's kept in the column is obsolete), then we just need to remove it from the SQLAlchemy model and API in N release. In N+1 or as a post-upgrade migration in N we can merge a migration issuing DROP for this column (we cannot do that earlier because N-1 will depend on the presence of that column). ALTER on a column ................. A rule of thumb to judge which ALTER or DROP migrations should be allowed is to look in the `MySQL documentation `_. If operation has "yes" in all 4 columns besides "Copies Table?", then it *probably* can be allowed. If operation doesn't allow concurrent DML it means that table row modifications or additions will be blocked during the migration. This sometimes isn't a problem - for example it's not the end of the world if a service won't be able to report it's status one or two times (and :code:`services` table is normally small). Please note that even if this does apply to "rename a column" operation, we cannot simply do such ALTER, as N-1 will depend on the older name. If an operation on column or table cannot be allowed, then it is required to create a new column with desired properties and start moving the data (in a live manner). In worst case old column can be removed in N+2. Whole procedure is described in more details below. In aforementioned case we need to make more complicated steps stretching through 3 releases - always keeping the backwards compatibility. 
In short when we want to start to move data inside the DB, then in N we should: * Add a new column for the data. * Write data in both places (N-1 needs to read it). * Read data from the old place (N-1 writes there). * Prepare online data migration cinder-manage command to be run before upgrading to N+1 (because N+1 will read from new place, so we need to make sure all the records have new place populated). In N+1 we should: * Write data to both places (N reads from old one). * Read data from the new place (N saves there). In N+2 * Remove old place from SQLAlchemy. * Read and write only to the new place. * Remove the column as the post-upgrade migration (or as first migration in N+3). Please note that this is the most complicated case. If data in the column cannot actually change (for example :code:`host` in :code:`services` table), in N we can read from new place and fallback to the old place if data is missing. This way we can skip one release from the process. Of course real-world examples may be different. E.g. sometimes it may be required to write some more compatibility code in the oslo.versionedobjects layer to compensate for different versions of objects passed over RPC. This is explained more in `RPC payload changes (oslo.versionedobjects)`_ section. More details about that can be found in the `online-schema-upgrades spec `_. RPC API changes --------------- It can obviously break service communication if RPC interface changes. In particular this applies to changes of the RPC method definitions. To avoid that we assume N's RPC API compatibility with N-1 version (both ways - :code:`rpcapi` module should be able to downgrade the message if needed and :code:`manager` module should be able to tolerate receiving messages in older version. Below is an example RPC compatibility shim from Mitaka's :code:`cinder.volume.manager`. This code allows us to tolerate older versions of the messages:: def create_volume(self, context, volume_id, request_spec=None, filter_properties=None, allow_reschedule=True, volume=None): """Creates the volume.""" # FIXME(thangp): Remove this in v2.0 of RPC API. if volume is None: # For older clients, mimic the old behavior and look up the volume # by its volume_id. volume = objects.Volume.get_by_id(context, volume_id) And here's a contrary shim in cinder.volume.rpcapi (RPC client) that downgrades the message to make sure it will be understood by older instances of the service:: def create_volume(self, ctxt, volume, host, request_spec, filter_properties, allow_reschedule=True): request_spec_p = jsonutils.to_primitive(request_spec) msg_args = {'volume_id': volume.id, 'request_spec': request_spec_p, 'filter_properties': filter_properties, 'allow_reschedule': allow_reschedule} if self.client.can_send_version('1.32'): version = '1.32' msg_args['volume'] = volume else: version = '1.24' new_host = utils.extract_host(host) cctxt = self.client.prepare(server=new_host, version=version) request_spec_p = jsonutils.to_primitive(request_spec) cctxt.cast(ctxt, 'create_volume', **msg_args) As can be seen there's this magic :code:`self.client.can_send_version()` method which detects if we're running in a version-heterogeneous environment and need to downgrade the message. Detection is based on dynamic RPC version pinning. In general all the services (managers) report supported RPC API version. RPC API client gets all the versions from the DB, chooses the lowest one and starts to downgrade messages to it. To limit impact on the DB the pinned version of certain RPC API is cached. 
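Building on the same ``can_send_version()`` check, a completely new RPC method can refuse to send a message to services that are pinned below the version that introduced it. This is only a hedged sketch: the method name and the version number are made up::

    def do_something_new(self, ctxt, volume):
        version = '3.17'  # hypothetical version that introduced this call
        if not self.client.can_send_version(version):
            msg = _('do_something_new requires RPC API %s.') % version
            raise exception.ServiceTooOld(msg)
        cctxt = self.client.prepare(server=volume.host, version=version)
        cctxt.cast(ctxt, 'do_something_new', volume=volume)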
After all the services in the deployment are updated, operator should restart all the services or send them a SIGHUP signal to force reload of version pins. As we need to support only N RPC API in N+1 release, we should be able to drop all the compatibility shims in N+1. To be technically correct when doing so we should also bump the major RPC API version. We do not need to do that in every release (it may happen that through the release nothing will change in RPC API or cost of technical debt of compatibility code is lower than the cost of complicated procedure of increasing major version of RPC APIs). The process of increasing the major version is explained in details in `Nova's documentation `_. Please note that in case of Cinder we're accessing the DB from all of the services, so we should follow the more complicated "Mixed version environments" process for every of our services. In case of removing whole RPC method we need to leave it there in N's manager and can remove it in N+1 (because N-1 will be talking with N). When adding a new one we need to make sure that when the RPC client is pinned to a too low version any attempt to send new message should fail (because client will not know if manager receiving the message will understand it) or ensure the manager will get updated before clients by stating the recommended order of upgrades for that release. RPC payload changes (oslo.versionedobjects) ------------------------------------------- `oslo.versionedobjects `_ is a library that helps us to maintain compatibility of the payload sent over RPC. As during the process of upgrades it is possible that a newer version of the service will send an object to an older one, it may happen that newer object is incompatible with older service. Version of an object should be bumped every time we make a change that will result in an incompatible change of the serialized object. Tests will inform you when you need to bump the version of a versioned object, but rule of thumb is that we should never bump the version when we modify/adding/removing a method to the versioned object (unlike Nova we don't use remotable methods), and should always bump it when we modify the fields dictionary. There are exceptions to this rule, for example when we change a ``fields.StringField`` by a custom ``fields.BaseEnumField``. The reason why a version bump is not required in this case it's because the actual data doesn't change, we are just removing magic string by an enumerate, but the strings used are exactly the same. As mentioned before, you don't have to know all the rules, as we have a test that calculates the hash of all objects taking all these rules into consideration and will tell you exactly when you need to bump the version of a versioned object. You can run this test with ``tox -epy35 -- --path cinder/tests/unit/objects/test_objects.py``. But you may need to run it multiple times until it passes since it may not detect all required bumps at once. Then you'll see which versioned object requires a bump and you need to bump that version and update the object_data dictionary in the test file to reflect the new version as well as the new hash. There is a very common false positive on the version bump test, and that is when we have modified a versioned object that is being used by other objects using the ``fields.ObjectField`` class. Due to the backporting mechanism implemented in Cinder we don't require bumping the version for these cases and we'll just need to update the hash used in the test. 
For example if we were to add a new field to the Volume object and then run the test we may think that we need to bump Volume, Snapshot, Backup, RequestSpec, and VolumeAttachment objects, but we really only need to bump the version of the Volume object and update the hash for all the other objects. Imagine that we (finally!) decide that :code:`request_spec` sent in :code:`create_volume` RPC cast is duplicating data and we want to start to remove redundant occurrences. When running in version-mixed environment older services will still expect this redundant data. We need a way to somehow downgrade the :code:`request_spec` before sending it over RPC. And this is were o.vo come in handy. o.vo provide us the infrastructure to keep the changes in object versioned and to be able to downgrade them to a particular version. Let's take a step back - similarly to the RPC API situation we need a way to tell if we need to send a backward-compatible version of the message. In this case we need to know to what version to downgrade the object. We're using a similar solution to the one used for RPC API for that. A problem here is that we need a single identifier (that we will be reported to :code:`services` DB table) to denote whole set of versions of all the objects. To do that we've introduced a concept of :code:`CinderObjectVersionHistory` object, where we keep sets of individual object versions aggregated into a single version string. When making an incompatible change in a single object you need to bump its version (we have a unit test enforcing that) *and* add a new version to :code:`cinder.objects.base.CinderObjectVersionsHistory` (there's a unit test as well). Example code doing that is below:: OBJ_VERSIONS.add('1.1', {'Service': '1.2', 'ServiceList': '1.1'}) This line adds a new 1.1 aggregated object version that is different from 1.0 by two objects - :code:`Service` in 1.2 and :code:`ServiceList` in 1.1. This means that the commit which added this line bumped versions of these two objects. Now if we know that a service we're talking to is running 1.1 aggregated version - we need to downgrade :code:`Service` and :code:`ServiceList` to 1.2 and 1.1 respectively before sending. Please note that of course other objects are included in the 1.1 aggregated version, but you just need to specify what changed (all the other versions of individual objects will be taken from the last version - 1.0 in this case). Getting back to :code:`request_spec` example. So let's assume we want to remove :code:`volume_properties` from there (most of data in there is already somewhere else inside the :code:`request_spec` object). We've made a change in the object fields, we've bumped it's version (from 1.0 to 1.1), we've updated hash in the :code:`cinder.tests.unit.test_objects` to synchronize it with the current state of the object, making the unit test pass and we've added a new aggregated object history version in :code:`cinder.objects.base`. What else is required? We need to provide code that actually downgrades RequestSpec object from 1.1 to 1.0 - to be used when sending the object to older services. 
This is done by implementing :code:`obj_make_compatible` method in the object:: from oslo_utils import versionutils def obj_make_compatible(self, primitive, target_version): super(RequestSpec, self).obj_make_compatible(primitive, target_version) target_version = versionutils.convert_version_to_tuple(target_version) if target_version < (1, 1) and not 'volume_properties' in primitive: volume_properties = {} # TODO: Aggregate all the required information from primitive. primitive['volume_properties'] = volume_properties Please note that primitive is a dictionary representation of the object and not an object itself. This is because o.vo are of course sent over RPC as dicts. With these pieces in place Cinder will take care of sending :code:`request_spec` with :code:`volume_properties` when running in mixed environment and without when all services are upgraded and will understand :code:`request_spec` without :code:`volume_properties` element. Note that o.vo layer is able to recursively downgrade all of its fields, so when `request_spec` will be used as a field in other object, it will be correctly downgraded. A more common case where we need backporting code is when we add new fields. In such case the backporting consist on removing the newly added fields. For example if we add 3 new fields to the Group object in version 1.1, then we need to remove them if backporting to earlier versions:: from oslo_utils import versionutils def obj_make_compatible(self, primitive, target_version): super(Group, self).obj_make_compatible(primitive, target_version) target_version = versionutils.convert_version_to_tuple(target_version) if target_version < (1, 1): for key in ('group_snapshot_id', 'source_group_id', 'group_snapshots'): primitive.pop(key, None) As time goes on we will be adding more and more new fields to our objects, so we may end up with a long series of if and for statements like in the Volume object:: from oslo_utils import versionutils def obj_make_compatible(self, primitive, target_version): super(Volume, self).obj_make_compatible(primitive, target_version) target_version = versionutils.convert_version_to_tuple(target_version) if target_version < (1, 4): for key in ('cluster', 'cluster_name'): primitive.pop(key, None) if target_version < (1, 5): for key in ('group', 'group_id'): primitive.pop(key, None) So a different pattern would be preferable as it will make the backporting easier for future additions:: from oslo_utils import versionutils def obj_make_compatible(self, primitive, target_version): added_fields = (((1, 4), ('cluster', 'cluster_name')), ((1, 5), ('group', 'group_id'))) super(Volume, self).obj_make_compatible(primitive, target_version) target_version = versionutils.convert_version_to_tuple(target_version) for version, remove_fields in added_fields: if target_version < version: for obj_field in remove_fields: primitive.pop(obj_field, None) Upgrade Checks -------------- Starting with the Stein release of OpenStack, Cinder has added support for Upgrade Checks. Upgrade checks provide a release-specific readiness check before restarting services with new code. Details on how to run an Upgrade Check can be seen in the `CLI interface for :doc:`cinder status commands ` page. Upgrade checks are intended to help identify changes between releases that may impact the deployment environment. As a result, developers should take time to consider if the operator would benefit from having an Upgrade Check added along with changes they are proposing. 
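To give a feel for what such a check looks like, here is a hedged sketch of a hypothetical check function and its registration, based on the ``oslo.upgradecheck`` library; the option being checked and the exact layout of ``cinder/cmd/status.py`` are illustrative::

    from oslo_config import cfg
    from oslo_upgradecheck import upgradecheck as uc

    CONF = cfg.CONF


    class Checks(uc.UpgradeCommands):

        def _check_legacy_option(self):
            # Illustrative only: warn when a made-up deprecated option is set.
            if getattr(CONF, 'legacy_option', None):
                return uc.Result(uc.Code.WARNING,
                                 'legacy_option is deprecated, use new_option.')
            return uc.Result(uc.Code.SUCCESS)

        _upgrade_checks = (
            ('Legacy option', _check_legacy_option),
        )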
The following are a few examples of changes that would require an Upgrade Check: * Changes to Configuration Options * Removal * Change in Behavior * Driver Removal * Changes to Configuration File Locations * Deprecations To add an Upgrade Check edit the `cinder/cmd/status.py` file. Add a new function that contains the check you wish to implement. Functions need to return either a `uc.Result` where the result can be one of: * SUCCESS * FAILURE, * WARNING, Your new function should then be added to the `_upgrade_checks` tuple. For your check give the name of the Upgrade Check to be displayed to end users upon success or failure as well as the name of the function used to implement your check. Upgrade Checks should be submitted with Unit Tests. The `doc/source/cli/cinder-status.rst` documentation should be updated to indicate the release for which your Upgrade Check was released and to explain the reason or limitations of your check, if appropriate. A release note should also be created with an explanation of the Upgrade Check in the `upgrade` section. It is preferable to have Upgrade Checks submitted as part of the patch that is making the change in question. The checks, however, can be submitted as a separate patch and are appropriate for backport if they are being created after a release has been cut. For additional details on Upgrade Checks please see `Nova's Upgrade Checks Documentation `_ . What can be checked? .................... The cinder-status CLI tool is assumed to be run from a place where it can read cinder.conf for the services, and that it can access the Cinder database to query information. It cannot be assumed to have network access to a storage backend -- a backend may only be accessible from the Cinder Volume service and not reachable directly from where this tool is run. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/doc/source/contributor/rpc.rst0000664000175000017500000003325300000000000021032 0ustar00zuulzuul00000000000000.. Copyright (c) 2010 Citrix Systems, Inc. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. AMQP and Cinder =============== AMQP is the messaging technology chosen by the OpenStack cloud. The AMQP broker, either RabbitMQ or Qpid, sits between any two Cinder components and allows them to communicate in a loosely coupled fashion. More precisely, Cinder components (the compute fabric of OpenStack) use Remote Procedure Calls (RPC hereinafter) to communicate to one another; however such a paradigm is built atop the publish/subscribe paradigm so that the following benefits can be achieved: * Decoupling between client and servant (such as the client does not need to know where the servant's reference is). * Full a-synchronism between client and servant (such as the client does not need the servant to run at the same time of the remote call). * Random balancing of remote calls (such as if more servants are up and running, one-way calls are transparently dispatched to the first available servant). 
Cinder uses direct, fanout, and topic-based exchanges. The architecture looks like the one depicted in the figure below:

.. image:: /images/rpc/arch.png
   :width: 60%

..

Cinder implements RPC (both request+response, and one-way, respectively nicknamed 'rpc.call' and 'rpc.cast') over AMQP by providing an adapter class which takes care of marshaling and unmarshaling of messages into function calls. Each Cinder service (for example Scheduler, Volume, etc.) creates two queues at initialization time, one which accepts messages with routing keys 'NODE-TYPE.NODE-ID' (for example cinder-volume.hostname) and another which accepts messages with the generic routing key 'NODE-TYPE' (for example cinder-volume). The API acts as a consumer when RPC calls are request/response, otherwise it acts as a publisher only.

Cinder RPC Mappings
-------------------

The figure below shows the internals of a message broker node (referred to as a RabbitMQ node in the diagrams) when a single instance is deployed and shared in an OpenStack cloud. Every Cinder component connects to the message broker and, depending on its personality, may use the queue either as an Invoker (such as API or Scheduler) or a Worker (such as Volume). Invokers and Workers do not actually exist in the Cinder object model, but we are going to use them as an abstraction for the sake of clarity. An Invoker is a component that sends messages in the queuing system via two operations: i) rpc.call and ii) rpc.cast; a Worker is a component that receives messages from the queuing system and replies accordingly to rpc.call operations.

Figure 2 shows the following internal elements:

* Topic Publisher: a Topic Publisher comes to life when an rpc.call or an rpc.cast operation is executed; this object is instantiated and used to push a message to the queuing system. Every publisher always connects to the same topic-based exchange; its life-cycle is limited to the message delivery.
* Direct Consumer: a Direct Consumer comes to life if (and only if) an rpc.call operation is executed; this object is instantiated and used to receive a response message from the queuing system; every consumer connects to a unique direct-based exchange via a unique exclusive queue; its life-cycle is limited to the message delivery; the exchange and queue identifiers are determined by a UUID generator, and are marshaled in the message sent by the Topic Publisher (only rpc.call operations).
* Topic Consumer: a Topic Consumer comes to life as soon as a Worker is instantiated and exists throughout its life-cycle; this object is used to receive messages from the queue and it invokes the appropriate action as defined by the Worker role. A Topic Consumer connects to the same topic-based exchange either via a shared queue or via a unique exclusive queue. Every Worker has two topic consumers, one that is addressed only during rpc.cast operations (and it connects to a shared queue whose exchange key is 'topic') and the other that is addressed only during rpc.call operations (and it connects to a unique queue whose exchange key is 'topic.host').
* Direct Publisher: a Direct Publisher comes to life only during rpc.call operations and it is instantiated to return the message required by the request/response operation. The object connects to a direct-based exchange whose identity is dictated by the incoming message.
* Topic Exchange: The Exchange is a routing table that exists in the context of a virtual host (the multi-tenancy mechanism provided by Qpid or RabbitMQ); its type (such as topic vs.
direct) determines the routing policy; a message broker node will have only one topic-based exchange for every topic in Cinder. * Direct Exchange: this is a routing table that is created during rpc.call operations; there are many instances of this kind of exchange throughout the life-cycle of a message broker node, one for each rpc.call invoked. * Queue Element: A Queue is a message bucket. Messages are kept in the queue until a Consumer (either Topic or Direct Consumer) connects to the queue and fetch it. Queues can be shared or can be exclusive. Queues whose routing key is 'topic' are shared amongst Workers of the same personality. .. image:: /images/rpc/rabt.png :width: 60% .. RPC Calls --------- The diagram below shows the message flow during an rpc.call operation: 1. a Topic Publisher is instantiated to send the message request to the queuing system; immediately before the publishing operation, a Direct Consumer is instantiated to wait for the response message. 2. once the message is dispatched by the exchange, it is fetched by the Topic Consumer dictated by the routing key (such as 'topic.host') and passed to the Worker in charge of the task. 3. once the task is completed, a Direct Publisher is allocated to send the response message to the queuing system. 4. once the message is dispatched by the exchange, it is fetched by the Direct Consumer dictated by the routing key (such as 'msg_id') and passed to the Invoker. .. image:: /images/rpc/flow1.png :width: 60% .. RPC Casts --------- The diagram below the message flow during an rpc.cast operation: 1. A Topic Publisher is instantiated to send the message request to the queuing system. 2. Once the message is dispatched by the exchange, it is fetched by the Topic Consumer dictated by the routing key (such as 'topic') and passed to the Worker in charge of the task. .. image:: /images/rpc/flow2.png :width: 60% .. AMQP Broker Load ---------------- At any given time the load of a message broker node running either Qpid or RabbitMQ is function of the following parameters: * Throughput of API calls: the number of API calls (more precisely rpc.call ops) being served by the OpenStack cloud dictates the number of direct-based exchanges, related queues and direct consumers connected to them. * Number of Workers: there is one queue shared amongst workers with the same personality; however there are as many exclusive queues as the number of workers; the number of workers dictates also the number of routing keys within the topic-based exchange, which is shared amongst all workers. The figure below shows the status of a RabbitMQ node after Cinder components' bootstrap in a test environment (phantom is hostname). Exchanges and queues being created by Cinder components are: * Exchanges 1. cinder-scheduler_fanout (fanout exchange) 2. cinder-volume.phantom@lvm_fanout (fanout exchange) 3. cinder-volume_fanout (fanout exchange) 4. openstack (topic exchange) * Queues 1. cinder-scheduler 2. cinder-scheduler.phantom 3. cinder-scheduler_fanout_572c35c0fbf94560b4c49572d5868ea5 4. cinder-volume 5. cinder-volume.phantom@lvm 6. cinder-volume.phantom@lvm.phantom 7. cinder-volume.phantom@lvm_fanout_cb3387f7a7684b1c9ee5f2f88325b7d5 8. cinder-volume_fanout_9017a1a7f4b44867983dcddfb56531a2 .. image:: /images/rpc/state.png :width: 60% .. RabbitMQ Gotchas ---------------- Cinder uses Kombu to connect to the RabbitMQ environment. Kombu is a Python library that in turn uses AMQPLib, a library that implements the standard AMQP 0.8 at the time of writing. 
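The connection parameters described next map onto Kombu's ``Connection`` constructor. As a hedged sketch (the values are illustrative, and a real deployment configures this through ``cinder.conf`` and oslo.messaging rather than instantiating Kombu directly)::

    from kombu import Connection

    conn = Connection(hostname='rabbit.example.org', port=5672,
                      userid='cinder', password='secret',
                      virtual_host='/', ssl=False, connect_timeout=5)
    conn.connect()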
When using Kombu, Invokers and Workers need the following parameters in order to instantiate a Connection object that connects to the RabbitMQ server (please note that most of the following material can be also found in the Kombu documentation; it has been summarized and revised here for sake of clarity): * Hostname: The hostname to the AMQP server. * Userid: A valid username used to authenticate to the server. * Password: The password used to authenticate to the server. * Virtual_host: The name of the virtual host to work with. This virtual host must exist on the server, and the user must have access to it. Default is "/". * Port: The port of the AMQP server. Default is 5672 (amqp). The following parameters are default: * Insist: insist on connecting to a server. In a configuration with multiple load-sharing servers, the Insist option tells the server that the client is insisting on a connection to the specified server. Default is False. * Connect_timeout: the timeout in seconds before the client gives up connecting to the server. The default is no timeout. * SSL: use SSL to connect to the server. The default is False. More precisely Consumers need the following parameters: * Connection: the above mentioned Connection object. * Queue: name of the queue. * Exchange: name of the exchange the queue binds to. * Routing_key: the interpretation of the routing key depends on the value of the exchange_type attribute. * Direct exchange: if the routing key property of the message and the routing_key attribute of the queue are identical, then the message is forwarded to the queue. * Fanout exchange: messages are forwarded to the queues bound the exchange, even if the binding does not have a key. * Topic exchange: if the routing key property of the message matches the routing key of the key according to a primitive pattern matching scheme, then the message is forwarded to the queue. The message routing key then consists of words separated by dots (".", like domain names), and two special characters are available; star ("*") and hash ("#"). The star matches any word, and the hash matches zero or more words. For example ".stock.#" matches the routing keys "usd.stock" and "eur.stock.db" but not "stock.nasdaq". * Durable: this flag determines the durability of both exchanges and queues; durable exchanges and queues remain active when a RabbitMQ server restarts. Non-durable exchanges/queues (transient exchanges/queues) are purged when a server restarts. It is worth noting that AMQP specifies that durable queues cannot bind to transient exchanges. Default is True. * Auto_delete: if set, the exchange is deleted when all queues have finished using it. Default is False. * Exclusive: exclusive queues (such as non-shared) may only be consumed from by the current connection. When exclusive is on, this also implies auto_delete. Default is False. * Exchange_type: AMQP defines several default exchange types (routing algorithms) that covers most of the common messaging use cases. * Auto_ack: acknowledgement is handled automatically once messages are received. By default auto_ack is set to False, and the receiver is required to manually handle acknowledgment. * No_ack: it disable acknowledgement on the server-side. This is different from auto_ack in that acknowledgement is turned off altogether. This functionality increases performance but at the cost of reliability. Messages can get lost if a client dies before it can deliver them to the application. 
* Auto_declare: if this is True and the exchange name is set, the exchange will be automatically declared at instantiation. Auto declare is on by default. Publishers specify most the parameters of Consumers (such as they do not specify a queue name), but they can also specify the following: * Delivery_mode: the default delivery mode used for messages. The value is an integer. The following delivery modes are supported by RabbitMQ: * 1 or "transient": the message is transient. Which means it is stored in memory only, and is lost if the server dies or restarts. * 2 or "persistent": the message is persistent. Which means the message is stored both in-memory, and on disk, and therefore preserved if the server dies or restarts. The default value is 2 (persistent). During a send operation, Publishers can override the delivery mode of messages so that, for example, transient messages can be sent over a durable queue. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/doc/source/contributor/testing.rst0000664000175000017500000001361100000000000021717 0ustar00zuulzuul00000000000000.. _testing-cinder: Testing ======= Cinder contains a few different test suites in the cinder/tests/ directory. The different test suites are Unit Tests, Functional Tests, and Tempest Tests. Test Types ---------- Unit Tests ~~~~~~~~~~ Unit tests are tests for individual methods, with at most a small handful of modules involved. Mock should be used to remove any external dependencies. All significant code changes should have unit test coverage validating the code happy path and any failure paths. There's a tox environment defined that will run code coverage tests for you:: tox -e cover It will create an HTML code coverage report that you can use a web browser to read locally from the location ``./cover/index.html`` (relative to the location of your ``tox.ini`` file). If you are reviewing someone else's patch in Gerrit, we have ``cinder-code-coverage`` job that generates a coverage report that you can read. From the review page, follow: "Zuul Summary" tab -> "cinder-code-coverage" link -> "Logs" tab. The "raw" link next to "cover" will take you to the index page of the report. Any proposed code change will be automatically rejected by the OpenDev Zuul project gating system [#f1]_ if the change causes unit test failures. Functional Tests ~~~~~~~~~~~~~~~~ Functional tests validate a code path within Cinder. These tests should validate the interaction of various modules within the project to verify the code is logically correct. Functional tests run with a database present and may start Cinder services to accept requests. These tests should not need to access an other OpenStack non-Cinder services. Tempest Tests ~~~~~~~~~~~~~ The tempest tests in the Cinder tree validate the operational correctness between Cinder and external components such as Nova, Glance, etc. These are integration tests driven via public APIs to verify actual end user usage scenarios. Running the tests ----------------- There are a number of ways to run tests currently, and there's a combination of frameworks used depending on what commands you use. The preferred method is to use tox, which calls ostestr via the tox.ini file. Unit Tests ~~~~~~~~~~ To run all unit tests simply run:: tox This will create a virtual environment, load all the packages from test-requirements.txt and run all unit tests as well as run flake8 and hacking checks against the code. 
You may run individual test targets, for example only py37 tests, by running:: tox -e py37 Note that you can inspect the tox.ini file to get more details on the available options and what the test run does by default. Functional Tests ~~~~~~~~~~~~~~~~ To run all functional tests, run:: tox -e functional Tempest Tests ~~~~~~~~~~~~~ Tempest tests in the Cinder tree are "plugged in" to the normal tempest test execution. To ensure the Cinder tests are picked up when running tempest, run:: cd /opt/stack/tempest tox -e venv-tempest -- pip install (path to the cinder-tempest-plugin directory) tox -e all More information about tempest can be found in the `Tempest Documentation `_. Database Setup ~~~~~~~~~~~~~~~ Some unit and functional tests will use a local database. You can use ``tools/test-setup.sh`` to set up your local system the same way as it's setup in the CI environment. Running a subset of tests using tox ----------------------------------- One common activity is to just run a single test, you can do this with tox simply by specifying to just run py37 tests against a single test:: tox -epy37 -- cinder.tests.unit.volume.test_availability_zone.AvailabilityZoneTestCase.test_list_availability_zones_cached Or all tests in the test_volume.py file:: tox -epy37 -- cinder.tests.unit.volume.test_volume You may also use regular expressions to run any matching tests:: tox -epy37 -- test_volume For more information on these options and details about stestr, please see the `stestr documentation `_. Gotchas ------- **Running Tests from Shared Folders** If you are running the unit tests from a shared folder, you may see tests start to fail or stop completely as a result of Python lockfile issues. You can get around this by manually setting or updating the following line in ``cinder/tests/conf_fixture.py``:: CONF['lock_path'].SetDefault('/tmp') Note that you may use any location (not just ``/tmp``!) as long as it is not a shared folder. **Assertion types in unit tests** In general, it is best to use the most specific assertion possible in a unit test, to have the strongest validation of code behavior. For example: .. code-block:: python self.assertEqual("in-use", volume.status) is preferred over .. code-block:: python self.assertIsNotNone(volume.status) or Test methods that implement comparison checks are also generally preferred over writing code into assertEqual() or assertTrue(). .. code-block:: python self.assertGreater(2, volume.size) is preferred over .. code-block:: python self.assertTrue(2 > volume.size) However, assertFalse() behavior is not obvious in this regard. Since ``None`` evaluates to ``False`` in Python, the following check will pass when x is ``False`` or ``None``. .. code-block:: python self.assertFalse(x) Therefore, it is preferable to use: .. code-block:: python self.assertEqual(x, False) .. rubric:: Footnotes .. [#f1] See :doc:`zuul`. Debugging --------- **Debugging unit tests** It is possible to attach a debugger to unit tests. First, modify the test you want to debug by adding the following to the test code itself: .. code-block:: python import pdb pdb.set_trace() Then run the unit test with pdb enabled: .. 
code-block:: bash source .tox/py36/bin/activate stestr run -n cinder.tests.unit.test_volume_utils # Or to get a list of tests to run stestr list test_volume_utils > tests_to_run.txt stestr run --load-list tests_to_run.txt ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/doc/source/contributor/thin_provisioning.rst0000664000175000017500000000326600000000000024017 0ustar00zuulzuul00000000000000Cinder Thin provisioning and Oversubscription ============================================== Background ~~~~~~~~~~ After the support on Cinder for Thin provisioning, driver maintainers have been struggling to understand what is the expected behavior of their drivers and what exactly each value reported means. This document summarizes the concepts, definitions and terminology from all specs related to the subject and should be used as reference for new drivers implementing support for thin provisioning. Core concepts and terminology ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ In order to maintain the same behavior among all drivers, we first need to define some concepts used throughout drivers. This terminology is discussed and defined in this spec[1] and should be used as reference in further implementations. Stats to be reported ~~~~~~~~~~~~~~~~~~~~ The following fields should be reported by drivers supporting thin provisioning on the get_volume_stats() function: Mandatory Fields ---------------- .. code-block:: ini thin_provisioning_support = True (or False) Optional Fields --------------- .. code-block:: ini thick_provisioning_support = True (or False) provisioned_capacity_gb = PROVISIONED_CAPACITY max_over_subscription_ratio = MAX_RATIO .. note:: If provisioned_capacity_gb is not reported, the value used in the scheduler calculations and filtering is allocated_capacity_gb. .. note:: If max_over_subscription_ratio is not reported, the scheduler will use the value defined on the [DEFAULT] section. This falls back to the default value (20.0) if not set by the user. [1] https://specs.openstack.org/openstack/cinder-specs/specs/queens/provisioning-improvements.html ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/doc/source/contributor/threading.rst0000664000175000017500000000525700000000000022216 0ustar00zuulzuul00000000000000Threading model =============== All OpenStack services use *green thread* model of threading, implemented through using the Python `eventlet `_ and `greenlet `_ libraries. Green threads use a cooperative model of threading: thread context switches can only occur when specific eventlet or greenlet library calls are made (e.g., sleep, certain I/O calls). From the operating system's point of view, each OpenStack service runs in a single thread. The use of green threads reduces the likelihood of race conditions, but does not completely eliminate them. In some cases, you may need to use the ``@utils.synchronized(...)`` decorator to avoid races. In addition, since there is only one operating system thread, a call that blocks that main thread will block the entire process. Yielding the thread in long-running tasks ----------------------------------------- If a code path takes a long time to execute and does not contain any methods that trigger an eventlet context switch, the long-running thread will block any pending threads. This scenario can be avoided by adding calls to the eventlet sleep method in the long-running code path. 
The sleep call will trigger a context switch if there are pending threads, and using an argument of 0 will avoid introducing delays in the case that there is only a single green thread:: from eventlet import greenthread ... greenthread.sleep(0) In current code, time.sleep(0) does the same thing as greenthread.sleep(0) if time module is patched through eventlet.monkey_patch(). To be explicit, we recommend contributors use ``greenthread.sleep()`` instead of ``time.sleep()``. MySQL access and eventlet ------------------------- There are some MySQL DB API drivers for oslo.db, like `PyMySQL`_, MySQL-python etc. PyMySQL is the default MySQL DB API driver for oslo.db, and it works well with eventlet. MySQL-python uses an external C library for accessing the MySQL database. Since eventlet cannot use monkey-patching to intercept blocking calls in a C library, queries to the MySQL database using libraries like MySQL-python will block the main thread of a service. The Diablo release contained a thread-pooling implementation that did not block, but this implementation resulted in a `bug`_ and was removed. See this `mailing list thread`_ for a discussion of this issue, including a discussion of the `impact on performance`_. .. _bug: https://bugs.launchpad.net/cinder/+bug/838581 .. _mailing list thread: https://lists.launchpad.net/openstack/msg08118.html .. _impact on performance: https://lists.launchpad.net/openstack/msg08217.html .. _PyMySQL: https://wiki.openstack.org/wiki/PyMySQL_evaluation ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/doc/source/contributor/user_messages.rst0000664000175000017500000002266500000000000023120 0ustar00zuulzuul00000000000000User Messages ============= General information ~~~~~~~~~~~~~~~~~~~ User messages are a way to inform users about the state of asynchronous operations. One example would be notifying the user of why a volume provisioning request failed. End users can request these messages via the Volume v3 REST API under the ``/messages`` resource. The REST API allows only GET and DELETE verbs for this resource. Internally, you use the ``cinder.message.api`` to work with messages. In order to prevent leakage of sensitive information or breaking the volume service abstraction layer, free-form messages are *not* allowed. Instead, all messages must be defined using a combination of pre-defined fields in the ``cinder.message.message_field`` module. The message ultimately displayed to end users is combined from an ``Action`` field and a ``Detail`` field. * The ``Action`` field describes what was taking place when the message was created, for example, ``Action.COPY_IMAGE_TO_VOLUME``. * The ``Detail`` field is used to provide more information, for example, ``Detail.NOT_ENOUGH_SPACE_FOR_IMAGE`` or ``Detail.QUOTA_EXCEED``. 
MySQL access and eventlet
-------------------------

There are several MySQL DB API drivers for oslo.db, such as `PyMySQL`_ and
MySQL-python. PyMySQL is the default MySQL DB API driver for oslo.db, and it
works well with eventlet. MySQL-python uses an external C library for
accessing the MySQL database. Since eventlet cannot use monkey-patching to
intercept blocking calls in a C library, queries to the MySQL database using
libraries like MySQL-python will block the main thread of a service.

The Diablo release contained a thread-pooling implementation that did not
block, but this implementation resulted in a `bug`_ and was removed. See
this `mailing list thread`_ for a discussion of this issue, including a
discussion of the `impact on performance`_.

.. _bug: https://bugs.launchpad.net/cinder/+bug/838581
.. _mailing list thread: https://lists.launchpad.net/openstack/msg08118.html
.. _impact on performance: https://lists.launchpad.net/openstack/msg08217.html
.. _PyMySQL: https://wiki.openstack.org/wiki/PyMySQL_evaluation

cinder-27.0.0/doc/source/contributor/user_messages.rst

User Messages
=============

General information
~~~~~~~~~~~~~~~~~~~

User messages are a way to inform users about the state of asynchronous
operations. One example would be notifying the user of why a volume
provisioning request failed. End users can request these messages via the
Volume v3 REST API under the ``/messages`` resource. The REST API allows
only GET and DELETE verbs for this resource.

Internally, you use the ``cinder.message.api`` module to work with messages.
In order to prevent leakage of sensitive information or breaking the volume
service abstraction layer, free-form messages are *not* allowed. Instead,
all messages must be defined using a combination of pre-defined fields in
the ``cinder.message.message_field`` module.

The message ultimately displayed to end users is combined from an
``Action`` field and a ``Detail`` field.

* The ``Action`` field describes what was taking place when the message was
  created, for example, ``Action.COPY_IMAGE_TO_VOLUME``.
* The ``Detail`` field is used to provide more information, for example,
  ``Detail.NOT_ENOUGH_SPACE_FOR_IMAGE`` or ``Detail.QUOTA_EXCEED``.

Example
~~~~~~~

Example message generation::

    from cinder import context
    from cinder.message import api as message_api
    from cinder.message import message_field

    self.message_api = message_api.API()

    context = context.RequestContext()
    volume_id = 'f292cc0c-54a7-4b3b-8174-d2ff82d87008'

    self.message_api.create(
        context,
        message_field.Action.UNMANAGE_VOLUME,
        resource_uuid=volume_id,
        detail=message_field.Detail.UNMANAGE_ENC_NOT_SUPPORTED)

Will produce roughly the following::

    GET /v3/6c430ede-9476-4128-8838-8d3929ced223/messages
    {
      "messages": [
        {
          "id": "5429fffa-5c76-4d68-a671-37a8e24f37cf",
          "event_id": "VOLUME_VOLUME_006_008",
          "user_message": "unmanage volume: Unmanaging encrypted volumes is not supported.",
          "message_level": "ERROR",
          "resource_type": "VOLUME",
          "resource_uuid": "f292cc0c-54a7-4b3b-8174-d2ff82d87008",
          "created_at": "2018-08-27T09:49:58-05:00",
          "guaranteed_until": "2018-09-27T09:49:58-05:00",
          "request_id": "req-936666d2-4c8f-4e41-9ac9-237b43f8b848"
        }
      ]
    }

Adding user messages
~~~~~~~~~~~~~~~~~~~~

If you are creating a message in the code but find that the predefined
fields are insufficient, just add what you need to
``cinder.message.message_field``. The key thing to keep in mind is that all
defined fields should be appropriate for any API user to see and must not
contain any sensitive information. A good rule of thumb is to be very
general in error messages unless the issue is due to a bad user action, in
which case be specific.

As a convenience to developers, the ``Detail`` class contains an
``EXCEPTION_DETAIL_MAPPINGS`` dict. This maps ``Detail`` fields to
particular Cinder exceptions, and allows you to create messages in a context
where you've caught an Exception that could be any of several possibilities.
Instead of having to sort through them where you've caught the exception,
you can call ``message_api.create`` and pass it both the exception and a
general detail field like ``Detail.SOMETHING_BAD_HAPPENED`` (that's not a
real field, but you get the idea). If the passed exception is in the
mapping, the resulting message will have the mapped ``Detail`` field instead
of the generic one.
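To make that pattern concrete, here is a minimal sketch written in the same
style as the manager excerpts in the next section. ``self.message_api``,
``self.driver``, ``context``, ``volume``, and ``new_size`` are assumed to
exist as they do in ``cinder/volume/manager.py``; the fields used are the
ones already shown in this document::

    from cinder.message import message_field

    try:
        self.driver.extend_volume(volume, new_size)
    except Exception as ex:
        # Pass both the caught exception and a generic detail.  If the
        # exception type is present in Detail.EXCEPTION_DETAIL_MAPPINGS,
        # the mapped Detail is used; otherwise the generic detail below is
        # what the end user will see.
        self.message_api.create(
            context,
            message_field.Action.EXTEND_VOLUME,
            resource_uuid=volume.id,
            exception=ex,
            detail=message_field.Detail.DRIVER_FAILED_EXTEND)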
Usage patterns
~~~~~~~~~~~~~~

These are taken from the Cinder code. The exact code may have changed by the
time you read this, but the general idea should hold.

No exception in context
-----------------------

From cinder/compute/nova.py::

    def extend_volume(self, context, server_ids, volume_id):
        api_version = '2.51'
        events = [self._get_volume_extended_event(server_id, volume_id)
                  for server_id in server_ids]
        result = self._send_events(context, events,
                                   api_version=api_version)
        if not result:
            self.message_api.create(
                context,
                message_field.Action.EXTEND_VOLUME,
                resource_uuid=volume_id,
                detail=message_field.Detail.NOTIFY_COMPUTE_SERVICE_FAILED)
        return result

* You must always pass the context object and an action.
* We're working with an existing volume, so pass its ID as the
  ``resource_uuid``.
* You need to fill in some detail, or else the code will supply an
  ``UNKNOWN_ERROR``, which isn't very helpful.

Cinder exception in context
---------------------------

From cinder/scheduler/manager.py::

    except exception.NoValidBackend as ex:
        QUOTAS.rollback(context, reservations,
                        project_id=volume.project_id)
        _extend_volume_set_error(self, context, ex, request_spec)
        self.message_api.create(
            context,
            message_field.Action.EXTEND_VOLUME,
            resource_uuid=volume.id,
            exception=ex)

* You must always pass the context object and an action.
* Since we have it available, pass the volume ID as the ``resource_uuid``.
* It's a Cinder exception. Check to see if it's in the mapping.

  * If it's there, we can pass it, and the detail will be supplied by the
    code.
  * If it's not, consider adding it and mapping it to an existing
    ``Detail`` field. If there's no current ``Detail`` field for that
    exception, go ahead and add that, too.
  * On the other hand, maybe it's in the mapping, but you have more
    information in this code context than is available in the mapped
    ``Detail`` field. In that case, you may want to use a different
    ``Detail`` field (creating it if necessary).
  * Remember, if you pass *both* a mapped exception *and* a detail, the
    passed detail will be ignored and the mapped ``Detail`` field will be
    used instead.

General Exception in context
----------------------------

Not passing the Exception to message_api.create()
+++++++++++++++++++++++++++++++++++++++++++++++++

From cinder/volume/manager.py::

    try:
        self.driver.extend_volume(volume, new_size)
    except exception.TargetUpdateFailed:
        # We just want to log this but continue on with quota commit
        LOG.warning('Volume extended but failed to update target.')
    except Exception:
        LOG.exception("Extend volume failed.", resource=volume)
        self.message_api.create(
            context,
            message_field.Action.EXTEND_VOLUME,
            resource_uuid=volume.id,
            detail=message_field.Detail.DRIVER_FAILED_EXTEND)

* Pass the context object and an action; pass a ``resource_uuid`` since we
  have it.
* We're not passing the exception, so the ``detail`` we pass is guaranteed
  to be used.

Passing the Exception to message_api.create()
+++++++++++++++++++++++++++++++++++++++++++++

From cinder/volume/manager.py::

    try:
        if volume_metadata.get('readonly') == 'True' and mode != 'ro':
            raise exception.InvalidVolumeAttachMode(mode=mode,
                                                    volume_id=volume.id)

        utils.require_driver_initialized(self.driver)

        LOG.info('Attaching volume %(volume_id)s to instance '
                 '%(instance)s at mountpoint %(mount)s on host '
                 '%(host)s.',
                 {'volume_id': volume_id, 'instance': instance_uuid,
                  'mount': mountpoint, 'host': host_name_sanitized},
                 resource=volume)
        self.driver.attach_volume(context,
                                  volume,
                                  instance_uuid,
                                  host_name_sanitized,
                                  mountpoint)
    except Exception as excep:
        with excutils.save_and_reraise_exception():
            self.message_api.create(
                context,
                message_field.Action.ATTACH_VOLUME,
                resource_uuid=volume_id,
                exception=excep)
            attachment.attach_status = (
                fields.VolumeAttachStatus.ERROR_ATTACHING)
            attachment.save()

* Pass the context object and an action; pass a ``resource_uuid`` since we
  have it.
* We're passing an exception, which could be a Cinder
  ``InvalidVolumeAttachMode``, which is in the mapping. In that case, the
  mapped ``Detail`` will be used; otherwise, the code will supply a
  ``Detail.UNKNOWN_ERROR``. This is appropriate if we really have no idea
  what happened. If it's possible to provide more information, we can pass
  a different, generic ``Detail`` field (creating it if necessary). The
  passed detail would be used for any exception that's *not* in the
  mapping. If it's a mapped exception, then the mapped ``Detail`` field
  will be used.

Module documentation
~~~~~~~~~~~~~~~~~~~~

The Message API Module
----------------------

.. automodule:: cinder.message.api
    :noindex:
    :members:
    :undoc-members:

The Message Field Module
------------------------

.. automodule:: cinder.message.message_field
    :noindex:

The Defined Messages Module
---------------------------

This module is DEPRECATED and is currently only used by
``cinder.api.v3.messages`` to handle pre-Pike message database objects.
(Editorial comment: With the default ``message_ttl`` of 2592000 seconds
(30 days), it's probably safe to remove this module during the Train
development cycle.)

.. automodule:: cinder.message.defined_messages
    :noindex:
    :members:
    :undoc-members:
    :show-inheritance:

cinder-27.0.0/doc/source/contributor/zuul.rst

Continuous Integration with Zuul
================================

Cinder uses `Zuul`_ as its project gating system. The Zuul web front-end is
at https://status.opendev.org.

Zuul ensures that only tested code gets merged. The configuration is mainly
done in `cinder's .zuul.yaml`_ file.

The following is a partial list of jobs that are configured to run on
changes. Test jobs run initially on proposed changes and get run again after
review and approval. Note that for each job run the code gets rebased to the
current HEAD to test exactly the state that gets merged.

openstack-tox-pep8
    Run linters like PEP8 checks.

openstack-tox-pylint
    Run Pylint checks.

openstack-tox-python27
    Run unit tests using python2.7.

openstack-tox-python36
    Run unit tests using python3.6.

openstack-tox-docs
    Build this documentation for review.

The following jobs are some of the jobs that run after a change is merged:

publish-openstack-tox-docs
    Build this documentation and publish it to OpenStack Cinder.

publish-openstack-python-branch-tarball
    Do ``python setup.py sdist`` to create a tarball of the cinder code and
    upload it to http://tarballs.openstack.org/cinder.

.. _Zuul: https://zuul-ci.org
.. _cinder's .zuul.yaml: https://opendev.org/openstack/cinder/src/.zuul.yaml

cinder-27.0.0/doc/source/drivers-all-about.rst

========================
All About Cinder Drivers
========================

.. toctree::
   :hidden:

   reference/support-matrix
   drivers

General Considerations
~~~~~~~~~~~~~~~~~~~~~~

Cinder allows you to integrate various storage solutions into your OpenStack
cloud. It does this by providing a stable interface for hardware providers
to write *drivers* that allow you to take advantage of the various features
that their solutions offer.

"Supported" drivers
-------------------

In order to make it easier for you to assess the stability and quality of a
particular vendor's driver, the Cinder team has introduced the concept of a
**supported** driver.
These are drivers that:

* have an identifiable *driver maintainer*
* are included in the Cinder source code repository
* use the upstream Cinder bug tracking mechanism
* support the Cinder :ref:`required_driver_functions`
* maintain a third-party Continuous Integration system that runs the
  OpenStack Tempest test suite against their storage devices

  * this must be done for every Cinder commit, and the results must be
    reported to the OpenStack Gerrit code review interface
  * for details, see Driver Testing

In summary, there are two important aspects to a driver being considered as
**supported**:

* the code meets the Cinder driver specifications (so you know it should
  integrate properly with Cinder)
* the driver code is continually tested against changes to Cinder (so you
  know that the code actually does integrate properly with Cinder)

The second point is particularly important because changes to Cinder can
impact the drivers in two ways:

* A Cinder change may introduce a bug that only affects a particular driver
  or drivers (this could be because many drivers implement functionality
  well beyond the Required Driver Functions). With a properly running and
  reporting third-party CI system, such a bug can be detected at the code
  review stage.
* A Cinder change may exercise a new code path that exposes a driver bug
  that had previously gone undetected. A properly running third-party CI
  system will detect this and alert the driver maintainer that there is a
  problem.

New Driver CI Requirements
--------------------------

When adding a new driver, the following requirements are made of the driver
and its associated 3rd Party CI system (a sketch of the relevant driver
properties follows this list):

* CI_WIKI_NAME correct in driver properties
* CI wiki page exists under https://wiki.openstack.org/wiki/ThirdPartySystems
* Email ping to contact in wiki page receives a pong
* Recheck trigger functioning correctly
* CI is responding on the new driver patch
* CI is responding on other cinder patches
* CI is responding on os-brick patches
* CI runs all cinder-tempest-plugin tests
* CI result is accessible

Failure to meet any one of these requirements will preclude a new driver
from being accepted into the Cinder project.
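For orientation, ``CI_WIKI_NAME`` is a class attribute on the volume driver,
typically alongside a ``SUPPORTED`` flag that the compliance process toggles
when a driver is marked 'unsupported'. The snippet below is only a sketch:
the vendor, class, and wiki names are hypothetical, and a real driver
inherits from one of the base classes in ``cinder.volume.driver`` rather
than ``object``::

    class ExampleVendorISCSIDriver(object):
        """Sketch of the identification attributes a driver carries.

        'object' is used here only to keep the sketch self-contained; an
        in-tree driver derives from a cinder.volume.driver base class.
        """

        # Version of this driver, maintained by the vendor.
        VERSION = '1.0.0'

        # Must match the third-party CI system page registered under
        # https://wiki.openstack.org/wiki/ThirdPartySystems
        CI_WIKI_NAME = 'ExampleVendor_Storage_CI'   # hypothetical

        # Commonly flipped to False when the driver is flagged
        # 'unsupported' for CI non-compliance.
        SUPPORTED = True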
Driver Compliance
-----------------

The current policy for CI compliance is:

* CIs must report on every patch, whether the code change is in their own
  driver code or not
* The CI comments must be properly formatted to show up in the CI summary
  in Gerrit

Non-compliant drivers will be tagged as unsupported if:

* No CI success reporting occurs within a two-week span
* The CI is found to not be testing the expected driver (CI runs using the
  default LVM driver, etc.)
* Other issues are found but fail to be addressed in a timely manner

CI results are reviewed on a regular basis and, if found non-compliant, a
driver patch is submitted flagging it as 'unsupported'. This can occur at
any time during the development cycle. A driver can be returned to
'supported' status as soon as the CI problem is corrected.

We do a final compliance check around the third milestone of each release.
If a driver is marked as 'unsupported', vendors have until the time of the
first Release Candidate tag (two weeks after the third milestone) to become
compliant, in which case the patch flagging the driver as 'unsupported' can
be reverted. Otherwise, the driver will be considered 'unsupported' in the
release.

The CI results are currently posted here:
http://cinderstats.ivehearditbothways.com/cireport.txt

"Unsupported" drivers
---------------------

A driver is marked as 'unsupported' when it is out of compliance. Such a
driver logs a warning message in the cinder-volume log stating that it is
unsupported and deprecated for removal.

In order to use an unsupported driver, an operator must set the
configuration option ``enable_unsupported_driver=True`` in the driver's
configuration section of ``cinder.conf``, or the driver will fail to load.

If the issue is not corrected before the next release, the driver will be
eligible for removal from the Cinder code repository per the standard
OpenStack deprecation policy.

If the issue *is* corrected before the next release and the team maintaining
the driver in question submits a patch marking the driver as 'supported',
that patch is eligible (at the discretion of the cinder stable maintenance
team) for backport to the *most recent stable branch*.

.. note::

   The idea behind backporting 'supported' status is that reinstatement
   should happen very early in the next development cycle after the driver
   has been marked 'unsupported'. For example, a driver is marked
   'unsupported' in the Victoria release but CI issues are addressed early
   in the Wallaby development cycle; the patch marking the driver
   'supported' may then be proposed to ``stable/victoria``. Thus the patch
   will be included in the first stable release of Victoria, and operators
   upgrading from Ussuri to this release will not have to change their
   configuration files.

   Note the "at the discretion of the cinder stable maintenance team"
   qualification. One reason for this is that the third party CI systems
   typically run only on changes to the development branch. Thus, if a
   driver's CI is restored early in the development cycle when there have
   not been many code changes yet, the CI passing in the development branch
   can be interpreted as a proxy for CI in the most recent stable branch.
   Obviously, this interpretation becomes increasingly invalid as the
   development cycle progresses. Further, this interpretation does not
   extend to older stable branches.

Driver Removal
--------------

**(Added January 2020)**

As stated above, an unsupported driver is eligible for removal during the
development cycle following the release in which it was marked
'unsupported'. (For example, a driver marked 'unsupported' in the Ussuri
release is eligible for removal during the development cycle leading up to
the Victoria release.)

During the Ussuri development cycle, the Cinder team decided that drivers
eligible for removal, at the discretion of the team, may remain in the code
repository *as long as they continue to pass OpenStack CI testing*. When
such a driver blocks the CI check or gate, it will be removed immediately.
(This does not violate the OpenStack deprecation policy because such a
driver's deprecation period began when it was marked as 'unsupported'.)

.. note::

   Why the "at the discretion of the team" qualification? Some vendors may
   announce that they have no intention of continuing to support a driver.
   In that case, the Cinder team reserves the right to remove the driver as
   soon as the deprecation period has passed.

Thus, unsupported drivers *may* remain in the code repository for multiple
releases following their declaration as 'unsupported'. Operators should
therefore take into account the length of time a driver has been marked
'unsupported' when deciding to deploy an unsupported driver.
This is because as an unmaintained driver ages, updates and bugfixes to
libraries and other software it depends on may cause the driver to fail
unit and functional tests, making it subject to immediate removal.

The intent of this policy revision is twofold. First, it gives vendors a
longer grace period in which to make the necessary changes to have their
drivers reinstated as 'supported'. Second, keeping these drivers in-tree
longer should make life easier for operators who have deployed storage
backends with drivers that have been marked as 'unsupported'. Operators
should keep the above points in mind, however, when deploying such a
driver.

Current Cinder Drivers
~~~~~~~~~~~~~~~~~~~~~~

The Cinder team maintains a page of the current drivers and what exactly
they support in the :ref:`Driver Support Matrix `. You may find more details
about the current drivers on the :doc:`Available Drivers ` page.
Additionally, the configuration reference for each driver provides even more
information. See :doc:`Volume drivers `.

cinder-27.0.0/doc/source/images/
cinder-27.0.0/doc/source/images/architecture.png
[binary PNG image data omitted: Cinder architecture diagram (draw.io export)]
cinder-27.0.0/doc/source/images/cinder.png
[binary PNG image data omitted]
cinder-27.0.0/doc/source/images/rpc/
cinder-27.0.0/doc/source/images/rpc/arch.png
[binary PNG image data omitted: RPC architecture diagram]
cinder-27.0.0/doc/source/images/rpc/arch.svg
[SVG diagram source omitted; labeled elements include: Compute, Volume
Storage, Auth Manager, Cloud Controller, API Server, Object Store, Node
Controller, Network Controller, Storage Controller, Cinder-Manage,
Euca2ools, http, AMQP, REST, local method]
cinder-27.0.0/doc/source/images/rpc/flow1.png
[binary PNG image data omitted]
!NJLµ|靧1B\h.8[Bm20dX.~/VC&sL@_^?")OnMuܹsziV%c5l Xly`^X+}b?٬+Kk/_'Qs6b{YbЉ2n:_f5Gƽ^BlN |f9~^W_Z.L 0H%zt.%p}?|RZ`4hP)OϭVSΈ^Y̋PO,Ao"jVg_y]_{냊 Wypd?˟^K^foCHfyvR/y>\]|W(,-7So}OX)C`cMyx] JPڵkWuM7zl{U޽K eYVWГK%֧e,<GVB!M@yڰ}k2-+Ʒ*;h&c.s\,ݲu+6N!,:KSz_:tu/{7Ϩ,k3:6g!<6 J^Q `voB_%E߉Gɱfn VR|ئ7ooг> S]` -mn=hQ8W,#کS'u 7իWGLݾ8~5$IL]u&` 9E?,Ҍv9cccc`mlgYs|,l[ظDf]}!`kj@ݙ@Xޠ8#F7xC^5Gj|숑VYr,Ko2E4+<ƕ:GOP9Sc3ks=!K؄*!:ֆ ^Wm~{Yj˱#fh8fxxe^!,:d,s;ҋ/=Xc|lguf jKKZyd`Ye#|a2k^ g dR&.Z֞1uAqƈVhpFaÖ EA5 ` Q.eCYW6lO'xBׯ@` oT;w.,r|l4,_NMMMS'L㐠8`j6o|7~CB6M<[>Kyr2eNuvkypw&e(?ͺ0-028i9,h}G1z~59c&o=8;Z $` X,Ee <럸\uH6QQ9@Lo@10ȋQvC2E7)PjzPy emB|^f} ,Va"(,G?zմin<@Ϫ?uMX?~SuMx,˞8J0 pg{FB|@s9$m(6Ӌ[ 4weJXt%- 8f5!=>SG/H\pBˠ6c)t^NH]pZh0aѣ6lZjU]!=sLէOj)Y, ` 6a_`ˁOMϪYF_ޠyoxXMFJ%vvi'/aaE(gl_ji9ۗS%وfmƎ>裺qƵ{ҩUz\QD%`s֓t6nJքz,f*L!)komӠR]v)o7=ǥL S0zY_ kN%X?)i{+;6'NrYM(A:czV2i V~McA~G{,pPE<&@+chM>+p6]l6M+B,8_Bul^}ݞVZt:=k_׺_޹Ոd٢l{k`c ?|?8qBh&7L m*Ui}>W{fcmw rqb@(<+~V&C۱ iXl-RvK{G=?\}]۶׿ZmV\,XShNX l~(seHHr%؉[US$Ne#ٳgP_)Mjv*+xq;?\T|I]ɠDXg紟 6fVZGuO\+C3^a3Vj>GGqSj-(XLd{LBًLdzq,n֘{Kl{'ڴU+Z1;0v[O<PI,E6fgHu FDѮ(^BӜI[/azZ,,$N2iI1&b(Ij'OP͜;B"c`1h=uu oxٳ:KCM:U}駉>LV?O?.Rl (le=f[Y2 Nv{>)[`3i륇x4.vj,.e`ᕑL)3 與՜^F.Ky8n~XoȌwy%胔rP1Ly*q#ۥI]r%&q1B,X,f&~ bR(f ?k^Ń>H `+[l}bAUk5x| )clSk6h S4@Sk ԬԼhaRX3,Nt CSR͚+[miُ[sT@dW]oT2lX`1 6mxa]ӏ5vpCue)BZz>}ݻ:՛oQ%_y['pLM'zȞQ=.oGNGc+@@v4b,jf?26.2`Y6PȎ! p^s5j 6 v9 ;n8oJX?O>PW]uٳ3fn,B}MqG^x {I 7<Փ|؉nYróJasGʳ}69?֩&:L `M`v\xmLM,Ʊ9 8R+m'zrFS%mYaEK#;!wɘ+Iخ`-ZcȧB|g-,oHou6 @f><.k{c]|X'S$39* `fyϣv"9x6lvI9! p9?mzBWP R gV:&w}j;=&MZ}քZl^HJ]ClcKmaQB<^rG km7l!xTk{zD0Dy_]#bRW=.5%wXFtjTsϩoQ=%,}: /r `}% 7:l0Ld/\O>K%BI7ݦ `ǃYW h.Ab;|E8&62W`su9~2mSH\xM]#t11BI{uPA,p}ꩧu1qSW\qE}#lP t uh:I{EݫJ8!IB o~ҞY<[wʖ !~]wݥ0$,cilKՏVڴ|=Iױǝ:q&K-7U+-3 ^Z$OUM+9~vjY!ZR9x~ͬ?fEVj`]ղN=Ƿf`1#0f6w3br0h E~|W_uޝ[~s`TuI{@8kv;YDL{:PCnnԬY2q輪.eK,\PaJXĉ` ̩ |ro+&YXշ~[]ry-e= u 7WꤣVWn-z"\x^7*[D8`zi>a귖jr^(/x~/d+o_Wc`csqw眣~zjwb‹uy]?6xaO<(;h5iXz<_l /&r lNeoNBXmN@"+0o9H"#)a~+!dIʖr\lC9&dC sT'z`. ZWj/j,ׄ}/}%@I~!P`'( aHXJ `S٧  cõSl6 T  d`1k5& ʨ: 96hmT*`cK 1ʕ+ը#=//'NPC[MK:~D߫N;[>yd V`|Me,,a  P/ꕵ6@fT d@,`PBY]֞؄Jke,$-C1#>*`c^ucZW{3'ք׫/l-{ԀexPuܸq(V1v"v;o,6.ǡ%`'% d`-&0(o VJPl{Rs0m/ z'[Y1<:ýz=X@묣~{UWy XK8&f ɘAy޼y^3^r= ɢ v@dwM 41@ DK:hq\mu_y&lnze+ȏ0`^w9AXWgl.2KXQu`_"bҦFH0†ixⷻᄏٓĽvU=kR`Xjc`W ^I[Hx)ݧErA2> `V,Ac`1v/`++kڳ'BmsgKXV:D&:L `T6,KƸժ !~%/:u^a}hu\U.S/f̘+ 0+/y`A)\/sZ½D6U];Lu : lX,[-RE0rh=t=>o_o&cdXٰc1GP \Cu\"D&:L `T6,KƐ,wq5g!;ݛmsgϮ'N&r_x)`Lt]j ܧ5$:X'QPF%` _c[;"P8eo A`u]^X,V\.o̘1jСjN.`"zT\>`l"<6,K0F :먣:J=c>sjᄏ6hyI{_&zgJK<}-D,Bh"ZTL>׏X'K:2m׮/G1eOQ9j="K%Vkxbc?d{b %KZ,ւuI#>[ >'O5ar.ӳ uM`v?lzEkn׎dFMMMSӭ}IN & 6 #,c.Z#`[&kvt-UW d{s56hmVs;kޚ=&[nEmmCLX3&lz lzE <Յd%`Cy?s`a'% DFAmQ Ɣ! pj0%m2O VсN:Cm.%Wbvߠ5Yz"P=O#}xc|kBMu5^H[/Ν;7t"`ueOMj{Z\"φn赕]o K6m8CYaA,֩s< I\JuW2JgDlo'~gtJ~V<4uV؇5 n>NQ=PݻP.D1%go ]tS~9s>vs᭭@~d-lgd$FMRmh d(Hz?~Ho\,:F=gyt7zc`` 7n,`#|Pa2(oϽD"R##a$x'&kJ`wd$FMRmh \bL:В7vM7U^{4N_.אq~4G?<$XeKM!`-ZW`jP>lF=K'S{>%; 1t\zm"4rԩjžiҤIb9z)-:vTM Ξ=[U>1mOd;HH{ ` 삣TKmma)9d}hS`U,ŀF$@$BnMB{O~ /԰ ̘FpҩgK\yN%`Gb'% `Sr!rR l IpI(,vv:rHhѢRz衇=T] E%lw@: dVXlj;O=JXO?* >LX;s/Ǟc|~H%`jBDZ9QuWp+ !:q-c3KYi/&Fq ΪVTKM=ar]wۣ| kΟ?K;m4/r-jРAxj< ٝxkqݻW/cܔ' vE͙3ǩh0uS{0ԠO;UhW=E[|X~;C(„G7 ӳ{SRVJ%AXlf ALԱc|jƌva "ZtjQk2si4(=HQZ $bx_x,`+Ʒa/+y6p N 3Q+@%f .Tc۴i*+\GH.Xl6v V?Pe,[Z?~fr#QH `3҉b#:vUzt^&))!m!` Y-[%xXl߬9TK4aR=[n{52}.iˤC%`sh]?%X{<+æU6'r2&[yG\O\=oAKHm\C%` ɛihXCXթGG="VEd D/PI֐E>7'63<#9ՠ)WK%`Xl򖂦%I7H)X ե@<+$Q,'t,"ٳrb˕W uyh*!غK:O'Ym XlV ~av%_f! 
u<+[McOTMiއ[;/C6 x|+p>=+5&6p ` ~kyeWk& i:6Qq 6.?)Q$%W"hiDv%ʎeep!` a˵WvmX+∣쁕tE޻͐{ĥBǥ.-X,@mK~۶m} yy>6ɲjT[ܝ TP^玘gT>)l6V}s٩.Cs?^)/2.]9s8 a6Ե}GB`szE]̉ -ey3؟`k`{myfW^:"최cfdĒZ41/]wA 1k-t~_tHtqM'l&|1t`R(ס [XdQXg#} l=`kR;6U˼؄'ݢ7mC؆e;;%lΩeCg/vcer,f~۰eoX,n..f9ucP-ͲN;_ٯ^(H6#z߾aa ?,]gu`hhKCP=IpTlI}&W= s`/6;jO$[l Ap.+0[I󵽺c;ز]gMn ݐl uu ZkOkz!c 3ܳ(9r|Am/7kn󋜲˗')yUR>g-l\~R6%"'@ ?_(;?fg 5y3T:U̐S@]FGǬ ?`K^U RtGlkQ F*aQ@K&ZONvq ն4\sáܾ.לyJ` JQ n zI6/4 `!*/ZK.04엲Ug_AHC4\ԁh1;ZVe9:yclϘ҉TW4e˸]s*3#|MPm28xq,bs68$`iģ@oB' z,x`74J$,4$sOTlB~@3ʸF|WX 5={fDž}e ~G2UfD.!ɘYׄF\(KB]rm85˖I!eH'.0.,Vf/tRGݾ9I$L`^fX˯ I_]Ji)ZJ`LrQ6fXVū߄Dk8ZZ~%dva6K9fÝέs˭Ejau~E9] F=>7:(:Ko3%%cP+"\p#{d#P)@%:r"`g;$ű`\Tnrbx[t%`sfOz:Xbu\1Hok.@~${"'&Ƕqc>R&kdt=Fܟ C,67KJuZ` 032gaBSCŒT ` XF#$B[k<}78Yɤ`ՇP*X,F@a_ux%.x<<*X,F@#` ʤ`3uϲTn ` 9K$݁xŞL#ueM%֭O Xd@ `[,f11@YTZ>hnO?T&yYK=uz^$Z8@IMX4st#G}~_%ݡO?vX`;wD͙3'0~j뭷'eW577;tM"ϥa}3w ` Q,l[ظDf]Ϊj?_*Ph!`Kr xw}4 3X{q^Sw+0ͷU;찓S{mvmwoS>#Lw=(+A%F htqnd2`љG)26X4bsf}#7ܷ9N: vxlXR&'6zV*V xl@%`nlf"fPk~cNMw/W3$Ya%`i2 0WYGʳё~-y9),i1YtuL1ј \j:9ԒqˀϢma)9d}hS|@6wQq/}&q\sj~͸K6/\t`MR`ibP /%.yd:+!1CMϤ{Z ͨ^:q@cO0Y;Y=H$ʶL6U~qQ>c}<灜,F>\ruvꀲ$ïiX,*@^JUIxuV4`Ő4biR?㽖o1 ^-QmUvh]Q"yt%4CʳNj#(7^W.+k2K=L Ica_9>`O֖{qKϢ䥔]5^]WK-9-F.O04=koڍ 6RX{oԣ\x1 6_Au[!ߘSR=96/H.HƫC%`Mxq= ^Q3$_/h3Q,3x|À[,7vHzuʥR¤;ӝr2VQ9xA;#` oXۀ@WՄVs `즷Ƌ0»k$n[ףv'<X'[j5)'3Q`K%>ZDȃY,#$i$X" !MViV_ tۃtl}7@lS(@MϵCM&@ hh*xƷt\/"ꫯzU3g]ۭ[w5GAW `[_@p1pq,X6C4Yg^{5oWLSNU{.l>+U˸ΛqKϢ䥔]5^]WKJhg`Xu=|b\a;*][nXa fRby)eW$e$!` /qypX.=0&`b؃7!I'Kw}j*u:@NTeRjrɌTjL]WKqJe@9JfS 0}Yzʕ+= 6C,„;d&*y)XDs$*wF%^2/UY3_2Jչ&%p=蠃ŋ{epbJ(ǁ>qyQdK Թ8d&*y)XDs4Mkt8s^6<":L `T6=ȠqnC]:aʤwkA#`ix^n{`:u}<Л?[sN뙐Ȝ`LQLe3Pbʮ4:,KeHi0駟V .T\sѣ+\5g;73$c)L<_,Z!:L `T6؅{vS45Ël,4kQi<=묳fmƎ>裺yۯݺuWnӖ)֩ `4х.DTtmrj13`#1uE.Ԋ+26[mm 5m>sjܹꨣb5k5UmŸ]}WfNzCu#Xm`tav [N-L+]h'zЪf;k5 ^-YD=jwVV/_Rt7頃Ju#'uFf(Ѡ}KuenZL%!<$ڞra ,H0Bq6 /Zl0a|~3[o?n kՎ=ZuС=ղ9Ku"ا Ff(Ѡqz`aFNhY0NM,ߝ gXԘeԅ#Gag=20uOv;1/gaPIjX@ !~ǔ,b|,@kbq7|Svi%mll"MDu2Ψ F NS[<Zn6D'LQm߷袋7tQC 'ekN ><0_|(wwT{vʋaZ_S~8~66P `^_1V3g_Z>CJ ۩&jk#+@` l[}L؂XBSAFcgm{ɓ'do[#SO;ep jСNy)L]s"to7tF>|~3^O4D+WT/~;?p[5=j/=~{iso` n`f"'zސr1 _0PrĕXxQキj?MJ' W_}kVu]v'|RkظKKX~[ptd6786{BLK%L9sǏ~aQb<[n^L:T&y"ڟeMtZiYF8eol?` Q֞|VXlZcZO:HuzyW}wۭbx`o7… ՠAڣh"駟ƞx'|R͘1|#`26߷'K'` )5{ yT]!K\s52ط~[&@;{ァ>0ٳլYc.K- 5סɞyG;)PCc;Sc1[cShhvN1]w.zj/+TݺuSƍSΨqߘkV ) 0ŘvKH\pA Vy7|qEhzTK,=9PnB h$NmUuNKXz&"ƼV DOzիzƪV>s\.>uXXlKCm ({x^ u<*#bO?_(õ^(z`遭g+{`yu 7Uf!~)Z|~ k9XLGycbw_o,_sp>Zdzg }w|YfLsFl;L!Ee3fFKI٢# osuJSƍᑕ Dxb<|)o.6K(a|4郺`c Klj<}SCetnf_/-YX\=?ϥY{F>80A(!/"^2:ۭ O3¾])+v[+մcAasǩ<lrI=THm-7xL rcFv#첪ڇmõ^(z`遭g㱃=X,fuZUӞX٦1|x(|wG,^dL淀+f2Lflٻ ۙXۙ;4ɨ?ؾvWÖKM>Ǒ LxEZm6 n{ W\qECѬSsL,=@%s=Տ=2:V&x /k^577:,^]V,b -f6&/;EulM{`ќ? U28[8kL`0Űe~, $lk޸>lN |Zv{ pXz`xl7,bM^xA-[[FcFbxa%wNo&sXYFǜ * [wL t"\?BXn{Υ'ծE@?TVV6Ӌ+b3c;I^8`kki߻kߜ.+y!9'qj}`遥<sO5.IJp[l `NА -JmW[4̚:^D, IO$? %)tOʀVs.:>qi5oݶ>B}skghK߄hbtv=MiF='% 4znB6?G)iڕP▥qZ2dp l|XlF}[jNag)lg>Fcam G(cB죈j8>V{m{oo~*/#J'tTD: lqEt&͂ծcLضE$oYuZ]{e>,FkBME7 m4zc^g=:va)ST{n OcoyeBrwV#< >"D\`1YӱǝzO']ho:c8#0-}K.JqXLi̩v( uYF{TwrK 53ACpe{`Q@<*D5 %KI5V@cnO}Z/E慓& 8&hv]A G՞W=#ӽf7\P0!$f͊ev{ Hf~@_xD;oك%Nb Vxִ` +Mg7X؟~a?w`+٥vbNJV&?=Ys~Z34ﭔyCg־pӸae4es;i_]+C'`[Ծ}o pBi έ^y$NǏW[n:g!AŋYzꙇpS6gĉLĘ燂XXx\`ًὝ={5k1cׯ"%e=Qwbf' v 0'rǹXN䚑vx1% RM\,l[ظDL`1 )7,b? ! 
~RDʉ` )~P b,{+.\P=3 PxUWJ+c#9D;K0]OtO:نyF3&rI!xW A@ .kqgf9@+ٹ)!ftvmL /EB[b$D0Wj%DA7Frùx`˅d70|(țrӘgKm,!K,=yqԱ%/G=iU<2:AK4^1 0 5L%` IXaksTMY`a,6ٹoRzL6^JX\$Vbm+Ltxk"dr,}mLcr>oܲy RcӻE,=l<ݕSR bs9`L^vB,to[{cefG}v}`j!WQb} SnY\z`km?sT/qޠ3\።z;3xL픩37t->{챪I-K&[`m]tZ+a'qbX~*}4I8U ٕhC`ـل%"obs GtɜX %M}o V2$|юAWܟK,=9pɘ+Ucz~Ue=Sp^Y%whƛ]WmoW]Lb|]?c0K 3 lP Dl#eiQȋy[f16㶷 ڷu/QנZ`W6yU78O tKlP`oi-[2'yj;vl(y晪^J:dNgFe|9ڸ5XFyD%`T>6V6` --Sy(FXlLjHcRXV[mtRc~U 0/h;Hy=M\66dLgIl3s%tgX,{G` !fq&oWuةw=$O.aHWRƬY<;wnD͓%`uoܐ S+݇Cc3iFɢبˋרgb|l3֎x8u^;b`u]>j&Ϙ1A}rbKUsF3hYg@!?yRKrF!u`(z1>vq7V]wK$N:g)ƾXC m5'k2s!` R9,K%`+}A]!Cr bCwqGջwoή9uF}矌UYS|w dqw} kVJ&b6>#S}Ob3gz Om5IVÄ|X'CK%:*D%` S#FrJ?>'jOG8E5յl׼g ?['冩kpNUNxF}=&tС~xb$o;,Vnu_zKM%`ng"` بdoŋj|j.z=.:Z_kƮu"$q+m` n,td3S/]AVKmiC{C*8q-Ԥ̬zz2Y3/]7[nnP-*kj&?b9t饗M7ݴt!8ݰϣ':Y Xl 6.?)QYףYؔU0! [lYXMs`u˞y?X҆'^i3g{7H>}7cq<X$~_]v٥t c\n֩'`XFL)@- 2:ɵlpU%C{;`asakU۶m=\GzQGy橅 iӦyr K`r7N;+n^4p%:؂۷)9"عbʦ' sX~e6ئ6Šk{I%8K.D-X`t{~SOm5#/X'"R}_Wn)CN-2@Tۨa0eR5nȲ L`4Cf# a=0.CAn P/c6'.ƴBlp)yin-UW db u7~ f8מ={= m|IՍdD td9[-LlN)@c5V`RP&y„F:3hWO F H3 ]2R/9:<:u2^q<(Ko1ҠvS%w-^{ ϟ?_M:X#M4Il06W N}xIdtd994bZ~r6g*ht ~0} 7r[ hխE& ˱2BM/pjlݬ,o3KzmڴQ'|2ed;cK\gϘqlyV P:(@5h0cʱ /H.HƫCm} si)7=W*2Cih+PNJ]q%pXewv1,rΙ]㵋 `3n)YT ` BLa2Қ3%']p6P6@͸gQRʮ/H.HƫC%~F4o+Xs-3)'YW`^NuuN9 +@^ ( ʝ` ,Bl1:Hc`)wr,μf :Ru9N9 +@^ (CnHSX'3$39*@u*lMIʵX,֘6mm 6@uN21XG dQ,Ke`HUu{t*(#:GX'QPF*5Jeѓ666&G:`swIzBغσSx ` d `5P:ؔ]> Z: ~P sF=L0a&z-c&/m嫔:[vx)]ʽΩ6$g7kac P:YeYi<\!N&F/Giװ*Q60 P*v +cy'ּAmIaʼnn)=?tw'a'e|Mx8'*uR''`e ^*6 ]*Hȧ;%xZNOx-\͹s*{~cebxi!T7X/a_%| r̝uǐmm,)rpa_M;2D&v"ʅ}o!#яaP*PAj7\M `0@,XM煰RLXuugoːh9[xd v9AфvDo=&'8,d3IW@5+q02nլ<0dxQ y@`{zz`ۇKe``` `g DPcQS  ՜c QeWʰ!:SgT TP/A`|$ DEB 1`"5Z荠Gmmmm[ ,%RPwt4Zֶmq~\jjp#rOT P)9(jcxmmm5asFgc;)ed ъ%iU&]1<|t.y&0cԕֻ ء7nTo12~Z(cN~cBfI0;1Ca=rN86cHPWoy|f⑮5 s| 6IBRh]0#QNj,5a9;hf#n3Gō$J(׬ B5Ȣ79K Ģa% It9{$@&x@E{醲r^ԡ>2q<_<6aĊ l/}>(+M}ӡ Ûd,Va^Sl͡t0ZU)KhfJ'8 85!֮jY lOL^s"ۀx#]`v:c[KYI{ҩ>С=HPA] V+`2ZE%I|J]cx6OaS˛3, רa-;fWUv_2txzA{v",&J7ؠßuXUĝR\$gGuŎ =,;,E 3F}ze(f<8Dx-iFFN%F+I~KC&bpxH(`M[ۨT_$V }f>^|^J(5GnfWZ?M{fE7جH;E.Cx(@ `Qaإ=-"uOM;a|6l70m\q,F7AkB,SA y`f|['Vn80BM&aĵ< ޱc/ḩi1X| ++@~~]vd+Z< M"=S#foa`gͺӨ vʝK8Gi㧍YI[hwUNnP;ҩc yÌY{ 9aT9?wh⍓E3?&%%@mQ;l5ss6ӛ +6XIJy~}.1:[5+ǐ}Źoh2=4bsbxX&+_2y Y$jM1dB͘YWd\1qƝ:=rɧsL,'|B8hI8cH \s?cI1ܜ0Eډw]רQ f 7n@y[d{tÔ)y,n^?Tmvxd̕0l#7@=u#?2k:\-|1{| =T> 4gd JԞ v;V Gyrؖ@,uَ`˹͖lf8/V43Vǽ³ A`ޤaD #j˱7IvFzZWa΃ym^ml\Z!?S}zcD 2\кm.GƳ LaQdHoaʌ+ohŸ(˥T D@S 8E'.`, P*P'z6:UY*VsfcEwC[1ݧ {ъ^1GT P*@Q L;X!axLbn$?8TǢT P*@T P*@T P*@T P*@T P*@T P*@T P*@T P*@T P*@T P*@fĬXi\ẏSODT P*@@)%,uy֟eO ([9 ,>AL,K1d|rJ'\ʋݨT P*@FF@+jKj!ؕM]SΥ`fQOW~f+=ʍ. o, P*@T P0=Â7@(ҮJ+N~y ƒP,M^m 6]rKuQyT P*@ȼBvb]O$EpXxŻX),X~}P%@#1`d*@T P*@@%d Wp B~h,N~gy`_6 8Ts8=h@.QGW(:+?`ECeV5BrQ~pHnT P*@T +P `l(ȘN'߆#6!$^H? 3cL˘ScHMhϻ̺PNyhtuL5.Wwk?Zü[`%4Zݢ{1AT P*@T P_,\+J .M57˒ c?+k1pi?J1 jX_{WlzM-5?~!2|u2CŇ%T P*@TYs s< RBfz<()Pm' c6t1ߣXYmmv9WW>B)c'Ow:.۠\Q*@T P*@JKRUni9L`]ARH3+^a36 m ٨6hvg?5V4)w [ m% njT P*@T Psd9,V/i{* r". 
e{/XZ a~\",`yZG8f {рl5CT P*@(M$ڢ شZ[ !A;JwY:*E9rΑ۩T P*@ Txx[.&,03VF偅h8o$ X5k^.;78@2O /Pgc%5F@ J=b3]onkkyubM P*@T P(UZRԘ=gSpr8rk"O]}ʕe ǖ8 Page-1 Rounded rectangle ATM switch name: control_exchange (type: topic) Sheet.3 Sheet.4 Sheet.5 Sheet.6 Sheet.7 Sheet.8 name: control_exchange(type: topic) Sheet.9 Rectangle Rectangle.10 Rectangle.11 Rectangle.12 Rectangle.13 Rectangle.14 Rectangle.15 Sheet.17 Rectangle Rectangle.10 Rectangle.11 Rectangle.12 Rectangle.13 Rectangle.14 Rectangle.15 Sheet.25 Sheet.26 key: topic key: topic Sheet.27 key: topic.host key: topic.host Sheet.28 Rectangle Topic Consumer Topic Consumer Rectangle.30 Topic Consumer Topic Consumer Sheet.31 Sheet.32 Sheet.33 Rectangle.34 Rectangle.35 Direct Publisher DirectPublisher Sheet.36 Worker (e.g. compute) Worker(e.g. compute) ATM switch.37 name: msg_id (type: direct) Sheet.38 Sheet.39 Sheet.40 Sheet.41 Sheet.42 Sheet.43 name: msg_id(type: direct) Sheet.44 Rectangle Rectangle.10 Rectangle.11 Rectangle.12 Rectangle.13 Rectangle.14 Rectangle.15 Sheet.52 key: msg_id key: msg_id Sheet.53 Sheet.54 Rectangle.57 Rectangle.56 Direct Consumer DirectConsumer Sheet.57 Invoker (e.g. api) Invoker(e.g. api) Rectangle.55 Topic Publisher Topic Publisher Sheet.59 Sheet.60 Sheet.61 RabbitMQ Node RabbitMQ Node Sheet.62 Sheet.64 rpc.call (topic.host) rpc.call(topic.host) Sheet.63 Sheet.66 Sheet.67 Sheet.68 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/doc/source/images/rpc/flow2.png0000664000175000017500000007367200000000000020743 0ustar00zuulzuul00000000000000PNG  IHDRWcsRGBgAMA a cHRMz&u0`:pQ< pHYs&?w#IDATx^ $E!CfoA@<("Ȣ0(=,.l31 #8 ( "*SdVFUeUETu]y9OƲj|PPPPPPPPPPPPPPPPPPPPPPPP`p ,v۩fϞ}Zkuc6 `fͺqbbҩ}+(((+ h=rbo1cpЋyq'.3o64l ?8 3>?%l-~~|xu{Ofzq(((c y|!/`eClF;X+ˆX, dfMܿWF c60~6pg*}xkfdۢQp@@@U9*oxv m݀GY:wqΜq-m3@@@3g/y٢ݿx^lmn)6xӥ8 h*~]ț?n<;6 ܳb-|ێfK] d̙kj_+v>_z /3w]6EV!T2o[[_7wW\3MiPJa^[džM45C`["MNmFܒݖoo]hGIPP 3"?&pU2^U̞=mHHADǵ)Ǖ|%l`«ޓ@.(0j 8@6 `M;P/`epD`q1@+6帒 `@u`q1@$`q8l`6p+[bK.B_>eWDw6K<;Ц`؁>r\ P Xe5b{>Q۬Yw޹6;Xtݍ6ڨX*n`ATz7lT7޸2lb酗<;AvM; } @ Š[3޶N*N9"+V&9sԦS#8XhQTZ%dMe˖Ew/tg}.Dh!f# =>D(xCPIͰOWl5 =>D(,3(:mk,a"gm"o@Woh!&l(Xvn`d*SNCB @& G |;X6fqP`XvPOA-XW0|l(XvZϞ`+5ڳE |i^m`siTΑأYRn!.ndV}_p@w0ox Ālˠ @ 0o9~vizɔbw(M`;NA>~i#y Mj`1j (Q?tC1 Q"# {{[+[AW`, _σ*|~jš{rקl,pѯ|-X6نp8{=-|t7cGTZ}ݏC0T!v8ݰ*HP<,uX #rm 0Hi*ES} VitLii=,<]y8*$}g6ƶX4}.Xi9vݔS|'6nU;@`w4zv  6]lٸ:`:;zQ) Kl0u+.UX%iS6_{hR3,X`'EءLg,s[ه5xVj ֑lj4UZyO|譥O-i >5ԒO VJE`>Nv@!r]/G寫y]\+SVOՌ,6DN,XF 2gDOڎcT 5gt`l`V`ʩ}3U.#f*^XEPe~MGbYlp/KZ >`dׇK< [_:]" l]\w;gyYl1yl)lhXucRk)  *{3ܾmU* .mvetF`#}#P  o,60@`a1ljeC4f-a @gm0|Ɇy_Y__Ua=XϏ8relv8߉-i މ(#ƪO]NX`bor;Ϻ*VXQ/̙SN 8bѢEQihM6)-[^u zCGߨ}*)\ݡGXk?y+vgT+Cl6/n[~<#?O[m3=度iS66v"O}vD`31 ;>u=;`؁l҆d6&NYD`g}WݾG֛= y}F.^Hw^*kƆz^5 9`}ē Mz+ !ύ!{ݒ73fhu=jm 1֡`N1u뢬ZNb\ډޯ둼Z7Thk]8QH$j `OVTl5~Xv /)nA \\sbM7?ˮX 2uescu[cWv*j==-|* ^}i+ՑM܂{s&ٲ`pb= XtMSN)f͚:xU' `XV, `Zu w/mU6JuȡizI|_]wÖS?:`&s-?ox+_9 4-GpL, ڄwv{n: ejaw(惮޹уtLinU_oDQ^_5-?-|[9²*d xfLquQl,.LaVVk;裋}{ŏ~MKt(:k;W\}]_LO lmY9&?un7Si xRZ㣎YJ ,|yj1;m:_u;SkJcmʧ!6>.2/7MDqmʍ\@FN̄]xw,?qvy9ldzK dI^u,H{&lD@A`؝X(4X] X]5A _@qܪ,~&Uy4 ;@PXo C w]3(~t?(N8UǪ;φn`XV)0[ 9}cfl_R׺[:=_wN5 8y= k9{a$?y[|[-Y5. 
-HK`ս7˲U.U9eY䶮 q-:\ Gݟ`XVAjۮ?_owvֳv߳[f~#i, ^6\VﶺI..mX$O!]+['[ [x;v$;IKZ~F\6`]x֬Pjos9f31,;j-iW2:;16&Mc,݊V۹#Jg+t g6ەIw[w>vu`;4l=j-r _ַg۴ϛ鷿t, ¶,Vhg ~Վ2q:ơ3[uuMOs4I6ѓ/6 qjIOwQp~s;NvVBu Ewu #29t v ^9`#ql{yu5,N??6Nôjqn*_~%}tܺ+?c!6Dޮue~[K^奁mMR3q`;؛okE.6+z;W}g:G, {ۺ6wtϷa=ٓ8 b|"1cF.vdIe][@VbӪ}86ŕFF&J`bwܾ/ ܟԧ7߼ws[v gS`o[o-.]W{zhh㪼ہ>;5fW݌>w:*>6t}0n~~T|h97Yy>bw.MnZ;׿6Eciv)lT&1nxcxWF{+/8*]e^9,ԁ`h`HiIoV݄} {&8뮻Z׼5nşgGyd_z.myyqt`_ [#f]M>[B\fH&%wp1wpM:Iu`6*@m6p0vHť`08䠃M]zqM&&JV3^[<.絢oiO<5.VewM9/n'ظT,+SB<*k]czΚv݇u}v `È߭8URe)Yzأr9:sTj{^Z?я[ne񖷼noۡlńoA,ASN| mM`؞V]dqf7Ty1]ݬdW`4~bw,ji:E`؎i'vbm`⁑Qn|[Zu+V(4_l%\2pXmFm`7Z1q`v~;wSq*]jbNj徏w1Vi3 ,>ZV3 c`-??}<i2gIfoWn۟r-a⧻gf`w~_nyI4ֺ[V{}?я ->/_PT>C5\Ӛ-,5՚_-|H`q¿wSC>lt3}KMȿk~gdK'm#n}®7tf# ft/v[-zscƦfaB(5X,˷*mlo`WU/V3^tgr$C'>Qlu!=_M^xÑG7Z\V]xӟ]vYV[{xxG_mV|K_j;ëM` 6on2`iy`Ѥˇ-O E[ [@:}mKj4$t7usbx zEakF9uM'`<a-[FcXΗ&)m=Z:lc^׬FH?m s=7`?ϊw-G>҂^7{ō7ؚL (E~tS}AD`}o+v;mfޝ-?uX]}n1񲚨hǝvx٘u`ΟJ1B:V5+sY8d}v!>؊r!Ů/fʕ+[}kl"cx=}_p*?|i]vݭ}㗶)ʩ:"׹گs۵_O۵_ ԔʢoS:uR2~,04- k`6b|MdZ |/6hbxn:[r@;Z6 `52:6LGfwON27*vi? MOj!7Dl}ۏ WL;QA F-iw4 @:fI+[WS׳4VVQY<5+_K n{ylt; FKE¾&=rɋݶ,mE'fo3`:viy:hV_vQIEF{ꢐݼC2wⷯ=-;8̍ύu[o-^W/]sU^VVbeV}of ifWu[3+2f{!ROG>kp((RTZT p-@Wf8.BoOgye SNFlT$QbwUGqr>9+M|x57UvBO}W<د녺oS,,;lE{f3gƝF`Ս8t" .lu"<#ٍMu'7`Bܐ558֚+/d ^ Bu{EOSVSN9xH O(:vZ˖-ku+^hQUXU:E^SX?~KwQHgPhm|G7?ܰp6lsvFlT$D6 t@OL|;GL$, d.jfb-7)JNU׬XM5]-RuוºunVd_6m/6[o?>;;wnZU)f~>ǭ 1U&cy i5]ֈ tnl8fVleyB>jRzXur:ֆ>sXո;Vڰe0)ϘtLdlWI?mMk9_\wKtI'x`5{Y)Q*žzVf$ֶ/(]}n<7tbDhS2k[+8 x ǜ*u-MdyҔN_WQ\g]O׳Q_>JdTMR nhHԲ9ro[D '|r曯 o^Ʀn]sܶk 2<lOy`&Nr`vmw `nbqد~n@ws7Io|cq}Cnp X߾Jt6VE[n&J6R+-IcqT;G"oݷ2/cݖ_x-) 2:ͷgfVa7Q ֻzm.== UV?(x`uYlf42`nߟ uS Xt~O8a.7u\<\o\lRnO&UeA0Vp&4QS]v#A㨕l~cٸ~\'25 ZgncnW]y{c]S\W`Ea7sT= 4r a57֕=5=96S+2NX=~Գ;I .ݒRU`$V0qT9yw6??:BtڧS0ky:}]YMj]>lpۉS^+_Jk)+~-=(~mwn|yrJCه)uh5~]_?k6lzl DIiTR'lt; FKEBWXBlQXQi`} yj_Z 뀳 >R^{)Y`m?]ϋ\4nWp@kb[dϫ9ն&X[sun8[ Q>Ѿ- k8'IPl"6X &(*Hֵp AuÞA6|بνVպoU]vokp*nU Vwviuk_Z+n,PPWU Ѯ%ڼ$Dj+dQbJԠ]Vk]` f{6;1لu`5A&qRv1JǻtT5Qԗ_k'ԧ*elu댓-l=?;J^6*`Җu 2,4е_E`mlٌ51P9k]٦b +n//|akYR?<ȖEQ_ᆱj&6ؠַ؛nmw AZ+Z{7^{m/}>WlF;HG]{Zcy]UohF|;M(P$Zw`fIlV&`R:jZ:LON4udPY-}Cn=\w`EJ7qcZ"׺I5לؽ]w^qnҪf0~'":GalK` LTomm&G`󵁑XE誺m&`עekhׅ؟MX{[ɚ`zs=*vw^zn?U]˺ھ<;ZGV3 ˶Tv酗T.ޠQ'|;M(kYl,Yv=m׭^"^Lbtl0[;TMU `b+UXiok.g} `5V٪m]Y@+zЋ^RVv1c8mW\}ݦz)l49 4 vr> ;y>~,"\=8j&}[u.Nq9裋]mXgDQU۠Wo^w->ymvs6s95|;M(kB8.J=l}[dAt>v[kVagQ?kt^wZ=uZ_lٲnÚIsλ4ŽM7%;Qxvp7WB))aS`؂lO(9^wuŖ믿J`8@aZ{v밺 enmM'yҾ7߼&W׿^̝95{~vk__r1Tw5Io}G݂rb"\NFYWO6>åNMBVllPޙZL t\7uccWz3ֈdKW@VݎMTvL{5״R\v~g5ɷ*`ϟl"uC9>4WݹM/^?);U|A]Sk]WnV@V/| {\p \WGث;j7 lZ1ȺkѮ- k`D|'`Bzg `o𗽬x/1~Ӌ\%X8)I&\vL{UW~{ M>Yp-6`"! 1 ;.`iMZFon*馛9._ܚj;5i-^vemӆy`Fl_ZݘΗֹ6ڨ5C0˺ASN| vhHblciG-e%aݠz;\xqq׷V2}+635U*iVmt\u39G AgU>+U w +l:uhע pW;-[ &}BFmCyM %m\D`_Yubu7I@v]7~Oo~X`떷{k2'U>E{]K`nD`Qqnɛ9s>N*e={[ D*2ΜnY^O?[O84/u?uMU/:4 >c\g`~O,t奁mMB0۝3K 4vaf![5<Κ՚xG|}nkwL `?϶Ƶv1nA`_M'yl .q>m lMQœ6U%o = `\N`sW]uUkv[^gZ `/B0/~>Gz6 2+<.J Mnʌն9e y.!S Fba:1c,[G]h X7qYkPsNum^}]mx`q콐7`N67xwxXY87am'a4[Mx FA>[o}&umOkNjcER;ݎ=BKLklq. cy4lQ\!Tk&rР튫+ g>Ph9q(*pXZ|WFoZvivQ]`t,5 ,&pZ؄+'â@;XM̤H⨣j-6E]T\q_nZgT0`X6C!"' ߼kEqWn*nFڟsF-6@%ՏRPu|`${Vk= 1C:3U,;Gw=*jYV阺+w?VaEzu[´lOB WNE`dN4jN;q{_~yk[ti{NfK/${L͙dyXkHYUn~b~}JouUzKc)w/`|Q*o,ls_Fmb#"߀O|-/~X'!؄+'âl6wy&`Rbm/vK̝;zYZ)7ڔ` |rlتqTE[ `n:n`X6`Um V0 fړa}' :0v, &dx۴ H ƥPFO`kK^V;m.X;[3'SPP`krlFS鸓Fb;GFnn;.^]uםUX6__̙36qE'&˖-۽SuZ?g4Nfp۩lm5XـN͝7w{{'C?v|p_4>˝``ky׊kmQo^xI60RDKx\qK_(S"1}ф^Ko`pnPAV GyvAk9. 
r)lQ[\yُ&J> ҅XA} ]k`O]sT\'| ,׺wڅ`8Ku $N` j0#@V [#$cepgt^x*?k)NAVٹo}ؾ `>Uÿ0ϪkuU~G>ƏVՃPVŮ>l&`~`ؑmHnևW]u /8GfIoM&dzk-2}n TemP; /mU`Ď۠ EkX#mZٷq5*.ku҅Xl۷ozƿײzu5xViUvl~`5  d +m/k]15 NP +bM*  2 |4ТzR??5>)x;Tf+lS:ǠJe+Ҕu!6k*]̵bGAu>CmοgrڏUꃿiaew; CgI6g*-Pr>XFw&zԤ|mIaQCȮe=бAaYuOv_M i 2gG=IOŽiT`k]2'aΠz;ԢeN5дb}K;ԶVUCiPH(E VW~i`)q>{`17y}ز/|t;~({MُC˗.kFjPaztY]~|,M>r/ ZwRWo3`bB`AߘAY;X ZUECMFۢcPU^NʉkʁPՏd???mwV^Н-(, 8$XlDmMLv=n,nXo`^?vZ U >M}*'a4[6 E-c%"a~4vzviҮZN" Eu~8 l *Ch?xCbC?~@<o?l?`ӪK&x^#@ ?.Fp.l\8~ +ˢ\-I_m4c[5 %5'F:f~:'x`˟:2wu{X{&h#P$v*1[`XcѶ^#l6&,_hCX4~7ܘ?35e͵? v_VkUVUׅαz`.V^~7eHit`؄|Q)J_"6PSIMh?HcN"zYuQ׻.ģ`~ZW? ҇il=аQjf1,kFlLmx{W~LVk5y9az+[/@?tB` (E; t`gRu@~-nz s{z6Yc˺c`;pO#ilT$PQ:"XcT4L>X6m&Ig `.g^1#c7=| g' ׉gج{ {Όq q:*N]e2rf]H3RX6y&U `Vl΁Nz =xgY)jȩ]STf^YI:̓W`b`ؑ&R>܍Clb~s+ 10w 3:ͨ2(* WA6\k` |Aq|^>lۋwnz~wL o6p j36(0dXc<.q8RC]`5qCn.l$,{0R Y2dCy @ pZ3ywu4R,Q[|H[.-ǁv6mҿ 6;suBJ(hk*}MS~GP̎#_H.^XlYԶ_|qmڳ:M~^{V7p3ΈJީQ:-Oq&>{כ=|ZZX)56 H)0{9w֥su|Ӡ n酗g[m>s7 oYN~…SJ哷*vQ7|tYW\}]6",P<`.6n֦Ȭ"8:ǠN\EJu҇cG혎oWޚ!tct]+gg]}6&Tcڴ . я QP(8 paw[1HU_ȭR4-s$ʣtSl׷%G4lYذˮҰ۱`1P*EU}ؕ"ԇ)lyX޺2PZֆݬ꒺l`%߄]AhP]Ɯ[ !$[ Sӵ+U?6 p elt loB6ߺ +>~2g #uk -16 btk?ln'm#֋z3尽]g`|]%Wt>Nq* g!dʢÏj Nu S12 %+SԺmCz ]`]&vެYn9s iPu||o};w-a&qH>eXyjMo$Q~e'n~7L!iL??S31TY{yfMh=?{beh  GZ+-;،+/k\""-'3)1GflE;met옮<:gxGX~2:ue}O)_Rs k7ۏ@gGyT}6U6FgѩI ,s&(M4%RS57XBiCgvX6U&ӧrp^t `5X{Lo'6Q`<6ɰh,J0Y8u 짳ډvkAup|կInQ fe}vnLԢ3*Iܰ;-{T6]콡rW/ ܦ )hdz fQ`A6ɰh,;AV`Թq퇓XgSe*sqֺǺ]m4'Ͷ=sַƆzaxn\DMΦ6a^mMe`݆6cߧ6Bg#ȹ+&\l•aXvHK?:ol8qA'󗪒f7F-YgYb7}ʹU+ʆTMtoTpv #ȹ+&\l•aXESsG<6X89} d2-k" #Uaڰ|׆ȾVm06C"@`oO=Ɂ, @lecam~7\ְpWl8N $Y}r 4 C`gP؄MMr2, /}:w6 -Yڃut* ' `OD`Iʺ{? fqPdl)w'8l 2:u<:#}$q>+E^Vllb#wjkα ,kYT\U?) ֵíRqf@3 ;p/kEz%K k@je4ialB y~fkww6ͱZ~~gX:P5.3lxzd*{^tj@l֡GR`>K 4elĵ^UKTuNUxS?i:N֕]^M^M`.3|PW^}ΟG`Xl 1nƃZW9MA `)6Z*(b"(0& , ZiFiU_k.F{QlT$`@), `@6F;YlT$`@)p*2<%¹)6 h5 3!f(0> 8rt. - QPPS=(Nl`X6FlT$D@@qo {u5l -`] 6Z* 6 h(3\яϷT.iE/&Q6 m=- Q _滢a(Р,r2e~lnhH*[wեNMBV@;@i*Y/ 96-gm-+&\yl•aX5 dΎ7em#ȹ+&\l•aX`lh ="l5&\9 `q\v\^BC4 ="l5&\9 `Xm#ȹ+&\l•aXǵa5' nlE]6` , `@6fQ`A6ɰh,ڰJa6F{D)I`6ɰh, 6 4llGtK9/:5 QZ6aC~Ghy)8 ;G(?Olw`.3|PW^3?ُ, 6 4ll#FKEXLDqmq%z{ 0w`(6Z* `Xla`,6Z* X )0s:޶nܣ(H 6 tclwFKEr'P PF''sl(6}`"! x 86 `MbRPPXLe>bRPPX>8MEpȇh 6 .- Q _&\ȷT.:86 flt[FKEBWz 4LyIl _`h6Z*@ %OX6_G'lnhH*[w)\)Slh5 &l"l•aъ ܗ":86 fltS FKE㓨lkة`qSs)6 kl[tK9#:5 QZ6a` ,R:6 4llE]6` 8 ;DQw]S6fQ`A6ɰh, 6 4llE]6` 8 ;MEpȇh 6 zD9w؄kMr2, ,6 ` GDsWM؄+'â,kÎ+Q|fuה zD9w؄kMr2, ,6 ` GDsWM؄+'â,kÎkS! klG4/:% Ql&\9 `Xmr)A^`{UOUA|P XǵaǕYQ3ꎺkhe b(N`Xl6`"aD`1Xǵaǵ 6ڋ`"! H`Xl6`"! H1f\ۖߍװG)uGa]RF}(c۽v6 jlFKEB@@`qqlhh(((e.6 h(((qm*C>D|mv1hH*0ኾ4SrHS(Sw6 m=- Q _滢a(Р,pj0&|mnhH*[wD|mvhHb(g3gm9lmvhHb(R`S7Al.hHX TB@QN<$6 - QPPSQɧl 5`] 6Z* 8ԜF`@ 6FlT$D@@el`lFKEBW W->%G4 1Q6;F(]AhPv4FR6`"! 
oQ`qzSpz)v >- Q _|.ŒbQ&v4FR6%`"aVL*2-w`qzSpz)v 4- kXOR iK`j zlvEKEB6_`K,ч%4Rp) ]4 M؄+'â, b6а zD9w؄kMr2, 6^ >`3(r pdX4`Xl6C"p  WNE`X׆T"@h$60<`3(r pdX4`Xl6C"p  WNE`X׆W^Ëz=ڧblE]6` , `@6F{D)I`6ɰh,ڰJrglG¥ozUU>?mnk6_e*`Xla`=a]W巶_9^MF[ SJt6^Qga=E3yw5^ ѮN}YK/C5}((0: spF@@HRzhglM=B@zJvW60j6 ((0,Nw6ω"B\#?XQsl PbP *Zt=lQv yrk\ P`$`qǴ~il`x6&Ѵm\P= ˂4>v r@UVSq{!og%#o\Š|reg`d=6 # ݊~j+eiTlT `OFouG6|9h>֥€X?_kܣmRz][ūAH%+zhgl*!U]TS;Ǣa[j+ >762,)_23.4J wa _ayv^av{q spFرs-kA,ł$,jc>~1x<}P1cǮaU2E t=p/{X(塏;Em-=O *@`lMo,j(U~da@kUBǂ/CJgk t ?+Շb[&M+)ڋ@5XZʻW;eׂؕ2\nVtp̬e%%;M7K$@is |~tR Ee_0M@^CVŁ5;F}xZ^f蛆KD3ކ<-~WfS"6Gvx6•QP9z8h `fl3msRգ/ z*Wt{6uXa6FL " @[XQsl ct(Yz >ۯway>`T93LKul͓G)fMLu8|sl`Tl)A@@9s؇pGŁ>el`x6S"@@5?%Çh `b8H>((((7pЋl yZkpZ+2F@@@[oJ8|60*6pu?eE@&ݭ a6н lrB[@@Q@ XUwª@ M:R`a7C;lgm݅!P0A_t '?Y>>9&+[]rBVT־cؿ?]4?JcW3ohƬYg{@l̘1M Kdռl*(چލ=t(eЧ_d^,UWS1u_9:麖Fy?je4q5&_5 v{p޺s ݰl`\m@_gF-'[}nq*~oY ܉Q4Y) '?xE#vUH~A_re+*i #5:a7 `@6:qoC&&TO@h`%(0{Ծگsѷ҄ gst;u$P!K^,UXh'k%å `>zX(=9ݩY)p-}bl>rE3gu5fq r (h60>/zr@>o|_Wi /-;ς;~9]ߘon `~ G=P;׺ۋ?K#g6k֬/Bt@34qq*'dO@P)Pϕ#*>\Tn:CW Au!u` EqrĸWxœrk'քQr {VE&gVrW/u2<^~9{DRbi]oAu/Gk;"?ֺI H ÞN;v!w l wa޼ ַ f>5ȞXom `AZ(Pu~[7a_pv'u$Y{u=<&FS`bb6b.U@,clso>G8g-j"h+R >6螀6/ZY4պ6QAm,-fX$@(, tjz0zUVČՃ]"GSmgϞ}Q#6 `g[̚xx?k4p.{*Njd0:[ClKP镶]/&ݧV\6l?z䍍-ln6Ef=s:akq^ D +GFva@.6pGO͞Vrkۄ}(i'`0,CO ,rփ&<ձ0O|529ul FO͝q+PS(ؼl79y+pZ᭷vMǝTh{;=`3V6 `y#^{[{u~"wxϻ٢S~3zukw=SwͲj_y(( * :-vߓlh `@v6`%SuG2zϞMS6LWA5PPPPPFu޴~WON.a&/k=? G3=AJsf2|PPPPPP` N­Bi=miXtLZBB+L^`*AYݢ~(n;qjoaܮAꤓ:V9PPPPPd Mĺ{X^Gi5T ([*u<:VIaewv F5A@@@@h\XEa=40w/~RFR ؘA,oyZDW=2Vb(((((zeQH[UUs= NFi@Ri>eZ:++#ۃqr* @^ vq:WA}WMQ(F+#k{:Yqm:f+ʤtK*ZG]ں!D|UNXi2_YX=>Wi.~:LU#0QPPPPW `}5ˢ|u  ,iװ|>U3UZ2{SY>v~̤Nu?Wj`rZ^~ުVki.u _QPPPPPT*U,<c?!G] ȧcS+'^VkyYe,Y>e[$Q_O!l[}?B\hGM&&E@@@@@U0@3X#ief@,i08a@Vآ&V)ӲX,[qP76.jݮ1 t@ZӪe윲n"M>`}.ezuQ=\~t8N].' ehv5u1FԕE"{ʫ@cN@[ǫ`} jϱ"޷p|o: q(((((+PUi죁:+P궚 J8+Ֆ rݏEЯ: ӖMskPPPPPPU[5VҺѵnWuޕ]4xentK;QU7]vV_X+lB\C@@@@@(PXժbl/j5*j]ҲTU*w6҄P\UUePz®nY]lvi/Fa];yY]@PPPPPLv[Yl7pXc(\ZzKmUe͇*ѪL++ge5mMTf@Դ@418!DZM[іQ xPPPPPPQ.d>뎪og0vXAct MYwU,bە߇[rVu:Oԅ״y*+}+:gj̮3YQ5u:_*CcUzv6tW:+WF:ڪ4`UnHxkn((((((*Gہa\((((((c@UW1E@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@1Pߎő>7IENDB`././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/doc/source/images/rpc/flow2.svg0000664000175000017500000005775000000000000020755 0ustar00zuulzuul00000000000000 Page-1 Rounded rectangle ATM switch name: control_exchange (type: topic) Sheet.3 Sheet.4 Sheet.5 Sheet.6 Sheet.7 Sheet.8 name: control_exchange(type: topic) Sheet.9 Rectangle Rectangle.10 Rectangle.11 Rectangle.12 Rectangle.13 Rectangle.14 Rectangle.15 Sheet.17 Rectangle Rectangle.10 Rectangle.11 Rectangle.12 Rectangle.13 Rectangle.14 Rectangle.15 Sheet.25 Sheet.26 key: topic key: topic Sheet.27 key: topic.host key: topic.host Sheet.28 Rectangle Topic Consumer Topic Consumer Rectangle.30 Topic Consumer Topic Consumer Sheet.31 Sheet.32 Sheet.33 Rectangle.34 Sheet.36 Worker (e.g. compute) Worker(e.g. compute) Rectangle.57 Sheet.57 Invoker (e.g. api) Invoker(e.g. api) Rectangle.55 Topic Publisher Topic Publisher Sheet.59 Sheet.61 RabbitMQ Node RabbitMQ Node Sheet.62 Sheet.63 rpc.cast(topic) rpc.cast(topic) Sheet.64 Sheet.65 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/doc/source/images/rpc/rabt.png0000664000175000017500000012764400000000000020641 0ustar00zuulzuul00000000000000PNG  IHDR6sRGBgAMA a cHRMz&u0`:pQ< pHYs&? 
IDATx^ U H dWV5"AЈ#,FAAq (O"@%&D@v1 (*:3sm{9TuuuuZ6 `m`g͡޳f. @M2:qU_7M/*}șa]Fv lvs?sne +|ml*@@@J+tJ+'9_z{/l6Y\ykV]uo## `66[OwG +jk|ȏ~wXh?N76 `؀o;=5*GC`~МGl@6p#n׽2;,WÄQ~{'ˆȂ( $6y WslhВpZ׼4PP[e}͑ BAl`l`:!yo5OPPS/?w?@qs<l/M\eئ(((0 ?<8=qN:6 `aZO9~4IIW@ ,W^`yl`(m '>\uH`7؆^+@E٨a;606e4*@`t5h T89`6P9xWl;(0 GZԏ e, v՛Y7)}q5> n7M*l_a`9+ RRmVU~UWn3aC$ 0?0O<֒ t z[uUc_ٳg)SDh~\/n޸fx׻nA|tzӒoP! ,kcVHלl>9s8CKb4G2~} 6%S`zRU?5|0\{/ǕW-T O=;s_pTiӦnɝ}nݾF/iMӪu],[w-CT|) n7{xv;(; pp쬊).BS%̹7y: .M?;0o׷.ziU^:`X:,.Km{)[Y?rV4akֵr<i5cm[Qv7OSCE3 B-*8>ә?{׻5zݑ3q.~* ;X-VLLJR7*?"8#*UV{xcοR7[ەG)#_ByL dԵ@c6Y-qڧj{PX;70V;_,ɏ`Î3r{{gUW]z2&igD=p;i;g'j l-;d.++jXEXP!8'UbEfW}6.xZ몬w=׭mJMmqX~ץunVNu< U`ql1G&aǹnnpկJ}knҤI 5qq\=vP, ؂CvXA--bdw. jnVjbVGPUL+cpmuYR  leŃ_[) z[1MSY\~awGeYf˲;Cޥ`bleNED=AM9a *3ܦE|cV.Jk( ufYjuQH~`Ǎ3+IbN쏱&YsGRyGŋg}Ҵ`{RW2>V3k;v ]X-v?99} :Tɚq'z6݌.1]3zlݡc|u`%eVdj>ZĐi'RY%*Q-x 5&Y1vӧOww{纚.b֫5q^_ ZO`leRE'w= `ET~bAm]Mgkd(`֜<`yVwvpr98MdfsOf 1xc~ǝݍCҵ`ble~F+x Oj݃f*N돉MB![bOei~ ;WQFP[C蘴zÉXs[Nϱ^|U vhKll1G'G{UVi⪫9O^xQw'O 7ڸ'n<(s ,[wt L)vTW/6ߴ#slde`#`;6=ݡYg/Q+~g'>1Lid͘\r:N5`X}F: !kT,)LƤ1On`9:"_W>G?rozVF-ɌP/Z& e2/`K`XJ|r`9:1;|ww;mܣ>~5*]nʔ)L>̠;*5`X= V*li`ӟ/<3Ug?ӀleR"{]w uk;[oT:j?QY3+W Uչjl ->}f=[c,UB[ovmJˬ}E`+{p*-{?^׻|;_lTz'̙3nj41٘Zgόb6ݏz [o}ckSN9e еh|:\u^p}l? `;(,6.Ċw}wsql;C/FA.sm(Ns%8}m7|IgqK~򪼢sᇻ78MEME9zklm` usW~3~]F]V謺ݎ}@~ۧϣJYӶ=1[]S7=[kܩ?6.|n„ [vw-Xj&foOq;6J`167oϯrvS&oxct$7xjScN9M`5eMmFrUm`V&4@4UF7c) Zv`}+;kX؇~)R;cƌVD+tѨ㏻~v4ԩ`;U}6YSoQ"lYXEDZXu/#\?낫nFIYvvz޼QB;& neq'pB*zǜy{VF%iƌeٝj'm5S^me tjrqymVPv 63MS v޼y$bk^K^0%7{r"OOg}nTַ56̹`;m6ӭ.D` -F`($S`L9*F(h8c[ ._#p#}=[^ v~OQooh7 `jlYc`yc`:1]*a w˝Rȫ"XŋSO=4AN~򓟸Y/??}7v,l3K75eVsF5,U`f_u 'cRuX,XHCusjewi lW} `;wҺns>&c`?k]};<[{'kؗ^z}ݭ|`Ts}3'Ы h`ZuI/m՛^$ t!:8C"K-ԍU>[n'\VDžj)l٭2>8`v6mydE`\~y[fg!I~+sϹ:˭ڭ߀_| .׿oG?Tlg /^[1X>`+C&&%.[R6 i5تmTl6S) vnAlW_=<7+դN'reN}O?>O 7]veNYu2pUwaD>H/[bek#li"ۧ)߱ۨ60luDž^p!leRRP] lش:3ßYs=+nYmwfƬCu;KYt, b-ۿ23hM9m $}| Wy Nm'l8V)z8[A_Vmm!@Tum:^AS`k ^VIIA( 6W  `/H7[ɎH*hS#Xnn9Gs.pmove;Fg!3g"[EaՕ(o¤YMyM6q?Zլ$pGg5 Kt ׫WNr] f ~0) KD[ ԮyX}/n;5 *?Z-k6 vNuk`\lTdAIli5y+J*jWYj~&2"Vss4_IօVKwK^\1=, 뮻m?׿R=eVr9kO^ nMpsM CPմhQ"El# `6ܞ5a楃PLJ @in=]B<+1|%>9 ЅgϷ@ēN-Cy{=C_\0f/M_B~z{+b6wۚku5VWa?-x<O=b+B[v]V.9"DW44Sd3**p?cUWܢq^Bg2W LJ @(T$ai$ɭkU*7H7Vsϔ-}Qm"mtubV"i5ցtM~ j=guV4tŭɔ` YMn+GWCUa.[> <~>2T^O(E҅;9&sǿS=et> >㏻6޸>*ɚQ=s[?Y;[oeb9?dkl}݌ګnl{`+*P {>QS'cq!/86:U"/@Uweଲ p'X+S[0Zx["Ν*sΘ5ӓg$*{T!>?w?.ܘ|žs9j+XMKZGmx衇ZhsAo7~뮻57 =<&a 'b<|P  , |P,,*"yȫpb16u-6Ej<Ϫ`{{;\UwъYf̤MXE^ ^H@v{p5BEa-i 4!Ԏ]R `m`fU 2:>E*iKt 6V6`:l ꏱXK~6]ƖVuה+XE/^ ~|Aett(+{r0Uz=UDu7q x7:Fp߶ŋ&s;^O` W `Umnx̚؟ *` :k]  jl۹Õ̜VL"\7ovfEO=XEc7JgΞ:ooTR?v!2HZW20[AU(PMqi4)d6{+ykWegc ږQY~TyOOQa;Ͻa5Qǻt 'L/'|m2ՄNZSVsm~C8+ jYO:ia*1 qv<9c7W RYwfCZ\NdPk@),<~],MiPo]`# ;p!{mtXdGbTr4a2ɢieذMlS9Wdh_ZbǯVKc`N,xlNkMN_j݀#r_K"}XT0qj߾{>EO~Ҋ>mSS_"gҤRբD`;a![i ; U@e<2f=vz%`;wB=]* 8P$bkzH2v~Z3x≎ֆ6^șr7j?i}\{ CC"CzD;mǍ`;wBk -ksHY=_%u '-s„Qx=)9vV[fVM_cWۍk}rK `y jmYszʶQ0KWXθá![?Q l)`{yU8i8QXomn…|u;(^c{[K쬝@ dw*7/5`r#Ofswedҙvv tUE/ [21)iNeQ Ԡ 5hUWAsg7 `%I$v5pW_}ܔd Uex6r<@k_kMXMn7܊j4쬱D`c leźII++PXF s/kN>:k t >͓I`8yV[}ݣ_ynqۮK Ә.7[`ͮ|u׵h$T}{νpPЫsϪltӮ)5`+,ʤ`Xz,XX-GկvR,UUo׶0%["Io],Y tMf)I8K/=] leN[T,$̺`;wԲVݭ(m9`7yw(DOLn=k_Kf͚4@W+}{oT`;^Aۿ׮W63y1leRRP Mg.M4+1X%>9 ;V,+ Or8'p,ĚTdM%pdg@7cn7Xil.x?=] le[T fN*lw[E;j7zwƐjǍ1ZvK-x*r$w)<_jml`;^۟׭BB\V,(uwՌlG7g;;nܹcV /LG`yo}[nɜ4+7X+Cm c۹mn`;v"-U4~DԤdj>7'$pZk3eK`;wvNW%vm-X{'ft\ph"Z6Y+V3+ ;)鎬oiYPl+(-> 188a t!.A,q\7v檫Nzi"i5XկKerAQ㫪=Xhتr9Ze7qd'$3tI-UwauZؑGcً5Q {cʘ&nUWmՕV_ߊ |$: 56'6rCQY>%$O4VV) vhL(VPigItؽ op}\uX'Fd78]tEe`L"~qhj:85D`ÆMS2W =cߏ؟#Hߒ8K*|yd_jd:xmS޵^yJl}vkOYkÎt;sT~vMʫ2/s7ol]tWˮ,vjUF: XoD`r۩ 9}3>}o+2Zb@v$.X͓w `e]뷕29} 
g$%u+-Z6NƻǶh~[;A26^1lu~xH>Ul`{%0mFAvsG'h)ik5Ubu5r4nUXM"U6";`]GcWMͯr!SLmRkB*1s`#d+ 2Iw @`#{(9 +2 믿; Af!V qvZoѕMlm%dX(Nթ (^u 8`#mmh|cd2ҵ;쮺*wwm7h F dW9 B;I6 f!nSluG: 6YBHߧJ+`TjQ`zR Ol1GȏqZf5 J@tƌ[.\ `uxd<컦MsZՎ1]'.s{+)|9W,[`*lYD'زr PStͽz8 hiIG}Úk;oC{x(ܦ=ߴ_-}0w9 V|P l1G?EW};`ۯTޱo 9s渝vi8#g4h[ [`XؽV#U"nK[g98mjyMԏ>y{ح/(UHybN??hp\k`lÐ XUY LqX1`|O `͚(Pll1GD ~H!?;T>3Nr;Zp'V~Mn}l FWl1Gq) ~H!?[ZAͿ1 - {`X} F: :IC,;s6>c`#1[Ы6l)6a)b`8ϝ*`|k st 4 ,[7tU!vybNiגetۀϭV]uh~\ٳg)SDh~\9s8(C~ևoa}79_6@ϰ>,=y;tz7+|~l>/n5,|-d_XIipkߋj8/)}dP5U鸓m`#//;/֛aIyMkO'Ҵc]r \V _&~|[IFj&OѣjoWn>(ȫ>6mJpAyrͿ,]#dU`bQ|97_Vo 4A6*z0[AeͺU]W)~]X6a lPOMj'iHu,蔵Ɂs`b O'GMpFtm5P68Zt<+82X*x`˚`b? 4P.u8ݾVq۷"íϴOIHU:VyE4G#G-[y/^1]vn{Y)+OWS;Vm QI+-lWYUv>vLzk;ZK?_Otva'kb?Eg.Yc[D`쟴dbКZV=#[Ѥg ς߯}%P:QCX(=0Pҏ`P "zog0npp]ʷvXUiPC4hdD*G49(lN_ڦ:^ Ͽ F*OUP2iH~meZyt W=>N@ nZIwQ9a8.w\Q[!y sK%Ex6V`? u4bWI zBPAy  6i~\( X3PkAV,2@+S[C- :mR@hD0صs#jsxja~0:c|K!эwr:`l^ygKϠg2np2`*3fu[[:ҶOoվ:m}HPY hVd~-|aTYHgvQ, nkiE#,4xjږuJ{=tGdW럧{ܐ l|Kڌ=#%ې%늬m>K[+6*|Y4eX%ް~àzL`# @AjXb(`+ FCS՛_!kcpp986F:VLxu`XĤY_ y/ד={e{>c~$4`{fÉV+ga(?ígz 0+$+Ut Ml^vSV6`BBdaN7-*:"k!6@ KkgҔ;`XϺI+yN3%|akz{ļ\ {v aOzf+IWlwš +$ 4UD`j61gq YD2|V6`}nNcl`Nb $ۛ_ [jpx]C 6ζ]u|XvyQN`XM"ÅbVeY6[k޷AoޱlWM GRIkRߺv:և:+;i?En>?ipipo]Ah8Em5+Xp[ڟ!l= cu'/3u]®u{ZP`X>Ll+Rƴb5P=*ǯ#X/+wl;H#YƖ j\3k̨a6ɢWGtL(ZSO`-ZW4sÞ#ip8 ~xU]۽W]gY$y؏5)0+GOCN'0vy"uONQyQVy`XnQ_=,gtZ.U /3VZ>Z^VmɵɝT oT,NwyJlr$o3VYgJɸWSJ{,k9iCГp6 l@S|_T蘴zGacTn/fs9< XPll3.аkw\ `H\X~{+*rQ(Pll~yP=6P ,&cH]/As tG6RW-(tXte`8j`#'N85 Al, w8 Z stXte`XܽQ(pk `x,[xE"w5-.ot: `H\6`;D`kQ7K˔!I=Ui~e΁ Yvʔ)={+r̜9s1'Og l`8zFeNeR tU"ِC\/&E8~D^ iuN0QRtZjWEUE=9ԑÎ,66H!?$ 嘥; } 2l`mT7XmJM}5}8H(/6F: ITWn>(ȫf;r9:n oYAc8Ӻ> .{Mw׎ #H;aY-뗫\EdlЛl,[f$v!օneq(FH &k M ®Ƃ:ZY֝V 4Vx[F}ǛZ9Y#eO[d,mЪUVZ0kqEA63`ill,[ زq\Ԭ'G6`& J+8 ZC8A^15߰I|NsڧrԾ"MC.ayc\^6 46U`ز:59м)~hl#~$3tvFj{Yc] os6\i݊<~1p'`@7mmsJX vU^ G(;YKdM ljuMV~?qX"B^x/**u;ܺF@/+B"3 4C6~ l)i w'B6cʬx[+.6n291`7u $[%gBDS' t? *M.OzjNdT5VU?n&vjkVTl4,VT5d`\Jgb@N ݛuou卣󩢽l@+ţë`IlI4P5NΕE! .ѱHԍ_`.ѓ6ɱ>i G٘p4) ngif(.~@r-6 B `+"Pnl h׭7ʁLX|E@TvΤOmiW_iݪcl@VዯN)L^%;nۋ.BH_ U-ǕvM `mWjZ>) ڂo+Seile[Tӓ2f):TeF!YN wQ>h͈mk3iy=2Һ[ `pDǪ fZL$Iu=ء<Ir'`y톶Ql'LG@`mo%HԒ"@ ]4ov@=M~WkVJ4x`# kgҶ-$:C[l^.>a]AZltӏfu !`MXn:.l]~>(*9+,i@)F^N^Aol{6`$U&R5ӘkTy!Ԫ,"! ӺEt!ni_u#lxwMOl5`L8Al:HvԆp^[)-kc`K4c+еq9Z+h7p__$bΌNӮv$NC~66_#rԧ[Tllus4u#a4j]ӖQ9f!ƂFVݟXkcy?߀7mc.vmȯ_ߦ4}flÐ ׈)֧55U)$ 4͙=dY熓9 e%`Fk>#`}W6/ǏZ7mU]H[(m[ɖ*J[c6OC~66_#rԧ[Tl,PH}\m \fat>`#`aw^9l)#lt>`#`{4Nh 49n*+o{lÐ ׈)֧55U)$<z؝I:mtMLܜ(Pl-2SI j8u:ԅa@ltȆ@ۀ 4A6*8Ms&i6 `u0 K(rkȫ ((R `M6a  l]/Z(0}$!F,dӜIڃMb@6*@*059CΌlBG*((R `M MB`kjPJXɦ9:mҫ,/^Z 8u:ԅa@l!BUkϙ,dӜIڃMb@6CQCp,b"uaofs'ݰ?/[11)iNeQ Ԡ[TU+`Fly Zf &:SϽ96 VY(jPAd@`DN{l`IX7}}ǀ{6`mV[͝u@lC ̳`+RKiA ` :uƀf}ln„ yf[w ,[SV&%ե[$W4v?~(n /tO?t/2te|llU`+*P`zRr<_ʔD7zz瘣}tך̶ s9gMrcjw/JleRRP HȝT @iL6RJ"q t4p]{ٳݯ~ctx0g`5"G} iMM)F  pݰmēNu"&Mr'p[x{g;J*GYٛm<\_g6akDح\Q_ *F}y[{:r8G|IsU}QwGeYf=zPlt>P +kv)D[lm`΅8EB-**vÍ6v_z% [DOlÐ ׈)֧55U)$6I^iWw?яo~FuQc֛-,d;Y6akD`Ӛ*R&o01c[c5ܧ?8/B#OS}LÎ`|l F: |Q[ƍIC7>ll; `կo}l9soƤ+M2śyc|l9[`#llFO6 :h\*i6RL4606ϟ﮽Z]vzw]c7M6g}C~IĝP';MWtwqi5uZ/*_>O;-d#C%t` , `M* @CmG! 
wl-[Z(vrk @;sOk\_zw'Ig1cymlQϡ LLX%ih4zӒ +6ު%`[ DJJ|@+p/bcfOfmƌ=s_pO=" l 硆C47ޘX s*4-(pK;=(CtqxEi2wb?4&{cNf|l*t|}?7pёUIJͳn"lX.lD6ޱ |( ~o[y駟v~_;nحl뮽aؑh,L^~}Q ގQW2qg>N;GmW_wm]|W{&Nt$3s}ZN1{'>~aO}gqƤܽ3>v>2>6خ P, F,M9ƓY};i k*_9FtrT4Sٳg 蠌8;F|E]Zw]7UjM>hv[m?5&m;(.3~| y|,$䕶l lwZu~ŋ]GpYS4.r&YuUT4 .z?>V"wt˻ɽk =SꫯvSNuz׻C=P:~GvHV;`+v,**`+2,[ǝVqlc /gK r!~ss믴;}֨b'p/g_}~u _Q`+s,&&%U6  Vfm `X;:8`تfʋ)7n\O>>^{mw/=M_ֵ`*~cq&'`تl]9, qU\Dl E}qf:o~MI `z{oK'?qYkz饗vvR;,[kXY1RSf몎a%`d(/ޫ[1n‹^L~2\Bg.>>3WkI[p+vש]{uB\{H= CJq2`L9]ؙO8'`)ꋻ7) `V\ѽA;+[nqN,>swzp I'?8u:)*pڴwq馛Xo*V*pH)NO,{vuebcјK AS槁ne_J V[̚,[5(Q^o5Oet4 MWt+~'I&KT}#i[`߸n̙nM6q]vJ">h ^mk{le .IJs'Jjzu!@j`y;nSUleRsX6|w/5],UwMWY=72 60)b>n-,/~-Zk/{x#8WM$@O.֋vr`Rl8XsVyE1.rL/=8J9,t}qrNY+뷢ߨ-;lS>{`Ѕ.nY"28?':֌PoTȪ.ĭY#_A-srX6|w/5]<n<}~޿+#Y(q va7=LJjVݙX-C2 8zKS 6) %t ,H5ձ0x˜]v؆i. 3,'4-(`m:xѾ{9O ,hu!u`F (UW]8W?]q-{'ƺn FtNl.i`32+Oízr*rh CULe)جR@f7 <`MQ3p0o|l-(ƍ`^KM)`5)f Vz$عs3L;n緿=u}nu`0g#pHؕ7 2pjZoK`ZM0B2I]F'߬ 1z7< {%jXl9=,t}qrN?ywH2KևDcN_=䓭O?{gZR9'4͙3=i6s܍78fX\`_6aklͣ#Zq0 ɴYp8X xװkf&y`Eohl&q"t3l?^jN{v_ߚXi˱&waMdۓ1&-# #-gle^llЧ3l8!eD``t7P{BϢ~x?*+.kڊXX6&Z&6`ئ틻t`~a{>k-raӦ^5^֒'MrIO؞wyg4V-[\IIVI Z>Pz?nr0Yc`>zl2#,"D`0~H,$<5}#ہ7k lթm9З-sxX6|w/5]NVW\]vYO}j pq ~I#YEf6A`[_ذ1qDZMlaهz9 ha%~[U7a.'` +Dz8`^Ծ9^┺qΝ$ԇպ'Nt>hk 췓hwXDb'IVC}_ó҂SK[gtvfz+aoLlHX-B6Ӱe8iw.V 4Mn)) l7`2P85Tfeƻr$H%Q`9(>iw-VYŽAIÇzgZZU@)͟?5& [ܦHl~o̬>Ԇ&cubz\j~ڞIּN.~ «~bn:6`*2< `~~ǝ݉'n[4VՒnXmlfkيZ?(YVK~cJ VӒ`k֩UCMtw'nrZs{y_waG?] `oJS>4ϺϥkZ%C~Uy~O6Ρ`تfʋtXf7pH]{üB0U :]1%0{^2+{&\"m7&q,=+UVC^U/m[>Rʯn iG6ʰXԍ&]1yP:nܳ#X`GIZ7rt5X5q/Χr|WEфjtWEJ+qI6&FbRAoL7<[}o[}׊pg?s>,r8ց-]K9]R˛&qzܸ/<<4OM q6 v\w(Hs/~1;mD@6&5`w˾uXa7« VuGS Rۥ:&qzvSwuRHzSz `E{#0U:.oʸq9o4Oʃ?qn) `ӢGZFG>æk]آ%:ޫ {z_k[dzRkXuSJZzdv+ӂb #9C/U γJ}مXF"y!#1Cq_(wm֛˧aWdڮr|hqͯ'{cfEfkKu]:Nos¶uƻUVHlSmQi[ITEXLmwv%pjzTi=dօ1jIk *-<@n`>\XEmk3so6ƸdOݍ̸}c.ȂLIm3hQ(4pRmKoz_A)Oگm*˺Lox87Oћ9ZO6`~tisn'u;ݷѱ[f7!I X&~reG^4idccNf~馛ZyiL qN@Gs{nPV{`+3 `ȦE}ƈɓkC l@LFi@6?<FoVWo Exa4эrS(-r<[كTAl ygE`oI"I8}pK u$/#cmR3fMWܵ5b3]hIlyv2zD=1}?To}^.?2~'tNaO_6oGE`J,9]Sgz;nO{olZ" aY7rCݏpYC,r[=SMCSVcYSuq63l`/`/V֟+t;o-Y";3ۂʴفoF7yd⣒ΛI8{.3&`zK7ÝA|ߐȾ!K}!~ HYP^i=?TY~Pcɩ"~}ʬ_ 1VUFaƣrt!. v34B\:9)\ q6/F;;v…(jwOtw,gq[1 RAM6iɞ6Hf2`XPش^El[;3Ap+36;qT'>ֱlYmvolҥP0z7#3ó7 ~;Xzԃp҅8?K6Ζf'ƍ7؝}٭%o4V([C}sђ7'tR+L_WUw}wt`|Xc~#1p' {m+&`m VVNhe%w%.N/oT\9l ^ܽ2,:unWٵ^{kllt9,_j jD5~XM&U&tMNK=sk']> %N4u5K<l`rVXK/= o~[{e7%@֢GS[nT`^S~HVl}pK.QO,ꖨW|s7wHq6NIcx^d|Ig}{ĉ< bOO"h9\sM `uI, vhS+,\ y8l>qNsm7d>ڱEMd<,7pC?N&)BmKuX& ui9, x+igV^yd79s渻;*mF; b2:j ؗmر8ȔlwZuqNmny]ҫ_|VRIز[VkMF_5/6=f-royt!Nho4xtșǸWo}[_ >jyE_Їw1`$]|NcvJ+ ;۔΃q{f9 ==06]DU^Q,cqI6=Fci~N:f|2:Z'- `$@˻7߼5[+lE`e}ahE~HV,[e%K;:ph?v6pWu_(0&SOM½.?|%uh={_.-{Mwͪ9Cm3[)ܯ!jiy|_GUuնSկo"r&ʗ6VTgǨ qEf:>4r( ӜYZۧ6% ScjOUKlFtWtsUW~~5]yDQFmC~Jv$.d&V(GnyEbɌSsp4_?lhہeFhU":΢VZykt8!Z?1l-Fw gs[׿͚5=tzʶNCatUU2XC&)F^ cQİk ч\ߏpg]8eV ~ 5^U6i9 `CX`Z `k񱂬^z4"?aD\K.QF: |Ql}ZSSE B_H~wa`$Er8)F+!Zqi6-fk1g4]3>vpIpt>`#`9$N6Iuni3a*hL^EA5f.ayy2-Ӵ(.c`p^Y6^b' 0g`5"G} iMM)F s~et6yVPiqx3Ӵ樖Zڤ[Mڤr-8!ZdSN x`#llFO>"H!x'G **@󣗊k"Ӣayiδe(!XDmipE:~[M(6d?B/lz`#llFO>"H!ꝡ( ƔY6=Yup\o ȷ6akD`Ӛ*Rwn Hn7-ӺZg-Yx67v Ԛ9ydoәÓ<@#`qhDH&v.TF˱`ͱzmtȆ@۫Z6:J8 `@ltȆ@N@]o-yf9R8\l6a  l]/Z(0}$!Fl)zc6,`qP` աygF6O!@`Hr=li6ڤ21]^[l5)$4ՠ@ 8E=c3؀&tK9K+ڀfVמ;`*( KE8LNlh (\S(Џ l7tY9'=1"g>x4lEw6P-4tn ou߰m5StQzZ}V-irl[VVvֹ 6&OpSOZ(PlygcJ~kM}䈙NTJߏjm<_tOu18c i_nx=_<[ePV}nCqY'( a~+bM:ԁ((C]`A_l>0pɄ!|k#AS&v "'-qgSg l6``UvL+ׇTw+׏>}PC}7}LCrG@@:`scEqMr6 i|} (PH\dX[N!b[uۀ͟WoxQ}#0ni.lA[FGyնp=oEd:16˲AͻN?WϟY՞Prom<0iܔll#e7!0jD2bҩIQ)P#COt},šVX+W7}6?ڦK琶LhI;ElZj\f"A6le.Š$[=-O3`ºt~! 
`Nʀm)h <i TujeZ;غ%(`uܰll@{3ݚ.ƊZd6 kҘ6ʟPj@ d=rC~\jw{ .}PaXArVt5m x/%o ɹa@6 Uw_n̶̎`ֶ M` (U/ T'-H ,, bqY&C CtY|my@,aFb}F5! B-[~ V0ya+606tU_?mFufaP*V+|zMoZln*P`}6Enu!#~u, :v `6 T )`2U %g4_>-7}60‚߳X34lYiՅXMi݊n.[AǨm&oC}=mk5jYA'̆[c]~O4-xdG Hj{-F-c Hֈ&>doUB1og-].e`qb؆KfZ& 3jGJG:^ـ` ix)]3T[liUyCUg@RSX`닗aD7,ۢ*dQ``Aæd뭿{hImkjڼJkJײ68v枚! +*P2P &>p#41=6;H-E B˦iMݨKtWYe~#4 PPP`KU{CF\OlVҏTۭ g Vٴq~7Y*{Ë"Cueu,] ڡV鐥v.0M8a@@@>P`Vx,SĹ&//6`jpi[ x!*Ϗ1MꯒU>BiWhuvn~bMOU;i, :oCDZ;\x[inG6MD@@Xy-%gaۀ`,:u5aTpçAOI58w֧V( / 4`UO[emh9B܏]4?Sr-5<@@@>P`+_{9Ѕ.60P6`p.^| !ASDg9 D6@LJ0kX#G[>4PPP Q`OD?5 >XF- `|lu7v5iZ]g+oo(FobT<@SS^5Ŷև0{_/r606ۅXiMռl^43沠 f}ȓne0>9g@T@k( TfWGpE }Q08XǟuHvi,3I kc` f/mcHjXwf^Ӵ6뱵V2ӗN;í &r"{s)r*e?~g;/ 9^6`6^˰?q 8#uXeڌ!̽;IFƝZYUi+˖Q9~j[Vȟ9 7Mp~n̳c4 BZoV)U ]rSԎ)zmǥuoTKqӶ17/ߩ+U:!h"jKɜ}Lij1Im֡2~mWڕ!VYvNa{*29US-p'N? r+(lh.SdVIfiхcWoD72}>_# SeH!nAl(jYfc3 O\=NtY3w^=w+ 0<:Y-8HC0{ .aRӇGƬɐ"O+pou -h_OhYT+ڧn[(,˩`؀u.xRETc-zVw; rZ?% $m8RowX/P9w+Rc.pS7lzE4¥tK5R}p9VN.3yJx> 7[I1[5z5vb],Xf]3M>h_v3}ԆAGui^3\_Lߠڱu3ѥEvNV[TUF*ol׾ct(6 `ՋqϺ[I}P 4m(`?g*_umJ ;CȠiӼ|LشYOSQi|v-xFiŰLJݼ12Lu|,ݝZꪓO^8ԉS `6- Zmⳗ6u0teUӱ]sgi-YŏԆ+X7c7s줭R:6 X4+CLZЀޒ0(-bc-'rqPlE^ߺ~k~yٜшaSY=CùflioO+#wYuL6l$*҂tlo˴'v2DXq&`ONg"f~7F۲?Pzn[qi{t3.gzp̱lh ,\H<҆]aj³6tMHC?SVC woMM+cV:Z]3ea\GY^3lUŶ~ւd8v /4&aպ+s- m &Zflk3u:`6o[uҤgVC O&ZD:N?3p8C擫>}:u!qVIM6Z/BP A?7ʪ1MOVT9G|rmɋ"8QN6 `W<ۭj/N8=qӜaжav0d+)Y-Xծ/oijpgeZy~WN_ZS ֵڇh) @]G<vf!- ^"MhԺ b,}8VaSW^y{Xc?~șq, `6PO8//_p4yp{g跇˾KP>oj@kvk3 P~Lruc#i5?3\/GF̎umq *c4՝#\1vۑꍌ9)Iq 9 ows?snvl6`=sv:Ə_)<Mf};_qr-{9@@@@@X>)GEn_Tt2kФE?lB:h=BW/5츲8+K[t~e{ɋ(((((0 &YIQBXK?. ԥAky;gvv_yŜ{<l r< @KXA4m 1S'vEt v1GR"WTo)Xдv|1W<(((((Sذq^l0TENm't,>Eu!NXt&1݆4PPPPP Fvu~Eo"vݍCTF'-ke+Ϗ8eGcifg2&im^<4λiۂE۵ʻ1L@@@@@V `Ɍ|P ZwP6*Auv]~V7+ kQSՍZ2mjV9k;k_uUNkw]fZ~Ktژ`ٕ_Kyɩ X+ XZɴҢMб>M^P~g &X #ifAFuOx]Z4S~0rx6TyO(l 1~lY^]GficZ鬺 ,× iv⃯-¬sL%;@@@@@h)``֭VpQfЬm*fz> U )+j],=L7^K:㳺 !녅I>67k4]eQPPPP\?2h]j53P6oW+ˠ&EL>٘6mتtg>6"t6i^0OLd9\Hk ۖG@@@@T @;5 R$UaFqCP[+*X#UldNֵ8'eq-pM!=9]@@@@@_]OТ(((((}mσr]G6 _ce{U10N+W/j† @* pM8v%#x ۮV_nj. YPcF ڭYݙ^aLC;6jjf9_BMUF Ĵ!.Tڷ3ߞAu>mZ:ԝ(((((mH*smR%e߾ U *Q!ژU}Y3qaQW>JjYGtvܬˡsY9w V~pib6L.Ŵ!.o߀6\Z&,˺lG@@@@P@PNdvx3yT`Ӡ %BX lڸ߬(i AݱX@^0&FK6(iFѭ6X=ۆłoi܇UE#n 4Am18оp,mI!LBuv3e bISUWeHkC<]p[xiggڤ`^;cڐVFڶMɿo:<6Mա (((((PR0;ƕX Ni|Ӝ Ӗ`U+tlܱ̃S]> 9 PPPPU!OF% %`Mjp)pI2 ` " D?cam 1]v.YjB _b!ͮm>Q~Ft=qj?XV3 qv@@@@l_|Jn3kG- -QP^nYT"t?Vg8mW.;,2=m[x}ٙv/\{}YHp;_}^b#u#,_ٔl(C|V&(ە& duLZT-pF[ՙ,_EwϬxڢ㲎-]^Ӯm;߼6䝃owl[ Ŷ|((((( P mңe3}hNA4PPPPPPP4T~7>wFꖜ PPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPP KEmWIENDB`././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/doc/source/images/rpc/rabt.svg0000664000175000017500000010200700000000000020636 0ustar00zuulzuul00000000000000 Page-1 Rounded rectangle ATM switch name: control_exchange (type: topic) Sheet.3 Sheet.4 Sheet.5 Sheet.6 Sheet.7 Sheet.8 name: control_exchange(type: topic) Sheet.17 Rectangle Rectangle.10 Rectangle.11 Rectangle.12 Rectangle.13 Rectangle.14 Rectangle.15 Sheet.9 Rectangle Rectangle.10 Rectangle.11 Rectangle.12 Rectangle.13 Rectangle.14 Rectangle.15 Sheet.25 Sheet.27 key: topic key: topic Sheet.28 key: topic.host key: topic.host Sheet.26 Rectangle Topic Consumer Topic Consumer Rectangle.30 Topic Consumer Topic Consumer Sheet.31 Sheet.32 Sheet.33 Rectangle.34 Rectangle.35 Direct Publisher DirectPublisher Sheet.36 Worker (e.g. compute) Worker(e.g. 
compute) ATM switch.37 name: msg_id (type: direct) Sheet.38 Sheet.39 Sheet.40 Sheet.41 Sheet.42 Sheet.43 name: msg_id(type: direct) Sheet.44 Rectangle Rectangle.10 Rectangle.11 Rectangle.12 Rectangle.13 Rectangle.14 Rectangle.15 Sheet.52 key: msg_id key: msg_id Sheet.53 Sheet.54 Rectangle.57 Rectangle.58 Direct Consumer DirectConsumer Sheet.59 Invoker (e.g. api) Invoker(e.g. api) Rectangle.55 Topic Publisher Topic Publisher Sheet.56 Sheet.60 Sheet.62 RabbitMQ Node (single virtual host context) RabbitMQ Node(single virtual host context) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/doc/source/images/rpc/state.png0000664000175000017500000042032400000000000021020 0ustar00zuulzuul00000000000000PNG  IHDRv, iCCPICC ProfileHWTZ RBo*z6B AŎ,*T,XUZ+`@Dee],Pyv=gr;ߝܙ3-;//U GP e&&%3I=@X2# PF˻7%u*'DAqr >@hzYy<Kp kJp OFALe(Ix3 90@b/N 'BL<8:Nò\Bsߒ-6j08Z3\0 B|\ ė\8"?f l0u f|؞-B{4_+ǩhy|P.,2D16iaG2bd4FA:kf#A:"PDc~Q(/cZIG Y)96{˂wFʣ`b bΆMF{NE0÷xNc-B7.OQV33d ʳK>;v}qOr6a&>7 jg(m-O|z%K%'9Աo(~߭a?Zb˰E v;5&v kڰ> w`ABC V=x!H8$!)H:"@JEQ]>hNG|-AWjt?ڈAn:Lc`5aX2 XVUcX o`'t[ p/W?{A+F!X !DB:aPIC8J8MHd͈.p_&3s+[ Nb/qD"iHHT@*%m"'"]'>d{r 9, +' * & \9 v+(\SSR(XJ&e1e#rFQQPMq"_qFŃ{?RըT?4zzFXdZm%vAdUZTԨt]饲 "JהTTLUT* TTt UT#UsTWS\fU+QۥvVэ~t} }7_)?Lf6s#s@ @lӠ`0ΰذ(hQѠdyuLL\M2L6\4yojf`ԴYYYsyyM EVK2òjlŷj90m`B.kuuu &ܦئDL8񫭓mnvjvvv-v-9U7h ^9Z99q;MvZY\bťU5u%7Bnݝ a$II'zz=wzv{1Rvxu{xX\3 L>/}m}G};lxhX847t0!8,xMpWn'6d0%t~0jXLk'?0D4EȐȵ̢~B5jhyc13cż]{? 950 886 t62iDOT(hIX@IDATxpyPʴLEvil 6]nj+G ssCZ̩hx 9ã\q45'%{Y1`1Duc)mڦm^2m8.v ,Ic~g}      ؐ~GXMa!F (l$@$@$@$@$@$@>O$@$@$@$@$@$@aK      (l7tx       8??U<C(~3qYo |o~7?k'  !RƮ;w:r#ۿ\ 'WOx#;~tǞxo-vk|m~r6q.ڈN/}}+TX}th_n6#ľ⑃?r#c8p;Y*s}ZWC1ĘHH`x7=UL͛7T "7l{9ǡ=;EӘ'Q--BcgVaӈ6 ύ)q{^_®7"hZ5/##]}"kNxԎ]78k/ T}At3v8k|~ R|IrJ^jfM9J:w;鶎 om4H:' OJ`0ӺnNh,o"f}&KIi<<}A'PD+'ި'%_n]e)Ϣ/6n7s^KQ (>5CJ  gm^B*L:krJA+VRTy㱨m}iKi,ހc {@6ؗB_pରEWA&SEmGI֦q)uܐg8Žh=a~7!uR|/)ح@uo5~]JJoֻ[F7rgjg߰L -n݀OJ)6!FOL#b$߯zҘM&:6 F7#l=Rq/Fl,畃}ZTӸ6\D sR]:2ÆSl  -lS?/;cc7~6zZ1zճ 4_PM1|\V}伕gBZ6l6y}Ȱ^Mxx!qފyZ_O;)JaY8 \r}Jɛ[N+e{0kCts7+?B0E'6i4^Z&~;S~9ny|/Cox[~4?{աugZ^uw'V:莧]&LG$1 G"a='t^'v{v՘-ϞAs$eO oO:7 s3| |r~7 mm2fe=Wܼ8pcF> ˙~q_QoKtGO" i7Zo]k̅~4;0;pxS9U'-4j_tQ]FK_ gx{2J͜k:= *%'lh2ZEhŁ >tϔT{\?6 ږkm^%"hr+h'ѝ뭖353}&v Kk(JwuKW_ľQmhİϮ<_N rï14#Kܧƣ%~fH-s{9GK3 [k| ﶯtt &"Oj?bX‹-j:y\{}r=GH=,.s1Ğ:R3 !\2V.qOTnxO/kh{*p%&FoOO[,V%)^2'F:Bi|!m<>v#2'D{^5x7Qb۽9gqlD)D'3=*; 3McRW*QiU}nxj #|N](GNՈ#갸BM,#Ori֩i3[Op7n{<! U}3b& My+t|pO]kSlVTy'j*+̾J룼;-H^VYNakx w]q["쐿//a+lu݋]ygX}Z]-l3 w پO.7>[ށ/}xp{(ЫӀfT: YG1we\6݀{f0ww8!?@1Eaɪ2pCЁ*kKYe=ӱet AW<Ca J&4Ys;n㈢ugrnBM]u_:#DJ.Ro nojE@@.=OൡN!^ ۹x<!l76sa(l\X~AdFl:IZd7j#fRwccz_CE˧SP/ζ<|_z:OhOL7[>AvSv uaA>u?)/V擋ʍ-'wۨM1gt{|Akx{j}/?o_~ T|LZ|?7[.?QaOjzG~q1ʖl9e{oGĵ6ke[h>{QM!"B񴪪>;C KmAwe?;~rZ` tŻw bbqg"vmIX.?ό}pA?d:_K lpZ@kj¾Y8ix5DK:91$ (~&1=|FfG)w|X'4(G w.iB 5h~E[\~m!7"Ѥ j$)u#ٶf^#dzRDS<3iJة]6s]_֯Tӻ?׋DcƣRأq7sj_۪kV%0Y2fTijugZLN?e`V lGQMJ uZ4ƲcA%rsX-g`놧_9ɧpLS OLҐPGcvbb GFL-S׆Wح~kd\(Tvy=C~fW_g?2^NNvk/r n:Jw'l[oC I]1ԟP\M7{;v7(yBʍQ{B}^P=*E 5JcRVaJ}Q5VmL|e]֧AkYnn_*]nfxVNndcv #Ňᡘ*+y-/`$*EB+U; [oly"oգ\'%{-$~m~|ʼn_Σst65j'=Ȳ0mK[Ȟb$-OJ~5Pq-l,Vf1oZhL?<뷼fBNdDr1;ǝĤTs{W^~rs{=~" Xp}q akve,K/.|3Իg`fGԕ?1yTWEL H~qqڔ4.itoH-_ycͨ<,kا`~dk7hAr(Ve]֧),ײ./#)b*/F Vƛ?E_O$6֧^)&JyU\ضmJH~jߊ}QimX1F3k߭i|p/v>.?u?KJ5J(1>mwnh啒W+_9畺bO&q9+gN8hbPo<yIq`T䷧pL.~][wWZ?gNE]~(:XOw#,7aPDQ R(#J$-x7D g+/q/ᱧc(}ub6&Bt/N.%.a*ԈW_ދ7QW,ϡPbkbVg\+DjzM+5G. 
.<*mykE=S#zb-ژxqso!| ^Aj.^݉9y 9Lim8GD$ވ͟nB5 cO|&7Kbߧb6oƧIhwT/Zߋz: qgZ5Xp>3+olNye.SLC@ E+kVy*ZrB,7T7.Ɂ_ %mϜMVJ^V>7|(P/Gp3|/cφî|̘ζ~>53:R}*:^w]W+~=˵nL8o~]ӻXo5Yw%\WgM$p)m,_YFIӞכP6@2kk_ Ցq<6etbV/ i|tP +S3&&$.ԙGbͅF\5iegK11<7ᣭumvU]im=ӟ5r#|6f[ / ϛ KCed$1Xpx>2۫ޔі4BlZ>o/2m,w}r(_Zj:o=ai4#Eg06r.r07nXm'%*uŤDfd\9'(heybYu ~][wL3ߪCtg+_~(:<1v{j!M]_KakXkYJ֦g[tVq3or4YgKԹj|p8}ؐK]R,6(ކL3r+ a+xZ̃h@+yGm߀Docjm޶ugnrn7泝T^)keV\?&;ݾ3]çy:uڨ-.E7#gk`oCM\wrgڵőu}qWr?D۶g}PukӂG3#"?k$}h} p|--lmD4Vm=m>#xhB7cm [:*(%!F/ 5>]) u meY6b2O5ʶ$fCz9Mr-ڡmaܞnSv#:lu ͌[_wފ6i#: rn[ns6mKlRxZgOdO}RNĻC3>8 h-tgڵőu}q+lݟC>Zϊ;PuP5 W(X^,KLc>wɃ2yy._@ݮo`^,^}O<׫lkͧWW`? jŘ2wo`=vT͢$$;C"5{qF >h?Z ъ/w7v$)\9w -n*0h! ?'+?[M3֏𓻕(XN#^FeUr#d%P7Ʋe!) bQNV1‡Ѧ,97܇Aho: 3!O8鿌9UoZ+L}5ŢC^VvGg"kpj\n1SղZ{8=o/V;˭G89V(lǯKOzmس3TJ1KC'p#}{i j6.^G-#.Nrʓi8k.ʗ>D{?Oqce3ٻx*n}#S|Z1_{m ~1 Z-׋POC՗_Rn)4;K_aלkua<6|0=3#N2^3benzP\خ5Q[?xGpz>ͷmunTυxUg_Q]KV{۷[󶮣>7LnFׇoM.} qp.SW=Ώo"AgJbY?bzW> wg6Ur`pr[" #P]~> y3>%tirXnF)lܒ{=~6.|J=oڃ9huڵvPNq^̵i>Cy|QF H;vAbL$p p)l=>ҢB0ĺ'q !E ԋ76 [X{Ö 9"rG'Q,-@MXEڵ$|`Q$$ Fq?7@"-/44NQpHn z V׻M:4-ȎM"\KO,n݅*y_^DFO @Wy睨߁d>E7>[ށ/}xp{h+wH;3ک U/ GlH^@-<$ McJ~6vOçԪĿP/ζ<|_z:!ZVAG      HuEIi29(3!+eǢ++i'VLkCBFk08m;Q)!3Q޼"6{ }y4vXim.7( Mv%k=i"α_,f`p˰V<ھĠoY~(|mwLgW:B}>p|tMh3hRJ K|X y3j;A gQ/>0 T[F&u?hM,ʷ3o_Ig]_kH=/Qym jDžȈBm|Yi!sJBxmSnwWB?z>d<'3s\ȅ>@r2Y +ػK^~KxE^XՒ)e ys8~h *כ #{Ģit]|rl q@%L6!nߎjy.aeVuؗ%n)d6>j(%j}*"AZ. B6LLdyqj)e!llB~*:^]W+JZcvKkѢTRtigG<)XU"5=ݖ[j w -@;qSN\5>T/&%2`1)N'bjSY ;ָ/Wļ__4Vr!Е~) g',fƢ>S?OK6cQ5qUaZ[~(AA}>@ Zc[i($E+5ؕFpU,l]_`·kU.}>@gFUF6 a%će4b{m^kaUGP'GpXKQ(JX^Z{푃9 dT!U8N遥ĻX3m:y 7주 EBS&2ZD<.sC/C[W:uBA~4؅gȌ>@b Qc) 44< "!CHցJHQHPPimKa$O^4s_X)6) QWG<RZBmR_9sKN=*^#xF,O&cR LH]a)n e%O7Ńެ`4L]OH!?sp>>;&멬[|V6p@--Qp-C GiM08|ҐGD(#Sm{;/٧qRT?> $ѠĜJaf޽۷σQ5ǂU7hCb{F`yi+QC 5+n{ g5)4HQ-؄܋{R9*9K xwoۊʭpho nRJԼ˧lst}W+êqMdJa$ڄg_HHHHL y|xɂdxBs4ܕ-ݖmaׇx̨1ټ. ѓ!F!^dlPԀj;" Xʞxep%[NRk_|~tح%|?xT ] F[D_geyi?9tƧ GP!"X7ThT_igdRt>h Z5xEU{c wa    [ca{x>̷A $A<'Y|6b2    [ qG olԈ ^G0M"IY- M־Ax^ "9v36{p:mF֛h:t    (7 vE%\̿O;Ã=:52GE .Įg;IHHHH` Pخ;l`sk ޝKCxEKlʦ+h m98@&.Tmތϯ_"mG)V|qm`}q-'   ( e 4HHHHHH- [䘏HHHHHH,PؖE7      (lݒc>      @a[@#HHHHHHuKHHHHHHʂmYt      pK-9#     ( e 4HHHHHH- [䘏HHHHHH,PؖE7      (lݒc>      @a[@#HHHHHHuKHHHHHHʂmYt      pK-9#     ( e 4HHHHHH- [䘏HHHHHH,PؖE7      (lݒc>      @a[@#HHHHHHuKHHHHHHʂmYt      pK-9#     ( e 4HHHHHH- [䘏HHHHHH,PؖE7      (lݒc>      @a[@#HHHHHHuKHHHHHHʂmYt      pK-9#     ( e 4HHHHHH- [䘏HHHHHH,PؖE7      (lݒc>      @a[@#HHHHHHuKHHHHHHʂmYt      pK-9#     ( e 4HHHHHH- [䘏HHHHHH,PخI7x>7qLI,HHHHHH@#@a@ׁ|tE<\exvVKW.ZN      @a됢/2H͌"h-O5dt7yGon$@$@$@$@$pKu؝+ [ὗNة[k6K"˳x!=&'     '@a됩a <2"#94HHHHH @a)l)l 1WEe:oͅOTOa*b݃P$ݧ-?xKgq漶7<$ꃨo~QԷW_~.{L5A]y#Jsۜ{]{S0q Cj3RXoほA {|A<kSߜ+?PU56 @9p-l}-U(WE1@GZ5Oj t큆L)KCwTO x ;Ly5qcLD$lٔS%h:/;=jLZ=^N r0'ѝzt3QjHHHHH\!Sې^̵7xc؆աBiQB؞Wc8V#b 55@]<ʥEj<3EE$f;puo8FO @Wy睨i|Da!ڢhq9^]bH/wnzb=%xww{], @%}xpF%:_Ӆ۸WHIIE'@6yyȕR}ZیBhi[3)%ybbCCRW({ߡs005Rj_eL^cbێڦkw12}>@\BL~AԑT xzHW ݍ86#D^[O+v ^7=hA)y-gcP}*eQF G¶#ľHM須xl>ۜ<ܤG@b2D+xr^!OİIwV}y Y`FŲhUr ɫ|&&"T?rQ43HHHHH֘@vkѩ~)_++l3pTq1ؼFb5LM]YO*DzNmʭ^_=ZŚ9vS,N&%y=aiT=)Ţ>S̓S[t'}>@/mX)ցɬ4*IfL%D z#XF]?.iBi:[^/>r%Bc3kZH@IDAT=+}>@} 0BB.H1E5E@.A؆F!DNέ~i #L˴BAnMW>GύOD ڰ(F'''ސQ^ht~WW#+}>pKFiSe!9ԛT`4~&!Ňr l#ڲbbMJjb#-F*^8>ZZ}-2Z?\ [H^&dPו'+&'RWbJ'[ChYdMk eIdA}>@Xp< ?эnGm$B]>}Ƿ|g+kL𨊙46gN %5޺JyK x?]–pPIYIJٟ2I* aDr *6UnvɜH.Gi6Gp`wui~W>&l^:QZ/ؑٻ0-~ܺ w EcL4ecv R2wo`=vTg,$@$@$@$@$@$z [ F[аC97܇AEjMq>#*"$2h>۰ggFMpi<=M,'>a|?n"~WMaJIHHHHnUm'lZA$@$@$@$@$@$(lW(#     XW늛 6/l#uyA<      $ᅭ\C񖧗*     XC]C:,HHHHHH Pؖ}@      b(l1      '@a[]DIHHHHH-FHHHHHHʞmw $     (F¶#     ({eE4HHHHHH btLǼmHc7q) ? 
1pM uIAJ$@$@$@$@$@$(lkDX>-6m{C{hjb [?o[7.>Qr} SM׋mwB .vB& <0:Y9C>E} yr\ [;\d6) ??/~܋@CfcN^6\ؑxDm"_C |/܁JVTFyO*?BԪyR3tNh4dY#FH=)s->z}\- sbĶLd\d:366' cw3|L^3<(=- 7^s3|)/:@= k7UDT*$9i(ls+ M'J؆zвN5>ko`&T߱ _CY'pFWjjdOyKHW(ygϵ#ؙ]H?V[Nakx w]Q^_)H 1J[bӓqK]M vy\{]lPW#?HBw#d z?Zkjq_9obx5I$ăm_DBئm:+/\kľپ+HHHHHH`U8P/ζFH/qe ᕓ# O(&F{%yetHq)0'\LNNH}ao@O/5Ld;cjdL dh&@I'\}4ɳ0Xrȋ>@}>@pyXLHu@؇%: ބ !ƅ82A`KJ] /M4"#yhZ]tKQ ㊋]k~|y,315ٖ.Ǣ~%V}71up0}>@WOfZOC;]TY4jKt~[@Tx/oOQGDF<xLD(i 3\V6AihBieQخI`B}>@XC /Œkes Ĩ&NQIy6OU#lu\d=ToĆmaK V[ >PFl3#vm^$?-m4v5}>@]`r Fl=u@Q% LD[oDS[-1e[غZL_dku ؊uC@_F{0oM}>@XcXeGy HOXjF%pߨ"ɹ/ d(Ki/ ZS[@m [yuF;ZB3\Pj~'ȞprsPgFlg;GVdE}>@(&ҧQl_oMVs&P`J`[_X)6) )6گ|mP8 lϬM?ELnuDj!O * )6! "Oo@FT',jB<{Rf},Ȃ>@}>PNFt?v>>'ZMZG5Ub2.>[ D7Ť[Dr@=0mBEu;S"mS&mP{*K x?]–pP)^IfCE`lITY+NmwER#5hAd0b"':>aqw?v}wbx^e[ǃA쩭_1?oHWޅ*˧o6ÀޠV+Syn~<cG, 9 p(le;mAUl{?@g| pzB7xbQ&0L]ęoÞCzNİOw-֪{®Fa&:$lܧo˴N\}YYjG؊M۾ @r^zsŖg8Vjfg.O'`<>|?n>x-4>BA Ӓ vUj_B UE % ۲E$@$@$@$@$@$`]RLG$@$@$@$@$@$P6Hʁzs.%mE$@$@$@$@$@$6CyۀSb(UwH$@$@$@$@$@N厘 % ۵˲IHHHHH֜#f$@$@$@$@$@$@kIv-l      5'@aY Z]K,HHHHHH` Pخ9bV@$@$@$@$@$@$(lג.&     Xsk % [#]ocEu]}O<׻$@$@$@$@$@$`7Tm< #53nc+M$@$@$@$@$@ [cw a¶:%m IHHHHH\ P{H$@$@$@$@$@$!PH$@$@$@$@$@$!,l>?z-*i\_|o.|6"A<]>LxaOx|ZHHHHHp-l}-UWZAE>; 3<=j9ب r0݇Ğ@mwT͝ՂGy0vך뜹Џcg, ma]Bx? D yQ.Ϝ+q)<ʱ~B\ ]5&g`i?҃lvBɤZ1r 'J؆zвN%ko`&T߱ _CU#B,ʑq5So5Ⱦ:,.PS#xxȓ\ZDF;{z+|~w}9k%uwjD^ MئKD D[ҋƻvFVgi\nısb2殾WwO֦.q1s&OCn\\٩^ mQ4./ 6_G}]u~?;lߧ~OWƇ`;庝no M     $P499?:&yr$c㱨Z7*C1ilrB {~4&P7:=?>O@~4'lMmb i-h`j†{c;6u6rEy9uŭL&ѤXWHog~\|Eg1ǿ:[ }>@}8~ip\vpX&vEhl<#GpF"ʾl:3, IibRqҞx(poHpW&25/,Lj{.x*NfRwLEcJ?% },[aEZ)} }>@@>`'ܯɄZ+,lǥ6_6(DL#b'klp^RAju[0Q# t %oFW@@apVL3FXXK/!Xq@3׼fV_dחFE ZU!RZz=okGZ0D_s$wiHp[䴪O]tԩ%6s ! vSsŭJk46z6EW쯱wXźW._ߟEZr%      ێxvV"5="ŧENUBD@n# F1]"9hTj)e!l!l?yVvS▋[;56BLkJ}(P_r 8q"W*IHHHHHi">S>TdV9xTK!lq)j+2]W˫MWXأڨM6v|PY2Dm֗N& ˷%OQ[.&e;}_PI̚d%Y2& vZ>gYY}>@||ng,8 ZS-IfL%RM㍈HjЦ)l&;P/d"\TK.* ,pF{i&ӑ}>@} @eBRLvXy5aU.My#z~i S}y2P_pWqM9__4](蔕)瞋;;MlJ堍'mPr?}>@Bwm,$zqۤaejiBe^M.AfGBEDA񅥘"jm؈_ߙ]ͮ ,PՊG/&ue)ȫs.nThl[ݡ)/ 9Oo}}'H"E}>@GI';'8p;jkD'28{8,]At_ .C'Acs6QPt %ROWvjm)=ԠGRV[R ~Hr?*8lo:ATY~-P)®T_cJCO {vpi™8FRo _v|<*NųTbD0"l.҃pwHk ڋk}w~s GΣ؀[ks;핛ᡇom[.`zF._ @.6ݞYlם,XőkNVQr$6k/wvU# pk."ZB0lL5Fh ݚ0R&X7uwXU"'v"Dqebap$K8>3s~IHHHHH7M \W4/'     jĂSVm._oplHHHHHH`uyc LbU1      XF7]FZHHHHHH`]u)a@$@$@$@$@$@$@˒ :4.% HHHHHH4bY      UGvե TCƶZ,K$@$@$@$@$@$خ0       jVCeIHHHHHVU?vOac:vŦIHHHHH`.k1o:5y0v(yĶHHHHHVUX2`K=O໡0ials>* r]neQl)4$@$@$@$@$@* *I     96~)o޻<̽f? 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/doc/source/index.rst0000664000175000017500000001122600000000000016777 0ustar00zuulzuul00000000000000.. Copyright 2010-2012 United States Government as represented by the Administrator of the National Aeronautics and Space Administration. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ============================================== OpenStack Block Storage (Cinder) documentation ============================================== .. figure:: images/cinder.png :alt: Cinder logo :align: center What is Cinder? --------------- Cinder is the OpenStack Block Storage service for providing volumes to Nova virtual machines, Ironic bare metal hosts, containers and more. Some of the goals of Cinder are to be/have: * **Component based architecture**: Quickly add new behaviors * **Highly available**: Scale to very serious workloads * **Fault-Tolerant**: Isolated processes avoid cascading failures * **Recoverable**: Failures should be easy to diagnose, debug, and rectify * **Open Standards**: Be a reference implementation for a community-driven api For end users ------------- As an end user of Cinder, you'll use Cinder to create and manage volumes using the Horizon user interface, command line tools such as the `python-cinderclient `_, or by directly using the `REST API `_. Tools for using Cinder ~~~~~~~~~~~~~~~~~~~~~~ * `Horizon `_: The official web UI for the OpenStack Project. * `OpenStack Client `_: The official CLI for OpenStack Projects. You should use this as your CLI for most things, it includes not just nova commands but also commands for most of the projects in OpenStack. * `Cinder Client `_: The **openstack** CLI is recommended, but there are some advanced features and administrative commands that are not yet available there. For CLI access to these commands, the **cinder** CLI can be used instead. Using the Cinder API ~~~~~~~~~~~~~~~~~~~~ All features of Cinder are exposed via a REST API that can be used to build more complicated logic or automation with Cinder. This can be consumed directly or via various SDKs. The following resources can help you get started consuming the API directly. * `Cinder API `_ * :doc:`Cinder microversion history ` For operators ------------- This section has details for deploying and maintaining Cinder services. Installing Cinder ~~~~~~~~~~~~~~~~~ Cinder can be configured standalone using the configuration setting ``auth_strategy = noauth``, but in most cases you will want to at least have the `Keystone `_ Identity service and other `OpenStack services `_ installed. .. toctree:: :maxdepth: 1 Installation Guide Administrating Cinder ~~~~~~~~~~~~~~~~~~~~~ Contents: .. toctree:: :maxdepth: 1 admin/index Reference ~~~~~~~~~ Contents: .. toctree:: :maxdepth: 1 configuration/index .. toctree:: :maxdepth: 2 :titlesonly: :includehidden: drivers-all-about ..
toctree:: :maxdepth: 1 cli/index Additional resources ~~~~~~~~~~~~~~~~~~~~ * `Cinder release notes `_ For contributors ---------------- Contributions to Cinder are welcome. There can be a lot of background information needed to get started. This section should help get you started. Please feel free to also ask any questions in the **#openstack-cinder** IRC channel. Contributing to Cinder ~~~~~~~~~~~~~~~~~~~~~~ Contents: .. toctree:: :maxdepth: 1 contributor/index API Microversions For reviewers ------------- * :ref:`transition-guidelines` Additional reference -------------------- Contents: .. toctree:: :maxdepth: 1 common/glossary.rst .. only:: html Indices and tables ------------------ Contents: * :ref:`genindex` * :ref:`search` ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1759315577.479122 cinder-27.0.0/doc/source/install/0000775000175000017500000000000000000000000016602 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/doc/source/install/README.rst0000664000175000017500000000114100000000000020266 0ustar00zuulzuul00000000000000================================================== Cinder Installation Documentation (source/install) ================================================== Introduction: ------------- This directory is intended to hold any installation documentation for Cinder. Documentation that explains how to bring Cinder up to the point that it is ready to use in an OpenStack or standalone environment should be put in this directory. The full spec for organization of documentation may be seen in the `OS Manuals Migration Spec `. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/doc/source/install/cinder-backup-install-obs.rst0000664000175000017500000000304200000000000024267 0ustar00zuulzuul00000000000000:orphan: Install and configure the backup service ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Optionally, install and configure the backup service. For simplicity, this configuration uses the Block Storage node and the Object Storage (swift) driver, thus depending on the `Object Storage service `_. .. note:: You must :ref:`install and configure a storage node ` prior to installing and configuring the backup service. Install and configure components -------------------------------- .. note:: Perform these steps on the Block Storage node. #. Install the packages: .. code-block:: console # zypper install openstack-cinder-backup #. Edit the ``/etc/cinder/cinder.conf`` file and complete the following actions: #. In the ``[DEFAULT]`` section, configure backup options: .. path /etc/cinder/cinder.conf .. code-block:: ini [DEFAULT] # ... backup_driver = cinder.backup.drivers.swift.SwiftBackupDriver backup_swift_url = SWIFT_URL Replace ``SWIFT_URL`` with the URL of the Object Storage service. The URL can be found by showing the object-store API endpoints: .. code-block:: console $ openstack catalog show object-store Finalize installation --------------------- Start the Block Storage backup service and configure it to start when the system boots: .. 
code-block:: console # systemctl enable openstack-cinder-backup.service # systemctl start openstack-cinder-backup.service ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/doc/source/install/cinder-backup-install-rdo.rst0000664000175000017500000000303100000000000024266 0ustar00zuulzuul00000000000000:orphan: Install and configure the backup service ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Optionally, install and configure the backup service. For simplicity, this configuration uses the Block Storage node and the Object Storage (swift) driver, thus depending on the `Object Storage service `_. .. note:: You must :ref:`install and configure a storage node ` prior to installing and configuring the backup service. Install and configure components -------------------------------- .. note:: Perform these steps on the Block Storage node. #. Install the packages: .. code-block:: console # yum install openstack-cinder #. Edit the ``/etc/cinder/cinder.conf`` file and complete the following actions: #. In the ``[DEFAULT]`` section, configure backup options: .. path /etc/cinder/cinder.conf .. code-block:: ini [DEFAULT] # ... backup_driver = cinder.backup.drivers.swift.SwiftBackupDriver backup_swift_url = SWIFT_URL Replace ``SWIFT_URL`` with the URL of the Object Storage service. The URL can be found by showing the object-store API endpoints: .. code-block:: console $ openstack catalog show object-store Finalize installation --------------------- Start the Block Storage backup service and configure it to start when the system boots: .. code-block:: console # systemctl enable openstack-cinder-backup.service # systemctl start openstack-cinder-backup.service ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/doc/source/install/cinder-backup-install-ubuntu.rst0000664000175000017500000000262100000000000025030 0ustar00zuulzuul00000000000000:orphan: Install and configure the backup service ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Optionally, install and configure the backup service. For simplicity, this configuration uses the Block Storage node and the Object Storage (swift) driver, thus depending on the `Object Storage service `_. .. note:: You must :ref:`install and configure a storage node ` prior to installing and configuring the backup service. Install and configure components -------------------------------- .. note:: Perform these steps on the Block Storage node. #. Install the packages: .. code-block:: console # apt install cinder-backup 2. Edit the ``/etc/cinder/cinder.conf`` file and complete the following actions: * In the ``[DEFAULT]`` section, configure backup options: .. path /etc/cinder/cinder.conf .. code-block:: ini [DEFAULT] # ... backup_driver = cinder.backup.drivers.swift.SwiftBackupDriver backup_swift_url = SWIFT_URL Replace ``SWIFT_URL`` with the URL of the Object Storage service. The URL can be found by showing the object-store API endpoints: .. code-block:: console $ openstack catalog show object-store Finalize installation --------------------- Restart the Block Storage backup service: .. 
code-block:: console # service cinder-backup restart ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/doc/source/install/cinder-controller-install-obs.rst0000664000175000017500000002243700000000000025216 0ustar00zuulzuul00000000000000Install and configure controller node ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ This section describes how to install and configure the Block Storage service, code-named cinder, on the controller node. This service requires at least one additional storage node that provides volumes to instances. Prerequisites ------------- Before you install and configure the Block Storage service, you must create a database, service credentials, and API endpoints. #. To create the database, complete these steps: #. Use the database access client to connect to the database server as the ``root`` user: .. code-block:: console $ mysql -u root -p #. Create the ``cinder`` database: .. code-block:: console MariaDB [(none)]> CREATE DATABASE cinder; #. Grant proper access to the ``cinder`` database: .. code-block:: console MariaDB [(none)]> GRANT ALL PRIVILEGES ON cinder.* TO 'cinder'@'localhost' \ IDENTIFIED BY 'CINDER_DBPASS'; MariaDB [(none)]> GRANT ALL PRIVILEGES ON cinder.* TO 'cinder'@'%' \ IDENTIFIED BY 'CINDER_DBPASS'; Replace ``CINDER_DBPASS`` with a suitable password. #. Exit the database access client. #. Source the ``admin`` credentials to gain access to admin-only CLI commands: .. code-block:: console $ . admin-openrc #. To create the service credentials, complete these steps: #. Create a ``cinder`` user: .. code-block:: console $ openstack user create --domain default --password-prompt cinder User Password: Repeat User Password: +---------------------+----------------------------------+ | Field | Value | +---------------------+----------------------------------+ | domain_id | default | | enabled | True | | id | 9d7e33de3e1a498390353819bc7d245d | | name | cinder | | options | {} | | password_expires_at | None | +---------------------+----------------------------------+ #. Add the ``admin`` role to the ``cinder`` user: .. code-block:: console $ openstack role add --project service --user cinder admin .. note:: This command provides no output. #. Create the ``cinderv3`` service entity: .. code-block:: console $ openstack service create --name cinderv3 \ --description "OpenStack Block Storage" volumev3 +-------------+----------------------------------+ | Field | Value | +-------------+----------------------------------+ | description | OpenStack Block Storage | | enabled | True | | id | ab3bbbef780845a1a283490d281e7fda | | name | cinderv3 | | type | volumev3 | +-------------+----------------------------------+ .. note:: Beginning with the Xena release, the Block Storage services require only one service entity. For prior releases, please consult the documentation for that specific release. #. Create the Block Storage service API endpoints: .. 
code-block:: console $ openstack endpoint create --region RegionOne \ volumev3 public http://controller:8776/v3/%\(project_id\)s +--------------+------------------------------------------+ | Field | Value | +--------------+------------------------------------------+ | enabled | True | | id | 03fa2c90153546c295bf30ca86b1344b | | interface | public | | region | RegionOne | | region_id | RegionOne | | service_id | ab3bbbef780845a1a283490d281e7fda | | service_name | cinderv3 | | service_type | volumev3 | | url | http://controller:8776/v3/%(project_id)s | +--------------+------------------------------------------+ $ openstack endpoint create --region RegionOne \ volumev3 internal http://controller:8776/v3/%\(project_id\)s +--------------+------------------------------------------+ | Field | Value | +--------------+------------------------------------------+ | enabled | True | | id | 94f684395d1b41068c70e4ecb11364b2 | | interface | internal | | region | RegionOne | | region_id | RegionOne | | service_id | ab3bbbef780845a1a283490d281e7fda | | service_name | cinderv3 | | service_type | volumev3 | | url | http://controller:8776/v3/%(project_id)s | +--------------+------------------------------------------+ $ openstack endpoint create --region RegionOne \ volumev3 admin http://controller:8776/v3/%\(project_id\)s +--------------+------------------------------------------+ | Field | Value | +--------------+------------------------------------------+ | enabled | True | | id | 4511c28a0f9840c78bacb25f10f62c98 | | interface | admin | | region | RegionOne | | region_id | RegionOne | | service_id | ab3bbbef780845a1a283490d281e7fda | | service_name | cinderv3 | | service_type | volumev3 | | url | http://controller:8776/v3/%(project_id)s | +--------------+------------------------------------------+ Install and configure components -------------------------------- #. Install the packages: .. code-block:: console # zypper install openstack-cinder-api openstack-cinder-scheduler #. Edit the ``/etc/cinder/cinder.conf`` file and complete the following actions: #. In the ``[database]`` section, configure database access: .. path /etc/cinder/cinder.conf .. code-block:: ini [database] # ... connection = mysql+pymysql://cinder:CINDER_DBPASS@controller/cinder Replace ``CINDER_DBPASS`` with the password you chose for the Block Storage database. #. In the ``[DEFAULT]`` section, configure ``RabbitMQ`` message queue access: .. path /etc/cinder/cinder.conf .. code-block:: ini [DEFAULT] # ... transport_url = rabbit://openstack:RABBIT_PASS@controller Replace ``RABBIT_PASS`` with the password you chose for the ``openstack`` account in ``RabbitMQ``. #. In the ``[DEFAULT]`` and ``[keystone_authtoken]`` sections, configure Identity service access: .. path /etc/cinder/cinder.conf .. code-block:: ini [DEFAULT] # ... auth_strategy = keystone [keystone_authtoken] # ... www_authenticate_uri = http://controller:5000 auth_url = http://controller:5000 memcached_servers = controller:11211 auth_type = password project_domain_name = default user_domain_name = default project_name = service username = cinder password = CINDER_PASS Replace ``CINDER_PASS`` with the password you chose for the ``cinder`` user in the Identity service. .. note:: Comment out or remove any other options in the ``[keystone_authtoken]`` section. #. In the ``[DEFAULT]`` section, configure the ``my_ip`` option to use the management interface IP address of the controller node: .. path /etc/cinder/cinder.conf .. code-block:: ini [DEFAULT] # ... my_ip = 10.0.0.11 #. 
In the ``[oslo_concurrency]`` section, configure the lock path: .. path /etc/cinder/cinder.conf .. code-block:: ini [oslo_concurrency] # ... lock_path = /var/lib/cinder/tmp Configure Compute to use Block Storage -------------------------------------- #. Edit the ``/etc/nova/nova.conf`` file and add the following to it: .. path /etc/nova/nova.conf .. code-block:: ini [cinder] os_region_name = RegionOne Finalize installation --------------------- #. Restart the Compute API service: .. code-block:: console # systemctl restart openstack-nova-api.service #. Start the Block Storage services and configure them to start when the system boots: .. code-block:: console # systemctl enable openstack-cinder-api.service openstack-cinder-scheduler.service # systemctl start openstack-cinder-api.service openstack-cinder-scheduler.service ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/doc/source/install/cinder-controller-install-rdo.rst0000664000175000017500000002267400000000000025222 0ustar00zuulzuul00000000000000Install and configure controller node ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ This section describes how to install and configure the Block Storage service, code-named cinder, on the controller node. This service requires at least one additional storage node that provides volumes to instances. Prerequisites ------------- Before you install and configure the Block Storage service, you must create a database, service credentials, and API endpoints. #. To create the database, complete these steps: #. Use the database access client to connect to the database server as the ``root`` user: .. code-block:: console $ mysql -u root -p #. Create the ``cinder`` database: .. code-block:: console MariaDB [(none)]> CREATE DATABASE cinder; #. Grant proper access to the ``cinder`` database: .. code-block:: console MariaDB [(none)]> GRANT ALL PRIVILEGES ON cinder.* TO 'cinder'@'localhost' \ IDENTIFIED BY 'CINDER_DBPASS'; MariaDB [(none)]> GRANT ALL PRIVILEGES ON cinder.* TO 'cinder'@'%' \ IDENTIFIED BY 'CINDER_DBPASS'; Replace ``CINDER_DBPASS`` with a suitable password. #. Exit the database access client. #. Source the ``admin`` credentials to gain access to admin-only CLI commands: .. code-block:: console $ . admin-openrc #. To create the service credentials, complete these steps: #. Create a ``cinder`` user: .. code-block:: console $ openstack user create --domain default --password-prompt cinder User Password: Repeat User Password: +---------------------+----------------------------------+ | Field | Value | +---------------------+----------------------------------+ | domain_id | default | | enabled | True | | id | 9d7e33de3e1a498390353819bc7d245d | | name | cinder | | options | {} | | password_expires_at | None | +---------------------+----------------------------------+ #. Add the ``admin`` role to the ``cinder`` user: .. code-block:: console $ openstack role add --project service --user cinder admin .. note:: This command provides no output. #. Create the ``cinderv3`` service entity: .. code-block:: console $ openstack service create --name cinderv3 \ --description "OpenStack Block Storage" volumev3 +-------------+----------------------------------+ | Field | Value | +-------------+----------------------------------+ | description | OpenStack Block Storage | | enabled | True | | id | ab3bbbef780845a1a283490d281e7fda | | name | cinderv3 | | type | volumev3 | +-------------+----------------------------------+ .. 
note:: Beginning with the Xena release, the Block Storage services require only one service entity. For prior releases, please consult the documentation for that specific release. #. Create the Block Storage service API endpoints: .. code-block:: console $ openstack endpoint create --region RegionOne \ volumev3 public http://controller:8776/v3/%\(project_id\)s +--------------+------------------------------------------+ | Field | Value | +--------------+------------------------------------------+ | enabled | True | | id | 03fa2c90153546c295bf30ca86b1344b | | interface | public | | region | RegionOne | | region_id | RegionOne | | service_id | ab3bbbef780845a1a283490d281e7fda | | service_name | cinderv3 | | service_type | volumev3 | | url | http://controller:8776/v3/%(project_id)s | +--------------+------------------------------------------+ $ openstack endpoint create --region RegionOne \ volumev3 internal http://controller:8776/v3/%\(project_id\)s +--------------+------------------------------------------+ | Field | Value | +--------------+------------------------------------------+ | enabled | True | | id | 94f684395d1b41068c70e4ecb11364b2 | | interface | internal | | region | RegionOne | | region_id | RegionOne | | service_id | ab3bbbef780845a1a283490d281e7fda | | service_name | cinderv3 | | service_type | volumev3 | | url | http://controller:8776/v3/%(project_id)s | +--------------+------------------------------------------+ $ openstack endpoint create --region RegionOne \ volumev3 admin http://controller:8776/v3/%\(project_id\)s +--------------+------------------------------------------+ | Field | Value | +--------------+------------------------------------------+ | enabled | True | | id | 4511c28a0f9840c78bacb25f10f62c98 | | interface | admin | | region | RegionOne | | region_id | RegionOne | | service_id | ab3bbbef780845a1a283490d281e7fda | | service_name | cinderv3 | | service_type | volumev3 | | url | http://controller:8776/v3/%(project_id)s | +--------------+------------------------------------------+ Install and configure components -------------------------------- #. Install the packages: .. code-block:: console # yum install openstack-cinder #. Edit the ``/etc/cinder/cinder.conf`` file and complete the following actions: #. In the ``[database]`` section, configure database access: .. path /etc/cinder/cinder.conf .. code-block:: ini [database] # ... connection = mysql+pymysql://cinder:CINDER_DBPASS@controller/cinder Replace ``CINDER_DBPASS`` with the password you chose for the Block Storage database. #. In the ``[DEFAULT]`` section, configure ``RabbitMQ`` message queue access: .. path /etc/cinder/cinder.conf .. code-block:: ini [DEFAULT] # ... transport_url = rabbit://openstack:RABBIT_PASS@controller Replace ``RABBIT_PASS`` with the password you chose for the ``openstack`` account in ``RabbitMQ``. #. In the ``[DEFAULT]`` and ``[keystone_authtoken]`` sections, configure Identity service access: .. path /etc/cinder/cinder.conf .. code-block:: ini [DEFAULT] # ... auth_strategy = keystone [keystone_authtoken] # ... www_authenticate_uri = http://controller:5000 auth_url = http://controller:5000 memcached_servers = controller:11211 auth_type = password project_domain_name = default user_domain_name = default project_name = service username = cinder password = CINDER_PASS Replace ``CINDER_PASS`` with the password you chose for the ``cinder`` user in the Identity service. .. note:: Comment out or remove any other options in the ``[keystone_authtoken]`` section. #. 
In the ``[DEFAULT]`` section, configure the ``my_ip`` option to use the management interface IP address of the controller node: .. path /etc/cinder/cinder.conf .. code-block:: ini [DEFAULT] # ... my_ip = 10.0.0.11 #. In the ``[oslo_concurrency]`` section, configure the lock path: .. path /etc/cinder/cinder.conf .. code-block:: ini [oslo_concurrency] # ... lock_path = /var/lib/cinder/tmp #. Populate the Block Storage database: .. code-block:: console # su -s /bin/sh -c "cinder-manage db sync" cinder .. note:: Ignore any deprecation messages in this output. Configure Compute to use Block Storage -------------------------------------- #. Edit the ``/etc/nova/nova.conf`` file and add the following to it: .. path /etc/nova/nova.conf .. code-block:: ini [cinder] os_region_name = RegionOne Finalize installation --------------------- #. Restart the Compute API service: .. code-block:: console # systemctl restart openstack-nova-api.service #. Start the Block Storage services and configure them to start when the system boots: .. code-block:: console # systemctl enable openstack-cinder-api.service openstack-cinder-scheduler.service # systemctl start openstack-cinder-api.service openstack-cinder-scheduler.service ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/doc/source/install/cinder-controller-install-ubuntu.rst0000664000175000017500000002244300000000000025752 0ustar00zuulzuul00000000000000Install and configure controller node ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ This section describes how to install and configure the Block Storage service, code-named cinder, on the controller node. This service requires at least one additional storage node that provides volumes to instances. Prerequisites ------------- Before you install and configure the Block Storage service, you must create a database, service credentials, and API endpoints. #. To create the database, complete these steps: #. Use the database access client to connect to the database server as the ``root`` user: .. code-block:: console # mysql #. Create the ``cinder`` database: .. code-block:: console MariaDB [(none)]> CREATE DATABASE cinder; #. Grant proper access to the ``cinder`` database: .. code-block:: console MariaDB [(none)]> GRANT ALL PRIVILEGES ON cinder.* TO 'cinder'@'localhost' \ IDENTIFIED BY 'CINDER_DBPASS'; MariaDB [(none)]> GRANT ALL PRIVILEGES ON cinder.* TO 'cinder'@'%' \ IDENTIFIED BY 'CINDER_DBPASS'; Replace ``CINDER_DBPASS`` with a suitable password. #. Exit the database access client. #. Source the ``admin`` credentials to gain access to admin-only CLI commands: .. code-block:: console $ . admin-openrc #. To create the service credentials, complete these steps: #. Create a ``cinder`` user: .. code-block:: console $ openstack user create --domain default --password-prompt cinder User Password: Repeat User Password: +---------------------+----------------------------------+ | Field | Value | +---------------------+----------------------------------+ | domain_id | default | | enabled | True | | id | 9d7e33de3e1a498390353819bc7d245d | | name | cinder | | options | {} | | password_expires_at | None | +---------------------+----------------------------------+ #. Add the ``admin`` role to the ``cinder`` user: .. code-block:: console $ openstack role add --project service --user cinder admin .. note:: This command provides no output. #. Create the ``cinderv3`` service entity: .. 
code-block:: console $ openstack service create --name cinderv3 \ --description "OpenStack Block Storage" volumev3 +-------------+----------------------------------+ | Field | Value | +-------------+----------------------------------+ | description | OpenStack Block Storage | | enabled | True | | id | ab3bbbef780845a1a283490d281e7fda | | name | cinderv3 | | type | volumev3 | +-------------+----------------------------------+ .. note:: Beginning with the Xena release, the Block Storage services require only one service entity. For prior releases, please consult the documentation for that specific release. #. Create the Block Storage service API endpoints: .. code-block:: console $ openstack endpoint create --region RegionOne \ volumev3 public http://controller:8776/v3/%\(project_id\)s +--------------+------------------------------------------+ | Field | Value | +--------------+------------------------------------------+ | enabled | True | | id | 03fa2c90153546c295bf30ca86b1344b | | interface | public | | region | RegionOne | | region_id | RegionOne | | service_id | ab3bbbef780845a1a283490d281e7fda | | service_name | cinderv3 | | service_type | volumev3 | | url | http://controller:8776/v3/%(project_id)s | +--------------+------------------------------------------+ $ openstack endpoint create --region RegionOne \ volumev3 internal http://controller:8776/v3/%\(project_id\)s +--------------+------------------------------------------+ | Field | Value | +--------------+------------------------------------------+ | enabled | True | | id | 94f684395d1b41068c70e4ecb11364b2 | | interface | internal | | region | RegionOne | | region_id | RegionOne | | service_id | ab3bbbef780845a1a283490d281e7fda | | service_name | cinderv3 | | service_type | volumev3 | | url | http://controller:8776/v3/%(project_id)s | +--------------+------------------------------------------+ $ openstack endpoint create --region RegionOne \ volumev3 admin http://controller:8776/v3/%\(project_id\)s +--------------+------------------------------------------+ | Field | Value | +--------------+------------------------------------------+ | enabled | True | | id | 4511c28a0f9840c78bacb25f10f62c98 | | interface | admin | | region | RegionOne | | region_id | RegionOne | | service_id | ab3bbbef780845a1a283490d281e7fda | | service_name | cinderv3 | | service_type | volumev3 | | url | http://controller:8776/v3/%(project_id)s | +--------------+------------------------------------------+ Install and configure components -------------------------------- #. Install the packages: .. code-block:: console # apt install cinder-api cinder-scheduler #. Edit the ``/etc/cinder/cinder.conf`` file and complete the following actions: #. In the ``[database]`` section, configure database access: .. path /etc/cinder/cinder.conf .. code-block:: ini [database] # ... connection = mysql+pymysql://cinder:CINDER_DBPASS@controller/cinder Replace ``CINDER_DBPASS`` with the password you chose for the Block Storage database. #. In the ``[DEFAULT]`` section, configure ``RabbitMQ`` message queue access: .. path /etc/cinder/cinder.conf .. code-block:: ini [DEFAULT] # ... transport_url = rabbit://openstack:RABBIT_PASS@controller Replace ``RABBIT_PASS`` with the password you chose for the ``openstack`` account in ``RabbitMQ``. #. In the ``[DEFAULT]`` and ``[keystone_authtoken]`` sections, configure Identity service access: .. path /etc/cinder/cinder.conf .. code-block:: ini [DEFAULT] # ... auth_strategy = keystone [keystone_authtoken] # ... 
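# The options that follow let keystonemiddleware validate incoming API tokens
# against Keystone, authenticating as the 'cinder' service user created in the
# Prerequisites section above.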
www_authenticate_uri = http://controller:5000 auth_url = http://controller:5000 memcached_servers = controller:11211 auth_type = password project_domain_name = default user_domain_name = default project_name = service username = cinder password = CINDER_PASS Replace ``CINDER_PASS`` with the password you chose for the ``cinder`` user in the Identity service. .. note:: Comment out or remove any other options in the ``[keystone_authtoken]`` section. #. In the ``[DEFAULT]`` section, configure the ``my_ip`` option to use the management interface IP address of the controller node: .. path /etc/cinder/cinder.conf .. code-block:: ini [DEFAULT] # ... my_ip = 10.0.0.11 #. In the ``[oslo_concurrency]`` section, configure the lock path: .. path /etc/cinder/cinder.conf .. code-block:: ini [oslo_concurrency] # ... lock_path = /var/lib/cinder/tmp #. Populate the Block Storage database: .. code-block:: console # su -s /bin/sh -c "cinder-manage db sync" cinder .. note:: Ignore any deprecation messages in this output. Configure Compute to use Block Storage -------------------------------------- #. Edit the ``/etc/nova/nova.conf`` file and add the following to it: .. path /etc/nova/nova.conf .. code-block:: ini [cinder] os_region_name = RegionOne Finalize installation --------------------- #. Restart the Compute API service: .. code-block:: console # service nova-api restart #. Restart the Block Storage services: .. code-block:: console # service cinder-scheduler restart # service apache2 restart ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/doc/source/install/cinder-storage-install-obs.rst0000664000175000017500000001606000000000000024472 0ustar00zuulzuul00000000000000Install and configure a storage node ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Prerequisites ------------- Before you install and configure the Block Storage service on the storage node, you must prepare the storage device. .. note:: Perform these steps on the storage node. #. Install the supporting utility packages. #. Install the LVM packages: .. code-block:: console # zypper install lvm2 #. (Optional) If you intend to use non-raw image types such as QCOW2 and VMDK, install the QEMU package: .. code-block:: console # zypper install qemu .. end .. note:: Some distributions include LVM by default. #. Create the LVM physical volume ``/dev/sdb``: .. code-block:: console # pvcreate /dev/sdb Physical volume "/dev/sdb" successfully created #. Create the LVM volume group ``cinder-volumes``: .. code-block:: console # vgcreate cinder-volumes /dev/sdb Volume group "cinder-volumes" successfully created The Block Storage service creates logical volumes in this volume group. #. Only instances can access Block Storage volumes. However, the underlying operating system manages the devices associated with the volumes. By default, the LVM volume scanning tool scans the ``/dev`` directory for block storage devices that contain volumes. If projects use LVM on their volumes, the scanning tool detects these volumes and attempts to cache them which can cause a variety of problems with both the underlying operating system and project volumes. You must reconfigure LVM to scan only the devices that contain the ``cinder-volumes`` volume group. Edit the ``/etc/lvm/lvm.conf`` file and complete the following actions: * In the ``devices`` section, add a filter that accepts the ``/dev/sdb`` device and rejects all other devices: .. path /etc/lvm/lvm.conf .. code-block:: bash devices { ... 
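# Accept only the /dev/sdb device backing the cinder-volumes volume group and
# reject every other block device (a = accept, r = reject).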
filter = [ "a/sdb/", "r/.*/"] .. end Each item in the filter array begins with ``a`` for **accept** or ``r`` for **reject** and includes a regular expression for the device name. The array must end with ``r/.*/`` to reject any remaining devices. You can use the :command:`vgs -vvvv` command to test filters. .. warning:: If your storage nodes use LVM on the operating system disk, you must also add the associated device to the filter. For example, if the ``/dev/sda`` device contains the operating system: .. ignore_path /etc/lvm/lvm.conf .. code-block:: ini filter = [ "a/sda/", "a/sdb/", "r/.*/"] .. end Similarly, if your compute nodes use LVM on the operating system disk, you must also modify the filter in the ``/etc/lvm/lvm.conf`` file on those nodes to include only the operating system disk. For example, if the ``/dev/sda`` device contains the operating system: .. path /etc/openstack-dashboard/local_settings.py .. code-block:: ini filter = [ "a/sda/", "r/.*/"] .. end Install and configure components -------------------------------- #. Install the packages: .. code-block:: console # zypper install openstack-cinder-volume tgt #. Edit the ``/etc/cinder/cinder.conf`` file and complete the following actions: * In the ``[database]`` section, configure database access: .. path /etc/cinder/cinder.conf .. code-block:: ini [database] # ... connection = mysql+pymysql://cinder:CINDER_DBPASS@controller/cinder .. end Replace ``CINDER_DBPASS`` with the password you chose for the Block Storage database. * In the ``[DEFAULT]`` section, configure ``RabbitMQ`` message queue access: .. path /etc/cinder/cinder.conf .. code-block:: ini [DEFAULT] # ... transport_url = rabbit://openstack:RABBIT_PASS@controller .. end Replace ``RABBIT_PASS`` with the password you chose for the ``openstack`` account in ``RabbitMQ``. * In the ``[DEFAULT]`` and ``[keystone_authtoken]`` sections, configure Identity service access: .. path /etc/cinder/cinder.conf .. code-block:: ini [DEFAULT] # ... auth_strategy = keystone [keystone_authtoken] # ... www_authenticate_uri = http://controller:5000 auth_url = http://controller:5000 memcached_servers = controller:11211 auth_type = password project_domain_name = default user_domain_name = default project_name = service username = cinder password = CINDER_PASS .. end Replace ``CINDER_PASS`` with the password you chose for the ``cinder`` user in the Identity service. .. note:: Comment out or remove any other options in the ``[keystone_authtoken]`` section. * In the ``[DEFAULT]`` section, configure the ``my_ip`` option: .. path /etc/cinder/cinder.conf .. code-block:: ini [DEFAULT] # ... my_ip = MANAGEMENT_INTERFACE_IP_ADDRESS .. end Replace ``MANAGEMENT_INTERFACE_IP_ADDRESS`` with the IP address of the management network interface on your storage node, typically 10.0.0.41 for the first node in the `example architecture `_. * In the ``[lvm]`` section, configure the LVM back end with the LVM driver, ``cinder-volumes`` volume group, iSCSI protocol, and appropriate iSCSI service: .. path /etc/cinder/cinder.conf .. code-block:: ini [lvm] # ... volume_driver = cinder.volume.drivers.lvm.LVMVolumeDriver volume_group = cinder-volumes target_protocol = iscsi target_helper = tgtadm .. end * In the ``[DEFAULT]`` section, enable the LVM back end: .. path /etc/cinder/cinder.conf .. code-block:: ini [DEFAULT] # ... enabled_backends = lvm .. end .. note:: Back-end names are arbitrary. As an example, this guide uses the name of the driver as the name of the back end. 
* In the ``[DEFAULT]`` section, configure the location of the Image service API: .. path /etc/cinder/cinder.conf .. code-block:: ini [DEFAULT] # ... glance_api_servers = http://controller:9292 .. end * In the ``[oslo_concurrency]`` section, configure the lock path: .. path /etc/cinder/cinder.conf .. code-block:: ini [oslo_concurrency] # ... lock_path = /var/lib/cinder/tmp .. end #. Create the ``/etc/tgt/conf.d/cinder.conf`` file with the following data: .. code-block:: shell include /var/lib/cinder/volumes/* .. end Finalize installation --------------------- #. Start the Block Storage volume service including its dependencies and configure them to start when the system boots: .. code-block:: console # systemctl enable openstack-cinder-volume.service tgtd.service # systemctl start openstack-cinder-volume.service tgtd.service ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/doc/source/install/cinder-storage-install-rdo.rst0000664000175000017500000001615200000000000024475 0ustar00zuulzuul00000000000000Install and configure a storage node ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Prerequisites ------------- Before you install and configure the Block Storage service on the storage node, you must prepare the storage device. .. note:: Perform these steps on the storage node. #. Install the supporting utility packages: * Install the LVM packages: .. code-block:: console # yum install lvm2 device-mapper-persistent-data .. end * This is not required for CentOS 8 or later, which comes with a version of LVM that does not use the lvmetad service: .. code-block:: console # systemctl enable lvm2-lvmetad.service # systemctl start lvm2-lvmetad.service .. end .. note:: Some distributions include LVM by default. #. Create the LVM physical volume ``/dev/sdb``: .. code-block:: console # pvcreate /dev/sdb Physical volume "/dev/sdb" successfully created .. end #. Create the LVM volume group ``cinder-volumes``: .. code-block:: console # vgcreate cinder-volumes /dev/sdb Volume group "cinder-volumes" successfully created .. end The Block Storage service creates logical volumes in this volume group. #. Only instances can access Block Storage volumes. However, the underlying operating system manages the devices associated with the volumes. By default, the LVM volume scanning tool scans the ``/dev`` directory for block storage devices that contain volumes. If projects use LVM on their volumes, the scanning tool detects these volumes and attempts to cache them which can cause a variety of problems with both the underlying operating system and project volumes. You must reconfigure LVM to scan only the devices that contain the ``cinder-volumes`` volume group. Edit the ``/etc/lvm/lvm.conf`` file and complete the following actions: * In the ``devices`` section, add a filter that accepts the ``/dev/sdb`` device and rejects all other devices: .. path /etc/lvm/lvm.conf .. code-block:: bash devices { ... filter = [ "a/sdb/", "r/.*/"] .. end Each item in the filter array begins with ``a`` for **accept** or ``r`` for **reject** and includes a regular expression for the device name. The array must end with ``r/.*/`` to reject any remaining devices. You can use the :command:`vgs -vvvv` command to test filters. .. warning:: If your storage nodes use LVM on the operating system disk, you must also add the associated device to the filter. For example, if the ``/dev/sda`` device contains the operating system: .. ignore_path /etc/lvm/lvm.conf .. 
code-block:: ini filter = [ "a/sda/", "a/sdb/", "r/.*/"] .. end Similarly, if your compute nodes use LVM on the operating system disk, you must also modify the filter in the ``/etc/lvm/lvm.conf`` file on those nodes to include only the operating system disk. For example, if the ``/dev/sda`` device contains the operating system: .. path /etc/openstack-dashboard/local_settings.py .. code-block:: ini filter = [ "a/sda/", "r/.*/"] .. end Install and configure components -------------------------------- #. Install the packages: .. code-block:: console # yum install openstack-cinder targetcli .. end #. Edit the ``/etc/cinder/cinder.conf`` file and complete the following actions: * In the ``[database]`` section, configure database access: .. path /etc/cinder/cinder.conf .. code-block:: ini [database] # ... connection = mysql+pymysql://cinder:CINDER_DBPASS@controller/cinder .. end Replace ``CINDER_DBPASS`` with the password you chose for the Block Storage database. * In the ``[DEFAULT]`` section, configure ``RabbitMQ`` message queue access: .. path /etc/cinder/cinder.conf .. code-block:: ini [DEFAULT] # ... transport_url = rabbit://openstack:RABBIT_PASS@controller .. end Replace ``RABBIT_PASS`` with the password you chose for the ``openstack`` account in ``RabbitMQ``. * In the ``[DEFAULT]`` and ``[keystone_authtoken]`` sections, configure Identity service access: .. path /etc/cinder/cinder.conf .. code-block:: ini [DEFAULT] # ... auth_strategy = keystone [keystone_authtoken] # ... www_authenticate_uri = http://controller:5000 auth_url = http://controller:5000 memcached_servers = controller:11211 auth_type = password project_domain_name = default user_domain_name = default project_name = service username = cinder password = CINDER_PASS .. end Replace ``CINDER_PASS`` with the password you chose for the ``cinder`` user in the Identity service. .. note:: Comment out or remove any other options in the ``[keystone_authtoken]`` section. * In the ``[DEFAULT]`` section, configure the ``my_ip`` option: .. path /etc/cinder/cinder.conf .. code-block:: ini [DEFAULT] # ... my_ip = MANAGEMENT_INTERFACE_IP_ADDRESS .. end Replace ``MANAGEMENT_INTERFACE_IP_ADDRESS`` with the IP address of the management network interface on your storage node, typically 10.0.0.41 for the first node in the `example architecture `_. * In the ``[lvm]`` section, configure the LVM back end with the LVM driver, ``cinder-volumes`` volume group, iSCSI protocol, and appropriate iSCSI service. If the ``[lvm]`` section does not exist, create it: .. path /etc/cinder/cinder.conf .. code-block:: ini [lvm] volume_driver = cinder.volume.drivers.lvm.LVMVolumeDriver volume_group = cinder-volumes target_protocol = iscsi target_helper = lioadm .. end * In the ``[DEFAULT]`` section, enable the LVM back end: .. path /etc/cinder/cinder.conf .. code-block:: ini [DEFAULT] # ... enabled_backends = lvm .. end .. note:: Back-end names are arbitrary. As an example, this guide uses the name of the driver as the name of the back end. * In the ``[DEFAULT]`` section, configure the location of the Image service API: .. path /etc/cinder/cinder.conf .. code-block:: ini [DEFAULT] # ... glance_api_servers = http://controller:9292 .. end * In the ``[oslo_concurrency]`` section, configure the lock path: .. path /etc/cinder/cinder.conf .. code-block:: ini [oslo_concurrency] # ... lock_path = /var/lib/cinder/tmp .. 
end Finalize installation --------------------- * Start the Block Storage volume service including its dependencies and configure them to start when the system boots: .. code-block:: console # systemctl enable openstack-cinder-volume.service target.service # systemctl start openstack-cinder-volume.service target.service .. end ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/doc/source/install/cinder-storage-install-ubuntu.rst0000664000175000017500000001555100000000000025235 0ustar00zuulzuul00000000000000Install and configure a storage node ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Prerequisites ------------- Before you install and configure the Block Storage service on the storage node, you must prepare the storage device. .. note:: Perform these steps on the storage node. #. Install the supporting utility packages: .. code-block:: console # apt install lvm2 thin-provisioning-tools .. end .. note:: Some distributions include LVM by default. #. Create the LVM physical volume ``/dev/sdb``: .. code-block:: console # pvcreate /dev/sdb Physical volume "/dev/sdb" successfully created .. end #. Create the LVM volume group ``cinder-volumes``: .. code-block:: console # vgcreate cinder-volumes /dev/sdb Volume group "cinder-volumes" successfully created .. end The Block Storage service creates logical volumes in this volume group. #. Only instances can access Block Storage volumes. However, the underlying operating system manages the devices associated with the volumes. By default, the LVM volume scanning tool scans the ``/dev`` directory for block storage devices that contain volumes. If projects use LVM on their volumes, the scanning tool detects these volumes and attempts to cache them which can cause a variety of problems with both the underlying operating system and project volumes. You must reconfigure LVM to scan only the devices that contain the ``cinder-volumes`` volume group. Edit the ``/etc/lvm/lvm.conf`` file and complete the following actions: * In the ``devices`` section, add a filter that accepts the ``/dev/sdb`` device and rejects all other devices: .. path /etc/lvm/lvm.conf .. code-block:: bash devices { ... filter = [ "a/sdb/", "r/.*/"] .. end Each item in the filter array begins with ``a`` for **accept** or ``r`` for **reject** and includes a regular expression for the device name. The array must end with ``r/.*/`` to reject any remaining devices. You can use the :command:`vgs -vvvv` command to test filters. .. warning:: If your storage nodes use LVM on the operating system disk, you must also add the associated device to the filter. For example, if the ``/dev/sda`` device contains the operating system: .. ignore_path /etc/lvm/lvm.conf .. code-block:: ini filter = [ "a/sda/", "a/sdb/", "r/.*/"] .. end Similarly, if your compute nodes use LVM on the operating system disk, you must also modify the filter in the ``/etc/lvm/lvm.conf`` file on those nodes to include only the operating system disk. For example, if the ``/dev/sda`` device contains the operating system: .. path /etc/openstack-dashboard/local_settings.py .. code-block:: ini filter = [ "a/sda/", "r/.*/"] .. end Install and configure components -------------------------------- #. Install the packages: .. code-block:: console # apt install cinder-volume tgt .. end #. Edit the ``/etc/cinder/cinder.conf`` file and complete the following actions: * In the ``[database]`` section, configure database access: .. path /etc/cinder/cinder.conf .. 
code-block:: ini [database] # ... connection = mysql+pymysql://cinder:CINDER_DBPASS@controller/cinder .. end Replace ``CINDER_DBPASS`` with the password you chose for the Block Storage database. * In the ``[DEFAULT]`` section, configure ``RabbitMQ`` message queue access: .. path /etc/cinder/cinder.conf .. code-block:: ini [DEFAULT] # ... transport_url = rabbit://openstack:RABBIT_PASS@controller .. end Replace ``RABBIT_PASS`` with the password you chose for the ``openstack`` account in ``RabbitMQ``. * In the ``[DEFAULT]`` and ``[keystone_authtoken]`` sections, configure Identity service access: .. path /etc/cinder/cinder.conf .. code-block:: ini [DEFAULT] # ... auth_strategy = keystone [keystone_authtoken] # ... www_authenticate_uri = http://controller:5000 auth_url = http://controller:5000 memcached_servers = controller:11211 auth_type = password project_domain_name = default user_domain_name = default project_name = service username = cinder password = CINDER_PASS .. end Replace ``CINDER_PASS`` with the password you chose for the ``cinder`` user in the Identity service. .. note:: Comment out or remove any other options in the ``[keystone_authtoken]`` section. * In the ``[DEFAULT]`` section, configure the ``my_ip`` option: .. path /etc/cinder/cinder.conf .. code-block:: ini [DEFAULT] # ... my_ip = MANAGEMENT_INTERFACE_IP_ADDRESS .. end Replace ``MANAGEMENT_INTERFACE_IP_ADDRESS`` with the IP address of the management network interface on your storage node, typically 10.0.0.41 for the first node in the `example architecture `_. * In the ``[lvm]`` section, configure the LVM back end with the LVM driver, ``cinder-volumes`` volume group, iSCSI protocol, and appropriate iSCSI service: .. path /etc/cinder/cinder.conf .. code-block:: ini [lvm] # ... volume_driver = cinder.volume.drivers.lvm.LVMVolumeDriver volume_group = cinder-volumes target_protocol = iscsi target_helper = tgtadm .. end * In the ``[DEFAULT]`` section, enable the LVM back end: .. path /etc/cinder/cinder.conf .. code-block:: ini [DEFAULT] # ... enabled_backends = lvm .. end .. note:: Back-end names are arbitrary. As an example, this guide uses the name of the driver as the name of the back end. * In the ``[DEFAULT]`` section, configure the location of the Image service API: .. path /etc/cinder/cinder.conf .. code-block:: ini [DEFAULT] # ... glance_api_servers = http://controller:9292 .. end * In the ``[oslo_concurrency]`` section, configure the lock path: .. path /etc/cinder/cinder.conf .. code-block:: ini [oslo_concurrency] # ... lock_path = /var/lib/cinder/tmp .. end #. Create the ``/etc/tgt/conf.d/cinder.conf`` file with the following data: .. note:: Perform this step only when using tgt target. .. code-block:: shell include /var/lib/cinder/volumes/* .. end Finalize installation --------------------- #. Restart the Block Storage volume service including its dependencies: .. code-block:: console # service tgt restart # service cinder-volume restart .. end ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/doc/source/install/cinder-storage-install-windows.rst0000664000175000017500000001153700000000000025405 0ustar00zuulzuul00000000000000.. 
_cinder_storage_install_windows: Install and configure a storage node ==================================== Prerequisites ~~~~~~~~~~~~~ The following Windows versions are officially supported by Cinder: * ``Windows Server 2012`` * ``Windows Server 2012 R2`` * ``Windows Server 2016`` The OpenStack Cinder Volume MSI installer is the recommended deployment tool for Cinder on Windows. You can find it at https://cloudbase.it/openstack-windows-storage/#download. It installs an independent Python environment, in order to avoid conflicts with existing applications. It can dynamically generate a ``cinder.conf`` file based on the parameters you provide. The OpenStack Cinder Volume MSI installer can be deployed in a fully automated way using Puppet, Chef, SaltStack, Ansible, Juju, DSC, Windows Group Policies or any other automated configuration framework. Configure NTP ------------- Network time services must be configured to ensure proper operation of the OpenStack nodes. To set network time on your Windows host you must run the following commands: .. code-block:: bat net stop w32time w32tm /config /manualpeerlist:pool.ntp.org,0x8 /syncfromflags:MANUAL net start w32time Keep in mind that the node will have to be time synchronized with the other nodes of your OpenStack environment, so it is important to use the same NTP server. .. note:: In case of an Active Directory environment, you may do this only for the AD Domain Controller. .. end Install and configure components ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The MSI may be run in the following modes: Graphical mode -------------- The installer will walk you through the commonly used cinder options, automatically generating a config file based on your input. You may run the following in order to run the installer in graphical mode, also specifying a log file. Please use the installer full path. .. code-block:: powershell msiexec /i CinderVolumeSetup.msi /l*v msi_log.txt .. end Unattended mode --------------- The installer will deploy Cinder, taking care of required Windows services and features. A minimal sample config file will be generated and need to be updated accordingly. Run the following in order to install Cinder in unattended mode, enabling the iSCSI and SMB volume drivers. .. code-block:: powershell msiexec /i CinderVolumeSetup.msi /qn /l*v msi_log.txt ` ADDLOCAL="iscsiDriver,smbDriver" .. end By default, Cinder will be installed at ``%ProgramFiles%\Cloudbase Solutions\OpenStack``. You may choose a different install directory by using the ``INSTALLDIR`` argument, as following: .. code-block:: powershell msiexec /i CinderVolumeSetup.msi /qn /l*v msi_log.txt ` ADDLOCAL="iscsiDriver,smbDriver" ` INSTALLDIR="C:\cinder" .. end The installer will generate a Windows service, called ``cinder-volume``. .. note:: Previous MSI releases may use a separate service per volume backend (e.g. cinder-volume-smb). You may double check the cinder services along with their executable paths by running the following: .. code-block:: powershell get-service cinder-volume* sc.exe qc cinder-volume-smb .. end Note that ``sc`` is also an alias for ``Set-Content``. To use the service control utility, you have to explicitly call ``sc.exe``. .. end Configuring Cinder ------------------ If you've run the installer in graphical mode, you may skip this part as the MSI already took care of generating the configuration files. 
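As a quick check before editing anything by hand, you can confirm whether the installer already generated a configuration file. The snippet below is a minimal sketch that assumes the default install location and that ``INSTALLDIR`` was not overridden during setup.

.. code-block:: powershell

   $conf = "$env:ProgramFiles\Cloudbase Solutions\OpenStack\etc\cinder.conf"
   if (Test-Path $conf) {
       # A config file already exists; list its sections before changing it.
       Get-Content $conf | Select-String -Pattern '^\['
   }

.. end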
The Cinder Volume Windows service configured by the MSI expects the cinder config file to reside at:: %INSTALLDIR%\etc\cinder.conf You may use the following config sample, updating fields appropriately. .. code-block:: ini [DEFAULT] my_ip = MANAGEMENT_INTERFACE_IP_ADDRESS auth_strategy = keystone transport_url = rabbit://RABBIT_USER:RABBIT_PASS@controller:5672 glance_api_servers = http://controller/image sql_connection = mysql+pymysql://cinder:CINDER_DBPASS@controller/cinder image_conversion_dir = C:\OpenStack\ImageConversionDir\ lock_path = C:\OpenStack\Lock\ log_dir = C:\OpenStack\Log\ log_file = cinder-volume.log [coordination] backend_url = file:///C:/OpenStack/Lock/ [key_manager] api_class = cinder.keymgr.conf_key_mgr.ConfKeyManager .. end .. note:: The above sample doesn't configure any Cinder Volume driver. To do so, follow the configuration guide for the driver of choice, appending driver specific config options. .. end Currently supported drivers on Windows: * :ref:`windows_smb_volume_driver` * :ref:`windows_iscsi_volume_driver` Finalize installation ~~~~~~~~~~~~~~~~~~~~~ #. Restart the Cinder Volume service: .. code-block:: powershell Restart-Service cinder-volume .. end #. Ensure that the Cinder Volume service is running: .. code-block:: powershell Get-Service cinder-volume .. end ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/doc/source/install/cinder-verify.rst0000664000175000017500000000215700000000000022107 0ustar00zuulzuul00000000000000.. _cinder-verify: Verify Cinder operation ~~~~~~~~~~~~~~~~~~~~~~~ Verify operation of the Block Storage service. .. note:: Perform these commands on the controller node. #. Source the ``admin`` credentials to gain access to admin-only CLI commands: .. code-block:: console $ . admin-openrc .. end #. List service components to verify successful launch of each process: .. code-block:: console $ openstack volume service list +------------------+------------+------+---------+-------+----------------------------+ | Binary | Host | Zone | Status | State | Updated_at | +------------------+------------+------+---------+-------+----------------------------+ | cinder-scheduler | controller | nova | enabled | up | 2016-09-30T02:27:41.000000 | | cinder-volume | block@lvm | nova | enabled | up | 2016-09-30T02:27:46.000000 | | cinder-backup | controller | nova | enabled | up | 2016-09-30T02:27:41.000000 | +------------------+------------+------+---------+-------+----------------------------+ .. 
end
[binary image data omitted: cinder-27.0.0/doc/source/install/figures/hwreqs.png]
[binary image data omitted: cinder-27.0.0/doc/source/install/figures/network1-services.png]
?.$M'b*U %] |ӱկgo2η*}2CS^ԯUMG ͢hBbRh .@Yӣ-O$Xi;!@ Z6 Ǟ{~e=sYN=C`Ї]E5"BiB;*aCSp<ǽ'mUs^SQoQW 08jDt}%ۧv:)敫iRqsIq`4f ڰ0b^aE (^b)B #UԤm 00Pl 3}bW=%l(S1a+h( Ɛ7QXQ0+N =Um8.P(C JS9):uQ=!~ j9~nyJ)܁0J}0N[Upa5ridmG s#69"nsi]=װv ᬥj2cz&u-gԳhR}:ڐlsX_ fxf}:4q oW>Y&U M6QgL2Խ(`K+E_]&ԼGM ./>YE1 Z}T8 152Ȭq^]羨`GQ1EodTռbEs{ҧEܣ6c Wx*ǬCՃCls06? G1n5U"cEͶui:ؖÿ| 5l\(pBJNL׶y~3nZ2/,Uz<-lS~8Si?Lc 2f j^v9m9ulw~x])!C]S{c JL L}'#r|=I^+hh 9?-Wcj2~8*o\y#;ICՕ<7&i$gI~`f41]ڣ7Y5{|VcXϗѐEn1uM\"u$}eʛVIDOUs]FM%ĀEX̔ ̖~~~?{}Vq҇Gg9{ԗw>>ycwȝlIFyb0g ~丱erk!|Bmb&zf|M\ Eu*ܬ]"i5wfIm4dn/ n |Pׄ'743+E@ 骞dN;玃ۯ {}ʩOgS^ j`:H//2BrǧOQ73ƏYDaԯOg~ ӟ2gEL@30t$3'_I]L?3PKW|N}>%nD5c^?O6i#м鍧w˸A L<t9:m2K7FGdۯɈkRzPLT^_Ša%}=C//'2.%_N5 |%Ւyz,=4YY,2KkFI3%i(GDʳHjyyV[qx6ycaEfee=CFVgT"У߼s>#lNg>yz=-ljVx%d8kd,/k,X_ _}^V09vԢ=L i/̕@h/ydTӗoNA~ =`thr--|Jz>2! p9sXKz@?>K*6 yS:'`صNdE_!p+Bb_F\PptjAK FCcrsI"Z%*Yd3"l]'$hRd,ŐNvY.83 C8/\KNщ4zB #whϺʕ?BO+] nq[klM`Ɨu"* j4JRʃ1"XCf327 {"7t[66gg_ϒbW/nb6_ںvE,҄Z,r>&%_-F㣟QtkD D#DE \*|[w 0gc`ȓg. pXNtW,>@>U=!9H8,^o',Aa5i:QoA0Bڙ>}MG `VRl+W+iBԢa:ew1;-)z;A*[0[z751LVx~wgn paFl:}oW?=>OCq]U`dBqC2K1a2Lӯe mkz鄩ӣ-O$lŹ$gM;v_~l6V}@hHm?|_ݰ +Lu,Ǹlߒc1N|de$i!|xJ JظnႥ欬difa@Ǘ|5b~?o*NI oMiTЩ3^%D*BVAC[1?ڲ>.*3&s%ǽ>ת\{sn˶m\w3*bTtS/ &|<\<-x 41(L-- GX, 1VLHKK=fw4`p=uk18޴i4mfL2Ad"ө )q  B;:=`v+qx;f?N >D|l h=977{~fK1؇T ~+8t9~KC_z|=2r\Fa;֭qd_ż5C"xr\9y2j8i0T/#~xNK,8p ͫg s[&l#F$Ûr]%;p~!b~rѮY=ڍv,f-(MLwO{HrPD-ZGm药TURQjXK ޮ^EP?t>w_{m˾cpF2N-h/88zzz}tT.';IׯJqMNI>>JԨRU7deg#B$㬘Kq .X7-OTpzhF0seb75itLFI݂UXOnRKfmi:EƱ}?-y6ͧ+aCi6e$<=Щ; 2 jaAdJdsں@}E+zņ~ZHfTE\=,7`Rd4''"+!nsh)vsbGGC-XS3 ȸ~:wxi0QBN(7g}^J!˗PmAfR)lpRB9AAwÜeSwRfIWos詯3g؟OJ%#GN_ÀCy%iX L!i{\L<ةWP>K4%,0?Æ2Y)׭fG̚MxM o^РS7:u!L!JLjX[/;DEFɝXN jU?L,LrZ`q'}dFzQ.9%MP2W{XEUÐm2\DACQ`;D_;(]Na6+F#1 ?\KDx12$SI"voXA= hEjazwt)r%e[D܉3",$XthK;ՙt3prB0M-hTE/DIɐ8q/4\;4+8^t .zM\ѪQmq5 2(ȾZILWdzuj"g/^PAF Ϊbu""~U玃ɻA %~5,~\ Q2,⾡X-ViRG,Ev@8\{NMO>i'{?aJIKnЌfpkջ 8QKaWo 1q۠!}zY )aŠ X\23-τ}Mq6,ciZdj 9+N4Rǖ̻j0.C0TR.N9DV,zwl!wa־c$j4WkWIlhׁBFD#2u7yu({2R`0f"VHo95Z4/<8D ?1OsBlUQ; &Mwun!c4ߙObT)1w3ɏb(uaصX󐘷r5 WiVX,c ?;(@OHFEb2Y5DIk}(M= cZ-M۵>A O݂w 3iBу[F/ЇSJAUj6M-ĨwS{2j5(LPpA:ǙBzE:6Tcָ%F>n Vl{D_G;-Z[[,#b6fUH H\7:}C`t(P#-t Ma Uit?v:׸MGj[/mOcandT]oj,Q ?iWO G&=:=j($+-ݸǶ)<(/4EffjKU?IeD ^KMH0a2%@D;؞^qn8 })n&sO-Usq#x!1S t@pK} ΀з$ٜE:7s=7_(ϻlM0G`|ẇBmR}8ՃVSo]h~E ƯPaq)p Zk65ǓV`0WF~N%lh?j`4G -;nRkAQ}JfYFߡ34R)`=y66h%8iWa#B):aT]uѡ֞&26{j@t+qR|*Wh6~b %!һcSLMIwDڡ&kY 2F%g.t& aCMj/3*աfpQ@U /Vc O2cH5K* #۔6[X|ݶivTNZc$jfb)b$* Ee[{}"Df\ >E"@Q9p^aep?ݏ:8W Z&Uc2\ Wi4:ǑǐTi0W׹E0G-]NN|>mҖJ''O[fL҇\`42d<cϞ?i:sGzJ-;4ybpWd!jt8zgg8G&$=:VB~pݾ(- %+~lIa0ז:=878 (HsZMXa߆@!XM)E-I4n >CD4FrRm[,{oއ.b]b<60"D|8Bҕ0y]j^`QsXovN ;h}!81̵5To9%hF J{/ 2rwgu1Wa4rHgageY;Qj+xyѹߠ&[V.I@}<)^S\p4ŭfЬf>Bs?C'?v?ڿ|>h0$hF{)\QCi/ Q]YGA΄ "j7rlUw@Gj+EanjPHCrЗ^{)!{yy8oL vF(5G#_=yVyv/ֹe}RA[K_i?r8@w@>+̕[ӎ2S0[N zgOުI1 U]:5[Q:*LlK[&Щɒ!-m?~| {%3Kc_.ZU_Wn'iE?zoqOJ-T\&E¹K'YyQiX~P4SL{^v'];An\Lo٦8c?Ci$"sѴ/CjRlǻψfnQ})?L?>5(;),"= ?B͈Ǥ~{t$K3'_I Rb6~g~N:Mm i`Nu>z_F"9\3-<>DVcguk)%۾yCmvy4%dKOԱE1U{jC46!3υ=t0b\Q^uĎRp^>l8rsZU$l ja)0?.Et-(Eǽ0y_`-fs-h, a}S&!wk딂 H9|Zlw&2}6;G^ȗoM]e0ԗG/ﺤSw ?\nr8hŨ@0Pb,5p5-oB>/3=BfzL8Js-2s>!o~m]G^(|H^-(/ύ''$~Gyw)@{ nQݼ3?3ފ^|Q<"LyL\ P!r%"NK/xa޵ kN}e Oѩ S Yl'3V:x+W`@j:^9Dk؇Z`ޚ aBE_*=`D:{X`wˢ*ΔF._^kPQA Ɗ)s:LlɈ} T׋ڹǾ'5M;h`0]0 Ԣ|ydPc 0yZ?n@1m=Щ 0 "y, dFZ㦋Ix 0#Va+x] 6Ő>?L3a ,ݸW&dVIC&i~Vo} |!99˼?4:|r4g'[sVX9xfeg9Ü0tf3҅-ns2>X-s? '\\7 hXۤ=G-`q'1.477 rJӭbө'/R<sFƵ]0\*=WCK0UfXfܨ##{JH*Xvrd#n?WUzd. 
NJЍ| yJG^e_9?K0ZcY ̶K*]0[@nDz\)tf Fb)p^s%h(4xt3'7a8bE CL|h ;9K2g V!mRQB% DI rA]d+*JegbA\wۼ< oKA(*T/c(T=BQsEGfn >z w5ah4`]GFulX=NOz_N5鍷]†/]GPc`4cO9KqT(>>Яu fsM0?#hVѠvui6UÇ%لL䌴Bz#1G!vx06d'"cxDi><763!J 2-y14@.0qN&1<\FUi6lW7#тĴ'F/Hz]n$*/dқ aKz|/ro֌lٻĥ+cbD"~:J`( >gTGl^Ֆ٫ߙ&4=kZ Ge>qef%W!CG*{\\G} [1SrBnq\k5 Su^ .bcŷ.l[շBIU5'2a3װR< E3D>DinWm >KsHH%!J#1w';gtɣa xF}u,Feoh#F>8OZtZFkzN%lufԽjx_̘6>~UDnQKd} &dfa+Z،s>rڹ#S=0t=hEZUZ5"ÜB eo{HxkjHztIL"Jkk.ȳU(,N:=)o]fT?{&;Cl˥B{a=IK6ʨ4>>i|| h.CcGqQ18Hzm'] ө䁳xyK +huhYM{65c4{| sz{|gK?;֧mbp5nxzR4l @IDATw1.Ŭ])*U `A{mzQ^`~̫z(EZl7][ ދG ~h4W7qfTCeniR:woN,a)X#_`B}4N͙ S(K*5^a$ʳX1:ш+'K-tnHٶݡΎtI Ӟ-=o{4YvH@a93\T  `͊WS`uΞ Kpt8)2qƉT{k"(U( z@A=')te co9w\Kjɼ~/GPP*2ƾ(h(~nyto6"*W 5|RR$^@k81}D;xNFo/~Eˑl.Ҏؓb#|")<-X}GʄA NM .YHj$V*"fŖ؂aڴqeƨDĠu4asHŐ U}@oѰKGeA%H}Џ٫[i1[v##3##!%=D_Jބ4ٞZFhTY0_:텂*aA΀U tЏ2S*[}zRuXojk,;Bnd.IE+r9rLJx1@DzU+4 ,W (c SE2"m-xNPV'ܘGӪE}eFҴYݿ5x:yp 0zy=]2A nτ/$#J\-#EY~DzXv ,$JGU\|7#`ҽmcѧS YY`o$SؐM)0_6hʴpX~># 1" W9/_ο!A}"!%>/}D~iL(L[îY<~N$8cl:q$n=4 XOƄ?i8-7̹LjƼ*[4gҦ*թ޻ QjKFՊWf&."un!̱aj.kg4cT3^H->!Z/_/a|r+Yo{EzY$3fDh66; &ݡuv,Yկ%PQ ;b ( ڋ#hL, #1!RkHb%p^2w~ը2}0+= .Jg=<" 4 h61gҷCg` >a&MnE(vwԝ?-+WlٱӅBT;`|2̵aQ||>7{pOɕTD7g[wĞl_ %hpo^۴eѪ689{VN_J} s89i}FK| 09C?11J)'Q/-]ڶĝUa1XkW⒤=K 0¹b ls׵`c(c&}p^&~xN# V .Qko6sb@V6 ԂPQ|c1w""UV{Zgbc ҃7W߀`5Q2pjfjm5@8L<trQ::iw["V(4.*&^ S󞣀:,w t@wUڄQJv40RQ@K.mR=Ţu!+; Mm] Std$;"ݹe4c_lBGm6PSB?ǣ/Z-S|pWo<^-""œnXpØ]Nc`輸 |?6 qP~p-dkܩ03 ii){m|z^ yޫ۲r|M{0eߩ\H҄VA$ڵI`hF}ǞmfTُks%dxAHI'$܎hἥ.uֶ]fά~Q4 0EtmU!m{>ޅiGa;e )(l+M]a*#E2@x "9<6Fc]~ u&Odĵp?6Өl 䈦uшHPG>×r-2$( Agi^&ûj(u$|DU͟W BsU:YȜ^`/vZpAN%nsQW'?9PP@VVV~}[mAp0Y^1d Zs;1ɇ\3?áght;Ic_ |Z"] K]Me>&.\w n^nG C҇cK#Gѝ`L`rM{jP^2V&]~x멻eY3f/ct&,Vs? apuBX"!ƀ){d+@ =~3".ψ߅5p$Ok)l{Y As[ %.]uR}cOWvLaoP.Z0~x|vI)':-o%P𓙕mJHI3aL u0dge^Ũp)\PРwIPPfT4KW(#`ڕQm:hМ`TCa*TČz,3"#+ĬLG&]i٢ii)B%m (@ y?yO`l UWGYFܯ9F(6=" )a6wH?Cº@-2_I (?4 X,Eࠒ]:@3w&ay5'ދ +!<& ]&ENo)ۊ]w le.Cf!9q^Ҥm55hR!>$<Ӕ}Nrb"o(*VwQ[|⾄쬬djĊSMÆ2zfFRZJSG;#\# d$i&̨8 #mpWcd_>7iѨV-BJW(X6_7^$rq1;J{EڠAw =X>;KZwNqdw ۘ#h"IjIJ~)̲ue#1 P!GäQ}EhP:j >q7'Tm~q:x|51KmlW4zR CAPZ !qS$Y|\3?w}2 <ȜPwn-qΝhC %lk-I.%(3c؃#TI#Z~JؒF(|*Ag{Z/<:U=;?NY U&]e`}:8Hz3@y&O͇j)0A2`XhA܉Hkg.&†Byi/Z1 & .ā+Ai*`bz=jY1d0Z\Y,w74J H2 C 5߳63U@Ȉ`0S.h(yYc6HJJQ4b>PD άO? ĉ~_5>tp+0_M{p Mn7px6_8E'F0hԻ .#?Oч W6Fp34) a(mHOhI80nCp,Þ6]EFiX+†Qڢ~u82/(#;hD*,c@ǀwcO.%ES:(h(a}AF%w8r5~_G0(C1|΃c(T^҈N0:xq9 KE'QQ?@R:87YΪ3kk)lqHe%Ծ_QбP9ٮXfe6ѷCSD 1n-{zz^׼:BEIwCx1_0}gI1>@Fel)짧Ia}"@&C8QKQYJP̬O'Us1pcɬ5Eb&̣'kUzX/U Zmjv|QD/Nǐ4gS.-}z# }ߔcŷ_=160@3&&,#|ڰrX2hZZ*?Q7vH].s(Gr>y|nfow4V{ -8(>x|@& a)4[nХݰ6+ps_VnNMDń=EYpv,\K {Z†B2d@Be,C=>eY-eaė‰l ?qxpXbtB (ZPEm2"=Q Y\\];7\k48ЖsRPk9t!Cah2L+~I=hZGa7]XkI̅kbx}M5-ڰGv2~`]*hُ"Vk2o,d%lEiD@V2&#qlL\@%`'D48܇ >$pogLn9.ch\ 1AC>pѷj\NipNM`7̊|uSzx _WL_9;Xߵ<"#sOcI藡C1a`G!R9i+Y5Dv4RPj¦S<4L"g_!Kҁbr8`yV fZL~]]V:| !%d[69$zm"~ZEjXCxMEgVВl{>R&7ARߩZU*@#XJ_#R2$&КS'b6"# mLp\h0y d.u i1!)͔g4ޚ8mF<~\d"#S&3Bu(2^=Q'Kge'L@PIXDԬR1t]lwֱE3_;Q 2_`3:DK;̘s=1mFogMH[t,:x5Bօ!qLbO Tw(m\ J>[G?o{C?QKJIjw{)SEkJCU\i(Y2@imJ*ɶ<~NFL9S[<-(acG qOjĊ!.qEΙ& ]Fbr?ow.$c>صe?7? [՛իn"[6n::x?0),cLr䄯ލ\. > 4zŽMј3TujW1ui@thV7f(:1``>fHGߙ˯?r>s0A{ڧHnYOy2g`^vMkKS.p?dp$i3Hs4#ơJ2VFj_vB%?>ΎR{k h@A^RIGg %) kT;:%.?좉=:{ ݮImYERhsI:a³Axz; 8|Ǽo'H8 ";oٖ*g/&+o4?A(p84x@i'L9 I\?+;8+mFa--sk -v$$_gg?P~mt@ ]Se@P94"rLI0 L4GѤxbokO>'ԶXK*@rX(4! 
@ iVn|5)Sa >Hw^d.m Oҩ' 㯾MP"1 WF yiPn7m?x S" _IL:}ڠ;^fV:+wgGC`)FBV`/_g/{oD|I<:SFZL5oo yOtpm&A{ʖ1P0DseJDI *I0鏺X&0rI'z_?K'0m"XK* `Zм>@H2 K(( !M=7=s GtR99/ߚ(͢ػV}.h87*Ec1oSӇOt[24#CF*Uxj[Ӵ4xCBCxph'7̱[I8iD <hV:maRECJв /KV k3Uʋ0?-!ͅƖGE܉x5ʶ1J22өWd@ZmR+xF}G;6H\)Q9 _O`%!9Uز_\MLѧ>"&Jq62՟;2V2Պ0L.\RI}+QӐ$GC7*[c:' >Dl1 :}cL2i5k ACy }l޽LG sgoO_h}ȡ~FSBnm pah8{詤>@Gl%$;"gJ I̗(\hxqzV6EoZRI˶hdLKж K^8 TLlh|Xi7YAŇ| G"ǹ^6γuWGU5`t::ZNdfXwݟ7qa.lw bsb!#}W]G(}ٿ3`'Ǖ{"O[>wIDx'M}/|_ :jq\7r]@A Df [/=rSSkLʼn<-a]l#&c[q=wɨ-QjE1aDOQLx5YF-._'He^+X70kmJ`R V>)M_fB0?ݬq;M8_|Z 6f5 S>ZqN5ȸ.D&C+pBkEit֌-2rO,Qpbc' "̨Hyn] F#CƵĜ%DXLea~JbHYIg\g CŽCՂl@1_GWI屑H|J0%ţHd ,v~G;jU#bqTHI Р.;>R0s$(bGȾL'Ic0,l?(;DC+b.x"(=ۈA`k {3f!3 QA/x`/R2 -R7dqú &}"pL$D'lP0?ib̐}kҍ12+l*@#=l73&e՟.FGaڵq9A4΢eb |qnmO Ev$OW1QP~[Z$ӣb53Smc'uA{(5]LfӲERlSH¡ i@So[]5$Ldt1>4oJW<';Ƅ$[3NhHӸpn{I3Y' @-I22brZ%7_{h(ֈ:U]%x(P@XfNͱ^ɑ΋}"dՠR}+΂ d"&bJCeCP0v91u2 NSдPa"&R@v˾rAC}}n;ѤvYPiZ7\ kwl 훊Y0ksE155u#4(6bS j=!,O?pdfea>ԜaSZlRj:B 1:HtV(a?'6{G-HKcm\U}vPj0Y&3#s ءd!xS$:z9'cjM(xR(&з H_+bKOY, 5wxKL=s8wfx;8Z&H30?VP4ɶ*St:Q.Sw DjUe9dEA 4=Y F6;7>wM&@!h+ <~N"YNre&!97E ܴN5mƹf jVIkAm r ͻ2A+?|߾} RVW&eI\gCJZ7c1 X>4o{w5)EԂ4,߷?ah6tk޲bokOy-_KJEnqMj$B-"* O(hƣ.hƑyà gW\JP== jZŀ#jpG\_P` A0mQR€C+j-ՠ`A)uP+F^:zI.I;|kʶ]†0չ(m߽50@&AуbC[ |lgqTeu섯r@ڽg%TV׫^\$]yh6,g X\4ϲ&u5Ha 2iGT>7yS !\A )`FoK b:ŤQ}--}x.(Dh.(h0`op|+OV ߛVae'u^Js-򴹠Muok Q;}&ަԜfkҪ~LmQ}8@F4H "v/KnyB18/?^u?Bp&dt: =gk8p1OKeiXpe7QQ} 24i >dd]Zf}fHnTK~Z B߿o{8Jgb{3:χ X:+3supg_о[_‡78nRCU)0:~K7~TM-imq&P; @KfsQy_|ma9B' V+6Z:@++s5<diZ'o`Fg p1 >F4*\OUg5W"ŕ:żiqKAC Snl^u!1Вx ,O{сFt|`rJkͨ^ED.j7N}"Bɤm9g:b6҇X̷V ٓ30Lkw]e2KMwEIƞ-{{/>2D ?޻~D^ E99Mh"~3i>zQ!uM/d?0AůC6 #۶IWljU[ex_KZ [ڂgn`i Ͼ:fS7cw@8xBU]\$} k}+!#;YkWǙoOB8P-tnY_PNq*&-T<@ɈX2 xamyh6.eL˾>#&P@ow3%襁0+q f8td Hv)eo cw*)lq6c"{ A̵a6jJG^MlVB RkE KaSel>w͌[̭#R} TH,>pAHֻۣXN}w4N}RbGt "J%w:m&a4Wx``: 1#ԯQIm0m&NT%X,&na4z[c&)d:n'iE0E8vr'zWI3j6{j#irpSwטe2Sh־5B^8&X_H擹uÓ|A#1`)h-EIDeEWf1 g2Mj+7{Alk+k)hv4=k[t ?DysmUiZ_ϋzM3}4ܶz9EF /OaC ^l@o8:YroѶ#?"M |whW-3FFFk \6>"007L՛̧9ƹy}6X.<6ǁ^^MQ$ڌ(h}-6M>t ByF;fZadypXUsnNpfwh@'a%1$B+4!X3u-UQa4"$CIPnh;L)SEX4Ђ#vJ"ܳ^ջ|ҩ׀dט6sÓVpԧCSQ0/8Mݑ6N1oVyg5M+7Wޘۯsd"Ǟ ~\Qڱofaҵ;qHnF) L1wC Zni([Ʋg1I A!&blkIiұ6E9h:hN(W&D /9[A\r#g/?b{S-1WծI-6屈_FU8t0B֜^0"0':Ҿū~3?ߢ& (p)(O~1A Z=iS^2<hNpJA}ӺwuK7hҸ-2m?x S" _I@w4H%.y:,,8#zby~_޴+g_ȯTѩH$*H_;ƌ-ψ/HөY91pV9s`:e0t{ v5FNň33o(̤!jP@sTUiT !J론mZb[>Pq y,tz5eapi4Z#f޷]GN_}cgM's!H6N"Q;JG`[{}.ԾX rhi](jc!Ӯv]y A%Jzcw2肆/ mxr\9~~}~ Cq9Z=?j$ O>:>rWIn~N[5 /~f Cdr=B> kR&JǥkQc&94Ij>nɬE2eCBXl[);(_173J<=_QCy&[B  C>93FxAj˜Af S8 b/4R`[ =vN`A^X,+_'F(hk 1c(T=yEV ^,Yع#-\ A:r@}Fj!Is<#>BוMWںd.2oja͂('w7PlT\^j4\6o4V+ KY{hvc;DžąՈ~_?Bb^w6lu83ԡ4{q|s 5)`hZ,gdxW3LE{O?ȫB\>_ќ>~Ю`IbX)ճԬ<94[!A7b^-hS{1ɺ]N0 ,˶ۿdR;r!>ַOF~m[xb:8 3Y-^7TT-UVV>԰Ă;†sZiOjӏy7%}w5]bرoRH=}4tlѦS 3xߗ4l+R|ܫGCh |=/6?~ixd(px#:N9I5}7 Lk^{ z9ݰ*pL1'[S+>'lZBZAC[>#0UJZL4oA`uM2jBD w),l[bmC=1@歔Bs"P Ӳ-үh @M7F7+dh)C_Ds$=09c{ThJSItMEY~Gդ3'B15 GThH'"oWO"{.gG&Tlw Db{b-H<2`xhp %6Ed}4 `"j5yhaS&D59{柍{m A*Ə ʲ v|onM˰)V Lj{6!?;>sF+ꤠ!8)3E&:qydS<6f4<00$p<|NM447۷B2z|0:D*ۆGqN:휀EBkr0!;$辅kZ|$y~;#Dqú$dON˟D>JɄRl4hE9{ 5HPyͥB^P蝃2u d|fX e3|ǀ:'?E,X|*Oa%0CC2ͱ"v܆x|StAH .l{T!TsJL򿿭Ƌ7 e{ۆbYG,{u8x/M@K`c&c=紴yVՊ C@n t󙈹7ճ 78W+m/b׉76xd"ǟPl?ky]⒃zh9i6_`r];" xta$w #'Ȅ8(%-=y`Z%j.@4{Lf \* FPF`rRzЯSv-!ҏo/~^%[6@uWk`j3ocv&,jFJ9wvj!ѥU#>9Ԑ0h@ǖ$ -S}Q!=re`]о(`0 FBEGhev"8c $Rg&_=iι-{ wb>iVi5*W GÁW]S]8f&M6~E-FcAe 'u31Q3 9O;,[_ Çy%گuJA-1Sy7 jEȸest"}h;&( MRxOwA?D3OX Cڡ y;n״*`1Y({1BM$TPy@_/ÀPP&@qFVS_=X#(ėņP;_/R)(m۟kw-̟öYդN7ejJ e]xg(.E +!GƒL\_@@2A\n;i-uWdXsS4߳uqj4e\w Q|,Pǜc==M8%M5 &,:m`i.|>M %rCMGzD3c^ xWRC0s0wͮʖ)a#JE^sQeHINW*8xaeKG3m~Z .4&{|+\@ e=wDHXy y ”j.lA;BK` >CC'˅u"z!{FxPI7iupP9C-[&uIq# _@څ 
_oFiѩ;]x_p:w'*MgAXG/m0  =F)4C^^ "Ra+6m8xa@*z#j޶q|UU?,#Eb5mZM{XS$"sQ_bPV͝L.Gx_Wn&lC$37e,% }G"fAMO>/5ǪH{ISˏ͜>muGV+N Zuo^ q@7֨e`t=nTI6 Qj@zAq6o݁͞03-'E3eI9K6A(acäPú ^QaRC А`a ZxeqX4ݢ >秖|xVM-Z2M&ɨdpv?wA.95[ scGC+kO`hT"O|1`&y;YCt}2OFC&Z01?60`ޖ%jR $^tE5`.W8`Ve݅Sr8y4n# f(Ln|)8%0k}?07h$P(gDb$E tp51VKAC;yh^J|(ZFx:蝂69B c4_ LAj| S H !wiVacޢM@SeFuB&u a4Pm;]lH:m~V Zws>l4i m:c`ts@ ZOȝV SpUs*9 +w k50_}8#g P ̭s4>8~cuEdK7%}Vv rnS!N("rwJڿ~נU+ @]<0ccbI Im9Jj-܇ktF5.U=§Åe8gʝ!i˥M~O/@AҸmC"_^PFyqgj[-˱Pϸ#7rQ,MJCx{LXj}CZ4 )G2_\2Vk>kimg@ HAқꏭ+rMƜFJL\Ym.'`SK][2ذ'8+ X{#d~7 hRȄe`BVLK&~+ T@uA ?C;+$3 a1_NeY2 Π@ C"-'=1ww]>pR1p><+( -U2<  Z?hlO"ThiWGk܌#mՠ c7801 PT(c F`|ip^z6`hfqm0 R7"MFⵙ; rͧeW|Y Z:Hℂ?5WRƼ *à/[ghwj$wƞ@³R!hN7(S65pfLh]ʹK3&SSF|kJΞ8'zí4ztݷ6Z4*,ЮP6٫ד.:%)-{9xlg*љ00IU{,.XumS:p)ԭ s̛:@=hJ+xXX{hD-hZA܉RP`Xet>s1A+Pԙ}\9)#= mMWO_FԡR"ewɴфh%{ u䆎y7JT4-g%t8[A+ThyDO[\Y>;,A.0 hb/ VYHGIԴT,kk8 B՛F"JڇWԲQ+ ҏ\QͪZ#LzԨѬkn2Ze;_U gHpY(Ep #*9R@u ~wϤ=0w/?z_A hU%QXM99Q-X*aC lNތE ~g _;j0r/v@+xׂ2k6 rٌgLaAФNUBbLj>(6]U&6w n00nKB3V3$]qYk\jwE}䗉;%N^3@FA@aX*xl}-]>׽3ٜ) P;}jޥ#)5MrVT0~;o oA,Z l4 h^H?:z30k9.hCZ$2a=1\}ߡ3;bS:8I\FPOdTQ[1چnFdO[v'p?(T]aMS$wn1m܏(Z_^Lȁ}=F+љ3$|盿$>T.<{h/|GFk@6 5rsqT 謑U! B曛4T>TiVD1;&zWiaB' Kt{0=͸j.t S0ߍGN-PEڠ2sisYͫ.U=)rzǜӗoMٖԷ򛳦OͳX7Np0 *57~zrpFJBA:hO)j4e\ql$.%5$j> I4*66sG~'=z|D@VVV~}[mA hӣ?rp`gQEr]ҪZh+Tu3/~ںYæ?<-mpU-PޞLʍ@Ehnڤ#:QAjH- RoY'=AIj6_R\v7ge^TT/nմ~ G锛FFsg ;;G20i~ 4S`4q+ZP֞6KZsk 1,}JG"&kPk=Qp|@$[\@8~8Wq /aŚ {NJd`tk> aA54eN|ChAZv_pXҤm55hR!>$<_Ӕ}Nrb"ߑxV ;b(\)67LBvVVz^y2zyb)&a_y=3#)-%#*(hA*}9Ǒsypc@oxM @=\&O+`NѸU lG9muAsஔZȵdٱG,HP6 %@"DVPؒ) sٕE:ՆZ ]v8g;bβRNwnn^ɹC+lk t1: 9g]:\M}UukKM@Z…v-  ksV1RkA@1jwJFAi5lN@-FшTVc.9)gr>NOz(t(P ^>Ǯ E&up.QB;8>|O:oeӧ{MTjKNvN͜V/z a OgE.' 6Z-k%lp!@ Z:ń+fA1{ZA1 <>ǘԻ :}XEOTc "PP QUs'L"wMzIU,e|.N܁{5op*\c@ǀ30 S+aC߬yg] @>.8O.r6k.,\d(pX 1#YPcHƀR(p|atnp'}(o8XnP(C HdK7Eߘ]◠ƑG톚7;G&y1MZ2ޅ+I2- X[{GW nGy7}6:VA}eZUlFP+}_y ?h+m<7BNtE_<)ljYa&NʼnPǏ2,,ƼCU%H(aXQmm~Z{Wef{?~`g%*"/\ s̯|D氟e!0S.wSśĞçEJZ*>{Ѯi"׭*`~]thVWݾj~Vg†ZT1ZB"a@;Y)&L+UNEb{ΑN(g{ת}ڳԵ^Ng[}U㧘uO 6[ -^xP&#GzZ2n2_V UKj:ϵnǀ<|vŞ}E ftU'?`5 T*A[k[V\† ?iAP U eT}0 "M 锟ΘMO&cmA>NnB+Š%y`5-l`{wJY&uzQB=3|mS'NuhVxhP/!9U_Mĝ!rzh6Ocs:;Y4Oᜯγo07D%hКի.?_ *vŝL"طC3nΒMF0q%!EluHtjQO=sQL9;?,jTzow}gG4eV)FoڎQ:7Eԫ^ɳ)UuǨBE&lDF~x6dLRg ~Shp\h0QábIی Ii ?,8oM6#jǏkYd4+2X>YW x*Y JIX`ԬR1t:*[:ulQN@2_p~V_v sǁ~rB'xCԨiVu+[Z5)l?(Z6)/+Jb ]/l$fg(!\th^W>u^!* 8QObrva7BԩVISˁ][K $BB,EĈ>6KawVHLM-U**Hs4+[:H VQ3DUy4hNH m#cu1ѿKK:6(68xb=7]I:a|LJXj.L1'[SOVTȻƀ9羂0UҩCzĪKѶZǀ22{cU}^ջNd> 6ozɽMј0bҺw8CvoH"C/oS6:v;MGN_9QhVy~+)[f@f?s\pаVUeiRaa|3",4D,ТA +61 JBآ| 2g0 [44i}sr23}N)<`?]UjǨUJ2 - @ӟ,5tZy(W&D4'fun)!cQ}ERsV @ Z#_KYmC]|†f Je};qI2!JU$ԑ\[ً =/~OQ {Ӈ}S7ЇxbSBC)ר>E˝;]r8$_JX ذ%?W^|9zF浄T^I F w9)❯|gwk- m L`ʋ1*!)ՔuU.?R=YӧhIbV|dy)ݴn5椑0#AU:$Pѱ8>Q`1 |AfA:/l;(ͥ(t2 r e/SFqWe5jp;s1gL"G.=3\"'Ά_}9 G1]! h nK0ʴ'L4|N$Ju[ x0T +Dﻳ!nRqK:AG:_鵠{s&yNuKjɄx3+,I3;R%Eyh6힛*%_GMlX+wӋxqv!Z(Ɩ4j iWS"(JG߀R%K@^9}Qѿs }Dj:’ج}vqU7Fw KBR+My uXawj㵂\OF[5~uMK4\:Q[{c2-i'6{Ӈ="ϊ@Ā_fᑓF6dh7]иEhB 2Oi63?C[Mq]gp:1iC>c8{X#N_ISMsӫV(+Kgop8K:2rUk_ɪPoHfuz2v!i9-sQq?s(M²h== t~aWRžgI (gm[!-m#,at!"ѤJl-͐OW((GSt"g{|?.VlհEZhSѨ'4%J|E)j4|e0v+ KY{hvdKáGo9K躘C" ů/765b|6jQ\s 4}6tǀDԛ?G[ؠ_!Cv}xv|Žܥsr NX@;fF l|o<Wl wiG<6Jd{;`oLؾ~U,ڪ lzq%Щ.x5WMji^ޥIs.A0#0B?\ Чⶠ#LHN[p/L)%t;r=)X7G^by;~tunEVIf{B+QӐ2GR嶞8KkG4,.0Ql#U<v>8mԬ. 
=q Yq|t]cqasbXN}#O3Elӯ70 i!@`, ]|u|͋tc |><1((4{".jS|l 3@D% Q4Vk>k.-uMM9L}O 4<9||hf-iN׭uCC iBqC捾L܌i~Mks]Ѩw Y „ɛ'[f0昗 J9%2VE҇!"">xh4[=[[T  737|`P.4q=zh4v=JRƛ^t\u!{Dr՟{PWqX-֔(dfLKKNs;`lmbqEr2l}ri@7 )Iv24bM4:kԉ<&-.)\ 5!̣(3JmWz.ǜcR6?J;B~m]չhmGdO_EȘoc਀)0` WGԾ1۵} va"[4\^聘?0u&KD|O+D܁{2҄7tJA7fsw?Χ .p $]pX=bET@PB  $zBHOv&wlv7mw9MfvΝ;瞹{Z%dj6nAOsNBRx2>Ǭs/$`e$H&l؉ɽm+ڴ7%\5z2Y2tx}jrrEJ$hn.<%Z'[K/_D,'6U|oA\TD"Fl':4+1q'3W0.-+ZXn7bמO3Pdb˛2RS<_1mN-_d8)aIݼUXוƹrǹWMt͊2Ѡ5`~]%x%ɸc9҄;1Z:Ug~c{~͎ՄoGB1R&r}2:qD{ct}&>DZQpz/D_;~*ģ9Nd49Mma9}YԴwSeV+&L33a܀K:~۪eSR'J2p֘8w Nc5m{C{:+cǂġ&?oo3Hf#-̡@/0𓜖aY/뗾)l{j3(dh4γ:N %;Ŭi;Q`&b`F n춋8JtcqE MYNқ"ՈzW `3sQ; @>tB '.˕AJ!q`"1tM¦XL֊w(O[4SMNڷ%µ&Z7m!\CM1b@ *pƼ`4 ׆cV]eC&0D?&?zHy&]v)}{T/'!,@ٻ̇ 3ܼx|KAU6F-c$3L@N8E)@d& P6CM|~r$#xu1ص7BCO -@J@ ]:`RX:~Lȉ8Ҫh״.1i Yf5هGC{YhiLfQT3cĭyO[ev*}yDO釾@8ӿԒ>$S *^l 7(#z#l#P.#Yʭ"p$/\();Z( Q [i79u-+559Z8Iv6i۷R`o/OvK@VvVzzjjbW1*&284XjBJauWGҩ55n7.] ۆzMZԬ۴isLɀjG3/BL3?'%&Op4~Cn@ XLe$'%$ĝ>yxo) 2X)iHrE&|4&|д<'飪/}>Pc{$ (&{[],@{DMX9sA \JH"3$.3r➫:59@ 'bO9؃Pŏ$ڂ Ww7_~{cuX1j=4@:{FH!flw NA-UjG{j3.++[~ch-hkk 0E1 5vtC$3CE'vZD݃:)aHfdb=;^Zk.ygdA|/%ptkl?Q؎5ås?ԷzT3[/gGg; yJ rs2`rJXk O]t9ڣ4kUHdÑ&SNӂN֠ajr%ؙ"9uaM Kiz-f 7L T]NLSs++Q!IOMJc=GLb\k@IDATYCHiAbNK h+|BBU`u\P)Gc/ A+ǗCƜ[aCa@at󨠋"|h&G2xd8cNZ0p1iɏ9ǜA>xs<^'}ߜrZpeqi.IF"6 ɔ>t !Ʃ䦀Chn)4}¼{ qۼQ%i}b 2FjI~Jy~x][FWϋgGG?Q)O&Z!C(tALw3Ib#bѺ]f2RLԷc3)U,KӪ2ˢxhdinB2*#_:n$c08 ='[2_. ZC璀eVvK2Uw܀>aW1 CxLp9}<6W XA'ԹV ( Z2{ET -~9mZ4wL6MrG8 ZN tǰ\2YSp"VӬfi\yOߨmsAT@fUW׊:ja3[B]FĂ!J2EWcqumU|$Y#,<=nO[WA&#}(LA5uŰX f{2q܈ ~8RPJPLdr G,š[Ϝ H@2I_ćG/CS(1g6HC }4PAǤoE#L:mi)"Zj*8IRЬ~ذ+ZT#[Z#\9c9ioL4GoXTlߑӢY`e!0 T,RGXDᴘ{Rb:ce, QG9 KG%p?i 1z{-Z%zU茶M J`֖!s#Nz<(:?j`7iIC) 5&>)3ֻΌJ}ab4ƂS͗@AC!>Ѩj+jJ0AaEJt^쮾J6YۏAdxq0llf}!_ HrʘGTW;zV6ɝ '{2\eԮ+FAn#7!7bܸWw2cV6f (]S2JP2MU4kU9}=7EFIVUk†_-+ ?0$σĸ"̾MN:˾G|(81ϝ>Q^W^R 4)!5Ur\%ҼB7g0J΅KA02XUA:"FczpрPrmKNF 𝡆BEUq|夙&f@$NFt^f Ԏ#1U᥉#,FX36#iq@|q|B7M8/rh!>5ENjDʭz5Ři-^E{/[{v0 3r;>^SPS .i7UFgy:֖S9đ‰vo ?2/y}Zt0b~S;n a#k>I5Pq]+V(;\dܡMцTv;*xd-Y *}úi䣉sY`F†<שCFPS\V0J,myk zIcnғe;N#mTX.>k>f>kcqq,h)isݪ8dI,Oǣ/vl4B F93,`Y[aCi5cW7dJYTzjgNxQ 57ri~_pN$,)\d?hp[3&o9a: Ā=#V`hJ`%&[ߙWI$cx]7sf+͡y MٳСgKM-wac=Aa  s-3U1"HupC 1Z))%h2͚7y25UQ*hct0@3x|^ˉIdv0aN7>xh%Õ00q#kԹgIy?g X X`MBgȀ|GNIK w<^ߋώ{WBh>,\S !GN!P N1U~8W5¸Y 砶Mֆ4x#19ouө.lWCAN1PbR@d\G^yYOrW 0;+5K0] RG27UWf8] VWcs^gf`\ǹ~)垝8bvnY&:,14GtUiËWցNN}r)ZU9]t 1P k/ʽzٛdx[YxK+&4y![,lrُ/O٩y=G>eA'/HZ-)6-+w+Ӂ;"GuMP4q] N e̷;uaUa;E}˙C:b,b[}d0NjյɱdOsl4?r*́azlL9{^1d /L@N*39vW``鼡=k C:mӤg=;vԘ)ou3*+xUvLziukT-QtFދg1'(8t"N:cTOu{&S͈ Z",IɩbޢbE sCtܝ4oL, ٰ"D%h!uWU FjRD {v|>\nm,ʚCa޽e62Pa)y999̼l9 JjV4aTO+91m _ù s bÆ%XF+5Z7{K Ljǎ};[4 Xa#Cq4j09DN?+CYƧH=E+$ZsG>|g/V* )3L҅["Dr ʼqo7cMA50<2o Fo# |ޭVh$gl@! 0 3Uk="S쇘S!]=1rj. 3e[2Bbp~+\ PѯSw}PI N\SW6*?P=4L~F`va\A_) D .2x[FTf X0kUنy?ͦ>^F<-IcV`)f+F"q$&#ɚway=6."1 횆ldҲ3}ڿSsb_TYY˷ jS 2Iۏ+#m>硞H 7_Nr6jv:!#EE44,ɩbTvЉX\H5Dp4%hK6GĈXiB%'J}}D3CdȐ+i lD tW\ONpsRf֮!R"ش'F2=6Yi/hZY ѫ|dvmEA!xRJBˀ"Cxġ94CD*4;nO-a&x1X!"w*Q 5( yaCG]kB<{G bx6vУ@=}fIӣuIJW"855Y 4]zsJ߹̀wb.b}dѱY=q' ʜ0i﷐b{tP9ŭ̥4۷R)6Egٺ>b)l|M9t+-):Xѷ#LOx^[>>w ?^I-ntAkRGL͉YA"V/ CbʸRƀ F2Vѱy]u&=[!D(}Rd{ RRcg/zC`5qT/IayN}odxp0!;ًCЗv0d#6Y y+6VlqRLV_, A5)>#3[/ޢ~jaa·Sf/ ~>Hw>ɸB3  QDW0#yyH6SC,th"smtpVmu{RkDƕPZe0Lj:7EZZCpsZw:.:7pu($w7wBD&p,: {]7s0t뚝4S RHa[>tCAX!R|d 1`SsۋE^/'^G@ n(A? 
LC ;g(2u_l+*.WCx[sϾrJܫd?i_ ?F=ܱN#RLgѭ=4Jzpj†7ң˕J;4W0ĞS@2gWag.n( vnQ?-83 MldJcPd!_1\e5,{/F.~^.!p ԙX, )mꬅx6;^Ъx[j[Ș}`&*W!T W@EpG5=W؍tUڨ ju@C %qjdwA Rd4EML3Z6AWj@VvJczM5%&!եK؅czvm)}VZPfiߝ}ۋXx NɭۤFZJM:W8qH+.4h BLݱF>`B HquTI]k^"4X;V)PĄu@ GS"MMfͫŁUń<n\Y?LڻL1?7kS~|Y.:QV_j>r.> PTyfT&c22or C5W;iB`vi.㣥fNo{XtXб'-5!d^-ʵ0 5%=/ +&8'j+̝rܦ<$PϥegL?MW9aOi axj*T4ys:f7|?hfCY+=״SKC74,6FS3 Y[h9sìP~S@eA8lRKx䊚= 0+֩C{7H߇ CQx BP@M}p3U8YS@Q+w4ɹ i+cjr2w'ҰSTP>0 3rO\$r@K2k>+{a>Ld~&`g[ F~e' $+M_\wfRe+NX]֣ ëܥ3J(i(p@b:0$mF:e`A|Vȸ,@I}pFm5F.XnQ^j$~00y^ GIm'J ey> `c84ih&km{o9ZaƂrpPՂ!7}~ԪiAiɬ[[Ε;Oiڦ}՘=qxnЕ)Y$U^97/\-W|"~{[X<ǔ4X*{h/=.^F'MLLnj[$ tZ;[i/BN{󗉁]ZB;[/<'/ejX ,†OŊ,N[hiclx[3&\!lX*tm9KK{}do<&[F"-{BLb`L&tiYO8x Z-dŨ#L|l-|G<(s}Fj&H;>n#U؟Ln0w;px0R#gE= >6)ymED5@&ݴSlK&92Rsmӄo;vX"#j`[0̂ܣv!foΚ0@Iw0p5Z X7+춅4 aHU5H胓zR >>c@ cEzH1 Yo:k?|yN4'5) -QE +K[ SN2D(pXоix`FE*F)$Z ]ԌC0駥=oUjѱkO9 AztG&OEv˕+&n25,mB4Xu x_*1!pt0_-وU` =6i`u ]NBqJrR|5KN-_ a*'hmLc#D}C#PH5r'̶8?oo}hNm$(tj95"h흜q<8yn جk7pg}2?{:?(Gs8N ᛱ ΰ4Mf2XWNr`@nh*6>>ն3J̉X:#I2E}I9(v Z&Njpt%p[k;eK UPQ*wfX;}̍ÇB*yʎVFRZ= o2??^#!}JAA"(>X@[c[~Zg/13`F,cT _^M )f}C?{vOP`iJa Gѥ3L<*W9*X`v}:61O39}L'pX0v`g-ҷBÔ 6h&Fh^GA\{K'}éc1[QV0Gqv}d(9r+K h0ݰ@3Т sđFвkq,  =+z(UZPmݳ]$.?4燖hDQj[$bO WiPs[x~F \ipL0 i}'}ˋg"6!+!aƆ2Z#M]Z! 44gi A4eRw(lFY@ҸEiwvl)ɝ$H:jNܸ](d*mYuмж ( >4šs[˹cc®MGrlGi D WS3WMxf]Ԣiͥ=~p?'Pw[EN6ErvT2Z26 4fJf9J!(*'ͧ80𱡉[cWժq+CB{u@h^2dͼ`e( ӼU3o6Ws)krҎ{K~PY>ҹ S٩ڎq_}KAC zmwtUѳؓ'DԪ0ip,C:m f5c:Cf(\3楚y/0WOeT⥤WU/&25OHj :< _\D.34ԼR*J 7:cLh:/V!e 0T*䉌#+2 i <j$]!,CS2ޅ<4*DvW]fY&He俗0r~qUha_+zX%),F$W!-KKG>{w%t[0-M; S`1t ѷR]^HAv0j1 Mw6% c¨皽Y(ESBgw4}1k<YA f%=00TyQ PG_Mىa-Y*aC lN6A hRŭ="7JRg✅?&[ ;Q`"{pssݭ^줍Ƭp!GaC \5IkL9.3`M9y^^yxd6[Vs@6Cf_Go,)):^5 0PpAiFJJ};zRX'ܔ@=`ئ!b}O-^j# 6~j}vV ۹'We N#s]f8w&[@1SH1P|QhN i CfǼ@s}x4`lfn1/މ :jګʹni¹ϴ}/q839w7%5T*JMZj?i|ґIun j&SN) 4rX^oOmO:o@.Qg_L^[ y> Z2 اXٿJ78~T, zt KH:tp2&j" l0gr@!2Rvz7HcĐ `Prt['㇢޴-#s_.l(ՙR30 (YVeoH{A1hB2dyh3\rMf'-W #{%v: aEC'VN|Z &S†l&>[AmRc~e*-'` CQ10w[8pG\.^8ekaeNLoS.}&Rܧy@isq!BA70͈ԫ}Ue꜌p:=$94 u-\!f ye/6orT0 N@"P@z%ݧ2Cn"UZgd,~Wngo4JpHa +K>߯ u dh [򲐇 Ԛ< _ʨBtuRĖm$@}<7%l(3*E:}ZϹ npXň76Yvqo`jv"@9)0a57E>/޼%<jvA8tkpmέ €*l-1b%-#lN*f}Cxmkз2d5 ,1&d́Ct0ctioe<(J` GӰ5H ֚e !u.Ms,V9euAZfڹamXF FsS% &f\-DHlc -3ĬL؇A݀>L:~۪eSRsE:tndA:$b> HJn%p[FꋒGjbЯXT׀0&;d4etZOzf)z }"ǏK߼7s  6hIaCM8ԡ XfgyF[+,c-1=kadvh6~m 0ͼ^Yb[n_WF@2ƈhi:6w{i"+M1Kڣ͝=s9 MpCvHө0#Awa^n7ݾf9Zś 2 TpU"pd\Ͽfm;6 iܴ_Py^yⶀ,驩ׯ_9}$&*&28Wd(P1]i5HZC؁ApӚSQ8~h־s 5 :Si3Hݷ\ūZzzzV`q$def'S'VJ8iR']8}ȁ#{*(hpAs?؏+}c[+# hFaʗ }>:zJ,#y-+hFaF.^c%hxA9ΰd}hڦݓ66_&C#IRj"מ$o>l8&@?<ESnJdߒ) s *u3u*:@k^=g^lQÅ {YR|\¹~<:):V}zyh7 9np ͱEj : cb4˗Vu1{+7 W Vi1{]q+Ёw"k&D ޚ1Yd a&@MJD0tT|y~up/ c S:~%f9-F?v4}#F7~sW*TB5sp(PqA-RpP%dt1`LnI?m6.(JIW!{k@fKϊ>$WlصJ$Cf 6 j:di(pcS*a-ecghziB;8`'c WMҨbfy/sp (ܫqcP _5vh5O8ĀvނE9ʁU) tqڡ*afIǜX8P4cK:7Q`*&NLp&}(g8~X.P(C H`Cy7E<◠G7[ E;p ̊ gz_~_/ukԤ2EE rgn\fķ[ o틬Z`"gQV9P}k%UN%d N({sǪ}ڽԱ^N{[yUuNݧ` |e> od{^5X;!P8'E&13}\0ƈQɧeVhVFY`mסHNݸ$6ޓa9;tߎ37#VG,*4QRԢp,b I[$6H9aTo2 i)Qz Ӆ jN2qQ!⎡w H-Ƙ)rHJEy[?KKP!g?1v0d25DMu%U@߻iI&Lfi̞:}3fM»ݗrC;/sqzy w-Мb7B_tH01!E{AMDo[QtoHܳcWlO3@˳ߋL^pZ 1(%JxE  ?  "NQH=[g_!=< ;zZϛ=%+ا P+idBHfoh7 +FqR:&uLN%FaU x2S̬Gy)HլNW)gR79\+[V50Fv3-b̕0krBw;q*?>7DMLr8lI6%#]FUgSC{GΈ Ƣ 0-Rr3%D}:6S=8$+?俨A7gqj@jW"ŹK hC 4'Y̜GkRwLGm?4=~81k 0WEJbw #װvuѯsN["ő3E&!FUx0 ^mqٳ}zyr~h[Q 5ݻLGϜ'0OS&sT)}=7V(oǏBVYf5aC' t >w1a,&cb>uR9%Ǽ <.f#}%dƭgAR@0V' XԂF@m0wb+·Q,D`Bƽz`̘+#:,>t\% ZLd2g⻿bEuDDߢ N;LA ELNE)M97%38f4 גMWPQQ?&97\mߐ~-Lar:4X 54sX`f~vCj/ 3?s fa.  _fkx66R0Lwj^4aTOPn @h/O0>jP $ :}`-@P#0q=D/c7%ꑗ_769sXgJKInLLqسQN&)TUaT+-ʭh !@fCoI-KjmB2id]j*.^/l_s4mQWEx OtW_i , 7r~j,ӒG"IjTܴL_x%Y~XoL:ŏ;?EK :>;>}ZY[Eg3RS҅{}qʌYY3YMRcHMIrMMˀFJDz4b!  
"Ѷڣ pѭՑ*Va l OS{mɲ{w\yorrϝ8Z59\%lZ5F.ze o NxaZwyz>J\~ʕkAR?\a.~' 0 )&_OQnN}ˉ}7rş#E˅p#2g2߄2z):(y&L|1[kihl*@IDAT>_1{5?K IjۧLf`zR8{IZ<#r񝑊((am[Y٬'o8d(lYWcvnE#Ō&ÌZij63cլ[:Y3x@Y휐PZ/];ƺD#˽70 YnP6I3˷ V> ԔP;HD i]`fGtHVB.[v m'}Q!c舚lax}$|UzeAcL8&oy}#n[Ex8K}!awŘUы: MzGx߾mɭ>B}ێ>>m'ppX zV3T{ND3\uo.E]YͼvULD(U#lP ꜽ=UuMF]5-MXuoKnv rǖcˑO*iKdHAp-Ё;dQ>%nxs>bj}E1y{ Ԡ1ًeVYN3cYI`[3&sw'n,;"(Wi]d^h 1>?翿_\c˩bÒQ%K#7":u߁ľiCz~}vq9j)COrY^9>m4%aDi{-#Pˈ@D'̿X/1j" nifؓ'D$ `✜'n,ZY6Vp kD9MKo>Ln#Q|1R`eC>ex{tB?mIn;;p7Ğ8%p#8FJ9.Y׳4m0Ynf_+x{OEi\ϯђt(c> ;v|l4n:8z=n?_;~SݸTMAC [/]mJ@L#͙ne C֪Q-5/J*h(\ր#<}1qYؿgג%h6HS.ͽ6fnYEfffzi6f1FjS5PfI:}7]}|-=V8^&skе0r\?udMLfagS8_sdϒvi4Aa_Khۘ-oɏ=p'M+>EAZyhDζ5;[ń\6ּҥ$INQgQqb,]q0<e{s:M|]  Z""U%b,K8[4#(K1 C^^}@*&ǏeHŤc\6x9~8m2t^6Q1qg=Un%S-RР`$$x?ׯcbY 9Ո 'mDKO̜(<}>}%3 i"C|b#G-`UWbC!8AcH$hnnAt KHJ?~ib|dlZ,DU9tնzݑ']`MXi@)1|S* }@h gUE~.9_/љZJ-Z*|dQt$_t >˛2RSŴeG?slS1]u]i._5C7+68GShu h>E}9Dh `R.C-<_ Z:Ug~c{~͎ռ{`gDgV'h"[.?V*:|ck)0ǰ9s`KYn Xod_r%(PVpv1 ʼn%vDذ׶eew{\+><1YVEb?4b/\KT81; fS1ܣ؊_Cb52K?8vq7!;d4pZ}s;?3FSBj(xP60< o˨St7fe&El8OpR)aTiY3^U-RB07 [֮]ԱW?Yj@ ͐ u(v]+(Ӹ'^x}1Lbq6E\7>5}v ))rhR"pA%bïbr-=bQeu'm }0cq?z֪x qǘ^ݿs ċoЍOH̴? Yxki"!"$͚uw~1x=4/'qIWAFݑۉN-rNlaːO;@WO~Y쨵*T*\fQ}pDD8#6m@/UVg⯈[#Ec:1}ABQ={񂛧A|{yVo=ct&޴' j?¢+|o~hnn0Vy$S\MtlVO7j%8t ٶqD$C;Rk뚝H~v}D$Cce[4.{AG;~*q!qBF7 ίj4o[X{t2ovvVo045]T>Wcn i&fg>h 迤S1뷭Z:%zMGp77D:媶ոkط=lw{ 90xL/ 8m~lgb eV^^ajO''9-tG23 ^/}{Sx<}fP)io\1ym$`&Tb^I l9 +by&]v F0ejc6E+YG)MCDP/&n tR8wY$r keIa+߽:4ɓQ|`-V4ˋ rFqf2 +{Dѫ`d?]VTd<| Nq{-9#.HfnWQR(0Y3@- vhV¢BڑZQؼ(,O$C-3ۢi&ls7cDZB%m#G#T .-*FN]JMM΄c9NiҺ]M5XKWqSnYY驩ׯ_AƨȈh`b &JN`uqU6kҢfݦMTFȴ`M Uk?z=yiBtVffg9)1xEUL,2N<7 Ҧڨ *|c`g?yOg⃳ (xy-)p8n݀>n6wvG-0T$7:a3IАZ9R9~3++DJЉX)0yQӺ53B$375{cNG%gOϳ*̶&=3~]s;>' IwꍁPѶIϋuGГ5f0GqVzm+ړ}*JAw1.el6HR@s-+߾YlD<>x,V&a`זRF9Sؠ|%:4dɢK,٣is!796ؑobtd.ժeu;WU8oG)؇ܔ0~$b2p\1枌b\s{)4;^# ^+<Xn^S81\KZ8qN}GQ8S9xVp4~z{֫|ǔA:U~V]H@oBMt:իHPJV4Iօ盳E`<$۵)"FtkH/6C6{g3Vۊܫ0[jR7X޶Y+:jʂCU@DVlawZ`["aUh="Ѵzfi])glUʮ4RgrbVLoNJ!MF^n| !>d_r}KACi5J̤oqvW*w/)2 $߇1U~Jy~x][efy(h4>e)^+d(eS<k\/1psS β!<&}E%6+4S۱YnP ,K!D)CEO7|Fyk#XK G` 5-4ǰ>m̺;nZp+(,n(үb|Ie$/6*LFc%lp— qSz7%J LC2iܔf+jy3˩{qX@l1++ꯡGc(d 7ߏʕ %lPAߥ _|bh71GG=qNat>c%kD%.kMcA+E:΍ IT7Հ7LlZ4LJh7%0҄-^quX$g<@MfGePڷӗbSD4VPf`07 yWYL˷BcxY1~>Ҧμ!x)@h39!̓ݒ^Bs)ە+OXWϢ?qv9(լ&n$޾?3,醂yߑӢ=>)9EfRҞ?h nC G!%U+I)4.Ѵ,^X*ݫ6ZI\12lT.E (Y8؟ܴB`J >sߏO= ڍo㲶d觏p$Ȥja˨Sߦl F9xp~o$S jLR}Sa왟?[36r[Uv̨ ޓrJׄ ZI ~W%WK6^_%v9{B苁,hZ|iRD!>Ѩjw^T' .vAd4bo,^w)ٛO  ^I8aXan1!-Y[F tm~@&$UZ|_jh%U`6>Hmktc<Ҳɔ4:}i~<k`U  1e `KGӲ{SڻW췆mV;Ndʨ] V;@GnCnSŸqs0'Od42v2csgܠi,ۮ}O)UKWc4W0T}s;?K6^(SѪ.ۻ@pٻ-ۼGTC(L%lɳ|/3/f } &%C"3ĸRπ=|EQ^yxP> :wytж3')Gfh)jѼkLkঀ]=F(1gXZc,l檽Ć? pj]L>;т5W^=kֲF( !ΐ,٣iU9AIR1CnԼB5ELb̴LPܖ}֌<>sF6dXfi8߉UE dTvSe{'h˩cm9uSZ/O)h-اyw Hξ+hӔap]ذLFiVQHd1ʶ~jɜ4-=2K2rqHu a4XO:!;݄ ' %y6wER֬ *}Jc܌AT)dɤY{qA,n=mU8C.s@F& u=o݄⊦yn *O:ۺfUN\s!w[grY'k)vD2F|/lƟoCcޘӹyw&~qͨ/ .BCi73J'C߫0"OBrY> T㯾_ ,Mu }W97,9 oJ`|/4[}XǔYҏ䡓†ltcX$Y4L: S`rMH6GkQ~r3Yx[:2ri(" z$,WHwG,T?ױXI(4ӊG$eS+iXT-~^W+tE l*3 {#9]xCXϷbv Zl晹 aop4~v#L8~f~9lB}8odj >~[{{{s46^!F0 6A1<3m\90^FYgFP8Vtdq)]ذN:]^D`RCmFan6X*}F\Aa DR!\B?6D1YK 5ɿE2??7F(%s 1K77|5_#;\ު;Uo14CzH˴ؚ@ؒWMZxh!>lv1^pUS?4@%U`VY zy*?ˣZZ Am1 ?h8{G^}cs^߈2 })YTYǏ+U]ذ(<U WF$^Oo?s@ ? 
!f+.*0Dvca֥1c@@(h(-]g'pą[֧i`'2q7cdEyc19SoWI@1 6Ph]J.B`Ok.TP^j i Ƒнm#`vQ é |^7b(M71@\ڠAEKOiߢUSRĭbb_M=1?~̯ 0 ^BzMz/RE)TEwذLSBbGt{!+$6r!{S޼̾7}h _Ph Gofb{ ^&R(VT@M*¤j>rl0\<}>N0Fr bC{zt|dFq>?ilфV#97î T\N-C^@,W9 PvW)hu honA=/I'wњׯ&r z9Z+@n_Y~][_Fͨǂ| yy3P{ Q -${eIgf^nhYAXK}Zuhui-CҪqLƵHb>1فGNyj#h;˶ YxL yWv20@ˆ /aw]ٝ,/]sdBP399iS*EcΈLGı`s I%^Odg F{W+Dbd7x\ȴ+4ڴ^wש]CS uCܿtÞ㇏:'PNө?US0L5ɑ \`BݮmX4IC8;psd&)ld$nfAs]c֡suM+\ιc8wLȃ"[:҃}Odf2&eSo -m$,yk0Uxna-M{ɳL,;7A(έ/U3v'n yjO -\`wis:U˥ヌӗF|v\hCYUFy=O6Z86u_ש*!D6wk-M^EjjzBJ(ڳisW ɲ6r NZIqFS@Đxhݟ2M_i*(Z4cTC} mʤ:*9DaE&9h:C};g0nQllbR#D#mӏc bi!0=FHW u1h jjY O !kG>u.c *KVЁe2|!\/CGB) (y[.1Ӳ@o;̀8{J(XfDzNIrLX|8ۑsq/HI-B do&n-0>}s$ _Kr$ 4i K[@gzV0||??Z&7F.QJ;`RHzC ^Pu~S.r^tju Lަh.שC0Bn/mK /Ůf{qdO92hd]I0N A aA1{} 꽵M/!LT'qUiTK=zRFDu@Q|)_|Qʇb.9OPFEi:q0}j)У})0P5c^z~][I#Q QYݍ͑s:v8-瘍֥_;bēi`XQ =AXMl<%x@z%sIɐ%sOK(O׵k!KĜwΜS/`6OНI6?\M0bNG|f7 hdO`{r gܥ&$$ȳtGcFwlLbFƐ@dޣAskD>{\^| ?C2nhg4[7J + "͢hh&g6њ2kw(AjTy3O ? ZPy- +T ?4׫B& |w'ƛFa/sBnKI9nw%>Tǵ-˖ &J[[C0Lm<͛:; O(Qf5-ܲ fcؽҵ)y8,!*:wa#U|F5aNR1k2VmGO ~0rlK &{epW,9fAA*T<.͡h&뷱\^^ٺ?AVvmva#J*C)UeS l42F\NuVxТE-Lu7O2 g,k{K%'G%&kf4ClAsιOIL8gI4>-N1g rP=FqP`9pey?@#A"A8(gw"ąBXZV%BӫiβeFPc.x٘>+C^AA&M584tY)Y,&Ixs])Q,̚EgPѼ^ʹޠB|ԆPӀ"d}1 ]jPB>~Fua2F"Tʁ~=Qf_j]0OgQiQVWfhB|ƵQ7 .{.d"A Hs?\ gdt{tjgŸlԵp|R7׷j(}V hxAUe^kh&]Б"MשdYv#Sl \;N"q(|D5:$puasι?jܸ+kBpEDy0[o m{ݱ̧旿1հlz/P14wiCS*j]&sB,Ƙ}:Mx4s9~N|P^P8q(Li} ɊaUtY]]"|qo;ŗrsUTi>_ͱk7h+jV*+>2W<=YZ &@#Yt ҍvh߮(hG.5[PN7ꋟxmnz< 06)EvLL?++dSVnN85,p`%їȗ¯j3ϔ+^L +| C\[6oyfͶe7}tYpw6מ^~wYRI]ӯyL a)ڽ9syshd!{}B\p3.?'(Xs aS{a,;Ou<ݻ}&! ;N=fܕ6*r\ɃJFCfbQ7A )@Cⓟ/eyW㒐Ϡw&cߥ؄ah|BŁt@o5L 6%cުi0Bqݰ̚{j}ͪQLP҇ʍ <̼9ȬVB3#C]`~돷g;q$|32pWg~s4Qe|WsllC]SС] dcS1tQdy]L%oAd Rq~zZ&<AL6~!D ˕/=G/QFV42 [,Bx9 q>`tm'm4EaC 9:U)y)`kY`yo Ne;֘Ե=Ail_ \L.-;ې^ _a𕰡[~ lΝsF0=J/7sie_ŝVH8@AI@7DmwjMQ/ p{qӚoT-ե9wߏ9~ԗ\eںş&ڤo}Mv!|]3d0u##'(SԨD*` :AA+;} K 2O}2bb ?1+ac+fM_ZtoqCY35F!H\vpΣc8=O)l0A1 &}Wd>Ȱ^%as8زnnPa8&zp7V;XdoNv,QEaw%џU0K;8K+\.?]HgMntf\xu¨0ÈqvȨKCPhv@`иsS=cΜbphO)j4ߧ|Z &S†lp&} W \}n>_?| )]a}P v*Fv)ko/0t=ޮU| aA9F_uѓ6[.]*q!-~үhv-:W]ۙz ZѻEkF{\HW\}(A,5Gm_jgu8JV/* 2}:=?$MgVx&c aܡ];3cnBEHƦdA:p]lMR "}(aw4{a΃1忟ZE讕)QL{^(MESHyd]?1AǜnK>{0+c\7P& ŸotJ"o;Z;d`UbYcuy+SCϛ7W=u*wL h Z @5"w!Q s_qofmרנiHXEB 0#dANNNLMx=ڼ~?:Wd(P5]i5HF0ÈBrAm4veS~jتmju6 +Z>|ذ)qѐ*U)qwfKL+O,#9%9BŸٺg̀I8h:ȵ"}po RF`Zg7CqphмS6[]ƥٹ.9##UIu(8b8Bp `MJp>rj((I#S@@1 <+p%l(LHGEPAF(`pC\C~eZKxVOzQs|*s]❚ \7x u -Pyn%NH28*=kZ4 W><[lspw >B4k74glaΤGŤ v- 5S!2APd ,`҇:Q44GR gJ@Y1\OSuG Y>BN\;(d{ [{n_*s;i-E>|נY?@^Qan?Pu&^1 32 Q9,ť[0-Z M5Ǿ"Bш@P3{iHe]wY,6~պs ίZ;8/jnpY`@~n?qk\ƀ `\wHL \u+ab<5?,Pࠐa4c&搌ΥQfQ0+K =Um~ %l35+zStoC?d svb}(z#{H(gs>#>&o5H7˧bJ惵(skvyA#qd7/exs}D2<D*LW@ѽ:!C01} 䵰z>*䢬>Cfa*LQUL`T]&}gիtBAW<+0sV3=g,pbo}y,͝[g, pY1{f}4@ܯ^`cŏ3#'Έ eJཎNzo.ċ6 k~ZE:_$%S@VA;1h/0mڴ[t gm3ʟ[Ŭ4>r$@5Xםk~]<@1j|u7aj1۱g,E^YC:KeRW6yBdqHAjEZI\0* xy1AbnoXb΄#K7쮈XZu҈ 厭6 ;t XSX;jzӣK 1mQ YUA2QE36haڌTȟ#=*Z6!uxX@UbbP.DhH}Œu;ϳWo^?Z&n앢kMj=;eKW|:HcC7͡:Ni x_[fyCTEGAу#Io㱃9xcߕĊ۽v8U֭mRGkV*3`(yd||ўWGx ;S6Z/^qEh|Fdhipkխ^QآFF&, 0o{Р%=4Fs%ߟP&2 98կsߚln9u'>E<9FѬn57-P6ɪ}GqMDuM{įֈ_O'&? GwQ|Iq&6^|1;Eē[`w3DţnRkFָ;y6ҿ[-n[-Su+ `O \1a߶loxqDGmNy,O>W}믳?X^ԫQ@.ľT/Mи5uΪN7~o}paiHIC ;>.U"kU0׏,תR^Mڎ:T9u~[`{;G~tCKIN>}66_ѵ;'L]Q^Y<]HHfNtn\Xv,14d.Ba\Q7Zpy)lwM8+(ZT$zUx ))F8]``Iv1x燴#g|^ ڠw^kV6& @h=7Q:>R<>ܪ}=Jdo> )E.*)9&wlh5nulTx&Zү{͐o}gN-@IDATbc^Ԕ솦<XJ8e?)zgzM[@솟c`ر%DZoțh +†/}]b)AFܬo19Z6kۋ>v&}ӱzaٳeYm!?ȋ577k"nJ*{#c >u^#԰&ip OSj:,$XtmWQ#S4Xv4َ-7c;ιo@gp0=fU^CQt:gaX@J%Y`TسpB t* lǜ{zp54yF, .M\W٤nNVYy, 3$v! u1¡vi3-KX\;7cڷ:(7-u:/xlfb4B;9&&H,۸G h}1;O)`eFd2nݻ{ I)؁bE&pv?OSn@(r7G({ԓ(< ^<*Nq2s F!\ͤ.i&w_VyPcXXm?}n6-y۲9. 
`SKRt?}Z&3B%A5}: $Y>+TMx:51E\N,@/\ܨXv3EDJ ok|`^~ pα]hEs> ?d:B/ҤKg.$I_U|?\U_l2'S`VSIo(Vg?Sܸ]Ep oGMөjFخqXzesqE>8w4P(pR$msRؠ Eq=ꣵkW/}ٮPν;6zw{g ,#Q0˕(7Ǡ;P kŰ{{ʄeø?@!x2>dMT6|ybB% lEX oB p-BN . eQBn&%߽mc Iydʬ"hʋIlb;ҬIeJ! 6=r gbx`Qx{CKqmڲV //v1~,U*T2gMv+bgzmCqI8r|ؾ?N"+Mc|#z#raEI+~UʧA1'zŲB ONJg.GNOGQj= }?-v4{$LlLG~][!A)Y{̨͒ש"qS˰O#-1 Q\iC:'n"ȡ`):(-+X؝Q'#Mh FjSmZn5;a}|kGBBҿdj'XHO.#0Ic``xZfy !(6~R?b5M%o[8}̒*j3(d '2z%@i ALwhՠ([2 a+Ha[uwݎR8}F#1ՖGn60׷n' !DăbQt uB|iTb![Je/NY|| AAdywNP1z80F ML1فxP.!u&#^Yf&_ Q` W5 ԳЇwb'Ck$lm@-AŲf&ݱ4>}^ƕK !ZY&q^='٣5+!Iqu_xoj46:,cF岲d| ' v1^;:;9:wwAhQ:'$#K^~\FjВ7תRCͻ)T">C,:4+>i JM<1+wې}>h,(>#C>& oZ7)"ԴdЊa JS8#d-K?%Yq"l yi a¨%ΰoJ -+zf>@AFq5vxtܴ1pgz,Zr()AZ {|2nAw_Ag?4~2X֧ˀQJ`NVBeqcHGf 02|潜cW{҂qy h@i"m~E&)EuqL i2vV^}GNpo{LSURvcWPzl쥵Zo[jbhJ j -R7F 4S*w~qq~B&!'~?ou4AX ,Ϲ(AG :B-R : uNß*U?K`^xCFD49}PDa*߶ZRk"nM?,Zh[gF2ѣ2-OYAuyPkd;#@4r4V6: Ԃ^?~a0 J`@ڦaZV[X ݁c2xo'>!!$VKF:~B]q]R00E-4 (T} aOĘխX^3҇_х7(,yD0S)>+zrwW>U}qXVg׺Q)m'rFvh-UQ#']qύKraBRldEMNʨ&~ L4an>~_uR蟱j>h0;m;6ӗb܅x)Tpab\7ː Aeb,*Äk뾣2/O< }{x3K3.f:3:{׷jIWV{i]ye@(*5bFQ鉞To4[q uuWPP+W2ӂQsֺa-/]%oeZãVh6s|YwŝG2pIaְbP՘f /A,_'~Y7MsHg28}\^j4@nf^8rKO}w54bdlGXV|[3̡[{S3[MlN?8Z{;̵7a%9'a⨈ ֏K%{Vkz Rތ0fx[E 8 iDIfsK疢uM:bZWQ&#;Chհhm̕ HO̕2dhOA _!hTijڋ?KDH)%ԊegxߥBgofnEmp%n2XZNFY}-Lj>>ijbkY7M!wv,B`D&C_'zUGdujiDB/qSpo5ulQOܲO܆~͉##>A+4A}k2dTh_gERO1*kOuN9CpVsˌ }4`:MRjЂmCát}~>*␱%;L F,80i ߱Q44bt_p-tMʀW[dyZ?-l0 ᰻ԫڅy "F#@x[<\iMf窎~a6<,Vn']d9r"{AK~-hxFR>>{g'(kOLNvJN?zPxLJ?ܞ70AdR0:L=w#џvl}b4Bq~_$YPUg~-7`' ~ (*~CXw/iB` Z8|լB&7]ѣ8D )$MԳmS¬kݪ99@F=uUW1r;:5)Ń#"gC%2#-R(N0,_6˧ FZANiW%63h38(ps^i4;Hn? EًϢ<;sq5Z@[9t@-L6Q-Vժ9;d>ƟuO6(?Oup lŨ#lsaꭉ2P|'jz(| Z$<|ᏟwW Ja#!L"ْ=" $<+Y 4f b#q9}JG`C pWYAf 63qIwoX0nMgJ?1að`wMFN14>Q[kDS.g3b%\]ug'N:;r8<#,K굞 O$dx*-S˟\O2a0S cb7ٶ{zk̉FߔoݔImcc\9VMN؃h4R/G:dIǿ `\M쒫[g<GKd=cb. ayE)q_3qǜeMN' '&"~M̀*V B&B,7L PݮBt{b|f h?19'|Φ|0c15.o4mI4J됈[u+9;߻ڀ;+Of"LXX2VS2V8GZ@'$ձԝr۩eRP1;'Bkq9f6dF3ujc[oh%S&_qx羣's&c(Si0$wߙbB2;έ#MjqyœyL07O`p!|K}#Y?MG`B2Y }HᯅEVmoJI">CQ=x-dt2h2<,u]5*˳gO <{t;t@z0 HפJ?婘&|!k9b3Sv g/݂-IKP)PQ׵N==[t6͎ @&a b;ĪOQ &S^; T/%Ҳzq/+K)!Bw?ōA$sG IfiwMvepni5jEGu26~aoedݟ^}1ޞ>w4d^:zCxǠ]-]c Nl#b6ةum_w윉?DhrTV 8CʵB7 $<.XRX.õ q-|,:R6OV"]y74IJ%Q!Wvƈ mz"Zu;!óS6ǣ6̓h5ҾÀًqp 969+bÝ74jz%?]#Bu`v2\jL|4̨HHeOyLaÈ)Ā?_{?-&r ϑv=RI)EtrJ kiaMB e=vhXq,!@~Ajzl,WXkHns|5|fŜo  < "Pt l `QBvc8e}8,*F,2Bu`_VlsT]o|,p=sL AkfW 6i"f \X{4UJ]|:hzyM)Aqx-MWN.o/4Ͳ[gJM9sg^{->Yg Mv$Q15F φa|Y^Me>7 M{׈klb1igdV]s^h>\Θ[ʇBg ߇p>"L_A:U:0 .(,,}U6c9?rHvB 3i2pb;$8 kL `_EQ,l`ߒp8h1vtu쁵/x *BCw>~a\ݿ- kO*yS=knGKZ߯@3+>)>( ]-qja`D?'%B?q͊@ i^-#e[*AF{ؼ]F,~9QajBZ[Jf[oSf/oCf,Կ4,V(@[),{8U^5B4a9{] LѮ$Rclxj,'o^* n@:5䴿^BgrrҦZU1!7ͧh]F'?۠?jL (YoEۜB M6 y%V0ꀰocǮA-I{|2a9T A){ӱ |,AOw 'AYDoȈz&hD:lOrGK!#FF(~.c{xb3k45 䙌C|j2ނv$kV Y{Y x0?kHLOQuLf\*01F*~sk+  whn?T3u/y@H_Q)85Wfesr?Pi!)H2,7sʧ;y' <eK Gl9}31 u]TVq+=! j4P0rucûݴ/rR}dם߃-pqÆy@hs/oAq.%MAD5#_2x<Ľ6c[nh!LZ"^Ҝ 7zAxXvG!*tPW0^Qd=(Unu0Q @#dXU0>xz<]tȢ1Z}p&T4.O qlO#;&BF6LDKWGǨyVn ̭G:>b=f?}xhݟhszĀ #(fdRx!6ZsFUL HRiOgcKj-,aiI:36qp }p^-40 Mz'w}hddUar_61#'}4 {>>D ˕8ʗ(lx|hI ﻩ/Z7 15R0 1" 9Iپ o{ʖ #(Lx߃̛[#5ӻ`lv'HiPvC2r}kݎ+*͠jO`8f!h1ft[ )",S|+*9&4k4cP6BCAC i!Z{CS-{9 Oa$3Zhg]dΐ90@43zөYz{)6;賃_K3D+M4aØ'6>Ae~41 #6k^bYt|^Ř<ݎ/b.9κ*o臵Wc_LIȌy}J 4^֥4`K M<~M@/R>k%$\W󯺢JSu<(hpZ7G_3B ڟi&[yUFH,HI6?dL0bN%}AdBL4iQ(] 6G eY6ɐʤIQ`bEQ0C``)V縷܅sfzl&e4y$Ϳ(`4SvH#cQ xʷRIcHs_zaG;$޺Q-UjЏ7ba4$d6jTQ^%$KM^lOQ/;M$>iNg pe>21p4P )7t&Ȱߠ *bpq|l)lp /붔.ݕ}pG'0i '<)XFڕ+, Y}/* )8 S$iBԣΞq-L*c)dY;ORO\| 'wjwh |+@L胓%*:wa#$Ɋ~Wwx49  jzz *khٖz`HL;ȢC \5ʽȹd-!ɣj7sS>\SUFM\S NL'[:^m;5rK֪"PKS:+u XюQ !촣d.>/[MUFd ӭ[5,'V\կ잳)p\݊6F.\IӼzjڰhi^OmgWfvӣ7nӾ3PapdyĀYb` _h Y*W J #IO=eoIoN09ݿY=&>}ቋ! 
|h3+'}:MS4sڦL/2UaLx飩Z,Kter((Da6YQhD=v;(=Fc;GhÄ❉W0&D:/yj{1U&׮xaĮH͉G7}G6pGVi^=fFKƼ>~7C>n)>yWO{>5aɔ1?&BwE3Pо](hӇ[&L 01[е],mKsH!9IWӀB i^7މB2#o >ee_yt[4A.5_wPCI)Š5 MMmA"C1'ێ:6IcTב|~!3QJ MT`BvC!7}+Rxه_yei=x`[?<ͫ'%pƼ>Ct/3zUϽ=S! ]{=߅of?ӷ9ط8 R0%dQͳU=~5x\Sλȝ'yޣÉpu`s9?v3mR smi ]W1Gfg&hYfe8~aeO(ddW5g;q$hoi^=53ZRy}:O0d>Re;9oBx0OYhׇ/@[>tbm4,] @fL  Uf' 5|vFi5;w΍kZ(]//'`e_0MIaä лcS!A4%!!5ߨZK;~_nT^|뀟4b'i$Yjk-&01`b@€ ,5k|BΚDM($I>uyTƚHY0ȸ 4_ cplYRm~ W=+Ig[`e7 _i= [а^y%ń\4IUW@FI/\?MZ*rݷ>#O= l6[>l* <{&01 iD mi{.GcW`t?TmZ ǝXwoÐ7#ou:t>42%A#(ݩ0C& J>43N&FkS,zq_ Y JaSm1 pRMV65[UMʹ'Klۯ6q-~Cb2 :C ʌtA0#\ἊK-:]ckU)ؾ5Z5O`pGT,g=i /eƅ3j7~ b`h,;>rĂށ$}O&t..j?fBpOo|Vد5]6x&|}Ka犜+x@8J(fY4?jU;;ԷaFz`T0hP(fә swh΅+̘p; QɃ 6&T-BtEPp+#hv;*Mc?]pYTVD1~\$ H (&X0$Ì(bfOͮcNa% 1 \?(t '`^bkiE6PnBxm⨈xmǻʹ`;COKxq[|OJu՜X<%%דojÁZA: ;sg||so?3hSFMCŠ-Rd`i- rrrbblŋgٵ}Y"@FB9J,6E jLSQu2)\?f5lնA:u - W>lؔXCAE+WJ>5|$6[bZy2fy f\Oq p8S.$\;yx{6o܃)TPuM~pV &014i, jVw!#Fw?EZ;hC6=Ww-~-Rhz7u5mP~DK0zt:zeK3t ƂuKX`ڴi~+ rvc߹/9y^;gVg5;_M1Z8놎|U~z=`OĦ?Jc8|Lׄe;UmЈ}Vq\x} tC]p4*Z}L Q|}s¹+>{$ܴn&#B A& @8< ɹ%S@@1 <{+iUH:Q5 ҧc5ql3\/,xt|:y(:V{sa65j &5NׅIn~(2yKpbI\eY24Du89QO58WVM< 8P_3S0Q?`;>842c,ߖoWMk'ʼMP>Kpr웈Guy&}[7}1dDd=\3 >/;dL})#a 8 }{ s] D<G{cv{#~1*Ƞ7G_xyXQmt ^_ atI@L~hBqٓ?À?)Hj-(ANr^((s&}JAL(!sBanP  Tc}PuAmRpPƵL01`bG9mc0m;vݐ*U(t@{a1z+jO-̼J`՛`P~jb^C7ACC~ٓ Kg}M8c&>>s%/G_k8CKߢ :?װ@"yJv[Aaؐ,pqPDg׋(;8G9}. ˒ %z6,YF\g0ݞ)[OL.i:=9>d.##^ayuZè=39/@}ٖV~d,мWaC1xdB>a@}y_1{FA1 <>Իt &P]U~{S1ϹM  Zv5 ZuθI3Xu/anG^\+8J&01+ F:bԖC CG1~dw޴ .Rh:5Q RWƺ?.ٗσ`ܩix A)DkEKo=  +-^.5L-Ϻ$ &'|iX|6qSj  (C$˕!j? ܉uB8aS܏9X47fYS֖2˼6A?<k b?€?5d xp.yȜ_ayooe|I'ҖjuB JxP )M)KPGUj(n9o,czȈ10qfCw~ ߌ0哑Y~ipjye,gp$qѭɝuc\ܛea|I: aRT tI>D!]-&b1r%n]LܚGOj]yP>U0+/emu/ɇ`_۴/7r-̹hV\,lG+eKeYz%-=HeM& }d%z ǢkCb \tTKd,;R`8n4 M pq9JԁlIB_+PtGFըPk0>WH#MڣUS1pԻYG+skn &L L-77v?C  mI*+{-b7l N˅;:-Α-U<:XUO-v1}m ^u<^Qۄvjb{L=;4iv;S@~!zy<Imz1;ɗx@]jxT=` t=ƇMj ue@Mw ܵn/{~9=/ W-W}T~dԇK}Ȍ&5w\xytiT&}~^GF: ^Ϟ3SfKxW813r?%d{=lb@1mīWZ4os&qZF-hlUμ'mH%$^@BBmpJ R FRMYƿ>>%֧/bG?ZʰS^ǁd`|C&ȕ-EJ:E'TA ]d;f٭6 FKO.뮻rS0 @_hGtܛr_† 1-R*`@)lb8È0{¨>ꥤ?0)#S&ǀgu۽u1tLoJy7CC'~01yE:˹AM_Ht'&2 3c%K5r at$%h@Qi!o;+B$2tV>OrtbJrtڃ/DASc^{ 6htƒ.͢H +6.2Q#^:N<܏J*u P=f; E^zТR%BԏH<!AKH8u}bjEZ Z槨1Gxy1AbnoXb΄#K7쮸z~Zun/]ڰ[VYY| )K>c}|#F-6`^01'H tsv34vط`Z XhՑ#ozꅚF(>!AaHwGDZ%mz0qAg ͺ~(61d)ꈷquVoC{!Мy"/#_灁E_r+dl4An;Y K- ųNF[u=9(F? WUj75\BJw{\ssi9< YݭYZxD.>u~Z%L|"/k<AWs`7#w15 `TBRؖ}BnXל"Ǐ~ W]PTdXŦ?ٕŭp'{:~`s|vgƢ.I x=6~H\i%co׫C/F3Z_KfD f<6-cM (֞y#f|7M- 4wOs057z2sqIx&P@/$RL5L~yb{e}_w0@АQclh㦼fDo͓u U 4#"' U뀀VZ qnuxz@U:?}R}xk2ף| UפR\hq+xK '+j?U bKzF!Prx,BNjdtmgM]X ME\Tz &:d[u`TE(S! ƆItn4g& eBR? fý^o1\bW߯_H.tXq=U]US@@x9n>T#mL<^n8>OzfըptSׯV+Dv%`Ѷi0ƲNa"@q:wv}lj@!w_Ï>(PRP( k;r up\֌Z=$vSb(kk]Ec@9k\\S{'$gqLǪ\Uz[2WvZ)UL[ԹB@!p2,9qK7OA8e-Sqp0e6GU /nJW_1~27\\\䩅Й.RS?G=ҢSǸĊ(?ͦJ:l`S8SJPqpdpW {[ f#F5Nj_7a/>q֮N'9SSs}$(7C(AQ9}}}9VYg'a${Ԝޫu\BƂ5zb v8by7Xz{Y ApMB@!p 8KHn+ ceҝT u@%$^ZN`0^xd0xyѡgNHR-4sJzeT˫wz~h .@3;!`"}\fww_2g!b9샷`] >nH0UӒcr+Q{Ac16眩\qg1beD&o++]}WNEQÛs IMtl}n.t?jUԹtYU q -\l }gTzUs4u? Cݑ׉zgG/%_/oPwq5(>iediKdiOi6=B凜 ԡ̫H"%J5* cpb#ne:!94WhD-H'PjԸnHX˲+$QiP"]I<`o@ Z%$к]Gb8]eNwR7₏' ,C9YAA|<7 dMK{E.gyuҚw#vr11fb+kBxdl7}Nme ]' uEjI0ɮzcFzācqkFP կLɩvg ֊ڃ~\^;jߌڋ~Y숁 X^ 5䜘a7ms_FI)KIE60}kg '%]I AgjX'D0Dky0W;c6.sZ$װj\t . rC.c'M|h0c&w(ڝiߑ38чi_ccKIƕƁۺ;n kwQ~KjZVˁqԮi(YX3fG`( %%hhuA!E5=ܯIOzx? '?Z4k+hȹylMLNӯeLny.+3Ä؏ L6%Q[>XY̆(󗒨{v9r:.  
j S% `6@tm9҈'ݼ2ѓ X`0i>TJ\c1VJ$6j~ԩeq.fiWIe6x^>  7dAWZffކu("KJM K /e)!52ij,j 3g_qq޸2ddZ(ah G;NjTGs6&9C* L͌o -8bpV ZEsUkԭF;RZFXڶXͨ#onM^팷mOҡ *fPa(Љ]|(56)zz5X␒X~qǛUsEޭ-H3 >^tfR D&/^M( ?WG /HbVa"Ə((f?xdc Zi|iXtK3>Ka~H>9ِ_+g ƔPg2RH ]Jӻ> mrKm4tH4l'YgfQݐԥ2YJ&(E73*`X"dv ܻZUyB;Ys ʓ|ʌٖB5cM-d AqǕҌye[hcl@ QPz0g̨ j^VnЄ bQo :8 ]Z5njm/Hs]d屷}`N}40p8s? l43o5d <;KЅ/-EdvR;V(-A?d6#&Y5=ԶR3UB^+X McU L\:4'Tz2,R#G\oU끧JTh\P\aG**3?J1PR*j7nTZ6)jW)({\  ߓd4ztP_h<6 ǀ*=Ѯh_Oi?P.tY FU=}m-O}ғ*}җROwwj5=[yyՅh6@c|P-hɆ}"s?UPxifw=tՉd.*xiZ/y)~kJwZ2g""7g.83~4|1o3adwؕmX&| l=` ^*j7nwhC^ainu)\z@@c+&!^7Q'tFݚABQ!j*aXiq/<%_۶oڒ 1B4̆rMG( {tӄ+J1DNwp=@*Qdbo~0oG&9+½t5!BJ!oI>JG+xK6 ={iϮ{3ۣX@!P>a^hP-CXt9elAT#$n.+|iaĽ($N {/9t\)j7l-#ӝ=\`Ć DhQU0Rcذê  9(P,1h_y=ƻ_OB*=!aԡ0rTnv\Y2QX{Ϩk r@TfOA՘pdm#qq4J'.+wЭ#=\N]7۶riM~]< XLz0U Oj+c֌+/N> +Nv%/[?U.TyzxW^X!pc$6})I}+!)lHiԺcq\60rtCB@!pmrlFDDJ5orGԪ{[5A1Tm1]N(|9}=CNl b! ?zr_T!йUCSvQ Wn4U6A`vԗFPA;nh?C ;iv q==Ȍwp8g<[2qjpQTȸ1A!!wDGB%mSsl1^!P(n^FYS`6==C|tpf7U.: G6sc8m ]fc?;|z_{馫&jX!fx1Mi._:p{DZ5!M8hd\?|5+]sp!Tv B\{-uXIDȸQI;voHJ6:R /V\iЅڳS #gw]LOÒZ6/@E!c6vEQGsAHF3/ً3-A[;QKq3A+ (B1F9%q46~XIHc|0ߍȸϻTrxU0+m ƾqmoUhv#4IYRUiHFÚaN{3u!lZ|m_fSm/睯aĜ?>KXM$KW_N!PJ&Efxɼc8"ORUn?$(8CpxL% ,Rx)g C.N1J0 Aҁ$5χ:rIcRvm&c32SG\7VOq`|twVFP@sRIl1-/099w%GBO@ڗ>|,/eᕸbukk1by6LVg|,zZ:[X}թfp e·J~ ]ت| @kal #K;M*S {[()<voۘnҒ*XI؃^ZPhߔ HHBGxS2j`ֶ H*$ ,]ܽږ#DX}3yosǪ~Č \Uxﺇ iƘo&=l{D22-X Zh@_>n%M %xNbN jB+4_SQYDE3E<`id>ep`W*3r5+87z1W,Fha-êwԱ3ԤnSmX'DTrhZ?lq= +_hXtMiwE 8YI2sQ団 DiZq@[ؠ Ψ(k]f?B'rH5f|ӣD~BYQQ.hSq{## YyQ vK6^?V欄'GIh!(Ɯ[c=5qilu\H"+.]nU8K @ a)&9Wv$:<U&<H1' F.cHAEjTޘp7}Iy ;5rj%C91ʖ}ci=tRP uKEnݸ6~h)w(ӹ`أ sh8"6b<-֯L k=7wyCbY?Q\nؾsĢpV.} NfA}obG<9T_ v$ӆY9wk1x?Vsv|bRuc6 Ngl5АO>$L0?K.<{QQ~o4X}S3xg$rǛE>ul5^5VPdAja$0Ck6>Tݜuߣ&FC05jӲ-3.];PAz1"^7?e RdގA=# n1"80GҀ<%ÓlX5RYDlX@(%\27=m&+*DqW*5Hſ7%jJw$.y":. Eb~,@bq}b_fIY KH\f|ٻ*A5Zw3H7W-*BPv v[FöPrR TϷ?&ˌu<9yAE2[FCþpٖ;ikb[~<͘˼0y$T5̌1]y8|_cR ǞZ6vWp+Vjlݟ4 c{@}iDƼC@X dAƇcJј\F#Wm4U.B`vԗ\@, ԊTp[sl[p۟$]wfy]/a;u?s Y^}c}f=}2/7L=Z/Kʴ_GaXUF|_cOlj]b4_, A*+?l?b`/]Y} }ߡSMn ̂`6`>)! OπLs/Rㇽۻ9Խ‹F9LcsgH)3iK(w=VEv8=}k;\G>y^Kv7l Z& @~0Ua~dKwhݬ ʄ{.}~3c#\ F,8F'lPZ.1HvN֦&*N6|&Bgm7;*Z2 l;h Ҋ`lFm6?s\TAJ7$PƏ+N=ty%g̍b~Dȅz#v1-TriFjڅqJ!S uD`BxdqSY,fPF5[ ?.lJ>_VlXdYmֳ/k|d6$QPu-rZyMm= ?-v]=3 /0>`?[j=_,hwpȢw+ JLojevTѻu7 돍[.!6\N7UhcD8޻eçWo9"!' JQ{A*(0YiiI){m|nվY>w"I ^ðlwؚX$"^;B! aȩ+7PSq.GY>y'-k1aleסkC2g³׶S9Ld/XTK>?G5g)D" Fch'bu[Xـ " m{NԴV) ǵ*%YO{UQ=*p vb2 }Ēnum^*Ե231"Vd3u_8Gg. aR @р$ǣ⷟>IfCJ6*@UR9~`u}Q~O}eP_Gfs[n&mod64ak{+D!A@1R`W;&+4u(c6wi (б 9ƒ5+2$MAN|P`9Y`6x~oYk'%U6 #p` 8pom1c}e5_ZHIfCQ)Fyi)ʃ7_~}U/69uBܻkaɥE\pEl+|VO=c/ [xq^QƏ| |sS-5=E  w{9FϞc7M{p`< n]o+N iWP:r@`gkQ4ɑ֘Llf UX2ehR7ҁТ{h}M{Ģkً"p!1!'C&1`X :DTQXIh f(_yvBf[ .zi>a9WffbLYs7& P}lkgg[D5|U4oDz6ReC lɘU.\IĆ d6 DHfV%d߇~5祦6}l 85V-_睷sM . 
jږncUO٢'&b3/~Ym0pu fC;.B"+$yr֍&Rl ^ Ox9r<QuLhX I ;m)B1iǵd1vg]1kw꾁"٪\DիlľA:5{yCibu>F:2G@x炵nvfl$yQVx(މ"b8g; +4bqa<|jlUJaNaܙzź|:7߬E[?`O*nJ=qqY왙I))N9w1.^׏ݥT#d+R\ ,Xթ eZ?صEh㦭|}1O~$%AVVw;-G|ʋ|]lNMIJTO/HNK{w7 I5*ym0V,A8WPX@'1\ (Tnj@L .q/gql˗v}Ou`6"鹄$Ȓ0!h<i@boQGN3AQVz `f@s4,o[&D uKw2=4Hk%5BmIZ3Q,mƘ[8ZL7̆x})}fM [kz|$9nk dFٱh>DAg>vQTvM2h[L 09 ]{A}N30ƱA0!ѻ!^x..|{ybXgM*a7 yR@oU9 6+R(J@VFeh*ek%qgy@|X ƻmBSXuZ>FŶwGSYhSQ]IgK0%xz%TxVDY*m4gjatS!~̚s4pIZugfOfr@,,`Obr R2g*y {DRKz.ia㋺_{a6z8Usvx|,$Qc9zd@ ƞrR2늀l?)R L AN]1QR,H!P"%xO=D d ##d3*m܁7N.R`dȱ Z>©:u@*R(#Me^J4lj7 ;H0['Ґ\[y{{!yy(J$^B*mL1QDl|9#\vaۉ f8CunҕβX^V\@jӂ@#[BV+3tӗvJO;卯%ɣ$il?xX8~"2_;0ǒǒG70J7$!|[ B@'ɞq ' :|H!P*CA10rc6y |}}TNfs*߸qcP2h_9v]d!7<ȍBb=WPA`vԗ\oH?u(z_FM{dӝ:=~ZW3%n|ԙU:ϪC ޖm%vŜ8Ah=mۤ.#_*t"0(a e4fr{褰ɀ)glTc4_BQى /`xWF;!~>t1 /o{ #қ_&/C ;ǧFK3c@4 *>cÖѐȅ'lhKDoE@;L~1 |qDJ*p Ge?ügTz6a\@; ݐ%sS`DޫgGL])GH֨훏v8$J2!9J&=թeCl@ѯs+NMFbHKݱ {+3VzQY;6x}UDdW7tļ1YAiƇGcrLL'+qpJ7& q|Hh}ćm4kvDPk#& jVc=ߨe{^=Į4q(YY`6]&ʖpO?HJJC}=b#usYɖ`#7~fmCbe[ ɑC\4=AvO|*`k ł&GvvDL&f/?̥ygXjߜ Yթ״&ъlR6~#6x\7Fc&n<׸/9c:y_^bbt3$!q4rDcl48HB$`ڟ7{[M__>[C^yt( z3,RRbj(x,)+psA6t{ /[1yEY[=jBgqQ5~?E.$bu.*BGM;JQWQﻬ7+,,}`ePcpe~ #0!,?Rό|h#`d1cX{ӭMfz}ՇH7 8eI~w:3 xDxHN\Ǩ@uA!P+*};~Yv 288E( ޓ 1*yJvkBjw<nj) QA 0׎GGݾ\5=OKNc6XKFPԱB@!P( g!PQ%0X M8G8U]*xfل{`cx+$G읿_sh{ ua6RrB@!P( @I l@&/?[pl|~Gq`ۦz6vM9GI Җ р{[x:e[);֮qϦha΀d6d/9xLU B@!P(\^ը0Ѹ jڷe{v1h6ڀUQN?F~|9ޟ?y1DW҃T= B@!P\E`+bGAޫ~A5[1Ъ оuBRjk۶{`np:/}*HRP( B@! رvrlܰ" ~p^ǍgQ OE771ms?8Nھ-p]7TSR( B@!PR oWgۦ(-Y"~rS%Z(ECG B@!PC0V0 e]xY:uYqEF{:r讹}}@mHS&RFS+ B@!(7`އ%TCC> ,O \Dž,3E8k/fz2k>Q'M͚=Ȩ8eEP=P( BE9uhXO0>^:e(mxT`.oK @z5=IچltJ]Z7*k=D_Bf*K/D{-C }ujՀ,tYfZ๊pq8> 򓦗m.SB@!P( Ur/ 0R!Ԩ=}>/q4 l{|¥|(cno/AŲP-]Swζf45W\h;fr[i.WHU B@!P(?^@- kap3=<l@b~ө%ڵ5(?=vwo~aߵbZw$jUG˒s43'['F ȹIݢt|<8'5"8{1~\NMN{mc-/Oڴ=0kvURq"T ֍sT$Cr46~XIHc|<4K4\BO;|>7Id>f5JHLuS`p[&3<:4~]r|%ZR,Rzi¨[z:ݤ9;i;t6=nJƅEX-I#8j$,< NUOB@!P( l "Oj(:OC(.2 jL>IDe6SBR /,px{ćS]Vyq&.;JLVA;6իPfNj:h@cMV_Ly?VAUs2SmyGzFECkg)f<@ɩ`.,ҁjW@?ز!M3:O.F]`2?s/3;uĵq#}a7OG@o q$Aq9\iKl5-t=\|b ku8>?wgq5tߦrT( B"I`Jf)oO?oJJM{/Qs]IMWy7sR3ؙ ߟ|GKp1)U0%>x[B>҅jsKbrӔiF`iƯ^cW2Ub>#pT̶VR( @ dRN(径Y\;ypU?jh,u`u%Ie<[ʖ}E͌pp}60~KO] wbrRO+ B@!(9v{9HFغgIq0.lϞ҃)(ѐao@ؔ@ZŶneWZP傔q:TT=yԹUTS止Gcu]v?+*v0C%iޣhUQLsMb a?Ѕ֫)Y0=9LHkW7.ڝtߠ).\|%(3>h]:rZH^jZի0uv#_"N2:_K-"n)faPVX6|t=j#GHZvWZV̚<|vdmvV B@!H22/'&󦴌,EId ^ ī곍$_/fBt4䒍Tݜ#9p6æM"yv4eaЌr[JR'na$Xj* Jf^ed iCq1RM0p;۲a|x?ǫt=LmR7/TKo9@%t`xE>a?oo/O bFSVs7=+= r969L얝ωVNHP+:Y_᥅Fc4 TqBXĸl]{o4ᅁ:9aȄ"?#-Jϰ*VtDx z5X4gEI>k0uzG5Mpȩ#ݓnwq8P[ßn1Ƈ9Ҫ[ҿkP0lkHVE:, #^ ϱh.Oa5Oܞ3 00Ü6K4.;MfϿOJBnt'Y3'ݝrؾw_6o\bfNиnWI3S\ xƔ=m@vk.cqS"#Owixf+R( #%6~P{ޜdԢzzQ#V^yoRPZ)JrGCTjFšZUa A 9?OfrҋLӡy=ڰp_Sl m!d:{f.l=tB JowVZ#Zխ=΃'MP *dg{eQa޴~M$fIvoyXFb20b)x)@<fszkUǧF>͌F3%_x{x$^;%G*"kS 5i# ^e<=[t9N˟ (4!|zlŒŲOMȓ1hwf4қYRG4jmt2{}žÏ] f4ښi6_Ӝf)z03\Wݼ|8T?ZM_Wl5f9l^!P(* (yL_bھ5;;ic!yPw`D[M~<>o:ywŪ?xR^</ڟ>w{{qRxw4[ V#ȟkvz_h"zlҒ{鵙-fV}D۹UC$ԫL`l:6(b¾A`z?5zldf/{޺zyǺ`A_/9i;qQkeΠ$/Si]sޅǏLXPbD3sDL~Ni~<_ޓA{ըRg͔g5mƬWOÙIaSO igaq[ِJ9{KCE1y+80uڜl榍F*q-,CfV<^>ҁHY[OL}5çSD>q\XYfd>c&xgV/"r%Igrwo23| $?%2Ҫk8]5O'ONDRRDg_ZҏT+sxR4 Br"`յܚ4 ܰd}p =hmx!Tw\Dѷ${:N![|eju$V=~/> Nh9*yӣwJgk,XMG\&'|{X56#x6t0_ifGzwyKWO@2rȩyV~YvvV2o;ҍ:fd N%5W^|.|ګB fk8ֳoLZ2srXu*g)H p۱$#j| qaխqMϥ8hIR[YJ B@!3ğ0I73/Xa~\ L%%Ly0FFxc<# ΖѐK`wx*)–0Q|yþE2{PQ|Ѿhg=;!'d0!J+F&,líFv=O%5gBw3UMtUƱw$`bXw^}졻  UX4+H̿^@XVx}5ߛ(~|WI̐/lpXruvWYӼXL#-zjڕs(F|yjP( b"  ldX{ӭM` }s?AQ%+7}.$M>KIIœB2N!Ä)L!n/f5ټ>N]HbFEl H4Csvakf˥ݜ~AZgfܪ{ӈ;<rZR[,슖)WpjI=JMl_rswr-޻l1Ӳfv-I2LfU_90jC9K5^`fmͯs0y_aeg,V=+&[ן({gJ&<=>%2On/P()ِTiRx>}ڼbW.%+-Q8^h7 )[W.Ì%!%8{Zxl.%XMg5^xapt}ZZχ M6f b\n{fgXE&z7C?ht.y.|FU-PϜ LjժOYOm8|zQ^S{_E3 @E 6tU] * I7 
Ec)*HoC{3s&dRHyw?s{w3{O5Hpx*ϫBs^?ɾqBTԉʔGO*ވߡ,Q]}9rK }`nZm2 6Vx{;r|v:5^6c-v9޵bfee2 uc Gk4)G|a*31:R0` y`Dnib"f"ph"lȹrE]{jӮsNABBBlVC5,\ܜ̬#{ٿcXbK>ĊK 6 ACDАuAҸ-8"x/Zdu(oUz@@@j ÔJ|,e*‡D rO&r/}8s]yBz5g  LN6q| >׵|!p@`>1q5SB4FCj ' (yɰw*v†b]R]j*|Z.!'Ȼp[lA@@j^}Bޙ6djlaKɗ!,Q4VìÈ.,|ZBPnP\Br%!˝>IFL:Gkp@@@eУR! !/=rEQp\ .'Ţ<K!@m!P|pGi-ăԆdqr A\MN2-љnFk3+9<*nt3!>:*yaC:'p?Yx>A4sP,"YV6tsQ㣇uj˞[k}{tP؊qxMyW2shaAmDj$9ο?ͱ r4aC/p>%\۲g ؇H[V{%F'K   PU2&\2AK/ZZ7 کmm_'Ib:ɤiMXq\j!p|pyio#j͓j7;)Zw$]ԥoI= e 4y%Q%v+{6e5!z?OcwR쒌P:]UuC(a11Ss[9 -#!uᜅ46Gd#DQF}9H>?"aYP65vn*U-'+_zIPB@@@@F} -MxVh4JUKD1OI>g˖RHG\?۪͍*@U,<2{|SYsрQ"-}VEz<* NyFIyK}@I    "cJ,2ͧ)NR0IY?0P ;GyJ~Uޓk!\t @@@ US%_1 d{0Y޶|Hdyf<!/B?Z"[   u*_ NhԴͲaQA}PV30my3Yu'JDeKA[=A}VnTQeH]@@@j0>lƏcfk}^hpꭴt.J&B4Gn6#ܰRkyқ/>Lv[妫ϧVMCUe>[;ˢatǠnvӮC'誎 Acі#B.GEqk@@@@j*d VY^4VNloC:,rz'?RS)]QϿW^А6b3`SyQZ=Ԡ^;v ߎI4ooV6 YΜ?߂O]prPHW#Q)%& Bә`{]mOvkM}lܠ7}4o7= m"5oR-{p^3R׊,4N_(HfLC#PjF5 rN:3߼'?;{Vj&O/\sUzX8a4ԢIթ 5]ѨQVMƏ^ې)+ iF2 +lՔnbȭ=#+IquhݔrGY𐐚Mo{S ~p ] mԻK[j G2>'vZiGA?O'…>{#$V*j(    p H4L,% ճ Q*LI?(%# I=;M}ޣ' 'Y9ttu5:hDHϚJևRט5 6.4bA{l{x8egGG4jL,hȞREi:Ϻ)%q &Rv~u!͔.\FXv۞#sog4c!$XDeA^= aC 2Qy)*K !.fB -IQZL% &<72Iaᓺ + ex:rķ]etkߒ^%۩^p гX<ᨍmyS;E"&[Fض/~\NO$v鷻whAK7h=$7gc y;pe;GAب@@@|L,%&d3/Ջ?7 ֿ8H\y}B* !f9XpHct9Dmtt_IIQ$E.D@wh]{q54x1pSUO0yuC 1#5Yi h׿#'nb͇YP'VCj6>E    P dҘPGZ̗dxv(tofJ{w*x ZNI5lԙM䋿9thT7Z1}18gFԹT^6Iw;!k!vh$<_J<}^0s.=1m-Xҗ_x9䴌"Eʦb ّ\|Wf~7^Eݖd*E*E(=otԾ <6Z%ۃb    ' ǟcz yXy$oNaeu;uA@Yqظ0]Ѳ 忳у5Ig.GAugjq^ūXIWme!⬾8Z7oܐoޣ/#Z}?;doWʭ Ձ3t]tkBb7n}:ΦZ׳Թj>MJD;1zuYĉw֯BΈpp66j= F6ҵ9"ejo|̗EEJW    B 2:vev\/ḿeG=u7˸ oީ Y-Q %O`lwcҷF_׸' k $],^*W*)H}%%y3Y!m2[$bzUɿʆFز(}K4v^k\|5'l( c%O^d!m}poL W!j=G.kb!rlFeɡ(USf-dBg#k#%fMw0O%oYzRg4AbTNc.R\*pF5iW!lԄQBA@@@"C"ML^BY޼I-a>,7:׶F \N\/gٷ^ߓ$֕PhDVWz~\l\x Ƙ׹U h-ĭYu0 Wh=bN3kT~MIERG^b:    +.Vdmi쯱i/q;WUvHݼI#Z>12<]>xF= yU5Nҟ4+j+ )@Ǣ{WJ)+[W¦SwXS,Mt-뫡 Wq tl m!cuz]_7> 2 o&L Uk͛][_nVB73BUv\zj%[#qS~[7kҤ ((_㍆Y̻TM{eA=%{#L܍Ke]g|DodvoQCèǕRyV?qm+ՎI4W*MؐUdIOQ-^4UCyyj1'- t/Cm{|2?x@1{vCލ{]HvՖp~|[ac{4 5ZpՕz,Z_kzM;Gn^G#f z7S5]ѿy=V07C Cxzrv nxfH4v2ڮEa<[$S~kH̟$fdSZllMkJtM'P}|^!Ӎulӌ7cSxu禾WwnClkm'?4[UX(}p~;dӬ$dV9X;]sse릴!#0r}[Ázڲ֨;o,QVߌ:/1ot*ovF7wƧkw׆Oր8@@@kx YTҮ,2k5t-k"=3m"5bA5bIOؼoNsizIߓ{nhل&F&$luX׮XDUc dƏ{(oTuEuOC}֌XjKrdx͡R;e?גv:E(:#.9{SeԌbYS!np2_@?:, hh?&s].7JP y&&WjŢRG=xꂾ hR\ "iб5!A; Stz$'h.]e:5v(FE+23}+MrzZ#; ͉pV]P/ǿ@nm 1Lcf&ރ@ jJt~OK6tFnv΅䔌j6ZĮIvdl?p\pGT"0LrrlBh*g. Q"m8K'zl$ṇST͵$û9q%N֨ԣf}9Z]'Ox&SY}+ ɩ37Wlw UZEȗ2PiK,L-Jԏb I/֊   'KtIDATϫĢA&R8df>-V746ݯt9efa[V}P"~.,j Q͊ܗ_q͘~^rr]Zdn[ݗB|EOR3t(=1rr6wEЪ)nNٔIe~=>33WngrZ-YΧZv_YGUees2{QꋕIFcJW龂C⊵ '`QT}@ '`L&JE?\ѢyM_MA6k+>Eթ/WKNσ8?0?B[^+YVį}M3Ĕi,Ul՞}/6dD7>a5E^DLzD4; YhǁD 9}I$b"&>"#"\ӥhUzJ 2޼QxzFQK.Z|$KwiSy@Vl5xW\ /0E   P B[)lT / /%3nݯ­ ^t)%`ealr%]Jn%*UR`h¨;t>!R/=qe÷]/#6<q*eH. K"f=,ɑDy;Fi}3ziq2'4zDzXyAӜ ~U>t_lLD湆` @@@d.Q&`ú߻xDeѓ|Y} yϧ߅!h{P+<{ OB8$a%Lhp%5U9ede>=>;aE4eIb(z+Eq# 2"!rѱˤ9r9cGY6cӠ~ZhR'ȄG|&RwZPBjrdڇ'ϥ7LKKư,Q 9B@A&eR1'33-}eX F'>Dݾ%"xoҙdںn,4ҹfy] z7>6~[tD"HH|p'ǎ4lDVB ' B۸rx/V,xѧpe :DV_69"~"lϮ!pa;{gbM,ptٺ2"N5huϑth~G"hHL(S/FlȮ}mqC:Xi#57r`eBa2v95jyC,Ԥ4]ͻ}V3_$ hl}TlۊsW/gnaj)Q/ڥWMw޾{%r'}6.e;[ N 2ckJY&],w:I@Led5*1ѵG}-kVn}z5W./~!ZJ. v:5^6쓅xҎٽt͢gfah4̂ ̧~޶,iHumU>)leUB@*4֑/V>~zl鼎VRޜ7a7G}yN\toW:҃7)/SYe l81 yEM7;gqGp2|UW_Ѯs׮!۬6NGA.+''++%+=pYlo&ľ(6,W;ve>4I$g3g;SڡB-6?-6$c~.wn\?7W7x.1q<6p;rmkFvb@E!>)cӣk@@HpR_? 3ϒ2^ p2ILPuEؐ)l@`(>d$QF?/c,E08K>,-! 
Xmʍ?^*P@u=O ]hX%V..-)g6\CҴȘQf /7<&`݂6;ׅYЈz>`糮,Tz£y2ɱ,+6~N08g>ogmsO2%S11 bA-vh-)#X{nbȿ)dTU͆:X)*X^|0XW!1^iF}6ͭz]t9sY㜿G1'vDt|0Ovy ++Y2(7:&9Ƶۦ미=19Yݜ)SK}׏(SCr:ݢ867.J/v='G H"k:b(^{L6{fUy}lv(ǜVyM4gV3{9u__ۧ-y{lHR$aLX;q7Ƶc3PcI?m#bzaCi+i;^ohZ;Y,îd!hHQCMUB@@P1Svڭ#Z&E !\8{K8W9/b :&f x[А}R(1N.)HkA~/`MSU*FA"_BJ/h@ frWBE,P kC&4Kp2햳NӼ%([TXRz&Gak#'xxYS2$T)4yL+E9H4.dԜC.?}gN9o5kq )R#@$;ygڒ)Zn6 pJ{c16Ș)e1m.HcqŹ f8<Z[؞l9s֨<X;6kn|@ ߣ2{|=̜fsHuFދD.#_50}z@vC:KMucJaMnq]_1M߉}S#Rj8>X 7@@k,(l5TF/pkmXb-|sn>*2]I<'GebWF'W5ZkV&L+[*-.;&\tbhxܳ,% uw]/+Ŏ^z)H/rT]kx>?;fV(R 46R),oz!eFb$n]mx5'>"'C=J?rR򅬻xyڑclԴ'ͭ6ۈDƳ<,l`cS>܆I22Q+NpժgIj-UtՈa;ƆYo8t?>3gUXykm)߲]bw    3xDiP-hIbE"ۼ81 zXml4LIhB cN 5 o\||3/IvRyG˕բJ㍆EI*&K&LnaPV=R]XVS0^}qy{s8 oᙎk(qɥ+,'9"T@u0_*7bY[ †[S'̍^zUUz\]FV)kzu=׫)h^ 6xR|Z#ڗQ QHrNyQ29ysY]+XxY.oEQD@@@4N7/@n wz+Cbxs)ީ@|Ra/z%Õ=nyd O^y@@@@@$>)lԽ\p@IFZ RY%@@@@@@@@@@@@@@@@@@@@@@@r|r5*Ϯ~96E۬:bO@ײ^jJ VŲ`VDBuu@@@@@@6LJIi=y9׌LDKO9^mrǛ3Ҍ)ɡ폾:qbVGL뭩uß|4͙>cGl㡕}D6}UG|/ͥ{3+[a-(Rߺ?-9b,^ .T7"*;}IENDB`././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/doc/source/install/figures/network2-services.png0000664000175000017500000052376300000000000024370 0ustar00zuulzuul00000000000000PNG  IHDRsRGB pHYs&:4iTXtXML:com.adobe.xmp 5 2 1 2@IDATx|Eg])ҋ_a{m}m* E@r\](bAJ}_}}E}l;v RTZM6l.. IxN<3 `L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 0&`L 4+Yņ#@\g*yyA(&.l @N>/_%a~GpA#,߷ zȑf4lZ qzejE9Yo8s<@&дtuz^? ǚp]1(zt}n cNCj=K3C+euf/~sU3JJV>]ZǴ2tWƭq˲‡Xlڪp.+{7]K59H4%X'=7mhn2 <%yzvpx/)(V|{#v?oJRMRN=`7]`ieҩn 8mJujnE)HowenmuK&A7OP[.<"G`w*KݯX uu7UҍB=dՇſosOhXUR8*2=%.[gKJO[//W8QI?kMDqKOX.5=3^4?|Ϥ6O %qHy8H =xĘ@r~By{ߖib/}r=8޿dOp\Mc7ׅB=b1vo](Q?cLe`aeWkT 4eh:4zٔ6MT&zo goqEFM%l)U)KRs4)&8_l*`FD@({L gV4?ZG[X-,A)R;] 70\rЇ顿DKcf;o]y#)ŭBjYoKhaYeŌ''fC Sϟ ٳTz](u*+/x4~+~j;L*GIuO}uljcL :7уaS&PMt5 ~tSoRVlNPP"2h|  ThEl tUV$fN"G#jsuiAėwzi9,[H!pPyACٹ (4EzOg]LA;,B aG@iyd݇1Q{jfAذr6Eb}w9sgr/ f7U<8&;9wuڕ| Чh-4޹)ѡAHu~vh!8>()'x<'7'F"/'A#?>:Aɟ^j P0uORo&$={iL7m~PAÎr^(m4 {FvuL% z@\MPm~9K}yS{ "ت^ÔQmz}kh~Q]s.V7 M@J͜!ױD g mR-7ͳgw1dBLwT4l s` |=HGPyZ&j:'Tc2]O&G#QB 䜊3GaڋӱM0s蜟);3u6顡a-A`UC%0QՈ>%2"EzXĝy0$]  A@:;ѐH] ~=HňoyG8%<ݩ͕`sS\(|AfG[Ii$!Nimr ~ ]nty [;-{{>vcM\r@}]?yܷiZx'Z9Wce[: 3 3(CP܏Ԙ5EP'*(zqߘw>x/|Nxt䒇cɼ6b$ĩc9 /F=`8"j|9XLz^xm2e_rSF쳥*~GY[B'&FZ áHgwcϓ(o=>0{2"V9iڷG7gTWСLVR+,+Q(pz!irP*ڦ T"OwxJ,1CG8Ѓ2y^_5*9 @и{yw=uA;j:(xPnQ6ūl>>3=>! ڭ㌲[/BtlO41ԩQFbXռ>qB=P_r8vyT2Ƀh={:|)B7r܏d:UJiʾ(6%%*,e]Knx $?C o.1١Kvs>eqy[יf X ;Ey sު~gJA4Ss#}…fFX!hM6/_(|6ol(vvbf48!5N[~y4A[/"Mjk6y]~zGL9PeX΍s!?<);xG՟hKWeY({3QΩqkUڋ]SotRNhSl1AHkbmL4q;Fț IvtE@#EF޶W7VU( |)gt!efex { QJNW~Mt}:iX:)zJd^3+4xpv.1-K \ʎo뾗8iLj 1vϨxQTcxJ,1m kFPt0N\}Fh>㝋{wfi0!_RQ/dyy-ڏyӆR۶pMw>3d6̀o&Ii `!;Q"v"(͢ cnA좣LP PC_6~Dz<.qѯ G7h9⌆=F\N@JRvXNf4:f)ݓvo;#=/}.PjR1Q'l}W)<`J0c:\4ȍ! 8Ρ ǼTϺ q fE*د4F칟)n.cLvn+=nASlk̊d;F"D93ڤGSAہ=hȰ㹷L450X(S}>2wZȇQhrI@6FPg=KzAԯߌɴ)t ;:'Xz񇺉 c B!-AGx>/WAAL$:"~5=+2׾\u:8>K?;j#"UH-&wזe |u[-[((+L6TG{;.B9(L2.g4Tu>@JĻo}YlPAoI_t}=BUoes dF ,hTS |B  Bw bA 'd\}t<\G>JF0NA٢Ⰴv]a5%,trSe#uۡci24yzJ)ﱅTu]xt w縌.}D*Z.eF2LՔzu{gI4 ^OŎE2ŞZȟ\vDjkPbkmJ<*]E'jgj7Ї uspw4;%"Q^6*owH|cZV<'vo'H| C/'b0 GLLv uhv0çsE:/3p?ޞiuM+҇q_蕾C <-F>" б,]Av+ jQ%dٖ+"VB<9qґz\$^ k!mG}:.o$hZiœV!S&(ԅ4`644Ow?6{}x;9]%ۂ UI4F`@WDdSn."Ou:bߨBٯT#v·C{sJg|Zh4Pǎ59*Dž>l_]3>v-ԟZJ^+t4NSl )r;FsiƱ|Q"dpQ0pXSXkٵ߄(ByHxbW޺ F7mEmv""w,Yq=ôfCW*EjTs[?:45f<`? 
e[<-vM< 銧N:$dK#᝹\vcۋST~gV#An-%_Fډ[BۍWi" ~;z4|J0ɰxeLCG"[Bcfu>pfHf2|Pl)qK]Y^Xs0 ]"g{AOC߃>ԩb+g1؅E?;x0)^ǐBcÖP6Eȴw?O Cw-4yY;UVa⅐Lǚ nN+>3F{DFnGw oV݋ѨO{:Ǟ؋z9;>Jho9*\8\eLy0!a {W.ubOhg>uZ Z8Gۏ(M_#͢cЎ/Η{4{u gmŞc_ޗNhQ>3<#9dYwcγi0e͆>vy.X%N[ yPTu&$QQqNإ;])6sc⠶TfFBM:آ;kyτ>hpE=c}0[DI3ݩG|SnS/%*)E-m .Al#;ֶm-?lgK"կ;3* rjD_]X o!*rZy$zv@=əyEB,dE ~)!ΣMێ:hij+aEzjU9 0F6 $-E~T/ IBTeAx;~0th|ޘ{M:Fi.b؍Ե IM />~0*ߋ{PߡrQwryY=Y{vm6 FyOl9:cbp01.-ўD7osP:EPQTW _l+N X:SH&po;|tݺ&+.jwي)<,a0knBvpޤz.IM aB/IFtxooĈC7Ez!8ꌗm. l=pxLDT߻fX+}/_Ng+N&[R֓HS=zLu^_SnkJvđB-~5g6VDFqS-k)0f4e5gGwWCx;C0gV5Wef?ԱYߢX]vY9q"AgTC'Ҍ |ܱWQ9"MNXt7iep(Fˊ]o:1c^1d1gWj)ɶʦRlsA"&w9>л7H\R߇iڑwxn̎;cI&Fh𡥊F-&/n(:=ZLi& #ܳory4aTڧQ#dܙSB0!rrFHH F3 ܣ]Nʍp.| > tgnfxJB#o$nm9rgկH/ծa|AǬu O2Ոeıyn< o?ؾ~ IksHrd U^ )uq<_+wbgD8"j12QZjA sJ>PaOSE|*6t(=<ߗdיn t{U ~O@Hg o %)آMA0t/+\&T>H%KUc-E>͒hJуT^(Mcǧ$8g4tͤi븻-!up`f{(e UyN33@ϯs!2z+E.GQ/i*{wTx!=ì<=ksA\^b>mdFdivvEQ.?7j`uP`A(3Ǽ." FŐ@7Yw6mλ;#"M hw=x-Ϝ놖Xㅺu"u_- N ОB^ŧg_q?]wf<"b#@ȞPfo{L7aYv] $7ApLa<6Ө.)㫸Ӹ~Ol7*0\S&qNڋz AaTReJ C\ڡ*U)tN-޵gYNo^d,?MKF4nC e:FoBzox܏EWWl7|5h2}h(?h) 5&\OxjgeV_ -[@F>į 9aeb(XflpE0E )_}&Q*=EgSjw-eڏh?JuDR{-|vE;< }v [ޅY5~+}=ٹE|R񝌅u r@,?>*iqrxǙ(xmsܭUa"%M G!oPVhfRY 7)֍.e1lwP>F(#nH4R H|-ζ`PE)lG}Zp,Zw~8[=3io^δ17gȁ!e%X+ #/^w zԣgP U[DuWmi!+, moBcKϞK<~NBWOzĝL xf#tF`&\w@k{djp;:=HrG_gm- Mt&FD~=:;*zSvCX^>!yy ^ _|v4|" F̉g_ {|Y}c7Ty\jmA` *Uï1BSW3!(R>Srh] 9(y6KviCqW•p:{#zw{+g|W:+MvpPwc`ʾ k+R}ٗ0^G?w!B0g 9={D^gGb GuRV^oBc8ޕ'#xW[iCQ/nH\='4H=s5TFj@y18|?CgKM&w]!ZGRVb}Q*P1md%e*M.RC_-y(jx> ImԱv^[j~0h7=^yZ$Z:، $>@ @*o~:in8ړK6cyLOmWŪbPZ$eS %l̂!38=m-wS]ܨ^sbw v뺞ۭT<]VE hX;A8":2{Ǭ6dNA(v|C.uY9CǦ:.FE7Jw5w)1C[FIt5MmU1R:_/״4*oZuGXb6JS>Yk,"ox:R.5MƼuYsyvnֽ ְsm_qS<.`L 0D`a#Q$&v{9-Q66?|GԿ|͙:u[SA׽E᷷ S"@p* }bS0t]i.d~3&vHN#`L n=%_F81֤v pѦ) Jb(ML`/4?Ogii41&)㰘`m B]ge:yɮJsAN` %;6{桦eђ3@!F `-SG}[_:483g@s srMm5(b:쟌ǡu'/[f_OGQ09re [sR>JߤО;~?~=tJс> ~>{5MNH}4# a-س>k`:4AY؉3~$ Jb5U{)GHFch,xB/Iw% J J[ án!hv@u;<W6'SzV">QRݍ͘`L~ tNu&Fw8SӦmq粍[=|QNguss.vۇVXm0m|D #|]g;BJGOPUHP~SiɱMe>='/>sAN֛cC=&zSO];` Ft.lI k3s:A4+f@>s #)K8FzWݻ/G;S,⺷/)l. 93&@cTMY7v` #~3:A6I&K1,l*Unj$?ӳfEzc6Ls>QʞRKfoq[eq)Tj ~X@:AV %G|ψLiCZ})"N *3"l}ŵ\Gr$6>`LjT2&v[PrTR4ɻR9$lNjm3&+`ʴC3SWёs/jmYt#0RC*[QsҔam(::ڏH 8b6EC Ҳ/:G w^"BձV 0&!:&Hq3gvebOk.>!r*(E,[V+9} ,r @pa{_jc27YsY03҃g0icrޭ.}|c8C#Nv~s3h`L 0&hFJnEY-R"|Δ$ʛom Å 3,JGxԈlOV/`L h=v@Y޾9e5i{̷UJQ \$d8D7"sP t'% Y$+IH䜁 RZ? 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/doc/source/install/get-started-block-storage.rst0000664000175000017500000000676600000000000024320 0ustar00zuulzuul00000000000000===================================== Cinder Block Storage service overview ===================================== The OpenStack Block Storage service (Cinder) adds persistent storage to a virtual machine. Block Storage provides an infrastructure for managing volumes, and interacts with OpenStack Compute to provide volumes for instances. The service also enables management of volume snapshots, and volume types. The Block Storage service consists of the following components: cinder-api Accepts API requests, and routes them to the ``cinder-volume`` for action. cinder-volume Interacts directly with the Block Storage service, and processes such as the ``cinder-scheduler``. It also interacts with these processes through a message queue. The ``cinder-volume`` service responds to read and write requests sent to the Block Storage service to maintain state. It can interact with a variety of storage providers through a driver architecture.
cinder-scheduler daemon Selects the optimal storage provider node on which to create the volume. A similar component to the ``nova-scheduler``. cinder-backup daemon The ``cinder-backup`` service provides backing up volumes of any type to a backup storage provider. Like the ``cinder-volume`` service, it can interact with a variety of storage providers through a driver architecture. Messaging queue Routes information between the Block Storage processes. The default volume type ----------------------- Since the Train release, it is required that each volume must have a *volume type*, and thus the required configuration option ``default_volume_type`` must have a value. A system-defined volume type named ``__DEFAULT__`` is created in the database during installation and is the default value of the ``default_volume_type`` configuration option. You (or your deployment tool) may wish to have a different volume type that is more suitable for your particular installation as the default type. This can be accomplished by creating the volume type you want using the Block Storage API, and then setting that volume type as the value for the configuration option. (The latter operation, of course, cannot be done via the Block Storage API.) The system defined ``__DEFAULT__`` volume type is a regular volume type that may be updated or deleted. There is nothing special about it. It only exists because there must always be at least one volume type in a cinder deployment, and before the Block Storage API comes up, there is no way for there to be a volume type unless the system creates it. Given that since the Victoria release it is possible to set a default volume type for any project, having a volume type named ``__DEFAULT__`` in your deployment may be confusing to your users, leading them to think this is the type that will be assigned while creating volumes (if the user doesn't specify one) or them specifically requesting ``__DEFAULT__`` when creating a volume instead of the actual configured default type for the system or their project. If you don't wish to use the ``__DEFAULT__`` type, you may delete it. The Block Storage API will prevent deletion under these circumstances: * If ``__DEFAULT__`` is the value of the ``default_volume_type`` configuration option then it cannot be deleted. The solution is to make a different volume type the value of that configuration option. * If there are volumes in the deployment of the ``__DEFAULT__`` type, then it cannot be deleted. The solution is to retype those volumes to some other appropriate volume type. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/doc/source/install/index-obs.rst0000664000175000017500000000173500000000000021232 0ustar00zuulzuul00000000000000================================================================ Cinder Installation Guide for openSUSE and SUSE Linux Enterprise ================================================================ This section describes how to install and configure storage nodes for the Block Storage service. For simplicity, this configuration references one storage node with an empty local block storage device. The instructions use ``/dev/sdb``, but you can substitute a different value for your particular node. The service provisions logical volumes on this device using the :term:`LVM ` driver and provides them to instances via :term:`iSCSI ` transport. 
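For orientation only, a minimal sketch of what the resulting LVM backend stanza in ``cinder.conf`` can look like is shown below. The backend name ``lvm``, the volume group name ``cinder-volumes`` and the ``lioadm`` target helper are assumptions made for this sketch; the authoritative, distribution-specific values are given in the storage node installation page listed in the toctree that follows.

.. code-block:: ini

   [DEFAULT]
   # Expose the backend defined in the [lvm] section below.
   enabled_backends = lvm

   [lvm]
   # Reference LVM driver: carves logical volumes out of the volume group
   # and exports them to instances over iSCSI.
   volume_driver = cinder.volume.drivers.lvm.LVMVolumeDriver
   volume_group = cinder-volumes
   target_protocol = iscsi
   target_helper = lioadm

The target helper in particular differs between distributions (for example ``tgtadm`` with the tgt daemon versus ``lioadm`` with LIO), so treat this as a sketch rather than a copy-and-paste configuration.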
You can follow these instructions with minor modifications to horizontally scale your environment with additional storage nodes. .. toctree:: :maxdepth: 2 cinder-controller-install-obs.rst cinder-storage-install-obs.rst cinder-backup-install-obs.rst cinder-verify.rst ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/doc/source/install/index-rdo.rst0000664000175000017500000000175200000000000021232 0ustar00zuulzuul00000000000000====================================================================== Cinder Installation Guide for Red Hat Enterprise Linux and CentOS ====================================================================== This section describes how to install and configure storage nodes for the Block Storage service. For simplicity, this configuration references one storage node with an empty local block storage device. The instructions use ``/dev/sdb``, but you can substitute a different value for your particular node. The service provisions logical volumes on this device using the :term:`LVM ` driver and provides them to instances via :term:`iSCSI ` transport. You can follow these instructions with minor modifications to horizontally scale your environment with additional storage nodes. .. toctree:: :maxdepth: 2 cinder-controller-install-rdo.rst cinder-storage-install-rdo.rst cinder-backup-install-rdo.rst cinder-verify.rst ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/doc/source/install/index-ubuntu.rst0000664000175000017500000000162200000000000021764 0ustar00zuulzuul00000000000000==================================== Cinder Installation Guide for Ubuntu ==================================== This section describes how to install and configure storage nodes for the Block Storage service. For simplicity, this configuration references one storage node with an empty local block storage device. The instructions use ``/dev/sdb``, but you can substitute a different value for your particular node. The service provisions logical volumes on this device using the :term:`LVM ` driver and provides them to instances via :term:`iSCSI ` transport. You can follow these instructions with minor modifications to horizontally scale your environment with additional storage nodes. .. toctree:: :maxdepth: 2 cinder-controller-install-ubuntu.rst cinder-storage-install-ubuntu.rst cinder-backup-install-ubuntu.rst cinder-verify.rst ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/doc/source/install/index-windows.rst0000664000175000017500000000057600000000000022143 0ustar00zuulzuul00000000000000===================================== Cinder Installation Guide for Windows ===================================== This section describes how to install and configure storage nodes for the Block Storage service. For the moment, Cinder Volume is the only Cinder service supported on Windows. .. toctree:: :maxdepth: 2 cinder-storage-install-windows.rst cinder-verify.rst ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/doc/source/install/index.rst0000664000175000017500000000335200000000000020446 0ustar00zuulzuul00000000000000.. _cinder: ========================= Cinder Installation Guide ========================= The Block Storage service (cinder) provides block storage devices to guest instances. 
The method in which the storage is provisioned and consumed is determined by the Block Storage driver, or drivers in the case of a multi-backend configuration. There are a variety of drivers that are available: NAS/SAN, NFS, iSCSI, Ceph, and more. The Block Storage API and scheduler services typically run on the controller nodes. Depending upon the drivers used, the volume service can run on controller nodes, compute nodes, or standalone storage nodes. For more information, see the :doc:`Configuration Reference `. Prerequisites ~~~~~~~~~~~~~ This documentation specifically covers the installation of the Cinder Block Storage service. Before following this guide you will need to prepare your OpenStack environment using the instructions in the `OpenStack Installation Guide `_. Once able to 'Launch an instance' in your OpenStack environment follow the instructions below to add Cinder to the base environment. Adding Cinder to your OpenStack Environment ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The following links describe how to install the Cinder Block Storage Service: .. warning:: For security reasons **Service Tokens must be configured** in OpenStack for Cinder to operate securely. Pay close attention to the :doc:`specific section describing it: <../configuration/block-storage/service-token>`. See https://bugs.launchpad.net/nova/+bug/2004555 for details. .. toctree:: get-started-block-storage index-obs index-rdo index-ubuntu index-windows ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1759315577.483122 cinder-27.0.0/doc/source/reference/0000775000175000017500000000000000000000000017072 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/doc/source/reference/README.rst0000664000175000017500000000103700000000000020562 0ustar00zuulzuul00000000000000================================================= Cinder Reference Documentation (source/reference) ================================================= Introduction: ------------- This directory is intended to hold any reference documentation for Cinder that doesn't fit into 'install', 'contributor', 'configuration', 'cli', 'admin', or 'user' categories. The full spec for organization of documentation may be seen in the `OS Manuals Migration Spec `. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/doc/source/reference/support-matrix.ini0000664000175000017500000007071600000000000022624 0ustar00zuulzuul00000000000000# Copyright (C) 2018 Lenovo, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
##################################################################### # Drivers: [driver.datacore] title=DataCore Storage Driver (FC, iSCSI) [driver.datera] title=Datera Storage Driver (iSCSI) [driver.dell_emc_xtremio] title=Dell XtremeIO Storage Driver (FC, iSCSI) [driver.dell_emc_powermax] title=Dell PowerMax (2000, 8000, 2500, 8500) Storage Driver (iSCSI, FC, NVMe-TCP) [driver.dell_emc_powerstore] title=Dell PowerStore Storage Driver (iSCSI, FC, NVMe-TCP) [driver.dell_emc_powerstore_nfs] title=Dell PowerStore NFS Driver (NFS) [driver.dell_emc_sc] title=Dell SC Series Storage Driver (iSCSI, FC) [driver.dell_emc_powerflex] title=Dell PowerFlex (ScaleIO) Storage Driver (ScaleIO) [driver.dell_emc_powervault] title=Dell PowerVault ME Series (iSCSI, FC) [driver.dell_emc_unity] title=Dell Unity Storage Driver (FC, iSCSI) [driver.dell_emc_vmax_af] title=Dell VMAX Af (250F, 450F, 850F, 950F) Storage Driver (FC, iSCSI) [driver.dell_emc_vmax_3] title=Dell VMAX3 (100K, 200K, 400K) Storage Driver (iSCSI, FC) [driver.dell_emc_vnx] title=Dell VNX Storage Driver (FC, iSCSI) [driver.fujitsu_eternus] title=Fujitsu ETERNUS Driver (FC, iSCSI) [driver.fungible] title=Fungible Storage Driver (NVMe-TCP) [driver.hitachi_vsp] title=Hitachi VSP Storage Driver (FC, iSCSI) [driver.hpe_3par] title=HPE 3PAR Storage Driver (FC, iSCSI) [driver.hpe_msa] title=HPE MSA Driver (iSCSI, FC) [driver.hpe_nimble] title=HPE Nimble Storage Driver (iSCSI, FC) [driver.hpe_xp] title=HPE XP Storage Driver (FC, iSCSI) [driver.huawei_t_v1] title=Huawei T Series V1 Driver (iSCSI, FC) [driver.huawei_t_v2] title=Huawei T Series V2 Driver (iSCSI, FC) [driver.huawei_v3] title=Huawei V3 Series Driver (iSCSI, FC) [driver.huawei_f_v3] title=Huawei F V3 Series Driver (iSCSI, FC) [driver.huawei_v5] title=Huawei V5 Series Driver (iSCSI, FC) [driver.huawei_f_v5] title=Huawei F V5 Series Driver (iSCSI, FC) [driver.huawei_18000] title=Huawei 18000 Series Driver (iSCSI, FC) [driver.huawei_dorado] title=Huawei Dorado V3, V6 Series Driver (iSCSI, FC) [driver.huawei_fusionstorage] title=Huawei FusionStorage, OceanStor 100D Driver (dsware) [driver.ibm_ds8k] title=IBM DS8000 Family Storage Driver (FC) [driver.ibm_flashsystem] title=IBM FlashSystem Driver (iSCSI) [driver.ibm_gpfs] title=IBM GPFS Storage Driver (gpfs) [driver.ibm_storwize] title=IBM Storage Virtualize family Driver (iSCSI, FC) [driver.ibm_xiv] title=IBM Spectrum Accelerate Family Driver (iSCSI, FC) [driver.infinidat] title=Infinidat Storage Driver (iSCSI, FC) [driver.infortrend] title=infortrend Storage Driver (iSCSI, FC) [driver.inspur] title=Inspur AS/HF Series Driver (iSCSI, FC) [driver.inspur_as13000] title=Inspur AS13000 Storage Driver (iSCSI) [driver.kaminario] title=Kaminario Storage Driver (iSCSI, FC) [driver.kioxia_kumoscale] title=Kioxia Kumoscale Driver (NVMeOF) [driver.lenovo] title=Lenovo Storage Driver (FC, iSCSI) [driver.lightbits_lightos] title=Lightbits Storage Driver (NVMeTCP) [driver.linbit_linstor] title=LINBIT DRBD/LINSTOR Driver (DRBD) [driver.lvm] title=Logical Volume Manager (LVM) Reference Driver (iSCSI) [driver.macrosan] title=MacroSAN Storage Driver (iSCSI, FC) [driver.nec] title=NEC Storage M Series Driver (iSCSI, FC) [driver.nec_v] title=NEC Storage V Series Driver (iSCSI, FC) [driver.netapp_ontap_nvme_tcp] title=NetApp Data ONTAP Driver (NVMe/TCP) [driver.netapp_ontap_iscsi_fc] title=NetApp Data ONTAP Driver (iSCSI,FC) [driver.netapp_ontap_nfs] title=NetApp Data ONTAP Driver (NFS) [driver.netapp_solidfire] title=NetApp Solidfire Driver (iSCSI) [driver.nexenta] 
title=Nexenta Driver (iSCSI, NFS) [driver.nfs] title=Generic NFS Reference Driver (NFS) [driver.opene_joviandss] title=Open-E JovianDSS Storage Driver (iSCSI) [driver.prophetstor] title=ProphetStor Flexvisor Driver (iSCSI, NFS) [driver.pure] title=Pure Storage Driver (iSCSI, FC, NVMe-RoCE, NVMe-TCP) [driver.qnap] title=QNAP Storage Driver (iSCSI) [driver.quobyte] title=Quobyte Storage Driver (quobyte) [driver.rbd] title=RBD (Ceph) Storage Driver (RBD) [driver.rbd_iscsi] title=(Ceph) iSCSI Storage Driver (iSCSI) [driver.sandstone] title=SandStone Storage Driver (iSCSI) [driver.seagate] title=Seagate Driver (iSCSI, FC) [driver.storpool] title=StorPool Storage Driver (storpool) [driver.synology] title=Synology Storage Driver (iSCSI) [driver.toyou_netstor] title=TOYOU NetStor Storage Driver (iSCSI, FC) [driver.toyou_netstor_tyds] title=TOYOU NetStor TYDS Storage Driver (iSCSI) [driver.vrtsaccess] title=Veritas Access iSCSI Driver (iSCSI) [driver.vrtscnfs] title=Veritas Cluster NFS Driver (NFS) [driver.vzstorage] title=Virtuozzo Storage Driver (remotefs) [driver.vmware] title=VMware Storage Driver (vmdk) [driver.win_iscsi] title=Windows iSCSI Driver [driver.win_smb] title=Windows SMB Driver [driver.yadro] title=Yadro Tatlin Unified Driver (iSCSI, FC) [driver.zadara] title=Zadara Storage Driver (iSCSI, NFS) ##################################################################### # Functions: [operation.supported] title=Supported Vendor Driver status=optional notes=A vendor driver is considered supported if the vendor is running a third party CI that regularly runs and reports accurate results. If a vendor doesn't meet this requirement the driver is marked unsupported and is removed if the problem isn't resolved before the end of the subsequent release. driver.datacore=complete driver.datera=complete driver.dell_emc_powermax=complete driver.dell_emc_powerstore=complete driver.dell_emc_powerstore_nfs=complete driver.dell_emc_powervault=complete driver.dell_emc_sc=missing driver.dell_emc_unity=complete driver.dell_emc_vmax_af=complete driver.dell_emc_vmax_3=complete driver.dell_emc_vnx=missing driver.dell_emc_powerflex=complete driver.dell_emc_xtremio=missing driver.fujitsu_eternus=complete driver.fungible=complete driver.hitachi_vsp=complete driver.hpe_3par=complete driver.hpe_msa=complete driver.hpe_nimble=complete driver.hpe_xp=complete driver.huawei_t_v1=complete driver.huawei_t_v2=complete driver.huawei_v3=complete driver.huawei_f_v3=complete driver.huawei_v5=complete driver.huawei_f_v5=complete driver.huawei_18000=complete driver.huawei_dorado=complete driver.huawei_fusionstorage=complete driver.infinidat=complete driver.ibm_ds8k=complete driver.ibm_flashsystem=missing driver.ibm_gpfs=complete driver.ibm_storwize=complete driver.ibm_xiv=complete driver.infortrend=complete driver.inspur=complete driver.inspur_as13000=complete driver.kaminario=complete driver.kioxia_kumoscale=complete driver.lenovo=complete driver.lightbits_lightos=complete driver.linbit_linstor=complete driver.lvm=complete driver.macrosan=complete driver.nec=complete driver.nec_v=complete driver.netapp_ontap_nvme_tcp=complete driver.netapp_ontap_iscsi_fc=complete driver.netapp_ontap_nfs=complete driver.netapp_solidfire=complete driver.nexenta=complete driver.nfs=complete driver.opene_joviandss=complete driver.prophetstor=missing driver.pure=complete driver.qnap=missing driver.quobyte=missing driver.rbd=complete driver.rbd_iscsi=complete driver.sandstone=complete driver.seagate=complete driver.storpool=complete 
driver.synology=complete driver.toyou_netstor=complete driver.toyou_netstor_tyds=complete driver.vrtsaccess=missing driver.vrtscnfs=missing driver.vzstorage=missing driver.vmware=complete driver.win_iscsi=missing driver.win_smb=missing driver.yadro=complete driver.zadara=complete [operation.online_extend_support] title=Extend an Attached Volume status=optional notes=Cinder supports the ability to extend a volume that is attached to an instance, but not all drivers are able to do this. driver.datacore=missing driver.datera=complete driver.dell_emc_powermax=complete driver.dell_emc_powerstore=complete driver.dell_emc_powerstore_nfs=complete driver.dell_emc_powervault=complete driver.dell_emc_sc=complete driver.dell_emc_unity=complete driver.dell_emc_vmax_af=complete driver.dell_emc_vmax_3=complete driver.dell_emc_vnx=complete driver.dell_emc_powerflex=complete driver.dell_emc_xtremio=complete driver.fujitsu_eternus=complete driver.fungible=missing driver.hitachi_vsp=complete driver.hpe_3par=complete driver.hpe_msa=complete driver.hpe_nimble=complete driver.hpe_xp=complete driver.huawei_t_v1=complete driver.huawei_t_v2=complete driver.huawei_v3=complete driver.huawei_f_v3=complete driver.huawei_v5=complete driver.huawei_f_v5=complete driver.huawei_18000=complete driver.huawei_dorado=complete driver.huawei_fusionstorage=complete driver.infinidat=complete driver.ibm_ds8k=complete driver.ibm_flashsystem=complete driver.ibm_gpfs=missing driver.ibm_storwize=complete driver.ibm_xiv=missing driver.infortrend=complete driver.inspur=complete driver.inspur_as13000=complete driver.kaminario=complete driver.kioxia_kumoscale=complete driver.lenovo=complete driver.lightbits_lightos=complete driver.linbit_linstor=complete driver.lvm=complete driver.macrosan=complete driver.nec=complete driver.nec_v=complete driver.netapp_ontap_nvme_tcp=complete driver.netapp_ontap_iscsi_fc=complete driver.netapp_ontap_nfs=missing driver.netapp_solidfire=complete driver.nexenta=complete driver.nfs=missing driver.opene_joviandss=missing driver.prophetstor=complete driver.pure=complete driver.qnap=complete driver.quobyte=missing driver.rbd=complete driver.rbd_iscsi=complete driver.sandstone=complete driver.seagate=complete driver.storpool=complete driver.synology=complete driver.toyou_netstor=complete driver.toyou_netstor_tyds=complete driver.vrtsaccess=complete driver.vrtscnfs=complete driver.vzstorage=complete driver.vmware=complete driver.win_iscsi=complete driver.win_smb=complete driver.yadro=complete driver.zadara=complete [operation.qos] title=QoS status=optional notes=Vendor drivers that support Quality of Service (QoS) at the backend. This means they are able to utilize QoS Specs associated with volume extra specs to control QoS settings at the storage device on a per volume basis. Drivers that don't support this can utilize frontend QoS via libvirt. 
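# For illustration only, a backend QoS spec is typically created and attached
# to a volume type with commands along these lines; the spec name, type name
# and limit value below are placeholders, not defaults or recommendations:
#
#   openstack volume qos create --consumer back-end --property total_iops_sec=500 gold-qos
#   openstack volume qos associate gold-qos gold-type
#
# Volumes created with that volume type then inherit the backend-side limits.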
driver.datacore=missing driver.datera=complete driver.dell_emc_powermax=complete driver.dell_emc_powerstore=complete driver.dell_emc_powerstore_nfs=missing driver.dell_emc_powervault=missing driver.dell_emc_sc=complete driver.dell_emc_unity=complete driver.dell_emc_vmax_af=complete driver.dell_emc_vmax_3=complete driver.dell_emc_vnx=complete driver.dell_emc_powerflex=complete driver.dell_emc_xtremio=missing driver.fujitsu_eternus=missing driver.fungible=missing driver.hitachi_vsp=complete driver.hpe_3par=complete driver.hpe_msa=missing driver.hpe_nimble=missing driver.hpe_xp=missing driver.huawei_t_v1=missing driver.huawei_t_v2=complete driver.huawei_v3=complete driver.huawei_f_v3=complete driver.huawei_v5=complete driver.huawei_f_v5=complete driver.huawei_18000=complete driver.huawei_dorado=complete driver.huawei_fusionstorage=missing driver.infinidat=complete driver.ibm_ds8k=missing driver.ibm_flashsystem=missing driver.ibm_gpfs=missing driver.ibm_storwize=complete driver.ibm_xiv=missing driver.infortrend=missing driver.inspur=complete driver.inspur_as13000=missing driver.kaminario=missing driver.kioxia_kumoscale=missing driver.lenovo=missing driver.lightbits_lightos=missing driver.linbit_linstor=missing driver.lvm=missing driver.macrosan=complete driver.nec=complete driver.nec_v=missing driver.netapp_ontap_nvme_tcp=missing driver.netapp_ontap_iscsi_fc=complete driver.netapp_ontap_nfs=complete driver.netapp_solidfire=complete driver.nexenta=missing driver.nfs=missing driver.opene_joviandss=missing driver.prophetstor=missing driver.pure=complete driver.qnap=missing driver.quobyte=missing driver.rbd=complete driver.rbd_iscsi=missing driver.sandstone=complete driver.seagate=missing driver.storpool=missing driver.synology=missing driver.toyou_netstor=missing driver.toyou_netstor_tyds=missing driver.vrtsaccess=missing driver.vrtscnfs=missing driver.vzstorage=missing driver.vmware=missing driver.win_iscsi=missing driver.win_smb=missing driver.yadro=complete driver.zadara=missing [operation.volume_replication] title=Volume Replication status=optional notes=Vendor drivers that support volume replication can report this capability to be utilized by the scheduler allowing users to request replicated volumes via extra specs. Such drivers are also then able to take advantage of Cinder's failover and failback commands. 
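# For illustration only, replication is usually requested through a volume
# type extra spec and exercised with the failover command; the type name,
# service host and backend id below are placeholders:
#
#   openstack volume type set --property replication_enabled='<is> True' replicated-type
#   cinder failover-host cinder-host@backend --backend_id secondary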
driver.datacore=missing driver.datera=missing driver.dell_emc_powermax=complete driver.dell_emc_powerstore=complete driver.dell_emc_powerstore_nfs=missing driver.dell_emc_powervault=missing driver.dell_emc_sc=complete driver.dell_emc_unity=complete driver.dell_emc_vmax_af=complete driver.dell_emc_vmax_3=complete driver.dell_emc_vnx=complete driver.dell_emc_powerflex=complete driver.dell_emc_xtremio=missing driver.fujitsu_eternus=missing driver.fungible=missing driver.hitachi_vsp=missing driver.hpe_3par=complete driver.hpe_msa=missing driver.hpe_nimble=complete driver.hpe_xp=missing driver.huawei_t_v1=missing driver.huawei_t_v2=missing driver.huawei_v3=complete driver.huawei_f_v3=complete driver.huawei_v5=complete driver.huawei_f_v5=complete driver.huawei_18000=complete driver.huawei_dorado=complete driver.huawei_fusionstorage=missing driver.infinidat=missing driver.ibm_ds8k=complete driver.ibm_flashsystem=missing driver.ibm_gpfs=missing driver.ibm_storwize=complete driver.ibm_xiv=complete driver.infortrend=complete driver.inspur=complete driver.inspur_as13000=missing driver.kaminario=complete driver.kioxia_kumoscale=missing driver.lenovo=missing driver.lightbits_lightos=missing driver.linbit_linstor=missing driver.lvm=missing driver.macrosan=complete driver.nec=missing driver.nec_v=missing driver.netapp_ontap_nvme_tcp=missing driver.netapp_ontap_iscsi_fc=complete driver.netapp_ontap_nfs=complete driver.netapp_solidfire=complete driver.nexenta=missing driver.nfs=missing driver.opene_joviandss=missing driver.prophetstor=missing driver.pure=complete driver.qnap=missing driver.quobyte=missing driver.rbd=complete driver.rbd_iscsi=complete driver.sandstone=complete driver.seagate=missing driver.storpool=complete driver.synology=missing driver.toyou_netstor=missing driver.toyou_netstor_tyds=missing driver.vrtsaccess=missing driver.vrtscnfs=missing driver.vzstorage=missing driver.vmware=missing driver.win_iscsi=missing driver.win_smb=missing driver.yadro=missing driver.zadara=missing [operation.consistency_groups] title=Consistency Groups status=optional notes=Vendor drivers that support consistency groups are able to logically group volumes together for things like snapshotting and deletion. Grouping the volumes ensures that operations are only completed on the group of volumes, not individually, enabling the creation of consistent snapshots across a group. 
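# For illustration only, consistent group snapshots are driven through
# Cinder's generic volume groups; the names below are placeholders and the
# exact workflow may vary by deployment:
#
#   cinder group-type-create consistency-type
#   cinder group-type-key consistency-type set consistent_group_snapshot_enabled="<is> True"
#   cinder group-create consistency-type my-volume-type --name my-group
#   cinder group-snapshot-create my-group --name my-group-snap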
driver.datacore=missing driver.datera=missing driver.dell_emc_powermax=complete driver.dell_emc_powerstore=complete driver.dell_emc_powerstore_nfs=missing driver.dell_emc_powervault=missing driver.dell_emc_sc=complete driver.dell_emc_unity=complete driver.dell_emc_vmax_af=complete driver.dell_emc_vmax_3=complete driver.dell_emc_vnx=complete driver.dell_emc_powerflex=complete driver.dell_emc_xtremio=complete driver.fujitsu_eternus=missing driver.fungible=missing driver.hitachi_vsp=complete driver.hpe_3par=complete driver.hpe_msa=missing driver.hpe_nimble=complete driver.hpe_xp=complete driver.huawei_t_v1=missing driver.huawei_t_v2=missing driver.huawei_v3=complete driver.huawei_f_v3=complete driver.huawei_v5=complete driver.huawei_f_v5=complete driver.huawei_18000=complete driver.huawei_dorado=complete driver.huawei_fusionstorage=missing driver.infinidat=missing driver.ibm_ds8k=complete driver.ibm_flashsystem=missing driver.ibm_gpfs=missing driver.ibm_storwize=complete driver.ibm_xiv=complete driver.infortrend=missing driver.inspur=complete driver.inspur_as13000=missing driver.kaminario=missing driver.kioxia_kumoscale=missing driver.lenovo=missing driver.lightbits_lightos=missing driver.linbit_linstor=missing driver.lvm=missing driver.macrosan=missing driver.nec=missing driver.nec_v=complete driver.netapp_ontap_nvme_tcp=complete driver.netapp_ontap_iscsi_fc=complete driver.netapp_ontap_nfs=complete driver.netapp_solidfire=complete driver.nexenta=missing driver.nfs=missing driver.opene_joviandss=missing driver.prophetstor=complete driver.pure=complete driver.qnap=missing driver.quobyte=missing driver.rbd=missing driver.rbd_iscsi=missing driver.sandstone=missing driver.seagate=missing driver.storpool=missing driver.synology=missing driver.toyou_netstor=missing driver.toyou_netstor_tyds=missing driver.vrtsaccess=missing driver.vrtscnfs=missing driver.vzstorage=missing driver.vmware=missing driver.win_iscsi=missing driver.win_smb=missing driver.yadro=missing driver.zadara=missing [operation.thin_provisioning] title=Thin Provisioning status=optional notes=If a volume driver supports thin provisioning it means that it will allow the scheduler to provision more storage space than physically exists on the backend. This may also be called 'oversubscription'. 
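# For illustration only, oversubscription on a thin-provisioned backend is
# bounded by the max_over_subscription_ratio option in that backend's section
# of cinder.conf; the section name and ratio below are placeholders:
#
#   [thin-backend]
#   max_over_subscription_ratio = 20.0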
driver.datacore=missing driver.datera=missing driver.dell_emc_powermax=complete driver.dell_emc_powerstore=complete driver.dell_emc_powerstore_nfs=complete driver.dell_emc_powervault=missing driver.dell_emc_sc=complete driver.dell_emc_unity=complete driver.dell_emc_vmax_af=complete driver.dell_emc_vmax_3=complete driver.dell_emc_vnx=complete driver.dell_emc_powerflex=complete driver.dell_emc_xtremio=complete driver.fungible=missing driver.fujitsu_eternus=complete driver.hitachi_vsp=complete driver.hpe_3par=complete driver.hpe_msa=missing driver.hpe_nimble=complete driver.hpe_xp=complete driver.huawei_t_v1=missing driver.huawei_t_v2=missing driver.huawei_v3=complete driver.huawei_f_v3=complete driver.huawei_v5=complete driver.huawei_f_v5=complete driver.huawei_18000=complete driver.huawei_dorado=complete driver.huawei_fusionstorage=missing driver.infinidat=complete driver.ibm_ds8k=missing driver.ibm_flashsystem=missing driver.ibm_gpfs=missing driver.ibm_storwize=complete driver.ibm_xiv=missing driver.infortrend=complete driver.inspur=missing driver.inspur_as13000=complete driver.kaminario=complete driver.kioxia_kumoscale=complete driver.lenovo=missing driver.lightbits_lightos=complete driver.linbit_linstor=missing driver.lvm=complete driver.macrosan=complete driver.nec=complete driver.nec_v=complete driver.netapp_ontap_nvme_tcp=complete driver.netapp_ontap_iscsi_fc=complete driver.netapp_ontap_nfs=complete driver.netapp_solidfire=complete driver.nexenta=missing driver.nfs=complete driver.opene_joviandss=complete driver.prophetstor=missing driver.pure=complete driver.qnap=missing driver.quobyte=missing driver.rbd=complete driver.rbd_iscsi=complete driver.sandstone=complete driver.seagate=missing driver.storpool=complete driver.synology=missing driver.toyou_netstor=complete driver.toyou_netstor_tyds=complete driver.vrtsaccess=missing driver.vrtscnfs=missing driver.vzstorage=missing driver.vmware=missing driver.win_iscsi=missing driver.win_smb=complete driver.yadro=complete driver.zadara=missing [operation.volume_migration_storage_assisted] title=Volume Migration (Storage Assisted) status=optional notes=Storage assisted volume migration is like host assisted volume migration except that a volume can be migrated without the assistance of the Cinder host. Vendor drivers that implement this can migrate volumes completely through the storage backend's functionality. 
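# For illustration only, the migration request looks the same either way:
# Cinder first asks the driver to move the volume and falls back to
# host-assisted copying if the driver cannot. The volume and destination host
# names below are placeholders:
#
#   cinder migrate my-volume cinder-host@other-backend#pool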
driver.datacore=missing driver.datera=missing driver.dell_emc_powermax=complete driver.dell_emc_powerstore=missing driver.dell_emc_powerstore_nfs=missing driver.dell_emc_powervault=missing driver.dell_emc_sc=missing driver.dell_emc_unity=complete driver.dell_emc_vmax_af=complete driver.dell_emc_vmax_3=complete driver.dell_emc_vnx=complete driver.dell_emc_powerflex=complete driver.dell_emc_xtremio=missing driver.fujitsu_eternus=missing driver.fungible=missing driver.hitachi_vsp=complete driver.hpe_3par=missing driver.hpe_msa=missing driver.hpe_nimble=missing driver.hpe_xp=missing driver.huawei_t_v1=missing driver.huawei_t_v2=missing driver.huawei_v3=complete driver.huawei_f_v3=complete driver.huawei_v5=complete driver.huawei_f_v5=complete driver.huawei_18000=complete driver.huawei_dorado=complete driver.huawei_fusionstorage=missing driver.infinidat=missing driver.ibm_ds8k=missing driver.ibm_flashsystem=missing driver.ibm_gpfs=missing driver.ibm_storwize=complete driver.ibm_xiv=missing driver.infortrend=complete driver.inspur=missing driver.inspur_as13000=missing driver.kaminario=missing driver.kioxia_kumoscale=missing driver.lenovo=missing driver.lightbits_lightos=missing driver.linbit_linstor=missing driver.lvm=missing driver.macrosan=complete driver.nec=complete driver.nec_v=missing driver.netapp_ontap_nvme_tcp=missing driver.netapp_ontap_iscsi_fc=complete driver.netapp_ontap_nfs=complete driver.netapp_solidfire=complete driver.nexenta=missing driver.nfs=missing driver.opene_joviandss=missing driver.prophetstor=missing driver.pure=missing driver.qnap=missing driver.quobyte=missing driver.rbd=missing driver.rbd_iscsi=missing driver.sandstone=missing driver.seagate=missing driver.storpool=complete driver.synology=missing driver.toyou_netstor=complete driver.toyou_netstor_tyds=missing driver.vrtsaccess=missing driver.vrtscnfs=missing driver.vzstorage=missing driver.vmware=missing driver.win_iscsi=missing driver.win_smb=missing driver.yadro=missing driver.zadara=missing [operation.multi-attach] title=Multi-Attach Support status=optional notes=Vendor drivers that report multi-attach support are able to make one volume available to multiple instances at once. It is important to note that a clustered file system that supports multi-attach functionality is required to use multi- attach functionality otherwise data corruption may occur. 
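# For illustration only, multi-attach is requested through a volume type
# extra spec; the type and volume names below are placeholders:
#
#   openstack volume type create --property multiattach="<is> True" multiattach-type
#   openstack volume create --type multiattach-type --size 10 shared-volume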
driver.datacore=missing driver.datera=missing driver.dell_emc_powermax=complete driver.dell_emc_powerstore=complete driver.dell_emc_powerstore_nfs=complete driver.dell_emc_powervault=complete driver.dell_emc_sc=complete driver.dell_emc_unity=complete driver.dell_emc_vmax_af=complete driver.dell_emc_vmax_3=complete driver.dell_emc_vnx=missing driver.dell_emc_powerflex=complete driver.dell_emc_xtremio=complete driver.fujitsu_eternus=missing driver.fungible=missing driver.hitachi_vsp=complete driver.hpe_3par=complete driver.hpe_msa=complete driver.hpe_nimble=complete driver.hpe_xp=complete driver.huawei_t_v1=missing driver.huawei_t_v2=missing driver.huawei_v3=missing driver.huawei_f_v3=missing driver.huawei_v5=missing driver.huawei_f_v5=missing driver.huawei_18000=missing driver.huawei_dorado=missing driver.huawei_fusionstorage=missing driver.infinidat=complete driver.ibm_ds8k=complete driver.ibm_flashsystem=missing driver.ibm_gpfs=missing driver.ibm_storwize=complete driver.ibm_xiv=complete driver.infortrend=complete driver.inspur=missing driver.inspur_as13000=complete driver.kaminario=missing driver.kioxia_kumoscale=missing driver.lenovo=complete driver.lightbits_lightos=complete driver.linbit_linstor=missing driver.lvm=complete driver.macrosan=missing driver.nec=complete driver.nec_v=complete driver.netapp_ontap_nvme_tcp=complete driver.netapp_ontap_iscsi_fc=complete driver.netapp_ontap_nfs=complete driver.netapp_solidfire=complete driver.nexenta=missing driver.nfs=missing driver.opene_joviandss=complete driver.prophetstor=missing driver.pure=complete driver.qnap=missing driver.quobyte=missing driver.rbd=complete driver.rbd_iscsi=complete driver.sandstone=complete driver.seagate=complete driver.storpool=complete driver.synology=missing driver.toyou_netstor=complete driver.toyou_netstor_tyds=missing driver.vrtsaccess=missing driver.vrtscnfs=missing driver.vzstorage=missing driver.vmware=missing driver.win_iscsi=missing driver.win_smb=missing driver.yadro=complete driver.zadara=complete [operation.revert_to_snapshot_assisted] title=Revert to Snapshot status=optional notes=Vendor drivers that implement the driver assisted function to revert a volume to the last snapshot taken. 
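# For illustration only, the operation is exposed from API microversion 3.40
# and only targets the volume's most recent snapshot; the snapshot name below
# is a placeholder:
#
#   cinder --os-volume-api-version 3.40 revert-to-snapshot my-snapshot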
driver.datacore=complete driver.datera=missing driver.dell_emc_powermax=complete driver.dell_emc_powerstore=complete driver.dell_emc_powerstore_nfs=missing driver.dell_emc_powervault=missing driver.dell_emc_sc=missing driver.dell_emc_unity=complete driver.dell_emc_vmax_af=complete driver.dell_emc_vmax_3=complete driver.dell_emc_vnx=complete driver.dell_emc_powerflex=complete driver.dell_emc_xtremio=missing driver.fujitsu_eternus=missing driver.fungible=missing driver.hitachi_vsp=complete driver.hpe_3par=complete driver.hpe_msa=missing driver.hpe_nimble=complete driver.hpe_xp=complete driver.huawei_t_v1=missing driver.huawei_t_v2=missing driver.huawei_v3=missing driver.huawei_f_v3=missing driver.huawei_f_v5=missing driver.huawei_v5=missing driver.huawei_18000=missing driver.huawei_dorado=missing driver.huawei_fusionstorage=missing driver.infinidat=complete driver.ibm_ds8k=complete driver.ibm_flashsystem=missing driver.ibm_gpfs=missing driver.ibm_storwize=complete driver.ibm_xiv=missing driver.infortrend=missing driver.inspur=missing driver.inspur_as13000=missing driver.kaminario=missing driver.kioxia_kumoscale=missing driver.lenovo=missing driver.lightbits_lightos=missing driver.linbit_linstor=missing driver.lvm=complete driver.macrosan=missing driver.nec=complete driver.nec_v=complete driver.netapp_ontap_nvme_tcp=complete driver.netapp_ontap_iscsi_fc=complete driver.netapp_ontap_nfs=complete driver.netapp_solidfire=complete driver.nexenta=missing driver.nfs=missing driver.opene_joviandss=missing driver.prophetstor=missing driver.pure=complete driver.qnap=missing driver.quobyte=missing driver.rbd=complete driver.rbd_iscsi=complete driver.sandstone=complete driver.seagate=missing driver.storpool=complete driver.synology=missing driver.toyou_netstor=complete driver.toyou_netstor_tyds=missing driver.vrtsaccess=missing driver.vrtscnfs=missing driver.vzstorage=missing driver.vmware=complete driver.win_iscsi=missing driver.win_smb=missing driver.yadro=complete driver.zadara=missing [operation.active_active_ha] title=Active/Active High Availability Support status=optional notes=Vendor drivers that support running in an active/active high availability mode. Indicating support for this means that the driver doesn't contain things, such as local locks, that may impact an active/active configuration and that the driver has been tested to function properly in such a configuration. 
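# For illustration only, active/active deployments group volume services into
# a cluster via cinder.conf; the cluster name below is a placeholder:
#
#   [DEFAULT]
#   cluster = cinder-aa-cluster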
driver.datacore=missing driver.datera=missing driver.dell_emc_powermax=complete driver.dell_emc_powerstore=complete driver.dell_emc_powerstore_nfs=missing driver.dell_emc_powervault=missing driver.dell_emc_sc=missing driver.dell_emc_unity=missing driver.dell_emc_vmax_af=missing driver.dell_emc_vmax_3=missing driver.dell_emc_vnx=missing driver.dell_emc_powerflex=complete driver.dell_emc_xtremio=missing driver.fujitsu_eternus=missing driver.fungible=missing driver.hitachi_vsp=missing driver.hpe_3par=missing driver.hpe_msa=missing driver.hpe_nimble=missing driver.hpe_xp=missing driver.huawei_t_v1=missing driver.huawei_t_v2=missing driver.huawei_v3=missing driver.huawei_f_v3=missing driver.huawei_f_v5=missing driver.huawei_v5=missing driver.huawei_18000=missing driver.huawei_dorado=missing driver.huawei_fusionstorage=missing driver.infinidat=missing driver.ibm_ds8k=missing driver.ibm_flashsystem=missing driver.ibm_gpfs=missing driver.ibm_storwize=missing driver.ibm_xiv=missing driver.infortrend=missing driver.inspur=missing driver.inspur_as13000=missing driver.kaminario=missing driver.kioxia_kumoscale=missing driver.lenovo=missing driver.lightbits_lightos=complete driver.linbit_linstor=missing driver.lvm=missing driver.macrosan=complete driver.nec=missing driver.nec_v=missing driver.netapp_ontap_nvme_tcp=missing driver.netapp_ontap_iscsi_fc=complete driver.netapp_ontap_nfs=complete driver.netapp_solidfire=complete driver.nexenta=missing driver.nfs=missing driver.opene_joviandss=missing driver.prophetstor=missing driver.pure=complete driver.qnap=missing driver.quobyte=missing driver.rbd=complete driver.rbd_iscsi=complete driver.sandstone=complete driver.seagate=missing driver.storpool=missing driver.synology=missing driver.toyou_netstor=missing driver.toyou_netstor_tyds=missing driver.vrtsaccess=missing driver.vrtscnfs=missing driver.vzstorage=missing driver.vmware=missing driver.win_iscsi=missing driver.win_smb=missing driver.yadro=complete driver.zadara=missing ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/doc/source/reference/support-matrix.rst0000664000175000017500000000535200000000000022647 0ustar00zuulzuul00000000000000.. Copyright (C) 2018 Lenovo, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ============================ Cinder Driver Support Matrix ============================ The following support matrix reflects the drivers that are currently available or are available in `Cinder's driver tree `_ at the time of release. .. note:: This matrix replaces the old wiki based version of the Cinder Support Matrix as there was no way to ensure the wiki version was properly maintained. The old matrix will be left for reference but this matrix should be treated as the correct state of Cinder. .. _required_driver_functions: Required Driver Functions ~~~~~~~~~~~~~~~~~~~~~~~~~ There are a number of functions that are required to be accepted as a Cinder driver. 
Rather than list all the required functionality in the matrix, we include the list of required functions here for reference. * Create Volume * Delete Volume * Attach Volume * Detach Volume * Extend Volume * Create Snapshot * Delete Snapshot * Create Volume from Snapshot * Create Volume from Volume (clone) * Create Image from Volume * Volume Migration (host assisted) .. note:: Since the above functions are required, their support is assumed and the matrix only includes support for optional functionality. .. note:: This matrix is not dynamically generated. It is maintained by the Cinder team and vendor driver maintainers. While every effort is made to ensure the accuracy of the data in this matrix, discrepancies with actual functionality are possible. Please refer to your vendor's support documentation for additional information. .. _driver_support_matrix: .. support_matrix:: support-matrix.ini Driver Removal History ~~~~~~~~~~~~~~~~~~~~~~ This section tracks driver removal starting from the Rocky release. * Rocky * CoprHD Storage Driver (FC, iSCSI, ScaleIO) * Stein * DRBDManage Driver * HGST Flash Storage Suite Driver (vgc) * ITRI DISCO Driver * NetApp E-Series Driver * Train * Tintri Storage Driver * Veritas HyperScale Storage Driver * Nexenta Edge Storage Driver * Ussuri * HPE Lefthand Driver (iSCSI) * Sheepdog Driver ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1759315577.483122 cinder-27.0.0/doc/source/user/0000775000175000017500000000000000000000000016112 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/doc/source/user/README.rst0000664000175000017500000000114000000000000017577 0ustar00zuulzuul00000000000000======================================= Cinder User Documentation (source/user) ======================================= Introduction: ------------- This directory is intended to hold any documentation that helps Cinder end-users. This can include concept guides, tutorials, step-by-step guides for using the CLI, etc. Note that documentation that is focused on administrative actions should go into 'doc/source/admin'. The full spec for organization of documentation may be seen in the `OS Manuals Migration Spec `.
././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1759315577.483122 cinder-27.0.0/doc/test/0000775000175000017500000000000000000000000014613 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/doc/test/redirect-tests.txt0000664000175000017500000000603200000000000020316 0ustar00zuulzuul00000000000000/cinder/latest/man/cinder-manage.html 301 /cinder/latest/cli/cinder-manage.html /cinder/latest/upgrade.html 301 /cinder/latest/admin/upgrades.html /cinder/latest/admin/blockstorage-accelerate-image-compression.html 301 /cinder/latest/admin/accelerate-image-compression.html /cinder/latest/admin/blockstorage-api-throughput.html 301 /cinder/latest/admin/api-throughput.html /cinder/latest/admin/blockstorage-availability-zone-type.html 301 /cinder/latest/admin/availability-zone-type.html /cinder/latest/admin/blockstorage-backup-disks.html 301 /cinder/latest/admin/backup-disks.html /cinder/latest/admin/blockstorage-basic-volume-qos.html 301 /cinder/latest/admin/basic-volume-qos.html /cinder/latest/admin/blockstorage-boot-from-volume.html 301 /cinder/latest/admin/boot-from-volume.html /cinder/latest/admin/blockstorage-capacity-based-qos.html 301 /cinder/latest/admin/capacity-based-qos.html /cinder/latest/admin/blockstorage-consistency-groups.html 301 /cinder/latest/admin/consistency-groups.html /cinder/latest/admin/blockstorage-driver-filter-weighing.html 301 /cinder/latest/admin/driver-filter-weighing.html /cinder/latest/admin/blockstorage-get-capabilities.html 301 /cinder/latest/admin/get-capabilities.html /cinder/latest/admin/blockstorage-groups.html 301 /cinder/latest/admin/groups.html /cinder/latest/admin/blockstorage-image-volume-cache.html 301 /cinder/latest/admin/image-volume-cache.html /cinder/latest/admin/blockstorage-lio-iscsi-support.html 301 /cinder/latest/admin/lio-iscsi-support.html /cinder/latest/admin/blockstorage-manage-volumes.html 301 /cinder/latest/admin/manage-volumes.html /cinder/latest/admin/blockstorage-multi-backend.html 301 /cinder/latest/admin/multi-backend.html /cinder/latest/admin/blockstorage-nfs-backend.html 301 /cinder/latest/admin/nfs-backend.html /cinder/latest/admin/blockstorage-over-subscription.html 301 /cinder/latest/admin/over-subscription.html /cinder/latest/admin/blockstorage-ratelimit-volume-copy-bandwidth.html 301 /cinder/latest/admin/ratelimit-volume-copy-bandwidth.html /cinder/latest/admin/blockstorage-security.html 301 /cinder/latest/admin/security.html /cinder/latest/admin/blockstorage-troubleshoot.html 301 /cinder/latest/admin/troubleshoot.html /cinder/latest/admin/blockstorage-user-visible-extra-specs.html 301 /cinder/latest/admin/user-visible-extra-specs.html /cinder/latest/admin/blockstorage-volume-backed-image.html 301 /cinder/latest/admin/volume-backed-image.html /cinder/latest/admin/blockstorage-volume-backups-export-import.html 301 /cinder/latest/admin/volume-backups-export-import.html /cinder/latest/admin/blockstorage-volume-backups.html 301 /cinder/latest/admin/volume-backups.html /cinder/latest/admin/blockstorage-volume-migration.html 301 /cinder/latest/admin/volume-migration.html /cinder/latest/admin/blockstorage-volume-multiattach.html 301 /cinder/latest/admin/volume-multiattach.html /cinder/latest/admin/blockstorage-volume-number-weigher.html 301 /cinder/latest/admin/volume-number-weigher.html /cinder/latest/admin/generalized_filters.html 301 /cinder/latest/admin/generalized-filters.html 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/driver-requirements.txt0000664000175000017500000000176000000000000017650 0ustar00zuulzuul00000000000000# Document dependencies that are only used if using # certain drivers. This file is not managed by # requirements tools. # check [extras] section of setup.cfg for versions. # HPE 3PAR python-3parclient>=4.2.10 # Apache-2.0 # Kaminario krest>=1.3.0 # Apache-2.0 # Pure Storage distro # Apache-2.0 py-pure-client>=1.47.0 # BSD # Dell EMC VMAX, IBM DS8K pyOpenSSL>=1.0.0 # Apache-2.0 # HPE Lefthand python-lefthandclient>=2.0.0 # Apache-2.0 # Fujitsu Eternus DX pywbem>=0.7.0 # LGPLv2.1+ # IBM XIV pyxcli>=1.1.5 # Apache-2.0 # RBD rados # LGPLv2.1 rbd # LGPLv2.1 # RBD-iSCSI rbd-iscsi-client # Apache-2.0 # Dell EMC VNX and Unity storops>=1.2.3 # Apache-2.0 # INFINIDAT infinisdk # BSD-3 capacity # BSD infi.dtypes.wwn # PSF infi.dtypes.iqn # PSF # Storpool storpool>=7.1.0 # Apache-2.0 storpool.spopenstack>=2.2.1 # Apache-2.0 # Datera dfs_sdk>=1.2.25 # Apache-2.0 # DataCore SANsymphony websocket-client>=1.3.2 # LGPLv2+ # LINSTOR python-linstor>=1.7.0 # LGPLv3 # Quobyte psutil>=5.7.2 # BSD ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315576.8911164 cinder-27.0.0/etc/0000775000175000017500000000000000000000000013642 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1759315577.483122 cinder-27.0.0/etc/cinder/0000775000175000017500000000000000000000000015106 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/etc/cinder/README-cinder.conf.sample0000664000175000017500000000024600000000000021436 0ustar00zuulzuul00000000000000The cinder.conf sample file is no longer generated and maintained in Trunk. To generate your own version of cinder.conf, use the following command: tox -egenconfig ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/etc/cinder/README-policy.generate.md0000664000175000017500000000065300000000000021457 0ustar00zuulzuul00000000000000# Generate policy file To generate the sample policy yaml file, run the following command from the top level of the cinder directory: tox -egenpolicy # Use generated policy file Cinder recognizes ``/etc/cinder/policy.yaml`` as the default policy file. 
To specify your own policy file in order to overwrite the default policy value, add this in Cinder config file: [oslo_policy] policy_file = path/to/policy/file ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/etc/cinder/api-httpd.conf0000664000175000017500000000107400000000000017651 0ustar00zuulzuul00000000000000Listen 8776 LogFormat "%h %l %u %t \"%r\" %>s %b \"%{Referer}i\" \"%{User-agent}i\" %D(us)" cinder_combined WSGIDaemonProcess osapi_volume processes=2 threads=1 user=cinder display-name=%{GROUP} WSGIProcessGroup osapi_volume WSGIScriptAlias / /var/www/cgi-bin/cinder/osapi_volume WSGIApplicationGroup %{GLOBAL} WSGIPassAuthorization On = 2.4> ErrorLogFormat "%{cu}t %M" ErrorLog /var/log/apache2/cinder_error.log CustomLog /var/log/apache2/cinder.log cinder_combined ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/etc/cinder/api-paste.ini0000664000175000017500000000425200000000000017475 0ustar00zuulzuul00000000000000############# # OpenStack # ############# [composite:osapi_volume] use = call:cinder.api:root_app_factory /: apiversions /healthcheck: healthcheck /v3: openstack_volume_api_v3 [composite:openstack_volume_api_v3] use = call:cinder.api.middleware.auth:pipeline_factory noauth = request_id cors http_proxy_to_wsgi faultwrap sizelimit osprofiler noauth apiv3 noauth_include_project_id = request_id cors http_proxy_to_wsgi faultwrap sizelimit osprofiler noauth_include_project_id apiv3 keystone = request_id cors http_proxy_to_wsgi faultwrap sizelimit osprofiler authtoken keystonecontext apiv3 keystone_nolimit = request_id cors http_proxy_to_wsgi faultwrap sizelimit osprofiler authtoken keystonecontext apiv3 [filter:http_proxy_to_wsgi] paste.filter_factory = oslo_middleware.http_proxy_to_wsgi:HTTPProxyToWSGI.factory [filter:cors] paste.filter_factory = oslo_middleware.cors:filter_factory oslo_config_project = cinder [filter:faultwrap] paste.filter_factory = cinder.api.middleware.fault:FaultWrapper.factory [filter:osprofiler] paste.filter_factory = osprofiler.web:WsgiMiddleware.factory [filter:noauth] paste.filter_factory = cinder.api.middleware.auth:NoAuthMiddleware.factory [filter:noauth_include_project_id] paste.filter_factory = cinder.api.middleware.auth:NoAuthMiddlewareIncludeProjectID.factory [filter:sizelimit] paste.filter_factory = oslo_middleware.sizelimit:RequestBodySizeLimiter.factory [app:apiv3] paste.app_factory = cinder.api.v3.router:APIRouter.factory [pipeline:apiversions] pipeline = request_id cors http_proxy_to_wsgi faultwrap osvolumeversionapp [app:osvolumeversionapp] paste.app_factory = cinder.api.versions:Versions.factory [pipeline:healthcheck] pipeline = request_id healthcheckapp [app:healthcheckapp] paste.app_factory = oslo_middleware:Healthcheck.app_factory backends = disable_by_file disable_by_file_path = /etc/cinder/healthcheck_disable ########## # Shared # ########## [filter:keystonecontext] paste.filter_factory = cinder.api.middleware.auth:CinderKeystoneContext.factory [filter:authtoken] paste.filter_factory = keystonemiddleware.auth_token:filter_factory [filter:request_id] paste.filter_factory = cinder.api.middleware.request_id:RequestId.factory ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/etc/cinder/logging_sample.conf0000664000175000017500000000333600000000000020751 0ustar00zuulzuul00000000000000[loggers] keys = root, cinder, taskflow, 
cinder_flow_utils [handlers] keys = stderr, stdout, watchedfile, syslog, tasks, null [formatters] keys = context, default [logger_root] level = WARNING handlers = null [logger_cinder] level = INFO handlers = stderr qualname = cinder # Both of these are used for tracking what cinder and taskflow is doing with # regard to flows and tasks (and the activity there-in). [logger_cinder_flow_utils] level = INFO handlers = tasks,stderr qualname = cinder.flow_utils [logger_taskflow] level = INFO handlers = tasks qualname = taskflow [logger_amqplib] level = WARNING handlers = stderr qualname = amqplib [logger_sqlalchemy] level = WARNING handlers = stderr qualname = sqlalchemy # "level = INFO" logs SQL queries. # "level = DEBUG" logs SQL queries and results. # "level = WARNING" logs neither. (Recommended for production systems.) [logger_boto] level = WARNING handlers = stderr qualname = boto [logger_suds] level = INFO handlers = stderr qualname = suds [logger_eventletwsgi] level = WARNING handlers = stderr qualname = eventlet.wsgi.server [handler_stderr] class = StreamHandler args = (sys.stderr,) formatter = context [handler_stdout] class = StreamHandler args = (sys.stdout,) formatter = context [handler_watchedfile] class = handlers.WatchedFileHandler args = ('cinder.log',) formatter = context [handler_tasks] class = handlers.WatchedFileHandler args = ('tasks.log',) formatter = context [handler_syslog] class = handlers.SysLogHandler args = ('/dev/log', handlers.SysLogHandler.LOG_USER) formatter = context [handler_null] class = logging.NullHandler formatter = default args = () [formatter_context] class = oslo_log.formatters.ContextFormatter [formatter_default] format = %(message)s ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/etc/cinder/resource_filters.json0000664000175000017500000000132500000000000021361 0ustar00zuulzuul00000000000000{ "volume": ["name", "status", "metadata", "bootable", "migration_status", "availability_zone", "group_id", "size", "created_at", "updated_at", "consumes_quota"], "backup": ["name", "status", "volume_id"], "snapshot": ["name", "status", "volume_id", "metadata", "availability_zone", "consumes_quota"], "group": ["name"], "group_snapshot": ["name", "status", "group_id"], "attachment": ["volume_id", "status", "instance_id", "attach_status"], "message": ["resource_uuid", "resource_type", "event_id", "request_id", "message_level"], "pool": ["name", "volume_type"], "volume_type": ["is_public", "extra_specs"] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/etc/cinder/rootwrap.conf0000664000175000017500000000221300000000000017630 0ustar00zuulzuul00000000000000# Configuration for cinder-rootwrap # This file should be owned by (and only-writeable by) the root user [DEFAULT] # List of directories to load filter definitions from (separated by ','). # These directories MUST all be only writeable by root ! filters_path=/etc/cinder/rootwrap.d,/usr/share/cinder/rootwrap # List of directories to search executables in, in case filters do not # explicitely specify a full path (separated by ',') # If not specified, defaults to system PATH environment variable. # These directories MUST all be only writeable by root ! exec_dirs=/sbin,/usr/sbin,/bin,/usr/bin,/usr/local/bin,/usr/local/sbin,/usr/lpp/mmfs/bin # Enable logging to syslog # Default value is False use_syslog=False # Which syslog facility to use. 
# Valid values include auth, authpriv, syslog, local0, local1... # Default value is 'syslog' syslog_log_facility=syslog # Which messages to log. # INFO means log all usage # ERROR means only log unsuccessful attempts syslog_log_level=ERROR # Rootwrap daemon exits after this seconds of inactivity daemon_timeout=600 # Rootwrap daemon limits itself to that many file descriptors (Linux only) rlimit_nofile=1024 ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1759315577.483122 cinder-27.0.0/etc/cinder/rootwrap.d/0000775000175000017500000000000000000000000017205 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/etc/cinder/rootwrap.d/volume.filters0000664000175000017500000001412300000000000022107 0ustar00zuulzuul00000000000000# cinder-rootwrap command filters for volume nodes # This file should be owned by (and only-writeable by) the root user [Filters] # cinder/volume/targets/iscsi.py: target_helper '--op' ... iscsictl: CommandFilter, iscsictl, root cinder-rtstool: CommandFilter, cinder-rtstool, root # LVM related show commands pvs: EnvFilter, env, root, LC_ALL=C, pvs vgs: EnvFilter, env, root, LC_ALL=C, vgs lvs: EnvFilter, env, root, LC_ALL=C, lvs lvdisplay: EnvFilter, env, root, LC_ALL=C, lvdisplay # -LVM related show commands with suppress fd warnings pvs2: EnvFilter, env, root, LC_ALL=C, LVM_SUPPRESS_FD_WARNINGS=, pvs vgs2: EnvFilter, env, root, LC_ALL=C, LVM_SUPPRESS_FD_WARNINGS=, vgs lvs2: EnvFilter, env, root, LC_ALL=C, LVM_SUPPRESS_FD_WARNINGS=, lvs lvdisplay2: EnvFilter, env, root, LC_ALL=C, LVM_SUPPRESS_FD_WARNINGS=, lvdisplay # -LVM related show commands conf var pvs3: EnvFilter, env, root, LC_ALL=C, LVM_SYSTEM_DIR=, pvs vgs3: EnvFilter, env, root, LC_ALL=C, LVM_SYSTEM_DIR=, vgs lvs3: EnvFilter, env, root, LC_ALL=C, LVM_SYSTEM_DIR=, lvs lvdisplay3: EnvFilter, env, root, LC_ALL=C, LVM_SYSTEM_DIR=, lvdisplay # -LVM conf var with suppress fd_warnings pvs4: EnvFilter, env, root, LC_ALL=C, LVM_SYSTEM_DIR=, LVM_SUPPRESS_FD_WARNINGS=, pvs vgs4: EnvFilter, env, root, LC_ALL=C, LVM_SYSTEM_DIR=, LVM_SUPPRESS_FD_WARNINGS=, vgs lvs4: EnvFilter, env, root, LC_ALL=C, LVM_SYSTEM_DIR=, LVM_SUPPRESS_FD_WARNINGS=, lvs lvdisplay4: EnvFilter, env, root, LC_ALL=C, LVM_SYSTEM_DIR=, LVM_SUPPRESS_FD_WARNINGS=, lvdisplay # os-brick library commands # os_brick.privileged.run_as_root oslo.privsep context # This line ties the superuser privs with the config files, context name, # and (implicitly) the actual python code invoked. privsep-rootwrap: RegExpFilter, privsep-helper, root, privsep-helper, --config-file, /etc/(?!\.\.).*, --privsep_context, os_brick.privileged.default, --privsep_sock_path, /tmp/.* # Privsep calls within cinder iteself privsep-rootwrap-sys_admin: RegExpFilter, privsep-helper, root, privsep-helper, --config-file, /etc/(?!\.\.).*, --privsep_context, cinder.privsep.sys_admin_pctxt, --privsep_sock_path, /tmp/.* # cinder/brick/local_dev/lvm.py: 'lvcreate', '-L', sizestr, '-n', volume_name,.. # cinder/brick/local_dev/lvm.py: 'lvcreate', '-L', ... lvcreate: EnvFilter, env, root, LC_ALL=C, lvcreate lvcreate_lvmconf: EnvFilter, env, root, LVM_SYSTEM_DIR=, LC_ALL=C, lvcreate lvcreate_fdwarn: EnvFilter, env, root, LC_ALL=C, LVM_SUPPRESS_FD_WARNINGS=, lvcreate lvcreate_lvmconf_fdwarn: EnvFilter, env, root, LVM_SYSTEM_DIR=, LVM_SUPPRESS_FD_WARNINGS=, LC_ALL=C, lvcreate # cinder/volume/driver.py: 'dd', 'if=%s' % srcstr, 'of=%s' % deststr,... 
dd: CommandFilter, dd, root # cinder/volume/driver.py: 'lvremove', '-f', %s/%s % ... lvremove: CommandFilter, lvremove, root # cinder/brick/local_dev/lvm.py: 'lvextend', '-L' '%(new_size)s', '%(lv_name)s' ... # cinder/brick/local_dev/lvm.py: 'lvextend', '-L' '%(new_size)s', '%(thin_pool)s' ... lvextend: EnvFilter, env, root, LC_ALL=C, lvextend lvextend_lvmconf: EnvFilter, env, root, LVM_SYSTEM_DIR=, LC_ALL=C, lvextend lvextend_fdwarn: EnvFilter, env, root, LC_ALL=C, LVM_SUPPRESS_FD_WARNINGS=, lvextend lvextend_lvmconf_fdwarn: EnvFilter, env, root, LVM_SYSTEM_DIR=, LC_ALL=C, LVM_SUPPRESS_FD_WARNINGS=, lvextend # cinder/brick/local_dev/lvm.py: 'lvchange -a y -K ' lvchange: CommandFilter, lvchange, root # cinder/volume/driver.py: 'iscsiadm', '-m', 'discovery', '-t',... # cinder/volume/driver.py: 'iscsiadm', '-m', 'node', '-T', ... iscsiadm: CommandFilter, iscsiadm, root # cinder/volume/utils.py: utils.temporary_chown(path, 0) chown: CommandFilter, chown, root # cinder/volume/utils.py: copy_volume(..., ionice='...') ionice_1: ChainingRegExpFilter, ionice, root, ionice, -c[0-3], -n[0-7] ionice_2: ChainingRegExpFilter, ionice, root, ionice, -c[0-3] # cinder/volume/utils.py: setup_blkio_cgroup() cgexec: ChainingRegExpFilter, cgexec, root, cgexec, -g, blkio:\S+ # cinder/image/image_utils.py qemu-img: EnvFilter, env, root, LC_ALL=C, qemu-img qemu-img_convert: CommandFilter, qemu-img, root qzip: CommandFilter, qzip, root gzip: CommandFilter, gzip, root # cinder/volume/nfs.py stat: CommandFilter, stat, root mount: CommandFilter, mount, root df: CommandFilter, df, root du: CommandFilter, du, root truncate: CommandFilter, truncate, root chmod: CommandFilter, chmod, root rm: CommandFilter, rm, root # cinder/volume/drivers/netapp/dataontap/nfs_base.py: netapp_nfs_find: RegExpFilter, find, root, find, ^[/]*([^/\0]+(/+)?)*$, -maxdepth, \d+, -name, img-cache.*, -amin, \+\d+ # cinder/backup/drivers/nfs.py # cinder/backup/drivers/glusterfs.py chgrp: CommandFilter, chgrp, root # cinder/brick/initiator/connector.py: ls: CommandFilter, ls, root multipath: CommandFilter, multipath, root multipathd: CommandFilter, multipathd, root # cinder/volume/drivers/ibm/gpfs.py # cinder/volume/drivers/netapp/dataontap/nfs_base.py mv: CommandFilter, mv, root # cinder/volume/drivers/ibm/gpfs.py cp: CommandFilter, cp, root mmgetstate: CommandFilter, mmgetstate, root mmclone: CommandFilter, mmclone, root mmlsattr: CommandFilter, mmlsattr, root mmchattr: CommandFilter, mmchattr, root mmlsconfig: CommandFilter, mmlsconfig, root mmlsfs: CommandFilter, mmlsfs, root mmlspool: CommandFilter, mmlspool, root mkfs: CommandFilter, mkfs, root mmcrfileset: CommandFilter, mmcrfileset, root mmlsfileset: CommandFilter, mmlsfileset, root mmlinkfileset: CommandFilter, mmlinkfileset, root mmunlinkfileset: CommandFilter, mmunlinkfileset, root mmdelfileset: CommandFilter, mmdelfileset, root # cinder/volume/drivers/ibm/gpfs.py # cinder/volume/drivers/ibm/ibmnas.py find_maxdepth_inum: RegExpFilter, find, root, find, ^[/]*([^/\0]+(/+)?)*$, -maxdepth, \d+, -ignore_readdir_race, -inum, \d+, -print0, -quit # cinder/volume/drivers/vzstorage.py pstorage-mount: CommandFilter, pstorage-mount, root pstorage: CommandFilter, pstorage, root ploop: CommandFilter, ploop, root # cinder/volume/drivers/quobyte.py mount.quobyte: CommandFilter, mount.quobyte, root umount.quobyte: CommandFilter, umount.quobyte, root # cinder/volume/drivers/dell_emc/powerstore/nfs.py dellfcopy: CommandFilter, dellfcopy, root cryptsetup: CommandFilter, cryptsetup, root 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/mypy-files.txt0000664000175000017500000000271000000000000015726 0ustar00zuulzuul00000000000000cinder/api/api_utils.py cinder/api/common.py cinder/api/v3/types.py cinder/backup/api.py cinder/backup/manager.py cinder/backup/drivers/ceph.py cinder/backup/rpcapi.py cinder/common/constants.py cinder/context.py cinder/coordination.py cinder/cmd/api.py cinder/cmd/backup.py cinder/cmd/manage.py cinder/cmd/scheduler.py cinder/cmd/status.py cinder/cmd/volume.py cinder/i18n.py cinder/image/cache.py cinder/image/glance.py cinder/image/image_utils.py cinder/keymgr/transfer.py cinder/exception.py cinder/flow_utils.py cinder/manager.py cinder/objects/backup.py cinder/policy.py cinder/scheduler/base_handler.py cinder/scheduler/base_weight.py cinder/scheduler/evaluator/evaluator.py cinder/scheduler/filter_scheduler.py cinder/scheduler/flows/create_volume.py cinder/scheduler/host_manager.py cinder/scheduler/manager.py cinder/scheduler/rpcapi.py cinder/scheduler/scheduler_options.py cinder/scheduler/weights/__init__.py cinder/scheduler/weights/capacity.py cinder/scheduler/weights/chance.py cinder/scheduler/weights/goodness.py cinder/scheduler/weights/stochastic.py cinder/scheduler/weights/volume_number.py cinder/service.py cinder/utils.py cinder/volume/__init__.py cinder/volume/api.py cinder/volume/drivers/nfs.py cinder/volume/drivers/rbd.py cinder/volume/drivers/remotefs.py cinder/volume/flows/api/create_volume.py cinder/volume/flows/manager/create_volume.py cinder/volume/manager.py cinder/volume/rpcapi.py cinder/volume/volume_types.py cinder/volume/volume_utils.py ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1759315577.483122 cinder-27.0.0/playbooks/0000775000175000017500000000000000000000000015072 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/playbooks/cinder-multibackend-matrix.yaml0000664000175000017500000000217500000000000023171 0ustar00zuulzuul00000000000000# Playbook originally inspired by # https://opendev.org/openstack/tempest/src/tag/23.0.0/playbooks/devstack-tempest.yaml # Changes that run through devstack-tempest are likely to have an impact on # the devstack part of the job, so we keep devstack in the main play to # avoid zuul retrying on legitimate failures. 
- hosts: all roles: - orchestrate-devstack # We run tests only on one node, regardless how many nodes are in the system - hosts: tempest vars: migration_backends: - lvm - ceph - nfs migration_test_results: [] migration_tempest_conf: "/opt/stack/tempest/etc/tempest.conf" tasks: - include_role: name: setup-tempest-run-dir - include_role: name: setup-tempest-data-dir - include_role: name: acl-devstack-files - include_role: name: configure-run-migration-tests vars: migration_source_backend: "{{ item[0] }}" migration_destination_backend: "{{ item[1] }}" loop: "{{ migration_backends|product(migration_backends)|list }}" when: item[0] != item[1] - include_role: name: save-cinder-migration-results ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/playbooks/enable-fips.yaml0000664000175000017500000000005000000000000020136 0ustar00zuulzuul00000000000000- hosts: all roles: - enable-fips ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/playbooks/post-cinderlib.yaml0000664000175000017500000000030000000000000020665 0ustar00zuulzuul00000000000000- hosts: tempest vars: tox_envlist: functional zuul_work_dir: "{{ ansible_user_dir }}/{{ zuul.projects['opendev.org/openstack/cinderlib'].src_dir }}" roles: - fetch-tox-output ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/playbooks/tempest-and-cinderlib-run.yaml0000664000175000017500000000266700000000000022745 0ustar00zuulzuul00000000000000# Playbook imported from https://opendev.org/openstack/tempest/src/tag/23.0.0/playbooks/devstack-tempest.yaml # Changes that run through devstack-tempest are likely to have an impact on # the devstack part of the job, so we keep devstack in the main play to # avoid zuul retrying on legitimate failures. - hosts: all roles: - orchestrate-devstack # We run tests only on one node, regardless how many nodes are in the system - hosts: tempest environment: # This environment variable is used by the optional tempest-gabbi # job provided by the gabbi-tempest plugin. It can be safely ignored # if that plugin is not being used. 
GABBI_TEMPEST_PATH: "{{ gabbi_tempest_path | default('') }}" roles: - setup-tempest-run-dir - setup-tempest-data-dir - acl-devstack-files - role: run-tempest # ignore the errors here (but consider them later), so that run-cinderlib-tests is always executed ignore_errors: yes - role: change-devstack-data-owner devstack_data_subdir_changed: cinder devstack_data_subdir_owner: zuul - role: run-cinderlib-tests tox_install_siblings: false cinderlib_base_dir: "{{ ansible_user_dir }}/{{ zuul.projects['opendev.org/openstack/cinderlib'].src_dir }}" post_tasks: - name: Fail if the first tempest run did not work fail: msg: "tempest run returned with an error" when: tempest_run_result is defined and tempest_run_result.rc != 0 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315528.0 cinder-27.0.0/pyproject.toml0000664000175000017500000000013300000000000016000 0ustar00zuulzuul00000000000000[build-system] requires = ["pbr>=6.0.0", "setuptools>=64.0.0"] build-backend = "pbr.build" ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1759315577.483122 cinder-27.0.0/rally-jobs/0000775000175000017500000000000000000000000015145 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/rally-jobs/README.rst0000664000175000017500000000205000000000000016631 0ustar00zuulzuul00000000000000Rally job related files ======================= This directory contains rally tasks and plugins that are run by OpenStack CI. Structure --------- * cinder.yaml is a task that will be run in gates against OpenStack deployed by DevStack. * cinder-fake.yaml is a task that will be run in gates against OpenStack deployed by DevStack with fake cinder driver. * plugins - directory where you can add rally plugins. Almost everything in Rally is plugin. Benchmark context, Benchmark scenario, SLA checks, Generic cleanup resources, .... * extra - all files from this directory will be copy pasted to gates, so you are able to use absolute path in rally tasks. 
Files will be in ~/.rally/extra/* Useful links ------------ * More about Rally: https://rally.readthedocs.io/en/latest/ * Rally release notes: https://rally.readthedocs.io/en/latest/project_info/release_notes/archive.html * How to add rally-gates: https://rally.readthedocs.io/en/latest/quick_start/gates.html * About plugins: https://rally.readthedocs.io/en/latest/plugins/index.html ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/rally-jobs/cinder.yaml0000664000175000017500000002537600000000000017312 0ustar00zuulzuul00000000000000{% set image_name = "^(cirros.*-disk|TestVM)$" %} --- version: 2 title: Rally task for cinder-rally-task check job (non-voting) subtasks: - title: Validate cinder client scenario: Authenticate.validate_cinder: repetitions: 2 runner: constant: times: 10 concurrency: 5 contexts: users: tenants: 3 users_per_tenant: 5 - title: Update and delete cinder Quotas scenario: Quotas.cinder_update_and_delete: max_quota: 1024 runner: constant: times: 4 concurrency: 1 contexts: users: tenants: 3 users_per_tenant: 2 api_versions: cinder: version: 3 service_name: cinder - title: Update cinder Quotas scenario: Quotas.cinder_update: max_quota: 1024 runner: constant: times: 10 concurrency: 2 contexts: users: tenants: 3 users_per_tenant: 2 api_versions: cinder: version: 3 service_name: cinder - title: Create and Delete Volume workloads: - scenario: CinderVolumes.create_and_delete_volume: size: 1 runner: constant: times: 2 concurrency: 2 contexts: users: tenants: 2 users_per_tenant: 2 api_versions: cinder: version: 3 service_name: cinder - scenario: CinderVolumes.create_and_delete_volume: size: 1 image: name: {{image_name}} runner: constant: times: 2 concurrency: 2 contexts: users: tenants: 2 users_per_tenant: 2 api_versions: cinder: version: 3 service_name: cinder - scenario: CinderVolumes.create_and_delete_volume: size: min: 1 max: 3 runner: constant: times: 2 concurrency: 2 contexts: users: tenants: 2 users_per_tenant: 2 api_versions: cinder: version: 3 service_name: cinder - title: Create and List Volume workloads: - scenario: CinderVolumes.create_and_list_volume: size: 1 detailed: True runner: constant: times: 2 concurrency: 2 contexts: users: tenants: 1 users_per_tenant: 1 api_versions: cinder: version: 3 service_name: cinder - scenario: CinderVolumes.create_and_list_volume: size: min: 1 max: 3 detailed: True runner: constant: times: 2 concurrency: 2 contexts: users: tenants: 1 users_per_tenant: 1 api_versions: cinder: version: 3 service_name: cinder - scenario: CinderVolumes.create_and_list_volume: size: 1 detailed: True image: name: {{image_name}} runner: constant: times: 2 concurrency: 2 contexts: users: tenants: 1 users_per_tenant: 1 api_versions: cinder: version: 3 service_name: cinder - title: List volumes scenario: CinderVolumes.list_volumes: detailed: True runner: constant: times: 2 concurrency: 2 contexts: users: tenants: 2 users_per_tenant: 1 volumes: size: 1 volumes_per_tenant: 2 api_versions: cinder: version: 3 service_name: cinder - title: Create volume workloads: - scenario: CinderVolumes.create_volume: size: 1 runner: constant: times: 2 concurrency: 2 contexts: users: tenants: 2 users_per_tenant: 2 api_versions: cinder: version: 3 service_name: cinder - scenario: CinderVolumes.create_volume: size: min: 1 max: 3 runner: constant: times: 2 concurrency: 2 contexts: users: tenants: 2 users_per_tenant: 2 api_versions: cinder: version: 3 service_name: cinder - scenario: CinderVolumes.create_volume: 
size: 1 image: name: {{image_name}} runner: constant: times: 2 concurrency: 2 contexts: users: tenants: 2 users_per_tenant: 2 api_versions: cinder: version: 3 service_name: cinder - title: Create and Extend volume workloads: - scenario: CinderVolumes.create_and_extend_volume: size: 1 new_size: 2 runner: constant: times: 2 concurrency: 2 contexts: users: tenants: 1 users_per_tenant: 1 api_versions: cinder: version: 3 service_name: cinder - scenario: CinderVolumes.create_and_extend_volume: size: min: 1 max: 2 new_size: min: 3 max: 4 runner: constant: times: 2 concurrency: 2 contexts: users: tenants: 1 users_per_tenant: 1 api_versions: cinder: version: 3 service_name: cinder - title: Create and attach volume scenario: CinderVolumes.create_and_attach_volume: size: 1 image: name: {{image_name}} flavor: name: "m1.tiny" runner: constant: times: 2 concurrency: 2 contexts: users: tenants: 2 users_per_tenant: 2 api_versions: cinder: version: 3 service_name: cinder - title: Create volume and snapshot attach and detach volume and delete them workloads: - scenario: CinderVolumes.create_snapshot_and_attach_volume: image: name: {{image_name}} flavor: name: "m1.tiny" size: min: 1 max: 1 volume_type: "test" runner: constant: times: 2 concurrency: 2 contexts: users: tenants: 2 users_per_tenant: 2 volume_types: - "test" api_versions: cinder: version: 3 service_name: cinder - scenario: CinderVolumes.create_snapshot_and_attach_volume: image: name: {{image_name}} flavor: name: "m1.tiny" volume_type: "test" size: min: 1 max: 1 runner: constant: times: 2 concurrency: 2 contexts: users: tenants: 2 users_per_tenant: 2 volume_types: - "test" api_versions: cinder: version: 3 service_name: cinder - title: Create volume from volume and then delete it workloads: - scenario: CinderVolumes.create_from_volume_and_delete_volume: size: 1 runner: constant: times: 2 concurrency: 2 contexts: users: tenants: 1 users_per_tenant: 1 volumes: size: 1 api_versions: cinder: version: 3 service_name: cinder - scenario: CinderVolumes.create_from_volume_and_delete_volume: size: min: 1 max: 2 runner: constant: times: 2 concurrency: 2 contexts: users: tenants: 1 users_per_tenant: 1 volumes: size: 1 api_versions: cinder: version: 3 service_name: cinder - title: Create and delete snapshot scenario: CinderVolumes.create_and_delete_snapshot: force: false runner: constant: times: 2 concurrency: 2 contexts: users: tenants: 2 users_per_tenant: 2 volumes: size: 1 api_versions: cinder: version: 3 service_name: cinder - title: Create and list snapshots scenario: CinderVolumes.create_and_list_snapshots: force: False detailed: True runner: constant: times: 2 concurrency: 2 contexts: users: tenants: 1 users_per_tenant: 1 volumes: size: 1 api_versions: cinder: version: 3 service_name: cinder - title: Create and upload a volume to image scenario: CinderVolumes.create_and_upload_volume_to_image: size: 1 runner: constant: times: 1 concurrency: 1 contexts: users: tenants: 1 users_per_tenant: 1 api_versions: cinder: version: 3 service_name: cinder ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1759315577.487122 cinder-27.0.0/rally-jobs/extra/0000775000175000017500000000000000000000000016270 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/rally-jobs/extra/README.rst0000664000175000017500000000025400000000000017760 0ustar00zuulzuul00000000000000Extra files =========== All files from this directory will be copy 
pasted to gates, so you are able to use absolute path in rally tasks. Files will be in ~/.rally/extra/* ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1759315577.487122 cinder-27.0.0/rally-jobs/plugins/0000775000175000017500000000000000000000000016626 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/rally-jobs/plugins/README.rst0000664000175000017500000000061200000000000020314 0ustar00zuulzuul00000000000000Rally plugins ============= All ``*.py`` modules from this directory will be auto-loaded by Rally and all plugins will be discoverable. There is no need of any extra configuration and there is no difference between writing them here and in rally code base. Note that it is better to push all interesting and useful benchmarks to Rally code base, this simplifies administration for Operators. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/rally-jobs/plugins/__init__.py0000664000175000017500000000000000000000000020725 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1759315577.487122 cinder-27.0.0/releasenotes/0000775000175000017500000000000000000000000015560 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/README.rst0000664000175000017500000000070700000000000017253 0ustar00zuulzuul00000000000000============= Release notes ============= The release notes for a patch should be included in the patch. The intended audience for release notes include deployers, administrators and end-users. A release note is required if the patch has upgrade or API impact. It is also required if the patch adds a feature or fixes a long-standing or security bug. Please see https://docs.openstack.org/cinder/latest/contributor/releasenotes.html for more details. ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315577.8151252 cinder-27.0.0/releasenotes/notes/0000775000175000017500000000000000000000000016710 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/1220b8a67602b8e7-update_rootwrap_volume_filters.yaml0000664000175000017500000000036200000000000030203 0ustar00zuulzuul00000000000000--- upgrade: - It is required to copy new rootwrap.d/volume.filters file into /etc/cinder/rootwrap.d directory. fixes: - Fixed bug causing snapshot creation to fail on systems with LC_NUMERIC set to locale using ',' as decimal separator. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/1884495-173f375dc5274fe6.yaml0000664000175000017500000000007400000000000022612 0ustar00zuulzuul00000000000000--- features: - | Nimble driver now supports discard. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/1885946-17bc5c3dc0535044.yaml0000664000175000017500000000030600000000000022564 0ustar00zuulzuul00000000000000--- features: - | Added Multi-attach feature in Nimble driver. upgrade: - | Nimble specific extra-spec ``nimble:multi-initiator`` is removed. Common extra-spec multiattach is added. 
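The Nimble note above replaces the vendor-specific ``nimble:multi-initiator`` extra-spec with the common ``multiattach`` spec. A minimal sketch of how an operator might tag a volume type with that spec; the volume type name is illustrative:

.. code-block:: console

   $ openstack volume type set --property multiattach="<is> True" nimble-multiattach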
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/1899512-7a872a2c19e53536.yaml0000664000175000017500000000011300000000000022512 0ustar00zuulzuul00000000000000--- features: - | Added revert to snapshot feature in Nimble driver. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/1918099-18b26dd9107f19c0.yaml0000664000175000017500000000023500000000000022574 0ustar00zuulzuul00000000000000--- fixes: - | Nimble driver `bug #1918099 `_: Fix revert to snapshot not working as expected. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/3par-create-cg-from-source-cg-5634dcf9feb813f6.yaml0000664000175000017500000000017500000000000027434 0ustar00zuulzuul00000000000000--- features: - Added support for creating a consistency group from a source consistency group in the HPE 3PAR driver. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/3par-create-fc-vlun-match-set-type-babcf2cbce1ce317.yaml0000664000175000017500000000033600000000000030674 0ustar00zuulzuul00000000000000--- fixes: - 3PAR driver creates FC VLUN of match-set type instead of host sees. With match-set, the host will see the virtual volume on specified NSP (Node-Slot-Port). This change in vlun type fixes bug 1577993.././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/3par-get-capability-de60c9bc7ae51c14.yaml0000664000175000017500000000010200000000000025676 0ustar00zuulzuul00000000000000--- features: - | Added get capability feature for HPE-3PAR.././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/3par-license-check-51a16b5247675760.yaml0000664000175000017500000000011000000000000025042 0ustar00zuulzuul00000000000000--- features: - Disable standard capabilities based on 3PAR licenses. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/3par-manage-unmanage-snapshot-eb4e504e8782ba43.yaml0000664000175000017500000000012100000000000027525 0ustar00zuulzuul00000000000000--- features: - Added snapshot manage/unmanage support to the HPE 3PAR driver. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/84-to-90-endpoints-831c28423d32cac5.yaml0000664000175000017500000000013600000000000025022 0ustar00zuulzuul00000000000000other: - | PowerMax driver - Changing 8.4 to 9.0 Unisphere for PowerMax REST endpoints. ././@PaxHeader0000000000000000000000000000024100000000000011452 xustar0000000000000000139 path=cinder-27.0.0/releasenotes/notes/Code-changes-to-handle-groups-with-replication_enabled-or-snapshot_enabled-72f669fe2719ce3d.yaml 22 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/Code-changes-to-handle-groups-with-replication_enabled-or-snapshot_0000664000175000017500000000030600000000000033644 0ustar00zuulzuul00000000000000--- features: - | IBM Spectrum Virtualize: Added support to enable creating a group from source, when source is a replicated group or consistency group snapshot of a replicated group. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/Dell-SC-Driver-to-dell_emc-folder-e5d6fb1f1cf84149.yaml0000664000175000017500000000060100000000000030144 0ustar00zuulzuul00000000000000--- upgrades: - The Dell Storage Center driver is moved to the dell_emc directory and has been rebranded to its current Dell EMC SC name. The volume_driver entry in cinder.conf needs to be changed to ``cinder.volume.drivers.dell_emc.sc.storagecenter_fc.SCFCDriver`` for FC or ``cinder.volume.drivers.dell_emc.sc.storagecenter_iscsi.SCISCSIDriver`` for ISCSI. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/Dell-SC-New-Extra-Specs-1de0d3f1ebc62881.yaml0000664000175000017500000000035600000000000026136 0ustar00zuulzuul00000000000000--- features: - Dell SC - Compression and Dedupe support added for Storage Centers that support the options. - Dell SC - Volume and Group QOS support added for Storage Centers that support and have enabled the option. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/Dell-SC-Retype-Limitations-74f4b5f6a94ffe4f.yaml0000664000175000017500000000112700000000000027116 0ustar00zuulzuul00000000000000--- issues: - With the Dell SC Cinder Driver if a volume is retyped to a new storage profile all volumes created via snapshots from this volume will also change to the new storage profile. - With the Dell SC Cinder Driver retyping from one replication type to another type (ex. regular replication to live volume replication) is not supported. fixes: - With the Dell SC Cinder Driver retyping to or from a replicated type should now work. - With the Dell SC Cinder Driver retype failed to return a tuple if it had to return an update to the volume state. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/Dell-SC-ServerOS-Config-Option-bd0e018319758e03.yaml0000664000175000017500000000051100000000000027205 0ustar00zuulzuul00000000000000--- features: - Config option ``dell_server_os`` added to the Dell SC driver. This option allows the selection of the server type used when creating a server on the Dell DSM during initialize connection. This is only used if the server does not exist. Valid values are from the Dell DSM create server list. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/Dell-SC-excluded_domain_ips_ListOpt-51bacddee199ce83.yaml0000664000175000017500000000137500000000000031101 0ustar00zuulzuul00000000000000--- features: - Added an ``excluded_domain_ips`` option to the Dell EMC SC driver. This is identical to the excluded_domain_ip option only comma separated rather than multiple entry. This is concatenated with the ``excluded_domain_ip`` option. deprecations: - | The Dell EMC SC configuration option ``excluded_domain_ip`` has been deprecated and will be removed in a future release. Deployments should now migrate to the option ``excluded_domain_ips`` for equivalent functionality. upgrade: - | The Dell EMC SC configuration option ``excluded_domain_ip`` has been deprecated and will be removed in a future release. Deployments should now migrate to the option ``excluded_domain_ips`` for equivalent functionality. 
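The Dell EMC SC notes above spell out the relocated module paths and the new comma-separated ``excluded_domain_ips`` option. A minimal ``cinder.conf`` sketch combining them; the backend section name and IP addresses are placeholders:

.. code-block:: ini

   [dellsc-iscsi]
   volume_driver = cinder.volume.drivers.dell_emc.sc.storagecenter_iscsi.SCISCSIDriver
   excluded_domain_ips = 172.16.1.10,172.16.1.11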
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/Dell-SC-live-volume-41bacddee199ce83.yaml0000664000175000017500000000016600000000000025664 0ustar00zuulzuul00000000000000--- features: - Added support for the use of live volume in place of standard replication in the Dell SC driver.././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/Dell-SC-replication-failover_host-failback-a9e9cbbd6a1be6c3.yaml0000664000175000017500000000011600000000000032373 0ustar00zuulzuul00000000000000--- features: - Added replication failback support for the Dell SC driver. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/Dell-SC-thaw_backend-b9362d381fabd4c9.yaml0000664000175000017500000000025300000000000025667 0ustar00zuulzuul00000000000000--- issues: - Dell SC Cinder driver has limited support in a failed over state so thaw_backend has been implemented to reject the thaw call when in such a state.././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/Dell-SC-v2.1-replication-ef6b1d6a4e2795a0.yaml0000664000175000017500000000012500000000000026245 0ustar00zuulzuul00000000000000--- features: - Added replication v2.1 support to the Dell Storage Center drivers. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/Ds8k-revert-to-snapshot-support-ea0e06e14a8710ee.yaml0000664000175000017500000000013100000000000030157 0ustar00zuulzuul00000000000000--- features: - | IBM DS8000 Driver: Add support for revert-to-snapshot operation. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/Enable-HPE-3PAR-Compression-Feature-90e4de4b64a74a46.yaml0000664000175000017500000000047400000000000030213 0ustar00zuulzuul00000000000000--- features: - HPE 3PAR driver adds following functionalities Creating thin/dedup compresssed volume. Retype for tpvv/tdvv volumes to be compressed. Migration of compressed volumes. Create compressed volume from compressed volume/snapshot source. Compression support to create cg from source. ././@PaxHeader0000000000000000000000000000021100000000000011447 xustar0000000000000000115 path=cinder-27.0.0/releasenotes/notes/Fusionstorage_Cinder_Driver_Support_OceanStor_100D-d21a300fd27b2440.yaml 22 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/Fusionstorage_Cinder_Driver_Support_OceanStor_100D-d21a300fd27b24400000664000175000017500000000016100000000000032564 0ustar00zuulzuul00000000000000--- features: - | The Huawei FusionStorage Cinder driver (dsware) now supports OceanStor 100D Storage. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/HPE-3par-Generic-Volume-Group-e048002e1c3469a3.yaml0000664000175000017500000000015200000000000027025 0ustar00zuulzuul00000000000000--- features: - Added consistency group capability to generic volume groups in the HPE 3PAR driver. 
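Using the consistency-group capability that the HPE 3PAR note above adds to generic volume groups requires a group type carrying the consistent-group-snapshot spec (described in a later note in this directory). A hedged sketch; the group type name is illustrative:

.. code-block:: console

   $ cinder group-type-create 3par-cg
   $ cinder group-type-key 3par-cg set consistent_group_snapshot_enabled="<is> True"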
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/Huawei-volume-driver-replication-v2.1-ada5bc3ad62dc633.yaml0000664000175000017500000000011200000000000031221 0ustar00zuulzuul00000000000000--- features: - Added v2.1 replication support in Huawei Cinder driver. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/Huawei_Cinder_Driver_Support_Dorado_V6-5289a3b0ef90e8b1.yaml0000664000175000017500000000012300000000000031427 0ustar00zuulzuul00000000000000--- features: - | Huawei Cinder Driver Support Dorado V6 Storage.(iSCSI, FC) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/IET_iSCSI_target-dea5f68dc297510d.yaml0000664000175000017500000000014300000000000025056 0ustar00zuulzuul00000000000000--- upgrade: - | IET iSCSI target removed. IET iSCSI target was deprecated in the V release. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/Lefthand-generic-volume-group-570d07b4786b93c2.yaml0000664000175000017500000000014300000000000027452 0ustar00zuulzuul00000000000000--- features: - Add consistent group capability to generic volume groups in Lefthand driver. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/MacroSAN-volume-driver-6477e4ec7c38f49d.yaml0000664000175000017500000000015100000000000026234 0ustar00zuulzuul00000000000000--- features: - Added MacroSAN drivers that allows cinder to manage volumes in ISCSI and FC environment././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/NetApp-ONTAP-full-cg-support-cfdc91bf0acf9fe1.yaml0000664000175000017500000000035300000000000027502 0ustar00zuulzuul00000000000000--- features: - Added support for creating, deleting, and updating consistency groups for NetApp 7mode and CDOT backends. - Added support for taking, deleting, and restoring a cgsnapshot for NetApp 7mode and CDOT backends. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/SC-included_domain_ips_ListOpt-61bacddee199ce83.yaml0000664000175000017500000000037600000000000030216 0ustar00zuulzuul00000000000000--- features: - Added an ``included_domain_ips`` option to the Dell EMC SC driver. This option takes a comma separated list of target IP addresses listed under the fault domains to whitelisted. This option only applies to the ISCSI driver. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/SolidFire-generic-volume-group-1b1e55661cd83a43.yaml0000664000175000017500000000014600000000000027655 0ustar00zuulzuul00000000000000--- features: - Add consistent group capability to generic volume groups in the SolidFire driver. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/VMEM-6000-drivers-removed-9b6675ff7ae5f960.yaml0000664000175000017500000000010600000000000026276 0ustar00zuulzuul00000000000000--- upgrade: - Violin Memory 6000 array series drivers are removed. 
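The ``included_domain_ips`` note above is the allow-list counterpart of the earlier ``excluded_domain_ips`` option and applies only to the iSCSI driver. A minimal sketch with placeholder addresses:

.. code-block:: ini

   [dellsc-iscsi]
   included_domain_ips = 10.20.30.40,10.20.30.41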
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/Zadara-change-to-access-key-b16bdaa9d8460b57.yaml0000664000175000017500000000060600000000000027163 0ustar00zuulzuul00000000000000--- features: - | Zadara VPSA Driver: Added new driver authentication method to use VPSA API access key, and deprecate exisiting authentication method that used username and password combination. The deprecated config inputs will be removed in the next official release after Train. upgrade: - | Add a new config option 'zadara_access_key': Zadara VPSA access key. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/Zadara-newlayout-support-features-ffa20694c008ba86.yaml0000664000175000017500000000077600000000000030576 0ustar00zuulzuul00000000000000--- features: - | Zadara VPSA Driver: Added support for cinder features volume manage, snapshot manage, list manageable volumes, manageable snapshots, multiattach and ipv6 support. upgrade: - | The Zadara VPSA Driver has been updated to support json format and reorganized with new code layout. The module path ``cinder.volume.drivers.zadara.ZadaraVPSAISCSIDriver`` should now be updated to ``cinder.volume.drivers.zadara.zadara.ZadaraVPSAISCSIDriver`` in ``cinder.conf``. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/ZadaraStorage-13a5fff6f4fa1710.yaml0000664000175000017500000000007700000000000024620 0ustar00zuulzuul00000000000000--- features: - Added volume driver for Zadara Storage VPSA. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/a7401ead26a7c83b-keystone-url.yaml0000664000175000017500000000025000000000000024516 0ustar00zuulzuul00000000000000--- fixes: - Cinder will now correctly read Keystone's endpoint for quota calls from keystone_authtoken.auth_uri instead of keymgr.encryption_auth_url config option. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/add-availability_zone-filter-for-snapshot-8e1494212276abde.yaml0000664000175000017500000000010500000000000032063 0ustar00zuulzuul00000000000000--- features: - Added availability_zone filter for snapshots list. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/add-backup-project-attribute-3f57051ef9159b08.yaml0000664000175000017500000000007700000000000027325 0ustar00zuulzuul00000000000000--- features: - Added ability to query backups by project ID.././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/add-backup-swift-container-storage-policy-8d4a268ed61b9fe2.yaml0000664000175000017500000000073000000000000032146 0ustar00zuulzuul00000000000000--- features: - | Swift backup driver: Added new configuration option ``backup_swift_create_storage_policy`` for the Swift backup driver. If specified it will be used as the storage policy when creating the Swift Container, default value is None meaning it will not be used and Swift will use the system default. Please note that this only applies if a container doesn't exist as we cannot update the storage policy on an already existing container. 
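A hedged ``cinder.conf`` sketch for the ``backup_swift_create_storage_policy`` option described above. The policy name is a placeholder, and the setting only takes effect when the backup container does not already exist:

.. code-block:: ini

   [DEFAULT]
   backup_driver = cinder.backup.drivers.swift.SwiftBackupDriver
   backup_swift_create_storage_policy = gold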
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/add-cg-capability-to-groups-2eb3e71682a88600.yaml0000664000175000017500000000135200000000000027045 0ustar00zuulzuul00000000000000--- prelude: > Drivers supporting consistent group snapshot in generic volume groups reports "consistent_group_snapshot_enabled = True" instead of "consistencygroup_support = True". As a result, a spec such as "consistencygroup_support: ' True'" in either group type or volume type will cause the scheduler not to choose the backend that does not report "consistencygroup_support = True". In order to create a generic volume group that supports consistent group snapshot, "consistent_group_snapshot_enable: ' True'" should be set in the group type specs and volume type extra specs, and "consistencygroup_support: ' True'" should not be set in group type spec and volume type extra specs. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/add-cinder-wsgi-module-ae72ad42bfebbea8.yaml0000664000175000017500000000106400000000000026607 0ustar00zuulzuul00000000000000--- features: - | A new module, ``cinder.wsgi``, has been added as a place to gather WSGI ``application`` objects. This is intended to ease deployment by providing a consistent location for these objects. For example, if using uWSGI then instead of: .. code-block:: ini [uwsgi] wsgi-file = /bin/cinder-wsgi You can now use: .. code-block:: ini [uwsgi] module = cinder.wsgi.api:application This also simplifies deployment with other WSGI servers that expect module paths such as gunicorn. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/add-cluster-name-to-volume-details-ce01dd828faafcde.yaml0000664000175000017500000000022600000000000031063 0ustar00zuulzuul00000000000000--- features: - | Added new APIs on microversion 3.61 to show ``cluster_name`` attribute in the response body of volume details for admin. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/add-coho-driver-b4472bff3f64aa41.yaml0000664000175000017500000000007600000000000025036 0ustar00zuulzuul00000000000000--- features: - Added backend driver for Coho Data storage. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/add-configurable-img-conversion-param-1e7b545ae816dfe8.yaml0000664000175000017500000000042600000000000031325 0ustar00zuulzuul00000000000000--- features: - | Added the ``image_conversion_cpu_limit`` and ``image_conversion_address_space_limit`` as configurable parameters. This adds configurability to the image conversion process to prevent the process from timing out when converting larger images. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/add-connection-info-to-attachment-84d4dg45uh41db15.yaml0000664000175000017500000000011500000000000030500 0ustar00zuulzuul00000000000000--- features: - Added attribute ``connection_info`` to attachment object. 
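The image-conversion note above adds two tunables for large images. A minimal sketch, assuming an operator wants to loosen the limits; the values shown are illustrative, not the shipped defaults:

.. code-block:: ini

   [DEFAULT]
   image_conversion_cpu_limit = 120
   image_conversion_address_space_limit = 2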
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/add-count-info-in-list-api-e43wac44yu750c23.yaml0000664000175000017500000000013200000000000027101 0ustar00zuulzuul00000000000000--- features: - Added count info in volume, snapshot and backup's list APIs since 3.45. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/add-datacore-volume-driver-3775797b0515f538.yaml0000664000175000017500000000020700000000000026636 0ustar00zuulzuul00000000000000--- features: - Added iSCSI and Fibre Channel volume drivers for DataCore's SANsymphony and Hyper-converged Virtual SAN storage. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/add-datacore-volume-driver-5c1802798425acc1.yaml0000664000175000017500000000020700000000000026754 0ustar00zuulzuul00000000000000--- features: - Added iSCSI and Fibre Channel volume drivers for DataCore's SANsymphony and Hyper-converged Virtual SAN storage. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/add-del-volumeTypeAccess-b1c8cb14a9d14db3.yaml0000664000175000017500000000035700000000000026725 0ustar00zuulzuul00000000000000--- upgrade: - Adding or removing volume_type_access from any project during DB migration 62 must not be performed. - When running PostgreSQL it is required to upgrade and restart all the cinder-api services along with DB migration 62.././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/add-encryption-key-id-to-details-e721977fba0f2b51.yaml0000664000175000017500000000031700000000000030154 0ustar00zuulzuul00000000000000--- features: - | Starting with API microversion 3.64, an ``encryption_key_id`` attribute is included in the response body of volume and backup details when the associated volume is encrypted. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/add-filter-to-group-snapshots-74sd8g138a289dh4.yaml0000664000175000017500000000012500000000000027655 0ustar00zuulzuul00000000000000--- fixes: - Add filter, sorter and pagination support in group snapshot listings. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/add-filters-support-to-get_pools-0852e9c0e42fbf98.yaml0000664000175000017500000000007600000000000030347 0ustar00zuulzuul00000000000000--- features: - Add filters support to get_pools API v3.28. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/add-glance-service-section-3e73daee0e995442.yaml0000664000175000017500000000203300000000000027075 0ustar00zuulzuul00000000000000--- upgrade: - | With the adoption of New Location APIs, we need a mechanism to perform service-to-service communication to access the ``add_image_location`` and ``get_image_locations`` APIs. To achieve the desired functionality, we will need to perform two additional changes during the deployment: 1. Assign the ``admin`` and ``service`` role to the ``glance`` user 2. 
Configure a ``[glance]`` section in cinder configuration file with the credentials of ``glance`` user and ``service`` project. Refer to the ``[nova]`` or ``[service_user]`` section for reference. features: - | Added support for service-to-service communication between Cinder and Glance. Currently the service-to-service communication is leveraged by the new location APIs for which we will need to configure a dedicated ``[glance]`` section in cinder configuration file with the credentials of ``glance`` user and ``service`` project. Refer to the ``[nova]`` or ``[service_user]`` section for reference. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/add-google-backup-driver-d1e7ac33d5780b79.yaml0000664000175000017500000000010700000000000026552 0ustar00zuulzuul00000000000000--- features: - Added cinder backup driver for Google Cloud Storage. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/add-io-ports-option-c751d1bd395dd614.yaml0000664000175000017500000000012500000000000025616 0ustar00zuulzuul00000000000000--- features: - Add support to configure IO ports option in Dell EMC Unity driver. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/add-like-filter-support-7d4r78d6de3984dv.yaml0000664000175000017500000000030700000000000026625 0ustar00zuulzuul00000000000000--- features: - | Added like operator support to filters for the following resources:: - volume - snapshot - backup - group - group-snapshot - attachment - message ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/add-operation-to-request-spec-7yt6ub75uy1284as.yaml0000664000175000017500000000070500000000000030115 0ustar00zuulzuul00000000000000--- features: - | Now scheduler plugins are aware of operation type via ``operation`` attribute in RequestSpec dictionary, plugins can support backend filtering according to backend status as well as operation type. Current possible values for ``operation`` are: - create_volume - extend_volume - create_snapshot - retype_volume - migrate_volume - manage_existing - manage_existing_snapshot - create_group ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/add-option-max_luns_per_storage_group-dfe3e1396b262bc8.yaml0000664000175000017500000000047100000000000031566 0ustar00zuulzuul00000000000000--- deprecations: - | Deprecate option `check_max_pool_luns_threshold`. The VNX driver will always check the threshold. fixes: - | Add option `max_luns_per_storage_group` back. The max LUNs per storage group was set to 255 before. With the new option, admin can set it to a larger number. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/add-periodic-task-to-clean-expired-messages-84f47gxc88hda035.yaml0000664000175000017500000000026400000000000032366 0ustar00zuulzuul00000000000000--- features: - Added periodic task to clean expired messages in cinder scheduler, also added a configuration option ``message_reap_interval`` to handle the interval. 
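The glance service-to-service note above asks deployers to add a ``[glance]`` section modelled on ``[nova]`` / ``[service_user]``. A hedged sketch using common keystoneauth options; every value is a placeholder for the deployment's own ``glance`` service user and ``service`` project:

.. code-block:: ini

   [glance]
   auth_type = password
   auth_url = http://controller:5000/v3
   username = glance
   password = GLANCE_SERVICE_PASSWORD
   project_name = service
   user_domain_name = Default
   project_domain_name = Default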
././@PaxHeader0000000000000000000000000000020600000000000011453 xustar0000000000000000112 path=cinder-27.0.0/releasenotes/notes/add-powermax-live-migration-without-a-pool-name-7690fcd67b5f690c.yaml 22 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/add-powermax-live-migration-without-a-pool-name-7690fcd67b5f690c.ya0000664000175000017500000000054500000000000032623 0ustar00zuulzuul00000000000000--- fixes: - | Dell PowerMax driver `bug #2034937 `_: Fixed This change is to update the live migration ability in environments using PowerMax. In previous 2023.1 version, the live migration fails without a pool name. This update add the ability of live migration without a pool name. ././@PaxHeader0000000000000000000000000000020500000000000011452 xustar0000000000000000111 path=cinder-27.0.0/releasenotes/notes/add-project-id-to-group-groupsnapshot-response-512013e95a80784a.yaml 22 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/add-project-id-to-group-groupsnapshot-response-512013e95a80784a.yam0000664000175000017500000000033700000000000032522 0ustar00zuulzuul00000000000000--- features: - | Added ``project_id`` attribute to response body of list groups with detail, list group snapshots with detail, show group detail and show group snapshot detail APIs since microversion "3.58". ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/add-reset-group-snapshot-status-sd21a31cde5fa035.yaml0000664000175000017500000000007500000000000030332 0ustar00zuulzuul00000000000000--- features: - Added reset status API to group snapshot. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/add-reset-group-status-sd21a31cde5fa034.yaml0000664000175000017500000000010200000000000026463 0ustar00zuulzuul00000000000000--- features: - Added reset status API to generic volume group. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/add-resource-filters-api-8g3dub1700qaye98.yaml0000664000175000017500000000013200000000000027025 0ustar00zuulzuul00000000000000--- features: - Added ``resource_filters`` API to retrieve configured resource filters. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/add-revert-to-snapshot-support-2d21a3dv4f5fa087.yaml0000664000175000017500000000011000000000000030121 0ustar00zuulzuul00000000000000--- features: - Add revert to snapshot API and support in LVM driver. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/add-split-logger-conf-option-0424e3bd91de3a5a.yaml0000664000175000017500000000114700000000000027450 0ustar00zuulzuul00000000000000--- features: - | Added boolean conf option 'split_loggers' in [default] section of cinder.conf to `enable split logging`_ functionality. The default value of split_loggers option is set to False. Operator can set it's value to True to split HTTP content into subloggers to allow for fine-grained control of what is logged and how. This new config option 'split_loggers' should be enabled only when keystoneauth log level is set to DEBUG in 'default_log_levels' config option. .. 
_`enable split logging`: https://docs.openstack.org/keystoneauth/latest/using-sessions.html#logging ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/add-stochastic-scheduling-option-99e10eae023fbcca.yaml0000664000175000017500000000114200000000000030541 0ustar00zuulzuul00000000000000--- features: - Added a new config option ``scheduler_weight_handler``. This is a global option which specifies how the scheduler should choose from a listed of weighted pools. By default the existing weigher is used which always chooses the highest weight. - Added a new weight handler ``StochasticHostWeightHandler``. This weight handler chooses pools randomly, where the random probabilities are proportional to the weights, so higher weighted pools are chosen more frequently, but not all the time. This weight handler spreads new shares across available pools more fairly. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/add-suppress-lvm-fd-warnings-option.402bebc03b0a9f00.yaml0000664000175000017500000000145600000000000031000 0ustar00zuulzuul00000000000000--- upgrade: - | In certain environments (Kubernetes for example) indirect calls to the LVM commands result in file descriptor leak warning messages which in turn cause the process_execution method to raise and exception. To accommodate these environments, and to maintain backward compatibility in Newton we add a ``lvm_suppress_fd_warnings`` bool config to the LVM driver. Setting this to True will append the LVM env vars to include the variable ``LVM_SUPPRESS_FD_WARNINGS=1``. This is made an optional configuration because it only applies to very specific environments. If we were to make this global that would require a rootwrap/privsep update that could break compatibility when trying to do rolling upgrades of the volume service. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/add-tegile-driver-b7919c5f30911998.yaml0000664000175000017500000000007700000000000025107 0ustar00zuulzuul00000000000000--- features: - Added driver for Tegile IntelliFlash arrays. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/add-transfer-pagination-support-7y33u7y68de3cb16.yaml0000664000175000017500000000011600000000000030403 0ustar00zuulzuul00000000000000--- features: - Added transfer pagination support since microversion 3.59. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/add-user-id-attribute-to-backup-response-ce27364680c895f7.yaml0000664000175000017500000000017700000000000031516 0ustar00zuulzuul00000000000000--- features: - | Add ``user_id`` attribute to response body of list backup with detail and show backup detail APIs. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/add-vmax-replication-490202c15503ae03.yaml0000664000175000017500000000010600000000000025551 0ustar00zuulzuul00000000000000--- features: - Add v2.1 volume replication support in VMAX driver. 
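For the LVM file-descriptor warning note above, a minimal backend sketch; the section name is illustrative and the driver path is the standard LVM driver:

.. code-block:: ini

   [lvm-1]
   volume_driver = cinder.volume.drivers.lvm.LVMVolumeDriver
   lvm_suppress_fd_warnings = True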
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/add-volume-re-image-api-6f02dcefd4975a66.yaml0000664000175000017500000000357500000000000026405 0ustar00zuulzuul00000000000000--- features: - | Add microversion 3.68 to support the ability to re-image a volume with a specific image. Specify the ``os-reimage`` action in the request body. A volume in ``available`` or ``error`` status can be re-imaged directly. To re-image a volume in ``reserved`` status, you must include the ``reimage_reserved`` parameter set to ``true``. When reimaging a volume, the volume state will be changed to ``downloading`` first. Note that this is a destructive action, that is, all data currently contained in a volume is destroyed when the volume is re-imaged. Two new policies are introduced to govern this functionality: * ``volume:reimage`` - users who satisfy this policy may re-image a volume in status ``available`` or ``error`` * ``volume:reimage_reserved`` - users who satisfy this policy may re-image a volume in status ``reserved`` The default setting for both policies allow an administrator or the volume owner to perform the associated action. See the `Policy configuration `_ documentation in the `Cinder Service Configuration` guide for details. upgrade: - | Two new policies are introduced to govern the volume reimage functionality introduced with microversion 3.68: * ``volume:reimage`` - users who satisfy this policy may re-image a volume in status ``available`` or ``error`` * ``volume:reimage_reserved`` - users who satisfy this policy may re-image a volume in status ``reserved`` The default setting for both policies allow an administrator or the volume owner to perform the associated action. See the `Policy configuration `_ documentation in the `Cinder Service Configuration` guide for details. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/add-volume-type-filter-to_get-pools-c791132540921398.yaml0000664000175000017500000000007500000000000030346 0ustar00zuulzuul00000000000000--- features: - Add ``volume-type`` filter to API Get-Pools././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/add-volume-upload-image-options-3a61a31c544fa034.yaml0000664000175000017500000000016300000000000027777 0ustar00zuulzuul00000000000000--- fixes: - Added the options ``visibility`` and ``protected`` to the os-volume_upload_image REST API call. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/add_ceph_custom_keyring_path-43a3b8c21a1ab3c4.yaml0000664000175000017500000000020700000000000027727 0ustar00zuulzuul00000000000000--- features: - | Added RBD keyring configuration parameter ``rbd_keyring_conf`` to define custom path of Ceph keyring file. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/add_manage_unmanage_itri_disco_driver-1c9ee31cc86b6eda.yaml0000664000175000017500000000011300000000000031723 0ustar00zuulzuul00000000000000--- Features: - Added volume manage/unmanage support for disco driver. 
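The re-image note above introduces two policies whose documented default allows an administrator or the volume owner. An illustrative ``policy.yaml`` override that restates that default; the exact rule expression is a sketch, so consult the generated sample policy before relying on it:

.. code-block:: yaml

   "volume:reimage": "rule:admin_or_owner"
   "volume:reimage_reserved": "rule:admin_or_owner"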
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/add_multiattach_policies-8e0b22505ed6cbd8.yaml0000664000175000017500000000255700000000000027115 0ustar00zuulzuul00000000000000--- features: - Added policies to disallow multiattach operations. This includes two policies, the first being a general policy to allow the creation or retyping of multiattach volumes is a volume create policy with the name ``volume:multiattach``. The second policy is specifically for disallowing the ability to create multiple attachments on a volume that is marked as bootable, and is an attachment policy with the name ``volume:multiattach_bootable_volume``. The default for these new policies is ``rule:admin_or_owner``; be aware that if you wish to disable either of these policies for your users you will need to modify the default policy settings. upgrade: - Added policies to disallow multiattach operations. This includes two policies, the first being a general policy to allow the creation or retyping of multiattach volumes is a volume create policy with the name ``volume:multiattach``. The second policy is specifically for disallowing the ability to create multiple attachments on a volume that is marked as bootable, and is an attachment policy with the name ``volume:multiattach_bootable_volume``. The default for these new policies is ``rule:admin_or_owner``; be aware that if you wish to disable either of these policies for your users you will need to modify the default policy settings. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/add_nvme_tcp_driver-558ff80aa2029e2b.yaml0000664000175000017500000000012700000000000026005 0ustar00zuulzuul00000000000000--- features: - | Added NVMe/TCP volume driver for NetApp ONTAP Storage Cluster. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/add_replication_failback_to_solidfire-82668c071f4fa91d.yaml0000664000175000017500000000032300000000000031436 0ustar00zuulzuul00000000000000--- features: - | Add ability to call failover-host on a replication enabled SF cluster a second time with host id = default to initiate a failback to the default configured SolidFire Cluster. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/added-virtual-size-check-42a84f6b24366e5d.yaml0000664000175000017500000000076700000000000026517 0ustar00zuulzuul00000000000000--- fixes: - | `Bug #1980268 `_: When creating a volume from an image, a check has been added to compare the requested volume size to the image's ``virtual_size`` property and fail the request if the volume will be too small to contain the image. If the image record does not contain this property, the request is accepted but the volume will go to ``error`` status if the image does not fit (which is the current behavior). ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/added_ontap_libs_for_asar2_platform-6688b9f811645b96.yaml0000664000175000017500000000060600000000000030737 0ustar00zuulzuul00000000000000--- features: - | NetApp - The new ASAr2 driver class inherits from the existing ONTAP REST library, enabling reuse of the mature ONTAP codebase. 
This design extends the Cinder driver capabilities to support key volume operations on ASAr2 platform, including. * Volume Creation * Volume Deletion * Volume Attachment * Volume Detachment * Volume Extend ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/allow-admin-quota-operations-c1c2236711224023.yaml0000664000175000017500000000016000000000000027174 0ustar00zuulzuul00000000000000--- fixes: - Projects with the admin role are now allowed to operate on the quotas of all other projects. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/allow-deleting-__DEFAULT__-type-d35dfb5d89760b9b.yaml0000664000175000017500000000350300000000000027652 0ustar00zuulzuul00000000000000--- upgrade: - | The ``default_volume_type`` configuration option is now required to have a value. The default value is ``__DEFAULT__``, so you should see no change in behavior whether or not you have set a value for ``default_volume_type``. See `Bug #1886632 `_ for more information about this change. fixes: - | `Bug #1886632 `_: The system defined ``__DEFAULT__`` volume type is now treated as a regular volume-type and may be updated or deleted. Since the configured ``default_volume_type`` cannot be deleted, however, the ``__DEFAULT__`` volume type may not be deleted if it is the value of that configuration option. other: - | Beginning with the Train release, untyped volumes (that is, volumes with no volume-type) have been disallowed. To facilitate this, a ``__DEFAULT__`` volume-type was included as part of the Train database migration. In this release, handling of the default volume-type has been improved: * The ``default_volume_type`` configuration option is required to have a value. The default value is ``__DEFAULT__``. * A request to delete the currently configured ``default_volume_type`` will fail. (You can delete that volume-type, but you cannot do it while it is the value of the configuration option.) * There must always be at least one volume-type defined in a Cinder installation. This is enforced by the type-delete call. * If the ``default_volume_type`` is misconfigured (that is, if the value refers to a non-existent volume-type), requests that rely on the default volume-type (for example, a volume-create request that does not specify a volume-type) will result in a HTTP 500 response.././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/allow-encrypted-rbd-volumes-35d3536505e6309b.yaml0000664000175000017500000000057300000000000027144 0ustar00zuulzuul00000000000000--- features: - | LUKS Encrypted RBD volumes can now be created by cinder-volume. This capability was previously blocked by the rbd volume driver due to the lack of any encryptors capable of attaching to an encrypted RBD volume. These volumes can also be seeded with RAW image data from Glance through the use of QEMU 2.10 and the qemu-img convert command. 
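The ``__DEFAULT__`` volume-type note above makes ``default_volume_type`` a required option. A minimal sketch that keeps the shipped default explicit:

.. code-block:: ini

   [DEFAULT]
   default_volume_type = __DEFAULT__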
././@PaxHeader0000000000000000000000000000020600000000000011453 xustar0000000000000000112 path=cinder-27.0.0/releasenotes/notes/allow-huawei-driver-lun-copy-speed-configurable-361a480e7b7e361d.yaml 22 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/allow-huawei-driver-lun-copy-speed-configurable-361a480e7b7e361d.ya0000664000175000017500000000062400000000000032570 0ustar00zuulzuul00000000000000--- features: - Allow users to specify the copy speed while using Huawei driver to create volume from snapshot or clone volume, by the new added metadata 'copyspeed'. For example, user can add --metadata copyspeed=1 when creating volume from source volume/snapshot. The valid optional range of copyspeed is [1, 2, 3, 4], respectively representing LOW, MEDIUM, HIGH and HIGHEST. ././@PaxHeader0000000000000000000000000000021000000000000011446 xustar0000000000000000114 path=cinder-27.0.0/releasenotes/notes/allow-remove-name-and-description-for-consisgroup-408257a0a18bd530.yaml 22 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/allow-remove-name-and-description-for-consisgroup-408257a0a18bd530.0000664000175000017500000000014200000000000032513 0ustar00zuulzuul00000000000000--- features: - Allow API user to remove the consistency group name or description information. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/allow_disable_image_conversion-ebf33ce9d5edf724.yaml0000664000175000017500000000343700000000000030461 0ustar00zuulzuul00000000000000--- features: - | Added a new configuration option ``image_conversion_disable`` to disallow conversion between image disk format and volume format when doing certain operations. This can prevent performance problems on a cinder-volume node due to the large amount of system resources consumed during image conversion. The default value is ``False``, which corresponds to Cinder's current behavior to always attempt image conversion. This option affects three Block Storage API calls: * Upload volume to image: ``POST /v3/volumes/{volume_id}/action`` with the ``os-volume_upload_image`` action. This call will result in a 400 (Bad Request) response when an image ``disk_format`` that would require conversion is requested. * Create a volume: ``POST /v3/volumes`` with an ``imageRef`` attribute in the request body. This will result in a 202 (Accepted) response, but if the image's ``disk_format`` would require conversion to be written to the volume, the volume will go to ``error`` status. * Reimage a volume: ``POST /v3/volumes/{volume_id}/action`` with the ``os-reimage`` action. This call will result in a 202 (Accepted) response, but if the image's ``disk_format`` would require conversion to be written to the volume, the volume will go to ``error`` status. In the latter two cases, an end user can determine what happened by using the `Messages API `_, which can be accessed using the `cinderclient `_ or `openstackclient `_. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/announce-ceph-min-version-4eddb0def1c39928.yaml0000664000175000017500000000252100000000000027145 0ustar00zuulzuul00000000000000--- upgrade: - | RBD driver: Prior to this release, the Cinder project did not have a statement concerning what versions of Ceph are supported by Cinder. We hereby announce that: * For a given OpenStack release, Cinder supports the current Ceph active stable releases plus the two prior releases. 
* For any OpenStack release, it is expected that the versions of the Ceph client and server are in alignment. The `Ceph RADOS Block Device (RBD) `__ driver documentation has been updated to reflect this policy and explains it in more detail. other: - | **Supported Ceph versions** The Cinder project wishes to clarify its policy concerning what versions of Ceph are supported by Cinder. * For a given OpenStack release, Cinder supports the current Ceph active stable releases plus the two prior releases. * For any OpenStack release, it is expected that the versions of the Ceph client and server are in alignment. The `Ceph RADOS Block Device (RBD) `__ driver documentation has been updated to reflect this policy and explains it in more detail. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/apply-limits-to-qemu-img-29f722a1bf4b91f8.yaml0000664000175000017500000000044100000000000026577 0ustar00zuulzuul00000000000000--- security: - The qemu-img tool now has resource limits applied which prevent it from using more than 1GB of address space or more than 2 seconds of CPU time. This provides protection against denial of service attacks from maliciously crafted or corrupted disk images. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/attach-format-after-snapshot-9a1857456706aa72.yaml0000664000175000017500000000046400000000000027271 0ustar00zuulzuul00000000000000fixes: - | NFS driver `bug #1989514`_: When creating a snapshot of an attached volume, the volume attachment format was not updated in its connection_info and could have resulted in an unbootable guest. This has been fixed. .. _bug #1989514: https://bugs.launchpad.net/cinder/+bug/1989514 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/backend-options-ed19e6c63b2b9090.yaml0000664000175000017500000000071600000000000025077 0ustar00zuulzuul00000000000000--- fixes: - | Cinder stopped supporting single-backend configurations in Ocata. However, sample ``cinder.conf`` was still generated with driver-related options in ``[DEFAULT]`` section, where those options had no effect at all. Now all of driver options are listed in ``[backend_defaults]`` section, that indicates that those options are effective only in this section and ``[]`` sections listed in ``enabled_backends``. ././@PaxHeader0000000000000000000000000000021400000000000011452 xustar0000000000000000118 path=cinder-27.0.0/releasenotes/notes/backup-ceph-driver-journaling-exculsive-lock-features-6b6044138a288a83.yaml 22 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/backup-ceph-driver-journaling-exculsive-lock-features-6b6044138a2880000664000175000017500000000024500000000000032706 0ustar00zuulzuul00000000000000--- features: - Added new BoolOpt ``backup_ceph_image_journals`` for enabling the Ceph image features required to support RBD mirroring of Cinder backup pool. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/backup-driver-configuration-36357733962dab03.yaml0000664000175000017500000000046600000000000027202 0ustar00zuulzuul00000000000000--- features: - | Add ability to specify backup driver via class name. 
upgrade: - | Operators should change backup driver configuration value to use class name to get backup service working in a 'S' release. deprecations: - | Backup driver initialization using module name is deprecated. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/backup-path-removal-c411bb6c0d3887f1.yaml0000664000175000017500000000061400000000000025647 0ustar00zuulzuul00000000000000--- upgrade: - | The ability to specify a backup driver by module name was deprecated in the Queens release and the ability has now been removed. Any configuration in cinder.conf still using the module path should be updated to include the full class name. For example, ``cinder.backup.drivers.swift`` should be updated to ``cinder.backup.drivers.swift.SwiftBackupDriver``. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/backup-snapshot-6e7447db930c31f6.yaml0000664000175000017500000000013300000000000025036 0ustar00zuulzuul00000000000000--- features: - Support for snapshot backup using the optimal path in Huawei driver. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/backup-snapshots-2f547c8788bc11e1.yaml0000664000175000017500000000006500000000000025226 0ustar00zuulzuul00000000000000--- features: - Added ability to backup snapshots. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/backup-sparse-f396b35bfe17332e.yaml0000664000175000017500000000044500000000000024560 0ustar00zuulzuul00000000000000--- fixes: - | `Bug #2007615 `_: the restore operation of the Cinder backup service now restores into sparse volumes, if possible. So, operators no longer need more space than used previously when they restore from a disaster. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/backup-sparse-f685f4321f2994f5.yaml0000664000175000017500000000046200000000000024435 0ustar00zuulzuul00000000000000--- fixes: - | `Bug #2025277 `_: Fixed a regression in the fix for Cinder backup restoring into sparse volumes, where OpenStack's integrated CLI triggered a traceback. The deprecated project-specific legacy CLI of Cinder continued to work. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/backup-update-d0b0db6a7b1c2a5b.yaml0000664000175000017500000000011000000000000024720 0ustar00zuulzuul00000000000000--- features: - Added REST API to update backup name and description. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/backup-user-messages-5ee0c7ead3def8f9.yaml0000664000175000017500000000051000000000000026347 0ustar00zuulzuul00000000000000--- other: - | Added user messages for backup operations that a user can query through the `Messages API `_. 
These allow users to retrieve error messages for asynchronous failures in backup operations like create, delete, and restore.././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/backup-volumenotfound-set-to-error-fa47b3631093a702.yaml0000664000175000017500000000025500000000000030531 0ustar00zuulzuul00000000000000--- fixes: - | `Bug #1996049 `_: Fixed bug where backup was not set to error on failure when volume did not exist. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/backup_driver_init_state-d4834fa927e502ab.yaml0000664000175000017500000000016400000000000027054 0ustar00zuulzuul00000000000000--- fixes: - Fixed service state reporting when backup manager is unable to initialize one of the backup drivers. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/backup_max_operations-27753c748ba1dc1a.yaml0000664000175000017500000000030700000000000026361 0ustar00zuulzuul00000000000000--- features: - | We can now limit the number of concurrent backup/restore operations that a Cinder backup service can perform using the ``backup_max_operations`` configuration option. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/backup_s3_driver-238e3612acd7cc06.yaml0000664000175000017500000000043200000000000025226 0ustar00zuulzuul00000000000000--- features: - | Added new backup driver to enable backing up cinder volumes to S3-compatible storage. See the reference `S3 backup driver `_ for more information. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/balanced-fc-port-selection-fbf6b841fea99156.yaml0000664000175000017500000000011100000000000027163 0ustar00zuulzuul00000000000000--- features: - Support balanced FC port selection for Huawei drivers. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/bdd-pools-stats-afb4398daa9248de.yaml0000664000175000017500000000011000000000000025167 0ustar00zuulzuul00000000000000--- features: - Report pools in volume stats for Block Device Driver. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/bp-add-volume-backup-id-e10d053638cb2e78.yaml0000664000175000017500000000055600000000000026226 0ustar00zuulzuul00000000000000--- features: - | Added the property ``src_backup_id`` to the volume's metadata, to record from which backup the new volume was created. If the ``src_backup_id`` exists in the volume's metadata, it will be updated. When restoring from a chain of incremental backups, ``src_backup_id`` is set to the last incremental backup used for the restore.././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/bp-datera-cinder-driver-update-2.1-5c6455b45563adc5.yaml0000664000175000017500000000037400000000000030107 0ustar00zuulzuul00000000000000--- features: - Updating the Datera Elastic DataFabric Storage Driver to version 2.1. This adds ACL support, Multipath support and basic IP pool support. 
upgrade: - Changes config option default for ``datera_num_replicas`` from 1 to 3 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/bp-dell-powerflex-aa-828facb25b1fde63.yaml0000664000175000017500000000030100000000000026052 0ustar00zuulzuul00000000000000--- features: - | Dell PowerFlex driver: Enabled cinder volume active/active support. This allows users to configure Dell PowerFlex backends in cinder clustered environments. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/bp-dell-powermax-consistency-exempt.yaml0000664000175000017500000000132600000000000026614 0ustar00zuulzuul00000000000000--- features: - | Dell PowerMax Driver: use consistency exempt flag consistently PowerMax allows volumes to be added, removed, or suspended without affecting the state of the SRDF/A or SRDF/Metro session or requiring that other volumes in the session be suspended. Known as --exempt for symcli and editStorageGroupActionParam in the PowerMax REST API, this capability is available for an SRDF group supporting an active SRDF/A session or an active SRDF/Metro session. The PowerMax Cinder driver currently uses the exempt flag when volumes are added to SRDF groups, but not when volumes are removed. This incurs an unnecessary performance penalty that is resolved by this change. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/bp-dell-powermax-nvme-tcp-606b091620685c06.yaml0000664000175000017500000000010600000000000026410 0ustar00zuulzuul00000000000000--- features: - | Dell PowerMax driver: Added NVMe-TCP support. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/bp-dell-powerstore-aa-ca7b2e9355a1e5a5.yaml0000664000175000017500000000027700000000000026200 0ustar00zuulzuul00000000000000--- features: - | Dell PowerStore driver: Enabled cinder volume active/active support. This allows users to configure Dell PowerStore backends in cinder clustered environments. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/bp-dell-powerstore-qos-1532737fa1bb2664.yaml0000664000175000017500000000020100000000000026162 0ustar00zuulzuul00000000000000--- features: - | Dell PowerStore Driver: Added QoS (Quality of Service) support for PowerStore 4.0 or later versions. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/bp-ibm-gpfs-supported-26ae5381dd2a47ad.yaml0000664000175000017500000000033200000000000026207 0ustar00zuulzuul00000000000000--- upgrade: - | IBM GPFS drivers had been previously marked unsupported. Testing requirements have been addressed and they are now fully supported again. IBM GPFS drivers allow cinder to manage volumes. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/bp-infinidat-add-snapshot-revert-1bab97e85ff10780.yaml0000664000175000017500000000013200000000000030245 0ustar00zuulzuul00000000000000--- features: - | Infinidat driver: Added support for revert to snapshot operation. 
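As a purely illustrative sketch (assuming the standard cinder CLI, API microversion 3.40 or later, and a placeholder snapshot ID), reverting a volume to its latest snapshot would look like::

    cinder --os-volume-api-version 3.40 revert-to-snapshot <snapshot-id>

Note that Cinder's revert operation applies to the volume's most recent snapshot; the Infinidat driver then performs the revert on the backend.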
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/bp-inspur-instorage-driver-40371862c9559238.yaml0000664000175000017500000000015200000000000026643 0ustar00zuulzuul00000000000000--- features: - | New Cinder volume driver for Inspur InStorage. The new driver supports iSCSI. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/bp-jdss-add-cert-and-snapshot-revert-b34f352754ad07de.yaml0000664000175000017500000000036000000000000030733 0ustar00zuulzuul00000000000000--- features: - | Added support of authenticity verification through self-signed certificates for JovianDSS data storage. Added support of revert to snapshot functionality. Expands unit-test coverage for JovianDSS driver. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/bp-kumoscale-driver-3a01460f1aa83939.yaml0000664000175000017500000000020600000000000025512 0ustar00zuulzuul00000000000000--- features: - | New Cinder volume driver for KIOXIA Kumoscale storage systems. The driver storage system supports NVMeOF. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/bp-lightbits-lightos-clustered-nvmetcp-driver-d1ef8f83263921f2.yaml0000664000175000017500000000046500000000000032740 0ustar00zuulzuul00000000000000--- features: - | Lightbits LightOS driver: new Cinder driver for Lightbits(TM) LightOS(R). Lightbits Labs (http://www.lightbitslabs.com) LightOS is software-defined, cloud native, high-performance, clustered scale-out and redundant NVMe/TCP storage that performs like local NVMe flash. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/bp-netapp-flexgroup-support-c462fca33f0d8906.yaml0000664000175000017500000000324200000000000027423 0ustar00zuulzuul00000000000000--- features: - | NetApp ONTAP driver: added support for FlexGroup pool using the NFS mode. There are several considerations for using the driver with it: 1. The FlexGroup pool is only supported using ONTAP storage 9.8 or greater. 2. The FlexGroup pool has a different view of aggregate capabilites, changing them by a list of elements, instead of a single element. They are ``netapp_aggregate``, ``netapp_raid_type``, ``netapp_disk_type`` and ``netapp_hybrid_aggregate``. The ``netapp_aggregate_used_percent`` capability is an average of used percent of all FlexGroup's aggregates. 3. The ``utilization`` capability is not calculated to FlexGroup pools, it is always set to default of 50. 4. The driver cannot support consistency group with volumes that are over FlexGroup pools. 5. The driver cannot support multi-attach with volumes that are over FlexGroup pools. 6. For volumes over the FlexGroup pool, the operations of clone volume, create snapshot and create volume from an image are implemented as the NFS generic driver. Hence, it does not rely on the ONTAP storage to perform those operations. 7. A driver with FlexGroup pools has snapshot support disabled by default. To enable, you must set ``nfs_snapshot_support`` to true in the backend's configuration section of the cinder configuration file. 8. The driver image cache is not applied for volumes over FlexGroup pools. It can use the core image cache for avoiding downloading twice, though. 
9. Given that the FlexGroup pool may be on several cluster nodes, the QoS minimum support is only enabled if all nodes support it. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/bp-netapp-ontap-adaptive-qos-45891585a91eab75.yaml0000664000175000017500000000115700000000000027271 0ustar00zuulzuul00000000000000--- features: - | NetApp ONTAP driver: Added support for Adaptive QoS specs. The driver now accepts ``expectedIOPSperGiB``, ``peakIOPSperGiB``, ``expectedIOPSAllocation``, ``peakIOPSAllocation``, ``absoluteMinIOPS`` and ``blockSize``. The field ``peakIOPSperGiB`` and the field ``expectedIOPSperGiB`` are required together. The ``expectedIOPSperGiB`` and ``absoluteMinIOPS`` specs are only guaranteed by ONTAP AFF systems. All specs can only be used with ONTAP version equal or greater than 9.4, excepting the ``expectedIOPSAllocation`` and ``blockSize`` specs which require at least 9.5. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/bp-netapp-ontap-min-throughput-qos-cd3812df5c7da8fd.yaml0000664000175000017500000000127300000000000031036 0ustar00zuulzuul00000000000000--- features: - | NetApp ONTAP driver: Added support for QoS Min (floor) throughput specs. The driver now accepts ``minIOPS`` and ``minIOPSperGiB`` specs, which can be set either individually or along with Max (ceiling) throughput specs. The feature requires storage ONTAP All Flash FAS (AFF) with version equal or greater than 9.3 for NFS and 9.2 for iSCSI and FCP. It also works with Select Premium with SSD and C190 storages with at least ONTAP 9.6. - | NetApp ONTAP driver: Added a new driver specific capability called `netapp_qos_min_support`. It is used to filter the pools that has support to the Qos minimum (floor) specs during the scheduler phase. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/bp-netapp-self-signed-https-support-cb30081d4465acd1.yaml0000664000175000017500000000172600000000000030736 0ustar00zuulzuul00000000000000--- features: - | NetApp ONTAP driver: Added support for self-signed certificate support for HTTPS transport for management communication between Cinder and NetApp ONTAP. ONTAP systems utilize self-signed certificates for HTTPS management access by default. These certificates are generated automatically during the initial setup or deployment of ONTAP. When ssl_cert_path is configured with the extracted certificate file (.PEM format), Cinder establishes HTTPS communication with full certificate validation. When ssl_cert_path is not provided, Cinder automatically uses HTTPS with an unverified SSL context, which provides encrypted communication but skips certificate validation. This allows secure transport while maintaining ease of configuration with ONTAP's default self-signed certificates. Administrators can extract the certificate using tools such as openssl or curl for full certificate validation if desired. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/bp-netapp-solidfire-ipv6-on-management-ip-10187de7b732335a.yaml0000664000175000017500000000011600000000000031522 0ustar00zuulzuul00000000000000--- features: - | SolidFire driver now supports IPv6 for management IP. 
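For illustration only (the backend name, address, and credentials below are placeholders, and the option names are assumed from the usual SolidFire backend configuration), the management endpoint may now be an IPv6 address::

    [solidfire-1]
    volume_driver = cinder.volume.drivers.solidfire.SolidFireDriver
    san_ip = fd20:8b1e:b258:2000::5
    san_login = admin
    san_password = <password>
    volume_backend_name = solidfire-1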
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/bp-netapp-solidfire-revert-to-snapshot-741b7c204cc99546.yaml0000664000175000017500000000015200000000000031302 0ustar00zuulzuul00000000000000--- features: - | NetApp SolidFire driver now supports optimized revert to snapshot operations. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/bp-nfs-volume-encryption-3d8362843caeb39c.yaml0000664000175000017500000000012700000000000026676 0ustar00zuulzuul00000000000000--- features: - | The NFS driver now supports the creation of encrypted volumes. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/bp-nvmeof-lvm-target-b7771955b426abe7.yaml0000664000175000017500000000034000000000000025707 0ustar00zuulzuul00000000000000--- features: - | A new target, NVMET, is added for the LVM driver over RDMA, it allows cinder to use nvmetcli in order to create/delete subsystems on attaching/detaching an LVM volume to/from an instance. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/bp-open-src-ibm-storage-driver-d17808e52aa4eacb.yaml0000664000175000017500000000122700000000000027771 0ustar00zuulzuul00000000000000--- features: - | The IBM_Storage driver has been open sourced. This means that there is no more need to download the package from the IBM site. The only requirement remaining is to install pyxcli, which is available through pypi:: ``sudo pip install pyxcli`` upgrade: - | Previous installations of IBM Storage must be un-installed first and the new driver should be installed on top. In addition the cinder.conf values should be updated to reflect the new paths. For example the proxy setting of ``storage.proxy.IBMStorageProxy`` should be updated to ``cinder.volume.drivers.ibm.ibm_storage.proxy.IBMStorageProxy``. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/bp-powermax-protected-snap-82eb6731553356d9.yaml0000664000175000017500000000122200000000000026767 0ustar00zuulzuul00000000000000--- features: - | Dell EMC PowerMax driver: Added SRDF ``powermax:disable_protected_snap`` volume-type extra-spec property for the purpose of avoiding overconsumption on both source and target storage arrays. An operator may enable this functionality by creating a specific volume type with the property:: "powermax:disable_protected_snap": " True" When disabled (which is the default and current behavior), a replicated source volume will be protected with a snapshot of the same volume type. When enabled, snapshots of replicated source volumes will be treated as regular, non-replicated devices. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/bp-powerstore-cg-support-ac1842d2041dcbfd.yaml0000664000175000017500000000011200000000000027035 0ustar00zuulzuul00000000000000--- features: - | PowerStore driver: Add Consistency Groups support.././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/bp-powerstore-cinder-driver-94f8c7f1371eafe7.yaml0000664000175000017500000000011400000000000027447 0ustar00zuulzuul00000000000000--- features: - | Add Dell EMC PowerStore Storage Driver (iSCSI, FC). ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/bp-powerstore-nfs-cinder-driver-b743a8a89acafa35.yaml0000664000175000017500000000010300000000000030271 0ustar00zuulzuul00000000000000--- features: - | Dell PowerStore: Added NFS storage driver. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/bp-powerstore-replication-support-700016b83437602e.yaml0000664000175000017500000000052700000000000030334 0ustar00zuulzuul00000000000000--- features: - | PowerStore driver: Add OpenStack replication v2.1 support. deprecations: - | PowerStore driver: ``powerstore_appliances`` option is deprecated and will be removed in a future release. Driver does not use this option to determine which appliances to use. PowerStore uses its own load balancer instead. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/bp-provisioning-improvements-bb7e28896e2a2539.yaml0000664000175000017500000000027100000000000027620 0ustar00zuulzuul00000000000000--- features: - Cinder now supports the use of 'max_over_subscription_ratio = auto' which automatically calculates the value for max_over_subscription_ratio in the scheduler. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/bp-remove-netapp-7mode-drivers-c38398e54662f2d4.yaml0000664000175000017500000000106000000000000027532 0ustar00zuulzuul00000000000000--- upgrade: - Support for NetApp ONTAP 7 (previously known as "Data ONTAP operating in 7mode") has been removed. The NetApp Unified driver can now only be used with NetApp Clustered Data ONTAP and NetApp E-Series storage systems. This removal affects all three storage protocols that were supported on for ONTAP 7 - iSCSI, NFS and FC. Deployers are advised to consult the `migration support `_ provided to transition from ONTAP 7 to Clustered Data ONTAP operating system. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/bp-toyou-acs5000-driver-16449ca18280def3.yaml0000664000175000017500000000014700000000000026054 0ustar00zuulzuul00000000000000--- features: - | New Cinder volume driver for TOYOU ACS5000. The new driver supports iSCSI. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/bp-vmware-fcd-fbe19ee577d2e9e4.yaml0000664000175000017500000000013300000000000024621 0ustar00zuulzuul00000000000000--- features: - | Added backend driver for VMware VStorageObject (First Class Disk). 
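A minimal illustrative backend section for the new driver might look like the following (host, credentials, and backend name are placeholders; the option names are assumed to match the existing VMware VMDK driver configuration)::

    [vmware-fcd-1]
    volume_driver = cinder.volume.drivers.vmware.fcd.VMwareVStorageObjectDriver
    vmware_host_ip = vcenter.example.com
    vmware_host_username = administrator@vsphere.local
    vmware_host_password = <password>
    volume_backend_name = vmware-fcd-1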
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/bp-yadro-tatlin-unified-driver-122218f077d70312.yaml0000664000175000017500000000013100000000000027411 0ustar00zuulzuul00000000000000--- features: - | Yadro Tatlin Unified: Added initial version of the iSCSI driver. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/bp-yadro-tatlin-unified-fc-b6e1225ad99c6304.yaml0000664000175000017500000000012600000000000026743 0ustar00zuulzuul00000000000000--- features: - | Yadro Tatlin Unified: Added initial version of the FC driver. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/brcd_lookupservice_http_support-f6485b38a1feaa15.yaml0000664000175000017500000000020400000000000030603 0ustar00zuulzuul00000000000000--- features: - Support for use of ``fc_southbound_protocol`` configuration setting in the Brocade FC SAN lookup service. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/brocade_http_connector-0021e41dfa56e671.yaml0000664000175000017500000000063100000000000026427 0ustar00zuulzuul00000000000000--- features: - HTTP connector for the Cinder Brocade FC Zone plugin. This connector allows for communication between the Brocade FC zone plugin and the switch to be over HTTP or HTTPs. To make use of this connector, the user would add a configuration setting in the fabric block for a Brocade switch with the name as 'fc_southbound_protocol' with a value as 'HTTP' or 'HTTPS'. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/brocade_looup_fail_get_client-179151d449a34aa4.yaml0000664000175000017500000000033700000000000027654 0ustar00zuulzuul00000000000000--- fixes: - | `Bug #1888550 `_: Fix `UnboundLocalError` on the Brocade lookup driver on southbound client creation failure during the device mapping retrieval. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/brocade_py3-15647dbe3981d44b.yaml0000664000175000017500000000022400000000000024126 0ustar00zuulzuul00000000000000--- fixes: - | `Bug #1888548 `_: Add Python 3 support to the Brocade Zone Manager driver. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/brocade_rest_client-202cfd474c96d3fe.yaml0000664000175000017500000000041300000000000026065 0ustar00zuulzuul00000000000000--- fixes: - | `Bug #1866860 `_: Fix `AttributeError` on the Brocade ZM driver when using setting REST_HTTP or REST_HTTPS as the fc_southbound_protocol option and an exception is raised by the client. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/brocade_virtual_fabrics_support-d2d0b95b19457c1d.yaml0000664000175000017500000000042500000000000030442 0ustar00zuulzuul00000000000000--- features: - Support for configuring Fibre Channel zoning on Brocade switches through Cinder Fibre Channel Zone Manager and Brocade Fibre Channel zone plugin. To zone in a Virtual Fabric, set the configuration option 'fc_virtual_fabric_id' for the fabric. 
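As an illustrative sketch only (the fabric name, address, and credentials are placeholders; the option names other than ``fc_virtual_fabric_id`` are assumed from the usual Brocade fabric configuration), a fabric block that zones into Virtual Fabric 10 might look like::

    [fc-zone-manager]
    fc_fabric_names = brcd_vfab

    [brcd_vfab]
    fc_fabric_address = 192.0.2.10
    fc_fabric_user = admin
    fc_fabric_password = <password>
    fc_virtual_fabric_id = 10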
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/bug-1518213-a5bf2ea0d008f329.yaml0000664000175000017500000000013100000000000023451 0ustar00zuulzuul00000000000000--- features: - Added Keystone v3 support for Swift backup driver in single user mode. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/bug-1560649-d4f3ff71fe4ddb89.yaml0000664000175000017500000000031400000000000023657 0ustar00zuulzuul00000000000000--- fixes: - | Dell EMC Scale IO Driver: Fixes `bug 1560649 ` for creating volumes with sizes greater than that of the original snapshot. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/bug-1560867-support-nova-specific-image-7yt6fd1173c4e3wd.yaml0000664000175000017500000000024300000000000031173 0ustar00zuulzuul00000000000000--- fixes: - Fix the bug that Cinder can't support creating volume from Nova specific image which only includes ``snapshot-id`` metadata (Bug #1560867). ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/bug-1570845-efdb0206718f4ca4.yaml0000664000175000017500000000020100000000000023465 0ustar00zuulzuul00000000000000--- upgrade: - The ``backup_service_inithost_offload`` configuration option now defaults to ``True`` instead of ``False``. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/bug-1587376-fix-manage-resource-quota-issue-78f59f39b9fa4762.yaml0000664000175000017500000000022600000000000031516 0ustar00zuulzuul00000000000000--- fixes: - Fix the bug that Cinder would commit quota twice in a clean environment when managing volume and snapshot resource (Bug #1587376). ././@PaxHeader0000000000000000000000000000021500000000000011453 xustar0000000000000000119 path=cinder-27.0.0/releasenotes/notes/bug-1612763-report-multiattach-enabled-NetApp-backends-0fbf2cb621e4747d.yaml 22 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/bug-1612763-report-multiattach-enabled-NetApp-backends-0fbf2cb621e40000664000175000017500000000030200000000000032271 0ustar00zuulzuul00000000000000--- fixes: - Volumes created on NetApp cDOT and 7mode storage systems now report 'multiattach' capability. They have always supported such a capability, but not reported it to Cinder. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/bug-1614095-add-user_id-to-snapshot_show-4884fab825983c3a.yaml0000664000175000017500000000012300000000000031037 0ustar00zuulzuul00000000000000--- features: - Add ``user_id`` field to snapshot list/detail and snapshot show. ././@PaxHeader0000000000000000000000000000022300000000000011452 xustar0000000000000000125 path=cinder-27.0.0/releasenotes/notes/bug-1615451-NetApp-cDOT-fix-reporting-replication-capability-dca29f39b9fa7651.yaml 22 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/bug-1615451-NetApp-cDOT-fix-reporting-replication-capability-dca29f0000664000175000017500000000031300000000000032474 0ustar00zuulzuul00000000000000--- fixes: - NetApp cDOT block and file drivers now report replication capability at the pool level; and are hence compatible with using the ``replication_enabled`` extra-spec in volume types. 
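For example (illustrative only; the volume type name is a placeholder and the commands assume the standard openstack CLI), a replicated type can be defined with::

    openstack volume type create netapp-replicated
    openstack volume type set --property replication_enabled='<is> True' netapp-replicated

Volumes created with such a type can then be scheduled onto NetApp cDOT pools that report the replication capability.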
././@PaxHeader0000000000000000000000000000023000000000000011450 xustar0000000000000000130 path=cinder-27.0.0/releasenotes/notes/bug-1622057-netapp-cdot-fix-replication-status-cheesecake-volumes-804dc8b0b1380e6b.yaml 22 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/bug-1622057-netapp-cdot-fix-replication-status-cheesecake-volumes-80000664000175000017500000000027200000000000033000 0ustar00zuulzuul00000000000000--- fixes: - The NetApp cDOT driver now sets the ``replication_status`` attribute appropriately on volumes created within replicated backends when using host level replication.././@PaxHeader0000000000000000000000000000023000000000000011450 xustar0000000000000000130 path=cinder-27.0.0/releasenotes/notes/bug-1632333-netapp-ontap-copyoffload-downloads-glance-image-twice-08801d8c7b9eed2c.yaml 22 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/bug-1632333-netapp-ontap-copyoffload-downloads-glance-image-twice-00000664000175000017500000000024000000000000032702 0ustar00zuulzuul00000000000000--- fixes: - | Fixed bug 1632333 with the NetApp ONTAP Driver. Now the copy offload method is invoked early to avoid downloading Glance images twice. ././@PaxHeader0000000000000000000000000000021500000000000011453 xustar0000000000000000119 path=cinder-27.0.0/releasenotes/notes/bug-1634203-netapp-cdot-fix-clone-from-nfs-image-cache-2218fb402783bc20.yaml 22 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/bug-1634203-netapp-cdot-fix-clone-from-nfs-image-cache-2218fb4027830000664000175000017500000000016500000000000031570 0ustar00zuulzuul00000000000000--- fixes: - Fixed an issue where the NetApp cDOT NFS driver failed to clone new volumes from the image cache. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/bug-1660927-netapp-no-copyoffload-77fc3cf4f2cf2335.yaml0000664000175000017500000000017300000000000027712 0ustar00zuulzuul00000000000000--- fixes: - | Fixed misleading error message when NetApp copyoffload tool is not in place during image cloning. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/bug-1667071-dc6407f40a1f7d15.yaml0000664000175000017500000000135700000000000023420 0ustar00zuulzuul00000000000000--- fixes: - Modifying the extra-specs of an in use Volume Type was something that we've unintentionally allowed. The result is unexpected or unknown volume behaviors in cases where a type was modified while a volume was assigned that type. This has been particularly annoying for folks that have assigned the volume-type to a different/new backend device. In case there are customers using this "bug" we add a config option to retain the bad behavior "allow_inuse_volume_type_modification", with a default setting of False (Don't allow). Note this config option is being introduced as deprecated and will be removed in a future release. It's being provided as a bridge to not break upgrades without notice. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/bug-1670260-fix-boolean-is_public-d16e1957c0f09d65.yaml0000664000175000017500000000053100000000000027507 0ustar00zuulzuul00000000000000--- fixes: - | Fixed issue where ``create`` and ``update`` api's of ``volume-type`` and ``group_type`` were returning 500 error if boolean 'is_public' value passed in the form of string. 
Now user can pass following valid boolean values to these api's: '0', 'f', 'false', 'off', 'n', 'no', '1', 't', 'true', 'on', 'y', 'yes'././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/bug-1671220-4d521be71d0b8aa4.yaml0000664000175000017500000000024600000000000023453 0ustar00zuulzuul00000000000000--- fixes: - | Fixed consistency groups API which was always returning groups scoped to project ID from user context instead of given input project ID. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/bug-1686745-e8f1569455f998ba.yaml0000664000175000017500000000012500000000000023377 0ustar00zuulzuul00000000000000--- features: - | Add support to force detach a volume from all hosts on 3PAR. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/bug-1690954-40fc21683977e996.yaml0000664000175000017500000000021400000000000023227 0ustar00zuulzuul00000000000000--- fixes: - | NetApp ONTAP NFS (bug 1690954): Fix wrong usage of export path as volume name when deleting volumes and snapshots. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/bug-1691771-fix-netapp-manage-volumes-62bec192a08b3ceb.yaml0000664000175000017500000000026600000000000030544 0ustar00zuulzuul00000000000000--- fixes: - The NetApp cDOT driver operating with NFS protocol has been fixed to manage volumes correctly when ``nas_secure_file_operations`` option has been set to False.././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/bug-1693084-fix-az-cache-invalid-6td4q74q28uxcd68.yaml0000664000175000017500000000020500000000000027573 0ustar00zuulzuul00000000000000--- fixes: - | Now cinder will refresh the az cache immediately if previous create volume task failed due to az not found. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/bug-1699936-fix-host-show-incorrect-fg8698gu7y6r7d15.yaml0000664000175000017500000000013700000000000030343 0ustar00zuulzuul00000000000000--- fixes: - | Now the ``os-host show`` API will count project's resource correctly. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/bug-1703405-53f09205024f2095.yaml0000664000175000017500000000041600000000000023102 0ustar00zuulzuul00000000000000--- fixes: - | Fixes a bug that prevented creation of Quobyte volumes from snapshots during snapshot backups. This now allows backing up volumes with existing snapshots. Partially fixes `bug 1703405 `_ . ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/bug-1705375-prohibit-group-deletion-if-groupsnapshot-exists.yaml0000664000175000017500000000011000000000000032566 0ustar00zuulzuul00000000000000--- fixes: - Prohibit the deletion of group if group snapshot exists. 
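As an illustrative sequence only (IDs are placeholders, and the commands assume a cinder CLI and API microversion that support the generic volume groups API), existing group snapshots must be removed before the group itself::

    cinder group-snapshot-delete <group-snapshot-id>
    cinder group-delete <group-id>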
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/bug-1706888-update-backend-when-extending-3e4a9831a0w29d68.yaml0000664000175000017500000000010700000000000031165 0ustar00zuulzuul00000000000000--- fixes: - Update backend state in scheduler when extending volume.././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/bug-1712651-7bc90264eb5001ea.yaml0000664000175000017500000000030400000000000023373 0ustar00zuulzuul00000000000000--- fixes: - | NetApp ONTAP iSCSI (bug 1712651): Fix ONTAP NetApp iSCSI driver not raising a proper exception when trying to extend an attached volume beyond its max geometry. ././@PaxHeader0000000000000000000000000000021600000000000011454 xustar0000000000000000120 path=cinder-27.0.0/releasenotes/notes/bug-1714209-netapp-ontap-drivers-oversubscription-issue-c4655b9c4858d7c6.yaml 22 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/bug-1714209-netapp-ontap-drivers-oversubscription-issue-c4655b9c4850000664000175000017500000000150100000000000032461 0ustar00zuulzuul00000000000000--- fixes: - The ONTAP drivers ("7mode" and "cmode") have been fixed to not report consumed space as "provisioned_capacity_gb". They instead rely on the cinder scheduler's calculation of "provisioned_capacity_gb". This fixes the oversubscription miscalculations with the ONTAP drivers. This bugfix affects all three protocols supported by these drivers (iSCSI/FC/NFS). upgrade: - If using the NetApp ONTAP drivers (7mode/cmode), the configuration value for "max_over_subscription_ratio" may need to be increased to avoid scheduling problems where storage pools that previously were valid to schedule new volumes suddenly appear to be out of space to the Cinder scheduler. See documentation `here `_. ././@PaxHeader0000000000000000000000000000022100000000000011450 xustar0000000000000000123 path=cinder-27.0.0/releasenotes/notes/bug-1718739-netapp-eseries-fix-provisioned-capacity-report-8c51fd1173c15dbf.yaml 22 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/bug-1718739-netapp-eseries-fix-provisioned-capacity-report-8c51fd110000664000175000017500000000045300000000000032556 0ustar00zuulzuul00000000000000--- fixes: - The NetApp E-series driver has been fixed to correctly report the "provisioned_capacity_gb". Now it sums the capacity of all the volumes in the configured backend to get the correct value. This bug fix affects all the protocols supported by the driver (FC and iSCSI). ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/bug-1723226-allow-purging-0day-4de8979db7215cf3.yaml0000664000175000017500000000037600000000000027057 0ustar00zuulzuul00000000000000--- fixes: - Added ability to purge records less than 1 day old, using the cinder-manage db_purge utility. This helps especially for those testing scenarios in which a large number of volumes are created and deleted. 
(bug #1723226) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/bug-1730933-1bb0272e3c51eed3.yaml0000664000175000017500000000017200000000000023457 0ustar00zuulzuul00000000000000--- features: - | The Quobyte Cinder driver now supports identifying Quobyte mounts via the mounts fstype field.././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/bug-1735337-remove-skip-quota-validation-flag-2ecb24143f1f1292.yaml0000664000175000017500000000023200000000000031747 0ustar00zuulzuul00000000000000--- fixes: - Quota validations are now forced for all APIs. skip_validation flag is now removed from the request body for the quota-set update API. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/bug-1762424-f76af2f37fe408f1.yaml0000664000175000017500000000025200000000000023506 0ustar00zuulzuul00000000000000--- fixes: - | NetApp ONTAP (bug 1762424): Fix ONTAP NetApp driver not being able to extend a volume to a size greater than the corresponding LUN max geometry. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/bug-1765182-34fdc4bb8482f8a5.yaml0000664000175000017500000000026600000000000023513 0ustar00zuulzuul00000000000000--- fixes: - | NetApp ONTAP (bug 1765182): Make ONTAP NetApp iSCSI driver and FC driver report to the Cinder scheduler that they don't support online volume extending. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/bug-1765182-bcafd577f4b81eb6.yaml0000664000175000017500000000021600000000000023645 0ustar00zuulzuul00000000000000--- fixes: - | Make Cinder scheduler check if backend reports `online_extend_support` before performing an online extend operation. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/bug-1765182-de132ba52167800b.yaml0000664000175000017500000000024200000000000023320 0ustar00zuulzuul00000000000000--- fixes: - | NetApp ONTAP (bug 1765182): Make ONTAP NetApp NFS driver report to the Cinder scheduler that it doesn't support online volume extending. ././@PaxHeader0000000000000000000000000000020700000000000011454 xustar0000000000000000113 path=cinder-27.0.0/releasenotes/notes/bug-1765610-qnap-fix-volume-snapshot-create-fail-2bb785eafdb87fb6.yaml 22 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/bug-1765610-qnap-fix-volume-snapshot-create-fail-2bb785eafdb87fb6.y0000664000175000017500000000014100000000000032205 0ustar00zuulzuul00000000000000--- fixes: - | Fixed QNAP driver failures to create volume and snapshot in some cases. ././@PaxHeader0000000000000000000000000000020500000000000011452 xustar0000000000000000111 path=cinder-27.0.0/releasenotes/notes/bug-1766768-qnap-fix-upload-volume-detach-fail-33cbee59f1381bda.yaml 22 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/bug-1766768-qnap-fix-upload-volume-detach-fail-33cbee59f1381bda.yam0000664000175000017500000000015400000000000032064 0ustar00zuulzuul00000000000000--- fixes: - | Fixed QNAP driver failures to detach iscsi device while uploading volume to image. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/bug-1773446-984d76ed29445c9b.yaml0000664000175000017500000000022000000000000023362 0ustar00zuulzuul00000000000000--- fixes: - | Fixed group availability zone-backend host mismatch [`Bug 1773446 `_]. ././@PaxHeader0000000000000000000000000000020500000000000011452 xustar0000000000000000111 path=cinder-27.0.0/releasenotes/notes/bug-1773725-xtremio-remove-provisioning-factor-y7r5uy3489yd9pbf.yaml 22 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/bug-1773725-xtremio-remove-provisioning-factor-y7r5uy3489yd9pbf.yam0000664000175000017500000000014600000000000032643 0ustar00zuulzuul00000000000000--- fixes: The XtremIO driver has been fixed to correctly report the "free_capacity_gb" size. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/bug-1782588-7e058b379da95309.yaml0000664000175000017500000000021600000000000023306 0ustar00zuulzuul00000000000000--- fixes: - | Solidfire fix extend volume with qos-Scaling to honor the increased size with increased iops on the extended volume. ././@PaxHeader0000000000000000000000000000020600000000000011453 xustar0000000000000000112 path=cinder-27.0.0/releasenotes/notes/bug-1783790-multiattach-none-when-manage-volume-yu7du8yth78i0e6b.yaml 22 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/bug-1783790-multiattach-none-when-manage-volume-yu7du8yth78i0e6b.ya0000664000175000017500000000015000000000000032515 0ustar00zuulzuul00000000000000--- fixes: - Now cinder will keep track of 'multiattach' attribute when managing backend volumes. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/bug-1784871-7f67402eb13abca7.yaml0000664000175000017500000000050600000000000023501 0ustar00zuulzuul00000000000000security: - | Removed the ability to create volumes in a ScaleIO Storage Pool that has zero-padding disabled. A new configuration option had been added to override this new behavior and allow volume creation, but should not be enabled if multiple tenants will utilize volumes from a shared Storage Pool. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/bug-1790141-vmax-powermaxos-upgrade-fix-4c76186cfca66790.yaml0000664000175000017500000000070300000000000030732 0ustar00zuulzuul00000000000000--- fixes: - PowerMax driver - Workload support was dropped in ucode 5978. If a VMAX All Flash array is upgraded to 5978 or greater and existing volume types leveraged workload e.g. DSS, DSS_REP, OLTP and OLTP_REP, certain operations will no longer work and the volume type will be unusable. This fix addresses these issues and fixes problems with using old volume types with workloads included in the volume type pool_name. ././@PaxHeader0000000000000000000000000000022100000000000011450 xustar0000000000000000123 path=cinder-27.0.0/releasenotes/notes/bug-1799221-fix-truncated-volumes-in-case-of-glance-errors-6cae19218249c3cf.yaml 22 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/bug-1799221-fix-truncated-volumes-in-case-of-glance-errors-6cae19210000664000175000017500000000031400000000000032316 0ustar00zuulzuul00000000000000--- fixes: - | Fixed a bug which could create volumes with invalid content in case of unhandled errors from glance client (Bug `#1799221 `_). 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/bug-1805550-default-policy-file-db15eaa76fefa115.yaml0000664000175000017500000000475000000000000027466 0ustar00zuulzuul00000000000000--- upgrade: - | Beginning with Cinder version 12.0.0, as part of the Queens release "policies in code" community effort, Cinder has had the ability to run without a policy file because sensible default values are specified in the code. Customizing the policies in effect at your site, however, still requires a policy file. The default location of this file has been ``/etc/cinder/policy.json`` (although the documentation has indicated otherwise). With this release, the default location of this file is changed to ``/etc/cinder/policy.yaml``. Some points to keep in mind: - The policy file to be used may be specified in the ``/etc/cinder/cinder.conf`` file in the ``[oslo_policy]`` section as the value of the ``policy_file`` configuration option. That way there's no question what file is being used. - To find out what policies are available and what their default values are, you can generate a sample policy file. To do this, you must have a local copy of the Cinder source code repository. From the top level directory, run the command:: tox -e genpolicy This will generate a file named ``policy.yaml`` in the ``etc/cinder`` directory of your checked-out Cinder repository. - The sample file is YAML (because unlike JSON, YAML allows comments). If you prefer, you may use a JSON policy file. - Beginning with Cinder 12.0.0, you only need to specify policies in your policy file that you want to **differ** from the default values. Unspecified policies will use the default values *defined in the code*. Given that a default value *must* be specified *in the code* when a new policy is introduced, the ``default`` policy, which was formerly used as a catch-all for policy targets that were not defined elsewhere in the policy file, has no effect. We mention this because an old upgrade strategy was to use the policy file from the previous release with ``"default": "role:admin"`` (or ``"default": "!"``) so that newly introduced actions would be blocked from end users until the operator had time to assess the implications of exposing these actions. This strategy no longer works. Hopefully this isn't a problem because we're defining sensible defaults in the code. It would be a good idea, however, to generate the sample policy file with each release (see instructions above) to verify this for yourself. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/bug-1809323-fix-invalid-backup-4a341dc362ded88e.yaml0000664000175000017500000000032000000000000027146 0ustar00zuulzuul00000000000000--- fixes: - | Now cinder will roll back the ``quota_usages`` table when creation of an incremental backup fails because no parent backup exists or the parent backup is not in ``available`` state. 
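For context, an illustrative incremental backup request looks like the following (the volume ID and backup name are placeholders; the standard cinder CLI is assumed)::

    cinder backup-create --incremental --name nightly-incr <volume-id>

If no parent backup exists, or the parent is not in ``available`` state, the request fails and the quota usage is now rolled back correctly.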
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/bug-1812685-powermax-replication-specs-fix-aa6b13b93b4059d6.yaml0000664000175000017500000000104200000000000031457 0ustar00zuulzuul00000000000000--- fixes: - When using a PowerMax OS array as a replication target, where the source is an All-Flash/Hybrid array running HyperMax OS, service level and workload settings are not correctly applied for devices on the replication target if a workload is specified. Instead of setting only the workload to None, both service level and workload are set to None. This fix corrects the application of service level and workload settings for replication sessions where the source is HyperMax OS and the target is PowerMax OS. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/bug-1813851-60a4f0ffe386d9b6.yaml0000664000175000017500000000023000000000000023501 0ustar00zuulzuul00000000000000--- upgrade: - | Added config option ``backup_mount_attempts`` to specify the number of attempts to mount NFS share in the NFS backup driver. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/bug-1823200-victoria-ecd2d99c9223d84b.yaml0000664000175000017500000000215400000000000025314 0ustar00zuulzuul00000000000000--- upgrade: - | The fix for `Bug #1823200 `_ requires ``os-brick`` version 3.1.0 or greater. security: - | Dell EMC VxFlex OS driver: This release contains a fix for `Bug #1823200 `_. See `OSSN-0086 `_ for details. fixes: - | `Bug #1823200 `_: This release contains an updated Dell EMC VxFlex OS driver. It must be used with ``os-brick`` version 3.1.0 or greater and requires that a new configuration file be deployed on compute nodes, cinder nodes, and anywhere you would perform a volume attachment in your deployment. See the `Dell EMC VxFlex OS (ScaleIO) Storage driver `_ documentation for details about the configuration file, and see `OSSN-0086 `_ for more information about the security vulnerability. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/bug-1823445-c47c25870a98335a.yaml0000664000175000017500000000105500000000000023264 0ustar00zuulzuul00000000000000--- fixes: - | Fixed the volume property `signature_verified` propagating to images created from volumes. That property could later conflict with the same property being added again when creating a new volume from such image, preventing the volume from being created successfully. This volume property is created whenever a volume is created from an image for the purpose of indicating that the image signature was verified on creation, and was not intended to be propagated further if a new image is created from such volume. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/bug-1828386-fix-retype-rbd-backend.yaml0000664000175000017500000000040400000000000025434 0ustar00zuulzuul00000000000000--- fixes: - | `Bug #1828386 `_: Fix the bug that a volume retyped from another volume type to a replicated or multiattach type cannot have replication or multiattach enabled in rbd driver. 
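As an illustrative example (the type and volume names are placeholders, and the extra-spec uses the standard boolean syntax), retyping an RBD-backed volume to a multiattach-capable type now takes effect as expected::

    openstack volume type create --property multiattach='<is> True' rbd-multiattach
    cinder retype --migration-policy never <volume-id> rbd-multiattach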
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/bug-1833115-fix-netapp-ontap-python3-failures-dd869e602f9539e1.yaml0000664000175000017500000000022000000000000031747 0ustar00zuulzuul00000000000000--- fixes: - | Fix python 3 incompatibility issues preventing NetApp cDOT driver from generating EMS logging messages (Bug #1833115). ././@PaxHeader0000000000000000000000000000021500000000000011453 xustar0000000000000000119 path=cinder-27.0.0/releasenotes/notes/bug-1837524-strowize-create_consistency_group_failures-bb2a976dfe9454a4.yaml 22 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/bug-1837524-strowize-create_consistency_group_failures-bb2a976dfe940000664000175000017500000000032500000000000033025 0ustar00zuulzuul00000000000000--- fixes: - | `Bug #1837524 `_: IBM Spectrum Virtualize Family: Fixed create_consistency_group if the volume has mirror copy and mdisk_grp_name=many. ././@PaxHeader0000000000000000000000000000022100000000000011450 xustar0000000000000000123 path=cinder-27.0.0/releasenotes/notes/bug-1859652-netapp-fix-retype-attached-volume-to-solidfire-1933f03673ff078d.yaml 22 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/bug-1859652-netapp-fix-retype-attached-volume-to-solidfire-1933f0360000664000175000017500000000022200000000000032276 0ustar00zuulzuul00000000000000--- fixes: - | `Bug #1859652 `_: Fix to allow retyping an attached volume to SolidFire. ././@PaxHeader0000000000000000000000000000021700000000000011455 xustar0000000000000000121 path=cinder-27.0.0/releasenotes/notes/bug-1859653-solidfire-fix-failover-after-service-restart-77e5e4da45c9c1aa.yaml 22 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/bug-1859653-solidfire-fix-failover-after-service-restart-77e5e4da450000664000175000017500000000032400000000000032433 0ustar00zuulzuul00000000000000--- fixes: - | NetApp SolidFire driver: Fixed an issue that causes failback to fail after a volume service restart. This change fixes bug `1859653 `_.././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/bug-1860100-8c542363def7d408.yaml0000664000175000017500000000040100000000000023326 0ustar00zuulzuul00000000000000--- fixes: - | The volume-transfers list calls (``GET /v3/{project_id}/volume-transfers``, ``GET /v3/{project_id}/volume-transfers/detail``) were not recognizing ``name`` as a filterable attribute. That has been fixed in the current release. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/bug-1869746-cross-project-incremental-backup-error.yaml0000664000175000017500000000032200000000000030700 0ustar00zuulzuul00000000000000--- fixes: - | `Bug #1869746 `_: Cinder no longer allows an incremental backup to be created while having the parent backup in another project. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/bug-1870103-013e314e9a5b8e08.yaml0000664000175000017500000000031200000000000023317 0ustar00zuulzuul00000000000000--- fixes: - | Pure Storage driver `bug 1870103 `_: Ensure that unmanaged volumes do not exceed maximum character length on FlashArray. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/bug-1871524-5f6df9a61bf6b775.yaml0000664000175000017500000000022100000000000023511 0ustar00zuulzuul00000000000000--- fixes: - | Fix volume migration fails in the same ceph RBD pool. `Bug 1871524 `__. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/bug-1874134-netapp-ONTAP-fix-max-resize-size-ad2d88da8721560e.yaml0000664000175000017500000000041200000000000031400 0ustar00zuulzuul00000000000000--- fixes: - | `Bug #1874134 `_: Fix for NetApp ONTAP driver allowing an iSCSI or FCP volume to be extended to a size up to 16TB regardless of its original size, even if it's attached to an instance. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/bug-1874541-netapp-fix-update-cluster-status-8331655904fb4fed.yaml0000664000175000017500000000026700000000000031706 0ustar00zuulzuul00000000000000--- fixes: - | `Bug #1874541 `_: Fix a ZeroDivisionError when the SolidFire driver tried to update cluster capabilities. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/bug-1875570-nfs-image-volume-cache-c45e840a6ec2a702.yaml0000664000175000017500000000033500000000000027627 0ustar00zuulzuul00000000000000--- fixes: - | `Bug #1875570 `_: Fixed issue with NFS backend where the image-volume cache was never used to create a volume, even when the cache was enabled. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/bug-1879578-volume_type-regression-de82f4152c7b2f77.yaml0000664000175000017500000000320500000000000030173 0ustar00zuulzuul00000000000000--- fixes: - | `Bug #1879578 `_: A regression in the Train release caused Cinder to assign the default volume type too aggressively when a volume type was not specified in a volume-create request. As a result, some alternative methods of specifying the volume type were ignored and the default type (either configured by the operator or the system default) would be assigned. This release restores the intended behavior, which is described as follows: If a ``volume_type`` is not specified when a volume is created, Cinder tries to infer the volume type from other information in the volume-create request: * if a ``source_volid`` is supplied in the request, the volume type is inferred from the source volume's volume type * if a ``snapshot_id`` is supplied in the request, the volume type is inferred from the volume type associated with the snapshot * if an ``imageRef`` is supplied in the request, and the image has a ``cinder_img_volume_type`` image property, the volume type is inferred from the value of that image property Otherwise, the volume type is the default volume type configured by the operator, and if no volume type is so configured, the volume type is the system default volume type, namely, ``__DEFAULT__``. When a volume type is specified explicitly in a volume-create call, Cinder will use the specified type. If the specified type cannot be assigned due to a conflict with other parameters in the volume-create call, however, the call will result in a 400 (Bad Request) response. 
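As an illustration of the restored inference behaviour described in the bug #1879578 note above, assuming the python-openstackclient CLI and placeholder names::

    # attach a default volume type hint to an image
    openstack image set --property cinder_img_volume_type=gold my-image
    # no volume type supplied: cinder infers type 'gold' from the image property
    openstack volume create --image my-image --size 10 my-volume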
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/bug-1884030-ds8k_support_volume_name_template-91e1b70ece172ef8.yaml0000664000175000017500000000025000000000000032420 0ustar00zuulzuul00000000000000--- fixes: - | IBM DS8000 Driver `Bug #1884030 `_: Support for volume_name_template configuration option. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/bug-1887859-backup-manager-fb8dbf289eedc4b0.yaml0000664000175000017500000000026400000000000026624 0ustar00zuulzuul00000000000000--- fixes: - | `Bug #1887859 `_: Fix for a race in Cinder Backup Manager with double initialization of backup driver. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/bug-1887885-nec-fix-snapshot-detach-error-fff3012e0e9a2d2b.yaml0000664000175000017500000000021000000000000031320 0ustar00zuulzuul00000000000000--- fixes: - | `Bug #1887885 `_: In NEC driver, fix a snapshot detach error. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/bug-1887908-nec-live-migration-failure-withfc-3128fff7c48e739f.yaml0000664000175000017500000000022100000000000032051 0ustar00zuulzuul00000000000000--- fixes: - | `Bug #1887908 `_: In NEC driver, fix live-migration failure with FC. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/bug-1887962-643379faf20f01cf.yaml0000664000175000017500000000042700000000000023436 0ustar00zuulzuul00000000000000--- fixes: - | `Bug #1887962 `_: PowerMax driver fix to rectify incorrectly deleted non-temporary snapshots when calling do_sync_check used in multiple operations due to missing check for temporary snapshot name. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/bug-1888951-backup-from-nfs-snapshot-2e06235eb318b852.yaml0000664000175000017500000000025300000000000030113 0ustar00zuulzuul00000000000000--- fixes: - | `Bug #1888951 `_: Fixed an issue with creating a backup from snapshot with NFS volume driver. ././@PaxHeader0000000000000000000000000000020500000000000011452 xustar0000000000000000111 path=cinder-27.0.0/releasenotes/notes/bug-1890241-strowize-delete_group_snapshot_fix-2e491e74e1f73ba7.yaml 22 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/bug-1890241-strowize-delete_group_snapshot_fix-2e491e74e1f73ba7.yam0000664000175000017500000000050400000000000032371 0ustar00zuulzuul00000000000000--- fixes: - | `Bug #1890241 `_: During delete_group_snapshot on IBM storwize, in case of multiple snapshots in the group, delete flow exits if any one snapshot deletion fails, but it should update error state and continue with deleting other snapshots. 
././@PaxHeader0000000000000000000000000000021100000000000011447 xustar0000000000000000115 path=cinder-27.0.0/releasenotes/notes/bug-1890254-clone-fcmap-is-not-deleting-in-cleanup-f5bbb467be1b889d.yaml 22 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/bug-1890254-clone-fcmap-is-not-deleting-in-cleanup-f5bbb467be1b889d0000664000175000017500000000046200000000000032045 0ustar00zuulzuul00000000000000--- fixes: - | IBM Spectrum Virtualize driver `Bug #1890254 `_: Fix check_vdisk_fc_mappings is not deleting all flashcopy mappings while deleting source volume, when multiple clones and snapshots are created using common source volume. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/bug-1890586-storwize-check_flashcopy_rate-fix-571e6e182b604725.yaml0000664000175000017500000000035400000000000032022 0ustar00zuulzuul00000000000000--- fixes: - | `Bug #1890586 `_: IBM Storwize: Fixed issues in check_flashcopy_rate that impacts the performance during Group Snapshot/Clone operations for bulk volumes. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/bug-1890588-storwize-select_io_group-fix-7200f2e00140ab34.yaml0000664000175000017500000000040400000000000031065 0ustar00zuulzuul00000000000000--- fixes: - | `Bug #1890588 `_: IBM Storwize: Fixed issues in select_io_group that impacts the performance during Create_volume, Group Snapshot/Clone operations for bulk non-hyperswap volumes. ././@PaxHeader0000000000000000000000000000020500000000000011452 xustar0000000000000000111 path=cinder-27.0.0/releasenotes/notes/bug-1890589-create_flashcopy_to_consistgrp_fix-9eeea4aaceb8a191.yaml 22 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/bug-1890589-create_flashcopy_to_consistgrp_fix-9eeea4aaceb8a191.yam0000664000175000017500000000041700000000000032712 0ustar00zuulzuul00000000000000--- fixes: - | `Bug #1890589 `_: IBM Spectrum Virtualize Family: Fixed issues in create_flashcopy_to_consistgrp, made use of iogrp,qos from opts for create_vdisk, mkfcmap calls if the data exists in opts. ././@PaxHeader0000000000000000000000000000021100000000000011447 xustar0000000000000000115 path=cinder-27.0.0/releasenotes/notes/bug-1890591-Pool-information-is-not-saved-in-stats-22f302d941cd9fe2.yaml 22 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/bug-1890591-Pool-information-is-not-saved-in-stats-22f302d941cd9fe20000664000175000017500000000035700000000000031767 0ustar00zuulzuul00000000000000--- fixes: - | `Bug #1890591 `_: IBM Spectrum Virtualize Family: Fixed issue in do_setup of StorwizeSVCCommonDriver to save pool information in stats during initialisation. ././@PaxHeader0000000000000000000000000000021000000000000011446 xustar0000000000000000114 path=cinder-27.0.0/releasenotes/notes/bug-1892034-Volume-name-is-not-validated-for-host-4ec0d1bd14281c77.yaml 22 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/bug-1892034-Volume-name-is-not-validated-for-host-4ec0d1bd14281c77.0000664000175000017500000000044700000000000031614 0ustar00zuulzuul00000000000000--- fixes: - | IBM Spectrum Virtualize Family driver `Bug #1892034 `_: Fixed issue in get_host_from_connector that volume name is not validated to get the host during terminate connection when the volume name is passed as input. 
././@PaxHeader0000000000000000000000000000021100000000000011447 xustar0000000000000000115 path=cinder-27.0.0/releasenotes/notes/bug-1894381-fix-cinder-manage-cluster-remove-raising-nosuchopterror.yaml 22 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/bug-1894381-fix-cinder-manage-cluster-remove-raising-nosuchopterror0000664000175000017500000000027600000000000033226 0ustar00zuulzuul00000000000000--- fixes: - | `Bug #1894381 `_: Fixed the bug where ``cinder-manage cluster remove`` did not work and raised a NoSuchOptError.././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/bug-1895035-rbd-restore-0cd94ccd467ae1e3.yaml0000664000175000017500000000024100000000000026003 0ustar00zuulzuul00000000000000--- fixes: - | Ceph backup driver `Bug #1895035 `_: Fixed restoring full backups to non-RBD volumes. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/bug-1895510-REST-API-issue-to-get-bundle-198a3b89255759bb.yaml0000664000175000017500000000030500000000000030250 0ustar00zuulzuul00000000000000--- fixes: - | `Bug #1895510 `_: IBM DS8K: Fixed a compatibility issue when using the IBM DS8K driver with storage version R9 and later. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/bug-1896087-rollback-volume-status-bd04951f929bb88d.yaml0000664000175000017500000000032600000000000030063 0ustar00zuulzuul00000000000000--- fixes: - | `Bug #1896087 `_: Volume status will be rolled back to the previous state if backup creation fails when the backup service is not available. ././@PaxHeader0000000000000000000000000000021200000000000011450 xustar0000000000000000116 path=cinder-27.0.0/releasenotes/notes/bug-1896214-Fix-in-change_vdisk_iogrp-during-retype-ef83ccf27d8829f5.yaml 22 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/bug-1896214-Fix-in-change_vdisk_iogrp-during-retype-ef83ccf27d8829f0000664000175000017500000000056500000000000032256 0ustar00zuulzuul00000000000000--- fixes: - | IBM Spectrum Virtualize Family `Bug #1896214 `_: Fixed issues in change_vdisk_iogrp. When retyping a volume between I/O groups, if addvdiskaccess fails, an exception is raised, and if movevdisk fails, rmvdiskaccess is performed for the new I/O group before failing the retype operation. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/bug-1897598-powerflex-volume-type-conversion.yaml0000664000175000017500000000050700000000000027707 0ustar00zuulzuul00000000000000--- fixes: - | PowerFlex driver `bug #1897598 `_: Fixed a bug with PowerFlex storage-assisted volume migration where the migration was performed without conversion of the volume type in cases where it should have been converted to/from thin/thick provisioning. ././@PaxHeader0000000000000000000000000000022000000000000011447 xustar0000000000000000122 path=cinder-27.0.0/releasenotes/notes/bug-1898746-ibm-svf-fix-host-failover-switch-relationship-9d3c58822a8c918c.yaml 22 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/bug-1898746-ibm-svf-fix-host-failover-switch-relationship-9d3c588220000664000175000017500000000034500000000000032336 0ustar00zuulzuul00000000000000--- fixes: - | IBM Spectrum Virtualize Family `Bug #1898746 `_: Fixed an issue regarding host-failover and group-failover that impacted storage back-end performance. 
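The command repaired by the bug #1894381 note above is invoked as follows; the cluster name is a placeholder::

    cinder-manage cluster remove mycluster@lvmdriver-1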
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/bug-1898918-b24a93d7d5aff238.yaml0000664000175000017500000000036500000000000023526 0ustar00zuulzuul00000000000000--- fixes: - | RBD driver `Bug #1898918 `_: Fixed a thread block caused by the flatten operation when cloning a volume. The flatten operation is now executed in a different thread. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/bug-1900979-powerstore-chap-support.yaml0000664000175000017500000000024100000000000026040 0ustar00zuulzuul00000000000000--- fixes: - | `Bug #1900979 `_: Fixed a bug when using PowerStore with CHAP enabled as a storage backend. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/bug-1900979-xtremio-ports-filtering-e68f90d47f17a7d9.yaml0000664000175000017500000000021600000000000030264 0ustar00zuulzuul00000000000000--- fixes: - | `Bug #1915800 `_: Added support for ports filtering in the XtremIO driver. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/bug-1901241-361b1b361bfa5152.yaml0000664000175000017500000000042100000000000023301 0ustar00zuulzuul00000000000000--- fixes: - | RBD driver `bug #1901241 `_: Fixed an issue where decreasing the ``rbd_max_clone_depth`` configuration option would prevent volumes that had already exceeded that depth from being cloned. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/bug-1903648-ds8k-ostype-compatability-support-a86f608d8c014a29.yaml0000664000175000017500000000025000000000000032103 0ustar00zuulzuul00000000000000--- fixes: - | IBM DS8000 Driver `Bug #1903648 `_: Fixed os_type compatibility and hostname template issues. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/bug-1904440-clone-rekey-fd57a2b5f6224e0f.yaml0000664000175000017500000000051200000000000025701 0ustar00zuulzuul00000000000000--- fixes: - | `Bug #1904440 `_: When an iSCSI/FC encrypted volume was cloned, the rekey operation would stamp the wrong encryption key on the newly cloned volume. This resulted in a volume that could not be attached. It does not present a security problem. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/bug-1904892-ipv6-nfs-manage-391118115dfaaf54.yaml0000664000175000017500000000036400000000000026234 0ustar00zuulzuul00000000000000--- fixes: - | `Bug #1904892 `_: Fixed cinder manage operations for NFS backends using IPv6 addresses in the NFS server address. These were previously rejected by the Cinder API. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/bug-1905564-e7dcf28fd734d3b2.yaml0000664000175000017500000000033400000000000023567 0ustar00zuulzuul00000000000000--- fixes: - | PowerMax Driver `bug #1905564 `_: Fixed remote SRP not being assigned to the volume's host when performing retype during failover-promotion. 
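For the ``rbd_max_clone_depth`` option mentioned in the bug #1901241 note above, a minimal backend-section sketch (section and pool names are illustrative; 5 is the documented default)::

    [ceph]
    volume_driver = cinder.volume.drivers.rbd.RBDDriver
    rbd_pool = volumes
    # longest chain of dependent clones before cinder flattens a new clone
    rbd_max_clone_depth = 5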
././@PaxHeader0000000000000000000000000000021300000000000011451 xustar0000000000000000117 path=cinder-27.0.0/releasenotes/notes/bug-1905988-ibm-svf-fix-volume-iops-throttling-issue-b2b89e31af5973b2.yaml 22 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/bug-1905988-ibm-svf-fix-volume-iops-throttling-issue-b2b89e31af59730000664000175000017500000000034000000000000032172 0ustar00zuulzuul00000000000000--- fixes: - | IBM Spectrum Virtualize Family `Bug #1905988 `_: Fixed a volume IOPS throttling issue with a new option to set volume IOPS based on volume size. ././@PaxHeader0000000000000000000000000000022000000000000011447 xustar0000000000000000122 path=cinder-27.0.0/releasenotes/notes/bug-1906528-ibm-svf-fix-host-failback-switch-relationship-b5b7320811688cda.yaml 22 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/bug-1906528-ibm-svf-fix-host-failback-switch-relationship-b5b7320810000664000175000017500000000035500000000000032232 0ustar00zuulzuul00000000000000--- fixes: - | `Bug #1906528 `_: IBM Spectrum Virtualize Family driver: Fixed an issue regarding host-failback and group-failback that impacted storage back-end performance. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/bug-1907964-9277e5ddec2abeda.yaml0000664000175000017500000000066500000000000023740 0ustar00zuulzuul00000000000000--- fixes: - | RBD driver `bug #1907964 `_: Added support for fast-diff on backup images stored in Ceph. Provided fast-diff is supported by the backend, it will automatically be enabled and used. With fast-diff enabled, generating diffs between images and snapshots, as well as determining the actual data usage of a snapshot, is sped up significantly.././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/bug-1908315-020fea3e244d49bb.yaml0000664000175000017500000000346700000000000023475 0ustar00zuulzuul00000000000000--- upgrade: - | This release contains a fix for `Bug #1908315 `_, which changes the default value of the policy governing the Block Storage API action `Reset group snapshot status `_ to make the action administrator-only. This policy was inadvertently changed to be admin-or-owner during the Queens development cycle. The policy is named ``group:reset_group_snapshot_status``. * If you have a custom value for this policy in your cinder policy configuration file, this change to the default value will not affect you. * If you have been aware of this regression and like the current (incorrect) behavior, you may add the following line to your cinder policy configuration file to restore that behavior:: "group:reset_group_snapshot_status": "rule:admin_or_owner" This setting is *not recommended* by the Cinder project team, as it may allow end users to put a group snapshot into an invalid status with indeterminate consequences. For more information about the cinder policy configuration file, see the `policy.yaml `_ section of the Cinder Configuration Guide. fixes: - | `Bug #1908315 `_: Corrected the default checkstring for the ``group:reset_group_snapshot_status`` policy to make it admin-only. This policy governs the Block Storage API action `Reset group snapshot status `_, which by default is supposed to be an administrator-only action. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/bug-1910767-00f20702f5fc96db.yaml0000664000175000017500000000044700000000000023421 0ustar00zuulzuul00000000000000--- fixes: - | `Bug #1910767 `_: Fixed the calculation of the allocated capacity for the volume manager. The fix takes into account all volumes that have a host setting, not just volumes with a status of 'in-use' or 'available'. ././@PaxHeader0000000000000000000000000000021000000000000011446 xustar0000000000000000114 path=cinder-27.0.0/releasenotes/notes/bug-1912451-ibm-svf-update-replication-properties-68c4f9ea56df212d.yaml 22 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/bug-1912451-ibm-svf-update-replication-properties-68c4f9ea56df212d.0000664000175000017500000000041400000000000032140 0ustar00zuulzuul00000000000000--- fixes: - | `Bug #1912451 `_: IBM Spectrum Virtualize Family driver: Updated replication properties for HyperSwap volumes and volumes with replication enabled that were missing from volume metadata. ././@PaxHeader0000000000000000000000000000021400000000000011452 xustar0000000000000000118 path=cinder-27.0.0/releasenotes/notes/bug-1912564-strowize-hyperswap-volume-is-not-deleting-a94291248f8f59cd.yaml 22 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/bug-1912564-strowize-hyperswap-volume-is-not-deleting-a94291248f8f50000664000175000017500000000025400000000000032231 0ustar00zuulzuul00000000000000--- fixes: - | IBM Spectrum Virtualize Family driver `Bug #1912564 `_: Fixed HyperSwap volume deletion issue. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/bug-1912624-bakup-a-z-regression-452f4bc9dfd41871.yaml0000664000175000017500000000042500000000000027366 0ustar00zuulzuul00000000000000--- fixes: - | `Bug #1912624 `_: Corrected regression introduced by the refactoring of the backup service in the ussuri release, which prevented the creation of a volume backup in a different availability zone. ././@PaxHeader0000000000000000000000000000022200000000000011451 xustar0000000000000000124 path=cinder-27.0.0/releasenotes/notes/bug-1913363-ibm-svf_Fix_multiple_lshost_calls_during_attach-528f92b44a0ff6b8.yaml 22 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/bug-1913363-ibm-svf_Fix_multiple_lshost_calls_during_attach-528f92b0000664000175000017500000000044700000000000032773 0ustar00zuulzuul00000000000000--- fixes: - | IBM Spectrum Virtualize Family driver `Bug #1913363 `_: Fixed issue in get_host_from_connector by caching the host information during attach or detach operations and using host details from cached information. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/bug-1913449-4796b366ae7e871b.yaml0000664000175000017500000000042500000000000023362 0ustar00zuulzuul00000000000000--- fixes: - | `Bug 1913449 `_: Fix RBD driver _update_volume_stats() failing when using Ceph Pacific python rados libraries. 
This failed because we were passing a str instead of bytes to cluster.mon_command() ././@PaxHeader0000000000000000000000000000020700000000000011454 xustar0000000000000000113 path=cinder-27.0.0/releasenotes/notes/bug-1914639-fix-chap-auth-issue-in-netapp-driver-e92eaa431d6fcbac.yaml 22 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/bug-1914639-fix-chap-auth-issue-in-netapp-driver-e92eaa431d6fcbac.y0000664000175000017500000000037200000000000032170 0ustar00zuulzuul00000000000000--- fixes: - | Fixed a CHAP authentication issue while trying to attach an iSCSI volume using the NetApp ONTAP driver. Please refer to the `Launchpad bug #1914639 `_ for more details. ././@PaxHeader0000000000000000000000000000022200000000000011451 xustar0000000000000000124 path=cinder-27.0.0/releasenotes/notes/bug-1917605-ibm-svf_Bulk_create_Hyperswap_volume_is_failing-79a9ec2108612240.yaml 22 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/bug-1917605-ibm-svf_Bulk_create_Hyperswap_volume_is_failing-79a9ec20000664000175000017500000000036700000000000033007 0ustar00zuulzuul00000000000000--- fixes: - | IBM Spectrum Virtualize Family driver `Bug #1917605 `_: Fixed issue in StorwizeSVCCommonDriver to save site and peer pool information in stats during initialization. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/bug-1918229-0aa9fd75c5e843d5.yaml0000664000175000017500000000051400000000000023511 0ustar00zuulzuul00000000000000--- fixes: - | Nimble driver `bug #1918229 `_: Corrected an issue where the Nimble storage driver was inaccurately determining that there was no free space left in the storage array. The driver now relies on the storage array to report the amount of free space. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/bug-1918889-xtremio-iscsi-ipv6-05c59b897da5c01b.yaml0000664000175000017500000000021100000000000027110 0ustar00zuulzuul00000000000000--- fixes: - | `Bug #1918889 `_: Add support for iSCSI IPv6 in XtremIO driver. ././@PaxHeader0000000000000000000000000000021200000000000011450 xustar0000000000000000116 path=cinder-27.0.0/releasenotes/notes/bug-1920099-ibm-svf-fix_extend_to_clone_rep_volumes-015e030332f2e714.yaml 22 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/bug-1920099-ibm-svf-fix_extend_to_clone_rep_volumes-015e030332f2e710000664000175000017500000000047500000000000032137 0ustar00zuulzuul00000000000000--- fixes: - | IBM Spectrum Virtualize Family driver `Bug #1920099 `_: Fix issue where _check_delete_vdisk_fc_mappings was deleting flashcopy mappings during extend operation of a clone volume where its source volume already contained a snapshot. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/bug-1920237-backup-remove-export-race-941e2ab1f056e54c.yaml0000664000175000017500000000051100000000000030370 0ustar00zuulzuul00000000000000--- fixes: - | `Bug #1920237 `_: The backup manager calls volume remove_export() but does not wait for it to complete when detaching a volume after backup. This caused problems when a subsequent operation started on that volume before it had fully detached. 
././@PaxHeader0000000000000000000000000000020500000000000011452 xustar0000000000000000111 path=cinder-27.0.0/releasenotes/notes/bug-1920729-powerstore-iscsi-targets-filtering-9623ac03da5c6721.yaml 22 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/bug-1920729-powerstore-iscsi-targets-filtering-9623ac03da5c6721.yam0000664000175000017500000000041100000000000032120 0ustar00zuulzuul00000000000000--- fixes: - | PowerStore driver `Bug #1920729 `_: Fix iSCSI targets not being returned from the REST API call if targets are used for multiple purposes (iSCSI target, Replication target, etc.). ././@PaxHeader0000000000000000000000000000022100000000000011450 xustar0000000000000000123 path=cinder-27.0.0/releasenotes/notes/bug-1920870-ibm-svf-fix-extend-issue-for-mirroring-volumes-31b1a9119c49e112.yaml 22 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/bug-1920870-ibm-svf-fix-extend-issue-for-mirroring-volumes-31b1a9110000664000175000017500000000040500000000000032321 0ustar00zuulzuul00000000000000--- fixes: - | IBM Spectrum Virtualize Family driver `Bug #1920870 `_: Fixed extend issues for volumes with replication enabled by avoiding volume remote-copy relationship deletion and creation. ././@PaxHeader0000000000000000000000000000021100000000000011447 xustar0000000000000000115 path=cinder-27.0.0/releasenotes/notes/bug-1920890-ibm-svf-Retype-in-use-hyperswap-volume-95a6c033e493ee59.yaml 22 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/bug-1920890-ibm-svf-Retype-in-use-hyperswap-volume-95a6c033e493ee590000664000175000017500000000045300000000000031746 0ustar00zuulzuul00000000000000--- fixes: - | IBM Spectrum Virtualize Family driver `Bug #1920890 `_: Fixed issue in retype_hyperswap_volume method to update site and iogrp information to the host during a retype from a non-HyperSwap volume to a HyperSwap volume. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/bug-1920912-add_volumes_to_clone_group_fix-1cc9668ea077831e.yaml0000664000175000017500000000040300000000000031605 0ustar00zuulzuul00000000000000fixes: - | IBM Spectrum Virtualize Family driver: `Bug #1920912 `_: Fixed rccg create issue while adding volumes to a group where the group is cloned from group snapshot or other source group. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/bug-1922013-ibm-svf-fix_addvol_gmcv_grp-caa0bc2035747d99.yaml0000664000175000017500000000026300000000000030744 0ustar00zuulzuul00000000000000--- fixes: - | IBM Spectrum Virtualize Family driver `Bug #1922013 `_: Fixed issues in adding volumes to GMCV group. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/bug-1922255-dell-powervault-manage-volumes.rst0000664000175000017500000000030400000000000027105 0ustar00zuulzuul00000000000000--- fixes: - | DellEMC PowerVault ME Series FC/iSCSI driver `bug #1922255 `_: Implement missing support for 'cinder manageable-list'. 
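The operation added for the Dell EMC PowerVault driver in the bug #1922255 note above is exercised with the existing client command; the host string below is a placeholder::

    cinder manageable-list cinder@powervault-backend#A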
././@PaxHeader0000000000000000000000000000022500000000000011454 xustar0000000000000000127 path=cinder-27.0.0/releasenotes/notes/bug-1922408-create-encryption-volume-from-snapshot-skip-resize-bb5d77c5e912b5c1.yaml 22 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/bug-1922408-create-encryption-volume-from-snapshot-skip-resize-bb5d0000664000175000017500000000024100000000000033045 0ustar00zuulzuul00000000000000--- fixes: - | RBD driver `Bug #1922408 `_: Fixed create encrypted volume from encrypted snapshot. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/bug-1924568-ibm-svf-fix_drp_vol_create_issue-d1b75c4befb0e993.yaml0000664000175000017500000000032200000000000032173 0ustar00zuulzuul00000000000000--- fixes: - | IBM Spectrum Virtualize Family driver `Bug #1924568 `_: Fixed issues that occurred while creating volume on data reduction pool. ././@PaxHeader0000000000000000000000000000022700000000000011456 xustar0000000000000000129 path=cinder-27.0.0/releasenotes/notes/bug-1924602-ibm-svf_Storwize_HyperSwap_snapshot_clone_is_failing-c144e6b99d56de64.yaml 22 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/bug-1924602-ibm-svf_Storwize_HyperSwap_snapshot_clone_is_failing-c10000664000175000017500000000035300000000000033273 0ustar00zuulzuul00000000000000--- fixes: - | IBM Spectrum Virtualize Family driver `Bug #1924602 `_: Fixed issue to create snapshots, clones, group snapshots, and group clones for HyperSwap volumes. ././@PaxHeader0000000000000000000000000000023100000000000011451 xustar0000000000000000131 path=cinder-27.0.0/releasenotes/notes/bug-1926286-ibm-svf-fix-volume-relationship-properties-fetch-issue-6b443f8521cbb15c.yaml 22 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/bug-1926286-ibm-svf-fix-volume-relationship-properties-fetch-issue-0000664000175000017500000000034200000000000033063 0ustar00zuulzuul00000000000000--- fixes: - | IBM Spectrum Virtualize Family driver `Bug #1926286 `_: Fixed an issue while fetching relationship details of a volume with replication enabled. ././@PaxHeader0000000000000000000000000000021600000000000011454 xustar0000000000000000120 path=cinder-27.0.0/releasenotes/notes/bug-1926491-ibm-svf-update-rccg-info-for-mirror-volumes-67fbec05d803745d.yaml 22 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/bug-1926491-ibm-svf-update-rccg-info-for-mirror-volumes-67fbec05d800000664000175000017500000000041600000000000032253 0ustar00zuulzuul00000000000000--- fixes: - | IBM Spectrum Virtualize Family driver `Bug #1926491 `_: Updating volume metadata with rccg properties for the volumes with replication enabled and added to a group or removed from a group. ././@PaxHeader0000000000000000000000000000021500000000000011453 xustar0000000000000000119 path=cinder-27.0.0/releasenotes/notes/bug-1929223-powerflex-connector-certificate-validation-707b4f9f2077d4bc.yaml 22 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/bug-1929223-powerflex-connector-certificate-validation-707b4f9f20770000664000175000017500000000024100000000000032335 0ustar00zuulzuul00000000000000fixes: - | `Bug #1929223 `_: Fixed HTTPS certificate validation was disabled in PowerFlex connector. 
././@PaxHeader0000000000000000000000000000022100000000000011450 xustar0000000000000000123 path=cinder-27.0.0/releasenotes/notes/bug-1931968-ibm-svf-HyperSwap_volume_service_status_update-293dea5d0f750824.yaml 22 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/bug-1931968-ibm-svf-HyperSwap_volume_service_status_update-293dea5d0000664000175000017500000000043300000000000033025 0ustar00zuulzuul00000000000000--- fixes: - | IBM Spectrum Virtualize Family driver `Bug #1931968 `_: Fixed an issue in updating the replication status of the HyperSwap volume service based on the status of nodes during initialization and periodic calls. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/bug-1934168-a61c71869742867d.yaml0000664000175000017500000000055100000000000023224 0ustar00zuulzuul00000000000000--- other: - | The optional driver feature "Snapshot Attachment" has been removed from the `Cinder Driver Support Matrix `_. It is an enhancement used for backups; it is not exposed via the Block Storage API, and its presence in the Support Matrix was misleading. ././@PaxHeader0000000000000000000000000000022100000000000011450 xustar0000000000000000123 path=cinder-27.0.0/releasenotes/notes/bug-1935670-svc_update_rep_properties_for_empty_values_fix-a2faabbf2139195e.yaml 22 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/bug-1935670-svc_update_rep_properties_for_empty_values_fix-a2faabbf0000664000175000017500000000034600000000000033457 0ustar00zuulzuul00000000000000fixes: - | IBM Spectrum Virtualize Family driver: `Bug #1935670 `_: Fixed an issue with empty attribute values while updating the volume metadata table for replicated volumes. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/bug-1936848-6ecc78e0e970419a.yaml0000664000175000017500000000041600000000000023444 0ustar00zuulzuul00000000000000--- fixes: - | PowerMax driver `bug #1936848 `_: Fixed a Generic Volume Group error where the name was changed in OpenStack but not reflected on the corresponding storage group on the PowerMax. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/bug-193688-bb045badcd5aecad.yaml0000664000175000017500000000126700000000000023772 0ustar00zuulzuul00000000000000--- fixes: - | `Bug #1935688 `_: Cinder only supports uploading a volume of an encrypted volume type as an image to the Image service in ``raw`` format using a ``bare`` container type. Previously, ``os-volume_upload_image`` action requests to the Block Storage API specifying different format option values were accepted, but would result in a later failure. This condition is now checked at the API layer, and ``os-volume_upload_image`` action requests on a volume of an encrypted type that specify unsupported values for ``disk_format`` or ``container_format`` now result in a 400 (Bad Request) response. 
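Per the bug #1935688 note above, only the following combination is accepted when uploading a volume of an encrypted type as an image (volume and image names are placeholders)::

    cinder upload-to-image --disk-format raw --container-format bare my-encrypted-vol my-encrypted-image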
././@PaxHeader0000000000000000000000000000022400000000000011453 xustar0000000000000000126 path=cinder-27.0.0/releasenotes/notes/bug-1938212-ibm-svf-fix-to-add-replication-support-for-V5000E-e88df9c8eb22c2a8.yaml 22 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/bug-1938212-ibm-svf-fix-to-add-replication-support-for-V5000E-e88df0000664000175000017500000000043400000000000032071 0ustar00zuulzuul00000000000000--- fixes: - | IBM Spectrum Virtualize Family driver: `Bug #1938212 `_: Added replication license support for FlashSystem V5000E storage system. Removed support for IBM Storwize V3700 as it reached End Of Service. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/bug-1939139-02ab552420813e70.yaml0000664000175000017500000000031300000000000023162 0ustar00zuulzuul00000000000000--- fixes: - | PowerMax driver `bug #1939139 `_: Fix on create snapshot operation that exists when using PowerMax OS 5978.711 and later. ././@PaxHeader0000000000000000000000000000023100000000000011451 xustar0000000000000000131 path=cinder-27.0.0/releasenotes/notes/bug-1939145-ibm-svf-fix-systemname-issue-with-multiple-partnership-c437ebbb511879b9.yaml 22 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/bug-1939145-ibm-svf-fix-systemname-issue-with-multiple-partnership-0000664000175000017500000000050100000000000033113 0ustar00zuulzuul00000000000000--- fixes: - | IBM Spectrum Virtualize Family driver `Bug #1939145 `_: Updating create_relationship and create_rccg calls with the system_id in the place of system_name to fix the issues while creating a mirror volume or creating a consistency group. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/bug-1939241-storpool-attach-encrypted-volumes-783c723683b8f9a9.yaml0000664000175000017500000000041600000000000032112 0ustar00zuulzuul00000000000000--- fixes: - | StorPool driver `bug #1939241 `_: Fixed the creation of encrypted StorPool volumes by dropping the needlessly and incompletely overridden `_attach_volume()` and `_detach_volume()` methods. ././@PaxHeader0000000000000000000000000000023600000000000011456 xustar0000000000000000136 path=cinder-27.0.0/releasenotes/notes/bug-1941694-svc_detach_second_instance_for_multi_attach_type_volume_fix-b9a882a7faa8eed6.yaml 22 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/bug-1941694-svc_detach_second_instance_for_multi_attach_type_volume0000664000175000017500000000032500000000000033623 0ustar00zuulzuul00000000000000--- fixes: - | IBM Spectrum Virtualize Family driver: `Bug #1941694 `_: Fixed detaching volume from second instance for multi-attach type volumes. ././@PaxHeader0000000000000000000000000000022500000000000011454 xustar0000000000000000127 path=cinder-27.0.0/releasenotes/notes/bug-1941815-RBD-call-trash-operation-when-plain-deletion-fails-50cef4a8a8010ba9.yaml 22 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/bug-1941815-RBD-call-trash-operation-when-plain-deletion-fails-50ce0000664000175000017500000000200200000000000032356 0ustar00zuulzuul00000000000000--- fixes: - | RBD driver `bug #1941815 `_: Fixed deleting volumes with snapshots/volumes in the ceph trash space. upgrade: - | **RBD driver: Enable Ceph V2 Clone API and Ceph Trash auto purge** In light of the fix for RBD driver `bug #1941815 `_, we want to bring the following information to your attention. 
Using the v2 clone format for cloned volumes allows volumes with dependent images to be moved to the trash - where they remain until purged - and allow the RBD driver to postpone the deletion until the volume has no dependent images. Configuring the trash purge is recommended to avoid wasting space with these trashed volumes. Since the Ceph Octopus release, the trash can be configured to automatically purge on a defined schedule. See the ``rbd trash purge schedule`` commands in the `rbd manpage `_. ././@PaxHeader0000000000000000000000000000020500000000000011452 xustar0000000000000000111 path=cinder-27.0.0/releasenotes/notes/bug-1942154-backup-availability-zone-object-fix-939f93fda2c539b8.yml 22 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/bug-1942154-backup-availability-zone-object-fix-939f93fda2c539b8.ym0000664000175000017500000000063000000000000032035 0ustar00zuulzuul00000000000000--- fixes: - | Backup `Bug #1942154 `_: Fixed backup.availability_zone to be populated with availability zone of the service that is creating the backup, if it was not provided as argument when creating the backup. This indirectly fixes selecting the proper host when restoring the backup as the availability zone field is now populated. ././@PaxHeader0000000000000000000000000000022400000000000011453 xustar0000000000000000126 path=cinder-27.0.0/releasenotes/notes/bug-1942210-show-msg-check-clone-v2-api-raise-attribute-error-40efd74bb92b9482.yaml 22 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/bug-1942210-show-msg-check-clone-v2-api-raise-attribute-error-40efd0000664000175000017500000000055300000000000032373 0ustar00zuulzuul00000000000000--- fixes: - | RBD driver `bug #1942210 `_: When creating a volume from a snapshot, the operation could fail due to an uncaught exception being raised during a check to see if the backend Ceph installation supported the clone v2 API. The driver now handles this situation gracefully. ././@PaxHeader0000000000000000000000000000023600000000000011456 xustar0000000000000000136 path=cinder-27.0.0/releasenotes/notes/bug-1943682-ibm-svf-update-rccgname-to-metadata-for-clone-group-volumes-baa6bebcf8caacb8.yaml 22 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/bug-1943682-ibm-svf-update-rccgname-to-metadata-for-clone-group-vol0000664000175000017500000000041400000000000032653 0ustar00zuulzuul00000000000000--- fixes: - | IBM Spectrum Virtualize family driver `Bug #1943682 `_: Updating rccg_name property to volume metadata for the resultant volumes of a clone_group from a source_group or a group_snapshot. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/bug-1944577-no-manage-to-encrypted-type-b5b5d7f8360f037f.yaml0000664000175000017500000000054300000000000030672 0ustar00zuulzuul00000000000000--- fixes: - | `Bug #1944577 `_: Managing a volume to an encrypted type was never a good idea because there was no way to specify an encryption key ID so that the volume could be used. Requests to manage a volume to an encrypted volume type now result in an invalid request response. 
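As a sketch of the trash-purge scheduling recommended in the bug #1941815 upgrade note above (pool name and interval are illustrative; confirm the exact syntax against the rbd manpage for your Ceph release)::

    # purge trashed images in the 'volumes' pool once a day
    rbd trash purge schedule add --pool volumes 1d
    # show the configured schedules
    rbd trash purge schedule ls --pool volumes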
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/bug-1946483-cryptic-error-in-backup-fe03939577867e0a.yaml0000664000175000017500000000061700000000000027764 0ustar00zuulzuul00000000000000--- fixes: - | `Bug #1946483 `_: Fixed an issue where an admin user failed to delete a backup of an encrypted volume, with an ``oslo_config.cfg.NoSuchOptError`` error in the cinder-backup service logs. With this fix, a cloud admin is able to delete backups of encrypted volumes created by other users if Barbican API policies allow it. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/bug-1947518-rbd-open-readonly-ba523c4b0ddbba76.yaml0000664000175000017500000000053100000000000027144 0ustar00zuulzuul00000000000000--- fixes: - | RBD driver `bug #1947518 `_: Corrected a regression caused by the fix for `Bug #1931004 `_ that was attempting to access the glance images RBD pool with write privileges when creating a volume from an image. ././@PaxHeader0000000000000000000000000000021400000000000011452 xustar0000000000000000118 path=cinder-27.0.0/releasenotes/notes/bug-1949061-ibm-svf-fix_retype_issue_of_mirror_volume-5f37c265bee89d97.yaml 22 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/bug-1949061-ibm-svf-fix_retype_issue_of_mirror_volume-5f37c265bee890000664000175000017500000000033600000000000032556 0ustar00zuulzuul00000000000000--- fixes: - | IBM Spectrum Virtualize Family driver `Bug #1949061 `_: Fixed a retype issue of a mirror volume to a mirror volume type with a different mirror pool. ././@PaxHeader0000000000000000000000000000021500000000000011453 xustar0000000000000000119 path=cinder-27.0.0/releasenotes/notes/bug-1951046-ds8k_fixed_detach_for_multi_attach_volumes-b86940efafa926f2.yaml 22 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/bug-1951046-ds8k_fixed_detach_for_multi_attach_volumes-b86940efafa90000664000175000017500000000036200000000000032662 0ustar00zuulzuul00000000000000--- fixes: - | IBM DS8000 Driver `Bug #1951046 `_: Fixed a detach issue for multi-attach volumes: the volume is now detached without deleting the host until the attachment count is zero. ././@PaxHeader0000000000000000000000000000021500000000000011453 xustar0000000000000000119 path=cinder-27.0.0/releasenotes/notes/bug-1951250-storwize-fix-multiple-ssh-calls-for-retype-d3b56379b7d8b049.yaml 22 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/bug-1951250-storwize-fix-multiple-ssh-calls-for-retype-d3b56379b7d80000664000175000017500000000033100000000000032264 0ustar00zuulzuul00000000000000--- fixes: - | IBM Spectrum Virtualize Family driver `Bug #1951250 `_: Reduced multiple lsiogrp and lsvdisk calls in the retype operation to optimize the code. ././@PaxHeader0000000000000000000000000000023100000000000011451 xustar0000000000000000131 path=cinder-27.0.0/releasenotes/notes/bug-1952805-cinder-schedules-incremental-backups-on-the-wrong-node-b20b0c137f33ef03.yaml 22 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/bug-1952805-cinder-schedules-incremental-backups-on-the-wrong-node-0000664000175000017500000000043600000000000032734 0ustar00zuulzuul00000000000000--- fixes: - | `Bug #1952805 `_: Fixed the cinder-backup posix driver's behavior with multiple backup hosts. Previously, cinder-backup would frequently schedule incremental backups on the wrong host and immediately fail. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/bug-1953168-fix-capacity-filter-message-456dea41fa8a4a1b.yaml0000664000175000017500000000022700000000000031037 0ustar00zuulzuul00000000000000--- fixes: - | `Bug #1953168 `_: Fixed missing parameter in the capacity filter log message. ././@PaxHeader0000000000000000000000000000022400000000000011453 xustar0000000000000000126 path=cinder-27.0.0/releasenotes/notes/bug-1953185-ibm-svf-RevertToSnapshot_for_rep_volumes_in_group-e3ff6c87edd4de39.yaml 22 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/bug-1953185-ibm-svf-RevertToSnapshot_for_rep_volumes_in_group-e3ff60000664000175000017500000000033200000000000033173 0ustar00zuulzuul00000000000000--- fixes: - | IBM Spectrum Virtualize family driver `Bug #1953185 `_: Fixed revert to snapshot issue for replicated volumes which are a part of group. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/bug-1955057-fix-get-ontap-version-4d9fa1f6c5d2eaf3.yaml0000664000175000017500000000035500000000000030013 0ustar00zuulzuul00000000000000--- fixes: - | NetApp ONTAP driver `bug #1955057 `_: Fixed the function get_ontap_version on Cinder NetApp driver, now it returns a tuple of integers instead of a string. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/bug-1957073-0d1307a8637a62b7.yaml0000664000175000017500000000025100000000000023253 0ustar00zuulzuul00000000000000--- fixes: - | RBD Driver `bug #1957073 `_: Fixed snapshot deletion failure when its volume doesn't exist. ././@PaxHeader0000000000000000000000000000021200000000000011450 xustar0000000000000000116 path=cinder-27.0.0/releasenotes/notes/bug-1960314-ibm-svf-Resize_of_GMCV_volumes_in_group-f9a176153518204c.yaml 22 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/bug-1960314-ibm-svf-Resize_of_GMCV_volumes_in_group-f9a1761535182040000664000175000017500000000033200000000000031637 0ustar00zuulzuul00000000000000--- fixes: - | IBM Spectrum Virtualize family driver `Bug #1960314 `_: Fixed resize issue for GMCV volumes which are a part of a consistency group(CG). ././@PaxHeader0000000000000000000000000000024000000000000011451 xustar0000000000000000138 path=cinder-27.0.0/releasenotes/notes/bug-1960315-ibm-svf-delete_and_resize_volume_issue_in_reverse_replication-952164a73b336a6d.yaml 22 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/bug-1960315-ibm-svf-delete_and_resize_volume_issue_in_reverse_repli0000664000175000017500000000041400000000000033454 0ustar00zuulzuul00000000000000--- fixes: - | IBM Spectrum Virtualize family driver `Bug #1960315 `_: Fixed delete and resize volume issues in during reverse replication and added support to extend the volume for failover scenarios. ././@PaxHeader0000000000000000000000000000024100000000000011452 xustar0000000000000000139 path=cinder-27.0.0/releasenotes/notes/bug-1961548-ibm-svf-Fix_multiple_SVC_CLI_calls_for_create_volume_operation-338b009bca72ee60.yaml 22 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/bug-1961548-ibm-svf-Fix_multiple_SVC_CLI_calls_for_create_volume_op0000664000175000017500000000035200000000000033105 0ustar00zuulzuul00000000000000--- fixes: - | IBM Spectrum Virtualize Family driver: `Bug #1961548 `_: Optimize lsvdisk and lssystem calls to reduce the computational time for creating GMCV volumes. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/bug-1962824-ff0cac0d35021f84.yaml0000664000175000017500000000057200000000000023473 0ustar00zuulzuul00000000000000--- fixes: - | PowerStore driver `bug #1962824 `_: Fixed Cinder volume caching mechanism for the driver. Now the driver correctly raises ``exception.SnapshotLimitReached`` when maximum snapshots are created for a given volume and the volume cache is invalidated to allow a new row of fast volume clones. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/bug-1965847-fix-backup-import-3b3ccdf740a13cff.yaml0000664000175000017500000000037600000000000027207 0ustar00zuulzuul00000000000000--- fixes: - | `Bug #1965847 `_: Fixed issue where importing a backup record for a backup_id that currently existed had the unfortunate side effect of deleting the existing backup record. ././@PaxHeader0000000000000000000000000000024700000000000011460 xustar0000000000000000145 path=cinder-27.0.0/releasenotes/notes/bug-1966639-ibm-svf-resize_issue_in_reverse_replication_for_volume_part_of_group-59e3f5d652a4707c.yaml 22 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/bug-1966639-ibm-svf-resize_issue_in_reverse_replication_for_volume_0000664000175000017500000000036000000000000033532 0ustar00zuulzuul00000000000000--- fixes: - | IBM Spectrum Virtualize family driver `Bug #1966639 `_: Fixed resize issue in reverse replication for the volumes which are a part of a consistency group(CG). ././@PaxHeader0000000000000000000000000000022500000000000011454 xustar0000000000000000127 path=cinder-27.0.0/releasenotes/notes/bug-1968159-ibm-svf-Retype_failure_for_replication_volume-type-4e0671b299315f4b.yaml 22 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/bug-1968159-ibm-svf-Retype_failure_for_replication_volume-type-4e060000664000175000017500000000045000000000000033052 0ustar00zuulzuul00000000000000--- fixes: - | IBM Spectrum Virtualize Family driver: `Bug #1968159 `_: Fix for retype failure for replicated volume-type. Controlling chfcmap call for rc_controlled fcmap for replication-type volumes during retype operation. ././@PaxHeader0000000000000000000000000000021000000000000011446 xustar0000000000000000114 path=cinder-27.0.0/releasenotes/notes/bug-1968170-add-parameters-used-in-reimage-volume-48d5b8008ec82ea6.yaml 22 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/bug-1968170-add-parameters-used-in-reimage-volume-48d5b8008ec82ea6.0000664000175000017500000000023400000000000031711 0ustar00zuulzuul00000000000000--- fixes: - | `Bug #1968170 `_: Fixed the message created when nova fails to reimage the volume. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/bug-1970768-temp-vol-delete-6586a13f08d7a5c1.yaml0000664000175000017500000000035600000000000026357 0ustar00zuulzuul00000000000000--- fixes: - | `Bug #1970768 `_: Fixed status of temporary volumes when creating backups and reverting to a snapshot, preventing accidental manual deletion of those resources. 
././@PaxHeader0000000000000000000000000000024500000000000011456 xustar0000000000000000143 path=cinder-27.0.0/releasenotes/notes/bug-1976400-storwize-Fix_multiple_SVC_CLI_calls_for_rc-relationship_operations-24d15dfccc922cdd.yaml 22 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/bug-1976400-storwize-Fix_multiple_SVC_CLI_calls_for_rc-relationship0000664000175000017500000000036200000000000033175 0ustar00zuulzuul00000000000000--- fixes: - | IBM Spectrum Virtualize Family driver: `Bug #1976400 `_: Optimize svcinfo CLI calls to reduce the computational time for rc-relationship related operations. ././@PaxHeader0000000000000000000000000000025100000000000011453 xustar0000000000000000147 path=cinder-27.0.0/releasenotes/notes/bug-1976499-storwize-lsfcportsetmember_is_being_called_in_the_wrong_SVC_code_level-db06c4eca902f389.yaml 22 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/bug-1976499-storwize-lsfcportsetmember_is_being_called_in_the_wrong0000664000175000017500000000030500000000000033607 0ustar00zuulzuul00000000000000--- fixes: - | IBM Spectrum Virtualize Family driver: `Bug #1976499 `_: Setting correct SVC Code level for lsfcportsetmember call. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/bug-1978020-glance-upload-uri-8fbc70c442ac620c.yaml0000664000175000017500000000111200000000000026765 0ustar00zuulzuul00000000000000--- fixes: - | `Bug #1978020 `_: Fixed uploading a volume to a Cinder-backed Glance image; if a store name is set in the volume type's extra specs, it must also be sent to Glance as part of the new image location URI. Please note that while the `image_service:store_id` extra spec is validated when it is set for the volume type, it is not validated later; it is the operator's responsibility to make sure that the Glance store is not renamed or removed or that the volume types are updated accordingly. ././@PaxHeader0000000000000000000000000000023700000000000011457 xustar0000000000000000137 path=cinder-27.0.0/releasenotes/notes/bug-1978290-ibm-svf-optimize_SSH_calls_in_creation_of_replicated_volumes-8fad7f54a4d3e73a.yaml 22 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/bug-1978290-ibm-svf-optimize_SSH_calls_in_creation_of_replicated_vo0000664000175000017500000000035300000000000033302 0ustar00zuulzuul00000000000000--- fixes: - | IBM Spectrum Virtualize family driver `Bug #1978290 `_: Optimize lsmdiskgrp SSH calls in creation of replicated volumes to reduce the computational time. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/bug-1978729-cinder-backup-4cd87c4d71b7713e.yaml0000664000175000017500000000054300000000000026153 0ustar00zuulzuul00000000000000--- fixes: - | `Bug #1978729 `_: Fixed context.message_action is None on errors by backup drivers. The message_* properties of the context were not passed during rpc, which caused a double exception when a backup driver raised an exception, masking the actual backup driver exception. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/bug-1981354-infinidat-iscsi-fix-multipath-3f8a0be5f541c66e.yaml0000664000175000017500000000033100000000000031345 0ustar00zuulzuul00000000000000--- fixes: - | Infinidat Driver `bug #1981354 `_: Fixed Infinidat driver to return all configured and enabled iSCSI portals for a given network space. 
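For the extra spec referenced in the bug #1978020 note above, the Glance store is associated with a volume type as follows (type and store names are placeholders)::

    openstack volume type set --property image_service:store_id=cinder-store my-cinder-backed-type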
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/bug-1981420-dell-powermax-fix-for-force-flag-9320910dfbf998d2.yaml0000664000175000017500000000055700000000000031506 0ustar00zuulzuul00000000000000--- fixes: - | `Dell PowerMax Driver Bug #1981420 `_: Fixed issue faced while creating synchronous volume which was caused by incorrect handling of the force flag. This is corrected by checking volume type extra specs for the value of "force_vol_edit" parameter along with the "force" parameter. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/bug-1981982-infinidat-fix-ssl-options-6ddd852c24b16760.yaml0000664000175000017500000000063300000000000030372 0ustar00zuulzuul00000000000000--- fixes: - | Infinidat Driver `bug #1981982 `_: Fixed Infinidat driver to use TLS/SSL communication between the Cinder volume service and the storage backend. Admin can set `True` or `False` for the `driver_use_ssl` and `suppress_requests_ssl_warnings` options in the driver section of cinder.conf to enable or disable these features. ././@PaxHeader0000000000000000000000000000022400000000000011453 xustar0000000000000000126 path=cinder-27.0.0/releasenotes/notes/bug-1982078-Driver_initialization_error_w.r.t_default_portset-3992a060cca2adcb.yaml 22 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/bug-1982078-Driver_initialization_error_w.r.t_default_portset-3992a0000664000175000017500000000031000000000000033074 0ustar00zuulzuul00000000000000--- fixes: - | IBM Spectrum Virtualize Family driver: `Bug #1982078 `_: Fixed the default portset value during driver initialization. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/bug-1982350-infinidat-fix-multi-attach-19f62d182b675e59.yaml0000664000175000017500000000052200000000000030415 0ustar00zuulzuul00000000000000--- fixes: - | Infinidat Driver `bug #1982350 `_: Fixed Infinidat driver multi-attach feature. Added a check if there are multiple attachments to the volume from the same connector and terminate connection only for the last attachment from the corresponding host. ././@PaxHeader0000000000000000000000000000021100000000000011447 xustar0000000000000000115 path=cinder-27.0.0/releasenotes/notes/bug-1982405-infinidat-fix-generic-volume-migration-da33a6fe980ac4eb.yaml 22 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/bug-1982405-infinidat-fix-generic-volume-migration-da33a6fe980ac4eb0000664000175000017500000000034000000000000032326 0ustar00zuulzuul00000000000000--- fixes: - | Infinidat Driver `bug #1982405 `_: Fixed Infinidat driver to allow generic volume migration between two storage pools within the same cluster. ././@PaxHeader0000000000000000000000000000020700000000000011454 xustar0000000000000000113 path=cinder-27.0.0/releasenotes/notes/bug-1983287-infinidat-fix-backup-attached-volume-b28e5dd5c25a24ec.yaml 22 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/bug-1983287-infinidat-fix-backup-attached-volume-b28e5dd5c25a24ec.y0000664000175000017500000000025600000000000032150 0ustar00zuulzuul00000000000000--- fixes: - | Infinidat Driver `bug #1983287 `_: Fixed Infinidat driver to allow backup of an attached volume. 
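For the TLS options described in the bug #1981982 note above, a minimal driver-section sketch (section name and address are illustrative)::

    [infinidat]
    volume_driver = cinder.volume.drivers.infinidat.InfiniboxVolumeDriver
    san_ip = infinibox.example.com
    driver_use_ssl = true
    suppress_requests_ssl_warnings = false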
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/bug-1984000-infinidat-fix-consistency-groups-cf5b9c85dbf972ee.yaml0000664000175000017500000000041200000000000032252 0ustar00zuulzuul00000000000000--- fixes: - | Infinidat Driver `bug #1984000 `_: Fixed Infinidat driver to take into account the group identifier property when creating a new volume and add the volume to the consistency group. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/bug-1985065-storwize-mkhost-failure-592d8cb76e9feeb2.yaml0000664000175000017500000000050200000000000030416 0ustar00zuulzuul00000000000000--- fixes: - | IBM Spectrum Virtualize Family driver: `Bug #1985065 `_: Fixed to collect all the IP addresses for all the storage nodes given in lsip command response as volume of any iogrp should be available to the storage nodes in default scenario. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/bug-1996188-vmdk-subformat-allow-list-93e6943d9a486d11.yaml0000664000175000017500000000313500000000000030340 0ustar00zuulzuul00000000000000--- upgrade: - | This release introduces a new configuration option, ``vmdk_allowed_types``, that specifies the list of VMDK image subformats that Cinder will allow. The default setting allows only the 'streamOptimized' and 'monolithicSparse' subformats, which do not use named extents. security: - | This release introduces a new configuration option, ``vmdk_allowed_types``, that specifies the list of VMDK image subformats that Cinder will allow in order to prevent exposure of host information by modifying the named extents in a VMDK image. The default setting allows only the 'streamOptimized' and 'monolithicSparse' subformats, which do not use named extents. - | As part of the fix for `Bug #1996188 `_, cinder is now more strict in checking that the ``disk_format`` recorded for an image (as revealed by the Image Service API image-show response) matches what cinder detects when it downloads the image. Thus, some requests to create a volume from a source image that had previously succeeded may fail with an ``ImageUnacceptable`` error. fixes: - | `Bug #1996188 `_: Fixed issue where a VMDK image file whose createType allowed named extents could expose host information. This change introduces a new configuration option, ``vmdk_allowed_types``, that specifies the list of VMDK image subformats that Cinder will allow. The default setting allows only the 'streamOptimized' and 'monolithicSparse' subformats. ././@PaxHeader0000000000000000000000000000025000000000000011452 xustar0000000000000000146 path=cinder-27.0.0/releasenotes/notes/bug-2003300-ibm-svf-enable_support_for_replication_volume_with_mirror_pool_option-83563770463ebbca.yaml 22 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/bug-2003300-ibm-svf-enable_support_for_replication_volume_with_mirr0000664000175000017500000000037100000000000033511 0ustar00zuulzuul00000000000000--- fixes: - | IBM Spectrum Virtualize Family driver: `Bug #2003300 `_: Enable support for mirror-pool option for metro-mirror replication and global-mirror replication volume-types. 
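For the ``vmdk_allowed_types`` option introduced in the Bug #1996188 note above, a minimal cinder.conf sketch is shown below; the values are the documented defaults and are repeated here only for illustration::

    [DEFAULT]
    # Only VMDK images using these createType subformats are accepted;
    # both defaults avoid named extents.
    vmdk_allowed_types = streamOptimized,monolithicSparse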
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/bug-2008017-netapp-fix-native-threads-04d8f58f4c29b03d.yaml0000664000175000017500000000031000000000000030370 0ustar00zuulzuul00000000000000--- fixes: - | `Bug #2008017 `_: Fixed NetApp NFS driver to never spawn a native thread, avoiding thread starvation and other related issues. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/bug-2008931-hpe-keyerror-on-migration-71d31e6c0a8ab0d9.yaml0000664000175000017500000000033500000000000030475 0ustar00zuulzuul00000000000000--- fixes: - | HPE 3PAR driver `bug #2008931 `_: Fixed an issue when performing the migrate volume operation while the `comment` attribute is missing from the volume. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/bug-2012246-292d7d93260a1fe5.yaml0000664000175000017500000000030200000000000023321 0ustar00zuulzuul00000000000000--- fixes: - | `Bug #2012246 `_: Hide the value of the `[coordination] backend_url` option from logs because it can contain credentials. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/bug-2016138-56f07bc9376f55f7.yaml0000664000175000017500000000023000000000000023346 0ustar00zuulzuul00000000000000--- fixes: - | `Bug #2016138 `_: Handle missing volumes during cleanup of incomplete backups. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/bug-2017815-infinidat-fix-compression-setting-04eaf71933d55912.yaml0000664000175000017500000000142000000000000032012 0ustar00zuulzuul00000000000000--- fixes: - | Infinidat driver `bug #2017815 `_: Fixed Infinidat driver to inherit the compression setting by default for all newly created volumes. Admin can set ``True`` or ``False`` for the ``infinidat_use_compression`` option in the driver section of ``cinder.conf`` to explicitly enable or disable the compression setting for all newly created volumes. Alternatively, leave this option unset (commented out) so that all created volumes inherit their compression setting from their parent pool at creation time. The default value is unset. upgrade: - | Infinidat driver: support has been removed for pre-v3.0 InfiniBox systems. These versions are end of life and have not been supported for a long time. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/bug-2027532-volume-list-sort-by-boolean-fix-49972c69007d5ebc.yaml0000664000175000017500000000030600000000000031416 0ustar00zuulzuul00000000000000--- fixes: - | `Bug #2027532 `_: Fixed Cinder API HTTP 500 when issuing a volume list and sorting by a boolean field (e.g. "bootable"). ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/bug-2028857-fix-netapp-replica-failover-error-a9cad94ae56af8d0.yaml0000664000175000017500000000032400000000000032267 0ustar00zuulzuul00000000000000--- fixes: - | NetApp ONTAP driver `bug #2028857 `_: Fixed errors that were occurring in the replica failover operation when using the ONTAP REST API.
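A minimal cinder.conf sketch for the ``infinidat_use_compression`` behavior described in the bug #2017815 note above; the ``[infinidat-backend]`` section name is illustrative, and leaving the option commented out preserves the default of inheriting the setting from the parent pool::

    [infinidat-backend]
    # Uncomment to force the compression setting for all newly created
    # volumes instead of inheriting it from the parent pool.
    # infinidat_use_compression = true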
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/bug-2045230-dell-powermax-fix-snapvx-unlink-e27d67d6b217d706.yaml0000664000175000017500000000222300000000000031505 0ustar00zuulzuul00000000000000--- fixes: - | Dell PowerMax Driver `Bug #2045230 `_: Fixed the issue that Dell PowerMax SnapVx link fails as the linked device is not yet fully defined. Previously, the below operations could fail if the linked device was not yet fully defined at the time of the call. Now, when ``snapvx_unlink_symforce`` is enabled, those operations are not interrupted by not fully defined devices. By default, ``snapvx_unlink_symforce`` is ``False``. Use extreme caution with this option. If used when a link is copy in progress or when a restore is restore in progress, this will cause an incomplete copy and data on the copy target would not be usable. Impacted operations: * Clone a volume * Create a volume from a snapshot * Create volume snapshots * Delete volume snapshots * Revert volume to snapshot * Create generic volume group from source * Unmanage volumes upgrade: - | Dell PowerMax Driver: introduced a new configuration option, ``snapvx_unlink_symforce``, to address Bug #2045230. See the Bug Fixes section for details. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/bug-2051830-dell-powermax-rest-api-timeout-b70bd2754debf16a.yaml0000664000175000017500000000067400000000000031533 0ustar00zuulzuul00000000000000--- fixes: - | PowerMax Driver `bug #2051830 `_: REST API calls to the PowerMax backend did not have a timeout set, which could result in cinder waiting forever. This fix introduces two configuration options, ``rest_api_connect_timeout`` and ``rest_api_read_timeout``, to control timeouts when connecting to the backend. The default value of each is 30 seconds. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/bug-2052995-dell-powerflex-rest-api-timeout-3a05b6b5d5460176.yaml0000664000175000017500000000067600000000000031422 0ustar00zuulzuul00000000000000--- fixes: - | PowerFlex Driver `bug #2052995 `_: REST API calls to the PowerFlex backend did not have a timeout set, which could result in cinder waiting forever. This fix introduces two configuration options, ``rest_api_connect_timeout`` and ``rest_api_read_timeout``, to control timeouts when connecting to the backend. The default value of each is 30 seconds. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/bug-2055022-dell-powerstore-rest-api-timeout-51b3ae19266757f9.yaml0000664000175000017500000000072300000000000031613 0ustar00zuulzuul00000000000000--- fixes: - | Dell PowerStore Driver `bug #2055022 `_: REST API calls to the PowerStore backend did not have a timeout set, which could result in cinder waiting forever. This fix introduces two configuration options, ``rest_api_call_connect_timeout`` and ``rest_api_call_read_timeout``, to control timeouts when connecting to the backend. The default value of each is 30 seconds. 
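A hedged cinder.conf sketch for the REST API timeout options introduced in the three Dell notes above; the backend section names are illustrative, the option names are taken verbatim from the notes, and both timeouts default to 30 seconds::

    [powermax-backend]
    # The same option names apply to a PowerFlex backend section.
    rest_api_connect_timeout = 60
    rest_api_read_timeout = 120

    [powerstore-backend]
    rest_api_call_connect_timeout = 60
    rest_api_call_read_timeout = 120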
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/bug-2058596-3c676e7fdc642b3d.yaml0000664000175000017500000000033300000000000023514 0ustar00zuulzuul00000000000000--- fixes: - | `Bug #2058596 `_: Fixed broken ``backup_swift_service_auth=True`` which made swift backup driver consistently fail during object data access. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/bug-2059809-disallow-qcow2-datafile-abc4e6d8be766710.yaml0000664000175000017500000000165500000000000030132 0ustar00zuulzuul00000000000000--- security: - | Images in the qcow2 format with an external data file are now rejected with an ``ImageUnacceptable`` error because such images could be used in an exploit to expose host information. Given that qcow2 external data files were never supported by Cinder, this change should have no impact on users. See `Bug #2059809 `_ for details. fixes: - | `Bug #2059809 `_: Fixed issue where a qcow2 format image with an external data file could expose host information. Such an image is now rejected with an ``ImageUnacceptable`` error if it is used to create a volume. Given that qcow2 external data files were never supported by Cinder, the only use for such an image previously was to attempt to steal host information, and hence this change should have no impact on users. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/bug-2077643-manage-quota-sync-no-args-7fe8dbc6e3069cfc.yaml0000664000175000017500000000033000000000000030543 0ustar00zuulzuul00000000000000--- fixes: - | `Bug #2077643 `_: Fixed "cinder-manage quota sync" CLI command, which failed with an sqlalchemy error when a project id was not specified. ././@PaxHeader0000000000000000000000000000023200000000000011452 xustar0000000000000000132 path=cinder-27.0.0/releasenotes/notes/bug-2078968-fix-nvme-namespace-mapping-failed-during-live-migration-bbd26bb157b076bf.yaml 22 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/bug-2078968-fix-nvme-namespace-mapping-failed-during-live-migration0000664000175000017500000000461700000000000033033 0ustar00zuulzuul00000000000000--- upgrade: - | Breaking Change: NetApp NVMe Subsystem Architecture Redesign Implemented a significant architectural change to NVMe volume attachment handling to address critical limitations with multi-attach workflows and QoS management. The previous implementation used a one-to-one mapping between hosts and subsystems, where each host would have its own dedicated subsystem, and multiple subsystems would map to a single namespace. This approach created two major issues: * QoS Limitations: Since QoS policies are applied at the subsystem level rather than the namespace level, having multiple subsystems per namespace made it impossible to enforce consistent QoS across all host connections to the same volume. * Multi-Attach Incompatibility: Different subsystems cannot enable true multi-attach functionality, which is essential for live migration and other advanced features where the same volume needs to be simultaneously accessible from multiple hosts. New Architecture: The implementation now uses a many-to-one mapping where multiple hosts share a single subsystem, ensuring a single subsystem-to-namespace relationship. This resolves both QoS consistency and multi-attach limitations. 
Compatibility Impact: This change is not backward compatible due to fundamental differences in how NVMe subsystem-to-namespace mappings are handled. Live migration of existing mappings is not technically feasible. Required Upgrade Path: * Take backup of all volumes using the old NVMe architecture * Upgrade OpenStack to the version with the new architecture * Restore volumes using the new many-to-one subsystem mapping model * For assistance with migration planning and any questions about this process, contact NetApp support who can provide guidance specific to your environment and help minimize disruption during the transition. This approach ensures data integrity while enabling the improved multi-attach and QoS capabilities of the new architecture. fixes: - | NetApp Driver `Bug #2078968 `_: Fixed NVMe namespace mapping fails during VM migration with "Namespace is already mapped to subsystem". Implemented architecture changes to support multiple hosts attaching to single namespace through shared subsystem model. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/bug-2081742-dell-powermax-rest-api-hostlunid-ee22d0105c990ea0.yaml0000664000175000017500000000044600000000000031711 0ustar00zuulzuul00000000000000--- fixes: - | Dell PowerMax Driver `bug #2081742 `_: The REST API calls for the masking view connection do not return the HostLUN ID immediately. To address this, an exception has been added to implement a retry mechanism. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/bug-2082587-fix-type-passed-during-backup-restoration.yaml0000664000175000017500000000022600000000000031326 0ustar00zuulzuul00000000000000--- fixes: - | `Bug #2082587 `_: Fixed backup restoration throwing TypeError on new volume. ././@PaxHeader0000000000000000000000000000020500000000000011452 xustar0000000000000000111 path=cinder-27.0.0/releasenotes/notes/bug-2089656-dell-powermax-fix-multi-detach-req-eb0f189841689ce8.yaml 22 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/bug-2089656-dell-powermax-fix-multi-detach-req-eb0f189841689ce8.yam0000664000175000017500000000042300000000000031722 0ustar00zuulzuul00000000000000--- fixes: - | Dell PowerMax Driver `Bug #2089656 `_: Fixed the issue that multiple detach requests caused race conditions in Dell PowerMax driver. Also, improved trace logs for PowerMax RESTAPI requests. ././@PaxHeader0000000000000000000000000000023100000000000011451 xustar0000000000000000131 path=cinder-27.0.0/releasenotes/notes/bug-2092259-dell-powermax-volume-delete-failed-fix-active-snapshot-ccc3f9b6251d2634.yaml 22 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/bug-2092259-dell-powermax-volume-delete-failed-fix-active-snapshot-0000664000175000017500000000077500000000000032767 0ustar00zuulzuul00000000000000--- fixes: - | Dell PowerMax Driver `bug #2092259 `_: Before a volume can be deleted, the driver issues a command to clean up active snapshots in the backend and then polls the backend to make sure the cleanup has occurred. This fix enhances the polling mechanism to give the backend more time to do the cleanup, thereby increasing the probability that the driver will be able to make a successful volume deletion request. 
././@PaxHeader0000000000000000000000000000021500000000000011453 xustar0000000000000000119 path=cinder-27.0.0/releasenotes/notes/bug-2103742-fix-fail-resize-nfs-volumes-with-snapshots-e861d69b1ae6f97d.yaml 22 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/bug-2103742-fix-fail-resize-nfs-volumes-with-snapshots-e861d69b1ae60000664000175000017500000000040200000000000032310 0ustar00zuulzuul00000000000000--- fixes: - | NFS driver `bug #2103742 `_: Fixed issue preventing the volume resize operation from properly updating the NFS image virtual size with the new size when the volume has snapshots. ././@PaxHeader0000000000000000000000000000022400000000000011453 xustar0000000000000000126 path=cinder-27.0.0/releasenotes/notes/bug-2105961-fix-nvmeof-fail-due-to-initiator-property-missing-db8315541f94447f.yaml 22 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/bug-2105961-fix-nvmeof-fail-due-to-initiator-property-missing-db8310000664000175000017500000000045600000000000032572 0ustar00zuulzuul00000000000000--- fixes: - | `Bug #2105961 `_: Fixed an issue in the NVMe-oF target driver so that it validates the ``nqn`` property (NVMe-oF) instead of the ``initiator`` property (iSCSI) in the connector; the previous behavior caused attachment failures in non-iSCSI environments. ././@PaxHeader0000000000000000000000000000021400000000000011452 xustar0000000000000000118 path=cinder-27.0.0/releasenotes/notes/bug-2110274-fix-detach-issue-for-multiattached-volume-7202cecaeed5ecd0.yaml 22 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/bug-2110274-fix-detach-issue-for-multiattached-volume-7202cecaeed5e0000664000175000017500000000052400000000000032422 0ustar00zuulzuul00000000000000--- fixes: - | Volumes with the multi-attach type can be connected to multiple instances. Additional logic has been implemented for the FCP/NVMe protocols to handle the removal of cinder volumes from multiple instances. For more details, please check `Launchpad bug #2110274 `_. ././@PaxHeader0000000000000000000000000000023200000000000011452 xustar0000000000000000132 path=cinder-27.0.0/releasenotes/notes/bug-2111461-fix-db-purge-fails-due-to-foreign-key-constraint-errors-8a60db1f0158b36e.yaml 22 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/bug-2111461-fix-db-purge-fails-due-to-foreign-key-constraint-errors0000664000175000017500000000060300000000000032713 0ustar00zuulzuul00000000000000--- fixes: - | `Bug #2111461 `_: Fixed an issue preventing the cinder-manage command from purging deleted rows due to foreign key constraint errors. This happened because the timestamp for bulk delete operations was recalculated per table, resulting in slightly different intervals for deleting rows on the primary and dependent tables. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/bug-2112245-bulk-vms-creation-device-issue-b9d82f7a826c9f2b.yaml0000664000175000017500000000044600000000000031430 0ustar00zuulzuul00000000000000--- fixes: - | NetApp Driver `bug #2112245 `_: Fixed the issue where a few cinder volume clone operations failed during bulk clone creation. Added retry logic to ensure the NetApp driver retries any failed clone operations.
././@PaxHeader0000000000000000000000000000020600000000000011453 xustar0000000000000000112 path=cinder-27.0.0/releasenotes/notes/bug-2112403-delete-unusable-image-cache-volumes-f87144726a717d28.yaml 22 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/bug-2112403-delete-unusable-image-cache-volumes-f87144726a717d28.ya0000664000175000017500000000034200000000000031526 0ustar00zuulzuul00000000000000--- fixes: - | `bug #2112403 `_: Fixed the image cache so that volumes are deleted if they can no longer be cloned after reaching a driver-specific snapshot limit. ././@PaxHeader0000000000000000000000000000021100000000000011447 xustar0000000000000000115 path=cinder-27.0.0/releasenotes/notes/bug-2114879-dell-powerflex-improve-secret-handling-b1217791a9dceb1a.yaml 22 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/bug-2114879-dell-powerflex-improve-secret-handling-b1217791a9dceb1a0000664000175000017500000000277000000000000032213 0ustar00zuulzuul00000000000000--- security: - | Dell PowerFlex driver: This release contains a fix for `Bug #2114879 `_. It removes the limitation of use with bare metal hosts mentioned in `OSSN-0086 `_. upgrade: - | Dell PowerFlex driver: The fix for `Bug #2114879 `_ requires ``os-brick`` version 6.13.0 or greater. Users do not need to create the `/opt/emc/scaleio/openstack/connector.conf` file on the hosts using ``os-brick``. Follow the steps below to upgrade: 1. Upgrade ``os-brick`` to version 6.13.0 without removing the configuration file. This version can perform mapping if the driver has not yet done so, provided the configuration file remains intact. 2. Then upgrade the PowerFlex driver to version 3.6.0 or later. Note that driver version 3.6.0 requires ``os-brick`` version 6.13.0 or higher to function correctly and will not operate with earlier versions of ``os-brick``. 3. The connector configuration file can now be safely removed. fixes: - | Dell PowerFlex driver `Bug #2114879 `_: This release contains an updated Dell PowerFlex driver. It must be used with ``os-brick`` version 6.13.0 or greater. ``os-brick`` no longer requires access to PowerFlex backend secrets, and all that is handled by the cinder driver now. ././@PaxHeader0000000000000000000000000000024500000000000011456 xustar0000000000000000143 path=cinder-27.0.0/releasenotes/notes/bug-2114993-iscsi-fc-detach-operation-fails-when-multiple-initiators-connected-b1069bab32d86027.yaml 22 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/bug-2114993-iscsi-fc-detach-operation-fails-when-multiple-initiator0000664000175000017500000000030700000000000033050 0ustar00zuulzuul00000000000000--- fixes: - | NetApp Driver `bug #2114993 `_: Fixed iSCSI and FC detach operation failure issue when multiple initiators are connected. ././@PaxHeader0000000000000000000000000000022000000000000011447 xustar0000000000000000122 path=cinder-27.0.0/releasenotes/notes/bug-2116261-fix-consistency-group-support-for-nvme-driver-102c67c706afc25c.yaml 22 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/bug-2116261-fix-consistency-group-support-for-nvme-driver-102c67c700000664000175000017500000000035100000000000032370 0ustar00zuulzuul00000000000000--- fixes: - | NetApp Driver `bug #2116261 `_: NetApp already support the consistency group for NFS/iSCSI/FCP protocol. Extend the same support for NVMe/TCP protocol. 
././@PaxHeader0000000000000000000000000000024000000000000011451 xustar0000000000000000138 path=cinder-27.0.0/releasenotes/notes/bug-2117263-adding-total-volumes-capability-for-netapp-iscsi-nvme-drivers-79da99111b086161.yaml 22 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/bug-2117263-adding-total-volumes-capability-for-netapp-iscsi-nvme-d0000664000175000017500000000204700000000000032757 0ustar00zuulzuul00000000000000--- fixes: - | NetApp driver `bug #2117263 `_: Fixed the issue where the driver does not account for storage limits when provisioning volumes. features: - | The NetApp driver now supports the capability "total_volumes" and the default filter function is updated to filter the backends once the pool reaches maximum number of volumes which is 1024 and is due to the limitations from ONTAP FlexVolume. The "total_volumes" can be used in netapp driver backend stanza to restrict the number of volumes per pool, like in below example we are restricting maximum number of volumes per a pool to 10. Example: filter_function="capabilities.total_volumes < 10" Note: The admin needs to configure the scheduler_default_filters to include the DriverFilter as well under [DEFAULT] stanza as part of cinder.conf, please refer [1] for the default filter list. [1] https://docs.openstack.org/cinder/latest/configuration/block-storage/samples/cinder.conf.html ././@PaxHeader0000000000000000000000000000022300000000000011452 xustar0000000000000000125 path=cinder-27.0.0/releasenotes/notes/bug-2119644-enable-snapshot-creation-for-flexgroup-pool-zapi-4a6af85888a99a02.yaml 22 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/bug-2119644-enable-snapshot-creation-for-flexgroup-pool-zapi-4a6af80000664000175000017500000000105100000000000032614 0ustar00zuulzuul00000000000000--- fixes: - | NetApp driver `bug #2119644 `_: Fixed unable to create snapshots for Cinder volume that belongs to FlexGroup pool. features: - | The NetApp driver now supports creating snapshots for flexgroup pools through ZAPI client and it utilizes the NFS generic driver. When it comes to REST, if the ONTAP version is below 9.14, the operation depends on the NFS generic driver. However, for ONTAP versions 9.14 and above, it relies on the ONTAP file clone API. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/bug-b3c37df596c7a632.yaml0000664000175000017500000000020500000000000022571 0ustar00zuulzuul00000000000000--- fixes: - | `Bug #1953168 `_: add netapp copyoffload provider location ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/bug-fix-1866871-f9d61defc00f4007.yaml0000664000175000017500000000032100000000000024265 0ustar00zuulzuul00000000000000--- fixes: - | PowerMax Driver - Allowing for default volume type in group operations where the array serial number is retrieved from the cinder.conf instead of the pool_name on the extra specs. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/bug-fix-1867163-27afa39ac77b9e15.yaml0000664000175000017500000000024100000000000024271 0ustar00zuulzuul00000000000000--- fixes: - | PowerMax Driver - Issue with upgrades from pre Pike to Pike and later. The device is not found when trying to snapshot a legacy volume. 
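Expanding on the ``total_volumes`` capability described in the NetApp bug #2117263 note above, a hedged cinder.conf sketch is shown below; the ``[netapp-backend]`` section name and the limit of 512 are illustrative, and ``DriverFilter`` must be listed in ``scheduler_default_filters`` for the ``filter_function`` to be evaluated::

    [DEFAULT]
    scheduler_default_filters = AvailabilityZoneFilter,CapacityFilter,CapabilitiesFilter,DriverFilter

    [netapp-backend]
    # Stop scheduling new volumes to a pool once it holds 512 volumes.
    filter_function = "capabilities.total_volumes < 512"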
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/bug-gpfs-fix-nfs-cow.yaml0000664000175000017500000000065100000000000023446 0ustar00zuulzuul00000000000000--- fixes: - | `Bug #1947134 `_: Fixed the initialization of GPFS NFS driver when gpfs_images_share_mode is set to copy_on_write by correcting _same_filesystem functionality. - | `Bug #1947123 `_: Fixed the volume creation issue in GPFS NFS driver when gpfs_images_share_mode is set to copy_on_write. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/bug-invalid-content-type-1715094-8yu8i9w425ua08f3.yaml0000664000175000017500000000022100000000000027675 0ustar00zuulzuul00000000000000--- fixes: - | Cinder now will return 415 (HTTPUnsupportedMediaType) when any unsupported content type is specified in request header. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/bug-reno-69539ecb9b0b5464.yaml0000664000175000017500000000016400000000000023462 0ustar00zuulzuul00000000000000--- fixes: - | The Solidfire cinder driver has been fixed to ensure delete happens on the correct volume. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/bug1929429-e749f5e5a242a599.yaml0000664000175000017500000000043100000000000023307 0ustar00zuulzuul00000000000000--- fixes: - | PowerMax driver `bug #1929429 `_: Fixes child/parent storage group check so that a pattern match is not case sensitive. For example, myStorageGroup should equal MYSTORAGEGROUP and mystoragegroup. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/bug1938488-a528893c103c03af.yaml0000664000175000017500000000030300000000000023263 0ustar00zuulzuul00000000000000--- fixes: - | `Bug #1938488 `_: When cleaning up a failed backup, clean up the snapshot status when the backup source is a snapshot ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/bug1945500-e4df056b8be2e0ef.yaml0000664000175000017500000000056400000000000023564 0ustar00zuulzuul00000000000000--- fixes: - | `Bug #1945500 `_: The original attempt at fixing this bug did not account for differences in how glance and cinder store image metadata, and as a result some image properties were not filtered out. This new improved fix addresses those differences and makes the filtering more thorough. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/bug2002995-e423f17eaddae22d.yaml0000664000175000017500000000064100000000000023552 0ustar00zuulzuul00000000000000--- fixes: - | StorPool driver `bug #2002995 `_: When retyping a volume on a StorPool backend to a different volume type also on that StorPool backend but using a different StorPool template, occasionally the retype operation would fail or the old volume could be left attached to a StorPool client. This issue has been fixed in this release. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/bug_1828993-8e78d7bbee16ca08.yaml0000664000175000017500000000024000000000000023661 0ustar00zuulzuul00000000000000--- fixes: - | Fixes a bug that could cause mount failures with the Quobyte driver if the quobyte_volume_url setting was changed in a running system. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/bug_1870367-49b74d10a9bfcf07.yaml0000664000175000017500000000041000000000000023561 0ustar00zuulzuul00000000000000--- fixes: - | `Bug #1870367 `_ : Partially fixed NFS and Quobyte drivers by no longer allowing extending a volume while it is attached, to prevent failures due to Qemu internal locking mechanisms. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/bug_1945824-7f8f238e274ddebd.yaml0000664000175000017500000000027600000000000023670 0ustar00zuulzuul00000000000000--- fixes: - | Pure Storage driver `Bug #1945824 `_: Fixed missing DB values when creating new consistency group from CG snapshot. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/bugfix-1744692-5aebd0c97ae66407.yaml0000664000175000017500000000036700000000000024222 0ustar00zuulzuul00000000000000--- fixes: - | Fixes a bug that prevented distributed file system drivers from creating snapshots during volume clone operations (NFS, WindowsSMBFS, VZstorage and Quobyte drivers). Fixing this allows creating snapshot based backups. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/bugfix-netapp-driver-cinder-ipv6-c3c4d0d6a7d0de91.yaml0000664000175000017500000000016400000000000030332 0ustar00zuulzuul00000000000000--- fixes: - Fixed support for IPv6 on management and data paths for NFS, iSCSI and FCP NetApp ONTAP drivers. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/capacity-based-qos-9f5d174658a40bd5.yaml0000664000175000017500000000130700000000000025412 0ustar00zuulzuul00000000000000--- features: - Cinder now allows for capacity based QoS which can be useful in environments where storage performance scales with consumption (such as RBD backed storage). The newly added QoS specs are `read_iops_sec_per_gb`, `write_iops_sec_per_gb`, `total_iops_sec_per_gb`, `read_bytes_sec_per_gb`, `write_bytes_sec_per_gb` and `total_bytes_sec_per_gb`. These values will be multiplied by the size of the volume and passed to the consumer. For example, setting `total_iops_sec_per_gb` to 30 and setting `total_bytes_sec_per_gb` to `1048576` (1MB) then creating a 100 GB volume with that QoS will result in a volume with 3,000 total IOPs and 100MB/s throughput limit. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/capacity-based-qos-minimum-values-b24a5f49c986f11d.yaml0000664000175000017500000000063200000000000030440 0ustar00zuulzuul00000000000000--- features: - Cinder now allows for a minimum value when using the capacity based QoS in order to make sure small volumes can get a minimum allocation for them to be usable. 
The newly added QoS specs are `read_iops_sec_per_gb_min`, `write_iops_sec_per_gb_min`, `total_iops_sec_per_gb_min`, `read_bytes_sec_per_gb_min`, `write_bytes_sec_per_gb_min` and `total_bytes_sec_per_gb_min` ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/capacity-headroom-4b07701f1df9e5c4.yaml0000664000175000017500000000027200000000000025403 0ustar00zuulzuul00000000000000--- features: - Cinder is now collecting capacity data, including virtual free capacity etc from the backends. A notification which includes that data is periodically emitted. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/castellan-backend-0c49591a54821c45.yaml0000664000175000017500000000101300000000000025117 0ustar00zuulzuul00000000000000--- upgrade: - | The support for ``cinder.keymgr.barbican.BarbicanKeyManager`` and the ``[keymgr]`` config section has now been removed. All configs should now be switched to use ``castellan.key_manager.barbican_key_manager.BarbicanKeyManager`` and the ``[key_manager]`` config section. deprecations: - | The Castellan library used for encryption has deprecated the ``api_class`` config option. Configuration files using this should now be updated to use the ``backend`` option instead. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/ceph-add-option-to-keep-only-last-n-snapshots-89dc532656f453f4.yaml0000664000175000017500000000053300000000000032401 0ustar00zuulzuul00000000000000--- features: - | Ceph driver: Add config option to keep only the last n snapshots per backup to save disk space on the source volume storage. Enabling this option can cause incremental backups to become full backups instead under special circumstances. Please take a look at the Ceph backup driver docs for more information.././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/ceph-backup-no-flatten-36557727e9d73b2b.yaml0000664000175000017500000000032400000000000026113 0ustar00zuulzuul00000000000000--- fixes: - | RBD driver `bug #1916843 `_: Fixed rpc timeout when backing up RBD snapshot. We no longer flatten temporary volumes and snapshots. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/ceph-catch-more-failure-conditions-d2ec640f5ff8051c.yaml0000664000175000017500000000063500000000000030630 0ustar00zuulzuul00000000000000--- fixes: - | `Bug #2031897 `_: Fixed issues for volume backups with the Ceph driver where failures of the first process ("rbd export-diff") were not caught. Instead, only the return code of the second process ("rbd import-diff") was recognized. This change also preserves the stderr that was lost previously in order to ease debugging. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/ceph-iscsi-driver-b515bd7fb73ce13b.yaml0000664000175000017500000000036400000000000025466 0ustar00zuulzuul00000000000000--- features: - | Added new Ceph iSCSI driver rbd_iscsi. This new driver is derived from the rbd driver and allows all the same features as the rbd driver. The only difference is that volume attachments are done via iSCSI. 
././@PaxHeader0000000000000000000000000000021200000000000011450 xustar0000000000000000116 path=cinder-27.0.0/releasenotes/notes/certificate-based-authentication-for-netapp-drivers-b06a62df620aebc3.yaml 22 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/certificate-based-authentication-for-netapp-drivers-b06a62df620aebc0000664000175000017500000000055100000000000033207 0ustar00zuulzuul00000000000000--- features: - | The NetApp ONTAP driver now supports Certificate-Based-Authentication (CBA) for operators that desire certificate based authentication instead of user and password. Note: The options for cert-auth take precedence, if all the auth options are defined in the config (both cert and legacy), the legacy ones are ignored. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/cg-scheduler-change-180a36b77e8cc26b.yaml0000664000175000017500000000022000000000000025573 0ustar00zuulzuul00000000000000--- fixes: - Consistency group creation previously scheduled at the pool level. Now it is fixed to schedule at the backend level as designed. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/cg_api_volume_type-7db1856776e707c7.yaml0000664000175000017500000000011100000000000025536 0ustar00zuulzuul00000000000000--- features: - The consistency group API now returns volume type IDs. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/cgroupsv2-75476a8e1ea88b5f.yaml0000664000175000017500000000024500000000000023763 0ustar00zuulzuul00000000000000--- features: - | Cinder now supports setting-up cgroups with the cgroups v2 API, which is used when doing migration of block device with the LVM backend. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/change-default-rbd_exclusive_cinder_pool-e59c528c7f728780.yaml0000664000175000017500000000135000000000000031755 0ustar00zuulzuul00000000000000--- upgrade: - | Ceph/RBD volume backends will now assume exclusive cinder pools, as if they had ``rbd_exclusive_cinder_pool = true`` in their configuration. This helps deployments with a large number of volumes and prevent issues on deployments with a growing number of volumes at the small cost of a slightly less accurate stats being reported to the scheduler. fixes: - | Ceph/RBD: Fix cinder taking a long time to start for Ceph/RBD backends. (`Related-Bug #1704106 `_) - | Ceph/RBD: Fix Cinder becoming non-responsive and stats gathering taking longer that its period. (`Related-Bug #1704106 `_) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/change-encryption-key-on-clone-3be7cdb0e27386e0.yaml0000664000175000017500000000026100000000000030001 0ustar00zuulzuul00000000000000--- features: - | When an encrypted volume is cloned, a new encryption key is generated for the new volume. This is currently implemented only for iSCSI/FC backends. 
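Relating to the ``rbd_exclusive_cinder_pool`` default change noted above, the sketch below shows how a deployment that shares its RBD pool with non-Cinder users might restore the old, more accurate accounting; the ``[ceph-rbd]`` section name is illustrative::

    [ceph-rbd]
    # Only set this to false when other services also create images in
    # the same pool; stats gathering becomes slower but more accurate.
    rbd_exclusive_cinder_pool = false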
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/check-displayname-displaydescription-123sd5gef91acb12.yaml0000664000175000017500000000020000000000000031440 0ustar00zuulzuul00000000000000--- fixes: - Add 'display_name' and 'display_description' validation for creating/updating snapshot and volume operations.././@PaxHeader0000000000000000000000000000021700000000000011455 xustar0000000000000000121 path=cinder-27.0.0/releasenotes/notes/check-snapshots-when-cascade-deleting-transferred-volume-575ef0b76bd7f334.yaml 22 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/check-snapshots-when-cascade-deleting-transferred-volume-575ef0b76b0000664000175000017500000000055600000000000033263 0ustar00zuulzuul00000000000000--- fixes: - After transferring a volume without snapshots from one user project to another, a cascade delete issued by the receiving user could raise exceptions in the driver and leave the volume in the error_deleting state. An additional check now ensures there are no snapshots left in the other project when cascade deleting a transferred volume. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/cheesecake-promotion-30a3336fb911c3ad.yaml0000664000175000017500000000065100000000000026100 0ustar00zuulzuul00000000000000--- features: - | A new cinder-manage command, reset_active_backend, was added to promote a failed-over backend participating in replication. This allows you to reset a backend without manually editing the database. A backend undergoing promotion using this command is expected to be in a disabled and frozen state. Both standalone and clustered backend configurations are supported. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/cinder-api-microversions-d2082a095c322ce6.yaml0000664000175000017500000000012400000000000026630 0ustar00zuulzuul00000000000000--- features: - Added support for API microversions, as well as /v3 API endpoint. ././@PaxHeader0000000000000000000000000000020500000000000011452 xustar0000000000000000111 path=cinder-27.0.0/releasenotes/notes/cinder-api-middleware-remove-deprecated-option-98912ab7e8b472e8.yaml 22 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/cinder-api-middleware-remove-deprecated-option-98912ab7e8b472e8.yam0000664000175000017500000000011400000000000032616 0ustar00zuulzuul00000000000000--- upgrade: - Removed deprecated option ``osapi_max_request_body_size``. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/cinder-backup-swift-service-token-9b86e8e73ebd2a22.yaml0000664000175000017500000000076100000000000030521 0ustar00zuulzuul00000000000000--- features: - | The Swift backup driver now supports sending an X-Service-Token header with a service token when the new ``backup_swift_service_auth`` config option is enabled. Please note that you still need to configure the ``[service_user]`` group and also set ``send_service_user_token`` to enable the behavior, not only the Swift backup driver option. Note that ``send_service_user_token`` enables this globally and will also affect communication with Nova and Glance.
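A hedged cinder.conf sketch for the Swift service-token feature described in the note above; the credential values are placeholders, and remember that ``send_service_user_token`` also affects requests to Nova and Glance::

    [DEFAULT]
    backup_swift_service_auth = true

    [service_user]
    send_service_user_token = true
    auth_type = password
    auth_url = https://keystone.example.com/identity/v3
    username = cinder
    password = <service-password>
    user_domain_name = Default
    project_name = service
    project_domain_name = Default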
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/cinder-clone-encrypted-a28338e2b1838a63.yaml0000664000175000017500000000031200000000000026202 0ustar00zuulzuul00000000000000--- fixes: - | `Bug #1913054 `_: Fix for creating a clone of an encrypted volume for drivers that require additional information to attach. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/cinder-coprhd-driver-11ebd149ea8610fd.yaml0000664000175000017500000000011500000000000026072 0ustar00zuulzuul00000000000000features: - Added volume backend drivers for CoprHD FC, iSCSI and Scaleio. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/cinder-manage-db-online-schema-migrations-d1c0d40f26d0f033.yaml0000664000175000017500000000142400000000000031737 0ustar00zuulzuul00000000000000--- upgrade: - To get rid of long running DB data migrations that must be run offline, Cinder will now be able to execute them online, on a live cloud. Before upgrading from Ocata to Pike, operator needs to perform all the Newton data migrations. To achieve that he needs to perform ``cinder-manage db online_data_migrations`` until there are no records to be updated. To limit DB performance impact migrations can be performed in chunks limited by ``--max_number`` option. If your intent is to upgrade Cinder in a non-live manner, you can use ``--ignore_state`` option safely. Please note that finishing all the Newton data migrations will be enforced by the first schema migration in Pike, so you won't be able to upgrade to Pike without that. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/cinder-manage-online-migrations-exit-status-7c16edb7facc37bb.yaml0000664000175000017500000000140300000000000032723 0ustar00zuulzuul00000000000000--- upgrade: - | The ``cinder-manage db online_data_migrations`` command now returns exit status 2 in the case where some migrations failed (raised exceptions) and no others were completed successfully from the last batch attempted. This should be considered a fatal condition that requires intervention. Exit status 1 will be returned in the case where the ``--max-count`` option was used and some migrations failed but others succeeded (updated at least one row), because more work may remain for the non-failing migrations, and their completion may be a dependency for the failing ones. The command should be reiterated while it returns exit status 1, and considered completed successfully only when it returns exit status 0. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/cinder-manage-quota-ed4ee17f7097d11f.yaml0000664000175000017500000000033700000000000025725 0ustar00zuulzuul00000000000000--- features: - | The cinder-manage command now includes a new ``quota`` category with two possible actions ``check`` and ``sync`` to help administrators manage out of sync quotas on long running deployments. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/cinder-status-check-backup_driver-fe009985df2bc32f.yaml0000664000175000017500000000034300000000000030555 0ustar00zuulzuul00000000000000--- upgrade: - | A new check is added to the ``cinder-status upgrade check`` CLI to check for the use of backup driver module path instead of full driver class path in the ``backup_driver`` configuration setting. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/cinder-status-check-policyjson-ef61826eab95372b.yaml0000664000175000017500000000041300000000000030044 0ustar00zuulzuul00000000000000--- upgrade: - | A warning has been added to the ``cinder-status upgrade check`` CLI if a ``policy.json`` file is present. Documentation has been updated to correct the file as ``policy.yaml`` if any policies need to be changed from their defaults. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/cinder-status-check-stein_removed_drivers-8184abe8ce82f373.yaml0000664000175000017500000000041600000000000032263 0ustar00zuulzuul00000000000000--- upgrade: - | A new check is added to the ``cinder-status upgrade check`` CLI to check for the configuration of CoprHD, HGST or ITRI DISCO drivers. These drivers were removed in the Stein release and should not be configured at the time of upgrade.././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/cinder-status-check-windows_iscsi_driver-5f4e0b93c7b92f53.yaml0000664000175000017500000000051700000000000032120 0ustar00zuulzuul00000000000000--- upgrade: - | A new check is added to the ``cinder-status upgrade check`` CLI to check for the use of ``cinder.volume.drivers.windows.windows.WindowsDriver`` and a message is reported that the user needs to update the setting to ``cinder.volume.drivers.windows.iscsi.WindowsISCSIDriver`` if it is encountered. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/cinder_backend_aa_glance-300c8e087c8cf192.yaml0000664000175000017500000000027600000000000026625 0ustar00zuulzuul00000000000000--- fixes: - | `Bug #1906286 `_: Fixed issue with Cinder-backed images in A/A environment not correctly using the cluster name. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/clean-file-locks-on-remove-e5898012f4114d3c.yaml0000664000175000017500000000043000000000000026663 0ustar00zuulzuul00000000000000--- fixes: - | `Bug #1432387 `_: Try to automatically clean up file locks after a resource (volume, snapshot) is deleted. This will alleviate the issue of the locks directory always increasing the number of files. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/clean-file-locks-tool-3a62ba05ef2d2239.yaml0000664000175000017500000000076000000000000026064 0ustar00zuulzuul00000000000000--- features: - | `Bug #1432387 `_: Add a command to cinder-manage to clean up file locks existing in hosts where there is a Cinder service running (API, Scheduler, Volume, Backup). 
Command works with the Cinder services running, useful to be called as a cron job, as well as stopped, to be called on host startup. Command invocation ``cinder-manage util clean_locks`` with optional parameter ``--services-offline``. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/cleanup-rbd-temp-file-during-convert-fail-3848e9dbe7e15fc6.yaml0000664000175000017500000000031400000000000032041 0ustar00zuulzuul00000000000000--- fixes: - | `Bug #1873738 `_: RBD Driver: Added cleanup for residue destination file if the copy image to encrypted volume operation fails.././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/cloudbyte-retype-support-4b9f79f351465279.yaml0000664000175000017500000000010200000000000026621 0ustar00zuulzuul00000000000000--- features: - Retype support added to CloudByte iSCSI driver. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/cluster_job_distribution-f916dd2e4cce6c1b.yaml0000664000175000017500000000246400000000000027351 0ustar00zuulzuul00000000000000--- prelude: > Everything in Cinder's release notes related to the High Availability Active-Active effort -preluded with "HA A-A:"- is work in progress and should not be used in production until it has been completed and the appropriate release note has been issued stating its readiness for production. features: - "HA A-A: Add cluster configuration option to allow grouping hosts that share the same backend configurations and should work in Active-Active fashion." - "HA A-A: Updated manage command to display cluster information on service listings." - "HA A-A: Added cluster subcommand in manage command to list, remove, and rename clusters." - "HA A-A: Added clusters API endpoints for cluster related operations (index, detail, show, enable/disable). Index and detail accept filtering by `name`, `binary`, `disabled`, `num_hosts`, `num_down_hosts`, and up/down status (`is_up`) as URL parameters. Also added their respective policies." - "HA A-A: Attach and detach operations are now cluster aware and make full use of clustered cinder-volume services." - "HA A-A: Delete volume, delete snapshot, delete consistency group, and delete consistency group snapshot operations are now cluster aware and make full use of clustered cinder-volume services." ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/compress-images-fed3e354d94b0845.yaml0000664000175000017500000000046400000000000025122 0ustar00zuulzuul00000000000000--- features: - | When uploading qcow2 images to Glance, image data will be compressed. This will generally result in less data transferred to Glance at the expense of higher CPU usage. This behavior is controlled by the "image_compress_on_upload" boolean option, which defaults to True. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/consistency_group_manage-d30a2ad8917a7a86.yaml0000664000175000017500000000012500000000000027073 0ustar00zuulzuul00000000000000--- features: - Added update-host command for consistency groups in cinder-manage. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/coprhd-generic-volume-group-a1d41d439f94ae19.yaml0000664000175000017500000000013500000000000027342 0ustar00zuulzuul00000000000000--- features: - Add consistent group capability to generic volume groups in CoprHD driver. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/coprhd-mark-unsupported-aa48145873db1ab1.yaml0000664000175000017500000000111000000000000026564 0ustar00zuulzuul00000000000000--- upgrade: - | The Dell EMC CoprHD drivers have been marked as unsupported and are now deprecated. ``enable_unsupported_driver`` will need to be set to ``True`` in the driver's section in cinder.conf to continue to use it. deprecations: - | The Dell EMC CoprHD drivers have been marked as unsupported and are now deprecated. ``enable_unsupported_driver`` will need to be set to ``True`` in the driver's section in cinder.conf to continue to use it. If its support status does not change, they will be removed in the Stein development cycle. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/coprhd-remove-the-driver-00ef2c41f4c7dccd.yaml0000664000175000017500000000063600000000000027046 0ustar00zuulzuul00000000000000--- upgrade: - | With removal of the CoprHD Volume Driver any volumes being used by Cinder within a CoprHD backend should be migrated to a supported storage backend before upgrade. other: - | After being marked unsupported in the Rocky release the CoprHD driver is now being removed in Stein. The vendor has indicated that this is desired as the CoprHD driver has been deprecated. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/create-update-rules-b46cf9c07c5a3966.yaml0000664000175000017500000000033100000000000025672 0ustar00zuulzuul00000000000000--- features: - Separate create and update rules for volume metadata. upgrade: - If policy for update volume metadata is modified in a desired way it's needed to add a desired rule for create volume metadata. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/create_volume_from_encrypted_image-9666e1ed7b4eab5f.yaml0000664000175000017500000000016600000000000031262 0ustar00zuulzuul00000000000000--- fixes: - Creating a new volume from an image that was created from an encrypted Cinder volume now succeeds. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/datacore-mark-unsupported-2399bc19a789fb4c.yaml0000664000175000017500000000107200000000000027127 0ustar00zuulzuul00000000000000--- upgrade: - | The DataCore drivers have been marked as unsupported and are now deprecated. ``enable_unsupported_driver`` will need to be set to ``True`` in the driver's section in cinder.conf to continue to use it. deprecations: - | The DataCore drivers have been marked as unsupported and are now deprecated. ``enable_unsupported_driver`` will need to be set to ``True`` in the driver's section in cinder.conf to continue to use it. If its support status does not change, they will be removed in the Stein development cycle. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/datera-2.2-driver-update-28b97aa2aaf333b6.yaml0000664000175000017500000000104500000000000026372 0ustar00zuulzuul00000000000000--- features: - Capabilites List for Datera Volume Drivers - Extended Volume-Type Support for Datera Volume Drivers - Naming convention change for Datera Volume Drivers - Volume Manage/Unmanage support for Datera Volume Drivers - New BoolOpt ``datera_debug_override_num_replicas`` for Datera Volume Drivers deprecations: - IntOpt ``datera_num_replicas`` is changed to a volume type extra spec option-- ``DF:replica_count`` - BoolOpt ``datera_acl_allow_all`` is changed to a volume type extra spec option-- ``DF:acl_allow_all`` ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/datera-2.3-driver-update-12d0221fd4bb9fb0.yaml0000664000175000017500000000072600000000000026366 0ustar00zuulzuul00000000000000--- features: - Added Datera EDF API 2.1 support. - Added Datera Multi-Tenancy Support. - Added Datera Template Support. - Broke Datera driver up into modules. upgrade: - Datera driver location has changed from cinder.volume.drivers .datera.DateraDriver to cinder.volume.drivers.datera.datera_iscsi .DateraDriver. deprecations: - Deprecated datera_api_version option. - Removed datera_acl_allow_all option. - Removed datera_num_replicas option. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/datera-2.4.0-driver-update-164bbc77e6b45eb7.yaml0000664000175000017500000000047300000000000026551 0ustar00zuulzuul00000000000000--- features: - Added ``datera_disable_profiler`` boolean config option. - Added Cinder fast-retype support to Datera EDF driver. - Added Volume Placement extra-specs support to Datera EDF driver. - Fixed ACL multi-attach bug in Datera EDF driver. - Fixed a few scalability bugs in the Datera EDF driver. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/datera-2019.12.10.0-driver-update-cadadf95e4725164.yaml0000664000175000017500000000137700000000000027304 0ustar00zuulzuul00000000000000--- features: - | The Datera driver now supports API v2.2, IOPS/GB and BW/GB settings, LDAP and CHAP, extended metadata attributes during volume creation and attachment. Most retype operations do not detach volumes anymore. Manageable Snapshots can be listed. Flash and Hybrid capacity information added. deprecations: - | The Datera driver removed v2 API support and the usage of initiator-groups fixes: - | Datera driver: fixes in retyping / QoS, Glance interoperability, fast clones, IP pools, volume templates and initiators, unicode character support, scalability issues other: - | The Datera driver went under a major driver revamp/restructure and a new separate Datera Python-SDK requirement has been introduced ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/datera-driver-v2-update-930468e8259c8e86.yaml0000664000175000017500000000073600000000000026166 0ustar00zuulzuul00000000000000--- features: - All Datera DataFabric backed volume-types will now use API version 2 with Datera DataFabric. upgrade: - Users of the Datera Cinder driver are now required to use Datera DataFabric version 1.0+. 
Versions before 1.0 will not be able to utilize this new driver since they still function on v1 of the Datera DataFabric API. deprecations: - Config option ``datera_api_token`` has been replaced by options ``san_login`` and ``san_password``. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/datera-mark-unsupported-7b71d9124b3fded2.yaml0000664000175000017500000000112700000000000026647 0ustar00zuulzuul00000000000000--- upgrade: - | The driver for Datera's Storage Systems has been marked as unsupported and is now deprecated. ``enable_unsupported_driver`` will need to be set to ``True`` in the driver's section in cinder.conf to continue to use it. deprecations: - | The driver for Datera's Storage Systems has been marked as unsupported and is now deprecated. ``enable_unsupported_driver`` will need to be set to ``True`` in the driver's section in cinder.conf to continue to use it. If its support status does not change, it will be removed in the 'U' development cycle. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/db-resource-indexes-8010c9a881277503.yaml0000664000175000017500000000177500000000000025371 0ustar00zuulzuul00000000000000--- upgrade: - | The ``cinder-manage db sync`` command for this verison of cinder will add additional database indexes. Depending on database size and complexity, this will take time to complete for every single index to be created. On MySQL or MariaDB, these indexes will only be created if an index does not already exist with the same name: * ``groups_deleted_project_id_idx`` * ``group_snapshots_deleted_project_id_idx`` * ``volumes_deleted_project_id_idx`` * ``volumes_deleted_host_idx`` * ``snapshots_deleted_project_id_idx`` * ``backups_deleted_project_id_idx`` An example of the SQL commands to generate these indexes can be found in the `specific troubleshooting guide `_. fixes: - | `Bug #1952443 `_: Improve performance for creating volume from image, listing volumes, snapshots, backups, groups, and group_snapshots. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/db-schema-from-kilo-e6e952744531caa2.yaml0000664000175000017500000000034300000000000025450 0ustar00zuulzuul00000000000000--- upgrade: - The Cinder database can now only be upgraded from changes since the Kilo release. In order to upgrade from a version prior to that, you must now upgrade to at least Kilo first, then to Newton or later. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/db-schema-from-liberty-f5fa57d67441dece.yaml0000664000175000017500000000035400000000000026420 0ustar00zuulzuul00000000000000--- upgrade: - The Cinder database can now only be upgraded from changes since the Liberty release. In order to upgrade from a version prior to that, you must now upgrade to at least Liberty first, then to Ocata or later. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/db-schema-from-mitaka-168ac06161e9ca0d.yaml0000664000175000017500000000034500000000000026032 0ustar00zuulzuul00000000000000--- upgrade: - The Cinder database can now only be upgraded from changes since the Mitaka release. 
In order to upgrade from a version prior to that, you must now upgrade to at least Mitaka first, then to Pike or later. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/db-schema-from-newton-79b18439bd15e4c4.yaml0000664000175000017500000000035500000000000026037 0ustar00zuulzuul00000000000000--- upgrade: - | The Cinder database can now only be ugpraded from changes since the Newton release. In order to upgrade from a version prior to that, you must now upgrade to at least Newton first, then to Queens or later. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/db-schema-from-ocata-e1d7dd1dc4d3a0d9.yaml0000664000175000017500000000032200000000000026071 0ustar00zuulzuul00000000000000--- upgrade: - | The Cinder database can now only be upgraded with changes since the Ocata release. In order to upgrade from a version prior to that, you must now upgrade to at least Ocata first. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/db-schema-from-queens-de5025a780ff1d30.yaml0000664000175000017500000000032400000000000026063 0ustar00zuulzuul00000000000000--- upgrade: - | The Cinder database can now only be upgraded with changes since the Queens release. In order to upgrade from a version prior to that, you must now upgrade to at least Queens first. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/default-apiv1-disabled-9f6bb0c67b38e670.yaml0000664000175000017500000000033000000000000026224 0ustar00zuulzuul00000000000000--- upgrade: - The v1 API was deprecated in the Juno release and is now defaulted to disabled. In order to still use the v1 API, you must now set ``enable_v1_api`` to ``True`` in your cinder.conf file. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/del_volume_with_fc-f024b9f2d6eaca0f.yaml0000664000175000017500000000017700000000000026065 0ustar00zuulzuul00000000000000--- fixes: - Fixed StorWize/SVC error causing volume deletion to get stuck in the 'deleting' state when using FlashCopy. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/delete-tsm-backup-driver-725e33f7c213fd50.yaml0000664000175000017500000000014200000000000026523 0ustar00zuulzuul00000000000000--- upgrade: - | TSM backup driver is removed. Please, migrate your backups before upgrade. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/delete-volume-metadata-keys-3e19694401e13d00.yaml0000664000175000017500000000016700000000000027062 0ustar00zuulzuul00000000000000--- features: - Added using etags in API calls to avoid the lost update problem during deleting volume metadata. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/delete-volume-with-snapshots-0b104e212d5d36b1.yaml0000664000175000017500000000022200000000000027437 0ustar00zuulzuul00000000000000--- features: - It is now possible to delete a volume and its snapshots by passing an additional argument to volume delete, "cascade=True". 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/delete_parameters-6f44fece22a7787d.yaml0000664000175000017500000000105300000000000025572 0ustar00zuulzuul00000000000000--- features: - The ``force`` boolean parameter has been added to the volume delete API. It may be used in combination with ``cascade``. This also means that volume force delete is available in the base volume API rather than only in the ``volume_admin_actions`` extension. upgrade: - There is a new policy option ``volume:force_delete`` which controls access to the ability to specify force delete via the volume delete API. This is separate from the pre-existing ``volume-admin-actions:force_delete`` policy check. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/dell-emc-pvme-driver-9333594b2cc1e0b5.yaml0000664000175000017500000000013000000000000025636 0ustar00zuulzuul00000000000000--- features: - | Dell EMC PowerVault ME Series storage arrays are now supported. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/dell-emc-sc-api-timeouts-ce8d166e1847ea94.yaml0000664000175000017500000000060700000000000026534 0ustar00zuulzuul00000000000000--- features: - Added dell_api_async_rest_timeout option to the Dell EMC SC driver. This is the timeout used for asynchronous REST calls to the Dell EMC SC REST API. Default is 15 seconds. - Added dell_api_sync_rest_timeout option to the Dell EMC SC driver. This is the timeout used for synchronous REST calls to the Dell EMC SC REST API. Default is 30 seconds. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/dell-emc-sc-bugfix-1756914-ffca3133273040f6.yaml0000664000175000017500000000046700000000000026133 0ustar00zuulzuul00000000000000--- fixes: - Dell EMC SC driver correctly returns initialize_connection data when more than one IQN is attached to a volume. This fixes some random Nova Live Migration failures where the connection information being returned was for an IQN other than the one for which it was being requested. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/dell-emc-sc-mult-attach-d09cfd06ee8db8da.yaml0000664000175000017500000000015300000000000026623 0ustar00zuulzuul00000000000000--- features: - Enabled Cinder Multi-Attach capability in the Dell EMC Storage Center Cinder driver. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/dell-emc-sc-support-generic-groups-98c7452d705b36f9.yaml0000664000175000017500000000015300000000000030416 0ustar00zuulzuul00000000000000--- features: - Add consistency group capability to Generic Volume Groups in the Dell EMC SC driver. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/dell-emc-unity-driver-72cb901467b23b22.yaml0000664000175000017500000000010300000000000025752 0ustar00zuulzuul00000000000000--- features: - Added backend driver for Dell EMC Unity storage. 
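An illustrative cinder.conf fragment for the Dell EMC SC REST timeout options introduced above; the backend section name ``dellsc-1`` is hypothetical and the values restate the documented defaults (15 seconds asynchronous, 30 seconds synchronous)::

    [dellsc-1]
    dell_api_async_rest_timeout = 15
    dell_api_sync_rest_timeout = 30
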
././@PaxHeader0000000000000000000000000000021500000000000011453 xustar0000000000000000119 path=cinder-27.0.0/releasenotes/notes/dell-powerflex-bugfix-1998136-self-signed-certificates-62e3cb444ab7ff2b.yaml 22 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/dell-powerflex-bugfix-1998136-self-signed-certificates-62e3cb444ab70000664000175000017500000000057700000000000032372 0ustar00zuulzuul00000000000000--- fixes: - | Dell PowerFlex driver `bug #1998136 `_: When using self signed certificates, the option sent to os-brick via the connection_properties was not correctly handled. It has now been fixed by adding the 'verify_certificate' and 'certificate_path' to the driver when initializing the connection. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/dell-powermax-unisphere-v101-7195af74d1c7671c.yaml0000664000175000017500000000045100000000000027206 0ustar00zuulzuul00000000000000--- fixes: - | Dell PowerMax driver `bug #2051828 `_: The driver only recognized 10.0 as being Unisphere 10 and would try to use 9.2 for Unisphere 10.x (where x > 0), but now it correctly recognizes 10.x as being Unisphere 10. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/demc-trim-bb2165f74a5703a6.yaml0000664000175000017500000000076200000000000023611 0ustar00zuulzuul00000000000000--- features: - | Dell EMC PowerStore driver: Report trimming/discard support to Nova and Cinder. - | Dell EMC PowerMax driver: Report trimming/discard support to Nova and Cinder. - | Dell EMC PowerFlex driver: Report trimming/discard support to Nova and Cinder on thin volumes that don't have snapshots. Not doing trim on volumes with snapshots is the vendor's recommendation, but can be overriden with the ``report_discard_supported`` configuration option. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/deprecate-api-v2-9f4543ab2e14b018.yaml0000664000175000017500000000042300000000000024751 0ustar00zuulzuul00000000000000--- deprecations: - | The Cinder v2 API has now been marked as deprecated. All new client code should use the v3 API. API v3 adds support for microversioned API calls. If no microversion is requested, the base 3.0 version for the v3 API is identical to v2. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/deprecate-backends-in-default-b9784a2333fe22f2.yaml0000664000175000017500000000102600000000000027463 0ustar00zuulzuul00000000000000--- upgrade: - Any Volume Drivers configured in the ``DEFAULT`` config stanza should be moved to their own stanza and enabled via the ``enabled_backends`` config option. The older style of config with ``DEFAULT`` is deprecated and will be removed in future releases. deprecations: - Configuring Volume Drivers in the ``DEFAULT`` config stanza is not going to be maintained and will be removed in the next release. All backends should use the ``enabled_backends`` config option with separate stanza's for each. 
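A minimal sketch of the per-backend configuration layout that the ``deprecate-backends-in-default`` note above asks deployers to adopt; the backend name ``lvm-1`` and the LVM driver are illustrative choices, not a recommendation::

    [DEFAULT]
    enabled_backends = lvm-1

    [lvm-1]
    volume_driver = cinder.volume.drivers.lvm.LVMVolumeDriver
    volume_backend_name = lvm-1
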
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/deprecate-backup-service-to-driver-mapping-a3afabd4f55eca01.yaml0000664000175000017500000000052000000000000032465 0ustar00zuulzuul00000000000000--- deprecations: - | Backup service to driver mapping is deprecated. If you use old values like 'cinder.backup.services.swift' or 'cinder.backup.services.ceph' it should be changed to 'cinder.backup.drivers.swift' or 'cinder.backup.drivers.ceph' accordingly to get your backup service working in the 'R' release. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/deprecate-block-device-driver-d30232547a31fe1e.yaml0000664000175000017500000000043100000000000027465 0ustar00zuulzuul00000000000000--- deprecations: - The block_driver is deprecated as of the Ocata release and will be removed in the Queens release of Cinder. Instead the LVM driver with the LIO iSCSI target should be used. For those that desire higher performance, they should use LVM striping. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/deprecate-cinder-linux-smb-driver-4aec58f15a963c54.yaml0000664000175000017500000000031000000000000030413 0ustar00zuulzuul00000000000000--- deprecations: - | The Cinder Linux SMBFS driver is now deprecated and will be removed during the following release. Deployers are encouraged to use the Windows SMBFS driver instead. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/deprecate-consistency-group-apis-0d9120d16f090781.yaml0000664000175000017500000000027500000000000030146 0ustar00zuulzuul00000000000000--- deprecations: - | The Consistency Group APIs have now been marked as deprecated and will be removed in a future release. Generic Volume Group APIs should be used instead. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/deprecate-dell-emc-drivers-caracal-a575e95cd61ac1d8.yaml0000664000175000017500000000115300000000000030551 0ustar00zuulzuul00000000000000--- upgrade: - | The ``enable_unsupported_driver`` option will need to be set to ``True`` in the driver's section in cinder.conf to continue to use the following three drivers. - Dell SC Series Storage Driver (iSCSI, FC) - Dell VNX Storage Driver (FC, iSCSI) - Dell XtremeIO Storage Driver (iSCSI, FC) deprecations: - | The following three drivers were marked unsupported. These drivers are deprecated and will be removed in a future release. - Dell SC Series Storage Driver (iSCSI, FC) - Dell VNX Storage Driver (FC, iSCSI) - Dell XtremeIO Storage Driver (iSCSI, FC) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/deprecate-glusterfs-backup-drv-5581909c0cc83102.yaml0000664000175000017500000000017300000000000027572 0ustar00zuulzuul00000000000000--- deprecations: - | The GlusterFS backup driver has been deprecated. It will be removed in the 2025.1 release. 
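A small cinder.conf sketch of the backup driver path change described in the ``deprecate-backup-service-to-driver-mapping`` note above; the Swift value is one of the two examples quoted in that note, and only the module path changes::

    [DEFAULT]
    # Deprecated spelling: backup_driver = cinder.backup.services.swift
    backup_driver = cinder.backup.drivers.swift
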
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/deprecate-iser-opts-b0af9a68b7b8578c.yaml0000664000175000017500000000044300000000000025764 0ustar00zuulzuul00000000000000--- deprecations: - | The following configuration options, which have been silently ignored since the Mitaka release, are hereby deprecated for removal: - ``num_iser_scan_tries`` - ``iser_target_prefix`` - ``iser_ip_address`` - ``iser_port`` - ``iser_helper`` ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/deprecate-json-formatted-policy-file-dc3441a7b1dbfb47.yaml0000664000175000017500000000115300000000000031240 0ustar00zuulzuul00000000000000--- deprecations: - | Use of JSON formatted policy files was deprecated by the ``oslo.policy`` library during the Victoria development cycle. As a result, this deprecation is being noted in the Wallaby cycle with an anticipated future removal of JSON formatted file support by ``oslo.policy``. As such operators will need to convert to YAML policy files. Use the `oslopolicy-convert-json-to-yaml `_ tool to convert the existing JSON formatted policy file to YAML in a backward compatible way. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/deprecate-linbit-drbdmanagedrv-75c41ce8e81cac80.yaml0000664000175000017500000000022500000000000030102 0ustar00zuulzuul00000000000000--- deprecations: - | The LINBIT DRBDManage volume driver is moving to maintenance mode in Stein Release and will be removed in T Release. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/deprecate-nested-quota-d1ad7e8f54492a87.yaml0000664000175000017500000000117700000000000026376 0ustar00zuulzuul00000000000000--- upgrade: - | A new check is added to the ``cinder-status upgrade check`` CLI to check for the use of the deprecated ``cinder.quota.NestedDbQuotaDriver``. This driver will be replaced by a new, OpenStack-wide, nested quota management. deprecations: - | The ``cinder.quota.NestedDbQuotaDriver`` quota driver for handling nested projects is now deprecated. There is an OpenStack-wide effort to move to "unified limits" that will require changes in how quotas are handled for these types of configurations. The ``NestedDbQuotaDriver`` will continue to work until it is replaced with this new mechanism. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/deprecate-rbd_keyring_conf-432efbcd47e52c8a.yaml0000664000175000017500000000144700000000000027415 0ustar00zuulzuul00000000000000--- security: - | Due to `OSSN-0085 `_: Cinder configuration option can leak secret key from Ceph backend, deployers using the ``rbd_keyring_conf`` option are advised to stop using it immediately. The option has been deprecated for removal early in the 'V' development cycle. deprecations: - | The configuration option ``rbd_keyring_conf`` for the Ceph cinder driver presents a security risk and the option is hereby deprecated and scheduled to be removed early in the 'V' development cycle, following the standard OpenStack deprecation policy. For more information, see `OSSN-0085 `_: Cinder configuration option can leak secret key from Ceph backend. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/deprecate-sf-allow-template-caching-b3a1ea32512cbb11.yaml0000664000175000017500000000050200000000000030712 0ustar00zuulzuul00000000000000--- prelude: > SolidFire cinder driver deprecate sf_allow_template_caching deprecations: - | The configuration option sf_allow_template_caching for the SolidFire cinder driver has been removed. Use image_volume_cache_enabled equals True for a better template image cache that is managed from cinder. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/deprecate-tsm-backup-driver-8be0c78ec1a9d6dc.yaml0000664000175000017500000000015700000000000027532 0ustar00zuulzuul00000000000000--- deprecations: - | Cinder TSM Backup Driver is deprecated and will be removed in Wallaby release. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/deprecate-windows-support-4667f38d71fa8ad6.yaml0000664000175000017500000000114600000000000027167 0ustar00zuulzuul00000000000000--- upgrade: - | The following drivers have been marked as unsupported and are now deprecated. ``enable_unsupported_driver`` will need to be set to ``True`` in the driver's section in cinder.conf to continue to use these drivers. - ``Windows iSCSI Driver`` - ``Windows SMB Driver`` deprecations: - | Support for running Cinder in Windows operating systems has been deprecated because of retirement of the Winstackers project. - | The following drivers have been marked as unsupported and are now deprecated. - ``Windows iSCSI Driver`` - ``Windows SMB Driver`` ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/deprecate-xml-api-bf3e4079f1dc5eae.yaml0000664000175000017500000000014600000000000025535 0ustar00zuulzuul00000000000000--- deprecations: - The XML API has been marked deprecated and will be removed in a future release. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/deprecate_hosts_api_extension-fe0c042af10a20db.yaml0000664000175000017500000000033500000000000030220 0ustar00zuulzuul00000000000000--- upgrade: - | The hosts api extension is now deprecated and will be removed in a future version. deprecations: - | The hosts api extension is now deprecated and will be removed in a future version. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/deprecate_logs_commands-a0d59cb7535a2138.yaml0000664000175000017500000000017100000000000026564 0ustar00zuulzuul00000000000000--- deprecations: - | Deprecate the "cinder-manage logs" commands. These will be removed in a later release. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/deprecate_osapi_volume_base_url-b6984886a902a562.yaml0000664000175000017500000000015600000000000030202 0ustar00zuulzuul00000000000000--- deprecations: - Instead of using osapi_volume_base_url use public_endpoint. Both do the same thing. 
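An illustrative cinder.conf fragment for the replacement described in the ``deprecate_osapi_volume_base_url`` note above; the URL is a placeholder::

    [DEFAULT]
    # Replaces the deprecated osapi_volume_base_url option.
    public_endpoint = http://controller.example.org:8776
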
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/deprecate_san_rest_port-0d8610a872e92e09.yaml0000664000175000017500000000020400000000000026546 0ustar00zuulzuul00000000000000--- deprecations: - | VMAX driver - configuration tag san_rest_port will be replaced by san_api_port in the next release. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/deprecated-ibm-multipath-f06c0e907a6301de.yaml0000664000175000017500000000011500000000000026650 0ustar00zuulzuul00000000000000--- deprecations: - Deprecated IBM driver _multipath_enabled config flags. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/deprecated-nas-ip-fd86a734c92f6fae.yaml0000664000175000017500000000024600000000000025455 0ustar00zuulzuul00000000000000--- deprecations: - Deprecated the configuration option ``nas_ip``. Use option ``nas_host`` to indicate the IP address or hostname of the NAS system. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/detach-notification-31ae15dafdef36c1.yaml0000664000175000017500000000050400000000000026133 0ustar00zuulzuul00000000000000--- fixes: - | `Bug #1916980 `_: Fixed stale volume notification information on volume detach. - | `Bug #1935011 `_: Fixed missing detach.start notification when deleting an attachment in reserved state. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/detach-race-delete-012820ad9c8dbe16.yaml0000664000175000017500000000033700000000000025404 0ustar00zuulzuul00000000000000--- fixes: - | `Bug #1937084 `_: Fixed race condition between delete attachment and delete volume that can leave deleted volumes stuck as attached to instances. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/detachedinstanceerror-64be35894c624eae.yaml0000664000175000017500000000032300000000000026357 0ustar00zuulzuul00000000000000--- fixes: - | Fix DetachedInstanceError is not bound to a Session for VolumeAttachments. This affected VolumeList.get_all, and could make a service fail on startup and make it stay in down state. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/diff-srps-674f2c0cc893db4b.yaml0000664000175000017500000000055600000000000023776 0ustar00zuulzuul00000000000000--- fixes: - | `Bug #1979666 `_: PowerMax driver : Fixed rare case where the SRP in the local and remote arrays are different when managing volumes into OpenStack. For backward compatibility and name matching, the default storage group will assume the SRP name of the local array on both arrays. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/discard-config-option-711a7fbf20685834.yaml0000664000175000017500000000013400000000000026034 0ustar00zuulzuul00000000000000--- features: - New config option to enable discard (trim/unmap) support for any backend. 
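The ``discard-config-option`` note above does not name the new option; assuming it is the ``report_discard_supported`` option mentioned in the ``demc-trim`` note earlier in this section, a hedged cinder.conf sketch (with a hypothetical backend name) would be::

    [backend-1]
    # Advertise discard (trim/unmap) support for this backend.
    report_discard_supported = True
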
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/disco-cinder-driver-9dac5fb04511de1f.yaml0000664000175000017500000000007200000000000025775 0ustar00zuulzuul00000000000000--- features: - Added backend driver for DISCO storage. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/disco-mark-unsupported-f6eb8208c8c4eb3b.yaml0000664000175000017500000000105400000000000026573 0ustar00zuulzuul00000000000000--- upgrade: - | The Disco driver has been marked as unsupported and is now deprecated. ``enable_unsupported_driver`` will need to be set to ``True`` in the driver's section in cinder.conf to continue to use it. deprecations: - | The Disco driver has been marked as unsupported and is now deprecated. ``enable_unsupported_driver`` will need to be set to ``True`` in the driver's section in cinder.conf to continue to use it. If its support status does not change, it will be removed in the Stein development cycle. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/disco-options-94fe9eaad5e397a5.yaml0000664000175000017500000000170200000000000024762 0ustar00zuulzuul00000000000000--- upgrade: - | Some of DISCO driver options were incorrectly read from ``[DEFAULT]`` section in the cinder.conf. Now those are correctly read from ``[]`` section. This includes following options: * ``disco_client`` * ``disco_client_port`` * ``rest_ip`` * ``choice_client`` * ``disco_src_api_port`` * ``retry_interval`` Also some options are renamed (note that 3 of them were both moved and renamed): * ``rest_ip`` to ``disco_rest_ip`` * ``choice_client`` to ``disco_choice_client`` * ``volume_name_prefix`` to ``disco_volume_name_prefix`` * ``snapshot_check_timeout`` to ``disco_snapshot_check_timeout`` * ``restore_check_timeout`` to ``disco_restore_check_timeout`` * ``clone_check_timeout`` to ``disco_clone_check_timeout`` * ``retry_interval`` to ``disco_retry_interval`` Old names and locations are still supported but support will be removed in the future. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/dothill-drivers-removed-da00a6b83865271a.yaml0000664000175000017500000000011200000000000026464 0ustar00zuulzuul00000000000000--- upgrade: - Support for Dot Hill AssuredSAN arrays has been removed. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/downstream_genconfig-e50791497ce87ce3.yaml0000664000175000017500000000013200000000000026150 0ustar00zuulzuul00000000000000--- fixes: - Removed the need for deployers to run tox for config reference generation. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/drbd-linstor-rest-update-52fd52f6c09a4dd3.yaml0000664000175000017500000000052000000000000026724 0ustar00zuulzuul00000000000000--- upgrade: - | The LINSTOR driver for Cinder supports LINSTOR 0.9.12. The driver supports LINSTOR backend using REST API. The new driver adds 'linstor_autoplace_count' configuration option that specifies the number of volume replicas. features: - | The LINSTOR Driver for Cinder now supports LINSTOR 0.9.12. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/drbd-linstor-volume-driver-20273a9ad3783cf5.yaml0000664000175000017500000000011700000000000027131 0ustar00zuulzuul00000000000000--- features: - | New Cinder volume driver for LINBIT LINSTOR resources. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/drbd-resource-options-88599c0a8fc5b8a3.yaml0000664000175000017500000000024700000000000026264 0ustar00zuulzuul00000000000000--- features: - Configuration options for the DRBD driver that will be applied to DRBD resources; the default values should be okay for most installations. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/driver_reinitialization-b26a8b3e665567ec.yaml0000664000175000017500000000046100000000000026753 0ustar00zuulzuul00000000000000--- features: - Added a new config ``reinit_driver_count`` in volume driver, which indicates the maximum retry limit for driver re-initialization when it fails to initialize a volume driver. Its default value is 3. The interval of retry is exponentially backoff, and will be 1s, 2s, 4s etc. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/drop-db_driver-opt-b644963bf3b6aced.yaml0000664000175000017500000000041700000000000025657 0ustar00zuulzuul00000000000000--- upgrade: - | The ``[DEFAULT] db_driver`` config option has been removed. This was intended to allow configuration of the database driver, however, there is only one database driver present in-tree and out-of-tree database drivers are not supported. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/drop-mysql-5-5-support-fe3ececc3c9b9915.yaml0000664000175000017500000000007700000000000026374 0ustar00zuulzuul00000000000000--- upgrade: - | Support for MySQL 5.5 has been dropped. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/drop-py2-377a91a5b66165ab.yaml0000664000175000017500000000031100000000000023377 0ustar00zuulzuul00000000000000--- upgrade: - | Python 2.7 support has been dropped. The last release of Cinder to support py2.7 is OpenStack Train. The minimum version of Python now supported by Cinder is Python 3.6. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/drop-python-3-6-and-3-7-fa2dda5d6be0cad6.yaml0000664000175000017500000000020100000000000026202 0ustar00zuulzuul00000000000000--- upgrade: - | Python 3.6 & 3.7 support has been dropped. The minimum version of Python now supported is Python 3.8. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/ds8k-allow-multi-attach-41fa7bddbbd719ec.yaml0000664000175000017500000000010100000000000026656 0ustar00zuulzuul00000000000000--- features: - IBM DS8K driver has added multiattach support. 
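A minimal cinder.conf sketch of the driver re-initialization retry limit described in the ``driver_reinitialization`` note above; the value restates the documented default of 3, and placing the option in ``[DEFAULT]`` is an assumption::

    [DEFAULT]
    # Maximum retries when a volume driver fails to initialize; the retry
    # interval backs off exponentially (1s, 2s, 4s, ...).
    reinit_driver_count = 3
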
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/ds8k-replication-group-3f2e8cd3c2e291a3.yaml0000664000175000017500000000013100000000000026372 0ustar00zuulzuul00000000000000--- features: - | Add replication consistency group support in DS8K cinder driver. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/ds8k-report-backend-state-in-service-list-f0898950a0f4b122.yaml0000664000175000017500000000014700000000000031552 0ustar00zuulzuul00000000000000--- features: - | Added flag 'backend_state' which will give backend state info in service list. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/ds8k_async_clone_volume-25232c55da921202.yaml0000664000175000017500000000031100000000000026361 0ustar00zuulzuul00000000000000--- features: - | Added support for cloning volume asynchronously, it can be enabled by option async_clone set to true in parameter metadata when creating volume from volume or snapshot. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/ds8k_specify_pool_lss-5329489c263951ba.yaml0000664000175000017500000000025700000000000026112 0ustar00zuulzuul00000000000000--- features: - DS8K driver adds two new properties into extra-specs so that user can specify pool or lss or both of them to allocate volume in their expected area. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/enable-force-delete-lun-d0e05b5d669e40f7.yaml0000664000175000017500000000110700000000000026372 0ustar00zuulzuul00000000000000--- upgrade: - | DellEMC Unity: The fix of bug `1825469 `_ changes the default value of the ``force_delete_lun_in_storagegroup`` option from ``False`` to ``True``, which means luns will always be force deleted after upgrade. fixes: - | DellEMC Unity: Fix bug `1825469 `_. The fix enables the ``force_delete_lun_in_storagegroup`` option to ``True`` by default, which makes sure that luns can deleted even when they are still in storage groups. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/enable-multiattach-iscsi-fcp-netapp-driver-98ad2d75fbbf333f.yaml0000664000175000017500000000013200000000000032353 0ustar00zuulzuul00000000000000--- features: - | NetApp ONTAP iSCSI and FCP drivers multiattach capability enabled.././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/enable-multiattach-nfs-netapp-driver-406b9b285d85c989.yaml0000664000175000017500000000011000000000000030772 0ustar00zuulzuul00000000000000--- features: - | NetApp ONTAP NFS multiattach capability enabled.././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/enforce_min_vmware-a080055111b04692.yaml0000664000175000017500000000012500000000000025331 0ustar00zuulzuul00000000000000--- upgrade: - The VMware VMDK driver now enforces minimum vCenter version of 5.1. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/extend-volume-completion-action-9bf6b0ed551a8e32.yaml0000664000175000017500000000033600000000000030311 0ustar00zuulzuul00000000000000--- features: - | Add the new ``os-extend_volume_completion`` volume action, which the Nova compute agent can use to notify Cinder that it has finished handling the ``volume-extended`` external server event. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/fail-detach-lun-when-auto-zone-enabled-9c87b18a3acac9d1.yaml0000664000175000017500000000035100000000000031353 0ustar00zuulzuul00000000000000--- fixes: - | Dell EMC Unity Driver: Fixes `bug 1759175 `__ to detach the lun correctly when auto zone was enabled and the lun was the last one attached to the host. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/falconstor-cinder-driver-dcb61441cd7601c5.yaml0000664000175000017500000000010100000000000026674 0ustar00zuulzuul00000000000000--- features: - Added backend driver for FalconStor FreeStor. ././@PaxHeader0000000000000000000000000000021500000000000011453 xustar0000000000000000119 path=cinder-27.0.0/releasenotes/notes/falconstor-extend-driver-to-utilize-multiple-fss-pools-dc6f2bc84432a672.yaml 22 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/falconstor-extend-driver-to-utilize-multiple-fss-pools-dc6f2bc844320000664000175000017500000000025000000000000033253 0ustar00zuulzuul00000000000000--- features: - Added ability to specify multiple storage pools in the FalconStor driver. deprecations: - The fss_pool option is deprecated. Use fss_pools instead. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/feature-abort-restore-fe1252288c59e105.yaml0000664000175000017500000000024300000000000026074 0ustar00zuulzuul00000000000000--- features: - | Support backup restore cancelation by changing the backup status to anything other than `restoring` using `cinder backup-reset-state`. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/feature-clone-across-pools-63021bc853e9161a.yaml0000664000175000017500000000133600000000000027017 0ustar00zuulzuul00000000000000--- features: - Add the clone_across_pools driver capability Drivers can now declare that they can clone a volume into a different pool. Essentially, if this capability is declared, Cinder will skip the check that the pool of the destination volume is the same as the pool of the source volume. Some drivers do not have such a restriction and it may be possible to complete the "create volume from image" operation very efficiently instead of falling back to the "attach and dd" option. This affects creating a volume from an image with and without the image cache. For more details please check `bp clone_across_pools `__ ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/feature-cross-az-backups-6b68c4c4456f2fd7.yaml0000664000175000017500000000022600000000000026645 0ustar00zuulzuul00000000000000--- features: - | Cinder backup creation can now (since microversion 3.51) receive the availability zone where the backup should be stored. 
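An illustrative cinder.conf fragment for the Unity behaviour change described in the ``enable-force-delete-lun`` note above; the backend name ``unity-1`` is a placeholder and the value simply restates the new default::

    [unity-1]
    # Defaults to True since the fix for bug 1825469; shown here only to make
    # the behaviour explicit, set it to False to opt out.
    force_delete_lun_in_storagegroup = True
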
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/feature-multi-process-backup-8cf5ad5a0cf9b2d5.yaml0000664000175000017500000000047600000000000027742 0ustar00zuulzuul00000000000000--- features: - | Cinder backup now supports running multiple processes to make the most of the available CPU cores. Performance gains will be significant when running multiple concurrent backups/restores with compression. The number of processes is set with `backup_workers` configuration option. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/feature-netapp-iscsi-aa-support-eff8ed19a30e87c0.yaml0000664000175000017500000000024200000000000030277 0ustar00zuulzuul00000000000000--- features: - | NetApp ONTAP ISCSI/FC drivers: Enabled support for Active/Active environments in the NetApp ISCSI/FC drivers (including replication). ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/feature-rbd-exclusive-pool-a9bdebdeb1f0bf37.yaml0000664000175000017500000000123100000000000027535 0ustar00zuulzuul00000000000000--- features: - | When using the RBD pool exclusively for Cinder we can now set `rbd_exclusive_cinder_pool` to `true` and Cinder will use DB information to calculate provisioned size instead of querying all volumes in the backend, which will reduce the load on the Ceph cluster and the volume service. issues: - | If RBD stats collection is taking too long in your environment maybe even leading to the service appearing as down you'll want to use the `rbd_exclusive_cinder_pool = true` configuration option if you are using the pool exclusively for Cinder and maybe even if you are not and can live with the innacuracy. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/fix-3par-live-migration-0065bd2626fdb4a1.yaml0000664000175000017500000000047200000000000026354 0ustar00zuulzuul00000000000000--- fixes: - | `Bug #1697422 `_: Fix HPE 3PAR driver issue where volumes that were live migrated to it would end up being inaccessible. We would no longer be able to use the volume for any operation, such as attach, detach, delete, snapshot, etc. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/fix-3par-migrate-rename-662d984e070a1de2.yaml0000664000175000017500000000057300000000000026262 0ustar00zuulzuul00000000000000--- fixes: - | `Bug #1858119 `_: Fix the HPE 3PAR driver's attempt to rename the backend volume after it was migrated. If the original volume resides on the same 3PAR backend then the pre and post migration volume names are swapped. Otherwise, the newly migrated volume is renamed to match the original name. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/fix-500-http-error-on-resource-conflict.yaml0000664000175000017500000000056000000000000027027 0ustar00zuulzuul00000000000000--- fixes: - | `Bug #1907295 `_: Fixed When a volume was not in the correct status to accept an attachment update (e.g.: volume in error or duplicate connectors), the REST API was returning a 500 (Internal Server Error). It now correctly returns the response code 409 (Conflict) in this situation. 
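A hedged cinder.conf sketch combining the two options introduced in the ``feature-multi-process-backup`` and ``feature-rbd-exclusive-pool`` notes above; the worker count and the ``ceph-1`` backend name are illustrative assumptions, and ``rbd_exclusive_cinder_pool`` should only be enabled when the pool is used exclusively by Cinder::

    [DEFAULT]
    # Number of cinder-backup processes to run in parallel.
    backup_workers = 4

    [ceph-1]
    volume_driver = cinder.volume.drivers.rbd.RBDDriver
    # Compute provisioned capacity from the Cinder DB instead of querying
    # every image in the RBD pool.
    rbd_exclusive_cinder_pool = True
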
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/fix-abort-backup-df196e9dcb992586.yaml0000664000175000017500000000021700000000000025177 0ustar00zuulzuul00000000000000--- fixes: - | We no longer leave orphaned chunks on the backup backend or leave a temporary volume/snapshot when aborting a backup. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/fix-backup-handling-of-encryption-key-id-f2fa56cadd80d582.yaml0000664000175000017500000000112600000000000031737 0ustar00zuulzuul00000000000000--- fixes: - | Fix the way encryption key IDs are managed for encrypted volume backups. When creating a backup, the volume's encryption key is cloned and assigned a new key ID. The backup's cloned key ID is now stored in the backup database so that it can be deleted whenever the backup is deleted. When restoring the backup of an encrypted volume, the destination volume is assigned a clone of the backup's encryption key ID. This ensures every restored backup has a unique encryption key ID, even when multiple volumes have been restored from the same backup. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/fix-cacheable-capability-f893520d79c3db60.yaml0000664000175000017500000000021700000000000026532 0ustar00zuulzuul00000000000000--- fixes: - | `Bug #1969366 `_: Fixed reporting of cacheable capability by drivers. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/fix-cinder-manage-groups-quota-bug-421ae9c9eb99b22f.yaml0000664000175000017500000000033600000000000030577 0ustar00zuulzuul00000000000000--- fixes: - | `Bug #2107451 `_: Fixed crash of `cinder-manage quota sync` if there is a row in the quota_usage table with the value groups for column resource ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/fix-cross-az-migration-ce97eff61280e1c7.yaml0000664000175000017500000000033600000000000026421 0ustar00zuulzuul00000000000000--- fixes: - | Resolve issue with cross AZ migrations and retypes where the destination volume kept the source volume's AZ, so we ended up with a volume where the AZ does not match the backend. (bug 1747949) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/fix-ensure-export-3cccf107a82b35a0.yaml0000664000175000017500000000027200000000000025456 0ustar00zuulzuul00000000000000--- fixes: - | JovianDSS driver: `Bug #1941746 `_: Fixed Fix ensure_export function failure in case of partial target recovery. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/fix-extend-volume-939e30f2e9e516bc.yaml0000664000175000017500000000034400000000000025403 0ustar00zuulzuul00000000000000--- fixes: - | [`bug 1772421 `_] INFINIDAT fixed a bug in volume extension feature where volumes were not extended to target size but added the given target size. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/fix-extend-volume-in-thin-pools-57a3d53be4d47704.yaml0000664000175000017500000000044500000000000030013 0ustar00zuulzuul00000000000000--- fixes: - Fixed volume extend issue that allowed a tenant with enough quota to extend the volume to limits greater than what the volume backend supported. other: - Now extend won't work on disabled services because it's going through the scheduler, unlike how it worked before. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/fix-groups-actions-in-a-a-mode-5d554b30a26da22c.yaml0000664000175000017500000000042500000000000027516 0ustar00zuulzuul00000000000000--- fixes: - | Fixed volume group action in Active/Active HA deployment: * Update group (`#1876133 `_) * Create group from group snapshot (`#1867906 `_) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/fix-hnas-clone-with-different-volume-type-b969897cba2610cc.yaml0000664000175000017500000000034300000000000032034 0ustar00zuulzuul00000000000000--- fixes: - Fixed HNAS bug that placed a cloned volume in the same pool as its source, even if the clone had a different pool specification. Driver will not allow to make clones using a different volume type anymore.././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/fix-hnas-stats-reporting-1335e582e46ff440.yaml0000664000175000017500000000016600000000000026537 0ustar00zuulzuul00000000000000--- fixes: - Fixed issue where the HNAS driver was not correctly reporting THIN provisioning and related stats. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/fix-host-info-in-volume-details-1759280bd236421e.yaml0000664000175000017500000000230000000000000027607 0ustar00zuulzuul00000000000000--- upgrade: - | Due to the fix for `Bug #1740950 `_, the ``host_name`` field in any object in the ``attachments`` array of the volume detail response is populated only when the call is made in an administrative context. Otherwise, its value is the JSON ``null`` value. This is consistent with prior API behavior, as it has always been possible for the value of that field to be ``null``. security: - | It was possible under certain circumstances for the host name of an instance to be leaked in the volume detail response. This has been fixed in the current release. The ``host_name`` field in any object in the ``attachments`` array of the volume detail response is populated only when the call is made in an administrative context. Otherwise, its value is the JSON ``null`` value. fixes: - | `Bug #1740950 `_: the ``host_name`` field in any object in the ``attachments`` array of the volume detail response is populated only when the call is made in an administrative context. Otherwise, its value is the JSON ``null`` value.././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/fix-import-backup-quota-issue-8yh69hd19u7tuu23.yaml0000664000175000017500000000012100000000000030136 0ustar00zuulzuul00000000000000--- fixes: - Cinder will now consume quota when importing new backup resource. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/fix-kaminario-unique_fqdn_network-ecde36f614c30733.yaml0000664000175000017500000000037600000000000030640 0ustar00zuulzuul00000000000000--- fixes: - | `Bug #1886042 `_: Fix ``unique_fqdn_network`` configuration option for the Kaminario driver, as it was being ignored when defined in the driver section, which used to work. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/fix-leave-mapped-volume-ef0bd683d415f7b1.yaml0000664000175000017500000000030100000000000026514 0ustar00zuulzuul00000000000000--- fixes: - | `Bug #1880971 `_: Fix leaving mapped volumes on offline volume migration and revert to snapshot operations failure. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/fix-list-volume-filtering-3f2bf93ab9b98974.yaml0000664000175000017500000000022400000000000027056 0ustar00zuulzuul00000000000000--- fixes: - | `Bug #1883490 `_: Fixed incorrect response of listing volumes with filters.././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/fix-manage-no-action-46b023476e8cd938.yaml0000664000175000017500000000034400000000000025565 0ustar00zuulzuul00000000000000--- fixes: - | `Bug #1902852 `_: Fixed throwing Python traceback message when using ``cinder-manage `` without an action for the category. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/fix-manage-replicated-multiattach-9bc258d349e0f5a6.yaml0000664000175000017500000000032500000000000030464 0ustar00zuulzuul00000000000000--- fixes: - | RBD `bug #2115985 `_: Fixed issue when managing a volume with ``multiattach`` or ``replication_enabled`` properties in volume type. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/fix-multiattach-deletion-b3990acf1f5fd378.yaml0000664000175000017500000000014100000000000027002 0ustar00zuulzuul00000000000000--- fixes: - | Fixed NetApp SolidFire bug that avoided multiatached volumes to be deleted. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/fix-netapp-cg-da4fd6c396e5bedb.yaml0000664000175000017500000000014600000000000024761 0ustar00zuulzuul00000000000000--- fixes: - Fixes a bug in NetApp SolidFire where the deletion of group snapshots was failing. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/fix-netapp-custom-igroup-e049b4f3b341dd54.yaml0000664000175000017500000000015400000000000026673 0ustar00zuulzuul00000000000000--- fixes: - | Fix NetApp iSCSI and FC driver issues with custom initiator groups. (bug 1697490). ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/fix-netapp-force_detach-36bdf75dd2c9a030.yaml0000664000175000017500000000012300000000000026537 0ustar00zuulzuul00000000000000--- fixes: - Fixes force_detach behavior for volumes in NetApp SolidFire driver. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/fix-nfs-optimized-create-vol-9eb43f2050bba74a.yaml0000664000175000017500000000026400000000000027473 0ustar00zuulzuul00000000000000--- fixes: - | NFS driver `bug #2120933 `_: Fixed issue when creating a bootable volume from image in optimized path. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/fix-nfs-revert-to-snap-adc04204b3661d66.yaml0000664000175000017500000000021600000000000026152 0ustar00zuulzuul00000000000000--- fixes: - | NFS driver `bug #1946059 `_: Fixed revert to snapshot operation. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/fix-nfs-vol-from-snapshot-654a07d25a33bf7d.yaml0000664000175000017500000000046700000000000026762 0ustar00zuulzuul00000000000000--- fixes: - | NFS driver `bug #2074377 `_: Fixed regression caused by change I65857288b797 (the mitigation for CVE-2024-32498) that was preventing the creation of a new volume from the second and subsequent snapshots of an existing volume. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/fix-powerflex-volume-cache-da3fa1769ef78ae8.yaml0000664000175000017500000000057000000000000027336 0ustar00zuulzuul00000000000000--- fixes: - | PowerFlex driver `bug #1942095 `_: Fixed Cinder volume caching mechanism for the driver. Now the driver correctly raises ``exception.SnapshotLimitReached`` when maximum snapshots are created for a given volume and a volume cache is invalidated to allow a new row of fast volume clones. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/fix-quota-deleting-temporary-volume-274e371b425e92cc.yaml0000664000175000017500000000054000000000000030763 0ustar00zuulzuul00000000000000--- fixes: - | Fix a quota usage error triggered by a non-admin user backing up an in-use volume. The forced backup uses a temporary volume, and quota usage was incorrectly updated when the temporary volume was deleted after the backup operation completed. Fixes `bug 1778774 `__. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/fix-rbd-upload-diff-format-38fc4ef24d7145ba.yaml0000664000175000017500000000027500000000000027107 0ustar00zuulzuul00000000000000--- fixes: - | RBD driver `bug #2092534 `_: Fixed uploading a volume to image when image has different format than volume. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/fix-reimage-image-snap-15ecd5fce9973d5d.yaml0000664000175000017500000000024200000000000026401 0ustar00zuulzuul00000000000000--- fixes: - | `Bug #2062539 `_: Fixed reimage operation when the image is backed by a volume snapshot. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/fix-reimage-sparse-copy-d346e8f55afa6280.yaml0000664000175000017500000000101100000000000026450 0ustar00zuulzuul00000000000000--- fixes: - | `Bug #2045431 `_: Fixed a data leak scenario where we preserve sparseness when reimaging the volume. 
We currently do a sparse copy when writing an image on the volume. This could be a potential data leak scenario where the zero blocks of the new image are not written on the existing volume and the data from the old image still exists on the volume. We fix the scenario by not doing sparse copy when reimaging the volume. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/fix-reimage-status-rollback-eb2aa8f82a8caabc.yaml0000664000175000017500000000027500000000000027670 0ustar00zuulzuul00000000000000--- fixes: - | `Bug #2036994 `_: Fixed rollback of volume status if the reimage operation fails while checking image metadata. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/fix-remotefs-clone-volume-locking-385e740d4a5a813b.yaml0000664000175000017500000000113400000000000030364 0ustar00zuulzuul00000000000000--- fixes: - | An incorrect lock in the remotefs code, which is used for the NFS driver, and other similar drivers, resulted in concurrent clone volume operations failing. create_cloned_volume now locks on the source volume id, meaning multiple clone operations from the same source volume are serialized. A lock in the volume manager flow generally prevents this on normal clone volume operations, but this clone method in the driver is called for operations such as cloning from the cinder image-volume cache or cloning from a cinder backend used as a glance store. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/fix-request_id-6f6972b2c12d1a18.yaml0000664000175000017500000000123300000000000024655 0ustar00zuulzuul00000000000000--- fixes: - | `Bug #1960019 `_: Fixed value of the x-openstack-request-id header when Cinder is using noauth. - | `Bug #1960020 `_: Fixed duplicated request-id values in logs for different requests, happens only on request to / to get available api versions. - | `Bug #1960021 `_: Fixed missing request id headers in requests to / to get available api versions. - | `Bug #1960329 `_: Fixed wrong request ID on middleware filters. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/fix-reserve-volume-policy-31790a8d865ee0a1.yaml0000664000175000017500000000025500000000000026774 0ustar00zuulzuul00000000000000--- fixes: - | The reserve volume API was incorrectly enforcing "volume:retype" policy action. It has been corrected to "volume_extension:volume_actions:reserve". ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/fix-reserved-image-properties-9519ddc080e7ed1a.yaml0000664000175000017500000000324000000000000027747 0ustar00zuulzuul00000000000000--- upgrade: - | We introduced a new config parameter, ``reserved_image_namespaces``, that allows operators to set the image properties to filter out from volume image metadata by namespace when uploading a volume to Glance. These properties, if not filtered out, cause failures when uploading images back to Glance. The error will happen on Glance side when the reserved namespaces are used. This option is also useful when an operator wants to use the Glance property protections feature to make some image properties read-only. 
fixes: - | `Bug #1945500 `_: Fixed an error when uploading to Glance a previously downloaded glance image when glance multistore is enabled. Glance reserves image properties in the namespace 'os_glance' for its own use and will not allow images to be created with these properties. Additionally, there are image properties, such as those associated with image signature verification, that are stored in a volume's image metadata, which should not be added to a new image when a volume is being uploaded as an image. Thus Cinder will no longer include any volume image metadata in the namespaces ``os_glance`` and ``img_signature`` when it creates an image in Glance. Furthermore, because the Glance property protections feature allows an operator to configure specific image properties as read-only, this fix adds a configuration option, ``reserved_image_namespaces``, that allows an operator to exclude additional image properties by namespace (the ``os_glance`` and ``img_signature`` namespaces are *always* excluded). ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/fix-resource-size-76e8ff25f07925f2.yaml0000664000175000017500000000032100000000000025332 0ustar00zuulzuul00000000000000--- fixes: - | `Bug #1948962 `_: Fixed operations that failed on volume types with 255 characters names (e.g. set quota limits or volume migrate). ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/fix-retype-with-az-e048123d982f213d.yaml0000664000175000017500000000021100000000000025316 0ustar00zuulzuul00000000000000--- fixes: - | Fixed a problem with volume retype not honoring the existing volume's Availability Zone if one isn't specified. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/fix-schema-validation-attachment-create-3488914cb52d44d2.yaml0000664000175000017500000000066200000000000031423 0ustar00zuulzuul00000000000000--- fixes: - | Fixed the schema validation for attachment create API to make instance uuid an optional field. It had mistakenly been defined as a required field when schema validation was added in an earlier release. Also updated the schema to allow specification of the ``mode`` parameter, which has been available since microversion >= 3.54, but which was not recognized as a legitimate request field. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/fix-show-transfer-for-non-admins-be001d79975b325d.yaml0000664000175000017500000000024700000000000030150 0ustar00zuulzuul00000000000000--- fixes: - | `Bug #1884268 `_: Fixed issue where non-admin users could not show a volume transfer by name. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/fix-show-volume-non-admins-1bc5238398e73981.yaml0000664000175000017500000000040100000000000026713 0ustar00zuulzuul00000000000000--- fixes: - | `Bug #1917574 `_: Fixed issue when cinderclient requests to show volume by name for non-admin users would result in the volume not being found for microversions 3.31 or later. 
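As a hedged illustration of the ``reserved_image_namespaces`` option introduced in the note above, a ``cinder.conf`` sketch might look like the following (the extra namespace is an invented example; per the note, ``os_glance`` and ``img_signature`` are always excluded regardless of this setting)::

    [DEFAULT]
    # Additional image-property namespaces to strip from volume image
    # metadata when uploading a volume to Glance (illustrative value).
    reserved_image_namespaces = os_custom_protected

Operators using the Glance property protections feature would list here the namespaces they have made read-only.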
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/fix-size-reporting-seagate-driver.rst0000664000175000017500000000020400000000000026103 0ustar00zuulzuul00000000000000--- fixes: - | Seagate, HPE MSA, Dell PowerVault, Lenovo drivers: report volume size in GiB (2^30) rather than GB (10^9). ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/fix-solidfire-provisioning-report-880141e64c1ea52f.yaml0000664000175000017500000000033600000000000030530 0ustar00zuulzuul00000000000000--- fixes: - | Fix SolidFire free_capacity_gb reporting and also report thin_provisioning_support=True. This allows the use of the Cinder scheduler's thin provisioning parameters on the SolidFire platform. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/fix-solidfire-python3-support-ee02ff2c1ec920f2.yaml0000664000175000017500000000016300000000000030022 0ustar00zuulzuul00000000000000--- fixes: - | Fix Python 3 incompatibility issues and make the SolidFire driver fully compatible with Python 3. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/fix-solidfire-replication-dcb3e59b29950933.yaml0000664000175000017500000000054000000000000027014 0ustar00zuulzuul00000000000000--- fixes: - | SolidFire replication was fixed. Several bugs were addressed (creating replicated volumes from snapshots and from volumes, retyping a volume to a replicated type, managing a volume to a replicated type, correctly updating portal info on failover/failback, and other minor fixes). Closes bugs #1834013, #1751932. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/fix-storage_protocol-6baf55e13249463c.yaml0000664000175000017500000000163300000000000026106 0ustar00zuulzuul00000000000000--- upgrade: - | ``storage_protocol`` now treats all variants of the protocol name as the same when matching, so, for example, FC, fc, and fibre_channel are treated equally in the scheduler, whether filtering using the volume type's extra specs or using filter and goodness functions. The storage protocol reported via the REST API will now be the same for them all, using the preferred naming, FC, NVMe-oF, iSCSI, NFS... If your deployment uses ``storage_protocol`` to differentiate between backends that use the same protocol but report it using different variants, be aware that they will no longer be differentiated. fixes: - | `Bug #1966103 `_: Fixed inconsistent behavior of ``storage_protocol`` among different backends that report variants of the protocol name, such as FC, fc, fibre_channel. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/fix-sub-clone-operation-f42a84ab17930f24.yaml0000664000175000017500000000037500000000000026403 0ustar00zuulzuul00000000000000--- fixes: - | `Bug #1924643 `_: Fixed the NetApp cinder driver sub-clone operation that may be used by the extend operation when the extended size is greater than the max LUN geometry. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/fix-transfer-accept-policy-7594806372b14284.yaml0000664000175000017500000000044400000000000026605 0ustar00zuulzuul00000000000000--- fixes: - | `Bug #1950474 `_: Fixed policy authorization for transfer accept API. Previously, setting ``enforce_new_defaults=True`` in oslo_policy section would break the transfer accept API which is fixed in this release. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/fix-unnecessary-migration-on-retype-67cedb1bd8e4c4b2.yaml0000664000175000017500000000026100000000000031260 0ustar00zuulzuul00000000000000--- fixes: - | `Bug #1901188 `_: Fix unnecessary migration on retype when QoS has the same elements in both types. ././@PaxHeader0000000000000000000000000000020600000000000011453 xustar0000000000000000112 path=cinder-27.0.0/releasenotes/notes/fix-vol-image-metadata-endpoints-returning-none-ba0590e6c6757b0c.yaml 22 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/fix-vol-image-metadata-endpoints-returning-none-ba0590e6c6757b0c.ya0000664000175000017500000000041000000000000032643 0ustar00zuulzuul00000000000000--- fixes: - | Fix the following volume image metadata endpoints returning None following policy enforcement failure: * ``os-set_image_metadata`` * ``os-unset_image_metadata`` The endpoints will now correctly raise a 403 Forbidden instead. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/force-delete-mv-a53924f09c475386.yaml0000664000175000017500000000065000000000000024563 0ustar00zuulzuul00000000000000--- fixes: - | Volume "force delete" was introduced with the 3.23 API microversion, however the check for in the service was incorrectly looking for microversion 3.2. That check has now been fixed. It is possible that an API call using a microversion below 3.23 would previously work for this call, which will now fail. This closes `bug #1783028 `_. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/friendly-zone-names-d5e131d356040de0.yaml0000664000175000017500000000064700000000000025610 0ustar00zuulzuul00000000000000--- features: - Cinder FC Zone Manager Friendly Zone Names This feature adds support for Fibre Channel user friendly zone names if implemented by the volume driver. If the volume driver passes the host name and storage system to the Fibre Channel Zone Manager in the conn_info structure, the zone manager will use these names in structuring the zone name to provide a user friendly zone name. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/fujitsu-add-cli-copy-1647fb54970a186d.yaml0000664000175000017500000000167200000000000025622 0ustar00zuulzuul00000000000000--- features: - | Fujitsu Eternus DX driver: Added cli operations when creating snapshot Fujitsu Eternus DX driver used to create snapshot using SMI-S, resulting in the inability to extend the source volume. To make the volume extendable after creating a snapshot, an additional parameter ``fujitsu_use_cli_copy`` is introduced with a default value of ``False``. * If ``fujitsu_use_cli_copy`` is set to ``False``, create a snapshot using the conventional SMI-S method. 
* If ``fujitsu_use_cli_copy`` is set to ``True``, create a snapshot using the CLI method, allowing volume extension of the source volume. Note that ``fujitsu_use_cli_copy`` cannot be set to True when the type of target pool is RAID Group. See the `Fujitsu ETERNUS DX driver documentation `_ for details. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/fujitsu-add-cli-extend-e94b887dac8a45b3.yaml0000664000175000017500000000071300000000000026355 0ustar00zuulzuul00000000000000--- features: - | Fujitsu ETERNUS DX driver: Added support to extend a volume on RAID Group using CLI. Revised the 'Extend Volume' process on the RAID Group to improve processing speed as follows: * When extending a volume created on ThinProvisionPool, the process will still use SMI-S for volume extension. * When extending a volume created on RaidGroup, the process has been updated to use CLI for volume extension. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/fujitsu-eternus-dx-fc-741319960195215c.yaml0000664000175000017500000000010400000000000025601 0ustar00zuulzuul00000000000000--- features: - Added backend driver for Fujitsu ETERNUS DX (FC). ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/fujitsu-eternus-dx-iscsi-e796beffb740db89.yaml0000664000175000017500000000010700000000000027066 0ustar00zuulzuul00000000000000--- features: - Added backend driver for Fujitsu ETERNUS DX (iSCSI). ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/fujitsu-eternus-dx-update-4755ec446030d263.yaml0000664000175000017500000000056600000000000026642 0ustar00zuulzuul00000000000000--- upgrade: - The Fujitsu DX driver names have been updated to distinguish them from other Fujitsu storage. The module path ``cinder.volume.drivers.fujitsu`` should now be updated to ``cinder.volume.drivers.fujitsu.eternus_dx`` in ``cinder.conf``. Support for the previous driver naming will continue to work, but will be removed in a future release. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/fujitsu-improve-cli-function-6cabf36ffc6d44d6.yaml0000664000175000017500000000327600000000000030007 0ustar00zuulzuul00000000000000--- features: - | Fujitsu Eternus DX driver: Added support SSH key. Added the method for connecting to Eternus Storage using SSH key. The connection method can be selected by setting the value of parameter ``fujitsu_passwordless``, which has a default value of ``True``. * When ``fujitsu_passwordless`` is set to ``True``, SSH key is used for connecting to the storage. Additionally, ``fujitsu_private_key_path`` needs to be set to the path of the SSH private key. * When ``fujitsu_passwordless`` is set to ``False``, password is used for SSH connection to the storage. See the `Fujitsu ETERNUS DX driver documentation `_ for details. upgrade: - | Fujitsu Eternus DX driver: Added SSH key and password connection switching Added the method for connecting to Eternus Storage using SSH key. The connection method can be selected by setting the value of parameter ``fujitsu_passwordless``, which has a default value of ``True``. 
For upgrading from previous versions that relied on password authentication, you must explicitly set ``fujitsu_passwordless = False`` in the configuration. This ensures backward compatibility with the legacy password-based workflow. The default True value enforces key-based auth for new deployments, aligning with security best practices at the cost of a minor configuration adjustment for existing users. fixes: - | Fujitsu Eternus DX driver `bug #2048850 `_: Added parsing of error messages when CLI execution fails. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/fujitsu-improve-create-snapshot-cd796e66eea43c90.yaml0000664000175000017500000000045600000000000030360 0ustar00zuulzuul00000000000000--- features: - | Fujitsu ETERNUS DX driver: Add metadata to snapshot After the snapshot is created, upload the information of the snapshot on the storage to the metadata. The metadata has the following information: - ``FJ_SDV_Name`` - ``FJ_SDV_No`` - ``FJ_Pool_Name`` ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/fujitsu-improve-delete-volume-8fa509f0424deb8e.yaml0000664000175000017500000000111300000000000030007 0ustar00zuulzuul00000000000000--- features: - | Fujitsu ETERNUS DX driver: Improve volume deletion To improve the volume deletion process, add a step to check associated copy sessions. Additionally, it also improves the process of retrieving storage-managed volume numbers. There was a problem where the volume could not be deleted because the copy session information acquired by SMI-S IF from ETERNUS DX Storage, which was cached and did not reflect the information that had just been executed. This problem has been addressed through improvements in information retrieval. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/fujitsu-multiple-pools-a0dd9197b16b3122.yaml0000664000175000017500000000010600000000000026370 0ustar00zuulzuul00000000000000--- features: - | Fujitsu Driver: Added multiple pools support. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/fujitsu-pool-infomation-modified-7ebcbbc11a2e6f28.yaml0000664000175000017500000000255600000000000030610 0ustar00zuulzuul00000000000000--- features: - | Fujitsu ETERNUS DX driver: Add fragment capacity information of RAID Group. ETERNUS DX driver have two types of storage pools: RAID Group and ThinProvisioning Pool. Volumes can not be created in RAID Groups for the following situations: * The maximum sequential physical free space is smaller than the volumes to be created. * 128 volumes have already been created in the RAID Group. For the above reasons, to monitor the maximum sequential physical free space and total volumes in the RAID Group, when updating pool information using ``Get Volume Stats``, also update parameter ``total_volumes`` (volumes already created in the RAID Group) and introduce ``fragment_capacity_mb`` (maximum sequential physical capacity) to the backend pool information if the backend pool is a RAID Group. 
Meanwhile, since creating volumes on ThinProvisioning Pool does not encounter the above restrictions, parameter ``fragment_capacity_mb`` will not be added into the information, and remove the ``total_volumes`` parameter from the backend pool information when the type of backend pool is ThinProvisioning Pool. These two parameters can be utilized in future implementations of functions related to ``filter_function``. This patch also enabled the ``multiattach`` in the driver information. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/fujitsu-qos-support-1c1528da06d0b38a.yaml0000664000175000017500000000226100000000000025776 0ustar00zuulzuul00000000000000--- features: - | Fujitsu ETERNUS DX driver: Added support for QoS What QoS settings are available depends upon the storage firmware version of the ETERNUS AF/DX. * When the storage firmware version is less than V11L30-0000, only the upper limit of bandwidth(BWS) can be set using: - ``maxBWS`` Note that when the firmware version of the ETERNUS AF/DX is earlier than V11L30, upper limits for the volume QoS settings of the ETERNUS AF/DX are set using predefined options. This means that you should set the upper limit *of the ETERNUS AF/DX side* to a maximum value that does not exceed the specified ``maxBWS``. * When the storage firmware version is greater than V11L30-0000, the IOPS/Throughput of Total/Read/Write for the volume can be set separately using: - ``read_bytes_sec`` - ``write_bytes_sec`` - ``total_bytes_sec`` - ``read_iops_sec`` - ``write_iops_sec`` - ``total_iops_sec`` See the `Fujitsu ETERNUS DX driver documentation `_ for details. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/fujitsu-support-revert-to-snapshot-5d40dbe8b918e68e.yaml0000664000175000017500000000050600000000000031073 0ustar00zuulzuul00000000000000--- features: - | Fujitsu Eternus DX driver: Added support for revert to snapshot operation. Added support of revert to snapshot functionality. If a volume with snapshots has been extended, causing a mismatch in size between the origin volume and the snapshot, reverting will be guarded by cinder-api. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/fujitsu-update-migrated-volume-1d205cdbd7e65a28.yaml0000664000175000017500000000054600000000000030141 0ustar00zuulzuul00000000000000--- features: - | Fujitsu ETERNUS DX driver: Added support for update migrated volume Now we update the required values to successfully complete the migration. See the `Fujitsu ETERNUS DX driver documentation `_ for details. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/fungible-cinder-driver-af8aeb57846c8ecc.yaml0000664000175000017500000000011600000000000026570 0ustar00zuulzuul00000000000000--- features: - Added NVMe-TCP volume driver for Fungible Storage Cluster. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/fusionstorage-cinder-driver-8f3bca98f6e2065a.yaml0000664000175000017500000000010100000000000027513 0ustar00zuulzuul00000000000000--- features: - Added backend driver for Huawei FusionStorage. 
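Tying together the Fujitsu ETERNUS DX options described in the notes above (``fujitsu_use_cli_copy``, ``fujitsu_passwordless`` and ``fujitsu_private_key_path``), a rough, non-authoritative ``cinder.conf`` backend excerpt might combine them as follows; the section name and key path are placeholders::

    [eternus-dx-1]
    # Create snapshots via CLI so the source volume remains extendable.
    fujitsu_use_cli_copy = True
    # Authenticate to the storage over SSH with a key instead of a password.
    fujitsu_passwordless = True
    fujitsu_private_key_path = /etc/cinder/eternus_dx_key

See the Fujitsu ETERNUS DX driver documentation for the authoritative option list and constraints (for example, CLI copy cannot be used when the target pool is a RAID Group).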
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/fusionstorage-driver-cf13b8d95ae97319.yaml0000664000175000017500000000255600000000000026216 0ustar00zuulzuul00000000000000--- upgrade: - The FusionStorage driver has added the configuration options "manager_ips", "dsware_rest_url", "san_login", "san_password" and "dsware_storage_pools". "[]/manager_ips", the IPs of the FusionStorage Agent (FSA). This option supports FSA mounting across different FSA nodes. The parameter takes the standard dict config form, manager_ips = host1:ip1, host2:ip2... "[]/dsware_rest_url", the address and port of the FusionStorage Manager (FSM) in the format of a string. Currently, only one "dsware_rest_url" is supported. "[]/san_login", the user name of the FusionStorage Manager (FSM) in the format of a string. Currently, only one "san_login" is supported. "[]/san_password", the user password of the FusionStorage Manager (FSM) in the format of a string. Currently, only one "san_password" is supported. "[]/dsware_storage_pools", the names of the storage pools that exist on the FusionStorage Manager. Multiple storage pools can be configured, separated by semicolons. deprecations: - The FusionStorage driver has deprecated the configuration options "dsware_isthin", "dsware_manager", "fusionstorageagent", "clone_volume_timeout", "pool_type", and "pool_id_filter". These configuration options will be removed in the Train release (14.0.0). ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/general-upgrades-notes-120f022aa5bfa1ea.yaml0000664000175000017500000000237200000000000026474 0ustar00zuulzuul00000000000000--- issues: - Cinder services now automatically downgrade RPC messages so they can be understood by the oldest version of a service in the deployment. Disabled and dead services are also taken into account. It is important to keep the service list up to date, without old, unused records. This can be done using the ``cinder-manage service remove`` command. Once the situation is cleaned up, services should either be restarted or a ``SIGHUP`` signal should be issued to their processes to force them to reload version pins. Please note that cinder-api does not support the ``SIGHUP`` signal. upgrade: - If, during a *live* upgrade from Liberty, a backup service is killed while processing a restore request, the backup status may not be automatically cleaned up on service restart. Such orphaned backups need to be cleaned up manually. - When performing a *live* upgrade from Liberty, retype calls may reserve additional quota. As by default quota reservations are invalidated after 24 hours (config option ``reservation_expire=86400``), we recommend either decreasing that time or watching for unused quota reservations manually during the upgrade process. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/generalized-resource-filter-hg598uyvuh119008.yaml0000664000175000017500000000035000000000000027621 0ustar00zuulzuul00000000000000--- features: - Added generalized resource filter support in ``list volume``, ``list backup``, ``list snapshot``, ``list group``, ``list group-snapshot``, ``list attachment``, ``list message`` and ``list pools`` APIs. 
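To illustrate the FusionStorage options listed in the upgrade note above, a hedged ``cinder.conf`` sketch could look like the following; host names, addresses and pool names are placeholders, and the exact ``dsware_rest_url`` format depends on the FSM deployment::

    [fusionstorage-1]
    dsware_rest_url = https://192.0.2.20:28443
    san_login = admin
    san_password = secret
    # Multiple pools, separated by semicolons.
    dsware_storage_pools = pool0; pool1
    # Standard dict form: hostname:FSA-ip pairs.
    manager_ips = host1:192.0.2.21, host2:192.0.2.22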
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/generic-group-quota-manage-support-559629ad07a406f4.yaml0000664000175000017500000000010000000000000030502 0ustar00zuulzuul00000000000000--- features: - Generic group is added into quota management. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/generic-groups-in-gpfs-00bb093945a02642.yaml0000664000175000017500000000013500000000000026043 0ustar00zuulzuul00000000000000--- features: - Added consistent group capability to generic volume groups in GPFS driver. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/generic-groups-in-vnx-cbbe1346e889b5c2.yaml0000664000175000017500000000013200000000000026237 0ustar00zuulzuul00000000000000--- features: - Add consistent group capability to generic volume groups in VNX driver. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/generic-volume-groups-69f998ce44f42737.yaml0000664000175000017500000000016200000000000026142 0ustar00zuulzuul00000000000000--- features: - Introduced generic volume groups and added create/ delete/update/list/show APIs for groups. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/get-driver-opts-924f72346ca1e459.yaml0000664000175000017500000000027000000000000024712 0ustar00zuulzuul00000000000000features: - | Seagate driver: Added support for ``get_driver_options`` api call - | Lenovo driver: Return additional configuration options from ``get_driver_options`` call ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/glance-v1-removed-5121af3bef285324.yaml0000664000175000017500000000045500000000000025141 0ustar00zuulzuul00000000000000--- upgrade: - | The Glance v1 API has been deprecated and will soon be removed. Cinder support for using the v1 API was deprecated in the Pike release and is now no longer available. The ``glance_api_version`` configuration option to support version selection has now been removed. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/glance_v2_upload-939c5693bcc25483.yaml0000664000175000017500000000014100000000000025066 0ustar00zuulzuul00000000000000--- fixes: - upload-to-image using Image API v2 now correctly handles custom image properties. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/gmr-ca97ba4602ce0831.yaml0000664000175000017500000000030400000000000022562 0ustar00zuulzuul00000000000000--- other: - | Enabled Guru Meditation Reports on Cinder wsgi. When running Cinder under WSGI, we might want to have Guru Meditation Reports as well as when running outside of WSGI. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/google-auth-for-gcs-backup-1642cd0e741fbdf9.yaml0000664000175000017500000000110000000000000027075 0ustar00zuulzuul00000000000000--- features: - Google backup driver now supports ``google-auth`` library, and is the preferred library if both ``google-auth`` (together with ``google-auth-httplib2``) and ``oauth2client`` libraries are present in the system. deprecations: - Cinder's Google backup driver is now called gcs, so ``backup_driver`` configuration for Google Cloud Storage should be updated from ``cinder.backup.drivers.google`` to ``cinder.backup.driver.gcs``. fixes: - Google backup driver now works when using ``google-api-python-client`` version 1.6.0 or higher. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/group-snapshots-36264409bbb8850c.yaml0000664000175000017500000000015300000000000025024 0ustar00zuulzuul00000000000000--- features: - Added create/delete APIs for group snapshots and an API to create group from source. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/group-type-group-specs-531e33ee0ae9f822.yaml0000664000175000017500000000007100000000000026366 0ustar00zuulzuul00000000000000--- features: - Added group type and group specs APIs. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/group-update-d423eaa18dbcecc1.yaml0000664000175000017500000000033500000000000024710 0ustar00zuulzuul00000000000000--- fixes: - | Volume group updates of any kind had previously required the group to be in ``Available`` status. Updates to the group name or description will now work regardless of the volume group status. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/handle-external-events-in-extend-6ae53b822baf0004.yaml0000664000175000017500000000047100000000000030240 0ustar00zuulzuul00000000000000--- fixes: - | `bug #2000724 `_: Handled the case when glance is calling online extend and external events were being sent to nova. Now Cinder will only send external events when the volume, to be extended, is attached to a nova instance. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/hbsd-driver-deletion-d81f7c4513f45d7b.yaml0000664000175000017500000000041600000000000026032 0ustar00zuulzuul00000000000000--- deprecations: - The HBSD (Hitachi Block Storage Driver) volume drivers which supports Hitachi Storages HUS100 and VSP family are deprecated. Support for HUS110 family will be no longer provided. Support on VSP will be provided as hitachi.vsp_* drivers.././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/healthcheck-449ed4292e6bfa22.yaml0000664000175000017500000000045400000000000024262 0ustar00zuulzuul00000000000000--- features: - | The oslo.middleware /healthcheck is now activated by default in the Cinder api-paste.ini. Operators can use it to configure HAproxy or the monitoring of Cinder APIs. Edit the ``api-paste.ini`` file and remove any healthcheck entries to disable this functionality. 
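For orientation on the ``/healthcheck`` note above, the relevant ``api-paste.ini`` wiring typically looks roughly like the following (details vary by release, so check the file actually shipped with your Cinder version)::

    [app:healthcheck]
    paste.app_factory = oslo_middleware:Healthcheck.app_factory
    backends = disable_by_file
    disable_by_file_path = /etc/cinder/healthcheck_disable

Removing the healthcheck entries from the file, or creating the file named by ``disable_by_file_path``, disables the endpoint.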
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/hedvig-cinder-driver-e7b98f4bc214bc49.yaml0000664000175000017500000000010100000000000026071 0ustar00zuulzuul00000000000000--- features: - Added backend driver for Hedvig iSCSI storage. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/hgst-mark-unsupported-b2886de36421c8b0.yaml0000664000175000017500000000105200000000000026212 0ustar00zuulzuul00000000000000--- upgrade: - | The HGST driver has been marked as unsupported and is now deprecated. ``enable_unsupported_driver`` will need to be set to ``True`` in the driver's section in cinder.conf to continue to use it. deprecations: - | The HGST driver has been marked as unsupported and is now deprecated. ``enable_unsupported_driver`` will need to be set to ``True`` in the driver's section in cinder.conf to continue to use it. If its support status does not change, it will be removed in the Stein development cycle. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/hitachi-fix-delete-volume-issues-e648525e597505fd.yaml0000664000175000017500000000027000000000000030144 0ustar00zuulzuul00000000000000--- fixes: - | Hitachi driver `bug #1908792 `_: Fix for Hitachi driver allowing delete_volume after create_cloned_volume. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/hitachi-fix-gad-vol-compatibility-e9c62c18f7a12bc7.yaml0000664000175000017500000000146000000000000030467 0ustar00zuulzuul00000000000000--- fixes: - | Hitachi driver `bug #2043978 `_: Since around the Train era, Hitachi had an out-of-tree driver that implemented the Global-Active Device (GAD) and Remote Replication features. As part of an initiative to unify the "Enterprise" and in-tree drivers, change I4543cd036897 in the 2023.1 (Antelope) release implemented the GAD feature for the in-tree driver. Unfortunately, this change used an incompatible string to indicate what copy groups were under GAD control, and thus upgrading to the in-tree driver breaks GAD for existing volumes. This bug fix makes the copy group control identifier consistent so that current users of the out-of-tree driver can upgrade to releases that contain the in-tree driver.././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/hitachi-fix-output-token-c9eb15423e05c5b9.yaml0000664000175000017500000000063200000000000026662 0ustar00zuulzuul00000000000000--- fixes: - | Hitachi driver `bug #2040966 `_: Mask a token for REST API session. - | NEC V driver `bug #2040966 `_: Mask a token for REST API session. - | HPE XP driver `bug #2040966 `_: Mask a token for REST API session. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/hitachi-fix-unlock-f4e044807985e60b.yaml0000664000175000017500000000024200000000000025346 0ustar00zuulzuul00000000000000--- fixes: - | Hitachi driver `bug #2033448 `_: Fixed initialization of the lock counter for the resource group. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/hitachi-generic-volume-groups-434a27b290d51bf3.yaml0000664000175000017500000000011200000000000027567 0ustar00zuulzuul00000000000000--- features: - | Hitachi driver: Add Cinder generic volume groups. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/hitachi-prevent-data-loss-9ec3569d7d5b1e7d.yaml0000664000175000017500000000026700000000000027074 0ustar00zuulzuul00000000000000--- fixes: - | Hitachi driver `bug #2072317 `_: Fix potential data loss due to a network issue during volume deletion. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/hitachi-storage-driver-d38dbd990730388d.yaml0000664000175000017500000000012700000000000026311 0ustar00zuulzuul00000000000000--- features: - New Cinder Hitachi driver based on REST API for Hitachi VSP storages.././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/hitachi-unsupported-drivers-37601e5bfabcdb8f.yaml0000664000175000017500000000035200000000000027701 0ustar00zuulzuul00000000000000--- deprecations: - The Hitachi Block Storage Driver (HBSD) and VSP driver have been marked as unsupported and are now deprecated. enable_unsupported_driver will need to be set to True in cinder.conf to continue to use them.././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/hitachi-vsp-add-gad-volume-514edf8ebeb2e983.yaml0000664000175000017500000000073700000000000027175 0ustar00zuulzuul00000000000000--- features: - | Hitachi driver: Support Global-Active Device (GAD) volume. GAD is a Hitachi storage function that uses volume replication to provide a high-availability environment for hosts across storage systems and sites. New properties are added in the configuration: setting ``hbsd:topology`` to ``active_active_mirror_volumex`` specifies a GAD volume, and the ``hitachi_mirror_xxx`` parameters specify a secondary storage for the GAD volume. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/hitachi-vsp-add-hostgroup-name-format-option-4c8e4a5ddd69b9bd.yaml0000664000175000017500000000371100000000000032754 0ustar00zuulzuul00000000000000--- features: - | Hitachi driver: Add a config option ``hitachi_group_name_format`` for the host group name format. When using this option, users can specify the name format of host groups or iSCSI targets. Rules of the format: * Usable characters are alphanumerics, ".", "@", "_", ":", "-", "{" and "}". "{" and "}" can be used only in variables. * The specified value must start with ``HBSD-``. * You can use the following variables: ``{wwn}`` `FC driver only.` This is replaced with the smallest WWPN of the WWPNs of the connecting node. 
``{ip}`` `iSCSI driver only.` This is replaced with the IP address of the connecting node. ``{host}`` This is replaced with the host name of the connecting node. * You can use each variable in the specified value no more than once. * The specified value must include the following variables: * FC driver: ``{wwn}`` * iSCSI driver: ``{ip}`` * The maximum length of a specified value is as follows: * FC driver: 64 * iSCSI driver: 32 * In the length calculation, use the following values as the length of each variable: * ``{wwn}``: 16 * ``{ip}``: 15 * ``{host}``: 1 * If the specified value includes ``{host}``, the following rules apply: * characters that are not permitted for this parameter, they are replaced with ``_``. * If the length of the name after variable replacement exceeds the maximum length of host group (iSCSI target) names, the host name is truncated so that the length of the host groups or iSCSI targets do not exceed the maximum length. If you specify this parameter, it is recommended that you specify ``True`` for the ``hitachi_group_create`` parameter to collect necessary information automatically. Examples: * FC driver: ``HBSD-{host}-{wwn}`` * iSCSI driver: ``HBSD-{host}-{ip}`` ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/hitachi-vsp-add-multi-pool-4c4589b93399e641.yaml0000664000175000017500000000057600000000000026671 0ustar00zuulzuul00000000000000--- features: - | Supported multi-pools for Hitachi driver and OEM storage drivers. upgrades: - | Hitachi driver and OEM storage drivers: Changed option names ``hitachi_pool`` to ``hitachi_pools``, ``hpexp_pool`` to ``hpexp_pools`` and ``nec_v_pool`` to ``nec_v_pools``. The options ``hitachi_pool``, ``hpexp_pool`` and ``nec_v_pool`` are deprecated.././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/hitachi-vsp-aix-os-type-23bf7cc3b98dff3a.yaml0000664000175000017500000000030300000000000026625 0ustar00zuulzuul00000000000000--- features: - | Hitachi driver: Support AIX as host OS type. When running ``cinder attachment-create`` command with the option ``--ostype aix``, ``AIX`` is set as host OS type. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/hitachi-vsp-driver-87659bb496bb459b.yaml0000664000175000017500000000016100000000000025461 0ustar00zuulzuul00000000000000--- features: - Added new Hitachi VSP FC Driver. The VSP driver supports all Hitachi VSP Family and HUSVM. 
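A brief, hypothetical ``cinder.conf`` fragment for the ``hitachi_group_name_format`` option described above (FC case; the backend section name is a placeholder), pairing it with automatic group creation as the note recommends::

    [hitachi-vsp-fc-1]
    hitachi_group_create = True
    # Host group names become e.g. HBSD-<host>-<smallest WWPN>.
    hitachi_group_name_format = HBSD-{host}-{wwn}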
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/hitachi-vsp-fix-except-in-del-vol-ca8b4c5d40d69531.yaml0000664000175000017500000000031400000000000030242 0ustar00zuulzuul00000000000000fixes: - | Hitachi driver `bug #2024418 `_: Fixed to raise correct exception when volume is busy while performing delete volume operation.././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/hitachi-vsp-fix-keyerr-when-backend-down-a5a35b15dc8f1132.yaml0000664000175000017500000000025000000000000031564 0ustar00zuulzuul00000000000000--- fixes: - | Hitachi, NEC V, HPE XP drivers `bug #2004140 `_: Fixed ``KeyError`` when a backend is down.././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/hitachi-vsp-fix-resource-lock-msg-5a119426e6c65998.yaml0000664000175000017500000000026400000000000030155 0ustar00zuulzuul00000000000000--- fixes: - | Hitachi driver `bug #1989176 `_: Fixed Hitachi driver to output a message for resource lock correctly. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/hitachi-vsp-fix-to-use-correct-HGname-78c3c47dcf984ddf.yaml0000664000175000017500000000023500000000000031205 0ustar00zuulzuul00000000000000--- fixes: - | HPE XP and NEC V driver `bug #2012515 `_: Fixed to use correct Host group name. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/hitachi-vsp-fix-to-use-correct-pool-in-GAD-9413a343dcc98029.yaml0000664000175000017500000000027100000000000031473 0ustar00zuulzuul00000000000000--- fixes: - | Hitachi driver `bug #2011810 `_: Fixed to use correct pool number for secondary storage on GAD environment. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/hitachi-vsp-iscsi-driver-cac31d7c54d7718d.yaml0000664000175000017500000000006500000000000026713 0ustar00zuulzuul00000000000000--- features: - Adds new Hitachi VSP iSCSI Driver. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/hitachi-vsp-port-scheduler-207e01b3cd13350b.yaml0000664000175000017500000000142400000000000027060 0ustar00zuulzuul00000000000000--- features: - | Hitachi driver: Add a feature ``Port Scheduler``. This feature is enabled when specifying ``True`` for the parameter ``hitachi_port_scheduler``. When this feature is enabled and an attach request is received, the active WWNs that are obtained by Fibre Channel Zone Manager will be distributed and registered to the host groups of each port of the storage system. To use this feature, specify ``True`` for both parameters ``hitachi_group_request`` and ``hitachi_rest_name_only_discovery``. If you specify ``False`` or use default value for the ``hitachi_rest_name_only_discovery``, it will take a long time to attach volume, by seeking the host group for all specified ports. This feature is supported on Fibre Channel only. 
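As a minimal sketch of enabling the ``Port Scheduler`` feature from the preceding note in ``cinder.conf`` (the backend section name is illustrative), all three parameters the note calls out are set together::

    [hitachi-vsp-fc-1]
    hitachi_port_scheduler = True
    hitachi_group_request = True
    hitachi_rest_name_only_discovery = True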
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/hitachi-vsp-ports-option-7147289e6529d7fe.yaml0000664000175000017500000000133500000000000026574 0ustar00zuulzuul00000000000000--- features: - Hitachi VSP drivers have a new config option ``vsp_compute_target_ports`` to specify IDs of the storage ports used to attach volumes to compute nodes. The default is the value specified for the existing ``vsp_target_ports`` option. Either or both of ``vsp_compute_target_ports`` and ``vsp_target_ports`` must be specified. - Hitachi VSP drivers have a new config option ``vsp_horcm_pair_target_ports`` to specify IDs of the storage ports used to copy volumes by Shadow Image or Thin Image. The default is the value specified for the existing ``vsp_target_ports`` option. Either or both of ``vsp_horcm_pair_target_ports`` and ``vsp_target_ports`` must be specified. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/hitachi-vsp-support-dedup-comp-4e27d95b34681f66.yaml0000664000175000017500000000050000000000000027646 0ustar00zuulzuul00000000000000--- features: - | Hitachi driver: Support data deduplication and compression, by storage assist. The feature can be worked, if user enable deduplication and compression for the DP-pool, by Configuration Manager REST API, and set the extra spec ``hbsd:capacity_saving`` to ``deduplication_compression`` ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/hitachi-vsp-support-new-storages-d8e8a527462dba24.yaml0000664000175000017500000000021700000000000030357 0ustar00zuulzuul00000000000000--- features: - | Hitachi driver: Additionally support following storages, Hitachi VSP E590, Hitachi VSP E790 and Hitachi VSP E1090. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/hitachi-vsp-support-qos-667ca4f8ae8c2ba2.yaml0000664000175000017500000000007300000000000026703 0ustar00zuulzuul00000000000000--- features: - | Hitachi driver: Added QoS support. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/hitachi-vsp-tgt-port-asgn-7536da008990824a.yaml0000664000175000017500000000054100000000000026517 0ustar00zuulzuul00000000000000--- features: - | Hitachi driver: Add target port assignment. Defining particular ports in extra spec ``hbsd:target_ports`` determines which of the ports specified by the ``hitachi_target_ports`` or the ``hitachi_compute_target_ports`` parameters are used to create LUN paths during volume attach operations for each volume type. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/hitachi-vsp-update-retype-483a9fb48dc667d9.yaml0000664000175000017500000000036700000000000027050 0ustar00zuulzuul00000000000000--- features: - | Hitachi driver: Update retype to different pool and support storage assisted migration. Storage assisted migration feature is also used when retype a volume, which doesn't have any snapshots, to different pool. 
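As a rough illustration of the port-assignment options referenced in the Hitachi notes above (the port IDs below are invented placeholders), a backend could list separate ports for compute attachments::

    [hitachi-vsp-fc-1]
    hitachi_target_ports = CL1-A, CL2-A
    hitachi_compute_target_ports = CL3-A, CL4-A

A volume type can then use the ``hbsd:target_ports`` extra spec mentioned above to restrict its attachments to a subset of those ports.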
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/hitachi_fix-ldevnickname-0a0756449e7448d9.yaml0000664000175000017500000000035400000000000026525 0ustar00zuulzuul00000000000000fixes: - | Hitachi driver `bug #2071697 `_: Fixed setting the correct object ID as the LDEV nickname when running host-assisted migration with the ``retype`` or ``migration`` commands. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/hitachi_fix-testscripts-e4490f9f99994fb8.yaml0000664000175000017500000000030300000000000026634 0ustar00zuulzuul00000000000000--- fixes: - | Hitachi driver `bug #2063317 `_: Fixed test scripts to avoid failures caused by unexpected responses from the pseudo REST API server. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/hnas-change-snapshot-names-8153b043eb7e99fc.yaml0000664000175000017500000000030200000000000027130 0ustar00zuulzuul00000000000000--- deprecations: - Support for snapshots named in the backend as ``snapshot-`` is deprecated. Snapshots are now named in the backend as ``.``. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/hnas-deprecate-iscsi-driver-cd521b3a2ba948f3.yaml0000664000175000017500000000072700000000000027353 0ustar00zuulzuul00000000000000--- upgrade: - The Hitachi NAS iSCSI driver has been marked as unsupported and is now deprecated. ``enable_unsupported_drivers`` will need to be set to ``True`` in cinder.conf to continue to use it. deprecations: - The Hitachi NAS iSCSI driver has been marked as unsupported and is now deprecated. ``enable_unsupported_drivers`` will need to be set to ``True`` in cinder.conf to continue to use it. The driver will be removed in the next release. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/hnas-deprecate-nfs-driver-0d114bbe141b5d90.yaml0000664000175000017500000000031200000000000026722 0ustar00zuulzuul00000000000000--- deprecations: - The Hitachi NAS NFS driver has been marked as unsupported and is now deprecated. enable_unsupported_driver will need to be set to True in cinder.conf to continue to use it.././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/hnas-deprecated-svc-volume-type-77768f27946aadf4.yaml0000664000175000017500000000025200000000000030061 0ustar00zuulzuul00000000000000--- deprecations: - Deprecated the configuration option ``hnas_svcX_volume_type``. Use option ``hnas_svcX_pool_name`` to indicate the name of the services (pools). ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/hnas-drivers-refactoring-9dbe297ffecced21.yaml0000664000175000017500000000053300000000000027231 0ustar00zuulzuul00000000000000upgrade: - HNAS drivers have new configuration paths. Users should now use ``cinder.volume.drivers.hitachi.hnas_nfs.HNASNFSDriver`` for the HNAS NFS driver and ``cinder.volume.drivers.hitachi.hnas_iscsi.HNASISCSIDriver`` for the HNAS iSCSI driver. deprecations: - The old HNAS driver configuration paths have been marked for deprecation. 
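To illustrate the HNAS renames described above, a hedged ``cinder.conf`` excerpt using the new NFS driver path and the ``hnas_svcX_pool_name`` style option (the service index and pool name are placeholders)::

    [hnas-nfs-1]
    volume_driver = cinder.volume.drivers.hitachi.hnas_nfs.HNASNFSDriver
    hnas_svc0_pool_name = default-pool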
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/hnas-list-manageable-9329866618fa9a9c.yaml0000664000175000017500000000014400000000000025653 0ustar00zuulzuul00000000000000--- features: - Added the ability to list manageable volumes and snapshots to the HNAS NFS driver.././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/hnas-manage-unmanage-snapshot-support-40c8888cc594a7be.yaml0000664000175000017500000000012100000000000031340 0ustar00zuulzuul00000000000000--- features: - Added manage/unmanage snapshot support to the HNAS NFS driver. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/hnas-remove-iscsi-driver-419e9c08133f9f0a.yaml0000664000175000017500000000020700000000000026561 0ustar00zuulzuul00000000000000--- upgrade: - The Hitachi NAS Platform iSCSI driver was marked as not supported in the Ocata release and has now been removed. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/hnas_deprecate_xml-16840b5a8c25d15e.yaml0000664000175000017500000000041300000000000025550 0ustar00zuulzuul00000000000000--- upgrade: - HNAS drivers will now read configuration from cinder.conf. deprecations: - The XML configuration file used by the HNAS drivers is now deprecated and will no longer be used in the future. Please use cinder.conf for all driver configuration. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/hpe-3par-add-alletra-9k-info-5e1d09e083d3faa9.yaml0000664000175000017500000000024600000000000027134 0ustar00zuulzuul00000000000000other: - | HPE 3PAR: Documented that the existing driver supports the new Alletra 9k backend. HPE Alletra 9k is a newer version of the existing HPE Primera backend. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/hpe-3par-add-alletra-mp-info-3ed7f5160bf58dbe.yaml0000664000175000017500000000026100000000000027307 0ustar00zuulzuul00000000000000--- other: - | HPE 3PAR: Documented that the existing driver supports the new Alletra MP backend. HPE Alletra MP is a newer version of the existing HPE Alletra 9k backend. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/hpe-3par-add-get-manageable-2926f21116c98599.yaml0000664000175000017500000000026400000000000026526 0ustar00zuulzuul00000000000000--- fixes: - | HPE 3PAR driver `Bug #1819903 `_: Fixed: unmanaged volumes & snapshots missing from cinder manageable-list. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/hpe-3par-add-ipv6-support-a9f453a66c51e6d3.yaml0000664000175000017500000000024000000000000026552 0ustar00zuulzuul00000000000000fixes: - | HPE 3PAR driver `Bug #2045411 `_: Added support for IPv6 addresses in the 3PAR iSCSI driver. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/hpe-3par-calculate-free-capacity-926b60b70bba18b7.yaml0000664000175000017500000000016600000000000030071 0ustar00zuulzuul00000000000000--- other: - | HPE 3PAR driver: In get_volume_stats response, updated the logic to calculate free_capacity ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/hpe-3par-clone-of-repl-vol-914a6e0e105996b4.yaml0000664000175000017500000000024500000000000026532 0ustar00zuulzuul00000000000000--- fixes: - | HPE 3PAR driver `bug #2021941 `_: Fixed: Now clone of replicated volume can be created ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/hpe-3par-code-changes-for-new-wsapi-25865a65a428ce46.yaml0000664000175000017500000000023100000000000030305 0ustar00zuulzuul00000000000000fixes: - | HPE 3PAR driver `Bug #2015746 `_: Fixed: minor code changes to work with new wsapi. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/hpe-3par-code-changes-for-wsapi-2025-75a9fda5d994504c.yaml0000664000175000017500000000024300000000000030254 0ustar00zuulzuul00000000000000fixes: - | HPE 3PAR driver `Bug #2119709 `_: Fixed: skip license check to work with new wsapi (of 2025). ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/hpe-3par-comment-for-cloned-volume-ef16dccf7639452b.yaml0000664000175000017500000000023100000000000030507 0ustar00zuulzuul00000000000000--- fixes: - | HPE 3PAR driver `bug #2062524 `_: Fixed: Added comment for cloned volumes. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/hpe-3par-convert-to-base-vol-delete-snap-a460a4b1c419804a.yaml0000664000175000017500000000101200000000000031326 0ustar00zuulzuul00000000000000--- fixes: - | HPE 3PAR driver `Bug #1994521 `_: Fixed: While performing a delete snapshot (s1) operation, the volumes (v2) dependent on the snapshot (s1) are converted to base volumes. This operation fails if these dependent volumes (v2) have their own dependent snapshots (s2). The errors during the failure were vague and not helpful. With this release, we added conditions to fail this operation early and also added useful error message. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/hpe-3par-fix-multi-detach-in-multi-host-env-3f2211f29a336b6e.yaml0000664000175000017500000000025500000000000032003 0ustar00zuulzuul00000000000000--- fixes: - | HPE 3PAR driver `Bug #1958122 `_: Fixed issue of multi-detach operation in multi host environment. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/hpe-3par-ignore-duplicate-ip-7e67260ee1cab40e.yaml0000664000175000017500000000030000000000000027333 0ustar00zuulzuul00000000000000fixes: - | HPE 3par driver `bug #2112433 `_: Fixed failure observed when vlan ip is same as iSCSI ip by ignoring the duplicate ip ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/hpe-3par-login-getWsApiVersion-0252d655844ae054.yaml0000664000175000017500000000024100000000000027376 0ustar00zuulzuul00000000000000--- fixes: - | HPE 3PAR driver `Bug #2068795 `_: Fixed: Perform login before invoking getWsApiVersion ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/hpe-3par-peer-persistence.yaml-91cc84bf89dbb462.yaml0000664000175000017500000000012300000000000027731 0ustar00zuulzuul00000000000000--- features: - | Added Peer Persistence support in HPE 3PAR cinder driver. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/hpe-3par-pp-primera-a3442d004545b3a9.yaml0000664000175000017500000000011700000000000025324 0ustar00zuulzuul00000000000000--- features: - | Add Peer Persistence support for HPE Primera backend. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/hpe-3par-pp-rcg-policy-options-53271f38c315779f.yaml0000664000175000017500000000035100000000000027377 0ustar00zuulzuul00000000000000--- fixes: - | This change fixes bug 1845483 - 3PAR: For Peer Persistence, add policy options in RCG. Following options are added after Remote Copy Group (RCG) is created: autoFailover, pathManagement, autoRecover ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/hpe-3par-primera-add-iscsi-5af339643dfa0928.yaml0000664000175000017500000000015500000000000026652 0ustar00zuulzuul00000000000000--- features: - | HPE 3PAR Driver: Add support of iSCSI driver for Primera 4.2 or higher versions. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/hpe-3par-return-lun-ids-6bc973ef74d0bf9c.yaml0000664000175000017500000000035200000000000026472 0ustar00zuulzuul00000000000000--- fixes: - | HPE 3PAR driver `bug #2044255 `_: Fixed: In peer persistence setup, when volume is attached to instance, now LUN ids are returned from both the arrays. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/hpe-3par-retype-migrate-2383ddaf92c87f9e.yaml0000664000175000017500000000046500000000000026471 0ustar00zuulzuul00000000000000--- fixes: - | HPE 3PAR driver `Bug #2026718 `_: Fixed: With this patch, added logic to fetch correct volume name on 3par (osv_name), rcg_name and vvset_name (for particular scenarios); so that volumes can be identified and deleted from 3par. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/hpe-3par-retype-thin-deco-2263063d847db454.yaml0000664000175000017500000000025200000000000026365 0ustar00zuulzuul00000000000000--- fixes: - | HPE 3PAR driver `Bug #2080927 `_: Fixed: Retype thin volume to deco volume (dedup + compression) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/hpe-3par-retype-vol-without-comment-a44c9be1ed76e7bb.yaml0000664000175000017500000000025000000000000031115 0ustar00zuulzuul00000000000000--- fixes: - | HPE 3PAR driver `Bug #2023253 `_: Fixed: Handle error during retype of volume without comment ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/hpe-3par-reuse-session-4439cb07b9118867.yaml0000664000175000017500000000025400000000000026025 0ustar00zuulzuul00000000000000--- fixes: - | HPE 3PAR driver `Bug #1940069 `_: Fixed issue of connection rejected by reusing existing session. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/hpe-3par-small-qos-latency-values-d5fa70a605b04335.yaml0000664000175000017500000000024000000000000030170 0ustar00zuulzuul00000000000000--- fixes: - | HPE 3PAR driver `bug #2018994 `_: Fixed: use small QoS Latency value (less than 1) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/hpe-3par-specify-nsp-for-fc-bootable-volume-f372879e1b625b4d.yaml0000664000175000017500000000042400000000000032063 0ustar00zuulzuul00000000000000--- fixes: - | `Bug 1809249 `_ - 3PAR driver adds the config option `hpe3par_target_nsp` that can be set to the 3PAR backend to use when multipath is not enabled and the Fibre Channel Zone Manager is not used. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/hpe-3par-support-duplicated-fqdn-751ad1dbcd137fbb.yaml0000664000175000017500000000030300000000000030405 0ustar00zuulzuul00000000000000--- issues: - | HPE 3PAR driver now supports networks with duplicated FQDNs via configuration option `unique_fqdn_network` so attaching in these networks will work (bug #1834695). ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/hpe-3par-use-vlan-iscsi-ips-f75787b0d281030b.yaml0000664000175000017500000000024400000000000026717 0ustar00zuulzuul00000000000000fixes: - | HPE 3PAR driver `Bug #2015034 `_: Added handling for VLAN iscsi IPs in the 3PAR iSCSI driver. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/hpe-xp-fc-iscsi-cinder-driver-75e04febff42c9ba.yaml0000664000175000017500000000010200000000000027657 0ustar00zuulzuul00000000000000--- features: - | Added backend driver for HPE XP storage. 
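The two 3PAR options called out above (``hpe3par_target_nsp`` and ``unique_fqdn_network``) are both set in the back-end section; in this sketch the section name and the N:S:P value are placeholders.

.. code-block:: ini

   [3par-fc-1]
   # Target N:S:P to use when multipath is not enabled and the
   # Fibre Channel Zone Manager is not used (placeholder value).
   hpe3par_target_nsp = 2:1:2
   # Tell the driver that FQDNs are duplicated on this network.
   unique_fqdn_network = false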
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/hpe3par-replication-group-a18a28d18de09e95.yaml0000664000175000017500000000012200000000000027016 0ustar00zuulzuul00000000000000--- features: - | Added replication group support in HPE 3PAR cinder driver.././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/hpe_3par_multiattach-bf98a9e5c2208902.yaml0000664000175000017500000000010500000000000026034 0ustar00zuulzuul00000000000000--- features: - Enabled multiattach capability for hpe3par driver. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/hpe_3par_multiattach_detach_fix-d2d3785d656fba90.yaml0000664000175000017500000000042400000000000030277 0ustar00zuulzuul00000000000000--- fixes: - | HPE 3PAR driver: The detach issue for multiattach capability (`Bug 1834660 `_) was fixed in the Cinder 15.0.0 (Train) release, but due to an oversight, the fix has not been announced until now. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/hpelh-deprecations-eb7716a0b02f145e.yaml0000664000175000017500000000100000000000000025544 0ustar00zuulzuul00000000000000--- upgrade: - | HPE LeftHand config options ``hplefthand_api_url``, ``hplefthand_username``, ``hplefthand_password``, ``hplefthand_clustername``, ``hplefthand_iscsi_chap_enabled``, and ``hplefthand_debug`` were deprecated in the Mitaka release and have now been removed. The corresponding ``hpelefthand_api_url``, ``hpelefthand_username``, ``hpelefthand_password``, ``hpelefthand_clustername``, ``hpelefthand_iscsi_chap_enabled``, and ``hpelefthand_debug`` should be used instead. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/hpmsa-driver-updates-train-4fcbe71f3e2bb2da.yaml0000664000175000017500000000254700000000000027467 0ustar00zuulzuul00000000000000--- upgrade: - | The HPE MSA driver options ``hpmsa_backend_name`` and ``hpmsa_backend_type`` options were deprecated in favor of ``hpmsa_pool_name`` and ``hpmsa_pool_type`` to avoid confusion, and the ``hpmsa_api_protocol``, ``hpmsa_verify_certificate``, and ``hpmsa_verify_certificate_path`` options were deprecated in favor of the standard ``driver_use_ssl``, ``driver_ssl_cert_verify``, and ``driver_ssl_cert_path`` options. To retain the default behavior, add ``driver_use_ssl = true`` to back-end entries in ``cinder.conf`` before the deprecated options are removed in a future release. deprecations: - | The HPE MSA driver options ``hpmsa_backend_name`` and ``hpmsa_backend_type`` options were deprecated in favor of ``hpmsa_pool_name`` and ``hpmsa_pool_type`` to avoid confusion, and the ``hpmsa_api_protocol``, ``hpmsa_verify_certificate``, and ``hpmsa_verify_certificate_path`` options were deprecated in favor of the standard ``driver_use_ssl``, ``driver_ssl_cert_verify``, and ``driver_ssl_cert_path`` options. To retain the default behavior, add ``driver_use_ssl = true`` to back-end entries in ``cinder.conf`` before the deprecated options are removed in a future release. fixes: - | Fixed HPE MSA driver issue where a multi-attached volume could be unmapped while still in use. 
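To keep the pre-deprecation HPE MSA behavior described above while moving to the new option names, a back-end entry could be updated along these lines; the section name, pool name and pool type are placeholder assumptions.

.. code-block:: ini

   [hpmsa-1]
   # Replaces the deprecated hpmsa_backend_name / hpmsa_backend_type.
   hpmsa_pool_name = A
   hpmsa_pool_type = virtual
   # Replaces hpmsa_api_protocol / hpmsa_verify_certificate(_path) and
   # preserves the old default of talking to the array over HTTPS.
   driver_use_ssl = true
   driver_ssl_cert_verify = false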
././@PaxHeader0000000000000000000000000000020600000000000011453 xustar0000000000000000112 path=cinder-27.0.0/releasenotes/notes/huawei-backend-capabilities-report-optimization-d1c18d9f62ef71aa.yaml 22 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/huawei-backend-capabilities-report-optimization-d1c18d9f62ef71aa.ya0000664000175000017500000000011600000000000033152 0ustar00zuulzuul00000000000000--- features: - Optimize backend reporting capabilities for Huawei drivers. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/huawei-fusionstorage-driver-readd-70fecc39eeaa0f5f.yaml0000664000175000017500000000030100000000000031022 0ustar00zuulzuul00000000000000upgrade: - | The Huawei FusionStorage driver had previously been marked unsupported. The 3rd Party CI has been restored, and the driver is now fully supported in the Ussuri release. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/huawei-fusionstorage-unsupported-4be766dd2ba8f980.yaml0000664000175000017500000000120300000000000030635 0ustar00zuulzuul00000000000000--- upgrade: - | The Huawei Fusionstorage driver has been marked as unsupported and is now deprecated. ``enable_unsupported_driver`` will need to be set to ``True`` in the driver's section in cinder.conf to continue to use the driver. deprecations: - | The Huawei Fusionstorage driver has been marked as unsupported due to a lack of Python3.7 support and is now deprecated. ``enable_unsupported_driver`` will need to be set to ``True`` in the driver's section in cinder.conf to continue to use them. If Python3.7 support is not demonstrated, the driver will be removed in the 'U' development cycle. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/huawei-generic-group-bc3fb7236efc58e7.yaml0000664000175000017500000000011700000000000026207 0ustar00zuulzuul00000000000000--- features: - Add CG capability to generic volume groups in Huawei driver. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/huawei-iscsi-multipath-support-a056201883909287.yaml0000664000175000017500000000007600000000000027552 0ustar00zuulzuul00000000000000--- upgrade: - Support for iSCSI multipath in Huawei driver.././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/huawei-manage-unmanage-snapshot-e35ff844d72fedfb.yaml0000664000175000017500000000011400000000000030400 0ustar00zuulzuul00000000000000--- features: - Added manage/unmanage snapshot support for Huawei drivers.././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/huawei-pool-disktype-support-7c1f64639b42a48a.yaml0000664000175000017500000000012100000000000027523 0ustar00zuulzuul00000000000000--- features: - Add support for reporting pool disk type in Huawei driver. 
././@PaxHeader0000000000000000000000000000021000000000000011446 xustar0000000000000000114 path=cinder-27.0.0/releasenotes/notes/huawei-support-iscsi-configuration-in-replication-7ec53737b95ffa54.yaml 22 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/huawei-support-iscsi-configuration-in-replication-7ec53737b95ffa54.0000664000175000017500000000011600000000000033026 0ustar00zuulzuul00000000000000--- upgrade: - Support iSCSI configuration in replication in Huawei driver. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/huawei-support-manage-volume-2a746cd05621423d.yaml0000664000175000017500000000011300000000000027361 0ustar00zuulzuul00000000000000--- features: - Added manage/unmanage volume support for Huawei drivers. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/hybrid-aggregates-in-netapp-cdot-drivers-f6afa9884cac4e86.yaml0000664000175000017500000000012100000000000032046 0ustar00zuulzuul00000000000000--- features: - Add support for hybrid aggregates to the NetApp cDOT drivers. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/ibm-flashsystem-manage-unmanage-88e56837102f838c.yaml0000664000175000017500000000013400000000000027736 0ustar00zuulzuul00000000000000--- features: - Volume manage/unmanage support for IBM FlashSystem FC and iSCSI drivers. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/ibm-flashsystem-unsupported-28f9aaf11b56fb2f.yaml0000664000175000017500000000112000000000000027640 0ustar00zuulzuul00000000000000--- upgrade: - | The IBM Flashsystem drivers have been marked as unsupported and are now deprecated. ``enable_unsupported_driver`` will need to be set to ``True`` in the driver's section in cinder.conf to continue to use them. deprecations: - | The IBM Flashsystem drivers have been marked as unsupported and are now deprecated. ``enable_unsupported_driver`` will need to be set to ``True`` in the driver's section in cinder.conf to continue to use them. If the support status does not change, the drivers will be removed in the 'U' development cycle. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/ibm-gpfs-unsupported-b95274829573835d.yaml0000664000175000017500000000105700000000000025631 0ustar00zuulzuul00000000000000--- upgrade: - | The IBM GPFS driver has been marked as unsupported and is now deprecated. ``enable_unsupported_driver`` will need to be set to ``True`` in the driver's section in cinder.conf to continue to use it. deprecations: - | The IBM GPFS driver has been marked as unsupported and is now deprecated. ``enable_unsupported_driver`` will need to be set to ``True`` in the driver's section in cinder.conf to continue to use it. If its support status does not change, it will be removed in the 'U' development cycle. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/ibm-storage-supported-a373a54777333929.yaml0000664000175000017500000000036000000000000025756 0ustar00zuulzuul00000000000000--- upgrade: - | IBM DS8000 drivers had been previously marked unsupported. 
Testing requirements have been addressed and they are now fully supported again. IBM DS8000 drivers allow Cinder to manage volumes in FC environment. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/ibm-storage-unsupported-9e8ea6ce9cea503d.yaml0000664000175000017500000000114200000000000027036 0ustar00zuulzuul00000000000000--- upgrade: - | The IBM Storage drivers (XIV & DS8k) have been marked as unsupported and are now deprecated. ``enable_unsupported_driver`` will need to be set to ``True`` in the driver's section in cinder.conf to continue to use them. deprecations: - | The IBM Storage drivers (XIV & DS8k) have been marked as unsupported and are now deprecated. ``enable_unsupported_driver`` will need to be set to ``True`` in the driver's section in cinder.conf to continue to use them. If the support status does not change, the drivers will be removed in the 'U' development cycle. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/ibm-storwize-removehostmappings-e7eeaf898786c6bf.yaml0000664000175000017500000000046100000000000030571 0ustar00zuulzuul00000000000000--- upgrade: - | IBM Spectrum Virtualize Family (previously known as Storwize) driver cannot delete volume which has host mapping in some rare cases while code_level of IBM Spectrum Virtualize Family storage lower than 7.7.0.0. Please upgrade to latest code to avoid this kind of issue. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/ibm-storwize-supported-6518628fb78d58a4.yaml0000664000175000017500000000051700000000000026353 0ustar00zuulzuul00000000000000--- upgrade: - | IBM Spectrum Virtualize Family (previously known as Storwize) drivers had been previously marked unsupported. Testing requirements have been addressed and they are now fully supported again. IBM Spectrum Virtualize Family drivers allow Cinder to manage volumes both in iSCSI and FC environments. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/ibm-storwize-unsupported-e79cfd27523f013c.yaml0000664000175000017500000000110300000000000027027 0ustar00zuulzuul00000000000000--- upgrade: - | The IBM Storwize drivers have been marked as unsupported and are now deprecated. ``enable_unsupported_driver`` will need to be set to ``True`` in the driver's section in cinder.conf to continue to use them. deprecations: - | The IBM Storwize drivers have been marked as unsupported and are now deprecated. ``enable_unsupported_driver`` will need to be set to ``True`` in the driver's section in cinder.conf to continue to use them. If the support status does not change, they will be removed in the 'U' development cycle. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/ibm-storwzie-mirror-volume-ffe4c9bde78cdf1d.yaml0000664000175000017500000000011200000000000027650 0ustar00zuulzuul00000000000000--- features: - Add mirrored volume support in IBM SVC/Storwize driver. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/ibm-svf-add-cleanrate-support-e246a8f218d2f22e.yaml0000664000175000017500000000032000000000000027543 0ustar00zuulzuul00000000000000--- features: - | IBM Spectrum Virtualize Family driver: Added support for clean_rate parameter. Clean_rate parameter can now be passed as extra-spec in volume-type or fetched from cinder.conf. ././@PaxHeader0000000000000000000000000000021700000000000011455 xustar0000000000000000121 path=cinder-27.0.0/releasenotes/notes/ibm-svf-delete-volume-flag-support-for-deletevolumegroup-4224db1ca798a3bf.yaml 22 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/ibm-svf-delete-volume-flag-support-for-deletevolumegroup-4224db1ca70000664000175000017500000000035500000000000033273 0ustar00zuulzuul00000000000000--- features: - | IBM Spectrum Virtualize Family driver: Added `--delete-volumes` flag support for delete volumegroup operation. After adding support, the volumes can optionally be deleted when the volume group is deleted. ././@PaxHeader0000000000000000000000000000020700000000000011454 xustar0000000000000000113 path=cinder-27.0.0/releasenotes/notes/ibm-svf-manage-gmcv-change-volumes-on-childpools-9d2217e1e6f07a0e.yaml 22 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/ibm-svf-manage-gmcv-change-volumes-on-childpools-9d2217e1e6f07a0e.y0000664000175000017500000000020300000000000032515 0ustar00zuulzuul00000000000000--- features: - | IBM Spectrum Virtualize Family driver: Added support to manage GMCV volumes on separate storage pools. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/ibm-svf-manage-host-attachment-using-portsets-0003c54b185f0eb2.yaml0000664000175000017500000000022400000000000032603 0ustar00zuulzuul00000000000000--- features: - | IBM Spectrum Virtualize Family driver: Added support to manage host attachment using portsets for code level >= 8.4.2.0 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/ibm-svf-provide-IOPs-based-storage-offering-1b7532f42fd6d76e.yaml0000664000175000017500000000027700000000000032161 0ustar00zuulzuul00000000000000--- features: - | IBM Spectrum Virtualize Family driver: Added fucntionality that returns throttle rate of maximum IOPS and bandwidth of all VDisks of a specified storage pool. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/ibm-svf-support-for-temporary-volumegroup-3db871245b864a52.yaml0000664000175000017500000000052400000000000032115 0ustar00zuulzuul00000000000000--- features: - | IBM Storage Virtualize Family driver: Added support to create, update and delete temporary volumegroup. upgrade: - | IBM Storage Virtualize Family driver: The configuration option 'storwize_volume_group', which was introduced in Antelope release but was never fully operational is hereby removed. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/ibm-svf-support-hyperswap-volume-extend-f578efa02314faff.yaml0000664000175000017500000000023300000000000032041 0ustar00zuulzuul00000000000000--- features: - | IBM Spectrum Virtualize Family driver: Added volume-extend support for volumes created using a HyperSwap volume-type template. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/ibm-svf-volumegroup-configuration-parameter-44fe67bebe284191.yaml0000664000175000017500000000025400000000000032571 0ustar00zuulzuul00000000000000--- features: - | IBM Spectrum Virtualize Family driver: Added `storwize_volume_group` parameter in the cinder configuration to support volume group feature. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/ibm-svf-volumegroup-snapshot-support-0a16d8a065501d66.yaml0000664000175000017500000000020500000000000031130 0ustar00zuulzuul00000000000000--- features: - | IBM Spectrum Virtualize Family driver: Added support for creation and deletion of volumegroup snapshots. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/ibm-svf-volumegroup-support-134fc2194ad092bd.yaml0000664000175000017500000000036100000000000027437 0ustar00zuulzuul00000000000000--- features: - | IBM Spectrum Virtualize Family driver: Added support for volumegroup for SVC Code Level 8.5.1.0 and above. User can now create, modify and delete volumegroup using the exising cinder CLI for group operations. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/ibmsvciogrpselection-e607739b6f655a27.yaml0000664000175000017500000000062400000000000026122 0ustar00zuulzuul00000000000000--- features: - | In IBM Storwize_SVC driver, user could specify only one IO group per backend definition. The user now may specify a comma separated list of IO groups, and at the time of creating the volume, the driver will select an IO group which has the least number of volumes associated with it. The change is backward compatible, meaning single value is still supported. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/idempotent-glance-metadata-aa78e09736cf57d9.yaml0000664000175000017500000000027200000000000027210 0ustar00zuulzuul00000000000000--- fixes: - | `Bug 1823445 `_: Fix an issue with bulk updates of volume Glance metadata when keys exist but are unchanged. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/iet-deprecation-f8059417c6adbb78.yaml0000664000175000017500000000117100000000000025076 0ustar00zuulzuul00000000000000--- upgrade: - | The IET iSCSI target driver has been marked deprecated and will be removed in the "V" release. The IET iSCSI target project is no longer active and is not supported by all distributions. It is recommended to migrate to a supported distribution and iSCSI target prior to upgrading. deprecations: - | The IET iSCSI target driver has been marked deprecated and will be removed in the "V" release. The IET iSCSI target project is no longer active and is not supported by all distributions. It is recommended to migrate to a supported distribution and iSCSI target prior to upgrading. 
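Because the IET target is deprecated as noted above, an LVM backend that still uses it would typically be repointed at a supported target administrator before upgrading; the section name below is a placeholder and LIO is only one possible choice.

.. code-block:: ini

   [lvm-1]
   volume_driver = cinder.volume.drivers.lvm.LVMVolumeDriver
   volume_group = cinder-volumes
   # Replace the deprecated IET helper (ietadm) with a supported target.
   target_helper = lioadm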
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/image-metadata-size-increase-323812970dc0e513.yaml0000664000175000017500000000061300000000000027160 0ustar00zuulzuul00000000000000--- fixes: - | `Bug #1988942 `_: Increased size of volume image metadata values accepted by the Block Storage API. Volume image metadata values were limited to 255 characters but Glance allows up to 65535 bytes. This change does not affect the database tables which already allow up to 65535 bytes for image metadata values. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/image-volume-type-c91b7cff3cb41c13.yaml0000664000175000017500000000015300000000000025507 0ustar00zuulzuul00000000000000--- features: - Support cinder_img_volume_type property in glance image metadata to specify volume type. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/improve-volume-transfer-records-5599e82ade4d302c.yaml0000664000175000017500000000041400000000000030264 0ustar00zuulzuul00000000000000--- features: - | Expanded volume transfer information. Starting with microversion 3.57, ``source_project_id``, ``destination_project_id``, and ``accepted`` fields will be returned in the response of the volume transfer create, show, and list calls. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/improvement-to-get-group-detail-0e8b68114e79a8a2.yaml0000664000175000017500000000023700000000000030073 0ustar00zuulzuul00000000000000--- features: - Added support for querying group details with volume ids which are in this group. For example, "groups/{group_id}?list_volume=True". ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/improvement-to-query-consistency-group-detail-84a906d45383e067.yaml0000664000175000017500000000027200000000000032664 0ustar00zuulzuul00000000000000--- features: - Added support for querying volumes filtered by group_id using 'group_id' optional URL parameter. For example, "volumes/detail?group_id={consistency_group_id}". ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/incorrect-host-config-option-347e60f957458d54_new.yaml0000664000175000017500000000043400000000000030201 0ustar00zuulzuul00000000000000--- fixes: - | `Bug #1941068 `_: Fixed type of the ``host`` configuration option. It was limited to valid FQDN values when we document that it isn't. This may result in the ``cinder-manage db sync`` command failing. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/increase_glance_num_retries-66b455a0729c4535.yaml0000664000175000017500000000065700000000000027322 0ustar00zuulzuul00000000000000--- upgrade: - | The default value of the configuration option, ``glance_num_retries``, has been changed to 3 in this release. Its former value was 0. The option controls how many times to retry a Glance API call in response to a HTTP connection failure, timeout or ServiceUnavailable status. By this change, Cinder can be more resilient to temporary failure and continue the request if a retry succeeds. 
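The retry behavior described above needs no configuration change, but the option can be made explicit (or raised further) in ``cinder.conf``; this is only a sketch of where the option lives.

.. code-block:: ini

   [DEFAULT]
   # Retries for Glance API calls on connection failure, timeout or
   # ServiceUnavailable; the default changed from 0 to 3.
   glance_num_retries = 3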
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/infinidat-add-infinibox-driver-67cc33fc3fbff1bb.yaml0000664000175000017500000000010000000000000030245 0ustar00zuulzuul00000000000000--- features: - Added driver for the InfiniBox storage array. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/infinidat-add-storage-assisted-migration-4e12f24ee297ef65.yaml0000664000175000017500000000021000000000000031755 0ustar00zuulzuul00000000000000--- features: - | Infinidat: Added support for storage assisted volume migration within a same InfiniBox host (iSCSI and FC). ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/infinidat-compression-a828904aaba90da2.yaml0000664000175000017500000000041300000000000026360 0ustar00zuulzuul00000000000000--- features: - Added support for volume compression in INFINIDAT driver. Compression is available on InfiniBox 3.0 onward. To enable volume compression, set ``infinidat_use_compression`` to True in the backend section in the Cinder configuration file. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/infinidat-group-support-44cd0715de1ea502.yaml0000664000175000017500000000012200000000000026600 0ustar00zuulzuul00000000000000--- features: - Add CG capability to generic volume groups in INFINIDAT driver. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/infinidat-infinisdk-04f0edc0d0a597e3.yaml0000664000175000017500000000015100000000000025777 0ustar00zuulzuul00000000000000--- upgrade: - INFINIDAT volume driver now requires the 'infinisdk' python module to be installed. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/infinidat-iscsi-support-78e0d34d9e7e08c4.yaml0000664000175000017500000000010300000000000026606 0ustar00zuulzuul00000000000000--- features: - Support for iSCSI in INFINIDAT InfiniBox driver. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/infinidat-manage-unmanage-ccc42b79d741369f.yaml0000664000175000017500000000026500000000000027011 0ustar00zuulzuul00000000000000--- features: - | Infinidat driver: Added support to manage and unmanage volumes and snapshots. Also added the functionality to list the manageable volumes and snapshots. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/infinidat-max-osr-2d9fd2d0f9424657.yaml0000664000175000017500000000032700000000000025276 0ustar00zuulzuul00000000000000--- features: - Added support for oversubscription in thin provisioning in the INFINIDAT InfiniBox driver. To use oversubscription, define ``max_over_subscription_ratio`` in the cinder configuration file. 
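A sketch combining the two InfiniBox options discussed above; the back-end section name and the ratio are examples only, and the driver path shown is the standard InfiniBox driver rather than something defined by these notes.

.. code-block:: ini

   [infinibox-1]
   volume_driver = cinder.volume.drivers.infinidat.InfiniboxVolumeDriver
   # Volume compression; requires InfiniBox 3.0 or later.
   infinidat_use_compression = true
   # Thin-provisioning oversubscription for this backend.
   max_over_subscription_ratio = 10.0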
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/infinidat-multi-attach-support-533b3e559c15801f.yaml0000664000175000017500000000012500000000000027720 0ustar00zuulzuul00000000000000--- features: - Support for volume multi-attach in the INFINIDAT InfiniBox driver. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/infinidat-qos-50d743591543db98.yaml0000664000175000017500000000017100000000000024346 0ustar00zuulzuul00000000000000--- features: - Added support for QoS in the INFINIDAT InfiniBox driver. QoS is available on InfiniBox 4.0 onward. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/inspur-as13000-cinder-driver-bfa5cc17683d87a9.yaml0000664000175000017500000000011400000000000027137 0ustar00zuulzuul00000000000000--- features: - | New Cinder volume driver for Inspur AS13000 series. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/inspur-instorage-fc-cinder-driver-70c13e4a64d785d5.yaml0000664000175000017500000000011200000000000030360 0ustar00zuulzuul00000000000000--- features: - | New FC Cinder volume driver for Inspur Instorage. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/itri-disco-driver-removal-11e14fbf431ea876.yaml0000664000175000017500000000125400000000000027012 0ustar00zuulzuul00000000000000--- upgrade: - | The ITRI DISCO storage driver has been removed after completion of its deprecation period without a reliable 3rd Party CI system being supported. Customers using the ITRI DISCO driver should not upgrade Cinder without first migrating all volumes from their DISCO backend to a supported storage backend. Failure to migrate volumes will result in no longer being able to access volumes back by the ITRI DISCO storage backend. other: - | The ITRI DISCO storage driver was marked unsupported in Rocky due to 3rd Party CI not meeting Cinder's requirements. As a result the driver is removed starting from the Stein release. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/joviandss-iscsi-driver-0becc6ee6a0b3c0a.yaml0000664000175000017500000000032100000000000026663 0ustar00zuulzuul00000000000000--- features: - | Added support for Open-E JovianDSS data storage. Driver supports Open-E disaster recovery feature and cascade volume deletion in addition to support minimum required functions. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/json-schema-validation-0d22576bd556f4e0.yaml0000664000175000017500000000170300000000000026270 0ustar00zuulzuul00000000000000--- other: - | Added schema validation support using jsonschema `[json-schema-validation]`_ for all supported v3 APIs. Following APIs were accepting boolean parameters with leading and trailing white spaces (for e.g. " true "). But now with schema validation support, all these boolean parameters henceforth will not accept leading and trailing whitespaces to maintain consistency. 
* Generic volume groups: * delete group: "POST /v3/{project_id}/groups/{group_id}/action" * failover replication: "POST /v3/{project_id}/groups/{group_id}/action" * Volume Snapshots: * create a snapshot: "POST /v3/{project_id}/snapshots" * Volume_actions: * set bootable: "POST /v3/{project_id}/volumes/{volume_id}/action" * volume readonly update: "POST /v3/{project_id}/volumes/{volume_id}/action" .. _`[json-schema-validation]`: https://blueprints.launchpad.net/cinder/+spec/json-schema-validation ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/k2-disable-discovery-bca0d65b5672ec7b.yaml0000664000175000017500000000034400000000000026070 0ustar00zuulzuul00000000000000--- features: - | Kaminario K2 iSCSI driver now supports non discovery multipathing (Nova and Cinder won't use iSCSI sendtargets) which can be enabled by setting `disable_discovery` to `true` in the configuration. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/k2-non-unique-fqdns-b62a269a26fd53d5.yaml0000664000175000017500000000027700000000000025540 0ustar00zuulzuul00000000000000--- issues: - | Kaminario K2 now supports networks with duplicated FQDNs via configuration option `unique_fqdn_network` so attaching in these networks will work (bug #1720147). ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/kaminario-cinder-driver-bug-1646692-7aad3b7496689aa7.yaml0000664000175000017500000000011300000000000030137 0ustar00zuulzuul00000000000000--- fixes: - Fixed Non-WAN port filter issue in Kaminario iSCSI driver. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/kaminario-cinder-driver-bug-1646766-fe810f5801d24f2f.yaml0000664000175000017500000000016500000000000030134 0ustar00zuulzuul00000000000000--- fixes: - Fixed issue of managing a VG with more than one volume in Kaminario FC and iSCSI Cinder drivers. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/kaminario-cinder-driver-bug-44c728f026394a85.yaml0000664000175000017500000000024200000000000027057 0ustar00zuulzuul00000000000000--- fixes: - | Kaminario FC and iSCSI drivers: Fixed `bug 1829398 `_ where force detach would fail. ././@PaxHeader0000000000000000000000000000020600000000000011453 xustar0000000000000000112 path=cinder-27.0.0/releasenotes/notes/kaminario-cinder-driver-remove-deprecate-option-831920f4d25e2979.yaml 22 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/kaminario-cinder-driver-remove-deprecate-option-831920f4d25e2979.ya0000664000175000017500000000017200000000000032514 0ustar00zuulzuul00000000000000--- upgrade: - Removed deprecated option ``kaminario_nodedup_substring`` in Kaminario FC and iSCSI Cinder drivers. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/kaminario-fc-cinder-driver-8266641036281a44.yaml0000664000175000017500000000012100000000000026514 0ustar00zuulzuul00000000000000--- features: - New FC Cinder volume driver for Kaminario K2 all-flash arrays. 
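The Kaminario options referenced above (``disable_discovery`` and ``unique_fqdn_network``) are per-backend settings; in this sketch the section name is a placeholder and the driver path is the usual K2 iSCSI driver, not something introduced by these notes.

.. code-block:: ini

   [kaminario-iscsi-1]
   volume_driver = cinder.volume.drivers.kaminario.kaminario_iscsi.KaminarioISCSIDriver
   # Use non-discovery multipathing (no iSCSI sendtargets discovery).
   disable_discovery = true
   # Needed when attaching on networks with duplicated FQDNs.
   unique_fqdn_network = false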
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/kaminario-iscsi-cinder-driver-c34fadf63cd253de.yaml0000664000175000017500000000012400000000000030041 0ustar00zuulzuul00000000000000--- features: - New iSCSI Cinder volume driver for Kaminario K2 all-flash arrays. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/lefthand-consistency-groups-d73f8e418884fcc6.yaml0000664000175000017500000000013300000000000027473 0ustar00zuulzuul00000000000000--- features: - Consistency group support has been added to the LeftHand backend driver. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/lefthand-manage-unmanage-snapshot-04de39d268d51169.yaml0000664000175000017500000000012500000000000030317 0ustar00zuulzuul00000000000000--- features: - Added snapshot manage/unmanage support to the HPE LeftHand driver. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/lenovo-driver-updates-train-f2ff96ca4a2885db.yaml0000664000175000017500000000256200000000000027540 0ustar00zuulzuul00000000000000--- upgrade: - | The Lenovo driver options ``lenovo_backend_name`` and ``lenovo_backend_type`` options were deprecated in favor of ``lenovo_pool_name`` and ``lenovo_pool_type`` to avoid confusion, and the ``lenovo_api_protocol``, ``lenovo_verify_certificate``, and ``lenovo_verify_certificate_path`` options were deprecated in favor of the standard ``driver_use_ssl``, ``driver_ssl_cert_verify``, and ``driver_ssl_cert_path`` options. To retain the default behavior, add ``driver_use_ssl = true`` to back-end entries in ``cinder.conf`` before the deprecated options are removed in a future release. deprecations: - | The Lenovo driver options ``lenovo_backend_name`` and ``lenovo_backend_type`` options were deprecated in favor of ``lenovo_pool_name`` and ``lenovo_pool_type`` to avoid confusion, and the ``lenovo_api_protocol``, ``lenovo_verify_certificate``, and ``lenovo_verify_certificate_path`` options were deprecated in favor of the standard ``driver_use_ssl``, ``driver_ssl_cert_verify``, and ``driver_ssl_cert_path`` options. To retain the default behavior, add ``driver_use_ssl = true`` to back-end entries in ``cinder.conf`` before the deprecated options are removed in a future release. fixes: - | Fixed Lenovo driver issue where a multi-attached volume could be unmapped while still in use. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/leverage-compression-accelerator-579c7032290cd1e9.yaml0000664000175000017500000000463600000000000030302 0ustar00zuulzuul00000000000000--- features: - | A general framework to accommodate hardware compression accelerators for compression of volumes uploaded to the Image service (Glance) as images and decompression of compressed images used to create volumes is introduced. The only accelerator supported in this release is Intel QuickAssist Technology (QAT), which produces a compressed file in gzip format. Refer to this `Cinder documentation `_ for more information about using this feature. Additionally, the framework provides software-based compression using GUNzip tool if a suitable hardware accelerator is not available. 
Because this software fallback could cause performance problems if the Cinder services are not deployed on sufficiently powerful nodes, the default setting is *not* to enable compression on image upload or download. The compressed image of a volume will be stored in the Image service (Glance) with the ``container_format`` image property of ``compressed``. See the `Image service documentation `_ for more information about this image container format. issues: - | In the Image service (Glance), the ``compressed`` container format identifier does not indicate a particular compression technology; it is up to the image consumer to determine what compression has been used, and there is no requirement that OpenStack services must support arbitrary compression technologies. For the upload and download of compressed images, Cinder supports *only* the gzip format. While you may expect that Cinder will be able to consume any image in ``compressed`` container format *that Cinder has created*, you should not expect Cinder to be able to successfully use an image in ``compressed`` format that it has not created itself. upgrade: - | Added string config option ``compression_format`` in [default] section of cinder.conf to specify image compression format. Currently the only legal value for this option is ``gzip``. - | Added boolean config option ``allow_compression_on_image_upload`` in [default] section of cinder.conf to enable/disable image compression on image upload. The default value of this option is ``false``, which means image compression is disabled. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/lightbits-qos-support-1a44a9fcdfd1ee93.yaml0000664000175000017500000000105400000000000026536 0ustar00zuulzuul00000000000000--- features: - | Lightbits driver: allows administrators to better manage and optimize storage performance by associating QoS policies with volume types. * Administrators must first create the required QoS policy on the vendor side. * Once the QoS policy is created, it can be linked to a volume type in the system using the policy's unique UUID. Example: .. code-block:: bash openstack volume type create LightbitsWithQos --property volume_backend_name= --property=lightos:qos_policy= ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/lightbits-snapshot-timeout-6b25dbd15a650d52.yaml0000664000175000017500000000076400000000000027320 0ustar00zuulzuul00000000000000--- features: - | Lightbits driver: Added support to create multiple snapshots from the same volume simultaneously when using the Lightbits cinder driver. Under certain conditions, older releases of the Lightbits api-service will return various status codes (including HTTP status codes 500 and 503) that could indicate transient failures. Added retry logic on such errors becuase there's a good chance that the error is transient and subsequent calls will succeed. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/lightbits-volume-ipacl-23da3aa469689817.yaml0000664000175000017500000000042300000000000026245 0ustar00zuulzuul00000000000000--- features: - | Lightbits driver: Added a new configuration option ``lightos_use_ipacl``, defaulting to true. 
When set to true, the Cinder driver will restrict access to each volume to the IP addresses of the host machine that the volume is attached to. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/list-manageable-86c77fc39c5b2cc9.yaml0000664000175000017500000000031400000000000025133 0ustar00zuulzuul00000000000000--- features: - Added the ability to list manageable volumes and snapshots via GET operation on the /v2//os-volume-manage and /v2//os-snapshot-manage URLs, respectively. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/live_migration_v3-ae98c0d00e64c954.yaml0000664000175000017500000000007300000000000025433 0ustar00zuulzuul00000000000000--- fixes: - Fixed live migration on EMC VMAX3 backends. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/lock_path-940af881b2112bbe.yaml0000664000175000017500000000102200000000000023736 0ustar00zuulzuul00000000000000--- features: - | os-brick file lock location can be specified independently of the Cinder service lock location using ``lock_path`` in the ``[os_brick]`` configuration section. Useful for HCI deployments and when running Cinder and Glance with Cinder backend on the same host. upgrade: - | On HCI deployments and when running Cinder and Glance with Cinder backend on the same host an os-brick shared location can be configured using the ``lock_path`` in the ``[os_brick]`` configuration section. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/lvm-delete-error-f12da00c1b3859dc.yaml0000664000175000017500000000031000000000000025234 0ustar00zuulzuul00000000000000--- fixes: - | LVM driver `bug #1901783 `_: Fix unexpected delete volume failure due to unexpected exit code 139 on ``lvs`` command call. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/lvm-ipv6-fix-e8d418726c92bbd5.yaml0000664000175000017500000000017200000000000024265 0ustar00zuulzuul00000000000000--- fixes: - | LVM iSCSI driver fix for IPv6 addresses for the different targets, IET, LIO, TGT, CXT, and SCST. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/lvm-minimum-version-2-02-107-df4551d088f8b5a3.yaml0000664000175000017500000000011200000000000026626 0ustar00zuulzuul00000000000000--- upgrade: - | Cinder now requires LVM version 2.02.107 or newer. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/lvm-nvmet-fixes-fc5e867abc699633.yaml0000664000175000017500000000057400000000000025102 0ustar00zuulzuul00000000000000--- fixes: - | LVM nvmet target `bug #1964391 `_: Fixed temporary disconnection of all volumes from all hosts when creating and removing volume exports. - | LVM nvmet target `bug #1964394 `_: Fixed annoying kernel log message when exporting a volume. 
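For the ``[os_brick]`` ``lock_path`` option described above, a minimal sketch of a co-located (HCI, or Glance-on-Cinder) deployment; both directory paths are placeholders.

.. code-block:: ini

   [oslo_concurrency]
   # Cinder's own service lock directory.
   lock_path = /var/lib/cinder/tmp

   [os_brick]
   # Shared os-brick lock directory, set independently so other services
   # on the same host can point at the same location.
   lock_path = /var/lib/openstack/os-brick-locks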
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/lvm-nvmet-new-conn_props-25320e34d6ca6ac7.yaml0000664000175000017500000000027400000000000026675 0ustar00zuulzuul00000000000000--- features: - | LVM nvmet target: Added support for new nvmeof connection properties format (version 2). Controlled with ``nvmeof_conn_info_version`` configuration option. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/lvm-nvmet-tcp-72a41be1a1fe0fbd.yaml0000664000175000017500000000030600000000000024721 0ustar00zuulzuul00000000000000--- features: - | LVM driver: Added support for the NVMe TCP transport protocol. Configuration option is ``target_protocol = nvmet_tcp`` when using ``nvmet`` as the ``target_helper``. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/lvm-thin-overprovision-1d279f66ee2252ff.yaml0000664000175000017500000000112200000000000026477 0ustar00zuulzuul00000000000000--- upgrade: - | The default value has been removed for the LVM specific `lvm_max_over_subscription_ratio` setting. This changes the behavior so that LVM backends now adhere to the common `max_over_subscription_ratio` setting. The LVM specific config option may still be used, but it is now deprecated and will be removed in a future release. deprecations: - | The `lvm_max_overprovision_ratio` config option has been deprecated. It will be removed in a future release. Configurations should move to using the common `max_overprovision_ratio` config option. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/lvm-type-default-to-auto-a2ad554fc8bb25f2.yaml0000664000175000017500000000042400000000000026730 0ustar00zuulzuul00000000000000--- other: - | Modify default lvm_type setting from thick to auto. This will result in Cinder preferring thin on init, if there are no LV's in the VG it will create a thin-pool and use thin. If there are LV's and no thin-pool it will continue using thick. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/manage-resources-v3-c06096f75927fd3b.yaml0000664000175000017500000000024500000000000025533 0ustar00zuulzuul00000000000000--- features: - The v2 API extensions os-volume-manage and os-snapshot-manage have been mapped to the v3 resources manageable_volumes and manageable_snapshots ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/mark-blockbridge-unsupported-c9e55df0eb2e3c9f.yaml0000664000175000017500000000076500000000000030036 0ustar00zuulzuul00000000000000--- upgrade: - | The Blockbridge driver has been marked as unsupported and is now deprecated. ``enable_unsupported_drivers`` will need to be set to ``True`` in cinder.conf to continue to use it. deprecations: - | The Blockbridge driver has been marked as unsupported and is now deprecated. ``enable_unsupported_drivers`` will need to be set to ``True`` in cinder.conf to continue to use it. If its support status does not change it will be removed in the next release. 
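A sketch tying together the LVM options mentioned above: the ``nvmet`` target with the new NVMe/TCP transport, the ``auto`` ``lvm_type`` behavior, and the common over-subscription option that replaces the LVM-specific one. The section name, volume group and ratio are placeholders.

.. code-block:: ini

   [lvm-nvme-1]
   volume_driver = cinder.volume.drivers.lvm.LVMVolumeDriver
   volume_group = cinder-volumes
   # 'auto' prefers thin provisioning when the volume group allows it.
   lvm_type = auto
   target_helper = nvmet
   # New NVMe/TCP transport for the nvmet target.
   target_protocol = nvmet_tcp
   # Common option now honored instead of lvm_max_over_subscription_ratio.
   max_over_subscription_ratio = 20.0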
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/mark-brocade-unsupported-d5760b4bb8173652.yaml0000664000175000017500000000147300000000000026573 0ustar00zuulzuul00000000000000--- upgrade: - | The Brocade Fibre Channel Zone Manager driver has been marked as unsupported and is now deprecated. It is subject to removal during the "V" development cycle, following the standard OpenStack deprecation policy. The config option ``enable_unsupported_driver`` will need to be set to ``True`` in the driver's section in cinder.conf to continue to use it in this release. deprecations: - | The Brocade Fibre Channel Zone Manager driver has been marked as unsupported and is now deprecated. It is subject to removal during the "V" development cycle, following the standard OpenStack deprecation policy. The config option ``enable_unsupported_driver`` will need to be set to ``True`` in the driver's section in cinder.conf to continue to use it in this release. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/mark-cisco-zm-unsupported-57e5612f57e2407b.yaml0000664000175000017500000000114600000000000026722 0ustar00zuulzuul00000000000000--- upgrade: - | The Cisco Fibre Channel Zone Manager driver has been marked as unsupported and is now deprecated. ``enable_unsupported_driver`` will need to be set to ``True`` in the driver's section in cinder.conf to continue to use it. deprecations: - | The Cisco Fibre Channel Zone Manager driver has been marked as unsupported and is now deprecated. ``enable_unsupported_driver`` will need to be set to ``True`` in the driver's section in cinder.conf to continue to use it. If its support status does not change, it will be removed in the Queens development cycle. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/mark-cloudbyte-unsupported-8615a127439ed262.yaml0000664000175000017500000000075100000000000027106 0ustar00zuulzuul00000000000000--- upgrade: - The CloudByte driver has been marked as unsupported and is now deprecated. ``enable_unsupported_drivers`` will need to be set to ``True`` in cinder.conf to continue to use it. deprecations: - The CloudByte driver has been marked as unsupported and is now deprecated. ``enable_unsupported_drivers`` will need to be set to ``True`` in cinder.conf to continue to use it. If its support status does not change it will be removed in the next release. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/mark-coho-unsupported-989db9d88ed7fff8.yaml0000664000175000017500000000105400000000000026466 0ustar00zuulzuul00000000000000--- upgrade: - | The Coho driver has been marked as unsupported and is now deprecated. ``enable_unsupported_driver`` will need to be set to ``True`` in the driver's section in cinder.conf to continue to use it. deprecations: - | The Coho driver has been marked as unsupported and is now deprecated. ``enable_unsupported_driver`` will need to be set to ``True`` in the driver's section in cinder.conf to continue to use it. If its support status does not change, it will be removed in the Queens development cycle. 
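The ``enable_unsupported_driver`` option referenced throughout these deprecation notes is set in the affected driver's own section of ``cinder.conf``; the section name below is a placeholder.

.. code-block:: ini

   [deprecated-backend-1]
   # Acknowledge that the driver is unsupported and keep using it anyway.
   enable_unsupported_driver = true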
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/mark-dothill-unsupported-7f95115b7b24e53c.yaml0000664000175000017500000000075200000000000026715 0ustar00zuulzuul00000000000000--- upgrade: - The DotHill drivers have been marked as unsupported and are now deprecated. ``enable_unsupported_drivers`` will need to be set to ``True`` in cinder.conf to continue to use them. deprecations: - The DotHill drivers have been marked as unsupported and are now deprecated. ``enable_unsupported_drivers`` will need to be set to ``True`` in cinder.conf to continue to use them. If their support status does not change, they will be removed in the next release. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/mark-falconstor-unsupported-3b065556a4cd94de.yaml0000664000175000017500000000107600000000000027507 0ustar00zuulzuul00000000000000--- upgrade: - | The Falconstor drivers have been marked as unsupported and are now deprecated. ``enable_unsupported_driver`` will need to be set to ``True`` in the driver's section in cinder.conf to continue to use them. deprecations: - | The Falconstor drivers have been marked as unsupported and are now deprecated. ``enable_unsupported_driver`` will need to be set to ``True`` in the driver's section in cinder.conf to continue to use them. If their support status does not change, they will be removed in the Queens development cycle. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/mark-hpe-xp-unsupported-c9ce6cfbab622e46.yaml0000664000175000017500000000074300000000000026755 0ustar00zuulzuul00000000000000--- upgrade: - The HPE XP driver has been marked as unsupported and is now deprecated. ``enable_unsupported_drivers`` will need to be set to ``True`` in cinder.conf to continue to use it. deprecations: - The HPE XP driver has been marked as unsupported and is now deprecated. ``enable_unsupported_drivers`` will need to be set to ``True`` in cinder.conf to continue to use it. If its support status does not change, it will be removed in the next release. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/mark-infortrend-deprecated-553de89f8dd58aa8.yaml0000664000175000017500000000111000000000000027310 0ustar00zuulzuul00000000000000--- upgrade: - | The Infortrend drivers have been marked as unsupported and are now deprecated. ``enable_unsupported_driver`` will need to be set to ``True`` in the driver's section in cinder.conf to continue to use them. deprecations: - | The Infortrend drivers have been marked as unsupported and are now deprecated. ``enable_unsupported_driver`` will need to be set to ``True`` in the driver's section in cinder.conf to continue to use them. If their support status does not change, they will be removed in the Queens development cycle. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/mark-nexenta-edge-unsupported-76c500738f0b3c61.yaml0000664000175000017500000000106700000000000027533 0ustar00zuulzuul00000000000000--- upgrade: - | The Nexenta Edge driver has been marked as unsupported and is now deprecated. ``enable_unsupported_driver`` will need to be set to ``True`` in the driver's section in cinder.conf to continue to use it. 
deprecations: - | The Nexenta Edge driver has been marked as unsupported and is now deprecated. ``enable_unsupported_driver`` will need to be set to ``True`` in the driver's section in cinder.conf to continue to use it. If its support status does not change, it will be removed in the 'T' development cycle. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/mark-nexentaedge-unsupported-56d184fdccc6eaac.yaml0000664000175000017500000000076400000000000030124 0ustar00zuulzuul00000000000000--- upgrade: - The Nexenta Edge drivers have been marked as unsupported and are now deprecated. ``enable_unsupported_drivers`` will need to be set to ``True`` in cinder.conf to continue to use them. deprecations: - The Nexenta Edge drivers have been marked as unsupported and are now deprecated. ``enable_unsupported_drivers`` will need to be set to ``True`` in cinder.conf to continue to use them. If their support status does not change, they will be removed in the next release. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/mark-nimble-deprecated-9f7d1c178b48fa39.yaml0000664000175000017500000000105400000000000026324 0ustar00zuulzuul00000000000000--- upgrade: - | The Nimble driver has been marked as unsupported and is now deprecated. ``enable_unsupported_driver`` will need to be set to ``True`` in the driver's section in cinder.conf to continue to use it. deprecations: - | The Nimble driver has been marked as unsupported and is now deprecated. ``enable_unsupported_driver`` will need to be set to ``True`` in the driver's section in cinder.conf to continue to use it. If its support status does not change, it will be removed in the Queens development cycle. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/mark-qnap-unsupported-79bd8ece9a2bfcd2.yaml0000664000175000017500000000074700000000000026602 0ustar00zuulzuul00000000000000--- upgrade: - | The QNAP driver has been marked as unsupported and is now deprecated. ``enable_unsupported_drivers`` will need to be set to ``True`` in cinder.conf to continue to use it. deprecations: - | The QNAP driver has been marked as unsupported and is now deprecated. ``enable_unsupported_drivers`` will need to be set to ``True`` in cinder.conf to continue to use it. If its support status does not change it will be removed in the next release. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/mark-reduxio-deprecated-b435032a8fdb16f2.yaml0000664000175000017500000000102000000000000026502 0ustar00zuulzuul00000000000000--- upgrade: - | The Reduxio driver has been marked unsupported and is now deprecated. ``enable_unsupported_driver`` will need to be set to ``True`` in the driver's section in cinder.conf to use it. deprecations: - | The Reduxio driver has been marked unsupported and is now deprecated. ``enable_unsupported_driver`` will need to be set to ``True`` in the driver's section in cinder.conf to use it. If its support status does not change, the driver will be removed in the Queens development cycle. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/mark-scality-unsupported-530370e034a6f488.yaml0000664000175000017500000000074500000000000026564 0ustar00zuulzuul00000000000000--- upgrade: - The Scality driver has been marked as unsupported and is now deprecated. ``enable_unsupported_drivers`` will need to be set to ``True`` in cinder.conf to continue to use it. deprecations: - The Scality driver has been marked as unsupported and is now deprecated. ``enable_unsupported_drivers`` will need to be set to ``True`` in cinder.conf to continue to use it. If its support status does not change it will be removed in the next release. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/mark-synology-deprecated-134ba9764e14af67.yaml0000664000175000017500000000110200000000000026637 0ustar00zuulzuul00000000000000--- upgrade: - | The Synology driver has been marked as unsupported and is now deprecated. ``enable_unsupported_driver`` will need to be set to ``True`` in the driver's section in ``cinder.conf`` to continue to use it. deprecations: - | The Synology driver has been marked as unsupported and is now deprecated. ``enable_unsupported_driver`` will need to be set to ``True`` in the driver's section in ``cinder.conf`` to continue to use it. If its support status does not change, the driver will be removed in the Queens development cycle. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/mark-tegile-deprecated-1effb23010ea997c.yaml0000664000175000017500000000106000000000000026361 0ustar00zuulzuul00000000000000--- upgrade: - | The Tegile driver has been marked as unsupported and is now deprecated. ``enable_unsupported_driver`` will need to be set to ``True`` in the driver's section in cinder.conf to continue to use it. deprecations: - | The Tegile driver has been marked as unsupported and is now deprecated. ``enable_unsupported_driver`` will need to be set to ``True`` in the driver's section in cinder.conf to continue to use it. If its support status does not change, they will be removed in the Queens development cycle. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/mark-unsupported-gnap-739f90232c60ab3e.yaml0000664000175000017500000000062400000000000026173 0ustar00zuulzuul00000000000000--- upgrade: - | The QNAP driver has been marked as unsupported and is now deprecated. ``enable_unsupported_drivers`` will need to be set to ``True`` in cinder.conf to continue to use it. deprecations: - | The QNAP driver has been marked as unsupported and is now deprecated. ``enable_unsupported_drivers`` will need to be set to ``True`` in cinder.conf to continue to use it.././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/mark-unsupported-lefthand-312f8c34df6ed119.yaml0000664000175000017500000000073700000000000027125 0ustar00zuulzuul00000000000000--- upgrade: - | The HPE LeftHand driver has been marked as unsupported and is now deprecated. ``enable_unsupported_drivers`` will need to be set to ``True`` in cinder.conf to continue to use them. deprecations: - | The HPE LeftHand driver has been marked as unsupported and is now deprecated. 
``enable_unsupported_drivers`` will need to be set to ``True`` in cinder.conf to continue to use them. The driver will be removed in the Ussuri release. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/mark-unsupported-tintri-driver-a71b09b7c001c75e.yaml0000664000175000017500000000105300000000000030115 0ustar00zuulzuul00000000000000--- upgrade: - | The Tintri driver has been marked as unsupported and is now deprecated. ``enable_unsupported_driver`` will need to be set to ``True`` in the driver's section in cinder.conf to continue to use it. deprecations: - | The Tintri driver has been marked as unsupported and is now deprecated. ``enable_unsupported_driver`` will need to be set to ``True`` in the driver's section in cinder.conf to continue to use it. If its support status does not change, it will be removed in the 'T' development cycle. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/mark-unsupported-vrtshyperscale-2bd42e2af8b803d7.yaml0000664000175000017500000000110300000000000030454 0ustar00zuulzuul00000000000000--- upgrade: - | The Veritas HyperScale driver has been marked as unsupported and is now deprecated. ``enable_unsupported_driver`` will need to be set to ``True`` in the driver's section in cinder.conf to continue to use it. deprecations: - | The Veritas HyperScale driver has been marked as unsupported and is now deprecated. ``enable_unsupported_driver`` will need to be set to ``True`` in the driver's section in cinder.conf to continue to use it. If its support status does not change, it will be removed in the 'T' development cycle. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/mark-violin-unsupported-fdf6b34cf9847359.yaml0000664000175000017500000000076500000000000026664 0ustar00zuulzuul00000000000000--- upgrade: - | The Violin drivers have been marked as unsupported and are now deprecated. ``enable_unsupported_drivers`` will need to be set to ``True`` in cinder.conf to continue to use them. deprecations: - | The Violin drivers have been marked as unsupported and are now deprecated. ``enable_unsupported_drivers`` will need to be set to ``True`` in cinder.conf to continue to use them. If its support status does not change it will be removed in the next release. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/mark-xio-deprecated-18c914e15695d793.yaml0000664000175000017500000000105400000000000025436 0ustar00zuulzuul00000000000000--- upgrade: - | The X-IO driver has been marked as unsupported and is now deprecated. ``enable_unsupported_driver`` will need to be set to ``True`` in the driver's section in cinder.conf to continue to use it. deprecations: - | The X-IO driver has been marked as unsupported and is now deprecated. ``enable_unsupported_driver`` will need to be set to ``True`` in the driver's section in cinder.conf to continue to use it. If its support status does not change, they will be removed in the Queens development cycle. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/mark-zte-unsupported-3c048e419264eca2.yaml0000664000175000017500000000105300000000000026044 0ustar00zuulzuul00000000000000--- upgrade: - | The ZTE driver has been marked as unsupported and is now deprecated. ``enable_unsupported_driver`` will need to be set to ``True`` in the driver's section in cinder.conf to continue to use it. deprecations: - | The ZTE driver has been marked as unsupported and is now deprecated. ``enable_unsupported_driver`` will need to be set to ``True`` in the driver's section in cinder.conf to continue to use it. If its support status does not change, it will be removed in the Queens development cycle. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/metadata-for-volume-summary-729ba648db4e4e54.yaml0000664000175000017500000000014300000000000027363 0ustar00zuulzuul00000000000000--- features: - Added support for getting all distinct volume metadata from the volume-summary API.././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/migrate-backup-encryption-keys-to-barbican-6f07fd48d4937b2a.yaml0000664000175000017500000000042700000000000032240 0ustar00zuulzuul00000000000000--- fixes: - | When encryption keys based on the ConfKeyManager's fixed_key are migrated to Barbican, ConfKeyManager keys stored in the Backup table are included in the migration process. Fixes `bug 1757235 <https://bugs.launchpad.net/cinder/+bug/1757235>`__. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/migrate-cg-to-generic-volume-groups-f82ad3658f3e567c.yaml0000664000175000017500000000024000000000000030716 0ustar00zuulzuul00000000000000--- upgrade: - Operators need to run ``cinder-manage db online_data_migrations`` to migrate existing consistency groups to generic volume groups. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/migrate-fixed-key-to-barbican-91dfcb829efd4bb6.yaml0000664000175000017500000000124400000000000027735 0ustar00zuulzuul00000000000000--- features: - | When Barbican is the encryption key_manager backend, any encryption keys associated with the legacy ConfKeyManager will be automatically migrated to Barbican. All database references to the ConfKeyManager's all-zeros key ID will be updated with a Barbican key ID. The encryption keys do not change. Only the encryption key ID changes. Key migration is initiated on service startup, and entries in the cinder-volume log will indicate the migration status. Log entries will indicate when a volume's encryption key ID has been migrated to Barbican, and a summary log message will indicate when key migration has finished. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/min-max-vol-size-on-type-bc7c75ea73a74d02.yaml0000664000175000017500000000033700000000000026576 0ustar00zuulzuul00000000000000--- features: - Added the ability to set minimum and maximum volume size restrictions on a per volume-type basis, using the new volume type keys 'provisioning:min_vol_size' and 'provisioning:max_vol_size'. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/minimum-u4p-version-4c66d69d1b873796.yaml0000664000175000017500000000036500000000000025546 0ustar00zuulzuul00000000000000--- other: - | PowerMax driver - the minimum version of Unisphere for PowerMax for the current release is 9.1.0.5. It is however recommended to install the Security Releases(SR) of Unisphere for PowerMax if they become available. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/modify-ensure-export-1d56a40f5e762aa8.yaml0000664000175000017500000000054500000000000026120 0ustar00zuulzuul00000000000000 fixes: - | Storwize SVC Driver: Fixes `bug 1749687 `__ previously lsvdisk() was called separately for every 'in-use' volume in order to check if the volume exists on the storage. In order to avoid problem of too long driver initialization now lsvdisk() is called once per pool. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/move-scaleio-driver-to-dell-emc-dir-c195374ca6b7e98d.yaml0000664000175000017500000000031300000000000030553 0ustar00zuulzuul00000000000000--- upgrade: - The ScaleIO driver is moved to the dell_emc directory. volume_driver entry in cinder.conf needs to be changed to ``cinder.volume.drivers.dell_emc.scaleio.driver.ScaleIODriver``. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/move-xtremio-driver-to-dell-emc-dir-f7e07a502cafd78f.yaml0000664000175000017500000000041300000000000030753 0ustar00zuulzuul00000000000000--- upgrade: - The XtremIO driver is moved to the dell_emc directory. volume_driver entry in cinder.conf needs to be changed to ``cinder.volume.drivers.dell_emc.xtremio.XtremIOISCSIDriver`` or ``cinder.volume.drivers.dell_emc.xtremio.XtremIOFCDriver``. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/msa-multiattach-5407eb60093de8f1.yaml0000664000175000017500000000011100000000000025020 0ustar00zuulzuul00000000000000--- features: - Support for multiattach is enabled for HPE MSA Storage ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/msa2060-99150398a9c416f6.yaml0000664000175000017500000000037200000000000022703 0ustar00zuulzuul00000000000000--- fixes: - | HPMSA driver: The HPE MSA driver was updated to avoid using deprecated command syntax that has been removed in the latest version of the MSA API. This is required to support the newest firmware in the MSA 2060/1060. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/multiple_clone-82bd7f80ae439080.yaml0000664000175000017500000000031500000000000024746 0ustar00zuulzuul00000000000000--- features: - Support for new configuration option - vmax_snapvx_unlink_limit for specifying the maximum number of unlinks which will be performed before a clone operation. 
Default value is 3 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/mv-use_quota-b8e010f8f68a1eaa.yaml0000664000175000017500000000112600000000000024571 0ustar00zuulzuul00000000000000--- features: - | Starting with API microversion 3.65, a ``consumes_quota`` field is included in the response body of volumes and snapshots to indicate whether the volume is using quota or not. Additionally, ``consumes_quota`` can be used as a listing filter for volumes and snapshots. Its availability is controlled by its inclusion in ``etc/cinder/resource_filters.json``, where it is included by default. The default listing behavior is not to use this filter. Only temporary resources created internally by cinder will have the value set to ``false``. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/mv-volume-type-name-and-id-5f4fd8480874fe9b.yaml0000664000175000017500000000044200000000000027015 0ustar00zuulzuul00000000000000--- features: - | Introduces microversion (MV) 3.63, which includes volume type ID in the volume details JSON response. This MV affects the volume detail list (``GET /v3/{project_id}/volumes/detail``), and volume-show (``GET /v3/{project_id}/volumes/{volume_id}``) calls. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/nec-allow-more-than-4iSCSI-portals-8342defe64491f81.yaml0000664000175000017500000000041400000000000030173 0ustar00zuulzuul00000000000000--- upgrade: - | NEC Driver: Added support of more than 4 iSCSI portals for a node. deprecations: - | NEC Driver: Deprecated ``nec_iscsi_portals_per_cont`` config option. The option was used to limit number of portals and is no longer needed. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/nec-auto-accesscontrol-55f4b090e8128f5e.yaml0000664000175000017500000000014400000000000026312 0ustar00zuulzuul00000000000000--- upgrade: Added automatic configuration of SAN access control for the NEC volume driver. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/nec-delete-unused-parameter-367bc9447acbb03e.yaml0000664000175000017500000000016500000000000027354 0ustar00zuulzuul00000000000000--- upgrade: - In NEC driver, the deprecated configuration parameter `ldset_controller_node_name` was deleted. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/nec-delete-volume-per-limit-d10b9df86f64b80e.yaml0000664000175000017500000000027000000000000027311 0ustar00zuulzuul00000000000000--- upgrade: - In NEC driver, the number of volumes in a storage pool is no longer limited to 1024. More volumes can be created with storage firmware revision 1015 or later. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/nec-manage-unmanage-06f9beb3004fc227.yaml0000664000175000017500000000016700000000000025572 0ustar00zuulzuul00000000000000--- features: - Support manage/unmanage volume and manage/unmanage snapshot functions for the NEC volume driver. 
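The ScaleIO and XtremIO relocation notes above only require a ``volume_driver`` path update. A sketch of the updated backend sections, with hypothetical section names:

.. code-block:: ini

   [scaleio-1]
   # new module path after the move to the dell_emc directory
   volume_driver = cinder.volume.drivers.dell_emc.scaleio.driver.ScaleIODriver

   [xtremio-1]
   # iSCSI variant; use XtremIOFCDriver for Fibre Channel
   volume_driver = cinder.volume.drivers.dell_emc.xtremio.XtremIOISCSIDriver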
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/nec-nondisruptive-backup-471284d07cd806ce.yaml0000664000175000017500000000017000000000000026650 0ustar00zuulzuul00000000000000--- features: - Enable backup snapshot optimal path by implementing attach and detach snapshot in the NEC driver. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/nec-storage-assist-revert-to-sanpshot-58cddebfbf06d222.yaml0000664000175000017500000000012500000000000031521 0ustar00zuulzuul00000000000000--- features: - | NEC Driver: Added support to revert a volume to a snapshot. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/nec-support-multi-attach-8aae5100f513656c.yaml0000664000175000017500000000010000000000000026561 0ustar00zuulzuul00000000000000--- features: - | NEC Driver: Added multiattach support. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/nec_storage_volume_driver-57663f9ecce1ae19.yaml0000664000175000017500000000010600000000000027334 0ustar00zuulzuul00000000000000--- features: - Added backend FC and iSCSI drivers for NEC Storage. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/nec_v_storage_volume_driver-e3cb7e3c496ab066.yaml0000664000175000017500000000012500000000000027650 0ustar00zuulzuul00000000000000--- features: - | Added backend FC and iSCSI drivers for NEC V series Storage. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/netapp-add-generic-group-support-cdot-9bebd13356694e13.yaml0000664000175000017500000000020600000000000031155 0ustar00zuulzuul00000000000000--- features: - Added generic volume group capability to NetApp cDot drivers with support for write consistent group snapshots. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/netapp-add-support-for-adaptive-qos-0b76dadf7c044cd8.yaml0000664000175000017500000000103700000000000031057 0ustar00zuulzuul00000000000000--- features: - | NetApp ONTAP: Added support for Adaptive QoS policies that have been pre-created on the storage system, with the NetApp driver and clustered ONTAP version 9.4 or higher. To use this feature, configure a Cinder volume type with the following extra-specs:: netapp:qos_policy_group= netapp:qos_policy_group_is_adaptive=" True" Note that a cluster scoped account must be used in the driver configuration in order to use QoS in clustered ONTAP. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/netapp-asar2-disaggregated-platform-support-a1b2c3d4e5f6g7h8.yaml0000664000175000017500000000367300000000000032613 0ustar00zuulzuul00000000000000--- features: - | Added support for NetApp ASA r2 (All-Flash SAN Array r2) disaggregated platform in the NetApp unified driver. This introduces a new configuration option ``netapp_disaggregated_platform`` that enables ASA r2 specific workflows and optimizations. 
The implementation includes: * New boolean configuration option ``netapp_disaggregated_platform`` (default: False) to enable ASA r2 workflows * New ``RestClientASAr2`` class that inherits from the standard REST client * Override capability for ASA r2 specific functionality when needed * Full backward compatibility with existing NetApp ONTAP configurations To enable ASA r2 support, set the following in your cinder configuration: .. code-block:: ini [backend_netapp_asar2] volume_driver = cinder.volume.drivers.netapp.common.NetAppDriver netapp_storage_family = ontap_cluster netapp_storage_protocol = iscsi netapp_use_legacy_client = False netapp_disaggregated_platform = True # ... other NetApp configuration options When ``netapp_disaggregated_platform`` is set to ``True``, the driver will: * Apply ASA r2 specific optimizations and workflows * Maintain full compatibility with existing volume operations * Automatically fall back to standard ONTAP behavior when ASA r2 specific methods are not available The ASA r2 client inherits all functionality from the standard REST client by default, with the ability to override individual methods for ASA r2 specific behavior. This design ensures that: * No existing functionality is lost * New ASA r2 features will be added incrementally * ASAr2 does not support ZAPIs. Hence all the APIs are accessed using REST. This feature enables users to take advantage of NetApp's disaggregated architecture and ASA r2 specific performance optimizations while maintaining a familiar operational experience. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/netapp-cDOT-whole-backend-replication-support-59d7537fe3d0eb05.yaml0000664000175000017500000000054500000000000032623 0ustar00zuulzuul00000000000000--- features: - Added host-level (whole back end replication - v2.1) replication support to the NetApp cDOT drivers (iSCSI, FC, NFS). upgrade: - While configuring NetApp cDOT back ends, new configuration options (``replication_device`` and ``netapp_replication_aggregate_map``) must be added in order to use the host-level failover feature. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/netapp-chap-iscsi-auth-264cd942b2a76094.yaml0000664000175000017500000000012600000000000026120 0ustar00zuulzuul00000000000000--- features: - Added iSCSI CHAP uni-directional authentication for NetApp drivers. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/netapp-data-ontap-deprecate-7mode-drivers-a39bfcb3afefc9a5.yaml0000664000175000017500000000044400000000000032330 0ustar00zuulzuul00000000000000--- deprecations: - The 7-Mode Data ONTAP configuration of the NetApp Unified driver is deprecated as of the Ocata release and will be removed in the Queens release. Other configurations of the NetApp Unified driver, including Clustered Data ONTAP and E-series, are unaffected. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/netapp-deprecate-eseries-drivers-bc4f552d277c07b9.yaml0000664000175000017500000000036100000000000030347 0ustar00zuulzuul00000000000000--- deprecations: - The NetApp E-Series drivers are deprecated as of the Rocky release and will be removed in the Stein release. 
Other configurations of the NetApp driver, i.e Clustered Data ONTAP and Solidfire, are unaffected.././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/netapp-eseries-consistency-groups-4f6b2af2d20c94e9.yaml0000664000175000017500000000012700000000000030672 0ustar00zuulzuul00000000000000--- features: - Support for Consistency Groups in the NetApp E-Series Volume Driver. ././@PaxHeader0000000000000000000000000000021200000000000011450 xustar0000000000000000116 path=cinder-27.0.0/releasenotes/notes/netapp-fix-issue-while-ensuring-snapmirror-creation-bea36a69d443e86f.yaml 22 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/netapp-fix-issue-while-ensuring-snapmirror-creation-bea36a69d443e860000664000175000017500000000051400000000000033126 0ustar00zuulzuul00000000000000--- fixes: - | NetApp ONTAP `bug #1958245 `_: In an ONTAP flexgroup replication environment, snapmirror creation would succeed but a driver bug caused an error message to be logged for the cinder-volume service. The issue has been corrected in this release. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/netapp-log-filter-f3256f55c3ac3faa.yaml0000664000175000017500000000044000000000000025473 0ustar00zuulzuul00000000000000--- features: - The NetApp ONTAP driver supports a new configuration option ``netapp_api_trace_pattern`` to enable filtering backend API interactions to log. This option must be specified in the backend section when desired and it accepts a valid python regular expression. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/netapp-migrated-qos-c0c8aae50d010c75.yaml0000664000175000017500000000035200000000000025730 0ustar00zuulzuul00000000000000--- fixes: - | NetApp ONTAP `bug #1906291 `_: Fix volume losing its QoS policy on the backend after moving it (migrate or retype with migrate) to a NetApp NFS backend. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/netapp-nfs-aa-support-477ddf585c5aa578.yaml0000664000175000017500000000022600000000000026175 0ustar00zuulzuul00000000000000--- features: - | NetApp ONTAP NFS driver: Enabled support for Active/Active environments in the NetApp NFS driver (including replication). ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/netapp-nfs-consistency-group-support-83eccc2da91ee19b.yaml0000664000175000017500000000011400000000000031503 0ustar00zuulzuul00000000000000--- features: - Added Cinder consistency group for the NetApp NFS driver. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/netapp-nfs-copy-offload-image-812c7152d9fe4aae.yaml0000664000175000017500000000072100000000000027604 0ustar00zuulzuul00000000000000--- features: - | NetApp NFS driver: add an alternative approach to perform the efficient clone image when the Glance source store and Cinder destination pool are not in the same FlexVol, but they are in the same Cluster. Previously, the driver required the copy offload tool for doing it efficiently, which is no longer available. Now, the operators can maintain their efficient clone image by relying on the storage file copy operation. 
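The cDOT host-level replication note above introduces ``replication_device`` and ``netapp_replication_aggregate_map``, and the log-filter note adds ``netapp_api_trace_pattern``. A hedged sketch of one possible backend section follows; the backend ID, aggregate names, and regular expression are placeholders, and the exact value formats should be confirmed against the NetApp driver documentation:

.. code-block:: ini

   [ontap-primary]
   volume_driver = cinder.volume.drivers.netapp.common.NetAppDriver
   netapp_storage_family = ontap_cluster
   netapp_storage_protocol = nfs
   # placeholder failover target, defined in its own backend section
   replication_device = backend_id:ontap-secondary
   # placeholder source:destination aggregate pairing for replication
   netapp_replication_aggregate_map = backend_id:ontap-secondary,aggr_src_1:aggr_dst_1
   # only log backend API calls whose names match this regex
   netapp_api_trace_pattern = ^(?!(perf)).*$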
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/netapp-nfs-deprecate-copy-offload-option-f9d6fe8e3dfafb04.yaml0000664000175000017500000000021500000000000032215 0ustar00zuulzuul00000000000000--- deprecations: - | Deprecate NetApp NFS option `netapp_copyoffload_tool_path`. The tool is no longer available for downloading. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/netapp-non-discovery-19af4e10f7b190ea.yaml0000664000175000017500000000055100000000000026150 0ustar00zuulzuul00000000000000--- fixes: - | NetApp iSCSI drivers no longer use the discovery mechanism for multipathing and they always return all target/portals when attaching a volume. Thanks to this, volumes will be successfully attached even if the target/portal selected as primary is down, this will be the case for both, multipath and single path connections. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/netapp-nvme-in-use-expansion-d11f03fb64050145.yaml0000664000175000017500000000105500000000000027264 0ustar00zuulzuul00000000000000--- features: - | **NetApp NVMe namespace support for in-use expansion** Added support for in-use expansion of NetApp NVMe namespaces, allowing volumes to be resized while attached to running instances without requiring detachment. This feature enables seamless volume expansion for NVMe-backed volumes in NetApp ONTAP environments. Key capabilities: * **In-use expansion**: Volumes can be expanded while attached to running instances * **NVMe namespace compatibility**: Full support for NetApp NVMe namespace expansion ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/netapp-ontap-add-revert-to-snapshot-ce20810bcf094fce.yaml0000664000175000017500000000041700000000000031062 0ustar00zuulzuul00000000000000--- features: - | NetApp ONTAP driver: Added support to Revert to Snapshot for the iSCSI, FC and NFS drivers with FlexVol pool. This feature does not support FlexGroups and is limited to revert only to the most recent snapshot of a given Cinder volume. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/netapp-ontap-fix-detach-multiattach-d99d33dff2fefb4c.yaml0000664000175000017500000000043000000000000031257 0ustar00zuulzuul00000000000000--- fixes: - | `Bug #1839384 `_: NetApp ONTAP: Detaching any instance from multiattached volume terminates connection. Now the connection is terminated only if there're no other instances using the same initiator. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/netapp-ontap-fix-flexvol-replica-create-c7772837df20021f.yaml0000664000175000017500000000024300000000000031376 0ustar00zuulzuul00000000000000--- fixes: - | NetApp ONTAP driver `Bug #1927784 `_: Fixed the replication setup with FlexVol pools. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/netapp-ontap-fix-force-detach-55be3f4ac962b493.yaml0000664000175000017500000000017200000000000027532 0ustar00zuulzuul00000000000000--- fixes: - | Fixed bug #1783582, where calls to os-force_detach were failing on NetApp ONTAP iSCSI/FC drivers.././@PaxHeader0000000000000000000000000000021200000000000011450 xustar0000000000000000116 path=cinder-27.0.0/releasenotes/notes/netapp-ontap-fix-qos-min-support-svm-scoped-account-a8458445d459023c.yaml 22 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/netapp-ontap-fix-qos-min-support-svm-scoped-account-a8458445d4590230000664000175000017500000000024400000000000032467 0ustar00zuulzuul00000000000000--- fixes: - | NetApp ONTAP: Fix check QoS minimum support for SVM scoped account. See: `Bug #1924798 `_. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/netapp-ontap-rest-api-client-d889cfa895f01249.yaml0000664000175000017500000000177400000000000027371 0ustar00zuulzuul00000000000000--- features: - | NetApp drivers: NFS, iSCSI and FCP drivers have now the option to request ONTAP operations through REST API. The new option `netapp_use_legacy_client` switches between the old ZAPI client approach and new REST client. It is default to `True`, meaning that the drivers will keep working as before using ZAPI operations. If desired, this option can be set to `False` interacting with the storage using the new REST client. However, this new client still relies on ZAPI calls for consistency group snapshot operation. The drivers can only be configured with REST client when using ONTAP storage 9.11.1 or newer. NOTE: Enabling ONTAP REST client changes the behavior of QoS specs. Earlier, QoS values could be represented in BPS (bytes per second), but now REST client only supports integer values represented in MBPS (Megabytes per second). It means that though the user specifies the value in BPS, it will be converted to MBPS and rounded up. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/netapp-ontap-use_exact_size-d03c90efbb8a30ac.yaml0000664000175000017500000000030100000000000027623 0ustar00zuulzuul00000000000000--- fixes: - | Fixed bug #1731474 on NetApp Data ONTAP driver that was causing LUNs to be created with larger size than requested. This fix requires version 9.1 of ONTAP or later.././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/netapp-solidfire-add-replication-mode-eb26535d0ec78cb4.yaml0000664000175000017500000000041500000000000031315 0ustar00zuulzuul00000000000000--- upgrade: - | SolidFire supports Synchronous, Asynchronous and SnapshotsOnly replication modes. This adds the config option `solidfire:replication_mode` to specify the mode to be used by Cinder. Its value can be `Sync`, `Async` or `SnapshotsOnly`. 
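The REST API client note above is controlled by a single option. A minimal sketch of opting an existing ONTAP iSCSI back end into the REST client (section name is a placeholder, credentials are omitted, and ONTAP 9.11.1 or newer is assumed, per the note):

.. code-block:: ini

   [ontap-iscsi]
   volume_driver = cinder.volume.drivers.netapp.common.NetAppDriver
   netapp_storage_family = ontap_cluster
   netapp_storage_protocol = iscsi
   # False switches the driver from ZAPI to the REST client
   netapp_use_legacy_client = False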
././@PaxHeader0000000000000000000000000000021000000000000011446 xustar0000000000000000114 path=cinder-27.0.0/releasenotes/notes/netapp-solidfire-fix-exceptions-while-eos-upgrade-1e3df89b5fb79165.yaml 22 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/netapp-solidfire-fix-exceptions-while-eos-upgrade-1e3df89b5fb79165.0000664000175000017500000000035300000000000032675 0ustar00zuulzuul00000000000000--- fixes: - | NetApp SolidFire driver `Bug #1934435 `_: Fixed errors that might occur when an operation is made to a volume at the same time as the Element OS upgrades.././@PaxHeader0000000000000000000000000000021100000000000011447 xustar0000000000000000115 path=cinder-27.0.0/releasenotes/notes/netapp-solidfire-fix-osprofiler-infinite-recursion-ec3d4794c89b2f83.yaml 22 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/netapp-solidfire-fix-osprofiler-infinite-recursion-ec3d4794c89b2f830000664000175000017500000000032600000000000033116 0ustar00zuulzuul00000000000000--- fixes: - | NetApp SolidFire driver `bug #1934459 `_: Fixed backend initialization failing with RecursionError error when OSProfiler is enabled. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/netapp-solidfire-stats-improving-57207f313d7faf42.yaml0000664000175000017500000000027100000000000030342 0ustar00zuulzuul00000000000000features: - | NetApp SolidFire now reports QoS and efficiency stats allowing operators to use those values in consideration for weighting and filtering of their backends. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/netapp-space-allocation-support-36a26aecc8fe1500.yaml0000664000175000017500000000073400000000000030301 0ustar00zuulzuul00000000000000features: - | NetApp iSCSI/FCP drivers: NetApp space allocation feature allows ONTAP and host to see the actual space correctly when host deletes data. It also notifies the host when the LUN cannot accept write data due to lack of space on the volume, and makes the LUN read-only (rather than going offline). This feature can be enabled or disabled on cinder volumes by using volume type extra specs with the ``netapp:space_allocation`` property.././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/netapp_cdot_report_shared_blocks_exhaustion-073a73e05daf09d4.yaml0000664000175000017500000000052200000000000033030 0ustar00zuulzuul00000000000000--- features: - The NetApp cDOT drivers report to the scheduler, for each FlexVol pool, the fraction of the shared block limit that has been consumed by dedupe and cloning operations. This value, netapp_dedupe_used_percent, may be used in the filter & goodness functions for better placement of new Cinder volumes. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/netapp_fix_svm_scoped_permissions.yaml0000664000175000017500000000033600000000000026610 0ustar00zuulzuul00000000000000--- fixes: - NetApp cDOT block and file drivers have improved support for SVM scoped user accounts. Features not supported for SVM scoped users include QoS, aggregate usage reporting, and dedupe usage reporting. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/new-config-opts-for-periodic_interval-d0cb17a2d72e0cd0.yaml0000664000175000017500000000312000000000000031425 0ustar00zuulzuul00000000000000--- features: - | Added new configuration options to allow more specific control over some periodic processes. See the 'Upgrade' section for details. upgrade: - | The ``periodic_interval`` configuration option was being used in too many places, and as a result, it had become difficult to tune specific periodic tasks without affecting other functionality. The following configuration options should now be used in place of ``periodic_interval``: * ``backup_driver_init_check_interval`` * ``backup_driver_status_check_interval`` * ``scheduler_driver_init_wait_time`` * ``backend_stats_polling_interval`` See the help text for these options for more information. The default value of each option is 60, which has been the default value of ``periodic_interval``. * If you *have not* modified ``periodic_interval``, you should see no differences from current behavior. * If you *have* modified ``periodic_interval``, please review the new options to determine which one(s) should be adjusted. Also, you should consider setting ``periodic_interval`` back to its default value of 60. A warning has been added to the ``cinder-status upgrade check`` CLI to detect whether the ``periodic_interval`` option has been modified from its default value to remind you which of the above situations currently applies to you. The ``periodic_interval`` configuration option still exists but its use is now restricted to providing a default periodicity for objects created from the ``cinder.service.Service`` class. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/new-nova-config-section-2a7a51a0572e7064.yaml0000664000175000017500000000070400000000000026277 0ustar00zuulzuul00000000000000--- features: - a [nova] section is added to configure the connection to the compute service, which is needed to the InstanceLocalityFilter, for example. deprecations: - The os_privileged_xxx and nova_xxx in the [default] section are deprecated in favor of the settings in the [nova] section. fixes: - | Fixed using of the user's token in the nova client (`bug #1686616 `_) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/new-osprofiler-call-0bb1a305c8e8f9cc.yaml0000664000175000017500000000041100000000000026024 0ustar00zuulzuul00000000000000--- upgrade: - New config option added. ``"connection_string"`` in [profiler] section is used to specify OSProfiler driver connection string, for example, ``"connection_string = messaging://"``, ``"connection_string = mongodb://localhost:27017"`` ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/nexenta-edge-driver-removal-5626d542d75f3d43.yaml0000664000175000017500000000126600000000000027164 0ustar00zuulzuul00000000000000--- upgrade: - | The Nexenta Edge storage driver has been removed after completion of its deprecation period without a reliable 3rd Party CI system being supported. Customers using the Nexenta Edge driver should not upgrade Cinder without first migrating all volumes from their Nexenta backend to a supported storage backend. 
Failure to migrate volumes will result in no longer being able to access volumes back by the Nexenta Edge storage backend. other: - | The Nexenta Edge storage driver was marked unsupported in Stein due to 3rd Party CI not meeting Cinder's requirements. As a result the driver is removed starting from the Train release. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/nexenta-edge-iscsi-b3f12c7a719e8b8c.yaml0000664000175000017500000000010700000000000025547 0ustar00zuulzuul00000000000000--- features: - Added backend driver for Nexenta Edge iSCSI storage. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/nexenta-ns5-5d223f3b60f58aad.yaml0000664000175000017500000000010700000000000024226 0ustar00zuulzuul00000000000000--- features: - Added extend method to NFS driver for NexentaStor 5. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/nexentaedge-iscsi-driver-302529c56cdbbf38.yaml0000664000175000017500000000011000000000000026670 0ustar00zuulzuul00000000000000--- features: - Added backend driver for Nexenta Edge iSCSI storage. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/nexentaedge-iscsi-ee5d6c05d65f97af.yaml0000664000175000017500000000010000000000000025554 0ustar00zuulzuul00000000000000--- features: - Added HA support for NexentaEdge iSCSI driver ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/nexentaedge-nbd-eb48268723141f12.yaml0000664000175000017500000000006000000000000024613 0ustar00zuulzuul00000000000000features: - Added NBD driver for NexentaEdge. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/nexentastor5-driver-update-937d2a1ba76a504a.yaml0000664000175000017500000000361000000000000027207 0ustar00zuulzuul00000000000000--- features: - Added revert to snapshot support for NexentaStor5 iSCSI and NFS drivers. - NexentaStor5 iSCSI and NFS drivers multiattach capability enabled. - Added support for creating, deleting, and updating consistency groups for NexentaStor5 iSCSI and NFS drivers. - Added support for taking, deleting, and restoring consistency group snapshots for NexentaStor5 iSCSI and NFS drivers. - Added consistency group capability to generic volume groups for NexentaStor5 iSCSI and NFS drivers. - Added volume manage/unmanage support for NexentaStor5 iSCSI and NFS drivers. - Added snapshot manage/unmanage support for NexentaStor5 iSCSI and NFS drivers. - Added the ability to list manageable volumes and snapshots for NexentaStor5 iSCSI and NFS drivers. upgrade: - Added a new config option ``nexenta_rest_connect_timeout``. This option specifies the time limit (in seconds), within which the connection to NexentaStor management REST API server must be established. - Added a new config option ``nexenta_rest_read_timeout``. This option specifies the time limit (in seconds), within which NexentaStor management REST API server must send a response. - Added a new config option ``nexenta_rest_backoff_factor``. This option specifies the backoff factor to apply between connection attempts to NexentaStor management REST API server. 
- Added a new config option ``nexenta_rest_retry_count``. This option specifies the number of times to repeat NexentaStor management REST API call in case of connection errors and NexentaStor appliance EBUSY or ENOENT errors. - Added a new config option ``nexenta_origin_snapshot_template``. This option specifies template string to generate origin name of clone. - Added a new config option ``nexenta_group_snapshot_template``. This option specifies template string to generate group snapshot name. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/nexentastor5-https-6d58004838cfab30.yaml0000664000175000017500000000024600000000000025527 0ustar00zuulzuul00000000000000--- features: - Added secure HTTP support for REST API calls in the NexentaStor5 driver. Use of HTTPS is set True by default with option ``nexenta_use_https``. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/nexentastor5-smartcompression-disabled-9c6ca7c758b6de69.yaml0000664000175000017500000000046400000000000031733 0ustar00zuulzuul00000000000000--- issues: - | SmartCompression feature is disabled for the NexentaStor5 NFS driver. Thick provisioned volumes created as files containing zeros are not being compressed with standard compression if SmartCompression feature is enabled. This functionality will be fixed in a later release. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/nexentastor5_iscsi-e1d88b07d15c660b.yaml0000664000175000017500000000010700000000000025636 0ustar00zuulzuul00000000000000--- features: - Added backend driver for NexentaStor5 iSCSI storage. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/nexentastor5_nfs-bcc8848716daea63.yaml0000664000175000017500000000010500000000000025400 0ustar00zuulzuul00000000000000--- features: - Added backend driver for NexentaStor5 NFS storage. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/nfs-online-snapshot-c05e6c8113bbded6.yaml0000664000175000017500000000030200000000000026043 0ustar00zuulzuul00000000000000--- fixes: - | NFS driver `bug #1860913 `_: Fixed instance uses base image file when it is rebooted after online snapshot creation. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/nfs-snapshots-21b641300341cba1.yaml0000664000175000017500000000036300000000000024423 0ustar00zuulzuul00000000000000--- features: - Added support for snapshots in the NFS driver. This functionality is only enabled if ``nfs_snapshot_support`` is set to ``True`` in cinder.conf. Cloning volumes is only supported if the source volume is not attached. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/nfs_backup_no_overwrite-be7b545453baf7a3.yaml0000664000175000017500000000020700000000000027003 0ustar00zuulzuul00000000000000--- fixes: - | Fix NFS backup driver, we now support multiple backups on the same container, they are no longer overwritten. 
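The NexentaStor5 driver update notes above add several REST-related options. A hedged sketch of how they might appear in a NexentaStor5 backend section; the section name and values shown are purely illustrative, not recommendations:

.. code-block:: ini

   [nexentastor5-nfs]
   # hypothetical NexentaStor5 NFS backend section
   nexenta_use_https = True
   nexenta_rest_connect_timeout = 30
   nexenta_rest_read_timeout = 300
   nexenta_rest_backoff_factor = 0.5
   nexenta_rest_retry_count = 3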
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/nimble-add-alletra-6k-info-8d242a809e6044a5.yaml0000664000175000017500000000023300000000000026533 0ustar00zuulzuul00000000000000other: - | Nimble: Documented that existing driver supports the new Alletra 6k backend. Alletra 6k is newer version of existing Nimble backend. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/nimble-add-fc-support-0007fdbd647be947.yaml0000664000175000017500000000010500000000000026104 0ustar00zuulzuul00000000000000--- features: - Added Nimble Storage Fibre Channel backend driver. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/nimble-add-force-backup-539e1e5c72f84e61.yaml0000664000175000017500000000012600000000000026276 0ustar00zuulzuul00000000000000--- features: - Support for force backup of in-use Cinder volumes in Nimble driver. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/nimble-change-location-277b7fc0c39fd91d.yaml0000664000175000017500000000104100000000000026375 0ustar00zuulzuul00000000000000--- upgrade: - | The Nimble Storage became a part of the HPE family of Storage solutions. The cinder Nimble driver has been relocated to the ``cinder.volume.driver.hpe`` module to reflect this. The impact on operators is that the module path ``cinder.volume.drivers.nimble.NimbleISCSIDriver`` and ``cinder.volume.drivers.nimble.FCDriver`` should now be updated to ``cinder.volume.drivers.hpe.nimble.NimbleISCSIDriver`` and ``cinder.volume.drivers.hpe.nimble.NimbleFCDriver`` respectively in ``cinder.conf`` ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/nimble-consistency-groups-support-7c932d5557fa725e.yaml0000664000175000017500000000012200000000000030574 0ustar00zuulzuul00000000000000--- features: - | Added consistency group support in Nimble Storage driver. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/nimble-manage-unmanage-1d6d5fc23cbe59a1.yaml0000664000175000017500000000013300000000000026427 0ustar00zuulzuul00000000000000--- features: - Manage and unmanage support has been added to the Nimble backend driver. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/nimble-mark-supported-0c8e5e21c8d1179d.yaml0000664000175000017500000000042300000000000026233 0ustar00zuulzuul00000000000000--- upgrade: - | HPE Nimble Storage drivers had been previously marked unsupported. Testing requirements have been addressed and they are now fully supported again. HPE Nimble Storage drivers allow cinder to manage volumes both in iSCSI and FC environment. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/nimble-mark-usnsupported-0c8e5e21c8d1179d.yaml0000664000175000017500000000105300000000000026761 0ustar00zuulzuul00000000000000--- upgrade: - | The Nimble driver has been marked as unsupported and is now deprecated. 
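The NFS snapshot note above gates snapshot support behind a flag that is off by default. A minimal sketch for a generic NFS back end, with a placeholder shares file path:

.. code-block:: ini

   [generic-nfs]
   volume_driver = cinder.volume.drivers.nfs.NfsDriver
   nfs_shares_config = /etc/cinder/nfs_shares
   # snapshots are only available when this is explicitly enabled
   nfs_snapshot_support = True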
    ``enable_unsupported_driver`` will need to be set to ``True`` in the
    driver's section in cinder.conf to continue to use it.
deprecations:
  - |
    The Nimble driver has been marked as unsupported and is now deprecated.
    ``enable_unsupported_driver`` will need to be set to ``True`` in the
    driver's section in cinder.conf to continue to use it. If its support
    status does not change, it will be removed in the 'U' development cycle.

cinder-27.0.0/releasenotes/notes/nimble-max-oversubscription-ratio-1d9812954f824fcf.yaml
---
fixes:
  - |
    HPE Nimble: Report the max oversubscription ratio according to the backend
    configuration option ``max_over_subscription_ratio``.

cinder-27.0.0/releasenotes/notes/nimble-qos-specs-8cd006777c66a64e.yaml
---
features:
  - Added support for QoS in the Nimble Storage driver. QoS is available from
    Nimble OS release 4.x and above.
  - Added support for deduplication of volumes in the Nimble Storage driver.

cinder-27.0.0/releasenotes/notes/nimble-replication-a5f757f7d7047065.yaml
---
features:
  - |
    HPE Nimble driver: Added group replication support.

cinder-27.0.0/releasenotes/notes/nimble-rest-api-support-75c2324ee462d026.yaml
---
features:
  - The Nimble backend driver has been updated to use REST for array
    communication.

cinder-27.0.0/releasenotes/notes/nimble-retype-support-18f717072948ba6d.yaml
---
features:
  - Support for retype and volume migration in the HPE Nimble Storage driver.

cinder-27.0.0/releasenotes/notes/nimble-thin-provision-by-default-c5ac66120b2361ef.yaml
---
other:
  - |
    Nimble driver: Enable thin provisioning as the default method when
    creating volumes.

cinder-27.0.0/releasenotes/notes/nvmeof-premature-terminate-conn-63e3cc1fd1832874.yaml
---
fixes:
  - |
    nvmeof target `bug #1966513 `_: Fixed LVM failing on terminate_connection
    if the connecting host doesn't have an iSCSI initiator name set up, for
    example if LVM is using the nvmet target.
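For the unsupported-driver notes above, a minimal cinder.conf sketch of how an
operator re-enables such a backend; the section name ``nimble-1`` is
hypothetical, while the driver path and ``enable_unsupported_driver`` option
come from the notes::

    [nimble-1]
    volume_driver = cinder.volume.drivers.hpe.nimble.NimbleISCSIDriver
    enable_unsupported_driver = True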
cinder-27.0.0/releasenotes/notes/nvmet-multipath-d35f55286f263e72.yaml
---
features:
  - |
    nvmet target driver: Added support to serve volumes on multiple addresses
    using the ``target_secondary_ip_addresses`` configuration option. This
    allows os-brick to iterate through them in search of one connection that
    works, and once os-brick supports NVMe-oF multipathing it will be
    automatically supported. This requires that the
    ``nvmeof_conn_info_version`` configuration option be set to ``2`` as well.
deprecations:
  - |
    Configuration option ``iscsi_secondary_ip_addresses`` is deprecated in
    favor of ``target_secondary_ip_addresses`` to follow the same naming
    convention as ``target_ip_address``.

cinder-27.0.0/releasenotes/notes/nvmet-shared-targets-20ed7279ef29f002.yaml
---
features:
  - |
    nvmet target driver: Added support for shared subsystems/targets using the
    ``lvm_share_target`` configuration option. Defaults to non-shared, i.e.,
    each volume has its own subsystem/target.

cinder-27.0.0/releasenotes/notes/online-migration-checks-64b0d1732901e78e.yaml
---
upgrade:
  - |
    Two new checks are added to the ``cinder-status upgrade check`` CLI to
    ensure that online data migrations from Queens onward have been completed.

cinder-27.0.0/releasenotes/notes/ontap-add-provisioned-capacity-option-2f8122663eec51ae.yaml
---
features:
  - |
    NetApp ONTAP driver: added option
    ``netapp_driver_reports_provisioned_capacity``, which enables the driver
    to calculate and report provisioned capacity to the Cinder Scheduler based
    on volume sizes in the storage system.

cinder-27.0.0/releasenotes/notes/ontap-add-storage-assisted-migration-70f6fb95dbb7e580.yaml
---
features:
  - |
    NetApp ONTAP: Added support for storage-assisted migration within the same
    ONTAP cluster (iSCSI/FC/NFS).

cinder-27.0.0/releasenotes/notes/open-e-joviandss-disable-revert-to-snapshot-359a2e0317e618ec.yaml
---
features:
  - |
    Open-E JovianDSS driver: revert-to-snapshot has been removed.
other:
  - |
    Open-E JovianDSS driver: general rework of volume and snapshot creation
    and deletion.
  - |
    Open-E JovianDSS driver: network interface selection on JovianDSS storage
    has been reworked.
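To illustrate the nvmet multi-address feature noted above, a minimal
cinder.conf sketch for an LVM backend using the nvmet target; the section name
and addresses are hypothetical, the option names come from the note::

    [lvm-nvmet]
    target_helper = nvmet
    nvmeof_conn_info_version = 2
    target_ip_address = 192.168.1.10
    target_secondary_ip_addresses = 192.168.2.10,192.168.3.10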
cinder-27.0.0/releasenotes/notes/open-e-joviandss-enable-multiattach-b1d38ffcc53bf59c.yaml
---
features:
  - |
    Open-E JovianDSS driver: Added multiattach support.
  - |
    Open-E JovianDSS driver: Added 16K block size support.

cinder-27.0.0/releasenotes/notes/operate-migrated-groups-with-cp-apis-e5835c6673191805.yaml
---
upgrade:
  - |
    After running the migration script to migrate CGs to generic volume
    groups, CG and group APIs work as follows.

    * Create CG only creates in the groups table.
    * Modify CG modifies in the CG table if the CG is in the CG table,
      otherwise it modifies in the groups table.
    * Delete CG deletes from the CG or the groups table depending on where
      the CG is.
    * List CG checks both CG and groups tables.
    * List CG Snapshots checks both the CG and the groups tables.
    * Show CG checks both tables.
    * Show CG Snapshot checks both tables.
    * Create CG Snapshot creates either in the CG or the groups table
      depending on where the CG is.
    * Create CG from Source creates in either the CG or the groups table
      depending on the source.
    * Create Volume adds the volume either to the CG or the group.
    * default_cgsnapshot_type is reserved for migrating CGs.
    * Group APIs will only write/read in/from the groups table.
    * Group APIs will not work on groups with default_cgsnapshot_type.
    * Groups with default_cgsnapshot_type can only be operated by CG APIs.
    * After CG tables are removed, we will allow default_cgsnapshot_type to be
      used by group APIs.

cinder-27.0.0/releasenotes/notes/oracle-zfssa-unsupported-4ce035213fa0e097.yaml
---
upgrade:
  - |
    The Oracle ZFSSA drivers have been marked as unsupported and are now
    deprecated. ``enable_unsupported_driver`` will need to be set to ``True``
    in the driver's section in cinder.conf to continue to use them.
deprecations:
  - |
    The Oracle ZFSSA drivers have been marked as unsupported and are now
    deprecated. ``enable_unsupported_driver`` will need to be set to ``True``
    in the driver's section in cinder.conf to continue to use them. Oracle has
    indicated that they don't plan to continue to support the drivers, so they
    will be removed in the 'U' development cycle.

cinder-27.0.0/releasenotes/notes/orphaned_unmanaged_volume-db63ec0509b70b8f.yaml
---
other:
  - |
    The PowerMax driver now puts the unmanaged "orphan" volume in a storage
    group called OS-Unmanaged. It is not possible to query a volume's
    associated snapvx snapshots using the PowerMax management software unless
    it belongs to a storage group.
cinder-27.0.0/releasenotes/notes/os-brick-lock-dir-35bdd8ec0c0ef46d.yaml
---
issues:
  - When running Nova Compute and Cinder Volume or Backup services on the same
    host, they must use a shared lock directory to avoid rare race conditions
    that can cause volume operation failures (primarily attach/detach of
    volumes). This is done by setting the ``lock_path`` to the same directory
    in the ``oslo_concurrency`` section of nova.conf and cinder.conf. This
    issue affects all previous releases utilizing os-brick and shared
    operations on hosts between Nova Compute and Cinder data services.

cinder-27.0.0/releasenotes/notes/per-backend-az-28727aca360a1cc8.yaml
---
features:
  - |
    Availability zones may now be configured per backend in a multi-backend
    configuration. Individual backend sections can now set the configuration
    option ``backend_availability_zone``. If set, this value will override
    the [DEFAULT] ``storage_availability_zone`` setting, as shown in the
    sketch after this group of notes.

cinder-27.0.0/releasenotes/notes/period-task-clean-reservation-0e0617a7905df923.yaml
---
features:
  - Added a periodic task to clean expired reservations in the cinder
    scheduler. Added a configuration option ``reservation_clean_interval`` to
    control the interval.

cinder-27.0.0/releasenotes/notes/permit_volume_type_operations-b2e130fd7088f335.yaml
---
fixes:
  - |
    Enabled a cloud operator to correctly manage policy for volume type
    operations. To permit volume type operations for a specific user, you
    can, for example, do the following.

    * Add a ``storage_type_admin`` role.
    * Add an ``admin_or_storage_type_admin`` rule to ``policy.json``, e.g.
      ``"admin_or_storage_type_admin": "is_admin:True or role:storage_type_admin",``
    * Modify the rules for types_manage and volume_type_access, e.g.
      ``"volume_extension:types_manage": "rule:admin_or_storage_type_admin",
      "volume_extension:volume_type_access:addProjectAccess": "rule:admin_or_storage_type_admin",
      "volume_extension:volume_type_access:removeProjectAccess": "rule:admin_or_storage_type_admin",``

cinder-27.0.0/releasenotes/notes/policy-for-type-list-and-show-apis-rt56uy78crt5e378.yaml
---
fixes:
  - Two new policies, "volume_extension:type_get" and
    "volume_extension:type_get_all", have been added to control the type show
    and type list APIs.
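For the per-backend availability zone note above, a minimal multi-backend
cinder.conf sketch; the backend section names and zone names are hypothetical,
while ``backend_availability_zone`` and ``storage_availability_zone`` come
from the note::

    [DEFAULT]
    enabled_backends = lvm-az1,lvm-az2
    storage_availability_zone = nova

    [lvm-az1]
    volume_driver = cinder.volume.drivers.lvm.LVMVolumeDriver
    backend_availability_zone = az1

    [lvm-az2]
    volume_driver = cinder.volume.drivers.lvm.LVMVolumeDriver
    backend_availability_zone = az2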
cinder-27.0.0/releasenotes/notes/policy-in-code-226f71562ab28195.yaml
---
features:
  - |
    Cinder now supports policy in code, which means that if users don't need
    to modify any of the default policy rules, they do not need a policy file.
    Users can modify/generate a `policy.yaml` file which will override
    specific policy rules from their defaults.
other:
  - |
    The default `policy.json` file has been removed, as Cinder now uses
    default policies defined in code. A policy file is only needed if
    overriding one of the defaults.

cinder-27.0.0/releasenotes/notes/powermax-90-to-91-endpoints-a92c4d158cb63fe4.yaml
---
other:
  - |
    PowerMax driver - the minimum version of Unisphere for PowerMax required
    for Train is 9.1, so all the latest 91 REST endpoints will be used.

cinder-27.0.0/releasenotes/notes/powermax-91-to-92-endpoints-bb467c8aca0165dd.yaml
---
other:
  - |
    PowerMax driver - the minimum version of Unisphere for PowerMax required
    for Victoria is 9.2, so all the latest 92 REST endpoints will be used.

cinder-27.0.0/releasenotes/notes/powermax-active-active-support-bec2d96480046d82.yaml
---
features:
  - |
    Dell PowerMax driver: Enabled support for Active/Active in both the FC and
    iSCSI drivers. This allows users to configure Dell PowerMax backends in
    clustered environments.

cinder-27.0.0/releasenotes/notes/powermax-auto-migration-5cc57773c23fef02.yaml
---
features:
  - |
    The PowerMax driver moves the legacy shared volume from the masking view
    structure in Ocata and prior releases (when SMI-S was supported) to
    staging masking view(s) in Pike and later releases (U4P REST). In Ocata,
    the live migration process shared the storage group, containing the
    volume, among the different compute nodes. In Pike, we changed the masking
    view structure to facilitate a cleaner live migration process where only
    the intended volume is migrated without exposing other volumes in the
    storage group. The staging storage group and masking views facilitate a
    seamless live migration operation in upgraded releases.

cinder-27.0.0/releasenotes/notes/powermax-bug-1853589-f6c7164177da0496.yaml
---
fixes:
  - |
    PowerMax driver - fix to eliminate the 'cannot use the device for the
    function because it is in a Copy Session' error when attempting to delete
    a volume group that previously had a group snapshot created on and deleted
    from it.
cinder-27.0.0/releasenotes/notes/powermax-bug-1875478-8c9072ad9a87b83d.yaml
---
fixes:
  - |
    `Bug #1875478 `_: PowerMax Driver - Concurrent live migrations can
    sometimes fail when one thread deletes a storage group that another
    thread may need.

cinder-27.0.0/releasenotes/notes/powermax-bug-1894086-iterator-expiration-674a28d8b9e13b34.yaml
---
fixes:
  - |
    `Bug #1894086 `_: PowerMax Cinder driver addresses an issue whereby
    Unisphere REST iterators expire before all data can be read from them.
    The iterator expiration is now set to 180 minutes, and the iterator is
    deleted once all data has been read so no artifacts are left behind.

cinder-27.0.0/releasenotes/notes/powermax-bug-1930290-4f598329a6ced006.yaml
---
fixes:
  - |
    PowerMax driver `bug #1930290 `_: This fixes the QoS conflict issue on a
    child storage group by not setting QoS on a parent storage group.

cinder-27.0.0/releasenotes/notes/powermax-create-grp-source-560139c0850e60ce.yaml
---
fixes:
  - |
    PowerMax driver: Previously, the target storage group created from a
    replicated storage group was also replicated, which could cause failures.
    This fix creates a non-replicated target initially, and lets the replicate
    group API take care of replicating it.

cinder-27.0.0/releasenotes/notes/powermax-deadlock-5fdcacb63ca87159.yaml
---
fixes:
  - |
    Dell EMC PowerMax driver `bug #1980870 `_: Fixed potential deadlock when
    moving volumes between Storage Groups.

cinder-27.0.0/releasenotes/notes/powermax-delete-replication-group-76656e96262201d5.yaml
---
fixes:
  - |
    PowerMax driver: Fix to suspend the storage group you are about to delete
    and then add a force flag to delete the volume pairs within the storage
    group.

cinder-27.0.0/releasenotes/notes/powermax-disable-inuse-metro-89e9f398ec9e2672.yaml
---
issues:
  - |
    PowerMax driver - Disabling in-use storage-assisted migration to a metro
    or asynchronous replicated volume type, as this operation will not
    facilitate FC scanning or iSCSI login of the target array.
cinder-27.0.0/releasenotes/notes/powermax-existing-host-092f7daf29053d82.yaml
---
fixes:
  - |
    PowerMax driver: Enhancement to use an existing initiator group even if
    there is no entry for the contained initiator(s) in the login table. This
    is permissible so long as the initiator(s) in the connector object match.

cinder-27.0.0/releasenotes/notes/powermax-expand-replicated-volume-124c62ea78b1c347.yaml
---
fixes:
  - |
    PowerMax driver: Fix to prevent an R2 volume being larger than the R1 so
    that an extend operation will not fail if the R2 happens to be larger than
    the requested extend size.

cinder-27.0.0/releasenotes/notes/powermax-failover-abilities-1fa0a23128f1c00b.yaml
---
features:
  - |
    The PowerMax for Cinder driver now supports the ability to transition to a
    new primary array as part of the failover process if the existing primary
    array is deemed unrecoverable.

cinder-27.0.0/releasenotes/notes/powermax-inuse-retype-support-64bd35adab17420d.yaml
---
features:
  - |
    The PowerMax for Cinder driver now supports storage-assisted in-use retype
    for volumes, including those in replication sessions.

cinder-27.0.0/releasenotes/notes/powermax-load-balance-9cd152e53ecb34fd.yaml
---
features:
  - |
    The PowerMax for Cinder driver now supports Port Group and Port load
    balancing when attaching Nova Compute instances to volumes on the backend
    PowerMax.

cinder-27.0.0/releasenotes/notes/powermax-multiple-replication-devices-0cc532ae621ea9a5.yaml
---
features:
  - |
    PowerMax Driver - Support to allow the use of multiple replication modes
    on one backend array.

cinder-27.0.0/releasenotes/notes/powermax-ode-metro-support-ed50bb20f932548b.yaml
---
features:
  - |
    The PowerMax for Cinder driver now supports extending in-use Metro RDF
    enabled volumes.

cinder-27.0.0/releasenotes/notes/powermax-snapvx-link-mode-0050ac6b4a16c739.yaml
---
other:
  - |
    The PowerMax for Cinder driver now implements noCopy mode for links
    between SnapVX source and target. This change will improve space
    efficiency by using pointers instead of copied tracks when source and
    target volumes are linked.
cinder-27.0.0/releasenotes/notes/powermax-srdf-enhancement-56b0a2817c4d310d.yaml
---
other:
  - |
    PowerMax Driver - Two new replication-specific configuration options,
    ``sync_interval`` and ``sync_retries``, have been added to the PowerMax
    cinder configuration. These configuration options determine how many
    times to retry checks to see if a SnapVX copy mode has completed with a
    replication-enabled volume, and how long to wait between retries.

cinder-27.0.0/releasenotes/notes/powermax-storage-group-tagging-d2281e9b35994bec.yaml
---
features:
  - |
    The Dell EMC PowerMax driver now supports Unisphere storage group and
    array tagging, allowing the user to specify a user-defined tag to
    facilitate easy access and classification.

cinder-27.0.0/releasenotes/notes/powermax-tdev-deallocation-90bda0f95ab0b271.yaml
---
features:
  - |
    PowerMax driver - Volume deallocate and volume delete functionality have
    been combined into a single workflow.

cinder-27.0.0/releasenotes/notes/powermax-train-tag-removal-1dfa77df7440e5f5.yaml
---
upgrade:
  - |
    The PowerMax Cinder driver has removed the environment configuration
    option ``san_rest_port`` in favour of the Cinder standard option
    ``san_api_port``.

cinder-27.0.0/releasenotes/notes/powermax-user-defined-hostname-portgroup-0b01aaaa730dfaaf.yaml
---
features:
  - |
    The Dell EMC PowerMax driver now facilitates overriding the short host
    name and port group name seen in PowerMax masking view and storage view
    terminology. This means the user can give more meaningful names,
    especially when the short host name exceeds 16 characters or the port
    group name exceeds 12 characters, which is the condition where the driver
    truncates these values.

cinder-27.0.0/releasenotes/notes/powermax-vol-metadata-acd2555818d25b72.yaml
---
features:
  - |
    All volumes and snapshots created using the PowerMax for Cinder driver now
    have additional metadata included pertaining to the details of the asset
    on the backend storage array.

cinder-27.0.0/releasenotes/notes/powermax_initiator_check-249279d30e3f8322.yaml
fixes:
  - |
    PowerMax driver: Checking that the contents of the initiator group match
    the contents of the connector regardless of the initiator_check option
    being enabled.
    This will ensure an exception is raised if there is a mismatch, in all
    scenarios.

cinder-27.0.0/releasenotes/notes/powermax_legacy_generation_fix-09e437f955cd9d70.yaml
---
fixes:
  - |
    PowerMax driver `bug #1938572 `_: Legacy PowerMax OS fix to convert an int
    to a string if the generation of a snapVX snapshot is returned as an int
    from REST, so that a 0 does not equate to False in Python.

cinder-27.0.0/releasenotes/notes/powermax_port-check_enhancement-c95dd94328f31524.yaml
---
fixes:
  - |
    PowerMax driver: Enhancement to check the status of the ports in the port
    group so that any potential issue, like the ports being down, is
    highlighted early and clearly.

cinder-27.0.0/releasenotes/notes/powerstore-nvme-tcp-support-ee37cf4fdbce1621.yaml
---
features:
  - |
    Dell PowerStore driver: Added NVMe-TCP support.

cinder-27.0.0/releasenotes/notes/powerstore-request-data-validation-6268f2ed07b7bf40.yaml
---
fixes:
  - |
    PowerStore driver `bug #1981068 `_: Fixed request data validation for the
    REST client.

cinder-27.0.0/releasenotes/notes/privsep-rocky-35bdfe70ed62a826.yaml
---
security:
  - |
    Privsep transitions. Cinder is transitioning from using the older style
    rootwrap privilege escalation path to the new style Oslo privsep path.
    This should improve performance and security of Cinder in the long term.
  - |
    Privsep daemons are now started by Cinder when required. These daemons can
    be started via rootwrap if required. rootwrap configs therefore need to be
    updated to include new privsep daemon invocations.
upgrade:
  - |
    The following commands are no longer required to be listed in your
    rootwrap configuration: cgcreate and cgset.

cinder-27.0.0/releasenotes/notes/project-default-types-3a14ad0d653e604e.yaml
---
features:
  - |
    Added support for project-specific default volume types. Microversion
    3.62 of the Block Storage API introduces new calls to set, get, and unset
    a default volume type for a specific project. Project-specific defaults
    have higher priority than the ``default_volume_type`` option in
    cinder.conf.

cinder-27.0.0/releasenotes/notes/project-id-optional-in-urls-db97e2c447167853.yaml
---
features:
  - |
    Inclusion of a project_id in API URLs is now optional.
    The `Block Storage API V3 `_ reference guide continues to show URLs with a
    project_id because the legacy behavior continues to be supported. A new
    API microversion V3.67 is introduced to inform clients when inclusion of a
    project_id in API URLs is optional. The V3.67 microversion is only used as
    an indicator that the API accepts a URL without a project_id, and this
    applies to all requests regardless of the microversion in the request.
    For example, an API node serving V3.67 or greater will accept a URL
    without a project_id even if the request asks for V3.0. Likewise, it will
    accept a URL containing a project_id even if the request asks for V3.67.
upgrade:
  - |
    Upgrades are not affected by the new functionality whereby a project_id
    is no longer required in API URLs. The legacy behavior in which a
    project_id is included in the URL continues to be supported. Detection of
    whether a URL includes a project_id is based on the value of a new
    ``project_id_regex`` option. The default value matches UUIDs created by
    keystone.

cinder-27.0.0/releasenotes/notes/promotion_offline_r1_fix-f7a008d0d13a3eff.yaml
---
fixes:
  - |
    PowerMax Driver - `bug #1908920 `_: This offline R1 promotion fix resets
    replication-enabled and configuration metadata during a promotion retype
    with an offline R1 array. It also gets the management storage group name
    from the source extra_specs during promotion.

cinder-27.0.0/releasenotes/notes/promotion_rdfg_num_fix-65a5838277ac8edf.yaml
---
fixes:
  - |
    PowerMax Driver - Promotion RDF Group number fix: use the remote array SID
    when finding the RDF group number while performing a retype during
    failover.

cinder-27.0.0/releasenotes/notes/prophetstor-generic-groups-c7136c32b2f75c0a.yaml
---
features:
  - Added consistent group capability to generic volume groups in the
    ProphetStor driver.

cinder-27.0.0/releasenotes/notes/ps-removedriver-5ba447c50f2474e7.yaml
---
upgrade:
  - |
    The Dell EMC PS Series storage driver is no longer supported and has been
    removed starting from the Ussuri release. It was marked as deprecated in
    the Train release.

cinder-27.0.0/releasenotes/notes/pure-active-active-support-dbd0d3da3ab64e64.yaml
---
features:
  - |
    Pure Storage FlashArray driver: Enabled support for Active/Active in both
    the iSCSI and FC drivers. This allows users to configure Pure Storage
    backends in clustered environments.
cinder-27.0.0/releasenotes/notes/pure-active-cluster-edf8e7e80739b0f8.yaml
---
features:
  - Added support to Pure Storage Volume Drivers for Active Cluster using the
    standard replication APIs of the Block Storage Service.

cinder-27.0.0/releasenotes/notes/pure-check-nvmefc-ports-cf2aec3952d8192f.yaml
---
fixes:
  - |
    Pure Storage Driver: Add internal check to allow for FlashArray with joint
    FC and NVMe-FC support.

cinder-27.0.0/releasenotes/notes/pure-cinder-manage-aa40d62bf2bd0d33.yaml
---
fixes:
  - |
    Pure Storage driver `bug #2096801 `_: Fixed issue using Manage Volumes
    from the GUI.

cinder-27.0.0/releasenotes/notes/pure-configure-pg-pod-names-525a4ce8e2f46b69.yaml
---
features:
  - |
    The Pure Storage FlashArray driver has added configuration options
    ``pure_replication_pg_name`` and ``pure_replication_pod_name`` for setting
    the names of replication PGs and Pods (see the sketch after this group of
    notes).

cinder-27.0.0/releasenotes/notes/pure-custom-user-agent-dcca4cb44b69e763.yaml
---
upgrade:
  - Pure volume drivers will need the 'purestorage' python module v1.6.0 or
    newer. Support for 1.4.x has been removed.

cinder-27.0.0/releasenotes/notes/pure-default-replica-interval-07de0a56f61c7c1e.yaml
---
upgrade:
  - The default value for pure_replica_interval_default used by Pure Storage
    volume drivers has changed from 900 to 3600 seconds.
fixes:
  - Fixes an issue where starting the Pure volume drivers with replication
    enabled and default values for pure_replica_interval_default would cause
    an error to be raised from the backend.

cinder-27.0.0/releasenotes/notes/pure-enhanced-stats-42a684fe4546d1b1.yaml
---
features:
  - Added additional metrics reported to the scheduler for Pure Volume Drivers
    for better filtering and weighing functions.
  - Added a config option to enable/disable automatic calculation of the
    maximum over-subscription ratio for Pure Volume Drivers. When disabled,
    the drivers will now respect the max_oversubscription_ratio config option.
fixes:
  - Fixed issue where Pure Volume Drivers would ignore the reserved_percentage
    config option.
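For the Pure Storage replication naming options noted above, a minimal
cinder.conf sketch of a FlashArray backend section; the section name and the
PG/Pod names are hypothetical, while the option names come from the note::

    [puredriver-1]
    volume_driver = cinder.volume.drivers.pure.PureISCSIDriver
    pure_replication_pg_name = cinder-group
    pure_replication_pod_name = cinder-pod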
cinder-27.0.0/releasenotes/notes/pure-eradicate-on-delete-1e15e1440d5cd4d6.yaml
---
features:
  - New config option for Pure Storage volume drivers,
    pure_eradicate_on_delete. When enabled, data will be permanently
    eradicated instead of being placed into a pending eradication state.
fixes:
  - Allow for eradicating Pure Storage volumes, snapshots, and pgroups when
    deleting their Cinder counterpart.

cinder-27.0.0/releasenotes/notes/pure-fc-wwpn-case-c1d97f3fa7663acf.yaml
---
fixes:
  - Fix issue with PureFCDriver where a partially case-sensitive comparison of
    the connector wwpn could cause initialize_connection to fail when
    attempting to create a duplicate Purity host.

cinder-27.0.0/releasenotes/notes/pure-generic-volume-groups-2b0941103f7c01cb.yaml
---
features:
  - Add consistent group capability to generic volume groups in Pure drivers.

cinder-27.0.0/releasenotes/notes/pure-host-personality-3512f7ccd961d4ad.yaml
---
features:
  - |
    The Pure Storage FlashArray driver has added the configuration option
    ``pure_host_personality`` for setting the host personality upon host
    creation (existing hosts are not affected).

cinder-27.0.0/releasenotes/notes/pure-iscsi-cidr-cbc1afb3850a9217.yaml
---
features:
  - |
    The Pure Storage FlashArray driver has added the configuration option
    ``pure_iscsi_cidr`` for setting a network CIDR for iSCSI target
    connection. The default value allows connections to all iSCSI targets.

cinder-27.0.0/releasenotes/notes/pure-iscsi-cidrs-7195eda9f7214fce.yaml
---
fixes:
  - |
    Pure Storage FlashArray driver `bug 1910143 `_: Parameter
    ``pure_iscsi_cidr`` is now IPv4/v6 agnostic.
features:
  - |
    Pure Storage FlashArray driver: added configuration option
    ``pure_iscsi_cidr_list`` for setting several network CIDRs for iSCSI
    target connection. Both IPv4 and IPv6 are supported. The default still
    allows all IPv4 targets.

cinder-27.0.0/releasenotes/notes/pure-list-mangeable-fed4a1b23212f545.yaml
---
features:
  - Add ``get_manageable_volumes`` and ``get_manageable_snapshots``
    implementations for Pure Storage Volume Drivers.
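To illustrate the Pure Storage iSCSI target restriction and host personality
options described above, a minimal cinder.conf sketch; the section name, CIDR
values and personality value are hypothetical, while the option names come
from the notes::

    [puredriver-1]
    volume_driver = cinder.volume.drivers.pure.PureISCSIDriver
    pure_iscsi_cidr_list = 10.0.0.0/24,fd00:10::/64
    pure_host_personality = oracle-vm-server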
cinder-27.0.0/releasenotes/notes/pure-replicated-cg-03016fa79bcd51c1.yaml
---
features:
  - |
    [Pure Storage] Corrected support status to True for generic group
    consistency snapshot support and added support for replication-enabled
    consistency groups.

cinder-27.0.0/releasenotes/notes/pure-report-addressing-91963e29fbed32a4.yaml
---
fixes:
  - |
    Pure iSCSI & FC driver `bug #2006960 `_: Fixed attaching LUNs greater than
    255. The driver leverages new os-brick functionality to specify the LUN
    addressing mode.

cinder-27.0.0/releasenotes/notes/pure-storage-add-qos-37958a90beff12d6.yaml
---
features:
  - Added support for QoS in the Pure Storage drivers. QoS support is
    available from Purity//FA 5.3.0.

cinder-27.0.0/releasenotes/notes/pure-storage-change-purity-support-b94057d3842a80a8.yaml
---
upgrade:
  - |
    Pure Storage: Minimum supported FlashArray Purity//FA is changed to 5.3.0.
    All FlashArray backends must be at least this minimum version or the
    driver will not initialize.
fixes:
  - |
    Pure Storage: Remove all API version checks in the driver, as the new
    minimum FlashArray Purity//FA version supports all previously
    version-gated features and functionality.
other:
  - |
    Pure Storage: FlashArray minimum Purity//FA version is increased to 5.3.0.
    All FlashArray backends must be at least this minimum version or the
    driver will not initialize.

cinder-27.0.0/releasenotes/notes/pure-storage-driver-typo-a24d19021f25a4f8.yaml
---
fixes:
  - |
    `Bug #2083532 `_: [Pure Storage] Fixed creation of volumes with only IOPS
    QoS.

cinder-27.0.0/releasenotes/notes/pure-storage-fix-failover-fe6260a112409742.yaml
---
fixes:
  - |
    [Pure Storage] `bug #2028380 `_: Fixed issue with cinder replication
    failover failing due to an incorrect REST call.

cinder-27.0.0/releasenotes/notes/pure-storage-multiattach-support-994da363e181d627.yaml
---
features:
  - The Pure Storage FlashArray driver has added multiattach support.
cinder-27.0.0/releasenotes/notes/pure-storage-nvme-driver-f4217c00379c4827.yaml
---
features:
  - |
    Pure Storage adds a new driver to support NVMe-RoCE for the FlashArray.
    All features of the iSCSI and FC drivers are fully supported by this new
    driver.

cinder-27.0.0/releasenotes/notes/pure-storage-revert-snapshot-b7e0ec4f958418c4.yaml
---
features:
  - Add revert-to-snapshot support in the Pure Storage Cinder driver.

cinder-27.0.0/releasenotes/notes/pure-v2.1-replication-0246223caaa8a9b5.yaml
---
features:
  - Added Cheesecake (v2.1) replication support to the Pure Storage Volume
    drivers.

cinder-27.0.0/releasenotes/notes/pure-validate-replica-arrays-a76630cab9435770.yaml
---
fixes:
  - |
    Pure Storage FlashArray driver `bug #1969784 `_: Fixed array failover
    incorrectly handling the loss of an array due to a network issue.

cinder-27.0.0/releasenotes/notes/pure-verify-https-requests-464320c97ba77a1f.yaml
---
security:
  - Pure Storage Volume Drivers can now utilize the driver_ssl_cert_verify and
    driver_ssl_cert_path config options to allow for secure HTTPS requests to
    the FlashArray.

cinder-27.0.0/releasenotes/notes/pure_aa_replication-e3f5f6666f0b0c43.yaml
---
features:
  - |
    Pure Storage FlashArray driver: Enabled support for Active/Active
    replication for the FlashArray driver. This allows users to configure
    FlashArray backends in clustered environments.

cinder-27.0.0/releasenotes/notes/pure_consistent_hostname-419f9c31cb77e16d.yaml
---
fixes:
  - |
    [Pure Storage] Fixed issue where hypervisor hostnames on replicated
    backends for synchronously replicated volumes were not the same.

cinder-27.0.0/releasenotes/notes/pure_eg1_dr-f08544454cfd105e.yaml
---
fixes:
  - |
    Pure Storage driver `Bug #2119222 `_: Fixed issue with EG1
    subscription-based FlashArrays not reporting data reduction rates.
cinder-27.0.0/releasenotes/notes/pure_evergreen_one_model-0533b91fb096c468.yaml
---
fixes:
  - |
    `Bug #2090310 `_: [Pure Storage] Fixed issue with FlashArrays using the
    Evergreen//One consumption model not reporting ``total_provisioned``.
    Used ``used_provisioned`` instead in this case.

cinder-27.0.0/releasenotes/notes/pure_failover_sync-86814167598af2f8.yaml
---
features:
  - |
    Pure Storage driver: Allow synchronously replicated volumes to be created
    during a replication failover event. These will remain viable volumes when
    the replication is failed back to its original state.
fixes:
  - |
    [Pure Storage] `Bug #2035404 `_: Fixed issue with a missing replication
    pod causing the driver to fail on restart.

cinder-27.0.0/releasenotes/notes/pure_fc_personality-3cada97fc940e498.yaml
---
fixes:
  - |
    Pure Storage driver: Add missing support for the ``host_personality``
    setting for FC-based hosts.

cinder-27.0.0/releasenotes/notes/pure_fix_clone_cg-cfdf7d16b63882f8.yaml
---
fixes:
  - |
    Pure Storage FlashArray driver `bug #1936663 `_: Fixes issue where cloning
    a consistency group containing volumes with very long names causes a
    crash - required for PowerVC support.

cinder-27.0.0/releasenotes/notes/pure_fix_clone_qos-4b80be464b506e4c.yaml
---
fixes:
  - |
    Pure Storage driver `bug #2100547 `_: Fixed issue where volumes created as
    clones from a source image volume did not get the defined QoS settings
    associated with the volume type used.

cinder-27.0.0/releasenotes/notes/pure_fix_replica_interval-917cd01f23ac45cc.yaml
---
fixes:
  - |
    Pure Storage driver `bug #2115284 `_: The snapshot replication interval in
    `cinder.conf` is set in seconds, but the backend expects it in
    milliseconds. Added a fix to handle the conversion.

cinder-27.0.0/releasenotes/notes/pure_lacp_iscsi-34678bdb98fa6bab.yaml
---
fixes:
  - |
    Pure Storage `bug #2101859 `_: Fixed issue where LACP bonds were not being
    correctly identified as iSCSI and NVMe targets.
cinder-27.0.0/releasenotes/notes/pure_manage_quota_delete-dd24495e883498e7.yaml
---
fixes:
  - |
    Pure Storage driver `Bug #21119059 `_: Fixed volume deletion issue when
    attempting to manage a new volume that exceeds the tenant's storage quota.

cinder-27.0.0/releasenotes/notes/pure_nvme_tcp-a00efa8966a74f77.yaml
---
features:
  - |
    Pure Storage FlashArray driver: Added support for the NVMe-TCP transport
    layer.

cinder-27.0.0/releasenotes/notes/pure_per_gb_qos-0b96279d615b81a1.yaml
---
features:
  - |
    [Pure Storage] Added new QoS spec parameters to support QoS per GB. The
    new spec options are ``maxIOPS_per_GB`` and ``maxBWS_per_GB``. If either
    of these is provided along with the equivalent ``max`` value, the ``max``
    value will take precedence.

cinder-27.0.0/releasenotes/notes/pure_pod_safemode-d64b0705828529e5.yaml
---
fixes:
  - |
    [Pure Storage] When using synchronous replication, ensure that FlashArray
    pods used by Cinder do not have SafeMode protection groups attached, as
    SafeMode is not supported by Cinder.

cinder-27.0.0/releasenotes/notes/pure_realm_manage_fix-eb5fe76e7c55297d.yaml
---
fixes:
  - |
    Pure Storage driver: Fixed issue with FlashArray secure tenant volumes and
    snapshots, as these are not eligible to be managed.

cinder-27.0.0/releasenotes/notes/pure_reconnect_failure-7bbc135eecc77695.yaml
---
fixes:
  - |
    Pure Storage `bug #2121464 `_: Fixed ``AttributeError`` when trying to
    connect a volume to a host when the volume is already connected to the
    host.

cinder-27.0.0/releasenotes/notes/pure_replication_capability-f9fa78aa96501a69.yaml
---
features:
  - |
    Pure Storage driver: Added replication capability to backend pool
    information. The response will be ``async``, ``sync`` or ``trisync``.
    ``sync`` implies support for ``async``, and ``trisync`` implies support
    for ``sync`` and ``async``.

cinder-27.0.0/releasenotes/notes/pure_retype_sync_support-fe72a726f83cb063.yaml
---
features:
  - |
    Pure Storage: Added support for retyping a simple or async-replicated
    volume into a sync or trisync-replicated volume.
    Additionally, the ``cinder manage`` command may now be used to bring
    existing sync-replicated volumes under cinder control; previously such
    volumes could not be managed.

cinder-27.0.0/releasenotes/notes/pure_sdk_version_checks-257cb8387ed6f5f8.yaml
---
upgrade:
  - |
    Pure Storage FlashArray minimum ``purestorage`` SDK version increased to
    1.17.0.
fixes:
  - |
    Pure Storage FlashArray driver `bug #1929219 `_: Fixes issue with an
    incorrect internal mechanism for checking the REST API version of the
    backend array. This has no external effect for users.

cinder-27.0.0/releasenotes/notes/pure_storage_add_volume_metadata-89f1e23573efcf83.yaml
---
features:
  - |
    The Pure Storage driver adds volume metadata describing the backend array
    name (``array_name``) and volume name (``array_volume_name``). This allows
    easier identification and location of a cinder volume when multiple
    clusters are using the same backend arrays, or when the cinder scheduler
    has multiple backends to choose from.

cinder-27.0.0/releasenotes/notes/pure_storage_fix_clone_provider_id_powervc.yaml-e794f05b0cd90f45.yaml
---
fixes:
  - |
    Pure Storage FlashArray driver `bug #1938579 `_: Fixes issue when cloning
    multiple volumes in PowerVC deployments.

cinder-27.0.0/releasenotes/notes/pure_storage_multiattach-f4aee3576757b2ff.yaml
---
fixes:
  - |
    Pure Storage `bug #1930748 `_: Fixed issues with multiattached volumes
    being disconnected from a backend while still listed as an attachment to
    an instance.

cinder-27.0.0/releasenotes/notes/pure_storage_scheduler_data-9b28bb309b17e8aa.yaml
---
features:
  - |
    Pure Storage: Added additional IO queueing performance characteristics
    ``queue_usec_per_mirrored_write_op``, ``queue_usec_per_read_op`` and
    ``queue_usec_per_write_op`` to the array statistics used by the scheduler.
  - |
    Pure Storage: Changed performance metrics to report the average over the
    previous 30 seconds, rather than using point-in-time information.
deprecations:
  - |
    Pure Storage: Deprecation of the ``queue_depth`` performance
    characteristic returned by array statistics. This will be fully removed
    in the 2026.1 release.

cinder-27.0.0/releasenotes/notes/pure_tempest_cg_fix-913d405f7487de00.yaml
---
fixes:
  - |
    Pure Storage FlashArray driver fix to ensure cinder_tempest_plugin
    consistency group tests pass.
cinder-27.0.0/releasenotes/notes/pure_trisync_replication-d236bba76a1ebea5.yaml
---
features:
  - |
    Pure Storage driver: Added support for 3-site replication, aka trisync.
    Requires two replication devices to be created, one async and one sync,
    plus the addition of the new parameters ``pure_trisync_enabled`` and
    ``pure_trisync_pg_name`` (see the sketch after this group of notes).

cinder-27.0.0/releasenotes/notes/pure_uniform_disconnect_bug-357c849bf12e8832.yaml
---
fixes:
  - |
    Pure Storage Cinder Driver: Fixed `bug 2029005 `__ to correctly disconnect
    a sync-replicated volume from the host on the secondary array when the
    uniform option is set to True.

cinder-27.0.0/releasenotes/notes/pure_version_bump-4877df33faa27efa.yaml
---
other:
  - |
    [Pure Storage] The ``user_agent`` string changed from reporting the kernel
    version to reporting the operating system distro and version.

cinder-27.0.0/releasenotes/notes/pure_vlan_lacp-d58f141282efb723.yaml
---
fixes:
  - |
    [Pure Storage] Resolved issue where LACP bonds being defined as part of a
    VLAN resulted in target ports not being correctly identified.

cinder-27.0.0/releasenotes/notes/pure_vol_tags-a2efbd9909697771.yaml
---
features:
  - |
    [Pure Storage] Added FlashArray volume tags for future use with Pure
    Storage Data Intelligence tooling.

cinder-27.0.0/releasenotes/notes/pure_volume_group_support-303a4585277b4e1f.yaml
---
features:
  - |
    [Pure Storage] Volume Group support added through new vendor-specific
    volume type extra specs. Volume Groups can be used to isolate tenant
    volumes into their own area in a FlashArray, and this volume group can
    have tenant-wide storage QoS for the volume group. Full replication
    support is also available for volumes in volume groups, and existing
    volume group volumes (such as VMware vVols) can be managed directly.

cinder-27.0.0/releasenotes/notes/purestorage_rest_upgrade-b830122e37d2958a.yaml
---
features:
  - |
    Pure Storage FlashArray drivers upgraded to remove REST 1.x support and
    changed to REST 2.4 as the minimum supported version.
  - |
    Pure Storage FlashArray drivers changed the minimum supported Purity//FA
    version to 6.1.0.
upgrade:
  - |
    [Pure Storage] Changed the Python SDK driver requirement from
    ``purestorage`` to ``py-pure-client`` to support the change to Purity//FA
    REST 2.x API calls.
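For the trisync replication parameters noted above, a minimal cinder.conf
sketch of a Pure backend section; the section name, protection group name,
addresses, token and ``replication_device`` values are hypothetical, only
``pure_trisync_enabled`` and ``pure_trisync_pg_name`` come from the note::

    [puredriver-1]
    volume_driver = cinder.volume.drivers.pure.PureISCSIDriver
    pure_trisync_enabled = True
    pure_trisync_pg_name = cinder-trisync
    replication_device = backend_id:array-async,san_ip:10.0.0.2,api_token:TOKEN1,type:async
    replication_device = backend_id:array-sync,san_ip:10.0.0.3,api_token:TOKEN2,type:sync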
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/qb-backup-5b1f2161d160648a.yaml0000664000175000017500000000016100000000000023502 0ustar00zuulzuul00000000000000--- fixes: - | Fixed a bug in the Quobyte driver that prevented backing up volumes and snapshots. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/qb-overlay-from-snap-cache-dc102acb4820e368.yaml0000664000175000017500000000051700000000000027022 0ustar00zuulzuul00000000000000--- features: - | Added a new option ``quobyte_overlay_volumes`` for the Quobyte volume driver. This option activates internal snapshots which allow volumes to be created from snapshots as overlay files based on the volume-from-snapshot cache. This significantly speeds up the creation of volumes from large snapshots. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/qb-switch-nas-sec-opts-635c6ef1205e4f3f.yaml0000664000175000017500000000113000000000000026221 0ustar00zuulzuul00000000000000--- upgrade: - | In order to simplify initial setup for new installations, the default behaviour of the Quobyte driver for the options ``nas_secure_file_operations`` and ``nas_secure_file_permissions`` has changed. The 'auto' value is no longer mapped to true but to false, so the driver now runs without secure settings by default. Installations that relied on the old default values should explicitly set these options to true when moving to this new Cinder Quobyte driver version (see the configuration sketch after this group of notes). ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/qnap-enhance-support-4ab5cbb110b3303b.yaml0000664000175000017500000000023000000000000026076 0ustar00zuulzuul00000000000000--- features: - | Added enhanced support to the QNAP Cinder driver, including 'CHAP', 'Thin Provision', 'SSD Cache', 'Dedup' and 'Compression'. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/qnap-support-qes-200-2a3dda49afe14103.yaml0000664000175000017500000000011300000000000025603 0ustar00zuulzuul00000000000000--- features: - | QNAP Cinder driver added support for QES fw 2.0.0. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/qnap-support-qes-210-de75892f684cb9c3.yaml0000664000175000017500000000011300000000000025567 0ustar00zuulzuul00000000000000--- features: - | QNAP Cinder driver added support for QES fw 2.1.0. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/qnap-support-qts-440-c482f109694cb77f.yaml0000664000175000017500000000011600000000000025527 0ustar00zuulzuul00000000000000--- features: - | Added support for QTS fw 4.4.0 to QNAP Cinder driver. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/qnap-tds-support-qes-5e5d766cded3a26d.yaml0000664000175000017500000000011600000000000026204 0ustar00zuulzuul00000000000000--- features: - | QNAP Cinder driver supports QES FW on TDS series NAS. 
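To illustrate the ``nas_secure_file_operations`` / ``nas_secure_file_permissions`` default change described in the Quobyte note above, a minimal, hypothetical ``cinder.conf`` backend sketch that keeps the old secure behaviour could look like the following; the backend section name and registry URL are placeholders::

    [quobyte-1]
    volume_driver = cinder.volume.drivers.quobyte.QuobyteDriver
    quobyte_volume_url = quobyte://registry.example.com/openstack-volumes
    # 'auto' now maps to false, so request the secure behaviour explicitly
    nas_secure_file_operations = true
    nas_secure_file_permissions = true
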
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/queens-driver-removal-72a1a36689b6d890.yaml0000664000175000017500000000046400000000000026122 0ustar00zuulzuul00000000000000--- upgrade: - | The following volume drivers were deprecated in the Pike release and have now been removed: * Block device driver * Blockbridge * Coho * FalconStor FSS * Infortrend * QNAP * Reduxio * Tegile * Violin * X-IO * ZTE ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/quobyte-extra-requires-8dc1761859da923a.yaml0000664000175000017500000000071600000000000026413 0ustar00zuulzuul00000000000000--- upgrade: - | Quobyte driver: The Python ``psutil`` module is no longer a requirement for cinder, so it may need to be installed separately if you are using Quobyte. This can be done by specifying ``quobyte`` extras if installing via pip (for example, ``pip install cinder[quobyte]``) or by installing ``psutil`` from the package appropriate for your operating system and ensuring that it is accessible to the cinder volume service. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/quobyte-unsupported-96c8f109eecb88c7.yaml0000664000175000017500000000112400000000000026170 0ustar00zuulzuul00000000000000--- upgrade: - | The Quobyte driver has been marked as unsupported and is now deprecated. ``enable_unsupported_driver`` will need to be set to ``True`` in the driver's section in cinder.conf to continue to use it. deprecations: - | The Quobyte driver has been marked as unsupported and is now deprecated. ``enable_unsupported_driver`` will need to be set to ``True`` in the driver's section in cinder.conf to continue to use it. As an unsupported driver, it is eligible for removal from the cinder code base if its third party CI system is not fixed. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/quobyte_vol-snap-cache-baf607f14d916ec7.yaml0000664000175000017500000000057700000000000026456 0ustar00zuulzuul00000000000000--- fixes: - | Added a new optional cache of volumes generated from snapshots for the Quobyte backend. Enabling this cache speeds up creation of multiple volumes from a single snapshot at the cost of a slight increase in creation time for the first volume generated for a given snapshot. The ``quobyte_volume_from_snapshot_cache`` option is off by default. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/quota-backup-resources-fc4e0795f520c4ab.yaml0000664000175000017500000000026600000000000026477 0ustar00zuulzuul00000000000000--- fixes: - | `Bug #1952420 `_: Fixed quota warnings about ``backups`` and ``backup_gigabytes`` when creating backups. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/quota-on-retype-with-snapshots-2d9fc7b2c75f899d.yaml0000664000175000017500000000044000000000000030157 0ustar00zuulzuul00000000000000--- fixes: - | `Bug #1877164 `_: Fixed an issue where retyping a volume with snapshots left the snapshots with the old type, making the snapshot quotas immediately wrong and corrupting them further after those snapshots were deleted. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/quota-sync-migrating-2c99e134e117a945.yaml0000664000175000017500000000042600000000000025742 0ustar00zuulzuul00000000000000--- fixes: - | `Bug #1917450 `_: Fix automatic quota refresh to correctly account for migrating volumes. During volume migration we'll have 2 volumes in cinder and only one will be accounted for in quota usage. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/quota-sync-temporary-b4103ebc2c484c89.yaml0000664000175000017500000000062000000000000026127 0ustar00zuulzuul00000000000000--- fixes: - | `Bug #1919161 `_: Fix automatic quota refresh to correctly account for temporary volumes. During some cinder operations, such as create a backup from a snapshot, temporary volumes are created and are not counted towards quota usage, but the sync mechanism was counting them, thus incorrectly updating volume usage. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/quota-temp-snapshots-9d032f97f80050c5.yaml0000664000175000017500000000113400000000000025767 0ustar00zuulzuul00000000000000--- fixes: - | `Bug #1923828 `_: Fixed quota usage sync counting temporary snapshots from backups and revert to snapshot. - | `Bug #1923829 `_: Fixed manually deleting temporary snapshots from backups and revert to snapshots after failure leads to incorrect quota usage. - | `Bug #1923830 `_: Fixed successfully backing up an in-use volume using a temporary snapshot instead of a clone leads to incorrect quota usage. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/quota-until_refresh-updated-d35e8530f30c5522.yaml0000664000175000017500000000027400000000000027273 0ustar00zuulzuul00000000000000--- fixes: - | `Bug #1697906 `_: Fix ``until_refresh`` configuration changes not taking effect in a timely fashion or at all. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/quota-usage-duplicates-c00725089da7bbd8.yaml0000664000175000017500000000022100000000000026370 0ustar00zuulzuul00000000000000--- fixes: - | `Bug #1484343 `_: Fix creation of duplicated quota usage entries in DB. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/quota-volume-transfer-abd1f418c6c63db0.yaml0000664000175000017500000000012100000000000026411 0ustar00zuulzuul00000000000000--- fixes: - Corrected quota usage when transferring a volume between tenants. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/rbd-active-active-replication-b230367912fe4a23.yaml0000664000175000017500000000033200000000000027437 0ustar00zuulzuul00000000000000--- features: - Added support for active-active replication to the RBD driver. This allows users to configure multiple volume backends that are all a member of the same cluster participating in replication. 
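As a rough illustration of the RBD replication configuration referred to in the note above, a hypothetical backend section might look like the following; the backend name, pool and configuration file paths are placeholders, and the authoritative ``replication_device`` format should be taken from the RBD driver documentation::

    [ceph-1]
    volume_driver = cinder.volume.drivers.rbd.RBDDriver
    volume_backend_name = ceph-1
    rbd_pool = volumes
    rbd_user = cinder
    rbd_ceph_conf = /etc/ceph/primary.conf
    # secondary cluster used as the replication target
    replication_device = backend_id:secondary,conf:/etc/ceph/secondary.conf,user:cinder
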
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/rbd-backend-qos-implementation-0e141b742e277d26.yaml0000664000175000017500000000006700000000000027626 0ustar00zuulzuul00000000000000--- features: - | RBD driver: Added QoS support. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/rbd-bug-2065713-driver-exc-handling-f8de823cd9acd767.yaml0000664000175000017500000000045200000000000030171 0ustar00zuulzuul00000000000000--- fixes: - | `Bug #2065713 `_: Due to incorrect exception handling, ImageNotFound errors in the RBD driver's get_manageable_volumes operation would propagate up to the API layer rather than being caught and handled in the driver. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/rbd-choose-correct-stripe-unit-9d317f4717533fb4.yaml0000664000175000017500000000033500000000000027627 0ustar00zuulzuul00000000000000--- fixes: - | `Bug #1931004 `_: Fixed use of incorrect stripe unit in RBD image clone causing volume-from-image to fail when using raw images backed by Ceph. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/rbd-driver-assisted-migration-2d29788243060f77.yaml0000664000175000017500000000027000000000000027367 0ustar00zuulzuul00000000000000--- features: - Added driver-assisted volume migration to RBD driver. This allows a volume to be efficiently copied by Ceph from one pool to another within the same cluster. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/rbd-flatten-child-volumes-4cb0b7fcf3a1df5e.yaml0000664000175000017500000000154500000000000027263 0ustar00zuulzuul00000000000000--- upgrade: - | Cinder now uses the RBD trash functionality to handle some volume deletions. Therefore, deployments must either a) enable scheduled RBD trash purging on the RBD backend or b) enable the Cinder RBD driver's enable_deferred_deletion option to have Cinder purge the RBD trash. This adds the new configuration option 'rbd_concurrent_flatten_operations', which limits how many RBD flattens the driver will run simultaneously. This can be used to prevent flatten operations from consuming too much I/O capacity on the Ceph cluster. It defaults to 3. fixes: - | `Bug #1969643 `_: The RBD driver can now delete volumes with other volumes cloned from it (or its snapshots) in cases where deletion would previously fail. This uses the RBD trash functionality. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/rbd-multiattach-exceptions-43066312f3b527f5.yaml0000664000175000017500000000032400000000000027030 0ustar00zuulzuul00000000000000--- fixes: - | Catch argument exceptions when configuring multiattach for rbd volumes. This allows multiattach images with flags already set to continue instead of raising an exception and failing. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/rbd-multiattach-support-2900ce0245af0239.yaml0000664000175000017500000000065400000000000026437 0ustar00zuulzuul00000000000000--- features: - RBD driver has added multiattach support. 
It should be noted that replication and multiattach are mutually exclusive, so a single RBD volume can only be configured to support one of these features at a time. Additionally, RBD image features are not preserved which prevents a volume being retyped from multiattach to another type. This limitation is temporary and will be addressed soon. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/rbd-optimize-image-upload-836c9df06674a665.yaml0000664000175000017500000000021100000000000026632 0ustar00zuulzuul00000000000000--- fixes: - | RBD driver: No longer copy the RBD source volume image to a temporary file when uploading a volume to an image. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/rbd-rbd_secret_uuid-fsid-95daee128f59c8e4.yaml0000664000175000017500000000021200000000000026736 0ustar00zuulzuul00000000000000--- features: - | RBD driver: Sets the Ceph cluster FSID as the default value for the ``rbd_secret_uuid`` configuration option. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/rbd-stats-report-0c7e803bb0b1aedb.yaml0000664000175000017500000000140000000000000025415 0ustar00zuulzuul00000000000000--- features: - | RBD driver supports returning a static total capacity value instead of a dynamic value like it's been doing. Configurable with `report_dynamic_total_capacity` configuration option. upgrade: - | RBD/Ceph backends should adjust `max_over_subscription_ratio` to take into account that the driver is no longer reporting volume's physical usage but it's provisioned size. fixes: - | RBD stats report has been fixed, now properly reports `allocated_capacity_gb` and `provisioned_capacity_gb` with the sum of the sizes of the volumes (not physical sizes) for volumes created by Cinder and all available in the pool respectively. Free capacity will now properly handle quota size restrictions of the pool. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/rbd-support-list-manageable-snapshots-3474c62ed83fb788.yaml0000664000175000017500000000010100000000000031265 0ustar00zuulzuul00000000000000--- features: - Allow rbd driver to list manageable snapshots. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/rbd-support-list-manageable-volumes-8a088a44e01d227f.yaml0000664000175000017500000000007700000000000030726 0ustar00zuulzuul00000000000000--- features: - Allow rbd driver to list manageable volumes. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/rbd-support-managing-existing-snapshot-fb871a3ea98dc572.yaml0000664000175000017500000000010000000000000031615 0ustar00zuulzuul00000000000000--- features: - Allow rbd driver to manage existing snapshot. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/rbd-support-report-backend-state-4e124eb9efd36724.yaml0000664000175000017500000000007400000000000030314 0ustar00zuulzuul00000000000000--- features: - Allow rbd driver to report backend state. 
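Relating to the RBD capacity-reporting note above (``report_dynamic_total_capacity`` and the suggested ``max_over_subscription_ratio`` adjustment), a minimal, hypothetical sketch of the relevant backend settings follows; the ratio shown is only an arbitrary example value::

    [ceph-1]
    # report a static total capacity instead of the dynamically calculated value
    report_dynamic_total_capacity = false
    # re-tune over-subscription now that provisioned size, not physical usage, is reported
    max_over_subscription_ratio = 20.0
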
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/rbd-support-revert-to-snapshot-c9ca62c9efbabf5f.yaml0000664000175000017500000000255500000000000030450 0ustar00zuulzuul00000000000000--- features: - | RBD driver: support added for reverting a volume to the most recent snapshot taken. Please be aware of the following known issues with this operation and the Ceph storage backend: * Rolling back a volume to a snapshot overwrites the current volume with the data from the snapshot, and the time it takes to complete this operation increases with the size of the volume. It is faster to create a new volume from a snapshot. You may wish to recommend this option to your users whose use cases do not strictly require revert-to-snapshot. * The efficiency of revert-to-snapshot is also dependent upon the Ceph storage backend in use, namely, whether or not BlueStore is being used in your Ceph installation. Please consult the Ceph documentation for details. issues: - | RBD driver: There are some known issues concerning the revert-to-snapshot support added in this release. * The time it takes to complete the revert-to-snapshot operation increases with the size of the volume. It is faster to create a new volume from a snapshot. * The efficiency of revert-to-snapshot depends upon the Ceph storage backend in use, particularly whether or not BlueStore is being used in your Ceph installation. Please consult the Ceph documentation for details. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/rbd-thin-provisioning-c98522d6fe7b71ff.yaml0000664000175000017500000000012100000000000026335 0ustar00zuulzuul00000000000000--- features: - Allow the RBD driver to work with max_over_subscription_ratio. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/rbd-total_capacity-60f10b45e3a8c8ea.yaml0000664000175000017500000000054600000000000025635 0ustar00zuulzuul00000000000000--- fixes: - | RBD driver `bug #1960206 `_: Fixed ``total_capacity`` reported by the driver to the scheduler on Ceph clusters that have renamed the ``bytes_used`` field to ``stored``. (e.g., `Nautilus `_). ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/rbd-update-features-bugfix-df97b50864ce9712.yaml0000664000175000017500000000026300000000000027075 0ustar00zuulzuul00000000000000--- fixes: - | `Bug #1997980 `_: RBD: Fixed failure to update rbd image features for multi-attach when features = 0. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/rbd-v2.1-replication-64a9d0bec5987faf.yaml0000664000175000017500000000010000000000000025716 0ustar00zuulzuul00000000000000--- features: - Added v2.1 replication support to RBD driver. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/rbd_replication_add_secret_uuid_config-c74d65e6d3d610c6.yaml0000664000175000017500000000042300000000000031703 0ustar00zuulzuul00000000000000--- fixes: - | Rbd replication secondary device could set different user and keyring with primary cluster. 
The secondary ``secret_uuid`` value is configured in a libvirt secret, and libvirtd uses the secondary secret to reconnect to the secondary cluster after a Cinder failover of the host. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/re-add-nexenta-driver-d3af97e33551a485.yaml0000664000175000017500000000035400000000000026026 0ustar00zuulzuul00000000000000--- features: - Added Migrate and Extend for Nexenta NFS driver. - Added Retype functionality to Nexenta iSCSI and NFS drivers. upgrade: - Refactored Nexenta iSCSI driver to use a single target and targetgroup with multiple zvols. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/readd-infortrend-driver-d9b399b53a4355f8.yaml0000664000175000017500000000023600000000000026470 0ustar00zuulzuul00000000000000--- features: - Re-added Infortrend Cinder volume driver. The Infortrend driver, removed in Cinder 12.0.0 (Queens), has been restored in this release. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/readd-qnap-driver-e1dc6b0c3fabe30e.yaml0000664000175000017500000000007400000000000025600 0ustar00zuulzuul00000000000000--- features: - | Re-added QNAP Cinder volume driver. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/rebranded-hpe-drivers-caf1dcef1afe37ba.yaml0000664000175000017500000000025500000000000026616 0ustar00zuulzuul00000000000000--- upgrade: - HP drivers have been rebranded to HPE. Existing configurations will continue to work with the legacy name, but will need to be updated by the next release. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/rebranded-vnx-driver-2fb7424ddc9c41df.yaml0000664000175000017500000000046100000000000026203 0ustar00zuulzuul00000000000000--- upgrade: - The EMC VNX driver has been rebranded to Dell EMC VNX driver. Existing configurations will continue to work with the legacy name, but will need to be updated by the next release. Users need to update ``volume_driver`` to ``cinder.volume.drivers.dell_emc.vnx.driver.VNXDriver``. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/redirect-detach-nova-4b7b7902d7d182e0.yaml0000664000175000017500000000450200000000000025722 0ustar00zuulzuul00000000000000--- critical: - | Detaching volumes will fail if Nova is not `configured to send service tokens `_; please read the upgrade section for more information (`Bug #2004555 `_). upgrade: - | Nova must be `configured to send service tokens `_ **and** cinder must be configured to recognize at least one of the roles that the nova service user has been assigned in keystone. By default, cinder will recognize the ``service`` role, so if the nova service user is assigned a differently named role in your cloud, you must adjust your cinder configuration file (``service_token_roles`` configuration option in the ``keystone_authtoken`` section). If nova and cinder are not configured correctly in this regard, detaching volumes will no longer work (`Bug #2004555 `_). 
security: - | As part of the fix for `Bug #2004555 `_, cinder now rejects user attachment delete requests for attachments that are being used by nova instances to ensure that no leftover devices are produced on the compute nodes which could be used to access another project's volumes. Terminate connection, detach, and force detach volume actions (calls that are not usually made by users directly) are, in most cases, not allowed for users. fixes: - | `Bug #2004555 `_: Fixed issue where a user manually deleting an attachment, calling terminate connection, detach, or force detach, for a volume that is still used by a nova instance resulted in leftover devices on the compute node. These operations will now fail when it is believed to be a problem. issues: - | For security reasons (`Bug #2004555 `_) manually deleting an attachment, manually doing the ``os-terminate_connection``, ``os-detach`` or ``os-force_detach`` actions will no longer be allowed in most cases unless the request is coming from another OpenStack service on behalf of a user. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/redundancy-in-volume-url-4282087232e6e6f1.yaml0000664000175000017500000000023000000000000026434 0ustar00zuulzuul00000000000000--- fixes: - | Fixes a bug that prevented the configuration of multiple redundant Quobyte registries in the quobyte_volume_url config option. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/reduxio-iscsci-driver-5827c32a0c498949.yaml0000664000175000017500000000007200000000000026030 0ustar00zuulzuul00000000000000--- features: - Added backend ISCSI driver for Reduxio. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/refactor-disco-volume-driver-3ff0145707ec0f3e.yaml0000664000175000017500000000030200000000000027503 0ustar00zuulzuul00000000000000--- deprecations: - Marked the ITRI DISCO driver option ``disco_wsdl_path`` as deprecated. The new preferred protocol for array communication is REST and SOAP support will be removed. ././@PaxHeader0000000000000000000000000000021400000000000011452 xustar0000000000000000118 path=cinder-27.0.0/releasenotes/notes/reject-volume_clear_size-settings-larger-than-1024MiB-30b38811da048948.yaml 22 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/reject-volume_clear_size-settings-larger-than-1024MiB-30b38811da0480000664000175000017500000000024100000000000032355 0ustar00zuulzuul00000000000000--- fixes: - Fixed 'No Space left' error by dd command when users set the config option ``volume_clear_size`` to a value larger than the size of a volume. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/remove-api-v2-dadd877ee5457f79.yaml0000664000175000017500000000132500000000000024513 0ustar00zuulzuul00000000000000--- upgrade: - | The Block Storage API v2, which was deprecated in the Pike release, has been removed. If upgrading from a previous OpenStack release, it is recommended that you edit your ``/etc/cinder/api-paste.ini`` file to remove all references to v2. Additionally, the deprecated configuration option ``enable_v2_api`` has been removed. If present in a configuration file, it will be silently ignored. 
The configuration option ``enable_v3_api`` has been removed in this release because v3 is now the only version of the Block Storage API available. If present in a configuration file, it will be silently ignored as the v3 API is now enabled unconditionally. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/remove-backup-service-to-driver-mapping-4d2ed6f868a64175.yaml0000664000175000017500000000046100000000000031507 0ustar00zuulzuul00000000000000--- upgrade: - | Backup service to driver mapping is removed. If you use old values like 'cinder.backup.services.swift' or 'cinder.backup.services.ceph', change them to 'cinder.backup.drivers.swift' or 'cinder.backup.drivers.ceph' accordingly to keep your backup service working. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/remove-block-device-driver-14f76dca2ee9bd38.yaml0000664000175000017500000000045500000000000027276 0ustar00zuulzuul00000000000000--- upgrade: - | BlockDeviceDriver was deprecated in the Ocata release and marked as 'unsupported'. There is no CI for it either. If you used this driver before, you have to migrate your volumes to LVM with the LIO target yourself before upgrading to the Queens release to keep your volumes working. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/remove-cinder-manage-logs-cmds-40fb8f475b37fb2f.yaml0000664000175000017500000000026600000000000027756 0ustar00zuulzuul00000000000000--- other: - | The "cinder-manage logs" commands have been removed. Information previously gathered by these commands may be found in cinder service and syslog logs. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/remove-cinder-manage-shell-6d6f42e5a4ee8c5c.yaml0000664000175000017500000000012400000000000027250 0ustar00zuulzuul00000000000000--- upgrade: - | The "cinder-manage shell" set of commands has been removed. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/remove-configurable-migration-backend-aaee5a2f808c9b36.yaml0000664000175000017500000000036500000000000031465 0ustar00zuulzuul00000000000000--- upgrade: - | Support for the ``cinder.database.migration_backend`` entrypoint, which provided for configurable database migration backends, has been removed. This was never exercised and was a source of unnecessary complexity. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/remove-datacore-300c667e9f504590.yaml0000664000175000017500000000017300000000000024653 0ustar00zuulzuul00000000000000--- upgrade: - | The DataCore drivers were marked as unsupported in the Rocky release and have now been removed. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/remove-deprecated-driver-mappings-b927d8ef9fc3b713.yaml0000664000175000017500000000275100000000000030613 0ustar00zuulzuul00000000000000--- upgrade: - | Old driver paths have been removed since they have been through our allotted deprecation period. 
Make sure if you have any of these paths being set in your cinder.conf for the volume_driver option, to update to the new driver path listed here. * Old path - cinder.volume.drivers.huawei.huawei_18000.Huawei18000ISCSIDriver * New path - cinder.volume.drivers.huawei.huawei_driver.HuaweiISCSIDriver * Old path - cinder.volume.drivers.huawei.huawei_driver.Huawei18000ISCSIDriver * New path - cinder.volume.drivers.huawei.huawei_driver.HuaweiISCSIDriver * Old path - cinder.volume.drivers.huawei.huawei_18000.Huawei18000FCDriver * New path - cinder.volume.drivers.huawei.huawei_driver.HuaweiFCDriver * Old path - cinder.volume.drivers.huawei.huawei_driver.Huawei18000FCDriver * New path - cinder.volume.drivers.huawei.huawei_driver.HuaweiFCDriver * Old path - cinder.volume.drivers.san.hp.hp_3par_fc.HP3PARFCDriver * New path - cinder.volume.drivers.hpe.hpe_3par_fc.HPE3PARFCDriver * Old path - cinder.volume.drivers.san.hp.hp_3par_iscsi.HP3PARISCSIDriver * New path - cinder.volume.drivers.hpe.hpe_3par_iscsi.HPE3PARISCSIDriver * Old path - cinder.volume.drivers.san.hp.hp_lefthand_iscsi.HPLeftHandISCSIDriver * New path - cinder.volume.drivers.hpe.hpe_lefthand_iscsi.HPELeftHandISCSIDriver * Old path - cinder.volume.drivers.san.hp.hp_xp_fc.HPXPFCDriver * New path - cinder.volume.drivers.hpe.hpe_xp_fc.HPEXPFCDriver ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/remove-deprecated-keymgr-d11a25c620862ed6.yaml0000664000175000017500000000126000000000000026607 0ustar00zuulzuul00000000000000--- upgrade: - | The old deprecated ``keymgr`` options have been removed. Configuration options using the ``[keymgr]`` group will not be applied anymore. Use the ``[key_manager]`` group from Castellan instead. The Castellan ``backend`` options should also be used instead of ``api_class``, as most of the options that lived in Cinder have migrated to Castellan. - Instead of ``api_class`` option ``cinder.keymgr.barbican.BarbicanKeyManager``, use ``backend`` option `barbican`` - ``cinder.keymgr.conf_key_mgr.ConfKeyManager`` still remains, but the ``fixed_key`` configuration options should be moved to the ``[key_manager]`` section ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/remove-deprecated-nova-opts-b1ec66fe3a9bb3b9.yaml0000664000175000017500000000067100000000000027555 0ustar00zuulzuul00000000000000--- upgrade: - Removed the deprecated options for the Nova connection:> os_privileged_user{name, password, tenant, auth_url}, nova_catalog_info, nova_catalog_admin_info, nova_endpoint_template, nova_endpoint_admin_template, nova_ca_certificates_file, nova_api_insecure. From Pike, using the [nova] section is preferred to configure compute connection for Guest Assisted Snapshost or the InstanceLocalityFilter. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/remove-deprecated-option-9ad954726ed4d8c2.yaml0000664000175000017500000000020300000000000026722 0ustar00zuulzuul00000000000000--- upgrade: - | Removed the option ``allow_inuse_volume_type_modification`` which had been deprecated in Ocata release. 
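Following the key manager note above (the removal of the deprecated ``[keymgr]`` options in favour of Castellan's ``[key_manager]`` group), a minimal sketch of the replacement configuration; the Barbican backend is only one possible choice, and the fixed key is a placeholder::

    [key_manager]
    # replaces the old api_class = cinder.keymgr.barbican.BarbicanKeyManager
    backend = barbican
    # for the legacy ConfKeyManager, fixed_key now also lives in this section
    # fixed_key = <hex-encoded key>
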
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/remove-drbdmanage-driver-4edd1e1e43b6ba39.yaml0000664000175000017500000000104000000000000027007 0ustar00zuulzuul00000000000000--- upgrade: - The DRBDManage driver is now removed. Customers using the DRBDManage driver should not upgrade Cinder without first migrating all volumes from their DRBDManage backend to a supported storage backend such as LINSTOR. Failure to migrate volumes will result in not being able to access volumes backed by the DRBDManage storage backend. deprecations: - The DRBDManage driver is deprecated as of the Stein release and is removed in the Train release. Users should use the new LINSTOR driver instead.././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/remove-flashsystem-multipath-0a3ee133ebe35d1e.yaml0000664000175000017500000000036100000000000027776 0ustar00zuulzuul00000000000000--- upgrade: - | The IBM FlashSystem configuration options ``flashsystem_multipath_enabled`` was deprecated in the Mitaka release. It had no effect, so it can be safely removed and does not have a new equivalent config option. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/remove-hgst-flash-driver-a930927de333329a.yaml0000664000175000017500000000136200000000000026477 0ustar00zuulzuul00000000000000--- upgrade: - | The HGST Flash Suite storage driver has been removed after completion of its deprecation period without a reliable 3rd Party CI system being supported. Customers using the HGST Flash Suite driver should not upgrade Cinder without first migrating all volumes from their HGST backend to a supported storage backend. Failure to migrate volumes will result in no longer being able to access volumes backed by the HGST storage backend. other: - | The HGST Flash Storage Suite Driver was marked unsupported in the Rocky release because their 3rd Party CI system was not meeting Cinder's requirements. The system has not started reporting so the driver is now removed as of the Stein release. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/remove-hitachi-57d0b37cb9cc7e13.yaml0000664000175000017500000000033500000000000024776 0ustar00zuulzuul00000000000000--- upgrade: - | The Hitachi HNAS, HBSD, and VSP volume drivers were marked as deprecated in the Pike release and have now been removed. Hitachi storage drivers are now only available directly from Hitachi. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/remove-hp-cliq-41f47fd61e47d13f.yaml0000664000175000017500000000011300000000000024641 0ustar00zuulzuul00000000000000--- upgrade: - The deprecated HP CLIQ proxy driver has now been removed. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/remove-hp3par-config-options-3cf0d865beff9018.yaml0000664000175000017500000000017300000000000027526 0ustar00zuulzuul00000000000000--- upgrade: - | The old deprecated ``hp3par*`` options have been removed. Use the ``hpe3par*`` instead of them. 
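To illustrate the ``hp3par*`` to ``hpe3par*`` option rename noted above, a hypothetical HPE 3PAR backend section using the new option names; the URL, credentials and CPG are placeholders, and the driver path is the one listed in the driver mapping note earlier in these release notes::

    [3par-fc]
    volume_driver = cinder.volume.drivers.hpe.hpe_3par_fc.HPE3PARFCDriver
    hpe3par_api_url = https://3par.example.com:8080/api/v1
    hpe3par_username = cinderuser
    hpe3par_password = CHANGE-ME
    hpe3par_cpg = OpenStackCPG
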
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/remove-hpe-lefthand-driver-57b03ca9ada2654c.yaml0000664000175000017500000000062200000000000027204 0ustar00zuulzuul00000000000000--- upgrade: - | The HPE Lefthand Driver (iSCSI) was marked unsupported in the Train release as the StoreVirtual product line has gone EOL and the LeftHand OS no longer receives upgrades. The driver has been removed in this release. All data on backends powered by HPE LeftHand OS should be migrated to a supported storage backend before upgrading your Cinder installation. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/remove-ibm-nas-driver-0ed204ed0a2dcf55.yaml0000664000175000017500000000073400000000000026251 0ustar00zuulzuul00000000000000--- upgrade: - Users of the ibmnas driver should switch to using the IBM GPFS driver to enable Cinder access to IBM NAS resources. For details configuring the IBM GPFS driver, see the GPFS config reference. - https://docs.openstack.org/liberty/config-reference/content/GPFS-driver.html other: - Due to the ibmnas (SONAS) driver being rendered redundant by the addition of NFS capabilities to the IBM GPFS driver, the ibmnas driver is being removed in the Mitaka release. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/remove-iscsi-target-config-options-d23e424eb8f82042.yaml0000664000175000017500000000057600000000000030563 0ustar00zuulzuul00000000000000--- upgrade: - | The config options ``iscsi_ip_address``, ``iscsi_port``, ``target_helper``, ``iscsi_target_prefix`` and ``iscsi_protocol`` were deprecated in the Queens release and have now been removed. Deployments should now used the more general ``target_ip_address``, ``target_port``, ``target_helper``, ``target_prefix`` and ``target_protocol`` options. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/remove-lvm-over-sub-3c8addbf47827045.yaml0000664000175000017500000000035200000000000025646 0ustar00zuulzuul00000000000000--- upgrade: - | The LVM driver specific `lvm_max_over_subscription_ratio` setting had been deprecated and is now removed. Over subscription should now be managed using the generic `max_over_subscription_ratio` setting. ././@PaxHeader0000000000000000000000000000020700000000000011454 xustar0000000000000000113 path=cinder-27.0.0/releasenotes/notes/remove-mirrorpolicy-parameter-from-huawei-driver-d32257a60d32fd90.yaml 22 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/remove-mirrorpolicy-parameter-from-huawei-driver-d32257a60d32fd90.y0000664000175000017500000000011700000000000032745 0ustar00zuulzuul00000000000000--- deprecations: - | Remove mirror policy parameter from huawei driver. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/remove-multiattach-request-param-4444e02533f919da.yaml0000664000175000017500000000170200000000000030247 0ustar00zuulzuul00000000000000--- fixes: - | `Bug #2008259 `_: Fixed the volume create functionality where non-admin users were able to create multiattach volumes by providing the `multiattach` parameter in the request body. Now we can only create multiattach volumes using a multiattach volume type, which is also the recommended way. 
other: - | Removed the ability to create multiattach volumes by specifying `multiattach` parameter in the request body of a volume create operation. This functionality is unsafe, can lead to data loss, and has been deprecated since the Queens release. The recommended method for creating a multiattach volume is to use a volume type that supports multiattach. By default, volume types can only be created by the operator. Users who have a need for multiattach volumes should contact their operator if a suitable volume type is not available. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/remove-nas-ip-config-option-8d56c14f1f4614fc.yaml0000664000175000017500000000016600000000000027253 0ustar00zuulzuul00000000000000--- upgrade: - | The old deprecated ``nas_ip`` option has been removed. Use the ``nas_host`` instead of it. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/remove-nested-quota-driver-8b56f03694e3a694.yaml0000664000175000017500000000034400000000000027064 0ustar00zuulzuul00000000000000--- upgrade: - | The ``cinder.quota.NestedDbQuotaDriver`` quota driver was marked as deprecated in Train release and is eligible for removal since Ussuri release. This release removes the NestedQuotaDriver support.././@PaxHeader0000000000000000000000000000020600000000000011453 xustar0000000000000000112 path=cinder-27.0.0/releasenotes/notes/remove-netapp-teseries-thost-type-config-option-908941dc7d2a1d59.yaml 22 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/remove-netapp-teseries-thost-type-config-option-908941dc7d2a1d59.ya0000664000175000017500000000021100000000000032673 0ustar00zuulzuul00000000000000--- upgrade: - | The old deprecated ``netapp_eseries_host_type`` option has been removed. Use the ``netapp_host_type`` instead.././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/remove-py39-7c8a2397befd6ecc.yaml0000664000175000017500000000016600000000000024345 0ustar00zuulzuul00000000000000--- upgrade: - | Support for Python 3.9 has been removed. Now Python 3.10 is the minimum version supported. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/remove-pybasedir-config-option-572604d26a57ba5e.yaml0000664000175000017500000000016500000000000027757 0ustar00zuulzuul00000000000000--- upgrade: - | The old deprecated ``pybasedir`` option has been removed. Use the ``state_path`` instead. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/remove-query-volume-filters-b59958fc68d3abb4.yaml0000664000175000017500000000030500000000000027517 0ustar00zuulzuul00000000000000--- upgrade: - | Deprecated config option `query_volume_filters` is removed now. Please, use config file described in resource_query_filters_file to configure allowed volume filters. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/remove-rbd_keyring_conf-2d54a4de634c255c.yaml0000664000175000017500000000062000000000000026602 0ustar00zuulzuul00000000000000--- upgrade: - | RBD driver: the ``rbd_keyring_conf`` configuration option, which was deprecated in the Ussuri release, has been removed. 
If it is present in a configuration file, its value will silently be ignored. For more information, see `OSSN-0085 `_: Cinder configuration option can leak secret key from Ceph backend. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/remove-scality-fa209aae9748a1f3.yaml0000664000175000017500000000021100000000000025026 0ustar00zuulzuul00000000000000--- upgrade: - The Scality backend volume driver was marked as not supported in the previous release and has now been removed. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/remove-single-backend-7bf02e525bbbdd3a.yaml0000664000175000017500000000026700000000000026363 0ustar00zuulzuul00000000000000--- upgrade: - Configurations that are setting backend config in ``[DEFAULT]`` section are now not supported. You should use ``enabled_backends`` option to set up backends. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/remove-sqlalchemy-migrate-c62b541fd5f4ab10.yaml0000664000175000017500000000024700000000000027143 0ustar00zuulzuul00000000000000--- upgrade: - | The legacy ``sqlalchemy-migrate`` migrations, which have been deprecated since Xena, have been removed. There should be no end-user impact. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/remove-vol-in-error-from-cg-1ed0fde04ab2b5be.yaml0000664000175000017500000000036400000000000027452 0ustar00zuulzuul00000000000000--- fixes: - Previously the only way to remove volumes in error states from a consistency-group was to delete the consistency group and create it again. Now it is possible to remove volumes in error and error_deleting states. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/remove-volume-clear-shred-bde9f7f9ff430feb.yaml0000664000175000017500000000046500000000000027327 0ustar00zuulzuul00000000000000--- upgrade: - The volume_clear option to use `shred` was deprecated in the Newton release and has now been removed. Since deprecation, this option has performed the same action as the `zero` option. Config settings for `shred` should be updated to be set to `zero` for continued operation. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/remove-windows-mapping-51a004f466470a2b.yaml0000664000175000017500000000062700000000000026255 0ustar00zuulzuul00000000000000--- upgrade: - | The ``WindowsDriver`` was renamed in the Queens release to ``WindowsISCSIDriver`` to avoid confusion with the SMB driver. The backwards compatibility for this has now been removed, so any cinder.conf settings still using ``cinder.volume.drivers.windows.windows.WindowsDriver`` must now be updated to use ``cinder.volume.drivers.windows.iscsi.WindowsISCSIDriver``. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/remove-xml-api-392b41f387e60eb1.yaml0000664000175000017500000000020100000000000024564 0ustar00zuulzuul00000000000000--- upgrade: - The XML API has been removed in Newton release. Cinder supports only JSON API request/response format now. 
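Relating to the note above about backend configuration no longer being supported in ``[DEFAULT]``, a minimal, hypothetical ``cinder.conf`` sketch using ``enabled_backends`` with a single LVM backend; the section name and volume group are placeholders::

    [DEFAULT]
    enabled_backends = lvm-1

    [lvm-1]
    volume_driver = cinder.volume.drivers.lvm.LVMVolumeDriver
    volume_backend_name = lvm-1
    volume_group = cinder-volumes
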
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/remove_deprecated_xml-4065b893d781f65c.yaml0000664000175000017500000000013700000000000026224 0ustar00zuulzuul00000000000000--- upgrade: - | VMAX driver - Removed deprecated option ``cinder_dell_emc_config_file`` ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/remove_eseries-bb1bc134645aee50.yaml0000664000175000017500000000023000000000000025061 0ustar00zuulzuul00000000000000--- upgrade: - Support for NetApp E-Series has been removed. The NetApp Unified driver can now only be used with NetApp Clustered Data ONTAP. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/remove_export_failure_leaves_attachment-24e0c648269b0177.yaml0000664000175000017500000000031700000000000031750 0ustar00zuulzuul00000000000000--- fixes: - | `Bug #1935057 `_: Fixed sometimes on a detach volume may end in available and detached yet have an attachment in error_detaching. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/remove_glusterfs_volume_driver-d8fd2cf5f38e754b.yaml0000664000175000017500000000016100000000000030522 0ustar00zuulzuul00000000000000--- upgrade: - The GlusterFS volume driver, which was deprecated in the Newton release, has been removed. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/remove_lvmdriver-9c35f83132cd2ac8.yaml0000664000175000017500000000030700000000000025400 0ustar00zuulzuul00000000000000--- upgrade: - Removed deprecated LVMISCSIDriver and LVMISERDriver. These should be switched to use the LVMVolumeDriver with the desired iscsi_helper configuration set to the desired iSCSI helper. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/remove_osapi_volume_base_url-33fed24c4ad1b2b6.yaml0000664000175000017500000000026600000000000030074 0ustar00zuulzuul00000000000000--- upgrade: - | The `osapi_volume_base_URL` config option was deprecated in Pike and has now been removed. The `public_endpoint` config option should be used instead. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/remove_service_filter-380e7990bfdbddc8.yaml0000664000175000017500000000025000000000000026542 0ustar00zuulzuul00000000000000--- upgrade: - The ``service`` filter for service list API was deprecated 3 years ago in 2013 July (Havana). Removed this filter and please use "binary" instead. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/remove_storwize_npiv-b704ff2d97207666.yaml0000664000175000017500000000012600000000000026161 0ustar00zuulzuul00000000000000--- upgrade: - Removed the deprecated NPIV options for the Storwize backend driver. 
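For the ``osapi_volume_base_URL`` removal noted above, a one-line hypothetical sketch of the replacement option; the URL is a placeholder::

    [DEFAULT]
    # replaces the removed osapi_volume_base_URL option
    public_endpoint = https://cinder.example.com:8776
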
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/remove_veritas_hyperscale_driver-988ad62d2417124f.yaml0000664000175000017500000000132000000000000030476 0ustar00zuulzuul00000000000000--- upgrade: - | The Veritas HyperScale storage driver has been removed after completion of its deprecation period without a reliable 3rd Party CI system being supported. Customers using the Veritas HyperScale driver should not upgrade Cinder without first migrating all volumes from their Veritas backend to a supported storage backend. Failure to migrate volumes will result in no longer being able to access volumes backed by the Veritas HyperScale storage backend. other: - | The Veritas HyperScale storage driver was marked unsupported in Stein due to 3rd Party CI not meeting Cinder's requirements. As a result the driver is removed starting from the Train release. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/remove_volume_tmp_dir_option-c83c5341e5a42378.yaml0000664000175000017500000000031600000000000027644 0ustar00zuulzuul00000000000000--- upgrade: - The RBD driver no longer uses the "volume_tmp_dir" option to set where temporary files for image conversion are stored. Set "image_conversion_dir" to configure this in Ocata. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/removed-apiv1-616b1b76a15521cf.yaml0000664000175000017500000000046000000000000024377 0ustar00zuulzuul00000000000000--- upgrade: - | The Cinder API v1 was deprecated in the Juno release and defaulted to be disabled in the Ocata release. It is now removed completely. If upgrading from a previous version, it is recommended you edit your `/etc/cinder/api-paste.ini` file to remove all references to v1. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/removed-isertgtadm-7ccefab5d3e89c59.yaml0000664000175000017500000000050600000000000026052 0ustar00zuulzuul00000000000000--- upgrade: - The ISERTgtAdm target was deprecated in the Kilo release. It has now been removed. You should now just use LVMVolumeDriver and specify ``iscsi_helper`` for the target driver you wish to use. In order to enable iser, please set ``iscsi_protocol=iser`` with lioadm or tgtadm target helpers. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/removed-rpc-topic-config-options-21c2b3f0e64f884c.yaml0000664000175000017500000000027700000000000030311 0ustar00zuulzuul00000000000000--- upgrade: - The config options ``scheduler_topic``, ``volume_topic`` and ``backup_topic`` have been removed without a deprecation period as these had never worked correctly. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/removed-scality-7151638fdac3ed9d.yaml0000664000175000017500000000010200000000000025177 0ustar00zuulzuul00000000000000--- upgrade: - Backend driver for Scality SRB has been removed. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/removing-cinder-all-9f5c3d1eb230f9e6.yaml0000664000175000017500000000022600000000000025734 0ustar00zuulzuul00000000000000--- upgrade: - Removing cinder-all binary. Instead use the individual binaries like cinder-api, cinder-backup, cinder-volume, cinder-scheduler. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/removing-middleware-sizelimit-ba86907acbda83de.yaml0000664000175000017500000000035300000000000030202 0ustar00zuulzuul00000000000000--- upgrade: - Removing deprecated file cinder.middleware.sizelimit. In your api-paste.ini, replace cinder.middleware.sizelimit:RequestBodySizeLimiter.factory with oslo_middleware.sizelimit:RequestBodySizeLimiter.factory ././@PaxHeader0000000000000000000000000000021000000000000011446 xustar0000000000000000114 path=cinder-27.0.0/releasenotes/notes/rename-backup-driver-status-check-interval-option-6b27c1e29cb863e9.yaml 22 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/rename-backup-driver-status-check-interval-option-6b27c1e29cb863e9.0000664000175000017500000000071200000000000032666 0ustar00zuulzuul00000000000000--- upgrade: - | If you have ``backup_driver_status_check_interval`` option in your cinder.conf we recommend you to use ``backup_driver_stats_polling_interval`` to avoid deprecation warnings in logs. deprecations: - | ``backup_driver_status_check_interval`` config option is renamed to ``backup_driver_stats_polling_interval`` to be similar with volume drivers configuration. Old option name support will be dropped in U release. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/rename-huawei-driver-092025e46b65cd48.yaml0000664000175000017500000000016000000000000025667 0ustar00zuulzuul00000000000000--- upgrade: - Rename Huawei18000ISCSIDriver and Huawei18000FCDriver to HuaweiISCSIDriver and HuaweiFCDriver. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/rename-iscsi-target-config-options-24913d7452c4a58e.yaml0000664000175000017500000000054200000000000030454 0ustar00zuulzuul00000000000000--- deprecations: - | ``iscsi_ip_address``, ``iscsi_port``, ``target_helper``, ``iscsi_target_prefix`` and ``iscsi_protocol`` config options are deprecated in flavor of ``target_ip_address``, ``target_port``, ``target_helper``, ``target_prefix`` and ``target_protocol`` accordingly. Old config options will be removed in S release. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/rename-windows-iscsi-a7b0ca62a48c1371.yaml0000664000175000017500000000024100000000000026037 0ustar00zuulzuul00000000000000--- upgrade: - | The Windows iSCSI driver has been renamed. The updated driver location is ``cinder.volume.drivers.windows.iscsi.WindowsISCSIDriver``. 
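Two of the renames described in the notes above, shown as a hypothetical ``cinder.conf`` fragment; the polling interval value and backend section name are placeholders::

    [DEFAULT]
    # renamed from backup_driver_status_check_interval
    backup_driver_stats_polling_interval = 60

    [windows-iscsi]
    # updated path after the Windows iSCSI driver rename
    volume_driver = cinder.volume.drivers.windows.iscsi.WindowsISCSIDriver
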
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/rename_xiv_ds8k_to_ibm_storage-154eca69c44b3f95.yaml0000664000175000017500000000121400000000000030160 0ustar00zuulzuul00000000000000--- features: - The xiv_ds8k driver now supports IBM XIV, Spectrum Accelerate, FlashSystem A9000, FlashSystem A9000R and DS8000 storage systems, and was renamed to IBM Storage Driver for OpenStack. The changes include text changes, file names, names of cinder.conf flags, and names of the proxy classes. upgrade: - Users of the IBM Storage Driver, previously known as the IBM XIV/DS8K driver, upgrading from Mitaka or previous releases, need to reconfigure the relevant cinder.conf entries. In most cases the change is just removal of the xiv-ds8k field prefix, but for details use the driver documentation. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/replication-group-7c6c8a153460ca58.yaml0000664000175000017500000000027500000000000025400 0ustar00zuulzuul00000000000000--- features: - | Introduced replication group support and added group action APIs enable_replication, disable_replication, failover_replication and list_replication_targets. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/replication-v2.1-3par-b3f780a109f9195c.yaml0000664000175000017500000000011100000000000025570 0ustar00zuulzuul00000000000000--- features: - Added v2.1 replication support to the HPE 3PAR driver. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/replication-v2.1-lefthand-745b72b64e5944c3.yaml0000664000175000017500000000011500000000000026436 0ustar00zuulzuul00000000000000--- features: - Added v2.1 replication support to the HPE LeftHand driver. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/replication-v2.1-storwize-2df7bfd8c253090b.yaml0000664000175000017500000000011400000000000026745 0ustar00zuulzuul00000000000000--- features: - Added replication v2.1 support to the IBM Storwize driver.././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/report-backend-state-in-service-list-1e4ee5a2c623671e.yaml0000664000175000017500000000054700000000000031045 0ustar00zuulzuul00000000000000--- features: - | Added "backend_state: up/down" in response body of service list if context is admin. This feature will help operators or cloud management system to get the backend device state in every service. If device state is *down*, specify that storage device has got some problems. Give more information to locate bugs quickly. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/report-backend-state-in-service-list-739a5398eec4a6b7.yaml0000664000175000017500000000016300000000000031060 0ustar00zuulzuul00000000000000--- features: - | Added flag 'backend_state: up/down' which will give backend state info in service list.././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/report-backend-state-in-service-list-93e9f2b204b735c0.yaml0000664000175000017500000000014700000000000030762 0ustar00zuulzuul00000000000000--- features: - | Added flag 'backend_state' which will give backend state info in service list. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/reset-status-notification-update-4a80a8b5feb821ef.yaml0000664000175000017500000000210300000000000030554 0ustar00zuulzuul00000000000000--- deprecations: - | In this release, sending ``os-reset_status`` notifications to the following *nonstandard* publisher_ids is DEPRECATED: * 'volumeStatusUpdate' for volume status resets * 'volumeStatusUpdate' for snapshot status resets * 'backupStatusUpdate' for backup status resets The notifications continue to be published to the above during the deprecation period. Beginning with this release, the ``os-reset_status`` notifications are also sent to the following *standard* publisher_ids: * 'volume' for volume status resets * 'snapshot' for snapshot status resets * 'backup' for backup status resets This will allow consumers of these notifications to make a smooth transition. In the Victoria release, ``os-reset_status`` notifications will *only* be sent to the standard publisher_ids. fixes: - | ``os-reset_status`` notifications for volumes, snapshots and backups will now go to the standard publisher id for volume, snapshot and backup like all other notifications for volume, snapshot and backup. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/reset-status-notification-update-b655615871db4659.yaml0000664000175000017500000000113700000000000030216 0ustar00zuulzuul00000000000000--- upgrade: - | Prior to the Ussuri release, ``os-reset_status`` notifications for volumes, snapshots, and backups were sent to *nonstandard* publisher_ids. This behavior was deprecated in Ussuri, and notifications were sent to both the standard and nonstandard publisher_ids. In this release, ``os-reset_status`` notifications, like all other notifications for volume, snapshot and backup, are sent *only* to the following *standard* publisher_ids: * 'volume' for volume status resets * 'snapshot' for snapshot status resets * 'backup' for backup status resets ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/retype-assisted-migration-6cdc7f9b21beb859.yaml0000664000175000017500000000041700000000000027301 0ustar00zuulzuul00000000000000--- fixes: - | `Bug #1886543 `_: On retypes requiring a migration, try to use the driver assisted mechanism when moving from one backend to another when we know it's safe from the volume type perspective. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/retype-encrypted-volume-49b66d3e8e65f9a5.yaml0000664000175000017500000000023000000000000026640 0ustar00zuulzuul00000000000000--- features: - Support for retype volumes with different encryptions including changes from unencrypted types to encrypted types and vice-versa. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/revert-snapshot-non-admin-8485be55060eab0d.yaml0000664000175000017500000000026600000000000027033 0ustar00zuulzuul00000000000000--- fixes: - | `Bug #1889758 `_: Fix revert to snapshot not working for non admin users when using the snapshot's name. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/revert-volume-to-snapshot-6aa0dffb010265e5.yaml0000664000175000017500000000010200000000000027135 0ustar00zuulzuul00000000000000--- features: - Added revert volume to snapshot in 3par driver. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/rpc-apis-3.0-b745f429c11d8198.yaml0000664000175000017500000000051500000000000023700 0ustar00zuulzuul00000000000000--- upgrade: - Deployments doing continuous live upgrades from master branch should not upgrade into Ocata before doing an upgrade which includes all the Newton's RPC API version bump commits (scheduler, volume). If you're upgrading deployment in a release-to-release manner, then you can safely ignore this note. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/rpc-update-50bef83f48d4f96f.yaml0000664000175000017500000000034300000000000024157 0ustar00zuulzuul00000000000000--- upgrade: - | SPDK target and volume drivers have been updated with new SPDK specific RPC calls due to deprecation of some old RPC calls. Starting from Ussuri release SPDK release v19.10 or higher is required. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/rpc_compatibility-375be8ac3158981d.yaml0000664000175000017500000000130500000000000025456 0ustar00zuulzuul00000000000000--- features: - Added RPC backward compatibility layer similar to the one implemented in Nova. This means that Cinder services can be upgraded one-by-one without breakage. After all the services are upgraded SIGHUP signals should be issued to all the services to signal them to reload cached minimum RPC versions. Alternative is of course restart of them. Please note that cinder-api service doesn't support SIGHUP yet. Please also take into account that all the rolling upgrades capabilities are considered tech preview, as we don't have a CI testing it yet. upgrade: - Starting from Mitaka release Cinder is having a tech preview of rolling upgrades support. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/rsd-cinder-driver-d71b88292536bfea.yaml0000664000175000017500000000014100000000000025337 0ustar00zuulzuul00000000000000--- features: - | Added a new Cinder driver for RackScale Design NVMe-oF storage solution. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/sandstone-iscsi-driver-31ed72d5657a4307.yaml0000664000175000017500000000014400000000000026247 0ustar00zuulzuul00000000000000--- features: - Added SandStone driver that allows cinder to manage volumes in ISCSI environment. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/sc-handle-multiattach-onterminate-6ab1f96f21bb284d.yaml0000664000175000017500000000035300000000000030566 0ustar00zuulzuul00000000000000--- fixes: - | Dell EMC SC Driver: Fixes `bug 1822229 `__ to handle the volume mappings in the backend when a volume is attached to multiple instances on the same host. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/scaleio-backup-via-snapshot-8e75aa3f4570e17c.yaml0000664000175000017500000000021400000000000027306 0ustar00zuulzuul00000000000000--- features: - Add support to backup volume using snapshot in the Unity driver, which enables backing up of volumes that are in-use. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/scaleio-consistency-groups-707f9b4ffcb3c14c.yaml0000664000175000017500000000010500000000000027442 0ustar00zuulzuul00000000000000--- features: - Added Consistency Group support in ScaleIO driver. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/scaleio-default-volume-provisioning-c648118fcc8f297f.yaml0000664000175000017500000000021200000000000031117 0ustar00zuulzuul00000000000000--- upgrade: - EMC ScaleIO driver now uses the config option ``san_thin_provision`` to determine the default provisioning type. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/scaleio-deprecate-1.32-32033134fec181bb.yaml0000664000175000017500000000016300000000000025642 0ustar00zuulzuul00000000000000--- deprecations: - | Support for ScaleIO 1.32 is now deprecated and will be removed in a future release. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/scaleio-deprecate-config-1aa300d0c78ac81c.yaml0000664000175000017500000000146200000000000026664 0ustar00zuulzuul00000000000000--- deprecations: - | The ScaleIO Driver has deprecated several options specified in ``cinder.conf``: * ``sio_protection_domain_id`` * ``sio_protection_domain_name``, * ``sio_storage_pool_id`` * ``sio_storage_pool_name``. Users of the ScaleIO Driver should now utilize the ``sio_storage_pools`` options to provide a list of protection_domain:storage_pool pairs. - | The ScaleIO Driver has deprecated the ability to specify the protection domain, as ``sio:pd_name``, and storage pool, as ``sio:sp_name``, extra specs in volume types. The supported way to specify a specific protection domain and storage pool in a volume type is to define a ``pool_name`` extra spec and set the value to the appropriate ``protection_domain_name:storage_pool_name``. 
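As a sketch of the ``sio_storage_pools`` format described in the ScaleIO deprecation note above, a backend section might list protection_domain:storage_pool pairs like this; the domain and pool names are invented for illustration::

    [scaleio]
    # comma-separated protection_domain:storage_pool pairs, per the note above
    sio_storage_pools = domain1:pool1, domain1:pool2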
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/scaleio-enable-multiattach-e7d84ffa282842e9.yaml0000664000175000017500000000046600000000000027220 0ustar00zuulzuul00000000000000--- features: - | The multiattach capability has been enabled and verified as working with the ScaleIO driver. It is the user's responsibility to add some type of exclusion (at the file system or network file system layer) to prevent multiple writers from corrupting data on the volume. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/scaleio-generic-volume-group-ee36e4dba8893422.yaml0000664000175000017500000000013500000000000027507 0ustar00zuulzuul00000000000000--- features: - Added consistency group support to generic volume groups in ScaleIO Driver.././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/scaleio-get-manageable-volumes-dda1e7b8e22be59e.yaml0000664000175000017500000000012500000000000030170 0ustar00zuulzuul00000000000000--- features: - Added ability to list all manageable volumes within ScaleIO Driver.././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/scaleio-manage-existing-32217f6d1c295193.yaml0000664000175000017500000000012200000000000026261 0ustar00zuulzuul00000000000000--- features: - Added support for manage/unmanage volume in the ScaleIO driver. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/scaleio-manage-existing-snapshot-5bbd1818654c0776.yaml0000664000175000017500000000012400000000000030205 0ustar00zuulzuul00000000000000--- features: - Added support for manage/unmanage snapshot in the ScaleIO driver. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/scaleio-qos-support-2ba20be58150f251.yaml0000664000175000017500000000006700000000000025644 0ustar00zuulzuul00000000000000--- features: - Added QoS support in ScaleIO driver. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/scaleio-rebranding-d2d113c5d8e5c118.yaml0000664000175000017500000000050400000000000025532 0ustar00zuulzuul00000000000000--- features: - | Dell EMC ScaleIO has been renamed to Dell EMC VxFlex OS. Documentation for the driver can be found under the new name. The driver maintains full backwards compatability with prior ScaleIO releases and no configuration changes are needed upon upgrade to the new version of the driver. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/scaleio-remove-force-delete-config-48fae029e3622d6d.yaml0000664000175000017500000000011000000000000030515 0ustar00zuulzuul00000000000000--- upgrade: - Removed force_delete option from ScaleIO configuration.././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/scaleio-scaling-qos-50c58e43d4b54247.yaml0000664000175000017500000000020600000000000025505 0ustar00zuulzuul00000000000000--- features: - Added support for scaling QoS in the ScaleIO driver. 
The new QoS keys are ``maxIOPSperGB`` and ``maxBWSperGB``. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/scaleio-thin-provisioning-support-9c3b9203567771dd.yaml0000664000175000017500000000071200000000000030476 0ustar00zuulzuul00000000000000--- features: - Added support for oversubscription in thin provisioning in the ScaleIO driver. Volumes should have extra_specs with the key ``provisioning:type`` with value equals to either ``thick`` or ``thin``. ``max_oversubscription_ratio`` can be defined by the global config or for ScaleIO specific with the config option ``sio_max_over_subscription_ratio``. The maximum oversubscription ratio supported at the moment is 10.0. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/scaleio-vxflexos-rebrand-27dfe2b82d35b6a2.yaml0000664000175000017500000000102600000000000026773 0ustar00zuulzuul00000000000000--- upgrade: - | Dell EMC ScaleIO has been rebranded to VxFlex OS. The drivers ``cinder.volume.drivers.dell_emc.scaleio.driver.ScaleIODriver`` will now be updated to ``cinder.volume.drivers.dell_emc.vxflexos.driver.VxFlexOSDriver`` in cinder.conf. Driver configuration options that start with ``sio`` should also be updated to ``vxflexos``. Existing sio configuration options will continue to work but will be removed in the V release. Online documentation will also change to reflect these changes. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/scaleio-zeropadding-a0273c56c4d14fca.yaml0000664000175000017500000000055400000000000026005 0ustar00zuulzuul00000000000000--- security: - | Removed the ability to create volumes in a ScaleIO Storage Pool that has zero-padding disabled. A new configuration option ``sio_allow_non_padded_volumes`` has been added to override this new behavior and allow unpadded volumes, but should not be enabled if multiple tenants will utilize volumes from a shared Storage Pool. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/scaling-backup-service-7e5058802d2fb3dc.yaml0000664000175000017500000000044500000000000026335 0ustar00zuulzuul00000000000000--- features: - cinder-backup service is now decoupled from cinder-volume, which allows more flexible scaling. upgrade: - As cinder-backup was strongly reworked in this release, the recommended upgrade order when executing live (rolling) upgrade is c-api->c-sch->c-vol->c-bak. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/seagate-new-driver-d420fad549e9045f.yaml0000664000175000017500000000011500000000000025503 0ustar00zuulzuul00000000000000--- features: - New Cinder driver for Seagate FC and iSCSI storage arrays. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/service_dynamic_log_change-55147d288be903f1.yaml0000664000175000017500000000037300000000000027174 0ustar00zuulzuul00000000000000--- features: - | Added new APIs on microversion 3.32 to support dynamically changing log levels in Cinder services without restart as well as retrieving current log levels, which is an easy way to ping via the message broker a service. 
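Relating to the ScaleIO to VxFlex OS rebranding note above, the updated cinder.conf entry would look roughly as follows; the section name is a placeholder, and the class path is the one quoted in that note::

    [vxflexos]
    volume_driver = cinder.volume.drivers.dell_emc.vxflexos.driver.VxFlexOSDriver
    # options formerly prefixed with sio should now use the vxflexos prefix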
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/sf-add-migration-support-691ace064d7576e9.yaml0000664000175000017500000000032100000000000026602 0ustar00zuulzuul00000000000000--- features: - | NetApp SolidFire driver: Added inter-cluster volume migration (storage assisted) support. This allows users to efficiently migrate volumes between different SolidFire backends. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/sf-fix-clone-and-request-timeout-issues-56f7a7659c7ec775.yaml0000664000175000017500000000043000000000000031474 0ustar00zuulzuul00000000000000--- fixes: - | `Bug #1898587 `_: Address cloning and api request timeout issues users may hit in certain environments, by allowing configuring timeout values for these operations through cinder configuration file. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/sf-fix-duplicate-volume-request-lost-adefacda1298dc62.yaml0000664000175000017500000000136200000000000031425 0ustar00zuulzuul00000000000000--- fixes: - | NetApp SolidFire driver `Bug #1896112 `_: Fixes an issue that may duplicate volumes during creation, in case the SolidFire backend successfully processes a request and creates the volume, but fails to deliver the result back to the driver (the response is lost). When this scenario occurs, the SolidFire driver will retry the operation, which previously resulted in the creation of a duplicate volume. This fix adds the ``sf_volume_create_timeout`` configuration option (default value: 60 seconds) which specifies an additional length of time that the driver will wait for the volume to become active on the backend before raising an exception. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/sf-fix-error-on-cluster-rebalancing-515bf41104cd181a.yaml0000664000175000017500000000047200000000000030572 0ustar00zuulzuul00000000000000--- fixes: - | NetApp SolidFire driver `Bug #1891914 `_: Fix an error that might occur on cluster workload rebalancing or system upgrade, when an operation is made to a volume at the same time its connection is being moved to a secondary node. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/shared-backend-config-d841b806354ad5be.yaml0000664000175000017500000000036200000000000026106 0ustar00zuulzuul00000000000000--- features: - New config format to allow for using shared Volume Driver configuration defaults via the [backend_defaults] stanza. Config options defined there will be used as defaults for each backend enabled via enabled_backends.././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/sheepdog-driver-removal-b63d12460e886c33.yaml0000664000175000017500000000035400000000000026405 0ustar00zuulzuul00000000000000--- upgrade: - | The Sheepdog driver was marked unsupported in the Train release and has now been removed. All data on Sheepdog backends should be migrated to a supported backend before upgrading your Cinder installation. 
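As a minimal sketch of the ``[backend_defaults]`` stanza introduced in the shared-backend-config note above; the backend names and the option shown are illustrative assumptions::

    [DEFAULT]
    enabled_backends = backend1, backend2

    [backend_defaults]
    # applies to every enabled backend unless overridden in its own section
    image_volume_cache_enabled = True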
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/sheepdog-mark-unsupported-648b2458d4a198de.yaml0000664000175000017500000000103600000000000027060 0ustar00zuulzuul00000000000000--- upgrade: - | The Sheepdog driver has been marked as unsupported and is now deprecated. ``enable_unsupported_driver`` will need to be set to ``True`` in the driver's section in cinder.conf to continue to use the driver. deprecations: - | The Sheepdog driver has been marked as unsupported and is now deprecated. ``enable_unsupported_driver`` will need to be set to ``True`` in the driver's section in cinder.conf to continue to use the driver. The driver is scheduled for removal in the 'U' release.././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/show-provider-id-for-admin-ff4fd5a2518a4bfa.yaml0000664000175000017500000000011600000000000027301 0ustar00zuulzuul00000000000000--- features: - Add provider_id in the detailed view of a volume for admin. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/slow-get-volume-stats-91b84c6e661dc605.yaml0000664000175000017500000000041300000000000026136 0ustar00zuulzuul00000000000000--- features: - | Log a warning from the volume service when a volume driver's get_volume_stats() call takes a long time to return. This can help deployers troubleshoot a cinder-volume service misbehaving due to a driver/backend performance issue. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/slug-b6a0fc3db0a2dd45.yaml0000664000175000017500000000063400000000000023165 0ustar00zuulzuul00000000000000--- other: - | Unified how cinder calculates the virtual free storage space for a pool. Previously Cinder had 2 different mechanisms for calculating the virtual free storage. Now both the Capacity Filter and the Capacity Weigher use the same mechanism, which is based upon the defined terms in https://specs.openstack.org/openstack/cinder-specs/specs/queens/provisioning-improvements.html ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/slug-qnap-driver-d4465ea6009c66df.yaml0000664000175000017500000000010200000000000025207 0ustar00zuulzuul00000000000000--- features: - Added volume driver for QNAP ES Storage Driver. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/smbfs-drop-alloc-data-file-8b94da952a3b1548.yaml0000664000175000017500000000036700000000000026733 0ustar00zuulzuul00000000000000--- deprecations: - | The 'smbfs_allocation_info_file_path' SMBFS driver config option is now deprecated as we're no longer using a JSON file to store volume allocation data. This file had a considerable chance of getting corrupted. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/smbfs-fixed-image-9b642b63fcb79c18.yaml0000664000175000017500000000022600000000000025311 0ustar00zuulzuul00000000000000--- features: - | The SMBFS volume driver can now be configured to use fixed vhd/x images through the 'nas_volume_prov_type' config option. 
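A hedged illustration of the SMBFS option mentioned in the note above; the value shown assumes that the generic ``nas_volume_prov_type`` option accepts ``thick`` to request fixed (fully allocated) vhd/x images, so verify the accepted values for your release::

    [smbfs]
    # assumed value; the note only names the option, not its choices
    nas_volume_prov_type = thick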
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/smbfs-manage-unmanage-f1502781dd5f82cb.yaml0000664000175000017500000000025300000000000026142 0ustar00zuulzuul00000000000000--- features: - | The SMBFS driver now supports the volume manage/unmanage feature. Images residing on preconfigured shares may be listed and managed by Cinder. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/smbfs-pools-support-bc43c653cfb1a34f.yaml0000664000175000017500000000024700000000000026121 0ustar00zuulzuul00000000000000--- features: - | The SMBFS driver now exposes share information to the scheduler via pools. The pool names are configurable, defaulting to the share names. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/smbfs-removed-options-2c86101340273252.yaml0000664000175000017500000000045400000000000025657 0ustar00zuulzuul00000000000000--- upgrade: - | The following SMBFS volume driver config options have been deprecated and are now being removed: ``smbfs_allocation_info_file_path``, ``smbfs_sparsed_volumes``, ``smbfs_used_ratio``, ``smbfs_oversub_ratio``. Note that the equivalent generic options are available. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/smbfs-revert-snapshot-5b265ed5ded951dc.yaml0000664000175000017500000000015300000000000026431 0ustar00zuulzuul00000000000000--- features: - | The SMBFS volume driver now supports reverting volumes to the latest snapshot. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/smbfs-snapshot-attach-14742fe8f5864ac6.yaml0000664000175000017500000000033500000000000026157 0ustar00zuulzuul00000000000000--- features: - | The SMBFS driver now supports the 'snapshot attach' feature. Special care must be taken when attaching snapshots though, as writing to a snapshot will corrupt the differencing image chain. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/snapshot-in-use-without-force-86c6d74ebc9c0d60.yaml0000664000175000017500000000052000000000000027726 0ustar00zuulzuul00000000000000--- features: - | As of API version 3.66, volume snapshots of in-use volumes can be created without passing the 'force' flag, and the 'force' flag is considered invalid for this request. For backward compatibility, however, when the 'force' flag is passed with a value evaluating to True, it is silently ignored. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/snapshot_backing_up_status_support-164fbbb2a564e137.yaml0000664000175000017500000000054400000000000031225 0ustar00zuulzuul00000000000000--- fixes: - When backing up a volume from a snapshot, the volume status would be set to "backing-up", preventing operations on the volume until the backup is complete. This status is now set on the snapshot instead, making the volume available for other operations. upgrade: - The "backing-up" status has been added to the snapshot status matrix. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/solidfire-active-active-replication-support-f77e0e12320f8b21.yaml0000664000175000017500000000033000000000000032441 0ustar00zuulzuul00000000000000--- features: - | NetApp SolidFire driver: Enabled support for Active/Active (including replication) in the SolidFire driver. This allows users to configure SolidFire backends in clustered environments.././@PaxHeader0000000000000000000000000000021300000000000011451 xustar0000000000000000117 path=cinder-27.0.0/releasenotes/notes/solidfire-fix-retype-and-name-exception-on-migration-2af26f095b7cb345.yaml 22 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/solidfire-fix-retype-and-name-exception-on-migration-2af26f095b7cb30000664000175000017500000000054500000000000033027 0ustar00zuulzuul00000000000000--- fixes: - | NetApp SolidFire driver `Bug #1932964 <https://bugs.launchpad.net/cinder/+bug/1932964>`_: Fixed a name exception that occurs on any volume migration. - | NetApp SolidFire driver `Bug #1942090 <https://bugs.launchpad.net/cinder/+bug/1942090>`_: Fixed a status exception that occurs on volume retype with migration. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/solidfire-no-attach-metadata-b17729ebd34703db.yaml0000664000175000017500000000032700000000000027423 0ustar00zuulzuul00000000000000--- other: - | SolidFire driver: The driver no longer stores the attach timestamp and instance as metadata on the storage array. Any such metadata remaining in the array must be considered outdated and incorrect. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/solidfire-scaled-qos-9b8632453909e2db.yaml0000664000175000017500000000174400000000000025701 0ustar00zuulzuul00000000000000--- features: - The SolidFire driver will recognize 4 new QoS spec keys to allow an administrator to specify QoS settings which are scaled by the size of the volume. 'ScaledIOPS' is a flag which will tell the driver to look for 'scaleMin', 'scaleMax' and 'scaleBurst', which provide the scaling factor from the minimum values specified by the previous QoS keys ('minIOPS', 'maxIOPS', 'burstIOPS'). The administrator must take care to ensure that the final calculated QoS values always satisfy minIOPS <= maxIOPS <= burstIOPS; an exception will be thrown if they do not. The QoS settings are also checked against the cluster minimum and maximum allowed and are truncated at the minimum or maximum if they exceed those limits. fixes: - For SolidFire, QoS specs are now checked to make sure they fall within the min and max constraints. If not, the QoS specs are capped at the min or max (e.g. if a spec says 50 and the minimum supported is 100, the driver will set it to 100). ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/solidfire-v2.1-replication-570a1f12f70e67b4.yaml0000664000175000017500000000011000000000000026670 0ustar00zuulzuul00000000000000--- features: - Added v2.1 replication support to SolidFire driver. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/spdk-add-config-parameter-39a880ec22956fd2.yaml0000664000175000017500000000021700000000000026652 0ustar00zuulzuul00000000000000--- upgrade: - | A new config option, ``spdk_max_queue_depth``, has been added for the SPDK NVMe-oF target. It allows users to specify the maximum queue depth. 
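For the new SPDK option noted above, a configuration entry might look like the following; the backend section name and the value are purely illustrative, since the note does not state a default::

    [spdk]
    spdk_max_queue_depth = 64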
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/spdk-nvmf-target-31e4d4dd5e2f2114.yaml0000664000175000017500000000035300000000000025172 0ustar00zuulzuul00000000000000--- features: - | A new target, spdk-nvmeof, is added for the SPDK driver over RDMA. It allows cinder to use SPDK target in order to create/delete subsystems on attaching/detaching an SPDK volume to/from an instance. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/spdk-volume-081f6e72396b30e8.yaml0000664000175000017500000000030200000000000024121 0ustar00zuulzuul00000000000000--- features: - | A new volume driver, SPDK, is added for Storage Performance Development Kit NVMe-oF target handling, that allows Cinder to manage volumes in SPDK NVMe-oF driver. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/speed-up-starting-cinder-backup-76c1618b4cdb9d6e.yaml0000664000175000017500000000070600000000000030163 0ustar00zuulzuul00000000000000--- other: - | The Cinder Backup service examined every known backup upon startup previously, in order to restart the incomplete backups. This was a problem for installations with a large number of backups. We now use one database request in order to compile a list of incomplete backups. See Change-Id `I5c6065d99116ae5f223799e8558d25777aedd055 `_. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/split-out-nested-quota-driver-e9493f478d2b8be5.yaml0000664000175000017500000000077500000000000027705 0ustar00zuulzuul00000000000000--- features: - Split nested quota support into a separate driver. In order to use nested quotas, change the following config ``quota_driver = cinder.quota.NestedDbQuotaDriver`` after running the following admin API "os-quota-sets/validate_setup_for_nested_quota_use" command to ensure the existing quota values make sense to nest. upgrade: - Nested quotas will no longer be used by default, but can be configured by setting ``quota_driver = cinder.quota.NestedDbQuotaDriver`` ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/ssl-cert-fix-42e8f263c15d5343.yaml0000664000175000017500000000011500000000000024166 0ustar00zuulzuul00000000000000--- fixes: - | VMAX driver - fixes SSL certificate verification error. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/store-volume-format-info-1e17e029a9a9e578.yaml0000664000175000017500000000110100000000000026615 0ustar00zuulzuul00000000000000--- features: - | Cinder now stores the format of the backing file (raw or qcow2), for FS backends, in the volume admin metadata and includes the format in the connection_info returned in the Attachments API. Previously cinder tried to introspect the format, and under some circumstances, an incorrect format would be deduced. This will still be the case for legacy volumes. Explicitly storing the format will avoid this issue for newly created volumes. 
`See spec for more info `_.././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/storpool-clone-across-pools-b3f7923dee35503a.yaml0000664000175000017500000000033400000000000027411 0ustar00zuulzuul00000000000000--- features: - | The StorPool driver now declares the "clone across pools" capability, which allows it to create a volume into an arbitrary StorPool-backed volume type from a StorPool-backed Glance image. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/storpool-clone-better-dca90f40c9273de9.yaml0000664000175000017500000000035500000000000026343 0ustar00zuulzuul00000000000000--- features: - | StorPool driver: improved the way volumes are cloned into different StorPool templates (exposed as Cinder storage pools) if requested, eliminating some data duplication in the underlying StorPool cluster. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/storpool-move-api-and-config-code-in-tree-92cfe30690b78ef1.yaml0000664000175000017500000000044400000000000031673 0ustar00zuulzuul00000000000000--- other: - | Use the new implementation in os-brick for communicating with the StorPool API and reading StorPool configuration files. The StorPool backend no longer requires the OpenStack nodes to have the Python packages `storpool` and `storpool.spopenstack` installed. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/storpool-multiattach-f9b7edccd4db7e02.yaml0000664000175000017500000000016400000000000026513 0ustar00zuulzuul00000000000000--- features: - | The StorPool driver enables the ``multiattach`` and ``thin_provisioning`` capabilities. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/storpool-revert-to-snapshot-a202358ee16ecb62.yaml0000664000175000017500000000023000000000000027437 0ustar00zuulzuul00000000000000--- features: - | StorPool driver: implemented revert to snapshot, which happens immediately i.e. without deleting and recreating the volume. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/storpool-volume-driver-4d5f16ad9c2f373a.yaml0000664000175000017500000000007100000000000026547 0ustar00zuulzuul00000000000000--- features: - The StorPool backend driver was added. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/storwize-backup-snapshot-support-728e18dfa0d42943.yaml0000664000175000017500000000011000000000000030425 0ustar00zuulzuul00000000000000--- features: - Add backup snapshots support for Storwize/SVC driver. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/storwize-cg-replication-b038ff0d39fe909f.yaml0000664000175000017500000000012600000000000026670 0ustar00zuulzuul00000000000000--- features: - Add consistent replication group support in Storwize Cinder driver. 
././@PaxHeader0000000000000000000000000000021300000000000011451 xustar0000000000000000117 path=cinder-27.0.0/releasenotes/notes/storwize-disable-create-volume-with-non-cgsnap-group-6cba8073e3d6cadd.yaml 22 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/storwize-disable-create-volume-with-non-cgsnap-group-6cba8073e3d6ca0000664000175000017500000000014400000000000033154 0ustar00zuulzuul00000000000000--- features: - Disabled creating a volume with a non cg_snapshot group_id in the Storwize/SVC driver. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/storwize-dr-pool-support-52db3a95e54aef88.yaml0000664000175000017500000000020300000000000027047 0ustar00zuulzuul00000000000000--- features: - | Added data reduction pool support for thin-provisioned and compressed volumes in the Storwize cinder driver. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/storwize-generic-volume-group-74495fa23e059bf9.yaml0000664000175000017500000000014500000000000027705 0ustar00zuulzuul00000000000000--- features: - Add consistency group capability to generic volume groups in Storwize drivers. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/storwize-gmcv-support-8aceee3f40eddb9f.yaml0000664000175000017500000000052600000000000026734 0ustar00zuulzuul00000000000000--- features: - Added global mirror with change volumes (gmcv) support; users can manage gmcv replication volumes with the SVC driver. For example, to define a gmcv replication volume type, set the property replication_type to "<in> gmcv", the property replication_enabled to "<is> True", and the property drivers:cycle_period_seconds to 500.././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/storwize-hyperswap-host-site-update-621e763768fab9ee.yaml0000664000175000017500000000027000000000000031132 0ustar00zuulzuul00000000000000--- fixes: - | Updated the parameter storwize_preferred_host_site from StrOpt to DictOpt in the cinder back-end configuration, and removed it from the volume type configuration. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/storwize-hyperswap-support-b830182e1058cb4f.yaml0000664000175000017500000000023300000000000027341 0ustar00zuulzuul00000000000000--- features: - Added hyperswap volume and group support in the Storwize cinder driver. Storwize/svc versions prior to 7.6 do not support this feature. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/storwize-multiple-management-ip-1cd364d63879d9b8.yaml0000664000175000017500000000011700000000000030210 0ustar00zuulzuul00000000000000--- features: - Added multiple management IP support to Storwize SVC driver. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/storwize-pool-aware-support-7a40c9934642b202.yaml0000664000175000017500000000010700000000000027225 0ustar00zuulzuul00000000000000--- features: - Added multiple pools support to Storwize SVC driver. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/storwize-retain-aux-volme-f90fa6fde657d64f.yaml0000664000175000017500000000061500000000000027255 0ustar00zuulzuul00000000000000--- features: - | Added the option ``storwize_svc_retain_aux_volume`` to IBM Storwize Driver which takes ``True`` or ``False``. This option is to enable or disable retaining of auxiliary volume on secondary storage during delete of the volume on primary storage or moving the primary volume from mirror to non-mirror with replication enabled. The default value is ``False``. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/storwize-revert-snapshot-681c76d68676558a.yaml0000664000175000017500000000011700000000000026647 0ustar00zuulzuul00000000000000--- features: - Add reverting to snapshot support in Storwize Cinder driver. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/storwize-split-up-__init__-153fa8f097a81e37.yaml0000664000175000017500000000053500000000000027141 0ustar00zuulzuul00000000000000--- upgrade: - Removed storwize_svc_connection_protocol config setting. Users will now need to set different values for volume_driver in cinder.conf. FC:volume_driver = cinder.volume.drivers.ibm.storwize_svc.storwize_svc_fc.StorwizeSVCFCDriver iSCSI:volume_driver = cinder.volume.drivers.ibm.storwize_svc.storwize_svc_iscsi.StorwizeSVCISCSIDriver ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/storwize_iscsi_multipath_enhance-9de9dc29661869cd.yaml0000664000175000017500000000010600000000000030666 0ustar00zuulzuul00000000000000--- features: - Add multipath enhancement to Storwize iSCSI driver. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/support-az-in-volumetype-8yt6fg67de3976ty.yaml0000664000175000017500000000113300000000000027252 0ustar00zuulzuul00000000000000--- features: | Now availability zone is supported in volume type as below. * ``RESKEY:availability_zones`` now is a reserved spec key for AZ volume type, and administrator can create AZ volume type that includes AZ restrictions by adding a list of Az's to the extra specs similar to: ``RESKEY:availability_zones: az1,az2``. * Extra spec ``RESKEY:availability_zones`` will only be used for filtering backends when creating and retyping volumes. * Volume type can be filtered within extra spec: /types?extra_specs={"key":"value"} since microversion "3.52". ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/support-cg-2b55da0bd9f69c7d.yaml0000664000175000017500000000106500000000000024263 0ustar00zuulzuul00000000000000--- features: - | Dell EMC Unity driver: Add consistent group support. Users could create a group type supporting consistent groups with specification `'consistent_group_snapshot_enabled': True`, then any groups created of that group type are consistent groups, otherwise they are generic groups. The supported operations are: create/delete consistent groups, add volumes to and remove volumes from consistent groups, create/delete consistent group snapshots, create consistent groups from snapshots, clone consistent groups. 
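Relating to the Storwize/SVC driver split described in the storwize-split-up note above, the two protocol-specific settings quoted there would appear in cinder.conf roughly as follows; the backend section names are placeholders, while the class paths are the ones quoted in the note::

    [storwize-fc]
    volume_driver = cinder.volume.drivers.ibm.storwize_svc.storwize_svc_fc.StorwizeSVCFCDriver

    [storwize-iscsi]
    volume_driver = cinder.volume.drivers.ibm.storwize_svc.storwize_svc_iscsi.StorwizeSVCISCSIDriver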
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/support-create-volume-from-backup-d363e2b502a76dc2.yaml0000664000175000017500000000037100000000000030467 0ustar00zuulzuul00000000000000--- features: - | Starting with API microversion 3.47, Cinder now supports the ability to create a volume directly from a backup. For instance, you can use the command: ``cinder create --backup-id `` in cinderclient. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/support-extend-inuse-volume-9e4atf8912qaye99.yaml0000664000175000017500000000102200000000000027676 0ustar00zuulzuul00000000000000--- features: - | Add ability to extend ``in-use`` volume. User should be aware of the whole environment before using this feature because it's dependent on several external factors below: * nova-compute version - needs to be the latest for Pike. * only the libvirt compute driver supports this currently. * only iscsi and fibre channel volume types are supported on the nova side currently. Administrator can disable this ability by updating the ``volume:extend_attached_volume`` policy rule. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/support-glance-multiple-stores-79d11c5344f41446.yaml0000664000175000017500000000224500000000000027707 0ustar00zuulzuul00000000000000--- features: - | This release includes support for Glance multiple stores. An operator may now specify which Glance store will be used when a volume is uploaded to Glance as an image. Some details about this feature: * This feature is not directly user-facing. To enable it, an operator must add the field ``image_service:store_id`` in the volume-type extra-specs. The value of the field is a valid store identifier (``id``) configured in Glance, which may be discovered by making a ``GET /v2/info/stores`` call to the Image Service API. * If ``image_service:store_id`` is not set in the extra-specs for a volume-type, then any volume of that type uploaded as an image will be uploaded to the default store in Glance. * The ``image_service:store_id`` can only be set in the extra-specs for a volume-type when multiple glance stores are configured. * Cinder validates proposed Glance store identifiers by contacting Glance at the time the ``image_service:store_id`` is added to a volume-type's extra-specs. Thus the Image Service API must be available when a volume-type is updated. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/support-huawei-consistency-group-b666f8f6c6cddd8f.yaml0000664000175000017500000000010700000000000030740 0ustar00zuulzuul00000000000000--- features: - Added consistency group support to the Huawei driver.././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/support-image-signature-verification-yu8qub7286et9dh4.yaml0000664000175000017500000000067400000000000031662 0ustar00zuulzuul00000000000000--- features: - Added image signature verification support when creating volume from image. This depends on signature metadata from glance. This feature is turned on by default, administrators can change behaviour by updating option ``verify_glance_signatures``. 
Also, an additional image metadata ``signature_verified`` has been added to indicate whether signature verification was performed during creating process. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/support-images-api-2.11-3699b20670db1843.yaml0000664000175000017500000000113400000000000025677 0ustar00zuulzuul00000000000000--- fixes: - | `Bug #1898075 `_: When Glance added support for multiple cinder stores, Images API version 2.11 modified the format of the image location URI, which Cinder reads in order to try to use an optimized data path when creating a volume from an image. Unfortunately, Cinder did not understand the new format and when Glance multiple cinder stores were used, Cinder could not use the optimized data path, and instead downloaded image data from the Image service. Cinder now supports Images API version 2.11. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/support-incremental-backup-completion-in-rbd-1f2165fefcc470d1.yaml0000664000175000017500000000025500000000000032665 0ustar00zuulzuul00000000000000--- fixes: - Fixed issue where all Ceph RBD backups would be incremental after the first one. The driver now honors whether ``--incremental`` is specified or not. ././@PaxHeader0000000000000000000000000000020500000000000011452 xustar0000000000000000111 path=cinder-27.0.0/releasenotes/notes/support-metadata-based-snapshot-list-filtering-6e6df68a7ce981f5.yaml 22 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/support-metadata-based-snapshot-list-filtering-6e6df68a7ce981f5.yam0000664000175000017500000000030000000000000033071 0ustar00zuulzuul00000000000000--- features: - Added support to querying snapshots filtered by metadata key/value using 'metadata' optional URL parameter. For example, "/v3/snapshots?metadata=={'key1':'value1'}". ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/support-metadata-for-backup-3d8753f67e2934fa.yaml0000664000175000017500000000017500000000000027274 0ustar00zuulzuul00000000000000--- features: - Added metadata support for backup source. Now users can create/update metadata for a specified backup. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/support-modern-compression-9984f77bb12e97e0.yaml0000664000175000017500000000040400000000000027307 0ustar00zuulzuul00000000000000features: - | Added support to cinder backup for use of the Zstandard compression algorithm. To use it, set the ``backup_compression_algorithm`` to ``zstd`` in the cinder configuration file. (The default value for this option is ``zlib``.) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/support-project-id-filter-for-limit-bc5d49e239baee2a.yaml0000664000175000017500000000011000000000000031154 0ustar00zuulzuul00000000000000--- features: - Supported ``project_id`` admin filters to limits API. 
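As an example of the Zstandard backup compression option described above, the setting goes in the cinder configuration file; placing it under ``[DEFAULT]`` is an assumption about where your backup options live::

    [DEFAULT]
    backup_compression_algorithm = zstd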
././@PaxHeader0000000000000000000000000000021100000000000011447 xustar0000000000000000115 path=cinder-27.0.0/releasenotes/notes/support-retype-operation-for-global-mirror-volumes-e7091ac130e41cbd.yaml 22 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/support-retype-operation-for-global-mirror-volumes-e7091ac130e41cbd0000664000175000017500000000017500000000000033156 0ustar00zuulzuul00000000000000--- features: - | IBM Spectrum Virtualize: Adds support for retype operation on global mirror volumes. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/support-tenants-project-in-attachment-list-3edd8g138a28s4r8.yaml0000664000175000017500000000014700000000000032477 0ustar00zuulzuul00000000000000--- fixes: - Add ``all_tenants``, ``project_id`` support in the attachment list and detail APIs. ././@PaxHeader0000000000000000000000000000023300000000000011453 xustar0000000000000000133 path=cinder-27.0.0/releasenotes/notes/support-to-query-cinder-resources-filter-by-update-at-and-created-at-32ae9aaea131d598.yaml 22 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/support-to-query-cinder-resources-filter-by-update-at-and-created-a0000664000175000017500000000060400000000000033544 0ustar00zuulzuul00000000000000--- features: - Beginning with microversion 3.60, users may apply time comparison filters to the volume summary list and volume detail list requests by using the ``created_at`` or ``updated_at`` fields. Time must be expressed in ISO 8601 format. See the `Block Storage API v3 Reference `_ for details. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/support-volume-glance-metadata-query-866b9e3beda2cd55.yaml0000664000175000017500000000032600000000000031351 0ustar00zuulzuul00000000000000--- features: - Added support for querying volumes filtered by glance metadata key/value using 'glance_metadata' optional URL parameter. For example, "volumes/detail?glance_metadata={"image_name":"xxx"}".././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/support-zeromq-messaging-driver-d26a1141290f5548.yaml0000664000175000017500000000013500000000000030056 0ustar00zuulzuul00000000000000--- features: - Added support for ZeroMQ messaging driver in cinder single backend config. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/support-zmq-messaging-41085787156fbda1.yaml0000664000175000017500000000012700000000000026147 0ustar00zuulzuul00000000000000--- features: - Added support for ZMQ messaging layer in multibackend configuration. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/support_deferred_deletion_in_RBD-0c5d96f8eac5b50a.yaml0000664000175000017500000000012400000000000030561 0ustar00zuulzuul00000000000000--- features: - | Add support for deferred deletion in the RBD volume driver. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/support_sort_backup_by_name-0b080bcb60c0eaa0.yaml0000664000175000017500000000007400000000000027715 0ustar00zuulzuul00000000000000--- features: - Add support for sorting backups by "name".././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/support_sort_snapshot_with_name-7b66a2d8e587275d.yaml0000664000175000017500000000007100000000000030503 0ustar00zuulzuul00000000000000--- features: - Support to sort snapshots with "name". ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/supported-drivers-9c95dd2378cd308d.yaml0000664000175000017500000000010000000000000025515 0ustar00zuulzuul00000000000000--- features: - Added supported driver checks on all drivers. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/svf-revert-to-snapshot-globalmirror-volume-e70fdb9115020283.yaml0000664000175000017500000000016700000000000032222 0ustar00zuulzuul00000000000000--- features: - | IBM Spectrum Virtualize Family: Added support for revert to snapshot for global-mirror volume. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/switch-to-alembic-2bbe27749fde70ff.yaml0000664000175000017500000000242700000000000025503 0ustar00zuulzuul00000000000000--- upgrade: - | The database migration engine has changed from `sqlalchemy-migrate`__ to `alembic`__. For most deployments, this should have minimal to no impact and the switch should be mostly transparent. The main user-facing impact is the change in schema versioning. While sqlalchemy-migrate used a linear, integer-based versioning scheme, which required placeholder migrations to allow for potential migration backports, alembic uses a distributed version control-like schema where a migration's ancestor is encoded in the file and branches are possible. The alembic migration files therefore use a arbitrary UUID-like naming scheme and the ``cinder-manage db sync`` command now expects such an version when manually specifying the version that should be applied. For example:: $ cinder-manage db sync 921e1a36b076 It is no longer possible to specify an sqlalchemy-migrate-based version. When the ``cinder-manage db sync`` command is run, all remaining sqlalchemy-migrate-based migrations will be automatically applied. Attempting to specify an sqlalchemy-migrate-based version will result in an error. .. __: https://sqlalchemy-migrate.readthedocs.io/en/latest/ .. __: https://alembic.sqlalchemy.org/en/latest/ ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/sync-bump-versions-a1e6f6359173892e.yaml0000664000175000017500000000156700000000000025462 0ustar00zuulzuul00000000000000--- features: - Cinder-manage DB sync command can now bump the RPC and Objects versions of the services to avoid a second restart when doing offline upgrades. upgrade: - On offline upgrades, due to the rolling upgrade mechanism we need to restart the cinder services twice to complete the installation just like in the rolling upgrades case. 
First you stop the cinder services, then you upgrade them, you sync your DB, then you start all the cinder services, and then you restart them all. To avoid this last restart we can now instruct the DB sync to bump the services after the migration is completed, the command to do this is `cinder-manage db sync --bump-versions` fixes: - After an offline upgrade we had to restart all Cinder services twice, now with the `cinder-manage db sync --bump-versions` command we can avoid the second restart. ././@PaxHeader0000000000000000000000000000020500000000000011452 xustar0000000000000000111 path=cinder-27.0.0/releasenotes/notes/synchronous-mirror-support-for-netapp-backends-3cece6d56fec332c.yaml 22 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/synchronous-mirror-support-for-netapp-backends-3cece6d56fec332c.yam0000664000175000017500000000051600000000000033315 0ustar00zuulzuul00000000000000--- features: - | Synchronous mirror support for NetApp Backends. Providing an option netapp_replication_policy for the replication of netapp backends, to enable the user to apply synchronous mirror and other relevant policies. Code also has been added to fail back from secondary to primary via default option. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/synology-support-uc-model-9cda442828c2eb32.yaml0000664000175000017500000000012500000000000027105 0ustar00zuulzuul00000000000000--- features: - | Added support for UC-Series model to Synology Cinder driver. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/synology-volume-driver-c5e0f655b04390ce.yaml0000664000175000017500000000011500000000000026464 0ustar00zuulzuul00000000000000--- features: - Added backend driver for Synology iSCSI-supported storage. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/tintri-driver-removal-91a0931c417481d2.yaml0000664000175000017500000000123600000000000026032 0ustar00zuulzuul00000000000000--- upgrade: - | The Tintri storage driver has been removed after completion of its deprecation period without a reliable 3rd Party CI system being supported. Customers using the Tintri driver should not upgrade Cinder without first migrating all volumes from their Tintri backend to a supported storage backend. Failure to migrate volumes will result in no longer being able to access volumes backed by the Tintri storage backend. other: - | The Tintri storage driver was marked unsupported in Stein due to 3rd Party CI not meeting Cinder's requirements. As a result the driver is removed starting from the Train release. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/tintri_image_direct_clone-f73e561985aad867.yaml0000664000175000017500000000066600000000000027150 0ustar00zuulzuul00000000000000--- fixes: - Fix for Tintri image direct clone feature. Fix for the bug 1400966 prevents user from specifying image "nfs share location" as location value for an image. Now, in order to use Tintri image direct clone, user can specify "provider_location" in image metadata to specify image nfs share location. NFS share which hosts images should be specified in a file using tintri_image_shares_config config option. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/tooz-coordination-heartbeat-cfac1064fd7878be.yaml0000664000175000017500000000045600000000000027603 0ustar00zuulzuul00000000000000--- upgrade: - | The coordination system used by Cinder has been simplified to leverage tooz builtin heartbeat feature. Therefore, the configuration options `coordination.heartbeat`, `coordination.initial_reconnect_backoff` and `coordination.max_reconnect_backoff` have been removed. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/tooz-locks-0f9f2cc15f8dad5a.yaml0000664000175000017500000000024000000000000024331 0ustar00zuulzuul00000000000000--- features: - Locks may use Tooz as abstraction layer now, to support distributed lock managers and prepare Cinder to better support HA configurations. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/toyou-netstor-storage-acs5000-fc-driver-f0d7428924bfeda1.yaml0000664000175000017500000000011700000000000031336 0ustar00zuulzuul00000000000000--- features: - | New FC cinder volume driver for TOYOU NetStor Storage. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/toyou-netstor-storage-tyds-iscsi-driver-798da24653d8cd0d.yaml0000664000175000017500000000012700000000000031720 0ustar00zuulzuul00000000000000--- features: - | New ISCSI cinder volume driver for TOYOU NetStor TYDS Storage. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/tpool-size-11121f78df24db39.yaml0000664000175000017500000000140300000000000024031 0ustar00zuulzuul00000000000000--- features: - Adds support to configure the size of the native thread pool used by the cinder volume and backup services. For the backup we use `backup_native_threads_pool_size` in the `[DEFAULT]` section, and for the backends we use `backend_native_threads_pool_size` in the driver section. fixes: - Fixes concurrency issue on backups, where only 20 native threads could be concurrently be executed. Now default will be 60, and can be changed with `backup_native_threads_pool_size`. - RBD driver can have bottlenecks if too many slow operations are happening at the same time (for example many huge volume deletions), we can now use the `backend_native_threads_pool_size` option in the RBD driver section to resolve the issue. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/transfer-encrypted-volume-2f040a6993435e79.yaml0000664000175000017500000000057100000000000026731 0ustar00zuulzuul00000000000000--- features: - | Starting with API microversion 3.70, encrypted volumes can be transferred to a user in a different project. Prior to microversion 3.70, the transfer is blocked due to the inability to transfer ownership of the volume's encryption key. With microverson 3.70, ownership of the encryption key is transferred when the volume is transferred. 
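A hedged usage sketch of the microversion 3.70 behavior described above; the client invocation is illustrative, and any client able to request microversion 3.70 or later should behave the same way::

    # assumption: python-cinderclient is used; the volume ID is a placeholder
    $ cinder --os-volume-api-version 3.70 transfer-create <encrypted-volume-id>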
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/transfer-snapshots-with-volume-a7763570a807c742.yaml0000664000175000017500000000044400000000000027721 0ustar00zuulzuul00000000000000--- features: - Support transferring a volume with its snapshots by default in the new V3 API 'v3/volume_transfers'. Beginning with microversion 3.55, if users don't want to transfer snapshots, they can use the new optional argument `no_snapshots=True` in the request body of the new transfer creation API. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/type-extra-spec-policies-b7742b0ac2732864.yaml0000664000175000017500000000167300000000000026513 0ustar00zuulzuul00000000000000--- upgrade: - | When managing volume types, an OpenStack provider is now given more control to grant access for different storage type operations. The provider can now customize access to type create, delete, update, list, and show using new entries in the cinder policy file. As an example, one provider may have roles called viewer, admin, type_viewer, and type_admin. Admin and type_admin can create, delete, and update types. Everyone can list the storage types. Admin, type_viewer, and type_admin can view the extra_specs. "volume_extension:types_extra_specs:create": "rule:admin or rule:type_admin", "volume_extension:types_extra_specs:delete": "rule:admin or rule:type_admin", "volume_extension:types_extra_specs:index": "", "volume_extension:types_extra_specs:show": "rule:admin or rule:type_admin or rule:type_viewer", "volume_extension:types_extra_specs:update": "rule:admin or rule:type_admin" ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/unisphere-for-powermax-10-support-637dfde0f8fa9862.yaml0000664000175000017500000000012600000000000030467 0ustar00zuulzuul00000000000000--- features: - | Dell PowerMax driver now supports Unisphere for PowerMax 10.0 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/unity-backup-via-snapshot-81a2d5a118c97042.yaml0000664000175000017500000000012300000000000026663 0ustar00zuulzuul00000000000000--- features: - Added support for backing up a volume using a snapshot in the Unity driver. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/unity-compressed-volume-support-4998dee84534a324.yaml0000664000175000017500000000011600000000000030216 0ustar00zuulzuul00000000000000--- features: - | Dell EMC Unity driver: Add compressed volume support. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/unity-enable-ssl-14db2497225c4395.yaml0000664000175000017500000000052500000000000024770 0ustar00zuulzuul00000000000000--- features: - The Dell EMC Unity Cinder driver allows enabling/disabling SSL verification. Admins can set `driver_ssl_cert_verify` to `True` or `False` to enable or disable this function; alternatively, they can set `driver_ssl_cert_path=` to use a customized CA path. Both options should go in the driver section. 
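A minimal cinder.conf sketch for the Unity SSL options described above; the backend section name and CA bundle path are illustrative assumptions::

    [unity-backend]
    # assumptions: section name and certificate path are examples only
    driver_ssl_cert_verify = True
    driver_ssl_cert_path = /etc/cinder/unity-ca.pem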
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/unity-fast-clone-02ae88ba8fdef145.yaml0000664000175000017500000000064300000000000025357 0ustar00zuulzuul00000000000000--- features: - Add thin clone support in the Unity driver. Unity storage supports the thin clone of a LUN from OE version 4.2.0. It is more efficient than the dd solution. However, there is a limit of thin clone inside each LUN family. Every time the limit reaches, a new LUN family will be created by a dd-copy, and then the volume clone afterward will use the thin clone of the new LUN family. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/unity-force-detach-7c89e72105f9de61.yaml0000664000175000017500000000012600000000000025440 0ustar00zuulzuul00000000000000--- features: - | Add support to force detach a volume from all hosts on Unity. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/unity-group-replication-support-97d74275a84b06af.yaml0000664000175000017500000000022700000000000030266 0ustar00zuulzuul00000000000000--- features: - | Dell EMC Unity Driver: Added consistency group replication support. The storops library version 1.2.3 or newer is required.././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/unity-multiattach-support-993b997e522d9e84.yaml0000664000175000017500000000035100000000000027103 0ustar00zuulzuul00000000000000--- features: - | Dell EMC Unity: Implements `bp unity-multiattach-support `__ to support attaching a volume to multiple servers simultaneously. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/unity-remove-empty-host-17d567dbb6738e4e.yaml0000664000175000017500000000036600000000000026604 0ustar00zuulzuul00000000000000--- features: - | Dell EMC Unity Driver: Adds support for removing empty host. The new option named `remove_empty_host` could be configured as `True` to notify Unity driver to remove the host after the last LUN is detached from it. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/unity-replication-support-2ab121a5ea5a2ade.yaml0000664000175000017500000000012100000000000027363 0ustar00zuulzuul00000000000000--- features: - | Dell EMC Unity Driver: Added volume replication support. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/unity-return-logged-out-initiator-6ab1f96f21bb284c.yaml0000664000175000017500000000042400000000000030617 0ustar00zuulzuul00000000000000--- fixes: - | Dell EMC Unity Driver: Fixes `bug 1773305 `__ to return the targets which connect to the logged-out initiators. Then the zone manager could clean up the FC zone based on the correct target wwns. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/unity-retype-volume-support-773ae17b8811fb3f.yaml0000664000175000017500000000016600000000000027515 0ustar00zuulzuul00000000000000--- features: - | Dell EMC Unity driver: Add efficient retype support when new type uses the same Unity device. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/unity-storage-assisted-migration-support-145fce87f36f1ecc.yaml0000664000175000017500000000013100000000000032304 0ustar00zuulzuul00000000000000--- features: - | Dell EMC Unity Driver: Added storage-assisted migration support. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/unity-thick-support-fdbef833f2b4d54f.yaml0000664000175000017500000000041200000000000026216 0ustar00zuulzuul00000000000000--- features: - | Dell EMC Unity Driver: Add thick volume support. Refer to `Unity Cinder Configuration document `__ to create a thick volume. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/unity-volume-tiering-policy-support-c6d0aaff4b141bd3.yaml0000664000175000017500000000014300000000000031332 0ustar00zuulzuul00000000000000--- features: - | Dell EMC Unity driver: Add tiering policy configuration support for volume.././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/unsupport-prophetstor-bfbc674fd86303db.yaml0000664000175000017500000000106500000000000026623 0ustar00zuulzuul00000000000000--- upgrade: - | The Prophetstor driver has been marked as unsupported and is now deprecated. ``enable_unsupported_driver`` will need to be set to ``True`` in the driver's section in cinder.conf to continue to use it. deprecations: - | The Prophetstor driver has been marked as unsupported and is now deprecated. ``enable_unsupported_driver`` will need to be set to ``True`` in the driver's section in cinder.conf to continue to use it. If its support status does not change, it will be removed in the 'U' development cycle. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/unsupport-veritas-access-ecfb4122770d93f9.yaml0000664000175000017500000000107300000000000027011 0ustar00zuulzuul00000000000000--- upgrade: - | The Veritas Access driver has been marked as unsupported and is now deprecated. ``enable_unsupported_driver`` will need to be set to ``True`` in the driver's section in cinder.conf to continue to use it. deprecations: - | The Veritas Access driver has been marked as unsupported and is now deprecated. ``enable_unsupported_driver`` will need to be set to ``True`` in the driver's section in cinder.conf to continue to use it. If its support status does not change, it will be removed in the 'U' development cycle. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/untyped_to_default_type-0068e6bc8000986c.yaml0000664000175000017500000000116200000000000026610 0ustar00zuulzuul00000000000000--- features: - | Added a new default volume type ``__DEFAULT__`` which will be used when - A new volume is created without a type - The `default_volume_type` option is unset in cinder.conf Then the volume will be assigned the ``__DEFAULT__`` type. upgrade: - | The db migration script will create a volume type named ``__DEFAULT__`` The online migration will migrate all existing untyped volumes, snapshots to the ``__DEFAULT__`` type. 
An invalid/non-existent type specified with `default_volume_type` option in cinder.conf will result in VolumeTypeNotFoundByName exception. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/update-service-uuid-f25dbb05efd45d87.yaml0000664000175000017500000000126500000000000026046 0ustar00zuulzuul00000000000000--- features: - | Added a new cinder-manage command to handle the situation where database purges would not complete due to the volumes table holding references to deleted services. The new command makes sure that all volumes have a reference only to the correct service_uuid, which will allow old service records to be purged from the database. Command: ``cinder-manage volume update_service`` - | When Cinder creates a new cinder-volume service, it now also immediately updates the service_uuid for all volumes associated with that cinder-volume host. In some cases, this was preventing the database purge operation from completing successfully. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/update_config_options_disco_volume_driver-07e52aa43e83c243.yaml0000664000175000017500000000107000000000000032424 0ustar00zuulzuul00000000000000--- upgrade: - | We replaced the config option in the disco volume driver "disco_choice_client" with "disco_client_protocol". We add "san_api_port" as new config option in san driver for accessing the SAN API using this port. Deprecations: - | Marked the ITRI DISCO driver option 'disco_rest_api' as deprecated in order to use the config 'san_ip' provided by the generic driver san. Marked the ITRI DISCO driver option 'disco_src_api_port' as deprecated in order to use the config 'san_ssh_port' provided by the generic driver san. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/updated-at-list-0f899098f7258331.yaml0000664000175000017500000000011600000000000024534 0ustar00zuulzuul00000000000000--- features: - The updated_at timestamp is now returned in listing detail. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/upgrade-checks-e58c4a81c857847d.yaml0000664000175000017500000000070300000000000024637 0ustar00zuulzuul00000000000000--- features: - | [`Community Goal `_] Support has been added for developers to write pre-upgrade checks. Operators can run these checks using ``cinder-status upgrade check``. This allows operators to be more confident when upgrading their deployments by having a tool that automates programmable checks against the deployment configuration or dataset. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/upload-volume-to-multiple-stores-ab130774897e41c3.yaml0000664000175000017500000000065000000000000030232 0ustar00zuulzuul00000000000000--- features: - | This release includes support for Glance automatic image colocation. When a volume originally created from an image is uploaded to the Image service, Cinder passes Glance a reference to the original image. Glance may use this information to colocate the new image data in the same image store(s) as the original image data. 
Consult the Glance documentation for more information.././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/use-castellan-key-manager-4911c3c4908ca633.yaml0000664000175000017500000000111700000000000026604 0ustar00zuulzuul00000000000000--- prelude: > The default key manager interface in Cinder was deprecated and the Castellan key manager interface library is now used instead. For more information about Castellan, please see https://docs.openstack.org/castellan/latest/ . upgrade: - If using the key manager, the configuration details should be updated to reflect the Castellan-specific configuration options. deprecations: - All barbican and keymgr config options in Cinder are now deprecated. All of these options are moved to the ``key_manager`` section for the Castellan library. ././@PaxHeader0000000000000000000000000000021100000000000011447 xustar0000000000000000115 path=cinder-27.0.0/releasenotes/notes/use-glance-v2-api-and-deprecate-glance_api_version-1a3b698429cb754e.yaml 22 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/use-glance-v2-api-and-deprecate-glance_api_version-1a3b698429cb754e0000664000175000017500000000027700000000000032443 0ustar00zuulzuul00000000000000--- upgrade: - Cinder now defaults to using the Glance v2 API. The ``glance_api_version`` configuration option has been deprecated and will be removed in the 11.0.0 Queens release. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/use-multipath-everywhere-3707593eebdaf9eb.yaml0000664000175000017500000000106300000000000027146 0ustar00zuulzuul00000000000000--- fixes: - | `Bug #1951982 `_: Fixed cloning of encrypted volumes not using multipathing to change the encryption key used on the new volume. - | `Bug #1951977 `_: Fixed backup create and restore not using multipath configuration when attaching the volume. - | Kaminario driver `bug #1951981 `_: Fixed create volume from volume or snapshot not using multipath configuration. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/use-oslo_middleware_sizelimit-5f171cf1c44444f8.yaml0000664000175000017500000000061600000000000027773 0ustar00zuulzuul00000000000000--- deprecations: - The api-paste.ini ``paste.filter_factory`` setting has been updated to use ``oslo_middleware.sizelimit`` rather than the ``cinder.api.middleware.sizelimit`` compatibility shim. ``cinder.api.middleware.sizelimit`` was deprecated in Kilo and should now be updated to use ``oslo_middleware.sizelimit`` in api-paste.ini in preparation for removal in the Pike release. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/user-visible-extra-specs-6cf7e49c6be57a01.yaml0000664000175000017500000000142500000000000026746 0ustar00zuulzuul00000000000000--- features: - | A small list of volume type extra specs is now visible to regular users, and not just to cloud administrators. This allows users to see non-sensitive extra specs, which may help them choose a particular volume type when creating volumes. Sensitive extra specs are still only visible to cloud administrators. See the "User visible extra specs" section in the Cinder Administration guide for more information. 
security: - | A small list of volume type extra specs is now visible to regular users, and not just to cloud administrators. Cloud administrators who wish to opt out of this feature should consult the "Security considerations" portion of the "User visible extra specs" section in the Cinder Administration guide. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/ussuri-release-prelude-ceeb11bc7fe46191.yaml0000664000175000017500000000110200000000000026550 0ustar00zuulzuul00000000000000prelude: | Welcome to the Ussuri release of the OpenStack Block Storage service (cinder). The cinder team would like to bring the following points to your attention. Details may be found below. * With this release, the Block Storage API version 3 has reached microversion **3.60**. * Python 2 is no longer supported. The minimum version of Python that may be used with this release is **Python 3.6**. * The unsupported driver removal policy has been revised. See the "Known Issues" section of this document for more information. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/ussuri-unsupported-drivers-not-removed-8cfb3e01b720e9d1.yaml0000664000175000017500000000646300000000000031715 0ustar00zuulzuul00000000000000--- issues: - | For the convenience of operators, the `unsupported driver removal policy `_ was changed during the Ussuri development cycle to allow unsupported drivers to remain in the Cinder source code repository at the discretion of the Cinder team (basically, this means the vendor has not explicitly declared the driver EOL or the vendor has expressed interest in getting the third-party CI system working again). Be aware, however, that such drivers are subject to immediate removal if they begin failing the general Cinder gating tests. The following drivers were classified as unsupported in the Train release and continue as unsupported in this release: - IBM FlashSystem drivers: FC and iSCSI - ProphetStor drivers: FC and iSCSI - Veritas ACCESS iSCSI driver - Virtuozzo Storage driver For completeness, here is the list of drivers first marked unsupported in the Ussuri release. See the "Deprecation Notes" section of this document for details. - Brocade Fibre Channel Zone Manager driver - MacroSAN drivers: FC and iSCSI - Veritas Clustered NFS driver In order to use an unsupported driver, ``enable_unsupported_driver`` must be set to ``True`` in the driver's section in the cinder.conf file. If you are the consumer of such a driver, we encourage you to contact the vendor to make them aware of your concerns. - | A key aspect of *supported* drivers is that there is a fully functioning third-party CI system that reports on all proposed changes to the cinder code. However, the fact that there are 'unsupported' drivers at all indicates that third-party CI systems are unfortunately not always fully functioning. You may consult the report at the following link to learn the extent to which a particular driver's CI system is reporting on the development branch of cinder: http://cinderstats.ivehearditbothways.com/cireport.txt upgrade: - | The following drivers were classified as unsupported in the Train release and continue as unsupported in this release. See the "Known Issues" section of this document for details. 
- IBM FlashSystem drivers: FC and iSCSI - ProphetStor drivers: FC and iSCSI - Veritas ACCESS iSCSI driver - Virtuozzo Storage driver In order to use an unsupported driver, ``enable_unsupported_driver`` must be set to ``True`` in the driver's section in the cinder.conf file. If you are the consumer of such a driver, we encourage you to contact the vendor to make them aware of your concerns. deprecations: - | The following drivers were deprecated in the Train release but have not yet been removed. They continue as unsupported and deprecated in this release. See the "Known Issues" section of this document for details. - IBM FlashSystem drivers: FC and iSCSI - ProphetStor drivers: FC and iSCSI - Veritas ACCESS iSCSI driver - Virtuozzo Storage driver In order to use an unsupported driver, ``enable_unsupported_driver`` must be set to ``True`` in the driver's section in the cinder.conf file. If you are the consumer of such a driver, we encourage you to contact the vendor to make them aware of your concerns. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/validate-expired-user-tokens-40b15322197653ae.yaml0000664000175000017500000000106700000000000027271 0ustar00zuulzuul00000000000000--- features: - | Added support for Keystone middleware feature to pass service token along with the user token for Cinder to Nova and Glance services. This will help get rid of user token expiration issues during long running tasks e.g. creating volume snapshot (Cinder->Nova) and creating volume from image (Cinder->Glance) etc. To use this functionality a service user needs to be created first. Add the service user configurations in ``cinder.conf`` under ``service_user`` group and set ``send_service_user_token`` flag to ``True``. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/validate_vol_create_uuids-4f08b4ef201385f6.yaml0000664000175000017500000000032700000000000027142 0ustar00zuulzuul00000000000000--- fixes: - | The create volume api will now return 400 error instead of 404/500 if user passes non-uuid values to consistencygroup_id, source_volid and source_replica parameters in the request body. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/verbose-online-migrations-94fb7e8a85cdbc10.yaml0000664000175000017500000000041100000000000027252 0ustar00zuulzuul00000000000000--- features: - The cinder-manage online_data_migrations command now prints a tabular summary of completed and remaining records. The goal here is to get all your numbers to zero. The previous execution return code behavior is retained for scripting. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/verify-dorado-luntype-for-huawei-driver-4fc2f4cca3141bb3.yaml0000664000175000017500000000052200000000000031735 0ustar00zuulzuul00000000000000--- fixes: - | Add 'LUNType' configuration verification for Huawei driver when connecting to Dorado array. Because Dorado array only supports 'Thin' lun type, so 'LUNType' only can be configured as 'Thin', any other type is invalid and if 'LUNType' not explicitly configured, by default use 'Thin' for Dorado array. 
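A hedged sketch of the 'LUNType' setting discussed above, assuming the Huawei driver's XML configuration file layout (the file name and surrounding elements are assumptions drawn from typical Huawei driver configurations, not verified against a specific release)::

    <!-- assumption: fragment of the driver's XML config file, e.g. cinder_huawei_conf.xml -->
    <LUN>
        <LUNType>Thin</LUNType>
    </LUN>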
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/veritas_access_driver-c73b2320ba9f46a8.yaml0000664000175000017500000000007500000000000026357 0ustar00zuulzuul00000000000000--- features: - Added NFS based driver for Veritas Access. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/veritas_access_iscsi_driver-de642dad9e7d0890.yaml0000664000175000017500000000007700000000000027650 0ustar00zuulzuul00000000000000--- features: - Added ISCSI based driver for Veritas Access. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/veritas_cluster_nfs_unsupported-88ab3ea5cbb6cd88.yaml0000664000175000017500000000114400000000000030772 0ustar00zuulzuul00000000000000--- upgrade: - | The Veritas Clustered NFS driver has been marked as unsupported and is now deprecated. ``enable_unsupported_driver`` will need to be set to ``True`` in the driver's section in cinder.conf to continue to use the driver. deprecations: - | The Veritas Clustered NFS driver has been marked as unsupported and is now deprecated. ``enable_unsupported_driver`` will need to be set to ``True`` in the driver's section in cinder.conf to continue to use the driver. If its support status does not change, the driver will be removed in the 'V' development cycle. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/vhd-disk-format-upload-to-image-5851f9d35f4ee447.yaml0000664000175000017500000000013200000000000027727 0ustar00zuulzuul00000000000000--- features: - Added support for vhd and vhdx disk-formats for volume upload-to-image. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/victoria-release-prelude-0fbae229ebe8f9b7.yaml0000664000175000017500000001317300000000000027141 0ustar00zuulzuul00000000000000--- prelude: | Welcome to the Victoria release of the OpenStack Block Storage service (cinder). With this release, the Block Storage API version 3 has reached microversion **3.62**. The cinder team would like to bring the following points to your attention. Details may be found below. * Microversion 3.61 adds the ``cluster_name`` attribute to the volume detail response when called in an administrative context. * Microversion 3.62 adds API calls to manage the default volume type for a specific project. See the `Default volume types (default-types) `_ section of the `Block Storage API v3 Reference `_ for more information. * The handling of the default volume type for a cinder installation has been improved in this release. * The cinder backup service has added support for the popular Zstandard compression algorithm. (The default is the venerable Deflate (zlib) algorithm.) * Many backend storage drivers have added features and fixed bugs. 
features: - | PowerMax Driver - support for using snap_ids instead of generations for better handling of volume snapshots (Change-Id `I0edf2ac777bef888e760f711a94e3fe4f94262ae `_) fixes: - | PowerMax Driver - corrected handling of exceptions occurring during cleanup in the context of volume migration (Change-Id `I0c0a96e21209c5abe359c6985fae7cee598c21ab `_) - | REST API - fixed issue where the Get Current Log Levels for Cinder Services call in microversion 3.32 was ignoring the server name filter (Change-Id `Iecb3faad9270f969185089cc291127b340483a46 `_) - | Additionally, the following bugs were addressed: * `Bug #1888831 `_: PowerMax Driver - Volume updates for volumes in groups not performed during failover * `Bug #1867906 `_: group-create-from-src doesn't work in active/active mode * `Bug #1886662 `_: PowerMax Driver - Volumes not cleaned up after exception during migrate, retype, srdf protect creates/deletes blocks subsequent operations * `Bug #1874187 `_: PowerMax driver - Exception was not handled and breaks the flow while add/remove volumes to generic volume group * `Bug #1886689 `_: Rekey fails when provider is legacy provider class * `Bug #1877976 `_: PowerMax Driver - RDFG suspended on vol create exception * `Bug #1877445 `_: Pure Storage Driver - doesn't handle synchronous replication CIDR filters properly * `Bug #1875953 `_: Virtuozzo driver - missing context in create_cloned_volume call * `Bug #1863021 `_: Eventlet monkey patch results in assert len(_active) == 1 AssertionError * `Bug #1875640 `_: PowerMax Driver - Failover lock not released during U4P failover during exception * `Bug #1875959 `_: NetApp ONTAP NFS driver - Unable to perform flexclone from glance share * `Bug #1875433 `_: PowerMax Driver - Retype from rep to rep leaving storage group suspended * `Bug #1875432 `_: PowerMax Driver - Live migration fails when an instance has more than one replication device * `Bug #1871744 `_: Glance retry failed: TypeError: get() got an unexpected keyword argument 'schema_name' * `Bug #1873463 `_: Virtuozzo driver - copy_volume_to_image() needs to support glance multistore * `Bug #1892057 `_: PowerMax Driver - Missing force flag for rep group volume adds * `Bug #1892718 `_: PowerMax Driver - SRDF suspend can fail during _create_replica - | Additionally, the following minor fixes for which no bugs were filed were made: * `PowerMax Driver - Migrate extra spec class fix `_ * `PowerMax Driver - Force array and srp configuration `_ * `PowerMax Driver - Prevent unmanage with snapvx `_ * `PowerMax Driver - Allowing for an empty group on a clone volume `_ * `PowerMax Driver - RDF State Validation Enhancements `_ * `PowerMax Driver - Remove mandatory failover BID `_ ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/virtuozzo-unsupported-645b47dceb0ddbd2.yaml0000664000175000017500000000106100000000000026702 0ustar00zuulzuul00000000000000--- upgrade: - | The Virtuozzo driver has been marked as unsupported and is now deprecated. ``enable_unsupported_driver`` will need to be set to ``True`` in the driver's section in cinder.conf to continue to use it. deprecations: - | The Virtuozzo driver has been marked as unsupported and is now deprecated. ``enable_unsupported_driver`` will need to be set to ``True`` in the driver's section in cinder.conf to continue to use it. If its support status does not change, it will be removed in the 'U' development cycle. 
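A minimal cinder.conf sketch for continuing to use a deprecated, unsupported driver such as the one noted above; the backend section name is an illustrative assumption::

    [virtuozzo-backend]
    # assumption: '[virtuozzo-backend]' is an example; the option is set per backend section
    enable_unsupported_driver = True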
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/vmax-attach-snapshot-3137e59ab4ff39a4.yaml0000664000175000017500000000017100000000000026067 0ustar00zuulzuul00000000000000--- features: - Enable backup snapshot optimal path by implementing attach and detach snapshot in the VMAX driver. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/vmax-clone-cg-09fce492931c957f.yaml0000664000175000017500000000013600000000000024412 0ustar00zuulzuul00000000000000--- features: - Added the ability to create a CG from a source CG with the VMAX driver. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/vmax-compression-support-1dfe463328b56d7f.yaml0000664000175000017500000000012100000000000027037 0ustar00zuulzuul00000000000000--- features: - Support for compression on VMAX All Flash in the VMAX driver. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/vmax-deprecate-backend-xml-708a41919bcc55a8.yaml0000664000175000017500000000036500000000000027030 0ustar00zuulzuul00000000000000--- deprecations: - | The use of xml files for vmax backend configuration is now deprecated and will be removed during the following release. Deployers are encouraged to use the cinder.conf for configuring connections to the vmax. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/vmax-driver-multiattach-support-43a7f99cd2d742ee.yaml0000664000175000017500000000010700000000000030375 0ustar00zuulzuul00000000000000--- features: - Dell EMC VMAX driver has added multiattach support. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/vmax-failover-unisphere-2de78d1f76b5f836.yaml0000664000175000017500000000015300000000000026610 0ustar00zuulzuul00000000000000--- features: - Dell EMC VMAX driver has added support for failover to second instance of Unisphere. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/vmax-generic-volume-group-28b3b2674c492bbc.yaml0000664000175000017500000000016700000000000027036 0ustar00zuulzuul00000000000000--- features: - | Add consistent group snapshot support to generic volume groups in VMAX driver version 3.0. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/vmax-iscsi-chap-authentication-e47fcfe310b85f7b.yaml0000664000175000017500000000011600000000000030166 0ustar00zuulzuul00000000000000--- features: - | Add chap authentication support for the vmax backend. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/vmax-iscsi-multipath-76cc09bacf4fdfbf.yaml0000664000175000017500000000010500000000000026461 0ustar00zuulzuul00000000000000--- features: - Support for iSCSI multipathing in EMC VMAX driver. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/vmax-list-manageable-vols-snaps-6a7f5aa114fae8f3.yaml0000664000175000017500000000014400000000000030247 0ustar00zuulzuul00000000000000--- features: - Dell EMC VMAX driver has added list manageable volumes and snapshots support. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/vmax-manage-unmanage-snapshot-3805c4ac64b8133a.yaml0000664000175000017500000000012000000000000027540 0ustar00zuulzuul00000000000000--- features: - Support for manage/ unmanage snapshots on VMAX cinder driver. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/vmax-metadata-ac9bdd31e7e561c3.yaml0000664000175000017500000000012400000000000024675 0ustar00zuulzuul00000000000000--- features: - | Log VMAX specific metadata of a volume if debug is enabled. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/vmax-oversubscription-d61d0e3b1df2487a.yaml0000664000175000017500000000010400000000000026447 0ustar00zuulzuul00000000000000--- features: - Added oversubscription support in the VMAX driver ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/vmax-powermax-rebrand-70569fc8cdf40a8c.yaml0000664000175000017500000000126100000000000026330 0ustar00zuulzuul00000000000000--- upgrade: - | Dell EMC VMAX has been rebranded to PowerMax. The drivers ``cinder.volume.drivers.dell_emc.vmax.iscsi.VMAXISCSIDriver`` and ``cinder.volume.drivers.dell_emc.vmax.fc.VMAXFCDriver`` will now be updated to ``cinder.volume.drivers.dell_emc.powermax. iscsi.PowerMaxISCSIDriver`` and ``cinder.volume.drivers.dell_emc. powermax.fc.PowerMaxFCDriver`` respectively in cinder.conf. Driver configuration options that start with ``vmax`` should also be updated to ``powermax``. Existing vmax configuration options will continue to work but will be removed in the Train release. Online documentation will also change to reflect these changes. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/vmax-qos-eb40ed35bd2f457d.yaml0000664000175000017500000000010000000000000023711 0ustar00zuulzuul00000000000000--- features: - QoS support in EMC VMAX iSCSI and FC drivers. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/vmax-rename-dell-emc-f9ebfb9eb567f427.yaml0000664000175000017500000000040500000000000026073 0ustar00zuulzuul00000000000000--- upgrade: - The VMAX driver is moved to the dell_emc directory. volume_driver entry in cinder.conf needs to be changed to ``cinder.volume.drivers.dell_emc.vmax.iscsi.VMAXISCSIDriver`` or ``cinder.volume.drivers.dell_emc.vmax.fc.VMAXFCDriver``. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/vmax-replication-enhancements-c3bec80a3abb6d2e.yaml0000664000175000017500000000014700000000000030225 0ustar00zuulzuul00000000000000--- features: - | Added asynchronous remote replication support in Dell EMC VMAX cinder driver. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/vmax-replication-enhancements2-0ba03224cfca9959.yaml0000664000175000017500000000010600000000000030013 0ustar00zuulzuul00000000000000--- features: - Support for VMAX SRDF/Metro on VMAX cinder driver. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/vmax-replication-group-2f65ed92d761f90d.yaml0000664000175000017500000000014200000000000026436 0ustar00zuulzuul00000000000000--- features: - | Add consistent replication group support in Dell EMC VMAX cinder driver. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/vmax-rest-94e48bed6f9c134c.yaml0000664000175000017500000000022200000000000024027 0ustar00zuulzuul00000000000000--- features: - | VMAX driver version 3.0, replacing SMI-S with Unisphere REST. This driver supports VMAX3 hybrid and All Flash arrays. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/vmax-rest-compression-10c2590052a9465e.yaml0000664000175000017500000000012500000000000026045 0ustar00zuulzuul00000000000000--- features: - | Adding compression functionality to VMAX driver version 3.0. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/vmax-rest-livemigration-885dd8731d5a8a88.yaml0000664000175000017500000000013000000000000026543 0ustar00zuulzuul00000000000000--- features: - | Adding Live Migration functionality to VMAX driver version 3.0. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/vmax-rest-qos-6bb4073b92c932c6.yaml0000664000175000017500000000011500000000000024453 0ustar00zuulzuul00000000000000--- features: - | Adding Qos functionality to VMAX driver version 3.0. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/vmax-rest-replication-612fcfd136cc076e.yaml0000664000175000017500000000013200000000000026320 0ustar00zuulzuul00000000000000--- features: - | Adding Replication V2.1 functionality to VMAX driver version 3.0. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/vmax-rest-retype-ceba5e8d04f637b4.yaml0000664000175000017500000000011500000000000025403 0ustar00zuulzuul00000000000000--- features: - | Add retype functionality to VMAX driver version 3.0. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/vmax-retype-replicated-volumes-325be6e5fd626819.yaml0000664000175000017500000000015700000000000030032 0ustar00zuulzuul00000000000000--- features: - Support for retype (storage-assisted migration) of replicated volumes on VMAX cinder driver. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/vmax-revert-volume-to-snapshot-b4a837d84a8b2a85.yaml0000664000175000017500000000014300000000000030055 0ustar00zuulzuul00000000000000--- features: - Support for reverting a volume to a previous snapshot in VMAX cinder driver. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/vmax-volume-migration-992c8c68e2207bbc.yaml0000664000175000017500000000036100000000000026267 0ustar00zuulzuul00000000000000--- features: - Storage assisted volume migration from one Pool/SLO/Workload combination to another, on the same array, via retype, for the VMAX driver. Both All Flash and Hybrid VMAX3 arrays are supported. VMAX2 is not supported. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/vmdk_backup_restore-41f807b7bc8e0ae8.yaml0000664000175000017500000000011300000000000026120 0ustar00zuulzuul00000000000000--- fixes: - Fixed backup and restore of volumes in VMware VMDK driver. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/vmdk_config_conn_pool_size-0658c497e118533f.yaml0000664000175000017500000000027600000000000027176 0ustar00zuulzuul00000000000000--- upgrade: - Added config option ``vmware_connection_pool_size`` in the VMware VMDK driver to specify the maximum number of connections (to vCenter) in the http connection pool. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/vmdk_default_task_poll_interval-665f032bebfca39e.yaml0000664000175000017500000000016100000000000030570 0ustar00zuulzuul00000000000000--- upgrade: - The default interval for polling vCenter tasks in the VMware VMDK driver is changed to 2s. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/vmdk_image_ova-d3b3a0e72221110c.yaml0000664000175000017500000000013700000000000024647 0ustar00zuulzuul00000000000000--- fixes: - Fixed the VMware VMDK driver to create volume from image in ova container. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/vmdk_vc_51-df29eeb5fc93fbb1.yaml0000664000175000017500000000013100000000000024250 0ustar00zuulzuul00000000000000--- deprecations: - VMware VMDK driver deprecated the support for vCenter version 5.1 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/vmem-7000-iscsi-3c8683dcc1f0b9b4.yaml0000664000175000017500000000011600000000000024530 0ustar00zuulzuul00000000000000--- features: - Added backend driver for Violin Memory 7000 iscsi storage. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/vmware-vmdk-config-eb70892e4ccf8f3c.yaml0000664000175000017500000000022700000000000025667 0ustar00zuulzuul00000000000000--- upgrade: - The VMware VMDK driver supports a new config option ``vmware_host_port`` to specify the port number to connect to vCenter server. 
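A minimal cinder.conf sketch for the ``vmware_host_port`` option noted above; the backend section name is an illustrative assumption, and 443 is simply the conventional vCenter HTTPS port::

    [vmware-vmdk]
    # assumption: section name is an example; use the port your vCenter actually listens on
    vmware_host_port = 443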
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/vmware-vmdk-manage-existing-0edc20d9d4d19172.yaml0000664000175000017500000000011400000000000027316 0ustar00zuulzuul00000000000000--- features: - Added support for manage volume in the VMware VMDK driver.././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/vmware-vmdk-removed-bfb04eed77b95fdf.yaml0000664000175000017500000000011100000000000026213 0ustar00zuulzuul00000000000000--- upgrade: - The VMware VMDK driver for ESX server has been removed. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/vmware-vmdk-revert-to-snapshot-ee3d638565649f44.yaml0000664000175000017500000000013100000000000027711 0ustar00zuulzuul00000000000000--- features: - | Added support for revert-to-snapshot in the VMware VMDK driver. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/vmware-vmdk-snapshot-template-d3dcfc0906c02edd.yaml0000664000175000017500000000055000000000000030127 0ustar00zuulzuul00000000000000--- features: - | VMware VMDK driver now supports vSphere template as a volume snapshot format in vCenter server. The snapshot format in vCenter server can be specified using driver config option ``vmware_snapshot_format``. upgrade: - | VMware VMDK driver will use vSphere template as the default snapshot format in vCenter server. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/vmware_adapter_type-66164bc3857f244f.yaml0000664000175000017500000000026000000000000025722 0ustar00zuulzuul00000000000000--- features: - | VMware VMDK driver now supports volume type extra-spec option ``vmware:adapter_type`` to specify the adapter type of volumes in vCenter server. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/vmware_enable_volume_stats-1ef84e170187f0fa.yaml0000664000175000017500000000246600000000000027437 0ustar00zuulzuul00000000000000--- upgrade: - | VMware vmdk driver: The vmware vmdk driver had its get_volume_stats removed in a previous release due to a potential performance hit of 20% at a high load. The problem with reporting ``unknown`` back to the scheduler, is that it effectively removes cinder's ability to properly schedule based on capacity utilization. When this driver is enabled in a heterogenous environment without properly reporting utilization statistics, the scheduler's capacity filter will always allow this driver to service a provisioning request. Without reporting the backend stats, the capacity filter also can't determine the reserved_percentage as well as the max_over_subscription_ratio. To enable the collection of stats set ``vmware_enable_volume_stats`` to True in the driver section of cinder.conf. The default setting is False. Keep in mind that there may be a degradation in performance on the vcenter when enabling this setting. fixes: - | VMware vmdk driver: The collection of volume stats, which had been disabled, may now be turned on by using the ``vmware_enable_volume_stats`` configuration option. The default for this option is False (no stats collection). 
Be aware that enabling volume stats may cause performance issues under high load. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/vmware_fcd_retype-979418c39fd5d59d.yaml0000664000175000017500000000022100000000000025463 0ustar00zuulzuul00000000000000--- features: - | Added support for changing storage policy of backend volumes created by VMwareVStorageObjectDriver using retype API. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/vmware_fcd_snapshot-b702f7e950dfbe7a.yaml0000664000175000017500000000027000000000000026210 0ustar00zuulzuul00000000000000--- features: - | vSphere 6.7 added support for vStorageObject snapshots. The VMwareVStorageObjectDriver is updated to use VStorageObject snapshots for volume snapshots. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/vmware_fcd_storage_policy-636d6a95f1c44b6e.yaml0000664000175000017500000000052200000000000027237 0ustar00zuulzuul00000000000000--- features: - | Added vSphere storage policy support in VMwareVStorageObjectDriver. The storage policies that must be associated with the volumes can be specified using volume type extra-spec key 'vmware:storage_profile' similar to VMware VMDK driver. The vSphere version must be 6.7 or above to use this feature. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/vmware_lazy_create-52f52f71105d2067.yaml0000664000175000017500000000026700000000000025454 0ustar00zuulzuul00000000000000--- features: - | VMware VMDK driver now supports a config option ``vmware_lazy_create`` to disable the default behavior of lazy creation of raw volumes in the backend. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/vmware_retype_adapter_type-dbd8935b8d3bcb1b.yaml0000664000175000017500000000026400000000000027663 0ustar00zuulzuul00000000000000--- features: - | VMware VMDK driver now supports changing adapter type using retype. To change the adapter type, set ``vmware:adapter_type`` in the new volume type. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/vmware_vmdk_datastore_regex-fe7b68ad69ef7384.yaml0000664000175000017500000000035200000000000027702 0ustar00zuulzuul00000000000000--- features: - | VMware VMDK driver and FCD driver now support a config option ``vmware_datastore_regex`` to specify the regular expression pattern to match the name of datastores where backend volumes are created. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/vmware_vmdk_default_adapter_type-8e247bce5b229c7a.yaml0000664000175000017500000000024400000000000030663 0ustar00zuulzuul00000000000000--- features: - Added config option ``vmware_adapter_type`` for the VMware VMDK driver to specify the default adapter type for volumes in vCenter server. 
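As a rough illustration of the VMware VMDK options described in the notes above, a hypothetical backend section of ``cinder.conf`` could combine them as follows (the section name, regex value and adapter type are assumptions for illustration)::

    [vmdk-1]
    volume_driver = cinder.volume.drivers.vmware.vmdk.VMwareVcVmdkDriver
    # Re-enable capacity statistics reporting (default is False); this may
    # degrade vCenter performance under high load, as noted above.
    vmware_enable_volume_stats = True
    # Only use datastores whose names match this pattern.
    vmware_datastore_regex = openstack-.*
    # Default adapter type for new volumes.
    vmware_adapter_type = lsiLogic

A per-volume adapter type can also be requested through the ``vmware:adapter_type`` extra-spec mentioned above, for example (the volume type name is a placeholder)::

    openstack volume type set --property vmware:adapter_type=paraVirtual my-vmdk-type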
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/vmware_vmdk_enforce_vc_55-7e1b3ede9bf2129b.yaml0000664000175000017500000000012500000000000027174 0ustar00zuulzuul00000000000000--- upgrade: - The VMware VMDK driver now enforces minimum vCenter version of 5.5. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/vmware_vmdk_managed_by-3de05504d0f9a65a.yaml0000664000175000017500000000021500000000000026471 0ustar00zuulzuul00000000000000--- features: - | The volumes created by VMware VMDK driver will be displayed as "managed by OpenStack Cinder" in vCenter server. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/vmware_vmdk_nfs41-450908bbbc9eea6d.yaml0000664000175000017500000000016000000000000025504 0ustar00zuulzuul00000000000000--- features: - | VMware VMDK driver and FCD driver now support NFS 4.1 datastores in vCenter server. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/vmware_vmdk_paravirtual-3d5eeef96dcbcfb7.yaml0000664000175000017500000000016300000000000027340 0ustar00zuulzuul00000000000000--- fixes: - Added support for images with ``vmware_adaptertype`` set to ``paraVirtual`` in the VMDK driver. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/vmware_vmdk_storage_profile_config-fa3784f1ed50df9e.yaml0000664000175000017500000000027200000000000031277 0ustar00zuulzuul00000000000000--- features: - | VMware VMDK driver now supports a config option ``vmware_storage_profile`` to specify a list with names of storage profiles to be monitored for capacity. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/vnx-add-async-migrate-option-0734164feeaecadc.yaml0000664000175000017500000000032600000000000027634 0ustar00zuulzuul00000000000000--- fixes: - | Dell EMC VNX Driver: Fix `bug 1796825 `__, adding an option named `vnx_async_migrate` to accept the default setting for async migration. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/vnx-add-force-detach-support-26f215e6f70cc03b.yaml0000664000175000017500000000012400000000000027374 0ustar00zuulzuul00000000000000--- features: - | Add support to force detach a volume from all hosts on VNX. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/vnx-async-migration-support-3c449139bb264004.yaml0000664000175000017500000000065200000000000027213 0ustar00zuulzuul00000000000000--- features: - VNX cinder driver now supports async migration during volume cloning. By default, the cloned volume will be available after the migration starts in the VNX instead of waiting for the completion of migration. This greatly accelerates the cloning process. If users want to disable this, they can add ``--metadata async_migrate=False`` when creating a volume from a source volume/snapshot. 
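A sketch of the opt-out described in the VNX async-migration note above, using the ``--metadata`` flag the note references (the source volume ID, volume name and size are placeholders)::

    cinder create --source-volid <source-volume-id> \
        --metadata async_migrate=False \
        --name cloned-volume 10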
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/vnx-configurable-migration-rate-5e0a2235777c314f.yaml0000664000175000017500000000011000000000000030037 0ustar00zuulzuul00000000000000--- features: - Configurable migration rate in VNX driver via metadata ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/vnx-fail-delete-lun-due-to-tmp-snapshot-edd3cdd85e28be60.yaml0000664000175000017500000000062000000000000031641 0ustar00zuulzuul00000000000000--- fixes: - | Dell EMC VNX Cinder Driver: Fixes `bug 1794646 `__ to delete the LUN from the VNX storage. Because a temporary snapshot is created from the LUN when creating a volume from a snapshot and isn't deleted, the LUN cannot be deleted before its snapshot is deleted. The fix makes sure the temp snapshot is deleted. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/vnx-new-driver-7e96934c2d3a6edc.yaml0000664000175000017500000000117000000000000024775 0ustar00zuulzuul00000000000000--- features: - New Cinder driver based on storops library (available in pypi) for EMC VNX. upgrade: - For EMC VNX backends, please upgrade to use ``cinder.volume.drivers.emc.vnx.driver.EMCVNXDriver``. Add config option ``storage_protocol = fc`` or ``storage_protocol = iscsi`` to the driver section to enable the FC or iSCSI driver respectively. deprecations: - Old VNX FC (``cinder.volume.drivers.emc.emc_cli_fc.EMCCLIFCDriver``)/ iSCSI (``cinder.volume.drivers.emc.emc_cli_iscsi.EMCCLIISCSIDriver``) drivers are deprecated. Please refer to the upgrade section for information about the new driver. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/vnx-perf-optimize-bd55dc3ef7584228.yaml0000664000175000017500000000013000000000000025419 0ustar00zuulzuul00000000000000--- other: - "Dell EMC VNX driver: Enhances the performance of create/delete volume." ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/vnx-qos-support-7057196782e2c388.yaml0000664000175000017500000000007200000000000024656 0ustar00zuulzuul00000000000000--- features: - Adds QoS support for VNX Cinder driver. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/vnx-replication-group-2ebf04c80e2171f7.yaml0000664000175000017500000000012100000000000026252 0ustar00zuulzuul00000000000000--- features: - Add consistent replication group support in VNX cinder driver. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/vnx-replication-v2.1-4d89935547183cc9.yaml0000664000175000017500000000010600000000000025421 0ustar00zuulzuul00000000000000--- features: - Adds v2.1 replication support in VNX Cinder driver. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/vnx-repv2.1-config-update-cc2f60c20aec88dd.yaml0000664000175000017500000000055700000000000026763 0ustar00zuulzuul00000000000000--- upgrade: - In VNX Cinder driver, ``replication_device`` keys, ``backend_id`` and ``san_ip`` are mandatory now. 
If you prefer security file authentication, please append ``storage_vnx_security_file_dir`` in ``replication_device``, otherwise, append ``san_login``, ``san_password``, ``storage_vnx_authentication_type`` in ``replication_device``. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/vnx-revert-to-snapshot-e5494b6fb5ad5a1e.yaml0000664000175000017500000000015100000000000026544 0ustar00zuulzuul00000000000000--- features: - | Added support to revert a volume to a snapshot with the Dell EMC VNX driver. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/vnx-update-sg-in-cache-3ecb673727bea79b.yaml0000664000175000017500000000036400000000000026252 0ustar00zuulzuul00000000000000--- fixes: - | Dell EMC VNX Driver: Fixes `bug 1817385 `__ to make sure the sg can be created again after it was destroyed under `destroy_empty_storage_group` setting to `True`. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/vnx_clone_cg-db74ee2ea71bedcb.yaml0000664000175000017500000000012000000000000025021 0ustar00zuulzuul00000000000000--- features: - Cloning of consistency group added to EMC VNX backend driver. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315527.0 cinder-27.0.0/releasenotes/notes/volume-filtering-for-quoted-display-name-7f5e8ac888a73001.yaml0000664000175000017500000000020200000000000031666 0ustar00zuulzuul00000000000000--- fixes: - Filtering volumes by their display name now correctly handles display names with single and double quotes. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315528.0 cinder-27.0.0/releasenotes/notes/volume-migrate-create-delete-notification-f567cae5522852ec.yaml0000664000175000017500000000024000000000000032135 0ustar00zuulzuul00000000000000--- fixes: - | `Bug #1922920 `_: Don't do volume usage notifications for migration temporary volumes. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315528.0 cinder-27.0.0/releasenotes/notes/volume-type-encryption-api-policy-base-4334ca94d73df238.yaml0000664000175000017500000000157500000000000031377 0ustar00zuulzuul00000000000000--- upgrade: - | The ``volume_extension:volume_type_encryption`` policy, which was deprecated in Stein, has been un-deprecated for the convenience of operators who would like to set the policies for the create, get, update, and delete operations for a volume type's encryption type in one place. The default value for this policy target has not changed. As a reminder, the finer-grained policies are: - ``volume_extension:volume_type_encryption:create`` - ``volume_extension:volume_type_encryption:get`` - ``volume_extension:volume_type_encryption:update`` - ``volume_extension:volume_type_encryption:delete`` The default values for these policies have also not changed. See the `sample Cinder policy file `_ for more information. 
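For the VNX replication configuration change described above, a minimal ``cinder.conf`` sketch of a ``replication_device`` entry using global-credential authentication; the section name, addresses and credentials are placeholders::

    [vnx-1]
    # backend_id and san_ip are now mandatory keys.
    replication_device = backend_id:target_vnx,san_ip:192.168.1.2,san_login:sysadmin,san_password:sysadmin,storage_vnx_authentication_type:global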
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315528.0 cinder-27.0.0/releasenotes/notes/volume-type-encryption-api-policy-granularity-7071e45f4c7894c5.yaml0000664000175000017500000000116200000000000032750 0ustar00zuulzuul00000000000000--- upgrade: - | Add granularity to the ``volume_extension:volume_type_encryption`` policy with the addition of distinct actions for create, get, update, and delete: - ``volume_extension:volume_type_encryption:create`` - ``volume_extension:volume_type_encryption:get`` - ``volume_extension:volume_type_encryption:update`` - ``volume_extension:volume_type_encryption:delete`` To address backwards compatibility, the new rules added to the volume_type.py policy file, default to the existing rule, ``volume_extension:volume_type_encryption``, if it is set to a non-default value. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315528.0 cinder-27.0.0/releasenotes/notes/volume_init_max_objects_retrieval-966f607c46190946.yaml0000664000175000017500000000062000000000000030516 0ustar00zuulzuul00000000000000--- upgrade: - Volume Manager now uses the configuration option ``init_host_max_objects`` retrieval to set max number of volumes and snapshots to be retrieved per batch during volume manager host initialization. Query results will be obtained in batches from the database and not in one shot to avoid extreme memory usage. Default value is 0 and disables this functionality.././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315528.0 cinder-27.0.0/releasenotes/notes/volumes-summary-6b2485f339c88a91.yaml0000664000175000017500000000022500000000000025053 0ustar00zuulzuul00000000000000--- features: - A new API to display the volumes summary. This summary API displays the total number of volumes and total volume's size in GB. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315528.0 cinder-27.0.0/releasenotes/notes/vrts_hyperscale_driver-5b63ab706ea8ae89.yaml0000664000175000017500000000011600000000000026664 0ustar00zuulzuul00000000000000--- features: - Added volume backend driver for Veritas HyperScale storage. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315528.0 cinder-27.0.0/releasenotes/notes/vxflexos-3.5.x-support-403427dc65a7a4f6.yaml0000664000175000017500000000010600000000000026066 0ustar00zuulzuul00000000000000--- features: - | VxFlex OS driver now supports VxFlex OS 3.5.x.././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315528.0 cinder-27.0.0/releasenotes/notes/vxflexos-migration-support-a04a73cda323b382.yaml0000664000175000017500000000013100000000000027342 0ustar00zuulzuul00000000000000--- features: - | VxFlex OS driver now supports storage-assisted volume migration. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315528.0 cinder-27.0.0/releasenotes/notes/vxflexos-powerflex-rebrand-37dfe2b82d35b6a2.yaml0000664000175000017500000000256700000000000027403 0ustar00zuulzuul00000000000000--- upgrade: - | Dell EMC VxFlex OS has been rebranded to PowerFlex. The driver ``cinder.volume.drivers.dell_emc.vxflexos.driver.VxFlexOSDriver`` has been renamed to ``cinder.volume.drivers.dell_emc.powerflex.driver.PowerFlexDriver``. 
Although in this release the volume manager will recognize the old driver name, that functionality will be removed in the Wallaby release, and thus we recommend that you update the driver name in ``cinder.conf`` at your earliest convenience. Existing vxFlex OS configuration options, whose usage was DEPRECATED in the Stein release, will no longer be recognized in this release. Thus all driver configuration options that start with ``vxflexos`` must be updated to ``powerflex`` in your ``cinder.conf`` before you deploy this release. Before the Rocky release, this driver was named ``cinder.volume.drivers.dell_emc.scaleio.driver.ScaleIODriver``. That name was deprecated in the Rocky release. In this release, the pre-Rocky name for this driver is no longer recognized and support for configuration options beginning with ``sio`` has been removed. Thus any driver configuration options that start with ``sio`` must be updated to ``powerflex`` in your ``cinder.conf`` before you deploy this release. The online documentation has been updated to reflect these changes. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315528.0 cinder-27.0.0/releasenotes/notes/vxflexos-replication-support-f43e62df35e16e3a.yaml0000664000175000017500000000020300000000000027764 0ustar00zuulzuul00000000000000--- features: - | VxFlex OS driver now supports OpenStack volume replication v2.1 for VxFlex OS v3.5.0 storage backends. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315528.0 cinder-27.0.0/releasenotes/notes/vxflexos-revert-to-snapshot-a90c40ec476cc2bd.yaml0000664000175000017500000000014200000000000027576 0ustar00zuulzuul00000000000000--- features: - | VxFlex OS driver now supports storage-assisted revert volume to snapshot. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315528.0 cinder-27.0.0/releasenotes/notes/vxflexos-support-compression-9139e556677ac093.yaml0000664000175000017500000000023400000000000027536 0ustar00zuulzuul00000000000000--- features: - | VxFlex OS driver now supports VxFlex OS 3.0 features: storage pools with fine granularity layout, volume compression(SPEF). ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315528.0 cinder-27.0.0/releasenotes/notes/vxflexos_drop_deprecated_opt-3231a222e458fa92.yaml0000664000175000017500000000073300000000000027610 0ustar00zuulzuul00000000000000--- upgrade: - | VxFlex OS (ScaleIO) driver drops support for options, which were marked as deprecated in Pike release. Remove config options: ``sio_protection_domain_id``, ``sio_protection_domain_name``, ``sio_storage_pool_name``, ``sio_storage_pool_id``. Remove volume type options: ``sio:sp_name``, ``sio:sp_id``, ``sio:pd_name``, ``sio:pd_id``, ``sio:provisioning_type``, ``sio:iops_limit``, ``sio:bandwidth_limit``. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315528.0 cinder-27.0.0/releasenotes/notes/vzstorage-log-path-7539342e562a2e4a.yaml0000664000175000017500000000052700000000000025412 0ustar00zuulzuul00000000000000--- features: - | Logging path can now be configured for vzstorage driver in shares config file (specified by vzstorage_shares_config option). To set custom logging path add `'-l', ''` to mount options array. Otherwise default logging path `/var/log/vstorage//cinder.log.gz` will be used. 
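The PowerFlex rebranding described above amounts to a rename in ``cinder.conf``; a minimal sketch (the backend section name is a placeholder, and the commented line shows the old driver path that will stop being recognized)::

    [powerflex-1]
    # Previously:
    # volume_driver = cinder.volume.drivers.dell_emc.vxflexos.driver.VxFlexOSDriver
    volume_driver = cinder.volume.drivers.dell_emc.powerflex.driver.PowerFlexDriver
    # Any option that previously began with vxflexos must likewise be
    # renamed to begin with powerflex.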
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315528.0 cinder-27.0.0/releasenotes/notes/vzstorage-volume-format-cde85d3ad02f6bb4.yaml0000664000175000017500000000033200000000000027041 0ustar00zuulzuul00000000000000--- features: - | VzStorage volume driver now supports choosing desired volume format by setting vendor property 'vz:volume_format' in volume type metadata. Allowed values are 'ploop', 'qcow2' and 'raw'. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315528.0 cinder-27.0.0/releasenotes/notes/win-iscsi-config-portals-51895294228d7883.yaml0000664000175000017500000000043100000000000026306 0ustar00zuulzuul00000000000000--- features: - | The Windows iSCSI driver now returns multiple portals when available and multipath is requested. fixes: - | The Windows iSCSI driver now honors the configured iSCSI addresses, ensuring that only those addresses will be used for iSCSI traffic. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315528.0 cinder-27.0.0/releasenotes/notes/windows-multiple-backends-9aa83631ad3d42f2.yaml0000664000175000017500000000024000000000000027072 0ustar00zuulzuul00000000000000--- fixes: - | Multiple backends may now be enabled within the same Cinder Volume service on Windows by using the ``enabled_backends`` config option. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315528.0 cinder-27.0.0/releasenotes/notes/windows-volume-backup-b328858a20f5a499.yaml0000664000175000017500000000055600000000000026132 0ustar00zuulzuul00000000000000--- features: - | The Cinder Volume Backup service can now be run on Windows. It supports backing up volumes exposed by SMBFS/iSCSI Windows Cinder Volume backends, as well as any other Cinder backend that's accessible on Windows (e.g. SANs exposing volumes via iSCSI/FC). The Swift and Posix backup drivers are known to be working on Windows. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315528.0 cinder-27.0.0/releasenotes/notes/xena-policy-changes-7a563020337f6be9.yaml0000664000175000017500000001267400000000000025522 0ustar00zuulzuul00000000000000--- features: - | **Policy configuration changes** Over the Xena and Yoga development cycles, cinder's default policy configuration is being modified to take advantage of the default authentication and authorization apparatus supplied by the Keystone project. This will give operators a rich set of default policies to control how users interact with the Block Storage service API. The details of this project are described in `Policy Personas and Permissions `_ in the `Cinder Service Configuration Guide`. We encourage you to read through that document. The following is only a summary. The primary change in the Xena release is that cinder's default policy configuration will recognize the ``reader`` role on a project. Additionally, * Some current rules defined in the policy file are being DEPRECATED and will be removed in the Yoga release. You only need to worry about this if you have used any of these rules yourself in when writing custom policies, as you cannot rely on the following rules being pre-defined in the Yoga release. 
* ``rule:admin_or_owner`` * ``rule:system_or_domain_or_project_admin`` * ``rule:volume_extension:volume_type_encryption`` * Some current policies that were over-general (that is, they governed both read and write operations on a resource) are being replaced by a set of new policies that provide greater granularity. The following policies are DEPRECATED and will be removed in the Yoga release: * ``group:group_types_manage`` is replaced by: * ``group:group_types:create`` * ``group:group_types:update`` * ``group:group_types:delete`` * ``group:group_types_specs`` is replaced by: * ``group:group_types_specs:get`` * ``group:group_types_specs:get_all`` * ``group:group_types:create`` * ``group:group_types:update`` * ``group:group_types:delete`` * ``volume_extension:quota_classes`` is replaced by: * ``volume_extension:quota_classes:get`` * ``volume_extension:quota_classes:update`` * ``volume_extension:types_manage`` is replaced by: * ``volume_extension:type_create`` * ``volume_extension:type_update`` * ``volume_extension:type_delete`` * ``volume_extension:volume_image_metadata`` is replaced by: * ``volume_extension:volume_image_metadata:show`` * ``volume_extension:volume_image_metadata:set`` * ``volume_extension:volume_image_metadata:remove`` * A new policy was introduced to govern an operation previously controlled by a policy that is not being removed, but whose other governed actions are conceptually different: * ``volume_extension:volume_type_access:get_all_for_type`` * A new policy was introduced as part of the feature described in the `User visible extra specs `_ section of the `Cinder Administration Guide`: * ``volume_extension:types_extra_specs:read_sensitive`` * Many policies had their default values changed and their previous values deprecated. These are indicated in the sample policy configuration file, which you can view in the `policy.yaml `_ section of the `Cinder Service Configuration Guide`. * In particular, we direct your attention to the default values for the policies associated with the Default Volume Types API (introduced with microversion 3.62 of the Block Storage API). These had experimentally recognized "scope", but for consistency with the other rules, their default values no longer recognize scope. (Scope will be introduced to all cinder policy defaults in the Yoga release.) * When a policy value is deprecated, the oslo.policy engine will check the new value, and if that fails, it will evaluate the deprecated value. This behavior may be modified so *only* the new policy value is used by setting the configuration option ``enforce_new_defaults=True`` in the ``[oslo_policy]`` section of the cinder configuration file. deprecations: - | The following policy rules have been DEPRECATED in this release and will be removed in Yoga: * ``rule:admin_or_owner`` * ``rule:system_or_domain_or_project_admin`` * ``rule:volume_extension:volume_type_encryption`` For more information, see the 'New Features' section of this document and `Policy Personas and Permissions `_ in the `Cinder Service Configuration Guide`. - | The following policies have been DEPRECATED in this release and will be removed in Yoga: * ``group:group_types_manage`` * ``group:group_types_specs`` * ``volume_extension:quota_classes`` * ``volume_extension:types_manage`` * ``volume_extension:volume_image_metadata`` For more information, see the 'New Features' section of this document and `Policy Personas and Permissions `_ in the `Cinder Service Configuration Guide`. 
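The ``enforce_new_defaults`` opt-in mentioned in the policy notes above is a single setting in the cinder configuration file; a minimal sketch::

    [oslo_policy]
    # Evaluate only the new policy defaults; the deprecated fallback
    # values are no longer checked.
    enforce_new_defaults = True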
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315528.0 cinder-27.0.0/releasenotes/notes/xena-release-prelude-2190d8c515dbedc1.yaml0000664000175000017500000000256600000000000026102 0ustar00zuulzuul00000000000000--- prelude: | Welcome to the Xena release of the OpenStack Block Storage service (cinder). With this release, the Block Storage API version 3 has reached microversion **3.66**. The cinder team would like to bring the following points to your attention. Details may be found below. * The Block Storage API v2, which was deprecated way back in the Pike release, has been removed. We gently remind you that Pike was a long time ago, and that version 3.0 of the Block Storage API was designed to be completely compatible with version 2. * Microversion 3.65 includes the display of information in the volume or snapshot detail response to indicate whether that resource consumes quota, and adds the ability to filter a requested list of resources according to whether they consume quota or not. * Microversion 3.66 removes the necessity to add a 'force' flag when requesting a snapshot of an in-use volume, given that this is not a problem for modern storage systems. * The volume-type detail response has been enhanced to include non-sensitive "extra-specs" information in order to provide more data for automated systems to select a volume type. * The default policy configuration has been extensively rewritten. * Many backend storage drivers have added features and fixed bugs. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315528.0 cinder-27.0.0/releasenotes/notes/xiv-ds8k-replication-2.1-996c871391152e31.yaml0000664000175000017500000000012100000000000025776 0ustar00zuulzuul00000000000000--- features: - Added replication v2.1 support to the IBM XIV/DS8K driver. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315528.0 cinder-27.0.0/releasenotes/notes/xiv-generic-volume-group-4609cdc86d6aaf81.yaml0000664000175000017500000000021000000000000026747 0ustar00zuulzuul00000000000000--- features: - Add consistent group capability to generic volume groups in XIV, Spectrum Accelerate and A9000/R storage systems. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315528.0 cinder-27.0.0/releasenotes/notes/xiv-new-qos-independent-type-58885c77efe24798.yaml0000664000175000017500000000047600000000000027370 0ustar00zuulzuul00000000000000--- features: - Added independent and shared types for qos classes in XIV & A9000. Shared type enables to share bandwidth and IO rates between volumes of the same class. Independent type gives each volume the same bandwidth and IO rates without being affected by other volumes in the same qos class. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315528.0 cinder-27.0.0/releasenotes/notes/xiv-replication-group-7ca437c90f2474a7.yaml0000664000175000017500000000013600000000000026204 0ustar00zuulzuul00000000000000--- features: - | Add consistency group replication support in XIV\A9000 Cinder driver. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315528.0 cinder-27.0.0/releasenotes/notes/xtremio-cg-from-cg-e05cf286e3a1e943.yaml0000664000175000017500000000014200000000000025416 0ustar00zuulzuul00000000000000--- features: - Support for creating a consistency group from consistency group in XtremIO. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315528.0 cinder-27.0.0/releasenotes/notes/xtremio-generic-groups-912e11525573e970.yaml0000664000175000017500000000014200000000000026127 0ustar00zuulzuul00000000000000--- features: - Add consistent group capability to generic volume groups in the XtremIO driver. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315528.0 cinder-27.0.0/releasenotes/notes/xtremio-ig-cleanup-bbb4bee1f1e3611c.yaml0000664000175000017500000000037300000000000025726 0ustar00zuulzuul00000000000000--- features: - | Added new option to delete XtremIO initiator groups after the last volume was detached from them. Cleanup can be enabled by setting ``xtremio_clean_unused_ig`` to ``True`` under the backend settings in cinder.conf. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315528.0 cinder-27.0.0/releasenotes/notes/xtremio-manage-snapshot-5737d3ad37df81d1.yaml0000664000175000017500000000012400000000000026564 0ustar00zuulzuul00000000000000--- features: - Added snapshot manage/unmanage support to the EMC XtremIO driver. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315528.0 cinder-27.0.0/releasenotes/notes/xtremio-support-multiattache-20b1882a1216a8b2.yaml0000664000175000017500000000011100000000000027504 0ustar00zuulzuul00000000000000--- features: - Dell EMC XtremIO driver has added multiattach support. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315528.0 cinder-27.0.0/releasenotes/notes/yoga-release-prelude-9fc369cf00df1a48.yaml0000664000175000017500000000223600000000000026114 0ustar00zuulzuul00000000000000--- prelude: | Welcome to the Yoga release of the OpenStack Block Storage service (cinder). With this release, the Block Storage API version 3 has reached microversion **3.68**. The cinder team would like to bring the following points to your attention. Details may be found throughout this document. * Microversion 3.67 is introduced as a marker to indicate that any instance of the Block Storage API 3.67 or greater treats a project_id in the URL as optional. This change is backward compatible: the API can handle legacy URLs containing a project_id as well as URLs without a project_id. This is the case regardless of what microversion specified in a request. See the "New Features" section for details. * Microversion 3.68 introduces a new volume action, ``os-reimage``, that allows a user to replace the current content of a specified volume with the data of a specified image supplied by the Image service (glance). See the "New Features" section for details. * Some new backend storage drivers have been added, and many current drivers have added features and fixed bugs. 
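Per the XtremIO initiator-group cleanup note above, the behavior is enabled per backend in ``cinder.conf``; a minimal sketch with an assumed backend section name::

    [xtremio-1]
    # Delete XtremIO initiator groups once their last volume is detached.
    xtremio_clean_unused_ig = True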
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315528.0 cinder-27.0.0/releasenotes/notes/zfssa-iscsi-get-manageable-volumes-eb23a11570c813d7.yaml0000664000175000017500000000013300000000000030470 0ustar00zuulzuul00000000000000--- features: - Oracle ZFSSA iSCSI volume driver implements ``get_manageable_volumes()`` ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315528.0 cinder-27.0.0/releasenotes/notes/zfssa-iscsi-multi-connect-3be99ee84660a280.yaml0000664000175000017500000000036200000000000026760 0ustar00zuulzuul00000000000000--- fixes: - Oracle ZFSSA iSCSI - allows a volume to be connected to more than one connector at the same time, which is required for live-migration to work. ZFSSA software release 2013.1.3.x (or newer) is required for this to work. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315528.0 cinder-27.0.0/releasenotes/notes/zfssa-volume-manage-unmanage-ccd80807103b69c8.yaml0000664000175000017500000000013100000000000027377 0ustar00zuulzuul00000000000000--- features: - Volume manage/unmanage support for Oracle ZFSSA iSCSI and NFS drivers. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315528.0 cinder-27.0.0/releasenotes/notes/zte_cinder_driver-76ba6d034e1b6f65.yaml0000664000175000017500000000007700000000000025516 0ustar00zuulzuul00000000000000--- features: - Added backend driver for ZTE iSCSI storage. ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315577.8191252 cinder-27.0.0/releasenotes/source/0000775000175000017500000000000000000000000017060 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315528.0 cinder-27.0.0/releasenotes/source/2023.1.rst0000664000175000017500000000017700000000000020344 0ustar00zuulzuul00000000000000=========================== 2023.1 Series Release Notes =========================== .. release-notes:: :branch: 2023.1-eom ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315528.0 cinder-27.0.0/releasenotes/source/2023.2.rst0000664000175000017500000000020200000000000020332 0ustar00zuulzuul00000000000000=========================== 2023.2 Series Release Notes =========================== .. release-notes:: :branch: stable/2023.2 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315528.0 cinder-27.0.0/releasenotes/source/2024.1.rst0000664000175000017500000000020200000000000020332 0ustar00zuulzuul00000000000000=========================== 2024.1 Series Release Notes =========================== .. release-notes:: :branch: stable/2024.1 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315528.0 cinder-27.0.0/releasenotes/source/2024.2.rst0000664000175000017500000000020200000000000020333 0ustar00zuulzuul00000000000000=========================== 2024.2 Series Release Notes =========================== .. release-notes:: :branch: stable/2024.2 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315528.0 cinder-27.0.0/releasenotes/source/2025.1.rst0000664000175000017500000000020200000000000020333 0ustar00zuulzuul00000000000000=========================== 2025.1 Series Release Notes =========================== .. 
release-notes:: :branch: stable/2025.1 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315528.0 cinder-27.0.0/releasenotes/source/README.txt0000664000175000017500000000340100000000000020554 0ustar00zuulzuul00000000000000========================================= Important Notes Regarding Closed Branches ========================================= This README applies to release notes for branches that are closed. This includes End of Life, Unmaintained, and Extended Maintenance branches. The list of series, and their stable status, can be found here: https://releases.openstack.org/ Once a stable series reaches Extended Maintenance, no new official releases will be performed for that series. For this reason, and to save a significant amount of time in gate jobs that build release notes, EOL branch release notes are made static. Said another way, reno is no longer used to dynamically generate the release notes for that branch as they are not expected to change often. Branches in Extended Maintenance will not be released, but they can still accept backports of bugfixes. We may want to include release notes for these fixes, even if they will not be included in an official release. In this case, in addition to backporting the release note, you will need to manually refresh the static page so those new notes will show up under a development release version in the generated output. To regenerate the static landing pages in this case, run the following commands from the root of the openstack/cinder repo:: tox -e releasenotes --notest .tox/releasenotes/bin/reno report \ --title "$SERIES Series Release Notes" \ --branch "stable/$series" | \ sed 's/^ *$//g' > "releasenotes/source/$series.rst" In this example, ``$SERIES`` would be the title-cased series name (i.e. Rocky), and $series would be the series name in lower case (i.e. rocky). This should replace the static release note page. That page should then be added to the commit and included as part of the review. ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315577.8191252 cinder-27.0.0/releasenotes/source/_static/0000775000175000017500000000000000000000000020506 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315528.0 cinder-27.0.0/releasenotes/source/_static/.placeholder0000664000175000017500000000000000000000000022757 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315577.8191252 cinder-27.0.0/releasenotes/source/_templates/0000775000175000017500000000000000000000000021215 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315528.0 cinder-27.0.0/releasenotes/source/_templates/.placeholder0000664000175000017500000000000000000000000023466 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315528.0 cinder-27.0.0/releasenotes/source/conf.py0000664000175000017500000002066100000000000020364 0ustar00zuulzuul00000000000000# -*- coding: utf-8 -*- # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. # This file is execfile()d with the current directory set to its # containing dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. # sys.path.insert(0, os.path.abspath('.')) # -- General configuration ------------------------------------------------ # If your documentation needs a minimal Sphinx version, state it here. # needs_sphinx = '1.0' # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. extensions = [ 'openstackdocstheme', 'reno.sphinxext', ] # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] # The suffix of source filenames. source_suffix = '.rst' # The encoding of source files. # source_encoding = 'utf-8-sig' # The master toctree document. master_doc = 'index' # General information about the project. openstackdocs_repo_name = 'openstack/cinder' openstackdocs_bug_project = 'cinder' openstackdocs_bug_tag = 'doc' project = u'Cinder Release Notes' openstackdocs_auto_name = False copyright = u'2015, Cinder Developers' # Release notes are version independent, no need to set version and release release = '' version = '' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. # language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: # today = '' # Else, today_fmt is used as the format for a strftime call. # today_fmt = '%B %d, %Y' # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. exclude_patterns = [] # The reST default role (used for this markup: `text`) to use for all # documents. # default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. # add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). # add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. # show_authors = False # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'native' # A list of ignored prefixes for module index sorting. # modindex_common_prefix = [] # If true, keep warnings as "system message" paragraphs in the built documents. # keep_warnings = False # -- Options for HTML output ---------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. html_theme = 'openstackdocs' # Theme options are theme-specific and customize the look and feel of a theme # further. 
For a list of options available for each theme, see the # documentation. # html_theme_options = {} # Add any paths that contain custom themes here, relative to this directory. # html_theme_path = [] # The name for this set of Sphinx documents. If None, it defaults to # " v documentation". # html_title = None # A shorter title for the navigation bar. Default is the same as html_title. # html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. # html_logo = None # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. # html_favicon = None # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ['_static'] # Add any extra paths that contain custom files (such as robots.txt or # .htaccess) here, relative to this directory. These files are copied # directly to the root of the documentation. # html_extra_path = [] # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. # html_last_updated_fmt = '%Y-%m-%d %H:%M' # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. # html_use_smartypants = True # Custom sidebar templates, maps document names to template names. # html_sidebars = {} # Additional templates that should be rendered to pages, maps page names to # template names. # html_additional_pages = {} # If false, no module index is generated. # html_domain_indices = True # If false, no index is generated. # html_use_index = True # If true, the index is split into individual pages for each letter. # html_split_index = False # If true, links to the reST sources are added to the pages. # html_show_sourcelink = True # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. # html_show_sphinx = True # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. # html_show_copyright = True # If true, an OpenSearch description file will be output, and all pages will # contain a tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. # html_use_opensearch = '' # This is the file name suffix for HTML files (e.g. ".xhtml"). # html_file_suffix = None # Output file base name for HTML help builder. htmlhelp_basename = 'CinderReleaseNotesdoc' # -- Options for LaTeX output --------------------------------------------- # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, # author, documentclass [howto, manual, or own class]). latex_documents = [ ('index', 'CinderReleaseNotes.tex', u'Cinder Release Notes Documentation', u'Cinder Developers', 'manual'), ] # The name of an image file (relative to this directory) to place at the top of # the title page. # latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. # latex_use_parts = False # If true, show page references after internal links. # latex_show_pagerefs = False # If true, show URL addresses after external links. # latex_show_urls = False # Documents to append as an appendix to all manuals. # latex_appendices = [] # If false, no module index is generated. 
# latex_domain_indices = True # -- Options for manual page output --------------------------------------- # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). man_pages = [ ('index', 'cinderreleasenotes', u'Cinder Release Notes Documentation', [u'Cinder Developers'], 1) ] # If true, show URL addresses after external links. # man_show_urls = False # -- Options for Texinfo output ------------------------------------------- # Grouping the document tree into Texinfo files. List of tuples # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ ('index', 'CinderReleaseNotes', u'Cinder Release Notes Documentation', u'Cinder Developers', 'CinderReleaseNotes', 'One line description of project.', 'Miscellaneous'), ] # Documents to append as an appendix to all manuals. # texinfo_appendices = [] # If false, no module index is generated. # texinfo_domain_indices = True # How to display URL addresses: 'footnote', 'no', or 'inline'. # texinfo_show_urls = 'footnote' # If true, do not generate a @detailmenu in the "Top" node's menu. # texinfo_no_detailmenu = False # -- Options for Internationalization output ------------------------------ locale_dirs = ['locale/'] ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315528.0 cinder-27.0.0/releasenotes/source/index.rst0000664000175000017500000000045300000000000020723 0ustar00zuulzuul00000000000000==================== Cinder Release Notes ==================== .. toctree:: :maxdepth: 1 unreleased 2025.1 2024.2 2024.1 2023.2 2023.1 zed yoga xena wallaby victoria ussuri train stein rocky queens pike ocata newton mitaka liberty ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315528.0 cinder-27.0.0/releasenotes/source/liberty.rst0000664000175000017500000001136600000000000021273 0ustar00zuulzuul00000000000000============================ Liberty Series Release Notes ============================ .. _Liberty Series Release Notes_7.0.3_stable_liberty: 7.0.3 ===== .. _Liberty Series Release Notes_7.0.3_stable_liberty_Security Issues: Security Issues --------------- .. releasenotes/notes/apply-limits-to-qemu-img-29f722a1bf4b91f8.yaml @ b'455b318ced717fb38dfe40014817d78fbc47dea5' - The qemu-img tool now has resource limits applied which prevent it from using more than 1GB of address space or more than 2 seconds of CPU time. This provides protection against denial of service attacks from maliciously crafted or corrupted disk images. .. _Liberty Series Release Notes_7.0.2_stable_liberty: 7.0.2 ===== .. _Liberty Series Release Notes_7.0.2_stable_liberty_Bug Fixes: Bug Fixes --------- .. releasenotes/notes/glance_v2_upload-939c5693bcc25483.yaml @ b'01555a940d0f84d2fbc98cd10905ca6aabe00c48' - upload-to-image using Image API v2 now correctly handles custom image properties. .. _Liberty Series Release Notes_7.0.1_stable_liberty: 7.0.1 ===== .. _Liberty Series Release Notes_7.0.1_stable_liberty_Bug Fixes: Bug Fixes --------- .. releasenotes/notes/a7401ead26a7c83b-keystone-url.yaml @ b'fa7d0916d849e9c6f93e08f8323eb2a886bcffc0' - Cinder will now correctly read Keystone's endpoint for quota calls from keystone_authtoken.auth_uri instead of keymgr.encryption_auth_url config option. .. releasenotes/notes/attach-failure-cleanup-c900497fce31410b.yaml @ b'c529bddcde41a0e70d1a20d4ead9c402e6c94d16' - If device attachment failed it could leave the volume partially attached. 
Cinder now tries to clean up on failure. .. releasenotes/notes/dell-sc-cgsnapshot-delete-7322950f925912c8.yaml @ b'c529bddcde41a0e70d1a20d4ead9c402e6c94d16' - Fixed an issue when deleting a consistency group snapshot with the Dell SC backend driver. .. releasenotes/notes/emc-scaleio-extend-volume-d7ecdb26f6e65825.yaml @ b'c529bddcde41a0e70d1a20d4ead9c402e6c94d16' - ScaleIO volumes need to be sized in increments of 8G. Handling added to volume extend operations to ensure the new size is rounded up to the nearest size when needed. .. releasenotes/notes/emc-scaleio-migration-44d554bb46158db2.yaml @ b'c529bddcde41a0e70d1a20d4ead9c402e6c94d16' - Fixed issue with the EMC ScaleIO driver not able to identify a volume after a migration is performed. .. releasenotes/notes/emc-scaleio-provisioning-type-f7542d50f62acecc.yaml @ b'c529bddcde41a0e70d1a20d4ead9c402e6c94d16' - An error has been corrected in the EMC ScaleIO driver that had caused all volumes to be provisioned at 'thick' even if user had specificed 'thin'. .. releasenotes/notes/emc-vmax-live-migration-bf960f4802979cae.yaml @ b'c529bddcde41a0e70d1a20d4ead9c402e6c94d16' - Fixed an issue with live migration when using the EMC VMAX driver. .. releasenotes/notes/emc-vmax-multiportgroup-7352386d5ffd3075.yaml @ b'c529bddcde41a0e70d1a20d4ead9c402e6c94d16' - Removed restriction of hard coded iSCSI IP address to allow the use of multiple iSCSI portgroups. .. releasenotes/notes/fix-keystone-quota-url-2018f32e80ed9fb5.yaml @ b'c529bddcde41a0e70d1a20d4ead9c402e6c94d16' - Fixed an error in quota handling that required the keystone encryption_auth_url to be configured even if no encryption was being used. .. releasenotes/notes/hnas-manage-spaces-eb1d05447536bf87.yaml @ b'c529bddcde41a0e70d1a20d4ead9c402e6c94d16' - Allow spaces when managing existing volumes with the HNAS iSCSI driver. .. releasenotes/notes/huawei-capacity-reporting-4f75ce622e57c28a.yaml @ b'c529bddcde41a0e70d1a20d4ead9c402e6c94d16' - Capacity reporting fixed with Huawei backend drivers. .. releasenotes/notes/lio-caseinsensitive-iqn-2324f7729d24a792.yaml @ b'c529bddcde41a0e70d1a20d4ead9c402e6c94d16' - IQN identification is now case-insensitive when using LIO. .. releasenotes/notes/netapp-volume-create-cleanup-c738114e42de1e69.yaml @ b'c529bddcde41a0e70d1a20d4ead9c402e6c94d16' - Better cleanup handling in the NetApp E-Series driver. .. releasenotes/notes/nimble-clone-extraspecs-27e2660f58b84f67.yaml @ b'c529bddcde41a0e70d1a20d4ead9c402e6c94d16' - Fixed issue with extra-specs not being applied when cloning a volume. .. releasenotes/notes/nimble-multi-initiator-8a3a58414c33f032.yaml @ b'c529bddcde41a0e70d1a20d4ead9c402e6c94d16' - Add ability to enable multi-initiator support to allow live migration in the Nimble backend driver. .. releasenotes/notes/subproject-quota-delete-3a22da070b578f8b.yaml @ b'c529bddcde41a0e70d1a20d4ead9c402e6c94d16' - Fixed issue with error being raised when performing a delete quota operation in a subproject. .. _Liberty Series Release Notes_7.0.1_stable_liberty_Other Notes: Other Notes ----------- .. releasenotes/notes/e99b24461613b6c8-start-using-reno.yaml @ b'62a79955eac7d1f247bea1ca479febb8b36349bc' - Start using reno to manage release notes. 
././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315576.8951166 cinder-27.0.0/releasenotes/source/locale/0000775000175000017500000000000000000000000020317 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315576.8951166 cinder-27.0.0/releasenotes/source/locale/en_GB/0000775000175000017500000000000000000000000021271 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315577.8191252 cinder-27.0.0/releasenotes/source/locale/en_GB/LC_MESSAGES/0000775000175000017500000000000000000000000023056 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315528.0 cinder-27.0.0/releasenotes/source/locale/en_GB/LC_MESSAGES/releasenotes.po0000664000175000017500000277556000000000000026135 0ustar00zuulzuul00000000000000# Andi Chandler , 2017. #zanata # Andi Chandler , 2018. #zanata # Andi Chandler , 2019. #zanata # Andi Chandler , 2020. #zanata # Andi Chandler , 2021. #zanata # Andi Chandler , 2022. #zanata # Andi Chandler , 2023. #zanata # Andi Chandler , 2024. #zanata msgid "" msgstr "" "Project-Id-Version: Cinder Release Notes\n" "Report-Msgid-Bugs-To: \n" "POT-Creation-Date: 2025-07-07 22:33+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2024-12-04 03:04+0000\n" "Last-Translator: Andi Chandler \n" "Language-Team: English (United Kingdom)\n" "Language: en_GB\n" "X-Generator: Zanata 4.3.3\n" "Plural-Forms: nplurals=2; plural=(n != 1)\n" msgid "" "\"volume_extension:types_extra_specs:create\": \"rule:admin or rule:" "type_admin\", \"volume_extension:types_extra_specs:delete\": \"rule:admin or " "rule:type_admin\", \"volume_extension:types_extra_specs:index\": \"\", " "\"volume_extension:types_extra_specs:show\": \"rule:admin or rule:type_admin " "or rule:type_viewer\", \"volume_extension:types_extra_specs:update\": \"rule:" "admin or rule:type_admin\"" msgstr "" "\"volume_extension:types_extra_specs:create\": \"rule:admin or rule:" "type_admin\", \"volume_extension:types_extra_specs:delete\": \"rule:admin or " "rule:type_admin\", \"volume_extension:types_extra_specs:index\": \"\", " "\"volume_extension:types_extra_specs:show\": \"rule:admin or rule:type_admin " "or rule:type_viewer\", \"volume_extension:types_extra_specs:update\": \"rule:" "admin or rule:type_admin\"" msgid "'backup' for backup status resets" msgstr "'backup' for backup status resets" msgid "'backupStatusUpdate' for backup status resets" msgstr "'backupStatusUpdate' for backup status resets" msgid "'snapshot' for snapshot status resets" msgstr "'snapshot' for snapshot status resets" msgid "'volume' for volume status resets" msgstr "'volume' for volume status resets" msgid "'volumeStatusUpdate' for snapshot status resets" msgstr "'volumeStatusUpdate' for snapshot status resets" msgid "'volumeStatusUpdate' for volume status resets" msgstr "'volumeStatusUpdate' for volume status resets" msgid "" "(Note that whether a volume should be considered \"full\", even if it " "doesn't contain exactly *n* GiB of data for an *n* GiB volume, can depend " "upon the storage backend technology used.)" msgstr "" "(Note that whether a volume should be considered \"full\", even if it " "doesn't contain exactly *n* GiB of data for an *n* GiB volume, can depend " "upon the storage backend technology used.)" 
msgid "**Anomalies with encrypted volumes**" msgstr "**Anomalies with encrypted volumes**" msgid "**Cinder use of cgroups v1**" msgstr "**Cinder use of cgroups v1**" msgid "**NFS-based backend drivers and qcow2 version 2 images**" msgstr "**NFS-based backend drivers and qcow2 version 2 images**" msgid "**NVMe-oF issues**" msgstr "**NVMe-oF issues**" msgid "**Policy configuration changes**" msgstr "**Policy configuration changes**" msgid "**RBD driver: Enable Ceph V2 Clone API and Ceph Trash auto purge**" msgstr "**RBD driver: Enable Ceph V2 Clone API and Ceph Trash auto purge**" msgid "**Supported Ceph versions**" msgstr "**Supported Ceph versions**" msgid "10.0.0" msgstr "10.0.0" msgid "10.0.1" msgstr "10.0.1" msgid "10.0.3" msgstr "10.0.3" msgid "10.0.4" msgstr "10.0.4" msgid "10.0.5" msgstr "10.0.5" msgid "10.0.7" msgstr "10.0.7" msgid "10.0.8" msgstr "10.0.8" msgid "10.0.8-20" msgstr "10.0.8-20" msgid "11.0.0" msgstr "11.0.0" msgid "11.0.1" msgstr "11.0.1" msgid "11.0.2" msgstr "11.0.2" msgid "11.1.1" msgstr "11.1.1" msgid "11.2.0" msgstr "11.2.0" msgid "11.2.1" msgstr "11.2.1" msgid "11.2.2" msgstr "11.2.2" msgid "11.2.2-15" msgstr "11.2.2-15" msgid "12.0.0" msgstr "12.0.0" msgid "12.0.1" msgstr "12.0.1" msgid "12.0.10" msgstr "12.0.10" msgid "12.0.10-10" msgstr "12.0.10-10" msgid "12.0.2" msgstr "12.0.2" msgid "12.0.3" msgstr "12.0.3" msgid "12.0.4" msgstr "12.0.4" msgid "12.0.5" msgstr "12.0.5" msgid "12.0.6" msgstr "12.0.6" msgid "12.0.7" msgstr "12.0.7" msgid "12.0.8" msgstr "12.0.8" msgid "128 volumes have already been created in the RAID Group." msgstr "128 volumes have already been created in the RAID Group." msgid "13.0.0" msgstr "13.0.0" msgid "13.0.1" msgstr "13.0.1" msgid "13.0.2" msgstr "13.0.2" msgid "13.0.3" msgstr "13.0.3" msgid "13.0.4" msgstr "13.0.4" msgid "13.0.6" msgstr "13.0.6" msgid "13.0.7" msgstr "13.0.7" msgid "13.0.8" msgstr "13.0.8" msgid "13.0.9" msgstr "13.0.9" msgid "13.0.9-7" msgstr "13.0.9-7" msgid "14.0.0" msgstr "14.0.0" msgid "14.0.1" msgstr "14.0.1" msgid "14.0.2" msgstr "14.0.2" msgid "14.0.3" msgstr "14.0.3" msgid "14.0.4" msgstr "14.0.4" msgid "14.1.0" msgstr "14.1.0" msgid "14.2.0" msgstr "14.2.0" msgid "14.2.1" msgstr "14.2.1" msgid "14.3.0" msgstr "14.3.0" msgid "14.3.1" msgstr "14.3.1" msgid "14.3.1-9" msgstr "14.3.1-9" msgid "15.0.0" msgstr "15.0.0" msgid "15.0.1" msgstr "15.0.1" msgid "15.1.0" msgstr "15.1.0" msgid "15.2.0" msgstr "15.2.0" msgid "15.3.0" msgstr "15.3.0" msgid "15.4.0" msgstr "15.4.0" msgid "15.4.1" msgstr "15.4.1" msgid "15.5.0" msgstr "15.5.0" msgid "15.6.0" msgstr "15.6.0" msgid "15.6.0-22" msgstr "15.6.0-22" msgid "16.0.0" msgstr "16.0.0" msgid "16.1.0" msgstr "16.1.0" msgid "16.2.0" msgstr "16.2.0" msgid "16.2.1" msgstr "16.2.1" msgid "16.3.0" msgstr "16.3.0" msgid "16.4.0" msgstr "16.4.0" msgid "16.4.1" msgstr "16.4.1" msgid "16.4.2" msgstr "16.4.2" msgid "16.4.2-17" msgstr "16.4.2-17" msgid "17.0.0" msgstr "17.0.0" msgid "17.0.1" msgstr "17.0.1" msgid "17.1.0" msgstr "17.1.0" msgid "17.2.0" msgstr "17.2.0" msgid "17.3.0" msgstr "17.3.0" msgid "17.4.0" msgstr "17.4.0" msgid "17.4.0-11" msgstr "17.4.0-11" msgid "18.0.0" msgstr "18.0.0" msgid "18.1.0" msgstr "18.1.0" msgid "18.2.0" msgstr "18.2.0" msgid "18.2.1" msgstr "18.2.1" msgid "18.2.1-12" msgstr "18.2.1-12" msgid "19.0.0" msgstr "19.0.0" msgid "19.1.0" msgstr "19.1.0" msgid "19.1.1" msgstr "19.1.1" msgid "19.2.0" msgstr "19.2.0" msgid "19.3.0" msgstr "19.3.0" msgid "19.3.0-2" msgstr "19.3.0-2" msgid "" "2. 
The FlexGroup pool has a different view of aggregate capabilites, " "changing them by a list of elements, instead of a single element. They are " "``netapp_aggregate``, ``netapp_raid_type``, ``netapp_disk_type`` and " "``netapp_hybrid_aggregate``. The ``netapp_aggregate_used_percent`` " "capability is an average of used percent of all FlexGroup's aggregates." msgstr "" "2. The FlexGroup pool has a different view of aggregate capabilities, " "changing them by a list of elements, instead of a single element. They are " "``netapp_aggregate``, ``netapp_raid_type``, ``netapp_disk_type`` and " "``netapp_hybrid_aggregate``. The ``netapp_aggregate_used_percent`` " "capability is an average of the used percentage of all FlexGroup's " "aggregates." msgid "20.0.0" msgstr "20.0.0" msgid "20.0.1" msgstr "20.0.1" msgid "20.1.0" msgstr "20.1.0" msgid "20.2.0" msgstr "20.2.0" msgid "20.3.0" msgstr "20.3.0" msgid "20.3.1" msgstr "20.3.1" msgid "20.3.2" msgstr "20.3.2" msgid "2023.1 Series Release Notes" msgstr "2023.1 Series Release Notes" msgid "2023.1-eom" msgstr "2023.1-eom" msgid "2023.2 Series Release Notes" msgstr "2023.2 Series Release Notes" msgid "2024.1 Series Release Notes" msgstr "2024.1 Series Release Notes" msgid "2024.2 Series Release Notes" msgstr "2024.2 Series Release Notes" msgid "21.0.0" msgstr "21.0.0" msgid "21.1.0" msgstr "21.1.0" msgid "21.2.0" msgstr "21.2.0" msgid "21.3.0" msgstr "21.3.0" msgid "21.3.1" msgstr "21.3.1" msgid "22.0.0" msgstr "22.0.0" msgid "22.1.0" msgstr "22.1.0" msgid "22.1.1" msgstr "22.1.1" msgid "22.1.2" msgstr "22.1.2" msgid "22.2.0" msgstr "22.2.0" msgid "23.0.0" msgstr "23.0.0" msgid "23.1.0" msgstr "23.1.0" msgid "23.2.0" msgstr "23.2.0" msgid "23.3.0" msgstr "23.3.0" msgid "24.0.0" msgstr "24.0.0" msgid "24.1.0" msgstr "24.1.0" msgid "24.2.0" msgstr "24.2.0" msgid "25.0.0" msgstr "25.0.0" msgid "" "3. The ``utilization`` capability is not calculated to FlexGroup pools, it " "is always set to default of 50." msgstr "" "3. The ``utilization`` capability is not calculated to FlexGroup pools, it " "is always set to the default of 50." msgid "" "3PAR driver creates FC VLUN of match-set type instead of host sees. With " "match-set, the host will see the virtual volume on specified NSP (Node-Slot-" "Port). This change in vlun type fixes bug 1577993." msgstr "" "3PAR driver creates FC VLUN of match-set type instead of host sees. With " "match-set, the host will see the virtual volume on specified NSP (Node-Slot-" "Port). This change in vlun type fixes bug 1577993." msgid "" "4. The driver cannot support consistency group with volumes that are over " "FlexGroup pools." msgstr "" "4. The driver cannot support consistency groups with volumes that are over " "FlexGroup pools." msgid "" "5. The driver cannot support multi-attach with volumes that are over " "FlexGroup pools." msgstr "" "5. The driver cannot support multi-attach with volumes that are over " "FlexGroup pools." msgid "" "6. For volumes over the FlexGroup pool, the operations of clone volume, " "create snapshot and create volume from an image are implemented as the NFS " "generic driver. Hence, it does not rely on the ONTAP storage to perform " "those operations." msgstr "" "6. For volumes over the FlexGroup pool, the operations of clone volume, " "create snapshot and create volume from an image are implemented as the NFS " "generic driver. Hence, it does not rely on the ONTAP storage to perform " "those operations." msgid "" "7. A driver with FlexGroup pools has snapshot support disabled by default. 
" "To enable, you must set ``nfs_snapshot_support`` to true in the backend's " "configuration section of the cinder configuration file." msgstr "" "7. A driver with FlexGroup pools has snapshot support disabled by default. " "To enable, you must set ``nfs_snapshot_support`` to true in the backend's " "configuration section of the cinder configuration file." msgid "7.0.1" msgstr "7.0.1" msgid "7.0.2" msgstr "7.0.2" msgid "7.0.3" msgstr "7.0.3" msgid "" "8. The driver image cache is not applied for volumes over FlexGroup pools. " "It can use the core image cache for avoiding downloading twice, though." msgstr "" "8. The driver image cache is not applied for volumes over FlexGroup pools. " "It can use the core image cache for avoiding downloading twice, though." msgid "8.0.0" msgstr "8.0.0" msgid "8.1.0" msgstr "8.1.0" msgid "8.1.1" msgstr "8.1.1" msgid "8.1.1-11" msgstr "8.1.1-11" msgid "" "9. Given that the FlexGroup pool may be on several cluster nodes, the QoS " "minimum support is only enabled if all nodes support it." msgstr "" "9. Given that the FlexGroup pool may be on several cluster nodes, the QoS " "minimum support is only enabled if all nodes support it." msgid "9.0.0" msgstr "9.0.0" msgid "9.1.0" msgstr "9.1.0" msgid "9.1.1" msgstr "9.1.1" msgid "9.1.2" msgstr "9.1.2" msgid "" "A bug in the Quobyte driver was fixed that prevented backing up volumes and " "snapshots" msgstr "" "A bug in the Quobyte driver was fixed that prevented backing up volumes and " "snapshots" msgid "" "A general framework to accommodate hardware compression accelerators for " "compression of volumes uploaded to the Image service (Glance) as images and " "decompression of compressed images used to create volumes is introduced." msgstr "" "A general framework to accommodate hardware compression accelerators for " "compression of volumes uploaded to the Image service (Glance) as images and " "decompression of compressed images used to create volumes is introduced." msgid "" "A key aspect of *supported* drivers is that there is a fully functioning " "third-party CI system that reports on all proposed changes to the cinder " "code. However, the fact that there are 'unsupported' drivers at all " "indicates that third-party CI systems are unfortunately not always fully " "functioning. You may consult the report at the following link to learn the " "extent to which a particular driver's CI system is reporting on the " "development branch of cinder:" msgstr "" "A key aspect of *supported* drivers is that there is a fully functioning " "third-party CI system that reports on all proposed changes to the Cinder " "code. However, the fact that there are 'unsupported' drivers at all " "indicates that third-party CI systems are unfortunately not always fully " "functioning. You may consult the report at the following link to learn the " "extent to which a particular driver's CI system is reporting on the " "development branch of Cinder:" msgid "" "A lock in the volume manager flow generally prevents this on normal clone " "volume operations, but this clone method in the driver is called for " "operations such as cloning from the cinder image-volume cache or cloning " "from a cinder backend used as a glance store." msgstr "" "A lock in the volume manager flow generally prevents this on normal clone " "volume operations, but this clone method in the driver is called for " "operations such as cloning from the Cinder image-volume cache or cloning " "from a Cinder backend used as a Glance store." 
msgid "" "A new API microversion V3.67 is introduced to inform clients when inclusion " "of a project_id in API URLs is optional. The V3.67 microversion is only used " "as an indicator that the API accepts a URL without a project_id, and this " "applies to all requests regardless of the microversion in the request. For " "example, an API node serving V3.67 or greater will accept a URL without a " "project_id even if the request asks for V3.0. Likewise, it will accept a URL " "containing a project_id even if the request asks for V3.67." msgstr "" "A new API microversion V3.67 is introduced to inform clients when inclusion " "of a project_id in API URLs is optional. The V3.67 microversion is only used " "as an indicator that the API accepts a URL without a project_id, and this " "applies to all requests regardless of the microversion in the request. For " "example, an API node serving V3.67 or greater will accept a URL without a " "project_id even if the request asks for V3.0. Likewise, it will accept a URL " "containing a project_id even if the request asks for V3.67." msgid "" "A new API to display the volumes summary. This summary API displays the " "total number of volumes and total volume's size in GB." msgstr "" "A new API to display the volumes summary. This summary API displays the " "total number of volumes and total volume's size in GB." msgid "" "A new check is added to the ``cinder-status upgrade check`` CLI to check for " "the configuration of CoprHD, HGST or ITRI DISCO drivers. These drivers were " "removed in the Stein release and should not be configured at the time of " "upgrade." msgstr "" "A new check is added to the ``cinder-status upgrade check`` CLI to check for " "the configuration of CoprHD, HGST or ITRI DISCO drivers. These drivers were " "removed in the Stein release and should not be configured at the time of " "upgrade." msgid "" "A new check is added to the ``cinder-status upgrade check`` CLI to check for " "the use of ``cinder.volume.drivers.windows.windows.WindowsDriver`` and a " "message is reported that the user needs to update the setting to ``cinder." "volume.drivers.windows.iscsi.WindowsISCSIDriver`` if it is encountered." msgstr "" "A new check is added to the ``cinder-status upgrade check`` CLI to check for " "the use of ``cinder.volume.drivers.windows.windows.WindowsDriver`` and a " "message is reported that the user needs to update the setting to ``cinder." "volume.drivers.windows.iscsi.WindowsISCSIDriver`` if it is encountered." msgid "" "A new check is added to the ``cinder-status upgrade check`` CLI to check for " "the use of backup driver module path instead of full driver class path in " "the ``backup_driver`` configuration setting." msgstr "" "A new check is added to the ``cinder-status upgrade check`` CLI to check for " "the use of backup driver module path instead of full driver class path in " "the ``backup_driver`` configuration setting." msgid "" "A new check is added to the ``cinder-status upgrade check`` CLI to check for " "the use of the deprecated ``cinder.quota.NestedDbQuotaDriver``. This driver " "will be replaced by a new, OpenStack-wide, nested quota management." msgstr "" "A new check is added to the ``cinder-status upgrade check`` CLI to check for " "the use of the deprecated ``cinder.quota.NestedDbQuotaDriver``. This driver " "will be replaced by a new, OpenStack-wide, nested quota management." msgid "" "A new cinder-manage command, reset_active_backend, was added to promote a " "failed-over backend participating in replication. 
This allows you to reset " "a backend without manually editing the database. A backend undergoing " "promotion using this command is expected to be in a disabled and frozen " "state. Support for both standalone and clustered backend configurations are " "supported." msgstr "" "A new cinder-manage command, reset_active_backend, was added to promote a " "failed-over backend participating in replication. This allows you to reset " "a backend without manually editing the database. A backend undergoing " "promotion using this command is expected to be in a disabled and frozen " "state. Support for both standalone and clustered backend configurations are " "supported." msgid "" "A new module, ``cinder.wsgi``, has been added as a place to gather WSGI " "``application`` objects. This is intended to ease deployment by providing a " "consistent location for these objects. For example, if using uWSGI then " "instead of:" msgstr "" "A new module, ``cinder.wsgi``, has been added as a place to gather WSGI " "``application`` objects. This is intended to ease deployment by providing a " "consistent location for these objects. For example, if using uWSGI then " "instead of:" msgid "" "A new policy was introduced as part of the feature described in the `User " "visible extra specs `_ section of the `Cinder " "Administration Guide`:" msgstr "" "A new policy was introduced as part of the feature described in the `User " "visible extra specs `_ section of the `Cinder " "Administration Guide`:" msgid "" "A new policy was introduced to govern an operation previously controlled by " "a policy that is not being removed, but whose other governed actions are " "conceptually different:" msgstr "" "A new policy was introduced to govern an operation previously controlled by " "a policy that is not being removed, but whose other governed actions are " "conceptually different:" msgid "" "A new target, NVMET, is added for the LVM driver over RDMA, it allows cinder " "to use nvmetcli in order to create/delete subsystems on attaching/detaching " "an LVM volume to/from an instance." msgstr "" "A new target, NVMET, is added for the LVM driver over RDMA, it allows Cinder " "to use nvmetcli in order to create/delete subsystems on attaching/detaching " "an LVM volume to/from an instance." msgid "" "A new target, spdk-nvmeof, is added for the SPDK driver over RDMA. It allows " "cinder to use SPDK target in order to create/delete subsystems on attaching/" "detaching an SPDK volume to/from an instance." msgstr "" "A new target, spdk-nvmeof, is added for the SPDK driver over RDMA. It allows " "Cinder to use SPDK target in order to create/delete subsystems on attaching/" "detaching an SPDK volume to/from an instance." msgid "" "A new volume driver, SPDK, is added for Storage Performance Development Kit " "NVMe-oF target handling, that allows Cinder to manage volumes in SPDK NVMe-" "oF driver." msgstr "" "A new volume driver, SPDK, is added for Storage Performance Development Kit " "NVMe-oF target handling, that allows Cinder to manage volumes in SPDK NVMe-" "oF driver." msgid "A new volume is created without a type" msgstr "A new volume is created without a type" msgid "" "A request to delete the currently configured ``default_volume_type`` will " "fail. (You can delete that volume-type, but you cannot do it while it is " "the value of the configuration option.)" msgstr "" "A request to delete the currently configured ``default_volume_type`` will " "fail. 
(You can delete that volume-type, but you cannot do it while it is " "the value of the configuration option.)" msgid "" "A similar situation can arise when a user creates a volume of an encrypted " "volume type from an image in Glance. If the image happens to be sized very " "close to the gibibyte boundary given by the requested volume size, the " "operation may fail if the image data plus the encryption metadata exceeds " "the requested volume size." msgstr "" "A similar situation can arise when a user creates a volume of an encrypted " "volume type from an image in Glance. If the image happens to be sized very " "close to the gibibyte boundary given by the requested volume size, the " "operation may fail if the image data plus the encryption metadata exceeds " "the requested volume size." msgid "" "A small list of volume type extra specs are now visible to regular users, " "and not just to cloud administrators. Cloud administrators that wish to opt " "out of this feature should consult the \"Security considerations\" portion " "of the \"User visible extra specs\" section in the Cinder Administration " "guide." msgstr "" "A small list of volume type extra specs are now visible to regular users, " "and not just to cloud administrators. Cloud administrators that wish to opt " "out of this feature should consult the \"Security considerations\" portion " "of the \"User visible extra specs\" section in the Cinder Administration " "guide." msgid "" "A small list of volume type extra specs are now visible to regular users, " "and not just to cloud administrators. This allows users to see non-senstive " "extra specs, which may help them choose a particular volume type when " "creating volumes. Sensitive extra specs are still only visible to cloud " "administrators. See the \"User visible extra specs\" section in the Cinder " "Administration guide for more information." msgstr "" "A small list of volume type extra specs are now visible to regular users, " "and not just to cloud administrators. This allows users to see non-sensitive " "extra specs, which may help them choose a particular volume type when " "creating volumes. Sensitive extra specs are still only visible to cloud " "administrators. See the \"User visible extra specs\" section in the Cinder " "Administration guide for more information." msgid "" "A volume in ``available`` or ``error`` status can be re-imaged directly. To " "re-image a volume in ``reserved`` status, you must include the " "``reimage_reserved`` parameter set to ``true``. When reimaging a volume, the " "volume state will be changed to ``downloading`` first." msgstr "" "A volume in ``available`` or ``error`` status can be re-imaged directly. To " "re-image a volume in ``reserved`` status, you must include the " "``reimage_reserved`` parameter set to ``true``. When reimaging a volume, the " "volume state will be changed to ``downloading`` first." msgid "" "A warning has been added to the ``cinder-status upgrade check`` CLI if a " "``policy.json`` file is present. Documentation has been updated to correct " "the file as ``policy.yaml`` if any policies need to be changed from their " "defaults." msgstr "" "A warning has been added to the ``cinder-status upgrade check`` CLI if a " "``policy.json`` file is present. Documentation has been updated to correct " "the file as ``policy.yaml`` if any policies need to be changed from their " "defaults." 
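#
# The ``cinder.wsgi`` entry above ends with "if using uWSGI then instead of:";
# a sketch of the intended change to a uWSGI ini file, assuming the old
# deployment pointed at the generated cinder-wsgi script and that the new
# application object is exposed as ``cinder.wsgi.api:application`` (verify the
# exact module path against your release's documentation):
#
#   [uwsgi]
#   ; before
#   ; wsgi-file = /usr/local/bin/cinder-wsgi
#   ; after
#   module = cinder.wsgi.api:application
#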
msgid "" "A warning has been added to the ``cinder-status upgrade check`` CLI to " "detect whether the ``periodic_interval`` option has been modified from its " "default value to remind you which of the above situations currently applies " "to you." msgstr "" "A warning has been added to the ``cinder-status upgrade check`` CLI to " "detect whether the ``periodic_interval`` option has been modified from its " "default value to remind you which of the above situations currently applies " "to you." msgid "API request: ``POST /v3/{project_id}/types``" msgstr "API request: ``POST /v3/{project_id}/types``" msgid "" "Ability to add minimum and maximum volume size restrictions which can be set " "on a per volume-type granularity. New volume type keys of 'provisioning:" "min_vol_size' and 'provisioning:max_vol_size'." msgstr "" "Ability to add minimum and maximum volume size restrictions which can be set " "on a per volume-type granularity. New volume type keys of 'provisioning:" "min_vol_size' and 'provisioning:max_vol_size'." msgid "" "Add 'LUNType' configuration verification for Huawei driver when connecting " "to Dorado array. Because Dorado array only supports 'Thin' lun type, so " "'LUNType' only can be configured as 'Thin', any other type is invalid and if " "'LUNType' not explicitly configured, by default use 'Thin' for Dorado array." msgstr "" "Add 'LUNType' configuration verification for Huawei driver when connecting " "to Dorado array. Because Dorado array only supports 'Thin' LUN type, so " "'LUNType' only can be configured as 'Thin', any other type is invalid and if " "'LUNType' not explicitly configured, by default use 'Thin' for Dorado array." msgid "" "Add 'display_name' and 'display_description' validation for creating/" "updating snapshot and volume operations." msgstr "" "Add 'display_name' and 'display_description' validation for creating/" "updating snapshot and volume operations." msgid "Add CG capability to generic volume groups in Huawei driver." msgstr "Add CG capability to generic volume groups in Huawei driver." msgid "Add CG capability to generic volume groups in INFINIDAT driver." msgstr "Add CG capability to generic volume groups in INFINIDAT driver." msgid "Add Dell EMC PowerStore Storage Driver (iSCSI, FC)." msgstr "Add Dell EMC PowerStore Storage Driver (iSCSI, FC)." msgid "Add Peer Persistence support for HPE Primera backend." msgstr "Add Peer Persistence support for HPE Primera backend." msgid "" "Add Python 3 support to the Brocade Zone Manager driver. (bug #1888548)." msgstr "" "Add Python 3 support to the Brocade Zone Manager driver. (bug #1888548)." msgid "" "Add Support for QoS in the Nimble Storage driver. QoS is available from " "Nimble OS release 4.x and above." msgstr "" "Add Support for QoS in the Nimble Storage driver. QoS is available from " "Nimble OS release 4.x and above." msgid "Add Support for deduplication of volumes in the Nimble Storage driver." msgstr "" "Add Support for de-duplication of volumes in the Nimble Storage driver." msgid "Add ``admin_or_storage_type_admin`` rule to ``policy.json``, e.g." msgstr "Add ``admin_or_storage_type_admin`` rule to ``policy.json``, e.g." msgid "" "Add ``all_tenants``, ``project_id`` support in attachment list&detail APIs." msgstr "" "Add ``all_tenants``, ``project_id`` support in attachment list&detail APIs." msgid "" "Add ``all_tenants``, ``project_id`` support in the attachment list and " "detail APIs." msgstr "" "Add ``all_tenants``, ``project_id`` support in the attachment list and " "detail APIs." 
msgid "Add ``storage_type_admin`` role." msgstr "Add ``storage_type_admin`` role." msgid "" "Add ``user_id`` attribute to response body of list backup with detail and " "show backup detail APIs." msgstr "" "Add ``user_id`` attribute to response body of list backup with detail and " "show backup detail APIs." msgid "Add ``user_id`` field to snapshot list/detail and snapshot show." msgstr "Add ``user_id`` field to snapshot list/detail and snapshot show." msgid "Add ``volume-type`` filter to API Get-Pools" msgstr "Add ``volume-type`` filter to API Get-Pools" msgid "Add a new config option 'zadara_access_key': Zadara VPSA access key." msgstr "Add a new config option 'zadara_access_key': Zadara VPSA access key." msgid "" "Add ability to call failover-host on a replication enabled SF cluster a " "second time with host id = default to initiate a failback to the default " "configured SolidFire Cluster." msgstr "" "Add ability to call failover-host on a replication enabled SF cluster a " "second time with host id = default to initiate a failback to the default " "configured SolidFire Cluster." msgid "" "Add ability to enable multi-initiator support to allow live migration in the " "Nimble backend driver." msgstr "" "Add ability to enable multi-initiator support to allow live migration in the " "Nimble backend driver." msgid "" "Add ability to extend ``in-use`` volume. User should be aware of the whole " "environment before using this feature because it's dependent on several " "external factors below:" msgstr "" "Add ability to extend ``in-use`` volume. User should be aware of the whole " "environment before using this feature because it's dependent on several " "external factors below:" msgid "Add ability to specify backup driver via class name." msgstr "Add ability to specify backup driver via class name." msgid "Add backup snapshots support for Storwize/SVC driver." msgstr "Add backup snapshots support for Storwize/SVC driver." msgid "Add chap authentication support for the vmax backend." msgstr "Add CHAP authentication support for the vmax backend." msgid "" "Add consistency group capability to Generic Volume Groups in the Dell EMC SC " "driver." msgstr "" "Add consistency group capability to Generic Volume Groups in the Dell EMC SC " "driver." msgid "" "Add consistency group capability to generic volume groups in Storwize " "drivers." msgstr "" "Add consistency group capability to generic volume groups in Storwize " "drivers." msgid "Add consistency group replication support in XIV\\A9000 Cinder driver." msgstr "Add consistency group replication support in XIV\\A9000 Cinder driver." msgid "" "Add consistent group capability to generic volume groups in CoprHD driver." msgstr "" "Add consistent group capability to generic volume groups in CoprHD driver." msgid "" "Add consistent group capability to generic volume groups in Lefthand driver." msgstr "" "Add consistent group capability to generic volume groups in Lefthand driver." msgid "" "Add consistent group capability to generic volume groups in Pure drivers." msgstr "" "Add consistent group capability to generic volume groups in Pure drivers." msgid "Add consistent group capability to generic volume groups in VNX driver." msgstr "" "Add consistent group capability to generic volume groups in VNX driver." msgid "" "Add consistent group capability to generic volume groups in XIV, Spectrum " "Accelerate and A9000/R storage systems." 
msgstr "" "Add consistent group capability to generic volume groups in XIV, Spectrum " "Accelerate and A9000/R storage systems." msgid "" "Add consistent group capability to generic volume groups in the SolidFire " "driver." msgstr "" "Add consistent group capability to generic volume groups in the SolidFire " "driver." msgid "" "Add consistent group capability to generic volume groups in the XtremIO " "driver." msgstr "" "Add consistent group capability to generic volume groups in the XtremIO " "driver." msgid "" "Add consistent group snapshot support to generic volume groups in VMAX " "driver version 3.0." msgstr "" "Add consistent group snapshot support to generic volume groups in VMAX " "driver version 3.0." msgid "" "Add consistent replication group support in Dell EMC VMAX cinder driver." msgstr "" "Add consistent replication group support in Dell EMC VMAX cinder driver." msgid "Add consistent replication group support in Storwize Cinder driver." msgstr "Add consistent replication group support in Storwize Cinder driver." msgid "Add consistent replication group support in VNX cinder driver." msgstr "Add consistent replication group support in VNX cinder driver." msgid "" "Add enhanced support to the QNAP Cinder driver, including 'CHAP', 'Thin " "Provision', 'SSD Cache', 'Dedup' and 'Compression'." msgstr "" "Add enhanced support to the QNAP Cinder driver, including 'CHAP', 'Thin " "Provision', 'SSD Cache', 'Dedup' and 'Compression'." msgid "Add filter, sorter and pagination support in group snapshot listings." msgstr "Add filter, sorter and pagination support in group snapshot listings." msgid "Add filters support to get_pools API v3.28." msgstr "Add filters support to get_pools API v3.28." msgid "" "Add get_manageable_volumes and get_manageable_snapshots implementations for " "Pure Storage Volume Drivers." msgstr "" "Add get_manageable_volumes and get_manageable_snapshots implementations for " "Pure Storage Volume Drivers." msgid "" "Add global mirror with change volumes(gmcv) support and user can manage gmcv " "replication volume by SVC driver. An example to set a gmcv replication " "volume type, set property replication_type as \" gmcv\", property " "replication_enabled as \" True\" and set property drivers:" "cycle_period_seconds as 500." msgstr "" "Add global mirror with change volumes(gmcv) support and user can manage gmcv " "replication volume by SVC driver. An example to set a gmcv replication " "volume type, set property replication_type as \" gmcv\", property " "replication_enabled as \" True\" and set property drivers:" "cycle_period_seconds as 500." msgid "" "Add granularity to the ``volume_extension:volume_type_encryption`` policy " "with the addition of distinct actions for create, get, update, and delete:" msgstr "" "Add granularity to the ``volume_extension:volume_type_encryption`` policy " "with the addition of distinct actions for create, get, update, and delete:" msgid "" "Add microversion 3.68 to support the ability to re-image a volume with a " "specific image. Specify the ``os-reimage`` action in the request body." msgstr "" "Add microversion 3.68 to support the ability to re-image a volume with a " "specific image. Specify the ``os-reimage`` action in the request body." msgid "Add mirrored volume support in IBM SVC/Storwize driver." msgstr "Add mirrored volume support in IBM SVC/Storwize driver." msgid "Add multipath enhancement to Storwize iSCSI driver." msgstr "Add multipath enhancement to Storwize iSCSI driver." 
msgid "" "Add option `max_luns_per_storage_group` back. The max LUNs per storage group " "was set to 255 before. With the new option, admin can set it to a larger " "number." msgstr "" "Add option `max_luns_per_storage_group` back. The max LUNs per storage group " "was set to 255 before. With the new option, admin can set it to a larger " "number." msgid "Add provider_id in the detailed view of a volume for admin." msgstr "Add provider_id in the detailed view of a volume for admin." msgid "Add replication consistency group support in DS8K cinder driver." msgstr "Add replication consistency group support in DS8K cinder driver." msgid "Add retype functionality to VMAX driver version 3.0." msgstr "Add retype functionality to VMAX driver version 3.0." msgid "Add revert to snapshot API and support in LVM driver." msgstr "Add revert to snapshot API and support in LVM driver." msgid "Add reverting to snapshot support in Pure Storage Cinder driver." msgstr "Add reverting to snapshot support in Pure Storage Cinder driver." msgid "Add reverting to snapshot support in Storwize Cinder driver." msgstr "Add reverting to snapshot support in Storwize Cinder driver." msgid "Add support for deferred deletion in the RBD volume driver." msgstr "Add support for deferred deletion in the RBD volume driver." msgid "Add support for hybrid aggregates to the NetApp cDOT drivers." msgstr "Add support for hybrid aggregates to the NetApp cDOT drivers." msgid "Add support for reporting pool disk type in Huawei driver." msgstr "Add support for reporting pool disk type in Huawei driver." msgid "Add support for sorting backups by \"name\"." msgstr "Add support for sorting backups by \"name\"." msgid "" "Add support to backup volume using snapshot in the Unity driver, which " "enables backing up of volumes that are in-use." msgstr "" "Add support to backup volume using snapshot in the Unity driver, which " "enables backing up of volumes that are in-use." msgid "Add support to backup volume using snapshot in the Unity driver." msgstr "Add support to backup volume using snapshot in the Unity driver." msgid "Add support to configure IO ports option in Dell EMC Unity driver." msgstr "Add support to configure IO ports option in Dell EMC Unity driver." msgid "Add support to force detach a volume from all hosts on 3PAR." msgstr "Add support to force detach a volume from all hosts on 3PAR." msgid "Add support to force detach a volume from all hosts on Unity." msgstr "Add support to force detach a volume from all hosts on Unity." msgid "Add support to force detach a volume from all hosts on VNX." msgstr "Add support to force detach a volume from all hosts on VNX." msgid "" "Add the clone_across_pools driver capability Drivers can now declare that " "they can clone a volume into a different pool. Essentially, if this " "capability is declared, Cinder will skip the check that the pool of the " "destination volume is the same as the pool of the source volume. Some " "drivers do not have such a restriction and it may be possible to complete " "the \"create volume from image\" operation very efficiently instead of " "falling back to the \"attach and dd\" option. This affects creating a volume " "from an image with and without the image cache. For more details please " "check `bp clone_across_pools `__" msgstr "" "Add the clone_across_pools driver capability Drivers can now declare that " "they can clone a volume into a different pool. 
Essentially, if this " "capability is declared, Cinder will skip the check that the pool of the " "destination volume is the same as the pool of the source volume. Some " "drivers do not have such a restriction and it may be possible to complete " "the \"create volume from image\" operation very efficiently instead of " "falling back to the \"attach and dd\" option. This affects creating a volume " "from an image with and without the image cache. For more details please " "check `bp clone_across_pools `__" msgid "" "Add the new ``os-extend_volume_completion`` volume action, which the Nova " "compute agent can use to notify Cinder that it has finished handling the " "``volume-extended`` external server event." msgstr "" "Add the new ``os-extend_volume_completion`` volume action, which the Nova " "compute agent can use to notify Cinder that it has finished handling the " "``volume-extended`` external server event." msgid "" "Add thin clone support in the Unity driver. Unity storage supports the thin " "clone of a LUN from OE version 4.2.0. It is more efficient than the dd " "solution. However, there is a limit of thin clone inside each LUN family. " "Every time the limit reaches, a new LUN family will be created by a dd-copy, " "and then the volume clone afterward will use the thin clone of the new LUN " "family." msgstr "" "Add thin clone support in the Unity driver. Unity storage supports the thin " "clone of a LUN from OE version 4.2.0. It is more efficient than the dd " "solution. However, there is a limit of thin clone inside each LUN family. " "Every time the limit reaches, a new LUN family will be created by a dd-copy, " "and then the volume clone afterwards will use the thin clone of the new LUN " "family." msgid "Add v2.1 volume replication support in VMAX driver." msgstr "Add v2.1 volume replication support in VMAX driver." msgid "" "Added \"backend_state: up/down\" in response body of service list if context " "is admin. This feature will help operators or cloud management system to get " "the backend device state in every service. If device state is *down*, " "specify that storage device has got some problems. Give more information to " "locate bugs quickly." msgstr "" "Added \"backend_state: up/down\" in response body of service list if context " "is admin. This feature will help operators or cloud management system to get " "the backend device state in every service. If device state is *down*, " "specify that storage device has got some problems. Give more information to " "locate bugs quickly." msgid "" "Added Cheesecake (v2.1) replication support to the Pure Storage Volume " "drivers." msgstr "" "Added Cheesecake (v2.1) replication support to the Pure Storage Volume " "drivers." msgid "Added Cinder consistency group for the NetApp NFS driver." msgstr "Added Cinder consistency group for the NetApp NFS driver." msgid "Added Cinder fast-retype support to Datera EDF driver." msgstr "Added Cinder fast-retype support to Datera EDF driver." msgid "Added Consistency Group support in ScaleIO driver." msgstr "Added Consistency Group support in ScaleIO driver." msgid "Added Datera EDF API 2.1 support." msgstr "Added Datera EDF API 2.1 support." msgid "Added Datera Multi-Tenancy Support." msgstr "Added Datera Multi-Tenancy Support." msgid "Added Datera Template Support." msgstr "Added Datera Template Support." 
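#
# A minimal cinder.conf sketch for the `max_luns_per_storage_group` option
# described above; the backend section name is hypothetical and the value
# simply illustrates raising the limit past the former fixed value of 255:
#
#   [backend_1]
#   max_luns_per_storage_group = 400
#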
msgid "" "Added Features like Trisync replication support for Pure driver, volume " "group snapshot support for IBM SVF driver, Unisphere 10 support for Dell EMC " "PowerMax driver, Host assisted migration and retype support for Hitachi VSP " "driver." msgstr "" "Added Features like Trisync replication support for Pure driver, volume " "group snapshot support for IBM SVF driver, Unisphere 10 support for Dell EMC " "PowerMax driver, Host assisted migration and retype support for Hitachi VSP " "driver." msgid "Added HA support for NexentaEdge iSCSI driver" msgstr "Added HA support for NexentaEdge iSCSI driver" msgid "" "Added HPE XP iSCSI and FC, Fungible NVMe-TCP, NetApp NVMe-TCP storage " "drivers." msgstr "" "Added HPE XP iSCSI and FC, Fungible NVMe-TCP, NetApp NVMe-TCP storage " "drivers." msgid "Added ISCSI based driver for Veritas Access." msgstr "Added iSCSI based driver for Veritas Access." msgid "Added Keystone v3 support for Swift backup driver in single user mode." msgstr "Added Keystone v3 support for Swift backup driver in single user mode." msgid "" "Added MacroSAN drivers that allows cinder to manage volumes in ISCSI and FC " "environment" msgstr "" "Added MacroSAN drivers that allows cinder to manage volumes in ISCSI and FC " "environment" msgid "Added Migrate and Extend for Nexenta NFS driver." msgstr "Added Migrate and Extend for Nexenta NFS driver." msgid "Added Multi-attach feature in Nimble driver." msgstr "Added Multi-attach feature in Nimble driver." msgid "Added NBD driver for NexentaEdge." msgstr "Added NBD driver for NexentaEdge." msgid "Added NFS based driver for Veritas Access." msgstr "Added NFS based driver for Veritas Access." msgid "Added NVMe-TCP volume driver for Fungible Storage Cluster." msgstr "Added NVMe-TCP volume driver for Fungible Storage Cluster." msgid "Added NVMe/TCP volume driver for NetApp ONTAP Storage Cluster." msgstr "Added NVMe/TCP volume driver for NetApp ONTAP Storage Cluster." msgid "Added Nimble Storage Fibre Channel backend driver." msgstr "Added Nimble Storage Fibre Channel backend driver." msgid "Added Peer Persistence support in HPE 3PAR cinder driver." msgstr "Added Peer Persistence support in HPE 3PAR Cinder driver." msgid "Added QoS support in ScaleIO driver." msgstr "Added QoS support in ScaleIO driver." msgid "" "Added RBD keyring configuration parameter ``rbd_keyring_conf`` to define " "custom path of Ceph keyring file." msgstr "" "Added RBD keyring configuration parameter ``rbd_keyring_conf`` to define " "custom path of Ceph keyring file." msgid "Added REST API to update backup name and description." msgstr "Added REST API to update backup name and description." msgid "" "Added RPC backward compatibility layer similar to the one implemented in " "Nova. This means that Cinder services can be upgraded one-by-one without " "breakage. After all the services are upgraded SIGHUP signals should be " "issued to all the services to signal them to reload cached minimum RPC " "versions. Alternative is of course restart of them. Please note that cinder-" "api service doesn't support SIGHUP yet. Please also take into account that " "all the rolling upgrades capabilities are considered tech preview, as we " "don't have a CI testing it yet." msgstr "" "Added RPC backward compatibility layer similar to the one implemented in " "Nova. This means that Cinder services can be upgraded one-by-one without " "breakage. 
After all the services are upgraded SIGHUP signals should be " "issued to all the services to signal them to reload cached minimum RPC " "versions. Alternative is of course restart of them. Please note that cinder-" "api service doesn't support SIGHUP yet. Please also take into account that " "all the rolling upgrades capabilities are considered tech preview, as we " "don't have a CI testing it yet." msgid "Added Retype functionality to Nexenta iSCSI and NFS drivers." msgstr "Added Retype functionality to Nexenta iSCSI and NFS drivers." msgid "" "Added SandStone driver that allows cinder to manage volumes in ISCSI " "environment." msgstr "" "Added SandStone driver that allows Cinder to manage volumes in iSCSI " "environment." msgid "Added Volume Placement extra-specs support to Datera EDF driver." msgstr "Added Volume Placement extra-specs support to Datera EDF driver." msgid "Added ``datera_disable_profiler`` boolean config option." msgstr "Added ``datera_disable_profiler`` boolean config option." msgid "" "Added ``project_id`` attribute to response body of list groups with detail, " "list group snapshots with detail, show group detail and show group snapshot " "detail APIs since microversion \"3.58\"." msgstr "" "Added ``project_id`` attribute to response body of list groups with detail, " "list group snapshots with detail, show group detail and show group snapshot " "detail APIs since microversion \"3.58\"." msgid "Added ``resource_filters`` API to retrieve configured resource filters." msgstr "" "Added ``resource_filters`` API to retrieve configured resource filters." msgid "" "Added a new Cinder driver for RackScale Design NVMe-oF storage solution." msgstr "" "Added a new Cinder driver for RackScale Design NVMe-oF storage solution." msgid "" "Added a new cinder-manage command to handle the situation where database " "purges would not complete due to the volumes table holding references to " "deleted services. The new command makes sure that all volumes have a " "reference only to the correct service_uuid, which will allow old service " "records to be purged from the database." msgstr "" "Added a new cinder-manage command to handle the situation where database " "purges would not complete due to the volumes table holding references to " "deleted services. The new command makes sure that all volumes have a " "reference only to the correct service_uuid, which will allow old service " "records to be purged from the database." msgid "" "Added a new config ``reinit_driver_count`` in volume driver, which indicates " "the maximum retry limit for driver re-initialization when it fails to " "initialize a volume driver. Its default value is 3. The interval of retry is " "exponentially backoff, and will be 1s, 2s, 4s etc." msgstr "" "Added a new config ``reinit_driver_count`` in volume driver, which indicates " "the maximum retry limit for driver re-initialisation when it fails to " "initialise a volume driver. Its default value is 3. The interval of retry is " "exponentially back-off, and will be 1s, 2s, 4s etc." msgid "" "Added a new config option ``nexenta_group_snapshot_template``. This option " "specifies template string to generate group snapshot name." msgstr "" "Added a new config option ``nexenta_group_snapshot_template``. This option " "specifies template string to generate group snapshot name." msgid "" "Added a new config option ``nexenta_origin_snapshot_template``. This option " "specifies template string to generate origin name of clone." 
msgstr "" "Added a new config option ``nexenta_origin_snapshot_template``. This option " "specifies template string to generate origin name of clone." msgid "" "Added a new config option ``nexenta_rest_backoff_factor``. This option " "specifies the backoff factor to apply between connection attempts to " "NexentaStor management REST API server." msgstr "" "Added a new config option ``nexenta_rest_backoff_factor``. This option " "specifies the backoff factor to apply between connection attempts to " "NexentaStor management REST API server." msgid "" "Added a new config option ``nexenta_rest_connect_timeout``. This option " "specifies the time limit (in seconds), within which the connection to " "NexentaStor management REST API server must be established." msgstr "" "Added a new config option ``nexenta_rest_connect_timeout``. This option " "specifies the time limit (in seconds), within which the connection to " "NexentaStor management REST API server must be established." msgid "" "Added a new config option ``nexenta_rest_read_timeout``. This option " "specifies the time limit (in seconds), within which NexentaStor management " "REST API server must send a response." msgstr "" "Added a new config option ``nexenta_rest_read_timeout``. This option " "specifies the time limit (in seconds), within which NexentaStor management " "REST API server must send a response." msgid "" "Added a new config option ``nexenta_rest_retry_count``. This option " "specifies the number of times to repeat NexentaStor management REST API call " "in case of connection errors and NexentaStor appliance EBUSY or ENOENT " "errors." msgstr "" "Added a new config option ``nexenta_rest_retry_count``. This option " "specifies the number of times to repeat NexentaStor management REST API call " "in case of connection errors and NexentaStor appliance EBUSY or ENOENT " "errors." msgid "" "Added a new config option `scheduler_weight_handler`. This is a global " "option which specifies how the scheduler should choose from a listed of " "weighted pools. By default the existing weigher is used which always chooses " "the highest weight." msgstr "" "Added a new config option `scheduler_weight_handler`. This is a global " "option which specifies how the scheduler should choose from a listed of " "weighted pools. By default the existing weigher is used which always chooses " "the highest weight." msgid "" "Added a new configuration option ``image_conversion_disable`` to disallow " "conversion between image disk format and volume format when doing certain " "operations. This can prevent performance problems on a cinder-volume node " "due to the large amount of system resources consumed during image " "conversion. The default value is ``False``, which corresponds to Cinder's " "current behavior to always attempt image conversion." msgstr "" "Added a new configuration option ``image_conversion_disable`` to disallow " "conversion between image disk format and volume format when doing certain " "operations. This can prevent performance problems on a cinder-volume node " "due to the large amount of system resources consumed during image " "conversion. The default value is ``False``, which corresponds to Cinder's " "current behaviour to always attempt image conversion." msgid "Added a new default volume type ``__DEFAULT__`` which will be used when" msgstr "" "Added a new default volume type ``__DEFAULT__`` which will be used when" msgid "" "Added a new option ``quobyte_overlay_volumes`` for the Quobyte volume " "driver. 
This option activates internal snapshots who allow to create volumes " "from snapshots as overlay files based on the volume from snapshot cache. " "This significantly speeds up the creation of volumes from large snapshots." msgstr "" "Added a new option ``quobyte_overlay_volumes`` for the Quobyte volume " "driver. This option activates internal snapshots who allow to create volumes " "from snapshots as overlay files based on the volume from snapshot cache. " "This significantly speeds up the creation of volumes from large snapshots." msgid "" "Added a new optional cache of volumes generated from snapshots for the " "Quobyte backend. Enabling this cache speeds up creation of multiple volumes " "from a single snapshot at the cost of a slight increase in creation time for " "the first volume generated for this given snapshot. The " "``quobyte_volume_from_snapshot_cache`` option is off by default." msgstr "" "Added a new optional cache of volumes generated from snapshots for the " "Quobyte backend. Enabling this cache speeds up creation of multiple volumes " "from a single snapshot at the cost of a slight increase in creation time for " "the first volume generated for this given snapshot. The " "``quobyte_volume_from_snapshot_cache`` option is off by default." msgid "" "Added a new weight handler `StochasticHostWeightHandler`. This weight " "handler chooses pools randomly, where the random probabilities are " "proportional to the weights, so higher weighted pools are chosen more " "frequently, but not all the time. This weight handler spreads new shares " "across available pools more fairly." msgstr "" "Added a new weight handler `StochasticHostWeightHandler`. This weight " "handler chooses pools randomly, where the random probabilities are " "proportional to the weights, so higher weighted pools are chosen more " "frequently, but not all the time. This weight handler spreads new shares " "across available pools more fairly." msgid "Added ability to backup snapshots." msgstr "Added ability to backup snapshots." msgid "Added ability to list all manageable volumes within ScaleIO Driver." msgstr "Added ability to list all manageable volumes within ScaleIO Driver." msgid "" "Added ability to purge records less than 1 day old, using the cinder-manage " "db_purge utility. This helps especially for those testing scenarios in which " "a a large number of volumes are created and deleted. (bug" msgstr "" "Added ability to purge records less than 1 day old, using the cinder-manage " "db_purge utility. This helps especially for those testing scenarios in which " "a a large number of volumes are created and deleted. (bug" msgid "Added ability to query backups by project ID." msgstr "Added ability to query backups by project ID." msgid "" "Added ability to specify multiple storage pools in the FalconStor driver." msgstr "" "Added ability to specify multiple storage pools in the FalconStor driver." msgid "" "Added additional metrics reported to the scheduler for Pure Volume Drivers " "for better filtering and weighing functions." msgstr "" "Added additional metrics reported to the scheduler for Pure Volume Drivers " "for better filtering and weighing functions." msgid "" "Added an ``excluded_domain_ips`` option to the Dell EMC SC driver. This is " "identical to the excluded_domain_ip option only comma separated rather than " "multiple entry. This is concatenated with the ``excluded_domain_ip`` option." msgstr "" "Added an ``excluded_domain_ips`` option to the Dell EMC SC driver. 
This is " "identical to the excluded_domain_ip option only comma separated rather than " "multiple entry. This is concatenated with the ``excluded_domain_ip`` option." msgid "" "Added an ``included_domain_ips`` option to the Dell EMC SC driver. This " "option takes a comma separated list of target IP addresses listed under the " "fault domains to whitelisted. This option only applies to the ISCSI driver." msgstr "" "Added an ``included_domain_ips`` option to the Dell EMC SC driver. This " "option takes a comma separated list of target IP addresses listed under the " "fault domains to whitelisted. This option only applies to the ISCSI driver." msgid "" "Added asynchronous remote replication support in Dell EMC VMAX cinder driver." msgstr "" "Added asynchronous remote replication support in Dell EMC VMAX Cinder driver." msgid "Added attribute ``connection_info`` to attachment object." msgstr "Added attribute ``connection_info`` to attachment object." msgid "" "Added automatic configuration of SAN access control for the NEC volume " "driver." msgstr "" "Added automatic configuration of SAN access control for the NEC volume " "driver." msgid "Added availability_zone filter for snapshots list." msgstr "Added availability_zone filter for snapshots list." msgid "Added backend FC and iSCSI drivers for NEC Storage." msgstr "Added backend FC and iSCSI drivers for NEC Storage." msgid "Added backend FC and iSCSI drivers for NEC V series Storage." msgstr "Added backend FC and iSCSI drivers for NEC V series Storage." msgid "Added backend ISCSI driver for Reduxio." msgstr "Added backend ISCSI driver for Reduxio." msgid "Added backend driver for Coho Data storage." msgstr "Added backend driver for Coho Data storage." msgid "Added backend driver for DISCO storage." msgstr "Added backend driver for DISCO storage." msgid "Added backend driver for Dell EMC Unity storage." msgstr "Added backend driver for Dell EMC Unity storage." msgid "Added backend driver for FalconStor FreeStor." msgstr "Added backend driver for FalconStor FreeStor." msgid "Added backend driver for Fujitsu ETERNUS DX (FC)." msgstr "Added backend driver for Fujitsu ETERNUS DX (FC)." msgid "Added backend driver for Fujitsu ETERNUS DX (iSCSI)." msgstr "Added backend driver for Fujitsu ETERNUS DX (iSCSI)." msgid "Added backend driver for HPE XP storage." msgstr "Added backend driver for HPE XP storage." msgid "Added backend driver for Hedvig iSCSI storage." msgstr "Added backend driver for Hedvig iSCSI storage." msgid "Added backend driver for Huawei FusionStorage." msgstr "Added backend driver for Huawei FusionStorage." msgid "Added backend driver for Nexenta Edge iSCSI storage." msgstr "Added backend driver for Nexenta Edge iSCSI storage." msgid "Added backend driver for NexentaStor5 NFS storage." msgstr "Added backend driver for NexentaStor5 NFS storage." msgid "Added backend driver for NexentaStor5 iSCSI storage." msgstr "Added backend driver for NexentaStor5 iSCSI storage." msgid "Added backend driver for Synology iSCSI-supported storage." msgstr "Added backend driver for Synology iSCSI-supported storage." msgid "Added backend driver for VMware VStorageObject (First Class Disk)." msgstr "Added backend driver for VMware VStorageObject (First Class Disk)." msgid "Added backend driver for Violin Memory 7000 iscsi storage." msgstr "Added backend driver for Violin Memory 7000 iSCSI storage." msgid "Added backend driver for ZTE iSCSI storage." msgstr "Added backend driver for ZTE iSCSI storage." 
msgid "" "Added boolean conf option 'split_loggers' in [default] section of cinder." "conf to `enable split logging`_ functionality. The default value of " "split_loggers option is set to False. Operator can set it's value to True to " "split HTTP content into subloggers to allow for fine-grained control of what " "is logged and how. This new config option 'split_loggers' should be enabled " "only when keystoneauth log level is set to DEBUG in 'default_log_levels' " "config option." msgstr "" "Added boolean conf option 'split_loggers' in [default] section of cinder." "conf to `enable split logging`_ functionality. The default value of " "split_loggers option is set to False. Operator can set it's value to True to " "split HTTP content into subloggers to allow for fine-grained control of what " "is logged and how. This new config option 'split_loggers' should be enabled " "only when keystoneauth log level is set to DEBUG in 'default_log_levels' " "config option." msgid "" "Added boolean config option ``allow_compression_on_image_upload`` in " "[default] section of cinder.conf to enable/disable image compression on " "image upload. The default value of this option is ``false``, which means " "image compression is disabled." msgstr "" "Added boolean config option ``allow_compression_on_image_upload`` in " "[default] section of cinder.conf to enable/disable image compression on " "image upload. The default value of this option is ``false``, which means " "image compression is disabled." msgid "Added cinder backup driver for Google Cloud Storage." msgstr "Added Cinder backup driver for Google Cloud Storage." msgid "" "Added config option ``backup_mount_attempts`` to specify the number of " "attempts to mount NFS share in the NFS backup driver." msgstr "" "Added config option ``backup_mount_attempts`` to specify the number of " "attempts to mount NFS share in the NFS backup driver." msgid "" "Added config option ``vmware_adapter_type`` for the VMware VMDK driver to " "specify the default adapter type for volumes in vCenter server." msgstr "" "Added config option ``vmware_adapter_type`` for the VMware VMDK driver to " "specify the default adapter type for volumes in vCenter server." msgid "" "Added config option ``vmware_connection_pool_size`` in the VMware VMDK " "driver to specify the maximum number of connections (to vCenter) in the http " "connection pool." msgstr "" "Added config option ``vmware_connection_pool_size`` in the VMware VMDK " "driver to specify the maximum number of connections (to vCenter) in the HTTP " "connection pool." msgid "" "Added config option to enable/disable automatically calculation an over-" "subscription ratio max for Pure Volume Drivers. When disabled the drivers " "will now respect the max_oversubscription_ratio config option." msgstr "" "Added config option to enable/disable automatically calculation an over-" "subscription ratio max for Pure Volume Drivers. When disabled the drivers " "will now respect the max_oversubscription_ratio config option." msgid "" "Added consistency group capability to generic volume groups for NexentaStor5 " "iSCSI and NFS drivers." msgstr "" "Added consistency group capability to generic volume groups for NexentaStor5 " "iSCSI and NFS drivers." msgid "" "Added consistency group capability to generic volume groups in the HPE 3PAR " "driver." msgstr "" "Added consistency group capability to generic volume groups in the HPE 3PAR " "driver." msgid "Added consistency group support in Nimble Storage driver." 
msgstr "Added consistency group support in Nimble Storage driver." msgid "" "Added consistency group support to generic volume groups in ScaleIO Driver." msgstr "" "Added consistency group support to generic volume groups in ScaleIO Driver." msgid "Added consistency group support to the Huawei driver." msgstr "Added consistency group support to the Huawei driver." msgid "" "Added consistent group capability to generic volume groups in GPFS driver." msgstr "" "Added consistent group capability to generic volume groups in GPFS driver." msgid "" "Added consistent group capability to generic volume groups in ProphetStor " "driver." msgstr "" "Added consistent group capability to generic volume groups in ProphetStor " "driver." msgid "Added count info in volume, snapshot and backup's list APIs since 3.45." msgstr "" "Added count info in volume, snapshot and backup's list APIs since 3.45." msgid "" "Added create/delete APIs for group snapshots and an API to create group from " "source." msgstr "" "Added create/delete APIs for group snapshots and an API to create group from " "source." msgid "" "Added data reduction pool support for thin-provisoned and compressed volume " "in Storwize cinder driver." msgstr "" "Added data reduction pool support for thin-provisoned and compressed volume " "in Storwize cinder driver." msgid "" "Added dell_api_async_rest_timeout option to the Dell EMC SC driver. This is " "the timeout used for asynchronous REST calls to the Dell EMC SC REST API. " "Default is 15 seconds." msgstr "" "Added dell_api_async_rest_timeout option to the Dell EMC SC driver. This is " "the timeout used for asynchronous REST calls to the Dell EMC SC REST API. " "Default is 15 seconds." msgid "" "Added dell_api_sync_rest_timeout option to the Dell EMC SC driver. This is " "the timeout used for synchronous REST calls to the Dell EMC SC REST API. " "Default is 30 seconds." msgstr "" "Added dell_api_sync_rest_timeout option to the Dell EMC SC driver. This is " "the timeout used for synchronous REST calls to the Dell EMC SC REST API. " "Default is 30 seconds." msgid "Added driver for Tegile IntelliFlash arrays." msgstr "Added driver for Tegile IntelliFlash arrays." msgid "Added driver for the InfiniBox storage array." msgstr "Added driver for the InfiniBox storage array." msgid "" "Added driver-assisted volume migration to RBD driver. This allows a volume " "to be efficiently copied by Ceph from one pool to another within the same " "cluster." msgstr "" "Added driver-assisted volume migration to RBD driver. This allows a volume " "to be efficiently copied by Ceph from one pool to another within the same " "cluster." msgid "Added extend method to NFS driver for NexentaStor 5." msgstr "Added extend method to NFS driver for NexentaStor 5." msgid "" "Added flag 'backend_state' which will give backend state info in service " "list." msgstr "" "Added flag 'backend_state' which will give backend state info in service " "list." msgid "" "Added flag 'backend_state: up/down' which will give backend state info in " "service list." msgstr "" "Added flag 'backend_state: up/down' which will give backend state info in " "service list." msgid "" "Added generalized resource filter support in ``list volume``, ``list " "backup``, ``list snapshot``, ``list group``, ``list group-snapshot``, ``list " "attachment``, ``list message`` and ``list pools`` APIs." 
msgstr "" "Added generalised resource filter support in ``list volume``, ``list " "backup``, ``list snapshot``, ``list group``, ``list group-snapshot``, ``list " "attachment``, ``list message`` and ``list pools`` APIs." msgid "" "Added generic volume group capability to NetApp cDot drivers with support " "for write consistent group snapshots." msgstr "" "Added generic volume group capability to NetApp cDot drivers with support " "for write consistent group snapshots." msgid "Added get capability feature for HPE-3PAR." msgstr "Added get capability feature for HPE-3PAR." msgid "Added group type and group specs APIs." msgstr "Added group type and group specs APIs." msgid "" "Added host-level (whole back end replication - v2.1) replication support to " "the NetApp cDOT drivers (iSCSI, FC, NFS)." msgstr "" "Added host-level (whole back end replication - v2.1) replication support to " "the NetApp cDOT drivers (iSCSI, FC, NFS)." msgid "" "Added hyperswap volume and group support in Storwize cinder driver. Storwize/" "svc versions prior to 7.6 do not support this feature." msgstr "" "Added hyperswap volume and group support in Storwize Cinder driver. Storwize/" "svc versions prior to 7.6 do not support this feature." msgid "Added iSCSI CHAP uni-directional authentication for NetApp drivers." msgstr "Added iSCSI CHAP uni-directional authentication for NetApp drivers." msgid "" "Added iSCSI and Fibre Channel volume drivers for DataCore's SANsymphony and " "Hyper-converged Virtual SAN storage." msgstr "" "Added iSCSI and Fibre Channel volume drivers for DataCore's SANsymphony and " "Hyper-converged Virtual SAN storage." msgid "" "Added image signature verification support when creating volume from image. " "This depends on signature metadata from glance. This feature is turned on by " "default, administrators can change behaviour by updating option " "``verify_glance_signatures``. Also, an additional image metadata " "``signature_verified`` has been added to indicate whether signature " "verification was performed during creating process." msgstr "" "Added image signature verification support when creating volume from image. " "This depends on signature metadata from glance. This feature is turned on by " "default, administrators can change behaviour by updating option " "``verify_glance_signatures``. Also, an additional image metadata " "``signature_verified`` has been added to indicate whether signature " "verification was performed during creating process." msgid "" "Added independent and shared types for qos classes in XIV & A9000. Shared " "type enables to share bandwidth and IO rates between volumes of the same " "class. Independent type gives each volume the same bandwidth and IO rates " "without being affected by other volumes in the same qos class." msgstr "" "Added independent and shared types for QoS classes in XIV & A9000. Shared " "type enables to share bandwidth and I/O rates between volumes of the same " "class. Independent type gives each volume the same bandwidth and I/O rates " "without being affected by other volumes in the same QoS class." msgid "Added like operator support to filters for the following resources::" msgstr "Added like operator support to filters for the following resources::" msgid "Added manage/unmanage snapshot support for Huawei drivers." msgstr "Added manage/unmanage snapshot support for Huawei drivers." msgid "Added manage/unmanage snapshot support to the HNAS NFS driver." msgstr "Added manage/unmanage snapshot support to the HNAS NFS driver." 
msgid "Added manage/unmanage volume support for Dell Equallogic driver." msgstr "Added manage/unmanage volume support for Dell Equallogic driver." msgid "Added manage/unmanage volume support for Huawei drivers." msgstr "Added manage/unmanage volume support for Huawei drivers." msgid "" "Added metadata support for backup source. Now users can create/update " "metadata for a specified backup." msgstr "" "Added metadata support for backup source. Now users can create/update " "metadata for a specified backup." msgid "Added multiple management IP support to Storwize SVC driver." msgstr "Added multiple management IP support to Storwize SVC driver." msgid "Added multiple pools support to Storwize SVC driver." msgstr "Added multiple pools support to Storwize SVC driver." msgid "Added netapp copyoffload provider location." msgstr "Added NetApp copyoffload provider location." msgid "" "Added new APIs on microversion 3.32 to support dynamically changing log " "levels in Cinder services without restart as well as retrieving current log " "levels, which is an easy way to ping via the message broker a service." msgstr "" "Added new APIs on microversion 3.32 to support dynamically changing log " "levels in Cinder services without restart as well as retrieving current log " "levels, which is an easy way to ping via the message broker a service." msgid "" "Added new APIs on microversion 3.61 to show ``cluster_name`` attribute in " "the response body of volume details for admin." msgstr "" "Added new APIs on microversion 3.61 to show ``cluster_name`` attribute in " "the response body of volume details for admin." msgid "" "Added new BoolOpt ``backup_ceph_image_journals`` for enabling the Ceph image " "features required to support RBD mirroring of Cinder backup pool." msgstr "" "Added new BoolOpt ``backup_ceph_image_journals`` for enabling the Ceph image " "features required to support RBD mirroring of Cinder backup pool." msgid "" "Added new Ceph iSCSI driver rbd_iscsi. This new driver is derived from the " "rbd driver and allows all the same features as the rbd driver. The only " "difference is that volume attachments are done via iSCSI." msgstr "" "Added new Ceph iSCSI driver rbd_iscsi. This new driver is derived from the " "RBD driver and allows all the same features as the RBD driver. The only " "difference is that volume attachments are done via iSCSI." msgid "" "Added new Hitachi VSP FC Driver. The VSP driver supports all Hitachi VSP " "Family and HUSVM." msgstr "" "Added new Hitachi VSP FC Driver. The VSP driver supports all Hitachi VSP " "Family and HUSVM." msgid "" "Added new backup driver to enable backing up cinder volumes to S3-compatible " "storage. See the reference `S3 backup driver `_ " "for more information." msgstr "" "Added new backup driver to enable backing up Cinder volumes to S3-compatible " "storage. See the reference `S3 backup driver `_ " "for more information." msgid "" "Added new configuration options to allow more specific control over some " "periodic processes. See the 'Upgrade' section for details." msgstr "" "Added new configuration options to allow more specific control over some " "periodic processes. See the 'Upgrade' section for details." msgid "" "Added new option to delete XtremIO initiator groups after the last volume " "was detached from them. Cleanup can be enabled by setting " "``xtremio_clean_unused_ig`` to ``True`` under the backend settings in cinder." "conf." 
msgstr "" "Added new option to delete XtremIO initiator groups after the last volume " "was detached from them. Cleanup can be enabled by setting " "``xtremio_clean_unused_ig`` to ``True`` under the backend settings in cinder." "conf." msgid "Added oversubscription support in the VMAX driver" msgstr "Added over-subscription support in the VMAX driver" msgid "" "Added periodic task to clean expired messages in cinder scheduler, also " "added a configuration option ``message_reap_interval`` to handle the " "interval." msgstr "" "Added periodic task to clean expired messages in Cinder scheduler, also " "added a configuration option ``message_reap_interval`` to handle the " "interval." msgid "" "Added periodic task to clean expired reservation in cinder scheduler. Added " "a configuration option ``reservation_clean_interval`` to handle the interval." msgstr "" "Added periodic task to clean expired reservation in Cinder scheduler. Added " "a configuration option ``reservation_clean_interval`` to handle the interval." msgid "" "Added policies to disallow multiattach operations. This includes two " "policies, the first being a general policy to allow the creation or retyping " "of multiattach volumes is a volume create policy with the name ``volume:" "multiattach``. The second policy is specifically for disallowing the ability " "to create multiple attachments on a volume that is marked as bootable, and " "is an attachment policy with the name ``volume:" "multiattach_bootable_volume``. The default for these new policies is ``rule:" "admin_or_owner``; be aware that if you wish to disable either of these " "policies for your users you will need to modify the default policy settings." msgstr "" "Added policies to disallow multiattach operations. This includes two " "policies, the first being a general policy to allow the creation or retyping " "of multiattach volumes is a volume create policy with the name ``volume:" "multiattach``. The second policy is specifically for disallowing the ability " "to create multiple attachments on a volume that is marked as bootable, and " "is an attachment policy with the name ``volume:" "multiattach_bootable_volume``. The default for these new policies is ``rule:" "admin_or_owner``; be aware that if you wish to disable either of these " "policies for your users you will need to modify the default policy settings." msgid "Added replication failback support for the Dell SC driver." msgstr "Added replication failback support for the Dell SC driver." msgid "Added replication group support in HPE 3PAR cinder driver." msgstr "Added replication group support in HPE 3PAR Cinder driver." msgid "Added replication v2.1 support to the Dell Storage Center drivers." msgstr "Added replication v2.1 support to the Dell Storage Centre drivers." msgid "Added replication v2.1 support to the IBM Storwize driver." msgstr "Added replication v2.1 support to the IBM Storwize driver." msgid "Added replication v2.1 support to the IBM XIV/DS8K driver." msgstr "Added replication v2.1 support to the IBM XIV/DS8K driver." msgid "Added reset status API to generic volume group." msgstr "Added reset status API to generic volume group." msgid "Added reset status API to group snapshot." msgstr "Added reset status API to group snapshot." msgid "Added revert to snapshot feature in Nimble driver." msgstr "Added revert to snapshot feature in Nimble driver." msgid "" "Added revert to snapshot support for NexentaStor5 iSCSI and NFS drivers." 
msgstr "" "Added revert to snapshot support for NexentaStor5 iSCSI and NFS drivers." msgid "Added revert volume to snapshot in 3par driver." msgstr "Added revert volume to snapshot in 3PAR driver." msgid "" "Added schema validation support using jsonschema `[json-schema-validation]`_ " "for all supported v3 APIs." msgstr "" "Added schema validation support using jsonschema `[json-schema-validation]`_ " "for all supported v3 APIs." msgid "" "Added secure HTTP support for REST API calls in the NexentaStor5 driver. Use " "of HTTPS is set True by default with option ``nexenta_use_https``." msgstr "" "Added secure HTTP support for REST API calls in the NexentaStor5 driver. Use " "of HTTPS is set True by default with option ``nexenta_use_https``." msgid "" "Added snapshot manage/unmanage support for NexentaStor5 iSCSI and NFS " "drivers." msgstr "" "Added snapshot manage/unmanage support for NexentaStor5 iSCSI and NFS " "drivers." msgid "Added snapshot manage/unmanage support to the EMC XtremIO driver." msgstr "Added snapshot manage/unmanage support to the EMC XtremIO driver." msgid "Added snapshot manage/unmanage support to the HPE 3PAR driver." msgstr "Added snapshot manage/unmanage support to the HPE 3PAR driver." msgid "Added snapshot manage/unmanage support to the HPE LeftHand driver." msgstr "Added snapshot manage/unmanage support to the HPE LeftHand driver." msgid "" "Added string config option ``compression_format`` in [default] section of " "cinder.conf to specify image compression format. Currently the only legal " "value for this option is ``gzip``." msgstr "" "Added string config option ``compression_format`` in [default] section of " "cinder.conf to specify image compression format. Currently the only legal " "value for this option is ``gzip``." msgid "Added support for API microversions, as well as /v3 API endpoint." msgstr "Added support for API microversions, as well as /v3 API endpoint." msgid "" "Added support for Keystone middleware feature to pass service token along " "with the user token for Cinder to Nova and Glance services. This will help " "get rid of user token expiration issues during long running tasks e.g. " "creating volume snapshot (Cinder->Nova) and creating volume from image " "(Cinder->Glance) etc. To use this functionality a service user needs to be " "created first. Add the service user configurations in ``cinder.conf`` under " "``service_user`` group and set ``send_service_user_token`` flag to ``True``." msgstr "" "Added support for Keystone middleware feature to pass service token along " "with the user token for Cinder to Nova and Glance services. This will help " "get rid of user token expiration issues during long running tasks e.g. " "creating volume snapshot (Cinder->Nova) and creating volume from image " "(Cinder->Glance) etc. To use this functionality a service user needs to be " "created first. Add the service user configurations in ``cinder.conf`` under " "``service_user`` group and set ``send_service_user_token`` flag to ``True``." msgid "" "Added support for Open-E JovianDSS data storage. Driver supports Open-E " "disaster recovery feature and cascade volume deletion in addition to support " "minimum required functions." msgstr "" "Added support for Open-E JovianDSS data storage. Driver supports Open-E " "disaster recovery feature and cascade volume deletion in addition to support " "minimum required functions." msgid "Added support for QTS fw 4.4.0 to QNAP Cinder driver." msgstr "Added support for QTS fw 4.4.0 to QNAP Cinder driver." 
msgid "" "Added support for QoS in the INFINIDAT InfiniBox driver. QoS is available on " "InfiniBox 4.0 onward." msgstr "" "Added support for QoS in the INFINIDAT InfiniBox driver. QoS is available on " "InfiniBox 4.0 onward." msgid "" "Added support for QoS in the Pure Storage drivers. QoS support is available " "from Purity//FA 5.3.0" msgstr "" "Added support for QoS in the Pure Storage drivers. QoS support is available " "from Purity//FA 5.3.0" msgid "Added support for UC-Series model to Synology Cinder driver." msgstr "Added support for UC-Series model to Synology Cinder driver." msgid "Added support for ZMQ messaging layer in multibackend configuration." msgstr "Added support for ZMQ messaging layer in multibackend configuration." msgid "" "Added support for ZeroMQ messaging driver in cinder single backend config." msgstr "" "Added support for ZeroMQ messaging driver in Cinder single backend config." msgid "" "Added support for active-active replication to the RBD driver. This allows " "users to configure multiple volume backends that are all a member of the " "same cluster participating in replication." msgstr "" "Added support for active-active replication to the RBD driver. This allows " "users to configure multiple volume backends that are all a member of the " "same cluster participating in replication." msgid "" "Added support for changing storage policy of backend volumes created by " "VMwareVStorageObjectDriver using retype API." msgstr "" "Added support for changing storage policy of backend volumes created by " "VMwareVStorageObjectDriver using retype API." msgid "" "Added support for cloning volume asynchronously, it can be enabled by option " "async_clone set to true in parameter metadata when creating volume from " "volume or snapshot." msgstr "" "Added support for cloning volume asynchronously, it can be enabled by option " "async_clone set to true in parameter metadata when creating volume from " "volume or snapshot." msgid "" "Added support for creating a consistency group from a source consistency " "group in the HPE 3PAR driver." msgstr "" "Added support for creating a consistency group from a source consistency " "group in the HPE 3PAR driver." msgid "" "Added support for creating, deleting, and updating consistency groups for " "NetApp 7mode and CDOT backends." msgstr "" "Added support for creating, deleting, and updating consistency groups for " "NetApp 7mode and CDOT backends." msgid "" "Added support for creating, deleting, and updating consistency groups for " "NexentaStor5 iSCSI and NFS drivers." msgstr "" "Added support for creating, deleting, and updating consistency groups for " "NexentaStor5 iSCSI and NFS drivers." msgid "" "Added support for get all distinct volumes' metadata from volume-summary API." msgstr "" "Added support for get all distinct volumes' metadata from volume-summary API." msgid "" "Added support for images with vmware_adaptertype set to paraVirtual in the " "VMDK driver." msgstr "" "Added support for images with vmware_adaptertype set to paraVirtual in the " "VMDK driver." msgid "Added support for manage volume in the VMware VMDK driver." msgstr "Added support for manage volume in the VMware VMDK driver." msgid "Added support for manage/unmanage snapshot in the ScaleIO driver." msgstr "Added support for manage/unmanage snapshot in the ScaleIO driver." msgid "Added support for manage/unmanage volume in the ScaleIO driver." msgstr "Added support for manage/unmanage volume in the ScaleIO driver." 
msgid "" "Added support for oversubscription in thin provisioning in the INFINIDAT " "InfiniBox driver. To use oversubscription, define " "``max_over_subscription_ratio`` in the cinder configuration file." msgstr "" "Added support for over-subscription in thin provisioning in the INFINIDAT " "InfiniBox driver. To use over-subscription, define " "``max_over_subscription_ratio`` in the Cinder configuration file." msgid "" "Added support for oversubscription in thin provisioning in the ScaleIO " "driver. Volumes should have extra_specs with the key provisioning:type with " "value equals to either 'thick' or 'thin'. max_oversubscription_ratio can be " "defined by the global config or for ScaleIO specific with the config option " "sio_max_over_subscription_ratio. The maximum oversubscription ratio " "supported at the moment is 10.0." msgstr "" "Added support for over-subscription in thin provisioning in the ScaleIO " "driver. Volumes should have extra_specs with the key provisioning:type with " "value equals to either 'thick' or 'thin'. max_oversubscription_ratio can be " "defined by the global config or for ScaleIO specific with the config option " "sio_max_over_subscription_ratio. The maximum over-subscription ratio " "supported at the moment is 10.0." msgid "" "Added support for project specific default volume types. Microversion 3.62 " "of the Block Storage API introduces new calls to set, get, and unset a " "default volume type for a specific project. Project specific defaults have " "higher priority than the default_volume_type option in cinder.conf" msgstr "" "Added support for project specific default volume types. Microversion 3.62 " "of the Block Storage API introduces new calls to set, get, and unset a " "default volume type for a specific project. Project specific defaults have " "higher priority than the default_volume_type option in cinder.conf" msgid "" "Added support for querying group details with volume ids which are in this " "group. For example, \"groups/{group_id}?list_volume=True\"." msgstr "" "Added support for querying group details with volume ids which are in this " "group. For example, \"groups/{group_id}?list_volume=True\"." msgid "" "Added support for querying volumes filtered by glance metadata key/value " "using 'glance_metadata' optional URL parameter. For example, \"volumes/" "detail?glance_metadata={\"image_name\":\"xxx\"}\"." msgstr "" "Added support for querying volumes filtered by Glance metadata key/value " "using 'glance_metadata' optional URL parameter. For example, \"volumes/" "detail?glance_metadata={\"image_name\":\"xxx\"}\"." msgid "" "Added support for querying volumes filtered by group_id using 'group_id' " "optional URL parameter. For example, \"volumes/detail?" "group_id={consistency_group_id}\"." msgstr "" "Added support for querying volumes filtered by group_id using 'group_id' " "optional URL parameter. For example, \"volumes/detail?" "group_id={consistency_group_id}\"." msgid "Added support for revert-to-snapshot in the VMware VMDK driver." msgstr "Added support for revert-to-snapshot in the VMware VMDK driver." msgid "" "Added support for scaling QoS in the ScaleIO driver. The new QoS keys are " "maxIOPSperGB and maxBWSperGB." msgstr "" "Added support for scaling QoS in the ScaleIO driver. The new QoS keys are " "maxIOPSperGB and maxBWSperGB." msgid "" "Added support for snapshots in the NFS driver. This functionality is only " "enabled if ``nfs_snapshot_support`` is set to ``True`` in cinder.conf. 
" "Cloning volumes is only supported if the source volume is not attached." msgstr "" "Added support for snapshots in the NFS driver. This functionality is only " "enabled if ``nfs_snapshot_support`` is set to ``True`` in cinder.conf. " "Cloning volumes is only supported if the source volume is not attached." msgid "" "Added support for taking, deleting, and restoring a cgsnapshot for NetApp " "7mode and CDOT backends." msgstr "" "Added support for taking, deleting, and restoring a cgsnapshot for NetApp " "7mode and CDOT backends." msgid "" "Added support for taking, deleting, and restoring consistency group " "snapshots for NexentaStor5 iSCSI and NFS drivers." msgstr "" "Added support for taking, deleting, and restoring consistency group " "snapshots for NexentaStor5 iSCSI and NFS drivers." msgid "" "Added support for the use of live volume in place of standard replication in " "the Dell SC driver." msgstr "" "Added support for the use of live volume in place of standard replication in " "the Dell SC driver." msgid "Added support for vhd and vhdx disk-formats for volume upload-to-image." msgstr "" "Added support for vhd and vhdx disk-formats for volume upload-to-image." msgid "Added support for vhd disk-format for volume upload-to-image." msgstr "Added support for vhd disk-format for volume upload-to-image." msgid "" "Added support for volume compression in INFINIDAT driver. Compression is " "available on InfiniBox 3.0 onward. To enable volume compression, set " "``infinidat_use_compression`` to True in the backend section in the Cinder " "configuration file." msgstr "" "Added support for volume compression in INFINIDAT driver. Compression is " "available on InfiniBox 3.0 onward. To enable volume compression, set " "``infinidat_use_compression`` to True in the backend section in the Cinder " "configuration file." msgid "" "Added support of authenticity verification through self-signed certificates " "for JovianDSS data storage. Added support of revert to snapshot " "functionality. Expands unit-test coverage for JovianDSS driver." msgstr "" "Added support of authenticity verification through self-signed certificates " "for JovianDSS data storage. Added support of revert to snapshot " "functionality. Expands unit-test coverage for JovianDSS driver." msgid "Added support of revert to snapshot functionality." msgstr "Added support of revert to snapshot functionality." msgid "" "Added support to Pure Storage Volume Drivers for Active Cluster using the " "standard replication API's for the Block Storage Service." msgstr "" "Added support to Pure Storage Volume Drivers for Active Cluster using the " "standard replication APIs for the Block Storage Service." msgid "" "Added support to cinder backup for use of the Zstandard compression " "algorithm. To use it, set the ``backup_compression_algorithm`` to ``zstd`` " "in the cinder configuration file. (The default value for this option is " "``zlib``.)" msgstr "" "Added support to cinder backup for use of the Zstandard compression " "algorithm. To use it, set the ``backup_compression_algorithm`` to ``zstd`` " "in the Cinder configuration file. (The default value for this option is " "``zlib``.)" msgid "" "Added support to querying snapshots filtered by metadata key/value using " "'metadata' optional URL parameter. For example, \"/v3/snapshots?" "metadata=={'key1':'value1'}\"." msgstr "" "Added support to querying snapshots filtered by metadata key/value using " "'metadata' optional URL parameter. For example, \"/v3/snapshots?" 
"metadata=={'key1':'value1'}\"." msgid "" "Added support to revert a volume to a snapshot with the Dell EMC VNX driver." msgstr "" "Added support to revert a volume to a snapshot with the Dell EMC VNX driver." msgid "Added supported driver checks on all drivers." msgstr "Added supported driver checks on all drivers." msgid "" "Added the ``image_conversion_cpu_limit`` and " "``image_conversion_address_space_limit`` as configurable parameters. This " "adds configurability to the image conversion process to prevent the process " "from timing out when converting larger images." msgstr "" "Added the ``image_conversion_cpu_limit`` and " "``image_conversion_address_space_limit`` as configurable parameters. This " "adds configurability to the image conversion process to prevent the process " "from timing out when converting larger images." msgid "Added the ability to create a CG from a source CG with the VMAX driver." msgstr "" "Added the ability to create a CG from a source CG with the VMAX driver." msgid "" "Added the ability to list manageable volumes and snapshots for NexentaStor5 " "iSCSI and NFS drivers." msgstr "" "Added the ability to list manageable volumes and snapshots for NexentaStor5 " "iSCSI and NFS drivers." msgid "" "Added the ability to list manageable volumes and snapshots to HNAS NFS " "driver." msgstr "" "Added the ability to list manageable volumes and snapshots to HNAS NFS " "driver." msgid "" "Added the ability to list manageable volumes and snapshots via GET operation " "on the /v2//os-volume-manage and /v2//os-snapshot-" "manage URLs, respectively." msgstr "" "Added the ability to list manageable volumes and snapshots via GET operation " "on the /v2//os-volume-manage and /v2//os-snapshot-" "manage URLs, respectively." msgid "" "Added the option ``storwize_svc_retain_aux_volume`` to IBM Storwize Driver " "which takes ``True`` or ``False``. This option is to enable or disable " "retaining of auxiliary volume on secondary storage during delete of the " "volume on primary storage or moving the primary volume from mirror to non-" "mirror with replication enabled. The default value is ``False``." msgstr "" "Added the option ``storwize_svc_retain_aux_volume`` to IBM Storwize Driver " "which takes ``True`` or ``False``. This option is to enable or disable " "retaining of auxiliary volume on secondary storage during delete of the " "volume on primary storage or moving the primary volume from mirror to non-" "mirror with replication enabled. The default value is ``False``." msgid "" "Added the options ``visibility`` and ``protected`` to the os-" "volume_upload_image REST API call." msgstr "" "Added the options ``visibility`` and ``protected`` to the os-" "volume_upload_image REST API call." msgid "" "Added the property ``src_backup_id`` to the volume's metadata, to record " "from which backup the new volume was created. If the ``src_backup_id`` " "exists in the volume's metadata, it will be updated. When restoring from a " "chain of incremental backups, ``src_backup_id`` is set to the last " "incremental backup used for the restore." msgstr "" "Added the property ``src_backup_id`` to the volume's metadata, to record " "from which backup the new volume was created. If the ``src_backup_id`` " "exists in the volume's metadata, it will be updated. When restoring from a " "chain of incremental backups, ``src_backup_id`` is set to the last " "incremental backup used for the restore." msgid "Added transfer pagination support since microversion 3.59." 
msgstr "Added transfer pagination support since microversion 3.59." msgid "Added update-host command for consistency groups in cinder-manage." msgstr "Added update-host command for consistency groups in cinder-manage." msgid "" "Added user messages for backup operations that a user can query through the " "`Messages API `_. These allow users to retrieve error messages for asynchronous " "failures in backup operations like create, delete, and restore." msgstr "" "Added user messages for backup operations that a user can query through the " "`Messages API `_. These allow users to retrieve error messages for asynchronous " "failures in backup operations like create, delete, and restore." msgid "" "Added using etags in API calls to avoid the lost update problem during " "deleting volume metadata." msgstr "" "Added using etags in API calls to avoid the lost update problem during " "deleting volume metadata." msgid "Added v2.1 replication support in Huawei Cinder driver." msgstr "Added v2.1 replication support in Huawei Cinder driver." msgid "Added v2.1 replication support to RBD driver." msgstr "Added v2.1 replication support to RBD driver." msgid "Added v2.1 replication support to SolidFire driver." msgstr "Added v2.1 replication support to SolidFire driver." msgid "Added v2.1 replication support to the HPE 3PAR driver." msgstr "Added v2.1 replication support to the HPE 3PAR driver." msgid "Added v2.1 replication support to the HPE LeftHand driver." msgstr "Added v2.1 replication support to the HPE LeftHand driver." msgid "" "Added vSphere storage policy support in VMwareVStorageObjectDriver. The " "storage policies that must be associated with the volumes can be specified " "using volume type extra-spec key 'vmware:storage_profile' similar to VMware " "VMDK driver. The vSphere version must be 6.7 or above to use this feature." msgstr "" "Added vSphere storage policy support in VMwareVStorageObjectDriver. The " "storage policies that must be associated with the volumes can be specified " "using volume type extra-spec key 'vmware:storage_profile' similar to VMware " "VMDK driver. The vSphere version must be 6.7 or above to use this feature." msgid "Added volume backend driver for Veritas HyperScale storage." msgstr "Added volume backend driver for Veritas HyperScale storage." msgid "Added volume backend drivers for CoprHD FC, iSCSI and Scaleio." msgstr "Added volume backend drivers for CoprHD FC, iSCSI and Scaleio." msgid "Added volume driver for QNAP ES Storage Driver." msgstr "Added volume driver for QNAP ES Storage Driver." msgid "Added volume driver for Zadara Storage VPSA." msgstr "Added volume driver for Zadara Storage VPSA." msgid "" "Added volume manage/unmanage support for NexentaStor5 iSCSI and NFS drivers." msgstr "" "Added volume manage/unmanage support for NexentaStor5 iSCSI and NFS drivers." msgid "Adding Live Migration functionality to VMAX driver version 3.0." msgstr "Adding Live Migration functionality to VMAX driver version 3.0." msgid "Adding Qos functionality to VMAX driver version 3.0." msgstr "Adding QoS functionality to VMAX driver version 3.0." msgid "Adding Replication V2.1 functionality to VMAX driver version 3.0." msgstr "Adding Replication V2.1 functionality to VMAX driver version 3.0." msgid "Adding compression functionality to VMAX driver version 3.0." msgstr "Adding compression functionality to VMAX driver version 3.0." msgid "" "Adding or removing volume_type_access from any project during DB migration " "62 must not be performed." 
msgstr "" "Adding or removing volume_type_access from any project during DB migration " "62 must not be performed." msgid "" "Additionally, ``consumes_quota`` can be used as a listing filter for volumes " "and snapshots. Its availability is controlled by its inclusion in ``etc/" "cinder/resource_filters.json``, where it is included by default. The " "default listing behavior is not to use this filter." msgstr "" "Additionally, ``consumes_quota`` can be used as a listing filter for volumes " "and snapshots. Its availability is controlled by its inclusion in ``etc/" "cinder/resource_filters.json``, where it is included by default. The " "default listing behaviour is not to use this filter." msgid "Additionally, the following bugs were addressed:" msgstr "Additionally, the following bugs were addressed:" msgid "" "Additionally, the following minor fixes for which no bugs were filed were " "made:" msgstr "" "Additionally, the following minor fixes for which no bugs were filed were " "made:" msgid "" "Additionally, the framework provides software-based compression using GUNzip " "tool if a suitable hardware accelerator is not available. Because this " "software fallback could cause performance problems if the Cinder services " "are not deployed on sufficiently powerful nodes, the default setting is " "*not* to enable compression on image upload or download." msgstr "" "Additionally, the framework provides software-based compression using GUNzip " "tool if a suitable hardware accelerator is not available. Because this " "software fallback could cause performance problems if the Cinder services " "are not deployed on sufficiently powerful nodes, the default setting is " "*not* to enable compression on image upload or download." msgid "Adds QoS support for VNX Cinder driver." msgstr "Adds QoS support for VNX Cinder driver." msgid "Adds new Hitachi VSP iSCSI Driver." msgstr "Adds new Hitachi VSP iSCSI Driver." msgid "" "Adds support to configure the size of the native thread pool used by the " "cinder volume and backup services. For the backup we use " "`backup_native_threads_pool_size` in the `[DEFAULT]` section, and for the " "backends we use `backend_native_threads_pool_size` in the driver section." msgstr "" "Adds support to configure the size of the native thread pool used by the " "Cinder volume and backup services. For the backup we use " "`backup_native_threads_pool_size` in the `[DEFAULT]` section, and for the " "backends we use `backend_native_threads_pool_size` in the driver section." msgid "Adds v2.1 replication support in VNX Cinder driver." msgstr "Adds v2.1 replication support in VNX Cinder driver." msgid "" "Administrator can disable this ability by updating the ``volume:" "extend_attached_volume`` policy rule." msgstr "" "Administrator can disable this ability by updating the ``volume:" "extend_attached_volume`` policy rule." msgid "" "After CG tables are removed, we will allow default_cgsnapshot_type to be " "used by group APIs." msgstr "" "After CG tables are removed, we will allow default_cgsnapshot_type to be " "used by group APIs." msgid "" "After an offline upgrade we had to restart all Cinder services twice, now " "with the `cinder-manage db sync --bump-versions` command we can avoid the " "second restart." msgstr "" "After an offline upgrade we had to restart all Cinder services twice, now " "with the `cinder-manage db sync --bump-versions` command we can avoid the " "second restart." 
msgid "" "After being marked unsupported in the Rocky release the CoprHD driver is now " "being removed in Stein. The vendor has indicated that this is desired as " "the CoprHD driver has been deprecated." msgstr "" "After being marked unsupported in the Rocky release the CoprHD driver is now " "being removed in Stein. The vendor has indicated that this is desired as " "the CoprHD driver has been deprecated." msgid "" "After running the migration script to migrate CGs to generic volume groups, " "CG and group APIs work as follows." msgstr "" "After running the migration script to migrate CGs to generic volume groups, " "CG and group APIs work as follows." msgid "" "After the online database migrations from cinder 15.4.0 or more recent have " "run, you may upgrade to Ussuri in the normal way." msgstr "" "After the online database migrations from cinder 15.4.0 or more recent have " "run, you may upgrade to Ussuri in the normal way." msgid "" "After the snapshot is created, upload the information of the snapshot on the " "storage to the metadata." msgstr "" "After the snapshot is created, upload the information of the snapshot on the " "storage to the metadata." msgid "" "After transferring a volume without snapshots from one user project to " "another user project, if the receiving user uses cascade deleting, it will " "cause some exceptions in driver and volume will be error_deleting. Adding " "additional check to ensure there are no snapshots left in other project when " "cascade deleting a tranferred volume." msgstr "" "After transferring a volume without snapshots from one user project to " "another user project, if the receiving user uses cascade deleting, it will " "cause some exceptions in driver and volume will be error_deleting. Adding " "additional check to ensure there are no snapshots left in other project when " "cascade deleting a transferred volume." msgid "" "All Datera DataFabric backed volume-types will now use API version 2 with " "Datera DataFabric" msgstr "" "All Datera DataFabric backed volume-types will now use API version 2 with " "Datera DataFabric" msgid "" "All barbican and keymgr config options in Cinder are now deprecated. All of " "these options are moved to the key_manager section for the Castellan library." msgstr "" "All Barbican and keymgr config options in Cinder are now deprecated. All of " "these options are moved to the key_manager section for the Castellan library." msgid "" "All volumes and snapshots created using the PowerMax for Cinder driver now " "have additional metadata included pertaining to the details of the asset on " "the backend storage array." msgstr "" "All volumes and snapshots created using the PowerMax for Cinder driver now " "have additional metadata included pertaining to the details of the asset on " "the backend storage array." msgid "" "Allow API user to remove the consistency group name or description " "information." msgstr "" "Allow API user to remove the consistency group name or description " "information." msgid "" "Allow for eradicating Pure Storage volumes, snapshots, and pgroups when " "deleting their Cinder counterpart." msgstr "" "Allow for eradicating Pure Storage volumes, snapshots, and pgroups when " "deleting their Cinder counterpart." msgid "Allow rbd driver to list manageable snapshots." msgstr "Allow RBD driver to list manageable snapshots." msgid "Allow rbd driver to list manageable volumes." msgstr "Allow RBD driver to list manageable volumes." msgid "Allow rbd driver to manage existing snapshot." 
msgstr "Allow RBD driver to manage existing snapshot." msgid "Allow rbd driver to report backend state." msgstr "Allow RBD driver to report backend state." msgid "Allow spaces when managing existing volumes with the HNAS iSCSI driver." msgstr "" "Allow spaces when managing existing volumes with the HNAS iSCSI driver." msgid "Allow the RBD driver to work with max_over_subscription_ratio." msgstr "Allow the RBD driver to work with max_over_subscription_ratio." msgid "" "Allow users to specify the copy speed while using Huawei driver to create " "volume from snapshot or clone volume, by the new added metadata 'copyspeed'. " "For example, user can add --metadata copyspeed=1 when creating volume from " "source volume/snapshot. The valid optional range of copyspeed is [1, 2, 3, " "4], respectively representing LOW, MEDIUM, HIGH and HIGHEST." msgstr "" "Allow users to specify the copy speed while using Huawei driver to create " "volume from snapshot or clone volume, by the new added metadata 'copyspeed'. " "For example, user can add --metadata copyspeed=1 when creating volume from " "source volume/snapshot. The valid optional range of copyspeed is [1, 2, 3, " "4], respectively representing LOW, MEDIUM, HIGH and HIGHEST." msgid "" "Also some options are renamed (note that 3 of them were both moved and " "renamed):" msgstr "" "Also some options are renamed (note that 3 of them were both moved and " "renamed):" msgid "" "Although the encryption metadata consumes less than 1% of the volume, " "suppose that a user wants to retype a volume of a non-encrypted type to an " "encrypted type of the same size. If the non-encrypted volume is \"full\", " "we are in the position of trying to fit 101% of its capacity into the " "encrypted volume, which is not possible under the current laws of physics, " "and the retype should fail (see `Known Issues `_ for " "volume encryption in the cinder documentation)." msgstr "" "Although the encryption metadata consumes less than 1% of the volume, " "suppose that a user wants to retype a volume of a non-encrypted type to an " "encrypted type of the same size. If the non-encrypted volume is \"full\", " "we are in the position of trying to fit 101% of its capacity into the " "encrypted volume, which is not possible under the current laws of physics, " "and the retype should fail (see `Known Issues `_ for " "volume encryption in the cinder documentation)." msgid "" "An error has been corrected in the EMC ScaleIO driver that had caused all " "volumes to be provisioned at 'thick' even if user had specificed 'thin'." msgstr "" "An error has been corrected in the EMC ScaleIO driver that had caused all " "volumes to be provisioned at 'thick' even if user had specified 'thin'." msgid "" "An example of the SQL commands to generate these indexes can be found in the " "`specific troubleshooting guide `_." msgstr "" "An example of the SQL commands to generate these indexes can be found in the " "`specific troubleshooting guide `_." msgid "" "An incorrect lock in the remotefs code, which is used for the NFS driver, " "and other similar drivers, resulted in concurrent clone volume operations " "failing. create_cloned_volume now locks on the source volume id, meaning " "multiple clone operations from the same source volume are serialized." msgstr "" "An incorrect lock in the remotefs code, which is used for the NFS driver, " "and other similar drivers, resulted in concurrent clone volume operations " "failing. 
create_cloned_volume now locks on the source volume id, meaning " "multiple clone operations from the same source volume are serialized." msgid "" "An operator may enable this functionality by creating a specific volume type " "with the property::" msgstr "" "An operator may enable this functionality by creating a specific volume type " "with the property::" msgid "" "Any Volume Drivers configured in the DEFAULT config stanza should be moved " "to their own stanza and enabled via the enabled_backends config option. The " "older style of config with DEFAULT is deprecated and will be removed in " "future releases." msgstr "" "Any Volume Drivers configured in the DEFAULT config stanza should be moved " "to their own stanza and enabled via the enabled_backends config option. The " "older style of config with DEFAULT is deprecated and will be removed in " "future releases." msgid "As a reminder, the finer-grained policies are:" msgstr "As a reminder, the finer-grained policies are:" msgid "" "As an example one provider may have roles called viewer, admin, type_viewer, " "and say type_admin. Admin and type_admin can create, delete, update types. " "Everyone can list the storage types. Admin, type_viewer, and type_admin can " "view the extra_specs." msgstr "" "As an example one provider may have roles called viewer, admin, type_viewer, " "and say type_admin. Admin and type_admin can create, delete, update types. " "Everyone can list the storage types. Admin, type_viewer, and type_admin can " "view the extra_specs." msgid "" "As cinder-backup was strongly reworked in this release, the recommended " "upgrade order when executing live (rolling) upgrade is c-api->c-sch->c-vol-" ">c-bak." msgstr "" "As cinder-backup was strongly reworked in this release, the recommended " "upgrade order when executing live (rolling) upgrade is c-api->c-sch->c-vol-" ">c-bak." msgid "" "As of API version 3.66, volume snapshots of in-use volumes can be created " "without passing the 'force' flag, and the 'force' flag is considered invalid " "for this request. For backward compatability, however, when the 'force' " "flag is passed with a value evaluating to True, it is silently ignored." msgstr "" "As of API version 3.66, volume snapshots of in-use volumes can be created " "without passing the 'force' flag, and the 'force' flag is considered invalid " "for this request. For backward compatibility, however, when the 'force' " "flag is passed with a value evaluating to True, it is silently ignored." msgid "" "As part of the fix for `Bug #1996188 `_, cinder is now more strict in checking that the " "``disk_format`` recorded for an image (as revealed by the Image Service API " "image-show response) matches what cinder detects when it downloads the " "image. Thus, some requests to create a volume from a source image that had " "previously succeeded may fail with an ``ImageUnacceptable`` error." msgstr "" "As part of the fix for `Bug #1996188 `_, Cinder is now more strict in checking that the " "``disk_format`` recorded for an image (as revealed by the Image Service API " "image-show response) matches what Cinder detects when it downloads the " "image. Thus, some requests to create a volume from a source image that had " "previously succeeded may fail with an ``ImageUnacceptable`` error." 
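# A hedged before/after sketch of the DEFAULT-stanza migration described above;
# the backend name ``lvm-1`` and the LVM driver are illustrative choices.
#   Deprecated style:
#   [DEFAULT]
#   volume_driver = cinder.volume.drivers.lvm.LVMVolumeDriver
#   Preferred style:
#   [DEFAULT]
#   enabled_backends = lvm-1
#   [lvm-1]
#   volume_backend_name = lvm-1
#   volume_driver = cinder.volume.drivers.lvm.LVMVolumeDriver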
msgid "" "As part of the fix for `Bug #2004555 `_, cinder now rejects user attachment delete requests for " "attachments that are being used by nova instances to ensure that no leftover " "devices are produced on the compute nodes which could be used to access " "another project's volumes. Terminate connection, detach, and force detach " "volume actions (calls that are not usually made by users directly) are, in " "most cases, not allowed for users." msgstr "" "As part of the fix for `Bug #2004555 `_, Cinder now rejects user attachment delete requests for " "attachments that are being used by nova instances to ensure that no leftover " "devices are produced on the compute nodes which could be used to access " "another project's volumes. Terminate connection, detach, and force detach " "volume actions (calls that are not usually made by users directly) are, in " "most cases, not allowed for users." msgid "" "As part of the fix for `Bug #2004555 `_, cinder now rejects user attachment delete requests for " "attachments that are being used by nova instances to ensure that no leftover " "devices are produced on the compute nodes which could be used to access " "another project's volumes. Terminate connection, detach, and force detach " "volume actions are not allowed for users." msgstr "" "As part of the fix for `Bug #2004555 `_, Cinder now rejects user attachment delete requests for " "attachments that are being used by nova instances to ensure that no leftover " "devices are produced on the compute nodes which could be used to access " "another project's volumes. Terminate connection, detach, and force detach " "volume actions are not allowed for users." msgid "" "Availability zones may now be configured per backend in a multi-backend " "configuration. Individual backend sections can now set the configuration " "option ``backend_availability_zone``. If set, this value will override the " "[DEFAULT] ``storage_availability_zone`` setting." msgstr "" "Availability zones may now be configured per backend in a multi-backend " "configuration. Individual backend sections can now set the configuration " "option ``backend_availability_zone``. If set, this value will override the " "[DEFAULT] ``storage_availability_zone`` setting." msgid "Backend driver for Scality SRB has been removed." msgstr "Backend driver for Scality SRB has been removed." msgid "Backup driver initialization using module name is deprecated." msgstr "Backup driver initialisation using module name is deprecated." msgid "" "Backup service to driver mapping is deprecated. If you use old values like " "'cinder.backup.services.swift' or 'cinder.backup.services.ceph' it should be " "changed to 'cinder.backup.drivers.swift' or 'cinder.backup.drivers.ceph' " "accordingly to get your backup service working in the 'R' release." msgstr "" "Backup service to driver mapping is deprecated. If you use old values like " "'cinder.backup.services.swift' or 'cinder.backup.services.ceph' it should be " "changed to 'cinder.backup.drivers.swift' or 'cinder.backup.drivers.ceph' " "accordingly to get your backup service working in the 'R' release." msgid "" "Backup service to driver mapping is removed. If you use old values like " "'cinder.backup.services.swift' or 'cinder.backup.services.ceph' it should be " "changed to 'cinder.backup.drivers.swift' or 'cinder.backup.drivers.ceph' " "accordingly to get your backup service working." msgstr "" "Backup service to driver mapping is removed. 
If you use old values like " "'cinder.backup.services.swift' or 'cinder.backup.services.ceph' it should be " "changed to 'cinder.backup.drivers.swift' or 'cinder.backup.drivers.ceph' " "accordingly to get your backup service working." msgid "" "Before the Rocky release, this driver was named ``cinder.volume.drivers." "dell_emc.scaleio.driver.ScaleIODriver``. That name was deprecated in the " "Rocky release. In this release, the pre-Rocky name for this driver is no " "longer recognized and support for configuration options beginning with " "``sio`` has been removed. Thus any driver configuration options that start " "with ``sio`` must be updated to ``powerflex`` in your ``cinder.conf`` before " "you deploy this release." msgstr "" "Before the Rocky release, this driver was named ``cinder.volume.drivers." "dell_emc.scaleio.driver.ScaleIODriver``. That name was deprecated in the " "Rocky release. In this release, the pre-Rocky name for this driver is no " "longer recognized and support for configuration options beginning with " "``sio`` has been removed. Thus any driver configuration options that start " "with ``sio`` must be updated to ``powerflex`` in your ``cinder.conf`` before " "you deploy this release." msgid "Before upgrading from Stein, you did **not** purge the cinder database" msgstr "Before upgrading from Stein, you did **not** purge the Cinder database" msgid "" "Beginning with Cinder 12.0.0, you only need to specify policies in your " "policy file that you want to **differ** from the default values. Unspecified " "policies will use the default values *defined in the code*. Given that a " "default value *must* be specified *in the code* when a new policy is " "introduced, the ``default`` policy, which was formerly used as a catch-all " "for policy targets that were not defined elsewhere in the policy file, has " "no effect. We mention this because an old upgrade strategy was to use the " "policy file from the previous release with ``\"default\": \"role:admin\"`` " "(or ``\"default\": \"!\"``) so that newly introduced actions would be " "blocked from end users until the operator had time to assess the " "implications of exposing these actions. This strategy no longer works. " "Hopefully this isn't a problem because we're defining sensible defaults in " "the code. It would be a good idea, however, to generate the sample policy " "file with each release (see instructions above) to verify this for yourself." msgstr "" "Beginning with Cinder 12.0.0, you only need to specify policies in your " "policy file that you want to **differ** from the default values. Unspecified " "policies will use the default values *defined in the code*. Given that a " "default value *must* be specified *in the code* when a new policy is " "introduced, the ``default`` policy, which was formerly used as a catch-all " "for policy targets that were not defined elsewhere in the policy file, has " "no effect. We mention this because an old upgrade strategy was to use the " "policy file from the previous release with ``\"default\": \"role:admin\"`` " "(or ``\"default\": \"!\"``) so that newly introduced actions would be " "blocked from end users until the operator had time to assess the " "implications of exposing these actions. This strategy no longer works. " "Hopefully this isn't a problem because we're defining sensible defaults in " "the code. It would be a good idea, however, to generate the sample policy " "file with each release (see instructions above) to verify this for yourself." 
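# A minimal sketch of per-backend availability zones as described in the
# ``backend_availability_zone`` entry earlier; the section and zone names are
# illustrative assumptions.
#   [DEFAULT]
#   storage_availability_zone = az-default
#   [backend-az2]
#   backend_availability_zone = az-2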
msgid "" "Beginning with Cinder version 12.0.0, as part of the Queens release " "\"policies in code\" community effort, Cinder has had the ability to run " "without a policy file because sensible default values are specified in the " "code. Customizing the policies in effect at your site, however, still " "requires a policy file. The default location of this file has been ``/etc/" "cinder/policy.json`` (although the documentation has indicated otherwise). " "With this release, the default location of this file is changed to ``/etc/" "cinder/policy.yaml``." msgstr "" "Beginning with Cinder version 12.0.0, as part of the Queens release " "\"policies in code\" community effort, Cinder has had the ability to run " "without a policy file because sensible default values are specified in the " "code. Customizing the policies in effect at your site, however, still " "requires a policy file. The default location of this file has been ``/etc/" "cinder/policy.json`` (although the documentation has indicated otherwise). " "With this release, the default location of this file is changed to ``/etc/" "cinder/policy.yaml``." msgid "" "Beginning with microversion 3.60, users may apply time comparison filters to " "the volume summary list and volume detail list requests by using the " "``created_at`` or ``updated_at`` fields. Time must be expressed in ISO 8601 " "format. See the `Block Storage API v3 Reference `_ for details." msgstr "" "Beginning with microversion 3.60, users may apply time comparison filters to " "the volume summary list and volume detail list requests by using the " "``created_at`` or ``updated_at`` fields. Time must be expressed in ISO 8601 " "format. See the `Block Storage API v3 Reference `_ for details." msgid "" "Beginning with the Train release, untyped volumes (that is, volumes with no " "volume-type) have been disallowed. To facilitate this, a ``__DEFAULT__`` " "volume-type was included as part of the Train database migration. In this " "release, handling of the default volume-type has been improved:" msgstr "" "Beginning with the Train release, untyped volumes (that is, volumes with no " "volume-type) have been disallowed. To facilitate this, a ``__DEFAULT__`` " "volume-type was included as part of the Train database migration. In this " "release, handling of the default volume-type has been improved:" msgid "Better cleanup handling in the NetApp E-Series driver." msgstr "Better clean-up handling in the NetApp E-Series driver." msgid "Block device driver" msgstr "Block device driver" msgid "" "BlockDeviceDriver was deprecated in Ocata release and marked as " "'unsupported'. There is no CI for it too. If you used this driver before you " "have to migrate your volumes to LVM with LIO target yourself before " "upgrading to Queens release to get your volumes working." msgstr "" "BlockDeviceDriver was deprecated in Ocata release and marked as " "'unsupported'. There is no CI for it too. If you used this driver before you " "have to migrate your volumes to LVM with LIO target yourself before " "upgrading to Queens release to get your volumes working." msgid "Blockbridge" msgstr "Blockbridge" msgid "" "BoolOpt ``datera_acl_allow_all`` is changed to a volume type extra spec " "option-- ``DF:acl_allow_all``" msgstr "" "BoolOpt ``datera_acl_allow_all`` is changed to a volume type extra spec " "option-- ``DF:acl_allow_all``" msgid "Brocade Fibre Channel Zone Manager driver" msgstr "Brocade Fibre Channel Zone Manager driver" msgid "Broke Datera driver up into modules." 
msgstr "Broke Datera driver up into modules." msgid "Bug Fixes" msgstr "Bug Fixes" msgid "" "By default, ``snapvx_unlink_symforce`` is ``False``. Use extreme caution " "with this option. If used when a link is copy in progress or when a restore " "is restore in progress, this will cause an incomplete copy and data on the " "copy target would not be usable." msgstr "" "By default, ``snapvx_unlink_symforce`` is ``False``. Use extreme caution " "with this option. If used when a link is copy in progress or when a restore " "is restore in progress, this will cause an incomplete copy and data on the " "copy target would not be usable." msgid "Capabilites List for Datera Volume Drivers" msgstr "Capabilities List for Datera Volume Drivers" msgid "Capacity reporting fixed with Huawei backend drivers." msgstr "Capacity reporting fixed with Huawei backend drivers." msgid "" "Catch argument exceptions when configuring multiattach for rbd volumes. This " "allows multiattach images with flags already set to continue instead of " "raising an exception and failing." msgstr "" "Catch argument exceptions when configuring multiattach for RBD volumes. This " "allows multiattach images with flags already set to continue instead of " "raising an exception and failing." msgid "" "Ceph backup driver `Bug #1895035 `_: Fixed restore full backups to non RBD volumes." msgstr "" "Ceph backup driver `Bug #1895035 `_: Fixed restore full backups to non RBD volumes." msgid "" "Ceph driver: Add config option to keep only the last n snapshots per backup " "to save disk space on the source volume storage. Enabling this option can " "cause incremental backups to become full backups instead under special " "circumstances. Please take a look at the Ceph backup driver docs for more " "information." msgstr "" "Ceph driver: Add config option to keep only the last n snapshots per backup " "to save disk space on the source volume storage. Enabling this option can " "cause incremental backups to become full backups instead under special " "circumstances. Please take a look at the Ceph backup driver docs for more " "information." msgid "" "Ceph/RBD volume backends will now assume exclusive cinder pools, as if they " "had ``rbd_exclusive_cinder_pool = true`` in their configuration." msgstr "" "Ceph/RBD volume backends will now assume exclusive cinder pools, as if they " "had ``rbd_exclusive_cinder_pool = true`` in their configuration." msgid "" "Ceph/RBD: Fix Cinder becoming non-responsive and stats gathering taking " "longer that its period. (`Related-Bug #1704106 `_)" msgstr "" "Ceph/RBD: Fix Cinder becoming non-responsive and stats gathering taking " "longer that its period. (`Related-Bug #1704106 `_)" msgid "" "Ceph/RBD: Fix cinder taking a long time to start for Ceph/RBD backends. " "(`Related-Bug #1704106 `_)" msgstr "" "Ceph/RBD: Fix Cinder taking a long time to start for Ceph/RBD backends. " "(`Related-Bug #1704106 `_)" msgid "Changes config option default for datera_num_replicas from 1 to 3" msgstr "Changes config option default for datera_num_replicas from 1 to 3" msgid "" "Cinder FC Zone Manager Friendly Zone Names This feature adds support for " "Fibre Channel user friendly zone names if implemented by the volume driver. " "If the volume driver passes the host name and storage system to the Fibre " "Channel Zone Manager in the conn_info structure, the zone manager will use " "these names in structuring the zone name to provide a user friendly zone " "name." 
msgstr "" "Cinder FC Zone Manager Friendly Zone Names This feature adds support for " "Fibre Channel user friendly zone names if implemented by the volume driver. " "If the volume driver passes the host name and storage system to the Fibre " "Channel Zone Manager in the conn_info structure, the zone manager will use " "these names in structuring the zone name to provide a user friendly zone " "name." msgid "Cinder Release Notes" msgstr "Cinder Release Notes" msgid "" "Cinder TSM Backup Driver is deprecated and will be removed in Wallaby " "release." msgstr "" "Cinder TSM Backup Driver is deprecated and will be removed in Wallaby " "release." msgid "" "Cinder and Nova use the os-brick library to facilitate connections to " "volumes. The ``nvmeof`` os-brick connector has been refactored over the " "past few development cycles. In between the time of the Yoga os-brick " "release (os-brick 5.2.0) and this cinder release, several bugs were " "discovered in this connector. You can find these in the bug tracker with " "this query:" msgstr "" "Cinder and Nova use the os-brick library to facilitate connections to " "volumes. The ``nvmeof`` os-brick connector has been refactored over the " "past few development cycles. In between the time of the Yoga os-brick " "release (os-brick 5.2.0) and this cinder release, several bugs were " "discovered in this connector. You can find these in the bug tracker with " "this query:" msgid "" "Cinder backup creation can now (since microversion 3.51) receive the " "availability zone where the backup should be stored." msgstr "" "Cinder backup creation can now (since microversion 3.51) receive the " "availability zone where the backup should be stored." msgid "" "Cinder backup now supports running multiple processes to make the most of " "the available CPU cores. Performance gains will be significant when running " "multiple concurrent backups/restores with compression. The number of " "processes is set with `backup_workers` configuration option." msgstr "" "Cinder backup now supports running multiple processes to make the most of " "the available CPU cores. Performance gains will be significant when running " "multiple concurrent backups/restores with compression. The number of " "processes is set with `backup_workers` configuration option." msgid "" "Cinder is now collecting capacity data, including virtual free capacity etc " "from the backends. A notification which includes that data is periodically " "emitted." msgstr "" "Cinder is now collecting capacity data, including virtual free capacity etc " "from the backends. A notification which includes that data is periodically " "emitted." msgid "" "Cinder no longer allows an incremental backup to be created while having the " "parent backup in another project." msgstr "" "Cinder no longer allows an incremental backup to be created while having the " "parent backup in another project." msgid "" "Cinder now allows for a minimum value when using the capacity based QoS in " "order to make sure small volumes can get a minimum allocation for them to be " "usable. The newly added QoS specs are `read_iops_sec_per_gb_min`, " "`write_iops_sec_per_gb_min`, `total_iops_sec_per_gb_min`, " "`read_bytes_sec_per_gb_min`, `write_bytes_sec_per_gb_min` and " "`total_bytes_sec_per_gb_min`" msgstr "" "Cinder now allows for a minimum value when using the capacity based QoS in " "order to make sure small volumes can get a minimum allocation for them to be " "usable. 
The newly added QoS specs are `read_iops_sec_per_gb_min`, " "`write_iops_sec_per_gb_min`, `total_iops_sec_per_gb_min`, " "`read_bytes_sec_per_gb_min`, `write_bytes_sec_per_gb_min` and " "`total_bytes_sec_per_gb_min`" msgid "" "Cinder now allows for capacity based QoS which can be useful in environments " "where storage performance scales with consumption (such as RBD backed " "storage). The newly added QoS specs are `read_iops_sec_per_gb`, " "`write_iops_sec_per_gb`, `total_iops_sec_per_gb`, `read_bytes_sec_per_gb`, " "`write_bytes_sec_per_gb` and `total_bytes_sec_per_gb`. These values will be " "multiplied by the size of the volume and passed to the consumer. For " "example, setting `total_iops_sec_per_gb` to 30 and setting " "`total_bytes_sec_per_gb` to `1048576` (1MB) then creating a 100 GB volume " "with that QoS will result in a volume with 3,000 total IOPs and 100MB/s " "throughput limit." msgstr "" "Cinder now allows for capacity based QoS which can be useful in environments " "where storage performance scales with consumption (such as RBD backed " "storage). The newly added QoS specs are `read_iops_sec_per_gb`, " "`write_iops_sec_per_gb`, `total_iops_sec_per_gb`, `read_bytes_sec_per_gb`, " "`write_bytes_sec_per_gb` and `total_bytes_sec_per_gb`. These values will be " "multiplied by the size of the volume and passed to the consumer. For " "example, setting `total_iops_sec_per_gb` to 30 and setting " "`total_bytes_sec_per_gb` to `1048576` (1MB) then creating a 100 GB volume " "with that QoS will result in a volume with 3,000 total IOPs and 100MB/s " "throughput limit." msgid "" "Cinder now defaults to using the Glance v2 API. The ``glance_api_version`` " "configuration option has been deprecated and will be removed in the 12.0.0 " "Queens release." msgstr "" "Cinder now defaults to using the Glance v2 API. The ``glance_api_version`` " "configuration option has been deprecated and will be removed in the 12.0.0 " "Queens release." msgid "Cinder now requires LVM version 2.02.107 or newer." msgstr "Cinder now requires LVM version 2.02.107 or newer." msgid "" "Cinder now stores the format of the backing file (raw or qcow2), for FS " "backends, in the volume admin metadata and includes the format in the " "connection_info returned in the Attachments API. Previously cinder tried to " "introspect the format, and under some circumstances, an incorrect format " "would be deduced. This will still be the case for legacy volumes. Explicitly " "storing the format will avoid this issue for newly created volumes. `See " "spec for more info `_." msgstr "" "Cinder now stores the format of the backing file (raw or qcow2), for FS " "backends, in the volume admin metadata and includes the format in the " "connection_info returned in the Attachments API. Previously Cinder tried to " "introspect the format, and under some circumstances, an incorrect format " "would be deduced. This will still be the case for legacy volumes. Explicitly " "storing the format will avoid this issue for newly created volumes. `See " "spec for more info `_." msgid "" "Cinder now support policy in code, which means if users don't need to modify " "any of the default policy rules, they do not need a policy file. Users can " "modify/generate a `policy.yaml` file which will override specific policy " "rules from their defaults." msgstr "" "Cinder now support policy in code, which means if users don't need to modify " "any of the default policy rules, they do not need a policy file. 
Users can " "modify/generate a `policy.yaml` file which will override specific policy " "rules from their defaults." msgid "" "Cinder now supports the use of 'max_over_subscription_ratio = auto' which " "automatically calculates the value for max_over_subscription_ratio in the " "scheduler." msgstr "" "Cinder now supports the use of 'max_over_subscription_ratio = auto' which " "automatically calculates the value for max_over_subscription_ratio in the " "scheduler." msgid "" "Cinder now uses the RBD trash functionality to handle some volume deletions. " "Therefore, deployments must either a) enable scheduled RBD trash purging on " "the RBD backend or b) enable the Cinder RBD driver's " "enable_deferred_deletion option to have Cinder purge the RBD trash. This " "adds the new configuration option 'rbd_concurrent_flatten_operations', which " "limits how many RBD flattens the driver will run simultaneously. This can be " "used to prevent flatten operations from consuming too much I/O capacity on " "the Ceph cluster. It defaults to 3." msgstr "" "Cinder now uses the RBD trash functionality to handle some volume deletions. " "Therefore, deployments must either a) enable scheduled RBD trash purging on " "the RBD backend or b) enable the Cinder RBD driver's " "enable_deferred_deletion option to have Cinder purge the RBD trash. This " "adds the new configuration option 'rbd_concurrent_flatten_operations', which " "limits how many RBD flattens the driver will run simultaneously. This can be " "used to prevent flatten operations from consuming too much I/O capacity on " "the Ceph cluster. It defaults to 3." msgid "" "Cinder now will return 415 (HTTPUnsupportedMediaType) when any unsupported " "content type is specified in request header." msgstr "" "Cinder now will return 415 (HTTPUnsupportedMediaType) when any unsupported " "content type is specified in request header." msgid "" "Cinder services are now automatically downgrading RPC messages to be " "understood by the oldest version of a service among all the deployment. " "Disabled and dead services are also taken into account. It is important to " "keep service list up to date, without old, unused records. This can be done " "using ``cinder-manage service remove`` command. Once situation is cleaned up " "services should be either restarted or ``SIGHUP`` signal should be issued to " "their processes to force them to reload version pins. Please note that " "cinder-api does not support ``SIGHUP`` signal." msgstr "" "Cinder services are now automatically downgrading RPC messages to be " "understood by the oldest version of a service among all the deployment. " "Disabled and dead services are also taken into account. It is important to " "keep service list up to date without old unused records. This can be done " "using ``cinder-manage service remove`` command. Once the situation is " "cleaned up services should be either restarted or the ``SIGHUP`` signal " "should be issued to their processes to force them to reload. Please note " "that cinder-api does not support ``SIGHUP`` signal." msgid "" "Cinder stopped supporting single-backend configurations in Ocata. However, " "sample ``cinder.conf`` was still generated with driver-related options in " "``[DEFAULT]`` section, where those options had no effect at all. Now all of " "driver options are listed in ``[backend_defaults]`` section, that indicates " "that those options are effective only in this section and " "``[]`` sections listed in ``enabled_backends``." 
msgstr "" "Cinder stopped supporting single-backend configurations in Ocata. However, " "sample ``cinder.conf`` was still generated with driver-related options in " "``[DEFAULT]`` section, where those options had no effect at all. Now all of " "driver options are listed in ``[backend_defaults]`` section, that indicates " "that those options are effective only in this section and " "``[]`` sections listed in ``enabled_backends``." msgid "" "Cinder validates proposed Glance store identifiers by contacting Glance at " "the time the ``image_service:store_id`` is added to a volume-type's extra-" "specs. Thus the Image Service API must be available when a volume-type is " "updated." msgstr "" "Cinder validates proposed Glance store identifiers by contacting Glance at " "the time the ``image_service:store_id`` is added to a volume-type's extra-" "specs. Thus the Image Service API must be available when a volume-type is " "updated." msgid "Cinder will now consume quota when importing new backup resource." msgstr "Cinder will now consume quota when importing new backup resource." msgid "" "Cinder will now correctly read Keystone's endpoint for quota calls from " "keystone_authtoken.auth_uri instead of keymgr.encryption_auth_url config " "option." msgstr "" "Cinder will now correctly read Keystone's endpoint for quota calls from " "keystone_authtoken.auth_uri instead of keymgr.encryption_auth_url config " "option." msgid "" "Cinder's Google backup driver is now called gcs, so ``backup_driver`` " "configuration for Google Cloud Storage should be updated from ``cinder." "backup.drivers.google`` to ``cinder.backup.driver.gcs``." msgstr "" "Cinder's Google backup driver is now called gcs, so ``backup_driver`` " "configuration for Google Cloud Storage should be updated from ``cinder." "backup.drivers.google`` to ``cinder.backup.driver.gcs``." msgid "" "Cinder-manage DB sync command can now bump the RPC and Objects versions of " "the services to avoid a second restart when doing offline upgrades." msgstr "" "Cinder-manage DB sync command can now bump the RPC and Objects versions of " "the services to avoid a second restart when doing offline upgrades." msgid "Clone a volume" msgstr "Clone a volume" msgid "Cloning of consistency group added to EMC VNX backend driver." msgstr "Cloning of consistency group added to EMC VNX backend driver." msgid "Coho" msgstr "Coho" msgid "Command: ``cinder-manage volume update_service``" msgstr "Command: ``cinder-manage volume update_service``" msgid "Configrable migration rate in VNX driver via metadata" msgstr "Configurable migration rate in VNX driver via metadata" msgid "" "Configuration option ``iscsi_secondary_ip_addresses`` is deprecated in favor " "of ``target_secondary_ip_addresses`` to follow the same naming convention of " "``target_ip_address``." msgstr "" "Configuration option ``iscsi_secondary_ip_addresses`` is deprecated in " "favour of ``target_secondary_ip_addresses`` to follow the same naming " "convention of ``target_ip_address``." msgid "" "Configuration options for the DRBD driver that will be applied to DRBD " "resources; the default values should be okay for most installations." msgstr "" "Configuration options for the DRBD driver that will be applied to DRBD " "resources; the default values should be okay for most installations." msgid "" "Configurations that are setting backend config in ``[DEFAULT]`` section are " "now not supported. You should use ``enabled_backends`` option to set up " "backends." 
msgstr "" "Configurations that are setting backend config in ``[DEFAULT]`` section are " "now not supported. You should use ``enabled_backends`` option to set up " "backends." msgid "" "Configuring Volume Drivers in the DEFAULT config stanza is not going to be " "maintained and will be removed in the next release. All backends should use " "the enabled_backends config option with separate stanza's for each." msgstr "" "Configuring Volume Drivers in the DEFAULT config stanza is not going to be " "maintained and will be removed in the next release. All backends should use " "the enabled_backends config option with separate stanzas for each." msgid "" "Consistency group creation previously scheduled at the pool level. Now it is " "fixed to schedule at the backend level as designed." msgstr "" "Consistency group creation previously scheduled at the pool level. Now it is " "fixed to schedule at the backend level as designed." msgid "" "Consistency group support has been added to the LeftHand backend driver." msgstr "" "Consistency group support has been added to the LeftHand backend driver." msgid "Corrected quota usage when transferring a volume between tenants." msgstr "Corrected quota usage when transferring a volume between tenants." msgid "Corrected support to force detach a volume from all hosts on Unity." msgstr "Corrected support to force detach a volume from all hosts on Unity." msgid "" "Create CG Snapshot creates either in the CG or the groups table depending on " "where the CG is." msgstr "" "Create CG Snapshot creates either in the CG or the groups table depending on " "where the CG is." msgid "" "Create CG from Source creates in either the CG or the groups table depending " "on the source." msgstr "" "Create CG from Source creates in either the CG or the groups table depending " "on the source." msgid "Create CG only creates in the groups table." msgstr "Create CG only creates in the groups table." msgid "Create Volume adds the volume either to the CG or the group." msgstr "Create Volume adds the volume either to the CG or the group." msgid "Create a volume from a snapshot" msgstr "Create a volume from a snapshot" msgid "" "Create a volume: ``POST /v3/volumes`` with an ``imageRef`` attribute in the " "request body. This will result in a 202 (Accepted) response, but if the " "image's ``disk_format`` would require conversion to be written to the " "volume, the volume will go to ``error`` status." msgstr "" "Create a volume: ``POST /v3/volumes`` with an ``imageRef`` attribute in the " "request body. This will result in a 202 (Accepted) response, but if the " "image's ``disk_format`` would require conversion to be written to the " "volume, the volume will go to ``error`` status." msgid "Create generic volume group from source" msgstr "Create generic volume group from source" msgid "" "Create group from group snapshot (`#1867906 `_)" msgstr "" "Create group from group snapshot (`#1867906 `_)" msgid "Create volume snapshots" msgstr "Create volume snapshots" msgid "" "Creating a new volume from an image that was created from an encrypted " "Cinder volume now succeeds." msgstr "" "Creating a new volume from an image that was created from an encrypted " "Cinder volume now succeeds." msgid "" "Creating a volume of an encrypted volume type from an image in the Image " "service (Glance) using the generic NFS driver results in an unusable " "volume. The cinder team is working on a solution which is expected to be " "backported to a future release in the Xena series. 
The issue is being " "tracked as `Bug #1888680 `_." msgstr "" "Creating a volume of an encrypted volume type from an image in the Image " "service (Glance) using the generic NFS driver results in an unusable " "volume. The Cinder team is working on a solution which is expected to be " "backported to a future release in the Xena series. The issue is being " "tracked as `Bug #1888680 `_." msgid "" "Creating a volume of an encrypted volume type from an image in the Image " "service (Glance) using the generic NFS driver results in an unusable " "volume. The cinder team is working on a solution which is expected to be " "backported to a future release in the Yoga series. The issue is being " "tracked as `Bug #1888680 `_." msgstr "" "Creating a volume of an encrypted volume type from an image in the Image " "service (Glance) using the generic NFS driver results in an unusable " "volume. The Cinder team is working on a solution which is expected to be " "backported to a future release in the Yoga series. The issue is being " "tracked as `Bug #1888680 `_." msgid "Critical Issues" msgstr "Critical Issues" msgid "Current Series Release Notes" msgstr "Current Series Release Notes" msgid "" "DS8K driver adds two new properties into extra-specs so that user can " "specify pool or lss or both of them to allocate volume in their expected " "area." msgstr "" "DS8K driver adds two new properties into extra-specs so that user can " "specify pool or lss or both of them to allocate volume in their expected " "area." msgid "" "Datera driver location has changed from cinder.volume.drivers .datera." "DateraDriver to cinder.volume.drivers.datera.datera_iscsi .DateraDriver." msgstr "" "Datera driver location has changed from cinder.volume.drivers .datera." "DateraDriver to cinder.volume.drivers.datera.datera_iscsi .DateraDriver." msgid "" "Datera driver: fixes in retyping / QoS, Glance interoperability, fast " "clones, IP pools, volume templates and initiators, unicode character " "support, scalability issues" msgstr "" "Datera driver: fixes in retyping / QoS, Glance interoperability, fast " "clones, IP pools, volume templates and initiators, Unicode character " "support, scalability issues" msgid "" "Default `policy.json` file is now removed as Cinder now uses default " "policies. A policy file is only needed if overriding one of the defaults." msgstr "" "Default `policy.json` file is now removed as Cinder now uses default " "policies. A policy file is only needed if overriding one of the defaults." msgid "" "Delete CG deletes from the CG or the groups table depending on where the CG " "is." msgstr "" "Delete CG deletes from the CG or the groups table depending on where the CG " "is." msgid "Delete volume snapshots" msgstr "Delete volume snapshots" msgid "" "Dell EMC PS Driver stats report has been fixed, now reports the " "`provisioned_capacity_gb` properly. Fixes bug 1719659." msgstr "" "Dell EMC PS Driver stats report has been fixed, now reports the " "`provisioned_capacity_gb` properly. Fixes bug 1719659." msgid "" "Dell EMC PS Series Driver code reporting volume stats is now optimized to " "return the information earlier and accelerate the process. This change fixes " "bug 1661154." msgstr "" "Dell EMC PS Series Driver code reporting volume stats is now optimized to " "return the information earlier and accelerate the process. This change fixes " "bug 1661154." msgid "" "Dell EMC PS Series Driver code was creating duplicate ACL records during " "live migration. 
Fixes the initialize_connection code to not create access " "record for a host if one exists previously. This change fixes bug 1726591." msgstr "" "Dell EMC PS Series Driver code was creating duplicate ACL records during " "live migration. Fixes the initialize_connection code to not create access " "record for a host if one exists previously. This change fixes bug 1726591." msgid "" "Dell EMC PS Series Driver was creating unmanaged snapshots when extending " "volumes. Fixed it by adding the missing no-snap parameter. This change fixes " "bug 1720454." msgstr "" "Dell EMC PS Series Driver was creating unmanaged snapshots when extending " "volumes. Fixed it by adding the missing no-snap parameter. This change fixes " "bug 1720454." msgid "" "Dell EMC PS Series Driver was creating unmanaged snapshots when extending " "volumes. Fixed it by adding the missing no-snap parameter. This changes " "fixes bug 1720454." msgstr "" "Dell EMC PS Series Driver was creating unmanaged snapshots when extending " "volumes. Fixed it by adding the missing no-snap parameter. This changes " "fixes bug 1720454." msgid "" "Dell EMC PS Series storage driver is not supported and removed starting from " "the Ussuri release. It was marked as deprecated in the Train release." msgstr "" "Dell EMC PS Series storage driver is not supported and removed starting from " "the Ussuri release. It was marked as deprecated in the Train release." msgid "" "Dell EMC PS volume driver reports the total number of volumes on the backend " "in volume stats." msgstr "" "Dell EMC PS volume driver reports the total number of volumes on the backend " "in volume stats." msgid "" "Dell EMC PowerFlex driver: Report trimming/discard support to Nova and " "Cinder on thin volumes that don't have snapshots. Not doing trim on volumes " "with snapshots is the vendor's recommendation, but can be overriden with the " "``report_discard_supported`` configuration option." msgstr "" "Dell EMC PowerFlex driver: Report trimming/discard support to Nova and " "Cinder on thin volumes that don't have snapshots. Not doing trim on volumes " "with snapshots is the vendor's recommendation, but can be overridden with " "the ``report_discard_supported`` configuration option." msgid "" "Dell EMC PowerMax driver `bug #1980870 `_: Fixed potential deadlock when moving volumes between " "Storage Groups." msgstr "" "Dell EMC PowerMax driver `bug #1980870 `_: Fixed potential deadlock when moving volumes between " "Storage Groups." msgid "" "Dell EMC PowerMax driver now faciliates the user to override the short host " "name and port group name seen in PowerMax masking view and storage view " "terminology. This means the user can give more meaningful names, especially " "when the short host name exceeds 16 characters and the port group name " "exceeds 12 characters, which is the condition where the driver truncates " "these values." msgstr "" "Dell EMC PowerMax driver now facilitates the user to override the short host " "name and port group name seen in PowerMax masking view and storage view " "terminology. This means the user can give more meaningful names, especially " "when the short host name exceeds 16 characters and the port group name " "exceeds 12 characters, which is the condition where the driver truncates " "these values." msgid "" "Dell EMC PowerMax driver now supports Unisphere storage group and array " "tagging to allow the user to specify a user defined tag to facilitate easy " "access and classification." 
msgstr "" "Dell EMC PowerMax driver now supports Unisphere storage group and array " "tagging to allow the user to specify a user defined tag to facilitate easy " "access and classification." msgid "" "Dell EMC PowerMax driver: Added SRDF ``powermax:disable_protected_snap`` " "volume-type extra-spec property for the purpose of avoiding overconsumption " "on both source and target storage arrays." msgstr "" "Dell EMC PowerMax driver: Added SRDF ``powermax:disable_protected_snap`` " "volume-type extra-spec property for the purpose of avoiding overconsumption " "on both source and target storage arrays." msgid "" "Dell EMC PowerMax driver: Report trimming/discard support to Nova and Cinder." msgstr "" "Dell EMC PowerMax driver: Report trimming/discard support to Nova and Cinder." msgid "" "Dell EMC PowerStore driver: Report trimming/discard support to Nova and " "Cinder." msgstr "" "Dell EMC PowerStore driver: Report trimming/discard support to Nova and " "Cinder." msgid "Dell EMC PowerVault ME Series storage arrays are now supported." msgstr "Dell EMC PowerVault ME Series storage arrays are now supported." msgid "" "Dell EMC SC Driver: Fixes `bug 1822229 `__ to handle the volume mappings in the backend when a volume " "is attached to multiple instances on the same host." msgstr "" "Dell EMC SC Driver: Fixes `bug 1822229 `__ to handle the volume mappings in the backend when a volume " "is attached to multiple instances on the same host." msgid "" "Dell EMC SC driver correctly returns initialize_connection data when more " "than one IQN is attached to a volume. This fixes some random Nova Live " "Migration failures where the connection information being returned was for " "an IQN other than the one for which it was being requested." msgstr "" "Dell EMC SC driver correctly returns initialise_connection data when more " "than one IQN is attached to a volume. This fixes some random Nova Live " "Migration failures where the connection information being returned was for " "an IQN other than the one for which it was being requested." msgid "" "Dell EMC Scale IO Driver: Fixes `bug 1560649 ` for creating volumes with sizes greater than that of " "the original snapshot." msgstr "" "Dell EMC Scale IO Driver: Fixes `bug 1560649 ` for creating volumes with sizes greater than that of " "the original snapshot." msgid "" "Dell EMC ScaleIO has been rebranded to VxFlex OS. The drivers ``cinder." "volume.drivers.dell_emc.scaleio.driver.ScaleIODriver`` will now be updated " "to ``cinder.volume.drivers.dell_emc.vxflexos.driver.VxFlexOSDriver`` in " "cinder.conf. Driver configuration options that start with ``sio`` should " "also be updated to ``vxflexos``. Existing sio configuration options will " "continue to work but will be removed in the V release. Online documentation " "will also change to reflect these changes." msgstr "" "Dell EMC ScaleIO has been rebranded to VxFlex OS. The drivers ``cinder." "volume.drivers.dell_emc.scaleio.driver.ScaleIODriver`` will now be updated " "to ``cinder.volume.drivers.dell_emc.vxflexos.driver.VxFlexOSDriver`` in " "cinder.conf. Driver configuration options that start with ``sio`` should " "also be updated to ``vxflexos``. Existing sio configuration options will " "continue to work but will be removed in the V release. Online documentation " "will also change to reflect these changes." msgid "" "Dell EMC ScaleIO has been renamed to Dell EMC VxFlex OS. Documentation for " "the driver can be found under the new name. 
The driver maintains full " "backwards compatability with prior ScaleIO releases and no configuration " "changes are needed upon upgrade to the new version of the driver." msgstr "" "Dell EMC ScaleIO has been renamed to Dell EMC VxFlex OS. Documentation for " "the driver can be found under the new name. The driver maintains full " "backwards compatibility with prior ScaleIO releases and no configuration " "changes are needed upon upgrade to the new version of the driver." msgid "" "Dell EMC Unity Cinder driver allows enabling/disabling the SSL verification. " "Admin can set `True` or `False` for `driver_ssl_cert_verify` to enable or " "disable this function, alternatively set the `driver_ssl_cert_path=` " "for customized CA path. Both above 2 options should go under the driver " "section." msgstr "" "Dell EMC Unity Cinder driver allows enabling/disabling the SSL verification. " "Admin can set `True` or `False` for `driver_ssl_cert_verify` to enable or " "disable this function, alternatively set the `driver_ssl_cert_path=` " "for customized CA path. Both above 2 options should go under the driver " "section." msgid "" "Dell EMC Unity Driver: Add thick volume support. Refer to `Unity Cinder " "Configuration document `__ to create " "a thick volume." msgstr "" "Dell EMC Unity Driver: Add thick volume support. Refer to `Unity Cinder " "Configuration document `__ to create " "a thick volume." msgid "" "Dell EMC Unity Driver: Added consistency group replication support. The " "storops library version 1.2.3 or newer is required." msgstr "" "Dell EMC Unity Driver: Added consistency group replication support. The " "storops library version 1.2.3 or newer is required." msgid "Dell EMC Unity Driver: Added storage-assisted migration support." msgstr "Dell EMC Unity Driver: Added storage-assisted migration support." msgid "Dell EMC Unity Driver: Added volume replication support." msgstr "Dell EMC Unity Driver: Added volume replication support." msgid "" "Dell EMC Unity Driver: Adds support for removing empty host. The new option " "named `remove_empty_host` could be configured as `True` to notify Unity " "driver to remove the host after the last LUN is detached from it." msgstr "" "Dell EMC Unity Driver: Adds support for removing empty host. The new option " "named `remove_empty_host` could be configured as `True` to notify Unity " "driver to remove the host after the last LUN is detached from it." msgid "" "Dell EMC Unity Driver: Fixes `bug 1759175 `__ to detach the lun correctly when auto zone was enabled and " "the lun was the last one attached to the host." msgstr "" "Dell EMC Unity Driver: Fixes `bug 1759175 `__ to detach the LUN correctly when auto zone was enabled and " "the LUN was the last one attached to the host." msgid "" "Dell EMC Unity Driver: Fixes `bug 1773305 `__ to return the targets which connect to the logged-out " "initiators. Then the zone manager could clean up the FC zone based on the " "correct target wwns." msgstr "" "Dell EMC Unity Driver: Fixes `bug 1773305 `__ to return the targets which connect to the logged-out " "initiators. Then the zone manager could clean up the FC zone based on the " "correct target WWNs." msgid "Dell EMC Unity driver: Add compressed volume support." msgstr "Dell EMC Unity driver: Add compressed volume support." msgid "" "Dell EMC Unity driver: Add consistent group support. 
Users could create a " "group type supporting consistent groups with specification " "`'consistent_group_snapshot_enabled': True`, then any groups created of " "that group type are consistent groups, otherwise they are generic groups. " "The supported operations are: create/delete consistent groups, add volumes " "to and remove volumes from consistent groups, create/delete consistent group " "snapshots, create consistent groups from snapshots, clone consistent groups." msgstr "" "Dell EMC Unity driver: Add consistent group support. Users could create a " "group type supporting consistent groups with specification " "`'consistent_group_snapshot_enabled': True`, then any groups created of " "that group type are consistent groups, otherwise they are generic groups. " "The supported operations are: create/delete consistent groups, add volumes " "to and remove volumes from consistent groups, create/delete consistent group " "snapshots, create consistent groups from snapshots, clone consistent groups." msgid "" "Dell EMC Unity driver: Add efficient retype support when new type uses the " "same Unity device." msgstr "" "Dell EMC Unity driver: Add efficient retype support when new type uses the " "same Unity device." msgid "" "Dell EMC Unity driver: Add tiering policy configuration support for volume." msgstr "" "Dell EMC Unity driver: Add tiering policy configuration support for volume." msgid "" "Dell EMC Unity: Fixes bug 1775518 to make sure driver succeed to initialize " "even though the value of unity_io_ports and unity_storage_pool_names are " "empty" msgstr "" "Dell EMC Unity: Fixes bug 1775518 to make sure the driver succeeds in " "initializing even when the values of unity_io_ports and " "unity_storage_pool_names are empty" msgid "" "Dell EMC Unity: Implements `bp unity-multiattach-support `__ to support " "attaching a volume to multiple servers simultaneously." msgstr "" "Dell EMC Unity: Implements `bp unity-multiattach-support `__ to support " "attaching a volume to multiple servers simultaneously." msgid "" "Dell EMC VMAX driver has added list manageable volumes and snapshots support." msgstr "" "Dell EMC VMAX driver has added list manageable volumes and snapshots support." msgid "Dell EMC VMAX driver has added multiattach support." msgstr "Dell EMC VMAX driver has added multiattach support." msgid "" "Dell EMC VMAX driver has added support for failover to second instance of " "Unisphere." msgstr "" "Dell EMC VMAX driver has added support for failover to second instance of " "Unisphere." msgid "" "Dell EMC VMAX has been rebranded to PowerMax. The drivers ``cinder.volume." "drivers.dell_emc.vmax.iscsi.VMAXISCSIDriver`` and ``cinder.volume.drivers." "dell_emc.vmax.fc.VMAXFCDriver`` will now be updated to ``cinder.volume." "drivers.dell_emc.powermax. iscsi.PowerMaxISCSIDriver`` and ``cinder.volume." "drivers.dell_emc. powermax.fc.PowerMaxFCDriver`` respectively in cinder." "conf. Driver configuration options that start with ``vmax`` should also be " "updated to ``powermax``. Existing vmax configuration options will continue " "to work but will be removed in the Train release. Online documentation will " "also change to reflect these changes." msgstr "" "Dell EMC VMAX has been rebranded to PowerMax. The drivers ``cinder.volume." "drivers.dell_emc.vmax.iscsi.VMAXISCSIDriver`` and ``cinder.volume.drivers." "dell_emc.vmax.fc.VMAXFCDriver`` will now be updated to ``cinder.volume." "drivers.dell_emc.powermax. iscsi.PowerMaxISCSIDriver`` and ``cinder.volume." "drivers.dell_emc. 
powermax.fc.PowerMaxFCDriver`` respectively in cinder." "conf. Driver configuration options that start with ``vmax`` should also be " "updated to ``powermax``. Existing vmax configuration options will continue " "to work but will be removed in the Train release. Online documentation will " "also change to reflect these changes." msgid "" "Dell EMC VNX Cinder Driver: Fixes `bug 1794646 `__ to delete the LUN from the VNX storage. Because a " "temporary snapshot is created from the LUN during creating a volume from a " "snapshot and isn't deleted, the LUN cannot be deleted before its snapshot is " "deleted. The fix makes sure the temp snapshot is deleted." msgstr "" "Dell EMC VNX Cinder Driver: Fixes `bug 1794646 `__ to delete the LUN from the VNX storage. Because a " "temporary snapshot is created from the LUN during creating a volume from a " "snapshot and isn't deleted, the LUN cannot be deleted before its snapshot is " "deleted. The fix makes sure the temp snapshot is deleted." msgid "" "Dell EMC VNX Driver: Fix `bug 1796825 `__, adding an option named `vnx_async_migrate` to accept the " "default setting for async migration." msgstr "" "Dell EMC VNX Driver: Fix `bug 1796825 `__, adding an option named `vnx_async_migrate` to accept the " "default setting for async migration." msgid "" "Dell EMC VNX Driver: Fixes `bug 1817385 `__ to make sure the sg can be created again after it was " "destroyed under `destroy_empty_storage_group` setting to `True`." msgstr "" "Dell EMC VNX Driver: Fixes `bug 1817385 `__ to make sure the sg can be created again after it was " "destroyed under `destroy_empty_storage_group` setting to `True`." msgid "Dell EMC VNX driver: Enhances the performance of create/delete volume." msgstr "Dell EMC VNX driver: Enhances the performance of create/delete volume." msgid "" "Dell EMC VxFlex OS driver: This release contains a fix for `Bug #1823200 " "`_. See `OSSN-0086 `_ for details." msgstr "" "Dell EMC VxFlex OS driver: This release contains a fix for `Bug #1823200 " "`_. See `OSSN-0086 `_ for details." msgid "" "Dell EMC VxFlex OS has been rebranded to PowerFlex. The driver ``cinder." "volume.drivers.dell_emc.vxflexos.driver.VxFlexOSDriver`` has been renamed to " "``cinder.volume.drivers.dell_emc.powerflex.driver.PowerFlexDriver``. " "Although in this release the volume manager will recognize the old driver " "name, that functionality will be removed in the Wallaby release, and thus we " "recommend that you update the driver name in ``cinder.conf`` at your " "earliest convenience." msgstr "" "Dell EMC VxFlex OS has been rebranded to PowerFlex. The driver ``cinder." "volume.drivers.dell_emc.vxflexos.driver.VxFlexOSDriver`` has been renamed to " "``cinder.volume.drivers.dell_emc.powerflex.driver.PowerFlexDriver``. " "Although in this release the volume manager will recognise the old driver " "name, that functionality will be removed in the Wallaby release, and thus we " "recommend that you update the driver name in ``cinder.conf`` at your " "earliest convenience." msgid "Dell EMC XtremIO driver has added multiattach support." msgstr "Dell EMC XtremIO driver has added multiattach support." msgid "" "Dell PowerFlex driver `bug #1998136 `_: When using self signed certificates, the option sent to os-" "brick via the connection_properties was not correctly handled. It has now " "been fixed by adding the 'verify_certificate' and 'certificate_path' to the " "driver when initializing the connection." 
msgstr "" "Dell PowerFlex driver `bug #1998136 `_: When using self-signed certificates, the option sent to os-" "brick via the connection_properties was not correctly handled. It has now " "been fixed by adding the 'verify_certificate' and 'certificate_path' to the " "driver when initialising the connection." msgid "" "Dell PowerFlex driver: Enabled cinder volume active/active support. This " "allows users to configure Dell PowerFlex backends in cinder clustered " "environments. **Note that** active/active support is specific to OpenStack " "volume services as PowerFlex storage replication is asynchronous." msgstr "" "Dell PowerFlex driver: Enabled Cinder volume active/active support. This " "allows users to configure Dell PowerFlex backends in Cinder-clustered " "environments. **Note that** active/active support is specific to OpenStack " "volume services as PowerFlex storage replication is asynchronous." msgid "" "Dell PowerMax Driver `Bug #2045230 `_: Fixed the issue that Dell PowerMax SnapVx link fails as the " "linked device is not yet fully defined." msgstr "" "Dell PowerMax Driver `Bug #2045230 `_: Fixed the issue that Dell PowerMax SnapVx link fails as the " "linked device is not yet fully defined." msgid "" "Dell PowerMax Driver: introduced a new configuration option, " "``snapvx_unlink_symforce``, to address Bug #2045230. See the Bug Fixes " "section for details." msgstr "" "Dell PowerMax Driver: introduced a new configuration option, " "``snapvx_unlink_symforce``, to address Bug #2045230. See the Bug Fixes " "section for details." msgid "" "Dell PowerMax driver `bug #2033398 `_: Reduced the risk of failures when doing online migration. " "This change is to speed up the deletion of a volume which doesn't require to " "rename the volume prior to delete it. This removal will allow to save 15s in " "terms of operation time and avoid unnecessary pausing time during migration." msgstr "" "Dell PowerMax driver `bug #2033398 `_: Reduced the risk of failures when doing online migration. " "This change is to speed up the deletion of a volume which doesn't require to " "rename the volume prior to delete it. This removal will allow to save 15s in " "terms of operation time and avoid unnecessary pausing time during migration." msgid "" "Dell PowerMax driver `bug #2034937 `_: Fixed" msgstr "" "Dell PowerMax driver `bug #2034937 `_: Fixed" msgid "" "Dell PowerMax driver `bug #2051828 `_: The driver only recognized 10.0 as being Unisphere 10 and " "would try to use 9.2 for Unisphere 10.x (where x > 0), but now it correctly " "recognizes 10.x as being Unisphere 10." msgstr "" "Dell PowerMax driver `bug #2051828 `_: The driver only recognised 10.0 as being Unisphere 10 and " "would try to use 9.2 for Unisphere 10.x (where x > 0), but now it correctly " "recognises 10.x as being Unisphere 10." msgid "Dell PowerMax driver now supports Unisphere for PowerMax 10.0" msgstr "Dell PowerMax driver now supports Unisphere for PowerMax 10.0" msgid "" "Dell PowerMax driver: Enabled support for Active/Active to both FC and iSCSI " "driver. This allows users to configure Dell PowerMax backends in clustered " "environments." msgstr "" "Dell PowerMax driver: Enabled support for Active/Active to both FC and iSCSI " "driver. This allows users to configure Dell PowerMax backends in clustered " "environments." msgid "" "Dell PowerStore Driver `bug #2055022 `_: REST API calls to the PowerStore backend did not have a " "timeout set, which could result in cinder waiting forever. 
This fix " "introduces two configuration options, ``rest_api_call_connect_timeout`` and " "``rest_api_call_read_timeout``, to control timeouts when connecting to the " "backend. The default value of each is 30 seconds." msgstr "" "Dell PowerStore Driver `bug #2055022 `_: REST API calls to the PowerStore backend did not have a " "timeout set, which could result in Cinder waiting forever. This fix " "introduces two configuration options, ``rest_api_call_connect_timeout`` and " "``rest_api_call_read_timeout``, to control timeouts when connecting to the " "backend. The default value of each is 30 seconds." msgid "" "Dell PowerStore Driver: Added QoS (Quality of Service) support for " "PowerStore 4.0 or later versions." msgstr "" "Dell PowerStore Driver: Added QoS (Quality of Service) support for " "PowerStore 4.0 or later versions." msgid "Dell PowerStore driver: Added NVMe-TCP support." msgstr "Dell PowerStore driver: Added NVMe-TCP support." msgid "" "Dell PowerStore driver: Enabled cinder volume active/active support. This " "allows users to configure Dell PowerStore backends in cinder clustered " "environments." msgstr "" "Dell PowerStore driver: Enabled Cinder volume active/active support. This " "allows users to configure Dell PowerStore backends in Cinder-clustered " "environments." msgid "Dell PowerStore: Added NFS storage driver." msgstr "Dell PowerStore: Added NFS storage driver." msgid "" "Dell SC - Compression and Dedupe support added for Storage Centers that " "support the options." msgstr "" "Dell SC - Compression and Dedupe support added for Storage Centres that " "support the options." msgid "" "Dell SC - Volume and Group QOS support added for Storage Centers that " "support and have enabled the option." msgstr "" "Dell SC - Volume and Group QOS support added for Storage Centres that " "support and have enabled the option." msgid "" "Dell SC Cinder driver has limited support in a failed over state so " "thaw_backend has been implemented to reject the thaw call when in such a " "state." msgstr "" "Dell SC Cinder driver has limited support in a failed over state so " "thaw_backend has been implemented to reject the thaw call when in such a " "state." msgid "Dell SC Series Storage Driver (iSCSI, FC)" msgstr "Dell SC Series Storage Driver (iSCSI, FC)" msgid "Dell VNX Storage Driver (FC, iSCSI)" msgstr "Dell VNX Storage Driver (FC, iSCSI)" msgid "Dell XtremeIO Storage Driver (iSCSI, FC)" msgstr "Dell XtremeIO Storage Driver (iSCSI, FC)" msgid "" "DellEMC Unity: Fix bug `1825469 `_. The fix enables the ``force_delete_lun_in_storagegroup`` " "option to ``True`` by default, which makes sure that luns can deleted even " "when they are still in storage groups." msgstr "" "DellEMC Unity: Fix bug `1825469 `_. The fix enables the ``force_delete_lun_in_storagegroup`` " "option to ``True`` by default, which makes sure that LUNs can deleted even " "when they are still in storage groups." msgid "" "DellEMC Unity: The fix of bug `1825469 `_ changes the default value of the " "``force_delete_lun_in_storagegroup`` option from ``False`` to ``True``, " "which means luns will always be force deleted after upgrade." msgstr "" "DellEMC Unity: The fix of bug `1825469 `_ changes the default value of the " "``force_delete_lun_in_storagegroup`` option from ``False`` to ``True``, " "which means LUNs will always be force deleted after upgrade." 
msgid "" "Deployments doing continuous live upgrades from master branch should not " "upgrade into Ocata before doing an upgrade which includes all the Newton's " "RPC API version bump commits (scheduler, volume). If you're upgrading " "deployment in a release-to-release manner, then you can safely ignore this " "note." msgstr "" "Deployments doing continuous live upgrades from master branch should not " "upgrade into Ocata before doing an upgrade which includes all the Newton's " "RPC API version bump commits (scheduler, volume). If you're upgrading " "deployment in a release-to-release manner, then you can safely ignore this " "note." msgid "" "Deprecate NetApp NFS option `netapp_copyoffload_tool_path`. The tool is no " "longer available for downloading." msgstr "" "Deprecate NetApp NFS option `netapp_copyoffload_tool_path`. The tool is no " "longer available for download." msgid "" "Deprecate option `check_max_pool_luns_threshold`. The VNX driver will always " "check the threshold." msgstr "" "Deprecate option `check_max_pool_luns_threshold`. The VNX driver will always " "check the threshold." msgid "" "Deprecate the \"cinder-manage logs\" commands. These will be removed in a " "later release." msgstr "" "Deprecate the \"cinder-manage logs\" commands. These will be removed in a " "later release." msgid "Deprecated IBM driver _multipath_enabled config flags." msgstr "Deprecated IBM driver _multipath_enabled config flags." msgid "" "Deprecated config option `query_volume_filters` is removed now. Please, use " "config file described in resource_query_filters_file to configure allowed " "volume filters." msgstr "" "Deprecated config option `query_volume_filters` is removed now. Please, use " "config file described in resource_query_filters_file to configure allowed " "volume filters." msgid "Deprecated datera_api_version option." msgstr "Deprecated datera_api_version option." msgid "" "Deprecated the configuration option ``hnas_svcX_volume_type``. Use option " "``hnas_svcX_pool_name`` to indicate the name of the services (pools)." msgstr "" "Deprecated the configuration option ``hnas_svcX_volume_type``. Use option " "``hnas_svcX_pool_name`` to indicate the name of the services (pools)." msgid "" "Deprecated the configuration option ``nas_ip``. Use option ``nas_host`` to " "indicate the IP address or hostname of the NAS system." msgstr "" "Deprecated the configuration option ``nas_ip``. Use option ``nas_host`` to " "indicate the IP address or hostname of the NAS system." msgid "Deprecation Notes" msgstr "Deprecation Notes" msgid "" "Detaching volumes will fail if Nova is not `configured to send service " "tokens `_, please read the upgrade section for more information. " "(`Bug #2004555 `_)." msgstr "" "Detaching volumes will fail if Nova is not `configured to send service " "tokens `_, please read the upgrade section for more information. " "(`Bug #2004555 `_)." msgid "" "Detection of whether a URL includes a project_id is based on the value of a " "new ``project_id_regex`` option. The default value matches UUIDs created by " "keystone." msgstr "" "Detection of whether a URL includes a project_id is based on the value of a " "new ``project_id_regex`` option. The default value matches UUIDs created by " "Keystone." msgid "" "Disable creating volume with non cg_snapshot group_id in Storwize/SVC driver." msgstr "" "Disable creating volume with non cg_snapshot group_id in Storwize/SVC driver." msgid "Disable standard capabilities based on 3PAR licenses." 
msgstr "Disable standard capabilities based on 3PAR licenses." msgid "" "Drivers supporting consistent group snapshot in generic volume groups " "reports \"consistent_group_snapshot_enabled = True\" instead of " "\"consistencygroup_support = True\". As a result, a spec such as " "\"consistencygroup_support: ' True'\" in either group type or volume " "type will cause the scheduler not to choose the backend that does not report " "\"consistencygroup_support = True\". In order to create a generic volume " "group that supports consistent group snapshot, " "\"consistent_group_snapshot_enable: ' True'\" should be set in the group " "type specs and volume type extra specs, and \"consistencygroup_support: " "' True'\" should not be set in group type spec and volume type extra " "specs." msgstr "" "Drivers supporting consistent group snapshot in generic volume groups " "reports \"consistent_group_snapshot_enabled = True\" instead of " "\"consistencygroup_support = True\". As a result, a spec such as " "\"consistencygroup_support: ' True'\" in either group type or volume " "type will cause the scheduler not to choose the backend that does not report " "\"consistencygroup_support = True\". In order to create a generic volume " "group that supports consistent group snapshot, " "\"consistent_group_snapshot_enable: ' True'\" should be set in the group " "type specs and volume type extra specs, and \"consistencygroup_support: " "' True'\" should not be set in group type spec and volume type extra " "specs." msgid "" "Due to `Bug #1893107 `_, " "under specific circumstances, some operators may need to take actions " "outside the normal upgrade process to upgrade from Train to Ussuri. See the " "\"Upgrade Notes\" and \"Bug Fixes\" sections of these release notes for more " "details." msgstr "" "Due to `Bug #1893107 `_, " "under specific circumstances, some operators may need to take actions " "outside the normal upgrade process to upgrade from Train to Ussuri. See the " "\"Upgrade Notes\" and \"Bug Fixes\" sections of these release notes for more " "details." msgid "" "Due to `OSSN-0085 `_: Cinder " "configuration option can leak secret key from Ceph backend, deployers using " "the ``rbd_keyring_conf`` option are advised to stop using it immediately. " "The option has been deprecated for removal as of Ussuri and will be removed " "in the Victoria development cycle." msgstr "" "Due to `OSSN-0085 `_: Cinder " "configuration option can leak secret key from Ceph backend, deployers using " "the ``rbd_keyring_conf`` option are advised to stop using it immediately. " "The option has been deprecated for removal as of Ussuri and will be removed " "in the Victoria development cycle." msgid "" "Due to `OSSN-0085 `_: Cinder " "configuration option can leak secret key from Ceph backend, deployers using " "the ``rbd_keyring_conf`` option are advised to stop using it immediately. " "The option has been deprecated for removal early in the 'V' development " "cycle." msgstr "" "Due to `OSSN-0085 `_: Cinder " "configuration option can leak secret key from Ceph backend, deployers using " "the ``rbd_keyring_conf`` option are advised to stop using it immediately. " "The option has been deprecated for removal early in the 'V' development " "cycle." msgid "" "Due to the fix for `Bug #1740950 `_, the ``host_name`` field in any object in the " "``attachments`` array of the volume detail response is populated only when " "the call is made in an administrative context. Otherwise, its value is the " "JSON ``null`` value. 
This is consistent with prior API behavior, as it has " "always been possible for the value of that field to be ``null``." msgstr "" "Due to the fix for `Bug #1740950 `_, the ``host_name`` field in any object in the " "``attachments`` array of the volume detail response is populated only when " "the call is made in an administrative context. Otherwise, its value is the " "JSON ``null`` value. This is consistent with prior API behaviour, as it has " "always been possible for the value of that field to be ``null``." msgid "" "Due to the ibmnas (SONAS) driver being rendered redundant by the addition of " "NFS capabilities to the IBM GPFS driver, the ibmnas driver is being removed " "in the Mitaka release." msgstr "" "Due to the ibmnas (SONAS) driver being rendered redundant by the addition of " "NFS capabilities to the IBM GPFS driver, the ibmnas driver is being removed " "in the Mitaka release." msgid "" "EMC ScaleIO driver now uses the config option san_thin_provision to " "determine the default provisioning type." msgstr "" "EMC ScaleIO driver now uses the config option san_thin_provision to " "determine the default provisioning type." msgid "" "EMC VNX driver have been rebranded to Dell EMC VNX driver. Existing " "configurations will continue to work with the legacy name, but will need to " "be updated by the next release. User needs update ``volume_driver`` to " "``cinder.volume.drivers.dell_emc.vnx.driver.VNXDriver``." msgstr "" "EMC VNX driver has been rebranded to Dell EMC VNX driver. Existing " "configurations will continue to work with the legacy name, but will need to " "be updated by the next release. Users need to update ``volume_driver`` to " "``cinder.volume.drivers.dell_emc.vnx.driver.VNXDriver``." msgid "" "ETERNUS DX driver have two types of storage pools: RAID Group and " "ThinProvisioning Pool. Volumes can not be created in RAID Groups for the " "following situations:" msgstr "" "ETERNUS DX driver has two types of storage pools: RAID Group and " "ThinProvisioning Pool. Volumes cannot be created in RAID Groups for the " "following situations:" msgid "" "Enable backup snapshot optimal path by implementing attach and detach " "snapshot in the NEC driver." msgstr "" "Enable backup snapshot optimal path by implementing attach and detach " "snapshot in the NEC driver." msgid "" "Enable backup snapshot optimal path by implementing attach and detach " "snapshot in the VMAX driver." msgstr "" "Enable backup snapshot optimal path by implementing attach and detach " "snapshot in the VMAX driver." msgid "" "Enabled Cinder Multi-Attach capability in the Dell EMC Storage Center Cinder " "driver." msgstr "" "Enabled Cinder Multi-Attach capability in the Dell EMC Storage Centre Cinder " "driver." msgid "" "Enabled Guru Meditation Reports on Cinder wsgi. When running Cinder under " "WSGI, we might want to have Guru Meditation Reports as well as when running " "outside of WSGI." msgstr "" "Enabled Guru Meditation Reports on Cinder WSGI. When running Cinder under " "WSGI, we might want to have Guru Meditation Reports as well as when running " "outside of WSGI." msgid "" "Enabled a cloud operator to correctly manage policy for volume type " "operations. To permit volume type operations for specific user, you can for " "example do as follows." msgstr "" "Enabled a cloud operator to correctly manage policy for volume type " "operations. To permit volume type operations for a specific user, you can, " "for example, do as follows." msgid "Enabled multiattach capability for hpe3par driver." 
msgstr "Enabled multiattach capability for hpe3par driver." msgid "" "Everything in Cinder's release notes related to the High Availability Active-" "Active effort -preluded with \"HA A-A:\"- is work in progress and should not " "be used in production until it has been completed and the appropriate " "release note has been issued stating its readiness for production." msgstr "" "Everything in Cinder's release notes related to the High Availability Active-" "Active effort -preluded with \"HA A-A:\"- is work in progress and should not " "be used in production until it has been completed and the appropriate " "release note has been issued stating its readiness for production." msgid "Examples:" msgstr "Examples:" msgid "" "Existing vxFlex OS configuration options, whose usage was DEPRECATED in the " "Stein release, will no longer be recognized in this release. Thus all driver " "configuration options that start with ``vxflexos`` must be updated to " "``powerflex`` in your ``cinder.conf`` before you deploy this release." msgstr "" "Existing vxFlex OS configuration options, whose usage was DEPRECATED in the " "Stein release, will no longer be recognised in this release. Thus all driver " "configuration options that start with ``vxflexos`` must be updated to " "``powerflex`` in your ``cinder.conf`` before you deploy this release." msgid "" "Expanded volume transfer information. Starting with microversion 3.57, " "``source_project_id``, ``destination_project_id``, and ``accepted`` fields " "will be returned in the response of the volume transfer create, show, and " "list calls." msgstr "" "Expanded volume transfer information. Starting with microversion 3.57, " "``source_project_id``, ``destination_project_id``, and ``accepted`` fields " "will be returned in the response of the volume transfer create, show, and " "list calls." msgid "Extended Volume-Type Support for Datera Volume Drivers" msgstr "Extended Volume-Type Support for Datera Volume Drivers" msgid "" "Extra spec ``RESKEY:availability_zones`` will only be used for filtering " "backends when creating and retyping volumes." msgstr "" "Extra spec ``RESKEY:availability_zones`` will only be used for filtering " "backends when creating and retyping volumes." msgid "FC driver: 64" msgstr "FC driver: 64" msgid "FC driver: ``HBSD-{host}-{wwn}``" msgstr "FC driver: ``HBSD-{host}-{wwn}``" msgid "FC driver: ``{wwn}``" msgstr "FC driver: ``{wwn}``" msgid "FalconStor FSS" msgstr "FalconStor FSS" msgid "" "Filtering volumes by their display name now correctly handles display names " "with single and double quotes." msgstr "" "Filtering volumes by their display name now correctly handles display names " "with single and double quotes." msgid "" "First, some background. The Block Storage API supports the creation of " "volumes in gibibyte (GiB) units. When a volume of a non-encrypted volume " "type of size *n* is created, the volume contains *n* GiB of usable space. " "When a volume of an encrypted type is requested, however, the volume " "contains less than *n* GiB of usable space because the encryption metadata " "that must be stored within that volume in order for the volume to be usable " "consumes an amount of the otherwise usable space." msgstr "" "First, some background. The Block Storage API supports the creation of " "volumes in gibibyte (GiB) units. When a volume of a non-encrypted volume " "type of size *n* is created, the volume contains *n* GiB of usable space. 
" "When a volume of an encrypted type is requested, however, the volume " "contains less than *n* GiB of usable space because the encryption metadata " "that must be stored within that volume in order for the volume to be usable " "consumes an amount of the otherwise usable space." msgid "" "Fix DetachedInstanceError is not bound to a Session for VolumeAttachments. " "This affected VolumeList.get_all, and could make a service fail on startup " "and make it stay in down state." msgstr "" "Fix DetachedInstanceError is not bound to a Session for VolumeAttachments. " "This affected VolumeList.get_all, and could make a service fail on startup " "and make it stay in down state." msgid "" "Fix HPE 3PAR driver issue where volumes that were live migrated to it would " "end up being inaccessible. We would no longer be able to use the volume for " "any operation, such as attach, detach, delete, snapshot, etc. (bug 1697422)" msgstr "" "Fix HPE 3PAR driver issue where volumes that were live migrated to it would " "end up being inaccessible. We would no longer be able to use the volume for " "any operation, such as attach, detach, delete, snapshot, etc. (bug 1697422)" msgid "" "Fix NFS backup driver, we now support multiple backups on the same " "container, they are no longer overwritten." msgstr "" "Fix NFS backup driver, we now support multiple backups on the same " "container, they are no longer overwritten." msgid "" "Fix NetApp iSCSI and FC driver issues with custom initiator groups. (bug " "1697490)." msgstr "" "Fix NetApp iSCSI and FC driver issues with custom initiator groups. (bug " "1697490)." msgid "" "Fix SolidFire free_capacity_gb reporting and also reports " "thin_provisioning_support=True. This allow the use of Cinder scheduler's " "parameters for thin provisioning in the SolidFire plataform." msgstr "" "Fix SolidFire free_capacity_gb reporting and also reports " "thin_provisioning_support=True. This allow the use of Cinder scheduler's " "parameters for thin provisioning in the SolidFire platform." msgid "" "Fix `AttributeError` on the Brocade ZM driver when using setting REST_HTTP " "or REST_HTTPS as the fc_southbound_protocol option and an exception is " "raised by the client (Bug #1866860)." msgstr "" "Fix `AttributeError` on the Brocade ZM driver when using setting REST_HTTP " "or REST_HTTPS as the fc_southbound_protocol option and an exception is " "raised by the client (Bug #1866860)." msgid "" "Fix `UnboundLocalError` on the Brocade lookup driver on southbound client " "creation failure during the device mapping retrieval (Bug #1888550)." msgstr "" "Fix `UnboundLocalError` on the Brocade lookup driver on southbound client " "creation failure during the device mapping retrieval (Bug #1888550)." msgid "" "Fix ``unique_fqdn_network`` configuration option for the Kaminario driver, " "as it was being ignored when defined in the driver section, which used to " "work. (Bug #1886042)." msgstr "" "Fix ``unique_fqdn_network`` configuration option for the Kaminario driver, " "as it was being ignored when defined in the driver section, which used to " "work. (Bug #1886042)." msgid "" "Fix a quota usage error triggered by a non-admin user backing up an in-use " "volume. The forced backup uses a temporary volume, and quota usage was " "incorrectly updated when the temporary volume was deleted after the backup " "operation completed. Fixes `bug 1778774 `__." msgstr "" "Fix a quota usage error triggered by a non-admin user backing up an in-use " "volume. 
The forced backup uses a temporary volume, and quota usage was " "incorrectly updated when the temporary volume was deleted after the backup " "operation completed. Fixes `bug 1778774 `__." msgid "" "Fix bug `#1874134 `_, " "allowing an iSCSI or FCP volume to be extended to a size up to 16TB " "regardless of its original size, even if it's attached to an instance." msgstr "" "Fix bug `#1874134 `_, " "allowing an iSCSI or FCP volume to be extended to a size up to 16TB " "regardless of its original size, even if it's attached to an instance." msgid "" "Fix for Tintri image direct clone feature. Fix for the bug 1400966 prevents " "user from specifying image \"nfs share location\" as location value for an " "image. Now, in order to use Tintri image direct clone, user can specify " "\"provider_location\" in image metadata to specify image nfs share location. " "NFS share which hosts images should be specified in a file using " "tintri_image_shares_config config option." msgstr "" "Fix for Tintri image direct clone feature. Fix for the bug 1400966 prevents " "user from specifying image \"nfs share location\" as location value for an " "image. Now, in order to use Tintri image direct clone, user can specify " "\"provider_location\" in image metadata to specify image NFS share location. " "NFS share which hosts images should be specified in a file using " "tintri_image_shares_config config option." msgid "" "Fix issue with PureFCDriver where partially case sensitive comparison of " "connector wwpn could cause initialize_connection to fail when attempting to " "create duplicate Purity host." msgstr "" "Fix issue with PureFCDriver where partially case sensitive comparison of " "connector wwpn could cause initialize_connection to fail when attempting to " "create duplicate Purity host." msgid "" "Fix python 3 incompatibility issues preventing NetApp cDOT driver from " "generating EMS logging messages (Bug #1833115)." msgstr "" "Fix python 3 incompatibility issues preventing NetApp cDOT driver from " "generating EMS logging messages (Bug #1833115)." msgid "" "Fix python3 imcompability issues and make SolidFire driver fully compatible " "with python3." msgstr "" "Fix python3 incompatibility issues and make SolidFire driver fully compatible " "with python3." msgid "" "Fix revert to snapshot not working for non admin users when using the " "snapshot's name (bug #1889758)." msgstr "" "Fix revert to snapshot not working for non admin users when using the " "snapshot's name (bug #1889758)." msgid "" "Fix the HPE 3PAR driver's attempt to rename the backend volume after it was " "migrated. If the original volume resides on the same 3PAR backend then the " "pre and post migration volume names are swapped. Otherwise, the newly " "migrated volume is renamed to match the original name. (bug 1858119)" msgstr "" "Fix the HPE 3PAR driver's attempt to rename the backend volume after it was " "migrated. If the original volume resides on the same 3PAR backend then the " "pre and post migration volume names are swapped. Otherwise, the newly " "migrated volume is renamed to match the original name. 
(bug 1858119)" msgid "" "Fix the bug that Cinder can't support creating volume from Nova specific " "image which only includes ``snapshot-id`` metadata (Bug" msgstr "" "Fix the bug that Cinder can't support creating volume from Nova specific " "image which only includes ``snapshot-id`` metadata (Bug" msgid "" "Fix the bug that Cinder would commit quota twice in a clean environment when " "managing volume and snapshot resource (Bug" msgstr "" "Fix the bug that Cinder would commit quota twice in a clean environment when " "managing volume and snapshot resource (Bug" msgid "" "Fix the following volume image metadata endpoints returning None following " "policy enforcement failure:" msgstr "" "Fix the following volume image metadata endpoints returning None following " "policy enforcement failure:" msgid "" "Fix the way encryption key IDs are managed for encrypted volume backups. " "When creating a backup, the volume's encryption key is cloned and assigned a " "new key ID. The backup's cloned key ID is now stored in the backup database " "so that it can be deleted whenever the backup is deleted." msgstr "" "Fix the way encryption key IDs are managed for encrypted volume backups. " "When creating a backup, the volume's encryption key is cloned and assigned a " "new key ID. The backup's cloned key ID is now stored in the backup database " "so that it can be deleted whenever the backup is deleted." msgid "" "Fix volume migration fails in the same ceph RBD pool. `Bug 1871524 `__." msgstr "" "Fix volume migration fails in the same Ceph RBD pool. `Bug 1871524 `__." msgid "" "Fixed 'No Space left' error by dd command when users set the config option " "``volume_clear_size`` to a value larger than the size of a volume." msgstr "" "Fixed 'No Space left' error by dd command when users set the config option " "``volume_clear_size`` to a value larger than the size of a volume." msgid "Fixed ACL multi-attach bug in Datera EDF driver." msgstr "Fixed ACL multi-attach bug in Datera EDF driver." msgid "" "Fixed HNAS bug that placed a cloned volume in the same pool as its source, " "even if the clone had a different pool specification. Driver will not allow " "to make clones using a different volume type anymore." msgstr "" "Fixed HNAS bug that placed a cloned volume in the same pool as its source, " "even if the clone had a different pool specification. Driver will not allow " "to make clones using a different volume type any more." msgid "" "Fixed HPE MSA driver issue where a multi-attached volume could be unmapped " "while still in use." msgstr "" "Fixed HPE MSA driver issue where a multi-attached volume could be unmapped " "while still in use." msgid "" "Fixed Lenovo driver issue where a multi-attached volume could be unmapped " "while still in use." msgstr "" "Fixed Lenovo driver issue where a multi-attached volume could be unmapped " "while still in use." msgid "" "Fixed NetApp SolidFire bug that avoided multiatached volumes to be deleted." msgstr "" "Fixed NetApp SolidFire bug that avoided multiatached volumes to be deleted." msgid "Fixed Non-WAN port filter issue in Kaminario iSCSI driver" msgstr "Fixed Non-WAN port filter issue in Kaminario iSCSI driver" msgid "Fixed Non-WAN port filter issue in Kaminario iSCSI driver." msgstr "Fixed Non-WAN port filter issue in Kaminario iSCSI driver." msgid "Fixed QNAP driver failures to create volume and snapshot in some cases." msgstr "" "Fixed QNAP driver failures to create volume and snapshot in some cases." 
msgid "" "Fixed QNAP driver failures to detach iscsi device while uploading volume to " "image." msgstr "" "Fixed QNAP driver failures to detach iSCSI device while uploading volume to " "image." msgid "" "Fixed StorWize/SVC error causing volume deletion to get stuck in the " "'deleting' state when using FlashCopy." msgstr "" "Fixed StorWize/SVC error causing volume deletion to get stuck in the " "'deleting' state when using FlashCopy." msgid "" "Fixed `bug #1859652 `_ to " "allow retyping an attached volume to SolidFire." msgstr "" "Fixed `bug #1859652 `_ to " "allow retyping an attached volume to SolidFire." msgid "" "Fixed a CHAP authentication issue while trying to attach an iSCSI volume " "using the NetApp ONTAP driver. Please refer to the `Launchpad bug #1914639 " "`_ for more details." msgstr "" "Fixed a CHAP authentication issue while trying to attach an iSCSI volume " "using the NetApp ONTAP driver. Please refer to the `Launchpad bug #1914639 " "`_ for more details." msgid "" "Fixed a bug which could create volumes with invalid content in case of " "unhandled errors from glance client (Bug `#1799221 `_)." msgstr "" "Fixed a bug which could create volumes with invalid content in case of " "unhandled errors from glance client (Bug `#1799221 `_)." msgid "Fixed a few scalability bugs in the Datera EDF driver." msgstr "Fixed a few scalability bugs in the Datera EDF driver." msgid "" "Fixed a problem with volume retype not honoring the existing volume's " "Availability Zone if one isn't specified." msgstr "" "Fixed a problem with volume retype not honouring the existing volume's " "Availability Zone if one isn't specified." msgid "" "Fixed an error in quota handling that required the keystone " "encryption_auth_url to be configured even if no encryption was being used." msgstr "" "Fixed an error in quota handling that required the keystone " "encryption_auth_url to be configured even if no encryption was being used." msgid "" "Fixed an issue when deleting a consistency group snapshot with the Dell SC " "backend driver." msgstr "" "Fixed an issue when deleting a consistency group snapshot with the Dell SC " "backend driver." msgid "" "Fixed an issue where the NetApp cDOT NFS driver failed to clone new volumes " "from the image cache." msgstr "" "Fixed an issue where the NetApp cDOT NFS driver failed to clone new volumes " "from the image cache." msgid "Fixed an issue with live migration when using the EMC VMAX driver." msgstr "Fixed an issue with live migration when using the EMC VMAX driver." msgid "Fixed backup and restore of volumes in VMware VMDK driver." msgstr "Fixed backup and restore of volumes in VMware VMDK driver." msgid "" "Fixed bug #1731474 on NetApp Data ONTAP driver that was causing LUNs to be " "created with larger size than requested. This fix requires version 9.1 of " "ONTAP or later." msgstr "" "Fixed bug #1731474 on NetApp Data ONTAP driver that was causing LUNs to be " "created with larger size than requested. This fix requires version 9.1 of " "ONTAP or later." msgid "" "Fixed bug #1783582, where calls to os-force_detach were failing on NetApp " "ONTAP iSCSI/FC drivers." msgstr "" "Fixed bug #1783582, where calls to os-force_detach were failing on NetApp " "ONTAP iSCSI/FC drivers." msgid "" "Fixed bug 1632333 with the NetApp ONTAP Driver. Now the copy offload method " "is invoked early to avoid downloading Glance images twice." msgstr "" "Fixed bug 1632333 with the NetApp ONTAP Driver. 
Now the copy offload method " "is invoked early to avoid downloading Glance images twice." msgid "" "Fixed bug causing snapshot creation to fail on systems with LC_NUMERIC set " "to locale using ',' as decimal separator." msgstr "" "Fixed bug causing snapshot creation to fail on systems with LC_NUMERIC set " "to locale using ',' as decimal separator." msgid "" "Fixed consistency groups API which was always returning groups scoped to " "project ID from user context instead of given input project ID." msgstr "" "Fixed consistency groups API which was always returning groups scoped to " "project ID from user context instead of given input project ID." msgid "" "Fixed group availability zone-backend host mismatch [`Bug 1773446 `_]." msgstr "" "Fixed group availability zone-backend host mismatch [`Bug 1773446 `_]." msgid "" "Fixed issue of managing a VG with more than one volume in Kaminario FC and " "iSCSI Cinder drivers." msgstr "" "Fixed issue of managing a VG with more than one volume in Kaminario FC and " "iSCSI Cinder drivers." msgid "" "Fixed issue where Pure Volume Drivers would ignore reserved_percentage " "config option." msgstr "" "Fixed issue where Pure Volume Drivers would ignore reserved_percentage " "config option." msgid "" "Fixed issue where ``create`` and ``update`` api's of ``volume-type`` and " "``group_type`` were returning 500 error if boolean 'is_public' value passed " "in the form of string. Now user can pass following valid boolean values to " "these api's: '0', 'f', 'false', 'off', 'n', 'no', '1', 't', 'true', 'on', " "'y', 'yes'" msgstr "" "Fixed issue where ``create`` and ``update`` api's of ``volume-type`` and " "``group_type`` were returning 500 error if boolean 'is_public' value passed " "in the form of string. Now user can pass following valid boolean values to " "these api's: '0', 'f', 'false', 'off', 'n', 'no', '1', 't', 'true', 'on', " "'y', 'yes'" msgid "" "Fixed issue where all Ceph RBD backups would be incremental after the first " "one. The driver now honors whether ``--incremental`` is specified or not." msgstr "" "Fixed issue where all Ceph RBD backups would be incremental after the first " "one. The driver now honours whether ``--incremental`` is specified or not." msgid "" "Fixed issue where the HNAS driver was not correctly reporting THIN " "provisioning and related stats." msgstr "" "Fixed issue where the HNAS driver was not correctly reporting THIN " "provisioning and related stats." msgid "" "Fixed issue with error being raised when performing a delete quota operation " "in a subproject." msgstr "" "Fixed issue with error being raised when performing a delete quota operation " "in a subproject." msgid "Fixed issue with extra-specs not being applied when cloning a volume." msgstr "Fixed issue with extra-specs not being applied when cloning a volume." msgid "" "Fixed issue with the EMC ScaleIO driver not able to identify a volume after " "a migration is performed." msgstr "" "Fixed issue with the EMC ScaleIO driver not able to identify a volume after " "a migration is performed." msgid "Fixed live migration on EMC VMAX3 backends." msgstr "Fixed live migration on EMC VMAX3 backends." msgid "" "Fixed misleading error message when NetApp copyoffload tool is not in place " "during image cloning." msgstr "" "Fixed misleading error message when NetApp copyoffload tool is not in place " "during image cloning." msgid "" "Fixed service state reporting when backup manager is unable to initialize " "one of the backup drivers." 
msgstr "" "Fixed service state reporting when backup manager is unable to initialise " "one of the backup drivers." msgid "" "Fixed support for IPv6 on management and data paths for NFS, iSCSI and FCP " "NetApp ONTAP drivers." msgstr "" "Fixed support for IPv6 on management and data paths for NFS, iSCSI and FCP " "NetApp ONTAP drivers." msgid "" "Fixed the VMware VMDK driver to create volume from image in ova container." msgstr "" "Fixed the VMware VMDK driver to create volume from image in ova container." msgid "" "Fixed the schema validation for attachment create API to make instance uuid " "an optional field. It had mistakenly been defined as a required field when " "schema validation was added in an earlier release. Also updated the schema " "to allow specification of the ``mode`` parameter, which has been available " "since microversion >= 3.54, but which was not recognized as a legitimate " "request field." msgstr "" "Fixed the schema validation for attachment create API to make instance UUID " "an optional field. It had mistakenly been defined as a required field when " "schema validation was added in an earlier release. Also updated the schema " "to allow specification of the ``mode`` parameter, which has been available " "since microversion >= 3.54, but which was not recognized as a legitimate " "request field." msgid "" "Fixed the volume property `signature_verified` propagating to images created " "from volumes. That property could later conflict with the same property " "being added again when creating a new volume from such image, preventing the " "volume from being created successfully. This volume property is created " "whenever a volume is created from an image for the purpose of indicating " "that the image signature was verified on creation, and was not intended to " "be propagated further if a new image is created from such volume." msgstr "" "Fixed the volume property `signature_verified` propagating to images created " "from volumes. That property could later conflict with the same property " "being added again when creating a new volume from such image, preventing the " "volume from being created successfully. This volume property is created " "whenever a volume is created from an image for the purpose of indicating " "that the image signature was verified on creation, and was not intended to " "be propagated further if a new image is created from such volume." msgid "" "Fixed using of the user's token in the nova client (`bug #1686616 `_)" msgstr "" "Fixed using of the user's token in the nova client (`bug #1686616 `_)" msgid "" "Fixed volume extend issue that allowed a tenant with enough quota to extend " "the volume to limits greater than what the volume backend supported." msgstr "" "Fixed volume extend issue that allowed a tenant with enough quota to extend " "the volume to limits greater than what the volume backend supported." msgid "Fixed volume group action in Active/Active HA deployment:" msgstr "Fixed volume group action in Active/Active HA deployment:" msgid "" "Fixes a bug in NetApp SolidFire where the deletion of group snapshots was " "failing." msgstr "" "Fixes a bug in NetApp SolidFire where the deletion of group snapshots was " "failing." msgid "" "Fixes a bug that could cause mount failures with the Quobyte driver if the " "quobyte_volume_url setting was changed in a running system." msgstr "" "Fixes a bug that could cause mount failures with the Quobyte driver if the " "quobyte_volume_url setting was changed in a running system." 
msgid "" "Fixes a bug that prevented creation of Quobyte volumes from snapshots during " "snapshot backups. This now allows backing up volumes with existing " "snapshots. Partially fixes `bug 1703405 `_ ." msgstr "" "Fixes a bug that prevented creation of Quobyte volumes from snapshots during " "snapshot backups. This now allows backing up volumes with existing " "snapshots. Partially fixes `bug 1703405 `_ ." msgid "" "Fixes a bug that prevented distributed file system drivers from creating " "snapshots during volume clone operations (NFS, WindowsSMBFS, VZstorage and " "Quobyte drivers). Fixing this allows creating snapshot based backups." msgstr "" "Fixes a bug that prevented distributed file system drivers from creating " "snapshots during volume clone operations (NFS, WindowsSMBFS, VZstorage and " "Quobyte drivers). Fixing this allows creating snapshot based backups." msgid "" "Fixes a bug that prevented the configuration of multiple redundant Quobyte " "registries in the quobyte_volume_url config option." msgstr "" "Fixes a bug that prevented the configuration of multiple redundant Quobyte " "registries in the quobyte_volume_url config option." msgid "" "Fixes an issue where starting the Pure volume drivers with replication " "enabled and default values for pure_replica_interval_default would cause an " "error to be raised from the backend." msgstr "" "Fixes an issue where starting the Pure volume drivers with replication " "enabled and default values for pure_replica_interval_default would cause an " "error to be raised from the backend." msgid "" "Fixes concurrency issue on backups, where only 20 native threads could be " "concurrently be executed. Now default will be 60, and can be changed with " "`backup_native_threads_pool_size`." msgstr "" "Fixes concurrency issue on backups, where only 20 native threads could be " "concurrently be executed. Now the default will be 60, and can be changed " "with `backup_native_threads_pool_size`." msgid "Fixes force_detach behavior for volumes in NetApp SolidFire driver." msgstr "Fixes force_detach behaviour for volumes in NetApp SolidFire driver." msgid "" "Following APIs were accepting boolean parameters with leading and trailing " "white spaces (for e.g. \" true \"). But now with schema validation support, " "all these boolean parameters henceforth will not accept leading and trailing " "whitespaces to maintain consistency." msgstr "" "Following APIs were accepting boolean parameters with leading and trailing " "white spaces (for e.g. \" true \"). But now with schema validation support, " "all these boolean parameters henceforth will not accept leading and trailing " "whitespaces to maintain consistency." msgid "" "For EMC VNX backends, please upgrade to use ``cinder.volume.drivers.emc.vnx." "driver.EMCVNXDriver``. Add config option ``storage_protocol = fc`` or " "``storage_protocol = iscsi`` to the driver section to enable the FC or iSCSI " "driver respectively." msgstr "" "For EMC VNX backends, please upgrade to use ``cinder.volume.drivers.emc.vnx." "driver.EMCVNXDriver``. Add config option ``storage_protocol = fc`` or " "``storage_protocol = iscsi`` to the driver section to enable the FC or iSCSI " "driver respectively." msgid "" "For SolidFire, QoS specs are now checked to make sure they fall within the " "min and max constraints. If not the QoS specs are capped at the min or max " "(i.e. if spec says 50 and minimum supported is 100, the driver will set it " "to 100)." 
msgstr "" "For SolidFire, QoS specs are now checked to make sure they fall within the " "min and max constraints. If not the QoS specs are capped at the min or max " "(i.e. if spec says 50 and minimum supported is 100, the driver will set it " "to 100)." msgid "" "For a given OpenStack release, Cinder supports the current Ceph active " "stable releases plus the two prior releases." msgstr "" "For a given OpenStack release, Cinder supports the current Ceph active " "stable releases plus the two prior releases." msgid "" "For an example of distribution-specific information about cgroups, see " "`OpenStack and cgroups v1 `_ in the Debian 11 " "(\"bullseye\") release notes." msgstr "" "For an example of distribution-specific information about cgroups, see " "`OpenStack and cgroups v1 `_ in the Debian 11 " "(\"bullseye\") release notes." msgid "" "For any OpenStack release, it is expected that the versions of the Ceph " "client and server are in alignment." msgstr "" "For any OpenStack release, it is expected that the versions of the Ceph " "client and server are in alignment." msgid "" "For completeness, here is the list of drivers first marked unsupported in " "the Ussuri release. See the \"Deprecation Notes\" section of this document " "for details." msgstr "" "For completeness, here is the list of drivers first marked unsupported in " "the Ussuri release. See the \"Deprecation Notes\" section of this document " "for details." msgid "" "For more information about the cinder policy configuration file, see the " "`policy.yaml `_ section of the Cinder Configuration " "Guide." msgstr "" "For more information about the Cinder policy configuration file, see the " "`policy.yaml `_ section of the Cinder Configuration " "Guide." msgid "" "For more information, see the 'New Features' section of this document and " "`Policy Personas and Permissions `_ in the `Cinder Service " "Configuration Guide`." msgstr "" "For more information, see the 'New Features' section of this document and " "`Policy Personas and Permissions `_ in the `Cinder Service " "Configuration Guide`." msgid "For more information:" msgstr "For more information:" msgid "" "For security reasons (`Bug #2004555 `_) manually deleting an attachment, manually doing the ``os-" "terminate_connection`` ``os-detach`` or ``os-force_detach`` actions will no " "longer be allowed unless the request is coming from another OpenStack " "service on behalf of a user." msgstr "" "For security reasons (`Bug #2004555 `_) manually deleting an attachment, manually doing the ``os-" "terminate_connection`` ``os-detach`` or ``os-force_detach`` actions will no " "longer be allowed unless the request is coming from another OpenStack " "service on behalf of a user." msgid "" "For security reasons (`Bug #2004555 `_) manually deleting an attachment, manually doing the ``os-" "terminate_connection``, ``os-detach`` or ``os-force_detach`` actions will no " "longer be allowed in most cases unless the request is coming from another " "OpenStack service on behalf of a user." msgstr "" "For security reasons, (`Bug #2004555 `_) manually deleting an attachment, manually doing the ``os-" "terminate_connection``, ``os-detach`` or ``os-force_detach`` actions will no " "longer be allowed in most cases unless the request is coming from another " "OpenStack service on behalf of a user." 
msgid "" "For the above reasons, to monitor the maximum sequential physical free space " "and total volumes in the RAID Group, when updating pool information using " "``Get Volume Stats``, also update parameter ``total_volumes`` (volumes " "already created in the RAID Group) and introduce ``fragment_capacity_mb`` " "(maximum sequential physical capacity) to the backend pool information if " "the backend pool is a RAID Group." msgstr "" "For the above reasons, to monitor the maximum sequential physical free space " "and total volumes in the RAID Group, when updating pool information using " "``Get Volume Stats``, also update parameter ``total_volumes`` (volumes " "already created in the RAID Group) and introduce ``fragment_capacity_mb`` " "(maximum sequential physical capacity) to the backend pool information if " "the backend pool is a RAID Group." msgid "" "For the convenience of operators, the `unsupported driver removal policy " "`_ was changed during the Ussuri development cycle to allow " "unsupported drivers to remain in the Cinder source code repository at the " "discretion of the Cinder team (basically, this means the vendor has not " "explicitly declared the driver EOL or the vendor has expressed interest in " "getting the third-party CI system working again). Be aware, however, that " "such drivers are subject to immediate removal if they begin failing the " "general Cinder gating tests." msgstr "" "For the convenience of operators, the `unsupported driver removal policy " "`_ was changed during the Ussuri development cycle to allow " "unsupported drivers to remain in the Cinder source code repository at the " "discretion of the Cinder team (basically, this means the vendor has not " "explicitly declared the driver EOL or the vendor has expressed interest in " "getting the third-party CI system working again). Be aware, however, that " "such drivers are subject to immediate removal if they begin failing the " "general Cinder gating tests." msgid "" "For the most part, users are happy with the cinder feature `Volume " "encryption supported by the key manager `_. There are, " "however, some edge cases that have revealed bugs that you and your users " "should be aware of." msgstr "" "For the most part, users are happy with the Cinder feature `Volume " "encryption supported by the key manager `_. There are, " "however, some edge cases that have revealed bugs that you and your users " "should be aware of." msgid "Fujitsu Driver: Added multiple pools support." msgstr "Fujitsu Driver: Added multiple pools support." msgid "" "Fujitsu ETERNUS DX driver: Add fragment capacity information of RAID Group." msgstr "" "Fujitsu ETERNUS DX driver: Add fragment capacity information of RAID Group." msgid "Fujitsu ETERNUS DX driver: Add metadata to snapshot" msgstr "Fujitsu ETERNUS DX driver: Add metadata to snapshot" msgid "Fujitsu ETERNUS DX driver: Added support for QoS" msgstr "Fujitsu ETERNUS DX driver: Added support for QoS" msgid "Fujitsu ETERNUS DX driver: Added support for update migrated volume" msgstr "Fujitsu ETERNUS DX driver: Added support for update migrated volume" msgid "" "Fujitsu ETERNUS DX driver: Added support to extend a volume on RAID Group " "using CLI." msgstr "" "Fujitsu ETERNUS DX driver: Added support to extend a volume on RAID Group " "using CLI." 
msgid "Fujitsu ETERNUS DX driver: Improve volume deletion" msgstr "Fujitsu ETERNUS DX driver: Improve volume deletion" msgid "" "Fujitsu Eternus DX driver used to create snapshot using SMI-S, resulting in " "the inability to extend the source volume." msgstr "" "Fujitsu Eternus DX driver used to create snapshot using SMI-S, resulting in " "the inability to extend the source volume." msgid "Fujitsu Eternus DX driver: Added cli operations when creating snapshot" msgstr "Fujitsu Eternus DX driver: Added CLI operations when creating snapshot" msgid "" "Fujitsu Eternus DX driver: Added support for revert to snapshot operation." msgstr "" "Fujitsu Eternus DX driver: Added support for revert to snapshot operation." msgid "Generic group is added into quota management." msgstr "Generic group is added into quota management." msgid "Generic volume groups:" msgstr "Generic volume groups:" msgid "" "Google backup driver now supports ``google-auth`` library, and is the " "preferred library if both ``google-auth`` (together with ``google-auth-" "httplib2``) and ``oauth2client`` libraries are present in the system." msgstr "" "Google backup driver now supports ``google-auth`` library, and is the " "preferred library if both ``google-auth`` (together with ``google-auth-" "httplib2``) and ``oauth2client`` libraries are present in the system." msgid "" "Google backup driver now works when using ``google-api-python-client`` " "version 1.6.0 or higher." msgstr "" "Google backup driver now works when using ``google-api-python-client`` " "version 1.6.0 or higher." msgid "Group APIs will not work on groups with default_cgsnapshot_type." msgstr "Group APIs will not work on groups with default_cgsnapshot_type." msgid "Group APIs will only write/read in/from the groups table." msgstr "Group APIs will only write/read in/from the groups table." msgid "Groups with default_cgsnapshot_type can only be operated by CG APIs." msgstr "Groups with default_cgsnapshot_type can only be operated by CG APIs." msgid "" "HA A-A: Add cluster configuration option to allow grouping hosts that share " "the same backend configurations and should work in Active-Active fashion." msgstr "" "HA A-A: Add cluster configuration option to allow grouping hosts that share " "the same backend configurations and should work in Active-Active fashion." msgid "" "HA A-A: Added cluster subcommand in manage command to list, remove, and " "rename clusters." msgstr "" "HA A-A: Added cluster subcommand in manage command to list, remove, and " "rename clusters." msgid "" "HA A-A: Added clusters API endpoints for cluster related operations (index, " "detail, show, enable/disable). Index and detail accept filtering by `name`, " "`binary`, `disabled`, `num_hosts`, `num_down_hosts`, and up/down status " "(`is_up`) as URL parameters. Also added their respective policies." msgstr "" "HA A-A: Added clusters API endpoints for cluster related operations (index, " "detail, show, enable/disable). Index and detail accept filtering by `name`, " "`binary`, `disabled`, `num_hosts`, `num_down_hosts`, and up/down status " "(`is_up`) as URL parameters. Also added their respective policies." msgid "" "HA A-A: Updated manage command to display cluster information on service " "listings." msgstr "" "HA A-A: Updated manage command to display cluster information on service " "listings." msgid "" "HNAS drivers have new configuration paths. Users should now use ``cinder." 
"volume.drivers.hitachi.hnas_nfs.HNASNFSDriver`` for HNAS NFS driver and " "``cinder.volume.drivers.hitachi.hnas_iscsi.HNASISCSIDriver`` for HNAS iSCSI " "driver." msgstr "" "HNAS drivers have new configuration paths. Users should now use ``cinder." "volume.drivers.hitachi.hnas_nfs.HNASNFSDriver`` for HNAS NFS driver and " "``cinder.volume.drivers.hitachi.hnas_iscsi.HNASISCSIDriver`` for HNAS iSCSI " "driver." msgid "HNAS drivers will now read configuration from cinder.conf." msgstr "HNAS drivers will now read configuration from cinder.conf." msgid "" "HP drivers have been rebranded to HPE. Existing configurations will continue " "to work with the legacy name, but will need to be updated by the next " "release." msgstr "" "HP drivers have been rebranded to HPE. Existing configurations will continue " "to work with the legacy name, but will need to be updated by the next " "release." msgid "" "HPE 3PAR Driver: Add support of iSCSI driver for Primera 4.2 or higher " "versions." msgstr "" "HPE 3PAR Driver: Add support of iSCSI driver for Primera 4.2 or higher " "versions." msgid "" "HPE 3PAR driver `Bug #1819903 `_: Fixed: umanaged volumes & snapshots missing from cinder " "manageable-list." msgstr "" "HPE 3PAR driver `Bug #1819903 `_: Fixed: managed volumes & snapshots missing from cinder " "manageable-list." msgid "" "HPE 3PAR driver `Bug #1940069 `_: Fixed issue of connection rejected by reusing existing " "session." msgstr "" "HPE 3PAR driver `Bug #1940069 `_: Fixed issue of connection rejected by reusing existing " "session." msgid "" "HPE 3PAR driver `Bug #1958122 `_: Fixed issue of multi-detach operation in multi host " "environment." msgstr "" "HPE 3PAR driver `Bug #1958122 `_: Fixed issue of multi-detach operation in multi host " "environment." msgid "" "HPE 3PAR driver `Bug #1994521 `_: Fixed: While performing a delete snapshot (s1) operation, " "the volumes (v2) dependent on the snapshot (s1) are converted to base " "volumes. This operation fails if these dependent volumes (v2) have their own " "dependent snapshots (s2). The errors during the failure were vague and not " "helpful. With this release, we added conditions to fail this operation early " "and also added useful error message." msgstr "" "HPE 3PAR driver `Bug #1994521 `_: Fixed: While performing a delete snapshot (s1) operation, " "the volumes (v2) dependent on the snapshot (s1) are converted to base " "volumes. This operation fails if these dependent volumes (v2) have their own " "dependent snapshots (s2). The errors during the failure were vague and not " "helpful. With this release, we added conditions to fail this operation early " "and also added a useful error message." msgid "" "HPE 3PAR driver `Bug #2015034 `_: Added handling for VLAN iscsi IPs in the 3PAR iSCSI driver." msgstr "" "HPE 3PAR driver `Bug #2015034 `_: Added handling for VLAN iSCSI IPs in the 3PAR iSCSI driver." msgid "" "HPE 3PAR driver `Bug #2015746 `_: Fixed: minor code changes to work with new wsapi." msgstr "" "HPE 3PAR driver `Bug #2015746 `_: Fixed: minor code changes to work with new wsapi." msgid "" "HPE 3PAR driver `Bug #2023253 `_: Fixed: Handle error during retype of volume without comment" msgstr "" "HPE 3PAR driver `Bug #2023253 `_: Fixed: Handle error during retype of volume without comment" msgid "" "HPE 3PAR driver `Bug #2026718 `_: Fixed: With this patch, added logic to fetch correct volume " "name on 3par (osv_name), rcg_name and vvset_name (for particular scenarios); " "so that volumes can be identified and deleted from 3par." 
msgstr "" "HPE 3PAR driver `Bug #2026718 `_: Fixed: With this patch, added logic to fetch correct volume " "name on 3par (osv_name), rcg_name and vvset_name (for particular scenarios); " "so that volumes can be identified and deleted from 3par." msgid "" "HPE 3PAR driver `Bug #2045411 `_: Added support for ipv6 address in the 3PAR iSCSI driver." msgstr "" "HPE 3PAR driver `Bug #2045411 `_: Added support for ipv6 address in the 3PAR iSCSI driver." msgid "" "HPE 3PAR driver `Bug #2068795 `_: Fixed: Perform login before invoking getWsApiVersion" msgstr "" "HPE 3PAR driver `Bug #2068795 `_: Fixed: Perform login before invoking getWsApiVersion" msgid "" "HPE 3PAR driver `bug #2008931 `_: Fixed issue when performing migrate volume operation when " "`comment` attribute is missing from the volume." msgstr "" "HPE 3PAR driver `bug #2008931 `_: Fixed issue when performing migrate volume operation when " "`comment` attribute is missing from the volume." msgid "" "HPE 3PAR driver `bug #2018994 `_: Fixed: use small QoS Latency value (less than 1)" msgstr "" "HPE 3PAR driver `bug #2018994 `_: Fixed: use small QoS Latency value (less than 1)" msgid "" "HPE 3PAR driver `bug #2021941 `_: Fixed: Now clone of replicated volume can be created" msgstr "" "HPE 3PAR driver `bug #2021941 `_: Fixed: Now clone of replicated volume can be created" msgid "" "HPE 3PAR driver `bug #2044255 `_: Fixed: In peer persistence setup, when volume is attached " "to instance, now LUN ids are returned from both the arrays." msgstr "" "HPE 3PAR driver `bug #2044255 `_: Fixed: In peer persistence setup, when volume is attached " "to instance, now LUN ids are returned from both the arrays." msgid "" "HPE 3PAR driver adds following functionalities Creating thin/dedup " "compresssed volume. Retype for tpvv/tdvv volumes to be compressed. Migration " "of compressed volumes. Create compressed volume from compressed volume/" "snapshot source. Compression support to create cg from source." msgstr "" "HPE 3PAR driver adds following functionalities Creating thin/dedup " "compressed volume. Retype for tpvv/tdvv volumes to be compressed. Migration " "of compressed volumes. Create compressed volume from compressed volume/" "snapshot source. Compression support to create cg from source." msgid "" "HPE 3PAR driver now supports networks with duplicated FQDNs via " "configuration option `unique_fqdn_network` so attaching in these networks " "will work (bug #1834695)." msgstr "" "HPE 3PAR driver now supports networks with duplicated FQDNs via " "configuration option `unique_fqdn_network` so attaching in these networks " "will work (bug #1834695)." msgid "" "HPE 3PAR driver: The detach issue for multiattach capability (`Bug 1834660 " "`_) was fixed in the Cinder " "15.0.0 (Train) release, but due to an oversight, the fix has not been " "announced until now." msgstr "" "HPE 3PAR driver: The detach issue for multiattach capability (`Bug 1834660 " "`_) was fixed in the Cinder " "15.0.0 (Train) release, but due to an oversight, the fix has not been " "announced until now." msgid "" "HPE 3PAR: Documented that existing driver supports the new Alletra 9k " "backend. HPE Alletra 9k is newer version of existing HPE Primera backend." msgstr "" "HPE 3PAR: Documented that the existing driver supports the new Alletra 9k " "backend. HPE Alletra 9k is newer version of the existing HPE Primera backend." msgid "" "HPE 3PAR: Documented that existing driver supports the new Alletra MP " "backend. HPE Alletra MP is newer version of existing HPE Alletra 9k backend." 
msgstr "" "HPE 3PAR: Documented that existing driver supports the new Alletra MP " "backend. HPE Alletra MP is newer version of existing HPE Alletra 9k backend." msgid "" "HPE LeftHand config options ``hplefthand_api_url``, ``hplefthand_username``, " "``hplefthand_password``, ``hplefthand_clustername``, " "``hplefthand_iscsi_chap_enabled``, and ``hplefthand_debug`` were deprecated " "in the Mitaka release and have now been removed. The corresponding " "``hpelefthand_api_url``, ``hpelefthand_username``, ``hpelefthand_password``, " "``hpelefthand_clustername``, ``hpelefthand_iscsi_chap_enabled``, and " "``hpelefthand_debug`` should be used instead." msgstr "" "HPE LeftHand config options ``hplefthand_api_url``, ``hplefthand_username``, " "``hplefthand_password``, ``hplefthand_clustername``, " "``hplefthand_iscsi_chap_enabled``, and ``hplefthand_debug`` were deprecated " "in the Mitaka release and have now been removed. The corresponding " "``hpelefthand_api_url``, ``hpelefthand_username``, ``hpelefthand_password``, " "``hpelefthand_clustername``, ``hpelefthand_iscsi_chap_enabled``, and " "``hpelefthand_debug`` should be used instead." msgid "" "HPE Nimble Storage drivers had been previously marked unsupported. Testing " "requirements have been addressed and they are now fully supported again. HPE " "Nimble Storage drivers allow cinder to manage volumes both in iSCSI and FC " "environment." msgstr "" "HPE Nimble Storage drivers had been previously marked unsupported. Testing " "requirements have been addressed and they are now fully supported again. HPE " "Nimble Storage drivers allow cinder to manage volumes both in iSCSI and FC " "environment." msgid "HPE Nimble driver: Added group replication support." msgstr "HPE Nimble driver: Added group replication support." msgid "" "HPE Nimble: Report max oversubscription ratio according to backend " "configuration ``max_over_subscription_ratio``" msgstr "" "HPE Nimble: Report max oversubscription ratio according to backend " "configuration ``max_over_subscription_ratio``" msgid "" "HPE XP and NEC V driver `bug #2012515 `_: Fixed to use correct Host group name." msgstr "" "HPE XP and NEC V driver `bug #2012515 `_: Fixed to use correct Host group name." msgid "" "HPMSA driver: The HPE MSA driver was updated to avoid using deprecated " "command syntax that has been removed in the latest version of the MSA API. " "This is required to support the newest firmware in the MSA 2060/1060." msgstr "" "HPMSA driver: The HPE MSA driver was updated to avoid using deprecated " "command syntax that has been removed in the latest version of the MSA API. " "This is required to support the newest firmware in the MSA 2060/1060." msgid "" "HTTP connector for the Cinder Brocade FC Zone plugin. This connector allows " "for communication between the Brocade FC zone plugin and the switch to be " "over HTTP or HTTPs. To make use of this connector, the user would add a " "configuration setting in the fabric block for a Brocade switch with the name " "as 'fc_southbound_protocol' with a value as 'HTTP' or 'HTTPS'." msgstr "" "HTTP connector for the Cinder Brocade FC Zone plugin. This connector allows " "for communication between the Brocade FC zone plugin and the switch to be " "over HTTP or HTTPS. To make use of this connector, the user would add a " "configuration setting in the fabric block for a Brocade switch with the name " "as 'fc_southbound_protocol' with a value as 'HTTP' or 'HTTPS'." 
msgid "" "Hitachi VSP drivers have a new config option ``vsp_compute_target_ports`` to " "specify IDs of the storage ports used to attach volumes to compute nodes. " "The default is the value specified for the existing ``vsp_target_ports`` " "option. Either or both of ``vsp_compute_target_ports`` and " "``vsp_target_ports`` must be specified." msgstr "" "Hitachi VSP drivers have a new config option ``vsp_compute_target_ports`` to " "specify IDs of the storage ports used to attach volumes to compute nodes. " "The default is the value specified for the existing ``vsp_target_ports`` " "option. Either or both of ``vsp_compute_target_ports`` and " "``vsp_target_ports`` must be specified." msgid "" "Hitachi VSP drivers have a new config option ``vsp_horcm_pair_target_ports`` " "to specify IDs of the storage ports used to copy volumes by Shadow Image or " "Thin Image. The default is the value specified for the existing " "``vsp_target_ports`` option. Either or both of " "``vsp_horcm_pair_target_ports`` and ``vsp_target_ports`` must be specified." msgstr "" "Hitachi VSP drivers have a new config option ``vsp_horcm_pair_target_ports`` " "to specify IDs of the storage ports used to copy volumes by Shadow Image or " "Thin Image. The default is the value specified for the existing " "``vsp_target_ports`` option. Either or both of " "``vsp_horcm_pair_target_ports`` and ``vsp_target_ports`` must be specified." msgid "" "Hitachi driver `bug #1908792 `_: Fix for Hitachi driver allowing delete_volume after " "create_cloned_volume." msgstr "" "Hitachi driver `bug #1908792 `_: Fix for Hitachi driver allowing delete_volume after " "create_cloned_volume." msgid "" "Hitachi driver `bug #1989176 `_: Fixed Hitachi driver to output a message for resource lock " "correctly." msgstr "" "Hitachi driver `bug #1989176 `_: Fixed Hitachi driver to output a message for resource lock " "correctly." msgid "" "Hitachi driver `bug #2011810 `_: Fixed to use correct pool number for secondary storage on " "GAD environment." msgstr "" "Hitachi driver `bug #2011810 `_: Fixed to use correct pool number for secondary storage on " "GAD environment." msgid "" "Hitachi driver `bug #2024418 `_: Fixed to raise correct exception when volume is busy while " "performing delete volume operation." msgstr "" "Hitachi driver `bug #2024418 `_: Fixed to raise correct exception when volume is busy while " "performing delete volume operation." msgid "" "Hitachi driver `bug #2033448 `_: Fixed to initialize lock counter for resource group" msgstr "" "Hitachi driver `bug #2033448 `_: Fixed to initialise lock counter for resource group" msgid "" "Hitachi driver `bug #2063317 `_: Fix test scripts to avoid failing by unexpected response " "from psuedo REST API server" msgstr "" "Hitachi driver `bug #2063317 `_: Fix test scripts to avoid failing by unexpected response " "from pseudo REST API server" msgid "" "Hitachi driver `bug #2071697 '_: Fix to set correct object ID as LDEV nickname when running " "host-assisted migration with ``retype`` or ``migration`` commands." msgstr "" "Hitachi driver `bug #2071697 '_: Fix to set correct object ID as LDEV nickname when running " "host-assisted migration with ``retype`` or ``migration`` commands." msgid "" "Hitachi driver `bug #2072317 `_: Fix potential data-loss due to a network issue during a " "volume deletion." msgstr "" "Hitachi driver `bug #2072317 `_: Fix potential data-loss due to a network issue during a " "volume deletion." 
msgid "" "Hitachi driver: Add a config option ``hitachi_group_name_format`` for " "hostgroup name format." msgstr "" "Hitachi driver: Add a config option ``hitachi_group_name_format`` for " "hostgroup name format." msgid "" "Hitachi driver: Support AIX as host OS type. When running ``cinder " "attachment-create`` command with the option ``--ostype aix``, ``AIX`` is set " "as host OS type." msgstr "" "Hitachi driver: Support AIX as host OS type. When running ``cinder " "attachment-create`` command with the option ``--ostype aix``, ``AIX`` is set " "as host OS type." msgid "" "Hitachi driver: Support Global-Active Device (GAD) volume. GAD is a one of " "Hitachi storage fucntion uses volume replication to provide a high-" "availability environment for hosts across storage systems and sites. New " "properties will be added in configuration. ``hbsd:topology`` sets to " "``active_active_mirror_volumex`` would specify a GAD volume. " "``hitachi_mirror_xxx`` parameters would specify a secondary storage for GAD " "volume." msgstr "" "Hitachi driver: Support Global-Active Device (GAD) volume. GAD is one of " "Hitachi's storage functions that uses volume replication to provide a high-" "availability environment for hosts across storage systems and sites. New " "properties will be added in the configuration. ``hbsd:topology`` sets to " "``active_active_mirror_volumex`` would specify a GAD volume. " "``hitachi_mirror_xxx`` parameters would specify a secondary storage for GAD " "volume." msgid "Hitachi driver: Add Cinder generic volume groups." msgstr "Hitachi driver: Add Cinder generic volume groups." msgid "" "Hitachi driver: Add a feature ``Port Scheduler``. This feature is enabled " "when specifying ``True`` for the parameter ``hitachi_port_scheduler``. When " "this feature is enabled and an attach request is received, the active WWNs " "that are obtained by Fibre Channel Zone Manager will be distributed and " "registered to the host groups of each port of the storage system. To use " "this feature, specify ``True`` for both parameters ``hitachi_group_request`` " "and ``hitachi_rest_name_only_discovery``. If you specify ``False`` or use " "default value for the ``hitachi_rest_name_only_discovery``, it will take a " "long time to attach volume, by seeking the host group for all specified " "ports. This feature is supported on Fibre Channel only." msgstr "" "Hitachi driver: Add a feature ``Port Scheduler``. This feature is enabled " "when specifying ``True`` for the parameter ``hitachi_port_scheduler``. When " "this feature is enabled and an attach request is received, the active WWNs " "that are obtained by Fibre Channel Zone Manager will be distributed and " "registered to the host groups of each port of the storage system. To use " "this feature, specify ``True`` for both parameters ``hitachi_group_request`` " "and ``hitachi_rest_name_only_discovery``. If you specify ``False`` or use " "the default value for the ``hitachi_rest_name_only_discovery``, it will take " "a long time to attach volume, by seeking the host group for all specified " "ports. This feature is supported on Fibre Channel only." msgid "" "Hitachi driver: Add target port assignment. Defining particular ports in " "extra spec ``hbsd:target_ports`` determines which of the ports specified by " "the ``hitachi_target_ports`` or the ``hitachi_compute_target_ports`` " "parameters are used to create LUN paths during volume attach operations for " "each volume type." msgstr "" "Hitachi driver: Add target port assignment. 
Defining particular ports in " "extra spec ``hbsd:target_ports`` determines which of the ports specified by " "the ``hitachi_target_ports`` or the ``hitachi_compute_target_ports`` " "parameters are used to create LUN paths during volume attach operations for " "each volume type." msgid "Hitachi driver: Added QoS support." msgstr "Hitachi driver: Added QoS support." msgid "" "Hitachi driver: Additionally support following storages, Hitachi VSP E590, " "Hitachi VSP E790 and Hitachi VSP E1090." msgstr "" "Hitachi driver: Additionally supports the following storage systems: Hitachi VSP " "E590, Hitachi VSP E790 and Hitachi VSP E1090." msgid "" "Hitachi driver: Support data deduplication and compression, by storage " "assist. The feature can be worked, if user enable deduplication and " "compression for the DP-pool, by Configuration Manager REST API, and set the " "extra spec ``hbsd:capacity_saving`` to ``deduplication_compression``" msgstr "" "Hitachi driver: Support data deduplication and compression, by storage " "assist. The feature can be worked, if the user enables deduplication and " "compression for the DP-pool, by Configuration Manager REST API, and set the " "extra spec ``hbsd:capacity_saving`` to ``deduplication_compression``" msgid "" "Hitachi driver: Update retype to different pool and support storage assisted " "migration. Storage assisted migration feature is also used when retype a " "volume, which doesn't have any snapshots, to different pool." msgstr "" "Hitachi driver: Update retype to a different pool and support storage-" "assisted migration. Storage-assisted migration feature is also used when " "retyping a volume, which doesn't have any snapshots, to a different pool." msgid "" "Hitachi, NEC V, HPE XP drivers `bug #2004140 `_: Fixed ``KeyError`` when a backend is down." msgstr "" "Hitachi, NEC V, HPE XP drivers `bug #2004140 `_: Fixed ``KeyError`` when a backend is down." msgid "Huawei Cinder Driver Support Dorado V6 Storage.(iSCSI, FC)" msgstr "Huawei Cinder Driver Support Dorado V6 Storage (iSCSI, FC)." msgid "" "IBM DS8000 Driver `Bug #1884030 `_: Support for volume_name_template configuration option." msgstr "" "IBM DS8000 Driver `Bug #1884030 `_: Support for volume_name_template configuration option." msgid "" "IBM DS8000 Driver `Bug #1903648 `_: Fix os_type compatability and hostname template issue." msgstr "" "IBM DS8000 Driver `Bug #1903648 `_: Fix os_type compatibility and hostname template issue." msgid "" "IBM DS8000 Driver `Bug #1951046 `_: Fixed detach issue for multi-attach volumes. Detach the " "volume without deleting the host until attachment count is zero." msgstr "" "IBM DS8000 Driver `Bug #1951046 `_: Fixed detach issue for multi-attach volumes. Detach the " "volume without deleting the host until attachment count is zero." msgid "IBM DS8000 Driver: Add support for revert-to-snapshot operation." msgstr "IBM DS8000 Driver: Add support for revert-to-snapshot operation." msgid "" "IBM DS8000 drivers had been previously marked unsupported. Testing " "requirements have been addressed and they are now fully supported again. IBM " "DS8000 drivers allow Cinder to manage volumes in FC environment." msgstr "" "IBM DS8000 drivers had been previously marked unsupported. Testing " "requirements have been addressed and they are now fully supported again. IBM " "DS8000 drivers allow Cinder to manage volumes in FC environment." msgid "IBM DS8K driver has added multiattach support." msgstr "IBM DS8K driver has added multiattach support." 
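The Hitachi notes above refer to volume-type extra specs such as ``hbsd:target_ports`` and ``hbsd:capacity_saving``. A hedged example of setting them with the openstack CLI; the volume type name and port ID are placeholders::

    $ openstack volume type set --property hbsd:capacity_saving=deduplication_compression <volume-type>
    $ openstack volume type set --property hbsd:target_ports=CL1-A <volume-type>
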
msgid "IBM FlashSystem drivers: FC and iSCSI" msgstr "IBM FlashSystem drivers: FC and iSCSI" msgid "" "IBM GPFS drivers had been previously marked unsupported. Testing " "requirements have been addressed and they are now fully supported again. IBM " "GPFS drivers allow cinder to manage volumes." msgstr "" "IBM GPFS drivers had been previously marked unsupported. Testing " "requirements have been addressed and they are now fully supported again. IBM " "GPFS drivers allow Cinder to manage volumes." msgid "" "IBM Spectrum Virtualize Family (previously known as Storwize) driver cannot " "delete volume which has host mapping in some rare cases while code_level of " "IBM Spectrum Virtualize Family storage lower than 7.7.0.0. Please upgrade to " "latest code to avoid this kind of issue." msgstr "" "IBM Spectrum Virtualize Family (previously known as Storwize) driver cannot " "delete volume which has host mapping in some rare cases while code_level of " "IBM Spectrum Virtualize Family storage lower than 7.7.0.0. Please upgrade to " "latest code to avoid this kind of issue." msgid "" "IBM Spectrum Virtualize Family (previously known as Storwize) drivers had " "been previously marked unsupported. Testing requirements have been addressed " "and they are now fully supported again. IBM Spectrum Virtualize Family " "drivers allow Cinder to manage volumes both in iSCSI and FC environments." msgstr "" "IBM Spectrum Virtualize Family (previously known as Storwize) drivers had " "been previously marked unsupported. Testing requirements have been addressed " "and they are now fully supported again. IBM Spectrum Virtualize Family " "drivers allow Cinder to manage volumes both in iSCSI and FC environments." msgid "" "IBM Spectrum Virtualize Family `Bug #1896214 `_: Fixed issues in change_vdisk_iogrp. During retyping " "a volume between I/O groups, if addvdiskaccess fails an exception is raised " "and if movevdisk fails rmvdiskaccess should be done for new I/O group before " "failing the retype operation." msgstr "" "IBM Spectrum Virtualize Family `Bug #1896214 `_: Fixed issues in change_vdisk_iogrp. During retyping " "a volume between I/O groups, if addvdiskaccess fails an exception is raised " "and if movevdisk fails rmvdiskaccess should be done for new I/O group before " "failing the retype operation." msgid "" "IBM Spectrum Virtualize Family `Bug #1898746 `_: Fixed issue regarding host-failover and group-" "failover which impacts storage back-end performance." msgstr "" "IBM Spectrum Virtualize Family `Bug #1898746 `_: Fixed issue regarding host-failover and group-" "failover which impacts storage back-end performance." msgid "" "IBM Spectrum Virtualize Family `Bug #1905988 `_: Fixed volume IOPS throttling issue with a new option " "to set volume IOPS based on volume size." msgstr "" "IBM Spectrum Virtualize Family `Bug #1905988 `_: Fixed volume IOPS throttling issue with a new option " "to set volume IOPS based on volume size." msgid "" "IBM Spectrum Virtualize Family driver `Bug #1892034 `_: Fixed issue in get_host_from_connector that " "volume name is not validated to get the host during terminate connection " "when the volume name is passed as input." msgstr "" "IBM Spectrum Virtualize Family driver `Bug #1892034 `_: Fixed issue in get_host_from_connector that " "volume name is not validated to get the host during terminate connection " "when the volume name is passed as input." msgid "" "IBM Spectrum Virtualize Family driver `Bug #1912564 `_: Fixed HyperSwap volume deletion issue." 
msgstr "" "IBM Spectrum Virtualize Family driver `Bug #1912564 `_: Fixed HyperSwap volume deletion issue." msgid "" "IBM Spectrum Virtualize Family driver `Bug #1913363 `_: Fixed issue in get_host_from_connector by " "caching the host information during attach or detach operations and using " "host details from cached information." msgstr "" "IBM Spectrum Virtualize Family driver `Bug #1913363 `_: Fixed issue in get_host_from_connector by " "caching the host information during attach or detach operations and using " "host details from cached information." msgid "" "IBM Spectrum Virtualize Family driver `Bug #1917605 `_: Fixed issue in StorwizeSVCCommonDriver to save " "site and peer pool information in stats during initialization." msgstr "" "IBM Spectrum Virtualize Family driver `Bug #1917605 `_: Fixed issue in StorwizeSVCCommonDriver to save " "site and peer pool information in stats during initialization." msgid "" "IBM Spectrum Virtualize Family driver `Bug #1920099 `_: Fix issue where _check_delete_vdisk_fc_mappings " "was deleting flashcopy mappings during extend operation of a clone volume " "where its source volume already contained a snapshot." msgstr "" "IBM Spectrum Virtualize Family driver `Bug #1920099 `_: Fix issue where _check_delete_vdisk_fc_mappings " "was deleting flashcopy mappings during extend operation of a clone volume " "where its source volume already contained a snapshot." msgid "" "IBM Spectrum Virtualize Family driver `Bug #1920870 `_: Fixed extend issues for volumes with replication " "enabled by avoiding volume remote-copy relationship deletion and creation." msgstr "" "IBM Spectrum Virtualize Family driver `Bug #1920870 `_: Fixed extend issues for volumes with replication " "enabled by avoiding volume remote-copy relationship deletion and creation." msgid "" "IBM Spectrum Virtualize Family driver `Bug #1920870 `_: Reduce multiple lsiogrp, lsvdisk calls in Retype " "operaton to optimize the code." msgstr "" "IBM Spectrum Virtualize Family driver `Bug #1920870 `_: Reduce multiple lsiogrp, lsvdisk calls in Retype " "operation to optimize the code." msgid "" "IBM Spectrum Virtualize Family driver `Bug #1920890 `_: Fixed issue in retype_hyperswap_volume method to " "update site and iogrp information to the host during a retype from a non-" "HyperSwap volume to a HyperSwap volume." msgstr "" "IBM Spectrum Virtualize Family driver `Bug #1920890 `_: Fixed issue in retype_hyperswap_volume method to " "update site and iogrp information to the host during a retype from a non-" "HyperSwap volume to a HyperSwap volume." msgid "" "IBM Spectrum Virtualize Family driver `Bug #1922013 `_: Fixed issues in adding volumes to GMCV group." msgstr "" "IBM Spectrum Virtualize Family driver `Bug #1922013 `_: Fixed issues in adding volumes to GMCV group." msgid "" "IBM Spectrum Virtualize Family driver `Bug #1924568 `_: Fixed issues that occurred while creating volume " "on data reduction pool." msgstr "" "IBM Spectrum Virtualize Family driver `Bug #1924568 `_: Fixed issues that occurred while creating volume " "on data reduction pool." msgid "" "IBM Spectrum Virtualize Family driver `Bug #1924602 `_: Fixed issue to create snapshots, clones, group " "snapshots, and group clones for HyperSwap volumes." msgstr "" "IBM Spectrum Virtualize Family driver `Bug #1924602 `_: Fixed issue to create snapshots, clones, group " "snapshots, and group clones for HyperSwap volumes." 
msgid "" "IBM Spectrum Virtualize Family driver `Bug #1926286 `_: Fixed an issue while fetching relationship " "details of a volume with replication enabled." msgstr "" "IBM Spectrum Virtualize Family driver `Bug #1926286 `_: Fixed an issue while fetching relationship " "details of a volume with replication enabled." msgid "" "IBM Spectrum Virtualize Family driver `Bug #1926491 `_: Updating volume metadata with rccg properties " "for the volumes with replication enabled and added to a group or removed " "from a group." msgstr "" "IBM Spectrum Virtualize Family driver `Bug #1926491 `_: Updating volume metadata with rccg properties " "for the volumes with replication enabled and added to a group or removed " "from a group." msgid "" "IBM Spectrum Virtualize Family driver `Bug #1931968 `_: Fixed issue in updating the replication status " "of HyperSwap volume service based on status of nodes during initialization " "and periodic calls." msgstr "" "IBM Spectrum Virtualize Family driver `Bug #1931968 `_: Fixed issue in updating the replication status " "of HyperSwap volume service based on status of nodes during initialization " "and periodic calls." msgid "" "IBM Spectrum Virtualize Family driver `Bug #1939145 `_: Updating create_relationship and create_rccg " "calls with the system_id in the place of system_name to fix the issues while " "creating a mirror volume or creating a consistency group." msgstr "" "IBM Spectrum Virtualize Family driver `Bug #1939145 `_: Updating create_relationship and create_rccg " "calls with the system_id in the place of system_name to fix the issues while " "creating a mirror volume or creating a consistency group." msgid "" "IBM Spectrum Virtualize Family driver `Bug #1949061 `_: Fixed retype issue of mirror-volume to mirror-" "volume-type with different mirror pool" msgstr "" "IBM Spectrum Virtualize Family driver `Bug #1949061 `_: Fixed retype issue of mirror-volume to mirror-" "volume-type with different mirror pool" msgid "" "IBM Spectrum Virtualize Family driver: Added `--delete-volumes` flag support " "for delete volumegroup operation. After adding support, the volumes can " "optionally be deleted when the volume group is deleted." msgstr "" "IBM Spectrum Virtualise Family driver: Added `--delete-volumes` flag support " "for delete volume group operation. After adding support, the volumes can " "optionally be deleted when the volume group is deleted." msgid "" "IBM Spectrum Virtualize Family driver: Added `storwize_volume_group` " "parameter in the cinder configuration to support volume group feature." msgstr "" "IBM Spectrum Virtualise Family driver: Added `storwize_volume_group` " "parameter in the Cinder configuration to support the volume group feature." msgid "" "IBM Spectrum Virtualize Family driver: Added fucntionality that returns " "throttle rate of maximum IOPS and bandwidth of all VDisks of a specified " "storage pool." msgstr "" "IBM Spectrum Virtualize Family driver: Added functionality that returns " "throttle rate of maximum IOPS and bandwidth of all VDisks of a specified " "storage pool." msgid "" "IBM Spectrum Virtualize Family driver: Added support for clean_rate " "parameter. Clean_rate parameter can now be passed as extra-spec in volume-" "type or fetched from cinder.conf." msgstr "" "IBM Spectrum Virtualize Family driver: Added support for clean_rate " "parameter. Clean_rate parameter can now be passed as extra-spec in volume-" "type or fetched from cinder.conf." 
msgid "" "IBM Spectrum Virtualize Family driver: Added support for creation and " "deletion of volumegroup snapshots." msgstr "" "IBM Spectrum Virtualize Family driver: Added support for creation and " "deletion of volumegroup snapshots." msgid "" "IBM Spectrum Virtualize Family driver: Added support for volumegroup for SVC " "Code Level 8.5.1.0 and above. User can now create, modify and delete " "volumegroup using the exising cinder CLI for group operations." msgstr "" "IBM Spectrum Virtualize Family driver: Added support for volumegroup for SVC " "Code Level 8.5.1.0 and above. User can now create, modify and delete " "volumegroup using the exising Cinder CLI for group operations." msgid "" "IBM Spectrum Virtualize Family driver: Added support to manage GMCV volumes " "on separate storage pools." msgstr "" "IBM Spectrum Virtualize Family driver: Added support to manage GMCV volumes " "on separate storage pools." msgid "" "IBM Spectrum Virtualize Family driver: Added support to manage host " "attachment using portsets for code level >= 8.4.2.0" msgstr "" "IBM Spectrum Virtualize Family driver: Added support to manage host " "attachment using portsets for code level >= 8.4.2.0" msgid "" "IBM Spectrum Virtualize Family driver: Added volume-extend support for " "volumes created using a HyperSwap volume-type template." msgstr "" "IBM Spectrum Virtualize Family driver: Added volume-extend support for " "volumes created using a HyperSwap volume-type template." msgid "" "IBM Spectrum Virtualize Family driver: `Bug #1920912 `_: Fixed rccg create issue while adding volumes to " "a group where the group is cloned from group snapshot or other source group." msgstr "" "IBM Spectrum Virtualize Family driver: `Bug #1920912 `_: Fixed rccg create issue while adding volumes to " "a group where the group is cloned from group snapshot or other source group." msgid "" "IBM Spectrum Virtualize Family driver: `Bug #1935670 `_: Fixed empty attribute values issue while " "updating volume metadata table for replicated volumes." msgstr "" "IBM Spectrum Virtualize Family driver: `Bug #1935670 `_: Fixed empty attribute values issue while " "updating volume metadata table for replicated volumes." msgid "" "IBM Spectrum Virtualize Family driver: `Bug #1938212 `_: Added replication license support for " "FlashSystem V5000E storage system. Removed support for IBM Storwize V3700 as " "it reached End Of Service." msgstr "" "IBM Spectrum Virtualize Family driver: `Bug #1938212 `_: Added replication license support for " "FlashSystem V5000E storage system. Removed support for IBM Storwize V3700 as " "it reached End Of Service." msgid "" "IBM Spectrum Virtualize Family driver: `Bug #1941694 `_: Fixed detaching volume from second instance for " "multi-attach type volumes." msgstr "" "IBM Spectrum Virtualize Family driver: `Bug #1941694 `_: Fixed detaching volume from second instance for " "multi-attach type volumes." msgid "" "IBM Spectrum Virtualize Family driver: `Bug #1961548 `_: Optimize lsvdisk and lssystem calls to reduce " "the computational time for creating GMCV volumes." msgstr "" "IBM Spectrum Virtualize Family driver: `Bug #1961548 `_: Optimize lsvdisk and lssystem calls to reduce " "the computational time for creating GMCV volumes." msgid "" "IBM Spectrum Virtualize Family driver: `Bug #1968159 `_: Fix for retype failure for replicated volume-" "type. Controlling chfcmap call for rc_controlled fcmap for replication-type " "volumes during retype operation." 
msgstr "" "IBM Spectrum Virtualize Family driver: `Bug #1968159 `_: Fix for retype failure for replicated volume-" "type. Controlling chfcmap call for rc_controlled fcmap for replication-type " "volumes during retype operation." msgid "" "IBM Spectrum Virtualize Family driver: `Bug #1976400 `_: Optimize svcinfo CLI calls to reduce the " "computational time for rc-relationship related operations." msgstr "" "IBM Spectrum Virtualise Family driver: `Bug #1976400 `_: Optimise svcinfo CLI calls to reduce the " "computational time for rc-relationship related operations." msgid "" "IBM Spectrum Virtualize Family driver: `Bug #1976499 `_: Setting correct SVC Code level for " "lsfcportsetmember call." msgstr "" "IBM Spectrum Virtualize Family driver: `Bug #1976499 `_: Setting correct SVC Code level for " "lsfcportsetmember call." msgid "" "IBM Spectrum Virtualize Family driver: `Bug #1982078 `_: Fixed the default portset value during driver " "initialization." msgstr "" "IBM Spectrum Virtualize Family driver: `Bug #1982078 `_: Fixed the default portset value during driver " "initialisation." msgid "" "IBM Spectrum Virtualize Family driver: `Bug #1985065 `_: Fixed to collect all the IP addresses for all " "the storage nodes given in lsip command response as volume of any iogrp " "should be available to the storage nodes in default scenario." msgstr "" "IBM Spectrum Virtualize Family driver: `Bug #1985065 `_: Fixed to collect all the IP addresses for all " "the storage nodes given in lsip command response as volume of any iogrp " "should be available to the storage nodes in default scenario." msgid "" "IBM Spectrum Virtualize Family: Added support for revert to snapshot for " "global-mirror volume." msgstr "" "IBM Spectrum Virtualize Family: Added support for revert to snapshot for " "global-mirror volume." msgid "" "IBM Spectrum Virtualize driver `Bug #1890254 `_: Fix check_vdisk_fc_mappings is not deleting all " "flashcopy mappings while deleting source volume, when multiple clones and " "snapshots are created using common source volume." msgstr "" "IBM Spectrum Virtualize driver `Bug #1890254 `_: Fix check_vdisk_fc_mappings is not deleting all " "flashcopy mappings while deleting source volume, when multiple clones and " "snapshots are created using common source volume." msgid "" "IBM Spectrum Virtualize family driver `Bug #1943682 `_: Updating rccg_name property to volume metadata " "for the resultant volumes of a clone_group from a source_group or a " "group_snapshot." msgstr "" "IBM Spectrum Virtualize family driver `Bug #1943682 `_: Updating rccg_name property to volume metadata " "for the resultant volumes of a clone_group from a source_group or a " "group_snapshot." msgid "" "IBM Spectrum Virtualize family driver `Bug #1953185 `_: Fixed revert to snapshot issue for replicated " "volumes which are a part of group." msgstr "" "IBM Spectrum Virtualize family driver `Bug #1953185 `_: Fixed revert to snapshot issue for replicated " "volumes which are a part of group." msgid "" "IBM Spectrum Virtualize family driver `Bug #1960314 `_: Fixed resize issue for GMCV volumes which are a " "part of a consistency group(CG)." msgstr "" "IBM Spectrum Virtualize family driver `Bug #1960314 `_: Fixed resize issue for GMCV volumes which are a " "part of a consistency group(CG)." msgid "" "IBM Spectrum Virtualize family driver `Bug #1960315 `_: Fixed delete and resize volume issues in during " "reverse replication and added support to extend the volume for failover " "scenarios." 
msgstr "" "IBM Spectrum Virtualize family driver `Bug #1960315 `_: Fixed delete and resize volume issues in during " "reverse replication and added support to extend the volume for failover " "scenarios." msgid "" "IBM Spectrum Virtualize family driver `Bug #1966639 `_: Fixed resize issue in reverse replication for " "the volumes which are a part of a consistency group(CG)." msgstr "" "IBM Spectrum Virtualize family driver `Bug #1966639 `_: Fixed resize issue in reverse replication for " "the volumes which are a part of a consistency group(CG)." msgid "" "IBM Spectrum Virtualize family driver `Bug #1978290 `_: Optimize lsmdiskgrp SSH calls in creation of " "replicated volumes to reduce the computational time." msgstr "" "IBM Spectrum Virtualize family driver `Bug #1978290 `_: Optimise lsmdiskgrp SSH calls in creation of " "replicated volumes to reduce the computational time." msgid "" "IBM Spectrum Virtualize: Added support to enable creating a group from " "source, when source is a replicated group or consistency group snapshot of a " "replicated group." msgstr "" "IBM Spectrum Virtualize: Added support to enable creating a group from " "source, when source is a replicated group or consistency group snapshot of a " "replicated group." msgid "" "IBM Spectrum Virtualize: Adds support for retype operation on global mirror " "volumes." msgstr "" "IBM Spectrum Virtualize: Adds support for retype operation on global mirror " "volumes." msgid "" "IBM Storwize drivers had been previously marked unsupported. Testing " "requirements have been addressed and they are now fully supported again. IBM " "Storwize drivers allow cinder to manage volumes both in iSCSI and FC " "environment." msgstr "" "IBM Storwize drivers had been previously marked unsupported. Testing " "requirements have been addressed and they are now fully supported again. IBM " "Storwize drivers allow cinder to manage volumes both in iSCSI and FC " "environment." msgid "" "IET iSCSI target removed. IET iSCSI target was deprecated in the V release." msgstr "" "IET iSCSI target removed. IET iSCSI target was deprecated in the V release." msgid "" "INFINIDAT volume driver now requires the 'infinisdk' python module to be " "installed." msgstr "" "INFINIDAT volume driver now requires the 'infinisdk' Python module to be " "installed." msgid "IQN identification is now case-insensitive when using LIO." msgstr "IQN identification is now case-insensitive when using LIO." msgid "" "If RBD stats collection is taking too long in your environment maybe even " "leading to the service appearing as down you'll want to use the " "`rbd_exclusive_cinder_pool = true` configuration option if you are using the " "pool exclusively for Cinder and maybe even if you are not and can live with " "the innacuracy." msgstr "" "If RBD stats collection is taking too long in your environment maybe even " "leading to the service appearing as down you'll want to use the " "`rbd_exclusive_cinder_pool = true` configuration option if you are using the " "pool exclusively for Cinder and maybe even if you are not and can live with " "the inaccuracy." msgid "" "If ``fujitsu_use_cli_copy`` is set to ``False``, create a snapshot using the " "conventional SMI-S method." msgstr "" "If ``fujitsu_use_cli_copy`` is set to ``False``, create a snapshot using the " "conventional SMI-S method." msgid "" "If ``fujitsu_use_cli_copy`` is set to ``True``, create a snapshot using the " "CLI method, allowing volume extension of the source volume." 
msgstr "" "If ``fujitsu_use_cli_copy`` is set to ``True``, create a snapshot using the " "CLI method, allowing volume extension of the source volume." msgid "" "If ``image_service:store_id`` is not set in the extra-specs for a volume-" "type, then any volume of that type uploaded as an image will be uploaded to " "the default store in Glance." msgstr "" "If ``image_service:store_id`` is not set in the extra-specs for a volume-" "type, then any volume of that type uploaded as an image will be uploaded to " "the default store in Glance." msgid "" "If a ``volume_type`` is not specified when a volume is created, Cinder tries " "to infer the volume type from other information in the volume-create request:" msgstr "" "If a ``volume_type`` is not specified when a volume is created, Cinder tries " "to infer the volume type from other information in the volume-create request:" msgid "" "If a volume with snapshots has been extended, causing a mismatch in size " "between the origin volume and the snapshot, reverting will be guarded by " "cinder-api." msgstr "" "If a volume with snapshots has been extended, causing a mismatch in size " "between the origin volume and the snapshot, reverting will be guarded by " "Cinder-API." msgid "" "If all of the above apply to you, your upgrade path from Train to Ussuri is " "slightly more complicated than usual and may require some actions in your " "Train deployment *before* you upgrade. Please pick the least inconvenient " "of the following options:" msgstr "" "If all of the above apply to you, your upgrade path from Train to Ussuri is " "slightly more complicated than usual and may require some actions in your " "Train deployment *before* you upgrade. Please pick the least inconvenient " "of the following options:" msgid "" "If all the above three items apply to you, as part of your upgrade to cinder " "15.4.0 you should re-run the online database migrations contained in this " "release. This will prepare your cinder database for an eventual upgrade to " "the Ussuri release." msgstr "" "If all the above three items apply to you, as part of your upgrade to cinder " "15.4.0 you should re-run the online database migrations contained in this " "release. This will prepare your Cinder database for an eventual upgrade to " "the Ussuri release." msgid "" "If device attachment failed it could leave the volume partially attached. " "Cinder now tries to clean up on failure." msgstr "" "If device attachment failed it could leave the volume partially attached. " "Cinder now tries to clean up on failure." msgid "" "If during a *live* upgrade from Liberty a backup service will be killed " "while processing a restore request it may happen that such backup status " "won't be automatically cleaned up on the service restart. Such orphaned " "backups need to be cleaned up manually." msgstr "" "If during a *live* upgrade from Liberty a backup service will be killed " "while processing a restore request it may happen that such backup status " "won't be automatically cleaned up on the service restart. Such orphaned " "backups need to be cleaned up manually." 
msgid "" "If necessary, you can create a new ``__DEFAULT__`` volume type as follows " "using the Block Storage API, or by using the python-cinderclient or python-" "openstackclient to do the equivalent:" msgstr "" "If necessary, you can create a new ``__DEFAULT__`` volume type as follows " "using the Block Storage API, or by using the python-cinderclient or python-" "openstackclient to do the equivalent:" msgid "" "If policy for update volume metadata is modified in a desired way it's " "needed to add a desired rule for create volume metadata." msgstr "" "If policy for update volume metadata is modified in a desired way it's " "needed to add a desired rule for create volume metadata." msgid "" "If the ``default_volume_type`` is misconfigured (that is, if the value " "refers to a non-existent volume-type), requests that rely on the default " "volume-type (for example, a volume-create request that does not specify a " "volume-type) will result in a HTTP 500 response." msgstr "" "If the ``default_volume_type`` is misconfigured (that is, if the value " "refers to a non-existent volume-type), requests that rely on the default " "volume-type (for example, a volume-create request that does not specify a " "volume-type) will result in a HTTP 500 response." msgid "" "If the length of the name after variable replacement exceeds the maximum " "length of host group (iSCSI target) names, the host name is truncated so " "that the length of the host groups or iSCSI targets do not exceed the " "maximum length." msgstr "" "If the length of the name after variable replacement exceeds the maximum " "length of host group (iSCSI target) names, the hostname is truncated so that " "the length of the host groups or iSCSI targets does not exceed the maximum " "length." msgid "If the specified value includes ``{host}``, the following rules apply:" msgstr "If the specified value includes ``{host}``, the following rules apply:" msgid "" "If using the NetApp ONTAP drivers (7mode/cmode), the configuration value for " "\"max_over_subscription_ratio\" may need to be increased to avoid scheduling " "problems where storage pools that previously were valid to schedule new " "volumes suddenly appear to be out of space to the Cinder scheduler. See " "documentation `here `_." msgstr "" "If using the NetApp ONTAP drivers (7mode/cmode), the configuration value for " "\"max_over_subscription_ratio\" may need to be increased to avoid scheduling " "problems where storage pools that previously were valid to schedule new " "volumes suddenly appear to be out of space to the Cinder scheduler. See " "documentation `here `_." msgid "" "If using the key manager, the configuration details should be updated to " "reflect the Castellan-specific configuration options." msgstr "" "If using the key manager, the configuration details should be updated to " "reflect the Castellan-specific configuration options." msgid "" "If you *have not* modified ``periodic_interval``, you should see no " "differences from current behavior." msgstr "" "If you *have not* modified ``periodic_interval``, you should see no " "differences from current behaviour." msgid "" "If you *have* modified ``periodic_interval``, please review the new options " "to determine which one(s) should be adjusted. Also, you should consider " "setting ``periodic_interval`` back to its default value of 60." msgstr "" "If you *have* modified ``periodic_interval``, please review the new options " "to determine which one(s) should be adjusted. 
Also, you should consider " "setting ``periodic_interval`` back to its default value of 60." msgid "" "If you are upgrading a Stein installation directly to this release (cinder " "15.4.0) or later, this notice does *not* apply to you." msgstr "" "If you are upgrading a Stein installation directly to this release (cinder " "15.4.0) or later, this notice does *not* apply to you." msgid "" "If you are upgrading to this release from an earlier release in the Train " "series (that is, you are upgrading from cinder>=15.0.0,<=15.3.0), under " "specific circumstances you should re-run the online database migrations so " "that your database will be in the correct state when you eventually upgrade " "to a Ussuri release. See the \"Upgrade Notes\" for more information." msgstr "" "If you are upgrading to this release from an earlier release in the Train " "series (that is, you are upgrading from Cinder>=15.0.0,<=15.3.0), under " "specific circumstances you should re-run the online database migrations so " "that your database will be in the correct state when you eventually upgrade " "to a Ussuri release. See the \"Upgrade Notes\" for more information." msgid "" "If you have ``backup_driver_status_check_interval`` option in your cinder." "conf we recommend you to use ``backup_driver_stats_polling_interval`` to " "avoid deprecation warnings in logs." msgstr "" "If you have ``backup_driver_status_check_interval`` option in your cinder." "conf we recommend you to use ``backup_driver_stats_polling_interval`` to " "avoid deprecation warnings in logs." msgid "" "If you have a custom value for this policy in your cinder policy " "configuration file, this change to the default value will not affect you." msgstr "" "If you have a custom value for this policy in your Cinder policy " "configuration file, this change to the default value will not affect you." msgid "" "If you have been aware of this regression and like the current (incorrect) " "behavior, you may add the following line to your cinder policy configuration " "file to restore that behavior::" msgstr "" "If you have been aware of this regression and like the current (incorrect) " "behaviour, you may add the following line to your Cinder policy " "configuration file to restore that behaviour::" msgid "" "If you have renamed (or renamed and deleted) the ``__DEFAULT__`` volume type " "in Train, you must re-create it **in your Train deployment** before " "upgrading to Ussuri. This will ensure that the ``__DEFAULT__`` volume type " "will be present in the database when you run the Ussuri online database " "migrations." msgstr "" "If you have renamed (or renamed and deleted) the ``__DEFAULT__`` volume type " "in Train, you must re-create it **in your Train deployment** before " "upgrading to Ussuri. This will ensure that the ``__DEFAULT__`` volume type " "will be present in the database when you run the Ussuri online database " "migrations." msgid "" "If you specify this parameter, it is recommended that you specify ``True`` " "for the ``hitachi_group_create`` parameter to collect necessary information " "automatically." msgstr "" "If you specify this parameter, it is recommended that you specify ``True`` " "for the ``hitachi_group_create`` parameter to collect necessary information " "automatically." msgid "" "If your deployment uses ``storage_protocol`` to differentiate between " "backends that use the same protocol but report it using different variants, " "be aware that they will no longer be differentiated." 
msgstr "" "If your deployment uses ``storage_protocol`` to differentiate between " "backends that use the same protocol but report it using different variants, " "be aware that they will no longer be differentiated." msgid "" "Images in the qcow2 format with an external data file are now rejected with " "an ``ImageUnacceptable`` error because such images could be used in an " "exploit to expose host information. Given that qcow2 external data files " "were never supported by Cinder, this change should have no impact on users. " "See `Bug #2059809 `_ for " "details." msgstr "" "Images in the qcow2 format with an external data file are now rejected with " "an ``ImageUnacceptable`` error because such images could be used in an " "exploit to expose host information. Given that qcow2 external data files " "were never supported by Cinder, this change should have no impact on users. " "See `Bug #2059809 `_ for " "details." msgid "Impacted operations:" msgstr "Impacted operations:" msgid "" "In IBM Storwize_SVC driver, user could specify only one IO group per backend " "definition. The user now may specify a comma separated list of IO groups, " "and at the time of creating the volume, the driver will select an IO group " "which has the least number of volumes associated with it. The change is " "backward compatible, meaning single value is still supported." msgstr "" "In IBM Storwize_SVC driver, user could specify only one IO group per backend " "definition. The user now may specify a comma separated list of IO groups, " "and at the time of creating the volume, the driver will select an IO group " "which has the least number of volumes associated with it. The change is " "backward compatible, meaning single value is still supported." msgid "" "In NEC driver, the deprecated configuration parameter " "`ldset_controller_node_name` was deleted." msgstr "" "In NEC driver, the deprecated configuration parameter " "`ldset_controller_node_name` was deleted." msgid "" "In NEC driver, the number of volumes in a storage pool is no longer limited " "to 1024. More volumes can be created with storage firmware revision 1015 or " "later." msgstr "" "In the NEC driver, the number of volumes in a storage pool is no longer " "limited to 1024. More volumes can be created with storage firmware revision " "1015 or later." msgid "" "In VNX Cinder driver, ``replication_device`` keys, ``backend_id`` and " "``san_ip`` are mandatory now. If you prefer security file authentication, " "please append ``storage_vnx_security_file_dir`` in ``replication_device``, " "otherwise, append ``san_login``, ``san_password``, " "``storage_vnx_authentication_type`` in ``replication_device``." msgstr "" "In VNX Cinder driver, ``replication_device`` keys, ``backend_id`` and " "``san_ip`` are mandatory now. If you prefer security file authentication, " "please append ``storage_vnx_security_file_dir`` in ``replication_device``, " "otherwise, append ``san_login``, ``san_password``, " "``storage_vnx_authentication_type`` in ``replication_device``." msgid "" "In certain environments (Kubernetes for example) indirect calls to the LVM " "commands result in file descriptor leak warning messages which in turn cause " "the process_execution method to raise and exception." msgstr "" "In certain environments (Kubernetes for example) indirect calls to the LVM " "commands result in file descriptor leak warning messages which in turn cause " "the process_execution method to raise and exception." 
msgid "" "In light of the fix for RBD driver `bug #1941815 `_, we want to bring the following information to your " "attention." msgstr "" "In light of the fix for RBD driver `bug #1941815 `_, we want to bring the following information to your " "attention." msgid "" "In order to provide operators with a choice of FCZM drivers, the Cinder " "community decided to continue supporting the Brocade FCZM driver on a best-" "effort basis. See the \"Bug Fixes\" section of these notes for changes made " "to allow the driver to run in a Python 3 environment." msgstr "" "In order to provide operators with a choice of FCZM drivers, the Cinder " "community decided to continue supporting the Brocade FCZM driver on a best-" "effort basis. See the \"Bug Fixes\" section of these notes for changes made " "to allow the driver to run in a Python 3 environment." msgid "" "In order to provide operators with a choice of Fibre Channel Zone Manager " "drivers, the Cinder community has decided to continue supporting the Brocade " "FCZM driver, which was marked 'unsupported' in the Ussuri release, on a best-" "effort basis." msgstr "" "In order to provide operators with a choice of Fibre Channel Zone Manager " "drivers, the Cinder community has decided to continue supporting the Brocade " "FCZM driver, which was marked 'unsupported' in the Ussuri release, on a best-" "effort basis." msgid "" "In order to simplify initial setup for new installations the default " "behaviour of the Quobyte driver for the options " "``nas_secure_file_operations`` and ``nas_secure_file_permissions`` has " "changed. The 'auto' values are no longer mapped to true but to false. " "Therefore the old default behaviour to run with secure settings is changed " "to run without secure settings as the new default behaviour. Installations " "using the default values for these options should ensure to explicitly set " "them to true with this new Cinder Quobyte driver version." msgstr "" "In order to simplify initial setup for new installations the default " "behaviour of the Quobyte driver for the options " "``nas_secure_file_operations`` and ``nas_secure_file_permissions`` has " "changed. The 'auto' values are no longer mapped to true but to false. " "Therefore the old default behaviour to run with secure settings is changed " "to run without secure settings as the new default behaviour. Installations " "using the default values for these options should ensure to explicitly set " "them to true with this new Cinder Quobyte driver version." msgid "" "In order to use an unsupported driver, ``enable_unsupported_driver`` must be " "set to ``True`` in the driver's section in the cinder.conf file. If you are " "the consumer of such a driver, we encourage you to contact the vendor to " "make them aware of your concerns." msgstr "" "In order to use an unsupported driver, ``enable_unsupported_driver`` must be " "set to ``True`` in the driver's section in the cinder.conf file. If you are " "the consumer of such a driver, we encourage you to contact the vendor to " "make them aware of your concerns." msgid "" "In particular, we direct your attention to the default values for the " "policies associated with the Default Volume Types API (introduced with " "microversion 3.62 of the Block Storage API). These had experimentally " "recognized \"scope\", but for consistency with the other rules, their " "default values no longer recognize scope. 
(Scope will be introduced to all " "cinder policy defaults in the Yoga release.)" msgstr "" "In particular, we direct your attention to the default values for the " "policies associated with the Default Volume Types API (introduced with " "microversion 3.62 of the Block Storage API). These had experimentally " "recognised \"scope\", but for consistency with the other rules, their " "default values no longer recognise scope. (Scope will be introduced to all " "cinder policy defaults in the Yoga release.)" msgid "" "In the Image service (Glance), the ``compressed`` container format " "identifier does not indicate a particular compression technology; it is up " "to the image consumer to determine what compression has been used, and there " "is no requirement that OpenStack services must support arbitrary compression " "technologies. For the upload and download of compressed images, Cinder " "supports *only* the gzip format." msgstr "" "In the Image service (Glance), the ``compressed`` container format " "identifier does not indicate a particular compression technology; it is up " "to the image consumer to determine what compression has been used, and there " "is no requirement that OpenStack services must support arbitrary compression " "technologies. For the upload and download of compressed images, Cinder " "supports *only* the gzip format." msgid "" "In the latter two cases, an end user can determine what happened by using " "the `Messages API `_, which can be accessed using the `cinderclient " "`_ or `openstackclient `_." msgstr "" "In the latter two cases, an end user can determine what happened by using " "the `Messages API `_, which can be accessed using the `cinderclient " "`_ or `openstackclient `_." msgid "" "In the length calculation, use the following values as the length of each " "variable:" msgstr "" "In the length calculation, use the following values as the length of each " "variable:" msgid "" "In this release, sending ``os-reset_status`` notifications to the following " "*nonstandard* publisher_ids is DEPRECATED:" msgstr "" "In this release, sending ``os-reset_status`` notifications to the following " "*nonstandard* publisher_ids is DEPRECATED:" msgid "" "Inclusion of a project_id in API URLs is now optional. The `Block Storage " "API V3 `_ reference " "guide continues to show URLs with a project_id because the legacy behavior " "continues to be supported." msgstr "" "The inclusion of a project_id in API URLs is now optional. The `Block " "Storage API V3 `_ " "reference guide continues to show URLs with a project_id because the legacy " "behaviour continues to be supported." msgid "" "Infinidat Driver `bug #1981354 `_: Fixed Infinidat driver to return all configured and enabled " "iSCSI portals for a given network space." msgstr "" "Infinidat Driver `bug #1981354 `_: Fixed Infinidat driver to return all configured and enabled " "iSCSI portals for a given network space." msgid "" "Infinidat Driver `bug #1981982 `_: Fixed Infinidat driver to use TLS/SSL communication between " "the Cinder volume service and the storage backend. Admin can set `True` or " "`False` for the `driver_use_ssl` and `suppress_requests_ssl_warnings` " "options in the driver section of cinder.conf to enable or disable these " "features." msgstr "" "Infinidat Driver `bug #1981982 `_: Fixed Infinidat driver to use TLS/SSL communication between " "the Cinder volume service and the storage backend. 
Admin can set `True` or " "`False` for the `driver_use_ssl` and `suppress_requests_ssl_warnings` " "options in the driver section of cinder.conf to enable or disable these " "features." msgid "" "Infinidat Driver `bug #1982350 `_: Fixed Infinidat driver multi-attach feature. Added a check " "if there are multiple attachments to the volume from the same connector and " "terminate connection only for the last attachment from the corresponding " "host." msgstr "" "Infinidat Driver `bug #1982350 `_: Fixed Infinidat driver multi-attach feature. Added a check " "if there are multiple attachments to the volume from the same connector and " "terminate connection only for the last attachment from the corresponding " "host." msgid "" "Infinidat Driver `bug #1982405 `_: Fixed Infinidat driver to allow generic volume migration " "between two storage pools within the same cluster." msgstr "" "Infinidat Driver `bug #1982405 `_: Fixed Infinidat driver to allow generic volume migration " "between two storage pools within the same cluster." msgid "" "Infinidat Driver `bug #1983287 `_: Fixed Infinidat driver to allow backup of an attached " "volume." msgstr "" "Infinidat Driver `bug #1983287 `_: Fixed Infinidat driver to allow backup of an attached " "volume." msgid "" "Infinidat Driver `bug #1984000 `_: Fixed Infinidat driver to take into account the group " "identifier property when creating a new volume and add the volume to the " "consistency group." msgstr "" "Infinidat Driver `bug #1984000 `_: Fixed Infinidat driver to take into account the group " "identifier property when creating a new volume and add the volume to the " "consistency group." msgid "" "Infinidat driver `bug #2017815 `_: Fixed Infinidat driver to inherit compression setting by " "default for all newly created volumes. Admin can set ``True`` or ``False`` " "for the ``infinidat_use_compression`` option in the driver section of " "``cinder.conf`` to explicitly enable or disable compression setting for all " "newly created volumes. Or leave this option unset (commented out) for all " "created volumes to inherit their compression setting from their parent pool " "at creation time. The default value is unset." msgstr "" "Infinidat driver `bug #2017815 `_: Fixed Infinidat driver to inherit compression setting by " "default for all newly created volumes. Admin can set ``True`` or ``False`` " "for the ``infinidat_use_compression`` option in the driver section of " "``cinder.conf`` to explicitly enable or disable compression setting for all " "newly created volumes. Or leave this option unset (commented out) for all " "created volumes to inherit their compression setting from their parent pool " "at creation time. The default value is unset." msgid "Infinidat driver: Added support for revert to snapshot operation." msgstr "Infinidat driver: Added support for revert to snapshot operation." msgid "" "Infinidat driver: Added support to manage and unmanage volumes and " "snapshots. Also added the functionality to list the manageable volumes and " "snapshots." msgstr "" "Infinidat driver: Added support to manage and unmanage volumes and " "snapshots. Also added the functionality to list the manageable volumes and " "snapshots." msgid "" "Infinidat driver: support has been removed for pre-v3.0 InfiniBox systems. " "These versions are end of life and have not been supported for a long time." msgstr "" "Infinidat driver: support has been removed for pre-v3.0 InfiniBox systems. 
" "These versions are end of life and have not been supported for a long time." msgid "" "Infinidat: Added support for storage assisted volume migration within a same " "InfiniBox host (iSCSI and FC)." msgstr "" "Infinidat: Added support for storage assisted volume migration within a same " "InfiniBox host (iSCSI and FC)." msgid "Infortrend" msgstr "Infortrend" msgid "" "Instead of ``api_class`` option ``cinder.keymgr.barbican." "BarbicanKeyManager``, use ``backend`` option `barbican``" msgstr "" "Instead of ``api_class`` option ``cinder.keymgr.barbican." "BarbicanKeyManager``, use ``backend`` option `barbican``" msgid "" "Instead of using osapi_volume_base_url use public_endpoint. Both do the same " "thing." msgstr "" "Instead of using osapi_volume_base_url use public_endpoint. Both do the same " "thing." msgid "" "IntOpt ``datera_num_replicas`` is changed to a volume type extra spec " "option-- ``DF:replica_count``" msgstr "" "IntOpt ``datera_num_replicas`` is changed to a volume type extra spec " "option-- ``DF:replica_count``" msgid "" "Introduced generic volume groups and added create/ delete/update/list/show " "APIs for groups." msgstr "" "Introduced generic volume groups and added create/ delete/update/list/show " "APIs for groups." msgid "" "Introduced replication group support and added group action APIs " "enable_replication, disable_replication, failover_replication and " "list_replication_targets." msgstr "" "Introduced replication group support and added group action APIs " "enable_replication, disable_replication, failover_replication and " "list_replication_targets." msgid "" "Introduces microversion (MV) 3.63, which includes volume type ID in the " "volume details JSON response. This MV affects the volume detail list (``GET /" "v3/{project_id}/volumes/detail``), and volume-show (``GET /v3/{project_id}/" "volumes/{volume_id}``) calls." msgstr "" "Introduces microversion (MV) 3.63, which includes volume type ID in the " "volume details JSON response. This MV affects the volume detail list (``GET /" "v3/{project_id}/volumes/detail``), and volume-show (``GET /v3/{project_id}/" "volumes/{volume_id}``) calls." msgid "" "It is currently possible to manage a volume to an encrypted volume type, but " "that is not recommended because there is no way to supply an encryption key " "for the volume to cinder. Un-managing a volume of an encrypted volume type " "is already prevented, and it is expected that management to an encrypted " "type will similarly be blocked in a future release. This issue is being " "tracked as `Bug #1944577 `_." msgstr "" "It is currently possible to manage a volume to an encrypted volume type, but " "that is not recommended because there is no way to supply an encryption key " "for the volume to Cinder. Un-managing a volume of an encrypted volume type " "is already prevented, and it is expected that management of an encrypted " "type will similarly be blocked in a future release. This issue is being " "tracked as `Bug #1944577 `_." msgid "" "It is faster to create a new volume from a snapshot. You may wish to " "recommend this option to your users whose use cases do not strictly require " "revert-to-snapshot." msgstr "" "It is faster to create a new volume from a snapshot. You may wish to " "recommend this option to your users whose use cases do not strictly require " "revert-to-snapshot." msgid "" "It is no longer possible to specify an sqlalchemy-migrate-based version. 
" "When the ``cinder-manage db sync`` command is run, all remaining sqlalchemy-" "migrate-based migrations will be automatically applied. Attempting to " "specify an sqlalchemy-migrate-based version will result in an error." msgstr "" "It is no longer possible to specify an sqlalchemy-migrate-based version. " "When the ``cinder-manage db sync`` command is run, all remaining sqlalchemy-" "migrate-based migrations will be automatically applied. Attempting to " "specify an sqlalchemy-migrate-based version will result in an error." msgid "" "It is now possible to delete a volume and its snapshots by passing an " "additional argument to volume delete, \"cascade=True\"." msgstr "" "It is now possible to delete a volume and its snapshots by passing an " "additional argument to volume delete, \"cascade=True\"." msgid "" "It is required to copy new rootwrap.d/volume.filters file into /etc/cinder/" "rootwrap.d directory." msgstr "" "It is required to copy new rootwrap.d/volume.filters file into /etc/cinder/" "rootwrap.d directory." msgid "" "It was possible under certain circumstances for the host name of an instance " "to be leaked in the volume detail response. This has been fixed in the " "current release. The ``host_name`` field in any object in the " "``attachments`` array of the volume detail response is populated only when " "the call is made in an administrative context. Otherwise, its value is the " "JSON ``null`` value." msgstr "" "It was possible under certain circumstances for the host name of an instance " "to be leaked in the volume detail response. This has been fixed in the " "current release. The ``host_name`` field in any object in the " "``attachments`` array of the volume detail response is populated only when " "the call is made in an administrative context. Otherwise, its value is the " "JSON ``null`` value." msgid "" "JovianDSS driver: `Bug #1941746 `_: Fixed Fix ensure_export function failure in case of partial " "target recovery." msgstr "" "JovianDSS driver: `Bug #1941746 `_: Fixed Fix ensure_export function failure in case of partial " "target recovery." msgid "" "Just before release, `Bug #1965847 `_ was reported. When importing a backup record for a backup_id " "that currently exists, the import fails as expected. However, this " "operation has the unfortunate side effect that the existing backup record is " "deleted. Initial analysis of the bug indicates a small, isolated solution " "that should be backportable to stable branches." msgstr "" "Just before release, `Bug #1965847 `_ was reported. When importing a backup record for a backup_id " "that currently exists, the import fails as expected. However, this " "operation has the unfortunate side effect that the existing backup record is " "deleted. Initial analysis of the bug indicates a small, isolated solution " "that should be back portable to stable branches." msgid "" "Kaminario FC and iSCSI drivers: Fixed `bug 1829398 `_ where force detach would fail." msgstr "" "Kaminario FC and iSCSI drivers: Fixed `bug 1829398 `_ where force detach would fail." msgid "" "Kaminario K2 iSCSI driver now supports non discovery multipathing (Nova and " "Cinder won't use iSCSI sendtargets) which can be enabled by setting " "`disable_discovery` to `true` in the configuration." msgstr "" "Kaminario K2 iSCSI driver now supports non discovery multipathing (Nova and " "Cinder won't use iSCSI sendtargets) which can be enabled by setting " "`disable_discovery` to `true` in the configuration." 
msgid "" "Kaminario K2 now supports networks with duplicated FQDNs via configuration " "option `unique_fqdn_network` so attaching in these networks will work (bug " "#1720147)." msgstr "" "Kaminario K2 now supports networks with duplicated FQDNs via configuration " "option `unique_fqdn_network` so attaching in these networks will work (bug " "#1720147)." msgid "" "Kaminario driver `bug #1951981 `_: Fixed create volume from volume or snapshot not using " "multipath configuration." msgstr "" "Kaminario driver `bug #1951981 `_: Fixed create volume from volume or snapshot not using " "multipath configuration." msgid "" "Key migration is initiated on service startup, and entries in the cinder-" "volume log will indicate the migration status. Log entries will indicate " "when a volume's encryption key ID has been migrated to Barbican, and a " "summary log message will indicate when key migration has finished." msgstr "" "Key migration is initiated on service start-up, and entries in the cinder-" "volume log will indicate the migration status. Log entries will indicate " "when a volume's encryption key ID has been migrated to Barbican, and a " "summary log message will indicate when key migration has finished." msgid "Known Issues" msgstr "Known Issues" msgid "" "LUKS Encrypted RBD volumes can now be created by cinder-volume. This " "capability was previously blocked by the rbd volume driver due to the lack " "of any encryptors capable of attaching to an encrypted RBD volume. These " "volumes can also be seeded with RAW image data from Glance through the use " "of QEMU 2.10 and the qemu-img convert command." msgstr "" "LUKS Encrypted RBD volumes can now be created by cinder-volume. This " "capability was previously blocked by the rbd volume driver due to the lack " "of any encryptors capable of attaching to an encrypted RBD volume. These " "volumes can also be seeded with RAW image data from Glance through the use " "of QEMU 2.10 and the qemu-img convert command." msgid "" "LVM driver `bug #1901783 `_: " "Fix unexpected delete volume failure due to unexpected exit code 139 on " "``lvs`` command call." msgstr "" "LVM driver `bug #1901783 `_: " "Fix unexpected delete volume failure due to unexpected exit code 139 on " "``lvs`` command call." msgid "" "LVM driver: Added support for the NVMe TCP transport protocol. Configuration " "option is ``target_protocol = nvmet_tcp`` when using ``nvmet`` as the " "``target_helper``." msgstr "" "LVM driver: Added support for the NVMe TCP transport protocol. Configuration " "option is ``target_protocol = nvmet_tcp`` when using ``nvmet`` as the " "``target_helper``." msgid "" "LVM iSCSI driver fix for IPv6 addresses for the different targets, IET, LIO, " "TGT, CXT, and SCST." msgstr "" "LVM iSCSI driver fix for IPv6 addresses for the different targets, IET, LIO, " "TGT, CXT, and SCST." msgid "" "LVM nvmet target `bug #1964391 `_: Fixed temporary disconnection of all volumes from all hosts " "when creating and removing volume exports." msgstr "" "LVM nvmet target `bug #1964391 `_: Fixed temporary disconnection of all volumes from all hosts " "when creating and removing volume exports." msgid "" "LVM nvmet target `bug #1964394 `_: Fixed annoying kernel log message when exporting a volume." msgstr "" "LVM nvmet target `bug #1964394 `_: Fixed annoying kernel log message when exporting a volume." msgid "" "LVM nvmet target: Added support for new nvmeof connection properties format " "(version 2). Controlled with ``nvmeof_conn_info_version`` configuration " "option." 
msgstr "" "LVM nvmet target: Added support for new nvmeof connection properties format " "(version 2). Controlled with ``nvmeof_conn_info_version`` configuration " "option." msgid "" "Lenovo driver: Return additional configuration options from " "``get_driver_options`` call" msgstr "" "Lenovo driver: Return additional configuration options from " "``get_driver_options`` call" msgid "Liberty Series Release Notes" msgstr "Liberty Series Release Notes" msgid "" "Lightbits LightOS driver: new Cinder driver for Lightbits(TM) LightOS(R). " "Lightbits Labs (http://www.lightbitslabs.com) LightOS is software-defined, " "cloud native, high-performance, clustered scale-out and redundant NVMe/TCP " "storage that performs like local NVMe flash." msgstr "" "Lightbits LightOS driver: new Cinder driver for Lightbits(TM) LightOS(R). " "Lightbits Labs (http://www.lightbitslabs.com) LightOS is software-defined, " "cloud-native, high-performance, clustered scale-out and redundant NVMe/TCP " "storage that performs like local NVMe flash." msgid "" "Lightbits driver: Added a new configuration option ``lightos_use_ipacl``, " "defaulting to true. When set to true, the Cinder driver will restrict access " "to each volume to the IP addresses of the host machine that the volume is " "attached to." msgstr "" "Lightbits driver: Added a new configuration option ``lightos_use_ipacl``, " "defaulting to true. When set to true, the Cinder driver will restrict access " "to each volume to the IP addresses of the host machine that the volume is " "attached to." msgid "" "Lightbits driver: Added support to create multiple snapshots from the same " "volume simultaneously when using the Lightbits cinder driver. Under certain " "conditions, older releases of the Lightbits api-service will return various " "status codes (including HTTP status codes 500 and 503) that could indicate " "transient failures. Added retry logic on such errors becuase there's a good " "chance that the error is transient and subsequent calls will succeed." msgstr "" "Lightbits driver: Added support to create multiple snapshots from the same " "volume simultaneously when using the Lightbits Cinder driver. Under certain " "conditions, older releases of the Lightbits API service will return various " "status codes (including HTTP status codes 500 and 503) that could indicate " "transient failures. Added retry logic on such errors because there's a good " "chance that the error is transient and subsequent calls will succeed." msgid "List CG Snapshots checks both the CG and the groups tables." msgstr "List CG Snapshots checks both the CG and the groups tables." msgid "List CG checks both CG and groups tables." msgstr "List CG checks both CG and groups tables." msgid "" "Locks may use Tooz as abstraction layer now, to support distributed lock " "managers and prepare Cinder to better support HA configurations." msgstr "" "Locks may use Tooz as abstraction layer now, to support distributed lock " "managers and prepare Cinder to better support HA configurations." msgid "Log VMAX specific metadata of a volume if debug is enabled." msgstr "Log VMAX specific metadata of a volume if debug is enabled." msgid "" "Log a warning from the volume service when a volume driver's " "get_volume_stats() call takes a long time to return. This can help " "deployers troubleshoot a cinder-volume service misbehaving due to a driver/" "backend performance issue." 
msgstr "" "Log a warning from the volume service when a volume driver's " "get_volume_stats() call takes a long time to return. This can help " "deployers troubleshoot a cinder-volume service misbehaving due to a driver/" "backend performance issue." msgid "" "Logging path can now be configured for vzstorage driver in shares config " "file (specified by vzstorage_shares_config option). To set custom logging " "path add `'-l', ''` to mount options array. Otherwise " "default logging path `/var/log/vstorage//cinder.log.gz` will " "be used." msgstr "" "Logging path can now be configured for vzstorage driver in shares config " "file (specified by vzstorage_shares_config option). To set custom logging " "path add `'-l', ''` to mount options array. Otherwise " "default logging path `/var/log/vstorage//cinder.log.gz` will " "be used." msgid "" "Make Cinder scheduler check if backend reports `online_extend_support` " "before performing an online extend operation." msgstr "" "Make Cinder scheduler check if backend reports `online_extend_support` " "before performing an online extend operation." msgid "" "Manage and unmanage support has been added to the Nimble backend driver." msgstr "" "Manage and unmanage support has been added to the Nimble backend driver." msgid "Many backend storage drivers have added features and fixed bugs." msgstr "Many backend storage drivers have added features and fixed bugs." msgid "" "Many policies had their default values changed and their previous values " "deprecated. These are indicated in the sample policy configuration file, " "which you can view in the `policy.yaml `_ section of the " "`Cinder Service Configuration Guide`." msgstr "" "Many policies had their default values changed and their previous values " "deprecated. These are indicated in the sample policy configuration file, " "which you can view in the `policy.yaml `_ section of the " "`Cinder Service Configuration Guide`." msgid "" "Marked the ITRI DISCO driver option ``disco_wsdl_path`` as deprecated. The " "new preferred protocol for array communication is REST and SOAP support will " "be removed." msgstr "" "Marked the ITRI DISCO driver option ``disco_wsdl_path`` as deprecated. The " "new preferred protocol for array communication is REST and SOAP support will " "be removed." msgid "" "Meanwhile, since creating volumes on ThinProvisioning Pool does not " "encounter the above restrictions, parameter ``fragment_capacity_mb`` will " "not be added into the information, and remove the ``total_volumes`` " "parameter from the backend pool information when the type of backend pool is " "ThinProvisioning Pool." msgstr "" "Meanwhile, since creating volumes on ThinProvisioning Pool does not " "encounter the above restrictions, parameter ``fragment_capacity_mb`` will " "not be added into the information, and remove the ``total_volumes`` " "parameter from the backend pool information when the type of backend pool is " "ThinProvisioning Pool." msgid "" "Microversion 3.61 adds the ``cluster_name`` attribute to the volume detail " "response when called in an administrative context." msgstr "" "Microversion 3.61 adds the ``cluster_name`` attribute to the volume detail " "response when called in an administrative context." msgid "" "Microversion 3.62 adds API calls to manage the default volume type for a " "specific project. See the `Default volume types (default-types) `_ section of the `Block Storage API v3 Reference `_ for more information." 
msgstr "" "Microversion 3.62 adds API calls to manage the default volume type for a " "specific project. See the `Default volume types (default-types) `_ section of the `Block Storage API v3 Reference `_ for more information." msgid "" "Microversion 3.65 includes the display of information in the volume or " "snapshot detail response to indicate whether that resource consumes quota, " "and adds the ability to filter a requested list of resources according to " "whether they consume quota or not." msgstr "" "Microversion 3.65 includes the display of information in the volume or " "snapshot detail response to indicate whether that resource consumes quota, " "and adds the ability to filter a requested list of resources according to " "whether they consume quota or not." msgid "" "Microversion 3.66 removes the necessity to add a 'force' flag when " "requesting a snapshot of an in-use volume, given that this is not a problem " "for modern storage systems." msgstr "" "Microversion 3.66 removes the necessity to add a 'force' flag when " "requesting a snapshot of an in-use volume, given that this is not a problem " "for modern storage systems." msgid "" "Microversion 3.67 is introduced as a marker to indicate that any instance of " "the Block Storage API 3.67 or greater treats a project_id in the URL as " "optional. This change is backward compatible: the API can handle legacy " "URLs containing a project_id as well as URLs without a project_id. This is " "the case regardless of what microversion specified in a request. See the " "\"New Features\" section for details." msgstr "" "Microversion 3.67 is introduced as a marker to indicate that any instance of " "the Block Storage API 3.67 or greater treats a project_id in the URL as " "optional. This change is backward compatible: the API can handle legacy " "URLs containing a project_id as well as URLs without a project_id. This is " "the case regardless of what microversion is specified in a request. See the " "\"New Features\" section for details." msgid "" "Microversion 3.68 introduces a new volume action, ``os-reimage``, that " "allows a user to replace the current content of a specified volume with the " "data of a specified image supplied by the Image service (glance). See the " "\"New Features\" section for details." msgstr "" "Microversion 3.68 introduces a new volume action, ``os-reimage``, that " "allows a user to replace the current content of a specified volume with the " "data of a specified image supplied by the Image service (glance). See the " "\"New Features\" section for details." msgid "Mitaka Series Release Notes" msgstr "Mitaka Series Release Notes" msgid "" "Modify CG modifies in the CG table if the CG is in the CG table, otherwise " "it modifies in the groups table." msgstr "" "Modify CG modifies in the CG table if the CG is in the CG table, otherwise " "it modifies in the groups table." msgid "" "Modify default lvm_type setting from thick to auto. This will result in " "Cinder preferring thin on init, if there are no LV's in the VG it will " "create a thin-pool and use thin. If there are LV's and no thin-pool it will " "continue using thick." msgstr "" "Modify default lvm_type setting from thick to auto. This will result in " "Cinder preferring thin on init, if there are no LV's in the VG it will " "create a thin-pool and use thin. If there are LV's and no thin-pool it will " "continue using thick." msgid "Modify rule for types_manage and volume_type_access, e.g." 
msgstr "Modify rule for types_manage and volume_type_access, e.g." msgid "" "Modifying the extra-specs of an in use Volume Type was something that we've " "unintentionally allowed. The result is unexpected or unknown volume " "behaviors in cases where a type was modified while a volume was assigned " "that type. This has been particularly annoying for folks that have assigned " "the volume-type to a different/new backend device. In case there are " "customers using this \"bug\" we add a config option to retain the bad " "behavior \"allow_inuse_volume_type_modification\", with a default setting of " "False (Don't allow). Note this config option is being introduced as " "deprecated and will be removed in a future release. It's being provided as " "a bridge to not break upgrades without notice." msgstr "" "Modifying the extra-specs of an in use Volume Type was something that we've " "unintentionally allowed. The result is unexpected or unknown volume " "behaviours in cases where a type was modified while a volume was assigned " "that type. This has been particularly annoying for folks that have assigned " "the volume-type to a different/new backend device. In case there are " "customers using this \"bug\" we add a config option to retain the bad " "behaviour \"allow_inuse_volume_type_modification\", with a default setting " "of False (Don't allow). Note this config option is being introduced as " "deprecated and will be removed in a future release. It's being provided as " "a bridge to not break upgrades without notice." msgid "" "Multiattach support is disabled for the LVM driver when using the LIO iSCSI " "target. This functionality will be fixed in a later release." msgstr "" "Multiattach support is disabled for the LVM driver when using the LIO iSCSI " "target. This functionality will be fixed in a later release." msgid "" "Multiple backends may now be enabled within the same Cinder Volume service " "on Windows by using the ``enabled_backends`` config option." msgstr "" "Multiple backends may now be enabled within the same Cinder Volume service " "on Windows by using the ``enabled_backends`` config option." msgid "NEC Driver: Added multiattach support." msgstr "NEC Driver: Added multiattach support." msgid "NEC Driver: Added support of more than 4 iSCSI portals for a node." msgstr "NEC Driver: Added support of more than 4 iSCSI portals for a node." msgid "NEC Driver: Added support to revert a volume to a snapshot." msgstr "NEC Driver: Added support to revert a volume to a snapshot." msgid "" "NEC Driver: Deprecated ``nec_iscsi_portals_per_cont`` config option. The " "option was used to limit number of portals and is no longer needed." msgstr "" "NEC Driver: Deprecated ``nec_iscsi_portals_per_cont`` config option. The " "option was used to limit number of portals and is no longer needed." msgid "" "NFS driver `bug #1860913 `_: " "Fixed instance uses base image file when it is rebooted after online " "snapshot creation." msgstr "" "NFS driver `bug #1860913 `_: " "Fixed instance uses base image file when it is rebooted after online " "snapshot creation." msgid "" "NFS driver `bug #1946059 `_: " "Fixed revert to snapshot operation." msgstr "" "NFS driver `bug #1946059 `_: " "Fixed revert to snapshot operation." msgid "" "NOTE: Enabling ONTAP REST client changes the behavior of QoS specs. Earlier, " "QoS values could be represented in BPS (bytes per second), but now REST " "client only supports integer values represented in MBPS (Megabytes per " "second). 
It means that though the user specifies the value in BPS, it will " "be converted to MBPS and rounded up." msgstr "" "NOTE: Enabling ONTAP REST client changes the behaviour of QoS specs. " "Earlier, QoS values could be represented in BPS (bytes per second), but now " "REST client only supports integer values represented in MBPS (Megabytes per " "second). It means that though the user specifies the value in BPS, it will " "be converted to MBPS and rounded up." msgid "Naming convention change for Datera Volume Drivers" msgstr "Naming convention change for Datera Volume Drivers" msgid "" "Nested quotas will no longer be used by default, but can be configured by " "setting ``quota_driver = cinder.quota.NestedDbQuotaDriver``" msgstr "" "Nested quotas will no longer be used by default, but can be configured by " "setting ``quota_driver = cinder.quota.NestedDbQuotaDriver``" msgid "" "NetApp E-series (bug 1718739):The NetApp E-series driver has been fixed to " "correctly report the \"provisioned_capacity_gb\". Now it sums the capacity " "of all the volumes in the configured backend to get the correct value. This " "bug fix affects all the protocols supported by the driver (FC and iSCSI)." msgstr "" "NetApp E-series (bug 1718739):The NetApp E-series driver has been fixed to " "correctly report the \"provisioned_capacity_gb\". Now it sums the capacity " "of all the volumes in the configured backend to get the correct value. This " "bug fix affects all the protocols supported by the driver (FC and iSCSI)." msgid "" "NetApp NFS driver: add an alternative approach to perform the efficient " "clone image when the Glance source store and Cinder destination pool are not " "in the same FlexVol, but they are in the same Cluster. Previously, the " "driver required the copy offload tool for doing it efficiently, which is no " "longer available. Now, the operators can maintain their efficient clone " "image by relying on the storage file copy operation." msgstr "" "NetApp NFS driver: add an alternative approach to perform the efficient " "clone image when the Glance source store and Cinder destination pool are not " "in the same FlexVol, but they are in the same Cluster. Previously, the " "driver required the copy offload tool for doing it efficiently, which is no " "longer available. Now, the operators can maintain their efficient clone " "image by relying on the storage file copy operation." msgid "" "NetApp ONTAP (bug 1762424): Fix ONTAP NetApp driver not being able to extend " "a volume to a size greater than the corresponding LUN max geometry." msgstr "" "NetApp ONTAP (bug 1762424): Fix ONTAP NetApp driver not being able to extend " "a volume to a size greater than the corresponding LUN max geometry." msgid "" "NetApp ONTAP (bug 1765182): Make ONTAP NetApp NFS driver report to the " "Cinder scheduler that it doesn't support online volume extending." msgstr "" "NetApp ONTAP (bug 1765182): Make ONTAP NetApp NFS driver report to the " "Cinder scheduler that it doesn't support online volume extending." msgid "" "NetApp ONTAP (bug 1765182): Make ONTAP NetApp iSCSI driver and FC driver " "report to the Cinder scheduler that they don't support online volume " "extending." msgstr "" "NetApp ONTAP (bug 1765182): Make ONTAP NetApp iSCSI driver and FC driver " "report to the Cinder scheduler that they don't support online volume " "extending." msgid "" "NetApp ONTAP ISCSI/FC drivers: Enabled support for Active/Active " "environments in the NetApp ISCSI/FC drivers (including replication)." 
msgstr "" "NetApp ONTAP ISCSI/FC drivers: Enabled support for Active/Active " "environments in the NetApp ISCSI/FC drivers (including replication)." msgid "" "NetApp ONTAP NFS (bug 1690954): Fix wrong usage of export path as volume " "name when deleting volumes and snapshots." msgstr "" "NetApp ONTAP NFS (bug 1690954): Fix wrong usage of export path as volume " "name when deleting volumes and snapshots." msgid "" "NetApp ONTAP NFS driver: Enabled support for Active/Active environments in " "the NetApp NFS driver (including replication)." msgstr "" "NetApp ONTAP NFS driver: Enabled support for Active/Active environments in " "the NetApp NFS driver (including replication)." msgid "NetApp ONTAP NFS multiattach capability enabled." msgstr "NetApp ONTAP NFS multiattach capability enabled." msgid "" "NetApp ONTAP `bug #1906291 `_: Fix volume losing its QoS policy on the backend after " "moving it (migrate or retype with migrate) to a NetApp NFS backend." msgstr "" "NetApp ONTAP `bug #1906291 `_: Fix volume losing its QoS policy on the backend after " "moving it (migrate or retype with migrate) to a NetApp NFS backend." msgid "" "NetApp ONTAP `bug #1958245 `_: In an ONTAP flexgroup replication environment, snapmirror " "creation would succeed but a driver bug caused an error message to be logged " "for the cinder-volume service. The issue has been corrected in this release." msgstr "" "NetApp ONTAP `bug #1958245 `_: In an ONTAP flexgroup replication environment, snapmirror " "creation would succeed but a driver bug caused an error message to be logged " "for the cinder-volume service. The issue has been corrected in this release." msgid "" "NetApp ONTAP driver `Bug #1927784 `_: Fixed the replication setup with FlexVol pools." msgstr "" "NetApp ONTAP driver `Bug #1927784 `_: Fixed the replication setup with FlexVol pools." msgid "" "NetApp ONTAP driver `bug #1955057 `_: Fixed the function get_ontap_version on Cinder NetApp " "driver, now it returns a tuple of integers instead of a string." msgstr "" "NetApp ONTAP driver `bug #1955057 `_: Fixed the function get_ontap_version on Cinder NetApp " "driver, now it returns a tuple of integers instead of a string." msgid "" "NetApp ONTAP driver `bug #2028857 `_: Fixed errors that were occuring in the replica failover " "operation when using ONTAP REST API." msgstr "" "NetApp ONTAP driver `bug #2028857 `_: Fixed errors that were occurring in the replica failover " "operation when using ONTAP REST API." msgid "" "NetApp ONTAP driver: Added a new driver specific capability called " "`netapp_qos_min_support`. It is used to filter the pools that has support to " "the Qos minimum (floor) specs during the scheduler phase." msgstr "" "NetApp ONTAP driver: Added a new driver-specific capability called " "`netapp_qos_min_support`. It is used to filter the pools that have supported " "the QoS minimum (floor) specs during the scheduler phase." msgid "" "NetApp ONTAP driver: Added support for Adaptive QoS specs. The driver now " "accepts ``expectedIOPSperGiB``, ``peakIOPSperGiB``, " "``expectedIOPSAllocation``, ``peakIOPSAllocation``, ``absoluteMinIOPS`` and " "``blockSize``. The field ``peakIOPSperGiB`` and the field " "``expectedIOPSperGiB`` are required together. The ``expectedIOPSperGiB`` and " "``absoluteMinIOPS`` specs are only guaranteed by ONTAP AFF systems. All " "specs can only be used with ONTAP version equal or greater than 9.4, " "excepting the ``expectedIOPSAllocation`` and ``blockSize`` specs which " "require at least 9.5." 
msgstr "" "NetApp ONTAP driver: Added support for Adaptive QoS specs. The driver now " "accepts ``expectedIOPSperGiB``, ``peakIOPSperGiB``, " "``expectedIOPSAllocation``, ``peakIOPSAllocation``, ``absoluteMinIOPS`` and " "``blockSize``. The field ``peakIOPSperGiB`` and the field " "``expectedIOPSperGiB`` are required together. The ``expectedIOPSperGiB`` and " "``absoluteMinIOPS`` specs are only guaranteed by ONTAP AFF systems. All " "specs can only be used with ONTAP version equal or greater than 9.4, " "excepting the ``expectedIOPSAllocation`` and ``blockSize`` specs which " "require at least 9.5." msgid "" "NetApp ONTAP driver: Added support for QoS Min (floor) throughput specs. The " "driver now accepts ``minIOPS`` and ``minIOPSperGiB`` specs, which can be set " "either individually or along with Max (ceiling) throughput specs. The " "feature requires storage ONTAP All Flash FAS (AFF) with version equal or " "greater than 9.3 for NFS and 9.2 for iSCSI and FCP. It also works with " "Select Premium with SSD and C190 storages with at least ONTAP 9.6." msgstr "" "NetApp ONTAP driver: Added support for QoS Min (floor) throughput specs. The " "driver now accepts ``minIOPS`` and ``minIOPSperGiB`` specs, which can be set " "either individually or along with Max (ceiling) throughput specs. The " "feature requires storage ONTAP All Flash FAS (AFF) with versions equal to or " "greater than 9.3 for NFS and 9.2 for iSCSI and FCP. It also works with " "Select Premium with SSD and C190 storage with at least ONTAP 9.6." msgid "" "NetApp ONTAP driver: Added support to Revert to Snapshot for the iSCSI, FC " "and NFS drivers with FlexVol pool. This feature does not support FlexGroups " "and is limited to revert only to the most recent snapshot of a given Cinder " "volume." msgstr "" "NetApp ONTAP driver: Added support to Revert to Snapshot for the iSCSI, FC " "and NFS drivers with FlexVol pool. This feature does not support FlexGroups " "and is limited to reverting only to the most recent snapshot of a given " "Cinder volume." msgid "" "NetApp ONTAP driver: added option ´netapp_driver_reports_provisioned_capacity" "´, which enables the driver to calculate and report provisioned capacity to " "Cinder Scheduler based on volumes sizes in the storage system." msgstr "" "NetApp ONTAP driver: added option ´netapp_driver_reports_provisioned_capacity" "´, which enables the driver to calculate and report provisioned capacity to " "Cinder Scheduler based on volumes sizes in the storage system." msgid "" "NetApp ONTAP driver: added support for FlexGroup pool using the NFS mode. " "There are several considerations for using the driver with it:" msgstr "" "NetApp ONTAP driver: added support for FlexGroup pool using the NFS mode. " "There are several considerations for using the driver with it:" msgid "" "NetApp ONTAP iSCSI (bug 1712651): Fix ONTAP NetApp iSCSI driver not raising " "a proper exception when trying to extend an attached volume beyond its max " "geometry." msgstr "" "NetApp ONTAP iSCSI (bug 1712651): Fix ONTAP NetApp iSCSI driver not raising " "a proper exception when trying to extend an attached volume beyond its max " "geometry." msgid "NetApp ONTAP iSCSI and FCP drivers multiattach capability enabled." msgstr "NetApp ONTAP iSCSI and FCP drivers multiattach capability enabled." msgid "" "NetApp ONTAP: Added support for Adaptive QoS policies that have been pre-" "created on the storage system, with the NetApp driver and clustered ONTAP " "version 9.4 or higher. 
To use this feature, configure a Cinder volume type " "with the following extra-specs::" msgstr "" "NetApp ONTAP: Added support for Adaptive QoS policies that have been pre-" "created on the storage system, with the NetApp driver and clustered ONTAP " "version 9.4 or higher. To use this feature, configure a Cinder volume type " "with the following extra-specs::" msgid "" "NetApp ONTAP: Added support for storage assisted migration within a same " "ONTAP cluster (iSCSI/FC/NFS)." msgstr "" "NetApp ONTAP: Added support for storage-assisted migration within the same " "ONTAP cluster (iSCSI/FC/NFS)." msgid "" "NetApp ONTAP: Fix check QoS minimum support for SVM scoped account. See: " "`Bug #1924798 `_." msgstr "" "NetApp ONTAP: Fix check QoS minimum support for SVM scoped account. See: " "`Bug #1924798 `_." msgid "" "NetApp ONTAP: Fixes `bug 1839384 `__ Detaching any instance from multiattached volume terminates " "connection. Now the connection is terminated only if there're no other " "instances using the same initiator." msgstr "" "NetApp ONTAP: Fixes `bug 1839384 `__ Detaching any instance from multiattached volume terminates " "connection. Now the connection is terminated only if there're no other " "instances using the same initiator." msgid "" "NetApp SolidFire driver `Bug #1891914 `_: Fix an error that might occur on cluster workload " "rebalancing or system upgrade, when an operation is made to a volume at the " "same time its connection is being moved to a secondary node." msgstr "" "NetApp SolidFire driver `Bug #1891914 `_: Fix an error that might occur on cluster workload " "rebalancing or system upgrade, when an operation is made to a volume at the " "same time its connection is being moved to a secondary node." msgid "" "NetApp SolidFire driver `Bug #1896112 `_: Fixes an issue that may duplicate volumes during creation, " "in case the SolidFire backend successfully processes a request and creates " "the volume, but fails to deliver the result back to the driver (the response " "is lost). When this scenario occurs, the SolidFire driver will retry the " "operation, which previously resulted in the creation of a duplicate volume. " "This fix adds the ``sf_volume_create_timeout`` configuration option (default " "value: 60 seconds) which specifies an additional length of time that the " "driver will wait for the volume to become active on the backend before " "raising an exception." msgstr "" "NetApp SolidFire driver `Bug #1896112 `_: Fixes an issue that may duplicate volumes during creation, " "in case the SolidFire backend successfully processes a request and creates " "the volume, but fails to deliver the result back to the driver (the response " "is lost). When this scenario occurs, the SolidFire driver will retry the " "operation, which previously resulted in the creation of a duplicate volume. " "This fix adds the ``sf_volume_create_timeout`` configuration option (default " "value: 60 seconds) which specifies an additional length of time that the " "driver will wait for the volume to become active on the backend before " "raising an exception." msgid "" "NetApp SolidFire driver `Bug #1932964 `_: Fixed a name exception that occurs on any volume migration." msgstr "" "NetApp SolidFire driver `Bug #1932964 `_: Fixed a name exception that occurs on any volume migration." msgid "" "NetApp SolidFire driver `Bug #1934435 `_: Fixed errors that might occur when an operation is made to " "a volume at the same time as the Element OS upgrades." 
msgstr "" "NetApp SolidFire driver `Bug #1934435 `_: Fixed errors that might occur when an operation is made to " "a volume at the same time as the Element OS upgrades." msgid "" "NetApp SolidFire driver `Bug #1942090 `_: Fixed a status exception that occurs on volume retype with " "migration." msgstr "" "NetApp SolidFire driver `Bug #1942090 `_: Fixed a status exception that occurs on volume retype with " "migration." msgid "" "NetApp SolidFire driver `bug #1934459 `_: Fixed backend initialization failing with RecursionError " "error when OSProfiler is enabled." msgstr "" "NetApp SolidFire driver `bug #1934459 `_: Fixed backend initialisation failing with RecursionError " "error when OSProfiler is enabled." msgid "" "NetApp SolidFire driver now supports optimized revert to snapshot operations." msgstr "" "NetApp SolidFire driver now supports optimized revert to snapshot operations." msgid "" "NetApp SolidFire driver: Added inter-cluster volume migration (storage " "assisted) support. This allows users to efficiently migrate volumes between " "different SolidFire backends." msgstr "" "NetApp SolidFire driver: Added inter-cluster volume migration (storage " "assisted) support. This allows users to efficiently migrate volumes between " "different SolidFire backends." msgid "" "NetApp SolidFire driver: Enabled support for Active/Active (including " "replication) to the SolidFire driver. This allows users to configure " "SolidFire backends in clustered environments." msgstr "" "NetApp SolidFire driver: Enabled support for Active/Active (including " "replication) to the SolidFire driver. This allows users to configure " "SolidFire backends in clustered environments." msgid "" "NetApp SolidFire driver: Fixed an issue that causes failback to fail after a " "volume service restart. This change fixes bug `1859653 `_." msgstr "" "NetApp SolidFire driver: Fixed an issue that causes failback to fail after a " "volume service restart. This change fixes bug `1859653 `_." msgid "" "NetApp SolidFire now reports QoS and efficiency stats allowing operators to " "use those values in consideration for weighting and filtering of their " "backends." msgstr "" "NetApp SolidFire now reports QoS and efficiency stats allowing operators to " "use those values in consideration for weighting and filtering of their " "backends." msgid "" "NetApp cDOT block and file drivers have improved support for SVM scoped user " "accounts. Features not supported for SVM scoped users include QoS, aggregate " "usage reporting, and dedupe usage reporting." msgstr "" "NetApp cDOT block and file drivers have improved support for SVM scoped user " "accounts. Features not supported for SVM scoped users include QoS, aggregate " "usage reporting, and dedupe usage reporting." msgid "" "NetApp cDOT block and file drivers now report replication capability at the " "pool level; and are hence compatible with using the ``replication_enabled`` " "extra-spec in volume types." msgstr "" "NetApp cDOT block and file drivers now report replication capability at the " "pool level; and are hence compatible with using the ``replication_enabled`` " "extra-spec in volume types." msgid "" "NetApp drivers: NFS, iSCSI and FCP drivers have now the option to request " "ONTAP operations through REST API. The new option `netapp_use_legacy_client` " "switches between the old ZAPI client approach and new REST client. It is " "default to `True`, meaning that the drivers will keep working as before " "using ZAPI operations. 
If desired, this option can be set to `False` " "interacting with the storage using the new REST client. However, this new " "client still relies on ZAPI calls for consistency group snapshot operation." msgstr "" "NetApp drivers: NFS, iSCSI and FCP drivers now have the option to request " "ONTAP operations through REST API. The new option `netapp_use_legacy_client` " "switches between the old ZAPI client approach and new REST client. It is " "defaulted to `True`, meaning that the drivers will keep working as before " "using ZAPI operations. If desired, this option can be set to `False` " "to interact with the storage using the new REST client. However, this new " "client still relies on ZAPI calls for consistency group snapshot operation." msgid "" "NetApp iSCSI drivers no longer use the discovery mechanism for multipathing " "and they always return all target/portals when attaching a volume. Thanks " "to this, volumes will be successfully attached even if the target/portal " "selected as primary is down, this will be the case for both, multipath and " "single path connections." msgstr "" "NetApp iSCSI drivers no longer use the discovery mechanism for multipathing " "and they always return all target/portals when attaching a volume. Thanks " "to this, volumes will be successfully attached even if the target/portal " "selected as primary is down, this will be the case for both, multipath and " "single path connections." msgid "" "NetApp iSCSI/FCP drivers: NetApp space allocation feature allows ONTAP and " "host to see the actual space correctly when host deletes data. It also " "notifies the host when the LUN cannot accept write data due to lack of space " "on the volume, and makes the LUN read-only (rather than going offline). This " "feature can be enabled or disabled on cinder volumes by using volume type " "extra specs with the ``netapp:space_allocation`` property." msgstr "" "NetApp iSCSI/FCP drivers: NetApp space allocation feature allows ONTAP and " "host to see the actual space correctly when host deletes data. It also " "notifies the host when the LUN cannot accept write data due to lack of space " "on the volume, and makes the LUN read-only (rather than going offline). This " "feature can be enabled or disabled on cinder volumes by using volume type " "extra specs with the ``netapp:space_allocation`` property." msgid "" "New BoolOpt ``datera_debug_override_num_replicas`` for Datera Volume Drivers" msgstr "" "New BoolOpt ``datera_debug_override_num_replicas`` for Datera Volume Drivers" msgid "New Cinder Hitachi driver based on REST API for Hitachi VSP storages." msgstr "New Cinder Hitachi driver based on REST API for Hitachi VSP storages." msgid "" "New Cinder driver based on storops library (available in pypi) for EMC VNX." msgstr "" "New Cinder driver based on storops library (available in pypi) for EMC VNX." msgid "New Cinder driver for Seagate FC and iSCSI storage arrays." msgstr "New Cinder driver for Seagate FC and iSCSI storage arrays." msgid "New Cinder volume driver for Inspur AS13000 series." msgstr "New Cinder volume driver for Inspur AS13000 series." msgid "" "New Cinder volume driver for Inspur InStorage. The new driver supports iSCSI." msgstr "" "New Cinder volume driver for Inspur InStorage. The new driver supports iSCSI." msgid "" "New Cinder volume driver for KIOXIA Kumoscale storage systems. The driver " "storage system supports NVMeOF." msgstr "" "New Cinder volume driver for KIOXIA Kumoscale storage systems. The driver " "storage system supports NVMeOF."
msgid "New Cinder volume driver for LINBIT LINSTOR resources." msgstr "New Cinder volume driver for LINBIT LINSTOR resources." msgid "" "New Cinder volume driver for TOYOU ACS5000. The new driver supports iSCSI." msgstr "" "New Cinder volume driver for TOYOU ACS5000. The new driver supports iSCSI." msgid "New FC Cinder volume driver for Inspur Instorage." msgstr "New FC Cinder volume driver for Inspur Instorage." msgid "New FC Cinder volume driver for Kaminario K2 all-flash arrays." msgstr "New FC Cinder volume driver for Kaminario K2 all-flash arrays." msgid "New FC cinder volume driver for TOYOU NetStor Storage." msgstr "New FC Cinder volume driver for TOYOU NetStor Storage." msgid "New Features" msgstr "New Features" msgid "New ISCSI cinder volume driver for TOYOU NetStor TYDS Storage." msgstr "New ISCSI cinder volume driver for TOYOU NetStor TYDS Storage." msgid "" "New config format to allow for using shared Volume Driver configuration " "defaults via the [backend_defaults] stanza. Config options defined there " "will be used as defaults for each backend enabled via enabled_backends." msgstr "" "New config format to allow for using shared Volume Driver configuration " "defaults via the [backend_defaults] stanza. Config options defined there " "will be used as defaults for each backend enabled via enabled_backends." msgid "" "New config option added. ``\"connection_string\"`` in [profiler] section is " "used to specify OSProfiler driver connection string, for example, " "``\"connection_string = messaging://\"``, ``\"connection_string = mongodb://" "localhost:27017\"``" msgstr "" "New config option added. ``\"connection_string\"`` in [profiler] section is " "used to specify OSProfiler driver connection string, for example, " "``\"connection_string = messaging://\"``, ``\"connection_string = mongodb://" "localhost:27017\"``" msgid "" "New config option for Pure Storage volume drivers pure_eradicate_on_delete. " "When enabled will permanantly eradicate data instead of placing into pending " "eradication state." msgstr "" "New config option for Pure Storage volume drivers pure_eradicate_on_delete. " "When enabled will permanently eradicate data instead of placing into pending " "eradication state." msgid "" "New config option spdk_max_queue_depth is added for SPDK NVMe-oF target. It " "allows users to specify max queu depth." msgstr "" "New config option spdk_max_queue_depth is added for SPDK NVMe-oF target. It " "allows users to specify max queue depth." msgid "" "New config option to enable discard (trim/unmap) support for any backend." msgstr "" "New config option to enable discard (trim/unmap) support for any backend." msgid "" "New configuration options have been added to enable mTLS between cinder and " "glance: use ``glance_certfile`` and ``glance_keyfile`` in the ``[DEFAULT]`` " "section of the cinder configuration file." msgstr "" "New configuration options have been added to enable mTLS between cinder and " "glance: use ``glance_certfile`` and ``glance_keyfile`` in the ``[DEFAULT]`` " "section of the cinder configuration file." msgid "New iSCSI Cinder volume driver for Kaminario K2 all-flash arrays." msgstr "New iSCSI Cinder volume driver for Kaminario K2 all-flash arrays." 
msgid "New path - cinder.volume.drivers.hpe.hpe_3par_fc.HPE3PARFCDriver" msgstr "New path - cinder.volume.drivers.hpe.hpe_3par_fc.HPE3PARFCDriver" msgid "New path - cinder.volume.drivers.hpe.hpe_3par_iscsi.HPE3PARISCSIDriver" msgstr "New path - cinder.volume.drivers.hpe.hpe_3par_iscsi.HPE3PARISCSIDriver" msgid "" "New path - cinder.volume.drivers.hpe.hpe_lefthand_iscsi." "HPELeftHandISCSIDriver" msgstr "" "New path - cinder.volume.drivers.hpe.hpe_lefthand_iscsi." "HPELeftHandISCSIDriver" msgid "New path - cinder.volume.drivers.hpe.hpe_xp_fc.HPEXPFCDriver" msgstr "New path - cinder.volume.drivers.hpe.hpe_xp_fc.HPEXPFCDriver" msgid "New path - cinder.volume.drivers.huawei.huawei_driver.HuaweiFCDriver" msgstr "New path - cinder.volume.drivers.huawei.huawei_driver.HuaweiFCDriver" msgid "New path - cinder.volume.drivers.huawei.huawei_driver.HuaweiISCSIDriver" msgstr "" "New path - cinder.volume.drivers.huawei.huawei_driver.HuaweiISCSIDriver" msgid "Newton Series Release Notes" msgstr "Newton Series Release Notes" msgid "NexentaStor5 iSCSI and NFS drivers multiattach capability enabled." msgstr "NexentaStor5 iSCSI and NFS drivers multiattach capability enabled." msgid "" "Nimble driver `bug #1918099 `_: Fix revert to snapshot not working as expected." msgstr "" "Nimble driver `bug #1918099 `_: Fix revert to snapshot not working as expected." msgid "" "Nimble driver `bug #1918229 `_: Corrected an issue where the Nimble storage driver was " "inaccurately determining that there was no free space left in the storage " "array. The driver now relies on the storage array to report the amount of " "free space." msgstr "" "Nimble driver `bug #1918229 `_: Corrected an issue where the Nimble storage driver was " "inaccurately determining that there was no free space left in the storage " "array. The driver now relies on the storage array to report the amount of " "free space." msgid "Nimble driver now supports discard." msgstr "Nimble driver now supports discard." msgid "" "Nimble driver: Enable thin provisioning as default method while creating " "volumes." msgstr "" "Nimble driver: Enable thin provisioning as default method while creating " "volumes." msgid "" "Nimble specific extra-spec ``nimble:multi-initiator`` is removed. Common " "extra-spec multiattach is added." msgstr "" "Nimble specific extra-spec ``nimble:multi-initiator`` is removed. Common " "extra-spec multiattach is added." msgid "" "Nimble: Documented that existing driver supports the new Alletra 6k backend. " "Alletra 6k is newer version of existing Nimble backend." msgstr "" "Nimble: Documented that the existing driver supports the new Alletra 6k " "backend. Alletra 6k is a newer version of the existing Nimble backend." msgid "" "Not to put too fine a point on it, silent truncation is worse than failure, " "and the Cinder team will be addressing these issues in the next release. " "Additionally (as if that isn't bad enough!), we suspect that the above " "anomalies will also occur when using volume encryption with NFS-based " "storage backends, though this has not yet been reported or confirmed." msgstr "" "Not to put too fine a point on it, silent truncation is worse than failure, " "and the Cinder team will be addressing these issues in the next release. " "Additionally (as if that isn't bad enough!), we suspect that the above " "anomalies will also occur when using volume encryption with NFS-based " "storage backends, though this has not yet been reported or confirmed." 
msgid "" "Note that ``fujitsu_use_cli_copy`` cannot be set to True when the type of " "target pool is RAID Group." msgstr "" "Note that ``fujitsu_use_cli_copy`` cannot be set to True when the type of " "target pool is RAID Group." msgid "" "Note that a cluster scoped account must be used in the driver configuration " "in order to use QoS in clustered ONTAP." msgstr "" "Note that a cluster scoped account must be used in the driver configuration " "in order to use QoS in clustered ONTAP." msgid "" "Note that this is a destructive action, that is, all data currently " "contained in a volume is destroyed when the volume is re-imaged." msgstr "" "Note that this is a destructive action, that is, all data currently " "contained in a volume is destroyed when the volume is re-imaged." msgid "" "Note that when the firmware version of the ETERNUS AF/DX is earlier than " "V11L30, upper limits for the volume QoS settings of the ETERNUS AF/DX are " "set using predefined options. This means that you should set the upper " "limit *of the ETERNUS AF/DX side* to a maximum value that does not exceed " "the specified ``maxBWS``." msgstr "" "Note that when the firmware version of the ETERNUS AF/DX is earlier than " "V11L30, upper limits for the volume QoS settings of the ETERNUS AF/DX are " "set using predefined options. This means that you should set the upper " "limit *of the ETERNUS AF/DX side* to a maximum value that does not exceed " "the specified ``maxBWS``." msgid "" "Nova must be `configured to send service tokens `_ **and** " "cinder must be configured to recognize at least one of the roles that the " "nova service user has been assigned in keystone. By default, cinder will " "recognize the ``service`` role, so if the nova service user is assigned a " "differently named role in your cloud, you must adjust your cinder " "configuration file (``service_token_roles`` configuration option in the " "``keystone_authtoken`` section). If nova and cinder are not configured " "correctly in this regard, detaching volumes will no longer work (`Bug " "#2004555 `_)." msgstr "" "Nova must be `configured to send service tokens `_ **and** " "Cinder must be configured to recognize at least one of the roles that the " "nova service user has been assigned in keystone. By default, Cinder will " "recognise the ``service`` role, so if the nova service user is assigned a " "differently named role in your cloud, you must adjust your Cinder " "configuration file (``service_token_roles`` configuration option in the " "``keystone_authtoken`` section). If Nova and Cinder are not configured " "correctly in this regard, detaching volumes will no longer work (`Bug " "#2004555 `_)." msgid "Now availability zone is supported in volume type as below." msgstr "Now availability zone is supported in volume type as below." msgid "" "Now cinder will be rollback the ``quota_usages`` table when failed to create " "an incremental backup if there doesn't exist a parent backup or the backup " "is not in available state." msgstr "" "Now cinder will be rollback the ``quota_usages`` table when failed to create " "an incremental backup if there doesn't exist a parent backup or the backup " "is not in available state." msgid "" "Now cinder will keep track of 'multiattach' attribute when managing backend " "volumes." msgstr "" "Now Cinder will keep track of 'multiattach' attribute when managing backend " "volumes." msgid "" "Now cinder will refresh the az cache immediately if previous create volume " "task failed due to az not found." 
msgstr "" "Now Cinder will refresh the az cache immediately if previous create volume " "task failed due to az not found." msgid "" "Now extend won't work on disabled services because it's going through the " "scheduler, unlike how it worked before." msgstr "" "Now extend won't work on disabled services because it's going through the " "scheduler, unlike how it worked before." msgid "" "Now scheduler plugins are aware of operation type via ``operation`` " "attribute in RequestSpec dictionary, plugins can support backend filtering " "according to backend status as well as operation type. Current possible " "values for ``operation`` are:" msgstr "" "Now scheduler plugins are aware of operation type via ``operation`` " "attribute in RequestSpec dictionary, plugins can support backend filtering " "according to backend status as well as operation type. Current possible " "values for ``operation`` are:" msgid "Now the ``os-host show`` API will count project's resource correctly." msgstr "Now the ``os-host show`` API will count project's resource correctly." msgid "" "Now we update the required values to successfully complete the migration." msgstr "" "Now we update the required values to successfully complete the migration." msgid "Ocata Series Release Notes" msgstr "Ocata Series Release Notes" msgid "" "Old VNX FC (``cinder.volume.drivers.emc.emc_cli_fc.EMCCLIFCDriver``)/ iSCSI " "(``cinder.volume.drivers.emc.emc_cli_iscsi.EMCCLIISCSIDriver``) drivers are " "deprecated. Please refer to upgrade section for information about the new " "driver." msgstr "" "Old VNX FC (``cinder.volume.drivers.emc.emc_cli_fc.EMCCLIFCDriver``)/ iSCSI " "(``cinder.volume.drivers.emc.emc_cli_iscsi.EMCCLIISCSIDriver``) drivers are " "deprecated. Please refer to upgrade section for information about the new " "driver." msgid "" "Old driver paths have been removed since they have been through our alloted " "deprecation period. Make sure if you have any of these paths being set in " "your cinder.conf for the volume_driver option, to update to the new driver " "path listed here." msgstr "" "Old driver paths have been removed since they have been through our allotted " "deprecation period. Make sure if you have any of these paths being set in " "your cinder.conf for the volume_driver option, to update to the new driver " "path listed here." msgid "" "Old names and locations are still supported but support will be removed in " "the future." msgstr "" "Old names and locations are still supported but support will be removed in " "the future." 
msgid "" "Old path - cinder.volume.drivers.huawei.huawei_18000.Huawei18000FCDriver" msgstr "" "Old path - cinder.volume.drivers.huawei.huawei_18000.Huawei18000FCDriver" msgid "" "Old path - cinder.volume.drivers.huawei.huawei_18000.Huawei18000ISCSIDriver" msgstr "" "Old path - cinder.volume.drivers.huawei.huawei_18000.Huawei18000ISCSIDriver" msgid "" "Old path - cinder.volume.drivers.huawei.huawei_driver.Huawei18000FCDriver" msgstr "" "Old path - cinder.volume.drivers.huawei.huawei_driver.Huawei18000FCDriver" msgid "" "Old path - cinder.volume.drivers.huawei.huawei_driver.Huawei18000ISCSIDriver" msgstr "" "Old path - cinder.volume.drivers.huawei.huawei_driver.Huawei18000ISCSIDriver" msgid "Old path - cinder.volume.drivers.san.hp.hp_3par_fc.HP3PARFCDriver" msgstr "Old path - cinder.volume.drivers.san.hp.hp_3par_fc.HP3PARFCDriver" msgid "Old path - cinder.volume.drivers.san.hp.hp_3par_iscsi.HP3PARISCSIDriver" msgstr "" "Old path - cinder.volume.drivers.san.hp.hp_3par_iscsi.HP3PARISCSIDriver" msgid "" "Old path - cinder.volume.drivers.san.hp.hp_lefthand_iscsi." "HPLeftHandISCSIDriver" msgstr "" "Old path - cinder.volume.drivers.san.hp.hp_lefthand_iscsi." "HPLeftHandISCSIDriver" msgid "Old path - cinder.volume.drivers.san.hp.hp_xp_fc.HPXPFCDriver" msgstr "Old path - cinder.volume.drivers.san.hp.hp_xp_fc.HPXPFCDriver" msgid "" "On HCI deployments and when running Cinder and Glance with Cinder backend on " "the same host an os-brick shared location can be configured using the " "``lock_path`` in the ``[os_brick]`` configuration section." msgstr "" "On HCI deployments and when running Cinder and Glance with Cinder backend on " "the same host an os-brick shared location can be configured using the " "``lock_path`` in the ``[os_brick]`` configuration section." msgid "" "On offline upgrades, due to the rolling upgrade mechanism we need to restart " "the cinder services twice to complete the installation just like in the " "rolling upgrades case. First you stop the cinder services, then you upgrade " "them, you sync your DB, then you start all the cinder services, and then you " "restart them all. To avoid this last restart we can now instruct the DB " "sync to bump the services after the migration is completed, the command to " "do this is `cinder-manage db sync --bump-versions`" msgstr "" "On offline upgrades, due to the rolling upgrade mechanism we need to restart " "the cinder services twice to complete the installation just like in the " "rolling upgrades case. First you stop the cinder services, then you upgrade " "them, you sync your DB, then you start all the cinder services, and then you " "restart them all. To avoid this last restart we can now instruct the DB " "sync to bump the services after the migration is completed, the command to " "do this is `cinder-manage db sync --bump-versions`" msgid "" "Once your Ussuri upgrade is completed, the ``__DEFAULT__`` volume type may " "safely be renamed (or renamed and deleted) as long as the " "``default_volume_type`` configuration option is set to a valid existing " "volume type." msgstr "" "Once your Ussuri upgrade is completed, the ``__DEFAULT__`` volume type may " "safely be renamed (or renamed and deleted) as long as the " "``default_volume_type`` configuration option is set to a valid existing " "volume type." msgid "" "Only temporary resources created internally by cinder will have the value " "set to ``false``." msgstr "" "Only temporary resources created internally by Cinder will have the value " "set to ``false``." 
msgid "Open-E JovianDSS driver: Added 16K block size support." msgstr "Open-E JovianDSS driver: Added 16K block size support." msgid "Open-E JovianDSS driver: Added multiattach support." msgstr "Open-E JovianDSS driver: Added multiattach support." msgid "" "Open-E JovianDSS driver: general rework of volume and snapshot creation and " "deletion." msgstr "" "Open-E JovianDSS driver: general rework of volume and snapshot creation and " "deletion." msgid "" "Open-E JovianDSS driver: network interfaces selection on JovianDSS storage " "has been reworked." msgstr "" "Open-E JovianDSS driver: network interface selection on JovianDSS storage " "has been reworked." msgid "Open-E JovianDSS driver: revert-to-snapshot has been removed." msgstr "Open-E JovianDSS driver: revert-to-snapshot has been removed." msgid "" "Operator needs to perform ``cinder-manage db online_data_migrations`` to " "migrate existing consistency groups to generic volume groups." msgstr "" "Operator needs to perform ``cinder-manage db online_data_migrations`` to " "migrate existing consistency groups to generic volume groups." msgid "" "Operators affected by `OSSN-0086 `_ should note that this release updates the os-brick library used " "by cinder to version 2.10.5 in order to address an issue associated with the " "previous fixes for `Bug #1823200 `_." msgstr "" "Operators affected by `OSSN-0086 `_ should note that this release updates the os-brick library used " "by cinder to version 2.10.5 in order to address an issue associated with the " "previous fixes for `Bug #1823200 `_." msgid "" "Operators affected by `OSSN-0086 `_ should note that this release updates the os-brick library used " "by cinder to version 3.0.3 in order to address an issue associated with the " "previous fixes for `Bug #1823200 `_." msgstr "" "Operators affected by `OSSN-0086 `_ should note that this release updates the os-brick library used " "by Cinder to version 3.0.3 in order to address an issue associated with the " "previous fixes for `Bug #1823200 `_." msgid "" "Operators should change backup driver configuration value to use class name " "to get backup service working in a 'S' release." msgstr "" "Operators should change backup driver configuration value to use class name " "to get backup service working in a 'S' release." msgid "Optimize backend reporting capabilities for Huawei drivers." msgstr "Optimise backend reporting capabilities for Huawei drivers." msgid "" "Oracle ZFSSA iSCSI - allows a volume to be connected to more than one " "connector at the same time, which is required for live-migration to work. " "ZFSSA software release 2013.1.3.x (or newer) is required for this to work." msgstr "" "Oracle ZFSSA iSCSI - allows a volume to be connected to more than one " "connector at the same time, which is required for live-migration to work. " "ZFSSA software release 2013.1.3.x (or newer) is required for this to work." msgid "" "Oracle ZFSSA iSCSI volume driver implements ``get_manageable_volumes()``" msgstr "" "Oracle ZFSSA iSCSI volume driver implements ``get_manageable_volumes()``" msgid "Other Notes" msgstr "Other Notes" msgid "" "Otherwise, the volume type is the default volume type configured by the " "operator, and if no volume type is so configured, the volume type is the " "system default volume type, namely, ``__DEFAULT__``." 
msgstr "" "Otherwise, the volume type is the default volume type configured by the " "operator, and if no volume type is so configured, the volume type is the " "system default volume type, namely, ``__DEFAULT__``." msgid "" "Over the Xena and Yoga development cycles, cinder's default policy " "configuration is being modified to take advantage of the default " "authentication and authorization apparatus supplied by the Keystone " "project. This will give operators a rich set of default policies to control " "how users interact with the Block Storage service API." msgstr "" "Over the Xena and Yoga development cycles, Cinder's default policy " "configuration is being modified to take advantage of the default " "authentication and authorisation apparatus supplied by the Keystone " "project. This will give operators a rich set of default policies to control " "how users interact with the Block Storage service API." msgid "Pike Series Release Notes" msgstr "Pike Series Release Notes" msgid "" "Please be aware of the following known issues with this operation and the " "Ceph storage backend:" msgstr "" "Please be aware of the following known issues with this operation and the " "Ceph storage backend:" msgid "Please consult the Ceph documentation for details." msgstr "Please consult the Ceph documentation for details." msgid "" "PowerFlex Driver `bug #2052995 `_: REST API calls to the PowerFlex backend did not have a " "timeout set, which could result in cinder waiting forever. This fix " "introduces two configuration options, ``rest_api_connect_timeout`` and " "``rest_api_read_timeout``, to control timeouts when connecting to the " "backend. The default value of each is 30 seconds." msgstr "" "PowerFlex Driver `bug #2052995 `_: REST API calls to the PowerFlex backend did not have a " "timeout set, which could result in cinder waiting forever. This fix " "introduces two configuration options, ``rest_api_connect_timeout`` and " "``rest_api_read_timeout``, to control timeouts when connecting to the " "backend. The default value of each is 30 seconds." msgid "" "PowerFlex driver `bug #1897598 `_: Fixed bug with PowerFlex storage-assisted volume migration " "when volume migration was performed without conversion of volume type in " "cases where it should have been converted to/from thin/thick provisioned." msgstr "" "PowerFlex driver `bug #1897598 `_: Fixed bug with PowerFlex storage-assisted volume migration " "when volume migration was performed without conversion of volume type in " "cases where it should have been converted to/from thin/thick provisioned." msgid "" "PowerFlex driver `bug #1942095 `_: Fixed Cinder volume caching mechanism for the driver. Now " "the driver correctly raises ``exception.SnapshotLimitReached`` when maximum " "snapshots are created for a given volume and a volume cache is invalidated " "to allow a new row of fast volume clones." msgstr "" "PowerFlex driver `bug #1942095 `_: Fixed Cinder volume caching mechanism for the driver. Now " "the driver correctly raises ``exception.SnapshotLimitReached`` when maximum " "snapshots are created for a given volume and a volume cache is invalidated " "to allow a new row of fast volume clones." msgid "" "PowerMax Driver - Allowing for default volume type in group operations where " "the array serial number is retrieved from the cinder.conf instead of the " "pool_name on the extra specs." 
msgstr "" "PowerMax Driver - Allowing for default volume type in group operations where " "the array serial number is retrieved from the cinder.conf instead of the " "pool_name on the extra specs." msgid "" "PowerMax Driver - Concurrent live migrations can sometimes fail when one " "thread deletes a storage group that another thread may need." msgstr "" "PowerMax Driver - Concurrent live migrations can sometimes fail when one " "thread deletes a storage group that another thread may need." msgid "" "PowerMax Driver - Issue with upgrades from pre Pike to Pike and later. The " "device is not found when trying to snapshot a legacy volume." msgstr "" "PowerMax Driver - Issue with upgrades from pre Pike to Pike and later. The " "device is not found when trying to snapshot a legacy volume." msgid "" "PowerMax Driver - Promotion RDF Group number fix uses remote array SID when " "finding rdf group number when performing retype during failover." msgstr "" "PowerMax Driver - Promotion RDF Group number fix uses remote array SID when " "finding rdf group number when performing retype during failover." msgid "" "PowerMax Driver - Support to allow the use of multiple replication modes on " "one backend array." msgstr "" "PowerMax Driver - Support to allow the use of multiple replication modes on " "one backend array." msgid "" "PowerMax Driver - Two new replication specific configuration options " "sync_interval and sync_retries have been added to PowerMax cinder " "configuration. These configuration options determine how many times to " "retry checks to see if a SnapVX copy mode has completed with a replication " "enabled volume, and how long to wait between retries." msgstr "" "PowerMax Driver - Two new replication specific configuration options " "sync_interval and sync_retries have been added to PowerMax cinder " "configuration. These configuration options determine how many times to " "retry checks to see if a SnapVX copy mode has completed with a replication " "enabled volume, and how long to wait between retries." msgid "" "PowerMax Driver - `bug #1908920 `_: This offline r1 promotion fix resets replication enabled " "and configuration metadata during promotion retype with offline r1 array. It " "also gets management storage group name from source extra_specs during " "promotion." msgstr "" "PowerMax Driver - `bug #1908920 `_: This offline r1 promotion fix resets replication enabled " "and configuration metadata during promotion retype with offline r1 array. It " "also gets the management storage group name from source extra_specs during " "the promotion." msgid "" "PowerMax Driver - corrected handling of exceptions occurring during cleanup " "in the context of volume migration (Change-Id " "`I0c0a96e21209c5abe359c6985fae7cee598c21ab `_)" msgstr "" "PowerMax Driver - corrected handling of exceptions occurring during cleanup " "in the context of volume migration (Change-Id " "`I0c0a96e21209c5abe359c6985fae7cee598c21ab `_)" msgid "" "PowerMax Driver - support for using snap_ids instead of generations for " "better handling of volume snapshots (Change-Id " "`I0edf2ac777bef888e760f711a94e3fe4f94262ae `_)" msgstr "" "PowerMax Driver - support for using snap_ids instead of generations for " "better handling of volume snapshots (Change-Id " "`I0edf2ac777bef888e760f711a94e3fe4f94262ae `_)" msgid "" "PowerMax Driver `bug #1905564 `_: Fix Fix remote SRP not being assigned to volume's Host when " "performing retype during failover-promotion." 
msgstr "" "PowerMax Driver `bug #1905564 `_: Fix Fix remote SRP not being assigned to volume's Host when " "performing retype during failover-promotion." msgid "" "PowerMax Driver `bug #2051830 `_: REST API calls to the PowerMax backend did not have a " "timeout set, which could result in cinder waiting forever. This fix " "introduces two configuration options, ``rest_api_connect_timeout`` and " "``rest_api_read_timeout``, to control timeouts when connecting to the " "backend. The default value of each is 30 seconds." msgstr "" "PowerMax Driver `bug #2051830 `_: REST API calls to the PowerMax backend did not have a " "timeout set, which could result in Cinder waiting forever. This fix " "introduces two configuration options, ``rest_api_connect_timeout`` and " "``rest_api_read_timeout``, to control timeouts when connecting to the " "backend. The default value of each is 30 seconds." msgid "" "PowerMax driver - Changing 8.4 to 9.0 Unisphere for PowerMax REST endpoints." msgstr "" "PowerMax driver - Changing 8.4 to 9.0 Unisphere for PowerMax REST endpoints." msgid "" "PowerMax driver - Disabling inuse storage assisted migration to a metro or " "asynchronous replicated volume type as this operation will not facilitate FC " "scanning or iSCSI login of the target array." msgstr "" "PowerMax driver - Disabling inuse storage assisted migration to a metro or " "asynchronous replicated volume type as this operation will not facilitate FC " "scanning or iSCSI login of the target array." msgid "" "PowerMax driver - Volume deallocate and volume delete functionality have " "been combined into a single workflow." msgstr "" "PowerMax driver - Volume deallocate and volume delete functionality have " "been combined into a single workflow." msgid "" "PowerMax driver - Workload support was dropped in ucode 5978. If a VMAX All " "Flash array is upgraded to 5978 or greater and existing volume types " "leveraged workload e.g. DSS, DSS_REP, OLTP and OLTP_REP, certain operations " "will no longer work and the volume type will be unusable. This fix addresses " "these issues and fixes problems with using old volume types with workloads " "included in the volume type pool_name." msgstr "" "PowerMax driver - Workload support was dropped in ucode 5978. If a VMAX All " "Flash array is upgraded to 5978 or greater and existing volume types " "leveraged workload e.g. DSS, DSS_REP, OLTP and OLTP_REP, certain operations " "will no longer work and the volume type will be unusable. This fix addresses " "these issues and fixes problems with using old volume types with workloads " "included in the volume type pool_name." msgid "" "PowerMax driver - fix to eliminate 'cannot use the device for the function " "because it is in a Copy Session' when attempting to delete a volume group " "that previously had a group snapshot created on and deleted from it." msgstr "" "PowerMax driver - fix to eliminate 'cannot use the device for the function " "because it is in a Copy Session' when attempting to delete a volume group " "that previously had a group snapshot created on and deleted from it." msgid "" "PowerMax driver - the minimum version of Unisphere for PowerMax for the " "current release is 9.1.0.5. It is however recommended to install the " "Security Releases(SR) of Unisphere for PowerMax if they become available." msgstr "" "PowerMax driver - the minimum version of Unisphere for PowerMax for the " "current release is 9.1.0.5. 
It is however recommended to install the " "Security Releases(SR) of Unisphere for PowerMax if they become available." msgid "" "PowerMax driver - the minimum version of Unisphere for PowerMax required for " "Train is 9.1, so all the latest 91 REST endpoints will be used." msgstr "" "PowerMax driver - the minimum version of Unisphere for PowerMax required for " "Train is 9.1, so all the latest 91 REST endpoints will be used." msgid "" "PowerMax driver - the minimum version of Unisphere for PowerMax required for " "Victoria is 9.2, so all the latest 92 REST endpoints will be used." msgstr "" "PowerMax driver - the minimum version of Unisphere for PowerMax required for " "Victoria is 9.2, so all the latest 92 REST endpoints will be used." msgid "" "PowerMax driver : Enhancement to use an existing initiator group even if " "there is no entry for the contained initiator(s) in the login table. This is " "permissable so long as the initiator(s) in the connector object match." msgstr "" "PowerMax driver : Enhancement to use an existing initiator group even if " "there is no entry for the contained initiator(s) in the login table. This is " "permissible so long as the initiator(s) in the connector object match." msgid "" "PowerMax driver `bug #1929429 `_: Fixes child/parent storage group check so that a pattern " "match is not case sensitive. For example, myStorageGroup should equal " "MYSTORAGEGROUP and mystoragegroup." msgstr "" "PowerMax driver `bug #1929429 `_: Fixes child/parent storage group check so that a pattern " "match is not case sensitive. For example, myStorageGroup should equal " "MYSTORAGEGROUP and mystoragegroup." msgid "" "PowerMax driver `bug #1930290 `_: This fixes the QoS conflict issue on a child storage group " "by not setting QoS on a parent storage group." msgstr "" "PowerMax driver `bug #1930290 `_: This fixes the QoS conflict issue on a child storage group " "by not setting QoS on a parent storage group." msgid "" "PowerMax driver `bug #1936848 `_: Fixed Generic Volume Group error where the name has been " "changed in OpenStack and is not reflected on the corresponding storage group " "on the PowerMax." msgstr "" "PowerMax driver `bug #1936848 `_: Fixed Generic Volume Group error where the name has been " "changed in OpenStack and is not reflected on the corresponding storage group " "on the PowerMax." msgid "" "PowerMax driver `bug #1938572 `_ : Legacy PowerMax OS fix to convert an int to a string if " "the generation of snapVX is returned as an int from REST so that a 0 does " "not equate to False in python." msgstr "" "PowerMax driver `bug #1938572 `_ : Legacy PowerMax OS fix to convert an int to a string if " "the generation of snapVX is returned as an int from REST so that a 0 does " "not equate to False in python." msgid "" "PowerMax driver `bug #1939139 `_: Fix on create snapshot operation that exists when using " "PowerMax OS 5978.711 and later." msgstr "" "PowerMax driver `bug #1939139 `_: Fix on create snapshot operation that exists when using " "PowerMax OS 5978.711 and later." msgid "" "PowerMax driver `bug #1979668 `_: Fixed visibility of manageable volumes in multiple storage " "groups." msgstr "" "PowerMax driver `bug #1979668 `_: Fixed visibility of manageable volumes in multiple storage " "groups." msgid "" "PowerMax driver: Checking that the contents of the initiator group match the " "contents of the connector regardless of the initiator_check option being " "enabled. This will ensure an exception is raised if there is a mismatch, in " "all scenarios." 
msgstr "" "PowerMax driver: Checking that the contents of the initiator group match the " "contents of the connector regardless of the initiator_check option being " "enabled. This will ensure an exception is raised if there is a mismatch, in " "all scenarios." msgid "" "PowerMax driver: Enhancement to check the status of the ports in the port " "group so that any potential issue, like the ports being down, is highlighted " "early and clearly." msgstr "" "PowerMax driver: Enhancement to check the status of the ports in the port " "group so that any potential issue, like the ports being down, is highlighted " "early and clearly." msgid "" "PowerMax driver: Fix to prevent an R2 volume being larger than the R1 so " "that an extend operation will not fail if the R2 happens to be larger than " "the requested extend size." msgstr "" "PowerMax driver: Fix to prevent an R2 volume from being larger than the R1 " "so that an extend operation will not fail if the R2 happens to be larger " "than the requested extend size." msgid "" "PowerMax driver: Fix to suspend the storage group you are about to delete " "and then add a force flag to delete the volume pairs within the storage " "group." msgstr "" "PowerMax driver: Fix to suspend the storage group you are about to delete " "and then add a force flag to delete the volume pairs within the storage " "group." msgid "" "PowerMax driver: Previously, the target storage group created from a " "replicated storage group was also replicated, which could cause failures. " "This fix creates a non-replicated target initially, and lets the replicate " "group API take care of replicating it." msgstr "" "PowerMax driver: Previously, the target storage group created from a " "replicated storage group was also replicated, which could cause failures. " "This fix creates a non-replicated target initially and lets the replicate " "group API take care of replicating it." msgid "" "PowerMax for Cinder driver now supports Port Group and Port load balancing " "when attaching Nova Compute instances to volumes on the backend PowerMax." msgstr "" "PowerMax for Cinder driver now supports Port Group and Port load balancing " "when attaching Nova Compute instances to volumes on the backend PowerMax." msgid "" "PowerMax for Cinder driver now supports extending in-use Metro RDF enabled " "volumes." msgstr "" "PowerMax for Cinder driver now supports extending in-use Metro RDF enabled " "volumes." msgid "" "PowerMax for Cinder driver now supports storage-assisted in-use retype for " "volumes including those in replication sessions." msgstr "" "PowerMax for Cinder driver now supports storage-assisted in-use retype for " "volumes including those in replication sessions." msgid "" "PowerMax for Cinder driver now supports the ability to transition to a new " "primary array as part of the failover process if the existing primary array " "is deemed unrecoverable." msgstr "" "PowerMax for Cinder driver now supports the ability to transition to a new " "primary array as part of the failover process if the existing primary array " "is deemed unrecoverable." msgid "" "PowerStore driver `Bug #1920729 `_: Fix iSCSI targets not being returned from the REST API call " "if targets are used for multiple purposes (iSCSI target, Replication target, " "etc.)." msgstr "" "PowerStore driver `Bug #1920729 `_: Fix iSCSI targets not being returned from the REST API call " "if targets are used for multiple purposes (iSCSI target, Replication target, " "etc.)." 
msgid "" "PowerStore driver `bug #1962824 `_: Fixed Cinder volume caching mechanism for the driver. Now " "the driver correctly raises ``exception.SnapshotLimitReached`` when maximum " "snapshots are created for a given volume and the volume cache is invalidated " "to allow a new row of fast volume clones." msgstr "" "PowerStore driver `bug #1962824 `_: Fixed Cinder volume caching mechanism for the driver. Now " "the driver correctly raises ``exception.SnapshotLimitReached`` when maximum " "snapshots are created for a given volume and the volume cache is invalidated " "to allow a new row of fast volume clones." msgid "" "PowerStore driver `bug #1981068 `_: Fixed request data validation for the REST client." msgstr "" "PowerStore driver `bug #1981068 `_: Fixed request data validation for the REST client." msgid "PowerStore driver: Add Consistency Groups support." msgstr "PowerStore driver: Add Consistency Groups support." msgid "PowerStore driver: Add OpenStack replication v2.1 support." msgstr "PowerStore driver: Add OpenStack replication v2.1 support." msgid "" "PowerStore driver: ``powerstore_appliances`` option is deprecated and will " "be removed in a future release. Driver does not use this option to determine " "which appliances to use. PowerStore uses its own load balancer instead." msgstr "" "PowerStore driver: ``powerstore_appliances`` option is deprecated and will " "be removed in a future release. Driver does not use this option to determine " "which appliances to use. PowerStore uses its own load balancer instead." msgid "Prelude" msgstr "Prelude" msgid "" "Previous installations of IBM Storage must be un-installed first and the new " "driver should be installed on top. In addition the cinder.conf values should " "be updated to reflect the new paths. For example the proxy setting of " "``storage.proxy.IBMStorageProxy`` should be updated to ``cinder.volume." "drivers.ibm.ibm_storage.proxy.IBMStorageProxy``." msgstr "" "Previous installations of IBM Storage must be uninstalled first and the new " "driver should be installed on top. In addition the cinder.conf values should " "be updated to reflect the new paths. For example the proxy setting of " "``storage.proxy.IBMStorageProxy`` should be updated to ``cinder.volume." "drivers.ibm.ibm_storage.proxy.IBMStorageProxy``." msgid "" "Previously the only way to remove volumes in error states from a consistency-" "group was to delete the consistency group and create it again. Now it is " "possible to remove volumes in error and error_deleting states." msgstr "" "Previously the only way to remove volumes in error states from a consistency-" "group was to delete the consistency group and create it again. Now it is " "possible to remove volumes in error and error_deleting states." msgid "" "Previously, the below operations could fail if the linked device was not yet " "fully defined at the time of the call. Now, when ``snapvx_unlink_symforce`` " "is enabled, those operations are not interrupted by not fully defined " "devices." msgstr "" "Previously, the below operations could fail if the linked device was not yet " "fully defined at the time of the call. Now, when ``snapvx_unlink_symforce`` " "is enabled, those operations are not interrupted by not fully defined " "devices." msgid "" "Prior to the Ussuri release, ``os-reset_status`` notifications for volumes, " "snapshots, and backups were sent to *nonstandard* publisher_ids. 
This " "behavior was deprecated in Ussuri, and notifications were sent to both the " "standard and nonstandard publisher_ids. In this release, ``os-" "reset_status`` notifications, like all other notifications for volume, " "snapshot and backup, are sent *only* to the following *standard* " "publisher_ids:" msgstr "" "Prior to the Ussuri release, ``os-reset_status`` notifications for volumes, " "snapshots, and backups were sent to *nonstandard* publisher_ids. This " "behavior was deprecated in Ussuri, and notifications were sent to both the " "standard and nonstandard publisher_ids. In this release, ``os-" "reset_status`` notifications, like all other notifications for volume, " "snapshot and backup, are sent *only* to the following *standard* " "publisher_ids:" msgid "" "Privsep daemons are now started by Cinder when required. These daemons can " "be started via rootwrap if required. rootwrap configs therefore need to be " "updated to include new privsep daemon invocations." msgstr "" "Privsep daemons are now started by Cinder when required. These daemons can " "be started via rootwrap if required. rootwrap configs therefore need to be " "updated to include new privsep daemon invocations." msgid "" "Privsep transitions. Cinder is transitioning from using the older style " "rootwrap privilege escalation path to the new style Oslo privsep path. This " "should improve performance and security of Cinder in the long term." msgstr "" "Privsep transitions. Cinder is transitioning from using the older style " "rootwrap privilege escalation path to the new style Oslo privsep path. This " "should improve performance and security of Cinder in the long term." msgid "Prohibit the deletion of group if group snapshot exists." msgstr "Prohibit the deletion of group if group snapshot exists." msgid "" "Projects with the admin role are now allowed to operate on the quotas of all " "other projects." msgstr "" "Projects with the admin role are now allowed to operate on the quotas of all " "other projects." msgid "ProphetStor drivers: FC and iSCSI" msgstr "ProphetStor drivers: FC and iSCSI" msgid "" "Pure Storage Cinder Driver: Fixed `bug 2029005 `__ to correctly disconnect a sync replicated volume " "from host on the secondary array when uniform option is set to True." msgstr "" "Pure Storage Cinder Driver: Fixed `bug 2029005 `__ to correctly disconnect a sync replicated volume " "from host on the secondary array when uniform option is set to True." msgid "" "Pure Storage Driver: Add internal check to allow for FlashArray with joint " "FC and NVMe-FC support" msgstr "" "Pure Storage Driver: Add internal check to allow for FlashArray with joint " "FC and NVMe-FC support" msgid "" "Pure Storage FlashArray driver `bug #1929219 `_: Fixes issue with incorrect internal mechanism for " "checking REST API of backend array. This has no external effect for users." msgstr "" "Pure Storage FlashArray driver `bug #1929219 `_: Fixes issue with an incorrect internal mechanism for " "checking REST API of backend array. This has no external effect for users." 
msgid "" "Pure Storage FlashArray driver `bug #1936663 `_: Fixes issue where cloning a consistency group " "containing volumes with very long names causes a crash - Required for " "PowerVC support" msgstr "" "Pure Storage FlashArray driver `bug #1936663 `_: Fixes the issue where cloning a consistency group " "containing volumes with very long names causes a crash - Required for " "PowerVC support" msgid "" "Pure Storage FlashArray driver `bug #1938579 `_: Fixes issue when cloning multiple volumes in PowerVC " "deployments." msgstr "" "Pure Storage FlashArray driver `bug #1938579 `_: Fixes issue when cloning multiple volumes in PowerVC " "deployments." msgid "" "Pure Storage FlashArray driver `bug #1969784 `_: Fixed array failover incorrectly handles loss of an " "array due to network issue" msgstr "" "Pure Storage FlashArray driver `bug #1969784 `_: Fixed array failover incorrectly handles loss of an " "array due to network issue" msgid "" "Pure Storage FlashArray driver `bug 1910143 `_: Parameter ``pure_iscsi_cidr`` is now IPv4/v6 " "agnostic." msgstr "" "Pure Storage FlashArray driver `bug 1910143 `_: Parameter ``pure_iscsi_cidr`` is now IPv4/v6 " "agnostic." msgid "" "Pure Storage FlashArray driver fix to ensure cinder_tempest_plugin " "consistency group tests pass." msgstr "" "Pure Storage FlashArray driver fix to ensure cinder_tempest_plugin " "consistency group tests pass." msgid "" "Pure Storage FlashArray driver has added configuration option " "``pure_host_personality`` for setting the host personality upon host " "creation (existing hosts are not affected)." msgstr "" "Pure Storage FlashArray driver has added configuration option " "``pure_host_personality`` for setting the host personality upon host " "creation (existing hosts are not affected)." msgid "" "Pure Storage FlashArray driver has added configuration option " "``pure_iscsi_cidr`` for setting a network CIDR for iSCSI target connection. " "The default value will allow connections to all iSCSI targets." msgstr "" "Pure Storage FlashArray driver has added configuration option " "``pure_iscsi_cidr`` for setting a network CIDR for iSCSI target connection. " "The default value will allow connections to all iSCSI targets." msgid "" "Pure Storage FlashArray driver has added configuration options " "``pure_replication_pg_name`` and ``pure_replication_pod_name`` for setting " "the names for replication PGs and Pods." msgstr "" "Pure Storage FlashArray driver has added configuration options " "``pure_replication_pg_name`` and ``pure_replication_pod_name`` for setting " "the names for replication PGs and Pods." msgid "Pure Storage FlashArray driver has added multiatach support." msgstr "Pure Storage FlashArray driver has added multiattach support." msgid "Pure Storage FlashArray driver: Added support NVMe-TCP transport layer." msgstr "" "Pure Storage FlashArray driver: Added support NVMe-TCP transport layer." msgid "" "Pure Storage FlashArray driver: Enabled support for Active/Active " "replication for the FlashArray driver. This allows users to configure " "FlashArray backends in clustered environments." msgstr "" "Pure Storage FlashArray driver: Enabled support for Active/Active " "replication for the FlashArray driver. This allows users to configure " "FlashArray backends in clustered environments." msgid "" "Pure Storage FlashArray driver: Enabled support for Active/Active to both " "the iSCSI and FC driver. This allows users to configure Pure Storage " "backends in clustered environments." 
msgstr "" "Pure Storage FlashArray driver: Enabled support for Active/Active to both " "the iSCSI and FC driver. This allows users to configure Pure Storage " "backends in clustered environments." msgid "" "Pure Storage FlashArray driver: added configuration option " "``pure_iscsi_cidr_list`` for setting several network CIDRs for iSCSI target " "connection. Both IPv4 and IPv6 is supported. The default still allows all " "IPv4 targets." msgstr "" "Pure Storage FlashArray driver: added configuration option " "``pure_iscsi_cidr_list`` for setting several network CIDRs for iSCSI target " "connection. Both IPv4 and IPv6 are supported. The default still allows all " "IPv4 targets." msgid "" "Pure Storage FlashArray drivers changed minimum supported Purity//FA version " "to 6.1.0." msgstr "" "Pure Storage FlashArray drivers changed minimum supported Purity//FA version " "to 6.1.0." msgid "" "Pure Storage FlashArray drivers upgraded to remove REST 1.x support and " "changed to REST 2.4 as the minimum supported version." msgstr "" "Pure Storage FlashArray drivers upgraded to remove REST 1.x support and " "changed to REST 2.4 as the minimum supported version." msgid "" "Pure Storage FlashArray minimum ``purestorage`` SDK version increased to " "1.17.0" msgstr "" "Pure Storage FlashArray minimum ``purestorage`` SDK version increased to " "1.17.0" msgid "" "Pure Storage Volume Drivers can now utilize driver_ssl_cert_verify and " "driver_ssl_cert_path config options to allow for secure https requests to " "the FlashArray." msgstr "" "Pure Storage Volume Drivers can now utilise driver_ssl_cert_verify and " "driver_ssl_cert_path config options to allow for secure https requests to " "the FlashArray." msgid "" "Pure Storage `bug #1930748 `_: Fixed issues with multiattched volumes being diconnected " "from a backend when still listed as an attachment to an instance." msgstr "" "Pure Storage `bug #1930748 `_: Fixed issues with multiattched volumes being disconnected " "from a backend when still listed as an attachment to an instance." msgid "" "Pure Storage adds a new driver to support NVMe-RoCE for the FlashArray. All " "features of the iSCSI and FC drivers are fully supported by this new driver." msgstr "" "Pure Storage adds a new driver to support NVMe-RoCE for the FlashArray. All " "features of the iSCSI and FC drivers are fully supported by this new driver." msgid "" "Pure Storage driver `Bug #1945824 `_: Fixed missing DB values when creating new consistency group " "from CG snapshot." msgstr "" "Pure Storage driver `Bug #1945824 `_: Fixed missing DB values when creating new consistency group " "from CG snapshot." msgid "" "Pure Storage driver `bug 1870103 `_: Ensure that unmanaged volumes do not exceed maximum " "character length on FlashArray." msgstr "" "Pure Storage driver `bug 1870103 `_: Ensure that unmanaged volumes do not exceed maximum " "character length on FlashArray." msgid "" "Pure Storage driver adds volume metadata describing the backend array name " "(``array_name``) and volume name (``array_volume_name``). This allows easier " "identification and location of a cinder volume when multiple clusters are " "using the same backend arrays, or when the cinder scheduler has multiplae " "backends to choose from." msgstr "" "Pure Storage driver adds volume metadata describing the backend array name " "(``array_name``) and volume name (``array_volume_name``). 
This allows easier " "identification and location of a Cinder volume when multiple clusters are " "using the same backend arrays, or when the Cinder scheduler has multiple " "backends to choose from." msgid "" "Pure Storage driver: Add missing support for ``host_personality`` setting " "for FC-based hosts" msgstr "" "Pure Storage driver: Add missing support for ``host_personality`` setting " "for FC-based hosts" msgid "" "Pure Storage driver: Added replication capability to backend pool " "information. Response will be ```async```, ```sync``` or```trisync```. " "```sync``` implies support for ```async``` and ```trisync``` implies support " "for ```sync``` and ```async```." msgstr "" "Pure Storage driver: Added replication capability to backend pool " "information. Response will be ```async```, ```sync``` or```trisync```. " "```sync``` implies support for ```async``` and ```trisync``` implies support " "for ```sync``` and ```async```." msgid "" "Pure Storage driver: Added support for 3-site replication, aka trisync. " "Requires two replication devices to be created, one async and one sync, plus " "the addition of new parameters ``pure_trisync_enabled`` and " "``pure_trisync_pg_name``." msgstr "" "Pure Storage driver: Added support for 3-site replication, aka trisync. " "Requires two replication devices to be created, one async and one sync, plus " "the addition of new parameters ``pure_trisync_enabled`` and " "``pure_trisync_pg_name``." msgid "" "Pure Storage driver: Allow synchronously replicated volumes to be created " "during a replication failover event. These will remain viable volumes when " "the replication is failed back to its original state." msgstr "" "Pure Storage driver: Allow synchronously replicated volumes to be created " "during a replication failover event. These will remain viable volumes when " "the replication is failed back to its original state." msgid "" "Pure Storage: FlashArray minimum Purity//FA version is increased to 5.3.0. " "All FlashArray backends must be at at least this minimum version or the " "driver will not initialize." msgstr "" "Pure Storage: FlashArray minimum Purity//FA version is increased to 5.3.0. " "All FlashArray backends must be at at least this minimum version or the " "driver will not initialise." msgid "" "Pure Storage: Minimum supported FlashArray Purity//FA is changed to 5.3.0. " "All FlashArray backends must be at at least this minimum version or the " "driver will not initialize." msgstr "" "Pure Storage: Minimum supported FlashArray Purity//FA is changed to 5.3.0. " "All FlashArray backends must be at at least this minimum version or the " "driver will not initialise." msgid "" "Pure Storage: Remove all API version checks in driver as the new minimum " "FlashArray Purity//FA version supports all previously version-gated features " "and functionality support." msgstr "" "Pure Storage: Remove all API version checks in the driver as the new minimum " "FlashArray Purity//FA version supports all previously version-gated features " "and functionality support." msgid "" "Pure iSCSI & FC driver `bug #2006960 `_: Fixed attaching LUNs greater than 255. Driver leverages " "new os-brick functionality to specify LUN addressing mode." msgstr "" "Pure iSCSI & FC driver `bug #2006960 `_: Fixed attaching LUNs greater than 255. Driver leverages " "new os-brick functionality to specify LUN addressing mode." msgid "" "Pure volume drivers will need 'purestorage' python module v1.6.0 or newer. " "Support for 1.4.x has been removed." 
msgstr "" "Pure volume drivers will need 'purestorage' Python module v1.6.0 or newer. " "Support for 1.4.x has been removed." msgid "" "Python 2 is no longer supported. The minimum version of Python that may be " "used with this release is **Python 3.6**." msgstr "" "Python 2 is no longer supported. The minimum version of Python that may be " "used with this release is **Python 3.6**." msgid "" "Python 2.7 support has been dropped. The last release of Cinder to support " "py2.7 is OpenStack Train. The minimum version of Python now supported by " "Cinder is Python 3.6." msgstr "" "Python 2.7 support has been dropped. The last release of Cinder to support " "py2.7 is OpenStack Train. The minimum version of Python now supported by " "Cinder is Python 3.6." msgid "" "Python 3.6 & 3.7 support has been dropped. The minimum version of Python now " "supported is Python 3.8." msgstr "" "Python 3.6 & 3.7 support has been dropped. The minimum version of Python now " "supported is Python 3.8." msgid "QNAP" msgstr "QNAP" msgid "QNAP Cinder driver added support for QES fw 2.0.0." msgstr "QNAP Cinder driver added support for QES fw 2.0.0." msgid "QNAP Cinder driver added support for QES fw 2.1.0." msgstr "QNAP Cinder driver added support for QES fw 2.1.0." msgid "QNAP Cinder driver supports QES FW on TDS series NAS." msgstr "QNAP Cinder driver supports QES FW on TDS series NAS." msgid "QoS support in EMC VMAX iSCSI and FC drivers." msgstr "QoS support in EMC VMAX iSCSI and FC drivers." msgid "Queens Series Release Notes" msgstr "Queens Series Release Notes" msgid "" "Quota validations are now forced for all APIs. skip_validation flag is now " "removed from the request body for the quota-set update API." msgstr "" "Quota validations are now forced for all APIs. skip_validation flag is now " "removed from the request body for the quota-set update API." msgid "" "RBD Driver `bug #1957073 `_: " "Fixed snapshot deletion failure when its volume doesn't exist." msgstr "" "RBD Driver `bug #1957073 `_: " "Fixed snapshot deletion failure when its volume doesn't exist." msgid "" "RBD driver `Bug #1898918 `_: " "Fix thread block caused by the flatten operation during cloning a volume. " "Now the flatten operation is executed in a different thread." msgstr "" "RBD driver `Bug #1898918 `_: " "Fix thread block caused by the flatten operation during cloning a volume. " "Now the flatten operation is executed in a different thread." msgid "" "RBD driver `Bug #1922408 `_: " "Fixed create encrypted volume from encrypted snapshot." msgstr "" "RBD driver `Bug #1922408 `_: " "Fixed create encrypted volume from encrypted snapshot." msgid "" "RBD driver `bug #1901241 `_: " "Fixed an issue where decreasing the ``rbd_max_clone_depth`` configuration " "option would prevent volumes that had already exceeded that depth from being " "cloned." msgstr "" "RBD driver `bug #1901241 `_: " "Fixed an issue where decreasing the ``rbd_max_clone_depth`` configuration " "option would prevent volumes that had already exceeded that depth from being " "cloned." msgid "" "RBD driver `bug #1907964 `_: " "Add support for fast-diff on backup images stored in Ceph. Provided fast-" "diff is supported by the backend it will automatically be enabled and used. " "With fast-diff enabled, the generation of diffs between images and snapshots " "as well as determining the actual data usage of a snapshot is speed up " "significantly." msgstr "" "RBD driver `bug #1907964 `_: " "Add support for fast-diff on backup images stored in Ceph. 
Provided fast-" "diff is supported by the backend it will automatically be enabled and used. " "With fast-diff enabled, the generation of diffs between images and snapshots " "as well as determining the actual data usage of a snapshot is speed up " "significantly." msgid "" "RBD driver `bug #1916843 `_: " "Fixed rpc timeout when backing up RBD snapshot. We no longer flatten " "temporary volumes and snapshots." msgstr "" "RBD driver `bug #1916843 `_: " "Fixed RPC timeout when backing up RBD snapshot. We no longer flatten " "temporary volumes and snapshots." msgid "" "RBD driver `bug #1941815 `_: " "Fixed deleting volumes with snapshots/volumes in the ceph trash space." msgstr "" "RBD driver `bug #1941815 `_: " "Fixed deleting volumes with snapshots/volumes in the ceph trash space." msgid "" "RBD driver `bug #1942210 `_: " "When creating a volume from a snapshot, the operation could fail due to an " "uncaught exception being raised during a check to see if the backend Ceph " "installation supported the clone v2 API. The driver now handles this " "situation gracefully." msgstr "" "RBD driver `bug #1942210 `_: " "When creating a volume from a snapshot, the operation could fail due to an " "uncaught exception being raised during a check to see if the backend Ceph " "installation supported the clone v2 API. The driver now handles this " "situation gracefully." msgid "" "RBD driver `bug #1947518 `_: " "Corrected a regression caused by the fix for `Bug #1931004 `_ that was attempting to access the " "glance images RBD pool with write privileges when creating a volume from an " "image." msgstr "" "RBD driver `bug #1947518 `_: " "Corrected a regression caused by the fix for `Bug #1931004 `_ that was attempting to access the " "glance images RBD pool with write privileges when creating a volume from an " "image." msgid "" "RBD driver `bug #1960206 `_: " "Fixed ``total_capacity`` reported by the driver to the scheduler on Ceph " "clusters that have renamed the ``bytes_used`` field to ``stored``. (e.g., " "`Nautilus `_)." msgstr "" "RBD driver `bug #1960206 `_: " "Fixed ``total_capacity`` reported by the driver to the scheduler on Ceph " "clusters that have renamed the ``bytes_used`` field to ``stored``. (e.g., " "`Nautilus `_)." msgid "" "RBD driver can have bottlenecks if too many slow operations are happening at " "the same time (for example many huge volume deletions), we can now use the " "`backend_native_threads_pool_size` option in the RBD driver section to " "resolve the issue." msgstr "" "RBD driver can have bottlenecks if too many slow operations are happening at " "the same time (for example many huge volume deletions), we can now use the " "`backend_native_threads_pool_size` option in the RBD driver section to " "resolve the issue." msgid "" "RBD driver has added multiattach support. It should be noted that " "replication and multiattach are mutually exclusive, so a single RBD volume " "can only be configured to support one of these features at a time. " "Additionally, RBD image features are not preserved which prevents a volume " "being retyped from multiattach to another type. This limitation is temporary " "and will be addressed soon." msgstr "" "RBD driver has added multiattach support. It should be noted that " "replication and multiattach are mutually exclusive, so a single RBD volume " "can only be configured to support one of these features at a time. " "Additionally, RBD image features are not preserved which prevents a volume " "being retyped from multiattach to another type. 
This limitation is temporary " "and will be addressed soon." msgid "" "RBD driver supports returning a static total capacity value instead of a " "dynamic value like it's been doing. Configurable with " "`report_dynamic_total_capacity` configuration option." msgstr "" "RBD driver supports returning a static total capacity value instead of a " "dynamic value like it's been doing. Configurable with " "`report_dynamic_total_capacity` configuration option." msgid "RBD driver: Added QoS support." msgstr "RBD driver: Added QoS support." msgid "" "RBD driver: No longer copy the RBD source volume image to a temporary file " "when uploading a volume to an image." msgstr "" "RBD driver: No longer copy the RBD source volume image to a temporary file " "when uploading a volume to an image." msgid "" "RBD driver: Prior to this release, the Cinder project did not have a " "statement concerning what versions of Ceph are supported by Cinder. We " "hereby announce that:" msgstr "" "RBD driver: Prior to this release, the Cinder project did not have a " "statement concerning what versions of Ceph are supported by Cinder. We " "hereby announce that:" msgid "" "RBD driver: Sets the Ceph cluster FSID as the default value for the " "``rbd_secret_uuid`` configuration option." msgstr "" "RBD driver: Sets the Ceph cluster FSID as the default value for the " "``rbd_secret_uuid`` configuration option." msgid "" "RBD driver: There are some known issues concerning the revert-to-snapshot " "support added in this release." msgstr "" "RBD driver: There are some known issues concerning the revert-to-snapshot " "support added in this release." msgid "" "RBD driver: support added for reverting a volume to the most recent snapshot " "taken." msgstr "" "RBD driver: support added for reverting a volume to the most recent snapshot " "taken." msgid "" "RBD driver: the ``rbd_keyring_conf`` configuration option, which was " "deprecated in the Ussuri release, has been removed. If it is present in a " "configuration file, its value will silently be ignored. For more " "information, see `OSSN-0085 `_: Cinder configuration option can leak secret key from Ceph " "backend." msgstr "" "RBD driver: the ``rbd_keyring_conf`` configuration option, which was " "deprecated in the Ussuri release, has been removed. If it is present in a " "configuration file, its value will silently be ignored. For more " "information, see `OSSN-0085 `_: Cinder configuration option can leak secret key from Ceph " "backend." msgid "" "RBD stats report has been fixed, now properly reports " "`allocated_capacity_gb` and `provisioned_capacity_gb` with the sum of the " "sizes of the volumes (not physical sizes) for volumes created by Cinder and " "all available in the pool respectively. Free capacity will now properly " "handle quota size restrictions of the pool." msgstr "" "RBD stats report has been fixed, now properly reports " "`allocated_capacity_gb` and `provisioned_capacity_gb` with the sum of the " "sizes of the volumes (not physical sizes) for volumes created by Cinder and " "all available in the pool respectively. Free capacity will now properly " "handle quota size restrictions of the pool." msgid "" "RBD/Ceph backends should adjust `max_over_subscription_ratio` to take into " "account that the driver is no longer reporting volume's physical usage but " "it's provisioned size." 
msgstr "" "RBD/Ceph backends should adjust `max_over_subscription_ratio` to take into " "account that the driver is no longer reporting volume's physical usage but " "it's provisioned size." msgid "" "REST API - fixed issue where the Get Current Log Levels for Cinder Services " "call in microversion 3.32 was ignoring the server name filter (Change-Id " "`Iecb3faad9270f969185089cc291127b340483a46 `_)" msgstr "" "REST API - fixed issue where the Get Current Log Levels for Cinder Services " "call in microversion 3.32 was ignoring the server name filter (Change-Id " "`Iecb3faad9270f969185089cc291127b340483a46 `_)" msgid "" "Rbd replication secondary device could set different user and keyring with " "primary cluster. Secondary secret_uuid value is configed in libvirt secret, " "and libvirtd using secondary secret reconnect to secondary cluster after " "Cinder failover host." msgstr "" "RBD replication secondary device could set different user and keyring with " "primary cluster. Secondary secret_uuid value is configured in libvirt " "secret, and libvirtd using secondary secret reconnect to secondary cluster " "after Cinder failover host." msgid "" "Re-added Infortrend Cinder volume driver. The Infortrend driver, removed in " "Cinder 12.0.0 (Queens), has been restored in this release." msgstr "" "Re-added Infortrend Cinder volume driver. The Infortrend driver, removed in " "Cinder 12.0.0 (Queens), has been restored in this release." msgid "Re-added QNAP Cinder volume driver." msgstr "Re-added QNAP Cinder volume driver." msgid "Reduxio" msgstr "Reduxio" msgid "" "Reimage a volume: ``POST /v3/volumes/{volume_id}/action`` with the ``os-" "reimage`` action. This call will result in a 202 (Accepted) response, but " "if the image's ``disk_format`` would require conversion to be written to the " "volume, the volume will go to ``error`` status." msgstr "" "Reimage a volume: ``POST /v3/volumes/{volume_id}/action`` with the ``os-" "reimage`` action. This call will result in a 202 (Accepted) response, but " "if the image's ``disk_format`` would require conversion to be written to the " "volume, the volume will go to ``error`` status." msgid "Remove mirror policy parameter from huawei driver." msgstr "Remove mirror policy parameter from Huawei driver." msgid "Removed - ``eqlx_chap_login``" msgstr "Removed - ``eqlx_chap_login``" msgid "Removed - ``eqlx_chap_password``" msgstr "Removed - ``eqlx_chap_password``" msgid "Removed - ``eqlx_cli_timeout``" msgstr "Removed - ``eqlx_cli_timeout``" msgid "Removed - ``eqlx_use_chap``" msgstr "Removed - ``eqlx_use_chap``" msgid "Removed datera_acl_allow_all option." msgstr "Removed datera_acl_allow_all option." msgid "Removed datera_num_replicas option." msgstr "Removed datera_num_replicas option." msgid "" "Removed deprecated LVMISCSIDriver and LVMISERDriver. These should be " "switched to use the LVMVolumeDriver with the desired iscsi_helper " "configuration set to the desired iSCSI helper." msgstr "" "Removed deprecated LVMISCSIDriver and LVMISERDriver. These should be " "switched to use the LVMVolumeDriver with the desired iscsi_helper " "configuration set to the desired iSCSI helper." msgid "" "Removed deprecated option ``kaminario_nodedup_substring`` in Kaminario FC " "and iSCSI Cinder drivers." msgstr "" "Removed deprecated option ``kaminario_nodedup_substring`` in Kaminario FC " "and iSCSI Cinder drivers." msgid "Removed deprecated option ``osapi_max_request_body_size``." msgstr "Removed deprecated option ``osapi_max_request_body_size``." 
msgid "Removed force_delete option from ScaleIO configuration." msgstr "Removed force_delete option from ScaleIO configuration." msgid "" "Removed restriction of hard coded iSCSI IP address to allow the use of " "multiple iSCSI portgroups." msgstr "" "Removed restriction of hard coded iSCSI IP address to allow the use of " "multiple iSCSI portgroups." msgid "" "Removed storwize_svc_connection_protocol config setting. Users will now need " "to set different values for volume_driver in cinder.conf. FC:volume_driver = " "cinder.volume.drivers.ibm.storwize_svc.storwize_svc_fc.StorwizeSVCFCDriver " "iSCSI:volume_driver = cinder.volume.drivers.ibm.storwize_svc." "storwize_svc_iscsi.StorwizeSVCISCSIDriver" msgstr "" "Removed storwize_svc_connection_protocol config setting. Users will now need " "to set different values for volume_driver in cinder.conf. FC:volume_driver = " "cinder.volume.drivers.ibm.storwize_svc.storwize_svc_fc.StorwizeSVCFCDriver " "iSCSI:volume_driver = cinder.volume.drivers.ibm.storwize_svc." "storwize_svc_iscsi.StorwizeSVCISCSIDriver" msgid "" "Removed the ability to create multiattach volumes by specifying " "`multiattach` parameter in the request body of a volume create operation. " "This functionality is unsafe, can lead to data loss, and has been deprecated " "since the Queens release. The recommended method for creating a multiattach " "volume is to use a volume type that supports multiattach. By default, " "volume types can only be created by the operator. Users who have a need for " "multiattach volumes should contact their operator if a suitable volume type " "is not available." msgstr "" "Removed the ability to create multiattach volumes by specifying " "`multiattach` parameter in the request body of a volume create operation. " "This functionality is unsafe, can lead to data loss, and has been deprecated " "since the Queens release. The recommended method for creating a multiattach " "volume is to use a volume type that supports multiattach. By default, " "volume types can only be created by the operator. Users who have a need for " "multiattach volumes should contact their operator if a suitable volume type " "is not available." msgid "" "Removed the ability to create volumes in a ScaleIO Storage Pool that has " "zero-padding disabled. A new configuration option " "``sio_allow_non_padded_volumes`` has been added to override this new " "behavior and allow unpadded volumes, but should not be enabled if multiple " "tenants will utilize volumes from a shared Storage Pool." msgstr "" "Removed the ability to create volumes in a ScaleIO Storage Pool that has " "zero-padding disabled. A new configuration option " "``sio_allow_non_padded_volumes`` has been added to override this new " "behaviour and allow unpadded volumes, but should not be enabled if multiple " "tenants will utilise volumes from a shared Storage Pool." msgid "" "Removed the ability to create volumes in a ScaleIO Storage Pool that has " "zero-padding disabled. A new configuration option had been added to override " "this new behavior and allow volume creation, but should not be enabled if " "multiple tenants will utilize volumes from a shared Storage Pool." msgstr "" "Removed the ability to create volumes in a ScaleIO Storage Pool that has " "zero-padding disabled. A new configuration option had been added to override " "this new behaviour and allow volume creation, but should not be enabled if " "multiple tenants will utilise volumes from a shared Storage Pool." 
msgid "Removed the deprecated NPIV options for the Storwize backend driver." msgstr "Removed the deprecated NPIV options for the Storwize backend driver." msgid "" "Removed the deprecated options for the Nova connection:> " "os_privileged_user{name, password, tenant, auth_url}, nova_catalog_info, " "nova_catalog_admin_info, nova_endpoint_template, " "nova_endpoint_admin_template, nova_ca_certificates_file, nova_api_insecure. " "From Pike, using the [nova] section is preferred to configure compute " "connection for Guest Assisted Snapshost or the InstanceLocalityFilter." msgstr "" "Removed the deprecated options for the Nova connection:> " "os_privileged_user{name, password, tenant, auth_url}, nova_catalog_info, " "nova_catalog_admin_info, nova_endpoint_template, " "nova_endpoint_admin_template, nova_ca_certificates_file, nova_api_insecure. " "From Pike, using the [nova] section is preferred to configure compute " "connection for Guest Assisted Snapshot or the InstanceLocalityFilter." msgid "" "Removed the need for deployers to run tox for config reference generation." msgstr "" "Removed the need for deployers to run tox for config reference generation." msgid "" "Removed the option ``allow_inuse_volume_type_modification`` which had been " "deprecated in Ocata release." msgstr "" "Removed the option ``allow_inuse_volume_type_modification`` which had been " "deprecated in Ocata release." msgid "" "Removing cinder-all binary. Instead use the individual binaries like cinder-" "api, cinder-backup, cinder-volume, cinder-scheduler." msgstr "" "Removing cinder-all binary. Instead use the individual binaries like cinder-" "api, cinder-backup, cinder-volume, cinder-scheduler." msgid "" "Removing deprecated file cinder.middleware.sizelimit. In your api-paste.ini, " "replace cinder.middleware.sizelimit:RequestBodySizeLimiter.factory with " "oslo_middleware.sizelimit:RequestBodySizeLimiter.factory" msgstr "" "Removing deprecated file cinder.middleware.sizelimit. In your api-paste.ini, " "replace cinder.middleware.sizelimit:RequestBodySizeLimiter.factory with " "oslo_middleware.sizelimit:RequestBodySizeLimiter.factory" msgid "" "Removing the Dell EqualLogic driver's deprecated configuration options. " "Please replace old options in your cinder.conf with the new one." msgstr "" "Removing the Dell EqualLogic driver's deprecated configuration options. " "Please replace old options in your cinder.conf with the new one." msgid "" "Rename Huawei18000ISCSIDriver and Huawei18000FCDriver to HuaweiISCSIDriver " "and HuaweiFCDriver." msgstr "" "Rename Huawei18000ISCSIDriver and Huawei18000FCDriver to HuaweiISCSIDriver " "and HuaweiFCDriver." msgid "Replaced with - ``chap_password``" msgstr "Replaced with - ``chap_password``" msgid "Replaced with - ``chap_username``" msgstr "Replaced with - ``chap_username``" msgid "Replaced with - ``ssh_conn_timeout``" msgstr "Replaced with - ``ssh_conn_timeout``" msgid "Replaced with - ``use_chap_auth``" msgstr "Replaced with - ``use_chap_auth``" msgid "Report pools in volume stats for Block Device Driver." msgstr "Report pools in volume stats for Block Device Driver." msgid "Request body::" msgstr "Request body::" msgid "" "Resolve issue with cross AZ migrations and retypes where the destination " "volume kept the source volume's AZ, so we ended up with a volume where the " "AZ does not match the backend. 
(bug 1747949)" msgstr "" "Resolve issue with cross AZ migrations and retypes where the destination " "volume kept the source volume's AZ, so we ended up with a volume where the " "AZ does not match the backend. (bug 1747949)" msgid "Retype support added to CloudByte iSCSI driver." msgstr "Retype support added to CloudByte iSCSI driver." msgid "Revert volume to snapshot" msgstr "Revert volume to snapshot" msgid "" "Revised the 'Extend Volume' process on the RAID Group to improve processing " "speed as follows:" msgstr "" "Revised the 'Extend Volume' process on the RAID Group to improve processing " "speed as follows:" msgid "Rocky Series Release Notes" msgstr "Rocky Series Release Notes" msgid "" "Rolling back a volume to a snapshot overwrites the current volume with the " "data from the snapshot, and the time it takes to complete this operation " "increases with the size of the volume." msgstr "" "Rolling back a volume to a snapshot overwrites the current volume with the " "data from the snapshot, and the time it takes to complete this operation " "increases with the size of the volume." msgid "" "SPDK target and volume drivers have been updated with new SPDK specific RPC " "calls due to deprecation of some old RPC calls. Starting from Ussuri release " "SPDK release v19.10 or higher is required." msgstr "" "SPDK target and volume drivers have been updated with new SPDK specific RPC " "calls due to deprecation of some old RPC calls. Starting from Ussuri release " "SPDK release v19.10 or higher is required." msgid "" "ScaleIO volumes need to be sized in increments of 8G. Handling added to " "volume extend operations to ensure the new size is rounded up to the nearest " "size when needed." msgstr "" "ScaleIO volumes need to be sized in increments of 8G. Handling added to " "volume extend operations to ensure the new size is rounded up to the nearest " "size when needed." msgid "Seagate driver: Added support for ``get_driver_options`` api call" msgstr "Seagate driver: Added support for ``get_driver_options`` API call" msgid "Security Issues" msgstr "Security Issues" msgid "" "See the `Fujitsu ETERNUS DX driver documentation `_ for details." msgstr "" "See the `Fujitsu ETERNUS DX driver documentation `_ for details." msgid "" "See the `os-brick 2.10.4 release notes `_ for more " "information." msgstr "" "See the `os-brick 2.10.4 release notes `_ for more " "information." msgid "" "See the `os-brick 2.10.5 release notes `_ for more " "information." msgstr "" "See the `os-brick 2.10.5 release notes `_ for more " "information." msgid "" "See the `os-brick 2.8.6 release notes `_ for more " "information." msgstr "" "See the `os-brick 2.8.6 release notes `_ for more " "information." msgid "" "See the `os-brick 2.8.7 release notes `_ for more " "information." msgstr "" "See the `os-brick 2.8.7 release notes `_ for more " "information." msgid "" "See the `os-brick 3.0.3 release notes `_ for more " "information." msgstr "" "See the `os-brick 3.0.3 release notes `_ for more " "information." msgid "" "See the help text for these options for more information. The default value " "of each option is 60, which has been the default value of " "``periodic_interval``." msgstr "" "See the help text for these options for more information. The default value " "of each option is 60, which has been the default value of " "``periodic_interval``." msgid "Separate create and update rules for volume metadata." msgstr "Separate create and update rules for volume metadata." msgid "Show CG Snapshot checks both tables." 
msgstr "Show CG Snapshot checks both tables." msgid "Show CG checks both tables." msgstr "Show CG checks both tables." msgid "" "Similarly, when creating an encrypted volume from a snapshot of an encrypted " "volume, if the amount of data in the original volume at the time the " "snapshot was created is very close to the gibibyte boundary given by the " "volume's size, it is possible for the data in the new volume to be silently " "truncated." msgstr "" "Similarly, when creating an encrypted volume from a snapshot of an encrypted " "volume, if the amount of data in the original volume at the time the " "snapshot was created is very close to the gibibyte boundary given by the " "volume's size, it is possible for the data in the new volume to be silently " "truncated." msgid "" "SmartCompression feature is disabled for the NexentaStor5 NFS driver. Thick " "provisioned volumes created as files containing zeros are not being " "compressed with standard compression if SmartCompression feature is enabled. " "This functionality will be fixed in a later release." msgstr "" "SmartCompression feature is disabled for the NexentaStor5 NFS driver. Thick " "provisioned volumes created as files containing zeros are not being " "compressed with standard compression if SmartCompression feature is enabled. " "This functionality will be fixed in a later release." msgid "" "So far, the behavior isn't anomalous; it's basically what you'd expect once " "you are aware that the encryption metadata must be stored in the volume and " "that it consumes some space." msgstr "" "So far, the behaviour isn't anomalous; it's basically what you'd expect once " "you are aware that the encryption metadata must be stored in the volume and " "that it consumes some space." msgid "SolidFire driver now supports IPv6 for management IP." msgstr "SolidFire driver now supports IPv6 for management IP." msgid "" "SolidFire driver: Driver no longer stores attach timestamp and instance as " "metadata on the storage array. Any metadata remaining in the array must be " "considered outdated and incorrect." msgstr "" "SolidFire driver: Driver no longer stores attach timestamp and instance as " "metadata on the storage array. Any metadata remaining in the array must be " "considered outdated and incorrect." msgid "" "SolidFire supports Synchronous, Asynchronous and SnapshotsOnly replication " "modes. This adds the config option `solidfire:replication_mode` to specify " "the mode to be used by Cinder. Its value can be `Sync`, `Async` or " "`SnapshotsOnly`." msgstr "" "SolidFire supports Synchronous, Asynchronous and SnapshotsOnly replication " "modes. This adds the config option `solidfire:replication_mode` to specify " "the mode to be used by Cinder. Its value can be `Sync`, `Async` or " "`SnapshotsOnly`." msgid "" "Solidfire fix extend volume with qos-Scaling to honor the increased size " "with increased iops on the extended volume." msgstr "" "Solidfire fix extend volume with qos-Scaling to honour the increased size " "with increased IOPS on the extended volume." msgid "" "Some current policies that were over-general (that is, they governed both " "read and write operations on a resource) are being replaced by a set of new " "policies that provide greater granularity. 
The following policies are " "DEPRECATED and will be removed in the Yoga release:" msgstr "" "Some current policies that were over-general (that is, they governed both " "read and write operations on a resource) are being replaced by a set of new " "policies that provide greater granularity. The following policies are " "DEPRECATED and will be removed in the Yoga release:" msgid "" "Some current rules defined in the policy file are being DEPRECATED and will " "be removed in the Yoga release. You only need to worry about this if you " "have used any of these rules yourself in when writing custom policies, as " "you cannot rely on the following rules being pre-defined in the Yoga release." msgstr "" "Some current rules defined in the policy file are being DEPRECATED and will " "be removed in the Yoga release. You only need to worry about this if you " "have used any of these rules yourself when writing custom policies, as you " "cannot rely on the following rules being pre-defined in the Yoga release." msgid "" "Some new backend storage drivers have been added, and many current drivers " "have added features and fixed bugs." msgstr "" "Some new backend storage drivers have been added, and many current drivers " "have added features and fixed bugs." msgid "" "Some of DISCO driver options were incorrectly read from ``[DEFAULT]`` " "section in the cinder.conf. Now those are correctly read from " "``[]`` section. This includes following options:" msgstr "" "Some of DISCO driver options were incorrectly read from ``[DEFAULT]`` " "section in the cinder.conf. Now those are correctly read from " "``[]`` section. This includes following options:" msgid "Some points to keep in mind:" msgstr "Some points to keep in mind:" msgid "" "Split nested quota support into a separate driver. In order to use nested " "quotas, change the following config ``quota_driver = cinder.quota." "NestedDbQuotaDriver`` after running the following admin API \"os-quota-sets/" "validate_setup_for_nested_quota_use\" command to ensure the existing quota " "values make sense to nest." msgstr "" "Split nested quota support into a separate driver. In order to use nested " "quotas, change the following config ``quota_driver = cinder.quota." "NestedDbQuotaDriver`` after running the following admin API \"os-quota-sets/" "validate_setup_for_nested_quota_use\" command to ensure the existing quota " "values make sense to nest." msgid "Start using reno to manage release notes." msgstr "Start using Reno to manage release notes." msgid "" "Starting from Mitaka release Cinder is having a tech preview of rolling " "upgrades support." msgstr "" "Starting from Mitaka release Cinder is having a tech preview of rolling " "upgrades support." msgid "" "Starting with API microversion 3.47, Cinder now supports the ability to " "create a volume directly from a backup. For instance, you can use the " "command: ``cinder create --backup-id `` in cinderclient." msgstr "" "Starting with API microversion 3.47, Cinder now supports the ability to " "create a volume directly from a backup. For instance, you can use the " "command: ``cinder create --backup-id `` in cinderclient." msgid "" "Starting with API microversion 3.64, an ``encryption_key_id`` attribute is " "included in the response body of volume and backup details when the " "associated volume is encrypted." msgstr "" "Starting with API microversion 3.64, an ``encryption_key_id`` attribute is " "included in the response body of volume and backup details when the " "associated volume is encrypted." 
msgid "" "Starting with API microversion 3.65, a ``consumes_quota`` field is included " "in the response body of volumes and snapshots to indicate whether the volume " "is using quota or not." msgstr "" "Starting with API microversion 3.65, a ``consumes_quota`` field is included " "in the response body of volumes and snapshots to indicate whether the volume " "is using quota or not." msgid "" "Starting with API microversion 3.70, encrypted volumes can be transferred to " "a user in a different project. Prior to microversion 3.70, the transfer is " "blocked due to the inability to transfer ownership of the volume's " "encryption key. With microverson 3.70, ownership of the encryption key is " "transferred when the volume is transferred." msgstr "" "Starting with API microversion 3.70, encrypted volumes can be transferred to " "a user in a different project. Prior to microversion 3.70, the transfer is " "blocked due to the inability to transfer ownership of the volume's " "encryption key. With microverson 3.70, ownership of the encryption key is " "transferred when the volume is transferred." msgid "Stein Series Release Notes" msgstr "Stein Series Release Notes" msgid "" "StorPool driver `bug #1939241 `_: Fixed the creation of encrypted StorPool volumes by " "dropping the needlessly and incompletely overridden `_attach_volume()` and " "`_detach_volume()` methods." msgstr "" "StorPool driver `bug #1939241 `_: Fixed the creation of encrypted StorPool volumes by " "dropping the needlessly and incompletely overridden `_attach_volume()` and " "`_detach_volume()` methods." msgid "" "StorPool driver `bug #2002995 `_: When retyping a volume on a StorPool backend to a different " "volume type also on that StorPool backend but using a different StorPool " "template, occasionally the retype operation would fail or the old volume " "could be left attached to a StorPool client. This issue has been fixed in " "this release." msgstr "" "StorPool driver `bug #2002995 `_: When retyping a volume on a StorPool backend to a different " "volume type also on that StorPool backend but using a different StorPool " "template, occasionally the retype operation would fail or the old volume " "could be left attached to a StorPool client. This issue has been fixed in " "this release." msgid "" "StorPool driver: implemented revert to snapshot, which happens immediately i." "e. without deleting and recreating the volume." msgstr "" "StorPool driver: implemented revert to snapshot, which happens immediately i." "e. without deleting and recreating the volume." msgid "" "StorPool driver: improved the way volumes are cloned into different StorPool " "templates (exposed as Cinder storage pools) if requested, eliminating some " "data duplication in the underlying StorPool cluster." msgstr "" "StorPool driver: improved the way volumes are cloned into different StorPool " "templates (exposed as Cinder storage pools) if requested, eliminating some " "data duplication in the underlying StorPool cluster." msgid "" "Storage assisted volume migration from one Pool/SLO/Workload combination to " "another, on the same array, via retype, for the VMAX driver. Both All Flash " "and Hybrid VMAX3 arrays are supported. VMAX2 is not supported." msgstr "" "Storage assisted volume migration from one Pool/SLO/Workload combination to " "another, on the same array, via retype, for the VMAX driver. Both All Flash " "and Hybrid VMAX3 arrays are supported. VMAX2 is not supported." 
msgid "" "Storwize SVC Driver: Fixes `bug 1749687 `__ previously lsvdisk() was called separately for every 'in-" "use' volume in order to check if the volume exists on the storage. In order " "to avoid problem of too long driver initialization now lsvdisk() is called " "once per pool." msgstr "" "Storwize SVC Driver: Fixes `bug 1749687 `__ previously lsvdisk() was called separately for every 'in-" "use' volume in order to check if the volume exists on the storage. In order " "to avoid problem of too long driver initialisation now lsvdisk() is called " "once per pool." msgid "Support Force backup of in-use cinder volumes for Nimble Storage." msgstr "Support Force backup of in-use cinder volumes for Nimble Storage." msgid "" "Support backup restore cancelation by changing the backup status to anything " "other than `restoring` using `cinder backup-reset-state`." msgstr "" "Support backup restore cancellation by changing the backup status to " "anything other than `restoring` using `cinder backup-reset-state`." msgid "Support balanced FC port selection for Huawei drivers." msgstr "Support balanced FC port selection for Huawei drivers." msgid "" "Support cinder_img_volume_type property in glance image metadata to specify " "volume type." msgstr "" "Support cinder_img_volume_type property in glance image metadata to specify " "volume type." msgid "Support for Consistency Groups in the NetApp E-Series Volume Driver." msgstr "Support for Consistency Groups in the NetApp E-Series Volume Driver." msgid "Support for Dot Hill AssuredSAN arrays has been removed." msgstr "Support for Dot Hill AssuredSAN arrays has been removed." msgid "Support for MySQL 5.5 has been dropped." msgstr "Support for MySQL 5.5 has been dropped." msgid "" "Support for NetApp E-Series has been removed. The NetApp Unified driver can " "now only be used with NetApp Clustered Data ONTAP." msgstr "" "Support for NetApp E-Series has been removed. The NetApp Unified driver can " "now only be used with NetApp Clustered Data ONTAP." msgid "" "Support for NetApp ONTAP 7 (previously known as \"Data ONTAP operating in " "7mode\") has been removed. The NetApp Unified driver can now only be used " "with NetApp Clustered Data ONTAP and NetApp E-Series storage systems. This " "removal affects all three storage protocols that were supported on for ONTAP " "7 - iSCSI, NFS and FC. Deployers are advised to consult the `migration " "support `_ provided " "to transition from ONTAP 7 to Clustered Data ONTAP operating system." msgstr "" "Support for NetApp ONTAP 7 (previously known as \"Data ONTAP operating in " "7mode\") has been removed. The NetApp Unified driver can now only be used " "with NetApp Clustered Data ONTAP and NetApp E-Series storage systems. This " "removal affects all three storage protocols that were supported on for ONTAP " "7 - iSCSI, NFS and FC. Deployers are advised to consult the `migration " "support `_ provided " "to transition from ONTAP 7 to Clustered Data ONTAP operating system." msgid "" "Support for ScaleIO 1.32 is now deprecated and will be removed in a future " "release." msgstr "" "Support for ScaleIO 1.32 is now deprecated and will be removed in a future " "release." msgid "Support for VMAX SRDF/Metro on VMAX cinder driver." msgstr "Support for VMAX SRDF/Metro on VMAX cinder driver." msgid "Support for compression on VMAX All Flash in the VMAX driver." msgstr "Support for compression on VMAX All Flash in the VMAX driver." 
msgid "" "Support for configuring Fibre Channel zoning on Brocade switches through " "Cinder Fibre Channel Zone Manager and Brocade Fibre Channel zone plugin. To " "zone in a Virtual Fabric, set the configuration option " "'fc_virtual_fabric_id' for the fabric." msgstr "" "Support for configuring Fibre Channel zoning on Brocade switches through " "Cinder Fibre Channel Zone Manager and Brocade Fibre Channel zone plugin. To " "zone in a Virtual Fabric, set the configuration option " "'fc_virtual_fabric_id' for the fabric." msgid "" "Support for creating a consistency group from consistency group in XtremIO." msgstr "" "Support for creating a consistency group from consistency group in XtremIO." msgid "Support for force backup of in-use Cinder volumes in Nimble driver." msgstr "Support for force backup of in-use Cinder volumes in Nimble driver." msgid "Support for iSCSI in INFINIDAT InfiniBox driver." msgstr "Support for iSCSI in INFINIDAT InfiniBox driver." msgid "Support for iSCSI multipath in Huawei driver." msgstr "Support for iSCSI multipath in Huawei driver." msgid "Support for iSCSI multipathing in EMC VMAX driver." msgstr "Support for iSCSI multipathing in EMC VMAX driver." msgid "Support for manage/ unmanage snapshots on VMAX cinder driver." msgstr "Support for manage/unmanage snapshots on VMAX cinder driver." msgid "" "Support for multiattach has been enabled for HPE MSA Storage since the " "14.0.0 release, but no release note was included to announce this change." msgstr "" "Support for multiattach has been enabled for HPE MSA Storage since the " "14.0.0 release, but no release note was included to announce this change." msgid "Support for multiattach is enabled for HPE MSA Storage" msgstr "Support for multiattach is enabled for HPE MSA Storage" msgid "" "Support for retype (storage-assisted migration) of replicated volumes on " "VMAX cinder driver." msgstr "" "Support for retype (storage-assisted migration) of replicated volumes on " "VMAX Cinder driver." msgid "Support for retype and volume migration for HPE Nimble Storage driver." msgstr "Support for retype and volume migration for HPE Nimble Storage driver." msgid "" "Support for retype volumes with different encryptions including changes from " "unencrypted types to encrypted types and vice-versa." msgstr "" "Support for retype volumes with different encryptions including changes from " "unencrypted types to encrypted types and vice-versa." msgid "" "Support for reverting a volume to a previous snapshot in VMAX cinder driver." msgstr "" "Support for reverting a volume to a previous snapshot in VMAX cinder driver." msgid "" "Support for running Cinder in Windows operating systems has been deprecated " "because of retirement of the Winstackers project." msgstr "" "Support for running Cinder in Windows operating systems has been deprecated " "because of the retirement of the Winstackers project." msgid "Support for snapshot backup using the optimal path in Huawei driver." msgstr "Support for snapshot backup using the optimal path in Huawei driver." msgid "" "Support for snapshots named in the backend as ``snapshot-`` is " "deprecated. Snapshots are now named in the backend as ``." "``." msgstr "" "Support for snapshots named in the backend as ``snapshot-`` is " "deprecated. Snapshots are now named in the backend as ``." "``." msgid "" "Support for the ``cinder.database.migration_backend`` entrypoint, which " "provided for configurable database migration backends, has been removed. 
" "This was never exercised and was a source of unnecessary complexity." msgstr "" "Support for the ``cinder.database.migration_backend`` entrypoint, which is " "provided for configurable database migration backends, has been removed. " "This was never exercised and was a source of unnecessary complexity." msgid "" "Support for use of 'fc_southbound_protocol' configuration setting in the " "Brocade FC SAN lookup service." msgstr "" "Support for use of 'fc_southbound_protocol' configuration setting in the " "Brocade FC SAN lookup service." msgid "Support for volume multi-attach in the INFINIDAT InfiniBox driver." msgstr "Support for volume multi-attach in the INFINIDAT InfiniBox driver." msgid "Support iSCSI configuration in replication in Huawei driver." msgstr "Support iSCSI configuration in replication in Huawei driver." msgid "" "Support manage/unmanage volume and manage/unmanage snapshot functions for " "the NEC volume driver." msgstr "" "Support managed/unmanaged volume and managed/unmanaged snapshot functions " "for the NEC volume driver." msgid "Support to sort snapshots with \"name\"." msgstr "Support to sort snapshots with \"name\"." msgid "" "Support transfer volume with snapshots by default in new V3 API 'v3/" "volume_transfers'. After microverison 3.55, if users don't want to transfer " "snapshots, they could use the new optional argument `no_snapshots=True` in " "request body of new transfer creation API." msgstr "" "Support transfer volume with snapshots by default in new V3 API 'v3/" "volume_transfers'. After microverison 3.55, if users don't want to transfer " "snapshots, they could use the new optional argument `no_snapshots=True` in " "request body of new transfer creation API." msgid "Supported ``project_id`` admin filters to limits API." msgstr "Supported ``project_id`` admin filters to limits API." msgid "Supported multi-pools for Hitachi driver and OEM storage drivers." msgstr "Supported multi-pools for Hitachi driver and OEM storage drivers." msgid "" "Swift backup driver: Added new configuration option " "``backup_swift_create_storage_policy`` for the Swift backup driver. If " "specified it will be used as the storage policy when creating the Swift " "Container, default value is None meaning it will not be used and Swift will " "use the system default. Please note that this only applies if a container " "doesn't exist as we cannot update the storage policy on an already existing " "container." msgstr "" "Swift backup driver: Added new configuration option " "``backup_swift_create_storage_policy`` for the Swift backup driver. If " "specified it will be used as the storage policy when creating the Swift " "Container, default value is None meaning it will not be used and Swift will " "use the system default. Please note that this only applies if a container " "doesn't exist as we cannot update the storage policy on an already existing " "container." msgid "" "TSM backup driver is removed. Please, migrate your backups before upgrade." msgstr "" "TSM backup driver is removed. Please, migrate your backups before upgrade." msgid "Tegile" msgstr "Tegile" msgid "The \"backing-up\" status is added to snapshot's status matrix." msgstr "The \"backing-up\" status is added to snapshot's status matrix." msgid "" "The \"cinder-manage logs\" commands have been removed. Information " "previously gathered by these commands may be found in cinder service and " "syslog logs." msgstr "" "The \"cinder-manage logs\" commands have been removed. 
Information " "previously gathered by these commands may be found in cinder service and " "syslog logs." msgid "The \"cinder-manage shell\" set of commands has been removed." msgstr "The \"cinder-manage shell\" set of commands has been removed." msgid "" "The 'backup_service_inithost_offload' configuration option now defaults to " "'True' instead of 'False'." msgstr "" "The 'backup_service_inithost_offload' configuration option now defaults to " "'True' instead of 'False'." msgid "" "The 'smbfs_allocation_info_file_path' SMBFS driver config option is now " "deprecated as we're no longer using a JSON file to store volume allocation " "data. This file had a considerable chance of getting corrupted." msgstr "" "The 'smbfs_allocation_info_file_path' SMBFS driver config option is now " "deprecated as we're no longer using a JSON file to store volume allocation " "data. This file had a considerable chance of getting corrupted." msgid "" "The 7-Mode Data ONTAP configuration of the NetApp Unified driver is " "deprecated as of the Ocata release and will be removed in the Queens " "release. Other configurations of the NetApp Unified driver, including " "Clustered Data ONTAP and E-series, are unaffected." msgstr "" "The 7-Mode Data ONTAP configuration of the NetApp Unified driver is " "deprecated as of the Ocata release and will be removed in the Queens " "release. Other configurations of the NetApp Unified driver, including " "Clustered Data ONTAP and E-series, are unaffected." msgid "" "The Block Storage API v2, which was deprecated in the Pike release, has been " "removed. If upgrading from a previous OpenStack release, it is recommended " "that you edit your ``/etc/cinder/api-paste.ini`` file to remove all " "references to v2. Additionally, the deprecated configuration option " "``enable_v2_api`` has been removed. If present in a configuration file, it " "will be silently ignored." msgstr "" "The Block Storage API v2, which was deprecated in the Pike release, has been " "removed. If upgrading from a previous OpenStack release, it is recommended " "that you edit your ``/etc/cinder/api-paste.ini`` file to remove all " "references to v2. Additionally, the deprecated configuration option " "``enable_v2_api`` has been removed. If present in a configuration file, it " "will be silently ignored." msgid "" "The Block Storage API v2, which was deprecated way back in the Pike release, " "has been removed. We gently remind you that Pike was a long time ago, and " "that version 3.0 of the Block Storage API was designed to be completely " "compatible with version 2." msgstr "" "The Block Storage API v2, which was deprecated way back in the Pike release, " "has been removed. We gently remind you that Pike was a long time ago and " "that version 3.0 of the Block Storage API was designed to be completely " "compatible with version 2." msgid "" "The Blockbridge driver has been marked as unsupported and is now deprecated. " "``enable_unsupported_drivers`` will need to be set to ``True`` in cinder." "conf to continue to use it." msgstr "" "The Blockbridge driver has been marked as unsupported and is now deprecated. " "``enable_unsupported_drivers`` will need to be set to ``True`` in cinder." "conf to continue to use it." msgid "" "The Blockbridge driver has been marked as unsupported and is now deprecated. " "``enable_unsupported_drivers`` will need to be set to ``True`` in cinder." "conf to continue to use it. If its support status does not change it will be " "removed in the next release." 
msgstr "" "The Blockbridge driver has been marked as unsupported and is now deprecated. " "``enable_unsupported_drivers`` will need to be set to ``True`` in cinder." "conf to continue to use it. If its support status does not change it will be " "removed in the next release." msgid "" "The Brocade Fibre Channel Zone Manager driver has been marked as unsupported " "and is now deprecated. It is subject to removal during the \"V\" development " "cycle, following the standard OpenStack deprecation policy. The config " "option ``enable_unsupported_driver`` will need to be set to ``True`` in the " "driver's section in cinder.conf to continue to use it in this release." msgstr "" "The Brocade Fibre Channel Zone Manager driver has been marked as unsupported " "and is now deprecated. It is subject to removal during the \"V\" development " "cycle, following the standard OpenStack deprecation policy. The config " "option ``enable_unsupported_driver`` will need to be set to ``True`` in the " "driver's section in cinder.conf to continue to use it in this release." msgid "" "The Brocade Fibre Channel Zone Manager driver was marked 'unsupported' in " "the Ussuri release because the vendor declined to support Python 3, whereas " "all OpenStack releases beginning with Ussuri support Python 3 only." msgstr "" "The Brocade Fibre Channel Zone Manager driver was marked 'unsupported' in " "the Ussuri release because the vendor declined to support Python 3, whereas " "all OpenStack releases beginning with Ussuri support Python 3 only." msgid "" "The Brocade Fibre Channel Zone Manager driver was marked 'unsupported' in " "the Ussuri release, and it continues as 'unsupported' in this release. If " "you choose to use the driver, the configuration option " "``enable_unsupported_driver`` must be set to ``True`` in the ``fc-zone-" "manager`` section in cinder.conf to allow its use." msgstr "" "The Brocade Fibre Channel Zone Manager driver was marked 'unsupported' in " "the Ussuri release, and it continues as 'unsupported' in this release. If " "you choose to use the driver, the configuration option " "``enable_unsupported_driver`` must be set to ``True`` in the ``fc-zone-" "manager`` section in cinder.conf to allow its use." msgid "" "The Castellan library used for encryption has deprecated the ``api_class`` " "config option. Configuration files using this should now be updated to use " "the ``backend`` option instead." msgstr "" "The Castellan library used for encryption has deprecated the ``api_class`` " "config option. Configuration files using this should now be updated to use " "the ``backend`` option instead." msgid "" "The Cinder API v1 was deprecated in the Juno release and defaulted to be " "disabled in the Ocata release. It is now removed completely. If upgrading " "from a previous version, it is recommended you edit your `/etc/cinder/api-" "paste.ini` file to remove all references to v1." msgstr "" "The Cinder API v1 was deprecated in the Juno release and defaulted to be " "disabled in the Ocata release. It is now removed completely. If upgrading " "from a previous version, it is recommended you edit your `/etc/cinder/api-" "paste.ini` file to remove all references to v1." msgid "" "The Cinder Linux SMBFS driver is now deprecated and will be removed during " "the following release. Deployers are encouraged to use the Windows SMBFS " "driver instead." msgstr "" "The Cinder Linux SMBFS driver is now deprecated and will be removed during " "the following release. 
Deployers are encouraged to use the Windows SMBFS " "driver instead." msgid "" "The Cinder Volume Backup service can now be run on Windows. It supports " "backing up volumes exposed by SMBFS/iSCSI Windows Cinder Volume backends, as " "well as any other Cinder backend that's accessible on Windows (e.g. SANs " "exposing volumes via iSCSI/FC)." msgstr "" "The Cinder Volume Backup service can now be run on Windows. It supports " "backing up volumes exposed by SMBFS/iSCSI Windows Cinder Volume backends, as " "well as any other Cinder backend that's accessible on Windows (e.g. SANs " "exposing volumes via iSCSI/FC)." msgid "" "The Cinder database can now only be ugpraded from changes since the Newton " "release. In order to upgrade from a version prior to that, you must now " "upgrade to at least Newton first, then to Queens or later." msgstr "" "The Cinder database can now only be upgraded from changes since the Newton " "release. In order to upgrade from a version prior to that, you must now " "upgrade to at least Newton first, then to Queens or later." msgid "" "The Cinder database can now only be upgraded from changes since the Kilo " "release. In order to upgrade from a version prior to that, you must now " "upgrade to at least Kilo first, then to Newton or later." msgstr "" "The Cinder database can now only be upgraded from changes since the Kilo " "release. In order to upgrade from a version prior to that, you must now " "upgrade to at least Kilo first, then to Newton or later." msgid "" "The Cinder database can now only be upgraded from changes since the Liberty " "release. In order to upgrade from a version prior to that, you must now " "upgrade to at least Liberty first, then to Ocata or later." msgstr "" "The Cinder database can now only be upgraded from changes since the Liberty " "release. In order to upgrade from a version prior to that, you must now " "upgrade to at least Liberty first, then to Ocata or later." msgid "" "The Cinder database can now only be upgraded from changes since the Mitaka " "release. In order to upgrade from a version prior to that, you must now " "upgrade to at least Mitaka first, then to Pike or later." msgstr "" "The Cinder database can now only be upgraded from changes since the Mitaka " "release. In order to upgrade from a version prior to that, you must now " "upgrade to at least Mitaka first, then to Pike or later." msgid "" "The Cinder database can now only be upgraded with changes since the Ocata " "release. In order to upgrade from a version prior to that, you must now " "upgrade to at least Ocata first." msgstr "" "The Cinder database can now only be upgraded with changes since the Ocata " "release. In order to upgrade from a version prior to that, you must now " "upgrade to at least Ocata first." msgid "" "The Cinder database can now only be upgraded with changes since the Queens " "release. In order to upgrade from a version prior to that, you must now " "upgrade to at least Queens first." msgstr "" "The Cinder database can now only be upgraded with changes since the Queens " "release. In order to upgrade from a version prior to that, you must now " "upgrade to at least Queens first." msgid "" "The Cinder project team would like to point out some issues addressed by " "this release:" msgstr "" "The Cinder project team would like to point out some issues addressed by " "this release:" msgid "" "The Cinder project wishes to clarify its policy concerning what versions of " "Ceph are supported by Cinder." 
msgstr "" "The Cinder project wishes to clarify its policy concerning what versions of " "Ceph are supported by Cinder." msgid "" "The Cinder v2 API has now been marked as deprecated. All new client code " "should use the v3 API. API v3 adds support for microversioned API calls. If " "no microversion is requested, the base 3.0 version for the v3 API is " "identical to v2." msgstr "" "The Cinder v2 API has now been marked as deprecated. All new client code " "should use the v3 API. API v3 adds support for microversioned API calls. If " "no microversion is requested, the base 3.0 version for the v3 API is " "identical to v2." msgid "" "The Cisco Fibre Channel Zone Manager driver has been marked as unsupported " "and is now deprecated. ``enable_unsupported_driver`` will need to be set to " "``True`` in the driver's section in cinder.conf to continue to use it." msgstr "" "The Cisco Fibre Channel Zone Manager driver has been marked as unsupported " "and is now deprecated. ``enable_unsupported_driver`` will need to be set to " "``True`` in the driver's section in cinder.conf to continue to use it." msgid "" "The Cisco Firbre Channel Zone Manager driver has been marked as unsupported " "and is now deprecated. ``enable_unsupported_driver`` will need to be set to " "``True`` in the driver's section in cinder.conf to continue to use it. If " "its support status does not change, they will be removed in the Queens " "development cycle." msgstr "" "The Cisco Fibre Channel Zone Manager driver has been marked as unsupported " "and is now deprecated. ``enable_unsupported_driver`` will need to be set to " "``True`` in the driver's section in cinder.conf to continue to use it. If " "its support status does not change, they will be removed in the Queens " "development cycle." msgid "" "The CloudByte driver has been marked as unsupported and is now deprecated. " "``enable_unsupported_drivers`` will need to be set to ``True`` in cinder." "conf to continue to use it." msgstr "" "The CloudByte driver has been marked as unsupported and is now deprecated. " "``enable_unsupported_drivers`` will need to be set to ``True`` in cinder." "conf to continue to use it." msgid "" "The CloudByte driver has been marked as unsupported and is now deprecated. " "``enable_unsupported_drivers`` will need to be set to ``True`` in cinder." "conf to continue to use it. If its support status does not change it will be " "removed in the next release." msgstr "" "The CloudByte driver has been marked as unsupported and is now deprecated. " "``enable_unsupported_drivers`` will need to be set to ``True`` in cinder." "conf to continue to use it. If its support status does not change it will be " "removed in the next release." msgid "" "The Coho driver has been marked as unsupported and is now deprecated. " "``enable_unsupported_driver`` will need to be set to ``True`` in the " "driver's section in cinder.conf to continue to use it." msgstr "" "The Coho driver has been marked as unsupported and is now deprecated. " "``enable_unsupported_driver`` will need to be set to ``True`` in the " "driver's section in cinder.conf to continue to use it." msgid "" "The Coho driver has been marked as unsupported and is now deprecated. " "``enable_unsupported_driver`` will need to be set to ``True`` in the " "driver's section in cinder.conf to continue to use it. If its support status " "does not change, they will be removed in the Queens development cycle." msgstr "" "The Coho driver has been marked as unsupported and is now deprecated. 
" "``enable_unsupported_driver`` will need to be set to ``True`` in the " "driver's section in cinder.conf to continue to use it. If its support status " "does not change, they will be removed in the Queens development cycle." msgid "" "The Consistency Group APIs have now been marked as deprecated and will be " "removed in a future release. Generic Volume Group APIs should be used " "instead." msgstr "" "The Consistency Group APIs have now been marked as deprecated and will be " "removed in a future release. Generic Volume Group APIs should be used " "instead." msgid "" "The DRBDManage driver is deprecated as of the Stein release and is removed " "in the Train release. Users should use the new LINSTOR driver instead." msgstr "" "The DRBDManage driver is deprecated as of the Stein release and is removed " "in the Train release. Users should use the new LINSTOR driver instead." msgid "" "The DRBDManage driver is now removed. Customers using the DRBDManage driver " "should not upgrade Cinder without first migrating all volumes from their " "DRBDManage backend to a supported storage backend such as LINSTOR. Failure " "to migrate volumes will result in not being able to access volumes backed by " "the DRBDManage storage backend." msgstr "" "The DRBDManage driver is now removed. Customers using the DRBDManage driver " "should not upgrade Cinder without first migrating all volumes from their " "DRBDManage backend to a supported storage backend such as LINSTOR. Failure " "to migrate volumes will result in not being able to access volumes backed by " "the DRBDManage storage backend." msgid "" "The DataCore drivers have been marked as unsupported and are now deprecated. " "``enable_unsupported_driver`` will need to be set to ``True`` in the " "driver's section in cinder.conf to continue to use it." msgstr "" "The DataCore drivers have been marked as unsupported and are now deprecated. " "``enable_unsupported_driver`` will need to be set to ``True`` in the " "driver's section in cinder.conf to continue to use it." msgid "" "The DataCore drivers have been marked as unsupported and are now deprecated. " "``enable_unsupported_driver`` will need to be set to ``True`` in the " "driver's section in cinder.conf to continue to use it. If its support status " "does not change, they will be removed in the Stein development cycle." msgstr "" "The DataCore drivers have been marked as unsupported and are now deprecated. " "``enable_unsupported_driver`` will need to be set to ``True`` in the " "driver's section in cinder.conf to continue to use it. If its support status " "does not change, they will be removed in the Stein development cycle." msgid "" "The DataCore drivers were marked as unsupported in the Rocky release and " "have now been removed." msgstr "" "The DataCore drivers were marked as unsupported in the Rocky release and " "have now been removed." msgid "" "The Datera driver now supports API v2.2, IOPS/GB and BW/GB settings, LDAP " "and CHAP, extended metadata attributes during volume creation and " "attachment. Most retype operations do not detach volumes anymore. Manageable " "Snapshots can be listed. Flash and Hybrid capacity information added." msgstr "" "The Datera driver now supports API v2.2, IOPS/GB and BW/GB settings, LDAP " "and CHAP, extended metadata attributes during volume creation and " "attachment. Most retype operations do not detach volumes any more. " "Manageable Snapshots can be listed. Flash and Hybrid capacity information " "added." 
msgid "" "The Datera driver removed v2 API support and the usage of initiator-groups" msgstr "" "The Datera driver removed v2 API support and the usage of initiator-groups" msgid "" "The Datera driver went under a major driver revamp/restructure and a new " "separate Datera Python-SDK requirement has been introduced" msgstr "" "The Datera driver went under a major driver revamp/restructure and a new " "separate Datera Python-SDK requirement has been introduced" msgid "" "The Dell EMC CoprHD drivers have been marked as unsupported and are now " "deprecated. ``enable_unsupported_driver`` will need to be set to ``True`` in " "the driver's section in cinder.conf to continue to use it." msgstr "" "The Dell EMC CoprHD drivers have been marked as unsupported and are now " "deprecated. ``enable_unsupported_driver`` will need to be set to ``True`` in " "the driver's section in cinder.conf to continue to use it." msgid "" "The Dell EMC CoprHD drivers have been marked as unsupported and are now " "deprecated. ``enable_unsupported_driver`` will need to be set to ``True`` in " "the driver's section in cinder.conf to continue to use it. If its support " "status does not change, they will be removed in the Stein development cycle." msgstr "" "The Dell EMC CoprHD drivers have been marked as unsupported and are now " "deprecated. ``enable_unsupported_driver`` will need to be set to ``True`` in " "the driver's section in cinder.conf to continue to use it. If its support " "status does not change, they will be removed in the Stein development cycle." msgid "" "The Dell EMC PS Series volume driver which supports Dell PS Series " "(EqualLogic) Storage is moving to maintenance mode in S Release and will be " "removed in T Release." msgstr "" "The Dell EMC PS Series volume driver which supports Dell PS Series " "(EqualLogic) Storage is moving to maintenance mode in S Release and will be " "removed in T Release." msgid "" "The Dell EMC SC configuration option ``excluded_domain_ip`` has been " "deprecated and will be removed in a future release. Deployments should now " "migrate to the option ``excluded_domain_ips`` for equivalent functionality." msgstr "" "The Dell EMC SC configuration option ``excluded_domain_ip`` has been " "deprecated and will be removed in a future release. Deployments should now " "migrate to the option ``excluded_domain_ips`` for equivalent functionality." msgid "" "The Disco driver has been marked as unsupported and is now deprecated. " "``enable_unsupported_driver`` will need to be set to ``True`` in the " "driver's section in cinder.conf to continue to use it." msgstr "" "The Disco driver has been marked as unsupported and is now deprecated. " "``enable_unsupported_driver`` will need to be set to ``True`` in the " "driver's section in cinder.conf to continue to use it." msgid "" "The Disco driver has been marked as unsupported and is now deprecated. " "``enable_unsupported_driver`` will need to be set to ``True`` in the " "driver's section in cinder.conf to continue to use it. If its support status " "does not change, it will be removed in the Stein development cycle." msgstr "" "The Disco driver has been marked as unsupported and is now deprecated. " "``enable_unsupported_driver`` will need to be set to ``True`` in the " "driver's section in cinder.conf to continue to use it. If its support status " "does not change, it will be removed in the Stein development cycle." msgid "" "The DotHill drivers has been marked as unsupported and are now deprecated. 
" "``enable_unsupported_drivers`` will need to be set to ``True`` in cinder." "conf to continue to use it. If its support status does not change it will be " "removed in the next release." msgstr "" "The DotHill drivers has been marked as unsupported and are now deprecated. " "``enable_unsupported_drivers`` will need to be set to ``True`` in cinder." "conf to continue to use it. If its support status does not change it will be " "removed in the next release." msgid "" "The DotHill drivers have been marked as unsupported and are now deprecated. " "``enable_unsupported_drivers`` will need to be set to ``True`` in cinder." "conf to continue to use it." msgstr "" "The DotHill drivers have been marked as unsupported and are now deprecated. " "``enable_unsupported_drivers`` will need to be set to ``True`` in cinder." "conf to continue to use it." msgid "" "The EqualLogic driver is moved to the dell_emc directory and has been " "rebranded to its current Dell EMC PS Series name. The volume_driver entry in " "cinder.conf needs to be changed to ``cinder.volume.drivers.dell_emc.ps." "PSSeriesISCSIDriver``." msgstr "" "The EqualLogic driver is moved to the dell_emc directory and has been " "rebranded to its current Dell EMC PS Series name. The volume_driver entry in " "cinder.conf needs to be changed to ``cinder.volume.drivers.dell_emc.ps." "PSSeriesISCSIDriver``." msgid "" "The Falconstor drivers have been marked as unsupported and are now " "deprecated. ``enable_unsupported_driver`` will need to be set to ``True`` in " "the driver's section in cinder.conf to continue to use it." msgstr "" "The Falconstor drivers have been marked as unsupported and are now " "deprecated. ``enable_unsupported_driver`` will need to be set to ``True`` in " "the driver's section in cinder.conf to continue to use it." msgid "" "The Falconstor drivers have been marked as unsupported and are now " "deprecated. ``enable_unsupported_driver`` will need to be set to ``True`` in " "the driver's section in cinder.conf to continue to use it. If its support " "status does not change, they will be removed in the Queens development cycle." msgstr "" "The Falconstor drivers have been marked as unsupported and are now " "deprecated. ``enable_unsupported_driver`` will need to be set to ``True`` in " "the driver's section in cinder.conf to continue to use it. If its support " "status does not change, they will be removed in the Queens development cycle." msgid "" "The FlexGroup pool is only supported using ONTAP storage 9.8 or greater." msgstr "" "The FlexGroup pool is only supported using ONTAP storage 9.8 or greater." msgid "" "The Fujitsu DX driver names have been updated to distinguish them from other " "Fujitsu storage. The module path ``cinder.volume.drivers.fujitsu`` should " "now be updated to ``cinder.volume.drivers.fujitsu.eternus_dx`` in ``cinder." "conf``. Support for the previous driver naming will continue to work, but " "will be removed in a future release." msgstr "" "The Fujitsu DX driver names have been updated to distinguish them from other " "Fujitsu storage. The module path ``cinder.volume.drivers.fujitsu`` should " "now be updated to ``cinder.volume.drivers.fujitsu.eternus_dx`` in ``cinder." "conf``. Support for the previous driver naming will continue to work, but " "will be removed in a future release." msgid "" "The FusionStorage driver has deprecated the configuration options " "\"dsware_isthin\", \"dsware_manager\", \"fusionstorageagent\", " "\"clone_volume_timeout\", \"pool_type\", and \"pool_id_filter\". 
These " "configuration options will be removed in the Train release(14.0.0)." msgstr "" "The FusionStorage driver has deprecated the configuration options " "\"dsware_isthin\", \"dsware_manager\", \"fusionstorageagent\", " "\"clone_volume_timeout\", \"pool_type\", and \"pool_id_filter\". These " "configuration options will be removed in the Train release(14.0.0)." msgid "" "The Glance v1 API has been deprecated and will soon be removed. Cinder " "support for using the v1 API was deprecated in the Pike release and is now " "no longer available. The ``glance_api_version`` configuration option to " "support version selection has now been removed." msgstr "" "The Glance v1 API has been deprecated and will soon be removed. Cinder " "support for using the v1 API was deprecated in the Pike release and is now " "no longer available. The ``glance_api_version`` configuration option to " "support version selection has now been removed." msgid "" "The GlusterFS backup driver has been deprecated. It will be removed in the " "2025.1 release." msgstr "" "The GlusterFS backup driver has been deprecated. It will be removed in the " "2025.1 release." msgid "" "The GlusterFS volume driver, which was deprecated in the Newton release, has " "been removed." msgstr "" "The GlusterFS volume driver, which was deprecated in the Newton release, has " "been removed." msgid "" "The HBSD (Hitachi Block Storage Driver) volume drivers which supports " "Hitachi Storages HUS100 and VSP family are deprecated. Support for HUS110 " "family will be no longer provided. Support on VSP will be provided as " "hitachi.vsp_* drivers." msgstr "" "The HBSD (Hitachi Block Storage Driver) volume drivers which supports " "Hitachi Storages HUS100 and VSP family are deprecated. Support for HUS110 " "family will be no longer provided. Support on VSP will be provided as " "hitachi.vsp_* drivers." msgid "" "The HGST Flash Storage Suite Driver was marked unsupported in the Rocky " "release because their 3rd Party CI system was not meeting Cinder's " "requirements. The system has not started reporting so the driver is now " "removed as of the Stein release." msgstr "" "The HGST Flash Storage Suite Driver was marked unsupported in the Rocky " "release because their 3rd Party CI system was not meeting Cinder's " "requirements. The system has not started reporting so the driver is now " "removed as of the Stein release." msgid "" "The HGST Flash Suite storage driver has been removed after completion of its " "deprecation period without a reliable 3rd Party CI system being supported. " "Customers using the HGST Flash Suite driver should not upgrade Cinder " "without first migrating all volumes from their HGST backend to a supported " "storage backend. Failure to migrate volumes will result in no longer being " "able to access volumes backed by the HGST storage backend." msgstr "" "The HGST Flash Suite storage driver has been removed after completion of its " "deprecation period without a reliable 3rd Party CI system being supported. " "Customers using the HGST Flash Suite driver should not upgrade Cinder " "without first migrating all volumes from their HGST backend to a supported " "storage backend. Failure to migrate volumes will result in no longer being " "able to access volumes backed by the HGST storage backend." msgid "" "The HGST driver has been marked as unsupported and is now deprecated. " "``enable_unsupported_driver`` will need to be set to ``True`` in the " "driver's section in cinder.conf to continue to use it." 
msgstr "" "The HGST driver has been marked as unsupported and is now deprecated. " "``enable_unsupported_driver`` will need to be set to ``True`` in the " "driver's section in cinder.conf to continue to use it." msgid "" "The HGST driver has been marked as unsupported and is now deprecated. " "``enable_unsupported_driver`` will need to be set to ``True`` in the " "driver's section in cinder.conf to continue to use it. If its support status " "does not change, it will be removed in the Stein development cycle." msgstr "" "The HGST driver has been marked as unsupported and is now deprecated. " "``enable_unsupported_driver`` will need to be set to ``True`` in the " "driver's section in cinder.conf to continue to use it. If its support status " "does not change, it will be removed in the Stein development cycle." msgid "" "The HPE LeftHand driver has been marked as unsupported and is now " "deprecated. ``enable_unsupported_drivers`` will need to be set to ``True`` " "in cinder.conf to continue to use them." msgstr "" "The HPE LeftHand driver has been marked as unsupported and is now " "deprecated. ``enable_unsupported_drivers`` will need to be set to ``True`` " "in cinder.conf to continue to use them." msgid "" "The HPE LeftHand driver has been marked as unsupported and is now " "deprecated. ``enable_unsupported_drivers`` will need to be set to ``True`` " "in cinder.conf to continue to use them. The driver will be removed in the " "Ussuri release." msgstr "" "The HPE LeftHand driver has been marked as unsupported and is now " "deprecated. ``enable_unsupported_drivers`` will need to be set to ``True`` " "in cinder.conf to continue to use them. The driver will be removed in the " "Ussuri release." msgid "" "The HPE Lefthand Driver (iSCSI) was marked unsupported in the Train release " "as the StoreVirtual product line has gone EOL and the LeftHand OS no longer " "receives upgrades. The driver has been removed in this release. All data " "on backends powered by HPE LeftHand OS should be migrated to a supported " "storage backend before upgrading your Cinder installation." msgstr "" "The HPE Lefthand Driver (iSCSI) was marked unsupported in the Train release " "as the StoreVirtual product line has gone EOL and the LeftHand OS no longer " "receives upgrades. The driver has been removed in this release. All data " "on backends powered by HPE LeftHand OS should be migrated to a supported " "storage backend before upgrading your Cinder installation." msgid "" "The HPE MSA driver options ``hpmsa_backend_name`` and ``hpmsa_backend_type`` " "options were deprecated in favor of ``hpmsa_pool_name`` and " "``hpmsa_pool_type`` to avoid confusion, and the ``hpmsa_api_protocol``, " "``hpmsa_verify_certificate``, and ``hpmsa_verify_certificate_path`` options " "were deprecated in favor of the standard ``driver_use_ssl``, " "``driver_ssl_cert_verify``, and ``driver_ssl_cert_path`` options. To retain " "the default behavior, add ``driver_use_ssl = true`` to back-end entries in " "``cinder.conf`` before the deprecated options are removed in a future " "release." msgstr "" "The HPE MSA driver options ``hpmsa_backend_name`` and ``hpmsa_backend_type`` " "options were deprecated in favor of ``hpmsa_pool_name`` and " "``hpmsa_pool_type`` to avoid confusion, and the ``hpmsa_api_protocol``, " "``hpmsa_verify_certificate``, and ``hpmsa_verify_certificate_path`` options " "were deprecated in favor of the standard ``driver_use_ssl``, " "``driver_ssl_cert_verify``, and ``driver_ssl_cert_path`` options. 
To retain " "the default behavior, add ``driver_use_ssl = true`` to back-end entries in " "``cinder.conf`` before the deprecated options are removed in a future " "release." msgid "" "The HPE XP driver has been marked as unsupported and is now deprecated. " "``enable_unsupported_drivers`` will need to be set to ``True`` in cinder." "conf to continue to use it." msgstr "" "The HPE XP driver has been marked as unsupported and is now deprecated. " "``enable_unsupported_drivers`` will need to be set to ``True`` in cinder." "conf to continue to use it." msgid "" "The HPE XP driver has been marked as unsupported and is now deprecated. " "``enable_unsupported_drivers`` will need to be set to ``True`` in cinder." "conf to continue to use it. If its support status does not change it will be " "removed in the next release." msgstr "" "The HPE XP driver has been marked as unsupported and is now deprecated. " "``enable_unsupported_drivers`` will need to be set to ``True`` in cinder." "conf to continue to use it. If its support status does not change it will be " "removed in the next release." msgid "" "The Hitachi Block Storage Driver (HBSD) and VSP driver have been marked as " "unsupported and are now deprecated. enable_unsupported_driver will need to " "be set to True in cinder.conf to continue to use them." msgstr "" "The Hitachi Block Storage Driver (HBSD) and VSP driver have been marked as " "unsupported and are now deprecated. enable_unsupported_driver will need to " "be set to True in cinder.conf to continue to use them." msgid "" "The Hitachi HNAS, HBSD, and VSP volume drivers were marked as deprecated in " "the Pike release and have now been removed. Hitachi storage drivers are now " "only available directly from Hitachi." msgstr "" "The Hitachi HNAS, HBSD, and VSP volume drivers were marked as deprecated in " "the Pike release and have now been removed. Hitachi storage drivers are now " "only available directly from Hitachi." msgid "" "The Hitachi NAS NFS driver has been marked as unsupported and is now " "deprecated. enable_unsupported_driver will need to be set to True in cinder." "conf to continue to use it." msgstr "" "The Hitachi NAS NFS driver has been marked as unsupported and is now " "deprecated. enable_unsupported_driver will need to be set to True in cinder." "conf to continue to use it." msgid "" "The Hitachi NAS Platform iSCSI driver was marked as not supported in the " "Ocata realease and has now been removed." msgstr "" "The Hitachi NAS Platform iSCSI driver was marked as not supported in the " "Ocata release and has now been removed." msgid "" "The Hitachi NAS iSCSI driver has been marked as unsupported and is now " "deprecated. ``enable_unsupported_drivers`` will need to be set to ``True`` " "in cinder.conf to continue to use it." msgstr "" "The Hitachi NAS iSCSI driver has been marked as unsupported and is now " "deprecated. ``enable_unsupported_drivers`` will need to be set to ``True`` " "in cinder.conf to continue to use it." msgid "" "The Hitachi NAS iSCSI driver has been marked as unsupported and is now " "deprecated. ``enable_unsupported_drivers`` will need to be set to ``True`` " "in cinder.conf to continue to use it. The driver will be removed in the next " "release." msgstr "" "The Hitachi NAS iSCSI driver has been marked as unsupported and is now " "deprecated. ``enable_unsupported_drivers`` will need to be set to ``True`` " "in cinder.conf to continue to use it. The driver will be removed in the next " "release." 
msgid "" "The Huawei FusionStorage Cinder driver (dsware) now supports OceanStor 100D " "Storage." msgstr "" "The Huawei FusionStorage Cinder driver (dsware) now supports OceanStor 100D " "Storage." msgid "" "The Huawei FusionStorage driver had previously been marked unsupported. The " "3rd Party CI has been restored, and the driver is now fully supported in the " "Ussuri release." msgstr "" "The Huawei FusionStorage driver had previously been marked unsupported. The " "3rd Party CI has been restored, and the driver is now fully supported in the " "Ussuri release." msgid "" "The Huawei Fusionstorage driver has been marked as unsupported and is now " "deprecated. ``enable_unsupported_driver`` will need to be set to ``True`` in " "the driver's section in cinder.conf to continue to use the driver." msgstr "" "The Huawei Fusionstorage driver has been marked as unsupported and is now " "deprecated. ``enable_unsupported_driver`` will need to be set to ``True`` in " "the driver's section in cinder.conf to continue to use the driver." msgid "" "The Huawei Fusionstorage driver has been marked as unsupported due to a lack " "of Python3.7 support and is now deprecated. ``enable_unsupported_driver`` " "will need to be set to ``True`` in the driver's section in cinder.conf to " "continue to use them. If Python3.7 support is not demonstrated, the driver " "will be removed in the 'U' development cycle." msgstr "" "The Huawei Fusionstorage driver has been marked as unsupported due to a lack " "of Python3.7 support and is now deprecated. ``enable_unsupported_driver`` " "will need to be set to ``True`` in the driver's section in cinder.conf to " "continue to use them. If Python3.7 support is not demonstrated, the driver " "will be removed in the 'U' development cycle." msgid "" "The IBM FlashSystem configuration options ``flashsystem_multipath_enabled`` " "was deprecated in the Mitaka release. It had no effect, so it can be safely " "removed and does not have a new equivalent config option." msgstr "" "The IBM FlashSystem configuration options ``flashsystem_multipath_enabled`` " "was deprecated in the Mitaka release. It had no effect, so it can be safely " "removed and does not have a new equivalent config option." msgid "" "The IBM Flashsystem drivers have been marked as unsupported and are now " "deprecated. ``enable_unsupported_driver`` will need to be set to ``True`` in " "the driver's section in cinder.conf to continue to use them." msgstr "" "The IBM Flashsystem drivers have been marked as unsupported and are now " "deprecated. ``enable_unsupported_driver`` will need to be set to ``True`` in " "the driver's section in cinder.conf to continue to use them." msgid "" "The IBM Flashsystem drivers have been marked as unsupported and are now " "deprecated. ``enable_unsupported_driver`` will need to be set to ``True`` in " "the driver's section in cinder.conf to continue to use them. If the support " "status does not change, the drivers will be removed in the 'U' development " "cycle." msgstr "" "The IBM Flashsystem drivers have been marked as unsupported and are now " "deprecated. ``enable_unsupported_driver`` will need to be set to ``True`` in " "the driver's section in cinder.conf to continue to use them. If the support " "status does not change, the drivers will be removed in the 'U' development " "cycle." msgid "" "The IBM GPFS driver has been marked as unsupported and is now deprecated. 
" "``enable_unsupported_driver`` will need to be set to ``True`` in the " "driver's section in cinder.conf to continue to use it." msgstr "" "The IBM GPFS driver has been marked as unsupported and is now deprecated. " "``enable_unsupported_driver`` will need to be set to ``True`` in the " "driver's section in cinder.conf to continue to use it." msgid "" "The IBM GPFS driver has been marked as unsupported and is now deprecated. " "``enable_unsupported_driver`` will need to be set to ``True`` in the " "driver's section in cinder.conf to continue to use it. If its support status " "does not change, it will be removed in the 'U' development cycle." msgstr "" "The IBM GPFS driver has been marked as unsupported and is now deprecated. " "``enable_unsupported_driver`` will need to be set to ``True`` in the " "driver's section in cinder.conf to continue to use it. If its support status " "does not change, it will be removed in the 'U' development cycle." msgid "" "The IBM Storage drivers (XIV & DS8k) have been marked as unsupported and are " "now deprecated. ``enable_unsupported_driver`` will need to be set to " "``True`` in the driver's section in cinder.conf to continue to use them." msgstr "" "The IBM Storage drivers (XIV & DS8k) have been marked as unsupported and are " "now deprecated. ``enable_unsupported_driver`` will need to be set to " "``True`` in the driver's section in cinder.conf to continue to use them." msgid "" "The IBM Storage drivers (XIV & DS8k) have been marked as unsupported and are " "now deprecated. ``enable_unsupported_driver`` will need to be set to " "``True`` in the driver's section in cinder.conf to continue to use them. If " "the support status does not change, the drivers will be removed in the 'U' " "development cycle." msgstr "" "The IBM Storage drivers (XIV & DS8k) have been marked as unsupported and are " "now deprecated. ``enable_unsupported_driver`` will need to be set to " "``True`` in the driver's section in cinder.conf to continue to use them. If " "the support status does not change, the drivers will be removed in the 'U' " "development cycle." msgid "" "The IBM Storwize drivers have been marked as unsupported and are now " "deprecated. ``enable_unsupported_driver`` will need to be set to ``True`` in " "the driver's section in cinder.conf to continue to use them." msgstr "" "The IBM Storwize drivers have been marked as unsupported and are now " "deprecated. ``enable_unsupported_driver`` will need to be set to ``True`` in " "the driver's section in cinder.conf to continue to use them." msgid "" "The IBM Storwize drivers have been marked as unsupported and are now " "deprecated. ``enable_unsupported_driver`` will need to be set to ``True`` in " "the driver's section in cinder.conf to continue to use them. If the support " "status does not change, they will be removed in the 'U' development cycle." msgstr "" "The IBM Storwize drivers have been marked as unsupported and are now " "deprecated. ``enable_unsupported_driver`` will need to be set to ``True`` in " "the driver's section in cinder.conf to continue to use them. If the support " "status does not change, they will be removed in the 'U' development cycle." msgid "" "The IBM_Storage driver has been open sourced. This means that there is no " "more need to download the package from the IBM site. The only requirement " "remaining is to install pyxcli, which is available through pypi::" msgstr "" "The IBM_Storage driver has been open sourced. 
This means that there is no " "more need to download the package from the IBM site. The only requirement " "remaining is to install pyxcli, which is available through pypi::" msgid "" "The IET iSCSI target driver has been marked deprecated and will be removed " "in the \"V\" release. The IET iSCSI target project is no longer active and " "is not supported by all distributions. It is recommended to migrate to a " "supported distribution and iSCSI target prior to upgrading." msgstr "" "The IET iSCSI target driver has been marked deprecated and will be removed " "in the \"V\" release. The IET iSCSI target project is no longer active and " "is not supported by all distributions. It is recommended to migrate to a " "supported distribution and iSCSI target prior to upgrading." msgid "" "The ISERTgtAdm target was deprecated in the Kilo release. It has now been " "removed. You should now just use LVMVolumeDriver and specify iscsi_helper " "for the target driver you wish to use. In order to enable iser, please set " "iscsi_protocol=iser with lioadm or tgtadm target helpers." msgstr "" "The ISERTgtAdm target was deprecated in the Kilo release. It has now been " "removed. You should now just use LVMVolumeDriver and specify iscsi_helper " "for the target driver you wish to use. In order to enable iser, please set " "iscsi_protocol=iser with lioadm or tgtadm target helpers." msgid "" "The ITRI DISCO storage driver has been removed after completion of its " "deprecation period without a reliable 3rd Party CI system being supported. " "Customers using the ITRI DISCO driver should not upgrade Cinder without " "first migrating all volumes from their DISCO backend to a supported storage " "backend. Failure to migrate volumes will result in no longer being able to " "access volumes back by the ITRI DISCO storage backend." msgstr "" "The ITRI DISCO storage driver has been removed after completion of its " "deprecation period without a reliable 3rd Party CI system being supported. " "Customers using the ITRI DISCO driver should not upgrade Cinder without " "first migrating all volumes from their DISCO backend to a supported storage " "backend. Failure to migrate volumes will result in no longer being able to " "access volumes back by the ITRI DISCO storage backend." msgid "" "The ITRI DISCO storage driver was marked unsupported in Rocky due to 3rd " "Party CI not meeting Cinder's requirements. As a result the driver is " "removed starting from the Stein release." msgstr "" "The ITRI DISCO storage driver was marked unsupported in Rocky due to 3rd " "Party CI not meeting Cinder's requirements. As a result the driver is " "removed starting from the Stein release." msgid "" "The Infortrend drivers have been marked as unsupported and are now " "deprecated. ``enable_unsupported_driver`` will need to be set to ``True`` in " "the driver's section in cinder.conf to continue to use them." msgstr "" "The Infortrend drivers have been marked as unsupported and are now " "deprecated. ``enable_unsupported_driver`` will need to be set to ``True`` in " "the driver's section in cinder.conf to continue to use them." msgid "" "The Infortrend drivers have been marked as unsupported and are now " "deprecated. ``enable_unsupported_driver`` will need to be set to ``True`` in " "the driver's section in cinder.conf to continue to use them. If their " "support status does not change, they will be removed in the Queens " "development cycle." msgstr "" "The Infortrend drivers have been marked as unsupported and are now " "deprecated. 
``enable_unsupported_driver`` will need to be set to ``True`` in " "the driver's section in cinder.conf to continue to use them. If their " "support status does not change, they will be removed in the Queens " "development cycle." msgid "" "The LINBIT DRBDManage volume driver is moving to maintenance mode in Stein " "Release and will be removed in T Release." msgstr "" "The LINBIT DRBDManage volume driver is moving to maintenance mode in Stein " "Release and will be removed in T Release." msgid "The LINSTOR Driver for Cinder now supports LINSTOR 0.9.12." msgstr "The LINSTOR Driver for Cinder now supports LINSTOR 0.9.12." msgid "" "The LINSTOR driver for Cinder supports LINSTOR 0.9.12. The driver supports " "LINSTOR backend using REST API." msgstr "" "The LINSTOR driver for Cinder supports LINSTOR 0.9.12. The driver supports " "LINSTOR backend using REST API." msgid "" "The LVM driver specific `lvm_max_over_subscription_ratio` setting had been " "deprecated and is now removed. Over subscription should now be managed using " "the generic `max_over_subscription_ratio` setting." msgstr "" "The LVM driver specific `lvm_max_over_subscription_ratio` setting had been " "deprecated and is now removed. Over subscription should now be managed using " "the generic `max_over_subscription_ratio` setting." msgid "" "The Lenovo driver options ``lenovo_backend_name`` and " "``lenovo_backend_type`` options were deprecated in favor of " "``lenovo_pool_name`` and ``lenovo_pool_type`` to avoid confusion, and the " "``lenovo_api_protocol``, ``lenovo_verify_certificate``, and " "``lenovo_verify_certificate_path`` options were deprecated in favor of the " "standard ``driver_use_ssl``, ``driver_ssl_cert_verify``, and " "``driver_ssl_cert_path`` options. To retain the default behavior, add " "``driver_use_ssl = true`` to back-end entries in ``cinder.conf`` before the " "deprecated options are removed in a future release." msgstr "" "The Lenovo driver options ``lenovo_backend_name`` and " "``lenovo_backend_type`` options were deprecated in favour of " "``lenovo_pool_name`` and ``lenovo_pool_type`` to avoid confusion, and the " "``lenovo_api_protocol``, ``lenovo_verify_certificate``, and " "``lenovo_verify_certificate_path`` options were deprecated in favor of the " "standard ``driver_use_ssl``, ``driver_ssl_cert_verify``, and " "``driver_ssl_cert_path`` options. To retain the default behaviour, add " "``driver_use_ssl = true`` to back-end entries in ``cinder.conf`` before the " "deprecated options are removed in a future release." msgid "The NFS driver now supports the creation of encrypted volumes." msgstr "The NFS driver now supports the creation of encrypted volumes." msgid "" "The NetApp E-Series drivers are deprecated as of the Rocky release and will " "be removed in the Stein release. Other configurations of the NetApp driver, " "i.e Clustered Data ONTAP and Solidfire, are unaffected." msgstr "" "The NetApp E-Series drivers are deprecated as of the Rocky release and will " "be removed in the Stein release. Other configurations of the NetApp driver, " "i.e Clustered Data ONTAP and Solidfire, are unaffected." msgid "" "The NetApp E-series driver has been fixed to correctly report the " "\"provisioned_capacity_gb\". Now it sums the capacity of all the volumes in " "the configured backend to get the correct value. This bug fix affects all " "the protocols supported by the driver (FC and iSCSI)." msgstr "" "The NetApp E-series driver has been fixed to correctly report the " "\"provisioned_capacity_gb\". 
Now it sums the capacity of all the volumes in " "the configured backend to get the correct value. This bug fix affects all " "the protocols supported by the driver (FC and iSCSI)." msgid "" "The NetApp ONTAP driver supports a new configuration option " "``netapp_api_trace_pattern`` to enable filtering backend API interactions to " "log. This option must be specified in the backend section when desired and " "it accepts a valid python regular expression." msgstr "" "The NetApp ONTAP driver supports a new configuration option " "``netapp_api_trace_pattern`` to enable filtering backend API interactions to " "log. This option must be specified in the backend section when desired and " "it accepts a valid python regular expression." msgid "" "The NetApp cDOT driver now sets the ``replication_status`` attribute " "appropriately on volumes created within replicated backends when using host " "level replication." msgstr "" "The NetApp cDOT driver now sets the ``replication_status`` attribute " "appropriately on volumes created within replicated backends when using host " "level replication." msgid "" "The NetApp cDOT driver operating with NFS protocol has been fixed to manage " "volumes correctly when ``nas_secure_file_operations`` option has been set to " "False." msgstr "" "The NetApp cDOT driver operating with NFS protocol has been fixed to manage " "volumes correctly when ``nas_secure_file_operations`` option has been set to " "False." msgid "" "The NetApp cDOT drivers report to the scheduler, for each FlexVol pool, the " "fraction of the shared block limit that has been consumed by dedupe and " "cloning operations. This value, netapp_dedupe_used_percent, may be used in " "the filter & goodness functions for better placement of new Cinder volumes." msgstr "" "The NetApp cDOT drivers report to the scheduler, for each FlexVol pool, the " "fraction of the shared block limit that has been consumed by dedupe and " "cloning operations. This value, netapp_dedupe_used_percent, may be used in " "the filter & goodness functions for better placement of new Cinder volumes." msgid "" "The Nexenta Edge driver has been marked as unsupported and is now " "deprecated. ``enable_unsupported_driver`` will need to be set to ``True`` in " "the driver's section in cinder.conf to continue to use it." msgstr "" "The Nexenta Edge driver has been marked as unsupported and is now " "deprecated. ``enable_unsupported_driver`` will need to be set to ``True`` in " "the driver's section in cinder.conf to continue to use it." msgid "" "The Nexenta Edge driver has been marked as unsupported and is now " "deprecated. ``enable_unsupported_driver`` will need to be set to ``True`` in " "the driver's section in cinder.conf to continue to use it. If its support " "status does not change, it will be removed in the 'T' development cycle." msgstr "" "The Nexenta Edge driver has been marked as unsupported and is now " "deprecated. ``enable_unsupported_driver`` will need to be set to ``True`` in " "the driver's section in cinder.conf to continue to use it. If its support " "status does not change, it will be removed in the 'T' development cycle." msgid "" "The Nexenta Edge drivers has been marked as unsupported and are now " "deprecated. ``enable_unsupported_drivers`` will need to be set to ``True`` " "in cinder.conf to continue to use it. If its support status does not change " "it will be removed in the next release." msgstr "" "The Nexenta Edge drivers has been marked as unsupported and are now " "deprecated. 
``enable_unsupported_drivers`` will need to be set to ``True`` " "in cinder.conf to continue to use it. If its support status does not change " "it will be removed in the next release." msgid "" "The Nexenta Edge drivers have been marked as unsupported and are now " "deprecated. ``enable_unsupported_drivers`` will need to be set to ``True`` " "in cinder.conf to continue to use it." msgstr "" "The Nexenta Edge drivers have been marked as unsupported and are now " "deprecated. ``enable_unsupported_drivers`` will need to be set to ``True`` " "in cinder.conf to continue to use it." msgid "" "The Nexenta Edge storage driver has been removed after completion of its " "deprecation period without a reliable 3rd Party CI system being supported. " "Customers using the Nexenta Edge driver should not upgrade Cinder without " "first migrating all volumes from their Nexenta backend to a supported " "storage backend. Failure to migrate volumes will result in no longer being " "able to access volumes back by the Nexenta Edge storage backend." msgstr "" "The Nexenta Edge storage driver has been removed after completion of its " "deprecation period without a reliable 3rd Party CI system being supported. " "Customers using the Nexenta Edge driver should not upgrade Cinder without " "first migrating all volumes from their Nexenta backend to a supported " "storage backend. Failure to migrate volumes will result in no longer being " "able to access volumes back by the Nexenta Edge storage backend." msgid "" "The Nexenta Edge storage driver was marked unsupported in Stein due to 3rd " "Party CI not meeting Cinder's requirements. As a result the driver is " "removed starting from the Train release." msgstr "" "The Nexenta Edge storage driver was marked unsupported in Stein due to 3rd " "Party CI not meeting Cinder's requirements. As a result the driver is " "removed starting from the Train release." msgid "" "The Nimble Storage became a part of the HPE family of Storage solutions. The " "cinder Nimble driver has been relocated to the ``cinder.volume.driver.hpe`` " "module to reflect this. The impact on operators is that the module path " "``cinder.volume.drivers.nimble.NimbleISCSIDriver`` and ``cinder.volume." "drivers.nimble.FCDriver`` should now be updated to ``cinder.volume.drivers." "hpe.nimble.NimbleISCSIDriver`` and ``cinder.volume.drivers.hpe.nimble." "NimbleFCDriver`` respectively in ``cinder.conf``" msgstr "" "Nimble Storage became a part of the HPE family of Storage solutions. The " "cinder Nimble driver has been relocated to the ``cinder.volume.driver.hpe`` " "module to reflect this. The impact on operators is that the module path " "``cinder.volume.drivers.nimble.NimbleISCSIDriver`` and ``cinder.volume." "drivers.nimble.FCDriver`` should now be updated to ``cinder.volume.drivers." "hpe.nimble.NimbleISCSIDriver`` and ``cinder.volume.drivers.hpe.nimble." "NimbleFCDriver`` respectively in ``cinder.conf``" msgid "" "The Nimble backend driver has been updated to use REST for array " "communication." msgstr "" "The Nimble backend driver has been updated to use REST for array " "communication." msgid "" "The ONTAP drivers (\"7mode\" and \"cmode\") have been fixed to not report " "consumed space as \"provisioned_capacity_gb\". They instead rely on the " "cinder scheduler's calculation of \"provisioned_capacity_gb\". This fixes " "the oversubscription miscalculations with the ONTAP drivers. This bugfix " "affects all three protocols supported by these drivers (iSCSI/FC/NFS)." 
msgstr "" "The ONTAP drivers (\"7mode\" and \"cmode\") have been fixed to not report " "consumed space as \"provisioned_capacity_gb\". They instead rely on the " "cinder scheduler's calculation of \"provisioned_capacity_gb\". This fixes " "the over-subscription miscalculations with the ONTAP drivers. This bugfix " "affects all three protocols supported by these drivers (iSCSI/FC/NFS)." msgid "" "The Oracle ZFSSA drivers have been been marked as unsupported and are now " "deprecated. ``enable_unsupported_driver`` will need to be set to ``True`` in " "the driver's section in cinder.conf to continue to use them. Oracle has " "indicated that they don't plan to continue to support the drivers so they " "will be removed in the 'U' development cycle." msgstr "" "The Oracle ZFSSA drivers have been been marked as unsupported and are now " "deprecated. ``enable_unsupported_driver`` will need to be set to ``True`` in " "the driver's section in cinder.conf to continue to use them. Oracle has " "indicated that they don't plan to continue to support the drivers so they " "will be removed in the 'U' development cycle." msgid "" "The Oracle ZFSSA drivers have been marked as unsupported and are now " "deprecated. ``enable_unsupported_driver`` will need to be set to ``True`` in " "the driver's section in cinder.conf to continue to use them." msgstr "" "The Oracle ZFSSA drivers have been marked as unsupported and are now " "deprecated. ``enable_unsupported_driver`` will need to be set to ``True`` in " "the driver's section in cinder.conf to continue to use them." msgid "" "The PowerMax Cinder driver has removed the environment configuration option " "san_rest_port in favour of the Cinder standard option san_api_port." msgstr "" "The PowerMax Cinder driver has removed the environment configuration option " "san_rest_port in favour of the Cinder standard option san_api_port." msgid "" "The PowerMax for Cinder driver now implements noCopy mode for links between " "SnapVX source and target. This change will improve space efficiency by " "using pointers instead of copied tracks when source and target volumes are " "linked." msgstr "" "The PowerMax for Cinder driver now implements noCopy mode for links between " "SnapVX source and target. This change will improve space efficiency by " "using pointers instead of copied tracks when source and target volumes are " "linked." msgid "" "The Prophetstor driver has been marked as unsupported and is now deprecated. " "``enable_unsupported_driver`` will need to be set to ``True`` in the " "driver's section in cinder.conf to continue to use it." msgstr "" "The Prophetstor driver has been marked as unsupported and is now deprecated. " "``enable_unsupported_driver`` will need to be set to ``True`` in the " "driver's section in cinder.conf to continue to use it." msgid "" "The Prophetstor driver has been marked as unsupported and is now deprecated. " "``enable_unsupported_driver`` will need to be set to ``True`` in the " "driver's section in cinder.conf to continue to use it. If its support status " "does not change, it will be removed in the 'U' development cycle." msgstr "" "The Prophetstor driver has been marked as unsupported and is now deprecated. " "``enable_unsupported_driver`` will need to be set to ``True`` in the " "driver's section in cinder.conf to continue to use it. If its support status " "does not change, it will be removed in the 'U' development cycle." msgid "" "The QNAP driver has been marked as unsupported and is now deprecated. 
" "``enable_unsupported_drivers`` will need to be set to ``True`` in cinder." "conf to continue to use it." msgstr "" "The QNAP driver has been marked as unsupported and is now deprecated. " "``enable_unsupported_drivers`` will need to be set to ``True`` in cinder." "conf to continue to use it." msgid "" "The QNAP driver has been marked as unsupported and is now deprecated. " "``enable_unsupported_drivers`` will need to be set to ``True`` in cinder." "conf to continue to use it. If its support status does not change it will be " "removed in the next release." msgstr "" "The QNAP driver has been marked as unsupported and is now deprecated. " "``enable_unsupported_drivers`` will need to be set to ``True`` in cinder." "conf to continue to use it. If its support status does not change it will be " "removed in the next release." msgid "" "The Quobyte Cinder driver now supports identifying Quobyte mounts via the " "mounts fstype field." msgstr "" "The Quobyte Cinder driver now supports identifying Quobyte mounts via the " "mounts fstype field." msgid "" "The Quobyte driver has been marked as unsupported and is now deprecated. " "``enable_unsupported_driver`` will need to be set to ``True`` in the " "driver's section in cinder.conf to continue to use it." msgstr "" "The Quobyte driver has been marked as unsupported and is now deprecated. " "``enable_unsupported_driver`` will need to be set to ``True`` in the " "driver's section in cinder.conf to continue to use it." msgid "" "The Quobyte driver has been marked as unsupported and is now deprecated. " "``enable_unsupported_driver`` will need to be set to ``True`` in the " "driver's section in cinder.conf to continue to use it. As an unsupported " "driver, it is eligible for removal from the cinder code base if its third " "party CI system is not fixed." msgstr "" "The Quobyte driver has been marked as unsupported and is now deprecated. " "``enable_unsupported_driver`` will need to be set to ``True`` in the " "driver's section in cinder.conf to continue to use it. As an unsupported " "driver, it is eligible for removal from the Cinder code base if its third-" "party CI system is not fixed." msgid "" "The RBD driver no longer uses the \"volume_tmp_dir\" option to set where " "temporary files for image conversion are stored. Set " "\"image_conversion_dir\" to configure this in Ocata." msgstr "" "The RBD driver no longer uses the \"volume_tmp_dir\" option to set where " "temporary files for image conversion are stored. Set " "\"image_conversion_dir\" to configure this in Ocata." msgid "" "The Reduxio driver has been marked unsupported and is now deprecated. " "``use_unsupported_driver`` will need to be set to ``True`` in the driver's " "section in cinder.conf to use it." msgstr "" "The Reduxio driver has been marked unsupported and is now deprecated. " "``use_unsupported_driver`` will need to be set to ``True`` in the driver's " "section in cinder.conf to use it." msgid "" "The Reduxio driver has been marked unsupported and is now deprecated. " "``use_unsupported_driver`` will need to be set to ``True`` in the driver's " "section in cinder.conf to use it. If its support status does not change, the " "driver will be removed in the Queens development cycle." msgstr "" "The Reduxio driver has been marked unsupported and is now deprecated. " "``use_unsupported_driver`` will need to be set to ``True`` in the driver's " "section in cinder.conf to use it. If its support status does not change, the " "driver will be removed in the Queens development cycle." 
msgid "" "The SMBFS driver now exposes share information to the scheduler via pools. " "The pool names are configurable, defaulting to the share names." msgstr "" "The SMBFS driver now exposes share information to the scheduler via pools. " "The pool names are configurable, defaulting to the share names." msgid "" "The SMBFS driver now supports the 'snapshot attach' feature. Special care " "must be taken when attaching snapshots though, as writing to a snapshot will " "corrupt the differencing image chain." msgstr "" "The SMBFS driver now supports the 'snapshot attach' feature. Special care " "must be taken when attaching snapshots though, as writing to a snapshot will " "corrupt the differencing image chain." msgid "" "The SMBFS driver now supports the volume manage/unmanage feature. Images " "residing on preconfigured shares may be listed and managed by Cinder." msgstr "" "The SMBFS driver now supports the volume manage/unmanage feature. Images " "residing on preconfigured shares may be listed and managed by Cinder." msgid "" "The SMBFS volume driver can now be configured to use fixed vhd/x images " "through the 'nas_volume_prov_type' config option." msgstr "" "The SMBFS volume driver can now be configured to use fixed VHD/X images " "through the 'nas_volume_prov_type' config option." msgid "" "The SMBFS volume driver now supports reverting volumes to the latest " "snapshot." msgstr "" "The SMBFS volume driver now supports reverting volumes to the latest " "snapshot." msgid "" "The ScaleIO Driver has deprecated several options specified in ``cinder." "conf``: * ``sio_protection_domain_id`` * ``sio_protection_domain_name``, * " "``sio_storage_pool_id`` * ``sio_storage_pool_name``. Users of the ScaleIO " "Driver should now utilize the ``sio_storage_pools`` options to provide a " "list of protection_domain:storage_pool pairs." msgstr "" "The ScaleIO Driver has deprecated several options specified in ``cinder." "conf``: * ``sio_protection_domain_id`` * ``sio_protection_domain_name``, * " "``sio_storage_pool_id`` * ``sio_storage_pool_name``. Users of the ScaleIO " "Driver should now utilise the ``sio_storage_pools`` options to provide a " "list of protection_domain:storage_pool pairs." msgid "" "The ScaleIO Driver has deprecated the ability to specify the protection " "domain, as ``sio:pd_name``, and storage pool, as ``sio:sp_name``, extra " "specs in volume types. The supported way to specify a specific protection " "domain and storage pool in a volume type is to define a ``pool_name`` extra " "spec and set the value to the appropriate ``protection_domain_name:" "storage_pool_name``." msgstr "" "The ScaleIO Driver has deprecated the ability to specify the protection " "domain, as ``sio:pd_name``, and storage pool, as ``sio:sp_name``, extra " "specs in volume types. The supported way to specify a specific protection " "domain and storage pool in a volume type is to define a ``pool_name`` extra " "spec and set the value to the appropriate ``protection_domain_name:" "storage_pool_name``." msgid "" "The ScaleIO driver is moved to the dell_emc directory. volume_driver entry " "in cinder.conf needs to be changed to ``cinder.volume.drivers.dell_emc." "scaleio.driver.ScaleIODriver``." msgstr "" "The ScaleIO driver is moved to the dell_emc directory. volume_driver entry " "in cinder.conf needs to be changed to ``cinder.volume.drivers.dell_emc." "scaleio.driver.ScaleIODriver``." 
msgid "" "The Scality backend volume driver was marked as not supported in the " "previous release and has now been removed." msgstr "" "The Scality backend volume driver was marked as not supported in the " "previous release and has now been removed." msgid "" "The Scality driver has been marked as unsupported and is now deprecated. " "enable_unsupported_drivers will need to be set to True in cinder.conf to " "continue to use it." msgstr "" "The Scality driver has been marked as unsupported and is now deprecated. " "enable_unsupported_drivers will need to be set to True in cinder.conf to " "continue to use it." msgid "" "The Scality driver has been marked as unsupported and is now deprecated. " "enable_unsupported_drivers will need to be set to True in cinder.conf to " "continue to use it. If its support status does not change it will be removed " "in the next release." msgstr "" "The Scality driver has been marked as unsupported and is now deprecated. " "enable_unsupported_drivers will need to be set to True in cinder.conf to " "continue to use it. If its support status does not change it will be removed " "in the next release." msgid "" "The Sheepdog driver has been marked as unsupported and is now deprecated. " "``enable_unsupported_driver`` will need to be set to ``True`` in the " "driver's section in cinder.conf to continue to use the driver." msgstr "" "The Sheepdog driver has been marked as unsupported and is now deprecated. " "``enable_unsupported_driver`` will need to be set to ``True`` in the " "driver's section in cinder.conf to continue to use the driver." msgid "" "The Sheepdog driver has been marked as unsupported and is now deprecated. " "``enable_unsupported_driver`` will need to be set to ``True`` in the " "driver's section in cinder.conf to continue to use the driver. The driver is " "scheduled for removal in the 'U' release." msgstr "" "The Sheepdog driver has been marked as unsupported and is now deprecated. " "``enable_unsupported_driver`` will need to be set to ``True`` in the " "driver's section in cinder.conf to continue to use the driver. The driver is " "scheduled for removal in the 'U' release." msgid "" "The Sheepdog driver was marked unsupported in the Train release and has now " "been removed. All data on Sheepdog backends should be migrated to a " "supported backend before upgrading your Cinder installation." msgstr "" "The Sheepdog driver was marked unsupported in the Train release and has now " "been removed. All data on Sheepdog backends should be migrated to a " "supported backend before upgrading your Cinder installation." msgid "" "The SolidFire driver will recognize 4 new QoS spec keys to allow an " "administrator to specify QoS settings which are scaled by the size of the " "volume. 'ScaledIOPS' is a flag which will tell the driver to look for " "'scaleMin', 'scaleMax' and 'scaleBurst' which provide the scaling factor " "from the minimum values specified by the previous QoS keys ('minIOPS', " "'maxIOPS', 'burstIOPS'). The administrator must take care to assure that no " "matter what the final calculated QoS values follow minIOPS <= maxIOPS <= " "burstIOPS. A exception will be thrown if not. The QoS settings are also " "checked against the cluster min and max allowed and truncated at the min or " "max if they exceed." msgstr "" "The SolidFire driver will recognize 4 new QoS spec keys to allow an " "administrator to specify QoS settings which are scaled by the size of the " "volume. 
'ScaledIOPS' is a flag which will tell the driver to look for " "'scaleMin', 'scaleMax' and 'scaleBurst' which provide the scaling factor " "from the minimum values specified by the previous QoS keys ('minIOPS', " "'maxIOPS', 'burstIOPS'). The administrator must take care to assure that no " "matter what the final calculated QoS values follow minIOPS <= maxIOPS <= " "burstIOPS. A exception will be thrown if not. The QoS settings are also " "checked against the cluster min and max allowed and truncated at the min or " "max if they exceed." msgid "" "The SolidFire replication was fixed. Several bugs were addressed (creating " "replicated volumes from snapshots, from volumes, retype a volume to a " "replicated type, managing a volume to a replicated type, correctly updating " "portal info on failover/failback and some minor other fixes). Closes bugs " "#1834013, #1751932." msgstr "" "The SolidFire replication was fixed. Several bugs were addressed (creating " "replicated volumes from snapshots, from volumes, retype a volume to a " "replicated type, managing a volume to a replicated type, correctly updating " "portal info on failover/failback and some minor other fixes). Closes bugs " "#1834013, #1751932." msgid "" "The Solidfire cinder driver has been fixed to ensure delete happens on the " "correct volume." msgstr "" "The Solidfire Cinder driver has been fixed to ensure delete happens on the " "correct volume." msgid "The StorPool backend driver was added." msgstr "The StorPool backend driver was added." msgid "" "The StorPool driver enables the ``multiattach`` and ``thin_provisioning`` " "capabilities." msgstr "" "The StorPool driver enables the ``multiattach`` and ``thin_provisioning`` " "capabilities." msgid "" "The StorPool driver now declares the \"clone across pools\" capability, " "which allows it to create a volume into an arbitrary StorPool-backed volume " "type from a StorPool-backed Glance image." msgstr "" "The StorPool driver now declares the \"clone across pools\" capability, " "which allows it to create a volume into an arbitrary StorPool-backed volume " "type from a StorPool-backed Glance image." msgid "The Swift and Posix backup drivers are known to be working on Windows." msgstr "The Swift and Posix backup drivers are known to be working on Windows." msgid "" "The Swift backup driver now supports sending a X-Service-Token header with a " "service token when the new ``backup_swift_service_auth`` config option is " "enabled. Please note that you still need to configure the ``[service_user]`` " "group and also set ``send_service_user_token`` to enable the behavior and " "not only the Swift backup driver option. Note ``send_service_user_token`` " "enables it globally and will also affect communication with Nova and Glance." msgstr "" "The Swift backup driver now supports sending a X-Service-Token header with a " "service token when the new ``backup_swift_service_auth`` config option is " "enabled. Please note that you still need to configure the ``[service_user]`` " "group and also set ``send_service_user_token`` to enable the behaviour and " "not only the Swift backup driver option. Note ``send_service_user_token`` " "enables it globally and will also affect communication with Nova and Glance." msgid "" "The Synology driver has been marked as unsupported and is now deprecated. " "``enable_unsupported_driver`` will need to be set to ``True`` in the " "driver's section in ``cinder.conf`` to continue to use it." 
msgstr "" "The Synology driver has been marked as unsupported and is now deprecated. " "``enable_unsupported_driver`` will need to be set to ``True`` in the " "driver's section in ``cinder.conf`` to continue to use it." msgid "" "The Synology driver has been marked as unsupported and is now deprecated. " "``enable_unsupported_driver`` will need to be set to ``True`` in the " "driver's section in ``cinder.conf`` to continue to use it. If its support " "status does not change, the driver will be removed in the Queens development " "cycle." msgstr "" "The Synology driver has been marked as unsupported and is now deprecated. " "``enable_unsupported_driver`` will need to be set to ``True`` in the " "driver's section in ``cinder.conf`` to continue to use it. If its support " "status does not change, the driver will be removed in the Queens development " "cycle." msgid "" "The Tegile driver has been marked as unsupported and is now deprecated. " "``enable_unsupported_driver`` will need to be set to ``True`` in the " "driver's section in cinder.conf to continue to use it." msgstr "" "The Tegile driver has been marked as unsupported and is now deprecated. " "``enable_unsupported_driver`` will need to be set to ``True`` in the " "driver's section in cinder.conf to continue to use it." msgid "" "The Tegile driver has been marked as unsupported and is now deprecated. " "``enable_unsupported_driver`` will need to be set to ``True`` in the " "driver's section in cinder.conf to continue to use it. If its support status " "does not change, they will be removed in the Queens development cycle." msgstr "" "The Tegile driver has been marked as unsupported and is now deprecated. " "``enable_unsupported_driver`` will need to be set to ``True`` in the " "driver's section in cinder.conf to continue to use it. If its support status " "does not change, they will be removed in the Queens development cycle." msgid "" "The Tintri driver has been marked as unsupported and is now deprecated. " "``enable_unsupported_driver`` will need to be set to ``True`` in the " "driver's section in cinder.conf to continue to use it." msgstr "" "The Tintri driver has been marked as unsupported and is now deprecated. " "``enable_unsupported_driver`` will need to be set to ``True`` in the " "driver's section in cinder.conf to continue to use it." msgid "" "The Tintri driver has been marked as unsupported and is now deprecated. " "``enable_unsupported_driver`` will need to be set to ``True`` in the " "driver's section in cinder.conf to continue to use it. If its support status " "does not change, it will be removed in the 'T' development cycle." msgstr "" "The Tintri driver has been marked as unsupported and is now deprecated. " "``enable_unsupported_driver`` will need to be set to ``True`` in the " "driver's section in cinder.conf to continue to use it. If its support status " "does not change, it will be removed in the 'T' development cycle." msgid "" "The Tintri storage driver has been removed after completion of its " "deprecation period without a reliable 3rd Party CI system being supported. " "Customers using the Tintri driver should not upgrade Cinder without first " "migrating all volumes from their Tintri backend to a supported storage " "backend. Failure to migrate volumes will result in no longer being able to " "access volumes backed by the Tintri storage backend." msgstr "" "The Tintri storage driver has been removed after completion of its " "deprecation period without a reliable 3rd Party CI system being supported. 
" "Customers using the Tintri driver should not upgrade Cinder without first " "migrating all volumes from their Tintri backend to a supported storage " "backend. Failure to migrate volumes will result in no longer being able to " "access volumes backed by the Tintri storage backend." msgid "" "The Tintri storage driver was marked unsupported in Stein due to 3rd Party " "CI not meeting Cinder's requirements. As a result the driver is removed " "starting from the Train release." msgstr "" "The Tintri storage driver was marked unsupported in Stein due to 3rd Party " "CI not meeting Cinder's requirements. As a result the driver is removed " "starting from the Train release." msgid "" "The VMAX driver is moved to the dell_emc directory. volume_driver entry in " "cinder.conf needs to be changed to ``cinder.volume.drivers.dell_emc.vmax." "iscsi.VMAXISCSIDriver`` or ``cinder.volume.drivers.dell_emc.vmax.fc." "VMAXFCDriver``." msgstr "" "The VMAX driver is moved to the dell_emc directory. volume_driver entry in " "cinder.conf needs to be changed to ``cinder.volume.drivers.dell_emc.vmax." "iscsi.VMAXISCSIDriver`` or ``cinder.volume.drivers.dell_emc.vmax.fc." "VMAXFCDriver``." msgid "The VMware VMDK driver for ESX server has been removed." msgstr "The VMware VMDK driver for ESX server has been removed." msgid "The VMware VMDK driver now enforces minimum vCenter version of 5.1." msgstr "The VMware VMDK driver now enforces minimum vCenter version of 5.1." msgid "The VMware VMDK driver now enforces minimum vCenter version of 5.5." msgstr "The VMware VMDK driver now enforces minimum vCenter version of 5.5." msgid "" "The VMware VMDK driver supports a new config option 'vmware_host_port' to " "specify the port number to connect to vCenter server." msgstr "" "The VMware VMDK driver supports a new config option 'vmware_host_port' to " "specify the port number to connect to vCenter server." msgid "" "The Veritas Access driver has been marked as unsupported and is now " "deprecated. ``enable_unsupported_driver`` will need to be set to ``True`` in " "the driver's section in cinder.conf to continue to use it." msgstr "" "The Veritas Access driver has been marked as unsupported and is now " "deprecated. ``enable_unsupported_driver`` will need to be set to ``True`` in " "the driver's section in cinder.conf to continue to use it." msgid "" "The Veritas Access driver has been marked as unsupported and is now " "deprecated. ``enable_unsupported_driver`` will need to be set to ``True`` in " "the driver's section in cinder.conf to continue to use it. If its support " "status does not change, it will be removed in the 'U' development cycle." msgstr "" "The Veritas Access driver has been marked as unsupported and is now " "deprecated. ``enable_unsupported_driver`` will need to be set to ``True`` in " "the driver's section in cinder.conf to continue to use it. If its support " "status does not change, it will be removed in the 'U' development cycle." msgid "" "The Veritas Clustered NFS driver has been marked as unsupported and is now " "deprecated. ``enable_unsupported_driver`` will need to be set to ``True`` " "in the driver's section in cinder.conf to continue to use the driver. It " "its support status does not change the driver will be removed in the 'V' " "development cycle." msgstr "" "The Veritas Clustered NFS driver has been marked as unsupported and is now " "deprecated. ``enable_unsupported_driver`` will need to be set to ``True`` " "in the driver's section in cinder.conf to continue to use the driver. 
It " "its support status does not change the driver will be removed in the 'V' " "development cycle." msgid "" "The Veritas Clustered NFS driver has been marked as unsupported and is now " "deprecated. ``enabled_unsupported_driver`` will need to be set to ``True`` " "in the driver's section in cinder.conf to continue to use the driver." msgstr "" "The Veritas Clustered NFS driver has been marked as unsupported and is now " "deprecated. ``enabled_unsupported_driver`` will need to be set to ``True`` " "in the driver's section in cinder.conf to continue to use the driver." msgid "" "The Veritas HyperScale driver has been marked as unsupported and is now " "deprecated. ``enable_unsupported_driver`` will need to be set to ``True`` in " "the driver's section in cinder.conf to continue to use it." msgstr "" "The Veritas HyperScale driver has been marked as unsupported and is now " "deprecated. ``enable_unsupported_driver`` will need to be set to ``True`` in " "the driver's section in cinder.conf to continue to use it." msgid "" "The Veritas HyperScale driver has been marked as unsupported and is now " "deprecated. ``enable_unsupported_driver`` will need to be set to ``True`` in " "the driver's section in cinder.conf to continue to use it. If its support " "status does not change, it will be removed in the 'T' development cycle." msgstr "" "The Veritas HyperScale driver has been marked as unsupported and is now " "deprecated. ``enable_unsupported_driver`` will need to be set to ``True`` in " "the driver's section in cinder.conf to continue to use it. If its support " "status does not change, it will be removed in the 'T' development cycle." msgid "" "The Veritas HyperScale storage driver has been removed after completion of " "its deprecation period without a reliable 3rd Party CI system being " "supported. Customers using the Veritas HyperScale driver should not upgrade " "Cinder without first migrating all volumes from their Veritas backend to a " "supported storage backend. Failure to migrate volumes will result in no " "longer being able to access volumes backed by the Veritas HyperScale storage " "backend." msgstr "" "The Veritas HyperScale storage driver has been removed after completion of " "its deprecation period without a reliable 3rd Party CI system being " "supported. Customers using the Veritas HyperScale driver should not upgrade " "Cinder without first migrating all volumes from their Veritas backend to a " "supported storage backend. Failure to migrate volumes will result in no " "longer being able to access volumes backed by the Veritas HyperScale storage " "backend." msgid "" "The Veritas HyperScale storage driver was marked unsupported in Stein due " "to 3rd Party CI not meeting Cinder's requirements. As a result the driver " "is removed starting from the Train release." msgstr "" "The Veritas HyperScale storage driver was marked unsupported in Stein due " "to 3rd Party CI not meeting Cinder's requirements. As a result the driver " "is removed starting from the Train release." msgid "" "The Violin drivers have been marked as unsupported and are now deprecated. " "``enable_unsupported_drivers`` will need to be set to ``True`` in cinder." "conf to continue to use them." msgstr "" "The Violin drivers have been marked as unsupported and are now deprecated. " "``enable_unsupported_drivers`` will need to be set to ``True`` in cinder." "conf to continue to use them." msgid "" "The Violin drivers have been marked as unsupported and are now deprecated. 
" "``enable_unsupported_drivers`` will need to be set to ``True`` in cinder." "conf to continue to use them. If its support status does not change it will " "be removed in the next release." msgstr "" "The Violin drivers have been marked as unsupported and are now deprecated. " "``enable_unsupported_drivers`` will need to be set to ``True`` in cinder." "conf to continue to use them. If its support status does not change it will " "be removed in the next release." msgid "" "The Virtuozzo driver has been marked as unsupported and is now deprecated. " "``enable_unsupported_driver`` will need to be set to ``True`` in the " "driver's section in cinder.conf to continue to use it." msgstr "" "The Virtuozzo driver has been marked as unsupported and is now deprecated. " "``enable_unsupported_driver`` will need to be set to ``True`` in the " "driver's section in cinder.conf to continue to use it." msgid "" "The Virtuozzo driver has been marked as unsupported and is now deprecated. " "``enable_unsupported_driver`` will need to be set to ``True`` in the " "driver's section in cinder.conf to continue to use it. If its support status " "does not change, it will be removed in the 'U' development cycle." msgstr "" "The Virtuozzo driver has been marked as unsupported and is now deprecated. " "``enable_unsupported_driver`` will need to be set to ``True`` in the " "driver's section in cinder.conf to continue to use it. If its support status " "does not change, it will be removed in the 'U' development cycle." msgid "" "The Windows iSCSI driver has been renamed. The updated driver location is " "``cinder.volume.drivers.windows.iscsi.WindowsISCSIDriver``." msgstr "" "The Windows iSCSI driver has been renamed. The updated driver location is " "``cinder.volume.drivers.windows.iscsi.WindowsISCSIDriver``." msgid "" "The Windows iSCSI driver now honors the configured iSCSI addresses, ensuring " "that only those addresses will be used for iSCSI traffic." msgstr "" "The Windows iSCSI driver now honours the configured iSCSI addresses, " "ensuring that only those addresses will be used for iSCSI traffic." msgid "" "The Windows iSCSI driver now returns multiple portals when available and " "multipath is requested." msgstr "" "The Windows iSCSI driver now returns multiple portals when available and " "multipath is requested." msgid "" "The X-IO driver has been marked as unsupported and is now deprecated. " "``enable_unsupported_driver`` will need to be set to ``True`` in the " "driver's section in cinder.conf to continue to use it." msgstr "" "The X-IO driver has been marked as unsupported and is now deprecated. " "``enable_unsupported_driver`` will need to be set to ``True`` in the " "driver's section in cinder.conf to continue to use it." msgid "" "The X-IO driver has been marked as unsupported and is now deprecated. " "``enable_unsupported_driver`` will need to be set to ``True`` in the " "driver's section in cinder.conf to continue to use it. If its support status " "does not change, they will be removed in the Queens development cycle." msgstr "" "The X-IO driver has been marked as unsupported and is now deprecated. " "``enable_unsupported_driver`` will need to be set to ``True`` in the " "driver's section in cinder.conf to continue to use it. If its support status " "does not change, they will be removed in the Queens development cycle." msgid "" "The XML API has been marked deprecated and will be removed in a future " "release." 
msgstr "" "The XML API has been marked deprecated and will be removed in a future " "release." msgid "" "The XML API has been removed in Newton release. Cinder supports only JSON " "API request/response format now." msgstr "" "The XML API has been removed in Newton release. Cinder supports only JSON " "API request/response format now." msgid "" "The XML configuration file used by the HNAS drivers is now deprecated and " "will no longer be used in the future. Please use cinder.conf for all driver " "configuration." msgstr "" "The XML configuration file used by the HNAS drivers is now deprecated and " "will no longer be used in the future. Please use cinder.conf for all driver " "configuration." msgid "" "The XtremIO driver has been fixed to correctly report the " "\"free_capacity_gb\" size." msgstr "" "The XtremIO driver has been fixed to correctly report the " "\"free_capacity_gb\" size." msgid "" "The XtremIO driver is moved to the dell_emc directory. volume_driver entry " "in cinder.conf needs to be changed to ``cinder.volume.drivers.dell_emc." "xtremio.XtremIOISCSIDriver`` or ``cinder.volume.drivers.dell_emc.xtremio." "XtremIOFCDriver``." msgstr "" "The XtremIO driver is moved to the dell_emc directory. volume_driver entry " "in cinder.conf needs to be changed to ``cinder.volume.drivers.dell_emc." "xtremio.XtremIOISCSIDriver`` or ``cinder.volume.drivers.dell_emc.xtremio." "XtremIOFCDriver``." msgid "" "The ZTE driver has been marked as unsupported and is now deprecated. " "``enable_unsupported_driver`` will need to be set to ``True`` in the " "driver's section in cinder.conf to continue to use it." msgstr "" "The ZTE driver has been marked as unsupported and is now deprecated. " "``enable_unsupported_driver`` will need to be set to ``True`` in the " "driver's section in cinder.conf to continue to use it." msgid "" "The ZTE driver has been marked as unsupported and is now deprecated. " "``enable_unsupported_driver`` will need to be set to ``True`` in the " "driver's section in cinder.conf to continue to use it. If its support status " "does not change, they will be removed in the Queens development cycle." msgstr "" "The ZTE driver has been marked as unsupported and is now deprecated. " "``enable_unsupported_driver`` will need to be set to ``True`` in the " "driver's section in cinder.conf to continue to use it. If its support status " "does not change, they will be removed in the Queens development cycle." msgid "" "The Zadara VPSA Driver has been updated to support json format and " "reorganized with new code layout. The module path ``cinder.volume.drivers." "zadara.ZadaraVPSAISCSIDriver`` should now be updated to ``cinder.volume." "drivers.zadara.zadara.ZadaraVPSAISCSIDriver`` in ``cinder.conf``." msgstr "" "The Zadara VPSA Driver has been updated to support JSON format and " "reorganised with a new code layout. The module path ``cinder.volume.drivers." "zadara.ZadaraVPSAISCSIDriver`` should now be updated to ``cinder.volume." "drivers.zadara.zadara.ZadaraVPSAISCSIDriver`` in ``cinder.conf``." msgid "" "The `Ceph RADOS Block Device (RBD) `__ driver " "documentation has been updated to reflect this policy and explains it in " "more detail." msgstr "" "The `Ceph RADOS Block Device (RBD) `__ driver " "documentation has been updated to reflect this policy and explains it in " "more detail." msgid "" "The ``WindowsDriver`` was renamed in the Queens release to " "``WindowsISCSIDriver`` to avoid confusion with the SMB driver. 
The backwards " "compatibility for this has now been removed, so any cinder.conf settings " "still using ``cinder.volume.drivers.windows.windows.WindowsDriver`` must now " "be updated to use ``cinder.volume.drivers.windows.iscsi.WindowsISCSIDriver``." msgstr "" "The ``WindowsDriver`` was renamed in the Queens release to " "``WindowsISCSIDriver`` to avoid confusion with the SMB driver. The backwards " "compatibility for this has now been removed, so any cinder.conf settings " "still using ``cinder.volume.drivers.windows.windows.WindowsDriver`` must now " "be updated to use ``cinder.volume.drivers.windows.iscsi.WindowsISCSIDriver``." msgid "" "The ``[DEFAULT] db_driver`` config option has been removed. This was " "intended to allow configuration of the database driver, however, there is " "only one database driver present in-tree and out-of-tree database drivers " "are not supported." msgstr "" "The ``[DEFAULT] db_driver`` config option has been removed. This was " "intended to allow configuration of the database driver, however, there is " "only one database driver present in-tree and out-of-tree database drivers " "are not supported." msgid "" "The ``__DEFAULT__`` volume type may safely be renamed (or renamed and " "deleted) after you have run the online migrations as long as the " "``default_volume_type`` configuration option is set to a valid existing " "volume type." msgstr "" "The ``__DEFAULT__`` volume type may safely be renamed (or renamed and " "deleted) after you have run the online migrations as long as the " "``default_volume_type`` configuration option is set to a valid existing " "volume type." msgid "" "The ``cinder-manage db online_data_migrations`` command now returns exit " "status 2 in the case where some migrations failed (raised exceptions) and no " "others were completed successfully from the last batch attempted. This " "should be considered a fatal condition that requires intervention. Exit " "status 1 will be returned in the case where the ``--max-count`` option was " "used and some migrations failed but others succeeded (updated at least one " "row), because more work may remain for the non-failing migrations, and their " "completion may be a dependency for the failing ones. The command should be " "reiterated while it returns exit status 1, and considered completed " "successfully only when it returns exit status 0." msgstr "" "The ``cinder-manage db online_data_migrations`` command now returns exit " "status 2 in the case where some migrations failed (raised exceptions) and no " "others were completed successfully from the last batch attempted. This " "should be considered a fatal condition that requires intervention. Exit " "status 1 will be returned in the case where the ``--max-count`` option was " "used and some migrations failed but others succeeded (updated at least one " "row), because more work may remain for the non-failing migrations, and their " "completion may be a dependency for the failing ones. The command should be " "reiterated while it returns exit status 1, and considered completed " "successfully only when it returns exit status 0." msgid "" "The ``cinder-manage db sync`` command for this verison of cinder will add " "additional database indexes. Depending on database size and complexity, " "this will take time to complete for every single index to be created. 
On " "MySQL or MariaDB, these indexes will only be created if an index does not " "already exist with the same name:" msgstr "" "The ``cinder-manage db sync`` command for this verison of cinder will add " "additional database indexes. Depending on database size and complexity, " "this will take time to complete for every single index to be created. On " "MySQL or MariaDB, these indexes will only be created if an index does not " "already exist with the same name:" msgid "" "The ``cinder.quota.NestedDbQuotaDriver`` quota driver for handling nested " "projects is now deprecated. There is an OpenStack-wide effort to move to " "\"unified limits\" that will require changes in how quotas are handled for " "these types of configurations. The ``NestedDbQuotaDriver`` will continue to " "work until it is replaced with this new mechanism." msgstr "" "The ``cinder.quota.NestedDbQuotaDriver`` quota driver for handling nested " "projects is now deprecated. There is an OpenStack-wide effort to move to " "\"unified limits\" that will require changes in how quotas are handled for " "these types of configurations. The ``NestedDbQuotaDriver`` will continue to " "work until it is replaced with this new mechanism." msgid "" "The ``cinder.quota.NestedDbQuotaDriver`` quota driver was marked as " "deprecated in Train release and is eligible for removal since Ussuri " "release. This release removes the NestedQuotaDriver support." msgstr "" "The ``cinder.quota.NestedDbQuotaDriver`` quota driver was marked as " "deprecated in the Train release and is eligible for removal since the Ussuri " "release. This release removes the NestedQuotaDriver support." msgid "" "The ``default_volume_type`` configuration option is now required to have a " "value. The default value is ``__DEFAULT__``, so you should see no change in " "behavior whether or not you have set a value for ``default_volume_type``. " "See `Bug #1886632 `_ for " "more information about this change." msgstr "" "The ``default_volume_type`` configuration option is now required to have a " "value. The default value is ``__DEFAULT__``, so you should see no change in " "behaviour whether or not you have set a value for ``default_volume_type``. " "See `Bug #1886632 `_ for " "more information about this change." msgid "" "The ``default_volume_type`` configuration option is required to have a " "value. The default value is ``__DEFAULT__``." msgstr "" "The ``default_volume_type`` configuration option is required to have a " "value. The default value is ``__DEFAULT__``." msgid "" "The ``enable_unsupported_driver`` option will need to be set to ``True`` in " "the driver's section in cinder.conf to continue to use the following three " "drivers." msgstr "" "The ``enable_unsupported_driver`` option will need to be set to ``True`` in " "the driver's section in cinder.conf to continue to use the following three " "drivers." msgid "" "The ``force`` boolean parameter has been added to the volume delete API. It " "may be used in combination with ``cascade``. This also means that volume " "force delete is available in the base volume API rather than only in the " "``volume_admin_actions`` extension." msgstr "" "The ``force`` boolean parameter has been added to the volume delete API. It " "may be used in combination with ``cascade``. This also means that volume " "force delete is available in the base volume API rather than only in the " "``volume_admin_actions`` extension." 
msgid "" "The ``image_service:store_id`` can only be set in the extra-specs for a " "volume-type when multiple glance stores are configured." msgstr "" "The ``image_service:store_id`` can only be set in the extra-specs for a " "volume-type when multiple glance stores are configured." msgid "" "The ``periodic_interval`` configuration option still exists but its use is " "now restricted to providing a default periodicity for objects created from " "the ``cinder.service.Service`` class." msgstr "" "The ``periodic_interval`` configuration option still exists but its use is " "now restricted to providing a default periodicity for objects created from " "the ``cinder.service.Service`` class." msgid "" "The ``periodic_interval`` configuration option was being used in too many " "places, and as a result, it had become difficult to tune specific periodic " "tasks without affecting other functionality. The following configuration " "options should now be used in place of ``periodic_interval``:" msgstr "" "The ``periodic_interval`` configuration option was being used in too many " "places, and as a result, it had become difficult to tune specific periodic " "tasks without affecting other functionality. The following configuration " "options should now be used in place of ``periodic_interval``:" msgid "" "The ``service`` filter for service list API was deprecated 3 years ago in " "2013 July (Havana). Removed this filter and please use \"binary\" instead." msgstr "" "The ``service`` filter for service list API was deprecated 3 years ago in " "2013 July (Havana). Removed this filter and please use \"binary\" instead." msgid "" "The ``storage_protocol`` treats all variants of the protocol name as the " "same regarding matches, so for example using FC, fc, or fibre_channel will " "be treated equally in the scheduler, be it when filtering using the volume " "type's extra specs or when using filter and goodness functions." msgstr "" "The ``storage_protocol`` treats all variants of the protocol name as the " "same regarding matches, so for example using FC, fc, or fibre_channel will " "be treated equally in the scheduler, be it when filtering using the volume " "type's extra specs or when using filter and goodness functions." msgid "" "The ``volume_extension:volume_type_encryption`` policy, which was deprecated " "in Stein, has been un-deprecated for the convenience of operators who would " "like to set the policies for the create, get, update, and delete operations " "for a volume type's encryption type in one place. The default value for " "this policy target has not changed." msgstr "" "The ``volume_extension:volume_type_encryption`` policy, which was deprecated " "in Stein, has been un-deprecated for the convenience of operators who would " "like to set the policies for the create, get, update, and delete operations " "for a volume type's encryption type in one place. The default value for " "this policy target has not changed." msgid "The `default_volume_type` option is unset in cinder.conf" msgstr "The `default_volume_type` option is unset in cinder.conf" msgid "" "The `lvm_max_overprovision_ratio` config option has been deprecated. It will " "be removed in a future release. Configurations should move to using the " "common `max_overprovision_ratio` config option." msgstr "" "The `lvm_max_overprovision_ratio` config option has been deprecated. It will " "be removed in a future release. Configurations should move to using the " "common `max_overprovision_ratio` config option." 
msgid "" "The `osapi_volume_base_URL` config option was deprecated in Pike and has now " "been removed. The `public_endpoint` config option should be used instead." msgstr "" "The `osapi_volume_base_URL` config option was deprecated in Pike and has now " "been removed. The `public_endpoint` config option should be used instead." msgid "" "The ability to specify a backup driver by module name was deprecated in the " "Queens release and the ability has now been removed. Any configuration in " "cinder.conf still using the module path should be updated to include the " "full class name. For example, ``cinder.backup.drivers.swift`` should be " "updated to ``cinder.backup.drivers.swift.SwiftBackupDriver``." msgstr "" "The ability to specify a backup driver by module name was deprecated in the " "Queens release and the ability has now been removed. Any configuration in " "cinder.conf still using the module path should be updated to include the " "full class name. For example, ``cinder.backup.drivers.swift`` should be " "updated to ``cinder.backup.drivers.swift.SwiftBackupDriver``." msgid "" "The block_driver is deprecated as of the Ocata release and will be removed " "in the Queens release of Cinder. Instead the LVM driver with the LIO iSCSI " "target should be used. For those that desire higher performance, they " "should use LVM striping." msgstr "" "The block_driver is deprecated as of the Ocata release and will be removed " "in the Queens release of Cinder. Instead the LVM driver with the LIO iSCSI " "target should be used. For those that desire higher performance they should " "use LVM striping." msgid "" "The cinder backup service has added support for the popular Zstandard " "compression algorithm. (The default is the venerable Deflate (zlib) " "algorithm.)" msgstr "" "The cinder backup service has added support for the popular Zstandard " "compression algorithm. (The default is the venerable Deflate (zlib) " "algorithm.)" msgid "" "The cinder options associated with throttling are " "``volume_copy_blkio_cgroup_name`` and ``volume_copy_bps_limit``. They are " "described in the `sample cinder configuration file `_ " "for the Wallaby release." msgstr "" "The Cinder options associated with throttling are " "``volume_copy_blkio_cgroup_name`` and ``volume_copy_bps_limit``. They are " "described in the `sample cinder configuration file `_ " "for the Wallaby release." msgid "" "The cinder team is working on a throttling solution using cgroup v2, but it " "was not ready at the time of this release. The solution is expected to be " "backported to a future release in the Xena series. This issue is being " "tracked as `Bug #1942203 `_." msgstr "" "The Cinder team is working on a throttling solution using cgroup v2, but it " "was not ready at the time of this release. The solution is expected to be " "backported to a future release in the Xena series. This issue is being " "tracked as `Bug #1942203 `_." msgid "" "The cinder team is working on a throttling solution using cgroup v2, but it " "was not ready at the time of this release. The solution is expected to be " "backported to a future release in the Yoga series. This issue continues to " "be tracked as `Bug #1942203 `_." msgstr "" "The Cinder team is working on a throttling solution using cgroup v2, but it " "was not ready at the time of this release. The solution is expected to be " "backported to a future release in the Yoga series. This issue continues to " "be tracked as `Bug #1942203 `_." 
msgid "" "The cinder-manage command now includes a new ``quota`` category with two " "possible actions ``check`` and ``sync`` to help administrators manage out of " "sync quotas on long running deployments." msgstr "" "The cinder-manage command now includes a new ``quota`` category with two " "possible actions ``check`` and ``sync`` to help administrators manage out-of-" "sync quotas on long-running deployments." msgid "" "The cinder-manage online_data_migrations command now prints a tabular " "summary of completed and remaining records. The goal here is to get all your " "numbers to zero. The previous execution return code behavior is retained for " "scripting." msgstr "" "The cinder-manage online_data_migrations command now prints a tabular " "summary of completed and remaining records. The goal here is to get all your " "numbers to zero. The previous execution return code behaviour is retained " "for scripting." msgid "" "The cinder-volume service currently depends on `Linux Kernel Control Groups " "(cgroups) version 1 `_ to control i/o throttling during some volume-copy " "and image-convert operations. At the time of this release, some Linux " "distributions may have changed to using `cgroups v2 `_ by default. Thus, you may " "need to take explicit steps to ensure that **cgroups v1** is enabled on any " "OpenStack nodes running the cinder-volume service. This may entail setting " "specific Linux kernel parameters for these nodes. Consult your Linux " "distribution's documentation for details." msgstr "" "The cinder-volume service currently depends on `Linux Kernel Control Groups " "(cgroups) version 1 `_ to control i/o throttling during some volume-copy " "and image-convert operations. At the time of this release, some Linux " "distributions may have changed to using `cgroups v2 `_ by default. Thus, you may " "need to take explicit steps to ensure that **cgroups v1** is enabled on any " "OpenStack nodes running the cinder-volume service. This may entail setting " "specific Linux kernel parameters for these nodes. Consult your Linux " "distribution's documentation for details." msgid "" "The cinder-volume service currently depends on `Linux Kernel Control Groups " "(cgroups) version 1 `_ to control i/o throttling during some volume-copy " "and image-convert operations. Some Linux distributions, however, have " "changed to using `cgroup v2 `_ by default and may have discontinued cgroups v1 " "support completely. Consult your Linux distribution's documentation for " "details." msgstr "" "The cinder-volume service currently depends on `Linux Kernel Control Groups " "(cgroups) version 1 `_ to control i/o throttling during some volume-copy " "and image-convert operations. Some Linux distributions, however, have " "changed to using `cgroup v2 `_ by default and may have discontinued cgroups v1 " "support completely. Consult your Linux distribution's documentation for " "details." msgid "" "The cinder-volume service depends on `Linux Kernel Control Groups (cgroups) " "version 1 `_ to control i/o throttling during some volume-copy and image-" "convert operations. Some Linux distributions, however, have changed to " "using `cgroup v2 `_ by default and may have discontinued cgroups v1 support " "completely. Consult your Linux distribution's documentation for details." msgstr "" "The cinder-volume service depends on `Linux Kernel Control Groups (cgroups) " "version 1 `_ to control i/o throttling during some volume-copy and image-" "convert operations. 
Some Linux distributions, however, have changed to " "using `cgroup v2 `_ by default and may have discontinued cgroups v1 support " "completely. Consult your Linux distribution's documentation for details." msgid "" "The compressed image of a volume will be stored in the Image service " "(Glance) with the ``container_format`` image property of ``compressed``. " "See the `Image service documentation `_ for more information about this image container format." msgstr "" "The compressed image of a volume will be stored in the Image service " "(Glance) with the ``container_format`` image property of ``compressed``. " "See the `Image service documentation `_ for more information about this image container format." msgid "" "The config option ``vmware_storage_profile`` is now deprecated and ignored. " "Setting this option results in performance degradation of the controller and " "put lot of load on vCenter server." msgstr "" "The config option ``vmware_storage_profile`` is now deprecated and ignored. " "Setting this option results in performance degradation of the controller and " "puts a lot of load on the vCenter server." msgid "" "The config options ``iscsi_ip_address``, ``iscsi_port``, ``target_helper``, " "``iscsi_target_prefix`` and ``iscsi_protocol`` were deprecated in the Queens " "release and have now been removed. Deployments should now used the more " "general ``target_ip_address``, ``target_port``, ``target_helper``, " "``target_prefix`` and ``target_protocol`` options." msgstr "" "The config options ``iscsi_ip_address``, ``iscsi_port``, ``target_helper``, " "``iscsi_target_prefix`` and ``iscsi_protocol`` were deprecated in the Queens " "release and have now been removed. Deployments should now use the more " "general ``target_ip_address``, ``target_port``, ``target_helper``, " "``target_prefix`` and ``target_protocol`` options." msgid "" "The config options ``scheduler_topic``, ``volume_topic`` and " "``backup_topic`` have been removed without a deprecation period as these had " "never worked correctly." msgstr "" "The config options ``scheduler_topic``, ``volume_topic`` and " "``backup_topic`` have been removed without a deprecation period as these had " "never worked correctly." msgid "" "The configuration option ``enable_v3_api`` has been removed from this " "release due to the fact that v3 is now the only version of the Block Storage " "API available. If present in a configuration file, it will be silently " "ignored as the v3 API is now enabled unconditionally." msgstr "" "The configuration option ``enable_v3_api`` has been removed from this " "release due to the fact that v3 is now the only version of the Block Storage " "API available. If present in a configuration file, it will be silently " "ignored as the v3 API is now enabled unconditionally." msgid "" "The configuration option ``rbd_keyring_conf`` for the Ceph cinder driver " "presents a security risk and the option is hereby deprecated and scheduled " "to be removed early in the 'V' development cycle, following the standard " "OpenStack deprecation policy. For more information, see `OSSN-0085 `_: Cinder configuration option can " "leak secret key from Ceph backend." msgstr "" "The configuration option ``rbd_keyring_conf`` for the Ceph cinder driver " "presents a security risk and the option is hereby deprecated and scheduled " "to be removed early in the 'V' development cycle, following the standard " "OpenStack deprecation policy. 
For more information, see `OSSN-0085 `_: Cinder configuration option can " "leak secret key from Ceph backend." msgid "" "The configuration option ``sf_allow_template_caching`` for the SolidFire " "driver has been removed. Use ``image_volume_cache`` enabled equals ``True`` " "for a better template image cache that is managed from Cinder." msgstr "" "The configuration option ``sf_allow_template_caching`` for the SolidFire " "driver has been removed. Use ``image_volume_cache`` enabled equals ``True`` " "for a better template image cache that is managed from Cinder." msgid "The consistency group API now returns volume type IDs." msgstr "The consistency group API now returns volume type IDs." msgid "" "The coordination system used by Cinder has been simplified to leverage tooz " "builtin heartbeat feature. Therefore, the configuration options " "`coordination.heartbeat`, `coordination.initial_reconnect_backoff` and " "`coordination.max_reconnect_backoff` have been removed." msgstr "" "The coordination system used by Cinder has been simplified to leverage Tooz " "built-in heartbeat feature. Therefore, the configuration options " "`coordination.heartbeat`, `coordination.initial_reconnect_backoff` and " "`coordination.max_reconnect_backoff` have been removed." msgid "" "The create volume api will now return 400 error instead of 404/500 if user " "passes non-uuid values to consistencygroup_id, source_volid and " "source_replica parameters in the request body." msgstr "" "The create volume API will now return 400 error instead of 404/500 if user " "passes non-UUID values to consistencygroup_id, source_volid and " "source_replica parameters in the request body." msgid "" "The database migration engine has changed from `sqlalchemy-migrate`__ to " "`alembic`__. For most deployments, this should have minimal to no impact and " "the switch should be mostly transparent. The main user-facing impact is the " "change in schema versioning. While sqlalchemy-migrate used a linear, integer-" "based versioning scheme, which required placeholder migrations to allow for " "potential migration backports, alembic uses a distributed version control-" "like schema where a migration's ancestor is encoded in the file and branches " "are possible. The alembic migration files therefore use an arbitrary UUID-" "like naming scheme and the ``cinder-manage db sync`` command now expects " "such a version when manually specifying the version that should be applied. " "For example::" msgstr "" "The database migration engine has changed from `sqlalchemy-migrate`__ to " "`alembic`__. For most deployments, this should have minimal to no impact and " "the switch should be mostly transparent. The main user-facing impact is the " "change in schema versioning. While sqlalchemy-migrate used a linear, integer-" "based versioning scheme, which required placeholder migrations to allow for " "potential migration backports, alembic uses a distributed version control-" "like schema where a migration's ancestor is encoded in the file and branches " "are possible. The alembic migration files therefore use an arbitrary UUID-" "like naming scheme and the ``cinder-manage db sync`` command now expects " "such a version when manually specifying the version that should be applied. " "For example::" msgid "" "The db migration script will create a volume type named ``__DEFAULT__`` The " "online migration will migrate all existing untyped volumes, snapshots to the " "``__DEFAULT__`` type. 
An invalid/non-existent type specified with " "`default_volume_type` option in cinder.conf will result in " "VolumeTypeNotFoundByName exception." msgstr "" "The db migration script will create a volume type named ``__DEFAULT__`` The " "online migration will migrate all existing untyped volumes, snapshots to the " "``__DEFAULT__`` type. An invalid/non-existent type specified with " "`default_volume_type` option in cinder.conf will result in " "VolumeTypeNotFoundByName exception." msgid "" "The default interval for polling vCenter tasks in the VMware VMDK driver is " "changed to 2s." msgstr "" "The default interval for polling vCenter tasks in the VMware VMDK driver is " "changed to 2s." msgid "" "The default key manager interface in Cinder was deprecated and the Castellan " "key manager interface library is now used instead. For more information " "about Castellan, please see http://docs.openstack.org/developer/castellan/ ." msgstr "" "The default key manager interface in Cinder was deprecated and the Castellan " "key manager interface library is now used instead. For more information " "about Castellan, please see http://docs.openstack.org/developer/castellan/ ." msgid "The default policy configuration has been extensively rewritten." msgstr "The default policy configuration has been extensively rewritten." msgid "" "The default setting for both policies allow an administrator or the volume " "owner to perform the associated action. See the `Policy configuration " "`_ documentation in the `Cinder Service Configuration` guide for " "details." msgstr "" "The default setting for both policies allows an administrator or the volume " "owner to perform the associated action. See the `Policy configuration " "`_ documentation in the `Cinder Service Configuration` guide for " "details." msgid "" "The default value for pure_replica_interval_default used by Pure Storage " "volume drivers has changed from 900 to 3600 seconds." msgstr "" "The default value for pure_replica_interval_default used by Pure Storage " "volume drivers has changed from 900 to 3600 seconds." msgid "" "The default value has been removed for the LVM specific " "`lvm_max_over_subscription_ratio` setting. This changes the behavior so that " "LVM backends now adhere to the common `max_over_subscription_ratio` setting. " "The LVM specific config option may still be used, but it is now deprecated " "and will be removed in a future release." msgstr "" "The default value has been removed for the LVM specific " "`lvm_max_over_subscription_ratio` setting. This changes the behaviour so " "that LVM backends now adhere to the common `max_over_subscription_ratio` " "setting. The LVM specific config option may still be used, but it is now " "deprecated and will be removed in a future release." msgid "" "The default value of the configuration option, ``glance_num_retries``, has " "been changed to 3 in this release. Its former value was 0. The option " "controls how many times to retry a Glance API call in response to a HTTP " "connection failure, timeout or ServiceUnavailable status. By this change, " "Cinder can be more resilient to temporary failure and continue the request " "if a retry succeeds." msgstr "" "The default value of the configuration option, ``glance_num_retries``, has " "been changed to 3 in this release. Its former value was 0. The option " "controls how many times to retry a Glance API call in response to a HTTP " "connection failure, timeout or ServiceUnavailable status. 
By this change, " "Cinder can be more resilient to temporary failure and continue the request " "if a retry succeeds." msgid "" "The default values for these policies have also not changed. See the " "`sample Cinder policy file `_ for more information." msgstr "" "The default values for these policies have also not changed. See the " "`sample Cinder policy file `_ for more information." msgid "The deprecated HP CLIQ proxy driver has now been removed." msgstr "The deprecated HP CLIQ proxy driver has now been removed." msgid "" "The details of this project are described in `Policy Personas and " "Permissions `_ in the `Cinder Service Configuration " "Guide`. We encourage you to read through that document. The following is " "only a summary." msgstr "" "The details of this project are described in `Policy Personas and " "Permissions `_ in the `Cinder Service Configuration " "Guide`. We encourage you to read through that document. The following is " "only a summary." msgid "" "The driver for Datera's Storage Systems has been marked as unsupported and " "is now deprecated. ``enable_unsupported_driver`` will need to be set to " "``True`` in the driver's section in cinder.conf to continue to use it." msgstr "" "The driver for Datera's Storage Systems has been marked as unsupported and " "is now deprecated. ``enable_unsupported_driver`` will need to be set to " "``True`` in the driver's section in cinder.conf to continue to use it." msgid "" "The driver for Datera's Storage Systems has been marked as unsupported and " "is now deprecated. ``enable_unsupported_driver`` will need to be set to " "``True`` in the driver's section in cinder.conf to continue to use it. If " "its support status does not change, it will be removed in the 'U' " "development cycle." msgstr "" "The driver for Datera's Storage Systems has been marked as unsupported and " "is now deprecated. ``enable_unsupported_driver`` will need to be set to " "``True`` in the driver's section in cinder.conf to continue to use it. If " "its support status does not change, it will be removed in the 'U' " "development cycle." msgid "" "The driver has been tested against the first Release Candidate for the " "cinder Victoria release, but it does not have ongoing third-party CI. If you " "use this driver, and would be interested in running third-party CI for it, " "please contact the Cinder project team." msgstr "" "The driver has been tested against the first Release Candidate for the " "cinder Victoria release, but it does not have ongoing third-party CI. If you " "use this driver, and would be interested in running third-party CI for it, " "please contact the Cinder project team." msgid "" "The drivers can only be configured with REST client when using ONTAP storage " "9.11.1 or newer." msgstr "" "The drivers can only be configured with REST client when using ONTAP storage " "9.11.1 or newer." msgid "" "The efficiency of revert-to-snapshot depends upon the Ceph storage backend " "in use, particularly whether or not BlueStore is being used in your Ceph " "installation." msgstr "" "The efficiency of revert-to-snapshot depends upon the Ceph storage backend " "in use, particularly whether or not BlueStore is being used in your Ceph " "installation." msgid "" "The efficiency of revert-to-snapshot is also dependent upon the Ceph storage " "backend in use, namely, whether or not BlueStore is being used in your Ceph " "installation." 
msgstr "" "The efficiency of revert-to-snapshot is also dependent upon the Ceph storage " "backend in use, namely, whether or not BlueStore is being used in your Ceph " "installation." msgid "The endpoints will now correctly raise a 403 Forbidden instead." msgstr "The endpoints will now correctly raise a 403 Forbidden instead." msgid "" "The fix for `Bug #1823200 `_ " "requires ``os-brick`` >= 2.10.3 but < 2.11.0." msgstr "" "The fix for `Bug #1823200 `_ " "requires ``os-brick`` >= 2.10.3 but < 2.11.0." msgid "" "The fix for `Bug #1823200 `_ " "requires ``os-brick`` >= 2.8.5 but < 2.9.0." msgstr "" "The fix for `Bug #1823200 `_ " "requires ``os-brick`` >= 2.8.5 but < 2.9.0." msgid "" "The fix for `Bug #1823200 `_ " "requires ``os-brick`` >= 3.0.2 but < 3.1.0." msgstr "" "The fix for `Bug #1823200 `_ " "requires ``os-brick`` >= 3.0.2 but < 3.1.0." msgid "" "The fix for `Bug #1823200 `_ " "requires ``os-brick`` version 3.1.0 or greater." msgstr "" "The fix for `Bug #1823200 `_ " "requires ``os-brick`` version 3.1.0 or greater." msgid "" "The following commands are no longer required to be listed in your rootwrap " "configuration: cgcreate; and cgset." msgstr "" "The following commands are no longer required to be listed in your rootwrap " "configuration: cgcreate; and cgset." msgid "" "The following drivers have been marked as unsupported and are now deprecated." msgstr "" "The following drivers have been marked as unsupported and are now deprecated." msgid "" "The following drivers have been marked as unsupported and are now " "deprecated. ``enable_unsupported_driver`` will need to be set to ``True`` in " "the driver's section in cinder.conf to continue to use these drivers." msgstr "" "The following drivers have been marked as unsupported and are now " "deprecated. ``enable_unsupported_driver`` will need to be set to ``True`` in " "the driver's section in cinder.conf to continue to use these drivers." msgid "" "The following drivers were classified as unsupported in the Train release " "and continue as unspported in this release. See the \"Known Issues\" " "section of this document for details." msgstr "" "The following drivers were classified as unsupported in the Train release " "and continue as unsupported in this release. See the \"Known Issues\" " "section of this document for details." msgid "" "The following drivers were classified as unsupported in the Train release " "and continue as unspported in this release:" msgstr "" "The following drivers were classified as unsupported in the Train release " "and continue as unsupported in this release:" msgid "" "The following drivers were deprecated in the Train release but have not yet " "been removed. They continue as unsupported and deprecated in this release. " "See the \"Known Issues\" section of this document for details." msgstr "" "The following drivers were deprecated in the Train release but have not yet " "been removed. They continue as unsupported and deprecated in this release. " "See the \"Known Issues\" section of this document for details." msgid "" "The following policies have been DEPRECATED in this release and will be " "removed in Yoga:" msgstr "" "The following policies have been DEPRECATED in this release and will be " "removed in Yoga:" msgid "" "The following policy rules have been DEPRECATED in this release and will be " "removed in Yoga:" msgstr "" "The following policy rules have been DEPRECATED in this release and will be " "removed in Yoga:" msgid "" "The following three drivers were marked unsupported. 
These drivers are " "deprecated and will be removed in a future release." msgstr "" "The following three drivers were marked unsupported. These drivers are " "deprecated and will be removed in a future release." msgid "" "The following volume drivers were deprecated in the Pike release and have " "now been removed:" msgstr "" "The following volume drivers were deprecated in the Pike release and have " "now been removed:" msgid "" "The format version of a qcow2 can be determined by looking for the " "``compat`` field in the output of the ``qemu-img info`` command. A version 2 " "format image will report ``compat=0.10``, whereas a qcow2 in version 3 " "format will report ``compat=1.1``." msgstr "" "The format version of a qcow2 can be determined by looking for the " "``compat`` field in the output of the ``qemu-img info`` command. A version 2 " "format image will report ``compat=0.10``, whereas a qcow2 in version 3 " "format will report ``compat=1.1``." msgid "The fss_pool option is deprecated. Use fss_pools instead." msgstr "The fss_pool option is deprecated. Use fss_pools instead." msgid "" "The handling of the default volume type for a cinder installation has been " "improved in this release." msgstr "" "The handling of the default volume type for a cinder installation has been " "improved in this release." msgid "" "The hosts api extension is now deprecated and will be removed in a future " "version." msgstr "" "The hosts API extension is now deprecated and will be removed in a future " "version." msgid "" "The issue is partially fixed in the current release (\"partially\" because " "in specific circumstances, an operator may need to take some actions outside " "the normal upgrade process). See the \"Upgrade Notes\" for more information." msgstr "" "The issue is partially fixed in the current release (\"partially\" because " "in specific circumstances, an operator may need to take some actions outside " "the normal upgrade process). See the \"Upgrade Notes\" for more information." msgid "" "The legacy ``sqlalchemy-migrate`` migrations, which have been deprecated " "since Xena, have been removed. There should be no end-user impact." msgstr "" "The legacy ``sqlalchemy-migrate`` migrations, which have been deprecated " "since Xena, have been removed. There should be no end-user impact." msgid "The maximum length of a specified value is as follows:" msgstr "The maximum length of a specified value is as follows:" msgid "" "The maximum sequential physical free space is smaller than the volumes to be " "created." msgstr "" "The maximum sequential physical free space is smaller than the volumes to be " "created." msgid "The metadata has the following information:" msgstr "The metadata has the following information:" msgid "" "The multiattach capability has been enabled and verified as working with the " "ScaleIO driver. It is the user's responsibility to add some type of " "exclusion (at the file system or network file system layer) to prevent " "multiple writers from corrupting data on the volume." msgstr "" "The multiattach capability has been enabled and verified as working with the " "ScaleIO driver. It is the user's responsibility to add some type of " "exclusion (at the file system or network file system layer) to prevent " "multiple writers from corrupting data on the volume." msgid "" "The new driver adds 'linstor_autoplace_count' configuration option that " "specifies the number of volume replicas." 
msgstr "" "The new driver adds 'linstor_autoplace_count' configuration option that " "specifies the number of volume replicas." msgid "" "The notifications continue to be published to the above during the " "deprecation period. Beginning with this release, the ``os-reset_status`` " "notifications are also sent to the following *standard* publisher_ids:" msgstr "" "The notifications continue to be published to the above during the " "deprecation period. Beginning with this release, the ``os-reset_status`` " "notifications are also sent to the following *standard* publisher_ids:" msgid "" "The old HNAS drivers configuration paths have been marked for deprecation." msgstr "" "The old HNAS drivers configuration paths have been marked for deprecation." msgid "" "The old deprecated ``hp3par*`` options have been removed. Use the " "``hpe3par*`` instead of them." msgstr "" "The old deprecated ``hp3par*`` options have been removed. Use the " "``hpe3par*`` instead of them." msgid "" "The old deprecated ``keymgr`` options have been removed. Configuration " "options using the ``[keymgr]`` group will not be applied anymore. Use the " "``[key_manager]`` group from Castellan instead. The Castellan ``backend`` " "options should also be used instead of ``api_class``, as most of the options " "that lived in Cinder have migrated to Castellan." msgstr "" "The old deprecated ``keymgr`` options have been removed. Configuration " "options using the ``[keymgr]`` group will not be applied any more. Use the " "``[key_manager]`` group from Castellan instead. The Castellan ``backend`` " "options should also be used instead of ``api_class``, as most of the options " "that lived in Cinder have migrated to Castellan." msgid "" "The old deprecated ``nas_ip`` option has been removed. Use the ``nas_host`` " "instead of it." msgstr "" "The old deprecated ``nas_ip`` option has been removed. Use the ``nas_host`` " "instead of it." msgid "" "The old deprecated ``netapp_eseries_host_type`` option has been removed. Use " "the ``netapp_host_type`` instead." msgstr "" "The old deprecated ``netapp_eseries_host_type`` option has been removed. Use " "the ``netapp_host_type`` instead." msgid "" "The old deprecated ``pybasedir`` option has been removed. Use the " "``state_path`` instead." msgstr "" "The old deprecated ``pybasedir`` option has been removed. Use the " "``state_path`` instead." msgid "" "The online data migrations for Train have been updated to address an upgrade " "issue (`Bug #1893107 `_). " "The issue does not manifest itself in the Train release of cinder, but under " "specific circumstances it can prevent a cinder database upgrade from Train " "to Ussuri. See the \"Upgrade Notes\" and \"Bug Fixes\" sections below for " "more information." msgstr "" "The online data migrations for Train have been updated to address an upgrade " "issue (`Bug #1893107 `_). " "The issue does not manifest itself in the Train release of Cinder, but under " "specific circumstances it can prevent a Cinder database upgrade from Train " "to Ussuri. See the \"Upgrade Notes\" and \"Bug Fixes\" sections below for " "more information." msgid "" "The online database migrations in this release require the existence of a " "volume type named ``__DEFAULT__``. A ``__DEFAULT__`` volume type was " "created as part of your original installation of/upgrade to a Train release " "of cinder. If you have renamed (or renamed and deleted) the ``__DEFAULT__`` " "volume type, you must re-create it before running the online migrations. 
" "(If you renamed it, you don't have to un-rename it; you can create a new one " "just for the purposes of the online database migration.)" msgstr "" "The online database migrations in this release require the existence of a " "volume type named ``__DEFAULT__``. A ``__DEFAULT__`` volume type was " "created as part of your original installation of/upgrade to a Train release " "of cinder. If you have renamed (or renamed and deleted) the ``__DEFAULT__`` " "volume type, you must re-create it before running the online migrations. " "(If you renamed it, you don't have to un-rename it; you can create a new one " "just for the purposes of the online database migration.)" msgid "The online documentation has been updated to reflect these changes." msgstr "The online documentation has been updated to reflect these changes." msgid "" "The only accelerator supported in this release is Intel QuickAssist " "Technology (QAT), which produces a compressed file in gzip format. Refer to " "this `Cinder documentation `_ for more information about " "using this feature." msgstr "" "The only accelerator supported in this release is Intel QuickAssist " "Technology (QAT), which produces a compressed file in gzip format. Refer to " "this `Cinder documentation `_ for more information about " "using this feature." msgid "" "The optional driver feature \"Snapshot Attachment\" has been removed from " "the `Cinder Driver Support Matrix `_. It is an enhancment used for backups, it " "is not exposed via the Block Storage API, and its presence in the Support " "Matrix was misleading." msgstr "" "The optional driver feature \"Snapshot Attachment\" has been removed from " "the `Cinder Driver Support Matrix `_. It is an enhancement used for backups, it " "is not exposed via the Block Storage API, and its presence in the Support " "Matrix was misleading." msgid "" "The os_privileged_xxx and nova_xxx in the [default] section are deprecated " "in favor of the settings in the [nova] section." msgstr "" "The os_privileged_xxx and nova_xxx in the [default] section are deprecated " "in favour of the settings in the [nova] section." msgid "" "The oslo.middleware /healthcheck is now activated by default in the Cinder " "api-paste.ini. Operators can use it to configure HAproxy or the monitoring " "of Cinder APIs. Edit the ``api-paste.ini`` file and remove any healthcheck " "entries to disable this functionality." msgstr "" "The oslo.middleware /healthcheck is now activated by default in the Cinder " "api-paste.ini. Operators can use it to configure HAproxy or the monitoring " "of Cinder APIs. Edit the ``api-paste.ini`` file and remove any healthcheck " "entries to disable this functionality." msgid "" "The policy file to be used may be specified in the ``/etc/cinder/cinder." "conf`` file in the ``[oslo_policy]`` section as the value of the " "``policy_file`` configuration option. That way there's no question what file " "is being used." msgstr "" "The policy file to be used may be specified in the ``/etc/cinder/cinder." "conf`` file in the ``[oslo_policy]`` section as the value of the " "``policy_file`` configuration option. That way there's no question what file " "is being used." msgid "The policy is named ``group:reset_group_snapshot_status``." msgstr "The policy is named ``group:reset_group_snapshot_status``." msgid "" "The primary change in the Xena release is that cinder's default policy " "configuration will recognize the ``reader`` role on a project. 
Additionally," msgstr "" "The primary change in the Xena release is that Cinder's default policy " "configuration will recognise the ``reader`` role on a project. Additionally," msgid "" "The qemu-img tool now has resource limits applied which prevent it from " "using more than 1GB of address space or more than 2 seconds of CPU time. " "This provides protection against denial of service attacks from maliciously " "crafted or corrupted disk images." msgstr "" "The qemu-img tool now has resource limits applied which prevent it from " "using more than 1GB of address space or more than 2 seconds of CPU time. " "This provides protection against denial of service attacks from maliciously " "crafted or corrupted disk images." msgid "" "The reserve volume API was incorrectly enforcing \"volume:retype\" policy " "action. It has been corrected to \"volume_extension:volume_actions:reserve\"." msgstr "" "The reserve volume API was incorrectly enforcing \"volume:retype\" policy " "action. It has been corrected to \"volume_extension:volume_actions:reserve\"." msgid "" "The sample file is YAML (because unlike JSON, YAML allows comments). If you " "prefer, you may use a JSON policy file." msgstr "" "The sample file is YAML (because unlike JSON, YAML allows comments). If you " "prefer, you may use a JSON policy file." msgid "The specified value must include the following variables:" msgstr "The specified value must include the following variables:" msgid "The specified value must start with ``HBSD-``." msgstr "The specified value must start with ``HBSD-``." msgid "" "The storage protocol reporting via the REST API will be now the same for " "them all, using the preferred naming, FC, NVMe-oF, iSCSI, NFS..." msgstr "" "The storage protocol reporting via the REST API will be now the same for " "them all, using the preferred naming, FC, NVMe-oF, iSCSI, NFS..." msgid "" "The support for ``cinder.keymgr.barbican.BarbicanKeyManager`` and the " "``[keymgr]`` config section has now been removed. All configs should now be " "switched to use ``castellan.key_manager.barbican_key_manager." "BarbicanKeyManager`` and the ``[key_manager]`` config section." msgstr "" "The support for ``cinder.keymgr.barbican.BarbicanKeyManager`` and the " "``[keymgr]`` config section has now been removed. All configs should now be " "switched to use ``castellan.key_manager.barbican_key_manager." "BarbicanKeyManager`` and the ``[key_manager]`` config section." msgid "" "The time it takes to complete the revert-to-snapshot operation increases " "with the size of the volume. It is faster to create a new volume from a " "snapshot." msgstr "" "The time it takes to complete the revert-to-snapshot operation increases " "with the size of the volume. It is faster to create a new volume from a " "snapshot." msgid "" "The unsupported driver removal policy has been revised. See the \"Known " "Issues\" section of this document for more information." msgstr "" "The unsupported driver removal policy has been revised. See the \"Known " "Issues\" section of this document for more information." msgid "The updated_at timestamp is now returned in listing detail." msgstr "The updated_at timestamp is now returned in listing detail." msgid "" "The use of xml files for vmax backend configuration is now deprecated and " "will be removed during the following release. Deployers are encouraged to " "use the cinder.conf for configuring connections to the vmax." 
msgstr "" "The use of XML files for VMAX backend configuration is now deprecated and " "will be removed during the following release. Deployers are encouraged to " "use the cinder.conf for configuring connections to the VMAX." msgid "" "The v1 API was deprecated in the Juno release and is now defaulted to " "disabled. In order to still use the v1 API, you must now set " "``enable_v1_api`` to ``True`` in your cinder.conf file." msgstr "" "The v1 API was deprecated in the Juno release and is now defaulted to " "disabled. In order to still use the v1 API, you must now set " "``enable_v1_api`` to ``True`` in your cinder.conf file." msgid "" "The v2 API extensions os-volume-manage and os-snapshot-manage have been " "mapped to the v3 resources manageable_volumes and manageable_snapshots" msgstr "" "The v2 API extensions os-volume-manage and os-snapshot-manage have been " "mapped to the v3 resources manageable_volumes and manageable_snapshots" msgid "" "The volume-transfers list calls (``GET /v3/{project_id}/volume-transfers``, " "``GET /v3/{project_id}/volume-transfers/detail``) were not recognizing " "``name`` as a filterable attribute. That has been fixed in the current " "release." msgstr "" "The volume-transfers list calls (``GET /v3/{project_id}/volume-transfers``, " "``GET /v3/{project_id}/volume-transfers/detail``) were not recognising " "``name`` as a filterable attribute. That has been fixed in the current " "release." msgid "" "The volume-type detail response has been enhanced to include non-sensitive " "\"extra-specs\" information in order to provide more data for automated " "systems to select a volume type." msgstr "" "The volume-type detail response has been enhanced to include non-sensitive " "\"extra-specs\" information in order to provide more data for automated " "systems to select a volume type." msgid "" "The volume_clear option to use `shred` was deprecated in the Newton release " "and has now been removed. Since deprecation, this option has performed the " "same action as the `zero` option. Config settings for `shred` should be " "updated to be set to `zero` for continued operation." msgstr "" "The volume_clear option to use `shred` was deprecated in the Newton release " "and has now been removed. Since deprecation, this option has performed the " "same action as the `zero` option. Configuration settings for `shred` should " "be updated to be set to `zero` for continued operation." msgid "" "The volumes created by VMware VMDK driver will be displayed as \"managed by " "OpenStack Cinder\" in vCenter server." msgstr "" "The volumes created by VMware VMDK driver will be displayed as \"managed by " "OpenStack Cinder\" in vCenter server." msgid "" "The xiv_ds8k driver now supports IBM XIV, Spectrum Accelerate, FlashSystem " "A9000, FlashSystem A9000R and DS8000 storage systems, and was renamed to IBM " "Storage Driver for OpenStack. The changes include text changes, file names, " "names of cinder.conf flags, and names of the proxy classes." msgstr "" "The xiv_ds8k driver now supports IBM XIV, Spectrum Accelerate, FlashSystem " "A9000, FlashSystem A9000R and DS8000 storage systems, and was renamed to IBM " "Storage Driver for OpenStack. The changes include text changes, file names, " "names of cinder.conf flags, and names of the proxy classes." msgid "Then the volume will be assigned the ``__DEFAULT__`` type." msgstr "Then the volume will be assigned the ``__DEFAULT__`` type." 
msgid "" "There is a new policy option ``volume:force_delete`` which controls access " "to the ability to specify force delete via the volume delete API. This is " "separate from the pre-existing ``volume-admin-actions:force_delete`` policy " "check." msgstr "" "There is a new policy option ``volume:force_delete`` which controls access " "to the ability to specify force delete via the volume delete API. This is " "separate from the pre-existing ``volume-admin-actions:force_delete`` policy " "check." msgid "" "There is a race condition between the delete attachment and delete volume " "operations that has been observed when running cinder-csi. This race can " "leave deleted volumes stuck as attached to instances. The cinder team is " "working on a solution which is expected to be backported to a future release " "in the Xena series. The issue is being tracked as `Bug #1937084 `_." msgstr "" "There is a race condition between the deleted attachment and delete volume " "operations that have been observed when running cinder-csi. This race can " "leave deleted volumes stuck as attached to instances. The cinder team is " "working on a solution which is expected to be backported to a future release " "in the Xena series. The issue is being tracked as `Bug #1937084 `_." msgid "" "There must always be at least one volume-type defined in a Cinder " "installation. This is enforced by the type-delete call." msgstr "" "There must always be at least one volume-type defined in a Cinder " "installation. This is enforced by the type-delete call." msgid "" "There was a problem where the volume could not be deleted because the copy " "session information acquired by SMI-S IF from ETERNUS DX Storage, which was " "cached and did not reflect the information that had just been executed." msgstr "" "There was a problem where the volume could not be deleted because the copy " "session information acquired by SMI-S IF from ETERNUS DX Storage, which was " "cached and did not reflect the information that had just been executed." msgid "" "These two parameters can be utilized in future implementations of functions " "related to ``filter_function``." msgstr "" "These two parameters can be utilised in future implementations of functions " "related to ``filter_function``." msgid "" "This PowerMax driver moves the legacy shared volume from the masking view " "structure in Ocata and prior releases (when SMI-S was supported) to staging " "masking view(s) in Pike and later releases (U4P REST). In Ocata, the live " "migration process shared the storage group, containing the volume, among the " "different compute nodes. In Pike, we changed the masking view structure to " "facilitate a cleaner live migration process where only the intended volume " "is migrated without exposing other volumes in the storage group. The staging " "storage group and masking views facilitate a seamless live migration " "operation in upgraded releases." msgstr "" "This PowerMax driver moves the legacy shared volume from the masking view " "structure in Ocata and prior releases (when SMI-S was supported) to staging " "masking view(s) in Pike and later releases (U4P REST). In Ocata, the live " "migration process shared the storage group, containing the volume, among the " "different compute nodes. In Pike, we changed the masking view structure to " "facilitate a cleaner live migration process where only the intended volume " "is migrated without exposing other volumes in the storage group. 
The staging " "storage group and masking views facilitate a seamless live migration " "operation in upgraded releases." msgid "" "This PowerMax driver now puts the unmanaged \"orphan\" volume in a storage " "group called OS-Unmanaged. It is not possible to query a volume's associated " "snapvx snapshots using the PowerMax management software, unless it belongs " "to a storage group." msgstr "" "This PowerMax driver now puts the unmanaged \"orphan\" volume in a storage " "group called OS-Unmanaged. It is not possible to query a volume's associated " "snapvx snapshots using the PowerMax management software, unless it belongs " "to a storage group." msgid "" "This also simplifies deployment with other WSGI servers that expect module " "paths such as gunicorn." msgstr "" "This also simplifies deployment with other WSGI servers that expect module " "paths such as gunicorn." msgid "" "This change also preserves the stderr that was lost previously in order to " "ease debugging." msgstr "" "This change also preserves the stderr that was lost previously in order to " "ease debugging." msgid "" "This change fixes bug 1845483 - 3PAR: For Peer Persistence, add policy " "options in RCG. Following options are added after Remote Copy Group (RCG) is " "created: autoFailover, pathManagement, autoRecover" msgstr "" "This change fixes bug 1845483 - 3PAR: For Peer Persistence, add policy " "options in RCG. Following options are added after Remote Copy Group (RCG) is " "created: autoFailover, pathManagement, autoRecover" msgid "" "This change is to update the live migration ability in environments using " "PowerMax. In previous 2023.1 version, the live migration fails without a " "pool name. This update add the ability of live migration without a pool name." msgstr "" "This change is to update the live migration ability in environments using " "PowerMax. In the previous 2023.1 version, the live migration fails without a " "pool name. This update adds the ability of live migration without a pool " "name." msgid "" "This feature is not directly user-facing. To enable it, an operator must " "add the field ``image_service:store_id`` in the volume-type extra-specs. " "The value of the field is a valid store identifier (``id``) configured in " "Glance, which may be discovered by making a ``GET /v2/info/stores`` call to " "the Image Service API." msgstr "" "This feature is not directly user-facing. To enable it, an operator must " "add the field ``image_service:store_id`` in the volume-type extra-specs. " "The value of the field is a valid store identifier (``id``) configured in " "Glance, which may be discovered by making a ``GET /v2/info/stores`` call to " "the Image Service API." msgid "" "This helps deployments with a large number of volumes and prevent issues on " "deployments with a growing number of volumes at the small cost of a slightly " "less accurate stats being reported to the scheduler." msgstr "" "This helps deployments with a large number of volumes and prevents issues on " "deployments with a growing number of volumes at the small cost of slightly " "less accurate stats being reported to the scheduler." msgid "" "This is made an optional configuration because it only applies to very " "specific environments. If we were to make this global that would require a " "rootwrap/privsep update that could break compatibility when trying to do " "rolling upgrades of the volume service." msgstr "" "This is made an optional configuration because it only applies to very " "specific environments. 
If we were to make this global that would require a " "rootwrap/privsep update that could break compatibility when trying to do " "rolling upgrades of the volume service." msgid "This is replaced with the host name of the connecting node." msgstr "This is replaced with the host name of the connecting node." msgid "" "This migration requires the existence of a ``__DEFAULT__`` volume type. If " "you have renamed (or renamed and deleted) the ``__DEFAULT__`` volume type in " "Train, you must re-create it before running the online migrations. (If you " "renamed it, you don't have to un-rename it; you can create a new one just " "for the purposes of the online database migration.)" msgstr "" "This migration requires the existence of a ``__DEFAULT__`` volume type. If " "you have renamed (or renamed and deleted) the ``__DEFAULT__`` volume type in " "Train, you must re-create it before running the online migrations. (If you " "renamed it, you don't have to un-rename it; you can create a new one just " "for the purposes of the online database migration.)" msgid "" "This note applies to deployments that are using the cinder configuration " "option ``volume_copy_bps_limit`` in its non-default value (the default is 0)." msgstr "" "This note applies to deployments that are using the Cinder configuration " "option ``volume_copy_bps_limit`` in its non-default value (the default is 0)." msgid "This option affects three Block Storage API calls:" msgstr "This option affects three Block Storage API calls:" msgid "This patch also enabled the ``multiattach`` in the driver information." msgstr "This patch also enabled the ``multiattach`` in the driver information." msgid "" "This problem has been addressed through improvements in information " "retrieval." msgstr "" "This problem has been addressed through improvements in information " "retrieval." msgid "" "This release contains a fix for `Bug #1908315 `_, which changes the default value of the policy " "governing the Block Storage API action `Reset group snapshot status `_ " "to make the action administrator-only. This policy was inadvertently " "changed to be admin-or-owner during the Queens development cycle." msgstr "" "This release contains a fix for `Bug #1908315 `_, which changes the default value of the policy " "governing the Block Storage API action `Reset group snapshot status `_ " "to make the action administrator-only. This policy was inadvertently " "changed to be admin-or-owner during the Queens development cycle." msgid "" "This release contains a partial fix for an upgrade issue. If you are " "upgrading a Train deployment of cinder to Ussuri, under specific " "circumstances you may need to take actions outside the normal upgrade " "process in order to accomplish a successful upgrade. In particular, there " "may be changes you must make in your Train deployment **before** you " "upgrade. See the \"Upgrade Notes\" and \"Bug Fixes\" sections of these " "release notes for details." msgstr "" "This release contains a partial fix for an upgrade issue. If you are " "upgrading a Train deployment of cinder to Ussuri, under specific " "circumstances you may need to take actions outside the normal upgrade " "process in order to accomplish a successful upgrade. In particular, there " "may be changes you must make in your Train deployment **before** you " "upgrade. See the \"Upgrade Notes\" and \"Bug Fixes\" sections of these " "release notes for details." 
msgid "" "This release improves the handling of the ``__DEFAULT__`` volume-type (see " "\"Other Notes\", below) and fixes `Bug #1879578 `_, in which the default type was applied too " "aggressively (see the discussion of this issue in \"Bug Fixes\", below)." msgstr "" "This release improves the handling of the ``__DEFAULT__`` volume-type (see " "\"Other Notes\", below) and fixes `Bug #1879578 `_, in which the default type was applied too " "aggressively (see the discussion of this issue in \"Bug Fixes\", below)." msgid "" "This release includes support for Glance automatic image colocation. When a " "volume originally created from an image is uploaded to the Image service, " "Cinder passes Glance a reference to the original image. Glance may use this " "information to colocate the new image data in the same image store(s) as the " "original image data. Consult the Glance documentation for more information." msgstr "" "This release includes support for Glance automatic image colocation. When a " "volume originally created from an image is uploaded to the Image service, " "Cinder passes Glance a reference to the original image. Glance may use this " "information to colocate the new image data in the same image store(s) as the " "original image data. Consult the Glance documentation for more information." msgid "" "This release includes support for Glance multiple stores. An operator may " "now specify which Glance store will be used when a volume is uploaded to " "Glance as an image. Some details about this feature:" msgstr "" "This release includes support for Glance multiple stores. An operator may " "now specify which Glance store will be used when a volume is uploaded to " "Glance as an image. Some details about this feature:" msgid "" "This release introduces a new configuration option, ``vmdk_allowed_types``, " "that specifies the list of VMDK image subformats that Cinder will allow in " "order to prevent exposure of host information by modifying the named extents " "in a VMDK image. The default setting allows only the 'streamOptimized' and " "'monolithicSparse' subformats, which do not use named extents." msgstr "" "This release introduces a new configuration option, ``vmdk_allowed_types``, " "that specifies the list of VMDK image subformats that Cinder will allow in " "order to prevent exposure of host information by modifying the named extents " "in a VMDK image. The default setting allows only the 'streamOptimized' and " "'monolithicSparse' subformats, which do not use named extents." msgid "" "This release introduces a new configuration option, ``vmdk_allowed_types``, " "that specifies the list of VMDK image subformats that Cinder will allow. " "The default setting allows only the 'streamOptimized' and 'monolithicSparse' " "subformats, which do not use named extents." msgstr "" "This release introduces a new configuration option, ``vmdk_allowed_types``, " "that specifies the list of VMDK image subformats that Cinder will allow. " "The default setting allows only the 'streamOptimized' and 'monolithicSparse' " "subformats, which do not use named extents." msgid "" "This release modifies the online database migrations to address an an " "upgrade issue (`Bug #1893107 `_). The issue does not manifest itself in the Train release " "of cinder, but under specific circumstances it can prevent a cinder database " "upgrade from Train to Ussuri." msgstr "" "This release modifies the online database migrations to address an an " "upgrade issue (`Bug #1893107 `_). 
The issue does not manifest itself in the Train release " "of Cinder, but under specific circumstances it can prevent a Cinder database " "upgrade from Train to Ussuri." msgid "" "This release partially fixes an upgrade issue (`Bug #1893107 `_) that under some circumstances could " "prevent a cinder database upgrade from Train to Ussuri. The issue would " "occur if you had upgraded an unpurged Stein database to Train, and then " "attempted to upgrade the still unpurged database to Ussuri. If this " "describes your situation, please read further, because in order to avert " "this issue, there are some steps you may need to take in your **Train** " "deployment *before* you upgrade to Ussuri." msgstr "" "This release partially fixes an upgrade issue (`Bug #1893107 `_) that under some circumstances could " "prevent a Cinder database upgrade from Train to Ussuri. The issue would " "occur if you had upgraded an unpurged Stein database to Train, and then " "attempted to upgrade the still unpurged database to Ussuri. If this " "describes your situation, please read further, because in order to avert " "this issue, there are some steps you may need to take in your **Train** " "deployment *before* you upgrade to Ussuri." msgid "" "This release restores the intended behavior, which is described as follows:" msgstr "" "This release restores the intended behaviour, which is described as follows:" msgid "" "This release updates the os-brick library used by Cinder to version 2.10.4 " "to correct a problem with the fix for `Bug #1823200 `_ in the previous os-brick release." msgstr "" "This release updates the os-brick library used by Cinder to version 2.10.4 " "to correct a problem with the fix for `Bug #1823200 `_ in the previous os-brick release." msgid "" "This release updates the os-brick library used by Cinder to version 2.10.5 " "to correct hopefully the final problem with the fix for `Bug #1823200 " "`_ in the previous os-brick " "release." msgstr "" "This release updates the os-brick library used by Cinder to version 2.10.5 " "to correct hopefully the final problem with the fix for `Bug #1823200 " "`_ in the previous os-brick " "release." msgid "" "This release updates the os-brick library used by Cinder to version 2.8.6 to " "correct a problem with the fix for `Bug #1823200 `_ in the previous os-brick release. It does not " "contain any Cinder code changes." msgstr "" "This release updates the os-brick library used by Cinder to version 2.8.6 to " "correct a problem with the fix for `Bug #1823200 `_ in the previous os-brick release. It does not " "contain any Cinder code changes." msgid "" "This release updates the os-brick library used by Cinder to version 2.8.7 to " "correct hopefully the final problem with the fix for `Bug #1823200 `_ in the previous os-brick release." msgstr "" "This release updates the os-brick library used by Cinder to version 2.8.7 to " "correct hopefully the final problem with the fix for `Bug #1823200 `_ in the previous os-brick release." msgid "" "This release updates the os-brick library used by Cinder to version 3.0.3 to " "correct hopefully the final problem with the fix for `Bug #1823200 `_ in the previous os-brick release." msgstr "" "This release updates the os-brick library used by Cinder to version 3.0.3 to " "correct hopefully the final problem with the fix for `Bug #1823200 `_ in the previous os-brick release." msgid "" "This requires that ``nvmeof_conn_info_version`` configuration option is set " "to ``2`` as well." 
msgstr "" "This requires that ``nvmeof_conn_info_version`` configuration option is set " "to ``2`` as well." msgid "" "This setting is *not recommended* by the Cinder project team, as it may " "allow end users to put a group snapshot into an invalid status with " "indeterminate consequences." msgstr "" "This setting is *not recommended* by the Cinder project team, as it may " "allow end users to put a group snapshot into an invalid status with " "indeterminate consequences." msgid "" "This upgrade notice applies to you only if **all** of the following " "conditions are met:" msgstr "" "This upgrade notice applies to you only if **all** of the following " "conditions are met:" msgid "" "This will allow consumers of these notifications to make a smooth " "transition. In the Victoria release, ``os-reset_status`` notifications will " "*only* be sent to the standard publisher_ids." msgstr "" "This will allow consumers of these notifications to make a smooth " "transition. In the Victoria release, ``os-reset_status`` notifications will " "*only* be sent to the standard publisher_ids." msgid "" "This will generate a file named ``policy.yaml`` in the ``etc/cinder`` " "directory of your checked-out Cinder repository." msgstr "" "This will generate a file named ``policy.yaml`` in the ``etc/cinder`` " "directory of your checked-out Cinder repository." msgid "" "Thus, we recommend that operators who want to manage a storage object in an " "NFS-based storage backend as a cinder volume should not do this with a qcow2 " "image that is in the version 2 format, but should change it to the qcow2-v3 " "format first." msgstr "" "Thus, we recommend that operators who want to manage a storage object in an " "NFS-based storage backend as a Cinder volume should not do this with a qcow2 " "image that is in the version 2 format but should change it to the qcow2-v3 " "format first." msgid "" "To accommodate these environments, and to maintain backward compatibility in " "Newton we add a ``lvm_suppress_fd_warnings`` bool config to the LVM driver. " "Setting this to True will append the LVM env vars to include the variable " "``LVM_SUPPRESS_FD_WARNINGS=1``." msgstr "" "To accommodate these environments, and to maintain backward compatibility in " "Newton we add a ``lvm_suppress_fd_warnings`` bool config to the LVM driver. " "Setting this to True will append the LVM environment variables to include " "the variable ``LVM_SUPPRESS_FD_WARNINGS=1``." msgid "" "To address backwards compatibility, the new rules added to the volume_type." "py policy file, default to the existing rule, ``volume_extension:" "volume_type_encryption``, if it is set to a non-default value." msgstr "" "To address backwards compatibility, the new rules added to the volume_type." "py policy file, default to the existing rule, ``volume_extension:" "volume_type_encryption``, if it is set to a non-default value." msgid "" "To find out what policies are available and what their default values are, " "you can generate a sample policy file. To do this, you must have a local " "copy of the Cinder source code repository. From the top level directory, run " "the command::" msgstr "" "To find out what policies are available and what their default values are, " "you can generate a sample policy file. To do this, you must have a local " "copy of the Cinder source code repository. 
From the top level directory, run " "the command::" msgid "" "To get rid of long running DB data migrations that must be run offline, " "Cinder will now be able to execute them online, on a live cloud. Before " "upgrading from Ocata to Pike, operator needs to perform all the Newton data " "migrations. To achieve that he needs to perform ``cinder-manage db " "online_data_migrations`` until there are no records to be updated. To limit " "DB performance impact migrations can be performed in chunks limited by ``--" "max_number`` option. If your intent is to upgrade Cinder in a non-live " "manner, you can use ``--ignore_state`` option safely. Please note that " "finishing all the Newton data migrations will be enforced by the first " "schema migration in Pike, so you won't be able to upgrade to Pike without " "that." msgstr "" "To get rid of long running DB data migrations that must be run offline, " "Cinder will now be able to execute them online, on a live cloud. Before " "upgrading from Ocata to Pike, the operator needs to perform all the Newton " "data migrations. To achieve that, they need to run ``cinder-manage db " "online_data_migrations`` until there are no records to be updated. To limit " "DB performance impact, migrations can be performed in chunks limited by the " "``--max_number`` option. If your intent is to upgrade Cinder in a non-live " "manner, you can safely use the ``--ignore_state`` option. Please note that " "finishing all the Newton data migrations will be enforced by the first " "schema migration in Pike, so you won't be able to upgrade to Pike without " "that." msgid "" "To improve the volume deletion process, add a step to check associated copy " "sessions. Additionally, it also improves the process of retrieving storage-" "managed volume numbers." msgstr "" "To improve the volume deletion process, add a step to check associated copy " "sessions. Additionally, it also improves the process of retrieving storage-" "managed volume numbers." msgid "" "To make the volume extendable after creating a snapshot, an additional " "parameter ``fujitsu_use_cli_copy`` is introduced with a default value of " "``False``." msgstr "" "To make the volume extendable after creating a snapshot, an additional " "parameter ``fujitsu_use_cli_copy`` is introduced with a default value of " "``False``." msgid "Train Series Release Notes" msgstr "Train Series Release Notes" msgid "" "Two new checks are added to the ``cinder-status upgrade check`` CLI to " "ensure that online data migrations from Queens onward have been completed." msgstr "" "Two new checks are added to the ``cinder-status upgrade check`` CLI to " "ensure that online data migrations from Queens onward have been completed." msgid "" "Two new policies \"volume_extension:type_get\" and \"volume_extension:" "type_get_all\" have been added to control type show and type list APIs." msgstr "" "Two new policies \"volume_extension:type_get\" and \"volume_extension:" "type_get_all\" have been added to control type show and type list APIs." msgid "" "Two new policies are introduced to govern the volume reimage functionality " "introduced with microversion 3.68:" msgstr "" "Two new policies are introduced to govern the volume reimage functionality " "introduced with microversion 3.68:" msgid "Two new policies are introduced to govern this functionality:" msgstr "Two new policies are introduced to govern this functionality:" msgid "" "Under some circumstances, NFS-based backend drivers will store a volume as a " "qcow2 image. 
Thus cinder allows for the possibility that an operator may " "choose to manage a storage object in an NFS-based backend that is a qcow2 " "image." msgstr "" "Under some circumstances, NFS-based backend drivers will store a volume as a " "qcow2 image. Thus Cinder allows for the possibility that an operator may " "choose to manage a storage object in an NFS-based backend that is a qcow2 " "image." msgid "" "Unified how cinder calculates the virtual free storage space for a pool. " "Previously Cinder had 2 different mechanisms for calculating the virtual " "free storage. Now both the Capacity Filter and the Capacity Weigher use the " "same mechanism, which is based upon the defined terms in https://specs." "openstack.org/openstack/cinder-specs/specs/queens/provisioning-improvements." "html" msgstr "" "Unified how Cinder calculates the virtual free storage space for a pool. " "Previously Cinder had two different mechanisms for calculating the virtual " "free storage. Now both the Capacity Filter and the Capacity Weigher use the " "same mechanism, which is based upon the defined terms in https://specs." "openstack.org/openstack/cinder-specs/specs/queens/provisioning-improvements." "html" msgid "Unmanage volumes" msgstr "Unmanage volumes" msgid "Update backend state in scheduler when extending volume." msgstr "Update backend state in scheduler when extending volume." msgid "" "Update group (`#1876133 `_)" msgstr "" "Update group (`#1876133 `_)" msgid "" "Updated the parameter storwize_preferred_host_site from StrOpt to DictOpt in " "cinder back-end configuration, and removed it from volume type configuration." msgstr "" "Updated the parameter storwize_preferred_host_site from StrOpt to DictOpt in " "cinder back-end configuration, and removed it from volume type configuration." msgid "" "Updated the parameter storwzie_preferred_host_site from StrOpt to DictOpt in " "cinder back-end configuration, and removed it from volume type configuration." msgstr "" "Updated the parameter storwzie_preferred_host_site from StrOpt to DictOpt in " "Cinder back-end configuration, and removed it from volume type configuration." msgid "" "Updating the Datera Elastic DataFabric Storage Driver to version 2.1. This " "adds ACL support, Multipath support and basic IP pool support." msgstr "" "Updating the Datera Elastic DataFabric Storage Driver to version 2.1. This " "adds ACL support, Multipath support and basic IP pool support." msgid "Upgrade Notes" msgstr "Upgrade Notes" msgid "" "Upgrade to Ussuri, but run the online database migrations **before** you run " "the db_sync. (The normal ordering is to run db_sync first, and then run the " "online migrations.)" msgstr "" "Upgrade to Ussuri, but run the online database migrations **before** you run " "the db_sync. (The normal ordering is to run db_sync first, and then run the " "online migrations.)" msgid "" "Upgrade your Train deployment to cinder 15.4.0 or more recent and re-run the " "online database migrations in your Train deployment." msgstr "" "Upgrade your Train deployment to Cinder 15.4.0 or more recent and re-run the " "online database migrations in your Train deployment." msgid "" "Upgrades are not affected by the new functionality whereby a project_id is " "no longer required in API URLs. The legacy behavior in which a project_id is " "included in the URL continues to be supported." msgstr "" "Upgrades are not affected by the new functionality whereby a project_id is " "no longer required in API URLs. 
The legacy behaviour in which a project_id " "is included in the URL continues to be supported." msgid "" "Upload volume to image: ``POST /v3/volumes/{volume_id}/action`` with the " "``os-volume_upload_image`` action. This call will result in a 400 (Bad " "Request) response when an image ``disk_format`` that would require " "conversion is requested." msgstr "" "Upload volume to image: ``POST /v3/volumes/{volume_id}/action`` with the " "``os-volume_upload_image`` action. This call will result in a 400 (Bad " "Request) response when an image ``disk_format`` that would require " "conversion is requested." msgid "" "Usable characters are alphanumerics, \".\", \"@\", \"_\", \":\", \"-\", " "\"{\" and \"}\". \"{\" and \"}\" can be used only in variables." msgstr "" "Usable characters are alphanumerics, \".\", \"@\", \"_\", \":\", \"-\", " "\"{\" and \"}\". \"{\" and \"}\" can be used only in variables." msgid "" "Use of JSON formatted policy files was deprecated by the ``oslo.policy`` " "library during the Victoria development cycle. As a result, this deprecation " "is being noted in the Wallaby cycle with an anticipated future removal of " "JSON formatted file support by ``oslo.policy``. As such operators will need " "to convert to YAML policy files. Use the `oslopolicy-convert-json-to-yaml " "`_ tool to convert the existing JSON formatted policy file to " "YAML in a backward compatible way." msgstr "" "Use of JSON formatted policy files was deprecated by the ``oslo.policy`` " "library during the Victoria development cycle. As a result, this deprecation " "is being noted in the Wallaby cycle with an anticipated future removal of " "JSON formatted file support by ``oslo.policy``. As such operators will need " "to convert to YAML policy files. Use the `oslopolicy-convert-json-to-yaml " "`_ tool to convert the existing JSON formatted policy file to " "YAML in a backward compatible way." msgid "" "Use the directions above if you need to re-create the ``__DEFAULT__`` volume " "type." msgstr "" "Use the directions above if you need to re-create the ``__DEFAULT__`` volume " "type." msgid "" "Users of the Datera Cinder driver are now required to use Datera DataFabric " "version 1.0+. Versions before 1.0 will not be able to utilize this new " "driver since they still function on v1 of the Datera DataFabric API" msgstr "" "Users of the Datera Cinder driver are now required to use Datera DataFabric " "version 1.0+. Versions before 1.0 will not be able to utilise this new " "driver since they still function on v1 of the Datera DataFabric API" msgid "" "Users of the IBM Storage Driver, previously known as the IBM XIV/DS8K " "driver, upgrading from Mitaka or previous releases, need to reconfigure the " "relevant cinder.conf entries. In most cases the change is just removal of " "the xiv-ds8k field prefix, but for details use the driver documentation." msgstr "" "Users of the IBM Storage Driver, previously known as the IBM XIV/DS8K " "driver, upgrading from Mitaka or previous releases, need to reconfigure the " "relevant cinder.conf entries. In most cases the change is just removal of " "the xiv-ds8k field prefix, but for details use the driver documentation." msgid "" "Users of the ibmnas driver should switch to using the IBM GPFS driver to " "enable Cinder access to IBM NAS resources. For details configuring the IBM " "GPFS driver, see the GPFS config reference. 
- http://docs.openstack.org/" "liberty/config-reference/content/GPFS-driver.html" msgstr "" "Users of the ibmnas driver should switch to using the IBM GPFS driver to " "enable Cinder access to IBM NAS resources. For details configuring the IBM " "GPFS driver, see the GPFS config reference. - http://docs.openstack.org/" "liberty/config-reference/content/GPFS-driver.html" msgid "" "Using the v2 clone format for cloned volumes allows volumes with dependent " "images to be moved to the trash - where they remain until purged - and allow " "the RBD driver to postpone the deletion until the volume has no dependent " "images. Configuring the trash purge is recommended to avoid wasting space " "with these trashed volumes. Since the Ceph Octopus release, the trash can be " "configured to automatically purge on a defined schedule. See the ``rbd trash " "purge schedule`` commands in the `rbd manpage `_." msgstr "" "Using the v2 clone format for cloned volumes allows volumes with dependent " "images to be moved to the Rubbish Bin - where they remain until purged - and " "allow the RBD driver to postpone the deletion until the volume has no " "dependent images. Configuring the Rubbish Bin purge is recommended to avoid " "wasting space with these volumes. Since the Ceph Octopus release, the " "Rubbish Bin can be configured to automatically purge on a defined schedule. " "See the ``rbd trash purge schedule`` commands in the `rbd manpage `_." msgid "Ussuri Series Release Notes" msgstr "Ussuri Series Release Notes" msgid "VMAX driver - Removed deprecated option ``cinder_dell_emc_config_file``" msgstr "" "VMAX driver - Removed deprecated option ``cinder_dell_emc_config_file``" msgid "" "VMAX driver - configuration tag san_rest_port will be replaced by " "san_api_port in the next release." msgstr "" "VMAX driver - configuration tag san_rest_port will be replaced by " "san_api_port in the next release." msgid "VMAX driver - fixes SSL certificate verification error." msgstr "VMAX driver - fixes SSL certificate verification error." msgid "" "VMAX driver support for new configuration option - vmax_snapvx_unlink_limit " "for specifying the maximum number of unlinks which will be performed before " "a clone operation. Default value is 3" msgstr "" "VMAX driver support for new configuration option - vmax_snapvx_unlink_limit " "for specifying the maximum number of unlinks which will be performed before " "a clone operation. Default value is 3" msgid "" "VMAX driver version 3.0, replacing SMI-S with Unisphere REST. This driver " "supports VMAX3 hybrid and All Flash arrays." msgstr "" "VMAX driver version 3.0, replacing SMI-S with Unisphere REST. This driver " "supports VMAX3 hybrid and All Flash arrays." msgid "" "VMware VMDK driver and FCD driver now support NFS 4.1 datastores in vCenter " "server." msgstr "" "VMware VMDK driver and FCD driver now support NFS 4.1 datastores in vCenter " "server." msgid "" "VMware VMDK driver and FCD driver now support a config option " "``vmware_datastore_regex`` to specify the regular expression pattern to " "match the name of datastores where backend volumes are created." msgstr "" "VMware VMDK driver and FCD driver now support a config option " "``vmware_datastore_regex`` to specify the regular expression pattern to " "match the name of datastores where backend volumes are created." 
msgid "VMware VMDK driver deprecated the support for vCenter version 5.1" msgstr "VMware VMDK driver deprecated the support for vCenter version 5.1" msgid "" "VMware VMDK driver now supports a config option ``vmware_lazy_create`` to " "disable the default behavior of lazy creation of raw volumes in the backend." msgstr "" "VMware VMDK driver now supports a config option ``vmware_lazy_create`` to " "disable the default behaviour of lazy creation of raw volumes in the backend." msgid "" "VMware VMDK driver now supports a config option ``vmware_storage_profile`` " "to specify a list with names of storage profiles to be monitored for " "capacity." msgstr "" "VMware VMDK driver now supports a config option ``vmware_storage_profile`` " "to specify a list with names of storage profiles to be monitored for " "capacity." msgid "" "VMware VMDK driver now supports changing adpater type using retype. To " "change the adapter type, set ``vmware:adapter_type`` in the new volume type." msgstr "" "VMware VMDK driver now supports changing adapter type using retype. To " "change the adapter type, set ``vmware:adapter_type`` in the new volume type." msgid "" "VMware VMDK driver now supports vSphere template as a volume snapshot format " "in vCenter server. The snapshot format in vCenter server can be specified " "using driver config option ``vmware_snapshot_format``." msgstr "" "VMware VMDK driver now supports vSphere template as a volume snapshot format " "in vCenter server. The snapshot format in vCenter server can be specified " "using driver config option ``vmware_snapshot_format``." msgid "" "VMware VMDK driver now supports volume type extra-spec option ``vmware:" "adapter_type`` to specify the adapter type of volumes in vCenter server." msgstr "" "VMware VMDK driver now supports volume type extra-spec option ``vmware:" "adapter_type`` to specify the adapter type of volumes in vCenter server." msgid "" "VMware VMDK driver will use vSphere template as the default snapshot format " "in vCenter server." msgstr "" "VMware VMDK driver will use vSphere template as the default snapshot format " "in vCenter server." msgid "" "VMware vmdk driver: The collection of volume stats, which had been disabled, " "may now be turned on by using the ``vmware_enable_volume_stats`` " "configuration option. The default for this option is False (no stats " "collection). Be aware that enabling volume stats may cause performance " "issues under high load." msgstr "" "VMware vmdk driver: The collection of volume stats, which had been disabled, " "may now be turned on by using the ``vmware_enable_volume_stats`` " "configuration option. The default for this option is False (no stats " "collection). Be aware that enabling volume stats may cause performance " "issues under high load." msgid "" "VMware vmdk driver: The vmware vmdk driver had its get_volume_stats removed " "in a previous release due to a potential performance hit of 20% at a high " "load. The problem with reporting ``unknown`` back to the scheduler, is that " "it effectively removes cinder's ability to properly schedule based on " "capacity utilization. When this driver is enabled in a heterogenous " "environment without properly reporting utilization statistics, the " "scheduler's capacity filter will always allow this driver to service a " "provisioning request. Without reporting the backend stats, the capacity " "filter also can't determine the reserved_percentage as well as the " "max_over_subscription_ratio. 
To enable the collection of stats set " "``vmware_enable_volume_stats`` to True in the driver section of cinder." "conf. The default setting is False. Keep in mind that there may be a " "degradation in performance on the vcenter when enabling this setting." msgstr "" "VMware vmdk driver: The VMware vmdk driver had its get_volume_stats removed " "in a previous release due to a potential performance hit of 20% at a high " "load. The problem with reporting ``unknown`` back to the scheduler, is that " "it effectively removes Cinder's ability to properly schedule based on " "capacity utilisation. When this driver is enabled in a heterogeneous " "environment without properly reporting utilisation statistics, the " "scheduler's capacity filter will always allow this driver to service a " "provisioning request. Without reporting the backend stats, the capacity " "filter also can't determine the reserved_percentage as well as the " "max_over_subscription_ratio. To enable the collection of stats set " "``vmware_enable_volume_stats`` to True in the driver section of cinder." "conf. The default setting is False. Keep in mind that there may be a " "degradation in performance on the vCenter when enabling this setting." msgid "" "VNX cinder driver now supports async migration during volume cloning. By " "default, the cloned volume will be available after the migration starts in " "the VNX instead of waiting for the completion of migration. This greatly " "accelerates the cloning process. If user wants to disable this, he could add " "``--metadata async_migrate=False`` when creating volume from source volume/" "snapshot." msgstr "" "VNX cinder driver now supports async migration during volume cloning. By " "default, the cloned volume will be available after the migration starts in " "the VNX instead of waiting for the completion of migration. This greatly " "accelerates the cloning process. If user wants to disable this, he could add " "``--metadata async_migrate=False`` when creating volume from source volume/" "snapshot." msgid "Veritas ACCESS iSCSI driver" msgstr "Veritas ACCESS iSCSI driver" msgid "Veritas Clustered NFS driver" msgstr "Veritas Clustered NFS driver" msgid "" "Version 3 of the qcow2 format has been the default for qcow2 creation in " "qemu-img since QEMU-1.7 (December 2013), and operating system vendors are " "discussing discontinuing (or limiting) support of the version 2 format in " "upcoming releases." msgstr "" "Version 3 of the qcow2 format has been the default for qcow2 creation in " "qemu-img since QEMU-1.7 (December 2013), and operating system vendors are " "discussing discontinuing (or limiting) support of the version 2 format in " "upcoming releases." msgid "Victoria Series Release Notes" msgstr "Victoria Series Release Notes" msgid "Violin" msgstr "Violin" msgid "Violin Memory 6000 array series drivers are removed." msgstr "Violin Memory 6000 array series drivers are removed." msgid "Virtuozzo Storage driver" msgstr "Virtuozzo Storage driver" msgid "" "Volume \"force delete\" was introduced with the 3.23 API microversion, " "however the check for in the service was incorrectly looking for " "microversion 3.2. That check has now been fixed. It is possible that an API " "call using a microversion below 3.23 would previously work for this call, " "which will now fail. This closes `bug #1783028 `_." msgstr "" "Volume \"force delete\" was introduced with the 3.23 API microversion, " "however the check for in the service was incorrectly looking for " "microversion 3.2. 
That check has now been fixed. It is possible that an API " "call using a microversion below 3.23 would previously work for this call, " "which will now fail. This closes `bug #1783028 `_." msgid "Volume Manage/Unmanage support for Datera Volume Drivers" msgstr "Volume Manage/Unmanage support for Datera Volume Drivers" msgid "" "Volume Manager now uses the configuration option ``init_host_max_objects`` " "retrieval to set max number of volumes and snapshots to be retrieved per " "batch during volume manager host initialization. Query results will be " "obtained in batches from the database and not in one shot to avoid extreme " "memory usage. Default value is 0 and disables this functionality." msgstr "" "Volume Manager now uses the configuration option ``init_host_max_objects`` " "retrieval to set max number of volumes and snapshots to be retrieved per " "batch during volume manager host initialization. Query results will be " "obtained in batches from the database and not in one shot to avoid extreme " "memory usage. Default value is 0 and disables this functionality." msgid "Volume Snapshots:" msgstr "Volume Snapshots:" msgid "" "Volume group updates of any kind had previously required the group to be in " "``Available`` status. Updates to the group name or description will now work " "regardless of the volume group status." msgstr "" "Volume group updates of any kind had previously required the group to be in " "``Available`` status. Updates to the group name or description will now work " "regardless of the volume group status." msgid "" "Volume manage/unmanage support for IBM FlashSystem FC and iSCSI drivers." msgstr "" "Volume manage/unmanage support for IBM FlashSystem FC and iSCSI drivers." msgid "Volume manage/unmanage support for Oracle ZFSSA iSCSI and NFS drivers." msgstr "Volume manage/unmanage support for Oracle ZFSSA iSCSI and NFS drivers." msgid "" "Volume type can be filtered within extra spec: /types?extra_specs={\"key\":" "\"value\"} since microversion \"3.52\"." msgstr "" "Volume type can be filtered within extra spec: /types?extra_specs={\"key\":" "\"value\"} since microversion \"3.52\"." msgid "Volume_actions:" msgstr "Volume_actions:" msgid "" "Volumes created on NetApp cDOT and 7mode storage systems now report " "'multiattach' capability. They have always supported such a capability, but " "not reported it to Cinder." msgstr "" "Volumes created on NetApp cDOT and 7mode storage systems now report " "'multiattach' capability. They have always supported such a capability, but " "not reported it to Cinder." msgid "" "VxFlex OS (ScaleIO) driver drops support for options, which were marked as " "deprecated in Pike release. Remove config options: " "``sio_protection_domain_id``, ``sio_protection_domain_name``, " "``sio_storage_pool_name``, ``sio_storage_pool_id``. Remove volume type " "options: ``sio:sp_name``, ``sio:sp_id``, ``sio:pd_name``, ``sio:pd_id``, " "``sio:provisioning_type``, ``sio:iops_limit``, ``sio:bandwidth_limit``." msgstr "" "VxFlex OS (ScaleIO) driver drops support for options, which were marked as " "deprecated in Pike release. Remove config options: " "``sio_protection_domain_id``, ``sio_protection_domain_name``, " "``sio_storage_pool_name``, ``sio_storage_pool_id``. Remove volume type " "options: ``sio:sp_name``, ``sio:sp_id``, ``sio:pd_name``, ``sio:pd_id``, " "``sio:provisioning_type``, ``sio:iops_limit``, ``sio:bandwidth_limit``." msgid "" "VxFlex OS driver now supports OpenStack volume replication v2.1 for VxFlex " "OS v3.5.0 storage backends." 
msgstr "" "VxFlex OS driver now supports OpenStack volume replication v2.1 for VxFlex " "OS v3.5.0 storage backends." msgid "" "VxFlex OS driver now supports VxFlex OS 3.0 features: storage pools with " "fine granularity layout, volume compression(SPEF)." msgstr "" "VxFlex OS driver now supports VxFlex OS 3.0 features: storage pools with " "fine granularity layout, volume compression(SPEF)." msgid "VxFlex OS driver now supports VxFlex OS 3.5.x." msgstr "VxFlex OS driver now supports VxFlex OS 3.5.x." msgid "" "VxFlex OS driver now supports storage-assisted revert volume to snapshot." msgstr "" "VxFlex OS driver now supports storage-assisted revert volume to snapshot." msgid "VxFlex OS driver now supports storage-assisted volume migration." msgstr "VxFlex OS driver now supports storage-assisted volume migration." msgid "" "VzStorage volume driver now supports choosing desired volume format by " "setting vendor property 'vz:volume_format' in volume type metadata. Allowed " "values are 'ploop', 'qcow2' and 'raw'." msgstr "" "VzStorage volume driver now supports choosing desired volume format by " "setting vendor property 'vz:volume_format' in volume type metadata. Allowed " "values are 'ploop', 'qcow2' and 'raw'." msgid "Wallaby Series Release Notes" msgstr "Wallaby Series Release Notes" msgid "" "We can now limit the number of concurrent backup/restore operations that a " "Cinder backup service can perform using the ``backup_max_operations`` " "configuration option." msgstr "" "We can now limit the number of concurrent backup/restore operations that a " "Cinder backup service can perform using the ``backup_max_operations`` " "configuration option." msgid "" "We currently do a sparse copy when writing an image on the volume. This " "could be a potential data leak scenario where the zero blocks of the new " "image are not written on the existing volume and the data from the old image " "still exists on the volume. We fix the scenario by not doing sparse copy " "when reimaging the volume." msgstr "" "We currently do a sparse copy when writing an image on the volume. This " "could be a potential data leak scenario where the zero blocks of the new " "image are not written on the existing volume and the data from the old image " "still exists on the volume. We fix the scenario by not doing sparse copy " "when reimaging the volume." msgid "" "We introduced a new config parameter, ``reserved_image_namespaces``, that " "allows operators to set the image properties to filter out from volume image " "metadata by namespace when uploading a volume to Glance. These properties, " "if not filtered out, cause failures when uploading images back to Glance. " "The error will happen on Glance side when the reserved namespaces are used. " "This option is also useful when an operator wants to use the Glance property " "protections feature to make some image properties read-only." msgstr "" "We introduced a new config parameter, ``reserved_image_namespaces``, that " "allows operators to set the image properties to filter out from volume image " "metadata by namespace when uploading a volume to Glance. These properties, " "if not filtered out, cause failures when uploading images back to Glance. " "The error will happen on the Glance side when the reserved namespaces are " "used. This option is also useful when an operator wants to use the Glance " "property protections feature to make some image properties read-only." 
msgid "" "We no longer leave orphaned chunks on the backup backend or leave a " "temporary volume/snapshot when aborting a backup." msgstr "" "We no longer leave orphaned chunks on the backup backend or leave a " "temporary volume/snapshot when aborting a backup." msgid "" "We recently became aware of the following anomalies, however, when using the " "current RBD driver with a Ceph storage backend." msgstr "" "We recently became aware of the following anomalies, however, when using the " "current RBD driver with a Ceph storage backend." msgid "" "We replaced the config option in the disco volume driver " "\"disco_choice_client\" with \"disco_client_protocol\". We add " "\"san_api_port\" as new config option in san driver for accessing the SAN " "API using this port." msgstr "" "We replaced the config option in the disco volume driver " "\"disco_choice_client\" with \"disco_client_protocol\". We add " "\"san_api_port\" as new config option in SAN driver for accessing the SAN " "API using this port." msgid "" "Welcome to the 2023.1 (Antelope) release of the OpenStack Block Storage " "service (cinder). With this release, we added several drivers and driver " "features as follows:" msgstr "" "Welcome to the 2023.1 (Antelope) release of the OpenStack Block Storage " "service (Cinder). With this release, we added several drivers and driver " "features as follows:" msgid "" "Welcome to the Ussuri release of the OpenStack Block Storage service " "(cinder). The cinder team would like to bring the following points to your " "attention. Details may be found below." msgstr "" "Welcome to the Ussuri release of the OpenStack Block Storage service " "(Cinder). The cinder team would like to bring the following points to your " "attention. Details may be found below." msgid "" "Welcome to the Victoria release of the OpenStack Block Storage service " "(cinder). With this release, the Block Storage API version 3 has reached " "microversion **3.62**. The cinder team would like to bring the following " "points to your attention. Details may be found below." msgstr "" "Welcome to the Victoria release of the OpenStack Block Storage service " "(cinder). With this release, the Block Storage API version 3 has reached " "microversion **3.62**. The cinder team would like to bring the following " "points to your attention. Details may be found below." msgid "" "Welcome to the Wallaby release of the OpenStack Block Storage service " "(cinder). With this release, the Block Storage API version 3 has reached " "microversion **3.64**." msgstr "" "Welcome to the Wallaby release of the OpenStack Block Storage service " "(cinder). With this release, the Block Storage API version 3 has reached " "microversion **3.64**." msgid "" "Welcome to the Xena release of the OpenStack Block Storage service " "(cinder). With this release, the Block Storage API version 3 has reached " "microversion **3.66**. The cinder team would like to bring the following " "points to your attention. Details may be found below." msgstr "" "Welcome to the Xena release of the OpenStack Block Storage service " "(cinder). With this release, the Block Storage API version 3 has reached " "microversion **3.66**. The Cinder team would like to bring the following " "points to your attention. Details may be found below." msgid "" "Welcome to the Yoga release of the OpenStack Block Storage service " "(cinder). With this release, the Block Storage API version 3 has reached " "microversion **3.68**. 
The cinder team would like to bring the following " "points to your attention. Details may be found throughout this document." msgstr "" "Welcome to the Yoga release of the OpenStack Block Storage service " "(cinder). With this release, the Block Storage API version 3 has reached " "microversion **3.68**. The Cinder team would like to bring the following " "points to your attention. Details may be found throughout this document." msgid "" "What QoS settings are available depends upon the storage firmware version of " "the ETERNUS AF/DX." msgstr "" "What QoS settings are available depends upon the storage firmware version of " "the ETERNUS AF/DX." msgid "" "When Barbican is the encryption key_manager backend, any encryption keys " "associated with the legacy ConfKeyManager will be automatically migrated to " "Barbican. All database references to the ConfKeyManager's all-zeros key ID " "will be updated with a Barbican key ID. The encryption keys do not change. " "Only the encryption key ID changes." msgstr "" "When Barbican is the encryption key_manager backend, any encryption keys " "associated with the legacy ConfKeyManager will be automatically migrated to " "Barbican. All database references to the ConfKeyManager's all-zeros key ID " "will be updated with a Barbican key ID. The encryption keys do not change. " "Only the encryption key ID changes." msgid "" "When Cinder creates a new cinder-volume service, it now also immediately " "updates the service_uuid for all volumes associated with that cinder-volume " "host. In some cases, this was preventing the database purge operation from " "completing successfully." msgstr "" "When Cinder creates a new cinder-volume service, it now also immediately " "updates the service_uuid for all volumes associated with that cinder-volume " "host. In some cases, this was preventing the database purge operation from " "completing successfully." msgid "" "When a policy value is deprecated, the oslo.policy engine will check the new " "value, and if that fails, it will evaluate the deprecated value. This " "behavior may be modified so *only* the new policy value is used by setting " "the configuration option ``enforce_new_defaults=True`` in the " "``[oslo_policy]`` section of the cinder configuration file." msgstr "" "When a policy value is deprecated, the oslo.policy engine will check the new " "value, and if that fails, it will evaluate the deprecated value. This " "behaviour may be modified so *only* the new policy value is used by setting " "the configuration option ``enforce_new_defaults=True`` in the " "``[oslo_policy]`` section of the cinder configuration file." msgid "" "When a volume type is specified explicitly in a volume-create call, Cinder " "will use the specified type. If the specified type cannot be assigned due " "to a conflict with other parameters in the volume-create call, however, the " "call will result in a 400 (Bad Request) response." msgstr "" "When a volume type is specified explicitly in a volume-create call, Cinder " "will use the specified type. If the specified type cannot be assigned due " "to a conflict with other parameters in the volume-create call, however, the " "call will result in a 400 (Bad Request) response." msgid "" "When an encrypted volume is cloned, a new encryption key is generated for " "the new volume. This is currently implemented only for iSCSI/FC backends." msgstr "" "When an encrypted volume is cloned, a new encryption key is generated for " "the new volume. 
This is currently implemented only for iSCSI/FC backends." msgid "" "When backing up a volume from a snapshot, the volume status would be set to " "\"backing-up\", preventing operations on the volume until the backup is " "complete. This status is now set on the snapshot instead, making the volume " "available for other operations." msgstr "" "When backing up a volume from a snapshot, the volume status would be set to " "\"backing-up\", preventing operations on the volume until the backup is " "complete. This status is now set on the snapshot instead, making the volume " "available for other operations." msgid "" "When creating an encrypted volume from an image in Glance that was created " "from a non-encrypted volume uploaded as an image, or an image that just " "happens to be sized very close to the gibibyte boundary given by the " "requested volume size, the space consumed by the encryption header may not " "leave sufficient space for the data contained in the image. In this case, " "the data is silently truncated to fit within the requested volume size." msgstr "" "When creating an encrypted volume from an image in Glance that was created " "from a non-encrypted volume uploaded as an image, or an image that just " "happens to be sized very close to the gibibyte boundary given by the " "requested volume size, the space consumed by the encryption header may not " "leave sufficient space for the data contained in the image. In this case, " "the data is silently truncated to fit within the requested volume size." msgid "" "When disabled (which is the default and current behavior), a replicated " "source volume will be protected with a snapshot of the same volume type." msgstr "" "When disabled (which is the default and current behaviour), a replicated " "source volume will be protected with a snapshot of the same volume type." msgid "" "When enabled, snapshots of replicated source volumes will be treated as " "regular, non-replicated devices." msgstr "" "When enabled, snapshots of replicated source volumes will be treated as " "regular, non-replicated devices." msgid "" "When encryption keys based on the ConfKeyManager's fixed_key are migrated to " "Barbican, ConfKeyManager keys stored in the Backup table are included in the " "migration process. Fixes `bug 1757235 `__." msgstr "" "When encryption keys based on the ConfKeyManager's fixed_key are migrated to " "Barbican, ConfKeyManager keys stored in the Backup table are included in the " "migration process. Fixes `bug 1757235 `__." msgid "" "When extending a volume created on RaidGroup, the process has been updated " "to use CLI for volume extension." msgstr "" "When extending a volume created on RaidGroup, the process has been updated " "to use CLI for volume extension." msgid "" "When extending a volume created on ThinProvisionPool, the process will still " "use SMI-S for volume extension." msgstr "" "When extending a volume created on ThinProvisionPool, the process will still " "use SMI-S for volume extension." msgid "" "When managing volume types an OpenStack provider is now given more control " "to grant access to for different storage type operations. The provider can " "now customize access to type create, delete, update, list, and show using " "new entries in the cinder policy file." msgstr "" "When managing volume types an OpenStack provider is now given more control " "to grant access to for different storage type operations. 
The provider can " "now customise access to type create, delete, update, list, and show using " "new entries in the Cinder policy file." msgid "" "When performing a *live* upgrade from Liberty it may happen that retype " "calls will reserve additional quota. As by default quota reservations are " "invalidated after 24 hours (config option ``reservation_expire=86400``), we " "recommend either decreasing that time or watching for unused quota " "reservations manually during the upgrade process." msgstr "" "When performing a *live* upgrade from Liberty it may happen that retype " "calls will reserve additional quota. As by default quota reservations are " "invalidated after 24 hours (config option ``reservation_expire=86400``), we " "recommend either decreasing that time or watching for unused quota " "reservations manually during the upgrade process." msgid "" "When restoring the backup of an encrypted volume, the destination volume is " "assigned a clone of the backup's encryption key ID. This ensures every " "restored backup has a unique encryption key ID, even when multiple volumes " "have been restored from the same backup." msgstr "" "When restoring the backup of an encrypted volume, the destination volume is " "assigned a clone of the backup's encryption key ID. This ensures every " "restored backup has a unique encryption key ID, even when multiple volumes " "have been restored from the same backup." msgid "" "When running Nova Compute and Cinder Volume or Backup services on the same " "host they must use a shared lock directory to avoid rare race conditions " "that can cause volume operation failures (primarily attach/detach of " "volumes). This is done by setting the \"lock_path\" to the same directory in " "the \"oslo_concurrency\" section of nova.conf and cinder.conf. This issue " "affects all previous releases utilizing os-brick and shared operations on " "hosts between Nova Compute and Cinder data services." msgstr "" "When running Nova Compute and Cinder Volume or Backup services on the same " "host they must use a shared lock directory to avoid rare race conditions " "that can cause volume operation failures (primarily attach/detach of " "volumes). This is done by setting the \"lock_path\" to the same directory in " "the \"oslo_concurrency\" section of nova.conf and cinder.conf. This issue " "affects all previous releases utilising os-brick and shared operations on " "hosts between Nova Compute and Cinder data services." msgid "" "When running PostgreSQL it is required to upgrade and restart all the cinder-" "api services along with DB migration 62." msgstr "" "When running PostgreSQL it is required to upgrade and restart all the cinder-" "api services along with DB migration 62." msgid "" "When the Ceph backup driver is used for the backup service, restoring a " "backup to a volume created on a non-RBD backend fails. The cinder team has " "developed a fix but decided to do more thorough testing before including it " "in a release. When ready, the solution is expected to be backported to a " "future release in the Yoga series. The issue is being tracked as `Bug " "#1895035 `_." msgstr "" "When the Ceph backup driver is used for the backup service, restoring a " "backup to a volume created on a non-RBD backend fails. The Cinder team has " "developed a fix but decided to do more thorough testing before including it " "in a release. When ready, the solution is expected to be backported to a " "future release in the Yoga series. The issue is being tracked as `Bug " "#1895035 `_." 
msgid "" "When the Ceph backup driver is used for the backup service, restoring a " "backup to a volume created on a non-RBD backend fails. The cinder team is " "working on a solution which is expected to be backported to a future release " "in the Xena series. The issue is being tracked as `Bug #1895035 `_." msgstr "" "When the Ceph backup driver is used for the backup service, restoring a " "backup to a volume created on a non-RBD backend fails. The Cinder team is " "working on a solution which is expected to be backported to a future release " "in the Xena series. The issue is being tracked as `Bug #1895035 `_." msgid "" "When the storage firmware version is greater than V11L30-0000, the IOPS/" "Throughput of Total/Read/Write for the volume can be set separately using:" msgstr "" "When the storage firmware version is greater than V11L30-0000, the IOPS/" "Throughput of Total/Read/Write for the volume can be set separately using:" msgid "" "When the storage firmware version is less than V11L30-0000, only the upper " "limit of bandwidth(BWS) can be set using:" msgstr "" "When the storage firmware version is less than V11L30-0000, only the upper " "limit of bandwidth(BWS) can be set using:" msgid "" "When transferring an encrypted volume, its snapshots must also be " "transferred. Attempts to transfer an encrypted volume without transferring " "its snapshots are disallowed." msgstr "" "When transferring an encrypted volume, its snapshots must also be " "transferred. Attempts to transfer an encrypted volume without transferring " "its snapshots are disallowed." msgid "" "When uploading qcow2 images to Glance, image data will be compressed. This " "will generally result in less data transferred to Glance at the expense of " "higher CPU usage. This behavior is controlled by the " "\"image_compress_on_upload\" boolean option, which defaults to True." msgstr "" "When uploading qcow2 images to Glance, image data will be compressed. This " "will generally result in less data transferred to Glance at the expense of " "higher CPU usage. This behaviour is controlled by the " "\"image_compress_on_upload\" boolean option, which defaults to True." msgid "" "When using a PowerMax OS array as a replication target, where the source is " "an All-Flash/Hybrid array running HyperMax OS, service level and workload " "settings are not correctly applied for devices on the replication target if " "a workload is specified. Instead of setting only the workload to None, both " "service level and workload are set to None. This fix corrects the " "application of service level and workload settings for replication sessions " "where the source is HyperMax OS and the target is PowerMax OS." msgstr "" "When using a PowerMax OS array as a replication target, where the source is " "an All-Flash/Hybrid array running HyperMax OS, service level and workload " "settings are not correctly applied for devices on the replication target if " "a workload is specified. Instead of setting only the workload to None, both " "service level and workload are set to None. This fix corrects the " "application of service level and workload settings for replication sessions " "where the source is HyperMax OS and the target is PowerMax OS." msgid "" "When using the RBD pool exclusively for Cinder we can now set " "`rbd_exclusive_cinder_pool` to `true` and Cinder will use DB information to " "calculate provisioned size instead of querying all volumes in the backend, " "which will reduce the load on the Ceph cluster and the volume service." 
msgstr "" "When using the RBD pool exclusively for Cinder we can now set " "`rbd_exclusive_cinder_pool` to `true` and Cinder will use DB information to " "calculate provisioned size instead of querying all volumes in the backend, " "which will reduce the load on the Ceph cluster and the volume service." msgid "" "When using this option, users can specify the name format of host groups or " "iSCSI targets. Rules of the format:" msgstr "" "When using this option, users can specify the name format of host groups or " "iSCSI targets. Rules of the format:" msgid "" "While configuring NetApp cDOT back ends, new configuration options " "('replication_device' and 'netapp_replication_aggregate_map') must be added " "in order to use the host-level failover feature." msgstr "" "While configuring NetApp cDOT back ends, new configuration options " "('replication_device' and 'netapp_replication_aggregate_map') must be added " "in order to use the host-level failover feature." msgid "" "While in your Train deployment, purge the cinder database. This will remove " "soft-deleted volumes and snapshots and allow you to upgrade to Ussuri in the " "regular way." msgstr "" "While in your Train deployment, purge the cinder database. This will remove " "soft-deleted volumes and snapshots and allow you to upgrade to Ussuri in the " "regular way." msgid "" "While the driver has been tested against the first Release Candidate for the " "cinder Victoria release, be aware that it does not have ongoing third-party " "CI. If you choose to use the driver, the configuration option " "``enable_unsupported_driver`` must be set to ``True`` in the ``fc-zone-" "manager`` section in cinder.conf to allow its use in this release." msgstr "" "While the driver has been tested against the first Release Candidate for the " "cinder Victoria release, be aware that it does not have ongoing third-party " "CI. If you choose to use the driver, the configuration option " "``enable_unsupported_driver`` must be set to ``True`` in the ``fc-zone-" "manager`` section in cinder.conf to allow its use in this release." msgid "" "While you may expect that Cinder will be able to consume any image in " "``compressed`` container format *that Cinder has created*, you should not " "expect Cinder to be able to successfully use an image in ``compressed`` " "format that it has not created itself." msgstr "" "While you may expect that Cinder will be able to consume any image in " "``compressed`` container format *that Cinder has created*, you should not " "expect Cinder to be able to successfully use an image in ``compressed`` " "format that it has not created itself." msgid "" "With removal of the CoprHD Volume Driver any volumes being used by Cinder " "within a CoprHD backend should be migrated to a supported storage backend " "before upgrade." msgstr "" "With removal of the CoprHD Volume Driver any volumes being used by Cinder " "within a CoprHD backend should be migrated to a supported storage backend " "before upgrade." msgid "" "With the Dell SC Cinder Driver if a volume is retyped to a new storage " "profile all volumes created via snapshots from this volume will also change " "to the new storage profile." msgstr "" "With the Dell SC Cinder Driver if a volume is retyped to a new storage " "profile all volumes created via snapshots from this volume will also change " "to the new storage profile." msgid "" "With the Dell SC Cinder Driver retype failed to return a tuple if it had to " "return an update to the volume state." 
msgstr "" "With the Dell SC Cinder Driver retype failed to return a tuple if it had to " "return an update to the volume state." msgid "" "With the Dell SC Cinder Driver retyping from one replication type to another " "type (ex. regular replication to live volume replication) is not supported." msgstr "" "With the Dell SC Cinder Driver retyping from one replication type to another " "type (ex. regular replication to live volume replication) is not supported." msgid "" "With the Dell SC Cinder Driver retyping to or from a replicated type should " "now work." msgstr "" "With the Dell SC Cinder Driver retyping to or from a replicated type should " "now work." msgid "" "With this release, the Block Storage API version 3 has reached microversion " "**3.60**." msgstr "" "With this release, the Block Storage API version 3 has reached microversion " "**3.60**." msgid "X-IO" msgstr "X-IO" msgid "Xena Series Release Notes" msgstr "Xena Series Release Notes" msgid "Yadro Tatlin Unified: Added initial version of the FC driver." msgstr "Yadro Tatlin Unified: Added initial version of the FC driver." msgid "Yadro Tatlin Unified: Added initial version of the iSCSI driver." msgstr "Yadro Tatlin Unified: Added initial version of the iSCSI driver." msgid "Yoga Series Release Notes" msgstr "Yoga Series Release Notes" msgid "You can now use:" msgstr "You can now use:" msgid "You can use each variable in the specified value no more than once." msgstr "You can use each variable in the specified value no more than once." msgid "You can use the following variables:" msgstr "You can use the following variables:" msgid "You upgraded to Train from Stein" msgstr "You upgraded to Train from Stein" msgid "" "Your original upgrade from Stein was to cinder version 15.3.0 or earlier" msgstr "" "Your original upgrade from Stein was to Cinder version 15.3.0 or earlier" msgid "" "Your original upgrade from Stein was to cinder version 15.3.0 or earlier." msgstr "" "Your original upgrade from Stein was to cinder version 15.3.0 or earlier." msgid "ZTE" msgstr "ZTE" msgid "" "Zadara VPSA Driver: Added new driver authentication method to use VPSA API " "access key, and deprecate exisiting authentication method that used username " "and password combination. The deprecated config inputs will be removed in " "the next official release after Train." msgstr "" "Zadara VPSA Driver: Added new driver authentication method to use VPSA API " "access key, and deprecate existing authentication method that used username " "and password combination. The deprecated config inputs will be removed in " "the next official release after Train." msgid "" "Zadara VPSA Driver: Added support for cinder features volume manage, " "snapshot manage, list manageable volumes, manageable snapshots, multiattach " "and ipv6 support." msgstr "" "Zadara VPSA Driver: Added support for cinder features volume manage, " "snapshot manage, list manageable volumes, manageable snapshots, multiattach " "and IPv6 support." msgid "Zed Series Release Notes" msgstr "Zed Series Release Notes" msgid "" "[Pure Storage] 'bug #2028380 '_: Fixed issue with cinder replication failover failing due to " "incorrect REST call." msgstr "" "[Pure Storage] 'bug #2028380 '_: Fixed issue with Cinder replication failover failing due to " "incorrect REST call." msgid "" "[Pure Storage] Changed Python SDK driver requirement from ``purestorage`` to " "``py-pure-client`` to support change to Purity//FA REST 2.x API calls." 
msgstr "" "[Pure Storage] Changed Python SDK driver requirement from ``purestorage`` to " "``py-pure-client`` to support change to Purity//FA REST 2.x API calls." msgid "" "[Pure Storage] Corrected support status to True for generic group " "consistency snapshot support and added support for replication-enabled " "consistency groups." msgstr "" "[Pure Storage] Corrected support status to True for generic group " "consistency snapshot support and added support for replication-enabled " "consistency groups." msgid "" "[Pure Storage] `Bug #2035404 `_: Fixed issue with missing replication pod causing driver to " "fail on restart." msgstr "" "[Pure Storage] `Bug #2035404 `_: Fixed issue with missing replication pod causing driver to " "fail on restart." msgid "" "[Pure Storage] ``user_agent`` string changed from reporting kernel version " "to operating system distro and version." msgstr "" "[Pure Storage] ``user_agent`` string changed from reporting kernel version " "to operating system distro and version." msgid "" "[Pure Storage] `bug #2028380 `_: Fixed issue with cinder replication failover failing due to " "incorrect REST call." msgstr "" "[Pure Storage] `bug #2028380 `_: Fixed issue with cinder replication failover failing due to " "incorrect REST call." msgid "" "[`Community Goal `_] Support has been added for developers to write pre-upgrade " "checks. Operators can run these checks using ``cinder-status upgrade " "check``. This allows operators to be more confident when upgrading their " "deployments by having a tool that automates programmable checks against the " "deployment configuration or dataset." msgstr "" "[`Community Goal `_] Support has been added for developers to write pre-upgrade " "checks. Operators can run these checks using ``cinder-status upgrade " "check``. This allows operators to be more confident when upgrading their " "deployments by having a tool that automates programmable checks against the " "deployment configuration or dataset." msgid "" "[`bug 1772421 `_] " "INFINIDAT fixed a bug in volume extension feature where volumes were not " "extended to target size but added the given target size." msgstr "" "[`bug 1772421 `_] " "INFINIDAT fixed a bug in volume extension feature where volumes were not " "extended to target size but added the given target size." msgid "" "`Bug #1432387 `_: Add a " "command to cinder-manage to clean up file locks existing in hosts where " "there is a Cinder service running (API, Scheduler, Volume, Backup). Command " "works with the Cinder services running, useful to be called as a cron job, " "as well as stopped, to be called on host startup. Command invocation " "``cinder-manage util clean_locks`` with optional parameter ``--services-" "offline``." msgstr "" "`Bug #1432387 `_: Add a " "command to cinder-manage to clean up file locks existing in hosts where " "there is a Cinder service running (API, Scheduler, Volume, Backup). Command " "works with the Cinder services running, useful to be called as a cron job, " "as well as stopped, to be called on host startup. Command invocation " "``cinder-manage util clean_locks`` with optional parameter ``--services-" "offline``." msgid "" "`Bug #1432387 `_: Try to " "automatically clean up file locks after a resource (volume, snapshot) is " "deleted. This will alleviate the issue of the locks directory always " "increasing the number of files." msgstr "" "`Bug #1432387 `_: Try to " "automatically clean up file locks after a resource (volume, snapshot) is " "deleted. 
This will alleviate the issue of the locks directory always " "increasing the number of files." msgid "" "`Bug #1484343 `_: Fix " "creation of duplicated quota usage entries in DB." msgstr "" "`Bug #1484343 `_: Fix " "creation of duplicated quota usage entries in DB." msgid "" "`Bug #1697422 `_: Fix HPE " "3PAR driver issue where volumes that were live migrated to it would end up " "being inaccessible. We would no longer be able to use the volume for any " "operation, such as attach, detach, delete, snapshot, etc." msgstr "" "`Bug #1697422 `_: Fix HPE " "3PAR driver issue where volumes that were live migrated to it would end up " "being inaccessible. We would no longer be able to use the volume for any " "operation, such as attach, detach, delete, snapshot, etc." msgid "" "`Bug #1697906 `_: Fix " "``until_refresh`` configuration changes not taking effect in a timely " "fashion or at all." msgstr "" "`Bug #1697906 `_: Fix " "``until_refresh`` configuration changes not taking effect in a timely " "fashion or at all." msgid "" "`Bug #1740950 `_: the " "``host_name`` field in any object in the ``attachments`` array of the volume " "detail response is populated only when the call is made in an administrative " "context. Otherwise, its value is the JSON ``null`` value." msgstr "" "`Bug #1740950 `_: the " "``host_name`` field in any object in the ``attachments`` array of the volume " "detail response is populated only when the call is made in an administrative " "context. Otherwise, its value is the JSON ``null`` value." msgid "" "`Bug #1823200 `_: This " "release contains an updated Dell EMC VxFlex OS driver. It must be used with " "``os-brick`` >= 2.10.3 but < 2.11.0. and requires that a new configuration " "file be deployed on compute nodes, cinder nodes, and anywhere you would " "perform a volume attachment in your deployment. See the `Dell EMC VxFlex OS " "(ScaleIO) Storage driver `_ " "documentation for details about the configuration file, and see `OSSN-0086 " "`_ for more information " "about the security vulnerability." msgstr "" "`Bug #1823200 `_: This " "release contains an updated Dell EMC VxFlex OS driver. It must be used with " "``os-brick`` >= 2.10.3 but < 2.11.0. and requires that a new configuration " "file be deployed on compute nodes, cinder nodes, and anywhere you would " "perform a volume attachment in your deployment. See the `Dell EMC VxFlex OS " "(ScaleIO) Storage driver `_ " "documentation for details about the configuration file, and see `OSSN-0086 " "`_ for more information " "about the security vulnerability." msgid "" "`Bug #1823200 `_: This " "release contains an updated Dell EMC VxFlex OS driver. It must be used with " "``os-brick`` >= 2.8.5 but < 2.9.0. and requires that a new configuration " "file be deployed on compute nodes, cinder nodes, and anywhere you would " "perform a volume attachment in your deployment. See the `Dell EMC VxFlex OS " "(ScaleIO) Storage driver `_ " "documentation for details about the configuration file, and see `OSSN-0086 " "`_ for more information " "about the security vulnerability." msgstr "" "`Bug #1823200 `_: This " "release contains an updated Dell EMC VxFlex OS driver. It must be used with " "``os-brick`` >= 2.8.5 but < 2.9.0. and requires that a new configuration " "file be deployed on compute nodes, cinder nodes, and anywhere you would " "perform a volume attachment in your deployment. 
See the `Dell EMC VxFlex OS " "(ScaleIO) Storage driver `_ " "documentation for details about the configuration file, and see `OSSN-0086 " "`_ for more information " "about the security vulnerability." msgid "" "`Bug #1823200 `_: This " "release contains an updated Dell EMC VxFlex OS driver. It must be used with " "``os-brick`` >= 3.0.2 but < 3.1.0. and requires that a new configuration " "file be deployed on compute nodes, cinder nodes, and anywhere you would " "perform a volume attachment in your deployment. See the `Dell EMC VxFlex OS " "(ScaleIO) Storage driver `_ " "documentation for details about the configuration file, and see `OSSN-0086 " "`_ for more information " "about the security vulnerability." msgstr "" "`Bug #1823200 `_: This " "release contains an updated Dell EMC VxFlex OS driver. It must be used with " "``os-brick`` >= 3.0.2 but < 3.1.0. and requires that a new configuration " "file be deployed on compute nodes, cinder nodes, and anywhere you would " "perform a volume attachment in your deployment. See the `Dell EMC VxFlex OS " "(ScaleIO) Storage driver `_ " "documentation for details about the configuration file, and see `OSSN-0086 " "`_ for more information " "about the security vulnerability." msgid "" "`Bug #1823200 `_: This " "release contains an updated Dell EMC VxFlex OS driver. It must be used with " "``os-brick`` version 3.1.0 or greater and requires that a new configuration " "file be deployed on compute nodes, cinder nodes, and anywhere you would " "perform a volume attachment in your deployment. See the `Dell EMC VxFlex OS " "(ScaleIO) Storage driver `_ " "documentation for details about the configuration file, and see `OSSN-0086 " "`_ for more information " "about the security vulnerability." msgstr "" "`Bug #1823200 `_: This " "release contains an updated Dell EMC VxFlex OS driver. It must be used with " "``os-brick`` version 3.1.0 or greater and requires that a new configuration " "file be deployed on compute nodes, cinder nodes, and anywhere you would " "perform a volume attachment in your deployment. See the `Dell EMC VxFlex OS " "(ScaleIO) Storage driver `_ " "documentation for details about the configuration file, and see `OSSN-0086 " "`_ for more information " "about the security vulnerability." msgid "" "`Bug #1828386 `_: Fix the " "bug that a volume retyped from another volume type to a replicated or " "multiattach type cannot have replication or multiattach enabled in rbd " "driver." msgstr "" "`Bug #1828386 `_: Fix the " "bug that a volume retyped from another volume type to a replicated or " "multiattach type cannot have replication or multiattach enabled in RBD " "driver." msgid "" "`Bug #1837524 `_: IBM " "Spectrum Virtualize Family: Fixed create_consistency_group if the volume " "has mirror copy and mdisk_grp_name=many." msgstr "" "`Bug #1837524 `_: IBM " "Spectrum Virtualize Family: Fixed create_consistency_group if the volume " "has mirror copy and mdisk_grp_name=many." msgid "" "`Bug #1839384 `_: NetApp " "ONTAP: Detaching any instance from multiattached volume terminates " "connection. Now the connection is terminated only if there're no other " "instances using the same initiator." msgstr "" "`Bug #1839384 `_: NetApp " "ONTAP: Detaching any instance from multiattached volume terminates " "connection. Now the connection is terminated only if there're no other " "instances using the same initiator." msgid "" "`Bug #1858119 `_: Fix the " "HPE 3PAR driver's attempt to rename the backend volume after it was " "migrated. 
If the original volume resides on the same 3PAR backend then the " "pre and post migration volume names are swapped. Otherwise, the newly " "migrated volume is renamed to match the original name." msgstr "" "`Bug #1858119 `_: Fix the " "HPE 3PAR driver's attempt to rename the backend volume after it was " "migrated. If the original volume resides on the same 3PAR backend then the " "pre and post migration volume names are swapped. Otherwise, the newly " "migrated volume is renamed to match the original name." msgid "" "`Bug #1859652 `_: Fix to " "allow retyping an attached volume to SolidFire." msgstr "" "`Bug #1859652 `_: Fix to " "allow retyping an attached volume to SolidFire." msgid "" "`Bug #1863021 `_: Eventlet " "monkey patch results in assert len(_active) == 1 AssertionError" msgstr "" "`Bug #1863021 `_: Eventlet " "monkey patch results in assert len(_active) == 1 AssertionError" msgid "" "`Bug #1863806 `_: ``os-" "reset_status`` notifications for volumes, snapshots, and backups were being " "sent to nonstandard publisher_ids relative to other cinder notifications for " "volumes, snapshots, and backups. Now they are also sent to the following " "*standard* publisher_ids, where most people would expect to find them:" msgstr "" "`Bug #1863806 `_: ``os-" "reset_status`` notifications for volumes, snapshots, and backups were being " "sent to nonstandard publisher_ids relative to other cinder notifications for " "volumes, snapshots, and backups. Now they are also sent to the following " "*standard* publisher_ids, where most people would expect to find them:" msgid "" "`Bug #1866860 `_: Fix " "`AttributeError` on the Brocade ZM driver when using setting REST_HTTP or " "REST_HTTPS as the fc_southbound_protocol option and an exception is raised " "by the client." msgstr "" "`Bug #1866860 `_: Fix " "`AttributeError` on the Brocade ZM driver when using setting REST_HTTP or " "REST_HTTPS as the fc_southbound_protocol option and an exception is raised " "by the client." msgid "" "`Bug #1867906 `_: group-" "create-from-src doesn't work in active/active mode" msgstr "" "`Bug #1867906 `_: group-" "create-from-src doesn't work in active/active mode" msgid "" "`Bug #1869746 `_: Cinder no " "longer allows an incremental backup to be created while having the parent " "backup in another project." msgstr "" "`Bug #1869746 `_: Cinder no " "longer allows an incremental backup to be created while having the parent " "backup in another project." msgid "" "`Bug #1870367 `_ : Partially " "fixed NFS and Quobyte drivers by no longer allowing extending a volume while " "it is attached, to prevent failures due to Qemu internal locking mechanisms." msgstr "" "`Bug #1870367 `_ : Partially " "fixed NFS and Quobyte drivers by no longer allowing extending a volume while " "it is attached, to prevent failures due to QEMU internal locking mechanisms." msgid "" "`Bug #1871744 `_: Glance " "retry failed: TypeError: get() got an unexpected keyword argument " "'schema_name'" msgstr "" "`Bug #1871744 `_: Glance " "retry failed: TypeError: get() got an unexpected keyword argument " "'schema_name'" msgid "" "`Bug #1873463 `_: Virtuozzo " "driver - copy_volume_to_image() needs to support glance multistore" msgstr "" "`Bug #1873463 `_: Virtuozzo " "driver - copy_volume_to_image() needs to support glance multistore" msgid "" "`Bug #1873738 `_: RBD " "Driver: Added cleanup for residue destination file if the copy image to " "encrypted volume operation fails." 
msgstr "" "`Bug #1873738 `_: RBD " "Driver: Added cleanup for residue destination file if the copy image to " "encrypted volume operation fails." msgid "" "`Bug #1874134 `_: Fix for " "NetApp ONTAP driver allowing an iSCSI or FCP volume to be extended to a size " "up to 16TB regardless of its original size, even if it's attached to an " "instance." msgstr "" "`Bug #1874134 `_: Fix for " "NetApp ONTAP driver allowing an iSCSI or FCP volume to be extended to a size " "up to 16TB regardless of its original size, even if it's attached to an " "instance." msgid "" "`Bug #1874187 `_: PowerMax " "driver - Exception was not handled and breaks the flow while add/remove " "volumes to generic volume group" msgstr "" "`Bug #1874187 `_: PowerMax " "driver - Exception was not handled and breaks the flow while add/remove " "volumes to generic volume group" msgid "" "`Bug #1874541 `_: Fix a " "ZeroDivisionError when the SolidFire driver tried to update cluster " "capabilities." msgstr "" "`Bug #1874541 `_: Fix a " "ZeroDivisionError when the SolidFire driver tried to update cluster " "capabilities." msgid "" "`Bug #1875432 `_: PowerMax " "Driver - Live migration fails when an instance has more than one replication " "device" msgstr "" "`Bug #1875432 `_: PowerMax " "Driver - Live migration fails when an instance has more than one replication " "device" msgid "" "`Bug #1875433 `_: PowerMax " "Driver - Retype from rep to rep leaving storage group suspended" msgstr "" "`Bug #1875433 `_: PowerMax " "Driver - Retype from rep to rep leaving storage group suspended" msgid "" "`Bug #1875478 `_: PowerMax " "Driver - Concurrent live migrations can sometimes fail when one thread " "deletes a storage group that another thread may need." msgstr "" "`Bug #1875478 `_: PowerMax " "Driver - Concurrent live migrations can sometimes fail when one thread " "deletes a storage group that another thread may need." msgid "" "`Bug #1875570 `_: Fixed " "issue with NFS backend where the image-volume cache was never used to create " "a volume, even when the cache was enabled." msgstr "" "`Bug #1875570 `_: Fixed " "issue with NFS backend where the image-volume cache was never used to create " "a volume, even when the cache was enabled." msgid "" "`Bug #1875640 `_: PowerMax " "Driver - Failover lock not released during U4P failover during exception" msgstr "" "`Bug #1875640 `_: PowerMax " "Driver - Failover lock not released during U4P failover during exception" msgid "" "`Bug #1875953 `_: Virtuozzo " "driver - missing context in create_cloned_volume call" msgstr "" "`Bug #1875953 `_: Virtuozzo " "driver - missing context in create_cloned_volume call" msgid "" "`Bug #1875959 `_: Fixed " "issue where NetApp ONTAP NFS driver would fail to flexclone a Glance image." msgstr "" "`Bug #1875959 `_: Fixed " "issue where NetApp ONTAP NFS driver would fail to flexclone a Glance image." msgid "" "`Bug #1875959 `_: NetApp " "ONTAP NFS driver - Unable to perform flexclone from glance share" msgstr "" "`Bug #1875959 `_: NetApp " "ONTAP NFS driver - Unable to perform flexclone from glance share" msgid "" "`Bug #1877164 `_: Fix " "retyping volume with snapshots leaves the snapshots with the old type, " "making the quotas wrong inmediately for snapshots, and breaking them even " "more after those snapshots are deleted." msgstr "" "`Bug #1877164 `_: Fix " "retyping volume with snapshots leaves the snapshots with the old type, " "making the quotas wrong immediately for snapshots, and breaking them even " "more after those snapshots are deleted." 
msgid "" "`Bug #1877445 `_: Pure " "Storage Driver - doesn't handle synchronous replication CIDR filters properly" msgstr "" "`Bug #1877445 `_: Pure " "Storage Driver - doesn't handle synchronous replication CIDR filters properly" msgid "" "`Bug #1877976 `_: PowerMax " "Driver - RDFG suspended on vol create exception" msgstr "" "`Bug #1877976 `_: PowerMax " "Driver - RDFG suspended on vol create exception" msgid "" "`Bug #1879578 `_: A " "regression in the Train release caused Cinder to assign the default volume " "type too aggressively when a volume type was not specified in a volume-" "create request. As a result, some alternative methods of specifying the " "volume type were ignored and the default type (either configured by the " "operator or the system default) would be assigned." msgstr "" "`Bug #1879578 `_: A " "regression in the Train release caused Cinder to assign the default volume " "type too aggressively when a volume type was not specified in a volume-" "create request. As a result, some alternative methods of specifying the " "volume type were ignored and the default type (either configured by the " "operator or the system default) would be assigned." msgid "" "`Bug #1880971 `_: Fix " "leaving mapped volumes on offline volume migration and revert to snapshot " "operations failure." msgstr "" "`Bug #1880971 `_: Fix " "leaving mapped volumes on offline volume migration and revert to snapshot " "operations failure." msgid "" "`Bug #1883490 `_: Fixed " "incorrect response of listing volumes with filters." msgstr "" "`Bug #1883490 `_: Fixed " "incorrect response of listing volumes with filters." msgid "" "`Bug #1884268 `_: Fixed " "issue where non-admin users could not show a volume transfer by name." msgstr "" "`Bug #1884268 `_: Fixed " "issue where non-admin users could not show a volume transfer by name." msgid "" "`Bug #1886042 `_: Fix " "``unique_fqdn_network`` configuration option for the Kaminario driver, as it " "was being ignored when defined in the driver section, which used to work." msgstr "" "`Bug #1886042 `_: Fix " "``unique_fqdn_network`` configuration option for the Kaminario driver, as it " "was being ignored when defined in the driver section, which used to work." msgid "" "`Bug #1886543 `_: On retypes " "requiring a migration, try to use the driver assisted mechanism when moving " "from one backend to another when we know it's safe from the volume type " "perspective." msgstr "" "`Bug #1886543 `_: On retypes " "requiring a migration, try to use the driver assisted mechanism when moving " "from one backend to another when we know it's safe from the volume type " "perspective." msgid "" "`Bug #1886632 `_: The system " "defined ``__DEFAULT__`` volume type is now treated as a regular volume-type " "and may be updated or deleted. Since the configured ``default_volume_type`` " "cannot be deleted, however, the ``__DEFAULT__`` volume type may not be " "deleted if it is the value of that configuration option." msgstr "" "`Bug #1886632 `_: The system " "defined ``__DEFAULT__`` volume type is now treated as a regular volume-type " "and may be updated or deleted. Since the configured ``default_volume_type`` " "cannot be deleted, however, the ``__DEFAULT__`` volume type may not be " "deleted if it is the value of that configuration option." 
msgid "" "`Bug #1886662 `_: PowerMax " "Driver - Volumes not cleaned up after exception during migrate, retype, srdf " "protect creates/deletes blocks subsequent operations" msgstr "" "`Bug #1886662 `_: PowerMax " "Driver - Volumes not cleaned up after exception during migrate, retype, srdf " "protect creates/deletes blocks subsequent operations" msgid "" "`Bug #1886689 `_: Rekey " "fails when provider is legacy provider class" msgstr "" "`Bug #1886689 `_: Rekey " "fails when provider is legacy provider class" msgid "" "`Bug #1887859 `_: Fix for a " "race in Cinder Backup Manager with double initialization of backup driver." msgstr "" "`Bug #1887859 `_: Fix for a " "race in Cinder Backup Manager with double initialisation of backup driver." msgid "" "`Bug #1887885 `_: In NEC " "driver, fix a snapshot detach error." msgstr "" "`Bug #1887885 `_: In NEC " "driver, fix a snapshot detach error." msgid "" "`Bug #1887908 `_: In NEC " "driver, fix live-migration failure with FC." msgstr "" "`Bug #1887908 `_: In NEC " "driver, fix live-migration failure with FC." msgid "" "`Bug #1887962 `_: PowerMax " "driver fix to rectify incorrectly deleted non-temporary snapshots when " "calling do_sync_check used in multiple operations due to missing check for " "temporary snapshot name." msgstr "" "`Bug #1887962 `_: PowerMax " "driver fix to rectify incorrectly deleted non-temporary snapshots when " "calling do_sync_check used in multiple operations due to missing check for " "temporary snapshot name." msgid "" "`Bug #1888548 `_: Add Python " "3 support to the Brocade Zone Manager driver." msgstr "" "`Bug #1888548 `_: Add Python " "3 support to the Brocade Zone Manager driver." msgid "" "`Bug #1888550 `_: Fix " "`UnboundLocalError` on the Brocade lookup driver on southbound client " "creation failure during the device mapping retrieval." msgstr "" "`Bug #1888550 `_: Fix " "`UnboundLocalError` on the Brocade lookup driver on southbound client " "creation failure during the device mapping retrieval." msgid "" "`Bug #1888831 `_: PowerMax " "Driver - Volume updates for volumes in groups not performed during failover" msgstr "" "`Bug #1888831 `_: PowerMax " "Driver - Volume updates for volumes in groups not performed during failover" msgid "" "`Bug #1888951 `_: Fixed an " "issue with creating a backup from snapshot with NFS volume driver." msgstr "" "`Bug #1888951 `_: Fixed an " "issue with creating a backup from snapshot with NFS volume driver." msgid "" "`Bug #1889758 `_: Fix revert " "to snapshot not working for non admin users when using the snapshot's name." msgstr "" "`Bug #1889758 `_: Fix revert " "to snapshot not working for non admin users when using the snapshot's name." msgid "" "`Bug #1890241 `_: During " "delete_group_snapshot on IBM storwize, in case of multiple snapshots in the " "group, delete flow exits if any one snapshot deletion fails, but it should " "update error state and continue with deleting other snapshots." msgstr "" "`Bug #1890241 `_: During " "delete_group_snapshot on IBM Storwize, in case of multiple snapshots in the " "group, delete flow exits if any one snapshot deletion fails, but it should " "update error state and continue with deleting other snapshots." msgid "" "`Bug #1890586 `_: IBM " "Storwize: Fixed issues in check_flashcopy_rate that impacts the performance " "during Group Snapshot/Clone operations for bulk volumes." 
msgstr "" "`Bug #1890586 `_: IBM " "Storwize: Fixed issues in check_flashcopy_rate that impacts the performance " "during Group Snapshot/Clone operations for bulk volumes." msgid "" "`Bug #1890588 `_: IBM " "Storwize: Fixed issues in select_io_group that impacts the performance " "during Create_volume, Group Snapshot/Clone operations for bulk non-hyperswap " "volumes." msgstr "" "`Bug #1890588 `_: IBM " "Storwize: Fixed issues in select_io_group that impacts the performance " "during Create_volume, Group Snapshot/Clone operations for bulk non-hyperswap " "volumes." msgid "" "`Bug #1890589 `_: IBM " "Spectrum Virtualize Family: Fixed issues in create_flashcopy_to_consistgrp, " "made use of iogrp,qos from opts for create_vdisk, mkfcmap calls if the data " "exists in opts." msgstr "" "`Bug #1890589 `_: IBM " "Spectrum Virtualize Family: Fixed issues in create_flashcopy_to_consistgrp, " "made use of iogrp,qos from opts for create_vdisk, mkfcmap calls if the data " "exists in opts." msgid "" "`Bug #1890591 `_: IBM " "Spectrum Virtualize Family: Fixed issue in do_setup of " "StorwizeSVCCommonDriver to save pool information in stats during " "initialisation." msgstr "" "`Bug #1890591 `_: IBM " "Spectrum Virtualise Family: Fixed issue in do_setup of " "StorwizeSVCCommonDriver to save pool information in stats during " "initialisation." msgid "" "`Bug #1892057 `_: PowerMax " "Driver - Missing force flag for rep group volume adds" msgstr "" "`Bug #1892057 `_: PowerMax " "Driver - Missing force flag for rep group volume adds" msgid "" "`Bug #1892718 `_: PowerMax " "Driver - SRDF suspend can fail during _create_replica" msgstr "" "`Bug #1892718 `_: PowerMax " "Driver - SRDF suspend can fail during _create_replica" msgid "" "`Bug #1893107 `_: The Ussuri " "release changes the cinder database schema to make the ``volume_type_id`` " "column in the ``volumes`` and ``snapshots`` tables non-nullable because all " "volumes have been required to have a volume type since the Train release. " "The online database migration in the cinder Train series (release 15.3.0 or " "earlier), however, did not process soft-deleted rows, leaving the " "possibility that there could be a deleted volume or snapshot with a null " "``volume_type_id``, which in turn will make the database upgrade fail when " "the non-nullability constraint cannot be applied when a Train installation " "is upgraded to Ussuri." msgstr "" "`Bug #1893107 `_: The Ussuri " "release changes the Cinder database schema to make the ``volume_type_id`` " "column in the ``volumes`` and ``snapshots`` tables non-nullable because all " "volumes have been required to have a volume type since the Train release. " "The online database migration in the Cinder Train series (release 15.3.0 or " "earlier), however, did not process soft-deleted rows, leaving the " "possibility that there could be a deleted volume or snapshot with a null " "``volume_type_id``, which in turn will make the database upgrade fail when " "the non-nullability constraint cannot be applied when a Train installation " "is upgraded to Ussuri." msgid "" "`Bug #1893107 `_: The Ussuri " "release changes the cinder database schema to make the ``volume_type_id`` " "column in the ``volumes`` and ``snapshots`` tables non-nullable because all " "volumes have been required to have a volume type since the Train release. 
" "The online migration in cinder release 15.3.0 or earlier, however, did not " "process soft-deleted rows, leaving the possibility that there could be a " "deleted volume or snapshot with a null ``volume_type_id``, which in turn " "would make the database upgrade fail in Ussuri when the non-nullability " "constraint could not be applied." msgstr "" "`Bug #1893107 `_: The Ussuri " "release changes the Cinder database schema to make the ``volume_type_id`` " "column in the ``volumes`` and ``snapshots`` tables non-nullable because all " "volumes have been required to have a volume type since the Train release. " "The online migration in cinder release 15.3.0 or earlier, however, did not " "process soft-deleted rows, leaving the possibility that there could be a " "deleted volume or snapshot with a null ``volume_type_id``, which in turn " "would make the database upgrade fail in Ussuri when the non-nullability " "constraint could not be applied." msgid "" "`Bug #1894086 `_: PowerMax " "Cinder driver addresses an issue whereby Unisphere REST iterators expire " "before all data can be read from them. The iterator expiration is now set to " "180mins and deleted once all data has been read so no artifacts are left " "behind." msgstr "" "`Bug #1894086 `_: PowerMax " "Cinder driver addresses an issue whereby Unisphere REST iterators expire " "before all data can be read from them. The iterator expiration is now set to " "180mins and deleted once all data has been read so no artifacts are left " "behind." msgid "" "`Bug #1894381 `_: Fix the " "bug that cinder-manage cluster remove does not work and an error " "NoSuchOptError occurs." msgstr "" "`Bug #1894381 `_: Fix the " "bug that cinder-manage cluster remove does not work and an error " "NoSuchOptError occurs." msgid "" "`Bug #1895510 `_: IBM DS8K: " "Fixed compatability issue when using the IBM DS8K driver with storage " "version R9 and later." msgstr "" "`Bug #1895510 `_: IBM DS8K: " "Fixed compatibility issue when using the IBM DS8K driver with storage " "version R9 and later." msgid "" "`Bug #1896087 `_: Volume " "status will be rolled back to the previous state if backup creation fails " "when backup service is not available" msgstr "" "`Bug #1896087 `_: Volume " "status will be rolled back to the previous state if backup creation fails " "when backup service is not available" msgid "" "`Bug #1898075 `_: When " "Glance added support for multiple cinder stores, Images API version 2.11 " "modified the format of the image location URI, which Cinder reads in order " "to try to use an optimized data path when creating a volume from an image. " "Unfortunately, Cinder did not understand the new format and when Glance " "multiple cinder stores were used, Cinder could not use the optimized data " "path, and instead downloaded image data from the Image service. Cinder now " "supports Images API version 2.11." msgstr "" "`Bug #1898075 `_: When " "Glance added support for multiple Cinder stores, Images API version 2.11 " "modified the format of the image location URI, which Cinder reads in order " "to try to use an optimised data path when creating a volume from an image. " "Unfortunately, Cinder did not understand the new format and when Glance " "multiple Cinder stores were used, Cinder could not use the optimised data " "path, and instead downloaded image data from the Image service. Cinder now " "supports Images API version 2.11." 
msgid "" "`Bug #1898587 `_: Address " "cloning and api request timeout issues users may hit in certain " "environments, by allowing configuring timeout values for these operations " "through cinder configuration file." msgstr "" "`Bug #1898587 `_: Address " "cloning and API request timeout issues users may hit in certain " "environments, by allowing configuring timeout values for these operations " "through cinder configuration file." msgid "" "`Bug #1900979 `_: Fix bug " "with using PowerStore with enabled CHAP as a storage backend." msgstr "" "`Bug #1900979 `_: Fix bug " "with using PowerStore with enabled CHAP as a storage backend." msgid "" "`Bug #1901188 `_: Fix " "unnecessary migration on retype when QoS has the same elements in both types." msgstr "" "`Bug #1901188 `_: Fix " "unnecessary migration on retype when QoS has the same elements in both types." msgid "" "`Bug #1902852 `_: Fixed throwing Python traceback message when using ``cinder-" "manage `` without an action for the category." msgstr "" "`Bug #1902852 `_: Fixed throwing Python traceback message when using ``cinder-" "manage `` without an action for the category." msgid "" "`Bug #1904440 `_: When an " "iSCSI/FC encrypted volume was cloned, the rekey operation would stamp the " "wrong encryption key on the newly cloned volume. This resulted in a volume " "that could not be attached. It does not present a security problem." msgstr "" "`Bug #1904440 `_: When an " "iSCSI/FC encrypted volume was cloned, the rekey operation would stamp the " "wrong encryption key on the newly cloned volume. This resulted in a volume " "that could not be attached. It does not present a security problem." msgid "" "`Bug #1904892 `_: Fix cinder " "manage operations for NFS backends using IPv6 addresses in the NFS server " "address. These were previously rejected by the Cinder API." msgstr "" "`Bug #1904892 `_: Fix Cinder " "manage operations for NFS backends using IPv6 addresses in the NFS server " "address. These were previously rejected by the Cinder API." msgid "" "`Bug #1906528 `_: IBM " "Spectrum Virtualize Family driver: Fixed issue regarding host-failback and " "group-failback which impacts storage back-end performance." msgstr "" "`Bug #1906528 `_: IBM " "Spectrum Virtualize Family driver: Fixed issue regarding host-failback and " "group-failback which impacts storage back-end performance." msgid "" "`Bug #1908315 `_: Corrected " "the default checkstring for the ``group:reset_group_snapshot_status`` policy " "to make it admin-only. This policy governs the Block Storage API action " "`Reset group snapshot status `_, which by default is supposed to " "be an adminstrator-only action." msgstr "" "`Bug #1908315 `_: Corrected " "the default checkstring for the ``group:reset_group_snapshot_status`` policy " "to make it admin-only. This policy governs the Block Storage API action " "`Reset group snapshot status `_, which by default is supposed to " "be an adminstrator-only action." msgid "" "`Bug #1910767 `_: Fixed the " "calculation of the allocated capacity for the volume manager. The fix takes " "into account all volumes that have a host setting, not just volumes with a " "status of 'in-use' or 'available'." msgstr "" "`Bug #1910767 `_: Fixed the " "calculation of the allocated capacity for the volume manager. The fix takes " "into account all volumes that have a host setting, not just volumes with a " "status of 'in-use' or 'available'." 
msgid "" "`Bug #1912451 `_: IBM " "Spectrum Virtualize Family driver: Updated replication properties for " "HyperSwap volumes and volumes with replication enabled that were missing " "from volume metadata." msgstr "" "`Bug #1912451 `_: IBM " "Spectrum Virtualize Family driver: Updated replication properties for " "HyperSwap volumes and volumes with replication enabled that were missing " "from volume metadata." msgid "" "`Bug #1912624 `_: Corrected " "regression introduced by the refactoring of the backup service in the ussuri " "release, which prevented the creation of a volume backup in a different " "availability zone." msgstr "" "`Bug #1912624 `_: Corrected " "regression introduced by the refactoring of the backup service in the Ussuri " "release, which prevented the creation of a volume backup in a different " "availability zone." msgid "" "`Bug #1913054 `_: Fix for " "creating a clone of an encrypted volume for drivers that require additional " "information to attach." msgstr "" "`Bug #1913054 `_: Fix for " "creating a clone of an encrypted volume for drivers that require additional " "information to attach." msgid "" "`Bug #1915800 `_: Add " "support for ports filtering in XtremIO driver." msgstr "" "`Bug #1915800 `_: Add " "support for ports filtering in XtremIO driver." msgid "" "`Bug #1916980 `_: Fixed " "stale volume notification information on volume detach." msgstr "" "`Bug #1916980 `_: Fixed " "stale volume notification information on volume detach." msgid "" "`Bug #1917450 `_: Fix " "automatic quota refresh to correctly account for migrating volumes. During " "volume migration we'll have 2 volumes in cinder and only one will be " "accounted for in quota usage." msgstr "" "`Bug #1917450 `_: Fix " "automatic quota refresh to correctly account for migrating volumes. During " "volume migration, we'll have 2 volumes in Cinder and only one will be " "accounted for in quota usage." msgid "" "`Bug #1917574 `_: Fixed " "issue when cinderclient requests to show volume by name for non-admin users " "would result in the volume not being found for microversions 3.31 or later." msgstr "" "`Bug #1917574 `_: Fixed " "issue when cinderclient requests to show volume by name for non-admin users " "would result in the volume not being found for microversions 3.31 or later." msgid "" "`Bug #1917797 `_: Fix " "Cinder's communication with the Glance API to correctly load mTLS " "certificates from config (``glance_certfile`` and ``glance_keyfile`` in the " "``[DEFAULT]`` section)." msgstr "" "`Bug #1917797 `_: Fix " "Cinder's communication with the Glance API to correctly load mTLS " "certificates from config (``glance_certfile`` and ``glance_keyfile`` in the " "``[DEFAULT]`` section)." msgid "" "`Bug #1918889 `_: Add " "support for iSCSI IPv6 in XtremIO driver." msgstr "" "`Bug #1918889 `_: Add " "support for iSCSI IPv6 in XtremIO driver." msgid "" "`Bug #1919161 `_: Fix " "automatic quota refresh to correctly account for temporary volumes. During " "some cinder operations, such as create a backup from a snapshot, temporary " "volumes are created and are not counted towards quota usage, but the sync " "mechanism was counting them, thus incorrectly updating volume usage." msgstr "" "`Bug #1919161 `_: Fix " "automatic quota refresh to correctly account for temporary volumes. 
During " "some cinder operations, such as create a backup from a snapshot, temporary " "volumes are created and are not counted towards quota usage, but the sync " "mechanism was counting them, thus incorrectly updating volume usage." msgid "" "`Bug #1920237 `_: The backup " "manager calls volume remove_export() but does not wait for it to complete " "when detaching a volume after backup. This caused problems when a " "subsequent operation started on that volume before it had fully detached." msgstr "" "`Bug #1920237 `_: The backup " "manager calls volume remove_export() but does not wait for it to complete " "when detaching a volume after backup. This caused problems when a " "subsequent operation started on that volume before it had fully detached." msgid "" "`Bug #1922920 `_: Don't do " "volume usage notifications for migration temporary volumes." msgstr "" "`Bug #1922920 `_: Don't do " "volume usage notifications for migration temporary volumes." msgid "" "`Bug #1923828 `_: Fixed " "quota usage sync counting temporary snapshots from backups and revert to " "snapshot." msgstr "" "`Bug #1923828 `_: Fixed " "quota usage sync counting temporary snapshots from backups and revert to " "snapshot." msgid "" "`Bug #1923829 `_: Fixed " "manually deleting temporary snapshots from backups and revert to snapshots " "after failure leads to incorrect quota usage." msgstr "" "`Bug #1923829 `_: Fixed " "manually deleting temporary snapshots from backups and revert to snapshots " "after failure leads to incorrect quota usage." msgid "" "`Bug #1923830 `_: Fixed " "successfully backing up an in-use volume using a temporary snapshot instead " "of a clone leads to incorrect quota usage." msgstr "" "`Bug #1923830 `_: Fixed " "successfully backing up an in-use volume using a temporary snapshot instead " "of a clone leads to incorrect quota usage." msgid "" "`Bug #1924643 `_: Fixed the " "NetApp cinder driver sub-clone operation that might be used by extend " "operation in case the extended size is greater than the max LUN geometry." msgstr "" "`Bug #1924643 `_: Fixed the " "NetApp cinder driver sub-clone operation that might be used by extend " "operation in case the extended size is greater than the max LUN geometry." msgid "" "`Bug #1929223 `_: Fixed " "HTTPS certificate validation was disabled in PowerFlex connector." msgstr "" "`Bug #1929223 `_: Fixed " "HTTPS certificate validation was disabled in PowerFlex connector." msgid "" "`Bug #1931004 `_: Fixed use " "of incorrect stripe unit in RBD image clone causing volume-from-image to " "fail when using raw images backed by Ceph." msgstr "" "`Bug #1931004 `_: Fixed use " "of incorrect stripe unit in RBD image clone causing volume-from-image to " "fail when using raw images backed by Ceph." msgid "" "`Bug #1935011 `_: Fixed " "missing detach.start notification when deleting an attachment in reserved " "state." msgstr "" "`Bug #1935011 `_: Fixed " "missing detach.start notification when deleting an attachment in reserved " "state." msgid "" "`Bug #1935057 `_: Fixed " "sometimes on a detach volume may end in available and detached yet have an " "attachment in error_detaching." msgstr "" "`Bug #1935057 `_: Fixed " "sometimes on a detach volume may end in available and detached yet have an " "attachment in error_detaching." msgid "" "`Bug #1935688 `_: Cinder " "only supports uploading a volume of an encrypted volume type as an image to " "the Image service in ``raw`` format using a ``bare`` container type. 
" "Previously, ``os-volume_upload_image`` action requests to the Block Storage " "API specifying different format option values were accepted, but would " "result in a later failure. This condition is now checked at the API layer, " "and ``os-volume_upload_image`` action requests on a volume of an encrypted " "type that specify unsupported values for ``disk_format`` or " "``container_format`` now result in a 400 (Bad Request) response." msgstr "" "`Bug #1935688 `_: Cinder " "only supports uploading a volume of an encrypted volume type as an image to " "the Image service in ``raw`` format using a ``bare`` container type. " "Previously, ``os-volume_upload_image`` action requests to the Block Storage " "API specifying different format option values were accepted but would result " "in a later failure. This condition is now checked at the API layer, and " "``os-volume_upload_image`` action requests on a volume of an encrypted type " "that specifies unsupported values for ``disk_format`` or " "``container_format`` now result in a 400 (Bad Request) response." msgid "" "`Bug #1937084 `_: Fixed race " "condition between delete attachment and delete volume that can leave deleted " "volumes stuck as attached to instances." msgstr "" "`Bug #1937084 `_: Fixed race " "condition between delete attachment and delete volume that can leave deleted " "volumes stuck as attached to instances." msgid "" "`Bug #1938488 `_: When " "cleaning up a failed backup, clean up the snapshot status when the backup " "source is a snapshot" msgstr "" "`Bug #1938488 `_: When " "cleaning up a failed backup, clean up the snapshot status when the backup " "source is a snapshot" msgid "" "`Bug #1941068 `_: Fixed type " "of the ``host`` configuration option. It was limited to valid FQDN values " "when we document that it isn't. This may result in the ``cinder-manage db " "sync`` command failing." msgstr "" "`Bug #1941068 `_: Fixed type " "of the ``host`` configuration option. It was limited to valid FQDN values " "when we document that it isn't. This may result in the ``cinder-manage db " "sync`` command failing." msgid "" "`Bug #1944577 `_: Managing a " "volume to an encrypted type was never a good idea because there was no way " "to specify an encryption key ID so that the volume could be used. Requests " "to manage a volume to an encrypted volume type now result in an invalid " "request response." msgstr "" "`Bug #1944577 `_: Managing a " "volume to an encrypted type was never a good idea because there was no way " "to specify an encryption key ID so that the volume could be used. Requests " "to manage a volume to an encrypted volume type now result in an invalid " "request response." msgid "" "`Bug #1945500 `_: Fixed an " "error when uploading to Glance a previously downloaded glance image when " "glance multistore is enabled. Glance reserves image properties in the " "namespace 'os_glance' for its own use and will not allow images to be " "created with these properties. Additionally, there are image properties, " "such as those associated with image signature verification, that are stored " "in a volume's image metadata, which should not be added to a new image when " "a volume is being uploaded as an image. Thus Cinder will no longer include " "any volume image metadata in the namespaces ``os_glance`` and " "``img_signature`` when it creates an image in Glance. 
Furthermore, because " "the Glance property protections feature allows an operator to configure " "specific image properties as read-only, this fix adds a configuration " "option, ``reserved_image_namespaces``, that allows an operator to exclude " "additional image properties by namespace (the ``os_glance`` and " "``img_signature`` namespaces are *always* excluded)." msgstr "" "`Bug #1945500 `_: Fixed an " "error when uploading to Glance a previously downloaded glance image when " "glance multistore is enabled. Glance reserves image properties in the " "namespace 'os_glance' for its own use and will not allow images to be " "created with these properties. Additionally, there are image properties, " "such as those associated with image signature verification, that are stored " "in a volume's image metadata, which should not be added to a new image when " "a volume is being uploaded as an image. Thus Cinder will no longer include " "any volume image metadata in the namespaces ``os_glance`` and " "``img_signature`` when it creates an image in Glance. Furthermore, because " "the Glance property protections feature allows an operator to configure " "specific image properties as read-only, this fix adds a configuration " "option, ``reserved_image_namespaces``, that allows an operator to exclude " "additional image properties by namespace (the ``os_glance`` and " "``img_signature`` namespaces are *always* excluded)." msgid "" "`Bug #1945500 `_: The " "original attempt at fixing this bug did not account for differences in how " "glance and cinder store image metadata, and as a result some image " "properties were not filtered out. This new improved fix addresses those " "differences and makes the filtering more thorough." msgstr "" "`Bug #1945500 `_: The " "original attempt at fixing this bug did not account for differences in how " "Glance and Cinder store image metadata, and as a result some image " "properties were not filtered out. This new improved fix addresses those " "differences and makes the filtering more thorough." msgid "" "`Bug #1946483 `_: Fixed an " "issue where admin user fails to delete backup of an encrypted volume with an " "``oslo_config.cfg.NoSuchOptError`` error in logs of cinder-backup service. " "With this fix cloud admin is able to delete backups of encrypted volumes " "created by other users if Barbican API policies allow it." msgstr "" "`Bug #1946483 `_: Fixed an " "issue where admin user fails to delete backup of an encrypted volume with an " "``oslo_config.cfg.NoSuchOptError`` error in logs of cinder-backup service. " "With this fix cloud admin is able to delete backups of encrypted volumes " "created by other users if Barbican API policies allow it." msgid "" "`Bug #1947123 `_: Fixed the " "volume creation issue in GPFS NFS driver when gpfs_images_share_mode is set " "to copy_on_write." msgstr "" "`Bug #1947123 `_: Fixed the " "volume creation issue in GPFS NFS driver when gpfs_images_share_mode is set " "to copy_on_write." msgid "" "`Bug #1947134 `_: Fixed the " "initialization of GPFS NFS driver when gpfs_images_share_mode is set to " "copy_on_write by correcting _same_filesystem functionality." msgstr "" "`Bug #1947134 `_: Fixed the " "initialization of GPFS NFS driver when gpfs_images_share_mode is set to " "copy_on_write by correcting _same_filesystem functionality." msgid "" "`Bug #1948962 `_: Fixed " "operations that failed on volume types with 255 characters names (e.g. set " "quota limits or volume migrate)." 
msgstr "" "`Bug #1948962 `_: Fixed " "operations that failed on volume types with 255 characters names (e.g. set " "quota limits or volume migrate)." msgid "" "`Bug #1950474 `_: Fixed " "policy authorization for transfer accept API. Previously, if an operator had " "overridden the default transfer accept policy to something project specific " "in policy.yaml file, it would break the transfer accept API which is fixed " "in this release." msgstr "" "`Bug #1950474 `_: Fixed " "policy authorization for transfer accept API. Previously, if an operator had " "overridden the default transfer accept policy to something project specific " "in policy.yaml file, it would break the transfer accept API which is fixed " "in this release." msgid "" "`Bug #1950474 `_: Fixed " "policy authorization for transfer accept API. Previously, setting " "``enforce_new_defaults=True`` in oslo_policy section would break the " "transfer accept API which is fixed in this release." msgstr "" "`Bug #1950474 `_: Fixed " "policy authorization for transfer accept API. Previously, setting " "``enforce_new_defaults=True`` in oslo_policy section would break the " "transfer accept API which is fixed in this release." msgid "" "`Bug #1951977 `_: Fixed " "backup create and restore not using multipath configuration when attaching " "the volume." msgstr "" "`Bug #1951977 `_: Fixed " "backup create and restore not using multipath configuration when attaching " "the volume." msgid "" "`Bug #1951982 `_: Fixed " "cloning of encrypted volumes not using multipathing to change the encryption " "key used on the new volume." msgstr "" "`Bug #1951982 `_: Fixed " "cloning of encrypted volumes not using multipathing to change the encryption " "key used on the new volume." msgid "" "`Bug #1952420 `_: Fixed " "quota warnings about ``backups`` and ``backup_gigabytes`` when creating " "backups." msgstr "" "`Bug #1952420 `_: Fixed " "quota warnings about ``backups`` and ``backup_gigabytes`` when creating " "backups." msgid "" "`Bug #1952443 `_: Improve " "performance for creating volume from image, listing volumes, snapshots, " "backups, groups, and group_snapshots." msgstr "" "`Bug #1952443 `_: Improve " "performance for creating volume from an image, listing volumes, snapshots, " "backups, groups, and group_snapshots." msgid "" "`Bug #1952805 `_: Fixed the " "cinder-backup posix driver's behavior with multiple backup hosts. Previously " "cinder-backup would frequently schedule incremental backups on the wrong " "host and immediately fail." msgstr "" "`Bug #1952805 `_: Fixed the " "Cinder-backup Posix driver's behaviour with multiple backup hosts. " "Previously cinder-backup would frequently schedule incremental backups on " "the wrong host and immediately fail." msgid "" "`Bug #1953168 `_: Fixed " "missing parameter in the capacity filter log message." msgstr "" "`Bug #1953168 `_: Fixed " "missing parameter in the capacity filter log message." msgid "" "`Bug #1960019 `_: Fixed " "value of the x-openstack-request-id header when Cinder is using noauth." msgstr "" "`Bug #1960019 `_: Fixed " "value of the x-openstack-request-id header when Cinder is using noauth." msgid "" "`Bug #1960020 `_: Fixed " "duplicated request-id values in logs for different requests, happens only on " "request to / to get available api versions." msgstr "" "`Bug #1960020 `_: Fixed " "duplicated request-id values in logs for different requests, happens only on " "request to / to get available API versions." 
msgid "" "`Bug #1960021 `_: Fixed " "missing request id headers in requests to / to get available api versions." msgstr "" "`Bug #1960021 `_: Fixed " "missing request id headers in requests to / to get available API versions." msgid "" "`Bug #1960329 `_: Fixed " "wrong request ID on middleware filters." msgstr "" "`Bug #1960329 `_: Fixed " "wrong request ID on middleware filters." msgid "" "`Bug #1965847 `_: Fixed " "issue where importing a backup record for a backup_id that currently existed " "had the unfortunate side effect of deleting the existing backup record." msgstr "" "`Bug #1965847 `_: Fixed " "issue where importing a backup record for a backup_id that currently existed " "had the unfortunate side effect of deleting the existing backup record." msgid "`Bug #1965952 `_:" msgstr "`Bug #1965952 `_:" msgid "" "`Bug #1966103 `_: Fixed " "inconsistent behavior of ``storage_protocol`` among different backends that " "report variants of the protocol name, such as FC, fc, fibre_channel." msgstr "" "`Bug #1966103 `_: Fixed " "inconsistent behaviour of ``storage_protocol`` among different backends that " "report variants of the protocol name, such as FC, fc, fibre_channel." msgid "" "`Bug #1968170 `_: Fixed the " "message created when nova fails to reimage the volume." msgstr "" "`Bug #1968170 `_: Fixed the " "message created when Nova fails to reimage the volume." msgid "" "`Bug #1969366 `_: Fixed " "reporting of cacheable capability by drivers." msgstr "" "`Bug #1969366 `_: Fixed " "reporting of cacheable capability by drivers." msgid "" "`Bug #1969643 `_: The RBD " "driver can now delete volumes with other volumes cloned from it (or its " "snapshots) in cases where deletion would previously fail. This uses the RBD " "trash functionality." msgstr "" "`Bug #1969643 `_: The RBD " "driver can now delete volumes with other volumes cloned from it (or its " "snapshots) in cases where deletion would previously fail. This uses the RBD " "trash functionality." msgid "" "`Bug #1970768 `_: Fixed " "status of temporary volumes when creating backups and reverting to a " "snapshot, preventing accidental manual deletion of those resources." msgstr "" "`Bug #1970768 `_: Fixed " "status of temporary volumes when creating backups and reverting to a " "snapshot, preventing accidental manual deletion of those resources." msgid "" "`Bug #1978020 `_: Fixed " "uploading a volume to a Cinder-backed Glance image; if a store name is set " "in the volume type's extra specs, it must also be sent to Glance as part of " "the new image location URI. Please note that while the `image_service:" "store_id` extra spec is validated when it is set for the volume type, it is " "not validated later; it is the operator's responsibility to make sure that " "the Glance store is not renamed or removed or that the volume types are " "updated accordingly." msgstr "" "`Bug #1978020 `_: Fixed " "uploading a volume to a Cinder-backed Glance image; if a store name is set " "in the volume type's extra specs, it must also be sent to Glance as part of " "the new image location URI. Please note that while the `image_service:" "store_id` extra spec is validated when it is set for the volume type, it is " "not validated later; it is the operator's responsibility to make sure that " "the Glance store is not renamed or removed or that the volume types are " "updated accordingly." msgid "" "`Bug #1978729 `_: Fixed " "context.message_action is None on errors by backup drivers. 
The message_* " "properties of the context were not passed during rpc, which caused a double " "exception when a backup driver raised an exception, masking the actual " "backup driver exception." msgstr "" "`Bug #1978729 `_: Fixed " "context.message_action is None on errors by backup drivers. The message_* " "properties of the context were not passed during RPC, which caused a double " "exception when a backup driver raised an exception, masking the actual " "backup driver exception." msgid "" "`Bug #1979666 `_: PowerMax " "driver : Fixed rare case where the SRP in the local and remote arrays are " "different when managing volumes into OpenStack. For backward compatibility " "and name matching, the default storage group will assume the SRP name of the " "local array on both arrays." msgstr "" "`Bug #1979666 `_: PowerMax " "driver : Fixed rare case where the SRP in the local and remote arrays are " "different when managing volumes into OpenStack. For backward compatibility " "and name matching, the default storage group will assume the SRP name of the " "local array on both arrays." msgid "" "`Bug #1980268 `_: When " "creating a volume from an image, a check has been added to compare the " "requested volume size to the image's ``virtual_size`` property and fail the " "request if the volume will be too small to contain the image. If the image " "record does not contain this property, the request is accepted but the " "volume will go to ``error`` status if the image does not fit (which is the " "current behavior)." msgstr "" "`Bug #1980268 `_: When " "creating a volume from an image, a check has been added to compare the " "requested volume size to the image's ``virtual_size`` property and fail the " "request if the volume will be too small to contain the image. If the image " "record does not contain this property, the request is accepted but the " "volume will go to ``error`` status if the image does not fit (which is the " "current behavior)." msgid "" "`Bug #1988942 `_: Increased " "size of volume image metadata values accepted by the Block Storage API. " "Volume image metadata values were limited to 255 characters but Glance " "allows up to 65535 bytes. This change does not affect the database tables " "which already allow up to 65535 bytes for image metadata values." msgstr "" "`Bug #1988942 `_: Increased " "size of volume image metadata values accepted by the Block Storage API. " "Volume image metadata values were limited to 255 characters but Glance " "allows up to 65535 bytes. This change does not affect the database tables " "which already allow up to 65535 bytes for image metadata values." msgid "" "`Bug #1996049 `_: Fixed bug " "where backup was not set to error on failure when volume did not exist." msgstr "" "`Bug #1996049 `_: Fixed bug " "where backup was not set to error on failure when volume did not exist." msgid "" "`Bug #1996188 `_: Fixed " "issue where a VMDK image file whose createType allowed named extents could " "expose host information. This change introduces a new configuration option, " "``vmdk_allowed_types``, that specifies the list of VMDK image subformats " "that Cinder will allow. The default setting allows only the " "'streamOptimized' and 'monolithicSparse' subformats." msgstr "" "`Bug #1996188 `_: Fixed " "issue where a VMDK image file whose createType allowed named extents could " "expose host information. 
This change introduces a new configuration option, " "``vmdk_allowed_types``, that specifies the list of VMDK image subformats " "that Cinder will allow. The default setting allows only the " "'streamOptimized' and 'monolithicSparse' subformats." msgid "" "`Bug #1997980 `_: RBD: Fixed " "failure to update rbd image features for multi-attach when features = 0." msgstr "" "`Bug #1997980 `_: RBD: Fixed " "failure to update RBD image features for multi-attach when features = 0." msgid "" "`Bug #2004555 `_: Fixed " "issue where a user manually deleting an attachment, calling terminate " "connection, detach, or force detach, for a volume that is still used by a " "nova instance resulted in leftover devices on the compute node. These " "operations will now fail when it is believed to be a problem." msgstr "" "`Bug #2004555 `_: Fixed " "issue where a user manually deleting an attachment, calling terminate " "connection, detach, or force detach, for a volume that is still used by a " "Nova instance resulted in leftover devices on the compute node. These " "operations will now fail when it is believed to be a problem." msgid "" "`Bug #2004555 `_: Fixed " "issue where a user manually deleting an attachment, calling terminate " "connection, detach, or force detach, for a volume that is still used by a " "nova instance resulted in leftover devices on the compute node. These " "operations will now fail." msgstr "" "`Bug #2004555 `_: Fixed " "issue where a user manually deleting an attachment, calling terminate " "connection, detach, or force detach, for a volume that is still used by a " "Nova instance resulted in leftover devices on the compute node. These " "operations will now fail." msgid "" "`Bug #2007615 `_: the " "restore operation of the Cinder backup service now restores into sparse " "volumes, if possible. So, operators no longer need more space than used " "previously when they restore from a disaster." msgstr "" "`Bug #2007615 `_: the " "restore operation of the Cinder backup service now restores into sparse " "volumes, if possible. So, operators no longer need more space than used " "previously when they restore from a disaster." msgid "" "`Bug #2008017 `_: Fixed " "NetApp NFS driver to never spawn a native thread avoid thread starvation and " "other related issues." msgstr "" "`Bug #2008017 `_: Fixed " "NetApp NFS driver to never spawn a native thread to avoid thread starvation " "and other related issues." msgid "" "`Bug #2008017 `_: Hide value " "of the `[coordination] backend_url` option from logs because it can contain " "credential." msgstr "" "`Bug #2008017 `_: Hide value " "of the `[coordination] backend_url` option from logs because it can contain " "credential." msgid "" "`Bug #2008259 `_: Fixed the " "volume create functionality where non-admin users were able to create " "multiattach volumes by providing the `multiattach` parameter in the request " "body. Now we can only create multiattach volumes using a multiattach volume " "type, which is also the recommended way." msgstr "" "`Bug #2008259 `_: Fixed the " "volume create functionality where non-admin users were able to create " "multiattach volumes by providing the `multiattach` parameter in the request " "body. Now we can only create multiattach volumes using a multiattach volume " "type, which is also the recommended way." msgid "" "`Bug #2012246 `_: Hide value " "of the `[coordination] backend_url` option from logs because it can contain " "credential." 
msgstr "" "`Bug #2012246 `_: Hide value " "of the `[coordination] backend_url` option from logs because it can contain " "credential." msgid "" "`Bug #2016138 `_: Handle " "missing volumes during cleanup of incomplete backups." msgstr "" "`Bug #2016138 `_: Handle " "missing volumes during cleanup of incomplete backups." msgid "" "`Bug #2025277 `_: Fixed a " "regression in the fix for Cinder backup restoring into sparse volumes, where " "OpenStack's integrated CLI triggered a traceback. The deprecated project-" "specific legacy CLI of Cinder continued to work." msgstr "" "`Bug #2025277 `_: Fixed a " "regression in the fix for Cinder backup restoring into sparse volumes, where " "OpenStack's integrated CLI triggered a traceback. The deprecated project-" "specific legacy CLI of Cinder continued to work." msgid "" "`Bug #2027532 `_: Fixed " "Cinder API HTTP 500 when issuing a volume list and sorting by a boolean " "field (i.e. \"bootable\")." msgstr "" "`Bug #2027532 `_: Fixed " "Cinder API HTTP 500 when issuing a volume list and sorting by a boolean " "field (i.e. \"bootable\")." msgid "" "`Bug #2031897 `_: Fixed " "issues for volume backups with the Ceph driver where failures of the first " "process (\"rbd export-diff\") were not caught. Instead, only the return code " "of the second process (\"rbd import-diff\") was recognized." msgstr "" "`Bug #2031897 `_: Fixed " "issues for volume backups with the Ceph driver where failures of the first " "process (\"rbd export-diff\") were not caught. Instead, only the return code " "of the second process (\"rbd import-diff\") was recognized." msgid "" "`Bug #2036994 `_: Fixed " "rollback of volume status if the reimage operation fails while checking " "image metadata." msgstr "" "`Bug #2036994 `_: Fixed " "rollback of volume status if the reimage operation fails while checking " "image metadata." msgid "" "`Bug #2045431 `_: Fixed a " "data leak scenario where we preserve sparseness when reimaging the volume." msgstr "" "`Bug #2045431 `_: Fixed a " "data leak scenario where we preserve sparseness when reimaging the volume." msgid "" "`Bug #2058596 `_: Fixed " "broken ``backup_swift_service_auth=True`` which made swift backup driver " "consistently fail during object data access." msgstr "" "`Bug #2058596 `_: Fixed " "broken ``backup_swift_service_auth=True`` which made Swift backup driver " "consistently fail during object data access." msgid "" "`Bug #2059809 `_: Fixed " "issue where a qcow2 format image with an external data file could expose " "host information. Such an image is now rejected with an " "``ImageUnacceptable`` error if it is used to create a volume. Given that " "qcow2 external data files were never supported by Cinder, the only use for " "such an image previously was to attempt to steal host information, and hence " "this change should have no impact on users." msgstr "" "`Bug #2059809 `_: Fixed " "issue where a qcow2 format image with an external data file could expose " "host information. Such an image is now rejected with an " "``ImageUnacceptable`` error if it is used to create a volume. Given that " "qcow2 external data files were never supported by Cinder, the only use for " "such an image previously was to attempt to steal host information, and hence " "this change should have no impact on users." 
msgid "" "`Bug #2065713 `_: Due to " "incorrect exception handling, ImageNotFound errors in the RBD driver's " "get_manageable_volumes operation would propagate up to the API layer rather " "than being caught and handled in the driver." msgstr "" "`Bug #2065713 `_: Due to " "incorrect exception handling, ImageNotFound errors in the RBD driver's " "get_manageable_volumes operation would propagate up to the API layer rather " "than being caught and handled in the driver." msgid "" "`Bug #2077643 `_: Fixed " "\"cinder-manage quota sync\" CLI command, which failed with an sqlalchemy " "error when a project id was not specified." msgstr "" "`Bug #2077643 `_: Fixed " "\"cinder-manage quota sync\" CLI command, which failed with an sqlalchemy " "error when a project id was not specified." msgid "" "`Bug #2083532 `_: [Pure " "Storage] Fixed creation of volumes with only IOPS qos." msgstr "" "`Bug #2083532 `_: [Pure " "Storage] Fixed creation of volumes with only IOPS qos." msgid "" "`Bug 1809249 `_ - 3PAR " "driver adds the config option `hpe3par_target_nsp` that can be set to the " "3PAR backend to use when multipath is not enabled and the Fibre Channel Zone " "Manager is not used." msgstr "" "`Bug 1809249 `_ - 3PAR " "driver adds the config option `hpe3par_target_nsp` that can be set to the " "3PAR backend to use when multipath is not enabled and the Fibre Channel Zone " "Manager is not used." msgid "" "`Bug 1913449 `_: Fix RBD " "driver _update_volume_stats() failing when using Ceph Pacific python rados " "libraries. This failed because we were passing a str instead of bytes to " "cluster.mon_command()" msgstr "" "`Bug 1913449 `_: Fix RBD " "driver _update_volume_stats() failing when using Ceph Pacific Python RADOS " "libraries. This failed because we were passing a str instead of bytes to " "cluster.mon_command()" msgid "" "`Dell PowerMax Driver Bug #1981420 `_: Fixed issue faced while creating synchronous volume which " "was caused by incorrect handling of the force flag. This is corrected by " "checking volume type extra specs for the value of \"force_vol_edit\" " "parameter along with the \"force\" parameter." msgstr "" "`Dell PowerMax Driver Bug #1981420 `_: Fixed issue faced while creating synchronous volume which " "was caused by incorrect handling of the force flag. This is corrected by " "checking volume type extra specs for the value of \"force_vol_edit\" " "parameter along with the \"force\" parameter." msgid "" "`FC driver only.` This is replaced with the smallest WWPN of the WWPNs of " "the connecting node." msgstr "" "`FC driver only.` This is replaced with the smallest WWPN of the WWPNs of " "the connecting node." 
msgid "" "`PowerMax Driver - Allowing for an empty group on a clone volume `_" msgstr "" "`PowerMax Driver - Allowing for an empty group on a clone volume `_" msgid "" "`PowerMax Driver - Force array and srp configuration `_" msgstr "" "`PowerMax Driver - Force array and srp configuration `_" msgid "" "`PowerMax Driver - Migrate extra spec class fix `_" msgstr "" "`PowerMax Driver - Migrate extra spec class fix `_" msgid "" "`PowerMax Driver - Prevent unmanage with snapvx `_" msgstr "" "`PowerMax Driver - Prevent unmanage with snapvx `_" msgid "" "`PowerMax Driver - RDF State Validation Enhancements `_" msgstr "" "`PowerMax Driver - RDF State Validation Enhancements `_" msgid "" "`PowerMax Driver - Remove mandatory failover BID `_" msgstr "" "`PowerMax Driver - Remove mandatory failover BID `_" msgid "" "``\"admin_or_storage_type_admin\": \"is_admin:True or role:" "storage_type_admin\",``" msgstr "" "``\"admin_or_storage_type_admin\": \"is_admin:True or role:" "storage_type_admin\",``" msgid "" "``\"volume_extension:types_manage\": \"rule:admin_or_storage_type_admin\", " "\"volume_extension:volume_type_access:addProjectAccess\": \"rule:" "admin_or_storage_type_admin\", \"volume_extension:volume_type_access:" "removeProjectAccess\": \"rule:admin_or_storage_type_admin\",``" msgstr "" "``\"volume_extension:types_manage\": \"rule:admin_or_storage_type_admin\", " "\"volume_extension:volume_type_access:addProjectAccess\": \"rule:" "admin_or_storage_type_admin\", \"volume_extension:volume_type_access:" "removeProjectAccess\": \"rule:admin_or_storage_type_admin\",``" msgid "``FJ_Pool_Name``" msgstr "``FJ_Pool_Name``" msgid "``FJ_SDV_Name``" msgstr "``FJ_SDV_Name``" msgid "``FJ_SDV_No``" msgstr "``FJ_SDV_No``" msgid "" "``RESKEY:availability_zones`` now is a reserved spec key for AZ volume type, " "and administrator can create AZ volume type that includes AZ restrictions by " "adding a list of Az's to the extra specs similar to: ``RESKEY:" "availability_zones: az1,az2``." msgstr "" "``RESKEY:availability_zones`` now is a reserved spec key for AZ volume type, " "and administrator can create AZ volume type that includes AZ restrictions by " "adding a list of AZs to the extra specs similar to: ``RESKEY:" "availability_zones: az1,az2``." msgid "``Windows SMB Driver``" msgstr "``Windows SMB Driver``" msgid "``Windows iSCSI Driver``" msgstr "``Windows iSCSI Driver``" msgid "``backend_stats_polling_interval``" msgstr "``backend_stats_polling_interval``" msgid "``backup_driver_init_check_interval``" msgstr "``backup_driver_init_check_interval``" msgid "``backup_driver_status_check_interval``" msgstr "``backup_driver_status_check_interval``" msgid "" "``backup_driver_status_check_interval`` config option is renamed to " "``backup_driver_stats_polling_interval`` to be similar with volume drivers " "configuration. Old option name support will be dropped in U release." msgstr "" "``backup_driver_status_check_interval`` config option is renamed to " "``backup_driver_stats_polling_interval`` to be similar with volume drivers " "configuration. Old option name support will be dropped in U release." 
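To illustrate the option rename described in the note above, a minimal cinder.conf sketch; the ``[DEFAULT]`` placement and the value of 60 seconds are assumptions for illustration only:

    [DEFAULT]
    # New name, aligned with the volume driver polling options.
    backup_driver_stats_polling_interval = 60
    # Deprecated spelling of the same option, still honoured until removal:
    # backup_driver_status_check_interval = 60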
msgid "``backups_deleted_project_id_idx``" msgstr "``backups_deleted_project_id_idx``" msgid "``choice_client``" msgstr "``choice_client``" msgid "``choice_client`` to ``disco_choice_client``" msgstr "``choice_client`` to ``disco_choice_client``" msgid "" "``cinder.keymgr.conf_key_mgr.ConfKeyManager`` still remains, but the " "``fixed_key`` configuration options should be moved to the ``[key_manager]`` " "section" msgstr "" "``cinder.keymgr.conf_key_mgr.ConfKeyManager`` still remains, but the " "``fixed_key`` configuration options should be moved to the ``[key_manager]`` " "section" msgid "``clone_check_timeout`` to ``disco_clone_check_timeout``" msgstr "``clone_check_timeout`` to ``disco_clone_check_timeout``" msgid "``disco_client_port``" msgstr "``disco_client_port``" msgid "``disco_client``" msgstr "``disco_client``" msgid "``disco_src_api_port``" msgstr "``disco_src_api_port``" msgid "``group:group_types:create``" msgstr "``group:group_types:create``" msgid "``group:group_types:delete``" msgstr "``group:group_types:delete``" msgid "``group:group_types:update``" msgstr "``group:group_types:update``" msgid "``group:group_types_manage``" msgstr "``group:group_types_manage``" msgid "``group:group_types_manage`` is replaced by:" msgstr "``group:group_types_manage`` is replaced by:" msgid "``group:group_types_specs:get_all``" msgstr "``group:group_types_specs:get_all``" msgid "``group:group_types_specs:get``" msgstr "``group:group_types_specs:get``" msgid "``group:group_types_specs``" msgstr "``group:group_types_specs``" msgid "``group:group_types_specs`` is replaced by:" msgstr "``group:group_types_specs`` is replaced by:" msgid "``group_snapshots_deleted_project_id_idx``" msgstr "``group_snapshots_deleted_project_id_idx``" msgid "``groups_deleted_project_id_idx``" msgstr "``groups_deleted_project_id_idx``" msgid "" "``iscsi_ip_address``, ``iscsi_port``, ``target_helper``, " "``iscsi_target_prefix`` and ``iscsi_protocol`` config options are deprecated " "in flavor of ``target_ip_address``, ``target_port``, ``target_helper``, " "``target_prefix`` and ``target_protocol`` accordingly. Old config options " "will be removed in S release." msgstr "" "``iscsi_ip_address``, ``iscsi_port``, ``target_helper``, " "``iscsi_target_prefix`` and ``iscsi_protocol`` config options are deprecated " "in favour of ``target_ip_address``, ``target_port``, ``target_helper``, " "``target_prefix`` and ``target_protocol`` accordingly. Old config options " "will be removed in S release." msgid "``maxBWS``" msgstr "``maxBWS``" msgid "" "``os-reset_status`` notifications for volumes, snapshots and backups will " "now go to the standard publisher id for volume, snapshot and backup like all " "other notifications for volume, snapshot and backup." msgstr "" "``os-reset_status`` notifications for volumes, snapshots and backups will " "now go to the standard publisher id for volume, snapshot and backup like all " "other notifications for volume, snapshot and backup."
msgid "``os-set_image_metadata``" msgstr "``os-set_image_metadata``" msgid "``os-unset_image_metadata``" msgstr "``os-unset_image_metadata``" msgid "``read_bytes_sec``" msgstr "``read_bytes_sec``" msgid "``read_iops_sec``" msgstr "``read_iops_sec``" msgid "``rest_ip``" msgstr "``rest_ip``" msgid "``rest_ip`` to ``disco_rest_ip``" msgstr "``rest_ip`` to ``disco_rest_ip``" msgid "``restore_check_timeout`` to ``disco_restore_check_timeout``" msgstr "``restore_check_timeout`` to ``disco_restore_check_timeout``" msgid "``retry_interval``" msgstr "``retry_interval``" msgid "``retry_interval`` to ``disco_retry_interval``" msgstr "``retry_interval`` to ``disco_retry_interval``" msgid "``rule:admin_or_owner``" msgstr "``rule:admin_or_owner``" msgid "``rule:system_or_domain_or_project_admin``" msgstr "``rule:system_or_domain_or_project_admin``" msgid "``rule:volume_extension:volume_type_encryption``" msgstr "``rule:volume_extension:volume_type_encryption``" msgid "``scheduler_driver_init_wait_time``" msgstr "``scheduler_driver_init_wait_time``" msgid "``snapshot_check_timeout`` to ``disco_snapshot_check_timeout``" msgstr "``snapshot_check_timeout`` to ``disco_snapshot_check_timeout``" msgid "``snapshots_deleted_project_id_idx``" msgstr "``snapshots_deleted_project_id_idx``" msgid "``total_bytes_sec``" msgstr "``total_bytes_sec``" msgid "``total_iops_sec``" msgstr "``total_iops_sec``" msgid "" "``volume:reimage_reserved`` - users who satisfy this policy may re-image a " "volume in status ``reserved``" msgstr "" "``volume:reimage_reserved`` - users who satisfy this policy may re-image a " "volume in status ``reserved``" msgid "" "``volume:reimage`` - users who satisfy this policy may re-image a volume in " "status ``available`` or ``error``" msgstr "" "``volume:reimage`` - users who satisfy this policy may re-image a volume in " "status ``available`` or ``error``" msgid "``volume_extension:quota_classes:get``" msgstr "``volume_extension:quota_classes:get``" msgid "``volume_extension:quota_classes:update``" msgstr "``volume_extension:quota_classes:update``" msgid "``volume_extension:quota_classes``" msgstr "``volume_extension:quota_classes``" msgid "``volume_extension:quota_classes`` is replaced by:" msgstr "``volume_extension:quota_classes`` is replaced by:" msgid "``volume_extension:type_create``" msgstr "``volume_extension:type_create``" msgid "``volume_extension:type_delete``" msgstr "``volume_extension:type_delete``" msgid "``volume_extension:type_update``" msgstr "``volume_extension:type_update``" msgid "``volume_extension:types_extra_specs:read_sensitive``" msgstr "``volume_extension:types_extra_specs:read_sensitive``" msgid "``volume_extension:types_manage``" msgstr "``volume_extension:types_manage``" msgid "``volume_extension:types_manage`` is replaced by:" msgstr "``volume_extension:types_manage`` is replaced by:" msgid "``volume_extension:volume_image_metadata:remove``" msgstr "``volume_extension:volume_image_metadata:remove``" msgid "``volume_extension:volume_image_metadata:set``" msgstr "``volume_extension:volume_image_metadata:set``" msgid "``volume_extension:volume_image_metadata:show``" msgstr "``volume_extension:volume_image_metadata:show``" msgid "``volume_extension:volume_image_metadata``" msgstr "``volume_extension:volume_image_metadata``" msgid "``volume_extension:volume_image_metadata`` is replaced by:" msgstr "``volume_extension:volume_image_metadata`` is replaced by:" msgid "``volume_extension:volume_type_access:get_all_for_type``" msgstr 
"``volume_extension:volume_type_access:get_all_for_type``" msgid "``volume_extension:volume_type_encryption:create``" msgstr "``volume_extension:volume_type_encryption:create``" msgid "``volume_extension:volume_type_encryption:delete``" msgstr "``volume_extension:volume_type_encryption:delete``" msgid "``volume_extension:volume_type_encryption:get``" msgstr "``volume_extension:volume_type_encryption:get``" msgid "``volume_extension:volume_type_encryption:update``" msgstr "``volume_extension:volume_type_encryption:update``" msgid "``volume_name_prefix`` to ``disco_volume_name_prefix``" msgstr "``volume_name_prefix`` to ``disco_volume_name_prefix``" msgid "``volumes_deleted_host_idx``" msgstr "``volumes_deleted_host_idx``" msgid "``volumes_deleted_project_id_idx``" msgstr "``volumes_deleted_project_id_idx``" msgid "``write_bytes_sec``" msgstr "``write_bytes_sec``" msgid "``write_iops_sec``" msgstr "``write_iops_sec``" msgid "``{host}``" msgstr "``{host}``" msgid "``{host}``: 1" msgstr "``{host}``: 1" msgid "``{ip}``" msgstr "``{ip}``" msgid "``{ip}``: 15" msgstr "``{ip}``: 15" msgid "``{wwn}``" msgstr "``{wwn}``" msgid "``{wwn}``: 16" msgstr "``{wwn}``: 16" msgid "" "`bug #2000724 `_: Handled " "the case when glance is calling online extend and external events were being " "sent to nova. Now Cinder will only send external events when the volume, to " "be extended, is attached to a nova instance." msgstr "" "`bug #2000724 `_: Handled " "the case when Glance is calling online extend and external events were being " "sent to Nova. Now Cinder will only send external events when the volume, to " "be extended, is attached to a Nova instance." msgid "" "`iSCSI driver only.` This is replaced with the IP address of the connecting " "node." msgstr "" "`iSCSI driver only.` This is replaced with the IP address of the connecting " "node." msgid "" "a [nova] section is added to configure the connection to the compute " "service, which is needed to the InstanceLocalityFilter, for example." msgstr "" "a [nova] section is added to configure the connection to the compute " "service, which is needed to the InstanceLocalityFilter, for example." msgid "" "characters that are not permitted for this parameter, they are replaced with " "``_``." msgstr "" "characters that are not permitted for this parameter, they are replaced with " "``_``." msgid "" "cinder-backup service is now decoupled from cinder-volume, which allows more " "flexible scaling." msgstr "" "cinder-backup service is now decoupled from cinder-volume, which allows more " "flexible scaling." msgid "" "cinder.api.middleware.sizelimit was deprecated in kilo and compatability " "shim added to call into oslo_middleware. Using oslo_middleware.sizelimit " "directly will allow us to remove the compatability shim in a future release." msgstr "" "cinder.api.middleware.sizelimit was deprecated in Kilo and a compatibility " "shim added to call into oslo_middleware. Using oslo_middleware.sizelimit " "directly will allow us to remove the compatibility shim in a future release." 
msgid "create a snapshot: \"POST /v3/{project_id}/snapshots\"" msgstr "create a snapshot: \"POST /v3/{project_id}/snapshots\"" msgid "create_group" msgstr "create_group" msgid "create_snapshot" msgstr "create_snapshot" msgid "create_volume" msgstr "create_volume" msgid "" "datera_api_token -- this has been replaced by san_login and san_password" msgstr "" "datera_api_token -- this has been replaced by san_login and san_password" msgid "default_cgsnapshot_type is reserved for migrating CGs." msgstr "default_cgsnapshot_type is reserved for migrating CGs." msgid "delete group: \"POST /v3/{project_id}/groups/{group_id}/action\"" msgstr "delete group: \"POST /v3/{project_id}/groups/{group_id}/action\"" msgid "" "dell_server_os option added to the Dell SC driver. This option allows the " "selection of the server type used when creating a server on the Dell DSM " "during initialize connection. This is only used if the server does not " "exist. Valid values are from the Dell DSM create server list." msgstr "" "dell_server_os option added to the Dell SC driver. This option allows the " "selection of the server type used when creating a server on the Dell DSM " "during initialise connection. This is only used if the server does not " "exist. Valid values are from the Dell DSM create server list." msgid "extend_volume" msgstr "extend_volume" msgid "" "failover replication: \"POST /v3/{project_id}/groups/{group_id}/action\"" msgstr "" "failover replication: \"POST /v3/{project_id}/groups/{group_id}/action\"" msgid "http://cinderstats.ivehearditbothways.com/cireport.txt" msgstr "http://cinderstats.ivehearditbothways.com/cireport.txt" msgid "https://bugs.launchpad.net/os-brick/+bugs?field.tag=nvme" msgstr "https://bugs.launchpad.net/os-brick/+bugs?field.tag=nvme" msgid "iSCSI driver: 32" msgstr "iSCSI driver: 32" msgid "iSCSI driver: ``HBSD-{host}-{ip}``" msgstr "iSCSI driver: ``HBSD-{host}-{ip}``" msgid "iSCSI driver: ``{ip}``" msgstr "iSCSI driver: ``{ip}``" msgid "" "if a ``snapshot_id`` is supplied in the request, the volume type is inferred " "from the volume type associated with the snapshot" msgstr "" "if a ``snapshot_id`` is supplied in the request, the volume type is inferred " "from the volume type associated with the snapshot" msgid "" "if a ``source_volid`` is supplied in the request, the volume type is " "inferred from the source volume's volume type" msgstr "" "if a ``source_volid`` is supplied in the request, the volume type is " "inferred from the source volume's volume type" msgid "" "if an ``imageRef`` is supplied in the request, and the image has a " "``cinder_img_volume_type`` image property, the volume type is inferred from " "the value of that image property" msgstr "" "if an ``imageRef`` is supplied in the request, and the image has a " "``cinder_img_volume_type`` image property, the volume type is inferred from " "the value of that image property" msgid "manage_existing" msgstr "manage_existing" msgid "manage_existing_snapshot" msgstr "manage_existing_snapshot" msgid "migrate_volume" msgstr "migrate_volume" msgid "nova-compute version - needs to be the latest for Pike." msgstr "nova-compute version - needs to be the latest for Pike." msgid "" "nvmeof target `bug #1966513 `_: Fixed LVM failing on terminate_connection if the connecting " "host doesn't have an iSCSI initiator name setup, for example if LVM is using " "the nvmet target." 
msgstr "" "nvmeof target `bug #1966513 `_: Fixed LVM failing on terminate_connection if the connecting " "host doesn't have an iSCSI initiator name setup, for example if LVM is using " "the nvmet target." msgid "" "nvmet target driver: Added support for shared subsystems/targets using the " "``lvm_share_target`` configuration option. Defaults to non shared, e.g., " "each volume has its own subsystem/target." msgstr "" "nvmet target driver: Added support for shared subsystems/targets using the " "``lvm_share_target`` configuration option. Defaults to non shared, e.g., " "each volume has its own subsystem/target." msgid "" "nvmet target driver: Added support to serve volumes on multiple addresses " "using the ``target_secondary_ip_addresses`` configuration option. This " "allows os-brick to iterate through them in search of one connection that " "works, and once os-brick supports NVMe-oF multipathing it will be " "automatically supported." msgstr "" "nvmet target driver: Added support to serve volumes on multiple addresses " "using the ``target_secondary_ip_addresses`` configuration option. This " "allows os-brick to iterate through them in search of one connection that " "works, and once os-brick supports NVMe-oF multipathing it will be " "automatically supported." msgid "" "only iscsi and fibre channel volume types are supported on the nova side " "currently." msgstr "" "only iSCSI and fibre channel volume types are supported on the Nova side " "currently." msgid "only the libvirt compute driver supports this currently." msgstr "only the libvirt compute driver supports this currently." msgid "" "os-brick file lock location can be specified independently of the Cinder " "service lock location using ``lock_path`` in the ``[os_brick]`` " "configuration section. Useful for HCI deployments and when running Cinder " "and Glance with Cinder backend on the same host." msgstr "" "os-brick file lock location can be specified independently of the Cinder " "service lock location using ``lock_path`` in the ``[os_brick]`` " "configuration section. Useful for HCI deployments and when running Cinder " "and Glance with the Cinder backend on the same host." msgid "retype_volume" msgstr "retype_volume" msgid "set bootable: \"POST /v3/{project_id}/volumes/{volume_id}/action\"" msgstr "set bootable: \"POST /v3/{project_id}/volumes/{volume_id}/action\"" msgid "" "upload-to-image using Image API v2 now correctly handles custom image " "properties." msgstr "" "upload-to-image using Image API v2 now correctly handles custom image " "properties." msgid "" "use oslo_middleware.sizelimit rather than cinder.api.middleware.sizelimit " "compatibility shim" msgstr "" "use oslo_middleware.sizelimit rather than cinder.api.middleware.sizelimit " "compatibility shim" msgid "" "vSphere 6.7 added support for vStorageObject snapshots. The " "VMwareVStorageObjectDriver is updated to use VStorageObject snapshots for " "volume snapshots." msgstr "" "vSphere 6.7 added support for vStorageObject snapshots. The " "VMwareVStorageObjectDriver is updated to use VStorageObject snapshots for " "volume snapshots." 
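A minimal configuration sketch for the os-brick lock location mentioned a few notes above; the directory path is an example only:

    [os_brick]
    # Lock directory used by os-brick, independent of Cinder's own lock_path.
    # Useful for HCI deployments and when Glance with the Cinder backend runs
    # on the same host as Cinder.
    lock_path = /var/lib/cinder/os-brick-locks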
msgid "" "volume readonly update: \"POST /v3/{project_id}/volumes/{volume_id}/action\"" msgstr "" "volume readonly update: \"POST /v3/{project_id}/volumes/{volume_id}/action\"" ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315576.8951166 cinder-27.0.0/releasenotes/source/locale/ja/0000775000175000017500000000000000000000000020711 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315577.8231254 cinder-27.0.0/releasenotes/source/locale/ja/LC_MESSAGES/0000775000175000017500000000000000000000000022476 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315528.0 cinder-27.0.0/releasenotes/source/locale/ja/LC_MESSAGES/releasenotes.po0000664000175000017500000014565100000000000025543 0ustar00zuulzuul00000000000000# Akihiro Motoki , 2016. #zanata # Yoshiki Eguchi , 2016. #zanata # Hidekazu Nakamura , 2017. #zanata # Yuko Fukuda , 2017. #zanata # Shu Muto , 2018. #zanata msgid "" msgstr "" "Project-Id-Version: Cinder Release Notes\n" "Report-Msgid-Bugs-To: \n" "POT-Creation-Date: 2025-07-07 22:33+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2018-02-16 09:09+0000\n" "Last-Translator: Shu Muto \n" "Language-Team: Japanese\n" "Language: ja\n" "X-Generator: Zanata 4.3.3\n" "Plural-Forms: nplurals=1; plural=0\n" msgid "" "\"volume_extension:types_extra_specs:create\": \"rule:admin or rule:" "type_admin\", \"volume_extension:types_extra_specs:delete\": \"rule:admin or " "rule:type_admin\", \"volume_extension:types_extra_specs:index\": \"\", " "\"volume_extension:types_extra_specs:show\": \"rule:admin or rule:type_admin " "or rule:type_viewer\", \"volume_extension:types_extra_specs:update\": \"rule:" "admin or rule:type_admin\"" msgstr "" "\"volume_extension:types_extra_specs:create\": \"rule:admin or rule:" "type_admin\", \"volume_extension:types_extra_specs:delete\": \"rule:admin or " "rule:type_admin\", \"volume_extension:types_extra_specs:index\": \"\", " "\"volume_extension:types_extra_specs:show\": \"rule:admin or rule:type_admin " "or rule:type_viewer\", \"volume_extension:types_extra_specs:update\": \"rule:" "admin or rule:type_admin\"" msgid "10.0.0" msgstr "10.0.0" msgid "10.0.1" msgstr "10.0.1" msgid "10.0.3" msgstr "10.0.3" msgid "10.0.4" msgstr "10.0.4" msgid "10.0.5" msgstr "10.0.5" msgid "11.0.0" msgstr "11.0.0" msgid "11.0.1" msgstr "11.0.1" msgid "11.0.2" msgstr "11.0.2" msgid "" "3PAR driver creates FC VLUN of match-set type instead of host sees. With " "match-set, the host will see the virtual volume on specified NSP (Node-Slot-" "Port). This change in vlun type fixes bug 1577993." msgstr "" "3PAR ドライバーはホストが見る代わりに match-set タイプの FC VLUN を作成しま" "す。 match-set でホストは指定された NSP(Node-Slot-Port)上の仮想ボリュームを" "見ます。これはバグ 1577993 の vlun type に関する修正です。" msgid "7.0.1" msgstr "7.0.1" msgid "7.0.2" msgstr "7.0.2" msgid "7.0.3" msgstr "7.0.3" msgid "8.0.0" msgstr "8.0.0" msgid "8.1.0" msgstr "8.1.0" msgid "8.1.1" msgstr "8.1.1" msgid "8.1.1-11" msgstr "8.1.1-11" msgid "9.0.0" msgstr "9.0.0" msgid "9.1.0" msgstr "9.1.0" msgid "9.1.1" msgstr "9.1.1" msgid "9.1.2" msgstr "9.1.2" msgid "" "A bug in the Quobyte driver was fixed that prevented backing up volumes and " "snapshots" msgstr "" "ボリュームのバックアップとスナップショットの取得を阻止していたQuobyteドライバ" "のバグが修正されました。" msgid "" "A new API to display the volumes summary. 
This summary API displays the " "total number of volumes and total volume's size in GB." msgstr "" "ボリュームサマリを表示する新しい API。このサマリ API はボリューム数とボリュー" "ムサイズの合計(GB)を表示します。" msgid "" "Add 'LUNType' configuration verification for Huawei driver when connecting " "to Dorado array. Because Dorado array only supports 'Thin' lun type, so " "'LUNType' only can be configured as 'Thin', any other type is invalid and if " "'LUNType' not explicitly configured, by default use 'Thin' for Dorado array." msgstr "" "Huawei ドライバーにDorado アレイへの接続時の 'LUNType' 設定検証を追加しまし" "た。Dorado アレイは 'Thin' lun タイプのみサポートしているため、'LUNType' の" "み 'Thin' に設定し、その他のタイプは無効であり、'LUNType' が明示的に設定され" "ていない場合はデフォルトで 'Thin' を Dorado アレイに使用します。" msgid "" "Add 'display_name' and 'display_description' validation for creating/" "updating snapshot and volume operations." msgstr "" "スナップショットの作成/更新とボリューム操作に 'display_name' と " "'display_description' の検証を追加しました。" msgid "Add CG capability to generic volume groups in Huawei driver." msgstr "" "Huaweiドライバーで 一般的なボリュームグループにコンシステンシーグループ能力を" "追加しました。" msgid "Add CG capability to generic volume groups in INFINIDAT driver." msgstr "" "INFINIDAT ドライバーで 一般的なボリュームグループにコンシステンシーグループ能" "力を追加しました。" msgid "" "Add Support for QoS in the Nimble Storage driver. QoS is available from " "Nimble OS release 4.x and above." msgstr "" " Nimble ストレージドライバーに QoS サポートを追加しました。QoS は Nimble OS " "release 4.x とそれ以降で利用可能。" msgid "Add Support for deduplication of volumes in the Nimble Storage driver." msgstr "" "Nimble ストレージドライバーでボリュームの重複排除サポートを追加しました。" msgid "Add ``admin_or_storage_type_admin`` rule to ``policy.json``, e.g." msgstr "" "``policy.json`` に ``admin_or_storage_type_admin`` を追加しました。 例:" msgid "" "Add ``all_tenants``, ``project_id`` support in attachment list&detail APIs." msgstr "" "接続リストと詳細の API に ``all_tenants`` と ``project_id`` のサポートを追加" "しました。" msgid "" "Add ``all_tenants``, ``project_id`` support in the attachment list and " "detail APIs." msgstr "" "接続リストと詳細の API に ``all_tenants`` と ``project_id`` のサポートを追加" "しました。" msgid "Add ``storage_type_admin`` role." msgstr "``storage_type_admin`` ロールを追加しました。" msgid "Add ``user_id`` field to snapshot list/detail and snapshot show." msgstr "" "snapshot list/detail および snapshot show に ``user_id`` フィールドを追加しま" "した。" msgid "Add ``volume-type`` filter to API Get-Pools" msgstr "Get-Pools API に ``volume-type`` フィルターを追加しました。" msgid "" "Add ability to enable multi-initiator support to allow live migration in the " "Nimble backend driver." msgstr "" "Nimble バックエンドドライバーで、ライブマイグレーションでのマルチイニシエー" "ターのサポートを可能にする機能を追加しました。" msgid "" "Add ability to extend ``in-use`` volume. User should be aware of the whole " "environment before using this feature because it's dependent on several " "external factors below:" msgstr "" "``使用中``ボリュームを拡張できるようになりました。以下のいくつかの外部要因に" "依存しているため、この機能を使用する前に環境全体を認識しておく必要がありま" "す。" msgid "Add ability to specify backup driver via class name." msgstr "クラス名でバックアップドライバーを指定できるようになりました。" msgid "Add backup snapshots support for Storwize/SVC driver." msgstr "" "Storwize/SVC ドライバーにスナップショットバックアップのサポートを追加しまし" "た。" msgid "Add chap authentication support for the vmax backend." msgstr "VMAX バックエンドに chap 認証のサポートを追加しました。" msgid "" "Add consistency group capability to Generic Volume Groups in the Dell EMC SC " "driver." msgstr "" "Dell EMC SC ドライバに汎用Volume Groupに対する整合性グループ機能が追加されま" "した。" msgid "" "Add consistency group capability to generic volume groups in Storwize " "drivers." 
msgstr "" "Storwize ドライバーの一般的なボリュームグループにコンシステンシーグループ能力" "を追加しました。" msgid "Add consistency group replication support in XIV\\A9000 Cinder driver." msgstr "" "XIV\\A9000 Cinder ドライバーでコンシステンシーグループレプリケーションをサ" "ポートしました。" msgid "" "Add consistent group capability to generic volume groups in CoprHD driver." msgstr "" "CoprHD ドライバーの一般的なボリュームグループにコンシステンシーグループ能力を" "追加しました。" msgid "" "Add consistent group capability to generic volume groups in Lefthand driver." msgstr "" "Lefthandドライバに汎用Volume Groupに対する整合性グループ機能が追加されまし" "た。" msgid "" "Add consistent group capability to generic volume groups in Pure drivers." msgstr "" "Pure ドライバで 一般的なボリュームグループにコンシステンシーグループ能力を追" "加しました。" msgid "Add consistent group capability to generic volume groups in VNX driver." msgstr "" "VNX ドライバーで 一般的なボリュームグループにコンシステンシーグループ能力を追" "加しました。" msgid "" "Add consistent group capability to generic volume groups in XIV, Spectrum " "Accelerate and A9000/R storage systems." msgstr "" "XIV, Spectrum Accelerate, A9000/R ストレージシステムに汎用ボリュームグループ" "に整合性グループ機能が追加されました。" msgid "" "Add consistent group capability to generic volume groups in the SolidFire " "driver." msgstr "" " SolidFire ドライバーの一般的なボリュームグループにコンシステンシーグループ能" "力を追加しました。" msgid "" "Add consistent group capability to generic volume groups in the XtremIO " "driver." msgstr "" " XtremIO ドライバーで 一般的なボリュームグループにコンシステンシーグループ能" "力を追加しました。" msgid "" "Add consistent group snapshot support to generic volume groups in VMAX " "driver version 3.0." msgstr "" " VMAX ドライバーの一般的なボリュームグループにコンシステンシーグループスナッ" "プショットのサポートを追加しました。" msgid "" "Add consistent replication group support in Dell EMC VMAX cinder driver." msgstr "" "Dell EMC VMAX Cinder ドライバーに、 整合レプリケーショングループのサポートを" "追加しました。" msgid "Add consistent replication group support in Storwize Cinder driver." msgstr "" "Storwize Cinder ドライバーに、 整合レプリケーショングループのサポートを追加し" "ました。" msgid "Add consistent replication group support in VNX cinder driver." msgstr "" "VNX Cinder ドライバーに、 整合レプリケーショングループのサポートを追加しまし" "た。" msgid "" "Add enhanced support to the QNAP Cinder driver, including 'CHAP', 'Thin " "Provision', 'SSD Cache', 'Dedup' and 'Compression'." msgstr "" "Add enhanced support to the QNAP Cinder driver, including 'CHAP'、'Thin " "Provision'、'SSD Cache'、'Dedup' および 'Compression' を含む、サポート強化を " "QNAP Cinder ドライバーに追加しました。" msgid "Add filter, sorter and pagination support in group snapshot listings." msgstr "" "グループスナップショットの一覧にフィルター、ソート、ページ送りのサポートを追" "加しました。" msgid "Add filters support to get_pools API v3.28." msgstr "get_pools API v3.28 でフィルタサポートを追加しました。" msgid "" "Add get_manageable_volumes and get_manageable_snapshots implementations for " "Pure Storage Volume Drivers." msgstr "" "Pure Storage ボリュームドライバーに get_manageable_volumes と " "get_manageable_snapshots 実装を追加しました。" msgid "" "Add global mirror with change volumes(gmcv) support and user can manage gmcv " "replication volume by SVC driver. An example to set a gmcv replication " "volume type, set property replication_type as \" gmcv\", property " "replication_enabled as \" True\" and set property drivers:" "cycle_period_seconds as 500." msgstr "" "ボリューム変更を伴うグローバルミラー (gmcv) のサポートを追加し、SVC ドライ" "バーで gmcv レプリケーションボリュームを管理できるようになりました。gmcv レプ" "リケーションボリュームタイプを設定する一つの例として、プロパティー " "replication_type を \" gmcv\" に設定し、プロパティー replication_enabled " "を \" True\" に設定し、プロパティー driver:cycle_period_seconds を 500 に" "設定します。" msgid "Add mirrored volume support in IBM SVC/Storwize driver." msgstr "" "IBM SVC/Storwize ドライバーにミラーされたボリュームのサポートを追加しました。" msgid "Add multipath enhancement to Storwize iSCSI driver." 
msgstr "Storwize SVC ドライバーにマルチパスのサポートを追加しました。" msgid "Add provider_id in the detailed view of a volume for admin." msgstr "管理者向けにボリューム詳細表示に provider_id を追加しました。" msgid "Add support for hybrid aggregates to the NetApp cDOT drivers." msgstr "" "NetApp cDOT ドライバーに ハイブリッドアグリゲートのサポートを追加しました。" msgid "Add support for reporting pool disk type in Huawei driver." msgstr "" " Huawei ドライバーにプールディスクタイプをレポートするサポートを追加しまし" "た。" msgid "Add support to backup volume using snapshot in the Unity driver." msgstr "" "Unity ドライバにスナップショットを使うボリュームバックアップのサポートを追加" "しました。" msgid "Add support to configure IO ports option in Dell EMC Unity driver." msgstr "" "Dell EMC Unity ドライバーに IO ポートオプション設定のサポートを追加しました。" msgid "Add v2.1 volume replication support in VMAX driver." msgstr " VMAXドライバーに、 v2.1 レプリケーションのサポートを追加しました。" msgid "" "Added Cheesecake (v2.1) replication support to the Pure Storage Volume " "drivers." msgstr "" "Pure Storage ボリュームドライバーに Cheesecake (v2.1) レプリケーションのサ" "ポートを追加しました。" msgid "Added Cinder consistency group for the NetApp NFS driver." msgstr "NetApp NFS ドライバー用に整合性グループのサポートを追加しました。" msgid "Added Consistency Group support in ScaleIO driver." msgstr "Scale IO ドライバーに整合性グループのサポートを追加しました。" msgid "Added Datera EDF API 2.1 support." msgstr "Datera EDF API 2.1 サポートを追加しました。" msgid "Added Datera Multi-Tenancy Support." msgstr "Datera マルチテナントサポートを追加しました。" msgid "Added Datera Template Support." msgstr "Datera テンプレートサポートを追加しました。" msgid "Added HA support for NexentaEdge iSCSI driver" msgstr " NexentaEdge iSCSI ドライバに HA サポートを追加しました。" msgid "Added Keystone v3 support for Swift backup driver in single user mode." msgstr "" "シングルユーザーモードで、Swift バックアップドライバー用の Keystone v3 のサ" "ポートを追加しました。" msgid "Added Migrate and Extend for Nexenta NFS driver." msgstr "Nexenta NFS ドライバーにマイグレーションと拡張の機能を追加しました。" msgid "Added NBD driver for NexentaEdge." msgstr "NexentaEdge 用 NBD ドライバーを追加しました。" msgid "Added Nimble Storage Fibre Channel backend driver." msgstr "" "Nimble Storage ファイバーチャネルバックエンドドライバーを追加しました。" msgid "Added QoS support in ScaleIO driver." msgstr "Scale IO ドライバーに QoS サポートを追加しました。" msgid "Added REST API to update backup name and description." msgstr "バックアップ名と説明を更新する REST API を追加しました。" msgid "" "Added RPC backward compatibility layer similar to the one implemented in " "Nova. This means that Cinder services can be upgraded one-by-one without " "breakage. After all the services are upgraded SIGHUP signals should be " "issued to all the services to signal them to reload cached minimum RPC " "versions. Alternative is of course restart of them. Please note that cinder-" "api service doesn't support SIGHUP yet. Please also take into account that " "all the rolling upgrades capabilities are considered tech preview, as we " "don't have a CI testing it yet." msgstr "" "Novaに実装されているものと近い、RPC 後方互換性レイヤーを追加しました。これ" "は、 Cinder サービスを1つずつ、破損させずにアップグレードできることを意味し" "ます。全てのサービスをアップグレードした後、 キャッシュされた最小のRPCバー" "ジョンを再読み込みすることを知らせるために、 SIGHUP シグナルを全てのサービス" "に送信します。代替手段は、もちろん全てのサービスを再起動トすることです。" "cinder-api サービスではまだ SIGHUP をサポートしていないことに注意してくださ" "い。また、まだこの機能のCIテストは行われていないため、すべてのローリングアッ" "プグレード機能がテクニカルプレビューであることを考慮してください。" msgid "Added Retype functionality to Nexenta iSCSI and NFS drivers." msgstr "" "Nexenta iSCSI ドライバーおよび NFS ドライバーにタイプ変更機能を追加しました。" msgid "" "Added a new config option `scheduler_weight_handler`. This is a global " "option which specifies how the scheduler should choose from a listed of " "weighted pools. By default the existing weigher is used which always chooses " "the highest weight." 
msgstr "" "新しい設定オプション `scheduler_weight_handler` を追加しました。これはグロー" "バルオプションで、どのようにスケジューラが重みづけされたプールのリストから選" "択するかを指定します。デフォルトでは今ある weigher が使われます。これは常に最" "高の weight を選びます。" msgid "" "Added a new weight handler `StochasticHostWeightHandler`. This weight " "handler chooses pools randomly, where the random probabilities are " "proportional to the weights, so higher weighted pools are chosen more " "frequently, but not all the time. This weight handler spreads new shares " "across available pools more fairly." msgstr "" "新しいウェイトハンドラ― `StochasticHostWeightHandler` を追加しました。この" "ウェイトハンドラ―はランダムにプールを選びます。ランダム性は重みに比例しますの" "で、より高く重みづけられたプールはより頻繁に選ばれますが、いつもそうではあり" "ません。このウェイトハンドラはより公平に使用可能なプール間で新しく分け合うこ" "とを広めます。" msgid "Added ability to backup snapshots." msgstr "バックアップスナップショットを可能にする機能を追加しました。" msgid "Added ability to query backups by project ID." msgstr "プロジェクト ID によりバックアップを検索できる機能を追加しました。" msgid "" "Added additional metrics reported to the scheduler for Pure Volume Drivers " "for better filtering and weighing functions." msgstr "" "Pure ボリュームドライバのフィルタリング機能と重みづけ機能の向上のため、スケ" "ジューラーにレポートされる指標を追加しました。" msgid "Added backend FC and iSCSI drivers for NEC Storage." msgstr "NEC ストレージ用に FC と iSCSI バックエンドドライバーを追加しました。" msgid "Added backend ISCSI driver for Reduxio." msgstr "Reduxio用バックエンド ISCSIドライバーを追加しました。" msgid "Added backend driver for Coho Data storage." msgstr "Coho Data ストレージ用バックエンドドライバーを追加しました。" msgid "Added backend driver for DISCO storage." msgstr "Discoストレージ用バックエンドドライバーを追加しました。" msgid "Added backend driver for Dell EMC Unity storage." msgstr " Dell EMC Unity ストレージ用バックエンドドライバーを追加しました。" msgid "Added backend driver for FalconStor FreeStor." msgstr " FalconStor FreeStor 用バックエンドドライバーを追加しました。" msgid "Added backend driver for Fujitsu ETERNUS DX (FC)." msgstr " Fujitsu ETERNUS DX 用バックエンドドライバー (FC) を追加 しました。" msgid "Added backend driver for Fujitsu ETERNUS DX (iSCSI)." msgstr "Fujitsu ETERNUS DX 用バックエンドドライバー (iSCSI) を追加しました。" msgid "Added backend driver for Huawei FusionStorage." msgstr "Huawei FusionStorage用バックエンドドライバーを追加しました。" msgid "Added backend driver for Nexenta Edge iSCSI storage." msgstr "Nexenta Edge iSCSI ストレージ用バックエンドドライバーを追加しました。" msgid "Added backend driver for NexentaStor5 NFS storage." msgstr "NexentaStor5 NFS ストレージ用バックエンドドライバーを追加しました。" msgid "Added backend driver for NexentaStor5 iSCSI storage." msgstr "NexentaStor5 iSCSI ストレージ用バックエンドドライバーを追加しました。" msgid "Added backend driver for Synology iSCSI-supported storage." msgstr "" "Synology iSCSI-supported ストレージ用バックエンドドライバーを追加しました。" msgid "Added backend driver for Violin Memory 7000 iscsi storage." msgstr "" "Violin Memory 7000 iscsi ストレージ用バックエンドドライバーを追加しました。" msgid "Added backend driver for ZTE iSCSI storage." msgstr "ZTE iSCSI ストレージ用バックエンドドライバーを追加しました。" msgid "Added cinder backup driver for Google Cloud Storage." msgstr "Google Cloud ストレージ用 バックアップドライバーを追加しました。" msgid "" "Added config option ``vmware_connection_pool_size`` in the VMware VMDK " "driver to specify the maximum number of connections (to vCenter) in the http " "connection pool." msgstr "" "VMware VMDK ドライバに ``vmware_connection_pool_size`` 設定オプションを追加し" "て、http sつ属プールの(vCenterへの)最大接続数を指定します。" msgid "" "Added config option to enable/disable automatically calculation an over-" "subscription ratio max for Pure Volume Drivers. When disabled the drivers " "will now respect the max_oversubscription_ratio config option." 
msgstr "" "Pure ボリュームドライバー用に、オーバーサブスクリプション比率の最大値の自動計" "算を有効/無効にする設定オプションを追加しました。ドライバーを無効にした場合、" "設定オプション max_oversubscription_ratio が優先されます。" msgid "" "Added consistency group support to generic volume groups in ScaleIO Driver." msgstr "" "ScaleIOドライバの汎用ボリュームグループに整合性グループのサポートを追加しまし" "た。" msgid "Added consistency group support to the Huawei driver." msgstr "Huawei ドライバー用に整合性グループのサポートを追加しました。" msgid "" "Added create/delete APIs for group snapshots and an API to create group from " "source." msgstr "" "グループスナップショットの作成/削除 API と、ソースからグループを作成する API " "を追加しました。" msgid "Added driver for Tegile IntelliFlash arrays." msgstr "Tegile IntelliFlash アレイ用ドライバーを追加しました。" msgid "Added driver for the InfiniBox storage array." msgstr "InfiniBox ストレージアレイ 用ドライバーを追加しました。" msgid "Added extend method to NFS driver for NexentaStor 5." msgstr "NexentaStor 5 用 NFS ドライバーへ拡張メソッドを追加しました。" msgid "Added group type and group specs APIs." msgstr "グループタイプと group specs API を追加しました。 " msgid "" "Added host-level (whole back end replication - v2.1) replication support to " "the NetApp cDOT drivers (iSCSI, FC, NFS)." msgstr "" "NetApp cDOT ドライバー(iSCSI、FC、NFS)用にホストレベル(全体バックエンドレ" "プリケーション v2.1)レプリケーションサポートを追加しました。" msgid "Added iSCSI CHAP uni-directional authentication for NetApp drivers." msgstr "NetAPp ドライバー用に iSCSI CHAP の一方向認証機能を追加しました。" msgid "Added manage/unmanage snapshot support for Huawei drivers." msgstr "" "Huawei ドライバー用にスナップショットの管理/管理解除機能のサポートを追加しま" "した。" msgid "Added manage/unmanage snapshot support to the HNAS NFS driver." msgstr "" "HNAS NFS ドライバー用にスナップショットの管理/管理解除機能のサポートを追加し" "ました。" msgid "Added manage/unmanage volume support for Dell Equallogic driver." msgstr "" "Dell Equallogic ドライバー用にボリュームの管理/管理解除機能のサポートを追加し" "ました。" msgid "Added manage/unmanage volume support for Huawei drivers." msgstr "" "Huawei ドライバー用にボリュームの管理/管理解除機能のサポートを追加しました。" msgid "Added multiple management IP support to Storwize SVC driver." msgstr "Storwize SVC ドライバー用に複数の管理IPのサポートを追加しました。" msgid "Added multiple pools support to Storwize SVC driver." msgstr "Storwize SVC ドライバー用に複数プールのサポートを追加しました。" msgid "" "Added new BoolOpt ``backup_ceph_image_journals`` for enabling the Ceph image " "features required to support RBD mirroring of Cinder backup pool." msgstr "" "Ceph イメージ機能を有効とするために新しい BoolOpt " "``backup_ceph_image_journals`` を追加しました。これは Cinder バックアッププー" "ルの RBD ミラーリングをサポートするために必要です。" msgid "" "Added new Hitachi VSP FC Driver. The VSP driver supports all Hitachi VSP " "Family and HUSVM." msgstr "" "新しい Hitachi VSP FC ドライバを追加しました。VSP ドライバはすべての Hitachi " "VSP ファミリーと HUSVM をサポートします。" msgid "Added oversubscription support in the VMAX driver" msgstr "" "VMAX ドライバーに オーバーサブスクリプションの サポートを追加しました。" msgid "Added replication failback support for the Dell SC driver." msgstr "" "Dell SC ドライバー用にレプリケーションフェイルバックのサポートを追加しまし" "た。" msgid "Added replication v2.1 support to the Dell Storage Center drivers." msgstr "" "Dell Storage Center ドライバー用に、レプリケーション V2.1 のサポートを追加し" "ました。" msgid "Added replication v2.1 support to the IBM Storwize driver." msgstr "" "IBM Storwize ドライバー用に、レプリケーション V2.1 のサポートを追加しました。" msgid "Added replication v2.1 support to the IBM XIV/DS8K driver." msgstr "" "IBM XIV/DS8K ドライバー用に、レプリケーション V2.1 のサポートを追加しました。" msgid "Added reset status API to generic volume group." msgstr "一般的なボリュームグループに reset status API を追加しました。" msgid "Added reset status API to group snapshot." msgstr "グループスナップショットに reset status API を追加しました。" msgid "Added snapshot manage/unmanage support to the EMC XtremIO driver." 
msgstr "" "EMC XtremIO ドライバー用にスナップショットの管理/管理解除機能のサポートを追加" "しました。" msgid "Added snapshot manage/unmanage support to the HPE 3PAR driver." msgstr "" "HPE 3PAR ドライバー用にスナップショットの管理/管理解除機能のサポートを追加し" "ました。" msgid "Added snapshot manage/unmanage support to the HPE LeftHand driver." msgstr "" "HPE LeftHand ドライバー用にスナップショットの管理/管理解除機能のサポートを追" "加しました。" msgid "Added support for API microversions, as well as /v3 API endpoint." msgstr "" "API マイクロバージョンおよび /v3 API エンドポイントのサポートを追加しました。" msgid "Added support for ZMQ messaging layer in multibackend configuration." msgstr "" "マルチバックエンド設定において、 Zero MQ メッセージングドライバーのサポートを" "追加しました。" msgid "" "Added support for ZeroMQ messaging driver in cinder single backend config." msgstr "" "cinder の単一のバックエンド設定において、 Zero MQ メッセージングドライバーの" "サポートを追加しました。" msgid "" "Added support for creating a consistency group from a source consistency " "group in the HPE 3PAR driver." msgstr "" "HPE 3PAR ドライバーに、ソースの整合性グループから整合性グループを作成する機能" "を追加しました。" msgid "" "Added support for creating, deleting, and updating consistency groups for " "NetApp 7mode and CDOT backends." msgstr "" "NetApp 7mode と CDOT バックエンドに、整合性グループの追加、削除、更新機能のサ" "ポートを追加しました。" msgid "" "Added support for images with vmware_adaptertype set to paraVirtual in the " "VMDK driver." msgstr "" "VMDK ドライバーに、準仮想化のため vmware_adaptertype を設定したイメージのサ" "ポートを追加しました。" msgid "Added support for manage volume in the VMware VMDK driver." msgstr "" "VMware VMDK ドライバーに、ボリュームの管理機能のサポートを追加しました。" msgid "Added support for manage/unmanage snapshot in the ScaleIO driver." msgstr "" "ScaleIO ドライバー用にスナップショットの管理/管理解除機能のサポートを追加しま" "した。" msgid "Added support for manage/unmanage volume in the ScaleIO driver." msgstr "" "ScaleIO ドライバー用にボリュームの管理/管理解除機能のサポートを追加しました。" msgid "" "Added support for oversubscription in thin provisioning in the ScaleIO " "driver. Volumes should have extra_specs with the key provisioning:type with " "value equals to either 'thick' or 'thin'. max_oversubscription_ratio can be " "defined by the global config or for ScaleIO specific with the config option " "sio_max_over_subscription_ratio. The maximum oversubscription ratio " "supported at the moment is 10.0." msgstr "" "ScaleIO ドライバに、シンプロビジョニングのオーバーサブスクリプションのサポー" "トを追加しました。ボリュームは extra_specs に 鍵 provisioning:type、値が " "'thick' または 'thin' とするべきです。max_oversubscription_ratio をグローバ" "ルオプションとして定義するか、ScaleIO 固有の設定オプション " "sio_max_over_subscription_ratio が定義できます。最大オーバーサブスクリプショ" "ン比率は 10.0 がサポートされます。" msgid "" "Added support for querying volumes filtered by glance metadata key/value " "using 'glance_metadata' optional URL parameter. For example, \"volumes/" "detail?glance_metadata={\"image_name\":\"xxx\"}\"." msgstr "" "glance メタデータの項目である'glance_metadata' とオプションのURLパラメータ" "を キー/値 として、ボリュームをフィルターするクエリーを作成する機能のサポート" "を追加しました。例: \"volumes/detail?glance_metadata={\"image_name\":" "\"xxx\"}\"" msgid "" "Added support for scaling QoS in the ScaleIO driver. The new QoS keys are " "maxIOPSperGB and maxBWSperGB." msgstr "" "ScaleIO ドライバー用にスケーリング QoS 機能のサポートを追加しました。新しい " "QoS キーは maxIOPSperGB と maxBWSperGB です。" msgid "" "Added support for taking, deleting, and restoring a cgsnapshot for NetApp " "7mode and CDOT backends." msgstr "" "NetApp 7mode と CDOT バックエンドに、cgsnapshot の取得、削除、リストア機能の" "サポートを追加しました。" msgid "" "Added the ability to list manageable volumes and snapshots via GET operation " "on the /v2//os-volume-manage and /v2//os-snapshot-" "manage URLs, respectively." 
msgstr "" "GET オペレーションにより、管理可能なボリュームとスナップショットのリストを表" "示を行う機能を追加しました。URLはそれぞれ、 /v2//os-volume-" "manage と /v2//os-snapshot-manage です。" msgid "" "Added the options ``visibility`` and ``protected`` to the os-" "volume_upload_image REST API call." msgstr "" "os-volume_upload_image REST API コールに、 ``visibility`` と ``protected`` オ" "プションを追加しました。" msgid "Added v2.1 replication support in Huawei Cinder driver." msgstr "" "Huawei Cinder ドライバーに、 v2.1 レプリケーションのサポートを追加しました。" msgid "Added v2.1 replication support to SolidFire driver." msgstr "" "SolidFire ドライバーに、 v2.1 レプリケーションのサポートを追加しました。" msgid "Added v2.1 replication support to the HPE 3PAR driver." msgstr "" "HPE 3PAR ドライバーに、 v2.1 レプリケーションのサポートを追加しました。" msgid "Added v2.1 replication support to the HPE LeftHand driver." msgstr "" "HPE LeftHand ドライバーに、 v2.1 レプリケーションのサポートを追加しました。" msgid "Added volume backend drivers for CoprHD FC, iSCSI and Scaleio." msgstr "" "CoprHD FC / iSCSI と Scaleio 用ボリュームバックエンドドライバーを追加しまし" "た。" msgid "Added volume driver for Zadara Storage VPSA." msgstr "Zadara Storage VPSA 用ボリュームバックエンドドライバーを追加しました。" msgid "" "Adding or removing volume_type_access from any project during DB migration " "62 must not be performed." msgstr "" "DB マイグレーションを行っているプロジェクトでは、 volume_type_access の追加/" "削除は行われません。" msgid "Adds v2.1 replication support in VNX Cinder driver." msgstr "" "VNX Cinder ドライバーに、 v2.1 レプリケーションのサポートを追加しました。" msgid "" "All Datera DataFabric backed volume-types will now use API version 2 with " "Datera DataFabric" msgstr "" "全ての Datera DataFabric を背後に持つボリュームタイプは、 Datera DataFabric " "との通信に API バージョン 2 を利用するようになりました。" msgid "" "Allow API user to remove the consistency group name or description " "information." msgstr "" "API ユーザーが整合性グループの名前や説明を削除することが許容されるようになり" "ました。" msgid "" "Allow for eradicating Pure Storage volumes, snapshots, and pgroups when " "deleting their Cinder counterpart." msgstr "" "Pure Storage のボリューム、スナップショット、 pgroup を、 cincer カウンター" "パートが削除された際に一括で削除できるようになりました。" msgid "Allow spaces when managing existing volumes with the HNAS iSCSI driver." msgstr "" "HNAS iSCSI ドライバーで既存のボリュームを管理する際、スペースが許容されるよう" "になりました。" msgid "" "An error has been corrected in the EMC ScaleIO driver that had caused all " "volumes to be provisioned at 'thick' even if user had specificed 'thin'." msgstr "" "EMC ScaleIO において、プロビジョニングで 'thin' を指定した場合でもすべてのボ" "リュームが 'thick' としてプロビジョニングされるエラーを修正しました。" msgid "" "Any Volume Drivers configured in the DEFAULT config stanza should be moved " "to their own stanza and enabled via the enabled_backends config option. The " "older style of config with DEFAULT is deprecated and will be removed in " "future releases." msgstr "" "DEFAULT config スタンザで設定されているすべてのボリュームドライバーは、自身の" "スタンザに移動させ、 enabled_backends config オプションを有効にしなければいけ" "ません。 DEFAULT を使った古いスタイルの設定は非推奨であり、今後のリリースで削" "除される予定です。" msgid "Backend driver for Scality SRB has been removed." msgstr "Scality SRB 用のバックエンドドライバーは削除されました。" msgid "Better cleanup handling in the NetApp E-Series driver." msgstr "より良いNetApp E-Series ドライバにおけるクリーンアップ処理" msgid "" "BoolOpt ``datera_acl_allow_all`` is changed to a volume type extra spec " "option-- ``DF:acl_allow_all``" msgstr "" "BoolOpt ``datera_acl_allow_all` はボリュームタイプの extra spec オプション " "``DF:acl_allow_all`` に変更されました。" msgid "Broke Datera driver up into modules." msgstr "Datera ドライバがモジュール化されました。" msgid "Bug Fixes" msgstr "バグ修正" msgid "Capabilites List for Datera Volume Drivers" msgstr "Datera ボリュームドライバ用のケイパビリティーリスト" msgid "Capacity reporting fixed with Huawei backend drivers." 
msgstr "" "Huawei バックエンドドライバにおけるキャパシティーレポーティングが修正されま" "した。" msgid "Changes config option default for datera_num_replicas from 1 to 3" msgstr "" "設定オプション datera_num_replicas のデフォルト値を 1 から 3 に変更しました。" msgid "Cinder Release Notes" msgstr "Cinder リリースノート" msgid "Current Series Release Notes" msgstr "開発中バージョンのリリースノート" msgid "Deprecated IBM driver _multipath_enabled config flags." msgstr "IBM driver _multipath_enabled 設定フラグは非推奨となりました。" msgid "Deprecated datera_api_version option." msgstr "datera_api_versionオプションは非推奨となりました。" msgid "" "Deprecated the configuration option ``hnas_svcX_volume_type``. Use option " "``hnas_svcX_pool_name`` to indicate the name of the services (pools)." msgstr "" "``hnas_svcX_volume_type``構成オプションは非推奨となりました。サービス(プール)" "の名前を指定する際には、``hnas_svcX_pool_name``オプションを使用してください。" msgid "" "Deprecated the configuration option ``nas_ip``. Use option ``nas_host`` to " "indicate the IP address or hostname of the NAS system." msgstr "" "``nas_ip``構成オプションは非推奨となりました。NASシステムのIPアドレスまはたホ" "スト名を指定する際には、``nas_host``オプションを使用してください。" msgid "Deprecation Notes" msgstr "廃止予定の機能" msgid "Known Issues" msgstr "既知の問題" msgid "Liberty Series Release Notes" msgstr "Liberty バージョンのリリースノート" msgid "Mitaka Series Release Notes" msgstr "Mitaka バージョンのリリースノート" msgid "New Features" msgstr "新機能" msgid "New iSCSI Cinder volume driver for Kaminario K2 all-flash arrays." msgstr "" "Kaminario K2 all-flash アレイの新しい iSCSI Cinder ボリュームドライバー" msgid "New path - cinder.volume.drivers.hpe.hpe_3par_fc.HPE3PARFCDriver" msgstr "新しいパス - cinder.volume.drivers.hpe.hpe_3par_fc.HPE3PARFCDriver" msgid "New path - cinder.volume.drivers.hpe.hpe_3par_iscsi.HPE3PARISCSIDriver" msgstr "" "新しいパス - cinder.volume.drivers.hpe.hpe_3par_iscsi.HPE3PARISCSIDriver" msgid "" "New path - cinder.volume.drivers.hpe.hpe_lefthand_iscsi." "HPELeftHandISCSIDriver" msgstr "" "新しいパス - cinder.volume.drivers.hpe.hpe_lefthand_iscsi." "HPELeftHandISCSIDriver" msgid "New path - cinder.volume.drivers.hpe.hpe_xp_fc.HPEXPFCDriver" msgstr "新しいパス - cinder.volume.drivers.hpe.hpe_xp_fc.HPEXPFCDriver" msgid "New path - cinder.volume.drivers.huawei.huawei_driver.HuaweiFCDriver" msgstr "新しいパス - cinder.volume.drivers.huawei.huawei_driver.HuaweiFCDriver" msgid "New path - cinder.volume.drivers.huawei.huawei_driver.HuaweiISCSIDriver" msgstr "" "新しいパス - cinder.volume.drivers.huawei.huawei_driver.HuaweiISCSIDriver" msgid "Newton Series Release Notes" msgstr "Newton バージョンのリリースノート" msgid "Ocata Series Release Notes" msgstr "Ocata バージョンのリリースノート" msgid "Other Notes" msgstr "その他の注意点" msgid "Pike Series Release Notes" msgstr "Pike バージョンのリリースノート" msgid "QNAP" msgstr "QNAP" msgid "QNAP Cinder driver added support for QES fw 2.0.0." msgstr "QNAP Cinder ドライバーに QES fw 2.0.0 サポートを追加しました。" msgid "QoS support in EMC VMAX iSCSI and FC drivers." msgstr "EMC VMAX iSCSI と FC ドライバーの QoS サポート" msgid "Queens Series Release Notes" msgstr "Queens バージョンのリリースノート" msgid "Re-added QNAP Cinder volume driver." msgstr "QNAP Cinder ボリュームドライバーを再追加しました。" msgid "Reduxio" msgstr "Reduxio" msgid "Removed - ``eqlx_chap_login``" msgstr "``eqlx_chap_login`` を削除しました" msgid "Removed - ``eqlx_chap_password``" msgstr "``eqlx_chap_password`` を削除しました" msgid "Removed - ``eqlx_cli_timeout``" msgstr "``eqlx_cli_timeout`` を削除しました" msgid "Removed - ``eqlx_use_chap``" msgstr "``eqlx_use_chap`` を削除しました" msgid "" "Removing cinder-all binary. Instead use the individual binaries like cinder-" "api, cinder-backup, cinder-volume, cinder-scheduler." 
msgstr "" "cinder-all バイナリーを削除しました。代わりに cinder-api、cinder-backup、" "cinder-volume、cinder-scheduler などの個別のバイナリーを使用してください。" msgid "" "Removing deprecated file cinder.middleware.sizelimit. In your api-paste.ini, " "replace cinder.middleware.sizelimit:RequestBodySizeLimiter.factory with " "oslo_middleware.sizelimit:RequestBodySizeLimiter.factory" msgstr "" "廃止予定だった cinder.middleware.sizelimit を削除しました。api-paste.ini の " "cinder.middleware.sizelimit:RequestBodySizeLimiter.factory を " "oslo_middleware.sizelimit:RequestBodySizeLimiter.factory に入れ替えてくださ" "い。" msgid "" "Removing the Dell EqualLogic driver's deprecated configuration options. " "Please replace old options in your cinder.conf with the new one." msgstr "" "廃止予定だった Dell EqualLogic ドライバーを削除しました。cinder.conf の古いオ" "プションを新しいものに入れ替えてください。" msgid "" "Rename Huawei18000ISCSIDriver and Huawei18000FCDriver to HuaweiISCSIDriver " "and HuaweiFCDriver." msgstr "" "Huawei18000ISCSIDriver と Huawei18000FCDriver を HuaweiISCSIDriver と " "HuaweiFCDriver に名前を変更しました。" msgid "Replaced with - ``chap_password``" msgstr "``chap_password`` に置き換えられました" msgid "Replaced with - ``chap_username``" msgstr "``chap_username`` に置き換えられました" msgid "Replaced with - ``ssh_conn_timeout``" msgstr "``ssh_conn_timeout`` に置き換えられました" msgid "Replaced with - ``use_chap_auth``" msgstr "``use_chap_auth`` に置き換えられました" msgid "Security Issues" msgstr "セキュリティー上の問題" msgid "Show CG Snapshot checks both tables." msgstr "両方のテーブルに CG スナップショットチェックを表示する。" msgid "Show CG checks both tables." msgstr "両方のテーブルに CG チェックを表示する。" msgid "Start using reno to manage release notes." msgstr "リリースノートの管理に reno を使い始めました。" msgid "" "Starting from Mitaka release Cinder is having a tech preview of rolling " "upgrades support." msgstr "" "Mitaka リリースから、Cinder はローリングアップグレードの技術プレビューをはじ" "めました。" msgid "Support Force backup of in-use cinder volumes for Nimble Storage." msgstr "" "Nimble ストレージで、使用中の Cinder ボリュームの強制バックアップをサポートし" "ました。" msgid "" "Support cinder_img_volume_type property in glance image metadata to specify " "volume type." msgstr "" "ボリュームタイプを指定するための Glance イメージメタデータの " "cinder_img_volume_type プロパティをサポートしました。" msgid "Support for Consistency Groups in the NetApp E-Series Volume Driver." msgstr "" "NetApp E-Series ボリュームドライバーで整合性グループをサポートしました。" msgid "Support for Dot Hill AssuredSAN arrays has been removed." msgstr "Dot Hill AssuredSAN アレイのサポートを削除しました。" msgid "Support for VMAX SRDF/Metro on VMAX cinder driver." msgstr "VMAX cinder ドライバーで VMAX SRDF/Metro をサポートしました。" msgid "Support for compression on VMAX All Flash in the VMAX driver." msgstr "VMAX ドライバーの VMAX All Flash で圧縮をサポートしました。" msgid "" "Support for creating a consistency group from consistency group in XtremIO." msgstr "XtremIO の整合性グループからの整合性グループの作成をサポートしました。" msgid "Support for force backup of in-use Cinder volumes in Nimble driver." msgstr "" "Nimble ドライバーで、使用中の Cinder ボリュームの強制バックアップをサポートし" "ました。" msgid "Support for iSCSI in INFINIDAT InfiniBox driver." msgstr "INFINIDAT InfiniBox ドライバーで iSCSI をサポートしました。" msgid "Support for iSCSI multipath in Huawei driver." msgstr "Huawei ドライバーで iSCSI マルチパスをサポートしました。" msgid "Support for iSCSI multipathing in EMC VMAX driver." msgstr "EMC VMAX ドライバーで iSCSI マルチパスをサポートしました。" msgid "Support for manage/ unmanage snapshots on VMAX cinder driver." msgstr "" "VMAX ドライバーでスナップショットの管理/管理解除機能をサポートしました。" msgid "Support for snapshot backup using the optimal path in Huawei driver." msgstr "" "Huawei ドライバーで最適パスを使用したスナップショットバックアップをサポートし" "ました。" msgid "Support iSCSI configuration in replication in Huawei driver." 
msgstr "Huawei ドライバーで複製における iSCSI 設定をサポートしました。" msgid "" "Support manage/unmanage volume and manage/unmanage snapshot functions for " "the NEC volume driver." msgstr "" "NEC ボリュームドライバーで、ボリュームとスナップショットの管理/管理解除機能の" "サポートしました。" msgid "Support to sort snapshots with \"name\"." msgstr "「名前」によるスナップショットのソートをサポートしました。" msgid "Tegile" msgstr "Tegile" msgid "Upgrade Notes" msgstr "アップグレード時の注意" msgid "Violin" msgstr "Violin" msgid "Violin Memory 6000 array series drivers are removed." msgstr "Violin Memory 6000 array シリーズのドライバーは削除されました。" msgid "Volume Manage/Unmanage support for Datera Volume Drivers" msgstr "" "Datera Volume ドライバー用にボリュームの管理/管理解除機能のサポートを追加しま" "した。" msgid "" "Volume manage/unmanage support for IBM FlashSystem FC and iSCSI drivers." msgstr "" "IBM FlashSystem FC および iSCSI ドライバー用にボリュームの管理/管理解除機能の" "サポートを追加しました。" msgid "Volume manage/unmanage support for Oracle ZFSSA iSCSI and NFS drivers." msgstr "" "Oracle ZFSSA iSCSI および NFS ドライバー用にボリュームの管理/管理解除機能のサ" "ポートを追加しました。" msgid "X-IO" msgstr "X-IO" msgid "ZTE" msgstr "ZTE" msgid "" "``\"admin_or_storage_type_admin\": \"is_admin:True or role:" "storage_type_admin\",``" msgstr "" "``\"admin_or_storage_type_admin\": \"is_admin:True or role:" "storage_type_admin\",``" msgid "" "``\"volume_extension:types_manage\": \"rule:admin_or_storage_type_admin\", " "\"volume_extension:volume_type_access:addProjectAccess\": \"rule:" "admin_or_storage_type_admin\", \"volume_extension:volume_type_access:" "removeProjectAccess\": \"rule:admin_or_storage_type_admin\",``" msgstr "" "``\"volume_extension:types_manage\": \"rule:admin_or_storage_type_admin\", " "\"volume_extension:volume_type_access:addProjectAccess\": \"rule:" "admin_or_storage_type_admin\", \"volume_extension:volume_type_access:" "removeProjectAccess\": \"rule:admin_or_storage_type_admin\",``" msgid "``choice_client``" msgstr "``choice_client``" msgid "``choice_client`` to ``disco_choice_client``" msgstr "``choice_client`` から ``disco_choice_client``" msgid "" "``cinder.keymgr.conf_key_mgr.ConfKeyManager`` still remains, but the " "``fixed_key`` configuration options should be moved to the ``[key_manager]`` " "section" msgstr "" "``cinder.keymgr.conf_key_mgr.ConfKeyManager`` は残っていますが、" "``fixed_key`` 設定オプションは ``[key_manager]`` セクションに移動する必要があ" "ります" msgid "``clone_check_timeout`` to ``disco_clone_check_timeout``" msgstr "``clone_check_timeout`` から ``disco_clone_check_timeout``" msgid "``disco_client_port``" msgstr "``disco_client_port``" msgid "``disco_client``" msgstr "``disco_client``" msgid "``disco_src_api_port``" msgstr "``disco_src_api_port``" msgid "" "``iscsi_ip_address``, ``iscsi_port``, ``target_helper``, " "``iscsi_target_prefix`` and ``iscsi_protocol`` config options are deprecated " "in flavor of ``target_ip_address``, ``target_port``, ``target_helper``, " "``target_prefix`` and ``target_protocol`` accordingly. Old config options " "will be removed in S release." 
msgstr "" "``iscsi_ip_address``、``iscsi_port``、``target_helper``、" "``iscsi_target_prefix``、``iscsi_protocol`` 設定オプションは廃止予定となり、" "``target_ip_address``、``target_port``、``target_helper``、" "``target_prefix``、``target_protocol``に変更されました。古い設定オプションは " "S リリースで削除予定です。" msgid "``os-set_image_metadata``" msgstr "``os-set_image_metadata``" msgid "``os-unset_image_metadata``" msgstr "``os-unset_image_metadata``" msgid "``rest_ip``" msgstr "``rest_ip``" msgid "``rest_ip`` to ``disco_rest_ip``" msgstr "``rest_ip`` から ``disco_rest_ip``" msgid "``restore_check_timeout`` to ``disco_restore_check_timeout``" msgstr "``restore_check_timeout`` から ``disco_restore_check_timeout``" msgid "``retry_interval``" msgstr "``retry_interval``" msgid "``retry_interval`` to ``disco_retry_interval``" msgstr "``retry_interval`` から ``disco_retry_interval``" msgid "``snapshot_check_timeout`` to ``disco_snapshot_check_timeout``" msgstr "``snapshot_check_timeout`` から ``disco_snapshot_check_timeout``" msgid "``volume_name_prefix`` to ``disco_volume_name_prefix``" msgstr "``volume_name_prefix`` から ``disco_volume_name_prefix``" ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315528.0 cinder-27.0.0/releasenotes/source/mitaka.rst0000664000175000017500000006727200000000000021076 0ustar00zuulzuul00000000000000=========================== Mitaka Series Release Notes =========================== .. _Mitaka Series Release Notes_8.1.1-11_stable_mitaka: 8.1.1-11 ======== .. _Mitaka Series Release Notes_8.1.1-11_stable_mitaka_Security Issues: Security Issues --------------- .. releasenotes/notes/apply-limits-to-qemu-img-29f722a1bf4b91f8.yaml @ b'c6adc020a67ae77e3645d4f6e80fa93b19432177' - The qemu-img tool now has resource limits applied which prevent it from using more than 1GB of address space or more than 2 seconds of CPU time. This provides protection against denial of service attacks from maliciously crafted or corrupted disk images. .. _Mitaka Series Release Notes_8.1.1_stable_mitaka: 8.1.1 ===== .. _Mitaka Series Release Notes_8.1.1_stable_mitaka_New Features: New Features ------------ .. releasenotes/notes/bdd-pools-stats-afb4398daa9248de.yaml @ b'3140f750858f0bb6e919e8673197c9d7c6b157f2' - Report pools in volume stats for Block Device Driver. .. releasenotes/notes/nimble-add-force-backup-539e1e5c72f84e61.yaml @ b'0ea086e1131fa3da284e348ee962d61470a99035' - Support Force backup of in-use cinder volumes for Nimble Storage. .. releasenotes/notes/vhd-disk-format-upload-to-image-5851f9d35f4ee447.yaml @ b'f45d02bace943eab2806233eff39ffa258ad685e' - Added support for vhd disk-format for volume upload-to-image. .. _Mitaka Series Release Notes_8.1.1_stable_mitaka_Bug Fixes: Bug Fixes --------- .. releasenotes/notes/3par-create-fc-vlun-match-set-type-babcf2cbce1ce317.yaml @ b'5f45e0363eadee8aedaf74e11a112ffee82e13de' - 3PAR driver creates FC VLUN of match-set type instead of host sees. With match-set, the host will see the virtual volume on specified NSP (Node-Slot-Port). This change in vlun type fixes bug 1577993. .. releasenotes/notes/pure-fc-wwpn-case-c1d97f3fa7663acf.yaml @ b'55a668dea793e232590b24f8362e764a1a572573' - Fix issue with PureFCDriver where partially case sensitive comparison of connector wwpn could cause initialize_connection to fail when attempting to create duplicate Purity host. .. _Mitaka Series Release Notes_8.1.0_stable_mitaka: 8.1.0 ===== .. _Mitaka Series Release Notes_8.1.0_stable_mitaka_New Features: New Features ------------ .. 
releasenotes/notes/brcd_lookupservice_http_support-f6485b38a1feaa15.yaml @ b'946776cc5934b5889e15275a2e2ba6f3a8218aeb' - Support for use of 'fc_southbound_protocol' configuration setting in the Brocade FC SAN lookup service. .. _Mitaka Series Release Notes_8.1.0_stable_mitaka_Upgrade Notes: Upgrade Notes ------------- .. releasenotes/notes/vmware-vmdk-config-eb70892e4ccf8f3c.yaml @ b'83ef56a4187115422bbdb47dc218c243cef13054' - The VMware VMDK driver supports a new config option 'vmware_host_port' to specify the port number to connect to vCenter server. .. _Mitaka Series Release Notes_8.1.0_stable_mitaka_Bug Fixes: Bug Fixes --------- .. releasenotes/notes/allow-admin-quota-operations-c1c2236711224023.yaml @ b'76c74ea3c773368431f2a6894cf4ab5181896115' - Projects with the admin role are now allowed to operate on the quotas of all other projects. .. releasenotes/notes/vmware_vmdk_paravirtual-3d5eeef96dcbcfb7.yaml @ b'92fa7eed95982e5cb5b483100cb1c5cf53eb95ea' - Added support for images with vmware_adaptertype set to paraVirtual in the VMDK driver. .. _Mitaka Series Release Notes_8.0.0_stable_mitaka: 8.0.0 ===== .. _Mitaka Series Release Notes_8.0.0_stable_mitaka_New Features: New Features ------------ .. releasenotes/notes/3par-create-cg-from-source-cg-5634dcf9feb813f6.yaml @ b'c9e5562dfddf190e124a4169e7cc9193fd82cd3d' - Added support for creating a consistency group from a source consistency group in the HPE 3PAR driver. .. releasenotes/notes/3par-license-check-51a16b5247675760.yaml @ b'6fddcf6da018c1c394a3d841eede1118d94d4e36' - Disable standard capabilities based on 3PAR licenses. .. releasenotes/notes/3par-manage-unmanage-snapshot-eb4e504e8782ba43.yaml @ b'9c3cbdd90fbf4e462c23f640e68cd88034c873c2' - Added snapshot manage/unmanage support to the HPE 3PAR driver. .. releasenotes/notes/Dell-SC-v2.1-replication-ef6b1d6a4e2795a0.yaml @ b'87b9380e20e5ff9a1c429930a28321b8fe31f00d' - Added replication v2.1 support to the Dell Storage Center drivers. .. releasenotes/notes/Huawei-volume-driver-replication-v2.1-ada5bc3ad62dc633.yaml @ b'eb3fcbb9bc32f7589ea5b974ae084f30b7ac9822' - Added v2.1 replication support in Huawei Cinder driver. .. releasenotes/notes/NetApp-ONTAP-full-cg-support-cfdc91bf0acf9fe1.yaml @ b'3b2d17a5db07dfba5d20a1697025706dda6f0a0a' - Added support for creating, deleting, and updating consistency groups for NetApp 7mode and CDOT backends. .. releasenotes/notes/NetApp-ONTAP-full-cg-support-cfdc91bf0acf9fe1.yaml @ b'3b2d17a5db07dfba5d20a1697025706dda6f0a0a' - Added support for taking, deleting, and restoring a cgsnapshot for NetApp 7mode and CDOT backends. .. releasenotes/notes/add-coho-driver-b4472bff3f64aa41.yaml @ b'f7e9c240dcc25bdf17e3ad0e4591a7368fe8032a' - Added backend driver for Coho Data storage. .. releasenotes/notes/add-google-backup-driver-d1e7ac33d5780b79.yaml @ b'4566b6f550c52d5cf1e2763bc2b9607ad25e57a5' - Added cinder backup driver for Google Cloud Storage. .. releasenotes/notes/add-tegile-driver-b7919c5f30911998.yaml @ b'4566b6f550c52d5cf1e2763bc2b9607ad25e57a5' - Added driver for Tegile IntelliFlash arrays. .. releasenotes/notes/backup-snapshots-2f547c8788bc11e1.yaml @ b'4566b6f550c52d5cf1e2763bc2b9607ad25e57a5' - Added ability to backup snapshots. .. releasenotes/notes/balanced-fc-port-selection-fbf6b841fea99156.yaml @ b'4566b6f550c52d5cf1e2763bc2b9607ad25e57a5' - Support balanced FC port selection for Huawei drivers. .. 
releasenotes/notes/brocade_http_connector-0021e41dfa56e671.yaml @ b'935aa1a5b401d086334fa8ac52bf01170a3eb9ca' - HTTP connector for the Cinder Brocade FC Zone plugin. This connector allows for communication between the Brocade FC zone plugin and the switch to be over HTTP or HTTPs. To make use of this connector, the user would add a configuration setting in the fabric block for a Brocade switch with the name as 'fc_southbound_protocol' with a value as 'HTTP' or 'HTTPS'. .. releasenotes/notes/brocade_virtual_fabrics_support-d2d0b95b19457c1d.yaml @ b'3abd22f7bbc1b00c01de7b8b53fd19c453f822a6' - Support for configuring Fibre Channel zoning on Brocade switches through Cinder Fibre Channel Zone Manager and Brocade Fibre Channel zone plugin. To zone in a Virtual Fabric, set the configuration option 'fc_virtual_fabric_id' for the fabric. .. releasenotes/notes/cg_api_volume_type-7db1856776e707c7.yaml @ b'7fdc8baf4e32fe59165b7511b3336420bec8c8ef' - The consistency group API now returns volume type IDs. .. releasenotes/notes/cinder-api-microversions-d2082a095c322ce6.yaml @ b'4566b6f550c52d5cf1e2763bc2b9607ad25e57a5' - Added support for API microversions, as well as /v3 API endpoint. .. releasenotes/notes/cloudbyte-retype-support-4b9f79f351465279.yaml @ b'7fdc8baf4e32fe59165b7511b3336420bec8c8ef' - Retype support added to CloudByte iSCSI driver. .. releasenotes/notes/datera-driver-v2-update-930468e8259c8e86.yaml @ b'3962a77f050f4a3760c362539650ca1b95045d2d' - All Datera DataFabric backed volume-types will now use API version 2 with Datera DataFabric .. releasenotes/notes/delete-volume-with-snapshots-0b104e212d5d36b1.yaml @ b'0b2a2172ce0f6605e04e2f66757a8be3e25be3fe' - It is now possible to delete a volume and its snapshots by passing an additional argument to volume delete, "cascade=True". .. releasenotes/notes/discard-config-option-711a7fbf20685834.yaml @ b'63e54b80d0b3103621e248122c48b8bbb167580a' - New config option to enable discard (trim/unmap) support for any backend. .. releasenotes/notes/disco-cinder-driver-9dac5fb04511de1f.yaml @ b'4566b6f550c52d5cf1e2763bc2b9607ad25e57a5' - Added backend driver for DISCO storage. .. releasenotes/notes/friendly-zone-names-d5e131d356040de0.yaml @ b'c346612cc7c9ff0b6e4534534b1818b5db2cfbc4' - Cinder FC Zone Manager Friendly Zone Names This feature adds support for Fibre Channel user friendly zone names if implemented by the volume driver. If the volume driver passes the host name and storage system to the Fibre Channel Zone Manager in the conn_info structure, the zone manager will use these names in structuring the zone name to provide a user friendly zone name. .. releasenotes/notes/fujitsu-eternus-dx-fc-741319960195215c.yaml @ b'4566b6f550c52d5cf1e2763bc2b9607ad25e57a5' - Added backend driver for Fujitsu ETERNUS DX (FC). .. releasenotes/notes/fujitsu-eternus-dx-iscsi-e796beffb740db89.yaml @ b'4566b6f550c52d5cf1e2763bc2b9607ad25e57a5' - Added backend driver for Fujitsu ETERNUS DX (iSCSI). .. releasenotes/notes/huawei-manage-unmanage-snapshot-e35ff844d72fedfb.yaml @ b'4566b6f550c52d5cf1e2763bc2b9607ad25e57a5' - Added manage/unmanage snapshot support for Huawei drivers. .. releasenotes/notes/huawei-support-manage-volume-2a746cd05621423d.yaml @ b'4566b6f550c52d5cf1e2763bc2b9607ad25e57a5' - Added manage/unmanage volume support for Huawei drivers. .. releasenotes/notes/image-volume-type-c91b7cff3cb41c13.yaml @ b'dc12ecd1ea7ab5fe6f90e4479d4e5727ff64e16c' - Support cinder_img_volume_type property in glance image metadata to specify volume type. .. 
releasenotes/notes/lefthand-consistency-groups-d73f8e418884fcc6.yaml @ b'7fdc8baf4e32fe59165b7511b3336420bec8c8ef' - Consistency group support has been added to the LeftHand backend driver. .. releasenotes/notes/lefthand-manage-unmanage-snapshot-04de39d268d51169.yaml @ b'6fa9ac877b7d29596199da1d6d0ad12f01eb134b' - Added snapshot manage/unmanage support to the HPE LeftHand driver. .. releasenotes/notes/netapp-chap-iscsi-auth-264cd942b2a76094.yaml @ b'ce3052a867771875f8f472438bcc187caa3021e7' - Added iSCSI CHAP uni-directional authentication for NetApp drivers. .. releasenotes/notes/netapp-eseries-consistency-groups-4f6b2af2d20c94e9.yaml @ b'4566b6f550c52d5cf1e2763bc2b9607ad25e57a5' - Support for Consistency Groups in the NetApp E-Series Volume Driver. .. releasenotes/notes/nexenta-edge-iscsi-b3f12c7a719e8b8c.yaml @ b'7fdc8baf4e32fe59165b7511b3336420bec8c8ef' - Added backend driver for Nexenta Edge iSCSI storage. .. releasenotes/notes/nexentastor5_iscsi-e1d88b07d15c660b.yaml @ b'4566b6f550c52d5cf1e2763bc2b9607ad25e57a5' - Added backend driver for NexentaStor5 iSCSI storage. .. releasenotes/notes/nexentastor5_nfs-bcc8848716daea63.yaml @ b'4566b6f550c52d5cf1e2763bc2b9607ad25e57a5' - Added backend driver for NexentaStor5 NFS storage. .. releasenotes/notes/nimble-manage-unmanage-1d6d5fc23cbe59a1.yaml @ b'7fdc8baf4e32fe59165b7511b3336420bec8c8ef' - Manage and unmanage support has been added to the Nimble backend driver. .. releasenotes/notes/pure-enhanced-stats-42a684fe4546d1b1.yaml @ b'4566b6f550c52d5cf1e2763bc2b9607ad25e57a5' - Added additional metrics reported to the scheduler for Pure Volume Drivers for better filtering and weighing functions. .. releasenotes/notes/pure-enhanced-stats-42a684fe4546d1b1.yaml @ b'4566b6f550c52d5cf1e2763bc2b9607ad25e57a5' - Added a config option to enable/disable automatic calculation of the max over-subscription ratio for Pure Volume Drivers. When disabled, the drivers will now respect the max_oversubscription_ratio config option. .. releasenotes/notes/pure-eradicate-on-delete-1e15e1440d5cd4d6.yaml @ b'b85caca74a90a9b9215c9c8a4a6b868f8f300952' - New config option pure_eradicate_on_delete for Pure Storage volume drivers. When enabled, deleted data will be permanently eradicated instead of being placed into a pending eradication state. .. releasenotes/notes/pure-v2.1-replication-0246223caaa8a9b5.yaml @ b'04f4aa158d7390f1e0412398dbe962a192fa6eaa' - Added Cheesecake (v2.1) replication support to the Pure Storage Volume drivers. .. releasenotes/notes/re-add-nexenta-driver-d3af97e33551a485.yaml @ b'4566b6f550c52d5cf1e2763bc2b9607ad25e57a5' - Added Migrate and Extend for Nexenta NFS driver. .. releasenotes/notes/re-add-nexenta-driver-d3af97e33551a485.yaml @ b'4566b6f550c52d5cf1e2763bc2b9607ad25e57a5' - Added Retype functionality to Nexenta iSCSI and NFS drivers. .. releasenotes/notes/replication-v2.1-3par-b3f780a109f9195c.yaml @ b'4566b6f550c52d5cf1e2763bc2b9607ad25e57a5' - Added v2.1 replication support to the HPE 3PAR driver. .. releasenotes/notes/replication-v2.1-lefthand-745b72b64e5944c3.yaml @ b'4566b6f550c52d5cf1e2763bc2b9607ad25e57a5' - Added v2.1 replication support to the HPE LeftHand driver. .. releasenotes/notes/replication-v2.1-storwize-2df7bfd8c253090b.yaml @ b'c8cf5504cc6c49b3060b8c8c0f1304b19d00bfb1' - Added replication v2.1 support to the IBM Storwize driver. .. releasenotes/notes/rpc_compatibility-375be8ac3158981d.yaml @ b'c9a55d852e3f56a955039e99b628ce0b1c1e95af' - Added RPC backward compatibility layer similar to the one implemented in Nova.
This means that Cinder services can be upgraded one-by-one without breakage. After all the services are upgraded, SIGHUP signals should be issued to all the services to signal them to reload cached minimum RPC versions. Alternatively, the services can simply be restarted. Please note that the cinder-api service doesn't support SIGHUP yet. Please also take into account that all the rolling upgrade capabilities are considered tech preview, as we don't have CI testing them yet. .. releasenotes/notes/scaleio-consistency-groups-707f9b4ffcb3c14c.yaml @ b'4566b6f550c52d5cf1e2763bc2b9607ad25e57a5' - Added Consistency Group support in ScaleIO driver. .. releasenotes/notes/scaleio-manage-existing-32217f6d1c295193.yaml @ b'4566b6f550c52d5cf1e2763bc2b9607ad25e57a5' - Added support for manage/unmanage volume in the ScaleIO driver. .. releasenotes/notes/scaleio-qos-support-2ba20be58150f251.yaml @ b'4566b6f550c52d5cf1e2763bc2b9607ad25e57a5' - Added QoS support in ScaleIO driver. .. releasenotes/notes/scaling-backup-service-7e5058802d2fb3dc.yaml @ b'05a516da01225bed8b99ca49e558d40d71df3fe1' - cinder-backup service is now decoupled from cinder-volume, which allows more flexible scaling. .. releasenotes/notes/split-out-nested-quota-driver-e9493f478d2b8be5.yaml @ b'7ebd4904b977d29c97447b53fbd718bccfa39969' - Split nested quota support into a separate driver. In order to use nested quotas, change the config to ``quota_driver = cinder.quota.NestedDbQuotaDriver`` after running the admin API "os-quota-sets/validate_setup_for_nested_quota_use" command to ensure the existing quota values make sense to nest (see the sketch below). .. releasenotes/notes/storwize-multiple-management-ip-1cd364d63879d9b8.yaml @ b'4566b6f550c52d5cf1e2763bc2b9607ad25e57a5' - Added multiple management IP support to Storwize SVC driver. .. releasenotes/notes/storwize-pool-aware-support-7a40c9934642b202.yaml @ b'4566b6f550c52d5cf1e2763bc2b9607ad25e57a5' - Added multiple pools support to Storwize SVC driver. .. releasenotes/notes/support-zeromq-messaging-driver-d26a1141290f5548.yaml @ b'4566b6f550c52d5cf1e2763bc2b9607ad25e57a5' - Added support for ZeroMQ messaging driver in cinder single backend config. .. releasenotes/notes/tooz-locks-0f9f2cc15f8dad5a.yaml @ b'd6fabaa6cf7700cfb957e37594d0da818afea806' - Locks may now use Tooz as an abstraction layer, to support distributed lock managers and prepare Cinder to better support HA configurations. .. releasenotes/notes/updated-at-list-0f899098f7258331.yaml @ b'7fdc8baf4e32fe59165b7511b3336420bec8c8ef' - The updated_at timestamp is now returned in listing detail. .. releasenotes/notes/vmware-vmdk-manage-existing-0edc20d9d4d19172.yaml @ b'4566b6f550c52d5cf1e2763bc2b9607ad25e57a5' - Added support for manage volume in the VMware VMDK driver. .. releasenotes/notes/vnx-configurable-migration-rate-5e0a2235777c314f.yaml @ b'719bedd6254b4203e19fa7467d8fa524e673ae56' - Configurable migration rate in VNX driver via metadata. .. releasenotes/notes/vnx-replication-v2.1-4d89935547183cc9.yaml @ b'ab2a05aab3b5cb19c656808d137a3c69ffe6e741' - Adds v2.1 replication support in VNX Cinder driver. .. releasenotes/notes/vnx_clone_cg-db74ee2ea71bedcb.yaml @ b'7fdc8baf4e32fe59165b7511b3336420bec8c8ef' - Cloning of consistency group added to EMC VNX backend driver. .. releasenotes/notes/xiv-ds8k-replication-2.1-996c871391152e31.yaml @ b'9952531da4eb63689ed390c3dc2e291180e81f29' - Added replication v2.1 support to the IBM XIV/DS8K driver. ..
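A minimal cinder.conf sketch for the nested-quota note above; only the ``quota_driver`` value and the validation API call are taken from the note, while the placement under ``[DEFAULT]`` is an assumption about a typical deployment::

    [DEFAULT]
    # Opt in to the nested quota driver that was split out in this release.
    # Run the admin API "os-quota-sets/validate_setup_for_nested_quota_use"
    # first to confirm the existing quota values make sense to nest.
    quota_driver = cinder.quota.NestedDbQuotaDriver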
releasenotes/notes/xtremio-cg-from-cg-e05cf286e3a1e943.yaml @ b'4566b6f550c52d5cf1e2763bc2b9607ad25e57a5' - Support for creating a consistency group from consistency group in XtremIO. .. releasenotes/notes/zfssa-volume-manage-unmanage-ccd80807103b69c8.yaml @ b'4566b6f550c52d5cf1e2763bc2b9607ad25e57a5' - Volume manage/unmanage support for Oracle ZFSSA iSCSI and NFS drivers. .. _Mitaka Series Release Notes_8.0.0_stable_mitaka_Known Issues: Known Issues ------------ .. releasenotes/notes/general-upgrades-notes-120f022aa5bfa1ea.yaml @ b'2b4b4883a3f01e38a34b2ffc814d5a805cd3493a' - Cinder services are now automatically downgrading RPC messages to be understood by the oldest version of a service among all the deployment. Disabled and dead services are also taken into account. It is important to keep service list up to date, without old, unused records. This can be done using ``cinder-manage service remove`` command. Once situation is cleaned up services should be either restarted or ``SIGHUP`` signal should be issued to their processes to force them to reload version pins. Please note that cinder-api does not support ``SIGHUP`` signal. .. _Mitaka Series Release Notes_8.0.0_stable_mitaka_Upgrade Notes: Upgrade Notes ------------- .. releasenotes/notes/1220b8a67602b8e7-update_rootwrap_volume_filters.yaml @ b'81645a9ca68ad7ec4a5986925b835d28df078b4c' - It is required to copy new rootwrap.d/volume.filters file into /etc/cinder/rootwrap.d directory. .. releasenotes/notes/VMEM-6000-drivers-removed-9b6675ff7ae5f960.yaml @ b'00b46803e151d99b4813310aa976974e618b4927' - Violin Memory 6000 array series drivers are removed. .. releasenotes/notes/add-del-volumeTypeAccess-b1c8cb14a9d14db3.yaml @ b'b2cd356cacad84d925a5781c7ac6c56c68a73e04' - Adding or removing volume_type_access from any project during DB migration 62 must not be performed. .. releasenotes/notes/add-del-volumeTypeAccess-b1c8cb14a9d14db3.yaml @ b'b2cd356cacad84d925a5781c7ac6c56c68a73e04' - When running PostgreSQL it is required to upgrade and restart all the cinder-api services along with DB migration 62. .. releasenotes/notes/datera-driver-v2-update-930468e8259c8e86.yaml @ b'3962a77f050f4a3760c362539650ca1b95045d2d' - Users of the Datera Cinder driver are now required to use Datera DataFabric version 1.0+. Versions before 1.0 will not be able to utilize this new driver since they still function on v1 of the Datera DataFabric API .. releasenotes/notes/enforce_min_vmware-a080055111b04692.yaml @ b'015cb3ab56a8b9d2419feb159aa03b414904113f' - The VMware VMDK driver now enforces minimum vCenter version of 5.1. .. releasenotes/notes/general-upgrades-notes-120f022aa5bfa1ea.yaml @ b'2b4b4883a3f01e38a34b2ffc814d5a805cd3493a' - If during a *live* upgrade from Liberty a backup service will be killed while processing a restore request it may happen that such backup status won't be automatically cleaned up on the service restart. Such orphaned backups need to be cleaned up manually. .. releasenotes/notes/general-upgrades-notes-120f022aa5bfa1ea.yaml @ b'2b4b4883a3f01e38a34b2ffc814d5a805cd3493a' - When performing a *live* upgrade from Liberty it may happen that retype calls will reserve additional quota. As by default quota reservations are invalidated after 24 hours (config option ``reservation_expire=86400``), we recommend either decreasing that time or watching for unused quota reservations manually during the upgrade process. .. 
releasenotes/notes/rebranded-hpe-drivers-caf1dcef1afe37ba.yaml @ b'7fdc8baf4e32fe59165b7511b3336420bec8c8ef' - HP drivers have been rebranded to HPE. Existing configurations will continue to work with the legacy name, but will need to be updated by the next release. .. releasenotes/notes/remove-hp-cliq-41f47fd61e47d13f.yaml @ b'7fdc8baf4e32fe59165b7511b3336420bec8c8ef' - The deprecated HP CLIQ proxy driver has now been removed. .. releasenotes/notes/remove-ibm-nas-driver-0ed204ed0a2dcf55.yaml @ b'f63d3217744b9f281df2424b6a31108728f65c75' - Users of the ibmnas driver should switch to using the IBM GPFS driver to enable Cinder access to IBM NAS resources. For details on configuring the IBM GPFS driver, see the GPFS config reference. - http://docs.openstack.org/liberty/config-reference/content/GPFS-driver.html .. releasenotes/notes/remove_lvmdriver-9c35f83132cd2ac8.yaml @ b'dbce6abe96de0f046e8432bbd1ce0426a692750a' - Removed deprecated LVMISCSIDriver and LVMISERDriver. These should be switched to LVMVolumeDriver with the iscsi_helper configuration option set to the desired iSCSI helper. .. releasenotes/notes/remove_storwize_npiv-b704ff2d97207666.yaml @ b'7fdc8baf4e32fe59165b7511b3336420bec8c8ef' - Removed the deprecated NPIV options for the Storwize backend driver. .. releasenotes/notes/removed-scality-7151638fdac3ed9d.yaml @ b'7fdc8baf4e32fe59165b7511b3336420bec8c8ef' - Backend driver for Scality SRB has been removed. .. releasenotes/notes/rename-huawei-driver-092025e46b65cd48.yaml @ b'4566b6f550c52d5cf1e2763bc2b9607ad25e57a5' - Rename Huawei18000ISCSIDriver and Huawei18000FCDriver to HuaweiISCSIDriver and HuaweiFCDriver. .. releasenotes/notes/rpc_compatibility-375be8ac3158981d.yaml @ b'c9a55d852e3f56a955039e99b628ce0b1c1e95af' - Starting with the Mitaka release, Cinder has a tech preview of rolling upgrade support. .. releasenotes/notes/scaleio-remove-force-delete-config-48fae029e3622d6d.yaml @ b'4566b6f550c52d5cf1e2763bc2b9607ad25e57a5' - Removed force_delete option from ScaleIO configuration. .. releasenotes/notes/scaling-backup-service-7e5058802d2fb3dc.yaml @ b'05a516da01225bed8b99ca49e558d40d71df3fe1' - As cinder-backup was strongly reworked in this release, the recommended upgrade order when executing a live (rolling) upgrade is c-api->c-sch->c-vol->c-bak. .. releasenotes/notes/split-out-nested-quota-driver-e9493f478d2b8be5.yaml @ b'7ebd4904b977d29c97447b53fbd718bccfa39969' - Nested quotas will no longer be used by default, but can be configured by setting ``quota_driver = cinder.quota.NestedDbQuotaDriver`` .. releasenotes/notes/storwize-split-up-__init__-153fa8f097a81e37.yaml @ b'4566b6f550c52d5cf1e2763bc2b9607ad25e57a5' - Removed the storwize_svc_connection_protocol config setting. Users will now need to set different values for volume_driver in cinder.conf: for FC, ``volume_driver = cinder.volume.drivers.ibm.storwize_svc.storwize_svc_fc.StorwizeSVCFCDriver``; for iSCSI, ``volume_driver = cinder.volume.drivers.ibm.storwize_svc.storwize_svc_iscsi.StorwizeSVCISCSIDriver`` (see the example stanzas below). .. releasenotes/notes/vmware-vmdk-removed-bfb04eed77b95fdf.yaml @ b'015cb3ab56a8b9d2419feb159aa03b414904113f' - The VMware VMDK driver for ESX server has been removed. .. _Mitaka Series Release Notes_8.0.0_stable_mitaka_Deprecation Notes: Deprecation Notes ----------------- .. releasenotes/notes/datera-driver-v2-update-930468e8259c8e86.yaml @ b'3962a77f050f4a3760c362539650ca1b95045d2d' - datera_api_token -- this has been replaced by san_login and san_password ..
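A minimal cinder.conf sketch for the storwize-split-up note above; the backend section names ``storwize-fc`` and ``storwize-iscsi`` are illustrative, while the two ``volume_driver`` paths are quoted verbatim from the note::

    [storwize-fc]
    # FC variant, replaces the removed storwize_svc_connection_protocol = FC.
    volume_driver = cinder.volume.drivers.ibm.storwize_svc.storwize_svc_fc.StorwizeSVCFCDriver

    [storwize-iscsi]
    # iSCSI variant of the split-up Storwize SVC driver.
    volume_driver = cinder.volume.drivers.ibm.storwize_svc.storwize_svc_iscsi.StorwizeSVCISCSIDriver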
releasenotes/notes/deprecate-xml-api-bf3e4079f1dc5eae.yaml @ b'32cb195f0343a0835c3fcccc5962345941fe6025' - The XML API has been marked deprecated and will be removed in a future release. .. releasenotes/notes/deprecated-ibm-multipath-f06c0e907a6301de.yaml @ b'32cb195f0343a0835c3fcccc5962345941fe6025' - Deprecated IBM driver _multipath_enabled config flags. .. _Mitaka Series Release Notes_8.0.0_stable_mitaka_Security Issues: Security Issues --------------- .. releasenotes/notes/pure-verify-https-requests-464320c97ba77a1f.yaml @ b'615cc81051164c8e53c4237a28563264d1edc768' - Pure Storage Volume Drivers can now utilize driver_ssl_cert_verify and driver_ssl_cert_path config options to allow for secure https requests to the FlashArray. .. _Mitaka Series Release Notes_8.0.0_stable_mitaka_Bug Fixes: Bug Fixes --------- .. releasenotes/notes/1220b8a67602b8e7-update_rootwrap_volume_filters.yaml @ b'81645a9ca68ad7ec4a5986925b835d28df078b4c' - Fixed bug causing snapshot creation to fail on systems with LC_NUMERIC set to locale using ',' as decimal separator. .. releasenotes/notes/a7401ead26a7c83b-keystone-url.yaml @ b'109353dedbe53201eb6999984c5658d9193115df' - Cinder will now correctly read Keystone's endpoint for quota calls from keystone_authtoken.auth_uri instead of keymgr.encryption_auth_url config option. .. releasenotes/notes/backup_driver_init_state-d4834fa927e502ab.yaml @ b'7fdc8baf4e32fe59165b7511b3336420bec8c8ef' - Fixed service state reporting when backup manager is unable to initialize one of the backup drivers. .. releasenotes/notes/cg-scheduler-change-180a36b77e8cc26b.yaml @ b'4566b6f550c52d5cf1e2763bc2b9607ad25e57a5' - Consistency group creation previously scheduled at the pool level. Now it is fixed to schedule at the backend level as designed. .. releasenotes/notes/downstream_genconfig-e50791497ce87ce3.yaml @ b'7fdc8baf4e32fe59165b7511b3336420bec8c8ef' - Removed the need for deployers to run tox for config reference generation. .. releasenotes/notes/glance_v2_upload-939c5693bcc25483.yaml @ b'edf00659aadaf898ae679f358a6ea8533f4dd891' - upload-to-image using Image API v2 now correctly handles custom image properties. .. releasenotes/notes/permit_volume_type_operations-b2e130fd7088f335.yaml @ b'4ccd1bd15100b7046e634323e55ad610ef52e0ab' - Enabled a cloud operator to correctly manage policy for volume type operations. To permit volume type operations for specific user, you can for example do as follows. * Add ``storage_type_admin`` role. * Add ``admin_or_storage_type_admin`` rule to ``policy.json``, e.g. ``"admin_or_storage_type_admin": "is_admin:True or role:storage_type_admin",`` * Modify rule for types_manage and volume_type_access, e.g. ``"volume_extension:types_manage": "rule:admin_or_storage_type_admin", "volume_extension:volume_type_access:addProjectAccess": "rule:admin_or_storage_type_admin", "volume_extension:volume_type_access:removeProjectAccess": "rule:admin_or_storage_type_admin",`` .. releasenotes/notes/pure-enhanced-stats-42a684fe4546d1b1.yaml @ b'4566b6f550c52d5cf1e2763bc2b9607ad25e57a5' - Fixed issue where Pure Volume Drivers would ignore reserved_percentage config option. .. releasenotes/notes/pure-eradicate-on-delete-1e15e1440d5cd4d6.yaml @ b'b85caca74a90a9b9215c9c8a4a6b868f8f300952' - Allow for eradicating Pure Storage volumes, snapshots, and pgroups when deleting their Cinder counterpart. .. releasenotes/notes/quota-volume-transfer-abd1f418c6c63db0.yaml @ b'7fdc8baf4e32fe59165b7511b3336420bec8c8ef' - Corrected quota usage when transferring a volume between tenants. .. 
releasenotes/notes/remove-vol-in-error-from-cg-1ed0fde04ab2b5be.yaml @ b'80620b1fea79a24f4b22fdfb9213e2aec69ef826' - Previously the only way to remove volumes in error states from a consistency-group was to delete the consistency group and create it again. Now it is possible to remove volumes in error and error_deleting states. .. releasenotes/notes/tintri_image_direct_clone-f73e561985aad867.yaml @ b'bc86c4b44713f34793b7a4693c919a6a1e618875' - Fix for Tintri image direct clone feature. Fix for the bug 1400966 prevents user from specifying image "nfs share location" as location value for an image. Now, in order to use Tintri image direct clone, user can specify "provider_location" in image metadata to specify image nfs share location. NFS share which hosts images should be specified in a file using tintri_image_shares_config config option. .. releasenotes/notes/volume-filtering-for-quoted-display-name-7f5e8ac888a73001.yaml @ b'fc119315f1931f4893fb4e7423b4c806772f77a5' - Filtering volumes by their display name now correctly handles display names with single and double quotes. .. _Mitaka Series Release Notes_8.0.0_stable_mitaka_Other Notes: Other Notes ----------- .. releasenotes/notes/remove-ibm-nas-driver-0ed204ed0a2dcf55.yaml @ b'f63d3217744b9f281df2424b6a31108728f65c75' - Due to the ibmnas (SONAS) driver being rendered redundant by the addition of NFS capabilities to the IBM GPFS driver, the ibmnas driver is being removed in the Mitaka release. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315528.0 cinder-27.0.0/releasenotes/source/newton.rst0000664000175000017500000007577600000000000021152 0ustar00zuulzuul00000000000000=========================== Newton Series Release Notes =========================== .. _Newton Series Release Notes_9.1.2_stable_newton: 9.1.2 ===== .. _Newton Series Release Notes_9.1.2_stable_newton_Bug Fixes: Bug Fixes --------- .. releasenotes/notes/bug-1634203-netapp-cdot-fix-clone-from-nfs-image-cache-2218fb402783bc20.yaml @ b'f6a4f9346922f1eb91698114b57404f77dc0aae7' - Fixed an issue where the NetApp cDOT NFS driver failed to clone new volumes from the image cache. .. _Newton Series Release Notes_9.1.1_stable_newton: 9.1.1 ===== .. _Newton Series Release Notes_9.1.1_stable_newton_Bug Fixes: Bug Fixes --------- .. releasenotes/notes/kaminario-cinder-driver-bug-1646692-7aad3b7496689aa7.yaml @ b'cedd23f421d95a67da1fca35127bb02d6ca0a82f' - Fixed Non-WAN port filter issue in Kaminario iSCSI driver .. releasenotes/notes/kaminario-cinder-driver-bug-1646766-fe810f5801d24f2f.yaml @ b'fbe75b62eda6b4f8012253fe3dc128de8b4855d5' - Fixed issue of managing a VG with more than one volume in Kaminario FC and iSCSI Cinder drivers. .. _Newton Series Release Notes_9.1.0_stable_newton: 9.1.0 ===== .. _Newton Series Release Notes_9.1.0_stable_newton_Bug Fixes: Bug Fixes --------- .. releasenotes/notes/bug-1622057-netapp-cdot-fix-replication-status-cheesecake-volumes-804dc8b0b1380e6b.yaml @ b'0bed2f471ebba59445c82c08e63794167d0b3ecf' - The NetApp cDOT driver now sets the ``replication_status`` attribute appropriately on volumes created within replicated backends when using host level replication. .. _Newton Series Release Notes_9.0.0_stable_newton: 9.0.0 ===== .. _Newton Series Release Notes_9.0.0_stable_newton_Prelude: Prelude ------- .. 
releasenotes/notes/cluster_job_distribution-f916dd2e4cce6c1b.yaml @ b'8b713e5327d8b3328ae8695202098d5b61e88e7b' Everything in Cinder's release notes related to the High Availability Active-Active effort -preluded with "HA A-A:"- is work in progress and should not be used in production until it has been completed and the appropriate release note has been issued stating its readiness for production. .. releasenotes/notes/use-castellan-key-manager-4911c3c4908ca633.yaml @ b'682e49df2a3db3eacff3be23a2b79811d081d620' The default key manager interface in Cinder was deprecated and the Castellan key manager interface library is now used instead. For more information about Castellan, please see http://docs.openstack.org/developer/castellan/ . .. _Newton Series Release Notes_9.0.0_stable_newton_New Features: New Features ------------ .. releasenotes/notes/Dell-SC-ServerOS-Config-Option-bd0e018319758e03.yaml @ b'38549395f8f4a2bd2eca1a8691a4d3c30362e354' - dell_server_os option added to the Dell SC driver. This option allows the selection of the server type used when creating a server on the Dell DSM during initialize connection. This is only used if the server does not exist. Valid values are from the Dell DSM create server list. .. releasenotes/notes/Dell-SC-live-volume-41bacddee199ce83.yaml @ b'fecbf75edcfcf76915221c38d46549e030c63e0f' - Added support for the use of live volume in place of standard replication in the Dell SC driver. .. releasenotes/notes/Dell-SC-replication-failover_host-failback-a9e9cbbd6a1be6c3.yaml @ b'6cfe6e29d7a62ac5d335401bff8a1cf40c43e0d5' - Added replication failback support for the Dell SC driver. .. releasenotes/notes/ZadaraStorage-13a5fff6f4fa1710.yaml @ b'a85522cc3fad56540ceea45417df07945e4f2b0f' - Added volume driver for Zadara Storage VPSA. .. releasenotes/notes/add-stochastic-scheduling-option-99e10eae023fbcca.yaml @ b'de66e8f8114e06d180fe3a26f62f1dfc0258da85' - Added a new config option `scheduler_weight_handler`. This is a global option which specifies how the scheduler should choose from a listed of weighted pools. By default the existing weigher is used which always chooses the highest weight. .. releasenotes/notes/add-stochastic-scheduling-option-99e10eae023fbcca.yaml @ b'de66e8f8114e06d180fe3a26f62f1dfc0258da85' - Added a new weight handler `StochasticHostWeightHandler`. This weight handler chooses pools randomly, where the random probabilities are proportional to the weights, so higher weighted pools are chosen more frequently, but not all the time. This weight handler spreads new shares across available pools more fairly. .. releasenotes/notes/allow-remove-name-and-description-for-consisgroup-408257a0a18bd530.yaml @ b'e22c24410631824e417bb35da370f10b08025e2c' - Allow API user to remove the consistency group name or description information. .. releasenotes/notes/backup-snapshot-6e7447db930c31f6.yaml @ b'a49711f6dd26a360047fc4d22508eb68744600ac' - Support for snapshot backup using the optimal path in Huawei driver. .. releasenotes/notes/backup-update-d0b0db6a7b1c2a5b.yaml @ b'c5ebe48b8ef5bebd0a1eaba3fd76993bfabc41a1' - Added REST API to update backup name and description. .. releasenotes/notes/bdd-pools-stats-afb4398daa9248de.yaml @ b'948ac4ab45208b37d2aa7a06b0b36ba10da54547' - Report pools in volume stats for Block Device Driver. .. releasenotes/notes/bp-datera-cinder-driver-update-2.1-5c6455b45563adc5.yaml @ b'c06e552fd5a16f3682bac4455f2f75c952cf4eba' - Updating the Datera Elastic DataFabric Storage Driver to version 2.1. 
This adds ACL support, Multipath support and basic IP pool support. .. releasenotes/notes/bp-datera-cinder-driver-update-2.1-5c6455b45563adc5.yaml @ b'c06e552fd5a16f3682bac4455f2f75c952cf4eba' - Changed the config option default for datera_num_replicas from 1 to 3. .. releasenotes/notes/brcd_lookupservice_http_support-f6485b38a1feaa15.yaml @ b'b550cec9cd54b06a1945794ef60dde6215b2f4a3' - Support for use of 'fc_southbound_protocol' configuration setting in the Brocade FC SAN lookup service. .. releasenotes/notes/bug-1518213-a5bf2ea0d008f329.yaml @ b'c2ac7d6604bf5ff7c7b7802979e1d9b177390af5' - Added Keystone v3 support for Swift backup driver in single user mode. .. releasenotes/notes/cinder-coprhd-driver-11ebd149ea8610fd.yaml @ b'a7c715b4d08d369ad1246e23b54c36cf89d44a78' - Added volume backend drivers for CoprHD FC, iSCSI and Scaleio. .. releasenotes/notes/cluster_job_distribution-f916dd2e4cce6c1b.yaml @ b'8b713e5327d8b3328ae8695202098d5b61e88e7b' - HA A-A: Add cluster configuration option to allow grouping hosts that share the same backend configurations and should work in Active-Active fashion. .. releasenotes/notes/cluster_job_distribution-f916dd2e4cce6c1b.yaml @ b'8b713e5327d8b3328ae8695202098d5b61e88e7b' - HA A-A: Updated manage command to display cluster information on service listings. .. releasenotes/notes/cluster_job_distribution-f916dd2e4cce6c1b.yaml @ b'8b713e5327d8b3328ae8695202098d5b61e88e7b' - HA A-A: Added cluster subcommand in manage command to list, remove, and rename clusters. .. releasenotes/notes/cluster_job_distribution-f916dd2e4cce6c1b.yaml @ b'8b713e5327d8b3328ae8695202098d5b61e88e7b' - HA A-A: Added clusters API endpoints for cluster related operations (index, detail, show, enable/disable). Index and detail accept filtering by `name`, `binary`, `disabled`, `num_hosts`, `num_down_hosts`, and up/down status (`is_up`) as URL parameters. Also added their respective policies. .. releasenotes/notes/create-update-rules-b46cf9c07c5a3966.yaml @ b'9771c2cd4e32979358f8647e57b4bab355221c0d' - Separate create and update rules for volume metadata. .. releasenotes/notes/datera-2.2-driver-update-28b97aa2aaf333b6.yaml @ b'a49711f6dd26a360047fc4d22508eb68744600ac' - Capabilities list for Datera Volume Drivers .. releasenotes/notes/datera-2.2-driver-update-28b97aa2aaf333b6.yaml @ b'a49711f6dd26a360047fc4d22508eb68744600ac' - Extended Volume-Type Support for Datera Volume Drivers .. releasenotes/notes/datera-2.2-driver-update-28b97aa2aaf333b6.yaml @ b'a49711f6dd26a360047fc4d22508eb68744600ac' - Naming convention change for Datera Volume Drivers .. releasenotes/notes/datera-2.2-driver-update-28b97aa2aaf333b6.yaml @ b'a49711f6dd26a360047fc4d22508eb68744600ac' - Volume Manage/Unmanage support for Datera Volume Drivers .. releasenotes/notes/datera-2.2-driver-update-28b97aa2aaf333b6.yaml @ b'a49711f6dd26a360047fc4d22508eb68744600ac' - New BoolOpt ``datera_debug_override_num_replicas`` for Datera Volume Drivers .. releasenotes/notes/delete-volume-metadata-keys-3e19694401e13d00.yaml @ b'6bf2d1b94cc775850347d913cbfd3abc674f2b3d' - Added the use of etags in API calls to avoid the lost update problem when deleting volume metadata. .. releasenotes/notes/drbd-resource-options-88599c0a8fc5b8a3.yaml @ b'f1b991913603cf9f3f157328a2725b3f61b33c97' - Added configuration options for the DRBD driver that will be applied to DRBD resources; the default values should be okay for most installations. ..
releasenotes/notes/eqlx-volume-manage-unmanage-a24ec7f0d9989df3.yaml @ b'62b0acb5035beab5651e97eb29515a6dc129e064' - Added manage/unmanage volume support for Dell Equallogic driver. .. releasenotes/notes/falconstor-cinder-driver-dcb61441cd7601c5.yaml @ b'a6f48a55eb362b8236d9b11cbd961f28aa6fe1ba' - Added backend driver for FalconStor FreeStor. .. releasenotes/notes/fusionstorage-cinder-driver-8f3bca98f6e2065a.yaml @ b'ecfb70cfebed4a40c24bcb874c18eede62a4b378' - Added backend driver for Huawei FusionStorage. .. releasenotes/notes/generic-volume-groups-69f998ce44f42737.yaml @ b'8c74c74695043eb7a468028edb049a1611b87e77' - Introduced generic volume groups and added create/ delete/update/list/show APIs for groups. .. releasenotes/notes/group-snapshots-36264409bbb8850c.yaml @ b'708b9be9c0f7ee291461580a0fce92bebbc79d51' - Added create/delete APIs for group snapshots and an API to create group from source. .. releasenotes/notes/group-type-group-specs-531e33ee0ae9f822.yaml @ b'8cf9786e00e47421bf96fbc76f0b9b4ec8605540' - Added group type and group specs APIs. .. releasenotes/notes/hnas-manage-unmanage-snapshot-support-40c8888cc594a7be.yaml @ b'70bfb78875de0bdda92ea2a482c3c1009bf33833' - Added manage/unmanage snapshot support to the HNAS NFS driver. .. releasenotes/notes/huawei-pool-disktype-support-7c1f64639b42a48a.yaml @ b'3767c6bf743c1f287bec9114949e4c4ed7c0dc96' - Add support for reporting pool disk type in Huawei driver. .. releasenotes/notes/hybrid-aggregates-in-netapp-cdot-drivers-f6afa9884cac4e86.yaml @ b'7cc95f80549a45a245f988bcde9cc3ca013b8023' - Add support for hybrid aggregates to the NetApp cDOT drivers. .. releasenotes/notes/ibm-flashsystem-manage-unmanage-88e56837102f838c.yaml @ b'5242d1f09f2b50b9ced65b72f7aa157ed73a53d8' - Volume manage/unmanage support for IBM FlashSystem FC and iSCSI drivers. .. releasenotes/notes/improvement-to-query-consistency-group-detail-84a906d45383e067.yaml @ b'3eafcf5720efb3c49a374c9108f935e044f9a01e' - Added support for querying volumes filtered by group_id using 'group_id' optional URL parameter. For example, "volumes/detail?group_id={consistency_group_id}". .. releasenotes/notes/kaminario-fc-cinder-driver-8266641036281a44.yaml @ b'a49711f6dd26a360047fc4d22508eb68744600ac' - New FC Cinder volume driver for Kaminario K2 all-flash arrays. .. releasenotes/notes/kaminario-iscsi-cinder-driver-c34fadf63cd253de.yaml @ b'a49711f6dd26a360047fc4d22508eb68744600ac' - New iSCSI Cinder volume driver for Kaminario K2 all-flash arrays. .. releasenotes/notes/list-manageable-86c77fc39c5b2cc9.yaml @ b'1574ccf2d22cc86b83f828eadb5778a631fa9789' - Added the ability to list manageable volumes and snapshots via GET operation on the /v2//os-volume-manage and /v2//os-snapshot-manage URLs, respectively. .. releasenotes/notes/manage-resources-v3-c06096f75927fd3b.yaml @ b'0b0000f8fcc5dca4b2f9153b8af66da2538368fb' - The v2 API extensions os-volume-manage and os-snapshot-manage have been mapped to the v3 resources manageable_volumes and manageable_snapshots .. releasenotes/notes/netapp-cDOT-whole-backend-replication-support-59d7537fe3d0eb05.yaml @ b'294ee65bd3850f2b1a8c1ef10c0bd64782ed7afe' - Added host-level (whole back end replication - v2.1) replication support to the NetApp cDOT drivers (iSCSI, FC, NFS). .. releasenotes/notes/netapp-nfs-consistency-group-support-83eccc2da91ee19b.yaml @ b'389188c5ea9c048af927297dea08a8c9cc9506f6' - Added Cinder consistency group for the NetApp NFS driver. .. 
releasenotes/notes/nexentaedge-iscsi-ee5d6c05d65f97af.yaml @ b'672120b372b98229e27616ee35e7413ad20742c4' - Added HA support for NexentaEdge iSCSI driver .. releasenotes/notes/nexentaedge-nbd-eb48268723141f12.yaml @ b'ca9e590f8204032b55609d6304be95a5c35cd23d' - Added NBD driver for NexentaEdge. .. releasenotes/notes/nimble-add-force-backup-539e1e5c72f84e61.yaml @ b'a49711f6dd26a360047fc4d22508eb68744600ac' - Support for force backup of in-use Cinder volumes in Nimble driver. .. releasenotes/notes/pure-list-mangeable-fed4a1b23212f545.yaml @ b'73d2b55352e5924fe4fa93548b549c00f63ad12e' - Add get_manageable_volumes and get_manageable_snapshots implementations for Pure Storage Volume Drivers. .. releasenotes/notes/rename_xiv_ds8k_to_ibm_storage-154eca69c44b3f95.yaml @ b'66bcfb29b458db517a5ac11f359b53af27ac2587' - The xiv_ds8k driver now supports IBM XIV, Spectrum Accelerate, FlashSystem A9000, FlashSystem A9000R and DS8000 storage systems, and was renamed to IBM Storage Driver for OpenStack. The changes include text changes, file names, names of cinder.conf flags, and names of the proxy classes. .. releasenotes/notes/retype-encrypted-volume-49b66d3e8e65f9a5.yaml @ b'a49711f6dd26a360047fc4d22508eb68744600ac' - Support for retype volumes with different encryptions including changes from unencrypted types to encrypted types and vice-versa. .. releasenotes/notes/scaleio-manage-existing-snapshot-5bbd1818654c0776.yaml @ b'1861ed5836eb9475fe4d5cd41203b670c4e71626' - Added support for manage/unmanage snapshot in the ScaleIO driver. .. releasenotes/notes/scaleio-scaling-qos-50c58e43d4b54247.yaml @ b'17d7712fd1de382da24a01e2e3e7ef8e24a84895' - Added support for scaling QoS in the ScaleIO driver. The new QoS keys are maxIOPSperGB and maxBWSperGB. .. releasenotes/notes/scaleio-thin-provisioning-support-9c3b9203567771dd.yaml @ b'49093ae469d21499c76988b6aeaaa00cde92c069' - Added support for oversubscription in thin provisioning in the ScaleIO driver. Volumes should have extra_specs with the key provisioning:type with value equals to either 'thick' or 'thin'. max_oversubscription_ratio can be defined by the global config or for ScaleIO specific with the config option sio_max_over_subscription_ratio. The maximum oversubscription ratio supported at the moment is 10.0. .. releasenotes/notes/solidfire-v2.1-replication-570a1f12f70e67b4.yaml @ b'3f5e040e731f5b04382c267c3936c7f364422ee9' - Added v2.1 replication support to SolidFire driver. .. releasenotes/notes/support-huawei-consistency-group-b666f8f6c6cddd8f.yaml @ b'd32d9966b6cf9a3cdd7889161b566d52d435f40a' - Added consistency group support to the Huawei driver. .. releasenotes/notes/support-volume-glance-metadata-query-866b9e3beda2cd55.yaml @ b'fca31fc95e00580249b19ec52a2e82e7d8dcff38' - Added support for querying volumes filtered by glance metadata key/value using 'glance_metadata' optional URL parameter. For example, "volumes/detail?glance_metadata={"image_name":"xxx"}". .. releasenotes/notes/supported-drivers-9c95dd2378cd308d.yaml @ b'a227bf440ef47ca4c283990b0b8f35d67182e315' - Added supported driver checks on all drivers. .. releasenotes/notes/synology-volume-driver-c5e0f655b04390ce.yaml @ b'78d124dee28e83a4718a455c456605b8127eab09' - Added backend driver for Synology iSCSI-supported storage. .. releasenotes/notes/vhd-disk-format-upload-to-image-5851f9d35f4ee447.yaml @ b'e815f56bd54548e98c45e19a95f80ffd51cc21f1' - Added support for vhd and vhdx disk-formats for volume upload-to-image. .. 
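A minimal cinder.conf sketch for the ScaleIO thin-provisioning oversubscription note above; the section name ``scaleio-1`` is illustrative, the option name and the 10.0 ceiling come from the note, and the ``provisioning:type`` extra spec mentioned there is set on the volume type rather than in this file::

    [scaleio-1]
    # ScaleIO-specific oversubscription ratio; the note states that the
    # maximum supported value at the moment is 10.0.
    sio_max_over_subscription_ratio = 10.0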
releasenotes/notes/vmax-iscsi-multipath-76cc09bacf4fdfbf.yaml @ b'a49711f6dd26a360047fc4d22508eb68744600ac' - Support for iSCSI multipathing in EMC VMAX driver. .. releasenotes/notes/vmax-oversubscription-d61d0e3b1df2487a.yaml @ b'5377ed581083d51acfdf35faf185f0ff1ab0e86f' - Added oversubscription support in the VMAX driver .. releasenotes/notes/vmax-qos-eb40ed35bd2f457d.yaml @ b'a49711f6dd26a360047fc4d22508eb68744600ac' - QoS support in EMC VMAX iSCSI and FC drivers. .. releasenotes/notes/vmem-7000-iscsi-3c8683dcc1f0b9b4.yaml @ b'7720fce5098fa25eec55dfde6a4eec46fbe4b030' - Added backend driver for Violin Memory 7000 iscsi storage. .. releasenotes/notes/vnx-new-driver-7e96934c2d3a6edc.yaml @ b'a49711f6dd26a360047fc4d22508eb68744600ac' - New Cinder driver based on storops library (available in pypi) for EMC VNX. .. releasenotes/notes/volumes-summary-6b2485f339c88a91.yaml @ b'3db21d003fb6a2ea42043c4e262e8334541d7544' - A new API to display the volumes summary. This summary API displays the total number of volumes and total volume's size in GB. .. releasenotes/notes/xtremio-manage-snapshot-5737d3ad37df81d1.yaml @ b'7f44844cc103ac61940cebd89f7835c971ee0ffc' - Added snapshot manage/unmanage support to the EMC XtremIO driver. .. releasenotes/notes/zte_cinder_driver-76ba6d034e1b6f65.yaml @ b'6bf2d1b94cc775850347d913cbfd3abc674f2b3d' - Added backend driver for ZTE iSCSI storage. .. _Newton Series Release Notes_9.0.0_stable_newton_Known Issues: Known Issues ------------ .. releasenotes/notes/os-brick-lock-dir-35bdd8ec0c0ef46d.yaml @ b'37b7a2097a5a8ba1223ba180fcf30f86b188b20e' - When running Nova Compute and Cinder Volume or Backup services on the same host they must use a shared lock directory to avoid rare race conditions that can cause volume operation failures (primarily attach/detach of volumes). This is done by setting the "lock_path" to the same directory in the "oslo_concurrency" section of nova.conf and cinder.conf. This issue affects all previous releases utilizing os-brick and shared operations on hosts between Nova Compute and Cinder data services. .. _Newton Series Release Notes_9.0.0_stable_newton_Upgrade Notes: Upgrade Notes ------------- .. releasenotes/notes/add-suppress-lvm-fd-warnings-option.402bebc03b0a9f00.yaml @ b'844aa0ac3e8068e25193a680ac0c63d68682de4b' - In certain environments (Kubernetes for example) indirect calls to the LVM commands result in file descriptor leak warning messages which in turn cause the process_execution method to raise and exception. To accommodate these environments, and to maintain backward compatibility in Newton we add a ``lvm_suppress_fd_warnings`` bool config to the LVM driver. Setting this to True will append the LVM env vars to include the variable ``LVM_SUPPRESS_FD_WARNINGS=1``. This is made an optional configuration because it only applies to very specific environments. If we were to make this global that would require a rootwrap/privsep update that could break compatibility when trying to do rolling upgrades of the volume service. .. releasenotes/notes/bug-1570845-efdb0206718f4ca4.yaml @ b'622627282f4e79cb6812018db464d5e23ce9ed8e' - The 'backup_service_inithost_offload' configuration option now defaults to 'True' instead of 'False'. .. releasenotes/notes/create-update-rules-b46cf9c07c5a3966.yaml @ b'9771c2cd4e32979358f8647e57b4bab355221c0d' - If policy for update volume metadata is modified in a desired way it's needed to add a desired rule for create volume metadata. .. 
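For the shared lock directory described in the os-brick known issue above, a minimal sketch of the stanza to keep identical in nova.conf and cinder.conf; the directory path shown is only an assumption, while the section and option names come from the note::

    [oslo_concurrency]
    # Must point to the same directory in nova.conf and cinder.conf on hosts
    # running Nova Compute together with Cinder volume or backup services.
    lock_path = /var/lib/openstack/lock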
releasenotes/notes/db-schema-from-kilo-e6e952744531caa2.yaml @ b'10bce4b764976875cb7b3eed59b5149ba1ea070f' - The Cinder database can now only be upgraded from changes since the Kilo release. In order to upgrade from a version prior to that, you must now upgrade to at least Kilo first, then to Newton or later. .. releasenotes/notes/deprecate-backends-in-default-b9784a2333fe22f2.yaml @ b'395288aae47f7b87cfc8b2ff009a2e2f7af2f390' - Any Volume Drivers configured in the DEFAULT config stanza should be moved to their own stanza and enabled via the enabled_backends config option. The older style of config with DEFAULT is deprecated and will be removed in future releases. .. releasenotes/notes/hnas-drivers-refactoring-9dbe297ffecced21.yaml @ b'6c61bdda46e825fafec5a01ccfa958bdc1d88ac3' - HNAS drivers have new configuration paths. Users should now use ``cinder.volume.drivers.hitachi.hnas_nfs.HNASNFSDriver`` for HNAS NFS driver and ``cinder.volume.drivers.hitachi.hnas_iscsi.HNASISCSIDriver`` for HNAS iSCSI driver. .. releasenotes/notes/hnas_deprecate_xml-16840b5a8c25d15e.yaml @ b'3f292f024e451fa29dbd123142802e71b98a4cc0' - HNAS drivers will now read configuration from cinder.conf. .. releasenotes/notes/huawei-iscsi-multipath-support-a056201883909287.yaml @ b'a49711f6dd26a360047fc4d22508eb68744600ac' - Support for iSCSI multipath in Huawei driver. .. releasenotes/notes/huawei-support-iscsi-configuration-in-replication-7ec53737b95ffa54.yaml @ b'3c362510172b11e90424a0e83337f840a26f321d' - Support iSCSI configuration in replication in Huawei driver. .. releasenotes/notes/mark-scality-unsupported-530370e034a6f488.yaml @ b'aded066c995d8b1a51aeb97b7d16c79024bbe639' - The Scality driver has been marked as unsupported and is now deprecated. enable_unsupported_drivers will need to be set to True in cinder.conf to continue to use it. .. releasenotes/notes/netapp-cDOT-whole-backend-replication-support-59d7537fe3d0eb05.yaml @ b'294ee65bd3850f2b1a8c1ef10c0bd64782ed7afe' - While configuring NetApp cDOT back ends, new configuration options ('replication_device' and 'netapp_replication_aggregate_map') must be added in order to use the host-level failover feature. .. releasenotes/notes/pure-custom-user-agent-dcca4cb44b69e763.yaml @ b'925ee611d54fc6780618e8f0a881359a79cfe776' - Pure volume drivers will need 'purestorage' python module v1.6.0 or newer. Support for 1.4.x has been removed. .. releasenotes/notes/remove-xml-api-392b41f387e60eb1.yaml @ b'c042a05ac3872494f3a0924ebb0561e1e33a2d1c' - The XML API has been removed in Newton release. Cinder supports only JSON API request/response format now. .. releasenotes/notes/removed-isertgtadm-7ccefab5d3e89c59.yaml @ b'0bc4bb4fbc7f5a04732c8fe19a89e0e2d329f0f5' - The ISERTgtAdm target was deprecated in the Kilo release. It has now been removed. You should now just use LVMVolumeDriver and specify iscsi_helper for the target driver you wish to use. In order to enable iser, please set iscsi_protocol=iser with lioadm or tgtadm target helpers. .. releasenotes/notes/removed-rpc-topic-config-options-21c2b3f0e64f884c.yaml @ b'a49711f6dd26a360047fc4d22508eb68744600ac' - The config options ``scheduler_topic``, ``volume_topic`` and ``backup_topic`` have been removed without a deprecation period as these had never worked correctly. .. 
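To illustrate the move away from DEFAULT-stanza driver configuration noted above, a minimal cinder.conf sketch; the backend name ``lvm-1`` and the LVM driver choice are illustrative, and only the pattern of a named stanza plus ``enabled_backends`` comes from the note::

    [DEFAULT]
    # Drivers are no longer configured here; list the named stanzas instead.
    enabled_backends = lvm-1

    [lvm-1]
    volume_backend_name = lvm-1
    volume_driver = cinder.volume.drivers.lvm.LVMVolumeDriver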
releasenotes/notes/rename_xiv_ds8k_to_ibm_storage-154eca69c44b3f95.yaml @ b'66bcfb29b458db517a5ac11f359b53af27ac2587' - Users of the IBM Storage Driver, previously known as the IBM XIV/DS8K driver, upgrading from Mitaka or previous releases, need to reconfigure the relevant cinder.conf entries. In most cases the change is just removal of the xiv-ds8k field prefix, but for details use the driver documentation. .. releasenotes/notes/rpc-apis-3.0-b745f429c11d8198.yaml @ b'8a4aecb155478e9493f4d36b080ccdf6be406eba' - Deployments doing continuous live upgrades from master branch should not upgrade into Ocata before doing an upgrade which includes all the Newton's RPC API version bump commits (scheduler, volume). If you're upgrading deployment in a release-to-release manner, then you can safely ignore this note. .. releasenotes/notes/scaleio-default-volume-provisioning-c648118fcc8f297f.yaml @ b'8319ea4c497aa2e25ae9b8be671ac33378aa95db' - EMC ScaleIO driver now uses the config option san_thin_provision to determine the default provisioning type. .. releasenotes/notes/use-castellan-key-manager-4911c3c4908ca633.yaml @ b'682e49df2a3db3eacff3be23a2b79811d081d620' - If using the key manager, the configuration details should be updated to reflect the Castellan-specific configuration options. .. releasenotes/notes/use-oslo_middleware_sizelimit-5f171cf1c44444f8.yaml @ b'ed4bcc0be5fbea67cf0f92ec68eefd80f2933968' - use oslo_middleware.sizelimit rather than cinder.api.middleware.sizelimit compatibility shim .. releasenotes/notes/vmdk_default_task_poll_interval-665f032bebfca39e.yaml @ b'd1a45ba0ddb2c551454ccb931426448ea2f66b27' - The default interval for polling vCenter tasks in the VMware VMDK driver is changed to 2s. .. releasenotes/notes/vmware-vmdk-config-eb70892e4ccf8f3c.yaml @ b'55b442ce192e93a26d12064645aa95fd3661babb' - The VMware VMDK driver supports a new config option 'vmware_host_port' to specify the port number to connect to vCenter server. .. releasenotes/notes/vnx-new-driver-7e96934c2d3a6edc.yaml @ b'a49711f6dd26a360047fc4d22508eb68744600ac' - For EMC VNX backends, please upgrade to use ``cinder.volume.drivers.emc.vnx.driver.EMCVNXDriver``. Add config option ``storage_protocol = fc`` or ``storage_protocol = iscsi`` to the driver section to enable the FC or iSCSI driver respectively. .. _Newton Series Release Notes_9.0.0_stable_newton_Deprecation Notes: Deprecation Notes ----------------- .. releasenotes/notes/datera-2.2-driver-update-28b97aa2aaf333b6.yaml @ b'a49711f6dd26a360047fc4d22508eb68744600ac' - IntOpt ``datera_num_replicas`` is changed to a volume type extra spec option-- ``DF:replica_count`` .. releasenotes/notes/datera-2.2-driver-update-28b97aa2aaf333b6.yaml @ b'a49711f6dd26a360047fc4d22508eb68744600ac' - BoolOpt ``datera_acl_allow_all`` is changed to a volume type extra spec option-- ``DF:acl_allow_all`` .. releasenotes/notes/deprecate-backends-in-default-b9784a2333fe22f2.yaml @ b'395288aae47f7b87cfc8b2ff009a2e2f7af2f390' - Configuring Volume Drivers in the DEFAULT config stanza is not going to be maintained and will be removed in the next release. All backends should use the enabled_backends config option with separate stanza's for each. .. releasenotes/notes/deprecated-nas-ip-fd86a734c92f6fae.yaml @ b'a49711f6dd26a360047fc4d22508eb68744600ac' - Deprecated the configuration option ``nas_ip``. Use option ``nas_host`` to indicate the IP address or hostname of the NAS system. .. 
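A minimal cinder.conf sketch for the EMC VNX upgrade note above; the section name ``vnx-1`` is illustrative, while the driver path and the ``storage_protocol`` values are quoted from the note::

    [vnx-1]
    volume_driver = cinder.volume.drivers.emc.vnx.driver.EMCVNXDriver
    # Use "fc" for the Fibre Channel driver or "iscsi" for the iSCSI driver.
    storage_protocol = fc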
releasenotes/notes/hnas-drivers-refactoring-9dbe297ffecced21.yaml @ b'6c61bdda46e825fafec5a01ccfa958bdc1d88ac3' - The old HNAS drivers configuration paths have been marked for deprecation. .. releasenotes/notes/hnas_deprecate_xml-16840b5a8c25d15e.yaml @ b'3f292f024e451fa29dbd123142802e71b98a4cc0' - The XML configuration file used by the HNAS drivers is now deprecated and will no longer be used in the future. Please use cinder.conf for all driver configuration. .. releasenotes/notes/mark-scality-unsupported-530370e034a6f488.yaml @ b'aded066c995d8b1a51aeb97b7d16c79024bbe639' - The Scality driver has been marked as unsupported and is now deprecated. enable_unsupported_drivers will need to be set to True in cinder.conf to continue to use it. If its support status does not change, it will be removed in the next release. .. releasenotes/notes/use-castellan-key-manager-4911c3c4908ca633.yaml @ b'682e49df2a3db3eacff3be23a2b79811d081d620' - All barbican and keymgr config options in Cinder are now deprecated. All of these options have been moved to the key_manager section for the Castellan library. .. releasenotes/notes/use-oslo_middleware_sizelimit-5f171cf1c44444f8.yaml @ b'ed4bcc0be5fbea67cf0f92ec68eefd80f2933968' - cinder.api.middleware.sizelimit was deprecated in Kilo and a compatibility shim was added to call into oslo_middleware. Using oslo_middleware.sizelimit directly will allow us to remove the compatibility shim in a future release. .. releasenotes/notes/vmdk_vc_51-df29eeb5fc93fbb1.yaml @ b'd1a45ba0ddb2c551454ccb931426448ea2f66b27' - The VMware VMDK driver deprecated support for vCenter version 5.1. .. releasenotes/notes/vnx-new-driver-7e96934c2d3a6edc.yaml @ b'a49711f6dd26a360047fc4d22508eb68744600ac' - Old VNX FC (``cinder.volume.drivers.emc.emc_cli_fc.EMCCLIFCDriver``)/ iSCSI (``cinder.volume.drivers.emc.emc_cli_iscsi.EMCCLIISCSIDriver``) drivers are deprecated. Please refer to the upgrade section for information about the new driver. .. _Newton Series Release Notes_9.0.0_stable_newton_Security Issues: Security Issues --------------- .. releasenotes/notes/apply-limits-to-qemu-img-29f722a1bf4b91f8.yaml @ b'8547444775e406a50d9d26a0003e9ba6554b0d70' - The qemu-img tool now has resource limits applied which prevent it from using more than 1GB of address space or more than 2 seconds of CPU time. This provides protection against denial of service attacks from maliciously crafted or corrupted disk images. .. _Newton Series Release Notes_9.0.0_stable_newton_Bug Fixes: Bug Fixes --------- .. releasenotes/notes/3par-create-fc-vlun-match-set-type-babcf2cbce1ce317.yaml @ b'0912153358a686721539c48a0736a321544873a1' - 3PAR driver creates FC VLUN of match-set type instead of host-sees type. With match-set, the host will see the virtual volume on specified NSP (Node-Slot-Port). This change in vlun type fixes bug 1577993. .. releasenotes/notes/add-volume-upload-image-options-3a61a31c544fa034.yaml @ b'f8ce884002817bb76c68616314dc2dc5cedb61d6' - Added the options ``visibility`` and ``protected`` to the os-volume_upload_image REST API call. .. releasenotes/notes/allow-admin-quota-operations-c1c2236711224023.yaml @ b'a0a04f4332a609e854f2e67e3e9e9b723197b584' - Projects with the admin role are now allowed to operate on the quotas of all other projects. .. releasenotes/notes/bug-1612763-report-multiattach-enabled-NetApp-backends-0fbf2cb621e4747d.yaml @ b'5568b40d0682f6c34bce3f4dd7b5b824c93f6082' - Volumes created on NetApp cDOT and 7mode storage systems now report 'multiattach' capability.
They have always supported this capability, but did not report it to Cinder. .. releasenotes/notes/bug-1615451-NetApp-cDOT-fix-reporting-replication-capability-dca29f39b9fa7651.yaml @ b'623990df64092fe72a6473ac89fff1ba0d3aaec7' - NetApp cDOT block and file drivers now report replication capability at the pool level and are hence compatible with using the ``replication_enabled`` extra-spec in volume types. .. releasenotes/notes/del_volume_with_fc-f024b9f2d6eaca0f.yaml @ b'a2bac00c508e6bd65add9f76250de4a35ac0c267' - Fixed StorWize/SVC error causing volume deletion to get stuck in the 'deleting' state when using FlashCopy. .. releasenotes/notes/fix-hnas-stats-reporting-1335e582e46ff440.yaml @ b'6bf2d1b94cc775850347d913cbfd3abc674f2b3d' - Fixed issue where the HNAS driver was not correctly reporting THIN provisioning and related stats. .. releasenotes/notes/live_migration_v3-ae98c0d00e64c954.yaml @ b'6bf2d1b94cc775850347d913cbfd3abc674f2b3d' - Fixed live migration on EMC VMAX3 backends. .. releasenotes/notes/pure-fc-wwpn-case-c1d97f3fa7663acf.yaml @ b'b5214838303e56d0556a843ee40da591cd747b87' - Fixed an issue in PureFCDriver where a partially case-sensitive comparison of the connector WWPN could cause initialize_connection to fail when attempting to create a duplicate Purity host. .. releasenotes/notes/reject-volume_clear_size-settings-larger-than-1024MiB-30b38811da048948.yaml @ b'a49711f6dd26a360047fc4d22508eb68744600ac' - Fixed a 'No space left' error from the dd command when users set the config option ``volume_clear_size`` to a value larger than the size of a volume. .. releasenotes/notes/vmdk_backup_restore-41f807b7bc8e0ae8.yaml @ b'18325aebc609a4cf2b4b7b939716c982411b31b6' - Fixed backup and restore of volumes in VMware VMDK driver. .. releasenotes/notes/vmdk_image_ova-d3b3a0e72221110c.yaml @ b'd1a45ba0ddb2c551454ccb931426448ea2f66b27' - Fixed the VMware VMDK driver to create volumes from images in an OVA container. .. releasenotes/notes/vmware_vmdk_paravirtual-3d5eeef96dcbcfb7.yaml @ b'93490b2c9e66eaf7b68bc3bc9a25f415a5cd0b85' - Added support for images with vmware_adaptertype set to paraVirtual in the VMDK driver. cinder-27.0.0/releasenotes/source/ocata.rst ========================== Ocata Series Release Notes ========================== .. _Ocata Series Release Notes_10.0.8-20_stable_ocata: 10.0.8-20 ========= .. _Ocata Series Release Notes_10.0.8-20_stable_ocata_New Features: New Features ------------ .. releasenotes/notes/feature-rbd-exclusive-pool-a9bdebdeb1f0bf37.yaml @ b'b1a0d62f357e431a6b74d38440a8392de972b824' - When using the RBD pool exclusively for Cinder we can now set `rbd_exclusive_cinder_pool` to `true` and Cinder will use DB information to calculate provisioned size instead of querying all volumes in the backend, which will reduce the load on the Ceph cluster and the volume service. .. releasenotes/notes/generic-group-quota-manage-support-559629ad07a406f4.yaml @ b'492cf46f63c829ec722c0b8fb06de678e85afc5e' - Generic group is added into quota management. .. releasenotes/notes/unity-remove-empty-host-17d567dbb6738e4e.yaml @ b'9773f963fea7e8a7033a047fda8967259ef4f99f' - Dell EMC Unity Driver: Adds support for removing an empty host. The new option named `remove_empty_host` can be set to `True` to tell the Unity driver to remove the host after the last LUN is detached from it.
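A minimal sketch of the two new options mentioned above; the backend section names are illustrative placeholders, not taken from these notes::

    [ceph_rbd]
    # Only safe when the pool is used exclusively by Cinder
    rbd_exclusive_cinder_pool = true

    [unity_backend]
    # Remove the host on the array once its last LUN is detached
    remove_empty_host = True

..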
releasenotes/notes/vnx-add-force-detach-support-26f215e6f70cc03b.yaml @ b'39f1f020f46eaf57ed106648047da4f318c37d5d' - Add support to force detach a volume from all hosts on VNX. .. _Ocata Series Release Notes_10.0.8-20_stable_ocata_Known Issues: Known Issues ------------ .. releasenotes/notes/feature-rbd-exclusive-pool-a9bdebdeb1f0bf37.yaml @ b'b1a0d62f357e431a6b74d38440a8392de972b824' - If RBD stats collection is taking too long in your environment, maybe even leading to the service appearing as down, you'll want to use the `rbd_exclusive_cinder_pool = true` configuration option if you are using the pool exclusively for Cinder, and maybe even if you are not and can live with the inaccuracy. .. _Ocata Series Release Notes_10.0.8-20_stable_ocata_Bug Fixes: Bug Fixes --------- .. releasenotes/notes/bug-1775518-fix-unity-empty-list-issue-2d6b7c33aae1ffcc.yaml @ b'5be96ca9c58c52ce11db0d2f19a6ce527118556a' - Dell EMC Unity: Fixes bug 1775518 to make sure the driver initializes successfully even when the values of unity_io_ports and unity_storage_pool_names are empty. .. releasenotes/notes/fail-detach-lun-when-auto-zone-enabled-9c87b18a3acac9d1.yaml @ b'e704e834024cf4ec156527b16f7437f9dba4d551' - Dell EMC Unity Driver: Fixes `bug 1759175 `__ to detach the LUN correctly when auto zone was enabled and the LUN was the last one attached to the host. .. releasenotes/notes/kaminario-cinder-driver-bug-44c728f026394a85.yaml @ b'633e2d5205e4718e270f260e24d606fa104ff9a3' - Kaminario FC and iSCSI drivers: Fixed `bug 1829398 `_ where force detach would fail. .. releasenotes/notes/unity-force-detach-7c89e72105f9de61.yaml @ b'9dbcb24f0313c1187bc6269e61421bef4c45b3c9' - Corrected support to force detach a volume from all hosts on Unity. .. releasenotes/notes/vnx-update-sg-in-cache-3ecb673727bea79b.yaml @ b'187016c2852533c1806c7fdb34aa8dc6dfcd528e' - Dell EMC VNX Driver: Fixes `bug 1817385 `__ to make sure the storage group can be created again after it was destroyed when `destroy_empty_storage_group` is set to `True`. .. _Ocata Series Release Notes_10.0.8_stable_ocata: 10.0.8 ====== .. _Ocata Series Release Notes_10.0.8_stable_ocata_Security Issues: Security Issues --------------- .. releasenotes/notes/scaleio-zeropadding-a0273c56c4d14fca.yaml @ b'2dc52153215bb6a37532a959c5c98239be21bb56' - Removed the ability to create volumes in a ScaleIO Storage Pool that has zero-padding disabled. A new configuration option ``sio_allow_non_padded_volumes`` has been added to override this new behavior and allow unpadded volumes, but it should not be enabled if multiple tenants will utilize volumes from a shared Storage Pool (see the example below). .. _Ocata Series Release Notes_10.0.8_stable_ocata_Bug Fixes: Bug Fixes --------- .. releasenotes/notes/bug-1690954-40fc21683977e996.yaml @ b'dbca62c207d63bbc192acf75ae39c6b56702295a' - NetApp ONTAP NFS (bug 1690954): Fix wrong usage of export path as volume name when deleting volumes and snapshots. .. releasenotes/notes/bug-1762424-f76af2f37fe408f1.yaml @ b'ee330b9a27cc49b5566a2df878a6da51e701f83c' - NetApp ONTAP (bug 1762424): Fix ONTAP NetApp driver not being able to extend a volume to a size greater than the corresponding LUN max geometry. .. releasenotes/notes/fix-abort-backup-df196e9dcb992586.yaml @ b'f3aa39f21505dddaab592a85648678d628f5616e' - We no longer leave orphaned chunks on the backup backend or leave a temporary volume/snapshot when aborting a backup.
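Regarding the ScaleIO zero-padding note above, a hedged example of re-enabling unpadded volumes; the backend section name is a placeholder, not taken from these notes::

    [scaleio_backend]
    # Only enable on Storage Pools that are not shared between tenants
    sio_allow_non_padded_volumes = True

..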
releasenotes/notes/netapp_fix_svm_scoped_permissions.yaml @ b'd67448fdad668a35c5c35a5a06d2ac2af5b26bcd' - NetApp cDOT block and file drivers have improved support for SVM scoped user accounts. Features not supported for SVM scoped users include QoS, aggregate usage reporting, and dedupe usage reporting. .. _Ocata Series Release Notes_10.0.7_stable_ocata: 10.0.7 ====== .. _Ocata Series Release Notes_10.0.7_stable_ocata_New Features: New Features ------------ .. releasenotes/notes/k2-disable-discovery-bca0d65b5672ec7b.yaml @ b'c3464ac5cb523fecb3c265e1f1ed26831507d126' - Kaminario K2 iSCSI driver now supports non discovery multipathing (Nova and Cinder won't use iSCSI sendtargets) which can be enabled by setting `disable_discovery` to `true` in the configuration. .. releasenotes/notes/rbd-stats-report-0c7e803bb0b1aedb.yaml @ b'69a79e38afbdc67f61568c0b82cf6d06ca304e56' - RBD driver supports returning a static total capacity value instead of a dynamic value like it's been doing. Configurable with `report_dynamic_total_capacity` configuration option. .. _Ocata Series Release Notes_10.0.7_stable_ocata_Known Issues: Known Issues ------------ .. releasenotes/notes/k2-non-unique-fqdns-b62a269a26fd53d5.yaml @ b'caceaa52a7070548b7b42df877e23bc4d3845def' - Kaminario K2 now supports networks with duplicated FQDNs via configuration option `unique_fqdn_network` so attaching in these networks will work (bug #1720147). .. _Ocata Series Release Notes_10.0.7_stable_ocata_Upgrade Notes: Upgrade Notes ------------- .. releasenotes/notes/bug-1714209-netapp-ontap-drivers-oversubscription-issue-c4655b9c4858d7c6.yaml @ b'83a849a78c93ad7a1a7f2c9c0cd3b7ae08a2ff32' - If using the NetApp ONTAP drivers (7mode/cmode), the configuration value for "max_over_subscription_ratio" may need to be increased to avoid scheduling problems where storage pools that previously were valid to schedule new volumes suddenly appear to be out of space to the Cinder scheduler. See documentation `here `_. .. releasenotes/notes/rbd-stats-report-0c7e803bb0b1aedb.yaml @ b'69a79e38afbdc67f61568c0b82cf6d06ca304e56' - RBD/Ceph backends should adjust `max_over_subscription_ratio` to take into account that the driver is no longer reporting volume's physical usage but it's provisioned size. .. _Ocata Series Release Notes_10.0.7_stable_ocata_Bug Fixes: Bug Fixes --------- .. releasenotes/notes/bug-1632333-netapp-ontap-copyoffload-downloads-glance-image-twice-08801d8c7b9eed2c.yaml @ b'6d59c490c262d7634af5b2e03149c9f028f4d81c' - Fixed bug 1632333 with the NetApp ONTAP Driver. Now the copy offload method is invoked early to avoid downloading Glance images twice. .. releasenotes/notes/bug-1714209-netapp-ontap-drivers-oversubscription-issue-c4655b9c4858d7c6.yaml @ b'83a849a78c93ad7a1a7f2c9c0cd3b7ae08a2ff32' - The ONTAP drivers ("7mode" and "cmode") have been fixed to not report consumed space as "provisioned_capacity_gb". They instead rely on the cinder scheduler's calculation of "provisioned_capacity_gb". This fixes the oversubscription miscalculations with the ONTAP drivers. This bugfix affects all three protocols supported by these drivers (iSCSI/FC/NFS). .. releasenotes/notes/dell-emc-sc-bugfix-1756914-ffca3133273040f6.yaml @ b'd2bff999dbf9fdf48e4b1f4c402217ceb97bf6a0' - Dell EMC SC driver correctly returns initialize_connection data when more than one IQN is attached to a volume. This fixes some random Nova Live Migration failures where the connection information being returned was for an IQN other than the one for which it was being requested. .. 
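To illustrate the ``max_over_subscription_ratio`` upgrade notes above, a sketch for an RBD backend; the section name and the value 20.0 are assumptions, not taken from these notes::

    [ceph_rbd]
    # Raise the ratio so pools stay schedulable now that the driver
    # reports provisioned size instead of physical usage
    max_over_subscription_ratio = 20.0

..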
releasenotes/notes/netapp-ontap-use_exact_size-d03c90efbb8a30ac.yaml @ b'013d6183c434ec05a11115e603f02cf6e57a85b1' - Fixed bug #1731474 on NetApp Data ONTAP driver that was causing LUNs to be created with a larger size than requested. This fix requires version 9.1 of ONTAP or later. .. releasenotes/notes/ps-duplicate-ACL-5aa447c50f2474e7.yaml @ b'c96512aab0ee00201e26b0efa9c87c7f62fd463e' - Dell EMC PS Series Driver code was creating duplicate ACL records during live migration. The initialize_connection code was fixed to not create an access record for a host if one already exists. This change fixes bug 1726591. .. releasenotes/notes/ps-extend_volume-no-snap-8aa447c50f2475a7.yaml @ b'09b1b1351901b5cf042b2a59624751451707c87a' - Dell EMC PS Series Driver was creating unmanaged snapshots when extending volumes. Fixed it by adding the missing no-snap parameter. This change fixes bug 1720454. .. releasenotes/notes/ps-optimize-parsing-8aa447c50f2474c7.yaml @ b'047c3f87b590ea2d627692d05347fcb49c060bab' - Dell EMC PS Series Driver code reporting volume stats is now optimized to return the information earlier and accelerate the process. This change fixes bug 1661154. .. releasenotes/notes/ps-over-subscription-ratio-cal-8aa447c50f2474a8.yaml @ b'e205ab8dc7ac73d182958e60b6b9a9cb7b54601d' - Dell EMC PS Driver stats report has been fixed; it now reports `provisioned_capacity_gb` properly. Fixes bug 1719659. .. releasenotes/notes/rbd-stats-report-0c7e803bb0b1aedb.yaml @ b'69a79e38afbdc67f61568c0b82cf6d06ca304e56' - RBD stats report has been fixed; it now properly reports `allocated_capacity_gb` and `provisioned_capacity_gb` with the sum of the sizes of the volumes (not physical sizes) for volumes created by Cinder and all available in the pool respectively. Free capacity will now properly handle quota size restrictions of the pool. .. _Ocata Series Release Notes_10.0.5_stable_ocata: 10.0.5 ====== .. _Ocata Series Release Notes_10.0.5_stable_ocata_Bug Fixes: Bug Fixes --------- .. releasenotes/notes/bug-1705375-prohibit-group-deletion-if-groupsnapshot-exists.yaml @ b'42aa97ba3ef8f31e3188d0676aabc769121a2368' - Prohibit the deletion of a group if a group snapshot exists. .. _Ocata Series Release Notes_10.0.4_stable_ocata: 10.0.4 ====== .. _Ocata Series Release Notes_10.0.4_stable_ocata_Bug Fixes: Bug Fixes --------- .. releasenotes/notes/support-tenants-project-in-attachment-list-3edd8g138a28s4r8.yaml @ b'c09f4ffbb967d87254b9a364b5f49557348f960c' - Add ``all_tenants``, ``project_id`` support in the attachment list and detail APIs. .. _Ocata Series Release Notes_10.0.3_stable_ocata: 10.0.3 ====== .. _Ocata Series Release Notes_10.0.3_stable_ocata_Bug Fixes: Bug Fixes --------- .. releasenotes/notes/nfs_backup_no_overwrite-be7b545453baf7a3.yaml @ b'640b9dc2b739b04f1f7d70c2172f5b5fbc3b9b28' - Fixed the NFS backup driver; multiple backups on the same container are now supported and are no longer overwritten. .. _Ocata Series Release Notes_10.0.1_stable_ocata: 10.0.1 ====== .. _Ocata Series Release Notes_10.0.1_stable_ocata_Bug Fixes: Bug Fixes --------- .. releasenotes/notes/bug-1671220-4d521be71d0b8aa4.yaml @ b'25a1805b198080425e9244d7dcc79e81dd9d024f' - Fixed consistency groups API which was always returning groups scoped to the project ID from the user context instead of the given input project ID. .. _Ocata Series Release Notes_10.0.0_stable_ocata: 10.0.0 ====== .. _Ocata Series Release Notes_10.0.0_stable_ocata_New Features: New Features ------------ ..
releasenotes/notes/Dell-SC-New-Extra-Specs-1de0d3f1ebc62881.yaml @ b'c5368a739456a2864b731ed40de9d48190dd1765' - Dell SC - Compression and Dedupe support added for Storage Centers that support the options. .. releasenotes/notes/Dell-SC-New-Extra-Specs-1de0d3f1ebc62881.yaml @ b'c5368a739456a2864b731ed40de9d48190dd1765' - Dell SC - Volume and Group QOS support added for Storage Centers that support and have enabled the option. .. releasenotes/notes/add-backup-project-attribute-3f57051ef9159b08.yaml @ b'3f7acda20fb1e9e2623c86e560c4a5ab25b475e4' - Added ability to query backups by project ID. .. releasenotes/notes/add-io-ports-option-c751d1bd395dd614.yaml @ b'8e4e0c86c32d802389e4718e5610fb06765e4308' - Add support to configure IO ports option in Dell EMC Unity driver. .. releasenotes/notes/add-reset-group-snapshot-status-sd21a31cde5fa035.yaml @ b'304ff4c23db878262a553d0d15771c0beb970b42' - Added reset status API to group snapshot. .. releasenotes/notes/add-reset-group-status-sd21a31cde5fa034.yaml @ b'15c555445bb61ec856ce3b0b9d3fb90df00d349f' - Added reset status API to generic volume group. .. releasenotes/notes/add-vmax-replication-490202c15503ae03.yaml @ b'67a2178eb490e35320138bd25da650eddc9cd79a' - Add v2.1 volume replication support in VMAX driver. .. releasenotes/notes/bp-open-src-ibm-storage-driver-d17808e52aa4eacb.yaml @ b'c5630ce51dd7b3902bbf204707a3ae6674884109' - The IBM_Storage driver has been open sourced. This means that there is no more need to download the package from the IBM site. The only requirement remaining is to install pyxcli, which is available through pypi:: ``sudo pip install pyxcli`` .. releasenotes/notes/capacity-headroom-4b07701f1df9e5c4.yaml @ b'b67a416bb94bc0f2e64fc896e1c04581956f777d' - Cinder is now collecting capacity data, including virtual free capacity etc from the backends. A notification which includes that data is periodically emitted. .. releasenotes/notes/consistency_group_manage-d30a2ad8917a7a86.yaml @ b'67520e5eb2de79cbb270c7703e715a73c187ec09' - Added update-host command for consistency groups in cinder-manage. .. releasenotes/notes/datera-2.3-driver-update-12d0221fd4bb9fb0.yaml @ b'9a8dc08346964a58023992eb1d7b00cb0e4e7679' - Added Datera EDF API 2.1 support. .. releasenotes/notes/datera-2.3-driver-update-12d0221fd4bb9fb0.yaml @ b'9a8dc08346964a58023992eb1d7b00cb0e4e7679' - Added Datera Multi-Tenancy Support. .. releasenotes/notes/datera-2.3-driver-update-12d0221fd4bb9fb0.yaml @ b'9a8dc08346964a58023992eb1d7b00cb0e4e7679' - Added Datera Template Support. .. releasenotes/notes/datera-2.3-driver-update-12d0221fd4bb9fb0.yaml @ b'9a8dc08346964a58023992eb1d7b00cb0e4e7679' - Broke Datera driver up into modules. .. releasenotes/notes/delete_parameters-6f44fece22a7787d.yaml @ b'4d454f6eb1d3948e9c33563dce2f12b69a1b7392' - The ``force`` boolean parameter has been added to the volume delete API. It may be used in combination with ``cascade``. This also means that volume force delete is available in the base volume API rather than only in the ``volume_admin_actions`` extension. .. releasenotes/notes/dell-emc-unity-driver-72cb901467b23b22.yaml @ b'5a8f26eb62ac7130dec476db8661b96ed9c96715' - Added backend driver for Dell EMC Unity storage. .. releasenotes/notes/generic-groups-in-vnx-cbbe1346e889b5c2.yaml @ b'6359dcecd54b54b18edbffbd55e91809383cea6a' - Add consistent group capability to generic volume groups in VNX driver. .. releasenotes/notes/hitachi-vsp-driver-87659bb496bb459b.yaml @ b'5c815388e2d8d4b62f7a66dd14d07ce961143435' - Added new Hitachi VSP FC Driver. 
The VSP driver supports all Hitachi VSP Family and HUSVM. .. releasenotes/notes/hitachi-vsp-iscsi-driver-cac31d7c54d7718d.yaml @ b'3f6a9739b351980f938b4f4346586ba1012f2ce0' - Adds new Hitachi VSP iSCSI Driver. .. releasenotes/notes/hitachi-vsp-ports-option-7147289e6529d7fe.yaml @ b'b9b352fe6199569351f5e53e603480b2f8d6927f' - Hitachi VSP drivers have a new config option ``vsp_compute_target_ports`` to specify IDs of the storage ports used to attach volumes to compute nodes. The default is the value specified for the existing ``vsp_target_ports`` option. Either or both of ``vsp_compute_target_ports`` and ``vsp_target_ports`` must be specified. .. releasenotes/notes/hitachi-vsp-ports-option-7147289e6529d7fe.yaml @ b'b9b352fe6199569351f5e53e603480b2f8d6927f' - Hitachi VSP drivers have a new config option ``vsp_horcm_pair_target_ports`` to specify IDs of the storage ports used to copy volumes by Shadow Image or Thin Image. The default is the value specified for the existing ``vsp_target_ports`` option. Either or both of ``vsp_horcm_pair_target_ports`` and ``vsp_target_ports`` must be specified. .. releasenotes/notes/hnas-list-manageable-9329866618fa9a9c.yaml @ b'fb87dc52bf2e29f038d669c657e34f928352f51d' - Added the ability to list manageable volumes and snapshots to HNAS NFS driver. .. releasenotes/notes/huawei-backend-capabilities-report-optimization-d1c18d9f62ef71aa.yaml @ b'c557f3bd912db78e9c6fe7786315afc939733c81' - Optimize backend reporting capabilities for Huawei drivers. .. releasenotes/notes/improvement-to-get-group-detail-0e8b68114e79a8a2.yaml @ b'5e0393b26d01250a296866408ddd3b1620a5396c' - Added support for querying group details with volume ids which are in this group. For example, "groups/{group_id}?list_volume=True". .. releasenotes/notes/infinidat-add-infinibox-driver-67cc33fc3fbff1bb.yaml @ b'8020d32b078f7c4e2f179413abdb96777509343a' - Added driver for the InfiniBox storage array. .. releasenotes/notes/nec_storage_volume_driver-57663f9ecce1ae19.yaml @ b'c5630ce51dd7b3902bbf204707a3ae6674884109' - Added backend FC and iSCSI drivers for NEC Storage. .. releasenotes/notes/netapp_cdot_report_shared_blocks_exhaustion-073a73e05daf09d4.yaml @ b'685e4c98eef0fbf73d1408d50383cfdaca583dcb' - The NetApp cDOT drivers report to the scheduler, for each FlexVol pool, the fraction of the shared block limit that has been consumed by dedupe and cloning operations. This value, netapp_dedupe_used_percent, may be used in the filter & goodness functions for better placement of new Cinder volumes. .. releasenotes/notes/nexenta-ns5-5d223f3b60f58aad.yaml @ b'e0a6071b594b3ef9194ac569addc0e42ebccb105' - Added extend method to NFS driver for NexentaStor 5. .. releasenotes/notes/nexentastor5-https-6d58004838cfab30.yaml @ b'c5630ce51dd7b3902bbf204707a3ae6674884109' - Added secure HTTP support for REST API calls in the NexentaStor5 driver. Use of HTTPS is set True by default with option ``nexenta_use_https``. .. releasenotes/notes/nfs-snapshots-21b641300341cba1.yaml @ b'c5630ce51dd7b3902bbf204707a3ae6674884109' - Added support for snapshots in the NFS driver. This functionality is only enabled if ``nfs_snapshot_support`` is set to ``True`` in cinder.conf. Cloning volumes is only supported if the source volume is not attached. .. releasenotes/notes/nimble-add-fc-support-0007fdbd647be947.yaml @ b'c3d1dd1048bff4976bc771539d1f44cc423b7adf' - Added Nimble Storage Fibre Channel backend driver. .. 
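A hedged sketch of the Hitachi VSP port options and the NFS snapshot switch described above; the section names and port IDs are illustrative only and are not taken from these notes::

    [vsp_backend]
    vsp_target_ports = CL1-A,CL2-A
    # Ports used for attaching volumes to compute nodes
    vsp_compute_target_ports = CL1-B
    # Ports used by Shadow Image / Thin Image copies
    vsp_horcm_pair_target_ports = CL3-A

    [nfs_backend]
    # Snapshot support in the NFS driver is off unless explicitly enabled
    nfs_snapshot_support = True

..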
releasenotes/notes/nimble-qos-specs-8cd006777c66a64e.yaml @ b'd7931d7fc58166ef02b5936f5b1a1f1bd8bee151' - Added support for QoS in the Nimble Storage driver. QoS is available from Nimble OS release 4.x and above. .. releasenotes/notes/nimble-qos-specs-8cd006777c66a64e.yaml @ b'd7931d7fc58166ef02b5936f5b1a1f1bd8bee151' - Added support for deduplication of volumes in the Nimble Storage driver. .. releasenotes/notes/nimble-rest-api-support-75c2324ee462d026.yaml @ b'c9eada86f29406a72c9d5d5a8b123c65ae69a4b9' - The Nimble backend driver has been updated to use REST for array communication. .. releasenotes/notes/pure-generic-volume-groups-2b0941103f7c01cb.yaml @ b'abf53e0815014b1ffc9d8d03bec1570faef18327' - Add consistent group capability to generic volume groups in Pure drivers. .. releasenotes/notes/rbd-thin-provisioning-c98522d6fe7b71ff.yaml @ b'd4fd5660736a1363a4e78480b116532c71b5ce49' - Allow the RBD driver to work with max_over_subscription_ratio. .. releasenotes/notes/rbd-v2.1-replication-64a9d0bec5987faf.yaml @ b'f81d8a37debce61b9f5414257419af87bfce536d' - Added v2.1 replication support to RBD driver. .. releasenotes/notes/reduxio-iscsci-driver-5827c32a0c498949.yaml @ b'5bb68e49d3323c7a73166aef147c248c69503e9a' - Added backend iSCSI driver for Reduxio. .. releasenotes/notes/show-provider-id-for-admin-ff4fd5a2518a4bfa.yaml @ b'ef0d793e9b9d99ebf4eb54766a1d5a915f54c2e8' - Add provider_id in the detailed view of a volume for admins. .. releasenotes/notes/slug-qnap-driver-d4465ea6009c66df.yaml @ b'f6342d029ece2872c47857da68c20e141b17f464' - Added a volume driver for QNAP ES Storage. .. releasenotes/notes/solidfire-scaled-qos-9b8632453909e2db.yaml @ b'409391d6a607f5905d48e4885a174d1da9f6456b' - The SolidFire driver will recognize 4 new QoS spec keys to allow an administrator to specify QoS settings which are scaled by the size of the volume. 'ScaledIOPS' is a flag which will tell the driver to look for 'scaleMin', 'scaleMax' and 'scaleBurst', which provide the scaling factor from the minimum values specified by the previous QoS keys ('minIOPS', 'maxIOPS', 'burstIOPS'). The administrator must take care to ensure that the final calculated QoS values always satisfy minIOPS <= maxIOPS <= burstIOPS; an exception will be thrown if they do not. The QoS settings are also checked against the cluster minimum and maximum allowed and are truncated at the minimum or maximum if they exceed those limits. .. releasenotes/notes/storwize_iscsi_multipath_enhance-9de9dc29661869cd.yaml @ b'ad59cb5ac2fdc93cdcc93ac582c948a2a820a124' - Add multipath enhancement to Storwize iSCSI driver. .. releasenotes/notes/support-metadata-based-snapshot-list-filtering-6e6df68a7ce981f5.yaml @ b'f5cdbe8f74e64ce912e875141c2e092655988344' - Added support for querying snapshots filtered by metadata key/value using the optional 'metadata' URL parameter. For example, "/v3/snapshots?metadata=={'key1':'value1'}". .. releasenotes/notes/support-zmq-messaging-41085787156fbda1.yaml @ b'df647d0ccd56b7a10b003b8c7372ed3b5b717cc1' - Added support for ZMQ messaging layer in multibackend configuration. .. releasenotes/notes/unity-backup-via-snapshot-81a2d5a118c97042.yaml @ b'17171f6b15a422629b12357fe9274abe4cca7f2e' - Add support to backup volume using snapshot in the Unity driver. .. releasenotes/notes/vmax-attach-snapshot-3137e59ab4ff39a4.yaml @ b'efd07037ea9c47c3906319df0d4083d7f41a3002' - Enable backup snapshot optimal path by implementing attach and detach snapshot in the VMAX driver. ..
releasenotes/notes/vmax-clone-cg-09fce492931c957f.yaml @ b'4e3eb04b569a85e81301239cb4a354b34c47ecda' - Added the ability to create a CG from a source CG with the VMAX driver. .. releasenotes/notes/vmax-compression-support-1dfe463328b56d7f.yaml @ b'a9a3eddaf26d4705c06f25152ccb0a6a427eaf7b' - Support for compression on VMAX All Flash in the VMAX driver. .. releasenotes/notes/vmax-volume-migration-992c8c68e2207bbc.yaml @ b'6624c3197bfce7092a1b16ae74f1c3c9532d0a04' - Storage-assisted volume migration from one Pool/SLO/Workload combination to another, on the same array, via retype, for the VMAX driver. Both All Flash and Hybrid VMAX3 arrays are supported. VMAX2 is not supported. .. releasenotes/notes/vnx-async-migration-support-3c449139bb264004.yaml @ b'6ccfcafd45c445bc48593a310a3649e18c8b8a51' - VNX Cinder driver now supports async migration during volume cloning. By default, the cloned volume will be available after the migration starts in the VNX instead of waiting for the completion of migration. This greatly accelerates the cloning process. Users who want to disable this can add ``--metadata async_migrate=False`` when creating a volume from a source volume/snapshot. .. releasenotes/notes/xtremio-generic-groups-912e11525573e970.yaml @ b'ca77a489dc5a172b89f0609e2901e399daea925b' - Add consistent group capability to generic volume groups in the XtremIO driver. .. _Ocata Series Release Notes_10.0.0_stable_ocata_Known Issues: Known Issues ------------ .. releasenotes/notes/Dell-SC-Retype-Limitations-74f4b5f6a94ffe4f.yaml @ b'c514b25b0783769d5349b0a34880545c54e1ca4c' - With the Dell SC Cinder Driver, if a volume is retyped to a new storage profile, all volumes created via snapshots from this volume will also change to the new storage profile. .. releasenotes/notes/Dell-SC-Retype-Limitations-74f4b5f6a94ffe4f.yaml @ b'c514b25b0783769d5349b0a34880545c54e1ca4c' - With the Dell SC Cinder Driver, retyping from one replication type to another type (e.g. regular replication to live volume replication) is not supported. .. releasenotes/notes/Dell-SC-thaw_backend-b9362d381fabd4c9.yaml @ b'3f5a7e1bc84f0c202487955d8587ba041dcc1450' - Dell SC Cinder driver has limited support in a failed-over state, so thaw_backend has been implemented to reject the thaw call when in such a state. .. _Ocata Series Release Notes_10.0.0_stable_ocata_Upgrade Notes: Upgrade Notes ------------- .. releasenotes/notes/add-suppress-lvm-fd-warnings-option.402bebc03b0a9f00.yaml @ b'055ec1ce73ca55c463481c349b42dee66e5e86d6' - In certain environments (Kubernetes, for example) indirect calls to the LVM commands result in file descriptor leak warning messages, which in turn cause the process_execution method to raise an exception. To accommodate these environments, and to maintain backward compatibility in Newton, we added a ``lvm_suppress_fd_warnings`` boolean config option to the LVM driver. Setting this to True will append the LVM env vars to include the variable ``LVM_SUPPRESS_FD_WARNINGS=1``. This is made an optional configuration because it only applies to very specific environments. If we were to make this global that would require a rootwrap/privsep update that could break compatibility when trying to do rolling upgrades of the volume service. .. releasenotes/notes/bp-open-src-ibm-storage-driver-d17808e52aa4eacb.yaml @ b'c5630ce51dd7b3902bbf204707a3ae6674884109' - Previous installations of IBM Storage must be uninstalled first and the new driver should be installed on top.
In addition, the cinder.conf values should be updated to reflect the new paths. For example the proxy setting of ``storage.proxy.IBMStorageProxy`` should be updated to ``cinder.volume.drivers.ibm.ibm_storage.proxy.IBMStorageProxy``. .. releasenotes/notes/cinder-api-middleware-remove-deprecated-option-98912ab7e8b472e8.yaml @ b'c5630ce51dd7b3902bbf204707a3ae6674884109' - Removed deprecated option ``osapi_max_request_body_size``. .. releasenotes/notes/cinder-manage-db-online-schema-migrations-d1c0d40f26d0f033.yaml @ b'c5630ce51dd7b3902bbf204707a3ae6674884109' - To get rid of long running DB data migrations that must be run offline, Cinder will now be able to execute them online, on a live cloud. Before upgrading from Ocata to Pike, the operator needs to perform all the Newton data migrations. To achieve that, they need to run ``cinder-manage db online_data_migrations`` until there are no records to be updated. To limit the DB performance impact, migrations can be performed in chunks limited by the ``--max_number`` option. If your intent is to upgrade Cinder in a non-live manner, you can use the ``--ignore_state`` option safely. Please note that finishing all the Newton data migrations will be enforced by the first schema migration in Pike, so you won't be able to upgrade to Pike without that. .. releasenotes/notes/datera-2.3-driver-update-12d0221fd4bb9fb0.yaml @ b'9a8dc08346964a58023992eb1d7b00cb0e4e7679' - Datera driver location has changed from cinder.volume.drivers.datera.DateraDriver to cinder.volume.drivers.datera.datera_iscsi.DateraDriver. .. releasenotes/notes/db-schema-from-liberty-f5fa57d67441dece.yaml @ b'38f2ad54b343152f0edba817b191d456d4303d17' - The Cinder database can now only be upgraded from changes since the Liberty release. In order to upgrade from a version prior to that, you must now upgrade to at least Liberty first, then to Ocata or later. .. releasenotes/notes/default-apiv1-disabled-9f6bb0c67b38e670.yaml @ b'7fcca079ff63f1a1b6d4d3067508883f01515add' - The v1 API was deprecated in the Juno release and is now disabled by default. In order to still use the v1 API, you must now set ``enable_v1_api`` to ``True`` in your cinder.conf file. .. releasenotes/notes/delete_parameters-6f44fece22a7787d.yaml @ b'4d454f6eb1d3948e9c33563dce2f12b69a1b7392' - There is a new policy option ``volume:force_delete`` which controls access to the ability to specify force delete via the volume delete API. This is separate from the pre-existing ``volume-admin-actions:force_delete`` policy check. .. releasenotes/notes/hnas-deprecate-iscsi-driver-cd521b3a2ba948f3.yaml @ b'c5630ce51dd7b3902bbf204707a3ae6674884109' - The Hitachi NAS iSCSI driver has been marked as unsupported and is now deprecated. ``enable_unsupported_drivers`` will need to be set to ``True`` in cinder.conf to continue to use it. .. releasenotes/notes/kaminario-cinder-driver-remove-deprecate-option-831920f4d25e2979.yaml @ b'c5630ce51dd7b3902bbf204707a3ae6674884109' - Removed deprecated option ``kaminario_nodedup_substring`` in Kaminario FC and iSCSI Cinder drivers. .. releasenotes/notes/mark-cloudbyte-unsupported-8615a127439ed262.yaml @ b'c5630ce51dd7b3902bbf204707a3ae6674884109' - The CloudByte driver has been marked as unsupported and is now deprecated. ``enable_unsupported_drivers`` will need to be set to ``True`` in cinder.conf to continue to use it. .. releasenotes/notes/mark-dothill-unsupported-7f95115b7b24e53c.yaml @ b'c5630ce51dd7b3902bbf204707a3ae6674884109' - The DotHill drivers have been marked as unsupported and are now deprecated.
``enable_unsupported_drivers`` will need to be set to ``True`` in cinder.conf to continue to use it. .. releasenotes/notes/mark-hpe-xp-unsupported-c9ce6cfbab622e46.yaml @ b'c5630ce51dd7b3902bbf204707a3ae6674884109' - The HPE XP driver has been marked as unsupported and is now deprecated. ``enable_unsupported_drivers`` will need to be set to ``True`` in cinder.conf to continue to use it. .. releasenotes/notes/mark-nexentaedge-unsupported-56d184fdccc6eaac.yaml @ b'c5630ce51dd7b3902bbf204707a3ae6674884109' - The Nexenta Edge drivers have been marked as unsupported and are now deprecated. ``enable_unsupported_drivers`` will need to be set to ``True`` in cinder.conf to continue to use it. .. releasenotes/notes/migrate-cg-to-generic-volume-groups-f82ad3658f3e567c.yaml @ b'c5630ce51dd7b3902bbf204707a3ae6674884109' - Operator needs to perform ``cinder-manage db online_data_migrations`` to migrate existing consistency groups to generic volume groups. .. releasenotes/notes/move-eqlx-driver-to-dell-emc-fe5d2b484c47b7a6.yaml @ b'c5630ce51dd7b3902bbf204707a3ae6674884109' - The EqualLogic driver is moved to the dell_emc directory and has been rebranded to its current Dell EMC PS Series name. The volume_driver entry in cinder.conf needs to be changed to ``cinder.volume.drivers.dell_emc.ps.PSSeriesISCSIDriver``. .. releasenotes/notes/move-scaleio-driver-to-dell-emc-dir-c195374ca6b7e98d.yaml @ b'c5630ce51dd7b3902bbf204707a3ae6674884109' - The ScaleIO driver is moved to the dell_emc directory. volume_driver entry in cinder.conf needs to be changed to ``cinder.volume.drivers.dell_emc.scaleio.driver.ScaleIODriver``. .. releasenotes/notes/move-xtremio-driver-to-dell-emc-dir-f7e07a502cafd78f.yaml @ b'c5630ce51dd7b3902bbf204707a3ae6674884109' - The XtremIO driver is moved to the dell_emc directory. volume_driver entry in cinder.conf needs to be changed to ``cinder.volume.drivers.dell_emc.xtremio.XtremIOISCSIDriver`` or ``cinder.volume.drivers.dell_emc.xtremio.XtremIOFCDriver``. .. releasenotes/notes/new-osprofiler-call-0bb1a305c8e8f9cc.yaml @ b'd48e9670270305beeaba57ffcfd61b14792d8097' - New config option added. ``"connection_string"`` in [profiler] section is used to specify OSProfiler driver connection string, for example, ``"connection_string = messaging://"``, ``"connection_string = mongodb://localhost:27017"`` .. releasenotes/notes/operate-migrated-groups-with-cp-apis-e5835c6673191805.yaml @ b'44ebdd22526e9a4ae0646d9f9ae2b391e70bed57' - After running the migration script to migrate CGs to generic volume groups, CG and group APIs work as follows. * Create CG only creates in the groups table. * Modify CG modifies in the CG table if the CG is in the CG table, otherwise it modifies in the groups table. * Delete CG deletes from the CG or the groups table depending on where the CG is. * List CG checks both CG and groups tables. * List CG Snapshots checks both the CG and the groups tables. * Show CG checks both tables. * Show CG Snapshot checks both tables. * Create CG Snapshot creates either in the CG or the groups table depending on where the CG is. * Create CG from Source creates in either the CG or the groups table depending on the source. * Create Volume adds the volume either to the CG or the group. * default_cgsnapshot_type is reserved for migrating CGs. * Group APIs will only write/read in/from the groups table. * Group APIs will not work on groups with default_cgsnapshot_type. * Groups with default_cgsnapshot_type can only be operated by CG APIs. 
* After CG tables are removed, we will allow default_cgsnapshot_type to be used by group APIs. .. releasenotes/notes/rebranded-vnx-driver-2fb7424ddc9c41df.yaml @ b'c479e94901baaba9a9d4991efef4fd9a16124030' - The EMC VNX driver has been rebranded to the Dell EMC VNX driver. Existing configurations will continue to work with the legacy name, but will need to be updated by the next release. Users need to update ``volume_driver`` to ``cinder.volume.drivers.dell_emc.vnx.driver.VNXDriver``. .. releasenotes/notes/remove-deprecated-driver-mappings-b927d8ef9fc3b713.yaml @ b'6ac5d02419277982bd12b0954b7feddb0a3f5f82' - Old driver paths have been removed since they have been through our allotted deprecation period. If you have any of these paths set for the volume_driver option in your cinder.conf, make sure to update them to the new driver paths listed here (see the example after this list). * Old path - cinder.volume.drivers.huawei.huawei_18000.Huawei18000ISCSIDriver * New path - cinder.volume.drivers.huawei.huawei_driver.HuaweiISCSIDriver * Old path - cinder.volume.drivers.huawei.huawei_driver.Huawei18000ISCSIDriver * New path - cinder.volume.drivers.huawei.huawei_driver.HuaweiISCSIDriver * Old path - cinder.volume.drivers.huawei.huawei_18000.Huawei18000FCDriver * New path - cinder.volume.drivers.huawei.huawei_driver.HuaweiFCDriver * Old path - cinder.volume.drivers.huawei.huawei_driver.Huawei18000FCDriver * New path - cinder.volume.drivers.huawei.huawei_driver.HuaweiFCDriver * Old path - cinder.volume.drivers.san.hp.hp_3par_fc.HP3PARFCDriver * New path - cinder.volume.drivers.hpe.hpe_3par_fc.HPE3PARFCDriver * Old path - cinder.volume.drivers.san.hp.hp_3par_iscsi.HP3PARISCSIDriver * New path - cinder.volume.drivers.hpe.hpe_3par_iscsi.HPE3PARISCSIDriver * Old path - cinder.volume.drivers.san.hp.hp_lefthand_iscsi.HPLeftHandISCSIDriver * New path - cinder.volume.drivers.hpe.hpe_lefthand_iscsi.HPELeftHandISCSIDriver * Old path - cinder.volume.drivers.san.hp.hp_xp_fc.HPXPFCDriver * New path - cinder.volume.drivers.hpe.hpe_xp_fc.HPEXPFCDriver .. releasenotes/notes/remove-eqlx-deprecated-options-89ba02c41d4da62a.yaml @ b'c5630ce51dd7b3902bbf204707a3ae6674884109' - Removing the Dell EqualLogic driver's deprecated configuration options. Please replace the old options in your cinder.conf with the new ones. * Removed - ``eqlx_cli_timeout`` * Replaced with - ``ssh_conn_timeout`` * Removed - ``eqlx_use_chap`` * Replaced with - ``use_chap_auth`` * Removed - ``eqlx_chap_login`` * Replaced with - ``chap_username`` * Removed - ``eqlx_chap_password`` * Replaced with - ``chap_password`` .. releasenotes/notes/remove-scality-fa209aae9748a1f3.yaml @ b'a931f9db79554630d8d71fcff1334bb4e37cb398' - The Scality backend volume driver was marked as not supported in the previous release and has now been removed. .. releasenotes/notes/remove-single-backend-7bf02e525bbbdd3a.yaml @ b'e8e3ae7616878cc46303ceee40164b9b38a3975c' - Configurations that set backend config in the ``[DEFAULT]`` section are no longer supported. You should use the ``enabled_backends`` option to set up backends. .. releasenotes/notes/remove-volume-clear-shred-bde9f7f9ff430feb.yaml @ b'f90c49e0062c4e9287b44e11040fa16a26013a58' - The volume_clear option to use `shred` was deprecated in the Newton release and has now been removed. Since deprecation, this option has performed the same action as the `zero` option. Config settings for `shred` should be updated to be set to `zero` for continued operation.
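A sketch tying together the removed driver paths and the ``volume_clear`` note above; the backend names below are placeholders, not taken from these notes::

    [DEFAULT]
    enabled_backends = hpe3par_fc_backend
    # `shred` was removed; `zero` performs the same action
    volume_clear = zero

    [hpe3par_fc_backend]
    # New path replacing cinder.volume.drivers.san.hp.hp_3par_fc.HP3PARFCDriver
    volume_driver = cinder.volume.drivers.hpe.hpe_3par_fc.HPE3PARFCDriver

..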
releasenotes/notes/remove_glusterfs_volume_driver-d8fd2cf5f38e754b.yaml @ b'16e93ccd4f3a6d62ed9d277f03b64bccc63ae060' - The GlusterFS volume driver, which was deprecated in the Newton release, has been removed. .. releasenotes/notes/remove_volume_tmp_dir_option-c83c5341e5a42378.yaml @ b'e73995308fccc9ae1f8d956d3ceeecca76fec14f' - The RBD driver no longer uses the "volume_tmp_dir" option to set where temporary files for image conversion are stored. Set "image_conversion_dir" to configure this in Ocata. .. releasenotes/notes/removing-cinder-all-9f5c3d1eb230f9e6.yaml @ b'dafc68aa569a607a37c8f31d3230ea5a5efda93f' - The cinder-all binary has been removed. Instead, use the individual binaries like cinder-api, cinder-backup, cinder-volume and cinder-scheduler. .. releasenotes/notes/vmax-rename-dell-emc-f9ebfb9eb567f427.yaml @ b'c5630ce51dd7b3902bbf204707a3ae6674884109' - The VMAX driver is moved to the dell_emc directory. The volume_driver entry in cinder.conf needs to be changed to ``cinder.volume.drivers.dell_emc.vmax.iscsi.VMAXISCSIDriver`` or ``cinder.volume.drivers.dell_emc.vmax.fc.VMAXFCDriver``. .. releasenotes/notes/vmdk_config_conn_pool_size-0658c497e118533f.yaml @ b'2fce7a3d0ca264e012d0fb5cf128a74dd9a07fb0' - Added config option ``vmware_connection_pool_size`` in the VMware VMDK driver to specify the maximum number of connections (to vCenter) in the http connection pool. .. releasenotes/notes/vnx-repv2.1-config-update-cc2f60c20aec88dd.yaml @ b'8f845056fd49e6ca503e4a08baea4185ad32a4b6' - In the VNX Cinder driver, the ``replication_device`` keys ``backend_id`` and ``san_ip`` are now mandatory. If you prefer security file authentication, please append ``storage_vnx_security_file_dir`` to ``replication_device``; otherwise, append ``san_login``, ``san_password`` and ``storage_vnx_authentication_type`` to ``replication_device`` (see the example below). .. _Ocata Series Release Notes_10.0.0_stable_ocata_Deprecation Notes: Deprecation Notes ----------------- .. releasenotes/notes/datera-2.3-driver-update-12d0221fd4bb9fb0.yaml @ b'9a8dc08346964a58023992eb1d7b00cb0e4e7679' - Deprecated datera_api_version option. .. releasenotes/notes/datera-2.3-driver-update-12d0221fd4bb9fb0.yaml @ b'9a8dc08346964a58023992eb1d7b00cb0e4e7679' - Removed datera_acl_allow_all option. .. releasenotes/notes/datera-2.3-driver-update-12d0221fd4bb9fb0.yaml @ b'9a8dc08346964a58023992eb1d7b00cb0e4e7679' - Removed datera_num_replicas option. .. releasenotes/notes/deprecate-block-device-driver-d30232547a31fe1e.yaml @ b'65fe16ea85c2b989c61deefba51e2172822cc7a0' - The block_driver is deprecated as of the Ocata release and will be removed in the Queens release of Cinder. Instead the LVM driver with the LIO iSCSI target should be used. For those that desire higher performance, they should use LVM striping. .. releasenotes/notes/deprecate-cinder-linux-smb-driver-4aec58f15a963c54.yaml @ b'd623546c9372086ed65b32462cbec596d6b7bcd6' - The Cinder Linux SMBFS driver is now deprecated and will be removed during the following release. Deployers are encouraged to use the Windows SMBFS driver instead. .. releasenotes/notes/hbsd-driver-deletion-d81f7c4513f45d7b.yaml @ b'ba1e0d7940f2268c72b017f6f6ea19c784c617c0' - The HBSD (Hitachi Block Storage Driver) volume drivers, which support the Hitachi HUS100 and VSP storage families, are deprecated. Support for the HUS110 family will no longer be provided. Support for VSP will be provided as hitachi.vsp_* drivers.
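A hedged example of the VNX ``replication_device`` requirement described above; all values are placeholders, and the key:value comma-separated layout is an assumption about the usual cinder.conf multi-option format rather than something these notes specify::

    [vnx_backend]
    replication_device = backend_id:target_vnx,san_ip:192.168.1.2,san_login:admin,san_password:secret,storage_vnx_authentication_type:global

..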
releasenotes/notes/hnas-change-snapshot-names-8153b043eb7e99fc.yaml @ b'8f82bb7966986f8857913ed2366fcf42c134e027' - Support for snapshots named in the backend as ``snapshot-`` is deprecated. Snapshots are now named in the backend as ``.``. .. releasenotes/notes/hnas-deprecate-iscsi-driver-cd521b3a2ba948f3.yaml @ b'c5630ce51dd7b3902bbf204707a3ae6674884109' - The Hitachi NAS iSCSI driver has been marked as unsupported and is now deprecated. ``enable_unsupported_drivers`` will need to be set to ``True`` in cinder.conf to continue to use it. The driver will be removed in the next release. .. releasenotes/notes/hnas-deprecated-svc-volume-type-77768f27946aadf4.yaml @ b'19ad533a6d403913172142bc83d31adb10d752a8' - Deprecated the configuration option ``hnas_svcX_volume_type``. Use option ``hnas_svcX_pool_name`` to indicate the name of the services (pools). .. releasenotes/notes/mark-cloudbyte-unsupported-8615a127439ed262.yaml @ b'c5630ce51dd7b3902bbf204707a3ae6674884109' - The CloudByte driver has been marked as unsupported and is now deprecated. ``enable_unsupported_drivers`` will need to be set to ``True`` in cinder.conf to continue to use it. If its support status does not change, it will be removed in the next release. .. releasenotes/notes/mark-dothill-unsupported-7f95115b7b24e53c.yaml @ b'c5630ce51dd7b3902bbf204707a3ae6674884109' - The DotHill drivers have been marked as unsupported and are now deprecated. ``enable_unsupported_drivers`` will need to be set to ``True`` in cinder.conf to continue to use them. If their support status does not change, they will be removed in the next release. .. releasenotes/notes/mark-hpe-xp-unsupported-c9ce6cfbab622e46.yaml @ b'c5630ce51dd7b3902bbf204707a3ae6674884109' - The HPE XP driver has been marked as unsupported and is now deprecated. ``enable_unsupported_drivers`` will need to be set to ``True`` in cinder.conf to continue to use it. If its support status does not change, it will be removed in the next release. .. releasenotes/notes/mark-nexentaedge-unsupported-56d184fdccc6eaac.yaml @ b'c5630ce51dd7b3902bbf204707a3ae6674884109' - The Nexenta Edge drivers have been marked as unsupported and are now deprecated. ``enable_unsupported_drivers`` will need to be set to ``True`` in cinder.conf to continue to use them. If their support status does not change, they will be removed in the next release. .. releasenotes/notes/netapp-data-ontap-deprecate-7mode-drivers-a39bfcb3afefc9a5.yaml @ b'd6bd4c87407854c2093fb61e6963777028609f4f' - The 7-Mode Data ONTAP configuration of the NetApp Unified driver is deprecated as of the Ocata release and will be removed in the Queens release. Other configurations of the NetApp Unified driver, including Clustered Data ONTAP and E-series, are unaffected. .. releasenotes/notes/refactor-disco-volume-driver-3ff0145707ec0f3e.yaml @ b'c5630ce51dd7b3902bbf204707a3ae6674884109' - Marked the ITRI DISCO driver option ``disco_wsdl_path`` as deprecated. The new preferred protocol for array communication is REST, and SOAP support will be removed. .. _Ocata Series Release Notes_10.0.0_stable_ocata_Security Issues: Security Issues --------------- .. releasenotes/notes/apply-limits-to-qemu-img-29f722a1bf4b91f8.yaml @ b'78f17f0ad79380ee3d9c50f2670252bcc559b62b' - The qemu-img tool now has resource limits applied which prevent it from using more than 1GB of address space or more than 2 seconds of CPU time. This provides protection against denial of service attacks from maliciously crafted or corrupted disk images.
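For the unsupported-driver deprecations listed above, keeping such a driver working requires the flag each note mentions; a minimal sketch, placing it in ``[DEFAULT]``, which is an assumption since these notes only say "in cinder.conf"::

    [DEFAULT]
    # Required to keep using drivers marked as unsupported (e.g. DotHill, HPE XP)
    enable_unsupported_drivers = True

..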
_Ocata Series Release Notes_10.0.0_stable_ocata_Bug Fixes: Bug Fixes --------- .. releasenotes/notes/Dell-SC-Retype-Limitations-74f4b5f6a94ffe4f.yaml @ b'c514b25b0783769d5349b0a34880545c54e1ca4c' - With the Dell SC Cinder Driver, retyping to or from a replicated type should now work. .. releasenotes/notes/Dell-SC-Retype-Limitations-74f4b5f6a94ffe4f.yaml @ b'c514b25b0783769d5349b0a34880545c54e1ca4c' - With the Dell SC Cinder Driver, retype failed to return a tuple if it had to return an update to the volume state. .. releasenotes/notes/bug-1622057-netapp-cdot-fix-replication-status-cheesecake-volumes-804dc8b0b1380e6b.yaml @ b'df284e68f9f00282b05eb523e4ee3d5f63b8a750' - The NetApp cDOT driver now sets the ``replication_status`` attribute appropriately on volumes created within replicated backends when using host level replication. .. releasenotes/notes/bug-1634203-netapp-cdot-fix-clone-from-nfs-image-cache-2218fb402783bc20.yaml @ b'beed5c7789d4d05137a1f8dce87c56a7c3500cdf' - Fixed an issue where the NetApp cDOT NFS driver failed to clone new volumes from the image cache. .. releasenotes/notes/fix-extend-volume-in-thin-pools-57a3d53be4d47704.yaml @ b'31dba529117eab92f7f8bdcd4f417430754fb9cc' - Fixed volume extend issue that allowed a tenant with enough quota to extend the volume to limits greater than what the volume backend supported. .. releasenotes/notes/fix-hnas-clone-with-different-volume-type-b969897cba2610cc.yaml @ b'd1f23f3634f032ee0eae26eee2c3057f309c674a' - Fixed HNAS bug that placed a cloned volume in the same pool as its source, even if the clone had a different pool specification. The driver no longer allows making clones using a different volume type. .. releasenotes/notes/kaminario-cinder-driver-bug-1646692-7aad3b7496689aa7.yaml @ b'c5630ce51dd7b3902bbf204707a3ae6674884109' - Fixed Non-WAN port filter issue in Kaminario iSCSI driver. .. releasenotes/notes/kaminario-cinder-driver-bug-1646766-fe810f5801d24f2f.yaml @ b'6d7125bdbce1c665c9c5e37e1f9928281279d475' - Fixed issue of managing a VG with more than one volume in Kaminario FC and iSCSI Cinder drivers. .. releasenotes/notes/solidfire-scaled-qos-9b8632453909e2db.yaml @ b'409391d6a607f5905d48e4885a174d1da9f6456b' - For SolidFire, QoS specs are now checked to make sure they fall within the min and max constraints. If not, the QoS specs are capped at the min or max (i.e. if the spec says 50 and the minimum supported is 100, the driver will set it to 100). .. _Ocata Series Release Notes_10.0.0_stable_ocata_Other Notes: Other Notes ----------- .. releasenotes/notes/fix-extend-volume-in-thin-pools-57a3d53be4d47704.yaml @ b'31dba529117eab92f7f8bdcd4f417430754fb9cc' - Now extend won't work on disabled services because it's going through the scheduler, unlike how it worked before. cinder-27.0.0/releasenotes/source/pike.rst ========================= Pike Series Release Notes ========================= .. _Pike Series Release Notes_11.2.2-15_stable_pike: 11.2.2-15 ========= .. _Pike Series Release Notes_11.2.2-15_stable_pike_Bug Fixes: Bug Fixes --------- .. releasenotes/notes/kaminario-cinder-driver-bug-44c728f026394a85.yaml @ b'7dcd50a0bfc533221b52a2d5611ab4cc986311c9' - Kaminario FC and iSCSI drivers: Fixed `bug 1829398 `_ where force detach would fail. ..
releasenotes/notes/netapp-non-discovery-19af4e10f7b190ea.yaml @ b'd440a94baf360bb9b4b1dc0c83ee4559a8d80d13' - NetApp iSCSI drivers no longer use the discovery mechanism for multipathing and they always return all target/portals when attaching a volume. Thanks to this, volumes will be successfully attached even if the target/portal selected as primary is down; this is the case for both multipath and single-path connections. .. _Pike Series Release Notes_11.2.2_stable_pike: 11.2.2 ====== .. _Pike Series Release Notes_11.2.2_stable_pike_Bug Fixes: Bug Fixes --------- .. releasenotes/notes/vnx-update-sg-in-cache-3ecb673727bea79b.yaml @ b'dcfc2f3d5ac69e0fd0c6ecbdd6ce26ed1cecd96c' - Dell EMC VNX Driver: Fixes `bug 1817385 `__ to make sure the storage group can be created again after it was destroyed when `destroy_empty_storage_group` is set to `True`. .. _Pike Series Release Notes_11.2.1_stable_pike: 11.2.1 ====== .. _Pike Series Release Notes_11.2.1_stable_pike_Bug Fixes: Bug Fixes --------- .. releasenotes/notes/fix-cross-az-migration-ce97eff61280e1c7.yaml @ b'056281d1079deca7a1e7d5343eb0a7cdd691a859' - Resolve issue with cross AZ migrations and retypes where the destination volume kept the source volume's AZ, so we ended up with a volume where the AZ does not match the backend. (bug 1747949) .. _Pike Series Release Notes_11.2.0_stable_pike: 11.2.0 ====== .. _Pike Series Release Notes_11.2.0_stable_pike_New Features: New Features ------------ .. releasenotes/notes/bug-1730933-1bb0272e3c51eed3.yaml @ b'a1d67d52f79656ce9c7f4b326d2703d972c35d9a' - The Quobyte Cinder driver now supports identifying Quobyte mounts via the mount's fstype field. .. releasenotes/notes/feature-rbd-exclusive-pool-a9bdebdeb1f0bf37.yaml @ b'1dca272d8f47bc180cc481e6c6a835eda0bb06a8' - When using the RBD pool exclusively for Cinder we can now set `rbd_exclusive_cinder_pool` to `true` and Cinder will use DB information to calculate provisioned size instead of querying all volumes in the backend, which will reduce the load on the Ceph cluster and the volume service. .. releasenotes/notes/unity-enable-ssl-14db2497225c4395.yaml @ b'95fe19850e875d769e361eb78a9003af2ee3db56' - Dell EMC Unity Cinder driver allows enabling/disabling SSL verification. Admins can set `driver_ssl_cert_verify` to `True` or `False` to enable or disable this function, or alternatively set `driver_ssl_cert_path=` to a customized CA path. Both of the above options should go under the driver section. .. releasenotes/notes/unity-remove-empty-host-17d567dbb6738e4e.yaml @ b'66c50600def5f8f25106afaa316e7fc300d72c87' - Dell EMC Unity Driver: Adds support for removing an empty host. The new option named `remove_empty_host` can be set to `True` to tell the Unity driver to remove the host after the last LUN is detached from it. .. releasenotes/notes/vmware-vmdk-snapshot-template-d3dcfc0906c02edd.yaml @ b'a5e86c387e67650451d957c5ef525b452203c2fd' - VMware VMDK driver now supports vSphere template as a volume snapshot format in vCenter server. The snapshot format in vCenter server can be specified using driver config option ``vmware_snapshot_format``. .. _Pike Series Release Notes_11.2.0_stable_pike_Known Issues: Known Issues ------------ ..
releasenotes/notes/feature-rbd-exclusive-pool-a9bdebdeb1f0bf37.yaml @ b'1dca272d8f47bc180cc481e6c6a835eda0bb06a8' - If RBD stats collection is taking too long in your environment, maybe even leading to the service appearing as down, you'll want to use the `rbd_exclusive_cinder_pool = true` configuration option if you are using the pool exclusively for Cinder, and maybe even if you are not and can live with the inaccuracy. .. _Pike Series Release Notes_11.2.0_stable_pike_Upgrade Notes: Upgrade Notes ------------- .. releasenotes/notes/vmware-vmdk-snapshot-template-d3dcfc0906c02edd.yaml @ b'a5e86c387e67650451d957c5ef525b452203c2fd' - VMware VMDK driver will use vSphere template as the default snapshot format in vCenter server. .. _Pike Series Release Notes_11.2.0_stable_pike_Security Issues: Security Issues --------------- .. releasenotes/notes/scaleio-zeropadding-a0273c56c4d14fca.yaml @ b'6309c097e653c5f8b40e0602950d0ef54a9efb37' - Removed the ability to create volumes in a ScaleIO Storage Pool that has zero-padding disabled. A new configuration option ``sio_allow_non_padded_volumes`` has been added to override this new behavior and allow unpadded volumes, but it should not be enabled if multiple tenants will utilize volumes from a shared Storage Pool. .. _Pike Series Release Notes_11.2.0_stable_pike_Bug Fixes: Bug Fixes --------- .. releasenotes/notes/bug-1775518-fix-unity-empty-list-issue-2d6b7c33aae1ffcc.yaml @ b'b8b5de985359fe07f235e8c10f375246b1608fbb' - Dell EMC Unity: Fixes bug 1775518 to make sure the driver initializes successfully even when the values of unity_io_ports and unity_storage_pool_names are empty. .. releasenotes/notes/bug-1799221-fix-truncated-volumes-in-case-of-glance-errors-6cae19218249c3cf.yaml @ b'0551f0a9ff7b787fcb3e1b686b83e25f99cad874' - Fixed a bug which could create volumes with invalid content in case of unhandled errors from glance client (Bug `#1799221 `_). .. releasenotes/notes/fix-import-backup-quota-issue-8yh69hd19u7tuu23.yaml @ b'40f5aef94a88a55cd20aad02e4f9ff2c38943b77' - Cinder will now consume quota when importing a new backup resource. .. releasenotes/notes/fix-netapp-cg-da4fd6c396e5bedb.yaml @ b'fcae2f086f159b865a3624bfcff125a56f298fd2' - Fixes a bug in NetApp SolidFire where the deletion of group snapshots was failing. .. releasenotes/notes/fix-netapp-force_detach-36bdf75dd2c9a030.yaml @ b'90233982d249486aebaa996db744e3820aee1ddb' - Fixes force_detach behavior for volumes in the NetApp SolidFire driver. .. releasenotes/notes/fix-quota-deleting-temporary-volume-274e371b425e92cc.yaml @ b'9bd60bbdb997c07ce22d590dfc66272ac5325836' - Fix a quota usage error triggered by a non-admin user backing up an in-use volume. The forced backup uses a temporary volume, and quota usage was incorrectly updated when the temporary volume was deleted after the backup operation completed. Fixes `bug 1778774 `__. .. releasenotes/notes/netapp-ontap-fix-force-detach-55be3f4ac962b493.yaml @ b'93399a32bf994a3129a798be72f27a9731cb2750' - Fixed bug #1783582, where calls to os-force_detach were failing on NetApp ONTAP iSCSI/FC drivers. .. releasenotes/notes/unity-return-logged-out-initiator-6ab1f96f21bb284c.yaml @ b'f85193b5be6f4b69cf91137c40a34c64af676f52' - Dell EMC Unity Driver: Fixes `bug 1773305 `__ to return the targets which connect to the logged-out initiators, so that the zone manager can clean up the FC zone based on the correct target WWNs. .. _Pike Series Release Notes_11.1.1_stable_pike: 11.1.1 ====== ..
_Pike Series Release Notes_11.1.1_stable_pike_New Features: New Features ------------ .. releasenotes/notes/vnx-add-force-detach-support-26f215e6f70cc03b.yaml @ b'c0935c030266398a89ffcb1ebbfdb2d38a2197c2' - Add support to force detach a volume from all hosts on VNX. .. _Pike Series Release Notes_11.1.1_stable_pike_Bug Fixes: Bug Fixes --------- .. releasenotes/notes/bug-1632333-netapp-ontap-copyoffload-downloads-glance-image-twice-08801d8c7b9eed2c.yaml @ b'82a13da48e7451c2f7813bf2de6990625c05624c' - Fixed bug 1632333 with the NetApp ONTAP Driver. Now the copy offload method is invoked early to avoid downloading Glance images twice. .. releasenotes/notes/bug-1690954-40fc21683977e996.yaml @ b'fd2c17edc0332e20149e1aed652a613f2f90de61' - NetApp ONTAP NFS (bug 1690954): Fix wrong usage of export path as volume name when deleting volumes and snapshots. .. releasenotes/notes/bug-1712651-7bc90264eb5001ea.yaml @ b'e7d8a3997938e20541c9f00d508ffaee75a7c7ba' - NetApp ONTAP iSCSI (bug 1712651): Fix ONTAP NetApp iSCSI driver not raising a proper exception when trying to extend an attached volume beyond its max geometry. .. releasenotes/notes/bug-1718739-netapp-eseries-fix-provisioned-capacity-report-8c51fd1173c15dbf.yaml @ b'e2b755590a952dbdd490d0d02f53cf6852463cce' - NetApp E-series (bug 1718739):The NetApp E-series driver has been fixed to correctly report the "provisioned_capacity_gb". Now it sums the capacity of all the volumes in the configured backend to get the correct value. This bug fix affects all the protocols supported by the driver (FC and iSCSI). .. releasenotes/notes/bug-1762424-f76af2f37fe408f1.yaml @ b'e3145a30cf3d904cba05834086039487dddcf714' - NetApp ONTAP (bug 1762424): Fix ONTAP NetApp driver not being able to extend a volume to a size greater than the corresponding LUN max geometry. .. releasenotes/notes/dell-emc-sc-bugfix-1756914-ffca3133273040f6.yaml @ b'adf7d84b66fdbbfba03721cc5f3f1f392a702eb4' - Dell EMC SC driver correctly returns initialize_connection data when more than one IQN is attached to a volume. This fixes some random Nova Live Migration failures where the connection information being returned was for an IQN other than the one for which it was being requested. .. releasenotes/notes/fail-detach-lun-when-auto-zone-enabled-9c87b18a3acac9d1.yaml @ b'ead818199e07923760e525c0367c9dcb5f4ab343' - Dell EMC Unity Driver: Fixes `bug 1759175 `__ to detach the lun correctly when auto zone was enabled and the lun was the last one attached to the host. .. releasenotes/notes/fix-abort-backup-df196e9dcb992586.yaml @ b'94393daaa057b604ab212a99cd5cd18c693c95c1' - We no longer leave orphaned chunks on the backup backend or leave a temporary volume/snapshot when aborting a backup. .. releasenotes/notes/netapp-ontap-use_exact_size-d03c90efbb8a30ac.yaml @ b'3e462d29f47594ab485aa8eb8091929ee9c30516' - Fixed bug #1731474 on NetApp Data ONTAP driver that was causing LUNs to be created with larger size than requested. This fix requires version 9.1 of ONTAP or later. .. releasenotes/notes/netapp_fix_svm_scoped_permissions.yaml @ b'0cc92ee4b5d3f4c87ed40685246537c7fbfa1891' - NetApp cDOT block and file drivers have improved support for SVM scoped user accounts. Features not supported for SVM scoped users include QoS, aggregate usage reporting, and dedupe usage reporting. .. releasenotes/notes/unity-force-detach-7c89e72105f9de61.yaml @ b'25d76990d0f2c4fb9dba6a7424e7f0c89d1c70a3' - Corrected support to force detach a volume from all hosts on Unity. .. 
_Pike Series Release Notes_11.0.2_stable_pike: 11.0.2 ====== .. _Pike Series Release Notes_11.0.2_stable_pike_New Features: New Features ------------ .. releasenotes/notes/k2-disable-discovery-bca0d65b5672ec7b.yaml @ b'7bcb2ff94cf38eaa9def1115569981760e36510c' - Kaminario K2 iSCSI driver now supports non discovery multipathing (Nova and Cinder won't use iSCSI sendtargets) which can be enabled by setting `disable_discovery` to `true` in the configuration. .. _Pike Series Release Notes_11.0.2_stable_pike_Known Issues: Known Issues ------------ .. releasenotes/notes/k2-non-unique-fqdns-b62a269a26fd53d5.yaml @ b'c11b6a9da277f91d86a963db48045d9dcc44deca' - Kaminario K2 now supports networks with duplicated FQDNs via configuration option `unique_fqdn_network` so attaching in these networks will work (bug #1720147). .. _Pike Series Release Notes_11.0.2_stable_pike_Bug Fixes: Bug Fixes --------- .. releasenotes/notes/ps-duplicate-ACL-5aa447c50f2474e7.yaml @ b'1efed3a4345a3a6d51172fad726aa48a972008e8' - Dell EMC PS Series Driver code was creating duplicate ACL records during live migration. Fixes the initialize_connection code to not create access record for a host if one exists previously. This change fixes bug 1726591. .. releasenotes/notes/ps-extend_volume-no-snap-8aa447c50f2475a7.yaml @ b'1e5cd9ba2a7906a182b0f2b0d7678213d80cd493' - Dell EMC PS Series Driver was creating unmanaged snapshots when extending volumes. Fixed it by adding the missing no-snap parameter. This change fixes bug 1720454. .. _Pike Series Release Notes_11.0.1_stable_pike: 11.0.1 ====== .. _Pike Series Release Notes_11.0.1_stable_pike_New Features: New Features ------------ .. releasenotes/notes/rbd-stats-report-0c7e803bb0b1aedb.yaml @ b'8d7f37e810d3228d8b79e4add1a383abe516d9bb' - RBD driver supports returning a static total capacity value instead of a dynamic value like it's been doing. Configurable with `report_dynamic_total_capacity` configuration option. .. _Pike Series Release Notes_11.0.1_stable_pike_Upgrade Notes: Upgrade Notes ------------- .. releasenotes/notes/bug-1714209-netapp-ontap-drivers-oversubscription-issue-c4655b9c4858d7c6.yaml @ b'558571b44d9cd2195993e42539fd2c689b179ee6' - If using the NetApp ONTAP drivers (7mode/cmode), the configuration value for "max_over_subscription_ratio" may need to be increased to avoid scheduling problems where storage pools that previously were valid to schedule new volumes suddenly appear to be out of space to the Cinder scheduler. See documentation `here `_. .. releasenotes/notes/rbd-stats-report-0c7e803bb0b1aedb.yaml @ b'8d7f37e810d3228d8b79e4add1a383abe516d9bb' - RBD/Ceph backends should adjust `max_over_subscription_ratio` to take into account that the driver is no longer reporting volume's physical usage but it's provisioned size. .. _Pike Series Release Notes_11.0.1_stable_pike_Bug Fixes: Bug Fixes --------- .. releasenotes/notes/bug-1714209-netapp-ontap-drivers-oversubscription-issue-c4655b9c4858d7c6.yaml @ b'558571b44d9cd2195993e42539fd2c689b179ee6' - The ONTAP drivers ("7mode" and "cmode") have been fixed to not report consumed space as "provisioned_capacity_gb". They instead rely on the cinder scheduler's calculation of "provisioned_capacity_gb". This fixes the oversubscription miscalculations with the ONTAP drivers. This bugfix affects all three protocols supported by these drivers (iSCSI/FC/NFS). .. 
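The RBD upgrade note above suggests re-tuning ``max_over_subscription_ratio`` now that the driver reports provisioned rather than physical usage, alongside the new ``report_dynamic_total_capacity`` option. A minimal ``cinder.conf`` sketch, assuming a hypothetical backend section named ``ceph`` (the ratio value is illustrative, not a recommendation)::

    [ceph]
    volume_driver = cinder.volume.drivers.rbd.RBDDriver
    rbd_pool = volumes
    # Set to false for a static total capacity, as described in the 11.0.1 note.
    report_dynamic_total_capacity = true
    # Re-tune oversubscription for provisioned-size-based reporting.
    max_over_subscription_ratio = 10.0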
releasenotes/notes/ps-optimize-parsing-8aa447c50f2474c7.yaml @ b'dbde6a3cad318ad8a9e23e23184bccb442b069aa' - Dell EMC PS Series Driver code reporting volume stats is now optimized to return the information earlier and accelerate the process. This change fixes bug 1661154. .. releasenotes/notes/ps-over-subscription-ratio-cal-8aa447c50f2474a8.yaml @ b'a6632b7a79a13e3611801080dfdb4131b90985a5' - Dell EMC PS Driver stats report has been fixed, now reports the `provisioned_capacity_gb` properly. Fixes bug 1719659. .. releasenotes/notes/rbd-stats-report-0c7e803bb0b1aedb.yaml @ b'8d7f37e810d3228d8b79e4add1a383abe516d9bb' - RBD stats report has been fixed, now properly reports `allocated_capacity_gb` and `provisioned_capacity_gb` with the sum of the sizes of the volumes (not physical sizes) for volumes created by Cinder and all available in the pool respectively. Free capacity will now properly handle quota size restrictions of the pool. .. _Pike Series Release Notes_11.0.0_stable_pike: 11.0.0 ====== .. _Pike Series Release Notes_11.0.0_stable_pike_Prelude: Prelude ------- .. releasenotes/notes/add-cg-capability-to-groups-2eb3e71682a88600.yaml @ b'aa277fe1b606525e724af0d3e432edff90310903' Drivers supporting consistent group snapshot in generic volume groups reports "consistent_group_snapshot_enabled = True" instead of "consistencygroup_support = True". As a result, a spec such as "consistencygroup_support: ' True'" in either group type or volume type will cause the scheduler not to choose the backend that does not report "consistencygroup_support = True". In order to create a generic volume group that supports consistent group snapshot, "consistent_group_snapshot_enable: ' True'" should be set in the group type specs and volume type extra specs, and "consistencygroup_support: ' True'" should not be set in group type spec and volume type extra specs. .. _Pike Series Release Notes_11.0.0_stable_pike_New Features: New Features ------------ .. releasenotes/notes/Enable-HPE-3PAR-Compression-Feature-90e4de4b64a74a46.yaml @ b'd7940f57438a7e10d74bffdbc0240867b52ae341' - HPE 3PAR driver adds following functionalities Creating thin/dedup compresssed volume. Retype for tpvv/tdvv volumes to be compressed. Migration of compressed volumes. Create compressed volume from compressed volume/snapshot source. Compression support to create cg from source. .. releasenotes/notes/HPE-3par-Generic-Volume-Group-e048002e1c3469a3.yaml @ b'fadefc8206a612f035ce0530ce97c6703c4957b1' - Added consistency group capability to generic volume groups in the HPE 3PAR driver. .. releasenotes/notes/Lefthand-generic-volume-group-570d07b4786b93c2.yaml @ b'81ece6a9f2ac9b4ff3efe304bab847006f8b0aef' - Add consistent group capability to generic volume groups in Lefthand driver. .. releasenotes/notes/SolidFire-generic-volume-group-1b1e55661cd83a43.yaml @ b'1cbf1194203945308bfcae2656e800e5b084275f' - Add consistent group capability to generic volume groups in the SolidFire driver. .. releasenotes/notes/add-connection-info-to-attachment-84d4dg45uh41db15.yaml @ b'8031fb1e98f189b165f00c919f4f33d9e0d01226' - Added attribute ``connection_info`` to attachment object. .. releasenotes/notes/add-filters-support-to-get_pools-0852e9c0e42fbf98.yaml @ b'ba80322a5789a4b240108a7911db9235e8140016' - Add filters support to get_pools API v3.28. .. 
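The prelude above explains that drivers now report ``consistent_group_snapshot_enabled`` instead of ``consistencygroup_support``. A hedged CLI sketch of configuring a group type and volume type accordingly; the names ``cg_group_type`` and ``cg_vol_type`` are hypothetical, the usual ``"<is> True"`` extra-spec syntax is assumed, and a microversion new enough for group types (3.11 or later) is assumed::

    cinder group-type-create cg_group_type
    cinder group-type-key cg_group_type set consistent_group_snapshot_enabled="<is> True"
    cinder type-key cg_vol_type set consistent_group_snapshot_enabled="<is> True"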
releasenotes/notes/add-like-filter-support-7d4r78d6de3984dv.yaml @ b'6df8415411f5166a8682114cb8a972d3b51a47e3' - Added like operator support to filters for the following resources:: - volume - snapshot - backup - group - group-snapshot - attachment - message .. releasenotes/notes/add-periodic-task-to-clean-expired-messages-84f47gxc88hda035.yaml @ b'c1cb931ecbb785e7196c233087ee368474b604a4' - Added periodic task to clean expired messages in cinder scheduler, also added a configuration option ``message_reap_interval`` to handle the interval. .. releasenotes/notes/add-resource-filters-api-8g3dub1700qaye98.yaml @ b'8fcb809509fbdd4d5b0ecee2c33fa44f405b4aeb' - Added ``resource_filters`` API to retrieve configured resource filters. .. releasenotes/notes/add-revert-to-snapshot-support-2d21a3dv4f5fa087.yaml @ b'8fba9a90807714f8869c470af6e28bb1da027a54' - Add revert to snapshot API and support in LVM driver. .. releasenotes/notes/add-volume-type-filter-to_get-pools-c791132540921398.yaml @ b'd5a3fdabca25a63bd3d01c86442ef649e7613aff' - Add ``volume-type`` filter to API Get-Pools .. releasenotes/notes/add_ceph_custom_keyring_path-43a3b8c21a1ab3c4.yaml @ b'd0520a07e9dcee53fe2f13900f4c36c7e455c6f0' - Added RBD keyring configuration parameter ``rbd_keyring_conf`` to define custom path of Ceph keyring file. .. releasenotes/notes/allow-huawei-driver-lun-copy-speed-configurable-361a480e7b7e361d.yaml @ b'045b1647c0ae4c03ee588ca7874fd4a9aa7f6879' - Allow users to specify the copy speed while using Huawei driver to create volume from snapshot or clone volume, by the new added metadata 'copyspeed'. For example, user can add --metadata copyspeed=1 when creating volume from source volume/snapshot. The valid optional range of copyspeed is [1, 2, 3, 4], respectively representing LOW, MEDIUM, HIGH and HIGHEST. .. releasenotes/notes/backup-ceph-driver-journaling-exculsive-lock-features-6b6044138a288a83.yaml @ b'dc96c948f7b69d5b60f10fb6ad130226bdfab368' - Added new BoolOpt ``backup_ceph_image_journals`` for enabling the Ceph image features required to support RBD mirroring of Cinder backup pool. .. releasenotes/notes/bug-1614095-add-user_id-to-snapshot_show-4884fab825983c3a.yaml @ b'b1e2b0459ca4dd5b84eb8fcb66e4a2414c154183' - Add ``user_id`` field to snapshot list/detail and snapshot show. .. releasenotes/notes/coprhd-generic-volume-group-a1d41d439f94ae19.yaml @ b'b248aad12a223095b22b312b16b18c108df81fd4' - Add consistent group capability to generic volume groups in CoprHD driver. .. releasenotes/notes/datera-2.4.0-driver-update-164bbc77e6b45eb7.yaml @ b'1e23faf82a3babe710e9c7a1264925cb32c6f78d' - Added ``datera_disable_profiler`` boolean config option. .. releasenotes/notes/datera-2.4.0-driver-update-164bbc77e6b45eb7.yaml @ b'1e23faf82a3babe710e9c7a1264925cb32c6f78d' - Added Cinder fast-retype support to Datera EDF driver. .. releasenotes/notes/datera-2.4.0-driver-update-164bbc77e6b45eb7.yaml @ b'1e23faf82a3babe710e9c7a1264925cb32c6f78d' - Added Volume Placement extra-specs support to Datera EDF driver. .. releasenotes/notes/datera-2.4.0-driver-update-164bbc77e6b45eb7.yaml @ b'1e23faf82a3babe710e9c7a1264925cb32c6f78d' - Fixed ACL multi-attach bug in Datera EDF driver. .. releasenotes/notes/datera-2.4.0-driver-update-164bbc77e6b45eb7.yaml @ b'1e23faf82a3babe710e9c7a1264925cb32c6f78d' - Fixed a few scalability bugs in the Datera EDF driver. .. 
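Several of the options introduced above are plain ``cinder.conf`` settings. A minimal sketch combining them; the section name ``ceph``, the keyring path, and the interval value are illustrative assumptions::

    [DEFAULT]
    # Interval, in seconds, between runs of the expired-message cleanup task.
    message_reap_interval = 86400
    # Enable the Ceph image features required for RBD mirroring of the backup pool.
    backup_ceph_image_journals = true

    [ceph]
    # Custom path to the Ceph keyring used by the RBD driver.
    rbd_keyring_conf = /etc/ceph/ceph.client.cinder.keyring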
releasenotes/notes/dell-emc-sc-support-generic-groups-98c7452d705b36f9.yaml @ b'bd619f2ceac28eabb78e6fcb9fff54348463bf44' - Add consistency group capability to Generic Volume Groups in the Dell EMC SC driver. .. releasenotes/notes/ds8k-replication-group-3f2e8cd3c2e291a3.yaml @ b'b5e46bb9bb4ad37dba01011d8d8f12eb99916cf9' - Add replication consistency group support in DS8K cinder driver. .. releasenotes/notes/ds8k_specify_pool_lss-5329489c263951ba.yaml @ b'b401355c6ffa8e933b72ec9db63496da6998c1f5' - DS8K driver adds two new properties into extra-specs so that user can specify pool or lss or both of them to allocate volume in their expected area. .. releasenotes/notes/falconstor-extend-driver-to-utilize-multiple-fss-pools-dc6f2bc84432a672.yaml @ b'213001f931c469bd16f2558b91eef8152caf8fab' - Added ability to specify multiple storage pools in the FalconStor driver. .. releasenotes/notes/generalized-resource-filter-hg598uyvuh119008.yaml @ b'dc31763c582169509ed2f1c3cacd3b6950baa44c' - Added generalized resource filter support in ``list volume``, ``list backup``, ``list snapshot``, ``list group``, ``list group-snapshot``, ``list attachment``, ``list message`` and ``list pools`` APIs. .. releasenotes/notes/generic-group-quota-manage-support-559629ad07a406f4.yaml @ b'608de666fabf9ab65fa905a3b9a95f7cbad83013' - Generic group is added into quota management. .. releasenotes/notes/generic-groups-in-gpfs-00bb093945a02642.yaml @ b'6252bd8e5ad77e52e720132455ccc3410d45bf65' - Added consistent group capability to generic volume groups in GPFS driver. .. releasenotes/notes/huawei-generic-group-bc3fb7236efc58e7.yaml @ b'2e06995ad5153f5d76ad9ba0f0ca0e2134fea43c' - Add CG capability to generic volume groups in Huawei driver. .. releasenotes/notes/ibm-storwzie-mirror-volume-ffe4c9bde78cdf1d.yaml @ b'76fc4edc64b04d6a736387f1b0f1acdff815e496' - Add mirrored volume support in IBM SVC/Storwize driver. .. releasenotes/notes/ibmsvciogrpselection-e607739b6f655a27.yaml @ b'edfa61c61f1ff007f43051591dfccaccd61ba4ac' - In IBM Storwize_SVC driver, user could specify only one IO group per backend definition. The user now may specify a comma separated list of IO groups, and at the time of creating the volume, the driver will select an IO group which has the least number of volumes associated with it. The change is backward compatible, meaning single value is still supported. .. releasenotes/notes/infinidat-compression-a828904aaba90da2.yaml @ b'ec55bc239caac7d849ab2aa7cbd0e0428aefc450' - Added support for volume compression in INFINIDAT driver. Compression is available on InfiniBox 3.0 onward. To enable volume compression, set ``infinidat_use_compression`` to True in the backend section in the Cinder configuration file. .. releasenotes/notes/infinidat-group-support-44cd0715de1ea502.yaml @ b'f308007862bd7362a509fc549f683b1aa94aa159' - Add CG capability to generic volume groups in INFINIDAT driver. .. releasenotes/notes/infinidat-iscsi-support-78e0d34d9e7e08c4.yaml @ b'747d4464c7fd8ea75711874e467f9cdede7560bf' - Support for iSCSI in INFINIDAT InfiniBox driver. .. releasenotes/notes/infinidat-qos-50d743591543db98.yaml @ b'd5030ca7d57532957bb4c1e6a395fe0f3e091cb6' - Added support for QoS in the INFINIDAT InfiniBox driver. QoS is available on InfiniBox 4.0 onward. .. releasenotes/notes/metadata-for-volume-summary-729ba648db4e4e54.yaml @ b'bf40945dccacdc4c75c1afb2f963f2668525f9f8' - Added support for get all distinct volumes' metadata from volume-summary API. .. 
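As the INFINIDAT note above states, compression is enabled per backend section. A minimal sketch, assuming a hypothetical backend section named ``infinibox-1``::

    [infinibox-1]
    # Requires InfiniBox 3.0 or later, as noted above.
    infinidat_use_compression = True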
releasenotes/notes/nec-nondisruptive-backup-471284d07cd806ce.yaml @ b'55e8befc4cf5cfa0ba544cefcebc475016f2a930' - Enable backup snapshot optimal path by implementing attach and detach snapshot in the NEC driver. .. releasenotes/notes/netapp-add-generic-group-support-cdot-9bebd13356694e13.yaml @ b'0215fcc022d60608a0d887dd6510496ab2162f5b' - Added generic volume group capability to NetApp cDot drivers with support for write consistent group snapshots. .. releasenotes/notes/new-nova-config-section-2a7a51a0572e7064.yaml @ b'9f213981ac349e0fa22a1aed217dbe7aee3813ae' - a [nova] section is added to configure the connection to the compute service, which is needed to the InstanceLocalityFilter, for example. .. releasenotes/notes/per-backend-az-28727aca360a1cc8.yaml @ b'7c1e92278cce54a3a0cb3dc9a059988ddc2ec3bc' - Availability zones may now be configured per backend in a multi-backend configuration. Individual backend sections can now set the configuration option ``backend_availability_zone``. If set, this value will override the [DEFAULT] ``storage_availability_zone`` setting. .. releasenotes/notes/period-task-clean-reservation-0e0617a7905df923.yaml @ b'07f242d68cac8c23e92a1ebc64094b0df26e7812' - Added periodic task to clean expired reservation in cinder scheduler. Added a configuration option ``reservation_clean_interval`` to handle the interval. .. releasenotes/notes/prophetstor-generic-groups-c7136c32b2f75c0a.yaml @ b'3cc8eef15df76d99bdcb3cbe5b89d7b6f0a5436b' - Added consistent group capability to generic volume groups in ProphetStor driver. .. releasenotes/notes/rbd-support-managing-existing-snapshot-fb871a3ea98dc572.yaml @ b'e5abf57fe985fd0e837e3d92c0087dfbe13ad56c' - Allow rbd driver to manage existing snapshot. .. releasenotes/notes/replication-group-7c6c8a153460ca58.yaml @ b'18744ba1991a7e1599d256857727454bac1ae2d2' - Introduced replication group support and added group action APIs enable_replication, disable_replication, failover_replication and list_replication_targets. .. releasenotes/notes/scaleio-generic-volume-group-ee36e4dba8893422.yaml @ b'fcbd762d9d7923ac403324c8aafa6731cb52632a' - Added consistency group support to generic volume groups in ScaleIO Driver. .. releasenotes/notes/scaleio-get-manageable-volumes-dda1e7b8e22be59e.yaml @ b'c129e80cb0f985f0d16af59360affd1dc377f707' - Added ability to list all manageable volumes within ScaleIO Driver. .. releasenotes/notes/service_dynamic_log_change-55147d288be903f1.yaml @ b'a60a09ce5fec847ee4af1cf2661f04ad15459c98' - Added new APIs on microversion 3.32 to support dynamically changing log levels in Cinder services without restart as well as retrieving current log levels, which is an easy way to ping via the message broker a service. .. releasenotes/notes/shared-backend-config-d841b806354ad5be.yaml @ b'76016fffc946301ba4df6b2b58713dcb41d45dff' - New config format to allow for using shared Volume Driver configuration defaults via the [backend_defaults] stanza. Config options defined there will be used as defaults for each backend enabled via enabled_backends. .. releasenotes/notes/smbfs-pools-support-bc43c653cfb1a34f.yaml @ b'd60f1a8a7c58e3413d966f449e5139f1da3e3a01' - The SMBFS driver now exposes share information to the scheduler via pools. The pool names are configurable, defaulting to the share names. .. releasenotes/notes/storwize-generic-volume-group-74495fa23e059bf9.yaml @ b'103870f40d8a65892dab1edc69413c3e16321edd' - Add consistency group capability to generic volume groups in Storwize drivers. .. 
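The ``[backend_defaults]`` and per-backend availability zone notes above can be combined. A hedged ``cinder.conf`` sketch; the backend section names ``lvm-az1``/``lvm-az2`` and the zone names are hypothetical::

    [DEFAULT]
    enabled_backends = lvm-az1, lvm-az2
    storage_availability_zone = nova

    [backend_defaults]
    # Defaults applied to every backend listed in enabled_backends.
    volume_driver = cinder.volume.drivers.lvm.LVMVolumeDriver

    [lvm-az1]
    # Overrides [DEFAULT]/storage_availability_zone for this backend only.
    backend_availability_zone = az1

    [lvm-az2]
    backend_availability_zone = az2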
releasenotes/notes/storwize-gmcv-support-8aceee3f40eddb9f.yaml @ b'b03992b6161ea1852b2abad9f04062bebd51a10c' - Add global mirror with change volumes(gmcv) support and user can manage gmcv replication volume by SVC driver. An example to set a gmcv replication volume type, set property replication_type as " gmcv", property replication_enabled as " True" and set property drivers:cycle_period_seconds as 500. .. releasenotes/notes/support-extend-inuse-volume-9e4atf8912qaye99.yaml @ b'3dd842de8282efc95f3727d486cfc061888fe0a5' - Add ability to extend ``in-use`` volume. User should be aware of the whole environment before using this feature because it's dependent on several external factors below: * nova-compute version - needs to be the latest for Pike. * only the libvirt compute driver supports this currently. * only iscsi and fibre channel volume types are supported on the nova side currently. Administrator can disable this ability by updating the ``volume:extend_attached_volume`` policy rule. .. releasenotes/notes/support-metadata-for-backup-3d8753f67e2934fa.yaml @ b'39c732bbce64665531140411669d3bd163d513cf' - Added metadata support for backup source. Now users can create/update metadata for a specified backup. .. releasenotes/notes/support-project-id-filter-for-limit-bc5d49e239baee2a.yaml @ b'4a2448bd15a0191df8bb4710870e2e0b5750278a' - Supported ``project_id`` admin filters to limits API. .. releasenotes/notes/support_sort_backup_by_name-0b080bcb60c0eaa0.yaml @ b'2c7758d4513fa257b0d684de878f921184b47ae1' - Add support for sorting backups by "name". .. releasenotes/notes/support_sort_snapshot_with_name-7b66a2d8e587275d.yaml @ b'8b5264f559e60a8947f9d879070ff67960ae86f3' - Support to sort snapshots with "name". .. releasenotes/notes/unity-fast-clone-02ae88ba8fdef145.yaml @ b'a6c22238e1021f51d0348e58402db4f56dbe539d' - Add thin clone support in the Unity driver. Unity storage supports the thin clone of a LUN from OE version 4.2.0. It is more efficient than the dd solution. However, there is a limit of thin clone inside each LUN family. Every time the limit reaches, a new LUN family will be created by a dd-copy, and then the volume clone afterward will use the thin clone of the new LUN family. .. releasenotes/notes/verbose-online-migrations-94fb7e8a85cdbc10.yaml @ b'939fa2c0ff6527258a9b4e17be8f0f5a765eefce' - The cinder-manage online_data_migrations command now prints a tabular summary of completed and remaining records. The goal here is to get all your numbers to zero. The previous execution return code behavior is retained for scripting. .. releasenotes/notes/veritas_access_driver-c73b2320ba9f46a8.yaml @ b'5993af92ef9fe86e23942b6c0e2188c4831de8f8' - Added NFS based driver for Veritas Access. .. releasenotes/notes/vmax-generic-volume-group-28b3b2674c492bbc.yaml @ b'1ee279bd901b36e3ca84500a4d7339b09aa84524' - Add consistent group snapshot support to generic volume groups in VMAX driver version 3.0. .. releasenotes/notes/vmax-rest-94e48bed6f9c134c.yaml @ b'f6d9fbadb23a5dcd7aea026895b38e11f1d3ec2a' - VMAX driver version 3.0, replacing SMI-S with Unisphere REST. This driver supports VMAX3 hybrid and All Flash arrays. .. releasenotes/notes/vmax-rest-compression-10c2590052a9465e.yaml @ b'51252cf5049e1e714411ea7ce3f309c31e51822a' - Adding compression functionality to VMAX driver version 3.0. .. releasenotes/notes/vmax-rest-livemigration-885dd8731d5a8a88.yaml @ b'dd065f8e191ffb2762e4cd75a1350e41aed0caae' - Adding Live Migration functionality to VMAX driver version 3.0. .. 
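The Storwize gmcv note above lists the volume-type properties involved. A CLI sketch of setting them; the type name ``gmcv_type`` is hypothetical, and the conventional ``"<in> "``/``"<is> "`` extra-spec prefixes are assumed where the note shows a leading space::

    cinder type-create gmcv_type
    cinder type-key gmcv_type set replication_type="<in> gmcv" \
        replication_enabled="<is> True" \
        drivers:cycle_period_seconds=500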
releasenotes/notes/vmax-rest-qos-6bb4073b92c932c6.yaml @ b'95dd5b488142801a7cac575b1901938051bee1bf' - Adding Qos functionality to VMAX driver version 3.0. .. releasenotes/notes/vmax-rest-replication-612fcfd136cc076e.yaml @ b'22eb9b69c1c7ee11ab5cfdec4957ce7b86ccbf14' - Adding Replication V2.1 functionality to VMAX driver version 3.0. .. releasenotes/notes/vmax-rest-retype-ceba5e8d04f637b4.yaml @ b'2f08c8dea3c4506ce186ac6ab58148f734cfacca' - Add retype functionality to VMAX driver version 3.0. .. releasenotes/notes/vmware_adapter_type-66164bc3857f244f.yaml @ b'8dbf2b7e980678f3f7dd8a0071d5f70cc3ad266a' - VMware VMDK driver now supports volume type extra-spec option ``vmware:adapter_type`` to specify the adapter type of volumes in vCenter server. .. releasenotes/notes/vmware_vmdk_default_adapter_type-8e247bce5b229c7a.yaml @ b'fdd49d09a6c85b4b07be18d56ac29c5af2ac224f' - Added config option ``vmware_adapter_type`` for the VMware VMDK driver to specify the default adapter type for volumes in vCenter server. .. releasenotes/notes/vnx-qos-support-7057196782e2c388.yaml @ b'93993a0cedbe2105d7481fda0b1f83dee0a63fe4' - Adds QoS support for VNX Cinder driver. .. releasenotes/notes/vnx-replication-group-2ebf04c80e2171f7.yaml @ b'c52323babd11432156eaa7cb44ee16c766b70f6a' - Add consistent replication group support in VNX cinder driver. .. releasenotes/notes/vrts_hyperscale_driver-5b63ab706ea8ae89.yaml @ b'2902da9c58fb531a719036583885f8894ae6ac2d' - Added volume backend driver for Veritas HyperScale storage. .. releasenotes/notes/win-iscsi-config-portals-51895294228d7883.yaml @ b'b2ddad27522a79e7d18e5a6c74776c82faf12fc6' - The Windows iSCSI driver now returns multiple portals when available and multipath is requested. .. releasenotes/notes/xiv-generic-volume-group-4609cdc86d6aaf81.yaml @ b'23cf5b08ce4149da62c720a28dfb2c90fef57d25' - Add consistent group capability to generic volume groups in XIV, Spectrum Accelerate and A9000/R storage systems. .. releasenotes/notes/xiv-new-qos-independent-type-58885c77efe24798.yaml @ b'9b088ca82a2612f0cf73cfa6bc670c6e5b5f64b6' - Added independent and shared types for qos classes in XIV & A9000. Shared type enables to share bandwidth and IO rates between volumes of the same class. Independent type gives each volume the same bandwidth and IO rates without being affected by other volumes in the same qos class. .. releasenotes/notes/xiv-replication-group-7ca437c90f2474a7.yaml @ b'bb9a4e1a90e6223a3602172336c8b45f578df55f' - Add consistency group replication support in XIV\A9000 Cinder driver. .. _Pike Series Release Notes_11.0.0_stable_pike_Upgrade Notes: Upgrade Notes ------------- .. releasenotes/notes/db-schema-from-mitaka-168ac06161e9ca0d.yaml @ b'5f95cbded70f2ecfc0e7e4d8dd5ca84b8e2575df' - The Cinder database can now only be upgraded from changes since the Mitaka release. In order to upgrade from a version prior to that, you must now upgrade to at least Mitaka first, then to Pike or later. .. releasenotes/notes/disco-options-94fe9eaad5e397a5.yaml @ b'7999271653b99d40335b288a55e91de077148cc1' - Some of DISCO driver options were incorrectly read from ``[DEFAULT]`` section in the cinder.conf. Now those are correctly read from ``[]`` section. 
This includes following options: * ``disco_client`` * ``disco_client_port`` * ``rest_ip`` * ``choice_client`` * ``disco_src_api_port`` * ``retry_interval`` Also some options are renamed (note that 3 of them were both moved and renamed): * ``rest_ip`` to ``disco_rest_ip`` * ``choice_client`` to ``disco_choice_client`` * ``volume_name_prefix`` to ``disco_volume_name_prefix`` * ``snapshot_check_timeout`` to ``disco_snapshot_check_timeout`` * ``restore_check_timeout`` to ``disco_restore_check_timeout`` * ``clone_check_timeout`` to ``disco_clone_check_timeout`` * ``retry_interval`` to ``disco_retry_interval`` Old names and locations are still supported but support will be removed in the future. .. releasenotes/notes/dothill-drivers-removed-da00a6b83865271a.yaml @ b'76522b90a3c960ef15f0ad6ce37d24e556b9a5a8' - Support for Dot Hill AssuredSAN arrays has been removed. .. releasenotes/notes/hnas-remove-iscsi-driver-419e9c08133f9f0a.yaml @ b'6c603df9ca240299b706a9b6c19bbeb347539ce3' - The Hitachi NAS Platform iSCSI driver was marked as not supported in the Ocata realease and has now been removed. .. releasenotes/notes/infinidat-infinisdk-04f0edc0d0a597e3.yaml @ b'921205a8f23001af2f98f621496d43594ca8c5b4' - INFINIDAT volume driver now requires the 'infinisdk' python module to be installed. .. releasenotes/notes/mark-blockbridge-unsupported-c9e55df0eb2e3c9f.yaml @ b'3f4916a87334c45e851909f9bcf16a669d368266' - The Blockbridge driver has been marked as unsupported and is now deprecated. ``enable_unsupported_drivers`` will need to be set to ``True`` in cinder.conf to continue to use it. .. releasenotes/notes/mark-coho-unsupported-989db9d88ed7fff8.yaml @ b'5aed3b1384526ad146b4b153eda935be356b5ed6' - The Coho driver has been marked as unsupported and is now deprecated. ``enable_unsupported_driver`` will need to be set to ``True`` in the driver's section in cinder.conf to continue to use it. .. releasenotes/notes/mark-falconstor-unsupported-3b065556a4cd94de.yaml @ b'314df517a56381c6be28f5919fd25db555b14579' - The Falconstor drivers have been marked as unsupported and are now deprecated. ``enable_unsupported_driver`` will need to be set to ``True`` in the driver's section in cinder.conf to continue to use it. .. releasenotes/notes/mark-infortrend-deprecated-553de89f8dd58aa8.yaml @ b'19413e8abe50aa389213585cfd8591e0c0ac1987' - The Infortrend drivers have been marked as unsupported and are now deprecated. ``enable_unsupported_driver`` will need to be set to ``True`` in the driver's section in cinder.conf to continue to use them. .. releasenotes/notes/mark-qnap-unsupported-79bd8ece9a2bfcd2.yaml @ b'b59dc58723094f519b0e1d5613da5bc55124e58f' - The QNAP driver has been marked as unsupported and is now deprecated. ``enable_unsupported_drivers`` will need to be set to ``True`` in cinder.conf to continue to use it. .. releasenotes/notes/mark-reduxio-deprecated-b435032a8fdb16f2.yaml @ b'0953f1b6c21bf3737c656550bc21a1c63ec26988' - The Reduxio driver has been marked unsupported and is now deprecated. ``use_unsupported_driver`` will need to be set to ``True`` in the driver's section in cinder.conf to use it. .. releasenotes/notes/mark-synology-deprecated-134ba9764e14af67.yaml @ b'31ad999435d5e3b03cb96aeb4b8ebdcb2fff70c2' - The Synology driver has been marked as unsupported and is now deprecated. ``enable_unsupported_driver`` will need to be set to ``True`` in the driver's section in ``cinder.conf`` to continue to use it. .. 
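For the unsupported-driver notes above, the flag is set in the affected driver's own backend section. A minimal sketch, assuming a hypothetical backend section named ``synology-1``::

    [synology-1]
    # Acknowledge continued use of an unsupported, deprecated driver.
    enable_unsupported_driver = True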
releasenotes/notes/mark-tegile-deprecated-1effb23010ea997c.yaml @ b'943f3e0660b04e982f95ef5f2fe6385787f7d509' - The Tegile driver has been marked as unsupported and is now deprecated. ``enable_unsupported_driver`` will need to be set to ``True`` in the driver's section in cinder.conf to continue to use it. .. releasenotes/notes/mark-violin-unsupported-fdf6b34cf9847359.yaml @ b'061464fa0756f0037c525bac77c00247635a9951' - The Violin drivers have been marked as unsupported and are now deprecated. ``enable_unsupported_drivers`` will need to be set to ``True`` in cinder.conf to continue to use them. .. releasenotes/notes/mark-xio-deprecated-18c914e15695d793.yaml @ b'346f51e6cfae7d1586c7fbc27329ed9cf48aae5f' - The X-IO driver has been marked as unsupported and is now deprecated. ``enable_unsupported_driver`` will need to be set to ``True`` in the driver's section in cinder.conf to continue to use it. .. releasenotes/notes/mark-zte-unsupported-3c048e419264eca2.yaml @ b'54583a40dfc896b800d9ab3c8e4425da7a1a240b' - The ZTE driver has been marked as unsupported and is now deprecated. ``enable_unsupported_driver`` will need to be set to ``True`` in the driver's section in cinder.conf to continue to use it. .. releasenotes/notes/pure-default-replica-interval-07de0a56f61c7c1e.yaml @ b'0d02e6f6b15f290ead2f61a5b96411408519c122' - The default value for pure_replica_interval_default used by Pure Storage volume drivers has changed from 900 to 3600 seconds. .. releasenotes/notes/remove_service_filter-380e7990bfdbddc8.yaml @ b'fa3752efdb787c0e3e71f6690b701235e79ae697' - The ``service`` filter for service list API was deprecated 3 years ago in 2013 July (Havana). Removed this filter and please use "binary" instead. .. releasenotes/notes/removing-middleware-sizelimit-ba86907acbda83de.yaml @ b'644c50fe0e3d644d5bd7ebc25c4bcb1d5fe29a68' - Removing deprecated file cinder.middleware.sizelimit. In your api-paste.ini, replace cinder.middleware.sizelimit:RequestBodySizeLimiter.factory with oslo_middleware.sizelimit:RequestBodySizeLimiter.factory .. releasenotes/notes/snapshot_backing_up_status_support-164fbbb2a564e137.yaml @ b'9f213981ac349e0fa22a1aed217dbe7aee3813ae' - The "backing-up" status is added to snapshot's status matrix. .. releasenotes/notes/tooz-coordination-heartbeat-cfac1064fd7878be.yaml @ b'42dafd2705a8cb4346c396376977c705e55d9e7c' - The coordination system used by Cinder has been simplified to leverage tooz builtin heartbeat feature. Therefore, the configuration options `coordination.heartbeat`, `coordination.initial_reconnect_backoff` and `coordination.max_reconnect_backoff` have been removed. .. releasenotes/notes/type-extra-spec-policies-b7742b0ac2732864.yaml @ b'46d9b4091160d8aa957dd49a8b12c1c887da136a' - When managing volume types an OpenStack provider is now given more control to grant access to for different storage type operations. The provider can now customize access to type create, delete, update, list, and show using new entries in the cinder policy file. As an example one provider may have roles called viewer, admin, type_viewer, and say type_admin. Admin and type_admin can create, delete, update types. Everyone can list the storage types. Admin, type_viewer, and type_admin can view the extra_specs. 
"volume_extension:types_extra_specs:create": "rule:admin or rule:type_admin", "volume_extension:types_extra_specs:delete": "rule:admin or rule:type_admin", "volume_extension:types_extra_specs:index": "", "volume_extension:types_extra_specs:show": "rule:admin or rule:type_admin or rule:type_viewer", "volume_extension:types_extra_specs:update": "rule:admin or rule:type_admin" .. releasenotes/notes/use-glance-v2-api-and-deprecate-glance_api_version-1a3b698429cb754e.yaml @ b'a766fb0ead97ad4a67092e0f68ca1b9b25dbc17e' - Cinder now defaults to using the Glance v2 API. The ``glance_api_version`` configuration option has been deprecated and will be removed in the 12.0.0 Queens release. .. releasenotes/notes/vmware_vmdk_enforce_vc_55-7e1b3ede9bf2129b.yaml @ b'549092a5483d1e6e5693b3cec79d3dca20905717' - The VMware VMDK driver now enforces minimum vCenter version of 5.5. .. _Pike Series Release Notes_11.0.0_stable_pike_Deprecation Notes: Deprecation Notes ----------------- .. releasenotes/notes/deprecate-api-v2-9f4543ab2e14b018.yaml @ b'f6d3454f608ec40570deb62997ccda8048f6e2dc' - The Cinder v2 API has now been marked as deprecated. All new client code should use the v3 API. API v3 adds support for microversioned API calls. If no microversion is requested, the base 3.0 version for the v3 API is identical to v2. .. releasenotes/notes/deprecate_osapi_volume_base_url-b6984886a902a562.yaml @ b'811395c6453c59abffadc9fd0c08e887b1a8b996' - Instead of using osapi_volume_base_url use public_endpoint. Both do the same thing. .. releasenotes/notes/falconstor-extend-driver-to-utilize-multiple-fss-pools-dc6f2bc84432a672.yaml @ b'213001f931c469bd16f2558b91eef8152caf8fab' - The fss_pool option is deprecated. Use fss_pools instead. .. releasenotes/notes/hitachi-unsupported-drivers-37601e5bfabcdb8f.yaml @ b'595c8d3f8523a9612ccc64ff4147eab993493892' - The Hitachi Block Storage Driver (HBSD) and VSP driver have been marked as unsupported and are now deprecated. enable_unsupported_driver will need to be set to True in cinder.conf to continue to use them. .. releasenotes/notes/hnas-deprecate-nfs-driver-0d114bbe141b5d90.yaml @ b'c37fcfa374f5719b7c527a19286e7950b0231b4d' - The Hitachi NAS NFS driver has been marked as unsupported and is now deprecated. enable_unsupported_driver will need to be set to True in cinder.conf to continue to use it. .. releasenotes/notes/mark-blockbridge-unsupported-c9e55df0eb2e3c9f.yaml @ b'3f4916a87334c45e851909f9bcf16a669d368266' - The Blockbridge driver has been marked as unsupported and is now deprecated. ``enable_unsupported_drivers`` will need to be set to ``True`` in cinder.conf to continue to use it. If its support status does not change it will be removed in the next release. .. releasenotes/notes/mark-coho-unsupported-989db9d88ed7fff8.yaml @ b'5aed3b1384526ad146b4b153eda935be356b5ed6' - The Coho driver has been marked as unsupported and is now deprecated. ``enable_unsupported_driver`` will need to be set to ``True`` in the driver's section in cinder.conf to continue to use it. If its support status does not change, they will be removed in the Queens development cycle. .. releasenotes/notes/mark-falconstor-unsupported-3b065556a4cd94de.yaml @ b'314df517a56381c6be28f5919fd25db555b14579' - The Falconstor drivers have been marked as unsupported and are now deprecated. ``enable_unsupported_driver`` will need to be set to ``True`` in the driver's section in cinder.conf to continue to use it. If its support status does not change, they will be removed in the Queens development cycle. .. 
releasenotes/notes/mark-infortrend-deprecated-553de89f8dd58aa8.yaml @ b'19413e8abe50aa389213585cfd8591e0c0ac1987' - The Infortrend drivers have been marked as unsupported and are now deprecated. ``enable_unsupported_driver`` will need to be set to ``True`` in the driver's section in cinder.conf to continue to use them. If their support status does not change, they will be removed in the Queens development cycle. .. releasenotes/notes/mark-qnap-unsupported-79bd8ece9a2bfcd2.yaml @ b'b59dc58723094f519b0e1d5613da5bc55124e58f' - The QNAP driver has been marked as unsupported and is now deprecated. ``enable_unsupported_drivers`` will need to be set to ``True`` in cinder.conf to continue to use it. If its support status does not change it will be removed in the next release. .. releasenotes/notes/mark-reduxio-deprecated-b435032a8fdb16f2.yaml @ b'0953f1b6c21bf3737c656550bc21a1c63ec26988' - The Reduxio driver has been marked unsupported and is now deprecated. ``use_unsupported_driver`` will need to be set to ``True`` in the driver's section in cinder.conf to use it. If its support status does not change, the driver will be removed in the Queens development cycle. .. releasenotes/notes/mark-synology-deprecated-134ba9764e14af67.yaml @ b'31ad999435d5e3b03cb96aeb4b8ebdcb2fff70c2' - The Synology driver has been marked as unsupported and is now deprecated. ``enable_unsupported_driver`` will need to be set to ``True`` in the driver's section in ``cinder.conf`` to continue to use it. If its support status does not change, the driver will be removed in the Queens development cycle. .. releasenotes/notes/mark-tegile-deprecated-1effb23010ea997c.yaml @ b'943f3e0660b04e982f95ef5f2fe6385787f7d509' - The Tegile driver has been marked as unsupported and is now deprecated. ``enable_unsupported_driver`` will need to be set to ``True`` in the driver's section in cinder.conf to continue to use it. If its support status does not change, they will be removed in the Queens development cycle. .. releasenotes/notes/mark-violin-unsupported-fdf6b34cf9847359.yaml @ b'061464fa0756f0037c525bac77c00247635a9951' - The Violin drivers have been marked as unsupported and are now deprecated. ``enable_unsupported_drivers`` will need to be set to ``True`` in cinder.conf to continue to use them. If its support status does not change it will be removed in the next release. .. releasenotes/notes/mark-xio-deprecated-18c914e15695d793.yaml @ b'346f51e6cfae7d1586c7fbc27329ed9cf48aae5f' - The X-IO driver has been marked as unsupported and is now deprecated. ``enable_unsupported_driver`` will need to be set to ``True`` in the driver's section in cinder.conf to continue to use it. If its support status does not change, they will be removed in the Queens development cycle. .. releasenotes/notes/mark-zte-unsupported-3c048e419264eca2.yaml @ b'54583a40dfc896b800d9ab3c8e4425da7a1a240b' - The ZTE driver has been marked as unsupported and is now deprecated. ``enable_unsupported_driver`` will need to be set to ``True`` in the driver's section in cinder.conf to continue to use it. If its support status does not change, they will be removed in the Queens development cycle. .. releasenotes/notes/new-nova-config-section-2a7a51a0572e7064.yaml @ b'9f213981ac349e0fa22a1aed217dbe7aee3813ae' - The os_privileged_xxx and nova_xxx in the [default] section are deprecated in favor of the settings in the [nova] section. .. 
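The note above deprecates the ``os_privileged_xxx`` and ``nova_xxx`` options in favour of a ``[nova]`` section. A heavily hedged sketch using standard keystoneauth credential options; all values are placeholders and the exact option set should be checked against the generated sample configuration::

    [nova]
    # Endpoint selection for the compute service (placeholders).
    region_name = RegionOne
    interface = public
    # Standard keystoneauth credentials.
    auth_type = password
    auth_url = http://controller:5000/v3
    username = cinder
    password = secret
    project_name = service
    user_domain_name = Default
    project_domain_name = Default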
releasenotes/notes/remove-mirrorpolicy-parameter-from-huawei-driver-d32257a60d32fd90.yaml @ b'6e74dbd4c3c4d6a5d6d77998e48b690d23209366' - Remove mirror policy parameter from huawei driver. .. releasenotes/notes/scaleio-deprecate-1.32-32033134fec181bb.yaml @ b'a4acf1268d65ff850304e859375b962486664e5a' - Support for ScaleIO 1.32 is now deprecated and will be removed in a future release. .. releasenotes/notes/scaleio-deprecate-config-1aa300d0c78ac81c.yaml @ b'b12b865ac5fdae72972b8f3416b56f9e7332f995' - The ScaleIO Driver has deprecated several options specified in ``cinder.conf``: * ``sio_protection_domain_id`` * ``sio_protection_domain_name``, * ``sio_storage_pool_id`` * ``sio_storage_pool_name``. Users of the ScaleIO Driver should now utilize the ``sio_storage_pools`` options to provide a list of protection_domain:storage_pool pairs. .. releasenotes/notes/scaleio-deprecate-config-1aa300d0c78ac81c.yaml @ b'b12b865ac5fdae72972b8f3416b56f9e7332f995' - The ScaleIO Driver has deprecated the ability to specify the protection domain, as ``sio:pd_name``, and storage pool, as ``sio:sp_name``, extra specs in volume types. The supported way to specify a specific protection domain and storage pool in a volume type is to define a ``pool_name`` extra spec and set the value to the appropriate ``protection_domain_name:storage_pool_name``. .. releasenotes/notes/smbfs-drop-alloc-data-file-8b94da952a3b1548.yaml @ b'792da5dbbf854a3f23414cf4c53babd44db033cf' - The 'smbfs_allocation_info_file_path' SMBFS driver config option is now deprecated as we're no longer using a JSON file to store volume allocation data. This file had a considerable chance of getting corrupted. .. _Pike Series Release Notes_11.0.0_stable_pike_Bug Fixes: Bug Fixes --------- .. releasenotes/notes/add-filter-to-group-snapshots-74sd8g138a289dh4.yaml @ b'cb5aaf0bcb894a141a9bfb50b9aff4fb209fc850' - Add filter, sorter and pagination support in group snapshot listings. .. releasenotes/notes/backend-options-ed19e6c63b2b9090.yaml @ b'1f62a411f4c241f9105a8ffb53fa2e7a1f71902a' - Cinder stopped supporting single-backend configurations in Ocata. However, sample ``cinder.conf`` was still generated with driver-related options in ``[DEFAULT]`` section, where those options had no effect at all. Now all of driver options are listed in ``[backend_defaults]`` section, that indicates that those options are effective only in this section and ``[]`` sections listed in ``enabled_backends``. .. releasenotes/notes/bug-1660927-netapp-no-copyoffload-77fc3cf4f2cf2335.yaml @ b'5043f56cb65defd5f623881584681ae814da1a4e' - Fixed misleading error message when NetApp copyoffload tool is not in place during image cloning. .. releasenotes/notes/bug-1667071-dc6407f40a1f7d15.yaml @ b'b245225d5e67120dfe7aee5e941f381846c89423' - Modifying the extra-specs of an in use Volume Type was something that we've unintentionally allowed. The result is unexpected or unknown volume behaviors in cases where a type was modified while a volume was assigned that type. This has been particularly annoying for folks that have assigned the volume-type to a different/new backend device. In case there are customers using this "bug" we add a config option to retain the bad behavior "allow_inuse_volume_type_modification", with a default setting of False (Don't allow). Note this config option is being introduced as deprecated and will be removed in a future release. It's being provided as a bridge to not break upgrades without notice. .. 
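The ScaleIO deprecation notes above replace the per-domain and per-pool options with ``sio_storage_pools`` and a ``pool_name`` extra spec. A hypothetical sketch; the backend, domain, pool, and type names are made up::

    [scaleio-1]
    # protection_domain:storage_pool pairs available to this backend.
    sio_storage_pools = domain1:pool1, domain1:pool2

A specific pool can then be selected through the volume type::

    cinder type-key scaleio_type set pool_name=domain1:pool1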
releasenotes/notes/bug-1670260-fix-boolean-is_public-d16e1957c0f09d65.yaml @ b'd8928c20671a23b26ae9d8e76e95d62a174b8300' - Fixed issue where ``create`` and ``update`` api's of ``volume-type`` and ``group_type`` were returning 500 error if boolean 'is_public' value passed in the form of string. Now user can pass following valid boolean values to these api's: '0', 'f', 'false', 'off', 'n', 'no', '1', 't', 'true', 'on', 'y', 'yes' .. releasenotes/notes/bug-1671220-4d521be71d0b8aa4.yaml @ b'9ed8c61ec5745f7e07e7eb78888e3e76fcd5b289' - Fixed consistency groups API which was always returning groups scoped to project ID from user context instead of given input project ID. .. releasenotes/notes/bug-1693084-fix-az-cache-invalid-6td4q74q28uxcd68.yaml @ b'9f213981ac349e0fa22a1aed217dbe7aee3813ae' - Now cinder will refresh the az cache immediately if previous create volume task failed due to az not found. .. releasenotes/notes/bug-1705375-prohibit-group-deletion-if-groupsnapshot-exists.yaml @ b'252ff38a9dbe9751a54a0ca9e88d30020cc58296' - Prohibit the deletion of group if group snapshot exists. .. releasenotes/notes/bug-1706888-update-backend-when-extending-3e4a9831a0w29d68.yaml @ b'a8776a726ea6320e2985b6c12f580ea8b17d21d2' - Update backend state in scheduler when extending volume. .. releasenotes/notes/check-displayname-displaydescription-123sd5gef91acb12.yaml @ b'52fb5585bc7b4b4a781089d141df333a3202e1fd' - Add 'display_name' and 'display_description' validation for creating/updating snapshot and volume operations. .. releasenotes/notes/check-snapshots-when-cascade-deleting-transferred-volume-575ef0b76bd7f334.yaml @ b'74ad916490a9fb34a256ed93fe7250e206afd930' - After transferring a volume without snapshots from one user project to another user project, if the receiving user uses cascade deleting, it will cause some exceptions in driver and volume will be error_deleting. Adding additional check to ensure there are no snapshots left in other project when cascade deleting a tranferred volume. .. releasenotes/notes/create_volume_from_encrypted_image-9666e1ed7b4eab5f.yaml @ b'a76fda426979ce79e9055b56ef47bf9f5b1ad912' - Creating a new volume from an image that was created from an encrypted Cinder volume now succeeds. .. releasenotes/notes/new-nova-config-section-2a7a51a0572e7064.yaml @ b'9f213981ac349e0fa22a1aed217dbe7aee3813ae' - Fixed using of the user's token in the nova client (`bug #1686616 `_) .. releasenotes/notes/nfs_backup_no_overwrite-be7b545453baf7a3.yaml @ b'535e71797031c3d3e3a5e2023c5ede470b02e3a7' - Fix NFS backup driver, we now support multiple backups on the same container, they are no longer overwritten. .. releasenotes/notes/pure-default-replica-interval-07de0a56f61c7c1e.yaml @ b'0d02e6f6b15f290ead2f61a5b96411408519c122' - Fixes an issue where starting the Pure volume drivers with replication enabled and default values for pure_replica_interval_default would cause an error to be raised from the backend. .. releasenotes/notes/qb-backup-5b1f2161d160648a.yaml @ b'43eb121b4110f0e87a36dba1ddbf89d3ebfbd199' - A bug in the Quobyte driver was fixed that prevented backing up volumes and snapshots .. releasenotes/notes/redundancy-in-volume-url-4282087232e6e6f1.yaml @ b'00006260d2f0d34cc2f090f4bfda32643c709b62' - Fixes a bug that prevented the configuration of multiple redundant Quobyte registries in the quobyte_volume_url config option. .. 
releasenotes/notes/snapshot_backing_up_status_support-164fbbb2a564e137.yaml @ b'9f213981ac349e0fa22a1aed217dbe7aee3813ae' - When backing up a volume from a snapshot, the volume status would be set to "backing-up", preventing operations on the volume until the backup is complete. This status is now set on the snapshot instead, making the volume available for other operations. .. releasenotes/notes/support-tenants-project-in-attachment-list-3edd8g138a28s4r8.yaml @ b'9f213981ac349e0fa22a1aed217dbe7aee3813ae' - Add ``all_tenants``, ``project_id`` support in the attachment list and detail APIs. .. releasenotes/notes/validate_vol_create_uuids-4f08b4ef201385f6.yaml @ b'2d4a8048762b6453b075c29c58c7ab063a9102cf' - The create volume api will now return 400 error instead of 404/500 if user passes non-uuid values to consistencygroup_id, source_volid and source_replica parameters in the request body. .. releasenotes/notes/verify-dorado-luntype-for-huawei-driver-4fc2f4cca3141bb3.yaml @ b'05427efcceaab2f1bbf5c04adc30f99550c157d7' - Add 'LUNType' configuration verification for Huawei driver when connecting to Dorado array. Because Dorado array only supports 'Thin' lun type, so 'LUNType' only can be configured as 'Thin', any other type is invalid and if 'LUNType' not explicitly configured, by default use 'Thin' for Dorado array. .. releasenotes/notes/win-iscsi-config-portals-51895294228d7883.yaml @ b'b2ddad27522a79e7d18e5a6c74776c82faf12fc6' - The Windows iSCSI driver now honors the configured iSCSI addresses, ensuring that only those addresses will be used for iSCSI traffic. .. releasenotes/notes/zfssa-iscsi-multi-connect-3be99ee84660a280.yaml @ b'278ad6a2bd8a8401ce40d57a8a243500d11b1c17' - Oracle ZFSSA iSCSI - allows a volume to be connected to more than one connector at the same time, which is required for live-migration to work. ZFSSA software release 2013.1.3.x (or newer) is required for this to work. .. _Pike Series Release Notes_11.0.0_stable_pike_Other Notes: Other Notes ----------- .. releasenotes/notes/lvm-type-default-to-auto-a2ad554fc8bb25f2.yaml @ b'8c57c6d3ee32c6ad3db7f4936412aa4773ff5ada' - Modify default lvm_type setting from thick to auto. This will result in Cinder preferring thin on init, if there are no LV's in the VG it will create a thin-pool and use thin. If there are LV's and no thin-pool it will continue using thick. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315528.0 cinder-27.0.0/releasenotes/source/queens.rst0000664000175000017500000016215100000000000021120 0ustar00zuulzuul00000000000000=========================== Queens Series Release Notes =========================== .. _Queens Series Release Notes_12.0.10-10_stable_queens: 12.0.10-10 ========== .. _Queens Series Release Notes_12.0.10-10_stable_queens_Bug Fixes: Bug Fixes --------- .. releasenotes/notes/bug-1773725-xtremio-remove-provisioning-factor-y7r5uy3489yd9pbf.yaml @ b'121515cf596ead8fbe9a4c3967bf1eacf975e738' - The XtremIO driver has been fixed to correctly report the "free_capacity_gb" size. .. releasenotes/notes/bug-fix-1867163-27afa39ac77b9e15.yaml @ b'748fc29254785d22c4623c0e5ec9bd71f0ef6365' - PowerMax Driver - Issue with upgrades from pre Pike to Pike and later. The device is not found when trying to snapshot a legacy volume. .. _Queens Series Release Notes_12.0.10_stable_queens: 12.0.10 ======= .. _Queens Series Release Notes_12.0.10_stable_queens_Bug Fixes: Bug Fixes --------- .. 
releasenotes/notes/bugfix-1744692-5aebd0c97ae66407.yaml @ b'772897cad777716e55216f59d0e11ec191b31c80' - Fixes a bug that prevented distributed file system drivers from creating snapshots during volume clone operations (NFS, WindowsSMBFS, VZstorage and Quobyte drivers). Fixing this allows creating snapshot based backups. .. releasenotes/notes/detachedinstanceerror-64be35894c624eae.yaml @ b'2abd8a68bb1bde9adc7c870a1b827731ad4e42e9' - Fix DetachedInstanceError is not bound to a Session for VolumeAttachments. This affected VolumeList.get_all, and could make a service fail on startup and make it stay in down state. .. releasenotes/notes/hpe-3par-specify-nsp-for-fc-bootable-volume-f372879e1b625b4d.yaml @ b'9d4e3674dbba71182122557fa1bb04c3afdf9f91' - `Bug 1809249 `_ - 3PAR driver adds the config option `hpe3par_target_nsp` that can be set to the 3PAR backend to use when multipath is not enabled and the Fibre Channel Zone Manager is not used. .. _Queens Series Release Notes_12.0.8_stable_queens: 12.0.8 ====== .. _Queens Series Release Notes_12.0.8_stable_queens_Bug Fixes: Bug Fixes --------- .. releasenotes/notes/fix-multiattach-deletion-b3990acf1f5fd378.yaml @ b'3c4b0c130d26177bab09d167e96c3d7b32e9e04b' - Fixed NetApp SolidFire bug that avoided multiatached volumes to be deleted. .. releasenotes/notes/kaminario-cinder-driver-bug-44c728f026394a85.yaml @ b'8d7620b478e31510e5f1ec74eb8fb488d45a7873' - Kaminario FC and iSCSI drivers: Fixed `bug 1829398 `_ where force detach would fail. .. releasenotes/notes/netapp-non-discovery-19af4e10f7b190ea.yaml @ b'7cc7322a0fb5a9c7f965bdf80de4c0cf71fdfe37' - NetApp iSCSI drivers no longer use the discovery mechanism for multipathing and they always return all target/portals when attaching a volume. Thanks to this, volumes will be successfully attached even if the target/portal selected as primary is down, this will be the case for both, multipath and single path connections. .. _Queens Series Release Notes_12.0.7_stable_queens: 12.0.7 ====== .. _Queens Series Release Notes_12.0.7_stable_queens_Bug Fixes: Bug Fixes --------- .. releasenotes/notes/bug-1773446-984d76ed29445c9b.yaml @ b'8d7675783b6453c8b7ed3396b681be74f04708ce' - Fixed group availability zone-backend host mismatch [`Bug 1773446 `_]. .. _Queens Series Release Notes_12.0.6_stable_queens: 12.0.6 ====== .. _Queens Series Release Notes_12.0.6_stable_queens_Bug Fixes: Bug Fixes --------- .. releasenotes/notes/vnx-update-sg-in-cache-3ecb673727bea79b.yaml @ b'29d42fa73448b3d23d9da4db7b2f407694681ece' - Dell EMC VNX Driver: Fixes `bug 1817385 `__ to make sure the sg can be created again after it was destroyed under `destroy_empty_storage_group` setting to `True`. .. _Queens Series Release Notes_12.0.5_stable_queens: 12.0.5 ====== .. _Queens Series Release Notes_12.0.5_stable_queens_Known Issues: Known Issues ------------ .. releasenotes/notes/lio-multiattach-disabled-a6ee89072fe5d032.yaml @ b'7503af11e6d00be718de054981fb5969fd0e4a5c' - Multiattach support is disabled for the LVM driver when using the LIO iSCSI target. This functionality will be fixed in a later release. .. _Queens Series Release Notes_12.0.5_stable_queens_Upgrade Notes: Upgrade Notes ------------- .. releasenotes/notes/bug-1805550-default-policy-file-db15eaa76fefa115.yaml @ b'61e90d528d444fd98d6f34c4b0f81e4ac1e1f0d4' - Beginning with Cinder version 12.0.0, as part of the Queens release "policies in code" community effort, Cinder has had the ability to run without a policy file because sensible default values are specified in the code. 
Customizing the policies in effect at your site, however, still requires a policy file. The default location of this file has been ``/etc/cinder/policy.json`` (although the documentation has indicated otherwise). With this release, the default location of this file is changed to ``/etc/cinder/policy.yaml``. Some points to keep in mind: - The policy file to be used may be specified in the ``/etc/cinder/cinder.conf`` file in the ``[oslo_policy]`` section as the value of the ``policy_file`` configuration option. That way there's no question what file is being used. - To find out what policies are available and what their default values are, you can generate a sample policy file. To do this, you must have a local copy of the Cinder source code repository. From the top level directory, run the command:: tox -e genpolicy This will generate a file named ``policy.yaml`` in the ``etc/cinder`` directory of your checked-out Cinder repository. - The sample file is YAML (because unlike JSON, YAML allows comments). If you prefer, you may use a JSON policy file. - Beginning with Cinder 12.0.0, you only need to specify policies in your policy file that you want to **differ** from the default values. Unspecified policies will use the default values *defined in the code*. Given that a default value *must* be specified *in the code* when a new policy is introduced, the ``default`` policy, which was formerly used as a catch-all for policy targets that were not defined elsewhere in the policy file, has no effect. We mention this because an old upgrade strategy was to use the policy file from the previous release with ``"default": "role:admin"`` (or ``"default": "!"``) so that newly introduced actions would be blocked from end users until the operator had time to assess the implications of exposing these actions. This strategy no longer works. Hopefully this isn't a problem because we're defining sensible defaults in the code. It would be a good idea, however, to generate the sample policy file with each release (see instructions above) to verify this for yourself. .. _Queens Series Release Notes_12.0.5_stable_queens_Bug Fixes: Bug Fixes --------- .. releasenotes/notes/bug-1790141-vmax-powermaxos-upgrade-fix-4c76186cfca66790.yaml @ b'4818a540c3e8a96ad3478333a072582ba256ee9a' - PowerMax driver - Workload support was dropped in ucode 5978. If a VMAX All Flash array is upgraded to 5978 or greater and existing volume types leveraged workload e.g. DSS, DSS_REP, OLTP and OLTP_REP, certain operations will no longer work and the volume type will be unusable. This fix addresses these issues and fixes problems with using old volume types with workloads included in the volume type pool_name. .. releasenotes/notes/bug-1799221-fix-truncated-volumes-in-case-of-glance-errors-6cae19218249c3cf.yaml @ b'937af5be0e7c17b34860e25cc434a219d7143387' - Fixed a bug which could create volumes with invalid content in case of unhandled errors from glance client (Bug `#1799221 `_). .. releasenotes/notes/bug-reno-69539ecb9b0b5464.yaml @ b'ed2c7b90376fa6f5cd6ef8f46fe9b2d408b0b756' - The Solidfire cinder driver has been fixed to ensure delete happens on the correct volume. .. releasenotes/notes/fix-import-backup-quota-issue-8yh69hd19u7tuu23.yaml @ b'acf16280a48dffbf21358b1c3b7484445c0a2b7c' - Cinder will now consume quota when importing new backup resource. .. releasenotes/notes/fix-netapp-cg-da4fd6c396e5bedb.yaml @ b'e201d5fcb51fa4fa5292768ad31fcd952a7979e2' - Fixes a bug in NetApp SolidFire where the deletion of group snapshots was failing. 
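The policy-file note in the Upgrade Notes above mentions that the file in use can be made explicit. A minimal ``cinder.conf`` sketch, using the new default location named above::

    [oslo_policy]
    policy_file = /etc/cinder/policy.yaml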
.. releasenotes/notes/fix-netapp-force_detach-36bdf75dd2c9a030.yaml @ b'a5edf0b62215fa3335f60115303cae9210408385' - Fixes force_detach behavior for volumes in NetApp SolidFire driver. .. releasenotes/notes/storwize-hyperswap-host-site-update-621e763768fab9ee.yaml @ b'5d19cab2922ae26c91953d1618c7f06c05a8e96a' - Updated the parameter storwzie_preferred_host_site from StrOpt to DictOpt in cinder back-end configuration, and removed it from volume type configuration. .. _Queens Series Release Notes_12.0.4_stable_queens: 12.0.4 ====== .. _Queens Series Release Notes_12.0.4_stable_queens_New Features: New Features ------------ .. releasenotes/notes/netapp-log-filter-f3256f55c3ac3faa.yaml @ b'a3564892f97b5ee64b0b6a146383d0e10fd76c17' - The NetApp ONTAP driver supports a new configuration option ``netapp_api_trace_pattern`` to enable filtering backend API interactions to log. This option must be specified in the backend section when desired and it accepts a valid python regular expression. .. releasenotes/notes/nimble-retype-support-18f717072948ba6d.yaml @ b'ecb06ef6fc8403140639a2c1b0fac49bf2c7480d' - Support for retype and volume migration for HPE Nimble Storage driver. .. _Queens Series Release Notes_12.0.4_stable_queens_Upgrade Notes: Upgrade Notes ------------- .. releasenotes/notes/nec-delete-volume-per-limit-d10b9df86f64b80e.yaml @ b'06b9876207ca81b50a877774b5968decaf1833ca' - In NEC driver, the number of volumes in a storage pool is no longer limited to 1024. More volumes can be created with storage firmware revision 1015 or later. .. _Queens Series Release Notes_12.0.4_stable_queens_Security Issues: Security Issues --------------- .. releasenotes/notes/scaleio-zeropadding-a0273c56c4d14fca.yaml @ b'f0cef07bef5ea8ed29179ee3774df5f4a634ba86' - Removed the ability to create volumes in a ScaleIO Storage Pool that has zero-padding disabled. A new configuration option ``sio_allow_non_padded_volumes`` has been added to override this new behavior and allow unpadded volumes, but should not be enabled if multiple tenants will utilize volumes from a shared Storage Pool. .. _Queens Series Release Notes_12.0.4_stable_queens_Bug Fixes: Bug Fixes --------- .. releasenotes/notes/bugfix-netapp-driver-cinder-ipv6-c3c4d0d6a7d0de91.yaml @ b'40eaa89a9001edda3e1146831b26aaf7ded64c4a' - Fixed support for IPv6 on management and data paths for NFS, iSCSI and FCP NetApp ONTAP drivers. .. releasenotes/notes/fix-quota-deleting-temporary-volume-274e371b425e92cc.yaml @ b'23729bdda4099908a205f6d60f64ffad712f820e' - Fix a quota usage error triggered by a non-admin user backing up an in-use volume. The forced backup uses a temporary volume, and quota usage was incorrectly updated when the temporary volume was deleted after the backup operation completed. Fixes `bug 1778774 `__. .. releasenotes/notes/force-delete-mv-a53924f09c475386.yaml @ b'869fab393b13d056a702c899d073c48b6bea6f50' - Volume "force delete" was introduced with the 3.23 API microversion, however the check for in the service was incorrectly looking for microversion 3.2. That check has now been fixed. It is possible that an API call using a microversion below 3.23 would previously work for this call, which will now fail. This closes `bug #1783028 `_. .. releasenotes/notes/netapp-ontap-fix-force-detach-55be3f4ac962b493.yaml @ b'98ee144c3af5d824c2a828a8b727b9dd95efa659' - Fixed bug #1783582, where calls to os-force_detach were failing on NetApp ONTAP iSCSI/FC drivers. .. 
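The ``netapp_api_trace_pattern`` note above says the option takes a Python regular expression and belongs in the backend section. A hypothetical sketch; the section name and pattern are made up::

    [ontap-nfs-1]
    # Filter which backend API interactions are logged (valid Python regex).
    netapp_api_trace_pattern = ^(volume|lun).*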
releasenotes/notes/ssl-cert-fix-42e8f263c15d5343.yaml @ b'cf22fa2875f1787889e6eeea693b7c81528e2918' - VMAX driver - fixes SSL certificate verification error. .. releasenotes/notes/unity-return-logged-out-initiator-6ab1f96f21bb284c.yaml @ b'e8c223f1f9041a2204138ea587a25728c4dba6fd' - Dell EMC Unity Driver: Fixes `bug 1773305 `__ to return the targets which connect to the logged-out initiators. Then the zone manager could clean up the FC zone based on the correct target wwns. .. _Queens Series Release Notes_12.0.3_stable_queens: 12.0.3 ====== .. _Queens Series Release Notes_12.0.3_stable_queens_New Features: New Features ------------ .. releasenotes/notes/unity-remove-empty-host-17d567dbb6738e4e.yaml @ b'24bd0c4b645dbcb99977e6a3e16c51979455b1eb' - Dell EMC Unity Driver: Adds support for removing empty host. The new option named `remove_empty_host` could be configured as `True` to notify Unity driver to remove the host after the last LUN is detached from it. .. _Queens Series Release Notes_12.0.3_stable_queens_Bug Fixes: Bug Fixes --------- .. releasenotes/notes/bug-1712651-7bc90264eb5001ea.yaml @ b'41735db868fc1de2dac313ea60742e7e1cc76289' - NetApp ONTAP iSCSI (bug 1712651): Fix ONTAP NetApp iSCSI driver not raising a proper exception when trying to extend an attached volume beyond its max geometry. .. releasenotes/notes/bug-1762424-f76af2f37fe408f1.yaml @ b'e0a1269d8728d8e1fa15d9c89ed7a2f90cab6b77' - NetApp ONTAP (bug 1762424): Fix ONTAP NetApp driver not being able to extend a volume to a size greater than the corresponding LUN max geometry. .. releasenotes/notes/bug-1775518-fix-unity-empty-list-issue-2d6b7c33aae1ffcc.yaml @ b'c7f9d29e17523653b7d2f0eb2b109798235b6ebb' - Dell EMC Unity: Fixes bug 1775518 to make sure driver succeed to initialize even though the value of unity_io_ports and unity_storage_pool_names are empty .. _Queens Series Release Notes_12.0.2_stable_queens: 12.0.2 ====== .. _Queens Series Release Notes_12.0.2_stable_queens_New Features: New Features ------------ .. releasenotes/notes/feature-rbd-exclusive-pool-a9bdebdeb1f0bf37.yaml @ b'21821c16580377c4e6443d0b440f41cb7de0ca8d' - When using the RBD pool exclusively for Cinder we can now set `rbd_exclusive_cinder_pool` to `true` and Cinder will use DB information to calculate provisioned size instead of querying all volumes in the backend, which will reduce the load on the Ceph cluster and the volume service. .. releasenotes/notes/sync-bump-versions-a1e6f6359173892e.yaml @ b'25c737d6b8e19a1932696554e47dd262ae651592' - Cinder-manage DB sync command can now bump the RPC and Objects versions of the services to avoid a second restart when doing offline upgrades. .. releasenotes/notes/unity-enable-ssl-14db2497225c4395.yaml @ b'685de5a7b683552899fc0fd6c095d35b6a9bf555' - Dell EMC Unity Cinder driver allows enabling/disabling the SSL verification. Admin can set `True` or `False` for `driver_ssl_cert_verify` to enable or disable this function, alternatively set the `driver_ssl_cert_path=` for customized CA path. Both above 2 options should go under the driver section. .. _Queens Series Release Notes_12.0.2_stable_queens_Known Issues: Known Issues ------------ .. 
releasenotes/notes/feature-rbd-exclusive-pool-a9bdebdeb1f0bf37.yaml @ b'21821c16580377c4e6443d0b440f41cb7de0ca8d' - If RBD stats collection is taking too long in your environment, perhaps even leading to the service appearing as down, you'll want to use the `rbd_exclusive_cinder_pool = true` configuration option if you are using the pool exclusively for Cinder, and maybe even if you are not and can live with the inaccuracy. .. _Queens Series Release Notes_12.0.2_stable_queens_Upgrade Notes: Upgrade Notes ------------- .. releasenotes/notes/sync-bump-versions-a1e6f6359173892e.yaml @ b'25c737d6b8e19a1932696554e47dd262ae651592' - On offline upgrades, due to the rolling upgrade mechanism, the cinder services have to be restarted twice to complete the installation, just like in the rolling upgrade case: first you stop the cinder services, then you upgrade them, sync your DB, start all the cinder services, and then restart them all. To avoid this last restart, the DB sync can now be instructed to bump the services after the migration is completed; the command to do this is `cinder-manage db sync --bump-versions` .. _Queens Series Release Notes_12.0.2_stable_queens_Bug Fixes: Bug Fixes --------- .. releasenotes/notes/bug-1690954-40fc21683977e996.yaml @ b'64df0693991bd3815acc8e445da912a499198f7e' - NetApp ONTAP NFS (bug 1690954): Fix wrong usage of export path as volume name when deleting volumes and snapshots. .. releasenotes/notes/fail-detach-lun-when-auto-zone-enabled-9c87b18a3acac9d1.yaml @ b'febe57cfee50425c0fc9945169b5f9c3898cbdfe' - Dell EMC Unity Driver: Fixes `bug 1759175 `__ to detach the lun correctly when auto zone was enabled and the lun was the last one attached to the host. .. releasenotes/notes/netapp-ontap-use_exact_size-d03c90efbb8a30ac.yaml @ b'c664f08fe2bd44b209a1f75a58d6dca86200b2fc' - Fixed bug #1731474 on NetApp Data ONTAP driver that was causing LUNs to be created with larger size than requested. This fix requires version 9.1 of ONTAP or later. .. releasenotes/notes/sync-bump-versions-a1e6f6359173892e.yaml @ b'25c737d6b8e19a1932696554e47dd262ae651592' - After an offline upgrade we had to restart all Cinder services twice; now, with the `cinder-manage db sync --bump-versions` command, we can avoid the second restart. .. _Queens Series Release Notes_12.0.2_stable_queens_Other Notes: Other Notes ----------- .. releasenotes/notes/vnx-perf-optimize-bd55dc3ef7584228.yaml @ b'e78fe7d62ce51f0216a8059bf01f18f5cf905d37' - Dell EMC VNX driver: Enhances the performance of create/delete volume. .. _Queens Series Release Notes_12.0.1_stable_queens: 12.0.1 ====== .. _Queens Series Release Notes_12.0.1_stable_queens_New Features: New Features ------------ .. releasenotes/notes/bug-1686745-e8f1569455f998ba.yaml @ b'9a3fab147ef1182b5149fc1ccbefa0e6cebf1492' - Add support to force detach a volume from all hosts on 3PAR. .. releasenotes/notes/tpool-size-11121f78df24db39.yaml @ b'5dc330a2cb8ed1f28115c28f094900349a33ae20' - Adds support to configure the size of the native thread pool used by the cinder volume and backup services. For the backup we use `backup_native_threads_pool_size` in the `[DEFAULT]` section, and for the backends we use `backend_native_threads_pool_size` in the driver section. .. _Queens Series Release Notes_12.0.1_stable_queens_Bug Fixes: Bug Fixes --------- ..
releasenotes/notes/dell-emc-sc-bugfix-1756914-ffca3133273040f6.yaml @ b'96b77e0aa62790320d16f7229aaeeb650fa875b6' - Dell EMC SC driver correctly returns initialize_connection data when more than one IQN is attached to a volume. This fixes some random Nova Live Migration failures where the connection information being returned was for an IQN other than the one for which it was being requested. .. releasenotes/notes/fix-abort-backup-df196e9dcb992586.yaml @ b'd65444ce7df88292581d0726f9eb633be2292287' - We no longer leave orphaned chunks on the backup backend or leave a temporary volume/snapshot when aborting a backup. .. releasenotes/notes/fix-cross-az-migration-ce97eff61280e1c7.yaml @ b'c2df706c023d752825c250c4ceb2f98f5ce5a476' - Resolve issue with cross AZ migrations and retypes where the destination volume kept the source volume's AZ, so we ended up with a volume where the AZ does not match the backend. (bug 1747949) .. releasenotes/notes/migrate-backup-encryption-keys-to-barbican-6f07fd48d4937b2a.yaml @ b'bc76abef28b34723abdbd29881553a1af94b024b' - When encryption keys based on the ConfKeyManager's fixed_key are migrated to Barbican, ConfKeyManager keys stored in the Backup table are included in the migration process. Fixes `bug 1757235 `__. .. releasenotes/notes/tpool-size-11121f78df24db39.yaml @ b'5dc330a2cb8ed1f28115c28f094900349a33ae20' - Fixes a concurrency issue on backups, where only 20 native threads could be executed concurrently. The default is now 60, and it can be changed with `backup_native_threads_pool_size`. .. releasenotes/notes/tpool-size-11121f78df24db39.yaml @ b'5dc330a2cb8ed1f28115c28f094900349a33ae20' - The RBD driver can have bottlenecks if too many slow operations are happening at the same time (for example, many huge volume deletions); the `backend_native_threads_pool_size` option in the RBD driver section can now be used to resolve the issue. .. _Queens Series Release Notes_12.0.0_stable_queens: 12.0.0 ====== .. _Queens Series Release Notes_12.0.0_stable_queens_New Features: New Features ------------ .. releasenotes/notes/3par-get-capability-de60c9bc7ae51c14.yaml @ b'9e46d6e1a62a60ff95c503d14b1f0f5ecb8b9ccf' - Added get capability feature for HPE-3PAR. .. releasenotes/notes/add-availability_zone-filter-for-snapshot-8e1494212276abde.yaml @ b'0f5a7f3ac31f73c12f627f54e6c41449cce99b98' - Added availability_zone filter for snapshots list. .. releasenotes/notes/add-count-info-in-list-api-e43wac44yu750c23.yaml @ b'23b74639848df37ee25f0d613062862749c7e42d' - Added count info in volume, snapshot and backup's list APIs since 3.45. .. releasenotes/notes/add-datacore-volume-driver-3775797b0515f538.yaml @ b'5f0ea63b60dbec6175145f975789253d2a956384' - Added iSCSI and Fibre Channel volume drivers for DataCore's SANsymphony and Hyper-converged Virtual SAN storage. .. releasenotes/notes/add_multiattach_policies-8e0b22505ed6cbd8.yaml @ b'76f2158d47df2b112293f3463feb1caf5a0db04b' - Added policies to disallow multiattach operations. This includes two policies: the first is a general policy that allows the creation or retyping of multiattach volumes, a volume create policy with the name ``volume:multiattach``. The second policy is specifically for disallowing the ability to create multiple attachments on a volume that is marked as bootable, and is an attachment policy with the name ``volume:multiattach_bootable_volume``.
The default for these new policies is ``rule:admin_or_owner``; be aware that if you wish to disable either of these policies for your users you will need to modify the default policy settings. .. releasenotes/notes/add_replication_failback_to_solidfire-82668c071f4fa91d.yaml @ b'e7498ca5bdd6e16b46e8a6d17dbc7492f6e710e9' - Add ability to call failover-host on a replication enabled SF cluster a second time with host id = default to initiate a failback to the default configured SolidFire Cluster. .. releasenotes/notes/allow-encrypted-rbd-volumes-35d3536505e6309b.yaml @ b'fcb45b439ba039fd88c332fd912949d52cfe290f' - LUKS Encrypted RBD volumes can now be created by cinder-volume. This capability was previously blocked by the rbd volume driver due to the lack of any encryptors capable of attaching to an encrypted RBD volume. These volumes can also be seeded with RAW image data from Glance through the use of QEMU 2.10 and the qemu-img convert command. .. releasenotes/notes/backup-driver-configuration-36357733962dab03.yaml @ b'de2ffaff36e3713e3862b15816f59c4d3dd8abca' - Add ability to specify backup driver via class name. .. releasenotes/notes/bp-inspur-instorage-driver-40371862c9559238.yaml @ b'e7362103c67579bf7caf1437afc3d4518923c8a6' - New Cinder volume driver for Inspur InStorage. The new driver supports iSCSI. .. releasenotes/notes/bp-provisioning-improvements-bb7e28896e2a2539.yaml @ b'f98c9da944b46875cdec91cf4c0c28ce89e1ac6a' - Cinder now supports the use of 'max_over_subscription_ratio = auto' which automatically calculates the value for max_over_subscription_ratio in the scheduler. .. releasenotes/notes/bp-vmware-fcd-fbe19ee577d2e9e4.yaml @ b'377549c67c4290532df602e89e6f9e6193ab188d' - Added backend driver for VMware VStorageObject (First Class Disk). .. releasenotes/notes/bug-1730933-1bb0272e3c51eed3.yaml @ b'ac0583c94acb8f960e9b6e242ce3e6eb604962c0' - The Quobyte Cinder driver now supports identifying Quobyte mounts via the mounts fstype field. .. releasenotes/notes/ds8k_async_clone_volume-25232c55da921202.yaml @ b'1d9d7c00b40a529af2dc6cd4672dca14be53a9b6' - Added support for cloning volume asynchronously, it can be enabled by option async_clone set to true in parameter metadata when creating volume from volume or snapshot. .. releasenotes/notes/hpe3par-replication-group-a18a28d18de09e95.yaml @ b'3da071994b76346a823e3bd1b5aa9582e7163cd0' - Added replication group support in HPE 3PAR cinder driver. .. releasenotes/notes/infinidat-max-osr-2d9fd2d0f9424657.yaml @ b'88080cb45c1aaadae3aa284384ce595ff4a9a067' - Added support for oversubscription in thin provisioning in the INFINIDAT InfiniBox driver. To use oversubscription, define ``max_over_subscription_ratio`` in the cinder configuration file. .. releasenotes/notes/k2-disable-discovery-bca0d65b5672ec7b.yaml @ b'c9ec9b9bd755f042dbe1ccbdc5e3ff87fa60269c' - Kaminario K2 iSCSI driver now supports non discovery multipathing (Nova and Cinder won't use iSCSI sendtargets) which can be enabled by setting `disable_discovery` to `true` in the configuration. .. releasenotes/notes/migrate-fixed-key-to-barbican-91dfcb829efd4bb6.yaml @ b'189a1096da2b0ad6b51fd5943a385a89f56a18c4' - When Barbican is the encryption key_manager backend, any encryption keys associated with the legacy ConfKeyManager will be automatically migrated to Barbican. All database references to the ConfKeyManager's all-zeros key ID will be updated with a Barbican key ID. The encryption keys do not change. Only the encryption key ID changes. 
Key migration is initiated on service startup, and entries in the cinder-volume log will indicate the migration status. Log entries will indicate when a volume's encryption key ID has been migrated to Barbican, and a summary log message will indicate when key migration has finished. .. releasenotes/notes/nec-manage-unmanage-06f9beb3004fc227.yaml @ b'db7d054d33da4ca4abaf16dedaf95d1c020fe981' - Support manage/unmanage volume and manage/unmanage snapshot functions for the NEC volume driver. .. releasenotes/notes/policy-in-code-226f71562ab28195.yaml @ b'9fe72de4b690bc5c964c12715581128830c667d5' - Cinder now supports policy in code, which means that if users don't need to modify any of the default policy rules, they do not need a policy file. Users can modify/generate a `policy.yaml` file which will override specific policy rules from their defaults. .. releasenotes/notes/ps-report-total-volumes-8aa447c50f2474a7.yaml @ b'a8a4518580d321604fbd54a04996aba9ee02cb25' - Dell EMC PS volume driver reports the total number of volumes on the backend in volume stats. .. releasenotes/notes/qnap-enhance-support-4ab5cbb110b3303b.yaml @ b'08dcf03541995cc9f8a22232bf738967e4b6570b' - Add enhanced support to the QNAP Cinder driver, including 'CHAP', 'Thin Provision', 'SSD Cache', 'Dedup' and 'Compression'. .. releasenotes/notes/qnap-support-qes-200-2a3dda49afe14103.yaml @ b'5c32be5a6de64f3a853924c2e82fb1d99acde712' - QNAP Cinder driver added support for QES fw 2.0.0. .. releasenotes/notes/rbd-driver-assisted-migration-2d29788243060f77.yaml @ b'dd119d5620bebc59a72f4fb1e1b795f56da5db64' - Added driver-assisted volume migration to RBD driver. This allows a volume to be efficiently copied by Ceph from one pool to another within the same cluster. .. releasenotes/notes/rbd-stats-report-0c7e803bb0b1aedb.yaml @ b'8469109016bcfd5806e230202e1996a8ba649535' - RBD driver supports returning a static total capacity value instead of the dynamic value it has been reporting. This is configurable with the `report_dynamic_total_capacity` configuration option. .. releasenotes/notes/rbd-support-list-manageable-volumes-8a088a44e01d227f.yaml @ b'164246094e90af6f63f63c321514b07a655941a9' - Allow rbd driver to list manageable volumes. .. releasenotes/notes/readd-qnap-driver-e1dc6b0c3fabe30e.yaml @ b'14dea86f5dbdfded0b23afa6ac454f9914ac0a77' - Re-added QNAP Cinder volume driver. .. releasenotes/notes/report-backend-state-in-service-list-1e4ee5a2c623671e.yaml @ b'0dc8390e11cfe0946ea61350a82e3e8e0c1c6e4d' - Added "backend_state: up/down" in the response body of service list if the context is admin. This feature helps operators or a cloud management system get the backend device state for every service. If the device state is *down*, it indicates that the storage device has problems, which gives more information to locate bugs quickly. .. releasenotes/notes/revert-volume-to-snapshot-6aa0dffb010265e5.yaml @ b'd317c54edb2bfcfef523e5ccbc0119c78539824e' - Added revert volume to snapshot in 3par driver. .. releasenotes/notes/scaleio-backup-via-snapshot-8e75aa3f4570e17c.yaml @ b'7b5bbc951aa41174c58c53ec361b4337125ae66a' - Add support to backup volume using snapshot in the Unity driver, which enables backing up of volumes that are in-use. .. releasenotes/notes/scaleio-enable-multiattach-e7d84ffa282842e9.yaml @ b'5f0ea63b60dbec6175145f975789253d2a956384' - The multiattach capability has been enabled and verified as working with the ScaleIO driver.
It is the user's responsibility to add some type of exclusion (at the file system or network file system layer) to prevent multiple writers from corrupting data on the volume. .. releasenotes/notes/smbfs-fixed-image-9b642b63fcb79c18.yaml @ b'54c2787132396a73a45682133f66777ba1eb2085' - The SMBFS volume driver can now be configured to use fixed vhd/x images through the 'nas_volume_prov_type' config option. .. releasenotes/notes/smbfs-manage-unmanage-f1502781dd5f82cb.yaml @ b'ed945da6bf05475c272443d2eebacfc79c389926' - The SMBFS driver now supports the volume manage/unmanage feature. Images residing on preconfigured shares may be listed and managed by Cinder. .. releasenotes/notes/smbfs-revert-snapshot-5b265ed5ded951dc.yaml @ b'e8715f690e61557d08c6df9040a2e4d87d3e6bad' - The SMBFS volume driver now supports reverting volumes to the latest snapshot. .. releasenotes/notes/storpool-volume-driver-4d5f16ad9c2f373a.yaml @ b'b5832afb3a7e04b4709be6ab863d0281c75616b3' - The StorPool backend driver was added. .. releasenotes/notes/storwize-backup-snapshot-support-728e18dfa0d42943.yaml @ b'f68847353e46c8729d8fc13d2e53608c72c159c7' - Add backup snapshots support for Storwize/SVC driver. .. releasenotes/notes/storwize-cg-replication-b038ff0d39fe909f.yaml @ b'24e4c3ea684c7d418c2de5ac8c46d175819a4b42' - Add consistent replication group support in Storwize Cinder driver. .. releasenotes/notes/storwize-disable-create-volume-with-non-cgsnap-group-6cba8073e3d6cadd.yaml @ b'b03a23618122ee8abf596a471da326bfcb9e1710' - Disable creating volume with non cg_snapshot group_id in Storwize/SVC driver. .. releasenotes/notes/storwize-hyperswap-support-b830182e1058cb4f.yaml @ b'c0d471a42461ccc50dbb4b27cfcdd1f4282f3880' - Added hyperswap volume and group support in Storwize cinder driver. Storwize/svc versions prior to 7.6 do not support this feature. .. releasenotes/notes/storwize-revert-snapshot-681c76d68676558a.yaml @ b'f701d091bea170e595ed47469a60c4e148a2edcd' - Add reverting to snapshot support in Storwize Cinder driver. .. releasenotes/notes/support-create-volume-from-backup-d363e2b502a76dc2.yaml @ b'39694623e421e1f0149bff2ea62345d93eed425e' - Starting with API microversion 3.47, Cinder now supports the ability to create a volume directly from a backup. For instance, you can use the command: ``cinder create --backup-id `` in cinderclient. .. releasenotes/notes/unity-force-detach-7c89e72105f9de61.yaml @ b'b44721dfacc2b4b7f4b3bf07f813800c597a576a' - Add support to force detach a volume from all hosts on Unity. .. releasenotes/notes/validate-expired-user-tokens-40b15322197653ae.yaml @ b'826b72ea09a5a5703d732c2abd18b8e8a92b982b' - Added support for Keystone middleware feature to pass service token along with the user token for Cinder to Nova and Glance services. This will help get rid of user token expiration issues during long running tasks e.g. creating volume snapshot (Cinder->Nova) and creating volume from image (Cinder->Glance) etc. To use this functionality a service user needs to be created first. Add the service user configurations in ``cinder.conf`` under ``service_user`` group and set ``send_service_user_token`` flag to ``True``. .. releasenotes/notes/vmax-iscsi-chap-authentication-e47fcfe310b85f7b.yaml @ b'77055e7cc688492a22ac7ba40f38bd78259c9b32' - Add chap authentication support for the vmax backend. .. releasenotes/notes/vmax-manage-unmanage-snapshot-3805c4ac64b8133a.yaml @ b'7dda6ef758bb08712855d32293bd973c65f90c22' - Support for manage/ unmanage snapshots on VMAX cinder driver. .. 
releasenotes/notes/vmax-replication-enhancements-c3bec80a3abb6d2e.yaml @ b'84e39916c71ca56ebe5ae14c34dc16dbb359ed05' - Added asynchronous remote replication support in Dell EMC VMAX cinder driver. .. releasenotes/notes/vmax-replication-enhancements2-0ba03224cfca9959.yaml @ b'925bdfbb06e31d5ad2240c803102e8a5ff309c5a' - Support for VMAX SRDF/Metro on VMAX cinder driver. .. releasenotes/notes/vmax-replication-group-2f65ed92d761f90d.yaml @ b'c6b0c4bca66153634a0685f370283b16fe8e0345' - Add consistent replication group support in Dell EMC VMAX cinder driver. .. releasenotes/notes/vmax-revert-volume-to-snapshot-b4a837d84a8b2a85.yaml @ b'cf40a001dac4d2f63165b6e4bbd14acb1d09ed54' - Support for reverting a volume to a previous snapshot in VMAX cinder driver. .. releasenotes/notes/vmware-vmdk-revert-to-snapshot-ee3d638565649f44.yaml @ b'01971c9cb6cb555d0c440ffbb7332f18ed553930' - Added support for revert-to-snapshot in the VMware VMDK driver. .. releasenotes/notes/vmware-vmdk-snapshot-template-d3dcfc0906c02edd.yaml @ b'f36fc239804fb8fbf57d9df0320e2cb6d315ea10' - VMware VMDK driver now supports vSphere template as a volume snapshot format in vCenter server. The snapshot format in vCenter server can be specified using driver config option ``vmware_snapshot_format``. .. releasenotes/notes/vmware_lazy_create-52f52f71105d2067.yaml @ b'18c8af402b057768f56cbcb68b1d00b0447eba4e' - VMware VMDK driver now supports a config option ``vmware_lazy_create`` to disable the default behavior of lazy creation of raw volumes in the backend. .. releasenotes/notes/vmware_retype_adapter_type-dbd8935b8d3bcb1b.yaml @ b'52d2ef021fab8513c68bbf40a9e3990c09920f33' - VMware VMDK driver now supports changing adpater type using retype. To change the adapter type, set ``vmware:adapter_type`` in the new volume type. .. releasenotes/notes/vmware_vmdk_managed_by-3de05504d0f9a65a.yaml @ b'14ff0cc2bd5d6cb91766f7ff6cf83f18d23ac8cd' - The volumes created by VMware VMDK driver will be displayed as "managed by OpenStack Cinder" in vCenter server. .. releasenotes/notes/vnx-add-force-detach-support-26f215e6f70cc03b.yaml @ b'e91e7d5e2f599bc43ecdfbd0d7d5ede2ee813fac' - Add support to force detach a volume from all hosts on VNX. .. releasenotes/notes/vzstorage-log-path-7539342e562a2e4a.yaml @ b'f9ebdbf09d331a683a26b5e626fac0888e7317b9' - Logging path can now be configured for vzstorage driver in shares config file (specified by vzstorage_shares_config option). To set custom logging path add `'-l', ''` to mount options array. Otherwise default logging path `/var/log/vstorage//cinder.log.gz` will be used. .. releasenotes/notes/vzstorage-volume-format-cde85d3ad02f6bb4.yaml @ b'1f69f7507e2c8e0b65516710e974ba6932b5f5a2' - VzStorage volume driver now supports choosing desired volume format by setting vendor property 'vz:volume_format' in volume type metadata. Allowed values are 'ploop', 'qcow2' and 'raw'. .. releasenotes/notes/xtremio-ig-cleanup-bbb4bee1f1e3611c.yaml @ b'645bda4f48482f27e7d71776af02561004069315' - Added new option to delete XtremIO initiator groups after the last volume was detached from them. Cleanup can be enabled by setting ``xtremio_clean_unused_ig`` to ``True`` under the backend settings in cinder.conf. .. _Queens Series Release Notes_12.0.0_stable_queens_Known Issues: Known Issues ------------ .. 
releasenotes/notes/k2-non-unique-fqdns-b62a269a26fd53d5.yaml @ b'baa8626eac9c975b719c03274d42b54ce3de74fe' - Kaminario K2 now supports networks with duplicated FQDNs via the configuration option `unique_fqdn_network`, so attaching in these networks will work (bug #1720147). .. _Queens Series Release Notes_12.0.0_stable_queens_Upgrade Notes: Upgrade Notes ------------- .. releasenotes/notes/add_multiattach_policies-8e0b22505ed6cbd8.yaml @ b'76f2158d47df2b112293f3463feb1caf5a0db04b' - Added policies to disallow multiattach operations. This includes two policies: the first is a general policy that allows the creation or retyping of multiattach volumes, a volume create policy with the name ``volume:multiattach``. The second policy is specifically for disallowing the ability to create multiple attachments on a volume that is marked as bootable, and is an attachment policy with the name ``volume:multiattach_bootable_volume``. The default for these new policies is ``rule:admin_or_owner``; be aware that if you wish to disable either of these policies for your users you will need to modify the default policy settings. .. releasenotes/notes/backup-driver-configuration-36357733962dab03.yaml @ b'de2ffaff36e3713e3862b15816f59c4d3dd8abca' - Operators should change the backup driver configuration value to use the class name to keep the backup service working in the 'S' release. .. releasenotes/notes/bp-remove-netapp-7mode-drivers-c38398e54662f2d4.yaml @ b'425f45a311dc78ff34a18ffea7dbf5bb6dd2d421' - Support for NetApp ONTAP 7 (previously known as "Data ONTAP operating in 7mode") has been removed. The NetApp Unified driver can now only be used with NetApp Clustered Data ONTAP and NetApp E-Series storage systems. This removal affects all three storage protocols that were supported for ONTAP 7 - iSCSI, NFS and FC. Deployers are advised to consult the `migration support `_ provided to transition from ONTAP 7 to the Clustered Data ONTAP operating system. .. releasenotes/notes/bug-1714209-netapp-ontap-drivers-oversubscription-issue-c4655b9c4858d7c6.yaml @ b'42b8b7fe60ffdd7a7772dc0ab228265dc83344bc' - If using the NetApp ONTAP drivers (7mode/cmode), the configuration value for "max_over_subscription_ratio" may need to be increased to avoid scheduling problems where storage pools that previously were valid to schedule new volumes suddenly appear to be out of space to the Cinder scheduler. See documentation `here `_. .. releasenotes/notes/castellan-backend-0c49591a54821c45.yaml @ b'e75be5d90519094fca3ee475b906e7c2fe1d09fd' - The support for ``cinder.keymgr.barbican.BarbicanKeyManager`` and the ``[keymgr]`` config section has now been removed. All configs should now be switched to use ``castellan.key_manager.barbican_key_manager.BarbicanKeyManager`` and the ``[key_manager]`` config section. .. releasenotes/notes/db-schema-from-newton-79b18439bd15e4c4.yaml @ b'a9afbddd11fd5cd82f88e51170633b58cbcb8ecc' - The Cinder database can now only be upgraded from changes since the Newton release. In order to upgrade from a version prior to that, you must now upgrade to at least Newton first, then to Queens or later. .. releasenotes/notes/deprecate_hosts_api_extension-fe0c042af10a20db.yaml @ b'74746b3407684df6a6e687ce502ffdc7c57f44ab' - The hosts api extension is now deprecated and will be removed in a future version. .. releasenotes/notes/glance-v1-removed-5121af3bef285324.yaml @ b'd76fef6bf454d1aa3a3c111567126d3a837ea9e3' - The Glance v1 API has been deprecated and will soon be removed.
Cinder support for using the v1 API was deprecated in the Pike release and is now no longer available. The ``glance_api_version`` configuration option to support version selection has now been removed. .. releasenotes/notes/lvm-thin-overprovision-1d279f66ee2252ff.yaml @ b'9d4922771383bdd24261cde95ce322d7e04d67f3' - The default value has been removed for the LVM specific `lvm_max_over_subscription_ratio` setting. This changes the behavior so that LVM backends now adhere to the common `max_over_subscription_ratio` setting. The LVM specific config option may still be used, but it is now deprecated and will be removed in a future release. .. releasenotes/notes/mark-cisco-zm-unsupported-57e5612f57e2407b.yaml @ b'c92c428233df7b42bea05bf5468771c07fa8e51b' - The Cisco Fibre Channel Zone Manager driver has been marked as unsupported and is now deprecated. ``enable_unsupported_driver`` will need to be set to ``True`` in the driver's section in cinder.conf to continue to use it. .. releasenotes/notes/nec-auto-accesscontrol-55f4b090e8128f5e.yaml @ b'd4dd162bcddba85dee5920e147e0b9ce189be276' - Added automatic configuration of SAN access control for the NEC volume driver. .. releasenotes/notes/nec-delete-unused-parameter-367bc9447acbb03e.yaml @ b'9974c39f0355bd0a0c3c3364297688de2eccf467' - In NEC driver, the deprecated configuration parameter `ldset_controller_node_name` was deleted. .. releasenotes/notes/pure-default-replica-interval-07de0a56f61c7c1e.yaml @ b'f82d2bf6f13a360f6a1c08066cf682e2e07043db' - The default value for pure_replica_interval_default used by Pure Storage volume drivers has changed from 900 to 3600 seconds. .. releasenotes/notes/queens-driver-removal-72a1a36689b6d890.yaml @ b'9d3be35cd6ad3b40983f43ce0cc4c2cf9bdcd807' - The following volume drivers were deprecated in the Pike release and have now been removed: * Block device driver * Blockbridge * Coho * FalconStor FSS * Infortrend * QNAP * Reduxio * Tegile * Violin * X-IO * ZTE .. releasenotes/notes/rbd-stats-report-0c7e803bb0b1aedb.yaml @ b'8469109016bcfd5806e230202e1996a8ba649535' - RBD/Ceph backends should adjust `max_over_subscription_ratio` to take into account that the driver is no longer reporting volume's physical usage but it's provisioned size. .. releasenotes/notes/remove-block-device-driver-14f76dca2ee9bd38.yaml @ b'711e88a8f9f8322f02a434bbe00417580715cacd' - BlockDeviceDriver was deprecated in Ocata release and marked as 'unsupported'. There is no CI for it too. If you used this driver before you have to migrate your volumes to LVM with LIO target yourself before upgrading to Queens release to get your volumes working. .. releasenotes/notes/remove-deprecated-keymgr-d11a25c620862ed6.yaml @ b'ef2202b6adc5d817b26559a9e20b536a547bca65' - The old deprecated ``keymgr`` options have been removed. Configuration options using the ``[keymgr]`` group will not be applied anymore. Use the ``[key_manager]`` group from Castellan instead. The Castellan ``backend`` options should also be used instead of ``api_class``, as most of the options that lived in Cinder have migrated to Castellan. - Instead of ``api_class`` option ``cinder.keymgr.barbican.BarbicanKeyManager``, use ``backend`` option `barbican`` - ``cinder.keymgr.conf_key_mgr.ConfKeyManager`` still remains, but the ``fixed_key`` configuration options should be moved to the ``[key_manager]`` section .. 
releasenotes/notes/remove-deprecated-nova-opts-b1ec66fe3a9bb3b9.yaml @ b'c463c6f50c5d1cf6277539626fe386f6d4df6355' - Removed the deprecated options for the Nova connection: os_privileged_user{name, password, tenant, auth_url}, nova_catalog_info, nova_catalog_admin_info, nova_endpoint_template, nova_endpoint_admin_template, nova_ca_certificates_file, nova_api_insecure. From Pike, using the [nova] section is preferred to configure the compute connection for Guest Assisted Snapshots or the InstanceLocalityFilter. .. releasenotes/notes/remove-hitachi-57d0b37cb9cc7e13.yaml @ b'55d726e5c366834a4dc3131326e9bd3850a6e22f' - The Hitachi HNAS, HBSD, and VSP volume drivers were marked as deprecated in the Pike release and have now been removed. Hitachi storage drivers are now only available directly from Hitachi. .. releasenotes/notes/remove-hp3par-config-options-3cf0d865beff9018.yaml @ b'b36ec9c29b742b416f2eba5cb6a5563d85c3c7af' - The old deprecated ``hp3par*`` options have been removed. Use the ``hpe3par*`` options instead. .. releasenotes/notes/remove-nas-ip-config-option-8d56c14f1f4614fc.yaml @ b'd3d53eeb84b417f83db5995bd4640b768d6763bf' - The old deprecated ``nas_ip`` option has been removed. Use the ``nas_host`` option instead. .. releasenotes/notes/remove-netapp-teseries-thost-type-config-option-908941dc7d2a1d59.yaml @ b'93b4b27dccf5a317ceb12880ab48e39bc4c2b24c' - The old deprecated ``netapp_eseries_host_type`` option has been removed. Use the ``netapp_host_type`` option instead. .. releasenotes/notes/remove-pybasedir-config-option-572604d26a57ba5e.yaml @ b'b8a553dfedc9fb2667945cf7b158c64edd05a05e' - The old deprecated ``pybasedir`` option has been removed. Use the ``state_path`` option instead. .. releasenotes/notes/remove_osapi_volume_base_url-33fed24c4ad1b2b6.yaml @ b'efc9016055b81872eb548f2a61b55d651f912658' - The `osapi_volume_base_URL` config option was deprecated in Pike and has now been removed. The `public_endpoint` config option should be used instead. .. releasenotes/notes/removed-apiv1-616b1b76a15521cf.yaml @ b'3e91de956e1947a7014709010b99df380242ac74' - The Cinder API v1 was deprecated in the Juno release and defaulted to be disabled in the Ocata release. It is now removed completely. If upgrading from a previous version, it is recommended you edit your `/etc/cinder/api-paste.ini` file to remove all references to v1. .. releasenotes/notes/rename-windows-iscsi-a7b0ca62a48c1371.yaml @ b'0914b850f9d850543dedb4183d427462ee994a4c' - The Windows iSCSI driver has been renamed. The updated driver location is ``cinder.volume.drivers.windows.iscsi.WindowsISCSIDriver``. .. releasenotes/notes/type-extra-spec-policies-b7742b0ac2732864.yaml @ b'7bd2950ad53603457f539d7afa54c710137313fc' - When managing volume types, an OpenStack provider is now given more control to grant access for different storage type operations. The provider can now customize access to type create, delete, update, list, and show using new entries in the cinder policy file. As an example, one provider may have roles called viewer, admin, type_viewer, and type_admin. Admin and type_admin can create, delete, and update types. Everyone can list the storage types. Admin, type_viewer, and type_admin can view the extra_specs.
"volume_extension:types_extra_specs:create": "rule:admin or rule:type_admin", "volume_extension:types_extra_specs:delete": "rule:admin or rule:type_admin", "volume_extension:types_extra_specs:index": "", "volume_extension:types_extra_specs:show": "rule:admin or rule:type_admin or rule:type_viewer", "volume_extension:types_extra_specs:update": "rule:admin or rule:type_admin" .. releasenotes/notes/update_config_options_disco_volume_driver-07e52aa43e83c243.yaml @ b'1a3f91662c383f105a74638c84655689ab5eac60' - We replaced the config option in the disco volume driver "disco_choice_client" with "disco_client_protocol". We add "san_api_port" as new config option in san driver for accessing the SAN API using this port. .. releasenotes/notes/vmware-vmdk-snapshot-template-d3dcfc0906c02edd.yaml @ b'f36fc239804fb8fbf57d9df0320e2cb6d315ea10' - VMware VMDK driver will use vSphere template as the default snapshot format in vCenter server. .. _Queens Series Release Notes_12.0.0_stable_queens_Deprecation Notes: Deprecation Notes ----------------- .. releasenotes/notes/backup-driver-configuration-36357733962dab03.yaml @ b'de2ffaff36e3713e3862b15816f59c4d3dd8abca' - Backup driver initialization using module name is deprecated. .. releasenotes/notes/castellan-backend-0c49591a54821c45.yaml @ b'e75be5d90519094fca3ee475b906e7c2fe1d09fd' - The Castellan library used for encryption has deprecated the ``api_class`` config option. Configuration files using this should now be updated to use the ``backend`` option instead. .. releasenotes/notes/deprecate-backup-service-to-driver-mapping-a3afabd4f55eca01.yaml @ b'1fedb7334bb0a3b6d585d00f91516ad2a9b4bde7' - Backup service to driver mapping is deprecated. If you use old values like 'cinder.backup.services.swift' or 'cinder.backup.services.ceph' it should be changed to 'cinder.backup.drivers.swift' or 'cinder.backup.drivers.ceph' accordingly to get your backup service working in the 'R' release. .. releasenotes/notes/deprecate-consistency-group-apis-0d9120d16f090781.yaml @ b'556ae86d382bc4bf9a4272884dd1f8ed5f694b4e' - The Consistency Group APIs have now been marked as deprecated and will be removed in a future release. Generic Volume Group APIs should be used instead. .. releasenotes/notes/deprecate_hosts_api_extension-fe0c042af10a20db.yaml @ b'74746b3407684df6a6e687ce502ffdc7c57f44ab' - The hosts api extension is now deprecated and will be removed in a future version. .. releasenotes/notes/deprecate_logs_commands-a0d59cb7535a2138.yaml @ b'7c00d9b966abac50ad5ad8664fbe327ba2aca10e' - Deprecate the "cinder-manage logs" commands. These will be removed in a later release. .. releasenotes/notes/lvm-thin-overprovision-1d279f66ee2252ff.yaml @ b'9d4922771383bdd24261cde95ce322d7e04d67f3' - The `lvm_max_overprovision_ratio` config option has been deprecated. It will be removed in a future release. Configurations should move to using the common `max_overprovision_ratio` config option. .. releasenotes/notes/mark-cisco-zm-unsupported-57e5612f57e2407b.yaml @ b'c92c428233df7b42bea05bf5468771c07fa8e51b' - The Cisco Firbre Channel Zone Manager driver has been marked as unsupported and is now deprecated. ``enable_unsupported_driver`` will need to be set to ``True`` in the driver's section in cinder.conf to continue to use it. If its support status does not change, they will be removed in the Queens development cycle. .. 
.. releasenotes/notes/rename-iscsi-target-config-options-24913d7452c4a58e.yaml @ b'4b092e8d9d6611d73e22177cb57581e3e2cecee3' - The ``iscsi_ip_address``, ``iscsi_port``, ``iscsi_helper``, ``iscsi_target_prefix`` and ``iscsi_protocol`` config options are deprecated in favor of ``target_ip_address``, ``target_port``, ``target_helper``, ``target_prefix`` and ``target_protocol`` accordingly. The old config options will be removed in the S release. .. releasenotes/notes/vmax-deprecate-backend-xml-708a41919bcc55a8.yaml @ b'ec7f04ee97d9484845c41b8c775ec248da8cda4b' - The use of xml files for vmax backend configuration is now deprecated and will be removed during the following release. Deployers are encouraged to use cinder.conf for configuring connections to the vmax. .. _Queens Series Release Notes_12.0.0_stable_queens_Bug Fixes: Bug Fixes --------- .. releasenotes/notes/bug-1560867-support-nova-specific-image-7yt6fd1173c4e3wd.yaml @ b'25dd8109df4425e7f470429956d093bf59fcf669' - Fix the bug that Cinder can't support creating a volume from a Nova specific image which only includes ``snapshot-id`` metadata (Bug .. releasenotes/notes/bug-1587376-fix-manage-resource-quota-issue-78f59f39b9fa4762.yaml @ b'e72f0fdf2678482723e95bcc89a8c2117865c8a7' - Fix the bug that Cinder would commit quota twice in a clean environment when managing volume and snapshot resources (Bug .. releasenotes/notes/bug-1632333-netapp-ontap-copyoffload-downloads-glance-image-twice-08801d8c7b9eed2c.yaml @ b'c27173bad69da4889a5237cf2becc14bb6fc578a' - Fixed bug 1632333 with the NetApp ONTAP Driver. Now the copy offload method is invoked early to avoid downloading Glance images twice. .. releasenotes/notes/bug-1691771-fix-netapp-manage-volumes-62bec192a08b3ceb.yaml @ b'4b874c5ddc154629a82814d26b64b7eb0c0fb5d6' - The NetApp cDOT driver operating with NFS protocol has been fixed to manage volumes correctly when the ``nas_secure_file_operations`` option has been set to False. .. releasenotes/notes/bug-1699936-fix-host-show-incorrect-fg8698gu7y6r7d15.yaml @ b'f50b3555773a1559e29c75ac48857b50cea8dfe5' - Now the ``os-host show`` API will count a project's resources correctly. .. releasenotes/notes/bug-1714209-netapp-ontap-drivers-oversubscription-issue-c4655b9c4858d7c6.yaml @ b'42b8b7fe60ffdd7a7772dc0ab228265dc83344bc' - The ONTAP drivers ("7mode" and "cmode") have been fixed to not report consumed space as "provisioned_capacity_gb". They instead rely on the cinder scheduler's calculation of "provisioned_capacity_gb". This fixes the oversubscription miscalculations with the ONTAP drivers. This bugfix affects all three protocols supported by these drivers (iSCSI/FC/NFS). .. releasenotes/notes/bug-1718739-netapp-eseries-fix-provisioned-capacity-report-8c51fd1173c15dbf.yaml @ b'f905253b9443db1870c3d2b7b70e032bb089efa0' - The NetApp E-series driver has been fixed to correctly report the "provisioned_capacity_gb". Now it sums the capacity of all the volumes in the configured backend to get the correct value. This bug fix affects all the protocols supported by the driver (FC and iSCSI). .. releasenotes/notes/bug-1723226-allow-purging-0day-4de8979db7215cf3.yaml @ b'2a44b3cdba722682a326155060c12c51b5fca1fb' - Added ability to purge records less than 1 day old, using the cinder-manage db_purge utility. This helps especially for those testing scenarios in which a large number of volumes are created and deleted. (bug ..
releasenotes/notes/fix-backup-handling-of-encryption-key-id-f2fa56cadd80d582.yaml @ b'bec756e0401bfbb7a31a0532e4163fcf29126f32' - Fix the way encryption key IDs are managed for encrypted volume backups. When creating a backup, the volume's encryption key is cloned and assigned a new key ID. The backup's cloned key ID is now stored in the backup database so that it can be deleted whenever the backup is deleted. When restoring the backup of an encrypted volume, the destination volume is assigned a clone of the backup's encryption key ID. This ensures every restored backup has a unique encryption key ID, even when multiple volumes have been restored from the same backup. .. releasenotes/notes/fix-reserve-volume-policy-31790a8d865ee0a1.yaml @ b'678b9de0f43fa666946a064edc32f38514dfd593' - The reserve volume API was incorrectly enforcing "volume:retype" policy action. It has been corrected to "volume_extension:volume_actions:reserve". .. releasenotes/notes/fix-vol-image-metadata-endpoints-returning-none-ba0590e6c6757b0c.yaml @ b'b5f6c2864f5ca829854af5c12f37a3d49ccc9d5f' - Fix the following volume image metadata endpoints returning None following policy enforcement failure: * ``os-set_image_metadata`` * ``os-unset_image_metadata`` The endpoints will now correctly raise a 403 Forbidden instead. .. releasenotes/notes/group-update-d423eaa18dbcecc1.yaml @ b'fdfb2d51a4f362091ab5e94981d18d7741f11cf6' - Volume group updates of any kind had previously required the group to be in ``Available`` status. Updates to the group name or description will now work regardless of the volume group status. .. releasenotes/notes/netapp_fix_svm_scoped_permissions.yaml @ b'887797541dff6d2cd10265de26214bcf1515fcf7' - NetApp cDOT block and file drivers have improved support for SVM scoped user accounts. Features not supported for SVM scoped users include QoS, aggregate usage reporting, and dedupe usage reporting. .. releasenotes/notes/ps-duplicate-ACL-5aa447c50f2474e7.yaml @ b'22c09d57687b98faf4193cb1be3d738ddf3bbd28' - Dell EMC PS Series Driver code was creating duplicate ACL records during live migration. Fixes the initialize_connection code to not create access record for a host if one exists previously. This change fixes bug 1726591. .. releasenotes/notes/ps-extend_volume-no-snap-8aa447c50f2475a7.yaml @ b'0910706e762cec88a2b53e82bd7e6a1c372163b9' - Dell EMC PS Series Driver was creating unmanaged snapshots when extending volumes. Fixed it by adding the missing no-snap parameter. This changes fixes bug 1720454. .. releasenotes/notes/ps-optimize-parsing-8aa447c50f2474c7.yaml @ b'a9a0c2ee2e973d0594f2707d64846e882e179c94' - Dell EMC PS Series Driver code reporting volume stats is now optimized to return the information earlier and accelerate the process. This change fixes bug 1661154. .. releasenotes/notes/ps-over-subscription-ratio-cal-8aa447c50f2474a8.yaml @ b'761f0c3e66691e6f5c683a63a81beccbbca1cacf' - Dell EMC PS Driver stats report has been fixed, now reports the `provisioned_capacity_gb` properly. Fixes bug 1719659. .. releasenotes/notes/pure-default-replica-interval-07de0a56f61c7c1e.yaml @ b'f82d2bf6f13a360f6a1c08066cf682e2e07043db' - Fixes an issue where starting the Pure volume drivers with replication enabled and default values for pure_replica_interval_default would cause an error to be raised from the backend. .. 
releasenotes/notes/rbd-stats-report-0c7e803bb0b1aedb.yaml @ b'8469109016bcfd5806e230202e1996a8ba649535' - RBD stats report has been fixed, now properly reports `allocated_capacity_gb` and `provisioned_capacity_gb` with the sum of the sizes of the volumes (not physical sizes) for volumes created by Cinder and all available in the pool respectively. Free capacity will now properly handle quota size restrictions of the pool. .. releasenotes/notes/releasenotes/notes/bug-1735337-remove-skip-quota-validation-flag-2ecb24143f1f1292.yaml @ b'7310676502f34a3e38329995731e12bcd5331210' - Quota validations are now forced for all APIs. skip_validation flag is now removed from the request body for the quota-set update API. .. releasenotes/notes/windows-multiple-backends-9aa83631ad3d42f2.yaml @ b'3510f3860481482b2311ef3eef8b5fd6cabb2337' - Multiple backends may now be enabled within the same Cinder Volume service on Windows by using the ``enabled_backends`` config option. .. _Queens Series Release Notes_12.0.0_stable_queens_Other Notes: Other Notes ----------- .. releasenotes/notes/policy-in-code-226f71562ab28195.yaml @ b'9fe72de4b690bc5c964c12715581128830c667d5' - Default `policy.json` file is now removed as Cinder now uses default policies. A policy file is only needed if overriding one of the defaults. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315528.0 cinder-27.0.0/releasenotes/source/rocky.rst0000664000175000017500000013342500000000000020751 0ustar00zuulzuul00000000000000========================== Rocky Series Release Notes ========================== .. _Rocky Series Release Notes_13.0.9-7_stable_rocky: 13.0.9-7 ======== .. _Rocky Series Release Notes_13.0.9-7_stable_rocky_Bug Fixes: Bug Fixes --------- .. releasenotes/notes/bug-fix-1867163-27afa39ac77b9e15.yaml @ b'917e0b0cef91686787fe7cf4185cd4efb03d6361' - PowerMax Driver - Issue with upgrades from pre Pike to Pike and later. The device is not found when trying to snapshot a legacy volume. .. _Rocky Series Release Notes_13.0.9_stable_rocky: 13.0.9 ====== .. _Rocky Series Release Notes_13.0.9_stable_rocky_Bug Fixes: Bug Fixes --------- .. releasenotes/notes/support-incremental-backup-completion-in-rbd-1f2165fefcc470d1.yaml @ b'bce6d01e153100681540d29496d2332c45b53eb3' - Fixed issue where all Ceph RBD backups would be incremental after the first one. The driver now honors whether ``--incremental`` is specified or not. .. _Rocky Series Release Notes_13.0.8_stable_rocky: 13.0.8 ====== .. _Rocky Series Release Notes_13.0.8_stable_rocky_Bug Fixes: Bug Fixes --------- .. releasenotes/notes/bugfix-1744692-5aebd0c97ae66407.yaml @ b'b886d093d782301829afb42e6476b0f5a4678fba' - Fixes a bug that prevented distributed file system drivers from creating snapshots during volume clone operations (NFS, WindowsSMBFS, VZstorage and Quobyte drivers). Fixing this allows creating snapshot based backups. .. releasenotes/notes/detachedinstanceerror-64be35894c624eae.yaml @ b'eabf648b7acce33cb93d667e8f6e70505093b4c9' - Fix DetachedInstanceError is not bound to a Session for VolumeAttachments. This affected VolumeList.get_all, and could make a service fail on startup and make it stay in down state. .. _Rocky Series Release Notes_13.0.7_stable_rocky: 13.0.7 ====== .. _Rocky Series Release Notes_13.0.7_stable_rocky_Bug Fixes: Bug Fixes --------- .. 
releasenotes/notes/hpe-3par-specify-nsp-for-fc-bootable-volume-f372879e1b625b4d.yaml @ b'df7fd514a77734e5d57a56d5c0f34d0245efc86c' - `Bug 1809249 `_ - 3PAR driver adds the config option `hpe3par_target_nsp` that can be set to the 3PAR backend to use when multipath is not enabled and the Fibre Channel Zone Manager is not used. .. releasenotes/notes/sc-handle-multiattach-onterminate-6ab1f96f21bb284d.yaml @ b'767bdf1a5e28f18c12dba03adc429766f9be79d6' - Dell EMC SC Driver: Fixes `bug 1822229 `__ to handle the volume mappings in the backend when a volume is attached to multiple instances on the same host. .. _Rocky Series Release Notes_13.0.6_stable_rocky: 13.0.6 ====== .. _Rocky Series Release Notes_13.0.6_stable_rocky_Bug Fixes: Bug Fixes --------- .. releasenotes/notes/kaminario-cinder-driver-bug-44c728f026394a85.yaml @ b'4467c231b029e608104068d33780b8ad021c00d1' - Kaminario FC and iSCSI drivers: Fixed `bug 1829398 `_ where force detach would fail. .. releasenotes/notes/netapp-non-discovery-19af4e10f7b190ea.yaml @ b'8be2bb6d2a31b157a5247de9068806fd11ac4075' - NetApp iSCSI drivers no longer use the discovery mechanism for multipathing and they always return all target/portals when attaching a volume. Thanks to this, volumes will be successfully attached even if the target/portal selected as primary is down, this will be the case for both, multipath and single path connections. .. _Rocky Series Release Notes_13.0.4_stable_rocky: 13.0.4 ====== .. _Rocky Series Release Notes_13.0.4_stable_rocky_Bug Fixes: Bug Fixes --------- .. releasenotes/notes/bug-1773446-984d76ed29445c9b.yaml @ b'6ca3f2badfc76343db74ce6cea4b91b1c0fc6d17' - Fixed group availability zone-backend host mismatch [`Bug 1773446 `_]. .. releasenotes/notes/fix-multiattach-deletion-b3990acf1f5fd378.yaml @ b'10baccdccbccb2e08a834495ff909a23c08306dd' - Fixed NetApp SolidFire bug that avoided multiatached volumes to be deleted. .. releasenotes/notes/vnx-update-sg-in-cache-3ecb673727bea79b.yaml @ b'1f57f502c893a840ee5021fbca36ae95eed50e73' - Dell EMC VNX Driver: Fixes `bug 1817385 `__ to make sure the sg can be created again after it was destroyed under `destroy_empty_storage_group` setting to `True`. .. _Rocky Series Release Notes_13.0.3_stable_rocky: 13.0.3 ====== .. _Rocky Series Release Notes_13.0.3_stable_rocky_New Features: New Features ------------ .. releasenotes/notes/multiple_clone-82bd7f80ae439080.yaml @ b'90e3517dad84ca32d2490e942f0e7b933c167be3' - VMAX driver support for new configuration option - vmax_snapvx_unlink_limit for specifying the maximum number of unlinks which will be performed before a clone operation. Default value is 3 .. _Rocky Series Release Notes_13.0.3_stable_rocky_Upgrade Notes: Upgrade Notes ------------- .. releasenotes/notes/bug-1805550-default-policy-file-db15eaa76fefa115.yaml @ b'd07abe397c5bdfb45a82150fb7e1a9efaf21ce42' - Beginning with Cinder version 12.0.0, as part of the Queens release "policies in code" community effort, Cinder has had the ability to run without a policy file because sensible default values are specified in the code. Customizing the policies in effect at your site, however, still requires a policy file. The default location of this file has been ``/etc/cinder/policy.json`` (although the documentation has indicated otherwise). With this release, the default location of this file is changed to ``/etc/cinder/policy.yaml``. 
Some points to keep in mind: - The policy file to be used may be specified in the ``/etc/cinder/cinder.conf`` file in the ``[oslo_policy]`` section as the value of the ``policy_file`` configuration option. That way there's no question what file is being used. - To find out what policies are available and what their default values are, you can generate a sample policy file. To do this, you must have a local copy of the Cinder source code repository. From the top level directory, run the command:: tox -e genpolicy This will generate a file named ``policy.yaml`` in the ``etc/cinder`` directory of your checked-out Cinder repository. - The sample file is YAML (because unlike JSON, YAML allows comments). If you prefer, you may use a JSON policy file. - Beginning with Cinder 12.0.0, you only need to specify policies in your policy file that you want to **differ** from the default values. Unspecified policies will use the default values *defined in the code*. Given that a default value *must* be specified *in the code* when a new policy is introduced, the ``default`` policy, which was formerly used as a catch-all for policy targets that were not defined elsewhere in the policy file, has no effect. We mention this because an old upgrade strategy was to use the policy file from the previous release with ``"default": "role:admin"`` (or ``"default": "!"``) so that newly introduced actions would be blocked from end users until the operator had time to assess the implications of exposing these actions. This strategy no longer works. Hopefully this isn't a problem because we're defining sensible defaults in the code. It would be a good idea, however, to generate the sample policy file with each release (see instructions above) to verify this for yourself. .. _Rocky Series Release Notes_13.0.3_stable_rocky_Bug Fixes: Bug Fixes --------- .. releasenotes/notes/bug-1790141-vmax-powermaxos-upgrade-fix-4c76186cfca66790.yaml @ b'f9bfbec2b5e35d1ae8df51229dd4105221b2761f' - PowerMax driver - Workload support was dropped in ucode 5978. If a VMAX All Flash array is upgraded to 5978 or greater and existing volume types leveraged workload e.g. DSS, DSS_REP, OLTP and OLTP_REP, certain operations will no longer work and the volume type will be unusable. This fix addresses these issues and fixes problems with using old volume types with workloads included in the volume type pool_name. .. releasenotes/notes/bug-1799221-fix-truncated-volumes-in-case-of-glance-errors-6cae19218249c3cf.yaml @ b'bf89f76fb1b7a52299c17467106018eae01608e8' - Fixed a bug which could create volumes with invalid content in case of unhandled errors from glance client (Bug `#1799221 `_). .. releasenotes/notes/bug-reno-69539ecb9b0b5464.yaml @ b'6ceed81d4b25978ac64bf36afa45c021ef95d02f' - The Solidfire cinder driver has been fixed to ensure delete happens on the correct volume. .. _Rocky Series Release Notes_13.0.2_stable_rocky: 13.0.2 ====== .. _Rocky Series Release Notes_13.0.2_stable_rocky_Upgrade Notes: Upgrade Notes ------------- .. releasenotes/notes/cinder-manage-online-migrations-exit-status-7c16edb7facc37bb.yaml @ b'74fd810ad1a8561ba150925d19feb8cbe598fe84' - The ``cinder-manage db online_data_migrations`` command now returns exit status 2 in the case where some migrations failed (raised exceptions) and no others were completed successfully from the last batch attempted. This should be considered a fatal condition that requires intervention. 
Exit status 1 will be returned in the case where the ``--max-count`` option was used and some migrations failed but others succeeded (updated at least one row), because more work may remain for the non-failing migrations, and their completion may be a dependency for the failing ones. The command should be reiterated while it returns exit status 1, and considered completed successfully only when it returns exit status 0. .. _Rocky Series Release Notes_13.0.1_stable_rocky: 13.0.1 ====== .. _Rocky Series Release Notes_13.0.1_stable_rocky_Known Issues: Known Issues ------------ .. releasenotes/notes/lio-multiattach-disabled-a6ee89072fe5d032.yaml @ b'dd5a565c5ba587b0306bb29509293cf1b7c04bc3' - Multiattach support is disabled for the LVM driver when using the LIO iSCSI target. This functionality will be fixed in a later release. .. _Rocky Series Release Notes_13.0.1_stable_rocky_Bug Fixes: Bug Fixes --------- .. releasenotes/notes/bugfix-netapp-driver-cinder-ipv6-c3c4d0d6a7d0de91.yaml @ b'b290b49183200fb9dbc4fff655f00cfaf3c8b67a' - Fixed support for IPv6 on management and data paths for NFS, iSCSI and FCP NetApp ONTAP drivers. .. releasenotes/notes/fix-netapp-cg-da4fd6c396e5bedb.yaml @ b'fb69816509c86736a8cf8e1c9602873f3a787ed3' - Fixes a bug in NetApp SolidFire where the deletion of group snapshots was failing. .. releasenotes/notes/fix-netapp-force_detach-36bdf75dd2c9a030.yaml @ b'bfcd4b2f1be3ab37ae0801dd523ce952577cda14' - Fixes force_detach behavior for volumes in NetApp SolidFire driver. .. _Rocky Series Release Notes_13.0.0_stable_rocky: 13.0.0 ====== .. _Rocky Series Release Notes_13.0.0_stable_rocky_New Features: New Features ------------ .. releasenotes/notes/add-operation-to-request-spec-7yt6ub75uy1284as.yaml @ b'e1ec4b4c2e1f0de512f09e38824c1d7e2fa38617' - Now scheduler plugins are aware of operation type via ``operation`` attribute in RequestSpec dictionary, plugins can support backend filtering according to backend status as well as operation type. Current possible values for ``operation`` are: - create_volume - extend_volume - create_snapshot - retype_volume - migrate_volume - manage_existing - manage_existing_snapshot - create_group .. releasenotes/notes/add-split-logger-conf-option-0424e3bd91de3a5a.yaml @ b'70c48ff6bbf7c4094da34a1af256ac7fbc032399' - Added boolean conf option 'split_loggers' in [default] section of cinder.conf to `enable split logging`_ functionality. The default value of split_loggers option is set to False. Operator can set it's value to True to split HTTP content into subloggers to allow for fine-grained control of what is logged and how. This new config option 'split_loggers' should be enabled only when keystoneauth log level is set to DEBUG in 'default_log_levels' config option. .. _`enable split logging`: https://docs.openstack.org/keystoneauth/latest/using-sessions.html#logging .. releasenotes/notes/bp-nvmeof-lvm-target-b7771955b426abe7.yaml @ b'8d7e131c587f31d85c76f990998d411af490554f' - A new target, NVMET, is added for the LVM driver over RDMA, it allows cinder to use nvmetcli in order to create/delete subsystems on attaching/detaching an LVM volume to/from an instance. .. releasenotes/notes/bug-1686745-e8f1569455f998ba.yaml @ b'abca1abc7b01fc1d85af8b9cfa5b646abafc9d4a' - Add support to force detach a volume from all hosts on 3PAR. .. 
releasenotes/notes/capacity-based-qos-9f5d174658a40bd5.yaml @ b'29d2090aef7b31df23ca846d365c6d21957486ba' - Cinder now allows for capacity based QoS which can be useful in environments where storage performance scales with consumption (such as RBD backed storage). The newly added QoS specs are `read_iops_sec_per_gb`, `write_iops_sec_per_gb`, `total_iops_sec_per_gb`, `read_bytes_sec_per_gb`, `write_bytes_sec_per_gb` and `total_bytes_sec_per_gb`. These values will be multiplied by the size of the volume and passed to the consumer. For example, setting `total_iops_sec_per_gb` to 30 and setting `total_bytes_sec_per_gb` to `1048576` (1MB) then creating a 100 GB volume with that QoS will result in a volume with 3,000 total IOPs and 100MB/s throughput limit. .. releasenotes/notes/capacity-based-qos-minimum-values-b24a5f49c986f11d.yaml @ b'37f2bdcdec85f27651b91a9a2d0fddb66e7bfe8a' - Cinder now allows for a minimum value when using the capacity based QoS in order to make sure small volumes can get a minimum allocation for them to be usable. The newly added QoS specs are `read_iops_sec_per_gb_min`, `write_iops_sec_per_gb_min`, `total_iops_sec_per_gb_min`, `read_bytes_sec_per_gb_min`, `write_bytes_sec_per_gb_min` and `total_bytes_sec_per_gb_min` .. releasenotes/notes/cheesecake-promotion-30a3336fb911c3ad.yaml @ b'df81b59f9d1f70dcde002eb9252c55e46d77a5c0' - A new cinder-manage command, reset_active_backend, was added to promote a failed-over backend participating in replication. This allows you to reset a backend without manually editing the database. A backend undergoing promotion using this command is expected to be in a disabled and frozen state. Support for both standalone and clustered backend configurations are supported. .. releasenotes/notes/dell-emc-sc-api-timeouts-ce8d166e1847ea94.yaml @ b'1d6ad6ef179f465289c95b5f45ac79b0f03e0866' - Added dell_api_async_rest_timeout option to the Dell EMC SC driver. This is the timeout used for asynchronous REST calls to the Dell EMC SC REST API. Default is 15 seconds. .. releasenotes/notes/dell-emc-sc-api-timeouts-ce8d166e1847ea94.yaml @ b'1d6ad6ef179f465289c95b5f45ac79b0f03e0866' - Added dell_api_sync_rest_timeout option to the Dell EMC SC driver. This is the timeout used for synchronous REST calls to the Dell EMC SC REST API. Default is 30 seconds. .. releasenotes/notes/dell-emc-sc-mult-attach-d09cfd06ee8db8da.yaml @ b'2e82f0de90d077f7d87353b59c14f69bff3dbaa5' - Enabled Cinder Multi-Attach capability in the Dell EMC Storage Center Cinder driver. .. releasenotes/notes/ds8k-allow-multi-attach-41fa7bddbbd719ec.yaml @ b'3599eb5ba948e743ad63999dabeecafa86373ee0' - IBM DS8K driver has added multiattach support. .. releasenotes/notes/ds8k-report-backend-state-in-service-list-f0898950a0f4b122.yaml @ b'3e12e2f930be2563f69bed37906521687b9487ae' - Added flag 'backend_state' which will give backend state info in service list. .. releasenotes/notes/enable-multiattach-iscsi-fcp-netapp-driver-98ad2d75fbbf333f.yaml @ b'ac0c7d89ba4cb2b1dc894b1f5a1f2361bc36dc0f' - NetApp ONTAP iSCSI and FCP drivers multiattach capability enabled. .. releasenotes/notes/enable-multiattach-nfs-netapp-driver-406b9b285d85c989.yaml @ b'4e064a3ae7ddc305e7226432d5248f4d7ae6c77d' - NetApp ONTAP NFS multiattach capability enabled. .. releasenotes/notes/feature-abort-restore-fe1252288c59e105.yaml @ b'89f6291ee33780ed6d4e4886d5d18a0ce0cdb182' - Support backup restore cancelation by changing the backup status to anything other than `restoring` using `cinder backup-reset-state`. .. 
releasenotes/notes/feature-cross-az-backups-6b68c4c4456f2fd7.yaml @ b'5feaf74ccf10148859e206ce21bfd54dec2c1c16' - Cinder backup creation can now (since microversion 3.51) receive the availability zone where the backup should be stored. .. releasenotes/notes/feature-multi-process-backup-8cf5ad5a0cf9b2d5.yaml @ b'373b52404151d80e83004a37d543f825846edea1' - Cinder backup now supports running multiple processes to make the most of the available CPU cores. Performance gains will be significant when running multiple concurrent backups/restores with compression. The number of processes is set with the `backup_workers` configuration option. .. releasenotes/notes/feature-rbd-exclusive-pool-a9bdebdeb1f0bf37.yaml @ b'f33baccc3544cbda6cd5908328a56096046657ed' - When using the RBD pool exclusively for Cinder, we can now set `rbd_exclusive_cinder_pool` to `true` and Cinder will use DB information to calculate provisioned size instead of querying all volumes in the backend, which will reduce the load on the Ceph cluster and the volume service. .. releasenotes/notes/google-auth-for-gcs-backup-1642cd0e741fbdf9.yaml @ b'79d7a4e8da6f1118b5c235928876cf78085f4332' - The Google backup driver now supports the ``google-auth`` library, which is the preferred library if both ``google-auth`` (together with ``google-auth-httplib2``) and ``oauth2client`` libraries are present in the system. .. releasenotes/notes/infinidat-multi-attach-support-533b3e559c15801f.yaml @ b'666c0fc8db20dfe1d5adc036d24b52a3eaa1091a' - Support for volume multi-attach in the INFINIDAT InfiniBox driver. .. releasenotes/notes/inspur-instorage-fc-cinder-driver-70c13e4a64d785d5.yaml @ b'a6e79968ed237a7f0982cdc0d8fcf231d63b38fc' - New FC Cinder volume driver for Inspur Instorage. .. releasenotes/notes/netapp-log-filter-f3256f55c3ac3faa.yaml @ b'bb0aac560dbc5a2859f02824d36bf76d17039358' - The NetApp ONTAP driver supports a new configuration option ``netapp_api_trace_pattern`` to enable filtering backend API interactions to log. This option must be specified in the backend section when desired and it accepts a valid Python regular expression. .. releasenotes/notes/nexentaedge-iscsi-driver-302529c56cdbbf38.yaml @ b'e2bd03ef75b4417e3531186fd6cc0a270ffbd32c' - Added backend driver for Nexenta Edge iSCSI storage. .. releasenotes/notes/pure-active-cluster-edf8e7e80739b0f8.yaml @ b'715069f6155ca292dbe1bddef7b6bd1ec8ea0ccc' - Added support to Pure Storage Volume Drivers for Active Cluster using the standard replication APIs for the Block Storage Service. .. releasenotes/notes/pure-storage-multiattach-support-994da363e181d627.yaml @ b'8f4802baf51415abd660b1d9bc8bd73e539318e2' - Pure Storage FlashArray driver has added multiattach support. .. releasenotes/notes/qnap-support-qes-210-de75892f684cb9c3.yaml @ b'4dceb56f8890a61527a06520abc076c0b42cf92c' - QNAP Cinder driver added support for QES fw 2.1.0. .. releasenotes/notes/rbd-active-active-replication-b230367912fe4a23.yaml @ b'245a488c36003764e3550c2c95fa4bef6119e0ea' - Added support for active-active replication to the RBD driver. This allows users to configure multiple volume backends that are all a member of the same cluster participating in replication. .. releasenotes/notes/rbd-support-list-manageable-snapshots-3474c62ed83fb788.yaml @ b'280cc7c5ae4b07a9a1e23b8f9cc925be9872c8e1' - Allow rbd driver to list manageable snapshots. .. releasenotes/notes/rbd-support-report-backend-state-4e124eb9efd36724.yaml @ b'006296856808063be6c32c2092b40515b773df84' - Allow rbd driver to report backend state. ..
releasenotes/notes/report-backend-state-in-service-list-739a5398eec4a6b7.yaml @ b'03676baac5020bafedb3228f561734fb6d89dc8e' - Added flag 'backend_state: up/down' which will give backend state info in service list. .. releasenotes/notes/report-backend-state-in-service-list-93e9f2b204b735c0.yaml @ b'c5a8000b9c857521e896e1fc39a77f0fcfc12ccc' - Added flag 'backend_state' which will give backend state info in service list. .. releasenotes/notes/scaleio-rebranding-d2d113c5d8e5c118.yaml @ b'a852c46ba483e9a015c30a77fa461b45c1517786' - Dell EMC ScaleIO has been renamed to Dell EMC VxFlex OS. Documentation for the driver can be found under the new name. The driver maintains full backward compatibility with prior ScaleIO releases and no configuration changes are needed upon upgrade to the new version of the driver. .. releasenotes/notes/smbfs-snapshot-attach-14742fe8f5864ac6.yaml @ b'32a08e4d6a18be743e936448f05f97113e80619c' - The SMBFS driver now supports the 'snapshot attach' feature. Special care must be taken when attaching snapshots though, as writing to a snapshot will corrupt the differencing image chain. .. releasenotes/notes/storwize-dr-pool-support-52db3a95e54aef88.yaml @ b'822fb701de48d30e662b5f16270b3c38e8703151' - Added data reduction pool support for thin-provisioned and compressed volumes in the Storwize cinder driver. .. releasenotes/notes/support-az-in-volumetype-8yt6fg67de3976ty.yaml @ b'306fa19079ccf8f5278fdf36341edecd95df04a7' - Availability zones are now supported in volume types, as described below. * ``RESKEY:availability_zones`` is now a reserved spec key for AZ volume types, and administrators can create an AZ volume type that includes AZ restrictions by adding a list of AZs to the extra specs, similar to: ``RESKEY:availability_zones: az1,az2``. * Extra spec ``RESKEY:availability_zones`` will only be used for filtering backends when creating and retyping volumes. * Volume types can be filtered by extra spec: /types?extra_specs={"key":"value"} since microversion "3.52". .. releasenotes/notes/support-image-signature-verification-yu8qub7286et9dh4.yaml @ b'e8c24577b8bd98d86358abf543010b76229c8757' - Added image signature verification support when creating a volume from an image. This depends on signature metadata from glance. This feature is turned on by default; administrators can change the behaviour by updating the option ``verify_glance_signatures``. Also, an additional image metadata ``signature_verified`` has been added to indicate whether signature verification was performed during the creation process. .. releasenotes/notes/sync-bump-versions-a1e6f6359173892e.yaml @ b'3cd2ebd3759c76fdf5a292e612127094c7aa2b17' - Cinder-manage DB sync command can now bump the RPC and Objects versions of the services to avoid a second restart when doing offline upgrades. .. releasenotes/notes/tpool-size-11121f78df24db39.yaml @ b'e570436d1cca5cfa89388aec8b2daa63d01d0250' - Adds support to configure the size of the native thread pool used by the cinder volume and backup services. For the backup we use `backup_native_threads_pool_size` in the `[DEFAULT]` section, and for the backends we use `backend_native_threads_pool_size` in the driver section. .. releasenotes/notes/transfer-snapshots-with-volume-a7763570a807c742.yaml @ b'c0efaa1d46b762693f8fe3a09d0359ead3e097c4' - Support transferring a volume with its snapshots by default in the new V3 API 'v3/volume_transfers'. After microversion 3.55, if users don't want to transfer snapshots, they can use the new optional argument `no_snapshots=True` in the request body of the new transfer creation API. ..
releasenotes/notes/unity-compressed-volume-support-4998dee84534a324.yaml @ b'2da949da1f79a0121d75c50eecdd102382287bda' - Dell EMC Unity driver: Add compressed volume support. .. releasenotes/notes/unity-enable-ssl-14db2497225c4395.yaml @ b'8aa49599c7df62de5ab25a0a841265092e2881f7' - Dell EMC Unity Cinder driver allows enabling/disabling the SSL verification. Admin can set `True` or `False` for `driver_ssl_cert_verify` to enable or disable this function, alternatively set the `driver_ssl_cert_path=` for customized CA path. Both above 2 options should go under the driver section. .. releasenotes/notes/unity-multiattach-support-993b997e522d9e84.yaml @ b'dffff08a204ddf6416cd6ddb036e8e029dc80509' - Dell EMC Unity: Implements `bp unity-multiattach-support `__ to support attaching a volume to multiple servers simultaneously. .. releasenotes/notes/unity-remove-empty-host-17d567dbb6738e4e.yaml @ b'f9a9aa5a25688cac86e5dc060a20374e4a29bbef' - Dell EMC Unity Driver: Adds support for removing empty host. The new option named `remove_empty_host` could be configured as `True` to notify Unity driver to remove the host after the last LUN is detached from it. .. releasenotes/notes/unity-thick-support-fdbef833f2b4d54f.yaml @ b'e458bdbf84aba2dab5fc0f65a49764466016558b' - Dell EMC Unity Driver: Add thick volume support. Refer to `Unity Cinder Configuration document `__ to create a thick volume. .. releasenotes/notes/veritas_access_iscsi_driver-de642dad9e7d0890.yaml @ b'a9fad35a20570e6ecd3757ea50e794a0592c3921' - Added ISCSI based driver for Veritas Access. .. releasenotes/notes/vmax-driver-multiattach-support-43a7f99cd2d742ee.yaml @ b'106cf3cbf0a094755d4af063a05de9aa36ae385d' - Dell EMC VMAX driver has added multiattach support. .. releasenotes/notes/vmax-list-manageable-vols-snaps-6a7f5aa114fae8f3.yaml @ b'd05a7a10dc04335c205ab3ee5a2d03a62c26b8e8' - Dell EMC VMAX driver has added list manageable volumes and snapshots support. .. releasenotes/notes/vmax-metadata-ac9bdd31e7e561c3.yaml @ b'4662ead8c3ef1970bc7be7815bcbacc221f6fe1e' - Log VMAX specific metadata of a volume if debug is enabled. .. releasenotes/notes/vmax-retype-replicated-volumes-325be6e5fd626819.yaml @ b'992542a9fb00efdd479d2d18fd6da848b162adf9' - Support for retype (storage-assisted migration) of replicated volumes on VMAX cinder driver. .. releasenotes/notes/vmware_vmdk_datastore_regex-fe7b68ad69ef7384.yaml @ b'f1e21ee2526e35c60f5d2251d569469dddd4efc5' - VMware VMDK driver and FCD driver now support a config option ``vmware_datastore_regex`` to specify the regular expression pattern to match the name of datastores where backend volumes are created. .. releasenotes/notes/vmware_vmdk_nfs41-450908bbbc9eea6d.yaml @ b'68e3b4a1d544683a7d7b0cfd7f730dc9a0bbdd77' - VMware VMDK driver and FCD driver now support NFS 4.1 datastores in vCenter server. .. releasenotes/notes/vnx-revert-to-snapshot-e5494b6fb5ad5a1e.yaml @ b'2cd65abb713381bbf6155e6e176043f9c41c04a7' - Added support to revert a volume to a snapshot with the Dell EMC VNX driver. .. releasenotes/notes/windows-volume-backup-b328858a20f5a499.yaml @ b'302402df330a52fbe9e531cf5603babad0c1f367' - The Cinder Volume Backup service can now be run on Windows. It supports backing up volumes exposed by SMBFS/iSCSI Windows Cinder Volume backends, as well as any other Cinder backend that's accessible on Windows (e.g. SANs exposing volumes via iSCSI/FC). The Swift and Posix backup drivers are known to be working on Windows. .. 
releasenotes/notes/xtremio-support-multiattache-20b1882a1216a8b2.yaml @ b'607e7688b90e8233ac8c52b896fa11ca44a2b026' - Dell EMC XtremIO driver has added multiattach support. .. _Rocky Series Release Notes_13.0.0_stable_rocky_Known Issues: Known Issues ------------ .. releasenotes/notes/feature-rbd-exclusive-pool-a9bdebdeb1f0bf37.yaml @ b'f33baccc3544cbda6cd5908328a56096046657ed' - If RBD stats collection is taking too long in your environment, maybe even leading to the service appearing as down, you'll want to use the `rbd_exclusive_cinder_pool = true` configuration option if you are using the pool exclusively for Cinder, and maybe even if you are not and can live with the inaccuracy. .. _Rocky Series Release Notes_13.0.0_stable_rocky_Upgrade Notes: Upgrade Notes ------------- .. releasenotes/notes/coprhd-mark-unsupported-aa48145873db1ab1.yaml @ b'19d5e68b46c829cee9285c5baeff16771c1942e2' - The Dell EMC CoprHD drivers have been marked as unsupported and are now deprecated. ``enable_unsupported_driver`` will need to be set to ``True`` in the driver's section in cinder.conf to continue to use it. .. releasenotes/notes/datacore-mark-unsupported-2399bc19a789fb4c.yaml @ b'13b9df4074de945cb21d8c1a66b7746a4b3c4c61' - The DataCore drivers have been marked as unsupported and are now deprecated. ``enable_unsupported_driver`` will need to be set to ``True`` in the driver's section in cinder.conf to continue to use it. .. releasenotes/notes/disco-mark-unsupported-f6eb8208c8c4eb3b.yaml @ b'4f4a6ba23f2e479dd00a6ce9c80a968032f3f57d' - The Disco driver has been marked as unsupported and is now deprecated. ``enable_unsupported_driver`` will need to be set to ``True`` in the driver's section in cinder.conf to continue to use it. .. releasenotes/notes/hgst-mark-unsupported-b2886de36421c8b0.yaml @ b'c88d7ba117205f433be4fcde0c1fef71534c6cad' - The HGST driver has been marked as unsupported and is now deprecated. ``enable_unsupported_driver`` will need to be set to ``True`` in the driver's section in cinder.conf to continue to use it. .. releasenotes/notes/nec-delete-volume-per-limit-d10b9df86f64b80e.yaml @ b'ecfd4d393a12b408b9961358841c70d20b476f49' - In the NEC driver, the number of volumes in a storage pool is no longer limited to 1024. More volumes can be created with storage firmware revision 1015 or later. .. releasenotes/notes/privsep-rocky-35bdfe70ed62a826.yaml @ b'861646d1ba53f6becea59bc50306229e162f0c6c' - The following commands are no longer required to be listed in your rootwrap configuration: cgcreate; and cgset. .. releasenotes/notes/remove-backup-service-to-driver-mapping-4d2ed6f868a64175.yaml @ b'497cd4e3cdbea7b61d9bca46a65561993e0b9f26' - Backup service to driver mapping is removed. If you use old values like 'cinder.backup.services.swift' or 'cinder.backup.services.ceph' it should be changed to 'cinder.backup.drivers.swift' or 'cinder.backup.drivers.ceph' accordingly to get your backup service working. .. releasenotes/notes/remove-deprecated-option-9ad954726ed4d8c2.yaml @ b'd1c5379369b24effdccfe5dde3e93bd21884eda5' - Removed the option ``allow_inuse_volume_type_modification`` which had been deprecated in the Ocata release. .. releasenotes/notes/remove-lvm-over-sub-3c8addbf47827045.yaml @ b'2c05388d5ccbbecbbe02b45aec30f24321da0057' - The LVM driver specific `lvm_max_over_subscription_ratio` setting had been deprecated and is now removed. Over subscription should now be managed using the generic `max_over_subscription_ratio` setting. ..
releasenotes/notes/remove_deprecated_xml-4065b893d781f65c.yaml @ b'c0a5be259e608808d1866dd8f54bcacf8ab6365b' - VMAX driver - Removed deprecated option ``cinder_dell_emc_config_file`` .. releasenotes/notes/sync-bump-versions-a1e6f6359173892e.yaml @ b'3cd2ebd3759c76fdf5a292e612127094c7aa2b17' - On offline upgrades, due to the rolling upgrade mechanism we need to restart the cinder services twice to complete the installation just like in the rolling upgrades case. First you stop the cinder services, then you upgrade them, you sync your DB, then you start all the cinder services, and then you restart them all. To avoid this last restart we can now instruct the DB sync to bump the services after the migration is completed, the command to do this is `cinder-manage db sync --bump-versions` .. _Rocky Series Release Notes_13.0.0_stable_rocky_Deprecation Notes: Deprecation Notes ----------------- .. releasenotes/notes/add-option-max_luns_per_storage_group-dfe3e1396b262bc8.yaml @ b'04847424b462ceade2daaca519a14e28779a026d' - Deprecate option `check_max_pool_luns_threshold`. The VNX driver will always check the threshold. .. releasenotes/notes/coprhd-mark-unsupported-aa48145873db1ab1.yaml @ b'19d5e68b46c829cee9285c5baeff16771c1942e2' - The Dell EMC CoprHD drivers have been marked as unsupported and are now deprecated. ``enable_unsupported_driver`` will need to be set to ``True`` in the driver's section in cinder.conf to continue to use it. If its support status does not change, they will be removed in the Stein development cycle. .. releasenotes/notes/datacore-mark-unsupported-2399bc19a789fb4c.yaml @ b'13b9df4074de945cb21d8c1a66b7746a4b3c4c61' - The DataCore drivers have been marked as unsupported and are now deprecated. ``enable_unsupported_driver`` will need to be set to ``True`` in the driver's section in cinder.conf to continue to use it. If its support status does not change, they will be removed in the Stein development cycle. .. releasenotes/notes/deprecate_san_rest_port-0d8610a872e92e09.yaml @ b'017dd6b4bcf92c14d49421268cb571c00879f3cc' - VMAX driver - configuration tag san_rest_port will be replaced by san_api_port in the next release. .. releasenotes/notes/disco-mark-unsupported-f6eb8208c8c4eb3b.yaml @ b'4f4a6ba23f2e479dd00a6ce9c80a968032f3f57d' - The Disco driver has been marked as unsupported and is now deprecated. ``enable_unsupported_driver`` will need to be set to ``True`` in the driver's section in cinder.conf to continue to use it. If its support status does not change, it will be removed in the Stein development cycle. .. releasenotes/notes/google-auth-for-gcs-backup-1642cd0e741fbdf9.yaml @ b'79d7a4e8da6f1118b5c235928876cf78085f4332' - Cinder's Google backup driver is now called gcs, so ``backup_driver`` configuration for Google Cloud Storage should be updated from ``cinder.backup.drivers.google`` to ``cinder.backup.driver.gcs``. .. releasenotes/notes/hgst-mark-unsupported-b2886de36421c8b0.yaml @ b'c88d7ba117205f433be4fcde0c1fef71534c6cad' - The HGST driver has been marked as unsupported and is now deprecated. ``enable_unsupported_driver`` will need to be set to ``True`` in the driver's section in cinder.conf to continue to use it. If its support status does not change, it will be removed in the Stein development cycle. .. releasenotes/notes/netapp-deprecate-eseries-drivers-bc4f552d277c07b9.yaml @ b'747373f4bdfe4814000a88e4443b56545d5d55bd' - The NetApp E-Series drivers are deprecated as of the Rocky release and will be removed in the Stein release. 
Other configurations of the NetApp driver, i.e Clustered Data ONTAP and Solidfire, are unaffected. .. _Rocky Series Release Notes_13.0.0_stable_rocky_Security Issues: Security Issues --------------- .. releasenotes/notes/bug-1784871-7f67402eb13abca7.yaml @ b'3a39d09166bf6d1c7d2bae63caf3e2a954328862' - Removed the ability to create volumes in a ScaleIO Storage Pool that has zero-padding disabled. A new configuration option had been added to override this new behavior and allow volume creation, but should not be enabled if multiple tenants will utilize volumes from a shared Storage Pool. .. releasenotes/notes/privsep-rocky-35bdfe70ed62a826.yaml @ b'861646d1ba53f6becea59bc50306229e162f0c6c' - Privsep transitions. Cinder is transitioning from using the older style rootwrap privilege escalation path to the new style Oslo privsep path. This should improve performance and security of Cinder in the long term. .. releasenotes/notes/privsep-rocky-35bdfe70ed62a826.yaml @ b'861646d1ba53f6becea59bc50306229e162f0c6c' - Privsep daemons are now started by Cinder when required. These daemons can be started via rootwrap if required. rootwrap configs therefore need to be updated to include new privsep daemon invocations. .. releasenotes/notes/scaleio-zeropadding-a0273c56c4d14fca.yaml @ b'41de06dcf6f1c84b6d4eb2f8d3b4f002d8c67f96' - Removed the ability to create volumes in a ScaleIO Storage Pool that has zero-padding disabled. A new configuration option ``sio_allow_non_padded_volumes`` has been added to override this new behavior and allow unpadded volumes, but should not be enabled if multiple tenants will utilize volumes from a shared Storage Pool. .. _Rocky Series Release Notes_13.0.0_stable_rocky_Bug Fixes: Bug Fixes --------- .. releasenotes/notes/add-option-max_luns_per_storage_group-dfe3e1396b262bc8.yaml @ b'04847424b462ceade2daaca519a14e28779a026d' - Add option `max_luns_per_storage_group` back. The max LUNs per storage group was set to 255 before. With the new option, admin can set it to a larger number. .. releasenotes/notes/bug-1690954-40fc21683977e996.yaml @ b'4d75cbf3c35a8aa917d3970beac99d612f13eed3' - NetApp ONTAP NFS (bug 1690954): Fix wrong usage of export path as volume name when deleting volumes and snapshots. .. releasenotes/notes/bug-1712651-7bc90264eb5001ea.yaml @ b'2b60912d5667350eae7ecbc67d4dba3658518d10' - NetApp ONTAP iSCSI (bug 1712651): Fix ONTAP NetApp iSCSI driver not raising a proper exception when trying to extend an attached volume beyond its max geometry. .. releasenotes/notes/bug-1762424-f76af2f37fe408f1.yaml @ b'029cadbf4067ad6f0bf08588cf439587f3c7052c' - NetApp ONTAP (bug 1762424): Fix ONTAP NetApp driver not being able to extend a volume to a size greater than the corresponding LUN max geometry. .. releasenotes/notes/bug-1765182-34fdc4bb8482f8a5.yaml @ b'792eea0a12bd97f69294ad0570ac330a3f1fe423' - NetApp ONTAP (bug 1765182): Make ONTAP NetApp iSCSI driver and FC driver report to the Cinder scheduler that they don't support online volume extending. .. releasenotes/notes/bug-1765182-bcafd577f4b81eb6.yaml @ b'f33b234aa02a2f682455cbc01758a7330d64e6ae' - Make Cinder scheduler check if backend reports `online_extend_support` before performing an online extend operation. .. releasenotes/notes/bug-1765182-de132ba52167800b.yaml @ b'40d50eccdef9cc1b42bb8e24e6b641b20213720d' - NetApp ONTAP (bug 1765182): Make ONTAP NetApp NFS driver report to the Cinder scheduler that it doesn't support online volume extending. .. 
releasenotes/notes/bug-1765610-qnap-fix-volume-snapshot-create-fail-2bb785eafdb87fb6.yaml @ b'880ff557ca3d6569464b9667ac25825cf5e3c7fd' - Fixed QNAP driver failures to create volumes and snapshots in some cases. .. releasenotes/notes/bug-1766768-qnap-fix-upload-volume-detach-fail-33cbee59f1381bda.yaml @ b'ee9fda3e89b619c058768d5fa17cc3e9ecf4a99f' - Fixed QNAP driver failures to detach the iSCSI device while uploading a volume to an image. .. releasenotes/notes/bug-1773725-xtremio-remove-provisioning-factor-y7r5uy3489yd9pbf.yaml @ b'c157b547067389e697d6eea021de7e71535a62c9' - The XtremIO driver has been fixed to correctly report the "free_capacity_gb" size. .. releasenotes/notes/dell-emc-sc-bugfix-1756914-ffca3133273040f6.yaml @ b'f8980ea128dd6698a1cb3a283d98b47371b854f6' - Dell EMC SC driver correctly returns initialize_connection data when more than one IQN is attached to a volume. This fixes some random Nova Live Migration failures where the connection information being returned was for an IQN other than the one for which it was being requested. .. releasenotes/notes/fail-detach-lun-when-auto-zone-enabled-9c87b18a3acac9d1.yaml @ b'c816be897e7ab1c95b38979b2cc94ecf179e44e7' - Dell EMC Unity Driver: Fixes `bug 1759175 `__ to detach the LUN correctly when auto zone was enabled and the LUN was the last one attached to the host. .. releasenotes/notes/fix-abort-backup-df196e9dcb992586.yaml @ b'4ff9e63707e2c4cf5869f28e3e86fd0606d2db9a' - We no longer leave orphaned chunks on the backup backend or leave a temporary volume/snapshot when aborting a backup. .. releasenotes/notes/fix-cross-az-migration-ce97eff61280e1c7.yaml @ b'fb8085894b69f56091bde19683a919cb15d502cc' - Resolve an issue with cross AZ migrations and retypes where the destination volume kept the source volume's AZ, so we ended up with a volume where the AZ does not match the backend. (bug 1747949) .. releasenotes/notes/fix-extend-volume-939e30f2e9e516bc.yaml @ b'c96f3997104f0dca4ed191e3df92715b33bd1a63' - [`bug 1772421 `_] INFINIDAT fixed a bug in the volume extension feature where volumes were not extended to the target size but instead had the given target size added to their current size. .. releasenotes/notes/fix-import-backup-quota-issue-8yh69hd19u7tuu23.yaml @ b'4b4fbd35da26c7d697ddf18d3f0487f9ea817224' - Cinder will now consume quota when importing a new backup resource. .. releasenotes/notes/fix-quota-deleting-temporary-volume-274e371b425e92cc.yaml @ b'8d9f8629013bcb880fb3e33f03b274c10befdab0' - Fix a quota usage error triggered by a non-admin user backing up an in-use volume. The forced backup uses a temporary volume, and quota usage was incorrectly updated when the temporary volume was deleted after the backup operation completed. Fixes `bug 1778774 `__. .. releasenotes/notes/force-delete-mv-a53924f09c475386.yaml @ b'fe69f679369057a7c381178f770bf68d0bc1cee0' - Volume "force delete" was introduced with the 3.23 API microversion, however the check in the service was incorrectly looking for microversion 3.2. That check has now been fixed. It is possible that an API call using a microversion below 3.23 would previously work for this call, which will now fail. This closes `bug #1783028 `_. .. releasenotes/notes/google-auth-for-gcs-backup-1642cd0e741fbdf9.yaml @ b'79d7a4e8da6f1118b5c235928876cf78085f4332' - The Google backup driver now works when using ``google-api-python-client`` version 1.6.0 or higher. ..
releasenotes/notes/migrate-backup-encryption-keys-to-barbican-6f07fd48d4937b2a.yaml @ b'341dd44ba796e933920da6718a2891e35ed88506' - When encryption keys based on the ConfKeyManager's fixed_key are migrated to Barbican, ConfKeyManager keys stored in the Backup table are included in the migration process. Fixes `bug 1757235 `__. .. releasenotes/notes/modify-ensure-export-1d56a40f5e762aa8.yaml @ b'd5f79c52d886e962e1e42af4d892b35eec5bb81f' - Storwize SVC Driver: Fixes `bug 1749687 `__: previously lsvdisk() was called separately for every 'in-use' volume in order to check if the volume exists on the storage. To avoid excessively long driver initialization, lsvdisk() is now called once per pool. .. releasenotes/notes/netapp-ontap-fix-force-detach-55be3f4ac962b493.yaml @ b'8776c81f64bfee814573f217b140408f57fb302d' - Fixed bug #1783582, where calls to os-force_detach were failing on NetApp ONTAP iSCSI/FC drivers. .. releasenotes/notes/netapp-ontap-use_exact_size-d03c90efbb8a30ac.yaml @ b'67391f1f0f30172190882e7d3f4a4ddc271dfa00' - Fixed bug #1731474 on NetApp Data ONTAP driver that was causing LUNs to be created with a larger size than requested. This fix requires version 9.1 of ONTAP or later. .. releasenotes/notes/policy-for-type-list-and-show-apis-rt56uy78crt5e378.yaml @ b'44b4e5462a5652a58b141e7409f50431a12a7299' - Two new policies "volume_extension:type_get" and "volume_extension:type_get_all" have been added to control type show and type list APIs. .. releasenotes/notes/quobyte_vol-snap-cache-baf607f14d916ec7.yaml @ b'8c72fcadae92640331807f021401a7c250e56286' - Added a new optional cache of volumes generated from snapshots for the Quobyte backend. Enabling this cache speeds up creation of multiple volumes from a single snapshot at the cost of a slight increase in creation time for the first volume generated for this given snapshot. The ``quobyte_volume_from_snapshot_cache`` option is off by default. .. releasenotes/notes/ssl-cert-fix-42e8f263c15d5343.yaml @ b'512fd07124ea7210a6653e519e964a898a31d406' - VMAX driver - fixes SSL certificate verification error. .. releasenotes/notes/storwize-hyperswap-host-site-update-621e763768fab9ee.yaml @ b'b47b199c4f53870880299137c5bb5a079c8a7440' - Updated the parameter storwize_preferred_host_site from StrOpt to DictOpt in cinder back-end configuration, and removed it from volume type configuration. .. releasenotes/notes/sync-bump-versions-a1e6f6359173892e.yaml @ b'3cd2ebd3759c76fdf5a292e612127094c7aa2b17' - After an offline upgrade we had to restart all Cinder services twice; now, with the `cinder-manage db sync --bump-versions` command, we can avoid the second restart. .. releasenotes/notes/tpool-size-11121f78df24db39.yaml @ b'e570436d1cca5cfa89388aec8b2daa63d01d0250' - Fixes a concurrency issue on backups, where only 20 native threads could be executed concurrently. The default is now 60, and it can be changed with `backup_native_threads_pool_size`. .. releasenotes/notes/tpool-size-11121f78df24db39.yaml @ b'e570436d1cca5cfa89388aec8b2daa63d01d0250' - The RBD driver can have bottlenecks if too many slow operations are happening at the same time (for example, many huge volume deletions); we can now use the `backend_native_threads_pool_size` option in the RBD driver section to resolve the issue. .. releasenotes/notes/unity-return-logged-out-initiator-6ab1f96f21bb284c.yaml @ b'1ef06d4a31c3010f170da20fcc823dd81e77c1a7' - Dell EMC Unity Driver: Fixes `bug 1773305 `__ to return the targets which connect to the logged-out initiators.
Then the zone manager could clean up the FC zone based on the correct target wwns. .. _Rocky Series Release Notes_13.0.0_stable_rocky_Other Notes: Other Notes ----------- .. releasenotes/notes/json-schema-validation-0d22576bd556f4e0.yaml @ b'd98dbf4da8852b0726b87520511fcc16b1c75dd8' - Added schema validation support using jsonschema `[json-schema-validation]`_ for all supported v3 APIs. Following APIs were accepting boolean parameters with leading and trailing white spaces (for e.g. " true "). But now with schema validation support, all these boolean parameters henceforth will not accept leading and trailing whitespaces to maintain consistency. * Generic volume groups: * delete group: "POST /v3/{project_id}/groups/{group_id}/action" * failover replication: "POST /v3/{project_id}/groups/{group_id}/action" * Volume Snapshots: * create a snapshot: "POST /v3/{project_id}/snapshots" * Volume_actions: * set bootable: "POST /v3/{project_id}/volumes/{volume_id}/action" * volume readonly update: "POST /v3/{project_id}/volumes/{volume_id}/action" .. _`[json-schema-validation]`: https://blueprints.launchpad.net/cinder/+spec/json-schema-validation .. releasenotes/notes/remove-cinder-manage-logs-cmds-40fb8f475b37fb2f.yaml @ b'adfda23b609aae482208966e8fd65f176d4bcd49' - The "cinder-manage logs" commands have been removed. Information previously gathered by these commands may be found in cinder service and syslog logs. .. releasenotes/notes/vnx-perf-optimize-bd55dc3ef7584228.yaml @ b'b54e7ff3576e495c4a1ed95a3e307b897860209b' - Dell EMC VNX driver: Enhances the performance of create/delete volume. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315528.0 cinder-27.0.0/releasenotes/source/stein.rst0000664000175000017500000000031500000000000020733 0ustar00zuulzuul00000000000000=================================== Stein Series Release Notes =================================== .. release-notes:: :branch: stable/stein :ignore-notes: multiple_clone-82bd7f80ae439080.yaml ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315528.0 cinder-27.0.0/releasenotes/source/train.rst0000664000175000017500000000017600000000000020733 0ustar00zuulzuul00000000000000========================== Train Series Release Notes ========================== .. release-notes:: :branch: stable/train ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315528.0 cinder-27.0.0/releasenotes/source/unreleased.rst0000664000175000017500000000025500000000000021743 0ustar00zuulzuul00000000000000============================== Current Series Release Notes ============================== .. release-notes:: :ignore-notes: msa-multiattach-5407eb60093de8f1.yaml ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315528.0 cinder-27.0.0/releasenotes/source/ussuri.rst0000664000175000017500000000020200000000000021136 0ustar00zuulzuul00000000000000=========================== Ussuri Series Release Notes =========================== .. release-notes:: :branch: stable/ussuri ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315528.0 cinder-27.0.0/releasenotes/source/victoria.rst0000664000175000017500000000020700000000000021431 0ustar00zuulzuul00000000000000============================= Victoria Series Release Notes ============================= .. 
release-notes:: :branch: victoria-eom ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315528.0 cinder-27.0.0/releasenotes/source/wallaby.rst0000664000175000017500000000020300000000000021240 0ustar00zuulzuul00000000000000============================ Wallaby Series Release Notes ============================ .. release-notes:: :branch: wallaby-eom ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315528.0 cinder-27.0.0/releasenotes/source/xena.rst0000664000175000017500000000016700000000000020551 0ustar00zuulzuul00000000000000========================= Xena Series Release Notes ========================= .. release-notes:: :branch: xena-eom ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315528.0 cinder-27.0.0/releasenotes/source/yoga.rst0000664000175000017500000000016700000000000020555 0ustar00zuulzuul00000000000000========================= Yoga Series Release Notes ========================= .. release-notes:: :branch: yoga-eom ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315528.0 cinder-27.0.0/releasenotes/source/zed.rst0000664000175000017500000000016300000000000020374 0ustar00zuulzuul00000000000000======================== Zed Series Release Notes ======================== .. release-notes:: :branch: zed-eom ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315528.0 cinder-27.0.0/reno.yaml0000664000175000017500000001071000000000000014715 0ustar00zuulzuul00000000000000--- # Ignore the kilo-eol tag because that branch does not work with reno # and contains no release notes. closed_branch_tag_re: "(.+)(? Additional instructions available at https://docs.openstack.org/cinder/latest/contributor/releasenotes.html Replace this text with content to appear at the top of the section for this release. All of the prelude content is merged together and then rendered separately from the items listed in other parts of the file, so the text needs to be worded so that both the prelude and the other items make sense when read independently. This may mean repeating some details. Not every release note requires a prelude. Usually only notes describing major features or adding release theme details should have a prelude. features: - | List new features here, or remove this section. All of the list items in this section are combined when the release notes are rendered, so the text needs to be worded so that it does not depend on any information only available in another section, such as the prelude. This may mean repeating some details. Examples for core features and driver specific features: - | We can now limit the number of concurrent backup/restore operations that a Cinder backup service can perform using the ``backup_max_operations`` configuration option. - | RBD driver: Added support for volume manage and unmanage operations. issues: - | List known issues here, or remove this section. All of the list items in this section are combined when the release notes are rendered, so the text needs to be worded so that it does not depend on any information only available in another section, such as the prelude. This may mean repeating some details. upgrade: - | List upgrade notes here, or remove this section. 
All of the list items in this section are combined when the release notes are rendered, so the text needs to be worded so that it does not depend on any information only available in another section, such as the prelude. This may mean repeating some details. deprecations: - | List deprecations notes here, or remove this section. All of the list items in this section are combined when the release notes are rendered, so the text needs to be worded so that it does not depend on any information only available in another section, such as the prelude. This may mean repeating some details. critical: - | Add critical notes here, or remove this section. All of the list items in this section are combined when the release notes are rendered, so the text needs to be worded so that it does not depend on any information only available in another section, such as the prelude. This may mean repeating some details. security: - | Add security notes here, or remove this section. All of the list items in this section are combined when the release notes are rendered, so the text needs to be worded so that it does not depend on any information only available in another section, such as the prelude. This may mean repeating some details. fixes: - | Add normal bug fixes here, or remove this section. All of the list items in this section are combined when the release notes are rendered, so the text needs to be worded so that it does not depend on any information only available in another section, such as the prelude. This may mean repeating some details. Examples for core code fixes, and driver fixes: - | `Bug #1889758 `_: Fixed revert to snapshot not working for non admin users when using the snapshot's name. - | Brocade driver `bug #1866860 `_: Fixed ``AttributeError`` when using ``REST_HTTP`` or ``REST_HTTPS`` as the ``fc_southbound_protocol`` option and an exception is raised by the client. other: - | Add other notes here, or remove this section. All of the list items in this section are combined when the release notes are rendered, so the text needs to be worded so that it does not depend on any information only available in another section, such as the prelude. This may mean repeating some details. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315528.0 cinder-27.0.0/requirements.txt0000664000175000017500000000364500000000000016363 0ustar00zuulzuul00000000000000# The order of packages is significant, because pip processes them in the order # of appearance. Changing the order has an impact on the overall integration # process, which may cause wedges in the gate later. 
pbr>=5.8.0 # Apache-2.0 decorator>=4.4.2 # BSD eventlet>=0.30.1,!=0.32.0 # MIT greenlet>=0.4.16 # MIT iso8601>=0.1.12 # MIT jsonschema>=3.2.0 # MIT keystoneauth1>=4.2.1 # Apache-2.0 keystonemiddleware>=9.1.0 # Apache-2.0 lxml>=4.5.2 # BSD oslo.config>=8.3.2 # Apache-2.0 oslo.concurrency>=4.5.0 # Apache-2.0 oslo.context>=3.4.0 # Apache-2.0 oslo.db>=11.0.0 # Apache-2.0 oslo.log>=4.6.1 # Apache-2.0 oslo.messaging>=14.6.0 # Apache-2.0 oslo.middleware>=4.1.1 # Apache-2.0 oslo.policy>=4.5.0 # Apache-2.0 oslo.privsep>=2.6.2 # Apache-2.0 oslo.reports>=3.2.0 # Apache-2.0 oslo.rootwrap>=6.2.0 # Apache-2.0 oslo.serialization>=4.2.0 # Apache-2.0 oslo.service>=2.8.0 # Apache-2.0 oslo.upgradecheck>=1.1.1 # Apache-2.0 oslo.utils>=6.0.0 # Apache-2.0 oslo.versionedobjects>=2.4.0 # Apache-2.0 osprofiler>=3.4.0 # Apache-2.0 packaging>=20.4 paramiko>=2.7.2 # LGPLv2.1+ Paste>=3.4.3 # MIT PasteDeploy>=2.1.0 # MIT pyparsing>=2.4.7 # MIT python-barbicanclient>=5.0.1 # Apache-2.0 python-glanceclient>=3.2.2 # Apache-2.0 python-keystoneclient>=4.1.1 # Apache-2.0 python-novaclient>=18.2.0 # Apache-2.0 python-swiftclient>=3.10.1 # Apache-2.0 requests>=2.25.1 # Apache-2.0 Routes>=2.4.1 # MIT taskflow>=4.5.0 # Apache-2.0 rtslib-fb>=2.1.74 # Apache-2.0 SQLAlchemy>=1.4.23 # MIT stevedore>=3.2.2 # Apache-2.0 tabulate>=0.8.7 # MIT tenacity>=6.3.1 # Apache-2.0 WebOb>=1.8.6 # MIT oslo.i18n>=5.1.0 # Apache-2.0 oslo.vmware>=3.10.0 # Apache-2.0 os-brick>=6.10.0 # Apache-2.0 os-win>=5.5.0 # Apache-2.0 tooz>=2.8.0 # Apache-2.0 google-api-python-client>=1.11.0 # Apache-2.0 castellan>=3.7.0 # Apache-2.0 cryptography>=3.1 # BSD/Apache-2.0 cursive>=0.2.2 # Apache-2.0 zstd>=1.4.5.1 # BSD boto3>=1.18.49 # Apache-2.0 distro>=1.8.0 # Apache-2.0 tzdata>=2022.4 # MIT ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315576.8951166 cinder-27.0.0/roles/0000775000175000017500000000000000000000000014213 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315576.8951166 cinder-27.0.0/roles/configure-run-migration-tests/0000775000175000017500000000000000000000000022125 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315577.8231254 cinder-27.0.0/roles/configure-run-migration-tests/defaults/0000775000175000017500000000000000000000000023734 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315528.0 cinder-27.0.0/roles/configure-run-migration-tests/defaults/main.yaml0000664000175000017500000000033000000000000025540 0ustar00zuulzuul00000000000000--- migration_source_backend: lvm migration_destination_backend: lvm migration_test_regex: "(.*test_volume_retype_with_migration.*|.*test_volume_migrate_attached.*)" migration_test_results: [] tempest_run_result: {} ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315577.8231254 cinder-27.0.0/roles/configure-run-migration-tests/tasks/0000775000175000017500000000000000000000000023252 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315528.0 cinder-27.0.0/roles/configure-run-migration-tests/tasks/main.yaml0000664000175000017500000000174300000000000025067 0ustar00zuulzuul00000000000000--- - name: Reconfigure tempest.conf ini_file: path: "{{ migration_tempest_conf }}" section: volume option: backend_names 
value: "{{ migration_source_backend }},{{ migration_destination_backend }}" become: true become_user: tempest - set_fact: tempest_run_result: {} - name: Run migration ({{ migration_source_backend }} -> {{ migration_destination_backend }}) include_role: name: run-tempest apply: # ignore the errors for this run, otherwise the other migration tests # won't be executed ignore_errors: yes vars: tempest_test_regex: "{{ migration_test_regex }}" tox_envlist: all - set_fact: _migration_result_item: source: "{{ migration_source_backend }}" destination: "{{ migration_destination_backend }}" result: "{{ tempest_run_result.get('rc', 1) }}" - name: Update the migration test results set_fact: migration_test_results: "{{ migration_test_results + [ _migration_result_item ] }}" ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315576.8951166 cinder-27.0.0/roles/save-cinder-migration-results/0000775000175000017500000000000000000000000022101 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315577.8231254 cinder-27.0.0/roles/save-cinder-migration-results/defaults/0000775000175000017500000000000000000000000023710 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315528.0 cinder-27.0.0/roles/save-cinder-migration-results/defaults/main.yaml0000664000175000017500000000012600000000000025517 0ustar00zuulzuul00000000000000--- devstack_base_dir: /opt/stack tempest_work_dir: "{{ devstack_base_dir }}/tempest" ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315577.8231254 cinder-27.0.0/roles/save-cinder-migration-results/tasks/0000775000175000017500000000000000000000000023226 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315528.0 cinder-27.0.0/roles/save-cinder-migration-results/tasks/main.yaml0000664000175000017500000000056300000000000025042 0ustar00zuulzuul00000000000000--- - block: - template: src: migration_results_reporter.py.j2 dest: "{{ tempest_work_dir }}/migration_results_reporter.py" - name: Generate the results using stestr shell: | stestr run --no-discover --test-path . 
migration_results_reporter args: chdir: "{{ tempest_work_dir }}" become: true become_user: tempest ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315577.8231254 cinder-27.0.0/roles/save-cinder-migration-results/templates/0000775000175000017500000000000000000000000024077 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315528.0 cinder-27.0.0/roles/save-cinder-migration-results/templates/migration_results_reporter.py.j20000664000175000017500000000040300000000000032454 0ustar00zuulzuul00000000000000import unittest class CinderMigrationsMatrixTest(unittest.TestCase): {% for test_case in migration_test_results %} def test__{{ test_case.source }}_to_{{ test_case.destination }}(self): self.assertEqual({{ test_case.result }}, 0) {% endfor %} ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315528.0 cinder-27.0.0/ruff.toml0000664000175000017500000000025000000000000014723 0ustar00zuulzuul00000000000000# hacking already covers E402 (and disagrees w/ ruff) lint.ignore = ["E402"] [lint.per-file-ignores] "cinder/volume/drivers/fungible/swagger_api_client.py" = ["E501"] ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315577.8431256 cinder-27.0.0/setup.cfg0000664000175000017500000001070700000000000014715 0ustar00zuulzuul00000000000000[metadata] name = cinder description = OpenStack Block Storage long_description = file: README.rst author = OpenStack author_email = openstack-discuss@lists.openstack.org url = https://docs.openstack.org/cinder/latest/ python_requires = >=3.10 classifiers = Environment :: OpenStack Intended Audience :: Information Technology Intended Audience :: System Administrators License :: OSI Approved :: Apache Software License Operating System :: POSIX :: Linux Programming Language :: Python Programming Language :: Python :: Implementation :: CPython Programming Language :: Python :: 3 :: Only Programming Language :: Python :: 3 Programming Language :: Python :: 3.10 Programming Language :: Python :: 3.11 Programming Language :: Python :: 3.12 project_urls = Source=https://opendev.org/openstack/cinder Tracker=https://bugs.launchpad.net/cinder [files] data_files = etc/cinder = etc/cinder/api-paste.ini etc/cinder/rootwrap.conf etc/cinder/resource_filters.json etc/cinder/rootwrap.d = etc/cinder/rootwrap.d/* packages = cinder [entry_points] cinder.scheduler.filters = AvailabilityZoneFilter = cinder.scheduler.filters.availability_zone_filter:AvailabilityZoneFilter CapabilitiesFilter = cinder.scheduler.filters.capabilities_filter:CapabilitiesFilter CapacityFilter = cinder.scheduler.filters.capacity_filter:CapacityFilter DifferentBackendFilter = cinder.scheduler.filters.affinity_filter:DifferentBackendFilter DriverFilter = cinder.scheduler.filters.driver_filter:DriverFilter JsonFilter = cinder.scheduler.filters.json_filter:JsonFilter RetryFilter = cinder.scheduler.filters.ignore_attempted_hosts_filter:IgnoreAttemptedHostsFilter SameBackendFilter = cinder.scheduler.filters.affinity_filter:SameBackendFilter InstanceLocalityFilter = cinder.scheduler.filters.instance_locality_filter:InstanceLocalityFilter cinder.scheduler.weights = AllocatedCapacityWeigher = cinder.scheduler.weights.capacity:AllocatedCapacityWeigher CapacityWeigher = cinder.scheduler.weights.capacity:CapacityWeigher ChanceWeigher = cinder.scheduler.weights.chance:ChanceWeigher GoodnessWeigher = 
cinder.scheduler.weights.goodness:GoodnessWeigher VolumeNumberWeigher = cinder.scheduler.weights.volume_number:VolumeNumberWeigher oslo.config.opts = cinder = cinder.opts:list_opts oslo.config.opts.defaults = cinder = cinder.common.config:set_external_library_defaults oslo.policy.enforcer = cinder = cinder.policy:get_enforcer oslo.policy.policies = cinder = cinder.policies:list_rules console_scripts = cinder-api = cinder.cmd.api:main cinder-backup = cinder.cmd.backup:main cinder-manage = cinder.cmd.manage:main cinder-rootwrap = oslo_rootwrap.cmd:main cinder-rtstool = cinder.cmd.rtstool:main cinder-scheduler = cinder.cmd.scheduler:main cinder-status = cinder.cmd.status:main cinder-volume = cinder.cmd.volume:main cinder-volume-usage-audit = cinder.cmd.volume_usage_audit:main wsgi_scripts = cinder-wsgi = cinder.wsgi.wsgi:initialize_application [extras] all = websocket-client>=1.3.2 # LGPLv2+ pyOpenSSL>=17.5.0 # Apache-2.0 storops>=0.5.10 # Apache-2.0 pywbem>=0.7.0 #LGPLv2.1+ python-3parclient>=4.2.10 # Apache-2.0 krest>=1.3.0 # Apache-2.0 infinisdk>=103.0.1 # BSD-3 py-pure-client>=1.47.0 # BSD rsd-lib>=1.1.0 # Apache-2.0 storpool>=7.1.0 # Apache-2.0 storpool.spopenstack>=2.2.1 # Apache-2.0 dfs-sdk>=1.2.25 # Apache-2.0 rbd-iscsi-client>=0.1.8 # Apache-2.0 python-linstor>=1.7.0 # LGPLv3 psutil>=5.7.2 # BSD datacore = websocket-client>=1.3.2 # LGPLv2+ powermax = pyOpenSSL>=17.5.0 # Apache-2.0 vnx = storops>=0.5.10 # Apache-2.0 unity = storops>=0.5.10 # Apache-2.0 fujitsu = pywbem>=0.7.0 #LGPLv2.1+ hpe3par = python-3parclient>=4.2.10 # Apache-2.0 kaminario = krest>=1.3.0 # Apache-2.0 ds8k = pyOpenSSL>=17.5.0 # Apache-2.0 infinidat = infinisdk>=103.0.1 # BSD-3 pure = py-pure-client>=1.47.0 # BSD rsd = rsd-lib>=1.1.0 # Apache-2.0 storpool = storpool>=7.1.0 # Apache-2.0 storpool.spopenstack>=2.2.1 # Apache-2.0 datera = dfs-sdk>=1.2.25 # Apache-2.0 rbd_iscsi = rbd-iscsi-client>=0.1.8 # Apache-2.0 linstor = python-linstor>=1.7.0 # LGPLv3 quobyte = psutil>=5.7.2 # BSD [mypy] show_column_numbers = true show_error_context = true ignore_missing_imports = true follow_imports = skip incremental = true check_untyped_defs = true warn_unused_ignores = true show_error_codes = true pretty = true html_report = mypy-report no_implicit_optional = true strict_equality = true [options] packages = cinder [egg_info] tag_build = tag_date = 0 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315528.0 cinder-27.0.0/setup.py0000664000175000017500000000127100000000000014602 0ustar00zuulzuul00000000000000# Copyright (c) 2013 Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
import setuptools setuptools.setup( setup_requires=['pbr>=2.0.0'], pbr=True) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315528.0 cinder-27.0.0/test-requirements.txt0000664000175000017500000000135700000000000017336 0ustar00zuulzuul00000000000000# The order of packages is significant, because pip processes them in the order # of appearance. Changing the order has an impact on the overall integration # process, which may cause wedges in the gate later. # Install bounded pep8/pyflakes first, then let flake8 install hacking>=7.0.0,<7.1.0 # Apache-2.0 flake8-import-order<0.19.0 # LGPLv3 flake8-logging-format>=0.6.0 # Apache-2.0 stestr>=3.2.1 # Apache-2.0 coverage>=5.5 # Apache-2.0 ddt>=1.4.4 # MIT fixtures>=3.0.0 # Apache-2.0/BSD oslotest>=4.5.0 # Apache-2.0 PyMySQL>=0.10.0 # MIT License psycopg2-binary>=2.8.5 # LGPL/ZPL SQLAlchemy-Utils>=0.37.8 # BSD License testtools>=2.4.0 # MIT doc8>=0.8.1 # Apache-2.0 mypy>=1.7.0,<1.18.0 # MIT moto>=5.0.0 # Apache-2.0 distro>=1.8.0 # Apache-2.0 ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315577.8231254 cinder-27.0.0/tools/0000775000175000017500000000000000000000000014227 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315528.0 cinder-27.0.0/tools/check_exec.py0000775000175000017500000000232700000000000016671 0ustar00zuulzuul00000000000000#!/usr/bin/python3 # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # Print a list and return with error if any executable files are found. # Compatible with both python 2 and 3. import os.path import stat import sys if len(sys.argv) < 2: print("Usage: %s " % sys.argv[0]) sys.exit(1) directories = sys.argv[1:] executable = [] for d in directories: for root, mydir, myfile in os.walk(d): for f in myfile: path = os.path.join(root, f) mode = os.lstat(path).st_mode if stat.S_IXUSR & mode: executable.append(path) if executable: print("Executable files found:") for f in executable: print(f) sys.exit(1) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315528.0 cinder-27.0.0/tools/coding-checks.sh0000775000175000017500000000224100000000000017266 0ustar00zuulzuul00000000000000#!/bin/bash set -eu usage() { echo "Usage: $0 [OPTION]..." echo "Run Cinder's coding check(s)" echo "" echo " -Y, --pylint [] Run pylint check on the entire cinder module or just files changed in basecommit (e.g. 
HEAD~1)" echo " -h, --help Print this usage message" echo exit 0 } process_options() { i=1 while [ $i -le $# ]; do eval opt=\$$i case $opt in -h|--help) usage;; -Y|--pylint) pylint=1;; *) scriptargs="$scriptargs $opt" esac i=$((i+1)) done } run_pylint() { local target="${scriptargs:-HEAD~1}" if [[ "$target" = *"all"* ]]; then files="cinder" else files=$(git diff --name-only --diff-filter=ACMRU $target -- "*.py") fi if [ -n "${files}" ]; then echo "Running pylint against:" printf "\t%s\n" "${files[@]}" pylint --rcfile=.pylintrc --output-format=colorized ${files} -E -j 0 else echo "No python changes in this commit, pylint check not required." exit 0 fi } scriptargs= pylint=1 process_options $@ if [ $pylint -eq 1 ]; then run_pylint exit 0 fi ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1759315577.8271253 cinder-27.0.0/tools/config/0000775000175000017500000000000000000000000015474 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315528.0 cinder-27.0.0/tools/config/__init__.py0000664000175000017500000000000000000000000017573 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315528.0 cinder-27.0.0/tools/config/check_uptodate.sh0000775000175000017500000000401000000000000021010 0ustar00zuulzuul00000000000000#!/usr/bin/env bash # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # This script is used to check if there have been configuration changes that # have not been checked in. # The opts file needs to be present in order to compare it if [ ! -e cinder/opts.py ]; then echo "" echo "#################################################" echo "ERROR: cinder/opts.py file is missing." echo "#################################################" exit 1 fi # Rename the existing file so we can generate a new one to compare mv cinder/opts.py cinder/opts.py.orig python tools/config/generate_cinder_opts.py &> tox-genops.log if [ $? -ne 0 ]; then cat tox-genops.log >&2 echo "" echo "#################################################" echo "ERROR: Non-zero exit from generate_cinder_opts.py." echo " See output above for details." echo "#################################################" mv cinder/opts.py.orig cinder/opts.py exit 1 fi diff cinder/opts.py.orig cinder/opts.py if [ $? -ne 0 ]; then echo "" echo "########################################################" echo "ERROR: Configuration options change detected." echo " A new cinder/opts.py file must be generated." echo " Run 'tox -e genopts' from the base directory" echo " and add the result to your commit." 
echo "########################################################" rm cinder/opts.py mv cinder/opts.py.orig cinder/opts.py exit 1 fi rm cinder/opts.py.orig ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315528.0 cinder-27.0.0/tools/config/cinder-config-generator.conf0000664000175000017500000000112500000000000023035 0ustar00zuulzuul00000000000000[DEFAULT] output_file = etc/cinder/cinder.conf.sample wrap_width = 79 namespace = castellan.config namespace = cinder namespace = keystonemiddleware.auth_token namespace = osprofiler namespace = oslo.config namespace = oslo.concurrency namespace = oslo.db namespace = oslo.log namespace = oslo.messaging namespace = oslo.middleware namespace = oslo.policy namespace = oslo.privsep namespace = oslo.reports namespace = oslo.service.periodic_task namespace = oslo.service.service namespace = oslo.service.sslutils namespace = oslo.service.wsgi namespace = oslo.versionedobjects namespace = os_brick ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315528.0 cinder-27.0.0/tools/config/cinder-policy-generator.conf0000664000175000017500000000011100000000000023061 0ustar00zuulzuul00000000000000[DEFAULT] output_file = etc/cinder/policy.yaml.sample namespace = cinder ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315528.0 cinder-27.0.0/tools/config/generate_cinder_opts.py0000664000175000017500000002361400000000000022237 0ustar00zuulzuul00000000000000#!/usr/bin/env python3 # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import collections import os import subprocess import textwrap from cinder.volume import configuration from cinder.compute import nova OrderedDict = collections.OrderedDict BASEDIR = os.path.split(os.path.realpath(__file__))[0] + "/../../" if __name__ == "__main__": os.chdir(BASEDIR) opt_file = open("cinder/opts.py", 'w') opt_dict = OrderedDict() dir_trees_list = [] REGISTER_OPTS_STR = "CONF.register_opts(" REGISTER_OPT_STR = "CONF.register_opt(" license_str = textwrap.dedent( """ # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License.\n """) opt_file.write(license_str) edit_header = textwrap.dedent( """ ################################################################### # WARNING! # # Do not edit this file directly. This file should be generated by # running the command "tox -e genopts" any time a config option # has been added, changed, or removed. 
###################################################################\n """) opt_file.write(edit_header) opt_file.write("import itertools\n\n") opt_file.write("from keystoneauth1 import loading\n\n") # NOTE(geguileo): We need to register all OVOs before importing any other # cinder files, otherwise any decorator that uses cinder.objects.YYY will # fail with exception AttributeError: 'module' object has no attribute # 'YYY' when running tox -egenconfig opt_file.write( "from cinder import objects # noqa\nobjects.register_all()\n") targetdir = 'cinder' common_string = ('find ' + targetdir + ' -type f -name "*.py" ! ' '-path "*/tests/*" -exec grep -l "%s" {} ' '+ | sed -e "s|^' + BASEDIR + '|/|g" | sort -u') cmd_opts = common_string % REGISTER_OPTS_STR output_opts = subprocess.check_output( # nosec : command is hardcoded '{}'.format(cmd_opts), shell=True, universal_newlines=True) dir_trees_list = output_opts.split() cmd_opt = common_string % REGISTER_OPT_STR output_opt = subprocess.check_output( # nosec : command is hardcoded '{}'.format(cmd_opt), shell=True, universal_newlines=True) temp_list = output_opt.split() for item in temp_list: dir_trees_list.append(item) dir_trees_list.sort() flag = False def _check_import(aline): if len(aline) > 79: new_lines = aline.partition(' as ') return new_lines else: return [aline] for atree in dir_trees_list: if atree in ["tools/config/generate_cinder_opts.py", "cinder/tests/hacking/checks.py", "cinder/volume/configuration.py", "cinder/test.py", "cinder/cmd/status.py"]: continue dirs_list = atree.split('/') import_module = "from " init_import_module = "" import_name = "" for dir in dirs_list: if dir.find(".py") == -1: import_module += dir + "." init_import_module += dir + "." import_name += dir + "_" else: if dir[:-3] != "__init__": import_name += dir[:-3].replace("_", "") import_module = (import_module[:-1] + " import " + dir[:-3] + " as " + import_name) lines = _check_import(import_module) if len(lines) > 1: opt_file.write(lines[0] + lines[1] + "\\\n") opt_file.write(" " + lines[2] + "\n") else: opt_file.write(lines[0] + "\n") else: import_name = import_name[:-1].replace('/', '.') init_import = atree[:-12].replace('/', '.') opt_file.write("import " + init_import + "\n") flag = True if flag is False: opt_dict[import_name] = atree else: opt_dict[init_import_module.strip(".")] = atree flag = False registered_opts_dict = OrderedDict([('DEFAULT', [])]) def _write_item(opts): list_name = opts[-3:] if list_name.lower() == "opt": line_to_write = " [" + opts.strip("\n") + "],\n" opt_line = _check_line_length(line_to_write) if len(opt_line) > 1: opt_file.write(opt_line[0] + opt_line[1] + "\n") opt_file.write(" " + opt_line[2]) else: opt_file.write(opt_line[0]) else: line_to_write = " " + opts.strip("\n") + ",\n" opt_line = _check_line_length(line_to_write) if len(opt_line) > 1: opt_file.write(opt_line[0] + opt_line[1] + "\n") opt_file.write(" " + opt_line[2]) else: opt_file.write(opt_line[0]) if opts.endswith('service_user_opts'): su_dnt = " " * 16 su_plg = su_dnt + "loading.get_auth_plugin_conf_options" opt_file.write( su_plg + "('v3password'),\n" + su_dnt + "loading.get_session_conf_options(),\n") def _retrieve_name(aline): if REGISTER_OPT_STR in aline: str_to_replace = REGISTER_OPT_STR else: str_to_replace = REGISTER_OPTS_STR return aline.replace(str_to_replace, "") def _check_line_length(aline): if len(aline) > 79: temp = aline.split(".") lines_to_write = [] for section in temp: lines_to_write.append(section) lines_to_write.append('.') return lines_to_write else: 
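            # Short lines (79 characters or less) are returned unchanged,
            # wrapped in a single-element list so callers can treat the
            # split and unsplit cases uniformly via len(result) > 1.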
return [aline] for key in opt_dict: fd = os.open(opt_dict[key], os.O_RDONLY) afile = os.fdopen(fd, "r") for aline in afile: exists = aline.find("CONF.register_opt") if exists != -1: # TODO(kjnelson) FIX THIS LATER. These are instances where # CONF.register_opts is happening without actually registering # real lists of opts exists = aline.find('base_san_opts') if (exists != -1) or (key == 'cinder_volume_configuration'): continue group_exists = aline.find(', group=') formatted_opt = _retrieve_name(aline[: group_exists]) formatted_opt = formatted_opt.replace(')', '').strip() if group_exists != -1: group_name = aline[group_exists:-1].replace( ', group=\"\'', '').replace( ', group=', '').strip( "\'\")").upper() # NOTE(dulek): Hack to resolve constants manually. if (group_name.endswith('SHARED_CONF_GROUP') or group_name.lower() == 'backend_defaults'): group_name = configuration.SHARED_CONF_GROUP if (group_name == 'NOVA_GROUP'): group_name = nova.NOVA_GROUP if (group_name == 'GLANCE_GROUP'): group_name = 'glance' if group_name in registered_opts_dict: line = key + "." + formatted_opt registered_opts_dict[group_name].append(line) else: line = key + "." + formatted_opt registered_opts_dict[group_name] = [line] else: line = key + "." + formatted_opt registered_opts_dict['DEFAULT'].append(line) setup_str = ("\n\n" "def list_opts():\n" " return [\n") opt_file.write(setup_str) registered_opts_dict = OrderedDict(sorted(registered_opts_dict.items(), key = lambda x: x[0])) for key in registered_opts_dict: # NOTE(jsbryant): We need to have 'DEFAULT' in uppercase but any # other section using uppercase causes a Sphinx warning. if (key == 'DEFAULT'): section_start_str = (" ('" + key + "',\n" " itertools.chain(\n") else: section_start_str = (" ('" + key.lower() + "',\n" " itertools.chain(\n") opt_file.write(section_start_str) for item in registered_opts_dict[key]: _write_item(item) section_end_str = " )),\n" opt_file.write(section_end_str) closing_str = (" ]\n") opt_file.write(closing_str) opt_file.close() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315528.0 cinder-27.0.0/tools/fast8.sh0000775000175000017500000000141300000000000015612 0ustar00zuulzuul00000000000000#!/bin/bash NUM_COMMITS=${FAST8_NUM_COMMITS:-1} if [[ $NUM_COMMITS = "smart" ]]; then # Run on all commits not submitted yet # (sort of -- only checks vs. "master" since this is easy) NUM_COMMITS=$(git cherry master | wc -l) fi echo "Checking last $NUM_COMMITS commits." cd $(dirname "$0")/.. CHANGED="" CHANGED+="$(git diff --name-only HEAD~${NUM_COMMITS} \*.py | tr '\n' ' ')" while [[ -z $CHANGED ]]; do # Search back until we find a commit containing python files NUM_COMMITS=$((NUM_COMMITS + 1)) CHANGED+="$(git diff --name-only HEAD~${NUM_COMMITS} \*.py | tr '\n' ' ')" ; done # Skip files that don't exist # (have been git rm'd) CHECK="" for FILE in $CHANGED; do if [ -f "$FILE" ]; then CHECK+="$FILE " fi done flake8 $CHECK ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315528.0 cinder-27.0.0/tools/generate_driver_list.py0000775000175000017500000001455500000000000021016 0ustar00zuulzuul00000000000000#! /usr/bin/env python3 # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Generate list of Cinder drivers""" import argparse import operator import os import json import textwrap from cinder.interface import util from cinder import objects # Object loading can cause issues loading drivers, force it up front objects.register_all() parser = argparse.ArgumentParser(prog="generate_driver_list") parser.add_argument("--format", default='str', choices=['str', 'dict'], help="Output format type") # Keep backwards compatibilty with the gate-docs test # The tests pass ['docs'] on the cmdln, but it's never been used. parser.add_argument("output_list", default=None, nargs='?') CI_WIKI_ROOT = "https://wiki.openstack.org/wiki/ThirdPartySystems/" class Output(object): def __init__(self, base_dir, output_list): # At this point we don't care what was passed in, just a trigger # to write this out to the doc tree for now self.driver_file = None if output_list: self.driver_file = open( '%s/doc/source/drivers.rst' % base_dir, 'w+') self.driver_file.write('===================\n') self.driver_file.write('Available Drivers\n') self.driver_file.write('===================\n\n') def __enter__(self): return self def __exit__(self, type, value, traceback): if self.driver_file: self.driver_file.close() def write(self, text): if self.driver_file: self.driver_file.write('%s\n' % text) else: print(text) def format_description(desc, output): desc = desc or '' lines = desc.rstrip('\n').split('\n') output.write('* Description: %s' % lines[0]) output.write('') output.write(textwrap.dedent('\n'.join(lines[1:]))) def format_options(driver_options, output): if driver_options and len(driver_options) > 0: output.write('* Driver Configuration Options:') output.write('') output.write('.. 
list-table:: **Driver configuration options**') output.write(' :header-rows: 1') output.write(' :widths: 14 30') output.write('') output.write(' * - Name = Default Value') output.write(' - (Type) Description') sorted_options = sorted(driver_options, key=operator.attrgetter('name')) for opt in sorted_options: output.write(' * - %s = %s' % (opt.name, opt.default)) output.write(' - (%s) %s' % (opt.type, opt.help)) output.write('') def filter_drivers(drivers): '''This filters all of the drivers into separate lists.''' supported_drivers = [] unsupported_drivers = [] for driver in drivers: if not driver.supported: unsupported_drivers.append(driver) else: supported_drivers.append(driver) return supported_drivers, unsupported_drivers def print_drivers(drivers, config_name, output, section_char='-', display_unsupported=True): for driver in sorted(drivers, key=lambda x: x.class_name): driver_name = driver.class_name if not driver.supported and display_unsupported: driver_name += " (unsupported)" output.write(driver_name) output.write(section_char * len(driver_name)) if driver.version: output.write('* Version: %s' % driver.version) output.write('* %s=%s' % (config_name, driver.class_fqn)) if driver.ci_wiki_name and 'Cinder_Jenkins' not in driver.ci_wiki_name: output.write('* CI info: %s%s' % (CI_WIKI_ROOT, driver.ci_wiki_name)) format_options(driver.driver_options, output) format_description(driver.desc, output) output.write('') output.write('') def output_str(cinder_root, args): with Output(cinder_root, args.output_list) as output: output.write('Volume Drivers') output.write('==============') supported_drivers, unsupported_drivers = filter_drivers( util.get_volume_drivers()) output.write('Supported Drivers') output.write('-----------------') output.write('') print_drivers(supported_drivers, 'volume_driver', output, '~') output.write('Unsupported Drivers') output.write('-------------------') output.write('') print_drivers(unsupported_drivers, 'volume_driver', output, '~') output.write('Backup Drivers') output.write('==============') print_drivers(util.get_backup_drivers(), 'backup_driver', output) output.write('FC Zone Manager Drivers') output.write('=======================') print_drivers(util.get_fczm_drivers(), 'zone_driver', output) def collect_driver_info(driver): """Build the dictionary that describes this driver.""" info = {'name': driver.class_name, 'version': driver.version, 'fqn': driver.class_fqn, 'description': driver.desc, 'ci_wiki_name': driver.ci_wiki_name, 'supported': driver.supported, 'options': driver.driver_options, } return info def output_dict(): """Output the results as a JSON dict.""" driver_list = [] drivers = util.get_volume_drivers() for driver in drivers: driver_list.append(collect_driver_info(driver)) print(json.dumps(driver_list)) def main(): tools_dir = os.path.dirname(os.path.abspath(__file__)) cinder_root = os.path.dirname(tools_dir) cur_dir = os.getcwd() os.chdir(cinder_root) args = parser.parse_args() try: if args.format == 'str': output_str(cinder_root, args) elif args.format == 'dict': output_dict() finally: os.chdir(cur_dir) if __name__ == '__main__': main() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315528.0 cinder-27.0.0/tools/mypywrap.sh0000775000175000017500000000137400000000000016463 0ustar00zuulzuul00000000000000#!/bin/bash # # A wrapper around mypy that allows us to specify what files to run 'mypy' type # checks on. 
Intended to be invoked via tox: # # tox -e mypy # # Eventually this should go away once we have either converted everything or # converted enough and ignored [1] the rest. # # [1] http://mypy.readthedocs.io/en/latest/config_file.html#per-module-flags ROOT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" #export MYPYPATH=$ROOT_DIR/../cinder/tests/stubs/ python -m mypy -V if [ $# -eq 0 ]; then # if no arguments provided, use the standard converted lists lines=$(grep -v '#' $ROOT_DIR/../mypy-files.txt) python -m mypy $OS_MYPY_OPTS ${lines[@]} else # else test what the user asked us to python -m mypy $OS_MYPY_OPTS $@ fi ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315528.0 cinder-27.0.0/tools/test-setup.sh0000775000175000017500000000636400000000000016714 0ustar00zuulzuul00000000000000#!/bin/bash -xe # This script will be run by OpenStack CI before unit tests are run, # it sets up the test system as needed. # Developers should setup their test systems in a similar way. # This setup needs to be run as a user that can run sudo. # The root password for the MySQL database; pass it in via # MYSQL_ROOT_PW. DB_ROOT_PW=${MYSQL_ROOT_PW:-insecure_slave} # This user and its password are used by the tests, if you change it, # your tests might fail. DB_USER=openstack_citest DB_PW=openstack_citest function is_rhel7 { [ -f /usr/bin/yum ] && \ cat /etc/*release | grep -q -e "Red Hat" -e "CentOS" -e "CloudLinux" && \ cat /etc/*release | grep -q 'release 7' } function is_rhel8 { [ -f /usr/bin/dnf ] && \ cat /etc/*release | grep -q -e "Red Hat" -e "CentOS" -e "CloudLinux" && \ cat /etc/*release | grep -q 'release 8' } function is_rhel9 { [ -f /usr/bin/dnf ] && \ cat /etc/*release | grep -q -e "Red Hat" -e "CentOS" -e "CloudLinux" && \ cat /etc/*release | grep -q 'release 9' } function set_conf_line { # file regex value sudo sh -c "grep -q -e '$2' $1 && \ sed -i 's|$2|$3|g' $1 || \ echo '$3' >> $1" } if is_rhel7 || is_rhel8 || is_rhel9; then # mysql needs to be started on centos/rhel sudo systemctl restart mariadb.service # postgres setup for centos sudo postgresql-setup --initdb PG_CONF=/var/lib/pgsql/data/postgresql.conf set_conf_line $PG_CONF '^password_encryption =.*' 'password_encryption = scram-sha-256' PG_HBA=/var/lib/pgsql/data/pg_hba.conf set_conf_line $PG_HBA '^local[ \t]*all[ \t]*all.*' 'local all all peer' set_conf_line $PG_HBA '^host[ \t]*all[ \t]*all[ \t]*127.0.0.1\/32.*' 'host all all 127.0.0.1/32 scram-sha-256' set_conf_line $PG_HBA '^host[ \t]*all[ \t]*all[ \t]*::1\/128.*' 'host all all ::1/128 scram-sha-256' sudo systemctl restart postgresql.service fi sudo -H mysqladmin -u root password $DB_ROOT_PW # It's best practice to remove anonymous users from the database. If # an anonymous user exists, then it matches first for connections and # other connections from that host will not work. sudo -H mysql -u root -p$DB_ROOT_PW -h localhost -e " DELETE FROM mysql.user WHERE User=''; FLUSH PRIVILEGES; CREATE USER '$DB_USER'@'%' IDENTIFIED BY '$DB_PW'; GRANT ALL PRIVILEGES ON *.* TO '$DB_USER'@'%' WITH GRANT OPTION;" # Now create our database. 
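# (The command below connects as the unprivileged $DB_USER over 127.0.0.1
# rather than as root, so the grants issued above must already be in place;
# the database created is openstack_citest with a utf8 character set.)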
mysql -u $DB_USER -p$DB_PW -h 127.0.0.1 -e " SET default_storage_engine=MYISAM; DROP DATABASE IF EXISTS openstack_citest; CREATE DATABASE openstack_citest CHARACTER SET utf8;" # Same for PostgreSQL # Setup user root_roles=$(sudo -H -u postgres psql -t -c " SELECT 'HERE' from pg_roles where rolname='$DB_USER'") if [[ ${root_roles} == *HERE ]];then sudo -H -u postgres psql -c "ALTER ROLE $DB_USER WITH SUPERUSER LOGIN PASSWORD '$DB_PW'" else sudo -H -u postgres psql -c "CREATE ROLE $DB_USER WITH SUPERUSER LOGIN PASSWORD '$DB_PW'" fi # Store password for tests cat << EOF > $HOME/.pgpass *:*:*:$DB_USER:$DB_PW EOF chmod 0600 $HOME/.pgpass # Now create our database psql -h 127.0.0.1 -U $DB_USER -d template1 -c "DROP DATABASE IF EXISTS openstack_citest" createdb -h 127.0.0.1 -U $DB_USER -l C -T template0 -E utf8 openstack_citest ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1759315528.0 cinder-27.0.0/tox.ini0000664000175000017500000002177500000000000014416 0ustar00zuulzuul00000000000000[tox] minversion = 4.0.0 # specify virtualenv here to keep local runs consistent with the # gate (it sets the versions of pip, setuptools, and wheel) requires = virtualenv>=20.17.1 # this allows tox to infer the base python from the environment name # and override any basepython configured in this file ignore_basepython_conflict=true envlist = py3,compliance,pep8 [testenv] usedevelop = true setenv = VIRTUAL_ENV={envdir} OS_STDOUT_CAPTURE=1 OS_STDERR_CAPTURE=1 OS_TEST_TIMEOUT=60 OS_TEST_PATH=./cinder/tests/unit PYTHONDONTWRITEBYTECODE=1 # TODO(stephenfin): Remove once we bump our upper-constraint to SQLAlchemy 2.0 SQLALCHEMY_WARN_20=1 # this environment's install command is used if the 'minversion' or 'requires' # values declared above in the [tox] section require tox to update itself, so # we don't define a non-default install_command here deps = -r{toxinidir}/test-requirements.txt -r{toxinidir}/requirements.txt # By default stestr will set concurrency # to ncpu, to specify something else use # the concurrency= option. # call ie: 'tox -epy27 -- --concurrency=4' commands = stestr run --random {posargs} stestr slowest allowlist_externals = find passenv = *_proxy *_PROXY [testenv:py{3,310,311,312,313}] # NOTE: Do not move the constraints from the install_command into deps, as that # may result in tox using unconstrained/untested dependencies. # We use "usedevelop = True" for tox jobs (except bindep), so tox does 2 # install calls, one for the deps and another for the cinder source code # as editable (pip -e). # Without the constraints in the install_command only the first # installation will honor the upper constraints, and the second install # for cinder itself will not know about the constraints which can result # in installing versions we don't want. # With constraints in the install_command tox will always honor our # constraints. install_command = python -m pip install -c{env:TOX_CONSTRAINTS_FILE:https://releases.openstack.org/constraints/upper/master} {opts} {packages} [testenv:functional] install_command = {[testenv:py3]install_command} setenv = OS_TEST_PATH = ./cinder/tests/functional [testenv:functional-py{3,310,311,312,313}] install_command = {[testenv:functional]install_command} setenv = {[testenv:functional]setenv} [testenv:api-samples] install_command = {[testenv:functional]install_command} setenv = GENERATE_SAMPLES=True PYTHONHASHSEED=0 commands = find . 
-ignore_readdir_race -type f -name "*.pyc" -delete stestr --test-path=./cinder/tests/functional/api_sample_tests run {posargs} stestr slowest [testenv:compliance] install_command = {[testenv:py3]install_command} setenv = OS_TEST_PATH = ./cinder/tests/compliance [testenv:pep8] allowlist_externals = {toxinidir}/tools/config/check_uptodate.sh {toxinidir}/tools/check_exec.py commands = flake8 {posargs} . doc8 {toxinidir}/tools/config/check_uptodate.sh {toxinidir}/tools/check_exec.py {toxinidir}/cinder {toxinidir}/doc/source/ {toxinidir}/releasenotes/notes [testenv:fast8] install_command = {[testenv:py3]install_command} allowlist_externals = {toxinidir}/tools/fast8.sh commands = {toxinidir}/tools/fast8.sh passenv = FAST8_NUM_COMMITS [testenv:pylint] allowlist_externals = {toxinidir}/tools/coding-checks.sh install_command = {[testenv:py3]install_command} deps = -r{toxinidir}/requirements.txt -r{toxinidir}/test-requirements.txt pylint==3.0.2 commands = {toxinidir}/tools/coding-checks.sh --pylint {posargs:all} [testenv:cover] # Also do not run test_coverage_ext tests while gathering coverage as those # tests conflict with coverage. install_command = {[testenv:py3]install_command} setenv = {[testenv]setenv} PYTHON=coverage run --source cinder --parallel-mode commands = stestr run {posargs} coverage combine coverage html -d cover coverage xml -o cover/coverage.xml [testenv:genconfig] install_command = {[testenv:py3]install_command} sitepackages = False commands = oslo-config-generator --config-file=tools/config/cinder-config-generator.conf [testenv:genpolicy] install_command = {[testenv:py3]install_command} commands = oslopolicy-sample-generator --config-file=tools/config/cinder-policy-generator.conf [testenv:genopts] install_command = {[testenv:py3]install_command} sitepackages = False commands = python tools/config/generate_cinder_opts.py [testenv:venv] install_command = {[testenv:py3]install_command} deps = {[testenv]deps} reno commands = {posargs} [testenv:docs] # we intentionally put the constraints in the install_command, not the # deps ... see comment near the top of this file install_command = python -m pip install -c{env:TOX_CONSTRAINTS_FILE:https://releases.openstack.org/constraints/upper/master} {opts} {packages} allowlist_externals = rm deps = doc8 -r{toxinidir}/doc/requirements.txt commands = doc8 rm -rf doc/source/contributor/api doc/build/html doc/build/doctrees sphinx-build -W -j auto -b html -d doc/build/doctrees doc/source doc/build/html # Test the redirects. 
This must run after the main docs build whereto doc/build/html/.htaccess doc/test/redirect-tests.txt [testenv:pdf-docs] install_command = {[testenv:docs]install_command} deps = {[testenv:docs]deps} commands = rm -fr doc/source/contributor/api/ rm -fr doc/build/pdf sphinx-build -W -b latex doc/source doc/build/pdf make -C doc/build/pdf allowlist_externals = make rm [testenv:api-ref] install_command = {[testenv:docs]install_command} allowlist_externals = rm deps = {[testenv:docs]deps} commands = rm -rf api-ref/build sphinx-build -W -b html -d api-ref/build/doctrees api-ref/source api-ref/build/html/ [testenv:releasenotes] install_command = {[testenv:docs]install_command} deps = {[testenv:docs]deps} commands = sphinx-build -a -E -W -j auto -d releasenotes/build/doctrees -b html releasenotes/source releasenotes/build/html [testenv:gendriverlist] install_command = {[testenv:py3]install_command} sitepackages = False commands = python {toxinidir}/tools/generate_driver_list.py [testenv:bandit] install_command = {[testenv:py3]install_command} deps = -r{toxinidir}/test-requirements.txt bandit==1.6.0 commands = bandit -r cinder -n5 -x cinder/tests/* -ll [testenv:bandit-baseline] install_command = {[testenv:bandit]install_command} deps = bandit==1.6.0 commands = bandit-baseline -r cinder -n5 -x cinder/tests/* -ii -ll [testenv:bindep] # Do not install any requirements. We want this to be fast and work even if # system dependencies are missing, since it's used to tell you what system # dependencies are missing! This also means that bindep must be installed # separately, outside of the requirements files, and develop mode disabled # explicitly to avoid unnecessarily installing the checked-out repo too skip_install = True deps = bindep commands = bindep {posargs} usedevelop = False [testenv:mypy] description = Run type checks. setenv = OS_MYPY_OPTS=--install-types --non-interactive install_command = {[testenv:py3]install_command} allowlist_externals = {toxinidir}/tools/mypywrap.sh commands = {toxinidir}/tools/mypywrap.sh {posargs} [flake8] # Following checks are ignored on purpose. # # E251 unexpected spaces around keyword / parameter equals # reason: no improvement in readability # E402 module level import not at top of file # reason: there are numerous places where we import modules # later for legitimate reasons # # W503 line break before binary operator # reason: pep8 itself is not sure about this one and # reversed this rule in 2016 # W504 line break after binary operator # reason: no agreement on this being universally # preferable for our code. Disabled to keep checking # tools from getting in our way with regards to this. # H101 include name with TODO # reason: no real benefit # G200 Logging statements should not include the exception # reason: Many existing cases of this that may be legitimate ignore = E251,E402,W503,W504,H101,G200 # H904 Delay string interpolations at logging calls. 
enable-extensions = H106,H203,H904 exclude = .git,.venv,.tox,dist,tools,doc/ext,*egg,build max-complexity = 30 application-import-names = cinder import-order-style = pep8 [flake8:local-plugins] extension = N322 = checks:no_mutable_default_args N323 = checks:check_explicit_underscore_import C301 = checks:check_datetime_now C303 = checks:check_no_print_statements C309 = checks:no_test_log C310 = checks:CheckLoggingFormatArgs C311 = checks:CheckOptRegistrationArgs C312 = checks:no_translate_logs C313 = checks:validate_assertTrue C336 = checks:dict_constructor_with_list_copy C337 = checks:no_third_party_mock C338 = checks:no_log_warn paths = ./cinder/tests/hacking [doc8] ignore-path=.venv,.git,.tox,*.egg-info,doc/src/api,doc/source/drivers.rst,doc/build,.eggs,doc/source/configuration/tables,./*.txt,releasenotes,doc/source/contributor/api,doc/test extension=.txt,.rst,.inc [testenv:ruff] deps = -r{toxinidir}/requirements.txt -r{toxinidir}/test-requirements.txt ruff commands = ruff check {toxinidir}/cinder {posargs} [hacking] import_exceptions = typing
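# Example invocations of the wrapper-based environments defined above
# (a sketch; file paths and commit counts are illustrative only):
#   tox -e pylint                          # whole tree (posargs default to 'all')
#   tox -e pylint -- HEAD~2                # only files changed since HEAD~2
#   tox -e mypy -- cinder/volume/api.py    # type-check specific files via tools/mypywrap.sh
#   FAST8_NUM_COMMITS=3 tox -e fast8       # pep8-check the last three commits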